diff -ruw linux-4.4.115/arch/arm/boot/dts/include/dt-bindings/input/linux-event-codes.h linux-4.4.115-fbx/arch/arm/boot/dts/include/dt-bindings/input/linux-event-codes.h
--- linux-4.4.115/arch/arm/boot/dts/include/dt-bindings/input/linux-event-codes.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/arch/arm/boot/dts/include/dt-bindings/input/linux-event-codes.h	2019-10-29 09:26:25.545221791 +0100
@@ -611,6 +611,37 @@
 #define KEY_KBDINPUTASSIST_ACCEPT		0x264
 #define KEY_KBDINPUTASSIST_CANCEL		0x265
 
+/* Diagonal movement keys */
+#define KEY_RIGHT_UP			0x266
+#define KEY_RIGHT_DOWN			0x267
+#define KEY_LEFT_UP			0x268
+#define KEY_LEFT_DOWN			0x269
+
+#define KEY_ROOT_MENU			0x26a /* Show Device's Root Menu */
+/* Show Top Menu of the Media (e.g. DVD) */
+#define KEY_MEDIA_TOP_MENU		0x26b
+#define KEY_NUMERIC_11			0x26c
+#define KEY_NUMERIC_12			0x26d
+/*
+ * Toggle Audio Description: refers to an audio service that helps blind and
+ * visually impaired consumers understand the action in a program. Note: in
+ * some countries this is referred to as "Video Description".
+ */
+#define KEY_AUDIO_DESC			0x26e
+#define KEY_3D_MODE			0x26f
+#define KEY_NEXT_FAVORITE		0x270
+#define KEY_STOP_RECORD			0x271
+#define KEY_PAUSE_RECORD		0x272
+#define KEY_VOD				0x273 /* Video on Demand */
+#define KEY_UNMUTE			0x274
+#define KEY_FASTREVERSE			0x275
+#define KEY_SLOWREVERSE			0x276
+/*
+ * Control a data application associated with the currently viewed channel,
+ * e.g. teletext or data broadcast application (MHEG, MHP, HbbTV, etc.)
+ */
+#define KEY_DATA			0x277
+
 #define BTN_TRIGGER_HAPPY		0x2c0
 #define BTN_TRIGGER_HAPPY1		0x2c0
 #define BTN_TRIGGER_HAPPY2		0x2c1
@@ -653,6 +684,18 @@
 #define BTN_TRIGGER_HAPPY39		0x2e6
 #define BTN_TRIGGER_HAPPY40		0x2e7
 
+#define KEY_APP_TV			0x2f1
+#define KEY_APP_REPLAY			0x2f2
+#define KEY_APP_VIDEOCLUB		0x2f3
+#define KEY_APP_WHATSON			0x2f4
+#define KEY_APP_RECORDS			0x2f5
+#define KEY_APP_MEDIA			0x2f6
+#define KEY_APP_YOUTUBE			0x2f7
+#define KEY_APP_RADIOS			0x2f8
+#define KEY_APP_CANALVOD		0x2f9
+#define KEY_APP_PIP			0x2fa
+#define KEY_APP_NETFLIX			0x2fb
+
 /* We avoid low common keys in module aliases so they don't get huge. */
 #define KEY_MIN_INTERESTING	KEY_MUTE
 #define KEY_MAX			0x2ff
@@ -749,7 +792,11 @@
 #define SW_ROTATE_LOCK		0x0c  /* set = rotate locked/disabled */
 #define SW_LINEIN_INSERT	0x0d  /* set = inserted */
 #define SW_MUTE_DEVICE		0x0e  /* set = device disabled */
-#define SW_MAX			0x0f
+#define SW_HPHL_OVERCURRENT	0x0f  /* set = over current on left hph */
+#define SW_HPHR_OVERCURRENT	0x10  /* set = over current on right hph */
+#define SW_MICROPHONE2_INSERT	0x11  /* set = inserted */
+#define SW_UNSUPPORT_INSERT	0x12  /* set = unsupported device inserted */
+#define SW_MAX			0x20
 #define SW_CNT			(SW_MAX+1)
 
 /*
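
Note: the KEY_APP_* range added above (0x2f1..0x2fb) is a vendor extension; it
fills part of the gap between BTN_TRIGGER_HAPPY40 (0x2e7) and KEY_MAX (0x2ff),
so no existing code is renumbered. A minimal evdev consumer for the new codes,
as a sketch built against the patched header; the device node is hypothetical:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <linux/input.h>	/* patched copy provides KEY_APP_* */

int main(void)
{
	struct input_event ev;
	int fd = open("/dev/input/event0", O_RDONLY);	/* hypothetical node */

	if (fd < 0)
		return 1;
	while (read(fd, &ev, sizeof(ev)) == (ssize_t)sizeof(ev)) {
		if (ev.type != EV_KEY || ev.value != 1)
			continue;			/* key-down only */
		if (ev.code == KEY_APP_NETFLIX)
			printf("Netflix app key pressed\n");
	}
	close(fd);
	return 0;
}
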
diff -ruw linux-4.4.115/arch/arm64/boot/dts/include/dt-bindings/input/linux-event-codes.h linux-4.4.115-fbx/arch/arm64/boot/dts/include/dt-bindings/input/linux-event-codes.h
--- linux-4.4.115/arch/arm64/boot/dts/include/dt-bindings/input/linux-event-codes.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/arch/arm64/boot/dts/include/dt-bindings/input/linux-event-codes.h	2019-10-29 09:26:25.545221791 +0100
@@ -611,6 +611,37 @@
 #define KEY_KBDINPUTASSIST_ACCEPT		0x264
 #define KEY_KBDINPUTASSIST_CANCEL		0x265
 
+/* Diagonal movement keys */
+#define KEY_RIGHT_UP			0x266
+#define KEY_RIGHT_DOWN			0x267
+#define KEY_LEFT_UP			0x268
+#define KEY_LEFT_DOWN			0x269
+
+#define KEY_ROOT_MENU			0x26a /* Show Device's Root Menu */
+/* Show Top Menu of the Media (e.g. DVD) */
+#define KEY_MEDIA_TOP_MENU		0x26b
+#define KEY_NUMERIC_11			0x26c
+#define KEY_NUMERIC_12			0x26d
+/*
+ * Toggle Audio Description: refers to an audio service that helps blind and
+ * visually impaired consumers understand the action in a program. Note: in
+ * some countries this is referred to as "Video Description".
+ */
+#define KEY_AUDIO_DESC			0x26e
+#define KEY_3D_MODE			0x26f
+#define KEY_NEXT_FAVORITE		0x270
+#define KEY_STOP_RECORD			0x271
+#define KEY_PAUSE_RECORD		0x272
+#define KEY_VOD				0x273 /* Video on Demand */
+#define KEY_UNMUTE			0x274
+#define KEY_FASTREVERSE			0x275
+#define KEY_SLOWREVERSE			0x276
+/*
+ * Control a data application associated with the currently viewed channel,
+ * e.g. teletext or data broadcast application (MHEG, MHP, HbbTV, etc.)
+ */
+#define KEY_DATA			0x277
+
 #define BTN_TRIGGER_HAPPY		0x2c0
 #define BTN_TRIGGER_HAPPY1		0x2c0
 #define BTN_TRIGGER_HAPPY2		0x2c1
@@ -653,6 +684,18 @@
 #define BTN_TRIGGER_HAPPY39		0x2e6
 #define BTN_TRIGGER_HAPPY40		0x2e7
 
+#define KEY_APP_TV			0x2f1
+#define KEY_APP_REPLAY			0x2f2
+#define KEY_APP_VIDEOCLUB		0x2f3
+#define KEY_APP_WHATSON			0x2f4
+#define KEY_APP_RECORDS			0x2f5
+#define KEY_APP_MEDIA			0x2f6
+#define KEY_APP_YOUTUBE			0x2f7
+#define KEY_APP_RADIOS			0x2f8
+#define KEY_APP_CANALVOD		0x2f9
+#define KEY_APP_PIP			0x2fa
+#define KEY_APP_NETFLIX			0x2fb
+
 /* We avoid low common keys in module aliases so they don't get huge. */
 #define KEY_MIN_INTERESTING	KEY_MUTE
 #define KEY_MAX			0x2ff
@@ -749,7 +792,11 @@
 #define SW_ROTATE_LOCK		0x0c  /* set = rotate locked/disabled */
 #define SW_LINEIN_INSERT	0x0d  /* set = inserted */
 #define SW_MUTE_DEVICE		0x0e  /* set = device disabled */
-#define SW_MAX			0x0f
+#define SW_HPHL_OVERCURRENT	0x0f  /* set = over current on left hph */
+#define SW_HPHR_OVERCURRENT	0x10  /* set = over current on right hph */
+#define SW_MICROPHONE2_INSERT	0x11  /* set = inserted */
+#define SW_UNSUPPORT_INSERT	0x12  /* set = unsupported device inserted */
+#define SW_MAX			0x20
 #define SW_CNT			(SW_MAX+1)
 
 /*
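
Note: this arm64 copy repeats the arm hunks above (the dt-bindings header
exists once per architecture in this tree). On the switch side, SW_MAX grows
from 0x0f to 0x20, so SW_CNT and any bitmap sized from it change; userspace
that wants the new SW_* bits must build against the patched header. A sketch
reading the current switch state with the standard EVIOCGSW ioctl:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/input.h>

#define LONG_BITS	(8 * sizeof(long))

static int test_bit(unsigned int nr, const unsigned long *bits)
{
	return (bits[nr / LONG_BITS] >> (nr % LONG_BITS)) & 1;
}

int main(void)
{
	unsigned long sw[(SW_CNT + LONG_BITS - 1) / LONG_BITS];
	int fd = open("/dev/input/event0", O_RDONLY);	/* hypothetical node */

	memset(sw, 0, sizeof(sw));
	if (fd < 0 || ioctl(fd, EVIOCGSW(sizeof(sw)), sw) < 0)
		return 1;
	printf("mic2 inserted: %d, left hph overcurrent: %d\n",
	       test_bit(SW_MICROPHONE2_INSERT, sw),
	       test_bit(SW_HPHL_OVERCURRENT, sw));
	close(fd);
	return 0;
}
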
diff -ruw linux-4.4.115/arch/arm64/boot/dts/Makefile linux-4.4.115-fbx/arch/arm64/boot/dts/Makefile
--- linux-4.4.115/arch/arm64/boot/dts/Makefile	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/arch/arm64/boot/dts/Makefile	2019-01-22 16:16:21.523228476 +0100
@@ -21,3 +21,4 @@
 dtb-$(CONFIG_OF_ALL_DTBS) := $(patsubst $(dtstree)/%.dts,%.dtb, $(foreach d,$(dts-dirs), $(wildcard $(dtstree)/$(d)/*.dts)))
 
 always		:= $(dtb-y)
+targets += dtbs
diff -ruw linux-4.4.115/arch/arm64/boot/dts/qcom/Makefile linux-4.4.115-fbx/arch/arm64/boot/dts/qcom/Makefile
--- linux-4.4.115/arch/arm64/boot/dts/qcom/Makefile	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/arch/arm64/boot/dts/qcom/Makefile	2019-10-29 09:26:22.901195917 +0100
@@ -1,5 +1,263 @@
-dtb-$(CONFIG_ARCH_QCOM)	+= apq8016-sbc.dtb msm8916-mtp.dtb
+dtb-$(CONFIG_ARCH_MSM8996) += msm8996-v2-pmi8994-cdp.dtb \
+	msm8996-v2-pmi8994-mtp.dtb \
+	msm8996-v2-pmi8994-pmk8001-cdp.dtb \
+	msm8996-v2-pmi8994-pmk8001-mtp.dtb \
+	msm8996-v2-pmi8994-pm8004-cdp.dtb \
+	msm8996-v2-pmi8994-pm8004-mtp.dtb \
+	msm8996-v2-pmi8994-pm8004-pmk8001-cdp.dtb \
+	msm8996-v2-pmi8994-pm8004-pmk8001-mtp.dtb \
+	msm8996-v2-fluid.dtb \
+	msm8996-v2-liquid.dtb \
+	msm8996-v2-dtp.dtb \
+	msm8996-v3-auto-cdp.dtb \
+	msm8996-v3-auto-adp.dtb \
+	msm8996-v3-pmi8994-cdp.dtb \
+	msm8996-v3-pmi8994-mtp.dtb \
+	msm8996-v3-pmi8994-pmk8001-cdp.dtb \
+	msm8996-v3-pmi8994-pmk8001-mtp.dtb \
+	msm8996-v3-pmi8994-pm8004-cdp.dtb \
+	msm8996-v3-pmi8994-pm8004-mtp.dtb \
+	msm8996-v3-pmi8994-pm8004-pmk8001-cdp.dtb \
+	msm8996-v3-pmi8994-pm8004-pmk8001-mtp.dtb \
+	msm8996-v3-pmi8996-cdp.dtb \
+	msm8996-v3-pmi8996-mtp.dtb \
+	msm8996-v3-pmi8996-pmk8001-cdp.dtb \
+	msm8996-v3-pmi8996-pmk8001-mtp.dtb \
+	msm8996-v3-fluid.dtb \
+	msm8996-v3-liquid.dtb \
+	msm8996-v3-dtp.dtb \
+	msm8996-v3-pm8004-mmxf-adp.dtb \
+	msm8996-v3-pm8004-agave-adp.dtb \
+	msm8996-v3-pm8004-agave-adp-lite.dtb \
+	msm8996pro-auto-adp.dtb \
+	msm8996pro-auto-adp-lite.dtb \
+	msm8996pro-auto-cdp.dtb \
+	msm8996pro-auto-cv2x.dtb \
+	msm8996pro-pmi8994-cdp.dtb \
+	msm8996pro-pmi8994-mtp.dtb \
+	msm8996pro-pmi8994-pmk8001-cdp.dtb \
+	msm8996pro-pmi8994-pmk8001-mtp.dtb \
+	msm8996pro-pmi8994-pm8004-cdp.dtb \
+	msm8996pro-pmi8994-pm8004-mtp.dtb \
+	msm8996pro-pmi8994-pm8004-pmk8001-cdp.dtb \
+	msm8996pro-pmi8994-pm8004-pmk8001-mtp.dtb \
+	msm8996pro-pmi8996-cdp.dtb \
+	msm8996pro-pmi8996-mtp.dtb \
+	msm8996pro-pmi8996-pmk8001-cdp.dtb \
+	msm8996pro-pmi8996-pmk8001-mtp.dtb \
+	msm8996pro-v1.1-auto-cdp.dtb \
+	msm8996pro-v1.1-pmi8994-cdp.dtb \
+	msm8996pro-v1.1-pmi8994-mtp.dtb \
+	msm8996pro-v1.1-pmi8994-pmk8001-cdp.dtb \
+	msm8996pro-v1.1-pmi8994-pmk8001-mtp.dtb \
+	msm8996pro-v1.1-pmi8994-pm8004-cdp.dtb \
+	msm8996pro-v1.1-pmi8994-pm8004-mtp.dtb \
+	msm8996pro-v1.1-pmi8994-pm8004-pmk8001-cdp.dtb \
+	msm8996pro-v1.1-pmi8994-pm8004-pmk8001-mtp.dtb \
+	msm8996pro-v1.1-pmi8996-cdp.dtb \
+	msm8996pro-v1.1-pmi8996-mtp.dtb \
+	msm8996pro-v1.1-pmi8996-pmk8001-cdp.dtb \
+	msm8996pro-v1.1-pmi8996-pmk8001-mtp.dtb \
+	apq8096pro-auto-cdp.dtb \
+	apq8096pro-v1.1-auto-adp.dtb \
+	apq8096pro-v1.1-auto-adp-lite.dtb \
+	apq8096pro-liquid.dtb \
+	apq8096pro-v1.1-auto-cdp.dtb \
+	msm8996-v3.0-pmi8994-cdp.dtb \
+	msm8996-v3.0-pmi8994-mtp.dtb \
+	msm8996-v3.0-pmi8994-pm8004-cdp.dtb \
+	msm8996-v3.0-pmi8994-pm8004-mtp.dtb \
+	msm8996-v3.0-pmi8994-pm8004-pmk8001-cdp.dtb \
+	msm8996-v3.0-pmi8994-pmk8001-cdp.dtb \
+	msm8996-v3.0-pmi8996-cdp.dtb \
+	msm8996-v3.0-pmi8996-mtp.dtb \
+	msm8996-v3.0-fluid.dtb \
+	msm8996-v3.0-liquid.dtb \
+	msm8996-v3.0-dtp.dtb \
+	apq8096-v2-pmi8994-cdp.dtb \
+	apq8096-v2-pmi8994-mtp.dtb \
+	apq8096-v2-pmi8994-pmk8001-cdp.dtb \
+	apq8096-v2-pmi8994-pm8004-cdp.dtb \
+	apq8096-v2-pmi8994-pm8004-pmk8001-cdp.dtb \
+	apq8096-v2-liquid.dtb \
+	apq8096-v2-dragonboard.dtb \
+	apq8096-v2-auto-dragonboard.dtb \
+	apq8096-v3-pmi8994-cdp.dtb \
+	apq8096-v3-pmi8994-mtp.dtb \
+	apq8096-v3-pmi8994-pmk8001-cdp.dtb \
+	apq8096-v3-pmi8994-pm8004-cdp.dtb \
+	apq8096-v3-pmi8994-pm8004-pmk8001-cdp.dtb \
+	apq8096-v3-pmi8996-cdp.dtb \
+	apq8096-v3-pmi8996-mtp.dtb \
+	apq8096-v3-liquid.dtb \
+	apq8096-v3-dragonboard.dtb \
+	apq8096-v3-sbc.dtb \
+	apq8096-v3-auto-dragonboard.dtb \
+	apq8096-v3-auto-adp.dtb \
+	apq8096-v3-auto-cdp.dtb \
+	apq8096-v3.0-pmi8994-cdp.dtb \
+	apq8096-v3.0-pmi8994-mtp.dtb \
+	apq8096-v3.0-pmi8994-pm8004-cdp.dtb \
+	apq8096-v3.0-pmi8994-pm8004-pmk8001-cdp.dtb \
+	apq8096-v3.0-pmi8994-pmk8001-cdp.dtb \
+	apq8096-v3.0-pmi8996-cdp.dtb \
+	apq8096-v3.0-pmi8996-mtp.dtb \
+	apq8096-v3.0-liquid.dtb \
+	apq8096-v3.0-dragonboard.dtb \
+	apq8096-v3-pmi8994-mdm9x55-i2s-cdp.dtb \
+	apq8096-v3-pmi8994-pm8004-mdm9x55-i2s-cdp.dtb \
+	apq8096-v3-pmi8994-pm8004-pmk8001-mdm9x55-i2s-cdp.dtb \
+	apq8096-v3-pmi8994-pmk8001-mdm9x55-i2s-cdp.dtb \
+	apq8096-v3-pmi8996-mdm9x55-i2s-cdp.dtb \
+	apq8096-v3-pmi8994-mdm9x55-i2s-mtp.dtb \
+	apq8096-v3-pmi8994-mdm9x55-slimbus-mtp.dtb \
+	apq8096-v3-pmi8996-mdm9x55-i2s-mtp.dtb \
+	apq8096-v3-pmi8996-mdm9x55-slimbus-mtp.dtb \
+	apq8096-v3-pmi8996-dragonboard.dtb \
+	msm8996-auto-mizar.dtb
 
-always		:= $(dtb-y)
+dtb-$(CONFIG_MSM_GVM_QUIN) += vplatform-lfv-msm8996-telematics.dtb \
+	vplatform-lfv-msm8996-ivi.dtb \
+	vplatform-lfv-msm8996-baseline.dtb \
+	vplatform-lfv-msm8996-ivi-la.dtb \
+	vplatform-lfv-msm8996-ivi-lv-mt.dtb
+
+dtb-$(CONFIG_ARCH_MSM8998) += \
+	apq8098-v2.1-mediabox.dtb \
+	apq8098-freebox-proto.dtb \
+	apq8098-freebox-oarfish.dtb \
+	apq8098-freebox-batfish.dtb
+
+dtb-$(CONFIG_ARCH_MSMHAMSTER) += msmhamster-rumi.dtb
+
+dtb-$(CONFIG_ARCH_SDM660) += sdm660-sim.dtb \
+	sdm660-internal-codec-cdp.dtb \
+	sdm660-internal-codec-mtp.dtb \
+	sdm660-internal-codec-rcm.dtb \
+	sdm660-cdp.dtb \
+	sdm660-mtp.dtb \
+	sdm660-qrd.dtb \
+	sdm660-rcm.dtb \
+	sdm660-rumi.dtb \
+	sdm660-pm660a-cdp.dtb \
+	sdm660-pm660a-mtp.dtb \
+	sdm660-pm660a-qrd.dtb \
+	sdm660-pm660a-rcm.dtb \
+	sdm660-pm660a-rumi.dtb \
+	sdm660-internal-codec-pm660a-cdp.dtb \
+	sdm660-internal-codec-pm660a-mtp.dtb \
+	sdm660-internal-codec-pm660a-rcm.dtb \
+	sdm660-pm660a-sim.dtb \
+	sda660-cdp.dtb \
+	sda660-mtp.dtb \
+	sda660-rcm.dtb \
+	sda660-pm660a-cdp.dtb \
+	sda660-pm660a-mtp.dtb \
+	sda660-pm660a-rcm.dtb \
+	sda660-pm660a-qrd-hdk.dtb \
+	sdm660-headset-jacktype-no-cdp.dtb \
+	sdm660-headset-jacktype-no-rcm.dtb \
+	sdm660-pm660a-headset-jacktype-no-cdp.dtb \
+	sdm660-pm660a-headset-jacktype-no-rcm.dtb \
+	sdm660-usbc-audio-mtp.dtb \
+	sdm660-usbc-audio-rcm.dtb \
+	sdm660-fhd-cdp.dtb \
+	sdm660-pm660a-fhd-cdp.dtb \
+	sdm658-mtp.dtb \
+	sdm658-cdp.dtb \
+	sdm658-rcm.dtb \
+	sdm658-qrd.dtb \
+	sdm658-pm660a-mtp.dtb \
+	sdm658-pm660a-cdp.dtb \
+	sdm658-pm660a-rcm.dtb \
+	sdm658-pm660a-qrd.dtb \
+	sdm658-internal-codec-mtp.dtb \
+	sdm658-internal-codec-cdp.dtb \
+	sdm658-internal-codec-rcm.dtb \
+	sdm658-internal-codec-pm660a-mtp.dtb \
+	sdm658-internal-codec-pm660a-cdp.dtb \
+	sdm658-internal-codec-pm660a-rcm.dtb \
+	sda658-cdp.dtb \
+	sda658-mtp.dtb \
+	sda658-rcm.dtb \
+	sda658-pm660a-mtp.dtb \
+	sda658-pm660a-cdp.dtb \
+	sda658-pm660a-rcm.dtb \
+	sdm636-cdp.dtb \
+	sdm636-mtp.dtb \
+	sdm636-qrd.dtb \
+	sdm636-rcm.dtb \
+	sdm636-headset-jacktype-no-cdp.dtb \
+	sdm636-headset-jacktype-no-rcm.dtb \
+	sdm636-internal-codec-cdp.dtb \
+	sdm636-internal-codec-mtp.dtb \
+	sdm636-internal-codec-pm660a-cdp.dtb \
+	sdm636-internal-codec-pm660a-mtp.dtb \
+	sdm636-internal-codec-pm660a-rcm.dtb \
+	sdm636-internal-codec-rcm.dtb \
+	sdm636-pm660a-headset-jacktype-no-cdp.dtb \
+	sdm636-pm660a-headset-jacktype-no-rcm.dtb \
+	sdm636-pm660a-cdp.dtb \
+	sdm636-pm660a-mtp.dtb \
+	sdm636-pm660a-qrd.dtb \
+	sdm636-pm660a-rcm.dtb \
+	sdm636-usbc-audio-mtp.dtb \
+	sdm636-usbc-audio-rcm.dtb \
+	sda636-cdp.dtb \
+	sda636-mtp.dtb \
+	sda636-rcm.dtb \
+	sda636-pm660a-cdp.dtb \
+	sda636-pm660a-mtp.dtb \
+	sda636-pm660a-qrd-hdk.dtb \
+	sda636-pm660a-rcm.dtb
+
+dtb-$(CONFIG_ARCH_SDM630) += sdm630-rumi.dtb \
+	sdm630-pm660a-rumi.dtb \
+	sdm630-mtp.dtb \
+	sdm630-usbc-audio-mtp.dtb \
+	sdm630-usbc-audio-rcm.dtb \
+	sdm630-cdp.dtb \
+	sdm630-rcm.dtb \
+	sdm630-internal-codec-mtp.dtb \
+	sdm630-internal-codec-cdp.dtb \
+	sdm630-internal-codec-rcm.dtb \
+	sdm630-pm660a-cdp.dtb \
+	sdm630-pm660a-mtp.dtb \
+	sdm630-pm660a-rcm.dtb \
+	sdm630-pm660a-qrd.dtb \
+	sdm630-internal-codec-pm660a-cdp.dtb \
+	sdm630-internal-codec-pm660a-mtp.dtb \
+	sdm630-internal-codec-pm660a-rcm.dtb \
+	sda630-mtp.dtb \
+	sda630-cdp.dtb \
+	sda630-rcm.dtb \
+	sda630-pm660a-mtp.dtb \
+	sda630-pm660a-cdp.dtb \
+	sda630-pm660a-rcm.dtb \
+	sda630-pm660a-qrd-hdk.dtb \
+	sdm630-headset-jacktype-no-cdp.dtb \
+	sdm630-headset-jacktype-no-rcm.dtb \
+	sdm630-pm660a-headset-jacktype-no-cdp.dtb \
+	sdm630-pm660a-headset-jacktype-no-rcm.dtb
+
+ifeq ($(CONFIG_ARM64),y)
+always          := $(dtb-y) qcom_dtbs
+always          += $(dtbo-y)
 subdir-y	:= $(dts-dirs)
-clean-files	:= *.dtb
+else
+targets += dtbs
+targets += $(addprefix ../, $(dtb-y))
+
+$(obj)/../%.dtb: $(src)/%.dts FORCE
+	$(call if_changed_dep,dtc)
+
+dtbs: $(addprefix $(obj)/../,$(dtb-y))
+endif
+clean-files := *.dtbo *.dtb qcom_dtbs qcom_dtbs.cmpxz
+
+cmd_dtbs		= ./scripts/dtbs.sh $@ $^
+quiet_cmd_dtbs		= DTBS    $@
+
+DTB_ALIGN=32
+$(obj)/qcom_dtbs: $(addprefix $(obj)/,$(dtb-y))
+	$(call cmd,dtbs)
diff -ruw linux-4.4.115/arch/arm64/boot/Makefile linux-4.4.115-fbx/arch/arm64/boot/Makefile
--- linux-4.4.115/arch/arm64/boot/Makefile	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/arch/arm64/boot/Makefile	2019-01-22 16:16:21.523228476 +0100
@@ -16,12 +16,23 @@
 
 targets := Image Image.gz
 
+DTB_NAMES := $(subst $\",,$(CONFIG_BUILD_ARM64_APPENDED_DTB_IMAGE_NAMES))
+ifneq ($(DTB_NAMES),)
+DTB_LIST := $(addsuffix .dtb,$(DTB_NAMES))
+DTB_OBJS := $(addprefix $(obj)/dts/,$(DTB_LIST))
+else
+DTB_OBJS := $(shell find -L $(obj)/dts/ -name \*.dtb)
+endif
+
 $(obj)/Image: vmlinux FORCE
 	$(call if_changed,objcopy)
 
 $(obj)/Image.bz2: $(obj)/Image FORCE
 	$(call if_changed,bzip2)
 
+$(obj)/Image-dtb: $(obj)/Image $(DTB_OBJS) FORCE
+	$(call if_changed,cat)
+
 $(obj)/Image.gz: $(obj)/Image FORCE
 	$(call if_changed,gzip)
 
@@ -34,6 +45,9 @@
 $(obj)/Image.lzo: $(obj)/Image FORCE
 	$(call if_changed,lzo)
 
+$(obj)/Image.gz-dtb: $(obj)/Image.gz $(DTB_OBJS) FORCE
+	$(call if_changed,cat)
+
 install: $(obj)/Image
 	$(CONFIG_SHELL) $(srctree)/$(src)/install.sh $(KERNELRELEASE) \
 	$(obj)/Image System.map "$(INSTALL_PATH)"
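
Note: the two new -dtb targets only concatenate (if_changed,cat): Image.gz-dtb
is Image.gz with the DTBs selected by DTB_OBJS appended back to back, the
appended-DTB layout many bootloaders expect. A host-side sanity check, as a
sketch: it walks the file looking for the big-endian FDT magic 0xd00dfeed and
the totalsize word that follows it.

#include <stdio.h>
#include <string.h>

int main(int argc, char **argv)
{
	unsigned char h[8];
	long off = 0, size;
	FILE *f = argc > 1 ? fopen(argv[1], "rb") : NULL;

	if (!f)
		return 1;
	while (fseek(f, off, SEEK_SET) == 0 && fread(h, 1, 8, f) == 8) {
		if (memcmp(h, "\xd0\x0d\xfe\xed", 4) == 0) {
			/* big-endian totalsize follows the FDT magic */
			size = ((long)h[4] << 24) | (h[5] << 16) |
			       (h[6] << 8) | h[7];
			printf("DTB at 0x%lx, %ld bytes\n", off, size);
			off += size;	/* blobs sit back to back */
		} else {
			off++;		/* still inside Image/Image.gz */
		}
	}
	fclose(f);
	return 0;
}
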
diff -ruw linux-4.4.115/arch/arm64/crypto/aes-glue.c linux-4.4.115-fbx/arch/arm64/crypto/aes-glue.c
--- linux-4.4.115/arch/arm64/crypto/aes-glue.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/arch/arm64/crypto/aes-glue.c	2019-01-22 16:16:21.535228585 +0100
@@ -294,7 +294,7 @@
 	.cra_blkcipher = {
 		.min_keysize	= AES_MIN_KEY_SIZE,
 		.max_keysize	= AES_MAX_KEY_SIZE,
-		.ivsize		= AES_BLOCK_SIZE,
+		.ivsize		= 0,
 		.setkey		= aes_setkey,
 		.encrypt	= ecb_encrypt,
 		.decrypt	= ecb_decrypt,
@@ -371,7 +371,7 @@
 	.cra_ablkcipher = {
 		.min_keysize	= AES_MIN_KEY_SIZE,
 		.max_keysize	= AES_MAX_KEY_SIZE,
-		.ivsize		= AES_BLOCK_SIZE,
+		.ivsize		= 0,
 		.setkey		= ablk_set_key,
 		.encrypt	= ablk_encrypt,
 		.decrypt	= ablk_decrypt,
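
Note on the two ivsize hunks: ECB takes no IV by definition, so advertising
AES_BLOCK_SIZE misreported the transform through crypto_(a)blkcipher_ivsize().
With ivsize 0 a caller supplies no IV at all, as in this userspace probe over
AF_ALG (a sketch; assumes CONFIG_CRYPTO_USER_API_SKCIPHER):

#include <stdio.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/if_alg.h>

int main(void)
{
	struct sockaddr_alg sa = {
		.salg_family = AF_ALG,
		.salg_type   = "skcipher",
		.salg_name   = "ecb(aes)",
	};
	unsigned char key[16] = { 0 }, pt[16] = { 0 }, ct[16];
	char cbuf[CMSG_SPACE(sizeof(int))] = { 0 };
	struct iovec iov = { .iov_base = pt, .iov_len = sizeof(pt) };
	struct msghdr msg = {
		.msg_control = cbuf, .msg_controllen = sizeof(cbuf),
		.msg_iov = &iov, .msg_iovlen = 1,
	};
	struct cmsghdr *cmsg;
	int tfm, op;

	tfm = socket(AF_ALG, SOCK_SEQPACKET, 0);
	if (tfm < 0 || bind(tfm, (struct sockaddr *)&sa, sizeof(sa)) ||
	    setsockopt(tfm, SOL_ALG, ALG_SET_KEY, key, sizeof(key)))
		return 1;
	op = accept(tfm, NULL, 0);

	cmsg = CMSG_FIRSTHDR(&msg);
	cmsg->cmsg_level = SOL_ALG;
	cmsg->cmsg_type = ALG_SET_OP;
	cmsg->cmsg_len = CMSG_LEN(sizeof(int));
	*(int *)CMSG_DATA(cmsg) = ALG_OP_ENCRYPT;
	/* no ALG_SET_IV control message: ECB is IV-less */

	if (sendmsg(op, &msg, 0) < 0 || read(op, ct, sizeof(ct)) < 0)
		return 1;
	printf("first ciphertext byte: %02x\n", ct[0]);
	return 0;
}
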
diff -ruw linux-4.4.115/arch/arm64/crypto/Kconfig linux-4.4.115-fbx/arch/arm64/crypto/Kconfig
--- linux-4.4.115/arch/arm64/crypto/Kconfig	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/arch/arm64/crypto/Kconfig	2019-01-22 16:16:21.531228549 +0100
@@ -23,6 +23,11 @@
 	depends on ARM64 && KERNEL_MODE_NEON
 	select CRYPTO_HASH
 
+config CRYPTO_POLY_HASH_ARM64_CE
+	tristate "poly_hash (for HEH encryption mode) using ARMv8 Crypto Extensions"
+	depends on ARM64 && KERNEL_MODE_NEON
+	select CRYPTO_HASH
+
 config CRYPTO_AES_ARM64_CE
 	tristate "AES core cipher using ARMv8 Crypto Extensions"
 	depends on ARM64 && KERNEL_MODE_NEON
diff -ruw linux-4.4.115/arch/arm64/crypto/Makefile linux-4.4.115-fbx/arch/arm64/crypto/Makefile
--- linux-4.4.115/arch/arm64/crypto/Makefile	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/arch/arm64/crypto/Makefile	2019-01-22 16:16:21.531228549 +0100
@@ -17,6 +17,9 @@
 obj-$(CONFIG_CRYPTO_GHASH_ARM64_CE) += ghash-ce.o
 ghash-ce-y := ghash-ce-glue.o ghash-ce-core.o
 
+obj-$(CONFIG_CRYPTO_POLY_HASH_ARM64_CE) += poly-hash-ce.o
+poly-hash-ce-y := poly-hash-ce-glue.o poly-hash-ce-core.o
+
 obj-$(CONFIG_CRYPTO_AES_ARM64_CE) += aes-ce-cipher.o
 CFLAGS_aes-ce-cipher.o += -march=armv8-a+crypto
 
diff -ruw linux-4.4.115/arch/arm64/crypto/sha1-ce-core.S linux-4.4.115-fbx/arch/arm64/crypto/sha1-ce-core.S
--- linux-4.4.115/arch/arm64/crypto/sha1-ce-core.S	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/arch/arm64/crypto/sha1-ce-core.S	2019-01-22 16:16:21.535228585 +0100
@@ -82,7 +82,8 @@
 	ldr		dgb, [x0, #16]
 
 	/* load sha1_ce_state::finalize */
-	ldr		w4, [x0, #:lo12:sha1_ce_offsetof_finalize]
+	ldr_l		w4, sha1_ce_offsetof_finalize, x4
+	ldr		w4, [x0, x4]
 
 	/* load input */
 0:	ld1		{v8.4s-v11.4s}, [x1], #64
@@ -132,7 +133,8 @@
 	 * the padding is handled by the C code in that case.
 	 */
 	cbz		x4, 3f
-	ldr		x4, [x0, #:lo12:sha1_ce_offsetof_count]
+	ldr_l		w4, sha1_ce_offsetof_count, x4
+	ldr		x4, [x0, x4]
 	movi		v9.2d, #0
 	mov		x8, #0x80000000
 	movi		v10.2d, #0
diff -ruw linux-4.4.115/arch/arm64/crypto/sha1-ce-glue.c linux-4.4.115-fbx/arch/arm64/crypto/sha1-ce-glue.c
--- linux-4.4.115/arch/arm64/crypto/sha1-ce-glue.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/arch/arm64/crypto/sha1-ce-glue.c	2019-10-29 09:26:22.993196817 +0100
@@ -17,9 +17,6 @@
 #include <linux/crypto.h>
 #include <linux/module.h>
 
-#define ASM_EXPORT(sym, val) \
-	asm(".globl " #sym "; .set " #sym ", %0" :: "I"(val));
-
 MODULE_DESCRIPTION("SHA1 secure hash using ARMv8 Crypto Extensions");
 MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
 MODULE_LICENSE("GPL v2");
@@ -32,6 +29,9 @@
 asmlinkage void sha1_ce_transform(struct sha1_ce_state *sst, u8 const *src,
 				  int blocks);
 
+const u32 sha1_ce_offsetof_count = offsetof(struct sha1_ce_state, sst.count);
+const u32 sha1_ce_offsetof_finalize = offsetof(struct sha1_ce_state, finalize);
+
 static int sha1_ce_update(struct shash_desc *desc, const u8 *data,
 			  unsigned int len)
 {
@@ -52,11 +52,6 @@
 	struct sha1_ce_state *sctx = shash_desc_ctx(desc);
 	bool finalize = !sctx->sst.count && !(len % SHA1_BLOCK_SIZE);
 
-	ASM_EXPORT(sha1_ce_offsetof_count,
-		   offsetof(struct sha1_ce_state, sst.count));
-	ASM_EXPORT(sha1_ce_offsetof_finalize,
-		   offsetof(struct sha1_ce_state, finalize));
-
 	/*
 	 * Allow the asm code to perform the finalization if there is no
 	 * partial data and the input is a round multiple of the block size.
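
Note: this glue change (mirrored for SHA-256 below) drops ASM_EXPORT(), which
defined the structure offsets as absolute assembler symbols from inside a
function body, an inline-asm trick that breaks on some toolchains. Instead it
exports ordinary const u32 objects that sha1-ce-core.S loads via ldr_l. The
idea in miniature (the struct layout here is a stand-in, not the real
sha1_ce_state):

#include <stddef.h>
#include <stdio.h>

struct sha1_ce_state {
	struct {
		unsigned int state[5];	/* stand-in for struct sha1_state */
		unsigned long long count;
	} sst;
	unsigned int finalize;
};

/* External linkage lets the assembler resolve these by name, as in
 * "ldr_l w4, sha1_ce_offsetof_finalize, x4" in the .S hunks above. */
const unsigned int sha1_ce_offsetof_count =
	offsetof(struct sha1_ce_state, sst.count);
const unsigned int sha1_ce_offsetof_finalize =
	offsetof(struct sha1_ce_state, finalize);

int main(void)
{
	printf("count at +%u, finalize at +%u\n",
	       sha1_ce_offsetof_count, sha1_ce_offsetof_finalize);
	return 0;
}
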
diff -ruw linux-4.4.115/arch/arm64/crypto/sha2-ce-core.S linux-4.4.115-fbx/arch/arm64/crypto/sha2-ce-core.S
--- linux-4.4.115/arch/arm64/crypto/sha2-ce-core.S	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/arch/arm64/crypto/sha2-ce-core.S	2019-01-22 16:16:21.535228585 +0100
@@ -88,7 +88,8 @@
 	ld1		{dgav.4s, dgbv.4s}, [x0]
 
 	/* load sha256_ce_state::finalize */
-	ldr		w4, [x0, #:lo12:sha256_ce_offsetof_finalize]
+	ldr_l		w4, sha256_ce_offsetof_finalize, x4
+	ldr		w4, [x0, x4]
 
 	/* load input */
 0:	ld1		{v16.4s-v19.4s}, [x1], #64
@@ -136,7 +137,8 @@
 	 * the padding is handled by the C code in that case.
 	 */
 	cbz		x4, 3f
-	ldr		x4, [x0, #:lo12:sha256_ce_offsetof_count]
+	ldr_l		w4, sha256_ce_offsetof_count, x4
+	ldr		x4, [x0, x4]
 	movi		v17.2d, #0
 	mov		x8, #0x80000000
 	movi		v18.2d, #0
diff -ruw linux-4.4.115/arch/arm64/crypto/sha2-ce-glue.c linux-4.4.115-fbx/arch/arm64/crypto/sha2-ce-glue.c
--- linux-4.4.115/arch/arm64/crypto/sha2-ce-glue.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/arch/arm64/crypto/sha2-ce-glue.c	2019-10-29 09:26:22.993196817 +0100
@@ -17,9 +17,6 @@
 #include <linux/crypto.h>
 #include <linux/module.h>
 
-#define ASM_EXPORT(sym, val) \
-	asm(".globl " #sym "; .set " #sym ", %0" :: "I"(val));
-
 MODULE_DESCRIPTION("SHA-224/SHA-256 secure hash using ARMv8 Crypto Extensions");
 MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
 MODULE_LICENSE("GPL v2");
@@ -32,6 +29,11 @@
 asmlinkage void sha2_ce_transform(struct sha256_ce_state *sst, u8 const *src,
 				  int blocks);
 
+const u32 sha256_ce_offsetof_count = offsetof(struct sha256_ce_state,
+					      sst.count);
+const u32 sha256_ce_offsetof_finalize = offsetof(struct sha256_ce_state,
+						 finalize);
+
 static int sha256_ce_update(struct shash_desc *desc, const u8 *data,
 			    unsigned int len)
 {
@@ -52,11 +54,6 @@
 	struct sha256_ce_state *sctx = shash_desc_ctx(desc);
 	bool finalize = !sctx->sst.count && !(len % SHA256_BLOCK_SIZE);
 
-	ASM_EXPORT(sha256_ce_offsetof_count,
-		   offsetof(struct sha256_ce_state, sst.count));
-	ASM_EXPORT(sha256_ce_offsetof_finalize,
-		   offsetof(struct sha256_ce_state, finalize));
-
 	/*
 	 * Allow the asm code to perform the finalization if there is no
 	 * partial data and the input is a round multiple of the block size.
diff -ruw linux-4.4.115/arch/arm64/include/asm/acpi.h linux-4.4.115-fbx/arch/arm64/include/asm/acpi.h
--- linux-4.4.115/arch/arm64/include/asm/acpi.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/arch/arm64/include/asm/acpi.h	2019-01-22 16:16:21.535228585 +0100
@@ -87,9 +87,26 @@
 static inline void acpi_init_cpus(void) { }
 #endif /* CONFIG_ACPI */
 
+#ifdef CONFIG_ARM64_ACPI_PARKING_PROTOCOL
+bool acpi_parking_protocol_valid(int cpu);
+void __init
+acpi_set_mailbox_entry(int cpu, struct acpi_madt_generic_interrupt *processor);
+#else
+static inline bool acpi_parking_protocol_valid(int cpu) { return false; }
+static inline void
+acpi_set_mailbox_entry(int cpu, struct acpi_madt_generic_interrupt *processor)
+{}
+#endif
+
 static inline const char *acpi_get_enable_method(int cpu)
 {
-	return acpi_psci_present() ? "psci" : NULL;
+	if (acpi_psci_present())
+		return "psci";
+
+	if (acpi_parking_protocol_valid(cpu))
+		return "parking-protocol";
+
+	return NULL;
 }
 
 #ifdef	CONFIG_ACPI_APEI
diff -ruw linux-4.4.115/arch/arm64/include/asm/alternative.h linux-4.4.115-fbx/arch/arm64/include/asm/alternative.h
--- linux-4.4.115/arch/arm64/include/asm/alternative.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/arch/arm64/include/asm/alternative.h	2019-01-22 16:16:21.535228585 +0100
@@ -1,6 +1,9 @@
 #ifndef __ASM_ALTERNATIVE_H
 #define __ASM_ALTERNATIVE_H
 
+#include <asm/cpufeature.h>
+#include <asm/insn.h>
+
 #ifndef __ASSEMBLY__
 
 #include <linux/init.h>
@@ -19,7 +22,6 @@
 
 void __init apply_alternatives_all(void);
 void apply_alternatives(void *start, size_t length);
-void free_alternatives_memory(void);
 
 #define ALTINSTR_ENTRY(feature)						      \
 	" .word 661b - .\n"				/* label           */ \
@@ -64,6 +66,8 @@
 
 #else
 
+#include <asm/assembler.h>
+
 .macro altinstruction_entry orig_offset alt_offset feature orig_len alt_len
 	.word \orig_offset - .
 	.word \alt_offset - .
@@ -87,55 +91,147 @@
 .endm
 
 /*
- * Begin an alternative code sequence.
+ * Alternative sequences
+ *
+ * The code for the case where the capability is not present will be
+ * assembled and linked as normal. There are no restrictions on this
+ * code.
+ *
+ * The code for the case where the capability is present will be
+ * assembled into a special section to be used for dynamic patching.
+ * Code for that case must:
  *
- * The code that follows this macro will be assembled and linked as
- * normal. There are no restrictions on this code.
+ * 1. Be exactly the same length (in bytes) as the default code
+ *    sequence.
+ *
+ * 2. Not contain a branch target that is used outside of the
+ *    alternative sequence it is defined in (branches into an
+ *    alternative sequence are not fixed up).
  */
-.macro alternative_if_not cap, enable = 1
-	.if \enable
+
+/*
+ * Begin an alternative code sequence.
+ */
+.macro alternative_if_not cap
+	.set .Lasm_alt_mode, 0
 	.pushsection .altinstructions, "a"
 	altinstruction_entry 661f, 663f, \cap, 662f-661f, 664f-663f
 	.popsection
 661:
-	.endif
+.endm
+
+.macro alternative_if cap
+	.set .Lasm_alt_mode, 1
+	.pushsection .altinstructions, "a"
+	altinstruction_entry 663f, 661f, \cap, 664f-663f, 662f-661f
+	.popsection
+	.pushsection .altinstr_replacement, "ax"
+	.align 2	/* So GAS knows label 661 is suitably aligned */
+661:
 .endm
 
 /*
- * Provide the alternative code sequence.
- *
- * The code that follows this macro is assembled into a special
- * section to be used for dynamic patching. Code that follows this
- * macro must:
- *
- * 1. Be exactly the same length (in bytes) as the default code
- *    sequence.
- *
- * 2. Not contain a branch target that is used outside of the
- *    alternative sequence it is defined in (branches into an
- *    alternative sequence are not fixed up).
+ * Provide the other half of the alternative code sequence.
  */
-.macro alternative_else, enable = 1
-	.if \enable
-662:	.pushsection .altinstr_replacement, "ax"
-663:
+.macro alternative_else
+662:
+	.if .Lasm_alt_mode==0
+	.pushsection .altinstr_replacement, "ax"
+	.else
+	.popsection
 	.endif
+663:
 .endm
 
 /*
  * Complete an alternative code sequence.
  */
-.macro alternative_endif, enable = 1
-	.if \enable
-664:	.popsection
+.macro alternative_endif
+664:
+	.if .Lasm_alt_mode==0
+	.popsection
+	.endif
 	.org	. - (664b-663b) + (662b-661b)
 	.org	. - (662b-661b) + (664b-663b)
-	.endif
+.endm
+
+/*
+ * Provides a trivial alternative or default sequence consisting solely
+ * of NOPs. The number of NOPs is chosen automatically to match the
+ * previous case.
+ */
+.macro alternative_else_nop_endif
+alternative_else
+	nops	(662b-661b) / AARCH64_INSN_SIZE
+alternative_endif
 .endm
 
 #define _ALTERNATIVE_CFG(insn1, insn2, cap, cfg, ...)	\
 	alternative_insn insn1, insn2, cap, IS_ENABLED(cfg)
 
+.macro user_alt, label, oldinstr, newinstr, cond
+9999:	alternative_insn "\oldinstr", "\newinstr", \cond
+	_ASM_EXTABLE 9999b, \label
+.endm
+
+/*
+ * Generate the assembly for UAO alternatives with exception table entries.
+ * This is complicated as there are no post-increment or pair versions of the
+ * unprivileged instructions, and USER() only works for single instructions.
+ */
+#ifdef CONFIG_ARM64_UAO
+	.macro uao_ldp l, reg1, reg2, addr, post_inc
+		alternative_if_not ARM64_HAS_UAO
+8888:			ldp	\reg1, \reg2, [\addr], \post_inc;
+8889:			nop;
+			nop;
+		alternative_else
+			ldtr	\reg1, [\addr];
+			ldtr	\reg2, [\addr, #8];
+			add	\addr, \addr, \post_inc;
+		alternative_endif
+
+		_asm_extable	8888b,\l;
+		_asm_extable	8889b,\l;
+	.endm
+
+	.macro uao_stp l, reg1, reg2, addr, post_inc
+		alternative_if_not ARM64_HAS_UAO
+8888:			stp	\reg1, \reg2, [\addr], \post_inc;
+8889:			nop;
+			nop;
+		alternative_else
+			sttr	\reg1, [\addr];
+			sttr	\reg2, [\addr, #8];
+			add	\addr, \addr, \post_inc;
+		alternative_endif
+
+		_asm_extable	8888b,\l;
+		_asm_extable	8889b,\l;
+	.endm
+
+	.macro uao_user_alternative l, inst, alt_inst, reg, addr, post_inc
+		alternative_if_not ARM64_HAS_UAO
+8888:			\inst	\reg, [\addr], \post_inc;
+			nop;
+		alternative_else
+			\alt_inst	\reg, [\addr];
+			add		\addr, \addr, \post_inc;
+		alternative_endif
+
+		_asm_extable	8888b,\l;
+	.endm
+#else
+	.macro uao_ldp l, reg1, reg2, addr, post_inc
+		USER(\l, ldp \reg1, \reg2, [\addr], \post_inc)
+	.endm
+	.macro uao_stp l, reg1, reg2, addr, post_inc
+		USER(\l, stp \reg1, \reg2, [\addr], \post_inc)
+	.endm
+	.macro uao_user_alternative l, inst, alt_inst, reg, addr, post_inc
+		USER(\l, \inst \reg, [\addr], \post_inc)
+	.endm
+#endif
 
 #endif  /*  __ASSEMBLY__  */
 
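Note: the reworked macros split every sequence into a default body and a
same-length replacement that is live-patched in when the named CPU capability
is set; the new alternative_if inverts which half lands in
.altinstr_replacement. The C side exposes the same mechanism as the
ALTERNATIVE() string macro (defined in the !__ASSEMBLY__ half of this header,
outside the hunk). A hedged sketch: both alternatives are one 4-byte
instruction, as the length rule requires, and the literal "msr pan, #1" needs
an ARMv8.1 assembler (the kernel itself wraps it as SET_PSTATE_PAN()):

#include <asm/alternative.h>
#include <asm/cpufeature.h>

/* Runs a nop on CPUs without PAN; where the boot-time capability scan
 * set ARM64_HAS_PAN, apply_alternatives() rewrites the nop in place. */
static inline void set_pan_sketch(void)
{
	asm volatile(ALTERNATIVE("nop", "msr pan, #1", ARM64_HAS_PAN));
}
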
diff -ruw linux-4.4.115/arch/arm64/include/asm/arch_gicv3.h linux-4.4.115-fbx/arch/arm64/include/asm/arch_gicv3.h
--- linux-4.4.115/arch/arm64/include/asm/arch_gicv3.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/arch/arm64/include/asm/arch_gicv3.h	2019-01-22 16:16:21.535228585 +0100
@@ -103,7 +103,8 @@
 	u64 irqstat;
 
 	asm volatile("mrs_s %0, " __stringify(ICC_IAR1_EL1) : "=r" (irqstat));
-	dsb(sy);
+	/* As per the architecture specification */
+	mb();
 	return irqstat;
 }
 
@@ -132,6 +133,9 @@
 static inline void gic_write_pmr(u32 val)
 {
 	asm volatile("msr_s " __stringify(ICC_PMR_EL1) ", %0" : : "r" ((u64)val));
+	/* As per the architecture specification */
+	isb();
+	mb();
 }
 
 static inline void gic_write_ctlr(u32 val)
@@ -149,6 +153,9 @@
 static inline void gic_write_sgi1r(u64 val)
 {
 	asm volatile("msr_s " __stringify(ICC_SGI1R_EL1) ", %0" : : "r" (val));
+	/* As per the architecture specification */
+	isb();
+	mb();
 }
 
 static inline u32 gic_read_sre(void)
@@ -165,8 +172,8 @@
 	isb();
 }
 
-#define gic_read_typer(c)		readq_relaxed(c)
-#define gic_write_irouter(v, c)		writeq_relaxed(v, c)
+#define gic_read_typer(c)		readq_relaxed_no_log(c)
+#define gic_write_irouter(v, c)		writeq_relaxed_no_log(v, c)
 
 #endif /* __ASSEMBLY__ */
 #endif /* __ASM_ARCH_GICV3_H */
diff -ruw linux-4.4.115/arch/arm64/include/asm/arch_timer.h linux-4.4.115-fbx/arch/arm64/include/asm/arch_timer.h
--- linux-4.4.115/arch/arm64/include/asm/arch_timer.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/arch/arm64/include/asm/arch_timer.h	2019-01-22 16:16:21.535228585 +0100
@@ -119,7 +119,14 @@
 	u64 cval;
 
 	isb();
+#if IS_ENABLED(CONFIG_MSM_TIMER_LEAP)
+#define L32_BITS	0x00000000FFFFFFFF
+	do {
 	asm volatile("mrs %0, cntvct_el0" : "=r" (cval));
+	} while ((cval & L32_BITS) == L32_BITS);
+#else
+	asm volatile("mrs %0, cntvct_el0" : "=r" (cval));
+#endif
 
 	return cval;
 }
diff -ruw linux-4.4.115/arch/arm64/include/asm/assembler.h linux-4.4.115-fbx/arch/arm64/include/asm/assembler.h
--- linux-4.4.115/arch/arm64/include/asm/assembler.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/arch/arm64/include/asm/assembler.h	2019-10-29 09:26:22.993196817 +0100
@@ -1,5 +1,5 @@
 /*
- * Based on arch/arm/include/asm/assembler.h
+ * Based on arch/arm/include/asm/assembler.h, arch/arm/mm/proc-macros.S
  *
  * Copyright (C) 1996-2000 Russell King
  * Copyright (C) 2012 ARM Ltd.
@@ -23,6 +23,10 @@
 #ifndef __ASM_ASSEMBLER_H
 #define __ASM_ASSEMBLER_H
 
+#include <asm/asm-offsets.h>
+#include <asm/cpufeature.h>
+#include <asm/page.h>
+#include <asm/pgtable-hwdef.h>
 #include <asm/ptrace.h>
 #include <asm/thread_info.h>
 
@@ -49,6 +53,27 @@
 	msr	daifclr, #2
 	.endm
 
+	.macro	save_and_disable_irq, flags
+	mrs	\flags, daif
+	msr	daifset, #2
+	.endm
+
+	.macro	restore_irq, flags
+	msr	daif, \flags
+	.endm
+
+/*
+ * Save/disable and restore interrupts.
+ */
+	.macro	save_and_disable_irqs, olddaif
+	mrs	\olddaif, daif
+	disable_irq
+	.endm
+
+	.macro	restore_irqs, olddaif
+	msr	daif, \olddaif
+	.endm
+
 /*
  * Enable and disable debug exceptions.
  */
@@ -94,12 +119,28 @@
 	dmb	\opt
 	.endm
 
+/*
+ * NOP sequence
+ */
+	.macro	nops, num
+	.rept	\num
+	nop
+	.endr
+	.endm
+
+/*
+ * Emit an entry into the exception table
+ */
+	.macro		_asm_extable, from, to
+	.pushsection	__ex_table, "a"
+	.align		3
+	.long		(\from - .), (\to - .)
+	.popsection
+	.endm
+
 #define USER(l, x...)				\
 9999:	x;					\
-	.section __ex_table,"a";		\
-	.align	3;				\
-	.quad	9999b,l;			\
-	.previous
+	_asm_extable	9999b, l
 
 /*
  * Register aliases.
@@ -194,6 +235,133 @@
 	.endm
 
 /*
+ * @dst: Result of per_cpu(sym, smp_processor_id())
+ * @sym: The name of the per-cpu variable
+ * @tmp: scratch register
+ */
+	.macro adr_this_cpu, dst, sym, tmp
+	adr_l	\dst, \sym
+	mrs	\tmp, tpidr_el1
+	add	\dst, \dst, \tmp
+	.endm
+
+	/*
+	 * @dst: Result of READ_ONCE(per_cpu(sym, smp_processor_id()))
+	 * @sym: The name of the per-cpu variable
+	 * @tmp: scratch register
+	 */
+	.macro ldr_this_cpu dst, sym, tmp
+	adr_l	\dst, \sym
+	mrs	\tmp, tpidr_el1
+	ldr	\dst, [\dst, \tmp]
+	.endm
+
+/*
+ * vma_vm_mm - get mm pointer from vma pointer (vma->vm_mm)
+ */
+	.macro	vma_vm_mm, rd, rn
+	ldr	\rd, [\rn, #VMA_VM_MM]
+	.endm
+
+/*
+ * mmid - get context id from mm pointer (mm->context.id)
+ */
+	.macro	mmid, rd, rn
+	ldr	\rd, [\rn, #MM_CONTEXT_ID]
+	.endm
+
+/*
+ * dcache_line_size - get the minimum D-cache line size from the CTR register.
+ */
+	.macro	dcache_line_size, reg, tmp
+	mrs	\tmp, ctr_el0			// read CTR
+	ubfm	\tmp, \tmp, #16, #19		// cache line size encoding
+	mov	\reg, #4			// bytes per word
+	lsl	\reg, \reg, \tmp		// actual cache line size
+	.endm
+
+/*
+ * icache_line_size - get the minimum I-cache line size from the CTR register.
+ */
+	.macro	icache_line_size, reg, tmp
+	mrs	\tmp, ctr_el0			// read CTR
+	and	\tmp, \tmp, #0xf		// cache line size encoding
+	mov	\reg, #4			// bytes per word
+	lsl	\reg, \reg, \tmp		// actual cache line size
+	.endm
+
+/*
+ * tcr_set_idmap_t0sz - update TCR.T0SZ so that we can load the ID map
+ */
+	.macro	tcr_set_idmap_t0sz, valreg, tmpreg
+#ifndef CONFIG_ARM64_VA_BITS_48
+	ldr_l	\tmpreg, idmap_t0sz
+	bfi	\valreg, \tmpreg, #TCR_T0SZ_OFFSET, #TCR_TxSZ_WIDTH
+#endif
+	.endm
+
+/*
+ * Macro to perform a data cache maintenance for the interval
+ * [kaddr, kaddr + size)
+ *
+ * 	op:		operation passed to dc instruction
+ * 	domain:		domain used in dsb instruction
+ * 	kaddr:		starting virtual address of the region
+ * 	size:		size of the region
+ * 	Corrupts:	kaddr, size, tmp1, tmp2
+ */
+	.macro dcache_by_line_op op, domain, kaddr, size, tmp1, tmp2
+	dcache_line_size \tmp1, \tmp2
+	add	\size, \kaddr, \size
+	sub	\tmp2, \tmp1, #1
+	bic	\kaddr, \kaddr, \tmp2
+9998:
+	.if	(\op == cvau || \op == cvac)
+alternative_if_not ARM64_WORKAROUND_CLEAN_CACHE
+	dc	\op, \kaddr
+alternative_else
+	dc	civac, \kaddr
+alternative_endif
+	.else
+	dc	\op, \kaddr
+	.endif
+	add	\kaddr, \kaddr, \tmp1
+	cmp	\kaddr, \size
+	b.lo	9998b
+	dsb	\domain
+	.endm
+
+/*
+ * reset_pmuserenr_el0 - reset PMUSERENR_EL0 if PMUv3 present
+ */
+	.macro	reset_pmuserenr_el0, tmpreg
+	mrs	\tmpreg, id_aa64dfr0_el1	// Check ID_AA64DFR0_EL1 PMUVer
+	sbfx	\tmpreg, \tmpreg, #8, #4
+	cmp	\tmpreg, #1			// Skip if no PMU present
+	b.lt	9000f
+	msr	pmuserenr_el0, xzr		// Disable PMU access from EL0
+9000:
+	.endm
+
+/*
+ * copy_page - copy src to dest using temp registers t1-t8
+ */
+	.macro copy_page dest:req src:req t1:req t2:req t3:req t4:req t5:req t6:req t7:req t8:req
+9998:	ldp	\t1, \t2, [\src]
+	ldp	\t3, \t4, [\src, #16]
+	ldp	\t5, \t6, [\src, #32]
+	ldp	\t7, \t8, [\src, #48]
+	add	\src, \src, #64
+	stnp	\t1, \t2, [\dest]
+	stnp	\t3, \t4, [\dest, #16]
+	stnp	\t5, \t6, [\dest, #32]
+	stnp	\t7, \t8, [\dest, #48]
+	add	\dest, \dest, #64
+	tst	\src, #(PAGE_SIZE - 1)
+	b.ne	9998b
+	.endm
+
+/*
  * Annotate a function as position independent, i.e., safe to be called before
  * the kernel virtual mapping is activated.
  */
@@ -204,4 +372,42 @@
 	.size	__pi_##x, . - x;	\
 	ENDPROC(x)
 
+	/*
+	 * Emit a 64-bit absolute little endian symbol reference in a way that
+	 * ensures that it will be resolved at build time, even when building a
+	 * PIE binary. This requires cooperation from the linker script, which
+	 * must emit the lo32/hi32 halves individually.
+	 */
+	.macro	le64sym, sym
+	.long	\sym\()_lo32
+	.long	\sym\()_hi32
+	.endm
+
+	/*
+	 * mov_q - move an immediate constant into a 64-bit register using
+	 *         between 2 and 4 movz/movk instructions (depending on the
+	 *         magnitude and sign of the operand)
+	 */
+	.macro	mov_q, reg, val
+	.if (((\val) >> 31) == 0 || ((\val) >> 31) == 0x1ffffffff)
+	movz	\reg, :abs_g1_s:\val
+	.else
+	.if (((\val) >> 47) == 0 || ((\val) >> 47) == 0x1ffff)
+	movz	\reg, :abs_g2_s:\val
+	.else
+	movz	\reg, :abs_g3:\val
+	movk	\reg, :abs_g2_nc:\val
+	.endif
+	movk	\reg, :abs_g1_nc:\val
+	.endif
+	movk	\reg, :abs_g0_nc:\val
+	.endm
+
+/*
+ * Return the current thread_info.
+ */
+	.macro	get_thread_info, rd
+	mrs	\rd, sp_el0
+	.endm
+
 #endif	/* __ASM_ASSEMBLER_H */
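
Worked example for mov_q: the .if chain starts the movz at the lowest 16-bit
group that can still reproduce the constant once the sign-extending
:abs_g1_s:/:abs_g2_s: relocations are applied, so typical constants cost 2 or
3 instructions instead of 4. The same selection logic restated as host-side C:

#include <stdio.h>

/* How many movz/movk instructions would mov_q emit for v?  Mirrors the
 * .if conditions in the macro above. */
static int mov_q_insns(unsigned long long v)
{
	if ((v >> 31) == 0 || (v >> 31) == 0x1ffffffffULL)
		return 2;	/* movz :abs_g1_s: + movk :abs_g0_nc: */
	if ((v >> 47) == 0 || (v >> 47) == 0x1ffffULL)
		return 3;	/* movz :abs_g2_s: + two movk */
	return 4;		/* movz :abs_g3: + three movk */
}

int main(void)
{
	printf("%d\n", mov_q_insns(0x1000ULL));		    /* 2 */
	printf("%d\n", mov_q_insns(0xffffffffffff0000ULL)); /* 2: sign-extends from bit 31 */
	printf("%d\n", mov_q_insns(0x00007fffffff0000ULL)); /* 3 */
	printf("%d\n", mov_q_insns(0x123456789abcdef0ULL)); /* 4 */
	return 0;
}
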
diff -ruw linux-4.4.115/arch/arm64/include/asm/barrier.h linux-4.4.115-fbx/arch/arm64/include/asm/barrier.h
--- linux-4.4.115/arch/arm64/include/asm/barrier.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/arch/arm64/include/asm/barrier.h	2019-01-22 16:16:21.535228585 +0100
@@ -20,6 +20,9 @@
 
 #ifndef __ASSEMBLY__
 
+#define __nops(n)	".rept	" #n "\nnop\n.endr\n"
+#define nops(n)		asm volatile(__nops(n))
+
 #define sev()		asm volatile("sev" : : : "memory")
 #define wfe()		asm volatile("wfe" : : : "memory")
 #define wfi()		asm volatile("wfi" : : : "memory")
diff -ruw linux-4.4.115/arch/arm64/include/asm/boot.h linux-4.4.115-fbx/arch/arm64/include/asm/boot.h
--- linux-4.4.115/arch/arm64/include/asm/boot.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/arch/arm64/include/asm/boot.h	2019-01-22 16:16:21.535228585 +0100
@@ -11,4 +11,10 @@
 #define MIN_FDT_ALIGN		8
 #define MAX_FDT_SIZE		SZ_2M
 
+/*
+ * arm64 requires the kernel image to be placed
+ * TEXT_OFFSET bytes beyond a 2 MB aligned base
+ */
+#define MIN_KIMG_ALIGN		SZ_2M
+
 #endif
diff -ruw linux-4.4.115/arch/arm64/include/asm/bug.h linux-4.4.115-fbx/arch/arm64/include/asm/bug.h
--- linux-4.4.115/arch/arm64/include/asm/bug.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/arch/arm64/include/asm/bug.h	2019-10-29 09:26:22.993196817 +0100
@@ -18,7 +18,7 @@
 #ifndef _ARCH_ARM64_ASM_BUG_H
 #define _ARCH_ARM64_ASM_BUG_H
 
-#include <asm/debug-monitors.h>
+#include <asm/brk-imm.h>
 
 #ifdef CONFIG_GENERIC_BUG
 #define HAVE_ARCH_BUG
diff -ruw linux-4.4.115/arch/arm64/include/asm/cacheflush.h linux-4.4.115-fbx/arch/arm64/include/asm/cacheflush.h
--- linux-4.4.115/arch/arm64/include/asm/cacheflush.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/arch/arm64/include/asm/cacheflush.h	2019-01-22 16:16:21.535228585 +0100
@@ -40,6 +40,10 @@
  *	the implementation assumes non-aliasing VIPT D-cache and (aliasing)
  *	VIPT or ASID-tagged VIVT I-cache.
  *
+ *	flush_cache_all()
+ *
+ *		Unconditionally clean and invalidate the entire cache.
+ *
  *	flush_cache_mm(mm)
  *
  *		Clean and invalidate all user space cache entries
@@ -65,9 +69,11 @@
  *		- kaddr  - page address
  *		- size   - region size
  */
+extern void flush_cache_all(void);
 extern void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
 extern void flush_icache_range(unsigned long start, unsigned long end);
 extern void __flush_dcache_area(void *addr, size_t len);
+extern void __clean_dcache_area_pou(void *addr, size_t len);
 extern long __flush_cache_user_range(unsigned long start, unsigned long end);
 
 static inline void flush_cache_mm(struct mm_struct *mm)
@@ -85,6 +91,12 @@
 extern void __dma_map_area(const void *, size_t, int);
 extern void __dma_unmap_area(const void *, size_t, int);
 extern void __dma_flush_range(const void *, const void *);
+extern void __dma_inv_range(const void *, const void *);
+extern void __dma_clean_range(const void *, const void *);
+
+#define dmac_flush_range __dma_flush_range
+#define dmac_inv_range __dma_inv_range
+#define dmac_clean_range __dma_clean_range
 
 /*
  * Copy user data from/to a page which is mapped into a different
@@ -154,9 +166,21 @@
 int set_memory_rw(unsigned long addr, int numpages);
 int set_memory_x(unsigned long addr, int numpages);
 int set_memory_nx(unsigned long addr, int numpages);
+#ifdef CONFIG_KERNEL_TEXT_RDONLY
+void set_kernel_text_ro(void);
+#else
+static inline void set_kernel_text_ro(void) { }
+#endif
 
 #ifdef CONFIG_DEBUG_RODATA
 void mark_rodata_ro(void);
 #endif
 
+#ifdef CONFIG_FREE_PAGES_RDONLY
+#define mark_addr_rdonly(a)	set_memory_ro((unsigned long)a, 1);
+#define mark_addr_rdwrite(a)	set_memory_rw((unsigned long)a, 1);
+#else
+#define mark_addr_rdonly(a)
+#define mark_addr_rdwrite(a)
+#endif
 #endif
diff -ruw linux-4.4.115/arch/arm64/include/asm/cache.h linux-4.4.115-fbx/arch/arm64/include/asm/cache.h
--- linux-4.4.115/arch/arm64/include/asm/cache.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/arch/arm64/include/asm/cache.h	2019-01-22 16:16:21.535228585 +0100
@@ -18,17 +18,17 @@
 
 #include <asm/cachetype.h>
 
-#define L1_CACHE_SHIFT		7
+#define L1_CACHE_SHIFT		6
 #define L1_CACHE_BYTES		(1 << L1_CACHE_SHIFT)
 
 /*
  * Memory returned by kmalloc() may be used for DMA, so we must make
- * sure that all such allocations are cache aligned. Otherwise,
- * unrelated code may cause parts of the buffer to be read into the
- * cache before the transfer is done, causing old data to be seen by
- * the CPU.
+ * sure that all such allocations are aligned to the maximum *known*
+ * cache line size on ARMv8 systems. Otherwise, unrelated code may
+ * cause parts of the buffer to be read into the cache before the
+ * transfer is done, causing old data to be seen by the CPU.
  */
-#define ARCH_DMA_MINALIGN	L1_CACHE_BYTES
+#define ARCH_DMA_MINALIGN	(128)
 
 #ifndef __ASSEMBLY__
 
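Note: L1_CACHE_SHIFT drops to 6 (64-byte lines on the cores this tree targets)
while ARCH_DMA_MINALIGN stays pinned at 128, the largest line size known on
ARMv8 systems, keeping every kmalloc() buffer safe for non-coherent DMA.
Drivers embedding DMA targets in larger structs need the same isolation by
hand; a sketch of the usual pattern (struct and field names hypothetical):

#include <linux/cache.h>
#include <linux/spinlock.h>
#include <linux/types.h>

/* rx_buf is written by the device, so keep it out of the cache lines
 * holding CPU-owned fields: aligning (and sizing) to ARCH_DMA_MINALIGN
 * stops a speculative read of 'stats' from pulling soon-to-be-DMA'd
 * lines into the cache. */
struct foo_dev {
	spinlock_t	lock;
	u64		stats;
	u8		rx_buf[256] __aligned(ARCH_DMA_MINALIGN);
};
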
diff -ruw linux-4.4.115/arch/arm64/include/asm/cmpxchg.h linux-4.4.115-fbx/arch/arm64/include/asm/cmpxchg.h
--- linux-4.4.115/arch/arm64/include/asm/cmpxchg.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/arch/arm64/include/asm/cmpxchg.h	2019-01-22 16:16:21.535228585 +0100
@@ -19,7 +19,6 @@
 #define __ASM_CMPXCHG_H
 
 #include <linux/bug.h>
-#include <linux/mmdebug.h>
 
 #include <asm/atomic.h>
 #include <asm/barrier.h>
diff -ruw linux-4.4.115/arch/arm64/include/asm/cpufeature.h linux-4.4.115-fbx/arch/arm64/include/asm/cpufeature.h
--- linux-4.4.115/arch/arm64/include/asm/cpufeature.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/arch/arm64/include/asm/cpufeature.h	2019-10-29 09:26:22.997196856 +0100
@@ -30,14 +30,22 @@
 #define ARM64_HAS_LSE_ATOMICS			5
 #define ARM64_WORKAROUND_CAVIUM_23154		6
 #define ARM64_WORKAROUND_834220			7
-#define ARM64_WORKAROUND_CAVIUM_27456		8
-
-#define ARM64_NCAPS				9
+#define ARM64_HAS_NO_HW_PREFETCH		8
+#define ARM64_HAS_UAO				9
+#define ARM64_ALT_PAN_NOT_UAO			10
+
+#define ARM64_WORKAROUND_CAVIUM_27456		11
+#define ARM64_HAS_VIRT_HOST_EXTN		12
+#define ARM64_HARDEN_BRANCH_PREDICTOR		13
+#define ARM64_UNMAP_KERNEL_AT_EL0		14
+#define ARM64_NCAPS				15
 
 #ifndef __ASSEMBLY__
 
 #include <linux/kernel.h>
 
+extern const char *machine_name;
+
 /* CPU feature register tracking */
 enum ftr_type {
 	FTR_EXACT,	/* Use a predefined safe value */
@@ -163,7 +171,9 @@
 
 void update_cpu_capabilities(const struct arm64_cpu_capabilities *caps,
 			    const char *info);
+void enable_cpu_capabilities(const struct arm64_cpu_capabilities *caps);
 void check_local_cpu_errata(void);
+void __init enable_errata_workarounds(void);
 
 #ifdef CONFIG_HOTPLUG_CPU
 void verify_local_cpu_capabilities(void);
@@ -177,7 +187,7 @@
 
 static inline bool cpu_supports_mixed_endian_el0(void)
 {
-	return id_aa64mmfr0_mixed_endian_el0(read_cpuid(ID_AA64MMFR0_EL1));
+	return id_aa64mmfr0_mixed_endian_el0(read_cpuid(SYS_ID_AA64MMFR0_EL1));
 }
 
 static inline bool system_supports_mixed_endian_el0(void)
@@ -185,6 +195,12 @@
 	return id_aa64mmfr0_mixed_endian_el0(read_system_reg(SYS_ID_AA64MMFR0_EL1));
 }
 
+static inline bool system_uses_ttbr0_pan(void)
+{
+	return IS_ENABLED(CONFIG_ARM64_SW_TTBR0_PAN) &&
+		!cpus_have_cap(ARM64_HAS_PAN);
+}
+
 #endif /* __ASSEMBLY__ */
 
 #endif
diff -ruw linux-4.4.115/arch/arm64/include/asm/cpu.h linux-4.4.115-fbx/arch/arm64/include/asm/cpu.h
--- linux-4.4.115/arch/arm64/include/asm/cpu.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/arch/arm64/include/asm/cpu.h	2019-01-22 16:16:21.535228585 +0100
@@ -36,6 +36,7 @@
 	u64		reg_id_aa64isar1;
 	u64		reg_id_aa64mmfr0;
 	u64		reg_id_aa64mmfr1;
+	u64		reg_id_aa64mmfr2;
 	u64		reg_id_aa64pfr0;
 	u64		reg_id_aa64pfr1;
 
diff -ruw linux-4.4.115/arch/arm64/include/asm/cputype.h linux-4.4.115-fbx/arch/arm64/include/asm/cputype.h
--- linux-4.4.115/arch/arm64/include/asm/cputype.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/arch/arm64/include/asm/cputype.h	2019-10-29 09:26:22.997196856 +0100
@@ -32,11 +32,9 @@
 #define MPIDR_AFFINITY_LEVEL(mpidr, level) \
 	((mpidr >> MPIDR_LEVEL_SHIFT(level)) & MPIDR_LEVEL_MASK)
 
-#define read_cpuid(reg) ({						\
-	u64 __val;							\
-	asm("mrs	%0, " #reg : "=r" (__val));			\
-	__val;								\
-})
+#define MMFR0_16KGRAN_SIZE	15
+#define MMFR0_16KGRAN_SHFT	20
+#define MMFR0_EL1_16KGRAN_MASK	(MMFR0_16KGRAN_SIZE << MMFR0_16KGRAN_SHFT)
 
 #define MIDR_REVISION_MASK	0xf
 #define MIDR_REVISION(midr)	((midr) & MIDR_REVISION_MASK)
@@ -57,26 +55,64 @@
 #define MIDR_IMPLEMENTOR(midr)	\
 	(((midr) & MIDR_IMPLEMENTOR_MASK) >> MIDR_IMPLEMENTOR_SHIFT)
 
-#define MIDR_CPU_PART(imp, partnum) \
+#define MIDR_CPU_MODEL(imp, partnum) \
 	(((imp)			<< MIDR_IMPLEMENTOR_SHIFT) | \
 	(0xf			<< MIDR_ARCHITECTURE_SHIFT) | \
 	((partnum)		<< MIDR_PARTNUM_SHIFT))
 
+#define MIDR_CPU_MODEL_MASK (MIDR_IMPLEMENTOR_MASK | MIDR_PARTNUM_MASK | \
+			     MIDR_ARCHITECTURE_MASK)
+
+#define MIDR_IS_CPU_MODEL_RANGE(midr, model, rv_min, rv_max)		\
+({									\
+	u32 _model = (midr) & MIDR_CPU_MODEL_MASK;			\
+	u32 rv = (midr) & (MIDR_REVISION_MASK | MIDR_VARIANT_MASK);	\
+									\
+	_model == (model) && rv >= (rv_min) && rv <= (rv_max);		\
+ })
+
 #define ARM_CPU_IMP_ARM			0x41
 #define ARM_CPU_IMP_APM			0x50
 #define ARM_CPU_IMP_CAVIUM		0x43
+#define ARM_CPU_IMP_QCOM		0x51
 
 #define ARM_CPU_PART_AEM_V8		0xD0F
 #define ARM_CPU_PART_FOUNDATION		0xD00
 #define ARM_CPU_PART_CORTEX_A57		0xD07
+#define ARM_CPU_PART_CORTEX_A72		0xD08
 #define ARM_CPU_PART_CORTEX_A53		0xD03
+#define ARM_CPU_PART_CORTEX_A73		0xD09
+#define ARM_CPU_PART_CORTEX_A75		0xD0A
+#define ARM_CPU_PART_KRYO2XX_GOLD	0x800
+#define ARM_CPU_PART_KRYO2XX_SILVER	0x801
+#define QCOM_CPU_PART_KRYO		0x200
 
 #define APM_CPU_PART_POTENZA		0x000
 
 #define CAVIUM_CPU_PART_THUNDERX	0x0A1
 
+#define MIDR_CORTEX_A53 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A53)
+#define MIDR_CORTEX_A57 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A57)
+#define MIDR_CORTEX_A72 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A72)
+#define MIDR_CORTEX_A73 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A73)
+#define MIDR_CORTEX_A75 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A75)
+#define MIDR_THUNDERX	MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX)
+#define MIDR_KRYO2XX_SILVER \
+	MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, ARM_CPU_PART_KRYO2XX_SILVER)
+#define MIDR_KRYO2XX_GOLD \
+	MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, ARM_CPU_PART_KRYO2XX_GOLD)
+#define MIDR_QCOM_KRYO MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_KRYO)
+
 #ifndef __ASSEMBLY__
 
+#include <asm/sysreg.h>
+
+#define read_cpuid(reg) ({						\
+	u64 __val;							\
+	asm("mrs_s	%0, " __stringify(reg) : "=r" (__val));		\
+	__val;								\
+})
+
 /*
  * The CPU ID never changes at run time, so we might as well tell the
  * compiler that it's constant.  Use this function to read the CPU ID
@@ -84,12 +121,12 @@
  */
 static inline u32 __attribute_const__ read_cpuid_id(void)
 {
-	return read_cpuid(MIDR_EL1);
+	return read_cpuid(SYS_MIDR_EL1);
 }
 
 static inline u64 __attribute_const__ read_cpuid_mpidr(void)
 {
-	return read_cpuid(MPIDR_EL1);
+	return read_cpuid(SYS_MPIDR_EL1);
 }
 
 static inline unsigned int __attribute_const__ read_cpuid_implementor(void)
@@ -104,7 +141,7 @@
 
 static inline u32 __attribute_const__ read_cpuid_cachetype(void)
 {
-	return read_cpuid(CTR_EL0);
+	return read_cpuid(SYS_CTR_EL0);
 }
 #endif /* __ASSEMBLY__ */
 
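Note: with read_cpuid() now going through the sysreg encodings, the new
MIDR_CPU_MODEL/MIDR_IS_CPU_MODEL_RANGE helpers reduce an errata check to one
expression. A sketch (the erratum and its revision window are invented; the
bounds pack variant<<20 | revision, so 0x04 means r0p4):

#include <linux/types.h>
#include <asm/cputype.h>

/* Hypothetical errata check: does this CPU fall in Cortex-A53
 * r0p0..r0p4?  MIDR_IS_CPU_MODEL_RANGE masks out implementor, part and
 * architecture, then range-checks the variant/revision bits. */
static bool foo_erratum_applies(void)
{
	return MIDR_IS_CPU_MODEL_RANGE(read_cpuid_id(), MIDR_CORTEX_A53,
				       0x00, 0x04);
}
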
diff -ruw linux-4.4.115/arch/arm64/include/asm/debug-monitors.h linux-4.4.115-fbx/arch/arm64/include/asm/debug-monitors.h
--- linux-4.4.115/arch/arm64/include/asm/debug-monitors.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/arch/arm64/include/asm/debug-monitors.h	2019-01-22 16:16:21.539228621 +0100
@@ -20,6 +20,7 @@
 
 #include <linux/errno.h>
 #include <linux/types.h>
+#include <asm/brk-imm.h>
 #include <asm/esr.h>
 #include <asm/insn.h>
 #include <asm/ptrace.h>
@@ -47,19 +48,6 @@
 #define BREAK_INSTR_SIZE		AARCH64_INSN_SIZE
 
 /*
- * #imm16 values used for BRK instruction generation
- * Allowed values for kgbd are 0x400 - 0x7ff
- * 0x100: for triggering a fault on purpose (reserved)
- * 0x400: for dynamic BRK instruction
- * 0x401: for compile time BRK instruction
- * 0x800: kernel-mode BUG() and WARN() traps
- */
-#define FAULT_BRK_IMM			0x100
-#define KGDB_DYN_DBG_BRK_IMM		0x400
-#define KGDB_COMPILED_DBG_BRK_IMM	0x401
-#define BUG_BRK_IMM			0x800
-
-/*
  * BRK instruction encoding
  * The #imm16 value should be placed at bits[20:5] within BRK ins
  */
@@ -78,6 +66,11 @@
 
 #define CACHE_FLUSH_IS_SAFE		1
 
+/* kprobes BRK opcodes with ESR encoding  */
+#define BRK64_ESR_MASK		0xFFFF
+#define BRK64_ESR_KPROBES	0x0004
+#define BRK64_OPCODE_KPROBES	(AARCH64_BREAK_MON | (BRK64_ESR_KPROBES << 5))
+
 /* AArch32 */
 #define DBG_ESR_EVT_BKPT	0x4
 #define DBG_ESR_EVT_VECC	0x5
diff -ruw linux-4.4.115/arch/arm64/include/asm/device.h linux-4.4.115-fbx/arch/arm64/include/asm/device.h
--- linux-4.4.115/arch/arm64/include/asm/device.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/arch/arm64/include/asm/device.h	2019-01-22 16:16:21.539228621 +0100
@@ -17,14 +17,24 @@
 #define __ASM_DEVICE_H
 
 struct dev_archdata {
-	struct dma_map_ops *dma_ops;
+	const struct dma_map_ops *dma_ops;
 #ifdef CONFIG_IOMMU_API
 	void *iommu;			/* private IOMMU data */
 #endif
 	bool dma_coherent;
+#ifdef CONFIG_ARM64_DMA_USE_IOMMU
+	struct dma_iommu_mapping	*mapping;
+#endif
 };
 
 struct pdev_archdata {
+	u64 dma_mask;
 };
 
+#ifdef CONFIG_ARM64_DMA_USE_IOMMU
+#define to_dma_iommu_mapping(dev) ((dev)->archdata.mapping)
+#else
+#define to_dma_iommu_mapping(dev) NULL
+#endif
+
 #endif
diff -ruw linux-4.4.115/arch/arm64/include/asm/dma-mapping.h linux-4.4.115-fbx/arch/arm64/include/asm/dma-mapping.h
--- linux-4.4.115/arch/arm64/include/asm/dma-mapping.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/arch/arm64/include/asm/dma-mapping.h	2019-01-22 16:16:21.539228621 +0100
@@ -27,7 +27,7 @@
 #define DMA_ERROR_CODE	(~(dma_addr_t)0)
 extern struct dma_map_ops dummy_dma_ops;
 
-static inline struct dma_map_ops *__generic_dma_ops(struct device *dev)
+static inline const struct dma_map_ops *__generic_dma_ops(struct device *dev)
 {
 	if (dev && dev->archdata.dma_ops)
 		return dev->archdata.dma_ops;
@@ -39,7 +39,7 @@
 	return &dummy_dma_ops;
 }
 
-static inline struct dma_map_ops *get_dma_ops(struct device *dev)
+static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
 {
 	if (xen_initial_domain())
 		return xen_dma_ops;
@@ -47,6 +47,12 @@
 		return __generic_dma_ops(dev);
 }
 
+static inline void set_dma_ops(struct device *dev,
+			const struct dma_map_ops *dma_ops)
+{
+	dev->archdata.dma_ops = dma_ops;
+}
+
 void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
 			struct iommu_ops *iommu, bool coherent);
 #define arch_setup_dma_ops	arch_setup_dma_ops
diff -ruw linux-4.4.115/arch/arm64/include/asm/efi.h linux-4.4.115-fbx/arch/arm64/include/asm/efi.h
--- linux-4.4.115/arch/arm64/include/asm/efi.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/arch/arm64/include/asm/efi.h	2019-01-22 16:16:21.539228621 +0100
@@ -1,8 +1,11 @@
 #ifndef _ASM_EFI_H
 #define _ASM_EFI_H
 
+#include <asm/cpufeature.h>
 #include <asm/io.h>
+#include <asm/mmu_context.h>
 #include <asm/neon.h>
+#include <asm/tlbflush.h>
 
 #ifdef CONFIG_EFI
 extern void efi_init(void);
@@ -10,6 +13,8 @@
 #define efi_init()
 #endif
 
+int efi_create_mapping(struct mm_struct *mm, efi_memory_desc_t *md);
+
 #define efi_call_virt(f, ...)						\
 ({									\
 	efi_##f##_t *__f;						\
@@ -63,6 +68,34 @@
  *   Services are enabled and the EFI_RUNTIME_SERVICES bit set.
  */
 
+static inline void efi_set_pgd(struct mm_struct *mm)
+{
+	__switch_mm(mm);
+
+	if (system_uses_ttbr0_pan()) {
+		if (mm != current->active_mm) {
+			/*
+			 * Update the current thread's saved ttbr0 since it is
+			 * restored as part of a return from exception. Enable
+			 * access to the valid TTBR0_EL1 and invoke the errata
+			 * workaround directly since there is no return from
+			 * exception when invoking the EFI run-time services.
+			 */
+			update_saved_ttbr0(current, mm);
+			uaccess_ttbr0_enable();
+			post_ttbr_update_workaround();
+		} else {
+			/*
+			 * Defer the switch to the current thread's TTBR0_EL1
+			 * until uaccess_enable(). Restore the current
+			 * thread's saved ttbr0 corresponding to its active_mm
+			 */
+			uaccess_ttbr0_disable();
+			update_saved_ttbr0(current, current->active_mm);
+		}
+	}
+}
+
 void efi_virtmap_load(void);
 void efi_virtmap_unload(void);
 
diff -ruw linux-4.4.115/arch/arm64/include/asm/elf.h linux-4.4.115-fbx/arch/arm64/include/asm/elf.h
--- linux-4.4.115/arch/arm64/include/asm/elf.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/arch/arm64/include/asm/elf.h	2019-01-22 16:16:21.539228621 +0100
@@ -23,15 +23,7 @@
  */
 #include <asm/ptrace.h>
 #include <asm/user.h>
-
-typedef unsigned long elf_greg_t;
-
-#define ELF_NGREG (sizeof(struct user_pt_regs) / sizeof(elf_greg_t))
-#define ELF_CORE_COPY_REGS(dest, regs)	\
-	*(struct user_pt_regs *)&(dest) = (regs)->user_regs;
-
-typedef elf_greg_t elf_gregset_t[ELF_NGREG];
-typedef struct user_fpsimd_state elf_fpregset_t;
+#include <asm/fpsimd.h>
 
 /*
  * AArch64 static relocation types.
@@ -86,6 +78,8 @@
 #define R_AARCH64_MOVW_PREL_G2_NC	292
 #define R_AARCH64_MOVW_PREL_G3		293
 
+#define R_AARCH64_RELATIVE		1027
+
 /*
  * These are used to set parameters in the core dumps.
  */
@@ -126,6 +120,17 @@
  */
 #define ELF_ET_DYN_BASE		(2 * TASK_SIZE_64 / 3)
 
+#ifndef __ASSEMBLY__
+
+typedef unsigned long elf_greg_t;
+
+#define ELF_NGREG (sizeof(struct user_pt_regs) / sizeof(elf_greg_t))
+#define ELF_CORE_COPY_REGS(dest, regs)	\
+	*(struct user_pt_regs *)&(dest) = (regs)->user_regs;
+
+typedef elf_greg_t elf_gregset_t[ELF_NGREG];
+typedef struct user_fpsimd_state elf_fpregset_t;
+
 /*
  * When the program starts, a1 contains a pointer to a function to be
  * registered with atexit, as per the SVR4 ABI.  A value of 0 means we have no
@@ -165,7 +170,7 @@
 #ifdef CONFIG_COMPAT
 
 /* PIE load location for compat arm. Must match ARM ELF_ET_DYN_BASE. */
-#define COMPAT_ELF_ET_DYN_BASE		0x000400000UL
+#define COMPAT_ELF_ET_DYN_BASE		(2 * TASK_SIZE_32 / 3)
 
 /* AArch32 registers. */
 #define COMPAT_ELF_NGREG		18
@@ -178,7 +183,11 @@
 					 ((x)->e_flags & EF_ARM_EABI_MASK))
 
 #define compat_start_thread		compat_start_thread
-#define COMPAT_SET_PERSONALITY(ex)	set_thread_flag(TIF_32BIT);
+#define COMPAT_SET_PERSONALITY(ex)					\
+do {									\
+	set_thread_flag(TIF_32BIT);					\
+} while (0)
+
 #define COMPAT_ARCH_DLINFO
 extern int aarch32_setup_vectors_page(struct linux_binprm *bprm,
 				      int uses_interp);
@@ -187,4 +196,6 @@
 
 #endif /* CONFIG_COMPAT */
 
+#endif /* !__ASSEMBLY__ */
+
 #endif
diff -ruw linux-4.4.115/arch/arm64/include/asm/esr.h linux-4.4.115-fbx/arch/arm64/include/asm/esr.h
--- linux-4.4.115/arch/arm64/include/asm/esr.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/arch/arm64/include/asm/esr.h	2019-01-22 16:16:21.539228621 +0100
@@ -74,6 +74,7 @@
 
 #define ESR_ELx_EC_SHIFT	(26)
 #define ESR_ELx_EC_MASK		(UL(0x3F) << ESR_ELx_EC_SHIFT)
+#define ESR_ELx_EC(esr)		(((esr) & ESR_ELx_EC_MASK) >> ESR_ELx_EC_SHIFT)
 
 #define ESR_ELx_IL		(UL(1) << 25)
 #define ESR_ELx_ISS_MASK	(ESR_ELx_IL - 1)
@@ -108,6 +109,46 @@
 	((ESR_ELx_EC_BRK64 << ESR_ELx_EC_SHIFT) | ESR_ELx_IL |	\
 	 ((imm) & 0xffff))
 
+/* ISS field definitions for System instruction traps */
+#define ESR_ELx_SYS64_ISS_RES0_SHIFT	22
+#define ESR_ELx_SYS64_ISS_RES0_MASK	(UL(0x7) << ESR_ELx_SYS64_ISS_RES0_SHIFT)
+#define ESR_ELx_SYS64_ISS_DIR_MASK	0x1
+#define ESR_ELx_SYS64_ISS_DIR_READ	0x1
+#define ESR_ELx_SYS64_ISS_DIR_WRITE	0x0
+
+#define ESR_ELx_SYS64_ISS_RT_SHIFT	5
+#define ESR_ELx_SYS64_ISS_RT_MASK	(UL(0x1f) << ESR_ELx_SYS64_ISS_RT_SHIFT)
+#define ESR_ELx_SYS64_ISS_CRM_SHIFT	1
+#define ESR_ELx_SYS64_ISS_CRM_MASK	(UL(0xf) << ESR_ELx_SYS64_ISS_CRM_SHIFT)
+#define ESR_ELx_SYS64_ISS_CRN_SHIFT	10
+#define ESR_ELx_SYS64_ISS_CRN_MASK	(UL(0xf) << ESR_ELx_SYS64_ISS_CRN_SHIFT)
+#define ESR_ELx_SYS64_ISS_OP1_SHIFT	14
+#define ESR_ELx_SYS64_ISS_OP1_MASK	(UL(0x7) << ESR_ELx_SYS64_ISS_OP1_SHIFT)
+#define ESR_ELx_SYS64_ISS_OP2_SHIFT	17
+#define ESR_ELx_SYS64_ISS_OP2_MASK	(UL(0x7) << ESR_ELx_SYS64_ISS_OP2_SHIFT)
+#define ESR_ELx_SYS64_ISS_OP0_SHIFT	20
+#define ESR_ELx_SYS64_ISS_OP0_MASK	(UL(0x3) << ESR_ELx_SYS64_ISS_OP0_SHIFT)
+#define ESR_ELx_SYS64_ISS_SYS_MASK	(ESR_ELx_SYS64_ISS_OP0_MASK | \
+					 ESR_ELx_SYS64_ISS_OP1_MASK | \
+					 ESR_ELx_SYS64_ISS_OP2_MASK | \
+					 ESR_ELx_SYS64_ISS_CRN_MASK | \
+					 ESR_ELx_SYS64_ISS_CRM_MASK)
+#define ESR_ELx_SYS64_ISS_SYS_VAL(op0, op1, op2, crn, crm) \
+					(((op0) << ESR_ELx_SYS64_ISS_OP0_SHIFT) | \
+					 ((op1) << ESR_ELx_SYS64_ISS_OP1_SHIFT) | \
+					 ((op2) << ESR_ELx_SYS64_ISS_OP2_SHIFT) | \
+					 ((crn) << ESR_ELx_SYS64_ISS_CRN_SHIFT) | \
+					 ((crm) << ESR_ELx_SYS64_ISS_CRM_SHIFT))
+
+#define ESR_ELx_SYS64_ISS_SYS_OP_MASK	(ESR_ELx_SYS64_ISS_SYS_MASK | \
+					 ESR_ELx_SYS64_ISS_DIR_MASK)
+
+#define ESR_ELx_SYS64_ISS_SYS_CNTVCT	(ESR_ELx_SYS64_ISS_SYS_VAL(3, 3, 2, 14, 0) | \
+					 ESR_ELx_SYS64_ISS_DIR_READ)
+
+#define ESR_ELx_SYS64_ISS_SYS_CNTFRQ	(ESR_ELx_SYS64_ISS_SYS_VAL(3, 3, 0, 14, 0) | \
+					 ESR_ELx_SYS64_ISS_DIR_READ)
+
 #ifndef __ASSEMBLY__
 #include <asm/types.h>
 
diff -ruw linux-4.4.115/arch/arm64/include/asm/exception.h linux-4.4.115-fbx/arch/arm64/include/asm/exception.h
--- linux-4.4.115/arch/arm64/include/asm/exception.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/arch/arm64/include/asm/exception.h	2019-01-22 16:16:21.539228621 +0100
@@ -18,7 +18,7 @@
 #ifndef __ASM_EXCEPTION_H
 #define __ASM_EXCEPTION_H
 
-#include <linux/ftrace.h>
+#include <linux/interrupt.h>
 
 #define __exception	__attribute__((section(".exception.text")))
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
diff -ruw linux-4.4.115/arch/arm64/include/asm/exec.h linux-4.4.115-fbx/arch/arm64/include/asm/exec.h
--- linux-4.4.115/arch/arm64/include/asm/exec.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/arch/arm64/include/asm/exec.h	2019-01-22 16:16:21.539228621 +0100
@@ -18,6 +18,9 @@
 #ifndef __ASM_EXEC_H
 #define __ASM_EXEC_H
 
+#include <linux/sched.h>
+
 extern unsigned long arch_align_stack(unsigned long sp);
+void uao_thread_switch(struct task_struct *next);
 
 #endif	/* __ASM_EXEC_H */
diff -ruw linux-4.4.115/arch/arm64/include/asm/fixmap.h linux-4.4.115-fbx/arch/arm64/include/asm/fixmap.h
--- linux-4.4.115/arch/arm64/include/asm/fixmap.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/arch/arm64/include/asm/fixmap.h	2019-01-22 16:16:21.539228621 +0100
@@ -50,6 +50,11 @@
 
 	FIX_EARLYCON_MEM_BASE,
 	FIX_TEXT_POKE0,
+#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
+	FIX_ENTRY_TRAMP_DATA,
+	FIX_ENTRY_TRAMP_TEXT,
+#define TRAMP_VALIAS		(__fix_to_virt(FIX_ENTRY_TRAMP_TEXT))
+#endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */
 	__end_of_permanent_fixed_addresses,
 
 	/*
@@ -62,6 +67,16 @@
 
 	FIX_BTMAP_END = __end_of_permanent_fixed_addresses,
 	FIX_BTMAP_BEGIN = FIX_BTMAP_END + TOTAL_FIX_BTMAPS - 1,
+
+	/*
+	 * Used for kernel page table creation, so unmapped memory may be used
+	 * for tables.
+	 */
+	FIX_PTE,
+	FIX_PMD,
+	FIX_PUD,
+	FIX_PGD,
+
 	__end_of_fixed_addresses
 };
 
diff -ruw linux-4.4.115/arch/arm64/include/asm/fpsimd.h linux-4.4.115-fbx/arch/arm64/include/asm/fpsimd.h
--- linux-4.4.115/arch/arm64/include/asm/fpsimd.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/arch/arm64/include/asm/fpsimd.h	2019-01-22 16:16:21.539228621 +0100
@@ -81,6 +81,14 @@
 				      u32 num_regs);
 extern void fpsimd_load_partial_state(struct fpsimd_partial_state *state);
 
+#ifdef CONFIG_ENABLE_FP_SIMD_SETTINGS
+extern void fpsimd_disable_trap(void);
+extern void fpsimd_enable_trap(void);
+#else
+static inline void fpsimd_disable_trap(void) {}
+static inline void fpsimd_enable_trap(void) {}
+#endif
+
 #endif
 
 #endif
diff -ruw linux-4.4.115/arch/arm64/include/asm/ftrace.h linux-4.4.115-fbx/arch/arm64/include/asm/ftrace.h
--- linux-4.4.115/arch/arm64/include/asm/ftrace.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/arch/arm64/include/asm/ftrace.h	2019-01-22 16:16:21.539228621 +0100
@@ -28,6 +28,8 @@
 
 extern unsigned long ftrace_graph_call;
 
+extern void return_to_handler(void);
+
 static inline unsigned long ftrace_call_adjust(unsigned long addr)
 {
 	/*
diff -ruw linux-4.4.115/arch/arm64/include/asm/futex.h linux-4.4.115-fbx/arch/arm64/include/asm/futex.h
--- linux-4.4.115/arch/arm64/include/asm/futex.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/arch/arm64/include/asm/futex.h	2019-10-29 09:26:22.997196856 +0100
@@ -21,15 +21,12 @@
 #include <linux/futex.h>
 #include <linux/uaccess.h>
 
-#include <asm/alternative.h>
-#include <asm/cpufeature.h>
 #include <asm/errno.h>
-#include <asm/sysreg.h>
 
 #define __futex_atomic_op(insn, ret, oldval, uaddr, tmp, oparg)		\
+do {									\
+	uaccess_enable();						\
 	asm volatile(							\
-	ALTERNATIVE("nop", SET_PSTATE_PAN(0), ARM64_HAS_PAN,		\
-		    CONFIG_ARM64_PAN)					\
 "	prfm	pstl1strm, %2\n"					\
 "1:	ldxr	%w1, %2\n"						\
 	insn "\n"							\
@@ -42,15 +39,13 @@
 "4:	mov	%w0, %w5\n"						\
 "	b	3b\n"							\
 "	.popsection\n"							\
-"	.pushsection __ex_table,\"a\"\n"				\
-"	.align	3\n"							\
-"	.quad	1b, 4b, 2b, 4b\n"					\
-"	.popsection\n"							\
-	ALTERNATIVE("nop", SET_PSTATE_PAN(1), ARM64_HAS_PAN,		\
-		    CONFIG_ARM64_PAN)					\
+	_ASM_EXTABLE(1b, 4b)						\
+	_ASM_EXTABLE(2b, 4b)						\
 	: "=&r" (ret), "=&r" (oldval), "+Q" (*uaddr), "=&r" (tmp)	\
 	: "r" (oparg), "Ir" (-EFAULT)					\
-	: "memory")
+	: "memory");							\
+	uaccess_disable();						\
+} while (0)
 
 static inline int
 futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
@@ -120,8 +115,8 @@
 	if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
 		return -EFAULT;
 
+	uaccess_enable();
 	asm volatile("// futex_atomic_cmpxchg_inatomic\n"
-ALTERNATIVE("nop", SET_PSTATE_PAN(0), ARM64_HAS_PAN, CONFIG_ARM64_PAN)
 "	prfm	pstl1strm, %2\n"
 "1:	ldxr	%w1, %2\n"
 "	sub	%w3, %w1, %w4\n"
@@ -134,14 +129,12 @@
 "4:	mov	%w0, %w6\n"
 "	b	3b\n"
 "	.popsection\n"
-"	.pushsection __ex_table,\"a\"\n"
-"	.align	3\n"
-"	.quad	1b, 4b, 2b, 4b\n"
-"	.popsection\n"
-ALTERNATIVE("nop", SET_PSTATE_PAN(1), ARM64_HAS_PAN, CONFIG_ARM64_PAN)
+	_ASM_EXTABLE(1b, 4b)
+	_ASM_EXTABLE(2b, 4b)
 	: "+r" (ret), "=&r" (val), "+Q" (*uaddr), "=&r" (tmp)
 	: "r" (oldval), "r" (newval), "Ir" (-EFAULT)
 	: "memory");
+	uaccess_disable();
 
 	*uval = val;
 	return ret;
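The rewritten macro brackets the LL/SC sequence with uaccess_enable() /
uaccess_disable() instead of open-coded PAN toggles, and replaces the
hand-rolled __ex_table entries with _ASM_EXTABLE so a fault at label 1 or 2
lands on the fixup at label 4. The asm implements only one operation; which
one runs is chosen by the generic decode of encoded_op in
futex_atomic_op_inuser(). A user-space sketch (not from the patch) of that
packing, for orientation:

#include <stdio.h>

int main(void)
{
	/* FUTEX_OP(op=ADD(1), oparg=4, cmp=CMP_LE(3), cmparg=9) */
	unsigned int encoded_op = (1u << 28) | (3u << 24) | (4u << 12) | 9u;
	int op     = (encoded_op >> 28) & 7;
	int cmp    = (encoded_op >> 24) & 15;
	int oparg  = (int)(encoded_op << 8) >> 20;	/* sign-extends 12 bits */
	int cmparg = (int)(encoded_op << 20) >> 20;	/* sign-extends 12 bits */

	printf("op=%d cmp=%d oparg=%d cmparg=%d\n", op, cmp, oparg, cmparg);
	return 0;
}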
diff -ruw linux-4.4.115/arch/arm64/include/asm/hardirq.h linux-4.4.115-fbx/arch/arm64/include/asm/hardirq.h
--- linux-4.4.115/arch/arm64/include/asm/hardirq.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/arch/arm64/include/asm/hardirq.h	2019-01-22 16:16:21.539228621 +0100
@@ -20,7 +20,7 @@
 #include <linux/threads.h>
 #include <asm/irq.h>
 
-#define NR_IPI	5
+#define NR_IPI	7
 
 typedef struct {
 	unsigned int __softirq_pending;
diff -ruw linux-4.4.115/arch/arm64/include/asm/hw_breakpoint.h linux-4.4.115-fbx/arch/arm64/include/asm/hw_breakpoint.h
--- linux-4.4.115/arch/arm64/include/asm/hw_breakpoint.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/arch/arm64/include/asm/hw_breakpoint.h	2019-01-22 16:16:21.539228621 +0100
@@ -68,7 +68,11 @@
 /* Lengths */
 #define ARM_BREAKPOINT_LEN_1	0x1
 #define ARM_BREAKPOINT_LEN_2	0x3
+#define ARM_BREAKPOINT_LEN_3	0x7
 #define ARM_BREAKPOINT_LEN_4	0xf
+#define ARM_BREAKPOINT_LEN_5	0x1f
+#define ARM_BREAKPOINT_LEN_6	0x3f
+#define ARM_BREAKPOINT_LEN_7	0x7f
 #define ARM_BREAKPOINT_LEN_8	0xff
 
 /* Kernel stepping */
@@ -110,7 +114,7 @@
 struct pmu;
 
 extern int arch_bp_generic_fields(struct arch_hw_breakpoint_ctrl ctrl,
-				  int *gen_len, int *gen_type);
+				  int *gen_len, int *gen_type, int *offset);
 extern int arch_check_bp_in_kernelspace(struct perf_event *bp);
 extern int arch_validate_hwbkpt_settings(struct perf_event *bp);
 extern int hw_breakpoint_exceptions_notify(struct notifier_block *unused,
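The new LEN_* values fill in the remaining watchpoint lengths: each constant
is a DBGWCR byte-address-select mask, so ARM_BREAKPOINT_LEN_n == (1 << n) - 1.
The *offset out-parameter added to arch_bp_generic_fields() reports where the
selected bytes start within the doubleword, letting a watchpoint cover, say,
bytes 2..4 rather than only ranges starting at the aligned base. A tiny sketch
(not from the patch) reproducing the table:

#include <stdio.h>

int main(void)
{
	for (int n = 1; n <= 8; n++)
		printf("ARM_BREAKPOINT_LEN_%d = 0x%x\n", n, (1 << n) - 1);
	return 0;
}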
diff -ruw linux-4.4.115/arch/arm64/include/asm/insn.h linux-4.4.115-fbx/arch/arm64/include/asm/insn.h
--- linux-4.4.115/arch/arm64/include/asm/insn.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/arch/arm64/include/asm/insn.h	2019-01-22 16:16:21.539228621 +0100
@@ -120,6 +120,29 @@
 	AARCH64_INSN_REG_SP = 31  /* Stack pointer: as load/store base reg */
 };
 
+enum aarch64_insn_special_register {
+	AARCH64_INSN_SPCLREG_SPSR_EL1	= 0xC200,
+	AARCH64_INSN_SPCLREG_ELR_EL1	= 0xC201,
+	AARCH64_INSN_SPCLREG_SP_EL0	= 0xC208,
+	AARCH64_INSN_SPCLREG_SPSEL	= 0xC210,
+	AARCH64_INSN_SPCLREG_CURRENTEL	= 0xC212,
+	AARCH64_INSN_SPCLREG_DAIF	= 0xDA11,
+	AARCH64_INSN_SPCLREG_NZCV	= 0xDA10,
+	AARCH64_INSN_SPCLREG_FPCR	= 0xDA20,
+	AARCH64_INSN_SPCLREG_DSPSR_EL0	= 0xDA28,
+	AARCH64_INSN_SPCLREG_DLR_EL0	= 0xDA29,
+	AARCH64_INSN_SPCLREG_SPSR_EL2	= 0xE200,
+	AARCH64_INSN_SPCLREG_ELR_EL2	= 0xE201,
+	AARCH64_INSN_SPCLREG_SP_EL1	= 0xE208,
+	AARCH64_INSN_SPCLREG_SPSR_INQ	= 0xE218,
+	AARCH64_INSN_SPCLREG_SPSR_ABT	= 0xE219,
+	AARCH64_INSN_SPCLREG_SPSR_UND	= 0xE21A,
+	AARCH64_INSN_SPCLREG_SPSR_FIQ	= 0xE21B,
+	AARCH64_INSN_SPCLREG_SPSR_EL3	= 0xF200,
+	AARCH64_INSN_SPCLREG_ELR_EL3	= 0xF201,
+	AARCH64_INSN_SPCLREG_SP_EL2	= 0xF210
+};
+
 enum aarch64_insn_variant {
 	AARCH64_INSN_VARIANT_32BIT,
 	AARCH64_INSN_VARIANT_64BIT
@@ -223,8 +246,15 @@
 static __always_inline u32 aarch64_insn_get_##abbr##_value(void) \
 { return (val); }
 
+__AARCH64_INSN_FUNCS(adr_adrp,	0x1F000000, 0x10000000)
+__AARCH64_INSN_FUNCS(prfm_lit,	0xFF000000, 0xD8000000)
 __AARCH64_INSN_FUNCS(str_reg,	0x3FE0EC00, 0x38206800)
 __AARCH64_INSN_FUNCS(ldr_reg,	0x3FE0EC00, 0x38606800)
+__AARCH64_INSN_FUNCS(ldr_lit,	0xBF000000, 0x18000000)
+__AARCH64_INSN_FUNCS(ldrsw_lit,	0xFF000000, 0x98000000)
+__AARCH64_INSN_FUNCS(exclusive,	0x3F800000, 0x08000000)
+__AARCH64_INSN_FUNCS(load_ex,	0x3F400000, 0x08400000)
+__AARCH64_INSN_FUNCS(store_ex,	0x3F400000, 0x08000000)
 __AARCH64_INSN_FUNCS(stp_post,	0x7FC00000, 0x28800000)
 __AARCH64_INSN_FUNCS(ldp_post,	0x7FC00000, 0x28C00000)
 __AARCH64_INSN_FUNCS(stp_pre,	0x7FC00000, 0x29800000)
@@ -273,10 +303,15 @@
 __AARCH64_INSN_FUNCS(hvc,	0xFFE0001F, 0xD4000002)
 __AARCH64_INSN_FUNCS(smc,	0xFFE0001F, 0xD4000003)
 __AARCH64_INSN_FUNCS(brk,	0xFFE0001F, 0xD4200000)
+__AARCH64_INSN_FUNCS(exception,	0xFF000000, 0xD4000000)
 __AARCH64_INSN_FUNCS(hint,	0xFFFFF01F, 0xD503201F)
 __AARCH64_INSN_FUNCS(br,	0xFFFFFC1F, 0xD61F0000)
 __AARCH64_INSN_FUNCS(blr,	0xFFFFFC1F, 0xD63F0000)
 __AARCH64_INSN_FUNCS(ret,	0xFFFFFC1F, 0xD65F0000)
+__AARCH64_INSN_FUNCS(eret,	0xFFFFFFFF, 0xD69F03E0)
+__AARCH64_INSN_FUNCS(mrs,	0xFFF00000, 0xD5300000)
+__AARCH64_INSN_FUNCS(msr_imm,	0xFFF8F01F, 0xD500401F)
+__AARCH64_INSN_FUNCS(msr_reg,	0xFFF00000, 0xD5100000)
 
 #undef	__AARCH64_INSN_FUNCS
 
@@ -286,6 +321,8 @@
 int aarch64_insn_read(void *addr, u32 *insnp);
 int aarch64_insn_write(void *addr, u32 insn);
 enum aarch64_insn_encoding_class aarch64_get_insn_class(u32 insn);
+bool aarch64_insn_uses_literal(u32 insn);
+bool aarch64_insn_is_branch(u32 insn);
 u64 aarch64_insn_decode_immediate(enum aarch64_insn_imm_type type, u32 insn);
 u32 aarch64_insn_encode_immediate(enum aarch64_insn_imm_type type,
 				  u32 insn, u64 imm);
@@ -367,9 +404,13 @@
 #define A32_RT_OFFSET	12
 #define A32_RT2_OFFSET	 0
 
+u32 aarch64_insn_extract_system_reg(u32 insn);
 u32 aarch32_insn_extract_reg_num(u32 insn, int offset);
 u32 aarch32_insn_mcr_extract_opc2(u32 insn);
 u32 aarch32_insn_mcr_extract_crm(u32 insn);
+
+typedef bool (pstate_check_t)(unsigned long);
+extern pstate_check_t * const aarch32_opcode_cond_checks[16];
 #endif /* __ASSEMBLY__ */
 
 #endif	/* __ASM_INSN_H */
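Each __AARCH64_INSN_FUNCS(name, mask, value) line above expands to an
aarch64_insn_is_<name>() helper that is simply (insn & mask) == value. A
user-space sketch (not from the patch; encoding checked against the ARM ARM)
classifying "mrs x0, cntvct_el0" with the mrs mask/value pair added in this
hunk:

#include <stdio.h>
#include <stdint.h>

static int insn_is_mrs(uint32_t insn)
{
	return (insn & 0xFFF00000) == 0xD5300000;	/* mask/value from above */
}

int main(void)
{
	uint32_t insn = 0xd53be040;	/* mrs x0, cntvct_el0 */

	printf("is_mrs = %d, Rt = x%u\n", insn_is_mrs(insn),
	       (unsigned)(insn & 0x1f));
	return 0;
}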
diff -ruw linux-4.4.115/arch/arm64/include/asm/io.h linux-4.4.115-fbx/arch/arm64/include/asm/io.h
--- linux-4.4.115/arch/arm64/include/asm/io.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/arch/arm64/include/asm/io.h	2019-01-22 16:16:21.539228621 +0100
@@ -31,38 +31,35 @@
 #include <asm/early_ioremap.h>
 #include <asm/alternative.h>
 #include <asm/cpufeature.h>
+#include <linux/msm_rtb.h>
 
 #include <xen/xen.h>
 
 /*
  * Generic IO read/write.  These perform native-endian accesses.
+ * Note that some architectures will want to re-define __raw_{read,write}w.
  */
-#define __raw_writeb __raw_writeb
-static inline void __raw_writeb(u8 val, volatile void __iomem *addr)
+static inline void __raw_writeb_no_log(u8 val, volatile void __iomem *addr)
 {
 	asm volatile("strb %w0, [%1]" : : "r" (val), "r" (addr));
 }
 
-#define __raw_writew __raw_writew
-static inline void __raw_writew(u16 val, volatile void __iomem *addr)
+static inline void __raw_writew_no_log(u16 val, volatile void __iomem *addr)
 {
 	asm volatile("strh %w0, [%1]" : : "r" (val), "r" (addr));
 }
 
-#define __raw_writel __raw_writel
-static inline void __raw_writel(u32 val, volatile void __iomem *addr)
+static inline void __raw_writel_no_log(u32 val, volatile void __iomem *addr)
 {
 	asm volatile("str %w0, [%1]" : : "r" (val), "r" (addr));
 }
 
-#define __raw_writeq __raw_writeq
-static inline void __raw_writeq(u64 val, volatile void __iomem *addr)
+static inline void __raw_writeq_no_log(u64 val, volatile void __iomem *addr)
 {
 	asm volatile("str %0, [%1]" : : "r" (val), "r" (addr));
 }
 
-#define __raw_readb __raw_readb
-static inline u8 __raw_readb(const volatile void __iomem *addr)
+static inline u8 __raw_readb_no_log(const volatile void __iomem *addr)
 {
 	u8 val;
 	asm volatile(ALTERNATIVE("ldrb %w0, [%1]",
@@ -72,8 +69,7 @@
 	return val;
 }
 
-#define __raw_readw __raw_readw
-static inline u16 __raw_readw(const volatile void __iomem *addr)
+static inline u16 __raw_readw_no_log(const volatile void __iomem *addr)
 {
 	u16 val;
 
@@ -84,8 +80,7 @@
 	return val;
 }
 
-#define __raw_readl __raw_readl
-static inline u32 __raw_readl(const volatile void __iomem *addr)
+static inline u32 __raw_readl_no_log(const volatile void __iomem *addr)
 {
 	u32 val;
 	asm volatile(ALTERNATIVE("ldr %w0, [%1]",
@@ -95,8 +90,7 @@
 	return val;
 }
 
-#define __raw_readq __raw_readq
-static inline u64 __raw_readq(const volatile void __iomem *addr)
+static inline u64 __raw_readq_no_log(const volatile void __iomem *addr)
 {
 	u64 val;
 	asm volatile(ALTERNATIVE("ldr %0, [%1]",
@@ -106,6 +100,46 @@
 	return val;
 }
 
+/*
+ * There may be cases when clients don't want to support or can't support the
+ * logging. The appropriate _no_log functions can be used instead, but clients
+ * should carefully consider why they can't support the logging.
+ */
+
+#define __raw_write_logged(v, a, _t) ({ \
+	int _ret; \
+	volatile void __iomem *_a = (a); \
+	void *_addr = (void __force *)(_a); \
+	_ret = uncached_logk(LOGK_WRITEL, _addr); \
+	ETB_WAYPOINT; \
+	__raw_write##_t##_no_log((v), _a); \
+	if (_ret) \
+		LOG_BARRIER; \
+	})
+
+#define __raw_writeb(v, a)	__raw_write_logged((v), a, b)
+#define __raw_writew(v, a)	__raw_write_logged((v), a, w)
+#define __raw_writel(v, a)	__raw_write_logged((v), a, l)
+#define __raw_writeq(v, a)	__raw_write_logged((v), a, q)
+
+#define __raw_read_logged(a, _l, _t)    ({ \
+	_t __a; \
+	const volatile void __iomem *_a = (const volatile void __iomem *)(a); \
+	void *_addr = (void __force *)(_a); \
+	int _ret; \
+	_ret = uncached_logk(LOGK_READL, _addr); \
+	ETB_WAYPOINT; \
+	__a = __raw_read##_l##_no_log(_a); \
+	if (_ret) \
+		LOG_BARRIER; \
+	__a; \
+	})
+
+#define __raw_readb(a)		__raw_read_logged((a), b, u8)
+#define __raw_readw(a)		__raw_read_logged((a), w, u16)
+#define __raw_readl(a)		__raw_read_logged((a), l, u32)
+#define __raw_readq(a)		__raw_read_logged((a), q, u64)
+
 /* IO barriers */
 #define __iormb()		rmb()
 #define __iowmb()		wmb()
@@ -127,6 +161,16 @@
 #define writel_relaxed(v,c)	((void)__raw_writel((__force u32)cpu_to_le32(v),(c)))
 #define writeq_relaxed(v,c)	((void)__raw_writeq((__force u64)cpu_to_le64(v),(c)))
 
+#define readb_relaxed_no_log(c)	({ u8 __v = __raw_readb_no_log(c); __v; })
+#define readw_relaxed_no_log(c)	({ u16 __v = le16_to_cpu((__force __le16)__raw_readw_no_log(c)); __v; })
+#define readl_relaxed_no_log(c)	({ u32 __v = le32_to_cpu((__force __le32)__raw_readl_no_log(c)); __v; })
+#define readq_relaxed_no_log(c)	({ u64 __v = le64_to_cpu((__force __le64)__raw_readq_no_log(c)); __v; })
+
+#define writeb_relaxed_no_log(v, c)	((void)__raw_writeb_no_log((v), (c)))
+#define writew_relaxed_no_log(v, c)	((void)__raw_writew_no_log((__force u16)cpu_to_le16(v), (c)))
+#define writel_relaxed_no_log(v, c)	((void)__raw_writel_no_log((__force u32)cpu_to_le32(v), (c)))
+#define writeq_relaxed_no_log(v, c)	((void)__raw_writeq_no_log((__force u64)cpu_to_le64(v), (c)))
+
 /*
  * I/O memory access primitives. Reads are ordered relative to any
  * following Normal memory access. Writes are ordered relative to any prior
@@ -142,6 +186,16 @@
 #define writel(v,c)		({ __iowmb(); writel_relaxed((v),(c)); })
 #define writeq(v,c)		({ __iowmb(); writeq_relaxed((v),(c)); })
 
+#define readb_no_log(c)		({ u8  __v = readb_relaxed_no_log(c); __iormb(); __v; })
+#define readw_no_log(c)		({ u16 __v = readw_relaxed_no_log(c); __iormb(); __v; })
+#define readl_no_log(c)		({ u32 __v = readl_relaxed_no_log(c); __iormb(); __v; })
+#define readq_no_log(c)		({ u64 __v = readq_relaxed_no_log(c); __iormb(); __v; })
+
+#define writeb_no_log(v, c)		({ __iowmb(); writeb_relaxed_no_log((v), (c)); })
+#define writew_no_log(v, c)		({ __iowmb(); writew_relaxed_no_log((v), (c)); })
+#define writel_no_log(v, c)		({ __iowmb(); writel_relaxed_no_log((v), (c)); })
+#define writeq_no_log(v, c)		({ __iowmb(); writeq_relaxed_no_log((v), (c)); })
+
 /*
  *  I/O port access primitives.
  */
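With this change the default readl()/writel() family funnels through the
__raw_*_logged() wrappers, recording every MMIO access in the MSM RTB via
uncached_logk() before performing it, while the *_no_log variants keep the
original unlogged behaviour. A hedged, kernel-style usage sketch (hypothetical
device, register offsets and status bit, not from the patch) of when a driver
might opt out:

static u32 hypothetical_fifo_drain(void __iomem *base)
{
	u32 status = readl(base + 0x04);	/* logged: infrequent, shows up in RTB */
	u32 last = 0;

	while (status & 0x1) {			/* hypothetical "data ready" bit */
		last = readl_relaxed_no_log(base + 0x08);	/* hot loop: unlogged */
		status = readl_relaxed_no_log(base + 0x04);
	}
	return last;
}

As the comment in the hunk says, opting out should be the exception: the RTB
trace of recent register accesses exists precisely to aid post-mortem
debugging.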
diff -ruw linux-4.4.115/arch/arm64/include/asm/irq.h linux-4.4.115-fbx/arch/arm64/include/asm/irq.h
--- linux-4.4.115/arch/arm64/include/asm/irq.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/arch/arm64/include/asm/irq.h	2019-01-22 16:16:21.539228621 +0100
@@ -1,10 +1,45 @@
 #ifndef __ASM_IRQ_H
 #define __ASM_IRQ_H
 
+#define IRQ_STACK_SIZE			THREAD_SIZE
+#define IRQ_STACK_START_SP		THREAD_START_SP
+
+#ifndef __ASSEMBLER__
+
+#include <linux/percpu.h>
+
 #include <asm-generic/irq.h>
+#include <asm/thread_info.h>
 
 struct pt_regs;
 
+DECLARE_PER_CPU(unsigned long [IRQ_STACK_SIZE/sizeof(long)], irq_stack);
+
+/*
+ * The highest address on the stack, and the first to be used. Used to
+ * find the dummy-stack frame put down by el?_irq() in entry.S, which
+ * is structured as follows:
+ *
+ *       ------------
+ *       |          |  <- irq_stack_ptr
+ *   top ------------
+ *       |   x19    | <- irq_stack_ptr - 0x08
+ *       ------------
+ *       |   x29    | <- irq_stack_ptr - 0x10
+ *       ------------
+ *
+ * where x19 holds a copy of the task stack pointer where the struct pt_regs
+ * from kernel_entry can be found.
+ *
+ */
+#define IRQ_STACK_PTR(cpu) ((unsigned long)per_cpu(irq_stack, cpu) + IRQ_STACK_START_SP)
+
+/*
+ * The offset from irq_stack_ptr where entry.S will store the original
+ * stack pointer. Used by unwind_frame() and dump_backtrace().
+ */
+#define IRQ_STACK_TO_TASK_STACK(ptr) (*((unsigned long *)((ptr) - 0x08)))
+
 extern void set_handle_irq(void (*handle_irq)(struct pt_regs *));
 
 static inline int nr_legacy_irqs(void)
@@ -12,4 +47,17 @@
 	return 0;
 }
 
+void arch_trigger_all_cpu_backtrace(void);
+#define arch_trigger_all_cpu_backtrace arch_trigger_all_cpu_backtrace
+
+static inline bool on_irq_stack(unsigned long sp, int cpu)
+{
+	/* variable names the same as kernel/stacktrace.c */
+	unsigned long low = (unsigned long)per_cpu(irq_stack, cpu);
+	unsigned long high = low + IRQ_STACK_START_SP;
+
+	return (low <= sp && sp <= high);
+}
+
+#endif /* !__ASSEMBLER__ */
 #endif
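A user-space mock-up (not from the patch) of the dummy-frame layout described
above: the IRQ stack's first usable address is its highest, and the copy of
the task stack pointer (x19) saved by el?_irq() sits at irq_stack_ptr - 0x08,
which is exactly what IRQ_STACK_TO_TASK_STACK() dereferences during
unwinding. The stack size and saved value below are stand-ins.

#include <stdio.h>

#define STACK_WORDS 2048	/* stand-in for IRQ_STACK_SIZE / sizeof(long) */
#define IRQ_STACK_TO_TASK_STACK(ptr) (*((unsigned long *)((ptr) - 0x08)))

static unsigned long irq_stack[STACK_WORDS];

int main(void)
{
	unsigned long irq_stack_ptr = (unsigned long)irq_stack + sizeof(irq_stack);
	unsigned long task_sp = 0xffffffc000100000UL;	/* made-up task SP */

	/* what the dummy frame in entry.S does with x19 */
	*(unsigned long *)(irq_stack_ptr - 0x08) = task_sp;

	printf("recovered task sp: 0x%lx\n",
	       IRQ_STACK_TO_TASK_STACK(irq_stack_ptr));
	return 0;
}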
diff -ruw linux-4.4.115/arch/arm64/include/asm/kasan.h linux-4.4.115-fbx/arch/arm64/include/asm/kasan.h
--- linux-4.4.115/arch/arm64/include/asm/kasan.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/arch/arm64/include/asm/kasan.h	2019-01-22 16:16:21.539228621 +0100
@@ -7,13 +7,14 @@
 
 #include <linux/linkage.h>
 #include <asm/memory.h>
+#include <asm/pgtable-types.h>
 
 /*
  * KASAN_SHADOW_START: beginning of the kernel virtual addresses.
  * KASAN_SHADOW_END: KASAN_SHADOW_START + 1/8 of kernel virtual addresses.
  */
 #define KASAN_SHADOW_START      (VA_START)
-#define KASAN_SHADOW_END        (KASAN_SHADOW_START + (1UL << (VA_BITS - 3)))
+#define KASAN_SHADOW_END        (KASAN_SHADOW_START + KASAN_SHADOW_SIZE)
 
 /*
  * This value is used to map an address to the corresponding shadow
@@ -28,10 +29,12 @@
 #define KASAN_SHADOW_OFFSET     (KASAN_SHADOW_END - (1ULL << (64 - 3)))
 
 void kasan_init(void);
+void kasan_copy_shadow(pgd_t *pgdir);
 asmlinkage void kasan_early_init(void);
 
 #else
 static inline void kasan_init(void) { }
+static inline void kasan_copy_shadow(pgd_t *pgdir) { }
 #endif
 
 #endif
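KASAN_SHADOW_END is now expressed via KASAN_SHADOW_SIZE (defined in
<asm/memory.h> later in this patch as 1/8th of the kernel VA space). The
shadow mapping itself is unchanged: shadow = (addr >> 3) + KASAN_SHADOW_OFFSET,
with the offset chosen so the shadow of the first kernel address lands at
KASAN_SHADOW_START. A user-space check (not from the patch) for VA_BITS == 48:

#include <stdio.h>

#define VA_BITS			48
#define VA_START		(~0UL << VA_BITS)
#define KASAN_SHADOW_START	VA_START
#define KASAN_SHADOW_SIZE	(1UL << (VA_BITS - 3))
#define KASAN_SHADOW_END	(KASAN_SHADOW_START + KASAN_SHADOW_SIZE)
#define KASAN_SHADOW_OFFSET	(KASAN_SHADOW_END - (1UL << (64 - 3)))

static unsigned long kasan_shadow(unsigned long addr)
{
	return (addr >> 3) + KASAN_SHADOW_OFFSET;
}

int main(void)
{
	printf("shadow(VA_START)   = 0x%016lx\n", kasan_shadow(VA_START));
	printf("KASAN_SHADOW_START = 0x%016lx\n",
	       (unsigned long)KASAN_SHADOW_START);
	return 0;
}

Both lines print 0xffff000000000000, confirming the offset arithmetic.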
diff -ruw linux-4.4.115/arch/arm64/include/asm/Kbuild linux-4.4.115-fbx/arch/arm64/include/asm/Kbuild
--- linux-4.4.115/arch/arm64/include/asm/Kbuild	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/arch/arm64/include/asm/Kbuild	2019-10-29 09:26:22.993196817 +0100
@@ -2,7 +2,6 @@
 
 generic-y += bug.h
 generic-y += bugs.h
-generic-y += checksum.h
 generic-y += clkdev.h
 generic-y += cputime.h
 generic-y += current.h
@@ -14,6 +13,7 @@
 generic-y += emergency-restart.h
 generic-y += errno.h
 generic-y += ftrace.h
+generic-y += hash.h
 generic-y += hw_irq.h
 generic-y += ioctl.h
 generic-y += ioctls.h
diff -ruw linux-4.4.115/arch/arm64/include/asm/kernel-pgtable.h linux-4.4.115-fbx/arch/arm64/include/asm/kernel-pgtable.h
--- linux-4.4.115/arch/arm64/include/asm/kernel-pgtable.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/arch/arm64/include/asm/kernel-pgtable.h	2019-01-22 16:16:21.539228621 +0100
@@ -19,6 +19,8 @@
 #ifndef __ASM_KERNEL_PGTABLE_H
 #define __ASM_KERNEL_PGTABLE_H
 
+#include <asm/pgtable.h>
+#include <asm/sparsemem.h>
 
 /*
  * The linear mapping and the start of memory are both 2M aligned (per
@@ -53,6 +55,12 @@
 #define SWAPPER_DIR_SIZE	(SWAPPER_PGTABLE_LEVELS * PAGE_SIZE)
 #define IDMAP_DIR_SIZE		(IDMAP_PGTABLE_LEVELS * PAGE_SIZE)
 
+#ifdef CONFIG_ARM64_SW_TTBR0_PAN
+#define RESERVED_TTBR0_SIZE	(PAGE_SIZE)
+#else
+#define RESERVED_TTBR0_SIZE	(0)
+#endif
+
 /* Initial memory map size */
 #if ARM64_SWAPPER_USES_SECTION_MAPS
 #define SWAPPER_BLOCK_SHIFT	SECTION_SHIFT
@@ -70,8 +78,16 @@
 /*
  * Initial memory map attributes.
  */
-#define SWAPPER_PTE_FLAGS	(PTE_TYPE_PAGE | PTE_AF | PTE_SHARED)
-#define SWAPPER_PMD_FLAGS	(PMD_TYPE_SECT | PMD_SECT_AF | PMD_SECT_S)
+#define _SWAPPER_PTE_FLAGS	(PTE_TYPE_PAGE | PTE_AF | PTE_SHARED)
+#define _SWAPPER_PMD_FLAGS	(PMD_TYPE_SECT | PMD_SECT_AF | PMD_SECT_S)
+
+#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
+#define SWAPPER_PTE_FLAGS	(_SWAPPER_PTE_FLAGS | PTE_NG)
+#define SWAPPER_PMD_FLAGS	(_SWAPPER_PMD_FLAGS | PMD_SECT_NG)
+#else
+#define SWAPPER_PTE_FLAGS	_SWAPPER_PTE_FLAGS
+#define SWAPPER_PMD_FLAGS	_SWAPPER_PMD_FLAGS
+#endif
 
 #if ARM64_SWAPPER_USES_SECTION_MAPS
 #define SWAPPER_MM_MMUFLAGS	(PMD_ATTRINDX(MT_NORMAL) | SWAPPER_PMD_FLAGS)
@@ -79,5 +95,31 @@
 #define SWAPPER_MM_MMUFLAGS	(PTE_ATTRINDX(MT_NORMAL) | SWAPPER_PTE_FLAGS)
 #endif
 
+/*
+ * To make optimal use of block mappings when laying out the linear
+ * mapping, round down the base of physical memory to a size that can
+ * be mapped efficiently, i.e., either PUD_SIZE (4k granule) or PMD_SIZE
+ * (64k granule), or a multiple that can be mapped using contiguous bits
+ * in the page tables: 32 * PMD_SIZE (16k granule)
+ */
+#if defined(CONFIG_ARM64_4K_PAGES)
+#define ARM64_MEMSTART_SHIFT		PUD_SHIFT
+#elif defined(CONFIG_ARM64_16K_PAGES)
+#define ARM64_MEMSTART_SHIFT		(PMD_SHIFT + 5)
+#else
+#define ARM64_MEMSTART_SHIFT		PMD_SHIFT
+#endif
+
+/*
+ * sparsemem vmemmap imposes an additional requirement on the alignment of
+ * memstart_addr, due to the fact that the base of the vmemmap region
+ * has a direct correspondence, and needs to appear sufficiently aligned
+ * in the virtual address space.
+ */
+#if defined(CONFIG_SPARSEMEM_VMEMMAP) && ARM64_MEMSTART_SHIFT < SECTION_SIZE_BITS
+#define ARM64_MEMSTART_ALIGN	(1UL << SECTION_SIZE_BITS)
+#else
+#define ARM64_MEMSTART_ALIGN	(1UL << ARM64_MEMSTART_SHIFT)
+#endif
 
 #endif	/* __ASM_KERNEL_PGTABLE_H */
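Worked numbers for ARM64_MEMSTART_SHIFT above, assuming the usual granule
geometry (PUD_SHIFT = 30 with 4K pages, PMD_SHIFT = 25 with 16K pages,
PMD_SHIFT = 29 with 64K pages); this small program (not from the patch) just
prints the resulting alignments:

#include <stdio.h>

int main(void)
{
	const char *granule[] = { "4K", "16K", "64K" };
	int shift[] = { 30, 25 + 5, 29 };	/* PUD, 32*PMD, PMD */

	for (int i = 0; i < 3; i++)
		printf("%-3s granule: memstart aligned to %lu MiB\n",
		       granule[i], (1UL << shift[i]) >> 20);
	return 0;
}

So a 4K or 16K kernel rounds the base of memory down to 1 GiB, a 64K kernel
to 512 MiB (or to SECTION_SIZE_BITS if sparsemem-vmemmap demands more, per the
second block above).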
diff -ruw linux-4.4.115/arch/arm64/include/asm/kvm_arm.h linux-4.4.115-fbx/arch/arm64/include/asm/kvm_arm.h
--- linux-4.4.115/arch/arm64/include/asm/kvm_arm.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/arch/arm64/include/asm/kvm_arm.h	2019-10-29 09:26:22.997196856 +0100
@@ -83,17 +83,6 @@
 #define HCR_INT_OVERRIDE   (HCR_FMO | HCR_IMO)
 
 
-/* Hyp System Control Register (SCTLR_EL2) bits */
-#define SCTLR_EL2_EE	(1 << 25)
-#define SCTLR_EL2_WXN	(1 << 19)
-#define SCTLR_EL2_I	(1 << 12)
-#define SCTLR_EL2_SA	(1 << 3)
-#define SCTLR_EL2_C	(1 << 2)
-#define SCTLR_EL2_A	(1 << 1)
-#define SCTLR_EL2_M	1
-#define SCTLR_EL2_FLAGS	(SCTLR_EL2_M | SCTLR_EL2_A | SCTLR_EL2_C |	\
-			 SCTLR_EL2_SA | SCTLR_EL2_I)
-
 /* TCR_EL2 Registers bits */
 #define TCR_EL2_RES1	((1 << 31) | (1 << 23))
 #define TCR_EL2_TBI	(1 << 20)
@@ -123,6 +112,7 @@
 #define VTCR_EL2_SL0_LVL1	(1 << 6)
 #define VTCR_EL2_T0SZ_MASK	0x3f
 #define VTCR_EL2_T0SZ_40B	24
+#define VTCR_EL2_VS		19
 
 /*
  * We configure the Stage-2 page tables to always restrict the IPA space to be
@@ -166,7 +156,7 @@
 
 #define VTTBR_BADDR_MASK  (((UL(1) << (PHYS_MASK_SHIFT - VTTBR_X)) - 1) << VTTBR_X)
 #define VTTBR_VMID_SHIFT  (UL(48))
-#define VTTBR_VMID_MASK	  (UL(0xFF) << VTTBR_VMID_SHIFT)
+#define VTTBR_VMID_MASK(size) (_AT(u64, (1 << size) - 1) << VTTBR_VMID_SHIFT)
 
 /* Hyp System Trap Register */
 #define HSTR_EL2_T(x)	(1 << x)
diff -ruw linux-4.4.115/arch/arm64/include/asm/kvm_asm.h linux-4.4.115-fbx/arch/arm64/include/asm/kvm_asm.h
--- linux-4.4.115/arch/arm64/include/asm/kvm_asm.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/arch/arm64/include/asm/kvm_asm.h	2019-01-22 16:16:21.539228621 +0100
@@ -20,94 +20,38 @@
 
 #include <asm/virt.h>
 
-/*
- * 0 is reserved as an invalid value.
- * Order *must* be kept in sync with the hyp switch code.
- */
-#define	MPIDR_EL1	1	/* MultiProcessor Affinity Register */
-#define	CSSELR_EL1	2	/* Cache Size Selection Register */
-#define	SCTLR_EL1	3	/* System Control Register */
-#define	ACTLR_EL1	4	/* Auxiliary Control Register */
-#define	CPACR_EL1	5	/* Coprocessor Access Control */
-#define	TTBR0_EL1	6	/* Translation Table Base Register 0 */
-#define	TTBR1_EL1	7	/* Translation Table Base Register 1 */
-#define	TCR_EL1		8	/* Translation Control Register */
-#define	ESR_EL1		9	/* Exception Syndrome Register */
-#define	AFSR0_EL1	10	/* Auxilary Fault Status Register 0 */
-#define	AFSR1_EL1	11	/* Auxilary Fault Status Register 1 */
-#define	FAR_EL1		12	/* Fault Address Register */
-#define	MAIR_EL1	13	/* Memory Attribute Indirection Register */
-#define	VBAR_EL1	14	/* Vector Base Address Register */
-#define	CONTEXTIDR_EL1	15	/* Context ID Register */
-#define	TPIDR_EL0	16	/* Thread ID, User R/W */
-#define	TPIDRRO_EL0	17	/* Thread ID, User R/O */
-#define	TPIDR_EL1	18	/* Thread ID, Privileged */
-#define	AMAIR_EL1	19	/* Aux Memory Attribute Indirection Register */
-#define	CNTKCTL_EL1	20	/* Timer Control Register (EL1) */
-#define	PAR_EL1		21	/* Physical Address Register */
-#define MDSCR_EL1	22	/* Monitor Debug System Control Register */
-#define MDCCINT_EL1	23	/* Monitor Debug Comms Channel Interrupt Enable Reg */
-
-/* 32bit specific registers. Keep them at the end of the range */
-#define	DACR32_EL2	24	/* Domain Access Control Register */
-#define	IFSR32_EL2	25	/* Instruction Fault Status Register */
-#define	FPEXC32_EL2	26	/* Floating-Point Exception Control Register */
-#define	DBGVCR32_EL2	27	/* Debug Vector Catch Register */
-#define	NR_SYS_REGS	28
-
-/* 32bit mapping */
-#define c0_MPIDR	(MPIDR_EL1 * 2)	/* MultiProcessor ID Register */
-#define c0_CSSELR	(CSSELR_EL1 * 2)/* Cache Size Selection Register */
-#define c1_SCTLR	(SCTLR_EL1 * 2)	/* System Control Register */
-#define c1_ACTLR	(ACTLR_EL1 * 2)	/* Auxiliary Control Register */
-#define c1_CPACR	(CPACR_EL1 * 2)	/* Coprocessor Access Control */
-#define c2_TTBR0	(TTBR0_EL1 * 2)	/* Translation Table Base Register 0 */
-#define c2_TTBR0_high	(c2_TTBR0 + 1)	/* TTBR0 top 32 bits */
-#define c2_TTBR1	(TTBR1_EL1 * 2)	/* Translation Table Base Register 1 */
-#define c2_TTBR1_high	(c2_TTBR1 + 1)	/* TTBR1 top 32 bits */
-#define c2_TTBCR	(TCR_EL1 * 2)	/* Translation Table Base Control R. */
-#define c3_DACR		(DACR32_EL2 * 2)/* Domain Access Control Register */
-#define c5_DFSR		(ESR_EL1 * 2)	/* Data Fault Status Register */
-#define c5_IFSR		(IFSR32_EL2 * 2)/* Instruction Fault Status Register */
-#define c5_ADFSR	(AFSR0_EL1 * 2)	/* Auxiliary Data Fault Status R */
-#define c5_AIFSR	(AFSR1_EL1 * 2)	/* Auxiliary Instr Fault Status R */
-#define c6_DFAR		(FAR_EL1 * 2)	/* Data Fault Address Register */
-#define c6_IFAR		(c6_DFAR + 1)	/* Instruction Fault Address Register */
-#define c7_PAR		(PAR_EL1 * 2)	/* Physical Address Register */
-#define c7_PAR_high	(c7_PAR + 1)	/* PAR top 32 bits */
-#define c10_PRRR	(MAIR_EL1 * 2)	/* Primary Region Remap Register */
-#define c10_NMRR	(c10_PRRR + 1)	/* Normal Memory Remap Register */
-#define c12_VBAR	(VBAR_EL1 * 2)	/* Vector Base Address Register */
-#define c13_CID		(CONTEXTIDR_EL1 * 2)	/* Context ID Register */
-#define c13_TID_URW	(TPIDR_EL0 * 2)	/* Thread ID, User R/W */
-#define c13_TID_URO	(TPIDRRO_EL0 * 2)/* Thread ID, User R/O */
-#define c13_TID_PRIV	(TPIDR_EL1 * 2)	/* Thread ID, Privileged */
-#define c10_AMAIR0	(AMAIR_EL1 * 2)	/* Aux Memory Attr Indirection Reg */
-#define c10_AMAIR1	(c10_AMAIR0 + 1)/* Aux Memory Attr Indirection Reg */
-#define c14_CNTKCTL	(CNTKCTL_EL1 * 2) /* Timer Control Register (PL1) */
-
-#define cp14_DBGDSCRext	(MDSCR_EL1 * 2)
-#define cp14_DBGBCR0	(DBGBCR0_EL1 * 2)
-#define cp14_DBGBVR0	(DBGBVR0_EL1 * 2)
-#define cp14_DBGBXVR0	(cp14_DBGBVR0 + 1)
-#define cp14_DBGWCR0	(DBGWCR0_EL1 * 2)
-#define cp14_DBGWVR0	(DBGWVR0_EL1 * 2)
-#define cp14_DBGDCCINT	(MDCCINT_EL1 * 2)
-
-#define NR_COPRO_REGS	(NR_SYS_REGS * 2)
-
 #define ARM_EXCEPTION_IRQ	  0
 #define ARM_EXCEPTION_TRAP	  1
+/* The hyp-stub will return this for any kvm_call_hyp() call */
+#define ARM_EXCEPTION_HYP_GONE	  2
 
 #define KVM_ARM64_DEBUG_DIRTY_SHIFT	0
 #define KVM_ARM64_DEBUG_DIRTY		(1 << KVM_ARM64_DEBUG_DIRTY_SHIFT)
 
+#define kvm_ksym_ref(sym)		phys_to_virt((u64)&sym - kimage_voffset)
+
 #ifndef __ASSEMBLY__
+#if __GNUC__ > 4
+#define kvm_ksym_shift			(PAGE_OFFSET - KIMAGE_VADDR)
+#else
+/*
+ * GCC versions 4.9 and older will fold the constant below into the addend of
+ * the reference to 'sym' above if kvm_ksym_shift is declared static or if the
+ * constant is used directly. However, since we use the small code model for
+ * the core kernel, the reference to 'sym' will be emitted as an adrp/add pair,
+ * with a +/- 4 GB range, resulting in linker relocation errors if the shift
+ * is sufficiently large. So prevent the compiler from folding the shift into
+ * the addend, by making the shift a variable with external linkage.
+ */
+__weak u64 kvm_ksym_shift = PAGE_OFFSET - KIMAGE_VADDR;
+#endif
+
 struct kvm;
 struct kvm_vcpu;
 
 extern char __kvm_hyp_init[];
 extern char __kvm_hyp_init_end[];
+extern char __kvm_hyp_reset[];
 
 extern char __kvm_hyp_vector[];
 
diff -ruw linux-4.4.115/arch/arm64/include/asm/kvm_host.h linux-4.4.115-fbx/arch/arm64/include/asm/kvm_host.h
--- linux-4.4.115/arch/arm64/include/asm/kvm_host.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/arch/arm64/include/asm/kvm_host.h	2019-01-22 16:16:21.539228621 +0100
@@ -25,7 +25,6 @@
 #include <linux/types.h>
 #include <linux/kvm_types.h>
 #include <asm/kvm.h>
-#include <asm/kvm_asm.h>
 #include <asm/kvm_mmio.h>
 
 #define __KVM_HAVE_ARCH_INTC_INITIALIZED
@@ -45,6 +44,7 @@
 int __attribute_const__ kvm_target_cpu(void);
 int kvm_reset_vcpu(struct kvm_vcpu *vcpu);
 int kvm_arch_dev_ioctl_check_extension(long ext);
+phys_addr_t kvm_hyp_reset_entry(void);
 
 struct kvm_arch {
 	/* The VMID generation used for the virt. memory system */
@@ -85,6 +85,86 @@
 	u64 hpfar_el2;		/* Hyp IPA Fault Address Register */
 };
 
+/*
+ * 0 is reserved as an invalid value.
+ * Order should be kept in sync with the save/restore code.
+ */
+enum vcpu_sysreg {
+	__INVALID_SYSREG__,
+	MPIDR_EL1,	/* MultiProcessor Affinity Register */
+	CSSELR_EL1,	/* Cache Size Selection Register */
+	SCTLR_EL1,	/* System Control Register */
+	ACTLR_EL1,	/* Auxiliary Control Register */
+	CPACR_EL1,	/* Coprocessor Access Control */
+	TTBR0_EL1,	/* Translation Table Base Register 0 */
+	TTBR1_EL1,	/* Translation Table Base Register 1 */
+	TCR_EL1,	/* Translation Control Register */
+	ESR_EL1,	/* Exception Syndrome Register */
+	AFSR0_EL1,	/* Auxilary Fault Status Register 0 */
+	AFSR1_EL1,	/* Auxilary Fault Status Register 1 */
+	FAR_EL1,	/* Fault Address Register */
+	MAIR_EL1,	/* Memory Attribute Indirection Register */
+	VBAR_EL1,	/* Vector Base Address Register */
+	CONTEXTIDR_EL1,	/* Context ID Register */
+	TPIDR_EL0,	/* Thread ID, User R/W */
+	TPIDRRO_EL0,	/* Thread ID, User R/O */
+	TPIDR_EL1,	/* Thread ID, Privileged */
+	AMAIR_EL1,	/* Aux Memory Attribute Indirection Register */
+	CNTKCTL_EL1,	/* Timer Control Register (EL1) */
+	PAR_EL1,	/* Physical Address Register */
+	MDSCR_EL1,	/* Monitor Debug System Control Register */
+	MDCCINT_EL1,	/* Monitor Debug Comms Channel Interrupt Enable Reg */
+
+	/* 32bit specific registers. Keep them at the end of the range */
+	DACR32_EL2,	/* Domain Access Control Register */
+	IFSR32_EL2,	/* Instruction Fault Status Register */
+	FPEXC32_EL2,	/* Floating-Point Exception Control Register */
+	DBGVCR32_EL2,	/* Debug Vector Catch Register */
+
+	NR_SYS_REGS	/* Nothing after this line! */
+};
+
+/* 32bit mapping */
+#define c0_MPIDR	(MPIDR_EL1 * 2)	/* MultiProcessor ID Register */
+#define c0_CSSELR	(CSSELR_EL1 * 2)/* Cache Size Selection Register */
+#define c1_SCTLR	(SCTLR_EL1 * 2)	/* System Control Register */
+#define c1_ACTLR	(ACTLR_EL1 * 2)	/* Auxiliary Control Register */
+#define c1_CPACR	(CPACR_EL1 * 2)	/* Coprocessor Access Control */
+#define c2_TTBR0	(TTBR0_EL1 * 2)	/* Translation Table Base Register 0 */
+#define c2_TTBR0_high	(c2_TTBR0 + 1)	/* TTBR0 top 32 bits */
+#define c2_TTBR1	(TTBR1_EL1 * 2)	/* Translation Table Base Register 1 */
+#define c2_TTBR1_high	(c2_TTBR1 + 1)	/* TTBR1 top 32 bits */
+#define c2_TTBCR	(TCR_EL1 * 2)	/* Translation Table Base Control R. */
+#define c3_DACR		(DACR32_EL2 * 2)/* Domain Access Control Register */
+#define c5_DFSR		(ESR_EL1 * 2)	/* Data Fault Status Register */
+#define c5_IFSR		(IFSR32_EL2 * 2)/* Instruction Fault Status Register */
+#define c5_ADFSR	(AFSR0_EL1 * 2)	/* Auxiliary Data Fault Status R */
+#define c5_AIFSR	(AFSR1_EL1 * 2)	/* Auxiliary Instr Fault Status R */
+#define c6_DFAR		(FAR_EL1 * 2)	/* Data Fault Address Register */
+#define c6_IFAR		(c6_DFAR + 1)	/* Instruction Fault Address Register */
+#define c7_PAR		(PAR_EL1 * 2)	/* Physical Address Register */
+#define c7_PAR_high	(c7_PAR + 1)	/* PAR top 32 bits */
+#define c10_PRRR	(MAIR_EL1 * 2)	/* Primary Region Remap Register */
+#define c10_NMRR	(c10_PRRR + 1)	/* Normal Memory Remap Register */
+#define c12_VBAR	(VBAR_EL1 * 2)	/* Vector Base Address Register */
+#define c13_CID		(CONTEXTIDR_EL1 * 2)	/* Context ID Register */
+#define c13_TID_URW	(TPIDR_EL0 * 2)	/* Thread ID, User R/W */
+#define c13_TID_URO	(TPIDRRO_EL0 * 2)/* Thread ID, User R/O */
+#define c13_TID_PRIV	(TPIDR_EL1 * 2)	/* Thread ID, Privileged */
+#define c10_AMAIR0	(AMAIR_EL1 * 2)	/* Aux Memory Attr Indirection Reg */
+#define c10_AMAIR1	(c10_AMAIR0 + 1)/* Aux Memory Attr Indirection Reg */
+#define c14_CNTKCTL	(CNTKCTL_EL1 * 2) /* Timer Control Register (PL1) */
+
+#define cp14_DBGDSCRext	(MDSCR_EL1 * 2)
+#define cp14_DBGBCR0	(DBGBCR0_EL1 * 2)
+#define cp14_DBGBVR0	(DBGBVR0_EL1 * 2)
+#define cp14_DBGBXVR0	(cp14_DBGBVR0 + 1)
+#define cp14_DBGWCR0	(DBGWCR0_EL1 * 2)
+#define cp14_DBGWVR0	(DBGWVR0_EL1 * 2)
+#define cp14_DBGDCCINT	(MDCCINT_EL1 * 2)
+
+#define NR_COPRO_REGS	(NR_SYS_REGS * 2)
+
 struct kvm_cpu_context {
 	struct kvm_regs	gp_regs;
 	union {
@@ -222,7 +302,7 @@
 struct kvm_vcpu *kvm_arm_get_running_vcpu(void);
 struct kvm_vcpu * __percpu *kvm_get_running_vcpus(void);
 
-u64 kvm_call_hyp(void *hypfn, ...);
+u64 __kvm_call_hyp(void *hypfn, ...);
 void force_vm_exit(const cpumask_t *mask);
 void kvm_mmu_wp_memory_region(struct kvm *kvm, int slot);
 
@@ -243,11 +323,25 @@
 	 * Call initialization code, and switch to the full blown
 	 * HYP code.
 	 */
-	kvm_call_hyp((void *)boot_pgd_ptr, pgd_ptr,
+	__kvm_call_hyp((void *)boot_pgd_ptr, pgd_ptr,
 		     hyp_stack_ptr, vector_ptr);
 }
 
-static inline void kvm_arch_hardware_disable(void) {}
+static inline void __cpu_init_stage2(void)
+{
+}
+
+static inline void __cpu_reset_hyp_mode(phys_addr_t boot_pgd_ptr,
+					phys_addr_t phys_idmap_start)
+{
+	/*
+	 * Call reset code, and switch back to stub hyp vectors.
+	 * Uses __kvm_call_hyp() to avoid kaslr's kvm_ksym_ref() translation.
+	 */
+	__kvm_call_hyp((void *)kvm_hyp_reset_entry(),
+		       boot_pgd_ptr, phys_idmap_start);
+}
+
 static inline void kvm_arch_hardware_unsetup(void) {}
 static inline void kvm_arch_sync_events(struct kvm *kvm) {}
 static inline void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu) {}
@@ -258,4 +352,6 @@
 void kvm_arm_clear_debug(struct kvm_vcpu *vcpu);
 void kvm_arm_reset_debug_ptr(struct kvm_vcpu *vcpu);
 
+#define kvm_call_hyp(f, ...) __kvm_call_hyp(kvm_ksym_ref(f), ##__VA_ARGS__)
+
 #endif /* __ARM64_KVM_HOST_H__ */
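The hyp-call entry point is renamed to __kvm_call_hyp() and the old spelling
is reintroduced as a macro that runs the function symbol through
kvm_ksym_ref() first, translating the (possibly KASLR-shifted) kernel-image
address into its linear-map alias before it is handed to EL2. Callers are
unchanged, e.g. (usage sketch, function declared in <asm/kvm_asm.h>):

	kvm_call_hyp(__kvm_flush_vm_context);
	/* expands to: __kvm_call_hyp(kvm_ksym_ref(__kvm_flush_vm_context)); */

The reset path above deliberately calls __kvm_call_hyp() directly, since its
target is already a physical entry point from kvm_hyp_reset_entry().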
diff -ruw linux-4.4.115/arch/arm64/include/asm/kvm_mmio.h linux-4.4.115-fbx/arch/arm64/include/asm/kvm_mmio.h
--- linux-4.4.115/arch/arm64/include/asm/kvm_mmio.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/arch/arm64/include/asm/kvm_mmio.h	2019-01-22 16:16:21.539228621 +0100
@@ -19,7 +19,6 @@
 #define __ARM64_KVM_MMIO_H__
 
 #include <linux/kvm_host.h>
-#include <asm/kvm_asm.h>
 #include <asm/kvm_arm.h>
 
 /*
diff -ruw linux-4.4.115/arch/arm64/include/asm/lse.h linux-4.4.115-fbx/arch/arm64/include/asm/lse.h
--- linux-4.4.115/arch/arm64/include/asm/lse.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/arch/arm64/include/asm/lse.h	2019-01-22 16:16:21.539228621 +0100
@@ -26,6 +26,7 @@
 
 /* Macro for constructing calls to out-of-line ll/sc atomics */
 #define __LL_SC_CALL(op)	"bl\t" __stringify(__LL_SC_PREFIX(op)) "\n"
+#define __LL_SC_CLOBBERS	"x16", "x17", "x30"
 
 /* In-line patching at runtime */
 #define ARM64_LSE_ATOMIC_INSN(llsc, lse)				\
diff -ruw linux-4.4.115/arch/arm64/include/asm/memory.h linux-4.4.115-fbx/arch/arm64/include/asm/memory.h
--- linux-4.4.115/arch/arm64/include/asm/memory.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/arch/arm64/include/asm/memory.h	2019-10-29 09:26:22.997196856 +0100
@@ -24,6 +24,7 @@
 #include <linux/compiler.h>
 #include <linux/const.h>
 #include <linux/types.h>
+#include <asm/bug.h>
 #include <asm/sizes.h>
 
 /*
@@ -45,15 +46,15 @@
  * VA_START - the first kernel virtual address.
  * TASK_SIZE - the maximum size of a user space task.
  * TASK_UNMAPPED_BASE - the lower boundary of the mmap VM area.
- * The module space lives between the addresses given by TASK_SIZE
- * and PAGE_OFFSET - it must be within 128MB of the kernel text.
  */
 #define VA_BITS			(CONFIG_ARM64_VA_BITS)
 #define VA_START		(UL(0xffffffffffffffff) << VA_BITS)
 #define PAGE_OFFSET		(UL(0xffffffffffffffff) << (VA_BITS - 1))
-#define MODULES_END		(PAGE_OFFSET)
-#define MODULES_VADDR		(MODULES_END - SZ_64M)
-#define PCI_IO_END		(MODULES_VADDR - SZ_2M)
+#define KIMAGE_VADDR		(MODULES_END)
+#define MODULES_END		(MODULES_VADDR + MODULES_VSIZE)
+#define MODULES_VADDR		(VA_START + KASAN_SHADOW_SIZE)
+#define MODULES_VSIZE		(SZ_128M)
+#define PCI_IO_END		(PAGE_OFFSET - SZ_2M)
 #define PCI_IO_START		(PCI_IO_END - PCI_IO_SIZE)
 #define FIXADDR_TOP		(PCI_IO_START - SZ_2M)
 #define TASK_SIZE_64		(UL(1) << VA_BITS)
@@ -70,13 +71,31 @@
 
 #define TASK_UNMAPPED_BASE	(PAGE_ALIGN(TASK_SIZE / 4))
 
+#define KERNEL_START      _text
+#define KERNEL_END        _end
+
+/*
+ * The size of the KASAN shadow region. This should be 1/8th of the
+ * size of the entire kernel virtual address space.
+ */
+#ifdef CONFIG_KASAN
+#define KASAN_SHADOW_SIZE	(UL(1) << (VA_BITS - 3))
+#else
+#define KASAN_SHADOW_SIZE	(0)
+#endif
+
 /*
  * Physical vs virtual RAM address space conversion.  These are
  * private definitions which should NOT be used outside memory.h
  * files.  Use virt_to_phys/phys_to_virt/__pa/__va instead.
  */
-#define __virt_to_phys(x)	(((phys_addr_t)(x) - PAGE_OFFSET + PHYS_OFFSET))
-#define __phys_to_virt(x)	((unsigned long)((x) - PHYS_OFFSET + PAGE_OFFSET))
+#define __virt_to_phys(x) ({						\
+	phys_addr_t __x = (phys_addr_t)(x);				\
+	__x & BIT(VA_BITS - 1) ? (__x & ~PAGE_OFFSET) + PHYS_OFFSET :	\
+				 (__x - kimage_voffset); })
+
+#define __phys_to_virt(x)	((unsigned long)((x) - PHYS_OFFSET) | PAGE_OFFSET)
+#define __phys_to_kimg(x)	((unsigned long)((x) + kimage_voffset))
 
 /*
  * Convert a page to/from a physical address
@@ -100,19 +119,45 @@
 #define MT_S2_NORMAL		0xf
 #define MT_S2_DEVICE_nGnRE	0x1
 
+#ifdef CONFIG_ARM64_4K_PAGES
+#define IOREMAP_MAX_ORDER	(PUD_SHIFT)
+#else
+#define IOREMAP_MAX_ORDER	(PMD_SHIFT)
+#endif
+
+#ifdef CONFIG_BLK_DEV_INITRD
+#define __early_init_dt_declare_initrd(__start, __end)			\
+	do {								\
+		initrd_start = (__start);				\
+		initrd_end = (__end);					\
+	} while (0)
+#endif
+
 #ifndef __ASSEMBLY__
 
-extern phys_addr_t		memstart_addr;
+#include <linux/bitops.h>
+#include <linux/mmdebug.h>
+
+extern s64			memstart_addr;
 /* PHYS_OFFSET - the physical address of the start of memory. */
-#define PHYS_OFFSET		({ memstart_addr; })
+#define PHYS_OFFSET		({ VM_BUG_ON(memstart_addr & 1); memstart_addr; })
+
+/* the virtual base of the kernel image (minus TEXT_OFFSET) */
+extern u64			kimage_vaddr;
+
+/* the offset between the kernel virtual and physical mappings */
+extern u64			kimage_voffset;
+
+static inline unsigned long kaslr_offset(void)
+{
+	return kimage_vaddr - KIMAGE_VADDR;
+}
 
 /*
- * The maximum physical address that the linear direct mapping
- * of system RAM can cover. (PAGE_OFFSET can be interpreted as
- * a 2's complement signed quantity and negated to derive the
- * maximum size of the linear mapping.)
+ * Allow all memory at the discovery stage. We will clip it later.
  */
-#define MAX_MEMBLOCK_ADDR	({ memstart_addr - PAGE_OFFSET - 1; })
+#define MIN_MEMBLOCK_ADDR	0
+#define MAX_MEMBLOCK_ADDR	U64_MAX
 
 /*
  * PFNs are used to describe any physical page; this means
@@ -148,6 +193,7 @@
 #define __va(x)			((void *)__phys_to_virt((phys_addr_t)(x)))
 #define pfn_to_kaddr(pfn)	__va((pfn) << PAGE_SHIFT)
 #define virt_to_pfn(x)      __phys_to_pfn(__virt_to_phys(x))
+#define sym_to_pfn(x)	    __phys_to_pfn(__pa_symbol(x))
 
 /*
  *  virt_to_page(k)	convert a _valid_ virtual address to struct page *
@@ -156,7 +202,11 @@
 #define ARCH_PFN_OFFSET		((unsigned long)PHYS_PFN_OFFSET)
 
 #define virt_to_page(kaddr)	pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
-#define	virt_addr_valid(kaddr)	pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
+#define _virt_addr_valid(kaddr)	pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
+
+#define _virt_addr_is_linear(kaddr)	(((u64)(kaddr)) >= PAGE_OFFSET)
+#define virt_addr_valid(kaddr)		(_virt_addr_is_linear(kaddr) && \
+					 _virt_addr_valid(kaddr))
 
 #endif
 
diff -ruw linux-4.4.115/arch/arm64/include/asm/mmu_context.h linux-4.4.115-fbx/arch/arm64/include/asm/mmu_context.h
--- linux-4.4.115/arch/arm64/include/asm/mmu_context.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/arch/arm64/include/asm/mmu_context.h	2019-01-22 16:16:21.539228621 +0100
@@ -23,23 +23,30 @@
 #include <linux/sched.h>
 
 #include <asm/cacheflush.h>
+#include <asm/cpufeature.h>
 #include <asm/proc-fns.h>
 #include <asm-generic/mm_hooks.h>
 #include <asm/cputype.h>
 #include <asm/pgtable.h>
+#include <linux/msm_rtb.h>
+#include <asm/tlbflush.h>
 
 #ifdef CONFIG_PID_IN_CONTEXTIDR
 static inline void contextidr_thread_switch(struct task_struct *next)
 {
+	pid_t pid = task_pid_nr(next);
 	asm(
 	"	msr	contextidr_el1, %0\n"
 	"	isb"
 	:
-	: "r" (task_pid_nr(next)));
+	: "r" (pid));
+	uncached_logk(LOGK_CTXID, (void *)(u64)pid);
+
 }
 #else
 static inline void contextidr_thread_switch(struct task_struct *next)
 {
+	uncached_logk(LOGK_CTXID, (void *)(u64)task_pid_nr(next));
 }
 #endif
 
@@ -48,7 +55,7 @@
  */
 static inline void cpu_set_reserved_ttbr0(void)
 {
-	unsigned long ttbr = page_to_phys(empty_zero_page);
+	unsigned long ttbr = __pa_symbol(empty_zero_page);
 
 	asm(
 	"	msr	ttbr0_el1, %0			// set TTBR0\n"
@@ -57,6 +64,13 @@
 	: "r" (ttbr));
 }
 
+static inline void cpu_switch_mm(pgd_t *pgd, struct mm_struct *mm)
+{
+	BUG_ON(pgd == swapper_pg_dir);
+	cpu_set_reserved_ttbr0();
+	cpu_do_switch_mm(virt_to_phys(pgd), mm);
+}
+
 /*
  * TCR.T0SZ value to use when the ID map is active. Usually equals
  * TCR_T0SZ(VA_BITS), unless system RAM is positioned very high in
@@ -73,7 +87,7 @@
 /*
  * Set TCR.T0SZ to its default value (based on VA_BITS)
  */
-static inline void cpu_set_default_tcr_t0sz(void)
+static inline void __cpu_set_tcr_t0sz(unsigned long t0sz)
 {
 	unsigned long tcr;
 
@@ -86,7 +100,62 @@
 	"	msr	tcr_el1, %0	;"
 	"	isb"
 	: "=&r" (tcr)
-	: "r"(TCR_T0SZ(VA_BITS)), "I"(TCR_T0SZ_OFFSET), "I"(TCR_TxSZ_WIDTH));
+	: "r"(t0sz), "I"(TCR_T0SZ_OFFSET), "I"(TCR_TxSZ_WIDTH));
+}
+
+#define cpu_set_default_tcr_t0sz()	__cpu_set_tcr_t0sz(TCR_T0SZ(VA_BITS))
+#define cpu_set_idmap_tcr_t0sz()	__cpu_set_tcr_t0sz(idmap_t0sz)
+
+/*
+ * Remove the idmap from TTBR0_EL1 and install the pgd of the active mm.
+ *
+ * The idmap lives in the same VA range as userspace, but uses global entries
+ * and may use a different TCR_EL1.T0SZ. To avoid issues resulting from
+ * speculative TLB fetches, we must temporarily install the reserved page
+ * tables while we invalidate the TLBs and set up the correct TCR_EL1.T0SZ.
+ *
+ * If current is not a user task, the mm covers the TTBR1_EL1 page tables,
+ * which should not be installed in TTBR0_EL1. In this case we can leave the
+ * reserved page tables in place.
+ */
+static inline void cpu_uninstall_idmap(void)
+{
+	struct mm_struct *mm = current->active_mm;
+
+	cpu_set_reserved_ttbr0();
+	local_flush_tlb_all();
+	cpu_set_default_tcr_t0sz();
+
+	if (mm != &init_mm && !system_uses_ttbr0_pan())
+		cpu_switch_mm(mm->pgd, mm);
+}
+
+static inline void cpu_install_idmap(void)
+{
+	cpu_set_reserved_ttbr0();
+	local_flush_tlb_all();
+	cpu_set_idmap_tcr_t0sz();
+
+	cpu_switch_mm(lm_alias(idmap_pg_dir), &init_mm);
+}
+
+/*
+ * Atomically replaces the active TTBR1_EL1 PGD with a new VA-compatible PGD,
+ * avoiding the possibility of conflicting TLB entries being allocated.
+ */
+static inline void cpu_replace_ttbr1(pgd_t *pgd)
+{
+	typedef void (ttbr_replace_func)(phys_addr_t);
+	extern ttbr_replace_func idmap_cpu_replace_ttbr1;
+	ttbr_replace_func *replace_phys;
+
+	phys_addr_t pgd_phys = virt_to_phys(pgd);
+
+	replace_phys = (void *)__pa_symbol(idmap_cpu_replace_ttbr1);
+
+	cpu_install_idmap();
+	replace_phys(pgd_phys);
+	cpu_uninstall_idmap();
 }
 
 /*
@@ -103,35 +172,43 @@
 
 #define init_new_context(tsk,mm)	({ atomic64_set(&(mm)->context.id, 0); 0; })
 
-/*
- * This is called when "tsk" is about to enter lazy TLB mode.
- *
- * mm:  describes the currently active mm context
- * tsk: task which is entering lazy tlb
- * cpu: cpu number which is entering lazy tlb
- *
- * tsk->mm will be NULL
- */
-static inline void
-enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
+#ifdef CONFIG_ARM64_SW_TTBR0_PAN
+static inline void update_saved_ttbr0(struct task_struct *tsk,
+				      struct mm_struct *mm)
+{
+	u64 ttbr;
+
+	if (!system_uses_ttbr0_pan())
+		return;
+
+	if (mm == &init_mm)
+		ttbr = __pa_symbol(empty_zero_page);
+	else
+		ttbr = virt_to_phys(mm->pgd) | ASID(mm) << 48;
+
+	WRITE_ONCE(task_thread_info(tsk)->ttbr0, ttbr);
+}
+#else
+static inline void update_saved_ttbr0(struct task_struct *tsk,
+				      struct mm_struct *mm)
 {
 }
+#endif
 
+static inline void
+enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
+{
 /*
- * This is the actual mm switch as far as the scheduler
- * is concerned.  No registers are touched.  We avoid
- * calling the CPU specific function when the mm hasn't
- * actually changed.
+	 * We don't actually care about the ttbr0 mapping, so point it at the
+	 * zero page.
  */
-static inline void
-switch_mm(struct mm_struct *prev, struct mm_struct *next,
-	  struct task_struct *tsk)
+	update_saved_ttbr0(tsk, &init_mm);
+}
+
+static inline void __switch_mm(struct mm_struct *next)
 {
 	unsigned int cpu = smp_processor_id();
 
-	if (prev == next)
-		return;
-
 	/*
 	 * init_mm.pgd does not contain any user mappings and it is always
 	 * active for kernel addresses in TTBR1. Just set the reserved TTBR0.
@@ -144,7 +221,25 @@
 	check_and_switch_context(next, cpu);
 }
 
+static inline void
+switch_mm(struct mm_struct *prev, struct mm_struct *next,
+	  struct task_struct *tsk)
+{
+	if (prev != next)
+		__switch_mm(next);
+
+	/*
+	 * Update the saved TTBR0_EL1 of the scheduled-in task as the previous
+	 * value may not have been initialised yet (activate_mm caller) or the
+	 * ASID has changed since the last run (following the context switch
+	 * of another thread of the same process).
+	 */
+	update_saved_ttbr0(tsk, next);
+}
+
 #define deactivate_mm(tsk,mm)	do { } while (0)
-#define activate_mm(prev,next)	switch_mm(prev, next, NULL)
+#define activate_mm(prev,next)	switch_mm(prev, next, current)
+
+void post_ttbr_update_workaround(void);
 
 #endif
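With SW TTBR0 PAN, update_saved_ttbr0() stashes in thread_info->ttbr0 the
value that uaccess_enable() will later load: the pgd's physical address in
the low bits plus the 16-bit ASID at bit 48 (TTBR_ASID_MASK in <asm/mmu.h>
below). A user-space sketch (not from the patch; address and ASID made up)
of that composition:

#include <stdio.h>

int main(void)
{
	unsigned long pgd_phys = 0x80e4d000UL;	/* made-up pgd PA */
	unsigned long asid = 0x2aUL;		/* made-up ASID */
	unsigned long ttbr = pgd_phys | asid << 48;

	printf("saved ttbr0 = 0x%016lx\n", ttbr);
	printf("asid field  = 0x%lx\n", (ttbr & (0xffffUL << 48)) >> 48);
	return 0;
}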
diff -ruw linux-4.4.115/arch/arm64/include/asm/mmu.h linux-4.4.115-fbx/arch/arm64/include/asm/mmu.h
--- linux-4.4.115/arch/arm64/include/asm/mmu.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/arch/arm64/include/asm/mmu.h	2019-01-22 16:16:21.539228621 +0100
@@ -16,6 +16,15 @@
 #ifndef __ASM_MMU_H
 #define __ASM_MMU_H
 
+#define USER_ASID_FLAG	(UL(1) << 48)
+#define TTBR_ASID_MASK	(UL(0xffff) << 48)
+
+#ifndef __ASSEMBLY__
+#include <linux/smp.h>
+
+#include <asm/cpufeature.h>
+#include <asm/percpu.h>
+
 typedef struct {
 	atomic64_t	id;
 	void		*vdso;
@@ -28,6 +37,49 @@
  */
 #define ASID(mm)	((mm)->context.id.counter & 0xffff)
 
+static inline bool arm64_kernel_unmapped_at_el0(void)
+{
+	return IS_ENABLED(CONFIG_UNMAP_KERNEL_AT_EL0) &&
+	       cpus_have_cap(ARM64_UNMAP_KERNEL_AT_EL0);
+}
+
+typedef void (*bp_hardening_cb_t)(void);
+
+struct bp_hardening_data {
+	int			hyp_vectors_slot;
+	bp_hardening_cb_t	fn;
+};
+
+#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
+extern char __bp_harden_hyp_vecs_start[], __bp_harden_hyp_vecs_end[];
+
+DECLARE_PER_CPU_READ_MOSTLY(struct bp_hardening_data, bp_hardening_data);
+
+static inline struct bp_hardening_data *arm64_get_bp_hardening_data(void)
+{
+	return this_cpu_ptr(&bp_hardening_data);
+}
+
+static inline void arm64_apply_bp_hardening(void)
+{
+	struct bp_hardening_data *d;
+
+	if (!cpus_have_cap(ARM64_HARDEN_BRANCH_PREDICTOR))
+		return;
+
+	d = arm64_get_bp_hardening_data();
+	if (d->fn)
+		d->fn();
+}
+#else
+static inline struct bp_hardening_data *arm64_get_bp_hardening_data(void)
+{
+	return NULL;
+}
+
+static inline void arm64_apply_bp_hardening(void)	{ }
+#endif	/* CONFIG_HARDEN_BRANCH_PREDICTOR */
+
 extern void paging_init(void);
 extern void __iomem *early_io_map(phys_addr_t phys, unsigned long virt);
 extern void init_mem_pgprot(void);
@@ -35,5 +87,13 @@
 			       unsigned long virt, phys_addr_t size,
 			       pgprot_t prot);
 extern void *fixmap_remap_fdt(phys_addr_t dt_phys);
+#ifdef CONFIG_MEMORY_HOTPLUG
+extern void hotplug_paging(phys_addr_t start, phys_addr_t size);
+#ifdef CONFIG_MEMORY_HOTREMOVE
+extern void remove_pagetable(unsigned long start,
+	unsigned long end, bool direct);
+#endif
+#endif
 
+#endif	/* !__ASSEMBLY__ */
 #endif
diff -ruw linux-4.4.115/arch/arm64/include/asm/module.h linux-4.4.115-fbx/arch/arm64/include/asm/module.h
--- linux-4.4.115/arch/arm64/include/asm/module.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/arch/arm64/include/asm/module.h	2019-01-22 16:16:21.539228621 +0100
@@ -17,7 +17,29 @@
 #define __ASM_MODULE_H
 
 #include <asm-generic/module.h>
+#include <asm/memory.h>
 
 #define MODULE_ARCH_VERMAGIC	"aarch64"
 
+#ifdef CONFIG_ARM64_MODULE_PLTS
+struct mod_arch_specific {
+	struct elf64_shdr	*plt;
+	int			plt_num_entries;
+	int			plt_max_entries;
+};
+#endif
+
+u64 module_emit_plt_entry(struct module *mod, const Elf64_Rela *rela,
+			  Elf64_Sym *sym);
+
+#ifdef CONFIG_RANDOMIZE_BASE
+#ifdef CONFIG_MODVERSIONS
+#define ARCH_RELOCATES_KCRCTAB
+#define reloc_start 		(kimage_vaddr - KIMAGE_VADDR)
+#endif
+extern u64 module_alloc_base;
+#else
+#define module_alloc_base	((u64)_etext - MODULES_VSIZE)
+#endif
+
 #endif /* __ASM_MODULE_H */
diff -ruw linux-4.4.115/arch/arm64/include/asm/page.h linux-4.4.115-fbx/arch/arm64/include/asm/page.h
--- linux-4.4.115/arch/arm64/include/asm/page.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/arch/arm64/include/asm/page.h	2019-01-22 16:16:21.539228621 +0100
@@ -19,6 +19,8 @@
 #ifndef __ASM_PAGE_H
 #define __ASM_PAGE_H
 
+#include <linux/const.h>
+
 /* PAGE_SHIFT determines the page size */
 /* CONT_SHIFT determines the number of pages which can be tracked together  */
 #ifdef CONFIG_ARM64_64K_PAGES
diff -ruw linux-4.4.115/arch/arm64/include/asm/pci.h linux-4.4.115-fbx/arch/arm64/include/asm/pci.h
--- linux-4.4.115/arch/arm64/include/asm/pci.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/arch/arm64/include/asm/pci.h	2019-01-22 16:16:21.543228657 +0100
@@ -41,3 +41,8 @@
 
 #endif  /* __KERNEL__ */
 #endif  /* __ASM_PCI_H */
+
+#ifdef CONFIG_PCI_MSM
+#define arch_setup_msi_irqs arch_setup_msi_irqs
+#define arch_teardown_msi_irqs arch_teardown_msi_irqs
+#endif
diff -ruw linux-4.4.115/arch/arm64/include/asm/percpu.h linux-4.4.115-fbx/arch/arm64/include/asm/percpu.h
--- linux-4.4.115/arch/arm64/include/asm/percpu.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/arch/arm64/include/asm/percpu.h	2019-10-29 09:26:22.997196856 +0100
@@ -16,6 +16,8 @@
 #ifndef __ASM_PERCPU_H
 #define __ASM_PERCPU_H
 
+#include <asm/stack_pointer.h>
+
 static inline void set_my_cpu_offset(unsigned long off)
 {
 	asm volatile("msr tpidr_el1, %0" :: "r" (off) : "memory");
diff -ruw linux-4.4.115/arch/arm64/include/asm/perf_event.h linux-4.4.115-fbx/arch/arm64/include/asm/perf_event.h
--- linux-4.4.115/arch/arm64/include/asm/perf_event.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/arch/arm64/include/asm/perf_event.h	2019-01-22 16:16:21.543228657 +0100
@@ -17,6 +17,8 @@
 #ifndef __ASM_PERF_EVENT_H
 #define __ASM_PERF_EVENT_H
 
+#include <asm/stack_pointer.h>
+
 #ifdef CONFIG_PERF_EVENTS
 struct pt_regs;
 extern unsigned long perf_instruction_pointer(struct pt_regs *regs);
@@ -31,4 +33,91 @@
 	(regs)->pstate = PSR_MODE_EL1h;	\
 }
 
+static inline u32 armv8pmu_pmcr_read_reg(void)
+{
+	u32 val;
+
+	asm volatile("mrs %0, pmcr_el0" : "=r" (val));
+	return val;
+}
+
+static inline u32 armv8pmu_pmccntr_read_reg(void)
+{
+	u32 val;
+
+	asm volatile("mrs %0, pmccntr_el0" : "=r" (val));
+	return val;
+}
+
+static inline u32 armv8pmu_pmxevcntr_read_reg(void)
+{
+	u32 val;
+
+	asm volatile("mrs %0, pmxevcntr_el0" : "=r" (val));
+	return val;
+}
+
+static inline u32 armv8pmu_pmovsclr_read_reg(void)
+{
+	u32 val;
+
+	asm volatile("mrs %0, pmovsclr_el0" : "=r" (val));
+	return val;
+}
+
+static inline void armv8pmu_pmcr_write_reg(u32 val)
+{
+	asm volatile("msr pmcr_el0, %0" :: "r" (val));
+}
+
+static inline void armv8pmu_pmselr_write_reg(u32 val)
+{
+	asm volatile("msr pmselr_el0, %0" :: "r" (val));
+}
+
+static inline void armv8pmu_pmccntr_write_reg(u32 val)
+{
+	asm volatile("msr pmccntr_el0, %0" :: "r" (val));
+}
+
+static inline void armv8pmu_pmxevcntr_write_reg(u32 val)
+{
+	asm volatile("msr pmxevcntr_el0, %0" :: "r" (val));
+}
+
+static inline void armv8pmu_pmxevtyper_write_reg(u32 val)
+{
+	asm volatile("msr pmxevtyper_el0, %0" :: "r" (val));
+}
+
+static inline void armv8pmu_pmcntenset_write_reg(u32 val)
+{
+	asm volatile("msr pmcntenset_el0, %0" :: "r" (val));
+}
+
+static inline void armv8pmu_pmcntenclr_write_reg(u32 val)
+{
+	asm volatile("msr pmcntenclr_el0, %0" :: "r" (val));
+}
+
+static inline void armv8pmu_pmintenset_write_reg(u32 val)
+{
+	asm volatile("msr pmintenset_el1, %0" :: "r" (val));
+}
+
+static inline void armv8pmu_pmintenclr_write_reg(u32 val)
+{
+	asm volatile("msr pmintenclr_el1, %0" :: "r" (val));
+}
+
+static inline void armv8pmu_pmovsclr_write_reg(u32 val)
+{
+	asm volatile("msr pmovsclr_el0, %0" :: "r" (val));
+}
+
+static inline void armv8pmu_pmuserenr_write_reg(u32 val)
+{
+	asm volatile("msr pmuserenr_el0, %0" :: "r" (val));
+}
+
 #endif
diff -ruw linux-4.4.115/arch/arm64/include/asm/pgalloc.h linux-4.4.115-fbx/arch/arm64/include/asm/pgalloc.h
--- linux-4.4.115/arch/arm64/include/asm/pgalloc.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/arch/arm64/include/asm/pgalloc.h	2019-01-22 16:16:21.543228657 +0100
@@ -42,11 +42,20 @@
 	free_page((unsigned long)pmd);
 }
 
-static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
+static inline void __pud_populate(pud_t *pud, phys_addr_t pmd, pudval_t prot)
 {
-	set_pud(pud, __pud(__pa(pmd) | PMD_TYPE_TABLE));
+	set_pud(pud, __pud(pmd | prot));
 }
 
+static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
+{
+	__pud_populate(pud, __pa(pmd), PMD_TYPE_TABLE);
+}
+#else
+static inline void __pud_populate(pud_t *pud, phys_addr_t pmd, pudval_t prot)
+{
+	BUILD_BUG();
+}
 #endif	/* CONFIG_PGTABLE_LEVELS > 2 */
 
 #if CONFIG_PGTABLE_LEVELS > 3
@@ -62,11 +71,20 @@
 	free_page((unsigned long)pud);
 }
 
-static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
+static inline void __pgd_populate(pgd_t *pgdp, phys_addr_t pud, pgdval_t prot)
 {
-	set_pgd(pgd, __pgd(__pa(pud) | PUD_TYPE_TABLE));
+	set_pgd(pgdp, __pgd(pud | prot));
 }
 
+static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
+{
+	__pgd_populate(pgd, __pa(pud), PUD_TYPE_TABLE);
+}
+#else
+static inline void __pgd_populate(pgd_t *pgdp, phys_addr_t pud, pgdval_t prot)
+{
+	BUILD_BUG();
+}
 #endif	/* CONFIG_PGTABLE_LEVELS > 3 */
 
 extern pgd_t *pgd_alloc(struct mm_struct *mm);
diff -ruw linux-4.4.115/arch/arm64/include/asm/pgtable.h linux-4.4.115-fbx/arch/arm64/include/asm/pgtable.h
--- linux-4.4.115/arch/arm64/include/asm/pgtable.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/arch/arm64/include/asm/pgtable.h	2019-10-29 09:26:22.997196856 +0100
@@ -19,6 +19,7 @@
 #include <asm/bug.h>
 #include <asm/proc-fns.h>
 
 #include <asm/memory.h>
 #include <asm/pgtable-hwdef.h>
 
@@ -36,19 +37,13 @@
  *
  * VMEMAP_SIZE: allows the whole linear region to be covered by a struct page array
  *	(rounded up to PUD_SIZE).
- * VMALLOC_START: beginning of the kernel VA space
+ * VMALLOC_START: beginning of the kernel vmalloc space
  * VMALLOC_END: extends to the available space below vmemmap, PCI I/O space,
  *	fixed mappings and modules
  */
 #define VMEMMAP_SIZE		ALIGN((1UL << (VA_BITS - PAGE_SHIFT)) * sizeof(struct page), PUD_SIZE)
 
-#ifndef CONFIG_KASAN
-#define VMALLOC_START		(VA_START)
-#else
-#include <asm/kasan.h>
-#define VMALLOC_START		(KASAN_SHADOW_END + SZ_64K)
-#endif
-
+#define VMALLOC_START		(MODULES_END)
 #define VMALLOC_END		(PAGE_OFFSET - PUD_SIZE - VMEMMAP_SIZE - SZ_64K)
 
 #define VMEMMAP_START		(VMALLOC_END + SZ_64K)
@@ -59,6 +54,7 @@
 
 #ifndef __ASSEMBLY__
 
+#include <asm/fixmap.h>
 #include <linux/mmdebug.h>
 
 extern void __pte_error(const char *file, int line, unsigned long val);
@@ -66,8 +62,16 @@
 extern void __pud_error(const char *file, int line, unsigned long val);
 extern void __pgd_error(const char *file, int line, unsigned long val);
 
-#define PROT_DEFAULT		(PTE_TYPE_PAGE | PTE_AF | PTE_SHARED)
-#define PROT_SECT_DEFAULT	(PMD_TYPE_SECT | PMD_SECT_AF | PMD_SECT_S)
+#define _PROT_DEFAULT		(PTE_TYPE_PAGE | PTE_AF | PTE_SHARED)
+#define _PROT_SECT_DEFAULT	(PMD_TYPE_SECT | PMD_SECT_AF | PMD_SECT_S)
+
+#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
+#define PROT_DEFAULT		(_PROT_DEFAULT | PTE_NG)
+#define PROT_SECT_DEFAULT	(_PROT_SECT_DEFAULT | PMD_SECT_NG)
+#else
+#define PROT_DEFAULT		_PROT_DEFAULT
+#define PROT_SECT_DEFAULT	_PROT_SECT_DEFAULT
+#endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */
 
 #define PROT_DEVICE_nGnRnE	(PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_ATTRINDX(MT_DEVICE_nGnRnE))
 #define PROT_DEVICE_nGnRE	(PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_ATTRINDX(MT_DEVICE_nGnRE))
@@ -80,6 +84,7 @@
 #define PROT_SECT_NORMAL_EXEC	(PROT_SECT_DEFAULT | PMD_SECT_UXN | PMD_ATTRINDX(MT_NORMAL))
 
 #define _PAGE_DEFAULT		(PROT_DEFAULT | PTE_ATTRINDX(MT_NORMAL))
+#define _HYP_PAGE_DEFAULT	(_PAGE_DEFAULT & ~PTE_NG)
 
 #define PAGE_KERNEL		__pgprot(_PAGE_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE)
 #define PAGE_KERNEL_RO		__pgprot(_PAGE_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_RDONLY)
@@ -87,13 +92,13 @@
 #define PAGE_KERNEL_EXEC	__pgprot(_PAGE_DEFAULT | PTE_UXN | PTE_DIRTY | PTE_WRITE)
 #define PAGE_KERNEL_EXEC_CONT	__pgprot(_PAGE_DEFAULT | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_CONT)
 
-#define PAGE_HYP		__pgprot(_PAGE_DEFAULT | PTE_HYP)
+#define PAGE_HYP		__pgprot(_HYP_PAGE_DEFAULT | PTE_HYP)
 #define PAGE_HYP_DEVICE		__pgprot(PROT_DEVICE_nGnRE | PTE_HYP)
 
 #define PAGE_S2			__pgprot(PROT_DEFAULT | PTE_S2_MEMATTR(MT_S2_NORMAL) | PTE_S2_RDONLY)
 #define PAGE_S2_DEVICE		__pgprot(PROT_DEFAULT | PTE_S2_MEMATTR(MT_S2_DEVICE_nGnRE) | PTE_S2_RDONLY | PTE_UXN)
 
-#define PAGE_NONE		__pgprot(((_PAGE_DEFAULT) & ~PTE_VALID) | PTE_PROT_NONE | PTE_PXN | PTE_UXN)
+#define PAGE_NONE		__pgprot(((_PAGE_DEFAULT) & ~PTE_VALID) | PTE_PROT_NONE | PTE_NG | PTE_PXN | PTE_UXN)
 #define PAGE_SHARED		__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_WRITE)
 #define PAGE_SHARED_EXEC	__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_WRITE)
 #define PAGE_COPY		__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN)
@@ -123,8 +128,8 @@
  * ZERO_PAGE is a global shared page that is always zero: used
  * for zero-mapped memory areas etc..
  */
-extern struct page *empty_zero_page;
-#define ZERO_PAGE(vaddr)	(empty_zero_page)
+extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
+#define ZERO_PAGE(vaddr)	phys_to_page(__pa_symbol(empty_zero_page))
 
 #define pte_ERROR(pte)		__pte_error(__FILE__, __LINE__, pte_val(pte))
 
@@ -136,16 +141,6 @@
 #define pte_clear(mm,addr,ptep)	set_pte(ptep, __pte(0))
 #define pte_page(pte)		(pfn_to_page(pte_pfn(pte)))
 
-/* Find an entry in the third-level page table. */
-#define pte_index(addr)		(((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
-
-#define pte_offset_kernel(dir,addr)	(pmd_page_vaddr(*(dir)) + pte_index(addr))
-
-#define pte_offset_map(dir,addr)	pte_offset_kernel((dir), (addr))
-#define pte_offset_map_nested(dir,addr)	pte_offset_kernel((dir), (addr))
-#define pte_unmap(pte)			do { } while (0)
-#define pte_unmap_nested(pte)		do { } while (0)
-
 /*
  * The following only work if pte_present(). Undefined behaviour otherwise.
  */
@@ -168,6 +163,16 @@
 #define pte_valid(pte)		(!!(pte_val(pte) & PTE_VALID))
 #define pte_valid_not_user(pte) \
 	((pte_val(pte) & (PTE_VALID | PTE_USER)) == PTE_VALID)
+#define pte_valid_young(pte) \
+	((pte_val(pte) & (PTE_VALID | PTE_AF)) == (PTE_VALID | PTE_AF))
+
+/*
+ * Could the pte be present in the TLB? We must check mm_tlb_flush_pending
+ * so that we don't erroneously return false for pages that have been
+ * remapped as PROT_NONE but are yet to be flushed from the TLB.
+ */
+#define pte_accessible(mm, pte)	\
+	(mm_tlb_flush_pending(mm) ? pte_present(pte) : pte_valid_young(pte))
 
 static inline pte_t clear_pte_bit(pte_t pte, pgprot_t prot)
 {
@@ -218,7 +223,8 @@
 
 static inline pte_t pte_mkcont(pte_t pte)
 {
-	return set_pte_bit(pte, __pgprot(PTE_CONT));
+	pte = set_pte_bit(pte, __pgprot(PTE_CONT));
+	return set_pte_bit(pte, __pgprot(PTE_TYPE_PAGE));
 }
 
 static inline pte_t pte_mknoncont(pte_t pte)
@@ -226,8 +232,41 @@
 	return clear_pte_bit(pte, __pgprot(PTE_CONT));
 }
 
+static inline pmd_t pmd_mkcont(pmd_t pmd)
+{
+	return __pmd(pmd_val(pmd) | PMD_SECT_CONT);
+}
+
 static inline void set_pte(pte_t *ptep, pte_t pte)
 {
+#ifdef CONFIG_ARM64_STRICT_BREAK_BEFORE_MAKE
+	pteval_t old = pte_val(*ptep);
+	pteval_t new = pte_val(pte);
+
+	/* Only problematic if valid -> valid */
+	if (!(old & new & PTE_VALID))
+		goto pte_ok;
+
+	/* Changing attributes should go via an invalid entry */
+	if (WARN_ON((old & PTE_ATTRINDX_MASK) != (new & PTE_ATTRINDX_MASK)))
+		goto pte_bad;
+
+	/* Change of OA is only an issue if one mapping is writable */
+	if (!(old & new & PTE_RDONLY) &&
+	    WARN_ON(pte_pfn(*ptep) != pte_pfn(pte)))
+		goto pte_bad;
+
+	goto pte_ok;
+
+pte_bad:
+	*ptep = __pte(0);
+	dsb(ishst);
+	asm("tlbi	vmalle1is");
+	dsb(ish);
+	isb();
+pte_ok:
+#endif
+
 	*ptep = pte;
 
 	/*
@@ -299,7 +338,7 @@
 /*
  * Hugetlb definitions.
  */
-#define HUGE_MAX_HSTATE		2
+#define HUGE_MAX_HSTATE		4
 #define HPAGE_SHIFT		PMD_SHIFT
 #define HPAGE_SIZE		(_AC(1, UL) << HPAGE_SHIFT)
 #define HPAGE_MASK		(~(HPAGE_SIZE - 1))
@@ -354,6 +393,7 @@
 #define pmd_mksplitting(pmd)	pte_pmd(pte_mkspecial(pmd_pte(pmd)))
 #define pmd_mkold(pmd)		pte_pmd(pte_mkold(pmd_pte(pmd)))
 #define pmd_mkwrite(pmd)	pte_pmd(pte_mkwrite(pmd_pte(pmd)))
+#define pmd_mkclean(pmd)       pte_pmd(pte_mkclean(pmd_pte(pmd)))
 #define pmd_mkdirty(pmd)	pte_pmd(pte_mkdirty(pmd_pte(pmd)))
 #define pmd_mkyoung(pmd)	pte_pmd(pte_mkyoung(pmd_pte(pmd)))
 #define pmd_mknotpresent(pmd)	(__pmd(pmd_val(pmd) & ~PMD_SECT_VALID))
@@ -425,13 +465,36 @@
 	set_pmd(pmdp, __pmd(0));
 }
 
-static inline pte_t *pmd_page_vaddr(pmd_t pmd)
+static inline phys_addr_t pmd_page_paddr(pmd_t pmd)
+{
+	return pmd_val(pmd) & PHYS_MASK & (s32)PAGE_MASK;
+}
+
+static inline unsigned long pmd_page_vaddr(pmd_t pmd)
 {
-	return __va(pmd_val(pmd) & PHYS_MASK & (s32)PAGE_MASK);
+	return (unsigned long) __va(pmd_page_paddr(pmd));
 }
 
+/* Find an entry in the third-level page table. */
+#define pte_index(addr)		(((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
+
+#define pte_offset_phys(dir,addr)	(pmd_page_paddr(*(dir)) + pte_index(addr) * sizeof(pte_t))
+#define pte_offset_kernel(dir,addr)	((pte_t *)__va(pte_offset_phys((dir), (addr))))
+
+#define pte_offset_map(dir,addr)	pte_offset_kernel((dir), (addr))
+#define pte_offset_map_nested(dir,addr)	pte_offset_kernel((dir), (addr))
+#define pte_unmap(pte)			do { } while (0)
+#define pte_unmap_nested(pte)		do { } while (0)
+
+#define pte_set_fixmap(addr)		((pte_t *)set_fixmap_offset(FIX_PTE, addr))
+#define pte_set_fixmap_offset(pmd, addr)	pte_set_fixmap(pte_offset_phys(pmd, addr))
+#define pte_clear_fixmap()		clear_fixmap(FIX_PTE)
+
 #define pmd_page(pmd)		pfn_to_page(__phys_to_pfn(pmd_val(pmd) & PHYS_MASK))
 
+/* use ONLY for statically allocated translation tables */
+#define pte_offset_kimg(dir,addr)	((pte_t *)__phys_to_kimg(pte_offset_phys((dir), (addr))))
+
 /*
  * Conversion functions: convert a page and protection to a page entry,
  * and a page entry and page directory to the page they refer to.
@@ -458,21 +521,42 @@
 	set_pud(pudp, __pud(0));
 }
 
-static inline pmd_t *pud_page_vaddr(pud_t pud)
+static inline phys_addr_t pud_page_paddr(pud_t pud)
 {
-	return __va(pud_val(pud) & PHYS_MASK & (s32)PAGE_MASK);
+	return pud_val(pud) & PHYS_MASK & (s32)PAGE_MASK;
+}
+
+static inline unsigned long pud_page_vaddr(pud_t pud)
+{
+	return (unsigned long) __va(pud_page_paddr(pud));
 }
 
 /* Find an entry in the second-level page table. */
 #define pmd_index(addr)		(((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1))
 
-static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr)
-{
-	return (pmd_t *)pud_page_vaddr(*pud) + pmd_index(addr);
-}
+#define pmd_offset_phys(dir, addr)	(pud_page_paddr(*(dir)) + pmd_index(addr) * sizeof(pmd_t))
+#define pmd_offset(dir, addr)		((pmd_t *)__va(pmd_offset_phys((dir), (addr))))
+
+#define pmd_set_fixmap(addr)		((pmd_t *)set_fixmap_offset(FIX_PMD, addr))
+#define pmd_set_fixmap_offset(pud, addr)	pmd_set_fixmap(pmd_offset_phys(pud, addr))
+#define pmd_clear_fixmap()		clear_fixmap(FIX_PMD)
 
 #define pud_page(pud)		pfn_to_page(__phys_to_pfn(pud_val(pud) & PHYS_MASK))
 
+/* use ONLY for statically allocated translation tables */
+#define pmd_offset_kimg(dir,addr)	((pmd_t *)__phys_to_kimg(pmd_offset_phys((dir), (addr))))
+
+#else
+
+#define pud_page_paddr(pud)	({ BUILD_BUG(); 0; })
+
+/* Match pmd_offset folding in <asm-generic/pgtable-nopmd.h> */
+#define pmd_set_fixmap(addr)		NULL
+#define pmd_set_fixmap_offset(pudp, addr)	((pmd_t *)pudp)
+#define pmd_clear_fixmap()
+
+#define pmd_offset_kimg(dir,addr)	((pmd_t *)dir)
+
 #endif	/* CONFIG_PGTABLE_LEVELS > 2 */
 
 #if CONFIG_PGTABLE_LEVELS > 3
@@ -494,21 +578,42 @@
 	set_pgd(pgdp, __pgd(0));
 }
 
-static inline pud_t *pgd_page_vaddr(pgd_t pgd)
+static inline phys_addr_t pgd_page_paddr(pgd_t pgd)
+{
+	return pgd_val(pgd) & PHYS_MASK & (s32)PAGE_MASK;
+}
+
+static inline unsigned long pgd_page_vaddr(pgd_t pgd)
 {
-	return __va(pgd_val(pgd) & PHYS_MASK & (s32)PAGE_MASK);
+	return (unsigned long) __va(pgd_page_paddr(pgd));
 }
 
 /* Find an entry in the first-level page table. */
 #define pud_index(addr)		(((addr) >> PUD_SHIFT) & (PTRS_PER_PUD - 1))
 
-static inline pud_t *pud_offset(pgd_t *pgd, unsigned long addr)
-{
-	return (pud_t *)pgd_page_vaddr(*pgd) + pud_index(addr);
-}
+#define pud_offset_phys(dir, addr)	(pgd_page_paddr(*(dir)) + pud_index(addr) * sizeof(pud_t))
+#define pud_offset(dir, addr)		((pud_t *)__va(pud_offset_phys((dir), (addr))))
+
+#define pud_set_fixmap(addr)		((pud_t *)set_fixmap_offset(FIX_PUD, addr))
+#define pud_set_fixmap_offset(pgd, addr)	pud_set_fixmap(pud_offset_phys(pgd, addr))
+#define pud_clear_fixmap()		clear_fixmap(FIX_PUD)
 
 #define pgd_page(pgd)		pfn_to_page(__phys_to_pfn(pgd_val(pgd) & PHYS_MASK))
 
+/* use ONLY for statically allocated translation tables */
+#define pud_offset_kimg(dir,addr)	((pud_t *)__phys_to_kimg(pud_offset_phys((dir), (addr))))
+
+#else
+
+#define pgd_page_paddr(pgd)	({ BUILD_BUG(); 0; })
+
+/* Match pud_offset folding in <asm-generic/pgtable-nopud.h> */
+#define pud_set_fixmap(addr)		NULL
+#define pud_set_fixmap_offset(pgdp, addr)	((pud_t *)pgdp)
+#define pud_clear_fixmap()
+
+#define pud_offset_kimg(dir,addr)	((pud_t *)dir)
+
 #endif  /* CONFIG_PGTABLE_LEVELS > 3 */
 
 #define pgd_ERROR(pgd)		__pgd_error(__FILE__, __LINE__, pgd_val(pgd))
@@ -516,11 +621,16 @@
 /* to find an entry in a page-table-directory */
 #define pgd_index(addr)		(((addr) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))
 
-#define pgd_offset(mm, addr)	((mm)->pgd+pgd_index(addr))
+#define pgd_offset_raw(pgd, addr)	((pgd) + pgd_index(addr))
+
+#define pgd_offset(mm, addr)	(pgd_offset_raw((mm)->pgd, (addr)))
 
 /* to find an entry in a kernel page-table-directory */
 #define pgd_offset_k(addr)	pgd_offset(&init_mm, addr)
 
+#define pgd_set_fixmap(addr)	((pgd_t *)set_fixmap_offset(FIX_PGD, addr))
+#define pgd_clear_fixmap()	clear_fixmap(FIX_PGD)
+
 static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 {
 	const pteval_t mask = PTE_USER | PTE_PXN | PTE_UXN | PTE_RDONLY |
@@ -649,6 +759,7 @@
 
 extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
 extern pgd_t idmap_pg_dir[PTRS_PER_PGD];
+extern pgd_t tramp_pg_dir[PTRS_PER_PGD];
 
 /*
  * Encode and decode a swap entry:
@@ -681,7 +792,8 @@
 
 #include <asm-generic/pgtable.h>
 
-#define pgtable_cache_init() do { } while (0)
+void pgd_cache_init(void);
+#define pgtable_cache_init	pgd_cache_init
 
 /*
  * On AArch64, the cache coherency is handled via the set_pte_at() function.
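
The pgtable.h hunk above reworks the table walkers around physical addresses: each level gains a pXd_page_paddr() helper, and pte/pmd/pud_offset are re-expressed as __va() of a pXd_offset_phys() computation (with fixmap and kimage variants for early boot). A minimal sketch of how a full kernel-VA lookup composes out of these accessors — a hypothetical walker, assuming the usual pXd_none() predicates and unfolded 4-level tables:

/*
 * Hypothetical sketch (kernel context): resolve a kernel virtual
 * address to its pte with the accessors from the hunk above.
 * Section and contiguous mappings are deliberately ignored.
 */
static pte_t *lookup_kernel_pte(unsigned long addr)
{
	pgd_t *pgd = pgd_offset_k(addr);	/* init_mm->pgd + pgd_index() */
	pud_t *pud;
	pmd_t *pmd;

	if (pgd_none(*pgd))
		return NULL;
	pud = pud_offset(pgd, addr);		/* __va(pud_offset_phys()) */
	if (pud_none(*pud))
		return NULL;
	pmd = pmd_offset(pud, addr);		/* __va(pmd_offset_phys()) */
	if (pmd_none(*pmd))
		return NULL;
	return pte_offset_kernel(pmd, addr);	/* __va(pte_offset_phys()) */
}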
diff -ruw linux-4.4.115/arch/arm64/include/asm/pgtable-hwdef.h linux-4.4.115-fbx/arch/arm64/include/asm/pgtable-hwdef.h
--- linux-4.4.115/arch/arm64/include/asm/pgtable-hwdef.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/arch/arm64/include/asm/pgtable-hwdef.h	2019-01-22 16:16:21.543228657 +0100
@@ -90,7 +90,23 @@
 /*
  * Contiguous page definitions.
  */
-#define CONT_PTES		(_AC(1, UL) << CONT_SHIFT)
+#ifdef CONFIG_ARM64_64K_PAGES
+#define CONT_PTE_SHIFT		5
+#define CONT_PMD_SHIFT		5
+#elif defined(CONFIG_ARM64_16K_PAGES)
+#define CONT_PTE_SHIFT		7
+#define CONT_PMD_SHIFT		5
+#else
+#define CONT_PTE_SHIFT		4
+#define CONT_PMD_SHIFT		4
+#endif
+
+#define CONT_PTES		(1 << CONT_PTE_SHIFT)
+#define CONT_PTE_SIZE		(CONT_PTES * PAGE_SIZE)
+#define CONT_PTE_MASK		(~(CONT_PTE_SIZE - 1))
+#define CONT_PMDS		(1 << CONT_PMD_SHIFT)
+#define CONT_PMD_SIZE		(CONT_PMDS * PMD_SIZE)
+#define CONT_PMD_MASK		(~(CONT_PMD_SIZE - 1))
 /* the numerical offset of the PTE within a range of CONT_PTES */
 #define CONT_RANGE_OFFSET(addr) (((addr)>>PAGE_SHIFT)&(CONT_PTES-1))
 
@@ -208,6 +224,8 @@
 #define TCR_TG1_16K		(UL(1) << 30)
 #define TCR_TG1_4K		(UL(2) << 30)
 #define TCR_TG1_64K		(UL(3) << 30)
+
+#define TCR_A1			(UL(1) << 22)
 #define TCR_ASID16		(UL(1) << 36)
 #define TCR_TBI0		(UL(1) << 37)
 #define TCR_HA			(UL(1) << 39)
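
The CONT_*_SHIFT definitions above size the contiguous-hint ranges per page-size configuration. A standalone worked check of the 4K-page case (plain C; the constants are copied from the hunk, the program itself is illustrative only):

#include <assert.h>

int main(void)
{
	const unsigned long page_size = 4096;		/* PAGE_SIZE */
	const unsigned long cont_ptes = 1UL << 4;	/* CONT_PTE_SHIFT = 4 */
	const unsigned long cont_pte_size = cont_ptes * page_size;
	const unsigned long cont_pte_mask = ~(cont_pte_size - 1);

	assert(cont_pte_size == 0x10000);	/* 16 x 4KiB = 64KiB */
	/* Masking an address yields its 64KiB-aligned range base: */
	assert((0x12345678UL & cont_pte_mask) == 0x12340000UL);
	return 0;
}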
diff -ruw linux-4.4.115/arch/arm64/include/asm/processor.h linux-4.4.115-fbx/arch/arm64/include/asm/processor.h
--- linux-4.4.115/arch/arm64/include/asm/processor.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/arch/arm64/include/asm/processor.h	2019-01-22 16:16:21.543228657 +0100
@@ -29,8 +29,10 @@
 
 #include <linux/string.h>
 
+#include <asm/alternative.h>
 #include <asm/fpsimd.h>
 #include <asm/hw_breakpoint.h>
+#include <asm/lse.h>
 #include <asm/pgtable-hwdef.h>
 #include <asm/ptrace.h>
 #include <asm/types.h>
@@ -49,6 +51,9 @@
 #define ARCH_LOW_ADDRESS_LIMIT	(arm64_dma_phys_limit - 1)
 #endif /* __KERNEL__ */
 
+extern unsigned int boot_reason;
+extern unsigned int cold_boot;
+
 struct debug_info {
 	/* Have we suspended stepping by a debugger? */
 	int			suspended_step;
@@ -177,9 +182,11 @@
 }
 
 #define ARCH_HAS_SPINLOCK_PREFETCH
-static inline void spin_lock_prefetch(const void *x)
+static inline void spin_lock_prefetch(const void *ptr)
 {
-	prefetchw(x);
+	asm volatile(ARM64_LSE_ATOMIC_INSN(
+		     "prfm pstl1strm, %a0",
+		     "nop") : : "p" (ptr));
 }
 
 #define HAVE_ARCH_PICK_MMAP_LAYOUT
@@ -187,5 +194,6 @@
 #endif
 
 int cpu_enable_pan(void *__unused);
+int cpu_enable_uao(void *__unused);
 
 #endif /* __ASM_PROCESSOR_H */
diff -ruw linux-4.4.115/arch/arm64/include/asm/proc-fns.h linux-4.4.115-fbx/arch/arm64/include/asm/proc-fns.h
--- linux-4.4.115/arch/arm64/include/asm/proc-fns.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/arch/arm64/include/asm/proc-fns.h	2019-01-22 16:16:21.543228657 +0100
@@ -28,19 +28,17 @@
 struct mm_struct;
 struct cpu_suspend_ctx;
 
+extern void cpu_cache_off(void);
 extern void cpu_do_idle(void);
 extern void cpu_do_switch_mm(unsigned long pgd_phys, struct mm_struct *mm);
+extern void cpu_reset(unsigned long addr) __attribute__((noreturn));
+void cpu_soft_restart(phys_addr_t cpu_reset,
+		unsigned long addr) __attribute__((noreturn));
 extern void cpu_do_suspend(struct cpu_suspend_ctx *ptr);
 extern u64 cpu_do_resume(phys_addr_t ptr, u64 idmap_ttbr);
 
 #include <asm/memory.h>
 
-#define cpu_switch_mm(pgd,mm)				\
-do {							\
-	BUG_ON(pgd == swapper_pg_dir);			\
-	cpu_do_switch_mm(virt_to_phys(pgd),mm);		\
-} while (0)
-
 #endif /* __ASSEMBLY__ */
 #endif /* __KERNEL__ */
 #endif /* __ASM_PROCFNS_H */
diff -ruw linux-4.4.115/arch/arm64/include/asm/ptrace.h linux-4.4.115-fbx/arch/arm64/include/asm/ptrace.h
--- linux-4.4.115/arch/arm64/include/asm/ptrace.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/arch/arm64/include/asm/ptrace.h	2019-01-22 16:16:21.543228657 +0100
@@ -121,6 +121,8 @@
 	u64 unused;	// maintain 16 byte alignment
 };
 
+#define MAX_REG_OFFSET offsetof(struct pt_regs, pstate)
+
 #define arch_has_single_step()	(1)
 
 #ifdef CONFIG_COMPAT
@@ -146,9 +148,57 @@
 #define fast_interrupts_enabled(regs) \
 	(!((regs)->pstate & PSR_F_BIT))
 
-#define user_stack_pointer(regs) \
+#define GET_USP(regs) \
 	(!compat_user_mode(regs) ? (regs)->sp : (regs)->compat_sp)
 
+#define SET_USP(ptregs, value) \
+	(!compat_user_mode(ptregs) ? ((ptregs)->sp = value) : ((ptregs)->compat_sp = value))
+
+extern int regs_query_register_offset(const char *name);
+extern const char *regs_query_register_name(unsigned int offset);
+extern unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs,
+					       unsigned int n);
+
+/**
+ * regs_get_register() - get register value from its offset
+ * @regs:	pt_regs from which the register value is read
+ * @offset:	offset of the register.
+ *
+ * regs_get_register returns the value of the register found at byte
+ * offset @offset within the struct pt_regs pointed to by @regs.
+ * If @offset is bigger than MAX_REG_OFFSET, this returns 0.
+ */
+static inline u64 regs_get_register(struct pt_regs *regs, unsigned int offset)
+{
+	u64 val = 0;
+
+	offset >>= 3;
+	switch (offset) {
+	case 0 ... 30:
+		val = regs->regs[offset];
+		break;
+	case offsetof(struct pt_regs, sp) >> 3:
+		val = regs->sp;
+		break;
+	case offsetof(struct pt_regs, pc) >> 3:
+		val = regs->pc;
+		break;
+	case offsetof(struct pt_regs, pstate) >> 3:
+		val = regs->pstate;
+		break;
+	default:
+		val = 0;
+	}
+
+	return val;
+}
+
+/* Valid only for Kernel mode traps. */
+static inline unsigned long kernel_stack_pointer(struct pt_regs *regs)
+{
+	return regs->sp;
+}
+
 static inline unsigned long regs_return_value(struct pt_regs *regs)
 {
 	return regs->regs[0];
@@ -158,8 +208,15 @@
 struct task_struct;
 int valid_user_regs(struct user_pt_regs *regs, struct task_struct *task);
 
-#define instruction_pointer(regs)	((unsigned long)(regs)->pc)
+#define GET_IP(regs)		((unsigned long)(regs)->pc)
+#define SET_IP(regs, value)	((regs)->pc = ((u64) (value)))
+
+#define GET_FP(ptregs)		((unsigned long)(ptregs)->regs[29])
+#define SET_FP(ptregs, value)	((ptregs)->regs[29] = ((u64) (value)))
+
+#include <asm-generic/ptrace.h>
 
+#undef profile_pc
 extern unsigned long profile_pc(struct pt_regs *regs);
 
 #endif /* __ASSEMBLY__ */
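
The regs_query_register_offset()/regs_get_register() pair introduced above is the usual kprobes-style access path: resolve a register name to a byte offset once, then load it from any pt_regs. A hedged usage sketch (kernel context; the helper and its output format are hypothetical):

/* Sketch: read x2 and the saved pc from a trapped context. */
static void dump_x2_and_pc(struct pt_regs *regs)
{
	int off = regs_query_register_offset("x2");	/* 2 * sizeof(u64) */

	if (off >= 0)
		pr_info("x2 = 0x%llx\n", regs_get_register(regs, off));
	pr_info("pc = 0x%llx\n",
		regs_get_register(regs, offsetof(struct pt_regs, pc)));
}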
diff -ruw linux-4.4.115/arch/arm64/include/asm/shmparam.h linux-4.4.115-fbx/arch/arm64/include/asm/shmparam.h
--- linux-4.4.115/arch/arm64/include/asm/shmparam.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/arch/arm64/include/asm/shmparam.h	2019-01-22 16:16:21.543228657 +0100
@@ -21,7 +21,7 @@
  * alignment value. Since we don't have aliasing D-caches, the rest of
  * the time we can safely use PAGE_SIZE.
  */
-#define COMPAT_SHMLBA	0x4000
+#define COMPAT_SHMLBA	(4 * PAGE_SIZE)
 
 #include <asm-generic/shmparam.h>
 
diff -ruw linux-4.4.115/arch/arm64/include/asm/signal32.h linux-4.4.115-fbx/arch/arm64/include/asm/signal32.h
--- linux-4.4.115/arch/arm64/include/asm/signal32.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/arch/arm64/include/asm/signal32.h	2019-01-22 16:16:21.543228657 +0100
@@ -22,8 +22,6 @@
 
 #define AARCH32_KERN_SIGRET_CODE_OFFSET	0x500
 
-extern const compat_ulong_t aarch32_sigret_code[6];
-
 int compat_setup_frame(int usig, struct ksignal *ksig, sigset_t *set,
 		       struct pt_regs *regs);
 int compat_setup_rt_frame(int usig, struct ksignal *ksig, sigset_t *set,
diff -ruw linux-4.4.115/arch/arm64/include/asm/smp.h linux-4.4.115-fbx/arch/arm64/include/asm/smp.h
--- linux-4.4.115/arch/arm64/include/asm/smp.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/arch/arm64/include/asm/smp.h	2019-01-22 16:16:21.543228657 +0100
@@ -16,11 +16,22 @@
 #ifndef __ASM_SMP_H
 #define __ASM_SMP_H
 
+#include <asm/percpu.h>
+
 #include <linux/threads.h>
 #include <linux/cpumask.h>
 #include <linux/thread_info.h>
 
-#define raw_smp_processor_id() (current_thread_info()->cpu)
+DECLARE_PER_CPU_READ_MOSTLY(int, cpu_number);
+
+/*
+ * We don't use this_cpu_read(cpu_number) as that has implicit writes to
+ * preempt_count, and associated (compiler) barriers, whose expense we'd
+ * like to avoid. If we're preemptible, the value can be stale at use anyway.
+ * And we can't use this_cpu_ptr() either, as that winds up recursing back
+ * here under CONFIG_DEBUG_PREEMPT=y.
+ */
+#define raw_smp_processor_id() (*raw_cpu_ptr(&cpu_number))
 
 struct seq_file;
 
@@ -57,6 +68,9 @@
  */
 struct secondary_data {
 	void *stack;
+#ifdef CONFIG_THREAD_INFO_IN_TASK
+	struct task_struct *task;
+#endif
 };
 extern struct secondary_data secondary_data;
 extern void secondary_entry(void);
@@ -64,6 +78,15 @@
 extern void arch_send_call_function_single_ipi(int cpu);
 extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
 
+#ifdef CONFIG_ARM64_ACPI_PARKING_PROTOCOL
+extern void arch_send_wakeup_ipi_mask(const struct cpumask *mask);
+#else
+static inline void arch_send_wakeup_ipi_mask(const struct cpumask *mask)
+{
+	BUILD_BUG();
+}
+#endif
+
 extern int __cpu_disable(void);
 
 extern void __cpu_die(unsigned int cpu);
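
The raw_smp_processor_id() comment above trades accuracy for cost: raw_cpu_ptr() skips the preempt_count writes and compiler barriers that this_cpu_read() implies. A sketch of the two usage patterns, assuming only the cpu_number per-cpu variable declared in this hunk:

/* Cheap, but possibly stale if the caller is preemptible. */
static int current_cpu_fast(void)
{
	return *raw_cpu_ptr(&cpu_number);
}

/* Stable for the duration of the critical section. */
static int current_cpu_pinned(void)
{
	int cpu;

	preempt_disable();		/* keep this task on the CPU */
	cpu = *raw_cpu_ptr(&cpu_number);
	/* ... work that must stay on 'cpu' ... */
	preempt_enable();
	return cpu;
}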
diff -ruw linux-4.4.115/arch/arm64/include/asm/spinlock.h linux-4.4.115-fbx/arch/arm64/include/asm/spinlock.h
--- linux-4.4.115/arch/arm64/include/asm/spinlock.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/arch/arm64/include/asm/spinlock.h	2019-10-29 09:26:22.997196856 +0100
@@ -26,9 +26,32 @@
  * The memory barriers are implicit with the load-acquire and store-release
  * instructions.
  */
+static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
+{
+	unsigned int tmp;
+	arch_spinlock_t lockval;
 
-#define arch_spin_unlock_wait(lock) \
-	do { while (arch_spin_is_locked(lock)) cpu_relax(); } while (0)
+	asm volatile(
+"	sevl\n"
+"1:	wfe\n"
+"2:	ldaxr	%w0, %2\n"
+"	eor	%w1, %w0, %w0, ror #16\n"
+"	cbnz	%w1, 1b\n"
+	/* Serialise against any concurrent lockers */
+	ARM64_LSE_ATOMIC_INSN(
+	/* LL/SC */
+"	stxr	%w1, %w0, %2\n"
+"	nop\n"
+"	nop\n",
+	/* LSE atomics */
+"	mov	%w1, %w0\n"
+"	cas	%w0, %w0, %2\n"
+"	eor	%w1, %w1, %w0\n")
+"	cbnz	%w1, 2b\n"
+	: "=&r" (lockval), "=&r" (tmp), "+Q" (*lock)
+	:
+	: "memory");
+}
 
 #define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
 
@@ -80,7 +103,6 @@
 
 	asm volatile(ARM64_LSE_ATOMIC_INSN(
 	/* LL/SC */
-	"	prfm	pstl1strm, %2\n"
 	"1:	ldaxr	%w0, %2\n"
 	"	eor	%w1, %w0, %w0, ror #16\n"
 	"	cbnz	%w1, 2f\n"
diff -ruw linux-4.4.115/arch/arm64/include/asm/stacktrace.h linux-4.4.115-fbx/arch/arm64/include/asm/stacktrace.h
--- linux-4.4.115/arch/arm64/include/asm/stacktrace.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/arch/arm64/include/asm/stacktrace.h	2019-01-22 16:16:21.543228657 +0100
@@ -16,14 +16,19 @@
 #ifndef __ASM_STACKTRACE_H
 #define __ASM_STACKTRACE_H
 
+struct task_struct;
+
 struct stackframe {
 	unsigned long fp;
 	unsigned long sp;
 	unsigned long pc;
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+	unsigned int graph;
+#endif
 };
 
-extern int unwind_frame(struct stackframe *frame);
-extern void walk_stackframe(struct stackframe *frame,
+extern int unwind_frame(struct task_struct *tsk, struct stackframe *frame);
+extern void walk_stackframe(struct task_struct *tsk, struct stackframe *frame,
 			    int (*fn)(struct stackframe *, void *), void *data);
 
 #endif	/* __ASM_STACKTRACE_H */
diff -ruw linux-4.4.115/arch/arm64/include/asm/suspend.h linux-4.4.115-fbx/arch/arm64/include/asm/suspend.h
--- linux-4.4.115/arch/arm64/include/asm/suspend.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/arch/arm64/include/asm/suspend.h	2019-01-22 16:16:21.543228657 +0100
@@ -1,7 +1,8 @@
 #ifndef __ASM_SUSPEND_H
 #define __ASM_SUSPEND_H
 
-#define NR_CTX_REGS 11
+#define NR_CTX_REGS 12
+#define NR_CALLEE_SAVED_REGS 12
 
 /*
  * struct cpu_suspend_ctx must be 16-byte aligned since it is allocated on
@@ -16,11 +17,34 @@
 	u64 sp;
 } __aligned(16);
 
-struct sleep_save_sp {
-	phys_addr_t *save_ptr_stash;
-	phys_addr_t save_ptr_stash_phys;
+/*
+ * Memory to save the cpu state is allocated on the stack by
+ * __cpu_suspend_enter()'s caller, and populated by __cpu_suspend_enter().
+ * This data must survive until cpu_resume() is called.
+ *
+ * This struct describes the size and the layout of the saved cpu state.
+ * The layout of the callee_saved_regs is defined by the implementation
+ * of __cpu_suspend_enter(), and cpu_resume(). This struct must be passed
+ * in by the caller as __cpu_suspend_enter()'s stack-frame is gone once it
+ * returns, and the data would be subsequently corrupted by the call to the
+ * finisher.
+ */
+struct sleep_stack_data {
+	struct cpu_suspend_ctx	system_regs;
+	unsigned long		callee_saved_regs[NR_CALLEE_SAVED_REGS];
 };
 
+extern unsigned long *sleep_save_stash;
+
 extern int cpu_suspend(unsigned long arg, int (*fn)(unsigned long));
 extern void cpu_resume(void);
+int __cpu_suspend_enter(struct sleep_stack_data *state);
+void __cpu_suspend_exit(void);
+void _cpu_resume(void);
+
+int swsusp_arch_suspend(void);
+int swsusp_arch_resume(void);
+int arch_hibernation_header_save(void *addr, unsigned int max_size);
+int arch_hibernation_header_restore(void *addr);
+
 #endif
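
The sleep_stack_data comment above pins down a calling convention: the caller owns the storage because __cpu_suspend_enter()'s frame is gone by the time cpu_resume() replays the saved state. A simplified, hypothetical caller illustrating that contract (the real one lives in arch/arm64/kernel/suspend.c; CPU PM notifiers and error paths are omitted, and the return-value handling is an assumption):

static int do_cpu_suspend(unsigned long arg, int (*finisher)(unsigned long))
{
	struct sleep_stack_data state;	/* must outlive the suspend entry */
	int ret = 0;

	if (__cpu_suspend_enter(&state)) {
		/* First pass: state saved, ask the finisher to power down. */
		ret = finisher(arg);
		if (!ret)
			ret = -EOPNOTSUPP;	/* returning at all means failure */
	} else {
		/* Second pass: execution resumed through cpu_resume(). */
		__cpu_suspend_exit();
	}
	return ret;
}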
diff -ruw linux-4.4.115/arch/arm64/include/asm/sysreg.h linux-4.4.115-fbx/arch/arm64/include/asm/sysreg.h
--- linux-4.4.115/arch/arm64/include/asm/sysreg.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/arch/arm64/include/asm/sysreg.h	2019-10-29 09:26:23.001196895 +0100
@@ -20,6 +20,8 @@
 #ifndef __ASM_SYSREG_H
 #define __ASM_SYSREG_H
 
+#include <linux/stringify.h>
+
 #include <asm/opcodes.h>
 
 /*
@@ -70,20 +72,35 @@
 
 #define SYS_ID_AA64MMFR0_EL1		sys_reg(3, 0, 0, 7, 0)
 #define SYS_ID_AA64MMFR1_EL1		sys_reg(3, 0, 0, 7, 1)
+#define SYS_ID_AA64MMFR2_EL1		sys_reg(3, 0, 0, 7, 2)
 
 #define SYS_CNTFRQ_EL0			sys_reg(3, 3, 14, 0, 0)
 #define SYS_CTR_EL0			sys_reg(3, 3, 0, 0, 1)
 #define SYS_DCZID_EL0			sys_reg(3, 3, 0, 0, 7)
 
 #define REG_PSTATE_PAN_IMM		sys_reg(0, 0, 4, 0, 4)
+#define REG_PSTATE_UAO_IMM		sys_reg(0, 0, 4, 0, 3)
 
 #define SET_PSTATE_PAN(x) __inst_arm(0xd5000000 | REG_PSTATE_PAN_IMM |\
 				     (!!x)<<8 | 0x1f)
+#define SET_PSTATE_UAO(x) __inst_arm(0xd5000000 | REG_PSTATE_UAO_IMM |\
+				     (!!x)<<8 | 0x1f)
 
-/* SCTLR_EL1 */
-#define SCTLR_EL1_CP15BEN	(0x1 << 5)
-#define SCTLR_EL1_SED		(0x1 << 8)
-#define SCTLR_EL1_SPAN		(0x1 << 23)
+/* Common SCTLR_ELx flags. */
+#define SCTLR_ELx_EE    (1 << 25)
+#define SCTLR_ELx_I	(1 << 12)
+#define SCTLR_ELx_SA	(1 << 3)
+#define SCTLR_ELx_C	(1 << 2)
+#define SCTLR_ELx_A	(1 << 1)
+#define SCTLR_ELx_M	1
+
+#define SCTLR_ELx_FLAGS	(SCTLR_ELx_M | SCTLR_ELx_A | SCTLR_ELx_C | \
+			 SCTLR_ELx_SA | SCTLR_ELx_I)
+
+/* SCTLR_EL1 specific flags. */
+#define SCTLR_EL1_SPAN		(1 << 23)
+#define SCTLR_EL1_SED		(1 << 8)
+#define SCTLR_EL1_CP15BEN	(1 << 5)
 
 
 /* id_aa64isar0 */
@@ -95,6 +112,7 @@
 #define ID_AA64ISAR0_AES_SHIFT		4
 
 /* id_aa64pfr0 */
+#define ID_AA64PFR0_CSV2_SHIFT		56
 #define ID_AA64PFR0_GIC_SHIFT		24
 #define ID_AA64PFR0_ASIMD_SHIFT		20
 #define ID_AA64PFR0_FP_SHIFT		16
@@ -135,6 +153,9 @@
 #define ID_AA64MMFR1_VMIDBITS_SHIFT	4
 #define ID_AA64MMFR1_HADBS_SHIFT	0
 
+/* id_aa64mmfr2 */
+#define ID_AA64MMFR2_UAO_SHIFT		4
+
 /* id_aa64dfr0 */
 #define ID_AA64DFR0_CTX_CMPS_SHIFT	28
 #define ID_AA64DFR0_WRPS_SHIFT		20
@@ -194,32 +215,34 @@
 #ifdef __ASSEMBLY__
 
 	.irp	num,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30
-	.equ	__reg_num_x\num, \num
+	.equ	.L__reg_num_x\num, \num
 	.endr
-	.equ	__reg_num_xzr, 31
+	.equ	.L__reg_num_xzr, 31
 
 	.macro	mrs_s, rt, sreg
-	.inst	0xd5200000|(\sreg)|(__reg_num_\rt)
+	.inst	0xd5200000|(\sreg)|(.L__reg_num_\rt)
 	.endm
 
 	.macro	msr_s, sreg, rt
-	.inst	0xd5000000|(\sreg)|(__reg_num_\rt)
+	.inst	0xd5000000|(\sreg)|(.L__reg_num_\rt)
 	.endm
 
 #else
 
+#include <linux/types.h>
+
 asm(
 "	.irp	num,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30\n"
-"	.equ	__reg_num_x\\num, \\num\n"
+"	.equ	.L__reg_num_x\\num, \\num\n"
 "	.endr\n"
-"	.equ	__reg_num_xzr, 31\n"
+"	.equ	.L__reg_num_xzr, 31\n"
 "\n"
 "	.macro	mrs_s, rt, sreg\n"
-"	.inst	0xd5200000|(\\sreg)|(__reg_num_\\rt)\n"
+"	.inst	0xd5200000|(\\sreg)|(.L__reg_num_\\rt)\n"
 "	.endm\n"
 "\n"
 "	.macro	msr_s, sreg, rt\n"
-"	.inst	0xd5000000|(\\sreg)|(__reg_num_\\rt)\n"
+"	.inst	0xd5000000|(\\sreg)|(.L__reg_num_\\rt)\n"
 "	.endm\n"
 );
 
@@ -232,6 +255,23 @@
 	val |= set;
 	asm volatile("msr sctlr_el1, %0" : : "r" (val));
 }
+
+/*
+ * Unlike read_cpuid, calls to read_sysreg are never expected to be
+ * optimized away or replaced with synthetic values.
+ */
+#define read_sysreg(r) ({					\
+	u64 __val;						\
+	asm volatile("mrs %0, " __stringify(r) : "=r" (__val));	\
+	__val;							\
+})
+
+#define write_sysreg(v, r) do {					\
+	u64 __val = (u64)v;					\
+	asm volatile("msr " __stringify(r) ", %0"		\
+		     : : "r" (__val));				\
+} while (0)
+
 #endif
 
 #endif	/* __ASM_SYSREG_H */
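
The read_sysreg()/write_sysreg() accessors above stringify the register name directly into the mrs/msr instruction. A usage sketch restating this header's config_sctlr_el1() pattern with the new accessors (the trailing isb() is an assumption about the caller's needs, not part of the hunk):

static inline void sctlr_el1_set_bits(u64 set)
{
	u64 val = read_sysreg(sctlr_el1);	/* mrs %0, sctlr_el1 */

	write_sysreg(val | set, sctlr_el1);	/* msr sctlr_el1, %0 */
	isb();	/* expose the new control bits to later instructions */
}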
diff -ruw linux-4.4.115/arch/arm64/include/asm/system_misc.h linux-4.4.115-fbx/arch/arm64/include/asm/system_misc.h
--- linux-4.4.115/arch/arm64/include/asm/system_misc.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/arch/arm64/include/asm/system_misc.h	2019-01-22 16:16:21.543228657 +0100
@@ -44,6 +44,7 @@
 extern void __show_regs(struct pt_regs *);
 
 extern void (*arm_pm_restart)(enum reboot_mode reboot_mode, const char *cmd);
+extern char* (*arch_read_hardware_id)(void);
 
 #define show_unhandled_signals_ratelimited()				\
 ({									\
diff -ruw linux-4.4.115/arch/arm64/include/asm/thread_info.h linux-4.4.115-fbx/arch/arm64/include/asm/thread_info.h
--- linux-4.4.115/arch/arm64/include/asm/thread_info.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/arch/arm64/include/asm/thread_info.h	2019-10-29 09:26:23.001196895 +0100
@@ -36,22 +36,36 @@
 
 struct task_struct;
 
+#include <asm/stack_pointer.h>
 #include <asm/types.h>
 
 typedef unsigned long mm_segment_t;
 
 /*
  * low level task data that entry.S needs immediate access to.
- * __switch_to() assumes cpu_context follows immediately after cpu_domain.
  */
 struct thread_info {
 	unsigned long		flags;		/* low level flags */
 	mm_segment_t		addr_limit;	/* address limit */
+#ifndef CONFIG_THREAD_INFO_IN_TASK
 	struct task_struct	*task;		/* main task structure */
+#endif
+#ifdef CONFIG_ARM64_SW_TTBR0_PAN
+	u64			ttbr0;		/* saved TTBR0_EL1 */
+#endif
 	int			preempt_count;	/* 0 => preemptable, <0 => bug */
+#ifndef CONFIG_THREAD_INFO_IN_TASK
 	int			cpu;		/* cpu */
+#endif
 };
 
+#ifdef CONFIG_THREAD_INFO_IN_TASK
+#define INIT_THREAD_INFO(tsk)						\
+{									\
+	.preempt_count	= INIT_PREEMPT_COUNT,				\
+	.addr_limit	= KERNEL_DS,					\
+}
+#else
 #define INIT_THREAD_INFO(tsk)						\
 {									\
 	.task		= &tsk,						\
@@ -60,25 +74,28 @@
 	.addr_limit	= KERNEL_DS,					\
 }
 
-#define init_thread_info	(init_thread_union.thread_info)
-#define init_stack		(init_thread_union.stack)
-
-/*
- * how to get the current stack pointer from C
- */
-register unsigned long current_stack_pointer asm ("sp");
-
 /*
  * how to get the thread information struct from C
  */
 static inline struct thread_info *current_thread_info(void) __attribute_const__;
 
+/*
+ * struct thread_info can be accessed directly via sp_el0.
+ */
 static inline struct thread_info *current_thread_info(void)
 {
-	return (struct thread_info *)
-		(current_stack_pointer & ~(THREAD_SIZE - 1));
+	unsigned long sp_el0;
+
+	asm ("mrs %0, sp_el0" : "=r" (sp_el0));
+
+	return (struct thread_info *)sp_el0;
 }
 
+#define init_thread_info	(init_thread_union.thread_info)
+#endif
+
+#define init_stack		(init_thread_union.stack)
+
 #define thread_saved_pc(tsk)	\
 	((unsigned long)(tsk->thread.cpu_context.pc))
 #define thread_saved_sp(tsk)	\
@@ -113,6 +130,7 @@
 #define TIF_RESTORE_SIGMASK	20
 #define TIF_SINGLESTEP		21
 #define TIF_32BIT		22	/* 32bit process */
+#define TIF_MM_RELEASED		24
 
 #define _TIF_SIGPENDING		(1 << TIF_SIGPENDING)
 #define _TIF_NEED_RESCHED	(1 << TIF_NEED_RESCHED)
diff -ruw linux-4.4.115/arch/arm64/include/asm/tlbflush.h linux-4.4.115-fbx/arch/arm64/include/asm/tlbflush.h
--- linux-4.4.115/arch/arm64/include/asm/tlbflush.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/arch/arm64/include/asm/tlbflush.h	2019-01-22 16:16:21.543228657 +0100
@@ -23,6 +23,30 @@
 
 #include <linux/sched.h>
 #include <asm/cputype.h>
+#include <asm/mmu.h>
+
+/*
+ * Raw TLBI operations.
+ *
+ * Where necessary, use the __tlbi() macro to avoid asm()
+ * boilerplate. Drivers and most kernel code should use the TLB
+ * management routines in preference to the macro below.
+ *
+ * The macro can be used as __tlbi(op) or __tlbi(op, arg), depending
+ * on whether a particular TLBI operation takes an argument or
+ * not. The macro handles invoking the asm with or without the
+ * register argument as appropriate.
+ */
+#define __TLBI_0(op, arg)		asm ("tlbi " #op)
+#define __TLBI_1(op, arg)		asm ("tlbi " #op ", %0" : : "r" (arg))
+#define __TLBI_N(op, arg, n, ...)	__TLBI_##n(op, arg)
+
+#define __tlbi(op, ...)		__TLBI_N(op, ##__VA_ARGS__, 1, 0)
+
+#define __tlbi_user(op, arg) do {						\
+	if (arm64_kernel_unmapped_at_el0())					\
+		__tlbi(op, (arg) | USER_ASID_FLAG);				\
+} while (0)
 
 /*
  *	TLB Management
@@ -66,7 +90,7 @@
 static inline void local_flush_tlb_all(void)
 {
 	dsb(nshst);
-	asm("tlbi	vmalle1");
+	__tlbi(vmalle1);
 	dsb(nsh);
 	isb();
 }
@@ -74,7 +98,7 @@
 static inline void flush_tlb_all(void)
 {
 	dsb(ishst);
-	asm("tlbi	vmalle1is");
+	__tlbi(vmalle1is);
 	dsb(ish);
 	isb();
 }
@@ -84,7 +108,8 @@
 	unsigned long asid = ASID(mm) << 48;
 
 	dsb(ishst);
-	asm("tlbi	aside1is, %0" : : "r" (asid));
+	__tlbi(aside1is, asid);
+	__tlbi_user(aside1is, asid);
 	dsb(ish);
 }
 
@@ -94,7 +119,8 @@
 	unsigned long addr = uaddr >> 12 | (ASID(vma->vm_mm) << 48);
 
 	dsb(ishst);
-	asm("tlbi	vale1is, %0" : : "r" (addr));
+	__tlbi(vale1is, addr);
+	__tlbi_user(vale1is, addr);
 	dsb(ish);
 }
 
@@ -121,10 +147,13 @@
 
 	dsb(ishst);
 	for (addr = start; addr < end; addr += 1 << (PAGE_SHIFT - 12)) {
-		if (last_level)
-			asm("tlbi vale1is, %0" : : "r"(addr));
-		else
-			asm("tlbi vae1is, %0" : : "r"(addr));
+		if (last_level) {
+			__tlbi(vale1is, addr);
+			__tlbi_user(vale1is, addr);
+		} else {
+			__tlbi(vae1is, addr);
+			__tlbi_user(vae1is, addr);
+		}
 	}
 	dsb(ish);
 }
@@ -149,7 +178,7 @@
 
 	dsb(ishst);
 	for (addr = start; addr < end; addr += 1 << (PAGE_SHIFT - 12))
-		asm("tlbi vaae1is, %0" : : "r"(addr));
+		__tlbi(vaae1is, addr);
 	dsb(ish);
 	isb();
 }
@@ -163,7 +192,8 @@
 {
 	unsigned long addr = uaddr >> 12 | (ASID(mm) << 48);
 
-	asm("tlbi	vae1is, %0" : : "r" (addr));
+	__tlbi(vae1is, addr);
+	__tlbi_user(vae1is, addr);
 	dsb(ish);
 }
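
The __tlbi() dispatch above counts its arguments by padding the list: __tlbi(vmalle1is) expands to __TLBI_N(vmalle1is, 1, 0) and picks the operand-less __TLBI_0, while __tlbi(vaae1is, addr) expands to __TLBI_N(vaae1is, addr, 1, 0) and picks __TLBI_1 with one register operand. A sketch of a caller in the style of the flush routines in this hunk (hypothetical helper; the operand is the VA shifted into TLBI format):

/* Sketch: invalidate one kernel VA across all ASIDs, on all CPUs. */
static inline void flush_one_kernel_page(unsigned long vaddr)
{
	unsigned long addr = vaddr >> 12;	/* TLBI VA operand format */

	dsb(ishst);	/* order the PTE update before the invalidate */
	__tlbi(vaae1is, addr);
	dsb(ish);
	isb();
}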
 
diff -ruw linux-4.4.115/arch/arm64/include/asm/topology.h linux-4.4.115-fbx/arch/arm64/include/asm/topology.h
--- linux-4.4.115/arch/arm64/include/asm/topology.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/arch/arm64/include/asm/topology.h	2019-01-22 16:16:21.543228657 +0100
@@ -21,6 +21,16 @@
 void init_cpu_topology(void);
 void store_cpu_topology(unsigned int cpuid);
 const struct cpumask *cpu_coregroup_mask(int cpu);
+unsigned long arch_get_cpu_efficiency(int cpu);
+
+struct sched_domain;
+#ifdef CONFIG_CPU_FREQ
+#define arch_scale_freq_capacity cpufreq_scale_freq_capacity
+extern unsigned long cpufreq_scale_freq_capacity(struct sched_domain *sd, int cpu);
+extern unsigned long cpufreq_scale_max_freq_capacity(int cpu);
+#endif
+#define arch_scale_cpu_capacity scale_cpu_capacity
+extern unsigned long scale_cpu_capacity(struct sched_domain *sd, int cpu);
 
 #include <asm-generic/topology.h>
 
diff -ruw linux-4.4.115/arch/arm64/include/asm/traps.h linux-4.4.115-fbx/arch/arm64/include/asm/traps.h
--- linux-4.4.115/arch/arm64/include/asm/traps.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/arch/arm64/include/asm/traps.h	2019-10-29 09:26:23.001196895 +0100
@@ -34,7 +34,6 @@
 void register_undef_hook(struct undef_hook *hook);
 void unregister_undef_hook(struct undef_hook *hook);
 
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
 static inline int __in_irqentry_text(unsigned long ptr)
 {
 	extern char __irqentry_text_start[];
@@ -43,12 +42,6 @@
 	return ptr >= (unsigned long)&__irqentry_text_start &&
 	       ptr < (unsigned long)&__irqentry_text_end;
 }
-#else
-static inline int __in_irqentry_text(unsigned long ptr)
-{
-	return 0;
-}
-#endif
 
 static inline int in_exception_text(unsigned long ptr)
 {
@@ -62,4 +55,5 @@
 	return in ? : __in_irqentry_text(ptr);
 }
 
+static inline void get_pct_hook_init(void) {}
 #endif
diff -ruw linux-4.4.115/arch/arm64/include/asm/uaccess.h linux-4.4.115-fbx/arch/arm64/include/asm/uaccess.h
--- linux-4.4.115/arch/arm64/include/asm/uaccess.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/arch/arm64/include/asm/uaccess.h	2019-10-29 09:26:23.001196895 +0100
@@ -18,6 +18,13 @@
 #ifndef __ASM_UACCESS_H
 #define __ASM_UACCESS_H
 
+#include <asm/alternative.h>
+#include <asm/kernel-pgtable.h>
+#include <asm/mmu.h>
+#include <asm/sysreg.h>
+
+#ifndef __ASSEMBLY__
+
 /*
  * User space memory access functions
  */
@@ -25,10 +32,8 @@
 #include <linux/string.h>
 #include <linux/thread_info.h>
 
-#include <asm/alternative.h>
 #include <asm/cpufeature.h>
 #include <asm/ptrace.h>
-#include <asm/sysreg.h>
 #include <asm/errno.h>
 #include <asm/memory.h>
 #include <asm/compiler.h>
@@ -37,11 +42,11 @@
 #define VERIFY_WRITE 1
 
 /*
- * The exception table consists of pairs of addresses: the first is the
- * address of an instruction that is allowed to fault, and the second is
- * the address at which the program should continue.  No registers are
- * modified, so it is entirely up to the continuation code to figure out
- * what to do.
+ * The exception table consists of pairs of relative offsets: the first
+ * is the relative offset to an instruction that is allowed to fault,
+ * and the second is the relative offset at which the program should
+ * continue. No registers are modified, so it is entirely up to the
+ * continuation code to figure out what to do.
  *
  * All the routines below use bits of fixup code that are out of line
  * with the main instruction path.  This means when everything is well,
@@ -51,9 +56,11 @@
 
 struct exception_table_entry
 {
-	unsigned long insn, fixup;
+	int insn, fixup;
 };
 
+#define ARCH_HAS_RELATIVE_EXTABLE
+
 extern int fixup_exception(struct pt_regs *regs);
 
 #define KERNEL_DS	(-1UL)
@@ -65,6 +72,16 @@
 static inline void set_fs(mm_segment_t fs)
 {
 	current_thread_info()->addr_limit = fs;
+
+	/*
+	 * Enable/disable UAO so that copy_to_user() etc can access
+	 * kernel memory with the unprivileged instructions.
+	 */
+	if (IS_ENABLED(CONFIG_ARM64_UAO) && fs == KERNEL_DS)
+		asm(ALTERNATIVE("nop", SET_PSTATE_UAO(1), ARM64_HAS_UAO));
+	else
+		asm(ALTERNATIVE("nop", SET_PSTATE_UAO(0), ARM64_HAS_UAO,
+				CONFIG_ARM64_UAO));
 }
 
 #define segment_eq(a, b)	((a) == (b))
@@ -114,6 +131,121 @@
 #define access_ok(type, addr, size)	__range_ok(addr, size)
 #define user_addr_max			get_fs
 
+#define _ASM_EXTABLE(from, to)						\
+	"	.pushsection	__ex_table, \"a\"\n"			\
+	"	.align		3\n"					\
+	"	.long		(" #from " - .), (" #to " - .)\n"	\
+	"	.popsection\n"
+
+/*
+ * User access enabling/disabling.
+ */
+#ifdef CONFIG_ARM64_SW_TTBR0_PAN
+static inline void __uaccess_ttbr0_disable(void)
+{
+	unsigned long flags, ttbr;
+
+	local_irq_save(flags);
+	ttbr = read_sysreg(ttbr1_el1);
+	ttbr &= ~TTBR_ASID_MASK;
+	/* reserved_ttbr0 placed at the end of swapper_pg_dir */
+	write_sysreg(ttbr + SWAPPER_DIR_SIZE, ttbr0_el1);
+	isb();
+	/* Set reserved ASID */
+	write_sysreg(ttbr, ttbr1_el1);
+	isb();
+	local_irq_restore(flags);
+}
+
+static inline void __uaccess_ttbr0_enable(void)
+{
+	unsigned long flags, ttbr0, ttbr1;
+
+	/*
+	 * Disable interrupts to avoid preemption between reading the 'ttbr0'
+	 * variable and the MSR. A context switch could trigger an ASID
+	 * roll-over and an update of 'ttbr0'.
+	 */
+	local_irq_save(flags);
+	ttbr0 = READ_ONCE(current_thread_info()->ttbr0);
+
+	/* Restore active ASID */
+	ttbr1 = read_sysreg(ttbr1_el1);
+	ttbr1 &= ~TTBR_ASID_MASK;		/* safety measure */
+	ttbr1 |= ttbr0 & TTBR_ASID_MASK;
+	write_sysreg(ttbr1, ttbr1_el1);
+	isb();
+
+	/* Restore user page table */
+	write_sysreg(ttbr0, ttbr0_el1);
+	isb();
+	local_irq_restore(flags);
+}
+
+static inline bool uaccess_ttbr0_disable(void)
+{
+	if (!system_uses_ttbr0_pan())
+		return false;
+	__uaccess_ttbr0_disable();
+	return true;
+}
+
+static inline bool uaccess_ttbr0_enable(void)
+{
+	if (!system_uses_ttbr0_pan())
+		return false;
+	__uaccess_ttbr0_enable();
+	return true;
+}
+#else
+static inline bool uaccess_ttbr0_disable(void)
+{
+	return false;
+}
+
+static inline bool uaccess_ttbr0_enable(void)
+{
+	return false;
+}
+#endif
+
+#define __uaccess_disable(alt)						\
+do {									\
+	if (!uaccess_ttbr0_disable())					\
+		asm(ALTERNATIVE("nop", SET_PSTATE_PAN(1), alt,		\
+				CONFIG_ARM64_PAN));			\
+} while (0)
+
+#define __uaccess_enable(alt)						\
+do {									\
+	if (!uaccess_ttbr0_enable())					\
+		asm(ALTERNATIVE("nop", SET_PSTATE_PAN(0), alt,		\
+				CONFIG_ARM64_PAN));			\
+} while (0)
+
+static inline void uaccess_disable(void)
+{
+	__uaccess_disable(ARM64_HAS_PAN);
+}
+
+static inline void uaccess_enable(void)
+{
+	__uaccess_enable(ARM64_HAS_PAN);
+}
+
+/*
+ * These functions are no-ops when UAO is present.
+ */
+static inline void uaccess_disable_not_uao(void)
+{
+	__uaccess_disable(ARM64_ALT_PAN_NOT_UAO);
+}
+
+static inline void uaccess_enable_not_uao(void)
+{
+	__uaccess_enable(ARM64_ALT_PAN_NOT_UAO);
+}
+
 /*
  * The "__xxx" versions of the user access functions do not verify the address
  * space - it must have been done previously with a separate "access_ok()"
@@ -122,9 +254,10 @@
  * The "__xxx_error" versions set the third argument to -EFAULT if an error
  * occurs, and leave it unchanged on success.
  */
-#define __get_user_asm(instr, reg, x, addr, err)			\
+#define __get_user_asm(instr, alt_instr, reg, x, addr, err, feature)	\
 	asm volatile(							\
-	"1:	" instr "	" reg "1, [%2]\n"			\
+	"1:"ALTERNATIVE(instr "     " reg "1, [%2]\n",			\
+			alt_instr " " reg "1, [%2]\n", feature)		\
 	"2:\n"								\
 	"	.section .fixup, \"ax\"\n"				\
 	"	.align	2\n"						\
@@ -132,10 +265,7 @@
 	"	mov	%1, #0\n"					\
 	"	b	2b\n"						\
 	"	.previous\n"						\
-	"	.section __ex_table,\"a\"\n"				\
-	"	.align	3\n"						\
-	"	.quad	1b, 3b\n"					\
-	"	.previous"						\
+	_ASM_EXTABLE(1b, 3b)						\
 	: "+r" (err), "=&r" (x)						\
 	: "r" (addr), "i" (-EFAULT))
 
@@ -143,27 +273,29 @@
 do {									\
 	unsigned long __gu_val;						\
 	__chk_user_ptr(ptr);						\
-	asm(ALTERNATIVE("nop", SET_PSTATE_PAN(0), ARM64_HAS_PAN,	\
-			CONFIG_ARM64_PAN));				\
+	uaccess_enable_not_uao();					\
 	switch (sizeof(*(ptr))) {					\
 	case 1:								\
-		__get_user_asm("ldrb", "%w", __gu_val, (ptr), (err));	\
+		__get_user_asm("ldrb", "ldtrb", "%w", __gu_val, (ptr),  \
+			       (err), ARM64_HAS_UAO);			\
 		break;							\
 	case 2:								\
-		__get_user_asm("ldrh", "%w", __gu_val, (ptr), (err));	\
+		__get_user_asm("ldrh", "ldtrh", "%w", __gu_val, (ptr),  \
+			       (err), ARM64_HAS_UAO);			\
 		break;							\
 	case 4:								\
-		__get_user_asm("ldr", "%w", __gu_val, (ptr), (err));	\
+		__get_user_asm("ldr", "ldtr", "%w", __gu_val, (ptr),	\
+			       (err), ARM64_HAS_UAO);			\
 		break;							\
 	case 8:								\
-		__get_user_asm("ldr", "%",  __gu_val, (ptr), (err));	\
+		__get_user_asm("ldr", "ldtr", "%",  __gu_val, (ptr),	\
+			       (err), ARM64_HAS_UAO);			\
 		break;							\
 	default:							\
 		BUILD_BUG();						\
 	}								\
+	uaccess_disable_not_uao();					\
 	(x) = (__force __typeof__(*(ptr)))__gu_val;			\
-	asm(ALTERNATIVE("nop", SET_PSTATE_PAN(1), ARM64_HAS_PAN,	\
-			CONFIG_ARM64_PAN));				\
 } while (0)
 
 #define __get_user(x, ptr)						\
@@ -190,19 +322,17 @@
 		((x) = 0, -EFAULT);					\
 })
 
-#define __put_user_asm(instr, reg, x, addr, err)			\
+#define __put_user_asm(instr, alt_instr, reg, x, addr, err, feature)	\
 	asm volatile(							\
-	"1:	" instr "	" reg "1, [%2]\n"			\
+	"1:"ALTERNATIVE(instr "     " reg "1, [%2]\n",			\
+			alt_instr " " reg "1, [%2]\n", feature)		\
 	"2:\n"								\
 	"	.section .fixup,\"ax\"\n"				\
 	"	.align	2\n"						\
 	"3:	mov	%w0, %3\n"					\
 	"	b	2b\n"						\
 	"	.previous\n"						\
-	"	.section __ex_table,\"a\"\n"				\
-	"	.align	3\n"						\
-	"	.quad	1b, 3b\n"					\
-	"	.previous"						\
+	_ASM_EXTABLE(1b, 3b)						\
 	: "+r" (err)							\
 	: "r" (x), "r" (addr), "i" (-EFAULT))
 
@@ -210,26 +340,28 @@
 do {									\
 	__typeof__(*(ptr)) __pu_val = (x);				\
 	__chk_user_ptr(ptr);						\
-	asm(ALTERNATIVE("nop", SET_PSTATE_PAN(0), ARM64_HAS_PAN,	\
-			CONFIG_ARM64_PAN));				\
+	uaccess_enable_not_uao();					\
 	switch (sizeof(*(ptr))) {					\
 	case 1:								\
-		__put_user_asm("strb", "%w", __pu_val, (ptr), (err));	\
+		__put_user_asm("strb", "sttrb", "%w", __pu_val, (ptr),	\
+			       (err), ARM64_HAS_UAO);			\
 		break;							\
 	case 2:								\
-		__put_user_asm("strh", "%w", __pu_val, (ptr), (err));	\
+		__put_user_asm("strh", "sttrh", "%w", __pu_val, (ptr),	\
+			       (err), ARM64_HAS_UAO);			\
 		break;							\
 	case 4:								\
-		__put_user_asm("str",  "%w", __pu_val, (ptr), (err));	\
+		__put_user_asm("str", "sttr", "%w", __pu_val, (ptr),	\
+			       (err), ARM64_HAS_UAO);			\
 		break;							\
 	case 8:								\
-		__put_user_asm("str",  "%", __pu_val, (ptr), (err));	\
+		__put_user_asm("str", "sttr", "%", __pu_val, (ptr),	\
+			       (err), ARM64_HAS_UAO);			\
 		break;							\
 	default:							\
 		BUILD_BUG();						\
 	}								\
-	asm(ALTERNATIVE("nop", SET_PSTATE_PAN(1), ARM64_HAS_PAN,	\
-			CONFIG_ARM64_PAN));				\
+	uaccess_disable_not_uao();					\
 } while (0)
 
 #define __put_user(x, ptr)						\
@@ -256,24 +388,39 @@
 		-EFAULT;						\
 })
 
-extern unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n);
-extern unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n);
+extern unsigned long __must_check __arch_copy_from_user(void *to, const void __user *from, unsigned long n);
+extern unsigned long __must_check __arch_copy_to_user(void __user *to, const void *from, unsigned long n);
 extern unsigned long __must_check __copy_in_user(void __user *to, const void __user *from, unsigned long n);
 extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
 
+static inline unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n)
+{
+	check_object_size(to, n, false);
+	return __arch_copy_from_user(to, from, n);
+}
+
+static inline unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n)
+{
+	check_object_size(from, n, true);
+	return __arch_copy_to_user(to, from, n);
+}
+
 static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
 {
-	if (access_ok(VERIFY_READ, from, n))
-		n = __copy_from_user(to, from, n);
-	else /* security hole - plug it */
+	if (access_ok(VERIFY_READ, from, n)) {
+		check_object_size(to, n, false);
+		n = __arch_copy_from_user(to, from, n);
+	} else /* security hole - plug it */
 		memset(to, 0, n);
 	return n;
 }
 
 static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
 {
-	if (access_ok(VERIFY_WRITE, to, n))
-		n = __copy_to_user(to, from, n);
+	if (access_ok(VERIFY_WRITE, to, n)) {
+		check_object_size(from, n, true);
+		n = __arch_copy_to_user(to, from, n);
+	}
 	return n;
 }
 
@@ -299,4 +446,77 @@
 extern __must_check long strlen_user(const char __user *str);
 extern __must_check long strnlen_user(const char __user *str, long n);
 
+#else	/* __ASSEMBLY__ */
+
+#include <asm/assembler.h>
+
+/*
+ * User access enabling/disabling macros.
+ */
+#ifdef CONFIG_ARM64_SW_TTBR0_PAN
+	.macro	__uaccess_ttbr0_disable, tmp1
+	mrs	\tmp1, ttbr1_el1		// swapper_pg_dir
+	bic	\tmp1, \tmp1, #TTBR_ASID_MASK
+	add	\tmp1, \tmp1, #SWAPPER_DIR_SIZE	// reserved_ttbr0 at the end of swapper_pg_dir
+	msr	ttbr0_el1, \tmp1		// set reserved TTBR0_EL1
+	isb
+	sub	\tmp1, \tmp1, #SWAPPER_DIR_SIZE
+	msr	ttbr1_el1, \tmp1		// set reserved ASID
+	isb
+	.endm
+
+	.macro	__uaccess_ttbr0_enable, tmp1, tmp2
+	get_thread_info \tmp1
+	ldr	\tmp1, [\tmp1, #TSK_TI_TTBR0]	// load saved TTBR0_EL1
+	mrs	\tmp2, ttbr1_el1
+	extr    \tmp2, \tmp2, \tmp1, #48
+	ror     \tmp2, \tmp2, #16
+	msr	ttbr1_el1, \tmp2		// set the active ASID
+	isb
+	msr	ttbr0_el1, \tmp1		// set the non-PAN TTBR0_EL1
+	isb
+	.endm
+
+	.macro	uaccess_ttbr0_disable, tmp1, tmp2
+alternative_if_not ARM64_HAS_PAN
+	save_and_disable_irq \tmp2		// avoid preemption
+	__uaccess_ttbr0_disable \tmp1
+	restore_irq \tmp2
+alternative_else_nop_endif
+	.endm
+
+	.macro	uaccess_ttbr0_enable, tmp1, tmp2, tmp3
+alternative_if_not ARM64_HAS_PAN
+	save_and_disable_irq \tmp3		// avoid preemption
+	__uaccess_ttbr0_enable \tmp1, \tmp2
+	restore_irq \tmp3
+alternative_else_nop_endif
+	.endm
+#else
+	.macro	uaccess_ttbr0_disable, tmp1, tmp2
+	.endm
+
+	.macro	uaccess_ttbr0_enable, tmp1, tmp2, tmp3
+	.endm
+#endif
+
+/*
+ * These macros are no-ops when UAO is present.
+ */
+	.macro	uaccess_disable_not_uao, tmp1, tmp2
+	uaccess_ttbr0_disable \tmp1, \tmp2
+alternative_if ARM64_ALT_PAN_NOT_UAO
+	SET_PSTATE_PAN(1)
+alternative_else_nop_endif
+	.endm
+
+	.macro	uaccess_enable_not_uao, tmp1, tmp2, tmp3
+	uaccess_ttbr0_enable \tmp1, \tmp2, \tmp3
+alternative_if ARM64_ALT_PAN_NOT_UAO
+	SET_PSTATE_PAN(0)
+alternative_else_nop_endif
+	.endm
+
+#endif	/* __ASSEMBLY__ */
+
 #endif /* __ASM_UACCESS_H */
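
With ARCH_HAS_RELATIVE_EXTABLE above, each exception_table_entry stores two 32-bit offsets relative to its own fields rather than two 64-bit absolute addresses, halving the table. Turning the offsets back into addresses is plain pointer arithmetic, as in this sketch (hypothetical helpers; the real consumers are the generic extable search and fixup_exception()):

static inline unsigned long
ex_insn_addr(const struct exception_table_entry *e)
{
	return (unsigned long)&e->insn + e->insn;	/* self-relative */
}

static inline unsigned long
ex_fixup_addr(const struct exception_table_entry *e)
{
	return (unsigned long)&e->fixup + e->fixup;
}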
diff -ruw linux-4.4.115/arch/arm64/include/asm/vdso_datapage.h linux-4.4.115-fbx/arch/arm64/include/asm/vdso_datapage.h
--- linux-4.4.115/arch/arm64/include/asm/vdso_datapage.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/arch/arm64/include/asm/vdso_datapage.h	2019-01-22 16:16:21.543228657 +0100
@@ -22,6 +22,8 @@
 
 struct vdso_data {
 	__u64 cs_cycle_last;	/* Timebase at clocksource init */
+	__u64 raw_time_sec;	/* Raw time */
+	__u64 raw_time_nsec;
 	__u64 xtime_clock_sec;	/* Kernel time */
 	__u64 xtime_clock_nsec;
 	__u64 xtime_coarse_sec;	/* Coarse time */
@@ -29,8 +31,10 @@
 	__u64 wtm_clock_sec;	/* Wall to monotonic time */
 	__u64 wtm_clock_nsec;
 	__u32 tb_seq_count;	/* Timebase sequence counter */
-	__u32 cs_mult;		/* Clocksource multiplier */
-	__u32 cs_shift;		/* Clocksource shift */
+	/* cs_* members must be adjacent and in this order (ldp accesses) */
+	__u32 cs_mono_mult;	/* NTP-adjusted clocksource multiplier */
+	__u32 cs_shift;		/* Clocksource shift (mono = raw) */
+	__u32 cs_raw_mult;	/* Raw clocksource multiplier */
 	__u32 tz_minuteswest;	/* Whacky timezone stuff */
 	__u32 tz_dsttime;
 	__u32 use_syscall;
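
Splitting cs_mono_mult from cs_raw_mult above lets the vDSO serve CLOCK_MONOTONIC_RAW without a syscall, and keeping the cs_* fields adjacent lets a single ldp fetch a multiplier/shift pair. The conversion itself is the standard fixed-point clocksource formula; a standalone sketch:

#include <stdint.h>

/* ns elapsed = (cycles since last update * mult) >> shift */
static uint64_t cycles_to_ns(uint64_t cycle_now, uint64_t cycle_last,
			     uint32_t mult, uint32_t shift)
{
	return ((cycle_now - cycle_last) * mult) >> shift;
}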
diff -ruw linux-4.4.115/arch/arm64/include/asm/virt.h linux-4.4.115-fbx/arch/arm64/include/asm/virt.h
--- linux-4.4.115/arch/arm64/include/asm/virt.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/arch/arm64/include/asm/virt.h	2019-10-29 09:26:23.001196895 +0100
@@ -18,11 +18,29 @@
 #ifndef __ASM__VIRT_H
 #define __ASM__VIRT_H
 
+/*
+ * The arm64 hcall implementation uses x0 to specify the hcall type. A value
+ * less than 0xfff indicates a special hcall, such as get/set vector.
+ * Any other value is used as a pointer to the function to call.
+ */
+
+/* HVC_GET_VECTORS - Return the value of the vbar_el2 register. */
+#define HVC_GET_VECTORS 0
+
+/*
+ * HVC_SET_VECTORS - Set the value of the vbar_el2 register.
+ *
+ * @x1: Physical address of the new vector table.
+ */
+#define HVC_SET_VECTORS 1
+
 #define BOOT_CPU_MODE_EL1	(0xe11)
 #define BOOT_CPU_MODE_EL2	(0xe12)
 
 #ifndef __ASSEMBLY__
 
+#include <asm/ptrace.h>
+
 /*
  * __boot_cpu_mode records what mode CPUs were booted in.
  * A correctly-implemented bootloader must start all CPUs in the same mode:
@@ -50,6 +68,14 @@
 	return __boot_cpu_mode[0] != __boot_cpu_mode[1];
 }
 
+static inline bool is_kernel_in_hyp_mode(void)
+{
+	u64 el;
+
+	asm("mrs %0, CurrentEL" : "=r" (el));
+	return el == CurrentEL_EL2;
+}
+
 /* The section containing the hypervisor text */
 extern char __hyp_text_start[];
 extern char __hyp_text_end[];
diff -ruw linux-4.4.115/arch/arm64/include/asm/word-at-a-time.h linux-4.4.115-fbx/arch/arm64/include/asm/word-at-a-time.h
--- linux-4.4.115/arch/arm64/include/asm/word-at-a-time.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/arch/arm64/include/asm/word-at-a-time.h	2019-01-22 16:16:21.543228657 +0100
@@ -16,6 +16,8 @@
 #ifndef __ASM_WORD_AT_A_TIME_H
 #define __ASM_WORD_AT_A_TIME_H
 
+#include <asm/uaccess.h>
+
 #ifndef __AARCH64EB__
 
 #include <linux/kernel.h>
@@ -81,10 +83,7 @@
 #endif
 	"	b	2b\n"
 	"	.popsection\n"
-	"	.pushsection __ex_table,\"a\"\n"
-	"	.align	3\n"
-	"	.quad	1b, 3b\n"
-	"	.popsection"
+	_ASM_EXTABLE(1b, 3b)
 	: "=&r" (ret), "=&r" (offset)
 	: "r" (addr), "Q" (*(unsigned long *)addr));
 
diff -ruw linux-4.4.115/arch/arm64/include/uapi/asm/ptrace.h linux-4.4.115-fbx/arch/arm64/include/uapi/asm/ptrace.h
--- linux-4.4.115/arch/arm64/include/uapi/asm/ptrace.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/arch/arm64/include/uapi/asm/ptrace.h	2019-01-22 16:16:21.547228694 +0100
@@ -45,6 +45,7 @@
 #define PSR_A_BIT	0x00000100
 #define PSR_D_BIT	0x00000200
 #define PSR_PAN_BIT	0x00400000
+#define PSR_UAO_BIT	0x00800000
 #define PSR_Q_BIT	0x08000000
 #define PSR_V_BIT	0x10000000
 #define PSR_C_BIT	0x20000000
diff -ruw linux-4.4.115/arch/arm64/Kconfig linux-4.4.115-fbx/arch/arm64/Kconfig
--- linux-4.4.115/arch/arm64/Kconfig	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/arch/arm64/Kconfig	2019-10-29 09:32:37.124855595 +0100
@@ -13,6 +13,8 @@
 	select ARCH_WANT_OPTIONAL_GPIOLIB
 	select ARCH_WANT_COMPAT_IPC_PARSE_VERSION
 	select ARCH_WANT_FRAME_POINTERS
+	select ARCH_HAVE_CUSTOM_GPIO_H
+	select ARCH_HAS_UBSAN_SANITIZE_ALL
 	select ARM_AMBA
 	select ARM_ARCH_TIMER
 	select ARM_GIC
@@ -23,12 +25,12 @@
 	select ARM_PSCI_FW
 	select BUILDTIME_EXTABLE_SORT
 	select CLONE_BACKWARDS
-	select COMMON_CLK
+	select COMMON_CLK if !ARCH_QCOM
 	select CPU_PM if (SUSPEND || CPU_IDLE)
 	select DCACHE_WORD_ACCESS
 	select EDAC_SUPPORT
 	select FRAME_POINTER
 	select GENERIC_ALLOCATOR
 	select GENERIC_CLOCKEVENTS
 	select GENERIC_CLOCKEVENTS_BROADCAST
 	select GENERIC_CPU_AUTOPROBE
@@ -48,9 +51,13 @@
 	select HAVE_ALIGNED_STRUCT_PAGE if SLUB
 	select HAVE_ARCH_AUDITSYSCALL
 	select HAVE_ARCH_BITREVERSE
+	select HAVE_ARCH_HARDENED_USERCOPY
+	select HAVE_ARCH_HUGE_VMAP
 	select HAVE_ARCH_JUMP_LABEL
 	select HAVE_ARCH_KASAN if SPARSEMEM_VMEMMAP && !(ARM64_16K_PAGES && ARM64_VA_BITS_48)
 	select HAVE_ARCH_KGDB
+	select HAVE_ARCH_MMAP_RND_BITS
+	select HAVE_ARCH_MMAP_RND_COMPAT_BITS if COMPAT
 	select HAVE_ARCH_SECCOMP_FILTER
 	select HAVE_ARCH_TRACEHOOK
 	select HAVE_BPF_JIT
@@ -70,15 +77,18 @@
 	select HAVE_FUNCTION_TRACER
 	select HAVE_FUNCTION_GRAPH_TRACER
 	select HAVE_GENERIC_DMA_COHERENT
-	select HAVE_HW_BREAKPOINT if PERF_EVENTS
+	select HAVE_IRQ_TIME_ACCOUNTING
 	select HAVE_MEMBLOCK
 	select HAVE_PATA_PLATFORM
 	select HAVE_PERF_EVENTS
 	select HAVE_PERF_REGS
 	select HAVE_PERF_USER_STACK_DUMP
+	select HAVE_REGS_AND_STACK_ACCESS_API
 	select HAVE_RCU_TABLE_FREE
 	select HAVE_SYSCALL_TRACEPOINTS
-	select IOMMU_DMA if IOMMU_SUPPORT
+	select IOMMU_DMA if (IOMMU_SUPPORT && !ARCH_QCOM)
+	select HAVE_KPROBES
+	select HAVE_KRETPROBES if HAVE_KPROBES
 	select IRQ_DOMAIN
 	select IRQ_FORCED_THREADING
 	select MODULES_USE_ELF_RELA
@@ -93,6 +103,8 @@
 	select SPARSE_IRQ
 	select SYSCTL_EXCEPTION_TRACE
 	select HAVE_CONTEXT_TRACKING
+	select HAVE_ARM_SMCCC
+	select THREAD_INFO_IN_TASK
 	help
 	  ARM 64-bit (AArch64) Linux support.
 
@@ -105,9 +117,40 @@
 config MMU
 	def_bool y
 
+config ARCH_MMAP_RND_BITS_MIN
+       default 14 if ARM64_64K_PAGES
+       default 16 if ARM64_16K_PAGES
+       default 18
+
+# max bits determined by the following formula (worked example below):
+#  VA_BITS - PAGE_SHIFT - 3
+config ARCH_MMAP_RND_BITS_MAX
+       default 19 if ARM64_VA_BITS=36
+       default 24 if ARM64_VA_BITS=39
+       default 27 if ARM64_VA_BITS=42
+       default 30 if ARM64_VA_BITS=47
+       default 29 if ARM64_VA_BITS=48 && ARM64_64K_PAGES
+       default 31 if ARM64_VA_BITS=48 && ARM64_16K_PAGES
+       default 33 if ARM64_VA_BITS=48
+       default 14 if ARM64_64K_PAGES
+       default 16 if ARM64_16K_PAGES
+       default 18
+
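+# Worked example of the formula above, with PAGE_SHIFT = 12 for 4K
+# pages, 14 for 16K pages and 16 for 64K pages:
+#   ARM64_VA_BITS=39, 4K pages:  39 - 12 - 3 = 24  ("default 24")
+#   ARM64_VA_BITS=48, 64K pages: 48 - 16 - 3 = 29  ("default 29")
+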
+config ARCH_MMAP_RND_COMPAT_BITS_MIN
+       default 7 if ARM64_64K_PAGES
+       default 9 if ARM64_16K_PAGES
+       default 11
+
+config ARCH_MMAP_RND_COMPAT_BITS_MAX
+       default 16
+
 config NO_IOPORT_MAP
 	def_bool y if !PCI
 
+config ILLEGAL_POINTER_VALUE
+	hex
+	default 0xdead000000000000
+
 config STACKTRACE_SUPPORT
 	def_bool y
 
@@ -159,6 +202,32 @@
 config SMP
 	def_bool y
 
+config ARM64_DMA_USE_IOMMU
+	bool
+	select ARM_HAS_SG_CHAIN
+	select NEED_SG_DMA_LENGTH
+
+if ARM64_DMA_USE_IOMMU
+
+config ARM64_DMA_IOMMU_ALIGNMENT
+	int "Maximum PAGE_SIZE order of alignment for DMA IOMMU buffers"
+	range 4 9
+	default 9
+	help
+	  The DMA mapping framework by default aligns all buffers to the
+	  smallest PAGE_SIZE order which is greater than or equal to the
+	  requested buffer size. This works well for buffers up to a few
+	  hundred kilobytes, but for larger buffers it is just a waste of
+	  address space. Drivers which have a relatively small addressing
+	  window (like 64MiB) might run out of virtual space with just a
+	  few allocations.
+
+	  With this parameter you can specify the maximum PAGE_SIZE order for
+	  DMA IOMMU buffers. Larger buffers will be aligned only to this
+	  specified order. The order is expressed as a power of two multiplied
+	  by the PAGE_SIZE; a worked example follows below.
+
+endif
+
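+# Worked example for the range above, assuming 4K pages: the default
+# order of 9 caps IOMMU buffer alignment at 2^9 * PAGE_SIZE = 2MiB, so
+# e.g. a 3MiB buffer is aligned to 2MiB rather than rounded up to 4MiB.
+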
 config SWIOTLB
 	def_bool y
 
@@ -180,6 +249,15 @@
 	default 3 if ARM64_16K_PAGES && ARM64_VA_BITS_47
 	default 4 if !ARM64_64K_PAGES && ARM64_VA_BITS_48
 
+config MSM_GVM_QUIN
+	bool "Enable virtualization support for MSM kernel required for QUIN platform"
+	help
+	  This enables support for an MSM-kernel-based virtual machine
+	  on the QUIN platform, and enables the associated virtual driver
+	  support. This should work on 64-bit machines.
+	  If you don't know what to do here, say N.
+
 source "init/Kconfig"
 
 source "kernel/Kconfig.freezer"
@@ -363,6 +441,7 @@
 	bool "Cortex-A53: 843419: A load or store might access an incorrect address"
 	depends on MODULES
 	default y
+	select ARM64_MODULE_CMODEL_LARGE
 	help
 	  This option builds kernel modules using the large memory model in
 	  order to avoid the use of the ADRP instruction, which can cause
@@ -443,6 +522,25 @@
 	  requires applications compiled with 16K (or a multiple of 16K)
 	  aligned segments.
 
+config ARM64_DCACHE_DISABLE
+	bool "Disable CPU Data Caches"
+	help
+	  Disable CPU data cache usage by setting the SCTLR[C] bit during
+	  kernel initialization. This will result in a considerable
+	  performance impact, but may be useful in certain situations.
+
+	  If you are not sure what to do, select 'N' here.
+
+config ARM64_ICACHE_DISABLE
+	bool "Disable CPU Instruction Caches"
+	help
+	  Disable CPU instruction cache usage by setting the SCTLR[I]
+	  bit during kernel initialization. This will result in a
+	  considerable performance impact, but may be useful in certain
+	  situations.
+
+	  If you are not sure what to do, select 'N' here.
+
 config ARM64_64K_PAGES
 	bool "64KB"
 	help
@@ -453,6 +551,37 @@
 
 endchoice
 
+config MSM_APP_API
+	bool "API support to enable / disable app settings for MSM8996"
+	depends on ARCH_MSM8996 && (ENABLE_FP_SIMD_SETTINGS || MSM_APP_SETTINGS)
+	help
+	  Add API support to enable / disable the app settings to be used
+	  at runtime. These APIs are used to enable / disable app settings
+	  when specific aarch32 or aarch64 processes are running.
+
+	  If you are not sure what to do, select 'N' here.
+
+config ENABLE_FP_SIMD_SETTINGS
+	bool "Enable FP (Floating Point) settings for Qualcomm MSM8996"
+	depends on ARCH_MSM8996
+	select MSM_APP_API
+	help
+	  Enable FP (Floating Point) and SIMD settings for the MSM8996 during
+	  the execution of aarch32 processes and disable these settings
+	  when switching to aarch64 processes.
+
+	  If you are not sure what to do, select 'N' here.
+
+config MSM_APP_SETTINGS
+	bool "Support to enable / disable app settings for MSM8996"
+	depends on ARCH_MSM8996
+	select MSM_APP_API
+	help
+	  Expose an interface used by the userspace at runtime to
+	  enable / disable the app specific settings.
+
+	  If you are not sure what to do, select 'N' here.
+
 choice
 	prompt "Virtual address space size"
 	default ARM64_VA_BITS_39 if ARM64_4K_PAGES
@@ -524,9 +653,38 @@
 	  Say Y here to experiment with turning CPUs off and on.  CPUs
 	  can be controlled through /sys/devices/system/cpu.
 
+config ARCH_ENABLE_MEMORY_HOTPLUG
+	depends on !NUMA
+	def_bool y
+
+config ARCH_ENABLE_MEMORY_HOTREMOVE
+	def_bool y
+
+# The GPIO number defaults here must be sorted in descending order. In
+# case of a multiplatform kernel, we just want the highest value
+# required by the selected platforms.
+config ARCH_NR_GPIO
+	int
+	default 1024 if ARCH_TEGRA
+	default 1024 if ARCH_QCOM
+	default 256
+	help
+	  Maximum number of GPIOs in the system.
+
+	  If unsure, leave the default value.
+
+config QCOM_TLB_EL2_HANDLER
+	bool "Raise TLB conflict exception to EL2"
+	help
+	  This option enables TLB conflict exceptions to be
+	  handled at EL2.
+
 source kernel/Kconfig.preempt
 source kernel/Kconfig.hz
 
+config ARCH_SUPPORTS_DEBUG_PAGEALLOC
+	def_bool y
+
 config ARCH_HAS_HOLES_MEMORYMODEL
 	def_bool y if SPARSEMEM
 
@@ -547,10 +705,34 @@
 	def_bool y
 	depends on ARM_PMU
 
-config SYS_SUPPORTS_HUGETLBFS
-	def_bool y
+config ARM64_REG_REBALANCE_ON_CTX_SW
+	bool "Rebalance registers during context switches"
+	default ARCH_MSM8996
+	help
+	  Forcefully re-balance register rename pools on context switches for
+	  improved performance on some devices.
+
+config PERF_EVENTS_USERMODE
+	bool "Enable usermode access for perf events"
+	depends on PERF_EVENTS
+	help
+	  Enable user-mode access to performance counters for perf events.
+	  If enabled, the access permissions that allow CPU performance
+	  counters to be accessed from user mode are set.
+
+	  If you want user-mode programs to access perf events, say Y.
+
+config PERF_EVENTS_RESET_PMU_DEBUGFS
+	bool "Reset PMU via debugfs node"
+	depends on PERF_EVENTS
+	help
+	  Enable the debugfs node that can be used to reset PMUs and all
+	  state variables associated with PMUs. If enabled, the PMUs and
+	  their internal state variables are cleared.
+
+	  If you want to reset PMUs and PMU-related internal perf variables
+	  via debugfs, say Y.
 
-config ARCH_WANT_GENERAL_HUGETLB
+config SYS_SUPPORTS_HUGETLBFS
 	def_bool y
 
 config ARCH_WANT_HUGE_PMD_SHARE
@@ -564,6 +746,10 @@
 
 source "mm/Kconfig"
 
+config ARCH_MEMORY_PROBE
+	def_bool y
+	depends on MEMORY_HOTPLUG
+
 config SECCOMP
 	bool "Enable seccomp to safely compute untrusted bytecode"
 	---help---
@@ -611,6 +797,35 @@
 	  However for 4K, we choose a higher default value, 11 as opposed to 10, giving us
 	  4M allocations matching the default size used by generic code.
 
+config UNMAP_KERNEL_AT_EL0
+	bool "Unmap kernel when running in userspace (aka \"KAISER\")" if EXPERT
+	default y
+	help
+	  Speculation attacks against some high-performance processors can
+	  be used to bypass MMU permission checks and leak kernel data to
+	  userspace. This can be defended against by unmapping the kernel
+	  when running in userspace, mapping it back in on exception entry
+	  via a trampoline page in the vector table.
+
+	  If unsure, say Y.
+
+config HARDEN_BRANCH_PREDICTOR
+	bool "Harden the branch predictor against aliasing attacks" if EXPERT
+	default y
+	help
+	  Speculation attacks against some high-performance processors rely on
+	  being able to manipulate the branch predictor for a victim context by
+	  executing aliasing branches in the attacker context.  Such attacks
+	  can be partially mitigated against by clearing internal branch
+	  predictor state and limiting the prediction logic in some situations.
+
+	  This config option will take CPU-specific actions to harden the
+	  branch predictor against aliasing attacks and may rely on specific
+	  instruction sequences or control bits being set by the system
+	  firmware.
+
+	  If unsure, say Y.
+
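+# Usage note (hedged): parse_kpti() in cpufeature.c later in this patch
+# lets the command line override the default, e.g. "kpti=1" forces the
+# unmap on even without KASLR and "kpti=0" forces it off.
+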
 menuconfig ARMV8_DEPRECATED
 	bool "Emulate deprecated/obsolete ARMv8 instructions"
 	depends on COMPAT
@@ -678,11 +893,18 @@
 	  If unsure, say Y
 endif
 
+config ARM64_SW_TTBR0_PAN
+	bool "Emulate Privileged Access Never using TTBR0_EL1 switching"
+	help
+	  Enabling this option prevents the kernel from accessing
+	  user-space memory directly by pointing TTBR0_EL1 to a reserved
+	  zeroed area and reserved ASID. The user access routines
+	  restore the valid TTBR0_EL1 temporarily.
+
 menu "ARMv8.1 architectural features"
 
 config ARM64_HW_AFDBM
 	bool "Support for hardware updates of the Access and Dirty page flags"
-	default y
 	help
 	  The ARMv8.1 architecture extensions introduce support for
 	  hardware updates of the access and dirty information in page
@@ -699,7 +921,6 @@
 
 config ARM64_PAN
 	bool "Enable support for Privileged Access Never (PAN)"
-	default y
 	help
 	 Privileged Access Never (PAN; part of the ARMv8.1 Extensions)
 	 prevents the kernel or hypervisor from accessing user-space (EL0)
@@ -725,10 +946,92 @@
 
 endmenu
 
+config ARM64_UAO
+	bool "Enable support for User Access Override (UAO)"
+	help
+	  User Access Override (UAO; part of the ARMv8.2 Extensions)
+	  causes the 'unprivileged' variant of the load/store instructions to
+	  be overridden to be privileged.
+
+	  This option changes get_user() and friends to use the 'unprivileged'
+	  variant of the load/store instructions. This ensures that user-space
+	  really did have access to the supplied memory. When addr_limit is
+	  set to kernel memory the UAO bit will be set, allowing privileged
+	  access to kernel memory.
+
+	  Choosing this option will cause copy_to_user() et al to use user-space
+	  memory permissions.
+
+	  The feature is detected at runtime; the kernel will use the
+	  regular load/store instructions if the CPU does not implement the
+	  feature.
+
+config ARM64_MODULE_CMODEL_LARGE
+	bool
+
+config ARM64_MODULE_PLTS
+	bool
+	select ARM64_MODULE_CMODEL_LARGE
+	select HAVE_MOD_ARCH_SPECIFIC
+
+config RELOCATABLE
+	bool
+	help
+	  This builds the kernel as a Position Independent Executable (PIE),
+	  which retains all relocation metadata required to relocate the
+	  kernel binary at runtime to a different virtual address than the
+	  address it was linked at.
+	  Since AArch64 uses the RELA relocation format, this requires a
+	  relocation pass at runtime even if the kernel is loaded at the
+	  same address it was linked at.
+
+config RANDOMIZE_BASE
+	bool "Randomize the address of the kernel image"
+	select ARM64_MODULE_PLTS if MODULES
+	select RELOCATABLE
+	help
+	  Randomizes the virtual address at which the kernel image is
+	  loaded, as a security feature that deters exploit attempts
+	  relying on knowledge of the location of kernel internals.
+
+	  It is the bootloader's job to provide entropy, by passing a
+	  random u64 value in /chosen/kaslr-seed at kernel entry.
+
+	  When booting via the UEFI stub, it will invoke the firmware's
+	  EFI_RNG_PROTOCOL implementation (if available) to supply entropy
+	  to the kernel proper. In addition, it will randomize the physical
+	  location of the kernel Image as well.
+
+	  If unsure, say N.
+
+config RANDOMIZE_MODULE_REGION_FULL
+	bool "Randomize the module region independently from the core kernel"
+	depends on RANDOMIZE_BASE && !DYNAMIC_FTRACE
+	default y
+	help
+	  Randomizes the location of the module region without considering the
+	  location of the core kernel. This way, it is impossible for modules
+	  to leak information about the location of core kernel data structures
+	  but it does imply that function calls between modules and the core
+	  kernel will need to be resolved via veneers in the module PLT.
+
+	  When this option is not set, the module region will be randomized over
+	  a limited range that contains the [_stext, _etext] interval of the
+	  core kernel, so branch relocations are always in range.
+
 endmenu
 
 menu "Boot options"
 
+config ARM64_ACPI_PARKING_PROTOCOL
+	bool "Enable support for the ARM64 ACPI parking protocol"
+	depends on ACPI
+	help
+	  Enable support for the ARM64 ACPI parking protocol. If disabled
+	  the kernel will not allow booting through the ARM64 ACPI parking
+	  protocol even if the corresponding data is present in the ACPI
+	  MADT table.
+
 config CMDLINE
 	string "Default kernel command string"
 	default ""
@@ -737,6 +1040,23 @@
 	  entering them here. As a minimum, you should specify the
 	  root device (e.g. root=/dev/nfs).
 
+choice
+	prompt "Kernel command line type" if CMDLINE != ""
+	default CMDLINE_FROM_BOOTLOADER
+
+config CMDLINE_FROM_BOOTLOADER
+	bool "Use bootloader kernel arguments if available"
+	help
+	  Uses the command-line options passed by the boot loader. If
+	  the boot loader doesn't provide any, the default kernel command
+	  string provided in CMDLINE will be used.
+
+config CMDLINE_EXTEND
+	bool "Extend bootloader kernel arguments"
+	help
+	  The command-line arguments provided by the boot loader will be
+	  appended to the default kernel command string.
+
 config CMDLINE_FORCE
 	bool "Always use the default kernel command string"
 	help
@@ -744,6 +1064,7 @@
 	  loader passes other arguments to the kernel.
 	  This is useful if you cannot or don't want to change the
 	  command-line options your boot loader passes to the kernel.
+endchoice
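+
+# Worked example with hypothetical values, taking CMDLINE="root=/dev/nfs"
+# and a bootloader passing "console=ttyMSM0":
+#   CMDLINE_FROM_BOOTLOADER -> "console=ttyMSM0"
+#   CMDLINE_EXTEND          -> "root=/dev/nfs console=ttyMSM0"
+#   CMDLINE_FORCE           -> "root=/dev/nfs"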
 
 config EFI_STUB
 	bool
@@ -776,6 +1097,46 @@
 	  However, even with this option, the resultant kernel should
 	  continue to boot on existing non-UEFI platforms.
 
+config BUILD_ARM64_APPENDED_DTB_IMAGE
+	bool "Build a concatenated Image.gz/dtb by default"
+	depends on OF
+	help
+	  Enabling this option will cause a concatenated Image.gz and list of
+	  DTBs to be built by default (instead of a standalone Image.gz).
+	  The image will be built as arch/arm64/boot/Image.gz-dtb.
+
+choice
+	prompt "Appended DTB Kernel Image name"
+	depends on BUILD_ARM64_APPENDED_DTB_IMAGE
+	help
+	  Enabling this option will cause a specific kernel image (Image or
+	  Image.gz) to be used for final image creation.
+	  The image will be built as arch/arm64/boot/IMAGE-NAME-dtb.
+
+	config IMG_GZ_DTB
+		bool "Image.gz-dtb"
+	config IMG_DTB
+		bool "Image-dtb"
+endchoice
+
+config BUILD_ARM64_APPENDED_KERNEL_IMAGE_NAME
+	string
+	depends on BUILD_ARM64_APPENDED_DTB_IMAGE
+	default "Image.gz-dtb" if IMG_GZ_DTB
+	default "Image-dtb" if IMG_DTB
+
+config BUILD_ARM64_APPENDED_DTB_IMAGE_NAMES
+	string "Default dtb names"
+	depends on BUILD_ARM64_APPENDED_DTB_IMAGE
+	help
+	  Space-separated list of names of DTBs to append when
+	  building a concatenated Image.gz-dtb.
+
+config BUILD_ARM64_DT_OVERLAY
+	bool "Enable DT overlay compilation support"
+	depends on OF
+	help
+	  This option enables support for DT overlay compilation.
 endmenu
 
 menu "Userspace binary formats"
@@ -811,6 +1172,14 @@
 
 source "kernel/power/Kconfig"
 
+config ARCH_HIBERNATION_POSSIBLE
+	def_bool y
+	depends on CPU_PM
+
+config ARCH_HIBERNATION_HEADER
+	def_bool y
+	depends on HIBERNATION
+
 config ARCH_SUSPEND_POSSIBLE
 	def_bool y
 
diff -ruw linux-4.4.115/arch/arm64/Kconfig.debug linux-4.4.115-fbx/arch/arm64/Kconfig.debug
--- linux-4.4.115/arch/arm64/Kconfig.debug	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/arch/arm64/Kconfig.debug	2019-01-22 16:16:21.523228476 +0100
@@ -64,16 +64,16 @@
 
 config DEBUG_RODATA
 	bool "Make kernel text and rodata read-only"
+	default y
 	help
 	  If this is set, kernel text and rodata will be made read-only. This
 	  is to help catch accidental or malicious attempts to change the
-	  kernel's executable code. Additionally splits rodata from kernel
-	  text so it can be made explicitly non-executable.
+	  kernel's executable code.
 
           If in doubt, say Y
 
 config DEBUG_ALIGN_RODATA
-	depends on DEBUG_RODATA && ARM64_4K_PAGES
+	depends on DEBUG_RODATA
 	bool "Align linker sections up to SECTION_SIZE"
 	help
 	  If this option is enabled, sections that may potentially be marked as
@@ -85,6 +85,46 @@
 
 	  If in doubt, say N
 
+config FORCE_PAGES
+	bool "Force lowmem to be mapped with 4K pages"
+	help
+	  There are some advanced debug features that can only be used when
+	  memory is mapped with pages instead of sections. Enable this option
+	  to always map lowmem with pages. This may have a performance cost
+	  due to increased TLB pressure.
+
+	  If unsure, say N.
+
+config FREE_PAGES_RDONLY
+	bool "Set pages as read only while on the buddy list"
+	select FORCE_PAGES
+	select DEBUG_PAGEALLOC
+	help
+	  Pages are always mapped in the kernel. This means that anyone
+	  can write to the page if they have the address. Enable this option
+	  to mark pages as read only to trigger a fault if any code attempts
+	  to write to a page on the buddy list. This may have a performance
+	  impact.
+
+	  If unsure, say N.
+
+config KERNEL_TEXT_RDONLY
+	bool "Set kernel text section pages as read only"
+	depends on FREE_PAGES_RDONLY
+	depends on !DEBUG_RODATA
+	help
+	  The kernel text pages are always mapped in the kernel.
+	  This means that anyone can write to the page if they have
+	  the address. Enable this option to mark the kernel text pages
+	  as read only to trigger a fault if any code attempts to write
+	  to a page that is part of the kernel text section. This may
+	  have a performance impact.
+
+	  If unsure, say N.
+
+config ARM64_STRICT_BREAK_BEFORE_MAKE
+	bool "Enforce strict break-before-make on page table updates"
+
 source "drivers/hwtracing/coresight/Kconfig"
 
 endmenu
diff -ruw linux-4.4.115/arch/arm64/Kconfig.platforms linux-4.4.115-fbx/arch/arm64/Kconfig.platforms
--- linux-4.4.115/arch/arm64/Kconfig.platforms	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/arch/arm64/Kconfig.platforms	2019-10-29 09:26:22.989196778 +0100
@@ -49,9 +49,65 @@
 config ARCH_QCOM
 	bool "Qualcomm Platforms"
 	select PINCTRL
+	select CLKDEV_LOOKUP
+	select HAVE_CLK
+	select HAVE_CLK_PREPARE
+	select PM_OPP
+	select SOC_BUS
+	select MSM_IRQ
+	select THERMAL_WRITABLE_TRIPS
+	select RATIONAL
+	select ARCH_HAS_RESET_CONTROLLER
 	help
 	  This enables support for the ARMv8 based Qualcomm chipsets.
 
+config ARCH_MSM8996
+	bool "Enable support for Qualcomm MSM8996"
+	depends on ARCH_QCOM
+	select COMMON_CLK_MSM
+	help
+	  This enables support for the MSM8996 chipset. If you do not
+	  wish to build a kernel that runs on this chipset, say 'N' here.
+
+config ARCH_MSM8998
+	bool "Enable support for Qualcomm MSM8998"
+	depends on ARCH_QCOM
+	select COMMON_CLK_MSM
+	help
+	  This enables support for the MSM8998 chipset. If you do not
+	  wish to build a kernel that runs on this chipset, say 'N' here.
+
+config ARCH_MSMHAMSTER
+	bool "Enable support for Qualcomm Technologies Inc MSMHAMSTER"
+	depends on ARCH_QCOM
+	select COMMON_CLK_MSM
+	help
+	  This enables support for the MSMHAMSTER chipset.
+	  If you do not wish to build a kernel that runs
+	  on this chipset, say 'N' here.
+
+config ARCH_SDM660
+	bool "Enable support for Qualcomm Technologies Inc SDM660"
+	depends on ARCH_QCOM
+	select COMMON_CLK
+	select COMMON_CLK_QCOM
+	select QCOM_GDSC
+	help
+	  This enables support for the SDM660 chipset.
+	  If you do not wish to build a kernel that runs
+	  on this chipset, say 'N' here.
+
+config ARCH_SDM630
+	bool "Enable support for Qualcomm Technologies Inc SDM630"
+	depends on ARCH_QCOM
+	select COMMON_CLK
+	select COMMON_CLK_QCOM
+	select QCOM_GDSC
+	help
+	  This enables support for the SDM630 chipset.
+	  If you do not wish to build a kernel that runs
+	  on this chipset, say 'N' here.
+
 config ARCH_ROCKCHIP
 	bool "Rockchip Platforms"
 	select ARCH_HAS_RESET_CONTROLLER
diff -ruw linux-4.4.115/arch/arm64/kernel/alternative.c linux-4.4.115-fbx/arch/arm64/kernel/alternative.c
--- linux-4.4.115/arch/arm64/kernel/alternative.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/arch/arm64/kernel/alternative.c	2019-10-29 09:26:23.001196895 +0100
@@ -108,7 +108,7 @@
 
 		for (i = 0; i < nr_inst; i++) {
 			insn = get_alt_insn(alt, origptr + i, replptr + i);
-			*(origptr + i) = cpu_to_le32(insn);
+			BUG_ON(aarch64_insn_patch_text_nosync(origptr + i, insn));
 		}
 
 		flush_icache_range((uintptr_t)origptr,
@@ -158,9 +158,3 @@
 
 	__apply_alternatives(&region);
 }
-
-void free_alternatives_memory(void)
-{
-	free_reserved_area(__alt_instructions, __alt_instructions_end,
-			   0, "alternatives");
-}
diff -ruw linux-4.4.115/arch/arm64/kernel/arm64ksyms.c linux-4.4.115-fbx/arch/arm64/kernel/arm64ksyms.c
--- linux-4.4.115/arch/arm64/kernel/arm64ksyms.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/arch/arm64/kernel/arm64ksyms.c	2019-01-22 16:16:21.547228694 +0100
@@ -26,15 +26,18 @@
 #include <linux/syscalls.h>
 #include <linux/uaccess.h>
 #include <linux/io.h>
+#include <linux/kprobes.h>
+#include <linux/arm-smccc.h>
 
+#include <asm/cacheflush.h>
 #include <asm/checksum.h>
 
 EXPORT_SYMBOL(copy_page);
 EXPORT_SYMBOL(clear_page);
 
 	/* user mem (segment) */
-EXPORT_SYMBOL(__copy_from_user);
-EXPORT_SYMBOL(__copy_to_user);
+EXPORT_SYMBOL(__arch_copy_from_user);
+EXPORT_SYMBOL(__arch_copy_to_user);
 EXPORT_SYMBOL(__clear_user);
 EXPORT_SYMBOL(__copy_in_user);
 
@@ -67,4 +70,13 @@
 
 #ifdef CONFIG_FUNCTION_TRACER
 EXPORT_SYMBOL(_mcount);
+NOKPROBE_SYMBOL(_mcount);
 #endif
+	/* caching functions */
+EXPORT_SYMBOL(__dma_inv_range);
+EXPORT_SYMBOL(__dma_clean_range);
+EXPORT_SYMBOL(__dma_flush_range);
+
+	/* arm-smccc */
+EXPORT_SYMBOL(arm_smccc_smc);
+EXPORT_SYMBOL(arm_smccc_hvc);
diff -ruw linux-4.4.115/arch/arm64/kernel/armv8_deprecated.c linux-4.4.115-fbx/arch/arm64/kernel/armv8_deprecated.c
--- linux-4.4.115/arch/arm64/kernel/armv8_deprecated.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/arch/arm64/kernel/armv8_deprecated.c	2019-01-22 16:16:21.547228694 +0100
@@ -14,7 +14,6 @@
 #include <linux/slab.h>
 #include <linux/sysctl.h>
 
-#include <asm/alternative.h>
 #include <asm/cpufeature.h>
 #include <asm/insn.h>
 #include <asm/opcodes.h>
@@ -62,7 +61,7 @@
 };
 
 static LIST_HEAD(insn_emulation);
-static int nr_insn_emulated;
+static int nr_insn_emulated __initdata;
 static DEFINE_RAW_SPINLOCK(insn_emulation_lock);
 
 static void register_emulation_hooks(struct insn_emulation_ops *ops)
@@ -173,7 +172,7 @@
 	return ret;
 }
 
-static void register_insn_emulation(struct insn_emulation_ops *ops)
+static void __init register_insn_emulation(struct insn_emulation_ops *ops)
 {
 	unsigned long flags;
 	struct insn_emulation *insn;
@@ -237,7 +236,7 @@
 	{ }
 };
 
-static void register_insn_emulation_sysctl(struct ctl_table *table)
+static void __init register_insn_emulation_sysctl(struct ctl_table *table)
 {
 	unsigned long flags;
 	int i = 0;
@@ -281,9 +280,9 @@
  * Error-checking SWP macros implemented using ldxr{b}/stxr{b}
  */
 #define __user_swpX_asm(data, addr, res, temp, B)		\
+do {								\
+	uaccess_enable();					\
 	__asm__ __volatile__(					\
-	ALTERNATIVE("nop", SET_PSTATE_PAN(0), ARM64_HAS_PAN,	\
-		    CONFIG_ARM64_PAN)				\
 	"0:	ldxr"B"		%w2, [%3]\n"			\
 	"1:	stxr"B"		%w0, %w1, [%3]\n"		\
 	"	cbz		%w0, 2f\n"			\
@@ -297,17 +296,14 @@
 	"4:	mov		%w0, %w5\n"			\
 	"	b		3b\n"				\
 	"	.popsection"					\
-	"	.pushsection	 __ex_table,\"a\"\n"		\
-	"	.align		3\n"				\
-	"	.quad		0b, 4b\n"			\
-	"	.quad		1b, 4b\n"			\
-	"	.popsection\n"					\
-	ALTERNATIVE("nop", SET_PSTATE_PAN(1), ARM64_HAS_PAN,	\
-		CONFIG_ARM64_PAN)				\
+	_ASM_EXTABLE(0b, 4b)					\
+	_ASM_EXTABLE(1b, 4b)					\
 	: "=&r" (res), "+r" (data), "=&r" (temp)		\
 	: "r" ((unsigned long)addr), "i" (-EAGAIN),		\
 	  "i" (-EFAULT)						\
-	: "memory")
+	: "memory");						\
+	uaccess_disable();					\
+} while (0)
 
 #define __user_swp_asm(data, addr, res, temp) \
 	__user_swpX_asm(data, addr, res, temp, "")
@@ -370,6 +366,21 @@
 	return res;
 }
 
+#define	ARM_OPCODE_CONDITION_UNCOND	0xf
+
+static unsigned int __kprobes aarch32_check_condition(u32 opcode, u32 psr)
+{
+	u32 cc_bits  = opcode >> 28;
+
+	if (cc_bits != ARM_OPCODE_CONDITION_UNCOND) {
+		if ((*aarch32_opcode_cond_checks[cc_bits])(psr))
+			return ARM_OPCODE_CONDTEST_PASS;
+		else
+			return ARM_OPCODE_CONDTEST_FAIL;
+	}
+	return ARM_OPCODE_CONDTEST_UNCOND;
+}
+
 /*
  * swp_handler logs the id of calling process, dissects the instruction, sanity
  * checks the memory location, calls emulate_swpX for the actual operation and
@@ -384,7 +395,7 @@
 
 	type = instr & TYPE_SWPB;
 
-	switch (arm_check_condition(instr, regs->pstate)) {
+	switch (aarch32_check_condition(instr, regs->pstate)) {
 	case ARM_OPCODE_CONDTEST_PASS:
 		break;
 	case ARM_OPCODE_CONDTEST_FAIL:
@@ -465,7 +476,7 @@
 {
 	perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, regs->pc);
 
-	switch (arm_check_condition(instr, regs->pstate)) {
+	switch (aarch32_check_condition(instr, regs->pstate)) {
 	case ARM_OPCODE_CONDTEST_PASS:
 		break;
 	case ARM_OPCODE_CONDTEST_FAIL:
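For reference, a worked decode of the check that aarch32_check_condition() performs above: bits [31:28] of an A32 opcode hold the condition field, and 0xf selects the unconditional encoding space. The opcode below is a made-up example of "swp r0, r2, [r1]" with condition AL:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint32_t opcode = 0xe1010092;		/* hypothetical SWP, cond AL */
	uint32_t cc_bits = opcode >> 28;	/* condition field */

	assert(cc_bits == 0xe);	/* AL: the condition test always passes */
	assert(cc_bits != 0xf);	/* 0xf would take the UNCOND path above */
	return 0;
}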
diff -ruw linux-4.4.115/arch/arm64/kernel/asm-offsets.c linux-4.4.115-fbx/arch/arm64/kernel/asm-offsets.c
--- linux-4.4.115/arch/arm64/kernel/asm-offsets.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/arch/arm64/kernel/asm-offsets.c	2019-01-22 16:16:21.547228694 +0100
@@ -22,22 +22,33 @@
 #include <linux/mm.h>
 #include <linux/dma-mapping.h>
 #include <linux/kvm_host.h>
+#include <linux/suspend.h>
+#include <asm/fixmap.h>
 #include <asm/thread_info.h>
 #include <asm/memory.h>
 #include <asm/smp_plat.h>
 #include <asm/suspend.h>
 #include <asm/vdso_datapage.h>
 #include <linux/kbuild.h>
+#include <linux/arm-smccc.h>
 
 int main(void)
 {
   DEFINE(TSK_ACTIVE_MM,		offsetof(struct task_struct, active_mm));
   BLANK();
+#ifdef CONFIG_THREAD_INFO_IN_TASK
+  DEFINE(TSK_TI_FLAGS,		offsetof(struct task_struct, thread_info.flags));
+  DEFINE(TSK_TI_PREEMPT,	offsetof(struct task_struct, thread_info.preempt_count));
+  DEFINE(TSK_TI_ADDR_LIMIT,	offsetof(struct task_struct, thread_info.addr_limit));
+  DEFINE(TSK_STACK,		offsetof(struct task_struct, stack));
+#else
   DEFINE(TI_FLAGS,		offsetof(struct thread_info, flags));
   DEFINE(TI_PREEMPT,		offsetof(struct thread_info, preempt_count));
   DEFINE(TI_ADDR_LIMIT,		offsetof(struct thread_info, addr_limit));
-  DEFINE(TI_TASK,		offsetof(struct thread_info, task));
-  DEFINE(TI_CPU,		offsetof(struct thread_info, cpu));
+#endif
+#ifdef CONFIG_ARM64_SW_TTBR0_PAN
+  DEFINE(TSK_TI_TTBR0,		offsetof(struct thread_info, ttbr0));
+#endif
   BLANK();
   DEFINE(THREAD_CPU_CONTEXT,	offsetof(struct task_struct, thread.cpu_context));
   BLANK();
@@ -49,6 +60,17 @@
   DEFINE(S_X5,			offsetof(struct pt_regs, regs[5]));
   DEFINE(S_X6,			offsetof(struct pt_regs, regs[6]));
   DEFINE(S_X7,			offsetof(struct pt_regs, regs[7]));
+  DEFINE(S_X8,			offsetof(struct pt_regs, regs[8]));
+  DEFINE(S_X10,			offsetof(struct pt_regs, regs[10]));
+  DEFINE(S_X12,			offsetof(struct pt_regs, regs[12]));
+  DEFINE(S_X14,			offsetof(struct pt_regs, regs[14]));
+  DEFINE(S_X16,			offsetof(struct pt_regs, regs[16]));
+  DEFINE(S_X18,			offsetof(struct pt_regs, regs[18]));
+  DEFINE(S_X20,			offsetof(struct pt_regs, regs[20]));
+  DEFINE(S_X22,			offsetof(struct pt_regs, regs[22]));
+  DEFINE(S_X24,			offsetof(struct pt_regs, regs[24]));
+  DEFINE(S_X26,			offsetof(struct pt_regs, regs[26]));
+  DEFINE(S_X28,			offsetof(struct pt_regs, regs[28]));
   DEFINE(S_LR,			offsetof(struct pt_regs, regs[30]));
   DEFINE(S_SP,			offsetof(struct pt_regs, sp));
 #ifdef CONFIG_COMPAT
@@ -76,6 +98,7 @@
   BLANK();
   DEFINE(CLOCK_REALTIME,	CLOCK_REALTIME);
   DEFINE(CLOCK_MONOTONIC,	CLOCK_MONOTONIC);
+  DEFINE(CLOCK_MONOTONIC_RAW,	CLOCK_MONOTONIC_RAW);
   DEFINE(CLOCK_REALTIME_RES,	MONOTONIC_RES_NSEC);
   DEFINE(CLOCK_REALTIME_COARSE,	CLOCK_REALTIME_COARSE);
   DEFINE(CLOCK_MONOTONIC_COARSE,CLOCK_MONOTONIC_COARSE);
@@ -83,6 +106,8 @@
   DEFINE(NSEC_PER_SEC,		NSEC_PER_SEC);
   BLANK();
   DEFINE(VDSO_CS_CYCLE_LAST,	offsetof(struct vdso_data, cs_cycle_last));
+  DEFINE(VDSO_RAW_TIME_SEC,	offsetof(struct vdso_data, raw_time_sec));
+  DEFINE(VDSO_RAW_TIME_NSEC,	offsetof(struct vdso_data, raw_time_nsec));
   DEFINE(VDSO_XTIME_CLK_SEC,	offsetof(struct vdso_data, xtime_clock_sec));
   DEFINE(VDSO_XTIME_CLK_NSEC,	offsetof(struct vdso_data, xtime_clock_nsec));
   DEFINE(VDSO_XTIME_CRS_SEC,	offsetof(struct vdso_data, xtime_coarse_sec));
@@ -90,7 +115,8 @@
   DEFINE(VDSO_WTM_CLK_SEC,	offsetof(struct vdso_data, wtm_clock_sec));
   DEFINE(VDSO_WTM_CLK_NSEC,	offsetof(struct vdso_data, wtm_clock_nsec));
   DEFINE(VDSO_TB_SEQ_COUNT,	offsetof(struct vdso_data, tb_seq_count));
-  DEFINE(VDSO_CS_MULT,		offsetof(struct vdso_data, cs_mult));
+  DEFINE(VDSO_CS_MONO_MULT,	offsetof(struct vdso_data, cs_mono_mult));
+  DEFINE(VDSO_CS_RAW_MULT,	offsetof(struct vdso_data, cs_raw_mult));
   DEFINE(VDSO_CS_SHIFT,		offsetof(struct vdso_data, cs_shift));
   DEFINE(VDSO_TZ_MINWEST,	offsetof(struct vdso_data, tz_minuteswest));
   DEFINE(VDSO_TZ_DSTTIME,	offsetof(struct vdso_data, tz_dsttime));
@@ -104,63 +130,39 @@
   DEFINE(TZ_MINWEST,		offsetof(struct timezone, tz_minuteswest));
   DEFINE(TZ_DSTTIME,		offsetof(struct timezone, tz_dsttime));
   BLANK();
+#ifdef CONFIG_THREAD_INFO_IN_TASK
+  DEFINE(CPU_BOOT_STACK,	offsetof(struct secondary_data, stack));
+  DEFINE(CPU_BOOT_TASK,		offsetof(struct secondary_data, task));
+  BLANK();
+#endif
 #ifdef CONFIG_KVM_ARM_HOST
   DEFINE(VCPU_CONTEXT,		offsetof(struct kvm_vcpu, arch.ctxt));
   DEFINE(CPU_GP_REGS,		offsetof(struct kvm_cpu_context, gp_regs));
   DEFINE(CPU_USER_PT_REGS,	offsetof(struct kvm_regs, regs));
   DEFINE(CPU_FP_REGS,		offsetof(struct kvm_regs, fp_regs));
-  DEFINE(CPU_SP_EL1,		offsetof(struct kvm_regs, sp_el1));
-  DEFINE(CPU_ELR_EL1,		offsetof(struct kvm_regs, elr_el1));
-  DEFINE(CPU_SPSR,		offsetof(struct kvm_regs, spsr));
-  DEFINE(CPU_SYSREGS,		offsetof(struct kvm_cpu_context, sys_regs));
+  DEFINE(VCPU_FPEXC32_EL2,	offsetof(struct kvm_vcpu, arch.ctxt.sys_regs[FPEXC32_EL2]));
   DEFINE(VCPU_ESR_EL2,		offsetof(struct kvm_vcpu, arch.fault.esr_el2));
   DEFINE(VCPU_FAR_EL2,		offsetof(struct kvm_vcpu, arch.fault.far_el2));
   DEFINE(VCPU_HPFAR_EL2,	offsetof(struct kvm_vcpu, arch.fault.hpfar_el2));
-  DEFINE(VCPU_DEBUG_FLAGS,	offsetof(struct kvm_vcpu, arch.debug_flags));
-  DEFINE(VCPU_DEBUG_PTR,	offsetof(struct kvm_vcpu, arch.debug_ptr));
-  DEFINE(DEBUG_BCR, 		offsetof(struct kvm_guest_debug_arch, dbg_bcr));
-  DEFINE(DEBUG_BVR, 		offsetof(struct kvm_guest_debug_arch, dbg_bvr));
-  DEFINE(DEBUG_WCR, 		offsetof(struct kvm_guest_debug_arch, dbg_wcr));
-  DEFINE(DEBUG_WVR, 		offsetof(struct kvm_guest_debug_arch, dbg_wvr));
-  DEFINE(VCPU_HCR_EL2,		offsetof(struct kvm_vcpu, arch.hcr_el2));
-  DEFINE(VCPU_MDCR_EL2,	offsetof(struct kvm_vcpu, arch.mdcr_el2));
-  DEFINE(VCPU_IRQ_LINES,	offsetof(struct kvm_vcpu, arch.irq_lines));
   DEFINE(VCPU_HOST_CONTEXT,	offsetof(struct kvm_vcpu, arch.host_cpu_context));
-  DEFINE(VCPU_HOST_DEBUG_STATE, offsetof(struct kvm_vcpu, arch.host_debug_state));
-  DEFINE(VCPU_TIMER_CNTV_CTL,	offsetof(struct kvm_vcpu, arch.timer_cpu.cntv_ctl));
-  DEFINE(VCPU_TIMER_CNTV_CVAL,	offsetof(struct kvm_vcpu, arch.timer_cpu.cntv_cval));
-  DEFINE(KVM_TIMER_CNTVOFF,	offsetof(struct kvm, arch.timer.cntvoff));
-  DEFINE(KVM_TIMER_ENABLED,	offsetof(struct kvm, arch.timer.enabled));
-  DEFINE(VCPU_KVM,		offsetof(struct kvm_vcpu, kvm));
-  DEFINE(VCPU_VGIC_CPU,		offsetof(struct kvm_vcpu, arch.vgic_cpu));
-  DEFINE(VGIC_V2_CPU_HCR,	offsetof(struct vgic_cpu, vgic_v2.vgic_hcr));
-  DEFINE(VGIC_V2_CPU_VMCR,	offsetof(struct vgic_cpu, vgic_v2.vgic_vmcr));
-  DEFINE(VGIC_V2_CPU_MISR,	offsetof(struct vgic_cpu, vgic_v2.vgic_misr));
-  DEFINE(VGIC_V2_CPU_EISR,	offsetof(struct vgic_cpu, vgic_v2.vgic_eisr));
-  DEFINE(VGIC_V2_CPU_ELRSR,	offsetof(struct vgic_cpu, vgic_v2.vgic_elrsr));
-  DEFINE(VGIC_V2_CPU_APR,	offsetof(struct vgic_cpu, vgic_v2.vgic_apr));
-  DEFINE(VGIC_V2_CPU_LR,	offsetof(struct vgic_cpu, vgic_v2.vgic_lr));
-  DEFINE(VGIC_V3_CPU_SRE,	offsetof(struct vgic_cpu, vgic_v3.vgic_sre));
-  DEFINE(VGIC_V3_CPU_HCR,	offsetof(struct vgic_cpu, vgic_v3.vgic_hcr));
-  DEFINE(VGIC_V3_CPU_VMCR,	offsetof(struct vgic_cpu, vgic_v3.vgic_vmcr));
-  DEFINE(VGIC_V3_CPU_MISR,	offsetof(struct vgic_cpu, vgic_v3.vgic_misr));
-  DEFINE(VGIC_V3_CPU_EISR,	offsetof(struct vgic_cpu, vgic_v3.vgic_eisr));
-  DEFINE(VGIC_V3_CPU_ELRSR,	offsetof(struct vgic_cpu, vgic_v3.vgic_elrsr));
-  DEFINE(VGIC_V3_CPU_AP0R,	offsetof(struct vgic_cpu, vgic_v3.vgic_ap0r));
-  DEFINE(VGIC_V3_CPU_AP1R,	offsetof(struct vgic_cpu, vgic_v3.vgic_ap1r));
-  DEFINE(VGIC_V3_CPU_LR,	offsetof(struct vgic_cpu, vgic_v3.vgic_lr));
-  DEFINE(VGIC_CPU_NR_LR,	offsetof(struct vgic_cpu, nr_lr));
-  DEFINE(KVM_VTTBR,		offsetof(struct kvm, arch.vttbr));
-  DEFINE(KVM_VGIC_VCTRL,	offsetof(struct kvm, arch.vgic.vctrl_base));
 #endif
 #ifdef CONFIG_CPU_PM
   DEFINE(CPU_SUSPEND_SZ,	sizeof(struct cpu_suspend_ctx));
   DEFINE(CPU_CTX_SP,		offsetof(struct cpu_suspend_ctx, sp));
   DEFINE(MPIDR_HASH_MASK,	offsetof(struct mpidr_hash, mask));
   DEFINE(MPIDR_HASH_SHIFTS,	offsetof(struct mpidr_hash, shift_aff));
-  DEFINE(SLEEP_SAVE_SP_SZ,	sizeof(struct sleep_save_sp));
-  DEFINE(SLEEP_SAVE_SP_PHYS,	offsetof(struct sleep_save_sp, save_ptr_stash_phys));
-  DEFINE(SLEEP_SAVE_SP_VIRT,	offsetof(struct sleep_save_sp, save_ptr_stash));
+  DEFINE(SLEEP_STACK_DATA_SYSTEM_REGS,	offsetof(struct sleep_stack_data, system_regs));
+  DEFINE(SLEEP_STACK_DATA_CALLEE_REGS,	offsetof(struct sleep_stack_data, callee_saved_regs));
+#endif
+  DEFINE(ARM_SMCCC_RES_X0_OFFS,	offsetof(struct arm_smccc_res, a0));
+  DEFINE(ARM_SMCCC_RES_X2_OFFS,	offsetof(struct arm_smccc_res, a2));
+  BLANK();
+  DEFINE(HIBERN_PBE_ORIG,	offsetof(struct pbe, orig_address));
+  DEFINE(HIBERN_PBE_ADDR,	offsetof(struct pbe, address));
+  DEFINE(HIBERN_PBE_NEXT,	offsetof(struct pbe, next));
+  BLANK();
+#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
+  DEFINE(TRAMP_VALIAS,		TRAMP_VALIAS);
 #endif
   return 0;
 }
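The DEFINE() entries above land in the generated asm-offsets.h so entry assembly can address the new struct fields (e.g. ldr x0, [tsk, #TSK_TI_FLAGS]) without knowing the C layout. Roughly, the kbuild mechanism behind DEFINE() looks like the sketch below; the exact marker string in include/linux/kbuild.h may differ:

/* Emit the constant through an asm marker that kbuild scrapes from
 * the compiler output into asm-offsets.h. */
#define DEFINE(sym, val) \
	asm volatile("\n->" #sym " %0 " #val : : "i" (val))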
diff -ruw linux-4.4.115/arch/arm64/kernel/cpu_errata.c linux-4.4.115-fbx/arch/arm64/kernel/cpu_errata.c
--- linux-4.4.115/arch/arm64/kernel/cpu_errata.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/arch/arm64/kernel/cpu_errata.c	2019-01-22 16:16:21.547228694 +0100
@@ -21,32 +21,168 @@
 #include <asm/cputype.h>
 #include <asm/cpufeature.h>
 
-#define MIDR_CORTEX_A53 MIDR_CPU_PART(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A53)
-#define MIDR_CORTEX_A57 MIDR_CPU_PART(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A57)
-#define MIDR_THUNDERX	MIDR_CPU_PART(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX)
-
-#define CPU_MODEL_MASK (MIDR_IMPLEMENTOR_MASK | MIDR_PARTNUM_MASK | \
-			MIDR_ARCHITECTURE_MASK)
-
 static bool __maybe_unused
 is_affected_midr_range(const struct arm64_cpu_capabilities *entry)
 {
-	u32 midr = read_cpuid_id();
+	return MIDR_IS_CPU_MODEL_RANGE(read_cpuid_id(), entry->midr_model,
+				       entry->midr_range_min,
+				       entry->midr_range_max);
+}
+
+static bool __maybe_unused
+is_kryo_midr(const struct arm64_cpu_capabilities *entry)
+{
+	u32 model;
+
+	model = read_cpuid_id();
+	model &= MIDR_IMPLEMENTOR_MASK | (0xf00 << MIDR_PARTNUM_SHIFT) |
+		MIDR_ARCHITECTURE_MASK;
+
+	return model == entry->midr_model;
+}
+
+#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
+#include <asm/mmu_context.h>
+#include <asm/cacheflush.h>
+
+DEFINE_PER_CPU_READ_MOSTLY(struct bp_hardening_data, bp_hardening_data);
+
+#ifdef CONFIG_KVM
+extern char __psci_hyp_bp_inval_start[], __psci_hyp_bp_inval_end[];
+extern char __qcom_hyp_sanitize_link_stack_start[];
+extern char __qcom_hyp_sanitize_link_stack_end[];
+
+static void __copy_hyp_vect_bpi(int slot, const char *hyp_vecs_start,
+				const char *hyp_vecs_end)
+{
+	void *dst = lm_alias(__bp_harden_hyp_vecs_start + slot * SZ_2K);
+	int i;
+
+	for (i = 0; i < SZ_2K; i += 0x80)
+		memcpy(dst + i, hyp_vecs_start, hyp_vecs_end - hyp_vecs_start);
+
+	flush_icache_range((uintptr_t)dst, (uintptr_t)dst + SZ_2K);
+}
+
+static void __install_bp_hardening_cb(bp_hardening_cb_t fn,
+				      const char *hyp_vecs_start,
+				      const char *hyp_vecs_end)
+{
+	static int last_slot = -1;
+	static DEFINE_SPINLOCK(bp_lock);
+	int cpu, slot = -1;
+
+	spin_lock(&bp_lock);
+	for_each_possible_cpu(cpu) {
+		if (per_cpu(bp_hardening_data.fn, cpu) == fn) {
+			slot = per_cpu(bp_hardening_data.hyp_vectors_slot, cpu);
+			break;
+		}
+	}
+
+	if (slot == -1) {
+		last_slot++;
+		BUG_ON(((__bp_harden_hyp_vecs_end - __bp_harden_hyp_vecs_start)
+			/ SZ_2K) <= last_slot);
+		slot = last_slot;
+		__copy_hyp_vect_bpi(slot, hyp_vecs_start, hyp_vecs_end);
+	}
+
+	__this_cpu_write(bp_hardening_data.hyp_vectors_slot, slot);
+	__this_cpu_write(bp_hardening_data.fn, fn);
+	spin_unlock(&bp_lock);
+}
+#else
+#define __psci_hyp_bp_inval_start		NULL
+#define __psci_hyp_bp_inval_end			NULL
+#define __qcom_hyp_sanitize_link_stack_start	NULL
+#define __qcom_hyp_sanitize_link_stack_end	NULL
+
+static void __maybe_unused __install_bp_hardening_cb(bp_hardening_cb_t fn,
+				      const char *hyp_vecs_start,
+				      const char *hyp_vecs_end)
+{
+	__this_cpu_write(bp_hardening_data.fn, fn);
+}
+#endif	/* CONFIG_KVM */
 
-	if ((midr & CPU_MODEL_MASK) != entry->midr_model)
-		return false;
+static void __maybe_unused install_bp_hardening_cb(
+				const struct arm64_cpu_capabilities *entry,
+				bp_hardening_cb_t fn,
+				const char *hyp_vecs_start,
+				const char *hyp_vecs_end)
+{
+	u64 pfr0;
 
-	midr &= MIDR_REVISION_MASK | MIDR_VARIANT_MASK;
+	if (!entry->matches(entry))
+		return;
 
-	return (midr >= entry->midr_range_min && midr <= entry->midr_range_max);
+	pfr0 = read_cpuid(SYS_ID_AA64PFR0_EL1);
+	if (cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_CSV2_SHIFT))
+		return;
+
+	__install_bp_hardening_cb(fn, hyp_vecs_start, hyp_vecs_end);
 }
 
+#include <linux/psci.h>
+
+static int enable_psci_bp_hardening(void *data)
+{
+	const struct arm64_cpu_capabilities *entry = data;
+
+	if (psci_ops.get_version)
+		install_bp_hardening_cb(entry,
+				       (bp_hardening_cb_t)psci_ops.get_version,
+				       __psci_hyp_bp_inval_start,
+				       __psci_hyp_bp_inval_end);
+
+	return 0;
+}
+
+static void __maybe_unused qcom_link_stack_sanitization(void)
+{
+	u64 tmp;
+
+	asm volatile("mov	%0, x30		\n"
+		     ".rept	16		\n"
+		     "bl	. + 4		\n"
+		     ".endr			\n"
+		     "mov	x30, %0		\n"
+		     : "=&r" (tmp));
+}
+
+static void __maybe_unused qcom_bp_hardening(void)
+{
+	qcom_link_stack_sanitization();
+	if (psci_ops.get_version)
+		psci_ops.get_version();
+}
+
+static int __maybe_unused enable_qcom_bp_hardening(void *data)
+{
+	const struct arm64_cpu_capabilities *entry = data;
+
+	install_bp_hardening_cb(entry,
+				(bp_hardening_cb_t)qcom_bp_hardening,
+				__psci_hyp_bp_inval_start,
+				__psci_hyp_bp_inval_end);
+	return 0;
+}
+
+#endif	/* CONFIG_HARDEN_BRANCH_PREDICTOR */
+
 #define MIDR_RANGE(model, min, max) \
 	.matches = is_affected_midr_range, \
 	.midr_model = model, \
 	.midr_range_min = min, \
 	.midr_range_max = max
 
+#define MIDR_ALL_VERSIONS(model) \
+	.matches = is_affected_midr_range, \
+	.midr_model = model, \
+	.midr_range_min = 0, \
+	.midr_range_max = (MIDR_VARIANT_MASK | MIDR_REVISION_MASK)
+
 const struct arm64_cpu_capabilities arm64_errata[] = {
 #if	defined(CONFIG_ARM64_ERRATUM_826319) || \
 	defined(CONFIG_ARM64_ERRATUM_827319) || \
@@ -91,6 +227,12 @@
 		.capability = ARM64_WORKAROUND_845719,
 		MIDR_RANGE(MIDR_CORTEX_A53, 0x00, 0x04),
 	},
+	{
+		/* Kryo2xx Silver rAp4 */
+		.desc = "Kryo2xx Silver erratum 845719",
+		.capability = ARM64_WORKAROUND_845719,
+		MIDR_RANGE(MIDR_KRYO2XX_SILVER, 0xA00004, 0xA00004),
+	},
 #endif
 #ifdef CONFIG_CAVIUM_ERRATUM_23154
 	{
@@ -109,6 +251,39 @@
 			   (1 << MIDR_VARIANT_SHIFT) | 1),
 	},
 #endif
+#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
+	{
+		.capability = ARM64_HARDEN_BRANCH_PREDICTOR,
+		MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
+		.enable = enable_psci_bp_hardening,
+	},
+	{
+		.capability = ARM64_HARDEN_BRANCH_PREDICTOR,
+		MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
+		.enable = enable_psci_bp_hardening,
+	},
+	{
+		.capability = ARM64_HARDEN_BRANCH_PREDICTOR,
+		MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
+		.enable = enable_psci_bp_hardening,
+	},
+	{
+		.capability = ARM64_HARDEN_BRANCH_PREDICTOR,
+		MIDR_ALL_VERSIONS(MIDR_CORTEX_A75),
+		.enable = enable_psci_bp_hardening,
+	},
+	{
+		.capability = ARM64_HARDEN_BRANCH_PREDICTOR,
+		MIDR_ALL_VERSIONS(MIDR_KRYO2XX_GOLD),
+		.enable = enable_psci_bp_hardening,
+	},
+	{
+		.capability = ARM64_HARDEN_BRANCH_PREDICTOR,
+		.midr_model = MIDR_QCOM_KRYO,
+		.matches = is_kryo_midr,
+		.enable = enable_qcom_bp_hardening,
+	},
+#endif
 	{
 	}
 };
@@ -117,3 +292,8 @@
 {
 	update_cpu_capabilities(arm64_errata, "enabling workaround for");
 }
+
+void __init enable_errata_workarounds(void)
+{
+	enable_cpu_capabilities(arm64_errata);
+}
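A worked check of the MIDR encoding behind the "Kryo2xx Silver rAp4" entry above: MIDR_RANGE takes a single value packing the variant (MIDR[23:20]) and revision (MIDR[3:0]), so 0xA00004 pins variant 0xa, revision 4. The assertions below just restate that arithmetic:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint32_t range = 0xA00004;

	assert(((range >> 20) & 0xf) == 0xa);	/* variant:  the "rA" */
	assert((range & 0xf) == 0x4);		/* revision: the "p4" */
	return 0;
}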
diff -ruw linux-4.4.115/arch/arm64/kernel/cpufeature.c linux-4.4.115-fbx/arch/arm64/kernel/cpufeature.c
--- linux-4.4.115/arch/arm64/kernel/cpufeature.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/arch/arm64/kernel/cpufeature.c	2019-10-29 09:26:23.001196895 +0100
@@ -23,11 +23,13 @@
 #include <linux/sort.h>
 #include <linux/stop_machine.h>
 #include <linux/types.h>
+#include <linux/mm.h>
 #include <asm/cpu.h>
 #include <asm/cpufeature.h>
 #include <asm/cpu_ops.h>
 #include <asm/processor.h>
 #include <asm/sysreg.h>
+#include <asm/virt.h>
 
 unsigned long elf_hwcap __read_mostly;
 EXPORT_SYMBOL_GPL(elf_hwcap);
@@ -45,6 +47,7 @@
 #endif
 
 DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS);
+EXPORT_SYMBOL(cpu_hwcaps);
 
 #define __ARM64_FTR_BITS(SIGNED, STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL) \
 	{						\
@@ -69,6 +72,10 @@
 		.width = 0,				\
 	}
 
+/* meta feature for alternatives */
+static bool __maybe_unused
+cpufeature_pan_not_uao(const struct arm64_cpu_capabilities *entry);
+
 static struct arm64_ftr_bits ftr_id_aa64isar0[] = {
 	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 32, 32, 0),
 	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64ISAR0_RDM_SHIFT, 4, 0),
@@ -85,6 +92,7 @@
 static struct arm64_ftr_bits ftr_id_aa64pfr0[] = {
 	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 32, 32, 0),
 	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 28, 4, 0),
+	ARM64_FTR_BITS(FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_CSV2_SHIFT, 4, 0),
 	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64PFR0_GIC_SHIFT, 4, 0),
 	ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_ASIMD_SHIFT, 4, ID_AA64PFR0_ASIMD_NI),
 	ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_FP_SHIFT, 4, ID_AA64PFR0_FP_NI),
@@ -125,6 +133,11 @@
 	ARM64_FTR_END,
 };
 
+static struct arm64_ftr_bits ftr_id_aa64mmfr2[] = {
+	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64MMFR2_UAO_SHIFT, 4, 0),
+	ARM64_FTR_END,
+};
+
 static struct arm64_ftr_bits ftr_ctr[] = {
 	U_ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 31, 1, 1),	/* RAO */
 	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 28, 3, 0),
@@ -286,6 +299,7 @@
 	/* Op1 = 0, CRn = 0, CRm = 7 */
 	ARM64_FTR_REG(SYS_ID_AA64MMFR0_EL1, ftr_id_aa64mmfr0),
 	ARM64_FTR_REG(SYS_ID_AA64MMFR1_EL1, ftr_id_aa64mmfr1),
+	ARM64_FTR_REG(SYS_ID_AA64MMFR2_EL1, ftr_id_aa64mmfr2),
 
 	/* Op1 = 3, CRn = 0, CRm = 0 */
 	ARM64_FTR_REG(SYS_CTR_EL0, ftr_ctr),
@@ -410,6 +424,7 @@
 	init_cpu_ftr_reg(SYS_ID_AA64ISAR1_EL1, info->reg_id_aa64isar1);
 	init_cpu_ftr_reg(SYS_ID_AA64MMFR0_EL1, info->reg_id_aa64mmfr0);
 	init_cpu_ftr_reg(SYS_ID_AA64MMFR1_EL1, info->reg_id_aa64mmfr1);
+	init_cpu_ftr_reg(SYS_ID_AA64MMFR2_EL1, info->reg_id_aa64mmfr2);
 	init_cpu_ftr_reg(SYS_ID_AA64PFR0_EL1, info->reg_id_aa64pfr0);
 	init_cpu_ftr_reg(SYS_ID_AA64PFR1_EL1, info->reg_id_aa64pfr1);
 	init_cpu_ftr_reg(SYS_ID_DFR0_EL1, info->reg_id_dfr0);
@@ -519,6 +534,8 @@
 				      info->reg_id_aa64mmfr0, boot->reg_id_aa64mmfr0);
 	taint |= check_update_ftr_reg(SYS_ID_AA64MMFR1_EL1, cpu,
 				      info->reg_id_aa64mmfr1, boot->reg_id_aa64mmfr1);
+	taint |= check_update_ftr_reg(SYS_ID_AA64MMFR2_EL1, cpu,
+				      info->reg_id_aa64mmfr2, boot->reg_id_aa64mmfr2);
 
 	/*
 	 * EL3 is not our concern.
@@ -623,6 +640,56 @@
 	return has_sre;
 }
 
+static bool has_no_hw_prefetch(const struct arm64_cpu_capabilities *entry)
+{
+	u32 midr = read_cpuid_id();
+	u32 rv_min, rv_max;
+
+	/* Cavium ThunderX pass 1.x and 2.x */
+	rv_min = 0;
+	rv_max = (1 << MIDR_VARIANT_SHIFT) | MIDR_REVISION_MASK;
+
+	return MIDR_IS_CPU_MODEL_RANGE(midr, MIDR_THUNDERX, rv_min, rv_max);
+}
+
+static bool runs_at_el2(const struct arm64_cpu_capabilities *entry)
+{
+	return is_kernel_in_hyp_mode();
+}
+
+#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
+static int __kpti_forced; /* 0: not forced, >0: forced on, <0: forced off */
+
+static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry)
+{
+	/* Forced on command line? */
+	if (__kpti_forced) {
+		pr_info_once("kernel page table isolation forced %s by command line option\n",
+			     __kpti_forced > 0 ? "ON" : "OFF");
+		return __kpti_forced > 0;
+	}
+
+	/* Useful for KASLR robustness */
+	if (IS_ENABLED(CONFIG_RANDOMIZE_BASE))
+		return true;
+
+	return false;
+}
+
+static int __init parse_kpti(char *str)
+{
+	bool enabled;
+	int ret = strtobool(str, &enabled);
+
+	if (ret)
+		return ret;
+
+	__kpti_forced = enabled ? 1 : -1;
+	return 0;
+}
+__setup("kpti=", parse_kpti);
+#endif	/* CONFIG_UNMAP_KERNEL_AT_EL0 */
+
 static const struct arm64_cpu_capabilities arm64_features[] = {
 	{
 		.desc = "GIC system register CPU interface",
@@ -653,6 +720,39 @@
 		.min_field_value = 2,
 	},
 #endif /* CONFIG_AS_LSE && CONFIG_ARM64_LSE_ATOMICS */
+	{
+		.desc = "Software prefetching using PRFM",
+		.capability = ARM64_HAS_NO_HW_PREFETCH,
+		.matches = has_no_hw_prefetch,
+	},
+#ifdef CONFIG_ARM64_UAO
+	{
+		.desc = "User Access Override",
+		.capability = ARM64_HAS_UAO,
+		.matches = has_cpuid_feature,
+		.sys_reg = SYS_ID_AA64MMFR2_EL1,
+		.field_pos = ID_AA64MMFR2_UAO_SHIFT,
+		.min_field_value = 1,
+		.enable = cpu_enable_uao,
+	},
+#endif /* CONFIG_ARM64_UAO */
+#ifdef CONFIG_ARM64_PAN
+	{
+		.capability = ARM64_ALT_PAN_NOT_UAO,
+		.matches = cpufeature_pan_not_uao,
+	},
+#endif /* CONFIG_ARM64_PAN */
+	{
+		.desc = "Virtualization Host Extensions",
+		.capability = ARM64_HAS_VIRT_HOST_EXTN,
+		.matches = runs_at_el2,
+	},
+#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
+	{
+		.capability = ARM64_UNMAP_KERNEL_AT_EL0,
+		.matches = unmap_kernel_at_el0,
+	},
+#endif
 	{},
 };
 
@@ -686,7 +786,7 @@
 	{},
 };
 
-static void cap_set_hwcap(const struct arm64_cpu_capabilities *cap)
+static void __init cap_set_hwcap(const struct arm64_cpu_capabilities *cap)
 {
 	switch (cap->hwcap_type) {
 	case CAP_HWCAP:
@@ -731,12 +831,12 @@
 	return rc;
 }
 
-static void setup_cpu_hwcaps(void)
+static void __init setup_cpu_hwcaps(void)
 {
 	int i;
 	const struct arm64_cpu_capabilities *hwcaps = arm64_hwcaps;
 
-	for (i = 0; hwcaps[i].desc; i++)
+	for (i = 0; hwcaps[i].matches; i++)
 		if (hwcaps[i].matches(&hwcaps[i]))
 			cap_set_hwcap(&hwcaps[i]);
 }
@@ -746,11 +846,11 @@
 {
 	int i;
 
-	for (i = 0; caps[i].desc; i++) {
+	for (i = 0; caps[i].matches; i++) {
 		if (!caps[i].matches(&caps[i]))
 			continue;
 
-		if (!cpus_have_cap(caps[i].capability))
+		if (!cpus_have_cap(caps[i].capability) && caps[i].desc)
 			pr_info("%s %s\n", info, caps[i].desc);
 		cpus_set_cap(caps[i].capability);
 	}
@@ -760,11 +860,11 @@
  * Run through the enabled capabilities and enable() it on all active
  * CPUs
  */
-static void enable_cpu_capabilities(const struct arm64_cpu_capabilities *caps)
+void __init enable_cpu_capabilities(const struct arm64_cpu_capabilities *caps)
 {
 	int i;
 
-	for (i = 0; caps[i].desc; i++)
+	for (i = 0; caps[i].matches; i++)
 		if (caps[i].enable && cpus_have_cap(caps[i].capability))
 			/*
 			 * Use stop_machine() as it schedules the work allowing
@@ -772,7 +872,8 @@
 			 * uses an IPI, giving us a PSTATE that disappears when
 			 * we return.
 			 */
-			stop_machine(caps[i].enable, NULL, cpu_online_mask);
+			stop_machine(caps[i].enable, (void *)&caps[i],
+							cpu_online_mask);
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
@@ -798,35 +899,36 @@
 static u64 __raw_read_system_reg(u32 sys_id)
 {
 	switch (sys_id) {
-	case SYS_ID_PFR0_EL1:		return (u64)read_cpuid(ID_PFR0_EL1);
-	case SYS_ID_PFR1_EL1:		return (u64)read_cpuid(ID_PFR1_EL1);
-	case SYS_ID_DFR0_EL1:		return (u64)read_cpuid(ID_DFR0_EL1);
-	case SYS_ID_MMFR0_EL1:		return (u64)read_cpuid(ID_MMFR0_EL1);
-	case SYS_ID_MMFR1_EL1:		return (u64)read_cpuid(ID_MMFR1_EL1);
-	case SYS_ID_MMFR2_EL1:		return (u64)read_cpuid(ID_MMFR2_EL1);
-	case SYS_ID_MMFR3_EL1:		return (u64)read_cpuid(ID_MMFR3_EL1);
-	case SYS_ID_ISAR0_EL1:		return (u64)read_cpuid(ID_ISAR0_EL1);
-	case SYS_ID_ISAR1_EL1:		return (u64)read_cpuid(ID_ISAR1_EL1);
-	case SYS_ID_ISAR2_EL1:		return (u64)read_cpuid(ID_ISAR2_EL1);
-	case SYS_ID_ISAR3_EL1:		return (u64)read_cpuid(ID_ISAR3_EL1);
-	case SYS_ID_ISAR4_EL1:		return (u64)read_cpuid(ID_ISAR4_EL1);
-	case SYS_ID_ISAR5_EL1:		return (u64)read_cpuid(ID_ISAR4_EL1);
-	case SYS_MVFR0_EL1:		return (u64)read_cpuid(MVFR0_EL1);
-	case SYS_MVFR1_EL1:		return (u64)read_cpuid(MVFR1_EL1);
-	case SYS_MVFR2_EL1:		return (u64)read_cpuid(MVFR2_EL1);
-
-	case SYS_ID_AA64PFR0_EL1:	return (u64)read_cpuid(ID_AA64PFR0_EL1);
-	case SYS_ID_AA64PFR1_EL1:	return (u64)read_cpuid(ID_AA64PFR0_EL1);
-	case SYS_ID_AA64DFR0_EL1:	return (u64)read_cpuid(ID_AA64DFR0_EL1);
-	case SYS_ID_AA64DFR1_EL1:	return (u64)read_cpuid(ID_AA64DFR0_EL1);
-	case SYS_ID_AA64MMFR0_EL1:	return (u64)read_cpuid(ID_AA64MMFR0_EL1);
-	case SYS_ID_AA64MMFR1_EL1:	return (u64)read_cpuid(ID_AA64MMFR1_EL1);
-	case SYS_ID_AA64ISAR0_EL1:	return (u64)read_cpuid(ID_AA64ISAR0_EL1);
-	case SYS_ID_AA64ISAR1_EL1:	return (u64)read_cpuid(ID_AA64ISAR1_EL1);
-
-	case SYS_CNTFRQ_EL0:		return (u64)read_cpuid(CNTFRQ_EL0);
-	case SYS_CTR_EL0:		return (u64)read_cpuid(CTR_EL0);
-	case SYS_DCZID_EL0:		return (u64)read_cpuid(DCZID_EL0);
+	case SYS_ID_PFR0_EL1:		return read_cpuid(SYS_ID_PFR0_EL1);
+	case SYS_ID_PFR1_EL1:		return read_cpuid(SYS_ID_PFR1_EL1);
+	case SYS_ID_DFR0_EL1:		return read_cpuid(SYS_ID_DFR0_EL1);
+	case SYS_ID_MMFR0_EL1:		return read_cpuid(SYS_ID_MMFR0_EL1);
+	case SYS_ID_MMFR1_EL1:		return read_cpuid(SYS_ID_MMFR1_EL1);
+	case SYS_ID_MMFR2_EL1:		return read_cpuid(SYS_ID_MMFR2_EL1);
+	case SYS_ID_MMFR3_EL1:		return read_cpuid(SYS_ID_MMFR3_EL1);
+	case SYS_ID_ISAR0_EL1:		return read_cpuid(SYS_ID_ISAR0_EL1);
+	case SYS_ID_ISAR1_EL1:		return read_cpuid(SYS_ID_ISAR1_EL1);
+	case SYS_ID_ISAR2_EL1:		return read_cpuid(SYS_ID_ISAR2_EL1);
+	case SYS_ID_ISAR3_EL1:		return read_cpuid(SYS_ID_ISAR3_EL1);
+	case SYS_ID_ISAR4_EL1:		return read_cpuid(SYS_ID_ISAR4_EL1);
+	case SYS_ID_ISAR5_EL1:		return read_cpuid(SYS_ID_ISAR5_EL1);
+	case SYS_MVFR0_EL1:		return read_cpuid(SYS_MVFR0_EL1);
+	case SYS_MVFR1_EL1:		return read_cpuid(SYS_MVFR1_EL1);
+	case SYS_MVFR2_EL1:		return read_cpuid(SYS_MVFR2_EL1);
+
+	case SYS_ID_AA64PFR0_EL1:	return read_cpuid(SYS_ID_AA64PFR0_EL1);
+	case SYS_ID_AA64PFR1_EL1:	return read_cpuid(SYS_ID_AA64PFR1_EL1);
+	case SYS_ID_AA64DFR0_EL1:	return read_cpuid(SYS_ID_AA64DFR0_EL1);
+	case SYS_ID_AA64DFR1_EL1:	return read_cpuid(SYS_ID_AA64DFR1_EL1);
+	case SYS_ID_AA64MMFR0_EL1:	return read_cpuid(SYS_ID_AA64MMFR0_EL1);
+	case SYS_ID_AA64MMFR1_EL1:	return read_cpuid(SYS_ID_AA64MMFR1_EL1);
+	case SYS_ID_AA64MMFR2_EL1:	return read_cpuid(SYS_ID_AA64MMFR2_EL1);
+	case SYS_ID_AA64ISAR0_EL1:	return read_cpuid(SYS_ID_AA64ISAR0_EL1);
+	case SYS_ID_AA64ISAR1_EL1:	return read_cpuid(SYS_ID_AA64ISAR1_EL1);
+
+	case SYS_CNTFRQ_EL0:		return read_cpuid(SYS_CNTFRQ_EL0);
+	case SYS_CTR_EL0:		return read_cpuid(SYS_CTR_EL0);
+	case SYS_DCZID_EL0:		return read_cpuid(SYS_DCZID_EL0);
 	default:
 		BUG();
 		return 0;
@@ -876,7 +978,7 @@
 		return;
 
 	caps = arm64_features;
-	for (i = 0; caps[i].desc; i++) {
+	for (i = 0; caps[i].matches; i++) {
 		if (!cpus_have_cap(caps[i].capability) || !caps[i].sys_reg)
 			continue;
 		/*
@@ -886,10 +988,10 @@
 		if (!feature_matches(__raw_read_system_reg(caps[i].sys_reg), &caps[i]))
 			fail_incapable_cpu("arm64_features", &caps[i]);
 		if (caps[i].enable)
-			caps[i].enable(NULL);
+			caps[i].enable((void *)&caps[i]);
 	}
 
-	for (i = 0, caps = arm64_hwcaps; caps[i].desc; i++) {
+	for (i = 0, caps = arm64_hwcaps; caps[i].matches; i++) {
 		if (!cpus_have_hwcap(&caps[i]))
 			continue;
 		if (!feature_matches(__raw_read_system_reg(caps[i].sys_reg), &caps[i]))
@@ -905,7 +1007,7 @@
 
 #endif	/* CONFIG_HOTPLUG_CPU */
 
-static void setup_feature_capabilities(void)
+static void __init setup_feature_capabilities(void)
 {
 	update_cpu_capabilities(arm64_features, "detected feature:");
 	enable_cpu_capabilities(arm64_features);
@@ -918,6 +1020,7 @@
 
 	/* Set the CPU feature capabilies */
 	setup_feature_capabilities();
+	enable_errata_workarounds();
 	setup_cpu_hwcaps();
 
 	/* Advertise that we have computed the system capabilities */
@@ -931,7 +1034,13 @@
 	if (!cwg)
 		pr_warn("No Cache Writeback Granule information, assuming cache line size %d\n",
 			cls);
-	if (L1_CACHE_BYTES < cls)
-		pr_warn("L1_CACHE_BYTES smaller than the Cache Writeback Granule (%d < %d)\n",
-			L1_CACHE_BYTES, cls);
+	if (ARCH_DMA_MINALIGN < cls)
+		pr_warn("ARCH_DMA_MINALIGN smaller than the Cache Writeback Granule (%d < %d)\n",
+			ARCH_DMA_MINALIGN, cls);
+}
+
+static bool __maybe_unused
+cpufeature_pan_not_uao(const struct arm64_cpu_capabilities *entry)
+{
+	return (cpus_have_cap(ARM64_HAS_PAN) && !cpus_have_cap(ARM64_HAS_UAO));
 }
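The table walks above switch their terminator from .desc to .matches because entries such as ARM64_ALT_PAN_NOT_UAO legitimately carry no .desc, and a .desc-terminated loop would stop scanning at the first such entry. A minimal sketch of the failure mode, with made-up names:

#include <stdio.h>

struct cap { const char *desc; int (*matches)(void); };
static int always(void) { return 1; }

static const struct cap caps[] = {
	{ .desc = NULL,      .matches = always },  /* desc-less entry    */
	{ .desc = "feature", .matches = always },  /* missed by old loop */
	{ /* sentinel: .matches == NULL ends the walk */ },
};

int main(void)
{
	for (int i = 0; caps[i].matches; i++)	/* new-style terminator */
		printf("visited %d (%s)\n", i,
		       caps[i].desc ? caps[i].desc : "no desc");
	return 0;
}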
diff -ruw linux-4.4.115/arch/arm64/kernel/cpuinfo.c linux-4.4.115-fbx/arch/arm64/kernel/cpuinfo.c
--- linux-4.4.115/arch/arm64/kernel/cpuinfo.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/arch/arm64/kernel/cpuinfo.c	2019-01-22 16:16:21.547228694 +0100
@@ -19,6 +19,7 @@
 #include <asm/cpu.h>
 #include <asm/cputype.h>
 #include <asm/cpufeature.h>
+#include <asm/elf.h>
 
 #include <linux/bitops.h>
 #include <linux/bug.h>
@@ -33,6 +34,10 @@
 #include <linux/sched.h>
 #include <linux/smp.h>
 #include <linux/delay.h>
+#include <linux/of_fdt.h>
+
+char *(*arch_read_hardware_id)(void);
+EXPORT_SYMBOL(arch_read_hardware_id);
 
 /*
  * In case the boot CPU is hotpluggable, we record its initial state and
@@ -106,7 +111,9 @@
 	int i, j;
 	bool compat = personality(current->personality) == PER_LINUX32;
 
-	for_each_online_cpu(i) {
+	seq_printf(m, "Processor\t: AArch64 Processor rev %d (%s)\n",
+		read_cpuid_id() & 15, ELF_PLATFORM);
+	for_each_present_cpu(i) {
 		struct cpuinfo_arm64 *cpuinfo = &per_cpu(cpu_data, i);
 		u32 midr = cpuinfo->reg_midr;
 
@@ -156,6 +163,11 @@
 		seq_printf(m, "CPU revision\t: %d\n\n", MIDR_REVISION(midr));
 	}
 
+	if (!arch_read_hardware_id)
+		seq_printf(m, "Hardware\t: %s\n", machine_name);
+	else
+		seq_printf(m, "Hardware\t: %s\n", arch_read_hardware_id());
+
 	return 0;
 }
 
@@ -201,42 +213,48 @@
 	if (l1ip == ICACHE_POLICY_AIVIVT)
 		set_bit(ICACHEF_AIVIVT, &__icache_flags);
 
-	pr_info("Detected %s I-cache on CPU%d\n", icache_policy_str[l1ip], cpu);
+	pr_debug("Detected %s I-cache on CPU%d\n", icache_policy_str[l1ip], cpu);
 }
 
 static void __cpuinfo_store_cpu(struct cpuinfo_arm64 *info)
 {
 	info->reg_cntfrq = arch_timer_get_cntfrq();
 	info->reg_ctr = read_cpuid_cachetype();
-	info->reg_dczid = read_cpuid(DCZID_EL0);
+	info->reg_dczid = read_cpuid(SYS_DCZID_EL0);
 	info->reg_midr = read_cpuid_id();
 
-	info->reg_id_aa64dfr0 = read_cpuid(ID_AA64DFR0_EL1);
-	info->reg_id_aa64dfr1 = read_cpuid(ID_AA64DFR1_EL1);
-	info->reg_id_aa64isar0 = read_cpuid(ID_AA64ISAR0_EL1);
-	info->reg_id_aa64isar1 = read_cpuid(ID_AA64ISAR1_EL1);
-	info->reg_id_aa64mmfr0 = read_cpuid(ID_AA64MMFR0_EL1);
-	info->reg_id_aa64mmfr1 = read_cpuid(ID_AA64MMFR1_EL1);
-	info->reg_id_aa64pfr0 = read_cpuid(ID_AA64PFR0_EL1);
-	info->reg_id_aa64pfr1 = read_cpuid(ID_AA64PFR1_EL1);
-
-	info->reg_id_dfr0 = read_cpuid(ID_DFR0_EL1);
-	info->reg_id_isar0 = read_cpuid(ID_ISAR0_EL1);
-	info->reg_id_isar1 = read_cpuid(ID_ISAR1_EL1);
-	info->reg_id_isar2 = read_cpuid(ID_ISAR2_EL1);
-	info->reg_id_isar3 = read_cpuid(ID_ISAR3_EL1);
-	info->reg_id_isar4 = read_cpuid(ID_ISAR4_EL1);
-	info->reg_id_isar5 = read_cpuid(ID_ISAR5_EL1);
-	info->reg_id_mmfr0 = read_cpuid(ID_MMFR0_EL1);
-	info->reg_id_mmfr1 = read_cpuid(ID_MMFR1_EL1);
-	info->reg_id_mmfr2 = read_cpuid(ID_MMFR2_EL1);
-	info->reg_id_mmfr3 = read_cpuid(ID_MMFR3_EL1);
-	info->reg_id_pfr0 = read_cpuid(ID_PFR0_EL1);
-	info->reg_id_pfr1 = read_cpuid(ID_PFR1_EL1);
-
-	info->reg_mvfr0 = read_cpuid(MVFR0_EL1);
-	info->reg_mvfr1 = read_cpuid(MVFR1_EL1);
-	info->reg_mvfr2 = read_cpuid(MVFR2_EL1);
+	info->reg_id_aa64dfr0 = read_cpuid(SYS_ID_AA64DFR0_EL1);
+	info->reg_id_aa64dfr1 = read_cpuid(SYS_ID_AA64DFR1_EL1);
+	info->reg_id_aa64isar0 = read_cpuid(SYS_ID_AA64ISAR0_EL1);
+	info->reg_id_aa64isar1 = read_cpuid(SYS_ID_AA64ISAR1_EL1);
+	/*
+	 * Explicitly mask out the 16KB granule field since we do not
+	 * want to support it.
+	 */
+	info->reg_id_aa64mmfr0 = read_cpuid(SYS_ID_AA64MMFR0_EL1) &
+					(~MMFR0_EL1_16KGRAN_MASK);
+	info->reg_id_aa64mmfr1 = read_cpuid(SYS_ID_AA64MMFR1_EL1);
+	info->reg_id_aa64mmfr2 = read_cpuid(SYS_ID_AA64MMFR2_EL1);
+	info->reg_id_aa64pfr0 = read_cpuid(SYS_ID_AA64PFR0_EL1);
+	info->reg_id_aa64pfr1 = read_cpuid(SYS_ID_AA64PFR1_EL1);
+
+	info->reg_id_dfr0 = read_cpuid(SYS_ID_DFR0_EL1);
+	info->reg_id_isar0 = read_cpuid(SYS_ID_ISAR0_EL1);
+	info->reg_id_isar1 = read_cpuid(SYS_ID_ISAR1_EL1);
+	info->reg_id_isar2 = read_cpuid(SYS_ID_ISAR2_EL1);
+	info->reg_id_isar3 = read_cpuid(SYS_ID_ISAR3_EL1);
+	info->reg_id_isar4 = read_cpuid(SYS_ID_ISAR4_EL1);
+	info->reg_id_isar5 = read_cpuid(SYS_ID_ISAR5_EL1);
+	info->reg_id_mmfr0 = read_cpuid(SYS_ID_MMFR0_EL1);
+	info->reg_id_mmfr1 = read_cpuid(SYS_ID_MMFR1_EL1);
+	info->reg_id_mmfr2 = read_cpuid(SYS_ID_MMFR2_EL1);
+	info->reg_id_mmfr3 = read_cpuid(SYS_ID_MMFR3_EL1);
+	info->reg_id_pfr0 = read_cpuid(SYS_ID_PFR0_EL1);
+	info->reg_id_pfr1 = read_cpuid(SYS_ID_PFR1_EL1);
+
+	info->reg_mvfr0 = read_cpuid(SYS_MVFR0_EL1);
+	info->reg_mvfr1 = read_cpuid(SYS_MVFR1_EL1);
+	info->reg_mvfr2 = read_cpuid(SYS_MVFR2_EL1);
 
 	cpuinfo_detect_icache_policy(info);
 
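
The hunk above caches ID_AA64MMFR0_EL1 with its 16KB-granule field cleared,
so the cpufeature code never advertises 16KB page support. Assuming
MMFR0_EL1_16KGRAN_MASK covers the TGran16 field at bits [23:20] (its
architected position, where 0x1 means 16KB granule supported), the masking
works out as:

	#include <stdint.h>
	#include <stdio.h>

	/* Assumed to match the patch's MMFR0_EL1_16KGRAN_MASK. */
	#define MMFR0_EL1_16KGRAN_MASK	(0xfULL << 20)

	int main(void)
	{
		uint64_t mmfr0  = 0x100000;	/* pretend TGran16 == 1 */
		uint64_t cached = mmfr0 & ~MMFR0_EL1_16KGRAN_MASK;

		/* The cached copy reads back TGran16 == 0. */
		printf("raw=%#llx cached=%#llx\n",
		       (unsigned long long)mmfr0, (unsigned long long)cached);
		return 0;
	}
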
diff -ruw linux-4.4.115/arch/arm64/kernel/cpu_ops.c linux-4.4.115-fbx/arch/arm64/kernel/cpu_ops.c
--- linux-4.4.115/arch/arm64/kernel/cpu_ops.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/arch/arm64/kernel/cpu_ops.c	2019-10-29 09:26:23.001196895 +0100
@@ -25,19 +25,30 @@
 #include <asm/smp_plat.h>
 
 extern const struct cpu_operations smp_spin_table_ops;
+extern const struct cpu_operations acpi_parking_protocol_ops;
 extern const struct cpu_operations cpu_psci_ops;
 
 const struct cpu_operations *cpu_ops[NR_CPUS];
 
-static const struct cpu_operations *supported_cpu_ops[] __initconst = {
+static const struct cpu_operations *dt_supported_cpu_ops[] __initconst = {
 	&smp_spin_table_ops,
 	&cpu_psci_ops,
 	NULL,
 };
 
+static const struct cpu_operations *acpi_supported_cpu_ops[] __initconst = {
+#ifdef CONFIG_ARM64_ACPI_PARKING_PROTOCOL
+	&acpi_parking_protocol_ops,
+#endif
+	&cpu_psci_ops,
+	NULL,
+};
+
 static const struct cpu_operations * __init cpu_get_ops(const char *name)
 {
-	const struct cpu_operations **ops = supported_cpu_ops;
+	const struct cpu_operations **ops;
+
+	ops = acpi_disabled ? dt_supported_cpu_ops : acpi_supported_cpu_ops;
 
 	while (*ops) {
 		if (!strcmp(name, (*ops)->name))
@@ -75,9 +86,17 @@
 		}
 	} else {
 		enable_method = acpi_get_enable_method(cpu);
-		if (!enable_method)
-			pr_err("Unsupported ACPI enable-method\n");
+		if (!enable_method) {
+			/*
+			 * On ACPI systems the boot CPU does not require
+			 * checking the enable method, since for some boot
+			 * protocols (e.g. the parking protocol) it need not
+			 * be initialized. Don't warn spuriously.
+			 */
+			if (cpu != 0)
+				pr_err("Unsupported ACPI enable-method\n");
+		}
 	}
 
 	return enable_method;
 }
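
cpu_get_ops() above now picks one of two NULL-terminated tables before
doing its name lookup: spin-table plus PSCI when booting from DT, the
parking protocol (if configured) plus PSCI under ACPI. A minimal sketch of
that select-then-scan pattern, with stand-in types:

	#include <stdio.h>
	#include <string.h>

	struct cpu_operations { const char *name; };

	static const struct cpu_operations spin_table = { "spin-table" };
	static const struct cpu_operations psci       = { "psci" };

	static const struct cpu_operations *dt_ops[]   = { &spin_table, &psci, NULL };
	static const struct cpu_operations *acpi_ops[] = { &psci, NULL };

	static const struct cpu_operations *cpu_get_ops(const char *name,
							int acpi_disabled)
	{
		/* Pick the table first, then walk it to the NULL sentinel. */
		const struct cpu_operations **ops =
			acpi_disabled ? dt_ops : acpi_ops;

		while (*ops) {
			if (!strcmp(name, (*ops)->name))
				return *ops;
			ops++;
		}
		return NULL;
	}

	int main(void)
	{
		const struct cpu_operations *o = cpu_get_ops("psci", 1);
		printf("%s\n", o ? o->name : "unsupported");
		return 0;
	}
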
diff -ruw linux-4.4.115/arch/arm64/kernel/debug-monitors.c linux-4.4.115-fbx/arch/arm64/kernel/debug-monitors.c
--- linux-4.4.115/arch/arm64/kernel/debug-monitors.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/arch/arm64/kernel/debug-monitors.c	2019-01-22 16:16:21.547228694 +0100
@@ -23,6 +23,7 @@
 #include <linux/hardirq.h>
 #include <linux/init.h>
 #include <linux/ptrace.h>
+#include <linux/kprobes.h>
 #include <linux/stat.h>
 #include <linux/uaccess.h>
 
@@ -48,6 +49,7 @@
 	asm volatile("msr mdscr_el1, %0" :: "r" (mdscr));
 	local_dbg_restore(flags);
 }
+NOKPROBE_SYMBOL(mdscr_write);
 
 static u32 mdscr_read(void)
 {
@@ -55,6 +57,7 @@
 	asm volatile("mrs %0, mdscr_el1" : "=r" (mdscr));
 	return mdscr;
 }
+NOKPROBE_SYMBOL(mdscr_read);
 
 /*
  * Allow root to disable self-hosted debug from userspace.
@@ -103,6 +106,7 @@
 		mdscr_write(mdscr);
 	}
 }
+NOKPROBE_SYMBOL(enable_debug_monitors);
 
 void disable_debug_monitors(enum dbg_active_el el)
 {
@@ -123,6 +127,7 @@
 		mdscr_write(mdscr);
 	}
 }
+NOKPROBE_SYMBOL(disable_debug_monitors);
 
 /*
  * OS lock clearing.
@@ -173,6 +178,7 @@
 	spsr |= DBG_SPSR_SS;
 	regs->pstate = spsr;
 }
+NOKPROBE_SYMBOL(set_regs_spsr_ss);
 
 static void clear_regs_spsr_ss(struct pt_regs *regs)
 {
@@ -182,6 +188,7 @@
 	spsr &= ~DBG_SPSR_SS;
 	regs->pstate = spsr;
 }
+NOKPROBE_SYMBOL(clear_regs_spsr_ss);
 
 /* EL1 Single Step Handler hooks */
 static LIST_HEAD(step_hook);
@@ -225,6 +232,7 @@
 
 	return retval;
 }
+NOKPROBE_SYMBOL(call_step_hook);
 
 static int single_step_handler(unsigned long addr, unsigned int esr,
 			       struct pt_regs *regs)
@@ -253,6 +261,10 @@
 		 */
 		user_rewind_single_step(current);
 	} else {
+#ifdef	CONFIG_KPROBES
+		if (kprobe_single_step_handler(regs, esr) == DBG_HOOK_HANDLED)
+			return 0;
+#endif
 		if (call_step_hook(regs, esr) == DBG_HOOK_HANDLED)
 			return 0;
 
@@ -266,6 +278,7 @@
 
 	return 0;
 }
+NOKPROBE_SYMBOL(single_step_handler);
 
 /*
  * Breakpoint handler is re-entrant as another breakpoint can
@@ -303,6 +316,7 @@
 
 	return fn ? fn(regs, esr) : DBG_HOOK_ERROR;
 }
+NOKPROBE_SYMBOL(call_break_hook);
 
 static int brk_handler(unsigned long addr, unsigned int esr,
 		       struct pt_regs *regs)
@@ -318,13 +332,21 @@
 		};
 
 		force_sig_info(SIGTRAP, &info, current);
-	} else if (call_break_hook(regs, esr) != DBG_HOOK_HANDLED) {
-		pr_warning("Unexpected kernel BRK exception at EL1\n");
+	}
+#ifdef	CONFIG_KPROBES
+	else if ((esr & BRK64_ESR_MASK) == BRK64_ESR_KPROBES) {
+		if (kprobe_breakpoint_handler(regs, esr) != DBG_HOOK_HANDLED)
+			return -EFAULT;
+	}
+#endif
+	else if (call_break_hook(regs, esr) != DBG_HOOK_HANDLED) {
+		pr_warn("Unexpected kernel BRK exception at EL1\n");
 		return -EFAULT;
 	}
 
 	return 0;
 }
+NOKPROBE_SYMBOL(brk_handler);
 
 int aarch32_break_handler(struct pt_regs *regs)
 {
@@ -369,6 +391,7 @@
 	force_sig_info(SIGTRAP, &info, current);
 	return 0;
 }
+NOKPROBE_SYMBOL(aarch32_break_handler);
 
 static int __init debug_traps_init(void)
 {
@@ -390,6 +413,7 @@
 	if (test_ti_thread_flag(task_thread_info(task), TIF_SINGLESTEP))
 		set_regs_spsr_ss(task_pt_regs(task));
 }
+NOKPROBE_SYMBOL(user_rewind_single_step);
 
 void user_fastforward_single_step(struct task_struct *task)
 {
@@ -405,6 +429,7 @@
 	mdscr_write(mdscr_read() | DBG_MDSCR_SS);
 	enable_debug_monitors(DBG_ACTIVE_EL1);
 }
+NOKPROBE_SYMBOL(kernel_enable_single_step);
 
 void kernel_disable_single_step(void)
 {
@@ -412,12 +437,14 @@
 	mdscr_write(mdscr_read() & ~DBG_MDSCR_SS);
 	disable_debug_monitors(DBG_ACTIVE_EL1);
 }
+NOKPROBE_SYMBOL(kernel_disable_single_step);
 
 int kernel_active_single_step(void)
 {
 	WARN_ON(!irqs_disabled());
 	return mdscr_read() & DBG_MDSCR_SS;
 }
+NOKPROBE_SYMBOL(kernel_active_single_step);
 
 /* ptrace API */
 void user_enable_single_step(struct task_struct *task)
@@ -427,8 +454,10 @@
 	if (!test_and_set_ti_thread_flag(ti, TIF_SINGLESTEP))
 		set_regs_spsr_ss(task_pt_regs(task));
 }
+NOKPROBE_SYMBOL(user_enable_single_step);
 
 void user_disable_single_step(struct task_struct *task)
 {
 	clear_ti_thread_flag(task_thread_info(task), TIF_SINGLESTEP);
 }
+NOKPROBE_SYMBOL(user_disable_single_step);
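
Every NOKPROBE_SYMBOL() added above records the function in a blacklist
section so kprobes refuses to plant a breakpoint inside it; these are
exactly the functions on the breakpoint/single-step path, where a probe
would recurse. The blacklist boils down to an address-range test; a sketch
with hypothetical addresses (the kernel derives the real ranges from the
_kprobe_blacklist section that NOKPROBE_SYMBOL() populates):

	#include <stdbool.h>
	#include <stddef.h>
	#include <stdint.h>
	#include <stdio.h>

	struct blacklist_entry { uintptr_t start, end; };

	static const struct blacklist_entry blacklist[] = {
		{ 0x1000, 0x1080 },	/* e.g. mdscr_write */
		{ 0x1080, 0x10c0 },	/* e.g. mdscr_read  */
	};

	static bool within_kprobe_blacklist(uintptr_t addr)
	{
		for (size_t i = 0; i < sizeof(blacklist) / sizeof(blacklist[0]); i++)
			if (addr >= blacklist[i].start && addr < blacklist[i].end)
				return true;
		return false;
	}

	int main(void)
	{
		printf("probe at 0x1004 allowed: %s\n",
		       within_kprobe_blacklist(0x1004) ? "no" : "yes");
		return 0;
	}
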
diff -ruw linux-4.4.115/arch/arm64/kernel/entry-fpsimd.S linux-4.4.115-fbx/arch/arm64/kernel/entry-fpsimd.S
--- linux-4.4.115/arch/arm64/kernel/entry-fpsimd.S	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/arch/arm64/kernel/entry-fpsimd.S	2019-01-22 16:16:21.547228694 +0100
@@ -64,4 +64,20 @@
 	ret
 ENDPROC(fpsimd_load_partial_state)
 
+#ifdef CONFIG_ENABLE_FP_SIMD_SETTINGS
+ENTRY(fpsimd_enable_trap)
+	mrs x0, cpacr_el1
+	bic x0, x0, #(3 << 20)
+	orr x0, x0, #(1 << 20)
+	msr cpacr_el1, x0
+	ret
+ENDPROC(fpsimd_enable_trap)
+ENTRY(fpsimd_disable_trap)
+	mrs x0, cpacr_el1
+	orr x0, x0, #(3 << 20)
+	msr cpacr_el1, x0
+	ret
+ENDPROC(fpsimd_disable_trap)
+#endif
+
 #endif
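
fpsimd_enable_trap above clears CPACR_EL1.FPEN (bits [21:20]) and sets it
to 0b01, which traps FP/SIMD use from EL0 while leaving EL1 untrapped;
fpsimd_disable_trap sets FPEN to 0b11, disabling the trap entirely. The
same bit arithmetic in C, using the architected field position:

	#include <stdint.h>
	#include <stdio.h>

	#define FPEN_SHIFT	20
	#define FPEN_MASK	(3ULL << FPEN_SHIFT)

	/* FPEN = 0b01: EL0 accesses trap, EL1's do not (the bic + orr). */
	static uint64_t fpsimd_enable_trap(uint64_t cpacr)
	{
		return (cpacr & ~FPEN_MASK) | (1ULL << FPEN_SHIFT);
	}

	/* FPEN = 0b11: no FP/SIMD trapping at all (the plain orr). */
	static uint64_t fpsimd_disable_trap(uint64_t cpacr)
	{
		return cpacr | FPEN_MASK;
	}

	int main(void)
	{
		uint64_t cpacr = fpsimd_enable_trap(0);
		printf("trap on : FPEN=%llu\n",
		       (unsigned long long)((cpacr & FPEN_MASK) >> FPEN_SHIFT));
		cpacr = fpsimd_disable_trap(cpacr);
		printf("trap off: FPEN=%llu\n",
		       (unsigned long long)((cpacr & FPEN_MASK) >> FPEN_SHIFT));
		return 0;
	}
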
diff -ruw linux-4.4.115/arch/arm64/kernel/entry.S linux-4.4.115-fbx/arch/arm64/kernel/entry.S
--- linux-4.4.115/arch/arm64/kernel/entry.S	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/arch/arm64/kernel/entry.S	2019-01-22 16:16:21.547228694 +0100
@@ -27,8 +27,12 @@
 #include <asm/cpufeature.h>
 #include <asm/errno.h>
 #include <asm/esr.h>
+#include <asm/irq.h>
 #include <asm/memory.h>
+#include <asm/mmu.h>
+#include <asm/ptrace.h>
 #include <asm/thread_info.h>
+#include <asm/uaccess.h>
 #include <asm/asm-uaccess.h>
 #include <asm/unistd.h>
 
@@ -67,8 +71,31 @@
 #define BAD_FIQ		2
 #define BAD_ERROR	3
 
-	.macro	kernel_entry, el, regsize = 64
+	.macro kernel_ventry, el, label, regsize = 64
+	.align 7
+#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
+alternative_if ARM64_UNMAP_KERNEL_AT_EL0
+	.if	\el == 0
+	.if	\regsize == 64
+	mrs	x30, tpidrro_el0
+	msr	tpidrro_el0, xzr
+	.else
+	mov	x30, xzr
+	.endif
+	.endif
+alternative_else_nop_endif
+#endif
+
 	sub	sp, sp, #S_FRAME_SIZE
+	b	el\()\el\()_\label
+	.endm
+
+	.macro tramp_alias, dst, sym
+	mov_q	\dst, TRAMP_VALIAS
+	add	\dst, \dst, #(\sym - .entry.tramp.text)
+	.endm
+
+	.macro	kernel_entry, el, regsize = 64
 	.if	\regsize == 32
 	mov	w0, w0				// zero upper 32 bits of x0
 	.endif
@@ -90,21 +117,64 @@
 
 	.if	\el == 0
 	mrs	x21, sp_el0
-	get_thread_info tsk			// Ensure MDSCR_EL1.SS is clear,
+#ifdef CONFIG_THREAD_INFO_IN_TASK
+	ldr_this_cpu	tsk, __entry_task, x20	// Ensure MDSCR_EL1.SS is clear,
+	ldr	x19, [tsk, #TSK_TI_FLAGS]	// since we can unmask debug
+#else
+	mov	tsk, sp
+	and	tsk, tsk, #~(THREAD_SIZE - 1)	// Ensure MDSCR_EL1.SS is clear,
 	ldr	x19, [tsk, #TI_FLAGS]		// since we can unmask debug
+#endif
 	disable_step_tsk x19, x20		// exceptions when scheduling.
+
+	mov	x29, xzr			// fp pointed to user-space
 	.else
 	add	x21, sp, #S_FRAME_SIZE
 	get_thread_info tsk
 	/* Save the task's original addr_limit and set USER_DS (TASK_SIZE_64) */
+#ifdef CONFIG_THREAD_INFO_IN_TASK
+	ldr	x20, [tsk, #TSK_TI_ADDR_LIMIT]
+#else
 	ldr	x20, [tsk, #TI_ADDR_LIMIT]
+#endif
 	str	x20, [sp, #S_ORIG_ADDR_LIMIT]
 	mov	x20, #TASK_SIZE_64
+#ifdef CONFIG_THREAD_INFO_IN_TASK
+	str	x20, [tsk, #TSK_TI_ADDR_LIMIT]
+#else
 	str	x20, [tsk, #TI_ADDR_LIMIT]
+#endif
+	ALTERNATIVE(nop, SET_PSTATE_UAO(0), ARM64_HAS_UAO, CONFIG_ARM64_UAO)
 	.endif /* \el == 0 */
 	mrs	x22, elr_el1
 	mrs	x23, spsr_el1
 	stp	lr, x21, [sp, #S_LR]
+
+#ifdef CONFIG_ARM64_SW_TTBR0_PAN
+	/*
+	 * Set the TTBR0 PAN bit in SPSR. When the exception is taken from
+	 * EL0, there is no need to check the state of TTBR0_EL1 since
+	 * accesses are always enabled.
+	 * Note that the meaning of this bit differs from the ARMv8.1 PAN
+	 * feature as all TTBR0_EL1 accesses are disabled, not just those to
+	 * user mappings.
+	 */
+alternative_if ARM64_HAS_PAN
+	b	1f				// skip TTBR0 PAN
+alternative_else_nop_endif
+
+	.if	\el != 0
+	mrs	x21, ttbr0_el1
+	tst	x21, #TTBR_ASID_MASK		// Check for the reserved ASID
+	orr	x23, x23, #PSR_PAN_BIT		// Set the emulated PAN in the saved SPSR
+	b.eq	1f				// TTBR0 access already disabled
+	and	x23, x23, #~PSR_PAN_BIT		// Clear the emulated PAN in the saved SPSR
+	.endif
+
+	__uaccess_ttbr0_disable x21
+1:
+#endif
+
 	stp	x22, x23, [sp, #S_PC]
 
 	/*
@@ -116,6 +186,13 @@
 	.endif
 
 	/*
+	 * Set sp_el0 to current thread_info.
+	 */
+	.if	\el == 0
+	msr	sp_el0, tsk
+	.endif
+
+	/*
 	 * Registers that may be useful after this macro is invoked:
 	 *
 	 * x21 - aborted SP
@@ -128,33 +205,70 @@
 	.if	\el != 0
 	/* Restore the task's original addr_limit. */
 	ldr	x20, [sp, #S_ORIG_ADDR_LIMIT]
+#ifdef CONFIG_THREAD_INFO_IN_TASK
+	str	x20, [tsk, #TSK_TI_ADDR_LIMIT]
+#else
 	str	x20, [tsk, #TI_ADDR_LIMIT]
+#endif
+
+	/* No need to restore UAO, it will be restored from SPSR_EL1 */
 	.endif
 
 	ldp	x21, x22, [sp, #S_PC]		// load ELR, SPSR
 	.if	\el == 0
 	ct_user_enter
+	.endif
+
+#ifdef CONFIG_ARM64_SW_TTBR0_PAN
+	/*
+	 * Restore access to TTBR0_EL1. If returning to EL0, no need for SPSR
+	 * PAN bit checking.
+	 */
+alternative_if ARM64_HAS_PAN
+	b	2f				// skip TTBR0 PAN
+alternative_else_nop_endif
+
+	.if	\el != 0
+	tbnz	x22, #22, 1f			// Skip re-enabling TTBR0 access if the PSR_PAN_BIT is set
+	.endif
+
+	__uaccess_ttbr0_enable x0, x1
+
+	.if	\el == 0
+	/*
+	 * Enable errata workarounds only if returning to user. The only
+	 * workaround currently required for TTBR0_EL1 changes is for the
+	 * Cavium erratum 27456 (broadcast TLBI instructions may cause I-cache
+	 * corruption).
+	 */
+	bl	post_ttbr_update_workaround
+	.endif
+1:
+	.if	\el != 0
+	and	x22, x22, #~PSR_PAN_BIT		// ARMv8.0 CPUs do not understand this bit
+	.endif
+2:
+#endif
+
+	.if	\el == 0
 	ldr	x23, [sp, #S_SP]		// load return stack pointer
 	msr	sp_el0, x23
+	tst	x22, #PSR_MODE32_BIT		// native task?
+	b.eq	3f
+
 #ifdef CONFIG_ARM64_ERRATUM_845719
-alternative_if_not ARM64_WORKAROUND_845719
-	nop
-	nop
-#ifdef CONFIG_PID_IN_CONTEXTIDR
-	nop
-#endif
-alternative_else
-	tbz	x22, #4, 1f
+alternative_if ARM64_WORKAROUND_845719
 #ifdef CONFIG_PID_IN_CONTEXTIDR
 	mrs	x29, contextidr_el1
 	msr	contextidr_el1, x29
 #else
 	msr contextidr_el1, xzr
 #endif
-1:
-alternative_endif
+alternative_else_nop_endif
 #endif
+3:
 	.endif
+
 	msr	elr_el1, x21			// set up the return data
 	msr	spsr_el1, x22
 	ldp	x0, x1, [sp, #16 * 0]
@@ -174,12 +288,65 @@
 	ldp	x28, x29, [sp, #16 * 14]
 	ldr	lr, [sp, #S_LR]
 	add	sp, sp, #S_FRAME_SIZE		// restore sp
-	eret					// return to kernel
+
+	.if	\el == 0
+alternative_insn eret, nop, ARM64_UNMAP_KERNEL_AT_EL0
+#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
+	bne	4f
+	msr	far_el1, x30
+	tramp_alias	x30, tramp_exit_native
+	br	x30
+4:
+	tramp_alias	x30, tramp_exit_compat
+	br	x30
+#endif
+	.else
+	eret
+	.endif
 	.endm
 
-	.macro	get_thread_info, rd
-	mov	\rd, sp
-	and	\rd, \rd, #~(THREAD_SIZE - 1)	// top of stack
+	.macro	irq_stack_entry
+	mov	x19, sp			// preserve the original sp
+
+	/*
+	 * Compare sp with the base of the task stack.
+	 * If the top ~(THREAD_SIZE - 1) bits match, we are on a task stack,
+	 * and should switch to the irq stack.
+	 */
+#ifdef CONFIG_THREAD_INFO_IN_TASK
+	ldr	x25, [tsk, TSK_STACK]
+	eor	x25, x25, x19
+	and	x25, x25, #~(THREAD_SIZE - 1)
+	cbnz	x25, 9998f
+#else
+	and	x25, x19, #~(THREAD_SIZE - 1)
+	cmp	x25, tsk
+	b.ne	9998f
+#endif
+
+	adr_this_cpu x25, irq_stack, x26
+	mov	x26, #IRQ_STACK_START_SP
+	add	x26, x25, x26
+
+	/* switch to the irq stack */
+	mov	sp, x26
+
+	/*
+	 * Add a dummy stack frame; this non-standard format is fixed up
+	 * by unwind_frame().
+	 */
+	stp     x29, x19, [sp, #-16]!
+	mov	x29, sp
+
+9998:
+	.endm
+
+	/*
+	 * x19 should be preserved between irq_stack_entry and
+	 * irq_stack_exit.
+	 */
+	.macro	irq_stack_exit
+	mov	sp, x19
 	.endm
 
 /*
@@ -197,10 +364,11 @@
  * Interrupt handling.
  */
 	.macro	irq_handler
-	adrp	x1, handle_arch_irq
-	ldr	x1, [x1, #:lo12:handle_arch_irq]
+	ldr_l	x1, handle_arch_irq
 	mov	x0, sp
+	irq_stack_entry
 	blr	x1
+	irq_stack_exit
 	.endm
 
 	.text
@@ -208,34 +376,35 @@
 /*
  * Exception vectors.
  */
+	.pushsection ".entry.text", "ax"
 
 	.align	11
 ENTRY(vectors)
-	ventry	el1_sync_invalid		// Synchronous EL1t
-	ventry	el1_irq_invalid			// IRQ EL1t
-	ventry	el1_fiq_invalid			// FIQ EL1t
-	ventry	el1_error_invalid		// Error EL1t
-
-	ventry	el1_sync			// Synchronous EL1h
-	ventry	el1_irq				// IRQ EL1h
-	ventry	el1_fiq_invalid			// FIQ EL1h
-	ventry	el1_error_invalid		// Error EL1h
-
-	ventry	el0_sync			// Synchronous 64-bit EL0
-	ventry	el0_irq				// IRQ 64-bit EL0
-	ventry	el0_fiq_invalid			// FIQ 64-bit EL0
-	ventry	el0_error_invalid		// Error 64-bit EL0
+	kernel_ventry	1, sync_invalid			// Synchronous EL1t
+	kernel_ventry	1, irq_invalid			// IRQ EL1t
+	kernel_ventry	1, fiq_invalid			// FIQ EL1t
+	kernel_ventry	1, error_invalid		// Error EL1t
+
+	kernel_ventry	1, sync				// Synchronous EL1h
+	kernel_ventry	1, irq				// IRQ EL1h
+	kernel_ventry	1, fiq_invalid			// FIQ EL1h
+	kernel_ventry	1, error_invalid		// Error EL1h
+
+	kernel_ventry	0, sync				// Synchronous 64-bit EL0
+	kernel_ventry	0, irq				// IRQ 64-bit EL0
+	kernel_ventry	0, fiq_invalid			// FIQ 64-bit EL0
+	kernel_ventry	0, error_invalid		// Error 64-bit EL0
 
 #ifdef CONFIG_COMPAT
-	ventry	el0_sync_compat			// Synchronous 32-bit EL0
-	ventry	el0_irq_compat			// IRQ 32-bit EL0
-	ventry	el0_fiq_invalid_compat		// FIQ 32-bit EL0
-	ventry	el0_error_invalid_compat	// Error 32-bit EL0
-#else
-	ventry	el0_sync_invalid		// Synchronous 32-bit EL0
-	ventry	el0_irq_invalid			// IRQ 32-bit EL0
-	ventry	el0_fiq_invalid			// FIQ 32-bit EL0
-	ventry	el0_error_invalid		// Error 32-bit EL0
+	kernel_ventry	0, sync_compat, 32		// Synchronous 32-bit EL0
+	kernel_ventry	0, irq_compat, 32		// IRQ 32-bit EL0
+	kernel_ventry	0, fiq_invalid_compat, 32	// FIQ 32-bit EL0
+	kernel_ventry	0, error_invalid_compat, 32	// Error 32-bit EL0
+#else
+	kernel_ventry	0, sync_invalid, 32		// Synchronous 32-bit EL0
+	kernel_ventry	0, irq_invalid, 32		// IRQ 32-bit EL0
+	kernel_ventry	0, fiq_invalid, 32		// FIQ 32-bit EL0
+	kernel_ventry	0, error_invalid, 32		// Error 32-bit EL0
 #endif
 END(vectors)
 
@@ -243,7 +412,7 @@
  * Invalid mode handlers
  */
 	.macro	inv_entry, el, reason, regsize = 64
-	kernel_entry el, \regsize
+	kernel_entry \el, \regsize
 	mov	x0, sp
 	mov	x1, #\reason
 	mrs	x2, esr_el1
@@ -297,11 +466,16 @@
  */
 	.align	6
 el1_sync:
+#ifdef CONFIG_QCOM_TLB_EL2_HANDLER
+	smc #0xffff
+#endif
 	kernel_entry 1
 	mrs	x1, esr_el1			// read the syndrome register
 	lsr	x24, x1, #ESR_ELx_EC_SHIFT	// exception class
 	cmp	x24, #ESR_ELx_EC_DABT_CUR	// data abort in EL1
 	b.eq	el1_da
+	cmp	x24, #ESR_ELx_EC_IABT_CUR	// instruction abort in EL1
+	b.eq	el1_ia
 	cmp	x24, #ESR_ELx_EC_SYS64		// configurable trap
 	b.eq	el1_undef
 	cmp	x24, #ESR_ELx_EC_SP_ALIGN	// stack alignment exception
@@ -313,6 +487,11 @@
 	cmp	x24, #ESR_ELx_EC_BREAKPT_CUR	// debug exception in EL1
 	b.ge	el1_dbg
 	b	el1_inv
+
+el1_ia:
+	/*
+	 * Fall through to the Data abort case
+	 */
 el1_da:
 	/*
 	 * Data abort handling
@@ -373,13 +552,21 @@
 	bl	trace_hardirqs_off
 #endif
 
+	get_thread_info tsk
 	irq_handler
 
 #ifdef CONFIG_PREEMPT
-	get_thread_info tsk
+#ifdef CONFIG_THREAD_INFO_IN_TASK
+	ldr	w24, [tsk, #TSK_TI_PREEMPT]	// get preempt count
+#else
 	ldr	w24, [tsk, #TI_PREEMPT]		// get preempt count
+#endif
 	cbnz	w24, 1f				// preempt count != 0
+#ifdef CONFIG_THREAD_INFO_IN_TASK
+	ldr	x0, [tsk, #TSK_TI_FLAGS]	// get flags
+#else
 	ldr	x0, [tsk, #TI_FLAGS]		// get flags
+#endif
 	tbz	x0, #TIF_NEED_RESCHED, 1f	// needs rescheduling?
 	bl	el1_preempt
 1:
@@ -394,7 +581,11 @@
 el1_preempt:
 	mov	x24, lr
 1:	bl	preempt_schedule_irq		// irq en/disable is done inside
+#ifdef CONFIG_THREAD_INFO_IN_TASK
+	ldr	x0, [tsk, #TSK_TI_FLAGS]	// get new task's TI_FLAGS
+#else
 	ldr	x0, [tsk, #TI_FLAGS]		// get new tasks TI_FLAGS
+#endif
 	tbnz	x0, #TIF_NEED_RESCHED, 1b	// needs rescheduling?
 	ret	x24
 #endif
@@ -418,7 +609,7 @@
 	cmp	x24, #ESR_ELx_EC_FP_EXC64	// FP/ASIMD exception
 	b.eq	el0_fpsimd_exc
 	cmp	x24, #ESR_ELx_EC_SYS64		// configurable trap
-	b.eq	el0_undef
+	b.eq	el0_sys
 	cmp	x24, #ESR_ELx_EC_SP_ALIGN	// stack alignment exception
 	b.eq	el0_sp_pc
 	cmp	x24, #ESR_ELx_EC_PC_ALIGN	// pc alignment exception
@@ -442,7 +633,7 @@
 	cmp	x24, #ESR_ELx_EC_IABT_LOW	// instruction abort in EL0
 	b.eq	el0_ia
 	cmp	x24, #ESR_ELx_EC_FP_ASIMD	// FP/ASIMD access
-	b.eq	el0_fpsimd_acc
+	b.eq	el0_fpsimd_acc_compat
 	cmp	x24, #ESR_ELx_EC_FP_EXC32	// FP/ASIMD exception
 	b.eq	el0_fpsimd_exc
 	cmp	x24, #ESR_ELx_EC_PC_ALIGN	// pc alignment exception
@@ -495,13 +686,15 @@
 	 * Instruction abort handling
 	 */
 	mrs	x26, far_el1
-	// enable interrupts before calling the main handler
-	enable_dbg_and_irq
+	enable_dbg
+#ifdef CONFIG_TRACE_IRQFLAGS
+	bl	trace_hardirqs_off
+#endif
 	ct_user_exit
 	mov	x0, x26
-	orr	x1, x25, #1 << 24		// use reserved ISS bit for instruction aborts
+	mov	x1, x25
 	mov	x2, sp
-	bl	do_mem_abort
+	bl	do_el0_ia_bp_hardening
 	b	ret_to_user
 el0_fpsimd_acc:
 	/*
@@ -513,6 +706,17 @@
 	mov	x1, sp
 	bl	do_fpsimd_acc
 	b	ret_to_user
+el0_fpsimd_acc_compat:
+	/*
+	 * Floating Point or Advanced SIMD access
+	 */
+	enable_dbg
+	ct_user_exit
+	mov	x0, x25
+	mov	x1, sp
+	bl	do_fpsimd_acc_compat
+	b	ret_to_user
+
 el0_fpsimd_exc:
 	/*
 	 * Floating Point or Advanced SIMD exception
@@ -546,6 +750,16 @@
 	mov	x0, sp
 	bl	do_undefinstr
 	b	ret_to_user
+el0_sys:
+	/*
+	 * System instructions, for trapped cache maintenance instructions
+	 */
+	enable_dbg_and_irq
+	ct_user_exit
+	mov	x0, x25
+	mov	x1, sp
+	bl	do_sysinstr
+	b	ret_to_user
 el0_dbg:
 	/*
 	 * Debug exception handling
@@ -613,7 +827,40 @@
 	ldp	x27, x28, [x8], #16
 	ldp	x29, x9, [x8], #16
 	ldr	lr, [x8]
+#ifdef CONFIG_ARM64_REG_REBALANCE_ON_CTX_SW
+	orr	x13, x13, x13
+	orr	x14, x14, x14
+	orr	x15, x15, x15
+	orr	x16, x16, x16
+	orr	x17, x17, x17
+	orr	x18, x18, x18
+	orr	x19, x19, x19
+	orr	x20, x20, x20
+	orr	x21, x21, x21
+	mov	v0.16b, v0.16b
+	mov	v1.16b, v1.16b
+	mov	v2.16b, v2.16b
+	mov	v3.16b, v3.16b
+	mov	v4.16b, v4.16b
+	mov	v5.16b, v5.16b
+	mov	v6.16b, v6.16b
+	mov	v7.16b, v7.16b
+	mov	v8.16b, v8.16b
+	mov	v9.16b, v9.16b
+	mov	v10.16b, v10.16b
+	mov	v11.16b, v11.16b
+	mov	v12.16b, v12.16b
+	mov	v13.16b, v13.16b
+	mov	v14.16b, v14.16b
+	mov	v15.16b, v15.16b
+#endif
 	mov	sp, x9
+#ifdef CONFIG_THREAD_INFO_IN_TASK
+	msr	sp_el0, x1
+#else
+	and	x9, x9, #~(THREAD_SIZE - 1)
+	msr	sp_el0, x9
+#endif
 	ret
 ENDPROC(cpu_switch_to)
 
@@ -624,7 +871,11 @@
 ret_fast_syscall:
 	disable_irq				// disable interrupts
 	str	x0, [sp, #S_X0]			// returned x0
+#ifdef CONFIG_THREAD_INFO_IN_TASK
+	ldr	x1, [tsk, #TSK_TI_FLAGS]	// re-check for syscall tracing
+#else
 	ldr	x1, [tsk, #TI_FLAGS]		// re-check for syscall tracing
+#endif
 	and	x2, x1, #_TIF_SYSCALL_WORK
 	cbnz	x2, ret_fast_syscall_trace
 	and	x2, x1, #_TIF_WORK_MASK
@@ -641,14 +892,14 @@
 work_pending:
 	tbnz	x1, #TIF_NEED_RESCHED, work_resched
 	/* TIF_SIGPENDING, TIF_NOTIFY_RESUME or TIF_FOREIGN_FPSTATE case */
-	ldr	x2, [sp, #S_PSTATE]
 	mov	x0, sp				// 'regs'
-	tst	x2, #PSR_MODE_MASK		// user mode regs?
-	b.ne	no_work_pending			// returning to kernel
 	enable_irq				// enable interrupts for do_notify_resume()
 	bl	do_notify_resume
 	b	ret_to_user
 work_resched:
+#ifdef CONFIG_TRACE_IRQFLAGS
+	bl	trace_hardirqs_off		// the IRQs are off here, inform the tracing code
+#endif
 	bl	schedule
 
 /*
@@ -656,11 +907,14 @@
  */
 ret_to_user:
 	disable_irq				// disable interrupts
+#ifdef CONFIG_THREAD_INFO_IN_TASK
+	ldr	x1, [tsk, #TSK_TI_FLAGS]
+#else
 	ldr	x1, [tsk, #TI_FLAGS]
+#endif
 	and	x2, x1, #_TIF_WORK_MASK
 	cbnz	x2, work_pending
 	enable_step_tsk x1, x2
-no_work_pending:
 	kernel_exit 0
 ENDPROC(ret_to_user)
 
@@ -689,7 +943,11 @@
 	enable_dbg_and_irq
 	ct_user_exit 1
 
+#ifdef CONFIG_THREAD_INFO_IN_TASK
+	ldr	x16, [tsk, #TSK_TI_FLAGS]	// check for syscall hooks
+#else
 	ldr	x16, [tsk, #TI_FLAGS]		// check for syscall hooks
+#endif
 	tst	x16, #_TIF_SYSCALL_WORK
 	b.ne	__sys_trace
 	cmp     scno, sc_nr                     // check upper syscall limit
@@ -740,6 +998,120 @@
 	bl	do_ni_syscall
 	b	__sys_trace_return
 
+	.popsection				// .entry.text
+#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
+/*
+ * Exception vectors trampoline.
+ */
+	.pushsection ".entry.tramp.text", "ax"
+
+	.macro tramp_map_kernel, tmp
+	mrs	\tmp, ttbr1_el1
+	sub	\tmp, \tmp, #(SWAPPER_DIR_SIZE + RESERVED_TTBR0_SIZE)
+	bic	\tmp, \tmp, #USER_ASID_FLAG
+	msr	ttbr1_el1, \tmp
+#ifdef CONFIG_ARCH_MSM8996
+	/* ASID already in \tmp[63:48] */
+	movk	\tmp, #:abs_g2_nc:(TRAMP_VALIAS >> 12)
+	movk	\tmp, #:abs_g1_nc:(TRAMP_VALIAS >> 12)
+	/* 2MB boundary containing the vectors, so we nobble the walk cache */
+	movk	\tmp, #:abs_g0_nc:((TRAMP_VALIAS & ~(SZ_2M - 1)) >> 12)
+	isb
+	tlbi	vae1, \tmp
+	dsb	nsh
+#endif /* CONFIG_ARCH_MSM8996 */
+	.endm
+
+	.macro tramp_unmap_kernel, tmp
+	mrs	\tmp, ttbr1_el1
+	add	\tmp, \tmp, #(SWAPPER_DIR_SIZE + RESERVED_TTBR0_SIZE)
+	orr	\tmp, \tmp, #USER_ASID_FLAG
+	msr	ttbr1_el1, \tmp
+	/*
+	 * We avoid running the post_ttbr_update_workaround here because the
+	 * user and kernel ASIDs don't have conflicting mappings, so any
+	 * "blessing" as described in:
+	 *
+	 *   http://lkml.kernel.org/r/56BB848A.6060603@caviumnetworks.com
+	 *
+	 * will not hurt correctness. Whilst this may partially defeat the
+	 * point of using split ASIDs in the first place, it avoids
+	 * the hit of invalidating the entire I-cache on every return to
+	 * userspace.
+	 */
+	.endm
+
+	.macro tramp_ventry, regsize = 64
+	.align	7
+1:
+	.if	\regsize == 64
+	msr	tpidrro_el0, x30	// Restored in kernel_ventry
+	.endif
+	bl	2f
+	b	.
+2:
+	tramp_map_kernel	x30
+#ifdef CONFIG_RANDOMIZE_BASE
+	adr	x30, tramp_vectors + PAGE_SIZE
+#ifndef CONFIG_ARCH_MSM8996
+	isb
+#endif
+	ldr	x30, [x30]
+#else
+	ldr	x30, =vectors
+#endif
+	prfm	plil1strm, [x30, #(1b - tramp_vectors)]
+	msr	vbar_el1, x30
+	add	x30, x30, #(1b - tramp_vectors)
+	isb
+	ret
+	.endm
+
+	.macro tramp_exit, regsize = 64
+	adr	x30, tramp_vectors
+	msr	vbar_el1, x30
+	tramp_unmap_kernel	x30
+	.if	\regsize == 64
+	mrs	x30, far_el1
+	.endif
+	eret
+	.endm
+
+	.align	11
+ENTRY(tramp_vectors)
+	.space	0x400
+
+	tramp_ventry
+	tramp_ventry
+	tramp_ventry
+	tramp_ventry
+
+	tramp_ventry	32
+	tramp_ventry	32
+	tramp_ventry	32
+	tramp_ventry	32
+END(tramp_vectors)
+
+ENTRY(tramp_exit_native)
+	tramp_exit
+END(tramp_exit_native)
+
+ENTRY(tramp_exit_compat)
+	tramp_exit	32
+END(tramp_exit_compat)
+
+	.ltorg
+	.popsection				// .entry.tramp.text
+#ifdef CONFIG_RANDOMIZE_BASE
+	.pushsection ".rodata", "a"
+	.align PAGE_SHIFT
+	.globl	__entry_tramp_data_start
+__entry_tramp_data_start:
+	.quad	vectors
+	.popsection				// .rodata
+#endif /* CONFIG_RANDOMIZE_BASE */
+#endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */
+
 /*
  * Special system call wrappers.
  */
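
tramp_map_kernel and tramp_unmap_kernel above flip TTBR1_EL1 between the
full swapper tables and the stripped-down trampoline copy by adding or
subtracting a fixed offset and toggling the user ASID bit, so the kernel's
mappings simply do not exist while user code runs. The arithmetic, with
placeholder constants (the real values come from SWAPPER_DIR_SIZE,
RESERVED_TTBR0_SIZE and USER_ASID_FLAG):

	#include <stdint.h>
	#include <stdio.h>

	#define SWAPPER_DIR_SIZE	(3 * 4096ULL)	/* illustrative */
	#define RESERVED_TTBR0_SIZE	4096ULL		/* illustrative */
	#define USER_ASID_FLAG		(1ULL << 48)

	static uint64_t tramp_map_kernel(uint64_t ttbr1)
	{
		/* Step back to the swapper tables, select the kernel ASID. */
		ttbr1 -= SWAPPER_DIR_SIZE + RESERVED_TTBR0_SIZE;
		return ttbr1 & ~USER_ASID_FLAG;
	}

	static uint64_t tramp_unmap_kernel(uint64_t ttbr1)
	{
		/* Step forward to the trampoline tables, user ASID set. */
		ttbr1 += SWAPPER_DIR_SIZE + RESERVED_TTBR0_SIZE;
		return ttbr1 | USER_ASID_FLAG;
	}

	int main(void)
	{
		uint64_t user = 0x40004000ULL | USER_ASID_FLAG;
		uint64_t kern = tramp_map_kernel(user);

		printf("kernel ttbr1=%#llx\n", (unsigned long long)kern);
		printf("user   ttbr1=%#llx\n",
		       (unsigned long long)tramp_unmap_kernel(kern));
		return 0;
	}
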
diff -ruw linux-4.4.115/arch/arm64/kernel/fpsimd.c linux-4.4.115-fbx/arch/arm64/kernel/fpsimd.c
--- linux-4.4.115/arch/arm64/kernel/fpsimd.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/arch/arm64/kernel/fpsimd.c	2019-01-22 16:16:21.551228730 +0100
@@ -20,6 +20,7 @@
 #include <linux/cpu.h>
 #include <linux/cpu_pm.h>
 #include <linux/kernel.h>
+#include <linux/module.h>
 #include <linux/init.h>
 #include <linux/sched.h>
 #include <linux/signal.h>
@@ -27,6 +28,7 @@
 
 #include <asm/fpsimd.h>
 #include <asm/cputype.h>
+#include <asm/app_api.h>
 
 #define FPEXC_IOF	(1 << 0)
 #define FPEXC_DZF	(1 << 1)
@@ -35,6 +37,8 @@
 #define FPEXC_IXF	(1 << 4)
 #define FPEXC_IDF	(1 << 7)
 
+#define FP_SIMD_BIT	31
+
 /*
  * In order to reduce the number of times the FPSIMD state is needlessly saved
  * and restored, we need to keep track of two things:
@@ -88,14 +92,42 @@
  *   whatever is in the FPSIMD registers is not saved to memory, but discarded.
  */
 static DEFINE_PER_CPU(struct fpsimd_state *, fpsimd_last_state);
+static DEFINE_PER_CPU(int, fpsimd_stg_enable);
+
+static int fpsimd_settings = 0x1; /* default = 0x1 */
+module_param(fpsimd_settings, int, 0644);
+
+void fpsimd_settings_enable(void)
+{
+	set_app_setting_bit(FP_SIMD_BIT);
+}
+
+void fpsimd_settings_disable(void)
+{
+	clear_app_setting_bit(FP_SIMD_BIT);
+}
 
 /*
  * Trapped FP/ASIMD access.
  */
 void do_fpsimd_acc(unsigned int esr, struct pt_regs *regs)
 {
-	/* TODO: implement lazy context saving/restoring */
-	WARN_ON(1);
+	if (!fpsimd_settings)
+		return;
+
+	fpsimd_disable_trap();
+	fpsimd_settings_disable();
+	this_cpu_write(fpsimd_stg_enable, 0);
+}
+
+void do_fpsimd_acc_compat(unsigned int esr, struct pt_regs *regs)
+{
+	if (!fpsimd_settings)
+		return;
+
+	fpsimd_disable_trap();
+	fpsimd_settings_enable();
+	this_cpu_write(fpsimd_stg_enable, 1);
 }
 
 /*
@@ -135,6 +167,11 @@
 	if (current->mm && !test_thread_flag(TIF_FOREIGN_FPSTATE))
 		fpsimd_save_state(&current->thread.fpsimd_state);
 
+	if (fpsimd_settings && __this_cpu_read(fpsimd_stg_enable)) {
+		fpsimd_settings_disable();
+		this_cpu_write(fpsimd_stg_enable, 0);
+	}
+
 	if (next->mm) {
 		/*
 		 * If we are switching to a task whose most recent userland
@@ -152,6 +189,14 @@
 		else
 			set_ti_thread_flag(task_thread_info(next),
 					   TIF_FOREIGN_FPSTATE);
+
+		if (!fpsimd_settings)
+			return;
+
+		if (test_ti_thread_flag(task_thread_info(next), TIF_32BIT))
+			fpsimd_enable_trap();
+		else
+			fpsimd_disable_trap();
 	}
 }
 
@@ -291,7 +336,7 @@
 	.notifier_call = fpsimd_cpu_pm_notifier,
 };
 
-static void fpsimd_pm_init(void)
+static void __init fpsimd_pm_init(void)
 {
 	cpu_pm_register_notifier(&fpsimd_cpu_pm_notifier_block);
 }
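
With the hunks above, fpsimd_thread_switch() arms the CPACR trap only when
the incoming task is 32-bit (TIF_32BIT), so the per-app FP/SIMD settings
are applied lazily by do_fpsimd_acc_compat() on the task's first FP/SIMD
instruction and torn down when a 64-bit task runs. The decision logic as a
sketch (the task fields are stand-ins for the real thread flags):

	#include <stdbool.h>
	#include <stdio.h>

	struct task { const char *comm; bool is_32bit; bool has_mm; };

	static bool fpsimd_settings = true;	/* mirrors the module param */
	static bool trap_armed;

	static void fpsimd_thread_switch(const struct task *next)
	{
		if (!next->has_mm || !fpsimd_settings)
			return;
		/* Trap so the compat handler can enable settings lazily. */
		trap_armed = next->is_32bit;
		printf("-> %s: trap %s\n", next->comm,
		       trap_armed ? "armed" : "cleared");
	}

	int main(void)
	{
		struct task compat = { "compat-app", true,  true };
		struct task native = { "native-app", false, true };

		fpsimd_thread_switch(&compat);
		fpsimd_thread_switch(&native);
		return 0;
	}
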
diff -ruw linux-4.4.115/arch/arm64/kernel/head.S linux-4.4.115-fbx/arch/arm64/kernel/head.S
--- linux-4.4.115/arch/arm64/kernel/head.S	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/arch/arm64/kernel/head.S	2019-10-29 09:26:23.001196895 +0100
@@ -25,10 +25,12 @@
 #include <linux/irqchip/arm-gic-v3.h>
 
 #include <asm/assembler.h>
+#include <asm/boot.h>
 #include <asm/ptrace.h>
 #include <asm/asm-offsets.h>
 #include <asm/cache.h>
 #include <asm/cputype.h>
+#include <asm/elf.h>
 #include <asm/kernel-pgtable.h>
 #include <asm/memory.h>
 #include <asm/pgtable-hwdef.h>
@@ -48,9 +50,6 @@
 #error TEXT_OFFSET must be less than 2MB
 #endif
 
-#define KERNEL_START	_text
-#define KERNEL_END	_end
-
 /*
  * Kernel startup entry point.
  * ---------------------------
@@ -67,12 +66,11 @@
  * in the entry routines.
  */
 	__HEAD
-
+_head:
 	/*
 	 * DO NOT MODIFY. Image header expected by Linux boot-loaders.
 	 */
 #ifdef CONFIG_EFI
-efi_head:
 	/*
 	 * This add instruction has no meaningful effect except that
 	 * its opcode forms the magic "MZ" signature required by UEFI.
@@ -83,9 +81,9 @@
 	b	stext				// branch to kernel start, magic
 	.long	0				// reserved
 #endif
-	.quad	_kernel_offset_le		// Image load offset from start of RAM, little-endian
-	.quad	_kernel_size_le			// Effective size of kernel image, little-endian
-	.quad	_kernel_flags_le		// Informative flags, little-endian
+	le64sym	_kernel_offset_le		// Image load offset from start of RAM, little-endian
+	le64sym	_kernel_size_le			// Effective size of kernel image, little-endian
+	le64sym	_kernel_flags_le		// Informative flags, little-endian
 	.quad	0				// reserved
 	.quad	0				// reserved
 	.quad	0				// reserved
@@ -94,14 +92,12 @@
 	.byte	0x4d
 	.byte	0x64
 #ifdef CONFIG_EFI
-	.long	pe_header - efi_head		// Offset to the PE header.
+	.long	pe_header - _head		// Offset to the PE header.
 #else
 	.word	0				// reserved
 #endif
 
 #ifdef CONFIG_EFI
-	.globl	__efistub_stext_offset
-	.set	__efistub_stext_offset, stext - efi_head
 	.align 3
 pe_header:
 	.ascii	"PE"
@@ -121,11 +117,11 @@
 	.short	0x20b				// PE32+ format
 	.byte	0x02				// MajorLinkerVersion
 	.byte	0x14				// MinorLinkerVersion
-	.long	_end - stext			// SizeOfCode
+	.long	_end - efi_header_end		// SizeOfCode
 	.long	0				// SizeOfInitializedData
 	.long	0				// SizeOfUninitializedData
-	.long	__efistub_entry - efi_head	// AddressOfEntryPoint
-	.long	__efistub_stext_offset		// BaseOfCode
+	.long	__efistub_entry - _head		// AddressOfEntryPoint
+	.long	efi_header_end - _head		// BaseOfCode
 
 extra_header_fields:
 	.quad	0				// ImageBase
@@ -139,10 +135,10 @@
 	.short	0				// MinorSubsystemVersion
 	.long	0				// Win32VersionValue
 
-	.long	_end - efi_head			// SizeOfImage
+	.long	_end - _head			// SizeOfImage
 
 	// Everything before the kernel image is considered part of the header
-	.long	__efistub_stext_offset		// SizeOfHeaders
+	.long	efi_header_end - _head		// SizeOfHeaders
 	.long	0				// CheckSum
 	.short	0xa				// Subsystem (EFI application)
 	.short	0				// DllCharacteristics
@@ -186,10 +182,10 @@
 	.byte	0
 	.byte	0
 	.byte	0        		// end of 0 padding of section name
-	.long	_end - stext		// VirtualSize
-	.long	__efistub_stext_offset	// VirtualAddress
-	.long	_edata - stext		// SizeOfRawData
-	.long	__efistub_stext_offset	// PointerToRawData
+	.long	_end - efi_header_end	// VirtualSize
+	.long	efi_header_end - _head	// VirtualAddress
+	.long	_edata - efi_header_end	// SizeOfRawData
+	.long	efi_header_end - _head	// PointerToRawData
 
 	.long	0		// PointerToRelocations (0 for executables)
 	.long	0		// PointerToLineNumbers (0 for executables)
@@ -198,19 +194,23 @@
 	.long	0xe0500020	// Characteristics (section flags)
 
 	/*
-	 * EFI will load stext onwards at the 4k section alignment
+	 * EFI will load .text onwards at the 4k section alignment
 	 * described in the PE/COFF header. To ensure that instruction
 	 * sequences using an adrp and a :lo12: immediate will function
-	 * correctly at this alignment, we must ensure that stext is
+	 * correctly at this alignment, we must ensure that .text is
 	 * placed at a 4k boundary in the Image to begin with.
 	 */
 	.align 12
+efi_header_end:
 #endif
 
+	__INIT
+
 ENTRY(stext)
 	bl	preserve_boot_args
 	bl	el2_setup			// Drop to EL1, w20=cpu_boot_mode
 	adrp	x24, __PHYS_OFFSET
+	and	x23, x24, MIN_KIMG_ALIGN - 1	// KASLR offset, defaults to 0
 	bl	set_cpu_boot_mode_flag
 	bl	__create_page_tables		// x25=TTBR0, x26=TTBR1
 	/*
@@ -219,10 +219,10 @@
 	 * On return, the CPU will be ready for the MMU to be turned on and
 	 * the TCR will have been set.
 	 */
-	ldr	x27, =__mmap_switched		// address to jump to after
+	bl	__cpu_setup			// initialise processor
+	adr_l	x27, __primary_switch		// address to jump to after
 						// MMU has been enabled
-	adr_l	lr, __enable_mmu		// return (PIC) address
-	b	__cpu_setup			// initialise processor
+	b	__enable_mmu
 ENDPROC(stext)
 
 /*
@@ -311,21 +311,21 @@
 __create_page_tables:
 	adrp	x25, idmap_pg_dir
 	adrp	x26, swapper_pg_dir
-	mov	x27, lr
+	mov	x28, lr
 
 	/*
 	 * Invalidate the idmap and swapper page tables to avoid potential
 	 * dirty cache lines being evicted.
 	 */
 	mov	x0, x25
-	add	x1, x26, #SWAPPER_DIR_SIZE
+	add	x1, x26, #SWAPPER_DIR_SIZE + RESERVED_TTBR0_SIZE
 	bl	__inval_cache_range
 
 	/*
 	 * Clear the idmap and swapper page tables.
 	 */
 	mov	x0, x25
-	add	x6, x26, #SWAPPER_DIR_SIZE
+	add	x6, x26, #SWAPPER_DIR_SIZE + RESERVED_TTBR0_SIZE
 1:	stp	xzr, xzr, [x0], #16
 	stp	xzr, xzr, [x0], #16
 	stp	xzr, xzr, [x0], #16
@@ -333,7 +333,7 @@
 	cmp	x0, x6
 	b.lo	1b
 
-	ldr	x7, =SWAPPER_MM_MMUFLAGS
+	mov	x7, SWAPPER_MM_MMUFLAGS
 
 	/*
 	 * Create the identity mapping.
@@ -389,10 +389,13 @@
 	 * Map the kernel image (starting with PHYS_OFFSET).
 	 */
 	mov	x0, x26				// swapper_pg_dir
-	mov	x5, #PAGE_OFFSET
+	mov_q	x5, KIMAGE_VADDR + TEXT_OFFSET	// compile time __va(_text)
+	add	x5, x5, x23			// add KASLR displacement
 	create_pgd_entry x0, x5, x3, x6
-	ldr	x6, =KERNEL_END			// __va(KERNEL_END)
-	mov	x3, x24				// phys offset
+	adrp	x6, _end			// runtime __pa(_end)
+	adrp	x3, _text			// runtime __pa(_text)
+	sub	x6, x6, x3			// _end - _text
+	add	x6, x6, x5			// runtime __va(_end)
 	create_block_map x0, x7, x3, x5, x6
 
 	/*
@@ -401,12 +404,11 @@
 	 * tables again to remove any speculatively loaded cache lines.
 	 */
 	mov	x0, x25
-	add	x1, x26, #SWAPPER_DIR_SIZE
+	add	x1, x26, #SWAPPER_DIR_SIZE + RESERVED_TTBR0_SIZE
 	dmb	sy
 	bl	__inval_cache_range
 
-	mov	lr, x27
-	ret
+	ret	x28
 ENDPROC(__create_page_tables)
 	.ltorg
 
@@ -414,30 +416,67 @@
  * The following fragment of code is executed with the MMU enabled.
  */
 	.set	initial_sp, init_thread_union + THREAD_START_SP
-__mmap_switched:
-	adr_l	x6, __bss_start
-	adr_l	x7, __bss_stop
-
-1:	cmp	x6, x7
-	b.hs	2f
-	str	xzr, [x6], #8			// Clear BSS
-	b	1b
-2:
+__primary_switched:
+	mov	x28, lr				// preserve LR
+
+	adr_l	x8, vectors			// load VBAR_EL1 with virtual
+	msr	vbar_el1, x8			// vector table address
+	isb
+
+	// Clear BSS
+	adr_l	x0, __bss_start
+	mov	x1, xzr
+	adr_l	x2, __bss_stop
+	sub	x2, x2, x0
+	bl	__pi_memset
+	dsb	ishst				// Make zero page visible to PTW
+
+#ifdef CONFIG_THREAD_INFO_IN_TASK
+	adrp	x4, init_thread_union
+	add	sp, x4, #THREAD_SIZE
+	adr_l	x5, init_task
+	msr	sp_el0, x5			// Save thread_info
+#else
 	adr_l	sp, initial_sp, x4
+	mov	x4, sp
+	and	x4, x4, #~(THREAD_SIZE - 1)
+	msr	sp_el0, x4			// Save thread_info
+#endif
+
 	str_l	x21, __fdt_pointer, x5		// Save FDT pointer
-	str_l	x24, memstart_addr, x6		// Save PHYS_OFFSET
+
+	ldr_l	x4, kimage_vaddr		// Save the offset between
+	sub	x4, x4, x24			// the kernel virtual and
+	str_l	x4, kimage_voffset, x5		// physical mappings
+
 	mov	x29, #0
 #ifdef CONFIG_KASAN
 	bl	kasan_early_init
 #endif
+#ifdef CONFIG_RANDOMIZE_BASE
+	tst	x23, ~(MIN_KIMG_ALIGN - 1)	// already running randomized?
+	b.ne	0f
+	mov	x0, x21				// pass FDT address in x0
+	mov	x1, x23				// pass modulo offset in x1
+	bl	kaslr_early_init		// parse FDT for KASLR options
+	cbz	x0, 0f				// KASLR disabled? just proceed
+	orr	x23, x23, x0			// record KASLR offset
+	ret	x28				// we must enable KASLR, return
+						// to __enable_mmu()
+0:
+#endif
 	b	start_kernel
-ENDPROC(__mmap_switched)
+ENDPROC(__primary_switched)
 
 /*
  * end early head section, begin head code that is also used for
  * hotplug and needs to have the same protections as the text region
  */
 	.section ".text","ax"
+
+ENTRY(kimage_vaddr)
+	.quad		_text - TEXT_OFFSET
+
 /*
  * If we're fortunate enough to boot at EL2, ensure that the world is
  * sane before dropping to EL1.
@@ -544,7 +583,7 @@
  * Sets the __boot_cpu_mode flag depending on the CPU boot mode passed
  * in x20. See arch/arm64/include/asm/virt.h for more info.
  */
-ENTRY(set_cpu_boot_mode_flag)
+set_cpu_boot_mode_flag:
 	adr_l	x1, __boot_cpu_mode
 	cmp	w20, #BOOT_CPU_MODE_EL2
 	b.ne	1f
@@ -577,7 +616,7 @@
 	bl	el2_setup			// Drop to EL1, w20=cpu_boot_mode
 	bl	set_cpu_boot_mode_flag
 	mrs	x0, mpidr_el1
-	ldr     x1, =MPIDR_HWID_BITMASK
+	mov_q	x1, MPIDR_HWID_BITMASK
 	and	x0, x0, x1
 	adr_l	x3, secondary_holding_pen_release
 pen:	ldr	x4, [x3]
@@ -597,7 +636,7 @@
 	b	secondary_startup
 ENDPROC(secondary_entry)
 
-ENTRY(secondary_startup)
+secondary_startup:
 	/*
 	 * Common entry point for secondary CPUs.
 	 */
@@ -605,14 +644,26 @@
 	adrp	x26, swapper_pg_dir
 	bl	__cpu_setup			// initialise processor
 
-	ldr	x21, =secondary_data
-	ldr	x27, =__secondary_switched	// address to jump to after enabling the MMU
+	adr_l	x27, __secondary_switch		// address to jump to after enabling the MMU
 	b	__enable_mmu
 ENDPROC(secondary_startup)
 
-ENTRY(__secondary_switched)
-	ldr	x0, [x21]			// get secondary_data.stack
+__secondary_switched:
+	adr_l	x5, vectors
+	msr	vbar_el1, x5
+	isb
+#ifdef CONFIG_THREAD_INFO_IN_TASK
+	adr_l	x0, secondary_data
+	ldr	x1, [x0, #CPU_BOOT_STACK]	// get secondary_data.stack
+	mov	sp, x1
+	ldr	x2, [x0, #CPU_BOOT_TASK]
+	msr	sp_el0, x2
+#else
+	ldr_l	x0, secondary_data		// get secondary_data.stack
 	mov	sp, x0
+	and	x0, x0, #~(THREAD_SIZE - 1)
+	msr	sp_el0, x0			// save thread_info
+#endif
 	mov	x29, #0
 	b	secondary_start_kernel
 ENDPROC(__secondary_switched)
@@ -629,13 +680,12 @@
  * If it isn't, park the CPU
  */
 	.section	".idmap.text", "ax"
-__enable_mmu:
+ENTRY(__enable_mmu)
+	mrs	x22, sctlr_el1			// preserve old SCTLR_EL1 value
 	mrs	x1, ID_AA64MMFR0_EL1
 	ubfx	x2, x1, #ID_AA64MMFR0_TGRAN_SHIFT, 4
 	cmp	x2, #ID_AA64MMFR0_TGRAN_SUPPORTED
 	b.ne	__no_granule_support
-	ldr	x5, =vectors
-	msr	vbar_el1, x5
 	msr	ttbr0_el1, x25			// load TTBR0
 	msr	ttbr1_el1, x26			// load TTBR1
 	isb
@@ -649,6 +699,28 @@
 	ic	iallu
 	dsb	nsh
 	isb
+#ifdef CONFIG_RANDOMIZE_BASE
+	mov	x19, x0				// preserve new SCTLR_EL1 value
+	blr	x27
+
+	/*
+	 * If we return here, we have a KASLR displacement in x23 which we need
+	 * to take into account by discarding the current kernel mapping and
+	 * creating a new one.
+	 */
+	msr	sctlr_el1, x22			// disable the MMU
+	isb
+	bl	__create_page_tables		// recreate kernel mapping
+
+	tlbi	vmalle1				// Remove any stale TLB entries
+	dsb	nsh
+
+	msr	sctlr_el1, x19			// re-enable the MMU
+	isb
+	ic	iallu				// flush instructions fetched
+	dsb	nsh				// via old mapping
+	isb
+#endif
 	br	x27
 ENDPROC(__enable_mmu)
 
@@ -656,3 +728,38 @@
 	wfe
 	b __no_granule_support
 ENDPROC(__no_granule_support)
+
+__primary_switch:
+#ifdef CONFIG_RELOCATABLE
+	/*
+	 * Iterate over each entry in the relocation table, and apply the
+	 * relocations in place.
+	 */
+	ldr	w9, =__rela_offset		// offset to reloc table
+	ldr	w10, =__rela_size		// size of reloc table
+
+	mov_q	x11, KIMAGE_VADDR		// default virtual offset
+	add	x11, x11, x23			// actual virtual offset
+	add	x9, x9, x11			// __va(.rela)
+	add	x10, x9, x10			// __va(.rela) + sizeof(.rela)
+
+0:	cmp	x9, x10
+	b.hs	1f
+	ldp	x11, x12, [x9], #24
+	ldr	x13, [x9, #-8]
+	cmp	w12, #R_AARCH64_RELATIVE
+	b.ne	0b
+	add	x13, x13, x23			// relocate
+	str	x13, [x11, x23]
+	b	0b
+
+1:
+#endif
+	ldr	x8, =__primary_switched
+	br	x8
+ENDPROC(__primary_switch)
+
+__secondary_switch:
+	ldr	x8, =__secondary_switched
+	br	x8
+ENDPROC(__secondary_switch)
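
The CONFIG_RELOCATABLE loop in __primary_switch above walks the .rela table
and, for every R_AARCH64_RELATIVE entry, stores addend plus the KASLR
displacement (x23) at the relocated place, looping past any other
relocation type. The same fixup over a toy in-memory image (offsets here
are relative to the image base rather than virtual addresses):

	#include <stddef.h>
	#include <stdint.h>
	#include <stdio.h>

	#define R_AARCH64_RELATIVE	1027

	struct rela { uint64_t offset, info, addend; };	/* Elf64_Rela layout */

	int main(void)
	{
		uint64_t image[4] = { 0 };
		uint64_t kaslr_offset = 0x200000;	/* the role of x23 */

		struct rela table[] = {
			/* slot 2 holds a pointer; its link-time value is
			 * carried in the addend */
			{ 2 * sizeof(uint64_t), R_AARCH64_RELATIVE, 0x8080000ULL },
		};

		for (size_t i = 0; i < sizeof(table) / sizeof(table[0]); i++) {
			if ((uint32_t)table[i].info != R_AARCH64_RELATIVE)
				continue;	/* the asm just skips these */
			uint64_t *place =
				(uint64_t *)((char *)image + table[i].offset);
			*place = table[i].addend + kaslr_offset;
		}

		printf("image[2]=%#llx\n", (unsigned long long)image[2]);
		return 0;
	}
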
diff -ruw linux-4.4.115/arch/arm64/kernel/hyp-stub.S linux-4.4.115-fbx/arch/arm64/kernel/hyp-stub.S
--- linux-4.4.115/arch/arm64/kernel/hyp-stub.S	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/arch/arm64/kernel/hyp-stub.S	2019-10-29 09:26:23.005196935 +0100
@@ -22,6 +22,8 @@
 #include <linux/irqchip/arm-gic-v3.h>
 
 #include <asm/assembler.h>
+#include <asm/kvm_arm.h>
+#include <asm/kvm_asm.h>
 #include <asm/ptrace.h>
 #include <asm/virt.h>
 
@@ -53,15 +55,26 @@
 	.align 11
 
 el1_sync:
-	mrs	x1, esr_el2
-	lsr	x1, x1, #26
-	cmp	x1, #0x16
-	b.ne	2f				// Not an HVC trap
-	cbz	x0, 1f
-	msr	vbar_el2, x0			// Set vbar_el2
-	b	2f
-1:	mrs	x0, vbar_el2			// Return vbar_el2
-2:	eret
+	mrs	x30, esr_el2
+	lsr	x30, x30, #ESR_ELx_EC_SHIFT
+
+	cmp	x30, #ESR_ELx_EC_HVC64
+	b.ne	9f				// Not an HVC trap
+
+	cmp	x0, #HVC_GET_VECTORS
+	b.ne	1f
+	mrs	x0, vbar_el2
+	b	9f
+
+1:	cmp	x0, #HVC_SET_VECTORS
+	b.ne	2f
+	msr	vbar_el2, x1
+	b	9f
+
+	/* Someone called kvm_call_hyp() against the hyp-stub... */
+2:	mov     x0, #ARM_EXCEPTION_HYP_GONE
+
+9:	eret
 ENDPROC(el1_sync)
 
 .macro invalid_vector	label
@@ -101,10 +114,18 @@
  */
 
 ENTRY(__hyp_get_vectors)
-	mov	x0, xzr
-	// fall through
-ENTRY(__hyp_set_vectors)
+	str	lr, [sp, #-16]!
+	mov	x0, #HVC_GET_VECTORS
 	hvc	#0
+	ldr	lr, [sp], #16
 	ret
 ENDPROC(__hyp_get_vectors)
+
+ENTRY(__hyp_set_vectors)
+	str	lr, [sp, #-16]!
+	mov	x1, x0
+	mov	x0, #HVC_SET_VECTORS
+	hvc	#0
+	ldr	lr, [sp], #16
+	ret
 ENDPROC(__hyp_set_vectors)
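
The reworked el1_sync above dispatches on a function ID passed in x0
(HVC_GET_VECTORS or HVC_SET_VECTORS, with the new vector base in x1)
instead of overloading a NULL check on x0, and answers anything else with
ARM_EXCEPTION_HYP_GONE so a stale kvm_call_hyp() fails visibly. The
dispatcher's shape in C (the IDs and the GONE value are illustrative; the
real ones live in asm/virt.h and asm/kvm_asm.h):

	#include <stdint.h>
	#include <stdio.h>

	enum { HVC_GET_VECTORS, HVC_SET_VECTORS };
	#define ARM_EXCEPTION_HYP_GONE	0xbadca11ULL

	static uint64_t vbar_el2;

	static uint64_t el1_sync(uint64_t x0, uint64_t x1)
	{
		switch (x0) {
		case HVC_GET_VECTORS:
			return vbar_el2;
		case HVC_SET_VECTORS:
			vbar_el2 = x1;	/* vector base now travels in x1 */
			return 0;
		default:
			/* someone called kvm_call_hyp() against the stub */
			return ARM_EXCEPTION_HYP_GONE;
		}
	}

	int main(void)
	{
		el1_sync(HVC_SET_VECTORS, 0x80001000);
		printf("vectors=%#llx\n",
		       (unsigned long long)el1_sync(HVC_GET_VECTORS, 0));
		return 0;
	}
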
diff -ruw linux-4.4.115/arch/arm64/kernel/image.h linux-4.4.115-fbx/arch/arm64/kernel/image.h
--- linux-4.4.115/arch/arm64/kernel/image.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/arch/arm64/kernel/image.h	2019-10-29 09:26:23.005196935 +0100
@@ -26,21 +26,27 @@
  * There aren't any ELF relocations we can use to endian-swap values known only
  * at link time (e.g. the subtraction of two symbol addresses), so we must get
  * the linker to endian-swap certain values before emitting them.
+ *
+ * Note that, in order for this to work when building the ELF64 PIE executable
+ * (for KASLR), these values should not be referenced via R_AARCH64_ABS64
+ * relocations, since these are fixed up at runtime rather than at build time
+ * when PIE is in effect. So we need to split them up in 32-bit high and low
+ * words.
  */
 #ifdef CONFIG_CPU_BIG_ENDIAN
-#define DATA_LE64(data)					\
-	((((data) & 0x00000000000000ff) << 56) |	\
-	 (((data) & 0x000000000000ff00) << 40) |	\
-	 (((data) & 0x0000000000ff0000) << 24) |	\
-	 (((data) & 0x00000000ff000000) << 8)  |	\
-	 (((data) & 0x000000ff00000000) >> 8)  |	\
-	 (((data) & 0x0000ff0000000000) >> 24) |	\
-	 (((data) & 0x00ff000000000000) >> 40) |	\
-	 (((data) & 0xff00000000000000) >> 56))
+#define DATA_LE32(data)				\
+	((((data) & 0x000000ff) << 24) |	\
+	 (((data) & 0x0000ff00) << 8)  |	\
+	 (((data) & 0x00ff0000) >> 8)  |	\
+	 (((data) & 0xff000000) >> 24))
 #else
-#define DATA_LE64(data) ((data) & 0xffffffffffffffff)
+#define DATA_LE32(data) ((data) & 0xffffffff)
 #endif
 
+#define DEFINE_IMAGE_LE64(sym, data)				\
+	sym##_lo32 = DATA_LE32((data) & 0xffffffff);		\
+	sym##_hi32 = DATA_LE32((data) >> 32)
+
 #ifdef CONFIG_CPU_BIG_ENDIAN
 #define __HEAD_FLAG_BE	1
 #else
@@ -49,8 +55,11 @@
 
 #define __HEAD_FLAG_PAGE_SIZE ((PAGE_SHIFT - 10) / 2)
 
+#define __HEAD_FLAG_PHYS_BASE	1
+
 #define __HEAD_FLAGS	((__HEAD_FLAG_BE << 0) |	\
-			 (__HEAD_FLAG_PAGE_SIZE << 1))
+				 (__HEAD_FLAG_PAGE_SIZE << 1) |	\
+				 (__HEAD_FLAG_PHYS_BASE << 3))
 
 /*
  * These will output as part of the Image header, which should be little-endian
@@ -58,12 +67,24 @@
  * endian swapped in head.S, all are done here for consistency.
  */
 #define HEAD_SYMBOLS						\
-	_kernel_size_le		= DATA_LE64(_end - _text);	\
-	_kernel_offset_le	= DATA_LE64(TEXT_OFFSET);	\
-	_kernel_flags_le	= DATA_LE64(__HEAD_FLAGS);
+	DEFINE_IMAGE_LE64(_kernel_size_le, _end - _text);	\
+	DEFINE_IMAGE_LE64(_kernel_offset_le, TEXT_OFFSET);	\
+	DEFINE_IMAGE_LE64(_kernel_flags_le, __HEAD_FLAGS);
 
 #ifdef CONFIG_EFI
 
+__efistub_stext_offset = stext - _text;
+
+/*
+ * Prevent the symbol aliases below from being emitted into the kallsyms
+ * table, by forcing them to be absolute symbols (which are conveniently
+ * ignored by scripts/kallsyms) rather than section relative symbols.
+ * The distinction is only relevant for partial linking, and only for symbols
+ * that are defined within a section declaration (which is not the case for
+ * the definitions below) so the resulting values will be identical.
+ */
+#define KALLSYMS_HIDE(sym)	ABSOLUTE(sym)
+
 /*
  * The EFI stub has its own symbol namespace prefixed by __efistub_, to
  * isolate it from the kernel proper. The following symbols are legally
@@ -73,25 +94,25 @@
  * linked at. The routines below are all implemented in assembler in a
  * position independent manner
  */
-__efistub_memcmp		= __pi_memcmp;
-__efistub_memchr		= __pi_memchr;
-__efistub_memcpy		= __pi_memcpy;
-__efistub_memmove		= __pi_memmove;
-__efistub_memset		= __pi_memset;
-__efistub_strlen		= __pi_strlen;
-__efistub_strcmp		= __pi_strcmp;
-__efistub_strncmp		= __pi_strncmp;
-__efistub___flush_dcache_area	= __pi___flush_dcache_area;
+__efistub_memcmp		= KALLSYMS_HIDE(__pi_memcmp);
+__efistub_memchr		= KALLSYMS_HIDE(__pi_memchr);
+__efistub_memcpy		= KALLSYMS_HIDE(__pi_memcpy);
+__efistub_memmove		= KALLSYMS_HIDE(__pi_memmove);
+__efistub_memset		= KALLSYMS_HIDE(__pi_memset);
+__efistub_strlen		= KALLSYMS_HIDE(__pi_strlen);
+__efistub_strcmp		= KALLSYMS_HIDE(__pi_strcmp);
+__efistub_strncmp		= KALLSYMS_HIDE(__pi_strncmp);
+__efistub___flush_dcache_area	= KALLSYMS_HIDE(__pi___flush_dcache_area);
 
 #ifdef CONFIG_KASAN
-__efistub___memcpy		= __pi_memcpy;
-__efistub___memmove		= __pi_memmove;
-__efistub___memset		= __pi_memset;
+__efistub___memcpy		= KALLSYMS_HIDE(__pi_memcpy);
+__efistub___memmove		= KALLSYMS_HIDE(__pi_memmove);
+__efistub___memset		= KALLSYMS_HIDE(__pi_memset);
 #endif
 
-__efistub__text			= _text;
-__efistub__end			= _end;
-__efistub__edata		= _edata;
+__efistub__text			= KALLSYMS_HIDE(_text);
+__efistub__end			= KALLSYMS_HIDE(_end);
+__efistub__edata		= KALLSYMS_HIDE(_edata);
 
 #endif
 
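
DEFINE_IMAGE_LE64 above splits each 64-bit Image-header value into two
32-bit halves, each independently byte-swapped by DATA_LE32 on big-endian
builds, so the header can be emitted without R_AARCH64_ABS64 relocations
(which a PIE/KASLR link would try to fix up at runtime). The swap-and-split
in C:

	#include <stdint.h>
	#include <stdio.h>

	/* Same arithmetic as the header's DATA_LE32() on a BE build. */
	static uint32_t data_le32(uint32_t v)
	{
		return ((v & 0x000000ff) << 24) |
		       ((v & 0x0000ff00) << 8)  |
		       ((v & 0x00ff0000) >> 8)  |
		       ((v & 0xff000000) >> 24);
	}

	int main(void)
	{
		uint64_t kernel_size = 0x1400000ULL;	/* example value */

		/* DEFINE_IMAGE_LE64(sym, kernel_size) emits these two. */
		uint32_t lo32 = data_le32(kernel_size & 0xffffffff);
		uint32_t hi32 = data_le32(kernel_size >> 32);

		printf("sym_lo32=%#x sym_hi32=%#x\n", lo32, hi32);
		return 0;
	}
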
diff -ruw linux-4.4.115/arch/arm64/kernel/insn.c linux-4.4.115-fbx/arch/arm64/kernel/insn.c
--- linux-4.4.115/arch/arm64/kernel/insn.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/arch/arm64/kernel/insn.c	2019-01-22 16:16:21.551228730 +0100
@@ -2,7 +2,7 @@
  * Copyright (C) 2013 Huawei Ltd.
  * Author: Jiang Liu <liuj97@gmail.com>
  *
- * Copyright (C) 2014 Zi Shen Lim <zlim.lnx@gmail.com>
+ * Copyright (C) 2014-2016 Zi Shen Lim <zlim.lnx@gmail.com>
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -30,6 +30,7 @@
 #include <asm/cacheflush.h>
 #include <asm/debug-monitors.h>
 #include <asm/fixmap.h>
+#include <asm/opcodes.h>
 #include <asm/insn.h>
 
 #define AARCH64_INSN_SF_BIT	BIT(31)
@@ -95,8 +96,9 @@
 
 	if (module && IS_ENABLED(CONFIG_DEBUG_SET_MODULE_RONX))
 		page = vmalloc_to_page(addr);
-	else if (!module && IS_ENABLED(CONFIG_DEBUG_RODATA))
-		page = virt_to_page(addr);
+	else if (!module && (IS_ENABLED(CONFIG_DEBUG_RODATA)
+			       	|| IS_ENABLED(CONFIG_KERNEL_TEXT_RDONLY)))
+		page = phys_to_page(__pa_symbol(addr));
 	else
 		return addr;
 
@@ -162,6 +164,32 @@
 		aarch64_insn_is_nop(insn);
 }
 
+bool __kprobes aarch64_insn_uses_literal(u32 insn)
+{
+	/* ldr/ldrsw (literal), prfm */
+
+	return aarch64_insn_is_ldr_lit(insn) ||
+		aarch64_insn_is_ldrsw_lit(insn) ||
+		aarch64_insn_is_adr_adrp(insn) ||
+		aarch64_insn_is_prfm_lit(insn);
+}
+
+bool __kprobes aarch64_insn_is_branch(u32 insn)
+{
+	/* b, bl, cb*, tb*, b.cond, br, blr */
+
+	return aarch64_insn_is_b(insn) ||
+		aarch64_insn_is_bl(insn) ||
+		aarch64_insn_is_cbz(insn) ||
+		aarch64_insn_is_cbnz(insn) ||
+		aarch64_insn_is_tbz(insn) ||
+		aarch64_insn_is_tbnz(insn) ||
+		aarch64_insn_is_ret(insn) ||
+		aarch64_insn_is_br(insn) ||
+		aarch64_insn_is_blr(insn) ||
+		aarch64_insn_is_bcond(insn);
+}
+
 /*
  * ARM Architecture Reference Manual for ARMv8 Profile-A, Issue A.a
  * Section B2.6.5 "Concurrent modification and execution of instructions":
@@ -363,6 +391,9 @@
 	u32 immlo, immhi, mask;
 	int shift;
 
+	if (insn == AARCH64_BREAK_FAULT)
+		return AARCH64_BREAK_FAULT;
+
 	switch (type) {
 	case AARCH64_INSN_IMM_ADR:
 		shift = 0;
@@ -377,7 +408,7 @@
 		if (aarch64_get_imm_shift_mask(type, &mask, &shift) < 0) {
 			pr_err("aarch64_insn_encode_immediate: unknown immediate encoding %d\n",
 			       type);
-			return 0;
+			return AARCH64_BREAK_FAULT;
 		}
 	}
 
@@ -394,9 +425,12 @@
 {
 	int shift;
 
+	if (insn == AARCH64_BREAK_FAULT)
+		return AARCH64_BREAK_FAULT;
+
 	if (reg < AARCH64_INSN_REG_0 || reg > AARCH64_INSN_REG_SP) {
 		pr_err("%s: unknown register encoding %d\n", __func__, reg);
-		return 0;
+		return AARCH64_BREAK_FAULT;
 	}
 
 	switch (type) {
@@ -417,7 +451,7 @@
 	default:
 		pr_err("%s: unknown register type encoding %d\n", __func__,
 		       type);
-		return 0;
+		return AARCH64_BREAK_FAULT;
 	}
 
 	insn &= ~(GENMASK(4, 0) << shift);
@@ -446,7 +480,7 @@
 		break;
 	default:
 		pr_err("%s: unknown size encoding %d\n", __func__, type);
-		return 0;
+		return AARCH64_BREAK_FAULT;
 	}
 
 	insn &= ~GENMASK(31, 30);
@@ -460,14 +494,17 @@
 {
 	long offset;
 
-	/*
-	 * PC: A 64-bit Program Counter holding the address of the current
-	 * instruction. A64 instructions must be word-aligned.
-	 */
-	BUG_ON((pc & 0x3) || (addr & 0x3));
+	if ((pc & 0x3) || (addr & 0x3)) {
+		pr_err("%s: A64 instructions must be word aligned\n", __func__);
+		return range;
+	}
 
 	offset = ((long)addr - (long)pc);
-	BUG_ON(offset < -range || offset >= range);
+
+	if (offset < -range || offset >= range) {
+		pr_err("%s: offset out of range\n", __func__);
+		return range;
+	}
 
 	return offset;
 }
@@ -484,6 +521,8 @@
 	 * texts are within +/-128M.
 	 */
 	offset = branch_imm_common(pc, addr, SZ_128M);
+	if (offset >= SZ_128M)
+		return AARCH64_BREAK_FAULT;
 
 	switch (type) {
 	case AARCH64_INSN_BRANCH_LINK:
@@ -493,7 +532,7 @@
 		insn = aarch64_insn_get_b_value();
 		break;
 	default:
-		BUG_ON(1);
+		pr_err("%s: unknown branch encoding %d\n", __func__, type);
 		return AARCH64_BREAK_FAULT;
 	}
 
@@ -510,6 +549,8 @@
 	long offset;
 
 	offset = branch_imm_common(pc, addr, SZ_1M);
+	if (offset >= SZ_1M)
+		return AARCH64_BREAK_FAULT;
 
 	switch (type) {
 	case AARCH64_INSN_BRANCH_COMP_ZERO:
@@ -519,7 +560,7 @@
 		insn = aarch64_insn_get_cbnz_value();
 		break;
 	default:
-		BUG_ON(1);
+		pr_err("%s: unknown branch encoding %d\n", __func__, type);
 		return AARCH64_BREAK_FAULT;
 	}
 
@@ -530,7 +571,7 @@
 		insn |= AARCH64_INSN_SF_BIT;
 		break;
 	default:
-		BUG_ON(1);
+		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
 		return AARCH64_BREAK_FAULT;
 	}
 
@@ -550,7 +591,10 @@
 
 	insn = aarch64_insn_get_bcond_value();
 
-	BUG_ON(cond < AARCH64_INSN_COND_EQ || cond > AARCH64_INSN_COND_AL);
+	if (cond < AARCH64_INSN_COND_EQ || cond > AARCH64_INSN_COND_AL) {
+		pr_err("%s: unknown condition encoding %d\n", __func__, cond);
+		return AARCH64_BREAK_FAULT;
+	}
 	insn |= cond;
 
 	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_19, insn,
@@ -583,7 +627,7 @@
 		insn = aarch64_insn_get_ret_value();
 		break;
 	default:
-		BUG_ON(1);
+		pr_err("%s: unknown branch encoding %d\n", __func__, type);
 		return AARCH64_BREAK_FAULT;
 	}
 
@@ -606,7 +650,7 @@
 		insn = aarch64_insn_get_str_reg_value();
 		break;
 	default:
-		BUG_ON(1);
+		pr_err("%s: unknown load/store encoding %d\n", __func__, type);
 		return AARCH64_BREAK_FAULT;
 	}
 
@@ -645,26 +689,30 @@
 		insn = aarch64_insn_get_stp_post_value();
 		break;
 	default:
-		BUG_ON(1);
+		pr_err("%s: unknown load/store encoding %d\n", __func__, type);
 		return AARCH64_BREAK_FAULT;
 	}
 
 	switch (variant) {
 	case AARCH64_INSN_VARIANT_32BIT:
-		/* offset must be multiples of 4 in the range [-256, 252] */
-		BUG_ON(offset & 0x3);
-		BUG_ON(offset < -256 || offset > 252);
+		if ((offset & 0x3) || (offset < -256) || (offset > 252)) {
+			pr_err("%s: offset must be multiples of 4 in the range of [-256, 252] %d\n",
+			       __func__, offset);
+			return AARCH64_BREAK_FAULT;
+		}
 		shift = 2;
 		break;
 	case AARCH64_INSN_VARIANT_64BIT:
-		/* offset must be multiples of 8 in the range [-512, 504] */
-		BUG_ON(offset & 0x7);
-		BUG_ON(offset < -512 || offset > 504);
+		if ((offset & 0x7) || (offset < -512) || (offset > 504)) {
+			pr_err("%s: offset must be multiples of 8 in the range of [-512, 504] %d\n",
+			       __func__, offset);
+			return AARCH64_BREAK_FAULT;
+		}
 		shift = 3;
 		insn |= AARCH64_INSN_SF_BIT;
 		break;
 	default:
-		BUG_ON(1);
+		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
 		return AARCH64_BREAK_FAULT;
 	}
 
@@ -702,7 +750,7 @@
 		insn = aarch64_insn_get_subs_imm_value();
 		break;
 	default:
-		BUG_ON(1);
+		pr_err("%s: unknown add/sub encoding %d\n", __func__, type);
 		return AARCH64_BREAK_FAULT;
 	}
 
@@ -713,11 +761,14 @@
 		insn |= AARCH64_INSN_SF_BIT;
 		break;
 	default:
-		BUG_ON(1);
+		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
 		return AARCH64_BREAK_FAULT;
 	}
 
-	BUG_ON(imm & ~(SZ_4K - 1));
+	if (imm & ~(SZ_4K - 1)) {
+		pr_err("%s: invalid immediate encoding %d\n", __func__, imm);
+		return AARCH64_BREAK_FAULT;
+	}
 
 	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);
 
@@ -746,7 +797,7 @@
 		insn = aarch64_insn_get_sbfm_value();
 		break;
 	default:
-		BUG_ON(1);
+		pr_err("%s: unknown bitfield encoding %d\n", __func__, type);
 		return AARCH64_BREAK_FAULT;
 	}
 
@@ -759,12 +810,18 @@
 		mask = GENMASK(5, 0);
 		break;
 	default:
-		BUG_ON(1);
+		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
 		return AARCH64_BREAK_FAULT;
 	}
 
-	BUG_ON(immr & ~mask);
-	BUG_ON(imms & ~mask);
+	if (immr & ~mask) {
+		pr_err("%s: invalid immr encoding %d\n", __func__, immr);
+		return AARCH64_BREAK_FAULT;
+	}
+	if (imms & ~mask) {
+		pr_err("%s: invalid imms encoding %d\n", __func__, imms);
+		return AARCH64_BREAK_FAULT;
+	}
 
 	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);
 
@@ -793,23 +850,33 @@
 		insn = aarch64_insn_get_movn_value();
 		break;
 	default:
-		BUG_ON(1);
+		pr_err("%s: unknown movewide encoding %d\n", __func__, type);
 		return AARCH64_BREAK_FAULT;
 	}
 
-	BUG_ON(imm & ~(SZ_64K - 1));
+	if (imm & ~(SZ_64K - 1)) {
+		pr_err("%s: invalid immediate encoding %d\n", __func__, imm);
+		return AARCH64_BREAK_FAULT;
+	}
 
 	switch (variant) {
 	case AARCH64_INSN_VARIANT_32BIT:
-		BUG_ON(shift != 0 && shift != 16);
+		if (shift != 0 && shift != 16) {
+			pr_err("%s: invalid shift encoding %d\n", __func__,
+			       shift);
+			return AARCH64_BREAK_FAULT;
+		}
 		break;
 	case AARCH64_INSN_VARIANT_64BIT:
 		insn |= AARCH64_INSN_SF_BIT;
-		BUG_ON(shift != 0 && shift != 16 && shift != 32 &&
-		       shift != 48);
+		if (shift != 0 && shift != 16 && shift != 32 && shift != 48) {
+			pr_err("%s: invalid shift encoding %d\n", __func__,
+			       shift);
+			return AARCH64_BREAK_FAULT;
+		}
 		break;
 	default:
-		BUG_ON(1);
+		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
 		return AARCH64_BREAK_FAULT;
 	}
 
@@ -843,20 +910,28 @@
 		insn = aarch64_insn_get_subs_value();
 		break;
 	default:
-		BUG_ON(1);
+		pr_err("%s: unknown add/sub encoding %d\n", __func__, type);
 		return AARCH64_BREAK_FAULT;
 	}
 
 	switch (variant) {
 	case AARCH64_INSN_VARIANT_32BIT:
-		BUG_ON(shift & ~(SZ_32 - 1));
+		if (shift & ~(SZ_32 - 1)) {
+			pr_err("%s: invalid shift encoding %d\n", __func__,
+			       shift);
+			return AARCH64_BREAK_FAULT;
+		}
 		break;
 	case AARCH64_INSN_VARIANT_64BIT:
 		insn |= AARCH64_INSN_SF_BIT;
-		BUG_ON(shift & ~(SZ_64 - 1));
+		if (shift & ~(SZ_64 - 1)) {
+			pr_err("%s: invalid shift encoding %d\n", __func__,
+			       shift);
+			return AARCH64_BREAK_FAULT;
+		}
 		break;
 	default:
-		BUG_ON(1);
+		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
 		return AARCH64_BREAK_FAULT;
 	}
 
@@ -885,11 +960,15 @@
 		insn = aarch64_insn_get_rev32_value();
 		break;
 	case AARCH64_INSN_DATA1_REVERSE_64:
-		BUG_ON(variant != AARCH64_INSN_VARIANT_64BIT);
+		if (variant != AARCH64_INSN_VARIANT_64BIT) {
+			pr_err("%s: invalid variant for reverse64 %d\n",
+			       __func__, variant);
+			return AARCH64_BREAK_FAULT;
+		}
 		insn = aarch64_insn_get_rev64_value();
 		break;
 	default:
-		BUG_ON(1);
+		pr_err("%s: unknown data1 encoding %d\n", __func__, type);
 		return AARCH64_BREAK_FAULT;
 	}
 
@@ -900,7 +979,7 @@
 		insn |= AARCH64_INSN_SF_BIT;
 		break;
 	default:
-		BUG_ON(1);
+		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
 		return AARCH64_BREAK_FAULT;
 	}
 
@@ -937,7 +1016,7 @@
 		insn = aarch64_insn_get_rorv_value();
 		break;
 	default:
-		BUG_ON(1);
+		pr_err("%s: unknown data2 encoding %d\n", __func__, type);
 		return AARCH64_BREAK_FAULT;
 	}
 
@@ -948,7 +1027,7 @@
 		insn |= AARCH64_INSN_SF_BIT;
 		break;
 	default:
-		BUG_ON(1);
+		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
 		return AARCH64_BREAK_FAULT;
 	}
 
@@ -976,7 +1055,7 @@
 		insn = aarch64_insn_get_msub_value();
 		break;
 	default:
-		BUG_ON(1);
+		pr_err("%s: unknown data3 encoding %d\n", __func__, type);
 		return AARCH64_BREAK_FAULT;
 	}
 
@@ -987,7 +1066,7 @@
 		insn |= AARCH64_INSN_SF_BIT;
 		break;
 	default:
-		BUG_ON(1);
+		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
 		return AARCH64_BREAK_FAULT;
 	}
 
@@ -1037,20 +1116,28 @@
 		insn = aarch64_insn_get_bics_value();
 		break;
 	default:
-		BUG_ON(1);
+		pr_err("%s: unknown logical encoding %d\n", __func__, type);
 		return AARCH64_BREAK_FAULT;
 	}
 
 	switch (variant) {
 	case AARCH64_INSN_VARIANT_32BIT:
-		BUG_ON(shift & ~(SZ_32 - 1));
+		if (shift & ~(SZ_32 - 1)) {
+			pr_err("%s: invalid shift encoding %d\n", __func__,
+			       shift);
+			return AARCH64_BREAK_FAULT;
+		}
 		break;
 	case AARCH64_INSN_VARIANT_64BIT:
 		insn |= AARCH64_INSN_SF_BIT;
-		BUG_ON(shift & ~(SZ_64 - 1));
+		if (shift & ~(SZ_64 - 1)) {
+			pr_err("%s: invalid shift encoding %d\n", __func__,
+			       shift);
+			return AARCH64_BREAK_FAULT;
+		}
 		break;
 	default:
-		BUG_ON(1);
+		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
 		return AARCH64_BREAK_FAULT;
 	}
 
@@ -1116,6 +1203,14 @@
 	BUG();
 }
 
+/*
+ * Extract the Op/CR data from a msr/mrs instruction.
+ */
+u32 aarch64_insn_extract_system_reg(u32 insn)
+{
+	return (insn & 0x1FFFE0) >> 5;
+}
+
 bool aarch32_insn_is_wide(u32 insn)
 {
 	return insn >= 0xe800;
@@ -1141,3 +1236,101 @@
 {
 	return insn & CRM_MASK;
 }
+
+static bool __kprobes __check_eq(unsigned long pstate)
+{
+	return (pstate & PSR_Z_BIT) != 0;
+}
+
+static bool __kprobes __check_ne(unsigned long pstate)
+{
+	return (pstate & PSR_Z_BIT) == 0;
+}
+
+static bool __kprobes __check_cs(unsigned long pstate)
+{
+	return (pstate & PSR_C_BIT) != 0;
+}
+
+static bool __kprobes __check_cc(unsigned long pstate)
+{
+	return (pstate & PSR_C_BIT) == 0;
+}
+
+static bool __kprobes __check_mi(unsigned long pstate)
+{
+	return (pstate & PSR_N_BIT) != 0;
+}
+
+static bool __kprobes __check_pl(unsigned long pstate)
+{
+	return (pstate & PSR_N_BIT) == 0;
+}
+
+static bool __kprobes __check_vs(unsigned long pstate)
+{
+	return (pstate & PSR_V_BIT) != 0;
+}
+
+static bool __kprobes __check_vc(unsigned long pstate)
+{
+	return (pstate & PSR_V_BIT) == 0;
+}
+
+static bool __kprobes __check_hi(unsigned long pstate)
+{
+	pstate &= ~(pstate >> 1);	/* PSR_C_BIT &= ~PSR_Z_BIT */
+	return (pstate & PSR_C_BIT) != 0;
+}
+
+static bool __kprobes __check_ls(unsigned long pstate)
+{
+	pstate &= ~(pstate >> 1);	/* PSR_C_BIT &= ~PSR_Z_BIT */
+	return (pstate & PSR_C_BIT) == 0;
+}
+
+static bool __kprobes __check_ge(unsigned long pstate)
+{
+	pstate ^= (pstate << 3);	/* PSR_N_BIT ^= PSR_V_BIT */
+	return (pstate & PSR_N_BIT) == 0;
+}
+
+static bool __kprobes __check_lt(unsigned long pstate)
+{
+	pstate ^= (pstate << 3);	/* PSR_N_BIT ^= PSR_V_BIT */
+	return (pstate & PSR_N_BIT) != 0;
+}
+
+static bool __kprobes __check_gt(unsigned long pstate)
+{
+	/* PSR_N_BIT ^= PSR_V_BIT */
+	unsigned long temp = pstate ^ (pstate << 3);
+
+	temp |= (pstate << 1);	/* PSR_N_BIT |= PSR_Z_BIT */
+	return (temp & PSR_N_BIT) == 0;
+}
+
+static bool __kprobes __check_le(unsigned long pstate)
+{
+	/* PSR_N_BIT ^= PSR_V_BIT */
+	unsigned long temp = pstate ^ (pstate << 3);
+
+	temp |= (pstate << 1);	/* PSR_N_BIT |= PSR_Z_BIT */
+	return (temp & PSR_N_BIT) != 0;
+}
+
+static bool __kprobes __check_al(unsigned long pstate)
+{
+	return true;
+}
+
+/*
+ * Note that the ARMv8 ARM calls condition code 0b1111 "nv", but states that
+ * it behaves identically to 0b1110 ("al").
+ */
+pstate_check_t * const aarch32_opcode_cond_checks[16] = {
+	__check_eq, __check_ne, __check_cs, __check_cc,
+	__check_mi, __check_pl, __check_vs, __check_vc,
+	__check_hi, __check_ls, __check_ge, __check_lt,
+	__check_gt, __check_le, __check_al, __check_al
+};
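The __check_* helpers above replace branchy NZCV tests with shift tricks: shifting PSTATE folds one flag into another flag's bit position (N is bit 31, Z bit 30, C bit 29, V bit 28 of PSTATE). A small userspace cross-check of the three non-obvious cases against a direct flag evaluation; the bit positions and the harness are this sketch's assumptions, only the shift expressions are taken from the code above:

	#include <assert.h>
	#include <stdbool.h>
	#include <stdio.h>

	#define PSR_N (1UL << 31)	/* assumed PSTATE flag layout */
	#define PSR_Z (1UL << 30)
	#define PSR_C (1UL << 29)
	#define PSR_V (1UL << 28)

	/* The shift tricks used by __check_hi/__check_ge/__check_gt above. */
	static bool trick_hi(unsigned long p) { p &= ~(p >> 1); return p & PSR_C; }
	static bool trick_ge(unsigned long p) { p ^= (p << 3); return !(p & PSR_N); }
	static bool trick_gt(unsigned long p)
	{
		unsigned long t = p ^ (p << 3);	/* N ^= V */

		t |= (p << 1);			/* fold Z into the N position */
		return !(t & PSR_N);		/* !Z && (N == V) */
	}

	int main(void)
	{
		for (unsigned int i = 0; i < 16; i++) {
			bool n = i & 8, z = i & 4, c = i & 2, v = i & 1;
			unsigned long p = (n ? PSR_N : 0) | (z ? PSR_Z : 0) |
					  (c ? PSR_C : 0) | (v ? PSR_V : 0);

			assert(trick_hi(p) == (c && !z));	/* hi: C && !Z */
			assert(trick_ge(p) == (n == v));	/* ge: N == V */
			assert(trick_gt(p) == (!z && n == v));	/* gt */
		}
		printf("all 16 NZCV combinations agree\n");
		return 0;
	}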
diff -ruw linux-4.4.115/arch/arm64/kernel/io.c linux-4.4.115-fbx/arch/arm64/kernel/io.c
--- linux-4.4.115/arch/arm64/kernel/io.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/arch/arm64/kernel/io.c	2019-01-22 16:16:21.551228730 +0100
@@ -19,29 +19,29 @@
 #include <linux/export.h>
 #include <linux/types.h>
 #include <linux/io.h>
+#include <linux/msm_rtb.h>
 
 /*
  * Copy data from IO memory space to "real" memory space.
  */
 void __memcpy_fromio(void *to, const volatile void __iomem *from, size_t count)
 {
-	while (count && (!IS_ALIGNED((unsigned long)from, 8) ||
-			 !IS_ALIGNED((unsigned long)to, 8))) {
-		*(u8 *)to = __raw_readb(from);
+	while (count && !IS_ALIGNED((unsigned long)from, 8)) {
+		*(u8 *)to = __raw_readb_no_log(from);
 		from++;
 		to++;
 		count--;
 	}
 
 	while (count >= 8) {
-		*(u64 *)to = __raw_readq(from);
+		*(u64 *)to = __raw_readq_no_log(from);
 		from += 8;
 		to += 8;
 		count -= 8;
 	}
 
 	while (count) {
-		*(u8 *)to = __raw_readb(from);
+		*(u8 *)to = __raw_readb_no_log(from);
 		from++;
 		to++;
 		count--;
@@ -54,23 +54,22 @@
  */
 void __memcpy_toio(volatile void __iomem *to, const void *from, size_t count)
 {
-	while (count && (!IS_ALIGNED((unsigned long)to, 8) ||
-			 !IS_ALIGNED((unsigned long)from, 8))) {
-		__raw_writeb(*(volatile u8 *)from, to);
+	while (count && !IS_ALIGNED((unsigned long)to, 8)) {
+		__raw_writeb_no_log(*(u8 *)from, to);
 		from++;
 		to++;
 		count--;
 	}
 
 	while (count >= 8) {
-		__raw_writeq(*(volatile u64 *)from, to);
+		__raw_writeq_no_log(*(u64 *)from, to);
 		from += 8;
 		to += 8;
 		count -= 8;
 	}
 
 	while (count) {
-		__raw_writeb(*(volatile u8 *)from, to);
+		__raw_writeb_no_log(*(u8 *)from, to);
 		from++;
 		to++;
 		count--;
@@ -90,19 +89,19 @@
 	qc |= qc << 32;
 
 	while (count && !IS_ALIGNED((unsigned long)dst, 8)) {
-		__raw_writeb(c, dst);
+		__raw_writeb_no_log(c, dst);
 		dst++;
 		count--;
 	}
 
 	while (count >= 8) {
-		__raw_writeq(qc, dst);
+		__raw_writeq_no_log(qc, dst);
 		dst += 8;
 		count -= 8;
 	}
 
 	while (count) {
-		__raw_writeb(c, dst);
+		__raw_writeb_no_log(c, dst);
 		dst++;
 		count--;
 	}
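The rewritten __memcpy_fromio/__memcpy_toio align only the __iomem pointer and drop the alignment check on the normal-memory side, presumably because AArch64 tolerates unaligned accesses to normal memory while device memory must be accessed at natural alignment (the _no_log variants are the vendor's RTB-bypassing accessors). The head/body/tail chunking shape itself, in plain C; illustrative only, real MMIO must still go through the __raw_* accessors:

	#include <stdint.h>
	#include <string.h>

	/* Head/body/tail copy: byte accesses until src is 8-byte aligned,
	 * then 64-bit chunks, then a byte tail. A sketch of the loop shape
	 * used by __memcpy_fromio() above, not a drop-in replacement. */
	static void copy_chunked(void *to, const void *from, size_t count)
	{
		const uint8_t *s = from;
		uint8_t *d = to;

		while (count && ((uintptr_t)s & 7)) {	/* head */
			*d++ = *s++;
			count--;
		}
		while (count >= 8) {			/* 8-byte body */
			memcpy(d, s, 8);	/* one 64-bit load/store in practice */
			s += 8;
			d += 8;
			count -= 8;
		}
		while (count--)				/* tail */
			*d++ = *s++;
	}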
diff -ruw linux-4.4.115/arch/arm64/kernel/irq.c linux-4.4.115-fbx/arch/arm64/kernel/irq.c
--- linux-4.4.115/arch/arm64/kernel/irq.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/arch/arm64/kernel/irq.c	2019-01-22 16:16:21.551228730 +0100
@@ -30,6 +30,9 @@
 
 unsigned long irq_err_count;
 
+/* irq stack only needs to be 16 byte aligned - not IRQ_STACK_SIZE aligned. */
+DEFINE_PER_CPU(unsigned long [IRQ_STACK_SIZE/sizeof(long)], irq_stack) __aligned(16);
+
 int arch_show_interrupts(struct seq_file *p, int prec)
 {
 	show_ipi_list(p, prec);
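The comment spells out why __aligned(16) is enough: AAPCS64 only requires the stack pointer to stay 16-byte aligned, so the backing storage needs no IRQ_STACK_SIZE alignment. The equivalent single-stack declaration in standard C, with the size as this sketch's assumption:

	#include <stdalign.h>

	#define IRQ_STACK_SIZE	16384	/* assumed value for this sketch */

	/* 16-byte alignment suffices: AArch64 requires sp % 16 == 0 while the
	 * stack is in use, nothing stronger. */
	static alignas(16) unsigned long irq_stack[IRQ_STACK_SIZE / sizeof(long)];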
diff -ruw linux-4.4.115/arch/arm64/kernel/Makefile linux-4.4.115-fbx/arch/arm64/kernel/Makefile
--- linux-4.4.115/arch/arm64/kernel/Makefile	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/arch/arm64/kernel/Makefile	2019-01-22 16:16:21.547228694 +0100
@@ -14,10 +14,10 @@
 arm64-obj-y		:= debug-monitors.o entry.o irq.o fpsimd.o		\
 			   entry-fpsimd.o process.o ptrace.o setup.o signal.o	\
 			   sys.o stacktrace.o time.o traps.o io.o vdso.o	\
-			   hyp-stub.o psci.o psci-call.o cpu_ops.o insn.o	\
+			   hyp-stub.o psci.o cpu_ops.o insn.o	\
 			   return_address.o cpuinfo.o cpu_errata.o		\
 			   cpufeature.o alternative.o cacheinfo.o		\
-			   smp.o smp_spin_table.o topology.o
+			   smp.o smp_spin_table.o topology.o smccc-call.o
 
 extra-$(CONFIG_EFI)			:= efi-entry.o
 
@@ -26,12 +26,13 @@
 	$(call if_changed,objcopy)
 
 arm64-obj-$(CONFIG_COMPAT)		+= sys32.o kuser32.o signal32.o 	\
-					   sys_compat.o entry32.o		\
-					   ../../arm/kernel/opcodes.o
+					   sys_compat.o entry32.o
 arm64-obj-$(CONFIG_FUNCTION_TRACER)	+= ftrace.o entry-ftrace.o
 arm64-obj-$(CONFIG_MODULES)		+= arm64ksyms.o module.o
+arm64-obj-$(CONFIG_ARM64_MODULE_PLTS)	+= module-plts.o
 arm64-obj-$(CONFIG_PERF_EVENTS)		+= perf_regs.o perf_callchain.o
-arm64-obj-$(CONFIG_HW_PERF_EVENTS)	+= perf_event.o
+arm64-obj-$(CONFIG_HW_PERF_EVENTS)	+= perf_debug.o	perf_trace_counters.o	\
+					   perf_trace_user.o
 arm64-obj-$(CONFIG_HAVE_HW_BREAKPOINT)	+= hw_breakpoint.o
 arm64-obj-$(CONFIG_CPU_PM)		+= sleep.o suspend.o
 arm64-obj-$(CONFIG_CPU_IDLE)		+= cpuidle.o
@@ -41,12 +42,18 @@
 arm64-obj-$(CONFIG_PCI)			+= pci.o
 arm64-obj-$(CONFIG_ARMV8_DEPRECATED)	+= armv8_deprecated.o
 arm64-obj-$(CONFIG_ACPI)		+= acpi.o
+arm64-obj-$(CONFIG_RANDOMIZE_BASE)	+= kaslr.o
+arm64-obj-$(CONFIG_MSM_APP_API)		+= app_api.o
+arm64-obj-$(CONFIG_MSM_APP_SETTINGS)	+= app_setting.o
+arm64-obj-$(CONFIG_HIBERNATION)		+= hibernate.o hibernate-asm.o
+arm64-obj-$(CONFIG_ARM64_ACPI_PARKING_PROTOCOL)	+= acpi_parking_protocol.o
+arm64-obj-$(CONFIG_PARAVIRT)		+= paravirt.o
 
-obj-y					+= $(arm64-obj-y) vdso/
+ifeq ($(CONFIG_KVM),y)
+arm64-obj-$(CONFIG_HARDEN_BRANCH_PREDICTOR)	+= bpi.o
+endif
+
+obj-y					+= $(arm64-obj-y) vdso/ probes/
 obj-m					+= $(arm64-obj-m)
 head-y					:= head.o
 extra-y					+= $(head-y) vmlinux.lds
-
-# vDSO - this must be built first to generate the symbol offsets
-$(call objectify,$(arm64-obj-y)): $(obj)/vdso/vdso-offsets.h
-$(obj)/vdso/vdso-offsets.h: $(obj)/vdso
diff -ruw linux-4.4.115/arch/arm64/kernel/module.c linux-4.4.115-fbx/arch/arm64/kernel/module.c
--- linux-4.4.115/arch/arm64/kernel/module.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/arch/arm64/kernel/module.c	2019-10-29 09:26:23.005196935 +0100
@@ -30,17 +30,30 @@
 #include <asm/insn.h>
 #include <asm/sections.h>
 
-#define	AARCH64_INSN_IMM_MOVNZ		AARCH64_INSN_IMM_MAX
-#define	AARCH64_INSN_IMM_MOVK		AARCH64_INSN_IMM_16
-
 void *module_alloc(unsigned long size)
 {
 	void *p;
 
-	p = __vmalloc_node_range(size, MODULE_ALIGN, MODULES_VADDR, MODULES_END,
+	p = __vmalloc_node_range(size, MODULE_ALIGN, module_alloc_base,
+				module_alloc_base + MODULES_VSIZE,
 				GFP_KERNEL, PAGE_KERNEL_EXEC, 0,
 				NUMA_NO_NODE, __builtin_return_address(0));
 
+	if (!p && IS_ENABLED(CONFIG_ARM64_MODULE_PLTS) &&
+	    !IS_ENABLED(CONFIG_KASAN))
+		/*
+		 * KASAN can only deal with module allocations being served
+		 * from the reserved module region, since the remainder of
+		 * the vmalloc region is already backed by zero shadow pages,
+		 * and punching holes into it is non-trivial. Since the module
+		 * region is not randomized when KASAN is enabled, it is even
+		 * less likely that the module region gets exhausted, so we
+		 * can simply omit this fallback in that case.
+		 */
+		p = __vmalloc_node_range(size, MODULE_ALIGN, VMALLOC_START,
+				VMALLOC_END, GFP_KERNEL, PAGE_KERNEL_EXEC, 0,
+				NUMA_NO_NODE, __builtin_return_address(0));
+
 	if (p && (kasan_module_alloc(p, size) < 0)) {
 		vfree(p);
 		return NULL;
@@ -75,15 +88,18 @@
 
 static int reloc_data(enum aarch64_reloc_op op, void *place, u64 val, int len)
 {
-	u64 imm_mask = (1 << len) - 1;
 	s64 sval = do_reloc(op, place, val);
 
 	switch (len) {
 	case 16:
 		*(s16 *)place = sval;
+		if (sval < S16_MIN || sval > U16_MAX)
+			return -ERANGE;
 		break;
 	case 32:
 		*(s32 *)place = sval;
+		if (sval < S32_MIN || sval > U32_MAX)
+			return -ERANGE;
 		break;
 	case 64:
 		*(s64 *)place = sval;
@@ -92,34 +108,23 @@
 		pr_err("Invalid length (%d) for data relocation\n", len);
 		return 0;
 	}
-
-	/*
-	 * Extract the upper value bits (including the sign bit) and
-	 * shift them to bit 0.
-	 */
-	sval = (s64)(sval & ~(imm_mask >> 1)) >> (len - 1);
-
-	/*
-	 * Overflow has occurred if the value is not representable in
-	 * len bits (i.e the bottom len bits are not sign-extended and
-	 * the top bits are not all zero).
-	 */
-	if ((u64)(sval + 1) > 2)
-		return -ERANGE;
-
 	return 0;
 }
 
+enum aarch64_insn_movw_imm_type {
+	AARCH64_INSN_IMM_MOVNZ,
+	AARCH64_INSN_IMM_MOVKZ,
+};
+
 static int reloc_insn_movw(enum aarch64_reloc_op op, void *place, u64 val,
-			   int lsb, enum aarch64_insn_imm_type imm_type)
+			   int lsb, enum aarch64_insn_movw_imm_type imm_type)
 {
-	u64 imm, limit = 0;
+	u64 imm;
 	s64 sval;
 	u32 insn = le32_to_cpu(*(u32 *)place);
 
 	sval = do_reloc(op, place, val);
-	sval >>= lsb;
-	imm = sval & 0xffff;
+	imm = sval >> lsb;
 
 	if (imm_type == AARCH64_INSN_IMM_MOVNZ) {
 		/*
@@ -128,7 +133,7 @@
 		 * immediate is less than zero.
 		 */
 		insn &= ~(3 << 29);
-		if ((s64)imm >= 0) {
+		if (sval >= 0) {
 			/* >=0: Set the instruction to MOVZ (opcode 10b). */
 			insn |= 2 << 29;
 		} else {
@@ -140,29 +145,13 @@
 			 */
 			imm = ~imm;
 		}
-		imm_type = AARCH64_INSN_IMM_MOVK;
 	}
 
 	/* Update the instruction with the new encoding. */
-	insn = aarch64_insn_encode_immediate(imm_type, insn, imm);
+	insn = aarch64_insn_encode_immediate(AARCH64_INSN_IMM_16, insn, imm);
 	*(u32 *)place = cpu_to_le32(insn);
 
-	/* Shift out the immediate field. */
-	sval >>= 16;
-
-	/*
-	 * For unsigned immediates, the overflow check is straightforward.
-	 * For signed immediates, the sign bit is actually the bit past the
-	 * most significant bit of the field.
-	 * The AARCH64_INSN_IMM_16 immediate type is unsigned.
-	 */
-	if (imm_type != AARCH64_INSN_IMM_16) {
-		sval++;
-		limit++;
-	}
-
-	/* Check the upper bits depending on the sign of the immediate. */
-	if ((u64)sval > limit)
+	if (imm > U16_MAX)
 		return -ERANGE;
 
 	return 0;
@@ -267,25 +256,25 @@
 			overflow_check = false;
 		case R_AARCH64_MOVW_UABS_G0:
 			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 0,
-					      AARCH64_INSN_IMM_16);
+					      AARCH64_INSN_IMM_MOVKZ);
 			break;
 		case R_AARCH64_MOVW_UABS_G1_NC:
 			overflow_check = false;
 		case R_AARCH64_MOVW_UABS_G1:
 			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 16,
-					      AARCH64_INSN_IMM_16);
+					      AARCH64_INSN_IMM_MOVKZ);
 			break;
 		case R_AARCH64_MOVW_UABS_G2_NC:
 			overflow_check = false;
 		case R_AARCH64_MOVW_UABS_G2:
 			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 32,
-					      AARCH64_INSN_IMM_16);
+					      AARCH64_INSN_IMM_MOVKZ);
 			break;
 		case R_AARCH64_MOVW_UABS_G3:
 			/* We're using the top bits so we can't overflow. */
 			overflow_check = false;
 			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 48,
-					      AARCH64_INSN_IMM_16);
+					      AARCH64_INSN_IMM_MOVKZ);
 			break;
 		case R_AARCH64_MOVW_SABS_G0:
 			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 0,
@@ -302,7 +291,7 @@
 		case R_AARCH64_MOVW_PREL_G0_NC:
 			overflow_check = false;
 			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 0,
-					      AARCH64_INSN_IMM_MOVK);
+					      AARCH64_INSN_IMM_MOVKZ);
 			break;
 		case R_AARCH64_MOVW_PREL_G0:
 			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 0,
@@ -311,7 +300,7 @@
 		case R_AARCH64_MOVW_PREL_G1_NC:
 			overflow_check = false;
 			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 16,
-					      AARCH64_INSN_IMM_MOVK);
+					      AARCH64_INSN_IMM_MOVKZ);
 			break;
 		case R_AARCH64_MOVW_PREL_G1:
 			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 16,
@@ -320,7 +309,7 @@
 		case R_AARCH64_MOVW_PREL_G2_NC:
 			overflow_check = false;
 			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 32,
-					      AARCH64_INSN_IMM_MOVK);
+					      AARCH64_INSN_IMM_MOVKZ);
 			break;
 		case R_AARCH64_MOVW_PREL_G2:
 			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 32,
@@ -388,6 +377,13 @@
 		case R_AARCH64_CALL26:
 			ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 26,
 					     AARCH64_INSN_IMM_26);
+
+			if (IS_ENABLED(CONFIG_ARM64_MODULE_PLTS) &&
+			    ovf == -ERANGE) {
+				val = module_emit_plt_entry(me, &rel[i], sym);
+				ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2,
+						     26, AARCH64_INSN_IMM_26);
+			}
 			break;
 
 		default:
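reloc_insn_movw() now decides between MOVZ and MOVN from the sign of the full relocated value (sval), complements the immediate for the negative case, and range-checks the 16-bit field afterwards. A standalone sketch of that selection plus a round-trip check; the enum, the helper names, and reconstruct() are inventions of this sketch, only the sign/complement/range logic mirrors the patch:

	#include <assert.h>
	#include <stdint.h>
	#include <stdio.h>

	enum movw_op { OP_MOVZ, OP_MOVN };

	/* Mirror of the MOVNZ handling in reloc_insn_movw() above:
	 * arithmetic shift keeps the sign, MOVN takes the complement. */
	static int pick_movw(int64_t sval, int lsb, enum movw_op *op, uint64_t *imm)
	{
		*imm = (uint64_t)(sval >> lsb);	/* arithmetic shift, as in the kernel */
		if (sval >= 0) {
			*op = OP_MOVZ;
		} else {
			*op = OP_MOVN;
			*imm = ~*imm;
		}
		return *imm > 0xffff ? -1 : 0;	/* -ERANGE in the kernel */
	}

	/* What the CPU computes: MOVZ zero-extends imm << lsb, MOVN writes
	 * its complement. */
	static int64_t reconstruct(enum movw_op op, uint64_t imm, int lsb)
	{
		uint64_t v = imm << lsb;

		return (int64_t)(op == OP_MOVZ ? v : ~v);
	}

	int main(void)
	{
		enum movw_op op;
		uint64_t imm;

		assert(pick_movw(0x1234, 0, &op, &imm) == 0 && op == OP_MOVZ);
		assert(reconstruct(op, imm, 0) == 0x1234);

		assert(pick_movw(-2, 0, &op, &imm) == 0 && op == OP_MOVN);
		assert(reconstruct(op, imm, 0) == -2);

		assert(pick_movw(0x123456, 0, &op, &imm) != 0);	/* G0 overflow */
		printf("movw selection round-trips\n");
		return 0;
	}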
diff -ruw linux-4.4.115/arch/arm64/kernel/perf_callchain.c linux-4.4.115-fbx/arch/arm64/kernel/perf_callchain.c
--- linux-4.4.115/arch/arm64/kernel/perf_callchain.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/arch/arm64/kernel/perf_callchain.c	2019-01-22 16:16:21.551228730 +0100
@@ -164,8 +164,11 @@
 	frame.fp = regs->regs[29];
 	frame.sp = regs->sp;
 	frame.pc = regs->pc;
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+	frame.graph = current->curr_ret_stack;
+#endif
 
-	walk_stackframe(&frame, callchain_trace, entry);
+	walk_stackframe(current, &frame, callchain_trace, entry);
 }
 
 unsigned long perf_instruction_pointer(struct pt_regs *regs)
diff -ruw linux-4.4.115/arch/arm64/kernel/process.c linux-4.4.115-fbx/arch/arm64/kernel/process.c
--- linux-4.4.115/arch/arm64/kernel/process.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/arch/arm64/kernel/process.c	2019-10-29 09:26:23.005196935 +0100
@@ -45,9 +45,14 @@
 #include <linux/personality.h>
 #include <linux/notifier.h>
 #include <trace/events/power.h>
+#ifdef CONFIG_THREAD_INFO_IN_TASK
+#include <linux/percpu.h>
+#endif
 
+#include <asm/alternative.h>
 #include <asm/compat.h>
 #include <asm/cacheflush.h>
+#include <asm/exec.h>
 #include <asm/fpsimd.h>
 #include <asm/mmu_context.h>
 #include <asm/processor.h>
@@ -82,6 +87,16 @@
 	trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
 }
 
+void arch_cpu_idle_enter(void)
+{
+	idle_notifier_call_chain(IDLE_START);
+}
+
+void arch_cpu_idle_exit(void)
+{
+	idle_notifier_call_chain(IDLE_END);
+}
+
 #ifdef CONFIG_HOTPLUG_CPU
 void arch_cpu_idle_dead(void)
 {
@@ -164,6 +179,64 @@
 	while (1);
 }
 
+/*
+ * dump a block of kernel memory from around the given address
+ */
+static void show_data(unsigned long addr, int nbytes, const char *name)
+{
+	int	i, j;
+	int	nlines;
+	u32	*p;
+
+	/*
+	 * don't attempt to dump non-kernel addresses or
+	 * values that are probably just small negative numbers
+	 */
+	if (addr < KIMAGE_VADDR || addr > -256UL)
+		return;
+
+	printk("\n%s: %#lx:\n", name, addr);
+
+	/*
+	 * round address down to a 32 bit boundary
+	 * and always dump a multiple of 32 bytes
+	 */
+	p = (u32 *)(addr & ~(sizeof(u32) - 1));
+	nbytes += (addr & (sizeof(u32) - 1));
+	nlines = (nbytes + 31) / 32;
+
+
+	for (i = 0; i < nlines; i++) {
+		/*
+		 * just display low 16 bits of address to keep
+		 * each line of the dump < 80 characters
+		 */
+		printk("%04lx ", (unsigned long)p & 0xffff);
+		for (j = 0; j < 8; j++) {
+			u32	data;
+			if (probe_kernel_address(p, data)) {
+				printk(" ********");
+			} else {
+				pr_cont(" %08x", data);
+			}
+			++p;
+		}
+		pr_cont("\n");
+	}
+}
+
+static void show_extra_register_data(struct pt_regs *regs, int nbytes)
+{
+	mm_segment_t fs;
+
+	fs = get_fs();
+	set_fs(KERNEL_DS);
+	show_data(regs->pc - nbytes, nbytes * 2, "PC");
+	show_data(regs->regs[30] - nbytes, nbytes * 2, "LR");
+	show_data(regs->sp - nbytes, nbytes * 2, "SP");
+	set_fs(fs);
+}
+
 void __show_regs(struct pt_regs *regs)
 {
 	int i, top_reg;
@@ -190,6 +263,8 @@
 		if (i % 2 == 0)
 			printk("\n");
 	}
+	if (!user_mode(regs))
+		show_extra_register_data(regs, 64);
 	printk("\n");
 }
 
@@ -289,6 +364,9 @@
 	} else {
 		memset(childregs, 0, sizeof(struct pt_regs));
 		childregs->pstate = PSR_MODE_EL1h;
+		if (IS_ENABLED(CONFIG_ARM64_UAO) &&
+		    cpus_have_cap(ARM64_HAS_UAO))
+			childregs->pstate |= PSR_UAO_BIT;
 		p->thread.cpu_context.x19 = stack_start;
 		p->thread.cpu_context.x20 = stk_sz;
 	}
@@ -302,20 +380,45 @@
 
 static void tls_thread_switch(struct task_struct *next)
 {
-	unsigned long tpidr, tpidrro;
+	unsigned long tpidr;
 
 	asm("mrs %0, tpidr_el0" : "=r" (tpidr));
 	*task_user_tls(current) = tpidr;
 
-	tpidr = *task_user_tls(next);
-	tpidrro = is_compat_thread(task_thread_info(next)) ?
-		  next->thread.tp_value : 0;
-
-	asm(
-	"	msr	tpidr_el0, %0\n"
-	"	msr	tpidrro_el0, %1"
-	: : "r" (tpidr), "r" (tpidrro));
+	if (is_compat_thread(task_thread_info(next)))
+		write_sysreg(next->thread.tp_value, tpidrro_el0);
+	else if (!arm64_kernel_unmapped_at_el0())
+		write_sysreg(0, tpidrro_el0);
+
+	write_sysreg(*task_user_tls(next), tpidr_el0);
+}
+
+/* Restore the UAO state depending on next's addr_limit */
+void uao_thread_switch(struct task_struct *next)
+{
+	if (IS_ENABLED(CONFIG_ARM64_UAO)) {
+		if (task_thread_info(next)->addr_limit == KERNEL_DS)
+			asm(ALTERNATIVE("nop", SET_PSTATE_UAO(1), ARM64_HAS_UAO));
+		else
+			asm(ALTERNATIVE("nop", SET_PSTATE_UAO(0), ARM64_HAS_UAO));
+	}
+}
+
+#ifdef CONFIG_THREAD_INFO_IN_TASK
+/*
+ * We store our current task in sp_el0, which is clobbered by userspace. Keep a
+ * shadow copy so that we can restore this upon entry from userspace.
+ *
+ * This is *only* for exception entry from EL0, and is not valid until we
+ * __switch_to() a user task.
+ */
+DEFINE_PER_CPU(struct task_struct *, __entry_task);
+
+static void entry_task_switch(struct task_struct *next)
+{
+	__this_cpu_write(__entry_task, next);
 }
+#endif
 
 /*
  * Thread switching.
@@ -329,6 +432,10 @@
 	tls_thread_switch(next);
 	hw_breakpoint_thread_switch(next);
 	contextidr_thread_switch(next);
+#ifdef CONFIG_THREAD_INFO_IN_TASK
+	entry_task_switch(next);
+#endif
+	uao_thread_switch(next);
 
 	/*
 	 * Complete any pending TLB or cache maintenance on this CPU in case
@@ -345,24 +452,35 @@
 unsigned long get_wchan(struct task_struct *p)
 {
 	struct stackframe frame;
-	unsigned long stack_page;
+	unsigned long stack_page, ret = 0;
 	int count = 0;
 	if (!p || p == current || p->state == TASK_RUNNING)
 		return 0;
 
+	stack_page = (unsigned long)try_get_task_stack(p);
+	if (!stack_page)
+		return 0;
+
 	frame.fp = thread_saved_fp(p);
 	frame.sp = thread_saved_sp(p);
 	frame.pc = thread_saved_pc(p);
-	stack_page = (unsigned long)task_stack_page(p);
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+	frame.graph = p->curr_ret_stack;
+#endif
 	do {
 		if (frame.sp < stack_page ||
 		    frame.sp >= stack_page + THREAD_SIZE ||
-		    unwind_frame(&frame))
-			return 0;
-		if (!in_sched_functions(frame.pc))
-			return frame.pc;
+		    unwind_frame(p, &frame))
+			goto out;
+		if (!in_sched_functions(frame.pc)) {
+			ret = frame.pc;
+			goto out;
+		}
 	} while (count ++ < 16);
-	return 0;
+
+out:
+	put_task_stack(p);
+	return ret;
 }
 
 unsigned long arch_align_stack(unsigned long sp)
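show_data() above renders 32 bytes per line as eight 32-bit words, keyed by the low 16 bits of the address to stay under 80 columns. The same layout reproduced in userspace, minus the probe_kernel_address() fault guard, which a plain process does not need for its own valid buffers:

	#include <stdint.h>
	#include <stdio.h>

	/* Same dump layout as show_data() above: round the address down to a
	 * 32-bit boundary, widen the byte count accordingly, print eight
	 * words per line behind the low 16 address bits. */
	static void dump_words(const void *addr, int nbytes, const char *name)
	{
		const uint32_t *p = (const void *)((uintptr_t)addr & ~3UL);
		int nlines;

		nbytes += (uintptr_t)addr & 3;
		nlines = (nbytes + 31) / 32;

		printf("\n%s: %p:\n", name, addr);
		for (int i = 0; i < nlines; i++) {
			printf("%04lx ", (unsigned long)(uintptr_t)p & 0xffff);
			for (int j = 0; j < 8; j++)
				printf(" %08x", *p++);
			printf("\n");
		}
	}

	int main(void)
	{
		static const uint32_t buf[32] = { 0xdeadbeef, 0xcafef00d };

		dump_words(buf, sizeof(buf), "buf");
		return 0;
	}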
diff -ruw linux-4.4.115/arch/arm64/kernel/psci.c linux-4.4.115-fbx/arch/arm64/kernel/psci.c
--- linux-4.4.115/arch/arm64/kernel/psci.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/arch/arm64/kernel/psci.c	2019-01-22 16:16:21.551228730 +0100
@@ -19,8 +19,8 @@
 #include <linux/of.h>
 #include <linux/smp.h>
 #include <linux/delay.h>
+#include <linux/mm.h>
 #include <linux/psci.h>
-#include <linux/slab.h>
 
 #include <uapi/linux/psci.h>
 
@@ -28,76 +28,10 @@
 #include <asm/cpu_ops.h>
 #include <asm/errno.h>
 #include <asm/smp_plat.h>
-#include <asm/suspend.h>
-
-static DEFINE_PER_CPU_READ_MOSTLY(u32 *, psci_power_state);
-
-static int __maybe_unused cpu_psci_cpu_init_idle(unsigned int cpu)
-{
-	int i, ret, count = 0;
-	u32 *psci_states;
-	struct device_node *state_node, *cpu_node;
-
-	cpu_node = of_get_cpu_node(cpu, NULL);
-	if (!cpu_node)
-		return -ENODEV;
-
-	/*
-	 * If the PSCI cpu_suspend function hook has not been initialized
-	 * idle states must not be enabled, so bail out
-	 */
-	if (!psci_ops.cpu_suspend)
-		return -EOPNOTSUPP;
-
-	/* Count idle states */
-	while ((state_node = of_parse_phandle(cpu_node, "cpu-idle-states",
-					      count))) {
-		count++;
-		of_node_put(state_node);
-	}
-
-	if (!count)
-		return -ENODEV;
-
-	psci_states = kcalloc(count, sizeof(*psci_states), GFP_KERNEL);
-	if (!psci_states)
-		return -ENOMEM;
-
-	for (i = 0; i < count; i++) {
-		u32 state;
-
-		state_node = of_parse_phandle(cpu_node, "cpu-idle-states", i);
-
-		ret = of_property_read_u32(state_node,
-					   "arm,psci-suspend-param",
-					   &state);
-		if (ret) {
-			pr_warn(" * %s missing arm,psci-suspend-param property\n",
-				state_node->full_name);
-			of_node_put(state_node);
-			goto free_mem;
-		}
-
-		of_node_put(state_node);
-		pr_debug("psci-power-state %#x index %d\n", state, i);
-		if (!psci_power_state_is_valid(state)) {
-			pr_warn("Invalid PSCI power state %#x\n", state);
-			ret = -EINVAL;
-			goto free_mem;
-		}
-		psci_states[i] = state;
-	}
-	/* Idle states parsed correctly, initialize per-cpu pointer */
-	per_cpu(psci_power_state, cpu) = psci_states;
-	return 0;
-
-free_mem:
-	kfree(psci_states);
-	return ret;
-}
 
 static int __init cpu_psci_cpu_init(unsigned int cpu)
 {
+	pr_info("Initializing psci_cpu_init\n");
 	return 0;
 }
 
@@ -113,7 +47,8 @@
 
 static int cpu_psci_cpu_boot(unsigned int cpu)
 {
-	int err = psci_ops.cpu_on(cpu_logical_map(cpu), __pa(secondary_entry));
+	int err = psci_ops.cpu_on(cpu_logical_map(cpu),
+				  __pa_symbol(secondary_entry));
 	if (err)
 		pr_err("failed to boot CPU%d (%d)\n", cpu, err);
 
@@ -164,12 +99,12 @@
 	for (i = 0; i < 10; i++) {
 		err = psci_ops.affinity_info(cpu_logical_map(cpu), 0);
 		if (err == PSCI_0_2_AFFINITY_LEVEL_OFF) {
-			pr_info("CPU%d killed.\n", cpu);
+			pr_debug("CPU%d killed.\n", cpu);
 			return 0;
 		}
 
 		msleep(10);
-		pr_info("Retrying again to check for CPU kill\n");
+		pr_debug("Retrying again to check for CPU kill\n");
 	}
 
 	pr_warn("CPU%d may not have shut down cleanly (AFFINITY_INFO reports %d)\n",
@@ -178,38 +113,11 @@
 }
 #endif
 
-static int psci_suspend_finisher(unsigned long index)
-{
-	u32 *state = __this_cpu_read(psci_power_state);
-
-	return psci_ops.cpu_suspend(state[index - 1],
-				    virt_to_phys(cpu_resume));
-}
-
-static int __maybe_unused cpu_psci_cpu_suspend(unsigned long index)
-{
-	int ret;
-	u32 *state = __this_cpu_read(psci_power_state);
-	/*
-	 * idle state index 0 corresponds to wfi, should never be called
-	 * from the cpu_suspend operations
-	 */
-	if (WARN_ON_ONCE(!index))
-		return -EINVAL;
-
-	if (!psci_power_state_loses_context(state[index - 1]))
-		ret = psci_ops.cpu_suspend(state[index - 1], 0);
-	else
-		ret = cpu_suspend(index, psci_suspend_finisher);
-
-	return ret;
-}
-
 const struct cpu_operations cpu_psci_ops = {
 	.name		= "psci",
 #ifdef CONFIG_CPU_IDLE
-	.cpu_init_idle	= cpu_psci_cpu_init_idle,
-	.cpu_suspend	= cpu_psci_cpu_suspend,
+	.cpu_init_idle	= psci_cpu_init_idle,
+	.cpu_suspend	= psci_cpu_suspend_enter,
 #endif
 	.cpu_init	= cpu_psci_cpu_init,
 	.cpu_prepare	= cpu_psci_cpu_prepare,
diff -ruw linux-4.4.115/arch/arm64/kernel/ptrace.c linux-4.4.115-fbx/arch/arm64/kernel/ptrace.c
--- linux-4.4.115/arch/arm64/kernel/ptrace.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/arch/arm64/kernel/ptrace.c	2019-10-29 09:26:23.005196935 +0100
@@ -49,6 +49,106 @@
 #define CREATE_TRACE_POINTS
 #include <trace/events/syscalls.h>
 
+struct pt_regs_offset {
+	const char *name;
+	int offset;
+};
+
+#define REG_OFFSET_NAME(r) {.name = #r, .offset = offsetof(struct pt_regs, r)}
+#define REG_OFFSET_END {.name = NULL, .offset = 0}
+#define GPR_OFFSET_NAME(r) \
+	{.name = "x" #r, .offset = offsetof(struct pt_regs, regs[r])}
+
+static const struct pt_regs_offset regoffset_table[] = {
+	GPR_OFFSET_NAME(0),
+	GPR_OFFSET_NAME(1),
+	GPR_OFFSET_NAME(2),
+	GPR_OFFSET_NAME(3),
+	GPR_OFFSET_NAME(4),
+	GPR_OFFSET_NAME(5),
+	GPR_OFFSET_NAME(6),
+	GPR_OFFSET_NAME(7),
+	GPR_OFFSET_NAME(8),
+	GPR_OFFSET_NAME(9),
+	GPR_OFFSET_NAME(10),
+	GPR_OFFSET_NAME(11),
+	GPR_OFFSET_NAME(12),
+	GPR_OFFSET_NAME(13),
+	GPR_OFFSET_NAME(14),
+	GPR_OFFSET_NAME(15),
+	GPR_OFFSET_NAME(16),
+	GPR_OFFSET_NAME(17),
+	GPR_OFFSET_NAME(18),
+	GPR_OFFSET_NAME(19),
+	GPR_OFFSET_NAME(20),
+	GPR_OFFSET_NAME(21),
+	GPR_OFFSET_NAME(22),
+	GPR_OFFSET_NAME(23),
+	GPR_OFFSET_NAME(24),
+	GPR_OFFSET_NAME(25),
+	GPR_OFFSET_NAME(26),
+	GPR_OFFSET_NAME(27),
+	GPR_OFFSET_NAME(28),
+	GPR_OFFSET_NAME(29),
+	GPR_OFFSET_NAME(30),
+	{.name = "lr", .offset = offsetof(struct pt_regs, regs[30])},
+	REG_OFFSET_NAME(sp),
+	REG_OFFSET_NAME(pc),
+	REG_OFFSET_NAME(pstate),
+	REG_OFFSET_END,
+};
+
+/**
+ * regs_query_register_offset() - query register offset from its name
+ * @name:	the name of a register
+ *
+ * regs_query_register_offset() returns the offset of a register in struct
+ * pt_regs from its name. If the name is invalid, this returns -EINVAL.
+ */
+int regs_query_register_offset(const char *name)
+{
+	const struct pt_regs_offset *roff;
+
+	for (roff = regoffset_table; roff->name != NULL; roff++)
+		if (!strcmp(roff->name, name))
+			return roff->offset;
+	return -EINVAL;
+}
+
+/**
+ * regs_within_kernel_stack() - check the address in the stack
+ * @regs:      pt_regs which contains kernel stack pointer.
+ * @addr:      address which is checked.
+ *
+ * regs_within_kernel_stack() checks @addr is within the kernel stack page(s).
+ * If @addr is within the kernel stack, it returns true. If not, returns false.
+ */
+static bool regs_within_kernel_stack(struct pt_regs *regs, unsigned long addr)
+{
+	return ((addr & ~(THREAD_SIZE - 1))  ==
+		(kernel_stack_pointer(regs) & ~(THREAD_SIZE - 1)));
+}
+
+/**
+ * regs_get_kernel_stack_nth() - get Nth entry of the stack
+ * @regs:	pt_regs which contains kernel stack pointer.
+ * @n:		stack entry number.
+ *
+ * regs_get_kernel_stack_nth() returns @n th entry of the kernel stack which
+ * is specified by @regs. If the @n th entry is NOT in the kernel stack,
+ * this returns 0.
+ */
+unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, unsigned int n)
+{
+	unsigned long *addr = (unsigned long *)kernel_stack_pointer(regs);
+
+	addr += n;
+	if (regs_within_kernel_stack(regs, (unsigned long)addr))
+		return *addr;
+	else
+		return 0;
+}
+
 /*
  * TODO: does not yet catch signals sent when the child dies.
  * in exit.c or in signal.c.
@@ -227,13 +327,13 @@
 				     struct arch_hw_breakpoint_ctrl ctrl,
 				     struct perf_event_attr *attr)
 {
-	int err, len, type, disabled = !ctrl.enabled;
+	int err, len, type, offset, disabled = !ctrl.enabled;
 
 	attr->disabled = disabled;
 	if (disabled)
 		return 0;
 
-	err = arch_bp_generic_fields(ctrl, &len, &type);
+	err = arch_bp_generic_fields(ctrl, &len, &type, &offset);
 	if (err)
 		return err;
 
@@ -252,6 +352,7 @@
 
 	attr->bp_len	= len;
 	attr->bp_type	= type;
+	attr->bp_addr	+= offset;
 
 	return 0;
 }
@@ -304,7 +405,7 @@
 	if (IS_ERR(bp))
 		return PTR_ERR(bp);
 
-	*addr = bp ? bp->attr.bp_addr : 0;
+	*addr = bp ? counter_arch_bp(bp)->address : 0;
 	return 0;
 }
 
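regs_get_kernel_stack_nth() only dereferences sp + n once it has proven the resulting address still lies in the same THREAD_SIZE-aligned block as the stack pointer, so a bad n degrades to returning 0 rather than a wild read. The same mask-and-compare check as a standalone sketch, with the THREAD_SIZE value assumed:

	#include <stdalign.h>
	#include <stdint.h>
	#include <stdio.h>

	#define THREAD_SIZE	16384	/* assumed; must be a power of two */

	/* regs_within_kernel_stack() above: two addresses share a stack iff
	 * masking off the low bits yields the same THREAD_SIZE-aligned base. */
	static int within_stack(uintptr_t sp, uintptr_t addr)
	{
		return (addr & ~(uintptr_t)(THREAD_SIZE - 1)) ==
		       (sp & ~(uintptr_t)(THREAD_SIZE - 1));
	}

	static unsigned long stack_nth(uintptr_t sp, unsigned int n)
	{
		uintptr_t addr = sp + n * sizeof(unsigned long);

		if (within_stack(sp, addr))
			return *(unsigned long *)addr;
		return 0;	/* out of bounds: report 0, as the kernel does */
	}

	int main(void)
	{
		static alignas(THREAD_SIZE)
			unsigned long stack[THREAD_SIZE / sizeof(long)];
		unsigned long *sp = &stack[10];

		stack[12] = 0x1234;
		printf("nth(2):   %#lx\n", stack_nth((uintptr_t)sp, 2));
		printf("nth(far): %#lx\n", stack_nth((uintptr_t)sp, THREAD_SIZE));
		return 0;
	}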
diff -ruw linux-4.4.115/arch/arm64/kernel/return_address.c linux-4.4.115-fbx/arch/arm64/kernel/return_address.c
--- linux-4.4.115/arch/arm64/kernel/return_address.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/arch/arm64/kernel/return_address.c	2019-01-22 16:16:21.555228766 +0100
@@ -12,6 +12,7 @@
 #include <linux/export.h>
 #include <linux/ftrace.h>
 
+#include <asm/stack_pointer.h>
 #include <asm/stacktrace.h>
 
 struct return_address_data {
@@ -43,8 +44,11 @@
 	frame.fp = (unsigned long)__builtin_frame_address(0);
 	frame.sp = current_stack_pointer;
 	frame.pc = (unsigned long)return_address; /* dummy */
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+	frame.graph = current->curr_ret_stack;
+#endif
 
-	walk_stackframe(&frame, save_return_addr, &data);
+	walk_stackframe(current, &frame, save_return_addr, &data);
 
 	if (!data.level)
 		return data.addr;
diff -ruw linux-4.4.115/arch/arm64/kernel/setup.c linux-4.4.115-fbx/arch/arm64/kernel/setup.c
--- linux-4.4.115/arch/arm64/kernel/setup.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/arch/arm64/kernel/setup.c	2019-01-22 16:16:21.555228766 +0100
@@ -44,6 +44,8 @@
 #include <linux/of_platform.h>
 #include <linux/efi.h>
 #include <linux/psci.h>
+#include <linux/dma-mapping.h>
+#include <linux/mm.h>
 
 #include <asm/acpi.h>
 #include <asm/fixmap.h>
@@ -62,9 +64,17 @@
 #include <asm/memblock.h>
 #include <asm/efi.h>
 #include <asm/xen/hypervisor.h>
+#include <asm/mmu_context.h>
+
+unsigned int boot_reason;
+EXPORT_SYMBOL(boot_reason);
+
+unsigned int cold_boot;
+EXPORT_SYMBOL(cold_boot);
 
 phys_addr_t __fdt_pointer __initdata;
 
+const char *machine_name;
 /*
  * Standard memory resources
  */
@@ -174,7 +184,6 @@
 	 */
 	if (mpidr_hash_size() > 4 * num_possible_cpus())
 		pr_warn("Large number of MPIDR hash buckets detected\n");
-	__flush_dcache_area(&mpidr_hash, sizeof(struct mpidr_hash));
 }
 
 static void __init setup_machine_fdt(phys_addr_t dt_phys)
@@ -192,7 +201,11 @@
 			cpu_relax();
 	}
 
-	dump_stack_set_arch_desc("%s (DT)", of_flat_dt_get_machine_name());
+	machine_name = of_flat_dt_get_machine_name();
+	if (machine_name) {
+		dump_stack_set_arch_desc("%s (DT)", machine_name);
+		pr_info("Machine: %s\n", machine_name);
+	}
 }
 
 static void __init request_standard_resources(void)
@@ -200,10 +213,10 @@
 	struct memblock_region *region;
 	struct resource *res;
 
-	kernel_code.start   = virt_to_phys(_text);
-	kernel_code.end     = virt_to_phys(_etext - 1);
-	kernel_data.start   = virt_to_phys(_sdata);
-	kernel_data.end     = virt_to_phys(_end - 1);
+	kernel_code.start   = __pa_symbol(_text);
+	kernel_code.end     = __pa_symbol(__init_begin - 1);
+	kernel_data.start   = __pa_symbol(_sdata);
+	kernel_data.end     = __pa_symbol(_end - 1);
 
 	for_each_memblock(memory, region) {
 		res = alloc_bootmem_low(sizeof(*res));
@@ -288,6 +301,8 @@
 
 u64 __cpu_logical_map[NR_CPUS] = { [0 ... NR_CPUS-1] = INVALID_HWID };
 
+void __init __weak init_random_pool(void) { }
+
 void __init setup_arch(char **cmdline_p)
 {
 	pr_info("Boot CPU: AArch64 Processor [%08x]\n", read_cpuid_id());
@@ -313,6 +328,12 @@
 	 */
 	local_async_enable();
 
+	/*
+	 * TTBR0 is only used for the identity mapping at this stage. Make it
+	 * point to zero page to avoid speculatively fetching new entries.
+	 */
+	cpu_uninstall_idmap();
+
 	efi_init();
 	arm64_memblock_init();
 
@@ -340,6 +361,19 @@
 	smp_init_cpus();
 	smp_build_mpidr_hash();
 
+#ifdef CONFIG_ARM64_SW_TTBR0_PAN
+	/*
+	 * Make sure thread_info.ttbr0 always generates translation
+	 * faults in case uaccess_enable() is inadvertently called by the init
+	 * thread.
+	 */
+#ifdef CONFIG_THREAD_INFO_IN_TASK
+	init_task.thread_info.ttbr0 = __pa_symbol(empty_zero_page);
+#else
+	init_thread_info.ttbr0 = __pa_symbol(empty_zero_page);
+#endif
+#endif
+
 #ifdef CONFIG_VT
 #if defined(CONFIG_VGA_CONSOLE)
 	conswitchp = &vga_con;
@@ -347,6 +381,7 @@
 	conswitchp = &dummy_con;
 #endif
 #endif
+	init_random_pool();
 	if (boot_args[1] || boot_args[2] || boot_args[3]) {
 		pr_err("WARNING: x1-x3 nonzero in violation of boot protocol:\n"
 			"\tx1: %016llx\n\tx2: %016llx\n\tx3: %016llx\n"
@@ -380,4 +415,39 @@
 
 	return 0;
 }
-subsys_initcall(topology_init);
+postcore_initcall(topology_init);
+
+void arch_setup_pdev_archdata(struct platform_device *pdev)
+{
+	pdev->archdata.dma_mask = DMA_BIT_MASK(32);
+	pdev->dev.dma_mask = &pdev->archdata.dma_mask;
+}
+
+/*
+ * Dump out kernel offset information on panic.
+ */
+static int dump_kernel_offset(struct notifier_block *self, unsigned long v,
+			      void *p)
+{
+	const unsigned long offset = kaslr_offset();
+
+	if (IS_ENABLED(CONFIG_RANDOMIZE_BASE) && offset > 0) {
+		pr_emerg("Kernel Offset: 0x%lx from 0x%lx\n",
+			 offset, KIMAGE_VADDR);
+	} else {
+		pr_emerg("Kernel Offset: disabled\n");
+	}
+	return 0;
+}
+
+static struct notifier_block kernel_offset_notifier = {
+	.notifier_call = dump_kernel_offset
+};
+
+static int __init register_kernel_offset_dumper(void)
+{
+	atomic_notifier_chain_register(&panic_notifier_list,
+				       &kernel_offset_notifier);
+	return 0;
+}
+__initcall(register_kernel_offset_dumper);
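dump_kernel_offset() uses the stock panic-notifier pattern: a notifier_block registered on panic_notifier_list from an initcall. The same skeleton as a self-contained module sketch; the message and names are this sketch's own, only the registration boilerplate mirrors the patch (on 4.4, panic_notifier_list is declared in linux/kernel.h):

	#include <linux/kernel.h>
	#include <linux/module.h>
	#include <linux/notifier.h>

	/* Runs atomically at panic time; p is the panic message string. */
	static int on_panic(struct notifier_block *self, unsigned long v, void *p)
	{
		pr_emerg("panic hook: \"%s\"\n", (const char *)p);
		return NOTIFY_DONE;
	}

	static struct notifier_block panic_nb = {
		.notifier_call = on_panic,
	};

	static int __init panic_hook_init(void)
	{
		atomic_notifier_chain_register(&panic_notifier_list, &panic_nb);
		return 0;
	}

	static void __exit panic_hook_exit(void)
	{
		atomic_notifier_chain_unregister(&panic_notifier_list, &panic_nb);
	}

	module_init(panic_hook_init);
	module_exit(panic_hook_exit);
	MODULE_LICENSE("GPL");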
diff -ruw linux-4.4.115/arch/arm64/kernel/sleep.S linux-4.4.115-fbx/arch/arm64/kernel/sleep.S
--- linux-4.4.115/arch/arm64/kernel/sleep.S	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/arch/arm64/kernel/sleep.S	2019-10-29 09:26:23.005196935 +0100
@@ -49,39 +49,32 @@
 	orr	\dst, \dst, \mask		// dst|=(aff3>>rs3)
 	.endm
 /*
- * Save CPU state for a suspend and execute the suspend finisher.
- * On success it will return 0 through cpu_resume - ie through a CPU
- * soft/hard reboot from the reset vector.
- * On failure it returns the suspend finisher return value or force
- * -EOPNOTSUPP if the finisher erroneously returns 0 (the suspend finisher
- * is not allowed to return, if it does this must be considered failure).
- * It saves callee registers, and allocates space on the kernel stack
- * to save the CPU specific registers + some other data for resume.
+ * Save CPU state in the provided sleep_stack_data area, and publish its
+ * location for cpu_resume()'s use in sleep_save_stash.
  *
- *  x0 = suspend finisher argument
- *  x1 = suspend finisher function pointer
+ * cpu_resume() will restore this saved state, and return. Because the
+ * link-register is saved and restored, it will appear to return from this
+ * function. So that the caller can tell the suspend/resume paths apart,
+ * __cpu_suspend_enter() will always return a non-zero value, whereas the
+ * path through cpu_resume() will return 0.
+ *
+ *  x0 = struct sleep_stack_data area
  */
 ENTRY(__cpu_suspend_enter)
-	stp	x29, lr, [sp, #-96]!
-	stp	x19, x20, [sp,#16]
-	stp	x21, x22, [sp,#32]
-	stp	x23, x24, [sp,#48]
-	stp	x25, x26, [sp,#64]
-	stp	x27, x28, [sp,#80]
-	/*
-	 * Stash suspend finisher and its argument in x20 and x19
-	 */
-	mov	x19, x0
-	mov	x20, x1
+	stp	x29, lr, [x0, #SLEEP_STACK_DATA_CALLEE_REGS]
+	stp	x19, x20, [x0,#SLEEP_STACK_DATA_CALLEE_REGS+16]
+	stp	x21, x22, [x0,#SLEEP_STACK_DATA_CALLEE_REGS+32]
+	stp	x23, x24, [x0,#SLEEP_STACK_DATA_CALLEE_REGS+48]
+	stp	x25, x26, [x0,#SLEEP_STACK_DATA_CALLEE_REGS+64]
+	stp	x27, x28, [x0,#SLEEP_STACK_DATA_CALLEE_REGS+80]
+
+	/* save the sp in cpu_suspend_ctx */
 	mov	x2, sp
-	sub	sp, sp, #CPU_SUSPEND_SZ	// allocate cpu_suspend_ctx
-	mov	x0, sp
-	/*
-	 * x0 now points to struct cpu_suspend_ctx allocated on the stack
-	 */
-	str	x2, [x0, #CPU_CTX_SP]
-	ldr	x1, =sleep_save_sp
-	ldr	x1, [x1, #SLEEP_SAVE_SP_VIRT]
+	str	x2, [x0, #SLEEP_STACK_DATA_SYSTEM_REGS + CPU_CTX_SP]
+
+	/* find the mpidr_hash */
+	ldr	x1, =sleep_save_stash
+	ldr	x1, [x1]
 	mrs	x7, mpidr_el1
 	ldr	x9, =mpidr_hash
 	ldr	x10, [x9, #MPIDR_HASH_MASK]
@@ -93,70 +86,28 @@
 	ldp	w5, w6, [x9, #(MPIDR_HASH_SHIFTS + 8)]
 	compute_mpidr_hash x8, x3, x4, x5, x6, x7, x10
 	add	x1, x1, x8, lsl #3
-	bl	__cpu_suspend_save
-	/*
-	 * Grab suspend finisher in x20 and its argument in x19
-	 */
-	mov	x0, x19
-	mov	x1, x20
-	/*
-	 * We are ready for power down, fire off the suspend finisher
-	 * in x1, with argument in x0
-	 */
-	blr	x1
-        /*
-	 * Never gets here, unless suspend finisher fails.
-	 * Successful cpu_suspend should return from cpu_resume, returning
-	 * through this code path is considered an error
-	 * If the return value is set to 0 force x0 = -EOPNOTSUPP
-	 * to make sure a proper error condition is propagated
-	 */
-	cmp	x0, #0
-	mov	x3, #-EOPNOTSUPP
-	csel	x0, x3, x0, eq
-	add	sp, sp, #CPU_SUSPEND_SZ	// rewind stack pointer
-	ldp	x19, x20, [sp, #16]
-	ldp	x21, x22, [sp, #32]
-	ldp	x23, x24, [sp, #48]
-	ldp	x25, x26, [sp, #64]
-	ldp	x27, x28, [sp, #80]
-	ldp	x29, lr, [sp], #96
+
+	str	x0, [x1]
+	add	x0, x0, #SLEEP_STACK_DATA_SYSTEM_REGS
+	stp	x29, lr, [sp, #-16]!
+	bl	cpu_do_suspend
+	ldp	x29, lr, [sp], #16
+	mov	x0, #1
 	ret
 ENDPROC(__cpu_suspend_enter)
 	.ltorg
 
-/*
- * x0 must contain the sctlr value retrieved from restored context
- */
-	.pushsection	".idmap.text", "ax"
-ENTRY(cpu_resume_mmu)
-	ldr	x3, =cpu_resume_after_mmu
-	msr	sctlr_el1, x0		// restore sctlr_el1
-	isb
-	/*
-	 * Invalidate the local I-cache so that any instructions fetched
-	 * speculatively from the PoC are discarded, since they may have
-	 * been dynamically patched at the PoU.
-	 */
-	ic	iallu
-	dsb	nsh
-	isb
-	br	x3			// global jump to virtual address
-ENDPROC(cpu_resume_mmu)
-	.popsection
-cpu_resume_after_mmu:
-	mov	x0, #0			// return zero on success
-	ldp	x19, x20, [sp, #16]
-	ldp	x21, x22, [sp, #32]
-	ldp	x23, x24, [sp, #48]
-	ldp	x25, x26, [sp, #64]
-	ldp	x27, x28, [sp, #80]
-	ldp	x29, lr, [sp], #96
-	ret
-ENDPROC(cpu_resume_after_mmu)
-
 ENTRY(cpu_resume)
 	bl	el2_setup		// if in EL2 drop to EL1 cleanly
+	/* enable the MMU early - so we can access sleep_save_stash by va */
+	adr_l	lr, __enable_mmu	/* __cpu_setup will return here */
+	ldr	x27, =_cpu_resume	/* __enable_mmu will branch here */
+	adrp	x25, idmap_pg_dir
+	adrp	x26, swapper_pg_dir
+	b	__cpu_setup
+ENDPROC(cpu_resume)
+
+ENTRY(_cpu_resume)
 	mrs	x1, mpidr_el1
 	adrp	x8, mpidr_hash
 	add x8, x8, #:lo12:mpidr_hash // x8 = struct mpidr_hash phys address
@@ -166,17 +117,29 @@
 	ldp	w5, w6, [x8, #(MPIDR_HASH_SHIFTS + 8)]
 	compute_mpidr_hash x7, x3, x4, x5, x6, x1, x2
         /* x7 contains hash index, let's use it to grab context pointer */
-	ldr_l	x0, sleep_save_sp + SLEEP_SAVE_SP_PHYS
+	ldr_l	x0, sleep_save_stash
 	ldr	x0, [x0, x7, lsl #3]
+	add	x29, x0, #SLEEP_STACK_DATA_CALLEE_REGS
+	add	x0, x0, #SLEEP_STACK_DATA_SYSTEM_REGS
 	/* load sp from context */
 	ldr	x2, [x0, #CPU_CTX_SP]
-	/* load physical address of identity map page table in x1 */
-	adrp	x1, idmap_pg_dir
 	mov	sp, x2
 	/*
-	 * cpu_do_resume expects x0 to contain context physical address
-	 * pointer and x1 to contain physical address of 1:1 page tables
+	 * cpu_do_resume expects x0 to contain context address pointer
 	 */
-	bl	cpu_do_resume		// PC relative jump, MMU off
-	b	cpu_resume_mmu		// Resume MMU, never returns
-ENDPROC(cpu_resume)
+	bl	cpu_do_resume
+
+#ifdef CONFIG_KASAN
+	mov	x0, sp
+	bl	kasan_unpoison_task_stack_below
+#endif
+
+	ldp	x19, x20, [x29, #16]
+	ldp	x21, x22, [x29, #32]
+	ldp	x23, x24, [x29, #48]
+	ldp	x25, x26, [x29, #64]
+	ldp	x27, x28, [x29, #80]
+	ldp	x29, lr, [x29]
+	mov	x0, #0
+	ret
+ENDPROC(_cpu_resume)
diff -ruw linux-4.4.115/arch/arm64/kernel/smp.c linux-4.4.115-fbx/arch/arm64/kernel/smp.c
--- linux-4.4.115/arch/arm64/kernel/smp.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/arch/arm64/kernel/smp.c	2019-10-29 09:26:23.005196935 +0100
@@ -53,10 +53,15 @@
 #include <asm/tlbflush.h>
 #include <asm/ptrace.h>
 #include <asm/virt.h>
+#include <asm/edac.h>
+#include <soc/qcom/minidump.h>
 
 #define CREATE_TRACE_POINTS
 #include <trace/events/ipi.h>
 
+DEFINE_PER_CPU_READ_MOSTLY(int, cpu_number);
+EXPORT_PER_CPU_SYMBOL(cpu_number);
+
 /*
  * as from 2.5, kernels no longer have an init_tasks structure
  * so we need some other way of telling a new secondary core
@@ -70,6 +75,8 @@
 	IPI_CPU_STOP,
 	IPI_TIMER,
 	IPI_IRQ_WORK,
+	IPI_WAKEUP,
+	IPI_CPU_BACKTRACE,
 };
 
 /*
@@ -94,6 +101,9 @@
 	 * We need to tell the secondary core where to find its stack and the
 	 * page tables.
 	 */
+#ifdef CONFIG_THREAD_INFO_IN_TASK
+	secondary_data.task = idle;
+#endif
 	secondary_data.stack = task_stack_page(idle) + THREAD_START_SP;
 	__flush_dcache_area(&secondary_data, sizeof(secondary_data));
 
@@ -117,6 +127,9 @@
 		pr_err("CPU%u: failed to boot: %d\n", cpu, ret);
 	}
 
+#ifdef CONFIG_THREAD_INFO_IN_TASK
+	secondary_data.task = NULL;
+#endif
 	secondary_data.stack = NULL;
 
 	return ret;
@@ -134,7 +147,12 @@
 asmlinkage void secondary_start_kernel(void)
 {
 	struct mm_struct *mm = &init_mm;
-	unsigned int cpu = smp_processor_id();
+	unsigned int cpu;
+
+	cpu = task_cpu(current);
+	set_my_cpu_offset(per_cpu_offset(cpu));
+
+	pr_debug("CPU%u: Booted secondary processor\n", cpu);
 
 	/*
 	 * All kernel threads share the same mm context; grab a
@@ -143,15 +161,11 @@
 	atomic_inc(&mm->mm_count);
 	current->active_mm = mm;
 
-	set_my_cpu_offset(per_cpu_offset(smp_processor_id()));
-
 	/*
 	 * TTBR0 is only used for the identity mapping at this stage. Make it
 	 * point to zero page to avoid speculatively fetching new entries.
 	 */
-	cpu_set_reserved_ttbr0();
-	local_flush_tlb_all();
-	cpu_set_default_tcr_t0sz();
+	cpu_uninstall_idmap();
 
 	preempt_disable();
 	trace_hardirqs_off();
@@ -174,10 +188,10 @@
 	/*
 	 * Enable GIC and timers.
 	 */
-	notify_cpu_starting(cpu);
-
 	smp_store_cpu_info(cpu);
 
+	notify_cpu_starting(cpu);
+
 	/*
 	 * OK, now it's safe to let the boot CPU continue.  Wait for
 	 * the CPU migration code to notice that the CPU is online
@@ -268,7 +282,7 @@
 		pr_crit("CPU%u: cpu didn't die\n", cpu);
 		return;
 	}
-	pr_notice("CPU%u: shutdown\n", cpu);
+	pr_debug("CPU%u: shutdown\n", cpu);
 
 	/*
 	 * Now that the dying CPU is beyond the point of no return w.r.t.
@@ -290,7 +304,7 @@
  * of the other hotplug-cpu capable cores, so presumably coming
  * out of idle fixes this.
  */
-void cpu_die(void)
+void __ref cpu_die(void)
 {
 	unsigned int cpu = smp_processor_id();
 
@@ -308,7 +322,16 @@
 	 */
 	cpu_ops[cpu]->cpu_die(cpu);
 
-	BUG();
+	/*
+	 * Do not return to the idle loop - jump back to the secondary
+	 * cpu initialisation.  There's some initialisation which needs
+	 * to be repeated to undo the effects of taking the CPU offline.
+	 */
+
+	asm volatile("mov       sp, %0\n"
+		     "mov       x29, #0\n"
+		     "b         secondary_start_kernel"
+		     : : "r" (task_stack_page(current) + THREAD_START_SP));
 }
 #endif
 
@@ -444,6 +467,17 @@
 	/* map the logical cpu id to cpu MPIDR */
 	cpu_logical_map(cpu_count) = hwid;
 
+	/*
+	 * Set-up the ACPI parking protocol cpu entries
+	 * while initializing the cpu_logical_map to
+	 * avoid parsing MADT entries multiple times for
+	 * nothing (ie a valid cpu_logical_map entry should
+	 * contain a valid parking protocol data set to
+	 * initialize the cpu if the parking protocol is
+	 * the only available enable method).
+	 */
+	acpi_set_mailbox_entry(cpu_count, processor);
+
 	cpu_count++;
 }
 
@@ -466,6 +500,18 @@
 #else
 #define acpi_table_parse_madt(...)	do { } while (0)
 #endif
+void (*__smp_cross_call)(const struct cpumask *, unsigned int);
+DEFINE_PER_CPU(bool, pending_ipi);
+
+void smp_cross_call_common(const struct cpumask *cpumask, unsigned int func)
+{
+	unsigned int cpu;
+
+	for_each_cpu(cpu, cpumask)
+		per_cpu(pending_ipi, cpu) = true;
+
+	__smp_cross_call(cpumask, func);
+}
 
 /*
  * Enumerate the possible CPU set from the device tree and build the
@@ -597,6 +643,8 @@
 		if (max_cpus == 0)
 			break;
 
+		per_cpu(cpu_number, cpu) = cpu;
+
 		if (cpu == smp_processor_id())
 			continue;
 
@@ -612,8 +660,6 @@
 	}
 }
 
-void (*__smp_cross_call)(const struct cpumask *, unsigned int);
-
 void __init set_smp_cross_call(void (*fn)(const struct cpumask *, unsigned int))
 {
 	__smp_cross_call = fn;
@@ -626,10 +672,17 @@
 	S(IPI_CPU_STOP, "CPU stop interrupts"),
 	S(IPI_TIMER, "Timer broadcast interrupts"),
 	S(IPI_IRQ_WORK, "IRQ work interrupts"),
+	S(IPI_WAKEUP, "CPU wakeup interrupts"),
+	S(IPI_CPU_BACKTRACE, "CPU backtrace"),
 };
 
 static void smp_cross_call(const struct cpumask *target, unsigned int ipinr)
 {
+	unsigned int cpu;
+
+	for_each_cpu(cpu, target)
+		per_cpu(pending_ipi, cpu) = true;
+
 	trace_ipi_raise(target, ipi_types[ipinr]);
 	__smp_cross_call(target, ipinr);
 }
@@ -661,14 +714,21 @@
 
 void arch_send_call_function_ipi_mask(const struct cpumask *mask)
 {
-	smp_cross_call(mask, IPI_CALL_FUNC);
+	smp_cross_call_common(mask, IPI_CALL_FUNC);
 }
 
 void arch_send_call_function_single_ipi(int cpu)
 {
-	smp_cross_call(cpumask_of(cpu), IPI_CALL_FUNC);
+	smp_cross_call_common(cpumask_of(cpu), IPI_CALL_FUNC);
 }
 
+#ifdef CONFIG_ARM64_ACPI_PARKING_PROTOCOL
+void arch_send_wakeup_ipi_mask(const struct cpumask *mask)
+{
+	smp_cross_call(mask, IPI_WAKEUP);
+}
+#endif
+
 #ifdef CONFIG_IRQ_WORK
 void arch_irq_work_raise(void)
 {
@@ -677,29 +737,86 @@
 }
 #endif
 
-static DEFINE_RAW_SPINLOCK(stop_lock);
-
 /*
  * ipi_cpu_stop - handle IPI from smp_send_stop()
  */
 static void ipi_cpu_stop(unsigned int cpu)
 {
-	if (system_state == SYSTEM_BOOTING ||
-	    system_state == SYSTEM_RUNNING) {
-		raw_spin_lock(&stop_lock);
-		pr_crit("CPU%u: stopping\n", cpu);
-		dump_stack();
-		raw_spin_unlock(&stop_lock);
-	}
-
-	set_cpu_online(cpu, false);
+	set_cpu_active(cpu, false);
 
+	flush_cache_all();
 	local_irq_disable();
 
 	while (1)
 		cpu_relax();
 }
 
+static cpumask_t backtrace_mask;
+static DEFINE_RAW_SPINLOCK(backtrace_lock);
+
+/* "in progress" flag of arch_trigger_all_cpu_backtrace */
+static unsigned long backtrace_flag;
+
+static void smp_send_all_cpu_backtrace(void)
+{
+	unsigned int this_cpu = smp_processor_id();
+	int i;
+
+	if (test_and_set_bit(0, &backtrace_flag))
+		/*
+		 * If there is already a trigger_all_cpu_backtrace() in progress
+		 * (backtrace_flag == 1), don't emit a duplicate set of CPU dumps.
+		 */
+		return;
+
+	cpumask_copy(&backtrace_mask, cpu_online_mask);
+	cpumask_clear_cpu(this_cpu, &backtrace_mask);
+
+	pr_info("Backtrace for cpu %d (current):\n", this_cpu);
+	dump_stack();
+
+	pr_info("\nsending IPI to all other CPUs:\n");
+	if (!cpumask_empty(&backtrace_mask))
+		smp_cross_call_common(&backtrace_mask, IPI_CPU_BACKTRACE);
+
+	/* Wait for up to 10 seconds for all other CPUs to do the backtrace */
+	for (i = 0; i < 10 * 1000; i++) {
+		if (cpumask_empty(&backtrace_mask))
+			break;
+		mdelay(1);
+	}
+
+	clear_bit(0, &backtrace_flag);
+	smp_mb__after_atomic();
+}
+
+/*
+ * ipi_cpu_backtrace - handle IPI from smp_send_all_cpu_backtrace()
+ */
+static void ipi_cpu_backtrace(unsigned int cpu, struct pt_regs *regs)
+{
+	if (cpumask_test_cpu(cpu, &backtrace_mask)) {
+		raw_spin_lock(&backtrace_lock);
+		pr_warn("IPI backtrace for cpu %d\n", cpu);
+		show_regs(regs);
+		raw_spin_unlock(&backtrace_lock);
+		cpumask_clear_cpu(cpu, &backtrace_mask);
+	}
+}
+
+#ifdef CONFIG_SMP
+void arch_trigger_all_cpu_backtrace(void)
+{
+	smp_send_all_cpu_backtrace();
+}
+#else
+void arch_trigger_all_cpu_backtrace(void)
+{
+	dump_stack();
+}
+#endif
+
+
 /*
  * Main handler for inter-processor interrupts
  */
@@ -746,6 +863,18 @@
 		break;
 #endif
 
+	case IPI_CPU_BACKTRACE:
+		ipi_cpu_backtrace(cpu, regs);
+		break;
+
+#ifdef CONFIG_ARM64_ACPI_PARKING_PROTOCOL
+	case IPI_WAKEUP:
+		WARN_ONCE(!acpi_parking_protocol_valid(cpu),
+			  "CPU%u: Wake-up IPI outside the ACPI parking protocol\n",
+			  cpu);
+		break;
+#endif
+
 	default:
 		pr_crit("CPU%u: Unknown IPI message 0x%x\n", cpu, ipinr);
 		break;
@@ -753,18 +882,21 @@
 
 	if ((unsigned)ipinr < NR_IPI)
 		trace_ipi_exit_rcuidle(ipi_types[ipinr]);
+
+	per_cpu(pending_ipi, cpu) = false;
 	set_irq_regs(old_regs);
 }
 
 void smp_send_reschedule(int cpu)
 {
-	smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE);
+	BUG_ON(cpu_is_offline(cpu));
+	smp_cross_call_common(cpumask_of(cpu), IPI_RESCHEDULE);
 }
 
 #ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
 void tick_broadcast(const struct cpumask *mask)
 {
-	smp_cross_call(mask, IPI_TIMER);
+	smp_cross_call_common(mask, IPI_TIMER);
 }
 #endif
 
@@ -778,16 +910,20 @@
 		cpumask_copy(&mask, cpu_online_mask);
 		cpumask_clear_cpu(smp_processor_id(), &mask);
 
-		smp_cross_call(&mask, IPI_CPU_STOP);
+		if (system_state == SYSTEM_BOOTING ||
+		    system_state == SYSTEM_RUNNING)
+			pr_crit("SMP: stopping secondary CPUs\n");
+		smp_cross_call_common(&mask, IPI_CPU_STOP);
 	}
 
 	/* Wait up to one second for other CPUs to stop */
 	timeout = USEC_PER_SEC;
-	while (num_online_cpus() > 1 && timeout--)
+	while (num_active_cpus() > 1 && timeout--)
 		udelay(1);
 
-	if (num_online_cpus() > 1)
-		pr_warning("SMP: failed to stop secondary CPUs\n");
+	if (num_active_cpus() > 1)
+		pr_warning("SMP: failed to stop secondary CPUs %*pbl\n",
+			   cpumask_pr_args(cpu_online_mask));
 }
 
 /*
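smp_send_all_cpu_backtrace() serialises whole-system dumps with a single test_and_set_bit() flag, then polls for up to 10 seconds while the targeted CPUs clear themselves out of backtrace_mask. The one-shot-in-flight guard on its own, using C11 atomics in place of the kernel bitops; trigger_dump is a stand-in name for this sketch:

	#include <stdatomic.h>
	#include <stdbool.h>
	#include <stdio.h>

	static atomic_flag dump_in_progress = ATOMIC_FLAG_INIT;

	/* One dump at a time: the same test_and_set/clear shape as
	 * smp_send_all_cpu_backtrace() above. */
	static bool trigger_dump(void)
	{
		if (atomic_flag_test_and_set(&dump_in_progress))
			return false;	/* a dump is already in flight */

		puts("dumping...");	/* IPIs + bounded wait in the kernel */

		atomic_flag_clear(&dump_in_progress);
		return true;
	}

	int main(void)
	{
		atomic_flag_test_and_set(&dump_in_progress); /* simulate busy */
		printf("while busy: %d\n", trigger_dump()); /* 0: suppressed */
		atomic_flag_clear(&dump_in_progress);
		printf("when idle:  %d\n", trigger_dump()); /* 1: runs */
		return 0;
	}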
diff -ruw linux-4.4.115/arch/arm64/kernel/smp_spin_table.c linux-4.4.115-fbx/arch/arm64/kernel/smp_spin_table.c
--- linux-4.4.115/arch/arm64/kernel/smp_spin_table.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/arch/arm64/kernel/smp_spin_table.c	2019-10-29 09:26:23.005196935 +0100
@@ -21,6 +21,7 @@
 #include <linux/of.h>
 #include <linux/smp.h>
 #include <linux/types.h>
+#include <linux/mm.h>
 
 #include <asm/cacheflush.h>
 #include <asm/cpu_ops.h>
@@ -96,7 +97,7 @@
 	 * boot-loader's endianess before jumping. This is mandated by
 	 * the boot protocol.
 	 */
-	writeq_relaxed(__pa(secondary_holding_pen), release_addr);
+	writeq_relaxed(__pa_symbol(secondary_holding_pen), release_addr);
 	__flush_dcache_area((__force void *)release_addr,
 			    sizeof(*release_addr));
 
diff -ruw linux-4.4.115/arch/arm64/kernel/stacktrace.c linux-4.4.115-fbx/arch/arm64/kernel/stacktrace.c
--- linux-4.4.115/arch/arm64/kernel/stacktrace.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/arch/arm64/kernel/stacktrace.c	2019-01-22 16:16:21.555228766 +0100
@@ -15,11 +15,15 @@
  * You should have received a copy of the GNU General Public License
  * along with this program.  If not, see <http://www.gnu.org/licenses/>.
  */
+#include <linux/kasan.h>
 #include <linux/kernel.h>
 #include <linux/export.h>
+#include <linux/ftrace.h>
 #include <linux/sched.h>
 #include <linux/stacktrace.h>
 
+#include <asm/irq.h>
+#include <asm/stack_pointer.h>
 #include <asm/stacktrace.h>
 
 /*
@@ -35,25 +39,86 @@
  *	ldp	x29, x30, [sp]
  *	add	sp, sp, #0x10
  */
-int notrace unwind_frame(struct stackframe *frame)
+int notrace unwind_frame(struct task_struct *tsk, struct stackframe *frame)
 {
 	unsigned long high, low;
 	unsigned long fp = frame->fp;
+	unsigned long irq_stack_ptr;
+
+	/*
+	 * Switching between stacks is valid when tracing current and in
+	 * non-preemptible context.
+	 */
+	if (tsk == current && !preemptible())
+		irq_stack_ptr = IRQ_STACK_PTR(smp_processor_id());
+	else
+		irq_stack_ptr = 0;
 
 	low  = frame->sp;
-	high = ALIGN(low, THREAD_SIZE);
+	/* irq stacks are not THREAD_SIZE aligned */
+	if (on_irq_stack(frame->sp, raw_smp_processor_id()))
+		high = irq_stack_ptr;
+	else
+		high = ALIGN(low, THREAD_SIZE) - 0x20;
 
-	if (fp < low || fp > high - 0x18 || fp & 0xf)
+	if (fp < low || fp > high || fp & 0xf)
 		return -EINVAL;
 
+	kasan_disable_current();
+
 	frame->sp = fp + 0x10;
 	frame->fp = *(unsigned long *)(fp);
 	frame->pc = *(unsigned long *)(fp + 8);
 
+	kasan_enable_current();
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+	if (tsk && tsk->ret_stack &&
+			(frame->pc == (unsigned long)return_to_handler)) {
+		/*
+		 * This is a case where function graph tracer has
+		 * modified a return address (LR) in a stack frame
+		 * to hook a function return.
+		 * So replace it to an original value.
+		 */
+		frame->pc = tsk->ret_stack[frame->graph--].ret;
+	}
+#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
+
+	/*
+	 * Check whether we are going to walk through from interrupt stack
+	 * to task stack.
+	 * If we reach the end of the stack - and it's an interrupt stack,
+	 * unpack the dummy frame to find the original elr.
+	 *
+	 * Check that the frame->fp we read from the bottom of the irq_stack
+	 * and the original task stack pointer are both in current->stack.
+	 */
+	if (frame->sp == irq_stack_ptr) {
+		struct pt_regs *irq_args;
+		unsigned long orig_sp = IRQ_STACK_TO_TASK_STACK(irq_stack_ptr);
+
+		if (object_is_on_stack((void *)orig_sp) &&
+		   object_is_on_stack((void *)frame->fp)) {
+			frame->sp = orig_sp;
+
+			/* orig_sp is the saved pt_regs, find the elr */
+			irq_args = (struct pt_regs *)orig_sp;
+			frame->pc = irq_args->pc;
+		} else {
+			/*
+			 * This frame has a non-standard format, and we
+			 * didn't fix it, because the data looked wrong.
+			 * Refuse to output this frame.
+			 */
+			return -EINVAL;
+		}
+	}
+
 	return 0;
 }
 
-void notrace walk_stackframe(struct stackframe *frame,
+void notrace walk_stackframe(struct task_struct *tsk, struct stackframe *frame,
 		     int (*fn)(struct stackframe *, void *), void *data)
 {
 	while (1) {
@@ -61,12 +126,11 @@
 
 		if (fn(frame, data))
 			break;
-		ret = unwind_frame(frame);
+		ret = unwind_frame(tsk, frame);
 		if (ret < 0)
 			break;
 	}
 }
-EXPORT_SYMBOL(walk_stackframe);
 
 #ifdef CONFIG_STACKTRACE
 struct stack_trace_data {
@@ -93,34 +157,51 @@
 	return trace->nr_entries >= trace->max_entries;
 }
 
-void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
+static noinline void __save_stack_trace(struct task_struct *tsk,
+	struct stack_trace *trace, unsigned int nosched)
 {
 	struct stack_trace_data data;
 	struct stackframe frame;
 
+	if (!try_get_task_stack(tsk))
+		return;
+
 	data.trace = trace;
 	data.skip = trace->skip;
+	data.no_sched_functions = nosched;
 
 	if (tsk != current) {
-		data.no_sched_functions = 1;
 		frame.fp = thread_saved_fp(tsk);
 		frame.sp = thread_saved_sp(tsk);
 		frame.pc = thread_saved_pc(tsk);
 	} else {
-		data.no_sched_functions = 0;
+		/* We don't want this function or its caller */
+		data.skip += 2;
 		frame.fp = (unsigned long)__builtin_frame_address(0);
 		frame.sp = current_stack_pointer;
-		frame.pc = (unsigned long)save_stack_trace_tsk;
+		frame.pc = (unsigned long)__save_stack_trace;
 	}
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+	frame.graph = tsk->curr_ret_stack;
+#endif
 
-	walk_stackframe(&frame, save_trace, &data);
+	walk_stackframe(tsk, &frame, save_trace, &data);
 	if (trace->nr_entries < trace->max_entries)
 		trace->entries[trace->nr_entries++] = ULONG_MAX;
+
+	put_task_stack(tsk);
+}
+EXPORT_SYMBOL(save_stack_trace_tsk);
+
+void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
+{
+	__save_stack_trace(tsk, trace, 1);
 }
 
 void save_stack_trace(struct stack_trace *trace)
 {
-	save_stack_trace_tsk(current, trace);
+	__save_stack_trace(current, trace, 0);
 }
+
 EXPORT_SYMBOL_GPL(save_stack_trace);
 #endif
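
unwind_frame() above walks the AArch64 frame-pointer chain, in which each frame record stores the caller's frame pointer at [fp] and the saved return address at [fp + 8]. A self-contained sketch of that walk, assuming the program is built with -fno-omit-frame-pointer so the chain exists (the alignment and monotonicity checks mirror the fp sanity tests in the hunk):

#include <stdint.h>
#include <stdio.h>

/* On AArch64, [fp] holds the caller's frame pointer and [fp + 8] the
 * saved return address; frame records are 16-byte aligned. */
static void walk_frames(void)
{
	uintptr_t fp = (uintptr_t)__builtin_frame_address(0);

	while (fp && !(fp & 0xf)) {
		uintptr_t next_fp = *(uintptr_t *)fp;
		uintptr_t pc = *(uintptr_t *)(fp + 8);

		printf("pc=%#lx\n", (unsigned long)pc);
		if (next_fp <= fp)	/* must move toward the stack base */
			break;
		fp = next_fp;
	}
}

static __attribute__((noinline)) void leaf(void)  { walk_frames(); }
static __attribute__((noinline)) void inner(void) { leaf(); }

int main(void)
{
	inner();
	return 0;
}

The kernel version additionally has to hop from the per-CPU IRQ stack back onto the task stack, which is what the irq_stack_ptr bookkeeping above handles.
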
diff -ruw linux-4.4.115/arch/arm64/kernel/suspend.c linux-4.4.115-fbx/arch/arm64/kernel/suspend.c
--- linux-4.4.115/arch/arm64/kernel/suspend.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/arch/arm64/kernel/suspend.c	2019-01-22 16:16:21.555228766 +0100
@@ -5,6 +5,7 @@
 #include <asm/cacheflush.h>
 #include <asm/cpufeature.h>
 #include <asm/debug-monitors.h>
+#include <asm/exec.h>
 #include <asm/pgtable.h>
 #include <asm/memory.h>
 #include <asm/mmu_context.h>
@@ -12,30 +13,11 @@
 #include <asm/suspend.h>
 #include <asm/tlbflush.h>
 
-extern int __cpu_suspend_enter(unsigned long arg, int (*fn)(unsigned long));
 /*
- * This is called by __cpu_suspend_enter() to save the state, and do whatever
- * flushing is required to ensure that when the CPU goes to sleep we have
- * the necessary data available when the caches are not searched.
- *
- * ptr: CPU context virtual address
- * save_ptr: address of the location where the context physical address
- *           must be saved
- */
-void notrace __cpu_suspend_save(struct cpu_suspend_ctx *ptr,
-				phys_addr_t *save_ptr)
-{
-	*save_ptr = virt_to_phys(ptr);
-
-	cpu_do_suspend(ptr);
-	/*
-	 * Only flush the context that must be retrieved with the MMU
-	 * off. VA primitives ensure the flush is applied to all
-	 * cache levels so context is pushed to DRAM.
+ * This is allocated by cpu_suspend_init(), and used to store a pointer to
+ * the 'struct sleep_stack_data' that contains a particular CPU's state.
-	 */
+ */
-	__flush_dcache_area(ptr, sizeof(*ptr));
-	__flush_dcache_area(save_ptr, sizeof(*save_ptr));
-}
+unsigned long *sleep_save_stash;
 
 /*
  * This hook is provided so that cpu_suspend code can restore HW
@@ -53,6 +35,24 @@
 	hw_breakpoint_restore = hw_bp_restore;
 }
 
+void notrace __cpu_suspend_exit(void)
+{
+	/*
+	 * We are resuming from reset with the idmap active in TTBR0_EL1.
+	 * We must uninstall the idmap and restore the expected MMU
+	 * state before we can possibly return to userspace.
+	 */
+	cpu_uninstall_idmap();
+
+	/*
+	 * Restore HW breakpoint registers to sane values
+	 * before debug exceptions are possibly reenabled
+	 * through local_dbg_restore.
+	 */
+	if (hw_breakpoint_restore)
+		hw_breakpoint_restore(NULL);
+}
+
 /*
  * cpu_suspend
  *
@@ -62,9 +62,9 @@
  */
 int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
 {
-	struct mm_struct *mm = current->active_mm;
-	int ret;
+	int ret = 0;
 	unsigned long flags;
+	struct sleep_stack_data state;
 
 	/*
 	 * From this point debug exceptions are disabled to prevent
@@ -80,37 +80,9 @@
 	 */
 	pause_graph_tracing();
 
-	/*
-	 * mm context saved on the stack, it will be restored when
-	 * the cpu comes out of reset through the identity mapped
-	 * page tables, so that the thread address space is properly
-	 * set-up on function return.
-	 */
-	ret = __cpu_suspend_enter(arg, fn);
-	if (ret == 0) {
-		/*
-		 * We are resuming from reset with TTBR0_EL1 set to the
-		 * idmap to enable the MMU; set the TTBR0 to the reserved
-		 * page tables to prevent speculative TLB allocations, flush
-		 * the local tlb and set the default tcr_el1.t0sz so that
-		 * the TTBR0 address space set-up is properly restored.
-		 * If the current active_mm != &init_mm we entered cpu_suspend
-		 * with mappings in TTBR0 that must be restored, so we switch
-		 * them back to complete the address space configuration
-		 * restoration before returning.
-		 */
-		cpu_set_reserved_ttbr0();
-		local_flush_tlb_all();
-		cpu_set_default_tcr_t0sz();
-
-		if (mm != &init_mm)
-			cpu_switch_mm(mm->pgd, mm);
-
-		/*
-		 * Restore per-cpu offset before any kernel
-		 * subsystem relying on it has a chance to run.
-		 */
-		set_my_cpu_offset(per_cpu_offset(smp_processor_id()));
+	if (__cpu_suspend_enter(&state)) {
+		/* Call the suspend finisher */
+		ret = fn(arg);
 
 		/*
 		 * PSTATE was not saved over suspend/resume, re-enable any
@@ -118,14 +90,17 @@
 		 */
 		asm(ALTERNATIVE("nop", SET_PSTATE_PAN(1), ARM64_HAS_PAN,
 				CONFIG_ARM64_PAN));
+		uao_thread_switch(current);
 
 		/*
 		 * Restore HW breakpoint registers to sane values
 		 * before debug exceptions are possibly reenabled
 		 * through local_dbg_restore.
 		 */
-		if (hw_breakpoint_restore)
-			hw_breakpoint_restore(NULL);
+		if (!ret)
+			ret = -EOPNOTSUPP;
+	} else {
+		__cpu_suspend_exit();
 	}
 
 	unpause_graph_tracing();
@@ -140,22 +115,15 @@
 	return ret;
 }
 
-struct sleep_save_sp sleep_save_sp;
-
 static int __init cpu_suspend_init(void)
 {
-	void *ctx_ptr;
-
 	/* ctx_ptr is an array of physical addresses */
-	ctx_ptr = kcalloc(mpidr_hash_size(), sizeof(phys_addr_t), GFP_KERNEL);
+	sleep_save_stash = kcalloc(mpidr_hash_size(), sizeof(*sleep_save_stash),
+				   GFP_KERNEL);
 
-	if (WARN_ON(!ctx_ptr))
+	if (WARN_ON(!sleep_save_stash))
 		return -ENOMEM;
 
-	sleep_save_sp.save_ptr_stash = ctx_ptr;
-	sleep_save_sp.save_ptr_stash_phys = virt_to_phys(ctx_ptr);
-	__flush_dcache_area(&sleep_save_sp, sizeof(struct sleep_save_sp));
-
 	return 0;
 }
 early_initcall(cpu_suspend_init);
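
The reworked cpu_suspend() uses __cpu_suspend_enter() much like an inverted setjmp(): a non-zero return is the first pass (run the suspend finisher, which should power the CPU down), and a zero return is the resume path taken after reset, where __cpu_suspend_exit() repairs the MMU and breakpoint state. A rough userspace analogy, with setjmp/longjmp standing in for the firmware round trip (illustrative only, not the kernel API):

#include <setjmp.h>
#include <stdio.h>

static jmp_buf resume_ctx;

/* Stand-in for the firmware power-down plus reset round trip. */
static void fake_firmware_suspend(void)
{
	longjmp(resume_ctx, 1);		/* "wake up" on the resume path */
}

static int cpu_suspend_sketch(void)
{
	if (setjmp(resume_ctx) == 0) {
		/* First pass: call the suspend finisher. If it returns,
		 * the suspend failed, mirroring the -EOPNOTSUPP case. */
		fake_firmware_suspend();
		return -1;
	}
	/* Resume path: undo whatever the reset clobbered
	 * (the kernel's __cpu_suspend_exit()). */
	puts("resumed");
	return 0;
}

int main(void)
{
	return cpu_suspend_sketch();
}

Note the sense is inverted relative to setjmp: __cpu_suspend_enter() returns non-zero on the first pass and zero on resume.
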
diff -ruw linux-4.4.115/arch/arm64/kernel/time.c linux-4.4.115-fbx/arch/arm64/kernel/time.c
--- linux-4.4.115/arch/arm64/kernel/time.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/arch/arm64/kernel/time.c	2019-01-22 16:16:21.555228766 +0100
@@ -52,8 +52,11 @@
 	frame.fp = regs->regs[29];
 	frame.sp = regs->sp;
 	frame.pc = regs->pc;
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+	frame.graph = -1; /* no task info */
+#endif
 	do {
-		int ret = unwind_frame(&frame);
+		int ret = unwind_frame(NULL, &frame);
 		if (ret < 0)
 			return 0;
 	} while (in_lock_functions(frame.pc));
diff -ruw linux-4.4.115/arch/arm64/kernel/topology.c linux-4.4.115-fbx/arch/arm64/kernel/topology.c
--- linux-4.4.115/arch/arm64/kernel/topology.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/arch/arm64/kernel/topology.c	2019-01-22 16:16:21.555228766 +0100
@@ -19,10 +19,52 @@
 #include <linux/nodemask.h>
 #include <linux/of.h>
 #include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include <linux/sched_energy.h>
 
 #include <asm/cputype.h>
 #include <asm/topology.h>
 
+/*
+ * cpu power table
+ * This per cpu data structure describes the relative capacity of each core.
+ * On a heterogeneous system, cores don't have the same computation capacity
+ * and we reflect that difference in the cpu_power field so the scheduler can
+ * take this difference into account during load balance. A per cpu structure
+ * is preferred because each CPU updates its own cpu_power field during the
+ * load balance except for idle cores. One idle core is selected to run the
+ * rebalance_domains for all idle cores and the cpu_power can be updated
+ * during this sequence.
+ */
+static DEFINE_PER_CPU(unsigned long, cpu_scale) = SCHED_CAPACITY_SCALE;
+
+unsigned long arch_scale_freq_power(struct sched_domain *sd, int cpu)
+{
+	return per_cpu(cpu_scale, cpu);
+}
+
+static void set_power_scale(unsigned int cpu, unsigned long power)
+{
+	per_cpu(cpu_scale, cpu) = power;
+}
+
+unsigned long scale_cpu_capacity(struct sched_domain *sd, int cpu)
+{
+#ifdef CONFIG_CPU_FREQ
+	unsigned long max_freq_scale = cpufreq_scale_max_freq_capacity(cpu);
+
+	return per_cpu(cpu_scale, cpu) * max_freq_scale >> SCHED_CAPACITY_SHIFT;
+#else
+	return per_cpu(cpu_scale, cpu);
+#endif
+}
+
+static void set_capacity_scale(unsigned int cpu, unsigned long capacity)
+{
+	per_cpu(cpu_scale, cpu) = capacity;
+}
+
 static int __init get_cpu_for_node(struct device_node *node)
 {
 	struct device_node *cpu_node;
@@ -161,6 +203,46 @@
 	return 0;
 }
 
+struct cpu_efficiency {
+	const char *compatible;
+	unsigned long efficiency;
+};
+
+/*
+ * Table of relative efficiency of each processor.
+ * The efficiency value must fit in 20 bits and the final
+ * cpu_scale value must be in the range
+ *   0 < cpu_scale < 3*SCHED_CAPACITY_SCALE/2
+ * in order to return at most 1 when DIV_ROUND_CLOSEST
+ * is used to compute the capacity of a CPU.
+ * Processors that are not defined in the table
+ * use the default SCHED_CAPACITY_SCALE value for cpu_scale.
+ */
+static const struct cpu_efficiency table_efficiency[] = {
+	{ NULL, },
+};
+
+static unsigned long *__cpu_capacity;
+#define cpu_capacity(cpu)	__cpu_capacity[cpu]
+
+static unsigned long middle_capacity = 1;
+
+static DEFINE_PER_CPU(unsigned long, cpu_efficiency) = SCHED_CAPACITY_SCALE;
+
+unsigned long arch_get_cpu_efficiency(int cpu)
+{
+	return per_cpu(cpu_efficiency, cpu);
+}
+EXPORT_SYMBOL(arch_get_cpu_efficiency);
+
+/*
+ * Iterate over all CPUs' descriptors in the DT and compute the efficiency
+ * (as per table_efficiency). Also calculate a middle efficiency
+ * as close as possible to (max{eff_i} - min{eff_i}) / 2
+ * This is later used to scale the cpu_power field such that an
+ * 'average' CPU is of middle power. Also see the comments near
+ * table_efficiency[] and update_cpu_power().
+ */
 static int __init parse_dt_topology(void)
 {
 	struct device_node *cn, *map;
@@ -200,17 +282,179 @@
 	return ret;
 }
 
+static void __init parse_dt_cpu_power(void)
+{
+	const struct cpu_efficiency *cpu_eff;
+	struct device_node *cn;
+	unsigned long min_capacity = ULONG_MAX;
+	unsigned long max_capacity = 0;
+	unsigned long capacity = 0;
+	int cpu;
+
+	__cpu_capacity = kcalloc(nr_cpu_ids, sizeof(*__cpu_capacity),
+				 GFP_NOWAIT);
+
+	for_each_possible_cpu(cpu) {
+		const u32 *rate;
+		int len;
+		u32 efficiency;
+
+		/* Too early to use cpu->of_node */
+		cn = of_get_cpu_node(cpu, NULL);
+		if (!cn) {
+			pr_err("Missing device node for CPU %d\n", cpu);
+			continue;
+		}
+
+		/*
+		 * The CPU efficiency value passed from the device tree
+		 * overrides the value defined in the table_efficiency[]
+		 */
+		if (of_property_read_u32(cn, "efficiency", &efficiency) < 0) {
+
+			for (cpu_eff = table_efficiency;
+					cpu_eff->compatible; cpu_eff++)
+
+				if (of_device_is_compatible(cn,
+						cpu_eff->compatible))
+					break;
+
+			if (cpu_eff->compatible == NULL) {
+				pr_warn("%s: Unknown CPU type\n",
+						cn->full_name);
+				continue;
+			}
+
+			efficiency = cpu_eff->efficiency;
+		}
+
+		per_cpu(cpu_efficiency, cpu) = efficiency;
+
+		rate = of_get_property(cn, "clock-frequency", &len);
+		if (!rate || len != 4) {
+			pr_err("%s: Missing clock-frequency property\n",
+				cn->full_name);
+			continue;
+		}
+
+		capacity = ((be32_to_cpup(rate)) >> 20) * efficiency;
+
+		/* Save min capacity of the system */
+		if (capacity < min_capacity)
+			min_capacity = capacity;
+
+		/* Save max capacity of the system */
+		if (capacity > max_capacity)
+			max_capacity = capacity;
+
+		cpu_capacity(cpu) = capacity;
+	}
+
+	/* If min and max capacities are equal we bypass the update of the
+	 * cpu_scale because all CPUs have the same capacity. Otherwise, we
+	 * compute a middle_capacity factor that will ensure that the capacity
+	 * of an 'average' CPU of the system will be as close as possible to
+	 * SCHED_CAPACITY_SCALE, which is the default value, but with the
+	 * constraint explained near table_efficiency[].
+	 */
+	if (min_capacity == max_capacity)
+		return;
+	else if (4 * max_capacity < (3 * (max_capacity + min_capacity)))
+		middle_capacity = (min_capacity + max_capacity)
+				>> (SCHED_CAPACITY_SHIFT+1);
+	else
+		middle_capacity = ((max_capacity / 3)
+				>> (SCHED_CAPACITY_SHIFT-1)) + 1;
+}
+
+/*
+ * Look for a custom capacity of a CPU in the cpu_topo_data table during
+ * boot. The update of all CPUs is in O(n^2) for a heterogeneous system but
+ * the function returns directly for an SMP system.
+ */
+static void update_cpu_power(unsigned int cpu)
+{
+	if (!cpu_capacity(cpu))
+		return;
+
+	set_power_scale(cpu, cpu_capacity(cpu) / middle_capacity);
+
+	pr_info("CPU%u: update cpu_power %lu\n",
+		cpu, arch_scale_freq_power(NULL, cpu));
+}
+
 /*
  * cpu topology table
  */
 struct cpu_topology cpu_topology[NR_CPUS];
 EXPORT_SYMBOL_GPL(cpu_topology);
 
+/* sd energy functions */
+static inline
+const struct sched_group_energy * const cpu_cluster_energy(int cpu)
+{
+	struct sched_group_energy *sge = sge_array[cpu][SD_LEVEL1];
+
+	if (!sge) {
+		pr_warn("Invalid sched_group_energy for Cluster%d\n", cpu);
+		return NULL;
+	}
+
+	return sge;
+}
+
+static inline
+const struct sched_group_energy * const cpu_core_energy(int cpu)
+{
+	struct sched_group_energy *sge = sge_array[cpu][SD_LEVEL0];
+
+	if (!sge) {
+		pr_warn("Invalid sched_group_energy for CPU%d\n", cpu);
+		return NULL;
+	}
+
+	return sge;
+}
+
 const struct cpumask *cpu_coregroup_mask(int cpu)
 {
 	return &cpu_topology[cpu].core_sibling;
 }
 
+static int cpu_cpu_flags(void)
+{
+	return SD_ASYM_CPUCAPACITY;
+}
+
+static inline int cpu_corepower_flags(void)
+{
+	return SD_SHARE_PKG_RESOURCES  | SD_SHARE_POWERDOMAIN | \
+	       SD_SHARE_CAP_STATES;
+}
+
+static struct sched_domain_topology_level arm64_topology[] = {
+#ifdef CONFIG_SCHED_MC
+	{ cpu_coregroup_mask, cpu_corepower_flags, cpu_core_energy, SD_INIT_NAME(MC) },
+#endif
+	{ cpu_cpu_mask, cpu_cpu_flags, cpu_cluster_energy, SD_INIT_NAME(DIE) },
+	{ NULL, },
+};
+
+static void update_cpu_capacity(unsigned int cpu)
+{
+	unsigned long capacity = SCHED_CAPACITY_SCALE;
+
+	if (sched_energy_aware && cpu_core_energy(cpu)) {
+		int max_cap_idx = cpu_core_energy(cpu)->nr_cap_states - 1;
+		capacity = cpu_core_energy(cpu)->cap_states[max_cap_idx].cap;
+	}
+
+	set_capacity_scale(cpu, capacity);
+
+	pr_info("CPU%d: update cpu_capacity %lu\n",
+		cpu, arch_scale_cpu_capacity(NULL, cpu));
+}
+
 static void update_siblings_masks(unsigned int cpuid)
 {
 	struct cpu_topology *cpu_topo, *cpuid_topo = &cpu_topology[cpuid];
@@ -272,6 +516,8 @@
 
 topology_populated:
 	update_siblings_masks(cpuid);
+	update_cpu_power(cpuid);
+	update_cpu_capacity(cpuid);
 }
 
 static void __init reset_cpu_topology(void)
@@ -292,14 +538,33 @@
 	}
 }
 
+static void __init reset_cpu_power(void)
+{
+	unsigned int cpu;
+
+	for_each_possible_cpu(cpu)
+		set_power_scale(cpu, SCHED_CAPACITY_SCALE);
+}
+
 void __init init_cpu_topology(void)
 {
+	int cpu;
+
 	reset_cpu_topology();
 
 	/*
 	 * Discard anything that was parsed if we hit an error so we
 	 * don't use partial information.
 	 */
-	if (of_have_populated_dt() && parse_dt_topology())
+	if (of_have_populated_dt() && parse_dt_topology()) {
 		reset_cpu_topology();
+	} else {
+		set_sched_topology(arm64_topology);
+		for_each_possible_cpu(cpu)
+			update_siblings_masks(cpu);
+	}
+
+	reset_cpu_power();
+	parse_dt_cpu_power();
+	init_sched_energy_costs();
 }
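
parse_dt_cpu_power() above computes each CPU's raw capacity as (clock-frequency >> 20) * efficiency and then derives a middle_capacity divisor so that an 'average' CPU's cpu_scale lands near SCHED_CAPACITY_SCALE (1024). A standalone sketch of that arithmetic, with invented frequency and efficiency values for a two-cluster system:

#include <stdio.h>

#define SCHED_CAPACITY_SHIFT	10
#define SCHED_CAPACITY_SCALE	(1UL << SCHED_CAPACITY_SHIFT)

int main(void)
{
	/* Invented example: a big.LITTLE pair. */
	unsigned long freq[2] = { 1800000000UL, 1200000000UL };	/* Hz */
	unsigned long eff[2]  = { 3891, 2048 };	/* relative efficiency */
	unsigned long cap[2], min = ~0UL, max = 0, middle;
	int i;

	for (i = 0; i < 2; i++) {
		cap[i] = (freq[i] >> 20) * eff[i];
		if (cap[i] < min) min = cap[i];
		if (cap[i] > max) max = cap[i];
	}

	/* Same two-branch heuristic as parse_dt_cpu_power(). */
	if (4 * max < 3 * (max + min))
		middle = (min + max) >> (SCHED_CAPACITY_SHIFT + 1);
	else
		middle = ((max / 3) >> (SCHED_CAPACITY_SHIFT - 1)) + 1;

	for (i = 0; i < 2; i++)
		printf("cpu%d: cpu_scale = %lu\n", i, cap[i] / middle);
	return 0;
}

The first branch targets systems whose capacities are close together; the second caps the scale so the constraint near table_efficiency[] (cpu_scale below 3*SCHED_CAPACITY_SCALE/2) still holds for very asymmetric systems.
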
diff -ruw linux-4.4.115/arch/arm64/kernel/traps.c linux-4.4.115-fbx/arch/arm64/kernel/traps.c
--- linux-4.4.115/arch/arm64/kernel/traps.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/arch/arm64/kernel/traps.c	2019-10-29 09:26:23.009196974 +0100
@@ -33,14 +33,20 @@
 #include <linux/syscalls.h>
 
 #include <asm/atomic.h>
+#include <asm/barrier.h>
 #include <asm/bug.h>
 #include <asm/debug-monitors.h>
 #include <asm/esr.h>
 #include <asm/insn.h>
 #include <asm/traps.h>
+#include <asm/stack_pointer.h>
 #include <asm/stacktrace.h>
 #include <asm/exception.h>
 #include <asm/system_misc.h>
+#include <asm/esr.h>
+#include <asm/edac.h>
+
+#include <trace/events/exception.h>
 
 static const char *handler[]= {
 	"Synchronous Abort",
@@ -146,17 +152,32 @@
 static void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)
 {
 	struct stackframe frame;
+	unsigned long irq_stack_ptr;
+	int skip;
+
+	pr_debug("%s(regs = %pK tsk = %pK)\n", __func__, regs, tsk);
+
+	if (!tsk)
+		tsk = current;
+
+	if (!try_get_task_stack(tsk))
+		return;
+
+	/*
+	 * Switching between stacks is valid when tracing current and in
+	 * non-preemptible context.
+	 */
+	if (tsk == current && !preemptible())
+		irq_stack_ptr = IRQ_STACK_PTR(smp_processor_id());
+	else
+		irq_stack_ptr = 0;
 
 	pr_debug("%s(regs = %p tsk = %p)\n", __func__, regs, tsk);
 
 	if (!tsk)
 		tsk = current;
 
-	if (regs) {
-		frame.fp = regs->regs[29];
-		frame.sp = regs->sp;
-		frame.pc = regs->pc;
-	} else if (tsk == current) {
+	if (tsk == current) {
 		frame.fp = (unsigned long)__builtin_frame_address(0);
 		frame.sp = current_stack_pointer;
 		frame.pc = (unsigned long)dump_backtrace;
@@ -168,24 +189,54 @@
 		frame.sp = thread_saved_sp(tsk);
 		frame.pc = thread_saved_pc(tsk);
 	}
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+	frame.graph = tsk->curr_ret_stack;
+#endif
 
-	pr_emerg("Call trace:\n");
+	skip = !!regs;
+	printk("Call trace:\n");
 	while (1) {
 		unsigned long where = frame.pc;
 		unsigned long stack;
 		int ret;
 
+		/* skip until specified stack frame */
+		if (!skip) {
-		dump_backtrace_entry(where);
+			dump_backtrace_entry(where);
-		ret = unwind_frame(&frame);
+		} else if (frame.fp == regs->regs[29]) {
+			skip = 0;
+			/*
+			 * Mostly, this is the case where this function is
+			 * called in panic/abort. As exception handler's
+			 * stack frame does not contain the corresponding pc
+			 * at which an exception has taken place, use regs->pc
+			 * instead.
+			 */
+			dump_backtrace_entry(regs->pc);
+		}
+		ret = unwind_frame(tsk, &frame);
 		if (ret < 0)
 			break;
 		stack = frame.sp;
-		if (in_exception_text(where))
+		if (in_exception_text(where)) {
+			/*
+			 * If we switched to the irq_stack before calling this
+			 * exception handler, then the pt_regs will be on the
+			 * task stack. The easiest way to tell is if the large
+			 * pt_regs would overlap with the end of the irq_stack.
+			 */
+			if (stack < irq_stack_ptr &&
+			    (stack + sizeof(struct pt_regs)) > irq_stack_ptr)
+				stack = IRQ_STACK_TO_TASK_STACK(irq_stack_ptr);
+
 			dump_mem("", "Exception stack", stack,
 				 stack + sizeof(struct pt_regs), false);
 	}
 }
 
+	put_task_stack(tsk);
+}
+
 void show_stack(struct task_struct *tsk, unsigned long *sp)
 {
 	dump_backtrace(NULL, tsk);
@@ -199,10 +250,9 @@
 #endif
 #define S_SMP " SMP"
 
-static int __die(const char *str, int err, struct thread_info *thread,
-		 struct pt_regs *regs)
+static int __die(const char *str, int err, struct pt_regs *regs)
 {
-	struct task_struct *tsk = thread->task;
+	struct task_struct *tsk = current;
 	static int die_counter;
 	int ret;
 
@@ -217,12 +267,10 @@
 	print_modules();
 	__show_regs(regs);
 	pr_emerg("Process %.*s (pid: %d, stack limit = 0x%p)\n",
-		 TASK_COMM_LEN, tsk->comm, task_pid_nr(tsk), thread + 1);
+		 TASK_COMM_LEN, tsk->comm, task_pid_nr(tsk),
+		 end_of_stack(tsk));
 
 	if (!user_mode(regs) || in_interrupt()) {
-		dump_mem(KERN_EMERG, "Stack: ", regs->sp,
-			 THREAD_SIZE + (unsigned long)task_stack_page(tsk),
-			 compat_user_mode(regs));
 		dump_backtrace(regs, tsk);
 		dump_instr(KERN_EMERG, regs);
 	}
@@ -230,39 +278,75 @@
 	return ret;
 }
 
-static DEFINE_RAW_SPINLOCK(die_lock);
+static arch_spinlock_t die_lock = __ARCH_SPIN_LOCK_UNLOCKED;
+static int die_owner = -1;
+static unsigned int die_nest_count;
 
-/*
- * This function is protected against re-entrancy.
- */
-void die(const char *str, struct pt_regs *regs, int err)
+static unsigned long oops_begin(void)
 {
-	struct thread_info *thread = current_thread_info();
-	int ret;
+	int cpu;
+	unsigned long flags;
 
 	oops_enter();
 
-	raw_spin_lock_irq(&die_lock);
+	/* racy, but better than risking deadlock. */
+	raw_local_irq_save(flags);
+	cpu = smp_processor_id();
+	if (!arch_spin_trylock(&die_lock)) {
+		if (cpu == die_owner)
+			/* nested oops. should stop eventually */;
+		else
+			arch_spin_lock(&die_lock);
+	}
+	die_nest_count++;
+	die_owner = cpu;
 	console_verbose();
 	bust_spinlocks(1);
-	ret = __die(str, err, thread, regs);
+	return flags;
+}
 
-	if (regs && kexec_should_crash(thread->task))
+static void oops_end(unsigned long flags, struct pt_regs *regs, int notify)
+{
+	if (regs && kexec_should_crash(current))
 		crash_kexec(regs);
 
 	bust_spinlocks(0);
+	die_owner = -1;
 	add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
-	raw_spin_unlock_irq(&die_lock);
+	die_nest_count--;
+	if (!die_nest_count)
+		/* Nest count reaches zero, release the lock. */
+		arch_spin_unlock(&die_lock);
+	raw_local_irq_restore(flags);
 	oops_exit();
 
 	if (in_interrupt())
 		panic("Fatal exception in interrupt");
 	if (panic_on_oops)
 		panic("Fatal exception");
-	if (ret != NOTIFY_STOP)
+	if (notify != NOTIFY_STOP)
 		do_exit(SIGSEGV);
 }
 
+/*
+ * This function is protected against re-entrancy.
+ */
+void die(const char *str, struct pt_regs *regs, int err)
+{
+	enum bug_trap_type bug_type = BUG_TRAP_TYPE_NONE;
+	unsigned long flags = oops_begin();
+	int ret;
+
+	if (!user_mode(regs))
+		bug_type = report_bug(regs->pc, regs);
+	if (bug_type != BUG_TRAP_TYPE_NONE)
+		str = "Oops - BUG";
+
+	ret = __die(str, err, regs);
+
+	oops_end(flags, regs, ret);
+}
+
 void arm64_notify_die(const char *str, struct pt_regs *regs,
 		      struct siginfo *info, int err)
 {
@@ -350,6 +434,8 @@
 	if (call_undef_hook(regs) == 0)
 		return;
 
+	trace_undef_instr(regs, (void *)pc);
+
 	if (unhandled_signal(current, SIGILL) && show_unhandled_signals_ratelimited()) {
 		pr_info("%s[%d]: undefined instruction: pc=%p\n",
 			current->comm, task_pid_nr(current), pc);
@@ -364,6 +450,38 @@
 	arm64_notify_die("Oops - undefined instruction", regs, &info, 0);
 }
 
+static void cntvct_read_handler(unsigned int esr, struct pt_regs *regs)
+{
+	int rt = (esr & ESR_ELx_SYS64_ISS_RT_MASK) >> ESR_ELx_SYS64_ISS_RT_SHIFT;
+
+	isb();
+	if (rt != 31)
+		regs->regs[rt] = arch_counter_get_cntvct();
+	regs->pc += 4;
+}
+
+static void cntfrq_read_handler(unsigned int esr, struct pt_regs *regs)
+{
+	int rt = (esr & ESR_ELx_SYS64_ISS_RT_MASK) >> ESR_ELx_SYS64_ISS_RT_SHIFT;
+
+	if (rt != 31)
+		regs->regs[rt] = read_sysreg(cntfrq_el0);
+	regs->pc += 4;
+}
+
+asmlinkage void __exception do_sysinstr(unsigned int esr, struct pt_regs *regs)
+{
+	if ((esr & ESR_ELx_SYS64_ISS_SYS_OP_MASK) == ESR_ELx_SYS64_ISS_SYS_CNTVCT) {
+		cntvct_read_handler(esr, regs);
+		return;
+	} else if ((esr & ESR_ELx_SYS64_ISS_SYS_OP_MASK) == ESR_ELx_SYS64_ISS_SYS_CNTFRQ) {
+		cntfrq_read_handler(esr, regs);
+		return;
+	}
+
+	do_undefinstr(regs);
+}
+
 long compat_arm_syscall(struct pt_regs *regs);
 
 asmlinkage long do_ni_syscall(struct pt_regs *regs)
@@ -430,7 +548,7 @@
 
 const char *esr_get_class_string(u32 esr)
 {
-	return esr_class_str[esr >> ESR_ELx_EC_SHIFT];
+	return esr_class_str[ESR_ELx_EC(esr)];
 }
 
 /*
@@ -444,6 +562,12 @@
 	pr_crit("Bad mode in %s handler detected, code 0x%08x -- %s\n",
 		handler[reason], esr, esr_get_class_string(esr));
 
+	if (esr >> ESR_ELx_EC_SHIFT == ESR_ELx_EC_SERROR) {
+		pr_crit("System error detected. ESR.ISS = %08x\n",
+			esr & 0xffffff);
+		arm64_check_cache_ecc(NULL);
+	}
+
 	die("Oops - bad mode", regs, 0);
 	local_irq_disable();
 	panic("bad mode");
@@ -476,22 +600,22 @@
 
 void __pte_error(const char *file, int line, unsigned long val)
 {
-	pr_crit("%s:%d: bad pte %016lx.\n", file, line, val);
+	pr_err("%s:%d: bad pte %016lx.\n", file, line, val);
 }
 
 void __pmd_error(const char *file, int line, unsigned long val)
 {
-	pr_crit("%s:%d: bad pmd %016lx.\n", file, line, val);
+	pr_err("%s:%d: bad pmd %016lx.\n", file, line, val);
 }
 
 void __pud_error(const char *file, int line, unsigned long val)
 {
-	pr_crit("%s:%d: bad pud %016lx.\n", file, line, val);
+	pr_err("%s:%d: bad pud %016lx.\n", file, line, val);
 }
 
 void __pgd_error(const char *file, int line, unsigned long val)
 {
-	pr_crit("%s:%d: bad pgd %016lx.\n", file, line, val);
+	pr_err("%s:%d: bad pgd %016lx.\n", file, line, val);
 }
 
 /* GENERIC_BUG traps */
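
do_sysinstr() above emulates EL0 reads of CNTVCT_EL0/CNTFRQ_EL0 by decoding the destination register index (Rt) from the ESR's ISS field, bits [9:5] as defined by ESR_ELx_SYS64_ISS_RT_* in the kernel's esr.h: the write is skipped when Rt is 31 (xzr) and the PC is advanced past the trapped 4-byte instruction. A minimal decode sketch (fake_regs and the sample ESR value are invented for illustration):

#include <stdint.h>
#include <stdio.h>

/* ISS encoding for a trapped AArch64 MRS: Rt lives in bits [9:5]. */
#define RT_SHIFT	5
#define RT_MASK		(0x1fu << RT_SHIFT)

struct fake_regs { uint64_t regs[31]; uint64_t pc; };

/* Emulate "mrs xN, cntvct_el0": write the counter into xN unless
 * Rt == 31 (the zero register), then step past the instruction. */
static void emulate_cntvct_read(uint32_t esr, struct fake_regs *r,
				uint64_t counter)
{
	unsigned int rt = (esr & RT_MASK) >> RT_SHIFT;

	if (rt != 31)
		r->regs[rt] = counter;
	r->pc += 4;
}

int main(void)
{
	struct fake_regs r = { .pc = 0x1000 };
	uint32_t esr = 3u << RT_SHIFT;	/* pretend Rt == x3 */

	emulate_cntvct_read(esr, &r, 0xdeadbeef);
	printf("x3=%#llx pc=%#llx\n",
	       (unsigned long long)r.regs[3], (unsigned long long)r.pc);
	return 0;
}
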
diff -ruw linux-4.4.115/arch/arm64/kernel/vdso/gettimeofday.S linux-4.4.115-fbx/arch/arm64/kernel/vdso/gettimeofday.S
--- linux-4.4.115/arch/arm64/kernel/vdso/gettimeofday.S	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/arch/arm64/kernel/vdso/gettimeofday.S	2019-10-29 09:26:23.009196974 +0100
@@ -26,24 +26,109 @@
 #define NSEC_PER_SEC_HI16	0x3b9a
 
 vdso_data	.req	x6
-use_syscall	.req	w7
-seqcnt		.req	w8
+seqcnt		.req	w7
+w_tmp		.req	w8
+x_tmp		.req	x8
+
+/*
+ * Conventions for macro arguments:
+ * - An argument is write-only if its name starts with "res".
+ * - All other arguments are read-only, unless otherwise specified.
+ */
 
 	.macro	seqcnt_acquire
 9999:	ldr	seqcnt, [vdso_data, #VDSO_TB_SEQ_COUNT]
 	tbnz	seqcnt, #0, 9999b
 	dmb	ishld
-	ldr	use_syscall, [vdso_data, #VDSO_USE_SYSCALL]
 	.endm
 
-	.macro	seqcnt_read, cnt
+	.macro	seqcnt_check fail
 	dmb	ishld
-	ldr	\cnt, [vdso_data, #VDSO_TB_SEQ_COUNT]
+	ldr	w_tmp, [vdso_data, #VDSO_TB_SEQ_COUNT]
+	cmp	w_tmp, seqcnt
+	b.ne	\fail
 	.endm
 
-	.macro	seqcnt_check, cnt, fail
-	cmp	\cnt, seqcnt
-	b.ne	\fail
+	.macro	syscall_check fail
+	ldr	w_tmp, [vdso_data, #VDSO_USE_SYSCALL]
+	cbnz	w_tmp, \fail
+	.endm
+
+	.macro get_nsec_per_sec res
+	mov	\res, #NSEC_PER_SEC_LO16
+	movk	\res, #NSEC_PER_SEC_HI16, lsl #16
+	.endm
+
+	/*
+	 * Returns the clock delta, in nanoseconds left-shifted by the clock
+	 * shift.
+	 */
+	.macro	get_clock_shifted_nsec res, cycle_last, mult
+	/* Read the virtual counter. */
+	isb
+	mrs	x_tmp, cntvct_el0
+	/* Calculate cycle delta and convert to ns. */
+	sub	\res, x_tmp, \cycle_last
+	/* We can only guarantee 56 bits of precision. */
+	movn	x_tmp, #0xff00, lsl #48
+	and	\res, x_tmp, \res
+	mul	\res, \res, \mult
+	.endm
+
+	/*
+	 * Returns in res_{sec,nsec} the REALTIME timespec, based on the
+	 * "wall time" (xtime) and the clock_mono delta.
+	 */
+	.macro	get_ts_realtime res_sec, res_nsec, \
+			clock_nsec, xtime_sec, xtime_nsec, nsec_to_sec
+	add	\res_nsec, \clock_nsec, \xtime_nsec
+	udiv	x_tmp, \res_nsec, \nsec_to_sec
+	add	\res_sec, \xtime_sec, x_tmp
+	msub	\res_nsec, x_tmp, \nsec_to_sec, \res_nsec
+	.endm
+
+	/*
+	 * Returns in res_{sec,nsec} the timespec based on the clock_raw delta,
+	 * used for CLOCK_MONOTONIC_RAW.
+	 */
+	.macro	get_ts_clock_raw res_sec, res_nsec, clock_nsec, nsec_to_sec
+	udiv	\res_sec, \clock_nsec, \nsec_to_sec
+	msub	\res_nsec, \res_sec, \nsec_to_sec, \clock_nsec
+	.endm
+
+	/* sec and nsec are modified in place. */
+	.macro add_ts sec, nsec, ts_sec, ts_nsec, nsec_to_sec
+	/* Add timespec. */
+	add	\sec, \sec, \ts_sec
+	add	\nsec, \nsec, \ts_nsec
+
+	/* Normalise the new timespec. */
+	cmp	\nsec, \nsec_to_sec
+	b.lt	9999f
+	sub	\nsec, \nsec, \nsec_to_sec
+	add	\sec, \sec, #1
+9999:
+	cmp	\nsec, #0
+	b.ge	9998f
+	add	\nsec, \nsec, \nsec_to_sec
+	sub	\sec, \sec, #1
+9998:
+	.endm
+
+	.macro clock_gettime_return, shift=0
+	.if \shift == 1
+	lsr	x11, x11, x12
+	.endif
+	stp	x10, x11, [x1, #TSPEC_TV_SEC]
+	mov	x0, xzr
+	ret
+	.endm
+
+	.macro jump_slot jumptable, index, label
+	.if (. - \jumptable) != 4 * (\index)
+	.error "Jump slot index mismatch"
+	.endif
+	b	\label
 	.endm
 
 	.text
@@ -51,18 +136,25 @@
 /* int __kernel_gettimeofday(struct timeval *tv, struct timezone *tz); */
 ENTRY(__kernel_gettimeofday)
 	.cfi_startproc
-	mov	x2, x30
-	.cfi_register x30, x2
-
-	/* Acquire the sequence counter and get the timespec. */
 	adr	vdso_data, _vdso_data
-1:	seqcnt_acquire
-	cbnz	use_syscall, 4f
-
 	/* If tv is NULL, skip to the timezone code. */
 	cbz	x0, 2f
-	bl	__do_get_tspec
-	seqcnt_check w9, 1b
+
+	/* Compute the time of day. */
+1:	seqcnt_acquire
+	syscall_check fail=4f
+	ldr	x10, [vdso_data, #VDSO_CS_CYCLE_LAST]
+	/* w11 = cs_mono_mult, w12 = cs_shift */
+	ldp	w11, w12, [vdso_data, #VDSO_CS_MONO_MULT]
+	ldp	x13, x14, [vdso_data, #VDSO_XTIME_CLK_SEC]
+	seqcnt_check fail=1b
+
+	get_nsec_per_sec res=x9
+	lsl	x9, x9, x12
+
+	get_clock_shifted_nsec res=x15, cycle_last=x10, mult=x11
+	get_ts_realtime res_sec=x10, res_nsec=x11, \
+		clock_nsec=x15, xtime_sec=x13, xtime_nsec=x14, nsec_to_sec=x9
 
 	/* Convert ns to us. */
 	mov	x13, #1000
@@ -76,95 +168,126 @@
 	stp	w4, w5, [x1, #TZ_MINWEST]
 3:
 	mov	x0, xzr
-	ret	x2
+	ret
 4:
 	/* Syscall fallback. */
 	mov	x8, #__NR_gettimeofday
 	svc	#0
-	ret	x2
+	ret
 	.cfi_endproc
 ENDPROC(__kernel_gettimeofday)
 
+#define JUMPSLOT_MAX CLOCK_MONOTONIC_COARSE
+
 /* int __kernel_clock_gettime(clockid_t clock_id, struct timespec *tp); */
 ENTRY(__kernel_clock_gettime)
 	.cfi_startproc
-	cmp	w0, #CLOCK_REALTIME
-	ccmp	w0, #CLOCK_MONOTONIC, #0x4, ne
-	b.ne	2f
-
-	mov	x2, x30
-	.cfi_register x30, x2
-
-	/* Get kernel timespec. */
+	cmp	w0, #JUMPSLOT_MAX
+	b.hi	syscall
 	adr	vdso_data, _vdso_data
-1:	seqcnt_acquire
-	cbnz	use_syscall, 7f
-
-	bl	__do_get_tspec
-	seqcnt_check w9, 1b
-
-	mov	x30, x2
-
-	cmp	w0, #CLOCK_MONOTONIC
-	b.ne	6f
+	adr	x_tmp, jumptable
+	add	x_tmp, x_tmp, w0, uxtw #2
+	br	x_tmp
+
+	ALIGN
+jumptable:
+	jump_slot jumptable, CLOCK_REALTIME, realtime
+	jump_slot jumptable, CLOCK_MONOTONIC, monotonic
+	b	syscall
+	b	syscall
+	jump_slot jumptable, CLOCK_MONOTONIC_RAW, monotonic_raw
+	jump_slot jumptable, CLOCK_REALTIME_COARSE, realtime_coarse
+	jump_slot jumptable, CLOCK_MONOTONIC_COARSE, monotonic_coarse
+
+	.if (. - jumptable) != 4 * (JUMPSLOT_MAX + 1)
+	.error	"Wrong jumptable size"
+	.endif
+
+	ALIGN
+realtime:
+	seqcnt_acquire
+	syscall_check fail=syscall
+	ldr	x10, [vdso_data, #VDSO_CS_CYCLE_LAST]
+	/* w11 = cs_mono_mult, w12 = cs_shift */
+	ldp	w11, w12, [vdso_data, #VDSO_CS_MONO_MULT]
+	ldp	x13, x14, [vdso_data, #VDSO_XTIME_CLK_SEC]
+	seqcnt_check fail=realtime
 
-	/* Get wtm timespec. */
-	ldp	x13, x14, [vdso_data, #VDSO_WTM_CLK_SEC]
+	/* All computations are done with left-shifted nsecs. */
+	get_nsec_per_sec res=x9
+	lsl	x9, x9, x12
+
+	get_clock_shifted_nsec res=x15, cycle_last=x10, mult=x11
+	get_ts_realtime res_sec=x10, res_nsec=x11, \
+		clock_nsec=x15, xtime_sec=x13, xtime_nsec=x14, nsec_to_sec=x9
+	clock_gettime_return, shift=1
+
+	ALIGN
+monotonic:
+	seqcnt_acquire
+	syscall_check fail=syscall
+	ldr	x10, [vdso_data, #VDSO_CS_CYCLE_LAST]
+	/* w11 = cs_mono_mult, w12 = cs_shift */
+	ldp	w11, w12, [vdso_data, #VDSO_CS_MONO_MULT]
+	ldp	x13, x14, [vdso_data, #VDSO_XTIME_CLK_SEC]
+	ldp	x3, x4, [vdso_data, #VDSO_WTM_CLK_SEC]
+	seqcnt_check fail=monotonic
 
-	/* Check the sequence counter. */
-	seqcnt_read w9
-	seqcnt_check w9, 1b
-	b	4f
-2:
-	cmp	w0, #CLOCK_REALTIME_COARSE
-	ccmp	w0, #CLOCK_MONOTONIC_COARSE, #0x4, ne
-	b.ne	8f
+	/* All computations are done with left-shifted nsecs. */
+	lsl	x4, x4, x12
+	get_nsec_per_sec res=x9
+	lsl	x9, x9, x12
+
+	get_clock_shifted_nsec res=x15, cycle_last=x10, mult=x11
+	get_ts_realtime res_sec=x10, res_nsec=x11, \
+		clock_nsec=x15, xtime_sec=x13, xtime_nsec=x14, nsec_to_sec=x9
+
+	add_ts sec=x10, nsec=x11, ts_sec=x3, ts_nsec=x4, nsec_to_sec=x9
+	clock_gettime_return, shift=1
+
+	ALIGN
+monotonic_raw:
+	seqcnt_acquire
+	syscall_check fail=syscall
+	ldr	x10, [vdso_data, #VDSO_CS_CYCLE_LAST]
+	/* w11 = cs_raw_mult, w12 = cs_shift */
+	ldp	w12, w11, [vdso_data, #VDSO_CS_SHIFT]
+	ldp	x13, x14, [vdso_data, #VDSO_RAW_TIME_SEC]
+	seqcnt_check fail=monotonic_raw
 
-	/* xtime_coarse_nsec is already right-shifted */
-	mov	x12, #0
+	/* All computations are done with left-shifted nsecs. */
+	lsl	x14, x14, x12
+	get_nsec_per_sec res=x9
+	lsl	x9, x9, x12
 
-	/* Get coarse timespec. */
-	adr	vdso_data, _vdso_data
-3:	seqcnt_acquire
+	get_clock_shifted_nsec res=x15, cycle_last=x10, mult=x11
+	get_ts_clock_raw res_sec=x10, res_nsec=x11, \
+		clock_nsec=x15, nsec_to_sec=x9
+
+	add_ts sec=x10, nsec=x11, ts_sec=x13, ts_nsec=x14, nsec_to_sec=x9
+	clock_gettime_return, shift=1
+
+	ALIGN
+realtime_coarse:
+	seqcnt_acquire
 	ldp	x10, x11, [vdso_data, #VDSO_XTIME_CRS_SEC]
+	seqcnt_check fail=realtime_coarse
+	clock_gettime_return
 
-	/* Get wtm timespec. */
+	ALIGN
+monotonic_coarse:
+	seqcnt_acquire
+	ldp	x10, x11, [vdso_data, #VDSO_XTIME_CRS_SEC]
 	ldp	x13, x14, [vdso_data, #VDSO_WTM_CLK_SEC]
+	seqcnt_check fail=monotonic_coarse
 
-	/* Check the sequence counter. */
-	seqcnt_read w9
-	seqcnt_check w9, 3b
-
-	cmp	w0, #CLOCK_MONOTONIC_COARSE
-	b.ne	6f
-4:
-	/* Add on wtm timespec. */
-	add	x10, x10, x13
-	lsl	x14, x14, x12
-	add	x11, x11, x14
+	/* Computations are done in (non-shifted) nsecs. */
+	get_nsec_per_sec res=x9
+	add_ts sec=x10, nsec=x11, ts_sec=x13, ts_nsec=x14, nsec_to_sec=x9
+	clock_gettime_return
 
-	/* Normalise the new timespec. */
-	mov	x15, #NSEC_PER_SEC_LO16
-	movk	x15, #NSEC_PER_SEC_HI16, lsl #16
-	lsl	x15, x15, x12
-	cmp	x11, x15
-	b.lt	5f
-	sub	x11, x11, x15
-	add	x10, x10, #1
-5:
-	cmp	x11, #0
-	b.ge	6f
-	add	x11, x11, x15
-	sub	x10, x10, #1
-
-6:	/* Store to the user timespec. */
-	lsr	x11, x11, x12
-	stp	x10, x11, [x1, #TSPEC_TV_SEC]
-	mov	x0, xzr
-	ret
-7:
-	mov	x30, x2
-8:	/* Syscall fallback. */
+	ALIGN
+syscall: /* Syscall fallback. */
 	mov	x8, #__NR_clock_gettime
 	svc	#0
 	ret
@@ -176,6 +299,7 @@
 	.cfi_startproc
 	cmp	w0, #CLOCK_REALTIME
 	ccmp	w0, #CLOCK_MONOTONIC, #0x4, ne
+	ccmp	w0, #CLOCK_MONOTONIC_RAW, #0x4, ne
 	b.ne	1f
 
 	ldr	x2, 5f
@@ -186,7 +310,7 @@
 	b.ne	4f
 	ldr	x2, 6f
 2:
-	cbz	w1, 3f
+	cbz	x1, 3f
 	stp	xzr, x2, [x1]
 
 3:	/* res == NULL. */
@@ -203,46 +327,3 @@
 	.quad	CLOCK_COARSE_RES
 	.cfi_endproc
 ENDPROC(__kernel_clock_getres)
-
-/*
- * Read the current time from the architected counter.
- * Expects vdso_data to be initialised.
- * Clobbers the temporary registers (x9 - x15).
- * Returns:
- *  - w9		= vDSO sequence counter
- *  - (x10, x11)	= (ts->tv_sec, shifted ts->tv_nsec)
- *  - w12		= cs_shift
- */
-ENTRY(__do_get_tspec)
-	.cfi_startproc
-
-	/* Read from the vDSO data page. */
-	ldr	x10, [vdso_data, #VDSO_CS_CYCLE_LAST]
-	ldp	x13, x14, [vdso_data, #VDSO_XTIME_CLK_SEC]
-	ldp	w11, w12, [vdso_data, #VDSO_CS_MULT]
-	seqcnt_read w9
-
-	/* Read the virtual counter. */
-	isb
-	mrs	x15, cntvct_el0
-
-	/* Calculate cycle delta and convert to ns. */
-	sub	x10, x15, x10
-	/* We can only guarantee 56 bits of precision. */
-	movn	x15, #0xff00, lsl #48
-	and	x10, x15, x10
-	mul	x10, x10, x11
-
-	/* Use the kernel time to calculate the new timespec. */
-	mov	x11, #NSEC_PER_SEC_LO16
-	movk	x11, #NSEC_PER_SEC_HI16, lsl #16
-	lsl	x11, x11, x12
-	add	x15, x10, x14
-	udiv	x14, x15, x11
-	add	x10, x13, x14
-	mul	x13, x14, x11
-	sub	x11, x15, x13
-
-	ret
-	.cfi_endproc
-ENDPROC(__do_get_tspec)
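
The rewritten fast path above computes CLOCK_REALTIME as sec = xtime_sec + ns / (NSEC_PER_SEC << shift), keeping every nanosecond term left-shifted by cs_shift until the final store. The same arithmetic in C, a sketch of the math only (the struct mirrors the vdso_data slots read above; the sample numbers are invented):

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC	1000000000ULL

struct vdso_snapshot {		/* mirrors the VDSO_* fields read above */
	uint64_t cycle_last;
	uint32_t mult, shift;
	uint64_t xtime_sec, xtime_nsec;	/* nsec already << shift */
};

static void realtime(const struct vdso_snapshot *d, uint64_t cycles,
		     uint64_t *sec, uint64_t *nsec)
{
	/* Only 56 bits of counter delta are guaranteed meaningful,
	 * matching the movn/and mask in get_clock_shifted_nsec. */
	uint64_t delta = (cycles - d->cycle_last) & ((1ULL << 56) - 1);
	uint64_t ns = delta * d->mult + d->xtime_nsec;	/* shifted ns */
	uint64_t ns_per_sec = NSEC_PER_SEC << d->shift;

	*sec  = d->xtime_sec + ns / ns_per_sec;
	*nsec = (ns % ns_per_sec) >> d->shift;		/* un-shift last */
}

int main(void)
{
	struct vdso_snapshot d = {
		.cycle_last = 1000, .mult = 13333, .shift = 8,
		.xtime_sec = 1700000000, .xtime_nsec = 0,
	};
	uint64_t sec, nsec;

	realtime(&d, 1000 + 19200000, &sec, &nsec);	/* ~1 s at 19.2 MHz */
	printf("%llu.%09llu\n", (unsigned long long)sec,
	       (unsigned long long)nsec);
	return 0;
}

Working in shifted nanoseconds is what lets the assembly use a single udiv/msub pair per clock instead of a wider multiply.
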
diff -ruw linux-4.4.115/arch/arm64/kernel/vdso/Makefile linux-4.4.115-fbx/arch/arm64/kernel/vdso/Makefile
--- linux-4.4.115/arch/arm64/kernel/vdso/Makefile	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/arch/arm64/kernel/vdso/Makefile	2019-01-22 16:16:21.555228766 +0100
@@ -23,7 +23,7 @@
 ccflags-y += -Wl,-shared
 
 obj-y += vdso.o
-extra-y += vdso.lds vdso-offsets.h
+extra-y += vdso.lds
 CPPFLAGS_vdso.lds += -P -C -U$(ARCH)
 
 # Force dependency (incbin is bad)
@@ -42,11 +42,10 @@
 gen-vdsosym := $(srctree)/$(src)/gen_vdso_offsets.sh
 quiet_cmd_vdsosym = VDSOSYM $@
 define cmd_vdsosym
-	$(NM) $< | $(gen-vdsosym) | LC_ALL=C sort > $@ && \
-	cp $@ include/generated/
+	$(NM) $< | $(gen-vdsosym) | LC_ALL=C sort > $@
 endef
 
-$(obj)/vdso-offsets.h: $(obj)/vdso.so.dbg FORCE
+include/generated/vdso-offsets.h: $(obj)/vdso.so.dbg FORCE
 	$(call if_changed,vdsosym)
 
 # Assembly rules for the .S files
diff -ruw linux-4.4.115/arch/arm64/kernel/vdso/vdso.S linux-4.4.115-fbx/arch/arm64/kernel/vdso/vdso.S
--- linux-4.4.115/arch/arm64/kernel/vdso/vdso.S	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/arch/arm64/kernel/vdso/vdso.S	2019-01-22 16:16:21.555228766 +0100
@@ -21,9 +21,8 @@
 #include <linux/const.h>
 #include <asm/page.h>
 
-	__PAGE_ALIGNED_DATA
-
 	.globl vdso_start, vdso_end
+	.section .rodata
 	.balign PAGE_SIZE
 vdso_start:
 	.incbin "arch/arm64/kernel/vdso/vdso.so"
diff -ruw linux-4.4.115/arch/arm64/kernel/vdso.c linux-4.4.115-fbx/arch/arm64/kernel/vdso.c
--- linux-4.4.115/arch/arm64/kernel/vdso.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/arch/arm64/kernel/vdso.c	2019-10-29 09:26:23.009196974 +0100
@@ -55,7 +55,7 @@
  */
 static struct page *vectors_page[1];
 
-static int alloc_vectors_page(void)
+static int __init alloc_vectors_page(void)
 {
 	extern char __kuser_helper_start[], __kuser_helper_end[];
 	extern char __aarch32_sigret_code_start[], __aarch32_sigret_code_end[];
@@ -88,7 +88,7 @@
 {
 	struct mm_struct *mm = current->mm;
 	unsigned long addr = AARCH32_VECTORS_BASE;
-	static struct vm_special_mapping spec = {
+	static const struct vm_special_mapping spec = {
 		.name	= "[vectors]",
 		.pages	= vectors_page,
 
@@ -114,6 +114,7 @@
 static int __init vdso_init(void)
 {
 	int i;
+	unsigned long pfn;
 
 	if (memcmp(&vdso_start, "\177ELF", 4)) {
 		pr_err("vDSO is not a valid ELF object!\n");
@@ -131,11 +132,14 @@
 		return -ENOMEM;
 
 	/* Grab the vDSO data page. */
-	vdso_pagelist[0] = virt_to_page(vdso_data);
+	vdso_pagelist[0] = phys_to_page(__pa_symbol(vdso_data));
+
 
 	/* Grab the vDSO code pages. */
+	pfn = sym_to_pfn(&vdso_start);
+
 	for (i = 0; i < vdso_pages; i++)
-		vdso_pagelist[i + 1] = virt_to_page(&vdso_start + i * PAGE_SIZE);
+		vdso_pagelist[i + 1] = pfn_to_page(pfn + i);
 
 	/* Populate the special mapping structures */
 	vdso_spec[0] = (struct vm_special_mapping) {
@@ -212,10 +216,16 @@
 	vdso_data->wtm_clock_nsec		= tk->wall_to_monotonic.tv_nsec;
 
 	if (!use_syscall) {
+		/* tkr_mono.cycle_last == tkr_raw.cycle_last */
 		vdso_data->cs_cycle_last	= tk->tkr_mono.cycle_last;
+		vdso_data->raw_time_sec         = tk->raw_sec;
+		vdso_data->raw_time_nsec        = tk->tkr_raw.xtime_nsec;
 		vdso_data->xtime_clock_sec	= tk->xtime_sec;
 		vdso_data->xtime_clock_nsec	= tk->tkr_mono.xtime_nsec;
-		vdso_data->cs_mult		= tk->tkr_mono.mult;
+		/* tkr_raw.xtime_nsec == 0 */
+		vdso_data->cs_mono_mult		= tk->tkr_mono.mult;
+		vdso_data->cs_raw_mult		= tk->tkr_raw.mult;
+		/* tkr_mono.shift == tkr_raw.shift */
 		vdso_data->cs_shift		= tk->tkr_mono.shift;
 	}
 
diff -ruw linux-4.4.115/arch/arm64/kernel/vmlinux.lds.S linux-4.4.115-fbx/arch/arm64/kernel/vmlinux.lds.S
--- linux-4.4.115/arch/arm64/kernel/vmlinux.lds.S	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/arch/arm64/kernel/vmlinux.lds.S	2019-10-29 09:26:23.009196974 +0100
@@ -11,6 +11,7 @@
 #include <asm/memory.h>
 #include <asm/page.h>
 #include <asm/pgtable.h>
+#include <asm/cache.h>
 
 #include "image.h"
 
@@ -46,6 +47,27 @@
 	*(.idmap.text)					\
 	VMLINUX_SYMBOL(__idmap_text_end) = .;
 
+#ifdef CONFIG_HIBERNATION
+#define HIBERNATE_TEXT					\
+	. = ALIGN(SZ_4K);				\
+	VMLINUX_SYMBOL(__hibernate_exit_text_start) = .;\
+	*(.hibernate_exit.text)				\
+	VMLINUX_SYMBOL(__hibernate_exit_text_end) = .;
+#else
+#define HIBERNATE_TEXT
+#endif
+
+#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
+#define TRAMP_TEXT					\
+	. = ALIGN(PAGE_SIZE);				\
+	VMLINUX_SYMBOL(__entry_tramp_text_start) = .;	\
+	*(.entry.tramp.text)				\
+	. = ALIGN(PAGE_SIZE);				\
+	VMLINUX_SYMBOL(__entry_tramp_text_end) = .;
+#else
+#define TRAMP_TEXT
+#endif
+
 /*
  * The size of the PE/COFF section that covers the kernel image, which
  * runs from stext to _edata, must be a round multiple of the PE/COFF
@@ -63,14 +85,19 @@
 #endif
 
 #if defined(CONFIG_DEBUG_ALIGN_RODATA)
-#define ALIGN_DEBUG_RO			. = ALIGN(1<<SECTION_SHIFT);
-#define ALIGN_DEBUG_RO_MIN(min)		ALIGN_DEBUG_RO
-#elif defined(CONFIG_DEBUG_RODATA)
-#define ALIGN_DEBUG_RO			. = ALIGN(1<<PAGE_SHIFT);
-#define ALIGN_DEBUG_RO_MIN(min)		ALIGN_DEBUG_RO
+/*
+ *  4 KB granule:   1 level 2 entry
+ * 16 KB granule: 128 level 3 entries, with contiguous bit
+ * 64 KB granule:  32 level 3 entries, with contiguous bit
+ */
+#define SEGMENT_ALIGN			SZ_2M
 #else
-#define ALIGN_DEBUG_RO
-#define ALIGN_DEBUG_RO_MIN(min)		. = ALIGN(min);
+/*
+ *  4 KB granule:  16 level 3 entries, with contiguous bit
+ * 16 KB granule:   4 level 3 entries, without contiguous bit
+ * 64 KB granule:   1 level 3 entry
+ */
+#define SEGMENT_ALIGN			SZ_64K
 #endif
 
 SECTIONS
@@ -87,40 +114,46 @@
 		EXIT_CALL
 		*(.discard)
 		*(.discard.*)
+		*(.interp .dynamic)
+		*(.dynsym .dynstr .hash)
 	}
 
-	. = PAGE_OFFSET + TEXT_OFFSET;
+	. = KIMAGE_VADDR + TEXT_OFFSET;
 
 	.head.text : {
 		_text = .;
 		HEAD_TEXT
 	}
-	ALIGN_DEBUG_RO
 	.text : {			/* Real text segment		*/
 		_stext = .;		/* Text and read-only data	*/
 			__exception_text_start = .;
 			*(.exception.text)
 			__exception_text_end = .;
 			IRQENTRY_TEXT
+			ENTRY_TEXT
+			SOFTIRQENTRY_TEXT
 			TEXT_TEXT
 			SCHED_TEXT
 			LOCK_TEXT
+			KPROBES_TEXT
 			HYPERVISOR_TEXT
 			IDMAP_TEXT
+			HIBERNATE_TEXT
+			TRAMP_TEXT
 			*(.fixup)
 			*(.gnu.warning)
 		. = ALIGN(16);
 		*(.got)			/* Global offset table		*/
 	}
 
-	ALIGN_DEBUG_RO
-	RO_DATA(PAGE_SIZE)
-	EXCEPTION_TABLE(8)
+	. = ALIGN(SEGMENT_ALIGN);
+	_etext = .;			/* End of text section */
+
+	RO_DATA(PAGE_SIZE)		/* everything from this point to     */
+	EXCEPTION_TABLE(8)		/* __init_begin will be marked RO NX */
 	NOTES
-	ALIGN_DEBUG_RO
-	_etext = .;			/* End of text and rodata section */
 
-	ALIGN_DEBUG_RO_MIN(PAGE_SIZE)
+	. = ALIGN(SEGMENT_ALIGN);
 	__init_begin = .;
 
 	INIT_TEXT_SECTION(8)
@@ -128,7 +161,6 @@
 		ARM_EXIT_KEEP(EXIT_TEXT)
 	}
 
-	ALIGN_DEBUG_RO_MIN(16)
 	.init.data : {
 		INIT_DATA
 		INIT_SETUP(16)
@@ -143,9 +175,6 @@
 
 	PERCPU_SECTION(L1_CACHE_BYTES)
 
-	. = ALIGN(PAGE_SIZE);
-	__init_end = .;
-
 	. = ALIGN(4);
 	.altinstructions : {
 		__alt_instructions = .;
@@ -155,8 +184,16 @@
 	.altinstr_replacement : {
 		*(.altinstr_replacement)
 	}
+	.rela : ALIGN(8) {
+		*(.rela .rela*)
+	}
+
+	__rela_offset	= ABSOLUTE(ADDR(.rela) - KIMAGE_VADDR);
+	__rela_size	= SIZEOF(.rela);
+
+	. = ALIGN(SEGMENT_ALIGN);
+	__init_end = .;
 
-	. = ALIGN(PAGE_SIZE);
 	_data = .;
 	_sdata = .;
 	RW_DATA_SECTION(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE)
@@ -171,6 +208,16 @@
 	swapper_pg_dir = .;
 	. += SWAPPER_DIR_SIZE;
 
+#ifdef CONFIG_ARM64_SW_TTBR0_PAN
+	reserved_ttbr0 = .;
+	. += RESERVED_TTBR0_SIZE;
+#endif
+
+#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
+	tramp_pg_dir = .;
+	. += PAGE_SIZE;
+#endif
+
 	_end = .;
 
 	STABS_DEBUG
@@ -186,8 +233,16 @@
 	"HYP init code too big or misaligned")
 ASSERT(__idmap_text_end - (__idmap_text_start & ~(SZ_4K - 1)) <= SZ_4K,
 	"ID map text too big or misaligned")
+#ifdef CONFIG_HIBERNATION
+ASSERT(__hibernate_exit_text_end - (__hibernate_exit_text_start & ~(SZ_4K - 1))
+	<= SZ_4K, "Hibernate exit text too big or misaligned")
+#endif
 
+#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
+ASSERT((__entry_tramp_text_end - __entry_tramp_text_start) == PAGE_SIZE,
+	"Entry trampoline text too big")
+#endif
 /*
  * If padding is applied before .head.text, virt<->phys conversions will fail.
  */
-ASSERT(_text == (PAGE_OFFSET + TEXT_OFFSET), "HEAD is misaligned")
+ASSERT(_text == (KIMAGE_VADDR + TEXT_OFFSET), "HEAD is misaligned")
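
The SEGMENT_ALIGN comments above count the page-table entries needed to map one segment at each translation granule. The arithmetic, spelled out as a throwaway check (not kernel code; a 2 MB segment on a 4 KB granule maps as a single level-2 block rather than level-3 entries):

#include <stdio.h>

int main(void)
{
	/* SZ_2M (DEBUG_ALIGN_RODATA) and SZ_64K (default) segments. */
	printf("2M  on 4K  granule: 1 level-2 block\n");
	printf("2M  on 16K granule: %lu level-3 entries\n", (2UL<<20)/(16UL<<10));
	printf("2M  on 64K granule: %lu level-3 entries\n", (2UL<<20)/(64UL<<10));
	printf("64K on 4K  granule: %lu level-3 entries\n", (64UL<<10)/(4UL<<10));
	printf("64K on 16K granule: %lu level-3 entries\n", (64UL<<10)/(16UL<<10));
	printf("64K on 64K granule: %lu level-3 entry\n",   (64UL<<10)/(64UL<<10));
	return 0;
}
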
diff -ruw linux-4.4.115/arch/arm64/kvm/Makefile linux-4.4.115-fbx/arch/arm64/kvm/Makefile
--- linux-4.4.115/arch/arm64/kvm/Makefile	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/arch/arm64/kvm/Makefile	2019-01-22 16:16:21.555228766 +0100
@@ -10,6 +10,7 @@
 ARM=../../../arch/arm/kvm
 
 obj-$(CONFIG_KVM_ARM_HOST) += kvm.o
+obj-$(CONFIG_KVM_ARM_HOST) += hyp/
 
 kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/kvm_main.o $(KVM)/coalesced_mmio.o $(KVM)/eventfd.o $(KVM)/vfio.o
 kvm-$(CONFIG_KVM_ARM_HOST) += $(ARM)/arm.o $(ARM)/mmu.o $(ARM)/mmio.o
@@ -22,8 +23,6 @@
 kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic.o
 kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic-v2.o
 kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic-v2-emul.o
-kvm-$(CONFIG_KVM_ARM_HOST) += vgic-v2-switch.o
 kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic-v3.o
 kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic-v3-emul.o
-kvm-$(CONFIG_KVM_ARM_HOST) += vgic-v3-switch.o
 kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/arch_timer.o
diff -ruw linux-4.4.115/arch/arm64/lib/clear_user.S linux-4.4.115-fbx/arch/arm64/lib/clear_user.S
--- linux-4.4.115/arch/arm64/lib/clear_user.S	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/arch/arm64/lib/clear_user.S	2019-01-22 16:16:21.559228802 +0100
@@ -17,10 +17,7 @@
  */
 #include <linux/linkage.h>
 
-#include <asm/alternative.h>
-#include <asm/assembler.h>
-#include <asm/cpufeature.h>
-#include <asm/sysreg.h>
+#include <asm/uaccess.h>
 
 	.text
 
@@ -33,29 +30,27 @@
  * Alignment fixed up by hardware.
  */
 ENTRY(__clear_user)
-ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(0)), ARM64_HAS_PAN, \
-	    CONFIG_ARM64_PAN)
+	uaccess_enable_not_uao x2, x3, x4
 	mov	x2, x1			// save the size for fixup return
 	subs	x1, x1, #8
 	b.mi	2f
 1:
-USER(9f, str	xzr, [x0], #8	)
+uao_user_alternative 9f, str, sttr, xzr, x0, 8
 	subs	x1, x1, #8
 	b.pl	1b
 2:	adds	x1, x1, #4
 	b.mi	3f
-USER(9f, str	wzr, [x0], #4	)
+uao_user_alternative 9f, str, sttr, wzr, x0, 4
 	sub	x1, x1, #4
 3:	adds	x1, x1, #2
 	b.mi	4f
-USER(9f, strh	wzr, [x0], #2	)
+uao_user_alternative 9f, strh, sttrh, wzr, x0, 2
 	sub	x1, x1, #2
 4:	adds	x1, x1, #1
 	b.mi	5f
-USER(9f, strb	wzr, [x0]	)
+uao_user_alternative 9f, strb, sttrb, wzr, x0, 0
 5:	mov	x0, #0
-ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(1)), ARM64_HAS_PAN, \
-	    CONFIG_ARM64_PAN)
+	uaccess_disable_not_uao x2, x3
 	ret
 ENDPROC(__clear_user)
 
diff -ruw linux-4.4.115/arch/arm64/lib/copy_from_user.S linux-4.4.115-fbx/arch/arm64/lib/copy_from_user.S
--- linux-4.4.115/arch/arm64/lib/copy_from_user.S	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/arch/arm64/lib/copy_from_user.S	2019-01-22 16:16:21.559228802 +0100
@@ -16,11 +16,8 @@
 
 #include <linux/linkage.h>
 
-#include <asm/alternative.h>
-#include <asm/assembler.h>
 #include <asm/cache.h>
-#include <asm/cpufeature.h>
-#include <asm/sysreg.h>
+#include <asm/uaccess.h>
 
 /*
  * Copy from user space to a kernel buffer (alignment handled by the hardware)
@@ -34,7 +31,7 @@
  */
 
 	.macro ldrb1 ptr, regB, val
-	USER(9998f, ldrb  \ptr, [\regB], \val)
+	uao_user_alternative 9998f, ldrb, ldtrb, \ptr, \regB, \val
 	.endm
 
 	.macro strb1 ptr, regB, val
@@ -42,7 +39,7 @@
 	.endm
 
 	.macro ldrh1 ptr, regB, val
-	USER(9998f, ldrh  \ptr, [\regB], \val)
+	uao_user_alternative 9998f, ldrh, ldtrh, \ptr, \regB, \val
 	.endm
 
 	.macro strh1 ptr, regB, val
@@ -50,7 +47,7 @@
 	.endm
 
 	.macro ldr1 ptr, regB, val
-	USER(9998f, ldr \ptr, [\regB], \val)
+	uao_user_alternative 9998f, ldr, ldtr, \ptr, \regB, \val
 	.endm
 
 	.macro str1 ptr, regB, val
@@ -58,7 +55,7 @@
 	.endm
 
 	.macro ldp1 ptr, regB, regC, val
-	USER(9998f, ldp \ptr, \regB, [\regC], \val)
+	uao_ldp 9998f, \ptr, \regB, \regC, \val
 	.endm
 
 	.macro stp1 ptr, regB, regC, val
@@ -66,16 +63,14 @@
 	.endm
 
 end	.req	x5
-ENTRY(__copy_from_user)
-ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(0)), ARM64_HAS_PAN, \
-	    CONFIG_ARM64_PAN)
+ENTRY(__arch_copy_from_user)
+	uaccess_enable_not_uao x3, x4, x5
 	add	end, x0, x2
 #include "copy_template.S"
-ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(1)), ARM64_HAS_PAN, \
-	    CONFIG_ARM64_PAN)
+	uaccess_disable_not_uao x3, x4
 	mov	x0, #0				// Nothing to copy
 	ret
-ENDPROC(__copy_from_user)
+ENDPROC(__arch_copy_from_user)
 
 	.section .fixup,"ax"
 	.align	2
diff -ruw linux-4.4.115/arch/arm64/lib/copy_in_user.S linux-4.4.115-fbx/arch/arm64/lib/copy_in_user.S
--- linux-4.4.115/arch/arm64/lib/copy_in_user.S	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/arch/arm64/lib/copy_in_user.S	2019-01-22 16:16:21.559228802 +0100
@@ -18,11 +18,8 @@
 
 #include <linux/linkage.h>
 
-#include <asm/alternative.h>
-#include <asm/assembler.h>
 #include <asm/cache.h>
-#include <asm/cpufeature.h>
-#include <asm/sysreg.h>
+#include <asm/uaccess.h>
 
 /*
  * Copy from user space to user space (alignment handled by the hardware)
@@ -35,45 +32,43 @@
  *	x0 - bytes not copied
  */
 	.macro ldrb1 ptr, regB, val
-	USER(9998f, ldrb  \ptr, [\regB], \val)
+	uao_user_alternative 9998f, ldrb, ldtrb, \ptr, \regB, \val
 	.endm
 
 	.macro strb1 ptr, regB, val
-	USER(9998f, strb \ptr, [\regB], \val)
+	uao_user_alternative 9998f, strb, sttrb, \ptr, \regB, \val
 	.endm
 
 	.macro ldrh1 ptr, regB, val
-	USER(9998f, ldrh  \ptr, [\regB], \val)
+	uao_user_alternative 9998f, ldrh, ldtrh, \ptr, \regB, \val
 	.endm
 
 	.macro strh1 ptr, regB, val
-	USER(9998f, strh \ptr, [\regB], \val)
+	uao_user_alternative 9998f, strh, sttrh, \ptr, \regB, \val
 	.endm
 
 	.macro ldr1 ptr, regB, val
-	USER(9998f, ldr \ptr, [\regB], \val)
+	uao_user_alternative 9998f, ldr, ldtr, \ptr, \regB, \val
 	.endm
 
 	.macro str1 ptr, regB, val
-	USER(9998f, str \ptr, [\regB], \val)
+	uao_user_alternative 9998f, str, sttr, \ptr, \regB, \val
 	.endm
 
 	.macro ldp1 ptr, regB, regC, val
-	USER(9998f, ldp \ptr, \regB, [\regC], \val)
+	uao_ldp 9998f, \ptr, \regB, \regC, \val
 	.endm
 
 	.macro stp1 ptr, regB, regC, val
-	USER(9998f, stp \ptr, \regB, [\regC], \val)
+	uao_stp 9998f, \ptr, \regB, \regC, \val
 	.endm
 
 end	.req	x5
 ENTRY(__copy_in_user)
-ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(0)), ARM64_HAS_PAN, \
-	    CONFIG_ARM64_PAN)
+	uaccess_enable_not_uao x3, x4, x5
 	add	end, x0, x2
 #include "copy_template.S"
-ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(1)), ARM64_HAS_PAN, \
-	    CONFIG_ARM64_PAN)
+	uaccess_disable_not_uao x3, x4
 	mov	x0, #0
 	ret
 ENDPROC(__copy_in_user)
diff -ruw linux-4.4.115/arch/arm64/lib/copy_page.S linux-4.4.115-fbx/arch/arm64/lib/copy_page.S
--- linux-4.4.115/arch/arm64/lib/copy_page.S	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/arch/arm64/lib/copy_page.S	2019-01-22 16:16:21.559228802 +0100
@@ -18,6 +18,8 @@
 #include <linux/const.h>
 #include <asm/assembler.h>
 #include <asm/page.h>
+#include <asm/cpufeature.h>
+#include <asm/alternative.h>
 
 /*
  * Copy a page from src to dest (both are page aligned)
@@ -27,20 +29,65 @@
  *	x1 - src
  */
 ENTRY(copy_page)
-	/* Assume cache line size is 64 bytes. */
-	prfm	pldl1strm, [x1, #64]
-1:	ldp	x2, x3, [x1]
+alternative_if_not ARM64_HAS_NO_HW_PREFETCH
+	nop
+	nop
+alternative_else
+	# Prefetch two cache lines ahead.
+	prfm    pldl1strm, [x1, #128]
+	prfm    pldl1strm, [x1, #256]
+alternative_endif
+
+	ldp	x2, x3, [x1]
 	ldp	x4, x5, [x1, #16]
 	ldp	x6, x7, [x1, #32]
 	ldp	x8, x9, [x1, #48]
-	add	x1, x1, #64
-	prfm	pldl1strm, [x1, #64]
+	ldp	x10, x11, [x1, #64]
+	ldp	x12, x13, [x1, #80]
+	ldp	x14, x15, [x1, #96]
+	ldp	x16, x17, [x1, #112]
+
+	mov	x18, #(PAGE_SIZE - 128)
+	add	x1, x1, #128
+1:
+	subs	x18, x18, #128
+
+alternative_if_not ARM64_HAS_NO_HW_PREFETCH
+	nop
+alternative_else
+	prfm    pldl1strm, [x1, #384]
+alternative_endif
+
 	stnp	x2, x3, [x0]
+	ldp	x2, x3, [x1]
 	stnp	x4, x5, [x0, #16]
+	ldp	x4, x5, [x1, #16]
 	stnp	x6, x7, [x0, #32]
+	ldp	x6, x7, [x1, #32]
 	stnp	x8, x9, [x0, #48]
-	add	x0, x0, #64
-	tst	x1, #(PAGE_SIZE - 1)
-	b.ne	1b
+	ldp	x8, x9, [x1, #48]
+	stnp	x10, x11, [x0, #64]
+	ldp	x10, x11, [x1, #64]
+	stnp	x12, x13, [x0, #80]
+	ldp	x12, x13, [x1, #80]
+	stnp	x14, x15, [x0, #96]
+	ldp	x14, x15, [x1, #96]
+	stnp	x16, x17, [x0, #112]
+	ldp	x16, x17, [x1, #112]
+
+	add	x0, x0, #128
+	add	x1, x1, #128
+
+	b.gt	1b
+
+	stnp	x2, x3, [x0]
+	stnp	x4, x5, [x0, #16]
+	stnp	x6, x7, [x0, #32]
+	stnp	x8, x9, [x0, #48]
+	stnp	x10, x11, [x0, #64]
+	stnp	x12, x13, [x0, #80]
+	stnp	x14, x15, [x0, #96]
+	stnp	x16, x17, [x0, #112]
+
 	ret
 ENDPROC(copy_page)
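
copy_page above becomes a software-pipelined loop: each iteration issues the non-temporal stores for chunk N while loading chunk N+1, with explicit prefetch ahead on cores that lack a hardware prefetcher. A plain C sketch of the same shape using __builtin_prefetch (128-byte chunks as in the assembly; this illustrates the pipelining, not the stnp cache-bypass semantics):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define PAGE_SZ	4096
#define CHUNK	128		/* bytes moved per iteration, as above */

static void copy_page_sketch(void *dst, const void *src)
{
	uint64_t buf[CHUNK / 8];
	const char *s = src;
	char *d = dst;
	long left;

	memcpy(buf, s, CHUNK);			/* prime the pipeline */
	s += CHUNK;

	for (left = PAGE_SZ - CHUNK; left > 0; left -= CHUNK) {
		if (left > 3 * CHUNK)		/* stay ~3 chunks ahead */
			__builtin_prefetch(s + 384);
		memcpy(d, buf, CHUNK);		/* store chunk N */
		memcpy(buf, s, CHUNK);		/* load chunk N + 1 */
		d += CHUNK;
		s += CHUNK;
	}
	memcpy(d, buf, CHUNK);			/* drain the final chunk */
}

int main(void)
{
	static char src[PAGE_SZ], dst[PAGE_SZ];

	memset(src, 0xa5, sizeof(src));
	copy_page_sketch(dst, src);
	return memcmp(dst, src, PAGE_SZ) ? 1 : 0;
}

Keeping a full chunk of loads in flight while the previous chunk's stores retire is what hides memory latency; the assembly does the same with the x2-x17 register file instead of a buffer.
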
diff -ruw linux-4.4.115/arch/arm64/lib/copy_to_user.S linux-4.4.115-fbx/arch/arm64/lib/copy_to_user.S
--- linux-4.4.115/arch/arm64/lib/copy_to_user.S	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/arch/arm64/lib/copy_to_user.S	2019-01-22 16:16:21.559228802 +0100
@@ -16,11 +16,8 @@
 
 #include <linux/linkage.h>
 
-#include <asm/alternative.h>
-#include <asm/assembler.h>
 #include <asm/cache.h>
-#include <asm/cpufeature.h>
-#include <asm/sysreg.h>
+#include <asm/uaccess.h>
 
 /*
  * Copy to user space from a kernel buffer (alignment handled by the hardware)
@@ -37,7 +34,7 @@
 	.endm
 
 	.macro strb1 ptr, regB, val
-	USER(9998f, strb \ptr, [\regB], \val)
+	uao_user_alternative 9998f, strb, sttrb, \ptr, \regB, \val
 	.endm
 
 	.macro ldrh1 ptr, regB, val
@@ -45,7 +42,7 @@
 	.endm
 
 	.macro strh1 ptr, regB, val
-	USER(9998f, strh \ptr, [\regB], \val)
+	uao_user_alternative 9998f, strh, sttrh, \ptr, \regB, \val
 	.endm
 
 	.macro ldr1 ptr, regB, val
@@ -53,7 +50,7 @@
 	.endm
 
 	.macro str1 ptr, regB, val
-	USER(9998f, str \ptr, [\regB], \val)
+	uao_user_alternative 9998f, str, sttr, \ptr, \regB, \val
 	.endm
 
 	.macro ldp1 ptr, regB, regC, val
@@ -61,20 +58,18 @@
 	.endm
 
 	.macro stp1 ptr, regB, regC, val
-	USER(9998f, stp \ptr, \regB, [\regC], \val)
+	uao_stp 9998f, \ptr, \regB, \regC, \val
 	.endm
 
 end	.req	x5
-ENTRY(__copy_to_user)
-ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(0)), ARM64_HAS_PAN, \
-	    CONFIG_ARM64_PAN)
+ENTRY(__arch_copy_to_user)
+	uaccess_enable_not_uao x3, x4, x5
 	add	end, x0, x2
 #include "copy_template.S"
-ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(1)), ARM64_HAS_PAN, \
-	    CONFIG_ARM64_PAN)
+	uaccess_disable_not_uao x3, x4
 	mov	x0, #0
 	ret
-ENDPROC(__copy_to_user)
+ENDPROC(__arch_copy_to_user)
 
 	.section .fixup,"ax"
 	.align	2
diff -ruw linux-4.4.115/arch/arm64/lib/Makefile linux-4.4.115-fbx/arch/arm64/lib/Makefile
--- linux-4.4.115/arch/arm64/lib/Makefile	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/arch/arm64/lib/Makefile	2019-01-22 16:16:21.559228802 +0100
@@ -4,15 +4,16 @@
 		   memcmp.o strcmp.o strncmp.o strlen.o strnlen.o	\
 		   strchr.o strrchr.o
 
-# Tell the compiler to treat all general purpose registers as
-# callee-saved, which allows for efficient runtime patching of the bl
-# instruction in the caller with an atomic instruction when supported by
-# the CPU. Result and argument registers are handled correctly, based on
-# the function prototype.
+# Tell the compiler to treat all general purpose registers (with the
+# exception of the IP registers, which are already handled by the caller
+# in case of a PLT) as callee-saved, which allows for efficient runtime
+# patching of the bl instruction in the caller with an atomic instruction
+# when supported by the CPU. Result and argument registers are handled
+# correctly, based on the function prototype.
 lib-$(CONFIG_ARM64_LSE_ATOMICS) += atomic_ll_sc.o
 CFLAGS_atomic_ll_sc.o	:= -fcall-used-x0 -ffixed-x1 -ffixed-x2		\
 		   -ffixed-x3 -ffixed-x4 -ffixed-x5 -ffixed-x6		\
 		   -ffixed-x7 -fcall-saved-x8 -fcall-saved-x9		\
 		   -fcall-saved-x10 -fcall-saved-x11 -fcall-saved-x12	\
 		   -fcall-saved-x13 -fcall-saved-x14 -fcall-saved-x15	\
-		   -fcall-saved-x16 -fcall-saved-x17 -fcall-saved-x18
+		   -fcall-saved-x18
diff -ruw linux-4.4.115/arch/arm64/Makefile linux-4.4.115-fbx/arch/arm64/Makefile
--- linux-4.4.115/arch/arm64/Makefile	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/arch/arm64/Makefile	2019-10-29 09:26:22.989196778 +0100
@@ -15,6 +15,10 @@
 OBJCOPYFLAGS	:=-O binary -R .note -R .note.gnu.build-id -R .comment -S
 GZFLAGS		:=-9
 
+ifneq ($(CONFIG_RELOCATABLE),)
+LDFLAGS_vmlinux		+= -pie -Bsymbolic
+endif
+
 KBUILD_DEFCONFIG := defconfig
 
 # Check for binutils support for specific extensions
@@ -26,8 +30,17 @@
   endif
 endif
 
-KBUILD_CFLAGS	+= -mgeneral-regs-only $(lseinstr)
+ifeq ($(cc-name),clang)
+# This is a workaround for https://bugs.llvm.org/show_bug.cgi?id=30792.
+# TODO: revert when this is fixed in LLVM.
+KBUILD_CFLAGS	+= -mno-implicit-float
+else
+KBUILD_CFLAGS	+= -mgeneral-regs-only
+endif
+KBUILD_CFLAGS	+= $(lseinstr)
+KBUILD_CFLAGS	+= -fno-pic
 KBUILD_CFLAGS	+= $(call cc-option, -mpc-relative-literal-loads)
+KBUILD_CFLAGS	+= -fno-asynchronous-unwind-tables
 KBUILD_AFLAGS	+= $(lseinstr)
 
 ifeq ($(CONFIG_CPU_BIG_ENDIAN), y)
@@ -42,10 +55,14 @@
 
 CHECKFLAGS	+= -D__aarch64__
 
-ifeq ($(CONFIG_ARM64_ERRATUM_843419), y)
+ifeq ($(CONFIG_ARM64_MODULE_CMODEL_LARGE), y)
 KBUILD_CFLAGS_MODULE	+= -mcmodel=large
 endif
 
+ifeq ($(CONFIG_ARM64_MODULE_PLTS),y)
+KBUILD_LDFLAGS_MODULE	+= -T $(srctree)/arch/arm64/kernel/module.lds
+endif
+
 # Default value
 head-y		:= arch/arm64/kernel/head.o
 
@@ -56,6 +73,10 @@
 TEXT_OFFSET := 0x00080000
 endif
 
+ifeq ($(cc-name),clang)
+KBUILD_CFLAGS += $(call cc-disable-warning, asm-operand-widths)
+endif
+
 # KASAN_SHADOW_OFFSET = VA_START + (1 << (VA_BITS - 3)) - (1 << 61)
 # in 32-bit arithmetic
 KASAN_SHADOW_OFFSET := $(shell printf "0x%08x00000000\n" $$(( \
@@ -74,9 +95,18 @@
 core-$(CONFIG_EFI_STUB) += $(objtree)/drivers/firmware/efi/libstub/lib.a
 
 # Default target when executing plain make
+ifeq ($(CONFIG_BUILD_ARM64_APPENDED_DTB_IMAGE),y)
+KBUILD_IMAGE	:= $(subst $\",,$(CONFIG_BUILD_ARM64_APPENDED_KERNEL_IMAGE_NAME))
+else
 KBUILD_IMAGE	:= Image.gz
+endif
+
 KBUILD_DTBS	:= dtbs
 
+ifeq ($(CONFIG_BUILD_ARM64_DT_OVERLAY),y)
+export DTC_FLAGS := -@
+endif
+
 all:	$(KBUILD_IMAGE) $(KBUILD_DTBS)
 
 boot := arch/arm64/boot
@@ -101,6 +131,9 @@
 dtbs_install:
 	$(Q)$(MAKE) $(dtbinst)=$(boot)/dts
 
+Image-dtb Image.gz-dtb: vmlinux scripts dtbs
+	$(Q)$(MAKE) $(build)=$(boot) $(boot)/$@
+
 PHONY += vdso_install
 vdso_install:
 	$(Q)$(MAKE) $(build)=arch/arm64/kernel/vdso $@
@@ -110,6 +143,16 @@
 	$(Q)$(MAKE) $(clean)=$(boot)
 	$(Q)$(MAKE) $(clean)=$(boot)/dts
 
+# We need to generate vdso-offsets.h before compiling certain files in kernel/.
+# In order to do that, we should use the archprepare target, but we can't since
+# asm-offsets.h is included in some files used to generate vdso-offsets.h, and
+# asm-offsets.h is built in prepare0, for which archprepare is a dependency.
+# Therefore we need to generate the header after prepare0 has been made, hence
+# this hack.
+prepare: vdso_prepare
+vdso_prepare: prepare0
+	$(Q)$(MAKE) $(build)=arch/arm64/kernel/vdso include/generated/vdso-offsets.h
+
 define archhelp
   echo  '* Image.gz      - Compressed kernel image (arch/$(ARCH)/boot/Image.gz)'
   echo  '  Image         - Uncompressed kernel image (arch/$(ARCH)/boot/Image)'
diff -ruw linux-4.4.115/arch/arm64/mm/cache.S linux-4.4.115-fbx/arch/arm64/mm/cache.S
--- linux-4.4.115/arch/arm64/mm/cache.S	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/arch/arm64/mm/cache.S	2019-01-22 16:16:21.559228802 +0100
@@ -23,8 +23,80 @@
 #include <asm/assembler.h>
 #include <asm/cpufeature.h>
 #include <asm/alternative.h>
+#include <asm/uaccess.h>
 
-#include "proc-macros.S"
+/*
+ *	__flush_dcache_all()
+ *
+ *	Flush the whole D-cache.
+ *
+ *	Corrupted registers: x0-x7, x9-x11
+ */
+__flush_dcache_all:
+	dmb	sy				// ensure ordering with previous memory accesses
+	mrs	x0, clidr_el1			// read clidr
+	and	x3, x0, #0x7000000		// extract loc from clidr
+	lsr	x3, x3, #23			// left align loc bit field
+	cbz	x3, finished			// if loc is 0, then no need to clean
+	mov	x10, #0				// start clean at cache level 0
+loop1:
+	add	x2, x10, x10, lsr #1		// work out 3x current cache level
+	lsr	x1, x0, x2			// extract cache type bits from clidr
+	and	x1, x1, #7			// mask off the bits for the current cache only
+	cmp	x1, #2				// see what cache we have at this level
+	b.lt	skip				// skip if no cache, or just i-cache
+	save_and_disable_irqs x9		// make CSSELR and CCSIDR access atomic
+	msr	csselr_el1, x10			// select current cache level in csselr
+	isb					// isb to sync the new csselr and ccsidr
+	mrs	x1, ccsidr_el1			// read the new ccsidr
+	restore_irqs x9
+	and	x2, x1, #7			// extract the length of the cache lines
+	add	x2, x2, #4			// add 4 (line length offset)
+	mov	x4, #0x3ff
+	and	x4, x4, x1, lsr #3		// find the maximum way number (associativity - 1)
+	clz	w5, w4				// find bit position of way size increment
+	mov	x7, #0x7fff
+	and	x7, x7, x1, lsr #13		// extract the maximum index number (sets - 1)
+loop2:
+	mov	x9, x4				// create working copy of max way size
+loop3:
+	lsl	x6, x9, x5
+	orr	x11, x10, x6			// factor way and cache number into x11
+	lsl	x6, x7, x2
+	orr	x11, x11, x6			// factor index number into x11
+	dc	cisw, x11			// clean & invalidate by set/way
+	subs	x9, x9, #1			// decrement the way
+	b.ge	loop3
+	subs	x7, x7, #1			// decrement the index
+	b.ge	loop2
+skip:
+	add	x10, x10, #2			// increment cache number
+	cmp	x3, x10
+	b.gt	loop1
+finished:
+	mov	x10, #0				// switch back to cache level 0
+	msr	csselr_el1, x10			// select current cache level in csselr
+	dsb	sy
+	isb
+	ret
+ENDPROC(__flush_dcache_all)
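
The loop above walks CLIDR_EL1 level by level and, for each data or unified
cache, decodes CCSIDR_EL1: bits [2:0] give the line-length offset
((ccsidr & 7) + 4), bits [12:3] the maximum way number, and bits [27:13]
the maximum set number. Each DC CISW operand then ORs together the level,
the way (shifted left by clz of the maximum way number) and the set
(shifted by the line-length offset). A self-contained C sketch of that
decode, assuming the pre-ARMv8.3 32-bit CCSIDR layout:

#include <stdint.h>
#include <stdio.h>

/* Build one DC CISW operand from a CCSIDR_EL1 value, mirroring the bit
 * manipulation in __flush_dcache_all. Assumes max_way > 0 (a
 * set-associative cache), as __builtin_clz(0) is undefined. */
static uint64_t dc_cisw_operand(uint32_t ccsidr, unsigned level,
				unsigned way, unsigned set)
{
	unsigned line_shift = (ccsidr & 7) + 4;		/* log2(line bytes) */
	unsigned max_way    = (ccsidr >> 3) & 0x3ff;	/* associativity - 1 */
	unsigned way_shift  = __builtin_clz(max_way);	/* clz w5, w4 */

	return ((uint64_t)way << way_shift) |
	       ((uint64_t)set << line_shift) |
	       (level << 1);				/* x10 = level * 2 */
}

int main(void)
{
	uint32_t ccsidr = 0x701fe01a;	/* e.g. 64 B lines, 4 ways, 256 sets */

	printf("line bytes: %u\n", 1u << ((ccsidr & 7) + 4));
	printf("ways:       %u\n", ((ccsidr >> 3) & 0x3ff) + 1);
	printf("sets:       %u\n", ((ccsidr >> 13) & 0x7fff) + 1);
	printf("operand:    %#llx\n",
	       (unsigned long long)dc_cisw_operand(ccsidr, 0, 3, 255));
	return 0;
}
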
+
+/*
+ *	flush_cache_all()
+ *
+ *	Flush the entire cache system.  The data cache flush is now achieved
+ *	using atomic clean / invalidates working outwards from L1 cache. This
+ * is done using Set/Way based cache maintenance instructions.  The
+ *	instruction cache can still be invalidated back to the point of
+ *	unification in a single instruction.
+ */
+ENTRY(flush_cache_all)
+	mov	x12, lr
+	bl	__flush_dcache_all
+	mov	x0, #0
+	ic	ialluis				// I+BTB cache invalidate
+	ret	x12
+ENDPROC(flush_cache_all)
 
 /*
  *	flush_icache_range(start,end)
@@ -50,11 +122,12 @@
  *	- end     - virtual end address of region
  */
 ENTRY(__flush_cache_user_range)
+	uaccess_ttbr0_enable x2, x3, x4
 	dcache_line_size x2, x3
 	sub	x3, x2, #1
 	bic	x4, x0, x3
 1:
-USER(9f, dc	cvau, x4	)		// clean D line to PoU
+user_alt 9f, "dc cvau, x4",  "dc civac, x4",  ARM64_WORKAROUND_CLEAN_CACHE
 	add	x4, x4, x2
 	cmp	x4, x1
 	b.lo	1b
@@ -71,36 +144,44 @@
 	dsb	ish
 	isb
 	mov	x0, #0
+1:
+	uaccess_ttbr0_disable x1, x2
 	ret
 9:
 	mov	x0, #-EFAULT
-	ret
+	b	1b
 ENDPROC(flush_icache_range)
 ENDPROC(__flush_cache_user_range)
 
 /*
  *	__flush_dcache_area(kaddr, size)
  *
- *	Ensure that the data held in the page kaddr is written back to the
- *	page in question.
+ *	Ensure that any D-cache lines for the interval [kaddr, kaddr+size)
+ *	are cleaned and invalidated to the PoC.
  *
  *	- kaddr   - kernel address
  *	- size    - size in question
  */
 ENTRY(__flush_dcache_area)
-	dcache_line_size x2, x3
-	add	x1, x0, x1
-	sub	x3, x2, #1
-	bic	x0, x0, x3
-1:	dc	civac, x0			// clean & invalidate D line / unified line
-	add	x0, x0, x2
-	cmp	x0, x1
-	b.lo	1b
-	dsb	sy
+	dcache_by_line_op civac, sy, x0, x1, x2, x3
 	ret
 ENDPIPROC(__flush_dcache_area)
 
 /*
+ *	__clean_dcache_area_pou(kaddr, size)
+ *
+ * 	Ensure that any D-cache lines for the interval [kaddr, kaddr+size)
+ * 	are cleaned to the PoU.
+ *
+ *	- kaddr   - kernel address
+ *	- size    - size in question
+ */
+ENTRY(__clean_dcache_area_pou)
+	dcache_by_line_op cvau, ish, x0, x1, x2, x3
+	ret
+ENDPROC(__clean_dcache_area_pou)
+
+/*
  *	__inval_cache_range(start, end)
  *	- start   - start address of region
  *	- end     - end address of region
@@ -113,7 +194,7 @@
  *	- start   - virtual start address of region
  *	- end     - virtual end address of region
  */
-__dma_inv_range:
+ENTRY(__dma_inv_range)
 	dcache_line_size x2, x3
 	sub	x3, x2, #1
 	tst	x1, x3				// end cache line aligned?
@@ -139,7 +220,7 @@
  *	- start   - virtual start address of region
  *	- end     - virtual end address of region
  */
-__dma_clean_range:
+ENTRY(__dma_clean_range)
 	dcache_line_size x2, x3
 	sub	x3, x2, #1
 	bic	x0, x0, x3
diff -ruw linux-4.4.115/arch/arm64/mm/context.c linux-4.4.115-fbx/arch/arm64/mm/context.c
--- linux-4.4.115/arch/arm64/mm/context.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/arch/arm64/mm/context.c	2019-01-22 16:16:21.559228802 +0100
@@ -38,7 +38,16 @@
 
 #define ASID_MASK		(~GENMASK(asid_bits - 1, 0))
 #define ASID_FIRST_VERSION	(1UL << asid_bits)
-#define NUM_USER_ASIDS		ASID_FIRST_VERSION
+
+#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
+#define NUM_USER_ASIDS		(ASID_FIRST_VERSION >> 1)
+#define asid2idx(asid)		(((asid) & ~ASID_MASK) >> 1)
+#define idx2asid(idx)		(((idx) << 1) & ~ASID_MASK)
+#else
+#define NUM_USER_ASIDS		(ASID_FIRST_VERSION)
+#define asid2idx(asid)		((asid) & ~ASID_MASK)
+#define idx2asid(idx)		asid2idx(idx)
+#endif
 
 static void flush_context(unsigned int cpu)
 {
@@ -65,7 +74,7 @@
 		 */
 		if (asid == 0)
 			asid = per_cpu(reserved_asids, i);
-		__set_bit(asid & ~ASID_MASK, asid_map);
+		__set_bit(asid2idx(asid), asid_map);
 		per_cpu(reserved_asids, i) = asid;
 	}
 
@@ -120,16 +129,16 @@
 		 * We had a valid ASID in a previous life, so try to re-use
 		 * it if possible.
 		 */
-		asid &= ~ASID_MASK;
-		if (!__test_and_set_bit(asid, asid_map))
+		if (!__test_and_set_bit(asid2idx(asid), asid_map))
 			return newasid;
 	}
 
 	/*
 	 * Allocate a free ASID. If we can't find one, take a note of the
-	 * currently active ASIDs and mark the TLBs as requiring flushes.
-	 * We always count from ASID #1, as we use ASID #0 when setting a
-	 * reserved TTBR0 for the init_mm.
+	 * currently active ASIDs and mark the TLBs as requiring flushes.  We
+	 * always count from ASID #2 (index 1), as we use ASID #0 when setting
+	 * a reserved TTBR0 for the init_mm and we allocate ASIDs in even/odd
+	 * pairs.
 	 */
 	asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, cur_idx);
 	if (asid != NUM_USER_ASIDS)
@@ -146,7 +155,7 @@
 set_asid:
 	__set_bit(asid, asid_map);
 	cur_idx = asid;
-	return asid | generation;
+	return idx2asid(asid) | generation;
 }
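
With CONFIG_UNMAP_KERNEL_AT_EL0, the allocator above hands out only even
ASIDs: asid2idx() discards bit 0 and idx2asid() restores it, so every
process implicitly owns the odd sibling of its ASID for its trampoline page
tables, at the cost of halving NUM_USER_ASIDS. The round trip can be
checked in isolation (8-bit ASIDs for brevity; the three macros are
transcribed from the hunk above):

#include <assert.h>
#include <stdio.h>

#define ASID_BITS		8
#define ASID_FIRST_VERSION	(1UL << ASID_BITS)
#define ASID_MASK		(~(ASID_FIRST_VERSION - 1))

/* KPTI variant: allocate ASIDs in even/odd pairs. */
#define NUM_USER_ASIDS		(ASID_FIRST_VERSION >> 1)
#define asid2idx(asid)		(((asid) & ~ASID_MASK) >> 1)
#define idx2asid(idx)		(((idx) << 1) & ~ASID_MASK)

int main(void)
{
	/* Every allocator index maps to a distinct even ASID and the
	 * mapping round-trips, so no two indices collide. */
	for (unsigned long idx = 0; idx < NUM_USER_ASIDS; idx++) {
		unsigned long asid = idx2asid(idx);

		assert((asid & 1) == 0);	/* odd sibling stays free */
		assert(asid2idx(asid) == idx);
	}
	printf("%lu usable ASIDs out of %lu\n",
	       NUM_USER_ASIDS, ASID_FIRST_VERSION);
	return 0;
}
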
 
 void check_and_switch_context(struct mm_struct *mm, unsigned int cpu)
@@ -182,12 +191,29 @@
 	raw_spin_unlock_irqrestore(&cpu_asid_lock, flags);
 
 switch_mm_fastpath:
+
+	arm64_apply_bp_hardening();
+
+	/*
+	 * Defer TTBR0_EL1 setting for user threads to uaccess_enable() when
+	 * emulating PAN.
+	 */
+	if (!system_uses_ttbr0_pan())
 	cpu_switch_mm(mm->pgd, mm);
 }
 
+/* Errata workaround post TTBRx_EL1 update. */
+asmlinkage void post_ttbr_update_workaround(void)
+{
+	asm(ALTERNATIVE("nop; nop; nop",
+			"ic iallu; dsb nsh; isb",
+			ARM64_WORKAROUND_CAVIUM_27456,
+			CONFIG_CAVIUM_ERRATUM_27456));
+}
+
 static int asids_init(void)
 {
-	int fld = cpuid_feature_extract_field(read_cpuid(ID_AA64MMFR0_EL1), 4);
+	int fld = cpuid_feature_extract_field(read_cpuid(SYS_ID_AA64MMFR0_EL1), 4);
 
 	switch (fld) {
 	default:
diff -ruw linux-4.4.115/arch/arm64/mm/copypage.c linux-4.4.115-fbx/arch/arm64/mm/copypage.c
--- linux-4.4.115/arch/arm64/mm/copypage.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/arch/arm64/mm/copypage.c	2019-01-22 16:16:21.559228802 +0100
@@ -24,8 +24,9 @@
 
 void __cpu_copy_user_page(void *kto, const void *kfrom, unsigned long vaddr)
 {
+	struct page *page = virt_to_page(kto);
 	copy_page(kto, kfrom);
-	__flush_dcache_area(kto, PAGE_SIZE);
+	flush_dcache_page(page);
 }
 EXPORT_SYMBOL_GPL(__cpu_copy_user_page);
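
__cpu_copy_user_page now goes through flush_dcache_page() instead of
flushing unconditionally, which lets the copy participate in the
PG_dcache_clean protocol (visible in the flush.c hunk further down):
maintenance is deferred until a page is first observed needing it, and
subsequent calls become no-ops. A minimal sketch of that test-and-set
deferral pattern (the flag and flush function are stand-ins, not kernel
API):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct page_sketch {
	atomic_bool dcache_clean;
};

static void expensive_flush(struct page_sketch *pg)
{
	printf("flushing page %p\n", (void *)pg);
}

/* The first caller pays for the maintenance; later callers see the flag. */
static void sync_page_sketch(struct page_sketch *pg)
{
	if (!atomic_exchange(&pg->dcache_clean, true))
		expensive_flush(pg);
}

int main(void)
{
	struct page_sketch pg = { .dcache_clean = false };

	sync_page_sketch(&pg);	/* flushes */
	sync_page_sketch(&pg);	/* no-op: already clean */
	return 0;
}
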
 
diff -ruw linux-4.4.115/arch/arm64/mm/dma-mapping.c linux-4.4.115-fbx/arch/arm64/mm/dma-mapping.c
--- linux-4.4.115/arch/arm64/mm/dma-mapping.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/arch/arm64/mm/dma-mapping.c	2019-01-22 16:16:21.563228839 +0100
@@ -3,6 +3,7 @@
  *
  * Copyright (C) 2012 ARM Ltd.
  * Author: Catalin Marinas <catalin.marinas@arm.com>
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -24,23 +25,51 @@
 #include <linux/genalloc.h>
 #include <linux/dma-mapping.h>
 #include <linux/dma-contiguous.h>
+#include <linux/mm.h>
+#include <linux/iommu.h>
 #include <linux/vmalloc.h>
 #include <linux/swiotlb.h>
 
 #include <asm/cacheflush.h>
+#include <asm/tlbflush.h>
+#include <linux/io.h>
+#include <asm/dma-iommu.h>
+#include <linux/dma-mapping-fast.h>
+#include <linux/msm_dma_iommu_mapping.h>
+
+#include "mm.h"
+
 
 static pgprot_t __get_dma_pgprot(struct dma_attrs *attrs, pgprot_t prot,
 				 bool coherent)
 {
-	if (!coherent || dma_get_attr(DMA_ATTR_WRITE_COMBINE, attrs))
+	if (dma_get_attr(DMA_ATTR_STRONGLY_ORDERED, attrs))
+		return pgprot_noncached(prot);
+	else if (!coherent || dma_get_attr(DMA_ATTR_WRITE_COMBINE, attrs))
 		return pgprot_writecombine(prot);
 	return prot;
 }
 
-static struct gen_pool *atomic_pool;
+static bool is_dma_coherent(struct device *dev, struct dma_attrs *attrs)
+{
+	bool is_coherent;
+
+	if (dma_get_attr(DMA_ATTR_FORCE_COHERENT, attrs))
+		is_coherent = true;
+	else if (dma_get_attr(DMA_ATTR_FORCE_NON_COHERENT, attrs))
+		is_coherent = false;
+	else if (is_device_dma_coherent(dev))
+		is_coherent = true;
+	else
+		is_coherent = false;
+
+	return is_coherent;
+}
 
+static struct gen_pool *atomic_pool;
+#define NO_KERNEL_MAPPING_DUMMY 0x2222
 #define DEFAULT_DMA_COHERENT_POOL_SIZE  SZ_256K
-static size_t atomic_pool_size = DEFAULT_DMA_COHERENT_POOL_SIZE;
+static size_t atomic_pool_size __initdata = DEFAULT_DMA_COHERENT_POOL_SIZE;
 
 static int __init early_coherent_pool(char *p)
 {
@@ -86,10 +115,47 @@
 	return 1;
 }
 
+static int __dma_update_pte(pte_t *pte, pgtable_t token, unsigned long addr,
+			    void *data)
+{
+	struct page *page = virt_to_page(addr);
+	pgprot_t prot = *(pgprot_t *)data;
+
+	set_pte(pte, mk_pte(page, prot));
+	return 0;
+}
+
+static int __dma_clear_pte(pte_t *pte, pgtable_t token, unsigned long addr,
+			    void *data)
+{
+	pte_clear(&init_mm, addr, pte);
+	return 0;
+}
+
+static void __dma_remap(struct page *page, size_t size, pgprot_t prot,
+			bool no_kernel_map)
+{
+	unsigned long start = (unsigned long) page_address(page);
+	unsigned long end = start + size;
+	int (*func)(pte_t *pte, pgtable_t token, unsigned long addr,
+			    void *data);
+
+	if (no_kernel_map)
+		func = __dma_clear_pte;
+	else
+		func = __dma_update_pte;
+
+	apply_to_page_range(&init_mm, start, size, func, &prot);
+	mb();
+	flush_tlb_kernel_range(start, end);
+}
+
 static void *__dma_alloc_coherent(struct device *dev, size_t size,
 				  dma_addr_t *dma_handle, gfp_t flags,
 				  struct dma_attrs *attrs)
 {
+	void *addr;
+
 	if (dev == NULL) {
 		WARN_ONCE(1, "Use an actual device structure for DMA allocation\n");
 		return NULL;
@@ -100,7 +166,6 @@
 		flags |= GFP_DMA;
 	if (dev_get_cma_area(dev) && gfpflags_allow_blocking(flags)) {
 		struct page *page;
-		void *addr;
 
 		page = dma_alloc_from_contiguous(dev, size >> PAGE_SHIFT,
 							get_order(size));
@@ -110,10 +175,20 @@
 		*dma_handle = phys_to_dma(dev, page_to_phys(page));
 		addr = page_address(page);
 		memset(addr, 0, size);
-		return addr;
 	} else {
-		return swiotlb_alloc_coherent(dev, size, dma_handle, flags);
+		addr = swiotlb_alloc_coherent(dev, size, dma_handle, flags);
 	}
+
+	if (addr && (dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs) ||
+		dma_get_attr(DMA_ATTR_STRONGLY_ORDERED, attrs))) {
+		/*
+		 * flush the caches here because we can't do it later, once the mapping is gone
+		 */
+		__dma_flush_range(addr, addr + size);
+		__dma_remap(virt_to_page(addr), size, 0, true);
+	}
+
+	return addr;
 }
 
 static void __dma_free_coherent(struct device *dev, size_t size,
@@ -123,11 +198,16 @@
 	bool freed;
 	phys_addr_t paddr = dma_to_phys(dev, dma_handle);
 
+	size = PAGE_ALIGN(size);
 	if (dev == NULL) {
 		WARN_ONCE(1, "Use an actual device structure for DMA allocation\n");
 		return;
 	}
 
+	if (dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs) ||
+	    dma_get_attr(DMA_ATTR_STRONGLY_ORDERED, attrs))
+		__dma_remap(phys_to_page(paddr), size, PAGE_KERNEL, false);
+
 	freed = dma_release_from_contiguous(dev,
 					phys_to_page(paddr),
 					size >> PAGE_SHIFT);
@@ -141,8 +221,7 @@
 {
 	struct page *page;
 	void *ptr, *coherent_ptr;
-	bool coherent = is_device_dma_coherent(dev);
-	pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL, false);
+	bool coherent = is_dma_coherent(dev, attrs);
 
 	size = PAGE_ALIGN(size);
 
@@ -164,16 +243,22 @@
 	if (coherent)
 		return ptr;
 
+	if (dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs)) {
+		coherent_ptr = (void *)NO_KERNEL_MAPPING_DUMMY;
+	} else {
+		if (!dma_get_attr(DMA_ATTR_STRONGLY_ORDERED, attrs))
 	/* remove any dirty cache lines on the kernel alias */
 	__dma_flush_range(ptr, ptr + size);
 
 	/* create a coherent mapping */
 	page = virt_to_page(ptr);
 	coherent_ptr = dma_common_contiguous_remap(page, size, VM_USERMAP,
-						   prot, NULL);
+					__get_dma_pgprot(attrs,
+						__pgprot(PROT_NORMAL_NC), false),
+						NULL);
 	if (!coherent_ptr)
 		goto no_map;
-
+	}
 	return coherent_ptr;
 
 no_map:
@@ -191,9 +276,10 @@
 
 	size = PAGE_ALIGN(size);
 
-	if (!is_device_dma_coherent(dev)) {
+	if (!is_dma_coherent(dev, attrs)) {
 		if (__free_from_pool(vaddr, size))
 			return;
+		if (!dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs))
 		vunmap(vaddr);
 	}
 	__dma_free_coherent(dev, size, swiotlb_addr, dma_handle, attrs);
@@ -207,7 +293,7 @@
 	dma_addr_t dev_addr;
 
 	dev_addr = swiotlb_map_page(dev, page, offset, size, dir, attrs);
-	if (!is_device_dma_coherent(dev))
+	if (!is_dma_coherent(dev, attrs))
 		__dma_map_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);
 
 	return dev_addr;
@@ -218,7 +304,7 @@
 				 size_t size, enum dma_data_direction dir,
 				 struct dma_attrs *attrs)
 {
-	if (!is_device_dma_coherent(dev))
+	if (!is_dma_coherent(dev, attrs))
 		__dma_unmap_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);
 	swiotlb_unmap_page(dev, dev_addr, size, dir, attrs);
 }
@@ -231,7 +317,7 @@
 	int i, ret;
 
 	ret = swiotlb_map_sg_attrs(dev, sgl, nelems, dir, attrs);
-	if (!is_device_dma_coherent(dev))
+	if (!is_dma_coherent(dev, attrs))
 		for_each_sg(sgl, sg, ret, i)
 			__dma_map_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
 				       sg->length, dir);
@@ -247,7 +333,7 @@
 	struct scatterlist *sg;
 	int i;
 
-	if (!is_device_dma_coherent(dev))
+	if (!is_dma_coherent(dev, attrs))
 		for_each_sg(sgl, sg, nelems, i)
 			__dma_unmap_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
 					 sg->length, dir);
@@ -313,7 +399,7 @@
 	unsigned long off = vma->vm_pgoff;
 
 	vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot,
-					     is_device_dma_coherent(dev));
+					     is_dma_coherent(dev, attrs));
 
 	if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
 		return ret;
@@ -341,6 +427,55 @@
 	return ret;
 }
 
+static void *arm64_dma_remap(struct device *dev, void *cpu_addr,
+			dma_addr_t handle, size_t size,
+			struct dma_attrs *attrs)
+{
+	struct page *page = phys_to_page(dma_to_phys(dev, handle));
+	pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL, false);
+	unsigned long offset = handle & ~PAGE_MASK;
+	struct vm_struct *area;
+	unsigned long addr;
+
+	size = PAGE_ALIGN(size + offset);
+
+	/*
+	 * DMA allocation can be mapped to user space, so lets
+	 * set VM_USERMAP flags too.
+	 */
+	area = get_vm_area(size, VM_USERMAP);
+	if (!area)
+		return NULL;
+
+	addr = (unsigned long)area->addr;
+	area->phys_addr = __pfn_to_phys(page_to_pfn(page));
+
+	if (ioremap_page_range(addr, addr + size, area->phys_addr, prot)) {
+		vunmap((void *)addr);
+		return NULL;
+	}
+	return (void *)addr + offset;
+}
+
+static void arm64_dma_unremap(struct device *dev, void *remapped_addr,
+				size_t size)
+{
+	struct vm_struct *area;
+
+	size = PAGE_ALIGN(size);
+	remapped_addr = (void *)((unsigned long)remapped_addr & PAGE_MASK);
+
+	area = find_vm_area(remapped_addr);
+	if (!area) {
+		WARN(1, "trying to free invalid coherent area: %p\n",
+			remapped_addr);
+		return;
+	}
+	vunmap(remapped_addr);
+	flush_tlb_kernel_range((unsigned long)remapped_addr,
+			(unsigned long)(remapped_addr + size));
+}
+
 static struct dma_map_ops swiotlb_dma_ops = {
 	.alloc = __dma_alloc,
 	.free = __dma_free,
@@ -356,6 +491,8 @@
 	.sync_sg_for_device = __swiotlb_sync_sg_for_device,
 	.dma_supported = swiotlb_dma_supported,
 	.mapping_error = swiotlb_dma_mapping_error,
+	.remap = arm64_dma_remap,
+	.unremap = arm64_dma_unremap,
 };
 
 static int __init atomic_pool_init(void)
@@ -406,7 +543,7 @@
 	goto out;
 
 remove_mapping:
-	dma_common_free_remap(addr, atomic_pool_size, VM_USERMAP);
+	dma_common_free_remap(addr, atomic_pool_size, VM_USERMAP, true);
 destroy_genpool:
 	gen_pool_destroy(atomic_pool);
 	atomic_pool = NULL;
@@ -427,6 +564,7 @@
 			   dma_addr_t *dma_handle, gfp_t flags,
 			   struct dma_attrs *attrs)
 {
+	WARN(1, "dma alloc failure, device may be missing a call to arch_setup_dma_ops");
 	return NULL;
 }
 
@@ -542,7 +680,7 @@
 				 dma_addr_t *handle, gfp_t gfp,
 				 struct dma_attrs *attrs)
 {
-	bool coherent = is_device_dma_coherent(dev);
+	bool coherent = is_dma_coherent(dev, attrs);
 	int ioprot = dma_direction_to_prot(DMA_BIDIRECTIONAL, coherent);
 	size_t iosize = size;
 	void *addr;
@@ -624,7 +762,7 @@
 		if (WARN_ON(!area || !area->pages))
 			return;
 		iommu_dma_free(dev, area->pages, iosize, &handle);
-		dma_common_free_remap(cpu_addr, size, VM_USERMAP);
+		dma_common_free_remap(cpu_addr, size, VM_USERMAP, true);
 	} else {
 		iommu_dma_unmap_page(dev, handle, iosize, 0, NULL);
 		__free_pages(virt_to_page(cpu_addr), get_order(size));
@@ -639,7 +777,7 @@
 	int ret;
 
 	vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot,
-					     is_device_dma_coherent(dev));
+					     is_dma_coherent(dev, attrs));
 
 	if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
 		return ret;
@@ -696,7 +834,7 @@
 				   enum dma_data_direction dir,
 				   struct dma_attrs *attrs)
 {
-	bool coherent = is_device_dma_coherent(dev);
+	bool coherent = is_dma_coherent(dev, attrs);
 	int prot = dma_direction_to_prot(dir, coherent);
 	dma_addr_t dev_addr = iommu_dma_map_page(dev, page, offset, size, prot);
 
@@ -749,7 +887,7 @@
 				int nelems, enum dma_data_direction dir,
 				struct dma_attrs *attrs)
 {
-	bool coherent = is_device_dma_coherent(dev);
+	bool coherent = is_dma_coherent(dev, attrs);
 
 	if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
 		__iommu_sync_sg_for_device(dev, sgl, nelems, dir);
@@ -782,7 +920,6 @@
 	.sync_single_for_device = __iommu_sync_single_for_device,
 	.sync_sg_for_cpu = __iommu_sync_sg_for_cpu,
 	.sync_sg_for_device = __iommu_sync_sg_for_device,
-	.dma_supported = iommu_dma_supported,
 	.mapping_error = iommu_dma_mapping_error,
 };
 
@@ -896,7 +1033,7 @@
 	return 0;
 }
 
-static int register_iommu_dma_ops_notifier(struct bus_type *bus)
+static int __init register_iommu_dma_ops_notifier(struct bus_type *bus)
 {
 	struct notifier_block *nb = kzalloc(sizeof(*nb), GFP_KERNEL);
 	int ret;
@@ -993,3 +1130,922 @@
 	dev->archdata.dma_coherent = coherent;
 	__iommu_setup_dma_ops(dev, dma_base, size, iommu);
 }
+EXPORT_SYMBOL(arch_setup_dma_ops);
+
+#ifdef CONFIG_ARM64_DMA_USE_IOMMU
+
+static int __get_iommu_pgprot(struct dma_attrs *attrs, int prot,
+			      bool coherent)
+{
+	if (!dma_get_attr(DMA_ATTR_EXEC_MAPPING, attrs))
+		prot |= IOMMU_NOEXEC;
+	if (coherent)
+		prot |= IOMMU_CACHE;
+
+	return prot;
+}
+
+/*
+ * Make an area consistent for devices.
+ * Note: Drivers should NOT use this function directly, as it will break
+ * platforms with CONFIG_DMABOUNCE.
+ * Use the driver DMA support - see dma-mapping.h (dma_sync_*)
+ */
+static void __dma_page_cpu_to_dev(struct page *page, unsigned long off,
+	size_t size, enum dma_data_direction dir)
+{
+	__dma_map_area(page_address(page) + off, size, dir);
+}
+
+static void __dma_page_dev_to_cpu(struct page *page, unsigned long off,
+	size_t size, enum dma_data_direction dir)
+{
+	__dma_unmap_area(page_address(page) + off, size, dir);
+
+	/*
+	 * Mark the D-cache clean for this page to avoid extra flushing.
+	 */
+	if (dir != DMA_TO_DEVICE && off == 0 && size >= PAGE_SIZE)
+		set_bit(PG_dcache_clean, &page->flags);
+}
+
+static int arm_dma_set_mask(struct device *dev, u64 dma_mask)
+{
+	if (!dev->dma_mask || !dma_supported(dev, dma_mask))
+		return -EIO;
+
+	*dev->dma_mask = dma_mask;
+
+	return 0;
+}
+
+/* IOMMU */
+
+static void __dma_clear_buffer(struct page *page, size_t size,
+			       struct dma_attrs *attrs, bool is_coherent)
+{
+	/*
+	 * Ensure that the allocated pages are zeroed, and that any data
+	 * lurking in the kernel direct-mapped region is invalidated.
+	 */
+	void *ptr = page_address(page);
+	if (!dma_get_attr(DMA_ATTR_SKIP_ZEROING, attrs))
+		memset(ptr, 0, size);
+	if (!is_coherent)
+		dmac_flush_range(ptr, ptr + size);
+}
+
+static inline dma_addr_t __alloc_iova(struct dma_iommu_mapping *mapping,
+				      size_t size)
+{
+	unsigned int order = get_order(size);
+	unsigned int align = 0;
+	unsigned int count, start;
+	unsigned long flags;
+
+	if (order > CONFIG_ARM64_DMA_IOMMU_ALIGNMENT)
+		order = CONFIG_ARM64_DMA_IOMMU_ALIGNMENT;
+
+	count = PAGE_ALIGN(size) >> PAGE_SHIFT;
+	align = (1 << order) - 1;
+
+	spin_lock_irqsave(&mapping->lock, flags);
+	start = bitmap_find_next_zero_area(mapping->bitmap, mapping->bits, 0,
+					   count, align);
+	if (start > mapping->bits) {
+		spin_unlock_irqrestore(&mapping->lock, flags);
+		return DMA_ERROR_CODE;
+	}
+
+	bitmap_set(mapping->bitmap, start, count);
+	spin_unlock_irqrestore(&mapping->lock, flags);
+
+	return mapping->base + (start << PAGE_SHIFT);
+}
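
__alloc_iova above carves device addresses out of a per-mapping bitmap, one
bit per page, aligning each allocation to its buddy order (capped by
CONFIG_ARM64_DMA_IOMMU_ALIGNMENT); __free_iova simply clears the same bits.
A self-contained model of that allocate/free cycle (the bitmap helpers are
simplified stand-ins for the kernel's, and the base address is an
assumption):

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT	12
#define BITS		1024		/* models a 4 MiB IOVA window */

static uint8_t bitmap[BITS / 8];
static uint64_t base = 0x80000000ull;	/* stands in for mapping->base */

static int  test_bit_(unsigned i)  { return bitmap[i / 8] >> (i % 8) & 1; }
static void set_bit_(unsigned i)   { bitmap[i / 8] |=  1u << (i % 8); }
static void clear_bit_(unsigned i) { bitmap[i / 8] &= ~(1u << (i % 8)); }

/* Find 'count' clear bits starting at a multiple of (align_mask + 1). */
static long alloc_iova(unsigned count, unsigned align_mask)
{
	for (unsigned start = 0; start + count <= BITS;
	     start = (start + align_mask + 1) & ~align_mask) {
		unsigned i;

		for (i = 0; i < count && !test_bit_(start + i); i++)
			;
		if (i == count) {
			for (i = 0; i < count; i++)
				set_bit_(start + i);
			return (long)start;
		}
	}
	return -1;			/* DMA_ERROR_CODE analogue */
}

static void free_iova(long start, unsigned count)
{
	for (unsigned i = 0; i < count; i++)
		clear_bit_((unsigned)start + i);
}

int main(void)
{
	long a = alloc_iova(4, 3);	/* 16 KiB, 16 KiB aligned */
	long b = alloc_iova(1, 0);

	printf("a = %#llx  b = %#llx\n",
	       (unsigned long long)(base + ((uint64_t)a << PAGE_SHIFT)),
	       (unsigned long long)(base + ((uint64_t)b << PAGE_SHIFT)));
	free_iova(a, 4);
	free_iova(b, 1);
	return 0;
}
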
+
+static inline void __free_iova(struct dma_iommu_mapping *mapping,
+			       dma_addr_t addr, size_t size)
+{
+	unsigned int start = (addr - mapping->base) >> PAGE_SHIFT;
+	unsigned int count = size >> PAGE_SHIFT;
+	unsigned long flags;
+
+	spin_lock_irqsave(&mapping->lock, flags);
+	bitmap_clear(mapping->bitmap, start, count);
+	spin_unlock_irqrestore(&mapping->lock, flags);
+}
+
+static struct page **__iommu_alloc_buffer(struct device *dev, size_t size,
+					  gfp_t gfp, struct dma_attrs *attrs)
+{
+	struct page **pages;
+	size_t count = size >> PAGE_SHIFT;
+	size_t array_size = count * sizeof(struct page *);
+	int i = 0;
+	bool is_coherent = is_dma_coherent(dev, attrs);
+
+	if (array_size <= PAGE_SIZE)
+		pages = kzalloc(array_size, gfp);
+	else
+		pages = vzalloc(array_size);
+	if (!pages)
+		return NULL;
+
+	if (dma_get_attr(DMA_ATTR_FORCE_CONTIGUOUS, attrs)) {
+		unsigned long order = get_order(size);
+		struct page *page;
+
+		page = dma_alloc_from_contiguous(dev, count, order);
+		if (!page)
+			goto error;
+
+		__dma_clear_buffer(page, size, attrs, is_coherent);
+
+		for (i = 0; i < count; i++)
+			pages[i] = page + i;
+
+		return pages;
+	}
+
+	/*
+	 * IOMMU can map any pages, so highmem can also be used here
+	 */
+	gfp |= __GFP_NOWARN | __GFP_HIGHMEM;
+
+	while (count) {
+		int j, order = __fls(count);
+
+		pages[i] = alloc_pages(gfp, order);
+		while (!pages[i] && order)
+			pages[i] = alloc_pages(gfp, --order);
+		if (!pages[i])
+			goto error;
+
+		if (order) {
+			split_page(pages[i], order);
+			j = 1 << order;
+			while (--j)
+				pages[i + j] = pages[i] + j;
+		}
+
+		__dma_clear_buffer(pages[i], PAGE_SIZE << order, attrs,
+				   is_coherent);
+		i += 1 << order;
+		count -= 1 << order;
+	}
+
+	return pages;
+error:
+	while (i--)
+		if (pages[i])
+			__free_pages(pages[i], 0);
+	if (array_size <= PAGE_SIZE)
+		kfree(pages);
+	else
+		vfree(pages);
+	return NULL;
+}
+
+static int __iommu_free_buffer(struct device *dev, struct page **pages,
+			       size_t size, struct dma_attrs *attrs)
+{
+	int count = size >> PAGE_SHIFT;
+	int array_size = count * sizeof(struct page *);
+	int i;
+
+	if (dma_get_attr(DMA_ATTR_FORCE_CONTIGUOUS, attrs)) {
+		dma_release_from_contiguous(dev, pages[0], count);
+	} else {
+		for (i = 0; i < count; i++)
+			if (pages[i])
+				__free_pages(pages[i], 0);
+	}
+
+	if (array_size <= PAGE_SIZE)
+		kfree(pages);
+	else
+		vfree(pages);
+	return 0;
+}
+
+/*
+ * Create a CPU mapping for the specified pages
+ */
+static void *
+__iommu_alloc_remap(struct page **pages, size_t size, gfp_t gfp, pgprot_t prot,
+		    const void *caller)
+{
+	return dma_common_pages_remap(pages, size, VM_USERMAP, prot, caller);
+}
+
+/*
+ * Create a mapping in the device IO address space for the specified pages
+ */
+static dma_addr_t __iommu_create_mapping(struct device *dev,
+					struct page **pages, size_t size,
+					struct dma_attrs *attrs)
+{
+	struct dma_iommu_mapping *mapping = dev->archdata.mapping;
+	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
+	dma_addr_t dma_addr, iova;
+	int i, ret;
+	int prot = IOMMU_READ | IOMMU_WRITE;
+
+	dma_addr = __alloc_iova(mapping, size);
+	if (dma_addr == DMA_ERROR_CODE)
+		return dma_addr;
+	prot = __get_iommu_pgprot(attrs, prot,
+				  is_dma_coherent(dev, attrs));
+
+	iova = dma_addr;
+	for (i = 0; i < count; ) {
+		unsigned int next_pfn = page_to_pfn(pages[i]) + 1;
+		phys_addr_t phys = page_to_phys(pages[i]);
+		unsigned int len, j;
+
+		for (j = i + 1; j < count; j++, next_pfn++)
+			if (page_to_pfn(pages[j]) != next_pfn)
+				break;
+
+		len = (j - i) << PAGE_SHIFT;
+		ret = iommu_map(mapping->domain, iova, phys, len, prot);
+		if (ret < 0)
+			goto fail;
+		iova += len;
+		i = j;
+	}
+	return dma_addr;
+fail:
+	iommu_unmap(mapping->domain, dma_addr, iova-dma_addr);
+	__free_iova(mapping, dma_addr, size);
+	return DMA_ERROR_CODE;
+}
+
+static int __iommu_remove_mapping(struct device *dev, dma_addr_t iova,
+				size_t size)
+{
+	struct dma_iommu_mapping *mapping = dev->archdata.mapping;
+
+	/*
+	 * add optional in-page offset from iova to size and align
+	 * result to page size
+	 */
+	size = PAGE_ALIGN((iova & ~PAGE_MASK) + size);
+	iova &= PAGE_MASK;
+
+	iommu_unmap(mapping->domain, iova, size);
+	__free_iova(mapping, iova, size);
+	return 0;
+}
+
+static struct page **__atomic_get_pages(void *addr)
+{
+	struct page *page;
+	phys_addr_t phys;
+
+	phys = gen_pool_virt_to_phys(atomic_pool, (unsigned long)addr);
+	page = phys_to_page(phys);
+
+	return (struct page **)page;
+}
+
+static struct page **__iommu_get_pages(void *cpu_addr, struct dma_attrs *attrs)
+{
+	struct vm_struct *area;
+
+	if (__in_atomic_pool(cpu_addr, PAGE_SIZE))
+		return __atomic_get_pages(cpu_addr);
+
+	if (dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs))
+		return cpu_addr;
+
+	area = find_vm_area(cpu_addr);
+	if (area)
+		return area->pages;
+	return NULL;
+}
+
+static void *__iommu_alloc_atomic(struct device *dev, size_t size,
+				  dma_addr_t *handle, gfp_t gfp,
+				struct dma_attrs *attrs)
+{
+	struct page *page;
+	struct page **pages;
+	size_t count = size >> PAGE_SHIFT;
+	size_t array_size = count * sizeof(struct page *);
+	int i;
+	void *addr;
+	bool coherent = is_dma_coherent(dev, attrs);
+
+	if (array_size <= PAGE_SIZE)
+		pages = kzalloc(array_size, gfp);
+	else
+		pages = vzalloc(array_size);
+
+	if (!pages)
+		return NULL;
+
+	if (coherent) {
+		page = alloc_pages(gfp, get_order(size));
+		addr = page ? page_address(page) : NULL;
+	} else {
+		addr = __alloc_from_pool(size, &page, gfp);
+	}
+
+	if (!addr)
+		goto err_free;
+
+	for (i = 0; i < count ; i++)
+		pages[i] = page + i;
+
+	*handle = __iommu_create_mapping(dev, pages, size, attrs);
+	if (*handle == DMA_ERROR_CODE)
+		goto err_mapping;
+
+	kvfree(pages);
+	return addr;
+
+err_mapping:
+	if (coherent)
+		__free_pages(page, get_order(size));
+	else
+		__free_from_pool(addr, size);
+err_free:
+	kvfree(pages);
+	return NULL;
+}
+
+static void __iommu_free_atomic(struct device *dev, void *cpu_addr,
+				dma_addr_t handle, size_t size)
+{
+	__iommu_remove_mapping(dev, handle, size);
+	__free_from_pool(cpu_addr, size);
+}
+
+static void *arm_iommu_alloc_attrs(struct device *dev, size_t size,
+	    dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs)
+{
+	bool coherent = is_dma_coherent(dev, attrs);
+	pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL, coherent);
+	struct page **pages;
+	void *addr = NULL;
+
+	*handle = DMA_ERROR_CODE;
+	size = PAGE_ALIGN(size);
+
+	if (!gfpflags_allow_blocking(gfp))
+		return __iommu_alloc_atomic(dev, size, handle, gfp, attrs);
+
+	/*
+	 * Following is a work-around (a.k.a. hack) to prevent pages
+	 * with __GFP_COMP being passed to split_page() which cannot
+	 * handle them.  The real problem is that this flag probably
+	 * should be 0 on ARM as it is not supported on this
+	 * platform; see CONFIG_HUGETLBFS.
+	 */
+	gfp &= ~(__GFP_COMP);
+
+	pages = __iommu_alloc_buffer(dev, size, gfp, attrs);
+	if (!pages)
+		return NULL;
+
+	*handle = __iommu_create_mapping(dev, pages, size, attrs);
+	if (*handle == DMA_ERROR_CODE)
+		goto err_buffer;
+
+	if (dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs))
+		return pages;
+
+	addr = __iommu_alloc_remap(pages, size, gfp, prot,
+				   __builtin_return_address(0));
+	if (!addr)
+		goto err_mapping;
+
+	return addr;
+
+err_mapping:
+	__iommu_remove_mapping(dev, *handle, size);
+err_buffer:
+	__iommu_free_buffer(dev, pages, size, attrs);
+	return NULL;
+}
+
+static int arm_iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
+		    void *cpu_addr, dma_addr_t dma_addr, size_t size,
+		    struct dma_attrs *attrs)
+{
+	unsigned long uaddr = vma->vm_start;
+	unsigned long usize = vma->vm_end - vma->vm_start;
+	struct page **pages = __iommu_get_pages(cpu_addr, attrs);
+	bool coherent = is_dma_coherent(dev, attrs);
+
+	vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot,
+					     coherent);
+
+	if (!pages)
+		return -ENXIO;
+
+	do {
+		int ret = vm_insert_page(vma, uaddr, *pages++);
+		if (ret) {
+			pr_err("Remapping memory failed: %d\n", ret);
+			return ret;
+		}
+		uaddr += PAGE_SIZE;
+		usize -= PAGE_SIZE;
+	} while (usize > 0);
+
+	return 0;
+}
+
+/*
+ * free a page as defined by the above mapping.
+ * Must not be called with IRQs disabled.
+ */
+void arm_iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
+			  dma_addr_t handle, struct dma_attrs *attrs)
+{
+	struct page **pages;
+	size = PAGE_ALIGN(size);
+
+	if (__in_atomic_pool(cpu_addr, size)) {
+		__iommu_free_atomic(dev, cpu_addr, handle, size);
+		return;
+	}
+
+	pages = __iommu_get_pages(cpu_addr, attrs);
+	if (!pages) {
+		WARN(1, "trying to free invalid coherent area: %p\n", cpu_addr);
+		return;
+	}
+
+	if (!dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs))
+		dma_common_free_remap(cpu_addr, size, VM_USERMAP, true);
+
+	__iommu_remove_mapping(dev, handle, size);
+	__iommu_free_buffer(dev, pages, size, attrs);
+}
+
+int arm_iommu_get_sgtable(struct device *dev, struct sg_table *sgt,
+				 void *cpu_addr, dma_addr_t dma_addr,
+				 size_t size, struct dma_attrs *attrs)
+{
+	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
+	struct page **pages = __iommu_get_pages(cpu_addr, attrs);
+
+	if (!pages)
+		return -ENXIO;
+
+	return sg_alloc_table_from_pages(sgt, pages, count, 0, size,
+					 GFP_KERNEL);
+}
+
+static int __dma_direction_to_prot(enum dma_data_direction dir)
+{
+	int prot;
+
+	switch (dir) {
+	case DMA_BIDIRECTIONAL:
+		prot = IOMMU_READ | IOMMU_WRITE;
+		break;
+	case DMA_TO_DEVICE:
+		prot = IOMMU_READ;
+		break;
+	case DMA_FROM_DEVICE:
+		prot = IOMMU_WRITE;
+		break;
+	default:
+		prot = 0;
+	}
+
+	return prot;
+}
+
+/**
+ * arm_iommu_map_sg - map a set of SG buffers for streaming mode DMA
+ * @dev: valid struct device pointer
+ * @sg: list of buffers
+ * @nents: number of buffers to map
+ * @dir: DMA transfer direction
+ *
+ * Map a set of buffers described by scatterlist in streaming mode for DMA.
+ * The scatter gather list elements are merged together (if possible) and
+ * tagged with the appropriate dma address and length. They are obtained via
+ * sg_dma_{address,length}.
+ */
+int arm_iommu_map_sg(struct device *dev, struct scatterlist *sg,
+		int nents, enum dma_data_direction dir, struct dma_attrs *attrs)
+{
+	struct scatterlist *s;
+	int ret, i;
+	struct dma_iommu_mapping *mapping = dev->archdata.mapping;
+	unsigned int total_length = 0, current_offset = 0;
+	dma_addr_t iova;
+	int prot = __dma_direction_to_prot(dir);
+
+	for_each_sg(sg, s, nents, i)
+		total_length += s->length;
+
+	iova = __alloc_iova(mapping, total_length);
+	if (iova == DMA_ERROR_CODE) {
+		dev_err(dev, "Couldn't allocate iova for sg %p\n", sg);
+		return 0;
+	}
+	prot = __get_iommu_pgprot(attrs, prot,
+				  is_dma_coherent(dev, attrs));
+
+	ret = iommu_map_sg(mapping->domain, iova, sg, nents, prot);
+	if (ret != total_length) {
+		__free_iova(mapping, iova, total_length);
+		return 0;
+	}
+
+	for_each_sg(sg, s, nents, i) {
+		s->dma_address = iova + current_offset;
+		s->dma_length = total_length - current_offset;
+		current_offset += s->length;
+	}
+
+	return nents;
+}
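
Rather than mapping each scatterlist segment separately, arm_iommu_map_sg
above reserves a single IOVA window for the whole list and lets
iommu_map_sg() lay the segments out back to back; each segment's
dma_address is its running offset into that window, and dma_length is set
to the bytes remaining, so sg_dma_len() on the first entry covers the full
mapping. The bookkeeping, checked numerically (segment sizes are arbitrary
examples):

#include <stdio.h>

int main(void)
{
	unsigned long iova = 0x80000000ul;	/* as if from __alloc_iova() */
	unsigned long lengths[] = { 4096, 8192, 4096 };
	unsigned long total = 0, offset = 0;

	for (int i = 0; i < 3; i++)
		total += lengths[i];

	for (int i = 0; i < 3; i++) {
		printf("seg %d: dma_address=%#lx dma_length=%lu\n",
		       i, iova + offset, total - offset);
		offset += lengths[i];
	}
	return 0;
}
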
+
+/**
+ * arm_iommu_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
+ * @dev: valid struct device pointer
+ * @sg: list of buffers
+ * @nents: number of buffers to unmap (same as was passed to dma_map_sg)
+ * @dir: DMA transfer direction (same as was passed to dma_map_sg)
+ *
+ * Unmap a set of streaming mode DMA translations.  Again, CPU access
+ * rules concerning calls here are the same as for dma_unmap_single().
+ */
+void arm_iommu_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
+			enum dma_data_direction dir, struct dma_attrs *attrs)
+{
+	struct dma_iommu_mapping *mapping = dev->archdata.mapping;
+	unsigned int total_length = sg_dma_len(sg);
+	dma_addr_t iova = sg_dma_address(sg);
+
+	total_length = PAGE_ALIGN((iova & ~PAGE_MASK) + total_length);
+	iova &= PAGE_MASK;
+
+	iommu_unmap(mapping->domain, iova, total_length);
+	__free_iova(mapping, iova, total_length);
+}
+
+/**
+ * arm_iommu_sync_sg_for_cpu
+ * @dev: valid struct device pointer
+ * @sg: list of buffers
+ * @nents: number of buffers to map (returned from dma_map_sg)
+ * @dir: DMA transfer direction (same as was passed to dma_map_sg)
+ */
+void arm_iommu_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
+			int nents, enum dma_data_direction dir)
+{
+	struct scatterlist *s;
+	int i;
+	struct dma_iommu_mapping *mapping = dev->archdata.mapping;
+	dma_addr_t iova = sg_dma_address(sg);
+	bool iova_coherent = iommu_is_iova_coherent(mapping->domain, iova);
+
+	if (iova_coherent)
+		return;
+
+	for_each_sg(sg, s, nents, i)
+		__dma_page_dev_to_cpu(sg_page(s), s->offset, s->length, dir);
+
+}
+
+/**
+ * arm_iommu_sync_sg_for_device
+ * @dev: valid struct device pointer
+ * @sg: list of buffers
+ * @nents: number of buffers to map (returned from dma_map_sg)
+ * @dir: DMA transfer direction (same as was passed to dma_map_sg)
+ */
+void arm_iommu_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
+			int nents, enum dma_data_direction dir)
+{
+	struct scatterlist *s;
+	int i;
+	struct dma_iommu_mapping *mapping = dev->archdata.mapping;
+	dma_addr_t iova = sg_dma_address(sg);
+	bool iova_coherent = iommu_is_iova_coherent(mapping->domain, iova);
+
+	if (iova_coherent)
+		return;
+
+	for_each_sg(sg, s, nents, i)
+		__dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir);
+}
+
+
+/**
+ * arm_coherent_iommu_map_page
+ * @dev: valid struct device pointer
+ * @page: page that buffer resides in
+ * @offset: offset into page for start of buffer
+ * @size: size of buffer to map
+ * @dir: DMA transfer direction
+ *
+ * Coherent IOMMU aware version of arm_dma_map_page()
+ */
+static dma_addr_t arm_coherent_iommu_map_page(struct device *dev,
+	     struct page *page, unsigned long offset, size_t size,
+	     enum dma_data_direction dir, struct dma_attrs *attrs)
+{
+	struct dma_iommu_mapping *mapping = dev->archdata.mapping;
+	dma_addr_t dma_addr;
+	int ret, prot, len, start_offset, map_offset;
+
+	map_offset = offset & ~PAGE_MASK;
+	start_offset = offset & PAGE_MASK;
+	len = PAGE_ALIGN(map_offset + size);
+
+	dma_addr = __alloc_iova(mapping, len);
+	if (dma_addr == DMA_ERROR_CODE)
+		return dma_addr;
+
+	prot = __dma_direction_to_prot(dir);
+	prot = __get_iommu_pgprot(attrs, prot,
+				  is_dma_coherent(dev, attrs));
+
+	ret = iommu_map(mapping->domain, dma_addr, page_to_phys(page) +
+			start_offset, len, prot);
+	if (ret < 0)
+		goto fail;
+
+	return dma_addr + map_offset;
+fail:
+	__free_iova(mapping, dma_addr, len);
+	return DMA_ERROR_CODE;
+}
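
arm_coherent_iommu_map_page splits the caller's offset into a page-aligned
part (start_offset, folded into the physical address handed to iommu_map)
and an in-page remainder (map_offset, added back onto the returned handle),
rounding the mapped length up so the IOMMU only ever deals in whole pages.
The arithmetic, checked in isolation:

#include <assert.h>
#include <stdio.h>

#define PAGE_SIZE	4096ul
#define PAGE_MASK	(~(PAGE_SIZE - 1))
#define PAGE_ALIGN(x)	(((x) + PAGE_SIZE - 1) & PAGE_MASK)

int main(void)
{
	unsigned long offset = 5000, size = 3000; /* arbitrary sub-page case */

	unsigned long map_offset   = offset & ~PAGE_MASK;	/* 904 */
	unsigned long start_offset = offset & PAGE_MASK;	/* 4096 */
	unsigned long len          = PAGE_ALIGN(map_offset + size);

	/* The mapped window covers every byte of the buffer. */
	assert(start_offset + map_offset == offset);
	assert(map_offset + size <= len);
	printf("map_offset=%lu start_offset=%lu len=%lu\n",
	       map_offset, start_offset, len);
	return 0;
}
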
+
+/**
+ * arm_iommu_map_page
+ * @dev: valid struct device pointer
+ * @page: page that buffer resides in
+ * @offset: offset into page for start of buffer
+ * @size: size of buffer to map
+ * @dir: DMA transfer direction
+ *
+ * IOMMU aware version of arm_dma_map_page()
+ */
+static dma_addr_t arm_iommu_map_page(struct device *dev, struct page *page,
+	     unsigned long offset, size_t size, enum dma_data_direction dir,
+	     struct dma_attrs *attrs)
+{
+	if (!is_dma_coherent(dev, attrs) &&
+	      !dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
+		__dma_page_cpu_to_dev(page, offset, size, dir);
+
+	return arm_coherent_iommu_map_page(dev, page, offset, size, dir, attrs);
+}
+
+/**
+ * arm_iommu_unmap_page
+ * @dev: valid struct device pointer
+ * @handle: DMA address of buffer
+ * @size: size of buffer (same as passed to dma_map_page)
+ * @dir: DMA transfer direction (same as passed to dma_map_page)
+ *
+ * IOMMU aware version of arm_dma_unmap_page()
+ */
+static void arm_iommu_unmap_page(struct device *dev, dma_addr_t handle,
+		size_t size, enum dma_data_direction dir,
+		struct dma_attrs *attrs)
+{
+	struct dma_iommu_mapping *mapping = dev->archdata.mapping;
+	dma_addr_t iova = handle & PAGE_MASK;
+	struct page *page = phys_to_page(iommu_iova_to_phys(
+						mapping->domain, iova));
+	int offset = handle & ~PAGE_MASK;
+	int len = PAGE_ALIGN(size + offset);
+
+	if (!(is_dma_coherent(dev, attrs) ||
+	      dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs)))
+		__dma_page_dev_to_cpu(page, offset, size, dir);
+
+	iommu_unmap(mapping->domain, iova, len);
+	__free_iova(mapping, iova, len);
+}
+
+static void arm_iommu_sync_single_for_cpu(struct device *dev,
+		dma_addr_t handle, size_t size, enum dma_data_direction dir)
+{
+	struct dma_iommu_mapping *mapping = dev->archdata.mapping;
+	dma_addr_t iova = handle & PAGE_MASK;
+	struct page *page = phys_to_page(iommu_iova_to_phys(
+						mapping->domain, iova));
+	unsigned int offset = handle & ~PAGE_MASK;
+	bool iova_coherent = iommu_is_iova_coherent(mapping->domain, handle);
+
+	if (!iova_coherent)
+		__dma_page_dev_to_cpu(page, offset, size, dir);
+}
+
+static void arm_iommu_sync_single_for_device(struct device *dev,
+		dma_addr_t handle, size_t size, enum dma_data_direction dir)
+{
+	struct dma_iommu_mapping *mapping = dev->archdata.mapping;
+	dma_addr_t iova = handle & PAGE_MASK;
+	struct page *page = phys_to_page(iommu_iova_to_phys(
+						mapping->domain, iova));
+	unsigned int offset = handle & ~PAGE_MASK;
+	bool iova_coherent = iommu_is_iova_coherent(mapping->domain, handle);
+
+	if (!iova_coherent)
+		__dma_page_cpu_to_dev(page, offset, size, dir);
+}
+
+static int arm_iommu_dma_supported(struct device *dev, u64 mask)
+{
+	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
+
+	if (!mapping) {
+		dev_warn(dev, "No IOMMU mapping for device\n");
+		return 0;
+	}
+
+	return iommu_dma_supported(mapping->domain, dev, mask);
+}
+
+static int arm_iommu_mapping_error(struct device *dev,
+				   dma_addr_t dma_addr)
+{
+	return dma_addr == DMA_ERROR_CODE;
+}
+
+const struct dma_map_ops iommu_ops = {
+	.alloc		= arm_iommu_alloc_attrs,
+	.free		= arm_iommu_free_attrs,
+	.mmap		= arm_iommu_mmap_attrs,
+	.get_sgtable	= arm_iommu_get_sgtable,
+
+	.map_page		= arm_iommu_map_page,
+	.unmap_page		= arm_iommu_unmap_page,
+	.sync_single_for_cpu	= arm_iommu_sync_single_for_cpu,
+	.sync_single_for_device	= arm_iommu_sync_single_for_device,
+
+	.map_sg			= arm_iommu_map_sg,
+	.unmap_sg		= arm_iommu_unmap_sg,
+	.sync_sg_for_cpu	= arm_iommu_sync_sg_for_cpu,
+	.sync_sg_for_device	= arm_iommu_sync_sg_for_device,
+
+	.set_dma_mask		= arm_dma_set_mask,
+	.dma_supported		= arm_iommu_dma_supported,
+	.mapping_error		= arm_iommu_mapping_error,
+};
+
+/**
+ * arm_iommu_create_mapping
+ * @bus: pointer to the bus holding the client device (for IOMMU calls)
+ * @base: start address of the valid IO address space
+ * @size: maximum size of the valid IO address space
+ *
+ * Creates a mapping structure which holds information about used/unused
+ * IO address ranges, which is required to perform memory allocation and
+ * mapping with IOMMU aware functions.
+ *
+ * The client device needs to be attached to the mapping with the
+ * arm_iommu_attach_device() function.
+ */
+struct dma_iommu_mapping *
+arm_iommu_create_mapping(struct bus_type *bus, dma_addr_t base, size_t size)
+{
+	unsigned int bits = size >> PAGE_SHIFT;
+	unsigned int bitmap_size = BITS_TO_LONGS(bits) * sizeof(long);
+	struct dma_iommu_mapping *mapping;
+	int err = -ENOMEM;
+
+	if (!bitmap_size)
+		return ERR_PTR(-EINVAL);
+
+	mapping = kzalloc(sizeof(struct dma_iommu_mapping), GFP_KERNEL);
+	if (!mapping)
+		goto err;
+
+	mapping->bitmap = kzalloc(bitmap_size, GFP_KERNEL | __GFP_NOWARN |
+							__GFP_NORETRY);
+	if (!mapping->bitmap)
+		mapping->bitmap = vzalloc(bitmap_size);
+
+	if (!mapping->bitmap)
+		goto err2;
+
+	mapping->base = base;
+	mapping->bits = bits;
+	spin_lock_init(&mapping->lock);
+
+	mapping->domain = iommu_domain_alloc(bus);
+	if (!mapping->domain)
+		goto err3;
+
+	kref_init(&mapping->kref);
+	return mapping;
+err3:
+	kvfree(mapping->bitmap);
+err2:
+	kfree(mapping);
+err:
+	return ERR_PTR(err);
+}
+EXPORT_SYMBOL(arm_iommu_create_mapping);
+
+static void release_iommu_mapping(struct kref *kref)
+{
+	struct dma_iommu_mapping *mapping =
+		container_of(kref, struct dma_iommu_mapping, kref);
+
+	iommu_domain_free(mapping->domain);
+	kvfree(mapping->bitmap);
+	kfree(mapping);
+}
+
+void arm_iommu_release_mapping(struct dma_iommu_mapping *mapping)
+{
+	if (mapping)
+		kref_put(&mapping->kref, release_iommu_mapping);
+}
+EXPORT_SYMBOL(arm_iommu_release_mapping);
+
+/**
+ * arm_iommu_attach_device
+ * @dev: valid struct device pointer
+ * @mapping: io address space mapping structure (returned from
+ *	arm_iommu_create_mapping)
+ *
+ * Attaches the specified IO address space mapping to the provided device;
+ * this replaces the dma operations (dma_map_ops pointer) with the
+ * IOMMU-aware version. More than one client might be attached to
+ * the same IO address space mapping.
+ */
+int arm_iommu_attach_device(struct device *dev,
+			    struct dma_iommu_mapping *mapping)
+{
+	int err;
+	int s1_bypass = 0, is_fast = 0;
+
+	iommu_domain_get_attr(mapping->domain, DOMAIN_ATTR_FAST, &is_fast);
+	if (is_fast)
+		return fast_smmu_attach_device(dev, mapping);
+
+	err = iommu_attach_device(mapping->domain, dev);
+	if (err)
+		return err;
+
+	iommu_domain_get_attr(mapping->domain, DOMAIN_ATTR_S1_BYPASS,
+					&s1_bypass);
+
+	kref_get(&mapping->kref);
+	dev->archdata.mapping = mapping;
+	if (!s1_bypass)
+		set_dma_ops(dev, &iommu_ops);
+
+	pr_debug("Attached IOMMU controller to %s device.\n", dev_name(dev));
+	return 0;
+}
+EXPORT_SYMBOL(arm_iommu_attach_device);
+
+/**
+ * arm_iommu_detach_device
+ * @dev: valid struct device pointer
+ *
+ * Detaches the provided device from a previously attached map.
+ * This voids the dma operations (dma_map_ops pointer).
+ */
+void arm_iommu_detach_device(struct device *dev)
+{
+	struct dma_iommu_mapping *mapping;
+	int is_fast = 0, s1_bypass = 0;
+
+	mapping = to_dma_iommu_mapping(dev);
+	if (!mapping) {
+		dev_warn(dev, "Not attached\n");
+		return;
+	}
+
+	iommu_domain_get_attr(mapping->domain, DOMAIN_ATTR_FAST, &is_fast);
+	if (is_fast) {
+		fast_smmu_detach_device(dev, mapping);
+		return;
+	}
+
+	iommu_domain_get_attr(mapping->domain, DOMAIN_ATTR_S1_BYPASS,
+					&s1_bypass);
+
+	if (msm_dma_unmap_all_for_dev(dev))
+		dev_warn(dev, "IOMMU detach with outstanding mappings\n");
+
+	iommu_detach_device(mapping->domain, dev);
+	kref_put(&mapping->kref, release_iommu_mapping);
+	dev->archdata.mapping = NULL;
+	if (!s1_bypass)
+		set_dma_ops(dev, NULL);
+
+	pr_debug("Detached IOMMU controller from %s device.\n", dev_name(dev));
+}
+EXPORT_SYMBOL(arm_iommu_detach_device);
+
+#endif
diff -ruw linux-4.4.115/arch/arm64/mm/extable.c linux-4.4.115-fbx/arch/arm64/mm/extable.c
--- linux-4.4.115/arch/arm64/mm/extable.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/arch/arm64/mm/extable.c	2019-01-22 16:16:21.563228839 +0100
@@ -11,7 +11,7 @@
 
 	fixup = search_exception_tables(instruction_pointer(regs));
 	if (fixup)
-		regs->pc = fixup->fixup;
+		regs->pc = (unsigned long)&fixup->fixup + fixup->fixup;
 
 	return fixup != NULL;
 }
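
The exception table entry now stores its fixup handler as a 32-bit offset
relative to the entry itself rather than as an absolute address, so the
table stays valid without relocation when the kernel image moves (note the
-pie link flag added in the arm64 Makefile hunk above); the handler
reconstructs the absolute target by adding the field's own address. A
compact model of such a self-relative table (struct and names are ours):

#include <stdint.h>
#include <stdio.h>

/* Self-relative entry: 'fixup' is the signed distance from the field
 * itself to the target, as in the patched fixup_exception(). */
struct exentry {
	int32_t fixup;
};

static char target[16];
static struct exentry entry;	/* static, so the offset fits in 32 bits */

static uintptr_t resolve(const struct exentry *e)
{
	return (uintptr_t)&e->fixup + e->fixup;
}

int main(void)
{
	entry.fixup = (int32_t)((uintptr_t)target - (uintptr_t)&entry.fixup);
	printf("resolved %p, target %p\n",
	       (void *)resolve(&entry), (void *)target);
	return 0;
}
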
diff -ruw linux-4.4.115/arch/arm64/mm/fault.c linux-4.4.115-fbx/arch/arm64/mm/fault.c
--- linux-4.4.115/arch/arm64/mm/fault.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/arch/arm64/mm/fault.c	2019-10-29 09:26:23.009196974 +0100
@@ -40,9 +40,35 @@
 #include <asm/system_misc.h>
 #include <asm/pgtable.h>
 #include <asm/tlbflush.h>
+#include <asm/edac.h>
+#include <soc/qcom/scm.h>
+
+#include <trace/events/exception.h>
 
 static const char *fault_name(unsigned int esr);
 
+#ifdef CONFIG_KPROBES
+static inline int notify_page_fault(struct pt_regs *regs, unsigned int esr)
+{
+	int ret = 0;
+
+	/* kprobe_running() needs smp_processor_id() */
+	if (!user_mode(regs)) {
+		preempt_disable();
+		if (kprobe_running() && kprobe_fault_handler(regs, esr))
+			ret = 1;
+		preempt_enable();
+	}
+
+	return ret;
+}
+#else
+static inline int notify_page_fault(struct pt_regs *regs, unsigned int esr)
+{
+	return 0;
+}
+#endif
+
 /*
  * Dump out the page tables associated with 'addr' in mm 'mm'.
  */
@@ -133,6 +159,11 @@
 }
 #endif
 
+static bool is_el1_instruction_abort(unsigned int esr)
+{
+	return ESR_ELx_EC(esr) == ESR_ELx_EC_IABT_CUR;
+}
+
 /*
  * The kernel tried to access some page that wasn't present.
  */
@@ -141,8 +172,9 @@
 {
 	/*
 	 * Are we prepared to handle this kernel fault?
+	 * We are almost certainly not prepared to handle instruction faults.
 	 */
-	if (fixup_exception(regs))
+	if (!is_el1_instruction_abort(esr) && fixup_exception(regs))
 		return;
 
 	/*
@@ -169,6 +201,8 @@
 {
 	struct siginfo si;
 
+	trace_user_fault(tsk, addr, esr);
+
 	if (unhandled_signal(tsk, sig) && show_unhandled_signals_ratelimited()) {
 		pr_info("%s[%d]: unhandled %s (%d) at 0x%08lx, esr 0x%03x\n",
 			tsk->comm, task_pid_nr(tsk), fault_name(esr), sig,
@@ -204,8 +238,6 @@
 #define VM_FAULT_BADMAP		0x010000
 #define VM_FAULT_BADACCESS	0x020000
 
-#define ESR_LNX_EXEC		(1 << 24)
-
 static int __do_page_fault(struct mm_struct *mm, unsigned long addr,
 			   unsigned int mm_flags, unsigned long vm_flags,
 			   struct task_struct *tsk)
@@ -244,6 +276,26 @@
 	return fault;
 }
 
+static inline bool is_permission_fault(unsigned int esr, struct pt_regs *regs)
+{
+	unsigned int ec       = ESR_ELx_EC(esr);
+	unsigned int fsc_type = esr & ESR_ELx_FSC_TYPE;
+
+	if (ec != ESR_ELx_EC_DABT_CUR && ec != ESR_ELx_EC_IABT_CUR)
+		return false;
+
+	if (system_uses_ttbr0_pan())
+		return fsc_type == ESR_ELx_FSC_FAULT &&
+			(regs->pstate & PSR_PAN_BIT);
+	else
+		return fsc_type == ESR_ELx_FSC_PERM;
+}
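
is_permission_fault() relies on two ESR_ELx fields: the exception class in
bits [31:26] (to restrict the check to same-EL data/instruction aborts) and
the fault status type in bits [5:2]. When TTBR0 PAN emulation is active, a
translation fault taken with PSR_PAN_BIT set also counts, because the
emulation unmaps TTBR0 entirely instead of flipping permission bits. A
quick decode of the fields involved (constants transcribed, slightly
simplified, from arm64's esr.h):

#include <stdint.h>
#include <stdio.h>

#define ESR_ELx_EC_SHIFT	26
#define ESR_ELx_EC(esr)		((esr) >> ESR_ELx_EC_SHIFT)
#define ESR_ELx_EC_IABT_CUR	0x21	/* instruction abort, same EL */
#define ESR_ELx_EC_DABT_CUR	0x25	/* data abort, same EL */
#define ESR_ELx_FSC_TYPE	0x3c	/* fault status class bits */
#define ESR_ELx_FSC_PERM	0x0c	/* permission fault */

int main(void)
{
	/* Example: a level-3 permission fault on a same-EL data access. */
	uint32_t esr = ((uint32_t)ESR_ELx_EC_DABT_CUR << ESR_ELx_EC_SHIFT)
			| 0x0f;

	printf("ec=%#x fsc_type=%#x permission=%s\n",
	       ESR_ELx_EC(esr), esr & ESR_ELx_FSC_TYPE,
	       (esr & ESR_ELx_FSC_TYPE) == ESR_ELx_FSC_PERM ? "yes" : "no");
	return 0;
}
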
+
+static bool is_el0_instruction_abort(unsigned int esr)
+{
+	return ESR_ELx_EC(esr) == ESR_ELx_EC_IABT_LOW;
+}
+
 static int __kprobes do_page_fault(unsigned long addr, unsigned int esr,
 				   struct pt_regs *regs)
 {
@@ -253,6 +305,9 @@
 	unsigned long vm_flags = VM_READ | VM_WRITE | VM_EXEC;
 	unsigned int mm_flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
 
+	if (notify_page_fault(regs, esr))
+		return 0;
+
 	tsk = current;
 	mm  = tsk->mm;
 
@@ -270,19 +325,21 @@
 	if (user_mode(regs))
 		mm_flags |= FAULT_FLAG_USER;
 
-	if (esr & ESR_LNX_EXEC) {
+	if (is_el0_instruction_abort(esr)) {
 		vm_flags = VM_EXEC;
-	} else if ((esr & ESR_ELx_WNR) && !(esr & ESR_ELx_CM)) {
+	} else if (((esr & ESR_ELx_WNR) && !(esr & ESR_ELx_CM)) ||
+			((esr & ESR_ELx_CM) && !(mm_flags & FAULT_FLAG_USER))) {
 		vm_flags = VM_WRITE;
 		mm_flags |= FAULT_FLAG_WRITE;
 	}
 
-	/*
-	 * PAN bit set implies the fault happened in kernel space, but not
-	 * in the arch's user access functions.
-	 */
-	if (IS_ENABLED(CONFIG_ARM64_PAN) && (regs->pstate & PSR_PAN_BIT))
-		goto no_context;
+	if (addr < USER_DS && is_permission_fault(esr, regs)) {
+		if (is_el1_instruction_abort(esr))
+			die("Attempting to execute userspace memory", regs, esr);
+
+		if (!search_exception_tables(regs->pc))
+			die("Accessing user space memory outside uaccess.h routines", regs, esr);
+	}
 
 	/*
 	 * As per x86, we may deadlock here. However, since the kernel only
@@ -399,6 +456,19 @@
 }
 
 /*
+ * TLB conflict aborts are already handled in EL2. This routine should
+ * return zero so that do_mem_abort() does not crash the kernel thinking
+ * the TLB conflict went unhandled.
+ */
+#ifdef CONFIG_QCOM_TLB_EL2_HANDLER
+static int do_tlb_conf_fault(unsigned long addr,
+				unsigned int esr,
+				struct pt_regs *regs)
+{
+	return 0;
+}
+#endif
+/*
  * First Level Translation Fault Handler
  *
  * We enter here because the first level page table doesn't contain a valid
@@ -431,10 +501,11 @@
  */
 static int do_bad(unsigned long addr, unsigned int esr, struct pt_regs *regs)
 {
+	arm64_check_cache_ecc(NULL);
 	return 1;
 }
 
-static struct fault_info {
+static const struct fault_info {
 	int	(*fn)(unsigned long addr, unsigned int esr, struct pt_regs *regs);
 	int	sig;
 	int	code;
@@ -488,7 +559,11 @@
 	{ do_bad,		SIGBUS,  0,		"unknown 45"			},
 	{ do_bad,		SIGBUS,  0,		"unknown 46"			},
 	{ do_bad,		SIGBUS,  0,		"unknown 47"			},
+#ifdef CONFIG_QCOM_TLB_EL2_HANDLER
+	{ do_tlb_conf_fault,	SIGBUS,  0,		"TLB conflict abort"		},
+#else
 	{ do_bad,		SIGBUS,  0,		"TLB conflict abort"		},
+#endif
 	{ do_bad,		SIGBUS,  0,		"unknown 49"			},
 	{ do_bad,		SIGBUS,  0,		"unknown 50"			},
 	{ do_bad,		SIGBUS,  0,		"unknown 51"			},
@@ -534,6 +609,22 @@
 	arm64_notify_die("", regs, &info, esr);
 }
 
+asmlinkage void __exception do_el0_ia_bp_hardening(unsigned long addr,
+						   unsigned int esr,
+						   struct pt_regs *regs)
+{
+	/*
+	 * We've taken an instruction abort from userspace and not yet
+	 * re-enabled IRQs. If the address is a kernel address, apply
+	 * BP hardening prior to enabling IRQs and pre-emption.
+	 */
+	if (addr > TASK_SIZE)
+		arm64_apply_bp_hardening();
+
+	local_irq_enable();
+	do_mem_abort(addr, esr, regs);
+}
+
 /*
  * Handle stack alignment exceptions.
  */
@@ -609,6 +700,7 @@
 
 	return 0;
 }
+NOKPROBE_SYMBOL(do_debug_exception);
 
 #ifdef CONFIG_ARM64_PAN
 int cpu_enable_pan(void *__unused)
@@ -624,3 +716,17 @@
 	return 0;
 }
 #endif /* CONFIG_ARM64_PAN */
+
+#ifdef CONFIG_ARM64_UAO
+/*
+ * Kernel threads have fs=KERNEL_DS by default, and don't need to call
+ * set_fs(); devtmpfs in particular relies on this behaviour.
+ * We need to enable the feature at runtime (instead of adding it to
+ * PSR_MODE_EL1h) as the feature may not be implemented by the cpu.
+ */
+int cpu_enable_uao(void *__unused)
+{
+	asm(SET_PSTATE_UAO(1));
+	return 0;
+}
+#endif /* CONFIG_ARM64_UAO */
diff -ruw linux-4.4.115/arch/arm64/mm/flush.c linux-4.4.115-fbx/arch/arm64/mm/flush.c
--- linux-4.4.115/arch/arm64/mm/flush.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/arch/arm64/mm/flush.c	2019-10-29 09:26:23.013197013 +0100
@@ -34,19 +34,24 @@
 		__flush_icache_all();
 }
 
-static void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
-				unsigned long uaddr, void *kaddr,
-				unsigned long len)
+static void sync_icache_aliases(void *kaddr, unsigned long len)
 {
-	if (vma->vm_flags & VM_EXEC) {
 		unsigned long addr = (unsigned long)kaddr;
+
 		if (icache_is_aliasing()) {
-			__flush_dcache_area(kaddr, len);
+		__clean_dcache_area_pou(kaddr, len);
 			__flush_icache_all();
 		} else {
 			flush_icache_range(addr, addr + len);
 		}
 	}
+
+static void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
+				unsigned long uaddr, void *kaddr,
+				unsigned long len)
+{
+	if (vma->vm_flags & VM_EXEC)
+		sync_icache_aliases(kaddr, len);
 }
 
 /*
@@ -74,13 +79,11 @@
 	if (!page_mapping(page))
 		return;
 
-	if (!test_and_set_bit(PG_dcache_clean, &page->flags)) {
-		__flush_dcache_area(page_address(page),
+	if (!test_and_set_bit(PG_dcache_clean, &page->flags))
+		sync_icache_aliases(page_address(page),
 				PAGE_SIZE << compound_order(page));
+	else if (icache_is_aivivt())
 		__flush_icache_all();
-	} else if (icache_is_aivivt()) {
-		__flush_icache_all();
-	}
 }
 
 /*
@@ -98,6 +101,7 @@
 /*
  * Additional functions defined in assembly.
  */
+EXPORT_SYMBOL(flush_cache_all);
 EXPORT_SYMBOL(flush_icache_range);
 
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
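/*
 * A hedged sketch of how a caller might use the sync_icache_aliases()
 * policy above, e.g. after writing instructions into an executable
 * buffer. The buffer and length are hypothetical; the helpers are the
 * ones the patch uses: clean the D-cache to the point of unification
 * plus a full I-cache invalidate on aliasing VIPT icaches, or a ranged
 * invalidate otherwise.
 */
static void example_sync_exec_buffer(void *buf, unsigned long len)
{
	unsigned long addr = (unsigned long)buf;

	if (icache_is_aliasing()) {
		__clean_dcache_area_pou(buf, len);
		__flush_icache_all();
	} else {
		flush_icache_range(addr, addr + len);
	}
}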
diff -ruw linux-4.4.115/arch/arm64/mm/init.c linux-4.4.115-fbx/arch/arm64/mm/init.c
--- linux-4.4.115/arch/arm64/mm/init.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/arch/arm64/mm/init.c	2019-10-29 09:26:23.013197013 +0100
@@ -34,8 +34,12 @@
 #include <linux/dma-contiguous.h>
 #include <linux/efi.h>
 #include <linux/swiotlb.h>
+#include <linux/mm.h>
 
+#include <asm/boot.h>
 #include <asm/fixmap.h>
+#include <asm/kasan.h>
+#include <asm/kernel-pgtable.h>
 #include <asm/memory.h>
 #include <asm/sections.h>
 #include <asm/setup.h>
@@ -45,7 +49,13 @@
 
 #include "mm.h"
 
-phys_addr_t memstart_addr __read_mostly = 0;
+/*
+ * We need to be able to catch inadvertent references to memstart_addr
+ * that occur (potentially in generic code) before arm64_memblock_init()
+ * executes, which assigns it its actual value. So use a default value
+ * that cannot be mistaken for a real physical address.
+ */
+s64 memstart_addr __read_mostly = -1;
 phys_addr_t arm64_dma_phys_limit __read_mostly;
 
 #ifdef CONFIG_BLK_DEV_INITRD
@@ -58,8 +68,8 @@
 	if (*endp == ',') {
 		size = memparse(endp + 1, NULL);
 
-		initrd_start = (unsigned long)__va(start);
-		initrd_end = (unsigned long)__va(start + size);
+		initrd_start = start;
+		initrd_end = start + size;
 	}
 	return 0;
 }
@@ -71,7 +81,7 @@
  * currently assumes that for memory starting above 4G, 32-bit devices will
  * use a DMA offset.
  */
-static phys_addr_t max_zone_dma_phys(void)
+static phys_addr_t __init max_zone_dma_phys(void)
 {
 	phys_addr_t offset = memblock_start_of_DRAM() & GENMASK_ULL(63, 32);
 	return min(offset + (1ULL << 32), memblock_end_of_DRAM());
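/*
 * A worked example of the max_zone_dma_phys() computation above, with
 * hypothetical values: when DRAM starts below 4 GiB the masked offset
 * is 0 and ZONE_DMA ends at the 4 GiB boundary; when DRAM starts at,
 * say, 0x80_0000_0000, the offset keeps bits [63:32] and ZONE_DMA
 * covers the first 4 GiB of that bank, matching the assumed 32-bit
 * device DMA offset.
 */
static phys_addr_t example_dma_limit(phys_addr_t dram_start,
				     phys_addr_t dram_end)
{
	phys_addr_t offset = dram_start & GENMASK_ULL(63, 32);

	return min(offset + (1ULL << 32), dram_end);
}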
@@ -118,19 +128,21 @@
 }
 
 #ifdef CONFIG_HAVE_ARCH_PFN_VALID
+#define PFN_MASK ((1UL << (64 - PAGE_SHIFT)) - 1)
+
 int pfn_valid(unsigned long pfn)
 {
-	return memblock_is_memory(pfn << PAGE_SHIFT);
+	return (pfn & PFN_MASK) == pfn && memblock_is_map_memory(pfn << PAGE_SHIFT);
 }
 EXPORT_SYMBOL(pfn_valid);
 #endif
 
 #ifndef CONFIG_SPARSEMEM
-static void arm64_memory_present(void)
+static void __init arm64_memory_present(void)
 {
 }
 #else
-static void arm64_memory_present(void)
+static void __init arm64_memory_present(void)
 {
 	struct memblock_region *reg;
 
@@ -141,6 +153,7 @@
 #endif
 
 static phys_addr_t memory_limit = (phys_addr_t)ULLONG_MAX;
+static phys_addr_t bootloader_memory_limit;
 
 /*
  * Limit the memory size that was specified via FDT.
@@ -159,16 +172,80 @@
 
 void __init arm64_memblock_init(void)
 {
+	const s64 linear_region_size = -(s64)PAGE_OFFSET;
+
+	/*
+	 * Ensure that the linear region takes up exactly half of the kernel
+	 * virtual address space. This way, we can distinguish a linear address
+	 * from a kernel/module/vmalloc address by testing a single bit.
+	 */
+	BUILD_BUG_ON(linear_region_size != BIT(VA_BITS - 1));
+
+	/*
+	 * Select a suitable value for the base of physical memory.
+	 */
+	memstart_addr = round_down(memblock_start_of_DRAM(),
+				   ARM64_MEMSTART_ALIGN);
+
+	/*
+	 * Remove the memory that we will not be able to cover with the
+	 * linear mapping. Take care not to clip the kernel which may be
+	 * high in memory.
+	 */
+	memblock_remove(max_t(u64, memstart_addr + linear_region_size,
+			__pa_symbol(_end)), ULLONG_MAX);
+	if (memstart_addr + linear_region_size < memblock_end_of_DRAM()) {
+		/* ensure that memstart_addr remains sufficiently aligned */
+		memstart_addr = round_up(memblock_end_of_DRAM() - linear_region_size,
+					 ARM64_MEMSTART_ALIGN);
+		memblock_remove(0, memstart_addr);
+	}
+
+	/*
+	 * Apply the memory limit if it was set. Since the kernel may be loaded
+	 * high up in memory, add back the kernel region that must be accessible
+	 * via the linear mapping.
+	 */
+	if (memory_limit != (phys_addr_t)ULLONG_MAX) {
+		/*
+		 * Save the bootloader-imposed memory limit before we
+		 * overwrite memblock.
+		 */
+		bootloader_memory_limit = memblock_end_of_DRAM();
 	memblock_enforce_memory_limit(memory_limit);
+		memblock_add(__pa_symbol(_text), (u64)(_end - _text));
+	}
+
+	if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) {
+		extern u16 memstart_offset_seed;
+		u64 range = linear_region_size -
+			    (memblock_end_of_DRAM() - memblock_start_of_DRAM());
+
+		/*
+		 * If the size of the linear region exceeds, by a sufficient
+		 * margin, the size of the region that the available physical
+		 * memory spans, randomize the linear region as well.
+		 */
+		if (memstart_offset_seed > 0 && range >= ARM64_MEMSTART_ALIGN) {
+			range = range / ARM64_MEMSTART_ALIGN + 1;
+			memstart_addr -= ARM64_MEMSTART_ALIGN *
+					 ((range * memstart_offset_seed) >> 16);
+		}
+	}
 
 	/*
 	 * Register the kernel text, kernel data, initrd, and initial
 	 * pagetables with memblock.
 	 */
-	memblock_reserve(__pa(_text), _end - _text);
+	memblock_reserve(__pa_symbol(_text), _end - _text);
 #ifdef CONFIG_BLK_DEV_INITRD
-	if (initrd_start)
-		memblock_reserve(__virt_to_phys(initrd_start), initrd_end - initrd_start);
+	if (initrd_start) {
+		memblock_reserve(initrd_start, initrd_end - initrd_start);
+
+		/* the generic initrd code expects virtual addresses */
+		initrd_start = __phys_to_virt(initrd_start);
+		initrd_end = __phys_to_virt(initrd_end);
+	}
 #endif
 
 	early_init_fdt_scan_reserved_mem();
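/*
 * Illustrative arithmetic for the CONFIG_RANDOMIZE_BASE block above,
 * under assumed values: the slack between the linear region and the
 * span of physical memory is divided into ARM64_MEMSTART_ALIGN-sized
 * steps, and the 16-bit seed selects one step uniformly; memstart_addr
 * is lowered by the resulting offset.
 */
static u64 example_memstart_offset(u64 slack, u16 seed, u64 align)
{
	u64 steps = slack / align + 1;

	return align * ((steps * (u64)seed) >> 16);
}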
@@ -302,35 +379,38 @@
 #ifdef CONFIG_KASAN
 		  "    kasan   : 0x%16lx - 0x%16lx   (%6ld GB)\n"
 #endif
+		  "    modules : 0x%16lx - 0x%16lx   (%6ld MB)\n"
 		  "    vmalloc : 0x%16lx - 0x%16lx   (%6ld GB)\n"
+		  "      .init : 0x%p" " - 0x%p" "   (%6ld KB)\n"
+		  "      .text : 0x%p" " - 0x%p" "   (%6ld KB)\n"
+		  "    .rodata : 0x%p" " - 0x%p" "   (%6ld KB)\n"
+		  "      .data : 0x%p" " - 0x%p" "   (%6ld KB)\n"
 #ifdef CONFIG_SPARSEMEM_VMEMMAP
 		  "    vmemmap : 0x%16lx - 0x%16lx   (%6ld GB maximum)\n"
 		  "              0x%16lx - 0x%16lx   (%6ld MB actual)\n"
 #endif
 		  "    fixed   : 0x%16lx - 0x%16lx   (%6ld KB)\n"
 		  "    PCI I/O : 0x%16lx - 0x%16lx   (%6ld MB)\n"
-		  "    modules : 0x%16lx - 0x%16lx   (%6ld MB)\n"
-		  "    memory  : 0x%16lx - 0x%16lx   (%6ld MB)\n"
-		  "      .init : 0x%p" " - 0x%p" "   (%6ld KB)\n"
-		  "      .text : 0x%p" " - 0x%p" "   (%6ld KB)\n"
-		  "      .data : 0x%p" " - 0x%p" "   (%6ld KB)\n",
+		  "    memory  : 0x%16lx - 0x%16lx   (%6ld MB)\n",
 #ifdef CONFIG_KASAN
 		  MLG(KASAN_SHADOW_START, KASAN_SHADOW_END),
 #endif
+		  MLM(MODULES_VADDR, MODULES_END),
 		  MLG(VMALLOC_START, VMALLOC_END),
+		  MLK_ROUNDUP(__init_begin, __init_end),
+		  MLK_ROUNDUP(_text, _etext),
+		  MLK_ROUNDUP(__start_rodata, __init_begin),
+		  MLK_ROUNDUP(_sdata, _edata),
 #ifdef CONFIG_SPARSEMEM_VMEMMAP
 		  MLG(VMEMMAP_START,
 		      VMEMMAP_START + VMEMMAP_SIZE),
-		  MLM((unsigned long)virt_to_page(PAGE_OFFSET),
+		  MLM((unsigned long)phys_to_page(memblock_start_of_DRAM()),
 		      (unsigned long)virt_to_page(high_memory)),
 #endif
 		  MLK(FIXADDR_START, FIXADDR_TOP),
 		  MLM(PCI_IO_START, PCI_IO_END),
-		  MLM(MODULES_VADDR, MODULES_END),
-		  MLM(PAGE_OFFSET, (unsigned long)high_memory),
-		  MLK_ROUNDUP(__init_begin, __init_end),
-		  MLK_ROUNDUP(_text, _etext),
-		  MLK_ROUNDUP(_sdata, _edata));
+		  MLM(__phys_to_virt(memblock_start_of_DRAM()),
+		      (unsigned long)high_memory));
 
 #undef MLK
 #undef MLM
@@ -356,11 +436,15 @@
 	}
 }
 
+static inline void poison_init_mem(void *s, size_t count)
+{
+	memset(s, 0, count);
+}
+
 void free_initmem(void)
 {
-	fixup_init();
 	free_initmem_default(0);
-	free_alternatives_memory();
+	fixup_init();
 }
 
 #ifdef CONFIG_BLK_DEV_INITRD
@@ -381,3 +465,174 @@
 
 __setup("keepinitrd", keepinitrd_setup);
 #endif
+
+#ifdef CONFIG_KERNEL_TEXT_RDONLY
+void set_kernel_text_ro(void)
+{
+	unsigned long start = PFN_ALIGN(_stext);
+	unsigned long end = PFN_ALIGN(_etext);
+
+	/*
+	 * Set the kernel identity mapping for text RO.
+	 */
+	set_memory_ro(start, (end - start) >> PAGE_SHIFT);
+}
+#endif
+
+/*
+ * Dump out memory limit information on panic.
+ */
+static int dump_mem_limit(struct notifier_block *self, unsigned long v, void *p)
+{
+	if (memory_limit != (phys_addr_t)ULLONG_MAX) {
+		pr_emerg("Memory Limit: %llu MB\n", memory_limit >> 20);
+	} else {
+		pr_emerg("Memory Limit: none\n");
+	}
+	return 0;
+}
+
+static struct notifier_block mem_limit_notifier = {
+	.notifier_call = dump_mem_limit,
+};
+
+static int __init register_mem_limit_dumper(void)
+{
+	atomic_notifier_chain_register(&panic_notifier_list,
+				       &mem_limit_notifier);
+	return 0;
+}
+__initcall(register_mem_limit_dumper);
+
+#ifdef CONFIG_MEMORY_HOTPLUG
+int arch_add_memory(int nid, u64 start, u64 size, bool for_device)
+{
+	pg_data_t *pgdat;
+	struct zone *zone;
+	unsigned long start_pfn = start >> PAGE_SHIFT;
+	unsigned long nr_pages = size >> PAGE_SHIFT;
+	unsigned long end_pfn = start_pfn + nr_pages;
+	unsigned long max_sparsemem_pfn = 1UL << (MAX_PHYSMEM_BITS-PAGE_SHIFT);
+	int ret;
+
+	if (end_pfn > max_sparsemem_pfn) {
+		pr_err("end_pfn too big\n");
+		return -1;
+	}
+	hotplug_paging(start, size);
+
+	/*
+	 * Mark the first page in the range as unusable. This is needed
+	 * because __add_section (within __add_pages) wants pfn_valid
+	 * of it to be false, and on arm64 pfn_valid() is implemented by
+	 * just checking the nomap flag for existing blocks.
+	 *
+	 * A small trick here is that __add_section() requires only
+	 * phys_start_pfn (that is, the first pfn of a section) to be
+	 * invalid. Regardless of whether the function's author assumed
+	 * that all pfns within a section are either all valid or all
+	 * invalid, this allows us to avoid looping twice (once here,
+	 * and again when memblock_clear_nomap() is called) through all
+	 * pfns of the section, and to modify only a single pfn. Thanks
+	 * to that, in __add_zone() only this very first pfn is skipped
+	 * and the corresponding page is not flagged reserved; it is
+	 * therefore enough to correct this setup for that pfn alone.
+	 *
+	 * When arch_add_memory() returns, walk_memory_range() is called
+	 * with the online_memory_block() callback, whose execution
+	 * eventually reaches memory_block_action(), where again only
+	 * the first pfn of a memory block is checked for the reserved
+	 * flag. Above it was the first pfn of a section, here it is a
+	 * block, but
+	 * (drivers/base/memory.c):
+	 *     sections_per_block = block_sz / MIN_MEMORY_BLOCK_SIZE;
+	 * (include/linux/memory.h):
+	 *     #define MIN_MEMORY_BLOCK_SIZE     (1UL << SECTION_SIZE_BITS)
+	 * so we can consider block and section equivalently
+	 */
+	memblock_mark_nomap(start, 1<<PAGE_SHIFT);
+
+	pgdat = NODE_DATA(nid);
+
+	zone = pgdat->node_zones +
+		zone_for_memory(nid, start, size, ZONE_NORMAL, for_device);
+	ret = __add_pages(nid, zone, start_pfn, nr_pages);
+
+	/*
+	 * Make the pages usable after they have been added.
+	 * This will make pfn_valid return true
+	 */
+	memblock_clear_nomap(start, 1<<PAGE_SHIFT);
+
+	/*
+	 * This is a hack to avoid having to mix arch specific code
+	 * into arch independent code. SetPageReserved is supposed
+	 * to be called by __add_zone (within __add_section, within
+	 * __add_pages). However, when it is called there, it assumes that
+	 * pfn_valid returns true.  For the way pfn_valid is implemented
+	 * in arm64 (a check on the nomap flag), the only way to make
+	 * this evaluate true inside __add_zone is to clear the nomap
+	 * flags of blocks in architecture independent code.
+	 *
+	 * To avoid this, we set the Reserved flag here after we cleared
+	 * the nomap flag in the line above.
+	 */
+	SetPageReserved(pfn_to_page(start_pfn));
+
+	if (ret)
+		pr_warn("%s: Problem encountered in __add_pages() ret=%d\n",
+			__func__, ret);
+
+	return ret;
+}
+
+#ifdef CONFIG_MEMORY_HOTREMOVE
+static void kernel_physical_mapping_remove(unsigned long start,
+	unsigned long end)
+{
+	start = (unsigned long)__va(start);
+	end = (unsigned long)__va(end);
+
+	remove_pagetable(start, end, true);
+}
+
+int arch_remove_memory(u64 start, u64 size)
+{
+	unsigned long start_pfn = start >> PAGE_SHIFT;
+	unsigned long nr_pages = size >> PAGE_SHIFT;
+	struct page *page = pfn_to_page(start_pfn);
+	struct zone *zone;
+	int ret = 0;
+
+	zone = page_zone(page);
+	ret = __remove_pages(zone, start_pfn, nr_pages);
+	WARN_ON_ONCE(ret);
+
+	kernel_physical_mapping_remove(start, start + size);
+
+	return ret;
+}
+
+#endif /* CONFIG_MEMORY_HOTREMOVE */
+static int arm64_online_page(struct page *page)
+{
+	unsigned long target_pfn = page_to_pfn(page);
+	unsigned long limit = __phys_to_pfn(bootloader_memory_limit);
+
+	if (target_pfn >= limit)
+		return -EINVAL;
+
+	__online_page_set_limits(page);
+	__online_page_increment_counters(page);
+	__online_page_free(page);
+
+	return 0;
+}
+
+static int __init arm64_memory_hotplug_init(void)
+{
+	set_online_page_callback(&arm64_online_page);
+	return 0;
+}
+subsys_initcall(arm64_memory_hotplug_init);
+#endif /* CONFIG_MEMORY_HOTPLUG */
diff -ruw linux-4.4.115/arch/arm64/mm/mmap.c linux-4.4.115-fbx/arch/arm64/mm/mmap.c
--- linux-4.4.115/arch/arm64/mm/mmap.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/arch/arm64/mm/mmap.c	2019-01-22 16:16:21.563228839 +0100
@@ -51,8 +51,12 @@
 {
 	unsigned long rnd;
 
-	rnd = (unsigned long)get_random_int() & STACK_RND_MASK;
-
+#ifdef CONFIG_COMPAT
+	if (test_thread_flag(TIF_32BIT))
+		rnd = get_random_long() & ((1UL << mmap_rnd_compat_bits) - 1);
+	else
+#endif
+		rnd = get_random_long() & ((1UL << mmap_rnd_bits) - 1);
 	return rnd << PAGE_SHIFT;
 }
 
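/*
 * A hedged sketch of the entropy implied by mmap_rnd() above: with an
 * assumed mmap_rnd_bits of 18 and 4 KiB pages, the random offset spans
 * 2^18 pages, i.e. a 1 GiB window for the mmap base; compat tasks use
 * mmap_rnd_compat_bits instead.
 */
static unsigned long example_mmap_window(unsigned int rnd_bits)
{
	return (1UL << rnd_bits) << PAGE_SHIFT;	/* window size in bytes */
}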
diff -ruw linux-4.4.115/arch/arm64/mm/mmu.c linux-4.4.115-fbx/arch/arm64/mm/mmu.c
--- linux-4.4.115/arch/arm64/mm/mmu.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/arch/arm64/mm/mmu.c	2019-10-29 09:26:23.013197013 +0100
@@ -29,9 +29,14 @@
 #include <linux/io.h>
 #include <linux/slab.h>
 #include <linux/stop_machine.h>
+#include <linux/dma-contiguous.h>
+#include <linux/cma.h>
+#include <linux/mm.h>
 
+#include <asm/barrier.h>
 #include <asm/cputype.h>
 #include <asm/fixmap.h>
+#include <asm/kasan.h>
 #include <asm/kernel-pgtable.h>
 #include <asm/sections.h>
 #include <asm/setup.h>
@@ -44,13 +49,22 @@
 
 u64 idmap_t0sz = TCR_T0SZ(VA_BITS);
 
+u64 kimage_voffset __read_mostly;
+EXPORT_SYMBOL(kimage_voffset);
+
 /*
  * Empty_zero_page is a special page that is used for zero-initialized data
  * and COW.
  */
-struct page *empty_zero_page;
+unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)] __page_aligned_bss;
 EXPORT_SYMBOL(empty_zero_page);
 
+static pte_t bm_pte[PTRS_PER_PTE] __page_aligned_bss;
+static pmd_t bm_pmd[PTRS_PER_PMD] __page_aligned_bss __maybe_unused;
+static pud_t bm_pud[PTRS_PER_PUD] __page_aligned_bss __maybe_unused;
+
+static bool dma_overlap(phys_addr_t start, phys_addr_t end);
+
 pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
 			      unsigned long size, pgprot_t vma_prot)
 {
@@ -62,16 +76,30 @@
 }
 EXPORT_SYMBOL(phys_mem_access_prot);
 
-static void __init *early_alloc(unsigned long sz)
+static phys_addr_t __init early_pgtable_alloc(void)
 {
 	phys_addr_t phys;
 	void *ptr;
 
-	phys = memblock_alloc(sz, sz);
+	phys = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
 	BUG_ON(!phys);
-	ptr = __va(phys);
-	memset(ptr, 0, sz);
-	return ptr;
+
+	/*
+	 * The FIX_{PGD,PUD,PMD} slots may be in active use, but the FIX_PTE
+	 * slot will be free, so we can (ab)use the FIX_PTE slot to initialise
+	 * any level of table.
+	 */
+	ptr = pte_set_fixmap(phys);
+
+	memset(ptr, 0, PAGE_SIZE);
+
+	/*
+	 * Implicit barriers also ensure the zeroed page is visible to the page
+	 * table walker
+	 */
+	pte_clear_fixmap();
+
+	return phys;
 }
 
 /*
@@ -95,24 +123,30 @@
 static void alloc_init_pte(pmd_t *pmd, unsigned long addr,
 				  unsigned long end, unsigned long pfn,
 				  pgprot_t prot,
-				  void *(*alloc)(unsigned long size))
+				  phys_addr_t (*pgtable_alloc)(void))
 {
 	pte_t *pte;
 
 	if (pmd_none(*pmd) || pmd_sect(*pmd)) {
-		pte = alloc(PTRS_PER_PTE * sizeof(pte_t));
+		phys_addr_t pte_phys;
+		BUG_ON(!pgtable_alloc);
+		pte_phys = pgtable_alloc();
+		pte = pte_set_fixmap(pte_phys);
 		if (pmd_sect(*pmd))
 			split_pmd(pmd, pte);
-		__pmd_populate(pmd, __pa(pte), PMD_TYPE_TABLE);
+		__pmd_populate(pmd, pte_phys, PMD_TYPE_TABLE);
 		flush_tlb_all();
+		pte_clear_fixmap();
 	}
 	BUG_ON(pmd_bad(*pmd));
 
-	pte = pte_offset_kernel(pmd, addr);
+	pte = pte_set_fixmap_offset(pmd, addr);
 	do {
 		set_pte(pte, pfn_pte(pfn, prot));
 		pfn++;
 	} while (pte++, addr += PAGE_SIZE, addr != end);
+
+	pte_clear_fixmap();
 }
 
 static void split_pud(pud_t *old_pud, pmd_t *pmd)
@@ -127,10 +161,29 @@
 	} while (pmd++, i++, i < PTRS_PER_PMD);
 }
 
-static void alloc_init_pmd(struct mm_struct *mm, pud_t *pud,
-				  unsigned long addr, unsigned long end,
+#ifdef CONFIG_DEBUG_PAGEALLOC
+static bool block_mappings_allowed(phys_addr_t (*pgtable_alloc)(void))
+{
+
+	/*
+	 * If debug_page_alloc is enabled we must map the linear map
+	 * using pages. However, other mappings created by
+	 * create_mapping_noalloc must use sections in some cases. Allow
+	 * sections to be used in those cases, where no pgtable_alloc
+	 * function is provided.
+	 */
+	return !pgtable_alloc || !debug_pagealloc_enabled();
+}
+#else
+static bool block_mappings_allowed(phys_addr_t (*pgtable_alloc)(void))
+{
+	return true;
+}
+#endif
+
+static void alloc_init_pmd(pud_t *pud, unsigned long addr, unsigned long end,
 				  phys_addr_t phys, pgprot_t prot,
-				  void *(*alloc)(unsigned long size))
+				  phys_addr_t (*pgtable_alloc)(void))
 {
 	pmd_t *pmd;
 	unsigned long next;
@@ -139,7 +192,10 @@
 	 * Check for initial section mappings in the pgd/pud and remove them.
 	 */
 	if (pud_none(*pud) || pud_sect(*pud)) {
-		pmd = alloc(PTRS_PER_PMD * sizeof(pmd_t));
+		phys_addr_t pmd_phys;
+		BUG_ON(!pgtable_alloc);
+		pmd_phys = pgtable_alloc();
+		pmd = pmd_set_fixmap(pmd_phys);
 		if (pud_sect(*pud)) {
 			/*
 			 * need to have the 1G of mappings continue to be
@@ -147,19 +203,21 @@
 			 */
 			split_pud(pud, pmd);
 		}
-		pud_populate(mm, pud, pmd);
+		__pud_populate(pud, pmd_phys, PUD_TYPE_TABLE);
 		flush_tlb_all();
+		pmd_clear_fixmap();
 	}
 	BUG_ON(pud_bad(*pud));
 
-	pmd = pmd_offset(pud, addr);
+	pmd = pmd_set_fixmap_offset(pud, addr);
 	do {
 		next = pmd_addr_end(addr, end);
 		/* try section mapping first */
-		if (((addr | next | phys) & ~SECTION_MASK) == 0) {
+		if (((addr | next | phys) & ~SECTION_MASK) == 0 &&
+		      block_mappings_allowed(pgtable_alloc) &&
+		      !dma_overlap(phys, phys + next - addr)) {
 			pmd_t old_pmd =*pmd;
-			set_pmd(pmd, __pmd(phys |
-					   pgprot_val(mk_sect_prot(prot))));
+			pmd_set_huge(pmd, phys, prot);
 			/*
 			 * Check for previous table entries created during
 			 * boot (__create_page_tables) and flush them.
@@ -167,17 +225,19 @@
 			if (!pmd_none(old_pmd)) {
 				flush_tlb_all();
 				if (pmd_table(old_pmd)) {
-					phys_addr_t table = __pa(pte_offset_map(&old_pmd, 0));
+					phys_addr_t table = pmd_page_paddr(old_pmd);
 					if (!WARN_ON_ONCE(slab_is_available()))
 						memblock_free(table, PAGE_SIZE);
 				}
 			}
 		} else {
 			alloc_init_pte(pmd, addr, next, __phys_to_pfn(phys),
-				       prot, alloc);
+				       prot, pgtable_alloc);
 		}
 		phys += next - addr;
 	} while (pmd++, addr = next, addr != end);
+
+	pmd_clear_fixmap();
 }
 
 static inline bool use_1G_block(unsigned long addr, unsigned long next,
@@ -192,31 +252,33 @@
 	return true;
 }
 
-static void alloc_init_pud(struct mm_struct *mm, pgd_t *pgd,
-				  unsigned long addr, unsigned long end,
+static void alloc_init_pud(pgd_t *pgd, unsigned long addr, unsigned long end,
 				  phys_addr_t phys, pgprot_t prot,
-				  void *(*alloc)(unsigned long size))
+				  phys_addr_t (*pgtable_alloc)(void))
 {
 	pud_t *pud;
 	unsigned long next;
 
 	if (pgd_none(*pgd)) {
-		pud = alloc(PTRS_PER_PUD * sizeof(pud_t));
-		pgd_populate(mm, pgd, pud);
+		phys_addr_t pud_phys;
+		BUG_ON(!pgtable_alloc);
+		pud_phys = pgtable_alloc();
+		__pgd_populate(pgd, pud_phys, PUD_TYPE_TABLE);
 	}
 	BUG_ON(pgd_bad(*pgd));
 
-	pud = pud_offset(pgd, addr);
+	pud = pud_set_fixmap_offset(pgd, addr);
 	do {
 		next = pud_addr_end(addr, end);
 
 		/*
 		 * For 4K granule only, attempt to put down a 1GB block
 		 */
-		if (use_1G_block(addr, next, phys)) {
+		if (use_1G_block(addr, next, phys) &&
+		    block_mappings_allowed(pgtable_alloc) &&
+		    !dma_overlap(phys, phys + next - addr)) {
 			pud_t old_pud = *pud;
-			set_pud(pud, __pud(phys |
-					   pgprot_val(mk_sect_prot(prot))));
+			pud_set_huge(pud, phys, prot);
 
 			/*
 			 * If we have an old value for a pud, it will
@@ -228,51 +290,74 @@
 			if (!pud_none(old_pud)) {
 				flush_tlb_all();
 				if (pud_table(old_pud)) {
-					phys_addr_t table = __pa(pmd_offset(&old_pud, 0));
+					phys_addr_t table = pud_page_paddr(old_pud);
 					if (!WARN_ON_ONCE(slab_is_available()))
 						memblock_free(table, PAGE_SIZE);
 				}
 			}
 		} else {
-			alloc_init_pmd(mm, pud, addr, next, phys, prot, alloc);
+			alloc_init_pmd(pud, addr, next, phys, prot,
+				       pgtable_alloc);
 		}
 		phys += next - addr;
 	} while (pud++, addr = next, addr != end);
+
+	pud_clear_fixmap();
 }
 
 /*
  * Create the page directory entries and any necessary page tables for the
  * mapping specified by 'md'.
  */
-static void  __create_mapping(struct mm_struct *mm, pgd_t *pgd,
-				    phys_addr_t phys, unsigned long virt,
+static void init_pgd(pgd_t *pgd, phys_addr_t phys, unsigned long virt,
 				    phys_addr_t size, pgprot_t prot,
-				    void *(*alloc)(unsigned long size))
+				    phys_addr_t (*pgtable_alloc)(void))
 {
 	unsigned long addr, length, end, next;
 
+	/*
+	 * If the virtual and physical address don't have the same offset
+	 * within a page, we cannot map the region as the caller expects.
+	 */
+	if (WARN_ON((phys ^ virt) & ~PAGE_MASK))
+		return;
+
+	phys &= PAGE_MASK;
 	addr = virt & PAGE_MASK;
 	length = PAGE_ALIGN(size + (virt & ~PAGE_MASK));
 
 	end = addr + length;
 	do {
 		next = pgd_addr_end(addr, end);
-		alloc_init_pud(mm, pgd, addr, next, phys, prot, alloc);
+		alloc_init_pud(pgd, addr, next, phys, prot, pgtable_alloc);
 		phys += next - addr;
 	} while (pgd++, addr = next, addr != end);
 }
 
-static void *late_alloc(unsigned long size)
+static phys_addr_t late_pgtable_alloc(void)
 {
-	void *ptr;
-
-	BUG_ON(size > PAGE_SIZE);
-	ptr = (void *)__get_free_page(PGALLOC_GFP);
+	void *ptr = (void *)__get_free_page(PGALLOC_GFP);
 	BUG_ON(!ptr);
-	return ptr;
+
+	/* Ensure the zeroed page is visible to the page table walker */
+	dsb(ishst);
+	return __pa(ptr);
 }
 
-static void __init create_mapping(phys_addr_t phys, unsigned long virt,
+static void __create_pgd_mapping(pgd_t *pgdir, phys_addr_t phys,
+				 unsigned long virt, phys_addr_t size,
+				 pgprot_t prot,
+				 phys_addr_t (*alloc)(void))
+{
+	init_pgd(pgd_offset_raw(pgdir, virt), phys, virt, size, prot, alloc);
+}
+
+/*
+ * This function can only be used to modify existing table entries,
+ * without allocating new levels of table. Note that this permits the
+ * creation of new section or page entries.
+ */
+static void __init create_mapping_noalloc(phys_addr_t phys, unsigned long virt,
 				  phys_addr_t size, pgprot_t prot)
 {
 	if (virt < VMALLOC_START) {
@@ -280,16 +365,16 @@
 			&phys, virt);
 		return;
 	}
-	__create_mapping(&init_mm, pgd_offset_k(virt & PAGE_MASK), phys, virt,
-			 size, prot, early_alloc);
+	__create_pgd_mapping(init_mm.pgd, phys, virt, size, prot,
+			     NULL);
 }
 
 void __init create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys,
 			       unsigned long virt, phys_addr_t size,
 			       pgprot_t prot)
 {
-	__create_mapping(mm, pgd_offset(mm, virt), phys, virt, size, prot,
-				late_alloc);
+	__create_pgd_mapping(mm->pgd, phys, virt, size, prot,
+			     late_pgtable_alloc);
 }
 
 static void create_mapping_late(phys_addr_t phys, unsigned long virt,
@@ -301,69 +386,57 @@
 		return;
 	}
 
-	return __create_mapping(&init_mm, pgd_offset_k(virt & PAGE_MASK),
-				phys, virt, size, prot, late_alloc);
+	__create_pgd_mapping(init_mm.pgd, phys, virt, size, prot,
+			     late_pgtable_alloc);
 }
 
-#ifdef CONFIG_DEBUG_RODATA
-static void __init __map_memblock(phys_addr_t start, phys_addr_t end)
+static void __init __map_memblock(pgd_t *pgd, phys_addr_t start, phys_addr_t end)
 {
+	unsigned long kernel_start = __pa_symbol(_text);
+	unsigned long kernel_end = __pa_symbol(__init_begin);
+
 	/*
-	 * Set up the executable regions using the existing section mappings
-	 * for now. This will get more fine grained later once all memory
-	 * is mapped
+	 * Take care not to create a writable alias for the
+	 * read-only text and rodata sections of the kernel image.
 	 */
-	unsigned long kernel_x_start = round_down(__pa(_stext), SWAPPER_BLOCK_SIZE);
-	unsigned long kernel_x_end = round_up(__pa(__init_end), SWAPPER_BLOCK_SIZE);
 
-	if (end < kernel_x_start) {
-		create_mapping(start, __phys_to_virt(start),
-			end - start, PAGE_KERNEL);
-	} else if (start >= kernel_x_end) {
-		create_mapping(start, __phys_to_virt(start),
-			end - start, PAGE_KERNEL);
-	} else {
-		if (start < kernel_x_start)
-			create_mapping(start, __phys_to_virt(start),
-				kernel_x_start - start,
-				PAGE_KERNEL);
-		create_mapping(kernel_x_start,
-				__phys_to_virt(kernel_x_start),
-				kernel_x_end - kernel_x_start,
-				PAGE_KERNEL_EXEC);
-		if (kernel_x_end < end)
-			create_mapping(kernel_x_end,
-				__phys_to_virt(kernel_x_end),
-				end - kernel_x_end,
-				PAGE_KERNEL);
+	/* No overlap with the kernel text/rodata */
+	if (end < kernel_start || start >= kernel_end) {
+		__create_pgd_mapping(pgd, start, __phys_to_virt(start),
+				     end - start, PAGE_KERNEL,
+				     early_pgtable_alloc);
+		return;
 	}
 
+	/*
+	 * This block overlaps the kernel text/rodata mappings.
+	 * Map the portion(s) which don't overlap.
+	 */
+	if (start < kernel_start)
+		__create_pgd_mapping(pgd, start,
+				     __phys_to_virt(start),
+				     kernel_start - start, PAGE_KERNEL,
+				     early_pgtable_alloc);
+	if (kernel_end < end)
+		__create_pgd_mapping(pgd, kernel_end,
+				     __phys_to_virt(kernel_end),
+				     end - kernel_end, PAGE_KERNEL,
+				     early_pgtable_alloc);
+
+	/*
+	 * Map the linear alias of the [_text, __init_begin) interval as
+	 * read-only/non-executable. This makes the contents of the
+	 * region accessible to subsystems such as hibernate, but
+	 * protects it from inadvertent modification or execution.
+	 */
+	__create_pgd_mapping(pgd, kernel_start, __phys_to_virt(kernel_start),
+			     kernel_end - kernel_start, PAGE_KERNEL_RO,
+			     early_pgtable_alloc);
 }
-#else
-static void __init __map_memblock(phys_addr_t start, phys_addr_t end)
-{
-	create_mapping(start, __phys_to_virt(start), end - start,
-			PAGE_KERNEL_EXEC);
-}
-#endif
 
-static void __init map_mem(void)
+static void __init map_mem(pgd_t *pgd)
 {
 	struct memblock_region *reg;
-	phys_addr_t limit;
-
-	/*
-	 * Temporarily limit the memblock range. We need to do this as
-	 * create_mapping requires puds, pmds and ptes to be allocated from
-	 * memory addressable from the initial direct kernel mapping.
-	 *
-	 * The initial direct kernel mapping, located at swapper_pg_dir, gives
-	 * us PUD_SIZE (with SECTION maps) or PMD_SIZE (without SECTION maps,
-	 * memory starting from PHYS_OFFSET (which must be aligned to 2MB as
-	 * per Documentation/arm64/booting.txt).
-	 */
-	limit = PHYS_OFFSET + SWAPPER_INIT_MAP_SIZE;
-	memblock_set_current_limit(limit);
 
 	/* map all the memory banks */
 	for_each_memblock(memory, reg) {
@@ -372,70 +445,160 @@
 
 		if (start >= end)
 			break;
+		if (memblock_is_nomap(reg))
+			continue;
+
+		__map_memblock(pgd, start, end);
+	}
+}
+
+void mark_rodata_ro(void)
+{
+	unsigned long section_size;
 
-		if (ARM64_SWAPPER_USES_SECTION_MAPS) {
+	section_size = (unsigned long)_etext - (unsigned long)_text;
+	create_mapping_late(__pa_symbol(_text), (unsigned long)_text,
+			    section_size, PAGE_KERNEL_ROX);
 			/*
-			 * For the first memory bank align the start address and
-			 * current memblock limit to prevent create_mapping() from
-			 * allocating pte page tables from unmapped memory. With
-			 * the section maps, if the first block doesn't end on section
-			 * size boundary, create_mapping() will try to allocate a pte
-			 * page, which may be returned from an unmapped area.
-			 * When section maps are not used, the pte page table for the
-			 * current limit is already present in swapper_pg_dir.
+	 * mark .rodata as read only. Use __init_begin rather than __end_rodata
+	 * to cover NOTES and EXCEPTION_TABLE.
 			 */
-			if (start < limit)
-				start = ALIGN(start, SECTION_SIZE);
-			if (end < limit) {
-				limit = end & SECTION_MASK;
-				memblock_set_current_limit(limit);
+	section_size = (unsigned long)__init_begin - (unsigned long)__start_rodata;
+	create_mapping_late(__pa_symbol(__start_rodata),
+			    (unsigned long)__start_rodata,
+			    section_size, PAGE_KERNEL_RO);
 			}
+
+void fixup_init(void)
+{
+	/*
+	 * Unmap the __init region but leave the VM area in place. This
+	 * prevents the region from being reused for kernel modules, which
+	 * is not supported by kallsyms.
+	 */
+	unmap_kernel_range((u64)__init_begin, (u64)(__init_end - __init_begin));
 		}
-		__map_memblock(start, end);
-	}
 
-	/* Limit no longer required. */
-	memblock_set_current_limit(MEMBLOCK_ALLOC_ANYWHERE);
+static void __init map_kernel_segment(pgd_t *pgd, void *va_start, void *va_end,
+				      pgprot_t prot, struct vm_struct *vma)
+{
+	phys_addr_t pa_start = __pa_symbol(va_start);
+	unsigned long size = va_end - va_start;
+
+	BUG_ON(!PAGE_ALIGNED(pa_start));
+	BUG_ON(!PAGE_ALIGNED(size));
+
+	__create_pgd_mapping(pgd, pa_start, (unsigned long)va_start, size, prot,
+			     early_pgtable_alloc);
+
+	vma->addr	= va_start;
+	vma->phys_addr	= pa_start;
+	vma->size	= size;
+	vma->flags	= VM_MAP;
+	vma->caller	= __builtin_return_address(0);
+
+	vm_area_add_early(vma);
 }
 
-static void __init fixup_executable(void)
+#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
+static int __init map_entry_trampoline(void)
 {
-#ifdef CONFIG_DEBUG_RODATA
-	/* now that we are actually fully mapped, make the start/end more fine grained */
-	if (!IS_ALIGNED((unsigned long)_stext, SWAPPER_BLOCK_SIZE)) {
-		unsigned long aligned_start = round_down(__pa(_stext),
-							 SWAPPER_BLOCK_SIZE);
+	extern char __entry_tramp_text_start[];
+
+	pgprot_t prot = PAGE_KERNEL_EXEC;
+	phys_addr_t pa_start = __pa_symbol(__entry_tramp_text_start);
+
+	/* The trampoline is always mapped and can therefore be global */
+	pgprot_val(prot) &= ~PTE_NG;
+
+	/* Map only the text into the trampoline page table */
+	memset(tramp_pg_dir, 0, PGD_SIZE);
+	__create_pgd_mapping(tramp_pg_dir, pa_start, TRAMP_VALIAS, PAGE_SIZE,
+			     prot, late_pgtable_alloc);
+
+	/* Map both the text and data into the kernel page table */
+	__set_fixmap(FIX_ENTRY_TRAMP_TEXT, pa_start, prot);
+	if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) {
+		extern char __entry_tramp_data_start[];
 
-		create_mapping(aligned_start, __phys_to_virt(aligned_start),
-				__pa(_stext) - aligned_start,
-				PAGE_KERNEL);
+		__set_fixmap(FIX_ENTRY_TRAMP_DATA,
+			     __pa_symbol(__entry_tramp_data_start),
+			     PAGE_KERNEL_RO);
 	}
 
-	if (!IS_ALIGNED((unsigned long)__init_end, SWAPPER_BLOCK_SIZE)) {
-		unsigned long aligned_end = round_up(__pa(__init_end),
-							  SWAPPER_BLOCK_SIZE);
-		create_mapping(__pa(__init_end), (unsigned long)__init_end,
-				aligned_end - __pa(__init_end),
-				PAGE_KERNEL);
+	return 0;
 	}
+core_initcall(map_entry_trampoline);
 #endif
-}
 
-#ifdef CONFIG_DEBUG_RODATA
-void mark_rodata_ro(void)
+/*
+ * Create fine-grained mappings for the kernel.
+ */
+static void __init map_kernel(pgd_t *pgd)
 {
-	create_mapping_late(__pa(_stext), (unsigned long)_stext,
-				(unsigned long)_etext - (unsigned long)_stext,
-				PAGE_KERNEL_ROX);
+	static struct vm_struct vmlinux_text, vmlinux_rodata, vmlinux_init, vmlinux_data;
+
+	map_kernel_segment(pgd, _text, _etext, PAGE_KERNEL_EXEC, &vmlinux_text);
+	map_kernel_segment(pgd, __start_rodata, __init_begin, PAGE_KERNEL, &vmlinux_rodata);
+	map_kernel_segment(pgd, __init_begin, __init_end, PAGE_KERNEL_EXEC,
+			   &vmlinux_init);
+	map_kernel_segment(pgd, _data, _end, PAGE_KERNEL, &vmlinux_data);
 
+	if (!pgd_val(*pgd_offset_raw(pgd, FIXADDR_START))) {
+		/*
+		 * The fixmap falls in a separate pgd to the kernel, and doesn't
+		 * live in the carveout for the swapper_pg_dir. We can simply
+		 * re-use the existing dir for the fixmap.
+		 */
+		set_pgd(pgd_offset_raw(pgd, FIXADDR_START),
+			*pgd_offset_k(FIXADDR_START));
+	} else if (CONFIG_PGTABLE_LEVELS > 3) {
+		/*
+		 * The fixmap shares its top level pgd entry with the kernel
+		 * mapping. This can really only occur when we are running
+		 * with 16k/4 levels, so we can simply reuse the pud level
+		 * entry instead.
+		 */
+		BUG_ON(!IS_ENABLED(CONFIG_ARM64_16K_PAGES));
+		set_pud(pud_set_fixmap_offset(pgd, FIXADDR_START),
+			__pud(__pa_symbol(bm_pmd) | PUD_TYPE_TABLE));
+		pud_clear_fixmap();
+	} else {
+		BUG();
 }
-#endif
 
-void fixup_init(void)
+	kasan_copy_shadow(pgd);
+}
+
+struct dma_contig_early_reserve {
+	phys_addr_t base;
+	unsigned long size;
+};
+
+static struct dma_contig_early_reserve dma_mmu_remap[MAX_CMA_AREAS];
+
+static int dma_mmu_remap_num;
+
+void __init dma_contiguous_early_fixup(phys_addr_t base, unsigned long size)
 {
-	create_mapping_late(__pa(__init_begin), (unsigned long)__init_begin,
-			(unsigned long)__init_end - (unsigned long)__init_begin,
-			PAGE_KERNEL);
+	dma_mmu_remap[dma_mmu_remap_num].base = base;
+	dma_mmu_remap[dma_mmu_remap_num].size = size;
+	dma_mmu_remap_num++;
+}
+
+static bool dma_overlap(phys_addr_t start, phys_addr_t end)
+{
+	int i;
+
+	for (i = 0; i < dma_mmu_remap_num; i++) {
+		phys_addr_t dma_base = dma_mmu_remap[i].base;
+		phys_addr_t dma_end = dma_mmu_remap[i].base +
+			dma_mmu_remap[i].size;
+
+		if ((dma_base < end) && (dma_end > start))
+			return true;
+	}
+	return false;
 }
 
 /*
@@ -444,31 +607,455 @@
  */
 void __init paging_init(void)
 {
-	void *zero_page;
+	phys_addr_t pgd_phys = early_pgtable_alloc();
+	pgd_t *pgd = pgd_set_fixmap(pgd_phys);
+
+	map_kernel(pgd);
+	map_mem(pgd);
+
+	/*
+	 * We want to reuse the original swapper_pg_dir so we don't have to
+	 * communicate the new address to non-coherent secondaries in
+	 * secondary_entry, and so cpu_switch_mm can generate the address with
+	 * adrp+add rather than a load from some global variable.
+	 *
+	 * To do this we need to go via a temporary pgd.
+	 */
+	cpu_replace_ttbr1(__va(pgd_phys));
+	memcpy(swapper_pg_dir, pgd, PAGE_SIZE);
+	cpu_replace_ttbr1(lm_alias(swapper_pg_dir));
 
-	map_mem();
-	fixup_executable();
+	pgd_clear_fixmap();
+	memblock_free(pgd_phys, PAGE_SIZE);
 
-	/* allocate the zero page. */
-	zero_page = early_alloc(PAGE_SIZE);
+	/*
+	 * We only reuse the PGD from the swapper_pg_dir, not the pud + pmd
+	 * allocated with it.
+	 */
+	memblock_free(__pa_symbol(swapper_pg_dir) + PAGE_SIZE,
+		      SWAPPER_DIR_SIZE - PAGE_SIZE);
 
 	bootmem_init();
+}
 
-	empty_zero_page = virt_to_page(zero_page);
+#ifdef CONFIG_MEMORY_HOTPLUG
+static phys_addr_t pgd_pgtable_alloc(void)
+{
+	void *ptr = (void *)__get_free_page(PGALLOC_GFP);
+	if (!ptr || !pgtable_page_ctor(virt_to_page(ptr)))
+		BUG();
 
-	/* Ensure the zero page is visible to the page table walker */
+	/* Ensure the zeroed page is visible to the page table walker */
 	dsb(ishst);
+	return __pa(ptr);
+}
 
 	/*
-	 * TTBR0 is only used for the identity mapping at this stage. Make it
-	 * point to zero page to avoid speculatively fetching new entries.
+ * hotplug_paging() is used by memory hotplug to build new page tables
+ * for hot-added memory.
 	 */
-	cpu_set_reserved_ttbr0();
-	local_flush_tlb_all();
-	cpu_set_default_tcr_t0sz();
+void hotplug_paging(phys_addr_t start, phys_addr_t size)
+{
+	struct page *pg;
+	phys_addr_t pgd_phys = pgd_pgtable_alloc();
+	pgd_t *pgd = pgd_set_fixmap(pgd_phys);
+
+	memcpy(pgd, swapper_pg_dir, PAGE_SIZE);
+
+	__create_pgd_mapping(pgd, start, __phys_to_virt(start), size,
+		PAGE_KERNEL, pgd_pgtable_alloc);
+
+	cpu_replace_ttbr1(__va(pgd_phys));
+	memcpy(swapper_pg_dir, pgd, PAGE_SIZE);
+	cpu_replace_ttbr1(swapper_pg_dir);
+
+	pgd_clear_fixmap();
+
+	pg = phys_to_page(pgd_phys);
+	pgtable_page_dtor(pg);
+	__free_pages(pg, 0);
+}
+
+#ifdef CONFIG_MEMORY_HOTREMOVE
+#define PAGE_INUSE 0xFD
+
+static void  free_pagetable(struct page *page, int order, bool direct)
+{
+	unsigned long magic;
+	unsigned int nr_pages = 1 << order;
+
+	/* bootmem page has reserved flag */
+	if (PageReserved(page)) {
+		__ClearPageReserved(page);
+
+		magic = (unsigned long)page->lru.next;
+		if (magic == SECTION_INFO || magic == MIX_SECTION_INFO) {
+			while (nr_pages--)
+				put_page_bootmem(page++);
+		} else {
+			while (nr_pages--)
+				free_reserved_page(page++);
+		}
+	} else {
+		/*
+		 * Only direct pagetable allocation (those allocated via
+		 * hotplug) call the pgtable_page_ctor; vmemmap pgtable
+		 * allocations don't.
+		 */
+		if (direct)
+			pgtable_page_dtor(page);
+
+		free_pages((unsigned long)page_address(page), order);
+	}
+}
+
+static void free_pte_table(pmd_t *pmd, bool direct)
+{
+	pte_t *pte_start, *pte;
+	struct page *page;
+	int i;
+
+	pte_start = (pte_t *) pmd_page_vaddr(*pmd);
+	/* Check that no valid entry is left in the PTE table */
+	for (i = 0; i < PTRS_PER_PTE; i++) {
+		pte = pte_start + i;
+		if (!pte_none(*pte))
+			return;
+	}
+
+	page = pmd_page(*pmd);
+
+	free_pagetable(page, 0, direct);
+
+	/*
+	 * This spinlock can only be taken in __pte_alloc_kernel in
+	 * mm/memory.c, and nowhere else (for arm64). It is unclear
+	 * whether the function above can be called concurrently; when
+	 * in doubt, leave the locking in place for now, though it can
+	 * probably be removed.
+	 */
+	spin_lock(&init_mm.page_table_lock);
+	pmd_clear(pmd);
+	spin_unlock(&init_mm.page_table_lock);
+}
+
+static void free_pmd_table(pud_t *pud, bool direct)
+{
+	pmd_t *pmd_start, *pmd;
+	struct page *page;
+	int i;
+
+	pmd_start = (pmd_t *) pud_page_vaddr(*pud);
+	/* Check if there is no valid entry in the PMD */
+	for (i = 0; i < PTRS_PER_PMD; i++) {
+		pmd = pmd_start + i;
+		if (!pmd_none(*pmd))
+			return;
+	}
+
+	page = pud_page(*pud);
+
+	free_pagetable(page, 0, direct);
+
+	/*
+	 * This spinlock can only be taken in __pte_alloc_kernel in
+	 * mm/memory.c, and nowhere else (for arm64). It is unclear
+	 * whether the function above can be called concurrently; when
+	 * in doubt, leave the locking in place for now, though it can
+	 * probably be removed.
+	 */
+	spin_lock(&init_mm.page_table_lock);
+	pud_clear(pud);
+	spin_unlock(&init_mm.page_table_lock);
 }
 
 /*
+ * When the PUD is folded on the PGD (three levels of paging),
+ * there's no need to free PUDs
+ */
+#if CONFIG_PGTABLE_LEVELS > 3
+static void free_pud_table(pgd_t *pgd, bool direct)
+{
+	pud_t *pud_start, *pud;
+	struct page *page;
+	int i;
+
+	pud_start = (pud_t *) pgd_page_vaddr(*pgd);
+	/* Check if there is no valid entry in the PUD */
+	for (i = 0; i < PTRS_PER_PUD; i++) {
+		pud = pud_start + i;
+		if (!pud_none(*pud))
+			return;
+	}
+
+	page = pgd_page(*pgd);
+
+	free_pagetable(page, 0, direct);
+
+	/*
+	 * This spinlock can only be taken in __pte_alloc_kernel in
+	 * mm/memory.c, and nowhere else (for arm64). It is unclear
+	 * whether the function above can be called concurrently; when
+	 * in doubt, leave the locking in place for now, though it can
+	 * probably be removed.
+	 */
+	spin_lock(&init_mm.page_table_lock);
+	pgd_clear(pgd);
+	spin_unlock(&init_mm.page_table_lock);
+}
+#endif
+
+static void remove_pte_table(pte_t *pte, unsigned long addr,
+	unsigned long end, bool direct)
+{
+	unsigned long next;
+	void *page_addr;
+
+	for (; addr < end; addr = next, pte++) {
+		next = (addr + PAGE_SIZE) & PAGE_MASK;
+		if (next > end)
+			next = end;
+
+		if (!pte_present(*pte))
+			continue;
+
+		if (PAGE_ALIGNED(addr) && PAGE_ALIGNED(next)) {
+			/*
+			 * Do not free direct mapping pages since they were
+			 * freed when offlining, or simply were never in use.
+			 */
+			if (!direct)
+				free_pagetable(pte_page(*pte), 0, direct);
+
+			/*
+			 * This spinlock can only be taken in
+			 * __pte_alloc_kernel in mm/memory.c, and nowhere
+			 * else (for arm64). It is unclear whether the
+			 * function above can be called concurrently;
+			 * when in doubt, leave the locking in place for
+			 * now, though it can probably be removed.
+			 */
+			spin_lock(&init_mm.page_table_lock);
+			pte_clear(&init_mm, addr, pte);
+			spin_unlock(&init_mm.page_table_lock);
+		} else {
+			/*
+			 * If we are here, we are freeing vmemmap pages since
+			 * direct mapped memory ranges to be freed are aligned.
+			 *
+			 * If we are not removing the whole page, it means
+			 * other page structs in this page are being used and
+			 * we cannot remove them. So fill the unused page structs
+			 * with 0xFD, and remove the page when it is wholly
+			 * filled with 0xFD.
+			 */
+			memset((void *)addr, PAGE_INUSE, next - addr);
+
+			page_addr = page_address(pte_page(*pte));
+			if (!memchr_inv(page_addr, PAGE_INUSE, PAGE_SIZE)) {
+				free_pagetable(pte_page(*pte), 0, direct);
+
+				/*
+				 * This spinlock can only be taken in
+				 * __pte_alloc_kernel in mm/memory.c, and
+				 * nowhere else (for arm64). It is unclear
+				 * whether the function above can be called
+				 * concurrently; when in doubt, leave the
+				 * locking in place for now, though it can
+				 * probably be removed.
+				 */
+				spin_lock(&init_mm.page_table_lock);
+				pte_clear(&init_mm, addr, pte);
+				spin_unlock(&init_mm.page_table_lock);
+			}
+		}
+	}
+
+	/*
+	 * This flush is done here in symmetry with the x86 code; it is
+	 * not obvious why it is needed here rather than in remove_p[mu]d.
+	 */
+	flush_tlb_all();
+}
+
+static void remove_pmd_table(pmd_t *pmd, unsigned long addr,
+	unsigned long end, bool direct)
+{
+	unsigned long next;
+	void *page_addr;
+	pte_t *pte;
+
+	for (; addr < end; addr = next, pmd++) {
+		next = pmd_addr_end(addr, end);
+
+		if (!pmd_present(*pmd))
+			continue;
+
+		/* Check if we are using 2MB section mappings */
+		if (pmd_sect(*pmd)) {
+			if (PAGE_ALIGNED(addr) && PAGE_ALIGNED(next)) {
+				if (!direct) {
+					free_pagetable(pmd_page(*pmd),
+						get_order(PMD_SIZE), direct);
+				}
+				/*
+				 * This spinlock can only be taken in
+				 * __pte_alloc_kernel in mm/memory.c, and
+				 * nowhere else (for arm64). It is unclear
+				 * whether the function above can be called
+				 * concurrently; when in doubt, leave the
+				 * locking in place for now, though it can
+				 * probably be removed.
+				 */
+				spin_lock(&init_mm.page_table_lock);
+				pmd_clear(pmd);
+				spin_unlock(&init_mm.page_table_lock);
+			} else {
+				/* If here, we are freeing vmemmap pages. */
+				memset((void *)addr, PAGE_INUSE, next - addr);
+
+				page_addr = page_address(pmd_page(*pmd));
+				if (!memchr_inv(page_addr, PAGE_INUSE,
+						PMD_SIZE)) {
+					free_pagetable(pmd_page(*pmd),
+						get_order(PMD_SIZE), direct);
+
+					/*
+					 * This spinlock can only be taken
+					 * in __pte_alloc_kernel in
+					 * mm/memory.c, and nowhere else
+					 * (for arm64). It is unclear
+					 * whether the function above can be
+					 * called concurrently; when in
+					 * doubt, leave the locking in place
+					 * for now, though it can probably
+					 * be removed.
+					 */
+					spin_lock(&init_mm.page_table_lock);
+					pmd_clear(pmd);
+					spin_unlock(&init_mm.page_table_lock);
+				}
+			}
+			continue;
+		}
+
+		BUG_ON(!pmd_table(*pmd));
+
+		pte = pte_offset_map(pmd, addr);
+		remove_pte_table(pte, addr, next, direct);
+		free_pte_table(pmd, direct);
+	}
+}
+
+static void remove_pud_table(pud_t *pud, unsigned long addr,
+	unsigned long end, bool direct)
+{
+	unsigned long next;
+	pmd_t *pmd;
+	void *page_addr;
+
+	for (; addr < end; addr = next, pud++) {
+		next = pud_addr_end(addr, end);
+		if (!pud_present(*pud))
+			continue;
+		/*
+		 * If we are using 4K granules, check if we are using
+		 * 1GB section mapping.
+		 */
+		if (pud_sect(*pud)) {
+			if (PAGE_ALIGNED(addr) && PAGE_ALIGNED(next)) {
+				if (!direct) {
+					free_pagetable(pud_page(*pud),
+						get_order(PUD_SIZE), direct);
+				}
+
+				/*
+				 * This spinlock can only be taken in
+				 * __pte_alloc_kernel in mm/memory.c, and
+				 * nowhere else (for arm64). It is unclear
+				 * whether the function above can be called
+				 * concurrently; when in doubt, leave the
+				 * locking in place for now, though it can
+				 * probably be removed.
+				 */
+				spin_lock(&init_mm.page_table_lock);
+				pud_clear(pud);
+				spin_unlock(&init_mm.page_table_lock);
+			} else {
+				/* If here, we are freeing vmemmap pages. */
+				memset((void *)addr, PAGE_INUSE, next - addr);
+
+				page_addr = page_address(pud_page(*pud));
+				if (!memchr_inv(page_addr, PAGE_INUSE,
+						PUD_SIZE)) {
+
+					free_pagetable(pud_page(*pud),
+						get_order(PUD_SIZE), direct);
+
+					/*
+					 * This spinlock can only be taken
+					 * in __pte_alloc_kernel in
+					 * mm/memory.c, and nowhere else
+					 * (for arm64). It is unclear
+					 * whether the function above can be
+					 * called concurrently; when in
+					 * doubt, leave the locking in place
+					 * for now, though it can probably
+					 * be removed.
+					 */
+					spin_lock(&init_mm.page_table_lock);
+					pud_clear(pud);
+					spin_unlock(&init_mm.page_table_lock);
+				}
+			}
+			continue;
+		}
+
+		BUG_ON(!pud_table(*pud));
+
+		pmd = pmd_offset(pud, addr);
+		remove_pmd_table(pmd, addr, next, direct);
+		free_pmd_table(pud, direct);
+	}
+}
+
+void remove_pagetable(unsigned long start, unsigned long end, bool direct)
+{
+	unsigned long next;
+	unsigned long addr;
+	pgd_t *pgd;
+	pud_t *pud;
+
+	for (addr = start; addr < end; addr = next) {
+		next = pgd_addr_end(addr, end);
+
+		pgd = pgd_offset_k(addr);
+		if (pgd_none(*pgd))
+			continue;
+
+		pud = pud_offset(pgd, addr);
+		remove_pud_table(pud, addr, next, direct);
+		/*
+		 * When the PUD is folded on the PGD (three levels of paging),
+		 * the PMD page was already cleared in free_pmd_table
+		 * and the corresponding PGD==PUD entry was reset there.
+		 */
+#if CONFIG_PGTABLE_LEVELS > 3
+		free_pud_table(pgd, direct);
+#endif
+	}
+
+	flush_tlb_all();
+}
+
+#endif /* CONFIG_MEMORY_HOTREMOVE */
+#endif /* CONFIG_MEMORY_HOTPLUG */
+
+/*
  * Check whether a kernel address is valid (derived from arch/x86/).
  */
 int kern_addr_valid(unsigned long addr)
@@ -519,6 +1106,7 @@
 	pgd_t *pgd;
 	pud_t *pud;
 	pmd_t *pmd;
+	int ret = 0;
 
 	do {
 		next = pmd_addr_end(addr, end);
@@ -536,37 +1124,40 @@
 			void *p = NULL;
 
 			p = vmemmap_alloc_block_buf(PMD_SIZE, node);
-			if (!p)
-				return -ENOMEM;
+			if (!p) {
+#ifdef CONFIG_MEMORY_HOTPLUG
+				vmemmap_free(start, end);
+#endif
+				ret = -ENOMEM;
+				break;
+			}
 
 			set_pmd(pmd, __pmd(__pa(p) | PROT_SECT_NORMAL));
 		} else
 			vmemmap_verify((pte_t *)pmd, node, addr, next);
 	} while (addr = next, addr != end);
 
-	return 0;
+	if (ret)
+		return vmemmap_populate_basepages(start, end, node);
+
+	return ret;
 }
 #endif	/* CONFIG_ARM64_64K_PAGES */
 void vmemmap_free(unsigned long start, unsigned long end)
 {
+#ifdef CONFIG_MEMORY_HOTREMOVE
+	remove_pagetable(start, end, false);
+#endif
 }
 #endif	/* CONFIG_SPARSEMEM_VMEMMAP */
 
-static pte_t bm_pte[PTRS_PER_PTE] __page_aligned_bss;
-#if CONFIG_PGTABLE_LEVELS > 2
-static pmd_t bm_pmd[PTRS_PER_PMD] __page_aligned_bss;
-#endif
-#if CONFIG_PGTABLE_LEVELS > 3
-static pud_t bm_pud[PTRS_PER_PUD] __page_aligned_bss;
-#endif
-
 static inline pud_t * fixmap_pud(unsigned long addr)
 {
 	pgd_t *pgd = pgd_offset_k(addr);
 
 	BUG_ON(pgd_none(*pgd) || pgd_bad(*pgd));
 
-	return pud_offset(pgd, addr);
+	return pud_offset_kimg(pgd, addr);
 }
 
 static inline pmd_t * fixmap_pmd(unsigned long addr)
@@ -575,18 +1166,20 @@
 
 	BUG_ON(pud_none(*pud) || pud_bad(*pud));
 
-	return pmd_offset(pud, addr);
+	return pmd_offset_kimg(pud, addr);
 }
 
 static inline pte_t * fixmap_pte(unsigned long addr)
 {
-	pmd_t *pmd = fixmap_pmd(addr);
-
-	BUG_ON(pmd_none(*pmd) || pmd_bad(*pmd));
-
-	return pte_offset_kernel(pmd, addr);
+	return &bm_pte[pte_index(addr)];
 }
 
+/*
+ * The p*d_populate functions call virt_to_phys implicitly so they can't be used
+ * directly on kernel symbols (bm_p*d). This function is called too early to use
+ * lm_alias so __p*d_populate functions must be used to populate with the
+ * physical address from __pa_symbol.
+ */
 void __init early_fixmap_init(void)
 {
 	pgd_t *pgd;
@@ -595,15 +1188,29 @@
 	unsigned long addr = FIXADDR_START;
 
 	pgd = pgd_offset_k(addr);
-	pgd_populate(&init_mm, pgd, bm_pud);
-	pud = pud_offset(pgd, addr);
-	pud_populate(&init_mm, pud, bm_pmd);
-	pmd = pmd_offset(pud, addr);
-	pmd_populate_kernel(&init_mm, pmd, bm_pte);
+	if (CONFIG_PGTABLE_LEVELS > 3 &&
+	    !(pgd_none(*pgd) || pgd_page_paddr(*pgd) == __pa_symbol(bm_pud))) {
+		/*
+		 * We only end up here if the kernel mapping and the fixmap
+		 * share the top level pgd entry, which should only happen on
+		 * 16k/4 levels configurations.
+		 */
+		BUG_ON(!IS_ENABLED(CONFIG_ARM64_16K_PAGES));
+		pud = pud_offset_kimg(pgd, addr);
+	} else {
+		if (pgd_none(*pgd))
+			__pgd_populate(pgd, __pa_symbol(bm_pud),
+				       PUD_TYPE_TABLE);
+		pud = fixmap_pud(addr);
+	}
+	if (pud_none(*pud))
+		__pud_populate(pud, __pa_symbol(bm_pmd), PMD_TYPE_TABLE);
+	pmd = fixmap_pmd(addr);
+	__pmd_populate(pmd, __pa_symbol(bm_pte), PMD_TYPE_TABLE);
 
 	/*
 	 * The boot-ioremap range spans multiple pmds, for which
-	 * we are not preparted:
+	 * we are not prepared:
 	 */
 	BUILD_BUG_ON((__fix_to_virt(FIX_BTMAP_BEGIN) >> PMD_SHIFT)
 		     != (__fix_to_virt(FIX_BTMAP_END) >> PMD_SHIFT));
@@ -642,11 +1249,10 @@
 	}
 }
 
-void *__init fixmap_remap_fdt(phys_addr_t dt_phys)
+void *__init __fixmap_remap_fdt(phys_addr_t dt_phys, int *size, pgprot_t prot)
 {
 	const u64 dt_virt_base = __fix_to_virt(FIX_FDT);
-	pgprot_t prot = PAGE_KERNEL_RO;
-	int size, offset;
+	int offset;
 	void *dt_virt;
 
 	/*
@@ -663,7 +1269,7 @@
 	/*
 	 * Make sure that the FDT region can be mapped without the need to
 	 * allocate additional translation table pages, so that it is safe
-	 * to call create_mapping() this early.
+	 * to call create_mapping_noalloc() this early.
 	 *
 	 * On 64k pages, the FDT will be mapped using PTEs, so we need to
 	 * be in the same PMD as the rest of the fixmap.
@@ -679,21 +1285,73 @@
 	dt_virt = (void *)dt_virt_base + offset;
 
 	/* map the first chunk so we can read the size from the header */
-	create_mapping(round_down(dt_phys, SWAPPER_BLOCK_SIZE), dt_virt_base,
-		       SWAPPER_BLOCK_SIZE, prot);
+	create_mapping_noalloc(round_down(dt_phys, SWAPPER_BLOCK_SIZE),
+			dt_virt_base, SWAPPER_BLOCK_SIZE, prot);
 
 	if (fdt_magic(dt_virt) != FDT_MAGIC)
 		return NULL;
 
-	size = fdt_totalsize(dt_virt);
-	if (size > MAX_FDT_SIZE)
+	*size = fdt_totalsize(dt_virt);
+	if (*size > MAX_FDT_SIZE)
 		return NULL;
 
-	if (offset + size > SWAPPER_BLOCK_SIZE)
-		create_mapping(round_down(dt_phys, SWAPPER_BLOCK_SIZE), dt_virt_base,
-			       round_up(offset + size, SWAPPER_BLOCK_SIZE), prot);
+	if (offset + *size > SWAPPER_BLOCK_SIZE)
+		create_mapping_noalloc(round_down(dt_phys, SWAPPER_BLOCK_SIZE), dt_virt_base,
+			       round_up(offset + *size, SWAPPER_BLOCK_SIZE), prot);
 
-	memblock_reserve(dt_phys, size);
+	return dt_virt;
+}
+
+void *__init fixmap_remap_fdt(phys_addr_t dt_phys)
+{
+	void *dt_virt;
+	int size;
 
+	dt_virt = __fixmap_remap_fdt(dt_phys, &size, PAGE_KERNEL_RO);
+	if (!dt_virt)
+		return NULL;
+
+	memblock_reserve(dt_phys, size);
 	return dt_virt;
 }
+
+int __init arch_ioremap_pud_supported(void)
+{
+	/* only 4k granule supports level 1 block mappings */
+	return IS_ENABLED(CONFIG_ARM64_4K_PAGES);
+}
+
+int __init arch_ioremap_pmd_supported(void)
+{
+	return 1;
+}
+
+int pud_set_huge(pud_t *pud, phys_addr_t phys, pgprot_t prot)
+{
+	BUG_ON(phys & ~PUD_MASK);
+	set_pud(pud, __pud(phys | PUD_TYPE_SECT | pgprot_val(mk_sect_prot(prot))));
+	return 1;
+}
+
+int pmd_set_huge(pmd_t *pmd, phys_addr_t phys, pgprot_t prot)
+{
+	BUG_ON(phys & ~PMD_MASK);
+	set_pmd(pmd, __pmd(phys | PMD_TYPE_SECT | pgprot_val(mk_sect_prot(prot))));
+	return 1;
+}
+
+int pud_clear_huge(pud_t *pud)
+{
+	if (!pud_sect(*pud))
+		return 0;
+	pud_clear(pud);
+	return 1;
+}
+
+int pmd_clear_huge(pmd_t *pmd)
+{
+	if (!pmd_sect(*pmd))
+		return 0;
+	pmd_clear(pmd);
+	return 1;
+}
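/*
 * A hypothetical use of the pmd_set_huge() helper defined above, as the
 * generic ioremap code would make on a 2 MiB-aligned region; phys must
 * be PMD-aligned or the BUG_ON fires. This is a sketch, not kernel code,
 * and assumes a 4K granule so that PMD_SIZE is 2 MiB.
 */
static void example_map_2m_block(pmd_t *pmd, phys_addr_t phys)
{
	BUILD_BUG_ON(PMD_SIZE != SZ_2M);	/* 4K granule assumption */
	pmd_set_huge(pmd, phys & PMD_MASK, __pgprot(PROT_DEVICE_nGnRE));
}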
diff -ruw linux-4.4.115/arch/arm64/mm/pageattr.c linux-4.4.115-fbx/arch/arm64/mm/pageattr.c
--- linux-4.4.115/arch/arm64/mm/pageattr.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/arch/arm64/mm/pageattr.c	2019-10-29 09:26:23.013197013 +0100
@@ -14,6 +14,7 @@
 #include <linux/mm.h>
 #include <linux/module.h>
 #include <linux/sched.h>
+#include <linux/vmalloc.h>
 
 #include <asm/pgtable.h>
 #include <asm/tlbflush.h>
@@ -36,14 +37,32 @@
 	return 0;
 }
 
+/*
+ * This function assumes that the range is mapped with PAGE_SIZE pages.
+ */
+static int __change_memory_common(unsigned long start, unsigned long size,
+				pgprot_t set_mask, pgprot_t clear_mask)
+{
+	struct page_change_data data;
+	int ret;
+
+	data.set_mask = set_mask;
+	data.clear_mask = clear_mask;
+
+	ret = apply_to_page_range(&init_mm, start, size, change_page_range,
+					&data);
+
+	flush_tlb_kernel_range(start, start + size);
+	return ret;
+}
+
 static int change_memory_common(unsigned long addr, int numpages,
 				pgprot_t set_mask, pgprot_t clear_mask)
 {
 	unsigned long start = addr;
 	unsigned long size = PAGE_SIZE*numpages;
 	unsigned long end = start + size;
-	int ret;
-	struct page_change_data data;
+	struct vm_struct *area;
 
 	if (!PAGE_ALIGNED(addr)) {
 		start &= PAGE_MASK;
@@ -51,23 +70,36 @@
 		WARN_ON_ONCE(1);
 	}
 
+	if (!IS_ENABLED(CONFIG_FORCE_PAGES)) {
 	if (start < MODULES_VADDR || start >= MODULES_END)
 		return -EINVAL;
 
 	if (end < MODULES_VADDR || end >= MODULES_END)
 		return -EINVAL;
+	}
+	/*
+	 * Kernel VA mappings are always live, and splitting live section
+	 * mappings into page mappings may cause TLB conflicts. This means
+	 * we have to ensure that changing the permission bits of the range
+	 * we are operating on does not result in such splitting.
+	 *
+	 * Let's restrict ourselves to mappings created by vmalloc (or vmap).
+	 * Those are guaranteed to consist entirely of page mappings, and
+	 * splitting is never needed.
+	 *
+	 * So check whether the [addr, addr + size) interval is entirely
+	 * covered by precisely one VM area that has the VM_ALLOC flag set.
+	 */
+	area = find_vm_area((void *)addr);
+	if (!area ||
+	    end > (unsigned long)area->addr + area->size ||
+	    !(area->flags & VM_ALLOC))
+		return -EINVAL;
 
 	if (!numpages)
 		return 0;
 
-	data.set_mask = set_mask;
-	data.clear_mask = clear_mask;
-
-	ret = apply_to_page_range(&init_mm, start, size, change_page_range,
-					&data);
-
-	flush_tlb_kernel_range(start, end);
-	return ret;
+	return __change_memory_common(start, size, set_mask, clear_mask);
 }
 
 int set_memory_ro(unsigned long addr, int numpages)
@@ -99,3 +131,19 @@
 					__pgprot(PTE_PXN));
 }
 EXPORT_SYMBOL_GPL(set_memory_x);
+
+#ifdef CONFIG_DEBUG_PAGEALLOC
+void __kernel_map_pages(struct page *page, int numpages, int enable)
+{
+	unsigned long addr = (unsigned long) page_address(page);
+
+	if (enable)
+		__change_memory_common(addr, PAGE_SIZE * numpages,
+					__pgprot(PTE_VALID),
+					__pgprot(0));
+	else
+		__change_memory_common(addr, PAGE_SIZE * numpages,
+					__pgprot(0),
+					__pgprot(PTE_VALID));
+}
+#endif
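/*
 * A sketch of the debug_pagealloc flow enabled above (call sites assumed):
 * the page allocator calls __kernel_map_pages() with enable == 0 on free,
 * clearing PTE_VALID so any use-after-free faults immediately, and with
 * enable == 1 on allocation, restoring PTE_VALID. This only works because
 * the linear map is forced down to page granularity in that configuration.
 */
static void example_debug_pagealloc_toggle(struct page *page)
{
	__kernel_map_pages(page, 1, 0);		/* unmap on free */
	__kernel_map_pages(page, 1, 1);		/* remap on alloc */
}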
diff -ruw linux-4.4.115/arch/arm64/mm/pgd.c linux-4.4.115-fbx/arch/arm64/mm/pgd.c
--- linux-4.4.115/arch/arm64/mm/pgd.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/arch/arm64/mm/pgd.c	2019-01-22 16:16:21.563228839 +0100
@@ -46,14 +46,14 @@
 		kmem_cache_free(pgd_cache, pgd);
 }
 
-static int __init pgd_cache_init(void)
+void __init pgd_cache_init(void)
 {
+	if (PGD_SIZE == PAGE_SIZE)
+		return;
+
 	/*
 	 * Naturally aligned pgds required by the architecture.
 	 */
-	if (PGD_SIZE != PAGE_SIZE)
 		pgd_cache = kmem_cache_create("pgd_cache", PGD_SIZE, PGD_SIZE,
 					      SLAB_PANIC, NULL);
-	return 0;
 }
-core_initcall(pgd_cache_init);
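/*
 * A sketch of the allocation policy behind pgd_cache_init() above: when
 * PGD_SIZE equals PAGE_SIZE the page allocator already returns naturally
 * aligned pgds, so no slab cache is needed; otherwise a cache created
 * with align == PGD_SIZE provides the architectural alignment. The names
 * here are illustrative only.
 */
static pgd_t *example_pgd_alloc(struct kmem_cache *cache)
{
	if (PGD_SIZE == PAGE_SIZE)
		return (pgd_t *)__get_free_page(PGALLOC_GFP);

	return kmem_cache_alloc(cache, PGALLOC_GFP);
}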
diff -ruw linux-4.4.115/arch/arm64/mm/proc.S linux-4.4.115-fbx/arch/arm64/mm/proc.S
--- linux-4.4.115/arch/arm64/mm/proc.S	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/arch/arm64/mm/proc.S	2019-10-29 09:26:23.013197013 +0100
@@ -23,13 +23,11 @@
 #include <asm/assembler.h>
 #include <asm/asm-offsets.h>
 #include <asm/hwcap.h>
-#include <asm/pgtable-hwdef.h>
 #include <asm/pgtable.h>
+#include <asm/pgtable-hwdef.h>
 #include <asm/cpufeature.h>
 #include <asm/alternative.h>
 
-#include "proc-macros.S"
-
 #ifdef CONFIG_ARM64_64K_PAGES
 #define TCR_TG_FLAGS	TCR_TG0_64K | TCR_TG1_64K
 #elif defined(CONFIG_ARM64_16K_PAGES)
@@ -46,6 +44,52 @@
 #define MAIR(attr, mt)	((attr) << ((mt) * 8))
 
 /*
+ *	cpu_cache_off()
+ *
+ *	Turn the CPU D-cache off.
+ */
+ENTRY(cpu_cache_off)
+	mrs	x0, sctlr_el1
+	bic	x0, x0, #1 << 2			// clear SCTLR.C
+	msr	sctlr_el1, x0
+	isb
+	ret
+ENDPROC(cpu_cache_off)
+
+/*
+ *	cpu_reset(loc)
+ *
+ *	Perform a soft reset of the system.  Put the CPU into the same state
+ *	as it would be if it had been reset, and branch to what would be the
+ *	reset vector. It must be executed with the flat identity mapping.
+ *
+ *	- loc   - location to jump to for soft reset
+ */
+	.align	5
+ENTRY(cpu_reset)
+	mrs	x1, sctlr_el1
+	bic	x1, x1, #1
+	msr	sctlr_el1, x1			// disable the MMU
+	isb
+	ret	x0
+ENDPROC(cpu_reset)
+
+ENTRY(cpu_soft_restart)
+	/* Save address of cpu_reset() and reset address */
+	mov	x19, x0
+	mov	x20, x1
+
+	/* Turn D-cache off */
+	bl	cpu_cache_off
+
+	/* Push out all dirty data, and ensure cache is empty */
+	bl	flush_cache_all
+
+	mov	x0, x20
+	ret	x19
+ENDPROC(cpu_soft_restart)
+
+/*
  *	cpu_do_idle()
  *
  *	Idle the processor (wait for interrupt).
@@ -66,62 +110,56 @@
 	mrs	x2, tpidr_el0
 	mrs	x3, tpidrro_el0
 	mrs	x4, contextidr_el1
-	mrs	x5, mair_el1
-	mrs	x6, cpacr_el1
-	mrs	x7, ttbr1_el1
-	mrs	x8, tcr_el1
-	mrs	x9, vbar_el1
-	mrs	x10, mdscr_el1
-	mrs	x11, oslsr_el1
-	mrs	x12, sctlr_el1
+	mrs	x5, cpacr_el1
+	mrs	x6, tcr_el1
+	mrs	x7, vbar_el1
+	mrs	x8, mdscr_el1
+	mrs	x9, oslsr_el1
+	mrs	x10, sctlr_el1
+	mrs	x11, tpidr_el1
+	mrs	x12, sp_el0
 	stp	x2, x3, [x0]
-	stp	x4, x5, [x0, #16]
-	stp	x6, x7, [x0, #32]
-	stp	x8, x9, [x0, #48]
-	stp	x10, x11, [x0, #64]
-	str	x12, [x0, #80]
+	stp	x4, xzr, [x0, #16]
+	stp	x5, x6, [x0, #32]
+	stp	x7, x8, [x0, #48]
+	stp	x9, x10, [x0, #64]
+	stp	x11, x12, [x0, #80]
 	ret
 ENDPROC(cpu_do_suspend)
 
 /**
  * cpu_do_resume - restore CPU register context
  *
- * x0: Physical address of context pointer
- * x1: ttbr0_el1 to be restored
- *
- * Returns:
- *	sctlr_el1 value in x0
+ * x0: Address of context pointer
  */
 ENTRY(cpu_do_resume)
-	/*
-	 * Invalidate local tlb entries before turning on MMU
-	 */
-	tlbi	vmalle1
 	ldp	x2, x3, [x0]
 	ldp	x4, x5, [x0, #16]
-	ldp	x6, x7, [x0, #32]
-	ldp	x8, x9, [x0, #48]
-	ldp	x10, x11, [x0, #64]
-	ldr	x12, [x0, #80]
+	ldp	x6, x8, [x0, #32]
+	ldp	x9, x10, [x0, #48]
+	ldp	x11, x12, [x0, #64]
+	ldp	x13, x14, [x0, #80]
 	msr	tpidr_el0, x2
 	msr	tpidrro_el0, x3
 	msr	contextidr_el1, x4
-	msr	mair_el1, x5
 	msr	cpacr_el1, x6
-	msr	ttbr0_el1, x1
-	msr	ttbr1_el1, x7
-	tcr_set_idmap_t0sz x8, x7
+
+	/* Don't change t0sz here, mask those bits when restoring */
+	mrs	x5, tcr_el1
+	bfi	x8, x5, TCR_T0SZ_OFFSET, TCR_TxSZ_WIDTH
+
 	msr	tcr_el1, x8
 	msr	vbar_el1, x9
 	msr	mdscr_el1, x10
+	msr	sctlr_el1, x12
+	msr	tpidr_el1, x13
+	msr	sp_el0, x14
 	/*
 	 * Restore oslsr_el1 by writing oslar_el1
 	 */
 	ubfx	x11, x11, #1, #1
 	msr	oslar_el1, x11
 	reset_pmuserenr_el0 x0			// Disable PMU access from EL0
-	mov	x0, x12
-	dsb	nsh		// Make sure local tlb invalidation completed
 	isb
 	ret
 ENDPROC(cpu_do_resume)
@@ -135,24 +173,46 @@
  *	- pgd_phys - physical address of new TTB
  */
 ENTRY(cpu_do_switch_mm)
+	mrs	x2, ttbr1_el1
 	mmid	x1, x1				// get mm->context.id
-	bfi	x0, x1, #48, #16		// set the ASID
-	msr	ttbr0_el1, x0			// set TTBR0
+#ifdef CONFIG_ARM64_SW_TTBR0_PAN
+	bfi	x0, x1, #48, #16		// set the ASID field in TTBR0
+#endif
+	bfi	x2, x1, #48, #16		// set the ASID
+	msr	ttbr1_el1, x2			// in TTBR1 (since TCR.A1 is set)
 	isb
-alternative_if_not ARM64_WORKAROUND_CAVIUM_27456
-	ret
-	nop
-	nop
-	nop
-alternative_else
-	ic	iallu
-	dsb	nsh
+	msr	ttbr0_el1, x0			// now update TTBR0
 	isb
-	ret
-alternative_endif
+	b	post_ttbr_update_workaround	// Back to C code...
 ENDPROC(cpu_do_switch_mm)
 
-	.section ".text.init", #alloc, #execinstr
+	.pushsection ".idmap.text", "ax"
+/*
+ * void idmap_cpu_replace_ttbr1(phys_addr_t new_pgd)
+ *
+ * This is the low-level counterpart to cpu_replace_ttbr1, and should not be
+ * called by anything else. It can only be executed from a TTBR0 mapping.
+ */
+ENTRY(idmap_cpu_replace_ttbr1)
+	mrs	x2, daif
+	msr	daifset, #0xf
+
+	adrp	x1, empty_zero_page
+	msr	ttbr1_el1, x1
+	isb
+
+	tlbi	vmalle1
+	dsb	nsh
+	isb
+
+	msr	ttbr1_el1, x0
+	isb
+
+	msr	daif, x2
+
+	ret
+ENDPROC(idmap_cpu_replace_ttbr1)
+	.popsection
 
 /*
  *	__cpu_setup
@@ -203,7 +263,7 @@
 	 * both user and kernel.
 	 */
 	ldr	x10, =TCR_TxSZ(VA_BITS) | TCR_CACHE_FLAGS | TCR_SMP_FLAGS | \
-			TCR_TG_FLAGS | TCR_ASID16 | TCR_TBI0
+			TCR_TG_FLAGS | TCR_ASID16 | TCR_TBI0 | TCR_A1
 	tcr_set_idmap_t0sz	x10, x9
 
 	/*
@@ -243,5 +303,16 @@
 	 */
 	.type	crval, #object
 crval:
+#ifdef CONFIG_ARM64_ICACHE_DISABLE
+#define CR_IBIT		0
+#else
+#define CR_IBIT		0x1000
+#endif
+
+#ifdef CONFIG_ARM64_DCACHE_DISABLE
+#define CR_CBIT		0
+#else
+#define CR_CBIT		0x4
+#endif
 	.word	0xfcffffff			// clear
-	.word	0x34d5d91d			// set
+	.word	0x34d5d91d | CR_IBIT | CR_CBIT	// set
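For reference, idmap_cpu_replace_ttbr1 above is only reachable through a C wrapper executed via the identity mapping. A rough sketch of that wrapper, modelled on mainline's cpu_replace_ttbr1() in asm/mmu_context.h; the idmap install helpers are mainline names and an assumption with respect to this tree:

static inline void cpu_replace_ttbr1(pgd_t *pgd)
{
	typedef void (ttbr_replace_func)(phys_addr_t);
	extern ttbr_replace_func idmap_cpu_replace_ttbr1;
	ttbr_replace_func *replace_phys;

	phys_addr_t pgd_phys = virt_to_phys(pgd);

	/* Call the .idmap.text routine through its physical alias */
	replace_phys = (void *)virt_to_phys(idmap_cpu_replace_ttbr1);

	cpu_install_idmap();		/* run from TTBR0 while TTBR1 is swapped */
	replace_phys(pgd_phys);
	cpu_uninstall_idmap();
}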
diff -ruw linux-4.4.115/arch/cris/boot/dts/include/dt-bindings/input/linux-event-codes.h linux-4.4.115-fbx/arch/cris/boot/dts/include/dt-bindings/input/linux-event-codes.h
--- linux-4.4.115/arch/cris/boot/dts/include/dt-bindings/input/linux-event-codes.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/arch/cris/boot/dts/include/dt-bindings/input/linux-event-codes.h	2019-10-29 09:26:25.545221791 +0100
@@ -611,6 +611,37 @@
 #define KEY_KBDINPUTASSIST_ACCEPT		0x264
 #define KEY_KBDINPUTASSIST_CANCEL		0x265
 
+/* Diagonal movement keys */
+#define KEY_RIGHT_UP			0x266
+#define KEY_RIGHT_DOWN			0x267
+#define KEY_LEFT_UP			0x268
+#define KEY_LEFT_DOWN			0x269
+
+#define KEY_ROOT_MENU			0x26a /* Show Device's Root Menu */
+/* Show Top Menu of the Media (e.g. DVD) */
+#define KEY_MEDIA_TOP_MENU		0x26b
+#define KEY_NUMERIC_11			0x26c
+#define KEY_NUMERIC_12			0x26d
+/*
+ * Toggle Audio Description: refers to an audio service that helps blind and
+ * visually impaired consumers understand the action in a program. Note: in
+ * some countries this is referred to as "Video Description".
+ */
+#define KEY_AUDIO_DESC			0x26e
+#define KEY_3D_MODE			0x26f
+#define KEY_NEXT_FAVORITE		0x270
+#define KEY_STOP_RECORD			0x271
+#define KEY_PAUSE_RECORD		0x272
+#define KEY_VOD				0x273 /* Video on Demand */
+#define KEY_UNMUTE			0x274
+#define KEY_FASTREVERSE			0x275
+#define KEY_SLOWREVERSE			0x276
+/*
+ * Control a data application associated with the currently viewed channel,
+ * e.g. teletext or data broadcast application (MHEG, MHP, HbbTV, etc.)
+ */
+#define KEY_DATA			0x277
+
 #define BTN_TRIGGER_HAPPY		0x2c0
 #define BTN_TRIGGER_HAPPY1		0x2c0
 #define BTN_TRIGGER_HAPPY2		0x2c1
@@ -653,6 +684,18 @@
 #define BTN_TRIGGER_HAPPY39		0x2e6
 #define BTN_TRIGGER_HAPPY40		0x2e7
 
+#define KEY_APP_TV			0x2f1
+#define KEY_APP_REPLAY			0x2f2
+#define KEY_APP_VIDEOCLUB		0x2f3
+#define KEY_APP_WHATSON			0x2f4
+#define KEY_APP_RECORDS			0x2f5
+#define KEY_APP_MEDIA			0x2f6
+#define KEY_APP_YOUTUBE			0x2f7
+#define KEY_APP_RADIOS			0x2f8
+#define KEY_APP_CANALVOD		0x2f9
+#define KEY_APP_PIP			0x2fa
+#define KEY_APP_NETFLIX			0x2fb
+
 /* We avoid low common keys in module aliases so they don't get huge. */
 #define KEY_MIN_INTERESTING	KEY_MUTE
 #define KEY_MAX			0x2ff
@@ -749,7 +792,11 @@
 #define SW_ROTATE_LOCK		0x0c  /* set = rotate locked/disabled */
 #define SW_LINEIN_INSERT	0x0d  /* set = inserted */
 #define SW_MUTE_DEVICE		0x0e  /* set = device disabled */
-#define SW_MAX			0x0f
+#define SW_HPHL_OVERCURRENT	0x0f  /* set = over current on left hph */
+#define SW_HPHR_OVERCURRENT	0x10  /* set = over current on right hph */
+#define SW_MICROPHONE2_INSERT   0x11  /* set = inserted */
+#define SW_UNSUPPORT_INSERT	0x12  /* set = unsupported device inserted */
+#define SW_MAX			0x20
 #define SW_CNT			(SW_MAX+1)
 
 /*
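These vendor KEY_APP_* codes are consumed like any other input key code. A hedged sketch of a remote-control driver reporting one of them (the function and device are illustrative; the driver must have set EV_KEY and the corresponding key bit, e.g. __set_bit(KEY_APP_NETFLIX, idev->keybit), at registration time):

#include <linux/input.h>

static void report_netflix_press(struct input_dev *idev)
{
	input_report_key(idev, KEY_APP_NETFLIX, 1);	/* key down */
	input_sync(idev);
	input_report_key(idev, KEY_APP_NETFLIX, 0);	/* key up */
	input_sync(idev);
}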
diff -ruw linux-4.4.115/arch/Kconfig linux-4.4.115-fbx/arch/Kconfig
--- linux-4.4.115/arch/Kconfig	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/arch/Kconfig	2019-10-29 09:26:22.861195525 +0100
@@ -225,8 +225,8 @@
 config ARCH_TASK_STRUCT_ALLOCATOR
 	bool
 
-# Select if arch has its private alloc_thread_info() function
-config ARCH_THREAD_INFO_ALLOCATOR
+# Select if arch has its private alloc_thread_stack() function
+config ARCH_THREAD_STACK_ALLOCATOR
 	bool
 
 # Select if arch wants to size task_struct dynamically via arch_task_struct_size:
@@ -423,6 +423,15 @@
 
 endchoice
 
+config HAVE_ARCH_WITHIN_STACK_FRAMES
+	bool
+	help
+	  An architecture should select this if it can walk the kernel stack
+	  frames to determine if an object is part of either the arguments
+	  or local variables (i.e. that it excludes saved return addresses,
+	  and similar) by implementing an inline arch_within_stack_frames(),
+	  which is used by CONFIG_HARDENED_USERCOPY.
+
 config HAVE_CONTEXT_TRACKING
 	bool
 	help
@@ -518,6 +527,74 @@
 	  normal C parameter passing, rather than extracting the syscall
 	  argument from pt_regs.
 
+config HAVE_ARCH_MMAP_RND_BITS
+	bool
+	help
+	  An arch should select this symbol if it supports setting a variable
+	  number of bits for use in establishing the base address for mmap
+	  allocations, has MMU enabled and provides values for both:
+	  - ARCH_MMAP_RND_BITS_MIN
+	  - ARCH_MMAP_RND_BITS_MAX
+
+config ARCH_MMAP_RND_BITS_MIN
+	int
+
+config ARCH_MMAP_RND_BITS_MAX
+	int
+
+config ARCH_MMAP_RND_BITS_DEFAULT
+	int
+
+config ARCH_MMAP_RND_BITS
+	int "Number of bits to use for ASLR of mmap base address" if EXPERT
+	range ARCH_MMAP_RND_BITS_MIN ARCH_MMAP_RND_BITS_MAX
+	default ARCH_MMAP_RND_BITS_DEFAULT if ARCH_MMAP_RND_BITS_DEFAULT
+	default ARCH_MMAP_RND_BITS_MIN
+	depends on HAVE_ARCH_MMAP_RND_BITS
+	help
+	  This value can be used to select the number of bits to use to
+	  determine the random offset to the base address of vma regions
+	  resulting from mmap allocations. This value will be bounded
+	  by the architecture's minimum and maximum supported values.
+
+	  This value can be changed after boot using the
+	  /proc/sys/vm/mmap_rnd_bits tunable.
+
+config HAVE_ARCH_MMAP_RND_COMPAT_BITS
+	bool
+	help
+	  An arch should select this symbol if it supports running applications
+	  in compatibility mode, supports setting a variable number of bits for
+	  use in establishing the base address for mmap allocations, has MMU
+	  enabled and provides values for both:
+	  - ARCH_MMAP_RND_COMPAT_BITS_MIN
+	  - ARCH_MMAP_RND_COMPAT_BITS_MAX
+
+config ARCH_MMAP_RND_COMPAT_BITS_MIN
+	int
+
+config ARCH_MMAP_RND_COMPAT_BITS_MAX
+	int
+
+config ARCH_MMAP_RND_COMPAT_BITS_DEFAULT
+	int
+
+config ARCH_MMAP_RND_COMPAT_BITS
+	int "Number of bits to use for ASLR of mmap base address for compatible applications" if EXPERT
+	range ARCH_MMAP_RND_COMPAT_BITS_MIN ARCH_MMAP_RND_COMPAT_BITS_MAX
+	default ARCH_MMAP_RND_COMPAT_BITS_DEFAULT if ARCH_MMAP_RND_COMPAT_BITS_DEFAULT
+	default ARCH_MMAP_RND_COMPAT_BITS_MIN
+	depends on HAVE_ARCH_MMAP_RND_COMPAT_BITS
+	help
+	  This value can be used to select the number of bits to use to
+	  determine the random offset to the base address of vma regions
+	  resulting from mmap allocations for compatible applications. This
+	  value will be bounded by the architecture's minimum and maximum
+	  supported values.
+
+	  This value can be changed after boot using the
+	  /proc/sys/vm/mmap_rnd_compat_bits tunable.
+
 #
 # ABI hall of shame
 #
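For orientation, the mmap_rnd_bits/mmap_rnd_compat_bits values selected above feed the per-arch entropy computation. A hedged sketch modelled on arm64's arch_mmap_rnd() of this kernel generation (not part of this patch; details may differ in this tree):

#include <linux/mm.h>
#include <linux/random.h>

unsigned long arch_mmap_rnd(void)
{
	unsigned long rnd;

#ifdef CONFIG_COMPAT
	if (test_thread_flag(TIF_32BIT))
		rnd = (unsigned long)get_random_int() %
			(1 << mmap_rnd_compat_bits);
	else
#endif
		rnd = (unsigned long)get_random_int() % (1 << mmap_rnd_bits);

	/* The bits randomize page-aligned addresses, hence the shift. */
	return rnd << PAGE_SHIFT;
}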
diff -ruw linux-4.4.115/arch/metag/boot/dts/include/dt-bindings/input/linux-event-codes.h linux-4.4.115-fbx/arch/metag/boot/dts/include/dt-bindings/input/linux-event-codes.h
--- linux-4.4.115/arch/metag/boot/dts/include/dt-bindings/input/linux-event-codes.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/arch/metag/boot/dts/include/dt-bindings/input/linux-event-codes.h	2019-10-29 09:26:25.545221791 +0100
@@ -611,6 +611,37 @@
 #define KEY_KBDINPUTASSIST_ACCEPT		0x264
 #define KEY_KBDINPUTASSIST_CANCEL		0x265
 
+/* Diagonal movement keys */
+#define KEY_RIGHT_UP			0x266
+#define KEY_RIGHT_DOWN			0x267
+#define KEY_LEFT_UP			0x268
+#define KEY_LEFT_DOWN			0x269
+
+#define KEY_ROOT_MENU			0x26a /* Show Device's Root Menu */
+/* Show Top Menu of the Media (e.g. DVD) */
+#define KEY_MEDIA_TOP_MENU		0x26b
+#define KEY_NUMERIC_11			0x26c
+#define KEY_NUMERIC_12			0x26d
+/*
+ * Toggle Audio Description: refers to an audio service that helps blind and
+ * visually impaired consumers understand the action in a program. Note: in
+ * some countries this is referred to as "Video Description".
+ */
+#define KEY_AUDIO_DESC			0x26e
+#define KEY_3D_MODE			0x26f
+#define KEY_NEXT_FAVORITE		0x270
+#define KEY_STOP_RECORD			0x271
+#define KEY_PAUSE_RECORD		0x272
+#define KEY_VOD				0x273 /* Video on Demand */
+#define KEY_UNMUTE			0x274
+#define KEY_FASTREVERSE			0x275
+#define KEY_SLOWREVERSE			0x276
+/*
+ * Control a data application associated with the currently viewed channel,
+ * e.g. teletext or data broadcast application (MHEG, MHP, HbbTV, etc.)
+ */
+#define KEY_DATA			0x277
+
 #define BTN_TRIGGER_HAPPY		0x2c0
 #define BTN_TRIGGER_HAPPY1		0x2c0
 #define BTN_TRIGGER_HAPPY2		0x2c1
@@ -653,6 +684,18 @@
 #define BTN_TRIGGER_HAPPY39		0x2e6
 #define BTN_TRIGGER_HAPPY40		0x2e7
 
+#define KEY_APP_TV			0x2f1
+#define KEY_APP_REPLAY			0x2f2
+#define KEY_APP_VIDEOCLUB		0x2f3
+#define KEY_APP_WHATSON			0x2f4
+#define KEY_APP_RECORDS			0x2f5
+#define KEY_APP_MEDIA			0x2f6
+#define KEY_APP_YOUTUBE			0x2f7
+#define KEY_APP_RADIOS			0x2f8
+#define KEY_APP_CANALVOD		0x2f9
+#define KEY_APP_PIP			0x2fa
+#define KEY_APP_NETFLIX			0x2fb
+
 /* We avoid low common keys in module aliases so they don't get huge. */
 #define KEY_MIN_INTERESTING	KEY_MUTE
 #define KEY_MAX			0x2ff
@@ -749,7 +792,11 @@
 #define SW_ROTATE_LOCK		0x0c  /* set = rotate locked/disabled */
 #define SW_LINEIN_INSERT	0x0d  /* set = inserted */
 #define SW_MUTE_DEVICE		0x0e  /* set = device disabled */
-#define SW_MAX			0x0f
+#define SW_HPHL_OVERCURRENT	0x0f  /* set = over current on left hph */
+#define SW_HPHR_OVERCURRENT	0x10  /* set = over current on right hph */
+#define SW_MICROPHONE2_INSERT   0x11  /* set = inserted */
+#define SW_UNSUPPORT_INSERT	0x12  /* set = unsupported device inserted */
+#define SW_MAX			0x20
 #define SW_CNT			(SW_MAX+1)
 
 /*
diff -ruw linux-4.4.115/arch/mips/boot/dts/include/dt-bindings/input/linux-event-codes.h linux-4.4.115-fbx/arch/mips/boot/dts/include/dt-bindings/input/linux-event-codes.h
--- linux-4.4.115/arch/mips/boot/dts/include/dt-bindings/input/linux-event-codes.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/arch/mips/boot/dts/include/dt-bindings/input/linux-event-codes.h	2019-10-29 09:26:25.545221791 +0100
@@ -611,6 +611,37 @@
 #define KEY_KBDINPUTASSIST_ACCEPT		0x264
 #define KEY_KBDINPUTASSIST_CANCEL		0x265
 
+/* Diagonal movement keys */
+#define KEY_RIGHT_UP			0x266
+#define KEY_RIGHT_DOWN			0x267
+#define KEY_LEFT_UP			0x268
+#define KEY_LEFT_DOWN			0x269
+
+#define KEY_ROOT_MENU			0x26a /* Show Device's Root Menu */
+/* Show Top Menu of the Media (e.g. DVD) */
+#define KEY_MEDIA_TOP_MENU		0x26b
+#define KEY_NUMERIC_11			0x26c
+#define KEY_NUMERIC_12			0x26d
+/*
+ * Toggle Audio Description: refers to an audio service that helps blind and
+ * visually impaired consumers understand the action in a program. Note: in
+ * some countries this is referred to as "Video Description".
+ */
+#define KEY_AUDIO_DESC			0x26e
+#define KEY_3D_MODE			0x26f
+#define KEY_NEXT_FAVORITE		0x270
+#define KEY_STOP_RECORD			0x271
+#define KEY_PAUSE_RECORD		0x272
+#define KEY_VOD				0x273 /* Video on Demand */
+#define KEY_UNMUTE			0x274
+#define KEY_FASTREVERSE			0x275
+#define KEY_SLOWREVERSE			0x276
+/*
+ * Control a data application associated with the currently viewed channel,
+ * e.g. teletext or data broadcast application (MHEG, MHP, HbbTV, etc.)
+ */
+#define KEY_DATA			0x277
+
 #define BTN_TRIGGER_HAPPY		0x2c0
 #define BTN_TRIGGER_HAPPY1		0x2c0
 #define BTN_TRIGGER_HAPPY2		0x2c1
@@ -653,6 +684,18 @@
 #define BTN_TRIGGER_HAPPY39		0x2e6
 #define BTN_TRIGGER_HAPPY40		0x2e7
 
+#define KEY_APP_TV			0x2f1
+#define KEY_APP_REPLAY			0x2f2
+#define KEY_APP_VIDEOCLUB		0x2f3
+#define KEY_APP_WHATSON			0x2f4
+#define KEY_APP_RECORDS			0x2f5
+#define KEY_APP_MEDIA			0x2f6
+#define KEY_APP_YOUTUBE			0x2f7
+#define KEY_APP_RADIOS			0x2f8
+#define KEY_APP_CANALVOD		0x2f9
+#define KEY_APP_PIP			0x2fa
+#define KEY_APP_NETFLIX			0x2fb
+
 /* We avoid low common keys in module aliases so they don't get huge. */
 #define KEY_MIN_INTERESTING	KEY_MUTE
 #define KEY_MAX			0x2ff
@@ -749,7 +792,11 @@
 #define SW_ROTATE_LOCK		0x0c  /* set = rotate locked/disabled */
 #define SW_LINEIN_INSERT	0x0d  /* set = inserted */
 #define SW_MUTE_DEVICE		0x0e  /* set = device disabled */
-#define SW_MAX			0x0f
+#define SW_HPHL_OVERCURRENT	0x0f  /* set = over current on left hph */
+#define SW_HPHR_OVERCURRENT	0x10  /* set = over current on right hph */
+#define SW_MICROPHONE2_INSERT   0x11  /* set = inserted */
+#define SW_UNSUPPORT_INSERT	0x12  /* set = unsupported device inserted */
+#define SW_MAX			0x20
 #define SW_CNT			(SW_MAX+1)
 
 /*
diff -ruw linux-4.4.115/arch/powerpc/boot/dts/include/dt-bindings/input/linux-event-codes.h linux-4.4.115-fbx/arch/powerpc/boot/dts/include/dt-bindings/input/linux-event-codes.h
--- linux-4.4.115/arch/powerpc/boot/dts/include/dt-bindings/input/linux-event-codes.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/arch/powerpc/boot/dts/include/dt-bindings/input/linux-event-codes.h	2019-10-29 09:26:25.545221791 +0100
@@ -611,6 +611,37 @@
 #define KEY_KBDINPUTASSIST_ACCEPT		0x264
 #define KEY_KBDINPUTASSIST_CANCEL		0x265
 
+/* Diagonal movement keys */
+#define KEY_RIGHT_UP			0x266
+#define KEY_RIGHT_DOWN			0x267
+#define KEY_LEFT_UP			0x268
+#define KEY_LEFT_DOWN			0x269
+
+#define KEY_ROOT_MENU			0x26a /* Show Device's Root Menu */
+/* Show Top Menu of the Media (e.g. DVD) */
+#define KEY_MEDIA_TOP_MENU		0x26b
+#define KEY_NUMERIC_11			0x26c
+#define KEY_NUMERIC_12			0x26d
+/*
+ * Toggle Audio Description: refers to an audio service that helps blind and
+ * visually impaired consumers understand the action in a program. Note: in
+ * some countries this is referred to as "Video Description".
+ */
+#define KEY_AUDIO_DESC			0x26e
+#define KEY_3D_MODE			0x26f
+#define KEY_NEXT_FAVORITE		0x270
+#define KEY_STOP_RECORD			0x271
+#define KEY_PAUSE_RECORD		0x272
+#define KEY_VOD				0x273 /* Video on Demand */
+#define KEY_UNMUTE			0x274
+#define KEY_FASTREVERSE			0x275
+#define KEY_SLOWREVERSE			0x276
+/*
+ * Control a data application associated with the currently viewed channel,
+ * e.g. teletext or data broadcast application (MHEG, MHP, HbbTV, etc.)
+ */
+#define KEY_DATA			0x277
+
 #define BTN_TRIGGER_HAPPY		0x2c0
 #define BTN_TRIGGER_HAPPY1		0x2c0
 #define BTN_TRIGGER_HAPPY2		0x2c1
@@ -653,6 +684,18 @@
 #define BTN_TRIGGER_HAPPY39		0x2e6
 #define BTN_TRIGGER_HAPPY40		0x2e7
 
+#define KEY_APP_TV			0x2f1
+#define KEY_APP_REPLAY			0x2f2
+#define KEY_APP_VIDEOCLUB		0x2f3
+#define KEY_APP_WHATSON			0x2f4
+#define KEY_APP_RECORDS			0x2f5
+#define KEY_APP_MEDIA			0x2f6
+#define KEY_APP_YOUTUBE			0x2f7
+#define KEY_APP_RADIOS			0x2f8
+#define KEY_APP_CANALVOD		0x2f9
+#define KEY_APP_PIP			0x2fa
+#define KEY_APP_NETFLIX			0x2fb
+
 /* We avoid low common keys in module aliases so they don't get huge. */
 #define KEY_MIN_INTERESTING	KEY_MUTE
 #define KEY_MAX			0x2ff
@@ -749,7 +792,11 @@
 #define SW_ROTATE_LOCK		0x0c  /* set = rotate locked/disabled */
 #define SW_LINEIN_INSERT	0x0d  /* set = inserted */
 #define SW_MUTE_DEVICE		0x0e  /* set = device disabled */
-#define SW_MAX			0x0f
+#define SW_HPHL_OVERCURRENT	0x0f  /* set = over current on left hph */
+#define SW_HPHR_OVERCURRENT	0x10  /* set = over current on right hph */
+#define SW_MICROPHONE2_INSERT   0x11  /* set = inserted */
+#define SW_UNSUPPORT_INSERT	0x12  /* set = unsupported device inserted */
+#define SW_MAX			0x20
 #define SW_CNT			(SW_MAX+1)
 
 /*
diff -ruw linux-4.4.115/block/bio.c linux-4.4.115-fbx/block/bio.c
--- linux-4.4.115/block/bio.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/block/bio.c	2019-10-29 09:26:23.329200105 +0100
@@ -31,6 +31,8 @@
 
 #include <trace/events/block.h>
 
+#include "blk.h"
+
 /*
  * Test patch to inline a certain number of bi_io_vec's inside the bio
  * itself, to shrink a bio data allocation from two mempool calls to one
@@ -590,6 +592,7 @@
 	bio->bi_rw = bio_src->bi_rw;
 	bio->bi_iter = bio_src->bi_iter;
 	bio->bi_io_vec = bio_src->bi_io_vec;
+	bio->bi_dio_inode = bio_src->bi_dio_inode;
 
 	bio_clone_blkcg_association(bio, bio_src);
 }
@@ -1782,8 +1785,10 @@
 			bio_put(bio);
 			bio = parent;
 		} else {
-			if (bio->bi_end_io)
+			if (bio->bi_end_io) {
+				blk_update_perf_stats(bio);
 				bio->bi_end_io(bio);
+			}
 			bio = NULL;
 		}
 	}
diff -ruw linux-4.4.115/block/blk-core.c linux-4.4.115-fbx/block/blk-core.c
--- linux-4.4.115/block/blk-core.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/block/blk-core.c	2019-10-29 09:26:23.333200144 +0100
@@ -11,6 +11,12 @@
 /*
  * This handles all read/write requests to block devices
  */
+
+#ifdef CONFIG_BLOCK_PERF_FRAMEWORK
+#define DRIVER_NAME "Block"
+#define pr_fmt(fmt) DRIVER_NAME ": %s: " fmt, __func__
+#endif
+
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/backing-dev.h>
@@ -34,12 +40,20 @@
 #include <linux/pm_runtime.h>
 #include <linux/blk-cgroup.h>
 
+#ifdef CONFIG_BLOCK_PERF_FRAMEWORK
+#include <linux/ktime.h>
+#include <linux/spinlock.h>
+#include <linux/debugfs.h>
+#endif
+
 #define CREATE_TRACE_POINTS
 #include <trace/events/block.h>
 
 #include "blk.h"
 #include "blk-mq.h"
 
+#include <linux/math64.h>
+
 EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_remap);
 EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_remap);
 EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_complete);
@@ -73,7 +87,7 @@
 	 * flip its congestion state for events on other blkcgs.
 	 */
 	if (rl == &rl->q->root_rl)
-		clear_wb_congested(rl->q->backing_dev_info.wb.congested, sync);
+		clear_wb_congested(rl->q->backing_dev_info->wb.congested, sync);
 #endif
 }
 
@@ -84,7 +98,7 @@
 #else
 	/* see blk_clear_congested() */
 	if (rl == &rl->q->root_rl)
-		set_wb_congested(rl->q->backing_dev_info.wb.congested, sync);
+		set_wb_congested(rl->q->backing_dev_info->wb.congested, sync);
 #endif
 }
 
@@ -108,14 +122,12 @@
  * @bdev:	device
  *
  * Locates the passed device's request queue and returns the address of its
- * backing_dev_info.  This function can only be called if @bdev is opened
- * and the return value is never NULL.
+ * backing_dev_info. The return value is never NULL; however, we may return
+ * &noop_backing_dev_info if the bdev is not currently open.
  */
 struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev)
 {
-	struct request_queue *q = bdev_get_queue(bdev);
-
-	return &q->backing_dev_info;
+	return bdev->bd_bdi;
 }
 EXPORT_SYMBOL(blk_get_backing_dev_info);
 
@@ -583,7 +595,7 @@
 	blk_flush_integrity();
 
 	/* @q won't process any more request, flush async actions */
-	del_timer_sync(&q->backing_dev_info.laptop_mode_wb_timer);
+	del_timer_sync(&q->backing_dev_info->laptop_mode_wb_timer);
 	blk_sync_queue(q);
 
 	if (q->mq_ops)
@@ -595,8 +607,6 @@
 		q->queue_lock = &q->__queue_lock;
 	spin_unlock_irq(lock);
 
-	bdi_unregister(&q->backing_dev_info);
-
 	/* @q is and will stay empty, shutdown and put */
 	blk_put_queue(q);
 }
@@ -685,7 +695,6 @@
 struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
 {
 	struct request_queue *q;
-	int err;
 
 	q = kmem_cache_alloc_node(blk_requestq_cachep,
 				gfp_mask | __GFP_ZERO, node_id);
@@ -700,17 +709,17 @@
 	if (!q->bio_split)
 		goto fail_id;
 
-	q->backing_dev_info.ra_pages =
+	q->backing_dev_info = bdi_alloc_node(gfp_mask, node_id);
+	if (!q->backing_dev_info)
+		goto fail_split;
+
+	q->backing_dev_info->ra_pages =
 			(VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE;
-	q->backing_dev_info.capabilities = BDI_CAP_CGROUP_WRITEBACK;
-	q->backing_dev_info.name = "block";
+	q->backing_dev_info->capabilities = BDI_CAP_CGROUP_WRITEBACK;
+	q->backing_dev_info->name = "block";
 	q->node = node_id;
 
-	err = bdi_init(&q->backing_dev_info);
-	if (err)
-		goto fail_split;
-
-	setup_timer(&q->backing_dev_info.laptop_mode_wb_timer,
+	setup_timer(&q->backing_dev_info->laptop_mode_wb_timer,
 		    laptop_mode_timer_fn, (unsigned long) q);
 	setup_timer(&q->timeout, blk_rq_timed_out_timer, (unsigned long) q);
 	INIT_LIST_HEAD(&q->queue_head);
@@ -760,7 +769,7 @@
 fail_ref:
 	percpu_ref_exit(&q->q_usage_counter);
 fail_bdi:
-	bdi_destroy(&q->backing_dev_info);
+	bdi_put(q->backing_dev_info);
 fail_split:
 	bioset_free(q->bio_split);
 fail_id:
@@ -1183,7 +1192,7 @@
 	 * disturb iosched and blkcg but weird is bettern than dead.
 	 */
 	printk_ratelimited(KERN_WARNING "%s: dev %s: request aux data allocation failed, iosched may be disturbed\n",
-			   __func__, dev_name(q->backing_dev_info.dev));
+			   __func__, dev_name(q->backing_dev_info->dev));
 
 	rq->cmd_flags &= ~REQ_ELVPRIV;
 	rq->elv.icq = NULL;
@@ -1277,8 +1286,6 @@
 {
 	struct request *rq;
 
-	BUG_ON(rw != READ && rw != WRITE);
-
 	/* create ioc upfront */
 	create_io_context(gfp_mask, q->node);
 
@@ -1450,9 +1457,11 @@
 #ifdef CONFIG_PM
 static void blk_pm_put_request(struct request *rq)
 {
-	if (rq->q->dev && !(rq->cmd_flags & REQ_PM) && !--rq->q->nr_pending)
-		pm_runtime_mark_last_busy(rq->q->dev);
-}
+	if (rq->q->dev && !(rq->cmd_flags & REQ_PM) && rq->q->nr_pending) {
+		if (!--rq->q->nr_pending)
+			pm_runtime_mark_last_busy(rq->q->dev);
+	}
+}
 #else
 static inline void blk_pm_put_request(struct request *rq) {}
 #endif
@@ -1477,6 +1486,9 @@
 	/* this is a bio leak */
 	WARN_ON(req->bio != NULL);
 
+	/* this is a bio leak if the bio is not tagged with BIO_DONTFREE */
+	WARN_ON(req->bio && !bio_flagged(req->bio, BIO_DONTFREE));
+
 	/*
 	 * Request may not have originated from ll_rw_blk. if not,
 	 * it didn't come out of our reserved rq pools
@@ -1698,6 +1710,7 @@
 	req->ioprio = bio_prio(bio);
 	blk_rq_bio_prep(req->q, req, bio);
 }
+EXPORT_SYMBOL(init_request_from_bio);
 
 static blk_qc_t blk_queue_bio(struct request_queue *q, struct bio *bio)
 {
@@ -1722,7 +1735,8 @@
 		return BLK_QC_T_NONE;
 	}
 
-	if (bio->bi_rw & (REQ_FLUSH | REQ_FUA)) {
+	if (bio->bi_rw & (REQ_FLUSH | REQ_FUA | REQ_POST_FLUSH_BARRIER |
+			  REQ_BARRIER)) {
 		spin_lock_irq(q->queue_lock);
 		where = ELEVATOR_INSERT_FLUSH;
 		goto get_rq;
@@ -2105,6 +2119,477 @@
 }
 EXPORT_SYMBOL(generic_make_request);
 
+#ifdef CONFIG_BLK_DEV_IO_TRACE
+static inline struct task_struct *get_dirty_task(struct bio *bio)
+{
+	/*
+	 * Not all the pages in the bio are dirtied by the
+	 * same task, but most likely they will be, since the
+	 * sectors accessed on the device must be adjacent.
+	 */
+	if (bio->bi_io_vec && bio->bi_io_vec->bv_page &&
+	    bio->bi_io_vec->bv_page->tsk_dirty)
+		return bio->bi_io_vec->bv_page->tsk_dirty;
+	else
+		return current;
+}
+#else
+static inline struct task_struct *get_dirty_task(struct bio *bio)
+{
+	return current;
+}
+#endif
+
+#ifdef CONFIG_BLOCK_PERF_FRAMEWORK
+#define BLK_PERF_SIZE (1024 * 15)
+#define BLK_PERF_HIST_SIZE (sizeof(u32) * BLK_PERF_SIZE)
+
+struct blk_perf_stats {
+	u32 *read_hist;
+	u32 *write_hist;
+	u32 *flush_hist;
+	int buffers_alloced;
+	ktime_t max_read_time;
+	ktime_t max_write_time;
+	ktime_t max_flush_time;
+	ktime_t min_write_time;
+	ktime_t min_read_time;
+	ktime_t min_flush_time;
+	ktime_t total_write_time;
+	ktime_t total_read_time;
+	u64 total_read_size;
+	u64 total_write_size;
+	spinlock_t lock;
+	int is_enabled;
+};
+
+static struct blk_perf_stats blk_perf;
+static struct dentry *blk_perf_debug_dir;
+
+static int alloc_histogram_buffers(void)
+{
+	int ret = 0;
+
+	if (!blk_perf.read_hist)
+		blk_perf.read_hist = kzalloc(BLK_PERF_HIST_SIZE, GFP_KERNEL);
+
+	if (!blk_perf.write_hist)
+		blk_perf.write_hist = kzalloc(BLK_PERF_HIST_SIZE, GFP_KERNEL);
+
+	if (!blk_perf.flush_hist)
+		blk_perf.flush_hist = kzalloc(BLK_PERF_HIST_SIZE, GFP_KERNEL);
+
+	if (!blk_perf.read_hist || !blk_perf.write_hist || !blk_perf.flush_hist)
+		ret = -ENOMEM;
+
+	if (!ret)
+		blk_perf.buffers_alloced = 1;
+	return ret;
+}
+
+static void clear_histogram_buffers(void)
+{
+	if (!blk_perf.buffers_alloced)
+		return;
+	memset(blk_perf.read_hist, 0, BLK_PERF_HIST_SIZE);
+	memset(blk_perf.write_hist, 0, BLK_PERF_HIST_SIZE);
+	memset(blk_perf.flush_hist, 0, BLK_PERF_HIST_SIZE);
+}
+
+static int enable_perf(void *data, u64 val)
+{
+	int ret = 0;
+
+	if (!blk_perf.buffers_alloced)
+		ret = alloc_histogram_buffers();
+
+	if (ret)
+		return ret;
+
+	spin_lock(&blk_perf.lock);
+	blk_perf.is_enabled = val;
+	spin_unlock(&blk_perf.lock);
+	return 0;
+}
+
+static int is_perf_enabled(void *data, u64 *val)
+{
+	spin_lock(&blk_perf.lock);
+	*val = blk_perf.is_enabled;
+	spin_unlock(&blk_perf.lock);
+	return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(enable_perf_fops, is_perf_enabled, enable_perf,
+			"%llu\n");
+
+static char *blk_debug_buffer;
+static u32 blk_debug_data_size;
+static DEFINE_MUTEX(blk_perf_debug_buffer_mutex);
+
+static ssize_t blk_perf_read(struct file *file, char __user *buf,
+			  size_t count, loff_t *file_pos)
+{
+	ssize_t ret = 0;
+
+	mutex_lock(&blk_perf_debug_buffer_mutex);
+	ret = simple_read_from_buffer(buf, count, file_pos, blk_debug_buffer,
+					blk_debug_data_size);
+	mutex_unlock(&blk_perf_debug_buffer_mutex);
+
+	return ret;
+}
+
+static int blk_debug_buffer_alloc(u32 buffer_size)
+{
+	int ret = 0;
+
+	mutex_lock(&blk_perf_debug_buffer_mutex);
+	if (blk_debug_buffer != NULL) {
+		pr_err("blk_debug_buffer is in use\n");
+		ret = -EBUSY;
+		goto end;
+	}
+	blk_debug_buffer = kzalloc(buffer_size, GFP_KERNEL);
+	if (!blk_debug_buffer)
+		ret = -ENOMEM;
+end:
+	mutex_unlock(&blk_perf_debug_buffer_mutex);
+	return ret;
+}
+
+static int blk_perf_close(struct inode *inode, struct file *file)
+{
+	mutex_lock(&blk_perf_debug_buffer_mutex);
+	blk_debug_data_size = 0;
+	kfree(blk_debug_buffer);
+	blk_debug_buffer = NULL;
+	mutex_unlock(&blk_perf_debug_buffer_mutex);
+	return 0;
+}
+
+static u32 fill_basic_perf_info(char *buffer, u32 buffer_size)
+{
+	u32 size = 0;
+
+	size += scnprintf(buffer + size, buffer_size - size, "\n");
+
+	spin_lock(&blk_perf.lock);
+	size += scnprintf(buffer + size, buffer_size - size,
+			  "max_read_time_ms: %llu\n",
+			  ktime_to_ms(blk_perf.max_read_time));
+
+	size += scnprintf(buffer + size, buffer_size - size,
+			  "min_read_time_ms: %llu\n",
+			  ktime_to_ms(blk_perf.min_read_time));
+
+	size += scnprintf(buffer + size, buffer_size - size,
+			  "total_read_time_ms: %llu\n",
+			  ktime_to_ms(blk_perf.total_read_time));
+
+	size += scnprintf(buffer + size, buffer_size - size,
+			  "total_read_size: %llu\n\n",
+			  blk_perf.total_read_size);
+
+	size += scnprintf(buffer + size, buffer_size - size,
+			  "max_write_time_ms: %llu\n",
+			  ktime_to_ms(blk_perf.max_write_time));
+
+	size += scnprintf(buffer + size, buffer_size - size,
+			  "min_write_time_ms: %llu\n",
+			  ktime_to_ms(blk_perf.min_write_time));
+
+	size += scnprintf(buffer + size, buffer_size - size,
+			  "total_write_time_ms: %llu\n",
+			  ktime_to_ms(blk_perf.total_write_time));
+
+	size += scnprintf(buffer + size, buffer_size - size,
+			  "total_write_size: %llu\n\n",
+			  blk_perf.total_write_size);
+
+	size += scnprintf(buffer + size, buffer_size - size,
+			  "max_flush_time_ms: %llu\n",
+			  ktime_to_ms(blk_perf.max_flush_time));
+
+	size += scnprintf(buffer + size, buffer_size - size,
+			  "min_flush_time_ms: %llu\n\n",
+			  ktime_to_ms(blk_perf.min_flush_time));
+
+	spin_unlock(&blk_perf.lock);
+
+	return size;
+}
+
+static int basic_perf_open(struct inode *inode, struct file *file)
+{
+	u32 buffer_size;
+	int ret;
+
+	buffer_size = BLK_PERF_HIST_SIZE;
+	ret = blk_debug_buffer_alloc(buffer_size);
+	if (ret)
+		return ret;
+
+	mutex_lock(&blk_perf_debug_buffer_mutex);
+	blk_debug_data_size = fill_basic_perf_info(blk_debug_buffer,
+						   buffer_size);
+	mutex_unlock(&blk_perf_debug_buffer_mutex);
+	return 0;
+}
+
+
+static const struct file_operations basic_perf_ops = {
+	.read = blk_perf_read,
+	.release = blk_perf_close,
+	.open = basic_perf_open,
+};
+
+static int hist_open_helper(void *hist_buf)
+{
+	int ret;
+
+	if (!blk_perf.buffers_alloced)
+		return -EINVAL;
+
+	ret = blk_debug_buffer_alloc(BLK_PERF_HIST_SIZE);
+	if (ret)
+		return ret;
+
+	spin_lock(&blk_perf.lock);
+	memcpy(blk_debug_buffer, hist_buf, BLK_PERF_HIST_SIZE);
+	spin_unlock(&blk_perf.lock);
+
+	mutex_lock(&blk_perf_debug_buffer_mutex);
+	blk_debug_data_size = BLK_PERF_HIST_SIZE;
+	mutex_unlock(&blk_perf_debug_buffer_mutex);
+	return 0;
+}
+
+static int write_hist_open(struct inode *inode, struct file *file)
+{
+	return hist_open_helper(blk_perf.write_hist);
+}
+
+static const struct file_operations write_hist_ops = {
+	.read = blk_perf_read,
+	.release = blk_perf_close,
+	.open = write_hist_open,
+};
+
+
+static int read_hist_open(struct inode *inode, struct file *file)
+{
+	return hist_open_helper(blk_perf.read_hist);
+}
+
+static const struct file_operations read_hist_ops = {
+	.read = blk_perf_read,
+	.release = blk_perf_close,
+	.open = read_hist_open,
+};
+
+static int flush_hist_open(struct inode *inode, struct file *file)
+{
+	return hist_open_helper(blk_perf.flush_hist);
+}
+
+static const struct file_operations flush_hist_ops = {
+	.read = blk_perf_read,
+	.release = blk_perf_close,
+	.open = flush_hist_open,
+};
+
+static void clear_perf_stats_helper(void)
+{
+	spin_lock(&blk_perf.lock);
+	blk_perf.max_write_time = ktime_set(0, 0);
+	blk_perf.max_read_time = ktime_set(0, 0);
+	blk_perf.max_flush_time = ktime_set(0, 0);
+	blk_perf.min_write_time = ktime_set(KTIME_MAX, 0);
+	blk_perf.min_read_time = ktime_set(KTIME_MAX, 0);
+	blk_perf.min_flush_time = ktime_set(KTIME_MAX, 0);
+	blk_perf.total_write_time = ktime_set(0, 0);
+	blk_perf.total_read_time = ktime_set(0, 0);
+	blk_perf.total_read_size = 0;
+	blk_perf.total_write_size = 0;
+	blk_perf.is_enabled = 0;
+	clear_histogram_buffers();
+	spin_unlock(&blk_perf.lock);
+}
+
+static int clear_perf_stats(void *data, u64 val)
+{
+	clear_perf_stats_helper();
+	return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(clear_perf_stats_fops, NULL, clear_perf_stats,
+			"%llu\n");
+
+static void blk_debugfs_init(void)
+{
+	struct dentry *f_ent;
+
+	blk_perf_debug_dir = debugfs_create_dir("block_perf", NULL);
+	if (IS_ERR(blk_perf_debug_dir)) {
+		pr_err("Failed to create block_perf debug_fs directory\n");
+		return;
+	}
+
+	f_ent = debugfs_create_file("basic_perf", 0400, blk_perf_debug_dir,
+					NULL, &basic_perf_ops);
+	if (IS_ERR(f_ent)) {
+		pr_err("Failed to create debug_fs basic_perf file\n");
+		return;
+	}
+
+	f_ent = debugfs_create_file("write_hist", 0400, blk_perf_debug_dir,
+					NULL, &write_hist_ops);
+	if (IS_ERR(f_ent)) {
+		pr_err("Failed to create debug_fs write_hist file\n");
+		return;
+	}
+
+	f_ent = debugfs_create_file("read_hist", 0400, blk_perf_debug_dir,
+					NULL, &read_hist_ops);
+	if (IS_ERR(f_ent)) {
+		pr_err("Failed to create debug_fs read_hist file\n");
+		return;
+	}
+
+	f_ent = debugfs_create_file("flush_hist", 0400, blk_perf_debug_dir,
+					NULL, &flush_hist_ops);
+	if (IS_ERR(f_ent)) {
+		pr_err("Failed to create debug_fs flush_hist file\n");
+		return;
+	}
+
+	f_ent = debugfs_create_file("enable_perf", 0600, blk_perf_debug_dir,
+					NULL, &enable_perf_fops);
+	if (IS_ERR(f_ent)) {
+		pr_err("Failed to create debug_fs enable_perf file\n");
+		return;
+	}
+
+	f_ent = debugfs_create_file("clear_perf_stats", 0200,
+				     blk_perf_debug_dir, NULL,
+				     &clear_perf_stats_fops);
+	if (IS_ERR(f_ent)) {
+		pr_err("Failed to create debug_fs clear_perf_stats file\n");
+		return;
+	}
+}
+
+static void blk_init_perf(void)
+{
+	blk_debugfs_init();
+	spin_lock_init(&blk_perf.lock);
+
+	clear_perf_stats_helper();
+}
+
+
+static void set_submit_info(struct bio *bio, unsigned int count)
+{
+	ktime_t submit_time;
+
+	if (unlikely(blk_perf.is_enabled))  {
+		submit_time = ktime_get();
+		bio->submit_time.tv64 = submit_time.tv64;
+		bio->blk_sector_count = count;
+		return;
+	}
+
+	bio->submit_time.tv64 = 0;
+	bio->blk_sector_count = 0;
+}
+
+void blk_update_perf_read_write_stats(ktime_t bio_process_time, int is_write,
+					int count)
+{
+	u32 bio_process_time_ms;
+
+	bio_process_time_ms = ktime_to_ms(bio_process_time);
+	if (bio_process_time_ms >= BLK_PERF_SIZE)
+		bio_process_time_ms = BLK_PERF_SIZE - 1;
+
+	if (is_write) {
+		if (ktime_after(bio_process_time, blk_perf.max_write_time))
+			blk_perf.max_write_time = bio_process_time;
+
+		if (ktime_before(bio_process_time, blk_perf.min_write_time))
+			blk_perf.min_write_time = bio_process_time;
+		blk_perf.total_write_time =
+			ktime_add(blk_perf.total_write_time, bio_process_time);
+		blk_perf.total_write_size += count;
+		blk_perf.write_hist[bio_process_time_ms] += count;
+
+	} else {
+		if (ktime_after(bio_process_time, blk_perf.max_read_time))
+			blk_perf.max_read_time = bio_process_time;
+
+		if (ktime_before(bio_process_time, blk_perf.min_read_time))
+			blk_perf.min_read_time = bio_process_time;
+		blk_perf.total_read_time =
+			 ktime_add(blk_perf.total_read_time, bio_process_time);
+		blk_perf.total_read_size += count;
+		blk_perf.read_hist[bio_process_time_ms] += count;
+	}
+}
+void blk_update_perf_stats(struct bio *bio)
+{
+	ktime_t bio_process_time;
+	u32 bio_process_time_ms;
+	u32 count;
+
+	spin_lock(&blk_perf.lock);
+	if (likely(!blk_perf.is_enabled))
+		goto end;
+	if (!bio->submit_time.tv64)
+		goto end;
+	bio_process_time = ktime_sub(ktime_get(), bio->submit_time);
+
+	count = bio->blk_sector_count;
+
+	if (count) {
+		int is_write = 0;
+
+		if (bio->bi_rw & WRITE ||
+		    unlikely(bio->bi_rw & REQ_WRITE_SAME))
+			is_write = 1;
+
+		blk_update_perf_read_write_stats(bio_process_time, is_write,
+						 count);
+	} else {
+
+		bio_process_time_ms = ktime_to_ms(bio_process_time);
+		if (bio_process_time_ms >= BLK_PERF_SIZE)
+			bio_process_time_ms = BLK_PERF_SIZE - 1;
+
+		if (ktime_after(bio_process_time, blk_perf.max_flush_time))
+			blk_perf.max_flush_time = bio_process_time;
+
+		if (ktime_before(bio_process_time, blk_perf.min_flush_time))
+			blk_perf.min_flush_time = bio_process_time;
+
+		blk_perf.flush_hist[bio_process_time_ms] += 1;
+	}
+end:
+	spin_unlock(&blk_perf.lock);
+
+}
+#else
+static inline void set_submit_info(struct bio *bio, unsigned int count)
+{
+	(void) bio;
+	(void) count;
+}
+
+static inline void blk_init_perf(void)
+{
+}
+#endif /* #ifdef CONFIG_BLOCK_PERF_FRAMEWORK */
+
 /**
  * submit_bio - submit a bio to the block device layer for I/O
  * @rw: whether to %READ or %WRITE, or maybe to %READA (read ahead)
@@ -2117,6 +2602,7 @@
  */
 blk_qc_t submit_bio(int rw, struct bio *bio)
 {
+	unsigned int count = 0;
 	bio->bi_rw |= rw;
 
 	/*
@@ -2124,8 +2610,6 @@
 	 * go through the normal accounting stuff before submission.
 	 */
 	if (bio_has_data(bio)) {
-		unsigned int count;
-
 		if (unlikely(rw & REQ_WRITE_SAME))
 			count = bdev_logical_block_size(bio->bi_bdev) >> 9;
 		else
@@ -2140,8 +2624,11 @@
 
 		if (unlikely(block_dump)) {
 			char b[BDEVNAME_SIZE];
+			struct task_struct *tsk;
+
+			tsk = get_dirty_task(bio);
 			printk(KERN_DEBUG "%s(%d): %s block %Lu on %s (%u sectors)\n",
-			current->comm, task_pid_nr(current),
+				tsk->comm, task_pid_nr(tsk),
 				(rw & WRITE) ? "WRITE" : "READ",
 				(unsigned long long)bio->bi_iter.bi_sector,
 				bdevname(bio->bi_bdev, b),
@@ -2149,6 +2636,7 @@
 		}
 	}
 
+	set_submit_info(bio, count);
 	return generic_make_request(bio);
 }
 EXPORT_SYMBOL(submit_bio);
@@ -2645,6 +3133,15 @@
 	blk_account_io_completion(req, nr_bytes);
 
 	total_bytes = 0;
+
+	/*
+	 * Check for this if flagged: request-based dm needs to perform
+	 * post-processing, so don't end the bios or the request; the DM
+	 * layer takes care of that.
+	 */
+	if (bio_flagged(req->bio, BIO_DONTFREE))
+		return false;
+
 	while (req->bio) {
 		struct bio *bio = req->bio;
 		unsigned bio_bytes = min(bio->bi_iter.bi_size, nr_bytes);
@@ -2751,7 +3248,7 @@
 	BUG_ON(blk_queued_rq(req));
 
 	if (unlikely(laptop_mode) && req->cmd_type == REQ_TYPE_FS)
-		laptop_io_completion(&req->q->backing_dev_info);
+		laptop_io_completion(req->q->backing_dev_info);
 
 	blk_delete_timer(req);
 
@@ -3560,6 +4057,55 @@
 
 	blk_requestq_cachep = kmem_cache_create("blkdev_queue",
 			sizeof(struct request_queue), 0, SLAB_PANIC, NULL);
-
+	blk_init_perf();
 	return 0;
 }
+
+/*
+ * Blk IO latency support. We want this to be as cheap as possible, so we do
+ * it locklessly (and avoid atomics); being off by a few counts in this
+ * code is not harmful, and we don't want to do anything that is
+ * perf-impactful.
+ * TODO: If necessary, we can make the histograms per-cpu and aggregate
+ * them when printing them out.
+ */
+ssize_t
+blk_latency_hist_show(char *name, struct io_latency_state *s, char *buf,
+		int buf_size)
+{
+	int i;
+	int bytes_written = 0;
+	u_int64_t num_elem, elem;
+	int pct;
+	u_int64_t average;
+
+	num_elem = s->latency_elems;
+	if (num_elem > 0) {
+		average = div64_u64(s->latency_sum, s->latency_elems);
+		bytes_written += scnprintf(buf + bytes_written,
+				buf_size - bytes_written,
+				"IO svc_time %s Latency Histogram (n = %llu,"
+				" average = %llu):\n", name, num_elem, average);
+		for (i = 0;
+		     i < ARRAY_SIZE(latency_x_axis_us);
+		     i++) {
+			elem = s->latency_y_axis[i];
+			pct = div64_u64(elem * 100, num_elem);
+			bytes_written += scnprintf(buf + bytes_written,
+					buf_size - bytes_written,
+					"\t< %6lluus%15llu%15d%%\n",
+					latency_x_axis_us[i],
+					elem, pct);
+		}
+		/* Last element in y-axis table is overflow */
+		elem = s->latency_y_axis[i];
+		pct = div64_u64(elem * 100, num_elem);
+		bytes_written += scnprintf(buf + bytes_written,
+				buf_size - bytes_written,
+				"\t>=%6lluus%15llu%15d%%\n",
+				latency_x_axis_us[i - 1], elem, pct);
+	}
+
+	return bytes_written;
+}
+EXPORT_SYMBOL(blk_latency_hist_show);
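blk_latency_hist_show() above only formats the histogram; the updater it presumes, consistent with the fields it reads (latency_elems, latency_sum, latency_y_axis with a trailing overflow slot), would look roughly like the hedged sketch below. This is an illustration, not code from the patch:

/*
 * Hypothetical updater; latency_y_axis is taken to have
 * ARRAY_SIZE(latency_x_axis_us) + 1 slots, the last being the overflow
 * bucket printed above.
 */
static inline void blk_update_latency_hist(struct io_latency_state *s,
					   u64 delta_us)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(latency_x_axis_us); i++)
		if (delta_us < (u64)latency_x_axis_us[i])
			break;

	s->latency_y_axis[i]++;		/* falls through to overflow bucket */
	s->latency_elems++;
	s->latency_sum += delta_us;
}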
diff -ruw linux-4.4.115/block/blk-flush.c linux-4.4.115-fbx/block/blk-flush.c
--- linux-4.4.115/block/blk-flush.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/block/blk-flush.c	2019-01-22 16:16:22.715239271 +0100
@@ -62,6 +62,45 @@
  * The above peculiarity requires that each FLUSH/FUA request has only one
  * bio attached to it, which is guaranteed as they aren't allowed to be
  * merged in the usual way.
+ *
+ * Cache Barrier support:
+ *
+ * A cache barrier is a request that instructs the storage device to apply
+ * ordering when writing data from the device's cache to the medium. Write
+ * requests arriving before a 'cache barrier' request will be written to the
+ * medium before write requests that arrive after the 'cache barrier'.
+ * Since barrier requests are not supported by all block devices, the
+ * appropriate fallback is a flush request. This makes sure that applications
+ * using it can rely on correct functionality without considering the
+ * specifics of the device.
+ *
+ * If a barrier request is queued, it will follow the same path as a flush
+ * request. When it is time to issue the request, the flush pending list will
+ * be scanned; if it contains only requests marked with barrier, a barrier
+ * request will be issued. Otherwise, if at least one flush is pending, a
+ * flush will be issued.
+ * A barrier request is a flush request marked with the REQ_BARRIER flag. It
+ * is the LLD's responsibility to test this flag if it supports the barrier
+ * feature and decide whether to issue a flush or a barrier request.
+ *
+ * When considering a barrier request, three sequences must be addressed:
+ * 1. (A)Barrier -> (B)Data: this sequence will be marked with
+ *    WRITE_FLUSH_BARRIER or (REQ_FLUSH | REQ_BARRIER).
+ *    This scenario will be split into a PREFLUSH and DATA, and no additional
+ *    execution phases are required. If barrier is not supported, a flush
+ *    will be issued instead of (A).
+ * 2. (A)Data -> (B)Barrier: this sequence will be marked with
+ *    WRITE_POST_FLUSH_BARRIER or (REQ_POST_FLUSH_BARRIER | REQ_BARRIER).
+ *    When barrier is supported, this request will execute DATA and
+ *    then POSTFLUSH.
+ *    If barrier is not supported but FUA is, the barrier may be replaced
+ *    with DATA+FUA.
+ *    If barrier and FUA are not supported, a flush must be issued instead of
+ *    (B). This is similar to the current FUA fallback.
+ * 3. (A)Barrier -> (B)Data -> (C)Barrier: this sequence will be marked with
+ *    WRITE_ORDERED_FLUSH_BARRIER or (REQ_FLUSH | REQ_POST_FLUSH_BARRIER |
+ *    REQ_BARRIER). This scenario is just a combination of the previous two,
+ *    and no additional logic is required.
  */
 
 #include <linux/kernel.h>
@@ -105,8 +144,26 @@
 	if (fflags & REQ_FLUSH) {
 		if (rq->cmd_flags & REQ_FLUSH)
 			policy |= REQ_FSEQ_PREFLUSH;
-		if (!(fflags & REQ_FUA) && (rq->cmd_flags & REQ_FUA))
+		/*
+		 * Use a post flush when:
+		 * 1. FUA is desired but not supported,
+		 * 2. post barrier is desired and supported, or
+		 * 3. post barrier is desired but neither barrier nor FUA
+		 *    is supported.
+		 */
+		if ((!(fflags & REQ_FUA) && (rq->cmd_flags & REQ_FUA)) ||
+			((fflags & REQ_BARRIER) && (rq->cmd_flags &
+				REQ_POST_FLUSH_BARRIER)) ||
+			((!(fflags & REQ_BARRIER) && !(fflags & REQ_FUA) &&
+				(rq->cmd_flags & REQ_POST_FLUSH_BARRIER))))
 			policy |= REQ_FSEQ_POSTFLUSH;
+		/*
+		 * If post barrier is desired and not supported but FUA is
+		 * supported, append the FUA flag.
+		 */
+		if ((rq->cmd_flags & REQ_POST_FLUSH_BARRIER) &&
+				!(fflags & REQ_BARRIER) && (fflags & REQ_FUA))
+			rq->cmd_flags |= REQ_FUA;
 	}
 	return policy;
 }
@@ -290,9 +347,10 @@
 static bool blk_kick_flush(struct request_queue *q, struct blk_flush_queue *fq)
 {
 	struct list_head *pending = &fq->flush_queue[fq->flush_pending_idx];
-	struct request *first_rq =
+	struct request *rq, *n, *first_rq =
 		list_first_entry(pending, struct request, flush.list);
 	struct request *flush_rq = fq->flush_rq;
+	u64 barrier_flag = REQ_BARRIER;
 
 	/* C1 described at the top of this file */
 	if (fq->flush_pending_idx != fq->flush_running_idx || list_empty(pending))
@@ -330,6 +388,12 @@
 
 	flush_rq->cmd_type = REQ_TYPE_FS;
 	flush_rq->cmd_flags = WRITE_FLUSH | REQ_FLUSH_SEQ;
+	/* Issue a barrier only if all pending flushes request it */
+	list_for_each_entry_safe(rq, n, pending, flush.list) {
+		barrier_flag &= rq->cmd_flags;
+	}
+	flush_rq->cmd_flags |= barrier_flag;
+
 	flush_rq->rq_disk = first_rq->rq_disk;
 	flush_rq->end_io = flush_end_io;
 
@@ -388,6 +452,8 @@
 	unsigned int policy = blk_flush_policy(fflags, rq);
 	struct blk_flush_queue *fq = blk_get_flush_queue(q, rq->mq_ctx);
 
+	WARN_ON((rq->cmd_flags & REQ_POST_FLUSH_BARRIER) &&
+			!blk_rq_sectors(rq));
 	/*
 	 * @policy now records what operations need to be done.  Adjust
 	 * REQ_FLUSH and FUA for the driver.
@@ -447,20 +513,8 @@
 	blk_flush_complete_seq(rq, fq, REQ_FSEQ_ACTIONS & ~policy, 0);
 }
 
-/**
- * blkdev_issue_flush - queue a flush
- * @bdev:	blockdev to issue flush for
- * @gfp_mask:	memory allocation flags (for bio_alloc)
- * @error_sector:	error sector
- *
- * Description:
- *    Issue a flush for the block device in question. Caller can supply
- *    room for storing the error offset in case of a flush error, if they
- *    wish to. If WAIT flag is not passed then caller may check only what
- *    request was pushed in some internal queue for later handling.
- */
-int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask,
-		sector_t *error_sector)
+static int __blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask,
+		sector_t *error_sector, int flush_type)
 {
 	struct request_queue *q;
 	struct bio *bio;
@@ -485,7 +539,7 @@
 	bio = bio_alloc(gfp_mask, 0);
 	bio->bi_bdev = bdev;
 
-	ret = submit_bio_wait(WRITE_FLUSH, bio);
+	ret = submit_bio_wait(flush_type, bio);
 
 	/*
 	 * The driver must store the error location in ->bi_sector, if
@@ -498,6 +552,45 @@
 	bio_put(bio);
 	return ret;
 }
+
+/**
+ * blkdev_issue_barrier - queue a barrier
+ * @bdev:	blockdev to issue barrier for
+ * @gfp_mask:	memory allocation flags (for bio_alloc)
+ * @error_sector:	error sector
+ *
+ * Description:
+ *    If the blkdev supports the barrier API, issue a barrier; otherwise issue
+ *    a flush. Caller can supply room for storing the error offset in case of a
+ *    flush error, if they wish to. If WAIT flag is not passed then caller may
+ *    check only what request was pushed in some internal queue for later
+ *    handling.
+ */
+int blkdev_issue_barrier(struct block_device *bdev, gfp_t gfp_mask,
+		sector_t *error_sector)
+{
+	return __blkdev_issue_flush(bdev, gfp_mask, error_sector,
+			WRITE_FLUSH_BARRIER);
+}
+EXPORT_SYMBOL(blkdev_issue_barrier);
+
+/**
+ * blkdev_issue_flush - queue a flush
+ * @bdev:	blockdev to issue flush for
+ * @gfp_mask:	memory allocation flags (for bio_alloc)
+ * @error_sector:	error sector
+ *
+ * Description:
+ *    Issue a flush for the block device in question. Caller can supply
+ *    room for storing the error offset in case of a flush error, if they
+ *    wish to. If WAIT flag is not passed then caller may check only what
+ *    request was pushed in some internal queue for later handling.
+ */
+int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask,
+		sector_t *error_sector)
+{
+	return __blkdev_issue_flush(bdev, gfp_mask, error_sector, WRITE_FLUSH);
+}
 EXPORT_SYMBOL(blkdev_issue_flush);
 
 struct blk_flush_queue *blk_alloc_flush_queue(struct request_queue *q,
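A sketch of a filesystem-style caller of the new API: ordering a journal commit without paying for a full cache flush on devices that support barriers. The function name is illustrative; blkdev_issue_barrier() is added by this patch and transparently falls back to a flush when barriers are unsupported:

#include <linux/blkdev.h>

static int example_commit_ordered(struct block_device *bdev)
{
	/* NULL: the caller does not care about the error sector */
	return blkdev_issue_barrier(bdev, GFP_NOFS, NULL);
}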
diff -ruw linux-4.4.115/block/blk.h linux-4.4.115-fbx/block/blk.h
--- linux-4.4.115/block/blk.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/block/blk.h	2019-01-22 16:16:22.723239343 +0100
@@ -112,6 +112,15 @@
 void blk_account_io_completion(struct request *req, unsigned int bytes);
 void blk_account_io_done(struct request *req);
 
+#ifdef CONFIG_BLOCK_PERF_FRAMEWORK
+void blk_update_perf_stats(struct bio *bio);
+#else
+static inline void blk_update_perf_stats(struct bio *bio)
+{
+	(void) bio;
+}
+#endif
+
 /*
  * Internal atomic flags for request handling
  */
@@ -214,7 +223,6 @@
 int attempt_front_merge(struct request_queue *q, struct request *rq);
 int blk_attempt_req_merge(struct request_queue *q, struct request *rq,
 				struct request *next);
-void blk_recalc_rq_segments(struct request *rq);
 void blk_rq_set_mixed_merge(struct request *rq);
 bool blk_rq_merge_ok(struct request *rq, struct bio *bio);
 int blk_try_merge(struct request *rq, struct bio *bio);
diff -ruw linux-4.4.115/block/blk-merge.c linux-4.4.115-fbx/block/blk-merge.c
--- linux-4.4.115/block/blk-merge.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/block/blk-merge.c	2019-01-22 16:16:22.719239307 +0100
@@ -6,6 +6,8 @@
 #include <linux/bio.h>
 #include <linux/blkdev.h>
 #include <linux/scatterlist.h>
+#include <linux/pfk.h>
+#include <linux/pft.h>
 
 #include "blk.h"
 
@@ -488,6 +490,93 @@
 }
 EXPORT_SYMBOL(blk_rq_map_sg);
 
+/*
+ * Map a request to a scatterlist without combining physically contiguous
+ * blocks; return the number of sg entries set up. The caller
+ * must make sure sg can hold rq->nr_phys_segments entries.
+ */
+int blk_rq_map_sg_no_cluster(struct request_queue *q, struct request *rq,
+		  struct scatterlist *sglist)
+{
+	struct bio_vec bvec, bvprv = { NULL };
+	struct req_iterator iter;
+	struct scatterlist *sg;
+	int nsegs, cluster = 0;
+
+	nsegs = 0;
+
+	/*
+	 * for each bio in rq
+	 */
+	sg = NULL;
+	rq_for_each_segment(bvec, rq, iter) {
+		__blk_segment_map_sg(q, &bvec, sglist, &bvprv, &sg,
+				     &nsegs, &cluster);
+	} /* segments in rq */
+
+
+	if (!sg)
+		return nsegs;
+
+	if (unlikely(rq->cmd_flags & REQ_COPY_USER) &&
+	    (blk_rq_bytes(rq) & q->dma_pad_mask)) {
+		unsigned int pad_len =
+			(q->dma_pad_mask & ~blk_rq_bytes(rq)) + 1;
+
+		sg->length += pad_len;
+		rq->extra_len += pad_len;
+	}
+
+	if (q->dma_drain_size && q->dma_drain_needed(rq)) {
+		if (rq->cmd_flags & REQ_WRITE)
+			memset(q->dma_drain_buffer, 0, q->dma_drain_size);
+
+		sg->page_link &= ~0x02;
+		sg = sg_next(sg);
+		sg_set_page(sg, virt_to_page(q->dma_drain_buffer),
+			    q->dma_drain_size,
+			    ((unsigned long)q->dma_drain_buffer) &
+			    (PAGE_SIZE - 1));
+		nsegs++;
+		rq->extra_len += q->dma_drain_size;
+	}
+
+	if (sg)
+		sg_mark_end(sg);
+
+	return nsegs;
+}
+EXPORT_SYMBOL(blk_rq_map_sg_no_cluster);
+
+/**
+ * blk_bio_map_sg - map a bio to a scatterlist
+ * @q: request_queue in question
+ * @bio: bio being mapped
+ * @sglist: scatterlist being mapped
+ *
+ * Note:
+ *    Caller must make sure sg can hold bio->bi_phys_segments entries
+ *
+ * Will return the number of sg entries setup
+ */
+int blk_bio_map_sg(struct request_queue *q, struct bio *bio,
+		   struct scatterlist *sglist)
+{
+	struct scatterlist *sg = NULL;
+	int nsegs;
+	struct bio *next = bio->bi_next;
+
+	bio->bi_next = NULL;
+	nsegs = __blk_bios_map_sg(q, bio, sglist, &sg);
+	bio->bi_next = next;
+	if (sg)
+		sg_mark_end(sg);
+
+	WARN_ON(bio->bi_phys_segments && nsegs > bio->bi_phys_segments);
+	return nsegs;
+}
+EXPORT_SYMBOL(blk_bio_map_sg);
+
 static inline int ll_new_hw_segment(struct request_queue *q,
 				    struct request *req,
 				    struct bio *bio)
@@ -663,6 +752,12 @@
 	}
 }
 
+static bool crypto_not_mergeable(const struct bio *bio, const struct bio *nxt)
+{
+	return (!pft_allow_merge_bio(bio, nxt) ||
+		!pfk_allow_merge_bio(bio, nxt));
+}
+
 /*
  * Has to be called with the request spinlock acquired
  */
@@ -690,6 +785,9 @@
 	    !blk_write_same_mergeable(req->bio, next->bio))
 		return 0;
 
+	if (crypto_not_mergeable(req->bio, next->bio))
+		return 0;
+
 	/*
 	 * If we are allowed to merge, then append bio list
 	 * from next to rq and release next. merge_requests_fn
@@ -794,6 +892,9 @@
 	    !blk_write_same_mergeable(rq->bio, bio))
 		return false;
 
+	if (crypto_not_mergeable(rq->bio, bio))
+		return false;
+
 	return true;
 }
 
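A sketch of a driver using the new blk_bio_map_sg() export; the caller is assumed to have sized the table with at least bio->bi_phys_segments entries, and the function name is illustrative:

#include <linux/blkdev.h>
#include <linux/scatterlist.h>

static int example_map_one_bio(struct request_queue *q, struct bio *bio,
			       struct scatterlist *sgl, int nents)
{
	sg_init_table(sgl, nents);
	return blk_bio_map_sg(q, bio, sgl);	/* number of sg entries used */
}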
diff -ruw linux-4.4.115/block/blk-mq.c linux-4.4.115-fbx/block/blk-mq.c
--- linux-4.4.115/block/blk-mq.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/block/blk-mq.c	2019-10-29 09:26:23.333200144 +0100
@@ -1782,10 +1782,6 @@
 		INIT_LIST_HEAD(&__ctx->rq_list);
 		__ctx->queue = q;
 
-		/* If the cpu isn't online, the cpu is mapped to first hctx */
-		if (!cpu_online(i))
-			continue;
-
 		hctx = q->mq_ops->map_queue(q, i);
 
 		/*
@@ -1819,11 +1815,8 @@
 	 * Map software to hardware queues
 	 */
 	queue_for_each_ctx(q, ctx, i) {
-		/* If the cpu isn't online, the cpu is mapped to first hctx */
-		if (!cpumask_test_cpu(i, online_mask))
-			continue;
-
 		hctx = q->mq_ops->map_queue(q, i);
-		cpumask_set_cpu(i, hctx->cpumask);
+		if (cpumask_test_cpu(i, online_mask))
+			cpumask_set_cpu(i, hctx->cpumask);
 		ctx->index_hw = hctx->nr_ctx;
 		hctx->ctxs[hctx->nr_ctx++] = ctx;
@@ -1862,16 +1855,21 @@
 
 		/*
 		 * Initialize batch roundrobin counts
+		 * Set next_cpu for only those hctxs that have an online CPU
+		 * in their cpumask field. For hctxs that cover a mix of online
+		 * and offline CPUs, this will always provide one CPU from the
+		 * online ones. For hctxs belonging only to offline CPUs, their
+		 * cpumask will be updated in reinit_notify.
 		 */
-		hctx->next_cpu = cpumask_first(hctx->cpumask);
-		hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;
-	}
+		if (cpumask_first(hctx->cpumask) < nr_cpu_ids) {
+			hctx->next_cpu = cpumask_first(hctx->cpumask);
+			hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;
+		}
+	}
 
 	queue_for_each_ctx(q, ctx, i) {
-		if (!cpumask_test_cpu(i, online_mask))
-			continue;
-
 		hctx = q->mq_ops->map_queue(q, i);
-		cpumask_set_cpu(i, hctx->tags->cpumask);
+		if (cpumask_test_cpu(i, online_mask))
+			cpumask_set_cpu(i, hctx->tags->cpumask);
 	}
 }
@@ -2100,38 +2098,13 @@
 	blk_mq_free_hw_queues(q, set);
 }
 
-/* Basically redo blk_mq_init_queue with queue frozen */
-static void blk_mq_queue_reinit(struct request_queue *q,
-				const struct cpumask *online_mask)
-{
-	WARN_ON_ONCE(!atomic_read(&q->mq_freeze_depth));
-
-	blk_mq_sysfs_unregister(q);
-
-	blk_mq_update_queue_map(q->mq_map, q->nr_hw_queues, online_mask);
-
-	/*
-	 * redo blk_mq_init_cpu_queues and blk_mq_init_hw_queues. FIXME: maybe
-	 * we should change hctx numa_node according to new topology (this
-	 * involves free and re-allocate memory, worthy doing?)
-	 */
-
-	blk_mq_map_swqueue(q, online_mask);
-
-	blk_mq_sysfs_register(q);
-}
-
 static int blk_mq_queue_reinit_notify(struct notifier_block *nb,
 				      unsigned long action, void *hcpu)
 {
 	struct request_queue *q;
+	struct blk_mq_hw_ctx *hctx;
+	int i;
 	int cpu = (unsigned long)hcpu;
-	/*
-	 * New online cpumask which is going to be set in this hotplug event.
-	 * Declare this cpumasks as global as cpu-hotplug operation is invoked
-	 * one-by-one and dynamically allocating this could result in a failure.
-	 */
-	static struct cpumask online_new;
 
 	/*
 	 * Before hotadded cpu starts handling requests, new mappings must
@@ -2153,44 +2126,31 @@
 	switch (action & ~CPU_TASKS_FROZEN) {
 	case CPU_DEAD:
 	case CPU_UP_CANCELED:
-		cpumask_copy(&online_new, cpu_online_mask);
+		mutex_lock(&all_q_mutex);
+		list_for_each_entry(q, &all_q_list, all_q_node) {
+			queue_for_each_hw_ctx(q, hctx, i) {
+				cpumask_clear_cpu(cpu, hctx->cpumask);
+				cpumask_clear_cpu(cpu, hctx->tags->cpumask);
+			}
+		}
+		mutex_unlock(&all_q_mutex);
 		break;
 	case CPU_UP_PREPARE:
-		cpumask_copy(&online_new, cpu_online_mask);
-		cpumask_set_cpu(cpu, &online_new);
+		/* Update hctx->cpumask for newly onlined CPUs */
+		mutex_lock(&all_q_mutex);
+		list_for_each_entry(q, &all_q_list, all_q_node) {
+			queue_for_each_hw_ctx(q, hctx, i) {
+				cpumask_set_cpu(cpu, hctx->cpumask);
+				hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;
+				cpumask_set_cpu(cpu, hctx->tags->cpumask);
+			}
+		}
+		mutex_unlock(&all_q_mutex);
 		break;
 	default:
 		return NOTIFY_OK;
 	}
 
-	mutex_lock(&all_q_mutex);
-
-	/*
-	 * We need to freeze and reinit all existing queues.  Freezing
-	 * involves synchronous wait for an RCU grace period and doing it
-	 * one by one may take a long time.  Start freezing all queues in
-	 * one swoop and then wait for the completions so that freezing can
-	 * take place in parallel.
-	 */
-	list_for_each_entry(q, &all_q_list, all_q_node)
-		blk_mq_freeze_queue_start(q);
-	list_for_each_entry(q, &all_q_list, all_q_node) {
-		blk_mq_freeze_queue_wait(q);
-
-		/*
-		 * timeout handler can't touch hw queue during the
-		 * reinitialization
-		 */
-		del_timer_sync(&q->timeout);
-	}
-
-	list_for_each_entry(q, &all_q_list, all_q_node)
-		blk_mq_queue_reinit(q, &online_new);
-
-	list_for_each_entry(q, &all_q_list, all_q_node)
-		blk_mq_unfreeze_queue(q);
-
-	mutex_unlock(&all_q_mutex);
 	return NOTIFY_OK;
 }
 
diff -ruw linux-4.4.115/block/blk-mq-cpumap.c linux-4.4.115-fbx/block/blk-mq-cpumap.c
--- linux-4.4.115/block/blk-mq-cpumap.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/block/blk-mq-cpumap.c	2019-01-22 16:16:22.719239307 +0100
@@ -31,8 +31,8 @@
 	return cpu;
 }
 
-int blk_mq_update_queue_map(unsigned int *map, unsigned int nr_queues,
-			    const struct cpumask *online_mask)
+static int blk_mq_update_queue_map(unsigned int *map,
+		unsigned int nr_queues, const struct cpumask *online_mask)
 {
 	unsigned int i, nr_cpus, nr_uniq_cpus, queue, first_sibling;
 	cpumask_var_t cpus;
@@ -52,18 +52,14 @@
 
 	queue = 0;
 	for_each_possible_cpu(i) {
-		if (!cpumask_test_cpu(i, online_mask)) {
-			map[i] = 0;
-			continue;
-		}
-
 		/*
 		 * Easy case - we have equal or more hardware queues. Or
 		 * there are no thread siblings to take into account. Do
 		 * 1:1 if enough, or sequential mapping if less.
 		 */
-		if (nr_queues >= nr_cpus || nr_cpus == nr_uniq_cpus) {
-			map[i] = cpu_to_queue_index(nr_cpus, nr_queues, queue);
+		if (nr_queues >= nr_cpu_ids) {
+			map[i] = cpu_to_queue_index(nr_cpu_ids, nr_queues,
+					queue);
 			queue++;
 			continue;
 		}
@@ -75,7 +71,7 @@
 		 */
 		first_sibling = get_first_sibling(i);
 		if (first_sibling == i) {
-			map[i] = cpu_to_queue_index(nr_uniq_cpus, nr_queues,
+			map[i] = cpu_to_queue_index(nr_cpu_ids, nr_queues,
 							queue);
 			queue++;
 		} else
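[Editor's note] The net effect of dropping the online_mask checks is that the map now covers every possible CPU, so the hotplug notifier in blk-mq.c earlier only has to flip cpumask bits instead of rebuilding the maps. Assuming cpu_to_queue_index() keeps its unchanged 4.4 form (cpu * nr_queues / nr_cpus), a worked example with nr_cpu_ids = 8 and nr_queues = 4:

	cpu_to_queue_index(8, 4, q) = q * 4 / 8:
		q = 0, 1 -> queue 0
		q = 2, 3 -> queue 1
		q = 4, 5 -> queue 2
		q = 6, 7 -> queue 3

Every possible CPU, online or not, owns a fixed software-queue slot.
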
diff -ruw linux-4.4.115/block/blk-mq.h linux-4.4.115-fbx/block/blk-mq.h
--- linux-4.4.115/block/blk-mq.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/block/blk-mq.h	2019-01-22 16:16:22.719239307 +0100
@@ -48,8 +48,6 @@
  * CPU -> queue mappings
  */
 extern unsigned int *blk_mq_make_queue_map(struct blk_mq_tag_set *set);
-extern int blk_mq_update_queue_map(unsigned int *map, unsigned int nr_queues,
-				   const struct cpumask *online_mask);
 extern int blk_mq_hw_queue_to_node(unsigned int *map, unsigned int);
 
 /*
diff -ruw linux-4.4.115/block/blk-settings.c linux-4.4.115-fbx/block/blk-settings.c
--- linux-4.4.115/block/blk-settings.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/block/blk-settings.c	2019-10-29 09:26:23.337200184 +0100
@@ -823,20 +823,24 @@
 /**
  * blk_queue_flush - configure queue's cache flush capability
  * @q:		the request queue for the device
- * @flush:	0, REQ_FLUSH or REQ_FLUSH | REQ_FUA
+ * @flush:	0, REQ_FLUSH or REQ_FLUSH | REQ_FUA | REQ_BARRIER
  *
  * Tell block layer cache flush capability of @q.  If it supports
  * flushing, REQ_FLUSH should be set.  If it supports bypassing
- * write cache for individual writes, REQ_FUA should be set.
+ * write cache for individual writes, REQ_FUA should be set. If cache
+ * barriers are supported, set REQ_BARRIER.
  */
 void blk_queue_flush(struct request_queue *q, unsigned int flush)
 {
-	WARN_ON_ONCE(flush & ~(REQ_FLUSH | REQ_FUA));
+	WARN_ON_ONCE(flush & ~(REQ_FLUSH | REQ_FUA | REQ_BARRIER));
 
-	if (WARN_ON_ONCE(!(flush & REQ_FLUSH) && (flush & REQ_FUA)))
+	if (WARN_ON_ONCE(!(flush & REQ_FLUSH) && ((flush & REQ_FUA) ||
+			(flush & REQ_BARRIER)))) {
 		flush &= ~REQ_FUA;
+		flush &= ~REQ_BARRIER;
+	}
 
-	q->flush_flags = flush & (REQ_FLUSH | REQ_FUA);
+	q->flush_flags = flush & (REQ_FLUSH | REQ_FUA | REQ_BARRIER);
 }
 EXPORT_SYMBOL_GPL(blk_queue_flush);
 
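[Editor's note] For a driver whose device supports cache barriers in addition to flush/FUA, the capability declaration becomes a one-liner (sketch; REQ_BARRIER is the vendor flag this patch set introduces, not an upstream 4.4 flag):

static void example_init_queue(struct request_queue *q)
{
	/* advertise flush, FUA and (vendor) cache-barrier support */
	blk_queue_flush(q, REQ_FLUSH | REQ_FUA | REQ_BARRIER);
}
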
diff -ruw linux-4.4.115/block/blk-sysfs.c linux-4.4.115-fbx/block/blk-sysfs.c
--- linux-4.4.115/block/blk-sysfs.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/block/blk-sysfs.c	2019-10-29 09:26:23.337200184 +0100
@@ -75,7 +75,7 @@
 
 static ssize_t queue_ra_show(struct request_queue *q, char *page)
 {
-	unsigned long ra_kb = q->backing_dev_info.ra_pages <<
+	unsigned long ra_kb = q->backing_dev_info->ra_pages <<
 					(PAGE_CACHE_SHIFT - 10);
 
 	return queue_var_show(ra_kb, (page));
@@ -90,7 +90,7 @@
 	if (ret < 0)
 		return ret;
 
-	q->backing_dev_info.ra_pages = ra_kb >> (PAGE_CACHE_SHIFT - 10);
+	q->backing_dev_info->ra_pages = ra_kb >> (PAGE_CACHE_SHIFT - 10);
 
 	return ret;
 }
@@ -578,7 +578,7 @@
 	struct request_queue *q =
 		container_of(kobj, struct request_queue, kobj);
 
-	bdi_exit(&q->backing_dev_info);
+	bdi_put(q->backing_dev_info);
 	blkcg_exit_queue(q);
 
 	if (q->elevator) {
diff -ruw linux-4.4.115/block/elevator.c linux-4.4.115-fbx/block/elevator.c
--- linux-4.4.115/block/elevator.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/block/elevator.c	2019-01-22 16:16:22.727239379 +0100
@@ -538,7 +538,7 @@
 #ifdef CONFIG_PM
 static void blk_pm_requeue_request(struct request *rq)
 {
-	if (rq->q->dev && !(rq->cmd_flags & REQ_PM))
+	if (rq->q->dev && !(rq->cmd_flags & REQ_PM) && rq->q->nr_pending)
 		rq->q->nr_pending--;
 }
 
diff -ruw linux-4.4.115/block/genhd.c linux-4.4.115-fbx/block/genhd.c
--- linux-4.4.115/block/genhd.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/block/genhd.c	2019-01-22 16:16:22.727239379 +0100
@@ -611,7 +611,7 @@
 	disk_alloc_events(disk);
 
 	/* Register BDI before referencing it from bdev */
-	bdi = &disk->queue->backing_dev_info;
+	bdi = disk->queue->backing_dev_info;
 	bdi_register_owner(bdi, disk_to_dev(disk));
 
 	blk_register_region(disk_devt(disk), disk->minors, NULL,
@@ -646,6 +646,8 @@
 	disk_part_iter_init(&piter, disk,
 			     DISK_PITER_INCL_EMPTY | DISK_PITER_REVERSE);
 	while ((part = disk_part_iter_next(&piter))) {
+		bdev_unhash_inode(MKDEV(disk->major,
+					disk->first_minor + part->partno));
 		invalidate_partition(disk, part->partno);
 		delete_partition(disk, part->partno);
 	}
@@ -656,7 +658,16 @@
 	disk->flags &= ~GENHD_FL_UP;
 
 	sysfs_remove_link(&disk_to_dev(disk)->kobj, "bdi");
+	if (disk->queue) {
+		/*
+		 * Unregister bdi before releasing device numbers (as they can
+		 * get reused and we'd get clashes in sysfs).
+		 */
+		bdi_unregister(disk->queue->backing_dev_info);
 	blk_unregister_queue(disk);
+	} else {
+		WARN_ON(1);
+	}
 	blk_unregister_region(disk_devt(disk), disk->minors);
 
 	part_stat_set_all(&disk->part0, 0);
@@ -1117,6 +1128,22 @@
 		blk_put_queue(disk->queue);
 	kfree(disk);
 }
+
+static int disk_uevent(struct device *dev, struct kobj_uevent_env *env)
+{
+	struct gendisk *disk = dev_to_disk(dev);
+	struct disk_part_iter piter;
+	struct hd_struct *part;
+	int cnt = 0;
+
+	disk_part_iter_init(&piter, disk, 0);
+	while((part = disk_part_iter_next(&piter)))
+		cnt++;
+	disk_part_iter_exit(&piter);
+	add_uevent_var(env, "NPARTS=%u", cnt);
+	return 0;
+}
+
 struct class block_class = {
 	.name		= "block",
 };
@@ -1136,6 +1163,7 @@
 	.groups		= disk_attr_groups,
 	.release	= disk_release,
 	.devnode	= block_devnode,
+	.uevent		= disk_uevent,
 };
 
 #ifdef CONFIG_PROC_FS
diff -ruw linux-4.4.115/block/Kconfig linux-4.4.115-fbx/block/Kconfig
--- linux-4.4.115/block/Kconfig	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/block/Kconfig	2019-01-22 16:16:22.707239198 +0100
@@ -111,6 +111,13 @@
 
 	See Documentation/block/cmdline-partition.txt for more information.
 
+config BLOCK_PERF_FRAMEWORK
+	bool "Enable Block device performance measurement framework"
+	default n
+	---help---
+	Enabling this option allows you to measure the performance at the
+	block layer.
+
 menu "Partition Types"
 
 source "block/partitions/Kconfig"
diff -ruw linux-4.4.115/block/Kconfig.iosched linux-4.4.115-fbx/block/Kconfig.iosched
--- linux-4.4.115/block/Kconfig.iosched	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/block/Kconfig.iosched	2019-01-22 16:16:22.711239234 +0100
@@ -12,6 +12,17 @@
 	  that do their own scheduling and require only minimal assistance from
 	  the kernel.
 
+config IOSCHED_TEST
+	tristate "Test I/O scheduler"
+	depends on DEBUG_FS
+	default m
+	---help---
+	  The test I/O scheduler is a duplicate of the noop scheduler with
+	  the addition of a test utility.
+	  It allows testing a block device by dispatching specific requests
+	  according to the test case and declaring PASS/FAIL based on the
+	  requests' completion error codes.
+
 config IOSCHED_DEADLINE
 	tristate "Deadline I/O scheduler"
 	default y
diff -ruw linux-4.4.115/block/Makefile linux-4.4.115-fbx/block/Makefile
--- linux-4.4.115/block/Makefile	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/block/Makefile	2019-01-22 16:16:22.711239234 +0100
@@ -18,6 +18,7 @@
 obj-$(CONFIG_IOSCHED_NOOP)	+= noop-iosched.o
 obj-$(CONFIG_IOSCHED_DEADLINE)	+= deadline-iosched.o
 obj-$(CONFIG_IOSCHED_CFQ)	+= cfq-iosched.o
+obj-$(CONFIG_IOSCHED_TEST)	+= test-iosched.o
 
 obj-$(CONFIG_BLOCK_COMPAT)	+= compat_ioctl.o
 obj-$(CONFIG_BLK_CMDLINE_PARSER)	+= cmdline-parser.o
diff -ruw linux-4.4.115/block/partition-generic.c linux-4.4.115-fbx/block/partition-generic.c
--- linux-4.4.115/block/partition-generic.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/block/partition-generic.c	2019-10-29 09:26:23.341200223 +0100
@@ -216,10 +216,21 @@
 	kfree(p);
 }
 
+static int part_uevent(struct device *dev, struct kobj_uevent_env *env)
+{
+	struct hd_struct *part = dev_to_part(dev);
+
+	add_uevent_var(env, "PARTN=%u", part->partno);
+	if (part->info && part->info->volname[0])
+		add_uevent_var(env, "PARTNAME=%s", part->info->volname);
+	return 0;
+}
+
 struct device_type part_type = {
 	.name		= "partition",
 	.groups		= part_attr_groups,
 	.release	= part_release,
+	.uevent		= part_uevent,
 };
 
 static void delete_partition_rcu_cb(struct rcu_head *head)
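[Editor's note] With part_uevent() above (and the matching disk_uevent() in genhd.c adding NPARTS), userspace helpers see the extra variables alongside the standard ones. An "add" event for a GPT partition whose volume name is "system" might plausibly carry:

	ACTION=add
	DEVTYPE=partition
	PARTN=2
	PARTNAME=system

(ACTION and DEVTYPE are standard uevent variables; PARTN/PARTNAME come from the callback above, and the partition number and name here are illustrative.)
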
diff -ruw linux-4.4.115/certs/Makefile linux-4.4.115-fbx/certs/Makefile
--- linux-4.4.115/certs/Makefile	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/certs/Makefile	2019-10-29 09:26:23.341200223 +0100
@@ -17,6 +17,10 @@
 quiet_cmd_extract_certs  = EXTRACT_CERTS   $(patsubst "%",%,$(2))
       cmd_extract_certs  = scripts/extract-cert $(2) $@ || ( rm $@; exit 1)
 
+ifeq ($(CONFIG_SYSTEM_TRUSTED_KEYS),"verity.x509.pem")
+SYSTEM_TRUSTED_KEYS_SRCPREFIX := $(srctree)/certs/
+endif
+
 targets += x509_certificate_list
 $(obj)/x509_certificate_list: scripts/extract-cert $(SYSTEM_TRUSTED_KEYS_SRCPREFIX)$(SYSTEM_TRUSTED_KEYS_FILENAME) FORCE
 	$(call if_changed,extract_certs,$(SYSTEM_TRUSTED_KEYS_SRCPREFIX)$(CONFIG_SYSTEM_TRUSTED_KEYS))
diff -ruw linux-4.4.115/crypto/api.c linux-4.4.115-fbx/crypto/api.c
--- linux-4.4.115/crypto/api.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/crypto/api.c	2019-01-22 16:16:22.739239488 +0100
@@ -24,6 +24,7 @@
 #include <linux/sched.h>
 #include <linux/slab.h>
 #include <linux/string.h>
+#include <linux/completion.h>
 #include "internal.h"
 
 LIST_HEAD(crypto_alg_list);
@@ -611,5 +612,17 @@
 }
 EXPORT_SYMBOL_GPL(crypto_has_alg);
 
+void crypto_req_done(struct crypto_async_request *req, int err)
+{
+	struct crypto_wait *wait = req->data;
+
+	if (err == -EINPROGRESS)
+		return;
+
+	wait->err = err;
+	complete(&wait->completion);
+}
+EXPORT_SYMBOL_GPL(crypto_req_done);
+
 MODULE_DESCRIPTION("Cryptographic core API");
 MODULE_LICENSE("GPL");
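[Editor's note] crypto_req_done() is the completion-callback half of the crypto_wait pattern later merged upstream in 4.12; it presumes a struct crypto_wait and a crypto_wait_req() helper in the matching header, which this hunk does not show. A minimal sketch, treating those companions as part of the same backport:

/* assumed companions (upstream <linux/crypto.h> in 4.12+): */
struct crypto_wait {
	struct completion completion;
	int err;
};

static inline int crypto_wait_req(int err, struct crypto_wait *wait)
{
	if (err == -EINPROGRESS || err == -EBUSY) {
		wait_for_completion(&wait->completion);
		reinit_completion(&wait->completion);
		err = wait->err;
	}
	return err;
}

/* usage: synchronously drive an async ablkcipher request */
init_completion(&wait.completion);
ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				crypto_req_done, &wait);
err = crypto_wait_req(crypto_ablkcipher_encrypt(req), &wait);
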
diff -ruw linux-4.4.115/crypto/blkcipher.c linux-4.4.115-fbx/crypto/blkcipher.c
--- linux-4.4.115/crypto/blkcipher.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/crypto/blkcipher.c	2019-10-29 09:26:23.345200262 +0100
@@ -373,6 +373,27 @@
 }
 EXPORT_SYMBOL_GPL(blkcipher_aead_walk_virt_block);
 
+/*
+ * This function allows ablkcipher algorithms to use the blkcipher_walk API to
+ * walk over their data.  The specified crypto_ablkcipher tfm is used to
+ * initialize the struct blkcipher_walk, and the crypto_blkcipher specified in
+ * desc->tfm is never used so it can be left NULL.  (Yes, this design is ugly,
+ * but it parallels blkcipher_aead_walk_virt_block() above.  In the 4.10 kernel
+ * this is starting to be cleaned up...)
+ */
+int blkcipher_ablkcipher_walk_virt(struct blkcipher_desc *desc,
+				   struct blkcipher_walk *walk,
+				   struct crypto_ablkcipher *tfm)
+{
+	walk->flags &= ~BLKCIPHER_WALK_PHYS;
+	walk->walk_blocksize = crypto_ablkcipher_blocksize(tfm);
+	walk->cipher_blocksize = walk->walk_blocksize;
+	walk->ivsize = crypto_ablkcipher_ivsize(tfm);
+	walk->alignmask = crypto_ablkcipher_alignmask(tfm);
+	return blkcipher_walk_first(desc, walk);
+}
+EXPORT_SYMBOL_GPL(blkcipher_ablkcipher_walk_virt);
+
 static int setkey_unaligned(struct crypto_tfm *tfm, const u8 *key,
 			    unsigned int keylen)
 {
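[Editor's note] A sketch of how an ablkcipher implementation would drive the new helper. Per the comment in the hunk above, desc.tfm stays NULL and the walk takes its parameters from the ablkcipher tfm; the function name and cipher step are illustrative:

static int example_encrypt(struct ablkcipher_request *req)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	struct blkcipher_desc desc = { .flags = req->base.flags };
	struct blkcipher_walk walk;
	int err;

	blkcipher_walk_init(&walk, req->dst, req->src, req->nbytes);
	/* desc.tfm is deliberately left NULL, see the comment above */
	err = blkcipher_ablkcipher_walk_virt(&desc, &walk, tfm);
	while (walk.nbytes) {
		/* cipher walk.src.virt.addr into walk.dst.virt.addr here,
		 * using walk.iv as the IV */
		err = blkcipher_walk_done(&desc, &walk, 0);
	}
	return err;
}
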
diff -ruw linux-4.4.115/crypto/gf128mul.c linux-4.4.115-fbx/crypto/gf128mul.c
--- linux-4.4.115/crypto/gf128mul.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/crypto/gf128mul.c	2019-01-22 16:16:22.747239560 +0100
@@ -44,7 +44,7 @@
  ---------------------------------------------------------------------------
  Issue 31/01/2006
 
- This file provides fast multiplication in GF(128) as required by several
+ This file provides fast multiplication in GF(2^128) as required by several
  cryptographic authentication modes
 */
 
@@ -88,37 +88,52 @@
 	q(0xf8), q(0xf9), q(0xfa), q(0xfb), q(0xfc), q(0xfd), q(0xfe), q(0xff) \
 }
 
-/*	Given the value i in 0..255 as the byte overflow when a field element
-    in GHASH is multiplied by x^8, this function will return the values that
-    are generated in the lo 16-bit word of the field value by applying the
-    modular polynomial. The values lo_byte and hi_byte are returned via the
-    macro xp_fun(lo_byte, hi_byte) so that the values can be assembled into
-    memory as required by a suitable definition of this macro operating on
-    the table above
+/*
+ * Given a value i in 0..255 as the byte overflow when a field element
+ * in GF(2^128) is multiplied by x^8, the following macro returns the
+ * 16-bit value that must be XOR-ed into the low-degree end of the
+ * product to reduce it modulo the irreducible polynomial x^128 + x^7 +
+ * x^2 + x + 1.
+ *
+ * There are two versions of the macro, and hence two tables: one for
+ * the "be" convention where the highest-order bit is the coefficient of
+ * the highest-degree polynomial term, and one for the "le" convention
+ * where the highest-order bit is the coefficient of the lowest-degree
+ * polynomial term.  In both cases the values are stored in CPU byte
+ * endianness such that the coefficients are ordered consistently across
+ * bytes, i.e. in the "be" table bits 15..0 of the stored value
+ * correspond to the coefficients of x^15..x^0, and in the "le" table
+ * bits 15..0 correspond to the coefficients of x^0..x^15.
+ *
+ * Therefore, provided that the appropriate byte endianness conversions
+ * are done by the multiplication functions (and these must be in place
+ * anyway to support both little endian and big endian CPUs), the "be"
+ * table can be used for multiplications of both "bbe" and "ble"
+ * elements, and the "le" table can be used for multiplications of both
+ * "lle" and "lbe" elements.
 */
 
-#define xx(p, q)	0x##p##q
-
-#define xda_bbe(i) ( \
-	(i & 0x80 ? xx(43, 80) : 0) ^ (i & 0x40 ? xx(21, c0) : 0) ^ \
-	(i & 0x20 ? xx(10, e0) : 0) ^ (i & 0x10 ? xx(08, 70) : 0) ^ \
-	(i & 0x08 ? xx(04, 38) : 0) ^ (i & 0x04 ? xx(02, 1c) : 0) ^ \
-	(i & 0x02 ? xx(01, 0e) : 0) ^ (i & 0x01 ? xx(00, 87) : 0) \
+#define xda_be(i) ( \
+	(i & 0x80 ? 0x4380 : 0) ^ (i & 0x40 ? 0x21c0 : 0) ^ \
+	(i & 0x20 ? 0x10e0 : 0) ^ (i & 0x10 ? 0x0870 : 0) ^ \
+	(i & 0x08 ? 0x0438 : 0) ^ (i & 0x04 ? 0x021c : 0) ^ \
+	(i & 0x02 ? 0x010e : 0) ^ (i & 0x01 ? 0x0087 : 0) \
 )
 
-#define xda_lle(i) ( \
-	(i & 0x80 ? xx(e1, 00) : 0) ^ (i & 0x40 ? xx(70, 80) : 0) ^ \
-	(i & 0x20 ? xx(38, 40) : 0) ^ (i & 0x10 ? xx(1c, 20) : 0) ^ \
-	(i & 0x08 ? xx(0e, 10) : 0) ^ (i & 0x04 ? xx(07, 08) : 0) ^ \
-	(i & 0x02 ? xx(03, 84) : 0) ^ (i & 0x01 ? xx(01, c2) : 0) \
+#define xda_le(i) ( \
+	(i & 0x80 ? 0xe100 : 0) ^ (i & 0x40 ? 0x7080 : 0) ^ \
+	(i & 0x20 ? 0x3840 : 0) ^ (i & 0x10 ? 0x1c20 : 0) ^ \
+	(i & 0x08 ? 0x0e10 : 0) ^ (i & 0x04 ? 0x0708 : 0) ^ \
+	(i & 0x02 ? 0x0384 : 0) ^ (i & 0x01 ? 0x01c2 : 0) \
 )
 
-static const u16 gf128mul_table_lle[256] = gf128mul_dat(xda_lle);
-static const u16 gf128mul_table_bbe[256] = gf128mul_dat(xda_bbe);
+static const u16 gf128mul_table_le[256] = gf128mul_dat(xda_le);
+static const u16 gf128mul_table_be[256] = gf128mul_dat(xda_be);
 
-/* These functions multiply a field element by x, by x^4 and by x^8
- * in the polynomial field representation. It uses 32-bit word operations
- * to gain speed but compensates for machine endianess and hence works
+/*
+ * The following functions multiply a field element by x or by x^8 in
+ * the polynomial field representation.  They use 64-bit word operations
+ * to gain speed but compensate for machine endianness and hence work
  * correctly on both styles of machine.
  */
 
@@ -126,7 +141,7 @@
 {
 	u64 a = be64_to_cpu(x->a);
 	u64 b = be64_to_cpu(x->b);
-	u64 _tt = gf128mul_table_lle[(b << 7) & 0xff];
+	u64 _tt = gf128mul_table_le[(b << 7) & 0xff];
 
 	r->b = cpu_to_be64((b >> 1) | (a << 63));
 	r->a = cpu_to_be64((a >> 1) ^ (_tt << 48));
@@ -136,7 +151,7 @@
 {
 	u64 a = be64_to_cpu(x->a);
 	u64 b = be64_to_cpu(x->b);
-	u64 _tt = gf128mul_table_bbe[a >> 63];
+	u64 _tt = gf128mul_table_be[a >> 63];
 
 	r->a = cpu_to_be64((a << 1) | (b >> 63));
 	r->b = cpu_to_be64((b << 1) ^ _tt);
@@ -146,7 +161,7 @@
 {
 	u64 a = le64_to_cpu(x->a);
 	u64 b = le64_to_cpu(x->b);
-	u64 _tt = gf128mul_table_bbe[b >> 63];
+	u64 _tt = gf128mul_table_be[b >> 63];
 
 	r->a = cpu_to_le64((a << 1) ^ _tt);
 	r->b = cpu_to_le64((b << 1) | (a >> 63));
@@ -157,7 +172,7 @@
 {
 	u64 a = be64_to_cpu(x->a);
 	u64 b = be64_to_cpu(x->b);
-	u64 _tt = gf128mul_table_lle[b & 0xff];
+	u64 _tt = gf128mul_table_le[b & 0xff];
 
 	x->b = cpu_to_be64((b >> 8) | (a << 56));
 	x->a = cpu_to_be64((a >> 8) ^ (_tt << 48));
@@ -167,12 +182,22 @@
 {
 	u64 a = be64_to_cpu(x->a);
 	u64 b = be64_to_cpu(x->b);
-	u64 _tt = gf128mul_table_bbe[a >> 56];
+	u64 _tt = gf128mul_table_be[a >> 56];
 
 	x->a = cpu_to_be64((a << 8) | (b >> 56));
 	x->b = cpu_to_be64((b << 8) ^ _tt);
 }
 
+static void gf128mul_x8_ble(be128 *x)
+{
+	u64 a = le64_to_cpu(x->b);
+	u64 b = le64_to_cpu(x->a);
+	u64 _tt = gf128mul_table_be[a >> 56];
+
+	x->b = cpu_to_le64((a << 8) | (b >> 56));
+	x->a = cpu_to_le64((b << 8) ^ _tt);
+}
+
 void gf128mul_lle(be128 *r, const be128 *b)
 {
 	be128 p[8];
@@ -249,9 +274,48 @@
 }
 EXPORT_SYMBOL(gf128mul_bbe);
 
+void gf128mul_ble(be128 *r, const be128 *b)
+{
+	be128 p[8];
+	int i;
+
+	p[0] = *r;
+	for (i = 0; i < 7; ++i)
+		gf128mul_x_ble((be128 *)&p[i + 1], (be128 *)&p[i]);
+
+	memset(r, 0, sizeof(*r));
+	for (i = 0;;) {
+		u8 ch = ((u8 *)b)[15 - i];
+
+		if (ch & 0x80)
+			be128_xor(r, r, &p[7]);
+		if (ch & 0x40)
+			be128_xor(r, r, &p[6]);
+		if (ch & 0x20)
+			be128_xor(r, r, &p[5]);
+		if (ch & 0x10)
+			be128_xor(r, r, &p[4]);
+		if (ch & 0x08)
+			be128_xor(r, r, &p[3]);
+		if (ch & 0x04)
+			be128_xor(r, r, &p[2]);
+		if (ch & 0x02)
+			be128_xor(r, r, &p[1]);
+		if (ch & 0x01)
+			be128_xor(r, r, &p[0]);
+
+		if (++i >= 16)
+			break;
+
+		gf128mul_x8_ble(r);
+	}
+}
+EXPORT_SYMBOL(gf128mul_ble);
+
+
 /*      This version uses 64k bytes of table space.
     A 16 byte buffer has to be multiplied by a 16 byte key
-    value in GF(128).  If we consider a GF(128) value in
+    value in GF(2^128).  If we consider a GF(2^128) value in
     the buffer's lowest byte, we can construct a table of
     the 256 16 byte values that result from the 256 values
     of this byte.  This requires 4096 bytes. But we also
@@ -352,8 +416,8 @@
 	int i;
 
 	for (i = 0; i < 16; i++)
-		kfree(t->t[i]);
-	kfree(t);
+		kzfree(t->t[i]);
+	kzfree(t);
 }
 EXPORT_SYMBOL(gf128mul_free_64k);
 
@@ -385,7 +449,7 @@
 
 /*      This version uses 4k bytes of table space.
     A 16 byte buffer has to be multiplied by a 16 byte key
-    value in GF(128).  If we consider a GF(128) value in a
+    value in GF(2^128).  If we consider a GF(2^128) value in a
     single byte, we can construct a table of the 256 16 byte
     values that result from the 256 values of this byte.
     This requires 4096 bytes. If we take the highest byte in
@@ -443,6 +507,28 @@
 }
 EXPORT_SYMBOL(gf128mul_init_4k_bbe);
 
+struct gf128mul_4k *gf128mul_init_4k_ble(const be128 *g)
+{
+	struct gf128mul_4k *t;
+	int j, k;
+
+	t = kzalloc(sizeof(*t), GFP_KERNEL);
+	if (!t)
+		goto out;
+
+	t->t[1] = *g;
+	for (j = 1; j <= 64; j <<= 1)
+		gf128mul_x_ble(&t->t[j + j], &t->t[j]);
+
+	for (j = 2; j < 256; j += j)
+		for (k = 1; k < j; ++k)
+			be128_xor(&t->t[j + k], &t->t[j], &t->t[k]);
+
+out:
+	return t;
+}
+EXPORT_SYMBOL(gf128mul_init_4k_ble);
+
 void gf128mul_4k_lle(be128 *a, struct gf128mul_4k *t)
 {
 	u8 *ap = (u8 *)a;
@@ -473,5 +559,20 @@
 }
 EXPORT_SYMBOL(gf128mul_4k_bbe);
 
+void gf128mul_4k_ble(be128 *a, struct gf128mul_4k *t)
+{
+	u8 *ap = (u8 *)a;
+	be128 r[1];
+	int i = 15;
+
+	*r = t->t[ap[15]];
+	while (i--) {
+		gf128mul_x8_ble(r);
+		be128_xor(r, r, &t->t[ap[i]]);
+	}
+	*a = *r;
+}
+EXPORT_SYMBOL(gf128mul_4k_ble);
+
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("Functions for multiplying elements of GF(2^128)");
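[Editor's note] As a concrete check of the xda_be() table above: the reduction identity is x^128 = x^7 + x^2 + x + 1 (mod the irreducible polynomial), so an overflow of exactly the x^128 coefficient (i = 0x01) must fold back in as 0x87 = 10000111b, which is precisely the table's bit-0 term 0x0087. Higher bits are shifted copies, and multi-bit overflow bytes XOR them together:

	i = 0x01: x^128             -> x^7 + x^2 + x + 1   = 0x0087
	i = 0x02: x^129 = x * x^128 -> x^8 + x^3 + x^2 + x = 0x010e
	i = 0x03: 0x0087 ^ 0x010e                          = 0x0189
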
diff -ruw linux-4.4.115/crypto/Kconfig linux-4.4.115-fbx/crypto/Kconfig
--- linux-4.4.115/crypto/Kconfig	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/crypto/Kconfig	2019-10-29 09:26:23.345200262 +0100
@@ -289,6 +289,24 @@
 	  CBC: Cipher Block Chaining mode
 	  This block cipher algorithm is required for IPSec.
 
+config CRYPTO_HEH
+	tristate "HEH support"
+	select CRYPTO_CMAC
+	select CRYPTO_ECB
+	select CRYPTO_GF128MUL
+	select CRYPTO_MANAGER
+	select CRYPTO_POLY_HASH_ARM64_CE if ARM64 && KERNEL_MODE_NEON
+	help
+	  HEH: Hash-Encrypt-Hash mode
+	  HEH is a proposed block cipher mode of operation which extends the
+	  strong pseudo-random permutation (SPRP) property of block ciphers to
+	  arbitrary-length input strings.  This provides a stronger notion of
+	  security than existing block cipher modes of operation (e.g. CBC, CTR,
+	  XTS), though it is usually less performant.  Applications include disk
+	  encryption and encryption of file names and contents.  Currently, this
+	  implementation only provides a symmetric cipher interface, so it can't
+	  yet be used as an AEAD.
+
 config CRYPTO_CTR
 	tristate "CTR support"
 	select CRYPTO_BLKCIPHER
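[Editor's note] Since HEH currently exposes only the symmetric-cipher interface, kernel users instantiate it by template name, just as the testmgr entry below does. A sketch (key/keylen are placeholders):

struct crypto_ablkcipher *tfm;
int err;

tfm = crypto_alloc_ablkcipher("heh(aes)", 0, 0);
if (IS_ERR(tfm))
	return PTR_ERR(tfm);

err = crypto_ablkcipher_setkey(tfm, key, keylen);
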
diff -ruw linux-4.4.115/crypto/Makefile linux-4.4.115-fbx/crypto/Makefile
--- linux-4.4.115/crypto/Makefile	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/crypto/Makefile	2019-10-29 09:26:23.345200262 +0100
@@ -67,6 +67,7 @@
 obj-$(CONFIG_CRYPTO_GF128MUL) += gf128mul.o
 obj-$(CONFIG_CRYPTO_ECB) += ecb.o
 obj-$(CONFIG_CRYPTO_CBC) += cbc.o
+obj-$(CONFIG_CRYPTO_HEH) += heh.o
 obj-$(CONFIG_CRYPTO_PCBC) += pcbc.o
 obj-$(CONFIG_CRYPTO_CTS) += cts.o
 obj-$(CONFIG_CRYPTO_LRW) += lrw.o
diff -ruw linux-4.4.115/crypto/shash.c linux-4.4.115-fbx/crypto/shash.c
--- linux-4.4.115/crypto/shash.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/crypto/shash.c	2019-10-29 09:26:23.349200301 +0100
@@ -686,6 +686,14 @@
 }
 EXPORT_SYMBOL_GPL(shash_free_instance);
 
+int crypto_grab_shash(struct crypto_shash_spawn *spawn,
+		      const char *name, u32 type, u32 mask)
+{
+	spawn->base.frontend = &crypto_shash_type;
+	return crypto_grab_spawn(&spawn->base, name, type, mask);
+}
+EXPORT_SYMBOL_GPL(crypto_grab_shash);
+
 int crypto_init_shash_spawn(struct crypto_shash_spawn *spawn,
 			    struct shash_alg *alg,
 			    struct crypto_instance *inst)
diff -ruw linux-4.4.115/crypto/testmgr.c linux-4.4.115-fbx/crypto/testmgr.c
--- linux-4.4.115/crypto/testmgr.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/crypto/testmgr.c	2019-10-29 09:26:23.353200340 +0100
@@ -3215,6 +3215,21 @@
 			}
 		}
 	}, {
+		.alg = "heh(aes)",
+		.test = alg_test_skcipher,
+		.suite = {
+			.cipher = {
+				.enc = {
+					.vecs = aes_heh_enc_tv_template,
+					.count = AES_HEH_ENC_TEST_VECTORS
+				},
+				.dec = {
+					.vecs = aes_heh_dec_tv_template,
+					.count = AES_HEH_DEC_TEST_VECTORS
+				}
+			}
+		}
+	}, {
 		.alg = "hmac(crc32)",
 		.test = alg_test_hash,
 		.suite = {
diff -ruw linux-4.4.115/drivers/amba/bus.c linux-4.4.115-fbx/drivers/amba/bus.c
--- linux-4.4.115/drivers/amba/bus.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/amba/bus.c	2019-10-29 09:26:23.381200614 +0100
@@ -82,7 +82,8 @@
 	struct amba_device *dev = to_amba_device(_dev);
 	char *driver_override, *old = dev->driver_override, *cp;
 
-	if (count > PATH_MAX)
+	/* We need to keep extra room for a newline */
+	if (count >= (PAGE_SIZE - 1))
 		return -EINVAL;
 
 	driver_override = kstrndup(buf, count, GFP_KERNEL);
diff -ruw linux-4.4.115/drivers/android/Kconfig linux-4.4.115-fbx/drivers/android/Kconfig
--- linux-4.4.115/drivers/android/Kconfig	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/android/Kconfig	2019-10-29 09:26:23.381200614 +0100
@@ -19,6 +19,18 @@
 	  Android process, using Binder to identify, invoke and pass arguments
 	  between said processes.
 
+config ANDROID_BINDER_DEVICES
+	string "Android Binder devices"
+	depends on ANDROID_BINDER_IPC
+	default "binder,hwbinder,vndbinder"
+	---help---
+	  Default value for the binder.devices parameter.
+
+	  The binder.devices parameter is a comma-separated list of strings
+	  that specifies the names of the binder device nodes that will be
+	  created. Each binder device has its own context manager, and is
+	  therefore logically separated from the other devices.
+
 config ANDROID_BINDER_IPC_32BIT
 	bool
 	depends on !64BIT && ANDROID_BINDER_IPC
@@ -32,6 +44,16 @@
 
 	  Note that enabling this will break newer Android user-space.
 
+config ANDROID_BINDER_IPC_SELFTEST
+	bool "Android Binder IPC Driver Selftest"
+	depends on ANDROID_BINDER_IPC
+	---help---
+	  This feature allows binder selftest to run.
+
+	  Binder selftest checks the allocation and freeing of binder buffers
+	  exhaustively with combinations of various buffer sizes and
+	  alignments.
+
 endif # if ANDROID
 
 endmenu
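[Editor's note] The Kconfig default above can be overridden at boot via the corresponding module parameter; for example, a device needing only two contexts could pass (assuming the parameter parses the same comma-separated syntax as the default):

	binder.devices=binder,hwbinder
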
diff -ruw linux-4.4.115/drivers/android/Makefile linux-4.4.115-fbx/drivers/android/Makefile
--- linux-4.4.115/drivers/android/Makefile	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/android/Makefile	2019-01-22 16:16:22.823240249 +0100
@@ -1,3 +1,4 @@
 ccflags-y += -I$(src)			# needed for trace events
 
-obj-$(CONFIG_ANDROID_BINDER_IPC)	+= binder.o
+obj-$(CONFIG_ANDROID_BINDER_IPC)	+= binder.o binder_alloc.o
+obj-$(CONFIG_ANDROID_BINDER_IPC_SELFTEST) += binder_alloc_selftest.o
diff -ruw linux-4.4.115/drivers/base/core.c linux-4.4.115-fbx/drivers/base/core.c
--- linux-4.4.115/drivers/base/core.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/base/core.c	2019-10-29 09:26:23.409200888 +0100
@@ -72,6 +72,11 @@
 	return restart_syscall();
 }
 
+void lock_device_hotplug_assert(void)
+{
+	lockdep_assert_held(&device_hotplug_lock);
+}
+
 #ifdef CONFIG_BLOCK
 static inline int device_is_not_partition(struct device *dev)
 {
diff -ruw linux-4.4.115/drivers/base/cpu.c linux-4.4.115-fbx/drivers/base/cpu.c
--- linux-4.4.115/drivers/base/cpu.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/base/cpu.c	2019-10-29 09:26:23.409200888 +0100
@@ -180,10 +180,177 @@
 };
 #endif
 
+#ifdef CONFIG_HOTPLUG_CPU
+
+static ssize_t isolate_show(struct device *dev,
+			    struct device_attribute *attr, char *buf)
+{
+	struct cpu *cpu = container_of(dev, struct cpu, dev);
+	ssize_t rc;
+	int cpuid = cpu->dev.id;
+	unsigned int isolated = cpu_isolated(cpuid);
+
+	rc = snprintf(buf, PAGE_SIZE-2, "%d\n", isolated);
+
+	return rc;
+}
+
+static DEVICE_ATTR_RO(isolate);
+
+static struct attribute *cpu_isolated_attrs[] = {
+	&dev_attr_isolate.attr,
+	NULL
+};
+
+static struct attribute_group cpu_isolated_attr_group = {
+	.attrs = cpu_isolated_attrs,
+};
+
+#endif
+
+#ifdef CONFIG_SCHED_HMP
+
+static ssize_t show_sched_static_cpu_pwr_cost(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	struct cpu *cpu = container_of(dev, struct cpu, dev);
+	ssize_t rc;
+	int cpuid = cpu->dev.id;
+	unsigned int pwr_cost;
+
+	pwr_cost = sched_get_static_cpu_pwr_cost(cpuid);
+
+	rc = snprintf(buf, PAGE_SIZE-2, "%d\n", pwr_cost);
+
+	return rc;
+}
+
+static ssize_t __ref store_sched_static_cpu_pwr_cost(struct device *dev,
+				struct device_attribute *attr,
+				const char *buf, size_t count)
+{
+	struct cpu *cpu = container_of(dev, struct cpu, dev);
+	int err;
+	int cpuid = cpu->dev.id;
+	unsigned int pwr_cost;
+
+	err = kstrtouint(strstrip((char *)buf), 0, &pwr_cost);
+	if (err)
+		return err;
+
+	err = sched_set_static_cpu_pwr_cost(cpuid, pwr_cost);
+
+	if (err >= 0)
+		err = count;
+
+	return err;
+}
+
+static ssize_t show_sched_static_cluster_pwr_cost(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	struct cpu *cpu = container_of(dev, struct cpu, dev);
+	ssize_t rc;
+	int cpuid = cpu->dev.id;
+	unsigned int pwr_cost;
+
+	pwr_cost = sched_get_static_cluster_pwr_cost(cpuid);
+
+	rc = snprintf(buf, PAGE_SIZE-2, "%d\n", pwr_cost);
+
+	return rc;
+}
+
+static ssize_t __ref store_sched_static_cluster_pwr_cost(struct device *dev,
+				struct device_attribute *attr,
+				const char *buf, size_t count)
+{
+	struct cpu *cpu = container_of(dev, struct cpu, dev);
+	int err;
+	int cpuid = cpu->dev.id;
+	unsigned int pwr_cost;
+
+	err = kstrtouint(strstrip((char *)buf), 0, &pwr_cost);
+	if (err)
+		return err;
+
+	err = sched_set_static_cluster_pwr_cost(cpuid, pwr_cost);
+
+	if (err >= 0)
+		err = count;
+
+	return err;
+}
+
+static ssize_t show_sched_cluser_wake_idle(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	struct cpu *cpu = container_of(dev, struct cpu, dev);
+	ssize_t rc;
+	int cpuid = cpu->dev.id;
+	unsigned int wake_up_idle;
+
+	wake_up_idle = sched_get_cluster_wake_idle(cpuid);
+
+	rc = scnprintf(buf, PAGE_SIZE-2, "%d\n", wake_up_idle);
+
+	return rc;
+}
+
+static ssize_t __ref store_sched_cluster_wake_idle(struct device *dev,
+				struct device_attribute *attr,
+				const char *buf, size_t count)
+{
+	struct cpu *cpu = container_of(dev, struct cpu, dev);
+	int err;
+	int cpuid = cpu->dev.id;
+	unsigned int wake_up_idle;
+
+	err = kstrtouint(strstrip((char *)buf), 0, &wake_up_idle);
+	if (err)
+		return err;
+
+	err = sched_set_cluster_wake_idle(cpuid, wake_up_idle);
+
+	if (err >= 0)
+		err = count;
+
+	return err;
+}
+
+static DEVICE_ATTR(sched_static_cpu_pwr_cost, 0644,
+					show_sched_static_cpu_pwr_cost,
+					store_sched_static_cpu_pwr_cost);
+static DEVICE_ATTR(sched_static_cluster_pwr_cost, 0644,
+					show_sched_static_cluster_pwr_cost,
+					store_sched_static_cluster_pwr_cost);
+static DEVICE_ATTR(sched_cluster_wake_up_idle, 0644,
+					show_sched_cluser_wake_idle,
+					store_sched_cluster_wake_idle);
+
+static struct attribute *hmp_sched_cpu_attrs[] = {
+	&dev_attr_sched_static_cpu_pwr_cost.attr,
+	&dev_attr_sched_static_cluster_pwr_cost.attr,
+	&dev_attr_sched_cluster_wake_up_idle.attr,
+	NULL
+};
+
+static struct attribute_group sched_hmp_cpu_attr_group = {
+	.attrs = hmp_sched_cpu_attrs,
+};
+
+#endif /* CONFIG_SCHED_HMP */
+
 static const struct attribute_group *common_cpu_attr_groups[] = {
 #ifdef CONFIG_KEXEC
 	&crash_note_cpu_attr_group,
 #endif
+#ifdef CONFIG_SCHED_HMP
+	&sched_hmp_cpu_attr_group,
+#endif
+#ifdef CONFIG_HOTPLUG_CPU
+	&cpu_isolated_attr_group,
+#endif
 	NULL
 };
 
@@ -191,6 +358,12 @@
 #ifdef CONFIG_KEXEC
 	&crash_note_cpu_attr_group,
 #endif
+#ifdef CONFIG_SCHED_HMP
+	&sched_hmp_cpu_attr_group,
+#endif
+#ifdef CONFIG_HOTPLUG_CPU
+	&cpu_isolated_attr_group,
+#endif
 	NULL
 };
 
@@ -220,6 +393,7 @@
 	_CPU_ATTR(online, &cpu_online_mask),
 	_CPU_ATTR(possible, &cpu_possible_mask),
 	_CPU_ATTR(present, &cpu_present_mask),
+	_CPU_ATTR(core_ctl_isolated, &cpu_isolated_mask),
 };
 
 /*
@@ -454,6 +628,7 @@
 	&cpu_attrs[0].attr.attr,
 	&cpu_attrs[1].attr.attr,
 	&cpu_attrs[2].attr.attr,
+	&cpu_attrs[3].attr.attr,
 	&dev_attr_kernel_max.attr,
 	&dev_attr_offline.attr,
 	&dev_attr_isolated.attr,
diff -ruw linux-4.4.115/drivers/base/dd.c linux-4.4.115-fbx/drivers/base/dd.c
--- linux-4.4.115/drivers/base/dd.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/base/dd.c	2019-10-29 09:26:23.409200888 +0100
@@ -171,26 +171,49 @@
 	queue_work(deferred_wq, &deferred_probe_work);
 }
 
+static void enable_trigger_defer_cycle(void)
+{
+	driver_deferred_probe_enable = true;
+	driver_deferred_probe_trigger();
+	/*
+	 * Sort as many dependencies as possible before the next initcall
+	 * level
+	 */
+	flush_workqueue(deferred_wq);
+}
+
 /**
  * deferred_probe_initcall() - Enable probing of deferred devices
  *
  * We don't want to get in the way when the bulk of drivers are getting probed.
  * Instead, this initcall makes sure that deferred probing is delayed until
- * late_initcall time.
+ * all the registered initcall functions at a particular level are completed.
+ * This function is invoked at every *_initcall_sync level.
  */
 static int deferred_probe_initcall(void)
 {
+	if (!deferred_wq) {
 	deferred_wq = create_singlethread_workqueue("deferwq");
 	if (WARN_ON(!deferred_wq))
 		return -ENOMEM;
+	}
 
-	driver_deferred_probe_enable = true;
-	driver_deferred_probe_trigger();
-	/* Sort as many dependencies as possible before exiting initcalls */
-	flush_workqueue(deferred_wq);
+	enable_trigger_defer_cycle();
+	driver_deferred_probe_enable = false;
+	return 0;
+}
+arch_initcall_sync(deferred_probe_initcall);
+subsys_initcall_sync(deferred_probe_initcall);
+fs_initcall_sync(deferred_probe_initcall);
+device_initcall_sync(deferred_probe_initcall);
+
+static int deferred_probe_enable_fn(void)
+{
+	/* Enable deferred probing for all time */
+	enable_trigger_defer_cycle();
 	return 0;
 }
-late_initcall(deferred_probe_initcall);
+late_initcall(deferred_probe_enable_fn);
 
 static void driver_bound(struct device *dev)
 {
@@ -205,6 +228,8 @@
 
 	klist_add_tail(&dev->p->knode_driver, &dev->driver->p->klist_devices);
 
+	device_pm_check_callbacks(dev);
+
 	/*
 	 * Make sure the device is no longer in one of the deferred lists and
 	 * kick off retrying all pending devices
@@ -697,6 +722,7 @@
 			dev->pm_domain->dismiss(dev);
 
 		klist_remove(&dev->p->knode_driver);
+		device_pm_check_callbacks(dev);
 		if (dev->bus)
 			blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
 						     BUS_NOTIFY_UNBOUND_DRIVER,
diff -ruw linux-4.4.115/drivers/base/dma-mapping.c linux-4.4.115-fbx/drivers/base/dma-mapping.c
--- linux-4.4.115/drivers/base/dma-mapping.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/base/dma-mapping.c	2019-01-22 16:16:22.875240720 +0100
@@ -309,7 +309,12 @@
 	void *ptr;
 	unsigned long pfn;
 
-	pages = kmalloc(sizeof(struct page *) << get_order(size), GFP_KERNEL);
+	pages = kmalloc(sizeof(struct page *) << get_order(size),
+			GFP_KERNEL | __GFP_NOWARN);
+
+	if (!pages)
+		pages = vmalloc(sizeof(struct page *) << get_order(size));
+
 	if (!pages)
 		return NULL;
 
@@ -318,20 +323,24 @@
 
 	ptr = dma_common_pages_remap(pages, size, vm_flags, prot, caller);
 
-	kfree(pages);
+	kvfree(pages);
 
 	return ptr;
 }
 
 /*
+	area->pages = pages;
+
  * unmaps a range previously mapped by dma_common_*_remap
  */
-void dma_common_free_remap(void *cpu_addr, size_t size, unsigned long vm_flags)
+void dma_common_free_remap(void *cpu_addr, size_t size, unsigned long vm_flags,
+			   bool no_warn)
 {
 	struct vm_struct *area = find_vm_area(cpu_addr);
 
 	if (!area || (area->flags & vm_flags) != vm_flags) {
-		WARN(1, "trying to free invalid coherent area: %p\n", cpu_addr);
+		WARN(!no_warn, "trying to free invalid coherent area: %p\n",
+			cpu_addr);
 		return;
 	}
 
diff -ruw linux-4.4.115/drivers/base/firmware_class.c linux-4.4.115-fbx/drivers/base/firmware_class.c
--- linux-4.4.115/drivers/base/firmware_class.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/base/firmware_class.c	2019-10-29 09:26:23.413200927 +0100
@@ -29,6 +29,7 @@
 #include <linux/syscore_ops.h>
 #include <linux/reboot.h>
 #include <linux/security.h>
+#include <linux/io.h>
 
 #include <generated/utsrelease.h>
 
@@ -111,6 +112,7 @@
 #define FW_OPT_FALLBACK		0
 #endif
 #define FW_OPT_NO_WARN	(1U << 3)
+#define FW_OPT_NOCACHE	(1U << 4)
 
 struct firmware_cache {
 	/* firmware_buf instance will be added into the below list */
@@ -142,6 +144,11 @@
 	unsigned long status;
 	void *data;
 	size_t size;
+	phys_addr_t dest_addr;
+	size_t dest_size;
+	void * (*map_fw_mem)(phys_addr_t phys, size_t size, void *data);
+	void (*unmap_fw_mem)(void *virt, size_t size, void *data);
+	void *map_data;
 #ifdef CONFIG_FW_LOADER_USER_HELPER
 	bool is_paged_buf;
 	bool need_uevent;
@@ -163,6 +170,22 @@
 	const char *name;
 };
 
+struct fw_desc {
+	struct work_struct work;
+	const struct firmware **firmware_p;
+	const char *name;
+	struct device *device;
+	unsigned int opt_flags;
+	phys_addr_t dest_addr;
+	size_t dest_size;
+	void * (*map_fw_mem)(phys_addr_t phys, size_t size, void *data);
+	void (*unmap_fw_mem)(void *virt, size_t size, void *data);
+	void *map_data;
+	struct module *module;
+	void *context;
+	void (*cont)(const struct firmware *fw, void *context);
+};
+
 #define to_fwbuf(d) container_of(d, struct firmware_buf, ref)
 
 #define	FW_LOADER_NO_CACHE	0
@@ -249,6 +272,9 @@
 		 (unsigned int)buf->size);
 
 	list_del(&buf->list);
+#ifdef CONFIG_FW_LOADER_USER_HELPER
+	list_del(&buf->pending_list);
+#endif
 	spin_unlock(&fwc->lock);
 
 #ifdef CONFIG_FW_LOADER_USER_HELPER
@@ -268,6 +294,11 @@
 static void fw_free_buf(struct firmware_buf *buf)
 {
 	struct firmware_cache *fwc = buf->fwc;
+	if (!fwc) {
+		kfree_const(buf->fw_id);
+		kfree(buf);
+		return;
+	}
 	spin_lock(&fwc->lock);
 	if (!kref_put(&buf->ref, __fw_free_buf))
 		spin_unlock(&fwc->lock);
@@ -280,7 +311,10 @@
 	"/lib/firmware/updates/" UTS_RELEASE,
 	"/lib/firmware/updates",
 	"/lib/firmware/" UTS_RELEASE,
-	"/lib/firmware"
+	"/lib/firmware",
+	"/lib64/firmware",
+	"/usr/lib/hotplug/firmware",
+	"/firmware/image"
 };
 
 /*
@@ -302,6 +336,13 @@
 	size = i_size_read(file_inode(file));
 	if (size <= 0)
 		return -EINVAL;
+	if (fw_buf->dest_size > 0 && fw_buf->dest_size < size)
+		return -EINVAL;
+
+	if (fw_buf->dest_addr)
+		buf = fw_buf->map_fw_mem(fw_buf->dest_addr,
+					   fw_buf->dest_size, fw_buf->map_data);
+	else
 	buf = vmalloc(size);
 	if (!buf)
 		return -ENOMEM;
@@ -316,14 +357,20 @@
 		goto fail;
 	fw_buf->data = buf;
 	fw_buf->size = size;
+	if (fw_buf->dest_addr)
+		fw_buf->unmap_fw_mem(buf, fw_buf->size, fw_buf->map_data);
 	return 0;
 fail:
+	if (fw_buf->dest_addr)
+		fw_buf->unmap_fw_mem(buf, fw_buf->size, fw_buf->map_data);
+	else
 	vfree(buf);
 	return rc;
 }
 
 static int fw_get_filesystem_firmware(struct device *device,
-				       struct firmware_buf *buf)
+				      struct firmware_buf *buf,
+				      phys_addr_t dest_addr, size_t dest_size)
 {
 	int i, len;
 	int rc = -ENOENT;
@@ -658,6 +705,10 @@
 	case 1:
 		/* discarding any previous partial load */
 		if (!test_bit(FW_STATUS_DONE, &fw_buf->status)) {
+			if (fw_buf->dest_addr) {
+				set_bit(FW_STATUS_LOADING, &fw_buf->status);
+				break;
+			}
 			for (i = 0; i < fw_buf->nr_pages; i++)
 				__free_page(fw_buf->pages[i]);
 			kfree(fw_buf->pages);
@@ -715,6 +766,102 @@
 
 static DEVICE_ATTR(loading, 0644, firmware_loading_show, firmware_loading_store);
 
+static int __firmware_data_rw(struct firmware_priv *fw_priv, char *buffer,
+				loff_t *offset, size_t count, int read)
+{
+	u8 __iomem *fw_buf;
+	struct firmware_buf *buf = fw_priv->buf;
+	int retval = count;
+
+	if ((*offset + count) > buf->dest_size) {
+		pr_debug("%s: Failed size check.\n", __func__);
+		retval = -EINVAL;
+		goto out;
+	}
+
+	fw_buf = buf->map_fw_mem(buf->dest_addr + *offset, count,
+					buf->map_data);
+	if (!fw_buf) {
+		pr_debug("%s: Failed ioremap.\n", __func__);
+		retval = -ENOMEM;
+		goto out;
+	}
+
+	if (read)
+		memcpy(buffer, fw_buf, count);
+	else
+		memcpy(fw_buf, buffer, count);
+
+	*offset += count;
+	buf->unmap_fw_mem(fw_buf, count, buf->map_data);
+
+out:
+	return retval;
+}
+
+static ssize_t firmware_direct_read(struct file *filp, struct kobject *kobj,
+				  struct bin_attribute *bin_attr,
+				  char *buffer, loff_t offset, size_t count)
+{
+	struct device *dev = kobj_to_dev(kobj);
+	struct firmware_priv *fw_priv = to_firmware_priv(dev);
+	struct firmware *fw;
+	ssize_t ret_count;
+
+	if (!fw_priv->fw)
+		return -ENODEV;
+
+	mutex_lock(&fw_lock);
+	fw = fw_priv->fw;
+
+	if (offset > fw->size) {
+		ret_count = 0;
+		goto out;
+	}
+	if (count > fw->size - offset)
+		count = fw->size - offset;
+
+	if (test_bit(FW_STATUS_DONE, &fw_priv->buf->status)) {
+		ret_count = -ENODEV;
+		goto out;
+	}
+
+	ret_count = __firmware_data_rw(fw_priv, buffer, &offset, count, 1);
+out:
+	mutex_unlock(&fw_lock);
+	return ret_count;
+}
+
+static ssize_t firmware_direct_write(struct file *filp, struct kobject *kobj,
+				   struct bin_attribute *bin_attr,
+				   char *buffer, loff_t offset, size_t count)
+{
+	struct device *dev = kobj_to_dev(kobj);
+	struct firmware_priv *fw_priv = to_firmware_priv(dev);
+	struct firmware *fw;
+	ssize_t retval;
+
+	if (!capable(CAP_SYS_RAWIO))
+		return -EPERM;
+
+	mutex_lock(&fw_lock);
+	fw = fw_priv->fw;
+	if (!fw || !fw_priv->buf ||
+			test_bit(FW_STATUS_DONE, &fw_priv->buf->status)) {
+		retval = -ENODEV;
+		goto out;
+	}
+
+	retval = __firmware_data_rw(fw_priv, buffer, &offset, count, 0);
+	if (retval < 0)
+		goto out;
+
+	fw_priv->buf->size = max_t(size_t, offset, fw_priv->buf->size);
+out:
+	mutex_unlock(&fw_lock);
+	return retval;
+}
+
 static ssize_t firmware_data_read(struct file *filp, struct kobject *kobj,
 				  struct bin_attribute *bin_attr,
 				  char *buffer, loff_t offset, size_t count)
@@ -864,6 +1011,13 @@
 	.write = firmware_data_write,
 };
 
+static struct bin_attribute firmware_direct_attr_data = {
+	.attr = { .name = "data", .mode = 0644 },
+	.size = 0,
+	.read = firmware_direct_read,
+	.write = firmware_direct_write,
+};
+
 static struct attribute *fw_dev_attrs[] = {
 	&dev_attr_loading.attr,
 	NULL
@@ -879,31 +1033,47 @@
 	.bin_attrs = fw_dev_bin_attrs,
 };
 
+static struct bin_attribute *fw_dev_direct_bin_attrs[] = {
+	&firmware_direct_attr_data,
+	NULL
+};
+
+static const struct attribute_group fw_dev_direct_attr_group = {
+	.attrs = fw_dev_attrs,
+	.bin_attrs = fw_dev_direct_bin_attrs,
+};
+
 static const struct attribute_group *fw_dev_attr_groups[] = {
 	&fw_dev_attr_group,
 	NULL
 };
 
+static  const struct attribute_group *fw_dev_direct_attr_groups[] = {
+	&fw_dev_direct_attr_group,
+	NULL
+};
+
 static struct firmware_priv *
-fw_create_instance(struct firmware *firmware, const char *fw_name,
-		   struct device *device, unsigned int opt_flags)
+fw_create_instance(struct firmware *firmware, struct fw_desc *desc)
 {
 	struct firmware_priv *fw_priv;
 	struct device *f_dev;
 
 	fw_priv = kzalloc(sizeof(*fw_priv), GFP_KERNEL);
 	if (!fw_priv) {
+		dev_err(desc->device, "%s: kmalloc failed\n", __func__);
 		fw_priv = ERR_PTR(-ENOMEM);
 		goto exit;
 	}
 
-	fw_priv->nowait = !!(opt_flags & FW_OPT_NOWAIT);
+	fw_priv->nowait = !!(desc->opt_flags & FW_OPT_NOWAIT);
 	fw_priv->fw = firmware;
+
 	f_dev = &fw_priv->dev;
 
 	device_initialize(f_dev);
-	dev_set_name(f_dev, "%s", fw_name);
-	f_dev->parent = device;
+	dev_set_name(f_dev, "%s", desc->name);
+	f_dev->parent = desc->device;
 	f_dev->class = &firmware_class;
 	f_dev->groups = fw_dev_attr_groups;
 exit:
@@ -919,7 +1089,10 @@
 	struct firmware_buf *buf = fw_priv->buf;
 
 	/* fall back on userspace loading */
-	buf->is_paged_buf = true;
+	buf->is_paged_buf = buf->dest_addr ? false : true;
+
+	if (buf->dest_addr)
+		f_dev->groups = fw_dev_direct_attr_groups;
 
 	dev_set_uevent_suppress(f_dev, true);
 
@@ -942,7 +1115,7 @@
 		timeout = MAX_JIFFY_OFFSET;
 	}
 
-	timeout = wait_for_completion_interruptible_timeout(&buf->completion,
+	timeout = wait_for_completion_killable_timeout(&buf->completion,
 			timeout);
 	if (timeout == -ERESTARTSYS || !timeout) {
 		retval = timeout;
@@ -955,7 +1128,7 @@
 
 	if (is_fw_load_aborted(buf))
 		retval = -EAGAIN;
-	else if (!buf->data)
+	else if (!buf->data && buf->is_paged_buf)
 		retval = -ENOMEM;
 
 	device_del(f_dev);
@@ -965,17 +1138,16 @@
 }
 
 static int fw_load_from_user_helper(struct firmware *firmware,
-				    const char *name, struct device *device,
-				    unsigned int opt_flags, long timeout)
+				    struct fw_desc *desc, long timeout)
 {
 	struct firmware_priv *fw_priv;
 
-	fw_priv = fw_create_instance(firmware, name, device, opt_flags);
+	fw_priv = fw_create_instance(firmware, desc);
 	if (IS_ERR(fw_priv))
 		return PTR_ERR(fw_priv);
 
 	fw_priv->buf = firmware->priv;
-	return _request_firmware_load(fw_priv, opt_flags, timeout);
+	return _request_firmware_load(fw_priv, desc->opt_flags, timeout);
 }
 
 #ifdef CONFIG_PM_SLEEP
@@ -996,9 +1168,8 @@
 
 #else /* CONFIG_FW_LOADER_USER_HELPER */
 static inline int
-fw_load_from_user_helper(struct firmware *firmware, const char *name,
-			 struct device *device, unsigned int opt_flags,
-			 long timeout)
+fw_load_from_user_helper(struct firmware *firmware,
+			 struct fw_desc *desc, long timeout)
 {
 	return -ENOENT;
 }
@@ -1037,8 +1208,7 @@
  * or a negative error code
  */
 static int
-_request_firmware_prepare(struct firmware **firmware_p, const char *name,
-			  struct device *device)
+_request_firmware_prepare(struct firmware **firmware_p, struct fw_desc *desc)
 {
 	struct firmware *firmware;
 	struct firmware_buf *buf;
@@ -1046,17 +1216,31 @@
 
 	*firmware_p = firmware = kzalloc(sizeof(*firmware), GFP_KERNEL);
 	if (!firmware) {
-		dev_err(device, "%s: kmalloc(struct firmware) failed\n",
+		dev_err(desc->device, "%s: kmalloc(struct firmware) failed\n",
 			__func__);
 		return -ENOMEM;
 	}
 
-	if (fw_get_builtin_firmware(firmware, name)) {
-		dev_dbg(device, "firmware: using built-in firmware %s\n", name);
+	if (fw_get_builtin_firmware(firmware, desc->name)) {
+		dev_dbg(desc->device, "firmware: using built-in firmware %s\n",
+			desc->name);
 		return 0; /* assigned */
 	}
 
-	ret = fw_lookup_and_allocate_buf(name, &fw_cache, &buf);
+	if (desc->opt_flags & FW_OPT_NOCACHE) {
+		buf = __allocate_fw_buf(desc->name, NULL);
+		if (!buf)
+			return -ENOMEM;
+		buf->dest_addr = desc->dest_addr;
+		buf->dest_size = desc->dest_size;
+		buf->map_fw_mem = desc->map_fw_mem;
+		buf->unmap_fw_mem = desc->unmap_fw_mem;
+		buf->map_data = desc->map_data;
+		firmware->priv = buf;
+		return 1;
+	}
+
+	ret = fw_lookup_and_allocate_buf(desc->name, &fw_cache, &buf);
 
 	/*
 	 * bind with 'buf' now to avoid warning in failure path
@@ -1095,15 +1279,19 @@
 	 * device may has been deleted already, but the problem
 	 * should be fixed in devres or driver core.
 	 */
-	/* don't cache firmware handled without uevent */
-	if (device && (opt_flags & FW_OPT_UEVENT))
+	/* don't cache firmware handled without uevent, or when explicitly
+	 * disabled
+	 */
+	if (device && (opt_flags & FW_OPT_UEVENT)
+	    && !(opt_flags & FW_OPT_NOCACHE))
 		fw_add_devm_name(device, buf->fw_id);
 
 	/*
 	 * After caching firmware image is started, let it piggyback
 	 * on request firmware.
 	 */
-	if (buf->fwc->state == FW_LOADER_START_CACHE) {
+	if (!(opt_flags & FW_OPT_NOCACHE)
+	    && (buf->fwc->state == FW_LOADER_START_CACHE)) {
 		if (fw_cache_piggyback_on_request(buf->fw_id))
 			kref_get(&buf->ref);
 	}
@@ -1115,58 +1303,56 @@
 }
 
 /* called from request_firmware() and request_firmware_work_func() */
-static int
-_request_firmware(const struct firmware **firmware_p, const char *name,
-		  struct device *device, unsigned int opt_flags)
+static int _request_firmware(struct fw_desc *desc)
 {
 	struct firmware *fw;
 	long timeout;
 	int ret;
 
-	if (!firmware_p)
+	if (!desc->firmware_p)
 		return -EINVAL;
 
-	if (!name || name[0] == '\0')
+	if (!desc->name || desc->name[0] == '\0')
 		return -EINVAL;
 
-	ret = _request_firmware_prepare(&fw, name, device);
+	ret = _request_firmware_prepare(&fw, desc);
 	if (ret <= 0) /* error or already assigned */
 		goto out;
 
 	ret = 0;
 	timeout = firmware_loading_timeout();
-	if (opt_flags & FW_OPT_NOWAIT) {
+	if (desc->opt_flags & FW_OPT_NOWAIT) {
 		timeout = usermodehelper_read_lock_wait(timeout);
 		if (!timeout) {
-			dev_dbg(device, "firmware: %s loading timed out\n",
-				name);
+			dev_dbg(desc->device, "firmware: %s loading timed out\n",
+				desc->name);
 			ret = -EBUSY;
 			goto out;
 		}
 	} else {
 		ret = usermodehelper_read_trylock();
 		if (WARN_ON(ret)) {
-			dev_err(device, "firmware: %s will not be loaded\n",
-				name);
+			dev_err(desc->device, "firmware: %s will not be loaded\n",
+				desc->name);
 			goto out;
 		}
 	}
 
-	ret = fw_get_filesystem_firmware(device, fw->priv);
+	ret = fw_get_filesystem_firmware(desc->device, fw->priv,
+					 desc->dest_addr, desc->dest_size);
 	if (ret) {
-		if (!(opt_flags & FW_OPT_NO_WARN))
-			dev_warn(device,
+		if (!(desc->opt_flags & FW_OPT_NO_WARN))
+			dev_dbg(desc->device,
 				 "Direct firmware load for %s failed with error %d\n",
-				 name, ret);
-		if (opt_flags & FW_OPT_USERHELPER) {
-			dev_warn(device, "Falling back to user helper\n");
-			ret = fw_load_from_user_helper(fw, name, device,
-						       opt_flags, timeout);
+				 desc->name, ret);
+		if (desc->opt_flags & FW_OPT_USERHELPER) {
+			dev_dbg(desc->device, "Falling back to user helper\n");
+			ret = fw_load_from_user_helper(fw, desc, timeout);
 		}
 	}
 
 	if (!ret)
-		ret = assign_firmware_buf(fw, device, opt_flags);
+		ret = assign_firmware_buf(fw, desc->device, desc->opt_flags);
 
 	usermodehelper_read_unlock();
 
@@ -1176,7 +1362,7 @@
 		fw = NULL;
 	}
 
-	*firmware_p = fw;
+	*desc->firmware_p = fw;
 	return ret;
 }
 
@@ -1204,13 +1390,21 @@
 request_firmware(const struct firmware **firmware_p, const char *name,
 		 struct device *device)
 {
+	struct fw_desc desc;
 	int ret;
 
+	desc.firmware_p = firmware_p;
+	desc.name = name;
+	desc.device = device;
+	desc.dest_addr = 0;
+	desc.dest_size = 0;
+	desc.opt_flags = FW_OPT_UEVENT | FW_OPT_FALLBACK;
+
 	/* Need to pin this module until return */
 	__module_get(THIS_MODULE);
-	ret = _request_firmware(firmware_p, name, device,
-				FW_OPT_UEVENT | FW_OPT_FALLBACK);
+	ret = _request_firmware(&desc);
 	module_put(THIS_MODULE);
+
 	return ret;
 }
 EXPORT_SYMBOL(request_firmware);
@@ -1229,17 +1423,70 @@
 int request_firmware_direct(const struct firmware **firmware_p,
 			    const char *name, struct device *device)
 {
+	struct fw_desc desc;
 	int ret;
 
+	desc.firmware_p = firmware_p;
+	desc.name = name;
+	desc.device = device;
+	desc.opt_flags = FW_OPT_UEVENT | FW_OPT_NO_WARN;
+
+	/* Need to pin this module until return */
 	__module_get(THIS_MODULE);
-	ret = _request_firmware(firmware_p, name, device,
-				FW_OPT_UEVENT | FW_OPT_NO_WARN);
+	ret = _request_firmware(&desc);
 	module_put(THIS_MODULE);
+
 	return ret;
 }
 EXPORT_SYMBOL_GPL(request_firmware_direct);
 
 /**
+ * request_firmware_into_buf: - send firmware request and wait for it
+ * @dest_addr: Destination address for the firmware
+ * @dest_size: Size of destination buffer
+ *
+ *      Similar to request_firmware, except that it takes a buffer address
+ *      and copies the firmware data directly into that buffer. Returns the
+ *      size of the firmware that was loaded at dest_addr. This API prevents
+ *      caching of the image.
+*/
+int
+request_firmware_into_buf(const char *name, struct device *device,
+			phys_addr_t dest_addr, size_t dest_size,
+			void * (*map_fw_mem)(phys_addr_t phys, size_t size,
+						void *data),
+			void (*unmap_fw_mem)(void *virt, size_t sz, void *data),
+			void *map_data)
+{
+	struct fw_desc desc;
+	const struct firmware *fp = NULL;
+	int ret;
+
+	if (dest_addr && !map_fw_mem)
+		return -EINVAL;
+	if (dest_addr && dest_size <= 0)
+		return -EINVAL;
+
+	desc.firmware_p = &fp;
+	desc.name = name;
+	desc.device = device;
+	desc.opt_flags = FW_OPT_FALLBACK | FW_OPT_UEVENT | FW_OPT_NOCACHE;
+	desc.dest_addr = dest_addr;
+	desc.dest_size = dest_size;
+	desc.map_fw_mem = map_fw_mem;
+	desc.unmap_fw_mem = unmap_fw_mem;
+	desc.map_data = map_data;
+
+	ret = _request_firmware(&desc);
+	if (ret)
+		return ret;
+	ret = fp->size;
+	release_firmware(fp);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(request_firmware_into_buf);
+
+/**
  * release_firmware: - release the resource associated with a firmware image
  * @fw: firmware resource to release
  **/
@@ -1254,31 +1501,70 @@
 EXPORT_SYMBOL(release_firmware);
 
 /* Async support */
-struct firmware_work {
-	struct work_struct work;
-	struct module *module;
-	const char *name;
-	struct device *device;
-	void *context;
-	void (*cont)(const struct firmware *fw, void *context);
-	unsigned int opt_flags;
-};
-
 static void request_firmware_work_func(struct work_struct *work)
 {
-	struct firmware_work *fw_work;
 	const struct firmware *fw;
+	struct fw_desc *desc;
+
+	desc = container_of(work, struct fw_desc, work);
+	desc->firmware_p = &fw;
+	_request_firmware(desc);
+	desc->cont(fw, desc->context);
+	put_device(desc->device); /* taken in request_firmware_nowait() */
+
+	module_put(desc->module);
+	kfree(desc);
+}
+
+int
+_request_firmware_nowait(
+	struct module *module, bool uevent,
+	const char *name, struct device *device, gfp_t gfp, void *context,
+	void (*cont)(const struct firmware *fw, void *context),
+	bool nocache, phys_addr_t dest_addr, size_t dest_size,
+	void * (*map_fw_mem)(phys_addr_t phys, size_t size, void *data),
+	void (*unmap_fw_mem)(void *virt, size_t size, void *data),
+	void *map_data)
+{
+	struct fw_desc *desc;
+
+	if (dest_addr && !map_fw_mem)
+		return -EINVAL;
+	if (dest_addr && dest_size <= 0)
+		return -EINVAL;
+
+	desc = kzalloc(sizeof(struct fw_desc), gfp);
+	if (!desc)
+		return -ENOMEM;
+
+	desc->module = module;
+	desc->name = name;
+	desc->device = device;
+	desc->context = context;
+	desc->cont = cont;
+	desc->dest_addr = dest_addr;
+	desc->dest_size = dest_size;
+	desc->map_fw_mem = map_fw_mem;
+	desc->unmap_fw_mem = unmap_fw_mem;
+	desc->map_data = map_data;
+	desc->opt_flags = FW_OPT_FALLBACK | FW_OPT_NOWAIT;
+
+	if (uevent)
+		desc->opt_flags |= FW_OPT_UEVENT;
+	else
+		desc->opt_flags |= FW_OPT_USERHELPER;
+	if (nocache)
+		desc->opt_flags |= FW_OPT_NOCACHE;
 
-	fw_work = container_of(work, struct firmware_work, work);
+	if (!try_module_get(module)) {
+		kfree(desc);
+		return -EFAULT;
+	}
 
-	_request_firmware(&fw, fw_work->name, fw_work->device,
-			  fw_work->opt_flags);
-	fw_work->cont(fw, fw_work->context);
-	put_device(fw_work->device); /* taken in request_firmware_nowait() */
-
-	module_put(fw_work->module);
-	kfree_const(fw_work->name);
-	kfree(fw_work);
+	get_device(desc->device);
+	INIT_WORK(&desc->work, request_firmware_work_func);
+	schedule_work(&desc->work);
+	return 0;
 }
 
 /**
@@ -1310,36 +1596,37 @@
 	const char *name, struct device *device, gfp_t gfp, void *context,
 	void (*cont)(const struct firmware *fw, void *context))
 {
-	struct firmware_work *fw_work;
-
-	fw_work = kzalloc(sizeof(struct firmware_work), gfp);
-	if (!fw_work)
-		return -ENOMEM;
-
-	fw_work->module = module;
-	fw_work->name = kstrdup_const(name, gfp);
-	if (!fw_work->name) {
-		kfree(fw_work);
-		return -ENOMEM;
-	}
-	fw_work->device = device;
-	fw_work->context = context;
-	fw_work->cont = cont;
-	fw_work->opt_flags = FW_OPT_NOWAIT | FW_OPT_FALLBACK |
-		(uevent ? FW_OPT_UEVENT : FW_OPT_USERHELPER);
-
-	if (!try_module_get(module)) {
-		kfree_const(fw_work->name);
-		kfree(fw_work);
-		return -EFAULT;
+	return _request_firmware_nowait(module, uevent, name, device, gfp,
+				context, cont, false, 0, 0, NULL, NULL, NULL);
 	}
+EXPORT_SYMBOL(request_firmware_nowait);
 
-	get_device(fw_work->device);
-	INIT_WORK(&fw_work->work, request_firmware_work_func);
-	schedule_work(&fw_work->work);
-	return 0;
+/**
+ * request_firmware_nowait_into_buf - asynchronous version of request_firmware_into_buf
+ * @dest_addr: Destination address for the firmware
+ * @dest_size: Size of destination buffer
+ *
+ * Similar to request_firmware_nowait(), except that it loads the firmware
+ * directly into a caller-provided destination buffer without using an
+ * intermediate kernel buffer.
+ *
+ **/
+int
+request_firmware_nowait_into_buf(
+	struct module *module, bool uevent,
+	const char *name, struct device *device, gfp_t gfp, void *context,
+	void (*cont)(const struct firmware *fw, void *context),
+	phys_addr_t dest_addr, size_t dest_size,
+	void * (*map_fw_mem)(phys_addr_t phys, size_t size, void *data),
+	void (*unmap_fw_mem)(void *virt, size_t size, void *data),
+	void *map_data)
+{
+	return _request_firmware_nowait(module, uevent, name, device, gfp,
+					context, cont, true, dest_addr,
+					dest_size, map_fw_mem, unmap_fw_mem,
+					map_data);
 }
-EXPORT_SYMBOL(request_firmware_nowait);
+EXPORT_SYMBOL_GPL(request_firmware_nowait_into_buf);
 
 #ifdef CONFIG_PM_SLEEP
 static ASYNC_DOMAIN_EXCLUSIVE(fw_cache_domain);
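A minimal consumer sketch for the into-buf loaders added above, assuming a hypothetical "example.fw" image and a fixed carveout address (real drivers would take the region from DT). The callback signatures and the argument order of request_firmware_nowait_into_buf() follow the definitions in the hunks above; the memremap-based mapping helpers are illustrative, not part of the patch:

#include <linux/device.h>
#include <linux/firmware.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/sizes.h>

static void *example_fw_map(phys_addr_t phys, size_t size, void *data)
{
	/* Map the reserved carveout so the loader can copy into it */
	return memremap(phys, size, MEMREMAP_WB);
}

static void example_fw_unmap(void *virt, size_t size, void *data)
{
	memunmap(virt);
}

static void example_fw_done(const struct firmware *fw, void *context)
{
	struct device *dev = context;

	if (!fw) {		/* _request_firmware() failed, fw is NULL */
		dev_err(dev, "firmware load failed\n");
		return;
	}
	dev_info(dev, "loaded %zu bytes into carveout\n", fw->size);
	release_firmware(fw);
}

static int example_load(struct device *dev)
{
	phys_addr_t carveout = 0x90000000;	/* hypothetical address */

	return request_firmware_nowait_into_buf(THIS_MODULE, true,
						"example.fw", dev, GFP_KERNEL,
						dev, example_fw_done,
						carveout, SZ_4M,
						example_fw_map,
						example_fw_unmap, NULL);
}

The synchronous request_firmware_into_buf() takes the same destination arguments and, as its hunk above shows, returns the loaded size rather than 0 on success.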
diff -ruw linux-4.4.115/drivers/base/Makefile linux-4.4.115-fbx/drivers/base/Makefile
--- linux-4.4.115/drivers/base/Makefile	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/base/Makefile	2019-01-22 16:16:22.871240683 +0100
@@ -9,7 +9,7 @@
 obj-$(CONFIG_DMA_CMA) += dma-contiguous.o
 obj-y			+= power/
 obj-$(CONFIG_HAS_DMA)	+= dma-mapping.o
-obj-$(CONFIG_HAVE_GENERIC_DMA_COHERENT) += dma-coherent.o
+obj-$(CONFIG_HAVE_GENERIC_DMA_COHERENT) += dma-coherent.o dma-removed.o
 obj-$(CONFIG_ISA)	+= isa.o
 obj-$(CONFIG_FW_LOADER)	+= firmware_class.o
 obj-$(CONFIG_NUMA)	+= node.o
diff -ruw linux-4.4.115/drivers/base/platform.c linux-4.4.115-fbx/drivers/base/platform.c
--- linux-4.4.115/drivers/base/platform.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/base/platform.c	2019-01-22 16:16:22.875240720 +0100
@@ -117,6 +117,26 @@
 EXPORT_SYMBOL_GPL(platform_get_irq);
 
 /**
+ * platform_irq_count - Count the number of IRQs a platform device uses
+ * @dev: platform device
+ *
+ * Return: Number of IRQs a platform device uses or -EPROBE_DEFER
+ */
+int platform_irq_count(struct platform_device *dev)
+{
+	int ret, nr = 0;
+
+	while ((ret = platform_get_irq(dev, nr)) >= 0)
+		nr++;
+
+	if (ret == -EPROBE_DEFER)
+		return ret;
+
+	return nr;
+}
+EXPORT_SYMBOL_GPL(platform_irq_count);
+
+/**
  * platform_get_resource_byname - get a resource for a device by name
  * @dev: platform device
  * @type: resource type
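A short usage sketch for platform_irq_count(): a probe routine can size its per-IRQ resources up front and propagate -EPROBE_DEFER cleanly. The driver and handler names are hypothetical:

#include <linux/interrupt.h>
#include <linux/platform_device.h>

static irqreturn_t example_isr(int irq, void *data)
{
	return IRQ_HANDLED;
}

static int example_probe(struct platform_device *pdev)
{
	int i, nr_irqs, ret;

	nr_irqs = platform_irq_count(pdev);
	if (nr_irqs < 0)
		return nr_irqs;		/* may be -EPROBE_DEFER */

	for (i = 0; i < nr_irqs; i++) {
		int irq = platform_get_irq(pdev, i);

		if (irq < 0)
			return irq;
		ret = devm_request_irq(&pdev->dev, irq, example_isr, 0,
				       dev_name(&pdev->dev), pdev);
		if (ret)
			return ret;
	}
	return 0;
}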
diff -ruw linux-4.4.115/drivers/base/power/main.c linux-4.4.115-fbx/drivers/base/power/main.c
--- linux-4.4.115/drivers/base/power/main.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/base/power/main.c	2019-10-29 09:26:23.413200927 +0100
@@ -33,6 +33,7 @@
 #include <linux/cpufreq.h>
 #include <linux/cpuidle.h>
 #include <linux/timer.h>
+#include <linux/wakeup_reason.h>
 
 #include "../base.h"
 #include "power.h"
@@ -125,6 +126,7 @@
 {
 	pr_debug("PM: Adding info for %s:%s\n",
 		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
+	device_pm_check_callbacks(dev);
 	mutex_lock(&dpm_list_mtx);
 	if (dev->parent && dev->parent->power.is_prepared)
 		dev_warn(dev, "parent %s should not be sleeping\n",
@@ -147,6 +149,7 @@
 	mutex_unlock(&dpm_list_mtx);
 	device_wakeup_disable(dev);
 	pm_runtime_remove(dev);
+	device_pm_check_callbacks(dev);
 }
 
 /**
@@ -1348,6 +1351,7 @@
 	pm_callback_t callback = NULL;
 	char *info = NULL;
 	int error = 0;
+	char suspend_abort[MAX_SUSPEND_ABORT_LEN];
 	DECLARE_DPM_WATCHDOG_ON_STACK(wd);
 
 	TRACE_DEVICE(dev);
@@ -1368,6 +1372,9 @@
 		pm_wakeup_event(dev, 0);
 
 	if (pm_wakeup_pending()) {
+		pm_get_active_wakeup_sources(suspend_abort,
+			MAX_SUSPEND_ABORT_LEN);
+		log_suspend_abort_reason(suspend_abort);
 		async_error = -EBUSY;
 		goto Complete;
 	}
@@ -1570,6 +1577,11 @@
 
 	dev->power.wakeup_path = device_may_wakeup(dev);
 
+	if (dev->power.no_pm_callbacks) {
+		ret = 1;	/* Let device go direct_complete */
+		goto unlock;
+	}
+
 	if (dev->pm_domain) {
 		info = "preparing power domain ";
 		callback = dev->pm_domain->ops.prepare;
@@ -1592,6 +1604,7 @@
 	if (callback)
 		ret = callback(dev);
 
+unlock:
 	device_unlock(dev);
 
 	if (ret < 0) {
@@ -1720,3 +1733,30 @@
 	device_pm_unlock();
 }
 EXPORT_SYMBOL_GPL(dpm_for_each_dev);
+
+static bool pm_ops_is_empty(const struct dev_pm_ops *ops)
+{
+	if (!ops)
+		return true;
+
+	return !ops->prepare &&
+	       !ops->suspend &&
+	       !ops->suspend_late &&
+	       !ops->suspend_noirq &&
+	       !ops->resume_noirq &&
+	       !ops->resume_early &&
+	       !ops->resume &&
+	       !ops->complete;
+}
+
+void device_pm_check_callbacks(struct device *dev)
+{
+	spin_lock_irq(&dev->power.lock);
+	dev->power.no_pm_callbacks =
+		(!dev->bus || pm_ops_is_empty(dev->bus->pm)) &&
+		(!dev->class || pm_ops_is_empty(dev->class->pm)) &&
+		(!dev->type || pm_ops_is_empty(dev->type->pm)) &&
+		(!dev->pm_domain || pm_ops_is_empty(&dev->pm_domain->ops)) &&
+		(!dev->driver || pm_ops_is_empty(dev->driver->pm));
+	spin_unlock_irq(&dev->power.lock);
+}
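The payoff of the no_pm_callbacks bookkeeping above is the direct_complete fast path in device_prepare(). A hedged sketch of a driver that benefits, assuming its bus, class, type and PM domain expose no PM callbacks either:

static struct platform_driver example_driver = {
	.driver = {
		.name = "example",
		/*
		 * No .pm ops: pm_ops_is_empty() is true for the driver,
		 * so device_pm_check_callbacks() sets
		 * power.no_pm_callbacks, device_prepare() returns 1, and
		 * the device takes the direct_complete path across
		 * suspend/resume.
		 */
	},
};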
diff -ruw linux-4.4.115/drivers/base/power/opp/core.c linux-4.4.115-fbx/drivers/base/power/opp/core.c
--- linux-4.4.115/drivers/base/power/opp/core.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/base/power/opp/core.c	2019-01-22 16:16:22.879240756 +0100
@@ -13,50 +13,52 @@
 
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
+#include <linux/clk.h>
 #include <linux/errno.h>
 #include <linux/err.h>
 #include <linux/slab.h>
 #include <linux/device.h>
 #include <linux/of.h>
 #include <linux/export.h>
+#include <linux/regulator/consumer.h>
 
 #include "opp.h"
 
 /*
- * The root of the list of all devices. All device_opp structures branch off
- * from here, with each device_opp containing the list of opp it supports in
+ * The root of the list of all opp-tables. All opp_table structures branch off
+ * from here, with each opp_table containing the list of opps it supports in
  * various states of availability.
  */
-static LIST_HEAD(dev_opp_list);
+static LIST_HEAD(opp_tables);
 /* Lock to allow exclusive modification to the device and opp lists */
-DEFINE_MUTEX(dev_opp_list_lock);
+DEFINE_MUTEX(opp_table_lock);
 
 #define opp_rcu_lockdep_assert()					\
 do {									\
 	RCU_LOCKDEP_WARN(!rcu_read_lock_held() &&			\
-				!lockdep_is_held(&dev_opp_list_lock),	\
+			 !lockdep_is_held(&opp_table_lock),		\
 			   "Missing rcu_read_lock() or "		\
-			   "dev_opp_list_lock protection");		\
+			 "opp_table_lock protection");			\
 } while (0)
 
-static struct device_list_opp *_find_list_dev(const struct device *dev,
-					      struct device_opp *dev_opp)
+static struct opp_device *_find_opp_dev(const struct device *dev,
+					struct opp_table *opp_table)
 {
-	struct device_list_opp *list_dev;
+	struct opp_device *opp_dev;
 
-	list_for_each_entry(list_dev, &dev_opp->dev_list, node)
-		if (list_dev->dev == dev)
-			return list_dev;
+	list_for_each_entry(opp_dev, &opp_table->dev_list, node)
+		if (opp_dev->dev == dev)
+			return opp_dev;
 
 	return NULL;
 }
 
-static struct device_opp *_managed_opp(const struct device_node *np)
+static struct opp_table *_managed_opp(const struct device_node *np)
 {
-	struct device_opp *dev_opp;
+	struct opp_table *opp_table;
 
-	list_for_each_entry_rcu(dev_opp, &dev_opp_list, node) {
-		if (dev_opp->np == np) {
+	list_for_each_entry_rcu(opp_table, &opp_tables, node) {
+		if (opp_table->np == np) {
 			/*
 			 * Multiple devices can point to the same OPP table and
 			 * so will have same node-pointer, np.
@@ -64,7 +66,7 @@
 			 * But the OPPs will be considered as shared only if the
 			 * OPP table contains a "opp-shared" property.
 			 */
-			return dev_opp->shared_opp ? dev_opp : NULL;
+			return opp_table->shared_opp ? opp_table : NULL;
 		}
 	}
 
@@ -72,24 +74,24 @@
 }
 
 /**
- * _find_device_opp() - find device_opp struct using device pointer
- * @dev:	device pointer used to lookup device OPPs
+ * _find_opp_table() - find opp_table struct using device pointer
+ * @dev:	device pointer used to lookup OPP table
  *
- * Search list of device OPPs for one containing matching device. Does a RCU
- * reader operation to grab the pointer needed.
+ * Search OPP table for one containing matching device. Does a RCU reader
+ * operation to grab the pointer needed.
  *
- * Return: pointer to 'struct device_opp' if found, otherwise -ENODEV or
+ * Return: pointer to 'struct opp_table' if found, otherwise -ENODEV or
  * -EINVAL based on type of error.
  *
  * Locking: For readers, this function must be called under rcu_read_lock().
- * device_opp is a RCU protected pointer, which means that device_opp is valid
+ * opp_table is a RCU protected pointer, which means that opp_table is valid
  * as long as we are under RCU lock.
  *
- * For Writers, this function must be called with dev_opp_list_lock held.
+ * For Writers, this function must be called with opp_table_lock held.
  */
-struct device_opp *_find_device_opp(struct device *dev)
+struct opp_table *_find_opp_table(struct device *dev)
 {
-	struct device_opp *dev_opp;
+	struct opp_table *opp_table;
 
 	opp_rcu_lockdep_assert();
 
@@ -98,9 +100,9 @@
 		return ERR_PTR(-EINVAL);
 	}
 
-	list_for_each_entry_rcu(dev_opp, &dev_opp_list, node)
-		if (_find_list_dev(dev, dev_opp))
-			return dev_opp;
+	list_for_each_entry_rcu(opp_table, &opp_tables, node)
+		if (_find_opp_dev(dev, opp_table))
+			return opp_table;
 
 	return ERR_PTR(-ENODEV);
 }
@@ -213,16 +215,16 @@
  */
 unsigned long dev_pm_opp_get_max_clock_latency(struct device *dev)
 {
-	struct device_opp *dev_opp;
+	struct opp_table *opp_table;
 	unsigned long clock_latency_ns;
 
 	rcu_read_lock();
 
-	dev_opp = _find_device_opp(dev);
-	if (IS_ERR(dev_opp))
+	opp_table = _find_opp_table(dev);
+	if (IS_ERR(opp_table))
 		clock_latency_ns = 0;
 	else
-		clock_latency_ns = dev_opp->clock_latency_ns_max;
+		clock_latency_ns = opp_table->clock_latency_ns_max;
 
 	rcu_read_unlock();
 	return clock_latency_ns;
@@ -230,6 +232,82 @@
 EXPORT_SYMBOL_GPL(dev_pm_opp_get_max_clock_latency);
 
 /**
+ * dev_pm_opp_get_max_volt_latency() - Get max voltage latency in nanoseconds
+ * @dev: device for which we do this operation
+ *
+ * Return: This function returns the max voltage latency in nanoseconds.
+ *
+ * Locking: This function takes rcu_read_lock().
+ */
+unsigned long dev_pm_opp_get_max_volt_latency(struct device *dev)
+{
+	struct opp_table *opp_table;
+	struct dev_pm_opp *opp;
+	struct regulator *reg;
+	unsigned long latency_ns = 0;
+	unsigned long min_uV = ~0, max_uV = 0;
+	int ret;
+
+	rcu_read_lock();
+
+	opp_table = _find_opp_table(dev);
+	if (IS_ERR(opp_table)) {
+		rcu_read_unlock();
+		return 0;
+	}
+
+	reg = opp_table->regulator;
+	if (IS_ERR(reg)) {
+		/* Regulator may not be required for device */
+		if (reg)
+			dev_err(dev, "%s: Invalid regulator (%ld)\n", __func__,
+				PTR_ERR(reg));
+		rcu_read_unlock();
+		return 0;
+	}
+
+	list_for_each_entry_rcu(opp, &opp_table->opp_list, node) {
+		if (!opp->available)
+			continue;
+
+		if (opp->u_volt_min < min_uV)
+			min_uV = opp->u_volt_min;
+		if (opp->u_volt_max > max_uV)
+			max_uV = opp->u_volt_max;
+	}
+
+	rcu_read_unlock();
+
+	/*
+	 * The caller needs to ensure that opp_table (and hence the regulator)
+	 * isn't freed, while we are executing this routine.
+	 */
+	ret = regulator_set_voltage_time(reg, min_uV, max_uV);
+	if (ret > 0)
+		latency_ns = ret * 1000;
+
+	return latency_ns;
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_get_max_volt_latency);
+
+/**
+ * dev_pm_opp_get_max_transition_latency() - Get max transition latency in
+ *					     nanoseconds
+ * @dev: device for which we do this operation
+ *
+ * Return: This function returns the max transition latency, in nanoseconds, to
+ * switch from one OPP to other.
+ *
+ * Locking: This function takes rcu_read_lock().
+ */
+unsigned long dev_pm_opp_get_max_transition_latency(struct device *dev)
+{
+	return dev_pm_opp_get_max_volt_latency(dev) +
+		dev_pm_opp_get_max_clock_latency(dev);
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_get_max_transition_latency);
+
+/**
  * dev_pm_opp_get_suspend_opp() - Get suspend opp
  * @dev:	device for which we do this operation
  *
@@ -244,21 +322,21 @@
  */
 struct dev_pm_opp *dev_pm_opp_get_suspend_opp(struct device *dev)
 {
-	struct device_opp *dev_opp;
+	struct opp_table *opp_table;
 
 	opp_rcu_lockdep_assert();
 
-	dev_opp = _find_device_opp(dev);
-	if (IS_ERR(dev_opp) || !dev_opp->suspend_opp ||
-	    !dev_opp->suspend_opp->available)
+	opp_table = _find_opp_table(dev);
+	if (IS_ERR(opp_table) || !opp_table->suspend_opp ||
+	    !opp_table->suspend_opp->available)
 		return NULL;
 
-	return dev_opp->suspend_opp;
+	return opp_table->suspend_opp;
 }
 EXPORT_SYMBOL_GPL(dev_pm_opp_get_suspend_opp);
 
 /**
- * dev_pm_opp_get_opp_count() - Get number of opps available in the opp list
+ * dev_pm_opp_get_opp_count() - Get number of opps available in the opp table
  * @dev:	device for which we do this operation
  *
  * Return: This function returns the number of available opps if there are any,
@@ -268,21 +346,21 @@
  */
 int dev_pm_opp_get_opp_count(struct device *dev)
 {
-	struct device_opp *dev_opp;
+	struct opp_table *opp_table;
 	struct dev_pm_opp *temp_opp;
 	int count = 0;
 
 	rcu_read_lock();
 
-	dev_opp = _find_device_opp(dev);
-	if (IS_ERR(dev_opp)) {
-		count = PTR_ERR(dev_opp);
-		dev_err(dev, "%s: device OPP not found (%d)\n",
+	opp_table = _find_opp_table(dev);
+	if (IS_ERR(opp_table)) {
+		count = PTR_ERR(opp_table);
+		dev_err(dev, "%s: OPP table not found (%d)\n",
 			__func__, count);
 		goto out_unlock;
 	}
 
-	list_for_each_entry_rcu(temp_opp, &dev_opp->opp_list, node) {
+	list_for_each_entry_rcu(temp_opp, &opp_table->opp_list, node) {
 		if (temp_opp->available)
 			count++;
 	}
@@ -299,7 +377,7 @@
  * @freq:		frequency to search for
  * @available:		true/false - match for available opp
  *
- * Return: Searches for exact match in the opp list and returns pointer to the
+ * Return: Searches for exact match in the opp table and returns pointer to the
  * matching opp if found, else returns ERR_PTR in case of error and should
  * be handled using IS_ERR. Error return values can be:
  * EINVAL:	for bad pointer
@@ -323,19 +401,20 @@
 					      unsigned long freq,
 					      bool available)
 {
-	struct device_opp *dev_opp;
+	struct opp_table *opp_table;
 	struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
 
 	opp_rcu_lockdep_assert();
 
-	dev_opp = _find_device_opp(dev);
-	if (IS_ERR(dev_opp)) {
-		int r = PTR_ERR(dev_opp);
-		dev_err(dev, "%s: device OPP not found (%d)\n", __func__, r);
+	opp_table = _find_opp_table(dev);
+	if (IS_ERR(opp_table)) {
+		int r = PTR_ERR(opp_table);
+
+		dev_err(dev, "%s: OPP table not found (%d)\n", __func__, r);
 		return ERR_PTR(r);
 	}
 
-	list_for_each_entry_rcu(temp_opp, &dev_opp->opp_list, node) {
+	list_for_each_entry_rcu(temp_opp, &opp_table->opp_list, node) {
 		if (temp_opp->available == available &&
 				temp_opp->rate == freq) {
 			opp = temp_opp;
@@ -371,7 +450,7 @@
 struct dev_pm_opp *dev_pm_opp_find_freq_ceil(struct device *dev,
 					     unsigned long *freq)
 {
-	struct device_opp *dev_opp;
+	struct opp_table *opp_table;
 	struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
 
 	opp_rcu_lockdep_assert();
@@ -381,11 +460,11 @@
 		return ERR_PTR(-EINVAL);
 	}
 
-	dev_opp = _find_device_opp(dev);
-	if (IS_ERR(dev_opp))
-		return ERR_CAST(dev_opp);
+	opp_table = _find_opp_table(dev);
+	if (IS_ERR(opp_table))
+		return ERR_CAST(opp_table);
 
-	list_for_each_entry_rcu(temp_opp, &dev_opp->opp_list, node) {
+	list_for_each_entry_rcu(temp_opp, &opp_table->opp_list, node) {
 		if (temp_opp->available && temp_opp->rate >= *freq) {
 			opp = temp_opp;
 			*freq = opp->rate;
@@ -421,7 +500,7 @@
 struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev,
 					      unsigned long *freq)
 {
-	struct device_opp *dev_opp;
+	struct opp_table *opp_table;
 	struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
 
 	opp_rcu_lockdep_assert();
@@ -431,11 +510,11 @@
 		return ERR_PTR(-EINVAL);
 	}
 
-	dev_opp = _find_device_opp(dev);
-	if (IS_ERR(dev_opp))
-		return ERR_CAST(dev_opp);
+	opp_table = _find_opp_table(dev);
+	if (IS_ERR(opp_table))
+		return ERR_CAST(opp_table);
 
-	list_for_each_entry_rcu(temp_opp, &dev_opp->opp_list, node) {
+	list_for_each_entry_rcu(temp_opp, &opp_table->opp_list, node) {
 		if (temp_opp->available) {
 			/* go to the next node, before choosing prev */
 			if (temp_opp->rate > *freq)
@@ -451,116 +530,343 @@
 }
 EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_floor);
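The find_freq_ceil()/find_freq_floor() lookups above must run under rcu_read_lock(), and the returned OPP may only be dereferenced inside that read-side section; a minimal sketch of the idiom, with hypothetical names:

static unsigned long example_pick_freq(struct device *dev,
				       unsigned long request_hz)
{
	struct dev_pm_opp *opp;
	unsigned long freq = request_hz;
	unsigned long volt = 0;

	rcu_read_lock();
	opp = dev_pm_opp_find_freq_ceil(dev, &freq);
	if (!IS_ERR(opp))
		volt = dev_pm_opp_get_voltage(opp);
	rcu_read_unlock();	/* opp must not be used past this point */

	if (IS_ERR(opp))
		return 0;

	dev_dbg(dev, "request %lu Hz -> OPP %lu Hz @ %lu uV\n",
		request_hz, freq, volt);
	return freq;
}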
 
-/* List-dev Helpers */
-static void _kfree_list_dev_rcu(struct rcu_head *head)
+/*
+ * The caller needs to ensure that opp_table (and hence the clk) isn't freed,
+ * while clk returned here is used.
+ */
+static struct clk *_get_opp_clk(struct device *dev)
 {
-	struct device_list_opp *list_dev;
+	struct opp_table *opp_table;
+	struct clk *clk;
 
-	list_dev = container_of(head, struct device_list_opp, rcu_head);
-	kfree_rcu(list_dev, rcu_head);
+	rcu_read_lock();
+
+	opp_table = _find_opp_table(dev);
+	if (IS_ERR(opp_table)) {
+		dev_err(dev, "%s: device opp doesn't exist\n", __func__);
+		clk = ERR_CAST(opp_table);
+		goto unlock;
 }
 
-static void _remove_list_dev(struct device_list_opp *list_dev,
-			     struct device_opp *dev_opp)
+	clk = opp_table->clk;
+	if (IS_ERR(clk))
+		dev_err(dev, "%s: No clock available for the device\n",
+			__func__);
+
+unlock:
+	rcu_read_unlock();
+	return clk;
+}
+
+static int _set_opp_voltage(struct device *dev, struct regulator *reg,
+			    unsigned long u_volt, unsigned long u_volt_min,
+			    unsigned long u_volt_max)
 {
-	list_del(&list_dev->node);
-	call_srcu(&dev_opp->srcu_head.srcu, &list_dev->rcu_head,
-		  _kfree_list_dev_rcu);
+	int ret;
+
+	/* Regulator not available for device */
+	if (IS_ERR(reg)) {
+		dev_dbg(dev, "%s: regulator not available: %ld\n", __func__,
+			PTR_ERR(reg));
+		return 0;
 }
 
-struct device_list_opp *_add_list_dev(const struct device *dev,
-				      struct device_opp *dev_opp)
+	dev_dbg(dev, "%s: voltages (mV): %lu %lu %lu\n", __func__, u_volt_min,
+		u_volt, u_volt_max);
+
+	ret = regulator_set_voltage_triplet(reg, u_volt_min, u_volt,
+					    u_volt_max);
+	if (ret)
+		dev_err(dev, "%s: failed to set voltage (%lu %lu %lu mV): %d\n",
+			__func__, u_volt_min, u_volt, u_volt_max, ret);
+
+	return ret;
+}
+
+/**
+ * dev_pm_opp_set_rate() - Configure new OPP based on frequency
+ * @dev:	 device for which we do this operation
+ * @target_freq: frequency to achieve
+ *
+ * This configures the power-supplies and clock source to the levels specified
+ * by the OPP corresponding to the target_freq.
+ *
+ * Locking: This function takes rcu_read_lock().
+ */
+int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq)
 {
-	struct device_list_opp *list_dev;
+	struct opp_table *opp_table;
+	struct dev_pm_opp *old_opp, *opp;
+	struct regulator *reg;
+	struct clk *clk;
+	unsigned long freq, old_freq;
+	unsigned long u_volt, u_volt_min, u_volt_max;
+	unsigned long ou_volt, ou_volt_min, ou_volt_max;
+	int ret;
+
+	if (unlikely(!target_freq)) {
+		dev_err(dev, "%s: Invalid target frequency %lu\n", __func__,
+			target_freq);
+		return -EINVAL;
+	}
 
-	list_dev = kzalloc(sizeof(*list_dev), GFP_KERNEL);
-	if (!list_dev)
+	clk = _get_opp_clk(dev);
+	if (IS_ERR(clk))
+		return PTR_ERR(clk);
+
+	freq = clk_round_rate(clk, target_freq);
+	if ((long)freq <= 0)
+		freq = target_freq;
+
+	old_freq = clk_get_rate(clk);
+
+	/* Return early if nothing to do */
+	if (old_freq == freq) {
+		dev_dbg(dev, "%s: old/new frequencies (%lu Hz) are same, nothing to do\n",
+			__func__, freq);
+		return 0;
+	}
+
+	rcu_read_lock();
+
+	opp_table = _find_opp_table(dev);
+	if (IS_ERR(opp_table)) {
+		dev_err(dev, "%s: device opp doesn't exist\n", __func__);
+		rcu_read_unlock();
+		return PTR_ERR(opp_table);
+	}
+
+	old_opp = dev_pm_opp_find_freq_ceil(dev, &old_freq);
+	if (!IS_ERR(old_opp)) {
+		ou_volt = old_opp->u_volt;
+		ou_volt_min = old_opp->u_volt_min;
+		ou_volt_max = old_opp->u_volt_max;
+	} else {
+		dev_err(dev, "%s: failed to find current OPP for freq %lu (%ld)\n",
+			__func__, old_freq, PTR_ERR(old_opp));
+	}
+
+	opp = dev_pm_opp_find_freq_ceil(dev, &freq);
+	if (IS_ERR(opp)) {
+		ret = PTR_ERR(opp);
+		dev_err(dev, "%s: failed to find OPP for freq %lu (%d)\n",
+			__func__, freq, ret);
+		rcu_read_unlock();
+		return ret;
+	}
+
+	u_volt = opp->u_volt;
+	u_volt_min = opp->u_volt_min;
+	u_volt_max = opp->u_volt_max;
+
+	reg = opp_table->regulator;
+
+	rcu_read_unlock();
+
+	/* Scaling up? Scale voltage before frequency */
+	if (freq > old_freq) {
+		ret = _set_opp_voltage(dev, reg, u_volt, u_volt_min,
+				       u_volt_max);
+		if (ret)
+			goto restore_voltage;
+	}
+
+	/* Change frequency */
+
+	dev_dbg(dev, "%s: switching OPP: %lu Hz --> %lu Hz\n",
+		__func__, old_freq, freq);
+
+	ret = clk_set_rate(clk, freq);
+	if (ret) {
+		dev_err(dev, "%s: failed to set clock rate: %d\n", __func__,
+			ret);
+		goto restore_voltage;
+	}
+
+	/* Scaling down? Scale voltage after frequency */
+	if (freq < old_freq) {
+		ret = _set_opp_voltage(dev, reg, u_volt, u_volt_min,
+				       u_volt_max);
+		if (ret)
+			goto restore_freq;
+	}
+
+	return 0;
+
+restore_freq:
+	if (clk_set_rate(clk, old_freq))
+		dev_err(dev, "%s: failed to restore old-freq (%lu Hz)\n",
+			__func__, old_freq);
+restore_voltage:
+	/* This shouldn't harm even if the voltages weren't updated earlier */
+	if (!IS_ERR(old_opp))
+		_set_opp_voltage(dev, reg, ou_volt, ou_volt_min, ou_volt_max);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_set_rate);
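A minimal consumer sketch for dev_pm_opp_set_rate(); the wrapper name is an assumption. It presumes the OPP table is already populated and, if voltage scaling is wanted, that dev_pm_opp_set_regulator() was called beforehand — otherwise _set_opp_voltage() above silently skips the regulator step:

static int example_set_target(struct device *dev, unsigned long target_hz)
{
	int ret;

	/* Worst-case switch cost, useful input for a governor */
	dev_dbg(dev, "max transition latency: %lu ns\n",
		dev_pm_opp_get_max_transition_latency(dev));

	/*
	 * Picks the ceiling OPP for target_hz, orders the voltage and
	 * clock changes (voltage first when scaling up, after when
	 * scaling down) and rolls back on failure.
	 */
	ret = dev_pm_opp_set_rate(dev, target_hz);
	if (ret)
		dev_err(dev, "switch to %lu Hz failed: %d\n",
			target_hz, ret);

	return ret;
}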
+
+/* OPP-dev Helpers */
+static void _kfree_opp_dev_rcu(struct rcu_head *head)
+{
+	struct opp_device *opp_dev;
+
+	opp_dev = container_of(head, struct opp_device, rcu_head);
+	kfree_rcu(opp_dev, rcu_head);
+}
+
+static void _remove_opp_dev(struct opp_device *opp_dev,
+			    struct opp_table *opp_table)
+{
+	opp_debug_unregister(opp_dev, opp_table);
+	list_del(&opp_dev->node);
+	call_srcu(&opp_table->srcu_head.srcu, &opp_dev->rcu_head,
+		  _kfree_opp_dev_rcu);
+}
+
+struct opp_device *_add_opp_dev(const struct device *dev,
+				struct opp_table *opp_table)
+{
+	struct opp_device *opp_dev;
+	int ret;
+
+	opp_dev = kzalloc(sizeof(*opp_dev), GFP_KERNEL);
+	if (!opp_dev)
 		return NULL;
 
-	/* Initialize list-dev */
-	list_dev->dev = dev;
-	list_add_rcu(&list_dev->node, &dev_opp->dev_list);
+	/* Initialize opp-dev */
+	opp_dev->dev = dev;
+	list_add_rcu(&opp_dev->node, &opp_table->dev_list);
 
-	return list_dev;
+	/* Create debugfs entries for the opp_table */
+	ret = opp_debug_register(opp_dev, opp_table);
+	if (ret)
+		dev_err(dev, "%s: Failed to register opp debugfs (%d)\n",
+			__func__, ret);
+
+	return opp_dev;
 }
 
 /**
- * _add_device_opp() - Find device OPP table or allocate a new one
+ * _add_opp_table() - Find OPP table or allocate a new one
  * @dev:	device for which we do this operation
  *
  * It tries to find an existing table first, if it couldn't find one, it
  * allocates a new OPP table and returns that.
  *
- * Return: valid device_opp pointer if success, else NULL.
+ * Return: valid opp_table pointer if success, else NULL.
  */
-static struct device_opp *_add_device_opp(struct device *dev)
+static struct opp_table *_add_opp_table(struct device *dev)
 {
-	struct device_opp *dev_opp;
-	struct device_list_opp *list_dev;
+	struct opp_table *opp_table;
+	struct opp_device *opp_dev;
+	struct device_node *np;
+	int ret;
 
-	/* Check for existing list for 'dev' first */
-	dev_opp = _find_device_opp(dev);
-	if (!IS_ERR(dev_opp))
-		return dev_opp;
+	/* Check for existing table for 'dev' first */
+	opp_table = _find_opp_table(dev);
+	if (!IS_ERR(opp_table))
+		return opp_table;
 
 	/*
-	 * Allocate a new device OPP table. In the infrequent case where a new
+	 * Allocate a new OPP table. In the infrequent case where a new
 	 * device is needed to be added, we pay this penalty.
 	 */
-	dev_opp = kzalloc(sizeof(*dev_opp), GFP_KERNEL);
-	if (!dev_opp)
+	opp_table = kzalloc(sizeof(*opp_table), GFP_KERNEL);
+	if (!opp_table)
 		return NULL;
 
-	INIT_LIST_HEAD(&dev_opp->dev_list);
+	INIT_LIST_HEAD(&opp_table->dev_list);
 
-	list_dev = _add_list_dev(dev, dev_opp);
-	if (!list_dev) {
-		kfree(dev_opp);
+	opp_dev = _add_opp_dev(dev, opp_table);
+	if (!opp_dev) {
+		kfree(opp_table);
 		return NULL;
 	}
 
-	srcu_init_notifier_head(&dev_opp->srcu_head);
-	INIT_LIST_HEAD(&dev_opp->opp_list);
+	/*
+	 * Only required for backward compatibility with v1 bindings, but isn't
+	 * harmful for other cases. And so we do it unconditionally.
+	 */
+	np = of_node_get(dev->of_node);
+	if (np) {
+		u32 val;
+
+		if (!of_property_read_u32(np, "clock-latency", &val))
+			opp_table->clock_latency_ns_max = val;
+		of_property_read_u32(np, "voltage-tolerance",
+				     &opp_table->voltage_tolerance_v1);
+		of_node_put(np);
+	}
+
+	/* Set regulator to a non-NULL error value */
+	opp_table->regulator = ERR_PTR(-ENXIO);
 
-	/* Secure the device list modification */
-	list_add_rcu(&dev_opp->node, &dev_opp_list);
-	return dev_opp;
+	/* Find clk for the device */
+	opp_table->clk = clk_get(dev, NULL);
+	if (IS_ERR(opp_table->clk)) {
+		ret = PTR_ERR(opp_table->clk);
+		if (ret != -EPROBE_DEFER)
+			dev_dbg(dev, "%s: Couldn't find clock: %d\n", __func__,
+				ret);
+	}
+
+	srcu_init_notifier_head(&opp_table->srcu_head);
+	INIT_LIST_HEAD(&opp_table->opp_list);
+
+	/* Secure the device table modification */
+	list_add_rcu(&opp_table->node, &opp_tables);
+	return opp_table;
 }
 
 /**
- * _kfree_device_rcu() - Free device_opp RCU handler
+ * _kfree_device_rcu() - Free opp_table RCU handler
  * @head:	RCU head
  */
 static void _kfree_device_rcu(struct rcu_head *head)
 {
-	struct device_opp *device_opp = container_of(head, struct device_opp, rcu_head);
+	struct opp_table *opp_table = container_of(head, struct opp_table,
+						   rcu_head);
 
-	kfree_rcu(device_opp, rcu_head);
+	kfree_rcu(opp_table, rcu_head);
 }
 
 /**
- * _remove_device_opp() - Removes a device OPP table
- * @dev_opp: device OPP table to be removed.
+ * _remove_opp_table() - Removes an OPP table
+ * @opp_table: OPP table to be removed.
  *
- * Removes/frees device OPP table it it doesn't contain any OPPs.
+ * Removes/frees OPP table if it doesn't contain any OPPs.
  */
-static void _remove_device_opp(struct device_opp *dev_opp)
+static void _remove_opp_table(struct opp_table *opp_table)
 {
-	struct device_list_opp *list_dev;
+	struct opp_device *opp_dev;
+
+	if (!list_empty(&opp_table->opp_list))
+		return;
+
+	if (opp_table->supported_hw)
+		return;
 
-	if (!list_empty(&dev_opp->opp_list))
+	if (opp_table->prop_name)
 		return;
 
-	list_dev = list_first_entry(&dev_opp->dev_list, struct device_list_opp,
+	if (!IS_ERR(opp_table->regulator))
+		return;
+
+	/* Release clk */
+	if (!IS_ERR(opp_table->clk))
+		clk_put(opp_table->clk);
+
+	opp_dev = list_first_entry(&opp_table->dev_list, struct opp_device,
 				    node);
 
-	_remove_list_dev(list_dev, dev_opp);
+	_remove_opp_dev(opp_dev, opp_table);
 
 	/* dev_list must be empty now */
-	WARN_ON(!list_empty(&dev_opp->dev_list));
+	WARN_ON(!list_empty(&opp_table->dev_list));
 
-	list_del_rcu(&dev_opp->node);
-	call_srcu(&dev_opp->srcu_head.srcu, &dev_opp->rcu_head,
+	list_del_rcu(&opp_table->node);
+	call_srcu(&opp_table->srcu_head.srcu, &opp_table->rcu_head,
 		  _kfree_device_rcu);
 }
 
@@ -577,17 +883,17 @@
 
 /**
  * _opp_remove()  - Remove an OPP from a table definition
- * @dev_opp:	points back to the device_opp struct this opp belongs to
+ * @opp_table:	points back to the opp_table struct this opp belongs to
  * @opp:	pointer to the OPP to remove
  * @notify:	OPP_EVENT_REMOVE notification should be sent or not
  *
- * This function removes an opp definition from the opp list.
+ * This function removes an opp definition from the opp table.
  *
- * Locking: The internal device_opp and opp structures are RCU protected.
+ * Locking: The internal opp_table and opp structures are RCU protected.
  * It is assumed that the caller holds required mutex for an RCU updater
  * strategy.
  */
-static void _opp_remove(struct device_opp *dev_opp,
+static void _opp_remove(struct opp_table *opp_table,
 			struct dev_pm_opp *opp, bool notify)
 {
 	/*
@@ -595,21 +901,23 @@
 	 * frequency/voltage list.
 	 */
 	if (notify)
-		srcu_notifier_call_chain(&dev_opp->srcu_head, OPP_EVENT_REMOVE, opp);
+		srcu_notifier_call_chain(&opp_table->srcu_head,
+					 OPP_EVENT_REMOVE, opp);
+	opp_debug_remove_one(opp);
 	list_del_rcu(&opp->node);
-	call_srcu(&dev_opp->srcu_head.srcu, &opp->rcu_head, _kfree_opp_rcu);
+	call_srcu(&opp_table->srcu_head.srcu, &opp->rcu_head, _kfree_opp_rcu);
 
-	_remove_device_opp(dev_opp);
+	_remove_opp_table(opp_table);
 }
 
 /**
- * dev_pm_opp_remove()  - Remove an OPP from OPP list
+ * dev_pm_opp_remove()  - Remove an OPP from OPP table
  * @dev:	device for which we do this operation
  * @freq:	OPP to remove with matching 'freq'
  *
- * This function removes an opp from the opp list.
+ * This function removes an opp from the opp table.
  *
- * Locking: The internal device_opp and opp structures are RCU protected.
+ * Locking: The internal opp_table and opp structures are RCU protected.
  * Hence this function internally uses RCU updater strategy with mutex locks
  * to keep the integrity of the internal data structures. Callers should ensure
  * that this function is *NOT* called under RCU protection or in contexts where
@@ -618,17 +926,17 @@
 void dev_pm_opp_remove(struct device *dev, unsigned long freq)
 {
 	struct dev_pm_opp *opp;
-	struct device_opp *dev_opp;
+	struct opp_table *opp_table;
 	bool found = false;
 
-	/* Hold our list modification lock here */
-	mutex_lock(&dev_opp_list_lock);
+	/* Hold our table modification lock here */
+	mutex_lock(&opp_table_lock);
 
-	dev_opp = _find_device_opp(dev);
-	if (IS_ERR(dev_opp))
+	opp_table = _find_opp_table(dev);
+	if (IS_ERR(opp_table))
 		goto unlock;
 
-	list_for_each_entry(opp, &dev_opp->opp_list, node) {
+	list_for_each_entry(opp, &opp_table->opp_list, node) {
 		if (opp->rate == freq) {
 			found = true;
 			break;
@@ -641,14 +949,14 @@
 		goto unlock;
 	}
 
-	_opp_remove(dev_opp, opp, true);
+	_opp_remove(opp_table, opp, true);
 unlock:
-	mutex_unlock(&dev_opp_list_lock);
+	mutex_unlock(&opp_table_lock);
 }
 EXPORT_SYMBOL_GPL(dev_pm_opp_remove);
 
 static struct dev_pm_opp *_allocate_opp(struct device *dev,
-					struct device_opp **dev_opp)
+					struct opp_table **opp_table)
 {
 	struct dev_pm_opp *opp;
 
@@ -659,8 +967,8 @@
 
 	INIT_LIST_HEAD(&opp->node);
 
-	*dev_opp = _add_device_opp(dev);
-	if (!*dev_opp) {
+	*opp_table = _add_opp_table(dev);
+	if (!*opp_table) {
 		kfree(opp);
 		return NULL;
 	}
@@ -668,21 +976,38 @@
 	return opp;
 }
 
+static bool _opp_supported_by_regulators(struct dev_pm_opp *opp,
+					 struct opp_table *opp_table)
+{
+	struct regulator *reg = opp_table->regulator;
+
+	if (!IS_ERR(reg) &&
+	    !regulator_is_supported_voltage(reg, opp->u_volt_min,
+					    opp->u_volt_max)) {
+		pr_warn("%s: OPP minuV: %lu maxuV: %lu, not supported by regulator\n",
+			__func__, opp->u_volt_min, opp->u_volt_max);
+		return false;
+	}
+
+	return true;
+}
+
 static int _opp_add(struct device *dev, struct dev_pm_opp *new_opp,
-		    struct device_opp *dev_opp)
+		    struct opp_table *opp_table)
 {
 	struct dev_pm_opp *opp;
-	struct list_head *head = &dev_opp->opp_list;
+	struct list_head *head = &opp_table->opp_list;
+	int ret;
 
 	/*
 	 * Insert new OPP in order of increasing frequency and discard if
 	 * already present.
 	 *
-	 * Need to use &dev_opp->opp_list in the condition part of the 'for'
+	 * Need to use &opp_table->opp_list in the condition part of the 'for'
 	 * loop, don't replace it with head otherwise it will become an infinite
 	 * loop.
 	 */
-	list_for_each_entry_rcu(opp, &dev_opp->opp_list, node) {
+	list_for_each_entry_rcu(opp, &opp_table->opp_list, node) {
 		if (new_opp->rate > opp->rate) {
 			head = &opp->node;
 			continue;
@@ -700,9 +1025,20 @@
 			0 : -EEXIST;
 	}
 
-	new_opp->dev_opp = dev_opp;
+	new_opp->opp_table = opp_table;
 	list_add_rcu(&new_opp->node, head);
 
+	ret = opp_debug_create_one(new_opp, opp_table);
+	if (ret)
+		dev_err(dev, "%s: Failed to register opp to debugfs (%d)\n",
+			__func__, ret);
+
+	if (!_opp_supported_by_regulators(new_opp, opp_table)) {
+		new_opp->available = false;
+		dev_warn(dev, "%s: OPP not supported by regulators (%lu)\n",
+			 __func__, new_opp->rate);
+	}
+
 	return 0;
 }
 
@@ -713,14 +1049,14 @@
  * @u_volt:	Voltage in uVolts for this OPP
  * @dynamic:	Dynamically added OPPs.
  *
- * This function adds an opp definition to the opp list and returns status.
+ * This function adds an opp definition to the opp table and returns status.
  * The opp is made available by default and it can be controlled using
  * dev_pm_opp_enable/disable functions and may be removed by dev_pm_opp_remove.
  *
  * NOTE: "dynamic" parameter impacts OPPs added by the dev_pm_opp_of_add_table
  * and freed by dev_pm_opp_of_remove_table.
  *
- * Locking: The internal device_opp and opp structures are RCU protected.
+ * Locking: The internal opp_table and opp structures are RCU protected.
  * Hence this function internally uses RCU updater strategy with mutex locks
  * to keep the integrity of the internal data structures. Callers should ensure
  * that this function is *NOT* called under RCU protection or in contexts where
@@ -736,14 +1072,15 @@
 static int _opp_add_v1(struct device *dev, unsigned long freq, long u_volt,
 		       bool dynamic)
 {
-	struct device_opp *dev_opp;
+	struct opp_table *opp_table;
 	struct dev_pm_opp *new_opp;
+	unsigned long tol;
 	int ret;
 
-	/* Hold our list modification lock here */
-	mutex_lock(&dev_opp_list_lock);
+	/* Hold our table modification lock here */
+	mutex_lock(&opp_table_lock);
 
-	new_opp = _allocate_opp(dev, &dev_opp);
+	new_opp = _allocate_opp(dev, &opp_table);
 	if (!new_opp) {
 		ret = -ENOMEM;
 		goto unlock;
@@ -751,60 +1088,77 @@
 
 	/* populate the opp table */
 	new_opp->rate = freq;
+	tol = u_volt * opp_table->voltage_tolerance_v1 / 100;
 	new_opp->u_volt = u_volt;
+	new_opp->u_volt_min = u_volt - tol;
+	new_opp->u_volt_max = u_volt + tol;
 	new_opp->available = true;
 	new_opp->dynamic = dynamic;
 
-	ret = _opp_add(dev, new_opp, dev_opp);
+	ret = _opp_add(dev, new_opp, opp_table);
 	if (ret)
 		goto free_opp;
 
-	mutex_unlock(&dev_opp_list_lock);
+	mutex_unlock(&opp_table_lock);
 
 	/*
 	 * Notify the changes in the availability of the operable
 	 * frequency/voltage list.
 	 */
-	srcu_notifier_call_chain(&dev_opp->srcu_head, OPP_EVENT_ADD, new_opp);
+	srcu_notifier_call_chain(&opp_table->srcu_head, OPP_EVENT_ADD, new_opp);
 	return 0;
 
 free_opp:
-	_opp_remove(dev_opp, new_opp, false);
+	_opp_remove(opp_table, new_opp, false);
 unlock:
-	mutex_unlock(&dev_opp_list_lock);
+	mutex_unlock(&opp_table_lock);
 	return ret;
 }
 
 /* TODO: Support multiple regulators */
-static int opp_parse_supplies(struct dev_pm_opp *opp, struct device *dev)
+static int opp_parse_supplies(struct dev_pm_opp *opp, struct device *dev,
+			      struct opp_table *opp_table)
 {
 	u32 microvolt[3] = {0};
 	u32 val;
 	int count, ret;
+	struct property *prop = NULL;
+	char name[NAME_MAX];
+
+	/* Search for "opp-microvolt-<name>" */
+	if (opp_table->prop_name) {
+		snprintf(name, sizeof(name), "opp-microvolt-%s",
+			 opp_table->prop_name);
+		prop = of_find_property(opp->np, name, NULL);
+	}
+
+	if (!prop) {
+		/* Search for "opp-microvolt" */
+		sprintf(name, "opp-microvolt");
+		prop = of_find_property(opp->np, name, NULL);
 
 	/* Missing property isn't a problem, but an invalid entry is */
-	if (!of_find_property(opp->np, "opp-microvolt", NULL))
+		if (!prop)
 		return 0;
+	}
 
-	count = of_property_count_u32_elems(opp->np, "opp-microvolt");
+	count = of_property_count_u32_elems(opp->np, name);
 	if (count < 0) {
-		dev_err(dev, "%s: Invalid opp-microvolt property (%d)\n",
-			__func__, count);
+		dev_err(dev, "%s: Invalid %s property (%d)\n",
+			__func__, name, count);
 		return count;
 	}
 
 	/* There can be one or three elements here */
 	if (count != 1 && count != 3) {
-		dev_err(dev, "%s: Invalid number of elements in opp-microvolt property (%d)\n",
-			__func__, count);
+		dev_err(dev, "%s: Invalid number of elements in %s property (%d)\n",
+			__func__, name, count);
 		return -EINVAL;
 	}
 
-	ret = of_property_read_u32_array(opp->np, "opp-microvolt", microvolt,
-					 count);
+	ret = of_property_read_u32_array(opp->np, name, microvolt, count);
 	if (ret) {
-		dev_err(dev, "%s: error parsing opp-microvolt: %d\n", __func__,
-			ret);
+		dev_err(dev, "%s: error parsing %s: %d\n", __func__, name, ret);
 		return -EINVAL;
 	}
 
@@ -818,22 +1172,391 @@
 		opp->u_volt_max = microvolt[2];
 	}
 
-	if (!of_property_read_u32(opp->np, "opp-microamp", &val))
+	/* Search for "opp-microamp-<name>" */
+	prop = NULL;
+	if (opp_table->prop_name) {
+		snprintf(name, sizeof(name), "opp-microamp-%s",
+			 opp_table->prop_name);
+		prop = of_find_property(opp->np, name, NULL);
+	}
+
+	if (!prop) {
+		/* Search for "opp-microamp" */
+		sprintf(name, "opp-microamp");
+		prop = of_find_property(opp->np, name, NULL);
+	}
+
+	if (prop && !of_property_read_u32(opp->np, name, &val))
 		opp->u_amp = val;
 
 	return 0;
 }
 
 /**
+ * dev_pm_opp_set_supported_hw() - Set supported platforms
+ * @dev: Device for which supported-hw has to be set.
+ * @versions: Array of hierarchy of versions to match.
+ * @count: Number of elements in the array.
+ *
+ * This is required only for the V2 bindings, and it enables a platform to
+ * specify the hierarchy of versions it supports. The OPP layer will then
+ * enable only those OPPs that are available for the given versions, based on
+ * each OPP's 'opp-supported-hw' property.
+ *
+ * Locking: The internal opp_table and opp structures are RCU protected.
+ * Hence this function internally uses RCU updater strategy with mutex locks
+ * to keep the integrity of the internal data structures. Callers should ensure
+ * that this function is *NOT* called under RCU protection or in contexts where
+ * mutex cannot be locked.
+ */
+int dev_pm_opp_set_supported_hw(struct device *dev, const u32 *versions,
+				unsigned int count)
+{
+	struct opp_table *opp_table;
+	int ret = 0;
+
+	/* Hold our table modification lock here */
+	mutex_lock(&opp_table_lock);
+
+	opp_table = _add_opp_table(dev);
+	if (!opp_table) {
+		ret = -ENOMEM;
+		goto unlock;
+	}
+
+	/* Make sure there are no concurrent readers while updating opp_table */
+	WARN_ON(!list_empty(&opp_table->opp_list));
+
+	/* Do we already have a version hierarchy associated with opp_table? */
+	if (opp_table->supported_hw) {
+		dev_err(dev, "%s: Already have supported hardware list\n",
+			__func__);
+		ret = -EBUSY;
+		goto err;
+	}
+
+	opp_table->supported_hw = kmemdup(versions, count * sizeof(*versions),
+					GFP_KERNEL);
+	if (!opp_table->supported_hw) {
+		ret = -ENOMEM;
+		goto err;
+	}
+
+	opp_table->supported_hw_count = count;
+	mutex_unlock(&opp_table_lock);
+	return 0;
+
+err:
+	_remove_opp_table(opp_table);
+unlock:
+	mutex_unlock(&opp_table_lock);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_set_supported_hw);
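A hedged sketch of the supported-hw flow; the version words and the DT fragment in the comment are invented for illustration. Each array element is ANDed against the matching cell of an OPP node's 'opp-supported-hw' property (see _opp_is_supported() further down), and the call must precede table population, as the WARN_ON above enforces:

#include <linux/pm_opp.h>

/*
 * Hypothetical two-level hierarchy: word 0 = chip cut, word 1 = speed
 * bin. An OPP node with 'opp-supported-hw = <0x0004 0x0030>;' matches,
 * since (BIT(2) & 0x4) and (BIT(5) & 0x30) are both non-zero.
 */
static const u32 example_hw[] = { BIT(2), BIT(5) };

static int example_init_opps(struct device *dev)
{
	int ret;

	ret = dev_pm_opp_set_supported_hw(dev, example_hw,
					  ARRAY_SIZE(example_hw));
	if (ret)
		return ret;

	return dev_pm_opp_of_add_table(dev);
}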
+
+/**
+ * dev_pm_opp_put_supported_hw() - Releases resources blocked for supported hw
+ * @dev: Device for which supported-hw has to be put.
+ *
+ * This is required only for the V2 bindings, and is called for a matching
+ * dev_pm_opp_set_supported_hw(). Until this is called, the opp_table structure
+ * will not be freed.
+ *
+ * Locking: The internal opp_table and opp structures are RCU protected.
+ * Hence this function internally uses RCU updater strategy with mutex locks
+ * to keep the integrity of the internal data structures. Callers should ensure
+ * that this function is *NOT* called under RCU protection or in contexts where
+ * mutex cannot be locked.
+ */
+void dev_pm_opp_put_supported_hw(struct device *dev)
+{
+	struct opp_table *opp_table;
+
+	/* Hold our table modification lock here */
+	mutex_lock(&opp_table_lock);
+
+	/* Check for existing table for 'dev' first */
+	opp_table = _find_opp_table(dev);
+	if (IS_ERR(opp_table)) {
+		dev_err(dev, "Failed to find opp_table: %ld\n",
+			PTR_ERR(opp_table));
+		goto unlock;
+	}
+
+	/* Make sure there are no concurrent readers while updating opp_table */
+	WARN_ON(!list_empty(&opp_table->opp_list));
+
+	if (!opp_table->supported_hw) {
+		dev_err(dev, "%s: Doesn't have supported hardware list\n",
+			__func__);
+		goto unlock;
+	}
+
+	kfree(opp_table->supported_hw);
+	opp_table->supported_hw = NULL;
+	opp_table->supported_hw_count = 0;
+
+	/* Try freeing opp_table if this was the last blocking resource */
+	_remove_opp_table(opp_table);
+
+unlock:
+	mutex_unlock(&opp_table_lock);
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_put_supported_hw);
+
+/**
+ * dev_pm_opp_set_prop_name() - Set prop-extn name
+ * @dev: Device for which the prop-name has to be set.
+ * @name: name to postfix to properties.
+ *
+ * This is required only for the V2 bindings, and it enables a platform to
+ * specify the extension to be used for certain property names. The properties
+ * to which the extension applies are opp-microvolt and opp-microamp. The OPP
+ * core will suffix the property name with -<name> while looking for them.
+ *
+ * Locking: The internal opp_table and opp structures are RCU protected.
+ * Hence this function internally uses RCU updater strategy with mutex locks
+ * to keep the integrity of the internal data structures. Callers should ensure
+ * that this function is *NOT* called under RCU protection or in contexts where
+ * mutex cannot be locked.
+ */
+int dev_pm_opp_set_prop_name(struct device *dev, const char *name)
+{
+	struct opp_table *opp_table;
+	int ret = 0;
+
+	/* Hold our table modification lock here */
+	mutex_lock(&opp_table_lock);
+
+	opp_table = _add_opp_table(dev);
+	if (!opp_table) {
+		ret = -ENOMEM;
+		goto unlock;
+	}
+
+	/* Make sure there are no concurrent readers while updating opp_table */
+	WARN_ON(!list_empty(&opp_table->opp_list));
+
+	/* Do we already have a prop-name associated with opp_table? */
+	if (opp_table->prop_name) {
+		dev_err(dev, "%s: Already have prop-name %s\n", __func__,
+			opp_table->prop_name);
+		ret = -EBUSY;
+		goto err;
+	}
+
+	opp_table->prop_name = kstrdup(name, GFP_KERNEL);
+	if (!opp_table->prop_name) {
+		ret = -ENOMEM;
+		goto err;
+	}
+
+	mutex_unlock(&opp_table_lock);
+	return 0;
+
+err:
+	_remove_opp_table(opp_table);
+unlock:
+	mutex_unlock(&opp_table_lock);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_set_prop_name);
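A companion sketch for the prop-name extension; the "speed0" name is an assumption. Once set, opp_parse_supplies() above prefers the suffixed properties over the plain ones:

static int example_init_named_opps(struct device *dev)
{
	int ret;

	/*
	 * opp_parse_supplies() will now look for
	 * "opp-microvolt-speed0" and "opp-microamp-speed0" first,
	 * falling back to "opp-microvolt"/"opp-microamp".
	 */
	ret = dev_pm_opp_set_prop_name(dev, "speed0");
	if (ret)
		return ret;

	return dev_pm_opp_of_add_table(dev);
}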
+
+/**
+ * dev_pm_opp_put_prop_name() - Releases resources blocked for prop-name
+ * @dev: Device for which the prop-name has to be put.
+ *
+ * This is required only for the V2 bindings, and is called for a matching
+ * dev_pm_opp_set_prop_name(). Until this is called, the opp_table structure
+ * will not be freed.
+ *
+ * Locking: The internal opp_table and opp structures are RCU protected.
+ * Hence this function internally uses RCU updater strategy with mutex locks
+ * to keep the integrity of the internal data structures. Callers should ensure
+ * that this function is *NOT* called under RCU protection or in contexts where
+ * mutex cannot be locked.
+ */
+void dev_pm_opp_put_prop_name(struct device *dev)
+{
+	struct opp_table *opp_table;
+
+	/* Hold our table modification lock here */
+	mutex_lock(&opp_table_lock);
+
+	/* Check for existing table for 'dev' first */
+	opp_table = _find_opp_table(dev);
+	if (IS_ERR(opp_table)) {
+		dev_err(dev, "Failed to find opp_table: %ld\n",
+			PTR_ERR(opp_table));
+		goto unlock;
+	}
+
+	/* Make sure there are no concurrent readers while updating opp_table */
+	WARN_ON(!list_empty(&opp_table->opp_list));
+
+	if (!opp_table->prop_name) {
+		dev_err(dev, "%s: Doesn't have a prop-name\n", __func__);
+		goto unlock;
+	}
+
+	kfree(opp_table->prop_name);
+	opp_table->prop_name = NULL;
+
+	/* Try freeing opp_table if this was the last blocking resource */
+	_remove_opp_table(opp_table);
+
+unlock:
+	mutex_unlock(&opp_table_lock);
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_put_prop_name);
+
+/**
+ * dev_pm_opp_set_regulator() - Set regulator name for the device
+ * @dev: Device for which regulator name is being set.
+ * @name: Name of the regulator.
+ *
+ * In order to support OPP switching, OPP layer needs to know the name of the
+ * device's regulator, as the core would be required to switch voltages as well.
+ *
+ * This must be called before any OPPs are initialized for the device.
+ *
+ * Locking: The internal opp_table and opp structures are RCU protected.
+ * Hence this function internally uses RCU updater strategy with mutex locks
+ * to keep the integrity of the internal data structures. Callers should ensure
+ * that this function is *NOT* called under RCU protection or in contexts where
+ * mutex cannot be locked.
+ */
+int dev_pm_opp_set_regulator(struct device *dev, const char *name)
+{
+	struct opp_table *opp_table;
+	struct regulator *reg;
+	int ret;
+
+	mutex_lock(&opp_table_lock);
+
+	opp_table = _add_opp_table(dev);
+	if (!opp_table) {
+		ret = -ENOMEM;
+		goto unlock;
+	}
+
+	/* This should be called before OPPs are initialized */
+	if (WARN_ON(!list_empty(&opp_table->opp_list))) {
+		ret = -EBUSY;
+		goto err;
+	}
+
+	/* Already have a regulator set */
+	if (WARN_ON(!IS_ERR(opp_table->regulator))) {
+		ret = -EBUSY;
+		goto err;
+	}
+	/* Allocate the regulator */
+	reg = regulator_get_optional(dev, name);
+	if (IS_ERR(reg)) {
+		ret = PTR_ERR(reg);
+		if (ret != -EPROBE_DEFER)
+			dev_err(dev, "%s: no regulator (%s) found: %d\n",
+				__func__, name, ret);
+		goto err;
+	}
+
+	opp_table->regulator = reg;
+
+	mutex_unlock(&opp_table_lock);
+	return 0;
+
+err:
+	_remove_opp_table(opp_table);
+unlock:
+	mutex_unlock(&opp_table_lock);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_set_regulator);
+
+/**
+ * dev_pm_opp_put_regulator() - Releases resources blocked for regulator
+ * @dev: Device for which regulator was set.
+ *
+ * Locking: The internal opp_table and opp structures are RCU protected.
+ * Hence this function internally uses RCU updater strategy with mutex locks
+ * to keep the integrity of the internal data structures. Callers should ensure
+ * that this function is *NOT* called under RCU protection or in contexts where
+ * mutex cannot be locked.
+ */
+void dev_pm_opp_put_regulator(struct device *dev)
+{
+	struct opp_table *opp_table;
+
+	mutex_lock(&opp_table_lock);
+
+	/* Check for existing table for 'dev' first */
+	opp_table = _find_opp_table(dev);
+	if (IS_ERR(opp_table)) {
+		dev_err(dev, "Failed to find opp_table: %ld\n",
+			PTR_ERR(opp_table));
+		goto unlock;
+	}
+
+	if (IS_ERR(opp_table->regulator)) {
+		dev_err(dev, "%s: Doesn't have regulator set\n", __func__);
+		goto unlock;
+	}
+
+	/* Make sure there are no concurrent readers while updating opp_table */
+	WARN_ON(!list_empty(&opp_table->opp_list));
+
+	regulator_put(opp_table->regulator);
+	opp_table->regulator = ERR_PTR(-ENXIO);
+
+	/* Try freeing opp_table if this was the last blocking resource */
+	_remove_opp_table(opp_table);
+
+unlock:
+	mutex_unlock(&opp_table_lock);
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_put_regulator);
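Finally, a sketch of the regulator pairing; the supply name "cpu" is an assumption. dev_pm_opp_set_regulator() must precede table population (the WARN_ON/-EBUSY checks above enforce this), and dev_pm_opp_put_regulator() releases it on teardown:

static int example_enable_volt_scaling(struct device *dev)
{
	int ret;

	/* Must be called before any OPPs exist for this device */
	ret = dev_pm_opp_set_regulator(dev, "cpu");
	if (ret)
		return ret;

	ret = dev_pm_opp_of_add_table(dev);
	if (ret)
		dev_pm_opp_put_regulator(dev);

	return ret;
}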
+
+static bool _opp_is_supported(struct device *dev, struct opp_table *opp_table,
+			      struct device_node *np)
+{
+	unsigned int count = opp_table->supported_hw_count;
+	u32 version;
+	int ret;
+
+	if (!opp_table->supported_hw)
+		return true;
+
+	while (count--) {
+		ret = of_property_read_u32_index(np, "opp-supported-hw", count,
+						 &version);
+		if (ret) {
+			dev_warn(dev, "%s: failed to read opp-supported-hw property at index %d: %d\n",
+				 __func__, count, ret);
+			return false;
+		}
+
+		/* Both of these are bitwise masks of the versions */
+		if (!(version & opp_table->supported_hw[count]))
+			return false;
+	}
+
+	return true;
+}
+
+/**
  * _opp_add_static_v2() - Allocate static OPPs (As per 'v2' DT bindings)
  * @dev:	device for which we do this operation
  * @np:		device node
  *
- * This function adds an opp definition to the opp list and returns status. The
+ * This function adds an opp definition to the opp table and returns status. The
  * opp can be controlled using dev_pm_opp_enable/disable functions and may be
  * removed by dev_pm_opp_remove.
  *
- * Locking: The internal device_opp and opp structures are RCU protected.
+ * Locking: The internal opp_table and opp structures are RCU protected.
  * Hence this function internally uses RCU updater strategy with mutex locks
  * to keep the integrity of the internal data structures. Callers should ensure
  * that this function is *NOT* called under RCU protection or in contexts where
@@ -849,16 +1572,16 @@
  */
 static int _opp_add_static_v2(struct device *dev, struct device_node *np)
 {
-	struct device_opp *dev_opp;
+	struct opp_table *opp_table;
 	struct dev_pm_opp *new_opp;
 	u64 rate;
 	u32 val;
 	int ret;
 
-	/* Hold our list modification lock here */
-	mutex_lock(&dev_opp_list_lock);
+	/* Hold our table modification lock here */
+	mutex_lock(&opp_table_lock);
 
-	new_opp = _allocate_opp(dev, &dev_opp);
+	new_opp = _allocate_opp(dev, &opp_table);
 	if (!new_opp) {
 		ret = -ENOMEM;
 		goto unlock;
@@ -870,6 +1593,12 @@
 		goto free_opp;
 	}
 
+	/* Check if the OPP supports hardware's hierarchy of versions or not */
+	if (!_opp_is_supported(dev, opp_table, np)) {
+		dev_dbg(dev, "OPP not supported by hardware: %llu\n", rate);
+		goto free_opp;
+	}
+
 	/*
 	 * Rate is defined as an unsigned long in clk API, and so casting
 	 * explicitly to its type. Must be fixed once rate is 64 bit
@@ -885,28 +1614,30 @@
 	if (!of_property_read_u32(np, "clock-latency-ns", &val))
 		new_opp->clock_latency_ns = val;
 
-	ret = opp_parse_supplies(new_opp, dev);
+	ret = opp_parse_supplies(new_opp, dev, opp_table);
 	if (ret)
 		goto free_opp;
 
-	ret = _opp_add(dev, new_opp, dev_opp);
+	ret = _opp_add(dev, new_opp, opp_table);
 	if (ret)
 		goto free_opp;
 
 	/* OPP to select on device suspend */
 	if (of_property_read_bool(np, "opp-suspend")) {
-		if (dev_opp->suspend_opp)
+		if (opp_table->suspend_opp) {
 			dev_warn(dev, "%s: Multiple suspend OPPs found (%lu %lu)\n",
-				 __func__, dev_opp->suspend_opp->rate,
+				 __func__, opp_table->suspend_opp->rate,
 				 new_opp->rate);
-		else
-			dev_opp->suspend_opp = new_opp;
+		} else {
+			new_opp->suspend = true;
+			opp_table->suspend_opp = new_opp;
+		}
 	}
 
-	if (new_opp->clock_latency_ns > dev_opp->clock_latency_ns_max)
-		dev_opp->clock_latency_ns_max = new_opp->clock_latency_ns;
+	if (new_opp->clock_latency_ns > opp_table->clock_latency_ns_max)
+		opp_table->clock_latency_ns_max = new_opp->clock_latency_ns;
 
-	mutex_unlock(&dev_opp_list_lock);
+	mutex_unlock(&opp_table_lock);
 
 	pr_debug("%s: turbo:%d rate:%lu uv:%lu uvmin:%lu uvmax:%lu latency:%lu\n",
 		 __func__, new_opp->turbo, new_opp->rate, new_opp->u_volt,
@@ -917,13 +1648,13 @@
 	 * Notify the changes in the availability of the operable
 	 * frequency/voltage list.
 	 */
-	srcu_notifier_call_chain(&dev_opp->srcu_head, OPP_EVENT_ADD, new_opp);
+	srcu_notifier_call_chain(&opp_table->srcu_head, OPP_EVENT_ADD, new_opp);
 	return 0;
 
 free_opp:
-	_opp_remove(dev_opp, new_opp, false);
+	_opp_remove(opp_table, new_opp, false);
 unlock:
-	mutex_unlock(&dev_opp_list_lock);
+	mutex_unlock(&opp_table_lock);
 	return ret;
 }
 
@@ -933,11 +1664,11 @@
  * @freq:	Frequency in Hz for this OPP
  * @u_volt:	Voltage in uVolts for this OPP
  *
- * This function adds an opp definition to the opp list and returns status.
+ * This function adds an opp definition to the opp table and returns status.
  * The opp is made available by default and it can be controlled using
  * dev_pm_opp_enable/disable functions.
  *
- * Locking: The internal device_opp and opp structures are RCU protected.
+ * Locking: The internal opp_table and opp structures are RCU protected.
  * Hence this function internally uses RCU updater strategy with mutex locks
  * to keep the integrity of the internal data structures. Callers should ensure
  * that this function is *NOT* called under RCU protection or in contexts where
@@ -969,7 +1700,7 @@
  * copy operation, returns 0 if no modification was done OR modification was
  * successful.
  *
- * Locking: The internal device_opp and opp structures are RCU protected.
+ * Locking: The internal opp_table and opp structures are RCU protected.
  * Hence this function internally uses RCU updater strategy with mutex locks to
  * keep the integrity of the internal data structures. Callers should ensure
  * that this function is *NOT* called under RCU protection or in contexts where
@@ -978,7 +1709,7 @@
 static int _opp_set_availability(struct device *dev, unsigned long freq,
 				 bool availability_req)
 {
-	struct device_opp *dev_opp;
+	struct opp_table *opp_table;
 	struct dev_pm_opp *new_opp, *tmp_opp, *opp = ERR_PTR(-ENODEV);
 	int r = 0;
 
@@ -987,18 +1718,18 @@
 	if (!new_opp)
 		return -ENOMEM;
 
-	mutex_lock(&dev_opp_list_lock);
+	mutex_lock(&opp_table_lock);
 
-	/* Find the device_opp */
-	dev_opp = _find_device_opp(dev);
-	if (IS_ERR(dev_opp)) {
-		r = PTR_ERR(dev_opp);
+	/* Find the opp_table */
+	opp_table = _find_opp_table(dev);
+	if (IS_ERR(opp_table)) {
+		r = PTR_ERR(opp_table);
 		dev_warn(dev, "%s: Device OPP not found (%d)\n", __func__, r);
 		goto unlock;
 	}
 
 	/* Do we have the frequency? */
-	list_for_each_entry(tmp_opp, &dev_opp->opp_list, node) {
+	list_for_each_entry(tmp_opp, &opp_table->opp_list, node) {
 		if (tmp_opp->rate == freq) {
 			opp = tmp_opp;
 			break;
@@ -1019,21 +1750,21 @@
 	new_opp->available = availability_req;
 
 	list_replace_rcu(&opp->node, &new_opp->node);
-	mutex_unlock(&dev_opp_list_lock);
-	call_srcu(&dev_opp->srcu_head.srcu, &opp->rcu_head, _kfree_opp_rcu);
+	mutex_unlock(&opp_table_lock);
+	call_srcu(&opp_table->srcu_head.srcu, &opp->rcu_head, _kfree_opp_rcu);
 
 	/* Notify the change of the OPP availability */
 	if (availability_req)
-		srcu_notifier_call_chain(&dev_opp->srcu_head, OPP_EVENT_ENABLE,
-					 new_opp);
+		srcu_notifier_call_chain(&opp_table->srcu_head,
+					 OPP_EVENT_ENABLE, new_opp);
 	else
-		srcu_notifier_call_chain(&dev_opp->srcu_head, OPP_EVENT_DISABLE,
-					 new_opp);
+		srcu_notifier_call_chain(&opp_table->srcu_head,
+					 OPP_EVENT_DISABLE, new_opp);
 
 	return 0;
 
 unlock:
-	mutex_unlock(&dev_opp_list_lock);
+	mutex_unlock(&opp_table_lock);
 	kfree(new_opp);
 	return r;
 }
@@ -1047,7 +1778,7 @@
 * corresponding error value. It is meant to be used for users to make an OPP available
  * after being temporarily made unavailable with dev_pm_opp_disable.
  *
- * Locking: The internal device_opp and opp structures are RCU protected.
+ * Locking: The internal opp_table and opp structures are RCU protected.
  * Hence this function indirectly uses RCU and mutex locks to keep the
  * integrity of the internal data structures. Callers should ensure that
  * this function is *NOT* called under RCU protection or in contexts where
@@ -1073,7 +1804,7 @@
  * control by users to make this OPP not available until the circumstances are
  * right to make it available again (with a call to dev_pm_opp_enable).
  *
- * Locking: The internal device_opp and opp structures are RCU protected.
+ * Locking: The internal opp_table and opp structures are RCU protected.
  * Hence this function indirectly uses RCU and mutex locks to keep the
  * integrity of the internal data structures. Callers should ensure that
  * this function is *NOT* called under RCU protection or in contexts where
@@ -1091,26 +1822,26 @@
 
 /**
  * dev_pm_opp_get_notifier() - find notifier_head of the device with opp
- * @dev:	device pointer used to lookup device OPPs.
+ * @dev:	device pointer used to lookup OPP table.
  *
  * Return: pointer to  notifier head if found, otherwise -ENODEV or
 * -EINVAL based on type of error cast as a pointer. The value must be checked
  *  with IS_ERR to determine valid pointer or error result.
  *
- * Locking: This function must be called under rcu_read_lock(). dev_opp is a RCU
- * protected pointer. The reason for the same is that the opp pointer which is
- * returned will remain valid for use with opp_get_{voltage, freq} only while
+ * Locking: This function must be called under rcu_read_lock(). opp_table is a
+ * RCU protected pointer. The reason for the same is that the opp pointer which
+ * is returned will remain valid for use with opp_get_{voltage, freq} only while
  * under the locked area. The pointer returned must be used prior to unlocking
  * with rcu_read_unlock() to maintain the integrity of the pointer.
  */
 struct srcu_notifier_head *dev_pm_opp_get_notifier(struct device *dev)
 {
-	struct device_opp *dev_opp = _find_device_opp(dev);
+	struct opp_table *opp_table = _find_opp_table(dev);
 
-	if (IS_ERR(dev_opp))
-		return ERR_CAST(dev_opp); /* matching type */
+	if (IS_ERR(opp_table))
+		return ERR_CAST(opp_table); /* matching type */
 
-	return &dev_opp->srcu_head;
+	return &opp_table->srcu_head;
 }
 EXPORT_SYMBOL_GPL(dev_pm_opp_get_notifier);
 
@@ -1118,11 +1849,11 @@
 /**
  * dev_pm_opp_of_remove_table() - Free OPP table entries created from static DT
  *				  entries
- * @dev:	device pointer used to lookup device OPPs.
+ * @dev:	device pointer used to lookup OPP table.
  *
  * Free OPPs created using static entries present in DT.
  *
- * Locking: The internal device_opp and opp structures are RCU protected.
+ * Locking: The internal opp_table and opp structures are RCU protected.
  * Hence this function indirectly uses RCU updater strategy with mutex locks
  * to keep the integrity of the internal data structures. Callers should ensure
  * that this function is *NOT* called under RCU protection or in contexts where
@@ -1130,38 +1861,38 @@
  */
 void dev_pm_opp_of_remove_table(struct device *dev)
 {
-	struct device_opp *dev_opp;
+	struct opp_table *opp_table;
 	struct dev_pm_opp *opp, *tmp;
 
-	/* Hold our list modification lock here */
-	mutex_lock(&dev_opp_list_lock);
+	/* Hold our table modification lock here */
+	mutex_lock(&opp_table_lock);
 
-	/* Check for existing list for 'dev' */
-	dev_opp = _find_device_opp(dev);
-	if (IS_ERR(dev_opp)) {
-		int error = PTR_ERR(dev_opp);
+	/* Check for existing table for 'dev' */
+	opp_table = _find_opp_table(dev);
+	if (IS_ERR(opp_table)) {
+		int error = PTR_ERR(opp_table);
 
 		if (error != -ENODEV)
-			WARN(1, "%s: dev_opp: %d\n",
+			WARN(1, "%s: opp_table: %d\n",
 			     IS_ERR_OR_NULL(dev) ?
 					"Invalid device" : dev_name(dev),
 			     error);
 		goto unlock;
 	}
 
-	/* Find if dev_opp manages a single device */
-	if (list_is_singular(&dev_opp->dev_list)) {
+	/* Find if opp_table manages a single device */
+	if (list_is_singular(&opp_table->dev_list)) {
 		/* Free static OPPs */
-		list_for_each_entry_safe(opp, tmp, &dev_opp->opp_list, node) {
+		list_for_each_entry_safe(opp, tmp, &opp_table->opp_list, node) {
 			if (!opp->dynamic)
-				_opp_remove(dev_opp, opp, true);
+				_opp_remove(opp_table, opp, true);
 		}
 	} else {
-		_remove_list_dev(_find_list_dev(dev, dev_opp), dev_opp);
+		_remove_opp_dev(_find_opp_dev(dev, opp_table), opp_table);
 	}
 
 unlock:
-	mutex_unlock(&dev_opp_list_lock);
+	mutex_unlock(&opp_table_lock);
 }
 EXPORT_SYMBOL_GPL(dev_pm_opp_of_remove_table);
 
@@ -1182,22 +1913,22 @@
 static int _of_add_opp_table_v2(struct device *dev, struct device_node *opp_np)
 {
 	struct device_node *np;
-	struct device_opp *dev_opp;
+	struct opp_table *opp_table;
 	int ret = 0, count = 0;
 
-	mutex_lock(&dev_opp_list_lock);
+	mutex_lock(&opp_table_lock);
 
-	dev_opp = _managed_opp(opp_np);
-	if (dev_opp) {
+	opp_table = _managed_opp(opp_np);
+	if (opp_table) {
 		/* OPPs are already managed */
-		if (!_add_list_dev(dev, dev_opp))
+		if (!_add_opp_dev(dev, opp_table))
 			ret = -ENOMEM;
-		mutex_unlock(&dev_opp_list_lock);
+		mutex_unlock(&opp_table_lock);
 		return ret;
 	}
-	mutex_unlock(&dev_opp_list_lock);
+	mutex_unlock(&opp_table_lock);
 
-	/* We have opp-list node now, iterate over it and add OPPs */
+	/* We have opp-table node now, iterate over it and add OPPs */
 	for_each_available_child_of_node(opp_np, np) {
 		count++;
 
@@ -1214,19 +1945,19 @@
 	if (WARN_ON(!count))
 		return -ENOENT;
 
-	mutex_lock(&dev_opp_list_lock);
+	mutex_lock(&opp_table_lock);
 
-	dev_opp = _find_device_opp(dev);
-	if (WARN_ON(IS_ERR(dev_opp))) {
-		ret = PTR_ERR(dev_opp);
-		mutex_unlock(&dev_opp_list_lock);
+	opp_table = _find_opp_table(dev);
+	if (WARN_ON(IS_ERR(opp_table))) {
+		ret = PTR_ERR(opp_table);
+		mutex_unlock(&opp_table_lock);
 		goto free_table;
 	}
 
-	dev_opp->np = opp_np;
-	dev_opp->shared_opp = of_property_read_bool(opp_np, "opp-shared");
+	opp_table->np = opp_np;
+	opp_table->shared_opp = of_property_read_bool(opp_np, "opp-shared");
 
-	mutex_unlock(&dev_opp_list_lock);
+	mutex_unlock(&opp_table_lock);
 
 	return 0;
 
@@ -1255,7 +1986,7 @@
 	 */
 	nr = prop->length / sizeof(u32);
 	if (nr % 2) {
-		dev_err(dev, "%s: Invalid OPP list\n", __func__);
+		dev_err(dev, "%s: Invalid OPP table\n", __func__);
 		return -EINVAL;
 	}
 
@@ -1275,11 +2006,11 @@
 
 /**
  * dev_pm_opp_of_add_table() - Initialize opp table from device tree
- * @dev:	device pointer used to lookup device OPPs.
+ * @dev:	device pointer used to lookup OPP table.
  *
  * Register the initial OPP table with the OPP library for given device.
  *
- * Locking: The internal device_opp and opp structures are RCU protected.
+ * Locking: The internal opp_table and opp structures are RCU protected.
  * Hence this function indirectly uses RCU updater strategy with mutex locks
  * to keep the integrity of the internal data structures. Callers should ensure
  * that this function is *NOT* called under RCU protection or in contexts where
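
A consumer of dev_pm_opp_get_notifier() follows the locking rule documented
above: look the head up under rcu_read_lock(), then register against it after
unlocking, since srcu_notifier_chain_register() may sleep. An illustrative
sketch, modelled on devfreq's use of this API in the same kernel:

	static int my_opp_notifier_register(struct device *dev,
					    struct notifier_block *nb)
	{
		struct srcu_notifier_head *nh;
		int ret = 0;

		rcu_read_lock();
		nh = dev_pm_opp_get_notifier(dev);
		if (IS_ERR(nh))
			ret = PTR_ERR(nh);
		rcu_read_unlock();
		if (!ret)
			ret = srcu_notifier_chain_register(nh, nb);

		return ret;
	}

The callback then receives OPP_EVENT_ENABLE or OPP_EVENT_DISABLE with the
affected struct dev_pm_opp as its data argument, as raised by the
srcu_notifier_call_chain() calls in the hunks above.
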
diff -ruw linux-4.4.115/drivers/base/power/opp/cpu.c linux-4.4.115-fbx/drivers/base/power/opp/cpu.c
--- linux-4.4.115/drivers/base/power/opp/cpu.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/base/power/opp/cpu.c	2019-01-22 16:16:22.879240756 +0100
@@ -31,7 +31,7 @@
  * @table:	Cpufreq table returned back to caller
  *
 * Generate a cpufreq table for a provided device - this assumes that the
- * opp list is already initialized and ready for usage.
+ * opp table is already initialized and ready for use.
  *
  * This function allocates required memory for the cpufreq table. It is
  * expected that the caller does the required maintenance such as freeing
@@ -44,7 +44,7 @@
 * WARNING: It is important for the callers to ensure refreshing their copy of
  * the table if any of the mentioned functions have been invoked in the interim.
  *
- * Locking: The internal device_opp and opp structures are RCU protected.
+ * Locking: The internal opp_table and opp structures are RCU protected.
  * Since we just use the regular accessor functions to access the internal data
  * structures, we use RCU read lock inside this function. As a result, users of
 * this function do not need to take explicit locks when invoking it.
@@ -122,15 +122,15 @@
 /* Required only for V1 bindings, as v2 can manage it from DT itself */
 int dev_pm_opp_set_sharing_cpus(struct device *cpu_dev, cpumask_var_t cpumask)
 {
-	struct device_list_opp *list_dev;
-	struct device_opp *dev_opp;
+	struct opp_device *opp_dev;
+	struct opp_table *opp_table;
 	struct device *dev;
 	int cpu, ret = 0;
 
-	mutex_lock(&dev_opp_list_lock);
+	mutex_lock(&opp_table_lock);
 
-	dev_opp = _find_device_opp(cpu_dev);
-	if (IS_ERR(dev_opp)) {
+	opp_table = _find_opp_table(cpu_dev);
+	if (IS_ERR(opp_table)) {
 		ret = -EINVAL;
 		goto unlock;
 	}
@@ -146,15 +146,15 @@
 			continue;
 		}
 
-		list_dev = _add_list_dev(dev, dev_opp);
-		if (!list_dev) {
-			dev_err(dev, "%s: failed to add list-dev for cpu%d device\n",
+		opp_dev = _add_opp_dev(dev, opp_table);
+		if (!opp_dev) {
+			dev_err(dev, "%s: failed to add opp-dev for cpu%d device\n",
 				__func__, cpu);
 			continue;
 		}
 	}
 unlock:
-	mutex_unlock(&dev_opp_list_lock);
+	mutex_unlock(&opp_table_lock);
 
 	return ret;
 }
@@ -214,7 +214,6 @@
 /*
  * Works only for OPP v2 bindings.
  *
- * cpumask should be already set to mask of cpu_dev->id.
  * Returns -ENOENT if operating-points-v2 bindings aren't supported.
  */
 int dev_pm_opp_of_get_sharing_cpus(struct device *cpu_dev, cpumask_var_t cpumask)
@@ -230,6 +229,8 @@
 		return -ENOENT;
 	}
 
+	cpumask_set_cpu(cpu_dev->id, cpumask);
+
 	/* OPPs are shared ? */
 	if (!of_property_read_bool(np, "opp-shared"))
 		goto put_cpu_node;
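
The hunk above moves the cpumask_set_cpu() call for cpu_dev->id into
dev_pm_opp_of_get_sharing_cpus() itself, so callers no longer have to pre-seed
the mask. A minimal caller sketch (illustrative, in the style of cpufreq-dt):

	cpumask_var_t cpus;
	int ret;

	if (!zalloc_cpumask_var(&cpus, GFP_KERNEL))
		return -ENOMEM;

	/* cpu_dev->id is now added to 'cpus' by the callee */
	ret = dev_pm_opp_of_get_sharing_cpus(cpu_dev, cpus);
	if (!ret)
		ret = dev_pm_opp_set_sharing_cpus(cpu_dev, cpus);

	free_cpumask_var(cpus);
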
diff -ruw linux-4.4.115/drivers/base/power/opp/Makefile linux-4.4.115-fbx/drivers/base/power/opp/Makefile
--- linux-4.4.115/drivers/base/power/opp/Makefile	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/base/power/opp/Makefile	2019-01-22 16:16:22.879240756 +0100
@@ -1,2 +1,3 @@
 ccflags-$(CONFIG_DEBUG_DRIVER)	:= -DDEBUG
 obj-y				+= core.o cpu.o
+obj-$(CONFIG_DEBUG_FS)		+= debugfs.o
diff -ruw linux-4.4.115/drivers/base/power/opp/opp.h linux-4.4.115-fbx/drivers/base/power/opp/opp.h
--- linux-4.4.115/drivers/base/power/opp/opp.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/base/power/opp/opp.h	2019-01-22 16:16:22.879240756 +0100
@@ -17,17 +17,21 @@
 #include <linux/device.h>
 #include <linux/kernel.h>
 #include <linux/list.h>
+#include <linux/limits.h>
 #include <linux/pm_opp.h>
 #include <linux/rculist.h>
 #include <linux/rcupdate.h>
 
+struct clk;
+struct regulator;
+
 /* Lock to allow exclusive modification to the device and opp lists */
-extern struct mutex dev_opp_list_lock;
+extern struct mutex opp_table_lock;
 
 /*
  * Internal data structure organization with the OPP layer library is as
  * follows:
- * dev_opp_list (root)
+ * opp_tables (root)
  *	|- device 1 (represents voltage domain 1)
  *	|	|- opp 1 (availability, freq, voltage)
  *	|	|- opp 2 ..
@@ -36,23 +40,24 @@
  *	|- device 2 (represents the next voltage domain)
  *	...
  *	`- device m (represents mth voltage domain)
- * device 1, 2.. are represented by dev_opp structure while each opp
+ * device 1, 2.. are represented by opp_table structure while each opp
  * is represented by the opp structure.
  */
 
 /**
  * struct dev_pm_opp - Generic OPP description structure
- * @node:	opp list node. The nodes are maintained throughout the lifetime
+ * @node:	opp table node. The nodes are maintained throughout the lifetime
 *		of boot. It is expected that only an optimal set of OPPs is
 *		added to the library by the SoC framework.
- *		RCU usage: opp list is traversed with RCU locks. node
+ *		RCU usage: opp table is traversed with RCU locks. node
 *		modification is possible at runtime, hence the modifications
- *		are protected by the dev_opp_list_lock for integrity.
+ *		are protected by the opp_table_lock for integrity.
  *		IMPORTANT: the opp nodes should be maintained in increasing
  *		order.
- * @dynamic:	not-created from static DT entries.
 * @available:	true/false - marks whether this OPP is available or not
+ * @dynamic:	true if this OPP was not created from static DT entries
  * @turbo:	true if turbo (boost) OPP
+ * @suspend:	true if suspend OPP
  * @rate:	Frequency in hertz
  * @u_volt:	Target voltage in microvolts corresponding to this OPP
  * @u_volt_min:	Minimum voltage in microvolts corresponding to this OPP
@@ -60,9 +65,10 @@
  * @u_amp:	Maximum current drawn by the device in microamperes
  * @clock_latency_ns: Latency (in nanoseconds) of switching to this OPP's
  *		frequency from any other OPP's frequency.
- * @dev_opp:	points back to the device_opp struct this opp belongs to
+ * @opp_table:	points back to the opp_table struct this opp belongs to
  * @rcu_head:	RCU callback head used for deferred freeing
  * @np:		OPP's device node.
+ * @dentry:	debugfs dentry pointer (per opp)
  *
  * This structure stores the OPP information for a given device.
  */
@@ -72,6 +78,7 @@
 	bool available;
 	bool dynamic;
 	bool turbo;
+	bool suspend;
 	unsigned long rate;
 
 	unsigned long u_volt;
@@ -80,40 +87,60 @@
 	unsigned long u_amp;
 	unsigned long clock_latency_ns;
 
-	struct device_opp *dev_opp;
+	struct opp_table *opp_table;
 	struct rcu_head rcu_head;
 
 	struct device_node *np;
+
+#ifdef CONFIG_DEBUG_FS
+	struct dentry *dentry;
+#endif
 };
 
 /**
- * struct device_list_opp - devices managed by 'struct device_opp'
+ * struct opp_device - devices managed by 'struct opp_table'
  * @node:	list node
  * @dev:	device to which the struct object belongs
  * @rcu_head:	RCU callback head used for deferred freeing
+ * @dentry:	debugfs dentry pointer (per device)
  *
- * This is an internal data structure maintaining the list of devices that are
- * managed by 'struct device_opp'.
+ * This is an internal data structure maintaining the devices that are managed
+ * by 'struct opp_table'.
  */
-struct device_list_opp {
+struct opp_device {
 	struct list_head node;
 	const struct device *dev;
 	struct rcu_head rcu_head;
+
+#ifdef CONFIG_DEBUG_FS
+	struct dentry *dentry;
+#endif
 };
 
 /**
- * struct device_opp - Device opp structure
- * @node:	list node - contains the devices with OPPs that
+ * struct opp_table - Device opp structure
+ * @node:	table node - contains the devices with OPPs that
  *		have been registered. Nodes once added are not modified in this
- *		list.
- *		RCU usage: nodes are not modified in the list of device_opp,
- *		however addition is possible and is secured by dev_opp_list_lock
+ *		table.
+ *		RCU usage: nodes are not modified in the table of opp_table,
+ *		however addition is possible and is secured by opp_table_lock
  * @srcu_head:	notifier head to notify the OPP availability changes.
  * @rcu_head:	RCU callback head used for deferred freeing
  * @dev_list:	list of devices that share these OPPs
- * @opp_list:	list of opps
+ * @opp_list:	table of opps
  * @np:		struct device_node pointer for opp's DT node.
+ * @clock_latency_ns_max: Max clock latency in nanoseconds.
  * @shared_opp: OPP is shared between multiple devices.
+ * @suspend_opp: Pointer to OPP to be used during device suspend.
+ * @supported_hw: Array of version number to support.
+ * @supported_hw_count: Number of elements in supported_hw array.
+ * @prop_name: A name suffix appended to many DT properties while parsing them.
+ * @clk: Device's clock handle
+ * @regulator: Supply regulator
+ * @dentry:	debugfs dentry pointer of the real device directory (not links).
+ * @dentry_name: Name of the real dentry.
+ * @voltage_tolerance_v1: Voltage tolerance (percentage), for v1 bindings only.
  *
  * This is an internal data structure maintaining the link to opps attached to
 * a device. This structure is not meant to be shared with users as it is
@@ -123,7 +150,7 @@
  * need to wait for the grace period of both of them before freeing any
  * resources. And so we have used kfree_rcu() from within call_srcu() handlers.
  */
-struct device_opp {
+struct opp_table {
 	struct list_head node;
 
 	struct srcu_notifier_head srcu_head;
@@ -133,14 +160,48 @@
 
 	struct device_node *np;
 	unsigned long clock_latency_ns_max;
+
+	/* For backward compatibility with v1 bindings */
+	unsigned int voltage_tolerance_v1;
+
 	bool shared_opp;
 	struct dev_pm_opp *suspend_opp;
+
+	unsigned int *supported_hw;
+	unsigned int supported_hw_count;
+	const char *prop_name;
+	struct clk *clk;
+	struct regulator *regulator;
+
+#ifdef CONFIG_DEBUG_FS
+	struct dentry *dentry;
+	char dentry_name[NAME_MAX];
+#endif
 };
 
 /* Routines internal to opp core */
-struct device_opp *_find_device_opp(struct device *dev);
-struct device_list_opp *_add_list_dev(const struct device *dev,
-				      struct device_opp *dev_opp);
+struct opp_table *_find_opp_table(struct device *dev);
+struct opp_device *_add_opp_dev(const struct device *dev,
+				struct opp_table *opp_table);
 struct device_node *_of_get_opp_desc_node(struct device *dev);
 
+#ifdef CONFIG_DEBUG_FS
+void opp_debug_remove_one(struct dev_pm_opp *opp);
+int opp_debug_create_one(struct dev_pm_opp *opp, struct opp_table *opp_table);
+int opp_debug_register(struct opp_device *opp_dev, struct opp_table *opp_table);
+void opp_debug_unregister(struct opp_device *opp_dev, struct opp_table *opp_table);
+#else
+static inline void opp_debug_remove_one(struct dev_pm_opp *opp) {}
+
+static inline int opp_debug_create_one(struct dev_pm_opp *opp,
+				       struct opp_table *opp_table)
+{ return 0; }
+static inline int opp_debug_register(struct opp_device *opp_dev,
+				     struct opp_table *opp_table)
+{ return 0; }
+
+static inline void opp_debug_unregister(struct opp_device *opp_dev,
+					struct opp_table *opp_table)
+{ }
+#endif		/* CONFIG_DEBUG_FS */
+
 #endif		/* __DRIVER_OPP_H__ */
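
The new supported_hw, prop_name, clk and regulator members back the per-table
setter helpers from the upstream v4.5/v4.6 OPP rework
(dev_pm_opp_set_supported_hw() and friends). Assuming those helpers are part
of this backport, a platform driver would populate them before parsing its
table; a sketch with a hypothetical hardware version value:

	u32 version[] = { 0x2 };	/* hypothetical HW version */
	int ret;

	ret = dev_pm_opp_set_supported_hw(dev, version, ARRAY_SIZE(version));
	if (ret)
		return ret;

	ret = dev_pm_opp_of_add_table(dev);
	if (ret)
		dev_pm_opp_put_supported_hw(dev);
	return ret;
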
diff -ruw linux-4.4.115/drivers/base/power/power.h linux-4.4.115-fbx/drivers/base/power/power.h
--- linux-4.4.115/drivers/base/power/power.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/base/power/power.h	2019-01-22 16:16:22.879240756 +0100
@@ -140,6 +140,7 @@
 extern void device_pm_move_before(struct device *, struct device *);
 extern void device_pm_move_after(struct device *, struct device *);
 extern void device_pm_move_last(struct device *);
+extern void device_pm_check_callbacks(struct device *dev);
 
 #else /* !CONFIG_PM_SLEEP */
 
@@ -158,6 +159,8 @@
 					struct device *devb) {}
 static inline void device_pm_move_last(struct device *dev) {}
 
+static inline void device_pm_check_callbacks(struct device *dev) {}
+
 #endif /* !CONFIG_PM_SLEEP */
 
 static inline void device_pm_init(struct device *dev)
diff -ruw linux-4.4.115/drivers/base/power/qos.c linux-4.4.115-fbx/drivers/base/power/qos.c
--- linux-4.4.115/drivers/base/power/qos.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/base/power/qos.c	2019-01-22 16:16:22.879240756 +0100
@@ -147,7 +147,7 @@
 	switch(req->type) {
 	case DEV_PM_QOS_RESUME_LATENCY:
 		ret = pm_qos_update_target(&qos->resume_latency,
-					   &req->data.pnode, action, value);
+					   &req->data.lat, action, value);
 		if (ret) {
 			value = pm_qos_read_value(&qos->resume_latency);
 			blocking_notifier_call_chain(&dev_pm_notifiers,
@@ -157,7 +157,7 @@
 		break;
 	case DEV_PM_QOS_LATENCY_TOLERANCE:
 		ret = pm_qos_update_target(&qos->latency_tolerance,
-					   &req->data.pnode, action, value);
+					   &req->data.lat, action, value);
 		if (ret) {
 			value = pm_qos_read_value(&qos->latency_tolerance);
 			req->dev->power.set_latency_tolerance(req->dev, value);
@@ -258,7 +258,7 @@
 
 	/* Flush the constraints lists for the device. */
 	c = &qos->resume_latency;
-	plist_for_each_entry_safe(req, tmp, &c->list, data.pnode) {
+	plist_for_each_entry_safe(req, tmp, &c->list, data.lat.node) {
 		/*
 		 * Update constraints list and call the notification
 		 * callbacks if needed
@@ -266,8 +266,11 @@
 		apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
 		memset(req, 0, sizeof(*req));
 	}
+
+	kfree(c->notifiers);
+
 	c = &qos->latency_tolerance;
-	plist_for_each_entry_safe(req, tmp, &c->list, data.pnode) {
+	plist_for_each_entry_safe(req, tmp, &c->list, data.lat.node) {
 		apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
 		memset(req, 0, sizeof(*req));
 	}
@@ -382,7 +385,7 @@
 	switch(req->type) {
 	case DEV_PM_QOS_RESUME_LATENCY:
 	case DEV_PM_QOS_LATENCY_TOLERANCE:
-		curr_value = req->data.pnode.prio;
+		curr_value = req->data.lat.node.prio;
 		break;
 	case DEV_PM_QOS_FLAGS:
 		curr_value = req->data.flr.flags;
@@ -835,7 +838,7 @@
 	ret = IS_ERR_OR_NULL(dev->power.qos)
 		|| !dev->power.qos->latency_tolerance_req ?
 			PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT :
-			dev->power.qos->latency_tolerance_req->data.pnode.prio;
+			dev->power.qos->latency_tolerance_req->data.lat.node.prio;
 	mutex_unlock(&dev_pm_qos_mtx);
 	return ret;
 }
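
The s/data.pnode/data.lat.node/ substitutions show that this tree replaces the
bare plist node in the request union with a wrapper: &req->data.lat is handed
straight to pm_qos_update_target() while the plist walks go through
data.lat.node, which suggests data.lat is a full struct pm_qos_request. A
sketch of the implied layout; the exact definition is an assumption
reconstructed from these two usages:

	struct dev_pm_qos_request {
		enum dev_pm_qos_req_type type;
		union {
			struct pm_qos_request lat;  /* was: struct plist_node pnode */
			struct pm_qos_flags_request flr;
		} data;
		struct device *dev;
	};
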
diff -ruw linux-4.4.115/drivers/base/power/wakeup.c linux-4.4.115-fbx/drivers/base/power/wakeup.c
--- linux-4.4.115/drivers/base/power/wakeup.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/base/power/wakeup.c	2019-10-29 09:26:23.413200927 +0100
@@ -15,6 +15,7 @@
 #include <linux/seq_file.h>
 #include <linux/debugfs.h>
 #include <linux/pm_wakeirq.h>
+#include <linux/types.h>
 #include <trace/events/power.h>
 
 #include "power.h"
@@ -808,6 +809,37 @@
 }
 EXPORT_SYMBOL_GPL(pm_wakeup_event);
 
+void pm_get_active_wakeup_sources(char *pending_wakeup_source, size_t max)
+{
+	struct wakeup_source *ws, *last_active_ws = NULL;
+	int len = 0;
+	bool active = false;
+
+	rcu_read_lock();
+	list_for_each_entry_rcu(ws, &wakeup_sources, entry) {
+		if (ws->active && len < max) {
+			if (!active)
+				len += scnprintf(pending_wakeup_source, max,
+						"Pending Wakeup Sources: ");
+			len += scnprintf(pending_wakeup_source + len, max - len,
+				"%s ", ws->name);
+			active = true;
+		} else if (!active &&
+			   (!last_active_ws ||
+			    ktime_to_ns(ws->last_time) >
+			    ktime_to_ns(last_active_ws->last_time))) {
+			last_active_ws = ws;
+		}
+	}
+	if (!active && last_active_ws) {
+		scnprintf(pending_wakeup_source, max,
+				"Last active Wakeup Source: %s",
+				last_active_ws->name);
+	}
+	rcu_read_unlock();
+}
+EXPORT_SYMBOL_GPL(pm_get_active_wakeup_sources);
+
 void pm_print_active_wakeup_sources(void)
 {
 	struct wakeup_source *ws;
@@ -1015,7 +1047,7 @@
 		active_time = ktime_set(0, 0);
 	}
 
-	seq_printf(m, "%-12s\t%lu\t\t%lu\t\t%lu\t\t%lu\t\t%lld\t\t%lld\t\t%lld\t\t%lld\t\t%lld\n",
+	seq_printf(m, "%-32s\t%lu\t\t%lu\t\t%lu\t\t%lu\t\t%lld\t\t%lld\t\t%lld\t\t%lld\t\t%lld\n",
 		   ws->name, active_count, ws->event_count,
 		   ws->wakeup_count, ws->expire_count,
 		   ktime_to_ms(active_time), ktime_to_ms(total_time),
@@ -1036,7 +1068,7 @@
 	struct wakeup_source *ws;
 	int srcuidx;
 
-	seq_puts(m, "name\t\tactive_count\tevent_count\twakeup_count\t"
+	seq_puts(m, "name\t\t\t\t\tactive_count\tevent_count\twakeup_count\t"
 		"expire_count\tactive_since\ttotal_time\tmax_time\t"
 		"last_change\tprevent_suspend_time\n");
 
diff -ruw linux-4.4.115/drivers/base/regmap/internal.h linux-4.4.115-fbx/drivers/base/regmap/internal.h
--- linux-4.4.115/drivers/base/regmap/internal.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/base/regmap/internal.h	2019-01-22 16:16:22.883240792 +0100
@@ -85,6 +85,9 @@
 
 	struct list_head debugfs_off_cache;
 	struct mutex cache_lock;
+
+	unsigned int dump_address;
+	unsigned int dump_count;
 #endif
 
 	unsigned int max_register;
@@ -245,6 +248,10 @@
 int _regmap_raw_write(struct regmap *map, unsigned int reg,
 		      const void *val, size_t val_len);
 
+int _regmap_raw_multi_reg_write(struct regmap *map,
+				const struct reg_sequence *regs,
+				size_t num_regs);
+
 void regmap_async_complete_cb(struct regmap_async *async, int ret);
 
 enum regmap_endian regmap_get_val_endian(struct device *dev,
diff -ruw linux-4.4.115/drivers/base/regmap/Kconfig linux-4.4.115-fbx/drivers/base/regmap/Kconfig
--- linux-4.4.115/drivers/base/regmap/Kconfig	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/base/regmap/Kconfig	2019-01-22 16:16:22.883240792 +0100
@@ -3,7 +3,7 @@
 # subsystems should select the appropriate symbols.
 
 config REGMAP
-	default y if (REGMAP_I2C || REGMAP_SPI || REGMAP_SPMI || REGMAP_AC97 || REGMAP_MMIO || REGMAP_IRQ)
+	default y if (REGMAP_I2C || REGMAP_SPI || REGMAP_SPMI || REGMAP_AC97 || REGMAP_MMIO || REGMAP_IRQ || REGMAP_SWR)
 	select LZO_COMPRESS
 	select LZO_DECOMPRESS
 	select IRQ_DOMAIN if REGMAP_IRQ
@@ -29,3 +29,15 @@
 
 config REGMAP_IRQ
 	bool
+
+config REGMAP_SWR
+	tristate
+
+config REGMAP_ALLOW_WRITE_DEBUGFS
+	depends on REGMAP && DEBUG_FS
+	bool "Allow REGMAP debugfs write"
+	default n
+	help
+	  Say 'y' here to allow writes through the regmap debugfs interface.
+	  Debugfs writes can be risky when they touch essential hardware, so
+	  enabling this option on any production device is not recommended.
diff -ruw linux-4.4.115/drivers/base/regmap/Makefile linux-4.4.115-fbx/drivers/base/regmap/Makefile
--- linux-4.4.115/drivers/base/regmap/Makefile	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/base/regmap/Makefile	2019-01-22 16:16:22.883240792 +0100
@@ -10,3 +10,4 @@
 obj-$(CONFIG_REGMAP_SPMI) += regmap-spmi.o
 obj-$(CONFIG_REGMAP_MMIO) += regmap-mmio.o
 obj-$(CONFIG_REGMAP_IRQ) += regmap-irq.o
+obj-$(CONFIG_REGMAP_SWR) += regmap-swr.o
diff -ruw linux-4.4.115/drivers/base/regmap/regcache.c linux-4.4.115-fbx/drivers/base/regmap/regcache.c
--- linux-4.4.115/drivers/base/regmap/regcache.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/base/regmap/regcache.c	2019-01-22 16:16:22.883240792 +0100
@@ -682,6 +682,53 @@
 	return ret;
 }
 
+static int regcache_sync_block_raw_multi_reg(struct regmap *map, void *block,
+					unsigned long *cache_present,
+					unsigned int block_base,
+					unsigned int start,
+					unsigned int end)
+{
+	unsigned int i, val;
+	unsigned int regtmp = 0;
+	int ret = 0;
+	struct reg_sequence *regs;
+	size_t num_regs = ((end - start) + 1);
+
+	regs = kcalloc(num_regs, sizeof(*regs), GFP_KERNEL);
+	if (!regs)
+		return -ENOMEM;
+
+	num_regs = 0;
+	for (i = start; i < end; i++) {
+		regtmp = block_base + (i * map->reg_stride);
+
+		/* skip registers that are not defined/available */
+		if (!regcache_reg_present(cache_present, i))
+			continue;
+
+		val = regcache_get_val(map, block, i);
+
+		/* Is this the hardware default?  If so skip. */
+		ret = regcache_lookup_reg(map, regtmp);
+		if (ret >= 0 && val == map->reg_defaults[ret].def) {
+			continue;
+		} else {
+			regs[num_regs].reg = regtmp;
+			regs[num_regs].def = val;
+			regs[num_regs].delay_us = 0;
+			num_regs += 1;
+		}
+	}
+	ret = 0;
+	if (num_regs) {
+		dev_dbg(map->dev, "%s: start: 0x%x - end: 0x%x\n",
+			__func__, regs[0].reg, regs[num_regs-1].reg);
+		ret = _regmap_raw_multi_reg_write(map, regs, num_regs);
+	}
+	kfree(regs);
+	return ret;
+}
+
 static int regcache_sync_block_raw(struct regmap *map, void *block,
 			    unsigned long *cache_present,
 			    unsigned int block_base, unsigned int start,
@@ -729,7 +776,12 @@
 			unsigned int block_base, unsigned int start,
 			unsigned int end)
 {
-	if (regmap_can_raw_write(map) && !map->use_single_write)
+	if (regmap_can_raw_write(map) && map->can_multi_write)
+		return regcache_sync_block_raw_multi_reg(map, block,
+							 cache_present,
+							 block_base, start,
+							 end);
+	else if (regmap_can_raw_write(map) && !map->use_single_write)
 		return regcache_sync_block_raw(map, block, cache_present,
 					       block_base, start, end);
 	else
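
regcache_sync_block_raw_multi_reg() batches every dirty, non-default register
of a block into one struct reg_sequence array and flushes it with a single
_regmap_raw_multi_reg_write() call (made non-static in regmap.c below).
struct reg_sequence is the same type drivers pass to the public
regmap_multi_reg_write(); for illustration:

	static const struct reg_sequence init_seq[] = {
		{ .reg = 0x00, .def = 0x01 },
		{ .reg = 0x04, .def = 0xff, .delay_us = 10 },
	};

	ret = regmap_multi_reg_write(map, init_seq, ARRAY_SIZE(init_seq));
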
diff -ruw linux-4.4.115/drivers/base/regmap/regmap.c linux-4.4.115-fbx/drivers/base/regmap/regmap.c
--- linux-4.4.115/drivers/base/regmap/regmap.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/base/regmap/regmap.c	2019-10-29 09:26:23.413200927 +0100
@@ -1822,7 +1822,7 @@
  * they are all in the same page and have been changed to being page
  * relative. The page register has been written if that was necessary.
  */
-static int _regmap_raw_multi_reg_write(struct regmap *map,
+int _regmap_raw_multi_reg_write(struct regmap *map,
 				       const struct reg_sequence *regs,
 				       size_t num_regs)
 {
diff -ruw linux-4.4.115/drivers/base/regmap/regmap-debugfs.c linux-4.4.115-fbx/drivers/base/regmap/regmap-debugfs.c
--- linux-4.4.115/drivers/base/regmap/regmap-debugfs.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/base/regmap/regmap-debugfs.c	2019-01-22 16:16:22.883240792 +0100
@@ -259,8 +259,7 @@
 				   count, ppos);
 }
 
-#undef REGMAP_ALLOW_WRITE_DEBUGFS
-#ifdef REGMAP_ALLOW_WRITE_DEBUGFS
+#ifdef CONFIG_REGMAP_ALLOW_WRITE_DEBUGFS
 /*
  * This can be dangerous especially when we have clients such as
  * PMICs, therefore don't provide any real compile time configuration option
@@ -310,6 +309,67 @@
 	.llseek = default_llseek,
 };
 
+static ssize_t regmap_data_read_file(struct file *file, char __user *user_buf,
+				    size_t count, loff_t *ppos)
+{
+	struct regmap *map = file->private_data;
+	int new_count;
+
+	regmap_calc_tot_len(map, NULL, 0);
+	new_count = map->dump_count * map->debugfs_tot_len;
+	if (new_count > count)
+		new_count = count;
+
+	if (*ppos == 0)
+		*ppos = map->dump_address * map->debugfs_tot_len;
+	else if (*ppos >= map->dump_address * map->debugfs_tot_len
+			+ map->dump_count * map->debugfs_tot_len)
+		return 0;
+	return regmap_read_debugfs(map, 0, map->max_register, user_buf,
+			new_count, ppos);
+}
+
+#ifdef CONFIG_REGMAP_ALLOW_WRITE_DEBUGFS
+static ssize_t regmap_data_write_file(struct file *file,
+				     const char __user *user_buf,
+				     size_t count, loff_t *ppos)
+{
+	char buf[32];
+	size_t buf_size;
+	char *start = buf;
+	unsigned long value;
+	struct regmap *map = file->private_data;
+	int ret;
+
+	buf_size = min(count, (sizeof(buf)-1));
+	if (copy_from_user(buf, user_buf, buf_size))
+		return -EFAULT;
+	buf[buf_size] = 0;
+
+	while (*start == ' ')
+		start++;
+	if (kstrtoul(start, 16, &value))
+		return -EINVAL;
+
+	/* Userspace has been fiddling around behind the kernel's back */
+	add_taint(TAINT_USER, LOCKDEP_STILL_OK);
+
+	ret = regmap_write(map, map->dump_address, value);
+	if (ret < 0)
+		return ret;
+	return buf_size;
+}
+#else
+#define regmap_data_write_file NULL
+#endif
+
+static const struct file_operations regmap_data_fops = {
+	.open = simple_open,
+	.read = regmap_data_read_file,
+	.write = regmap_data_write_file,
+	.llseek = default_llseek,
+};
+
 static ssize_t regmap_range_read_file(struct file *file, char __user *user_buf,
 				      size_t count, loff_t *ppos)
 {
@@ -595,7 +655,7 @@
 	if (map->max_register || regmap_readable(map, 0)) {
 		umode_t registers_mode;
 
-#if defined(REGMAP_ALLOW_WRITE_DEBUGFS)
+#ifdef CONFIG_REGMAP_ALLOW_WRITE_DEBUGFS
 		registers_mode = 0600;
 #else
 		registers_mode = 0400;
@@ -603,6 +663,15 @@
 
 		debugfs_create_file("registers", registers_mode, map->debugfs,
 				    map, &regmap_map_fops);
+
+		debugfs_create_x32("address", 0600, map->debugfs,
+				    &map->dump_address);
+		map->dump_count = 1;
+		debugfs_create_u32("count", 0600, map->debugfs,
+				    &map->dump_count);
+		debugfs_create_file("data", registers_mode, map->debugfs,
+				    map, &regmap_data_fops);
+
 		debugfs_create_file("access", 0400, map->debugfs,
 				    map, &regmap_access_fops);
 	}
diff -ruw linux-4.4.115/drivers/base/regmap/regmap-spmi.c linux-4.4.115-fbx/drivers/base/regmap/regmap-spmi.c
--- linux-4.4.115/drivers/base/regmap/regmap-spmi.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/base/regmap/regmap-spmi.c	2019-01-22 16:16:22.883240792 +0100
@@ -1,7 +1,7 @@
 /*
  * Register map access API - SPMI support
  *
- * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012-2013,2016 The Linux Foundation. All rights reserved.
  *
  * Based on regmap-i2c.c:
  * Copyright 2011 Wolfson Microelectronics plc
diff -ruw linux-4.4.115/drivers/base/syscore.c linux-4.4.115-fbx/drivers/base/syscore.c
--- linux-4.4.115/drivers/base/syscore.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/base/syscore.c	2019-01-22 16:16:22.883240792 +0100
@@ -11,6 +11,7 @@
 #include <linux/module.h>
 #include <linux/suspend.h>
 #include <trace/events/power.h>
+#include <linux/wakeup_reason.h>
 
 static LIST_HEAD(syscore_ops_list);
 static DEFINE_MUTEX(syscore_ops_lock);
@@ -75,6 +76,8 @@
 	return 0;
 
  err_out:
+	log_suspend_abort_reason("System core suspend callback %pF failed",
+		ops->suspend);
 	pr_err("PM: System core suspend callback %pF failed.\n", ops->suspend);
 
 	list_for_each_entry_continue(ops, &syscore_ops_list, node)
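
log_suspend_abort_reason() comes from the Android wakeup_reason infrastructure
(linux/wakeup_reason.h, included at the top of this hunk) and records a
printf-style reason that userspace can query after a failed suspend. Other
suspend paths in this tree use it the same way; an illustrative sketch:

	if (pm_wakeup_pending()) {
		log_suspend_abort_reason("Wakeup pending before syscore suspend");
		return -EBUSY;
	}
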
diff -ruw linux-4.4.115/drivers/bluetooth/hci_h4.c linux-4.4.115-fbx/drivers/bluetooth/hci_h4.c
--- linux-4.4.115/drivers/bluetooth/hci_h4.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/bluetooth/hci_h4.c	2019-01-22 16:16:22.935241263 +0100
@@ -57,7 +57,7 @@
 {
 	struct h4_struct *h4;
 
-	BT_DBG("hu %p", hu);
+	BT_DBG("hu %pK", hu);
 
 	h4 = kzalloc(sizeof(*h4), GFP_KERNEL);
 	if (!h4)
@@ -74,7 +74,7 @@
 {
 	struct h4_struct *h4 = hu->priv;
 
-	BT_DBG("hu %p", hu);
+	BT_DBG("hu %pK", hu);
 
 	skb_queue_purge(&h4->txq);
 
@@ -88,7 +88,7 @@
 
 	hu->priv = NULL;
 
-	BT_DBG("hu %p", hu);
+	BT_DBG("hu %pK", hu);
 
 	skb_queue_purge(&h4->txq);
 
@@ -105,7 +105,7 @@
 {
 	struct h4_struct *h4 = hu->priv;
 
-	BT_DBG("hu %p skb %p", hu, skb);
+	BT_DBG("hu %pK skb %pK", hu, skb);
 
 	/* Prepend skb with frame type */
 	memcpy(skb_push(skb, 1), &bt_cb(skb)->pkt_type, 1);
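
The blanket %p -> %pK conversion in these BT_DBG() calls ties the printed
pointers to the kptr_restrict sysctl: on a 4.4-based tree %p still prints raw
kernel addresses, while %pK is masked to zeros for readers without CAP_SYSLOG
once kptr_restrict is set. For illustration:

	/* With /proc/sys/kernel/kptr_restrict == 1: */
	pr_info("hu %p\n", hu);		/* raw kernel address */
	pr_info("hu %pK\n", hu);	/* zeros for unprivileged readers */
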
diff -ruw linux-4.4.115/drivers/bluetooth/hci_ldisc.c linux-4.4.115-fbx/drivers/bluetooth/hci_ldisc.c
--- linux-4.4.115/drivers/bluetooth/hci_ldisc.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/bluetooth/hci_ldisc.c	2019-10-29 09:26:23.437201162 +0100
@@ -142,6 +142,8 @@
 	struct hci_dev *hdev = hu->hdev;
 	struct sk_buff *skb;
 
+	BT_DBG("hu %pK hdev %pK tty %pK", hu, hdev, tty);
+
 	/* REVISIT: should we cope with bad skbs or ->write() returning
 	 * and error value ?
 	 */
@@ -205,7 +207,7 @@
 /* Initialize device */
 static int hci_uart_open(struct hci_dev *hdev)
 {
-	BT_DBG("%s %p", hdev->name, hdev);
+	BT_DBG("%s %pK", hdev->name, hdev);
 
 	/* Nothing to do for UART driver */
 	return 0;
@@ -217,7 +219,7 @@
 	struct hci_uart *hu  = hci_get_drvdata(hdev);
 	struct tty_struct *tty = hu->tty;
 
-	BT_DBG("hdev %p tty %p", hdev, tty);
+	BT_DBG("hdev %pK tty %pK", hdev, tty);
 
 	if (hu->tx_skb) {
 		kfree_skb(hu->tx_skb); hu->tx_skb = NULL;
@@ -236,7 +238,7 @@
 /* Close device */
 static int hci_uart_close(struct hci_dev *hdev)
 {
-	BT_DBG("hdev %p", hdev);
+	BT_DBG("hdev %pK", hdev);
 
 	hci_uart_flush(hdev);
 	hdev->flush = NULL;
@@ -441,7 +443,7 @@
 {
 	struct hci_uart *hu;
 
-	BT_DBG("tty %p", tty);
+	BT_DBG("tty %pK", tty);
 
 	/* Error if the tty has no write op instead of leaving an exploitable
 	   hole */
@@ -483,7 +485,7 @@
 	struct hci_uart *hu = tty->disc_data;
 	struct hci_dev *hdev;
 
-	BT_DBG("tty %p", tty);
+	BT_DBG("tty %pK", tty);
 
 	/* Detach from the tty */
 	tty->disc_data = NULL;
diff -ruw linux-4.4.115/drivers/bluetooth/Kconfig linux-4.4.115-fbx/drivers/bluetooth/Kconfig
--- linux-4.4.115/drivers/bluetooth/Kconfig	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/bluetooth/Kconfig	2019-01-22 16:16:22.927241190 +0100
@@ -320,4 +320,32 @@
 	  Say Y here to compile support for Texas Instruments' WiLink7 driver
 	  into the kernel or say M to compile it as module (btwilink).
 
+config MSM_BT_POWER
+	tristate "MSM Bluetooth Power Control"
+	depends on ARCH_QCOM && RFKILL
+	default m
+	help
+	  Provides a parameter to switch on/off power from the PMIC to the
+	  Bluetooth device.
+
+config BTFM_SLIM
+	tristate "MSM Bluetooth/FM Slimbus Driver"
+	depends on MSM_BT_POWER
+	help
+	  This enables the BT/FM SLIMbus driver to obtain multiple audio
+	  channels. It makes use of the SLIMbus platform driver and the
+	  SLIMbus codec driver to communicate with the SLIMbus machine
+	  driver and the LPSS, which is the SLIMbus master.
+
+	  SLIMbus slave initialization and configuration are done through
+	  this driver.
+
+config BTFM_SLIM_WCN3990
+	tristate "MSM Bluetooth/FM WCN3990 Device"
+	depends on BTFM_SLIM
+	help
+	  This enables the device-specific driver handle for the WCN3990.
+	  It is designed so that any future BT/FM device can adapt it to
+	  implement its own chip initialization and control.
+
 endmenu
diff -ruw linux-4.4.115/drivers/bluetooth/Makefile linux-4.4.115-fbx/drivers/bluetooth/Makefile
--- linux-4.4.115/drivers/bluetooth/Makefile	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/bluetooth/Makefile	2019-01-22 16:16:22.927241190 +0100
@@ -23,6 +23,11 @@
 obj-$(CONFIG_BT_BCM)		+= btbcm.o
 obj-$(CONFIG_BT_RTL)		+= btrtl.o
 obj-$(CONFIG_BT_QCA)		+= btqca.o
+obj-$(CONFIG_MSM_BT_POWER)	+= bluetooth-power.o
+
+obj-$(CONFIG_BTFM_SLIM)				+= btfm_slim.o
+obj-$(CONFIG_BTFM_SLIM)				+= btfm_slim_codec.o
+obj-$(CONFIG_BTFM_SLIM_WCN3990)		+= btfm_slim_wcn3990.o
 
 btmrvl-y			:= btmrvl_main.o
 btmrvl-$(CONFIG_DEBUG_FS)	+= btmrvl_debugfs.o
diff -ruw linux-4.4.115/drivers/char/hw_random/Kconfig linux-4.4.115-fbx/drivers/char/hw_random/Kconfig
--- linux-4.4.115/drivers/char/hw_random/Kconfig	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/char/hw_random/Kconfig	2019-01-22 16:16:22.967241553 +0100
@@ -333,10 +333,25 @@
 
 	  If unsure, say Y.
 
+config HW_RANDOM_MSM_LEGACY
+	tristate "Qualcomm MSM Random Number Generator support (LEGACY)"
+	depends on HW_RANDOM && ARCH_QCOM
+	select CRYPTO_AES
+	select CRYPTO_ECB
+	default n
+	---help---
+	  This driver provides kernel-side support for the Random Number
+	  Generator hardware found on Qualcomm MSM SoCs.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called msm_rng.
+
+	  If unsure, say Y.
+
 config HW_RANDOM_MSM
 	tristate "Qualcomm SoCs Random Number Generator support"
 	depends on HW_RANDOM && ARCH_QCOM
-	default HW_RANDOM
+	default n
 	---help---
 	  This driver provides kernel-side support for the Random Number
 	  Generator hardware found on Qualcomm SoCs.
diff -ruw linux-4.4.115/drivers/char/hw_random/Makefile linux-4.4.115-fbx/drivers/char/hw_random/Makefile
--- linux-4.4.115/drivers/char/hw_random/Makefile	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/char/hw_random/Makefile	2019-01-22 16:16:22.967241553 +0100
@@ -32,4 +32,5 @@
 obj-$(CONFIG_HW_RANDOM_MSM) += msm-rng.o
 obj-$(CONFIG_HW_RANDOM_ST) += st-rng.o
 obj-$(CONFIG_HW_RANDOM_XGENE) += xgene-rng.o
+obj-$(CONFIG_HW_RANDOM_MSM_LEGACY) += msm_rng.o
 obj-$(CONFIG_HW_RANDOM_STM32) += stm32-rng.o
diff -ruw linux-4.4.115/drivers/char/Kconfig linux-4.4.115-fbx/drivers/char/Kconfig
--- linux-4.4.115/drivers/char/Kconfig	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/char/Kconfig	2019-10-29 09:26:23.441201201 +0100
@@ -48,6 +48,8 @@
 
 source "drivers/tty/serial/Kconfig"
 
+source "drivers/char/diag/Kconfig"
+
 config TTY_PRINTK
 	tristate "TTY driver to output user messages via printk"
 	depends on EXPERT && TTY
@@ -592,6 +594,16 @@
 
 source "drivers/s390/char/Kconfig"
 
+config MSM_SMD_PKT
+	bool "Enable device interface for some SMD packet ports"
+	default n
+	depends on MSM_SMD
+	help
+	  The smd_pkt driver provides the interface for userspace clients to
+	  communicate over SMD via device nodes. This enables userspace
+	  clients to read from and write to certain SMD packet channels on
+	  MSM chipsets.
+
 config TILE_SROM
 	bool "Character-device access via hypervisor to the Tilera SPI ROM"
 	depends on TILE
@@ -605,5 +617,21 @@
 
 source "drivers/char/xillybus/Kconfig"
 
+config MSM_ADSPRPC
+	tristate "QTI ADSP RPC driver"
+	depends on MSM_SMD
+	help
+	  Provides a communication mechanism that allows clients to make
+	  remote method invocations across the processor boundary to the
+	  applications DSP processor. Say M if you want to enable this
+	  module.
+
+config MSM_RDBG
+	tristate "QTI Remote debug driver"
+	help
+	  Implements a shared-memory-based transport mechanism that allows
+	  a debugger running on a host PC to communicate with a remote stub
+	  running on peripheral subsystems such as the ADSP, modem, etc.
+
 endmenu
 
diff -ruw linux-4.4.115/drivers/char/Makefile linux-4.4.115-fbx/drivers/char/Makefile
--- linux-4.4.115/drivers/char/Makefile	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/char/Makefile	2019-10-29 09:26:23.441201201 +0100
@@ -9,6 +9,7 @@
 obj-$(CONFIG_VIRTIO_CONSOLE)	+= virtio_console.o
 obj-$(CONFIG_RAW_DRIVER)	+= raw.o
 obj-$(CONFIG_SGI_SNSC)		+= snsc.o snsc_event.o
+obj-$(CONFIG_MSM_SMD_PKT)	+= msm_smd_pkt.o
 obj-$(CONFIG_MSPEC)		+= mspec.o
 obj-$(CONFIG_MMTIMER)		+= mmtimer.o
 obj-$(CONFIG_UV_MMTIMER)	+= uv_mmtimer.o
@@ -59,4 +60,10 @@
 js-rtc-y = rtc.o
 
 obj-$(CONFIG_TILE_SROM)		+= tile-srom.o
+obj-$(CONFIG_DIAG_CHAR)		+= diag/
 obj-$(CONFIG_XILLYBUS)		+= xillybus/
+obj-$(CONFIG_MSM_ADSPRPC)       += adsprpc.o
+ifdef CONFIG_COMPAT
+obj-$(CONFIG_MSM_ADSPRPC)       += adsprpc_compat.o
+endif
+obj-$(CONFIG_MSM_RDBG)		+= rdbg.o
diff -ruw linux-4.4.115/drivers/char/misc.c linux-4.4.115-fbx/drivers/char/misc.c
--- linux-4.4.115/drivers/char/misc.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/char/misc.c	2019-01-22 16:16:22.975241625 +0100
@@ -59,7 +59,7 @@
 /*
  * Assigned numbers, used for dynamic minors
  */
-#define DYNAMIC_MINORS 64 /* like dynamic majors */
+#define DYNAMIC_MINORS 96 /* like dynamic majors */
 static DECLARE_BITMAP(misc_minors, DYNAMIC_MINORS);
 
 #ifdef CONFIG_PROC_FS
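
The DYNAMIC_MINORS bump from 64 to 96 only enlarges the bitmap behind
MISC_DYNAMIC_MINOR allocations, presumably because this tree registers more
misc devices; the driver-facing pattern is unchanged. A sketch with
hypothetical names:

	static struct miscdevice my_misc = {
		.minor = MISC_DYNAMIC_MINOR,	/* drawn from the enlarged bitmap */
		.name  = "my_dev",
		.fops  = &my_fops,		/* hypothetical file_operations */
	};

	ret = misc_register(&my_misc);
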
diff -ruw linux-4.4.115/drivers/clk/clk.c linux-4.4.115-fbx/drivers/clk/clk.c
--- linux-4.4.115/drivers/clk/clk.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/clk/clk.c	2019-10-29 09:26:23.469201475 +0100
@@ -1,6 +1,7 @@
 /*
  * Copyright (C) 2010-2011 Canonical Ltd <jeremy.kerr@canonical.com>
  * Copyright (C) 2011-2012 Linaro Ltd <mturquette@linaro.org>
+ * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -23,9 +24,12 @@
 #include <linux/init.h>
 #include <linux/sched.h>
 #include <linux/clkdev.h>
+#include <linux/regulator/consumer.h>
 
 #include "clk.h"
 
+#if defined(CONFIG_COMMON_CLK)
+
 static DEFINE_SPINLOCK(enable_lock);
 static DEFINE_MUTEX(prepare_lock);
 
@@ -39,6 +43,13 @@
 static HLIST_HEAD(clk_orphan_list);
 static LIST_HEAD(clk_notifier_list);
 
+struct clk_handoff_vdd {
+	struct list_head list;
+	struct clk_vdd_class *vdd_class;
+};
+
+static LIST_HEAD(clk_handoff_vdd_list);
+
 /***    private data structures    ***/
 
 struct clk_core {
@@ -60,6 +71,8 @@
 	bool			orphan;
 	unsigned int		enable_count;
 	unsigned int		prepare_count;
+	bool			need_handoff_enable;
+	bool			need_handoff_prepare;
 	unsigned long		min_rate;
 	unsigned long		max_rate;
 	unsigned long		accuracy;
@@ -73,6 +86,9 @@
 	struct hlist_node	debug_node;
 #endif
 	struct kref		ref;
+	struct clk_vdd_class	*vdd_class;
+	unsigned long		*rate_max;
+	int			num_rate_max;
 };
 
 #define CREATE_TRACE_POINTS
@@ -172,6 +188,9 @@
 	return core->ops->is_enabled(core->hw);
 }
 
+static void clk_core_unprepare(struct clk_core *core);
+static void clk_core_disable(struct clk_core *core);
+
 static void clk_unprepare_unused_subtree(struct clk_core *core)
 {
 	struct clk_core *child;
@@ -181,6 +200,19 @@
 	hlist_for_each_entry(child, &core->children, child_node)
 		clk_unprepare_unused_subtree(child);
 
+	/*
+	 * setting CLK_ENABLE_HAND_OFF flag triggers this conditional
+	 *
+	 * need_handoff_prepare implies this clk was already prepared by
+	 * __clk_init. now we have a proper user, so unset the flag in our
+	 * internal bookkeeping. See CLK_ENABLE_HAND_OFF flag in clk-provider.h
+	 * for details.
+	 */
+	if (core->need_handoff_prepare) {
+		core->need_handoff_prepare = false;
+		clk_core_unprepare(core);
+	}
+
 	if (core->prepare_count)
 		return;
 
@@ -207,6 +239,21 @@
 	hlist_for_each_entry(child, &core->children, child_node)
 		clk_disable_unused_subtree(child);
 
+	/*
+	 * setting CLK_ENABLE_HAND_OFF flag triggers this conditional
+	 *
+	 * need_handoff_enable implies this clk was already enabled by
+	 * __clk_init. now we have a proper user, so unset the flag in our
+	 * internal bookkeeping. See CLK_ENABLE_HAND_OFF flag in clk-provider.h
+	 * for details.
+	 */
+	if (core->need_handoff_enable) {
+		core->need_handoff_enable = false;
+		flags = clk_enable_lock();
+		clk_core_disable(core);
+		clk_enable_unlock(flags);
+	}
+
 	flags = clk_enable_lock();
 
 	if (core->enable_count)
@@ -241,9 +288,12 @@
 }
 __setup("clk_ignore_unused", clk_ignore_unused_setup);
 
+static int clk_unvote_vdd_level(struct clk_vdd_class *vdd_class, int level);
+
 static int clk_disable_unused(void)
 {
 	struct clk_core *core;
+	struct clk_handoff_vdd *v, *v_temp;
 
 	if (clk_ignore_unused) {
 		pr_warn("clk: Not disabling unused clocks\n");
@@ -264,6 +314,13 @@
 	hlist_for_each_entry(core, &clk_orphan_list, child_node)
 		clk_unprepare_unused_subtree(core);
 
+	list_for_each_entry_safe(v, v_temp, &clk_handoff_vdd_list, list) {
+		clk_unvote_vdd_level(v->vdd_class,
+					v->vdd_class->num_levels - 1);
+		list_del(&v->list);
+		kfree(v);
+	}
+
 	clk_prepare_unlock();
 
 	return 0;
@@ -545,6 +602,26 @@
 EXPORT_SYMBOL_GPL(clk_hw_set_rate_range);
 
 /*
+ * Aggregate the rates of all enabled child nodes, excluding the child
+ * node that requested the aggregation via clk_aggregate_rate.
+ */
+unsigned long clk_aggregate_rate(struct clk_hw *hw,
+					const struct clk_core *parent)
+{
+	struct clk_core *child;
+	unsigned long aggre_rate = 0;
+
+	hlist_for_each_entry(child, &parent->children, child_node) {
+		if (child->enable_count &&
+				strcmp(child->name, hw->init->name))
+			aggre_rate = max(child->rate, aggre_rate);
+	}
+
+	return aggre_rate;
+}
+EXPORT_SYMBOL_GPL(clk_aggregate_rate);
+
+/*
  * Helper for finding best parent to provide a given frequency. This can be used
  * directly as a determine_rate callback (e.g. for a mux), or from a more
  * complex clock that may combine a mux with other operations.
@@ -563,6 +640,217 @@
 }
 EXPORT_SYMBOL_GPL(__clk_mux_determine_rate_closest);
 
+/*
+ *  Find the voltage level required for a given clock rate.
+ */
+static int clk_find_vdd_level(struct clk_core *clk, unsigned long rate)
+{
+	int level;
+
+	for (level = 0; level < clk->num_rate_max; level++)
+		if (rate <= clk->rate_max[level])
+			break;
+
+	if (level == clk->num_rate_max) {
+		pr_err("Rate %lu for %s is greater than highest Fmax\n", rate,
+				clk->name);
+		return -EINVAL;
+	}
+
+	return level;
+}
+
+/*
+ * Update voltage level given the current votes.
+ */
+static int clk_update_vdd(struct clk_vdd_class *vdd_class)
+{
+	int level, rc = 0, i, ignore;
+	struct regulator **r = vdd_class->regulator;
+	int *uv = vdd_class->vdd_uv;
+	int n_reg = vdd_class->num_regulators;
+	int cur_lvl = vdd_class->cur_level;
+	int max_lvl = vdd_class->num_levels - 1;
+	int cur_base = cur_lvl * n_reg;
+	int new_base;
+
+	/* aggregate votes */
+	for (level = max_lvl; level > 0; level--)
+		if (vdd_class->level_votes[level])
+			break;
+
+	if (level == cur_lvl)
+		return 0;
+
+	max_lvl = max_lvl * n_reg;
+	new_base = level * n_reg;
+
+	for (i = 0; i < vdd_class->num_regulators; i++) {
+		pr_debug("Set Voltage level Min %d, Max %d\n", uv[new_base + i],
+				uv[max_lvl + i]);
+		rc = regulator_set_voltage(r[i], uv[new_base + i],
+			vdd_class->use_max_uV ? INT_MAX : uv[max_lvl + i]);
+		if (rc)
+			goto set_voltage_fail;
+
+		if (cur_lvl == 0 || cur_lvl == vdd_class->num_levels)
+			rc = regulator_enable(r[i]);
+		else if (level == 0)
+			rc = regulator_disable(r[i]);
+		if (rc)
+			goto enable_disable_fail;
+	}
+
+	if (vdd_class->set_vdd && !vdd_class->num_regulators)
+		rc = vdd_class->set_vdd(vdd_class, level);
+
+	if (!rc)
+		vdd_class->cur_level = level;
+
+	return rc;
+
+enable_disable_fail:
+	regulator_set_voltage(r[i], uv[cur_base + i],
+			vdd_class->use_max_uV ? INT_MAX : uv[max_lvl + i]);
+
+set_voltage_fail:
+	for (i--; i >= 0; i--) {
+		regulator_set_voltage(r[i], uv[cur_base + i],
+		       vdd_class->use_max_uV ? INT_MAX : uv[max_lvl + i]);
+		if (cur_lvl == 0 || cur_lvl == vdd_class->num_levels)
+			regulator_disable(r[i]);
+		else if (level == 0)
+			ignore = regulator_enable(r[i]);
+	}
+
+	return rc;
+}
+
+/*
+ *  Vote for a voltage level.
+ */
+static int clk_vote_vdd_level(struct clk_vdd_class *vdd_class, int level)
+{
+	int rc = 0;
+
+	if (level >= vdd_class->num_levels)
+		return -EINVAL;
+
+	mutex_lock(&vdd_class->lock);
+
+	vdd_class->level_votes[level]++;
+
+	rc = clk_update_vdd(vdd_class);
+	if (rc)
+		vdd_class->level_votes[level]--;
+
+	mutex_unlock(&vdd_class->lock);
+
+	return rc;
+}
+
+/*
+ * Remove vote for a voltage level.
+ */
+static int clk_unvote_vdd_level(struct clk_vdd_class *vdd_class, int level)
+{
+	int rc = 0;
+
+	if (level >= vdd_class->num_levels)
+		return -EINVAL;
+
+	mutex_lock(&vdd_class->lock);
+
+	if (WARN(!vdd_class->level_votes[level],
+				"Reference counts are incorrect for %s level %d\n",
+				vdd_class->class_name, level))
+		goto out;
+
+	vdd_class->level_votes[level]--;
+
+	rc = clk_update_vdd(vdd_class);
+	if (rc)
+		vdd_class->level_votes[level]++;
+
+out:
+	mutex_unlock(&vdd_class->lock);
+	return rc;
+}
+
+/*
+ * Vote for a voltage level corresponding to a clock's rate.
+ */
+static int clk_vote_rate_vdd(struct clk_core *core, unsigned long rate)
+{
+	int level;
+
+	if (!core->vdd_class)
+		return 0;
+
+	level = clk_find_vdd_level(core, rate);
+	if (level < 0)
+		return level;
+
+	return clk_vote_vdd_level(core->vdd_class, level);
+}
+
+/*
+ * Remove vote for a voltage level corresponding to a clock's rate.
+ */
+static void clk_unvote_rate_vdd(struct clk_core *core, unsigned long rate)
+{
+	int level;
+
+	if (!core->vdd_class)
+		return;
+
+	level = clk_find_vdd_level(core, rate);
+	if (level < 0)
+		return;
+
+	clk_unvote_vdd_level(core->vdd_class, level);
+}
+
+static bool clk_is_rate_level_valid(struct clk_core *core, unsigned long rate)
+{
+	int level;
+
+	if (!core->vdd_class)
+		return true;
+
+	level = clk_find_vdd_level(core, rate);
+
+	return level >= 0;
+}
+
+static int clk_vdd_class_init(struct clk_vdd_class *vdd)
+{
+	struct clk_handoff_vdd *v;
+
+	if (vdd->skip_handoff)
+		return 0;
+
+	list_for_each_entry(v, &clk_handoff_vdd_list, list) {
+		if (v->vdd_class == vdd)
+			return 0;
+	}
+
+	pr_debug("voting for vdd_class %s\n", vdd->class_name);
+
+	if (clk_vote_vdd_level(vdd, vdd->num_levels - 1))
+		pr_err("failed to vote for %s\n", vdd->class_name);
+
+	v = kmalloc(sizeof(*v), GFP_KERNEL);
+	if (!v)
+		return -ENOMEM;
+
+	v->vdd_class = vdd;
+
+	list_add_tail(&v->list, &clk_handoff_vdd_list);
+
+	return 0;
+}
+
 /***        clk api        ***/
 
 static void clk_core_unprepare(struct clk_core *core)
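
The voting helpers above hinge on two per-clock fields added to clk_core:
rate_max[], indexed by voltage level, and vdd_class, the shared voltage domain
being voted on. clk_find_vdd_level() returns the first level whose Fmax covers
the requested rate, and the prepare/set_rate paths vote and unvote that level.
A provider-side sketch of the expected data; names and frequencies are
illustrative, and how a platform wires them into clk_core is outside this
hunk:

	static unsigned long my_clk_rate_max[] = {
		[0] = 0,		/* level 0: no vote needed */
		[1] = 200000000,	/* level 1 (e.g. LOW) Fmax */
		[2] = 400000000,	/* level 2 (e.g. NOMINAL) Fmax */
	};

	/*
	 * core->rate_max     = my_clk_rate_max;
	 * core->num_rate_max = ARRAY_SIZE(my_clk_rate_max);
	 * core->vdd_class    = &my_vdd_class;
	 */
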
@@ -575,6 +863,9 @@
 	if (WARN_ON(core->prepare_count == 0))
 		return;
 
+	if (WARN_ON(core->prepare_count == 1 && core->flags & CLK_IS_CRITICAL))
+		return;
+
 	if (--core->prepare_count > 0)
 		return;
 
@@ -586,6 +877,9 @@
 		core->ops->unprepare(core->hw);
 
 	trace_clk_unprepare_complete(core);
+
+	clk_unvote_rate_vdd(core, core->rate);
+
 	clk_core_unprepare(core->parent);
 }
 
@@ -627,12 +921,19 @@
 
 		trace_clk_prepare(core);
 
+		ret = clk_vote_rate_vdd(core, core->rate);
+		if (ret) {
+			clk_core_unprepare(core->parent);
+			return ret;
+		}
+
 		if (core->ops->prepare)
 			ret = core->ops->prepare(core->hw);
 
 		trace_clk_prepare_complete(core);
 
 		if (ret) {
+			clk_unvote_rate_vdd(core, core->rate);
 			clk_core_unprepare(core->parent);
 			return ret;
 		}
@@ -657,7 +958,7 @@
  */
 int clk_prepare(struct clk *clk)
 {
-	int ret;
+	int ret = 0;
 
 	if (!clk)
 		return 0;
@@ -680,6 +981,9 @@
 	if (WARN_ON(core->enable_count == 0))
 		return;
 
+	if (WARN_ON(core->enable_count == 1 && core->flags & CLK_IS_CRITICAL))
+		return;
+
 	if (--core->enable_count > 0)
 		return;
 
@@ -769,7 +1073,7 @@
 int clk_enable(struct clk *clk)
 {
 	unsigned long flags;
-	int ret;
+	int ret = 0;
 
 	if (!clk)
 		return 0;
@@ -1379,6 +1683,9 @@
 		top = clk_calc_new_rates(parent, best_parent_rate);
 
 out:
+	if (!clk_is_rate_level_valid(core, rate))
+		return NULL;
+
 	clk_calc_subtree(core, new_rate, parent, p_index);
 
 	return top;
@@ -1427,7 +1734,7 @@
  * walk down a subtree and set the new rates notifying the rate
  * change on the way
  */
-static void clk_change_rate(struct clk_core *core)
+static int clk_change_rate(struct clk_core *core)
 {
 	struct clk_core *child;
 	struct hlist_node *tmp;
@@ -1435,6 +1742,7 @@
 	unsigned long best_parent_rate = 0;
 	bool skip_set_rate = false;
 	struct clk_core *old_parent;
+	int rc = 0;
 
 	old_rate = core->rate;
 
@@ -1443,6 +1751,15 @@
 	else if (core->parent)
 		best_parent_rate = core->parent->rate;
 
+	trace_clk_set_rate(core, core->new_rate);
+
+	/* Enforce vdd requirements for new frequency. */
+	if (core->prepare_count) {
+		rc = clk_vote_rate_vdd(core, core->new_rate);
+		if (rc)
+			goto out;
+	}
+
 	if (core->new_parent && core->new_parent != core->parent) {
 		old_parent = __clk_set_parent_before(core, core->new_parent);
 		trace_clk_set_parent(core, core->new_parent);
@@ -1460,13 +1777,19 @@
 		__clk_set_parent_after(core, core->new_parent, old_parent);
 	}
 
-	trace_clk_set_rate(core, core->new_rate);
-
-	if (!skip_set_rate && core->ops->set_rate)
-		core->ops->set_rate(core->hw, core->new_rate, best_parent_rate);
+	if (!skip_set_rate && core->ops->set_rate) {
+		rc = core->ops->set_rate(core->hw, core->new_rate,
+						best_parent_rate);
+		if (rc)
+			goto err_set_rate;
+	}
 
 	trace_clk_set_rate_complete(core, core->new_rate);
 
+	/* Release vdd requirements for old frequency. */
+	if (core->prepare_count)
+		clk_unvote_rate_vdd(core, old_rate);
+
 	core->rate = clk_recalc(core, best_parent_rate);
 
 	if (core->notifier_count && old_rate != core->rate)
@@ -1489,6 +1812,16 @@
 	/* handle the new child who might not be in core->children yet */
 	if (core->new_child)
 		clk_change_rate(core->new_child);
+
+	return rc;
+
+err_set_rate:
+	if (core->prepare_count)
+		clk_unvote_rate_vdd(core, core->new_rate);
+out:
+	trace_clk_set_rate_complete(core, core->new_rate);
+
+	return rc;
 }
 
 static int clk_core_set_rate_nolock(struct clk_core *core,
@@ -1523,7 +1856,13 @@
 	}
 
 	/* change the rates */
-	clk_change_rate(top);
+	ret = clk_change_rate(top);
+	if (ret) {
+		pr_err("%s: failed to set %s rate\n", __func__,
+				top->name);
+		clk_propagate_rate_change(top, ABORT_RATE_CHANGE);
+		return ret;
+	}
 
 	core->req_rate = req_rate;
 
@@ -1953,6 +2292,18 @@
 }
 EXPORT_SYMBOL_GPL(clk_is_match);
 
+int clk_set_flags(struct clk *clk, unsigned long flags)
+{
+	if (!clk)
+		return 0;
+
+	if (!clk->core->ops->set_flags)
+		return -EINVAL;
+
+	return clk->core->ops->set_flags(clk->core->hw, flags);
+}
+EXPORT_SYMBOL_GPL(clk_set_flags);
+
 /***        debugfs support        ***/
 
 #ifdef CONFIG_DEBUG_FS
@@ -1960,6 +2311,7 @@
 
 static struct dentry *rootdir;
 static int inited = 0;
+static u32 debug_suspend;
 static DEFINE_MUTEX(clk_debug_lock);
 static HLIST_HEAD(clk_debug_list);
 
@@ -1974,6 +2326,56 @@
 	NULL,
 };
 
+static void clk_state_subtree(struct clk_core *c)
+{
+	int vdd_level = 0;
+	struct clk_core *child;
+
+	if (!c)
+		return;
+
+	if (c->vdd_class) {
+		vdd_level = clk_find_vdd_level(c, c->rate);
+		if (vdd_level < 0)
+			vdd_level = 0;
+	}
+
+	trace_clk_state(c->name, c->prepare_count, c->enable_count,
+						c->rate, vdd_level);
+
+	hlist_for_each_entry(child, &c->children, child_node)
+		clk_state_subtree(child);
+}
+
+static int clk_state_show(struct seq_file *s, void *data)
+{
+	struct clk_core *c;
+	struct hlist_head **lists = (struct hlist_head **)s->private;
+
+	clk_prepare_lock();
+
+	for (; *lists; lists++)
+		hlist_for_each_entry(c, *lists, child_node)
+			clk_state_subtree(c);
+
+	clk_prepare_unlock();
+
+	return 0;
+}
+
+static int clk_state_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, clk_state_show, inode->i_private);
+}
+
+static const struct file_operations clk_state_fops = {
+	.open		= clk_state_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+
 static void clk_summary_show_one(struct seq_file *s, struct clk_core *c,
 				 int level)
 {
@@ -2102,6 +2504,349 @@
 	.release	= single_release,
 };
 
+static int clock_debug_rate_set(void *data, u64 val)
+{
+	struct clk_core *core = data;
+	int ret;
+
+	ret = clk_set_rate(core->hw->clk, val);
+	if (ret)
+		pr_err("clk_set_rate(%lu) failed (%d)\n",
+				(unsigned long)val, ret);
+
+	return ret;
+}
+
+static int clock_debug_rate_get(void *data, u64 *val)
+{
+	struct clk_core *core = data;
+
+	*val = core->hw->core->rate;
+
+	return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(clock_rate_fops, clock_debug_rate_get,
+			clock_debug_rate_set, "%llu\n");
+
+static ssize_t clock_parent_read(struct file *filp, char __user *ubuf,
+		size_t cnt, loff_t *ppos)
+{
+	char name[256] = {0};
+	struct clk_core *core = filp->private_data;
+	struct clk_core *p = core->hw->core->parent;
+
+	snprintf(name, sizeof(name), "%s\n", p ? p->name : "None");
+
+	return simple_read_from_buffer(ubuf, cnt, ppos, name, strlen(name));
+}
+
+static const struct file_operations clock_parent_fops = {
+	.open	= simple_open,
+	.read	= clock_parent_read,
+};
+
+static int clock_debug_enable_set(void *data, u64 val)
+{
+	struct clk_core *core = data;
+	int rc = 0;
+
+	if (val)
+		rc = clk_prepare_enable(core->hw->clk);
+	else
+		clk_disable_unprepare(core->hw->clk);
+
+	return rc;
+}
+
+static int clock_debug_enable_get(void *data, u64 *val)
+{
+	struct clk_core *core = data;
+	int enabled = 0;
+
+	enabled = core->enable_count;
+
+	*val = enabled;
+
+	return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(clock_enable_fops, clock_debug_enable_get,
+			clock_debug_enable_set, "%lld\n");
+
+#define clock_debug_output(m, c, fmt, ...)		\
+do {							\
+	if (m)						\
+		seq_printf(m, fmt, ##__VA_ARGS__);	\
+	else if (c)					\
+		pr_cont(fmt, ##__VA_ARGS__);		\
+	else						\
+		pr_info(fmt, ##__VA_ARGS__);		\
+} while (0)
+
+/*
+ * clock_debug_print_enabled_debug_suspend() - Print names of enabled clocks
+ * during suspend.
+ */
+static void clock_debug_print_enabled_debug_suspend(struct seq_file *s)
+{
+	struct clk_core *core;
+	int cnt = 0;
+
+	if (!mutex_trylock(&clk_debug_lock))
+		return;
+
+	clock_debug_output(s, 0, "Enabled clocks:\n");
+
+	hlist_for_each_entry(core, &clk_debug_list, debug_node) {
+		if (!core || !core->prepare_count)
+			continue;
+
+		if (core->vdd_class)
+			clock_debug_output(s, 0, " %s:%u:%u [%ld, %d]",
+					core->name, core->prepare_count,
+					core->enable_count, core->rate,
+					clk_find_vdd_level(core, core->rate));
+
+		else
+			clock_debug_output(s, 0, " %s:%u:%u [%ld]",
+					core->name, core->prepare_count,
+					core->enable_count, core->rate);
+		cnt++;
+	}
+
+	mutex_unlock(&clk_debug_lock);
+
+	if (cnt)
+		clock_debug_output(s, 0, "Enabled clock count: %d\n", cnt);
+	else
+		clock_debug_output(s, 0, "No clocks enabled.\n");
+}
+
+static int clock_debug_print_clock(struct clk_core *c, struct seq_file *s)
+{
+	char *start = "";
+	struct clk *clk;
+
+	if (!c || !c->prepare_count)
+		return 0;
+
+	clk = c->hw->clk;
+
+	clock_debug_output(s, 0, "\t");
+
+	do {
+		if (clk->core->vdd_class)
+			clock_debug_output(s, 1, "%s%s:%u:%u [%ld, %d]", start,
+					clk->core->name,
+					clk->core->prepare_count,
+					clk->core->enable_count,
+					clk->core->rate,
+				clk_find_vdd_level(clk->core, clk->core->rate));
+		else
+			clock_debug_output(s, 1, "%s%s:%u:%u [%ld]", start,
+					clk->core->name,
+					clk->core->prepare_count,
+					clk->core->enable_count,
+					clk->core->rate);
+		start = " -> ";
+	} while ((clk = clk_get_parent(clk)));
+
+	clock_debug_output(s, 1, "\n");
+
+	return 1;
+}
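+/*
+ * For an enabled clock with one voltage-controlled ancestor the emitted
+ * line would look roughly like this (names and numbers assumed):
+ *
+ *	blsp1_ahb_clk:1:1 [19200000] -> xo:3:3 [19200000, 1]
+ *
+ * i.e. name:prepare_count:enable_count [rate] or [rate, vdd_level],
+ * walking from the clock up through its parents.
+ */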
+
+/*
+ * clock_debug_print_enabled_clocks() - Print names of enabled clocks
+ */
+static void clock_debug_print_enabled_clocks(struct seq_file *s)
+{
+	struct clk_core *core;
+	int cnt = 0;
+
+	clock_debug_output(s, 0, "Enabled clocks:\n");
+
+	mutex_lock(&clk_debug_lock);
+
+	hlist_for_each_entry(core, &clk_debug_list, debug_node)
+		cnt += clock_debug_print_clock(core, s);
+
+	mutex_unlock(&clk_debug_lock);
+
+	if (cnt)
+		clock_debug_output(s, 0, "Enabled clock count: %d\n", cnt);
+	else
+		clock_debug_output(s, 0, "No clocks enabled.\n");
+}
+
+static int enabled_clocks_show(struct seq_file *s, void *unused)
+{
+	clock_debug_print_enabled_clocks(s);
+
+	return 0;
+}
+
+static int enabled_clocks_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, enabled_clocks_show, inode->i_private);
+}
+
+static const struct file_operations clk_enabled_list_fops = {
+	.open		= enabled_clocks_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= seq_release,
+};
+
+void clk_debug_print_hw(struct clk_core *clk, struct seq_file *f)
+{
+	if (IS_ERR_OR_NULL(clk))
+		return;
+
+	clk_debug_print_hw(clk->parent, f);
+
+	clock_debug_output(f, false, "%s\n", clk->name);
+
+	if (!clk->ops->list_registers)
+		return;
+
+	clk->ops->list_registers(f, clk->hw);
+}
+EXPORT_SYMBOL(clk_debug_print_hw);
+
+static int print_hw_show(struct seq_file *m, void *unused)
+{
+	struct clk_core *c = m->private;
+
+	clk_debug_print_hw(c, m);
+
+	return 0;
+}
+
+static int print_hw_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, print_hw_show, inode->i_private);
+}
+
+static const struct file_operations clock_print_hw_fops = {
+	.open		= print_hw_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= seq_release,
+};
+
+static int list_rates_show(struct seq_file *s, void *unused)
+{
+	struct clk_core *core = s->private;
+	int level = 0, i = 0;
+	unsigned long rate, rate_max = 0;
+
+	/* Find max frequency supported within voltage constraints. */
+	if (!core->vdd_class) {
+		rate_max = ULONG_MAX;
+	} else {
+		for (level = 0; level < core->num_rate_max; level++)
+			if (core->rate_max[level])
+				rate_max = core->rate_max[level];
+	}
+
+	/*
+	 * List supported frequencies <= rate_max. Higher frequencies may
+	 * appear in the frequency table, but are not valid and should not
+	 * be listed.
+	 */
+	while (!IS_ERR_VALUE(rate =
+			core->ops->list_rate(core->hw, i++, rate_max))) {
+		if (!rate)
+			break;
+		if (rate <= rate_max)
+			seq_printf(s, "%lu\n", rate);
+	}
+
+	return 0;
+}
+
+static int list_rates_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, list_rates_show, inode->i_private);
+}
+
+static const struct file_operations list_rates_fops = {
+	.open		= list_rates_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= seq_release,
+};
+
+static void clock_print_rate_max_by_level(struct seq_file *s, int level)
+{
+	struct clk_core *core = s->private;
+	struct clk_vdd_class *vdd_class = core->vdd_class;
+	int off, i, vdd_level, nregs = vdd_class->num_regulators;
+
+	vdd_level = clk_find_vdd_level(core, core->rate);
+
+	seq_printf(s, "%2s%10lu", vdd_level == level ? "[" : "",
+		core->rate_max[level]);
+
+	for (i = 0; i < nregs; i++) {
+		off = nregs*level + i;
+		if (vdd_class->vdd_uv)
+			seq_printf(s, "%10u", vdd_class->vdd_uv[off]);
+	}
+
+	if (vdd_level == level)
+		seq_puts(s, "]");
+
+	seq_puts(s, "\n");
+}
+
+static int rate_max_show(struct seq_file *s, void *unused)
+{
+	struct clk_core *core = s->private;
+	struct clk_vdd_class *vdd_class = core->vdd_class;
+	int level = 0, i, nregs = vdd_class->num_regulators;
+	char reg_name[10];
+
+	int vdd_level = clk_find_vdd_level(core, core->rate);
+
+	if (vdd_level < 0) {
+		seq_printf(s, "could not find_vdd_level for %s, %ld\n",
+			core->name, core->rate);
+		return 0;
+	}
+
+	seq_printf(s, "%12s", "");
+	for (i = 0; i < nregs; i++) {
+		snprintf(reg_name, ARRAY_SIZE(reg_name), "reg %d", i);
+		seq_printf(s, "%10s", reg_name);
+	}
+
+	seq_printf(s, "\n%12s", "freq");
+	for (i = 0; i < nregs; i++)
+		seq_printf(s, "%10s", "uV");
+
+	seq_puts(s, "\n");
+
+	for (level = 0; level < core->num_rate_max; level++)
+		clock_print_rate_max_by_level(s, level);
+
+	return 0;
+}
+
+static int rate_max_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, rate_max_show, inode->i_private);
+}
+
+static const struct file_operations rate_max_fops = {
+	.open		= rate_max_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= seq_release,
+};
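+/*
+ * Reading clk_rate_max therefore yields one row per vdd level, with the
+ * row for the clock's current level bracketed. A plausible rendering for
+ * a clock with one regulator and three levels (all values assumed):
+ *
+ *	                 reg 0
+ *	        freq        uV
+ *	   100000000    500000
+ *	 [ 200000000    725000]
+ *	   300000000    987500
+ */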
+
 static int clk_debug_create_one(struct clk_core *core, struct dentry *pdentry)
 {
 	struct dentry *d;
@@ -2118,11 +2863,21 @@
 
 	core->dentry = d;
 
-	d = debugfs_create_u32("clk_rate", S_IRUGO, core->dentry,
-			(u32 *)&core->rate);
+	d = debugfs_create_file("clk_rate", S_IRUGO, core->dentry, core,
+			&clock_rate_fops);
 	if (!d)
 		goto err_out;
 
+	if (core->ops->list_rate) {
+		if (!debugfs_create_file("clk_list_rates",
+				S_IRUGO, core->dentry, core, &list_rates_fops))
+			goto err_out;
+	}
+
+	if (core->vdd_class && !debugfs_create_file("clk_rate_max",
+				S_IRUGO, core->dentry, core, &rate_max_fops))
+		goto err_out;
+
 	d = debugfs_create_u32("clk_accuracy", S_IRUGO, core->dentry,
 			(u32 *)&core->accuracy);
 	if (!d)
@@ -2143,8 +2898,8 @@
 	if (!d)
 		goto err_out;
 
-	d = debugfs_create_u32("clk_enable_count", S_IRUGO, core->dentry,
-			(u32 *)&core->enable_count);
+	d = debugfs_create_file("clk_enable_count", S_IRUGO, core->dentry,
+			core, &clock_enable_fops);
 	if (!d)
 		goto err_out;
 
@@ -2153,6 +2908,16 @@
 	if (!d)
 		goto err_out;
 
+	d = debugfs_create_file("clk_parent", S_IRUGO, core->dentry, core,
+			&clock_parent_fops);
+	if (!d)
+		goto err_out;
+
+	d = debugfs_create_file("clk_print_regs", S_IRUGO, core->dentry,
+			core, &clock_print_hw_fops);
+	if (!d)
+		goto err_out;
+
 	if (core->ops->debug_init) {
 		ret = core->ops->debug_init(core->hw, core->dentry);
 		if (ret)
@@ -2224,6 +2989,19 @@
 }
 EXPORT_SYMBOL_GPL(clk_debugfs_add_file);
 
+/*
+ * Print the names of all enabled clocks and their parents if
+ * debug_suspend is set from debugfs.
+ */
+void clock_debug_print_enabled(void)
+{
+	if (likely(!debug_suspend))
+		return;
+
+	clock_debug_print_enabled_debug_suspend(NULL);
+}
+EXPORT_SYMBOL_GPL(clock_debug_print_enabled);
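+/*
+ * A hedged caller sketch: a platform suspend hook might invoke this just
+ * before entering suspend (the hook name is hypothetical; enabling the
+ * output requires writing a nonzero value to the debug_suspend file
+ * created in clk_debug_init() below):
+ *
+ *	static int example_suspend_enter(suspend_state_t state)
+ *	{
+ *		clock_debug_print_enabled();	// silent while debug_suspend == 0
+ *		return 0;
+ *	}
+ */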
+
 /**
  * clk_debug_init - lazily populate the debugfs clk directory
  *
@@ -2263,6 +3041,22 @@
 	if (!d)
 		return -ENOMEM;
 
+	d = debugfs_create_file("clk_enabled_list", S_IRUGO, rootdir,
+				&clk_debug_list, &clk_enabled_list_fops);
+	if (!d)
+		return -ENOMEM;
+
+	d = debugfs_create_u32("debug_suspend", S_IRUGO | S_IWUSR,
+						rootdir, &debug_suspend);
+	if (!d)
+		return -ENOMEM;
+
+	d = debugfs_create_file("trace_clocks", S_IRUGO, rootdir, &all_lists,
+				&clk_state_fops);
+	if (!d)
+		return -ENOMEM;
+
 	mutex_lock(&clk_debug_lock);
 	hlist_for_each_entry(core, &clk_debug_list, debug_node)
 		clk_debug_create_one(core, rootdir);
@@ -2466,6 +3260,47 @@
 	if (core->ops->init)
 		core->ops->init(core->hw);
 
+	if (core->flags & CLK_IS_CRITICAL) {
+		unsigned long flags;
+
+		clk_core_prepare(core);
+
+		flags = clk_enable_lock();
+		clk_core_enable(core);
+		clk_enable_unlock(flags);
+	}
+
+	/*
+	 * enable clocks with the CLK_ENABLE_HAND_OFF flag set
+	 *
+	 * This flag causes the framework to enable the clock at registration
+	 * time, which is sometimes necessary for clocks that would cause a
+	 * system crash when gated (e.g. cpu, memory, etc). The prepare_count
+	 * is migrated over to the first clk consumer to call clk_prepare().
+	 * Similarly the clk's enable_count is migrated to the first consumer
+	 * to call clk_enable().
+	 */
+	if (core->flags & CLK_ENABLE_HAND_OFF) {
+		unsigned long flags;
+
+		/*
+		 * A few clocks might have hardware gating which must be ON
+		 * before the clock can be prepared/enabled. So check whether
+		 * the clock has already been turned ON and, if so,
+		 * prepare/enable it here to keep the software counts in sync.
+		 */
+		if (clk_core_is_enabled(core)) {
+			core->need_handoff_prepare = true;
+			core->need_handoff_enable = true;
+			ret = clk_core_prepare(core);
+			if (ret)
+				goto out;
+			flags = clk_enable_lock();
+			clk_core_enable(core);
+			clk_enable_unlock(flags);
+		}
+	}
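+	/*
+	 * Assumed consumer view of the hand-off (sketch only; the device
+	 * and connection id are hypothetical): the first prepare/enable
+	 * adopts the counts taken here, so the normal sequence stays
+	 * balanced and a later clk_disable_unprepare() can actually gate
+	 * the clock:
+	 *
+	 *	struct clk *c = devm_clk_get(dev, "core");
+	 *	if (!IS_ERR(c))
+	 *		clk_prepare_enable(c);
+	 */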
+
 	kref_init(&core->ref);
 out:
 	clk_prepare_unlock();
@@ -2545,8 +3380,19 @@
 	core->num_parents = hw->init->num_parents;
 	core->min_rate = 0;
 	core->max_rate = ULONG_MAX;
+	core->vdd_class = hw->init->vdd_class;
+	core->rate_max = hw->init->rate_max;
+	core->num_rate_max = hw->init->num_rate_max;
 	hw->core = core;
 
+	if (core->vdd_class) {
+		ret = clk_vdd_class_init(core->vdd_class);
+		if (ret) {
+			pr_err("Failed to initialize vdd class\n");
+			goto fail_parent_names;
+		}
+	}
+
 	/* allocate local copy in case parent_names is __initdata */
 	core->parent_names = kcalloc(core->num_parents, sizeof(char *),
 					GFP_KERNEL);
@@ -2901,6 +3747,8 @@
 }
 EXPORT_SYMBOL_GPL(clk_notifier_unregister);
 
+#endif /* CONFIG_COMMON_CLK */
+
 #ifdef CONFIG_OF
 /**
  * struct of_clk_provider - Clock provider registration structure
@@ -2931,6 +3779,8 @@
 }
 EXPORT_SYMBOL_GPL(of_clk_src_simple_get);
 
+#if defined(CONFIG_COMMON_CLK)
+
 struct clk *of_clk_src_onecell_get(struct of_phandle_args *clkspec, void *data)
 {
 	struct clk_onecell_data *clk_data = data;
@@ -2945,6 +3795,11 @@
 }
 EXPORT_SYMBOL_GPL(of_clk_src_onecell_get);
 
+#endif /* CONFIG_COMMON_CLK */
+
+/* forward declaration */
+void of_clk_del_provider(struct device_node *np);
+
 /**
  * of_clk_add_provider() - Register a clock provider for a node
  * @np: Device node pointer associated with clock provider
@@ -3100,8 +3955,10 @@
 			else
 				clk_name = NULL;
 		} else {
+#if defined(CONFIG_COMMON_CLK)
 			clk_name = __clk_get_name(clk);
 			clk_put(clk);
+#endif
 		}
 	}
 
@@ -3132,6 +3989,8 @@
 }
 EXPORT_SYMBOL_GPL(of_clk_parent_fill);
 
+#if defined(CONFIG_COMMON_CLK)
+
 struct clock_provider {
 	of_clk_init_cb_t clk_init_cb;
 	struct device_node *np;
@@ -3174,6 +4033,41 @@
 }
 
 /**
+ * of_clk_detect_critical() - set CLK_IS_CRITICAL flag from Device Tree
+ * @np: Device node pointer associated with clock provider
+ * @index: clock index
+ * @flags: pointer to clk_core->flags
+ *
+ * Detects if the clock-critical property exists and, if so, sets the
+ * corresponding CLK_IS_CRITICAL flag.
+ *
+ * Do not use this function. It exists only for legacy Device Tree
+ * bindings, such as the outdated one-clock-per-node style. Those
+ * bindings typically put all clock data into .dts, leaving the Linux
+ * driver with no clock data and thus no way to set this flag correctly
+ * on its own. Only such drivers may call of_clk_detect_critical() from
+ * their setup functions.
+ *
+ * Return: error code or zero on success
+ */
+int of_clk_detect_critical(struct device_node *np,
+					  int index, unsigned long *flags)
+{
+	struct property *prop;
+	const __be32 *cur;
+	uint32_t idx;
+
+	if (!np || !flags)
+		return -EINVAL;
+
+	of_property_for_each_u32(np, "clock-critical", prop, cur, idx)
+		if (index == idx)
+			*flags |= CLK_IS_CRITICAL;
+
+	return 0;
+}
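+/*
+ * A hedged example of the binding in use (the node and the driver
+ * snippet are illustrative, not taken from this patch):
+ *
+ *	oscillator {
+ *		compatible = "vendor,osc";
+ *		#clock-cells = <0>;
+ *		clock-critical = <0>;
+ *	};
+ *
+ * and in the provider's setup function:
+ *
+ *	unsigned long flags = 0;
+ *
+ *	if (!of_clk_detect_critical(np, 0, &flags))
+ *		init.flags |= flags;	// CLK_IS_CRITICAL set for index 0
+ */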
+
+/**
  * of_clk_init() - Scan and init clock providers from the DT
  * @matches: array of compatible values and init functions for providers.
  *
@@ -3240,4 +4134,7 @@
 			force = true;
 	}
 }
+
+#endif /* CONFIG_COMMON_CLK */
+
 #endif
diff -ruw linux-4.4.115/drivers/clk/clkdev.c linux-4.4.115-fbx/drivers/clk/clkdev.c
--- linux-4.4.115/drivers/clk/clkdev.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/clk/clkdev.c	2019-01-22 16:16:23.007241915 +0100
@@ -27,7 +27,7 @@
 static LIST_HEAD(clocks);
 static DEFINE_MUTEX(clocks_mutex);
 
-#if defined(CONFIG_OF) && defined(CONFIG_COMMON_CLK)
+#if defined(CONFIG_OF)
 static struct clk *__of_clk_get(struct device_node *np, int index,
 			       const char *dev_id, const char *con_id)
 {
@@ -73,14 +73,10 @@
 		if (name)
 			index = of_property_match_string(np, "clock-names", name);
 		clk = __of_clk_get(np, index, dev_id, name);
-		if (!IS_ERR(clk)) {
+		if (!IS_ERR(clk))
 			break;
-		} else if (name && index >= 0) {
-			if (PTR_ERR(clk) != -EPROBE_DEFER)
-				pr_err("ERROR: could not get clock %s:%s(%i)\n",
-					np->full_name, name ? name : "", index);
+		else if (name && index >= 0)
 			return clk;
-		}
 
 		/*
 		 * No matching clock found on this node.  If the parent node
@@ -190,7 +186,7 @@
 out:
 	mutex_unlock(&clocks_mutex);
 
-	return cl ? clk : ERR_PTR(-ENOENT);
+	return cl ? cl->clk : ERR_PTR(-ENOENT);
 }
 EXPORT_SYMBOL(clk_get_sys);
 
diff -ruw linux-4.4.115/drivers/clk/clk.h linux-4.4.115-fbx/drivers/clk/clk.h
--- linux-4.4.115/drivers/clk/clk.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/clk/clk.h	2019-01-22 16:16:23.007241915 +0100
@@ -11,7 +11,7 @@
 
 struct clk_hw;
 
-#if defined(CONFIG_OF) && defined(CONFIG_COMMON_CLK)
+#if defined(CONFIG_OF)
 struct clk *__of_clk_get_from_provider(struct of_phandle_args *clkspec,
 				       const char *dev_id, const char *con_id);
 #endif
@@ -20,6 +20,11 @@
 struct clk *__clk_create_clk(struct clk_hw *hw, const char *dev_id,
 			     const char *con_id);
 void __clk_free_clk(struct clk *clk);
+
+/* Debugfs API to print the enabled clocks */
+void clock_debug_print_enabled(void);
+void clk_debug_print_hw(struct clk_core *clk, struct seq_file *f);
+
 #else
 /* All these casts to avoid ifdefs in clkdev... */
 static inline struct clk *
diff -ruw linux-4.4.115/drivers/clk/Kconfig linux-4.4.115-fbx/drivers/clk/Kconfig
--- linux-4.4.115/drivers/clk/Kconfig	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/clk/Kconfig	2019-01-22 16:16:22.995241806 +0100
@@ -198,3 +198,4 @@
 
 source "drivers/clk/samsung/Kconfig"
 source "drivers/clk/tegra/Kconfig"
+source "drivers/clk/msm/Kconfig"
diff -ruw linux-4.4.115/drivers/clk/Makefile linux-4.4.115-fbx/drivers/clk/Makefile
--- linux-4.4.115/drivers/clk/Makefile	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/clk/Makefile	2019-01-22 16:16:22.995241806 +0100
@@ -1,7 +1,7 @@
 # common clock types
 obj-$(CONFIG_HAVE_CLK)		+= clk-devres.o
 obj-$(CONFIG_CLKDEV_LOOKUP)	+= clkdev.o
-obj-$(CONFIG_COMMON_CLK)	+= clk.o
+obj-$(CONFIG_OF)	        += clk.o
 obj-$(CONFIG_COMMON_CLK)	+= clk-divider.o
 obj-$(CONFIG_COMMON_CLK)	+= clk-fixed-factor.o
 obj-$(CONFIG_COMMON_CLK)	+= clk-fixed-rate.o
@@ -84,3 +84,4 @@
 obj-$(CONFIG_ARCH_ZX)			+= zte/
 obj-$(CONFIG_ARCH_ZYNQ)			+= zynq/
 obj-$(CONFIG_H8300)		+= h8300/
+obj-$(CONFIG_ARCH_QCOM)		+= msm/
diff -ruw linux-4.4.115/drivers/clk/qcom/Kconfig linux-4.4.115-fbx/drivers/clk/qcom/Kconfig
--- linux-4.4.115/drivers/clk/qcom/Kconfig	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/clk/qcom/Kconfig	2019-01-22 16:16:23.027242096 +0100
@@ -2,6 +2,9 @@
 	bool
 	select PM_GENERIC_DOMAINS if PM
 
+config QCOM_RPMCC
+	bool
+
 config COMMON_CLK_QCOM
 	tristate "Support for Qualcomm's clock controllers"
 	depends on OF
@@ -9,6 +12,32 @@
 	select REGMAP_MMIO
 	select RESET_CONTROLLER
 
+config QCOM_CLK_RPM
+	tristate "RPM based Clock Controller"
+	depends on COMMON_CLK_QCOM && MFD_QCOM_RPM
+	select QCOM_RPMCC
+	help
+	  The RPM (Resource Power Manager) is a dedicated hardware engine for
+	  managing the shared SoC resources in order to keep the lowest power
+	  profile. It communicates with other hardware subsystems via shared
+	  memory and accepts clock requests, aggregates the requests and turns
+	  the clocks on/off or scales them on demand.
+	  Say Y if you want to support the clocks exposed by the RPM on
+	  platforms such as apq8064, msm8660, msm8960 etc.
+
+config QCOM_CLK_SMD_RPM
+	tristate "RPM over SMD based Clock Controller"
+	depends on COMMON_CLK_QCOM
+	select QCOM_RPMCC
+	help
+	  The RPM (Resource Power Manager) is a dedicated hardware engine for
+	  managing the shared SoC resources in order to keep the lowest power
+	  profile. It communicates with other hardware subsystems via shared
+	  memory and accepts clock requests, aggregates the requests and turns
+	  the clocks on/off or scales them on demand.
+	  Say Y if you want to support the clocks exposed by the RPM on
+	  platforms such as apq8016, apq8084, msm8974 etc.
+
 config APQ_GCC_8084
 	tristate "APQ8084 Global Clock Controller"
 	select QCOM_GDSC
@@ -106,3 +135,109 @@
 	  Support for the multimedia clock controller on msm8974 devices.
 	  Say Y if you want to support multimedia devices such as display,
 	  graphics, video encode/decode, camera, etc.
+
+config MSM_GCC_8996
+	tristate "MSM8996 Global Clock Controller"
+	depends on COMMON_CLK_QCOM
+	help
+	  Support for the global clock controller on msm8996 devices.
+	  Say Y if you want to use peripheral devices such as UART, SPI,
+	  i2c, USB, UFS, SD/eMMC, PCIe, etc.
+
+config MSM_MMCC_8996
+	tristate "MSM8996 Multimedia Clock Controller"
+	select MSM_GCC_8996
+	depends on COMMON_CLK_QCOM
+	help
+	  Support for the multimedia clock controller on msm8996 devices.
+	  Say Y if you want to support multimedia devices such as display,
+	  graphics, video encode/decode, camera, etc.
+
+config MSM_GCC_660
+	tristate "SDM660 Global Clock Controller"
+	select QCOM_GDSC
+	depends on COMMON_CLK_QCOM
+	---help---
+	  Support for the global clock controller on Qualcomm Technologies, Inc
+	  SDM660 devices.
+	  Say Y if you want to use peripheral devices such as UART, SPI, I2C,
+	  USB, UFS, SD/eMMC, PCIe, etc.
+
+config MSM_GPUCC_660
+	tristate "SDM660 Graphics Clock Controller"
+	select MSM_GCC_660
+	depends on COMMON_CLK_QCOM
+	help
+	  Support for the graphics clock controller on Qualcomm Technologies, Inc
+	  SDM660 devices.
+	  Say Y if you want to support graphics controller devices;
+	  this is required to enable them.
+
+config MSM_MMCC_660
+	tristate "SDM660 Multimedia Clock Controller"
+	select MSM_GCC_660
+	depends on COMMON_CLK_QCOM
+	help
+	  Support for the multimedia clock controller on Qualcomm Technologies, Inc
+	  SDM660 devices.
+	  Say Y if you want to support multimedia devices such as display,
+	  video encode/decode, camera, etc.
+
+config QCOM_HFPLL
+	tristate "High-Frequency PLL (HFPLL) Clock Controller"
+	depends on COMMON_CLK_QCOM
+	help
+	  Support for the high-frequency PLLs present on MSM devices.
+	  Say Y if you want to support CPU frequency scaling on devices
+	  such as MSM8974, APQ8084, etc.
+
+config KPSS_XCC
+	tristate "KPSS Clock Controller"
+	depends on COMMON_CLK_QCOM
+	help
+	  Support for the Krait ACC and GCC clock controllers. Say Y
+	  if you want to support CPU frequency scaling on devices such
+	  as MSM8960, APQ8064, etc.
+
+config KRAITCC
+	tristate "Krait Clock Controller"
+	depends on COMMON_CLK_QCOM && ARM
+	select KRAIT_CLOCKS
+	help
+	  Support for the Krait CPU clocks on MSM devices.
+	  Say Y if you want to support CPU frequency scaling.
+
+config KRAIT_CLOCKS
+	bool
+	select KRAIT_L2_ACCESSORS
+
+config QCOM_A53
+	tristate "A53 Clock Controller"
+	depends on COMMON_CLK_QCOM
+	help
+	  Support for the A53 clock controller on MSM devices.
+	  Say Y if you want to support CPU frequency scaling on devices
+	  such as MSM8916.
+
+config CLOCK_CPU_OSM
+	tristate "OSM CPU Clock Controller"
+	depends on COMMON_CLK_QCOM
+	help
+	 Support for the OSM clock controller.
+	 Operating State Manager (OSM) is a hardware engine used by some
+	 Qualcomm Technologies, Inc. (QTI) SoCs to manage frequency and
+	 voltage scaling in hardware. OSM is capable of controlling
+	 frequency and voltage requests for multiple clusters via the
+	 existence of multiple OSM domains.
+	 Say Y if you want to support OSM clocks.
+
+config CLOCK_QPNP_DIV
+	tristate "QPNP PMIC clkdiv driver"
+	depends on COMMON_CLK_QCOM && SPMI
+	help
+	  This driver supports the clkdiv functionality on the Qualcomm
+	  Technologies, Inc. QPNP PMIC. It configures the frequency of
+	  clkdiv outputs on the PMIC. These clocks are typically wired
+	  through alternate functions on gpio pins.
+
+source "drivers/clk/qcom/mdss/Kconfig"
diff -ruw linux-4.4.115/drivers/clk/qcom/Makefile linux-4.4.115-fbx/drivers/clk/qcom/Makefile
--- linux-4.4.115/drivers/clk/qcom/Makefile	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/clk/qcom/Makefile	2019-01-22 16:16:23.027242096 +0100
@@ -2,14 +2,19 @@
 
 clk-qcom-y += common.o
 clk-qcom-y += clk-regmap.o
+clk-qcom-y += clk-alpha-pll.o
 clk-qcom-y += clk-pll.o
 clk-qcom-y += clk-rcg.o
 clk-qcom-y += clk-rcg2.o
 clk-qcom-y += clk-branch.o
 clk-qcom-y += clk-regmap-divider.o
 clk-qcom-y += clk-regmap-mux.o
-clk-qcom-y += reset.o
-clk-qcom-$(CONFIG_QCOM_GDSC) += gdsc.o
+clk-qcom-y += clk-regmap-mux-div.o
+clk-qcom-$(CONFIG_KRAIT_CLOCKS) += clk-krait.o
+clk-qcom-y += clk-hfpll.o
+clk-qcom-y += reset.o clk-voter.o
+clk-qcom-y += clk-dummy.o clk-debug.o
+clk-qcom-$(CONFIG_QCOM_GDSC) += gdsc.o gdsc-regulator.o
 
 obj-$(CONFIG_APQ_GCC_8084) += gcc-apq8084.o
 obj-$(CONFIG_APQ_MMCC_8084) += mmcc-apq8084.o
@@ -20,5 +25,20 @@
 obj-$(CONFIG_MSM_GCC_8960) += gcc-msm8960.o
 obj-$(CONFIG_MSM_LCC_8960) += lcc-msm8960.o
 obj-$(CONFIG_MSM_GCC_8974) += gcc-msm8974.o
+obj-$(CONFIG_MSM_GCC_8996) += gcc-msm8996.o
+obj-$(CONFIG_MSM_GCC_660) += gcc-sdm660.o
 obj-$(CONFIG_MSM_MMCC_8960) += mmcc-msm8960.o
 obj-$(CONFIG_MSM_MMCC_8974) += mmcc-msm8974.o
+obj-$(CONFIG_MSM_MMCC_8996) += mmcc-msm8996.o
+obj-$(CONFIG_MSM_GPUCC_660) += gpucc-sdm660.o
+obj-$(CONFIG_MSM_MMCC_660) += mmcc-sdm660.o
+obj-$(CONFIG_KPSS_XCC) += kpss-xcc.o
+obj-$(CONFIG_QCOM_HFPLL) += hfpll.o
+obj-$(CONFIG_KRAITCC) += krait-cc.o
+obj-$(CONFIG_QCOM_A53) += clk-a53.o
+obj-$(CONFIG_QCOM_CLK_RPM) += clk-rpm.o
+obj-$(CONFIG_QCOM_CLK_SMD_RPM) += clk-smd-rpm.o
+obj-$(CONFIG_CLOCK_CPU_OSM) += clk-cpu-osm.o
+obj-$(CONFIG_CLOCK_QPNP_DIV) += clk-qpnp-div.o
+
+obj-y += mdss/
diff -ruw linux-4.4.115/drivers/clocksource/arm_arch_timer.c linux-4.4.115-fbx/drivers/clocksource/arm_arch_timer.c
--- linux-4.4.115/drivers/clocksource/arm_arch_timer.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/clocksource/arm_arch_timer.c	2019-10-29 09:26:23.489201671 +0100
@@ -25,6 +25,7 @@
 #include <linux/acpi.h>
 
 #include <asm/arch_timer.h>
+#include <asm/traps.h>
 #include <asm/virt.h>
 
 #include <clocksource/arm_arch_timer.h>
@@ -83,20 +84,20 @@
 		struct arch_timer *timer = to_arch_timer(clk);
 		switch (reg) {
 		case ARCH_TIMER_REG_CTRL:
-			writel_relaxed(val, timer->base + CNTP_CTL);
+			writel_relaxed_no_log(val, timer->base + CNTP_CTL);
 			break;
 		case ARCH_TIMER_REG_TVAL:
-			writel_relaxed(val, timer->base + CNTP_TVAL);
+			writel_relaxed_no_log(val, timer->base + CNTP_TVAL);
 			break;
 		}
 	} else if (access == ARCH_TIMER_MEM_VIRT_ACCESS) {
 		struct arch_timer *timer = to_arch_timer(clk);
 		switch (reg) {
 		case ARCH_TIMER_REG_CTRL:
-			writel_relaxed(val, timer->base + CNTV_CTL);
+			writel_relaxed_no_log(val, timer->base + CNTV_CTL);
 			break;
 		case ARCH_TIMER_REG_TVAL:
-			writel_relaxed(val, timer->base + CNTV_TVAL);
+			writel_relaxed_no_log(val, timer->base + CNTV_TVAL);
 			break;
 		}
 	} else {
@@ -114,20 +115,20 @@
 		struct arch_timer *timer = to_arch_timer(clk);
 		switch (reg) {
 		case ARCH_TIMER_REG_CTRL:
-			val = readl_relaxed(timer->base + CNTP_CTL);
+			val = readl_relaxed_no_log(timer->base + CNTP_CTL);
 			break;
 		case ARCH_TIMER_REG_TVAL:
-			val = readl_relaxed(timer->base + CNTP_TVAL);
+			val = readl_relaxed_no_log(timer->base + CNTP_TVAL);
 			break;
 		}
 	} else if (access == ARCH_TIMER_MEM_VIRT_ACCESS) {
 		struct arch_timer *timer = to_arch_timer(clk);
 		switch (reg) {
 		case ARCH_TIMER_REG_CTRL:
-			val = readl_relaxed(timer->base + CNTV_CTL);
+			val = readl_relaxed_no_log(timer->base + CNTV_CTL);
 			break;
 		case ARCH_TIMER_REG_TVAL:
-			val = readl_relaxed(timer->base + CNTV_TVAL);
+			val = readl_relaxed_no_log(timer->base + CNTV_TVAL);
 			break;
 		}
 	} else {
@@ -328,12 +329,16 @@
 	/* Disable user access to the timers and the physical counter */
 	/* Also disable virtual event stream */
 	cntkctl &= ~(ARCH_TIMER_USR_PT_ACCESS_EN
-			| ARCH_TIMER_USR_VT_ACCESS_EN
 			| ARCH_TIMER_VIRT_EVT_EN
 			| ARCH_TIMER_USR_PCT_ACCESS_EN);
 
 	/* Enable user access to the virtual counter */
+	cntkctl |= ARCH_TIMER_USR_VT_ACCESS_EN;
+
-	cntkctl |= ARCH_TIMER_USR_VCT_ACCESS_EN;
+	if (IS_ENABLED(CONFIG_ARM_ARCH_TIMER_VCT_ACCESS))
+		cntkctl |= ARCH_TIMER_USR_VCT_ACCESS_EN;
+	else
+		cntkctl &= ~ARCH_TIMER_USR_VCT_ACCESS_EN;
 
 	arch_timer_set_cntkctl(cntkctl);
 }
@@ -371,7 +376,8 @@
 	if (!acpi_disabled ||
 	    of_property_read_u32(np, "clock-frequency", &arch_timer_rate)) {
 		if (cntbase)
-			arch_timer_rate = readl_relaxed(cntbase + CNTFRQ);
+			arch_timer_rate = readl_relaxed_no_log(cntbase
+								+ CNTFRQ);
 		else
 			arch_timer_rate = arch_timer_get_cntfrq();
 	}
@@ -408,9 +414,9 @@
 	u32 vct_lo, vct_hi, tmp_hi;
 
 	do {
-		vct_hi = readl_relaxed(arch_counter_base + CNTVCT_HI);
-		vct_lo = readl_relaxed(arch_counter_base + CNTVCT_LO);
-		tmp_hi = readl_relaxed(arch_counter_base + CNTVCT_HI);
+		vct_hi = readl_relaxed_no_log(arch_counter_base + CNTVCT_HI);
+		vct_lo = readl_relaxed_no_log(arch_counter_base + CNTVCT_LO);
+		tmp_hi = readl_relaxed_no_log(arch_counter_base + CNTVCT_HI);
 	} while (vct_hi != tmp_hi);
 
 	return ((u64) vct_hi << 32) | vct_lo;
@@ -686,6 +692,7 @@
 	arch_timer_banner(arch_timers_present);
 	arch_counter_register(arch_timers_present);
 	arch_timer_arch_init();
+	clocksource_select_force();
 }
 
 static void __init arch_timer_init(void)
@@ -756,7 +763,7 @@
 		return;
 	}
 
-	cnttidr = readl_relaxed(cntctlbase + CNTTIDR);
+	cnttidr = readl_relaxed_no_log(cntctlbase + CNTTIDR);
 	iounmap(cntctlbase);
 
 	/*
@@ -804,6 +811,7 @@
 	arch_timer_detect_rate(base, np);
 	arch_timer_mem_register(base, irq);
 	arch_timer_common_init();
+	get_pct_hook_init();
 }
 CLOCKSOURCE_OF_DECLARE(armv7_arch_timer_mem, "arm,armv7-timer-mem",
 		       arch_timer_mem_init);
diff -ruw linux-4.4.115/drivers/clocksource/Kconfig linux-4.4.115-fbx/drivers/clocksource/Kconfig
--- linux-4.4.115/drivers/clocksource/Kconfig	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/clocksource/Kconfig	2019-01-22 16:16:23.067242458 +0100
@@ -158,6 +158,23 @@
 	  This must be disabled for hardware validation purposes to detect any
 	  hardware anomalies of missing events.
 
+config MSM_TIMER_LEAP
+        bool "ARCH TIMER counter rollover"
+        default n
+        depends on ARM_ARCH_TIMER && ARM64
+        help
+          This option enables a check for rollover of the least
+          significant 32 bits of the counter: on every counter read,
+          if the least significant 32 bits are all set, the counter
+          is read again.
+
+config ARM_ARCH_TIMER_VCT_ACCESS
+	bool "Support for ARM architected timer virtual counter access in userspace"
+	default !ARM64
+	depends on ARM_ARCH_TIMER
+	help
+	  This option enables support for reading the ARM architected timer's
+	  virtual counter in userspace.
+
 config ARM_GLOBAL_TIMER
 	bool
 	select CLKSRC_OF if OF
diff -ruw linux-4.4.115/drivers/cpufreq/cpufreq.c linux-4.4.115-fbx/drivers/cpufreq/cpufreq.c
--- linux-4.4.115/drivers/cpufreq/cpufreq.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/cpufreq/cpufreq.c	2019-10-29 09:26:23.493201710 +0100
@@ -29,6 +29,9 @@
 #include <linux/suspend.h>
 #include <linux/syscore_ops.h>
 #include <linux/tick.h>
+#ifdef CONFIG_SMP
+#include <linux/sched.h>
+#endif
 #include <trace/events/power.h>
 
 static LIST_HEAD(cpufreq_policy_list);
@@ -127,6 +130,7 @@
  */
 static BLOCKING_NOTIFIER_HEAD(cpufreq_policy_notifier_list);
 static struct srcu_notifier_head cpufreq_transition_notifier_list;
+struct atomic_notifier_head cpufreq_govinfo_notifier_list;
 
 static bool init_cpufreq_transition_notifier_list_called;
 static int __init init_cpufreq_transition_notifier_list(void)
@@ -137,6 +141,15 @@
 }
 pure_initcall(init_cpufreq_transition_notifier_list);
 
+static bool init_cpufreq_govinfo_notifier_list_called;
+static int __init init_cpufreq_govinfo_notifier_list(void)
+{
+	ATOMIC_INIT_NOTIFIER_HEAD(&cpufreq_govinfo_notifier_list);
+	init_cpufreq_govinfo_notifier_list_called = true;
+	return 0;
+}
+pure_initcall(init_cpufreq_govinfo_notifier_list);
+
 static int off __read_mostly;
 static int cpufreq_disabled(void)
 {
@@ -154,6 +167,12 @@
 }
 EXPORT_SYMBOL_GPL(have_governor_per_policy);
 
+bool cpufreq_driver_is_slow(void)
+{
+	return !(cpufreq_driver->flags & CPUFREQ_DRIVER_FAST);
+}
+EXPORT_SYMBOL_GPL(cpufreq_driver_is_slow);
+
 struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy)
 {
 	if (have_governor_per_policy())
@@ -347,6 +366,50 @@
 #endif
 }
 
+/*********************************************************************
+ *               FREQUENCY INVARIANT CPU CAPACITY                    *
+ *********************************************************************/
+
+static DEFINE_PER_CPU(unsigned long, freq_scale) = SCHED_CAPACITY_SCALE;
+static DEFINE_PER_CPU(unsigned long, max_freq_scale) = SCHED_CAPACITY_SCALE;
+
+static void
+scale_freq_capacity(struct cpufreq_policy *policy, struct cpufreq_freqs *freqs)
+{
+	unsigned long cur = freqs ? freqs->new : policy->cur;
+	unsigned long scale = (cur << SCHED_CAPACITY_SHIFT) / policy->max;
+	struct cpufreq_cpuinfo *cpuinfo = &policy->cpuinfo;
+	int cpu;
+
+	pr_debug("cpus %*pbl cur/cur max freq %lu/%u kHz freq scale %lu\n",
+		 cpumask_pr_args(policy->cpus), cur, policy->max, scale);
+
+	for_each_cpu(cpu, policy->cpus)
+		per_cpu(freq_scale, cpu) = scale;
+
+	if (freqs)
+		return;
+
+	scale = (policy->max << SCHED_CAPACITY_SHIFT) / cpuinfo->max_freq;
+
+	pr_debug("cpus %*pbl cur max/max freq %u/%u kHz max freq scale %lu\n",
+		 cpumask_pr_args(policy->cpus), policy->max, cpuinfo->max_freq,
+		 scale);
+
+	for_each_cpu(cpu, policy->cpus)
+		per_cpu(max_freq_scale, cpu) = scale;
+}
+
+unsigned long cpufreq_scale_freq_capacity(struct sched_domain *sd, int cpu)
+{
+	return per_cpu(freq_scale, cpu);
+}
+
+unsigned long cpufreq_scale_max_freq_capacity(int cpu)
+{
+	return per_cpu(max_freq_scale, cpu);
+}
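+/*
+ * Worked example for scale_freq_capacity() above (numbers assumed):
+ * with SCHED_CAPACITY_SHIFT = 10, a policy whose max is 1800000 kHz
+ * running at 900000 kHz gives
+ *
+ *	scale = (900000 << 10) / 1800000 = 512
+ *
+ * i.e. half of SCHED_CAPACITY_SCALE (1024); max_freq_scale is derived
+ * the same way from policy->max against cpuinfo->max_freq.
+ */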
+
 static void __cpufreq_notify_transition(struct cpufreq_policy *policy,
 		struct cpufreq_freqs *freqs, unsigned int state)
 {
@@ -423,6 +486,9 @@
 void cpufreq_freq_transition_begin(struct cpufreq_policy *policy,
 		struct cpufreq_freqs *freqs)
 {
+#ifdef CONFIG_SMP
+	int cpu;
+#endif
 
 	/*
 	 * Catch double invocations of _begin() which lead to self-deadlock.
@@ -450,6 +516,12 @@
 
 	spin_unlock(&policy->transition_lock);
 
+	scale_freq_capacity(policy, freqs);
+#ifdef CONFIG_SMP
+	for_each_cpu(cpu, policy->cpus)
+		trace_cpu_capacity(capacity_curr_of(cpu), cpu);
+#endif
+
 	cpufreq_notify_transition(policy, freqs, CPUFREQ_PRECHANGE);
 }
 EXPORT_SYMBOL_GPL(cpufreq_freq_transition_begin);
@@ -469,6 +541,38 @@
 }
 EXPORT_SYMBOL_GPL(cpufreq_freq_transition_end);
 
+/**
+ * cpufreq_driver_resolve_freq - Map a target frequency to a driver-supported
+ * one.
+ * @target_freq: target frequency to resolve.
+ *
+ * The target to driver frequency mapping is cached in the policy.
+ *
+ * Return: Lowest driver-supported frequency greater than or equal to the
+ * given target_freq, subject to policy (min/max) and driver limitations.
+ */
+unsigned int cpufreq_driver_resolve_freq(struct cpufreq_policy *policy,
+					 unsigned int target_freq)
+{
+	target_freq = clamp_val(target_freq, policy->min, policy->max);
+	policy->cached_target_freq = target_freq;
+
+	if (cpufreq_driver->target_index) {
+		int idx, rv;
+
+		rv = cpufreq_frequency_table_target(policy, policy->freq_table,
+						    target_freq,
+						    CPUFREQ_RELATION_L,
+						    &idx);
+		if (rv)
+			return target_freq;
+		policy->cached_resolved_idx = idx;
+		return policy->freq_table[idx].frequency;
+	}
+
+	return target_freq;
+}
+EXPORT_SYMBOL_GPL(cpufreq_driver_resolve_freq);
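+/*
+ * A brief governor-side sketch (the helper name is hypothetical): the
+ * resolver clamps to policy->min/max and rounds up to a supported table
+ * entry via CPUFREQ_RELATION_L, caching the index for the fast path.
+ *
+ *	static unsigned int example_pick_freq(struct cpufreq_policy *policy,
+ *					      unsigned int wanted_khz)
+ *	{
+ *		return cpufreq_driver_resolve_freq(policy, wanted_khz);
+ *	}
+ */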
 
 /*********************************************************************
  *                          SYSFS INTERFACE                          *
@@ -1001,7 +1105,8 @@
 	if (has_target()) {
 		ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
 		if (ret) {
-			pr_err("%s: Failed to stop governor\n", __func__);
+			pr_err("%s: Failed to stop governor for CPU%u, policy CPU%u\n",
+			       __func__, cpu, policy->cpu);
 			return ret;
 		}
 	}
@@ -1016,7 +1121,8 @@
 			ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
 
 		if (ret) {
-			pr_err("%s: Failed to start governor\n", __func__);
+			pr_err("%s: Failed to start governor for CPU%u, policy CPU%u\n",
+			       __func__, cpu, policy->cpu);
 			return ret;
 		}
 	}
@@ -1329,7 +1435,8 @@
 	if (has_target()) {
 		int ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
 		if (ret)
-			pr_err("%s: Failed to stop governor\n", __func__);
+			pr_err("%s: Failed to stop governor for CPU%u\n",
+			       __func__, cpu);
 	}
 
 	down_write(&policy->rwsem);
@@ -1379,7 +1486,8 @@
 	if (has_target()) {
 		int ret = __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
 		if (ret)
-			pr_err("%s: Failed to exit governor\n", __func__);
+			pr_err("%s: Failed to start governor for CPU%u, policy CPU%u\n",
+			       __func__, cpu, policy->cpu);
 	}
 
 	/*
@@ -1708,7 +1816,8 @@
 	if (cpufreq_disabled())
 		return -EINVAL;
 
-	WARN_ON(!init_cpufreq_transition_notifier_list_called);
+	WARN_ON(!init_cpufreq_transition_notifier_list_called ||
+		!init_cpufreq_govinfo_notifier_list_called);
 
 	switch (list) {
 	case CPUFREQ_TRANSITION_NOTIFIER:
@@ -1719,6 +1828,10 @@
 		ret = blocking_notifier_chain_register(
 				&cpufreq_policy_notifier_list, nb);
 		break;
+	case CPUFREQ_GOVINFO_NOTIFIER:
+		ret = atomic_notifier_chain_register(
+				&cpufreq_govinfo_notifier_list, nb);
+		break;
 	default:
 		ret = -EINVAL;
 	}
@@ -1753,6 +1866,10 @@
 		ret = blocking_notifier_chain_unregister(
 				&cpufreq_policy_notifier_list, nb);
 		break;
+	case CPUFREQ_GOVINFO_NOTIFIER:
+		ret = atomic_notifier_chain_unregister(
+				&cpufreq_govinfo_notifier_list, nb);
+		break;
 	default:
 		ret = -EINVAL;
 	}
@@ -1865,15 +1982,6 @@
 	pr_debug("target for CPU %u: %u kHz, relation %u, requested %u kHz\n",
 		 policy->cpu, target_freq, relation, old_target_freq);
 
-	/*
-	 * This might look like a redundant call as we are checking it again
-	 * after finding index. But it is left intentionally for cases where
-	 * exactly same freq is called again and so we can save on few function
-	 * calls.
-	 */
-	if (target_freq == policy->cur)
-		return 0;
-
 	/* Save last value to restore later on errors */
 	policy->restore_freq = policy->cur;
 
@@ -2131,8 +2239,11 @@
 	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
 			CPUFREQ_NOTIFY, new_policy);
 
+	scale_freq_capacity(new_policy, NULL);
+
 	policy->min = new_policy->min;
 	policy->max = new_policy->max;
+	trace_cpu_frequency_limits(policy->max, policy->min, policy->cpu);
 
 	pr_debug("new min and max freqs are %u - %u kHz\n",
 		 policy->min, policy->max);
@@ -2259,6 +2370,9 @@
 {
 	unsigned int cpu = (unsigned long)hcpu;
 
+	if (!cpufreq_driver)
+		return NOTIFY_OK;
+
 	switch (action & ~CPU_TASKS_FROZEN) {
 	case CPU_ONLINE:
 		cpufreq_online(cpu);
@@ -2425,6 +2539,9 @@
 
 	pr_debug("trying to register driver %s\n", driver_data->name);
 
+	/* Register for hotplug notifers before blocking hotplug. */
+	register_hotcpu_notifier(&cpufreq_cpu_notifier);
+
 	/* Protect against concurrent CPU online/offline. */
 	get_online_cpus();
 
@@ -2457,8 +2574,7 @@
 		goto err_if_unreg;
 	}
 
-	register_hotcpu_notifier(&cpufreq_cpu_notifier);
-	pr_debug("driver %s up and running\n", driver_data->name);
+	pr_info("driver %s up and running\n", driver_data->name);
 
 out:
 	put_online_cpus();
@@ -2491,7 +2607,7 @@
 	if (!cpufreq_driver || (driver != cpufreq_driver))
 		return -EINVAL;
 
-	pr_debug("unregistering driver %s\n", driver->name);
+	pr_info("unregistering driver %s\n", driver->name);
 
 	/* Protect against concurrent cpu hotplug */
 	get_online_cpus();
diff -ruw linux-4.4.115/drivers/cpufreq/Kconfig linux-4.4.115-fbx/drivers/cpufreq/Kconfig
--- linux-4.4.115/drivers/cpufreq/Kconfig	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/cpufreq/Kconfig	2019-10-29 09:26:23.493201710 +0100
@@ -102,6 +102,33 @@
 	  Be aware that not all cpufreq drivers support the conservative
 	  governor. If unsure have a look at the help section of the
 	  driver. Fallback governor will be the performance governor.
+
+config CPU_FREQ_DEFAULT_GOV_INTERACTIVE
+	bool "interactive"
+	select CPU_FREQ_GOV_INTERACTIVE
+	help
+	  Use the CPUFreq governor 'interactive' as default. This allows
+	  you to get a full dynamic cpu frequency capable system by simply
+	  loading your cpufreq low-level hardware driver, using the
+	  'interactive' governor for latency-sensitive workloads.
+
+config CPU_FREQ_DEFAULT_GOV_SCHED
+	bool "sched"
+	select CPU_FREQ_GOV_SCHED
+	help
+	  Use the CPUfreq governor 'sched' as default. This scales
+	  cpu frequency using CPU utilization estimates from the
+	  scheduler.
+
+config CPU_FREQ_DEFAULT_GOV_SCHEDUTIL
+	bool "schedutil"
+	depends on SMP
+	select CPU_FREQ_GOV_SCHEDUTIL
+	select CPU_FREQ_GOV_PERFORMANCE
+	help
+	  Use the 'schedutil' CPUFreq governor by default. If unsure,
+	  have a look at the help section of that governor. The fallback
+	  governor will be 'performance'.
 endchoice
 
 config CPU_FREQ_GOV_PERFORMANCE
@@ -159,6 +186,20 @@
 
 	  If in doubt, say N.
 
+config CPU_FREQ_GOV_INTERACTIVE
+	bool "'interactive' cpufreq policy governor"
+	help
+	  'interactive' - This driver adds a dynamic cpufreq policy governor
+	  designed for latency-sensitive workloads.
+
+	  This governor attempts to reduce the latency of clock
+	  increases so that the system is more responsive to
+	  interactive workloads.
+
+	  For details, take a look at linux/Documentation/cpu-freq.
+
+	  If in doubt, say N.
+
 config CPU_FREQ_GOV_CONSERVATIVE
 	tristate "'conservative' cpufreq governor"
 	depends on CPU_FREQ
@@ -183,6 +224,34 @@
 
 	  If in doubt, say N.
 
+config CPU_BOOST
+	tristate "Event base short term CPU freq boost"
+	depends on CPU_FREQ
+	help
+	  This driver boosts the frequency of one or more CPUs based on
+	  various events that might occur in the system. As of now, the
+	  events it reacts to are:
+	  - Migration of important threads from one CPU to another.
+
+	  If in doubt, say N.
+
+config CPU_FREQ_GOV_SCHEDUTIL
+	bool "'schedutil' cpufreq policy governor"
+	depends on CPU_FREQ && SMP
+	select CPU_FREQ_GOV_ATTR_SET
+	select IRQ_WORK
+	help
+	  This governor makes decisions based on the utilization data provided
+	  by the scheduler.  It sets the CPU frequency to be proportional to
+	  the utilization/capacity ratio coming from the scheduler.  If the
+	  utilization is frequency-invariant, the new frequency is also
+	  proportional to the maximum available frequency.  If that is not the
+	  case, it is proportional to the current frequency of the CPU.  The
+	  frequency tipping point is at utilization/capacity equal to 80% in
+	  both cases.
+
+	  If in doubt, say N.
+
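+# In other words (a paraphrase of the help text above, not a formula
+# taken from this patch): with the 80% tipping point the target reduces
+# to roughly next_freq = 1.25 * ref_freq * util / capacity, where
+# ref_freq is the maximum frequency in the frequency-invariant case and
+# the current frequency otherwise.
+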
 comment "CPU frequency scaling drivers"
 
 config CPUFREQ_DT
diff -ruw linux-4.4.115/drivers/cpufreq/Kconfig.arm linux-4.4.115-fbx/drivers/cpufreq/Kconfig.arm
--- linux-4.4.115/drivers/cpufreq/Kconfig.arm	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/cpufreq/Kconfig.arm	2019-01-22 16:16:23.075242531 +0100
@@ -255,3 +255,10 @@
 	  support for its operation.
 
 	  If in doubt, say N.
+
+config CPU_FREQ_MSM
+	bool "MSM CPUFreq support"
+	depends on CPU_FREQ
+	default y
+	help
+	  This enables the CPUFreq driver for Qualcomm CPUs.
diff -ruw linux-4.4.115/drivers/cpufreq/Makefile linux-4.4.115-fbx/drivers/cpufreq/Makefile
--- linux-4.4.115/drivers/cpufreq/Makefile	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/cpufreq/Makefile	2019-10-29 09:26:23.493201710 +0100
@@ -1,5 +1,5 @@
 # CPUfreq core
-obj-$(CONFIG_CPU_FREQ)			+= cpufreq.o freq_table.o
+obj-$(CONFIG_CPU_FREQ)			+= cpufreq.o freq_table.o cpufreq_governor_attr_set.o
 
 # CPUfreq stats
 obj-$(CONFIG_CPU_FREQ_STAT)             += cpufreq_stats.o
@@ -10,7 +10,9 @@
 obj-$(CONFIG_CPU_FREQ_GOV_USERSPACE)	+= cpufreq_userspace.o
 obj-$(CONFIG_CPU_FREQ_GOV_ONDEMAND)	+= cpufreq_ondemand.o
 obj-$(CONFIG_CPU_FREQ_GOV_CONSERVATIVE)	+= cpufreq_conservative.o
+obj-$(CONFIG_CPU_FREQ_GOV_INTERACTIVE)	+= cpufreq_interactive.o
 obj-$(CONFIG_CPU_FREQ_GOV_COMMON)		+= cpufreq_governor.o
+obj-$(CONFIG_CPU_BOOST)			+= cpu-boost.o
 
 obj-$(CONFIG_CPUFREQ_DT)		+= cpufreq-dt.o
 
@@ -77,7 +79,7 @@
 obj-$(CONFIG_ARM_TEGRA124_CPUFREQ)	+= tegra124-cpufreq.o
 obj-$(CONFIG_ARM_VEXPRESS_SPC_CPUFREQ)	+= vexpress-spc-cpufreq.o
 obj-$(CONFIG_ACPI_CPPC_CPUFREQ) += cppc_cpufreq.o
-
+obj-$(CONFIG_CPU_FREQ_MSM)              += qcom-cpufreq.o
 
 ##################################################################################
 # PowerPC platform drivers
diff -ruw linux-4.4.115/drivers/cpuidle/cpuidle.c linux-4.4.115-fbx/drivers/cpuidle/cpuidle.c
--- linux-4.4.115/drivers/cpuidle/cpuidle.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/cpuidle/cpuidle.c	2019-01-22 16:16:23.095242712 +0100
@@ -193,7 +193,7 @@
 	}
 
 	/* Take note of the planned idle state. */
-	sched_idle_set_state(target_state);
+	sched_idle_set_state(target_state, index);
 
 	trace_cpu_idle_rcuidle(index, dev->cpu);
 	time_start = ktime_get();
@@ -206,7 +206,7 @@
 	trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, dev->cpu);
 
 	/* The cpu is no longer idle or about to enter idle. */
-	sched_idle_set_state(NULL);
+	sched_idle_set_state(NULL, -1);
 
 	if (broadcast) {
 		if (WARN_ON_ONCE(!irqs_disabled()))
@@ -614,16 +614,31 @@
 
 #ifdef CONFIG_SMP
 
+static void smp_callback(void *v)
+{
+	/* we already woke the CPU up, nothing more to do */
+}
+
 /*
  * This function gets called when a part of the kernel has a new latency
- * requirement.  This means we need to get all processors out of their C-state,
- * and then recalculate a new suitable C-state. Just do a cross-cpu IPI; that
- * wakes them all right up.
+ * requirement.  This means we need to get only those processors out of their
+ * C-state whose QoS requirement has changed, and then recalculate a new
+ * suitable C-state. Just do a cross-cpu IPI to those CPUs; that wakes them
+ * right up.
  */
 static int cpuidle_latency_notify(struct notifier_block *b,
 		unsigned long l, void *v)
 {
-	wake_up_all_idle_cpus();
+	struct cpumask cpus;
+
+	if (v)
+		cpumask_andnot(&cpus, v, cpu_isolated_mask);
+	else
+		cpumask_andnot(&cpus, cpu_online_mask, cpu_isolated_mask);
+
+	preempt_disable();
+	smp_call_function_many(&cpus, smp_callback, NULL, 1);
+	preempt_enable();
+
 	return NOTIFY_OK;
 }
 
diff -ruw linux-4.4.115/drivers/cpuidle/governors/menu.c linux-4.4.115-fbx/drivers/cpuidle/governors/menu.c
--- linux-4.4.115/drivers/cpuidle/governors/menu.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/cpuidle/governors/menu.c	2019-01-22 16:16:23.095242712 +0100
@@ -178,7 +178,12 @@
 
 	/* for higher loadavg, we are more reluctant */
 
-	mult += 2 * get_loadavg(load);
+	/*
+	 * this doesn't work as intended - it is almost always 0, but can
+	 * sometimes, depending on workload, spike very high into the hundreds
+	 * even when the average cpu load is under 10%.
+	 */
+	/* mult += 2 * get_loadavg(); */
 
 	/* for IO wait tasks (per cpu!) we add 5x each */
 	mult += 10 * nr_iowaiters;
diff -ruw linux-4.4.115/drivers/cpuidle/Makefile linux-4.4.115-fbx/drivers/cpuidle/Makefile
--- linux-4.4.115/drivers/cpuidle/Makefile	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/cpuidle/Makefile	2019-01-22 16:16:23.091242676 +0100
@@ -27,3 +27,4 @@
 # POWERPC drivers
 obj-$(CONFIG_PSERIES_CPUIDLE)		+= cpuidle-pseries.o
 obj-$(CONFIG_POWERNV_CPUIDLE)		+= cpuidle-powernv.o
+obj-$(CONFIG_MSM_PM) += lpm-levels.o  lpm-levels-of.o lpm-workarounds.o
diff -ruw linux-4.4.115/drivers/crypto/Kconfig linux-4.4.115-fbx/drivers/crypto/Kconfig
--- linux-4.4.115/drivers/crypto/Kconfig	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/crypto/Kconfig	2019-01-22 16:16:23.095242712 +0100
@@ -346,6 +346,64 @@
 	  Select this to offload Samsung S5PV210 or S5PC110, Exynos from AES
 	  algorithms execution.
 
+config CRYPTO_DEV_QCE50
+        bool
+
+config FIPS_ENABLE
+        bool "FIPS140-2 compliant build"
+        default n
+        help
+          Select this to make the current build FIPS140-2
+          compliant. It enables the additional code paths
+          required for compliance. Say Y here to enable.
+
+config CRYPTO_DEV_QCRYPTO
+        tristate "Qualcomm Crypto accelerator"
+        select CRYPTO_DES
+        select CRYPTO_ALGAPI
+        select CRYPTO_AUTHENC
+        select CRYPTO_BLKCIPHER
+        default n
+        help
+          This driver supports Qualcomm crypto acceleration.
+          To compile this driver as a module, choose M here: the
+          module will be called qcrypto.
+
+config CRYPTO_DEV_QCOM_MSM_QCE
+        tristate "Qualcomm Crypto Engine (QCE) module"
+        select  CRYPTO_DEV_QCE50 if ARCH_APQ8084 || ARCH_MSM8916 || ARCH_MSM8994 || ARCH_MSM8996 || ARCH_MSM8992 || ARCH_MSMTITANIUM || ARCH_MSM8909 || ARCH_MSM8998 || ARCH_SDM660 || ARCH_SDM630
+        default n
+        help
+          This driver supports the Qualcomm Crypto Engine in MSM7x30, MSM8660,
+          MSM8x55, MSM8960, MSM9615, MSM8916, MSM8994, MSM8996, FSM9900,
+          MSMTITANIUM, APQ8084, MSM8998, SDM660 and SDM630.
+
+          To compile this driver as a module, choose M here.
+          For MSM7x30, MSM8660 and MSM8x55 the module is called qce.
+          For MSM8960, APQ8064 and MSM9615 the module is called qce40.
+          For MSM8974, MSM8916, MSM8994, MSM8996, MSM8992, MSMTITANIUM,
+          APQ8084, MSM8998, SDM660 and SDM630 the module is called qce50.
+
+config CRYPTO_DEV_QCEDEV
+        tristate "QCEDEV Interface to CE module"
+        default n
+        help
+          This driver supports Qualcomm QCEDEV Crypto in MSM7x30, MSM8660,
+          MSM8960, MSM9615, APQ8064, MSM8974, MSM8916, MSM8994, MSM8996,
+          APQ8084, MSM8998, SDM660 and SDM630. It exposes the
+          interface to the QCE hardware accelerator via IOCTLs.
+
+          To compile this driver as a module, choose M here: the
+          module will be called qcedev.
+
+config CRYPTO_DEV_OTA_CRYPTO
+        tristate "OTA Crypto module"
+        help
+          This driver supports Qualcomm OTA Crypto in the FSM9xxx.
+          To compile this driver as a module, choose M here: the
+          module will be called ota_crypto.
+
 config CRYPTO_DEV_NX
 	bool "Support for IBM PowerPC Nest (NX) cryptographic acceleration"
 	depends on PPC64
@@ -498,4 +556,9 @@
 	  To compile this driver as a module, choose M here: the module
 	  will be called sun4i-ss.
 
+if ARCH_QCOM
+source drivers/crypto/msm/Kconfig
+endif # ARCH_QCOM
+
 endif # CRYPTO_HW
diff -ruw linux-4.4.115/drivers/crypto/Makefile linux-4.4.115-fbx/drivers/crypto/Makefile
--- linux-4.4.115/drivers/crypto/Makefile	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/crypto/Makefile	2019-01-22 16:16:23.095242712 +0100
@@ -29,3 +29,4 @@
 obj-$(CONFIG_CRYPTO_DEV_QCE) += qce/
 obj-$(CONFIG_CRYPTO_DEV_VMX) += vmx/
 obj-$(CONFIG_CRYPTO_DEV_SUN4I_SS) += sunxi-ss/
+obj-$(CONFIG_ARCH_QCOM) += msm/
diff -ruw linux-4.4.115/drivers/devfreq/devfreq.c linux-4.4.115-fbx/drivers/devfreq/devfreq.c
--- linux-4.4.115/drivers/devfreq/devfreq.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/devfreq/devfreq.c	2019-10-29 09:26:23.513201906 +0100
@@ -69,11 +69,34 @@
 }
 
 /**
+ * devfreq_set_freq_limits() - Set min and max frequency from freq_table
+ * @devfreq:	the devfreq instance
+ */
+static void devfreq_set_freq_limits(struct devfreq *devfreq)
+{
+	int idx;
+	unsigned long min = ~0, max = 0;
+
+	if (!devfreq->profile->freq_table)
+		return;
+
+	for (idx = 0; idx < devfreq->profile->max_state; idx++) {
+		if (min > devfreq->profile->freq_table[idx])
+			min = devfreq->profile->freq_table[idx];
+		if (max < devfreq->profile->freq_table[idx])
+			max = devfreq->profile->freq_table[idx];
+	}
+
+	devfreq->min_freq = min;
+	devfreq->max_freq = max;
+}
+
+/**
  * devfreq_get_freq_level() - Lookup freq_table for the frequency
  * @devfreq:	the devfreq instance
  * @freq:	the target frequency
  */
-static int devfreq_get_freq_level(struct devfreq *devfreq, unsigned long freq)
+int devfreq_get_freq_level(struct devfreq *devfreq, unsigned long freq)
 {
 	int lev;
 
@@ -83,6 +106,7 @@
 
 	return -EINVAL;
 }
+EXPORT_SYMBOL(devfreq_get_freq_level);
 
 /**
  * devfreq_update_status() - Update statistics of devfreq behavior
@@ -172,7 +196,7 @@
 		return -EINVAL;
 
 	/* Reevaluate the proper frequency */
-	err = devfreq->governor->get_target_freq(devfreq, &freq);
+	err = devfreq->governor->get_target_freq(devfreq, &freq, &flags);
 	if (err)
 		return err;
 
@@ -486,6 +510,7 @@
 						devfreq->profile->max_state,
 						GFP_KERNEL);
 	devfreq->last_stat_updated = jiffies;
+	devfreq_set_freq_limits(devfreq);
 
 	dev_set_name(&devfreq->dev, "%s", dev_name(dev));
 	err = device_register(&devfreq->dev);
@@ -536,7 +561,6 @@
 		return -EINVAL;
 
 	device_unregister(&devfreq->dev);
-	put_device(&devfreq->dev);
 
 	return 0;
 }
@@ -782,7 +806,7 @@
 	struct devfreq *df = to_devfreq(dev);
 	int ret;
 	char str_governor[DEVFREQ_NAME_LEN + 1];
-	struct devfreq_governor *governor;
+	const struct devfreq_governor *governor, *prev_gov;
 
 	ret = sscanf(buf, "%" __stringify(DEVFREQ_NAME_LEN) "s", str_governor);
 	if (ret != 1)
@@ -807,12 +831,21 @@
 			goto out;
 		}
 	}
+	prev_gov = df->governor;
 	df->governor = governor;
 	strncpy(df->governor_name, governor->name, DEVFREQ_NAME_LEN);
 	ret = df->governor->event_handler(df, DEVFREQ_GOV_START, NULL);
-	if (ret)
+	if (ret) {
 		dev_warn(dev, "%s: Governor %s not started(%d)\n",
 			 __func__, df->governor->name, ret);
+		if (prev_gov) {
+			df->governor = prev_gov;
+			strncpy(df->governor_name, prev_gov->name,
+				DEVFREQ_NAME_LEN);
+			df->governor->event_handler(df, DEVFREQ_GOV_START,
+						    NULL);
+		}
+	}
 out:
 	mutex_unlock(&devfreq_list_lock);
 
@@ -969,19 +1002,26 @@
 	struct devfreq *df = to_devfreq(d);
 	struct device *dev = df->dev.parent;
 	struct dev_pm_opp *opp;
+	unsigned int i = 0, max_state = df->profile->max_state;
+	bool use_opp;
 	ssize_t count = 0;
 	unsigned long freq = 0;
 
 	rcu_read_lock();
-	do {
+	use_opp = dev_pm_opp_get_opp_count(dev) > 0;
+	while (use_opp || i < max_state) {
+		if (use_opp) {
-		opp = dev_pm_opp_find_freq_ceil(dev, &freq);
-		if (IS_ERR(opp))
-			break;
+			opp = dev_pm_opp_find_freq_ceil(dev, &freq);
+			if (IS_ERR(opp))
+				break;
+		} else {
+			freq = df->profile->freq_table[i++];
+		}
 
 		count += scnprintf(&buf[count], (PAGE_SIZE - count - 2),
 				   "%lu ", freq);
 		freq++;
-	} while (1);
+	}
 	rcu_read_unlock();
 
 	/* Truncate the trailing space */
diff -ruw linux-4.4.115/drivers/devfreq/governor.h linux-4.4.115-fbx/drivers/devfreq/governor.h
--- linux-4.4.115/drivers/devfreq/governor.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/devfreq/governor.h	2019-01-22 16:16:23.135243074 +0100
@@ -4,6 +4,8 @@
  * Copyright (C) 2011 Samsung Electronics
  *	MyungJoo Ham <myungjoo.ham@samsung.com>
  *
+ * Copyright (c) 2018 The Linux Foundation. All rights reserved.
+ *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
@@ -25,9 +27,6 @@
 #define DEVFREQ_GOV_SUSPEND			0x4
 #define DEVFREQ_GOV_RESUME			0x5
 
-/* Caution: devfreq->lock must be locked before calling update_devfreq */
-extern int update_devfreq(struct devfreq *devfreq);
-
 extern void devfreq_monitor_start(struct devfreq *devfreq);
 extern void devfreq_monitor_stop(struct devfreq *devfreq);
 extern void devfreq_monitor_suspend(struct devfreq *devfreq);
@@ -38,4 +37,5 @@
 extern int devfreq_add_governor(struct devfreq_governor *governor);
 extern int devfreq_remove_governor(struct devfreq_governor *governor);
 
+extern int devfreq_get_freq_level(struct devfreq *devfreq, unsigned long freq);
 #endif /* _GOVERNOR_H */
diff -ruw linux-4.4.115/drivers/devfreq/governor_performance.c linux-4.4.115-fbx/drivers/devfreq/governor_performance.c
--- linux-4.4.115/drivers/devfreq/governor_performance.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/devfreq/governor_performance.c	2019-01-22 16:16:23.135243074 +0100
@@ -14,7 +14,8 @@
 #include "governor.h"
 
 static int devfreq_performance_func(struct devfreq *df,
-				    unsigned long *freq)
+				    unsigned long *freq,
+				u32 *flag)
 {
 	/*
 	 * target callback should be able to get floor value as
@@ -31,13 +32,26 @@
 				unsigned int event, void *data)
 {
 	int ret = 0;
+	unsigned long freq;
 
-	if (event == DEVFREQ_GOV_START) {
-		mutex_lock(&devfreq->lock);
+	mutex_lock(&devfreq->lock);
+	freq = devfreq->previous_freq;
+	switch (event) {
+	case DEVFREQ_GOV_START:
+		devfreq->profile->target(devfreq->dev.parent,
+				&freq,
+				DEVFREQ_FLAG_WAKEUP_MAXFREQ);
+		/* fall through */
+	case DEVFREQ_GOV_RESUME:
 		ret = update_devfreq(devfreq);
-		mutex_unlock(&devfreq->lock);
+		break;
+	case DEVFREQ_GOV_SUSPEND:
+		devfreq->profile->target(devfreq->dev.parent,
+				&freq,
+				DEVFREQ_FLAG_WAKEUP_MAXFREQ);
+		break;
 	}
-
+	mutex_unlock(&devfreq->lock);
 	return ret;
 }
 
diff -ruw linux-4.4.115/drivers/devfreq/governor_powersave.c linux-4.4.115-fbx/drivers/devfreq/governor_powersave.c
--- linux-4.4.115/drivers/devfreq/governor_powersave.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/devfreq/governor_powersave.c	2019-01-22 16:16:23.135243074 +0100
@@ -14,7 +14,8 @@
 #include "governor.h"
 
 static int devfreq_powersave_func(struct devfreq *df,
-				  unsigned long *freq)
+				  unsigned long *freq,
+				u32 *flag)
 {
 	/*
 	 * target callback should be able to get ceiling value as
@@ -29,7 +30,7 @@
 {
 	int ret = 0;
 
-	if (event == DEVFREQ_GOV_START) {
+	if (event == DEVFREQ_GOV_START || event == DEVFREQ_GOV_RESUME) {
 		mutex_lock(&devfreq->lock);
 		ret = update_devfreq(devfreq);
 		mutex_unlock(&devfreq->lock);
diff -ruw linux-4.4.115/drivers/devfreq/governor_simpleondemand.c linux-4.4.115-fbx/drivers/devfreq/governor_simpleondemand.c
--- linux-4.4.115/drivers/devfreq/governor_simpleondemand.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/devfreq/governor_simpleondemand.c	2019-01-22 16:16:23.135243074 +0100
@@ -19,7 +19,8 @@
 #define DFSO_UPTHRESHOLD	(90)
 #define DFSO_DOWNDIFFERENCTIAL	(5)
 static int devfreq_simple_ondemand_func(struct devfreq *df,
-					unsigned long *freq)
+					unsigned long *freq,
+					u32 *flag)
 {
 	int err;
 	struct devfreq_dev_status *stat;
@@ -28,6 +29,7 @@
 	unsigned int dfso_downdifferential = DFSO_DOWNDIFFERENCTIAL;
 	struct devfreq_simple_ondemand_data *data = df->data;
 	unsigned long max = (df->max_freq) ? df->max_freq : UINT_MAX;
+	unsigned long min = (df->min_freq) ? df->min_freq : 0;
 
 	err = devfreq_update_stats(df);
 	if (err)
@@ -45,18 +47,31 @@
 	    dfso_upthreshold < dfso_downdifferential)
 		return -EINVAL;
 
-	/* Assume MAX if it is going to be divided by zero */
-	if (stat->total_time == 0) {
-		*freq = max;
-		return 0;
-	}
-
 	/* Prevent overflow */
 	if (stat->busy_time >= (1 << 24) || stat->total_time >= (1 << 24)) {
 		stat->busy_time >>= 7;
 		stat->total_time >>= 7;
 	}
 
+	if (data && data->simple_scaling) {
+		if (stat->busy_time * 100 >
+		    stat->total_time * dfso_upthreshold)
+			*freq = max;
+		else if (stat->busy_time * 100 <
+			 stat->total_time *
+			 (dfso_upthreshold - dfso_downdifferential))
+			*freq = min;
+		else
+			*freq = df->previous_freq;
+		return 0;
+	}
+
+	/* Assume MAX if it is going to be divided by zero */
+	if (stat->total_time == 0) {
+		*freq = max;
+		return 0;
+	}
+
 	/* Set MAX if it's busy enough */
 	if (stat->busy_time * 100 >
 	    stat->total_time * dfso_upthreshold) {
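
With the default tunables above (upthreshold 90, downdifferential 5), the new
simple_scaling path reduces to a three-way threshold: busy_time 95 of total_time 100
gives 95 x 100 = 9500 > 100 x 90 = 9000, so *freq = max; busy_time 80 gives
8000 < 100 x (90 - 5) = 8500, so *freq = min; anything between 85% and 90% busy holds
df->previous_freq. Because this path returns before the divide-by-zero guard that was
moved below it, a window with total_time == 0 (and hence busy_time == 0) also holds
previous_freq rather than jumping to max.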
diff -ruw linux-4.4.115/drivers/devfreq/governor_userspace.c linux-4.4.115-fbx/drivers/devfreq/governor_userspace.c
--- linux-4.4.115/drivers/devfreq/governor_userspace.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/devfreq/governor_userspace.c	2019-01-22 16:16:23.135243074 +0100
@@ -22,7 +22,8 @@
 	bool valid;
 };
 
-static int devfreq_userspace_func(struct devfreq *df, unsigned long *freq)
+static int devfreq_userspace_func(struct devfreq *df, unsigned long *freq,
+					u32 *flag)
 {
 	struct userspace_data *data = df->data;
 
diff -ruw linux-4.4.115/drivers/devfreq/Kconfig linux-4.4.115-fbx/drivers/devfreq/Kconfig
--- linux-4.4.115/drivers/devfreq/Kconfig	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/devfreq/Kconfig	2019-01-22 16:16:23.131243038 +0100
@@ -64,6 +64,114 @@
 	  Otherwise, the governor does not change the frequnecy
 	  given at the initialization.
 
+config DEVFREQ_GOV_QCOM_ADRENO_TZ
+	tristate "Qualcom  Adreno Trustzone"
+	depends on QCOM_KGSL && QCOM_SCM
+	help
+	  Trustzone based governor for the Adreno GPU.
+	  Sets the frequency using a "on-demand" algorithm.
+	  This governor is unlikely to be useful for other devices.
+
+config DEVFREQ_GOV_CPUFREQ
+	tristate "CPUfreq"
+	depends on CPU_FREQ
+	help
+	  Chooses the device frequency based on the online CPUs' current
+	  frequency and one or more CPU-frequency-to-device-frequency mapping
+	  tables. This governor can be useful for controlling devices such as
+	  DDR, cache, CCI, etc.
+
+config QCOM_BIMC_BWMON
+	tristate "QCOM BIMC Bandwidth monitor hardware"
+	depends on ARCH_QCOM
+	help
+	  The BIMC Bandwidth monitor hardware allows for monitoring the
+	  traffic coming from each master port connected to the BIMC. It also
+	  has the capability to raise an IRQ when the count exceeds a
+	  programmable limit.
+
+config DEVFREQ_GOV_QCOM_GPUBW_MON
+	tristate "GPU BW voting governor"
+	depends on DEVFREQ_GOV_QCOM_ADRENO_TZ
+	help
+	 QTI GPU governor for GPU bus bandwidth voting.
+	 This governor works together with the QTI Adreno Trustzone governor,
+	 and selects bus frequency votes using an "on-demand" algorithm.
+	 This governor is unlikely to be useful for non-QTI devices.
+
+config ARMBW_HWMON
+	tristate "ARM PMU Bandwidth monitor hardware"
+	depends on ARCH_QCOM
+	help
+	  The PMU present on these ARM cores allows for the use of counters to
+	  monitor the traffic coming from each core to the bus. It also has the
+	  capability to raise an IRQ when the counter overflows, which can be
+	  used to get an IRQ when the count exceeds a certain value.
+
+config ARM_MEMLAT_MON
+	tristate "ARM CPU Memory Latency monitor hardware"
+	depends on ARCH_QCOM
+	help
+	  The PMU present on these ARM cores allows for the use of counters to
+	  monitor the memory latency characteristics of an ARM CPU workload.
+	  This driver uses these counters to implement the APIs needed by
+	  the mem_latency devfreq governor.
+
+config QCOMCCI_HWMON
+	tristate "QCOM CCI Cache monitor hardware"
+	depends on ARCH_QCOM
+	help
+	  QCOM CCI has additional PMU counters that can be used to monitor
+	  cache requests. The QCOM CCI hardware monitor device configures these
+	  registers to monitor cache and inform the governor. It can also set
+	  an IRQ when the count exceeds a programmable limit.
+
+config QCOM_M4M_HWMON
+	tristate "QCOM M4M cache monitor hardware"
+	depends on ARCH_QCOM
+	help
+	  QCOM M4M has counters that can be used to monitor requests coming to
+	  M4M. The QCOM M4M hardware monitor device programs the corresponding
+	  registers to monitor cache and inform the governor. It can also set
+	  an IRQ when the count exceeds a programmable limit.
+
+config DEVFREQ_GOV_QCOM_BW_HWMON
+	tristate "HW monitor based governor for device BW"
+	depends on QCOM_BIMC_BWMON
+	help
+	  HW monitor based governor for device to DDR bandwidth voting.
+	  This governor sets the CPU BW vote by using BIMC counters to monitor
+	  the CPU's use of DDR. Since this uses target specific counters it
+	  can conflict with existing profiling tools.  This governor is unlikely
+	  to be useful for non-QCOM devices.
+
+config DEVFREQ_GOV_QCOM_CACHE_HWMON
+	tristate "HW monitor based governor for cache frequency"
+	help
+	  HW monitor based governor for cache frequency scaling. This
+	  governor sets the cache frequency by using PM counters to monitor the
+	  CPU's use of cache. Since this governor uses some of the PM counters
+	  it can conflict with existing profiling tools. This governor is
+	  unlikely to be useful for other devices.
+
+config DEVFREQ_GOV_SPDM_HYP
+	bool "QCOM SPDM Hypervisor Governor"
+	depends on ARCH_QCOM
+	help
+	  Hypervisor based governor for CPU bandwidth voting
+	  for QCOM chipsets.
+	  Sets the frequency using an "on-demand" algorithm.
+	  This governor is unlikely to be useful for other devices.
+
+config DEVFREQ_GOV_MEMLAT
+	tristate "HW monitor based governor for device BW"
+	depends on ARM_MEMLAT_MON
+	help
+	  HW monitor based governor for device to DDR bandwidth voting.
+	  This governor sets the CPU BW vote based on stats obtained from the
+	  memlat monitor if it determines that a workload is memory latency
+	  bound. Since this uses target specific counters it can conflict with
+	  existing profiling tools.
+
 comment "DEVFREQ Drivers"
 
 config ARM_EXYNOS4_BUS_DEVFREQ
@@ -98,6 +206,48 @@
          It reads ACTMON counters of memory controllers and adjusts the
          operating frequencies and voltages with OPP support.
 
+config DEVFREQ_SIMPLE_DEV
+	tristate "Device driver for simple clock device with no status info"
+	select DEVFREQ_GOV_PERFORMANCE
+	select DEVFREQ_GOV_POWERSAVE
+	select DEVFREQ_GOV_USERSPACE
+	select DEVFREQ_GOV_CPUFREQ
+	help
+	  Device driver for simple devices that control their frequency using
+	  clock APIs and don't have any form of status reporting.
+
+config QCOM_DEVFREQ_DEVBW
+	bool "QCOM DEVFREQ device for device master <-> slave IB/AB BW voting"
+	depends on ARCH_QCOM
+	select DEVFREQ_GOV_PERFORMANCE
+	select DEVFREQ_GOV_POWERSAVE
+	select DEVFREQ_GOV_USERSPACE
+	select DEVFREQ_GOV_CPUFREQ
+	default n
+	help
+	  Different devfreq governors use this devfreq device to make CPU to
+	  DDR IB/AB bandwidth votes. This driver provides a SoC topology
+	  agnostic interface so that some of the devfreq governors can be
+	  shared across SoCs.
+
+config SPDM_SCM
+	bool "QCOM SPDM SCM based call support"
+	depends on DEVFREQ_SPDM
+	help
+	  The SPDM driver supports accessing the DCVS algorithm logic via
+	  SCM or HVC calls. This adds support for SPDM interaction with TZ
+	  via SCM based calls. If not selected, hypervisor interaction will
+	  be activated instead.
+
+config DEVFREQ_SPDM
+	bool "QCOM SPDM based bandwidth voting"
+	depends on ARCH_QCOM
+	select DEVFREQ_GOV_SPDM_HYP
+	help
+	  This adds support for SPDM based bandwidth voting on QCOM chipsets.
+	  This driver allows any SPDM based client to vote for bandwidth.
+	  Used with the QCOM SPDM Hypervisor Governor.
+
 source "drivers/devfreq/event/Kconfig"
 
 endif # PM_DEVFREQ
diff -ruw linux-4.4.115/drivers/devfreq/Makefile linux-4.4.115-fbx/drivers/devfreq/Makefile
--- linux-4.4.115/drivers/devfreq/Makefile	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/devfreq/Makefile	2019-01-22 16:16:23.131243038 +0100
@@ -4,11 +4,27 @@
 obj-$(CONFIG_DEVFREQ_GOV_PERFORMANCE)	+= governor_performance.o
 obj-$(CONFIG_DEVFREQ_GOV_POWERSAVE)	+= governor_powersave.o
 obj-$(CONFIG_DEVFREQ_GOV_USERSPACE)	+= governor_userspace.o
+obj-$(CONFIG_DEVFREQ_GOV_QCOM_ADRENO_TZ) += governor_msm_adreno_tz.o
+obj-$(CONFIG_DEVFREQ_GOV_CPUFREQ)	+= governor_cpufreq.o
+obj-$(CONFIG_QCOM_BIMC_BWMON)		+= bimc-bwmon.o
+obj-$(CONFIG_ARMBW_HWMON)		+= armbw-pm.o
+obj-$(CONFIG_ARM_MEMLAT_MON)		+= arm-memlat-mon.o
+obj-$(CONFIG_QCOMCCI_HWMON)		+= msmcci-hwmon.o
+obj-$(CONFIG_QCOM_M4M_HWMON)		+= m4m-hwmon.o
+obj-$(CONFIG_DEVFREQ_GOV_QCOM_BW_HWMON)	+= governor_bw_hwmon.o
+obj-$(CONFIG_DEVFREQ_GOV_QCOM_CACHE_HWMON)	+= governor_cache_hwmon.o
+obj-$(CONFIG_DEVFREQ_GOV_SPDM_HYP) 	+= governor_spdm_bw_hyp.o
+obj-$(CONFIG_DEVFREQ_GOV_QCOM_GPUBW_MON)        += governor_gpubw_mon.o
+obj-$(CONFIG_DEVFREQ_GOV_QCOM_GPUBW_MON) += governor_bw_vbif.o
+obj-$(CONFIG_DEVFREQ_GOV_MEMLAT)	+= governor_memlat.o
 
 # DEVFREQ Drivers
 obj-$(CONFIG_ARM_EXYNOS4_BUS_DEVFREQ)	+= exynos/
 obj-$(CONFIG_ARM_EXYNOS5_BUS_DEVFREQ)	+= exynos/
 obj-$(CONFIG_ARM_TEGRA_DEVFREQ)		+= tegra-devfreq.o
+obj-$(CONFIG_QCOM_DEVFREQ_DEVBW)		+= devfreq_devbw.o
+obj-$(CONFIG_DEVFREQ_SIMPLE_DEV)	+= devfreq_simple_dev.o
+obj-$(CONFIG_DEVFREQ_SPDM) 		+= devfreq_spdm.o devfreq_spdm_debugfs.o
 
 # DEVFREQ Event Drivers
 obj-$(CONFIG_PM_DEVFREQ_EVENT)		+= event/
diff -ruw linux-4.4.115/drivers/dma/Kconfig linux-4.4.115-fbx/drivers/dma/Kconfig
--- linux-4.4.115/drivers/dma/Kconfig	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/dma/Kconfig	2019-01-22 16:16:23.139243110 +0100
@@ -417,6 +417,16 @@
 	  Enable support for the QCOM BAM DMA controller.  This controller
 	  provides DMA capabilities for a variety of on-chip devices.
 
+config QCOM_SPS_DMA
+        tristate "Qualcomm technologies inc DMA driver for sps-BAM"
+        depends on ARCH_QCOM
+        select DMA_ENGINE
+        help
+          Enable support for Qualcomm technologies inc, BAM DMA engine.
+          This DMA-engine-driver is a wrapper of the sps-BAM library. DMA
+          engine callbacks are implemented using the sps-BAM functionality
+          to access HW.
+
 config SIRF_DMA
 	tristate "CSR SiRFprimaII/SiRFmarco DMA support"
 	depends on ARCH_SIRF
diff -ruw linux-4.4.115/drivers/dma/Makefile linux-4.4.115-fbx/drivers/dma/Makefile
--- linux-4.4.115/drivers/dma/Makefile	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/dma/Makefile	2019-01-22 16:16:23.139243110 +0100
@@ -65,5 +65,6 @@
 obj-$(CONFIG_TI_EDMA) += edma.o
 obj-$(CONFIG_XGENE_DMA) += xgene-dma.o
 obj-$(CONFIG_ZX_DMA) += zx296702_dma.o
+obj-$(CONFIG_QCOM_SPS_DMA) += qcom-sps-dma.o
 
 obj-y += xilinx/
diff -ruw linux-4.4.115/drivers/dma-buf/fence.c linux-4.4.115-fbx/drivers/dma-buf/fence.c
--- linux-4.4.115/drivers/dma-buf/fence.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/dma-buf/fence.c	2019-01-22 16:16:23.139243110 +0100
@@ -304,8 +304,12 @@
 	spin_lock_irqsave(fence->lock, flags);
 
 	ret = !list_empty(&cb->node);
-	if (ret)
+	if (ret) {
 		list_del_init(&cb->node);
+		if (list_empty(&fence->cb_list))
+			if (fence->ops->disable_signaling)
+				fence->ops->disable_signaling(fence);
+	}
 
 	spin_unlock_irqrestore(fence->lock, flags);
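
The ->disable_signaling hook called above does not exist in the upstream 4.4
fence_ops; it is introduced by this patch set so a provider can disarm its signaling
source once the last callback is removed. A minimal sketch of a provider wiring it up
(my_timeline, the my_fence_* helpers and the irq field are hypothetical names for
illustration, not the actual in-tree user):

/* Illustrative only: stop the interrupt that would signal the fence
 * once no fence_cb is left on the callback list. */
static void my_fence_disable_signaling(struct fence *fence)
{
	struct my_timeline *tl = container_of(fence->lock,
					      struct my_timeline, lock);

	disable_irq_nosync(tl->irq);
}

static const struct fence_ops my_fence_ops = {
	.get_driver_name   = my_get_driver_name,
	.get_timeline_name = my_get_timeline_name,
	.enable_signaling  = my_fence_enable_signaling,
	.disable_signaling = my_fence_disable_signaling,
	.wait              = fence_default_wait,
};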
 
diff -ruw linux-4.4.115/drivers/edac/edac_core.h linux-4.4.115-fbx/drivers/edac/edac_core.h
--- linux-4.4.115/drivers/edac/edac_core.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/edac/edac_core.h	2019-01-22 16:16:23.171243400 +0100
@@ -223,9 +223,11 @@
 	/* Per instance controls for this edac_device */
 	int log_ue;		/* boolean for logging UEs */
 	int log_ce;		/* boolean for logging CEs */
+	int panic_on_ce;	/* boolean for panic'ing on an CE */
 	int panic_on_ue;	/* boolean for panic'ing on an UE */
 	unsigned poll_msec;	/* number of milliseconds to poll interval */
 	unsigned long delay;	/* number of jiffies for poll_msec */
+	bool defer_work;	/* Create a deferrable work for polling */
 
 	/* Additional top controller level attributes, but specified
 	 * by the low level driver.
diff -ruw linux-4.4.115/drivers/edac/edac_device.c linux-4.4.115-fbx/drivers/edac/edac_device.c
--- linux-4.4.115/drivers/edac/edac_device.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/edac/edac_device.c	2019-01-22 16:16:23.171243400 +0100
@@ -411,9 +411,15 @@
 	 * to used in the time period calculation
 	 * then calc the number of jiffies that represents
 	 */
+	if (!msec)
+		msec = 1000;
 	edac_dev->poll_msec = msec;
 	edac_dev->delay = msecs_to_jiffies(msec);
 
+	if (edac_dev->defer_work)
+		INIT_DEFERRABLE_WORK(&edac_dev->work,
+					edac_device_workq_function);
+	else
 	INIT_DELAYED_WORK(&edac_dev->work, edac_device_workq_function);
 
 	/* optimize here for the 1 second case, which will be normal value, to
@@ -523,7 +529,7 @@
 		 * enable workq processing on this instance,
 		 * default = 1000 msec
 		 */
-		edac_device_workq_setup(edac_dev, 1000);
+		edac_device_workq_setup(edac_dev, edac_dev->poll_msec);
 	} else {
 		edac_dev->op_state = OP_RUNNING_INTERRUPT;
 	}
@@ -608,6 +614,12 @@
 	return edac_dev->log_ue;
 }
 
+static inline int edac_device_get_panic_on_ce(struct edac_device_ctl_info
+					*edac_dev)
+{
+	return edac_dev->panic_on_ce;
+}
+
 static inline int edac_device_get_panic_on_ue(struct edac_device_ctl_info
 					*edac_dev)
 {
@@ -657,6 +669,11 @@
 				"CE: %s instance: %s block: %s '%s'\n",
 				edac_dev->ctl_name, instance->name,
 				block ? block->name : "N/A", msg);
+
+	if (edac_device_get_panic_on_ce(edac_dev))
+		panic("EDAC %s: CE instance: %s block %s '%s'\n",
+			edac_dev->ctl_name, instance->name,
+			block ? block->name : "N/A", msg);
 }
 EXPORT_SYMBOL_GPL(edac_device_handle_ce);
 
diff -ruw linux-4.4.115/drivers/edac/edac_device_sysfs.c linux-4.4.115-fbx/drivers/edac/edac_device_sysfs.c
--- linux-4.4.115/drivers/edac/edac_device_sysfs.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/edac/edac_device_sysfs.c	2019-01-22 16:16:23.171243400 +0100
@@ -62,6 +62,13 @@
 	return count;
 }
 
+/* 'panic_on_ce' */
+static ssize_t edac_device_ctl_panic_on_ce_show(struct edac_device_ctl_info
+						*ctl_info, char *data)
+{
+	return snprintf(data, PAGE_SIZE, "%u\n", ctl_info->panic_on_ce);
+}
+
 /* 'panic_on_ue' */
 static ssize_t edac_device_ctl_panic_on_ue_show(struct edac_device_ctl_info
 						*ctl_info, char *data)
@@ -69,6 +76,21 @@
 	return sprintf(data, "%u\n", ctl_info->panic_on_ue);
 }
 
+static ssize_t edac_device_ctl_panic_on_ce_store(struct edac_device_ctl_info
+						 *ctl_info, const char *data,
+						 size_t count)
+{
+	unsigned long val;
+
+	/* if parameter is zero, turn off flag, if non-zero turn on flag */
+	if (kstrtoul(data, 0, &val) < 0)
+		return -EINVAL;
+
+	ctl_info->panic_on_ce = !!val;
+
+	return count;
+}
+
 static ssize_t edac_device_ctl_panic_on_ue_store(struct edac_device_ctl_info
 						 *ctl_info, const char *data,
 						 size_t count)
@@ -156,6 +178,9 @@
 	edac_device_ctl_log_ue_show, edac_device_ctl_log_ue_store);
 CTL_INFO_ATTR(log_ce, S_IRUGO | S_IWUSR,
 	edac_device_ctl_log_ce_show, edac_device_ctl_log_ce_store);
+CTL_INFO_ATTR(panic_on_ce, S_IRUGO | S_IWUSR,
+	edac_device_ctl_panic_on_ce_show,
+	edac_device_ctl_panic_on_ce_store);
 CTL_INFO_ATTR(panic_on_ue, S_IRUGO | S_IWUSR,
 	edac_device_ctl_panic_on_ue_show,
 	edac_device_ctl_panic_on_ue_store);
@@ -164,6 +189,7 @@
 
 /* Base Attributes of the EDAC_DEVICE ECC object */
 static struct ctl_info_attribute *device_ctrl_attr[] = {
+	&attr_ctl_info_panic_on_ce,
 	&attr_ctl_info_panic_on_ue,
 	&attr_ctl_info_log_ue,
 	&attr_ctl_info_log_ce,
diff -ruw linux-4.4.115/drivers/edac/Kconfig linux-4.4.115-fbx/drivers/edac/Kconfig
--- linux-4.4.115/drivers/edac/Kconfig	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/edac/Kconfig	2019-01-22 16:16:23.167243364 +0100
@@ -390,4 +390,44 @@
 	  Support for error detection and correction on the
 	  APM X-Gene family of SOCs.
 
+config EDAC_CORTEX_ARM64
+	depends on EDAC_MM_EDAC && ARM64
+	bool "ARM Cortex A CPUs L1/L2 Caches"
+	help
+	   Support for error detection and correction on the
+	   ARM Cortex A53 and A57 CPUs. For debugging issues having to do with
+	   stability and overall system health, you should probably say 'Y'
+	   here.
+
+config EDAC_CORTEX_ARM64_PANIC_ON_CE
+	depends on EDAC_CORTEX_ARM64
+	bool "Panic on correctable errors"
+	help
+	   Forcibly cause a kernel panic if a correctable error (CE) is
+	   detected, even though the error is (by definition) correctable and
+	   would otherwise result in no adverse system effects. This can reduce
+	   debugging times on hardware which may be operating at voltages or
+	   frequencies outside normal specification.
+
+	   For production builds, you should definitely say 'N' here.
+
+config EDAC_CORTEX_ARM64_DBE_IRQ_ONLY
+	depends on EDAC_CORTEX_ARM64
+	bool "Only check for parity errors when an irq is generated"
+	help
+	   On ARM64, parity errors will cause an interrupt
+	   to be triggered, but may also cause a data abort to
+	   occur. Only check for EDAC errors on the interrupt path.
+	   If unsure, say no.
+
+config EDAC_CORTEX_ARM64_PANIC_ON_UE
+	depends on EDAC_CORTEX_ARM64
+	bool "Panic on uncorrectable errors"
+	help
+	   Forcibly cause a kernel panic if an uncorrectable error (UE) is
+	   detected. This can reduce debugging times on hardware which may be
+	   operating at voltages or frequencies outside normal specification.
+
+	   For production builds, you should probably say 'N' here.
+
 endif # EDAC
diff -ruw linux-4.4.115/drivers/edac/Makefile linux-4.4.115-fbx/drivers/edac/Makefile
--- linux-4.4.115/drivers/edac/Makefile	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/edac/Makefile	2019-01-22 16:16:23.167243364 +0100
@@ -70,3 +70,4 @@
 obj-$(CONFIG_EDAC_ALTERA_MC)		+= altera_edac.o
 obj-$(CONFIG_EDAC_SYNOPSYS)		+= synopsys_edac.o
 obj-$(CONFIG_EDAC_XGENE)		+= xgene_edac.o
+obj-$(CONFIG_EDAC_CORTEX_ARM64)         += cortex_arm64_edac.o
diff -ruw linux-4.4.115/drivers/extcon/extcon.c linux-4.4.115-fbx/drivers/extcon/extcon.c
--- linux-4.4.115/drivers/extcon/extcon.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/extcon/extcon.c	2019-01-22 16:16:23.183243509 +0100
@@ -63,6 +63,12 @@
 	[EXTCON_JACK_SPDIF_IN]		= "SPDIF-IN",
 	[EXTCON_JACK_SPDIF_OUT]		= "SPDIF-OUT",
 
+	/* connector orientation 0 - CC1, 1 - CC2 */
+	[EXTCON_USB_CC]			= "USB-CC",
+
+	/* connector speed 0 - High Speed, 1 - Super Speed */
+	[EXTCON_USB_SPEED]		= "USB-SPEED",
+
 	/* Display external connector */
 	[EXTCON_DISP_HDMI]		= "HDMI",
 	[EXTCON_DISP_MHL]		= "MHL",
@@ -78,7 +84,7 @@
 };
 
 static struct class *extcon_class;
-#if defined(CONFIG_ANDROID)
+#if defined(CONFIG_ANDROID) && !IS_ENABLED(CONFIG_SWITCH)
 static struct class_compat *switch_class;
 #endif /* CONFIG_ANDROID */
 
@@ -635,7 +641,7 @@
 			return PTR_ERR(extcon_class);
 		extcon_class->dev_groups = extcon_groups;
 
-#if defined(CONFIG_ANDROID)
+#if defined(CONFIG_ANDROID) && !IS_ENABLED(CONFIG_SWITCH)
 		switch_class = class_compat_register("switch");
 		if (WARN(!switch_class, "cannot allocate"))
 			return -ENOMEM;
@@ -921,7 +927,7 @@
 		put_device(&edev->dev);
 		goto err_dev;
 	}
-#if defined(CONFIG_ANDROID)
+#if defined(CONFIG_ANDROID) && !IS_ENABLED(CONFIG_SWITCH)
 	if (switch_class)
 		ret = class_compat_create_link(switch_class, &edev->dev, NULL);
 #endif /* CONFIG_ANDROID */
@@ -1010,7 +1016,7 @@
 		kfree(edev->cables);
 	}
 
-#if defined(CONFIG_ANDROID)
+#if defined(CONFIG_ANDROID) && !IS_ENABLED(CONFIG_SWITCH)
 	if (switch_class)
 		class_compat_remove_link(switch_class, &edev->dev, NULL);
 #endif
@@ -1139,7 +1145,7 @@
 
 static void __exit extcon_class_exit(void)
 {
-#if defined(CONFIG_ANDROID)
+#if defined(CONFIG_ANDROID) && !IS_ENABLED(CONFIG_SWITCH)
 	class_compat_unregister(switch_class);
 #endif
 	class_destroy(extcon_class);
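
A hedged sketch of how a Type-C driver might report the two new IDs through the
4.4-era extcon API (edev, cc2_active and super_speed are placeholder names):

	/* EXTCON_USB_CC: 0 = CC1, 1 = CC2; EXTCON_USB_SPEED: 0 = HS, 1 = SS */
	extcon_set_cable_state_(edev, EXTCON_USB_CC, cc2_active);
	extcon_set_cable_state_(edev, EXTCON_USB_SPEED, super_speed);
	extcon_set_cable_state_(edev, EXTCON_USB, true);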
diff -ruw linux-4.4.115/drivers/firmware/efi/libstub/Makefile linux-4.4.115-fbx/drivers/firmware/efi/libstub/Makefile
--- linux-4.4.115/drivers/firmware/efi/libstub/Makefile	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/firmware/efi/libstub/Makefile	2019-01-22 16:16:23.191243581 +0100
@@ -8,20 +8,24 @@
 cflags-$(CONFIG_X86_64)		:= -mcmodel=small
 cflags-$(CONFIG_X86)		+= -m$(BITS) -D__KERNEL__ $(LINUX_INCLUDE) -O2 \
 				   -fPIC -fno-strict-aliasing -mno-red-zone \
-				   -mno-mmx -mno-sse -DDISABLE_BRANCH_PROFILING
+				   -mno-mmx -mno-sse
 
-cflags-$(CONFIG_ARM64)		:= $(subst -pg,,$(KBUILD_CFLAGS))
+cflags-$(CONFIG_ARM64)		:= $(subst -pg,,$(KBUILD_CFLAGS)) -fpie
 cflags-$(CONFIG_ARM)		:= $(subst -pg,,$(KBUILD_CFLAGS)) \
 				   -fno-builtin -fpic -mno-single-pic-base
 
 cflags-$(CONFIG_EFI_ARMSTUB)	+= -I$(srctree)/scripts/dtc/libfdt
 
-KBUILD_CFLAGS			:= $(cflags-y) \
+KBUILD_CFLAGS			:= $(cflags-y) -DDISABLE_BRANCH_PROFILING \
 				   $(call cc-option,-ffreestanding) \
 				   $(call cc-option,-fno-stack-protector)
 
 GCOV_PROFILE			:= n
 KASAN_SANITIZE			:= n
+UBSAN_SANITIZE			:= n
+
+# Prevents link failures: __sanitizer_cov_trace_pc() is not linked in.
+KCOV_INSTRUMENT			:= n
 
 lib-y				:= efi-stub-helper.o
 
@@ -34,7 +38,8 @@
 lib-$(CONFIG_EFI_ARMSTUB)	+= arm-stub.o fdt.o string.o \
 				   $(patsubst %.c,lib-%.o,$(arm-deps))
 
-lib-$(CONFIG_ARM64)		+= arm64-stub.o
+lib-$(CONFIG_ARM)		+= arm32-stub.o
+lib-$(CONFIG_ARM64)		+= arm64-stub.o random.o
 CFLAGS_arm64-stub.o 		:= -DTEXT_OFFSET=$(TEXT_OFFSET)
 
 #
diff -ruw linux-4.4.115/drivers/firmware/efi/Makefile linux-4.4.115-fbx/drivers/firmware/efi/Makefile
--- linux-4.4.115/drivers/firmware/efi/Makefile	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/firmware/efi/Makefile	2019-01-22 16:16:23.191243581 +0100
@@ -18,3 +18,6 @@
 obj-$(CONFIG_EFI_RUNTIME_WRAPPERS)	+= runtime-wrappers.o
 obj-$(CONFIG_EFI_STUB)			+= libstub/
 obj-$(CONFIG_EFI_FAKE_MEMMAP)		+= fake_mem.o
+
+arm-obj-$(CONFIG_EFI)			:= arm-init.o arm-runtime.o
+obj-$(CONFIG_ARM64)			+= $(arm-obj-y)
diff -ruw linux-4.4.115/drivers/firmware/Kconfig linux-4.4.115-fbx/drivers/firmware/Kconfig
--- linux-4.4.115/drivers/firmware/Kconfig	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/firmware/Kconfig	2019-10-29 09:26:23.537202141 +0100
@@ -173,8 +173,12 @@
 	def_bool y
 	depends on QCOM_SCM && ARM64
 
+config HAVE_ARM_SMCCC
+	bool
+
 source "drivers/firmware/broadcom/Kconfig"
 source "drivers/firmware/google/Kconfig"
 source "drivers/firmware/efi/Kconfig"
+source "drivers/firmware/qcom/Kconfig"
 
 endmenu
diff -ruw linux-4.4.115/drivers/firmware/Makefile linux-4.4.115-fbx/drivers/firmware/Makefile
--- linux-4.4.115/drivers/firmware/Makefile	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/firmware/Makefile	2019-01-22 16:16:23.187243545 +0100
@@ -23,3 +23,4 @@
 obj-$(CONFIG_GOOGLE_FIRMWARE)	+= google/
 obj-$(CONFIG_EFI)		+= efi/
 obj-$(CONFIG_UEFI_CPER)		+= efi/
+obj-$(CONFIG_MSM_TZ_LOG)        += qcom/
\ No newline at end of file
diff -ruw linux-4.4.115/drivers/firmware/psci.c linux-4.4.115-fbx/drivers/firmware/psci.c
--- linux-4.4.115/drivers/firmware/psci.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/firmware/psci.c	2019-01-22 16:16:23.195243617 +0100
@@ -13,6 +13,8 @@
 
 #define pr_fmt(fmt) "psci: " fmt
 
+#include <linux/arm-smccc.h>
+#include <linux/cpuidle.h>
 #include <linux/errno.h>
 #include <linux/linkage.h>
 #include <linux/of.h>
@@ -20,10 +22,12 @@
 #include <linux/printk.h>
 #include <linux/psci.h>
 #include <linux/reboot.h>
+#include <linux/slab.h>
 #include <linux/suspend.h>
 
 #include <uapi/linux/psci.h>
 
+#include <asm/cpuidle.h>
 #include <asm/cputype.h>
 #include <asm/system_misc.h>
 #include <asm/smp_plat.h>
@@ -58,8 +62,6 @@
 
 typedef unsigned long (psci_fn)(unsigned long, unsigned long,
 				unsigned long, unsigned long);
-asmlinkage psci_fn __invoke_psci_fn_hvc;
-asmlinkage psci_fn __invoke_psci_fn_smc;
 static psci_fn *invoke_psci_fn;
 
 enum psci_function {
@@ -225,6 +227,130 @@
 			      psci_func_id, 0, 0);
 }
 
+#ifdef CONFIG_CPU_IDLE
+static DEFINE_PER_CPU_READ_MOSTLY(u32 *, psci_power_state);
+
+#ifdef CONFIG_ARM_PSCI
+static int psci_cpu_init(struct device_node *cpu_node, int cpu)
+{
+	return 0;
+}
+#endif
+
+static int psci_dt_cpu_init_idle(struct device_node *cpu_node, int cpu)
+{
+	int i, ret, count = 0;
+	u32 *psci_states;
+	struct device_node *state_node;
+
+	/*
+	 * If the PSCI cpu_suspend function hook has not been initialized
+	 * idle states must not be enabled, so bail out
+	 */
+	if (!psci_ops.cpu_suspend)
+		return -EOPNOTSUPP;
+
+	/* Count idle states */
+	while ((state_node = of_parse_phandle(cpu_node, "cpu-idle-states",
+					      count))) {
+		count++;
+		of_node_put(state_node);
+	}
+
+	if (!count)
+		return -ENODEV;
+
+	psci_states = kcalloc(count, sizeof(*psci_states), GFP_KERNEL);
+	if (!psci_states)
+		return -ENOMEM;
+
+	for (i = 0; i < count; i++) {
+		u32 state;
+
+		state_node = of_parse_phandle(cpu_node, "cpu-idle-states", i);
+
+		ret = of_property_read_u32(state_node,
+					   "arm,psci-suspend-param",
+					   &state);
+		if (ret) {
+			pr_warn(" * %s missing arm,psci-suspend-param property\n",
+				state_node->full_name);
+			of_node_put(state_node);
+			goto free_mem;
+		}
+
+		of_node_put(state_node);
+		pr_debug("psci-power-state %#x index %d\n", state, i);
+		if (!psci_power_state_is_valid(state)) {
+			pr_warn("Invalid PSCI power state %#x\n", state);
+			ret = -EINVAL;
+			goto free_mem;
+		}
+		psci_states[i] = state;
+	}
+	/* Idle states parsed correctly, initialize per-cpu pointer */
+	per_cpu(psci_power_state, cpu) = psci_states;
+	return 0;
+
+free_mem:
+	kfree(psci_states);
+	return ret;
+}
+
+int psci_cpu_init_idle(unsigned int cpu)
+{
+	struct device_node *cpu_node;
+	int ret;
+
+	cpu_node = of_get_cpu_node(cpu, NULL);
+	if (!cpu_node)
+		return -ENODEV;
+
+	ret = psci_dt_cpu_init_idle(cpu_node, cpu);
+
+	of_node_put(cpu_node);
+
+	return ret;
+}
+
+static int psci_suspend_finisher(unsigned long state_id)
+{
+	return psci_ops.cpu_suspend(state_id, virt_to_phys(cpu_resume));
+}
+
+int psci_cpu_suspend_enter(unsigned long state_id)
+{
+	int ret;
+	/*
+	 * idle state_id 0 corresponds to wfi, should never be called
+	 * from the cpu_suspend operations
+	 */
+	if (WARN_ON_ONCE(!state_id))
+		return -EINVAL;
+
+	if (!psci_power_state_loses_context(state_id))
+		ret = psci_ops.cpu_suspend(state_id, 0);
+	else
+		ret = cpu_suspend(state_id, psci_suspend_finisher);
+
+	return ret;
+}
+
+/* ARM specific CPU idle operations */
+#ifdef CONFIG_ARM
+static struct cpuidle_ops psci_cpuidle_ops __initdata = {
+	.suspend = psci_cpu_suspend_enter,
+#ifdef CONFIG_ARM_PSCI
+	.init = psci_cpu_init,
+#else
+	.init = psci_dt_cpu_init_idle,
+#endif
+};
+
+CPUIDLE_METHOD_OF_DECLARE(psci, "psci", &psci_cpuidle_ops);
+#endif
+#endif
+
 static int psci_system_suspend(unsigned long unused)
 {
 	return invoke_psci_fn(PSCI_FN_NATIVE(1_0, SYSTEM_SUSPEND),
@@ -305,6 +431,8 @@
 static void __init psci_0_2_set_functions(void)
 {
 	pr_info("Using standard PSCI v0.2 function IDs\n");
+	psci_ops.get_version = psci_get_version;
+
 	psci_function_id[PSCI_FN_CPU_SUSPEND] =
 					PSCI_FN_NATIVE(0_2, CPU_SUSPEND);
 	psci_ops.cpu_suspend = psci_cpu_suspend;
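
A hedged sketch of how a cpuidle backend might use the hooks added above
(my_enter_idle is illustrative; a real caller obtains the states array via the
psci_cpu_init_idle()/psci_dt_cpu_init_idle() parsing shown in the hunk):

/* Illustrative: enter idle state @index, where index 0 is plain WFI and
 * states[index - 1] holds the arm,psci-suspend-param value parsed from DT. */
static int my_enter_idle(u32 *states, int index)
{
	if (index == 0) {
		cpu_do_idle();		/* WFI, never goes through PSCI */
		return 0;
	}

	return psci_cpu_suspend_enter(states[index - 1]);
}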
diff -ruw linux-4.4.115/drivers/gpio/gpiolib-acpi.c linux-4.4.115-fbx/drivers/gpio/gpiolib-acpi.c
--- linux-4.4.115/drivers/gpio/gpiolib-acpi.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/gpio/gpiolib-acpi.c	2019-01-22 16:16:23.211243762 +0100
@@ -597,7 +597,7 @@
 	int idx, i;
 
 	for (i = 0, idx = 0; idx <= index; i++) {
-		struct acpi_gpio_info info;
+		struct acpi_gpio_info info = {0, 0};
 		struct gpio_desc *desc;
 
 		desc = acpi_get_gpiod_by_index(adev, NULL, i, &info);
diff -ruw linux-4.4.115/drivers/gpio/gpio-pca953x.c linux-4.4.115-fbx/drivers/gpio/gpio-pca953x.c
--- linux-4.4.115/drivers/gpio/gpio-pca953x.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/gpio/gpio-pca953x.c	2019-01-22 16:16:23.207243726 +0100
@@ -14,6 +14,7 @@
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
 #include <linux/interrupt.h>
 #include <linux/i2c.h>
 #include <linux/platform_data/pca953x.h>
@@ -681,8 +682,16 @@
 		invert = pdata->invert;
 		chip->names = pdata->names;
 	} else {
+		struct gpio_desc *reset_gpio;
+
 		chip->gpio_start = -1;
 		irq_base = 0;
+
+		/* See if we need to de-assert a reset pin */
+		reset_gpio = devm_gpiod_get_optional(&client->dev, "reset",
+						     GPIOD_OUT_LOW);
+		if (IS_ERR(reset_gpio))
+			return PTR_ERR(reset_gpio);
 	}
 
 	chip->client = client;
diff -ruw linux-4.4.115/drivers/gpio/Kconfig linux-4.4.115-fbx/drivers/gpio/Kconfig
--- linux-4.4.115/drivers/gpio/Kconfig	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/gpio/Kconfig	2019-10-29 09:26:23.541202180 +0100
@@ -338,6 +338,25 @@
 	help
 	  Say yes here to support the PXA GPIO device
 
+config GPIO_QPNP_PIN
+	tristate "Qualcomm Technologies, Inc. QPNP GPIO support"
+	depends on SPMI
+	help
+	  Say 'y' here to include support for the Qualcomm Technologies, Inc.
+	  QPNP GPIO driver.  This driver supports Device Tree and allows a
+	  device_node to be registered as a gpio-controller.  It does not handle
+	  GPIO interrupts directly; they are handled via the SPMI arbiter
+	  interrupt driver.
+
+config GPIO_QPNP_PIN_DEBUG
+	bool "Qualcomm Technologies, Inc. QPNP GPIO debug support"
+	depends on GPIO_QPNP_PIN && DEBUG_FS
+	help
+	  Say 'y' here to include debug support for the Qualcomm Technologies,
+	  Inc. QPNP GPIO driver.  This provides a userspace debug interface to
+	  get and set all of the supported features of PMIC GPIO and MPP pins
+	  including those which are managed by the gpio framework.
+
 config GPIO_RCAR
 	tristate "Renesas R-Car GPIO"
 	depends on ARCH_SHMOBILE || COMPILE_TEST
diff -ruw linux-4.4.115/drivers/gpio/Makefile linux-4.4.115-fbx/drivers/gpio/Makefile
--- linux-4.4.115/drivers/gpio/Makefile	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/gpio/Makefile	2019-04-24 19:28:47.284498086 +0200
@@ -76,6 +76,7 @@
 obj-$(CONFIG_GPIO_PCH)		+= gpio-pch.o
 obj-$(CONFIG_GPIO_PL061)	+= gpio-pl061.o
 obj-$(CONFIG_GPIO_PXA)		+= gpio-pxa.o
+obj-$(CONFIG_GPIO_QPNP_PIN)	+= qpnp-pin.o
 obj-$(CONFIG_GPIO_RC5T583)	+= gpio-rc5t583.o
 obj-$(CONFIG_GPIO_RDC321X)	+= gpio-rdc321x.o
 obj-$(CONFIG_GPIO_RCAR)		+= gpio-rcar.o
@@ -119,3 +120,4 @@
 obj-$(CONFIG_GPIO_ZEVIO)	+= gpio-zevio.o
 obj-$(CONFIG_GPIO_ZYNQ)		+= gpio-zynq.o
 obj-$(CONFIG_GPIO_ZX)		+= gpio-zx.o
+obj-$(CONFIG_MSM_SMP2P)		+= gpio-msm-smp2p.o
diff -ruw linux-4.4.115/drivers/gpu/drm/drm_edid.c linux-4.4.115-fbx/drivers/gpu/drm/drm_edid.c
--- linux-4.4.115/drivers/gpu/drm/drm_edid.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/gpu/drm/drm_edid.c	2019-10-29 09:26:23.601202767 +0100
@@ -91,6 +91,14 @@
 #define LEVEL_GTF2	2
 #define LEVEL_CVT	3
 
+/* Enum storing luminance types for HDR blocks in EDID */
+enum luminance_value {
+	NO_LUMINANCE_DATA = 3,
+	MAXIMUM_LUMINANCE = 4,
+	FRAME_AVERAGE_LUMINANCE = 5,
+	MINIMUM_LUMINANCE = 6
+};
+
 static struct edid_quirk {
 	char vendor[4];
 	int product_id;
@@ -994,9 +1002,224 @@
 	 .vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
 	/* 64 - 1920x1080@100Hz */
 	{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 297000, 1920, 2448,
-		   2492, 2640, 0, 1080, 1084, 1094, 1125, 0,
+		   2492, 2640, 0, 1080, 1084, 1089, 1125, 0,
 		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
 	 .vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
+	/* 65 - 1280x720@24Hz */
+	{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 59400, 1280, 3040,
+		   3080, 3300, 0, 720, 725, 730, 750, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+	  .vrefresh = 24, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+	/* 66 - 1280x720@25Hz */
+	{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 3700,
+		   3740, 3960, 0, 720, 725, 730, 750, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+	  .vrefresh = 25, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+	/* 67 - 1280x720@30Hz */
+	{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 3040,
+		   3080, 3300, 0, 720, 725, 730, 750, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+	  .vrefresh = 30, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+	/* 68 - 1280x720@50Hz */
+	{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 1720,
+		   1760, 1980, 0, 720, 725, 730, 750, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+	  .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+	/* 69 - 1280x720@60Hz */
+	{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 1390,
+		   1430, 1650, 0, 720, 725, 730, 750, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+	  .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+	/* 70 - 1280x720@100Hz */
+	{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 148500, 1280, 1720,
+		   1760, 1980, 0, 720, 725, 730, 750, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+	  .vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+	/* 71 - 1280x720@120Hz */
+	{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 148500, 1280, 1390,
+		   1430, 1650, 0, 720, 725, 730, 750, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+	  .vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+	/* 72 - 1920x1080@24Hz */
+	{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2558,
+		   2602, 2750, 0, 1080, 1084, 1089, 1125, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+	  .vrefresh = 24, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+	/* 73 - 1920x1080@25Hz */
+	{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2448,
+		   2492, 2640, 0, 1080, 1084, 1089, 1125, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+	  .vrefresh = 25, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+	/* 74 - 1920x1080@30Hz */
+	{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2008,
+		   2052, 2200, 0, 1080, 1084, 1089, 1125, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+	  .vrefresh = 30, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+	/* 75 - 1920x1080@50Hz */
+	{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2448,
+		   2492, 2640, 0, 1080, 1084, 1089, 1125, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+	  .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+	/* 76 - 1920x1080@60Hz */
+	{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2008,
+		   2052, 2200, 0, 1080, 1084, 1089, 1125, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+	  .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+	/* 77 - 1920x1080@100Hz */
+	{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 297000, 1920, 2448,
+		   2492, 2640, 0, 1080, 1084, 1089, 1125, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+	  .vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+	/* 78 - 1920x1080@120Hz */
+	{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 297000, 1920, 2008,
+		   2052, 2200, 0, 1080, 1084, 1089, 1125, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+	  .vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+	/* 79 - 1680x720@24Hz */
+	{ DRM_MODE("1680x720", DRM_MODE_TYPE_DRIVER, 59400, 1680, 3040,
+		   3080, 3300, 0, 720, 725, 730, 750, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+	  .vrefresh = 24, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+	/* 80 - 1680x720@25Hz */
+	{ DRM_MODE("1680x720", DRM_MODE_TYPE_DRIVER, 59400, 1680, 2908,
+		   2948, 3168, 0, 720, 725, 730, 750, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+	  .vrefresh = 25, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+	/* 81 - 1680x720@30Hz */
+	{ DRM_MODE("1680x720", DRM_MODE_TYPE_DRIVER, 59400, 1680, 2380,
+		   2420, 2640, 0, 720, 725, 730, 750, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+	  .vrefresh = 30, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+	/* 82 - 1680x720@50Hz */
+	{ DRM_MODE("1680x720", DRM_MODE_TYPE_DRIVER, 82500, 1680, 1940,
+		   1980, 2200, 0, 720, 725, 730, 750, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+	  .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+	/* 83 - 1680x720@60Hz */
+	{ DRM_MODE("1680x720", DRM_MODE_TYPE_DRIVER, 99000, 1680, 1940,
+		   1980, 2200, 0, 720, 725, 730, 750, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+	  .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+	/* 84 - 1680x720@100Hz */
+	{ DRM_MODE("1680x720", DRM_MODE_TYPE_DRIVER, 165000, 1680, 1740,
+		   1780, 2000, 0, 720, 725, 730, 825, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+	  .vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+	/* 85 - 1680x720@120Hz */
+	{ DRM_MODE("1680x720", DRM_MODE_TYPE_DRIVER, 198000, 1680, 1740,
+		   1780, 2000, 0, 720, 725, 730, 825, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+	  .vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+	/* 86 - 2560x1080@24Hz */
+	{ DRM_MODE("2560x1080", DRM_MODE_TYPE_DRIVER, 99000, 2560, 3558,
+		   3602, 3750, 0, 1080, 1084, 1089, 1100, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+	  .vrefresh = 24, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+	/* 87 - 2560x1080@25Hz */
+	{ DRM_MODE("2560x1080", DRM_MODE_TYPE_DRIVER, 90000, 2560, 3008,
+		   3052, 3200, 0, 1080, 1084, 1089, 1125, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+	  .vrefresh = 25, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+	/* 88 - 2560x1080@30Hz */
+	{ DRM_MODE("2560x1080", DRM_MODE_TYPE_DRIVER, 118800, 2560, 3328,
+		   3372, 3520, 0, 1080, 1084, 1089, 1125, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+	  .vrefresh = 30, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+	/* 89 - 2560x1080@50Hz */
+	{ DRM_MODE("2560x1080", DRM_MODE_TYPE_DRIVER, 185625, 2560, 3108,
+		   3152, 3300, 0, 1080, 1084, 1089, 1125, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+	  .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+	/* 90 - 2560x1080@60Hz */
+	{ DRM_MODE("2560x1080", DRM_MODE_TYPE_DRIVER, 198000, 2560, 2808,
+		   2852, 3000, 0, 1080, 1084, 1089, 1100, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+	  .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+	/* 91 - 2560x1080@100Hz */
+	{ DRM_MODE("2560x1080", DRM_MODE_TYPE_DRIVER, 371250, 2560, 2778,
+		   2822, 2970, 0, 1080, 1084, 1089, 1250, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+	  .vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+	/* 92 - 2560x1080@120Hz */
+	{ DRM_MODE("2560x1080", DRM_MODE_TYPE_DRIVER, 495000, 2560, 3108,
+		   3152, 3300, 0, 1080, 1084, 1089, 1250, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+	  .vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+	/* 93 - 3840x2160p@24Hz 16:9 */
+	{ DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 297000, 3840, 5116,
+		   5204, 5500, 0, 2160, 2168, 2178, 2250, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+	  .vrefresh = 24, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
+	/* 94 - 3840x2160p@25Hz 16:9 */
+	{ DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 297000, 3840, 4896,
+		   4984, 5280, 0, 2160, 2168, 2178, 2250, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+	  .vrefresh = 25, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
+	/* 95 - 3840x2160p@30Hz 16:9 */
+	{ DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 297000, 3840, 4016,
+		   4104, 4400, 0, 2160, 2168, 2178, 2250, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+	  .vrefresh = 30, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
+	/* 96 - 3840x2160p@50Hz 16:9 */
+	{ DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 594000, 3840, 4896,
+		   4984, 5280, 0, 2160, 2168, 2178, 2250, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+	  .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
+	/* 97 - 3840x2160p@60Hz 16:9 */
+	{ DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 594000, 3840, 4016,
+		   4104, 4400, 0, 2160, 2168, 2178, 2250, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+	  .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
+	/* 98 - 4096x2160p@24Hz 256:135 */
+	{ DRM_MODE("4096x2160", DRM_MODE_TYPE_DRIVER, 297000, 4096, 5116,
+		   5204, 5500, 0, 2160, 2168, 2178, 2250, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+	  .vrefresh = 24, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_256_135, },
+	/* 99 - 4096x2160p@25Hz 256:135 */
+	{ DRM_MODE("4096x2160", DRM_MODE_TYPE_DRIVER, 297000, 4096, 5064,
+		   5152, 5280, 0, 2160, 2168, 2178, 2250, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+	  .vrefresh = 25, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_256_135, },
+	/* 100 - 4096x2160p@30Hz 256:135 */
+	{ DRM_MODE("4096x2160", DRM_MODE_TYPE_DRIVER, 297000, 4096, 4184,
+		   4272, 4400, 0, 2160, 2168, 2178, 2250, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+	  .vrefresh = 30, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_256_135, },
+	/* 101 - 4096x2160p@50Hz 256:135 */
+	{ DRM_MODE("4096x2160", DRM_MODE_TYPE_DRIVER, 594000, 4096, 5064,
+		   5152, 5280, 0, 2160, 2168, 2178, 2250, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+	  .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_256_135, },
+	/* 102 - 4096x2160p@60Hz 256:135 */
+	{ DRM_MODE("4096x2160", DRM_MODE_TYPE_DRIVER, 594000, 4096, 4184,
+		   4272, 4400, 0, 2160, 2168, 2178, 2250, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+	  .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_256_135, },
+	/* 103 - 3840x2160p@24Hz 64:27 */
+	{ DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 297000, 3840, 5116,
+		   5204, 5500, 0, 2160, 2168, 2178, 2250, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+	  .vrefresh = 24, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+	/* 104 - 3840x2160p@25Hz 64:27 */
+	{ DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 297000, 3840, 4896,
+		   4984, 5280, 0, 2160, 2168, 2178, 2250, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+	  .vrefresh = 25, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+	/* 105 - 3840x2160p@30Hz 64:27 */
+	{ DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 297000, 3840, 4016,
+		   4104, 4400, 0, 2160, 2168, 2178, 2250, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+	  .vrefresh = 30, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+	/* 106 - 3840x2160p@50Hz 64:27 */
+	{ DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 594000, 3840, 4896,
+		   4984, 5280, 0, 2160, 2168, 2178, 2250, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+	  .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+	/* 107 - 3840x2160p@60Hz 64:27 */
+	{ DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 594000, 3840, 4016,
+		   4104, 4400, 0, 2160, 2168, 2178, 2250, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+	  .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
 };
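
The refresh rates in the new entries can be cross-checked from the timings, since
vrefresh = clock / (htotal x vtotal): entry 97 (3840x2160p@60) gives
594000 kHz / (4400 x 2250) = 60 Hz, and entry 65 (1280x720@24) gives
59400 kHz / (3300 x 750) = 24 Hz. The 1094 -> 1089 change in entry 64 fixes the
vsync_end of the 1920x1080@100 mode to the standard five-line vertical sync pulse
(1084 + 5 = 1089) used by every other 1080p entry in this table.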
 
 /*
@@ -1275,6 +1498,10 @@
  * level, drivers must make all reasonable efforts to expose it as an I2C
  * adapter and use drm_get_edid() instead of abusing this function.
  *
+ * The EDID may be overridden using debugfs override_edid or firmware EDID
+ * (drm_load_edid_firmware() and drm.edid_firmware parameter), in this priority
+ * order. Having either of them bypasses actual EDID reads.
+ *
  * Return: Pointer to valid EDID or NULL if we couldn't find any.
  */
 struct edid *drm_do_get_edid(struct drm_connector *connector,
@@ -1285,6 +1512,19 @@
 	int i, j = 0, valid_extensions = 0;
 	u8 *block, *new;
 	bool print_bad_edid = !connector->bad_edid_counter || (drm_debug & DRM_UT_KMS);
+	struct edid *override = NULL;
+
+	if (connector->override_edid)
+		override = drm_edid_duplicate((const struct edid *)
+					      connector->edid_blob_ptr->data);
+
+#ifdef CONFIG_DRM_LOAD_EDID_FIRMWARE
+	if (!override)
+		override = drm_load_edid_firmware(connector);
+#endif
+
+	if (!IS_ERR_OR_NULL(override))
+		return override;
 
 	if ((block = kmalloc(EDID_LENGTH, GFP_KERNEL)) == NULL)
 		return NULL;
@@ -2487,12 +2727,16 @@
 
 	return closure.modes;
 }
-
+#define VIDEO_CAPABILITY_EXTENDED_DATA_BLOCK 0x0
 #define AUDIO_BLOCK	0x01
 #define VIDEO_BLOCK     0x02
 #define VENDOR_BLOCK    0x03
 #define SPEAKER_BLOCK	0x04
+#define HDR_STATIC_METADATA_EXTENDED_DATA_BLOCK 0x06
+#define COLORIMETRY_EXTENDED_DATA_BLOCK 0x05
+#define EXTENDED_TAG  0x07
 #define VIDEO_CAPABILITY_BLOCK	0x07
+#define Y420_VIDEO_DATA_BLOCK	0x0E
 #define EDID_BASIC_AUDIO	(1 << 6)
 #define EDID_CEA_YCRCB444	(1 << 5)
 #define EDID_CEA_YCRCB422	(1 << 4)
@@ -3081,6 +3325,21 @@
 	return hdmi_id == HDMI_IEEE_OUI;
 }
 
+static bool cea_db_is_hdmi_hf_vsdb(const u8 *db)
+{
+	int hdmi_id;
+
+	if (cea_db_tag(db) != VENDOR_BLOCK)
+		return false;
+
+	if (cea_db_payload_len(db) < 7)
+		return false;
+
+	hdmi_id = db[1] | (db[2] << 8) | (db[3] << 16);
+
+	return hdmi_id == HDMI_IEEE_OUI_HF;
+}
+
 #define for_each_cea_db(cea, i, start, end) \
 	for ((i) = (start); (i) < (end) && (i) + cea_db_payload_len(&(cea)[(i)]) < (end); (i) += cea_db_payload_len(&(cea)[(i)]) + 1)
 
@@ -3204,6 +3463,311 @@
 }
 
 static void
+parse_hdmi_hf_vsdb(struct drm_connector *connector, const u8 *db)
+{
+	u8 len = cea_db_payload_len(db);
+
+	if (len < 7)
+		return;
+
+	if (db[4] != 1)
+		return; /* invalid version */
+
+	connector->max_tmds_char = db[5] * 5;
+	connector->scdc_present = db[6] & (1 << 7);
+	connector->rr_capable = db[6] & (1 << 6);
+	connector->flags_3d = db[6] & 0x7;
+	connector->supports_scramble = connector->scdc_present &&
+			(db[6] & (1 << 3));
+
+	DRM_DEBUG_KMS("HDMI v2: max TMDS char %d, "
+			"scdc %s, "
+			"rr %s, "
+			"3D flags 0x%x, "
+			"scramble %s\n",
+			connector->max_tmds_char,
+			connector->scdc_present ? "available" : "not available",
+			connector->rr_capable ? "capable" : "not capable",
+			connector->flags_3d,
+			connector->supports_scramble ?
+				"supported" : "not supported");
+}
+
+static void
+drm_hdmi_extract_vsdbs_info(struct drm_connector *connector, struct edid *edid)
+{
+	const u8 *cea = drm_find_cea_extension(edid);
+	const u8 *db = NULL;
+
+	if (cea && cea_revision(cea) >= 3) {
+		int i, start, end;
+
+		if (cea_db_offsets(cea, &start, &end))
+			return;
+
+		for_each_cea_db(cea, i, start, end) {
+			db = &cea[i];
+
+			if (cea_db_tag(db) == VENDOR_BLOCK) {
+				/* HDMI Vendor-Specific Data Block */
+				if (cea_db_is_hdmi_vsdb(db))
+					parse_hdmi_vsdb(connector, db);
+				/* HDMI Forum Vendor-Specific Data Block */
+				else if (cea_db_is_hdmi_hf_vsdb(db))
+					parse_hdmi_hf_vsdb(connector, db);
+			}
+		}
+	}
+}
+
+/*
+ * drm_extract_vcdb_info - Parse the HDMI Video Capability Data Block
+ * @connector: connector corresponding to the HDMI sink
+ * @db: start of the CEA vendor specific block
+ *
+ * Parses the HDMI VCDB to extract sink info for @connector.
+ */
+static void
+drm_extract_vcdb_info(struct drm_connector *connector, const u8 *db)
+{
+	/*
+	 * Check if the sink specifies underscan
+	 * support for:
+	 * BIT 5: preferred video format
+	 * BIT 3: IT video format
+	 * BIT 1: CE video format
+	 */
+
+	connector->pt_scan_info =
+		(db[2] & (BIT(4) | BIT(5))) >> 4;
+	connector->it_scan_info =
+		(db[2] & (BIT(3) | BIT(2))) >> 2;
+	connector->ce_scan_info =
+		db[2] & (BIT(1) | BIT(0));
+	connector->rgb_qs =
+		db[2] & BIT(6);
+	connector->yuv_qs =
+		db[2] & BIT(7);
+
+	DRM_DEBUG_KMS("Scan Info (pt|it|ce): (%d|%d|%d)",
+			  (int) connector->pt_scan_info,
+			  (int) connector->it_scan_info,
+			  (int) connector->ce_scan_info);
+	DRM_DEBUG_KMS("rgb_quant_range_select %d", connector->rgb_qs);
+	DRM_DEBUG_KMS("ycc_quant_range_select %d", connector->yuv_qs);
+}
+
+static bool drm_edid_is_luminance_value_present(
+u32 block_length, enum luminance_value value)
+{
+	return block_length > NO_LUMINANCE_DATA && value <= block_length;
+}
+
+/*
+ * drm_extract_hdr_db - Parse the HDMI HDR extended block
+ * @connector: connector corresponding to the HDMI sink
+ * @db: start of the HDMI HDR extended block
+ *
+ * Parses the HDMI HDR extended block to extract sink info for @connector.
+ */
+static void
+drm_extract_hdr_db(struct drm_connector *connector, const u8 *db)
+{
+
+	u8 len = 0;
+
+	if (!db) {
+		DRM_ERROR("invalid db\n");
+		return;
+	}
+
+	len = db[0] & 0x1f;
+	/* Byte 3: Electro-Optical Transfer Functions */
+	connector->hdr_eotf = db[2] & 0x3F;
+
+	/* Byte 4: Static Metadata Descriptor Type 1 */
+	connector->hdr_metadata_type_one = (db[3] & BIT(0));
+
+	/* Byte 5: Desired Content Maximum Luminance */
+	if (drm_edid_is_luminance_value_present(len, MAXIMUM_LUMINANCE))
+		connector->hdr_max_luminance =
+			db[MAXIMUM_LUMINANCE];
+
+	/* Byte 6: Desired Content Max Frame-average Luminance */
+	if (drm_edid_is_luminance_value_present(len, FRAME_AVERAGE_LUMINANCE))
+		connector->hdr_avg_luminance =
+			db[FRAME_AVERAGE_LUMINANCE];
+
+	/* Byte 7: Desired Content Min Luminance */
+	if (drm_edid_is_luminance_value_present(len, MINIMUM_LUMINANCE))
+		connector->hdr_min_luminance =
+			db[MINIMUM_LUMINANCE];
+
+	connector->hdr_supported = true;
+
+	DRM_DEBUG_KMS("HDR electro-optical %d\n", connector->hdr_eotf);
+	DRM_DEBUG_KMS("metadata desc 1 %d\n", connector->hdr_metadata_type_one);
+	DRM_DEBUG_KMS("max luminance %d\n", connector->hdr_max_luminance);
+	DRM_DEBUG_KMS("avg luminance %d\n", connector->hdr_avg_luminance);
+	DRM_DEBUG_KMS("min luminance %d\n", connector->hdr_min_luminance);
+}
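
As a worked example with illustrative bytes (not captured from a real sink):
db = { 0xE6, 0x06, 0x0D, 0x01, 0x7A, 0x66, 0x40 } gives len = 0xE6 & 0x1f = 6;
hdr_eotf = 0x0D & 0x3F = 0x0D, i.e. traditional-gamma SDR, SMPTE ST 2084 and HLG
under the CTA-861.3 bit assignments; hdr_metadata_type_one = 1; and since
len > NO_LUMINANCE_DATA and all three indices 4..6 are within len, the
max/average/min luminance codes 0x7A, 0x66 and 0x40 are all captured.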
+
+/*
+ * drm_extract_colorimetry_db - Parse the HDMI colorimetry extended block
+ * @connector: connector corresponding to the HDMI sink
+ * @db: start of the HDMI colorimetry extended block
+ *
+ * Parses the HDMI colorimetry block to extract sink info for @connector.
+ */
+static void
+drm_extract_clrmetry_db(struct drm_connector *connector, const u8 *db)
+{
+
+	if (!db) {
+		DRM_ERROR("invalid db\n");
+		return;
+	}
+
+	/* Bit 0: xvYCC_601 */
+	if (db[2] & BIT(0))
+		connector->color_enc_fmt |= DRM_EDID_COLORIMETRY_xvYCC_601;
+	/* Bit 1: xvYCC_709 */
+	if (db[2] & BIT(1))
+		connector->color_enc_fmt |= DRM_EDID_COLORIMETRY_xvYCC_709;
+	/* Bit 2: sYCC_601 */
+	if (db[2] & BIT(2))
+		connector->color_enc_fmt |= DRM_EDID_COLORIMETRY_sYCC_601;
+	/* Bit 3: ADBYCC_601 */
+	if (db[2] & BIT(3))
+		connector->color_enc_fmt |= DRM_EDID_COLORIMETRY_ADBYCC_601;
+	/* Bit 4: ADB_RGB */
+	if (db[2] & BIT(4))
+		connector->color_enc_fmt |= DRM_EDID_COLORIMETRY_ADB_RGB;
+	/* Bit 5: BT2020_CYCC */
+	if (db[2] & BIT(5))
+		connector->color_enc_fmt |= DRM_EDID_COLORIMETRY_BT2020_CYCC;
+	/* Bit 6: BT2020_YCC */
+	if (db[2] & BIT(6))
+		connector->color_enc_fmt |= DRM_EDID_COLORIMETRY_BT2020_YCC;
+	/* Bit 7: BT2020_RGB */
+	if (db[2] & BIT(7))
+		connector->color_enc_fmt |= DRM_EDID_COLORIMETRY_BT2020_RGB;
+
+	DRM_DEBUG_KMS("colorimetry fmt 0x%x\n", connector->color_enc_fmt);
+}
+
+/*
+ * drm_hdmi_extract_extended_blk_info - Parse the HDMI extended tag blocks
+ * @connector: connector corresponding to the HDMI sink
+ * @edid: handle to the EDID structure
+ * Parses all extended tag blocks to extract sink info for @connector.
+ */
+static void
+drm_hdmi_extract_extended_blk_info(struct drm_connector *connector,
+struct edid *edid)
+{
+	const u8 *cea = drm_find_cea_extension(edid);
+	const u8 *db = NULL;
+
+	if (cea && cea_revision(cea) >= 3) {
+		int i, start, end;
+
+		if (cea_db_offsets(cea, &start, &end))
+			return;
+
+		for_each_cea_db(cea, i, start, end) {
+			db = &cea[i];
+
+			if (cea_db_tag(db) == EXTENDED_TAG) {
+				DRM_DEBUG_KMS("found extended tag block = %d\n",
+				db[1]);
+				switch (db[1]) {
+				case VIDEO_CAPABILITY_EXTENDED_DATA_BLOCK:
+					drm_extract_vcdb_info(connector, db);
+					break;
+				case HDR_STATIC_METADATA_EXTENDED_DATA_BLOCK:
+					drm_extract_hdr_db(connector, db);
+					break;
+				case COLORIMETRY_EXTENDED_DATA_BLOCK:
+					drm_extract_clrmetry_db(connector, db);
+					break;
+				default:
+					break;
+				}
+			}
+		}
+	}
+}
+
+static u8 *
+drm_edid_find_extended_tag_block(struct edid *edid, int blk_id)
+{
+	u8 *db = NULL;
+	u8 *cea = NULL;
+
+	if (!edid) {
+		pr_err("%s: invalid input\n", __func__);
+		return NULL;
+	}
+
+	cea = drm_find_cea_extension(edid);
+
+	if (cea && cea_revision(cea) >= 3) {
+		int i, start, end;
+
+		if (cea_db_offsets(cea, &start, &end))
+			return NULL;
+
+		for_each_cea_db(cea, i, start, end) {
+			db = &cea[i];
+			if ((cea_db_tag(db) == EXTENDED_TAG) &&
+				(db[1] == blk_id))
+				return db;
+		}
+	}
+	return NULL;
+}
+
+/*
+ * add_YCbCr420VDB_modes - add the modes found in Ycbcr420 VDB block
+ * @connector: connector corresponding to the HDMI sink
+ * @edid: handle to the EDID structure
+ * Parses the YCbCr420 VDB block and adds the modes to @connector.
+ */
+static int
+add_YCbCr420VDB_modes(struct drm_connector *connector, struct edid *edid)
+{
+
+	const u8 *db = NULL;
+	u32 i = 0;
+	u32 modes = 0;
+	u32 video_format = 0;
+	u8 len = 0;
+
+	/* Find the YCbCr420 VDB */
+	db = drm_edid_find_extended_tag_block(edid, Y420_VIDEO_DATA_BLOCK);
+	/* Offset to byte 3 */
+	if (db) {
+		len = db[0] & 0x1F;
+		db += 2;
+		for (i = 0; i < len - 1; i++) {
+			struct drm_display_mode *mode;
+
+			video_format = *(db + i) & 0x7F;
+			mode = drm_display_mode_from_vic_index(connector,
+					db, len-1, i);
+			if (mode) {
+				DRM_DEBUG_KMS("Adding mode for vic = %d\n",
+				video_format);
+				drm_mode_probed_add(connector, mode);
+				modes++;
+			}
+		}
+	}
+	return modes;
+}
+
+static void
 monitor_name(struct detailed_timing *t, void *data)
 {
 	if (t->data.other_data.type == EDID_DETAIL_MONITOR_NAME)
@@ -3282,6 +3846,9 @@
 				/* HDMI Vendor-Specific Data Block */
 				if (cea_db_is_hdmi_vsdb(db))
 					parse_hdmi_vsdb(connector, db);
+				/* HDMI Forum Vendor-Specific Data Block */
+				else if (cea_db_is_hdmi_hf_vsdb(db))
+					parse_hdmi_hf_vsdb(connector, db);
 				break;
 			default:
 				break;
@@ -3738,6 +4305,10 @@
 			info->color_formats |= DRM_COLOR_FORMAT_YCRCB422;
 	}
 
+	/* Extract sink info from the HDMI VSDB and HF-VSDB blocks */
+	drm_hdmi_extract_vsdbs_info(connector, edid);
+	/* Extract info from extended tag blocks */
+	drm_hdmi_extract_extended_blk_info(connector, edid);
 	/* HDMI deep color modes supported? Assign to info, if so */
 	drm_assign_hdmi_deep_color_info(edid, info, connector);
 
@@ -3780,6 +4351,148 @@
 		info->color_formats |= DRM_COLOR_FORMAT_YCRCB422;
 }
 
+static int validate_displayid(u8 *displayid, int length, int idx)
+{
+	int i;
+	u8 csum = 0;
+	struct displayid_hdr *base;
+
+	base = (struct displayid_hdr *)&displayid[idx];
+
+	DRM_DEBUG_KMS("base revision 0x%x, length %d, %d %d\n",
+		      base->rev, base->bytes, base->prod_id, base->ext_count);
+
+	if (base->bytes + 5 > length - idx)
+		return -EINVAL;
+	for (i = idx; i <= base->bytes + 5; i++)
+		csum += displayid[i];
+
+	if (csum) {
+		DRM_ERROR("DisplayID checksum invalid, remainder is %d\n",
+				  csum);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static struct drm_display_mode *
+drm_mode_displayid_detailed(struct drm_device *dev,
+struct displayid_detailed_timings_1 *timings)
+{
+	struct drm_display_mode *mode;
+	unsigned pixel_clock = (timings->pixel_clock[0] |
+				(timings->pixel_clock[1] << 8) |
+				(timings->pixel_clock[2] << 16));
+	unsigned hactive = (timings->hactive[0] | timings->hactive[1] << 8) + 1;
+	unsigned hblank =
+		(timings->hblank[0] |
+		timings->hblank[1] << 8) + 1;
+	unsigned hsync = (timings->hsync[0] |
+			(timings->hsync[1] & 0x7f) << 8) + 1;
+	unsigned hsync_width = (timings->hsw[0] | timings->hsw[1] << 8) + 1;
+	unsigned vactive = (timings->vactive[0] | timings->vactive[1] << 8) + 1;
+	unsigned vblank =
+		(timings->vblank[0] |
+		 timings->vblank[1] << 8) + 1;
+	unsigned vsync =
+		(timings->vsync[0] |
+		 (timings->vsync[1] & 0x7f) << 8) + 1;
+	unsigned vsync_width = (timings->vsw[0] | timings->vsw[1] << 8) + 1;
+	bool hsync_positive = (timings->hsync[1] >> 7) & 0x1;
+	bool vsync_positive = (timings->vsync[1] >> 7) & 0x1;
+
+	mode = drm_mode_create(dev);
+	if (!mode)
+		return NULL;
+
+	mode->clock = pixel_clock * 10;
+	mode->hdisplay = hactive;
+	mode->hsync_start = mode->hdisplay + hsync;
+	mode->hsync_end = mode->hsync_start + hsync_width;
+	mode->htotal = mode->hdisplay + hblank;
+
+	mode->vdisplay = vactive;
+	mode->vsync_start = mode->vdisplay + vsync;
+	mode->vsync_end = mode->vsync_start + vsync_width;
+	mode->vtotal = mode->vdisplay + vblank;
+
+	mode->flags = 0;
+	mode->flags |= hsync_positive ?
+				DRM_MODE_FLAG_PHSYNC : DRM_MODE_FLAG_NHSYNC;
+	mode->flags |= vsync_positive ?
+				DRM_MODE_FLAG_PVSYNC : DRM_MODE_FLAG_NVSYNC;
+	mode->type = DRM_MODE_TYPE_DRIVER;
+
+	if (timings->flags & 0x80)
+		mode->type |= DRM_MODE_TYPE_PREFERRED;
+	mode->vrefresh = drm_mode_vrefresh(mode);
+	drm_mode_set_name(mode);
+
+	return mode;
+}
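
All multi-byte fields decoded above are little-endian; the spatial fields are stored minus one and the pixel clock in units of 10 kHz, which is why the decode adds 1 and the driver multiplies by 10 to get kHz. A worked example with illustrative byte values:

/* Illustrative decode, following the arithmetic above:
 *   pixel_clock bytes { 0x02, 0x3a, 0x00 } -> 0x003a02 = 14850
 *                     -> mode->clock = 14850 * 10 = 148500 kHz (148.5 MHz)
 *   hactive bytes     { 0x7f, 0x07 }       -> 0x077f + 1 = 1920 pixels
 *   hsync[1] bit 7 set                     -> DRM_MODE_FLAG_PHSYNC
 */
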
+
+static int add_displayid_detailed_1_modes(struct drm_connector *connector,
+			struct displayid_block *block)
+{
+	struct displayid_detailed_timing_block *det =
+		(struct displayid_detailed_timing_block *)block;
+	int i;
+	int num_timings;
+	struct drm_display_mode *newmode;
+	int num_modes = 0;
+	/* blocks must be a multiple of 20 bytes in length */
+	if (block->num_bytes % 20)
+		return 0;
+
+	num_timings = block->num_bytes / 20;
+	for (i = 0; i < num_timings; i++) {
+		struct displayid_detailed_timings_1 *timings = &det->timings[i];
+
+		newmode = drm_mode_displayid_detailed(connector->dev, timings);
+		if (!newmode)
+			continue;
+
+		drm_mode_probed_add(connector, newmode);
+		num_modes++;
+	}
+	return num_modes;
+}
+
+static int add_displayid_detailed_modes(struct drm_connector *connector,
+					struct edid *edid)
+{
+	u8 *displayid;
+	int ret;
+	int idx = 1;
+	int length = EDID_LENGTH;
+	struct displayid_block *block;
+	int num_modes = 0;
+
+	displayid = drm_find_displayid_extension(edid);
+	if (!displayid)
+		return 0;
+
+	ret = validate_displayid(displayid, length, idx);
+	if (ret)
+		return 0;
+
+	idx += sizeof(struct displayid_hdr);
+	while (block = (struct displayid_block *)&displayid[idx],
+	       idx + sizeof(struct displayid_block) <= length &&
+	       idx + sizeof(struct displayid_block) +
+		   block->num_bytes <= length &&
+	       block->num_bytes > 0) {
+		idx += block->num_bytes + sizeof(struct displayid_block);
+		switch (block->tag) {
+		case DATA_BLOCK_TYPE_1_DETAILED_TIMING:
+			num_modes += add_displayid_detailed_1_modes(connector,
+						block);
+			break;
+		}
+	}
+	return num_modes;
+}
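
The while (block = ..., cond) form above leans on the comma operator: the block pointer is recomputed on each pass, and block->num_bytes is only dereferenced after the header-fits check has passed. A minimal unrolled sketch of the same walk, assuming the same displayid, idx and length variables; shown for clarity, not as a replacement:

for (;;) {
	struct displayid_block *block =
		(struct displayid_block *)&displayid[idx];

	/* the block header itself must fit in the section... */
	if (idx + sizeof(*block) > length)
		break;
	/* ...as must its payload; a zero-length block ends the walk */
	if (idx + sizeof(*block) + block->num_bytes > length ||
	    block->num_bytes == 0)
		break;

	idx += sizeof(*block) + block->num_bytes;
	/* dispatch on block->tag here */
}

drm_parse_display_id() further down reuses the same loop shape for its own block dispatch.
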
+
 /**
  * drm_add_edid_modes - add modes from EDID data, if available
  * @connector: connector we're probing
@@ -3825,6 +4538,8 @@
 	num_modes += add_established_modes(connector, edid);
 	num_modes += add_cea_modes(connector, edid);
 	num_modes += add_alternate_cea_modes(connector, edid);
+	num_modes += add_displayid_detailed_modes(connector, edid);
+	num_modes += add_YCbCr420VDB_modes(connector, edid);
 	if (edid->features & DRM_EDID_FEATURE_DEFAULT_GTF)
 		num_modes += add_inferred_modes(connector, edid);
 
@@ -4037,44 +4752,11 @@
 }
 EXPORT_SYMBOL(drm_hdmi_vendor_infoframe_from_display_mode);
 
-static int drm_parse_display_id(struct drm_connector *connector,
-				u8 *displayid, int length,
-				bool is_edid_extension)
+static int drm_parse_tiled_block(struct drm_connector *connector,
+				 struct displayid_block *block)
 {
-	/* if this is an EDID extension the first byte will be 0x70 */
-	int idx = 0;
-	struct displayid_hdr *base;
-	struct displayid_block *block;
-	u8 csum = 0;
-	int i;
-
-	if (is_edid_extension)
-		idx = 1;
-
-	base = (struct displayid_hdr *)&displayid[idx];
-
-	DRM_DEBUG_KMS("base revision 0x%x, length %d, %d %d\n",
-		      base->rev, base->bytes, base->prod_id, base->ext_count);
-
-	if (base->bytes + 5 > length - idx)
-		return -EINVAL;
-
-	for (i = idx; i <= base->bytes + 5; i++) {
-		csum += displayid[i];
-	}
-	if (csum) {
-		DRM_ERROR("DisplayID checksum invalid, remainder is %d\n", csum);
-		return -EINVAL;
-	}
-
-	block = (struct displayid_block *)&displayid[idx + 4];
-	DRM_DEBUG_KMS("block id %d, rev %d, len %d\n",
-		      block->tag, block->rev, block->num_bytes);
-
-	switch (block->tag) {
-	case DATA_BLOCK_TILED_DISPLAY: {
-		struct displayid_tiled_block *tile = (struct displayid_tiled_block *)block;
-
+	struct displayid_tiled_block *tile =
+		(struct displayid_tiled_block *)block;
 	u16 w, h;
 	u8 tile_v_loc, tile_h_loc;
 	u8 num_v_tile, num_h_tile;
@@ -4103,31 +4785,73 @@
 		DRM_DEBUG_KMS("tile_size %d x %d\n", w + 1, h + 1);
 		DRM_DEBUG_KMS("topo num tiles %dx%d, location %dx%d\n",
 		       num_h_tile + 1, num_v_tile + 1, tile_h_loc, tile_v_loc);
-		DRM_DEBUG_KMS("vend %c%c%c\n", tile->topology_id[0], tile->topology_id[1], tile->topology_id[2]);
+	DRM_DEBUG_KMS("vend %c%c%c\n", tile->topology_id[0],
+				  tile->topology_id[1], tile->topology_id[2]);
 
 		tg = drm_mode_get_tile_group(connector->dev, tile->topology_id);
-		if (!tg) {
-			tg = drm_mode_create_tile_group(connector->dev, tile->topology_id);
-		}
+	if (!tg)
+		tg = drm_mode_create_tile_group(connector->dev,
+				tile->topology_id);
+
 		if (!tg)
 			return -ENOMEM;
 
 		if (connector->tile_group != tg) {
 			/* if we haven't got a pointer,
-			   take the reference, drop ref to old tile group */
-			if (connector->tile_group) {
-				drm_mode_put_tile_group(connector->dev, connector->tile_group);
-			}
+		 * take the reference, drop ref to old tile group
+		 */
+		if (connector->tile_group)
+			drm_mode_put_tile_group(connector->dev,
+			connector->tile_group);
+
 			connector->tile_group = tg;
 		} else
 			/* if same tile group, then release the ref we just took. */
 			drm_mode_put_tile_group(connector->dev, tg);
+	return 0;
 	}
+
+static int drm_parse_display_id(struct drm_connector *connector,
+				u8 *displayid, int length,
+				bool is_edid_extension)
+{
+	/* if this is an EDID extension the first byte will be 0x70 */
+	int idx = 0;
+	struct displayid_block *block;
+	int ret;
+
+	if (is_edid_extension)
+		idx = 1;
+
+	ret = validate_displayid(displayid, length, idx);
+	if (ret)
+		return ret;
+
+	idx += sizeof(struct displayid_hdr);
+	while (block = (struct displayid_block *)&displayid[idx],
+	       idx + sizeof(struct displayid_block) <= length &&
+	       idx + sizeof(struct displayid_block) +
+		   block->num_bytes <= length &&
+	       block->num_bytes > 0) {
+		idx += block->num_bytes + sizeof(struct displayid_block);
+		DRM_DEBUG_KMS("block id 0x%x, rev %d, len %d\n",
+			      block->tag, block->rev, block->num_bytes);
+
+		switch (block->tag) {
+		case DATA_BLOCK_TILED_DISPLAY:
+			ret = drm_parse_tiled_block(connector, block);
+			if (ret)
+				return ret;
+			break;
+		case DATA_BLOCK_TYPE_1_DETAILED_TIMING:
+			/* handled in mode gathering code. */
 			break;
 		default:
-		printk("unknown displayid tag %d\n", block->tag);
+			DRM_DEBUG_KMS("found DisplayID tag 0x%x, unhandled\n",
+				      block->tag);
 			break;
 		}
+	}
 	return 0;
 }
 
diff -ruw linux-4.4.115/drivers/gpu/drm/drm_ioctl.c linux-4.4.115-fbx/drivers/gpu/drm/drm_ioctl.c
--- linux-4.4.115/drivers/gpu/drm/drm_ioctl.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/gpu/drm/drm_ioctl.c	2019-10-29 09:26:23.605202806 +0100
@@ -312,6 +312,9 @@
 	case DRM_CAP_ADDFB2_MODIFIERS:
 		req->value = dev->mode_config.allow_fb_modifiers;
 		break;
+	case DRM_CAP_CRTC_IN_VBLANK_EVENT:
+		req->value = 1;
+		break;
 	default:
 		return -EINVAL;
 	}
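
With the capability wired up, userspace can probe for it before trusting the crtc_id field in vblank events. A hedged libdrm sketch; it assumes the uapi headers matching this tree export DRM_CAP_CRTC_IN_VBLANK_EVENT:

#include <stdint.h>
#include <xf86drm.h>

/* returns 1 when vblank events carry a valid crtc_id, 0 otherwise */
static int crtc_in_vblank_event_supported(int fd)
{
	uint64_t value = 0;

	if (drmGetCap(fd, DRM_CAP_CRTC_IN_VBLANK_EVENT, &value))
		return 0;	/* older kernel: capability unknown */

	return value == 1;
}
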
diff -ruw linux-4.4.115/drivers/gpu/drm/drm_irq.c linux-4.4.115-fbx/drivers/gpu/drm/drm_irq.c
--- linux-4.4.115/drivers/gpu/drm/drm_irq.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/gpu/drm/drm_irq.c	2019-10-29 09:26:23.605202806 +0100
@@ -1075,6 +1075,7 @@
 
 	e->pipe = pipe;
 	e->event.sequence = drm_vblank_count(dev, pipe);
+	e->event.crtc_id = 0;
 	list_add_tail(&e->base.link, &dev->vblank_event_list);
 }
 EXPORT_SYMBOL(drm_arm_vblank_event);
@@ -1098,7 +1099,15 @@
 void drm_crtc_arm_vblank_event(struct drm_crtc *crtc,
 			       struct drm_pending_vblank_event *e)
 {
-	drm_arm_vblank_event(crtc->dev, drm_crtc_index(crtc), e);
+	struct drm_device *dev = crtc->dev;
+	unsigned int pipe = drm_crtc_index(crtc);
+
+	assert_spin_locked(&dev->event_lock);
+
+	e->pipe = pipe;
+	e->event.sequence = drm_vblank_count(dev, pipe);
+	e->event.crtc_id = crtc->base.id;
+	list_add_tail(&e->base.link, &dev->vblank_event_list);
 }
 EXPORT_SYMBOL(drm_crtc_arm_vblank_event);
 
@@ -1127,6 +1136,7 @@
 		now = get_drm_timestamp();
 	}
 	e->pipe = pipe;
+	e->event.crtc_id = 0;
 	send_vblank_event(dev, e, seq, &now);
 }
 EXPORT_SYMBOL(drm_send_vblank_event);
@@ -1144,7 +1154,20 @@
 void drm_crtc_send_vblank_event(struct drm_crtc *crtc,
 				struct drm_pending_vblank_event *e)
 {
-	drm_send_vblank_event(crtc->dev, drm_crtc_index(crtc), e);
+	struct drm_device *dev = crtc->dev;
+	unsigned int seq, pipe = drm_crtc_index(crtc);
+	struct timeval now;
+
+	if (dev->num_crtcs > 0) {
+		seq = drm_vblank_count_and_time(dev, pipe, &now);
+	} else {
+		seq = 0;
+
+		now = get_drm_timestamp();
+	}
+	e->pipe = pipe;
+	e->event.crtc_id = crtc->base.id;
+	send_vblank_event(dev, e, seq, &now);
 }
 EXPORT_SYMBOL(drm_crtc_send_vblank_event);
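
On the receiving side, a hedged sketch of draining the DRM file descriptor and picking up the CRTC id that drm_crtc_send_vblank_event() now stamps into each event (the legacy non-CRTC helpers above keep 0 as "CRTC unknown"); it assumes the uapi struct drm_event_vblank in this tree exposes the crtc_id field:

#include <stdio.h>
#include <unistd.h>
#include <xf86drm.h>

static void drain_vblank_events(int fd)
{
	char buf[1024];
	ssize_t len = read(fd, buf, sizeof(buf));
	ssize_t off = 0;

	while (len > 0 && off + (ssize_t)sizeof(struct drm_event) <= len) {
		struct drm_event *e = (struct drm_event *)&buf[off];

		if (e->type == DRM_EVENT_VBLANK) {
			struct drm_event_vblank *vbl = (void *)e;

			/* crtc_id is the field added by the kernel code above */
			printf("vblank seq %u on crtc %u\n",
			       vbl->sequence, vbl->crtc_id);
		}
		off += e->length;
	}
}
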
 
diff -ruw linux-4.4.115/drivers/gpu/drm/drm_mipi_dsi.c linux-4.4.115-fbx/drivers/gpu/drm/drm_mipi_dsi.c
--- linux-4.4.115/drivers/gpu/drm/drm_mipi_dsi.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/gpu/drm/drm_mipi_dsi.c	2019-01-22 16:16:23.375245247 +0100
@@ -335,7 +335,7 @@
 		return -EINVAL;
 
 	memset(packet, 0, sizeof(*packet));
-	packet->header[0] = ((msg->channel & 0x3) << 6) | (msg->type & 0x3f);
+	packet->header[2] = ((msg->channel & 0x3) << 6) | (msg->type & 0x3f);
 
 	/* TODO: compute ECC if hardware support is not available */
 
@@ -347,16 +347,16 @@
 	 * and 2.
 	 */
 	if (mipi_dsi_packet_format_is_long(msg->type)) {
-		packet->header[1] = (msg->tx_len >> 0) & 0xff;
-		packet->header[2] = (msg->tx_len >> 8) & 0xff;
+		packet->header[0] = (msg->tx_len >> 0) & 0xff;
+		packet->header[1] = (msg->tx_len >> 8) & 0xff;
 
 		packet->payload_length = msg->tx_len;
 		packet->payload = msg->tx_buf;
 	} else {
 		const u8 *tx = msg->tx_buf;
 
-		packet->header[1] = (msg->tx_len > 0) ? tx[0] : 0;
-		packet->header[2] = (msg->tx_len > 1) ? tx[1] : 0;
+		packet->header[0] = (msg->tx_len > 0) ? tx[0] : 0;
+		packet->header[1] = (msg->tx_len > 1) ? tx[1] : 0;
 	}
 
 	packet->size = sizeof(packet->header) + packet->payload_length;
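
Note the header byte order now diverges from mainline, where header[0] carries the data identifier (virtual channel and type) and header[1..2] carry the word count or short-packet parameters. This tree moves the identifier to header[2], presumably to match how the downstream DSI host consumes the 32-bit packet header. Resulting layout, mainline position in parentheses:

/* long packet:  header[0] = word count, low byte      (mainline header[1])
 *               header[1] = word count, high byte     (mainline header[2])
 *               header[2] = VC[7:6] | data type[5:0]  (mainline header[0])
 *
 * short packet: header[0] = parameter 0               (mainline header[1])
 *               header[1] = parameter 1               (mainline header[2])
 *               header[2] = VC[7:6] | data type[5:0]  (mainline header[0])
 */
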
diff -ruw linux-4.4.115/drivers/gpu/drm/drm_mm.c linux-4.4.115-fbx/drivers/gpu/drm/drm_mm.c
--- linux-4.4.115/drivers/gpu/drm/drm_mm.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/gpu/drm/drm_mm.c	2019-01-22 16:16:23.375245247 +0100
@@ -46,6 +46,8 @@
 #include <linux/slab.h>
 #include <linux/seq_file.h>
 #include <linux/export.h>
+#include <linux/interval_tree_generic.h>
+#include <linux/rbtree.h>
 
 /**
  * DOC: Overview
@@ -73,7 +75,8 @@
  * allocations and avoiding too much fragmentation. This means free space
  * searches are O(num_holes). Given that all the fancy features drm_mm supports
  * something better would be fairly complex and since gfx thrashing is a fairly
- * steep cliff not a real concern. Removing a node again is O(1).
+ * steep cliff not a real concern. Removing a node again is O(1). With the
+ * rbtree to track free holes, free hole search becomes O(log(num_holes)).
  *
  * drm_mm supports a few features: Alignment and range restrictions can be
  * supplied. Further more every &drm_mm_node has a color value (which is just an
@@ -103,6 +106,98 @@
 						u64 end,
 						enum drm_mm_search_flags flags);
 
+#define START(node) ((node)->start)
+#define LAST(node)  ((node)->start + (node)->size - 1)
+
+INTERVAL_TREE_DEFINE(struct drm_mm_node, rb,
+		     u64, __subtree_last,
+		     START, LAST, static inline, drm_mm_interval_tree)
+
+struct drm_mm_node *
+drm_mm_interval_first(struct drm_mm *mm, u64 start, u64 last)
+{
+	return drm_mm_interval_tree_iter_first(&mm->interval_tree,
+					       start, last);
+}
+EXPORT_SYMBOL(drm_mm_interval_first);
+
+struct drm_mm_node *
+drm_mm_interval_next(struct drm_mm_node *node, u64 start, u64 last)
+{
+	return drm_mm_interval_tree_iter_next(node, start, last);
+}
+EXPORT_SYMBOL(drm_mm_interval_next);
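
A hedged usage sketch for the two exported helpers: walk every allocated node overlapping an inclusive address range (the mm pointer and range are caller-supplied assumptions):

static void dump_overlapping_nodes(struct drm_mm *mm, u64 start, u64 last)
{
	struct drm_mm_node *node;

	/* endpoints are inclusive, matching the LAST() definition above */
	for (node = drm_mm_interval_first(mm, start, last);
	     node != NULL;
	     node = drm_mm_interval_next(node, start, last))
		DRM_DEBUG_KMS("node [0x%llx + 0x%llx]\n",
			      node->start, node->size);
}
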
+
+static void drm_mm_interval_tree_add_node(struct drm_mm_node *hole_node,
+					  struct drm_mm_node *node)
+{
+	struct drm_mm *mm = hole_node->mm;
+	struct rb_node **link, *rb;
+	struct drm_mm_node *parent;
+
+	node->__subtree_last = LAST(node);
+
+	if (hole_node->allocated) {
+		rb = &hole_node->rb;
+		while (rb) {
+			parent = rb_entry(rb, struct drm_mm_node, rb);
+			if (parent->__subtree_last >= node->__subtree_last)
+				break;
+
+			parent->__subtree_last = node->__subtree_last;
+			rb = rb_parent(rb);
+		}
+
+		rb = &hole_node->rb;
+		link = &hole_node->rb.rb_right;
+	} else {
+		rb = NULL;
+		link = &mm->interval_tree.rb_node;
+	}
+
+	while (*link) {
+		rb = *link;
+		parent = rb_entry(rb, struct drm_mm_node, rb);
+		if (parent->__subtree_last < node->__subtree_last)
+			parent->__subtree_last = node->__subtree_last;
+		if (node->start < parent->start)
+			link = &parent->rb.rb_left;
+		else
+			link = &parent->rb.rb_right;
+	}
+
+	rb_link_node(&node->rb, rb, link);
+	rb_insert_augmented(&node->rb,
+			    &mm->interval_tree,
+			    &drm_mm_interval_tree_augment);
+}
+
+static void
+rb_insert_hole_node(struct drm_mm_node *hole_node, struct drm_mm *mm)
+{
+	struct rb_node **new = &(mm->holes_tree.rb_node);
+	struct rb_node *parent = NULL;
+	struct drm_mm_node *cur;
+
+	while (*new) {
+		parent = *new;
+		cur = rb_entry(parent, struct drm_mm_node, hole_node);
+
+		if (__drm_mm_hole_node_start(hole_node)
+				< __drm_mm_hole_node_start(cur))
+			new = &parent->rb_left;
+		else
+			new = &parent->rb_right;
+	}
+	rb_link_node(&hole_node->hole_node, parent, new);
+	rb_insert_color(&hole_node->hole_node, &mm->holes_tree);
+}
+
+static void rb_erase_hole_node(struct drm_mm_node *hole_node, struct drm_mm *mm)
+{
+	rb_erase(&hole_node->hole_node, &mm->holes_tree);
+}
+
 static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
 				 struct drm_mm_node *node,
 				 u64 size, unsigned alignment,
@@ -142,6 +237,7 @@
 	if (adj_start == hole_start) {
 		hole_node->hole_follows = 0;
 		list_del(&hole_node->hole_stack);
+		rb_erase_hole_node(hole_node, mm);
 	}
 
 	node->start = adj_start;
@@ -150,14 +246,16 @@
 	node->color = color;
 	node->allocated = 1;
 
-	INIT_LIST_HEAD(&node->hole_stack);
 	list_add(&node->node_list, &hole_node->node_list);
 
+	drm_mm_interval_tree_add_node(hole_node, node);
+
 	BUG_ON(node->start + node->size > adj_end);
 
 	node->hole_follows = 0;
 	if (__drm_mm_hole_node_start(node) < hole_end) {
 		list_add(&node->hole_stack, &mm->hole_stack);
+		rb_insert_hole_node(node, mm);
 		node->hole_follows = 1;
 	}
 }
@@ -178,40 +276,55 @@
  */
 int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node)
 {
-	struct drm_mm_node *hole;
 	u64 end = node->start + node->size;
-	u64 hole_start;
-	u64 hole_end;
+	struct drm_mm_node *hole;
+	u64 hole_start, hole_end;
 
-	BUG_ON(node == NULL);
+	if (WARN_ON(node->size == 0))
+		return -EINVAL;
 
 	/* Find the relevant hole to add our node to */
-	drm_mm_for_each_hole(hole, mm, hole_start, hole_end) {
+	hole = drm_mm_interval_tree_iter_first(&mm->interval_tree,
+					       node->start, ~(u64)0);
+	if (hole) {
+		if (hole->start < end)
+			return -ENOSPC;
+	} else {
+		hole = list_entry(&mm->head_node.node_list,
+				  typeof(*hole), node_list);
+	}
+
+	hole = list_last_entry(&hole->node_list, typeof(*hole), node_list);
+	if (!hole->hole_follows)
+		return -ENOSPC;
+
+	hole_start = __drm_mm_hole_node_start(hole);
+	hole_end = __drm_mm_hole_node_end(hole);
 	if (hole_start > node->start || hole_end < end)
-			continue;
+		return -ENOSPC;
 
 	node->mm = mm;
 	node->allocated = 1;
 
-		INIT_LIST_HEAD(&node->hole_stack);
 	list_add(&node->node_list, &hole->node_list);
 
+	drm_mm_interval_tree_add_node(hole, node);
+
 	if (node->start == hole_start) {
 		hole->hole_follows = 0;
-			list_del_init(&hole->hole_stack);
+		list_del(&hole->hole_stack);
+		rb_erase_hole_node(hole, mm);
 	}
 
 	node->hole_follows = 0;
 	if (end != hole_end) {
 		list_add(&node->hole_stack, &mm->hole_stack);
+		rb_insert_hole_node(node, mm);
 		node->hole_follows = 1;
 	}
 
 	return 0;
 }
-
-	return -ENOSPC;
-}
 EXPORT_SYMBOL(drm_mm_reserve_node);
 
 /**
@@ -237,6 +350,9 @@
 {
 	struct drm_mm_node *hole_node;
 
+	if (WARN_ON(size == 0))
+		return -EINVAL;
+
 	hole_node = drm_mm_search_free_generic(mm, size, alignment,
 					       color, sflags);
 	if (!hole_node)
@@ -287,6 +403,7 @@
 	if (adj_start == hole_start) {
 		hole_node->hole_follows = 0;
 		list_del(&hole_node->hole_stack);
+		rb_erase_hole_node(hole_node, mm);
 	}
 
 	node->start = adj_start;
@@ -295,9 +412,10 @@
 	node->color = color;
 	node->allocated = 1;
 
-	INIT_LIST_HEAD(&node->hole_stack);
 	list_add(&node->node_list, &hole_node->node_list);
 
+	drm_mm_interval_tree_add_node(hole_node, node);
+
 	BUG_ON(node->start < start);
 	BUG_ON(node->start < adj_start);
 	BUG_ON(node->start + node->size > adj_end);
@@ -306,6 +424,7 @@
 	node->hole_follows = 0;
 	if (__drm_mm_hole_node_start(node) < hole_end) {
 		list_add(&node->hole_stack, &mm->hole_stack);
+		rb_insert_hole_node(node, mm);
 		node->hole_follows = 1;
 	}
 }
@@ -336,6 +455,9 @@
 {
 	struct drm_mm_node *hole_node;
 
+	if (WARN_ON(size == 0))
+		return -EINVAL;
+
 	hole_node = drm_mm_search_free_in_range_generic(mm,
 							size, alignment, color,
 							start, end, sflags);
@@ -375,6 +497,7 @@
 		BUG_ON(__drm_mm_hole_node_start(node) ==
 		       __drm_mm_hole_node_end(node));
 		list_del(&node->hole_stack);
+		rb_erase_hole_node(node, mm);
 	} else
 		BUG_ON(__drm_mm_hole_node_start(node) !=
 		       __drm_mm_hole_node_end(node));
@@ -383,9 +506,11 @@
 	if (!prev_node->hole_follows) {
 		prev_node->hole_follows = 1;
 		list_add(&prev_node->hole_stack, &mm->hole_stack);
+		rb_insert_hole_node(prev_node, mm);
 	} else
 		list_move(&prev_node->hole_stack, &mm->hole_stack);
 
+	drm_mm_interval_tree_remove(node, &mm->interval_tree);
 	list_del(&node->node_list);
 	node->allocated = 0;
 }
@@ -408,6 +533,53 @@
 	return end >= start + size;
 }
 
+static struct drm_mm_node *get_first_hole(const struct drm_mm *mm,
+		enum drm_mm_search_flags flags)
+{
+	if (flags & DRM_MM_SEARCH_BOTTOM_UP) {
+		struct rb_node *node = rb_first(&mm->holes_tree);
+
+		if (!node)
+			return NULL;
+
+		return rb_entry(node, struct drm_mm_node, hole_node);
+	} else if (flags & DRM_MM_SEARCH_BELOW) {
+		return list_entry((mm)->hole_stack.prev,
+				struct drm_mm_node, hole_stack);
+	} else {
+		return list_entry((mm)->hole_stack.next,
+				struct drm_mm_node, hole_stack);
+	}
+}
+
+static struct drm_mm_node *get_next_hole(struct drm_mm_node *entry,
+		enum drm_mm_search_flags flags)
+{
+	if (flags & DRM_MM_SEARCH_BOTTOM_UP) {
+		struct rb_node *node = rb_next(&entry->hole_node);
+
+		if (!node)
+			return NULL;
+
+		return rb_entry(node, struct drm_mm_node, hole_node);
+	} else if (flags & DRM_MM_SEARCH_BELOW) {
+		return list_entry(entry->hole_stack.prev,
+				struct drm_mm_node, hole_stack);
+	} else {
+		return list_entry(entry->hole_stack.next,
+				struct drm_mm_node, hole_stack);
+	}
+}
+
+static bool drm_mm_hole_traversal_condition(const struct drm_mm *mm,
+		struct drm_mm_node *entry, enum drm_mm_search_flags flags)
+{
+	if (flags & DRM_MM_SEARCH_BOTTOM_UP)
+		return entry ? 1 : 0;
+	else
+		return (&entry->hole_stack != &(mm)->hole_stack) ? 1 : 0;
+}
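
Together with the holes_tree keyed on hole start address, these helpers give DRM_MM_SEARCH_BOTTOM_UP an address-ordered, lowest-first hole walk, while the legacy flags keep the LRU-style hole_stack ordering. A hedged allocation sketch; mm is an already-initialized drm_mm and use_range() is a hypothetical consumer:

struct drm_mm_node node = {};
int ret;

/* ask for 64 KiB, 4 KiB aligned, from the lowest suitable hole */
ret = drm_mm_insert_node_generic(&mm, &node, SZ_64K, SZ_4K, 0,
				 DRM_MM_SEARCH_BOTTOM_UP,
				 DRM_MM_CREATE_DEFAULT);
if (!ret)
	use_range(node.start, node.size);
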
+
 static struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
 						      u64 size,
 						      unsigned alignment,
@@ -425,9 +597,14 @@
 	best = NULL;
 	best_size = ~0UL;
 
-	__drm_mm_for_each_hole(entry, mm, adj_start, adj_end,
-			       flags & DRM_MM_SEARCH_BELOW) {
-		u64 hole_size = adj_end - adj_start;
+	for (entry = get_first_hole(mm, flags);
+			drm_mm_hole_traversal_condition(mm, entry, flags);
+			entry = get_next_hole(entry, flags)) {
+		u64 hole_size;
+
+		adj_start = drm_mm_hole_node_start(entry);
+		adj_end = drm_mm_hole_node_end(entry);
+		hole_size = adj_end - adj_start;
 
 		if (mm->color_adjust) {
 			mm->color_adjust(entry, color, &adj_start, &adj_end);
@@ -469,9 +646,14 @@
 	best = NULL;
 	best_size = ~0UL;
 
-	__drm_mm_for_each_hole(entry, mm, adj_start, adj_end,
-			       flags & DRM_MM_SEARCH_BELOW) {
-		u64 hole_size = adj_end - adj_start;
+	for (entry = get_first_hole(mm, flags);
+			drm_mm_hole_traversal_condition(mm, entry, flags);
+			entry = get_next_hole(entry, flags)) {
+		u64 hole_size;
+
+		adj_start = drm_mm_hole_node_start(entry);
+		adj_end = drm_mm_hole_node_end(entry);
+		hole_size = adj_end - adj_start;
 
 		if (mm->color_adjust) {
 			mm->color_adjust(entry, color, &adj_start, &adj_end);
@@ -510,14 +692,21 @@
 {
 	list_replace(&old->node_list, &new->node_list);
 	list_replace(&old->hole_stack, &new->hole_stack);
+	rb_replace_node(&old->rb, &new->rb, &old->mm->interval_tree);
 	new->hole_follows = old->hole_follows;
 	new->mm = old->mm;
 	new->start = old->start;
 	new->size = old->size;
 	new->color = old->color;
+	new->__subtree_last = old->__subtree_last;
 
 	old->allocated = 0;
 	new->allocated = 1;
+
+	if (old->hole_follows)
+		rb_replace_node(&old->hole_node, &new->hole_node,
+			&old->mm->holes_tree);
 }
 EXPORT_SYMBOL(drm_mm_replace_node);
 
@@ -742,7 +931,6 @@
 
 	/* Clever trick to avoid a special case in the free hole tracking. */
 	INIT_LIST_HEAD(&mm->head_node.node_list);
-	INIT_LIST_HEAD(&mm->head_node.hole_stack);
 	mm->head_node.hole_follows = 1;
 	mm->head_node.scanned_block = 0;
 	mm->head_node.scanned_prev_free = 0;
@@ -752,7 +940,10 @@
 	mm->head_node.size = start - mm->head_node.start;
 	list_add_tail(&mm->head_node.hole_stack, &mm->hole_stack);
 
+	mm->interval_tree = RB_ROOT;
 	mm->color_adjust = NULL;
+	mm->holes_tree = RB_ROOT;
+	rb_insert_hole_node(&mm->head_node, mm);
 }
 EXPORT_SYMBOL(drm_mm_init);
 
diff -ruw linux-4.4.115/drivers/gpu/drm/drm_probe_helper.c linux-4.4.115-fbx/drivers/gpu/drm/drm_probe_helper.c
--- linux-4.4.115/drivers/gpu/drm/drm_probe_helper.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/gpu/drm/drm_probe_helper.c	2019-10-29 09:26:23.605202806 +0100
@@ -199,20 +199,7 @@
 		goto prune;
 	}
 
-#ifdef CONFIG_DRM_LOAD_EDID_FIRMWARE
-	count = drm_load_edid_firmware(connector);
-	if (count == 0)
-#endif
-	{
-		if (connector->override_edid) {
-			struct edid *edid = (struct edid *) connector->edid_blob_ptr->data;
-
-			count = drm_add_edid_modes(connector, edid);
-			drm_edid_to_eld(connector, edid);
-		} else
 	count = (*connector_funcs->get_modes)(connector);
-	}
-
 	if (count == 0 && connector->status == connector_status_connected)
 		count = drm_add_modes_noedid(connector, 1024, 768);
 	count += drm_helper_probe_add_cmdline_mode(connector);
diff -ruw linux-4.4.115/drivers/gpu/drm/Kconfig linux-4.4.115-fbx/drivers/gpu/drm/Kconfig
--- linux-4.4.115/drivers/gpu/drm/Kconfig	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/gpu/drm/Kconfig	2019-07-19 15:49:45.457091212 +0200
@@ -8,6 +8,7 @@
 	tristate "Direct Rendering Manager (XFree86 4.1.0 and higher DRI support)"
 	depends on (AGP || AGP=n) && !EMULATED_CMPXCHG && MMU && HAS_DMA
 	select HDMI
+	select FB
 	select FB_CMDLINE
 	select I2C
 	select I2C_ALGOBIT
@@ -52,7 +53,7 @@
 	depends on DRM
 	select DRM_KMS_HELPER
 	select DRM_KMS_FB_HELPER
-	default y
+	default n
 	help
 	  Choose this option if you have a need for the legacy fbdev
 	  support. Note that this support also provides the linux console
@@ -62,7 +63,7 @@
 
 config DRM_LOAD_EDID_FIRMWARE
 	bool "Allow to specify an EDID data set instead of probing for it"
-	depends on DRM_KMS_HELPER
+	depends on DRM
 	help
 	  Say Y here, if you want to use EDID data to be loaded from the
 	  /lib/firmware directory or one of the provided built-in
@@ -266,3 +267,5 @@
 source "drivers/gpu/drm/imx/Kconfig"
 
 source "drivers/gpu/drm/vc4/Kconfig"
+
+source "drivers/gpu/drm/msm-hyp/Kconfig"
diff -ruw linux-4.4.115/drivers/gpu/drm/Makefile linux-4.4.115-fbx/drivers/gpu/drm/Makefile
--- linux-4.4.115/drivers/gpu/drm/Makefile	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/gpu/drm/Makefile	2019-07-19 15:49:45.461091250 +0200
@@ -20,12 +20,12 @@
 drm-$(CONFIG_DRM_PANEL) += drm_panel.o
 drm-$(CONFIG_OF) += drm_of.o
 drm-$(CONFIG_AGP) += drm_agpsupport.o
+drm-$(CONFIG_DRM_LOAD_EDID_FIRMWARE) += drm_edid_load.o
 
 drm-y += $(drm-m)
 
 drm_kms_helper-y := drm_crtc_helper.o drm_dp_helper.o drm_probe_helper.o \
 		drm_plane_helper.o drm_dp_mst_topology.o drm_atomic_helper.o
-drm_kms_helper-$(CONFIG_DRM_LOAD_EDID_FIRMWARE) += drm_edid_load.o
 drm_kms_helper-$(CONFIG_DRM_FBDEV_EMULATION) += drm_fb_helper.o
 drm_kms_helper-$(CONFIG_DRM_KMS_CMA_HELPER) += drm_fb_cma_helper.o
 
@@ -68,6 +68,7 @@
 obj-$(CONFIG_DRM_BOCHS) += bochs/
 obj-$(CONFIG_DRM_VIRTIO_GPU) += virtio/
 obj-$(CONFIG_DRM_MSM) += msm/
+obj-$(CONFIG_DRM_MSM_HYP) += msm-hyp/
 obj-$(CONFIG_DRM_TEGRA) += tegra/
 obj-$(CONFIG_DRM_STI) += sti/
 obj-$(CONFIG_DRM_IMX) += imx/
diff -ruw linux-4.4.115/drivers/gpu/drm/msm/adreno/a3xx_gpu.c linux-4.4.115-fbx/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
--- linux-4.4.115/drivers/gpu/drm/msm/adreno/a3xx_gpu.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/gpu/drm/msm/adreno/a3xx_gpu.c	2019-01-22 16:16:23.479246189 +0100
@@ -2,7 +2,7 @@
  * Copyright (C) 2013 Red Hat
  * Author: Rob Clark <robdclark@gmail.com>
  *
- * Copyright (c) 2014 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014,2018 The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License version 2 as published by
@@ -40,10 +40,11 @@
 extern bool hang_debug;
 
 static void a3xx_dump(struct msm_gpu *gpu);
+static bool a3xx_idle(struct msm_gpu *gpu);
 
-static void a3xx_me_init(struct msm_gpu *gpu)
+static bool a3xx_me_init(struct msm_gpu *gpu)
 {
-	struct msm_ringbuffer *ring = gpu->rb;
+	struct msm_ringbuffer *ring = gpu->rb[0];
 
 	OUT_PKT3(ring, CP_ME_INIT, 17);
 	OUT_RING(ring, 0x000003f7);
@@ -64,8 +65,8 @@
 	OUT_RING(ring, 0x00000000);
 	OUT_RING(ring, 0x00000000);
 
-	gpu->funcs->flush(gpu);
-	gpu->funcs->idle(gpu);
+	gpu->funcs->flush(gpu, ring);
+	return a3xx_idle(gpu);
 }
 
 static int a3xx_hw_init(struct msm_gpu *gpu)
@@ -294,9 +295,7 @@
 	/* clear ME_HALT to start micro engine */
 	gpu_write(gpu, REG_AXXX_CP_ME_CNTL, 0);
 
-	a3xx_me_init(gpu);
-
-	return 0;
+	return a3xx_me_init(gpu) ? 0 : -EINVAL;
 }
 
 static void a3xx_recover(struct msm_gpu *gpu)
@@ -330,17 +329,22 @@
 	kfree(a3xx_gpu);
 }
 
-static void a3xx_idle(struct msm_gpu *gpu)
+static bool a3xx_idle(struct msm_gpu *gpu)
 {
 	/* wait for ringbuffer to drain: */
-	adreno_idle(gpu);
+	if (!adreno_idle(gpu, gpu->rb[0]))
+		return false;
 
 	/* then wait for GPU to finish: */
 	if (spin_until(!(gpu_read(gpu, REG_A3XX_RBBM_STATUS) &
-			A3XX_RBBM_STATUS_GPU_BUSY)))
+			A3XX_RBBM_STATUS_GPU_BUSY))) {
 		DRM_ERROR("%s: timeout waiting for GPU to idle!\n", gpu->name);
 
 		/* TODO maybe we need to reset GPU here to recover from hang? */
+		return false;
+	}
+
+	return true;
 }
 
 static irqreturn_t a3xx_irq(struct msm_gpu *gpu)
@@ -402,10 +406,8 @@
 #ifdef CONFIG_DEBUG_FS
 static void a3xx_show(struct msm_gpu *gpu, struct seq_file *m)
 {
-	gpu->funcs->pm_resume(gpu);
 	seq_printf(m, "status:   %08x\n",
 			gpu_read(gpu, REG_A3XX_RBBM_STATUS));
-	gpu->funcs->pm_suspend(gpu);
 	adreno_show(gpu, m);
 }
 #endif
@@ -419,91 +421,13 @@
 }
 /* Register offset defines for A3XX */
 static const unsigned int a3xx_register_offsets[REG_ADRENO_REGISTER_MAX] = {
-	REG_ADRENO_DEFINE(REG_ADRENO_CP_DEBUG, REG_AXXX_CP_DEBUG),
-	REG_ADRENO_DEFINE(REG_ADRENO_CP_ME_RAM_WADDR, REG_AXXX_CP_ME_RAM_WADDR),
-	REG_ADRENO_DEFINE(REG_ADRENO_CP_ME_RAM_DATA, REG_AXXX_CP_ME_RAM_DATA),
-	REG_ADRENO_DEFINE(REG_ADRENO_CP_PFP_UCODE_DATA,
-			REG_A3XX_CP_PFP_UCODE_DATA),
-	REG_ADRENO_DEFINE(REG_ADRENO_CP_PFP_UCODE_ADDR,
-			REG_A3XX_CP_PFP_UCODE_ADDR),
-	REG_ADRENO_DEFINE(REG_ADRENO_CP_WFI_PEND_CTR, REG_A3XX_CP_WFI_PEND_CTR),
 	REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_BASE, REG_AXXX_CP_RB_BASE),
+	REG_ADRENO_SKIP(REG_ADRENO_CP_RB_BASE_HI),
 	REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR_ADDR, REG_AXXX_CP_RB_RPTR_ADDR),
+	REG_ADRENO_SKIP(REG_ADRENO_CP_RB_RPTR_ADDR_HI),
 	REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR, REG_AXXX_CP_RB_RPTR),
 	REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_WPTR, REG_AXXX_CP_RB_WPTR),
-	REG_ADRENO_DEFINE(REG_ADRENO_CP_PROTECT_CTRL, REG_A3XX_CP_PROTECT_CTRL),
-	REG_ADRENO_DEFINE(REG_ADRENO_CP_ME_CNTL, REG_AXXX_CP_ME_CNTL),
 	REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_CNTL, REG_AXXX_CP_RB_CNTL),
-	REG_ADRENO_DEFINE(REG_ADRENO_CP_IB1_BASE, REG_AXXX_CP_IB1_BASE),
-	REG_ADRENO_DEFINE(REG_ADRENO_CP_IB1_BUFSZ, REG_AXXX_CP_IB1_BUFSZ),
-	REG_ADRENO_DEFINE(REG_ADRENO_CP_IB2_BASE, REG_AXXX_CP_IB2_BASE),
-	REG_ADRENO_DEFINE(REG_ADRENO_CP_IB2_BUFSZ, REG_AXXX_CP_IB2_BUFSZ),
-	REG_ADRENO_DEFINE(REG_ADRENO_CP_TIMESTAMP, REG_AXXX_CP_SCRATCH_REG0),
-	REG_ADRENO_DEFINE(REG_ADRENO_CP_ME_RAM_RADDR, REG_AXXX_CP_ME_RAM_RADDR),
-	REG_ADRENO_DEFINE(REG_ADRENO_SCRATCH_ADDR, REG_AXXX_SCRATCH_ADDR),
-	REG_ADRENO_DEFINE(REG_ADRENO_SCRATCH_UMSK, REG_AXXX_SCRATCH_UMSK),
-	REG_ADRENO_DEFINE(REG_ADRENO_CP_ROQ_ADDR, REG_A3XX_CP_ROQ_ADDR),
-	REG_ADRENO_DEFINE(REG_ADRENO_CP_ROQ_DATA, REG_A3XX_CP_ROQ_DATA),
-	REG_ADRENO_DEFINE(REG_ADRENO_CP_MERCIU_ADDR, REG_A3XX_CP_MERCIU_ADDR),
-	REG_ADRENO_DEFINE(REG_ADRENO_CP_MERCIU_DATA, REG_A3XX_CP_MERCIU_DATA),
-	REG_ADRENO_DEFINE(REG_ADRENO_CP_MERCIU_DATA2, REG_A3XX_CP_MERCIU_DATA2),
-	REG_ADRENO_DEFINE(REG_ADRENO_CP_MEQ_ADDR, REG_A3XX_CP_MEQ_ADDR),
-	REG_ADRENO_DEFINE(REG_ADRENO_CP_MEQ_DATA, REG_A3XX_CP_MEQ_DATA),
-	REG_ADRENO_DEFINE(REG_ADRENO_CP_HW_FAULT, REG_A3XX_CP_HW_FAULT),
-	REG_ADRENO_DEFINE(REG_ADRENO_CP_PROTECT_STATUS,
-			REG_A3XX_CP_PROTECT_STATUS),
-	REG_ADRENO_DEFINE(REG_ADRENO_RBBM_STATUS, REG_A3XX_RBBM_STATUS),
-	REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PERFCTR_CTL,
-			REG_A3XX_RBBM_PERFCTR_CTL),
-	REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PERFCTR_LOAD_CMD0,
-			REG_A3XX_RBBM_PERFCTR_LOAD_CMD0),
-	REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PERFCTR_LOAD_CMD1,
-			REG_A3XX_RBBM_PERFCTR_LOAD_CMD1),
-	REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PERFCTR_PWR_1_LO,
-			REG_A3XX_RBBM_PERFCTR_PWR_1_LO),
-	REG_ADRENO_DEFINE(REG_ADRENO_RBBM_INT_0_MASK, REG_A3XX_RBBM_INT_0_MASK),
-	REG_ADRENO_DEFINE(REG_ADRENO_RBBM_INT_0_STATUS,
-			REG_A3XX_RBBM_INT_0_STATUS),
-	REG_ADRENO_DEFINE(REG_ADRENO_RBBM_AHB_ERROR_STATUS,
-			REG_A3XX_RBBM_AHB_ERROR_STATUS),
-	REG_ADRENO_DEFINE(REG_ADRENO_RBBM_AHB_CMD, REG_A3XX_RBBM_AHB_CMD),
-	REG_ADRENO_DEFINE(REG_ADRENO_RBBM_INT_CLEAR_CMD,
-			REG_A3XX_RBBM_INT_CLEAR_CMD),
-	REG_ADRENO_DEFINE(REG_ADRENO_RBBM_CLOCK_CTL, REG_A3XX_RBBM_CLOCK_CTL),
-	REG_ADRENO_DEFINE(REG_ADRENO_VPC_DEBUG_RAM_SEL,
-			REG_A3XX_VPC_VPC_DEBUG_RAM_SEL),
-	REG_ADRENO_DEFINE(REG_ADRENO_VPC_DEBUG_RAM_READ,
-			REG_A3XX_VPC_VPC_DEBUG_RAM_READ),
-	REG_ADRENO_DEFINE(REG_ADRENO_VSC_SIZE_ADDRESS,
-			REG_A3XX_VSC_SIZE_ADDRESS),
-	REG_ADRENO_DEFINE(REG_ADRENO_VFD_CONTROL_0, REG_A3XX_VFD_CONTROL_0),
-	REG_ADRENO_DEFINE(REG_ADRENO_VFD_INDEX_MAX, REG_A3XX_VFD_INDEX_MAX),
-	REG_ADRENO_DEFINE(REG_ADRENO_SP_VS_PVT_MEM_ADDR_REG,
-			REG_A3XX_SP_VS_PVT_MEM_ADDR_REG),
-	REG_ADRENO_DEFINE(REG_ADRENO_SP_FS_PVT_MEM_ADDR_REG,
-			REG_A3XX_SP_FS_PVT_MEM_ADDR_REG),
-	REG_ADRENO_DEFINE(REG_ADRENO_SP_VS_OBJ_START_REG,
-			REG_A3XX_SP_VS_OBJ_START_REG),
-	REG_ADRENO_DEFINE(REG_ADRENO_SP_FS_OBJ_START_REG,
-			REG_A3XX_SP_FS_OBJ_START_REG),
-	REG_ADRENO_DEFINE(REG_ADRENO_PA_SC_AA_CONFIG, REG_A3XX_PA_SC_AA_CONFIG),
-	REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PM_OVERRIDE2,
-			REG_A3XX_RBBM_PM_OVERRIDE2),
-	REG_ADRENO_DEFINE(REG_ADRENO_SCRATCH_REG2, REG_AXXX_CP_SCRATCH_REG2),
-	REG_ADRENO_DEFINE(REG_ADRENO_SQ_GPR_MANAGEMENT,
-			REG_A3XX_SQ_GPR_MANAGEMENT),
-	REG_ADRENO_DEFINE(REG_ADRENO_SQ_INST_STORE_MANAGMENT,
-			REG_A3XX_SQ_INST_STORE_MANAGMENT),
-	REG_ADRENO_DEFINE(REG_ADRENO_TP0_CHICKEN, REG_A3XX_TP0_CHICKEN),
-	REG_ADRENO_DEFINE(REG_ADRENO_RBBM_RBBM_CTL, REG_A3XX_RBBM_RBBM_CTL),
-	REG_ADRENO_DEFINE(REG_ADRENO_RBBM_SW_RESET_CMD,
-			REG_A3XX_RBBM_SW_RESET_CMD),
-	REG_ADRENO_DEFINE(REG_ADRENO_UCHE_INVALIDATE0,
-			REG_A3XX_UCHE_CACHE_INVALIDATE0_REG),
-	REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PERFCTR_LOAD_VALUE_LO,
-			REG_A3XX_RBBM_PERFCTR_LOAD_VALUE_LO),
-	REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PERFCTR_LOAD_VALUE_HI,
-			REG_A3XX_RBBM_PERFCTR_LOAD_VALUE_HI),
 };
 
 static const struct adreno_gpu_funcs funcs = {
@@ -513,10 +437,10 @@
 		.pm_suspend = msm_gpu_pm_suspend,
 		.pm_resume = msm_gpu_pm_resume,
 		.recover = a3xx_recover,
-		.last_fence = adreno_last_fence,
+		.submitted_fence = adreno_submitted_fence,
 		.submit = adreno_submit,
 		.flush = adreno_flush,
-		.idle = a3xx_idle,
+		.active_ring = adreno_active_ring,
 		.irq = a3xx_irq,
 		.destroy = a3xx_destroy,
 #ifdef CONFIG_DEBUG_FS
@@ -539,6 +463,7 @@
 	struct msm_gpu *gpu;
 	struct msm_drm_private *priv = dev->dev_private;
 	struct platform_device *pdev = priv->gpu_pdev;
+	struct msm_gpu_config a3xx_config = { 0 };
 	int ret;
 
 	if (!pdev) {
@@ -556,15 +481,19 @@
 	adreno_gpu = &a3xx_gpu->base;
 	gpu = &adreno_gpu->base;
 
-	a3xx_gpu->pdev = pdev;
-
 	gpu->perfcntrs = perfcntrs;
 	gpu->num_perfcntrs = ARRAY_SIZE(perfcntrs);
 
 	adreno_gpu->registers = a3xx_registers;
 	adreno_gpu->reg_offsets = a3xx_register_offsets;
 
-	ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs);
+	a3xx_config.ioname = MSM_GPU_DEFAULT_IONAME;
+	a3xx_config.irqname = MSM_GPU_DEFAULT_IRQNAME;
+	a3xx_config.nr_rings = 1;
+	a3xx_config.va_start = 0x300000;
+	a3xx_config.va_end = 0xffffffff;
+
+	ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, &a3xx_config);
 	if (ret)
 		goto fail;
 
@@ -583,7 +512,7 @@
 #endif
 	}
 
-	if (!gpu->mmu) {
+	if (!gpu->aspace) {
 		/* TODO we think it is possible to configure the GPU to
 		 * restrict access to VRAM carveout.  But the required
 		 * registers are unknown.  For now just bail out and
diff -ruw linux-4.4.115/drivers/gpu/drm/msm/adreno/a3xx_gpu.h linux-4.4.115-fbx/drivers/gpu/drm/msm/adreno/a3xx_gpu.h
--- linux-4.4.115/drivers/gpu/drm/msm/adreno/a3xx_gpu.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/gpu/drm/msm/adreno/a3xx_gpu.h	2019-01-22 16:16:23.479246189 +0100
@@ -2,6 +2,8 @@
  * Copyright (C) 2013 Red Hat
  * Author: Rob Clark <robdclark@gmail.com>
  *
+ * Copyright (c) 2018 The Linux Foundation. All rights reserved.
+ *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License version 2 as published by
  * the Free Software Foundation.
@@ -28,7 +30,6 @@
 
 struct a3xx_gpu {
 	struct adreno_gpu base;
-	struct platform_device *pdev;
 
 	/* if OCMEM is used for GMEM: */
 	uint32_t ocmem_base;
diff -ruw linux-4.4.115/drivers/gpu/drm/msm/adreno/a3xx.xml.h linux-4.4.115-fbx/drivers/gpu/drm/msm/adreno/a3xx.xml.h
--- linux-4.4.115/drivers/gpu/drm/msm/adreno/a3xx.xml.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/gpu/drm/msm/adreno/a3xx.xml.h	2019-01-22 16:16:23.479246189 +0100
@@ -8,17 +8,19 @@
 git clone https://github.com/freedreno/envytools.git
 
 The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml               (    398 bytes, from 2015-09-24 17:25:31)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml  (   1453 bytes, from 2015-05-20 20:03:07)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml          (  32901 bytes, from 2015-05-20 20:03:14)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml (  10755 bytes, from 2015-09-14 20:46:55)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml    (  14968 bytes, from 2015-05-20 20:12:27)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml          (  67771 bytes, from 2015-09-14 20:46:55)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml          (  63970 bytes, from 2015-09-14 20:50:12)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/ocmem.xml         (   1773 bytes, from 2015-09-24 17:30:00)
+- ./adreno.xml               (    431 bytes, from 2016-10-24 21:12:27)
+- ./freedreno_copyright.xml  (   1572 bytes, from 2016-10-24 21:12:27)
+- ./adreno/a2xx.xml          (  32901 bytes, from 2016-10-24 21:12:27)
+- ./adreno/adreno_common.xml (  12025 bytes, from 2016-10-24 21:12:27)
+- ./adreno/adreno_pm4.xml    (  19684 bytes, from 2016-10-24 21:12:27)
+- ./adreno/a3xx.xml          (  83840 bytes, from 2016-10-24 21:12:27)
+- ./adreno/a4xx.xml          ( 110708 bytes, from 2016-10-24 21:12:27)
+- ./adreno/a5xx.xml          (  81207 bytes, from 2016-10-26 19:36:59)
+- ./adreno/ocmem.xml         (   1773 bytes, from 2016-10-24 21:12:27)
 
-Copyright (C) 2013-2015 by the following authors:
+Copyright (C) 2013-2016 by the following authors:
 - Rob Clark <robdclark@gmail.com> (robclark)
+- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
 
 Permission is hereby granted, free of charge, to any person obtaining
 a copy of this software and associated documentation files (the
@@ -111,10 +113,14 @@
 	VFMT_8_8_SNORM = 53,
 	VFMT_8_8_8_SNORM = 54,
 	VFMT_8_8_8_8_SNORM = 55,
-	VFMT_10_10_10_2_UINT = 60,
-	VFMT_10_10_10_2_UNORM = 61,
-	VFMT_10_10_10_2_SINT = 62,
-	VFMT_10_10_10_2_SNORM = 63,
+	VFMT_10_10_10_2_UINT = 56,
+	VFMT_10_10_10_2_UNORM = 57,
+	VFMT_10_10_10_2_SINT = 58,
+	VFMT_10_10_10_2_SNORM = 59,
+	VFMT_2_10_10_10_UINT = 60,
+	VFMT_2_10_10_10_UNORM = 61,
+	VFMT_2_10_10_10_SINT = 62,
+	VFMT_2_10_10_10_SNORM = 63,
 };
 
 enum a3xx_tex_fmt {
@@ -124,10 +130,14 @@
 	TFMT_Z16_UNORM = 9,
 	TFMT_X8Z24_UNORM = 10,
 	TFMT_Z32_FLOAT = 11,
-	TFMT_NV12_UV_TILED = 17,
-	TFMT_NV12_Y_TILED = 19,
-	TFMT_NV12_UV = 21,
-	TFMT_NV12_Y = 23,
+	TFMT_UV_64X32 = 16,
+	TFMT_VU_64X32 = 17,
+	TFMT_Y_64X32 = 18,
+	TFMT_NV12_64X32 = 19,
+	TFMT_UV_LINEAR = 20,
+	TFMT_VU_LINEAR = 21,
+	TFMT_Y_LINEAR = 22,
+	TFMT_NV12_LINEAR = 23,
 	TFMT_I420_Y = 24,
 	TFMT_I420_U = 26,
 	TFMT_I420_V = 27,
@@ -138,10 +148,12 @@
 	TFMT_DXT1 = 36,
 	TFMT_DXT3 = 37,
 	TFMT_DXT5 = 38,
+	TFMT_2_10_10_10_UNORM = 40,
 	TFMT_10_10_10_2_UNORM = 41,
 	TFMT_9_9_9_E5_FLOAT = 42,
 	TFMT_11_11_10_FLOAT = 43,
 	TFMT_A8_UNORM = 44,
+	TFMT_L8_UNORM = 45,
 	TFMT_L8_A8_UNORM = 47,
 	TFMT_8_UNORM = 48,
 	TFMT_8_8_UNORM = 49,
@@ -183,6 +195,8 @@
 	TFMT_32_SINT = 92,
 	TFMT_32_32_SINT = 93,
 	TFMT_32_32_32_32_SINT = 95,
+	TFMT_2_10_10_10_UINT = 96,
+	TFMT_10_10_10_2_UINT = 97,
 	TFMT_ETC2_RG11_SNORM = 112,
 	TFMT_ETC2_RG11_UNORM = 113,
 	TFMT_ETC2_R11_SNORM = 114,
@@ -215,6 +229,9 @@
 	RB_R8_UINT = 14,
 	RB_R8_SINT = 15,
 	RB_R10G10B10A2_UNORM = 16,
+	RB_A2R10G10B10_UNORM = 17,
+	RB_R10G10B10A2_UINT = 18,
+	RB_A2R10G10B10_UINT = 19,
 	RB_A8_UNORM = 20,
 	RB_R8_UNORM = 21,
 	RB_R16_FLOAT = 24,
@@ -244,38 +261,273 @@
 	RB_R32G32B32A32_UINT = 59,
 };
 
+enum a3xx_cp_perfcounter_select {
+	CP_ALWAYS_COUNT = 0,
+	CP_AHB_PFPTRANS_WAIT = 3,
+	CP_AHB_NRTTRANS_WAIT = 6,
+	CP_CSF_NRT_READ_WAIT = 8,
+	CP_CSF_I1_FIFO_FULL = 9,
+	CP_CSF_I2_FIFO_FULL = 10,
+	CP_CSF_ST_FIFO_FULL = 11,
+	CP_RESERVED_12 = 12,
+	CP_CSF_RING_ROQ_FULL = 13,
+	CP_CSF_I1_ROQ_FULL = 14,
+	CP_CSF_I2_ROQ_FULL = 15,
+	CP_CSF_ST_ROQ_FULL = 16,
+	CP_RESERVED_17 = 17,
+	CP_MIU_TAG_MEM_FULL = 18,
+	CP_MIU_NRT_WRITE_STALLED = 22,
+	CP_MIU_NRT_READ_STALLED = 23,
+	CP_ME_REGS_RB_DONE_FIFO_FULL = 26,
+	CP_ME_REGS_VS_EVENT_FIFO_FULL = 27,
+	CP_ME_REGS_PS_EVENT_FIFO_FULL = 28,
+	CP_ME_REGS_CF_EVENT_FIFO_FULL = 29,
+	CP_ME_MICRO_RB_STARVED = 30,
+	CP_AHB_RBBM_DWORD_SENT = 40,
+	CP_ME_BUSY_CLOCKS = 41,
+	CP_ME_WAIT_CONTEXT_AVAIL = 42,
+	CP_PFP_TYPE0_PACKET = 43,
+	CP_PFP_TYPE3_PACKET = 44,
+	CP_CSF_RB_WPTR_NEQ_RPTR = 45,
+	CP_CSF_I1_SIZE_NEQ_ZERO = 46,
+	CP_CSF_I2_SIZE_NEQ_ZERO = 47,
+	CP_CSF_RBI1I2_FETCHING = 48,
+};
+
+enum a3xx_gras_tse_perfcounter_select {
+	GRAS_TSEPERF_INPUT_PRIM = 0,
+	GRAS_TSEPERF_INPUT_NULL_PRIM = 1,
+	GRAS_TSEPERF_TRIVAL_REJ_PRIM = 2,
+	GRAS_TSEPERF_CLIPPED_PRIM = 3,
+	GRAS_TSEPERF_NEW_PRIM = 4,
+	GRAS_TSEPERF_ZERO_AREA_PRIM = 5,
+	GRAS_TSEPERF_FACENESS_CULLED_PRIM = 6,
+	GRAS_TSEPERF_ZERO_PIXEL_PRIM = 7,
+	GRAS_TSEPERF_OUTPUT_NULL_PRIM = 8,
+	GRAS_TSEPERF_OUTPUT_VISIBLE_PRIM = 9,
+	GRAS_TSEPERF_PRE_CLIP_PRIM = 10,
+	GRAS_TSEPERF_POST_CLIP_PRIM = 11,
+	GRAS_TSEPERF_WORKING_CYCLES = 12,
+	GRAS_TSEPERF_PC_STARVE = 13,
+	GRAS_TSERASPERF_STALL = 14,
+};
+
+enum a3xx_gras_ras_perfcounter_select {
+	GRAS_RASPERF_16X16_TILES = 0,
+	GRAS_RASPERF_8X8_TILES = 1,
+	GRAS_RASPERF_4X4_TILES = 2,
+	GRAS_RASPERF_WORKING_CYCLES = 3,
+	GRAS_RASPERF_STALL_CYCLES_BY_RB = 4,
+	GRAS_RASPERF_STALL_CYCLES_BY_VSC = 5,
+	GRAS_RASPERF_STARVE_CYCLES_BY_TSE = 6,
+};
+
+enum a3xx_hlsq_perfcounter_select {
+	HLSQ_PERF_SP_VS_CONSTANT = 0,
+	HLSQ_PERF_SP_VS_INSTRUCTIONS = 1,
+	HLSQ_PERF_SP_FS_CONSTANT = 2,
+	HLSQ_PERF_SP_FS_INSTRUCTIONS = 3,
+	HLSQ_PERF_TP_STATE = 4,
+	HLSQ_PERF_QUADS = 5,
+	HLSQ_PERF_PIXELS = 6,
+	HLSQ_PERF_VERTICES = 7,
+	HLSQ_PERF_FS8_THREADS = 8,
+	HLSQ_PERF_FS16_THREADS = 9,
+	HLSQ_PERF_FS32_THREADS = 10,
+	HLSQ_PERF_VS8_THREADS = 11,
+	HLSQ_PERF_VS16_THREADS = 12,
+	HLSQ_PERF_SP_VS_DATA_BYTES = 13,
+	HLSQ_PERF_SP_FS_DATA_BYTES = 14,
+	HLSQ_PERF_ACTIVE_CYCLES = 15,
+	HLSQ_PERF_STALL_CYCLES_SP_STATE = 16,
+	HLSQ_PERF_STALL_CYCLES_SP_VS = 17,
+	HLSQ_PERF_STALL_CYCLES_SP_FS = 18,
+	HLSQ_PERF_STALL_CYCLES_UCHE = 19,
+	HLSQ_PERF_RBBM_LOAD_CYCLES = 20,
+	HLSQ_PERF_DI_TO_VS_START_SP0 = 21,
+	HLSQ_PERF_DI_TO_FS_START_SP0 = 22,
+	HLSQ_PERF_VS_START_TO_DONE_SP0 = 23,
+	HLSQ_PERF_FS_START_TO_DONE_SP0 = 24,
+	HLSQ_PERF_SP_STATE_COPY_CYCLES_VS = 25,
+	HLSQ_PERF_SP_STATE_COPY_CYCLES_FS = 26,
+	HLSQ_PERF_UCHE_LATENCY_CYCLES = 27,
+	HLSQ_PERF_UCHE_LATENCY_COUNT = 28,
+};
+
+enum a3xx_pc_perfcounter_select {
+	PC_PCPERF_VISIBILITY_STREAMS = 0,
+	PC_PCPERF_TOTAL_INSTANCES = 1,
+	PC_PCPERF_PRIMITIVES_PC_VPC = 2,
+	PC_PCPERF_PRIMITIVES_KILLED_BY_VS = 3,
+	PC_PCPERF_PRIMITIVES_VISIBLE_BY_VS = 4,
+	PC_PCPERF_DRAWCALLS_KILLED_BY_VS = 5,
+	PC_PCPERF_DRAWCALLS_VISIBLE_BY_VS = 6,
+	PC_PCPERF_VERTICES_TO_VFD = 7,
+	PC_PCPERF_REUSED_VERTICES = 8,
+	PC_PCPERF_CYCLES_STALLED_BY_VFD = 9,
+	PC_PCPERF_CYCLES_STALLED_BY_TSE = 10,
+	PC_PCPERF_CYCLES_STALLED_BY_VBIF = 11,
+	PC_PCPERF_CYCLES_IS_WORKING = 12,
+};
+
+enum a3xx_rb_perfcounter_select {
+	RB_RBPERF_ACTIVE_CYCLES_ANY = 0,
+	RB_RBPERF_ACTIVE_CYCLES_ALL = 1,
+	RB_RBPERF_STARVE_CYCLES_BY_SP = 2,
+	RB_RBPERF_STARVE_CYCLES_BY_RAS = 3,
+	RB_RBPERF_STARVE_CYCLES_BY_MARB = 4,
+	RB_RBPERF_STALL_CYCLES_BY_MARB = 5,
+	RB_RBPERF_STALL_CYCLES_BY_HLSQ = 6,
+	RB_RBPERF_RB_MARB_DATA = 7,
+	RB_RBPERF_SP_RB_QUAD = 8,
+	RB_RBPERF_RAS_EARLY_Z_QUADS = 9,
+	RB_RBPERF_GMEM_CH0_READ = 10,
+	RB_RBPERF_GMEM_CH1_READ = 11,
+	RB_RBPERF_GMEM_CH0_WRITE = 12,
+	RB_RBPERF_GMEM_CH1_WRITE = 13,
+	RB_RBPERF_CP_CONTEXT_DONE = 14,
+	RB_RBPERF_CP_CACHE_FLUSH = 15,
+	RB_RBPERF_CP_ZPASS_DONE = 16,
+};
+
+enum a3xx_rbbm_perfcounter_select {
+	RBBM_ALAWYS_ON = 0,
+	RBBM_VBIF_BUSY = 1,
+	RBBM_TSE_BUSY = 2,
+	RBBM_RAS_BUSY = 3,
+	RBBM_PC_DCALL_BUSY = 4,
+	RBBM_PC_VSD_BUSY = 5,
+	RBBM_VFD_BUSY = 6,
+	RBBM_VPC_BUSY = 7,
+	RBBM_UCHE_BUSY = 8,
+	RBBM_VSC_BUSY = 9,
+	RBBM_HLSQ_BUSY = 10,
+	RBBM_ANY_RB_BUSY = 11,
+	RBBM_ANY_TEX_BUSY = 12,
+	RBBM_ANY_USP_BUSY = 13,
+	RBBM_ANY_MARB_BUSY = 14,
+	RBBM_ANY_ARB_BUSY = 15,
+	RBBM_AHB_STATUS_BUSY = 16,
+	RBBM_AHB_STATUS_STALLED = 17,
+	RBBM_AHB_STATUS_TXFR = 18,
+	RBBM_AHB_STATUS_TXFR_SPLIT = 19,
+	RBBM_AHB_STATUS_TXFR_ERROR = 20,
+	RBBM_AHB_STATUS_LONG_STALL = 21,
+	RBBM_RBBM_STATUS_MASKED = 22,
+};
+
 enum a3xx_sp_perfcounter_select {
+	SP_LM_LOAD_INSTRUCTIONS = 0,
+	SP_LM_STORE_INSTRUCTIONS = 1,
+	SP_LM_ATOMICS = 2,
+	SP_UCHE_LOAD_INSTRUCTIONS = 3,
+	SP_UCHE_STORE_INSTRUCTIONS = 4,
+	SP_UCHE_ATOMICS = 5,
+	SP_VS_TEX_INSTRUCTIONS = 6,
+	SP_VS_CFLOW_INSTRUCTIONS = 7,
+	SP_VS_EFU_INSTRUCTIONS = 8,
+	SP_VS_FULL_ALU_INSTRUCTIONS = 9,
+	SP_VS_HALF_ALU_INSTRUCTIONS = 10,
+	SP_FS_TEX_INSTRUCTIONS = 11,
 	SP_FS_CFLOW_INSTRUCTIONS = 12,
+	SP_FS_EFU_INSTRUCTIONS = 13,
 	SP_FS_FULL_ALU_INSTRUCTIONS = 14,
-	SP0_ICL1_MISSES = 26,
+	SP_FS_HALF_ALU_INSTRUCTIONS = 15,
+	SP_FS_BARY_INSTRUCTIONS = 16,
+	SP_VS_INSTRUCTIONS = 17,
+	SP_FS_INSTRUCTIONS = 18,
+	SP_ADDR_LOCK_COUNT = 19,
+	SP_UCHE_READ_TRANS = 20,
+	SP_UCHE_WRITE_TRANS = 21,
+	SP_EXPORT_VPC_TRANS = 22,
+	SP_EXPORT_RB_TRANS = 23,
+	SP_PIXELS_KILLED = 24,
+	SP_ICL1_REQUESTS = 25,
+	SP_ICL1_MISSES = 26,
+	SP_ICL0_REQUESTS = 27,
+	SP_ICL0_MISSES = 28,
 	SP_ALU_ACTIVE_CYCLES = 29,
+	SP_EFU_ACTIVE_CYCLES = 30,
+	SP_STALL_CYCLES_BY_VPC = 31,
+	SP_STALL_CYCLES_BY_TP = 32,
+	SP_STALL_CYCLES_BY_UCHE = 33,
+	SP_STALL_CYCLES_BY_RB = 34,
+	SP_ACTIVE_CYCLES_ANY = 35,
+	SP_ACTIVE_CYCLES_ALL = 36,
+};
+
+enum a3xx_tp_perfcounter_select {
+	TPL1_TPPERF_L1_REQUESTS = 0,
+	TPL1_TPPERF_TP0_L1_REQUESTS = 1,
+	TPL1_TPPERF_TP0_L1_MISSES = 2,
+	TPL1_TPPERF_TP1_L1_REQUESTS = 3,
+	TPL1_TPPERF_TP1_L1_MISSES = 4,
+	TPL1_TPPERF_TP2_L1_REQUESTS = 5,
+	TPL1_TPPERF_TP2_L1_MISSES = 6,
+	TPL1_TPPERF_TP3_L1_REQUESTS = 7,
+	TPL1_TPPERF_TP3_L1_MISSES = 8,
+	TPL1_TPPERF_OUTPUT_TEXELS_POINT = 9,
+	TPL1_TPPERF_OUTPUT_TEXELS_BILINEAR = 10,
+	TPL1_TPPERF_OUTPUT_TEXELS_MIP = 11,
+	TPL1_TPPERF_OUTPUT_TEXELS_ANISO = 12,
+	TPL1_TPPERF_BILINEAR_OPS = 13,
+	TPL1_TPPERF_QUADSQUADS_OFFSET = 14,
+	TPL1_TPPERF_QUADQUADS_SHADOW = 15,
+	TPL1_TPPERF_QUADS_ARRAY = 16,
+	TPL1_TPPERF_QUADS_PROJECTION = 17,
+	TPL1_TPPERF_QUADS_GRADIENT = 18,
+	TPL1_TPPERF_QUADS_1D2D = 19,
+	TPL1_TPPERF_QUADS_3DCUBE = 20,
+	TPL1_TPPERF_ZERO_LOD = 21,
+	TPL1_TPPERF_OUTPUT_TEXELS = 22,
+	TPL1_TPPERF_ACTIVE_CYCLES_ANY = 23,
+	TPL1_TPPERF_ACTIVE_CYCLES_ALL = 24,
+	TPL1_TPPERF_STALL_CYCLES_BY_ARB = 25,
+	TPL1_TPPERF_LATENCY = 26,
+	TPL1_TPPERF_LATENCY_TRANS = 27,
 };
 
-enum a3xx_rop_code {
-	ROP_CLEAR = 0,
-	ROP_NOR = 1,
-	ROP_AND_INVERTED = 2,
-	ROP_COPY_INVERTED = 3,
-	ROP_AND_REVERSE = 4,
-	ROP_INVERT = 5,
-	ROP_XOR = 6,
-	ROP_NAND = 7,
-	ROP_AND = 8,
-	ROP_EQUIV = 9,
-	ROP_NOOP = 10,
-	ROP_OR_INVERTED = 11,
-	ROP_COPY = 12,
-	ROP_OR_REVERSE = 13,
-	ROP_OR = 14,
-	ROP_SET = 15,
-};
-
-enum a3xx_rb_blend_opcode {
-	BLEND_DST_PLUS_SRC = 0,
-	BLEND_SRC_MINUS_DST = 1,
-	BLEND_DST_MINUS_SRC = 2,
-	BLEND_MIN_DST_SRC = 3,
-	BLEND_MAX_DST_SRC = 4,
+enum a3xx_vfd_perfcounter_select {
+	VFD_PERF_UCHE_BYTE_FETCHED = 0,
+	VFD_PERF_UCHE_TRANS = 1,
+	VFD_PERF_VPC_BYPASS_COMPONENTS = 2,
+	VFD_PERF_FETCH_INSTRUCTIONS = 3,
+	VFD_PERF_DECODE_INSTRUCTIONS = 4,
+	VFD_PERF_ACTIVE_CYCLES = 5,
+	VFD_PERF_STALL_CYCLES_UCHE = 6,
+	VFD_PERF_STALL_CYCLES_HLSQ = 7,
+	VFD_PERF_STALL_CYCLES_VPC_BYPASS = 8,
+	VFD_PERF_STALL_CYCLES_VPC_ALLOC = 9,
+};
+
+enum a3xx_vpc_perfcounter_select {
+	VPC_PERF_SP_LM_PRIMITIVES = 0,
+	VPC_PERF_COMPONENTS_FROM_SP = 1,
+	VPC_PERF_SP_LM_COMPONENTS = 2,
+	VPC_PERF_ACTIVE_CYCLES = 3,
+	VPC_PERF_STALL_CYCLES_LM = 4,
+	VPC_PERF_STALL_CYCLES_RAS = 5,
+};
+
+enum a3xx_uche_perfcounter_select {
+	UCHE_UCHEPERF_VBIF_READ_BEATS_TP = 0,
+	UCHE_UCHEPERF_VBIF_READ_BEATS_VFD = 1,
+	UCHE_UCHEPERF_VBIF_READ_BEATS_HLSQ = 2,
+	UCHE_UCHEPERF_VBIF_READ_BEATS_MARB = 3,
+	UCHE_UCHEPERF_VBIF_READ_BEATS_SP = 4,
+	UCHE_UCHEPERF_READ_REQUESTS_TP = 8,
+	UCHE_UCHEPERF_READ_REQUESTS_VFD = 9,
+	UCHE_UCHEPERF_READ_REQUESTS_HLSQ = 10,
+	UCHE_UCHEPERF_READ_REQUESTS_MARB = 11,
+	UCHE_UCHEPERF_READ_REQUESTS_SP = 12,
+	UCHE_UCHEPERF_WRITE_REQUESTS_MARB = 13,
+	UCHE_UCHEPERF_WRITE_REQUESTS_SP = 14,
+	UCHE_UCHEPERF_TAG_CHECK_FAILS = 15,
+	UCHE_UCHEPERF_EVICTS = 16,
+	UCHE_UCHEPERF_FLUSHES = 17,
+	UCHE_UCHEPERF_VBIF_LATENCY_CYCLES = 18,
+	UCHE_UCHEPERF_VBIF_LATENCY_SAMPLES = 19,
+	UCHE_UCHEPERF_ACTIVE_CYCLES = 20,
 };
 
 enum a3xx_intp_mode {
@@ -1138,13 +1390,14 @@
 {
 	return ((val) << A3XX_RB_COPY_CONTROL_MODE__SHIFT) & A3XX_RB_COPY_CONTROL_MODE__MASK;
 }
+#define A3XX_RB_COPY_CONTROL_MSAA_SRGB_DOWNSAMPLE		0x00000080
 #define A3XX_RB_COPY_CONTROL_FASTCLEAR__MASK			0x00000f00
 #define A3XX_RB_COPY_CONTROL_FASTCLEAR__SHIFT			8
 static inline uint32_t A3XX_RB_COPY_CONTROL_FASTCLEAR(uint32_t val)
 {
 	return ((val) << A3XX_RB_COPY_CONTROL_FASTCLEAR__SHIFT) & A3XX_RB_COPY_CONTROL_FASTCLEAR__MASK;
 }
-#define A3XX_RB_COPY_CONTROL_UNK12				0x00001000
+#define A3XX_RB_COPY_CONTROL_DEPTH32_RESOLVE			0x00001000
 #define A3XX_RB_COPY_CONTROL_GMEM_BASE__MASK			0xffffc000
 #define A3XX_RB_COPY_CONTROL_GMEM_BASE__SHIFT			14
 static inline uint32_t A3XX_RB_COPY_CONTROL_GMEM_BASE(uint32_t val)
@@ -1217,7 +1470,7 @@
 {
 	return ((val) << A3XX_RB_DEPTH_CONTROL_ZFUNC__SHIFT) & A3XX_RB_DEPTH_CONTROL_ZFUNC__MASK;
 }
-#define A3XX_RB_DEPTH_CONTROL_BF_ENABLE				0x00000080
+#define A3XX_RB_DEPTH_CONTROL_Z_CLAMP_ENABLE			0x00000080
 #define A3XX_RB_DEPTH_CONTROL_Z_TEST_ENABLE			0x80000000
 
 #define REG_A3XX_RB_DEPTH_CLEAR					0x00002101
@@ -1429,15 +1682,23 @@
 #define REG_A3XX_PC_RESTART_INDEX				0x000021ed
 
 #define REG_A3XX_HLSQ_CONTROL_0_REG				0x00002200
-#define A3XX_HLSQ_CONTROL_0_REG_FSTHREADSIZE__MASK		0x00000010
+#define A3XX_HLSQ_CONTROL_0_REG_FSTHREADSIZE__MASK		0x00000030
 #define A3XX_HLSQ_CONTROL_0_REG_FSTHREADSIZE__SHIFT		4
 static inline uint32_t A3XX_HLSQ_CONTROL_0_REG_FSTHREADSIZE(enum a3xx_threadsize val)
 {
 	return ((val) << A3XX_HLSQ_CONTROL_0_REG_FSTHREADSIZE__SHIFT) & A3XX_HLSQ_CONTROL_0_REG_FSTHREADSIZE__MASK;
 }
 #define A3XX_HLSQ_CONTROL_0_REG_FSSUPERTHREADENABLE		0x00000040
+#define A3XX_HLSQ_CONTROL_0_REG_COMPUTEMODE			0x00000100
 #define A3XX_HLSQ_CONTROL_0_REG_SPSHADERRESTART			0x00000200
 #define A3XX_HLSQ_CONTROL_0_REG_RESERVED2			0x00000400
+#define A3XX_HLSQ_CONTROL_0_REG_CYCLETIMEOUTLIMITVPC__MASK	0x00fff000
+#define A3XX_HLSQ_CONTROL_0_REG_CYCLETIMEOUTLIMITVPC__SHIFT	12
+static inline uint32_t A3XX_HLSQ_CONTROL_0_REG_CYCLETIMEOUTLIMITVPC(uint32_t val)
+{
+	return ((val) << A3XX_HLSQ_CONTROL_0_REG_CYCLETIMEOUTLIMITVPC__SHIFT) & A3XX_HLSQ_CONTROL_0_REG_CYCLETIMEOUTLIMITVPC__MASK;
+}
+#define A3XX_HLSQ_CONTROL_0_REG_FSONLYTEX			0x02000000
 #define A3XX_HLSQ_CONTROL_0_REG_CHUNKDISABLE			0x04000000
 #define A3XX_HLSQ_CONTROL_0_REG_CONSTMODE__MASK			0x08000000
 #define A3XX_HLSQ_CONTROL_0_REG_CONSTMODE__SHIFT		27
@@ -1451,17 +1712,39 @@
 #define A3XX_HLSQ_CONTROL_0_REG_SINGLECONTEXT			0x80000000
 
 #define REG_A3XX_HLSQ_CONTROL_1_REG				0x00002201
-#define A3XX_HLSQ_CONTROL_1_REG_VSTHREADSIZE__MASK		0x00000040
+#define A3XX_HLSQ_CONTROL_1_REG_VSTHREADSIZE__MASK		0x000000c0
 #define A3XX_HLSQ_CONTROL_1_REG_VSTHREADSIZE__SHIFT		6
 static inline uint32_t A3XX_HLSQ_CONTROL_1_REG_VSTHREADSIZE(enum a3xx_threadsize val)
 {
 	return ((val) << A3XX_HLSQ_CONTROL_1_REG_VSTHREADSIZE__SHIFT) & A3XX_HLSQ_CONTROL_1_REG_VSTHREADSIZE__MASK;
 }
 #define A3XX_HLSQ_CONTROL_1_REG_VSSUPERTHREADENABLE		0x00000100
-#define A3XX_HLSQ_CONTROL_1_REG_RESERVED1			0x00000200
-#define A3XX_HLSQ_CONTROL_1_REG_ZWCOORD				0x02000000
+#define A3XX_HLSQ_CONTROL_1_REG_FRAGCOORDXYREGID__MASK		0x00ff0000
+#define A3XX_HLSQ_CONTROL_1_REG_FRAGCOORDXYREGID__SHIFT		16
+static inline uint32_t A3XX_HLSQ_CONTROL_1_REG_FRAGCOORDXYREGID(uint32_t val)
+{
+	return ((val) << A3XX_HLSQ_CONTROL_1_REG_FRAGCOORDXYREGID__SHIFT) & A3XX_HLSQ_CONTROL_1_REG_FRAGCOORDXYREGID__MASK;
+}
+#define A3XX_HLSQ_CONTROL_1_REG_FRAGCOORDZWREGID__MASK		0xff000000
+#define A3XX_HLSQ_CONTROL_1_REG_FRAGCOORDZWREGID__SHIFT		24
+static inline uint32_t A3XX_HLSQ_CONTROL_1_REG_FRAGCOORDZWREGID(uint32_t val)
+{
+	return ((val) << A3XX_HLSQ_CONTROL_1_REG_FRAGCOORDZWREGID__SHIFT) & A3XX_HLSQ_CONTROL_1_REG_FRAGCOORDZWREGID__MASK;
+}
 
 #define REG_A3XX_HLSQ_CONTROL_2_REG				0x00002202
+#define A3XX_HLSQ_CONTROL_2_REG_FACENESSREGID__MASK		0x000003fc
+#define A3XX_HLSQ_CONTROL_2_REG_FACENESSREGID__SHIFT		2
+static inline uint32_t A3XX_HLSQ_CONTROL_2_REG_FACENESSREGID(uint32_t val)
+{
+	return ((val) << A3XX_HLSQ_CONTROL_2_REG_FACENESSREGID__SHIFT) & A3XX_HLSQ_CONTROL_2_REG_FACENESSREGID__MASK;
+}
+#define A3XX_HLSQ_CONTROL_2_REG_COVVALUEREGID__MASK		0x03fc0000
+#define A3XX_HLSQ_CONTROL_2_REG_COVVALUEREGID__SHIFT		18
+static inline uint32_t A3XX_HLSQ_CONTROL_2_REG_COVVALUEREGID(uint32_t val)
+{
+	return ((val) << A3XX_HLSQ_CONTROL_2_REG_COVVALUEREGID__SHIFT) & A3XX_HLSQ_CONTROL_2_REG_COVVALUEREGID__MASK;
+}
 #define A3XX_HLSQ_CONTROL_2_REG_PRIMALLOCTHRESHOLD__MASK	0xfc000000
 #define A3XX_HLSQ_CONTROL_2_REG_PRIMALLOCTHRESHOLD__SHIFT	26
 static inline uint32_t A3XX_HLSQ_CONTROL_2_REG_PRIMALLOCTHRESHOLD(uint32_t val)
@@ -1478,13 +1761,13 @@
 }
 
 #define REG_A3XX_HLSQ_VS_CONTROL_REG				0x00002204
-#define A3XX_HLSQ_VS_CONTROL_REG_CONSTLENGTH__MASK		0x00000fff
+#define A3XX_HLSQ_VS_CONTROL_REG_CONSTLENGTH__MASK		0x000003ff
 #define A3XX_HLSQ_VS_CONTROL_REG_CONSTLENGTH__SHIFT		0
 static inline uint32_t A3XX_HLSQ_VS_CONTROL_REG_CONSTLENGTH(uint32_t val)
 {
 	return ((val) << A3XX_HLSQ_VS_CONTROL_REG_CONSTLENGTH__SHIFT) & A3XX_HLSQ_VS_CONTROL_REG_CONSTLENGTH__MASK;
 }
-#define A3XX_HLSQ_VS_CONTROL_REG_CONSTSTARTOFFSET__MASK		0x00fff000
+#define A3XX_HLSQ_VS_CONTROL_REG_CONSTSTARTOFFSET__MASK		0x001ff000
 #define A3XX_HLSQ_VS_CONTROL_REG_CONSTSTARTOFFSET__SHIFT	12
 static inline uint32_t A3XX_HLSQ_VS_CONTROL_REG_CONSTSTARTOFFSET(uint32_t val)
 {
@@ -1498,13 +1781,13 @@
 }
 
 #define REG_A3XX_HLSQ_FS_CONTROL_REG				0x00002205
-#define A3XX_HLSQ_FS_CONTROL_REG_CONSTLENGTH__MASK		0x00000fff
+#define A3XX_HLSQ_FS_CONTROL_REG_CONSTLENGTH__MASK		0x000003ff
 #define A3XX_HLSQ_FS_CONTROL_REG_CONSTLENGTH__SHIFT		0
 static inline uint32_t A3XX_HLSQ_FS_CONTROL_REG_CONSTLENGTH(uint32_t val)
 {
 	return ((val) << A3XX_HLSQ_FS_CONTROL_REG_CONSTLENGTH__SHIFT) & A3XX_HLSQ_FS_CONTROL_REG_CONSTLENGTH__MASK;
 }
-#define A3XX_HLSQ_FS_CONTROL_REG_CONSTSTARTOFFSET__MASK		0x00fff000
+#define A3XX_HLSQ_FS_CONTROL_REG_CONSTSTARTOFFSET__MASK		0x001ff000
 #define A3XX_HLSQ_FS_CONTROL_REG_CONSTSTARTOFFSET__SHIFT	12
 static inline uint32_t A3XX_HLSQ_FS_CONTROL_REG_CONSTSTARTOFFSET(uint32_t val)
 {
@@ -1518,13 +1801,13 @@
 }
 
 #define REG_A3XX_HLSQ_CONST_VSPRESV_RANGE_REG			0x00002206
-#define A3XX_HLSQ_CONST_VSPRESV_RANGE_REG_STARTENTRY__MASK	0x0000ffff
+#define A3XX_HLSQ_CONST_VSPRESV_RANGE_REG_STARTENTRY__MASK	0x000001ff
 #define A3XX_HLSQ_CONST_VSPRESV_RANGE_REG_STARTENTRY__SHIFT	0
 static inline uint32_t A3XX_HLSQ_CONST_VSPRESV_RANGE_REG_STARTENTRY(uint32_t val)
 {
 	return ((val) << A3XX_HLSQ_CONST_VSPRESV_RANGE_REG_STARTENTRY__SHIFT) & A3XX_HLSQ_CONST_VSPRESV_RANGE_REG_STARTENTRY__MASK;
 }
-#define A3XX_HLSQ_CONST_VSPRESV_RANGE_REG_ENDENTRY__MASK	0xffff0000
+#define A3XX_HLSQ_CONST_VSPRESV_RANGE_REG_ENDENTRY__MASK	0x01ff0000
 #define A3XX_HLSQ_CONST_VSPRESV_RANGE_REG_ENDENTRY__SHIFT	16
 static inline uint32_t A3XX_HLSQ_CONST_VSPRESV_RANGE_REG_ENDENTRY(uint32_t val)
 {
@@ -1532,13 +1815,13 @@
 }
 
 #define REG_A3XX_HLSQ_CONST_FSPRESV_RANGE_REG			0x00002207
-#define A3XX_HLSQ_CONST_FSPRESV_RANGE_REG_STARTENTRY__MASK	0x0000ffff
+#define A3XX_HLSQ_CONST_FSPRESV_RANGE_REG_STARTENTRY__MASK	0x000001ff
 #define A3XX_HLSQ_CONST_FSPRESV_RANGE_REG_STARTENTRY__SHIFT	0
 static inline uint32_t A3XX_HLSQ_CONST_FSPRESV_RANGE_REG_STARTENTRY(uint32_t val)
 {
 	return ((val) << A3XX_HLSQ_CONST_FSPRESV_RANGE_REG_STARTENTRY__SHIFT) & A3XX_HLSQ_CONST_FSPRESV_RANGE_REG_STARTENTRY__MASK;
 }
-#define A3XX_HLSQ_CONST_FSPRESV_RANGE_REG_ENDENTRY__MASK	0xffff0000
+#define A3XX_HLSQ_CONST_FSPRESV_RANGE_REG_ENDENTRY__MASK	0x01ff0000
 #define A3XX_HLSQ_CONST_FSPRESV_RANGE_REG_ENDENTRY__SHIFT	16
 static inline uint32_t A3XX_HLSQ_CONST_FSPRESV_RANGE_REG_ENDENTRY(uint32_t val)
 {
@@ -1620,12 +1903,24 @@
 }
 
 #define REG_A3XX_VFD_CONTROL_1					0x00002241
-#define A3XX_VFD_CONTROL_1_MAXSTORAGE__MASK			0x0000ffff
+#define A3XX_VFD_CONTROL_1_MAXSTORAGE__MASK			0x0000000f
 #define A3XX_VFD_CONTROL_1_MAXSTORAGE__SHIFT			0
 static inline uint32_t A3XX_VFD_CONTROL_1_MAXSTORAGE(uint32_t val)
 {
 	return ((val) << A3XX_VFD_CONTROL_1_MAXSTORAGE__SHIFT) & A3XX_VFD_CONTROL_1_MAXSTORAGE__MASK;
 }
+#define A3XX_VFD_CONTROL_1_MAXTHRESHOLD__MASK			0x000000f0
+#define A3XX_VFD_CONTROL_1_MAXTHRESHOLD__SHIFT			4
+static inline uint32_t A3XX_VFD_CONTROL_1_MAXTHRESHOLD(uint32_t val)
+{
+	return ((val) << A3XX_VFD_CONTROL_1_MAXTHRESHOLD__SHIFT) & A3XX_VFD_CONTROL_1_MAXTHRESHOLD__MASK;
+}
+#define A3XX_VFD_CONTROL_1_MINTHRESHOLD__MASK			0x00000f00
+#define A3XX_VFD_CONTROL_1_MINTHRESHOLD__SHIFT			8
+static inline uint32_t A3XX_VFD_CONTROL_1_MINTHRESHOLD(uint32_t val)
+{
+	return ((val) << A3XX_VFD_CONTROL_1_MINTHRESHOLD__SHIFT) & A3XX_VFD_CONTROL_1_MINTHRESHOLD__MASK;
+}
 #define A3XX_VFD_CONTROL_1_REGID4VTX__MASK			0x00ff0000
 #define A3XX_VFD_CONTROL_1_REGID4VTX__SHIFT			16
 static inline uint32_t A3XX_VFD_CONTROL_1_REGID4VTX(uint32_t val)
@@ -2008,24 +2303,19 @@
 	return ((val) << A3XX_SP_VS_CTRL_REG0_INSTRBUFFERMODE__SHIFT) & A3XX_SP_VS_CTRL_REG0_INSTRBUFFERMODE__MASK;
 }
 #define A3XX_SP_VS_CTRL_REG0_CACHEINVALID			0x00000004
+#define A3XX_SP_VS_CTRL_REG0_ALUSCHMODE				0x00000008
 #define A3XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__MASK		0x000003f0
 #define A3XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT		4
 static inline uint32_t A3XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT(uint32_t val)
 {
 	return ((val) << A3XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT) & A3XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__MASK;
 }
-#define A3XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__MASK		0x0003fc00
+#define A3XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__MASK		0x0000fc00
 #define A3XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT		10
 static inline uint32_t A3XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT(uint32_t val)
 {
 	return ((val) << A3XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT) & A3XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__MASK;
 }
-#define A3XX_SP_VS_CTRL_REG0_INOUTREGOVERLAP__MASK		0x000c0000
-#define A3XX_SP_VS_CTRL_REG0_INOUTREGOVERLAP__SHIFT		18
-static inline uint32_t A3XX_SP_VS_CTRL_REG0_INOUTREGOVERLAP(uint32_t val)
-{
-	return ((val) << A3XX_SP_VS_CTRL_REG0_INOUTREGOVERLAP__SHIFT) & A3XX_SP_VS_CTRL_REG0_INOUTREGOVERLAP__MASK;
-}
 #define A3XX_SP_VS_CTRL_REG0_THREADSIZE__MASK			0x00100000
 #define A3XX_SP_VS_CTRL_REG0_THREADSIZE__SHIFT			20
 static inline uint32_t A3XX_SP_VS_CTRL_REG0_THREADSIZE(enum a3xx_threadsize val)
@@ -2033,8 +2323,6 @@
 	return ((val) << A3XX_SP_VS_CTRL_REG0_THREADSIZE__SHIFT) & A3XX_SP_VS_CTRL_REG0_THREADSIZE__MASK;
 }
 #define A3XX_SP_VS_CTRL_REG0_SUPERTHREADMODE			0x00200000
-#define A3XX_SP_VS_CTRL_REG0_PIXLODENABLE			0x00400000
-#define A3XX_SP_VS_CTRL_REG0_COMPUTEMODE			0x00800000
 #define A3XX_SP_VS_CTRL_REG0_LENGTH__MASK			0xff000000
 #define A3XX_SP_VS_CTRL_REG0_LENGTH__SHIFT			24
 static inline uint32_t A3XX_SP_VS_CTRL_REG0_LENGTH(uint32_t val)
@@ -2075,7 +2363,8 @@
 {
 	return ((val) << A3XX_SP_VS_PARAM_REG_PSIZEREGID__SHIFT) & A3XX_SP_VS_PARAM_REG_PSIZEREGID__MASK;
 }
-#define A3XX_SP_VS_PARAM_REG_TOTALVSOUTVAR__MASK		0xfff00000
+#define A3XX_SP_VS_PARAM_REG_POS2DMODE				0x00010000
+#define A3XX_SP_VS_PARAM_REG_TOTALVSOUTVAR__MASK		0x01f00000
 #define A3XX_SP_VS_PARAM_REG_TOTALVSOUTVAR__SHIFT		20
 static inline uint32_t A3XX_SP_VS_PARAM_REG_TOTALVSOUTVAR(uint32_t val)
 {
@@ -2085,24 +2374,26 @@
 static inline uint32_t REG_A3XX_SP_VS_OUT(uint32_t i0) { return 0x000022c7 + 0x1*i0; }
 
 static inline uint32_t REG_A3XX_SP_VS_OUT_REG(uint32_t i0) { return 0x000022c7 + 0x1*i0; }
-#define A3XX_SP_VS_OUT_REG_A_REGID__MASK			0x000001ff
+#define A3XX_SP_VS_OUT_REG_A_REGID__MASK			0x000000ff
 #define A3XX_SP_VS_OUT_REG_A_REGID__SHIFT			0
 static inline uint32_t A3XX_SP_VS_OUT_REG_A_REGID(uint32_t val)
 {
 	return ((val) << A3XX_SP_VS_OUT_REG_A_REGID__SHIFT) & A3XX_SP_VS_OUT_REG_A_REGID__MASK;
 }
+#define A3XX_SP_VS_OUT_REG_A_HALF				0x00000100
 #define A3XX_SP_VS_OUT_REG_A_COMPMASK__MASK			0x00001e00
 #define A3XX_SP_VS_OUT_REG_A_COMPMASK__SHIFT			9
 static inline uint32_t A3XX_SP_VS_OUT_REG_A_COMPMASK(uint32_t val)
 {
 	return ((val) << A3XX_SP_VS_OUT_REG_A_COMPMASK__SHIFT) & A3XX_SP_VS_OUT_REG_A_COMPMASK__MASK;
 }
-#define A3XX_SP_VS_OUT_REG_B_REGID__MASK			0x01ff0000
+#define A3XX_SP_VS_OUT_REG_B_REGID__MASK			0x00ff0000
 #define A3XX_SP_VS_OUT_REG_B_REGID__SHIFT			16
 static inline uint32_t A3XX_SP_VS_OUT_REG_B_REGID(uint32_t val)
 {
 	return ((val) << A3XX_SP_VS_OUT_REG_B_REGID__SHIFT) & A3XX_SP_VS_OUT_REG_B_REGID__MASK;
 }
+#define A3XX_SP_VS_OUT_REG_B_HALF				0x01000000
 #define A3XX_SP_VS_OUT_REG_B_COMPMASK__MASK			0x1e000000
 #define A3XX_SP_VS_OUT_REG_B_COMPMASK__SHIFT			25
 static inline uint32_t A3XX_SP_VS_OUT_REG_B_COMPMASK(uint32_t val)
@@ -2113,25 +2404,25 @@
 static inline uint32_t REG_A3XX_SP_VS_VPC_DST(uint32_t i0) { return 0x000022d0 + 0x1*i0; }
 
 static inline uint32_t REG_A3XX_SP_VS_VPC_DST_REG(uint32_t i0) { return 0x000022d0 + 0x1*i0; }
-#define A3XX_SP_VS_VPC_DST_REG_OUTLOC0__MASK			0x000000ff
+#define A3XX_SP_VS_VPC_DST_REG_OUTLOC0__MASK			0x0000007f
 #define A3XX_SP_VS_VPC_DST_REG_OUTLOC0__SHIFT			0
 static inline uint32_t A3XX_SP_VS_VPC_DST_REG_OUTLOC0(uint32_t val)
 {
 	return ((val) << A3XX_SP_VS_VPC_DST_REG_OUTLOC0__SHIFT) & A3XX_SP_VS_VPC_DST_REG_OUTLOC0__MASK;
 }
-#define A3XX_SP_VS_VPC_DST_REG_OUTLOC1__MASK			0x0000ff00
+#define A3XX_SP_VS_VPC_DST_REG_OUTLOC1__MASK			0x00007f00
 #define A3XX_SP_VS_VPC_DST_REG_OUTLOC1__SHIFT			8
 static inline uint32_t A3XX_SP_VS_VPC_DST_REG_OUTLOC1(uint32_t val)
 {
 	return ((val) << A3XX_SP_VS_VPC_DST_REG_OUTLOC1__SHIFT) & A3XX_SP_VS_VPC_DST_REG_OUTLOC1__MASK;
 }
-#define A3XX_SP_VS_VPC_DST_REG_OUTLOC2__MASK			0x00ff0000
+#define A3XX_SP_VS_VPC_DST_REG_OUTLOC2__MASK			0x007f0000
 #define A3XX_SP_VS_VPC_DST_REG_OUTLOC2__SHIFT			16
 static inline uint32_t A3XX_SP_VS_VPC_DST_REG_OUTLOC2(uint32_t val)
 {
 	return ((val) << A3XX_SP_VS_VPC_DST_REG_OUTLOC2__SHIFT) & A3XX_SP_VS_VPC_DST_REG_OUTLOC2__MASK;
 }
-#define A3XX_SP_VS_VPC_DST_REG_OUTLOC3__MASK			0xff000000
+#define A3XX_SP_VS_VPC_DST_REG_OUTLOC3__MASK			0x7f000000
 #define A3XX_SP_VS_VPC_DST_REG_OUTLOC3__SHIFT			24
 static inline uint32_t A3XX_SP_VS_VPC_DST_REG_OUTLOC3(uint32_t val)
 {
@@ -2139,6 +2430,12 @@
 }
 
 #define REG_A3XX_SP_VS_OBJ_OFFSET_REG				0x000022d4
+#define A3XX_SP_VS_OBJ_OFFSET_REG_FIRSTEXECINSTROFFSET__MASK	0x0000ffff
+#define A3XX_SP_VS_OBJ_OFFSET_REG_FIRSTEXECINSTROFFSET__SHIFT	0
+static inline uint32_t A3XX_SP_VS_OBJ_OFFSET_REG_FIRSTEXECINSTROFFSET(uint32_t val)
+{
+	return ((val) << A3XX_SP_VS_OBJ_OFFSET_REG_FIRSTEXECINSTROFFSET__SHIFT) & A3XX_SP_VS_OBJ_OFFSET_REG_FIRSTEXECINSTROFFSET__MASK;
+}
 #define A3XX_SP_VS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__MASK	0x01ff0000
 #define A3XX_SP_VS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__SHIFT	16
 static inline uint32_t A3XX_SP_VS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET(uint32_t val)
@@ -2155,8 +2452,38 @@
 #define REG_A3XX_SP_VS_OBJ_START_REG				0x000022d5
 
 #define REG_A3XX_SP_VS_PVT_MEM_PARAM_REG			0x000022d6
+#define A3XX_SP_VS_PVT_MEM_PARAM_REG_MEMSIZEPERITEM__MASK	0x000000ff
+#define A3XX_SP_VS_PVT_MEM_PARAM_REG_MEMSIZEPERITEM__SHIFT	0
+static inline uint32_t A3XX_SP_VS_PVT_MEM_PARAM_REG_MEMSIZEPERITEM(uint32_t val)
+{
+	return ((val) << A3XX_SP_VS_PVT_MEM_PARAM_REG_MEMSIZEPERITEM__SHIFT) & A3XX_SP_VS_PVT_MEM_PARAM_REG_MEMSIZEPERITEM__MASK;
+}
+#define A3XX_SP_VS_PVT_MEM_PARAM_REG_HWSTACKOFFSET__MASK	0x00ffff00
+#define A3XX_SP_VS_PVT_MEM_PARAM_REG_HWSTACKOFFSET__SHIFT	8
+static inline uint32_t A3XX_SP_VS_PVT_MEM_PARAM_REG_HWSTACKOFFSET(uint32_t val)
+{
+	return ((val) << A3XX_SP_VS_PVT_MEM_PARAM_REG_HWSTACKOFFSET__SHIFT) & A3XX_SP_VS_PVT_MEM_PARAM_REG_HWSTACKOFFSET__MASK;
+}
+#define A3XX_SP_VS_PVT_MEM_PARAM_REG_HWSTACKSIZEPERTHREAD__MASK	0xff000000
+#define A3XX_SP_VS_PVT_MEM_PARAM_REG_HWSTACKSIZEPERTHREAD__SHIFT	24
+static inline uint32_t A3XX_SP_VS_PVT_MEM_PARAM_REG_HWSTACKSIZEPERTHREAD(uint32_t val)
+{
+	return ((val) << A3XX_SP_VS_PVT_MEM_PARAM_REG_HWSTACKSIZEPERTHREAD__SHIFT) & A3XX_SP_VS_PVT_MEM_PARAM_REG_HWSTACKSIZEPERTHREAD__MASK;
+}
 
 #define REG_A3XX_SP_VS_PVT_MEM_ADDR_REG				0x000022d7
+#define A3XX_SP_VS_PVT_MEM_ADDR_REG_BURSTLEN__MASK		0x0000001f
+#define A3XX_SP_VS_PVT_MEM_ADDR_REG_BURSTLEN__SHIFT		0
+static inline uint32_t A3XX_SP_VS_PVT_MEM_ADDR_REG_BURSTLEN(uint32_t val)
+{
+	return ((val) << A3XX_SP_VS_PVT_MEM_ADDR_REG_BURSTLEN__SHIFT) & A3XX_SP_VS_PVT_MEM_ADDR_REG_BURSTLEN__MASK;
+}
+#define A3XX_SP_VS_PVT_MEM_ADDR_REG_SHADERSTARTADDRESS__MASK	0xffffffe0
+#define A3XX_SP_VS_PVT_MEM_ADDR_REG_SHADERSTARTADDRESS__SHIFT	5
+static inline uint32_t A3XX_SP_VS_PVT_MEM_ADDR_REG_SHADERSTARTADDRESS(uint32_t val)
+{
+	return ((val >> 5) << A3XX_SP_VS_PVT_MEM_ADDR_REG_SHADERSTARTADDRESS__SHIFT) & A3XX_SP_VS_PVT_MEM_ADDR_REG_SHADERSTARTADDRESS__MASK;
+}
 
 #define REG_A3XX_SP_VS_PVT_MEM_SIZE_REG				0x000022d8
 
@@ -2182,24 +2509,22 @@
 	return ((val) << A3XX_SP_FS_CTRL_REG0_INSTRBUFFERMODE__SHIFT) & A3XX_SP_FS_CTRL_REG0_INSTRBUFFERMODE__MASK;
 }
 #define A3XX_SP_FS_CTRL_REG0_CACHEINVALID			0x00000004
+#define A3XX_SP_FS_CTRL_REG0_ALUSCHMODE				0x00000008
 #define A3XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT__MASK		0x000003f0
 #define A3XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT		4
 static inline uint32_t A3XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT(uint32_t val)
 {
 	return ((val) << A3XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT) & A3XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT__MASK;
 }
-#define A3XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__MASK		0x0003fc00
+#define A3XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__MASK		0x0000fc00
 #define A3XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT		10
 static inline uint32_t A3XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT(uint32_t val)
 {
 	return ((val) << A3XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT) & A3XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__MASK;
 }
-#define A3XX_SP_FS_CTRL_REG0_INOUTREGOVERLAP__MASK		0x000c0000
-#define A3XX_SP_FS_CTRL_REG0_INOUTREGOVERLAP__SHIFT		18
-static inline uint32_t A3XX_SP_FS_CTRL_REG0_INOUTREGOVERLAP(uint32_t val)
-{
-	return ((val) << A3XX_SP_FS_CTRL_REG0_INOUTREGOVERLAP__SHIFT) & A3XX_SP_FS_CTRL_REG0_INOUTREGOVERLAP__MASK;
-}
+#define A3XX_SP_FS_CTRL_REG0_FSBYPASSENABLE			0x00020000
+#define A3XX_SP_FS_CTRL_REG0_INOUTREGOVERLAP			0x00040000
+#define A3XX_SP_FS_CTRL_REG0_OUTORDERED				0x00080000
 #define A3XX_SP_FS_CTRL_REG0_THREADSIZE__MASK			0x00100000
 #define A3XX_SP_FS_CTRL_REG0_THREADSIZE__SHIFT			20
 static inline uint32_t A3XX_SP_FS_CTRL_REG0_THREADSIZE(enum a3xx_threadsize val)
@@ -2235,7 +2560,7 @@
 {
 	return ((val) << A3XX_SP_FS_CTRL_REG1_INITIALOUTSTANDING__SHIFT) & A3XX_SP_FS_CTRL_REG1_INITIALOUTSTANDING__MASK;
 }
-#define A3XX_SP_FS_CTRL_REG1_HALFPRECVAROFFSET__MASK		0x3f000000
+#define A3XX_SP_FS_CTRL_REG1_HALFPRECVAROFFSET__MASK		0x7f000000
 #define A3XX_SP_FS_CTRL_REG1_HALFPRECVAROFFSET__SHIFT		24
 static inline uint32_t A3XX_SP_FS_CTRL_REG1_HALFPRECVAROFFSET(uint32_t val)
 {
@@ -2243,6 +2568,12 @@
 }
 
 #define REG_A3XX_SP_FS_OBJ_OFFSET_REG				0x000022e2
+#define A3XX_SP_FS_OBJ_OFFSET_REG_FIRSTEXECINSTROFFSET__MASK	0x0000ffff
+#define A3XX_SP_FS_OBJ_OFFSET_REG_FIRSTEXECINSTROFFSET__SHIFT	0
+static inline uint32_t A3XX_SP_FS_OBJ_OFFSET_REG_FIRSTEXECINSTROFFSET(uint32_t val)
+{
+	return ((val) << A3XX_SP_FS_OBJ_OFFSET_REG_FIRSTEXECINSTROFFSET__SHIFT) & A3XX_SP_FS_OBJ_OFFSET_REG_FIRSTEXECINSTROFFSET__MASK;
+}
 #define A3XX_SP_FS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__MASK	0x01ff0000
 #define A3XX_SP_FS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__SHIFT	16
 static inline uint32_t A3XX_SP_FS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET(uint32_t val)
@@ -2259,8 +2590,38 @@
 #define REG_A3XX_SP_FS_OBJ_START_REG				0x000022e3
 
 #define REG_A3XX_SP_FS_PVT_MEM_PARAM_REG			0x000022e4
+#define A3XX_SP_FS_PVT_MEM_PARAM_REG_MEMSIZEPERITEM__MASK	0x000000ff
+#define A3XX_SP_FS_PVT_MEM_PARAM_REG_MEMSIZEPERITEM__SHIFT	0
+static inline uint32_t A3XX_SP_FS_PVT_MEM_PARAM_REG_MEMSIZEPERITEM(uint32_t val)
+{
+	return ((val) << A3XX_SP_FS_PVT_MEM_PARAM_REG_MEMSIZEPERITEM__SHIFT) & A3XX_SP_FS_PVT_MEM_PARAM_REG_MEMSIZEPERITEM__MASK;
+}
+#define A3XX_SP_FS_PVT_MEM_PARAM_REG_HWSTACKOFFSET__MASK	0x00ffff00
+#define A3XX_SP_FS_PVT_MEM_PARAM_REG_HWSTACKOFFSET__SHIFT	8
+static inline uint32_t A3XX_SP_FS_PVT_MEM_PARAM_REG_HWSTACKOFFSET(uint32_t val)
+{
+	return ((val) << A3XX_SP_FS_PVT_MEM_PARAM_REG_HWSTACKOFFSET__SHIFT) & A3XX_SP_FS_PVT_MEM_PARAM_REG_HWSTACKOFFSET__MASK;
+}
+#define A3XX_SP_FS_PVT_MEM_PARAM_REG_HWSTACKSIZEPERTHREAD__MASK	0xff000000
+#define A3XX_SP_FS_PVT_MEM_PARAM_REG_HWSTACKSIZEPERTHREAD__SHIFT	24
+static inline uint32_t A3XX_SP_FS_PVT_MEM_PARAM_REG_HWSTACKSIZEPERTHREAD(uint32_t val)
+{
+	return ((val) << A3XX_SP_FS_PVT_MEM_PARAM_REG_HWSTACKSIZEPERTHREAD__SHIFT) & A3XX_SP_FS_PVT_MEM_PARAM_REG_HWSTACKSIZEPERTHREAD__MASK;
+}
 
 #define REG_A3XX_SP_FS_PVT_MEM_ADDR_REG				0x000022e5
+#define A3XX_SP_FS_PVT_MEM_ADDR_REG_BURSTLEN__MASK		0x0000001f
+#define A3XX_SP_FS_PVT_MEM_ADDR_REG_BURSTLEN__SHIFT		0
+static inline uint32_t A3XX_SP_FS_PVT_MEM_ADDR_REG_BURSTLEN(uint32_t val)
+{
+	return ((val) << A3XX_SP_FS_PVT_MEM_ADDR_REG_BURSTLEN__SHIFT) & A3XX_SP_FS_PVT_MEM_ADDR_REG_BURSTLEN__MASK;
+}
+#define A3XX_SP_FS_PVT_MEM_ADDR_REG_SHADERSTARTADDRESS__MASK	0xffffffe0
+#define A3XX_SP_FS_PVT_MEM_ADDR_REG_SHADERSTARTADDRESS__SHIFT	5
+static inline uint32_t A3XX_SP_FS_PVT_MEM_ADDR_REG_SHADERSTARTADDRESS(uint32_t val)
+{
+	return ((val >> 5) << A3XX_SP_FS_PVT_MEM_ADDR_REG_SHADERSTARTADDRESS__SHIFT) & A3XX_SP_FS_PVT_MEM_ADDR_REG_SHADERSTARTADDRESS__MASK;
+}
 
 #define REG_A3XX_SP_FS_PVT_MEM_SIZE_REG				0x000022e6
 
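Everything in the a3xx.xml.h hunks above follows the same generated
rnndb pattern: each bitfield gets a __MASK/__SHIFT pair plus an inline
packer that shifts first and masks second, so an out-of-range value is
truncated rather than spilling into the neighbouring field; address
fields such as SHADERSTARTADDRESS additionally drop their alignment
bits (val >> 5) before packing. A minimal sketch of how a driver
composes a register from these helpers; only the macro names come from
the header above, the field values are arbitrary:

#include <stdint.h>
#include "a3xx.xml.h"	/* the generated header patched above */

/* Illustrative only: pack A3XX_VFD_CONTROL_1 from the generated
 * helpers.  Each helper masks after shifting, so e.g. MAXSTORAGE is
 * silently clipped to its 4-bit field (0x0000000f). */
static inline uint32_t example_vfd_control_1(void)
{
	return A3XX_VFD_CONTROL_1_MAXSTORAGE(2) |
	       A3XX_VFD_CONTROL_1_MAXTHRESHOLD(3) |
	       A3XX_VFD_CONTROL_1_MINTHRESHOLD(1) |
	       A3XX_VFD_CONTROL_1_REGID4VTX(63);
}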
diff -ruw linux-4.4.115/drivers/gpu/drm/msm/adreno/a4xx_gpu.c linux-4.4.115-fbx/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
--- linux-4.4.115/drivers/gpu/drm/msm/adreno/a4xx_gpu.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/gpu/drm/msm/adreno/a4xx_gpu.c	2019-01-22 16:16:23.483246225 +0100
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014 The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014,2018 The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -31,6 +31,7 @@
 
 extern bool hang_debug;
 static void a4xx_dump(struct msm_gpu *gpu);
+static bool a4xx_idle(struct msm_gpu *gpu);
 
 /*
  * a4xx_enable_hwcg() - Program the clock control registers
@@ -102,14 +103,19 @@
 	gpu_write(gpu, REG_A4XX_RBBM_CLOCK_DELAY_TSE_RAS_RBBM, 0x00000222);
 	gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL_HLSQ , 0x00000000);
 	gpu_write(gpu, REG_A4XX_RBBM_CLOCK_HYST_HLSQ, 0x00000000);
-	gpu_write(gpu, REG_A4XX_RBBM_CLOCK_DELAY_HLSQ, 0x00020000);
+	gpu_write(gpu, REG_A4XX_RBBM_CLOCK_DELAY_HLSQ, 0x00220000);
+	/* Early A430s have a timing issue with SP/TP power collapse;
+	   disabling HW clock gating prevents it. */
+	if (adreno_is_a430(adreno_gpu) && adreno_gpu->rev.patchid < 2)
+		gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL, 0);
+	else
 	gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL, 0xAAAAAAAA);
 	gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL2, 0);
 }
 
-static void a4xx_me_init(struct msm_gpu *gpu)
+static bool a4xx_me_init(struct msm_gpu *gpu)
 {
-	struct msm_ringbuffer *ring = gpu->rb;
+	struct msm_ringbuffer *ring = gpu->rb[0];
 
 	OUT_PKT3(ring, CP_ME_INIT, 17);
 	OUT_RING(ring, 0x000003f7);
@@ -130,8 +137,8 @@
 	OUT_RING(ring, 0x00000000);
 	OUT_RING(ring, 0x00000000);
 
-	gpu->funcs->flush(gpu);
-	gpu->funcs->idle(gpu);
+	gpu->funcs->flush(gpu, ring);
+	return a4xx_idle(gpu);
 }
 
 static int a4xx_hw_init(struct msm_gpu *gpu)
@@ -141,7 +148,7 @@
 	uint32_t *ptr, len;
 	int i, ret;
 
-	if (adreno_is_a4xx(adreno_gpu)) {
+	if (adreno_is_a420(adreno_gpu)) {
 		gpu_write(gpu, REG_A4XX_VBIF_ABIT_SORT, 0x0001001F);
 		gpu_write(gpu, REG_A4XX_VBIF_ABIT_SORT_CONF, 0x000000A4);
 		gpu_write(gpu, REG_A4XX_VBIF_GATE_OFF_WRREQ_EN, 0x00000001);
@@ -150,6 +157,13 @@
 		gpu_write(gpu, REG_A4XX_VBIF_IN_WR_LIM_CONF0, 0x18181818);
 		gpu_write(gpu, REG_A4XX_VBIF_IN_WR_LIM_CONF1, 0x00000018);
 		gpu_write(gpu, REG_A4XX_VBIF_ROUND_ROBIN_QOS_ARB, 0x00000003);
+	} else if (adreno_is_a430(adreno_gpu)) {
+		gpu_write(gpu, REG_A4XX_VBIF_GATE_OFF_WRREQ_EN, 0x00000001);
+		gpu_write(gpu, REG_A4XX_VBIF_IN_RD_LIM_CONF0, 0x18181818);
+		gpu_write(gpu, REG_A4XX_VBIF_IN_RD_LIM_CONF1, 0x00000018);
+		gpu_write(gpu, REG_A4XX_VBIF_IN_WR_LIM_CONF0, 0x18181818);
+		gpu_write(gpu, REG_A4XX_VBIF_IN_WR_LIM_CONF1, 0x00000018);
+		gpu_write(gpu, REG_A4XX_VBIF_ROUND_ROBIN_QOS_ARB, 0x00000003);
 	} else {
 		BUG();
 	}
@@ -161,6 +175,10 @@
 	gpu_write(gpu, REG_A4XX_RBBM_SP_HYST_CNT, 0x10);
 	gpu_write(gpu, REG_A4XX_RBBM_WAIT_IDLE_CLOCKS_CTL, 0x10);
 
+	if (adreno_is_a430(adreno_gpu)) {
+		gpu_write(gpu, REG_A4XX_RBBM_WAIT_IDLE_CLOCKS_CTL2, 0x30);
+	}
+
 	 /* Enable the RBBM error reporting bits */
 	gpu_write(gpu, REG_A4XX_RBBM_AHB_CTL0, 0x00000001);
 
@@ -183,6 +201,14 @@
 	/* Turn on performance counters: */
 	gpu_write(gpu, REG_A4XX_RBBM_PERFCTR_CTL, 0x01);
 
+	/* use the first CP counter for timestamp queries.. userspace may set
+	 * this as well but it selects the same counter/countable:
+	 */
+	gpu_write(gpu, REG_A4XX_CP_PERFCTR_CP_SEL_0, CP_ALWAYS_COUNT);
+
+	if (adreno_is_a430(adreno_gpu))
+		gpu_write(gpu, REG_A4XX_UCHE_CACHE_WAYS_VFD, 0x07);
+
 	/* Disable L2 bypass to avoid UCHE out of bounds errors */
 	gpu_write(gpu, REG_A4XX_UCHE_TRAP_BASE_LO, 0xffff0000);
 	gpu_write(gpu, REG_A4XX_UCHE_TRAP_BASE_HI, 0xffff0000);
@@ -190,6 +216,15 @@
 	gpu_write(gpu, REG_A4XX_CP_DEBUG, (1 << 25) |
 			(adreno_is_a420(adreno_gpu) ? (1 << 29) : 0));
 
+	/* On A430 enable SP regfile sleep for power savings */
+	/* TODO downstream does this for !420, so maybe applies for 405 too? */
+	if (!adreno_is_a420(adreno_gpu)) {
+		gpu_write(gpu, REG_A4XX_RBBM_SP_REGFILE_SLEEP_CNTL_0,
+			0x00000441);
+		gpu_write(gpu, REG_A4XX_RBBM_SP_REGFILE_SLEEP_CNTL_1,
+			0x00000441);
+	}
+
 	a4xx_enable_hwcg(gpu);
 
 	/*
@@ -204,10 +239,6 @@
 		gpu_write(gpu, REG_A4XX_RBBM_CLOCK_DELAY_HLSQ, val);
 	}
 
-	ret = adreno_hw_init(gpu);
-	if (ret)
-		return ret;
-
 	/* setup access protection: */
 	gpu_write(gpu, REG_A4XX_CP_PROTECT_CTRL, 0x00000007);
 
@@ -262,8 +293,7 @@
 	/* clear ME_HALT to start micro engine */
 	gpu_write(gpu, REG_A4XX_CP_ME_CNTL, 0);
 
-	a4xx_me_init(gpu);
-	return 0;
+	return a4xx_me_init(gpu) ? 0 : -EINVAL;
 }
 
 static void a4xx_recover(struct msm_gpu *gpu)
@@ -297,17 +327,21 @@
 	kfree(a4xx_gpu);
 }
 
-static void a4xx_idle(struct msm_gpu *gpu)
+static bool a4xx_idle(struct msm_gpu *gpu)
 {
 	/* wait for ringbuffer to drain: */
-	adreno_idle(gpu);
+	if (!adreno_idle(gpu, gpu->rb[0]))
+		return false;
 
 	/* then wait for GPU to finish: */
 	if (spin_until(!(gpu_read(gpu, REG_A4XX_RBBM_STATUS) &
-					A4XX_RBBM_STATUS_GPU_BUSY)))
+					A4XX_RBBM_STATUS_GPU_BUSY))) {
 		DRM_ERROR("%s: timeout waiting for GPU to idle!\n", gpu->name);
-
 	/* TODO maybe we need to reset GPU here to recover from hang? */
+		return false;
+	}
+
+	return true;
 }
 
 static irqreturn_t a4xx_irq(struct msm_gpu *gpu)
@@ -409,12 +443,8 @@
 #ifdef CONFIG_DEBUG_FS
 static void a4xx_show(struct msm_gpu *gpu, struct seq_file *m)
 {
-	gpu->funcs->pm_resume(gpu);
-
 	seq_printf(m, "status:   %08x\n",
 			gpu_read(gpu, REG_A4XX_RBBM_STATUS));
-	gpu->funcs->pm_suspend(gpu);
-
 	adreno_show(gpu, m);
 
 }
@@ -422,87 +452,13 @@
 
 /* Register offset defines for A4XX, in order of enum adreno_regs */
 static const unsigned int a4xx_register_offsets[REG_ADRENO_REGISTER_MAX] = {
-	REG_ADRENO_DEFINE(REG_ADRENO_CP_DEBUG, REG_A4XX_CP_DEBUG),
-	REG_ADRENO_DEFINE(REG_ADRENO_CP_ME_RAM_WADDR, REG_A4XX_CP_ME_RAM_WADDR),
-	REG_ADRENO_DEFINE(REG_ADRENO_CP_ME_RAM_DATA, REG_A4XX_CP_ME_RAM_DATA),
-	REG_ADRENO_DEFINE(REG_ADRENO_CP_PFP_UCODE_DATA,
-			REG_A4XX_CP_PFP_UCODE_DATA),
-	REG_ADRENO_DEFINE(REG_ADRENO_CP_PFP_UCODE_ADDR,
-			REG_A4XX_CP_PFP_UCODE_ADDR),
-	REG_ADRENO_DEFINE(REG_ADRENO_CP_WFI_PEND_CTR, REG_A4XX_CP_WFI_PEND_CTR),
 	REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_BASE, REG_A4XX_CP_RB_BASE),
+	REG_ADRENO_SKIP(REG_ADRENO_CP_RB_BASE_HI),
 	REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR_ADDR, REG_A4XX_CP_RB_RPTR_ADDR),
+	REG_ADRENO_SKIP(REG_ADRENO_CP_RB_RPTR_ADDR_HI),
 	REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR, REG_A4XX_CP_RB_RPTR),
 	REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_WPTR, REG_A4XX_CP_RB_WPTR),
-	REG_ADRENO_DEFINE(REG_ADRENO_CP_PROTECT_CTRL, REG_A4XX_CP_PROTECT_CTRL),
-	REG_ADRENO_DEFINE(REG_ADRENO_CP_ME_CNTL, REG_A4XX_CP_ME_CNTL),
 	REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_CNTL, REG_A4XX_CP_RB_CNTL),
-	REG_ADRENO_DEFINE(REG_ADRENO_CP_IB1_BASE, REG_A4XX_CP_IB1_BASE),
-	REG_ADRENO_DEFINE(REG_ADRENO_CP_IB1_BUFSZ, REG_A4XX_CP_IB1_BUFSZ),
-	REG_ADRENO_DEFINE(REG_ADRENO_CP_IB2_BASE, REG_A4XX_CP_IB2_BASE),
-	REG_ADRENO_DEFINE(REG_ADRENO_CP_IB2_BUFSZ, REG_A4XX_CP_IB2_BUFSZ),
-	REG_ADRENO_DEFINE(REG_ADRENO_CP_TIMESTAMP, REG_AXXX_CP_SCRATCH_REG0),
-	REG_ADRENO_DEFINE(REG_ADRENO_CP_ME_RAM_RADDR, REG_A4XX_CP_ME_RAM_RADDR),
-	REG_ADRENO_DEFINE(REG_ADRENO_CP_ROQ_ADDR, REG_A4XX_CP_ROQ_ADDR),
-	REG_ADRENO_DEFINE(REG_ADRENO_CP_ROQ_DATA, REG_A4XX_CP_ROQ_DATA),
-	REG_ADRENO_DEFINE(REG_ADRENO_CP_MERCIU_ADDR, REG_A4XX_CP_MERCIU_ADDR),
-	REG_ADRENO_DEFINE(REG_ADRENO_CP_MERCIU_DATA, REG_A4XX_CP_MERCIU_DATA),
-	REG_ADRENO_DEFINE(REG_ADRENO_CP_MERCIU_DATA2, REG_A4XX_CP_MERCIU_DATA2),
-	REG_ADRENO_DEFINE(REG_ADRENO_CP_MEQ_ADDR, REG_A4XX_CP_MEQ_ADDR),
-	REG_ADRENO_DEFINE(REG_ADRENO_CP_MEQ_DATA, REG_A4XX_CP_MEQ_DATA),
-	REG_ADRENO_DEFINE(REG_ADRENO_CP_HW_FAULT, REG_A4XX_CP_HW_FAULT),
-	REG_ADRENO_DEFINE(REG_ADRENO_CP_PROTECT_STATUS,
-			REG_A4XX_CP_PROTECT_STATUS),
-	REG_ADRENO_DEFINE(REG_ADRENO_SCRATCH_ADDR, REG_A4XX_CP_SCRATCH_ADDR),
-	REG_ADRENO_DEFINE(REG_ADRENO_SCRATCH_UMSK, REG_A4XX_CP_SCRATCH_UMASK),
-	REG_ADRENO_DEFINE(REG_ADRENO_RBBM_STATUS, REG_A4XX_RBBM_STATUS),
-	REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PERFCTR_CTL,
-			REG_A4XX_RBBM_PERFCTR_CTL),
-	REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PERFCTR_LOAD_CMD0,
-			REG_A4XX_RBBM_PERFCTR_LOAD_CMD0),
-	REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PERFCTR_LOAD_CMD1,
-			REG_A4XX_RBBM_PERFCTR_LOAD_CMD1),
-	REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PERFCTR_LOAD_CMD2,
-			REG_A4XX_RBBM_PERFCTR_LOAD_CMD2),
-	REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PERFCTR_PWR_1_LO,
-			REG_A4XX_RBBM_PERFCTR_PWR_1_LO),
-	REG_ADRENO_DEFINE(REG_ADRENO_RBBM_INT_0_MASK, REG_A4XX_RBBM_INT_0_MASK),
-	REG_ADRENO_DEFINE(REG_ADRENO_RBBM_INT_0_STATUS,
-			REG_A4XX_RBBM_INT_0_STATUS),
-	REG_ADRENO_DEFINE(REG_ADRENO_RBBM_AHB_ERROR_STATUS,
-			REG_A4XX_RBBM_AHB_ERROR_STATUS),
-	REG_ADRENO_DEFINE(REG_ADRENO_RBBM_AHB_CMD, REG_A4XX_RBBM_AHB_CMD),
-	REG_ADRENO_DEFINE(REG_ADRENO_RBBM_CLOCK_CTL, REG_A4XX_RBBM_CLOCK_CTL),
-	REG_ADRENO_DEFINE(REG_ADRENO_RBBM_AHB_ME_SPLIT_STATUS,
-			REG_A4XX_RBBM_AHB_ME_SPLIT_STATUS),
-	REG_ADRENO_DEFINE(REG_ADRENO_RBBM_AHB_PFP_SPLIT_STATUS,
-			REG_A4XX_RBBM_AHB_PFP_SPLIT_STATUS),
-	REG_ADRENO_DEFINE(REG_ADRENO_VPC_DEBUG_RAM_SEL,
-			REG_A4XX_VPC_DEBUG_RAM_SEL),
-	REG_ADRENO_DEFINE(REG_ADRENO_VPC_DEBUG_RAM_READ,
-			REG_A4XX_VPC_DEBUG_RAM_READ),
-	REG_ADRENO_DEFINE(REG_ADRENO_RBBM_INT_CLEAR_CMD,
-			REG_A4XX_RBBM_INT_CLEAR_CMD),
-	REG_ADRENO_DEFINE(REG_ADRENO_VSC_SIZE_ADDRESS,
-			REG_A4XX_VSC_SIZE_ADDRESS),
-	REG_ADRENO_DEFINE(REG_ADRENO_VFD_CONTROL_0, REG_A4XX_VFD_CONTROL_0),
-	REG_ADRENO_DEFINE(REG_ADRENO_SP_VS_PVT_MEM_ADDR_REG,
-			REG_A4XX_SP_VS_PVT_MEM_ADDR),
-	REG_ADRENO_DEFINE(REG_ADRENO_SP_FS_PVT_MEM_ADDR_REG,
-			REG_A4XX_SP_FS_PVT_MEM_ADDR),
-	REG_ADRENO_DEFINE(REG_ADRENO_SP_VS_OBJ_START_REG,
-			REG_A4XX_SP_VS_OBJ_START),
-	REG_ADRENO_DEFINE(REG_ADRENO_SP_FS_OBJ_START_REG,
-			REG_A4XX_SP_FS_OBJ_START),
-	REG_ADRENO_DEFINE(REG_ADRENO_RBBM_RBBM_CTL, REG_A4XX_RBBM_RBBM_CTL),
-	REG_ADRENO_DEFINE(REG_ADRENO_RBBM_SW_RESET_CMD,
-			REG_A4XX_RBBM_SW_RESET_CMD),
-	REG_ADRENO_DEFINE(REG_ADRENO_UCHE_INVALIDATE0,
-			REG_A4XX_UCHE_INVALIDATE0),
-	REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PERFCTR_LOAD_VALUE_LO,
-			REG_A4XX_RBBM_PERFCTR_LOAD_VALUE_LO),
-	REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PERFCTR_LOAD_VALUE_HI,
-			REG_A4XX_RBBM_PERFCTR_LOAD_VALUE_HI),
 };
 
 static void a4xx_dump(struct msm_gpu *gpu)
@@ -512,23 +468,69 @@
 	adreno_dump(gpu);
 }
 
+static int a4xx_pm_resume(struct msm_gpu *gpu)
+{
+	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+	int ret;
+
+	ret = msm_gpu_pm_resume(gpu);
+	if (ret)
+		return ret;
+
+	if (adreno_is_a430(adreno_gpu)) {
+		unsigned int reg;
+		/* Set the default register values; set SW_COLLAPSE to 0 */
+		gpu_write(gpu, REG_A4XX_RBBM_POWER_CNTL_IP, 0x778000);
+		do {
+			udelay(5);
+			reg = gpu_read(gpu, REG_A4XX_RBBM_POWER_STATUS);
+		} while (!(reg & A4XX_RBBM_POWER_CNTL_IP_SP_TP_PWR_ON));
+	}
+	return 0;
+}
+
+static int a4xx_pm_suspend(struct msm_gpu *gpu)
+{
+	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+	int ret;
+
+	ret = msm_gpu_pm_suspend(gpu);
+	if (ret)
+		return ret;
+
+	if (adreno_is_a430(adreno_gpu)) {
+		/* Set the default register values; set SW_COLLAPSE to 1 */
+		gpu_write(gpu, REG_A4XX_RBBM_POWER_CNTL_IP, 0x778001);
+	}
+	return 0;
+}
+
+static int a4xx_get_timestamp(struct msm_gpu *gpu, uint64_t *value)
+{
+	*value = gpu_read64(gpu, REG_A4XX_RBBM_PERFCTR_CP_0_LO,
+		REG_A4XX_RBBM_PERFCTR_CP_0_HI);
+
+	return 0;
+}
+
 static const struct adreno_gpu_funcs funcs = {
 	.base = {
 		.get_param = adreno_get_param,
 		.hw_init = a4xx_hw_init,
-		.pm_suspend = msm_gpu_pm_suspend,
-		.pm_resume = msm_gpu_pm_resume,
+		.pm_suspend = a4xx_pm_suspend,
+		.pm_resume = a4xx_pm_resume,
 		.recover = a4xx_recover,
-		.last_fence = adreno_last_fence,
+		.submitted_fence = adreno_submitted_fence,
 		.submit = adreno_submit,
 		.flush = adreno_flush,
-		.idle = a4xx_idle,
+		.active_ring = adreno_active_ring,
 		.irq = a4xx_irq,
 		.destroy = a4xx_destroy,
 #ifdef CONFIG_DEBUG_FS
 		.show = a4xx_show,
 #endif
 	},
+	.get_timestamp = a4xx_get_timestamp,
 };
 
 struct msm_gpu *a4xx_gpu_init(struct drm_device *dev)
@@ -538,6 +538,7 @@
 	struct msm_gpu *gpu;
 	struct msm_drm_private *priv = dev->dev_private;
 	struct platform_device *pdev = priv->gpu_pdev;
+	struct msm_gpu_config a4xx_config = { 0 };
 	int ret;
 
 	if (!pdev) {
@@ -555,15 +556,19 @@
 	adreno_gpu = &a4xx_gpu->base;
 	gpu = &adreno_gpu->base;
 
-	a4xx_gpu->pdev = pdev;
-
 	gpu->perfcntrs = NULL;
 	gpu->num_perfcntrs = 0;
 
 	adreno_gpu->registers = a4xx_registers;
 	adreno_gpu->reg_offsets = a4xx_register_offsets;
 
-	ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs);
+	a4xx_config.ioname = MSM_GPU_DEFAULT_IONAME;
+	a4xx_config.irqname = MSM_GPU_DEFAULT_IRQNAME;
+	a4xx_config.nr_rings = 1;
+	a4xx_config.va_start = 0x300000;
+	a4xx_config.va_end = 0xffffffff;
+
+	ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, &a4xx_config);
 	if (ret)
 		goto fail;
 
@@ -582,7 +587,7 @@
 #endif
 	}
 
-	if (!gpu->mmu) {
+	if (!gpu->aspace) {
 		/* TODO we think it is possible to configure the GPU to
 		 * restrict access to VRAM carveout.  But the required
 		 * registers are unknown.  For now just bail out and
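
A note on the a4xx_pm_resume() hunk above: the do/while poll on
REG_A4XX_RBBM_POWER_STATUS has no exit other than SP_TP_PWR_ON going
high, so a rail that never powers up hangs resume. A bounded variant
could look like the sketch below; the accessors and register names are
the driver's own, while the retry budget and the -ETIMEDOUT return are
assumptions, not part of the patch:

#include <linux/delay.h>
#include <linux/errno.h>

/* Sketch only: the same poll as a4xx_pm_resume() above, but bounded
 * so a stuck SP/TP rail fails with -ETIMEDOUT instead of spinning
 * forever.  The 100 x 5us budget is an arbitrary assumption. */
static int a4xx_wait_sp_tp_power(struct msm_gpu *gpu)
{
	int i;

	for (i = 0; i < 100; i++) {
		if (gpu_read(gpu, REG_A4XX_RBBM_POWER_STATUS) &
		    A4XX_RBBM_POWER_CNTL_IP_SP_TP_PWR_ON)
			return 0;
		udelay(5);
	}
	return -ETIMEDOUT;
}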
diff -ruw linux-4.4.115/drivers/gpu/drm/msm/adreno/a4xx_gpu.h linux-4.4.115-fbx/drivers/gpu/drm/msm/adreno/a4xx_gpu.h
--- linux-4.4.115/drivers/gpu/drm/msm/adreno/a4xx_gpu.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/gpu/drm/msm/adreno/a4xx_gpu.h	2019-01-22 16:16:23.483246225 +0100
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014 The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014,2018 The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -23,7 +23,6 @@
 
 struct a4xx_gpu {
 	struct adreno_gpu base;
-	struct platform_device *pdev;
 
 	/* if OCMEM is used for GMEM: */
 	uint32_t ocmem_base;
diff -ruw linux-4.4.115/drivers/gpu/drm/msm/adreno/a4xx.xml.h linux-4.4.115-fbx/drivers/gpu/drm/msm/adreno/a4xx.xml.h
--- linux-4.4.115/drivers/gpu/drm/msm/adreno/a4xx.xml.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/gpu/drm/msm/adreno/a4xx.xml.h	2019-01-22 16:16:23.479246189 +0100
@@ -8,17 +8,19 @@
 git clone https://github.com/freedreno/envytools.git
 
 The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml               (    398 bytes, from 2015-09-24 17:25:31)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml  (   1453 bytes, from 2015-05-20 20:03:07)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml          (  32901 bytes, from 2015-05-20 20:03:14)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml (  10755 bytes, from 2015-09-14 20:46:55)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml    (  14968 bytes, from 2015-05-20 20:12:27)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml          (  67771 bytes, from 2015-09-14 20:46:55)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml          (  63970 bytes, from 2015-09-14 20:50:12)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/ocmem.xml         (   1773 bytes, from 2015-09-24 17:30:00)
+- ./adreno.xml               (    431 bytes, from 2016-10-24 21:12:27)
+- ./freedreno_copyright.xml  (   1572 bytes, from 2016-10-24 21:12:27)
+- ./adreno/a2xx.xml          (  32901 bytes, from 2016-10-24 21:12:27)
+- ./adreno/adreno_common.xml (  12025 bytes, from 2016-10-24 21:12:27)
+- ./adreno/adreno_pm4.xml    (  19684 bytes, from 2016-10-24 21:12:27)
+- ./adreno/a3xx.xml          (  83840 bytes, from 2016-10-24 21:12:27)
+- ./adreno/a4xx.xml          ( 110708 bytes, from 2016-10-24 21:12:27)
+- ./adreno/a5xx.xml          (  81207 bytes, from 2016-10-26 19:36:59)
+- ./adreno/ocmem.xml         (   1773 bytes, from 2016-10-24 21:12:27)
 
-Copyright (C) 2013-2015 by the following authors:
+Copyright (C) 2013-2016 by the following authors:
 - Rob Clark <robdclark@gmail.com> (robclark)
+- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
 
 Permission is hereby granted, free of charge, to any person obtaining
 a copy of this software and associated documentation files (the
@@ -45,13 +47,18 @@
 enum a4xx_color_fmt {
 	RB4_A8_UNORM = 1,
 	RB4_R8_UNORM = 2,
+	RB4_R8_SNORM = 3,
+	RB4_R8_UINT = 4,
+	RB4_R8_SINT = 5,
 	RB4_R4G4B4A4_UNORM = 8,
 	RB4_R5G5B5A1_UNORM = 10,
-	RB4_R5G6R5_UNORM = 14,
+	RB4_R5G6B5_UNORM = 14,
 	RB4_R8G8_UNORM = 15,
 	RB4_R8G8_SNORM = 16,
 	RB4_R8G8_UINT = 17,
 	RB4_R8G8_SINT = 18,
+	RB4_R16_UNORM = 19,
+	RB4_R16_SNORM = 20,
 	RB4_R16_FLOAT = 21,
 	RB4_R16_UINT = 22,
 	RB4_R16_SINT = 23,
@@ -63,12 +70,16 @@
 	RB4_R10G10B10A2_UNORM = 31,
 	RB4_R10G10B10A2_UINT = 34,
 	RB4_R11G11B10_FLOAT = 39,
+	RB4_R16G16_UNORM = 40,
+	RB4_R16G16_SNORM = 41,
 	RB4_R16G16_FLOAT = 42,
 	RB4_R16G16_UINT = 43,
 	RB4_R16G16_SINT = 44,
 	RB4_R32_FLOAT = 45,
 	RB4_R32_UINT = 46,
 	RB4_R32_SINT = 47,
+	RB4_R16G16B16A16_UNORM = 52,
+	RB4_R16G16B16A16_SNORM = 53,
 	RB4_R16G16B16A16_FLOAT = 54,
 	RB4_R16G16B16A16_UINT = 55,
 	RB4_R16G16B16A16_SINT = 56,
@@ -82,17 +93,10 @@
 
 enum a4xx_tile_mode {
 	TILE4_LINEAR = 0,
+	TILE4_2 = 2,
 	TILE4_3 = 3,
 };
 
-enum a4xx_rb_blend_opcode {
-	BLEND_DST_PLUS_SRC = 0,
-	BLEND_SRC_MINUS_DST = 1,
-	BLEND_DST_MINUS_SRC = 2,
-	BLEND_MIN_DST_SRC = 3,
-	BLEND_MAX_DST_SRC = 4,
-};
-
 enum a4xx_vtx_fmt {
 	VFMT4_32_FLOAT = 1,
 	VFMT4_32_32_FLOAT = 2,
@@ -106,6 +110,7 @@
 	VFMT4_32_32_FIXED = 10,
 	VFMT4_32_32_32_FIXED = 11,
 	VFMT4_32_32_32_32_FIXED = 12,
+	VFMT4_11_11_10_FLOAT = 13,
 	VFMT4_16_SINT = 16,
 	VFMT4_16_16_SINT = 17,
 	VFMT4_16_16_16_SINT = 18,
@@ -146,52 +151,76 @@
 	VFMT4_8_8_SNORM = 53,
 	VFMT4_8_8_8_SNORM = 54,
 	VFMT4_8_8_8_8_SNORM = 55,
-	VFMT4_10_10_10_2_UINT = 60,
-	VFMT4_10_10_10_2_UNORM = 61,
-	VFMT4_10_10_10_2_SINT = 62,
-	VFMT4_10_10_10_2_SNORM = 63,
+	VFMT4_10_10_10_2_UINT = 56,
+	VFMT4_10_10_10_2_UNORM = 57,
+	VFMT4_10_10_10_2_SINT = 58,
+	VFMT4_10_10_10_2_SNORM = 59,
+	VFMT4_2_10_10_10_UINT = 60,
+	VFMT4_2_10_10_10_UNORM = 61,
+	VFMT4_2_10_10_10_SINT = 62,
+	VFMT4_2_10_10_10_SNORM = 63,
 };
 
 enum a4xx_tex_fmt {
-	TFMT4_5_6_5_UNORM = 11,
-	TFMT4_5_5_5_1_UNORM = 10,
-	TFMT4_4_4_4_4_UNORM = 8,
-	TFMT4_X8Z24_UNORM = 71,
-	TFMT4_10_10_10_2_UNORM = 33,
 	TFMT4_A8_UNORM = 3,
-	TFMT4_L8_A8_UNORM = 13,
 	TFMT4_8_UNORM = 4,
-	TFMT4_8_8_UNORM = 14,
-	TFMT4_8_8_8_8_UNORM = 28,
 	TFMT4_8_SNORM = 5,
-	TFMT4_8_8_SNORM = 15,
-	TFMT4_8_8_8_8_SNORM = 29,
 	TFMT4_8_UINT = 6,
-	TFMT4_8_8_UINT = 16,
-	TFMT4_8_8_8_8_UINT = 30,
 	TFMT4_8_SINT = 7,
+	TFMT4_4_4_4_4_UNORM = 8,
+	TFMT4_5_5_5_1_UNORM = 9,
+	TFMT4_5_6_5_UNORM = 11,
+	TFMT4_L8_A8_UNORM = 13,
+	TFMT4_8_8_UNORM = 14,
+	TFMT4_8_8_SNORM = 15,
+	TFMT4_8_8_UINT = 16,
 	TFMT4_8_8_SINT = 17,
-	TFMT4_8_8_8_8_SINT = 31,
+	TFMT4_16_UNORM = 18,
+	TFMT4_16_SNORM = 19,
+	TFMT4_16_FLOAT = 20,
 	TFMT4_16_UINT = 21,
-	TFMT4_16_16_UINT = 41,
-	TFMT4_16_16_16_16_UINT = 54,
 	TFMT4_16_SINT = 22,
+	TFMT4_8_8_8_8_UNORM = 28,
+	TFMT4_8_8_8_8_SNORM = 29,
+	TFMT4_8_8_8_8_UINT = 30,
+	TFMT4_8_8_8_8_SINT = 31,
+	TFMT4_9_9_9_E5_FLOAT = 32,
+	TFMT4_10_10_10_2_UNORM = 33,
+	TFMT4_10_10_10_2_UINT = 34,
+	TFMT4_11_11_10_FLOAT = 37,
+	TFMT4_16_16_UNORM = 38,
+	TFMT4_16_16_SNORM = 39,
+	TFMT4_16_16_FLOAT = 40,
+	TFMT4_16_16_UINT = 41,
 	TFMT4_16_16_SINT = 42,
-	TFMT4_16_16_16_16_SINT = 55,
+	TFMT4_32_FLOAT = 43,
 	TFMT4_32_UINT = 44,
-	TFMT4_32_32_UINT = 57,
-	TFMT4_32_32_32_32_UINT = 64,
 	TFMT4_32_SINT = 45,
-	TFMT4_32_32_SINT = 58,
-	TFMT4_32_32_32_32_SINT = 65,
-	TFMT4_16_FLOAT = 20,
-	TFMT4_16_16_FLOAT = 40,
+	TFMT4_16_16_16_16_UNORM = 51,
+	TFMT4_16_16_16_16_SNORM = 52,
 	TFMT4_16_16_16_16_FLOAT = 53,
-	TFMT4_32_FLOAT = 43,
+	TFMT4_16_16_16_16_UINT = 54,
+	TFMT4_16_16_16_16_SINT = 55,
 	TFMT4_32_32_FLOAT = 56,
+	TFMT4_32_32_UINT = 57,
+	TFMT4_32_32_SINT = 58,
+	TFMT4_32_32_32_FLOAT = 59,
+	TFMT4_32_32_32_UINT = 60,
+	TFMT4_32_32_32_SINT = 61,
 	TFMT4_32_32_32_32_FLOAT = 63,
-	TFMT4_9_9_9_E5_FLOAT = 32,
-	TFMT4_11_11_10_FLOAT = 37,
+	TFMT4_32_32_32_32_UINT = 64,
+	TFMT4_32_32_32_32_SINT = 65,
+	TFMT4_X8Z24_UNORM = 71,
+	TFMT4_DXT1 = 86,
+	TFMT4_DXT3 = 87,
+	TFMT4_DXT5 = 88,
+	TFMT4_RGTC1_UNORM = 90,
+	TFMT4_RGTC1_SNORM = 91,
+	TFMT4_RGTC2_UNORM = 94,
+	TFMT4_RGTC2_SNORM = 95,
+	TFMT4_BPTC_UFLOAT = 97,
+	TFMT4_BPTC_FLOAT = 98,
+	TFMT4_BPTC = 99,
 	TFMT4_ATC_RGB = 100,
 	TFMT4_ATC_RGBA_EXPLICIT = 101,
 	TFMT4_ATC_RGBA_INTERPOLATED = 102,
@@ -240,6 +269,545 @@
 	EVEN_SPACING = 3,
 };
 
+enum a4xx_ccu_perfcounter_select {
+	CCU_BUSY_CYCLES = 0,
+	CCU_RB_DEPTH_RETURN_STALL = 2,
+	CCU_RB_COLOR_RETURN_STALL = 3,
+	CCU_DEPTH_BLOCKS = 6,
+	CCU_COLOR_BLOCKS = 7,
+	CCU_DEPTH_BLOCK_HIT = 8,
+	CCU_COLOR_BLOCK_HIT = 9,
+	CCU_DEPTH_FLAG1_COUNT = 10,
+	CCU_DEPTH_FLAG2_COUNT = 11,
+	CCU_DEPTH_FLAG3_COUNT = 12,
+	CCU_DEPTH_FLAG4_COUNT = 13,
+	CCU_COLOR_FLAG1_COUNT = 14,
+	CCU_COLOR_FLAG2_COUNT = 15,
+	CCU_COLOR_FLAG3_COUNT = 16,
+	CCU_COLOR_FLAG4_COUNT = 17,
+	CCU_PARTIAL_BLOCK_READ = 18,
+};
+
+enum a4xx_cp_perfcounter_select {
+	CP_ALWAYS_COUNT = 0,
+	CP_BUSY = 1,
+	CP_PFP_IDLE = 2,
+	CP_PFP_BUSY_WORKING = 3,
+	CP_PFP_STALL_CYCLES_ANY = 4,
+	CP_PFP_STARVE_CYCLES_ANY = 5,
+	CP_PFP_STARVED_PER_LOAD_ADDR = 6,
+	CP_PFP_STALLED_PER_STORE_ADDR = 7,
+	CP_PFP_PC_PROFILE = 8,
+	CP_PFP_MATCH_PM4_PKT_PROFILE = 9,
+	CP_PFP_COND_INDIRECT_DISCARDED = 10,
+	CP_LONG_RESUMPTIONS = 11,
+	CP_RESUME_CYCLES = 12,
+	CP_RESUME_TO_BOUNDARY_CYCLES = 13,
+	CP_LONG_PREEMPTIONS = 14,
+	CP_PREEMPT_CYCLES = 15,
+	CP_PREEMPT_TO_BOUNDARY_CYCLES = 16,
+	CP_ME_FIFO_EMPTY_PFP_IDLE = 17,
+	CP_ME_FIFO_EMPTY_PFP_BUSY = 18,
+	CP_ME_FIFO_NOT_EMPTY_NOT_FULL = 19,
+	CP_ME_FIFO_FULL_ME_BUSY = 20,
+	CP_ME_FIFO_FULL_ME_NON_WORKING = 21,
+	CP_ME_WAITING_FOR_PACKETS = 22,
+	CP_ME_BUSY_WORKING = 23,
+	CP_ME_STARVE_CYCLES_ANY = 24,
+	CP_ME_STARVE_CYCLES_PER_PROFILE = 25,
+	CP_ME_STALL_CYCLES_PER_PROFILE = 26,
+	CP_ME_PC_PROFILE = 27,
+	CP_RCIU_FIFO_EMPTY = 28,
+	CP_RCIU_FIFO_NOT_EMPTY_NOT_FULL = 29,
+	CP_RCIU_FIFO_FULL = 30,
+	CP_RCIU_FIFO_FULL_NO_CONTEXT = 31,
+	CP_RCIU_FIFO_FULL_AHB_MASTER = 32,
+	CP_RCIU_FIFO_FULL_OTHER = 33,
+	CP_AHB_IDLE = 34,
+	CP_AHB_STALL_ON_GRANT_NO_SPLIT = 35,
+	CP_AHB_STALL_ON_GRANT_SPLIT = 36,
+	CP_AHB_STALL_ON_GRANT_SPLIT_PROFILE = 37,
+	CP_AHB_BUSY_WORKING = 38,
+	CP_AHB_BUSY_STALL_ON_HRDY = 39,
+	CP_AHB_BUSY_STALL_ON_HRDY_PROFILE = 40,
+};
+
+enum a4xx_gras_ras_perfcounter_select {
+	RAS_SUPER_TILES = 0,
+	RAS_8X8_TILES = 1,
+	RAS_4X4_TILES = 2,
+	RAS_BUSY_CYCLES = 3,
+	RAS_STALL_CYCLES_BY_RB = 4,
+	RAS_STALL_CYCLES_BY_VSC = 5,
+	RAS_STARVE_CYCLES_BY_TSE = 6,
+	RAS_SUPERTILE_CYCLES = 7,
+	RAS_TILE_CYCLES = 8,
+	RAS_FULLY_COVERED_SUPER_TILES = 9,
+	RAS_FULLY_COVERED_8X8_TILES = 10,
+	RAS_4X4_PRIM = 11,
+	RAS_8X4_4X8_PRIM = 12,
+	RAS_8X8_PRIM = 13,
+};
+
+enum a4xx_gras_tse_perfcounter_select {
+	TSE_INPUT_PRIM = 0,
+	TSE_INPUT_NULL_PRIM = 1,
+	TSE_TRIVAL_REJ_PRIM = 2,
+	TSE_CLIPPED_PRIM = 3,
+	TSE_NEW_PRIM = 4,
+	TSE_ZERO_AREA_PRIM = 5,
+	TSE_FACENESS_CULLED_PRIM = 6,
+	TSE_ZERO_PIXEL_PRIM = 7,
+	TSE_OUTPUT_NULL_PRIM = 8,
+	TSE_OUTPUT_VISIBLE_PRIM = 9,
+	TSE_PRE_CLIP_PRIM = 10,
+	TSE_POST_CLIP_PRIM = 11,
+	TSE_BUSY_CYCLES = 12,
+	TSE_PC_STARVE = 13,
+	TSE_RAS_STALL = 14,
+	TSE_STALL_BARYPLANE_FIFO_FULL = 15,
+	TSE_STALL_ZPLANE_FIFO_FULL = 16,
+};
+
+enum a4xx_hlsq_perfcounter_select {
+	HLSQ_SP_VS_STAGE_CONSTANT = 0,
+	HLSQ_SP_VS_STAGE_INSTRUCTIONS = 1,
+	HLSQ_SP_FS_STAGE_CONSTANT = 2,
+	HLSQ_SP_FS_STAGE_INSTRUCTIONS = 3,
+	HLSQ_TP_STATE = 4,
+	HLSQ_QUADS = 5,
+	HLSQ_PIXELS = 6,
+	HLSQ_VERTICES = 7,
+	HLSQ_SP_VS_STAGE_DATA_BYTES = 13,
+	HLSQ_SP_FS_STAGE_DATA_BYTES = 14,
+	HLSQ_BUSY_CYCLES = 15,
+	HLSQ_STALL_CYCLES_SP_STATE = 16,
+	HLSQ_STALL_CYCLES_SP_VS_STAGE = 17,
+	HLSQ_STALL_CYCLES_SP_FS_STAGE = 18,
+	HLSQ_STALL_CYCLES_UCHE = 19,
+	HLSQ_RBBM_LOAD_CYCLES = 20,
+	HLSQ_DI_TO_VS_START_SP = 21,
+	HLSQ_DI_TO_FS_START_SP = 22,
+	HLSQ_VS_STAGE_START_TO_DONE_SP = 23,
+	HLSQ_FS_STAGE_START_TO_DONE_SP = 24,
+	HLSQ_SP_STATE_COPY_CYCLES_VS_STAGE = 25,
+	HLSQ_SP_STATE_COPY_CYCLES_FS_STAGE = 26,
+	HLSQ_UCHE_LATENCY_CYCLES = 27,
+	HLSQ_UCHE_LATENCY_COUNT = 28,
+	HLSQ_STARVE_CYCLES_VFD = 29,
+};
+
+enum a4xx_pc_perfcounter_select {
+	PC_VIS_STREAMS_LOADED = 0,
+	PC_VPC_PRIMITIVES = 2,
+	PC_DEAD_PRIM = 3,
+	PC_LIVE_PRIM = 4,
+	PC_DEAD_DRAWCALLS = 5,
+	PC_LIVE_DRAWCALLS = 6,
+	PC_VERTEX_MISSES = 7,
+	PC_STALL_CYCLES_VFD = 9,
+	PC_STALL_CYCLES_TSE = 10,
+	PC_STALL_CYCLES_UCHE = 11,
+	PC_WORKING_CYCLES = 12,
+	PC_IA_VERTICES = 13,
+	PC_GS_PRIMITIVES = 14,
+	PC_HS_INVOCATIONS = 15,
+	PC_DS_INVOCATIONS = 16,
+	PC_DS_PRIMITIVES = 17,
+	PC_STARVE_CYCLES_FOR_INDEX = 20,
+	PC_STARVE_CYCLES_FOR_TESS_FACTOR = 21,
+	PC_STARVE_CYCLES_FOR_VIZ_STREAM = 22,
+	PC_STALL_CYCLES_TESS = 23,
+	PC_STARVE_CYCLES_FOR_POSITION = 24,
+	PC_MODE0_DRAWCALL = 25,
+	PC_MODE1_DRAWCALL = 26,
+	PC_MODE2_DRAWCALL = 27,
+	PC_MODE3_DRAWCALL = 28,
+	PC_MODE4_DRAWCALL = 29,
+	PC_PREDICATED_DEAD_DRAWCALL = 30,
+	PC_STALL_CYCLES_BY_TSE_ONLY = 31,
+	PC_STALL_CYCLES_BY_VPC_ONLY = 32,
+	PC_VPC_POS_DATA_TRANSACTION = 33,
+	PC_BUSY_CYCLES = 34,
+	PC_STARVE_CYCLES_DI = 35,
+	PC_STALL_CYCLES_VPC = 36,
+	TESS_WORKING_CYCLES = 37,
+	TESS_NUM_CYCLES_SETUP_WORKING = 38,
+	TESS_NUM_CYCLES_PTGEN_WORKING = 39,
+	TESS_NUM_CYCLES_CONNGEN_WORKING = 40,
+	TESS_BUSY_CYCLES = 41,
+	TESS_STARVE_CYCLES_PC = 42,
+	TESS_STALL_CYCLES_PC = 43,
+};
+
+enum a4xx_pwr_perfcounter_select {
+	PWR_CORE_CLOCK_CYCLES = 0,
+	PWR_BUSY_CLOCK_CYCLES = 1,
+};
+
+enum a4xx_rb_perfcounter_select {
+	RB_BUSY_CYCLES = 0,
+	RB_BUSY_CYCLES_BINNING = 1,
+	RB_BUSY_CYCLES_RENDERING = 2,
+	RB_BUSY_CYCLES_RESOLVE = 3,
+	RB_STARVE_CYCLES_BY_SP = 4,
+	RB_STARVE_CYCLES_BY_RAS = 5,
+	RB_STARVE_CYCLES_BY_MARB = 6,
+	RB_STALL_CYCLES_BY_MARB = 7,
+	RB_STALL_CYCLES_BY_HLSQ = 8,
+	RB_RB_RB_MARB_DATA = 9,
+	RB_SP_RB_QUAD = 10,
+	RB_RAS_RB_Z_QUADS = 11,
+	RB_GMEM_CH0_READ = 12,
+	RB_GMEM_CH1_READ = 13,
+	RB_GMEM_CH0_WRITE = 14,
+	RB_GMEM_CH1_WRITE = 15,
+	RB_CP_CONTEXT_DONE = 16,
+	RB_CP_CACHE_FLUSH = 17,
+	RB_CP_ZPASS_DONE = 18,
+	RB_STALL_FIFO0_FULL = 19,
+	RB_STALL_FIFO1_FULL = 20,
+	RB_STALL_FIFO2_FULL = 21,
+	RB_STALL_FIFO3_FULL = 22,
+	RB_RB_HLSQ_TRANSACTIONS = 23,
+	RB_Z_READ = 24,
+	RB_Z_WRITE = 25,
+	RB_C_READ = 26,
+	RB_C_WRITE = 27,
+	RB_C_READ_LATENCY = 28,
+	RB_Z_READ_LATENCY = 29,
+	RB_STALL_BY_UCHE = 30,
+	RB_MARB_UCHE_TRANSACTIONS = 31,
+	RB_CACHE_STALL_MISS = 32,
+	RB_CACHE_STALL_FIFO_FULL = 33,
+	RB_8BIT_BLENDER_UNITS_ACTIVE = 34,
+	RB_16BIT_BLENDER_UNITS_ACTIVE = 35,
+	RB_SAMPLER_UNITS_ACTIVE = 36,
+	RB_TOTAL_PASS = 38,
+	RB_Z_PASS = 39,
+	RB_Z_FAIL = 40,
+	RB_S_FAIL = 41,
+	RB_POWER0 = 42,
+	RB_POWER1 = 43,
+	RB_POWER2 = 44,
+	RB_POWER3 = 45,
+	RB_POWER4 = 46,
+	RB_POWER5 = 47,
+	RB_POWER6 = 48,
+	RB_POWER7 = 49,
+};
+
+enum a4xx_rbbm_perfcounter_select {
+	RBBM_ALWAYS_ON = 0,
+	RBBM_VBIF_BUSY = 1,
+	RBBM_TSE_BUSY = 2,
+	RBBM_RAS_BUSY = 3,
+	RBBM_PC_DCALL_BUSY = 4,
+	RBBM_PC_VSD_BUSY = 5,
+	RBBM_VFD_BUSY = 6,
+	RBBM_VPC_BUSY = 7,
+	RBBM_UCHE_BUSY = 8,
+	RBBM_VSC_BUSY = 9,
+	RBBM_HLSQ_BUSY = 10,
+	RBBM_ANY_RB_BUSY = 11,
+	RBBM_ANY_TPL1_BUSY = 12,
+	RBBM_ANY_SP_BUSY = 13,
+	RBBM_ANY_MARB_BUSY = 14,
+	RBBM_ANY_ARB_BUSY = 15,
+	RBBM_AHB_STATUS_BUSY = 16,
+	RBBM_AHB_STATUS_STALLED = 17,
+	RBBM_AHB_STATUS_TXFR = 18,
+	RBBM_AHB_STATUS_TXFR_SPLIT = 19,
+	RBBM_AHB_STATUS_TXFR_ERROR = 20,
+	RBBM_AHB_STATUS_LONG_STALL = 21,
+	RBBM_STATUS_MASKED = 22,
+	RBBM_CP_BUSY_GFX_CORE_IDLE = 23,
+	RBBM_TESS_BUSY = 24,
+	RBBM_COM_BUSY = 25,
+	RBBM_DCOM_BUSY = 32,
+	RBBM_ANY_CCU_BUSY = 33,
+	RBBM_DPM_BUSY = 34,
+};
+
+enum a4xx_sp_perfcounter_select {
+	SP_LM_LOAD_INSTRUCTIONS = 0,
+	SP_LM_STORE_INSTRUCTIONS = 1,
+	SP_LM_ATOMICS = 2,
+	SP_GM_LOAD_INSTRUCTIONS = 3,
+	SP_GM_STORE_INSTRUCTIONS = 4,
+	SP_GM_ATOMICS = 5,
+	SP_VS_STAGE_TEX_INSTRUCTIONS = 6,
+	SP_VS_STAGE_CFLOW_INSTRUCTIONS = 7,
+	SP_VS_STAGE_EFU_INSTRUCTIONS = 8,
+	SP_VS_STAGE_FULL_ALU_INSTRUCTIONS = 9,
+	SP_VS_STAGE_HALF_ALU_INSTRUCTIONS = 10,
+	SP_FS_STAGE_TEX_INSTRUCTIONS = 11,
+	SP_FS_STAGE_CFLOW_INSTRUCTIONS = 12,
+	SP_FS_STAGE_EFU_INSTRUCTIONS = 13,
+	SP_FS_STAGE_FULL_ALU_INSTRUCTIONS = 14,
+	SP_FS_STAGE_HALF_ALU_INSTRUCTIONS = 15,
+	SP_VS_INSTRUCTIONS = 17,
+	SP_FS_INSTRUCTIONS = 18,
+	SP_ADDR_LOCK_COUNT = 19,
+	SP_UCHE_READ_TRANS = 20,
+	SP_UCHE_WRITE_TRANS = 21,
+	SP_EXPORT_VPC_TRANS = 22,
+	SP_EXPORT_RB_TRANS = 23,
+	SP_PIXELS_KILLED = 24,
+	SP_ICL1_REQUESTS = 25,
+	SP_ICL1_MISSES = 26,
+	SP_ICL0_REQUESTS = 27,
+	SP_ICL0_MISSES = 28,
+	SP_ALU_WORKING_CYCLES = 29,
+	SP_EFU_WORKING_CYCLES = 30,
+	SP_STALL_CYCLES_BY_VPC = 31,
+	SP_STALL_CYCLES_BY_TP = 32,
+	SP_STALL_CYCLES_BY_UCHE = 33,
+	SP_STALL_CYCLES_BY_RB = 34,
+	SP_BUSY_CYCLES = 35,
+	SP_HS_INSTRUCTIONS = 36,
+	SP_DS_INSTRUCTIONS = 37,
+	SP_GS_INSTRUCTIONS = 38,
+	SP_CS_INSTRUCTIONS = 39,
+	SP_SCHEDULER_NON_WORKING = 40,
+	SP_WAVE_CONTEXTS = 41,
+	SP_WAVE_CONTEXT_CYCLES = 42,
+	SP_POWER0 = 43,
+	SP_POWER1 = 44,
+	SP_POWER2 = 45,
+	SP_POWER3 = 46,
+	SP_POWER4 = 47,
+	SP_POWER5 = 48,
+	SP_POWER6 = 49,
+	SP_POWER7 = 50,
+	SP_POWER8 = 51,
+	SP_POWER9 = 52,
+	SP_POWER10 = 53,
+	SP_POWER11 = 54,
+	SP_POWER12 = 55,
+	SP_POWER13 = 56,
+	SP_POWER14 = 57,
+	SP_POWER15 = 58,
+};
+
+enum a4xx_tp_perfcounter_select {
+	TP_L1_REQUESTS = 0,
+	TP_L1_MISSES = 1,
+	TP_QUADS_OFFSET = 8,
+	TP_QUAD_SHADOW = 9,
+	TP_QUADS_ARRAY = 10,
+	TP_QUADS_GRADIENT = 11,
+	TP_QUADS_1D2D = 12,
+	TP_QUADS_3DCUBE = 13,
+	TP_BUSY_CYCLES = 16,
+	TP_STALL_CYCLES_BY_ARB = 17,
+	TP_STATE_CACHE_REQUESTS = 20,
+	TP_STATE_CACHE_MISSES = 21,
+	TP_POWER0 = 22,
+	TP_POWER1 = 23,
+	TP_POWER2 = 24,
+	TP_POWER3 = 25,
+	TP_POWER4 = 26,
+	TP_POWER5 = 27,
+	TP_POWER6 = 28,
+	TP_POWER7 = 29,
+};
+
+enum a4xx_uche_perfcounter_select {
+	UCHE_VBIF_READ_BEATS_TP = 0,
+	UCHE_VBIF_READ_BEATS_VFD = 1,
+	UCHE_VBIF_READ_BEATS_HLSQ = 2,
+	UCHE_VBIF_READ_BEATS_MARB = 3,
+	UCHE_VBIF_READ_BEATS_SP = 4,
+	UCHE_READ_REQUESTS_TP = 5,
+	UCHE_READ_REQUESTS_VFD = 6,
+	UCHE_READ_REQUESTS_HLSQ = 7,
+	UCHE_READ_REQUESTS_MARB = 8,
+	UCHE_READ_REQUESTS_SP = 9,
+	UCHE_WRITE_REQUESTS_MARB = 10,
+	UCHE_WRITE_REQUESTS_SP = 11,
+	UCHE_TAG_CHECK_FAILS = 12,
+	UCHE_EVICTS = 13,
+	UCHE_FLUSHES = 14,
+	UCHE_VBIF_LATENCY_CYCLES = 15,
+	UCHE_VBIF_LATENCY_SAMPLES = 16,
+	UCHE_BUSY_CYCLES = 17,
+	UCHE_VBIF_READ_BEATS_PC = 18,
+	UCHE_READ_REQUESTS_PC = 19,
+	UCHE_WRITE_REQUESTS_VPC = 20,
+	UCHE_STALL_BY_VBIF = 21,
+	UCHE_WRITE_REQUESTS_VSC = 22,
+	UCHE_POWER0 = 23,
+	UCHE_POWER1 = 24,
+	UCHE_POWER2 = 25,
+	UCHE_POWER3 = 26,
+	UCHE_POWER4 = 27,
+	UCHE_POWER5 = 28,
+	UCHE_POWER6 = 29,
+	UCHE_POWER7 = 30,
+};
+
+enum a4xx_vbif_perfcounter_select {
+	AXI_READ_REQUESTS_ID_0 = 0,
+	AXI_READ_REQUESTS_ID_1 = 1,
+	AXI_READ_REQUESTS_ID_2 = 2,
+	AXI_READ_REQUESTS_ID_3 = 3,
+	AXI_READ_REQUESTS_ID_4 = 4,
+	AXI_READ_REQUESTS_ID_5 = 5,
+	AXI_READ_REQUESTS_ID_6 = 6,
+	AXI_READ_REQUESTS_ID_7 = 7,
+	AXI_READ_REQUESTS_ID_8 = 8,
+	AXI_READ_REQUESTS_ID_9 = 9,
+	AXI_READ_REQUESTS_ID_10 = 10,
+	AXI_READ_REQUESTS_ID_11 = 11,
+	AXI_READ_REQUESTS_ID_12 = 12,
+	AXI_READ_REQUESTS_ID_13 = 13,
+	AXI_READ_REQUESTS_ID_14 = 14,
+	AXI_READ_REQUESTS_ID_15 = 15,
+	AXI0_READ_REQUESTS_TOTAL = 16,
+	AXI1_READ_REQUESTS_TOTAL = 17,
+	AXI2_READ_REQUESTS_TOTAL = 18,
+	AXI3_READ_REQUESTS_TOTAL = 19,
+	AXI_READ_REQUESTS_TOTAL = 20,
+	AXI_WRITE_REQUESTS_ID_0 = 21,
+	AXI_WRITE_REQUESTS_ID_1 = 22,
+	AXI_WRITE_REQUESTS_ID_2 = 23,
+	AXI_WRITE_REQUESTS_ID_3 = 24,
+	AXI_WRITE_REQUESTS_ID_4 = 25,
+	AXI_WRITE_REQUESTS_ID_5 = 26,
+	AXI_WRITE_REQUESTS_ID_6 = 27,
+	AXI_WRITE_REQUESTS_ID_7 = 28,
+	AXI_WRITE_REQUESTS_ID_8 = 29,
+	AXI_WRITE_REQUESTS_ID_9 = 30,
+	AXI_WRITE_REQUESTS_ID_10 = 31,
+	AXI_WRITE_REQUESTS_ID_11 = 32,
+	AXI_WRITE_REQUESTS_ID_12 = 33,
+	AXI_WRITE_REQUESTS_ID_13 = 34,
+	AXI_WRITE_REQUESTS_ID_14 = 35,
+	AXI_WRITE_REQUESTS_ID_15 = 36,
+	AXI0_WRITE_REQUESTS_TOTAL = 37,
+	AXI1_WRITE_REQUESTS_TOTAL = 38,
+	AXI2_WRITE_REQUESTS_TOTAL = 39,
+	AXI3_WRITE_REQUESTS_TOTAL = 40,
+	AXI_WRITE_REQUESTS_TOTAL = 41,
+	AXI_TOTAL_REQUESTS = 42,
+	AXI_READ_DATA_BEATS_ID_0 = 43,
+	AXI_READ_DATA_BEATS_ID_1 = 44,
+	AXI_READ_DATA_BEATS_ID_2 = 45,
+	AXI_READ_DATA_BEATS_ID_3 = 46,
+	AXI_READ_DATA_BEATS_ID_4 = 47,
+	AXI_READ_DATA_BEATS_ID_5 = 48,
+	AXI_READ_DATA_BEATS_ID_6 = 49,
+	AXI_READ_DATA_BEATS_ID_7 = 50,
+	AXI_READ_DATA_BEATS_ID_8 = 51,
+	AXI_READ_DATA_BEATS_ID_9 = 52,
+	AXI_READ_DATA_BEATS_ID_10 = 53,
+	AXI_READ_DATA_BEATS_ID_11 = 54,
+	AXI_READ_DATA_BEATS_ID_12 = 55,
+	AXI_READ_DATA_BEATS_ID_13 = 56,
+	AXI_READ_DATA_BEATS_ID_14 = 57,
+	AXI_READ_DATA_BEATS_ID_15 = 58,
+	AXI0_READ_DATA_BEATS_TOTAL = 59,
+	AXI1_READ_DATA_BEATS_TOTAL = 60,
+	AXI2_READ_DATA_BEATS_TOTAL = 61,
+	AXI3_READ_DATA_BEATS_TOTAL = 62,
+	AXI_READ_DATA_BEATS_TOTAL = 63,
+	AXI_WRITE_DATA_BEATS_ID_0 = 64,
+	AXI_WRITE_DATA_BEATS_ID_1 = 65,
+	AXI_WRITE_DATA_BEATS_ID_2 = 66,
+	AXI_WRITE_DATA_BEATS_ID_3 = 67,
+	AXI_WRITE_DATA_BEATS_ID_4 = 68,
+	AXI_WRITE_DATA_BEATS_ID_5 = 69,
+	AXI_WRITE_DATA_BEATS_ID_6 = 70,
+	AXI_WRITE_DATA_BEATS_ID_7 = 71,
+	AXI_WRITE_DATA_BEATS_ID_8 = 72,
+	AXI_WRITE_DATA_BEATS_ID_9 = 73,
+	AXI_WRITE_DATA_BEATS_ID_10 = 74,
+	AXI_WRITE_DATA_BEATS_ID_11 = 75,
+	AXI_WRITE_DATA_BEATS_ID_12 = 76,
+	AXI_WRITE_DATA_BEATS_ID_13 = 77,
+	AXI_WRITE_DATA_BEATS_ID_14 = 78,
+	AXI_WRITE_DATA_BEATS_ID_15 = 79,
+	AXI0_WRITE_DATA_BEATS_TOTAL = 80,
+	AXI1_WRITE_DATA_BEATS_TOTAL = 81,
+	AXI2_WRITE_DATA_BEATS_TOTAL = 82,
+	AXI3_WRITE_DATA_BEATS_TOTAL = 83,
+	AXI_WRITE_DATA_BEATS_TOTAL = 84,
+	AXI_DATA_BEATS_TOTAL = 85,
+	CYCLES_HELD_OFF_ID_0 = 86,
+	CYCLES_HELD_OFF_ID_1 = 87,
+	CYCLES_HELD_OFF_ID_2 = 88,
+	CYCLES_HELD_OFF_ID_3 = 89,
+	CYCLES_HELD_OFF_ID_4 = 90,
+	CYCLES_HELD_OFF_ID_5 = 91,
+	CYCLES_HELD_OFF_ID_6 = 92,
+	CYCLES_HELD_OFF_ID_7 = 93,
+	CYCLES_HELD_OFF_ID_8 = 94,
+	CYCLES_HELD_OFF_ID_9 = 95,
+	CYCLES_HELD_OFF_ID_10 = 96,
+	CYCLES_HELD_OFF_ID_11 = 97,
+	CYCLES_HELD_OFF_ID_12 = 98,
+	CYCLES_HELD_OFF_ID_13 = 99,
+	CYCLES_HELD_OFF_ID_14 = 100,
+	CYCLES_HELD_OFF_ID_15 = 101,
+	AXI_READ_REQUEST_HELD_OFF = 102,
+	AXI_WRITE_REQUEST_HELD_OFF = 103,
+	AXI_REQUEST_HELD_OFF = 104,
+	AXI_WRITE_DATA_HELD_OFF = 105,
+	OCMEM_AXI_READ_REQUEST_HELD_OFF = 106,
+	OCMEM_AXI_WRITE_REQUEST_HELD_OFF = 107,
+	OCMEM_AXI_REQUEST_HELD_OFF = 108,
+	OCMEM_AXI_WRITE_DATA_HELD_OFF = 109,
+	ELAPSED_CYCLES_DDR = 110,
+	ELAPSED_CYCLES_OCMEM = 111,
+};
+
+enum a4xx_vfd_perfcounter_select {
+	VFD_UCHE_BYTE_FETCHED = 0,
+	VFD_UCHE_TRANS = 1,
+	VFD_FETCH_INSTRUCTIONS = 3,
+	VFD_BUSY_CYCLES = 5,
+	VFD_STALL_CYCLES_UCHE = 6,
+	VFD_STALL_CYCLES_HLSQ = 7,
+	VFD_STALL_CYCLES_VPC_BYPASS = 8,
+	VFD_STALL_CYCLES_VPC_ALLOC = 9,
+	VFD_MODE_0_FIBERS = 13,
+	VFD_MODE_1_FIBERS = 14,
+	VFD_MODE_2_FIBERS = 15,
+	VFD_MODE_3_FIBERS = 16,
+	VFD_MODE_4_FIBERS = 17,
+	VFD_BFIFO_STALL = 18,
+	VFD_NUM_VERTICES_TOTAL = 19,
+	VFD_PACKER_FULL = 20,
+	VFD_UCHE_REQUEST_FIFO_FULL = 21,
+	VFD_STARVE_CYCLES_PC = 22,
+	VFD_STARVE_CYCLES_UCHE = 23,
+};
+
+enum a4xx_vpc_perfcounter_select {
+	VPC_SP_LM_COMPONENTS = 2,
+	VPC_SP0_LM_BYTES = 3,
+	VPC_SP1_LM_BYTES = 4,
+	VPC_SP2_LM_BYTES = 5,
+	VPC_SP3_LM_BYTES = 6,
+	VPC_WORKING_CYCLES = 7,
+	VPC_STALL_CYCLES_LM = 8,
+	VPC_STARVE_CYCLES_RAS = 9,
+	VPC_STREAMOUT_CYCLES = 10,
+	VPC_UCHE_TRANSACTIONS = 12,
+	VPC_STALL_CYCLES_UCHE = 13,
+	VPC_BUSY_CYCLES = 14,
+	VPC_STARVE_CYCLES_SP = 15,
+};
+
+enum a4xx_vsc_perfcounter_select {
+	VSC_BUSY_CYCLES = 0,
+	VSC_WORKING_CYCLES = 1,
+	VSC_STALL_CYCLES_UCHE = 2,
+	VSC_STARVE_CYCLES_RAS = 3,
+	VSC_EOT_NUM = 4,
+};
+
 enum a4xx_tex_filter {
 	A4XX_TEX_NEAREST = 0,
 	A4XX_TEX_LINEAR = 1,
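
These selector enums pair with the 64-bit LO/HI perfcounter banks
added further down in this header: the driver writes a selector into
the matching *_SEL_n register and then reads the LO/HI pair, exactly
as the a4xx_hw_init()/a4xx_get_timestamp() hunks above do for CP
counter 0. A hedged sketch using only names from this patch (the
choice of CP_BUSY is arbitrary):

/* Sketch only: point CP counter 0 at the busy-cycle countable and
 * read it back as one 64-bit value.  gpu_write()/gpu_read64() are the
 * msm driver accessors used elsewhere in this patch. */
static uint64_t example_read_cp_busy(struct msm_gpu *gpu)
{
	gpu_write(gpu, REG_A4XX_CP_PERFCTR_CP_SEL_0, CP_BUSY);
	return gpu_read64(gpu, REG_A4XX_RBBM_PERFCTR_CP_0_LO,
			REG_A4XX_RBBM_PERFCTR_CP_0_HI);
}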
@@ -326,6 +894,12 @@
 
 #define REG_A4XX_RB_PERFCTR_RB_SEL_7				0x00000cce
 
+#define REG_A4XX_RB_PERFCTR_CCU_SEL_0				0x00000ccf
+
+#define REG_A4XX_RB_PERFCTR_CCU_SEL_1				0x00000cd0
+
+#define REG_A4XX_RB_PERFCTR_CCU_SEL_2				0x00000cd1
+
 #define REG_A4XX_RB_PERFCTR_CCU_SEL_3				0x00000cd2
 
 #define REG_A4XX_RB_FRAME_BUFFER_DIMENSION			0x00000ce0
@@ -363,6 +937,7 @@
 {
 	return ((val >> 5) << A4XX_RB_MODE_CONTROL_HEIGHT__SHIFT) & A4XX_RB_MODE_CONTROL_HEIGHT__MASK;
 }
+#define A4XX_RB_MODE_CONTROL_ENABLE_GMEM			0x00010000
 
 #define REG_A4XX_RB_RENDER_CONTROL				0x000020a1
 #define A4XX_RB_RENDER_CONTROL_BINNING_PASS			0x00000001
@@ -400,8 +975,13 @@
 #define A4XX_RB_MRT_CONTROL_READ_DEST_ENABLE			0x00000008
 #define A4XX_RB_MRT_CONTROL_BLEND				0x00000010
 #define A4XX_RB_MRT_CONTROL_BLEND2				0x00000020
-#define A4XX_RB_MRT_CONTROL_FASTCLEAR				0x00000400
-#define A4XX_RB_MRT_CONTROL_B11					0x00000800
+#define A4XX_RB_MRT_CONTROL_ROP_ENABLE				0x00000040
+#define A4XX_RB_MRT_CONTROL_ROP_CODE__MASK			0x00000f00
+#define A4XX_RB_MRT_CONTROL_ROP_CODE__SHIFT			8
+static inline uint32_t A4XX_RB_MRT_CONTROL_ROP_CODE(enum a3xx_rop_code val)
+{
+	return ((val) << A4XX_RB_MRT_CONTROL_ROP_CODE__SHIFT) & A4XX_RB_MRT_CONTROL_ROP_CODE__MASK;
+}
 #define A4XX_RB_MRT_CONTROL_COMPONENT_ENABLE__MASK		0x0f000000
 #define A4XX_RB_MRT_CONTROL_COMPONENT_ENABLE__SHIFT		24
 static inline uint32_t A4XX_RB_MRT_CONTROL_COMPONENT_ENABLE(uint32_t val)
@@ -461,7 +1041,7 @@
 }
 #define A4XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__MASK	0x000000e0
 #define A4XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__SHIFT	5
-static inline uint32_t A4XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE(enum a4xx_rb_blend_opcode val)
+static inline uint32_t A4XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE(enum a3xx_rb_blend_opcode val)
 {
 	return ((val) << A4XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__SHIFT) & A4XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__MASK;
 }
@@ -479,7 +1059,7 @@
 }
 #define A4XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__MASK	0x00e00000
 #define A4XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__SHIFT	21
-static inline uint32_t A4XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE(enum a4xx_rb_blend_opcode val)
+static inline uint32_t A4XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE(enum a3xx_rb_blend_opcode val)
 {
 	return ((val) << A4XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__SHIFT) & A4XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__MASK;
 }
@@ -490,13 +1070,19 @@
 	return ((val) << A4XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR__SHIFT) & A4XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR__MASK;
 }
 
-#define REG_A4XX_RB_BLEND_RED					0x000020f3
-#define A4XX_RB_BLEND_RED_UINT__MASK				0x00007fff
+#define REG_A4XX_RB_BLEND_RED					0x000020f0
+#define A4XX_RB_BLEND_RED_UINT__MASK				0x000000ff
 #define A4XX_RB_BLEND_RED_UINT__SHIFT				0
 static inline uint32_t A4XX_RB_BLEND_RED_UINT(uint32_t val)
 {
 	return ((val) << A4XX_RB_BLEND_RED_UINT__SHIFT) & A4XX_RB_BLEND_RED_UINT__MASK;
 }
+#define A4XX_RB_BLEND_RED_SINT__MASK				0x0000ff00
+#define A4XX_RB_BLEND_RED_SINT__SHIFT				8
+static inline uint32_t A4XX_RB_BLEND_RED_SINT(uint32_t val)
+{
+	return ((val) << A4XX_RB_BLEND_RED_SINT__SHIFT) & A4XX_RB_BLEND_RED_SINT__MASK;
+}
 #define A4XX_RB_BLEND_RED_FLOAT__MASK				0xffff0000
 #define A4XX_RB_BLEND_RED_FLOAT__SHIFT				16
 static inline uint32_t A4XX_RB_BLEND_RED_FLOAT(float val)
@@ -504,13 +1090,27 @@
 	return ((util_float_to_half(val)) << A4XX_RB_BLEND_RED_FLOAT__SHIFT) & A4XX_RB_BLEND_RED_FLOAT__MASK;
 }
 
-#define REG_A4XX_RB_BLEND_GREEN					0x000020f4
-#define A4XX_RB_BLEND_GREEN_UINT__MASK				0x00007fff
+#define REG_A4XX_RB_BLEND_RED_F32				0x000020f1
+#define A4XX_RB_BLEND_RED_F32__MASK				0xffffffff
+#define A4XX_RB_BLEND_RED_F32__SHIFT				0
+static inline uint32_t A4XX_RB_BLEND_RED_F32(float val)
+{
+	return ((fui(val)) << A4XX_RB_BLEND_RED_F32__SHIFT) & A4XX_RB_BLEND_RED_F32__MASK;
+}
+
+#define REG_A4XX_RB_BLEND_GREEN					0x000020f2
+#define A4XX_RB_BLEND_GREEN_UINT__MASK				0x000000ff
 #define A4XX_RB_BLEND_GREEN_UINT__SHIFT				0
 static inline uint32_t A4XX_RB_BLEND_GREEN_UINT(uint32_t val)
 {
 	return ((val) << A4XX_RB_BLEND_GREEN_UINT__SHIFT) & A4XX_RB_BLEND_GREEN_UINT__MASK;
 }
+#define A4XX_RB_BLEND_GREEN_SINT__MASK				0x0000ff00
+#define A4XX_RB_BLEND_GREEN_SINT__SHIFT				8
+static inline uint32_t A4XX_RB_BLEND_GREEN_SINT(uint32_t val)
+{
+	return ((val) << A4XX_RB_BLEND_GREEN_SINT__SHIFT) & A4XX_RB_BLEND_GREEN_SINT__MASK;
+}
 #define A4XX_RB_BLEND_GREEN_FLOAT__MASK				0xffff0000
 #define A4XX_RB_BLEND_GREEN_FLOAT__SHIFT			16
 static inline uint32_t A4XX_RB_BLEND_GREEN_FLOAT(float val)
@@ -518,13 +1118,27 @@
 	return ((util_float_to_half(val)) << A4XX_RB_BLEND_GREEN_FLOAT__SHIFT) & A4XX_RB_BLEND_GREEN_FLOAT__MASK;
 }
 
-#define REG_A4XX_RB_BLEND_BLUE					0x000020f5
-#define A4XX_RB_BLEND_BLUE_UINT__MASK				0x00007fff
+#define REG_A4XX_RB_BLEND_GREEN_F32				0x000020f3
+#define A4XX_RB_BLEND_GREEN_F32__MASK				0xffffffff
+#define A4XX_RB_BLEND_GREEN_F32__SHIFT				0
+static inline uint32_t A4XX_RB_BLEND_GREEN_F32(float val)
+{
+	return ((fui(val)) << A4XX_RB_BLEND_GREEN_F32__SHIFT) & A4XX_RB_BLEND_GREEN_F32__MASK;
+}
+
+#define REG_A4XX_RB_BLEND_BLUE					0x000020f4
+#define A4XX_RB_BLEND_BLUE_UINT__MASK				0x000000ff
 #define A4XX_RB_BLEND_BLUE_UINT__SHIFT				0
 static inline uint32_t A4XX_RB_BLEND_BLUE_UINT(uint32_t val)
 {
 	return ((val) << A4XX_RB_BLEND_BLUE_UINT__SHIFT) & A4XX_RB_BLEND_BLUE_UINT__MASK;
 }
+#define A4XX_RB_BLEND_BLUE_SINT__MASK				0x0000ff00
+#define A4XX_RB_BLEND_BLUE_SINT__SHIFT				8
+static inline uint32_t A4XX_RB_BLEND_BLUE_SINT(uint32_t val)
+{
+	return ((val) << A4XX_RB_BLEND_BLUE_SINT__SHIFT) & A4XX_RB_BLEND_BLUE_SINT__MASK;
+}
 #define A4XX_RB_BLEND_BLUE_FLOAT__MASK				0xffff0000
 #define A4XX_RB_BLEND_BLUE_FLOAT__SHIFT				16
 static inline uint32_t A4XX_RB_BLEND_BLUE_FLOAT(float val)
@@ -532,13 +1146,27 @@
 	return ((util_float_to_half(val)) << A4XX_RB_BLEND_BLUE_FLOAT__SHIFT) & A4XX_RB_BLEND_BLUE_FLOAT__MASK;
 }
 
+#define REG_A4XX_RB_BLEND_BLUE_F32				0x000020f5
+#define A4XX_RB_BLEND_BLUE_F32__MASK				0xffffffff
+#define A4XX_RB_BLEND_BLUE_F32__SHIFT				0
+static inline uint32_t A4XX_RB_BLEND_BLUE_F32(float val)
+{
+	return ((fui(val)) << A4XX_RB_BLEND_BLUE_F32__SHIFT) & A4XX_RB_BLEND_BLUE_F32__MASK;
+}
+
 #define REG_A4XX_RB_BLEND_ALPHA					0x000020f6
-#define A4XX_RB_BLEND_ALPHA_UINT__MASK				0x00007fff
+#define A4XX_RB_BLEND_ALPHA_UINT__MASK				0x000000ff
 #define A4XX_RB_BLEND_ALPHA_UINT__SHIFT				0
 static inline uint32_t A4XX_RB_BLEND_ALPHA_UINT(uint32_t val)
 {
 	return ((val) << A4XX_RB_BLEND_ALPHA_UINT__SHIFT) & A4XX_RB_BLEND_ALPHA_UINT__MASK;
 }
+#define A4XX_RB_BLEND_ALPHA_SINT__MASK				0x0000ff00
+#define A4XX_RB_BLEND_ALPHA_SINT__SHIFT				8
+static inline uint32_t A4XX_RB_BLEND_ALPHA_SINT(uint32_t val)
+{
+	return ((val) << A4XX_RB_BLEND_ALPHA_SINT__SHIFT) & A4XX_RB_BLEND_ALPHA_SINT__MASK;
+}
 #define A4XX_RB_BLEND_ALPHA_FLOAT__MASK				0xffff0000
 #define A4XX_RB_BLEND_ALPHA_FLOAT__SHIFT			16
 static inline uint32_t A4XX_RB_BLEND_ALPHA_FLOAT(float val)
@@ -546,6 +1174,14 @@
 	return ((util_float_to_half(val)) << A4XX_RB_BLEND_ALPHA_FLOAT__SHIFT) & A4XX_RB_BLEND_ALPHA_FLOAT__MASK;
 }
 
+#define REG_A4XX_RB_BLEND_ALPHA_F32				0x000020f7
+#define A4XX_RB_BLEND_ALPHA_F32__MASK				0xffffffff
+#define A4XX_RB_BLEND_ALPHA_F32__SHIFT				0
+static inline uint32_t A4XX_RB_BLEND_ALPHA_F32(float val)
+{
+	return ((fui(val)) << A4XX_RB_BLEND_ALPHA_F32__SHIFT) & A4XX_RB_BLEND_ALPHA_F32__MASK;
+}
+
 #define REG_A4XX_RB_ALPHA_CONTROL				0x000020f8
 #define A4XX_RB_ALPHA_CONTROL_ALPHA_REF__MASK			0x000000ff
 #define A4XX_RB_ALPHA_CONTROL_ALPHA_REF__SHIFT			0
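
The reworked RB_BLEND_* layout above carries each blend-constant
channel in three encodings at once (8-bit UNORM, 8-bit SNORM and
16-bit half-float) plus a dedicated full-precision F32 register per
channel. A sketch of programming one channel; util_float_to_half()
and fui() are the helpers the header already references, and the
integer scalings are assumptions:

/* Illustrative only: write the red blend constant in every encoding
 * the header above defines.  'red' is a normalized [0,1] float; the
 * 255/127 scale factors are assumed, not taken from the patch. */
static void example_set_blend_red(struct msm_gpu *gpu, float red)
{
	uint32_t packed = A4XX_RB_BLEND_RED_UINT((uint32_t)(red * 255.0f)) |
			  A4XX_RB_BLEND_RED_SINT((uint32_t)(red * 127.0f)) |
			  A4XX_RB_BLEND_RED_FLOAT(red);

	gpu_write(gpu, REG_A4XX_RB_BLEND_RED, packed);
	gpu_write(gpu, REG_A4XX_RB_BLEND_RED_F32, A4XX_RB_BLEND_RED_F32(red));
}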
@@ -568,7 +1204,7 @@
 {
 	return ((val) << A4XX_RB_FS_OUTPUT_ENABLE_BLEND__SHIFT) & A4XX_RB_FS_OUTPUT_ENABLE_BLEND__MASK;
 }
-#define A4XX_RB_FS_OUTPUT_FAST_CLEAR				0x00000100
+#define A4XX_RB_FS_OUTPUT_INDEPENDENT_BLEND			0x00000100
 #define A4XX_RB_FS_OUTPUT_SAMPLE_MASK__MASK			0xffff0000
 #define A4XX_RB_FS_OUTPUT_SAMPLE_MASK__SHIFT			16
 static inline uint32_t A4XX_RB_FS_OUTPUT_SAMPLE_MASK(uint32_t val)
@@ -734,8 +1370,9 @@
 {
 	return ((val) << A4XX_RB_DEPTH_CONTROL_ZFUNC__SHIFT) & A4XX_RB_DEPTH_CONTROL_ZFUNC__MASK;
 }
-#define A4XX_RB_DEPTH_CONTROL_BF_ENABLE				0x00000080
+#define A4XX_RB_DEPTH_CONTROL_Z_CLAMP_ENABLE			0x00000080
 #define A4XX_RB_DEPTH_CONTROL_EARLY_Z_DISABLE			0x00010000
+#define A4XX_RB_DEPTH_CONTROL_FORCE_FRAGZ_TO_FS			0x00020000
 #define A4XX_RB_DEPTH_CONTROL_Z_TEST_ENABLE			0x80000000
 
 #define REG_A4XX_RB_DEPTH_CLEAR					0x00002102
@@ -996,8 +1633,386 @@
 
 #define REG_A4XX_RBBM_CFG_DEBBUS_SEL_D				0x0000004d
 
+#define REG_A4XX_RBBM_POWER_CNTL_IP				0x00000098
+#define A4XX_RBBM_POWER_CNTL_IP_SW_COLLAPSE			0x00000001
+#define A4XX_RBBM_POWER_CNTL_IP_SP_TP_PWR_ON			0x00100000
+
 #define REG_A4XX_RBBM_PERFCTR_CP_0_LO				0x0000009c
 
+#define REG_A4XX_RBBM_PERFCTR_CP_0_HI				0x0000009d
+
+#define REG_A4XX_RBBM_PERFCTR_CP_1_LO				0x0000009e
+
+#define REG_A4XX_RBBM_PERFCTR_CP_1_HI				0x0000009f
+
+#define REG_A4XX_RBBM_PERFCTR_CP_2_LO				0x000000a0
+
+#define REG_A4XX_RBBM_PERFCTR_CP_2_HI				0x000000a1
+
+#define REG_A4XX_RBBM_PERFCTR_CP_3_LO				0x000000a2
+
+#define REG_A4XX_RBBM_PERFCTR_CP_3_HI				0x000000a3
+
+#define REG_A4XX_RBBM_PERFCTR_CP_4_LO				0x000000a4
+
+#define REG_A4XX_RBBM_PERFCTR_CP_4_HI				0x000000a5
+
+#define REG_A4XX_RBBM_PERFCTR_CP_5_LO				0x000000a6
+
+#define REG_A4XX_RBBM_PERFCTR_CP_5_HI				0x000000a7
+
+#define REG_A4XX_RBBM_PERFCTR_CP_6_LO				0x000000a8
+
+#define REG_A4XX_RBBM_PERFCTR_CP_6_HI				0x000000a9
+
+#define REG_A4XX_RBBM_PERFCTR_CP_7_LO				0x000000aa
+
+#define REG_A4XX_RBBM_PERFCTR_CP_7_HI				0x000000ab
+
+#define REG_A4XX_RBBM_PERFCTR_RBBM_0_LO				0x000000ac
+
+#define REG_A4XX_RBBM_PERFCTR_RBBM_0_HI				0x000000ad
+
+#define REG_A4XX_RBBM_PERFCTR_RBBM_1_LO				0x000000ae
+
+#define REG_A4XX_RBBM_PERFCTR_RBBM_1_HI				0x000000af
+
+#define REG_A4XX_RBBM_PERFCTR_RBBM_2_LO				0x000000b0
+
+#define REG_A4XX_RBBM_PERFCTR_RBBM_2_HI				0x000000b1
+
+#define REG_A4XX_RBBM_PERFCTR_RBBM_3_LO				0x000000b2
+
+#define REG_A4XX_RBBM_PERFCTR_RBBM_3_HI				0x000000b3
+
+#define REG_A4XX_RBBM_PERFCTR_PC_0_LO				0x000000b4
+
+#define REG_A4XX_RBBM_PERFCTR_PC_0_HI				0x000000b5
+
+#define REG_A4XX_RBBM_PERFCTR_PC_1_LO				0x000000b6
+
+#define REG_A4XX_RBBM_PERFCTR_PC_1_HI				0x000000b7
+
+#define REG_A4XX_RBBM_PERFCTR_PC_2_LO				0x000000b8
+
+#define REG_A4XX_RBBM_PERFCTR_PC_2_HI				0x000000b9
+
+#define REG_A4XX_RBBM_PERFCTR_PC_3_LO				0x000000ba
+
+#define REG_A4XX_RBBM_PERFCTR_PC_3_HI				0x000000bb
+
+#define REG_A4XX_RBBM_PERFCTR_PC_4_LO				0x000000bc
+
+#define REG_A4XX_RBBM_PERFCTR_PC_4_HI				0x000000bd
+
+#define REG_A4XX_RBBM_PERFCTR_PC_5_LO				0x000000be
+
+#define REG_A4XX_RBBM_PERFCTR_PC_5_HI				0x000000bf
+
+#define REG_A4XX_RBBM_PERFCTR_PC_6_LO				0x000000c0
+
+#define REG_A4XX_RBBM_PERFCTR_PC_6_HI				0x000000c1
+
+#define REG_A4XX_RBBM_PERFCTR_PC_7_LO				0x000000c2
+
+#define REG_A4XX_RBBM_PERFCTR_PC_7_HI				0x000000c3
+
+#define REG_A4XX_RBBM_PERFCTR_VFD_0_LO				0x000000c4
+
+#define REG_A4XX_RBBM_PERFCTR_VFD_0_HI				0x000000c5
+
+#define REG_A4XX_RBBM_PERFCTR_VFD_1_LO				0x000000c6
+
+#define REG_A4XX_RBBM_PERFCTR_VFD_1_HI				0x000000c7
+
+#define REG_A4XX_RBBM_PERFCTR_VFD_2_LO				0x000000c8
+
+#define REG_A4XX_RBBM_PERFCTR_VFD_2_HI				0x000000c9
+
+#define REG_A4XX_RBBM_PERFCTR_VFD_3_LO				0x000000ca
+
+#define REG_A4XX_RBBM_PERFCTR_VFD_3_HI				0x000000cb
+
+#define REG_A4XX_RBBM_PERFCTR_VFD_4_LO				0x000000cc
+
+#define REG_A4XX_RBBM_PERFCTR_VFD_4_HI				0x000000cd
+
+#define REG_A4XX_RBBM_PERFCTR_VFD_5_LO				0x000000ce
+
+#define REG_A4XX_RBBM_PERFCTR_VFD_5_HI				0x000000cf
+
+#define REG_A4XX_RBBM_PERFCTR_VFD_6_LO				0x000000d0
+
+#define REG_A4XX_RBBM_PERFCTR_VFD_6_HI				0x000000d1
+
+#define REG_A4XX_RBBM_PERFCTR_VFD_7_LO				0x000000d2
+
+#define REG_A4XX_RBBM_PERFCTR_VFD_7_HI				0x000000d3
+
+#define REG_A4XX_RBBM_PERFCTR_HLSQ_0_LO				0x000000d4
+
+#define REG_A4XX_RBBM_PERFCTR_HLSQ_0_HI				0x000000d5
+
+#define REG_A4XX_RBBM_PERFCTR_HLSQ_1_LO				0x000000d6
+
+#define REG_A4XX_RBBM_PERFCTR_HLSQ_1_HI				0x000000d7
+
+#define REG_A4XX_RBBM_PERFCTR_HLSQ_2_LO				0x000000d8
+
+#define REG_A4XX_RBBM_PERFCTR_HLSQ_2_HI				0x000000d9
+
+#define REG_A4XX_RBBM_PERFCTR_HLSQ_3_LO				0x000000da
+
+#define REG_A4XX_RBBM_PERFCTR_HLSQ_3_HI				0x000000db
+
+#define REG_A4XX_RBBM_PERFCTR_HLSQ_4_LO				0x000000dc
+
+#define REG_A4XX_RBBM_PERFCTR_HLSQ_4_HI				0x000000dd
+
+#define REG_A4XX_RBBM_PERFCTR_HLSQ_5_LO				0x000000de
+
+#define REG_A4XX_RBBM_PERFCTR_HLSQ_5_HI				0x000000df
+
+#define REG_A4XX_RBBM_PERFCTR_HLSQ_6_LO				0x000000e0
+
+#define REG_A4XX_RBBM_PERFCTR_HLSQ_6_HI				0x000000e1
+
+#define REG_A4XX_RBBM_PERFCTR_HLSQ_7_LO				0x000000e2
+
+#define REG_A4XX_RBBM_PERFCTR_HLSQ_7_HI				0x000000e3
+
+#define REG_A4XX_RBBM_PERFCTR_VPC_0_LO				0x000000e4
+
+#define REG_A4XX_RBBM_PERFCTR_VPC_0_HI				0x000000e5
+
+#define REG_A4XX_RBBM_PERFCTR_VPC_1_LO				0x000000e6
+
+#define REG_A4XX_RBBM_PERFCTR_VPC_1_HI				0x000000e7
+
+#define REG_A4XX_RBBM_PERFCTR_VPC_2_LO				0x000000e8
+
+#define REG_A4XX_RBBM_PERFCTR_VPC_2_HI				0x000000e9
+
+#define REG_A4XX_RBBM_PERFCTR_VPC_3_LO				0x000000ea
+
+#define REG_A4XX_RBBM_PERFCTR_VPC_3_HI				0x000000eb
+
+#define REG_A4XX_RBBM_PERFCTR_CCU_0_LO				0x000000ec
+
+#define REG_A4XX_RBBM_PERFCTR_CCU_0_HI				0x000000ed
+
+#define REG_A4XX_RBBM_PERFCTR_CCU_1_LO				0x000000ee
+
+#define REG_A4XX_RBBM_PERFCTR_CCU_1_HI				0x000000ef
+
+#define REG_A4XX_RBBM_PERFCTR_CCU_2_LO				0x000000f0
+
+#define REG_A4XX_RBBM_PERFCTR_CCU_2_HI				0x000000f1
+
+#define REG_A4XX_RBBM_PERFCTR_CCU_3_LO				0x000000f2
+
+#define REG_A4XX_RBBM_PERFCTR_CCU_3_HI				0x000000f3
+
+#define REG_A4XX_RBBM_PERFCTR_TSE_0_LO				0x000000f4
+
+#define REG_A4XX_RBBM_PERFCTR_TSE_0_HI				0x000000f5
+
+#define REG_A4XX_RBBM_PERFCTR_TSE_1_LO				0x000000f6
+
+#define REG_A4XX_RBBM_PERFCTR_TSE_1_HI				0x000000f7
+
+#define REG_A4XX_RBBM_PERFCTR_TSE_2_LO				0x000000f8
+
+#define REG_A4XX_RBBM_PERFCTR_TSE_2_HI				0x000000f9
+
+#define REG_A4XX_RBBM_PERFCTR_TSE_3_LO				0x000000fa
+
+#define REG_A4XX_RBBM_PERFCTR_TSE_3_HI				0x000000fb
+
+#define REG_A4XX_RBBM_PERFCTR_RAS_0_LO				0x000000fc
+
+#define REG_A4XX_RBBM_PERFCTR_RAS_0_HI				0x000000fd
+
+#define REG_A4XX_RBBM_PERFCTR_RAS_1_LO				0x000000fe
+
+#define REG_A4XX_RBBM_PERFCTR_RAS_1_HI				0x000000ff
+
+#define REG_A4XX_RBBM_PERFCTR_RAS_2_LO				0x00000100
+
+#define REG_A4XX_RBBM_PERFCTR_RAS_2_HI				0x00000101
+
+#define REG_A4XX_RBBM_PERFCTR_RAS_3_LO				0x00000102
+
+#define REG_A4XX_RBBM_PERFCTR_RAS_3_HI				0x00000103
+
+#define REG_A4XX_RBBM_PERFCTR_UCHE_0_LO				0x00000104
+
+#define REG_A4XX_RBBM_PERFCTR_UCHE_0_HI				0x00000105
+
+#define REG_A4XX_RBBM_PERFCTR_UCHE_1_LO				0x00000106
+
+#define REG_A4XX_RBBM_PERFCTR_UCHE_1_HI				0x00000107
+
+#define REG_A4XX_RBBM_PERFCTR_UCHE_2_LO				0x00000108
+
+#define REG_A4XX_RBBM_PERFCTR_UCHE_2_HI				0x00000109
+
+#define REG_A4XX_RBBM_PERFCTR_UCHE_3_LO				0x0000010a
+
+#define REG_A4XX_RBBM_PERFCTR_UCHE_3_HI				0x0000010b
+
+#define REG_A4XX_RBBM_PERFCTR_UCHE_4_LO				0x0000010c
+
+#define REG_A4XX_RBBM_PERFCTR_UCHE_4_HI				0x0000010d
+
+#define REG_A4XX_RBBM_PERFCTR_UCHE_5_LO				0x0000010e
+
+#define REG_A4XX_RBBM_PERFCTR_UCHE_5_HI				0x0000010f
+
+#define REG_A4XX_RBBM_PERFCTR_UCHE_6_LO				0x00000110
+
+#define REG_A4XX_RBBM_PERFCTR_UCHE_6_HI				0x00000111
+
+#define REG_A4XX_RBBM_PERFCTR_UCHE_7_LO				0x00000112
+
+#define REG_A4XX_RBBM_PERFCTR_UCHE_7_HI				0x00000113
+
+#define REG_A4XX_RBBM_PERFCTR_TP_0_LO				0x00000114
+
+#define REG_A4XX_RBBM_PERFCTR_TP_0_HI				0x00000115
+
+#define REG_A4XX_RBBM_PERFCTR_TP_1_LO				0x00000116
+
+#define REG_A4XX_RBBM_PERFCTR_TP_1_HI				0x00000117
+
+#define REG_A4XX_RBBM_PERFCTR_TP_2_LO				0x00000118
+
+#define REG_A4XX_RBBM_PERFCTR_TP_2_HI				0x00000119
+
+#define REG_A4XX_RBBM_PERFCTR_TP_3_LO				0x0000011a
+
+#define REG_A4XX_RBBM_PERFCTR_TP_3_HI				0x0000011b
+
+#define REG_A4XX_RBBM_PERFCTR_TP_4_LO				0x0000011c
+
+#define REG_A4XX_RBBM_PERFCTR_TP_4_HI				0x0000011d
+
+#define REG_A4XX_RBBM_PERFCTR_TP_5_LO				0x0000011e
+
+#define REG_A4XX_RBBM_PERFCTR_TP_5_HI				0x0000011f
+
+#define REG_A4XX_RBBM_PERFCTR_TP_6_LO				0x00000120
+
+#define REG_A4XX_RBBM_PERFCTR_TP_6_HI				0x00000121
+
+#define REG_A4XX_RBBM_PERFCTR_TP_7_LO				0x00000122
+
+#define REG_A4XX_RBBM_PERFCTR_TP_7_HI				0x00000123
+
+#define REG_A4XX_RBBM_PERFCTR_SP_0_LO				0x00000124
+
+#define REG_A4XX_RBBM_PERFCTR_SP_0_HI				0x00000125
+
+#define REG_A4XX_RBBM_PERFCTR_SP_1_LO				0x00000126
+
+#define REG_A4XX_RBBM_PERFCTR_SP_1_HI				0x00000127
+
+#define REG_A4XX_RBBM_PERFCTR_SP_2_LO				0x00000128
+
+#define REG_A4XX_RBBM_PERFCTR_SP_2_HI				0x00000129
+
+#define REG_A4XX_RBBM_PERFCTR_SP_3_LO				0x0000012a
+
+#define REG_A4XX_RBBM_PERFCTR_SP_3_HI				0x0000012b
+
+#define REG_A4XX_RBBM_PERFCTR_SP_4_LO				0x0000012c
+
+#define REG_A4XX_RBBM_PERFCTR_SP_4_HI				0x0000012d
+
+#define REG_A4XX_RBBM_PERFCTR_SP_5_LO				0x0000012e
+
+#define REG_A4XX_RBBM_PERFCTR_SP_5_HI				0x0000012f
+
+#define REG_A4XX_RBBM_PERFCTR_SP_6_LO				0x00000130
+
+#define REG_A4XX_RBBM_PERFCTR_SP_6_HI				0x00000131
+
+#define REG_A4XX_RBBM_PERFCTR_SP_7_LO				0x00000132
+
+#define REG_A4XX_RBBM_PERFCTR_SP_7_HI				0x00000133
+
+#define REG_A4XX_RBBM_PERFCTR_SP_8_LO				0x00000134
+
+#define REG_A4XX_RBBM_PERFCTR_SP_8_HI				0x00000135
+
+#define REG_A4XX_RBBM_PERFCTR_SP_9_LO				0x00000136
+
+#define REG_A4XX_RBBM_PERFCTR_SP_9_HI				0x00000137
+
+#define REG_A4XX_RBBM_PERFCTR_SP_10_LO				0x00000138
+
+#define REG_A4XX_RBBM_PERFCTR_SP_10_HI				0x00000139
+
+#define REG_A4XX_RBBM_PERFCTR_SP_11_LO				0x0000013a
+
+#define REG_A4XX_RBBM_PERFCTR_SP_11_HI				0x0000013b
+
+#define REG_A4XX_RBBM_PERFCTR_RB_0_LO				0x0000013c
+
+#define REG_A4XX_RBBM_PERFCTR_RB_0_HI				0x0000013d
+
+#define REG_A4XX_RBBM_PERFCTR_RB_1_LO				0x0000013e
+
+#define REG_A4XX_RBBM_PERFCTR_RB_1_HI				0x0000013f
+
+#define REG_A4XX_RBBM_PERFCTR_RB_2_LO				0x00000140
+
+#define REG_A4XX_RBBM_PERFCTR_RB_2_HI				0x00000141
+
+#define REG_A4XX_RBBM_PERFCTR_RB_3_LO				0x00000142
+
+#define REG_A4XX_RBBM_PERFCTR_RB_3_HI				0x00000143
+
+#define REG_A4XX_RBBM_PERFCTR_RB_4_LO				0x00000144
+
+#define REG_A4XX_RBBM_PERFCTR_RB_4_HI				0x00000145
+
+#define REG_A4XX_RBBM_PERFCTR_RB_5_LO				0x00000146
+
+#define REG_A4XX_RBBM_PERFCTR_RB_5_HI				0x00000147
+
+#define REG_A4XX_RBBM_PERFCTR_RB_6_LO				0x00000148
+
+#define REG_A4XX_RBBM_PERFCTR_RB_6_HI				0x00000149
+
+#define REG_A4XX_RBBM_PERFCTR_RB_7_LO				0x0000014a
+
+#define REG_A4XX_RBBM_PERFCTR_RB_7_HI				0x0000014b
+
+#define REG_A4XX_RBBM_PERFCTR_VSC_0_LO				0x0000014c
+
+#define REG_A4XX_RBBM_PERFCTR_VSC_0_HI				0x0000014d
+
+#define REG_A4XX_RBBM_PERFCTR_VSC_1_LO				0x0000014e
+
+#define REG_A4XX_RBBM_PERFCTR_VSC_1_HI				0x0000014f
+
+#define REG_A4XX_RBBM_PERFCTR_PWR_0_LO				0x00000166
+
+#define REG_A4XX_RBBM_PERFCTR_PWR_0_HI				0x00000167
+
+#define REG_A4XX_RBBM_PERFCTR_PWR_1_LO				0x00000168
+
+#define REG_A4XX_RBBM_PERFCTR_PWR_1_HI				0x00000169
+
+#define REG_A4XX_RBBM_ALWAYSON_COUNTER_LO			0x0000016e
+
+#define REG_A4XX_RBBM_ALWAYSON_COUNTER_HI			0x0000016f
+
 static inline uint32_t REG_A4XX_RBBM_CLOCK_CTL_SP(uint32_t i0) { return 0x00000068 + 0x1*i0; }
 
 static inline uint32_t REG_A4XX_RBBM_CLOCK_CTL_SP_REG(uint32_t i0) { return 0x00000068 + 0x1*i0; }
@@ -1046,6 +2061,10 @@
 
 static inline uint32_t REG_A4XX_RBBM_CLOCK_DELAY_RB_MARB_CCU_L1_REG(uint32_t i0) { return 0x0000008e + 0x1*i0; }
 
+#define REG_A4XX_RBBM_SP_REGFILE_SLEEP_CNTL_0			0x00000099
+
+#define REG_A4XX_RBBM_SP_REGFILE_SLEEP_CNTL_1			0x0000009a
+
 #define REG_A4XX_RBBM_PERFCTR_PWR_1_LO				0x00000168
 
 #define REG_A4XX_RBBM_PERFCTR_CTL				0x00000170
@@ -1060,6 +2079,14 @@
 
 #define REG_A4XX_RBBM_PERFCTR_LOAD_VALUE_HI			0x00000175
 
+#define REG_A4XX_RBBM_PERFCTR_RBBM_SEL_0			0x00000176
+
+#define REG_A4XX_RBBM_PERFCTR_RBBM_SEL_1			0x00000177
+
+#define REG_A4XX_RBBM_PERFCTR_RBBM_SEL_2			0x00000178
+
+#define REG_A4XX_RBBM_PERFCTR_RBBM_SEL_3			0x00000179
+
 #define REG_A4XX_RBBM_GPU_BUSY_MASKED				0x0000017a
 
 #define REG_A4XX_RBBM_INT_0_STATUS				0x0000017d
@@ -1099,6 +2126,11 @@
 
 #define REG_A4XX_RBBM_INTERFACE_RRDY_STATUS5			0x0000019f
 
+#define REG_A4XX_RBBM_POWER_STATUS				0x000001b0
+#define A4XX_RBBM_POWER_STATUS_SP_TP_PWR_ON			0x00100000
+
+#define REG_A4XX_RBBM_WAIT_IDLE_CLOCKS_CTL2			0x000001b8
+
 #define REG_A4XX_CP_SCRATCH_UMASK				0x00000228
 
 #define REG_A4XX_CP_SCRATCH_ADDR				0x00000229
@@ -1167,11 +2199,23 @@
 
 #define REG_A4XX_CP_DRAW_STATE_ADDR				0x00000232
 
-#define REG_A4XX_CP_PROTECT_REG_0				0x00000240
-
 static inline uint32_t REG_A4XX_CP_PROTECT(uint32_t i0) { return 0x00000240 + 0x1*i0; }
 
 static inline uint32_t REG_A4XX_CP_PROTECT_REG(uint32_t i0) { return 0x00000240 + 0x1*i0; }
+#define A4XX_CP_PROTECT_REG_BASE_ADDR__MASK			0x0001ffff
+#define A4XX_CP_PROTECT_REG_BASE_ADDR__SHIFT			0
+static inline uint32_t A4XX_CP_PROTECT_REG_BASE_ADDR(uint32_t val)
+{
+	return ((val) << A4XX_CP_PROTECT_REG_BASE_ADDR__SHIFT) & A4XX_CP_PROTECT_REG_BASE_ADDR__MASK;
+}
+#define A4XX_CP_PROTECT_REG_MASK_LEN__MASK			0x1f000000
+#define A4XX_CP_PROTECT_REG_MASK_LEN__SHIFT			24
+static inline uint32_t A4XX_CP_PROTECT_REG_MASK_LEN(uint32_t val)
+{
+	return ((val) << A4XX_CP_PROTECT_REG_MASK_LEN__SHIFT) & A4XX_CP_PROTECT_REG_MASK_LEN__MASK;
+}
+#define A4XX_CP_PROTECT_REG_TRAP_WRITE				0x20000000
+#define A4XX_CP_PROTECT_REG_TRAP_READ				0x40000000
 
 #define REG_A4XX_CP_PROTECT_CTRL				0x00000250
 
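The CP_PROTECT_REG field helpers added above encode a protected window as a base register offset plus a power-of-two length, with separate read/write trap bits. A minimal sketch of composing one entry (the offset and length are made-up illustration values, not taken from the driver):

	/* Protect a 2^5 = 32-register window starting at offset 0x2000
	 * (hypothetical values) and trap CP writes into it. */
	uint32_t protect = A4XX_CP_PROTECT_REG_BASE_ADDR(0x2000) |
			   A4XX_CP_PROTECT_REG_MASK_LEN(5) |
			   A4XX_CP_PROTECT_REG_TRAP_WRITE;
	/* protect == 0x25002000: base in bits [16:0], log2 length in
	 * bits [28:24], trap-write is bit 29. */
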
@@ -1191,6 +2235,20 @@
 
 #define REG_A4XX_CP_PERFCTR_CP_SEL_0				0x00000500
 
+#define REG_A4XX_CP_PERFCTR_CP_SEL_1				0x00000501
+
+#define REG_A4XX_CP_PERFCTR_CP_SEL_2				0x00000502
+
+#define REG_A4XX_CP_PERFCTR_CP_SEL_3				0x00000503
+
+#define REG_A4XX_CP_PERFCTR_CP_SEL_4				0x00000504
+
+#define REG_A4XX_CP_PERFCTR_CP_SEL_5				0x00000505
+
+#define REG_A4XX_CP_PERFCTR_CP_SEL_6				0x00000506
+
+#define REG_A4XX_CP_PERFCTR_CP_SEL_7				0x00000507
+
 #define REG_A4XX_CP_PERFCOMBINER_SELECT				0x0000050b
 
 static inline uint32_t REG_A4XX_CP_SCRATCH(uint32_t i0) { return 0x00000578 + 0x1*i0; }
@@ -1201,6 +2259,28 @@
 
 #define REG_A4XX_SP_MODE_CONTROL				0x00000ec3
 
+#define REG_A4XX_SP_PERFCTR_SP_SEL_0				0x00000ec4
+
+#define REG_A4XX_SP_PERFCTR_SP_SEL_1				0x00000ec5
+
+#define REG_A4XX_SP_PERFCTR_SP_SEL_2				0x00000ec6
+
+#define REG_A4XX_SP_PERFCTR_SP_SEL_3				0x00000ec7
+
+#define REG_A4XX_SP_PERFCTR_SP_SEL_4				0x00000ec8
+
+#define REG_A4XX_SP_PERFCTR_SP_SEL_5				0x00000ec9
+
+#define REG_A4XX_SP_PERFCTR_SP_SEL_6				0x00000eca
+
+#define REG_A4XX_SP_PERFCTR_SP_SEL_7				0x00000ecb
+
+#define REG_A4XX_SP_PERFCTR_SP_SEL_8				0x00000ecc
+
+#define REG_A4XX_SP_PERFCTR_SP_SEL_9				0x00000ecd
+
+#define REG_A4XX_SP_PERFCTR_SP_SEL_10				0x00000ece
+
 #define REG_A4XX_SP_PERFCTR_SP_SEL_11				0x00000ecf
 
 #define REG_A4XX_SP_SP_CTRL_REG					0x000022c0
@@ -1226,7 +2306,7 @@
 {
 	return ((val) << A4XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT) & A4XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__MASK;
 }
-#define A4XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__MASK		0x0003fc00
+#define A4XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__MASK		0x0000fc00
 #define A4XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT		10
 static inline uint32_t A4XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT(uint32_t val)
 {
@@ -1374,7 +2454,7 @@
 {
 	return ((val) << A4XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT) & A4XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT__MASK;
 }
-#define A4XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__MASK		0x0003fc00
+#define A4XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__MASK		0x0000fc00
 #define A4XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT		10
 static inline uint32_t A4XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT(uint32_t val)
 {
@@ -1699,6 +2779,12 @@
 
 #define REG_A4XX_VPC_DEBUG_ECO_CONTROL				0x00000e64
 
+#define REG_A4XX_VPC_PERFCTR_VPC_SEL_0				0x00000e65
+
+#define REG_A4XX_VPC_PERFCTR_VPC_SEL_1				0x00000e66
+
+#define REG_A4XX_VPC_PERFCTR_VPC_SEL_2				0x00000e67
+
 #define REG_A4XX_VPC_PERFCTR_VPC_SEL_3				0x00000e68
 
 #define REG_A4XX_VPC_ATTR					0x00002140
@@ -1811,6 +2897,20 @@
 
 #define REG_A4XX_VFD_DEBUG_CONTROL				0x00000e40
 
+#define REG_A4XX_VFD_PERFCTR_VFD_SEL_0				0x00000e43
+
+#define REG_A4XX_VFD_PERFCTR_VFD_SEL_1				0x00000e44
+
+#define REG_A4XX_VFD_PERFCTR_VFD_SEL_2				0x00000e45
+
+#define REG_A4XX_VFD_PERFCTR_VFD_SEL_3				0x00000e46
+
+#define REG_A4XX_VFD_PERFCTR_VFD_SEL_4				0x00000e47
+
+#define REG_A4XX_VFD_PERFCTR_VFD_SEL_5				0x00000e48
+
+#define REG_A4XX_VFD_PERFCTR_VFD_SEL_6				0x00000e49
+
 #define REG_A4XX_VFD_PERFCTR_VFD_SEL_7				0x00000e4a
 
 #define REG_A4XX_VGT_CL_INITIATOR				0x000021d0
@@ -1967,6 +3067,20 @@
 
 #define REG_A4XX_TPL1_TP_MODE_CONTROL				0x00000f03
 
+#define REG_A4XX_TPL1_PERFCTR_TP_SEL_0				0x00000f04
+
+#define REG_A4XX_TPL1_PERFCTR_TP_SEL_1				0x00000f05
+
+#define REG_A4XX_TPL1_PERFCTR_TP_SEL_2				0x00000f06
+
+#define REG_A4XX_TPL1_PERFCTR_TP_SEL_3				0x00000f07
+
+#define REG_A4XX_TPL1_PERFCTR_TP_SEL_4				0x00000f08
+
+#define REG_A4XX_TPL1_PERFCTR_TP_SEL_5				0x00000f09
+
+#define REG_A4XX_TPL1_PERFCTR_TP_SEL_6				0x00000f0a
+
 #define REG_A4XX_TPL1_PERFCTR_TP_SEL_7				0x00000f0b
 
 #define REG_A4XX_TPL1_TP_TEX_OFFSET				0x00002380
@@ -2021,9 +3135,25 @@
 
 #define REG_A4XX_GRAS_PERFCTR_TSE_SEL_0				0x00000c88
 
+#define REG_A4XX_GRAS_PERFCTR_TSE_SEL_1				0x00000c89
+
+#define REG_A4XX_GRAS_PERFCTR_TSE_SEL_2				0x00000c8a
+
 #define REG_A4XX_GRAS_PERFCTR_TSE_SEL_3				0x00000c8b
 
+#define REG_A4XX_GRAS_PERFCTR_RAS_SEL_0				0x00000c8c
+
+#define REG_A4XX_GRAS_PERFCTR_RAS_SEL_1				0x00000c8d
+
+#define REG_A4XX_GRAS_PERFCTR_RAS_SEL_2				0x00000c8e
+
+#define REG_A4XX_GRAS_PERFCTR_RAS_SEL_3				0x00000c8f
+
 #define REG_A4XX_GRAS_CL_CLIP_CNTL				0x00002000
+#define A4XX_GRAS_CL_CLIP_CNTL_CLIP_DISABLE			0x00008000
+#define A4XX_GRAS_CL_CLIP_CNTL_ZNEAR_CLIP_DISABLE		0x00010000
+#define A4XX_GRAS_CL_CLIP_CNTL_ZFAR_CLIP_DISABLE		0x00020000
+#define A4XX_GRAS_CL_CLIP_CNTL_ZERO_GB_SCALE_Z			0x00400000
 
 #define REG_A4XX_GRAS_CLEAR_CNTL				0x00002003
 #define A4XX_GRAS_CLEAR_CNTL_NOT_FASTCLEAR			0x00000001
@@ -2114,6 +3244,7 @@
 
 #define REG_A4XX_GRAS_ALPHA_CONTROL				0x00002073
 #define A4XX_GRAS_ALPHA_CONTROL_ALPHA_TEST_ENABLE		0x00000004
+#define A4XX_GRAS_ALPHA_CONTROL_FORCE_FRAGZ_TO_FS		0x00000008
 
 #define REG_A4XX_GRAS_SU_POLY_OFFSET_SCALE			0x00002074
 #define A4XX_GRAS_SU_POLY_OFFSET_SCALE__MASK			0xffffffff
@@ -2285,6 +3416,20 @@
 
 #define REG_A4XX_UCHE_CACHE_WAYS_VFD				0x00000e8c
 
+#define REG_A4XX_UCHE_PERFCTR_UCHE_SEL_0			0x00000e8e
+
+#define REG_A4XX_UCHE_PERFCTR_UCHE_SEL_1			0x00000e8f
+
+#define REG_A4XX_UCHE_PERFCTR_UCHE_SEL_2			0x00000e90
+
+#define REG_A4XX_UCHE_PERFCTR_UCHE_SEL_3			0x00000e91
+
+#define REG_A4XX_UCHE_PERFCTR_UCHE_SEL_4			0x00000e92
+
+#define REG_A4XX_UCHE_PERFCTR_UCHE_SEL_5			0x00000e93
+
+#define REG_A4XX_UCHE_PERFCTR_UCHE_SEL_6			0x00000e94
+
 #define REG_A4XX_UCHE_PERFCTR_UCHE_SEL_7			0x00000e95
 
 #define REG_A4XX_HLSQ_TIMEOUT_THRESHOLD				0x00000e00
@@ -2295,6 +3440,22 @@
 
 #define REG_A4XX_HLSQ_PERF_PIPE_MASK				0x00000e0e
 
+#define REG_A4XX_HLSQ_PERFCTR_HLSQ_SEL_0			0x00000e06
+
+#define REG_A4XX_HLSQ_PERFCTR_HLSQ_SEL_1			0x00000e07
+
+#define REG_A4XX_HLSQ_PERFCTR_HLSQ_SEL_2			0x00000e08
+
+#define REG_A4XX_HLSQ_PERFCTR_HLSQ_SEL_3			0x00000e09
+
+#define REG_A4XX_HLSQ_PERFCTR_HLSQ_SEL_4			0x00000e0a
+
+#define REG_A4XX_HLSQ_PERFCTR_HLSQ_SEL_5			0x00000e0b
+
+#define REG_A4XX_HLSQ_PERFCTR_HLSQ_SEL_6			0x00000e0c
+
+#define REG_A4XX_HLSQ_PERFCTR_HLSQ_SEL_7			0x00000e0d
+
 #define REG_A4XX_HLSQ_CONTROL_0_REG				0x000023c0
 #define A4XX_HLSQ_CONTROL_0_REG_FSTHREADSIZE__MASK		0x00000010
 #define A4XX_HLSQ_CONTROL_0_REG_FSTHREADSIZE__SHIFT		4
@@ -2545,14 +3706,42 @@
 #define REG_A4XX_PC_BINNING_COMMAND				0x00000d00
 #define A4XX_PC_BINNING_COMMAND_BINNING_ENABLE			0x00000001
 
+#define REG_A4XX_PC_TESSFACTOR_ADDR				0x00000d08
+
 #define REG_A4XX_PC_DRAWCALL_SETUP_OVERRIDE			0x00000d0c
 
 #define REG_A4XX_PC_PERFCTR_PC_SEL_0				0x00000d10
 
+#define REG_A4XX_PC_PERFCTR_PC_SEL_1				0x00000d11
+
+#define REG_A4XX_PC_PERFCTR_PC_SEL_2				0x00000d12
+
+#define REG_A4XX_PC_PERFCTR_PC_SEL_3				0x00000d13
+
+#define REG_A4XX_PC_PERFCTR_PC_SEL_4				0x00000d14
+
+#define REG_A4XX_PC_PERFCTR_PC_SEL_5				0x00000d15
+
+#define REG_A4XX_PC_PERFCTR_PC_SEL_6				0x00000d16
+
 #define REG_A4XX_PC_PERFCTR_PC_SEL_7				0x00000d17
 
 #define REG_A4XX_PC_BIN_BASE					0x000021c0
 
+#define REG_A4XX_PC_VSTREAM_CONTROL				0x000021c2
+#define A4XX_PC_VSTREAM_CONTROL_SIZE__MASK			0x003f0000
+#define A4XX_PC_VSTREAM_CONTROL_SIZE__SHIFT			16
+static inline uint32_t A4XX_PC_VSTREAM_CONTROL_SIZE(uint32_t val)
+{
+	return ((val) << A4XX_PC_VSTREAM_CONTROL_SIZE__SHIFT) & A4XX_PC_VSTREAM_CONTROL_SIZE__MASK;
+}
+#define A4XX_PC_VSTREAM_CONTROL_N__MASK				0x07c00000
+#define A4XX_PC_VSTREAM_CONTROL_N__SHIFT			22
+static inline uint32_t A4XX_PC_VSTREAM_CONTROL_N(uint32_t val)
+{
+	return ((val) << A4XX_PC_VSTREAM_CONTROL_N__SHIFT) & A4XX_PC_VSTREAM_CONTROL_N__MASK;
+}
+
 #define REG_A4XX_PC_PRIM_VTX_CNTL				0x000021c4
 #define A4XX_PC_PRIM_VTX_CNTL_VAROUT__MASK			0x0000000f
 #define A4XX_PC_PRIM_VTX_CNTL_VAROUT__SHIFT			0
@@ -2564,7 +3753,20 @@
 #define A4XX_PC_PRIM_VTX_CNTL_PROVOKING_VTX_LAST		0x02000000
 #define A4XX_PC_PRIM_VTX_CNTL_PSIZE				0x04000000
 
-#define REG_A4XX_UNKNOWN_21C5					0x000021c5
+#define REG_A4XX_PC_PRIM_VTX_CNTL2				0x000021c5
+#define A4XX_PC_PRIM_VTX_CNTL2_POLYMODE_FRONT_PTYPE__MASK	0x00000007
+#define A4XX_PC_PRIM_VTX_CNTL2_POLYMODE_FRONT_PTYPE__SHIFT	0
+static inline uint32_t A4XX_PC_PRIM_VTX_CNTL2_POLYMODE_FRONT_PTYPE(enum adreno_pa_su_sc_draw val)
+{
+	return ((val) << A4XX_PC_PRIM_VTX_CNTL2_POLYMODE_FRONT_PTYPE__SHIFT) & A4XX_PC_PRIM_VTX_CNTL2_POLYMODE_FRONT_PTYPE__MASK;
+}
+#define A4XX_PC_PRIM_VTX_CNTL2_POLYMODE_BACK_PTYPE__MASK	0x00000038
+#define A4XX_PC_PRIM_VTX_CNTL2_POLYMODE_BACK_PTYPE__SHIFT	3
+static inline uint32_t A4XX_PC_PRIM_VTX_CNTL2_POLYMODE_BACK_PTYPE(enum adreno_pa_su_sc_draw val)
+{
+	return ((val) << A4XX_PC_PRIM_VTX_CNTL2_POLYMODE_BACK_PTYPE__SHIFT) & A4XX_PC_PRIM_VTX_CNTL2_POLYMODE_BACK_PTYPE__MASK;
+}
+#define A4XX_PC_PRIM_VTX_CNTL2_POLYMODE_ENABLE			0x00000040
 
 #define REG_A4XX_PC_RESTART_INDEX				0x000021c6
 
@@ -2602,12 +3804,8 @@
 {
 	return ((val) << A4XX_PC_HS_PARAM_SPACING__SHIFT) & A4XX_PC_HS_PARAM_SPACING__MASK;
 }
-#define A4XX_PC_HS_PARAM_PRIMTYPE__MASK				0x01800000
-#define A4XX_PC_HS_PARAM_PRIMTYPE__SHIFT			23
-static inline uint32_t A4XX_PC_HS_PARAM_PRIMTYPE(enum adreno_pa_su_sc_draw val)
-{
-	return ((val) << A4XX_PC_HS_PARAM_PRIMTYPE__SHIFT) & A4XX_PC_HS_PARAM_PRIMTYPE__MASK;
-}
+#define A4XX_PC_HS_PARAM_CW					0x00800000
+#define A4XX_PC_HS_PARAM_CONNECTED				0x01000000
 
 #define REG_A4XX_VBIF_VERSION					0x00003000
 
@@ -2646,20 +3844,6 @@
 
 #define REG_A4XX_UNKNOWN_20EF					0x000020ef
 
-#define REG_A4XX_UNKNOWN_20F0					0x000020f0
-
-#define REG_A4XX_UNKNOWN_20F1					0x000020f1
-
-#define REG_A4XX_UNKNOWN_20F2					0x000020f2
-
-#define REG_A4XX_UNKNOWN_20F7					0x000020f7
-#define A4XX_UNKNOWN_20F7__MASK					0xffffffff
-#define A4XX_UNKNOWN_20F7__SHIFT				0
-static inline uint32_t A4XX_UNKNOWN_20F7(float val)
-{
-	return ((fui(val)) << A4XX_UNKNOWN_20F7__SHIFT) & A4XX_UNKNOWN_20F7__MASK;
-}
-
 #define REG_A4XX_UNKNOWN_2152					0x00002152
 
 #define REG_A4XX_UNKNOWN_2153					0x00002153
@@ -2720,6 +3904,12 @@
 {
 	return ((val) << A4XX_TEX_SAMP_0_ANISO__SHIFT) & A4XX_TEX_SAMP_0_ANISO__MASK;
 }
+#define A4XX_TEX_SAMP_0_LOD_BIAS__MASK				0xfff80000
+#define A4XX_TEX_SAMP_0_LOD_BIAS__SHIFT				19
+static inline uint32_t A4XX_TEX_SAMP_0_LOD_BIAS(float val)
+{
+	return ((((int32_t)(val * 256.0))) << A4XX_TEX_SAMP_0_LOD_BIAS__SHIFT) & A4XX_TEX_SAMP_0_LOD_BIAS__MASK;
+}
 
 #define REG_A4XX_TEX_SAMP_1					0x00000001
 #define A4XX_TEX_SAMP_1_COMPARE_FUNC__MASK			0x0000000e
@@ -2728,6 +3918,7 @@
 {
 	return ((val) << A4XX_TEX_SAMP_1_COMPARE_FUNC__SHIFT) & A4XX_TEX_SAMP_1_COMPARE_FUNC__MASK;
 }
+#define A4XX_TEX_SAMP_1_CUBEMAPSEAMLESSFILTOFF			0x00000010
 #define A4XX_TEX_SAMP_1_UNNORM_COORDS				0x00000020
 #define A4XX_TEX_SAMP_1_MIPFILTER_LINEAR_FAR			0x00000040
 #define A4XX_TEX_SAMP_1_MAX_LOD__MASK				0x000fff00
@@ -2796,7 +3987,7 @@
 {
 	return ((val) << A4XX_TEX_CONST_1_HEIGHT__SHIFT) & A4XX_TEX_CONST_1_HEIGHT__MASK;
 }
-#define A4XX_TEX_CONST_1_WIDTH__MASK				0x1fff8000
+#define A4XX_TEX_CONST_1_WIDTH__MASK				0x3fff8000
 #define A4XX_TEX_CONST_1_WIDTH__SHIFT				15
 static inline uint32_t A4XX_TEX_CONST_1_WIDTH(uint32_t val)
 {
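One detail worth calling out in the sampler changes above: A4XX_TEX_SAMP_0_LOD_BIAS converts its float argument to signed fixed point with 8 fractional bits and packs the result into the 13-bit field at [31:19]. A small worked example (the bias value is purely illustrative):

	uint32_t samp0 = A4XX_TEX_SAMP_0_LOD_BIAS(-1.5f);
	/* (int32_t)(-1.5 * 256.0) = -384, which is 0x1e80 as a 13-bit
	 * two's-complement value, so samp0 == 0xf4000000. */
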
diff -ruw linux-4.4.115/drivers/gpu/drm/msm/adreno/adreno_common.xml.h linux-4.4.115-fbx/drivers/gpu/drm/msm/adreno/adreno_common.xml.h
--- linux-4.4.115/drivers/gpu/drm/msm/adreno/adreno_common.xml.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/gpu/drm/msm/adreno/adreno_common.xml.h	2019-01-22 16:16:23.483246225 +0100
@@ -8,17 +8,19 @@
 git clone https://github.com/freedreno/envytools.git
 
 The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml               (    398 bytes, from 2015-09-24 17:25:31)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml  (   1453 bytes, from 2015-05-20 20:03:07)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml          (  32901 bytes, from 2015-05-20 20:03:14)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml (  10755 bytes, from 2015-09-14 20:46:55)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml    (  14968 bytes, from 2015-05-20 20:12:27)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml          (  67771 bytes, from 2015-09-14 20:46:55)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml          (  63970 bytes, from 2015-09-14 20:50:12)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/ocmem.xml         (   1773 bytes, from 2015-09-24 17:30:00)
+- ./adreno.xml               (    431 bytes, from 2016-10-24 21:12:27)
+- ./freedreno_copyright.xml  (   1572 bytes, from 2016-10-24 21:12:27)
+- ./adreno/a2xx.xml          (  32901 bytes, from 2016-10-24 21:12:27)
+- ./adreno/adreno_common.xml (  12025 bytes, from 2016-10-24 21:12:27)
+- ./adreno/adreno_pm4.xml    (  19684 bytes, from 2016-10-24 21:12:27)
+- ./adreno/a3xx.xml          (  83840 bytes, from 2016-10-24 21:12:27)
+- ./adreno/a4xx.xml          ( 110708 bytes, from 2016-10-24 21:12:27)
+- ./adreno/a5xx.xml          (  81207 bytes, from 2016-10-26 19:36:59)
+- ./adreno/ocmem.xml         (   1773 bytes, from 2016-10-24 21:12:27)
 
-Copyright (C) 2013-2015 by the following authors:
+Copyright (C) 2013-2016 by the following authors:
 - Rob Clark <robdclark@gmail.com> (robclark)
+- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
 
 Permission is hereby granted, free of charge, to any person obtaining
 a copy of this software and associated documentation files (the
@@ -119,6 +121,25 @@
 	RB_COPY_DEPTH_STENCIL = 5,
 };
 
+enum a3xx_rop_code {
+	ROP_CLEAR = 0,
+	ROP_NOR = 1,
+	ROP_AND_INVERTED = 2,
+	ROP_COPY_INVERTED = 3,
+	ROP_AND_REVERSE = 4,
+	ROP_INVERT = 5,
+	ROP_XOR = 6,
+	ROP_NAND = 7,
+	ROP_AND = 8,
+	ROP_EQUIV = 9,
+	ROP_NOOP = 10,
+	ROP_OR_INVERTED = 11,
+	ROP_COPY = 12,
+	ROP_OR_REVERSE = 13,
+	ROP_OR = 14,
+	ROP_SET = 15,
+};
+
 enum a3xx_render_mode {
 	RB_RENDERING_PASS = 0,
 	RB_TILING_PASS = 1,
@@ -154,6 +175,14 @@
 	XYZW = 3,
 };
 
+enum a3xx_rb_blend_opcode {
+	BLEND_DST_PLUS_SRC = 0,
+	BLEND_SRC_MINUS_DST = 1,
+	BLEND_DST_MINUS_SRC = 2,
+	BLEND_MIN_DST_SRC = 3,
+	BLEND_MAX_DST_SRC = 4,
+};
+
 #define REG_AXXX_CP_RB_BASE					0x000001c0
 
 #define REG_AXXX_CP_RB_CNTL					0x000001c1
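The new a3xx_rb_blend_opcode values line up one-to-one with the GL blend equations. A hedged sketch of that mapping (blend_func is a hypothetical helper modeled on how Mesa's freedreno driver uses these values; the GL tokens carry their standard values so the fragment is self-contained):

	#define GL_FUNC_ADD              0x8006
	#define GL_MIN                   0x8007
	#define GL_MAX                   0x8008
	#define GL_FUNC_SUBTRACT         0x800a
	#define GL_FUNC_REVERSE_SUBTRACT 0x800b

	static enum a3xx_rb_blend_opcode blend_func(unsigned func)
	{
		switch (func) {
		case GL_FUNC_SUBTRACT:		/* src - dst */
			return BLEND_SRC_MINUS_DST;
		case GL_FUNC_REVERSE_SUBTRACT:	/* dst - src */
			return BLEND_DST_MINUS_SRC;
		case GL_MIN:
			return BLEND_MIN_DST_SRC;
		case GL_MAX:
			return BLEND_MAX_DST_SRC;
		case GL_FUNC_ADD:
		default:
			return BLEND_DST_PLUS_SRC;
		}
	}
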
diff -ruw linux-4.4.115/drivers/gpu/drm/msm/adreno/adreno_device.c linux-4.4.115-fbx/drivers/gpu/drm/msm/adreno/adreno_device.c
--- linux-4.4.115/drivers/gpu/drm/msm/adreno/adreno_device.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/gpu/drm/msm/adreno/adreno_device.c	2019-01-22 16:16:23.487246262 +0100
@@ -19,10 +19,6 @@
 
 #include "adreno_gpu.h"
 
-#if defined(DOWNSTREAM_CONFIG_MSM_BUS_SCALING) && !defined(CONFIG_OF)
-#  include <mach/kgsl.h>
-#endif
-
 #define ANY_ID 0xff
 
 bool hang_debug = false;
@@ -31,6 +27,7 @@
 
 struct msm_gpu *a3xx_gpu_init(struct drm_device *dev);
 struct msm_gpu *a4xx_gpu_init(struct drm_device *dev);
+struct msm_gpu *a5xx_gpu_init(struct drm_device *dev);
 
 static const struct adreno_info gpulist[] = {
 	{
@@ -73,6 +70,30 @@
 		.pfpfw = "a420_pfp.fw",
 		.gmem  = (SZ_1M + SZ_512K),
 		.init  = a4xx_gpu_init,
+	}, {
+		.rev   = ADRENO_REV(4, 3, 0, ANY_ID),
+		.revn  = 430,
+		.name  = "A430",
+		.pm4fw = "a420_pm4.fw",
+		.pfpfw = "a420_pfp.fw",
+		.gmem  = (SZ_1M + SZ_512K),
+		.init  = a4xx_gpu_init,
+	}, {
+		.rev = ADRENO_REV(5, 3, 0, ANY_ID),
+		.revn = 530,
+		.name = "A530",
+		.pm4fw = "a530_pm4.fw",
+		.pfpfw = "a530_pfp.fw",
+		.gmem = SZ_1M,
+		.init = a5xx_gpu_init,
+	}, {
+		.rev = ADRENO_REV(5, 4, 0, ANY_ID),
+		.revn = 540,
+		.name = "A540",
+		.pm4fw = "a530_pm4.fw",
+		.pfpfw = "a530_pfp.fw",
+		.gmem = SZ_1M,
+		.init = a5xx_gpu_init,
 	},
 };
 
@@ -82,6 +103,8 @@
 MODULE_FIRMWARE("a330_pfp.fw");
 MODULE_FIRMWARE("a420_pm4.fw");
 MODULE_FIRMWARE("a420_pfp.fw");
+MODULE_FIRMWARE("a530_fm4.fw");
+MODULE_FIRMWARE("a530_pfp.fw");
 
 static inline bool _rev_match(uint8_t entry, uint8_t id)
 {
@@ -141,17 +164,17 @@
 
 	if (gpu) {
 		int ret;
-		mutex_lock(&dev->struct_mutex);
-		gpu->funcs->pm_resume(gpu);
-		mutex_unlock(&dev->struct_mutex);
-		ret = gpu->funcs->hw_init(gpu);
+
+		pm_runtime_get_sync(&pdev->dev);
+		ret = msm_gpu_hw_init(gpu);
+		pm_runtime_put_sync_autosuspend(&pdev->dev);
 		if (ret) {
 			dev_err(dev->dev, "gpu hw init failed: %d\n", ret);
+			mutex_lock(&dev->struct_mutex);
+			gpu->funcs->pm_suspend(gpu);
+			mutex_unlock(&dev->struct_mutex);
 			gpu->funcs->destroy(gpu);
 			gpu = NULL;
-		} else {
-			/* give inactive pm a chance to kick in: */
-			msm_gpu_retire(gpu);
 		}
 	}
 
@@ -168,12 +191,16 @@
 static int adreno_bind(struct device *dev, struct device *master, void *data)
 {
 	static struct adreno_platform_config config = {};
-#ifdef CONFIG_OF
-	struct device_node *child, *node = dev->of_node;
-	u32 val;
+	uint32_t val = 0;
 	int ret;
 
-	ret = of_property_read_u32(node, "qcom,chipid", &val);
+	/*
+	 * Read the chip ID from the device tree at bind time - we use this
+	 * information to load the correct functions. All the rest of the
+	 * (extensive) device tree probing should happen in the GPU specific
+	 * code
+	 */
+	ret = of_property_read_u32(dev->of_node, "qcom,chipid", &val);
 	if (ret) {
 		dev_err(dev, "could not find chipid: %d\n", ret);
 		return ret;
@@ -182,76 +209,6 @@
 	config.rev = ADRENO_REV((val >> 24) & 0xff,
 			(val >> 16) & 0xff, (val >> 8) & 0xff, val & 0xff);
 
-	/* find clock rates: */
-	config.fast_rate = 0;
-	config.slow_rate = ~0;
-	for_each_child_of_node(node, child) {
-		if (of_device_is_compatible(child, "qcom,gpu-pwrlevels")) {
-			struct device_node *pwrlvl;
-			for_each_child_of_node(child, pwrlvl) {
-				ret = of_property_read_u32(pwrlvl, "qcom,gpu-freq", &val);
-				if (ret) {
-					dev_err(dev, "could not find gpu-freq: %d\n", ret);
-					return ret;
-				}
-				config.fast_rate = max(config.fast_rate, val);
-				config.slow_rate = min(config.slow_rate, val);
-			}
-		}
-	}
-
-	if (!config.fast_rate) {
-		dev_err(dev, "could not find clk rates\n");
-		return -ENXIO;
-	}
-
-#else
-	struct kgsl_device_platform_data *pdata = dev->platform_data;
-	uint32_t version = socinfo_get_version();
-	if (cpu_is_apq8064ab()) {
-		config.fast_rate = 450000000;
-		config.slow_rate = 27000000;
-		config.bus_freq  = 4;
-		config.rev = ADRENO_REV(3, 2, 1, 0);
-	} else if (cpu_is_apq8064()) {
-		config.fast_rate = 400000000;
-		config.slow_rate = 27000000;
-		config.bus_freq  = 4;
-
-		if (SOCINFO_VERSION_MAJOR(version) == 2)
-			config.rev = ADRENO_REV(3, 2, 0, 2);
-		else if ((SOCINFO_VERSION_MAJOR(version) == 1) &&
-				(SOCINFO_VERSION_MINOR(version) == 1))
-			config.rev = ADRENO_REV(3, 2, 0, 1);
-		else
-			config.rev = ADRENO_REV(3, 2, 0, 0);
-
-	} else if (cpu_is_msm8960ab()) {
-		config.fast_rate = 400000000;
-		config.slow_rate = 320000000;
-		config.bus_freq  = 4;
-
-		if (SOCINFO_VERSION_MINOR(version) == 0)
-			config.rev = ADRENO_REV(3, 2, 1, 0);
-		else
-			config.rev = ADRENO_REV(3, 2, 1, 1);
-
-	} else if (cpu_is_msm8930()) {
-		config.fast_rate = 400000000;
-		config.slow_rate = 27000000;
-		config.bus_freq  = 3;
-
-		if ((SOCINFO_VERSION_MAJOR(version) == 1) &&
-			(SOCINFO_VERSION_MINOR(version) == 2))
-			config.rev = ADRENO_REV(3, 0, 5, 2);
-		else
-			config.rev = ADRENO_REV(3, 0, 5, 0);
-
-	}
-#  ifdef DOWNSTREAM_CONFIG_MSM_BUS_SCALING
-	config.bus_scale_table = pdata->bus_scale_table;
-#  endif
-#endif
 	dev->platform_data = &config;
 	set_gpu_pdev(dev_get_drvdata(master), to_platform_device(dev));
 	return 0;
@@ -286,12 +243,35 @@
 	{}
 };
 
+#ifdef CONFIG_PM
+static int adreno_resume(struct device *dev)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct msm_gpu *gpu = platform_get_drvdata(pdev);
+
+	return gpu->funcs->pm_resume(gpu);
+}
+
+static int adreno_suspend(struct device *dev)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct msm_gpu *gpu = platform_get_drvdata(pdev);
+
+	return gpu->funcs->pm_suspend(gpu);
+}
+#endif
+
+static const struct dev_pm_ops adreno_pm_ops = {
+	SET_RUNTIME_PM_OPS(adreno_suspend, adreno_resume, NULL)
+};
+
 static struct platform_driver adreno_driver = {
 	.probe = adreno_probe,
 	.remove = adreno_remove,
 	.driver = {
 		.name = "adreno",
 		.of_match_table = dt_match,
+		.pm = &adreno_pm_ops,
 	},
 };
 
diff -ruw linux-4.4.115/drivers/gpu/drm/msm/adreno/adreno_gpu.c linux-4.4.115-fbx/drivers/gpu/drm/msm/adreno/adreno_gpu.c
--- linux-4.4.115/drivers/gpu/drm/msm/adreno/adreno_gpu.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/gpu/drm/msm/adreno/adreno_gpu.c	2019-01-22 16:16:23.487246262 +0100
@@ -2,7 +2,7 @@
  * Copyright (C) 2013 Red Hat
  * Author: Rob Clark <robdclark@gmail.com>
  *
- * Copyright (c) 2014 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014,2016-2018 The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License version 2 as published by
@@ -17,12 +17,12 @@
  * this program.  If not, see <http://www.gnu.org/licenses/>.
  */
 
+#include <linux/utsname.h>
 #include "adreno_gpu.h"
+#include "msm_snapshot.h"
 #include "msm_gem.h"
 #include "msm_mmu.h"
 
-#define RB_SIZE    SZ_32K
-#define RB_BLKSIZE 16
 
 int adreno_get_param(struct msm_gpu *gpu, uint32_t param, uint64_t *value)
 {
@@ -35,123 +35,167 @@
 	case MSM_PARAM_GMEM_SIZE:
 		*value = adreno_gpu->gmem;
 		return 0;
+	case MSM_PARAM_GMEM_BASE:
+		*value = 0x100000;
+		return 0;
 	case MSM_PARAM_CHIP_ID:
 		*value = adreno_gpu->rev.patchid |
 				(adreno_gpu->rev.minor << 8) |
 				(adreno_gpu->rev.major << 16) |
 				(adreno_gpu->rev.core << 24);
 		return 0;
+	case MSM_PARAM_MAX_FREQ:
+		*value = gpu->gpufreq[0];
+		return 0;
+	case MSM_PARAM_TIMESTAMP:
+		if (adreno_gpu->funcs->get_timestamp) {
+			int ret;
+
+			pm_runtime_get_sync(&gpu->pdev->dev);
+			ret = adreno_gpu->funcs->get_timestamp(gpu, value);
+			pm_runtime_put_autosuspend(&gpu->pdev->dev);
+
+			return ret;
+		}
+		return -EINVAL;
+	case MSM_PARAM_NR_RINGS:
+		*value = gpu->nr_rings;
+		return 0;
+	case MSM_PARAM_GPU_HANG_TIMEOUT:
+		*value = DRM_MSM_HANGCHECK_PERIOD;
+		return 0;
 	default:
 		DBG("%s: invalid param: %u", gpu->name, param);
 		return -EINVAL;
 	}
 }
 
-#define rbmemptr(adreno_gpu, member)  \
-	((adreno_gpu)->memptrs_iova + offsetof(struct adreno_rbmemptrs, member))
-
 int adreno_hw_init(struct msm_gpu *gpu)
 {
 	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
-	int ret;
+	int i;
 
 	DBG("%s", gpu->name);
 
-	ret = msm_gem_get_iova(gpu->rb->bo, gpu->id, &gpu->rb_iova);
+	for (i = 0; i < gpu->nr_rings; i++) {
+		struct msm_ringbuffer *ring = gpu->rb[i];
+
+		int ret = msm_gem_get_iova(ring->bo, gpu->aspace,
+			&ring->iova);
 	if (ret) {
-		gpu->rb_iova = 0;
-		dev_err(gpu->dev->dev, "could not map ringbuffer: %d\n", ret);
+			ring->iova = 0;
+			dev_err(gpu->dev->dev,
+				"could not map ringbuffer %d: %d\n", i, ret);
 		return ret;
 	}
 
-	/* Setup REG_CP_RB_CNTL: */
-	adreno_gpu_write(adreno_gpu, REG_ADRENO_CP_RB_CNTL,
-			/* size is log2(quad-words): */
-			AXXX_CP_RB_CNTL_BUFSZ(ilog2(gpu->rb->size / 8)) |
-			AXXX_CP_RB_CNTL_BLKSZ(ilog2(RB_BLKSIZE / 8)));
+		/* reset ringbuffer(s): */
+		/* No need for a lock here, nobody else is peeking in */
+		ring->cur = ring->start;
+		ring->next = ring->start;
+
+		/* reset completed fence seqno, discard anything pending: */
+		ring->memptrs->fence = adreno_submitted_fence(gpu, ring);
+		ring->memptrs->rptr  = 0;
+	}
 
-	/* Setup ringbuffer address: */
-	adreno_gpu_write(adreno_gpu, REG_ADRENO_CP_RB_BASE, gpu->rb_iova);
-	adreno_gpu_write(adreno_gpu, REG_ADRENO_CP_RB_RPTR_ADDR,
-			rbmemptr(adreno_gpu, rptr));
+	/*
+	 * Setup REG_CP_RB_CNTL.  The same value is used across targets (with
+	 * the exception of A430, which disables the RPTR shadow) - the calculation
+	 * for the ringbuffer size and block size is moved to msm_gpu.h for the
+	 * pre-processor to deal with and the A430 variant is ORed in here
+	 */
+	adreno_gpu_write(adreno_gpu, REG_ADRENO_CP_RB_CNTL,
+		MSM_GPU_RB_CNTL_DEFAULT |
+		(adreno_is_a430(adreno_gpu) ? AXXX_CP_RB_CNTL_NO_UPDATE : 0));
 
-	/* Setup scratch/timestamp: */
-	adreno_gpu_write(adreno_gpu, REG_ADRENO_SCRATCH_ADDR,
-			rbmemptr(adreno_gpu, fence));
+	/* Setup ringbuffer address - use ringbuffer[0] for GPU init */
+	adreno_gpu_write64(adreno_gpu, REG_ADRENO_CP_RB_BASE,
+		REG_ADRENO_CP_RB_BASE_HI, gpu->rb[0]->iova);
 
-	adreno_gpu_write(adreno_gpu, REG_ADRENO_SCRATCH_UMSK, 0x1);
+	adreno_gpu_write64(adreno_gpu, REG_ADRENO_CP_RB_RPTR_ADDR,
+		REG_ADRENO_CP_RB_RPTR_ADDR_HI, rbmemptr(gpu->rb[0], rptr));
 
 	return 0;
 }
 
-static uint32_t get_wptr(struct msm_ringbuffer *ring)
+/* Use this helper to read rptr, since a430 doesn't update rptr in memory */
+static uint32_t get_rptr(struct adreno_gpu *adreno_gpu,
+		struct msm_ringbuffer *ring)
 {
-	return ring->cur - ring->start;
+	if (adreno_is_a430(adreno_gpu)) {
+		/*
+		 * If index is anything but 0 this will probably break horribly,
+		 * but I think that we have enough infrastructure in place to
+		 * ensure that it won't be. If not then this is why your
+		 * a430 stopped working.
+		 */
+		return ring->memptrs->rptr =
+			adreno_gpu_read(adreno_gpu, REG_ADRENO_CP_RB_RPTR);
+	}
+
+	return ring->memptrs->rptr;
 }
 
-uint32_t adreno_last_fence(struct msm_gpu *gpu)
+struct msm_ringbuffer *adreno_active_ring(struct msm_gpu *gpu)
 {
-	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
-	return adreno_gpu->memptrs->fence;
+	return gpu->rb[0];
+}
+
+uint32_t adreno_submitted_fence(struct msm_gpu *gpu,
+		struct msm_ringbuffer *ring)
+{
+	if (!ring)
+		return 0;
+
+	return ring->submitted_fence;
 }
 
 void adreno_recover(struct msm_gpu *gpu)
 {
-	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
 	struct drm_device *dev = gpu->dev;
 	int ret;
 
-	gpu->funcs->pm_suspend(gpu);
-
-	/* reset ringbuffer: */
-	gpu->rb->cur = gpu->rb->start;
+	/*
+	 * XXX pm-runtime??  we *need* the device to be off after this
+	 * so maybe continuing to call ->pm_suspend/resume() is better?
+	 */
 
-	/* reset completed fence seqno, just discard anything pending: */
-	adreno_gpu->memptrs->fence = gpu->submitted_fence;
-	adreno_gpu->memptrs->rptr  = 0;
-	adreno_gpu->memptrs->wptr  = 0;
+	gpu->funcs->pm_suspend(gpu);
 
 	gpu->funcs->pm_resume(gpu);
-	ret = gpu->funcs->hw_init(gpu);
+
+	ret = msm_gpu_hw_init(gpu);
 	if (ret) {
 		dev_err(dev->dev, "gpu hw init failed: %d\n", ret);
 		/* hmm, oh well? */
 	}
 }
 
-int adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
-		struct msm_file_private *ctx)
+void adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
 {
 	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
-	struct msm_drm_private *priv = gpu->dev->dev_private;
-	struct msm_ringbuffer *ring = gpu->rb;
-	unsigned i, ibs = 0;
+	struct msm_ringbuffer *ring = gpu->rb[submit->ring];
+	unsigned i;
 
 	for (i = 0; i < submit->nr_cmds; i++) {
 		switch (submit->cmd[i].type) {
 		case MSM_SUBMIT_CMD_IB_TARGET_BUF:
 			/* ignore IB-targets */
 			break;
+		case MSM_SUBMIT_CMD_PROFILE_BUF:
 		case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
-			/* ignore if there has not been a ctx switch: */
-			if (priv->lastctx == ctx)
 				break;
 		case MSM_SUBMIT_CMD_BUF:
-			OUT_PKT3(ring, CP_INDIRECT_BUFFER_PFD, 2);
-			OUT_RING(ring, submit->cmd[i].iova);
+			OUT_PKT3(ring, adreno_is_a430(adreno_gpu) ?
+				CP_INDIRECT_BUFFER_PFE : CP_INDIRECT_BUFFER_PFD, 2);
+			OUT_RING(ring, lower_32_bits(submit->cmd[i].iova));
 			OUT_RING(ring, submit->cmd[i].size);
-			ibs++;
+			OUT_PKT2(ring);
 			break;
 		}
 	}
 
-	/* on a320, at least, we seem to need to pad things out to an
-	 * even number of qwords to avoid issue w/ CP hanging on wrap-
-	 * around:
-	 */
-	if (ibs % 2)
-		OUT_PKT2(ring);
-
 	OUT_PKT0(ring, REG_AXXX_CP_SCRATCH_REG2, 1);
 	OUT_RING(ring, submit->fence);
 
@@ -169,7 +213,7 @@
 
 	OUT_PKT3(ring, CP_EVENT_WRITE, 3);
 	OUT_RING(ring, CACHE_FLUSH_TS);
-	OUT_RING(ring, rbmemptr(adreno_gpu, fence));
+	OUT_RING(ring, rbmemptr(ring, fence));
 	OUT_RING(ring, submit->fence);
 
 	/* we could maybe be clever and only CP_COND_EXEC the interrupt: */
@@ -196,22 +240,23 @@
 	}
 #endif
 
-	gpu->funcs->flush(gpu);
-
-	return 0;
+	gpu->funcs->flush(gpu, ring);
 }
 
-void adreno_flush(struct msm_gpu *gpu)
+void adreno_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
 {
 	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
 	uint32_t wptr;
 
+	/* Copy the shadow to the actual register */
+	ring->cur = ring->next;
+
 	/*
-	 * Mask wptr value that we calculate to fit in the HW range. This is
+	 * Mask the wptr value that we calculate to fit in the HW range. This is
 	 * to account for the possibility that the last command fit exactly into
 	 * the ringbuffer and rb->next hasn't wrapped to zero yet
 	 */
-	wptr = get_wptr(gpu->rb) & ((gpu->rb->size / 4) - 1);
+	wptr = get_wptr(ring);
 
 	/* ensure writes to ringbuffer have hit system memory: */
 	mb();
@@ -219,22 +264,27 @@
 	adreno_gpu_write(adreno_gpu, REG_ADRENO_CP_RB_WPTR, wptr);
 }
 
-void adreno_idle(struct msm_gpu *gpu)
+bool adreno_idle(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
 {
 	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
-	uint32_t wptr = get_wptr(gpu->rb);
+	uint32_t wptr = get_wptr(ring);
 
 	/* wait for CP to drain ringbuffer: */
-	if (spin_until(adreno_gpu->memptrs->rptr == wptr))
-		DRM_ERROR("%s: timeout waiting to drain ringbuffer!\n", gpu->name);
+	if (!spin_until(get_rptr(adreno_gpu, ring) == wptr))
+		return true;
 
 	/* TODO maybe we need to reset GPU here to recover from hang? */
+	DRM_ERROR("%s: timeout waiting to drain ringbuffer %d rptr/wptr = %X/%X\n",
+		gpu->name, ring->id, get_rptr(adreno_gpu, ring), wptr);
+
+	return false;
 }
 
 #ifdef CONFIG_DEBUG_FS
 void adreno_show(struct msm_gpu *gpu, struct seq_file *m)
 {
 	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+	struct msm_ringbuffer *ring;
 	int i;
 
 	seq_printf(m, "revision: %d (%d.%d.%d.%d)\n",
@@ -242,13 +292,18 @@
 			adreno_gpu->rev.major, adreno_gpu->rev.minor,
 			adreno_gpu->rev.patchid);
 
-	seq_printf(m, "fence:    %d/%d\n", adreno_gpu->memptrs->fence,
-			gpu->submitted_fence);
-	seq_printf(m, "rptr:     %d\n", adreno_gpu->memptrs->rptr);
-	seq_printf(m, "wptr:     %d\n", adreno_gpu->memptrs->wptr);
-	seq_printf(m, "rb wptr:  %d\n", get_wptr(gpu->rb));
-
-	gpu->funcs->pm_resume(gpu);
+	FOR_EACH_RING(gpu, ring, i) {
+		if (!ring)
+			continue;
+
+		seq_printf(m, "rb %d: fence:    %d/%d\n", i,
+			ring->memptrs->fence,
+			adreno_submitted_fence(gpu, ring));
+
+		seq_printf(m, "      rptr:     %d\n",
+			get_rptr(adreno_gpu, ring));
+		seq_printf(m, "rb wptr:  %d\n", get_wptr(ring));
+	}
 
 	/* dump these out in a form that can be parsed by demsm: */
 	seq_printf(m, "IO:region %s 00000000 00020000\n", gpu->name);
@@ -262,8 +317,6 @@
 			seq_printf(m, "IO:R %08x %08x\n", addr<<2, val);
 		}
 	}
-
-	gpu->funcs->pm_suspend(gpu);
 }
 #endif
 
@@ -275,22 +328,29 @@
  */
 void adreno_dump_info(struct msm_gpu *gpu)
 {
+	struct drm_device *dev = gpu->dev;
 	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+	struct msm_ringbuffer *ring;
 	int i;
 
-	printk("revision: %d (%d.%d.%d.%d)\n",
+	dev_err(dev->dev, "revision: %d (%d.%d.%d.%d)\n",
 			adreno_gpu->info->revn, adreno_gpu->rev.core,
 			adreno_gpu->rev.major, adreno_gpu->rev.minor,
 			adreno_gpu->rev.patchid);
 
-	printk("fence:    %d/%d\n", adreno_gpu->memptrs->fence,
-			gpu->submitted_fence);
-	printk("rptr:     %d\n", adreno_gpu->memptrs->rptr);
-	printk("wptr:     %d\n", adreno_gpu->memptrs->wptr);
-	printk("rb wptr:  %d\n", get_wptr(gpu->rb));
+	FOR_EACH_RING(gpu, ring, i) {
+		if (!ring)
+			continue;
+
+		dev_err(dev->dev, " ring %d: fence %d/%d rptr/wptr %x/%x\n", i,
+			ring->memptrs->fence,
+			adreno_submitted_fence(gpu, ring),
+			get_rptr(adreno_gpu, ring),
+			get_wptr(ring));
+	}
 
 	for (i = 0; i < 8; i++) {
-		printk("CP_SCRATCH_REG%d: %u\n", i,
+		pr_err("CP_SCRATCH_REG%d: %u\n", i,
 			gpu_read(gpu, REG_AXXX_CP_SCRATCH_REG0 + i));
 	}
 }
@@ -315,32 +375,135 @@
 	}
 }
 
-static uint32_t ring_freewords(struct msm_gpu *gpu)
+static uint32_t ring_freewords(struct msm_ringbuffer *ring)
 {
-	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
-	uint32_t size = gpu->rb->size / 4;
-	uint32_t wptr = get_wptr(gpu->rb);
-	uint32_t rptr = adreno_gpu->memptrs->rptr;
+	struct adreno_gpu *adreno_gpu = to_adreno_gpu(ring->gpu);
+	uint32_t size = MSM_GPU_RINGBUFFER_SZ >> 2;
+	/* Use ring->next to calculate free size */
+	uint32_t wptr = ring->next - ring->start;
+	uint32_t rptr = get_rptr(adreno_gpu, ring);
 	return (rptr + (size - 1) - wptr) % size;
 }
 
-void adreno_wait_ring(struct msm_gpu *gpu, uint32_t ndwords)
+void adreno_wait_ring(struct msm_ringbuffer *ring, uint32_t ndwords)
 {
-	if (spin_until(ring_freewords(gpu) >= ndwords))
-		DRM_ERROR("%s: timeout waiting for ringbuffer space\n", gpu->name);
+	if (spin_until(ring_freewords(ring) >= ndwords))
+		DRM_ERROR("%s: timeout waiting for space in ringubffer %d\n",
+			ring->gpu->name, ring->id);
 }
 
-static const char *iommu_ports[] = {
-		"gfx3d_user", "gfx3d_priv",
-		"gfx3d1_user", "gfx3d1_priv",
+/* Read the set of powerlevels */
+static int _adreno_get_pwrlevels(struct msm_gpu *gpu, struct device_node *node)
+{
+	struct device_node *child;
+
+	for_each_child_of_node(node, child) {
+		unsigned int index;
+
+		if (of_property_read_u32(child, "reg", &index))
+			return -EINVAL;
+
+		if (index >= ARRAY_SIZE(gpu->gpufreq))
+			continue;
+
+		gpu->nr_pwrlevels = max(gpu->nr_pwrlevels, index + 1);
+
+		of_property_read_u32(child, "qcom,gpu-freq",
+			&gpu->gpufreq[index]);
+		of_property_read_u32(child, "qcom,bus-freq",
+			&gpu->busfreq[index]);
+	}
+
+	DBG("fast_rate=%u, slow_rate=%u, bus_freq=%u",
+		gpu->gpufreq[0],
+		gpu->gpufreq[gpu->nr_pwrlevels - 1],
+		gpu->busfreq[0]);
+
+	return 0;
+}
+
+/*
+ * Escape valve for targets that don't define the binning nodes. Get the
+ * first powerlevel node and parse it
+ */
+static int adreno_get_legacy_pwrlevels(struct msm_gpu *gpu,
+		struct device_node *parent)
+{
+	struct device_node *child;
+
+	child = of_find_node_by_name(parent, "qcom,gpu-pwrlevels");
+	if (child)
+		return _adreno_get_pwrlevels(gpu, child);
+
+	dev_err(gpu->dev->dev, "Unable to parse any powerlevels\n");
+	return -EINVAL;
+}
+
+/* Get the powerlevels for the target */
+static int adreno_get_pwrlevels(struct msm_gpu *gpu, struct device_node *parent)
+{
+	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+	struct device_node *node, *child;
+
+	/* See if the target has defined a number of power bins */
+	node = of_find_node_by_name(parent, "qcom,gpu-pwrlevel-bins");
+	if (!node) {
+		/* If not look for the qcom,gpu-pwrlevels node */
+		return adreno_get_legacy_pwrlevels(gpu, parent);
+	}
+
+	for_each_child_of_node(node, child) {
+		unsigned int bin;
+
+		if (of_property_read_u32(child, "qcom,speed-bin", &bin))
+			continue;
+
+		/*
+		 * If the bin matches the bin specified by the fuses, then we
+		 * have a winner - parse it
+		 */
+		if (adreno_gpu->speed_bin == bin)
+			return _adreno_get_pwrlevels(gpu, child);
+	}
+
+	return -ENODEV;
+}
+
+static const struct {
+	const char *str;
+	uint32_t flag;
+} quirks[] = {
+	{ "qcom,gpu-quirk-two-pass-use-wfi", ADRENO_QUIRK_TWO_PASS_USE_WFI },
+	{ "qcom,gpu-quirk-fault-detect-mask", ADRENO_QUIRK_FAULT_DETECT_MASK },
 };
 
+/* Parse the target configuration (powerlevels, quirks) from the device tree */
+static int adreno_of_parse(struct platform_device *pdev, struct msm_gpu *gpu)
+{
+	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+	struct device_node *node = pdev->dev.of_node;
+	int i, ret;
+
+	/* Probe the powerlevels */
+	ret = adreno_get_pwrlevels(gpu, node);
+	if (ret)
+		return ret;
+
+	/* Check to see if any quirks were specified in the device tree */
+	for (i = 0; i < ARRAY_SIZE(quirks); i++)
+		if (of_property_read_bool(node, quirks[i].str))
+			adreno_gpu->quirks |= quirks[i].flag;
+
+	return 0;
+}
+
 int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
-		struct adreno_gpu *adreno_gpu, const struct adreno_gpu_funcs *funcs)
+		struct adreno_gpu *adreno_gpu,
+		const struct adreno_gpu_funcs *funcs,
+		struct msm_gpu_config *gpu_config)
 {
 	struct adreno_platform_config *config = pdev->dev.platform_data;
 	struct msm_gpu *gpu = &adreno_gpu->base;
-	struct msm_mmu *mmu;
 	int ret;
 
 	adreno_gpu->funcs = funcs;
@@ -349,22 +512,18 @@
 	adreno_gpu->revn = adreno_gpu->info->revn;
 	adreno_gpu->rev = config->rev;
 
-	gpu->fast_rate = config->fast_rate;
-	gpu->slow_rate = config->slow_rate;
-	gpu->bus_freq  = config->bus_freq;
-#ifdef DOWNSTREAM_CONFIG_MSM_BUS_SCALING
-	gpu->bus_scale_table = config->bus_scale_table;
-#endif
-
-	DBG("fast_rate=%u, slow_rate=%u, bus_freq=%u",
-			gpu->fast_rate, gpu->slow_rate, gpu->bus_freq);
+	/* Get the rest of the target configuration from the device tree */
+	adreno_of_parse(pdev, gpu);
 
 	ret = msm_gpu_init(drm, pdev, &adreno_gpu->base, &funcs->base,
-			adreno_gpu->info->name, "kgsl_3d0_reg_memory", "kgsl_3d0_irq",
-			RB_SIZE);
+			adreno_gpu->info->name, gpu_config);
 	if (ret)
 		return ret;
 
+	pm_runtime_set_autosuspend_delay(&pdev->dev, DRM_MSM_INACTIVE_PERIOD);
+	pm_runtime_use_autosuspend(&pdev->dev);
+	pm_runtime_enable(&pdev->dev);
+
 	ret = request_firmware(&adreno_gpu->pm4, adreno_gpu->info->pm4fw, drm->dev);
 	if (ret) {
 		dev_err(drm->dev, "failed to load %s PM4 firmware: %d\n",
@@ -373,55 +532,150 @@
 	}
 
 	ret = request_firmware(&adreno_gpu->pfp, adreno_gpu->info->pfpfw, drm->dev);
-	if (ret) {
+	if (ret)
 		dev_err(drm->dev, "failed to load %s PFP firmware: %d\n",
 				adreno_gpu->info->pfpfw, ret);
+
 		return ret;
 	}
 
-	mmu = gpu->mmu;
-	if (mmu) {
-		ret = mmu->funcs->attach(mmu, iommu_ports,
-				ARRAY_SIZE(iommu_ports));
-		if (ret)
-			return ret;
+void adreno_gpu_cleanup(struct adreno_gpu *adreno_gpu)
+{
+	struct msm_gpu *gpu = &adreno_gpu->base;
+	struct drm_device *dev = gpu->dev;
+	struct msm_drm_private *priv = dev->dev_private;
+	struct platform_device *pdev = priv->gpu_pdev;
+
+	release_firmware(adreno_gpu->pm4);
+	release_firmware(adreno_gpu->pfp);
+
+	pm_runtime_disable(&pdev->dev);
+	msm_gpu_cleanup(gpu);
 	}
 
-	mutex_lock(&drm->struct_mutex);
-	adreno_gpu->memptrs_bo = msm_gem_new(drm, sizeof(*adreno_gpu->memptrs),
-			MSM_BO_UNCACHED);
-	mutex_unlock(&drm->struct_mutex);
-	if (IS_ERR(adreno_gpu->memptrs_bo)) {
-		ret = PTR_ERR(adreno_gpu->memptrs_bo);
-		adreno_gpu->memptrs_bo = NULL;
-		dev_err(drm->dev, "could not allocate memptrs: %d\n", ret);
-		return ret;
+static void adreno_snapshot_os(struct msm_gpu *gpu,
+		struct msm_snapshot *snapshot)
+{
+	struct msm_snapshot_linux header;
+
+	memset(&header, 0, sizeof(header));
+
+	header.osid = SNAPSHOT_OS_LINUX_V3;
+	strlcpy(header.release, utsname()->release, sizeof(header.release));
+	strlcpy(header.version, utsname()->version, sizeof(header.version));
+
+	header.seconds = get_seconds();
+	header.ctxtcount = 0;
+
+	SNAPSHOT_HEADER(snapshot, header, SNAPSHOT_SECTION_OS, 0);
 	}
 
-	adreno_gpu->memptrs = msm_gem_vaddr(adreno_gpu->memptrs_bo);
-	if (!adreno_gpu->memptrs) {
-		dev_err(drm->dev, "could not vmap memptrs\n");
-		return -ENOMEM;
+static void adreno_snapshot_ringbuffer(struct msm_gpu *gpu,
+		struct msm_snapshot *snapshot, struct msm_ringbuffer *ring)
+{
+	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+	struct msm_snapshot_ringbuffer header;
+	unsigned int i, end = 0;
+	unsigned int *data = ring->start;
+
+	memset(&header, 0, sizeof(header));
+
+	/*
+	 * We only want to copy the active contents of each ring, so find the
+	 * last valid entry in the ringbuffer
+	 */
+	for (i = 0; i < MSM_GPU_RINGBUFFER_SZ >> 2; i++) {
+		if (data[i])
+			end = i;
 	}
 
-	ret = msm_gem_get_iova(adreno_gpu->memptrs_bo, gpu->id,
-			&adreno_gpu->memptrs_iova);
-	if (ret) {
-		dev_err(drm->dev, "could not map memptrs: %d\n", ret);
-		return ret;
+	/* The dump always starts at 0 */
+	header.start = 0;
+	header.end = end;
+
+	/* This is the number of dwords being dumped */
+	header.count = end + 1;
+
+	/* This is the size of the actual ringbuffer */
+	header.rbsize = MSM_GPU_RINGBUFFER_SZ >> 2;
+
+	header.id = ring->id;
+	header.gpuaddr = ring->iova;
+	header.rptr = get_rptr(adreno_gpu, ring);
+	header.wptr = get_wptr(ring);
+	header.timestamp_queued = adreno_submitted_fence(gpu, ring);
+	header.timestamp_retired = ring->memptrs->fence;
+
+	/* Write the header even if the ringbuffer data is empty */
+	if (!SNAPSHOT_HEADER(snapshot, header, SNAPSHOT_SECTION_RB_V2,
+		header.count))
+		return;
+
+	SNAPSHOT_MEMCPY(snapshot, ring->start, header.count * sizeof(u32));
+}
+
+static void adreno_snapshot_ringbuffers(struct msm_gpu *gpu,
+		struct msm_snapshot *snapshot)
+{
+	struct msm_ringbuffer *ring;
+	int i;
+
+	/* Write a new section for each ringbuffer */
+	FOR_EACH_RING(gpu, ring, i)
+		adreno_snapshot_ringbuffer(gpu, snapshot, ring);
+}
+
+void adreno_snapshot(struct msm_gpu *gpu, struct msm_snapshot *snapshot)
+{
+	adreno_snapshot_os(gpu, snapshot);
+	adreno_snapshot_ringbuffers(gpu, snapshot);
+}
+
+/* Return the group struct associated with the counter id */
+
+static struct adreno_counter_group *get_counter_group(struct msm_gpu *gpu,
+		u32 groupid)
+{
+	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+
+	if (!adreno_gpu->counter_groups)
+		return ERR_PTR(-ENODEV);
+
+	if (groupid >= adreno_gpu->nr_counter_groups)
+		return ERR_PTR(-ENODEV);
+
+	return (struct adreno_counter_group *)
+		adreno_gpu->counter_groups[groupid];
 	}
 
+int adreno_get_counter(struct msm_gpu *gpu, u32 groupid, u32 countable,
+		u32 *lo, u32 *hi)
+{
+	struct adreno_counter_group *group =
+		get_counter_group(gpu, groupid);
+
+	if (!IS_ERR_OR_NULL(group) && group->funcs.get)
+		return group->funcs.get(gpu, group, countable, lo, hi);
+
+	return -ENODEV;
+}
+
+u64 adreno_read_counter(struct msm_gpu *gpu, u32 groupid, int counterid)
+{
+	struct adreno_counter_group *group =
+		get_counter_group(gpu, groupid);
+
+	if (!IS_ERR_OR_NULL(group) && group->funcs.read)
+		return group->funcs.read(gpu, group, counterid);
+
 	return 0;
 }
 
-void adreno_gpu_cleanup(struct adreno_gpu *gpu)
+void adreno_put_counter(struct msm_gpu *gpu, u32 groupid, int counterid)
 {
-	if (gpu->memptrs_bo) {
-		if (gpu->memptrs_iova)
-			msm_gem_put_iova(gpu->memptrs_bo, gpu->base.id);
-		drm_gem_object_unreference_unlocked(gpu->memptrs_bo);
-	}
-	release_firmware(gpu->pm4);
-	release_firmware(gpu->pfp);
-	msm_gpu_cleanup(&gpu->base);
+	struct adreno_counter_group *group =
+		get_counter_group(gpu, groupid);
+
+	if (!IS_ERR_OR_NULL(group) && group->funcs.put)
+		group->funcs.put(gpu, group, counterid);
 }
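The ring_freewords() change above keeps the classic one-slot-open circular-buffer formula, just computed against ring->next instead of the committed wptr. A worked example with illustrative numbers:

	uint32_t size = 1024;			/* ring size in dwords */
	uint32_t rptr = 100, wptr = 612;
	uint32_t free = (rptr + (size - 1) - wptr) % size;	/* == 511 */
	/* With rptr == wptr the ring is empty and free == size - 1: one
	 * slot always stays unused so an empty ring is distinguishable
	 * from a full one. */
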
diff -ruw linux-4.4.115/drivers/gpu/drm/msm/adreno/adreno_gpu.h linux-4.4.115-fbx/drivers/gpu/drm/msm/adreno/adreno_gpu.h
--- linux-4.4.115/drivers/gpu/drm/msm/adreno/adreno_gpu.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/gpu/drm/msm/adreno/adreno_gpu.h	2019-01-22 16:16:23.487246262 +0100
@@ -2,7 +2,7 @@
  * Copyright (C) 2013 Red Hat
  * Author: Rob Clark <robdclark@gmail.com>
  *
- * Copyright (c) 2014 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014,2016-2017 The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License version 2 as published by
@@ -24,10 +24,17 @@
 
 #include "msm_gpu.h"
 
+/* arrg, somehow fb.h is getting pulled in: */
+#undef ROP_COPY
+#undef ROP_XOR
+
 #include "adreno_common.xml.h"
 #include "adreno_pm4.xml.h"
 
 #define REG_ADRENO_DEFINE(_offset, _reg) [_offset] = (_reg) + 1
+#define REG_SKIP ~0
+#define REG_ADRENO_SKIP(_offset) [_offset] = REG_SKIP
+
 /**
  * adreno_regs: List of registers that are used across all
  * 3D devices. Each device type has a different offset value for the same
  * register; the offsets are indexed by the enumeration values defined in
  * this enum
  * and are indexed by the enumeration values defined in this enum
  */
 enum adreno_regs {
-	REG_ADRENO_CP_DEBUG,
-	REG_ADRENO_CP_ME_RAM_WADDR,
-	REG_ADRENO_CP_ME_RAM_DATA,
-	REG_ADRENO_CP_PFP_UCODE_DATA,
-	REG_ADRENO_CP_PFP_UCODE_ADDR,
-	REG_ADRENO_CP_WFI_PEND_CTR,
 	REG_ADRENO_CP_RB_BASE,
+	REG_ADRENO_CP_RB_BASE_HI,
 	REG_ADRENO_CP_RB_RPTR_ADDR,
+	REG_ADRENO_CP_RB_RPTR_ADDR_HI,
 	REG_ADRENO_CP_RB_RPTR,
 	REG_ADRENO_CP_RB_WPTR,
-	REG_ADRENO_CP_PROTECT_CTRL,
-	REG_ADRENO_CP_ME_CNTL,
 	REG_ADRENO_CP_RB_CNTL,
-	REG_ADRENO_CP_IB1_BASE,
-	REG_ADRENO_CP_IB1_BUFSZ,
-	REG_ADRENO_CP_IB2_BASE,
-	REG_ADRENO_CP_IB2_BUFSZ,
-	REG_ADRENO_CP_TIMESTAMP,
-	REG_ADRENO_CP_ME_RAM_RADDR,
-	REG_ADRENO_CP_ROQ_ADDR,
-	REG_ADRENO_CP_ROQ_DATA,
-	REG_ADRENO_CP_MERCIU_ADDR,
-	REG_ADRENO_CP_MERCIU_DATA,
-	REG_ADRENO_CP_MERCIU_DATA2,
-	REG_ADRENO_CP_MEQ_ADDR,
-	REG_ADRENO_CP_MEQ_DATA,
-	REG_ADRENO_CP_HW_FAULT,
-	REG_ADRENO_CP_PROTECT_STATUS,
-	REG_ADRENO_SCRATCH_ADDR,
-	REG_ADRENO_SCRATCH_UMSK,
-	REG_ADRENO_SCRATCH_REG2,
-	REG_ADRENO_RBBM_STATUS,
-	REG_ADRENO_RBBM_PERFCTR_CTL,
-	REG_ADRENO_RBBM_PERFCTR_LOAD_CMD0,
-	REG_ADRENO_RBBM_PERFCTR_LOAD_CMD1,
-	REG_ADRENO_RBBM_PERFCTR_LOAD_CMD2,
-	REG_ADRENO_RBBM_PERFCTR_PWR_1_LO,
-	REG_ADRENO_RBBM_INT_0_MASK,
-	REG_ADRENO_RBBM_INT_0_STATUS,
-	REG_ADRENO_RBBM_AHB_ERROR_STATUS,
-	REG_ADRENO_RBBM_PM_OVERRIDE2,
-	REG_ADRENO_RBBM_AHB_CMD,
-	REG_ADRENO_RBBM_INT_CLEAR_CMD,
-	REG_ADRENO_RBBM_SW_RESET_CMD,
-	REG_ADRENO_RBBM_CLOCK_CTL,
-	REG_ADRENO_RBBM_AHB_ME_SPLIT_STATUS,
-	REG_ADRENO_RBBM_AHB_PFP_SPLIT_STATUS,
-	REG_ADRENO_VPC_DEBUG_RAM_SEL,
-	REG_ADRENO_VPC_DEBUG_RAM_READ,
-	REG_ADRENO_VSC_SIZE_ADDRESS,
-	REG_ADRENO_VFD_CONTROL_0,
-	REG_ADRENO_VFD_INDEX_MAX,
-	REG_ADRENO_SP_VS_PVT_MEM_ADDR_REG,
-	REG_ADRENO_SP_FS_PVT_MEM_ADDR_REG,
-	REG_ADRENO_SP_VS_OBJ_START_REG,
-	REG_ADRENO_SP_FS_OBJ_START_REG,
-	REG_ADRENO_PA_SC_AA_CONFIG,
-	REG_ADRENO_SQ_GPR_MANAGEMENT,
-	REG_ADRENO_SQ_INST_STORE_MANAGMENT,
-	REG_ADRENO_TP0_CHICKEN,
-	REG_ADRENO_RBBM_RBBM_CTL,
-	REG_ADRENO_UCHE_INVALIDATE0,
-	REG_ADRENO_RBBM_PERFCTR_LOAD_VALUE_LO,
-	REG_ADRENO_RBBM_PERFCTR_LOAD_VALUE_HI,
 	REG_ADRENO_REGISTER_MAX,
 };
 
+enum adreno_quirks {
+	ADRENO_QUIRK_TWO_PASS_USE_WFI = 1,
+	ADRENO_QUIRK_FAULT_DETECT_MASK = 2,
+};
+
 struct adreno_rev {
 	uint8_t  core;
 	uint8_t  major;
@@ -114,6 +69,7 @@
 
 struct adreno_gpu_funcs {
 	struct msm_gpu_funcs base;
+	int (*get_timestamp)(struct msm_gpu *gpu, uint64_t *value);
 };
 
 struct adreno_info {
@@ -127,10 +83,34 @@
 
 const struct adreno_info *adreno_info(struct adreno_rev rev);
 
-struct adreno_rbmemptrs {
-	volatile uint32_t rptr;
-	volatile uint32_t wptr;
-	volatile uint32_t fence;
+struct adreno_counter {
+	u32 lo;
+	u32 hi;
+	u32 sel;
+	int load_bit;
+	u32 countable;
+	u32 refcount;
+	u64 value;
+};
+
+struct adreno_counter_group {
+	struct adreno_counter *counters;
+	size_t nr_counters;
+	spinlock_t lock;
+	struct {
+		int (*get)(struct msm_gpu *,
+			struct adreno_counter_group *, u32, u32 *, u32 *);
+		void (*enable)(struct msm_gpu *,
+			struct adreno_counter_group *, int, bool);
+		u64 (*read)(struct msm_gpu *,
+			struct adreno_counter_group *, int);
+		void (*put)(struct msm_gpu *,
+			struct adreno_counter_group *, int);
+		void (*save)(struct msm_gpu *,
+			struct adreno_counter_group *);
+		void (*restore)(struct msm_gpu *,
+			struct adreno_counter_group *);
+	} funcs;
 };
 
 struct adreno_gpu {
@@ -147,29 +127,24 @@
 	/* firmware: */
 	const struct firmware *pm4, *pfp;
 
-	/* ringbuffer rptr/wptr: */
-	// TODO should this be in msm_ringbuffer?  I think it would be
-	// different for z180..
-	struct adreno_rbmemptrs *memptrs;
-	struct drm_gem_object *memptrs_bo;
-	uint32_t memptrs_iova;
-
 	/*
 	 * Register offsets are different between some GPUs.
 	 * GPU specific offsets will be exported by GPU specific
 	 * code (a3xx_gpu.c) and stored in this common location.
 	 */
 	const unsigned int *reg_offsets;
+
+	uint32_t quirks;
+	uint32_t speed_bin;
+
+	const struct adreno_counter_group **counter_groups;
+	int nr_counter_groups;
 };
 #define to_adreno_gpu(x) container_of(x, struct adreno_gpu, base)
 
 /* platform config data (ie. from DT, or pdata) */
 struct adreno_platform_config {
 	struct adreno_rev rev;
-	uint32_t fast_rate, slow_rate, bus_freq;
-#ifdef DOWNSTREAM_CONFIG_MSM_BUS_SCALING
-	struct msm_bus_scale_pdata *bus_scale_table;
-#endif
 };
 
 #define ADRENO_IDLE_TIMEOUT msecs_to_jiffies(1000)
@@ -186,6 +161,9 @@
 	__ret;                                             \
 })
 
+#define GPU_OF_NODE(_g) \
+	(((struct msm_drm_private *) \
+	  ((_g)->dev->dev_private))->gpu_pdev->dev.of_node)
 
 static inline bool adreno_is_a3xx(struct adreno_gpu *gpu)
 {
@@ -228,32 +206,55 @@
 	return gpu->revn == 420;
 }
 
+static inline int adreno_is_a430(struct adreno_gpu *gpu)
+{
+	return gpu->revn == 430;
+}
+
+static inline int adreno_is_a530(struct adreno_gpu *gpu)
+{
+	return gpu->revn == 530;
+}
+
+static inline int adreno_is_a540(struct adreno_gpu *gpu)
+{
+	return gpu->revn == 540;
+}
+
 int adreno_get_param(struct msm_gpu *gpu, uint32_t param, uint64_t *value);
 int adreno_hw_init(struct msm_gpu *gpu);
-uint32_t adreno_last_fence(struct msm_gpu *gpu);
+uint32_t adreno_submitted_fence(struct msm_gpu *gpu,
+		struct msm_ringbuffer *ring);
 void adreno_recover(struct msm_gpu *gpu);
-int adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
-		struct msm_file_private *ctx);
-void adreno_flush(struct msm_gpu *gpu);
-void adreno_idle(struct msm_gpu *gpu);
+void adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit);
+void adreno_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring);
+bool adreno_idle(struct msm_gpu *gpu, struct msm_ringbuffer *ring);
 #ifdef CONFIG_DEBUG_FS
 void adreno_show(struct msm_gpu *gpu, struct seq_file *m);
 #endif
 void adreno_dump_info(struct msm_gpu *gpu);
 void adreno_dump(struct msm_gpu *gpu);
-void adreno_wait_ring(struct msm_gpu *gpu, uint32_t ndwords);
+void adreno_wait_ring(struct msm_ringbuffer *ring, uint32_t ndwords);
+struct msm_ringbuffer *adreno_active_ring(struct msm_gpu *gpu);
 
 int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
-		struct adreno_gpu *gpu, const struct adreno_gpu_funcs *funcs);
+		struct adreno_gpu *gpu, const struct adreno_gpu_funcs *funcs,
+		struct msm_gpu_config *config);
 void adreno_gpu_cleanup(struct adreno_gpu *gpu);
 
+void adreno_snapshot(struct msm_gpu *gpu, struct msm_snapshot *snapshot);
+
+int adreno_get_counter(struct msm_gpu *gpu, u32 groupid, u32 countable,
+		u32 *lo, u32 *hi);
+u64 adreno_read_counter(struct msm_gpu *gpu, u32 groupid, int counterid);
+void adreno_put_counter(struct msm_gpu *gpu, u32 groupid, int counterid);
 
 /* ringbuffer helpers (the parts that are adreno specific) */
 
 static inline void
 OUT_PKT0(struct msm_ringbuffer *ring, uint16_t regindx, uint16_t cnt)
 {
-	adreno_wait_ring(ring->gpu, cnt+1);
+	adreno_wait_ring(ring, cnt+1);
 	OUT_RING(ring, CP_TYPE0_PKT | ((cnt-1) << 16) | (regindx & 0x7FFF));
 }
 
@@ -261,19 +262,49 @@
 static inline void
 OUT_PKT2(struct msm_ringbuffer *ring)
 {
-	adreno_wait_ring(ring->gpu, 1);
+	adreno_wait_ring(ring, 1);
 	OUT_RING(ring, CP_TYPE2_PKT);
 }
 
 static inline void
 OUT_PKT3(struct msm_ringbuffer *ring, uint8_t opcode, uint16_t cnt)
 {
-	adreno_wait_ring(ring->gpu, cnt+1);
+	adreno_wait_ring(ring, cnt+1);
 	OUT_RING(ring, CP_TYPE3_PKT | ((cnt-1) << 16) | ((opcode & 0xFF) << 8));
 }
 
+static inline u32 PM4_PARITY(u32 val)
+{
+	return (0x9669 >> (0xF & (val ^
+		(val >> 4) ^ (val >> 8) ^ (val >> 12) ^
+		(val >> 16) ^ ((val) >> 20) ^ (val >> 24) ^
+		(val >> 28)))) & 1;
+}
+
+/* Maximum number of values that can be executed for one opcode */
+#define TYPE4_MAX_PAYLOAD 127
+
+#define PKT4(_reg, _cnt) \
+	(CP_TYPE4_PKT | ((_cnt) << 0) | (PM4_PARITY((_cnt)) << 7) | \
+	 (((_reg) & 0x3FFFF) << 8) | (PM4_PARITY((_reg)) << 27))
+
+static inline void
+OUT_PKT4(struct msm_ringbuffer *ring, uint16_t regindx, uint16_t cnt)
+{
+	adreno_wait_ring(ring, cnt + 1);
+	OUT_RING(ring, PKT4(regindx, cnt));
+}
+
+static inline void
+OUT_PKT7(struct msm_ringbuffer *ring, uint8_t opcode, uint16_t cnt)
+{
+	adreno_wait_ring(ring, cnt + 1);
+	OUT_RING(ring, CP_TYPE7_PKT | (cnt << 0) | (PM4_PARITY(cnt) << 15) |
+		((opcode & 0x7F) << 16) | (PM4_PARITY(opcode) << 23));
+}
+
 /*
- * adreno_checkreg_off() - Checks the validity of a register enum
+ * adreno_reg_check() - Checks the validity of a register enum
  * @gpu:		Pointer to struct adreno_gpu
  * @offset_name:	The register enum that is checked
  */
@@ -284,6 +315,16 @@
 			!gpu->reg_offsets[offset_name]) {
 		BUG();
 	}
+
+	/*
+	 * REG_SKIP is a special value that tells us that the register in
+	 * question isn't implemented on the target, but shouldn't trigger a
+	 * BUG(). This is used to cleanly implement adreno_gpu_write64() and
+	 * adreno_gpu_read64() in a generic fashion.
+	 */
+	if (gpu->reg_offsets[offset_name] == REG_SKIP)
+		return false;
+
 	return true;
 }
 
@@ -305,4 +346,40 @@
 		gpu_write(&gpu->base, reg - 1, data);
 }
 
+static inline void adreno_gpu_write64(struct adreno_gpu *gpu,
+		enum adreno_regs lo, enum adreno_regs hi, u64 data)
+{
+	adreno_gpu_write(gpu, lo, lower_32_bits(data));
+	adreno_gpu_write(gpu, hi, upper_32_bits(data));
+}
+
+static inline uint32_t get_wptr(struct msm_ringbuffer *ring)
+{
+	return (ring->cur - ring->start) % (MSM_GPU_RINGBUFFER_SZ >> 2);
+}
+
+/*
+ * Given a register and a count, return a value to program into
+ * REG_CP_PROTECT_REG(n) - this will block both reads and writes for _len
+ * registers starting at _reg.
+ *
+ * The register base needs to be a multiple of the length. If it is not, the
+ * hardware will quietly mask off the bits for you and shift the size. For
+ * example, if you intend the protection to start at 0x07 for a length of 4
+ * (0x07-0x0A) the hardware will actually protect (0x04-0x07) which might
+ * expose registers you intended to protect!
+ */
+#define ADRENO_PROTECT_RW(_reg, _len) \
+	((1 << 30) | (1 << 29) | \
+	((ilog2((_len)) & 0x1F) << 24) | (((_reg) << 2) & 0xFFFFF))
+
+/*
+ * Same as above, but allow reads over the range. For areas of mixed use (such
+ * as performance counters) this allows us to protect a much larger range with a
+ * single register
+ */
+#define ADRENO_PROTECT_RDONLY(_reg, _len) \
+	((1 << 29) | \
+	((ilog2((_len)) & 0x1F) << 24) | (((_reg) << 2) & 0xFFFFF))
+
 #endif /* __ADRENO_GPU_H__ */
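
Note: PM4_PARITY() above XOR-folds the 32-bit word down to one nibble and
indexes the constant 0x9669, whose bit n is set exactly when n contains an
even number of set bits; PKT4/PKT7 embed that parity in their header dwords.
A standalone sketch (register offset and count picked arbitrarily) that
cross-checks the fold against a popcount reference and prints one PKT4 header:

	#include <stdio.h>
	#include <stdint.h>

	/* same fold-and-lookup as PM4_PARITY() */
	static uint32_t pm4_parity(uint32_t val)
	{
		return (0x9669 >> (0xF & (val ^ (val >> 4) ^ (val >> 8) ^
			(val >> 12) ^ (val >> 16) ^ (val >> 20) ^
			(val >> 24) ^ (val >> 28)))) & 1;
	}

	/* reference: 1 when the word has an even number of set bits */
	static uint32_t even_parity(uint32_t val)
	{
		return !(__builtin_popcount(val) & 1);
	}

	int main(void)
	{
		for (uint32_t v = 0; v < 100000; v++)
			if (pm4_parity(v) != even_parity(v))
				printf("mismatch at %u\n", v);

		/* PKT4 header for (arbitrary) register 0x100, count 2 */
		printf("PKT4(0x100, 2) = 0x%08x\n",
		       0x40000000u | 2u | (pm4_parity(2) << 7) |
		       ((0x100u & 0x3FFFF) << 8) | (pm4_parity(0x100) << 27));
		return 0;
	}
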
diff -ruw linux-4.4.115/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h linux-4.4.115-fbx/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h
--- linux-4.4.115/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h	2019-01-22 16:16:23.487246262 +0100
@@ -8,17 +8,19 @@
 git clone https://github.com/freedreno/envytools.git
 
 The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml               (    398 bytes, from 2015-09-24 17:25:31)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml  (   1453 bytes, from 2015-05-20 20:03:07)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml          (  32901 bytes, from 2015-05-20 20:03:14)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml (  10755 bytes, from 2015-09-14 20:46:55)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml    (  14968 bytes, from 2015-05-20 20:12:27)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml          (  67771 bytes, from 2015-09-14 20:46:55)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml          (  63970 bytes, from 2015-09-14 20:50:12)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/ocmem.xml         (   1773 bytes, from 2015-09-24 17:30:00)
+- ./adreno.xml               (    431 bytes, from 2016-10-24 21:12:27)
+- ./freedreno_copyright.xml  (   1572 bytes, from 2016-10-24 21:12:27)
+- ./adreno/a2xx.xml          (  32901 bytes, from 2016-10-24 21:12:27)
+- ./adreno/adreno_common.xml (  12025 bytes, from 2016-10-24 21:12:27)
+- ./adreno/adreno_pm4.xml    (  19684 bytes, from 2016-10-24 21:12:27)
+- ./adreno/a3xx.xml          (  83840 bytes, from 2016-10-24 21:12:27)
+- ./adreno/a4xx.xml          ( 110708 bytes, from 2016-10-24 21:12:27)
+- ./adreno/a5xx.xml          (  81207 bytes, from 2016-10-26 19:36:59)
+- ./adreno/ocmem.xml         (   1773 bytes, from 2016-10-24 21:12:27)
 
-Copyright (C) 2013-2015 by the following authors:
+Copyright (C) 2013-2016 by the following authors:
 - Rob Clark <robdclark@gmail.com> (robclark)
+- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
 
 Permission is hereby granted, free of charge, to any person obtaining
 a copy of this software and associated documentation files (the
@@ -57,6 +59,7 @@
 	RST_PIX_CNT = 13,
 	RST_VTX_CNT = 14,
 	TILE_FLUSH = 15,
+	STAT_EVENT = 16,
 	CACHE_FLUSH_AND_INV_TS_EVENT = 20,
 	ZPASS_DONE = 21,
 	CACHE_FLUSH_AND_INV_EVENT = 22,
@@ -81,7 +84,6 @@
 	DI_PT_LINESTRIP_ADJ = 11,
 	DI_PT_TRI_ADJ = 12,
 	DI_PT_TRISTRIP_ADJ = 13,
-	DI_PT_PATCHES = 34,
 };
 
 enum pc_di_src_sel {
@@ -109,11 +111,15 @@
 	CP_TYPE1_PKT = 0x40000000,
 	CP_TYPE2_PKT = 0x80000000,
 	CP_TYPE3_PKT = 0xc0000000,
+	CP_TYPE4_PKT = 0x40000000,
+	CP_TYPE7_PKT = 0x70000000,
 };
 
 enum adreno_pm4_type3_packets {
 	CP_ME_INIT = 72,
 	CP_NOP = 16,
+	CP_PREEMPT_ENABLE = 28,
+	CP_PREEMPT_TOKEN = 30,
 	CP_INDIRECT_BUFFER = 63,
 	CP_INDIRECT_BUFFER_PFD = 55,
 	CP_WAIT_FOR_IDLE = 38,
@@ -162,6 +168,7 @@
 	CP_TEST_TWO_MEMS = 113,
 	CP_REG_WR_NO_CTXT = 120,
 	CP_RECORD_PFP_TIMESTAMP = 17,
+	CP_SET_SECURE_MODE = 102,
 	CP_WAIT_FOR_ME = 19,
 	CP_SET_DRAW_STATE = 67,
 	CP_DRAW_INDX_OFFSET = 56,
@@ -172,6 +179,26 @@
 	CP_UNKNOWN_1A = 26,
 	CP_UNKNOWN_4E = 78,
 	CP_WIDE_REG_WRITE = 116,
+	CP_SCRATCH_TO_REG = 77,
+	CP_REG_TO_SCRATCH = 74,
+	CP_WAIT_MEM_WRITES = 18,
+	CP_COND_REG_EXEC = 71,
+	CP_MEM_TO_REG = 66,
+	CP_EXEC_CS = 51,
+	CP_PERFCOUNTER_ACTION = 80,
+	CP_SMMU_TABLE_UPDATE = 83,
+	CP_CONTEXT_REG_BUNCH = 92,
+	CP_YIELD_ENABLE = 28,
+	CP_SKIP_IB2_ENABLE_GLOBAL = 29,
+	CP_SKIP_IB2_ENABLE_LOCAL = 35,
+	CP_SET_SUBDRAW_SIZE = 53,
+	CP_SET_VISIBILITY_OVERRIDE = 100,
+	CP_PREEMPT_ENABLE_GLOBAL = 105,
+	CP_PREEMPT_ENABLE_LOCAL = 106,
+	CP_CONTEXT_SWITCH_YIELD = 107,
+	CP_SET_RENDER_MODE = 108,
+	CP_COMPUTE_CHECKPOINT = 110,
+	CP_MEM_TO_MEM = 115,
 	IN_IB_PREFETCH_END = 23,
 	IN_SUBBLK_PREFETCH = 31,
 	IN_INSTR_PREFETCH = 32,
@@ -190,6 +217,7 @@
 	SB_VERT_SHADER = 4,
 	SB_GEOM_SHADER = 5,
 	SB_FRAG_SHADER = 6,
+	SB_COMPUTE_SHADER = 7,
 };
 
 enum adreno_state_type {
@@ -199,7 +227,11 @@
 
 enum adreno_state_src {
 	SS_DIRECT = 0,
+	SS_INVALID_ALL_IC = 2,
+	SS_INVALID_PART_IC = 3,
 	SS_INDIRECT = 4,
+	SS_INDIRECT_TCM = 5,
+	SS_INDIRECT_STM = 6,
 };
 
 enum a4xx_index_size {
@@ -227,7 +259,7 @@
 {
 	return ((val) << CP_LOAD_STATE_0_STATE_BLOCK__SHIFT) & CP_LOAD_STATE_0_STATE_BLOCK__MASK;
 }
-#define CP_LOAD_STATE_0_NUM_UNIT__MASK				0x7fc00000
+#define CP_LOAD_STATE_0_NUM_UNIT__MASK				0xffc00000
 #define CP_LOAD_STATE_0_NUM_UNIT__SHIFT				22
 static inline uint32_t CP_LOAD_STATE_0_NUM_UNIT(uint32_t val)
 {
@@ -379,7 +411,12 @@
 {
 	return ((val) << CP_DRAW_INDX_OFFSET_0_SOURCE_SELECT__SHIFT) & CP_DRAW_INDX_OFFSET_0_SOURCE_SELECT__MASK;
 }
-#define CP_DRAW_INDX_OFFSET_0_TESSELLATE			0x00000100
+#define CP_DRAW_INDX_OFFSET_0_VIS_CULL__MASK			0x00000300
+#define CP_DRAW_INDX_OFFSET_0_VIS_CULL__SHIFT			8
+static inline uint32_t CP_DRAW_INDX_OFFSET_0_VIS_CULL(enum pc_di_vis_cull_mode val)
+{
+	return ((val) << CP_DRAW_INDX_OFFSET_0_VIS_CULL__SHIFT) & CP_DRAW_INDX_OFFSET_0_VIS_CULL__MASK;
+}
 #define CP_DRAW_INDX_OFFSET_0_INDEX_SIZE__MASK			0x00000c00
 #define CP_DRAW_INDX_OFFSET_0_INDEX_SIZE__SHIFT			10
 static inline uint32_t CP_DRAW_INDX_OFFSET_0_INDEX_SIZE(enum a4xx_index_size val)
@@ -499,5 +536,102 @@
 	return ((val) << CP_SET_BIN_DATA_1_BIN_SIZE_ADDRESS__SHIFT) & CP_SET_BIN_DATA_1_BIN_SIZE_ADDRESS__MASK;
 }
 
+#define REG_CP_REG_TO_MEM_0					0x00000000
+#define CP_REG_TO_MEM_0_REG__MASK				0x0000ffff
+#define CP_REG_TO_MEM_0_REG__SHIFT				0
+static inline uint32_t CP_REG_TO_MEM_0_REG(uint32_t val)
+{
+	return ((val) << CP_REG_TO_MEM_0_REG__SHIFT) & CP_REG_TO_MEM_0_REG__MASK;
+}
+#define CP_REG_TO_MEM_0_CNT__MASK				0x3ff80000
+#define CP_REG_TO_MEM_0_CNT__SHIFT				19
+static inline uint32_t CP_REG_TO_MEM_0_CNT(uint32_t val)
+{
+	return ((val) << CP_REG_TO_MEM_0_CNT__SHIFT) & CP_REG_TO_MEM_0_CNT__MASK;
+}
+#define CP_REG_TO_MEM_0_64B					0x40000000
+#define CP_REG_TO_MEM_0_ACCUMULATE				0x80000000
+
+#define REG_CP_REG_TO_MEM_1					0x00000001
+#define CP_REG_TO_MEM_1_DEST__MASK				0xffffffff
+#define CP_REG_TO_MEM_1_DEST__SHIFT				0
+static inline uint32_t CP_REG_TO_MEM_1_DEST(uint32_t val)
+{
+	return ((val) << CP_REG_TO_MEM_1_DEST__SHIFT) & CP_REG_TO_MEM_1_DEST__MASK;
+}
+
+#define REG_CP_DISPATCH_COMPUTE_0				0x00000000
+
+#define REG_CP_DISPATCH_COMPUTE_1				0x00000001
+#define CP_DISPATCH_COMPUTE_1_X__MASK				0xffffffff
+#define CP_DISPATCH_COMPUTE_1_X__SHIFT				0
+static inline uint32_t CP_DISPATCH_COMPUTE_1_X(uint32_t val)
+{
+	return ((val) << CP_DISPATCH_COMPUTE_1_X__SHIFT) & CP_DISPATCH_COMPUTE_1_X__MASK;
+}
+
+#define REG_CP_DISPATCH_COMPUTE_2				0x00000002
+#define CP_DISPATCH_COMPUTE_2_Y__MASK				0xffffffff
+#define CP_DISPATCH_COMPUTE_2_Y__SHIFT				0
+static inline uint32_t CP_DISPATCH_COMPUTE_2_Y(uint32_t val)
+{
+	return ((val) << CP_DISPATCH_COMPUTE_2_Y__SHIFT) & CP_DISPATCH_COMPUTE_2_Y__MASK;
+}
+
+#define REG_CP_DISPATCH_COMPUTE_3				0x00000003
+#define CP_DISPATCH_COMPUTE_3_Z__MASK				0xffffffff
+#define CP_DISPATCH_COMPUTE_3_Z__SHIFT				0
+static inline uint32_t CP_DISPATCH_COMPUTE_3_Z(uint32_t val)
+{
+	return ((val) << CP_DISPATCH_COMPUTE_3_Z__SHIFT) & CP_DISPATCH_COMPUTE_3_Z__MASK;
+}
+
+#define REG_CP_SET_RENDER_MODE_0				0x00000000
+
+#define REG_CP_SET_RENDER_MODE_1				0x00000001
+#define CP_SET_RENDER_MODE_1_ADDR_0_LO__MASK			0xffffffff
+#define CP_SET_RENDER_MODE_1_ADDR_0_LO__SHIFT			0
+static inline uint32_t CP_SET_RENDER_MODE_1_ADDR_0_LO(uint32_t val)
+{
+	return ((val) << CP_SET_RENDER_MODE_1_ADDR_0_LO__SHIFT) & CP_SET_RENDER_MODE_1_ADDR_0_LO__MASK;
+}
+
+#define REG_CP_SET_RENDER_MODE_2				0x00000002
+#define CP_SET_RENDER_MODE_2_ADDR_0_HI__MASK			0xffffffff
+#define CP_SET_RENDER_MODE_2_ADDR_0_HI__SHIFT			0
+static inline uint32_t CP_SET_RENDER_MODE_2_ADDR_0_HI(uint32_t val)
+{
+	return ((val) << CP_SET_RENDER_MODE_2_ADDR_0_HI__SHIFT) & CP_SET_RENDER_MODE_2_ADDR_0_HI__MASK;
+}
+
+#define REG_CP_SET_RENDER_MODE_3				0x00000003
+#define CP_SET_RENDER_MODE_3_GMEM_ENABLE			0x00000010
+
+#define REG_CP_SET_RENDER_MODE_4				0x00000004
+
+#define REG_CP_SET_RENDER_MODE_5				0x00000005
+#define CP_SET_RENDER_MODE_5_ADDR_1_LEN__MASK			0xffffffff
+#define CP_SET_RENDER_MODE_5_ADDR_1_LEN__SHIFT			0
+static inline uint32_t CP_SET_RENDER_MODE_5_ADDR_1_LEN(uint32_t val)
+{
+	return ((val) << CP_SET_RENDER_MODE_5_ADDR_1_LEN__SHIFT) & CP_SET_RENDER_MODE_5_ADDR_1_LEN__MASK;
+}
+
+#define REG_CP_SET_RENDER_MODE_6				0x00000006
+#define CP_SET_RENDER_MODE_6_ADDR_1_LO__MASK			0xffffffff
+#define CP_SET_RENDER_MODE_6_ADDR_1_LO__SHIFT			0
+static inline uint32_t CP_SET_RENDER_MODE_6_ADDR_1_LO(uint32_t val)
+{
+	return ((val) << CP_SET_RENDER_MODE_6_ADDR_1_LO__SHIFT) & CP_SET_RENDER_MODE_6_ADDR_1_LO__MASK;
+}
+
+#define REG_CP_SET_RENDER_MODE_7				0x00000007
+#define CP_SET_RENDER_MODE_7_ADDR_1_HI__MASK			0xffffffff
+#define CP_SET_RENDER_MODE_7_ADDR_1_HI__SHIFT			0
+static inline uint32_t CP_SET_RENDER_MODE_7_ADDR_1_HI(uint32_t val)
+{
+	return ((val) << CP_SET_RENDER_MODE_7_ADDR_1_HI__SHIFT) & CP_SET_RENDER_MODE_7_ADDR_1_HI__MASK;
+}
+
 
 #endif /* ADRENO_PM4_XML */
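
Note: the generated accessors above are all the same shape -- shift the value
into its field, then mask -- so command-stream dwords are built by OR-ing
field helpers together. A standalone example packing the first CP_REG_TO_MEM
dword (the source register and dword count are arbitrary):

	#include <stdio.h>
	#include <stdint.h>

	#define CP_REG_TO_MEM_0_REG__MASK	0x0000ffff
	#define CP_REG_TO_MEM_0_REG__SHIFT	0
	#define CP_REG_TO_MEM_0_CNT__MASK	0x3ff80000
	#define CP_REG_TO_MEM_0_CNT__SHIFT	19
	#define CP_REG_TO_MEM_0_64B		0x40000000

	int main(void)
	{
		/* copy 2 dwords from (arbitrary) register 0x1234 to a
		 * 64-bit destination address */
		uint32_t dword0 =
			((0x1234u << CP_REG_TO_MEM_0_REG__SHIFT) &
				CP_REG_TO_MEM_0_REG__MASK) |
			((2u << CP_REG_TO_MEM_0_CNT__SHIFT) &
				CP_REG_TO_MEM_0_CNT__MASK) |
			CP_REG_TO_MEM_0_64B;

		printf("CP_REG_TO_MEM dword0 = 0x%08x\n", dword0);
		return 0;
	}
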
diff -ruw linux-4.4.115/drivers/gpu/drm/msm/dsi/dsi.h linux-4.4.115-fbx/drivers/gpu/drm/msm/dsi/dsi.h
--- linux-4.4.115/drivers/gpu/drm/msm/dsi/dsi.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/gpu/drm/msm/dsi/dsi.h	2019-01-22 16:16:23.491246298 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015,2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -89,7 +89,7 @@
 		u32 *clk_pre, u32 *clk_post);
 void msm_dsi_manager_phy_disable(int id);
 int msm_dsi_manager_cmd_xfer(int id, const struct mipi_dsi_msg *msg);
-bool msm_dsi_manager_cmd_xfer_trigger(int id, u32 iova, u32 len);
+bool msm_dsi_manager_cmd_xfer_trigger(int id, u64 iova, u32 len);
 int msm_dsi_manager_register(struct msm_dsi *msm_dsi);
 void msm_dsi_manager_unregister(struct msm_dsi *msm_dsi);
 
@@ -143,7 +143,7 @@
 int msm_dsi_host_cmd_rx(struct mipi_dsi_host *host,
 					const struct mipi_dsi_msg *msg);
 void msm_dsi_host_cmd_xfer_commit(struct mipi_dsi_host *host,
-					u32 iova, u32 len);
+					u64 iova, u32 len);
 int msm_dsi_host_enable(struct mipi_dsi_host *host);
 int msm_dsi_host_disable(struct mipi_dsi_host *host);
 int msm_dsi_host_power_on(struct mipi_dsi_host *host);
diff -ruw linux-4.4.115/drivers/gpu/drm/msm/dsi/dsi_host.c linux-4.4.115-fbx/drivers/gpu/drm/msm/dsi/dsi_host.c
--- linux-4.4.115/drivers/gpu/drm/msm/dsi/dsi_host.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/gpu/drm/msm/dsi/dsi_host.c	2019-10-29 09:26:23.625203002 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015,2017 The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -836,24 +836,21 @@
 {
 	struct drm_device *dev = msm_host->dev;
 	int ret;
-	u32 iova;
+	u64 iova;
 
-	mutex_lock(&dev->struct_mutex);
 	msm_host->tx_gem_obj = msm_gem_new(dev, size, MSM_BO_UNCACHED);
 	if (IS_ERR(msm_host->tx_gem_obj)) {
 		ret = PTR_ERR(msm_host->tx_gem_obj);
 		pr_err("%s: failed to allocate gem, %d\n", __func__, ret);
 		msm_host->tx_gem_obj = NULL;
-		mutex_unlock(&dev->struct_mutex);
 		return ret;
 	}
 
-	ret = msm_gem_get_iova_locked(msm_host->tx_gem_obj, 0, &iova);
+	ret = msm_gem_get_iova(msm_host->tx_gem_obj, NULL, &iova);
 	if (ret) {
 		pr_err("%s: failed to get iova, %d\n", __func__, ret);
 		return ret;
 	}
-	mutex_unlock(&dev->struct_mutex);
 
 	if (iova & 0x07) {
 		pr_err("%s: buf NOT 8 bytes aligned\n", __func__);
@@ -974,7 +971,7 @@
 static int dsi_cmd_dma_tx(struct msm_dsi_host *msm_host, int len)
 {
 	int ret;
-	u32 iova;
+	uint64_t iova;
 	bool triggered;
 
 	ret = msm_gem_get_iova(msm_host->tx_gem_obj, 0, &iova);
@@ -1750,11 +1747,12 @@
 	return ret;
 }
 
-void msm_dsi_host_cmd_xfer_commit(struct mipi_dsi_host *host, u32 iova, u32 len)
+void msm_dsi_host_cmd_xfer_commit(struct mipi_dsi_host *host, u64 iova, u32 len)
 {
 	struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
 
-	dsi_write(msm_host, REG_DSI_DMA_BASE, iova);
+	/* FIXME: verify that the iova fits in 32 bits */
+	dsi_write(msm_host, REG_DSI_DMA_BASE, lower_32_bits(iova));
 	dsi_write(msm_host, REG_DSI_DMA_LEN, len);
 	dsi_write(msm_host, REG_DSI_TRIG_DMA, 1);
 
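
Note: REG_DSI_DMA_BASE is a 32-bit register, which is what the FIXME above is
about. One possible shape for the missing check -- a sketch only, not what the
patch does -- would be to warn and bail when the iova has upper bits set:

	if (WARN_ON(upper_32_bits(iova)))
		return;	/* the DMA base register can only hold 32 bits */

	dsi_write(msm_host, REG_DSI_DMA_BASE, lower_32_bits(iova));
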
diff -ruw linux-4.4.115/drivers/gpu/drm/msm/dsi/dsi_manager.c linux-4.4.115-fbx/drivers/gpu/drm/msm/dsi/dsi_manager.c
--- linux-4.4.115/drivers/gpu/drm/msm/dsi/dsi_manager.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/gpu/drm/msm/dsi/dsi_manager.c	2019-10-29 09:26:23.625203002 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015,2017 The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -774,7 +774,7 @@
 	return ret;
 }
 
-bool msm_dsi_manager_cmd_xfer_trigger(int id, u32 iova, u32 len)
+bool msm_dsi_manager_cmd_xfer_trigger(int id, u64 iova, u32 len)
 {
 	struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
 	struct msm_dsi *msm_dsi0 = dsi_mgr_get_dsi(DSI_0);
diff -ruw linux-4.4.115/drivers/gpu/drm/msm/dsi/dsi.xml.h linux-4.4.115-fbx/drivers/gpu/drm/msm/dsi/dsi.xml.h
--- linux-4.4.115/drivers/gpu/drm/msm/dsi/dsi.xml.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/gpu/drm/msm/dsi/dsi.xml.h	2019-01-22 16:16:23.491246298 +0100
@@ -9,7 +9,7 @@
 
 The rules-ng-ng source files this header was generated from are:
 - /home/robclark/src/freedreno/envytools/rnndb/msm.xml                 (    676 bytes, from 2015-05-20 20:03:14)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml (   1453 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml (   1572 bytes, from 2016-02-10 17:07:21)
 - /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml            (  20915 bytes, from 2015-05-20 20:03:14)
 - /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml      (   2849 bytes, from 2015-09-18 12:07:28)
 - /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml            (  37194 bytes, from 2015-09-18 12:07:28)
@@ -17,11 +17,12 @@
 - /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml            (    602 bytes, from 2015-10-22 16:35:02)
 - /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml         (   1686 bytes, from 2015-05-20 20:03:14)
 - /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml         (    600 bytes, from 2015-05-20 20:03:07)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml           (  29154 bytes, from 2015-08-10 21:25:43)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml           (  41472 bytes, from 2016-01-22 18:18:18)
 - /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml             (  10416 bytes, from 2015-05-20 20:03:14)
 
 Copyright (C) 2013-2015 by the following authors:
 - Rob Clark <robdclark@gmail.com> (robclark)
+- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
 
 Permission is hereby granted, free of charge, to any person obtaining
 a copy of this software and associated documentation files (the
diff -ruw linux-4.4.115/drivers/gpu/drm/msm/edp/edp.c linux-4.4.115-fbx/drivers/gpu/drm/msm/edp/edp.c
--- linux-4.4.115/drivers/gpu/drm/msm/edp/edp.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/gpu/drm/msm/edp/edp.c	2019-01-22 16:16:23.495246334 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014-2015,2018 The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -54,7 +54,7 @@
 		ret = -ENOMEM;
 		goto fail;
 	}
-	DBG("eDP probed=%p", edp);
+	DBG("eDP probed=%pK", edp);
 
 	edp->pdev = pdev;
 	platform_set_drvdata(pdev, edp);
diff -ruw linux-4.4.115/drivers/gpu/drm/msm/edp/edp.xml.h linux-4.4.115-fbx/drivers/gpu/drm/msm/edp/edp.xml.h
--- linux-4.4.115/drivers/gpu/drm/msm/edp/edp.xml.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/gpu/drm/msm/edp/edp.xml.h	2019-01-22 16:16:23.495246334 +0100
@@ -9,7 +9,7 @@
 
 The rules-ng-ng source files this header was generated from are:
 - /home/robclark/src/freedreno/envytools/rnndb/msm.xml                 (    676 bytes, from 2015-05-20 20:03:14)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml (   1453 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml (   1572 bytes, from 2016-02-10 17:07:21)
 - /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml            (  20915 bytes, from 2015-05-20 20:03:14)
 - /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml      (   2849 bytes, from 2015-09-18 12:07:28)
 - /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml            (  37194 bytes, from 2015-09-18 12:07:28)
@@ -17,11 +17,12 @@
 - /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml            (    602 bytes, from 2015-10-22 16:35:02)
 - /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml         (   1686 bytes, from 2015-05-20 20:03:14)
 - /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml         (    600 bytes, from 2015-05-20 20:03:07)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml           (  29154 bytes, from 2015-08-10 21:25:43)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml           (  41472 bytes, from 2016-01-22 18:18:18)
 - /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml             (  10416 bytes, from 2015-05-20 20:03:14)
 
 Copyright (C) 2013-2015 by the following authors:
 - Rob Clark <robdclark@gmail.com> (robclark)
+- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
 
 Permission is hereby granted, free of charge, to any person obtaining
 a copy of this software and associated documentation files (the
diff -ruw linux-4.4.115/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c linux-4.4.115-fbx/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c
--- linux-4.4.115/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c	2019-01-22 16:16:23.499246370 +0100
@@ -106,7 +106,7 @@
 	hdmi_set_mode(hdmi, true);
 
 	if (hdmi->hdcp_ctrl)
-		hdmi_hdcp_on(hdmi->hdcp_ctrl);
+		hdmi_hdcp_ctrl_on(hdmi->hdcp_ctrl);
 }
 
 static void hdmi_bridge_enable(struct drm_bridge *bridge)
@@ -124,7 +124,7 @@
 	struct hdmi_phy *phy = hdmi->phy;
 
 	if (hdmi->hdcp_ctrl)
-		hdmi_hdcp_off(hdmi->hdcp_ctrl);
+		hdmi_hdcp_ctrl_off(hdmi->hdcp_ctrl);
 
 	DBG("power down");
 	hdmi_set_mode(hdmi, false);
diff -ruw linux-4.4.115/drivers/gpu/drm/msm/hdmi/hdmi.c linux-4.4.115-fbx/drivers/gpu/drm/msm/hdmi/hdmi.c
--- linux-4.4.115/drivers/gpu/drm/msm/hdmi/hdmi.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/gpu/drm/msm/hdmi/hdmi.c	2019-01-22 16:16:23.499246370 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014, 2016-2017 The Linux Foundation. All rights reserved.
  * Copyright (C) 2013 Red Hat
  * Author: Rob Clark <robdclark@gmail.com>
  *
@@ -56,7 +56,7 @@
 
 	/* Process HDCP: */
 	if (hdmi->hdcp_ctrl)
-		hdmi_hdcp_irq(hdmi->hdcp_ctrl);
+		hdmi_hdcp_ctrl_irq(hdmi->hdcp_ctrl);
 
 	/* TODO audio.. */
 
@@ -75,7 +75,8 @@
 		flush_workqueue(hdmi->workq);
 		destroy_workqueue(hdmi->workq);
 	}
-	hdmi_hdcp_destroy(hdmi);
+
+	hdmi_hdcp_ctrl_destroy(hdmi);
 	if (phy)
 		phy->funcs->destroy(phy);
 
@@ -94,7 +95,7 @@
 	struct hdmi_platform_config *config = pdev->dev.platform_data;
 	struct hdmi *hdmi = NULL;
 	struct resource *res;
-	int i, ret;
+	int i, ret = 0;
 
 	hdmi = devm_kzalloc(&pdev->dev, sizeof(*hdmi), GFP_KERNEL);
 	if (!hdmi) {
@@ -118,9 +119,19 @@
 		}
 	}
 
+	res = platform_get_resource_byname(pdev,
+			IORESOURCE_MEM, config->mmio_name);
+	if (!res) {
+		dev_err(&pdev->dev, "failed to find ctrl resource\n");
+		ret = -ENOMEM;
+		goto fail;
+	}
+	hdmi->mmio_len = (u32)resource_size(res);
 	hdmi->mmio = msm_ioremap(pdev, config->mmio_name, "HDMI");
 	if (IS_ERR(hdmi->mmio)) {
 		ret = PTR_ERR(hdmi->mmio);
+		dev_info(&pdev->dev, "can't map hdmi resource\n");
+		hdmi->mmio = NULL;
 		goto fail;
 	}
 
@@ -129,13 +140,39 @@
 		config->mmio_name);
 	hdmi->mmio_phy_addr = res->start;
 
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+			config->qfprom_mmio_name);
+
+	if (!res) {
+		dev_err(&pdev->dev, "failed to find qfprom resource\n");
+		ret = -ENOMEM;
+		goto fail;
+	}
+	hdmi->qfprom_mmio_len = (u32)resource_size(res);
+
 	hdmi->qfprom_mmio = msm_ioremap(pdev,
 		config->qfprom_mmio_name, "HDMI_QFPROM");
+
 	if (IS_ERR(hdmi->qfprom_mmio)) {
-		dev_info(&pdev->dev, "can't find qfprom resource\n");
+		dev_info(&pdev->dev, "can't map qfprom resource\n");
 		hdmi->qfprom_mmio = NULL;
 	}
 
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+			config->hdcp_mmio_name);
+	if (!res) {
+		dev_err(&pdev->dev, "failed to find hdcp resource\n");
+		ret = -ENOMEM;
+		goto fail;
+	}
+	hdmi->hdcp_mmio_len = (u32)resource_size(res);
+	hdmi->hdcp_mmio = msm_ioremap(pdev,
+		config->hdcp_mmio_name, "HDMI_HDCP");
+	if (IS_ERR(hdmi->hdcp_mmio)) {
+		dev_info(&pdev->dev, "can't map hdcp resource\n");
+		hdmi->hdcp_mmio = NULL;
+	}
+
 	hdmi->hpd_regs = devm_kzalloc(&pdev->dev, sizeof(hdmi->hpd_regs[0]) *
 			config->hpd_reg_cnt, GFP_KERNEL);
 	if (!hdmi->hpd_regs) {
@@ -228,11 +265,16 @@
 		goto fail;
 	}
 
-	hdmi->hdcp_ctrl = hdmi_hdcp_init(hdmi);
+	hdmi->hdcp_ctrl = hdmi_hdcp_ctrl_init(hdmi);
 	if (IS_ERR(hdmi->hdcp_ctrl)) {
 		dev_warn(&pdev->dev, "failed to init hdcp: disabled\n");
 		hdmi->hdcp_ctrl = NULL;
 	}
+	/*
+	 * Keep this false for now to avoid ifdefs; this flag will go away
+	 * once HDCP SW support is added to the HDMI DRM driver.
+	 */
+	hdmi->is_hdcp_supported = false;
 
 	return hdmi;
 
@@ -382,13 +424,40 @@
 static struct hdmi_platform_config hdmi_tx_8996_config = {
 		.phy_init = NULL,
 		HDMI_CFG(pwr_reg, none),
-		HDMI_CFG(hpd_reg, none),
+		HDMI_CFG(hpd_reg, 8x74),
 		HDMI_CFG(pwr_clk, 8x74),
 		HDMI_CFG(hpd_clk, 8x74),
 		.hpd_freq      = hpd_clk_freq_8x74,
 };
 
+/* TODO */
+static const char *pwr_reg_names_8x98[] = {"core-vdda", "core-vcc"};
+/* TODO */
+static const char *hpd_reg_names_8x98[] = {"hpd-gdsc", "hpd-5v"};
+
+static const char *pwr_clk_names_8x98[] = {"core_extp_clk",
+				   "hpd_alt_iface_clk"};
+
+static const char *hpd_clk_names_8x98[] = {"hpd_iface_clk",
+				   "hpd_core_clk",
+				   "hpd_mdp_core_clk",
+				   "mnoc_clk",
+				   "hpd_misc_ahb_clk",
+				   "hpd_bus_clk"};
+
+static unsigned long hpd_clk_freq_8x98[] = {0, 19200000, 0, 0, 0, 0};
+
+static struct hdmi_platform_config hdmi_tx_8998_config = {
+		.phy_init = NULL,
+		HDMI_CFG(pwr_reg, 8x98),
+		HDMI_CFG(hpd_reg, 8x98),
+		HDMI_CFG(pwr_clk, 8x98),
+		HDMI_CFG(hpd_clk, 8x98),
+		.hpd_freq      = hpd_clk_freq_8x98,
+};
+
 static const struct of_device_id dt_match[] = {
+	{ .compatible = "qcom,hdmi-tx-8998", .data = &hdmi_tx_8998_config },
 	{ .compatible = "qcom,hdmi-tx-8996", .data = &hdmi_tx_8996_config },
 	{ .compatible = "qcom,hdmi-tx-8994", .data = &hdmi_tx_8994_config },
 	{ .compatible = "qcom,hdmi-tx-8084", .data = &hdmi_tx_8084_config },
@@ -424,7 +493,6 @@
 #ifdef CONFIG_OF
 	struct device_node *of_node = dev->of_node;
 	const struct of_device_id *match;
-
 	match = of_match_node(dt_match, of_node);
 	if (match && match->data) {
 		hdmi_cfg = (struct hdmi_platform_config *)match->data;
@@ -436,18 +504,20 @@
 
 	hdmi_cfg->mmio_name     = "core_physical";
 	hdmi_cfg->qfprom_mmio_name = "qfprom_physical";
+	hdmi_cfg->hdcp_mmio_name = "hdcp_physical";
 	hdmi_cfg->ddc_clk_gpio  = get_gpio(dev, of_node, "qcom,hdmi-tx-ddc-clk");
 	hdmi_cfg->ddc_data_gpio = get_gpio(dev, of_node, "qcom,hdmi-tx-ddc-data");
 	hdmi_cfg->hpd_gpio      = get_gpio(dev, of_node, "qcom,hdmi-tx-hpd");
 	hdmi_cfg->mux_en_gpio   = get_gpio(dev, of_node, "qcom,hdmi-tx-mux-en");
 	hdmi_cfg->mux_sel_gpio  = get_gpio(dev, of_node, "qcom,hdmi-tx-mux-sel");
 	hdmi_cfg->mux_lpm_gpio  = get_gpio(dev, of_node, "qcom,hdmi-tx-mux-lpm");
-
+	hdmi_cfg->hpd5v_gpio    = get_gpio(dev, of_node, "qcom,hdmi-tx-hpd5v");
 #else
 	static struct hdmi_platform_config config = {};
 	static const char *hpd_clk_names[] = {
 			"core_clk", "master_iface_clk", "slave_iface_clk",
 	};
+
 	if (cpu_is_apq8064()) {
 		static const char *hpd_reg_names[] = {"8921_hdmi_mvs"};
 		config.phy_init      = hdmi_phy_8960_init;
diff -ruw linux-4.4.115/drivers/gpu/drm/msm/hdmi/hdmi.h linux-4.4.115-fbx/drivers/gpu/drm/msm/hdmi/hdmi.h
--- linux-4.4.115/drivers/gpu/drm/msm/hdmi/hdmi.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/gpu/drm/msm/hdmi/hdmi.h	2019-01-22 16:16:23.499246370 +0100
@@ -27,6 +27,11 @@
 #include "msm_drv.h"
 #include "hdmi.xml.h"
 
+#define HDMI_SEC_TO_MS 1000
+#define HDMI_MS_TO_US 1000
+#define HDMI_SEC_TO_US (HDMI_SEC_TO_MS * HDMI_MS_TO_US)
+#define HDMI_KHZ_TO_HZ 1000
+#define HDMI_BUSY_WAIT_DELAY_US 100
 
 struct hdmi_phy;
 struct hdmi_platform_config;
@@ -45,15 +50,23 @@
 
 	const struct hdmi_platform_config *config;
 
+	/* hpd state: */
+	bool hpd_off;
+
 	/* audio state: */
 	struct hdmi_audio audio;
 
 	/* video state: */
 	bool power_on;
 	unsigned long int pixclock;
+	unsigned long int actual_pixclock;
 
 	void __iomem *mmio;
 	void __iomem *qfprom_mmio;
+	void __iomem *hdcp_mmio;
+	u32 mmio_len;
+	u32 qfprom_mmio_len;
+	u32 hdcp_mmio_len;
 	phys_addr_t mmio_phy_addr;
 
 	struct regulator **hpd_regs;
@@ -70,12 +83,16 @@
 	struct drm_encoder *encoder;
 
 	bool hdmi_mode;               /* are we in hdmi mode? */
-
+	bool is_hdcp_supported;
 	int irq;
+	void (*ddc_sw_done_cb)(void *data);
+	void *sw_done_cb_data;
 	struct workqueue_struct *workq;
 
 	struct hdmi_hdcp_ctrl *hdcp_ctrl;
-
+	bool use_hard_timeout;
+	int busy_wait_us;
+	u32 timeout_count;
 	/*
 	* spinlock to protect registers shared by different execution
 	* REG_HDMI_CTRL
@@ -91,7 +108,7 @@
 	struct hdmi_phy *(*phy_init)(struct hdmi *hdmi);
 	const char *mmio_name;
 	const char *qfprom_mmio_name;
-
+	const char *hdcp_mmio_name;
 	/* regulators that need to be on for hpd: */
 	const char **hpd_reg_names;
 	int hpd_reg_cnt;
@@ -110,12 +127,26 @@
 	int pwr_clk_cnt;
 
 	/* gpio's: */
-	int ddc_clk_gpio, ddc_data_gpio, hpd_gpio, mux_en_gpio, mux_sel_gpio;
+	int ddc_clk_gpio, ddc_data_gpio;
+	int hpd_gpio, mux_en_gpio;
+	int mux_sel_gpio, hpd5v_gpio;
 	int mux_lpm_gpio;
 };
 
+struct hdmi_i2c_adapter {
+	struct i2c_adapter base;
+	struct hdmi *hdmi;
+	bool sw_done;
+	wait_queue_head_t ddc_event;
+};
+
 void hdmi_set_mode(struct hdmi *hdmi, bool power_on);
 
+#define to_hdmi_i2c_adapter(x) container_of(x, struct hdmi_i2c_adapter, base)
+
+int ddc_clear_irq(struct hdmi *hdmi);
+void init_ddc(struct hdmi *hdmi);
+
 static inline void hdmi_write(struct hdmi *hdmi, u32 reg, u32 data)
 {
 	msm_writel(data, hdmi->mmio + reg);
@@ -185,12 +216,19 @@
 struct i2c_adapter *hdmi_i2c_init(struct hdmi *hdmi);
 
 /*
+ * DDC utility functions
+ */
+int hdmi_ddc_read(struct hdmi *hdmi, u16 addr, u8 offset,
+				  u8 *data, u16 data_len, bool self_retry);
+int hdmi_ddc_write(struct hdmi *hdmi, u16 addr, u8 offset,
+				   u8 *data, u16 data_len, bool self_retry);
+/*
  * hdcp
  */
-struct hdmi_hdcp_ctrl *hdmi_hdcp_init(struct hdmi *hdmi);
-void hdmi_hdcp_destroy(struct hdmi *hdmi);
-void hdmi_hdcp_on(struct hdmi_hdcp_ctrl *hdcp_ctrl);
-void hdmi_hdcp_off(struct hdmi_hdcp_ctrl *hdcp_ctrl);
-void hdmi_hdcp_irq(struct hdmi_hdcp_ctrl *hdcp_ctrl);
+struct hdmi_hdcp_ctrl *hdmi_hdcp_ctrl_init(struct hdmi *hdmi);
+void hdmi_hdcp_ctrl_destroy(struct hdmi *hdmi);
+void hdmi_hdcp_ctrl_on(struct hdmi_hdcp_ctrl *hdcp_ctrl);
+void hdmi_hdcp_ctrl_off(struct hdmi_hdcp_ctrl *hdcp_ctrl);
+void hdmi_hdcp_ctrl_irq(struct hdmi_hdcp_ctrl *hdcp_ctrl);
 
 #endif /* __HDMI_CONNECTOR_H__ */
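
Note: the DDC helpers declared above take the peripheral bus address, a
register offset, the transfer buffer, and a self_retry flag. A hypothetical
read of one 128-byte EDID-style block (0xA0 is the conventional DDC write
address; all values here are illustrative):

	u8 block[128];
	int ret;

	ret = hdmi_ddc_read(hdmi, 0xA0, 0x00, block, sizeof(block), true);
	if (ret)
		DBG("DDC read failed: %d", ret);
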
diff -ruw linux-4.4.115/drivers/gpu/drm/msm/hdmi/hdmi_hdcp.c linux-4.4.115-fbx/drivers/gpu/drm/msm/hdmi/hdmi_hdcp.c
--- linux-4.4.115/drivers/gpu/drm/msm/hdmi/hdmi_hdcp.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/gpu/drm/msm/hdmi/hdmi_hdcp.c	2019-01-22 16:16:23.499246370 +0100
@@ -1,4 +1,4 @@
-/* Copyright (c) 2010-2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2010-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -14,6 +14,7 @@
 #include "hdmi.h"
 #include <linux/qcom_scm.h>
 
+#ifdef CONFIG_DRM_MSM_HDCP
 #define HDCP_REG_ENABLE 0x01
 #define HDCP_REG_DISABLE 0x00
 #define HDCP_PORT_ADDR 0x74
@@ -84,84 +85,6 @@
 	bool max_dev_exceeded;
 };
 
-static int hdmi_ddc_read(struct hdmi *hdmi, u16 addr, u8 offset,
-	u8 *data, u16 data_len)
-{
-	int rc;
-	int retry = 5;
-	struct i2c_msg msgs[] = {
-		{
-			.addr	= addr >> 1,
-			.flags	= 0,
-			.len	= 1,
-			.buf	= &offset,
-		}, {
-			.addr	= addr >> 1,
-			.flags	= I2C_M_RD,
-			.len	= data_len,
-			.buf	= data,
-		}
-	};
-
-	DBG("Start DDC read");
-retry:
-	rc = i2c_transfer(hdmi->i2c, msgs, 2);
-
-	retry--;
-	if (rc == 2)
-		rc = 0;
-	else if (retry > 0)
-		goto retry;
-	else
-		rc = -EIO;
-
-	DBG("End DDC read %d", rc);
-
-	return rc;
-}
-
-#define HDCP_DDC_WRITE_MAX_BYTE_NUM 32
-
-static int hdmi_ddc_write(struct hdmi *hdmi, u16 addr, u8 offset,
-	u8 *data, u16 data_len)
-{
-	int rc;
-	int retry = 10;
-	u8 buf[HDCP_DDC_WRITE_MAX_BYTE_NUM];
-	struct i2c_msg msgs[] = {
-		{
-			.addr	= addr >> 1,
-			.flags	= 0,
-			.len	= 1,
-		}
-	};
-
-	DBG("Start DDC write");
-	if (data_len > (HDCP_DDC_WRITE_MAX_BYTE_NUM - 1)) {
-		pr_err("%s: write size too big\n", __func__);
-		return -ERANGE;
-	}
-
-	buf[0] = offset;
-	memcpy(&buf[1], data, data_len);
-	msgs[0].buf = buf;
-	msgs[0].len = data_len + 1;
-retry:
-	rc = i2c_transfer(hdmi->i2c, msgs, 1);
-
-	retry--;
-	if (rc == 1)
-		rc = 0;
-	else if (retry > 0)
-		goto retry;
-	else
-		rc = -EIO;
-
-	DBG("End DDC write %d", rc);
-
-	return rc;
-}
-
 static int hdmi_hdcp_scm_wr(struct hdmi_hdcp_ctrl *hdcp_ctrl, u32 *preg,
 	u32 *pdata, u32 count)
 {
@@ -202,7 +125,7 @@
 	return ret;
 }
 
-void hdmi_hdcp_irq(struct hdmi_hdcp_ctrl *hdcp_ctrl)
+void hdmi_hdcp_ctrl_irq(struct hdmi_hdcp_ctrl *hdcp_ctrl)
 {
 	struct hdmi *hdmi = hdcp_ctrl->hdmi;
 	u32 reg_val, hdcp_int_status;
@@ -1310,7 +1233,7 @@
 	}
 }
 
-void hdmi_hdcp_on(struct hdmi_hdcp_ctrl *hdcp_ctrl)
+void hdmi_hdcp_ctrl_on(struct hdmi_hdcp_ctrl *hdcp_ctrl)
 {
 	struct hdmi *hdmi = hdcp_ctrl->hdmi;
 	u32 reg_val;
@@ -1335,7 +1258,7 @@
 	queue_work(hdmi->workq, &hdcp_ctrl->hdcp_auth_work);
 }
 
-void hdmi_hdcp_off(struct hdmi_hdcp_ctrl *hdcp_ctrl)
+void hdmi_hdcp_ctrl_off(struct hdmi_hdcp_ctrl *hdcp_ctrl)
 {
 	struct hdmi *hdmi = hdcp_ctrl->hdmi;
 	unsigned long flags;
@@ -1399,7 +1322,7 @@
 	DBG("HDCP: Off");
 }
 
-struct hdmi_hdcp_ctrl *hdmi_hdcp_init(struct hdmi *hdmi)
+struct hdmi_hdcp_ctrl *hdmi_hdcp_ctrl_init(struct hdmi *hdmi)
 {
 	struct hdmi_hdcp_ctrl *hdcp_ctrl = NULL;
 
@@ -1428,10 +1351,33 @@
 	return hdcp_ctrl;
 }
 
-void hdmi_hdcp_destroy(struct hdmi *hdmi)
+void hdmi_hdcp_ctrl_destroy(struct hdmi *hdmi)
 {
 	if (hdmi && hdmi->hdcp_ctrl) {
 		kfree(hdmi->hdcp_ctrl);
 		hdmi->hdcp_ctrl = NULL;
 	}
 }
+
+#else
+struct hdmi_hdcp_ctrl *hdmi_hdcp_ctrl_init(struct hdmi *hdmi)
+{
+	return NULL;
+}
+
+void hdmi_hdcp_ctrl_destroy(struct hdmi *hdmi)
+{
+}
+
+void hdmi_hdcp_ctrl_on(struct hdmi_hdcp_ctrl *hdcp_ctrl)
+{
+}
+
+void hdmi_hdcp_ctrl_off(struct hdmi_hdcp_ctrl *hdcp_ctrl)
+{
+}
+
+void hdmi_hdcp_ctrl_irq(struct hdmi_hdcp_ctrl *hdcp_ctrl)
+{
+}
+#endif
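
Note: with CONFIG_DRM_MSM_HDCP disabled, hdmi_hdcp_ctrl_init() returns NULL
and the remaining entry points become no-ops, so the existing NULL guards at
the call sites keep working without further ifdefs, e.g.:

	if (hdmi->hdcp_ctrl)
		hdmi_hdcp_ctrl_irq(hdmi->hdcp_ctrl);
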
diff -ruw linux-4.4.115/drivers/gpu/drm/msm/hdmi/hdmi_i2c.c linux-4.4.115-fbx/drivers/gpu/drm/msm/hdmi/hdmi_i2c.c
--- linux-4.4.115/drivers/gpu/drm/msm/hdmi/hdmi_i2c.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/gpu/drm/msm/hdmi/hdmi_i2c.c	2019-01-22 16:16:23.499246370 +0100
@@ -17,66 +17,16 @@
 
 #include "hdmi.h"
 
-struct hdmi_i2c_adapter {
-	struct i2c_adapter base;
-	struct hdmi *hdmi;
-	bool sw_done;
-	wait_queue_head_t ddc_event;
-};
-#define to_hdmi_i2c_adapter(x) container_of(x, struct hdmi_i2c_adapter, base)
-
-static void init_ddc(struct hdmi_i2c_adapter *hdmi_i2c)
-{
-	struct hdmi *hdmi = hdmi_i2c->hdmi;
-
-	hdmi_write(hdmi, REG_HDMI_DDC_CTRL,
-			HDMI_DDC_CTRL_SW_STATUS_RESET);
-	hdmi_write(hdmi, REG_HDMI_DDC_CTRL,
-			HDMI_DDC_CTRL_SOFT_RESET);
-
-	hdmi_write(hdmi, REG_HDMI_DDC_SPEED,
-			HDMI_DDC_SPEED_THRESHOLD(2) |
-			HDMI_DDC_SPEED_PRESCALE(10));
-
-	hdmi_write(hdmi, REG_HDMI_DDC_SETUP,
-			HDMI_DDC_SETUP_TIMEOUT(0xff));
+#define MAX_TRANSACTIONS 4
 
-	/* enable reference timer for 27us */
-	hdmi_write(hdmi, REG_HDMI_DDC_REF,
-			HDMI_DDC_REF_REFTIMER_ENABLE |
-			HDMI_DDC_REF_REFTIMER(27));
-}
+#define SDE_DDC_TXN_CNT_MASK 0x07ff0000
+#define SDE_DDC_TXN_CNT_SHIFT 16
 
-static int ddc_clear_irq(struct hdmi_i2c_adapter *hdmi_i2c)
+static inline uint32_t SDE_HDMI_I2C_TRANSACTION_REG_CNT(uint32_t val)
 {
-	struct hdmi *hdmi = hdmi_i2c->hdmi;
-	struct drm_device *dev = hdmi->dev;
-	uint32_t retry = 0xffff;
-	uint32_t ddc_int_ctrl;
-
-	do {
-		--retry;
-
-		hdmi_write(hdmi, REG_HDMI_DDC_INT_CTRL,
-				HDMI_DDC_INT_CTRL_SW_DONE_ACK |
-				HDMI_DDC_INT_CTRL_SW_DONE_MASK);
-
-		ddc_int_ctrl = hdmi_read(hdmi, REG_HDMI_DDC_INT_CTRL);
-
-	} while ((ddc_int_ctrl & HDMI_DDC_INT_CTRL_SW_DONE_INT) && retry);
-
-	if (!retry) {
-		dev_err(dev->dev, "timeout waiting for DDC\n");
-		return -ETIMEDOUT;
-	}
-
-	hdmi_i2c->sw_done = false;
-
-	return 0;
+	return ((val) << SDE_DDC_TXN_CNT_SHIFT) & SDE_DDC_TXN_CNT_MASK;
 }
 
-#define MAX_TRANSACTIONS 4
-
 static bool sw_done(struct hdmi_i2c_adapter *hdmi_i2c)
 {
 	struct hdmi *hdmi = hdmi_i2c->hdmi;
@@ -115,12 +65,13 @@
 
 	WARN_ON(!(hdmi_read(hdmi, REG_HDMI_CTRL) & HDMI_CTRL_ENABLE));
 
+
 	if (num == 0)
 		return num;
 
-	init_ddc(hdmi_i2c);
+	init_ddc(hdmi);
 
-	ret = ddc_clear_irq(hdmi_i2c);
+	ret = ddc_clear_irq(hdmi);
 	if (ret)
 		return ret;
 
@@ -155,7 +106,7 @@
 			}
 		}
 
-		i2c_trans = HDMI_I2C_TRANSACTION_REG_CNT(p->len) |
+		i2c_trans = SDE_HDMI_I2C_TRANSACTION_REG_CNT(p->len) |
 				HDMI_I2C_TRANSACTION_REG_RW(
 						(p->flags & I2C_M_RD) ? DDC_READ : DDC_WRITE) |
 				HDMI_I2C_TRANSACTION_REG_START;
@@ -180,6 +131,10 @@
 				hdmi_read(hdmi, REG_HDMI_DDC_SW_STATUS),
 				hdmi_read(hdmi, REG_HDMI_DDC_HW_STATUS),
 				hdmi_read(hdmi, REG_HDMI_DDC_INT_CTRL));
+		if (hdmi->use_hard_timeout) {
+			hdmi->use_hard_timeout = false;
+			hdmi->timeout_count = 0;
+		}
 		return ret;
 	}
 
@@ -213,6 +168,10 @@
 		}
 	}
 
+	if (hdmi->use_hard_timeout) {
+		hdmi->use_hard_timeout = false;
+		hdmi->timeout_count = jiffies_to_msecs(ret);
+	}
 	return i;
 }
 
diff -ruw linux-4.4.115/drivers/gpu/drm/msm/hdmi/hdmi.xml.h linux-4.4.115-fbx/drivers/gpu/drm/msm/hdmi/hdmi.xml.h
--- linux-4.4.115/drivers/gpu/drm/msm/hdmi/hdmi.xml.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/gpu/drm/msm/hdmi/hdmi.xml.h	2019-01-22 16:16:23.499246370 +0100
@@ -9,7 +9,7 @@
 
 The rules-ng-ng source files this header was generated from are:
 - /home/robclark/src/freedreno/envytools/rnndb/msm.xml                 (    676 bytes, from 2015-05-20 20:03:14)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml (   1453 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml (   1572 bytes, from 2016-02-10 17:07:21)
 - /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml            (  20915 bytes, from 2015-05-20 20:03:14)
 - /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml      (   2849 bytes, from 2015-09-18 12:07:28)
 - /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml            (  37194 bytes, from 2015-09-18 12:07:28)
@@ -17,11 +17,12 @@
 - /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml            (    602 bytes, from 2015-10-22 16:35:02)
 - /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml         (   1686 bytes, from 2015-05-20 20:03:14)
 - /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml         (    600 bytes, from 2015-05-20 20:03:07)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml           (  29154 bytes, from 2015-08-10 21:25:43)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml           (  41472 bytes, from 2016-01-22 18:18:18)
 - /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml             (  10416 bytes, from 2015-05-20 20:03:14)
 
-Copyright (C) 2013-2015 by the following authors:
+Copyright (C) 2013-2016 by the following authors:
 - Rob Clark <robdclark@gmail.com> (robclark)
+- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
 
 Permission is hereby granted, free of charge, to any person obtaining
 a copy of this software and associated documentation files (the
@@ -110,6 +111,8 @@
 #define HDMI_INFOFRAME_CTRL0_AUDIO_INFO_SOURCE			0x00000040
 #define HDMI_INFOFRAME_CTRL0_AUDIO_INFO_UPDATE			0x00000080
 
+#define REG_HDMI_INFOFRAME_CTRL1				0x00000030
+
 #define REG_HDMI_GEN_PKT_CTRL					0x00000034
 #define HDMI_GEN_PKT_CTRL_GENERIC0_SEND				0x00000001
 #define HDMI_GEN_PKT_CTRL_GENERIC0_CONT				0x00000002
@@ -149,6 +152,7 @@
 
 #define REG_HDMI_GENERIC1_HDR					0x000000a4
 
+#define MAX_REG_HDMI_GENERIC1_INDEX				6
 static inline uint32_t REG_HDMI_GENERIC1(uint32_t i0) { return 0x000000a8 + 0x4*i0; }
 
 static inline uint32_t REG_HDMI_ACR(enum hdmi_acr_cts i0) { return 0x000000c4 + 0x8*i0; }
@@ -462,13 +466,13 @@
 #define REG_HDMI_CEC_RD_FILTER					0x000002b0
 
 #define REG_HDMI_ACTIVE_HSYNC					0x000002b4
-#define HDMI_ACTIVE_HSYNC_START__MASK				0x00000fff
+#define HDMI_ACTIVE_HSYNC_START__MASK				0x00001fff
 #define HDMI_ACTIVE_HSYNC_START__SHIFT				0
 static inline uint32_t HDMI_ACTIVE_HSYNC_START(uint32_t val)
 {
 	return ((val) << HDMI_ACTIVE_HSYNC_START__SHIFT) & HDMI_ACTIVE_HSYNC_START__MASK;
 }
-#define HDMI_ACTIVE_HSYNC_END__MASK				0x0fff0000
+#define HDMI_ACTIVE_HSYNC_END__MASK				0x1fff0000
 #define HDMI_ACTIVE_HSYNC_END__SHIFT				16
 static inline uint32_t HDMI_ACTIVE_HSYNC_END(uint32_t val)
 {
@@ -476,13 +480,13 @@
 }
 
 #define REG_HDMI_ACTIVE_VSYNC					0x000002b8
-#define HDMI_ACTIVE_VSYNC_START__MASK				0x00000fff
+#define HDMI_ACTIVE_VSYNC_START__MASK				0x00001fff
 #define HDMI_ACTIVE_VSYNC_START__SHIFT				0
 static inline uint32_t HDMI_ACTIVE_VSYNC_START(uint32_t val)
 {
 	return ((val) << HDMI_ACTIVE_VSYNC_START__SHIFT) & HDMI_ACTIVE_VSYNC_START__MASK;
 }
-#define HDMI_ACTIVE_VSYNC_END__MASK				0x0fff0000
+#define HDMI_ACTIVE_VSYNC_END__MASK				0x1fff0000
 #define HDMI_ACTIVE_VSYNC_END__SHIFT				16
 static inline uint32_t HDMI_ACTIVE_VSYNC_END(uint32_t val)
 {
@@ -490,13 +494,13 @@
 }
 
 #define REG_HDMI_VSYNC_ACTIVE_F2				0x000002bc
-#define HDMI_VSYNC_ACTIVE_F2_START__MASK			0x00000fff
+#define HDMI_VSYNC_ACTIVE_F2_START__MASK			0x00001fff
 #define HDMI_VSYNC_ACTIVE_F2_START__SHIFT			0
 static inline uint32_t HDMI_VSYNC_ACTIVE_F2_START(uint32_t val)
 {
 	return ((val) << HDMI_VSYNC_ACTIVE_F2_START__SHIFT) & HDMI_VSYNC_ACTIVE_F2_START__MASK;
 }
-#define HDMI_VSYNC_ACTIVE_F2_END__MASK				0x0fff0000
+#define HDMI_VSYNC_ACTIVE_F2_END__MASK				0x1fff0000
 #define HDMI_VSYNC_ACTIVE_F2_END__SHIFT				16
 static inline uint32_t HDMI_VSYNC_ACTIVE_F2_END(uint32_t val)
 {
@@ -504,13 +508,13 @@
 }
 
 #define REG_HDMI_TOTAL						0x000002c0
-#define HDMI_TOTAL_H_TOTAL__MASK				0x00000fff
+#define HDMI_TOTAL_H_TOTAL__MASK				0x00001fff
 #define HDMI_TOTAL_H_TOTAL__SHIFT				0
 static inline uint32_t HDMI_TOTAL_H_TOTAL(uint32_t val)
 {
 	return ((val) << HDMI_TOTAL_H_TOTAL__SHIFT) & HDMI_TOTAL_H_TOTAL__MASK;
 }
-#define HDMI_TOTAL_V_TOTAL__MASK				0x0fff0000
+#define HDMI_TOTAL_V_TOTAL__MASK				0x1fff0000
 #define HDMI_TOTAL_V_TOTAL__SHIFT				16
 static inline uint32_t HDMI_TOTAL_V_TOTAL(uint32_t val)
 {
@@ -518,7 +522,7 @@
 }
 
 #define REG_HDMI_VSYNC_TOTAL_F2					0x000002c4
-#define HDMI_VSYNC_TOTAL_F2_V_TOTAL__MASK			0x00000fff
+#define HDMI_VSYNC_TOTAL_F2_V_TOTAL__MASK			0x00001fff
 #define HDMI_VSYNC_TOTAL_F2_V_TOTAL__SHIFT			0
 static inline uint32_t HDMI_VSYNC_TOTAL_F2_V_TOTAL(uint32_t val)
 {
@@ -559,6 +563,20 @@
 
 #define REG_HDMI_CEC_WR_CHECK_CONFIG				0x00000370
 
+#define REG_HDMI_DDC_INT_CTRL0                                 0x00000430
+#define REG_HDMI_DDC_INT_CTRL1                                 0x00000434
+#define REG_HDMI_DDC_INT_CTRL2                                 0x00000438
+#define REG_HDMI_DDC_INT_CTRL3                                 0x0000043C
+#define REG_HDMI_DDC_INT_CTRL4                                 0x00000440
+#define REG_HDMI_DDC_INT_CTRL5                                 0x00000444
+#define REG_HDMI_SCRAMBLER_STATUS_DDC_CTRL                     0x00000464
+#define REG_HDMI_SCRAMBLER_STATUS_DDC_TIMER_CTRL               0x00000468
+#define REG_HDMI_SCRAMBLER_STATUS_DDC_TIMER_CTRL2              0x0000046C
+#define REG_HDMI_SCRAMBLER_STATUS_DDC_STATUS                   0x00000470
+#define REG_HDMI_SCRAMBLER_STATUS_DDC_TIMER_STATUS             0x00000474
+#define REG_HDMI_SCRAMBLER_STATUS_DDC_TIMER_STATUS2            0x00000478
+#define REG_HDMI_HW_DDC_CTRL                                   0x000004CC
+
 #define REG_HDMI_8x60_PHY_REG0					0x00000300
 #define HDMI_8x60_PHY_REG0_DESER_DEL_CTRL__MASK			0x0000001c
 #define HDMI_8x60_PHY_REG0_DESER_DEL_CTRL__SHIFT		2
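
Note: the mask widenings above (0x0fff to 0x1fff) grow the HDMI timing fields
from 12 to 13 bits; 12 bits cap at 4095, which is too small once 4K modes
appear (the CEA 3840x2160 timings have an h-total of 4400). A standalone
illustration -- the minus-one programming convention is an assumption about
how these helpers are used:

	#include <stdio.h>
	#include <stdint.h>

	#define HDMI_TOTAL_H_TOTAL__MASK	0x00001fff
	#define HDMI_TOTAL_V_TOTAL__MASK	0x1fff0000

	int main(void)
	{
		uint32_t h = 4400 - 1;	/* > 0xfff: needs the 13th bit */
		uint32_t v = 2250 - 1;
		uint32_t total = ((h << 0) & HDMI_TOTAL_H_TOTAL__MASK) |
				 ((v << 16) & HDMI_TOTAL_V_TOTAL__MASK);

		printf("REG_HDMI_TOTAL = 0x%08x\n", total);
		return 0;
	}
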
diff -ruw linux-4.4.115/drivers/gpu/drm/msm/Kconfig linux-4.4.115-fbx/drivers/gpu/drm/msm/Kconfig
--- linux-4.4.115/drivers/gpu/drm/msm/Kconfig	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/gpu/drm/msm/Kconfig	2019-10-29 09:26:23.621202963 +0100
@@ -3,13 +3,17 @@
 	tristate "MSM DRM"
 	depends on DRM
 	depends on ARCH_QCOM || (ARM && COMPILE_TEST)
-	depends on OF && COMMON_CLK
+	depends on OF
 	select REGULATOR
 	select DRM_KMS_HELPER
 	select DRM_PANEL
 	select SHMEM
 	select TMPFS
 	select QCOM_SCM
+	select BACKLIGHT_CLASS_DEVICE
+	select MSM_EXT_DISPLAY
+	select MMU_NOTIFIER
+	select INTERVAL_TREE
 	default y
 	help
 	  DRM/KMS driver for MSM/snapdragon.
@@ -33,6 +37,18 @@
 	  Choose this option if you have a need for MIPI DSI connector
 	  support.
 
+config DRM_MSM_DSI_STAGING
+	bool "Enable new DSI driver support in MSM DRM driver"
+	depends on DRM_MSM
+	select DRM_PANEL
+	select DRM_MIPI_DSI
+	default y
+	help
+	  Choose this option if you need MIPI DSI connector support on MSM
+	  which conforms to DRM. MIPI stands for Mobile Industry Processor
+	  Interface and DSI stands for Display Serial Interface which powers
+	  the primary display of your mobile device.
+
 config DRM_MSM_DSI_PLL
 	bool "Enable DSI PLL driver in MSM DRM"
 	depends on DRM_MSM_DSI && COMMON_CLK
@@ -54,3 +70,41 @@
 	default y
 	help
 	  Choose this option if the 20nm DSI PHY is used on the platform.
+
+config DRM_MSM_MDP4
+	tristate "MSM MDP4 DRM driver"
+	depends on DRM_MSM
+	default n
+	help
+	  Choose this option if MSM MDP4 revision support is needed in DRM/KMS.
+
+config DRM_MSM_HDCP
+	tristate "HDCP for MSM DRM"
+	depends on DRM_MSM
+	default n
+	help
+	  Choose this option if HDCP support is needed in the DRM/KMS driver.
+
+config DRM_SDE_WB
+	bool "Enable Writeback support in SDE DRM"
+	depends on DRM_MSM
+	default y
+	help
+	  Choose this option for writeback connector support.
+
+config DRM_SDE_HDMI
+	bool "Enable HDMI driver support in DRM SDE driver"
+	depends on DRM_MSM
+	default y
+	help
+	  Choose this option if HDMI connector support is needed in SDE driver.
+
+config DRM_SDE_EVTLOG_DEBUG
+	bool "Enable event logging in MSM DRM"
+	depends on DRM_MSM
+	help
+	  The SDE DRM debugging support enables display debugging features:
+	  dumping SDE registers on driver errors, panicking the driver on
+	  fatal errors, and logging some display-driver events into an
+	  internal buffer (which avoids logging overhead).
+
diff -ruw linux-4.4.115/drivers/gpu/drm/msm/Makefile linux-4.4.115-fbx/drivers/gpu/drm/msm/Makefile
--- linux-4.4.115/drivers/gpu/drm/msm/Makefile	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/gpu/drm/msm/Makefile	2019-01-22 16:16:23.479246189 +0100
@@ -1,17 +1,19 @@
-ccflags-y := -Iinclude/drm -Idrivers/gpu/drm/msm
+ccflags-y := -Iinclude/drm -Idrivers/gpu/drm/msm -Idrivers/gpu/drm/msm/dsi-staging
+ccflags-y += -Idrivers/gpu/drm/msm/hdmi
+ccflags-y += -Idrivers/gpu/drm/msm/hdmi-staging
 ccflags-$(CONFIG_DRM_MSM_DSI) += -Idrivers/gpu/drm/msm/dsi
+ccflags-$(CONFIG_SYNC) += -Idrivers/staging/android
+ccflags-$(CONFIG_DRM_MSM_DSI_PLL) += -Idrivers/gpu/drm/msm/dsi
+ccflags-y += -Idrivers/gpu/drm/msm/sde
 
-msm-y := \
-	adreno/adreno_device.o \
-	adreno/adreno_gpu.o \
-	adreno/a3xx_gpu.o \
-	adreno/a4xx_gpu.o \
+msm_drm-y := \
 	hdmi/hdmi.o \
 	hdmi/hdmi_audio.o \
 	hdmi/hdmi_bridge.o \
 	hdmi/hdmi_connector.o \
 	hdmi/hdmi_hdcp.o \
 	hdmi/hdmi_i2c.o \
+	hdmi/hdmi_util.o \
 	hdmi/hdmi_phy_8960.o \
 	hdmi/hdmi_phy_8x60.o \
 	hdmi/hdmi_phy_8x74.o \
@@ -23,13 +25,6 @@
 	edp/edp_phy.o \
 	mdp/mdp_format.o \
 	mdp/mdp_kms.o \
-	mdp/mdp4/mdp4_crtc.o \
-	mdp/mdp4/mdp4_dtv_encoder.o \
-	mdp/mdp4/mdp4_lcdc_encoder.o \
-	mdp/mdp4/mdp4_lvds_connector.o \
-	mdp/mdp4/mdp4_irq.o \
-	mdp/mdp4/mdp4_kms.o \
-	mdp/mdp4/mdp4_plane.o \
 	mdp/mdp5/mdp5_cfg.o \
 	mdp/mdp5/mdp5_ctl.o \
 	mdp/mdp5/mdp5_crtc.o \
@@ -38,34 +33,125 @@
 	mdp/mdp5/mdp5_kms.o \
 	mdp/mdp5/mdp5_plane.o \
 	mdp/mdp5/mdp5_smp.o \
+	sde/sde_crtc.o \
+	sde/sde_encoder.o \
+	sde/sde_encoder_phys_vid.o \
+	sde/sde_encoder_phys_cmd.o \
+	sde/sde_irq.o \
+	sde/sde_core_irq.o \
+	sde/sde_core_perf.o \
+	sde/sde_rm.o \
+	sde/sde_kms_utils.o \
+	sde/sde_kms.o \
+	sde/sde_plane.o \
+	sde/sde_connector.o \
+	sde/sde_backlight.o \
+	sde/sde_color_processing.o \
+	sde/sde_vbif.o \
+	sde/sde_splash.o \
+	sde_dbg.o \
+	sde_dbg_evtlog.o \
+	sde_io_util.o \
+	dba_bridge.o \
+	sde_edid_parser.o \
+	sde_hdcp_1x.o
+
+# use drm gpu driver only if qcom_kgsl driver not available
+ifneq ($(CONFIG_QCOM_KGSL),y)
+msm_drm-y += adreno/adreno_device.o \
+	adreno/adreno_gpu.o \
+	adreno/a3xx_gpu.o \
+	adreno/a4xx_gpu.o \
+	adreno/a5xx_gpu.o \
+	adreno/a5xx_power.o \
+	adreno/a5xx_preempt.o \
+	adreno/a5xx_snapshot.o \
+	adreno/a5xx_counters.o
+endif
+
+msm_drm-$(CONFIG_DRM_MSM_MDP4) += mdp/mdp4/mdp4_crtc.o \
+	mdp/mdp4/mdp4_dtv_encoder.o \
+	mdp/mdp4/mdp4_lcdc_encoder.o \
+	mdp/mdp4/mdp4_lvds_connector.o \
+	mdp/mdp4/mdp4_irq.o \
+	mdp/mdp4/mdp4_kms.o \
+	mdp/mdp4/mdp4_plane.o
+
+msm_drm-$(CONFIG_DRM_FBDEV_EMULATION) += msm_fbdev.o
+msm_drm-$(CONFIG_SYNC) += sde/sde_fence.o
+msm_drm-$(CONFIG_COMMON_CLK) += mdp/mdp4/mdp4_lvds_pll.o
+
+msm_drm-$(CONFIG_DRM_MSM_DSI) += dsi/dsi.o \
+			dsi/dsi_cfg.o \
+			dsi/dsi_host.o \
+			dsi/dsi_manager.o \
+			dsi/phy/dsi_phy.o \
+			dsi/dsi_manager.o \
+			mdp/mdp5/mdp5_cmd_encoder.o
+
+msm_drm-$(CONFIG_DRM_MSM_DSI_28NM_PHY) += dsi/phy/dsi_phy_28nm.o
+msm_drm-$(CONFIG_DRM_MSM_DSI_20NM_PHY) += dsi/phy/dsi_phy_20nm.o
+
+msm_drm-$(CONFIG_DRM_MSM_DSI_STAGING) += dsi-staging/dsi_phy.o \
+				dsi-staging/dsi_clk_pwr.o \
+				dsi-staging/dsi_phy.o \
+				dsi-staging/dsi_phy_hw_v4_0.o \
+				dsi-staging/dsi_ctrl_hw_1_4.o \
+				dsi-staging/dsi_ctrl.o \
+				dsi-staging/dsi_catalog.o \
+				dsi-staging/dsi_drm.o \
+				dsi-staging/dsi_display.o \
+				dsi-staging/dsi_panel.o \
+				dsi-staging/dsi_display_test.o
+
+msm_drm-$(CONFIG_DRM_SDE_HDMI) += \
+	hdmi-staging/sde_hdmi_util.o \
+	hdmi-staging/sde_hdmi.o \
+	hdmi-staging/sde_hdmi_bridge.o \
+	hdmi-staging/sde_hdmi_audio.o \
+	hdmi-staging/sde_hdmi_hdcp2p2.o
+
+msm_drm-$(CONFIG_DRM_MSM_DSI_PLL) += dsi/pll/dsi_pll.o \
+				dsi/pll/dsi_pll_28nm.o
+
+msm_drm-$(CONFIG_DRM_MSM) += \
+	sde/sde_hw_catalog.o \
+	sde/sde_hw_cdm.o \
+	sde/sde_hw_dspp.o \
+	sde/sde_hw_intf.o \
+	sde/sde_hw_lm.o \
+	sde/sde_hw_ctl.o \
+	sde/sde_hw_util.o \
+	sde/sde_hw_sspp.o \
+	sde/sde_hw_wb.o \
+	sde/sde_hw_pingpong.o \
+	sde/sde_hw_top.o \
+	sde/sde_hw_interrupts.o \
+	sde/sde_hw_vbif.o \
+	sde/sde_formats.o \
+	sde_power_handle.o \
+	sde/sde_hw_color_processing_v1_7.o
+
+msm_drm-$(CONFIG_DRM_SDE_WB) += sde/sde_wb.o \
+	sde/sde_encoder_phys_wb.o
+
+msm_drm-$(CONFIG_DRM_MSM) += \
 	msm_atomic.o \
 	msm_drv.o \
 	msm_fb.o \
 	msm_gem.o \
 	msm_gem_prime.o \
 	msm_gem_submit.o \
+	msm_gem_vma.o \
 	msm_gpu.o \
 	msm_iommu.o \
+	msm_smmu.o \
 	msm_perf.o \
 	msm_rd.o \
-	msm_ringbuffer.o
-
-msm-$(CONFIG_DRM_FBDEV_EMULATION) += msm_fbdev.o
-msm-$(CONFIG_COMMON_CLK) += mdp/mdp4/mdp4_lvds_pll.o
-
-msm-$(CONFIG_DRM_MSM_DSI) += dsi/dsi.o \
-			dsi/dsi_cfg.o \
-			dsi/dsi_host.o \
-			dsi/dsi_manager.o \
-			dsi/phy/dsi_phy.o \
-			mdp/mdp5/mdp5_cmd_encoder.o
-
-msm-$(CONFIG_DRM_MSM_DSI_28NM_PHY) += dsi/phy/dsi_phy_28nm.o
-msm-$(CONFIG_DRM_MSM_DSI_20NM_PHY) += dsi/phy/dsi_phy_20nm.o
-
-ifeq ($(CONFIG_DRM_MSM_DSI_PLL),y)
-msm-y += dsi/pll/dsi_pll.o
-msm-$(CONFIG_DRM_MSM_DSI_28NM_PHY) += dsi/pll/dsi_pll_28nm.o
-endif
+	msm_ringbuffer.o \
+	msm_prop.o \
+	msm_snapshot.o \
+	msm_submitqueue.o \
+	msm_trace_points.o
 
-obj-$(CONFIG_DRM_MSM)	+= msm.o
+obj-$(CONFIG_DRM_MSM)	+= msm_drm.o
diff -ruw linux-4.4.115/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c linux-4.4.115-fbx/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c
--- linux-4.4.115/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c	2019-01-22 16:16:23.503246407 +0100
@@ -547,7 +547,7 @@
 	if (cfg_handler)
 		mdp5_cfg_destroy(cfg_handler);
 
-	return NULL;
+	return ERR_PTR(ret);
 }
 
 static struct mdp5_cfg_platform *mdp5_get_config(struct platform_device *dev)
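
The mdp5_cfg.c hunk above swaps a bare NULL return on the init error path for ERR_PTR(ret), so the caller learns which errno caused the failure rather than just that it failed. A minimal sketch of the kernel's ERR_PTR/IS_ERR/PTR_ERR convention from the caller's side (the surrounding caller here is illustrative, not part of the patch):

	cfg_handler = mdp5_cfg_init(mdp5_kms, major, minor);
	if (IS_ERR(cfg_handler)) {
		/* recover the negative errno encoded in the pointer */
		ret = PTR_ERR(cfg_handler);
		dev_err(dev->dev, "failed to init cfg: %d\n", ret);
		goto fail;
	}
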
diff -ruw linux-4.4.115/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c linux-4.4.115-fbx/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
--- linux-4.4.115/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c	2019-10-29 09:26:23.633203080 +0100
@@ -171,7 +171,7 @@
 		container_of(work, struct mdp5_crtc, unref_cursor_work);
 	struct mdp5_kms *mdp5_kms = get_kms(&mdp5_crtc->base);
 
-	msm_gem_put_iova(val, mdp5_kms->id);
+	msm_gem_put_iova(val, mdp5_kms->aspace);
 	drm_gem_object_unreference_unlocked(val);
 }
 
@@ -509,7 +509,8 @@
 	struct drm_device *dev = crtc->dev;
 	struct mdp5_kms *mdp5_kms = get_kms(crtc);
 	struct drm_gem_object *cursor_bo, *old_bo = NULL;
-	uint32_t blendcfg, cursor_addr, stride;
+	uint32_t blendcfg, stride;
+	uint64_t cursor_addr;
 	int ret, bpp, lm;
 	unsigned int depth;
 	enum mdp5_cursor_alpha cur_alpha = CURSOR_ALPHA_PER_PIXEL;
@@ -536,7 +537,7 @@
 	if (!cursor_bo)
 		return -ENOENT;
 
-	ret = msm_gem_get_iova(cursor_bo, mdp5_kms->id, &cursor_addr);
+	ret = msm_gem_get_iova(cursor_bo, mdp5_kms->aspace, &cursor_addr);
 	if (ret)
 		return -EINVAL;
 
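mdp5_crtc.c now resolves the cursor BO through the new address-space object and keeps the result in a 64-bit iova, since an SMMU mapping is not guaranteed to fit in 32 bits. In outline, the pin/unpin pairing the patch uses (simplified from the hunks above; the put actually happens later from unref_cursor_work):

	uint64_t cursor_addr;

	/* pin the cursor BO and translate it in the mdp5 address space */
	ret = msm_gem_get_iova(cursor_bo, mdp5_kms->aspace, &cursor_addr);
	if (ret)
		return -EINVAL;

	/* ... program the cursor registers ... */

	/* unpin once the hardware no longer scans out of the BO */
	msm_gem_put_iova(old_bo, mdp5_kms->aspace);
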
diff -ruw linux-4.4.115/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c linux-4.4.115-fbx/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
--- linux-4.4.115/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c	2019-01-22 16:16:23.503246407 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014, 2016-2017 The Linux Foundation. All rights reserved.
  * Copyright (C) 2013 Red Hat
  * Author: Rob Clark <robdclark@gmail.com>
  *
@@ -18,13 +18,10 @@
 
 
 #include "msm_drv.h"
+#include "msm_gem.h"
 #include "msm_mmu.h"
 #include "mdp5_kms.h"
 
-static const char *iommu_ports[] = {
-		"mdp_0",
-};
-
 static int mdp5_hw_init(struct msm_kms *kms)
 {
 	struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
@@ -130,13 +127,13 @@
 static void mdp5_destroy(struct msm_kms *kms)
 {
 	struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
-	struct msm_mmu *mmu = mdp5_kms->mmu;
+	struct msm_gem_address_space *aspace = mdp5_kms->aspace;
 
 	mdp5_irq_domain_fini(mdp5_kms);
 
-	if (mmu) {
-		mmu->funcs->detach(mmu, iommu_ports, ARRAY_SIZE(iommu_ports));
-		mmu->funcs->destroy(mmu);
+	if (aspace) {
+		aspace->mmu->funcs->detach(aspace->mmu);
+		msm_gem_address_space_put(aspace);
 	}
 
 	if (mdp5_kms->ctlm)
@@ -474,7 +471,7 @@
 	struct mdp5_cfg *config;
 	struct mdp5_kms *mdp5_kms;
 	struct msm_kms *kms = NULL;
-	struct msm_mmu *mmu;
+	struct msm_gem_address_space *aspace;
 	uint32_t major, minor;
 	int i, ret;
 
@@ -595,33 +592,33 @@
 	mdelay(16);
 
 	if (config->platform.iommu) {
-		mmu = msm_iommu_new(&pdev->dev, config->platform.iommu);
+		struct msm_mmu *mmu = msm_smmu_new(&pdev->dev,
+				MSM_SMMU_DOMAIN_UNSECURE);
 		if (IS_ERR(mmu)) {
 			ret = PTR_ERR(mmu);
 			dev_err(dev->dev, "failed to init iommu: %d\n", ret);
 			iommu_domain_free(config->platform.iommu);
+			goto fail;
+		}
+
+		aspace = msm_gem_smmu_address_space_create(&pdev->dev,
+				mmu, "mdp5");
+		if (IS_ERR(aspace)) {
+			ret = PTR_ERR(aspace);
 			goto fail;
 		}
 
-		ret = mmu->funcs->attach(mmu, iommu_ports,
-				ARRAY_SIZE(iommu_ports));
+		mdp5_kms->aspace = aspace;
+
+		ret = aspace->mmu->funcs->attach(aspace->mmu, NULL, 0);
 		if (ret) {
-			dev_err(dev->dev, "failed to attach iommu: %d\n", ret);
-			mmu->funcs->destroy(mmu);
+			dev_err(&pdev->dev, "failed to attach iommu: %d\n",
+				ret);
 			goto fail;
 		}
 	} else {
-		dev_info(dev->dev, "no iommu, fallback to phys "
-				"contig buffers for scanout\n");
-		mmu = NULL;
-	}
-	mdp5_kms->mmu = mmu;
-
-	mdp5_kms->id = msm_register_mmu(dev, mmu);
-	if (mdp5_kms->id < 0) {
-		ret = mdp5_kms->id;
-		dev_err(dev->dev, "failed to register mdp5 iommu: %d\n", ret);
-		goto fail;
+		dev_info(&pdev->dev,
+			 "no iommu, fallback to phys contig buffers for scanout\n");
+		aspace = NULL;
 	}
 
 	ret = modeset_init(mdp5_kms);
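
Taken together, the mdp5_kms.c hunks replace the registered mapper-id and raw struct msm_mmu with a refcounted struct msm_gem_address_space. The lifecycle the patch establishes pairs up as follows (a simplified outline of the calls above, error handling elided):

	/* probe: wrap the unsecure SMMU domain in an address space, attach */
	mmu = msm_smmu_new(&pdev->dev, MSM_SMMU_DOMAIN_UNSECURE);
	aspace = msm_gem_smmu_address_space_create(&pdev->dev, mmu, "mdp5");
	mdp5_kms->aspace = aspace;
	ret = aspace->mmu->funcs->attach(aspace->mmu, NULL, 0);

	/* destroy: detach the MMU, then drop the address-space reference */
	aspace->mmu->funcs->detach(aspace->mmu);
	msm_gem_address_space_put(aspace);
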
diff -ruw linux-4.4.115/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h linux-4.4.115-fbx/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
--- linux-4.4.115/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h	2019-01-22 16:16:23.503246407 +0100
@@ -36,8 +36,7 @@
 
 
 	/* mapper-id used to request GEM buffer mapped for scanout: */
-	int id;
-	struct msm_mmu *mmu;
+	struct msm_gem_address_space *aspace;
 
 	struct mdp5_smp *smp;
 	struct mdp5_ctl_manager *ctlm;
diff -ruw linux-4.4.115/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c linux-4.4.115-fbx/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
--- linux-4.4.115/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c	2019-10-29 09:26:23.633203080 +0100
@@ -260,7 +260,7 @@
 		return 0;
 
 	DBG("%s: prepare: FB[%u]", mdp5_plane->name, fb->base.id);
-	return msm_framebuffer_prepare(fb, mdp5_kms->id);
+	return msm_framebuffer_prepare(fb, mdp5_kms->aspace);
 }
 
 static void mdp5_plane_cleanup_fb(struct drm_plane *plane,
@@ -274,7 +274,7 @@
 		return;
 
 	DBG("%s: cleanup: FB[%u]", mdp5_plane->name, fb->base.id);
-	msm_framebuffer_cleanup(fb, mdp5_kms->id);
+	msm_framebuffer_cleanup(fb, mdp5_kms->aspace);
 }
 
 static int mdp5_plane_atomic_check(struct drm_plane *plane,
@@ -400,13 +400,13 @@
 			MDP5_PIPE_SRC_STRIDE_B_P3(fb->pitches[3]));
 
 	mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC0_ADDR(pipe),
-			msm_framebuffer_iova(fb, mdp5_kms->id, 0));
+			msm_framebuffer_iova(fb, mdp5_kms->aspace, 0));
 	mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC1_ADDR(pipe),
-			msm_framebuffer_iova(fb, mdp5_kms->id, 1));
+			msm_framebuffer_iova(fb, mdp5_kms->aspace, 1));
 	mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC2_ADDR(pipe),
-			msm_framebuffer_iova(fb, mdp5_kms->id, 2));
+			msm_framebuffer_iova(fb, mdp5_kms->aspace, 2));
 	mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC3_ADDR(pipe),
-			msm_framebuffer_iova(fb, mdp5_kms->id, 3));
+			msm_framebuffer_iova(fb, mdp5_kms->aspace, 3));
 
 	plane->fb = fb;
 }
diff -ruw linux-4.4.115/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h linux-4.4.115-fbx/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h
--- linux-4.4.115/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h	2019-01-22 16:16:23.503246407 +0100
@@ -9,7 +9,7 @@
 
 The rules-ng-ng source files this header was generated from are:
 - /home/robclark/src/freedreno/envytools/rnndb/msm.xml                 (    676 bytes, from 2015-05-20 20:03:14)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml (   1453 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml (   1572 bytes, from 2016-02-10 17:07:21)
 - /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml            (  20915 bytes, from 2015-05-20 20:03:14)
 - /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml      (   2849 bytes, from 2015-09-18 12:07:28)
 - /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml            (  37194 bytes, from 2015-09-18 12:07:28)
@@ -17,11 +17,12 @@
 - /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml            (    602 bytes, from 2015-10-22 16:35:02)
 - /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml         (   1686 bytes, from 2015-05-20 20:03:14)
 - /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml         (    600 bytes, from 2015-05-20 20:03:07)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml           (  29154 bytes, from 2015-08-10 21:25:43)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml           (  41472 bytes, from 2016-01-22 18:18:18)
 - /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml             (  10416 bytes, from 2015-05-20 20:03:14)
 
 Copyright (C) 2013-2015 by the following authors:
 - Rob Clark <robdclark@gmail.com> (robclark)
+- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
 
 Permission is hereby granted, free of charge, to any person obtaining
 a copy of this software and associated documentation files (the
diff -ruw linux-4.4.115/drivers/gpu/drm/msm/mdp/mdp_common.xml.h linux-4.4.115-fbx/drivers/gpu/drm/msm/mdp/mdp_common.xml.h
--- linux-4.4.115/drivers/gpu/drm/msm/mdp/mdp_common.xml.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/gpu/drm/msm/mdp/mdp_common.xml.h	2019-01-22 16:16:23.503246407 +0100
@@ -9,7 +9,7 @@
 
 The rules-ng-ng source files this header was generated from are:
 - /home/robclark/src/freedreno/envytools/rnndb/msm.xml                 (    676 bytes, from 2015-05-20 20:03:14)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml (   1453 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml (   1572 bytes, from 2016-02-10 17:07:21)
 - /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml            (  20915 bytes, from 2015-05-20 20:03:14)
 - /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml      (   2849 bytes, from 2015-09-18 12:07:28)
 - /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml            (  37194 bytes, from 2015-09-18 12:07:28)
@@ -17,11 +17,12 @@
 - /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml            (    602 bytes, from 2015-10-22 16:35:02)
 - /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml         (   1686 bytes, from 2015-05-20 20:03:14)
 - /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml         (    600 bytes, from 2015-05-20 20:03:07)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml           (  29154 bytes, from 2015-08-10 21:25:43)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml           (  41472 bytes, from 2016-01-22 18:18:18)
 - /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml             (  10416 bytes, from 2015-05-20 20:03:14)
 
 Copyright (C) 2013-2015 by the following authors:
 - Rob Clark <robdclark@gmail.com> (robclark)
+- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
 
 Permission is hereby granted, free of charge, to any person obtaining
 a copy of this software and associated documentation files (the
diff -ruw linux-4.4.115/drivers/gpu/drm/msm/mdp/mdp_format.c linux-4.4.115-fbx/drivers/gpu/drm/msm/mdp/mdp_format.c
--- linux-4.4.115/drivers/gpu/drm/msm/mdp/mdp_format.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/gpu/drm/msm/mdp/mdp_format.c	2019-01-22 16:16:23.503246407 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014,2016 The Linux Foundation. All rights reserved.
  * Copyright (C) 2013 Red Hat
  * Author: Rob Clark <robdclark@gmail.com>
  *
@@ -165,7 +165,11 @@
 	return i;
 }
 
-const struct msm_format *mdp_get_format(struct msm_kms *kms, uint32_t format)
+const struct msm_format *mdp_get_format(
+		struct msm_kms *kms,
+		uint32_t format,
+		const uint64_t *modifiers,
+		uint32_t modifiers_len)
 {
 	int i;
 	for (i = 0; i < ARRAY_SIZE(formats); i++) {
diff -ruw linux-4.4.115/drivers/gpu/drm/msm/mdp/mdp_kms.h linux-4.4.115-fbx/drivers/gpu/drm/msm/mdp/mdp_kms.h
--- linux-4.4.115/drivers/gpu/drm/msm/mdp/mdp_kms.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/gpu/drm/msm/mdp/mdp_kms.h	2019-01-22 16:16:23.503246407 +0100
@@ -98,7 +98,9 @@
 #define MDP_FORMAT_IS_YUV(mdp_format) ((mdp_format)->is_yuv)
 
 uint32_t mdp_get_formats(uint32_t *formats, uint32_t max_formats, bool rgb_only);
-const struct msm_format *mdp_get_format(struct msm_kms *kms, uint32_t format);
+const struct msm_format *mdp_get_format(struct msm_kms *kms,
+		uint32_t format, const uint64_t *modifiers,
+		uint32_t modifiers_len);
 
 /* MDP capabilities */
 #define MDP_CAP_SMP		BIT(0)	/* Shared Memory Pool                 */
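
mdp_get_format() above grows a DRM format-modifier list so that tiled or compressed layouts can eventually be negotiated; the mdp_format.c body still ignores the modifiers, so callers presumably pass NULL/0 for linear scanout. A hypothetical call site under that assumption:

	const struct msm_format *fmt;

	/* linear lookup: no modifiers supplied */
	fmt = mdp_get_format(kms, DRM_FORMAT_XRGB8888, NULL, 0);
	if (!fmt)
		return -EINVAL;
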
diff -ruw linux-4.4.115/drivers/gpu/drm/msm/msm_atomic.c linux-4.4.115-fbx/drivers/gpu/drm/msm/msm_atomic.c
--- linux-4.4.115/drivers/gpu/drm/msm/msm_atomic.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/gpu/drm/msm/msm_atomic.c	2019-10-29 09:26:23.633203080 +0100
@@ -1,4 +1,5 @@
 /*
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
  * Copyright (C) 2014 Red Hat
  * Author: Rob Clark <robdclark@gmail.com>
  *
@@ -18,6 +19,7 @@
 #include "msm_drv.h"
 #include "msm_kms.h"
 #include "msm_gem.h"
+#include "sde_trace.h"
 
 struct msm_commit {
 	struct drm_device *dev;
@@ -25,10 +27,9 @@
 	uint32_t fence;
 	struct msm_fence_cb fence_cb;
 	uint32_t crtc_mask;
+	struct kthread_work commit_work;
 };
 
-static void fence_cb(struct msm_fence_cb *cb);
-
 /* block until specified crtcs are no longer pending update, and
  * atomically mark them as pending update
  */
@@ -59,75 +60,357 @@
 	spin_unlock(&priv->pending_crtcs_event.lock);
 }
 
-static struct msm_commit *commit_init(struct drm_atomic_state *state)
+static void commit_destroy(struct msm_commit *commit)
 {
-	struct msm_commit *c = kzalloc(sizeof(*c), GFP_KERNEL);
+	end_atomic(commit->dev->dev_private, commit->crtc_mask);
+	kfree(commit);
+}
 
-	if (!c)
-		return NULL;
+static void msm_atomic_wait_for_commit_done(
+		struct drm_device *dev,
+		struct drm_atomic_state *old_state,
+		int modeset_flags)
+{
+	struct drm_crtc *crtc;
+	struct msm_drm_private *priv = old_state->dev->dev_private;
+	struct msm_kms *kms = priv->kms;
+	int ncrtcs = old_state->dev->mode_config.num_crtc;
+	int i;
 
-	c->dev = state->dev;
-	c->state = state;
+	for (i = 0; i < ncrtcs; i++) {
+		int private_flags;
 
-	/* TODO we might need a way to indicate to run the cb on a
-	 * different wq so wait_for_vblanks() doesn't block retiring
-	 * bo's..
+		crtc = old_state->crtcs[i];
+
+		if (!crtc || !crtc->state || !crtc->state->enable)
+			continue;
+
+		/* If specified, only wait if requested flag is true */
+		private_flags = crtc->state->adjusted_mode.private_flags;
+		if (modeset_flags && !(modeset_flags & private_flags))
+			continue;
+
+		/* Legacy cursor ioctls are completely unsynced, and userspace
+		 * relies on that (by doing tons of cursor updates). */
+		if (old_state->legacy_cursor_update)
+			continue;
+
+		if (kms->funcs->wait_for_crtc_commit_done)
+			kms->funcs->wait_for_crtc_commit_done(kms, crtc);
+	}
+}
+
+static void
+msm_disable_outputs(struct drm_device *dev, struct drm_atomic_state *old_state)
+{
+	struct drm_connector *connector;
+	struct drm_connector_state *old_conn_state;
+	struct drm_crtc *crtc;
+	struct drm_crtc_state *old_crtc_state;
+	int i;
+
+	SDE_ATRACE_BEGIN("msm_disable");
+	for_each_connector_in_state(old_state, connector, old_conn_state, i) {
+		const struct drm_encoder_helper_funcs *funcs;
+		struct drm_encoder *encoder;
+		struct drm_crtc_state *old_crtc_state;
+		unsigned int crtc_idx;
+
+		/*
+		 * Shut down everything that's in the changeset and currently
+		 * still on. So need to check the old, saved state.
+		 */
+		if (!old_conn_state->crtc)
+			continue;
+
+		crtc_idx = drm_crtc_index(old_conn_state->crtc);
+		old_crtc_state = old_state->crtc_states[crtc_idx];
+
+		if (!old_crtc_state->active ||
+		    !drm_atomic_crtc_needs_modeset(old_conn_state->crtc->state))
+			continue;
+
+		encoder = old_conn_state->best_encoder;
+
+		/* We shouldn't get this far if we didn't previously have
+		 * an encoder.. but WARN_ON() rather than explode.
+		 */
+		if (WARN_ON(!encoder))
+			continue;
+
+		if (msm_is_mode_seamless(
+				&connector->encoder->crtc->state->mode))
+			continue;
+
+		funcs = encoder->helper_private;
+
+		DRM_DEBUG_ATOMIC("disabling [ENCODER:%d:%s]\n",
+				 encoder->base.id, encoder->name);
+
+		/*
+		 * Each encoder has at most one connector (since we always steal
+		 * it away), so we won't call disable hooks twice.
 	 */
-	INIT_FENCE_CB(&c->fence_cb, fence_cb);
+		drm_bridge_disable(encoder->bridge);
+
+		/* Right function depends upon target state. */
+		if (connector->state->crtc && funcs->prepare)
+			funcs->prepare(encoder);
+		else if (funcs->disable)
+			funcs->disable(encoder);
+		else
+			funcs->dpms(encoder, DRM_MODE_DPMS_OFF);
 
-	return c;
+		drm_bridge_post_disable(encoder->bridge);
 }
 
-static void commit_destroy(struct msm_commit *c)
+	for_each_crtc_in_state(old_state, crtc, old_crtc_state, i) {
+		const struct drm_crtc_helper_funcs *funcs;
+
+		/* Shut down everything that needs a full modeset. */
+		if (!drm_atomic_crtc_needs_modeset(crtc->state))
+			continue;
+
+		if (!old_crtc_state->active)
+			continue;
+
+		if (msm_is_mode_seamless(&crtc->state->mode))
+			continue;
+
+		funcs = crtc->helper_private;
+
+		DRM_DEBUG_ATOMIC("disabling [CRTC:%d]\n",
+				 crtc->base.id);
+
+		/* Right function depends upon target state. */
+		if (crtc->state->enable && funcs->prepare)
+			funcs->prepare(crtc);
+		else if (funcs->disable)
+			funcs->disable(crtc);
+		else
+			funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
+	}
+	SDE_ATRACE_END("msm_disable");
+}
+
+static void
+msm_crtc_set_mode(struct drm_device *dev, struct drm_atomic_state *old_state)
+{
+	struct drm_crtc *crtc;
+	struct drm_crtc_state *old_crtc_state;
+	struct drm_connector *connector;
+	struct drm_connector_state *old_conn_state;
+	int i;
+
+	for_each_crtc_in_state(old_state, crtc, old_crtc_state, i) {
+		const struct drm_crtc_helper_funcs *funcs;
+
+		if (!crtc->state->mode_changed)
+			continue;
+
+		funcs = crtc->helper_private;
+
+		if (crtc->state->enable && funcs->mode_set_nofb) {
+			DRM_DEBUG_ATOMIC("modeset on [CRTC:%d]\n",
+					 crtc->base.id);
+
+			funcs->mode_set_nofb(crtc);
+		}
+	}
+
+	for_each_connector_in_state(old_state, connector, old_conn_state, i) {
+		const struct drm_encoder_helper_funcs *funcs;
+		struct drm_crtc_state *new_crtc_state;
+		struct drm_encoder *encoder;
+		struct drm_display_mode *mode, *adjusted_mode;
+
+		if (!connector->state->best_encoder)
+			continue;
+
+		encoder = connector->state->best_encoder;
+		funcs = encoder->helper_private;
+		new_crtc_state = connector->state->crtc->state;
+		mode = &new_crtc_state->mode;
+		adjusted_mode = &new_crtc_state->adjusted_mode;
+
+		if (!new_crtc_state->mode_changed)
+			continue;
+
+		DRM_DEBUG_ATOMIC("modeset on [ENCODER:%d:%s]\n",
+				 encoder->base.id, encoder->name);
+
+		/*
+		 * Each encoder has at most one connector (since we always steal
+		 * it away), so we won't call mode_set hooks twice.
+		 */
+		if (funcs->mode_set)
+			funcs->mode_set(encoder, mode, adjusted_mode);
+
+		drm_bridge_mode_set(encoder->bridge, mode, adjusted_mode);
+	}
+}
+
+/**
+ * msm_atomic_helper_commit_modeset_disables - modeset commit to disable outputs
+ * @dev: DRM device
+ * @old_state: atomic state object with old state structures
+ *
+ * This function shuts down all the outputs that need to be shut down and
+ * prepares them (if required) with the new mode.
+ *
+ * For compatibility with legacy crtc helpers this should be called before
+ * drm_atomic_helper_commit_planes(), which is what the default commit function
+ * does. But drivers with different needs can group the modeset commits together
+ * and do the plane commits at the end. This is useful for drivers doing runtime
+ * PM since plane updates then only happen when the CRTC is actually enabled.
+ */
+static void msm_atomic_helper_commit_modeset_disables(struct drm_device *dev,
+		struct drm_atomic_state *old_state)
 {
-	end_atomic(c->dev->dev_private, c->crtc_mask);
-	kfree(c);
+	msm_disable_outputs(dev, old_state);
+
+	drm_atomic_helper_update_legacy_modeset_state(dev, old_state);
+
+	msm_crtc_set_mode(dev, old_state);
 }
 
-static void msm_atomic_wait_for_commit_done(struct drm_device *dev,
+/**
+ * msm_atomic_helper_commit_modeset_enables - modeset commit to enable outputs
+ * @dev: DRM device
+ * @old_state: atomic state object with old state structures
+ *
+ * This function enables all the outputs with the new configuration which had to
+ * be turned off for the update.
+ *
+ * For compatibility with legacy crtc helpers this should be called after
+ * drm_atomic_helper_commit_planes(), which is what the default commit function
+ * does. But drivers with different needs can group the modeset commits together
+ * and do the plane commits at the end. This is useful for drivers doing runtime
+ * PM since plane updates then only happen when the CRTC is actually enabled.
+ */
+static void msm_atomic_helper_commit_modeset_enables(struct drm_device *dev,
 		struct drm_atomic_state *old_state)
 {
 	struct drm_crtc *crtc;
-	struct msm_drm_private *priv = old_state->dev->dev_private;
+	struct drm_crtc_state *old_crtc_state;
+	struct drm_connector *connector;
+	struct drm_connector_state *old_conn_state;
+	struct msm_drm_private *priv = dev->dev_private;
 	struct msm_kms *kms = priv->kms;
-	int ncrtcs = old_state->dev->mode_config.num_crtc;
+	int bridge_enable_count = 0;
 	int i;
 
-	for (i = 0; i < ncrtcs; i++) {
-		crtc = old_state->crtcs[i];
+	SDE_ATRACE_BEGIN("msm_enable");
+	for_each_crtc_in_state(old_state, crtc, old_crtc_state, i) {
+		const struct drm_crtc_helper_funcs *funcs;
 
-		if (!crtc)
+		/* Need to filter out CRTCs where only planes change. */
+		if (!drm_atomic_crtc_needs_modeset(crtc->state))
 			continue;
 
-		if (!crtc->state->enable)
+		if (!crtc->state->active)
 			continue;
 
-		/* Legacy cursor ioctls are completely unsynced, and userspace
-		 * relies on that (by doing tons of cursor updates). */
-		if (old_state->legacy_cursor_update)
+		if (msm_is_mode_seamless(&crtc->state->mode))
 			continue;
 
-		kms->funcs->wait_for_crtc_commit_done(kms, crtc);
+		funcs = crtc->helper_private;
+
+		if (crtc->state->enable) {
+			DRM_DEBUG_ATOMIC("enabling [CRTC:%d]\n",
+					 crtc->base.id);
+
+			if (funcs->enable)
+				funcs->enable(crtc);
+			else
+				funcs->commit(crtc);
+		}
+	}
+
+	/* ensure bridge/encoder updates happen on same vblank */
+	msm_atomic_wait_for_commit_done(dev, old_state,
+			MSM_MODE_FLAG_VBLANK_PRE_MODESET);
+
+	for_each_connector_in_state(old_state, connector, old_conn_state, i) {
+		const struct drm_encoder_helper_funcs *funcs;
+		struct drm_encoder *encoder;
+
+		if (!connector->state->best_encoder)
+			continue;
+
+		if (!connector->state->crtc->state->active ||
+		    !drm_atomic_crtc_needs_modeset(
+				    connector->state->crtc->state))
+			continue;
+
+		encoder = connector->state->best_encoder;
+		funcs = encoder->helper_private;
+
+		DRM_DEBUG_ATOMIC("enabling [ENCODER:%d:%s]\n",
+				 encoder->base.id, encoder->name);
+
+		/*
+		 * Each encoder has at most one connector (since we always steal
+		 * it away), so we won't call enable hooks twice.
+		 */
+		drm_bridge_pre_enable(encoder->bridge);
+		++bridge_enable_count;
+
+		if (funcs->enable)
+			funcs->enable(encoder);
+		else
+			funcs->commit(encoder);
+	}
+
+	if (kms->funcs->commit) {
+		DRM_DEBUG_ATOMIC("triggering commit\n");
+		kms->funcs->commit(kms, old_state);
+	}
+
+	/* If no bridges were pre_enabled, skip iterating over them again */
+	if (bridge_enable_count == 0) {
+		SDE_ATRACE_END("msm_enable");
+		return;
+	}
+
+	for_each_connector_in_state(old_state, connector, old_conn_state, i) {
+		struct drm_encoder *encoder;
+
+		if (!connector->state->best_encoder)
+			continue;
+
+		if (!connector->state->crtc->state->active ||
+		    !drm_atomic_crtc_needs_modeset(
+				    connector->state->crtc->state))
+			continue;
+
+		encoder = connector->state->best_encoder;
+
+		DRM_DEBUG_ATOMIC("bridge enable enabling [ENCODER:%d:%s]\n",
+				 encoder->base.id, encoder->name);
+
+		drm_bridge_enable(encoder->bridge);
 	}
+	SDE_ATRACE_END("msm_enable");
 }
 
 /* The (potentially) asynchronous part of the commit.  At this point
  * nothing can fail short of armageddon.
  */
-static void complete_commit(struct msm_commit *c)
+static void complete_commit(struct msm_commit *commit)
 {
-	struct drm_atomic_state *state = c->state;
+	struct drm_atomic_state *state = commit->state;
 	struct drm_device *dev = state->dev;
 	struct msm_drm_private *priv = dev->dev_private;
 	struct msm_kms *kms = priv->kms;
 
 	kms->funcs->prepare_commit(kms, state);
 
-	drm_atomic_helper_commit_modeset_disables(dev, state);
+	msm_atomic_helper_commit_modeset_disables(dev, state);
 
 	drm_atomic_helper_commit_planes(dev, state, false);
 
-	drm_atomic_helper_commit_modeset_enables(dev, state);
+	msm_atomic_helper_commit_modeset_enables(dev, state);
 
 	/* NOTE: _wait_for_vblanks() only waits for vblank on
 	 * enabled CRTCs.  So we end up faulting when disabling
@@ -142,7 +425,7 @@
 	 * not be critical path)
 	 */
 
-	msm_atomic_wait_for_commit_done(dev, state);
+	msm_atomic_wait_for_commit_done(dev, state, 0);
 
 	drm_atomic_helper_cleanup_planes(dev, state);
 
@@ -150,38 +433,109 @@
 
 	drm_atomic_state_free(state);
 
-	commit_destroy(c);
+	commit_destroy(commit);
 }
 
+static int msm_atomic_commit_dispatch(struct drm_device *dev,
+		struct drm_atomic_state *state, struct msm_commit *commit);
+
 static void fence_cb(struct msm_fence_cb *cb)
 {
-	struct msm_commit *c =
+	struct msm_commit *commit =
 			container_of(cb, struct msm_commit, fence_cb);
-	complete_commit(c);
+	int ret;
+
+	ret = msm_atomic_commit_dispatch(commit->dev, commit->state, commit);
+	if (ret) {
+		DRM_ERROR("%s: atomic commit failed\n", __func__);
+		drm_atomic_state_free(commit->state);
+		commit_destroy(commit);
+	}
 }
 
-static void add_fb(struct msm_commit *c, struct drm_framebuffer *fb)
+static void _msm_drm_commit_work_cb(struct kthread_work *work)
 {
-	struct drm_gem_object *obj = msm_framebuffer_bo(fb, 0);
-	c->fence = max(c->fence, msm_gem_fence(to_msm_bo(obj), MSM_PREP_READ));
+	struct msm_commit *commit = NULL;
+
+	if (!work) {
+		DRM_ERROR("%s: Invalid commit work data!\n", __func__);
+		return;
 }
 
-int msm_atomic_check(struct drm_device *dev,
-		     struct drm_atomic_state *state)
+	commit = container_of(work, struct msm_commit, commit_work);
+
+	SDE_ATRACE_BEGIN("complete_commit");
+	complete_commit(commit);
+	SDE_ATRACE_END("complete_commit");
+}
+
+static struct msm_commit *commit_init(struct drm_atomic_state *state)
 {
-	int ret;
+	struct msm_commit *commit = kzalloc(sizeof(*commit), GFP_KERNEL);
 
-	/*
-	 * msm ->atomic_check can update ->mode_changed for pixel format
-	 * changes, hence must be run before we check the modeset changes.
+	if (!commit) {
+		DRM_ERROR("invalid commit\n");
+		return ERR_PTR(-ENOMEM);
+	}
+
+	commit->dev = state->dev;
+	commit->state = state;
+
+	/* TODO we might need a way to indicate to run the cb on a
+	 * different wq so wait_for_vblanks() doesn't block retiring
+	 * bo's..
 	 */
-	ret = drm_atomic_helper_check_planes(dev, state);
-	if (ret)
-		return ret;
+	INIT_FENCE_CB(&commit->fence_cb, fence_cb);
+	init_kthread_work(&commit->commit_work, _msm_drm_commit_work_cb);
 
-	ret = drm_atomic_helper_check_modeset(dev, state);
-	if (ret)
-		return ret;
+	return commit;
+}
+
+static void commit_set_fence(struct msm_commit *commit,
+		struct drm_framebuffer *fb)
+{
+	struct drm_gem_object *obj = msm_framebuffer_bo(fb, 0);
+	commit->fence = max(commit->fence,
+			msm_gem_fence(to_msm_bo(obj), MSM_PREP_READ));
+}
+
+/* Start display thread function */
+static int msm_atomic_commit_dispatch(struct drm_device *dev,
+		struct drm_atomic_state *state, struct msm_commit *commit)
+{
+	struct msm_drm_private *priv = dev->dev_private;
+	struct drm_crtc *crtc = NULL;
+	struct drm_crtc_state *crtc_state = NULL;
+	int ret = -EINVAL, i = 0, j = 0;
+
+	for_each_crtc_in_state(state, crtc, crtc_state, i) {
+		for (j = 0; j < priv->num_crtcs; j++) {
+			if (priv->disp_thread[j].crtc_id ==
+						crtc->base.id) {
+				if (priv->disp_thread[j].thread) {
+					queue_kthread_work(
+						&priv->disp_thread[j].worker,
+							&commit->commit_work);
+					/* only return zero if work is
+					 * queued successfully.
+					 */
+					ret = 0;
+				} else {
+					DRM_ERROR("no commit thread for crtc_id: %d\n",
+						priv->disp_thread[j].crtc_id);
+				}
+				break;
+			}
+		}
+		/*
+		 * TODO: handle cases where there will be more than
+		 * one crtc per commit cycle. Remove this check then.
+		 * Current assumption is there will be only one crtc
+		 * per commit cycle.
+		 */
+		if (j < priv->num_crtcs)
+			break;
+	}
 
 	return ret;
 }
@@ -192,9 +546,8 @@
  * @state: the driver state object
  * @async: asynchronous commit
  *
- * This function commits a with drm_atomic_helper_check() pre-validated state
- * object. This can still fail when e.g. the framebuffer reservation fails. For
- * now this doesn't implement asynchronous commits.
+ * This function commits with drm_atomic_helper_check() pre-validated state
+ * object. This can still fail when e.g. the framebuffer reservation fails.
  *
  * RETURNS
  * Zero for success or -errno.
@@ -202,19 +555,29 @@
 int msm_atomic_commit(struct drm_device *dev,
 		struct drm_atomic_state *state, bool async)
 {
+	struct msm_drm_private *priv = dev->dev_private;
 	int nplanes = dev->mode_config.num_total_plane;
 	int ncrtcs = dev->mode_config.num_crtc;
 	ktime_t timeout;
-	struct msm_commit *c;
+	struct msm_commit *commit;
 	int i, ret;
 
+	if (!priv || priv->shutdown_in_progress) {
+		DRM_ERROR("priv is null or shutdown is in-progress\n");
+		return -EINVAL;
+	}
+
+	SDE_ATRACE_BEGIN("atomic_commit");
 	ret = drm_atomic_helper_prepare_planes(dev, state);
-	if (ret)
+	if (ret) {
+		SDE_ATRACE_END("atomic_commit");
 		return ret;
+	}
 
-	c = commit_init(state);
-	if (!c) {
-		ret = -ENOMEM;
+	commit = commit_init(state);
+	if (IS_ERR_OR_NULL(commit)) {
+		ret = PTR_ERR(commit);
+		DRM_ERROR("commit_init failed: %d\n", ret);
 		goto error;
 	}
 
@@ -225,7 +588,7 @@
 		struct drm_crtc *crtc = state->crtcs[i];
 		if (!crtc)
 			continue;
-		c->crtc_mask |= (1 << drm_crtc_index(crtc));
+		commit->crtc_mask |= (1 << drm_crtc_index(crtc));
 	}
 
 	/*
@@ -239,16 +602,17 @@
 			continue;
 
 		if ((plane->state->fb != new_state->fb) && new_state->fb)
-			add_fb(c, new_state->fb);
+			commit_set_fence(commit, new_state->fb);
 	}
 
 	/*
 	 * Wait for pending updates on any of the same crtc's and then
 	 * mark our set of crtc's as busy:
 	 */
-	ret = start_atomic(dev->dev_private, c->crtc_mask);
+	ret = start_atomic(dev->dev_private, commit->crtc_mask);
 	if (ret) {
-		kfree(c);
+		DRM_ERROR("start_atomic failed: %d\n", ret);
+		commit_destroy(commit);
 		goto error;
 	}
 
@@ -261,6 +625,16 @@
 	drm_atomic_helper_swap_state(dev, state);
 
 	/*
+	 * Provide the driver a chance to prepare for output fences. This is
+	 * done after the point of no return, but before asynchronous commits
+	 * are dispatched to work queues, so that the fence preparation is
+	 * finished before the .atomic_commit returns.
+	 */
+	if (priv && priv->kms && priv->kms->funcs &&
+			priv->kms->funcs->prepare_fence)
+		priv->kms->funcs->prepare_fence(priv->kms, state);
+
+	/*
 	 * Everything below can be run asynchronously without the need to grab
 	 * any modeset locks at all under one conditions: It must be guaranteed
 	 * that the asynchronous work has either been cancelled (if the driver
@@ -277,20 +651,23 @@
 	 */
 
 	if (async) {
-		msm_queue_fence_cb(dev, &c->fence_cb, c->fence);
+		msm_queue_fence_cb(dev, &commit->fence_cb, commit->fence);
+		SDE_ATRACE_END("atomic_commit");
 		return 0;
 	}
 
 	timeout = ktime_add_ms(ktime_get(), 1000);
 
 	/* uninterruptible wait */
-	msm_wait_fence(dev, c->fence, &timeout, false);
+	msm_wait_fence(dev, commit->fence, &timeout, false);
 
-	complete_commit(c);
+	complete_commit(commit);
 
+	SDE_ATRACE_END("atomic_commit");
 	return 0;
 
 error:
 	drm_atomic_helper_cleanup_planes(dev, state);
+	SDE_ATRACE_END("atomic_commit");
 	return ret;
 }
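
msm_atomic.c now completes commits on a per-CRTC kthread worker, dispatched by msm_atomic_commit_dispatch(), instead of on the shared fence workqueue. A condensed sketch of the 4.4-era kthread_worker pattern it relies on (field names taken from the msm_drv.c hunks that follow):

	/* setup, once per CRTC in msm_load() */
	init_kthread_worker(&priv->disp_thread[i].worker);
	priv->disp_thread[i].thread = kthread_run(kthread_worker_fn,
			&priv->disp_thread[i].worker, "crtc_commit:%d",
			priv->disp_thread[i].crtc_id);

	/* dispatch: hand a commit to that CRTC's thread */
	queue_kthread_work(&priv->disp_thread[i].worker, &commit->commit_work);

	/* teardown in msm_unload(): drain pending work, then stop */
	flush_kthread_worker(&priv->disp_thread[i].worker);
	kthread_stop(priv->disp_thread[i].thread);
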
diff -ruw linux-4.4.115/drivers/gpu/drm/msm/msm_drv.c linux-4.4.115-fbx/drivers/gpu/drm/msm/msm_drv.c
--- linux-4.4.115/drivers/gpu/drm/msm/msm_drv.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/gpu/drm/msm/msm_drv.c	2019-01-22 16:16:23.507246443 +0100
@@ -1,4 +1,5 @@
 /*
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
  * Copyright (C) 2013 Red Hat
  * Author: Rob Clark <robdclark@gmail.com>
  *
@@ -15,9 +16,15 @@
  * this program.  If not, see <http://www.gnu.org/licenses/>.
  */
 
+#include <linux/of_address.h>
 #include "msm_drv.h"
 #include "msm_gpu.h"
 #include "msm_kms.h"
+#include "sde_wb.h"
+#include "msm_gem.h"
+#include "msm_mmu.h"
+
+#define TEARDOWN_DEADLOCK_RETRY_MAX 5
 
 static void msm_fb_output_poll_changed(struct drm_device *dev)
 {
@@ -29,23 +36,10 @@
 static const struct drm_mode_config_funcs mode_config_funcs = {
 	.fb_create = msm_framebuffer_create,
 	.output_poll_changed = msm_fb_output_poll_changed,
-	.atomic_check = msm_atomic_check,
+	.atomic_check = drm_atomic_helper_check,
 	.atomic_commit = msm_atomic_commit,
 };
 
-int msm_register_mmu(struct drm_device *dev, struct msm_mmu *mmu)
-{
-	struct msm_drm_private *priv = dev->dev_private;
-	int idx = priv->num_mmus++;
-
-	if (WARN_ON(idx >= ARRAY_SIZE(priv->mmus)))
-		return -EINVAL;
-
-	priv->mmus[idx] = mmu;
-
-	return idx;
-}
-
 #ifdef CONFIG_DRM_MSM_REGISTER_LOGGING
 static bool reglog = false;
 MODULE_PARM_DESC(reglog, "Enable register read/write logging");
@@ -94,15 +88,21 @@
 	}
 
 	if (reglog)
-		printk(KERN_DEBUG "IO:region %s %p %08lx\n", dbgname, ptr, size);
+		dev_dbg(&pdev->dev, "IO:region %s %pK %08lx\n",
+			dbgname, ptr, size);
 
 	return ptr;
 }
 
+void msm_iounmap(struct platform_device *pdev, void __iomem *addr)
+{
+	devm_iounmap(&pdev->dev, addr);
+}
+
 void msm_writel(u32 data, void __iomem *addr)
 {
 	if (reglog)
-		printk(KERN_DEBUG "IO:W %p %08x\n", addr, data);
+		pr_debug("IO:W %pK %08x\n", addr, data);
 	writel(data, addr);
 }
 
@@ -110,7 +110,7 @@
 {
 	u32 val = readl(addr);
 	if (reglog)
-		printk(KERN_ERR "IO:R %p %08x\n", addr, val);
+		pr_err("IO:R %pK %08x\n", addr, val);
 	return val;
 }
 
@@ -120,7 +120,7 @@
 	bool enable;
 };
 
-static void vblank_ctrl_worker(struct work_struct *work)
+static void vblank_ctrl_worker(struct kthread_work *work)
 {
 	struct msm_vblank_ctrl *vbl_ctrl = container_of(work,
 						struct msm_vblank_ctrl, work);
@@ -129,12 +129,16 @@
 	struct msm_kms *kms = priv->kms;
 	struct vblank_event *vbl_ev, *tmp;
 	unsigned long flags;
+	LIST_HEAD(tmp_head);
 
 	spin_lock_irqsave(&vbl_ctrl->lock, flags);
 	list_for_each_entry_safe(vbl_ev, tmp, &vbl_ctrl->event_list, node) {
 		list_del(&vbl_ev->node);
+		list_add_tail(&vbl_ev->node, &tmp_head);
+	}
 		spin_unlock_irqrestore(&vbl_ctrl->lock, flags);
 
+	list_for_each_entry_safe(vbl_ev, tmp, &tmp_head, node) {
 		if (vbl_ev->enable)
 			kms->funcs->enable_vblank(kms,
 						priv->crtcs[vbl_ev->crtc_id]);
@@ -143,11 +147,7 @@
 						priv->crtcs[vbl_ev->crtc_id]);
 
 		kfree(vbl_ev);
-
-		spin_lock_irqsave(&vbl_ctrl->lock, flags);
 	}
-
-	spin_unlock_irqrestore(&vbl_ctrl->lock, flags);
 }
 
 static int vblank_ctrl_queue_work(struct msm_drm_private *priv,
@@ -168,7 +168,7 @@
 	list_add_tail(&vbl_ev->node, &vbl_ctrl->event_list);
 	spin_unlock_irqrestore(&vbl_ctrl->lock, flags);
 
-	queue_work(priv->wq, &vbl_ctrl->work);
+	queue_kthread_work(&priv->disp_thread[crtc_id].worker, &vbl_ctrl->work);
 
 	return 0;
 }
@@ -180,21 +180,32 @@
 static int msm_unload(struct drm_device *dev)
 {
 	struct msm_drm_private *priv = dev->dev_private;
+	struct platform_device *pdev = dev->platformdev;
 	struct msm_kms *kms = priv->kms;
 	struct msm_gpu *gpu = priv->gpu;
 	struct msm_vblank_ctrl *vbl_ctrl = &priv->vblank_ctrl;
 	struct vblank_event *vbl_ev, *tmp;
+	int i;
 
 	/* We must cancel and cleanup any pending vblank enable/disable
 	 * work before drm_irq_uninstall() to avoid work re-enabling an
 	 * irq after uninstall has disabled it.
 	 */
-	cancel_work_sync(&vbl_ctrl->work);
+	flush_kthread_work(&vbl_ctrl->work);
 	list_for_each_entry_safe(vbl_ev, tmp, &vbl_ctrl->event_list, node) {
 		list_del(&vbl_ev->node);
 		kfree(vbl_ev);
 	}
 
+	/* clean up display commit worker threads */
+	for (i = 0; i < priv->num_crtcs; i++) {
+		if (priv->disp_thread[i].thread) {
+			flush_kthread_worker(&priv->disp_thread[i].worker);
+			kthread_stop(priv->disp_thread[i].thread);
+			priv->disp_thread[i].thread = NULL;
+		}
+	}
+
 	drm_kms_helper_poll_fini(dev);
 	drm_mode_config_cleanup(dev);
 	drm_vblank_cleanup(dev);
@@ -213,6 +224,10 @@
 
 	if (gpu) {
 		mutex_lock(&dev->struct_mutex);
+		/*
+		 * XXX what do we do here?
+		 * pm_runtime_enable(&pdev->dev);
+		 */
 		gpu->funcs->pm_suspend(gpu);
 		mutex_unlock(&dev->struct_mutex);
 		gpu->funcs->destroy(gpu);
@@ -226,6 +241,11 @@
 				priv->vram.paddr, &attrs);
 	}
 
+	sde_dbg_destroy();
+
+	sde_power_client_destroy(&priv->phandle, priv->pclient);
+	sde_power_resource_deinit(pdev, &priv->phandle);
+
 	component_unbind_all(dev->dev, dev);
 
 	dev->dev_private = NULL;
@@ -235,26 +255,26 @@
 	return 0;
 }
 
+#define KMS_MDP4 0
+#define KMS_SDE  1
+
 static int get_mdp_ver(struct platform_device *pdev)
 {
 #ifdef CONFIG_OF
 	static const struct of_device_id match_types[] = { {
-		.compatible = "qcom,mdss_mdp",
-		.data	= (void	*)5,
-	}, {
-		/* end node */
-	} };
+		.compatible = "qcom,sde-kms",
+		.data	= (void	*)KMS_SDE,
+	},
+	{} };
 	struct device *dev = &pdev->dev;
 	const struct of_device_id *match;
 	match = of_match_node(match_types, dev->of_node);
 	if (match)
 		return (int)(unsigned long)match->data;
 #endif
-	return 4;
+	return KMS_MDP4;
 }
 
-#include <linux/of_address.h>
-
 static int msm_init_vram(struct drm_device *dev)
 {
 	struct msm_drm_private *priv = dev->dev_private;
@@ -307,6 +327,7 @@
 		priv->vram.size = size;
 
 		drm_mm_init(&priv->vram.mm, 0, (size >> PAGE_SHIFT) - 1);
+		spin_lock_init(&priv->vram.lock);
 
 		dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &attrs);
 		dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
@@ -330,12 +351,39 @@
 	return ret;
 }
 
+#ifdef CONFIG_OF
+static int msm_component_bind_all(struct device *dev,
+				struct drm_device *drm_dev)
+{
+	int ret;
+
+	ret = component_bind_all(dev, drm_dev);
+	if (ret)
+		DRM_ERROR("component_bind_all failed: %d\n", ret);
+
+	return ret;
+}
+#else
+static int msm_component_bind_all(struct device *dev,
+				struct drm_device *drm_dev)
+{
+	return 0;
+}
+#endif
+
+static int msm_power_enable_wrapper(void *handle, void *client, bool enable)
+{
+	return sde_power_resource_enable(handle, client, enable);
+}
+
 static int msm_load(struct drm_device *dev, unsigned long flags)
 {
 	struct platform_device *pdev = dev->platformdev;
 	struct msm_drm_private *priv;
 	struct msm_kms *kms;
-	int ret;
+	struct sde_dbg_power_ctrl dbg_power_ctrl = { NULL };
+	int ret, i;
+	struct sched_param param;
 
 	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
 	if (!priv) {
@@ -345,22 +393,38 @@
 
 	dev->dev_private = priv;
 
-	priv->wq = alloc_ordered_workqueue("msm", 0);
+	priv->wq = alloc_ordered_workqueue("msm_drm", 0);
 	init_waitqueue_head(&priv->fence_event);
 	init_waitqueue_head(&priv->pending_crtcs_event);
 
+	INIT_LIST_HEAD(&priv->client_event_list);
 	INIT_LIST_HEAD(&priv->inactive_list);
 	INIT_LIST_HEAD(&priv->fence_cbs);
 	INIT_LIST_HEAD(&priv->vblank_ctrl.event_list);
-	INIT_WORK(&priv->vblank_ctrl.work, vblank_ctrl_worker);
+	init_kthread_work(&priv->vblank_ctrl.work, vblank_ctrl_worker);
 	spin_lock_init(&priv->vblank_ctrl.lock);
+	hash_init(priv->mn_hash);
+	mutex_init(&priv->mn_lock);
 
 	drm_mode_config_init(dev);
 
 	platform_set_drvdata(pdev, dev);
 
+	ret = sde_power_resource_init(pdev, &priv->phandle);
+	if (ret) {
+		pr_err("sde power resource init failed\n");
+		goto fail;
+	}
+
+	priv->pclient = sde_power_client_create(&priv->phandle, "sde");
+	if (IS_ERR_OR_NULL(priv->pclient)) {
+		pr_err("sde power client create failed\n");
+		ret = -EINVAL;
+		goto fail;
+	}
+
 	/* Bind all our sub-components: */
-	ret = component_bind_all(dev->dev, dev);
+	ret = msm_component_bind_all(dev->dev, dev);
 	if (ret)
 		return ret;
 
@@ -368,12 +432,22 @@
 	if (ret)
 		goto fail;
 
+	dbg_power_ctrl.handle = &priv->phandle;
+	dbg_power_ctrl.client = priv->pclient;
+	dbg_power_ctrl.enable_fn = msm_power_enable_wrapper;
+	ret = sde_dbg_init(dev->primary->debugfs_root, &pdev->dev,
+			&dbg_power_ctrl);
+	if (ret) {
+		dev_err(dev->dev, "failed to init sde dbg: %d\n", ret);
+		goto fail;
+	}
+
 	switch (get_mdp_ver(pdev)) {
-	case 4:
+	case KMS_MDP4:
 		kms = mdp4_kms_init(dev);
 		break;
-	case 5:
-		kms = mdp5_kms_init(dev);
+	case KMS_SDE:
+		kms = sde_kms_init(dev);
 		break;
 	default:
 		kms = ERR_PTR(-ENODEV);
@@ -387,21 +461,55 @@
 		 * and (for example) use dmabuf/prime to share buffers with
 		 * imx drm driver on iMX5
 		 */
+		priv->kms = NULL;
 		dev_err(dev->dev, "failed to load kms\n");
 		ret = PTR_ERR(kms);
 		goto fail;
 	}
 
 	priv->kms = kms;
-
-	if (kms) {
 		pm_runtime_enable(dev->dev);
+
+	if (kms && kms->funcs && kms->funcs->hw_init) {
 		ret = kms->funcs->hw_init(kms);
 		if (ret) {
 			dev_err(dev->dev, "kms hw init failed: %d\n", ret);
 			goto fail;
 		}
 	}
+	/*
+	 * This priority was found through empirical testing to give the
+	 * commit threads realtime scheduling appropriate for processing
+	 * display updates while still coexisting with other realtime and
+	 * normal-priority tasks.
+	 */
+	param.sched_priority = 16;
+	/* initialize commit thread structure */
+	for (i = 0; i < priv->num_crtcs; i++) {
+		priv->disp_thread[i].crtc_id = priv->crtcs[i]->base.id;
+		init_kthread_worker(&priv->disp_thread[i].worker);
+		priv->disp_thread[i].dev = dev;
+		priv->disp_thread[i].thread =
+			kthread_run(kthread_worker_fn,
+				&priv->disp_thread[i].worker,
+				"crtc_commit:%d",
+				priv->disp_thread[i].crtc_id);
+		if (IS_ERR(priv->disp_thread[i].thread)) {
+			dev_err(dev->dev, "failed to create kthread\n");
+			priv->disp_thread[i].thread = NULL;
+			/* clean up previously created threads if any */
+			for (i -= 1; i >= 0; i--) {
+				kthread_stop(priv->disp_thread[i].thread);
+				priv->disp_thread[i].thread = NULL;
+			}
+			goto fail;
+		}
+
+		/* raise priority only once the thread is known to be valid */
+		ret = sched_setscheduler(priv->disp_thread[i].thread,
+							SCHED_FIFO, &param);
+		if (ret)
+			pr_warn("display thread priority update failed: %d\n",
+									ret);
+	}
 
 	dev->mode_config.funcs = &mode_config_funcs;
 
@@ -430,6 +538,15 @@
 	if (ret)
 		goto fail;
 
+	/* perform subdriver post initialization */
+	if (kms && kms->funcs && kms->funcs->postinit) {
+		ret = kms->funcs->postinit(kms);
+		if (ret) {
+			dev_err(dev->dev, "kms post init failed: %d\n", ret);
+			goto fail;
+		}
+	}
+
 	drm_kms_helper_poll_init(dev);
 
 	return 0;
@@ -439,6 +556,11 @@
 	return ret;
 }
 
+#ifdef CONFIG_QCOM_KGSL
+static void load_gpu(struct drm_device *dev)
+{
+}
+#else
 static void load_gpu(struct drm_device *dev)
 {
 	static DEFINE_MUTEX(init_lock);
@@ -451,47 +573,231 @@
 
 	mutex_unlock(&init_lock);
 }
+#endif
 
-static int msm_open(struct drm_device *dev, struct drm_file *file)
+static struct msm_file_private *setup_pagetable(struct msm_drm_private *priv)
 {
 	struct msm_file_private *ctx;
 
+	if (!priv || !priv->gpu)
+		return NULL;
+
+	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+	if (!ctx)
+		return ERR_PTR(-ENOMEM);
+
+	ctx->aspace = msm_gem_address_space_create_instance(
+		priv->gpu->aspace->mmu, "gpu", 0x100000000ULL,
+		TASK_SIZE_64 - 1);
+
+	if (IS_ERR(ctx->aspace)) {
+		int ret = PTR_ERR(ctx->aspace);
+
+		/*
+		 * If dynamic domains are not supported, everybody uses the
+		 * same pagetable
+		 */
+		if (ret != -EOPNOTSUPP) {
+			kfree(ctx);
+			return ERR_PTR(ret);
+		}
+
+		ctx->aspace = priv->gpu->aspace;
+	}
+
+	ctx->aspace->mmu->funcs->attach(ctx->aspace->mmu, NULL, 0);
+	return ctx;
+}
+
+static int msm_open(struct drm_device *dev, struct drm_file *file)
+{
+	struct msm_file_private *ctx = NULL;
+	struct msm_drm_private *priv;
+	struct msm_kms *kms;
+
+	if (!dev || !dev->dev_private)
+		return -ENODEV;
+
+	priv = dev->dev_private;
 	/* For now, load gpu on open.. to avoid the requirement of having
 	 * firmware in the initrd.
 	 */
 	load_gpu(dev);
 
-	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
-	if (!ctx)
-		return -ENOMEM;
+	ctx = setup_pagetable(priv);
+	if (IS_ERR(ctx))
+		return PTR_ERR(ctx);
+
+	if (ctx) {
+		INIT_LIST_HEAD(&ctx->counters);
+		msm_submitqueue_init(ctx);
+	}
 
 	file->driver_priv = ctx;
 
+	kms = priv->kms;
+
+	if (kms && kms->funcs && kms->funcs->postopen)
+		kms->funcs->postopen(kms, file);
+
 	return 0;
 }
 
 static void msm_preclose(struct drm_device *dev, struct drm_file *file)
 {
 	struct msm_drm_private *priv = dev->dev_private;
-	struct msm_file_private *ctx = file->driver_priv;
 	struct msm_kms *kms = priv->kms;
 
-	if (kms)
+	if (kms && kms->funcs && kms->funcs->preclose)
 		kms->funcs->preclose(kms, file);
+}
 
-	mutex_lock(&dev->struct_mutex);
-	if (ctx == priv->lastctx)
-		priv->lastctx = NULL;
-	mutex_unlock(&dev->struct_mutex);
+static void msm_postclose(struct drm_device *dev, struct drm_file *file)
+{
+	struct msm_drm_private *priv = dev->dev_private;
+	struct msm_file_private *ctx = file->driver_priv;
+	struct msm_kms *kms = priv->kms;
+
+	if (kms && kms->funcs && kms->funcs->postclose)
+		kms->funcs->postclose(kms, file);
+
+	if (!ctx)
+		return;
+
+	msm_submitqueue_close(ctx);
+
+	if (priv->gpu) {
+		msm_gpu_cleanup_counters(priv->gpu, ctx);
+
+		if (ctx->aspace && ctx->aspace != priv->gpu->aspace) {
+			ctx->aspace->mmu->funcs->detach(ctx->aspace->mmu);
+			msm_gem_address_space_put(ctx->aspace);
+		}
+	}
 
 	kfree(ctx);
 }
 
+static int msm_disable_all_modes_commit(
+		struct drm_device *dev,
+		struct drm_atomic_state *state)
+{
+	struct drm_plane *plane;
+	struct drm_crtc *crtc;
+	unsigned plane_mask;
+	int ret;
+
+	plane_mask = 0;
+	drm_for_each_plane(plane, dev) {
+		struct drm_plane_state *plane_state;
+
+		plane_state = drm_atomic_get_plane_state(state, plane);
+		if (IS_ERR(plane_state)) {
+			ret = PTR_ERR(plane_state);
+			goto fail;
+		}
+
+		plane_state->rotation = BIT(DRM_ROTATE_0);
+
+		plane->old_fb = plane->fb;
+		plane_mask |= 1 << drm_plane_index(plane);
+
+		/* disable non-primary: */
+		if (plane->type == DRM_PLANE_TYPE_PRIMARY)
+			continue;
+
+		DRM_DEBUG("disabling plane %d\n", plane->base.id);
+
+		ret = __drm_atomic_helper_disable_plane(plane, plane_state);
+		if (ret != 0)
+			DRM_ERROR("error %d disabling plane %d\n", ret,
+					plane->base.id);
+	}
+
+	drm_for_each_crtc(crtc, dev) {
+		struct drm_mode_set mode_set;
+
+		memset(&mode_set, 0, sizeof(struct drm_mode_set));
+		mode_set.crtc = crtc;
+
+		DRM_DEBUG("disabling crtc %d\n", crtc->base.id);
+
+		ret = __drm_atomic_helper_set_config(&mode_set, state);
+		if (ret != 0)
+			DRM_ERROR("error %d disabling crtc %d\n", ret,
+					crtc->base.id);
+	}
+
+	DRM_DEBUG("committing disables\n");
+	ret = drm_atomic_commit(state);
+
+fail:
+	drm_atomic_clean_old_fb(dev, plane_mask, ret);
+	DRM_DEBUG("disables result %d\n", ret);
+	return ret;
+}
+
+/**
+ * msm_disable_all_modes - disables all planes and crtcs via an atomic commit
+ *	based on restore_fbdev_mode_atomic in drm_fb_helper.c
+ * @dev: device pointer
+ * Return: 0 on success, otherwise negative errno
+ */
+static int msm_disable_all_modes(struct drm_device *dev)
+{
+	struct drm_atomic_state *state;
+	int ret, i;
+
+	state = drm_atomic_state_alloc(dev);
+	if (!state)
+		return -ENOMEM;
+
+	state->acquire_ctx = dev->mode_config.acquire_ctx;
+
+	for (i = 0; i < TEARDOWN_DEADLOCK_RETRY_MAX; i++) {
+		ret = msm_disable_all_modes_commit(dev, state);
+		if (ret != -EDEADLK)
+			break;
+		drm_atomic_state_clear(state);
+		drm_atomic_legacy_backoff(state);
+	}
+
+	/* on successful atomic commit state ownership transfers to framework */
+	if (ret != 0)
+		drm_atomic_state_free(state);
+
+	return ret;
+}
+
 static void msm_lastclose(struct drm_device *dev)
 {
 	struct msm_drm_private *priv = dev->dev_private;
-	if (priv->fbdev)
+	struct msm_kms *kms = priv->kms;
+	int i;
+
+	/*
+	 * clean up vblank disable immediately as this is the last close.
+	 */
+	for (i = 0; i < dev->num_crtcs; i++) {
+		struct drm_vblank_crtc *vblank = &dev->vblank[i];
+		struct timer_list *disable_timer = &vblank->disable_timer;
+
+		if (del_timer_sync(disable_timer))
+			disable_timer->function(disable_timer->data);
+	}
+
+	/* wait for pending vblank requests to be executed by worker thread */
+	flush_workqueue(priv->wq);
+
+	if (priv->fbdev) {
 		drm_fb_helper_restore_fbdev_mode_unlocked(priv->fbdev);
+	} else {
+		drm_modeset_lock_all(dev);
+		msm_disable_all_modes(dev);
+		drm_modeset_unlock_all(dev);
+		if (kms && kms->funcs && kms->funcs->lastclose)
+			kms->funcs->lastclose(kms);
+	}
 }
 
 static irqreturn_t msm_irq(int irq, void *arg)
@@ -533,7 +839,7 @@
 	struct msm_kms *kms = priv->kms;
 	if (!kms)
 		return -ENXIO;
-	DBG("dev=%p, crtc=%u", dev, pipe);
+	DBG("dev=%pK, crtc=%u", dev, pipe);
 	return vblank_ctrl_queue_work(priv, pipe, true);
 }
 
@@ -543,7 +849,7 @@
 	struct msm_kms *kms = priv->kms;
 	if (!kms)
 		return;
-	DBG("dev=%p, crtc=%u", dev, pipe);
+	DBG("dev=%pK, crtc=%u", dev, pipe);
 	vblank_ctrl_queue_work(priv, pipe, false);
 }
 
@@ -559,12 +865,21 @@
 
 	if (gpu) {
 		seq_printf(m, "%s Status:\n", gpu->name);
+		pm_runtime_get_sync(&gpu->pdev->dev);
 		gpu->funcs->show(gpu, m);
+		pm_runtime_put_sync(&gpu->pdev->dev);
 	}
 
 	return 0;
 }
 
+static int msm_snapshot_show(struct drm_device *dev, struct seq_file *m)
+{
+	struct msm_drm_private *priv = dev->dev_private;
+
+	return msm_snapshot_write(priv->gpu, m);
+}
+
 static int msm_gem_show(struct drm_device *dev, struct seq_file *m)
 {
 	struct msm_drm_private *priv = dev->dev_private;
@@ -629,11 +944,22 @@
 	return ret;
 }
 
+static int show_unlocked(struct seq_file *m, void *arg)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_device *dev = node->minor->dev;
+	int (*show)(struct drm_device *dev, struct seq_file *m) =
+			node->info_ent->data;
+
+	return show(dev, m);
+}
+
 static struct drm_info_list msm_debugfs_list[] = {
 		{"gpu", show_locked, 0, msm_gpu_show},
 		{"gem", show_locked, 0, msm_gem_show},
 		{ "mm", show_locked, 0, msm_mm_show },
 		{ "fb", show_locked, 0, msm_fb_show },
+		{ "snapshot", show_unlocked, 0, msm_snapshot_show },
 };
 
 static int late_init_minor(struct drm_minor *minor)
@@ -707,14 +1033,23 @@
 		ktime_t *timeout , bool interruptible)
 {
 	struct msm_drm_private *priv = dev->dev_private;
+	struct msm_gpu *gpu = priv->gpu;
+	int index = FENCE_RING(fence);
+	uint32_t submitted;
 	int ret;
 
-	if (!priv->gpu)
-		return 0;
+	if (!gpu)
+		return -ENXIO;
 
-	if (fence > priv->gpu->submitted_fence) {
+	if (index >= MSM_GPU_MAX_RINGS || index >= gpu->nr_rings ||
+		!gpu->rb[index])
+		return -EINVAL;
+
+	submitted = gpu->funcs->submitted_fence(gpu, gpu->rb[index]);
+
+	if (fence > submitted) {
 		DRM_ERROR("waiting on invalid fence: %u (of %u)\n",
-				fence, priv->gpu->submitted_fence);
+			fence, submitted);
 		return -EINVAL;
 	}
 
@@ -744,7 +1079,7 @@
 
 		if (ret == 0) {
 			DBG("timeout waiting for fence: %u (completed: %u)",
-					fence, priv->completed_fence);
+					fence, priv->completed_fence[index]);
 			ret = -ETIMEDOUT;
 		} else if (ret != -ERESTARTSYS) {
 			ret = 0;
@@ -758,12 +1093,13 @@
 		struct msm_fence_cb *cb, uint32_t fence)
 {
 	struct msm_drm_private *priv = dev->dev_private;
+	int index = FENCE_RING(fence);
 	int ret = 0;
 
 	mutex_lock(&dev->struct_mutex);
 	if (!list_empty(&cb->work.entry)) {
 		ret = -EINVAL;
-	} else if (fence > priv->completed_fence) {
+	} else if (fence > priv->completed_fence[index]) {
 		cb->fence = fence;
 		list_add_tail(&cb->work.entry, &priv->fence_cbs);
 	} else {
@@ -778,22 +1114,22 @@
 void msm_update_fence(struct drm_device *dev, uint32_t fence)
 {
 	struct msm_drm_private *priv = dev->dev_private;
+	struct msm_fence_cb *cb, *tmp;
+	int index = FENCE_RING(fence);
 
-	mutex_lock(&dev->struct_mutex);
-	priv->completed_fence = max(fence, priv->completed_fence);
-
-	while (!list_empty(&priv->fence_cbs)) {
-		struct msm_fence_cb *cb;
-
-		cb = list_first_entry(&priv->fence_cbs,
-				struct msm_fence_cb, work.entry);
+	if (index >= MSM_GPU_MAX_RINGS)
+		return;
 
-		if (cb->fence > priv->completed_fence)
-			break;
+	mutex_lock(&dev->struct_mutex);
+	priv->completed_fence[index] = max(fence, priv->completed_fence[index]);
 
+	list_for_each_entry_safe(cb, tmp, &priv->fence_cbs, work.entry) {
+		if (COMPARE_FENCE_LTE(cb->fence,
+			priv->completed_fence[index])) {
 		list_del_init(&cb->work.entry);
 		queue_work(priv->wq, &cb->work);
 	}
+	}
 
 	mutex_unlock(&dev->struct_mutex);
 
@@ -845,6 +1181,20 @@
 			args->flags, &args->handle);
 }
 
+static int msm_ioctl_gem_svm_new(struct drm_device *dev, void *data,
+		struct drm_file *file)
+{
+	struct drm_msm_gem_svm_new *args = data;
+
+	if (args->flags & ~MSM_BO_FLAGS) {
+		DRM_ERROR("invalid flags: %08x\n", args->flags);
+		return -EINVAL;
+	}
+
+	return msm_gem_svm_new_handle(dev, file, args->hostptr, args->size,
+			args->flags, &args->handle);
+}
+
 static inline ktime_t to_ktime(struct drm_msm_timespec timeout)
 {
 	return ktime_set(timeout.tv_sec, timeout.tv_nsec);
@@ -897,17 +1247,49 @@
 {
 	struct drm_msm_gem_info *args = data;
 	struct drm_gem_object *obj;
+	struct msm_gem_object *msm_obj;
+	struct msm_file_private *ctx = file->driver_priv;
 	int ret = 0;
 
-	if (args->pad)
+	if (args->flags & ~MSM_INFO_FLAGS)
 		return -EINVAL;
 
 	obj = drm_gem_object_lookup(dev, file, args->handle);
 	if (!obj)
 		return -ENOENT;
 
+	msm_obj = to_msm_bo(obj);
+	if (args->flags & MSM_INFO_IOVA) {
+		struct msm_gem_address_space *aspace = NULL;
+		struct msm_drm_private *priv = dev->dev_private;
+		uint64_t iova;
+
+		if (msm_obj->flags & MSM_BO_SECURE && priv->gpu)
+			aspace = priv->gpu->secure_aspace;
+		else if (ctx)
+			aspace = ctx->aspace;
+
+		if (!aspace) {
+			ret = -EINVAL;
+			goto out;
+		}
+
+		ret = msm_gem_get_iova(obj, aspace, &iova);
+		if (!ret)
+			args->offset = iova;
+	} else {
+		if (msm_obj->flags & MSM_BO_SVM) {
+			/*
+			 * Offset for an SVM object is not needed as they are
+			 * already mmap'ed before the SVM ioctl is invoked.
+			 */
+			ret = -EACCES;
+			goto out;
+		}
 	args->offset = msm_gem_mmap_offset(obj);
+	}
 
+out:
 	drm_gem_object_unreference_unlocked(obj);
 
 	return ret;
@@ -917,16 +1299,495 @@
 		struct drm_file *file)
 {
 	struct drm_msm_wait_fence *args = data;
-	ktime_t timeout = to_ktime(args->timeout);
+	ktime_t timeout;
+
 
 	if (args->pad) {
 		DRM_ERROR("invalid pad: %08x\n", args->pad);
 		return -EINVAL;
 	}
 
+	/*
+	 * Special case - if the user passes a timeout of 0.0 just return the
+	 * current fence status (0 for retired, -EBUSY for active) with no
+	 * accompanying kernel logs. This can be a poor man's way of
+	 * determining the status of a fence.
+	 */
+	if (args->timeout.tv_sec == 0 && args->timeout.tv_nsec == 0)
+		return msm_wait_fence(dev, args->fence, NULL, true);
+
+	timeout = to_ktime(args->timeout);
 	return msm_wait_fence(dev, args->fence, &timeout, true);
 }
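+
+/*
+ * Editor's sketch (not part of the original patch): polling a fence from
+ * user space via the zero-timeout special case above, assuming a fence
+ * value returned by an earlier submit:
+ *
+ *	struct drm_msm_wait_fence req = {
+ *		.fence   = fence,
+ *		.timeout = { .tv_sec = 0, .tv_nsec = 0 },
+ *	};
+ *	int ret = ioctl(drm_fd, DRM_IOCTL_MSM_WAIT_FENCE, &req);
+ *	// ret == 0: fence retired; ret == -1 && errno == EBUSY: still active
+ */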
 
+static int msm_event_supported(struct drm_device *dev,
+		struct drm_msm_event_req *req)
+{
+	int ret = -EINVAL;
+	struct drm_mode_object *arg_obj;
+	struct drm_crtc *crtc;
+
+	arg_obj = drm_mode_object_find(dev, req->object_id, req->object_type);
+	if (!arg_obj)
+		return -ENOENT;
+
+	if (arg_obj->type == DRM_MODE_OBJECT_CRTC) {
+		crtc = obj_to_crtc(arg_obj);
+		req->index = drm_crtc_index(crtc);
+	}
+
+	switch (req->event) {
+	case DRM_EVENT_VBLANK:
+	case DRM_EVENT_HISTOGRAM:
+	case DRM_EVENT_AD:
+		if (arg_obj->type == DRM_MODE_OBJECT_CRTC)
+			ret = 0;
+		break;
+	default:
+		break;
+	}
+	return ret;
+}
+
+static void msm_vblank_read_cb(struct drm_pending_event *e)
+{
+	struct drm_pending_vblank_event *vblank;
+	struct msm_drm_private *priv;
+	struct drm_file *file_priv;
+	struct drm_device *dev;
+	struct msm_drm_event *v;
+	int ret = 0;
+	bool need_vblank = false;
+
+	if (!e) {
+		DRM_ERROR("invalid pending event payload\n");
+		return;
+	}
+
+	vblank = container_of(e, struct drm_pending_vblank_event, base);
+	file_priv = vblank->base.file_priv;
+	dev = (file_priv && file_priv->minor) ? file_priv->minor->dev : NULL;
+	priv = (dev) ? dev->dev_private : NULL;
+	if (!priv) {
+		DRM_ERROR("invalid msm private\n");
+		return;
+	}
+
+	list_for_each_entry(v, &priv->client_event_list, base.link) {
+		if (v->base.file_priv != file_priv ||
+		    (v->event.type != DRM_EVENT_VBLANK &&
+		     v->event.type != DRM_EVENT_AD))
+			continue;
+		need_vblank = true;
+		/*
+		 * The user-space client requests N vsyncs when the event
+		 * requested is DRM_EVENT_AD. Once the count reaches zero,
+		 * stop requesting additional vsyncs.
+		 */
+		if (v->event.type == DRM_EVENT_AD) {
+			if (vblank->event.user_data)
+				vblank->event.user_data--;
+			need_vblank = (vblank->event.user_data) ? true : false;
+		}
+		break;
+	}
+
+	if (!need_vblank) {
+		kfree(vblank);
+	} else {
+		ret = drm_vblank_get(dev, vblank->pipe);
+		if (!ret) {
+			list_add(&vblank->base.link, &dev->vblank_event_list);
+		} else {
+			DRM_ERROR("vblank enable failed ret %d\n", ret);
+			kfree(vblank);
+		}
+	}
+}
+
+static int msm_enable_vblank_event(struct drm_device *dev,
+			struct drm_msm_event_req *req, struct drm_file *file)
+{
+	struct drm_pending_vblank_event *e;
+	int ret = 0;
+	unsigned long flags;
+	struct drm_vblank_crtc *vblank;
+
+	if (WARN_ON(req->index >= dev->num_crtcs))
+		return -EINVAL;
+
+	vblank = &dev->vblank[req->index];
+	e = kzalloc(sizeof(*e), GFP_KERNEL);
+	if (!e)
+		return -ENOMEM;
+
+	e->pipe = req->index;
+	e->base.pid = current->pid;
+	e->event.base.type = DRM_EVENT_VBLANK;
+	e->event.base.length = sizeof(e->event);
+	e->event.user_data = req->client_context;
+	e->base.event = &e->event.base;
+	e->base.file_priv = file;
+	e->base.destroy = msm_vblank_read_cb;
+
+	ret = drm_vblank_get(dev, e->pipe);
+	if (ret) {
+		DRM_ERROR("failed to enable the vblank\n");
+		goto free;
+	}
+
+	spin_lock_irqsave(&dev->event_lock, flags);
+	if (!vblank->enabled) {
+		ret = -EINVAL;
+		goto err_unlock;
+	}
+
+	if (file->event_space < sizeof(e->event)) {
+		ret = -EBUSY;
+		goto err_unlock;
+	}
+	file->event_space -= sizeof(e->event);
+	list_add_tail(&e->base.link, &dev->vblank_event_list);
+err_unlock:
+	spin_unlock_irqrestore(&dev->event_lock, flags);
+	/* on failure, drop the vblank reference taken above */
+	if (ret)
+		drm_vblank_put(dev, e->pipe);
+free:
+	if (ret)
+		kfree(e);
+	return ret;
+}
+
+static int msm_enable_event(struct drm_device *dev,
+			struct drm_msm_event_req *req, struct drm_file *file)
+{
+	int ret = -EINVAL;
+
+	switch (req->event) {
+	case DRM_EVENT_AD:
+	case DRM_EVENT_VBLANK:
+		ret = msm_enable_vblank_event(dev, req, file);
+		break;
+	default:
+		break;
+	}
+	return ret;
+}
+
+static int msm_disable_vblank_event(struct drm_device *dev,
+			    struct drm_msm_event_req *req,
+			    struct drm_file *file)
+{
+	struct drm_pending_vblank_event *e, *t;
+
+	list_for_each_entry_safe(e, t, &dev->vblank_event_list, base.link) {
+		if (e->pipe != req->index || file != e->base.file_priv)
+			continue;
+		list_del(&e->base.link);
+		drm_vblank_put(dev, req->index);
+		kfree(e);
+	}
+	return 0;
+}
+
+static int msm_disable_event(struct drm_device *dev,
+			    struct drm_msm_event_req *req,
+			    struct drm_file *file)
+{
+	int ret = -EINVAL;
+
+	switch (req->event) {
+	case DRM_EVENT_AD:
+	case DRM_EVENT_VBLANK:
+		ret = msm_disable_vblank_event(dev, req, file);
+		break;
+	default:
+		break;
+	}
+	return ret;
+}
+
+static int msm_ioctl_register_event(struct drm_device *dev, void *data,
+				    struct drm_file *file)
+{
+	struct msm_drm_private *priv = dev->dev_private;
+	struct drm_msm_event_req *req_event = data;
+	struct msm_drm_event *client;
+	struct msm_drm_event *v;
+	unsigned long flag = 0;
+	bool dup_request = false;
+	int ret = 0;
+
+	if (msm_event_supported(dev, req_event)) {
+		DRM_ERROR("unsupported event %x object %x object id %d\n",
+			req_event->event, req_event->object_type,
+			req_event->object_id);
+		return -EINVAL;
+	}
+
+	spin_lock_irqsave(&dev->event_lock, flag);
+	list_for_each_entry(v, &priv->client_event_list, base.link) {
+		if (v->base.file_priv != file)
+			continue;
+		if (v->event.type == req_event->event &&
+			v->info.object_id == req_event->object_id) {
+			DRM_ERROR("duplicate request for event %x obj id %d\n",
+				v->event.type, v->info.object_id);
+			dup_request = true;
+			break;
+		}
+	}
+	spin_unlock_irqrestore(&dev->event_lock, flag);
+
+	if (dup_request)
+		return -EINVAL;
+
+	client = kzalloc(sizeof(*client), GFP_KERNEL);
+	if (!client)
+		return -ENOMEM;
+
+	client->base.file_priv = file;
+	client->base.pid = current->pid;
+	client->base.event = &client->event;
+	client->base.destroy = (void (*) (struct drm_pending_event *)) kfree;
+	client->event.type = req_event->event;
+	memcpy(&client->info, req_event, sizeof(client->info));
+
+	spin_lock_irqsave(&dev->event_lock, flag);
+	list_add_tail(&client->base.link, &priv->client_event_list);
+	spin_unlock_irqrestore(&dev->event_lock, flag);
+
+	ret = msm_enable_event(dev, req_event, file);
+	if (ret) {
+		DRM_ERROR("failed to enable event %x object %x object id %d\n",
+			req_event->event, req_event->object_type,
+			req_event->object_id);
+		spin_lock_irqsave(&dev->event_lock, flag);
+		list_del(&client->base.link);
+		spin_unlock_irqrestore(&dev->event_lock, flag);
+		kfree(client);
+	}
+	return ret;
+}
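+
+/*
+ * Editor's sketch (not part of the original patch): registering for vblank
+ * notifications on a CRTC through the ioctl above. The
+ * DRM_IOCTL_MSM_REGISTER_EVENT request name and the struct drm_msm_event_req
+ * fields follow this patch's uapi additions.
+ *
+ *	struct drm_msm_event_req req = {
+ *		.object_id   = crtc_id,
+ *		.object_type = DRM_MODE_OBJECT_CRTC,
+ *		.event       = DRM_EVENT_VBLANK,
+ *	};
+ *	ioctl(drm_fd, DRM_IOCTL_MSM_REGISTER_EVENT, &req);
+ *	// matching events are then read() from drm_fd, each prefixed by
+ *	// struct drm_msm_event_resp (see msm_send_crtc_notification below)
+ */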
+
+static int msm_ioctl_deregister_event(struct drm_device *dev, void *data,
+				      struct drm_file *file)
+{
+	struct msm_drm_private *priv = dev->dev_private;
+	struct drm_msm_event_req *req_event = data;
+	struct msm_drm_event *client = NULL;
+	struct msm_drm_event *v, *vt;
+	unsigned long flag = 0;
+
+	if (msm_event_supported(dev, req_event)) {
+		DRM_ERROR("unsupported event %x object %x object id %d\n",
+			req_event->event, req_event->object_type,
+			req_event->object_id);
+		return -EINVAL;
+	}
+
+	spin_lock_irqsave(&dev->event_lock, flag);
+	msm_disable_event(dev, req_event, file);
+	list_for_each_entry_safe(v, vt, &priv->client_event_list, base.link) {
+		if (v->event.type == req_event->event &&
+		    v->info.object_id == req_event->object_id &&
+		    v->base.file_priv == file) {
+			client = v;
+			list_del(&client->base.link);
+			client->base.destroy(&client->base);
+			break;
+		}
+	}
+	spin_unlock_irqrestore(&dev->event_lock, flag);
+
+	return 0;
+}
+
+static int msm_ioctl_gem_sync(struct drm_device *dev, void *data,
+			     struct drm_file *file)
+{
+	struct drm_msm_gem_sync *arg = data;
+	int i;
+
+	for (i = 0; i < arg->nr_ops; i++) {
+		struct drm_msm_gem_syncop syncop;
+		struct drm_gem_object *obj;
+		int ret;
+		void __user *ptr =
+			(void __user *)(uintptr_t)
+				(arg->ops + (i * sizeof(syncop)));
+
+		ret = copy_from_user(&syncop, ptr, sizeof(syncop));
+		if (ret)
+			return -EFAULT;
+
+		obj = drm_gem_object_lookup(dev, file, syncop.handle);
+		if (!obj)
+			return -ENOENT;
+
+		msm_gem_sync(obj, syncop.op);
+
+		drm_gem_object_unreference_unlocked(obj);
+	}
+
+	return 0;
+}
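+
+/*
+ * Editor's sketch (not part of the original patch): batching cache
+ * maintenance through the ioctl above. The struct names follow the code's
+ * use of drm_msm_gem_sync and drm_msm_gem_syncop; MSM_GEM_SYNC_TO_DEV is a
+ * hypothetical op value used only for illustration.
+ *
+ *	struct drm_msm_gem_syncop ops[2] = {
+ *		{ .handle = bo_a, .op = MSM_GEM_SYNC_TO_DEV },
+ *		{ .handle = bo_b, .op = MSM_GEM_SYNC_TO_DEV },
+ *	};
+ *	struct drm_msm_gem_sync arg = {
+ *		.nr_ops = 2,
+ *		.ops    = (uint64_t)(uintptr_t)ops,
+ *	};
+ *	ioctl(drm_fd, DRM_IOCTL_MSM_GEM_SYNC, &arg);
+ */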
+
+void msm_send_crtc_notification(struct drm_crtc *crtc,
+				struct drm_event *event, u8 *payload)
+{
+	struct drm_device *dev = NULL;
+	struct msm_drm_private *priv = NULL;
+	unsigned long flags;
+	struct msm_drm_event *notify, *v;
+	int len = 0;
+
+	if (!crtc || !event || !event->length || !payload) {
+		DRM_ERROR("err param crtc %pK event %pK len %d payload %pK\n",
+			crtc, event, ((event) ? (event->length) : -1),
+			payload);
+		return;
+	}
+	dev = crtc->dev;
+	priv = (dev) ? dev->dev_private : NULL;
+	if (!dev || !priv) {
+		DRM_ERROR("invalid dev %pK priv %pK\n", dev, priv);
+		return;
+	}
+
+	spin_lock_irqsave(&dev->event_lock, flags);
+	list_for_each_entry(v, &priv->client_event_list, base.link) {
+		if (v->event.type != event->type ||
+			crtc->base.id != v->info.object_id)
+			continue;
+		len = event->length + sizeof(struct drm_msm_event_resp);
+		if (v->base.file_priv->event_space < len) {
+			DRM_ERROR("Insufficient space to notify\n");
+			continue;
+		}
+		notify = kzalloc(len, GFP_ATOMIC);
+		if (!notify)
+			continue;
+		notify->base.file_priv = v->base.file_priv;
+		notify->base.event = &notify->event;
+		notify->base.pid = v->base.pid;
+		notify->base.destroy =
+			(void (*)(struct drm_pending_event *)) kfree;
+		notify->event.type = v->event.type;
+		notify->event.length = len;
+		list_add(&notify->base.link,
+			&notify->base.file_priv->event_list);
+		notify->base.file_priv->event_space -= len;
+		memcpy(&notify->info, &v->info, sizeof(notify->info));
+		memcpy(notify->data, payload, event->length);
+		wake_up_interruptible(&notify->base.file_priv->event_wait);
+	}
+	spin_unlock_irqrestore(&dev->event_lock, flags);
+}
+
+static int msm_ioctl_counter_get(struct drm_device *dev, void *data,
+		struct drm_file *file)
+{
+	struct msm_file_private *ctx = file->driver_priv;
+	struct msm_drm_private *priv = dev->dev_private;
+
+	if (priv->gpu)
+		return msm_gpu_counter_get(priv->gpu, data, ctx);
+
+	return -ENODEV;
+}
+
+static int msm_ioctl_counter_put(struct drm_device *dev, void *data,
+		struct drm_file *file)
+{
+	struct msm_file_private *ctx = file->driver_priv;
+	struct msm_drm_private *priv = dev->dev_private;
+
+	if (priv->gpu)
+		return msm_gpu_counter_put(priv->gpu, data, ctx);
+
+	return -ENODEV;
+}
+
+static int msm_ioctl_counter_read(struct drm_device *dev, void *data,
+		struct drm_file *file)
+{
+	struct msm_drm_private *priv = dev->dev_private;
+
+	if (priv->gpu)
+		return msm_gpu_counter_read(priv->gpu, data);
+
+	return -ENODEV;
+}
+
+static int msm_ioctl_submitqueue_new(struct drm_device *dev, void *data,
+		struct drm_file *file)
+{
+	struct drm_msm_submitqueue *args = data;
+	struct msm_drm_private *priv = dev->dev_private;
+	struct msm_gpu *gpu = priv->gpu;
+
+	if (args->flags & ~MSM_SUBMITQUEUE_FLAGS)
+		return -EINVAL;
+
+	if ((gpu->nr_rings > 1) &&
+		(!file->is_master && args->prio == 0)) {
+		DRM_ERROR("Only DRM master can set highest priority ringbuffer\n");
+		return -EPERM;
+	}
+
+	if (args->flags & MSM_SUBMITQUEUE_BYPASS_QOS_TIMEOUT &&
+		!capable(CAP_SYS_ADMIN)) {
+		DRM_ERROR(
+			"Only CAP_SYS_ADMIN processes can bypass the timer\n");
+		return -EPERM;
+	}
+
+	return msm_submitqueue_create(file->driver_priv, args->prio,
+		args->flags, &args->id);
+}
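+
+/*
+ * Editor's sketch (not part of the original patch): creating a submitqueue
+ * from user space. As enforced above, priority 0 (the highest-priority ring)
+ * is reserved for the DRM master when more than one ring exists, so an
+ * unprivileged client requests a lower priority:
+ *
+ *	struct drm_msm_submitqueue req = {
+ *		.flags = 0,
+ *		.prio  = 1,	// 0 would need DRM master on multi-ring GPUs
+ *	};
+ *	ioctl(drm_fd, DRM_IOCTL_MSM_SUBMITQUEUE_NEW, &req);
+ *	// on success, req.id identifies the queue for later submits
+ */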
+
+static int msm_ioctl_submitqueue_query(struct drm_device *dev, void *data,
+		struct drm_file *file)
+{
+	struct drm_msm_submitqueue_query *args = data;
+	void __user *ptr = (void __user *)(uintptr_t) args->data;
+
+	return msm_submitqueue_query(file->driver_priv, args->id,
+		args->param, ptr, args->len);
+}
+
+static int msm_ioctl_submitqueue_close(struct drm_device *dev, void *data,
+		struct drm_file *file)
+{
+	struct drm_msm_submitqueue *args = data;
+
+	return msm_submitqueue_remove(file->driver_priv, args->id);
+}
+
+int msm_release(struct inode *inode, struct file *filp)
+{
+	struct drm_file *file_priv = filp->private_data;
+	struct drm_minor *minor = file_priv->minor;
+	struct drm_device *dev = minor->dev;
+	struct msm_drm_private *priv = dev->dev_private;
+	struct msm_drm_event *v, *vt;
+	unsigned long flags;
+
+	spin_lock_irqsave(&dev->event_lock, flags);
+	list_for_each_entry_safe(v, vt, &priv->client_event_list, base.link) {
+		if (v->base.file_priv != file_priv)
+			continue;
+		list_del(&v->base.link);
+		msm_disable_event(dev, &v->info, file_priv);
+		v->base.destroy(&v->base);
+	}
+	spin_unlock_irqrestore(&dev->event_lock, flags);
+
+	return drm_release(inode, filp);
+}
+
 static const struct drm_ioctl_desc msm_ioctls[] = {
 	DRM_IOCTL_DEF_DRV(MSM_GET_PARAM,    msm_ioctl_get_param,    DRM_AUTH|DRM_RENDER_ALLOW),
 	DRM_IOCTL_DEF_DRV(MSM_GEM_NEW,      msm_ioctl_gem_new,      DRM_AUTH|DRM_RENDER_ALLOW),
@@ -935,6 +1796,27 @@
 	DRM_IOCTL_DEF_DRV(MSM_GEM_CPU_FINI, msm_ioctl_gem_cpu_fini, DRM_AUTH|DRM_RENDER_ALLOW),
 	DRM_IOCTL_DEF_DRV(MSM_GEM_SUBMIT,   msm_ioctl_gem_submit,   DRM_AUTH|DRM_RENDER_ALLOW),
 	DRM_IOCTL_DEF_DRV(MSM_WAIT_FENCE,   msm_ioctl_wait_fence,   DRM_AUTH|DRM_RENDER_ALLOW),
+	DRM_IOCTL_DEF_DRV(SDE_WB_CONFIG, sde_wb_config, DRM_UNLOCKED|DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(MSM_REGISTER_EVENT,  msm_ioctl_register_event,
+			  DRM_UNLOCKED|DRM_CONTROL_ALLOW),
+	DRM_IOCTL_DEF_DRV(MSM_DEREGISTER_EVENT,  msm_ioctl_deregister_event,
+			  DRM_UNLOCKED|DRM_CONTROL_ALLOW),
+	DRM_IOCTL_DEF_DRV(MSM_COUNTER_GET, msm_ioctl_counter_get,
+			  DRM_AUTH|DRM_RENDER_ALLOW),
+	DRM_IOCTL_DEF_DRV(MSM_COUNTER_PUT, msm_ioctl_counter_put,
+			  DRM_AUTH|DRM_RENDER_ALLOW),
+	DRM_IOCTL_DEF_DRV(MSM_COUNTER_READ, msm_ioctl_counter_read,
+			  DRM_AUTH|DRM_RENDER_ALLOW),
+	DRM_IOCTL_DEF_DRV(MSM_GEM_SYNC, msm_ioctl_gem_sync,
+			  DRM_AUTH|DRM_RENDER_ALLOW),
+	DRM_IOCTL_DEF_DRV(MSM_GEM_SVM_NEW, msm_ioctl_gem_svm_new,
+			  DRM_AUTH|DRM_RENDER_ALLOW),
+	DRM_IOCTL_DEF_DRV(MSM_SUBMITQUEUE_NEW,  msm_ioctl_submitqueue_new,
+			  DRM_AUTH|DRM_RENDER_ALLOW),
+	DRM_IOCTL_DEF_DRV(MSM_SUBMITQUEUE_CLOSE, msm_ioctl_submitqueue_close,
+			  DRM_AUTH|DRM_RENDER_ALLOW),
+	DRM_IOCTL_DEF_DRV(MSM_SUBMITQUEUE_QUERY, msm_ioctl_submitqueue_query,
+			  DRM_AUTH|DRM_RENDER_ALLOW),
 };
 
 static const struct vm_operations_struct vm_ops = {
@@ -946,7 +1828,7 @@
 static const struct file_operations fops = {
 	.owner              = THIS_MODULE,
 	.open               = drm_open,
-	.release            = drm_release,
+	.release            = msm_release,
 	.unlocked_ioctl     = drm_ioctl,
 #ifdef CONFIG_COMPAT
 	.compat_ioctl       = drm_compat_ioctl,
@@ -968,6 +1850,7 @@
 	.unload             = msm_unload,
 	.open               = msm_open,
 	.preclose           = msm_preclose,
+	.postclose          = msm_postclose,
 	.lastclose          = msm_lastclose,
 	.set_busid          = drm_platform_set_busid,
 	.irq_handler        = msm_irq,
@@ -999,9 +1882,9 @@
 	.debugfs_cleanup    = msm_debugfs_cleanup,
 #endif
 	.ioctls             = msm_ioctls,
-	.num_ioctls         = DRM_MSM_NUM_IOCTLS,
+	.num_ioctls         = ARRAY_SIZE(msm_ioctls),
 	.fops               = &fops,
-	.name               = "msm",
+	.name               = "msm_drm",
 	.desc               = "MSM Snapdragon DRM",
 	.date               = "20130625",
 	.major              = 1,
@@ -1011,8 +1894,75 @@
 #ifdef CONFIG_PM_SLEEP
 static int msm_pm_suspend(struct device *dev)
 {
-	struct drm_device *ddev = dev_get_drvdata(dev);
+	struct drm_device *ddev;
+	struct drm_modeset_acquire_ctx *ctx;
+	struct drm_connector *conn;
+	struct drm_atomic_state *state;
+	struct drm_crtc_state *crtc_state;
+	struct msm_drm_private *priv;
+	int ret = 0;
+
+	if (!dev)
+		return -EINVAL;
+
+	ddev = dev_get_drvdata(dev);
+	if (!ddev || !ddev->dev_private)
+		return -EINVAL;
+
+	priv = ddev->dev_private;
+	SDE_EVT32(0);
+
+	/* acquire modeset lock(s) */
+	drm_modeset_lock_all(ddev);
+	ctx = ddev->mode_config.acquire_ctx;
+
+	/* save current state for resume */
+	if (priv->suspend_state)
+		drm_atomic_state_free(priv->suspend_state);
+	priv->suspend_state = drm_atomic_helper_duplicate_state(ddev, ctx);
+	if (IS_ERR_OR_NULL(priv->suspend_state)) {
+		DRM_ERROR("failed to back up suspend state\n");
+		priv->suspend_state = NULL;
+		goto unlock;
+	}
+
+	/* create atomic state to disable all CRTCs */
+	state = drm_atomic_state_alloc(ddev);
+	if (IS_ERR_OR_NULL(state)) {
+		DRM_ERROR("failed to allocate crtc disable state\n");
+		goto unlock;
+	}
+
+	state->acquire_ctx = ctx;
+	drm_for_each_connector(conn, ddev) {
+		if (!conn->state || !conn->state->crtc ||
+				conn->dpms != DRM_MODE_DPMS_ON)
+			continue;
+
+		/* force CRTC to be inactive */
+		crtc_state = drm_atomic_get_crtc_state(state,
+				conn->state->crtc);
+		if (IS_ERR_OR_NULL(crtc_state)) {
+			DRM_ERROR("failed to get crtc %d state\n",
+					conn->state->crtc->base.id);
+			drm_atomic_state_free(state);
+			goto unlock;
+		}
+		crtc_state->active = false;
+	}
+
+	/* commit the "disable all" state */
+	ret = drm_atomic_commit(state);
+	if (ret < 0) {
+		DRM_ERROR("failed to disable crtcs, %d\n", ret);
+		drm_atomic_state_free(state);
+	}
 
+unlock:
+	drm_modeset_unlock_all(ddev);
+
+	/* disable hot-plug polling */
 	drm_kms_helper_poll_disable(ddev);
 
 	return 0;
@@ -1020,8 +1970,38 @@
 
 static int msm_pm_resume(struct device *dev)
 {
-	struct drm_device *ddev = dev_get_drvdata(dev);
+	struct drm_device *ddev;
+	struct msm_drm_private *priv;
+	int ret;
+
+	if (!dev)
+		return -EINVAL;
+
+	ddev = dev_get_drvdata(dev);
+	if (!ddev || !ddev->dev_private)
+		return -EINVAL;
+
+	priv = ddev->dev_private;
+
+	SDE_EVT32(priv->suspend_state != NULL);
 
+	drm_mode_config_reset(ddev);
+
+	drm_modeset_lock_all(ddev);
+
+	if (priv->suspend_state) {
+		priv->suspend_state->acquire_ctx =
+			ddev->mode_config.acquire_ctx;
+		ret = drm_atomic_commit(priv->suspend_state);
+		if (ret < 0) {
+			DRM_ERROR("failed to restore state, %d\n", ret);
+			drm_atomic_state_free(priv->suspend_state);
+		}
+		priv->suspend_state = NULL;
+	}
+	drm_modeset_unlock_all(ddev);
+
+	/* enable hot-plug polling */
 	drm_kms_helper_poll_enable(ddev);
 
 	return 0;
@@ -1032,6 +2012,27 @@
 	SET_SYSTEM_SLEEP_PM_OPS(msm_pm_suspend, msm_pm_resume)
 };
 
+static int msm_drm_bind(struct device *dev)
+{
+	int ret;
+
+	ret = drm_platform_init(&msm_driver, to_platform_device(dev));
+	if (ret)
+		DRM_ERROR("drm_platform_init failed: %d\n", ret);
+
+	return ret;
+}
+
+static void msm_drm_unbind(struct device *dev)
+{
+	drm_put_dev(platform_get_drvdata(to_platform_device(dev)));
+}
+
+static const struct component_master_ops msm_drm_ops = {
+	.bind = msm_drm_bind,
+	.unbind = msm_drm_unbind,
+};
+
 /*
  * Componentized driver support:
  */
@@ -1063,27 +2064,31 @@
 
 	return 0;
 }
-#else
-static int compare_dev(struct device *dev, void *data)
+
+static int msm_add_master_component(struct device *dev,
+					struct component_match *match)
 {
-	return dev == data;
+	int ret;
+
+	ret = component_master_add_with_match(dev, &msm_drm_ops, match);
+	if (ret)
+		DRM_ERROR("component add match failed: %d\n", ret);
+
+	return ret;
 }
-#endif
 
-static int msm_drm_bind(struct device *dev)
+#else
+static int compare_dev(struct device *dev, void *data)
 {
-	return drm_platform_init(&msm_driver, to_platform_device(dev));
+	return dev == data;
 }
 
-static void msm_drm_unbind(struct device *dev)
+static int msm_add_master_component(struct device *dev,
+					struct component_match *match)
 {
-	drm_put_dev(platform_get_drvdata(to_platform_device(dev)));
+	return 0;
 }
-
-static const struct component_master_ops msm_drm_ops = {
-	.bind = msm_drm_bind,
-	.unbind = msm_drm_unbind,
-};
+#endif
 
 /*
  * Platform driver:
@@ -1091,10 +2096,14 @@
 
 static int msm_pdev_probe(struct platform_device *pdev)
 {
+	int ret;
 	struct component_match *match = NULL;
+
 #ifdef CONFIG_OF
 	add_components(&pdev->dev, &match, "connectors");
+#ifndef CONFIG_QCOM_KGSL
 	add_components(&pdev->dev, &match, "gpus");
+#endif
 #else
 	/* For non-DT case, it kinda sucks.  We don't actually have a way
 	 * to know whether or not we are waiting for certain devices (or if
@@ -1121,15 +2130,22 @@
 		component_match_add(&pdev->dev, &match, compare_dev, dev);
 	}
 #endif
+	/* on all devices that I am aware of, IOMMUs which can map
+	 * any address the CPU can see are used:
+	 */
+	ret = dma_set_mask_and_coherent(&pdev->dev, ~0);
+	if (ret)
+		return ret;
+
+	ret = msm_add_master_component(&pdev->dev, match);
 
-	pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
-	return component_master_add_with_match(&pdev->dev, &msm_drm_ops, match);
+	return ret;
 }
 
 static int msm_pdev_remove(struct platform_device *pdev)
 {
+	msm_drm_unbind(&pdev->dev);
 	component_master_del(&pdev->dev, &msm_drm_ops);
-
 	return 0;
 }
 
@@ -1138,9 +2154,31 @@
 	{ }
 };
 
+static void msm_pdev_shutdown(struct platform_device *pdev)
+{
+	struct drm_device *ddev = platform_get_drvdata(pdev);
+	struct msm_drm_private *priv = NULL;
+
+	if (!ddev) {
+		DRM_ERROR("invalid drm device node\n");
+		return;
+	}
+
+	priv = ddev->dev_private;
+	if (!priv) {
+		DRM_ERROR("invalid msm drm private node\n");
+		return;
+	}
+
+	msm_lastclose(ddev);
+
+	/* set this after lastclose to allow kickoff from lastclose */
+	priv->shutdown_in_progress = true;
+}
+
 static const struct of_device_id dt_match[] = {
 	{ .compatible = "qcom,mdp" },      /* mdp4 */
-	{ .compatible = "qcom,mdss_mdp" }, /* mdp5 */
+	{ .compatible = "qcom,sde-kms" },  /* sde  */
 	{}
 };
 MODULE_DEVICE_TABLE(of, dt_match);
@@ -1148,17 +2186,30 @@
 static struct platform_driver msm_platform_driver = {
 	.probe      = msm_pdev_probe,
 	.remove     = msm_pdev_remove,
+	.shutdown   = msm_pdev_shutdown,
 	.driver     = {
-		.name   = "msm",
+		.name   = "msm_drm",
 		.of_match_table = dt_match,
 		.pm     = &msm_pm_ops,
+		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
 	},
 	.id_table   = msm_id,
 };
 
+#ifdef CONFIG_QCOM_KGSL
+void __init adreno_register(void)
+{
+}
+
+void __exit adreno_unregister(void)
+{
+}
+#endif
+
 static int __init msm_drm_register(void)
 {
 	DBG("init");
+	msm_smmu_driver_init();
 	msm_dsi_register();
 	msm_edp_register();
 	hdmi_register();
@@ -1174,6 +2225,7 @@
 	adreno_unregister();
 	msm_edp_unregister();
 	msm_dsi_unregister();
+	msm_smmu_driver_cleanup();
 }
 
 module_init(msm_drm_register);
diff -ruw linux-4.4.115/drivers/gpu/drm/msm/msm_drv.h linux-4.4.115-fbx/drivers/gpu/drm/msm/msm_drv.h
--- linux-4.4.115/drivers/gpu/drm/msm/msm_drv.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/gpu/drm/msm/msm_drv.h	2019-10-29 09:26:23.633203080 +0100
@@ -1,4 +1,5 @@
 /*
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
  * Copyright (C) 2013 Red Hat
  * Author: Rob Clark <robdclark@gmail.com>
  *
@@ -31,7 +32,11 @@
 #include <linux/iommu.h>
 #include <linux/types.h>
 #include <linux/of_graph.h>
+#include <linux/of_device.h>
+#include <linux/sde_io_util.h>
+#include <linux/hashtable.h>
 #include <asm/sizes.h>
+#include <linux/kthread.h>
 
 #ifndef CONFIG_OF
 #include <mach/board.h>
@@ -48,40 +53,231 @@
 #include <drm/msm_drm.h>
 #include <drm/drm_gem.h>
 
+#include "sde_power_handle.h"
+
+#define GET_MAJOR_REV(rev)		((rev) >> 28)
+#define GET_MINOR_REV(rev)		(((rev) >> 16) & 0xFFF)
+#define GET_STEP_REV(rev)		((rev) & 0xFFFF)
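+
+/*
+ * Editor's note (not part of the original patch): example decoding of a
+ * hypothetical hardware revision value 0x30020001 with the macros above:
+ *
+ *	GET_MAJOR_REV(0x30020001);	// 3 (bits 31:28)
+ *	GET_MINOR_REV(0x30020001);	// 2 (bits 27:16)
+ *	GET_STEP_REV(0x30020001);	// 1 (bits 15:0)
+ */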
+
 struct msm_kms;
 struct msm_gpu;
 struct msm_mmu;
 struct msm_rd_state;
 struct msm_perf_state;
 struct msm_gem_submit;
+struct msm_gem_address_space;
+struct msm_gem_vma;
 
-#define NUM_DOMAINS 2    /* one for KMS, then one per gpu core (?) */
+#define NUM_DOMAINS    4    /* one for KMS, then one per gpu core (?) */
+#define MAX_CRTCS      8
+#define MAX_PLANES     12
+#define MAX_ENCODERS   8
+#define MAX_BRIDGES    8
+#define MAX_CONNECTORS 8
 
 struct msm_file_private {
-	/* currently we don't do anything useful with this.. but when
-	 * per-context address spaces are supported we'd keep track of
-	 * the context's page-tables here.
-	 */
-	int dummy;
+	struct msm_gem_address_space *aspace;
+	struct list_head counters;
+	rwlock_t queuelock;
+	struct list_head submitqueues;
+	int queueid;
 };
 
 enum msm_mdp_plane_property {
-	PLANE_PROP_ZPOS,
+	/* blob properties, always put these first */
+	PLANE_PROP_SCALER_V1,
+	PLANE_PROP_SCALER_V2,
+	PLANE_PROP_CSC_V1,
+	PLANE_PROP_INFO,
+	PLANE_PROP_SCALER_LUT_ED,
+	PLANE_PROP_SCALER_LUT_CIR,
+	PLANE_PROP_SCALER_LUT_SEP,
+	PLANE_PROP_SKIN_COLOR,
+	PLANE_PROP_SKY_COLOR,
+	PLANE_PROP_FOLIAGE_COLOR,
+
+	/* # of blob properties */
+	PLANE_PROP_BLOBCOUNT,
+
+	/* range properties */
+	PLANE_PROP_ZPOS = PLANE_PROP_BLOBCOUNT,
 	PLANE_PROP_ALPHA,
-	PLANE_PROP_PREMULTIPLIED,
-	PLANE_PROP_MAX_NUM
+	PLANE_PROP_COLOR_FILL,
+	PLANE_PROP_H_DECIMATE,
+	PLANE_PROP_V_DECIMATE,
+	PLANE_PROP_INPUT_FENCE,
+	PLANE_PROP_HUE_ADJUST,
+	PLANE_PROP_SATURATION_ADJUST,
+	PLANE_PROP_VALUE_ADJUST,
+	PLANE_PROP_CONTRAST_ADJUST,
+
+	/* enum/bitmask properties */
+	PLANE_PROP_ROTATION,
+	PLANE_PROP_BLEND_OP,
+	PLANE_PROP_SRC_CONFIG,
+	PLANE_PROP_FB_TRANSLATION_MODE,
+
+	/* total # of properties */
+	PLANE_PROP_COUNT
+};
+
+enum msm_mdp_crtc_property {
+	CRTC_PROP_INFO,
+
+	/* # of blob properties */
+	CRTC_PROP_BLOBCOUNT,
+
+	/* range properties */
+	CRTC_PROP_INPUT_FENCE_TIMEOUT = CRTC_PROP_BLOBCOUNT,
+	CRTC_PROP_OUTPUT_FENCE,
+	CRTC_PROP_OUTPUT_FENCE_OFFSET,
+	CRTC_PROP_CORE_CLK,
+	CRTC_PROP_CORE_AB,
+	CRTC_PROP_CORE_IB,
+	CRTC_PROP_SECURITY_LEVEL,
+
+	/* total # of properties */
+	CRTC_PROP_COUNT
+};
+
+enum msm_mdp_conn_property {
+	/* blob properties, always put these first */
+	CONNECTOR_PROP_SDE_INFO,
+	CONNECTOR_PROP_HDR_INFO,
+	CONNECTOR_PROP_HDR_CONTROL,
+
+	/* # of blob properties */
+	CONNECTOR_PROP_BLOBCOUNT,
+
+	/* range properties */
+	CONNECTOR_PROP_OUT_FB = CONNECTOR_PROP_BLOBCOUNT,
+	CONNECTOR_PROP_RETIRE_FENCE,
+	CONNECTOR_PROP_DST_X,
+	CONNECTOR_PROP_DST_Y,
+	CONNECTOR_PROP_DST_W,
+	CONNECTOR_PROP_DST_H,
+	CONNECTOR_PROP_PLL_DELTA,
+	CONNECTOR_PROP_PLL_ENABLE,
+	CONNECTOR_PROP_HDCP_VERSION,
+
+	/* enum/bitmask properties */
+	CONNECTOR_PROP_TOPOLOGY_NAME,
+	CONNECTOR_PROP_TOPOLOGY_CONTROL,
+	CONNECTOR_PROP_LP,
+	CONNECTOR_PROP_HPD_OFF,
+
+	/* total # of properties */
+	CONNECTOR_PROP_COUNT
 };
 
 struct msm_vblank_ctrl {
-	struct work_struct work;
+	struct kthread_work work;
 	struct list_head event_list;
 	spinlock_t lock;
 };
 
+#define MAX_H_TILES_PER_DISPLAY 2
+
+/**
+ * enum msm_display_compression - compression method used for pixel stream
+ * @MSM_DISPLAY_COMPRESS_NONE:     Pixel data is not compressed
+ * @MSM_DISPLAY_COMPRESS_DSC:      DSC compression is used
+ * @MSM_DISPLAY_COMPRESS_FBC:      FBC compression is used
+ */
+enum msm_display_compression {
+	MSM_DISPLAY_COMPRESS_NONE,
+	MSM_DISPLAY_COMPRESS_DSC,
+	MSM_DISPLAY_COMPRESS_FBC,
+};
+
+/**
+ * enum msm_display_caps - features/capabilities supported by displays
+ * @MSM_DISPLAY_CAP_VID_MODE:           Video or "active" mode supported
+ * @MSM_DISPLAY_CAP_CMD_MODE:           Command mode supported
+ * @MSM_DISPLAY_CAP_HOT_PLUG:           Hot plug detection supported
+ * @MSM_DISPLAY_CAP_EDID:               EDID supported
+ */
+enum msm_display_caps {
+	MSM_DISPLAY_CAP_VID_MODE	= BIT(0),
+	MSM_DISPLAY_CAP_CMD_MODE	= BIT(1),
+	MSM_DISPLAY_CAP_HOT_PLUG	= BIT(2),
+	MSM_DISPLAY_CAP_EDID		= BIT(3),
+};
+
+/**
+ * struct msm_display_info - defines display properties
+ * @intf_type:          DRM_MODE_CONNECTOR_ display type
+ * @capabilities:       Bitmask of display flags
+ * @num_of_h_tiles:     Number of horizontal tiles in case of split interface
+ * @h_tile_instance:    Controller instance used per tile. Number of elements is
+ *                      based on num_of_h_tiles
+ * @is_connected:       Set to true if display is connected
+ * @width_mm:           Physical width
+ * @height_mm:          Physical height
+ * @max_width:          Max width of display. In case of hot pluggable display
+ *                      this is max width supported by controller
+ * @max_height:         Max height of display. In case of hot pluggable display
+ *                      this is max height supported by controller
+ * @compression:        Compression supported by the display
+ */
+struct msm_display_info {
+	int intf_type;
+	uint32_t capabilities;
+
+	uint32_t num_of_h_tiles;
+	uint32_t h_tile_instance[MAX_H_TILES_PER_DISPLAY];
+
+	bool is_connected;
+
+	unsigned int width_mm;
+	unsigned int height_mm;
+
+	uint32_t max_width;
+	uint32_t max_height;
+
+	enum msm_display_compression compression;
+};
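+
+/*
+ * Editor's sketch (not part of the original patch): a purely illustrative
+ * msm_display_info for a hypothetical dual-DSI split panel, using the
+ * fields documented above:
+ *
+ *	struct msm_display_info info = {
+ *		.intf_type       = DRM_MODE_CONNECTOR_DSI,
+ *		.capabilities    = MSM_DISPLAY_CAP_VID_MODE,
+ *		.num_of_h_tiles  = 2,		// split across two controllers
+ *		.h_tile_instance = { 0, 1 },
+ *		.is_connected    = true,
+ *		.width_mm        = 68,
+ *		.height_mm       = 121,
+ *		.max_width       = 1440,
+ *		.max_height      = 2560,
+ *		.compression     = MSM_DISPLAY_COMPRESS_NONE,
+ *	};
+ */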
+
+/**
+ * struct msm_display_kickoff_params - info for display features at kickoff
+ * @hdr_ctrl: HDR control info passed from userspace
+ */
+struct msm_display_kickoff_params {
+	struct drm_msm_ext_panel_hdr_ctrl *hdr_ctrl;
+};
+
+/**
+ * struct msm_drm_event - defines custom event notification struct
+ * @base: base object required for event notification by DRM framework.
+ * @event: event object required for event notification by DRM framework.
+ * @info: information about the DRM object for which events have been
+ *        requested.
+ * @data: memory location which contains response payload for event.
+ */
+struct msm_drm_event {
+	struct drm_pending_event base;
+	struct drm_event event;
+	struct drm_msm_event_req info;
+	u8 data[];
+};
+
+/* Commit thread specific structure */
+struct msm_drm_commit {
+	struct drm_device *dev;
+	struct task_struct *thread;
+	unsigned int crtc_id;
+	struct kthread_worker worker;
+};
+
+#define MSM_GPU_MAX_RINGS 4
+
 struct msm_drm_private {
 
 	struct msm_kms *kms;
 
+	struct sde_power_handle phandle;
+	struct sde_power_client *pclient;
+
 	/* subordinate devices, if present: */
 	struct platform_device *gpu_pdev;
 
@@ -101,11 +297,11 @@
 
 	/* when we have more than one 'msm_gpu' these need to be an array: */
 	struct msm_gpu *gpu;
-	struct msm_file_private *lastctx;
 
 	struct drm_fb_helper *fbdev;
 
-	uint32_t next_fence, completed_fence;
+	uint32_t completed_fence[MSM_GPU_MAX_RINGS];
+
 	wait_queue_head_t fence_event;
 
 	struct msm_rd_state *rd;
@@ -123,27 +319,43 @@
 	uint32_t pending_crtcs;
 	wait_queue_head_t pending_crtcs_event;
 
-	/* registered MMUs: */
-	unsigned int num_mmus;
-	struct msm_mmu *mmus[NUM_DOMAINS];
+	/* Registered address spaces. Currently this is fixed per number of
+	 * IOMMUs, i.e. one for the display block and one for the GPU block.
+	 * Eventually, to do per-process GPU pagetables, we'll want one of
+	 * these per process.
+	 */
+	unsigned int num_aspaces;
+	struct msm_gem_address_space *aspace[NUM_DOMAINS];
 
 	unsigned int num_planes;
-	struct drm_plane *planes[8];
+	struct drm_plane *planes[MAX_PLANES];
 
 	unsigned int num_crtcs;
-	struct drm_crtc *crtcs[8];
+	struct drm_crtc *crtcs[MAX_CRTCS];
+
+	struct msm_drm_commit disp_thread[MAX_CRTCS];
 
 	unsigned int num_encoders;
-	struct drm_encoder *encoders[8];
+	struct drm_encoder *encoders[MAX_ENCODERS];
 
 	unsigned int num_bridges;
-	struct drm_bridge *bridges[8];
+	struct drm_bridge *bridges[MAX_BRIDGES];
 
 	unsigned int num_connectors;
-	struct drm_connector *connectors[8];
+	struct drm_connector *connectors[MAX_CONNECTORS];
+
+	/* hash to store mm_struct to msm_mmu_notifier mappings */
+	DECLARE_HASHTABLE(mn_hash, 7);
+	/* protects mn_hash and the msm_mmu_notifier for the process */
+	struct mutex mn_lock;
 
 	/* Properties */
-	struct drm_property *plane_property[PLANE_PROP_MAX_NUM];
+	struct drm_property *plane_property[PLANE_PROP_COUNT];
+	struct drm_property *crtc_property[CRTC_PROP_COUNT];
+	struct drm_property *conn_property[CONNECTOR_PROP_COUNT];
+
+	/* Color processing properties for the crtc */
+	struct drm_property **cp_property;
 
 	/* VRAM carveout, used when no IOMMU: */
 	struct {
@@ -153,15 +365,50 @@
 		 * and position mm_node->start is in # of pages:
 		 */
 		struct drm_mm mm;
+		spinlock_t lock; /* Protects drm_mm node allocation/removal */
 	} vram;
 
 	struct msm_vblank_ctrl vblank_ctrl;
+
+	/* saved atomic state during system suspend */
+	struct drm_atomic_state *suspend_state;
+
+	/* list of clients waiting for events */
+	struct list_head client_event_list;
+
+	/* update the flag when msm driver receives shutdown notification */
+	bool shutdown_in_progress;
 };
 
 struct msm_format {
 	uint32_t pixel_format;
 };
 
+/*
+ * Some GPU targets can support multiple ringbuffers and preempt between them.
+ * In order to do this without massive API changes we will steal two bits from
+ * the top of the fence and use them to identify the ringbuffer (0x00000001 for
+ * ring 0, 0x40000001 for ring 1, 0x80000001 for ring 2, etc). If you are going
+ * to do a fence comparison you have to make sure you are only comparing
+ * against fences from the same ring, but since fences within a ringbuffer are
+ * still contiguous you can still use straight comparisons (i.e. 0x40000001 is
+ * older than 0x40000002). Mathematically there will be 0x3FFFFFFF timestamps
+ * per ring, or ~103 days at 120 interrupts per second (two interrupts per
+ * frame at 60 FPS).
+ */
+#define FENCE_RING(_fence) ((_fence >> 30) & 3)
+#define FENCE(_ring, _fence) ((((_ring) & 3) << 30) | ((_fence) & 0x3FFFFFFF))
+
+static inline bool COMPARE_FENCE_LTE(uint32_t a, uint32_t b)
+{
+	return ((FENCE_RING(a) == FENCE_RING(b)) && a <= b);
+}
+
+static inline bool COMPARE_FENCE_LT(uint32_t a, uint32_t b)
+{
+	return ((FENCE_RING(a) == FENCE_RING(b)) && a < b);
+}
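+
+/*
+ * Editor's note (not part of the original patch): a worked example of the
+ * encoding described above, using only the macros and helpers defined here:
+ *
+ *	uint32_t a = FENCE(1, 100);	// 0x40000064: ring 1, timestamp 100
+ *	uint32_t b = FENCE(1, 101);	// 0x40000065: ring 1, timestamp 101
+ *	uint32_t c = FENCE(2, 100);	// 0x80000064: ring 2, timestamp 100
+ *
+ *	FENCE_RING(a);			// 1
+ *	COMPARE_FENCE_LT(a, b);		// true: same ring, 100 < 101
+ *	COMPARE_FENCE_LT(a, c);		// false: fences on different rings
+ *					// never compare as ordered
+ */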
+
 /* callback from wq once fence has passed: */
 struct msm_fence_cb {
 	struct work_struct work;
@@ -176,19 +423,52 @@
 		(_cb)->func = _func;                         \
 	} while (0)
 
-int msm_atomic_check(struct drm_device *dev,
-		     struct drm_atomic_state *state);
+static inline bool msm_is_suspend_state(struct drm_device *dev)
+{
+	if (!dev || !dev->dev_private)
+		return false;
+
+	return ((struct msm_drm_private *)dev->dev_private)->suspend_state !=
+		NULL;
+}
+
 int msm_atomic_commit(struct drm_device *dev,
 		struct drm_atomic_state *state, bool async);
 
-int msm_register_mmu(struct drm_device *dev, struct msm_mmu *mmu);
-
 int msm_wait_fence(struct drm_device *dev, uint32_t fence,
 		ktime_t *timeout, bool interruptible);
 int msm_queue_fence_cb(struct drm_device *dev,
 		struct msm_fence_cb *cb, uint32_t fence);
 void msm_update_fence(struct drm_device *dev, uint32_t fence);
 
+void msm_gem_unmap_vma(struct msm_gem_address_space *aspace,
+		struct msm_gem_vma *vma, struct sg_table *sgt,
+		void *priv, bool invalidated);
+int msm_gem_map_vma(struct msm_gem_address_space *aspace,
+		struct msm_gem_vma *vma, struct sg_table *sgt,
+		void *priv, unsigned int flags);
+int msm_gem_reserve_iova(struct msm_gem_address_space *aspace,
+		struct msm_gem_vma *domain,
+		uint64_t hostptr, uint64_t size);
+void msm_gem_release_iova(struct msm_gem_address_space *aspace,
+		struct msm_gem_vma *vma);
+
+void msm_gem_address_space_put(struct msm_gem_address_space *aspace);
+
+/* For GPU and legacy display */
+struct msm_gem_address_space *
+msm_gem_address_space_create(struct device *dev, struct iommu_domain *domain,
+		int type, const char *name);
+struct msm_gem_address_space *
+msm_gem_address_space_create_instance(struct msm_mmu *parent, const char *name,
+		uint64_t start, uint64_t end);
+
+/* For SDE  display */
+struct msm_gem_address_space *
+msm_gem_smmu_address_space_create(struct device *dev, struct msm_mmu *mmu,
+		const char *name);
+
+void msm_gem_submit_free(struct msm_gem_submit *submit);
 int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
 		struct drm_file *file);
 
@@ -197,13 +477,14 @@
 int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma);
 int msm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
 uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj);
-int msm_gem_get_iova_locked(struct drm_gem_object *obj, int id,
-		uint32_t *iova);
-int msm_gem_get_iova(struct drm_gem_object *obj, int id, uint32_t *iova);
-uint32_t msm_gem_iova(struct drm_gem_object *obj, int id);
+int msm_gem_get_iova(struct drm_gem_object *obj,
+		struct msm_gem_address_space *aspace, uint64_t *iova);
+uint64_t msm_gem_iova(struct drm_gem_object *obj,
+		struct msm_gem_address_space *aspace);
 struct page **msm_gem_get_pages(struct drm_gem_object *obj);
 void msm_gem_put_pages(struct drm_gem_object *obj);
-void msm_gem_put_iova(struct drm_gem_object *obj, int id);
+void msm_gem_put_iova(struct drm_gem_object *obj,
+		struct msm_gem_address_space *aspace);
 int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
 		struct drm_mode_create_dumb *args);
 int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
@@ -217,7 +498,6 @@
 		struct dma_buf_attachment *attach, struct sg_table *sg);
 int msm_gem_prime_pin(struct drm_gem_object *obj);
 void msm_gem_prime_unpin(struct drm_gem_object *obj);
-void *msm_gem_vaddr_locked(struct drm_gem_object *obj);
 void *msm_gem_vaddr(struct drm_gem_object *obj);
 int msm_gem_queue_inactive_cb(struct drm_gem_object *obj,
 		struct msm_fence_cb *cb);
@@ -232,12 +512,29 @@
 		uint32_t size, uint32_t flags, uint32_t *handle);
 struct drm_gem_object *msm_gem_new(struct drm_device *dev,
 		uint32_t size, uint32_t flags);
+struct drm_gem_object *msm_gem_new_locked(struct drm_device *dev,
+		uint32_t size, uint32_t flags);
 struct drm_gem_object *msm_gem_import(struct drm_device *dev,
-		uint32_t size, struct sg_table *sgt);
-
-int msm_framebuffer_prepare(struct drm_framebuffer *fb, int id);
-void msm_framebuffer_cleanup(struct drm_framebuffer *fb, int id);
-uint32_t msm_framebuffer_iova(struct drm_framebuffer *fb, int id, int plane);
+		uint32_t size, struct sg_table *sgt, u32 flags);
+void msm_gem_sync(struct drm_gem_object *obj, u32 op);
+int msm_gem_svm_new_handle(struct drm_device *dev, struct drm_file *file,
+		uint64_t hostptr, uint64_t size,
+		uint32_t flags, uint32_t *handle);
+struct drm_gem_object *msm_gem_svm_new(struct drm_device *dev,
+		struct drm_file *file, uint64_t hostptr,
+		uint64_t size, uint32_t flags);
+void *msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
+		uint32_t flags, struct msm_gem_address_space *aspace,
+		struct drm_gem_object **bo, uint64_t *iova);
+void *msm_gem_kernel_new_locked(struct drm_device *dev, uint32_t size,
+		uint32_t flags, struct msm_gem_address_space *aspace,
+		struct drm_gem_object **bo, uint64_t *iova);
+int msm_framebuffer_prepare(struct drm_framebuffer *fb,
+		struct msm_gem_address_space *aspace);
+void msm_framebuffer_cleanup(struct drm_framebuffer *fb,
+		struct msm_gem_address_space *aspace);
+uint32_t msm_framebuffer_iova(struct drm_framebuffer *fb,
+		struct msm_gem_address_space *aspace, int plane);
 struct drm_gem_object *msm_framebuffer_bo(struct drm_framebuffer *fb, int plane);
 const struct msm_format *msm_framebuffer_format(struct drm_framebuffer *fb);
 struct drm_framebuffer *msm_framebuffer_init(struct drm_device *dev,
@@ -247,6 +544,19 @@
 
 struct drm_fb_helper *msm_fbdev_init(struct drm_device *dev);
 
+struct msm_gpu_submitqueue;
+int msm_submitqueue_init(struct msm_file_private *ctx);
+struct msm_gpu_submitqueue *msm_submitqueue_get(struct msm_file_private *ctx,
+		u32 id);
+int msm_submitqueue_create(struct msm_file_private *ctx, u32 prio,
+		u32 flags, u32 *id);
+int msm_submitqueue_query(struct msm_file_private *ctx, u32 id, u32 param,
+		void __user *data, u32 len);
+int msm_submitqueue_remove(struct msm_file_private *ctx, u32 id);
+void msm_submitqueue_close(struct msm_file_private *ctx);
+
+void msm_submitqueue_destroy(struct kref *kref);
+
 struct hdmi;
 int hdmi_modeset_init(struct hdmi *hdmi, struct drm_device *dev,
 		struct drm_encoder *encoder);
@@ -265,6 +575,15 @@
 	MSM_DSI_CMD_ENCODER_ID = 1,
 	MSM_DSI_ENCODER_NUM = 2
 };
+
+/**
+ * msm_send_crtc_notification - notify user-space clients of crtc events.
+ * @crtc: crtc that is generating the event.
+ * @event: event that needs to be notified.
+ * @payload: payload for the event.
+ */
+void msm_send_crtc_notification(struct drm_crtc *crtc,
+		struct drm_event *event, u8 *payload);
 #ifdef CONFIG_DRM_MSM_DSI
 void __init msm_dsi_register(void);
 void __exit msm_dsi_unregister(void);
@@ -302,6 +621,7 @@
 
 void __iomem *msm_ioremap(struct platform_device *pdev, const char *name,
 		const char *dbgname);
+void msm_iounmap(struct platform_device *dev, void __iomem *addr);
 void msm_writel(u32 data, void __iomem *addr);
 u32 msm_readl(const void __iomem *addr);
 
@@ -311,7 +631,8 @@
 static inline bool fence_completed(struct drm_device *dev, uint32_t fence)
 {
 	struct msm_drm_private *priv = dev->dev_private;
-	return priv->completed_fence >= fence;
+
+	return priv->completed_fence[FENCE_RING(fence)] >= fence;
 }
 
 static inline int align_pitch(int width, int bpp)
@@ -332,5 +653,4 @@
 /* for conditionally setting boolean flag(s): */
 #define COND(bool, val) ((bool) ? (val) : 0)
 
-
 #endif /* __MSM_DRV_H__ */
diff -ruw linux-4.4.115/drivers/gpu/drm/msm/msm_fb.c linux-4.4.115-fbx/drivers/gpu/drm/msm/msm_fb.c
--- linux-4.4.115/drivers/gpu/drm/msm/msm_fb.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/gpu/drm/msm/msm_fb.c	2019-01-22 16:16:23.507246443 +0100
@@ -1,4 +1,5 @@
 /*
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
  * Copyright (C) 2013 Red Hat
  * Author: Rob Clark <robdclark@gmail.com>
  *
@@ -33,17 +34,33 @@
 		struct drm_file *file_priv,
 		unsigned int *handle)
 {
-	struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
+	struct msm_framebuffer *msm_fb;
+
+	if (!fb) {
+		DRM_ERROR("from:%pS null fb\n", __builtin_return_address(0));
+		return -EINVAL;
+	}
+
+	msm_fb = to_msm_framebuffer(fb);
+
 	return drm_gem_handle_create(file_priv,
 			msm_fb->planes[0], handle);
 }
 
 static void msm_framebuffer_destroy(struct drm_framebuffer *fb)
 {
-	struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
-	int i, n = drm_format_num_planes(fb->pixel_format);
+	struct msm_framebuffer *msm_fb;
+	int i, n;
+
+	if (!fb) {
+		DRM_ERROR("from:%pS null fb\n", __builtin_return_address(0));
+		return;
+	}
+
+	msm_fb = to_msm_framebuffer(fb);
+	n = drm_format_num_planes(fb->pixel_format);
 
-	DBG("destroy: FB ID: %d (%p)", fb->base.id, fb);
+	DBG("destroy: FB ID: %d (%pK)", fb->base.id, fb);
 
 	drm_framebuffer_cleanup(fb);
 
@@ -72,9 +89,16 @@
 #ifdef CONFIG_DEBUG_FS
 void msm_framebuffer_describe(struct drm_framebuffer *fb, struct seq_file *m)
 {
-	struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
-	int i, n = drm_format_num_planes(fb->pixel_format);
+	struct msm_framebuffer *msm_fb;
+	int i, n;
 
+	if (!fb) {
+		DRM_ERROR("from:%pS null fb\n", __builtin_return_address(0));
+		return;
+	}
+
+	msm_fb = to_msm_framebuffer(fb);
+	n = drm_format_num_planes(fb->pixel_format);
 	seq_printf(m, "fb: %dx%d@%4.4s (%2d, ID:%d)\n",
 			fb->width, fb->height, (char *)&fb->pixel_format,
 			fb->refcount.refcount.counter, fb->base.id);
@@ -92,15 +116,23 @@
  * should be fine, since only the scanout (mdpN) side of things needs
  * this, the gpu doesn't care about fb's.
  */
-int msm_framebuffer_prepare(struct drm_framebuffer *fb, int id)
+int msm_framebuffer_prepare(struct drm_framebuffer *fb,
+		struct msm_gem_address_space *aspace)
 {
-	struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
-	int ret, i, n = drm_format_num_planes(fb->pixel_format);
-	uint32_t iova;
+	struct msm_framebuffer *msm_fb;
+	int ret, i, n;
+	uint64_t iova;
 
+	if (!fb) {
+		DRM_ERROR("from:%pS null fb\n", __builtin_return_address(0));
+		return -EINVAL;
+	}
+
+	msm_fb = to_msm_framebuffer(fb);
+	n = drm_format_num_planes(fb->pixel_format);
 	for (i = 0; i < n; i++) {
-		ret = msm_gem_get_iova(msm_fb->planes[i], id, &iova);
-		DBG("FB[%u]: iova[%d]: %08x (%d)", fb->base.id, i, iova, ret);
+		ret = msm_gem_get_iova(msm_fb->planes[i], aspace, &iova);
+		DBG("FB[%u]: iova[%d]: %08llx (%d)", fb->base.id, i, iova, ret);
 		if (ret)
 			return ret;
 	}
@@ -108,33 +140,62 @@
 	return 0;
 }
 
-void msm_framebuffer_cleanup(struct drm_framebuffer *fb, int id)
+void msm_framebuffer_cleanup(struct drm_framebuffer *fb,
+		struct msm_gem_address_space *aspace)
 {
-	struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
-	int i, n = drm_format_num_planes(fb->pixel_format);
+	struct msm_framebuffer *msm_fb;
+	int i, n;
+
+	if (fb == NULL) {
+		DRM_ERROR("from:%pS null fb\n", __builtin_return_address(0));
+		return;
+	}
+
+	msm_fb = to_msm_framebuffer(fb);
+	n = drm_format_num_planes(fb->pixel_format);
 
 	for (i = 0; i < n; i++)
-		msm_gem_put_iova(msm_fb->planes[i], id);
+		msm_gem_put_iova(msm_fb->planes[i], aspace);
 }
 
-uint32_t msm_framebuffer_iova(struct drm_framebuffer *fb, int id, int plane)
+/* FIXME: Leave this as a uint32_t and just return the lower 32 bits? */
+uint32_t msm_framebuffer_iova(struct drm_framebuffer *fb,
+		struct msm_gem_address_space *aspace, int plane)
 {
-	struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
+	struct msm_framebuffer *msm_fb;
+	uint64_t iova;
+
+	if (!fb) {
+		DRM_ERROR("from:%pS null fb\n", __builtin_return_address(0));
+		return -EINVAL;
+	}
+
+	msm_fb = to_msm_framebuffer(fb);
 	if (!msm_fb->planes[plane])
 		return 0;
-	return msm_gem_iova(msm_fb->planes[plane], id) + fb->offsets[plane];
+
+	iova = msm_gem_iova(msm_fb->planes[plane], aspace) + fb->offsets[plane];
+
+	/* FIXME: Make sure it is < 32 bits */
+	return lower_32_bits(iova);
 }
 
 struct drm_gem_object *msm_framebuffer_bo(struct drm_framebuffer *fb, int plane)
 {
-	struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
+	struct msm_framebuffer *msm_fb;
+
+	if (!fb) {
+		DRM_ERROR("from:%pS null fb\n", __builtin_return_address(0));
+		return ERR_PTR(-EINVAL);
+	}
+
+	msm_fb = to_msm_framebuffer(fb);
 	return msm_fb->planes[plane];
 }
 
 const struct msm_format *msm_framebuffer_format(struct drm_framebuffer *fb)
 {
-	struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
-	return msm_fb->format;
+	return fb ? (to_msm_framebuffer(fb))->format : NULL;
 }
 
 struct drm_framebuffer *msm_framebuffer_create(struct drm_device *dev,
@@ -175,18 +236,20 @@
 	struct msm_framebuffer *msm_fb = NULL;
 	struct drm_framebuffer *fb;
 	const struct msm_format *format;
-	int ret, i, n;
+	int ret, i, num_planes;
 	unsigned int hsub, vsub;
+	bool is_modified = false;
 
-	DBG("create framebuffer: dev=%p, mode_cmd=%p (%dx%d@%4.4s)",
+	DBG("create framebuffer: dev=%pK, mode_cmd=%pK (%dx%d@%4.4s)",
 			dev, mode_cmd, mode_cmd->width, mode_cmd->height,
 			(char *)&mode_cmd->pixel_format);
 
-	n = drm_format_num_planes(mode_cmd->pixel_format);
+	num_planes = drm_format_num_planes(mode_cmd->pixel_format);
 	hsub = drm_format_horz_chroma_subsampling(mode_cmd->pixel_format);
 	vsub = drm_format_vert_chroma_subsampling(mode_cmd->pixel_format);
 
-	format = kms->funcs->get_format(kms, mode_cmd->pixel_format);
+	format = kms->funcs->get_format(kms, mode_cmd->pixel_format,
+			mode_cmd->modifier, num_planes);
 	if (!format) {
 		dev_err(dev->dev, "unsupported pixel format: %4.4s\n",
 				(char *)&mode_cmd->pixel_format);
@@ -204,27 +267,53 @@
 
 	msm_fb->format = format;
 
-	if (n > ARRAY_SIZE(msm_fb->planes)) {
+	if (mode_cmd->flags & DRM_MODE_FB_MODIFIERS) {
+		for (i = 0; i < ARRAY_SIZE(mode_cmd->modifier); i++) {
+			if (mode_cmd->modifier[i]) {
+				is_modified = true;
+				break;
+			}
+		}
+	}
+
+	if (num_planes > ARRAY_SIZE(msm_fb->planes)) {
 		ret = -EINVAL;
 		goto fail;
 	}
 
-	for (i = 0; i < n; i++) {
+	if (is_modified) {
+		if (!kms->funcs->check_modified_format) {
+			dev_err(dev->dev, "can't check modified fb format\n");
+			ret = -EINVAL;
+			goto fail;
+		} else {
+			ret = kms->funcs->check_modified_format(
+				kms, msm_fb->format, mode_cmd, bos);
+			if (ret)
+				goto fail;
+		}
+	} else {
+		for (i = 0; i < num_planes; i++) {
 		unsigned int width = mode_cmd->width / (i ? hsub : 1);
 		unsigned int height = mode_cmd->height / (i ? vsub : 1);
 		unsigned int min_size;
+			unsigned int cpp;
+
+			cpp = drm_format_plane_cpp(mode_cmd->pixel_format, i);
 
 		min_size = (height - 1) * mode_cmd->pitches[i]
-			 + width * drm_format_plane_cpp(mode_cmd->pixel_format, i)
+				 + width * cpp
 			 + mode_cmd->offsets[i];
 
 		if (bos[i]->size < min_size) {
 			ret = -EINVAL;
 			goto fail;
 		}
+		}
+	}
 
+	for (i = 0; i < num_planes; i++)
 		msm_fb->planes[i] = bos[i];
-	}
 
 	drm_helper_mode_fill_fb_struct(fb, mode_cmd);
 
@@ -234,7 +323,7 @@
 		goto fail;
 	}
 
-	DBG("create: FB ID: %d (%p)", fb->base.id, fb);
+	DBG("create: FB ID: %d (%pK)", fb->base.id, fb);
 
 	return fb;
 
diff -ruw linux-4.4.115/drivers/gpu/drm/msm/msm_gem.c linux-4.4.115-fbx/drivers/gpu/drm/msm/msm_gem.c
--- linux-4.4.115/drivers/gpu/drm/msm/msm_gem.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/gpu/drm/msm/msm_gem.c	2019-01-22 16:16:23.507246443 +0100
@@ -18,12 +18,159 @@
 #include <linux/spinlock.h>
 #include <linux/shmem_fs.h>
 #include <linux/dma-buf.h>
+#include <soc/qcom/secure_buffer.h>
 
 #include "msm_drv.h"
 #include "msm_gem.h"
 #include "msm_gpu.h"
 #include "msm_mmu.h"
 
+static void msm_gem_mn_free(struct kref *refcount)
+{
+	struct msm_mmu_notifier *msm_mn = container_of(refcount,
+			struct msm_mmu_notifier, refcount);
+
+	mmu_notifier_unregister(&msm_mn->mn, msm_mn->mm);
+	hash_del(&msm_mn->node);
+
+	kfree(msm_mn);
+}
+
+static int msm_gem_mn_get(struct msm_mmu_notifier *msm_mn)
+{
+	if (msm_mn)
+		return kref_get_unless_zero(&msm_mn->refcount);
+	return 0;
+}
+
+static void msm_gem_mn_put(struct msm_mmu_notifier *msm_mn)
+{
+	if (msm_mn) {
+		struct msm_drm_private *msm_dev = msm_mn->msm_dev;
+
+		mutex_lock(&msm_dev->mn_lock);
+		kref_put(&msm_mn->refcount, msm_gem_mn_free);
+		mutex_unlock(&msm_dev->mn_lock);
+	}
+}
+
+void msm_mn_invalidate_range_start(struct mmu_notifier *mn,
+		struct mm_struct *mm, unsigned long start, unsigned long end);
+
+static const struct mmu_notifier_ops msm_mn_ops = {
+	.invalidate_range_start = msm_mn_invalidate_range_start,
+};
+
+static struct msm_mmu_notifier *
+msm_gem_mn_find(struct msm_drm_private *msm_dev, struct mm_struct *mm,
+		struct msm_gem_address_space *aspace)
+{
+	struct msm_mmu_notifier *msm_mn;
+	int ret = 0;
+
+	mutex_lock(&msm_dev->mn_lock);
+	hash_for_each_possible(msm_dev->mn_hash, msm_mn, node,
+			(unsigned long) mm) {
+		if (msm_mn->mm == mm) {
+			if (!msm_gem_mn_get(msm_mn)) {
+				ret = -EINVAL;
+				goto fail;
+			}
+			mutex_unlock(&msm_dev->mn_lock);
+			return msm_mn;
+		}
+	}
+
+	msm_mn = kzalloc(sizeof(*msm_mn), GFP_KERNEL);
+	if (!msm_mn) {
+		ret = -ENOMEM;
+		goto fail;
+	}
+
+	msm_mn->mm = current->mm;
+	msm_mn->mn.ops = &msm_mn_ops;
+	ret = mmu_notifier_register(&msm_mn->mn, msm_mn->mm);
+	if (ret) {
+		kfree(msm_mn);
+		goto fail;
+	}
+
+	msm_mn->svm_tree = RB_ROOT;
+	spin_lock_init(&msm_mn->svm_tree_lock);
+	kref_init(&msm_mn->refcount);
+	msm_mn->msm_dev = msm_dev;
+
+	/* Insert the msm_mn into the hash */
+	hash_add(msm_dev->mn_hash, &msm_mn->node, (unsigned long) msm_mn->mm);
+	mutex_unlock(&msm_dev->mn_lock);
+
+	return msm_mn;
+
+fail:
+	mutex_unlock(&msm_dev->mn_lock);
+	return ERR_PTR(ret);
+}
+
+static int msm_gem_mn_register(struct msm_gem_svm_object *msm_svm_obj,
+		struct msm_gem_address_space *aspace)
+{
+	struct drm_gem_object *obj = &msm_svm_obj->msm_obj_base.base;
+	struct msm_drm_private *msm_dev = obj->dev->dev_private;
+	struct msm_mmu_notifier *msm_mn;
+
+	msm_svm_obj->mm = current->mm;
+	msm_svm_obj->svm_node.start = msm_svm_obj->hostptr;
+	msm_svm_obj->svm_node.last = msm_svm_obj->hostptr + obj->size - 1;
+
+	msm_mn = msm_gem_mn_find(msm_dev, msm_svm_obj->mm, aspace);
+	if (IS_ERR(msm_mn))
+		return PTR_ERR(msm_mn);
+
+	msm_svm_obj->msm_mn = msm_mn;
+
+	spin_lock(&msm_mn->svm_tree_lock);
+	interval_tree_insert(&msm_svm_obj->svm_node, &msm_mn->svm_tree);
+	spin_unlock(&msm_mn->svm_tree_lock);
+
+	return 0;
+}
+
+static void msm_gem_mn_unregister(struct msm_gem_svm_object *msm_svm_obj)
+{
+	struct msm_mmu_notifier *msm_mn = msm_svm_obj->msm_mn;
+
+	/* invalid: bo already unregistered */
+	if (!msm_mn || msm_svm_obj->invalid)
+		return;
+
+	spin_lock(&msm_mn->svm_tree_lock);
+	interval_tree_remove(&msm_svm_obj->svm_node, &msm_mn->svm_tree);
+	spin_unlock(&msm_mn->svm_tree_lock);
+}
+
+static int protect_pages(struct msm_gem_object *msm_obj)
+{
+	int perm = PERM_READ | PERM_WRITE;
+	int src = VMID_HLOS;
+	int dst = VMID_CP_PIXEL;
+
+	return hyp_assign_table(msm_obj->sgt, &src, 1, &dst, &perm, 1);
+}
+
+static int unprotect_pages(struct msm_gem_object *msm_obj)
+{
+	int perm = PERM_READ | PERM_WRITE | PERM_EXEC;
+	int src = VMID_CP_PIXEL;
+	int dst = VMID_HLOS;
+
+	return hyp_assign_table(msm_obj->sgt, &src, 1, &dst, &perm, 1);
+}
+
+static void *get_dmabuf_ptr(struct drm_gem_object *obj)
+{
+	return (obj && obj->import_attach) ? obj->import_attach->dmabuf : NULL;
+}
+
 static dma_addr_t physaddr(struct drm_gem_object *obj)
 {
 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
@@ -39,8 +186,7 @@
 }
 
 /* allocate pages from VRAM carveout, used when no IOMMU: */
-static struct page **get_pages_vram(struct drm_gem_object *obj,
-		int npages)
+static struct page **get_pages_vram(struct drm_gem_object *obj, int npages)
 {
 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 	struct msm_drm_private *priv = obj->dev->dev_private;
@@ -52,8 +198,10 @@
 	if (!p)
 		return ERR_PTR(-ENOMEM);
 
+	spin_lock(&priv->vram.lock);
 	ret = drm_mm_insert_node(&priv->vram.mm, msm_obj->vram_node,
 			npages, 0, DRM_MM_SEARCH_DEFAULT);
+	spin_unlock(&priv->vram.lock);
 	if (ret) {
 		drm_free_large(p);
 		return ERR_PTR(ret);
@@ -68,7 +216,6 @@
 	return p;
 }
 
-/* called with dev->struct_mutex held */
 static struct page **get_pages(struct drm_gem_object *obj)
 {
 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
@@ -89,44 +236,80 @@
 			return p;
 		}
 
+		msm_obj->pages = p;
+
 		msm_obj->sgt = drm_prime_pages_to_sg(p, npages);
 		if (IS_ERR(msm_obj->sgt)) {
-			dev_err(dev->dev, "failed to allocate sgt\n");
-			return ERR_CAST(msm_obj->sgt);
-		}
+			void *ptr = ERR_CAST(msm_obj->sgt);
 
-		msm_obj->pages = p;
+			msm_obj->sgt = NULL;
+			return ptr;
+		}
 
-		/* For non-cached buffers, ensure the new pages are clean
-		 * because display controller, GPU, etc. are not coherent:
+		/*
+		 * Make sure to flush the CPU cache for newly allocated memory
+		 * so we don't get ourselves into trouble with a dirty cache
 		 */
 		if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
-			dma_map_sg(dev->dev, msm_obj->sgt->sgl,
+			dma_sync_sg_for_device(dev->dev, msm_obj->sgt->sgl,
 					msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
+
+		/* Secure the pages if we need to */
+		if (use_pages(obj) && msm_obj->flags & MSM_BO_SECURE) {
+			int ret = protect_pages(msm_obj);
+
+			if (ret)
+				return ERR_PTR(ret);
+
+			/*
+			 * Set a flag to indicate the pages are locked by us and
+			 * need to be unlocked when the pages get freed
+			 */
+			msm_obj->flags |= MSM_BO_LOCKED;
+		}
 	}
 
 	return msm_obj->pages;
 }
 
+static void put_pages_vram(struct drm_gem_object *obj)
+{
+	struct msm_gem_object *msm_obj = to_msm_bo(obj);
+	struct msm_drm_private *priv = obj->dev->dev_private;
+
+	spin_lock(&priv->vram.lock);
+	drm_mm_remove_node(msm_obj->vram_node);
+	spin_unlock(&priv->vram.lock);
+
+	drm_free_large(msm_obj->pages);
+}
+
 static void put_pages(struct drm_gem_object *obj)
 {
 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 
 	if (msm_obj->pages) {
-		/* For non-cached buffers, ensure the new pages are clean
-		 * because display controller, GPU, etc. are not coherent:
-		 */
-		if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
-			dma_unmap_sg(obj->dev->dev, msm_obj->sgt->sgl,
-					msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
+		if (msm_obj->flags & MSM_BO_LOCKED) {
+			unprotect_pages(msm_obj);
+			msm_obj->flags &= ~MSM_BO_LOCKED;
+		}
+
+		if (msm_obj->sgt)
 		sg_free_table(msm_obj->sgt);
 		kfree(msm_obj->sgt);
 
-		if (use_pages(obj))
-			drm_gem_put_pages(obj, msm_obj->pages, true, false);
-		else {
-			drm_mm_remove_node(msm_obj->vram_node);
-			drm_free_large(msm_obj->pages);
+		if (use_pages(obj)) {
+			if (msm_obj->flags & MSM_BO_SVM) {
+				int npages = obj->size >> PAGE_SHIFT;
+
+				release_pages(msm_obj->pages, npages, 0);
+				kfree(msm_obj->pages);
+			} else {
+				drm_gem_put_pages(obj, msm_obj->pages,
+						true, false);
+			}
+		} else {
+			put_pages_vram(obj);
 		}
 
 		msm_obj->pages = NULL;
@@ -135,11 +318,12 @@
 
 struct page **msm_gem_get_pages(struct drm_gem_object *obj)
 {
-	struct drm_device *dev = obj->dev;
+	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 	struct page **p;
-	mutex_lock(&dev->struct_mutex);
+
+	mutex_lock(&msm_obj->lock);
 	p = get_pages(obj);
-	mutex_unlock(&dev->struct_mutex);
+	mutex_unlock(&msm_obj->lock);
 	return p;
 }
 
@@ -153,6 +337,12 @@
 {
 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 
+	/* We can't mmap secure objects or SVM objects */
+	if (msm_obj->flags & (MSM_BO_SECURE | MSM_BO_SVM)) {
+		drm_gem_vm_close(vma);
+		return -EACCES;
+	}
+
 	vma->vm_flags &= ~VM_PFNMAP;
 	vma->vm_flags |= VM_MIXEDMAP;
 
@@ -193,16 +383,17 @@
 int msm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 {
 	struct drm_gem_object *obj = vma->vm_private_data;
-	struct drm_device *dev = obj->dev;
+	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 	struct page **pages;
 	unsigned long pfn;
 	pgoff_t pgoff;
 	int ret;
 
-	/* Make sure we don't parallel update on a fault, nor move or remove
-	 * something from beneath our feet
+	/*
+	 * vm_ops.open and close get and put a reference on obj.
+	 * So, we don't need to hold one here.
 	 */
-	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	ret = mutex_lock_interruptible(&msm_obj->lock);
 	if (ret)
 		goto out;
 
@@ -219,13 +410,13 @@
 
 	pfn = page_to_pfn(pages[pgoff]);
 
-	VERB("Inserting %p pfn %lx, pa %lx", vmf->virtual_address,
+	VERB("Inserting %pK pfn %lx, pa %lx", vmf->virtual_address,
 			pfn, pfn << PAGE_SHIFT);
 
 	ret = vm_insert_mixed(vma, (unsigned long)vmf->virtual_address, pfn);
 
 out_unlock:
-	mutex_unlock(&dev->struct_mutex);
+	mutex_unlock(&msm_obj->lock);
 out:
 	switch (ret) {
 	case -EAGAIN:
@@ -249,9 +440,10 @@
 static uint64_t mmap_offset(struct drm_gem_object *obj)
 {
 	struct drm_device *dev = obj->dev;
+	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 	int ret;
 
-	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+	WARN_ON(!mutex_is_locked(&msm_obj->lock));
 
 	/* Make it mmapable */
 	ret = drm_gem_create_mmap_offset(obj);
@@ -267,85 +459,156 @@
 uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj)
 {
 	uint64_t offset;
-	mutex_lock(&obj->dev->struct_mutex);
+	struct msm_gem_object *msm_obj = to_msm_bo(obj);
+
+	mutex_lock(&msm_obj->lock);
 	offset = mmap_offset(obj);
-	mutex_unlock(&obj->dev->struct_mutex);
+	mutex_unlock(&msm_obj->lock);
 	return offset;
 }
 
-/* should be called under struct_mutex.. although it can be called
- * from atomic context without struct_mutex to acquire an extra
- * iova ref if you know one is already held.
- *
- * That means when I do eventually need to add support for unpinning
- * the refcnt counter needs to be atomic_t.
- */
-int msm_gem_get_iova_locked(struct drm_gem_object *obj, int id,
-		uint32_t *iova)
+static void obj_remove_domain(struct msm_gem_vma *domain)
+{
+	if (domain) {
+		list_del(&domain->list);
+		kfree(domain);
+	}
+}
+
+/* Called with msm_obj->lock held */
+static void
+put_iova(struct drm_gem_object *obj)
 {
 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
-	int ret = 0;
+	struct msm_gem_svm_object *msm_svm_obj;
+	struct msm_gem_vma *domain, *tmp;
+	bool invalid = false;
 
-	if (!msm_obj->domain[id].iova) {
-		struct msm_drm_private *priv = obj->dev->dev_private;
-		struct page **pages = get_pages(obj);
+	WARN_ON(!mutex_is_locked(&msm_obj->lock));
 
-		if (IS_ERR(pages))
-			return PTR_ERR(pages);
+	if (msm_obj->flags & MSM_BO_SVM) {
+		msm_svm_obj = to_msm_svm_obj(msm_obj);
+		invalid = msm_svm_obj->invalid;
+	}
 
+	list_for_each_entry_safe(domain, tmp, &msm_obj->domains, list) {
 		if (iommu_present(&platform_bus_type)) {
-			struct msm_mmu *mmu = priv->mmus[id];
-			uint32_t offset;
-
-			if (WARN_ON(!mmu))
-				return -EINVAL;
+			msm_gem_unmap_vma(domain->aspace, domain,
+				msm_obj->sgt, get_dmabuf_ptr(obj), invalid);
+		}
 
-			offset = (uint32_t)mmap_offset(obj);
-			ret = mmu->funcs->map(mmu, offset, msm_obj->sgt,
-					obj->size, IOMMU_READ | IOMMU_WRITE);
-			msm_obj->domain[id].iova = offset;
-		} else {
-			msm_obj->domain[id].iova = physaddr(obj);
+		obj_remove_domain(domain);
-		}
-	}
+	}
+}
 
-	if (!ret)
-		*iova = msm_obj->domain[id].iova;
+static struct msm_gem_vma *obj_add_domain(struct drm_gem_object *obj,
+		struct msm_gem_address_space *aspace)
+{
+	struct msm_gem_object *msm_obj = to_msm_bo(obj);
+	struct msm_gem_vma *domain = kzalloc(sizeof(*domain), GFP_KERNEL);
 
-	return ret;
+	if (!domain)
+		return ERR_PTR(-ENOMEM);
+
+	domain->aspace = aspace;
+
+	list_add_tail(&domain->list, &msm_obj->domains);
+
+	return domain;
 }
 
-/* get iova, taking a reference.  Should have a matching put */
-int msm_gem_get_iova(struct drm_gem_object *obj, int id, uint32_t *iova)
+static struct msm_gem_vma *obj_get_domain(struct drm_gem_object *obj,
+		struct msm_gem_address_space *aspace)
 {
 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
-	int ret;
+	struct msm_gem_vma *domain;
 
-	/* this is safe right now because we don't unmap until the
-	 * bo is deleted:
-	 */
-	if (msm_obj->domain[id].iova) {
-		*iova = msm_obj->domain[id].iova;
+	list_for_each_entry(domain, &msm_obj->domains, list) {
+		if (domain->aspace == aspace)
+			return domain;
+	}
+
+	return NULL;
+}
+
+#ifndef IOMMU_PRIV
+#define IOMMU_PRIV 0
+#endif
+
+/* A reference to obj must be held before calling this function. */
+int msm_gem_get_iova(struct drm_gem_object *obj,
+		struct msm_gem_address_space *aspace, uint64_t *iova)
+{
+	struct msm_gem_object *msm_obj = to_msm_bo(obj);
+	struct page **pages;
+	struct msm_gem_vma *domain;
+	int ret = 0;
+
+	mutex_lock(&msm_obj->lock);
+
+	if (!iommu_present(&platform_bus_type)) {
+		pages = get_pages(obj);
+
+		if (IS_ERR(pages)) {
+			mutex_unlock(&msm_obj->lock);
+			return PTR_ERR(pages);
+		}
+
+		*iova = (uint64_t) physaddr(obj);
+		mutex_unlock(&msm_obj->lock);
 		return 0;
 	}
 
-	mutex_lock(&obj->dev->struct_mutex);
-	ret = msm_gem_get_iova_locked(obj, id, iova);
-	mutex_unlock(&obj->dev->struct_mutex);
+	domain = obj_get_domain(obj, aspace);
+
+	if (!domain) {
+		domain = obj_add_domain(obj, aspace);
+		if (IS_ERR(domain)) {
+			mutex_unlock(&msm_obj->lock);
+			return PTR_ERR(domain);
+		}
+
+		pages = get_pages(obj);
+		if (IS_ERR(pages)) {
+			obj_remove_domain(domain);
+			mutex_unlock(&msm_obj->lock);
+			return PTR_ERR(pages);
+		}
+
+		ret = msm_gem_map_vma(aspace, domain, msm_obj->sgt,
+			get_dmabuf_ptr(obj), msm_obj->flags);
+	}
+
+	if (!ret)
+		*iova = domain->iova;
+	else
+		obj_remove_domain(domain);
+
+	mutex_unlock(&msm_obj->lock);
 	return ret;
 }
 
 /* get iova without taking a reference, used in places where you have
  * already done a 'msm_gem_get_iova()'.
  */
-uint32_t msm_gem_iova(struct drm_gem_object *obj, int id)
+uint64_t msm_gem_iova(struct drm_gem_object *obj,
+		struct msm_gem_address_space *aspace)
 {
 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
-	WARN_ON(!msm_obj->domain[id].iova);
-	return msm_obj->domain[id].iova;
+	struct msm_gem_vma *domain;
+	uint64_t iova;
+
+	mutex_lock(&msm_obj->lock);
+	domain = obj_get_domain(obj, aspace);
+	WARN_ON(!domain);
+	iova = domain ? domain->iova : 0;
+	mutex_unlock(&msm_obj->lock);
+
+	return iova;
 }
 
-void msm_gem_put_iova(struct drm_gem_object *obj, int id)
+void msm_gem_put_iova(struct drm_gem_object *obj,
+		struct msm_gem_address_space *aspace)
 {
 	// XXX TODO ..
 	// NOTE: probably don't need a _locked() version.. we wouldn't
@@ -385,27 +648,31 @@
 	return ret;
 }
 
-void *msm_gem_vaddr_locked(struct drm_gem_object *obj)
+void *msm_gem_vaddr(struct drm_gem_object *obj)
 {
 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
-	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
-	if (!msm_obj->vaddr) {
+
+	mutex_lock(&msm_obj->lock);
+
+	if (msm_obj->vaddr) {
+		mutex_unlock(&msm_obj->lock);
+		return msm_obj->vaddr;
+	}
+
+	if (obj->import_attach) {
+		msm_obj->vaddr = dma_buf_vmap(obj->import_attach->dmabuf);
+	} else {
 		struct page **pages = get_pages(obj);
-		if (IS_ERR(pages))
+		if (IS_ERR(pages)) {
+			mutex_unlock(&msm_obj->lock);
 			return ERR_CAST(pages);
+		}
 		msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
 				VM_MAP, pgprot_writecombine(PAGE_KERNEL));
 	}
-	return msm_obj->vaddr;
-}
+	mutex_unlock(&msm_obj->lock);
 
-void *msm_gem_vaddr(struct drm_gem_object *obj)
-{
-	void *ret;
-	mutex_lock(&obj->dev->struct_mutex);
-	ret = msm_gem_vaddr_locked(obj);
-	mutex_unlock(&obj->dev->struct_mutex);
-	return ret;
+	return msm_obj->vaddr;
 }
 
 /* setup callback for when bo is no longer busy..
@@ -474,19 +741,46 @@
 	return 0;
 }
 
+void msm_gem_sync(struct drm_gem_object *obj, u32 op)
+{
+	struct drm_device *dev = obj->dev;
+	struct msm_gem_object *msm_obj = to_msm_bo(obj);
+
+	if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
+		return;
+
+	switch (op) {
+	case MSM_GEM_SYNC_TO_CPU:
+		dma_sync_sg_for_cpu(dev->dev, msm_obj->sgt->sgl,
+			msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
+		break;
+	case MSM_GEM_SYNC_TO_DEV:
+		dma_sync_sg_for_device(dev->dev, msm_obj->sgt->sgl,
+			msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
+		break;
+	}
+}
+
 #ifdef CONFIG_DEBUG_FS
 void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
 {
 	struct drm_device *dev = obj->dev;
 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
+	struct msm_gem_vma *domain;
 	uint64_t off = drm_vma_node_start(&obj->vma_node);
 
 	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
-	seq_printf(m, "%08x: %c(r=%u,w=%u) %2d (%2d) %08llx %p %zu\n",
+	seq_printf(m, "%08x: %c(r=%u,w=%u) %2d (%2d) %08llx %pK\t",
 			msm_obj->flags, is_active(msm_obj) ? 'A' : 'I',
 			msm_obj->read_fence, msm_obj->write_fence,
 			obj->name, obj->refcount.refcount.counter,
-			off, msm_obj->vaddr, obj->size);
+			off, msm_obj->vaddr);
+
+	/* FIXME: we need to print the address space here too */
+	list_for_each_entry(domain, &msm_obj->domains, list)
+		seq_printf(m, " %08llx", domain->iova);
+
+	seq_puts(m, "\n");
 }
 
 void msm_gem_describe_objects(struct list_head *list, struct seq_file *m)
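The msm_gem_sync() helper added above gives cached BOs an explicit CPU/device ownership handoff (it deliberately does nothing for MSM_BO_WC/MSM_BO_UNCACHED objects, which are never dirty in the CPU cache). A minimal usage sketch; the obj/result/len names are hypothetical and error handling is elided:

	/* Take the buffer away from the device before the CPU reads it */
	msm_gem_sync(obj, MSM_GEM_SYNC_TO_CPU);
	memcpy(result, msm_gem_vaddr(obj), len);
	/* Hand it back before the GPU touches it again */
	msm_gem_sync(obj, MSM_GEM_SYNC_TO_DEV);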
@@ -510,29 +804,33 @@
 void msm_gem_free_object(struct drm_gem_object *obj)
 {
 	struct drm_device *dev = obj->dev;
-	struct msm_drm_private *priv = obj->dev->dev_private;
 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
-	int id;
+	struct msm_gem_svm_object *msm_svm_obj = NULL;
 
 	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
 
 	/* object should not be on active list: */
 	WARN_ON(is_active(msm_obj));
 
+	if (msm_obj->flags & MSM_BO_SVM)
+		msm_svm_obj = to_msm_svm_obj(msm_obj);
+
 	list_del(&msm_obj->mm_list);
 
-	for (id = 0; id < ARRAY_SIZE(msm_obj->domain); id++) {
-		struct msm_mmu *mmu = priv->mmus[id];
-		if (mmu && msm_obj->domain[id].iova) {
-			uint32_t offset = msm_obj->domain[id].iova;
-			mmu->funcs->unmap(mmu, offset, msm_obj->sgt, obj->size);
-		}
+	/* Unregister SVM object from mmu notifications */
+	if (msm_obj->flags & MSM_BO_SVM) {
+		msm_gem_mn_unregister(msm_svm_obj);
+		msm_gem_mn_put(msm_svm_obj->msm_mn);
+		msm_svm_obj->msm_mn = NULL;
 	}
 
+	mutex_lock(&msm_obj->lock);
+	put_iova(obj);
+
 	if (obj->import_attach) {
 		if (msm_obj->vaddr)
-			dma_buf_vunmap(obj->import_attach->dmabuf, msm_obj->vaddr);
-
+			dma_buf_vunmap(obj->import_attach->dmabuf,
+				msm_obj->vaddr);
 		/* Don't drop the pages for imported dmabuf, as they are not
 		 * ours, just free the array we allocated:
 		 */
@@ -549,7 +847,11 @@
 		reservation_object_fini(msm_obj->resv);
 
 	drm_gem_object_release(obj);
+	mutex_unlock(&msm_obj->lock);
 
+	if (msm_obj->flags & MSM_BO_SVM)
+		kfree(msm_svm_obj);
+	else
-	kfree(msm_obj);
+		kfree(msm_obj);
 }
 
@@ -560,13 +862,28 @@
 	struct drm_gem_object *obj;
 	int ret;
 
-	ret = mutex_lock_interruptible(&dev->struct_mutex);
-	if (ret)
+	obj = msm_gem_new(dev, size, flags);
+
+	if (IS_ERR(obj))
+		return PTR_ERR(obj);
+
+	ret = drm_gem_handle_create(file, obj, handle);
+
+	/* drop reference from allocate - handle holds it now */
+	drm_gem_object_unreference_unlocked(obj);
+
-		return ret;
+	return ret;
+}
 
-	obj = msm_gem_new(dev, size, flags);
+/* convenience method to construct an SVM buffer object, and userspace handle */
+int msm_gem_svm_new_handle(struct drm_device *dev, struct drm_file *file,
+		uint64_t hostptr, uint64_t size,
+		uint32_t flags, uint32_t *handle)
+{
+	struct drm_gem_object *obj;
+	int ret;
 
-	mutex_unlock(&dev->struct_mutex);
+	obj = msm_gem_svm_new(dev, file, hostptr, size, flags);
 
 	if (IS_ERR(obj))
 		return PTR_ERR(obj);
@@ -579,13 +896,11 @@
 	return ret;
 }
 
-static int msm_gem_new_impl(struct drm_device *dev,
+static int msm_gem_obj_init(struct drm_device *dev,
 		uint32_t size, uint32_t flags,
-		struct drm_gem_object **obj)
+		struct msm_gem_object *msm_obj, bool struct_mutex_locked)
 {
 	struct msm_drm_private *priv = dev->dev_private;
-	struct msm_gem_object *msm_obj;
-	unsigned sz;
 	bool use_vram = false;
 
 	switch (flags & MSM_BO_CACHE_MASK) {
@@ -607,43 +922,72 @@
 	if (WARN_ON(use_vram && !priv->vram.size))
 		return -EINVAL;
 
-	sz = sizeof(*msm_obj);
-	if (use_vram)
-		sz += sizeof(struct drm_mm_node);
+	mutex_init(&msm_obj->lock);
 
-	msm_obj = kzalloc(sz, GFP_KERNEL);
-	if (!msm_obj)
-		return -ENOMEM;
+	if (use_vram) {
+		struct msm_gem_vma *domain = obj_add_domain(&msm_obj->base, 0);
 
-	if (use_vram)
-		msm_obj->vram_node = (void *)&msm_obj[1];
+		if (!IS_ERR(domain))
+			msm_obj->vram_node = &domain->node;
+	}
 
 	msm_obj->flags = flags;
 
 	msm_obj->resv = &msm_obj->_resv;
 	reservation_object_init(msm_obj->resv);
 
+	INIT_LIST_HEAD(&msm_obj->mm_list);
 	INIT_LIST_HEAD(&msm_obj->submit_entry);
-	list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
+	INIT_LIST_HEAD(&msm_obj->domains);
 
-	*obj = &msm_obj->base;
+	if (struct_mutex_locked) {
+		list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
+	} else {
+		mutex_lock(&dev->struct_mutex);
+		list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
+		mutex_unlock(&dev->struct_mutex);
+	}
 
 	return 0;
 }
 
-struct drm_gem_object *msm_gem_new(struct drm_device *dev,
-		uint32_t size, uint32_t flags)
+static struct drm_gem_object *msm_gem_new_impl(struct drm_device *dev,
+		uint32_t size, uint32_t flags, bool struct_mutex_locked)
 {
-	struct drm_gem_object *obj = NULL;
+	struct msm_gem_object *msm_obj;
 	int ret;
 
-	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+	msm_obj = kzalloc(sizeof(*msm_obj), GFP_KERNEL);
+	if (!msm_obj)
+		return ERR_PTR(-ENOMEM);
+
+	ret = msm_gem_obj_init(dev, size, flags, msm_obj, struct_mutex_locked);
+	if (ret) {
+		kfree(msm_obj);
+		return ERR_PTR(ret);
+	}
+
+	return &msm_obj->base;
+}
+
+static struct drm_gem_object *_msm_gem_new(struct drm_device *dev,
+		uint32_t size, uint32_t flags, bool struct_mutex_locked)
+{
+	struct drm_gem_object *obj;
+	int ret;
 
 	size = PAGE_ALIGN(size);
 
-	ret = msm_gem_new_impl(dev, size, flags, &obj);
-	if (ret)
-		goto fail;
+	/*
+	 * Disallow zero-sized objects as they make the underlying
+	 * infrastructure grumpy
+	 */
+	if (!size)
+		return ERR_PTR(-EINVAL);
+
+	obj = msm_gem_new_impl(dev, size, flags, struct_mutex_locked);
+	if (IS_ERR(obj))
+		return obj;
 
 	if (use_pages(obj)) {
 		ret = drm_gem_object_init(dev, obj, size);
@@ -656,14 +1000,166 @@
 	return obj;
 
 fail:
-	if (obj)
-		drm_gem_object_unreference(obj);
+	drm_gem_object_unreference_unlocked(obj);
+
+	return ERR_PTR(ret);
+}
+
+struct drm_gem_object *msm_gem_new_locked(struct drm_device *dev,
+		uint32_t size, uint32_t flags)
+{
+	return _msm_gem_new(dev, size, flags, true);
+}
+
+struct drm_gem_object *msm_gem_new(struct drm_device *dev,
+		uint32_t size, uint32_t flags)
+{
+	return _msm_gem_new(dev, size, flags, false);
+}
+
+static struct drm_gem_object *msm_svm_gem_new_impl(struct drm_device *dev,
+		uint32_t size, uint32_t flags)
+{
+	struct msm_gem_svm_object *msm_svm_obj;
+	struct msm_gem_object *msm_obj;
+	int ret;
+
+	msm_svm_obj = kzalloc(sizeof(*msm_svm_obj), GFP_KERNEL);
+	if (!msm_svm_obj)
+		return ERR_PTR(-ENOMEM);
+
+	msm_obj = &msm_svm_obj->msm_obj_base;
+
+	ret = msm_gem_obj_init(dev, size, flags | MSM_BO_SVM, msm_obj, false);
+	if (ret) {
+		kfree(msm_svm_obj);
+		return ERR_PTR(ret);
+	}
+
+	return &msm_obj->base;
+}
+
+/* convenience method to construct an SVM GEM bo, and userspace handle */
+struct drm_gem_object *msm_gem_svm_new(struct drm_device *dev,
+		struct drm_file *file, uint64_t hostptr,
+		uint64_t size, uint32_t flags)
+{
+	struct drm_gem_object *obj;
+	struct msm_file_private *ctx = file->driver_priv;
+	struct msm_gem_address_space *aspace;
+	struct msm_gem_object *msm_obj;
+	struct msm_gem_svm_object *msm_svm_obj;
+	struct msm_gem_vma *domain = NULL;
+	struct page **p;
+	int npages;
+	int num_pinned = 0;
+	int write;
+	int ret;
+
+	if (!ctx)
+		return ERR_PTR(-ENODEV);
+
+	/* if we don't have IOMMU, don't bother pretending we can import: */
+	if (!iommu_present(&platform_bus_type)) {
+		dev_err_once(dev->dev, "cannot import without IOMMU\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	/* hostptr and size must be page-aligned */
+	if (offset_in_page(hostptr | size))
+		return ERR_PTR(-EINVAL);
+
+	/* Only CPU cached SVM objects are allowed */
+	if ((flags & MSM_BO_CACHE_MASK) != MSM_BO_CACHED)
+		return ERR_PTR(-EINVAL);
+
+	/* Allocate and initialize a new msm_gem_object */
+	obj = msm_svm_gem_new_impl(dev, size, flags);
+	if (IS_ERR(obj))
+		return obj;
+
+	drm_gem_private_object_init(dev, obj, size);
+
+	msm_obj = to_msm_bo(obj);
+	aspace = ctx->aspace;
+	domain = obj_add_domain(&msm_obj->base, aspace);
+	if (IS_ERR(domain)) {
+		drm_gem_object_unreference_unlocked(obj);
+		return ERR_CAST(domain);
+	}
+
+	/* Reserve iova if not already in use, else fail */
+	ret = msm_gem_reserve_iova(aspace, domain, hostptr, size);
+	if (ret) {
+		obj_remove_domain(domain);
+		drm_gem_object_unreference_unlocked(obj);
+		return ERR_PTR(ret);
+	}
+
+	msm_svm_obj = to_msm_svm_obj(msm_obj);
+	msm_svm_obj->hostptr = hostptr;
+	msm_svm_obj->invalid = false;
+
+	ret = msm_gem_mn_register(msm_svm_obj, aspace);
+	if (ret)
+		goto fail;
+
+	/*
+	 * Get the physical pages and map them into the SMMU in the ioctl
+	 * itself. The driver handles iova allocation, physical page
+	 * allocation and SMMU mapping all in one go. If we break this up,
+	 * we have to maintain state to tell whether physical page
+	 * allocation/mapping still needs to happen.
+	 * For SVM, iova reservation needs to happen in the ioctl itself,
+	 * so do the rest right here as well.
+	 */
+	npages = size >> PAGE_SHIFT;
+	p = kcalloc(npages, sizeof(struct page *), GFP_KERNEL);
+	if (!p) {
+		ret = -ENOMEM;
+		goto fail;
+	}
+
+	write = (msm_obj->flags & MSM_BO_GPU_READONLY) ? 0 : 1;
+	/* This may hold mm->mmap_sem */
+	num_pinned = get_user_pages_fast(hostptr, npages, write, p);
+	if (num_pinned != npages) {
+		ret = -EINVAL;
+		goto free_pages;
+	}
+
+	msm_obj->sgt = drm_prime_pages_to_sg(p, npages);
+	if (IS_ERR(msm_obj->sgt)) {
+		ret = PTR_ERR(msm_obj->sgt);
+		goto free_pages;
+	}
+
+	msm_obj->pages = p;
+
+	ret = aspace->mmu->funcs->map(aspace->mmu, domain->iova,
+			msm_obj->sgt, msm_obj->flags, get_dmabuf_ptr(obj));
+	if (ret)
+		goto free_pages;
+
+	kref_get(&aspace->kref);
+
+	return obj;
+
+free_pages:
+	release_pages(p, num_pinned, 0);
+	kfree(p);
+
+fail:
+	if (domain)
+		msm_gem_release_iova(aspace, domain);
+
+	obj_remove_domain(domain);
+	drm_gem_object_unreference_unlocked(obj);
 
 	return ERR_PTR(ret);
 }
 
 struct drm_gem_object *msm_gem_import(struct drm_device *dev,
-		uint32_t size, struct sg_table *sgt)
+		uint32_t size, struct sg_table *sgt, u32 flags)
 {
 	struct msm_gem_object *msm_obj;
 	struct drm_gem_object *obj;
@@ -677,31 +1173,167 @@
 
 	size = PAGE_ALIGN(size);
 
-	ret = msm_gem_new_impl(dev, size, MSM_BO_WC, &obj);
-	if (ret)
-		goto fail;
+	obj = msm_gem_new_impl(dev, size, MSM_BO_WC, false);
+	if (IS_ERR(obj))
+		return obj;
 
 	drm_gem_private_object_init(dev, obj, size);
 
 	npages = size / PAGE_SIZE;
 
 	msm_obj = to_msm_bo(obj);
+	mutex_lock(&msm_obj->lock);
 	msm_obj->sgt = sgt;
 	msm_obj->pages = drm_malloc_ab(npages, sizeof(struct page *));
 	if (!msm_obj->pages) {
+		mutex_unlock(&msm_obj->lock);
 		ret = -ENOMEM;
 		goto fail;
 	}
 
-	ret = drm_prime_sg_to_page_addr_arrays(sgt, msm_obj->pages, NULL, npages);
-	if (ret)
+	/* OR the passed in flags */
+	msm_obj->flags |= flags;
+
+	ret = drm_prime_sg_to_page_addr_arrays(sgt, msm_obj->pages,
+			NULL, npages);
+	if (ret) {
+		mutex_unlock(&msm_obj->lock);
 		goto fail;
+	}
+
+	mutex_unlock(&msm_obj->lock);
 
 	return obj;
 
 fail:
-	if (obj)
 		drm_gem_object_unreference_unlocked(obj);
 
 	return ERR_PTR(ret);
 }
+
+/* Timeout in ms, long enough to be confident the GPU is hung */
+#define SVM_OBJ_WAIT_TIMEOUT 10000
+static void invalidate_svm_object(struct msm_gem_svm_object *msm_svm_obj)
+{
+	struct msm_gem_object *msm_obj = &msm_svm_obj->msm_obj_base;
+	struct drm_device *dev = msm_obj->base.dev;
+	struct msm_gem_vma *domain, *tmp;
+	uint32_t fence;
+	int ret;
+
+	if (is_active(msm_obj)) {
+		ktime_t timeout = ktime_add_ms(ktime_get(),
+				SVM_OBJ_WAIT_TIMEOUT);
+
+		/* Get the most recent fence that touches the object */
+		fence = msm_gem_fence(msm_obj, MSM_PREP_READ | MSM_PREP_WRITE);
+
+		/* Wait for the fence to retire */
+		ret = msm_wait_fence(dev, fence, &timeout, true);
+		if (ret)
+			/* The GPU could be hung! Not much we can do */
+			dev_err(dev->dev, "drm: Error (%d) waiting for svm object: 0x%llx\n",
+					ret, msm_svm_obj->hostptr);
+	}
+
+	/* GPU is done, unmap object from SMMU */
+	mutex_lock(&msm_obj->lock);
+	list_for_each_entry_safe(domain, tmp, &msm_obj->domains, list) {
+		struct msm_gem_address_space *aspace = domain->aspace;
+
+		if (domain->iova)
+			aspace->mmu->funcs->unmap(aspace->mmu,
+					domain->iova, msm_obj->sgt,
+					get_dmabuf_ptr(&msm_obj->base));
+	}
+	/* Let go of the physical pages */
+	put_pages(&msm_obj->base);
+	mutex_unlock(&msm_obj->lock);
+}
+
+void msm_mn_invalidate_range_start(struct mmu_notifier *mn,
+		struct mm_struct *mm, unsigned long start, unsigned long end)
+{
+	struct msm_mmu_notifier *msm_mn =
+		container_of(mn, struct msm_mmu_notifier, mn);
+	struct interval_tree_node *itn = NULL;
+	struct msm_gem_svm_object *msm_svm_obj;
+	struct drm_gem_object *obj;
+	LIST_HEAD(inv_list);
+
+	if (!msm_gem_mn_get(msm_mn))
+		return;
+
+	spin_lock(&msm_mn->svm_tree_lock);
+	itn = interval_tree_iter_first(&msm_mn->svm_tree, start, end - 1);
+	while (itn) {
+		msm_svm_obj = container_of(itn,
+				struct msm_gem_svm_object, svm_node);
+		obj = &msm_svm_obj->msm_obj_base.base;
+
+		if (kref_get_unless_zero(&obj->refcount))
+			list_add(&msm_svm_obj->lnode, &inv_list);
+
+		itn = interval_tree_iter_next(itn, start, end - 1);
+	}
+	spin_unlock(&msm_mn->svm_tree_lock);
+
+	list_for_each_entry(msm_svm_obj, &inv_list, lnode) {
+		obj = &msm_svm_obj->msm_obj_base.base;
+		/* Unregister SVM object from mmu notifications */
+		msm_gem_mn_unregister(msm_svm_obj);
+		msm_svm_obj->invalid = true;
+		invalidate_svm_object(msm_svm_obj);
+		drm_gem_object_unreference_unlocked(obj);
+	}
+
+	msm_gem_mn_put(msm_mn);
+}
+
+/*
+ * Helper to consolidate in-kernel buffer allocations that need a buffer
+ * object, an iova and a kernel virtual address all in one shot
+ */
+static void *_msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
+		uint32_t flags, struct msm_gem_address_space *aspace,
+		struct drm_gem_object **bo, uint64_t *iova, bool locked)
+{
+	void *vaddr;
+	struct drm_gem_object *obj = _msm_gem_new(dev, size, flags, locked);
+	int ret;
+
+	if (IS_ERR(obj))
+		return ERR_CAST(obj);
+
+	ret = msm_gem_get_iova(obj, aspace, iova);
+	if (ret) {
+		drm_gem_object_unreference(obj);
+		return ERR_PTR(ret);
+	}
+
+	vaddr = msm_gem_vaddr(obj);
+	if (IS_ERR_OR_NULL(vaddr)) {
+		msm_gem_put_iova(obj, aspace);
+		drm_gem_object_unreference(obj);
+		/* msm_gem_vaddr() may return NULL or an ERR_PTR; keep the error */
+		return vaddr ? vaddr : ERR_PTR(-ENOMEM);
+	}
+
+	*bo = obj;
+	return vaddr;
+}
+
+void *msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
+		uint32_t flags, struct msm_gem_address_space *aspace,
+		struct drm_gem_object **bo, uint64_t *iova)
+{
+	return _msm_gem_kernel_new(dev, size, flags, aspace, bo, iova,
+		false);
+}
+
+void *msm_gem_kernel_new_locked(struct drm_device *dev, uint32_t size,
+		uint32_t flags, struct msm_gem_address_space *aspace,
+		struct drm_gem_object **bo, uint64_t *iova)
+{
+	return _msm_gem_kernel_new(dev, size, flags, aspace, bo, iova,
+		true);
+}
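For context, a sketch of how a driver-internal caller would use the msm_gem_kernel_new() helper just added; dev and aspace stand for the usual drm_device and target address space, and the surrounding function is hypothetical:

	struct drm_gem_object *bo = NULL;
	uint64_t iova = 0;
	void *vaddr;

	/* One call allocates the BO, pins an iova in aspace and vmaps it */
	vaddr = msm_gem_kernel_new(dev, PAGE_SIZE, MSM_BO_WC, aspace, &bo, &iova);
	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);

	memset(vaddr, 0, PAGE_SIZE);	/* CPU access through the kernel mapping */
	/* ... hand 'iova' to the GPU ... */

The _locked variant is for callers that already hold dev->struct_mutex, which msm_gem_obj_init() otherwise takes briefly to add the object to the inactive list.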
diff -ruw linux-4.4.115/drivers/gpu/drm/msm/msm_gem.h linux-4.4.115-fbx/drivers/gpu/drm/msm/msm_gem.h
--- linux-4.4.115/drivers/gpu/drm/msm/msm_gem.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/gpu/drm/msm/msm_gem.h	2019-01-22 16:16:23.507246443 +0100
@@ -18,11 +18,33 @@
 #ifndef __MSM_GEM_H__
 #define __MSM_GEM_H__
 
+#include <linux/kref.h>
 #include <linux/reservation.h>
+#include <linux/mmu_notifier.h>
+#include <linux/interval_tree.h>
 #include "msm_drv.h"
 
 /* Additional internal-use only BO flags: */
 #define MSM_BO_STOLEN        0x10000000    /* try to use stolen/splash memory */
+#define MSM_BO_LOCKED        0x20000000    /* Pages have been securely locked */
+#define MSM_BO_SVM           0x40000000    /* bo is SVM */
+
+struct msm_gem_address_space {
+	const char *name;
+	struct msm_mmu *mmu;
+	struct kref kref;
+	struct drm_mm mm;
+	spinlock_t lock; /* Protects drm_mm node allocation/removal */
+	u64 va_len;
+};
+
+struct msm_gem_vma {
+	/* Node used by the GPU address space, but not the SDE address space */
+	struct drm_mm_node node;
+	struct msm_gem_address_space *aspace;
+	uint64_t iova;
+	struct list_head list;
+};
 
 struct msm_gem_object {
 	struct drm_gem_object base;
@@ -52,10 +74,7 @@
 	struct sg_table *sgt;
 	void *vaddr;
 
-	struct {
-		// XXX
-		uint32_t iova;
-	} domain[NUM_DOMAINS];
+	struct list_head domains;
 
 	/* normally (resv == &_resv) except for imported bo's */
 	struct reservation_object *resv;
@@ -65,9 +84,36 @@
 	 * an IOMMU.  Also used for stolen/splashscreen buffer.
 	 */
 	struct drm_mm_node *vram_node;
+	struct mutex lock; /* Protects resources associated with bo */
 };
 #define to_msm_bo(x) container_of(x, struct msm_gem_object, base)
 
+struct msm_mmu_notifier {
+	struct mmu_notifier mn;
+	struct mm_struct *mm; /* mm_struct owning the mmu notifier mn */
+	struct hlist_node node;
+	struct rb_root svm_tree; /* interval tree holding all svm bos */
+	spinlock_t svm_tree_lock; /* Protects svm_tree */
+	struct msm_drm_private *msm_dev;
+	struct kref refcount;
+};
+
+struct msm_gem_svm_object {
+	struct msm_gem_object msm_obj_base;
+	uint64_t hostptr;
+	struct mm_struct *mm; /* mm_struct the svm bo belongs to */
+	struct interval_tree_node svm_node;
+	struct msm_mmu_notifier *msm_mn;
+	struct list_head lnode;
+	/* bo has been unmapped on CPU, cannot be part of GPU submits */
+	bool invalid;
+};
+
+#define to_msm_svm_obj(x) \
+	((struct msm_gem_svm_object *) \
+	 container_of(x, struct msm_gem_svm_object, msm_obj_base))
+
+
 static inline bool is_active(struct msm_gem_object *msm_obj)
 {
 	return msm_obj->gpu != NULL;
@@ -86,7 +132,8 @@
 	return fence;
 }
 
-#define MAX_CMDS 4
+/* Internal submit flags */
+#define SUBMIT_FLAG_SKIP_HANGCHECK 0x00000001
 
 /* Created per submit-ioctl, to track bo's and cmdstream bufs, etc,
  * associated with the cmdstream submission for synchronization (and
@@ -95,24 +142,30 @@
  */
 struct msm_gem_submit {
 	struct drm_device *dev;
-	struct msm_gpu *gpu;
-	struct list_head node;   /* node in gpu submit_list */
+	struct msm_gem_address_space *aspace;
+	struct list_head node;   /* node in ring submit list */
 	struct list_head bo_list;
 	struct ww_acquire_ctx ticket;
 	uint32_t fence;
-	bool valid;
+	int ring;
+	u32 flags;
+	uint64_t profile_buf_iova;
+	struct drm_msm_gem_submit_profile_buffer *profile_buf;
+	bool secure;
+	struct msm_gpu_submitqueue *queue;
+	int tick_index;
 	unsigned int nr_cmds;
 	unsigned int nr_bos;
 	struct {
 		uint32_t type;
 		uint32_t size;  /* in dwords */
-		uint32_t iova;
+		uint64_t iova;
 		uint32_t idx;   /* cmdstream buffer idx in bos[] */
-	} cmd[MAX_CMDS];
+	} *cmd;  /* array of size nr_cmds */
 	struct {
 		uint32_t flags;
 		struct msm_gem_object *obj;
-		uint32_t iova;
+		uint64_t iova;
 	} bos[0];
 };
 
diff -ruw linux-4.4.115/drivers/gpu/drm/msm/msm_gem_prime.c linux-4.4.115-fbx/drivers/gpu/drm/msm/msm_gem_prime.c
--- linux-4.4.115/drivers/gpu/drm/msm/msm_gem_prime.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/gpu/drm/msm/msm_gem_prime.c	2019-01-22 16:16:23.507246443 +0100
@@ -19,6 +19,7 @@
 #include "msm_gem.h"
 
 #include <linux/dma-buf.h>
+#include <linux/ion.h>
 
 struct sg_table *msm_gem_prime_get_sg_table(struct drm_gem_object *obj)
 {
@@ -55,7 +56,16 @@
 struct drm_gem_object *msm_gem_prime_import_sg_table(struct drm_device *dev,
 		struct dma_buf_attachment *attach, struct sg_table *sg)
 {
-	return msm_gem_import(dev, attach->dmabuf->size, sg);
+	u32 flags = 0;
+
+	/*
+	 * Check to see if this is a secure buffer by way of Ion and set the
+	 * appropriate flag if so.
+	 */
+	if (ion_dma_buf_is_secure(attach->dmabuf))
+		flags |= MSM_BO_SECURE;
+
+	return msm_gem_import(dev, attach->dmabuf->size, sg, flags);
 }
 
 int msm_gem_prime_pin(struct drm_gem_object *obj)
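The MSM_BO_SECURE flag attached here at import time is consumed later in the submit path (msm_gem_submit.c below), where it steers which address space the buffer is mapped into:

	/* Secure buffers go through the GPU's secure aspace */
	aspace = (msm_obj->flags & MSM_BO_SECURE) ?
			gpu->secure_aspace : submit->aspace;

A submission containing any secure buffer is rejected up front if the GPU has no secure_aspace.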
diff -ruw linux-4.4.115/drivers/gpu/drm/msm/msm_gem_submit.c linux-4.4.115-fbx/drivers/gpu/drm/msm/msm_gem_submit.c
--- linux-4.4.115/drivers/gpu/drm/msm/msm_gem_submit.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/gpu/drm/msm/msm_gem_submit.c	2019-10-29 09:26:23.633203080 +0100
@@ -18,6 +18,7 @@
 #include "msm_drv.h"
 #include "msm_gpu.h"
 #include "msm_gem.h"
+#include "msm_trace.h"
 
 /*
  * Cmdstream submission:
@@ -28,16 +29,14 @@
 #define BO_LOCKED   0x4000
 #define BO_PINNED   0x2000
 
-static inline void __user *to_user_ptr(u64 address)
-{
-	return (void __user *)(uintptr_t)address;
-}
-
 static struct msm_gem_submit *submit_create(struct drm_device *dev,
-		struct msm_gpu *gpu, uint32_t nr)
+		struct msm_gem_address_space *aspace,
+		uint32_t nr_bos, uint32_t nr_cmds,
+		struct msm_gpu_submitqueue *queue)
 {
 	struct msm_gem_submit *submit;
-	uint64_t sz = sizeof(*submit) + ((u64)nr * sizeof(submit->bos[0]));
+	uint64_t sz = sizeof(*submit) + ((u64)nr_bos * sizeof(submit->bos[0])) +
+		((u64)nr_cmds * sizeof(submit->cmd[0]));
 
 	if (sz > SIZE_MAX)
 		return NULL;
@@ -45,12 +44,24 @@
 	submit = kmalloc(sz, GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
 	if (submit) {
 		submit->dev = dev;
-		submit->gpu = gpu;
+		submit->aspace = aspace;
+		submit->queue = queue;
 
 		/* initially, until copy_from_user() and bo lookup succeeds: */
 		submit->nr_bos = 0;
 		submit->nr_cmds = 0;
 
+		submit->profile_buf = NULL;
+		submit->profile_buf_iova = 0;
+		submit->cmd = (void *)&submit->bos[nr_bos];
+
+		submit->secure = false;
+
+		/*
+		 * Initialize node so we can safely list_del() on it if
+		 * we fail in the submit path
+		 */
+		INIT_LIST_HEAD(&submit->node);
 		INIT_LIST_HEAD(&submit->bo_list);
 		ww_acquire_init(&submit->ticket, &reservation_ww_class);
 	}
@@ -66,7 +77,18 @@
 	return -EFAULT;
 }
 
-static int submit_lookup_objects(struct msm_gem_submit *submit,
+void msm_gem_submit_free(struct msm_gem_submit *submit)
+{
+	if (!submit)
+		return;
+
+	msm_submitqueue_put(submit->queue);
+	list_del(&submit->node);
+	kfree(submit);
+}
+
+static int submit_lookup_objects(struct msm_gpu *gpu,
+		struct msm_gem_submit *submit,
 		struct drm_msm_gem_submit *args, struct drm_file *file)
 {
 	unsigned i;
@@ -80,15 +102,18 @@
 		struct drm_gem_object *obj;
 		struct msm_gem_object *msm_obj;
 		void __user *userptr =
-			to_user_ptr(args->bos + (i * sizeof(submit_bo)));
+			u64_to_user_ptr(args->bos + (i * sizeof(submit_bo)));
 
-		ret = copy_from_user_inatomic(&submit_bo, userptr, sizeof(submit_bo));
-		if (unlikely(ret)) {
+		if (copy_from_user_inatomic(&submit_bo, userptr,
+			sizeof(submit_bo))) {
 			pagefault_enable();
 			spin_unlock(&file->table_lock);
-			ret = copy_from_user(&submit_bo, userptr, sizeof(submit_bo));
-			if (ret)
+			if (copy_from_user(&submit_bo, userptr,
+				sizeof(submit_bo))) {
+				ret = -EFAULT;
 				goto out;
+			}
+
 			spin_lock(&file->table_lock);
 			pagefault_disable();
 		}
@@ -116,6 +141,20 @@
 
 		msm_obj = to_msm_bo(obj);
 
+		/*
+		 * If the buffer is marked as secure make sure that we can
+		 * handle secure buffers and then mark the submission as secure
+		 */
+		if (msm_obj->flags & MSM_BO_SECURE) {
+			if (!gpu->secure_aspace) {
+				DRM_ERROR("Cannot handle secure buffers\n");
+				ret = -EINVAL;
+				goto out_unlock;
+			}
+
+			submit->secure = true;
+		}
+
 		if (!list_empty(&msm_obj->submit_entry)) {
 			DRM_ERROR("handle %u at index %u already on submit list\n",
 					submit_bo.handle, i);
@@ -140,12 +179,17 @@
 	return ret;
 }
 
-static void submit_unlock_unpin_bo(struct msm_gem_submit *submit, int i)
+static void submit_unlock_unpin_bo(struct msm_gpu *gpu,
+		struct msm_gem_submit *submit, int i)
 {
 	struct msm_gem_object *msm_obj = submit->bos[i].obj;
+	struct msm_gem_address_space *aspace;
+
+	aspace = (msm_obj->flags & MSM_BO_SECURE) ?
+			gpu->secure_aspace : submit->aspace;
 
 	if (submit->bos[i].flags & BO_PINNED)
-		msm_gem_put_iova(&msm_obj->base, submit->gpu->id);
+		msm_gem_put_iova(&msm_obj->base, aspace);
 
 	if (submit->bos[i].flags & BO_LOCKED)
 		ww_mutex_unlock(&msm_obj->resv->lock);
@@ -157,16 +201,14 @@
 }
 
 /* This is where we make sure all the bo's are reserved and pin'd: */
-static int submit_validate_objects(struct msm_gem_submit *submit)
+static int submit_validate_objects(struct msm_gpu *gpu,
+		struct msm_gem_submit *submit)
 {
 	int contended, slow_locked = -1, i, ret = 0;
 
 retry:
-	submit->valid = true;
-
 	for (i = 0; i < submit->nr_bos; i++) {
 		struct msm_gem_object *msm_obj = submit->bos[i].obj;
-		uint32_t iova;
 
 		if (slow_locked == i)
 			slow_locked = -1;
@@ -181,28 +223,17 @@
 			submit->bos[i].flags |= BO_LOCKED;
 		}
 
-
-		/* if locking succeeded, pin bo: */
-		ret = msm_gem_get_iova_locked(&msm_obj->base,
-				submit->gpu->id, &iova);
-
-		/* this would break the logic in the fail path.. there is no
-		 * reason for this to happen, but just to be on the safe side
-		 * let's notice if this starts happening in the future:
+		/*
+		 * Fail the submit if an invalidated SVM object is on its
+		 * buffer list.
 		 */
-		WARN_ON(ret == -EDEADLK);
-
-		if (ret)
+		if (msm_obj->flags & MSM_BO_SVM) {
+			struct msm_gem_svm_object *msm_svm_obj =
+				to_msm_svm_obj(msm_obj);
+			if (msm_svm_obj->invalid) {
+				ret = -EINVAL;
 			goto fail;
-
-		submit->bos[i].flags |= BO_PINNED;
-
-		if (iova == submit->bos[i].iova) {
-			submit->bos[i].flags |= BO_VALID;
-		} else {
-			submit->bos[i].iova = iova;
-			submit->bos[i].flags &= ~BO_VALID;
-			submit->valid = false;
+			}
 		}
 	}
 
@@ -212,10 +243,10 @@
 
 fail:
 	for (; i >= 0; i--)
-		submit_unlock_unpin_bo(submit, i);
+		submit_unlock_unpin_bo(gpu, submit, i);
 
 	if (slow_locked > 0)
-		submit_unlock_unpin_bo(submit, slow_locked);
+		submit_unlock_unpin_bo(gpu, submit, slow_locked);
 
 	if (ret == -EDEADLK) {
 		struct msm_gem_object *msm_obj = submit->bos[contended].obj;
@@ -232,9 +263,14 @@
 	return ret;
 }
 
-static int submit_bo(struct msm_gem_submit *submit, uint32_t idx,
-		struct msm_gem_object **obj, uint32_t *iova, bool *valid)
+static int submit_bo(struct msm_gpu *gpu,
+		struct msm_gem_submit *submit, uint32_t idx,
+		struct msm_gem_object **obj, uint64_t *iova, bool *valid)
 {
+	struct msm_gem_object *msm_obj;
+	struct msm_gem_address_space *aspace;
+	int ret;
+
 	if (idx >= submit->nr_bos) {
 		DRM_ERROR("invalid buffer index: %u (out of %u)\n",
 				idx, submit->nr_bos);
@@ -243,6 +279,39 @@
 
 	if (obj)
 		*obj = submit->bos[idx].obj;
+
+	/* Only map and pin if the caller needs either the iova or valid */
+	if (!iova && !valid)
+		return 0;
+
+	if (!(submit->bos[idx].flags & BO_PINNED)) {
+		uint64_t buf_iova;
+
+		msm_obj = submit->bos[idx].obj;
+		aspace = (msm_obj->flags & MSM_BO_SECURE) ?
+			gpu->secure_aspace : submit->aspace;
+
+		ret = msm_gem_get_iova(&msm_obj->base, aspace, &buf_iova);
+
+		/* this would break the logic in the fail path.. there is no
+		 * reason for this to happen, but just to be on the safe side
+		 * let's notice if this starts happening in the future:
+		 */
+		WARN_ON(ret == -EDEADLK);
+
+		if (ret)
+			return ret;
+
+		submit->bos[idx].flags |= BO_PINNED;
+
+		if (buf_iova == submit->bos[idx].iova) {
+			submit->bos[idx].flags |= BO_VALID;
+		} else {
+			submit->bos[idx].iova = buf_iova;
+			submit->bos[idx].flags &= ~BO_VALID;
+		}
+	}
+
 	if (iova)
 		*iova = submit->bos[idx].iova;
 	if (valid)
@@ -252,8 +321,10 @@
 }
 
 /* process the reloc's and patch up the cmdstream as needed: */
-static int submit_reloc(struct msm_gem_submit *submit, struct msm_gem_object *obj,
-		uint32_t offset, uint32_t nr_relocs, uint64_t relocs)
+static int submit_reloc(struct msm_gpu *gpu,
+		struct msm_gem_submit *submit,
+		struct msm_gem_object *obj, uint32_t offset,
+		uint32_t nr_relocs, uint64_t relocs)
 {
 	uint32_t i, last_offset = 0;
 	uint32_t *ptr;
@@ -264,10 +335,18 @@
 		return -EINVAL;
 	}
 
+	if (obj->flags & MSM_BO_SECURE) {
+		DRM_ERROR("cannot do relocs on a secure buffer\n");
+		return -EINVAL;
+	}
+
+	if (nr_relocs == 0)
+		return 0;
+
 	/* For now, just map the entire thing.  Eventually we probably
 	 * to do it page-by-page, w/ kmap() if not vmap()d..
 	 */
-	ptr = msm_gem_vaddr_locked(&obj->base);
+	ptr = msm_gem_vaddr(&obj->base);
 
 	if (IS_ERR(ptr)) {
 		ret = PTR_ERR(ptr);
@@ -278,12 +357,13 @@
 	for (i = 0; i < nr_relocs; i++) {
 		struct drm_msm_gem_submit_reloc submit_reloc;
 		void __user *userptr =
-			to_user_ptr(relocs + (i * sizeof(submit_reloc)));
-		uint32_t iova, off;
+			u64_to_user_ptr(relocs + (i * sizeof(submit_reloc)));
+		uint64_t iova;
+		uint32_t off;
 		bool valid;
 
-		ret = copy_from_user(&submit_reloc, userptr, sizeof(submit_reloc));
-		if (ret)
+		if (copy_from_user(&submit_reloc, userptr,
+			sizeof(submit_reloc)))
 			return -EFAULT;
 
 		if (submit_reloc.submit_offset % 4) {
@@ -301,7 +381,8 @@
 			return -EINVAL;
 		}
 
-		ret = submit_bo(submit, submit_reloc.reloc_idx, NULL, &iova, &valid);
+		ret = submit_bo(gpu, submit, submit_reloc.reloc_idx,
+				NULL, &iova, &valid);
 		if (ret)
 			return ret;
 
@@ -323,13 +404,17 @@
 	return 0;
 }
 
-static void submit_cleanup(struct msm_gem_submit *submit, bool fail)
+static void submit_cleanup(struct msm_gpu *gpu, struct msm_gem_submit *submit,
+		bool fail)
 {
 	unsigned i;
 
+	if (!submit)
+		return;
+
 	for (i = 0; i < submit->nr_bos; i++) {
 		struct msm_gem_object *msm_obj = submit->bos[i].obj;
-		submit_unlock_unpin_bo(submit, i);
+		submit_unlock_unpin_bo(gpu, submit, i);
 		list_del_init(&msm_obj->submit_entry);
 		drm_gem_object_unreference(&msm_obj->base);
 	}
@@ -344,6 +429,7 @@
 	struct drm_msm_gem_submit *args = data;
 	struct msm_file_private *ctx = file->driver_priv;
 	struct msm_gem_submit *submit;
+	struct msm_gpu_submitqueue *queue;
 	struct msm_gpu *gpu;
 	unsigned i;
 	int ret;
@@ -351,36 +437,41 @@
 	/* for now, we just have 3d pipe.. eventually this would need to
 	 * be more clever to dispatch to appropriate gpu module:
 	 */
-	if (args->pipe != MSM_PIPE_3D0)
+	if (MSM_PIPE_ID(args->flags) != MSM_PIPE_3D0)
 		return -EINVAL;
 
 	gpu = priv->gpu;
+	if (!gpu || !ctx)
+		return -ENXIO;
 
-	if (args->nr_cmds > MAX_CMDS)
-		return -EINVAL;
+	queue = msm_submitqueue_get(ctx, args->queueid);
+	if (!queue)
+		return -ENOENT;
 
 	mutex_lock(&dev->struct_mutex);
 
-	submit = submit_create(dev, gpu, args->nr_bos);
+	submit = submit_create(dev, ctx->aspace, args->nr_bos, args->nr_cmds,
+		queue);
 	if (!submit) {
 		ret = -ENOMEM;
 		goto out;
 	}
 
-	ret = submit_lookup_objects(submit, args, file);
+	ret = submit_lookup_objects(gpu, submit, args, file);
 	if (ret)
 		goto out;
 
-	ret = submit_validate_objects(submit);
+	ret = submit_validate_objects(gpu, submit);
 	if (ret)
 		goto out;
 
 	for (i = 0; i < args->nr_cmds; i++) {
 		struct drm_msm_gem_submit_cmd submit_cmd;
 		void __user *userptr =
-			to_user_ptr(args->cmds + (i * sizeof(submit_cmd)));
+			u64_to_user_ptr(args->cmds + (i * sizeof(submit_cmd)));
 		struct msm_gem_object *msm_obj;
-		uint32_t iova;
+		uint64_t iova;
+		size_t size;
 
 		ret = copy_from_user(&submit_cmd, userptr, sizeof(submit_cmd));
 		if (ret) {
@@ -393,6 +484,7 @@
 		case MSM_SUBMIT_CMD_BUF:
 		case MSM_SUBMIT_CMD_IB_TARGET_BUF:
 		case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
+		case MSM_SUBMIT_CMD_PROFILE_BUF:
 			break;
 		default:
 			DRM_ERROR("invalid type: %08x\n", submit_cmd.type);
@@ -400,7 +492,7 @@
 			goto out;
 		}
 
-		ret = submit_bo(submit, submit_cmd.submit_idx,
+		ret = submit_bo(gpu, submit, submit_cmd.submit_idx,
 				&msm_obj, &iova, NULL);
 		if (ret)
 			goto out;
@@ -412,9 +504,12 @@
 			goto out;
 		}
 
-		if ((submit_cmd.size + submit_cmd.submit_offset) >=
-				msm_obj->base.size) {
-			DRM_ERROR("invalid cmdstream size: %u\n", submit_cmd.size);
+		size = submit_cmd.size + submit_cmd.submit_offset;
+
+		if (!submit_cmd.size || (size < submit_cmd.size) ||
+			(size > msm_obj->base.size)) {
+			DRM_ERROR("invalid cmdstream offset/size: %u/%u\n",
+				submit_cmd.submit_offset, submit_cmd.size);
 			ret = -EINVAL;
 			goto out;
 		}
@@ -424,24 +519,32 @@
 		submit->cmd[i].iova = iova + submit_cmd.submit_offset;
 		submit->cmd[i].idx  = submit_cmd.submit_idx;
 
-		if (submit->valid)
-			continue;
+		if (submit_cmd.type == MSM_SUBMIT_CMD_PROFILE_BUF) {
+			submit->profile_buf_iova = submit->cmd[i].iova;
+			submit->profile_buf = msm_gem_vaddr(&msm_obj->base)
+				+ submit_cmd.submit_offset;
+		}
 
-		ret = submit_reloc(submit, msm_obj, submit_cmd.submit_offset,
-				submit_cmd.nr_relocs, submit_cmd.relocs);
+		ret = submit_reloc(gpu, submit, msm_obj,
+				submit_cmd.submit_offset, submit_cmd.nr_relocs,
+				submit_cmd.relocs);
 		if (ret)
 			goto out;
 	}
 
 	submit->nr_cmds = i;
 
-	ret = msm_gpu_submit(gpu, submit, ctx);
+	/* Clamp the user submitted ring to the range of available rings */
+	submit->ring = clamp_t(uint32_t, queue->prio, 0, gpu->nr_rings - 1);
+
+	ret = msm_gpu_submit(gpu, submit);
 
 	args->fence = submit->fence;
 
 out:
-	if (submit)
-		submit_cleanup(submit, !!ret);
+	submit_cleanup(gpu, submit, !!ret);
+	if (ret)
+		msm_gem_submit_free(submit);
 	mutex_unlock(&dev->struct_mutex);
 	return ret;
 }
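The reworked cmdstream bounds check above is worth a worked example, since the old test, (size + offset) >= obj->size, could be defeated by 32-bit wrap-around. With hypothetical userspace values:

	uint32_t submit_offset = 0xfffff000;
	uint32_t cmd_size      = 0x00002000;
	size_t size = cmd_size + submit_offset;	/* 32-bit add wraps to 0x1000 */

	/* old check: 0x1000 >= obj->size is false for any BO over 4 KiB, accepted */
	/* new check: size < cmd_size (0x1000 < 0x2000) detects the wrap, -EINVAL */

Zero-length commands are now rejected explicitly as well.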
diff -ruw linux-4.4.115/drivers/gpu/drm/msm/msm_gpu.c linux-4.4.115-fbx/drivers/gpu/drm/msm/msm_gpu.c
--- linux-4.4.115/drivers/gpu/drm/msm/msm_gpu.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/gpu/drm/msm/msm_gpu.c	2019-10-29 09:26:23.633203080 +0100
@@ -2,6 +2,8 @@
  * Copyright (C) 2013 Red Hat
  * Author: Rob Clark <robdclark@gmail.com>
  *
+ * Copyright (c) 2018 The Linux Foundation. All rights reserved.
+ *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License version 2 as published by
  * the Free Software Foundation.
@@ -15,15 +17,19 @@
  * this program.  If not, see <http://www.gnu.org/licenses/>.
  */
 
+#include <linux/devfreq.h>
+#include <linux/devfreq_cooling.h>
 #include "msm_gpu.h"
 #include "msm_gem.h"
 #include "msm_mmu.h"
-
+#include "msm_trace.h"
 
 /*
  * Power Management:
  */
 
+#define ACTIVE_POWER_LEVEL(gpu) min_t(unsigned int, 2, gpu->nr_pwrlevels - 3)
+
 #ifdef DOWNSTREAM_CONFIG_MSM_BUS_SCALING
 #include <mach/board.h>
 static void bs_init(struct msm_gpu *gpu)
@@ -55,6 +61,138 @@
 static void bs_set(struct msm_gpu *gpu, int idx) {}
 #endif
 
+static int msm_devfreq_target(struct device *dev, unsigned long *freq,
+		u32 flags)
+{
+	struct msm_gpu *gpu = platform_get_drvdata(to_platform_device(dev));
+	struct dev_pm_opp *opp;
+
+	opp = devfreq_recommended_opp(dev, freq, flags);
+
+	if (IS_ERR(opp))
+		return PTR_ERR(opp);
+
+	clk_set_rate(gpu->core_clk, *freq);
+
+	return 0;
+}
+
+static int msm_devfreq_get_dev_status(struct device *dev,
+		struct devfreq_dev_status *status)
+{
+	struct msm_gpu *gpu = platform_get_drvdata(to_platform_device(dev));
+	uint64_t cycles;
+	ktime_t time;
+	u32 freq;
+
+	status->current_frequency = (unsigned long) clk_get_rate(gpu->core_clk);
+
+	cycles = gpu->funcs->gpu_busy(gpu);
+	freq = ((u32) status->current_frequency) / 1000000;
+	status->busy_time = ((u32) (cycles - gpu->devfreq.busy_cycles)) / freq;
+	gpu->devfreq.busy_cycles = cycles;
+
+	time = ktime_get();
+	status->total_time = ktime_us_delta(time, gpu->devfreq.time);
+	gpu->devfreq.time = time;
+
+	return 0;
+}
+
+static int msm_devfreq_get_cur_freq(struct device *dev, unsigned long *freq)
+{
+	struct msm_gpu *gpu = platform_get_drvdata(to_platform_device(dev));
+
+	*freq = clk_get_rate(gpu->core_clk);
+	return 0;
+}
+
+static void msm_devfreq_manage_opp_notifier(struct device *dev,
+	struct notifier_block *nb, bool subscribe)
+{
+	struct srcu_notifier_head *nh;
+
+	rcu_read_lock();
+	nh = dev_pm_opp_get_notifier(dev);
+	if (IS_ERR(nh)) {
+		rcu_read_unlock();
+		return;
+	}
+	rcu_read_unlock();
+
+	if (subscribe)
+		srcu_notifier_chain_register(nh, nb);
+	else
+		srcu_notifier_chain_unregister(nh, nb);
+}
+
+static int msm_opp_notify(struct notifier_block *nb, unsigned long type,
+		void *in_opp)
+{
+	struct msm_gpu *gpu = container_of(nb, struct msm_gpu, nb);
+
+	if (type != OPP_EVENT_ENABLE && type != OPP_EVENT_DISABLE)
+		return -EINVAL;
+
+	/*
+	 * The opp table for the GPU device changed, call update_devfreq()
+	 * to adjust the GPU frequency if needed
+	 */
+	mutex_lock(&gpu->devfreq.devfreq->lock);
+	update_devfreq(gpu->devfreq.devfreq);
+	mutex_unlock(&gpu->devfreq.devfreq->lock);
+
+	return 0;
+}
+
+static struct devfreq_dev_profile msm_devfreq_profile = {
+	.polling_ms = 10,
+	.target = msm_devfreq_target,
+	.get_dev_status = msm_devfreq_get_dev_status,
+	.get_cur_freq = msm_devfreq_get_cur_freq,
+};
+
+static void msm_devfreq_init(struct msm_gpu *gpu)
+{
+	struct msm_drm_private *priv = gpu->dev->dev_private;
+	struct device *dev = &priv->gpu_pdev->dev;
+	unsigned int level = ACTIVE_POWER_LEVEL(gpu);
+
+	/* Don't do devfreq if the GPU doesn't implement statistics gathering */
+	if (!gpu->funcs->gpu_busy)
+		return;
+
+	msm_devfreq_profile.initial_freq = gpu->gpufreq[level];
+	msm_devfreq_profile.freq_table = gpu->gpufreq;
+	msm_devfreq_profile.max_state = gpu->nr_pwrlevels - 1;
+
+	gpu->devfreq.devfreq = devm_devfreq_add_device(dev,
+			&msm_devfreq_profile, "simple_ondemand", NULL);
+
+	if (IS_ERR(gpu->devfreq.devfreq)) {
+		dev_err(dev, "Couldn't initialize GPU devfreq\n");
+		gpu->devfreq.devfreq = NULL;
+		return;
+	}
+
+	gpu->devfreq.cooling_dev = of_devfreq_cooling_register(dev->of_node,
+			gpu->devfreq.devfreq);
+
+	if (IS_ERR(gpu->devfreq.cooling_dev)) {
+		dev_err(dev, "Couldn't register GPU devfreq cooling device\n");
+		gpu->devfreq.cooling_dev = NULL;
+		return;
+	}
+
+	gpu->nb.notifier_call = msm_opp_notify;
+
+	/*
+	 * register for OPP notifications so we can adjust the
+	 * GPU device power levels appropriately
+	 */
+	msm_devfreq_manage_opp_notifier(dev, &gpu->nb, true);
+}
+
 static int enable_pwrrail(struct msm_gpu *gpu)
 {
 	struct drm_device *dev = gpu->dev;
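A worked example of the statistics math in msm_devfreq_get_dev_status() above, assuming a 500 MHz core clock and the default 10 ms polling interval:

	u32 freq_mhz = 500000000 / 1000000;	/* 500 cycles per microsecond */
	u64 cycle_delta = 2500000;		/* busy cycles since the last poll */
	u32 busy_us = 2500000 / 500;		/* 5000 us of busy time */

	/*
	 * total_time comes from ktime_us_delta(), roughly 10000 us here, so
	 * the simple_ondemand governor sees about 50% load for this window.
	 */

Dividing the cycle delta by the rate in MHz yields microseconds directly, keeping busy_time and total_time in the same unit.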
@@ -90,21 +228,21 @@
 
 static int enable_clk(struct msm_gpu *gpu)
 {
-	struct clk *rate_clk = NULL;
+	unsigned int level = ACTIVE_POWER_LEVEL(gpu);
+	uint32_t rate = gpu->gpufreq[level];
 	int i;
 
-	/* NOTE: kgsl_pwrctrl_clk() ignores grp_clks[0].. */
-	for (i = ARRAY_SIZE(gpu->grp_clks) - 1; i > 0; i--) {
-		if (gpu->grp_clks[i]) {
-			clk_prepare(gpu->grp_clks[i]);
-			rate_clk = gpu->grp_clks[i];
-		}
-	}
+	if (gpu->core_clk)
+		clk_set_rate(gpu->core_clk, rate);
+
+	if (gpu->rbbmtimer_clk)
+		clk_set_rate(gpu->rbbmtimer_clk, 19200000);
 
-	if (rate_clk && gpu->fast_rate)
-		clk_set_rate(rate_clk, gpu->fast_rate);
+	for (i = gpu->nr_clocks - 1; i >= 0; i--)
+		if (gpu->grp_clks[i])
+			clk_prepare(gpu->grp_clks[i]);
 
-	for (i = ARRAY_SIZE(gpu->grp_clks) - 1; i > 0; i--)
+	for (i = gpu->nr_clocks - 1; i >= 0; i--)
 		if (gpu->grp_clks[i])
 			clk_enable(gpu->grp_clks[i]);
 
@@ -113,33 +251,34 @@
 
 static int disable_clk(struct msm_gpu *gpu)
 {
-	struct clk *rate_clk = NULL;
+	uint32_t rate = gpu->gpufreq[gpu->nr_pwrlevels - 1];
 	int i;
 
-	/* NOTE: kgsl_pwrctrl_clk() ignores grp_clks[0].. */
-	for (i = ARRAY_SIZE(gpu->grp_clks) - 1; i > 0; i--) {
-		if (gpu->grp_clks[i]) {
+	for (i = gpu->nr_clocks - 1; i >= 0; i--)
+		if (gpu->grp_clks[i])
 			clk_disable(gpu->grp_clks[i]);
-			rate_clk = gpu->grp_clks[i];
-		}
-	}
 
-	if (rate_clk && gpu->slow_rate)
-		clk_set_rate(rate_clk, gpu->slow_rate);
-
-	for (i = ARRAY_SIZE(gpu->grp_clks) - 1; i > 0; i--)
+	for (i = gpu->nr_clocks - 1; i >= 0; i--)
 		if (gpu->grp_clks[i])
 			clk_unprepare(gpu->grp_clks[i]);
 
+	if (gpu->core_clk)
+		clk_set_rate(gpu->core_clk, rate);
+
+	if (gpu->rbbmtimer_clk)
+		clk_set_rate(gpu->rbbmtimer_clk, 0);
+
 	return 0;
 }
 
 static int enable_axi(struct msm_gpu *gpu)
 {
+	unsigned int level = ACTIVE_POWER_LEVEL(gpu);
 	if (gpu->ebi1_clk)
 		clk_prepare_enable(gpu->ebi1_clk);
-	if (gpu->bus_freq)
-		bs_set(gpu, gpu->bus_freq);
+
+	if (gpu->busfreq[level])
+		bs_set(gpu, gpu->busfreq[level]);
 	return 0;
 }
 
@@ -147,25 +286,27 @@
 {
 	if (gpu->ebi1_clk)
 		clk_disable_unprepare(gpu->ebi1_clk);
-	if (gpu->bus_freq)
+
+	if (gpu->busfreq[gpu->nr_pwrlevels - 1])
 		bs_set(gpu, 0);
 	return 0;
 }
 
-int msm_gpu_pm_resume(struct msm_gpu *gpu)
+static void msm_devfreq_resume(struct msm_gpu *gpu)
 {
-	struct drm_device *dev = gpu->dev;
-	int ret;
+	if (gpu->devfreq.devfreq) {
+		gpu->devfreq.busy_cycles =  0;
+		gpu->devfreq.time = ktime_get();
 
-	DBG("%s: active_cnt=%d", gpu->name, gpu->active_cnt);
-
-	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+		devfreq_resume_device(gpu->devfreq.devfreq);
+	}
+}
 
-	if (gpu->active_cnt++ > 0)
-		return 0;
+int msm_gpu_pm_resume(struct msm_gpu *gpu)
+{
+	int ret;
 
-	if (WARN_ON(gpu->active_cnt <= 0))
-		return -EINVAL;
+	DBG("%s", gpu->name);
 
 	ret = enable_pwrrail(gpu);
 	if (ret)
@@ -179,23 +320,26 @@
 	if (ret)
 		return ret;
 
+	if (gpu->aspace && gpu->aspace->mmu)
+		msm_mmu_enable(gpu->aspace->mmu);
+
+	gpu->needs_hw_init = true;
+	msm_devfreq_resume(gpu);
+
 	return 0;
 }
 
 int msm_gpu_pm_suspend(struct msm_gpu *gpu)
 {
-	struct drm_device *dev = gpu->dev;
 	int ret;
 
-	DBG("%s: active_cnt=%d", gpu->name, gpu->active_cnt);
+	DBG("%s", gpu->name);
 
-	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+	if (gpu->devfreq.devfreq)
+		devfreq_suspend_device(gpu->devfreq.devfreq);
 
-	if (--gpu->active_cnt > 0)
-		return 0;
-
-	if (WARN_ON(gpu->active_cnt < 0))
-		return -EINVAL;
+	if (gpu->aspace && gpu->aspace->mmu)
+		msm_mmu_disable(gpu->aspace->mmu);
 
 	ret = disable_axi(gpu);
 	if (ret)
@@ -212,82 +356,71 @@
 	return 0;
 }
 
-/*
- * Inactivity detection (for suspend):
- */
-
-static void inactive_worker(struct work_struct *work)
+int msm_gpu_hw_init(struct msm_gpu *gpu)
 {
-	struct msm_gpu *gpu = container_of(work, struct msm_gpu, inactive_work);
-	struct drm_device *dev = gpu->dev;
+	int ret;
 
-	if (gpu->inactive)
-		return;
+	if (!gpu->needs_hw_init)
+		return 0;
 
-	DBG("%s: inactive!\n", gpu->name);
-	mutex_lock(&dev->struct_mutex);
-	if (!(msm_gpu_active(gpu) || gpu->inactive)) {
-		disable_axi(gpu);
-		disable_clk(gpu);
-		gpu->inactive = true;
-	}
-	mutex_unlock(&dev->struct_mutex);
+	disable_irq(gpu->irq);
+	ret = gpu->funcs->hw_init(gpu);
+	if (!ret)
+		gpu->needs_hw_init = false;
+	enable_irq(gpu->irq);
+
+	return ret;
 }
 
-static void inactive_handler(unsigned long data)
+static void retire_guilty_submit(struct msm_gpu *gpu,
+		struct msm_ringbuffer *ring)
 {
-	struct msm_gpu *gpu = (struct msm_gpu *)data;
-	struct msm_drm_private *priv = gpu->dev->dev_private;
+	struct msm_gem_submit *submit = list_first_entry_or_null(&ring->submits,
+		struct msm_gem_submit, node);
 
-	queue_work(priv->wq, &gpu->inactive_work);
-}
+	if (!submit)
+		return;
 
-/* cancel inactive timer and make sure we are awake: */
-static void inactive_cancel(struct msm_gpu *gpu)
-{
-	DBG("%s", gpu->name);
-	del_timer(&gpu->inactive_timer);
-	if (gpu->inactive) {
-		enable_clk(gpu);
-		enable_axi(gpu);
-		gpu->inactive = false;
-	}
-}
+	submit->queue->faults++;
 
-static void inactive_start(struct msm_gpu *gpu)
-{
-	DBG("%s", gpu->name);
-	mod_timer(&gpu->inactive_timer,
-			round_jiffies_up(jiffies + DRM_MSM_INACTIVE_JIFFIES));
+	msm_gem_submit_free(submit);
 }
 
 /*
  * Hangcheck detection for locked gpu:
  */
 
-static void retire_submits(struct msm_gpu *gpu, uint32_t fence);
+static void retire_submits(struct msm_gpu *gpu, struct msm_ringbuffer *ring,
+		uint32_t fence);
 
 static void recover_worker(struct work_struct *work)
 {
 	struct msm_gpu *gpu = container_of(work, struct msm_gpu, recover_work);
 	struct drm_device *dev = gpu->dev;
 
-	dev_err(dev->dev, "%s: hangcheck recover!\n", gpu->name);
-
 	mutex_lock(&dev->struct_mutex);
 	if (msm_gpu_active(gpu)) {
 		struct msm_gem_submit *submit;
-		uint32_t fence = gpu->funcs->last_fence(gpu);
+		struct msm_ringbuffer *ring;
+		int i;
 
-		/* retire completed submits, plus the one that hung: */
-		retire_submits(gpu, fence + 1);
+		/* Retire all events that have already passed */
+		FOR_EACH_RING(gpu, ring, i)
+			retire_submits(gpu, ring, ring->memptrs->fence);
 
-		inactive_cancel(gpu);
+		retire_guilty_submit(gpu, gpu->funcs->active_ring(gpu));
+
+		/* Recover the GPU */
 		gpu->funcs->recover(gpu);
+		/* Decrement the device usage count for the guilty submit */
+		pm_runtime_put_sync_autosuspend(&gpu->pdev->dev);
+
+		/* Replay the remaining submits on all rings, highest priority first */
+		for (i = 0;  i < gpu->nr_rings; i++) {
+			struct msm_ringbuffer *ring = gpu->rb[i];
 
-		/* replay the remaining submits after the one that hung: */
-		list_for_each_entry(submit, &gpu->submit_list, node) {
-			gpu->funcs->submit(gpu, submit, NULL);
+			list_for_each_entry(submit, &ring->submits, node)
+				gpu->funcs->submit(gpu, submit);
 		}
 	}
 	mutex_unlock(&dev->struct_mutex);
@@ -307,25 +440,43 @@
 	struct msm_gpu *gpu = (struct msm_gpu *)data;
 	struct drm_device *dev = gpu->dev;
 	struct msm_drm_private *priv = dev->dev_private;
-	uint32_t fence = gpu->funcs->last_fence(gpu);
+	struct msm_ringbuffer *ring = gpu->funcs->active_ring(gpu);
+	uint32_t fence = ring->memptrs->fence;
+	uint32_t submitted = gpu->funcs->submitted_fence(gpu, ring);
 
-	if (fence != gpu->hangcheck_fence) {
+	if (fence != ring->hangcheck_fence) {
 		/* some progress has been made.. ya! */
-		gpu->hangcheck_fence = fence;
-	} else if (fence < gpu->submitted_fence) {
-		/* no progress and not done.. hung! */
-		gpu->hangcheck_fence = fence;
-		dev_err(dev->dev, "%s: hangcheck detected gpu lockup!\n",
-				gpu->name);
+		ring->hangcheck_fence = fence;
+	} else if (fence < submitted) {
+		struct msm_gem_submit *submit;
+
+		ring->hangcheck_fence = fence;
+
+		/*
+		 * No progress was made; check whether the current submit
+		 * intentionally skips the hangcheck
+		 */
+		submit = list_first_entry_or_null(&ring->submits,
+			struct msm_gem_submit, node);
+
+		if (!submit || (submit->queue->flags &
+			MSM_SUBMITQUEUE_BYPASS_QOS_TIMEOUT))
+			goto out;
+
+		/* no progress and not done and not special .. hung! */
+		dev_err(dev->dev, "%s: hangcheck detected gpu lockup rb %d!\n",
+				gpu->name, ring->id);
 		dev_err(dev->dev, "%s:     completed fence: %u\n",
 				gpu->name, fence);
 		dev_err(dev->dev, "%s:     submitted fence: %u\n",
-				gpu->name, gpu->submitted_fence);
+				gpu->name, submitted);
+
 		queue_work(priv->wq, &gpu->recover_work);
 	}
 
+out:
 	/* if still more pending work, reset the hangcheck timer: */
-	if (gpu->submitted_fence > gpu->hangcheck_fence)
+	if (submitted > ring->hangcheck_fence)
 		hangcheck_timer_reset(gpu);
 
 	/* workaround for missing irq: */
@@ -385,6 +536,8 @@
 {
 	unsigned long flags;
 
+	pm_runtime_get_sync(&gpu->pdev->dev);
+
 	spin_lock_irqsave(&gpu->perf_lock, flags);
 	/* we could dynamically enable/disable perfcntr registers too.. */
 	gpu->last_sample.active = msm_gpu_active(gpu);
@@ -398,6 +551,7 @@
 void msm_gpu_perfcntr_stop(struct msm_gpu *gpu)
 {
 	gpu->perfcntr_active = false;
+	pm_runtime_put_sync(&gpu->pdev->dev);
 }
 
 /* returns -errno or # of cntrs sampled */
@@ -431,60 +585,74 @@
  * Cmdstream submission/retirement:
  */
 
-static void retire_submits(struct msm_gpu *gpu, uint32_t fence)
+static void retire_submits(struct msm_gpu *gpu, struct msm_ringbuffer *ring,
+		uint32_t fence)
 {
 	struct drm_device *dev = gpu->dev;
+	struct msm_gem_submit *submit, *tmp;
 
 	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
 
-	while (!list_empty(&gpu->submit_list)) {
-		struct msm_gem_submit *submit;
-
-		submit = list_first_entry(&gpu->submit_list,
-				struct msm_gem_submit, node);
+	list_for_each_entry_safe(submit, tmp, &ring->submits, node) {
+		struct msm_memptr_ticks *ticks;
 
-		if (submit->fence <= fence) {
-			list_del(&submit->node);
-			kfree(submit);
-		} else {
+		if (submit->fence > fence)
 			break;
-		}
+
+		ticks = &(ring->memptrs->ticks[submit->tick_index]);
+
+		/* Read barrier to ensure the GPU-posted tick values are visible */
+		rmb();
+
+		trace_msm_retired(submit, ticks->started, ticks->retired);
+
+		pm_runtime_mark_last_busy(&gpu->pdev->dev);
+		pm_runtime_put_autosuspend(&gpu->pdev->dev);
+		msm_gem_submit_free(submit);
 	}
 }
 
-static void retire_worker(struct work_struct *work)
+static bool _fence_signaled(struct msm_gem_object *obj, uint32_t fence)
 {
-	struct msm_gpu *gpu = container_of(work, struct msm_gpu, retire_work);
-	struct drm_device *dev = gpu->dev;
-	uint32_t fence = gpu->funcs->last_fence(gpu);
-
-	msm_update_fence(gpu->dev, fence);
+	if (obj->write_fence & 0x3FFFFFFF)
+		return COMPARE_FENCE_LTE(obj->write_fence, fence);
 
-	mutex_lock(&dev->struct_mutex);
-
-	retire_submits(gpu, fence);
+	return COMPARE_FENCE_LTE(obj->read_fence, fence);
+}
 
-	while (!list_empty(&gpu->active_list)) {
-		struct msm_gem_object *obj;
+static void _retire_ring(struct msm_gpu *gpu, struct msm_ringbuffer *ring,
+		uint32_t fence)
+{
+	struct msm_gem_object *obj, *tmp;
 
-		obj = list_first_entry(&gpu->active_list,
-				struct msm_gem_object, mm_list);
+	retire_submits(gpu, ring, fence);
 
-		if ((obj->read_fence <= fence) &&
-				(obj->write_fence <= fence)) {
-			/* move to inactive: */
+	list_for_each_entry_safe(obj, tmp, &gpu->active_list, mm_list) {
+		if (_fence_signaled(obj, fence)) {
 			msm_gem_move_to_inactive(&obj->base);
-			msm_gem_put_iova(&obj->base, gpu->id);
+			msm_gem_put_iova(&obj->base, gpu->aspace);
 			drm_gem_object_unreference(&obj->base);
-		} else {
-			break;
+		}
 		}
 	}
 
-	mutex_unlock(&dev->struct_mutex);
+static void retire_worker(struct work_struct *work)
+{
+	struct msm_gpu *gpu = container_of(work, struct msm_gpu, retire_work);
+	struct drm_device *dev = gpu->dev;
+	struct msm_ringbuffer *ring;
+	int i;
+
+	FOR_EACH_RING(gpu, ring, i) {
+		if (!ring)
+			continue;
 
-	if (!msm_gpu_active(gpu))
-		inactive_start(gpu);
+		msm_update_fence(gpu->dev, ring->memptrs->fence);
+
+		mutex_lock(&dev->struct_mutex);
+		_retire_ring(gpu, ring, ring->memptrs->fence);
+		mutex_unlock(&dev->struct_mutex);
+	}
 }
 
 /* call from irq handler to schedule work to retire bo's */
@@ -496,26 +664,29 @@
 }
 
 /* add bo's to gpu's ring, and kick gpu: */
-int msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
-		struct msm_file_private *ctx)
+int msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
 {
 	struct drm_device *dev = gpu->dev;
-	struct msm_drm_private *priv = dev->dev_private;
-	int i, ret;
+	struct msm_ringbuffer *ring = gpu->rb[submit->ring];
+	int i;
 
 	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
 
-	submit->fence = ++priv->next_fence;
+	submit->fence = FENCE(submit->ring, ++ring->seqno);
 
-	gpu->submitted_fence = submit->fence;
+	pm_runtime_get_sync(&gpu->pdev->dev);
 
-	inactive_cancel(gpu);
+	msm_gpu_hw_init(gpu);
 
-	list_add_tail(&submit->node, &gpu->submit_list);
+	list_add_tail(&submit->node, &ring->submits);
 
-	msm_rd_dump_submit(submit);
+	ring->submitted_fence = submit->fence;
+
+	submit->tick_index = ring->tick_index;
+	ring->tick_index = (ring->tick_index + 1) %
+		ARRAY_SIZE(ring->memptrs->ticks);
 
-	gpu->submitted_fence = submit->fence;
+	trace_msm_queued(submit);
 
 	update_sw_cntrs(gpu);
 
@@ -528,27 +699,147 @@
 		WARN_ON(is_active(msm_obj) && (msm_obj->gpu != gpu));
 
 		if (!is_active(msm_obj)) {
-			uint32_t iova;
+			struct msm_gem_address_space *aspace;
+			uint64_t iova;
+
+			aspace = (msm_obj->flags & MSM_BO_SECURE) ?
+				gpu->secure_aspace : submit->aspace;
 
 			/* ring takes a reference to the bo and iova: */
 			drm_gem_object_reference(&msm_obj->base);
-			msm_gem_get_iova_locked(&msm_obj->base,
-					submit->gpu->id, &iova);
+			msm_gem_get_iova(&msm_obj->base, aspace, &iova);
+
+			submit->bos[i].iova = iova;
 		}
 
 		if (submit->bos[i].flags & MSM_SUBMIT_BO_READ)
 			msm_gem_move_to_active(&msm_obj->base, gpu, false, submit->fence);
-
-		if (submit->bos[i].flags & MSM_SUBMIT_BO_WRITE)
+		else if (submit->bos[i].flags & MSM_SUBMIT_BO_WRITE)
 			msm_gem_move_to_active(&msm_obj->base, gpu, true, submit->fence);
 	}
 
-	ret = gpu->funcs->submit(gpu, submit, ctx);
-	priv->lastctx = ctx;
+	msm_rd_dump_submit(submit);
+
+	gpu->funcs->submit(gpu, submit);
 
 	hangcheck_timer_reset(gpu);
 
-	return ret;
+	return 0;
+}
+
+struct msm_context_counter {
+	u32 groupid;
+	int counterid;
+	struct list_head node;
+};
+
+int msm_gpu_counter_get(struct msm_gpu *gpu, struct drm_msm_counter *data,
+	struct msm_file_private *ctx)
+{
+	struct msm_context_counter *entry;
+	int counterid;
+	u32 lo = 0, hi = 0;
+
+	if (!ctx || !gpu->funcs->get_counter)
+		return -ENODEV;
+
+	counterid = gpu->funcs->get_counter(gpu, data->groupid, data->countable,
+		&lo, &hi);
+
+	if (counterid < 0)
+		return counterid;
+
+	/*
+	 * Check to see if the counter in question is already held by this
+	 * process. If it is, put it back and return an error.
+	 */
+	list_for_each_entry(entry, &ctx->counters, node) {
+		if (entry->groupid == data->groupid &&
+			entry->counterid == counterid) {
+			gpu->funcs->put_counter(gpu, data->groupid, counterid);
+			return -EBUSY;
+		}
+	}
+
+	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+	if (!entry) {
+		gpu->funcs->put_counter(gpu, data->groupid, counterid);
+		return -ENOMEM;
+	}
+
+	entry->groupid = data->groupid;
+	entry->counterid = counterid;
+	list_add_tail(&entry->node, &ctx->counters);
+
+	data->counterid = counterid;
+	data->counter_lo = lo;
+	data->counter_hi = hi;
+
+	return 0;
+}
+
+int msm_gpu_counter_put(struct msm_gpu *gpu, struct drm_msm_counter *data,
+	struct msm_file_private *ctx)
+{
+	struct msm_context_counter *entry;
+
+	if (!gpu || !ctx)
+		return -ENODEV;
+
+	list_for_each_entry(entry, &ctx->counters, node) {
+		if (entry->groupid == data->groupid &&
+			entry->counterid == data->counterid) {
+			gpu->funcs->put_counter(gpu, data->groupid,
+				data->counterid);
+
+			list_del(&entry->node);
+			kfree(entry);
+
+			return 0;
+		}
+	}
+
+	return -EINVAL;
+}
+
+void msm_gpu_cleanup_counters(struct msm_gpu *gpu,
+	struct msm_file_private *ctx)
+{
+	struct msm_context_counter *entry, *tmp;
+
+	if (!ctx)
+		return;
+
+	list_for_each_entry_safe(entry, tmp, &ctx->counters, node) {
+		gpu->funcs->put_counter(gpu, entry->groupid, entry->counterid);
+		list_del(&entry->node);
+		kfree(entry);
+	}
+}
+
+u64 msm_gpu_counter_read(struct msm_gpu *gpu, struct drm_msm_counter_read *data)
+{
+	int i;
+
+	if (!gpu->funcs->read_counter)
+		return 0;
+
+	for (i = 0; i < data->nr_ops; i++) {
+		struct drm_msm_counter_read_op op;
+		void __user *ptr = (void __user *)(uintptr_t)
+			(data->ops + (i * sizeof(op)));
+
+		if (copy_from_user(&op, ptr, sizeof(op)))
+			return -EFAULT;
+
+		op.value = gpu->funcs->read_counter(gpu, op.groupid,
+			op.counterid);
+
+		if (copy_to_user(ptr, &op, sizeof(op)))
+			return -EFAULT;
+	}
+
+	return 0;
 }
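/*
 * A minimal userspace sketch of the get/read/put lifecycle implemented
 * above. The DRM_IOCTL_MSM_COUNTER_* request macros and the exact uapi
 * struct layout come from the downstream msm_drm.h and are assumed here,
 * not shown in this patch:
 */
#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/msm_drm.h>

static uint64_t sample_counter(int fd, uint32_t groupid, uint32_t countable)
{
	struct drm_msm_counter cnt = {
		.groupid = groupid,
		.countable = countable,
	};
	struct drm_msm_counter_read_op op = { 0 };
	struct drm_msm_counter_read req = { 0 };

	if (ioctl(fd, DRM_IOCTL_MSM_COUNTER_GET, &cnt))
		return 0;

	op.groupid = cnt.groupid;
	op.counterid = cnt.counterid;
	req.nr_ops = 1;
	req.ops = (uintptr_t)&op;

	/* read_counter() fills op.value for each requested op */
	ioctl(fd, DRM_IOCTL_MSM_COUNTER_READ, &req);

	/* release the hardware counter for other processes */
	ioctl(fd, DRM_IOCTL_MSM_COUNTER_PUT, &cnt);

	return op.value;
}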
 
 /*
@@ -561,17 +852,114 @@
 	return gpu->funcs->irq(gpu);
 }
 
-static const char *clk_names[] = {
-		"src_clk", "core_clk", "iface_clk", "mem_clk", "mem_iface_clk",
-		"alt_mem_iface_clk",
-};
+static struct clk *get_clock(struct device *dev, const char *name)
+{
+	struct clk *clk = devm_clk_get(dev, name);
+
+	DBG("clks[%s]: %p", name, clk);
+
+	return IS_ERR(clk) ? NULL : clk;
+}
+
+static int get_clocks(struct platform_device *pdev, struct msm_gpu *gpu)
+{
+	struct device *dev = &pdev->dev;
+	struct property *prop;
+	const char *name;
+	int i = 0;
+
+	gpu->nr_clocks = of_property_count_strings(dev->of_node, "clock-names");
+	if (gpu->nr_clocks < 1) {
+		gpu->nr_clocks = 0;
+		return 0;
+	}
+
+	gpu->grp_clks = devm_kcalloc(dev, gpu->nr_clocks, sizeof(struct clk *),
+		GFP_KERNEL);
+	if (!gpu->grp_clks)
+		return -ENOMEM;
+
+	of_property_for_each_string(dev->of_node, "clock-names", prop, name) {
+		gpu->grp_clks[i] = get_clock(dev, name);
+
+		/* Remember the key clocks that we need to control later */
+		if (!strcmp(name, "core_clk"))
+			gpu->core_clk = gpu->grp_clks[i];
+		else if (!strcmp(name, "rbbmtimer_clk"))
+			gpu->rbbmtimer_clk = gpu->grp_clks[i];
+
+		++i;
+	}
+
+	return 0;
+}
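/*
 * get_clocks() walks the generic "clock-names" DT property instead of the
 * old hard-coded clk_names[] table. A minimal sketch of the node it expects
 * (the clock provider and any names beyond core_clk/rbbmtimer_clk are
 * illustrative):
 *
 *	gpu@5000000 {
 *		clocks = <&gcc GPU_CORE_CLK>, <&gcc GPU_RBBMTIMER_CLK>,
 *			 <&gcc GPU_IFACE_CLK>;
 *		clock-names = "core_clk", "rbbmtimer_clk", "iface_clk";
 *	};
 */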
+
+static struct msm_gem_address_space *
+msm_gpu_create_address_space(struct msm_gpu *gpu, struct device *dev,
+		int type, u64 start, u64 end, const char *name)
+{
+	struct msm_gem_address_space *aspace;
+	struct iommu_domain *iommu;
+
+	/*
+	 * If start == end then assume we don't want an address space; this is
+	 * mainly so targets can opt out of a secure address space
+	 */
+	if (start == end)
+		return NULL;
+
+	iommu = iommu_domain_alloc(&platform_bus_type);
+	if (!iommu) {
+		dev_info(gpu->dev->dev,
+			"%s: no IOMMU, fallback to VRAM carveout!\n",
+			gpu->name);
+		return NULL;
+	}
+
+	iommu->geometry.aperture_start = start;
+	iommu->geometry.aperture_end = end;
+
+	dev_info(gpu->dev->dev, "%s: using IOMMU '%s'\n", gpu->name, name);
+
+	aspace = msm_gem_address_space_create(dev, iommu, type, name);
+	if (IS_ERR(aspace)) {
+		dev_err(gpu->dev->dev, "%s: failed to init IOMMU '%s': %ld\n",
+			gpu->name, name, PTR_ERR(aspace));
+
+		iommu_domain_free(iommu);
+		return NULL;
+	}
+
+	if (aspace->mmu) {
+		int ret = aspace->mmu->funcs->attach(aspace->mmu, NULL, 0);
+
+		if (ret) {
+			dev_err(gpu->dev->dev,
+				"%s: failed to attach IOMMU '%s': %d\n",
+				gpu->name, name, ret);
+			msm_gem_address_space_put(aspace);
+			aspace = ERR_PTR(ret);
+		}
+	}
+
+	return aspace;
+}
+
+static void msm_gpu_destroy_address_space(struct msm_gem_address_space *aspace)
+{
+	if (!IS_ERR_OR_NULL(aspace) && aspace->mmu)
+		aspace->mmu->funcs->detach(aspace->mmu);
+
+	msm_gem_address_space_put(aspace);
+}
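/*
 * A minimal sketch of how a target opts out of the secure address space via
 * its msm_gpu_config (values illustrative): leaving start == end makes
 * msm_gpu_create_address_space() return NULL instead of a domain.
 *
 *	struct msm_gpu_config config = {
 *		.va_start        = SZ_16M,
 *		.va_end          = 0xffffffffULL,
 *		.secure_va_start = 0,
 *		.secure_va_end   = 0,	<- start == end, no secure aspace
 *	};
 */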
 
 int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
 		struct msm_gpu *gpu, const struct msm_gpu_funcs *funcs,
-		const char *name, const char *ioname, const char *irqname, int ringsz)
+		const char *name, struct msm_gpu_config *config)
 {
-	struct iommu_domain *iommu;
-	int i, ret;
+	int i, ret, nr_rings;
+	void *memptrs;
+	uint64_t memptrs_iova;
 
 	if (WARN_ON(gpu->num_perfcntrs > ARRAY_SIZE(gpu->last_cntrs)))
 		gpu->num_perfcntrs = ARRAY_SIZE(gpu->last_cntrs);
@@ -579,33 +967,27 @@
 	gpu->dev = drm;
 	gpu->funcs = funcs;
 	gpu->name = name;
-	gpu->inactive = true;
 
 	INIT_LIST_HEAD(&gpu->active_list);
 	INIT_WORK(&gpu->retire_work, retire_worker);
-	INIT_WORK(&gpu->inactive_work, inactive_worker);
 	INIT_WORK(&gpu->recover_work, recover_worker);
 
-	INIT_LIST_HEAD(&gpu->submit_list);
 
-	setup_timer(&gpu->inactive_timer, inactive_handler,
-			(unsigned long)gpu);
 	setup_timer(&gpu->hangcheck_timer, hangcheck_handler,
 			(unsigned long)gpu);
 
 	spin_lock_init(&gpu->perf_lock);
 
-	BUG_ON(ARRAY_SIZE(clk_names) != ARRAY_SIZE(gpu->grp_clks));
 
 	/* Map registers: */
-	gpu->mmio = msm_ioremap(pdev, ioname, name);
+	gpu->mmio = msm_ioremap(pdev, config->ioname, name);
 	if (IS_ERR(gpu->mmio)) {
 		ret = PTR_ERR(gpu->mmio);
 		goto fail;
 	}
 
 	/* Get Interrupt: */
-	gpu->irq = platform_get_irq_byname(pdev, irqname);
+	gpu->irq = platform_get_irq_byname(pdev, config->irqname);
 	if (gpu->irq < 0) {
 		ret = gpu->irq;
 		dev_err(drm->dev, "failed to get irq: %d\n", ret);
@@ -615,17 +997,14 @@
 	ret = devm_request_irq(&pdev->dev, gpu->irq, irq_handler,
 			IRQF_TRIGGER_HIGH, gpu->name, gpu);
 	if (ret) {
 		dev_err(drm->dev, "failed to request IRQ%u: %d\n", gpu->irq, ret);
+		gpu->irq = ret;
 		goto fail;
 	}
 
-	/* Acquire clocks: */
-	for (i = 0; i < ARRAY_SIZE(clk_names); i++) {
-		gpu->grp_clks[i] = devm_clk_get(&pdev->dev, clk_names[i]);
-		DBG("grp_clks[%s]: %p", clk_names[i], gpu->grp_clks[i]);
-		if (IS_ERR(gpu->grp_clks[i]))
-			gpu->grp_clks[i] = NULL;
-	}
+	ret = get_clocks(pdev, gpu);
+	if (ret)
+		goto fail;
 
 	gpu->ebi1_clk = devm_clk_get(&pdev->dev, "bus_clk");
 	DBG("ebi1_clk: %p", gpu->ebi1_clk);
@@ -643,61 +1022,136 @@
 	if (IS_ERR(gpu->gpu_cx))
 		gpu->gpu_cx = NULL;
 
-	/* Setup IOMMU.. eventually we will (I think) do this once per context
-	 * and have separate page tables per context.  For now, to keep things
-	 * simple and to get something working, just use a single address space:
-	 */
-	iommu = iommu_domain_alloc(&platform_bus_type);
-	if (iommu) {
-		dev_info(drm->dev, "%s: using IOMMU\n", name);
-		gpu->mmu = msm_iommu_new(&pdev->dev, iommu);
-		if (IS_ERR(gpu->mmu)) {
-			ret = PTR_ERR(gpu->mmu);
-			dev_err(drm->dev, "failed to init iommu: %d\n", ret);
-			gpu->mmu = NULL;
-			iommu_domain_free(iommu);
-			goto fail;
-		}
+	platform_set_drvdata(pdev, gpu);
+
+	msm_devfreq_init(gpu);
+
+	gpu->aspace = msm_gpu_create_address_space(gpu, &pdev->dev,
+		MSM_IOMMU_DOMAIN_USER, config->va_start, config->va_end,
+		"gpu");
+
+	gpu->secure_aspace = msm_gpu_create_address_space(gpu, &pdev->dev,
+		MSM_IOMMU_DOMAIN_SECURE, config->secure_va_start,
+		config->secure_va_end, "gpu_secure");
 
-	} else {
-		dev_info(drm->dev, "%s: no IOMMU, fallback to VRAM carveout!\n", name);
+	nr_rings = config->nr_rings;
+
+	if (nr_rings > ARRAY_SIZE(gpu->rb)) {
+		WARN(1, "Only creating %zu ringbuffers\n", ARRAY_SIZE(gpu->rb));
+		nr_rings = ARRAY_SIZE(gpu->rb);
 	}
-	gpu->id = msm_register_mmu(drm, gpu->mmu);
 
+	/* Allocate one buffer to hold all the memptr records for the rings */
+	memptrs = msm_gem_kernel_new(drm, sizeof(struct msm_memptrs) * nr_rings,
+		MSM_BO_UNCACHED, gpu->aspace, &gpu->memptrs_bo, &memptrs_iova);
 
-	/* Create ringbuffer: */
-	mutex_lock(&drm->struct_mutex);
-	gpu->rb = msm_ringbuffer_new(gpu, ringsz);
-	mutex_unlock(&drm->struct_mutex);
-	if (IS_ERR(gpu->rb)) {
-		ret = PTR_ERR(gpu->rb);
-		gpu->rb = NULL;
-		dev_err(drm->dev, "could not create ringbuffer: %d\n", ret);
+	if (IS_ERR(memptrs)) {
+		ret = PTR_ERR(memptrs);
 		goto fail;
 	}
 
+	/* Create ringbuffer(s): */
+	for (i = 0; i < nr_rings; i++) {
+		gpu->rb[i] = msm_ringbuffer_new(gpu, i, memptrs, memptrs_iova);
+		if (IS_ERR(gpu->rb[i])) {
+			ret = PTR_ERR(gpu->rb[i]);
+			gpu->rb[i] = NULL;
+			dev_err(drm->dev,
+				"could not create ringbuffer %d: %d\n", i, ret);
+			goto fail;
+		}
+
+		memptrs += sizeof(struct msm_memptrs);
+		memptrs_iova += sizeof(struct msm_memptrs);
+	}
+
+	gpu->nr_rings = nr_rings;
+
+#ifdef CONFIG_SMP
+	gpu->pm_qos_req_dma.type = PM_QOS_REQ_AFFINE_IRQ;
+	gpu->pm_qos_req_dma.irq = gpu->irq;
+#endif
+
+	pm_qos_add_request(&gpu->pm_qos_req_dma, PM_QOS_CPU_DMA_LATENCY,
+			PM_QOS_DEFAULT_VALUE);
+	gpu->pdev = pdev;
+
 	bs_init(gpu);
 
+	gpu->snapshot = msm_snapshot_new(gpu);
+	if (IS_ERR(gpu->snapshot))
+		gpu->snapshot = NULL;
+
 	return 0;
 
 fail:
+	for (i = 0; i < ARRAY_SIZE(gpu->rb); i++)
+		msm_ringbuffer_destroy(gpu->rb[i]);
+
+	if (gpu->memptrs_bo) {
+		msm_gem_put_iova(gpu->memptrs_bo, gpu->aspace);
+		drm_gem_object_unreference_unlocked(gpu->memptrs_bo);
+	}
+
+	msm_gpu_destroy_address_space(gpu->aspace);
+	msm_gpu_destroy_address_space(gpu->secure_aspace);
+
 	return ret;
 }
 
 void msm_gpu_cleanup(struct msm_gpu *gpu)
 {
+	struct drm_device *dev = gpu->dev;
+	struct msm_drm_private *priv = dev->dev_private;
+	struct platform_device *pdev = priv->gpu_pdev;
+	int i;
+
 	DBG("%s", gpu->name);
 
 	WARN_ON(!list_empty(&gpu->active_list));
 
+	if (gpu->devfreq.devfreq) {
+		msm_devfreq_manage_opp_notifier(&pdev->dev, &gpu->nb, false);
+		devfreq_cooling_unregister(gpu->devfreq.cooling_dev);
+		devfreq_remove_device(gpu->devfreq.devfreq);
+	}
+
+	if (gpu->irq >= 0) {
+		disable_irq(gpu->irq);
+		devm_free_irq(&pdev->dev, gpu->irq, gpu);
+	}
+
 	bs_fini(gpu);
 
-	if (gpu->rb) {
-		if (gpu->rb_iova)
-			msm_gem_put_iova(gpu->rb->bo, gpu->id);
-		msm_ringbuffer_destroy(gpu->rb);
+	for (i = 0; i < ARRAY_SIZE(gpu->rb); i++)
+		msm_ringbuffer_destroy(gpu->rb[i]);
+
+	if (gpu->memptrs_bo) {
+		msm_gem_put_iova(gpu->memptrs_bo, gpu->aspace);
+		drm_gem_object_unreference_unlocked(gpu->memptrs_bo);
 	}
 
-	if (gpu->mmu)
-		gpu->mmu->funcs->destroy(gpu->mmu);
+	msm_snapshot_destroy(gpu, gpu->snapshot);
+
+	msm_gpu_destroy_address_space(gpu->aspace);
+	msm_gpu_destroy_address_space(gpu->secure_aspace);
+
+	if (gpu->gpu_reg)
+		devm_regulator_put(gpu->gpu_reg);
+
+	if (gpu->gpu_cx)
+		devm_regulator_put(gpu->gpu_cx);
+
+	if (gpu->ebi1_clk)
+		devm_clk_put(&pdev->dev, gpu->ebi1_clk);
+
+	for (i = gpu->nr_clocks - 1; i >= 0; i--)
+		if (gpu->grp_clks[i])
+			devm_clk_put(&pdev->dev, gpu->grp_clks[i]);
+
+	devm_kfree(&pdev->dev, gpu->grp_clks);
+
+	if (gpu->mmio)
+		devm_iounmap(&pdev->dev, gpu->mmio);
 }
diff -ruw linux-4.4.115/drivers/gpu/drm/msm/msm_gpu.h linux-4.4.115-fbx/drivers/gpu/drm/msm/msm_gpu.h
--- linux-4.4.115/drivers/gpu/drm/msm/msm_gpu.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/gpu/drm/msm/msm_gpu.h	2019-01-22 16:16:23.507246443 +0100
@@ -2,6 +2,8 @@
  * Copyright (C) 2013 Red Hat
  * Author: Rob Clark <robdclark@gmail.com>
  *
+ * Copyright (c) 2018 The Linux Foundation. All rights reserved.
+ *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License version 2 as published by
  * the Free Software Foundation.
@@ -19,14 +21,30 @@
 #define __MSM_GPU_H__
 
 #include <linux/clk.h>
+#include <linux/pm_qos.h>
 #include <linux/regulator/consumer.h>
+#include <linux/notifier.h>
 
 #include "msm_drv.h"
 #include "msm_ringbuffer.h"
+#include "msm_snapshot.h"
 
 struct msm_gem_submit;
 struct msm_gpu_perfcntr;
 
+#define MSM_GPU_DEFAULT_IONAME  "kgsl_3d0_reg_memory"
+#define MSM_GPU_DEFAULT_IRQNAME "kgsl_3d0_irq"
+
+struct msm_gpu_config {
+	const char *ioname;
+	const char *irqname;
+	int nr_rings;
+	uint64_t va_start;
+	uint64_t va_end;
+	uint64_t secure_va_start;
+	uint64_t secure_va_end;
+};
+
 /* So far, with hardware that I've seen to date, we can have:
  *  + zero, one, or two z180 2d cores
  *  + a3xx or a2xx 3d core, which share a common CP (the firmware
@@ -46,23 +64,30 @@
 	int (*hw_init)(struct msm_gpu *gpu);
 	int (*pm_suspend)(struct msm_gpu *gpu);
 	int (*pm_resume)(struct msm_gpu *gpu);
-	int (*submit)(struct msm_gpu *gpu, struct msm_gem_submit *submit,
-			struct msm_file_private *ctx);
-	void (*flush)(struct msm_gpu *gpu);
-	void (*idle)(struct msm_gpu *gpu);
+	void (*submit)(struct msm_gpu *gpu, struct msm_gem_submit *submit);
+	void (*flush)(struct msm_gpu *gpu, struct msm_ringbuffer *ring);
 	irqreturn_t (*irq)(struct msm_gpu *irq);
-	uint32_t (*last_fence)(struct msm_gpu *gpu);
+	uint32_t (*submitted_fence)(struct msm_gpu *gpu,
+			struct msm_ringbuffer *ring);
+	struct msm_ringbuffer *(*active_ring)(struct msm_gpu *gpu);
 	void (*recover)(struct msm_gpu *gpu);
 	void (*destroy)(struct msm_gpu *gpu);
 #ifdef CONFIG_DEBUG_FS
 	/* show GPU status in debugfs: */
 	void (*show)(struct msm_gpu *gpu, struct seq_file *m);
 #endif
+	int (*snapshot)(struct msm_gpu *gpu, struct msm_snapshot *snapshot);
+	int (*get_counter)(struct msm_gpu *gpu, u32 groupid, u32 countable,
+		u32 *lo, u32 *hi);
+	void (*put_counter)(struct msm_gpu *gpu, u32 groupid, int counterid);
+	u64 (*read_counter)(struct msm_gpu *gpu, u32 groupid, int counterid);
+	u64 (*gpu_busy)(struct msm_gpu *gpu);
 };
 
 struct msm_gpu {
 	const char *name;
 	struct drm_device *dev;
+	struct platform_device *pdev;
 	const struct msm_gpu_funcs *funcs;
 
 	/* performance counters (hw & sw): */
@@ -77,17 +102,14 @@
 	const struct msm_gpu_perfcntr *perfcntrs;
 	uint32_t num_perfcntrs;
 
-	struct msm_ringbuffer *rb;
-	uint32_t rb_iova;
+	struct msm_ringbuffer *rb[MSM_GPU_MAX_RINGS];
+	int nr_rings;
 
 	/* list of GEM active objects: */
 	struct list_head active_list;
 
-	uint32_t submitted_fence;
-
-	/* is gpu powered/active? */
-	int active_cnt;
-	bool inactive;
+	/* does gpu need hw_init? */
+	bool needs_hw_init;
 
 	/* worker for handling active-list retiring: */
 	struct work_struct retire_work;
@@ -95,13 +117,22 @@
 	void __iomem *mmio;
 	int irq;
 
-	struct msm_mmu *mmu;
-	int id;
+	struct msm_gem_address_space *aspace;
+	struct msm_gem_address_space *secure_aspace;
 
 	/* Power Control: */
 	struct regulator *gpu_reg, *gpu_cx;
-	struct clk *ebi1_clk, *grp_clks[6];
-	uint32_t fast_rate, slow_rate, bus_freq;
+	struct clk **grp_clks;
+	struct clk *ebi1_clk, *core_clk, *rbbmtimer_clk;
+	int nr_clocks;
+
+	uint32_t gpufreq[10];
+	uint32_t busfreq[10];
+	uint32_t nr_pwrlevels;
+
+	struct pm_qos_request pm_qos_req_dma;
+
+	struct drm_gem_object *memptrs_bo;
 
 #ifdef DOWNSTREAM_CONFIG_MSM_BUS_SCALING
 	struct msm_bus_scale_pdata *bus_scale_table;
@@ -111,21 +142,62 @@
 	/* Hang and Inactivity Detection:
 	 */
 #define DRM_MSM_INACTIVE_PERIOD   66 /* in ms (roughly four frames) */
-#define DRM_MSM_INACTIVE_JIFFIES  msecs_to_jiffies(DRM_MSM_INACTIVE_PERIOD)
-	struct timer_list inactive_timer;
-	struct work_struct inactive_work;
+
 #define DRM_MSM_HANGCHECK_PERIOD 500 /* in ms */
 #define DRM_MSM_HANGCHECK_JIFFIES msecs_to_jiffies(DRM_MSM_HANGCHECK_PERIOD)
 	struct timer_list hangcheck_timer;
-	uint32_t hangcheck_fence;
 	struct work_struct recover_work;
+	struct msm_snapshot *snapshot;
+
+	struct {
+		struct devfreq *devfreq;
+		u64 busy_cycles;
+		ktime_t time;
+		struct thermal_cooling_device *cooling_dev;
+	} devfreq;
 
-	struct list_head submit_list;
+	struct notifier_block nb;
 };
 
+struct msm_gpu_submitqueue {
+	int id;
+	u32 flags;
+	u32 prio;
+	int faults;
+	struct list_head node;
+	struct kref ref;
+};
+
+/* It turns out that all targets use the same ringbuffer size. */
+#define MSM_GPU_RINGBUFFER_SZ SZ_32K
+#define MSM_GPU_RINGBUFFER_BLKSIZE 32
+
+#define MSM_GPU_RB_CNTL_DEFAULT \
+		(AXXX_CP_RB_CNTL_BUFSZ(ilog2(MSM_GPU_RINGBUFFER_SZ / 8)) | \
+		AXXX_CP_RB_CNTL_BLKSZ(ilog2(MSM_GPU_RINGBUFFER_BLKSIZE / 8)))
+
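/*
 * Worked out with the sizes above: BUFSZ = ilog2(SZ_32K / 8) = ilog2(4096)
 * = 12 and BLKSZ = ilog2(32 / 8) = ilog2(4) = 2, i.e. both fields are
 * expressed as log2 of the size in 8-byte units.
 */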
+static inline struct msm_ringbuffer *__get_ring(struct msm_gpu *gpu, int index)
+{
+	return (index < ARRAY_SIZE(gpu->rb) ? gpu->rb[index] : NULL);
+}
+
+#define FOR_EACH_RING(gpu, ring, index) \
+	for (index = 0, ring = (gpu)->rb[0]; \
+		index < (gpu)->nr_rings && index < ARRAY_SIZE((gpu)->rb); \
+		index++, ring = __get_ring(gpu, index))
+
 static inline bool msm_gpu_active(struct msm_gpu *gpu)
 {
-	return gpu->submitted_fence > gpu->funcs->last_fence(gpu);
+	struct msm_ringbuffer *ring;
+	int i;
+
+	FOR_EACH_RING(gpu, ring, i) {
+		if (gpu->funcs->submitted_fence(gpu, ring) >
+			ring->memptrs->fence)
+			return true;
+	}
+
+	return false;
 }
 
 /* Perf-Counters:
@@ -151,25 +223,84 @@
 	return msm_readl(gpu->mmio + (reg << 2));
 }
 
+static inline void gpu_rmw(struct msm_gpu *gpu, u32 reg, u32 mask, u32 or)
+{
+	uint32_t val = gpu_read(gpu, reg);
+
+	val &= ~mask;
+	gpu_write(gpu, reg, val | or);
+}
+
+static inline u64 gpu_read64(struct msm_gpu *gpu, u32 lo, u32 hi)
+{
+	u64 val;
+
+	/*
+	 * Why not a readq here? Two reasons: 1) many of the LO registers are
+	 * not quad word aligned and 2) the GPU hardware designers have a bit
+	 * of a history of putting registers where they fit, especially in
+	 * spins. The longer a GPU family goes the higher the chance that
+	 * we'll get burned.  We could do a series of validity checks if we
+ * wanted to, but really, is a readq() that much better? Nah.
+	 */
+
+	/*
+	 * For some lo/hi registers (like perfcounters), the hi value is latched
+	 * when the lo is read, so make sure to read the lo first to trigger
+	 * that
+	 */
+	val = (u64) msm_readl(gpu->mmio + (lo << 2));
+	val |= ((u64) msm_readl(gpu->mmio + (hi << 2)) << 32);
+
+	return val;
+}
+
+static inline void gpu_write64(struct msm_gpu *gpu, u32 lo, u32 hi, u64 val)
+{
+	/* Why not a writeq here? Read the screed above */
+	msm_writel(lower_32_bits(val), gpu->mmio + (lo << 2));
+	msm_writel(upper_32_bits(val), gpu->mmio + (hi << 2));
+}
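/*
 * Typical use, e.g. sampling a 64-bit perfcounter pair (the register names
 * are illustrative): reading LO first matters for counters that latch HI on
 * the LO read.
 *
 *	u64 cycles = gpu_read64(gpu, REG_A5XX_RBBM_PERFCTR_RBBM_0_LO,
 *				REG_A5XX_RBBM_PERFCTR_RBBM_0_HI);
 */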
+
 int msm_gpu_pm_suspend(struct msm_gpu *gpu);
 int msm_gpu_pm_resume(struct msm_gpu *gpu);
 
+int msm_gpu_hw_init(struct msm_gpu *gpu);
+
 void msm_gpu_perfcntr_start(struct msm_gpu *gpu);
 void msm_gpu_perfcntr_stop(struct msm_gpu *gpu);
 int msm_gpu_perfcntr_sample(struct msm_gpu *gpu, uint32_t *activetime,
 		uint32_t *totaltime, uint32_t ncntrs, uint32_t *cntrs);
 
 void msm_gpu_retire(struct msm_gpu *gpu);
-int msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
-		struct msm_file_private *ctx);
+int msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit);
 
 int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
 		struct msm_gpu *gpu, const struct msm_gpu_funcs *funcs,
-		const char *name, const char *ioname, const char *irqname, int ringsz);
+		const char *name, struct msm_gpu_config *config);
+
 void msm_gpu_cleanup(struct msm_gpu *gpu);
 
 struct msm_gpu *adreno_load_gpu(struct drm_device *dev);
 void __init adreno_register(void);
 void __exit adreno_unregister(void);
 
+int msm_gpu_counter_get(struct msm_gpu *gpu, struct drm_msm_counter *data,
+	struct msm_file_private *ctx);
+
+int msm_gpu_counter_put(struct msm_gpu *gpu, struct drm_msm_counter *data,
+	struct msm_file_private *ctx);
+
+void msm_gpu_cleanup_counters(struct msm_gpu *gpu,
+	struct msm_file_private *ctx);
+
+u64 msm_gpu_counter_read(struct msm_gpu *gpu,
+		struct drm_msm_counter_read *data);
+
+static inline void msm_submitqueue_put(struct msm_gpu_submitqueue *queue)
+{
+	if (queue)
+		kref_put(&queue->ref, msm_submitqueue_destroy);
+}
+
 #endif /* __MSM_GPU_H__ */
diff -ruw linux-4.4.115/drivers/gpu/drm/msm/msm_iommu.c linux-4.4.115-fbx/drivers/gpu/drm/msm/msm_iommu.c
--- linux-4.4.115/drivers/gpu/drm/msm/msm_iommu.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/gpu/drm/msm/msm_iommu.c	2019-10-29 09:26:23.633203080 +0100
@@ -15,98 +15,206 @@
  * this program.  If not, see <http://www.gnu.org/licenses/>.
  */
 
+#include <linux/of_platform.h>
+#include <linux/of_address.h>
+#include <soc/qcom/secure_buffer.h>
 #include "msm_drv.h"
-#include "msm_mmu.h"
-
-struct msm_iommu {
-	struct msm_mmu base;
-	struct iommu_domain *domain;
-};
-#define to_msm_iommu(x) container_of(x, struct msm_iommu, base)
+#include "msm_iommu.h"
 
 static int msm_fault_handler(struct iommu_domain *iommu, struct device *dev,
 		unsigned long iova, int flags, void *arg)
 {
-	pr_warn_ratelimited("*** fault: iova=%08lx, flags=%d\n", iova, flags);
+	pr_warn_ratelimited("*** fault: iova=%16llX, flags=%d\n", (u64) iova, flags);
 	return 0;
 }
 
-static int msm_iommu_attach(struct msm_mmu *mmu, const char **names, int cnt)
+static void iommu_get_clocks(struct msm_iommu *iommu, struct device *dev)
+{
+	struct property *prop;
+	const char *name;
+	int i = 0;
+
+	iommu->nr_clocks =
+		of_property_count_strings(dev->of_node, "clock-names");
+
+	if (iommu->nr_clocks < 0)
+		return;
+
+	if (WARN_ON(iommu->nr_clocks > ARRAY_SIZE(iommu->clocks)))
+		iommu->nr_clocks = ARRAY_SIZE(iommu->clocks);
+
+	of_property_for_each_string(dev->of_node, "clock-names", prop, name) {
+		if (i == iommu->nr_clocks)
+			break;
+
+		iommu->clocks[i++] = clk_get(dev, name);
+	}
+}
+
+
+static void msm_iommu_clocks_enable(struct msm_mmu *mmu)
 {
 	struct msm_iommu *iommu = to_msm_iommu(mmu);
-	return iommu_attach_device(iommu->domain, mmu->dev);
+	int i;
+
+	if (!iommu->nr_clocks)
+		iommu_get_clocks(iommu, mmu->dev->parent);
+
+	for (i = 0; i < iommu->nr_clocks; i++) {
+		if (iommu->clocks[i])
+			clk_prepare_enable(iommu->clocks[i]);
+	}
 }
 
-static void msm_iommu_detach(struct msm_mmu *mmu, const char **names, int cnt)
+static void msm_iommu_clocks_disable(struct msm_mmu *mmu)
 {
 	struct msm_iommu *iommu = to_msm_iommu(mmu);
-	iommu_detach_device(iommu->domain, mmu->dev);
+	int i;
+
+	for (i = 0; i < iommu->nr_clocks; i++) {
+		if (iommu->clocks[i])
+			clk_disable_unprepare(iommu->clocks[i]);
+	}
 }
 
-static int msm_iommu_map(struct msm_mmu *mmu, uint32_t iova,
-		struct sg_table *sgt, unsigned len, int prot)
+static int msm_iommu_attach(struct msm_mmu *mmu, const char **names,
+		int cnt)
 {
 	struct msm_iommu *iommu = to_msm_iommu(mmu);
-	struct iommu_domain *domain = iommu->domain;
-	struct scatterlist *sg;
-	unsigned int da = iova;
-	unsigned int i, j;
-	int ret;
 
-	if (!domain || !sgt)
-		return -EINVAL;
+	return iommu_attach_device(iommu->domain, mmu->dev);
+}
+
+static int msm_iommu_attach_user(struct msm_mmu *mmu, const char **names,
+		int cnt)
+{
+	struct msm_iommu *iommu = to_msm_iommu(mmu);
+	int ret, val = 1;
 
-	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
-		u32 pa = sg_phys(sg) - sg->offset;
-		size_t bytes = sg->length + sg->offset;
+	/* Hope springs eternal */
+	iommu->allow_dynamic = !iommu_domain_set_attr(iommu->domain,
+		DOMAIN_ATTR_ENABLE_TTBR1, &val);
 
-		VERB("map[%d]: %08x %08x(%zx)", i, iova, pa, bytes);
+	/* Mark the GPU as I/O coherent if it is supported */
+	iommu->is_coherent = of_dma_is_coherent(mmu->dev->of_node);
 
-		ret = iommu_map(domain, da, pa, bytes, prot);
+	ret = iommu_attach_device(iommu->domain, mmu->dev);
 		if (ret)
-			goto fail;
+		return ret;
 
-		da += bytes;
-	}
+	/*
+	 * Get the context bank for the base domain; this will be shared with
+	 * the children.
+	 */
+	iommu->cb = -1;
+	if (iommu_domain_get_attr(iommu->domain, DOMAIN_ATTR_CONTEXT_BANK,
+		&iommu->cb))
+		iommu->allow_dynamic = false;
 
 	return 0;
+}
+
+static int msm_iommu_attach_dynamic(struct msm_mmu *mmu, const char **names,
+		int cnt)
+{
+	static unsigned int procid;
+	struct msm_iommu *iommu = to_msm_iommu(mmu);
+	int ret;
+	unsigned int id;
+
+	/* Assign a unique procid for the domain to cut down on TLB churn */
+	id = ++procid;
+
+	iommu_domain_set_attr(iommu->domain, DOMAIN_ATTR_PROCID, &id);
+
+	ret = iommu_attach_device(iommu->domain, mmu->dev);
+	if (ret)
+		return ret;
 
-fail:
-	da = iova;
+	/*
+	 * Get the TTBR0 and the CONTEXTIDR - these will be used by the GPU to
+	 * switch the pagetable on its own.
+	 */
+	iommu_domain_get_attr(iommu->domain, DOMAIN_ATTR_TTBR0,
+		&iommu->ttbr0);
+	iommu_domain_get_attr(iommu->domain, DOMAIN_ATTR_CONTEXTIDR,
+		&iommu->contextidr);
 
-	for_each_sg(sgt->sgl, sg, i, j) {
-		size_t bytes = sg->length + sg->offset;
-		iommu_unmap(domain, da, bytes);
-		da += bytes;
+	return 0;
 	}
+
+static int msm_iommu_attach_secure(struct msm_mmu *mmu, const char **names,
+		int cnt)
+{
+	struct msm_iommu *iommu = to_msm_iommu(mmu);
+	int ret, vmid = VMID_CP_PIXEL;
+
+	ret = iommu_domain_set_attr(iommu->domain, DOMAIN_ATTR_SECURE_VMID,
+		&vmid);
+	if (ret)
 	return ret;
+
+	return iommu_attach_device(iommu->domain, mmu->dev);
+}
+
+static void msm_iommu_detach(struct msm_mmu *mmu)
+{
+	struct msm_iommu *iommu = to_msm_iommu(mmu);
+
+	iommu_detach_device(iommu->domain, mmu->dev);
+}
+
+static void msm_iommu_detach_dynamic(struct msm_mmu *mmu)
+{
+	struct msm_iommu *iommu = to_msm_iommu(mmu);
+	iommu_detach_device(iommu->domain, mmu->dev);
 }
 
-static int msm_iommu_unmap(struct msm_mmu *mmu, uint32_t iova,
-		struct sg_table *sgt, unsigned len)
+static int msm_iommu_map(struct msm_mmu *mmu, uint64_t iova,
+		struct sg_table *sgt, u32 flags, void *priv)
 {
 	struct msm_iommu *iommu = to_msm_iommu(mmu);
 	struct iommu_domain *domain = iommu->domain;
-	struct scatterlist *sg;
-	unsigned int da = iova;
-	int i;
+	int ret;
+	u32 prot = IOMMU_READ;
+
+	if (!domain || !sgt)
+		return -EINVAL;
 
-	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
-		size_t bytes = sg->length + sg->offset;
-		size_t unmapped;
+	if (!(flags & MSM_BO_GPU_READONLY))
+		prot |= IOMMU_WRITE;
 
-		unmapped = iommu_unmap(domain, da, bytes);
-		if (unmapped < bytes)
-			return unmapped;
+	if (flags & MSM_BO_PRIVILEGED)
+		prot |= IOMMU_PRIV;
 
-		VERB("unmap[%d]: %08x(%zx)", i, iova, bytes);
+	if ((flags & MSM_BO_CACHED) && msm_iommu_coherent(mmu))
+		prot |= IOMMU_CACHE;
 
-		BUG_ON(!PAGE_ALIGNED(bytes));
+	/* iommu_map_sg returns the number of bytes mapped */
+	ret = iommu_map_sg(domain, iova, sgt->sgl, sgt->nents, prot);
+	if (ret)
+		sgt->sgl->dma_address = iova;
 
-		da += bytes;
+	return ret ? 0 : -ENOMEM;
 	}
 
-	return 0;
+static void msm_iommu_unmap(struct msm_mmu *mmu, uint64_t iova,
+		struct sg_table *sgt, void *priv)
+{
+	struct msm_iommu *iommu = to_msm_iommu(mmu);
+	struct iommu_domain *domain = iommu->domain;
+	struct scatterlist *sg;
+	size_t len = 0;
+	int ret, i;
+
+	for_each_sg(sgt->sgl, sg, sgt->nents, i)
+		len += sg->length;
+
+	ret = iommu_unmap(domain, iova, len);
+	if (ret != len)
+		dev_warn(mmu->dev, "could not unmap iova %llx\n", iova);
+
+	sgt->sgl->dma_address = 0;
 }
 
 static void msm_iommu_destroy(struct msm_mmu *mmu)
@@ -116,7 +224,30 @@
 	kfree(iommu);
 }
 
-static const struct msm_mmu_funcs funcs = {
+static struct device *find_context_bank(const char *name)
+{
+	struct device_node *node = of_find_node_by_name(NULL, name);
+	struct platform_device *pdev, *parent;
+
+	if (!node)
+		return ERR_PTR(-ENODEV);
+
+	if (!of_find_property(node, "iommus", NULL))
+		return ERR_PTR(-ENODEV);
+
+	/* Get the parent device */
+	parent = of_find_device_by_node(node->parent);
+	if (!parent)
+		return ERR_PTR(-ENODEV);
+
+	/* Populate the sub nodes */
+	of_platform_populate(parent->dev.of_node, NULL, NULL, &parent->dev);
+
+	/* Get the context bank device */
+	pdev = of_find_device_by_node(node);
+
+	return pdev ? &pdev->dev : ERR_PTR(-ENODEV);
+}
+
+static const struct msm_mmu_funcs default_funcs = {
 		.attach = msm_iommu_attach,
 		.detach = msm_iommu_detach,
 		.map = msm_iommu_map,
@@ -124,7 +255,52 @@
 		.destroy = msm_iommu_destroy,
 };
 
-struct msm_mmu *msm_iommu_new(struct device *dev, struct iommu_domain *domain)
+static const struct msm_mmu_funcs user_funcs = {
+		.attach = msm_iommu_attach_user,
+		.detach = msm_iommu_detach,
+		.map = msm_iommu_map,
+		.unmap = msm_iommu_unmap,
+		.destroy = msm_iommu_destroy,
+		.enable = msm_iommu_clocks_enable,
+		.disable = msm_iommu_clocks_disable,
+};
+
+static const struct msm_mmu_funcs secure_funcs = {
+		.attach = msm_iommu_attach_secure,
+		.detach = msm_iommu_detach,
+		.map = msm_iommu_map,
+		.unmap = msm_iommu_unmap,
+		.destroy = msm_iommu_destroy,
+};
+
+static const struct msm_mmu_funcs dynamic_funcs = {
+		.attach = msm_iommu_attach_dynamic,
+		.detach = msm_iommu_detach_dynamic,
+		.map = msm_iommu_map,
+		.unmap = msm_iommu_unmap,
+		.destroy = msm_iommu_destroy,
+};
+
+static const struct {
+	const char *cbname;
+	const struct msm_mmu_funcs *funcs;
+} msm_iommu_domains[] = {
+	[MSM_IOMMU_DOMAIN_DEFAULT] = {
+		.cbname = NULL,
+		.funcs = &default_funcs,
+	},
+	[MSM_IOMMU_DOMAIN_USER] = {
+		.cbname = "gfx3d_user",
+		.funcs = &user_funcs,
+	},
+	[MSM_IOMMU_DOMAIN_SECURE] = {
+		.cbname = "gfx3d_secure",
+		.funcs = &secure_funcs
+	},
+};
+
+static struct msm_mmu *iommu_create(struct device *dev,
+		struct iommu_domain *domain, const struct msm_mmu_funcs *funcs)
 {
 	struct msm_iommu *iommu;
 
@@ -133,8 +309,73 @@
 		return ERR_PTR(-ENOMEM);
 
 	iommu->domain = domain;
-	msm_mmu_init(&iommu->base, dev, &funcs);
+	msm_mmu_init(&iommu->base, dev, funcs);
 	iommu_set_fault_handler(domain, msm_fault_handler, dev);
 
 	return &iommu->base;
 }
+
+struct msm_mmu *msm_iommu_new(struct device *parent,
+		enum msm_iommu_domain_type type, struct iommu_domain *domain)
+{
+	struct device *dev = parent;
+
+	if (type >= ARRAY_SIZE(msm_iommu_domains) ||
+		!msm_iommu_domains[type].funcs)
+		return ERR_PTR(-ENODEV);
+
+	if (msm_iommu_domains[type].cbname) {
+		dev = find_context_bank(msm_iommu_domains[type].cbname);
+		if (IS_ERR(dev))
+			return ERR_CAST(dev);
+	}
+
+	return iommu_create(dev, domain, msm_iommu_domains[type].funcs);
+}
+
+/*
+ * Given a base domain that is attached to a IOMMU device try to create a
+ * dynamic domain that is also attached to the same device but allocates a new
+ * pagetable. This is used to allow multiple pagetables to be attached to the
+ * same device.
+ */
+struct msm_mmu *msm_iommu_new_dynamic(struct msm_mmu *base)
+{
+	struct msm_iommu *base_iommu = to_msm_iommu(base);
+	struct iommu_domain *domain;
+	struct msm_mmu *mmu;
+	int ret, val = 1;
+	struct msm_iommu *child_iommu;
+
+	/* Don't continue if the base domain didn't have the support we need */
+	if (!base || !base_iommu->allow_dynamic)
+		return ERR_PTR(-EOPNOTSUPP);
+
+	domain = iommu_domain_alloc(&platform_bus_type);
+	if (!domain)
+		return ERR_PTR(-ENODEV);
+
+	mmu = iommu_create(base->dev, domain, &dynamic_funcs);
+
+	if (IS_ERR(mmu)) {
+		iommu_domain_free(domain);
+		return mmu;
+	}
+
+	ret = iommu_domain_set_attr(domain, DOMAIN_ATTR_DYNAMIC, &val);
+	if (ret) {
+		msm_iommu_destroy(mmu);
+		return ERR_PTR(ret);
+	}
+
+	/* Set the context bank to match the base domain */
+	iommu_domain_set_attr(domain, DOMAIN_ATTR_CONTEXT_BANK,
+		&base_iommu->cb);
+
+	/* Mark the dynamic domain as I/O coherent if the base domain is */
+	child_iommu = to_msm_iommu(mmu);
+	child_iommu->is_coherent = base_iommu->is_coherent;
+
+	return mmu;
+}
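/*
 * A minimal usage sketch (the caller is illustrative, not from this patch):
 * the base USER domain stays attached for the life of the GPU, and each
 * context that wants its own pagetable clones it.
 *
 *	struct msm_mmu *mmu = msm_iommu_new_dynamic(gpu->aspace->mmu);
 *
 *	if (!IS_ERR(mmu)) {
 *		... wrap mmu in a per-context msm_gem_address_space; its
 *		TTBR0/CONTEXTIDR values let the GPU switch pagetables ...
 *	}
 */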
diff -ruw linux-4.4.115/drivers/gpu/drm/msm/msm_kms.h linux-4.4.115-fbx/drivers/gpu/drm/msm/msm_kms.h
--- linux-4.4.115/drivers/gpu/drm/msm/msm_kms.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/gpu/drm/msm/msm_kms.h	2019-10-29 09:26:23.633203080 +0100
@@ -1,4 +1,5 @@
 /*
+ * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
  * Copyright (C) 2013 Red Hat
  * Author: Rob Clark <robdclark@gmail.com>
  *
@@ -25,6 +26,33 @@
 
 #define MAX_PLANE	4
 
+/**
+ * Device Private DRM Mode Flags
+ * drm_mode->private_flags
+ */
+/* Connector has interpreted seamless transition request as dynamic fps */
+#define MSM_MODE_FLAG_SEAMLESS_DYNAMIC_FPS	(1<<0)
+/* Transition to new mode requires a wait-for-vblank before the modeset */
+#define MSM_MODE_FLAG_VBLANK_PRE_MODESET	(1<<1)
+/*
+ * These flags are set in the bridge and consumed in the encoder, so keeping
+ * them in private_flags is convenient. DRM_MODE_FLAG_SUPPORTS_RGB/YUV are
+ * flags indicating the color formats the sink supports, as read from its
+ * EDID, while the flags defined here record the best color format/bit depth
+ * we chose for the display, in order of preference: RGB+RGB_DC, YUV+YUV_DC,
+ * RGB, YUV. RGB and YUV cannot be set at the same time, RGB_DC may only be
+ * set when RGB is set, and likewise YUV_DC with YUV.
+ */
+/* Enable RGB444 30 bit deep color */
+#define MSM_MODE_FLAG_RGB444_DC_ENABLE		(1<<2)
+/* Enable YUV420 30 bit deep color */
+#define MSM_MODE_FLAG_YUV420_DC_ENABLE		(1<<3)
+/* Choose RGB444 format to display */
+#define MSM_MODE_FLAG_COLOR_FORMAT_RGB444	(1<<4)
+/* Choose YUV420 format to display */
+#define MSM_MODE_FLAG_COLOR_FORMAT_YCBCR420	(1<<5)
+
 /* As there are different display controller blocks depending on the
  * snapdragon version, the kms support is split out and the appropriate
  * implementation is loaded at runtime.  The kms module is responsible
@@ -33,6 +61,7 @@
 struct msm_kms_funcs {
 	/* hw initialization: */
 	int (*hw_init)(struct msm_kms *kms);
+	int (*postinit)(struct msm_kms *kms);
 	/* irq handling: */
 	void (*irq_preinstall)(struct msm_kms *kms);
 	int (*irq_postinstall)(struct msm_kms *kms);
@@ -41,21 +70,38 @@
 	int (*enable_vblank)(struct msm_kms *kms, struct drm_crtc *crtc);
 	void (*disable_vblank)(struct msm_kms *kms, struct drm_crtc *crtc);
 	/* modeset, bracketing atomic_commit(): */
-	void (*prepare_commit)(struct msm_kms *kms, struct drm_atomic_state *state);
-	void (*complete_commit)(struct msm_kms *kms, struct drm_atomic_state *state);
+	void (*prepare_fence)(struct msm_kms *kms,
+			struct drm_atomic_state *state);
+	void (*prepare_commit)(struct msm_kms *kms,
+			struct drm_atomic_state *state);
+	void (*commit)(struct msm_kms *kms, struct drm_atomic_state *state);
+	void (*complete_commit)(struct msm_kms *kms,
+			struct drm_atomic_state *state);
 	/* functions to wait for atomic commit completed on each CRTC */
 	void (*wait_for_crtc_commit_done)(struct msm_kms *kms,
 					struct drm_crtc *crtc);
+	/* get msm_format w/ optional format modifiers from drm_mode_fb_cmd2 */
+	const struct msm_format *(*get_format)(struct msm_kms *kms,
+					const uint32_t format,
+					const uint64_t *modifiers,
+					const uint32_t modifiers_len);
+	/* do format checking on format modified through fb_cmd2 modifiers */
+	int (*check_modified_format)(const struct msm_kms *kms,
+			const struct msm_format *msm_fmt,
+			const struct drm_mode_fb_cmd2 *cmd,
+			struct drm_gem_object **bos);
 	/* misc: */
-	const struct msm_format *(*get_format)(struct msm_kms *kms, uint32_t format);
 	long (*round_pixclk)(struct msm_kms *kms, unsigned long rate,
 			struct drm_encoder *encoder);
 	int (*set_split_display)(struct msm_kms *kms,
 			struct drm_encoder *encoder,
 			struct drm_encoder *slave_encoder,
 			bool is_cmd_mode);
+	void (*postopen)(struct msm_kms *kms, struct drm_file *file);
 	/* cleanup: */
 	void (*preclose)(struct msm_kms *kms, struct drm_file *file);
+	void (*postclose)(struct msm_kms *kms, struct drm_file *file);
+	void (*lastclose)(struct msm_kms *kms);
 	void (*destroy)(struct msm_kms *kms);
 };
 
@@ -74,7 +120,33 @@
 	kms->funcs = funcs;
 }
 
+#ifdef CONFIG_DRM_MSM_MDP4
 struct msm_kms *mdp4_kms_init(struct drm_device *dev);
+#else
+static inline
+struct msm_kms *mdp4_kms_init(struct drm_device *dev) { return NULL; };
+#endif
 struct msm_kms *mdp5_kms_init(struct drm_device *dev);
+struct msm_kms *sde_kms_init(struct drm_device *dev);
+
+/**
+ * Mode Set Utility Functions
+ */
+static inline bool msm_is_mode_seamless(const struct drm_display_mode *mode)
+{
+	return (mode->flags & DRM_MODE_FLAG_SEAMLESS);
+}
+
+static inline bool msm_is_mode_dynamic_fps(const struct drm_display_mode *mode)
+{
+	return ((mode->flags & DRM_MODE_FLAG_SEAMLESS) &&
+		(mode->private_flags & MSM_MODE_FLAG_SEAMLESS_DYNAMIC_FPS));
+}
+
+static inline bool msm_needs_vblank_pre_modeset(
+		const struct drm_display_mode *mode)
+{
+	return (mode->private_flags & MSM_MODE_FLAG_VBLANK_PRE_MODESET);
+}
 
 #endif /* __MSM_KMS_H__ */
diff -ruw linux-4.4.115/drivers/gpu/drm/msm/msm_mmu.h linux-4.4.115-fbx/drivers/gpu/drm/msm/msm_mmu.h
--- linux-4.4.115/drivers/gpu/drm/msm/msm_mmu.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/gpu/drm/msm/msm_mmu.h	2019-01-22 16:16:23.507246443 +0100
@@ -20,14 +20,34 @@
 
 #include <linux/iommu.h>
 
+struct msm_mmu;
+
+enum msm_mmu_domain_type {
+	MSM_SMMU_DOMAIN_UNSECURE,
+	MSM_SMMU_DOMAIN_NRT_UNSECURE,
+	MSM_SMMU_DOMAIN_SECURE,
+	MSM_SMMU_DOMAIN_NRT_SECURE,
+	MSM_SMMU_DOMAIN_MAX,
+};
+
+enum msm_iommu_domain_type {
+	MSM_IOMMU_DOMAIN_DEFAULT,
+	MSM_IOMMU_DOMAIN_USER,
+	MSM_IOMMU_DOMAIN_SECURE,
+};
+
 struct msm_mmu_funcs {
 	int (*attach)(struct msm_mmu *mmu, const char **names, int cnt);
-	void (*detach)(struct msm_mmu *mmu, const char **names, int cnt);
-	int (*map)(struct msm_mmu *mmu, uint32_t iova, struct sg_table *sgt,
-			unsigned len, int prot);
-	int (*unmap)(struct msm_mmu *mmu, uint32_t iova, struct sg_table *sgt,
-			unsigned len);
+	void (*detach)(struct msm_mmu *mmu);
+	int (*map)(struct msm_mmu *mmu, uint64_t iova, struct sg_table *sgt,
+			u32 flags, void *priv);
+	void (*unmap)(struct msm_mmu *mmu, uint64_t iova, struct sg_table *sgt,
+			void *priv);
 	void (*destroy)(struct msm_mmu *mmu);
+	void (*enable)(struct msm_mmu *mmu);
+	void (*disable)(struct msm_mmu *mmu);
+	int (*set_property)(struct msm_mmu *mmu,
+				enum iommu_attr attr, void *data);
 };
 
 struct msm_mmu {
@@ -42,7 +62,31 @@
 	mmu->funcs = funcs;
 }
 
-struct msm_mmu *msm_iommu_new(struct device *dev, struct iommu_domain *domain);
-struct msm_mmu *msm_gpummu_new(struct device *dev, struct msm_gpu *gpu);
+/* Create a new SDE mmu device */
+struct msm_mmu *msm_smmu_new(struct device *dev,
+	enum msm_mmu_domain_type domain);
+
+/* Create a new legacy MDP4 or GPU mmu device */
+struct msm_mmu *msm_iommu_new(struct device *parent,
+		enum msm_iommu_domain_type type, struct iommu_domain *domain);
+
+/* Create a new dynamic domain for GPU */
+struct msm_mmu *msm_iommu_new_dynamic(struct msm_mmu *orig);
+
+static inline void msm_mmu_enable(struct msm_mmu *mmu)
+{
+	if (mmu->funcs->enable)
+		mmu->funcs->enable(mmu);
+}
+
+static inline void msm_mmu_disable(struct msm_mmu *mmu)
+{
+	if (mmu->funcs->disable)
+		mmu->funcs->disable(mmu);
+}
+
+/* SDE smmu driver initialize and cleanup functions */
+int __init msm_smmu_driver_init(void);
+void __exit msm_smmu_driver_cleanup(void);
 
 #endif /* __MSM_MMU_H__ */
diff -ruw linux-4.4.115/drivers/gpu/drm/msm/msm_rd.c linux-4.4.115-fbx/drivers/gpu/drm/msm/msm_rd.c
--- linux-4.4.115/drivers/gpu/drm/msm/msm_rd.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/gpu/drm/msm/msm_rd.c	2019-10-29 09:26:23.637203119 +0100
@@ -27,6 +27,11 @@
  * This bypasses drm_debugfs_create_files() mainly because we need to use
  * our own fops for a bit more control.  In particular, we don't want to
  * do anything if userspace doesn't have the debugfs file open.
+ *
+ * The module-param "rd_full", which defaults to false, enables snapshotting
+ * all (non-written) buffers in the submit, rather than just cmdstream bo's.
+ * This is useful to capture the contents of (for example) vbo's or textures,
+ * or shader programs (if not emitted inline in cmdstream).
  */
 
 #ifdef CONFIG_DEBUG_FS
@@ -40,6 +45,10 @@
 #include "msm_gpu.h"
 #include "msm_gem.h"
 
+static bool rd_full = false;
+MODULE_PARM_DESC(rd_full, "If true, $debugfs/.../rd will snapshot all buffer contents");
+module_param_named(rd_full, rd_full, bool, 0600);
+
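/*
 * A usage sketch (the paths assume the driver is built as msm.ko and drm
 * minor 0): since the parameter is writable (0600), rd_full can be flipped
 * at runtime before capturing a submit:
 *
 *	echo Y > /sys/module/msm/parameters/rd_full
 *	cat /sys/kernel/debug/dri/0/rd > capture.rd &
 *	... run the workload to be captured ...
 */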
 enum rd_sect_type {
 	RD_NONE,
 	RD_TEST,       /* ascii text */
@@ -277,6 +286,36 @@
 	kfree(rd);
 }
 
+static void snapshot_buf(struct msm_rd_state *rd,
+		struct msm_gem_submit *submit, int idx,
+		uint64_t iova, uint32_t size)
+{
+	struct msm_gem_object *obj = submit->bos[idx].obj;
+	uint64_t offset = 0;
+
+	if (iova) {
+		offset = iova - submit->bos[idx].iova;
+	} else {
+		iova = submit->bos[idx].iova;
+		size = obj->base.size;
+	}
+
+	/* Always write the RD_GPUADDR so we know how big the buffer is */
+	rd_write_section(rd, RD_GPUADDR,
+			(uint64_t[2]) { iova, size }, 16);
+
+	/* But only dump contents for buffers marked as read and not secure */
+	if (submit->bos[idx].flags & MSM_SUBMIT_BO_READ &&
+		!(obj->flags & MSM_BO_SECURE)) {
+		const char *buf = msm_gem_vaddr(&obj->base);
+
+		if (IS_ERR_OR_NULL(buf))
+			return;
+
+		rd_write_section(rd, RD_BUFFER_CONTENTS, buf + offset, size);
+	}
+}
+
 /* called under struct_mutex */
 void msm_rd_dump_submit(struct msm_gem_submit *submit)
 {
@@ -300,24 +339,20 @@
 
 	rd_write_section(rd, RD_CMD, msg, ALIGN(n, 4));
 
-	/* could be nice to have an option (module-param?) to snapshot
-	 * all the bo's associated with the submit.  Handy to see vtx
-	 * buffers, etc.  For now just the cmdstream bo's is enough.
-	 */
+	if (rd_full) {
+		for (i = 0; i < submit->nr_bos; i++)
+			snapshot_buf(rd, submit, i, 0, 0);
+	}
 
 	for (i = 0; i < submit->nr_cmds; i++) {
-		uint32_t idx  = submit->cmd[i].idx;
-		uint32_t iova = submit->cmd[i].iova;
+		uint64_t iova = submit->cmd[i].iova;
 		uint32_t szd  = submit->cmd[i].size; /* in dwords */
-		struct msm_gem_object *obj = submit->bos[idx].obj;
-		const char *buf = msm_gem_vaddr_locked(&obj->base);
-
-		buf += iova - submit->bos[idx].iova;
 
-		rd_write_section(rd, RD_GPUADDR,
-				(uint32_t[2]){ iova, szd * 4 }, 8);
-		rd_write_section(rd, RD_BUFFER_CONTENTS,
-				buf, szd * 4);
+		/* snapshot cmdstream bo's (if we haven't already): */
+		if (!rd_full) {
+			snapshot_buf(rd, submit, submit->cmd[i].idx,
+					submit->cmd[i].iova, szd * 4);
+		}
 
 		switch (submit->cmd[i].type) {
 		case MSM_SUBMIT_CMD_IB_TARGET_BUF:
@@ -329,7 +364,7 @@
 		case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
 		case MSM_SUBMIT_CMD_BUF:
 			rd_write_section(rd, RD_CMDSTREAM_ADDR,
-					(uint32_t[2]){ iova, szd }, 8);
+					(uint64_t[2]) { iova, szd }, 16);
 			break;
 		}
 	}
diff -ruw linux-4.4.115/drivers/gpu/drm/msm/msm_ringbuffer.c linux-4.4.115-fbx/drivers/gpu/drm/msm/msm_ringbuffer.c
--- linux-4.4.115/drivers/gpu/drm/msm/msm_ringbuffer.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/gpu/drm/msm/msm_ringbuffer.c	2019-01-22 16:16:23.511246479 +0100
@@ -18,13 +18,14 @@
 #include "msm_ringbuffer.h"
 #include "msm_gpu.h"
 
-struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int size)
+struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int id,
+		struct msm_memptrs *memptrs, uint64_t memptrs_iova)
 {
 	struct msm_ringbuffer *ring;
 	int ret;
 
-	if (WARN_ON(!is_power_of_2(size)))
-		return ERR_PTR(-EINVAL);
+	/* We assume everywhere that MSM_GPU_RINGBUFFER_SZ is a power of 2 */
+	BUILD_BUG_ON(!is_power_of_2(MSM_GPU_RINGBUFFER_SZ));
 
 	ring = kzalloc(sizeof(*ring), GFP_KERNEL);
 	if (!ring) {
@@ -33,18 +34,26 @@
 	}
 
 	ring->gpu = gpu;
-	ring->bo = msm_gem_new(gpu->dev, size, MSM_BO_WC);
+	ring->id = id;
+	ring->bo = msm_gem_new(gpu->dev, MSM_GPU_RINGBUFFER_SZ,
+			MSM_BO_WC);
 	if (IS_ERR(ring->bo)) {
 		ret = PTR_ERR(ring->bo);
 		ring->bo = NULL;
 		goto fail;
 	}
 
-	ring->start = msm_gem_vaddr_locked(ring->bo);
-	ring->end   = ring->start + (size / 4);
+	ring->memptrs = memptrs;
+	ring->memptrs_iova = memptrs_iova;
+
+	ring->start = msm_gem_vaddr(ring->bo);
+	ring->end   = ring->start + (MSM_GPU_RINGBUFFER_SZ >> 2);
+	ring->next  = ring->start;
 	ring->cur   = ring->start;
 
-	ring->size = size;
+	INIT_LIST_HEAD(&ring->submits);
+	spin_lock_init(&ring->lock);
 
 	return ring;
 
@@ -56,7 +65,10 @@
 
 void msm_ringbuffer_destroy(struct msm_ringbuffer *ring)
 {
-	if (ring->bo)
+	if (ring && ring->bo) {
+		msm_gem_put_iova(ring->bo, ring->gpu->aspace);
 		drm_gem_object_unreference_unlocked(ring->bo);
+	}
+
 	kfree(ring);
 }
diff -ruw linux-4.4.115/drivers/gpu/drm/msm/msm_ringbuffer.h linux-4.4.115-fbx/drivers/gpu/drm/msm/msm_ringbuffer.h
--- linux-4.4.115/drivers/gpu/drm/msm/msm_ringbuffer.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/gpu/drm/msm/msm_ringbuffer.h	2019-01-22 16:16:23.511246479 +0100
@@ -20,14 +20,46 @@
 
 #include "msm_drv.h"
 
+#define rbmemptr(ring, member) \
+	((ring)->memptrs_iova + offsetof(struct msm_memptrs, member))
+
+struct msm_memptr_ticks {
+	uint64_t started;
+	uint64_t retired;
+};
+
+struct msm_memptrs {
+	volatile uint32_t rptr;
+	volatile uint32_t fence;
+	volatile uint64_t ttbr0;
+	volatile unsigned int contextidr;
+	struct msm_memptr_ticks ticks[128];
+};
+
+#define RING_TICKS_IOVA(ring, index, field) \
+	((ring)->memptrs_iova + offsetof(struct msm_memptrs, ticks) + \
+	 ((index) * sizeof(struct msm_memptr_ticks)) + \
+	 offsetof(struct msm_memptr_ticks, field))
+
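/*
 * A minimal sketch of how these helpers are meant to be used (the CP packet
 * emission is not shown in this patch): rbmemptr() yields the GPU-visible
 * address of a field in this ring's msm_memptrs slot, e.g. where the CP
 * should write the completion fence or the per-submit timestamps.
 *
 *	uint64_t fence_iova = rbmemptr(ring, fence);
 *	uint64_t start_iova = RING_TICKS_IOVA(ring, submit->tick_index, started);
 */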
 struct msm_ringbuffer {
 	struct msm_gpu *gpu;
-	int size;
+	int id;
 	struct drm_gem_object *bo;
-	uint32_t *start, *end, *cur;
+	uint32_t *start, *end, *cur, *next;
+	uint64_t iova;
+	uint32_t seqno;
+	uint32_t submitted_fence;
+	spinlock_t lock;
+	struct list_head submits;
+	uint32_t hangcheck_fence;
+
+	struct msm_memptrs *memptrs;
+	uint64_t memptrs_iova;
+	int tick_index;
 };
 
-struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int size);
+struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int id,
+		struct msm_memptrs *memptrs, uint64_t memptrs_iova);
 void msm_ringbuffer_destroy(struct msm_ringbuffer *ring);
 
 /* ringbuffer helpers (the parts that are same for a3xx/a2xx/z180..) */
@@ -35,9 +67,13 @@
 static inline void
 OUT_RING(struct msm_ringbuffer *ring, uint32_t data)
 {
-	if (ring->cur == ring->end)
-		ring->cur = ring->start;
-	*(ring->cur++) = data;
+	/*
+	 * ring->next points to the current command being written - it won't be
+	 * committed as ring->cur until the flush
+	 */
+	if (ring->next == ring->end)
+		ring->next = ring->start;
+	*(ring->next++) = data;
 }
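/*
 * A minimal sketch of the commit side that pairs with OUT_RING() (modeled on
 * the adreno-style flush; the exact helper is an assumption, not quoted from
 * this patch): writes staged through ring->next only become visible to the
 * GPU once the flush publishes them as ring->cur and bumps the write pointer.
 *
 *	spin_lock_irqsave(&ring->lock, flags);
 *	ring->cur = ring->next;
 *	wptr = (ring->cur - ring->start) % (MSM_GPU_RINGBUFFER_SZ >> 2);
 *	spin_unlock_irqrestore(&ring->lock, flags);
 *	gpu_write(gpu, REG_AXXX_CP_RB_WPTR, wptr);
 */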
 
 #endif /* __MSM_RINGBUFFER_H__ */
diff -ruw linux-4.4.115/drivers/gpu/Makefile linux-4.4.115-fbx/drivers/gpu/Makefile
--- linux-4.4.115/drivers/gpu/Makefile	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/gpu/Makefile	2019-01-22 16:16:23.215243798 +0100
@@ -4,3 +4,4 @@
 obj-$(CONFIG_TEGRA_HOST1X)	+= host1x/
 obj-y			+= drm/ vga/
 obj-$(CONFIG_IMX_IPUV3_CORE)	+= ipu-v3/
+obj-$(CONFIG_QCOM_KGSL) += msm/
diff -ruw linux-4.4.115/drivers/hid/hid-apple.c linux-4.4.115-fbx/drivers/hid/hid-apple.c
--- linux-4.4.115/drivers/hid/hid-apple.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/hid/hid-apple.c	2019-01-22 16:16:23.731248471 +0100
@@ -440,6 +440,9 @@
 		.driver_data = APPLE_HAS_FN },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_ANSI),
 		.driver_data = APPLE_HAS_FN },
+	{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE,
+		 USB_DEVICE_ID_APPLE_ALU_ANSI),
+		.driver_data = APPLE_HAS_FN },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_ISO),
 		.driver_data = APPLE_HAS_FN | APPLE_ISO_KEYBOARD },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_JIS),
diff -ruw linux-4.4.115/drivers/hid/hid-core.c linux-4.4.115-fbx/drivers/hid/hid-core.c
--- linux-4.4.115/drivers/hid/hid-core.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/hid/hid-core.c	2019-10-29 09:26:23.697203707 +0100
@@ -1757,6 +1757,8 @@
 	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_MINI_ISO) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_MINI_JIS) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_ANSI) },
+	{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE,
+			 USB_DEVICE_ID_APPLE_ALU_ANSI) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_ISO) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_JIS) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_HF_ANSI) },
@@ -2010,7 +2012,6 @@
 	{ HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS3_CONTROLLER) },
 	{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS3_CONTROLLER) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER) },
-	{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_VAIO_VGX_MOUSE) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_VAIO_VGP_MOUSE) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_STEELSERIES, USB_DEVICE_ID_STEELSERIES_SRWS1) },
@@ -2062,6 +2063,8 @@
 	{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_NINTENDO, USB_DEVICE_ID_NINTENDO_WIIMOTE) },
 	{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_NINTENDO, USB_DEVICE_ID_NINTENDO_WIIMOTE2) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_RAZER, USB_DEVICE_ID_RAZER_BLADE_14) },
+	{ HID_BLUETOOTH_DEVICE(0x10eb, 0x0023) },
+	{ HID_BLUETOOTH_DEVICE(0x10eb, 0x0024) },
 	{ }
 };
 
diff -ruw linux-4.4.115/drivers/hid/Kconfig linux-4.4.115-fbx/drivers/hid/Kconfig
--- linux-4.4.115/drivers/hid/Kconfig	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/hid/Kconfig	2019-01-22 16:16:23.731248471 +0100
@@ -338,6 +338,10 @@
 	---help---
 	Support for Waltop tablets.
 
+config HID_FBX_REMOTE_AUDIO
+	tristate "Freebox BLE remote audio driver"
+	depends on HID && SND
+
 config HID_GYRATION
 	tristate "Gyration remote control"
 	depends on HID
diff -ruw linux-4.4.115/drivers/hid/Makefile linux-4.4.115-fbx/drivers/hid/Makefile
--- linux-4.4.115/drivers/hid/Makefile	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/hid/Makefile	2019-01-22 16:16:23.731248471 +0100
@@ -40,6 +40,7 @@
 obj-$(CONFIG_HID_GEMBIRD)	+= hid-gembird.o
 obj-$(CONFIG_HID_GFRM)		+= hid-gfrm.o
 obj-$(CONFIG_HID_GT683R)	+= hid-gt683r.o
+obj-$(CONFIG_HID_FBX_REMOTE_AUDIO)	+= hid-fbx-remote-audio.o
 obj-$(CONFIG_HID_GYRATION)	+= hid-gyration.o
 obj-$(CONFIG_HID_HOLTEK)	+= hid-holtek-kbd.o
 obj-$(CONFIG_HID_HOLTEK)	+= hid-holtek-mouse.o
diff -ruw linux-4.4.115/drivers/hid/uhid.c linux-4.4.115-fbx/drivers/hid/uhid.c
--- linux-4.4.115/drivers/hid/uhid.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/hid/uhid.c	2019-10-29 09:26:23.705203785 +0100
@@ -28,6 +28,8 @@
 #define UHID_NAME	"uhid"
 #define UHID_BUFSIZE	32
 
+static DEFINE_MUTEX(uhid_open_mutex);
+
 struct uhid_device {
 	struct mutex devlock;
 	bool running;
@@ -142,15 +144,26 @@
 static int uhid_hid_open(struct hid_device *hid)
 {
 	struct uhid_device *uhid = hid->driver_data;
+	int retval = 0;
 
-	return uhid_queue_event(uhid, UHID_OPEN);
+	mutex_lock(&uhid_open_mutex);
+	if (!hid->open++) {
+		retval = uhid_queue_event(uhid, UHID_OPEN);
+		if (retval)
+			hid->open--;
+	}
+	mutex_unlock(&uhid_open_mutex);
+	return retval;
 }
 
 static void uhid_hid_close(struct hid_device *hid)
 {
 	struct uhid_device *uhid = hid->driver_data;
 
+	mutex_lock(&uhid_open_mutex);
+	if (!--hid->open)
 	uhid_queue_event(uhid, UHID_CLOSE);
+	mutex_unlock(&uhid_open_mutex);
 }
 
 static int uhid_hid_parse(struct hid_device *hid)
diff -ruw linux-4.4.115/drivers/hid/usbhid/hiddev.c linux-4.4.115-fbx/drivers/hid/usbhid/hiddev.c
--- linux-4.4.115/drivers/hid/usbhid/hiddev.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/hid/usbhid/hiddev.c	2019-10-29 09:26:23.705203785 +0100
@@ -510,13 +510,13 @@
 				goto inval;
 
 			field = report->field[uref->field_index];
+		}
 
 			if (cmd == HIDIOCGCOLLECTIONINDEX) {
 				if (uref->usage_index >= field->maxusage)
 					goto inval;
 			} else if (uref->usage_index >= field->report_count)
 				goto inval;
-		}
 
 		if ((cmd == HIDIOCGUSAGES || cmd == HIDIOCSUSAGES) &&
 		    (uref_multi->num_values > HID_MAX_MULTI_USAGES ||
diff -ruw linux-4.4.115/drivers/hwmon/Kconfig linux-4.4.115-fbx/drivers/hwmon/Kconfig
--- linux-4.4.115/drivers/hwmon/Kconfig	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/hwmon/Kconfig	2019-01-22 16:16:23.763248761 +0100
@@ -1200,6 +1200,34 @@
 	  These devices are hard to detect and rarely found on mainstream
 	  hardware.  If unsure, say N.
 
+config SENSORS_EPM_ADC
+	tristate "EPM ADC Driver for power measurement"
+	depends on I2C && SPI_MASTER
+	default n
+	help
+	  Provides an interface for measuring the current on specific power
+	  rails through the channels of the ADC1158 ADC.
+
+config SENSORS_QPNP_ADC_VOLTAGE
+	tristate "Support for Qualcomm QPNP Voltage ADC"
+	depends on SPMI
+	help
+	  This is the VADC arbiter driver for Qualcomm QPNP ADC Chip.
+
+	  The driver supports reading the HKADC and XOADC through the ADC AMUX
+	  arbiter. The VADC includes support for the conversion sequencer, and
+	  the driver supports reading the ADC through the AMUX channels for
+	  external pull-ups simultaneously.
+
+config SENSORS_QPNP_ADC_CURRENT
+	tristate "Support for Qualcomm QPNP current ADC"
+	depends on SPMI
+	help
+	  This is the IADC driver for Qualcomm QPNP ADC Chip.
+
+	  The driver supports single mode operation reading from up to seven
+	  channel configurations that include the external/internal Rsense and
+	  the CSP_EX/CSN_EX pair, along with the gain and offset calibration.
+
 source drivers/hwmon/pmbus/Kconfig
 
 config SENSORS_PWM_FAN
diff -ruw linux-4.4.115/drivers/hwmon/Makefile linux-4.4.115-fbx/drivers/hwmon/Makefile
--- linux-4.4.115/drivers/hwmon/Makefile	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/hwmon/Makefile	2019-01-22 16:16:23.763248761 +0100
@@ -160,6 +160,9 @@
 obj-$(CONFIG_SENSORS_W83L786NG)	+= w83l786ng.o
 obj-$(CONFIG_SENSORS_WM831X)	+= wm831x-hwmon.o
 obj-$(CONFIG_SENSORS_WM8350)	+= wm8350-hwmon.o
+obj-$(CONFIG_SENSORS_EPM_ADC)	+= epm_adc.o
+obj-$(CONFIG_SENSORS_QPNP_ADC_VOLTAGE)	+= qpnp-adc-voltage.o qpnp-adc-common.o
+obj-$(CONFIG_SENSORS_QPNP_ADC_CURRENT)	+= qpnp-adc-current.o qpnp-adc-common.o
 
 obj-$(CONFIG_PMBUS)		+= pmbus/
 
diff -ruw linux-4.4.115/drivers/hwspinlock/Kconfig linux-4.4.115-fbx/drivers/hwspinlock/Kconfig
--- linux-4.4.115/drivers/hwspinlock/Kconfig	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/hwspinlock/Kconfig	2019-01-22 16:16:23.807249159 +0100
@@ -53,4 +53,15 @@
 
 	  If unsure, say N.
 
+config REMOTE_SPINLOCK_MSM
+	bool "MSM Remote Spinlock Functionality"
+	depends on ARCH_QCOM
+	select HWSPINLOCK
+	help
+	  Say y here to support the MSM Remote Spinlock functionality, which
+	  provides a synchronisation mechanism for the various processors on
+	  the SoC.
+
+	  If unsure, say N.
+
 endmenu
diff -ruw linux-4.4.115/drivers/hwspinlock/Makefile linux-4.4.115-fbx/drivers/hwspinlock/Makefile
--- linux-4.4.115/drivers/hwspinlock/Makefile	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/hwspinlock/Makefile	2019-01-22 16:16:23.807249159 +0100
@@ -7,3 +7,4 @@
 obj-$(CONFIG_HWSPINLOCK_QCOM)		+= qcom_hwspinlock.o
 obj-$(CONFIG_HWSPINLOCK_SIRF)		+= sirf_hwspinlock.o
 obj-$(CONFIG_HSEM_U8500)		+= u8500_hsem.o
+obj-$(CONFIG_REMOTE_SPINLOCK_MSM)	+= msm_remote_spinlock.o
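The REMOTE_SPINLOCK_MSM help text above describes hwspinlocks as a
synchronisation primitive shared between processors on the SoC. A hedged
sketch of the generic in-kernel consumer API such a driver plugs into
(the lock id 1 is made up and must be agreed with the remote side):

	#include <linux/hwspinlock.h>

	static int example_shared_update(void)
	{
		struct hwspinlock *lock;
		int ret;

		lock = hwspin_lock_request_specific(1);
		if (!lock)
			return -EBUSY;

		/* spin for at most 10 ms while another processor holds it */
		ret = hwspin_lock_timeout(lock, 10);
		if (!ret) {
			/* ... touch memory shared with the other processor ... */
			hwspin_unlock(lock);
		}

		hwspin_lock_free(lock);
		return ret;
	}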
diff -ruw linux-4.4.115/drivers/hwtracing/coresight/Kconfig linux-4.4.115-fbx/drivers/hwtracing/coresight/Kconfig
--- linux-4.4.115/drivers/hwtracing/coresight/Kconfig	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/hwtracing/coresight/Kconfig	2019-01-22 16:16:23.807249159 +0100
@@ -12,6 +12,23 @@
 	  trace source gets enabled.
 
 if CORESIGHT
+
+config CORESIGHT_EVENT
+	tristate "CoreSight Event driver"
+	help
+	  This driver provides support for registering with various events
+	  and performing CoreSight actions, such as aborting trace, on their
+	  occurrence. These events can be controlled via module parameters.
+
+config CORESIGHT_CSR
+	bool "CoreSight Slave Register driver"
+	help
+	  This driver provides support for the CoreSight Slave Register block
+	  that hosts miscellaneous configuration registers. Those registers
+	  can be used to control various CoreSight configurations.
+
 config CORESIGHT_LINKS_AND_SINKS
 	bool "CoreSight Link and Sink drivers"
 	help
@@ -23,6 +40,7 @@
 config CORESIGHT_LINK_AND_SINK_TMC
 	bool "Coresight generic TMC driver"
 	depends on CORESIGHT_LINKS_AND_SINKS
+	select CORESIGHT_CSR
 	help
 	  This enables support for the Trace Memory Controller driver.
 	  Depending on its configuration the device can act as a link (embedded
@@ -61,7 +79,6 @@
 
 config CORESIGHT_SOURCE_ETM4X
 	bool "CoreSight Embedded Trace Macrocell 4.x driver"
-	depends on ARM64
 	select CORESIGHT_LINKS_AND_SINKS
 	help
 	  This driver provides support for the ETM4.x tracer module, tracing the
@@ -69,6 +86,22 @@
 	  for instruction level tracing. Depending on the implemented version
 	  data tracing may also be available.
 
+config CORESIGHT_REMOTE_ETM
+	bool "Remote processor ETM trace support"
+	depends on MSM_QMI_INTERFACE
+	help
+	  Enables support for ETM trace collection on remote processor using
+	  CoreSight framework. Enabling this will allow turning on ETM
+	  tracing on remote processor via sysfs by configuring the required
+	  CoreSight components.
+
+config CORESIGHT_REMOTE_ETM_DEFAULT_ENABLE
+	int "default enable bits for Remote processor ETM"
+	depends on CORESIGHT_REMOTE_ETM
+	help
+	  Bitmask of remote processor ETM instances to enable by default.
+	  Tracing for an instance is enabled if its instance ID bit is set.
+
 config CORESIGHT_QCOM_REPLICATOR
 	bool "Qualcomm CoreSight Replicator driver"
 	depends on CORESIGHT_LINKS_AND_SINKS
@@ -77,4 +110,83 @@
 	  programmable ATB replicator sends the ATB trace stream from the
 	  ETB/ETF to the TPIUi and ETR.
 
+config CORESIGHT_STM
+	bool "CoreSight System Trace Macrocell driver"
+	select CORESIGHT_LINKS_AND_SINKS
+	help
+	  This driver provides support for hardware assisted software
+	  instrumentation based tracing. This is primarily useful for
+	  logging software events or data.
+
+config CORESIGHT_HWEVENT
+	bool "CoreSight Hardware Event driver"
+	depends on CORESIGHT_STM
+	select CORESIGHT_CSR
+	help
+	  This driver provides support for monitoring and tracing CoreSight
+	  Hardware Event across STM interface. It configures Coresight
+	  Hardware Event mux control registers to select hardware events
+	  based on user input.
+
+config CORESIGHT_CTI
+	bool "CoreSight Cross Trigger Interface driver"
+	help
+	  This driver provides support for the Cross Trigger Interface, which
+	  is used to pass cross trigger events from one hardware component
+	  to another. It can also be used to pass software generated events.
+
+config CORESIGHT_CTI_SAVE_DISABLE
+	bool "Turn off CTI save and restore"
+	depends on CORESIGHT_CTI
+	help
+	  Turns off CoreSight CTI save and restore support for cpu CTIs. This
+	  avoids voting for the clocks during probe as well as the associated
+	  save and restore latency at the cost of breaking cpu CTI support on
+	  targets where cpu CTIs have to be preserved across power collapse.
+
+	  If unsure, say 'N' here to avoid breaking cpu CTI support.
+
+config CORESIGHT_TPDA
+	bool "CoreSight Trace, Profiling & Diagnostics Aggregator driver"
+	help
+	  This driver provides support for configuring the aggregator. This is
+	  primarily useful for pulling the data sets from one or more
+	  attached monitors and pushing the resultant data out. Multiple
+	  monitors are connected on different input ports of TPDA.
+
+config CORESIGHT_TPDM
+	bool "CoreSight Trace, Profiling & Diagnostics Monitor driver"
+	help
+	  This driver provides support for configuring monitors. Monitors are
+	  primarily responsible for data set collection and support the
+	  ability to collect any permutation of data set types. Monitors are
+	  also responsible for interaction with system cross triggering.
+
+config CORESIGHT_TPDM_DEFAULT_ENABLE
+	bool "Turn on TPDM tracing by default"
+	depends on CORESIGHT_TPDM
+	help
+	  Turns on CoreSight TPDM tracing for different data set types by
+	  default. Otherwise, tracing is disabled by default but can be
+	  enabled via sysfs.
+
+	  If unsure, say 'N' here to avoid potential power and performance
+	  penalty.
+
+config CORESIGHT_QPDI
+	bool "CoreSight PMIC debug interface support"
+	help
+	  This driver provides support for controlling the PMIC debug interface
+	  feature. When enabled via sysfs it allows diagnostic access to the
+	  PMIC. Similarly this debug feature can be disabled via sysfs which
+	  prevents debug dongle detection.
+
+config CORESIGHT_SOURCE_DUMMY
+	bool "Dummy source support"
+	help
+	  Enables support for dummy source devices. The dummy source driver
+	  can be used for CoreSight sources that are owned and configured by
+	  some other subsystem and use Linux drivers to configure the rest of
+	  the trace path.
+
 endif
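The CORESIGHT_EVENT entry above notes that its events "can be controlled
by using module parameters". A minimal sketch of that mechanism; the
parameter name is invented for illustration:

	#include <linux/module.h>

	/* hypothetical knob: abort trace collection when a panic event fires */
	static bool abort_on_panic = true;
	module_param(abort_on_panic, bool, 0644);
	MODULE_PARM_DESC(abort_on_panic, "Abort CoreSight trace on kernel panic");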
diff -ruw linux-4.4.115/drivers/hwtracing/coresight/Makefile linux-4.4.115-fbx/drivers/hwtracing/coresight/Makefile
--- linux-4.4.115/drivers/hwtracing/coresight/Makefile	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/hwtracing/coresight/Makefile	2019-01-22 16:16:23.807249159 +0100
@@ -3,6 +3,9 @@
 #
 obj-$(CONFIG_CORESIGHT) += coresight.o
 obj-$(CONFIG_OF) += of_coresight.o
+obj-$(CONFIG_CORESIGHT_CSR) += coresight-csr.o
+obj-$(CONFIG_CORESIGHT_EVENT) += coresight-event.o
+obj-$(CONFIG_CORESIGHT_CTI) += coresight-cti.o
 obj-$(CONFIG_CORESIGHT_LINK_AND_SINK_TMC) += coresight-tmc.o
 obj-$(CONFIG_CORESIGHT_SINK_TPIU) += coresight-tpiu.o
 obj-$(CONFIG_CORESIGHT_SINK_ETBV10) += coresight-etb10.o
@@ -11,3 +14,10 @@
 obj-$(CONFIG_CORESIGHT_SOURCE_ETM3X) += coresight-etm3x.o coresight-etm-cp14.o
 obj-$(CONFIG_CORESIGHT_SOURCE_ETM4X) += coresight-etm4x.o
 obj-$(CONFIG_CORESIGHT_QCOM_REPLICATOR) += coresight-replicator-qcom.o
+obj-$(CONFIG_CORESIGHT_STM) += coresight-stm.o
+obj-$(CONFIG_CORESIGHT_TPDA) += coresight-tpda.o
+obj-$(CONFIG_CORESIGHT_TPDM) += coresight-tpdm.o
+obj-$(CONFIG_CORESIGHT_REMOTE_ETM) += coresight-remote-etm.o
+obj-$(CONFIG_CORESIGHT_QPDI) += coresight-qpdi.o
+obj-$(CONFIG_CORESIGHT_HWEVENT) += coresight-hwevent.o
+obj-$(CONFIG_CORESIGHT_SOURCE_DUMMY) += coresight-dummy.o
diff -ruw linux-4.4.115/drivers/hwtracing/stm/Kconfig linux-4.4.115-fbx/drivers/hwtracing/stm/Kconfig
--- linux-4.4.115/drivers/hwtracing/stm/Kconfig	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/hwtracing/stm/Kconfig	2019-01-22 16:16:23.815249232 +0100
@@ -9,6 +9,8 @@
 
 	  Say Y here to enable System Trace Module device support.
 
+if STM
+
 config STM_DUMMY
 	tristate "Dummy STM driver"
 	help
@@ -25,3 +27,16 @@
 
 	  If you want to send kernel console messages over STM devices,
 	  say Y.
+
+config STM_SOURCE_HEARTBEAT
+	tristate "Heartbeat over STM devices"
+	help
+	  This is a kernel space trace source that sends periodic
+	  heartbeat messages to trace hosts over STM devices. It is
+	  also useful for testing stm class drivers and the stm class
+	  framework itself.
+
+	  If you want to send heartbeat messages over STM devices,
+	  say Y.
+
+endif
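A condensed, hedged sketch of how an STM source such as the stm_heartbeat
driver enabled above can emit periodic messages through the stm class
(this is not the driver itself; the one-second period and channel 0 are
arbitrary choices):

	#include <linux/hrtimer.h>
	#include <linux/ktime.h>
	#include <linux/stm.h>

	static struct stm_source_data heartbeat_data = {
		.name     = "heartbeat",
		.nr_chans = 1,
	};

	static enum hrtimer_restart heartbeat_fire(struct hrtimer *hr)
	{
		static const char msg[] = "heartbeat";

		/* write one message on channel 0 of the linked STM device */
		stm_source_write(&heartbeat_data, 0, msg, sizeof(msg));
		hrtimer_forward_now(hr, ms_to_ktime(1000));
		return HRTIMER_RESTART;
	}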
diff -ruw linux-4.4.115/drivers/hwtracing/stm/Makefile linux-4.4.115-fbx/drivers/hwtracing/stm/Makefile
--- linux-4.4.115/drivers/hwtracing/stm/Makefile	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/hwtracing/stm/Makefile	2019-01-22 16:16:23.815249232 +0100
@@ -5,5 +5,7 @@
 obj-$(CONFIG_STM_DUMMY)	+= dummy_stm.o
 
 obj-$(CONFIG_STM_SOURCE_CONSOLE)	+= stm_console.o
+obj-$(CONFIG_STM_SOURCE_HEARTBEAT)	+= stm_heartbeat.o
 
 stm_console-y		:= console.o
+stm_heartbeat-y		:= heartbeat.o
diff -ruw linux-4.4.115/drivers/i2c/algos/i2c-algo-bit.c linux-4.4.115-fbx/drivers/i2c/algos/i2c-algo-bit.c
--- linux-4.4.115/drivers/i2c/algos/i2c-algo-bit.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/i2c/algos/i2c-algo-bit.c	2019-01-22 16:16:23.815249232 +0100
@@ -381,7 +381,8 @@
 		 * the SMBus PEC was wrong.
 		 */
 		} else if (retval == 0) {
-			dev_err(&i2c_adap->dev, "sendbytes: NAK bailout.\n");
+			dev_err(&i2c_adap->dev, "sendbytes: %02x NAK bailout.\n",
+				msg->addr);
 			return -EIO;
 
 		/* Timeout; or (someday) lost arbitration
diff -ruw linux-4.4.115/drivers/i2c/busses/Kconfig linux-4.4.115-fbx/drivers/i2c/busses/Kconfig
--- linux-4.4.115/drivers/i2c/busses/Kconfig	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/i2c/busses/Kconfig	2019-10-29 09:26:23.729204020 +0100
@@ -1198,4 +1198,16 @@
 	  This driver can also be built as a module. If so, the module will be
 	  called as i2c-opal.
 
+config I2C_MSM_V2
+        tristate "I2C_MSM_V2"
+        depends on ARCH_QCOM
+        help
+          If you say yes to this option, support will be included for the
+          built-in I2C interface and its DMA engine on the MSM family
+          processors.
+
+          This driver can also be built as a module.  If so, the module
+          will be called i2c-msm-v2.
+
+
 endmenu
diff -ruw linux-4.4.115/drivers/i2c/busses/Makefile linux-4.4.115-fbx/drivers/i2c/busses/Makefile
--- linux-4.4.115/drivers/i2c/busses/Makefile	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/i2c/busses/Makefile	2019-10-29 09:26:23.729204020 +0100
@@ -96,6 +96,7 @@
 obj-$(CONFIG_I2C_XLR)		+= i2c-xlr.o
 obj-$(CONFIG_I2C_XLP9XX)	+= i2c-xlp9xx.o
 obj-$(CONFIG_I2C_RCAR)		+= i2c-rcar.o
+obj-$(CONFIG_I2C_MSM_V2)	+= i2c-msm-v2.o
 
 # External I2C/SMBus adapter drivers
 obj-$(CONFIG_I2C_DIOLAN_U2C)	+= i2c-diolan-u2c.o
diff -ruw linux-4.4.115/drivers/iio/adc/Kconfig linux-4.4.115-fbx/drivers/iio/adc/Kconfig
--- linux-4.4.115/drivers/iio/adc/Kconfig	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/iio/adc/Kconfig	2019-01-22 16:16:23.875249775 +0100
@@ -303,6 +303,33 @@
 	  To compile this driver as a module, choose M here: the module will
 	  be called qcom-spmi-vadc.
 
+config QCOM_RRADC
+	tristate "Qualcomm Technologies Inc. PMIC Round robin ADC"
+	depends on SPMI
+	select REGMAP_SPMI
+	help
+	  This is the PMIC Round Robin ADC driver.
+
+	  The driver supports reading multiple channels used for telemetry
+	  and allows clients to read batt_id, batt_therm, PMIC die
+	  temperature, and USB_IN and DC_IN voltage and current.
+	  The RRADC is a 10-bit ADC.
+
+	  To compile this driver as a module, choose M here: the module will
+	  be called qcom-rradc.
+
+config QCOM_TADC
+	tristate "Qualcomm Technologies Inc. TADC driver"
+	depends on MFD_I2C_PMIC
+	help
+	  Say yes here to support the Qualcomm Technologies Inc. telemetry ADC.
+	  The TADC provides battery temperature, skin temperature,
+	  die temperature, battery voltage, battery current, input voltage,
+	  input current, and OTG current.
+
+	  The driver can also be built as a module. If so, the module will be
+	  called qcom-tadc.
+
 config ROCKCHIP_SARADC
 	tristate "Rockchip SARADC driver"
 	depends on ARCH_ROCKCHIP || (ARM && COMPILE_TEST)
diff -ruw linux-4.4.115/drivers/iio/adc/Makefile linux-4.4.115-fbx/drivers/iio/adc/Makefile
--- linux-4.4.115/drivers/iio/adc/Makefile	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/iio/adc/Makefile	2019-01-22 16:16:23.875249775 +0100
@@ -29,6 +29,8 @@
 obj-$(CONFIG_NAU7802) += nau7802.o
 obj-$(CONFIG_QCOM_SPMI_IADC) += qcom-spmi-iadc.o
 obj-$(CONFIG_QCOM_SPMI_VADC) += qcom-spmi-vadc.o
+obj-$(CONFIG_QCOM_RRADC) += qcom-rradc.o
+obj-$(CONFIG_QCOM_TADC) += qcom-tadc.o
 obj-$(CONFIG_ROCKCHIP_SARADC) += rockchip_saradc.o
 obj-$(CONFIG_TI_ADC081C) += ti-adc081c.o
 obj-$(CONFIG_TI_ADC128S052) += ti-adc128s052.o
diff -ruw linux-4.4.115/drivers/iio/inkern.c linux-4.4.115-fbx/drivers/iio/inkern.c
--- linux-4.4.115/drivers/iio/inkern.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/iio/inkern.c	2019-01-22 16:16:23.891249920 +0100
@@ -664,3 +664,21 @@
 	return ret;
 }
 EXPORT_SYMBOL_GPL(iio_write_channel_raw);
+
+int iio_write_channel_processed(struct iio_channel *chan, int val)
+{
+	int ret;
+
+	mutex_lock(&chan->indio_dev->info_exist_lock);
+	if (chan->indio_dev->info == NULL) {
+		ret = -ENODEV;
+		goto err_unlock;
+	}
+
+	ret = iio_channel_write(chan, val, 0, IIO_CHAN_INFO_PROCESSED);
+err_unlock:
+	mutex_unlock(&chan->indio_dev->info_exist_lock);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(iio_write_channel_processed);
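A usage sketch for the iio_write_channel_processed() export added above,
mirroring the existing iio_write_channel_raw() consumer flow; the
consumer channel name "example-channel" is made up:

	#include <linux/err.h>
	#include <linux/iio/consumer.h>

	static int example_set_processed(struct device *dev, int val)
	{
		struct iio_channel *chan;
		int ret;

		chan = iio_channel_get(dev, "example-channel");
		if (IS_ERR(chan))
			return PTR_ERR(chan);

		/* val is in processed (scaled) units, not raw ADC/DAC codes */
		ret = iio_write_channel_processed(chan, val);
		iio_channel_release(chan);
		return ret;
	}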
diff -ruw linux-4.4.115/drivers/input/input.c linux-4.4.115-fbx/drivers/input/input.c
--- linux-4.4.115/drivers/input/input.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/input/input.c	2019-01-22 16:16:23.975250681 +0100
@@ -1667,8 +1667,14 @@
 	mutex_lock(&dev->mutex);
 	spin_lock_irqsave(&dev->event_lock, flags);
 
+	/*
+	 * Keys that have been pressed at suspend time are unlikely
+	 * to be still pressed when we resume.
+	 */
+	if (!test_bit(INPUT_PROP_NO_DUMMY_RELEASE, dev->propbit)) {
 	input_dev_toggle(dev, true);
 	input_dev_release_keys(dev);
+	}
 
 	spin_unlock_irqrestore(&dev->event_lock, flags);
 	mutex_unlock(&dev->mutex);
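Drivers whose keys genuinely stay pressed across a suspend cycle can opt
out of the synthetic releases by setting the property bit tested above;
INPUT_PROP_NO_DUMMY_RELEASE is specific to this patch set. A minimal
sketch:

	#include <linux/input.h>

	static struct input_dev *example_allocate(void)
	{
		struct input_dev *idev = input_allocate_device();

		if (!idev)
			return NULL;

		/* skip the dummy key releases injected at resume time */
		__set_bit(INPUT_PROP_NO_DUMMY_RELEASE, idev->propbit);
		return idev;
	}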
diff -ruw linux-4.4.115/drivers/input/Kconfig linux-4.4.115-fbx/drivers/input/Kconfig
--- linux-4.4.115/drivers/input/Kconfig	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/input/Kconfig	2019-01-22 16:16:23.971250645 +0100
@@ -187,6 +187,19 @@
 	  To compile this driver as a module, choose M here: the
 	  module will be called apm-power.
 
+config INPUT_KEYRESET
+	bool "Reset key"
+	depends on INPUT
+	select INPUT_KEYCOMBO
+	---help---
+	  Say Y here if you want to reboot when some keys are pressed.
+
+config INPUT_KEYCOMBO
+	bool "Key combo"
+	depends on INPUT
+	---help---
+	  Say Y here if you want to take action when some keys are pressed.
+
 comment "Input Device Drivers"
 
 source "drivers/input/keyboard/Kconfig"
diff -ruw linux-4.4.115/drivers/input/Makefile linux-4.4.115-fbx/drivers/input/Makefile
--- linux-4.4.115/drivers/input/Makefile	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/input/Makefile	2019-01-22 16:16:23.971250645 +0100
@@ -26,3 +26,6 @@
 obj-$(CONFIG_INPUT_MISC)	+= misc/
 
 obj-$(CONFIG_INPUT_APMPOWER)	+= apm-power.o
+obj-$(CONFIG_INPUT_KEYRESET)	+= keyreset.o
+obj-$(CONFIG_INPUT_KEYCOMBO)	+= keycombo.o
+
diff -ruw linux-4.4.115/drivers/input/misc/Kconfig linux-4.4.115-fbx/drivers/input/misc/Kconfig
--- linux-4.4.115/drivers/input/misc/Kconfig	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/input/misc/Kconfig	2019-01-22 16:16:23.991250826 +0100
@@ -103,6 +103,17 @@
 	  To compile this driver as a module, choose M here: the
 	  module will be called e3x0_button.
 
+config INPUT_HBTP_INPUT
+	tristate "HBTP input driver support"
+	help
+	  This option enables an input driver for host-based touch
+	  processing.
+
+	  Say Y to enable HBTP input driver.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called hbtp_input.
+
 config INPUT_PCSPKR
 	tristate "PC Speaker support"
 	depends on PCSPKR_PLATFORM
@@ -150,6 +161,15 @@
 	  To compile this driver as a module, choose M here: the
 	  module will be called pmic8xxx-pwrkey.
 
+config INPUT_QPNP_POWER_ON
+	tristate "QPNP PMIC Power-on support"
+	depends on SPMI
+	help
+	  This option enables device driver support for the power-on
+	  functionality of Qualcomm Technologies, Inc. QPNP PMICs.  It supports
+	  reporting the change in status of the KPDPWR_N line (connected to the
+	  power-key) as well as reset features.
+
 config INPUT_SPARCSPKR
 	tristate "SPARC Speaker support"
 	depends on PCI && SPARC64
@@ -341,6 +361,17 @@
 	  To compile this driver as a module, choose M here: the module will be
 	  called ati_remote2.
 
+config INPUT_KEYCHORD
+	tristate "Key chord input driver support"
+	help
+	  Say Y here if you want to enable the key chord driver
+	  accessible at /dev/keychord.  This driver can be used
+	  for receiving notifications when client-specified key
+	  combinations are pressed.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called keychord.
+
 config INPUT_KEYSPAN_REMOTE
 	tristate "Keyspan DMR USB remote control"
 	depends on USB_ARCH_HAS_HCD
@@ -509,6 +540,11 @@
 	  To compile this driver as a module, choose M here: the
 	  module will be called sgi_btns.
 
+config INPUT_GPIO
+	tristate "GPIO driver support"
+	help
+	  Say Y here if you want to support GPIO-based keys, wheels, etc.
+
 config HP_SDC_RTC
 	tristate "HP SDC Real Time Clock"
 	depends on (GSC || HP300) && SERIO
@@ -796,4 +832,16 @@
 	  To compile this driver as a module, choose M here: the
 	  module will be called drv2667-haptics.
 
+source "drivers/input/misc/ots_pat9125/Kconfig"
+
+config INPUT_STMVL53L0
+	tristate "STM VL53L0 Proximity support"
+	depends on INPUT && I2C
+	help
+	  Say Y here if you want to use STMicroelectronics' proximity sensor
+	  through the I2C interface.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called stmvl53l0.
+
 endif
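Several of the entries above, qpnp-power-on in particular, boil down to
reporting a line state change as a key event. The generic input-layer
reporting pattern, sketched:

	#include <linux/input.h>

	/* report a KPDPWR_N-style line change as a power key press/release */
	static void example_report_pwrkey(struct input_dev *idev, bool pressed)
	{
		input_report_key(idev, KEY_POWER, pressed);
		input_sync(idev);
	}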
diff -ruw linux-4.4.115/drivers/input/misc/Makefile linux-4.4.115-fbx/drivers/input/misc/Makefile
--- linux-4.4.115/drivers/input/misc/Makefile	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/input/misc/Makefile	2019-01-22 16:16:23.991250826 +0100
@@ -34,9 +34,12 @@
 obj-$(CONFIG_INPUT_GP2A)		+= gp2ap002a00f.o
 obj-$(CONFIG_INPUT_GPIO_BEEPER)		+= gpio-beeper.o
 obj-$(CONFIG_INPUT_GPIO_TILT_POLLED)	+= gpio_tilt_polled.o
+obj-$(CONFIG_INPUT_GPIO)		+= gpio_event.o gpio_matrix.o gpio_input.o gpio_output.o gpio_axis.o
+obj-$(CONFIG_INPUT_HBTP_INPUT)		+= hbtp_input.o hbtp_vm.o
 obj-$(CONFIG_HP_SDC_RTC)		+= hp_sdc_rtc.o
 obj-$(CONFIG_INPUT_IMS_PCU)		+= ims-pcu.o
 obj-$(CONFIG_INPUT_IXP4XX_BEEPER)	+= ixp4xx-beeper.o
+obj-$(CONFIG_INPUT_KEYCHORD)		+= keychord.o
 obj-$(CONFIG_INPUT_KEYSPAN_REMOTE)	+= keyspan_remote.o
 obj-$(CONFIG_INPUT_KXTJ9)		+= kxtj9.o
 obj-$(CONFIG_INPUT_M68K_BEEP)		+= m68kspkr.o
@@ -54,6 +57,7 @@
 obj-$(CONFIG_INPUT_PM8941_PWRKEY)	+= pm8941-pwrkey.o
 obj-$(CONFIG_INPUT_PM8XXX_VIBRATOR)	+= pm8xxx-vibrator.o
 obj-$(CONFIG_INPUT_PMIC8XXX_PWRKEY)	+= pmic8xxx-pwrkey.o
+obj-$(CONFIG_INPUT_QPNP_POWER_ON)	+= qpnp-power-on.o
 obj-$(CONFIG_INPUT_POWERMATE)		+= powermate.o
 obj-$(CONFIG_INPUT_PWM_BEEPER)		+= pwm-beeper.o
 obj-$(CONFIG_INPUT_RB532_BUTTON)	+= rb532_button.o
@@ -75,3 +79,5 @@
 obj-$(CONFIG_INPUT_XEN_KBDDEV_FRONTEND)	+= xen-kbdfront.o
 obj-$(CONFIG_INPUT_YEALINK)		+= yealink.o
 obj-$(CONFIG_INPUT_IDEAPAD_SLIDEBAR)	+= ideapad_slidebar.o
+obj-$(CONFIG_INPUT_PIXART_OTS_PAT9125_SWITCH)	+= ots_pat9125/
+obj-$(CONFIG_INPUT_STMVL53L0)		+= vl53L0/
diff -ruw linux-4.4.115/drivers/input/touchscreen/Kconfig linux-4.4.115-fbx/drivers/input/touchscreen/Kconfig
--- linux-4.4.115/drivers/input/touchscreen/Kconfig	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/input/touchscreen/Kconfig	2019-01-22 16:16:24.027251152 +0100
@@ -11,7 +11,9 @@
 
 if INPUT_TOUCHSCREEN
 
-config TOUCHSCREEN_PROPERTIES
+source "drivers/input/touchscreen/synaptics_dsx/Kconfig"
+
+config OF_TOUCHSCREEN
 	def_tristate INPUT
 	depends on INPUT
 
@@ -115,6 +117,18 @@
 	  To compile this driver as a module, choose M here: the
 	  module will be called atmel_mxt_ts.
 
+config TOUCHSCREEN_ATMEL_MAXTOUCH_TS
+	tristate "Atmel Maxtouch Touchscreen Family"
+	depends on I2C
+	help
+	  Say Y here if you have an Atmel MaXTouch touchscreen
+	  connected to your system over I2C.
+
+	  If unsure, say N.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called atmel_maxtouch_ts.
+
 config TOUCHSCREEN_AUO_PIXCIR
 	tristate "AUO in-cell touchscreen using Pixcir ICs"
 	depends on I2C
@@ -1095,6 +1109,34 @@
 	  To compile this driver as a module, choose M here: the
 	  module will be called colibri_vf50_ts.
 
+config TOUCHSCREEN_FT5X06_PSENSOR
+       tristate "FocalTech proximity feature support"
+       depends on TOUCHSCREEN_FT5X06 && SENSORS
+       help
+         Say Y here if you want to support ft5x06's proximity
+         feature.
+
+         If unsure, say N.
+
+config TOUCHSCREEN_FT5X06_GESTURE
+       tristate "FocalTech gesture feature support"
+       depends on TOUCHSCREEN_FT5X06
+       help
+         Say Y here if you want to support ft5x06's gesture
+         feature.
+
+         If unsure, say N.
+
+config TOUCHSCREEN_SYNAPTICS_DSX_FW_UPDATE_EXTRA_SYSFS
+	bool "Synaptics DSX firmware update extra sysfs attributes"
+	depends on TOUCHSCREEN_SYNAPTICS_DSX_FW_UPDATE
+	help
+	  Say Y here to enable support for extra sysfs attributes
+	  supporting firmware update in a development environment.
+	  This does not affect the core or other subsystem attributes.
+
+	  If unsure, say N.
+
 config TOUCHSCREEN_ROHM_BU21023
 	tristate "ROHM BU21023/24 Dual touch support resistive touchscreens"
 	depends on I2C
@@ -1106,4 +1148,81 @@
 	  To compile this driver as a module, choose M here: the
 	  module will be called bu21023_ts.
 
+config TOUCHSCREEN_MAXIM_STI
+	tristate "Maxim based STI touchscreens"
+	depends on SPI_MASTER
+	help
+	  Say Y here if you have a touchscreen interface using the
+	  Maxim STI based touch controller.
+
+	  If unsure, say N.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called maxim_sti.
+
+config SECURE_TOUCH
+	bool "Secure Touch"
+	depends on (TOUCHSCREEN_SYNAPTICS_I2C_RMI4 || \
+	  TOUCHSCREEN_SYNAPTICS_DSX_I2C_v21)
+	help
+	  Say Y here to enable Secure Touch in supported drivers.
+
+	  If unsure, say N.
+
+config TOUCHSCREEN_GEN_VKEYS
+       tristate "Touchscreen Virtual Keys Driver"
+       help
+         Say Y here if you want to generate a sysfs entry for virtual
+	 keys on Android.
+
+         If unsure, say N.
+
+         To compile this driver as a module, choose M here: the
+         module will be called gen_vkeys.
+
+config TOUCHSCREEN_FT5X06
+       tristate "FocalTech touchscreens"
+       depends on I2C
+       help
+         Say Y here if you have a ft5X06 touchscreen.
+	 Ft5x06 controllers are multi touch controllers which can
+	 report 5 touches at a time.
+
+         If unsure, say N.
+
+         To compile this driver as a module, choose M here: the
+         module will be called ft5x06_ts.
+
+config FT_SECURE_TOUCH
+	bool "Secure Touch support for Focaltech Touchscreen"
+	depends on TOUCHSCREEN_FT5X06
+	help
+	  Say Y here if the Focaltech touch driver is connected and you
+	  want to enable secure touch for it.
+
+	  If unsure, say N.
+
+config TOUCHSCREEN_IT7260_I2C
+	tristate "IT7260 Touchscreen Driver"
+	depends on I2C
+	help
+	  Say Y here if you have an IT7260 touchscreen
+	  connected to your system.
+
+	  If unsure, say N.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called it7258_ts_i2c.
+
+config TOUCHSCREEN_ST
+	bool "STMicroelectronics Touchscreen Driver"
+	depends on I2C
+	help
+	  Say Y here if you have an STMicroelectronics touchscreen.
+
+	  If unsure, say N.
+
+source "drivers/input/touchscreen/st/Kconfig"
+
 endif
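TOUCHSCREEN_GEN_VKEYS above generates a sysfs entry describing Android
virtual keys. A hedged sketch of such an attribute; the
0x01:<keycode>:<center-x>:<center-y>:<width>:<height> layout is the
format Android's virtual-key maps commonly use, and the coordinates here
are invented:

	#include <linux/device.h>
	#include <linux/input.h>

	static ssize_t vkeys_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
	{
		/* one BACK key centred at (100, 1020), 180x80 pixels */
		return scnprintf(buf, PAGE_SIZE,
				 "0x01:%d:100:1020:180:80\n", KEY_BACK);
	}
	static DEVICE_ATTR_RO(vkeys);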
diff -ruw linux-4.4.115/drivers/input/touchscreen/Makefile linux-4.4.115-fbx/drivers/input/touchscreen/Makefile
--- linux-4.4.115/drivers/input/touchscreen/Makefile	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/input/touchscreen/Makefile	2019-01-22 16:16:24.027251152 +0100
@@ -15,6 +15,7 @@
 obj-$(CONFIG_TOUCHSCREEN_ADS7846)	+= ads7846.o
 obj-$(CONFIG_TOUCHSCREEN_AR1021_I2C)	+= ar1021_i2c.o
 obj-$(CONFIG_TOUCHSCREEN_ATMEL_MXT)	+= atmel_mxt_ts.o
+obj-$(CONFIG_TOUCHSCREEN_ATMEL_MAXTOUCH_TS)	+= atmel_maxtouch_ts.o
 obj-$(CONFIG_TOUCHSCREEN_AUO_PIXCIR)	+= auo-pixcir-ts.o
 obj-$(CONFIG_TOUCHSCREEN_BU21013)	+= bu21013_ts.o
 obj-$(CONFIG_TOUCHSCREEN_CHIPONE_ICN8318)	+= chipone_icn8318.o
@@ -36,15 +37,19 @@
 obj-$(CONFIG_TOUCHSCREEN_ELO)		+= elo.o
 obj-$(CONFIG_TOUCHSCREEN_EGALAX)	+= egalax_ts.o
 obj-$(CONFIG_TOUCHSCREEN_FT6236)	+= ft6236.o
+obj-$(CONFIG_TOUCHSCREEN_FT5X06)	+= ft5x06_ts.o
 obj-$(CONFIG_TOUCHSCREEN_FUJITSU)	+= fujitsu_ts.o
 obj-$(CONFIG_TOUCHSCREEN_GOODIX)	+= goodix.o
+obj-$(CONFIG_TOUCHSCREEN_GEN_VKEYS)	+= gen_vkeys.o
 obj-$(CONFIG_TOUCHSCREEN_ILI210X)	+= ili210x.o
 obj-$(CONFIG_TOUCHSCREEN_IMX6UL_TSC)	+= imx6ul_tsc.o
 obj-$(CONFIG_TOUCHSCREEN_INEXIO)	+= inexio.o
 obj-$(CONFIG_TOUCHSCREEN_INTEL_MID)	+= intel-mid-touch.o
+obj-$(CONFIG_TOUCHSCREEN_IT7260_I2C)	+= it7258_ts_i2c.o
 obj-$(CONFIG_TOUCHSCREEN_IPROC)		+= bcm_iproc_tsc.o
 obj-$(CONFIG_TOUCHSCREEN_LPC32XX)	+= lpc32xx_ts.o
 obj-$(CONFIG_TOUCHSCREEN_MAX11801)	+= max11801_ts.o
+obj-$(CONFIG_TOUCHSCREEN_MAXIM_STI)	+= maxim_sti.o
 obj-$(CONFIG_TOUCHSCREEN_MC13783)	+= mc13783_ts.o
 obj-$(CONFIG_TOUCHSCREEN_MCS5000)	+= mcs5000_ts.o
 obj-$(CONFIG_TOUCHSCREEN_MIGOR)		+= migor_ts.o
@@ -64,6 +69,7 @@
 obj-$(CONFIG_TOUCHSCREEN_STMPE)		+= stmpe-ts.o
 obj-$(CONFIG_TOUCHSCREEN_SUN4I)		+= sun4i-ts.o
 obj-$(CONFIG_TOUCHSCREEN_SUR40)		+= sur40.o
+obj-$(CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_v21) += synaptics_dsx/
 obj-$(CONFIG_TOUCHSCREEN_TI_AM335X_TSC)	+= ti_am335x_tsc.o
 obj-$(CONFIG_TOUCHSCREEN_TOUCHIT213)	+= touchit213.o
 obj-$(CONFIG_TOUCHSCREEN_TOUCHRIGHT)	+= touchright.o
@@ -91,3 +97,4 @@
 obj-$(CONFIG_TOUCHSCREEN_ZFORCE)	+= zforce_ts.o
 obj-$(CONFIG_TOUCHSCREEN_COLIBRI_VF50)	+= colibri-vf50-ts.o
 obj-$(CONFIG_TOUCHSCREEN_ROHM_BU21023)	+= rohm_bu21023.o
+obj-$(CONFIG_TOUCHSCREEN_ST)		+= st/
diff -ruw linux-4.4.115/drivers/iommu/arm-smmu.c linux-4.4.115-fbx/drivers/iommu/arm-smmu.c
--- linux-4.4.115/drivers/iommu/arm-smmu.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/iommu/arm-smmu.c	2019-10-29 09:26:23.813204842 +0100
@@ -42,13 +42,21 @@
 #include <linux/platform_device.h>
 #include <linux/slab.h>
 #include <linux/spinlock.h>
+#include <linux/notifier.h>
 
 #include <linux/amba/bus.h>
+#include <soc/qcom/msm_tz_smmu.h>
+#include <soc/qcom/scm.h>
+#include <soc/qcom/secure_buffer.h>
+#include <asm/cacheflush.h>
+#include <linux/msm-bus.h>
+#include <dt-bindings/msm/msm-bus-ids.h>
+#include <linux/msm_pcie.h>
 
 #include "io-pgtable.h"
 
 /* Maximum number of stream IDs assigned to a single device */
-#define MAX_MASTER_STREAMIDS		MAX_PHANDLE_ARGS
+#define MAX_MASTER_STREAMIDS		45
 
 /* Maximum number of context banks per SMMU */
 #define ARM_SMMU_MAX_CBS		128
@@ -148,7 +156,7 @@
 #define ARM_SMMU_GR0_sTLBGSYNC		0x70
 #define ARM_SMMU_GR0_sTLBGSTATUS	0x74
 #define sTLBGSTATUS_GSACTIVE		(1 << 0)
-#define TLB_LOOP_TIMEOUT		1000000	/* 1s! */
+#define TLB_LOOP_TIMEOUT		500000	/* 500ms */
 
 /* Stream mapping registers */
 #define ARM_SMMU_GR0_SMR(n)		(0x800 + ((n) << 2))
@@ -195,29 +203,38 @@
 #define ARM_SMMU_CB(smmu, n)		((n) * (1 << (smmu)->pgshift))
 
 #define ARM_SMMU_CB_SCTLR		0x0
+#define ARM_SMMU_CB_ACTLR		0x4
 #define ARM_SMMU_CB_RESUME		0x8
 #define ARM_SMMU_CB_TTBCR2		0x10
 #define ARM_SMMU_CB_TTBR0		0x20
 #define ARM_SMMU_CB_TTBR1		0x28
 #define ARM_SMMU_CB_TTBCR		0x30
+#define ARM_SMMU_CB_CONTEXTIDR		0x34
 #define ARM_SMMU_CB_S1_MAIR0		0x38
 #define ARM_SMMU_CB_S1_MAIR1		0x3c
 #define ARM_SMMU_CB_PAR_LO		0x50
 #define ARM_SMMU_CB_PAR_HI		0x54
 #define ARM_SMMU_CB_FSR			0x58
+#define ARM_SMMU_CB_FSRRESTORE		0x5c
 #define ARM_SMMU_CB_FAR_LO		0x60
 #define ARM_SMMU_CB_FAR_HI		0x64
 #define ARM_SMMU_CB_FSYNR0		0x68
 #define ARM_SMMU_CB_S1_TLBIVA		0x600
 #define ARM_SMMU_CB_S1_TLBIASID		0x610
+#define ARM_SMMU_CB_S1_TLBIALL		0x618
 #define ARM_SMMU_CB_S1_TLBIVAL		0x620
 #define ARM_SMMU_CB_S2_TLBIIPAS2	0x630
 #define ARM_SMMU_CB_S2_TLBIIPAS2L	0x638
+#define ARM_SMMU_CB_TLBSYNC		0x7f0
+#define ARM_SMMU_CB_TLBSTATUS		0x7f4
+#define TLBSTATUS_SACTIVE		(1 << 0)
 #define ARM_SMMU_CB_ATS1PR		0x800
 #define ARM_SMMU_CB_ATSR		0x8f0
+#define ARM_SMMU_GR1_CBFRSYNRA(n)	(0x400 + ((n) << 2))
 
 #define SCTLR_S1_ASIDPNE		(1 << 12)
 #define SCTLR_CFCFG			(1 << 7)
+#define SCTLR_HUPCF			(1 << 8)
 #define SCTLR_CFIE			(1 << 6)
 #define SCTLR_CFRE			(1 << 5)
 #define SCTLR_E				(1 << 4)
@@ -233,9 +250,6 @@
 #define RESUME_RETRY			(0 << 0)
 #define RESUME_TERMINATE		(1 << 0)
 
-#define TTBCR2_SEP_SHIFT		15
-#define TTBCR2_SEP_UPSTREAM		(0x7 << TTBCR2_SEP_SHIFT)
-
 #define TTBRn_ASID_SHIFT		48
 
 #define FSR_MULTI			(1 << 31)
@@ -249,6 +263,24 @@
 #define FSR_AFF				(1 << 2)
 #define FSR_TF				(1 << 1)
 
+/* Definitions for implementation-defined registers */
+#define ACTLR_QCOM_OSH_SHIFT		28
+#define ACTLR_QCOM_OSH			1
+
+#define ACTLR_QCOM_ISH_SHIFT		29
+#define ACTLR_QCOM_ISH			1
+
+#define ACTLR_QCOM_NSH_SHIFT		30
+#define ACTLR_QCOM_NSH			1
+
+#define ARM_SMMU_IMPL_DEF0(smmu) \
+	((smmu)->base + (2 * (1 << (smmu)->pgshift)))
+#define ARM_SMMU_IMPL_DEF1(smmu) \
+	((smmu)->base + (6 * (1 << (smmu)->pgshift)))
+#define IMPL_DEF1_MICRO_MMU_CTRL	0
+#define MICRO_MMU_CTRL_LOCAL_HALT_REQ	(1 << 2)
+#define MICRO_MMU_CTRL_IDLE		(1 << 3)
+
 #define FSR_IGN				(FSR_AFF | FSR_ASF | \
 					 FSR_TLBMCF | FSR_TLBLKF)
 #define FSR_FAULT			(FSR_MULTI | FSR_SS | FSR_UUT | \
@@ -284,11 +316,24 @@
 	struct arm_smmu_master_cfg	cfg;
 };
 
+enum smmu_model_id {
+	SMMU_MODEL_DEFAULT,
+	SMMU_MODEL_QCOM_V2,
+};
+
+struct arm_smmu_impl_def_reg {
+	u32 offset;
+	u32 value;
+};
+
 struct arm_smmu_device {
 	struct device			*dev;
 
+	enum smmu_model_id		model;
+
 	void __iomem			*base;
 	unsigned long			size;
+	phys_addr_t			phys_addr;
 	unsigned long			pgshift;
 
 #define ARM_SMMU_FEAT_COHERENT_WALK	(1 << 0)
@@ -300,6 +345,17 @@
 	u32				features;
 
 #define ARM_SMMU_OPT_SECURE_CFG_ACCESS (1 << 0)
+#define ARM_SMMU_OPT_INVALIDATE_ON_MAP (1 << 1)
+#define ARM_SMMU_OPT_HALT_AND_TLB_ON_ATOS  (1 << 2)
+#define ARM_SMMU_OPT_REGISTER_SAVE	(1 << 3)
+#define ARM_SMMU_OPT_SKIP_INIT		(1 << 4)
+#define ARM_SMMU_OPT_ERRATA_CTX_FAULT_HANG (1 << 5)
+#define ARM_SMMU_OPT_FATAL_ASF		(1 << 6)
+#define ARM_SMMU_OPT_ERRATA_TZ_ATOS	(1 << 7)
+#define ARM_SMMU_OPT_NO_SMR_CHECK	(1 << 9)
+#define ARM_SMMU_OPT_DYNAMIC		(1 << 10)
+#define ARM_SMMU_OPT_HALT		(1 << 11)
+#define ARM_SMMU_OPT_STATIC_CB		(1 << 12)
 	u32				options;
 	enum arm_smmu_arch_version	version;
 
@@ -311,6 +367,8 @@
 	u32				num_mapping_groups;
 	DECLARE_BITMAP(smr_map, ARM_SMMU_MAX_SMRS);
 
+	u32				ubs;
+
 	unsigned long			va_size;
 	unsigned long			ipa_size;
 	unsigned long			pa_size;
@@ -320,18 +378,57 @@
 	unsigned int			*irqs;
 
 	struct list_head		list;
+	struct list_head		static_cbndx_list;
 	struct rb_root			masters;
+
+	int				num_clocks;
+	struct clk			**clocks;
+
+	struct regulator		*gdsc;
+	struct notifier_block		regulator_nb;
+
+	/* Protects against domains attaching to the same SMMU concurrently */
+	struct mutex			attach_lock;
+	unsigned int			attach_count;
+	struct idr			asid_idr;
+
+	struct arm_smmu_impl_def_reg	*impl_def_attach_registers;
+	unsigned int			num_impl_def_attach_registers;
+
+	spinlock_t			atos_lock;
+	unsigned int			clock_refs_count;
+	spinlock_t			clock_refs_lock;
+
+	struct mutex			power_lock;
+	unsigned int			power_count;
+
+	u32				bus_client;
+	struct msm_bus_scale_pdata	*bus_pdata;
+
+	enum tz_smmu_device_id		sec_id;
+	int				regulator_defer;
 };
 
 struct arm_smmu_cfg {
 	u8				cbndx;
 	u8				irptndx;
 	u32				cbar;
+	u32				procid;
+	u16				asid;
+	u8				vmid;
 };
 #define INVALID_IRPTNDX			0xff
+#define INVALID_CBNDX			0xff
+#define INVALID_ASID			0xffff
+#define INVALID_VMID			0xff
+/*
+ * In V7L and V8L with TTBCR2.AS == 0, ASID is 8 bits.
+ * V8L 16 with TTBCR2.AS == 1 (16 bit ASID) isn't supported yet.
+ */
+#define MAX_ASID			0xff
 
-#define ARM_SMMU_CB_ASID(cfg)		((cfg)->cbndx)
-#define ARM_SMMU_CB_VMID(cfg)		((cfg)->cbndx + 1)
+#define ARM_SMMU_CB_ASID(cfg)		((cfg)->asid)
+#define ARM_SMMU_CB_VMID(cfg)		((cfg)->vmid)
 
 enum arm_smmu_domain_stage {
 	ARM_SMMU_DOMAIN_S1 = 0,
@@ -339,13 +436,29 @@
 	ARM_SMMU_DOMAIN_NESTED,
 };
 
+struct arm_smmu_pte_info {
+	void *virt_addr;
+	size_t size;
+	struct list_head entry;
+};
+
 struct arm_smmu_domain {
 	struct arm_smmu_device		*smmu;
 	struct io_pgtable_ops		*pgtbl_ops;
-	spinlock_t			pgtbl_lock;
+	struct io_pgtable_cfg		pgtbl_cfg;
+	spinlock_t			pgtbl_spin_lock;
+	struct mutex			pgtbl_mutex_lock;
 	struct arm_smmu_cfg		cfg;
 	enum arm_smmu_domain_stage	stage;
 	struct mutex			init_mutex; /* Protects smmu pointer */
+	u32				attributes;
+	bool				slave_side_secure;
+	u32				secure_vmid;
+	struct list_head		pte_info_list;
+	struct list_head		unassign_list;
+	struct mutex			assign_lock;
+	struct list_head		secure_pool_list;
+	bool				non_fatal_faults;
 	struct iommu_domain		domain;
 };
 
@@ -361,9 +474,75 @@
 
 static struct arm_smmu_option_prop arm_smmu_options[] = {
 	{ ARM_SMMU_OPT_SECURE_CFG_ACCESS, "calxeda,smmu-secure-config-access" },
+	{ ARM_SMMU_OPT_INVALIDATE_ON_MAP, "qcom,smmu-invalidate-on-map" },
+	{ ARM_SMMU_OPT_HALT_AND_TLB_ON_ATOS, "qcom,halt-and-tlb-on-atos" },
+	{ ARM_SMMU_OPT_REGISTER_SAVE, "qcom,register-save" },
+	{ ARM_SMMU_OPT_SKIP_INIT, "qcom,skip-init" },
+	{ ARM_SMMU_OPT_ERRATA_CTX_FAULT_HANG, "qcom,errata-ctx-fault-hang" },
+	{ ARM_SMMU_OPT_FATAL_ASF, "qcom,fatal-asf" },
+	{ ARM_SMMU_OPT_ERRATA_TZ_ATOS, "qcom,errata-tz-atos" },
+	{ ARM_SMMU_OPT_NO_SMR_CHECK, "qcom,no-smr-check" },
+	{ ARM_SMMU_OPT_DYNAMIC, "qcom,dynamic" },
+	{ ARM_SMMU_OPT_HALT, "qcom,enable-smmu-halt"},
+	{ ARM_SMMU_OPT_STATIC_CB, "qcom,enable-static-cb"},
 	{ 0, NULL},
 };
 
+#define TYPE_TRANS	(S2CR_TYPE_TRANS >> S2CR_TYPE_SHIFT)
+#define TYPE_BYPASS	(S2CR_TYPE_BYPASS >> S2CR_TYPE_SHIFT)
+#define TYPE_FAULT	(S2CR_TYPE_FAULT >> S2CR_TYPE_SHIFT)
+
+struct static_cbndx_entry {
+	struct list_head list;
+	u8 cbndx;
+	u8 smr_idx;
+	u16 sid;
+	u8 type;
+};
+
+struct arm_iommus_node {
+	struct device_node	*master;
+	struct list_head	list;
+	struct list_head	iommuspec_list;
+};
+
+struct arm_iommus_spec {
+	struct of_phandle_args	iommu_spec;
+	struct list_head	list;
+};
+
+static LIST_HEAD(iommus_nodes);
+
+static int arm_smmu_enable_clocks_atomic(struct arm_smmu_device *smmu);
+static void arm_smmu_disable_clocks_atomic(struct arm_smmu_device *smmu);
+static void arm_smmu_prepare_pgtable(void *addr, void *cookie);
+static void arm_smmu_unprepare_pgtable(void *cookie, void *addr, size_t size);
+static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
+					      dma_addr_t iova);
+static phys_addr_t arm_smmu_iova_to_phys_hard_no_halt(
+	struct iommu_domain *domain, dma_addr_t iova);
+static int arm_smmu_wait_for_halt(struct arm_smmu_device *smmu);
+static int arm_smmu_halt_nowait(struct arm_smmu_device *smmu);
+static void arm_smmu_resume(struct arm_smmu_device *smmu);
+static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
+					dma_addr_t iova);
+static int arm_smmu_assign_table(struct arm_smmu_domain *smmu_domain);
+static void arm_smmu_unassign_table(struct arm_smmu_domain *smmu_domain);
+static int arm_smmu_halt(struct arm_smmu_device *smmu);
+static void arm_smmu_device_reset(struct arm_smmu_device *smmu);
+static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
+			     size_t size);
+static bool arm_smmu_is_master_side_secure(struct arm_smmu_domain *smmu_domain);
+static bool arm_smmu_is_static_cb(struct arm_smmu_device *smmu);
+static bool arm_smmu_is_slave_side_secure(struct arm_smmu_domain *smmu_domain);
+static bool arm_smmu_has_secure_vmid(struct arm_smmu_domain *smmu_domain);
+static bool arm_smmu_is_iova_coherent(struct iommu_domain *domain,
+					dma_addr_t iova);
+static uint64_t arm_smmu_iova_to_pte(struct iommu_domain *domain,
+					      dma_addr_t iova);
+
+static int arm_smmu_enable_s1_translations(struct arm_smmu_domain *smmu_domain);
+
 static struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom)
 {
 	return container_of(dom, struct arm_smmu_domain, domain);
@@ -377,7 +556,7 @@
 		if (of_property_read_bool(smmu->dev->of_node,
 						arm_smmu_options[i].prop)) {
 			smmu->options |= arm_smmu_options[i].opt;
-			dev_notice(smmu->dev, "option %s\n",
+			dev_dbg(smmu->dev, "option %s\n",
 				arm_smmu_options[i].prop);
 		}
 	} while (arm_smmu_options[++i].opt);
@@ -417,6 +596,28 @@
 	return NULL;
 }
 
+static struct arm_smmu_master *find_smmu_master_by_sid(
+			struct arm_smmu_device *smmu, u32 sid)
+{
+	struct rb_node *next;
+	struct arm_smmu_master *master;
+	struct arm_smmu_master_cfg *cfg;
+	int i;
+
+	next = rb_first(&smmu->masters);
+	for (; next; next = rb_next(next)) {
+		master = container_of(next, struct arm_smmu_master, node);
+		cfg = &master->cfg;
+
+		for (i = 0; i < cfg->num_streamids; i++) {
+			if (cfg->streamids[i] == sid)
+				return master;
+		}
+	}
+
+	return NULL;
+}
+
 static struct arm_smmu_master_cfg *
 find_smmu_master_cfg(struct device *dev)
 {
@@ -456,25 +657,32 @@
 	return 0;
 }
 
+struct iommus_entry {
+	struct list_head list;
+	struct device_node *node;
+	u16 streamids[MAX_MASTER_STREAMIDS];
+	int num_sids;
+};
+
 static int register_smmu_master(struct arm_smmu_device *smmu,
-				struct device *dev,
-				struct of_phandle_args *masterspec)
+				struct iommus_entry *entry)
 {
 	int i;
 	struct arm_smmu_master *master;
+	struct device *dev = smmu->dev;
 
-	master = find_smmu_master(smmu, masterspec->np);
+	master = find_smmu_master(smmu, entry->node);
 	if (master) {
 		dev_err(dev,
 			"rejecting multiple registrations for master device %s\n",
-			masterspec->np->name);
+			entry->node->name);
 		return -EBUSY;
 	}
 
-	if (masterspec->args_count > MAX_MASTER_STREAMIDS) {
+	if (entry->num_sids > MAX_MASTER_STREAMIDS) {
 		dev_err(dev,
 			"reached maximum number (%d) of stream IDs for master device %s\n",
-			MAX_MASTER_STREAMIDS, masterspec->np->name);
+			MAX_MASTER_STREAMIDS, entry->node->name);
 		return -ENOSPC;
 	}
 
@@ -482,22 +690,84 @@
 	if (!master)
 		return -ENOMEM;
 
-	master->of_node			= masterspec->np;
-	master->cfg.num_streamids	= masterspec->args_count;
+	master->of_node			= entry->node;
+	master->cfg.num_streamids	= entry->num_sids;
 
-	for (i = 0; i < master->cfg.num_streamids; ++i) {
-		u16 streamid = masterspec->args[i];
+	for (i = 0; i < master->cfg.num_streamids; ++i)
+		master->cfg.streamids[i] = entry->streamids[i];
 
-		if (!(smmu->features & ARM_SMMU_FEAT_STREAM_MATCH) &&
-		     (streamid >= smmu->num_mapping_groups)) {
-			dev_err(dev,
-				"stream ID for master device %s greater than maximum allowed (%d)\n",
-				masterspec->np->name, smmu->num_mapping_groups);
-			return -ERANGE;
+	return insert_smmu_master(smmu, master);
 		}
-		master->cfg.streamids[i] = streamid;
+
+static int arm_smmu_parse_iommus_properties(struct arm_smmu_device *smmu)
+{
+	struct arm_iommus_node *node, *nex;
+
+	list_for_each_entry_safe(node, nex, &iommus_nodes, list) {
+		struct iommus_entry *entry, *next;
+		struct arm_iommus_spec *iommuspec_node, *n;
+		LIST_HEAD(iommus);
+		int node_found = 0;
+
+		list_for_each_entry_safe(iommuspec_node, n,
+				&node->iommuspec_list, list) {
+			if (iommuspec_node->iommu_spec.np != smmu->dev->of_node)
+				continue;
+
+			/*
+			 * Since each master node will have iommu spec(s) of the
+			 * same device, we can delete this master node after
+			 * the devices are registered.
+			 */
+			node_found = 1;
+
+			list_for_each_entry(entry, &iommus, list)
+				if (entry->node == node->master)
+					break;
+			if (&entry->list == &iommus) {
+				entry = devm_kzalloc(smmu->dev, sizeof(*entry),
+						GFP_KERNEL);
+				if (!entry)
+					return -ENOMEM;
+				entry->node = node->master;
+				list_add(&entry->list, &iommus);
 	}
-	return insert_smmu_master(smmu, master);
+			switch (iommuspec_node->iommu_spec.args_count) {
+			case 0:
+				/*
+				 * For pci-e devices the SIDs are provided
+				 * at device attach time.
+				 */
+				break;
+			case 1:
+				entry->num_sids++;
+				entry->streamids[entry->num_sids - 1]
+					= iommuspec_node->iommu_spec.args[0];
+				break;
+			default:
+				BUG();
+			}
+			list_del(&iommuspec_node->list);
+			kfree(iommuspec_node);
+		}
+
+		list_for_each_entry_safe(entry, next, &iommus, list) {
+			int rc = register_smmu_master(smmu, entry);
+
+			if (rc)
+				dev_err(smmu->dev, "Couldn't register %s\n",
+						entry->node->name);
+			list_del(&entry->list);
+			devm_kfree(smmu->dev, entry);
+		}
+
+		if (node_found) {
+			list_del(&node->list);
+			kfree(node);
+		}
+	}
+
+	return 0;
 }
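For reference, the device-tree shape the parser above walks: each master
lists its SMMU with one stream id per "iommus" entry, matching the
args_count switch (0 arguments for pci-e masters whose stream ids arrive
at attach time). An illustrative fragment, not taken from this patch:

	/*
	 *	master@0 {
	 *		iommus = <&smmu 0x40>,	(one cell -> one stream id)
	 *			 <&smmu 0x41>;
	 *	};
	 */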
 
 static struct arm_smmu_device *find_smmu_for_device(struct device *dev)
@@ -530,11 +800,272 @@
 	return idx;
 }
 
+static int __arm_smmu_set_bitmap(unsigned long *map, int idx)
+{
+	return test_and_set_bit(idx, map);
+}
+
+static struct static_cbndx_entry *arm_smmu_get_static_entry_from_sid(
+		struct arm_smmu_device *smmu, int sid)
+{
+	struct static_cbndx_entry *entry;
+
+	list_for_each_entry(entry, &smmu->static_cbndx_list, list) {
+		if (entry->sid == sid)
+			return entry;
+	}
+
+	return NULL;
+}
+
+static struct static_cbndx_entry *arm_smmu_get_static_entry_from_context(
+		struct arm_smmu_device *smmu, int idx)
+{
+	struct static_cbndx_entry *entry;
+
+	list_for_each_entry(entry, &smmu->static_cbndx_list, list) {
+		if (entry->type == TYPE_TRANS && entry->cbndx == idx)
+			return entry;
+	}
+
+	return NULL;
+}
+
+static struct static_cbndx_entry *arm_smmu_get_static_entry_from_smr(
+		struct arm_smmu_device *smmu, int idx)
+{
+	struct static_cbndx_entry *entry;
+
+	list_for_each_entry(entry, &smmu->static_cbndx_list, list) {
+		if (entry->smr_idx == idx)
+			return entry;
+	}
+
+	return NULL;
+}
+
+static int arm_smmu_alloc_smr_idx(struct arm_smmu_device *smmu, int start,
+		int end, int sid)
+{
+	struct static_cbndx_entry *entry = arm_smmu_get_static_entry_from_sid(
+								smmu, sid);
+
+	if (entry)
+		return entry->smr_idx;
+	else
+		return __arm_smmu_alloc_bitmap(smmu->smr_map, start, end);
+}
+
+static int arm_smmu_alloc_context_idx(struct arm_smmu_device *smmu, int start,
+		int end, u16 *streamids, int num_streamids)
+{
+	struct static_cbndx_entry *entry = NULL;
+	int i;
+
+	for (i = 0; i < num_streamids; ++i) {
+		entry = arm_smmu_get_static_entry_from_sid(smmu, streamids[i]);
+		if (entry && entry->type == TYPE_TRANS)
+			break;
+	}
+
+	if (entry && entry->type == TYPE_TRANS)
+		return entry->cbndx;
+	else
+		return __arm_smmu_alloc_bitmap(smmu->context_map, start, end);
+}
+
 static void __arm_smmu_free_bitmap(unsigned long *map, int idx)
 {
 	clear_bit(idx, map);
 }
 
+static void arm_smmu_free_smr_idx(struct arm_smmu_device *smmu, int idx)
+{
+	struct static_cbndx_entry *entry = arm_smmu_get_static_entry_from_smr(
+								smmu, idx);
+
+	if (!entry)
+		__arm_smmu_free_bitmap(smmu->smr_map, idx);
+}
+
+static void arm_smmu_free_context_idx(struct arm_smmu_device *smmu, int idx)
+{
+	struct static_cbndx_entry *entry =
+		arm_smmu_get_static_entry_from_context(smmu, idx);
+
+	if (!entry)
+		__arm_smmu_free_bitmap(smmu->context_map, idx);
+}
+
+static void arm_smmu_unprepare_clocks(struct arm_smmu_device *smmu)
+{
+	int i;
+
+	for (i = smmu->num_clocks; i; --i)
+		clk_unprepare(smmu->clocks[i - 1]);
+}
+
+static int arm_smmu_prepare_clocks(struct arm_smmu_device *smmu)
+{
+	int i, ret = 0;
+
+	for (i = 0; i < smmu->num_clocks; ++i) {
+		ret = clk_prepare(smmu->clocks[i]);
+		if (ret) {
+			dev_err(smmu->dev, "Couldn't prepare clock #%d\n", i);
+			while (i--)
+				clk_unprepare(smmu->clocks[i]);
+			break;
+		}
+	}
+	return ret;
+}
+
+static int arm_smmu_request_bus(struct arm_smmu_device *smmu)
+{
+	if (!smmu->bus_client)
+		return 0;
+	return msm_bus_scale_client_update_request(smmu->bus_client, 1);
+}
+
+static int arm_smmu_unrequest_bus(struct arm_smmu_device *smmu)
+{
+	if (!smmu->bus_client)
+		return 0;
+	return msm_bus_scale_client_update_request(smmu->bus_client, 0);
+}
+
+static int arm_smmu_disable_regulators(struct arm_smmu_device *smmu)
+{
+	int ret = 0;
+
+	mutex_lock(&smmu->power_lock);
+	if (smmu->power_count == 0) {
+		WARN(1, "%s: Mismatched power count\n", dev_name(smmu->dev));
+		mutex_unlock(&smmu->power_lock);
+		return -EINVAL;
+	} else if (smmu->power_count > 1) {
+		smmu->power_count -= 1;
+		mutex_unlock(&smmu->power_lock);
+		return 0;
+	}
+
+	arm_smmu_unprepare_clocks(smmu);
+	arm_smmu_unrequest_bus(smmu);
+	if (smmu->gdsc) {
+		ret = regulator_disable_deferred(smmu->gdsc,
+						 smmu->regulator_defer);
+		WARN(ret, "%s: Regulator disable failed\n",
+			dev_name(smmu->dev));
+	}
+
+	smmu->power_count = 0;
+	mutex_unlock(&smmu->power_lock);
+	return ret;
+}
+
+static int arm_smmu_enable_regulators(struct arm_smmu_device *smmu)
+{
+	int ret;
+
+	mutex_lock(&smmu->power_lock);
+	if (smmu->power_count) {
+		smmu->power_count++;
+		mutex_unlock(&smmu->power_lock);
+		return 0;
+	}
+
+	if (smmu->gdsc) {
+		ret = regulator_enable(smmu->gdsc);
+		if (WARN_ON_ONCE(ret))
+			goto out;
+	}
+
+	ret = arm_smmu_request_bus(smmu);
+	if (WARN_ON_ONCE(ret))
+		goto out_reg;
+
+	ret = arm_smmu_prepare_clocks(smmu);
+	if (WARN_ON_ONCE(ret))
+		goto out_bus;
+
+	smmu->power_count = 1;
+	mutex_unlock(&smmu->power_lock);
+	return ret;
+
+out_bus:
+	arm_smmu_unrequest_bus(smmu);
+out_reg:
+	if (smmu->gdsc)
+		regulator_disable(smmu->gdsc);
+out:
+	mutex_unlock(&smmu->power_lock);
+	return ret;
+}
+
+static int arm_smmu_enable_clocks(struct arm_smmu_device *smmu)
+{
+	int ret = 0;
+
+	ret = arm_smmu_enable_regulators(smmu);
+	if (unlikely(ret))
+		return ret;
+	ret = arm_smmu_enable_clocks_atomic(smmu);
+	if (unlikely(ret))
+		arm_smmu_disable_regulators(smmu);
+
+	return ret;
+}
+
+static void arm_smmu_disable_clocks(struct arm_smmu_device *smmu)
+{
+	arm_smmu_disable_clocks_atomic(smmu);
+	arm_smmu_disable_regulators(smmu);
+}
+
+/* Clocks must be prepared before this (arm_smmu_prepare_clocks) */
+static int arm_smmu_enable_clocks_atomic(struct arm_smmu_device *smmu)
+{
+	int i, ret = 0;
+	unsigned long flags;
+
+	spin_lock_irqsave(&smmu->clock_refs_lock, flags);
+	if (smmu->clock_refs_count++ > 0) {
+		spin_unlock_irqrestore(&smmu->clock_refs_lock, flags);
+		return 0;
+	}
+
+	for (i = 0; i < smmu->num_clocks; ++i) {
+		ret = clk_enable(smmu->clocks[i]);
+		if (WARN_ON_ONCE(ret)) {
+			dev_err(smmu->dev, "Couldn't enable clock #%d\n", i);
+			while (i--)
+				clk_disable(smmu->clocks[i]);
+			smmu->clock_refs_count--;
+			break;
+		}
+	}
+	spin_unlock_irqrestore(&smmu->clock_refs_lock, flags);
+	return ret;
+}
+
+/* Clocks should be unprepared after this (arm_smmu_unprepare_clocks) */
+static void arm_smmu_disable_clocks_atomic(struct arm_smmu_device *smmu)
+{
+	int i;
+	unsigned long flags;
+
+	spin_lock_irqsave(&smmu->clock_refs_lock, flags);
+	if (smmu->clock_refs_count-- > 1) {
+		spin_unlock_irqrestore(&smmu->clock_refs_lock, flags);
+		return;
+	}
+
+	for (i = smmu->num_clocks; i; --i)
+		clk_disable(smmu->clocks[i - 1]);
+	spin_unlock_irqrestore(&smmu->clock_refs_lock, flags);
+}
+
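The pairing contract stated in the two comments above, distilled:
clk_prepare()/clk_unprepare() may sleep and must run in process context,
while clk_enable()/clk_disable() are the halves that are safe under the
atomic clock_refs_lock. A minimal illustration for a single clock:

	#include <linux/clk.h>

	static int example_clock_on(struct clk *clk)
	{
		int ret;

		ret = clk_prepare(clk);		/* sleepable context only */
		if (ret)
			return ret;

		ret = clk_enable(clk);		/* safe in atomic context */
		if (ret)
			clk_unprepare(clk);	/* unwind the prepare */

		return ret;
	}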
 /* Wait for any pending TLB invalidations to complete */
 static void __arm_smmu_tlb_sync(struct arm_smmu_device *smmu)
 {
@@ -554,12 +1085,30 @@
 	}
 }
 
+static void arm_smmu_tlb_sync_cb(struct arm_smmu_device *smmu,
+				int cbndx)
+{
+	void __iomem *base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cbndx);
+	u32 val;
+
+	writel_relaxed(0, base + ARM_SMMU_CB_TLBSYNC);
+	if (readl_poll_timeout_atomic(base + ARM_SMMU_CB_TLBSTATUS, val,
+				      !(val & TLBSTATUS_SACTIVE),
+				      0, TLB_LOOP_TIMEOUT))
+		dev_err(smmu->dev, "TLBSYNC timeout!\n");
+}
+
 static void arm_smmu_tlb_sync(void *cookie)
 {
 	struct arm_smmu_domain *smmu_domain = cookie;
-	__arm_smmu_tlb_sync(smmu_domain->smmu);
+
+	if (smmu_domain->smmu == NULL)
+		return;
+
+	arm_smmu_tlb_sync_cb(smmu_domain->smmu, smmu_domain->cfg.cbndx);
 }
 
+/* Must be called with clocks/regulators enabled */
 static void arm_smmu_tlb_inv_context(void *cookie)
 {
 	struct arm_smmu_domain *smmu_domain = cookie;
@@ -568,19 +1117,23 @@
 	bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
 	void __iomem *base;
 
+	if (!smmu)
+		return;
+
 	if (stage1) {
 		base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
 		writel_relaxed(ARM_SMMU_CB_ASID(cfg),
 			       base + ARM_SMMU_CB_S1_TLBIASID);
+		arm_smmu_tlb_sync_cb(smmu, cfg->cbndx);
 	} else {
 		base = ARM_SMMU_GR0(smmu);
 		writel_relaxed(ARM_SMMU_CB_VMID(cfg),
 			       base + ARM_SMMU_GR0_TLBIVMID);
-	}
-
 	__arm_smmu_tlb_sync(smmu);
 }
+}
 
+/* Must be called with clocks/regulators enabled */
 static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size,
 					  bool leaf, void *cookie)
 {
@@ -589,6 +1142,11 @@
 	struct arm_smmu_device *smmu = smmu_domain->smmu;
 	bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
 	void __iomem *reg;
+	int atomic_ctx = smmu_domain->attributes & (1 << DOMAIN_ATTR_ATOMIC);
+
+	BUG_ON(atomic_ctx && !smmu);
+	if (!smmu)
+		return;
 
 	if (stage1) {
 		reg = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
@@ -618,61 +1176,331 @@
 	}
 }
 
+static void arm_smmu_tlbi_domain(struct iommu_domain *domain)
+{
+	arm_smmu_tlb_inv_context(to_smmu_domain(domain));
+}
+
+static int arm_smmu_enable_config_clocks(struct iommu_domain *domain)
+{
+	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
+
+	return arm_smmu_enable_clocks(smmu_domain->smmu);
+}
+
+static void arm_smmu_disable_config_clocks(struct iommu_domain *domain)
+{
+	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
+
+	arm_smmu_disable_clocks(smmu_domain->smmu);
+}
+
+struct arm_smmu_secure_pool_chunk {
+	void *addr;
+	size_t size;
+	struct list_head list;
+};
+
+static void *arm_smmu_secure_pool_remove(struct arm_smmu_domain *smmu_domain,
+					size_t size)
+{
+	struct arm_smmu_secure_pool_chunk *it;
+
+	list_for_each_entry(it, &smmu_domain->secure_pool_list, list) {
+		if (it->size == size) {
+			void *addr = it->addr;
+
+			list_del(&it->list);
+			kfree(it);
+			return addr;
+		}
+	}
+
+	return NULL;
+}
+
+static int arm_smmu_secure_pool_add(struct arm_smmu_domain *smmu_domain,
+				     void *addr, size_t size)
+{
+	struct arm_smmu_secure_pool_chunk *chunk;
+
+	chunk = kmalloc(sizeof(*chunk), GFP_ATOMIC);
+	if (!chunk)
+		return -ENOMEM;
+
+	chunk->addr = addr;
+	chunk->size = size;
+	memset(addr, 0, size);
+	list_add(&chunk->list, &smmu_domain->secure_pool_list);
+
+	return 0;
+}
+
+static void arm_smmu_secure_pool_destroy(struct arm_smmu_domain *smmu_domain)
+{
+	struct arm_smmu_secure_pool_chunk *it, *i;
+
+	list_for_each_entry_safe(it, i, &smmu_domain->secure_pool_list, list) {
+		arm_smmu_unprepare_pgtable(smmu_domain, it->addr, it->size);
+		/* pages will be freed later (after being unassigned) */
+		list_del(&it->list);
+		kfree(it);
+	}
+}
+
+static void *arm_smmu_alloc_pages_exact(void *cookie,
+					size_t size, gfp_t gfp_mask)
+{
+	void *ret;
+	struct arm_smmu_domain *smmu_domain = cookie;
+
+	if (!arm_smmu_is_master_side_secure(smmu_domain))
+		return alloc_pages_exact(size, gfp_mask);
+
+	ret = arm_smmu_secure_pool_remove(smmu_domain, size);
+	if (ret)
+		return ret;
+
+	ret = alloc_pages_exact(size, gfp_mask);
+	if (ret)
+		arm_smmu_prepare_pgtable(ret, cookie);
+
+	return ret;
+}
+
+static void arm_smmu_free_pages_exact(void *cookie, void *virt, size_t size)
+{
+	struct arm_smmu_domain *smmu_domain = cookie;
+
+	if (!arm_smmu_is_master_side_secure(smmu_domain)) {
+		free_pages_exact(virt, size);
+		return;
+	}
+
+	if (arm_smmu_secure_pool_add(smmu_domain, virt, size))
+		arm_smmu_unprepare_pgtable(smmu_domain, virt, size);
+}
+
 static struct iommu_gather_ops arm_smmu_gather_ops = {
 	.tlb_flush_all	= arm_smmu_tlb_inv_context,
 	.tlb_add_flush	= arm_smmu_tlb_inv_range_nosync,
 	.tlb_sync	= arm_smmu_tlb_sync,
+	.alloc_pages_exact = arm_smmu_alloc_pages_exact,
+	.free_pages_exact = arm_smmu_free_pages_exact,
 };
 
+static phys_addr_t arm_smmu_verify_fault(struct iommu_domain *domain,
+					 dma_addr_t iova, u32 fsr)
+{
+	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
+	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
+	struct arm_smmu_device *smmu;
+	void __iomem *cb_base;
+	u64 sctlr, sctlr_orig;
+	phys_addr_t phys;
+
+	smmu = smmu_domain->smmu;
+	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
+
+	arm_smmu_halt_nowait(smmu);
+
+	writel_relaxed(RESUME_TERMINATE, cb_base + ARM_SMMU_CB_RESUME);
+
+	arm_smmu_wait_for_halt(smmu);
+
+	/* clear FSR to allow ATOS to log any faults */
+	writel_relaxed(fsr, cb_base + ARM_SMMU_CB_FSR);
+
+	/* disable stall mode momentarily */
+	sctlr_orig = readl_relaxed(cb_base + ARM_SMMU_CB_SCTLR);
+	sctlr = sctlr_orig & ~SCTLR_CFCFG;
+	writel_relaxed(sctlr, cb_base + ARM_SMMU_CB_SCTLR);
+
+	phys = arm_smmu_iova_to_phys_hard_no_halt(domain, iova);
+
+	if (!phys) {
+		dev_err(smmu->dev,
+			"ATOS failed. Will issue a TLBIALL and try again...\n");
+		arm_smmu_tlb_inv_context(smmu_domain);
+		phys = arm_smmu_iova_to_phys_hard_no_halt(domain, iova);
+		if (phys)
+			dev_err(smmu->dev,
+				"ATOS succeeded this time. Maybe we missed a TLB invalidation while messing with page tables earlier??\n");
+		else
+			dev_err(smmu->dev,
+				"ATOS still failed. If the page tables look good (check the software table walk) then hardware might be misbehaving.\n");
+	}
+
+	/* restore SCTLR */
+	writel_relaxed(sctlr_orig, cb_base + ARM_SMMU_CB_SCTLR);
+
+	arm_smmu_resume(smmu);
+
+	return phys;
+}
+
 static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
 {
-	int flags, ret;
-	u32 fsr, far, fsynr, resume;
-	unsigned long iova;
+	int flags, ret, tmp;
+	u32 fsr, fsynr, resume;
+	unsigned long iova, far;
 	struct iommu_domain *domain = dev;
 	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
 	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
-	struct arm_smmu_device *smmu = smmu_domain->smmu;
+	struct arm_smmu_device *smmu;
 	void __iomem *cb_base;
+	bool ctx_hang_errata;
+	bool fatal_asf;
+	void __iomem *gr1_base;
+	phys_addr_t phys_soft;
+	u32 sid;
+	bool non_fatal_fault = smmu_domain->non_fatal_faults;
+	struct arm_smmu_master *master;
+
+	static DEFINE_RATELIMIT_STATE(_rs,
+				      DEFAULT_RATELIMIT_INTERVAL,
+				      DEFAULT_RATELIMIT_BURST);
+
+	mutex_lock(&smmu_domain->init_mutex);
+	smmu = smmu_domain->smmu;
+	if (!smmu) {
+		ret = IRQ_HANDLED;
+		pr_err("took a fault on a detached domain (%p)\n", domain);
+		goto out_unlock;
+	}
+	ctx_hang_errata = smmu->options & ARM_SMMU_OPT_ERRATA_CTX_FAULT_HANG;
+	fatal_asf = smmu->options & ARM_SMMU_OPT_FATAL_ASF;
+
+	if (arm_smmu_enable_clocks(smmu)) {
+		ret = IRQ_NONE;
+		goto out_unlock;
+	}
 
+	gr1_base = ARM_SMMU_GR1(smmu);
 	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
 	fsr = readl_relaxed(cb_base + ARM_SMMU_CB_FSR);
 
-	if (!(fsr & FSR_FAULT))
-		return IRQ_NONE;
+	if (!(fsr & FSR_FAULT)) {
+		arm_smmu_disable_clocks(smmu);
+		ret = IRQ_NONE;
+		goto out_unlock;
+	}
 
-	if (fsr & FSR_IGN)
-		dev_err_ratelimited(smmu->dev,
-				    "Unexpected context fault (fsr 0x%x)\n",
-				    fsr);
+	if (fatal_asf && (fsr & FSR_ASF)) {
+		dev_err(smmu->dev,
+			"Took an address size fault.  Refusing to recover.\n");
+		BUG();
+	}
 
 	fsynr = readl_relaxed(cb_base + ARM_SMMU_CB_FSYNR0);
 	flags = fsynr & FSYNR0_WNR ? IOMMU_FAULT_WRITE : IOMMU_FAULT_READ;
+	if (fsr & FSR_TF)
+		flags |= IOMMU_FAULT_TRANSLATION;
+	if (fsr & FSR_PF)
+		flags |= IOMMU_FAULT_PERMISSION;
+	if (fsr & FSR_EF)
+		flags |= IOMMU_FAULT_EXTERNAL;
+	if (fsr & FSR_SS)
+		flags |= IOMMU_FAULT_TRANSACTION_STALLED;
 
 	far = readl_relaxed(cb_base + ARM_SMMU_CB_FAR_LO);
-	iova = far;
 #ifdef CONFIG_64BIT
-	far = readl_relaxed(cb_base + ARM_SMMU_CB_FAR_HI);
-	iova |= ((unsigned long)far << 32);
+	far |= ((u64)readl_relaxed(cb_base + ARM_SMMU_CB_FAR_HI)) << 32;
 #endif
+	iova = far;
 
-	if (!report_iommu_fault(domain, smmu->dev, iova, flags)) {
+	phys_soft = arm_smmu_iova_to_phys(domain, iova);
+	sid = readl_relaxed(gr1_base + ARM_SMMU_GR1_CBFRSYNRA(cfg->cbndx));
+	sid &= 0xffff;
+	master = find_smmu_master_by_sid(smmu, sid);
+	tmp = report_iommu_fault(domain, smmu->dev, iova, flags);
+	if (!tmp || (tmp == -EBUSY)) {
+		dev_dbg(smmu->dev,
+			"Context fault handled by client: iova=0x%08lx, fsr=0x%x, fsynr=0x%x, cb=%d\n",
+			iova, fsr, fsynr, cfg->cbndx);
+		dev_dbg(smmu->dev,
+			"soft iova-to-phys=%pa\n", &phys_soft);
 		ret = IRQ_HANDLED;
-		resume = RESUME_RETRY;
+		resume = RESUME_TERMINATE;
 	} else {
-		dev_err_ratelimited(smmu->dev,
-		    "Unhandled context fault: iova=0x%08lx, fsynr=0x%x, cb=%d\n",
-		    iova, fsynr, cfg->cbndx);
+		phys_addr_t phys_atos = arm_smmu_verify_fault(domain, iova,
+							      fsr);
+
+		if (__ratelimit(&_rs)) {
+			dev_err(smmu->dev, "Context Fault for %s\n",
+				master ? master->of_node->name : "Unknown SID");
+
+			dev_err(smmu->dev,
+				"Unhandled context fault: iova=0x%08lx, fsr=0x%x, fsynr=0x%x, cb=%d\n",
+				iova, fsr, fsynr, cfg->cbndx);
+			dev_err(smmu->dev, "FAR    = %016lx\n",
+				(unsigned long)far);
+			dev_err(smmu->dev,
+				"FSR    = %08x [%s%s%s%s%s%s%s%s%s]\n",
+				fsr,
+				(fsr & 0x02) ? "TF " : "",
+				(fsr & 0x04) ? "AFF " : "",
+				(fsr & 0x08) ? "PF " : "",
+				(fsr & 0x10) ? "EF " : "",
+				(fsr & 0x20) ? "TLBMCF " : "",
+				(fsr & 0x40) ? "TLBLKF " : "",
+				(fsr & 0x80) ? "MHF " : "",
+				(fsr & 0x40000000) ? "SS " : "",
+				(fsr & 0x80000000) ? "MULTI " : "");
+			dev_err(smmu->dev,
+				"soft iova-to-phys=%pa\n", &phys_soft);
+			if (!phys_soft)
+				dev_err(smmu->dev,
+					"SOFTWARE TABLE WALK FAILED! Looks like %s accessed an unmapped address!\n",
+					dev_name(smmu->dev));
+			dev_err(smmu->dev,
+				"hard iova-to-phys (ATOS)=%pa\n", &phys_atos);
+			dev_err(smmu->dev, "SID=0x%x\n", sid);
+		}
 		ret = IRQ_NONE;
 		resume = RESUME_TERMINATE;
+		if (!non_fatal_fault) {
+			dev_err(smmu->dev,
+				"Unhandled context faults are fatal on this domain. Going down now...\n");
+			BUG();
+		}
 	}
 
+	/*
+	 * If the client returns -EBUSY, do not clear FSR and do not RESUME
+	 * if stalled. This is required to keep the IOMMU client stalled on
+	 * the outstanding fault. This gives the client a chance to take any
+	 * debug action and then terminate the stalled transaction.
+	 * So, the sequence in case of stall on fault should be:
+	 * 1) Do not clear FSR or write to RESUME here
+	 * 2) Client takes any debug action
+	 * 3) Client terminates the stalled transaction and resumes the IOMMU
+	 * 4) Client clears FSR. The FSR should only be cleared after 3) and
+	 *    not before so that the fault remains outstanding. This ensures
+	 *    SCTLR.HUPCF has the desired effect if subsequent transactions also
+	 *    need to be terminated.
+	 */
+	if (tmp != -EBUSY) {
 	/* Clear the faulting FSR */
-	writel(fsr, cb_base + ARM_SMMU_CB_FSR);
+		writel_relaxed(fsr, cb_base + ARM_SMMU_CB_FSR);
+
+		/*
+		 * Barrier required to ensure that the FSR is cleared
+		 * before resuming SMMU operation
+		 */
+		wmb();
 
 	/* Retry or terminate any stalled transactions */
-	if (fsr & FSR_SS)
+		if (fsr & FSR_SS) {
+			if (ctx_hang_errata)
+				arm_smmu_tlb_sync_cb(smmu, cfg->cbndx);
 		writel_relaxed(resume, cb_base + ARM_SMMU_CB_RESUME);
+		}
+	}
+
+	arm_smmu_disable_clocks(smmu);
+out_unlock:
+	mutex_unlock(&smmu_domain->init_mutex);
 
 	return ret;
 }
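
The -EBUSY contract spelled out in the comment above implies a specific client-side shape. A hedged sketch, registered with iommu_set_fault_handler(); the names are illustrative, and ARM_SMMU_CB_RESUME/ARM_SMMU_CB_FSR/RESUME_TERMINATE are the driver-internal constants reused here only for clarity, reached through the .reg_read/.reg_write ops this patch adds further down:

	static struct iommu_domain *demo_domain;	/* saved at attach time */

	static void demo_debug_work_fn(struct work_struct *work)
	{
		const struct iommu_ops *ops = demo_domain->ops;

		/* step 3: terminate the stalled transaction and resume the SMMU */
		ops->reg_write(demo_domain, ARM_SMMU_CB_RESUME, RESUME_TERMINATE);
		/* step 4: only now clear FSR, so the fault stayed outstanding */
		ops->reg_write(demo_domain, ARM_SMMU_CB_FSR,
			       ops->reg_read(demo_domain, ARM_SMMU_CB_FSR));
	}
	static DECLARE_WORK(demo_debug_work, demo_debug_work_fn);

	static int demo_fault_handler(struct iommu_domain *domain,
				      struct device *dev, unsigned long iova,
				      int flags, void *token)
	{
		if (flags & IOMMU_FAULT_TRANSACTION_STALLED) {
			/* step 1: leave FSR set; -EBUSY stops the driver's RESUME */
			schedule_work(&demo_debug_work);	/* step 2 runs there */
			return -EBUSY;
		}
		return -ENOSYS;		/* unhandled: the driver logs and terminates */
	}
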
@@ -683,13 +1511,18 @@
 	struct arm_smmu_device *smmu = dev;
 	void __iomem *gr0_base = ARM_SMMU_GR0_NS(smmu);
 
+	if (arm_smmu_enable_clocks(smmu))
+		return IRQ_NONE;
+
 	gfsr = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSR);
 	gfsynr0 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR0);
 	gfsynr1 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR1);
 	gfsynr2 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR2);
 
-	if (!gfsr)
+	if (!gfsr) {
+		arm_smmu_disable_clocks(smmu);
 		return IRQ_NONE;
+	}
 
 	dev_err_ratelimited(smmu->dev,
 		"Unexpected global fault, this could be serious\n");
@@ -698,9 +1531,36 @@
 		gfsr, gfsynr0, gfsynr1, gfsynr2);
 
 	writel(gfsr, gr0_base + ARM_SMMU_GR0_sGFSR);
+	arm_smmu_disable_clocks(smmu);
 	return IRQ_HANDLED;
 }
 
+static void arm_smmu_trigger_fault(struct iommu_domain *domain,
+				   unsigned long flags)
+{
+	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
+	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
+	struct arm_smmu_device *smmu;
+	void __iomem *cb_base;
+
+	if (!smmu_domain->smmu) {
+		pr_err("Can't trigger faults on non-attached domains\n");
+		return;
+	}
+
+	smmu = smmu_domain->smmu;
+
+	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
+	if (arm_smmu_enable_clocks(smmu))
+		return;
+	dev_err(smmu->dev, "Writing 0x%lx to FSRRESTORE on cb %d\n",
+		flags, cfg->cbndx);
+	writel_relaxed(flags, cb_base + ARM_SMMU_CB_FSRRESTORE);
+	/* give the interrupt time to fire... */
+	msleep(1000);
+	arm_smmu_disable_clocks(smmu);
+}
+
 static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain,
 				       struct io_pgtable_cfg *pgtbl_cfg)
 {
@@ -723,6 +1583,9 @@
 		 */
 #ifdef CONFIG_64BIT
 		reg = CBA2R_RW64_64BIT;
+		if (!arm_smmu_has_secure_vmid(smmu_domain) &&
+			arm_smmu_is_static_cb(smmu))
+			msm_tz_set_cb_format(smmu->sec_id, cfg->cbndx);
 #else
 		reg = CBA2R_RW64_32BIT;
 #endif
@@ -741,9 +1604,8 @@
 	if (stage1) {
 		reg |= (CBAR_S1_BPSHCFG_NSH << CBAR_S1_BPSHCFG_SHIFT) |
 			(CBAR_S1_MEMATTR_WB << CBAR_S1_MEMATTR_SHIFT);
-	} else {
-		reg |= ARM_SMMU_CB_VMID(cfg) << CBAR_VMID_SHIFT;
 	}
+	reg |= ARM_SMMU_CB_VMID(cfg) << CBAR_VMID_SHIFT;
 	writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBAR(cfg->cbndx));
 
 	/* TTBRs */
@@ -767,7 +1629,6 @@
 		writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR);
 		if (smmu->version > ARM_SMMU_V1) {
 			reg = pgtbl_cfg->arm_lpae_s1_cfg.tcr >> 32;
-			reg |= TTBCR2_SEP_UPSTREAM;
 			writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR2);
 		}
 	} else {
@@ -783,8 +1644,25 @@
 		writel_relaxed(reg, cb_base + ARM_SMMU_CB_S1_MAIR1);
 	}
 
+	if (smmu->model == SMMU_MODEL_QCOM_V2) {
+		reg = ACTLR_QCOM_ISH << ACTLR_QCOM_ISH_SHIFT |
+			ACTLR_QCOM_OSH << ACTLR_QCOM_OSH_SHIFT |
+			ACTLR_QCOM_NSH << ACTLR_QCOM_NSH_SHIFT;
+		writel_relaxed(reg, cb_base + ARM_SMMU_CB_ACTLR);
+	}
+
 	/* SCTLR */
-	reg = SCTLR_CFCFG | SCTLR_CFIE | SCTLR_CFRE | SCTLR_M | SCTLR_EAE_SBOP;
+	reg = SCTLR_CFCFG | SCTLR_CFIE | SCTLR_CFRE | SCTLR_EAE_SBOP;
+
+	if (smmu_domain->attributes & (1 << DOMAIN_ATTR_CB_STALL_DISABLE)) {
+		reg &= ~SCTLR_CFCFG;
+		reg |= SCTLR_HUPCF;
+	}
+
+	if ((!(smmu_domain->attributes & (1 << DOMAIN_ATTR_S1_BYPASS)) &&
+	     !(smmu_domain->attributes & (1 << DOMAIN_ATTR_EARLY_MAP))) ||
+								!stage1)
+		reg |= SCTLR_M;
 	if (stage1)
 		reg |= SCTLR_S1_ASIDPNE;
 #ifdef __BIG_ENDIAN
@@ -793,20 +1671,107 @@
 	writel_relaxed(reg, cb_base + ARM_SMMU_CB_SCTLR);
 }
 
+static bool arm_smmu_is_static_cb(struct arm_smmu_device *smmu)
+{
+	return smmu->options & ARM_SMMU_OPT_STATIC_CB;
+}
+
+static bool arm_smmu_has_secure_vmid(struct arm_smmu_domain *smmu_domain)
+{
+	return smmu_domain->secure_vmid != VMID_INVAL;
+}
+
+static bool arm_smmu_is_slave_side_secure(struct arm_smmu_domain *smmu_domain)
+{
+	return arm_smmu_has_secure_vmid(smmu_domain)
+		&& smmu_domain->slave_side_secure;
+}
+
+static bool arm_smmu_is_master_side_secure(struct arm_smmu_domain *smmu_domain)
+{
+	return arm_smmu_has_secure_vmid(smmu_domain)
+		&& !smmu_domain->slave_side_secure;
+}
+
+static void arm_smmu_secure_domain_lock(struct arm_smmu_domain *smmu_domain)
+{
+	if (arm_smmu_is_master_side_secure(smmu_domain))
+		mutex_lock(&smmu_domain->assign_lock);
+}
+
+static void arm_smmu_secure_domain_unlock(struct arm_smmu_domain *smmu_domain)
+{
+	if (arm_smmu_is_master_side_secure(smmu_domain))
+		mutex_unlock(&smmu_domain->assign_lock);
+}
+
+static unsigned long arm_smmu_pgtbl_lock(struct arm_smmu_domain *smmu_domain)
+{
+	unsigned long flags = 0;
+
+	if (arm_smmu_is_slave_side_secure(smmu_domain))
+		mutex_lock(&smmu_domain->pgtbl_mutex_lock);
+	else
+		spin_lock_irqsave(&smmu_domain->pgtbl_spin_lock, flags);
+
+	return flags;
+}
+
+static void arm_smmu_pgtbl_unlock(struct arm_smmu_domain *smmu_domain,
+					unsigned long flags)
+{
+	if (arm_smmu_is_slave_side_secure(smmu_domain))
+		mutex_unlock(&smmu_domain->pgtbl_mutex_lock);
+	else
+		spin_unlock_irqrestore(&smmu_domain->pgtbl_spin_lock, flags);
+}
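
The split lock above exists, presumably, because slave-side secure domains edit their page tables through SCM calls into the secure world, which can sleep, so those domains take a mutex while everyone else keeps a spinlock (this reading of the vendor's intent is an assumption). Every page-table operation below then follows one pattern:

	/* usage pattern, as seen in the map/unmap paths further down */
	unsigned long flags;

	flags = arm_smmu_pgtbl_lock(smmu_domain);	/* mutex or spinlock */
	ret = ops->map(ops, iova, paddr, size, prot);
	arm_smmu_pgtbl_unlock(smmu_domain, flags);	/* flags unused for mutex */
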
+
+static int arm_smmu_restore_sec_cfg(struct arm_smmu_device *smmu)
+{
+	int ret;
+	u64 scm_ret = 0;
+
+	if (!arm_smmu_is_static_cb(smmu))
+		return 0;
+
+	ret = scm_restore_sec_cfg(smmu->sec_id, 0x0, &scm_ret);
+	if (ret || scm_ret) {
+		pr_err("scm call IOMMU_SECURE_CFG failed\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static bool is_iommu_pt_coherent(struct arm_smmu_domain *smmu_domain)
+{
+	if (smmu_domain->attributes &
+			(1 << DOMAIN_ATTR_PAGE_TABLE_FORCE_COHERENT))
+		return true;
+	else if (smmu_domain->smmu && smmu_domain->smmu->dev)
+		return smmu_domain->smmu->dev->archdata.dma_coherent;
+	else
+		return false;
+}
+
 static int arm_smmu_init_domain_context(struct iommu_domain *domain,
-					struct arm_smmu_device *smmu)
+					struct arm_smmu_device *smmu,
+					struct arm_smmu_master_cfg *master_cfg)
 {
 	int irq, start, ret = 0;
 	unsigned long ias, oas;
+	int sep = 0;
 	struct io_pgtable_ops *pgtbl_ops;
-	struct io_pgtable_cfg pgtbl_cfg;
 	enum io_pgtable_fmt fmt;
 	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
 	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
+	bool is_fast = smmu_domain->attributes & (1 << DOMAIN_ATTR_FAST);
+	unsigned long quirks =
+		smmu_domain->attributes & (1 << DOMAIN_ATTR_ENABLE_TTBR1) ?
+			IO_PGTABLE_QUIRK_ARM_TTBR1 : 0;
 
-	mutex_lock(&smmu_domain->init_mutex);
 	if (smmu_domain->smmu)
-		goto out_unlock;
+		goto out;
 
 	/*
 	 * Mapping the requested stage onto what we support is surprisingly
@@ -837,9 +1802,27 @@
 		start = smmu->num_s2_context_banks;
 		ias = smmu->va_size;
 		oas = smmu->ipa_size;
-		if (IS_ENABLED(CONFIG_64BIT))
+		if (IS_ENABLED(CONFIG_64BIT)) {
 			fmt = ARM_64_LPAE_S1;
-		else
+
+			if (quirks & IO_PGTABLE_QUIRK_ARM_TTBR1) {
+
+				/*
+				 * When the UBS ID is 5, the upstream bus is
+				 * 49 bits wide and bit 48 is the fixed sign
+				 * extension bit.  For any other bus size we
+				 * must pick the sign extension bit ourselves
+				 * and shrink the input size by one, e.g. an
+				 * ias of 36 gives sep = 35 and a 35-bit
+				 * input region.
+				 */
+
+				if (smmu->ubs == 5) {
+					sep = 48;
+				} else {
+					sep = ias - 1;
+					ias--;
+				}
+			}
+		} else
 			fmt = ARM_32_LPAE_S1;
 		break;
 	case ARM_SMMU_DOMAIN_NESTED:
@@ -859,15 +1842,18 @@
 		break;
 	default:
 		ret = -EINVAL;
-		goto out_unlock;
+		goto out;
 	}
 
-	ret = __arm_smmu_alloc_bitmap(smmu->context_map, start,
-				      smmu->num_context_banks);
+	if (cfg->cbndx == INVALID_CBNDX) {
+		ret = arm_smmu_alloc_context_idx(smmu, start,
+			smmu->num_context_banks, master_cfg->streamids,
+			master_cfg->num_streamids);
 	if (IS_ERR_VALUE(ret))
-		goto out_unlock;
-
+			goto out;
 	cfg->cbndx = ret;
+	}
+
 	if (smmu->version == ARM_SMMU_V1) {
 		cfg->irptndx = atomic_inc_return(&smmu->irptndx);
 		cfg->irptndx %= smmu->num_context_irqs;
@@ -875,33 +1861,68 @@
 		cfg->irptndx = cfg->cbndx;
 	}
 
-	pgtbl_cfg = (struct io_pgtable_cfg) {
+	smmu_domain->smmu = smmu;
+
+	if (is_iommu_pt_coherent(smmu_domain))
+		quirks |= IO_PGTABLE_QUIRK_PAGE_TABLE_COHERENT;
+
+	if (arm_smmu_is_slave_side_secure(smmu_domain)) {
+		smmu_domain->pgtbl_cfg = (struct io_pgtable_cfg) {
+			.quirks		= quirks,
+			.pgsize_bitmap	= arm_smmu_ops.pgsize_bitmap,
+			.arm_msm_secure_cfg = {
+				.sec_id = smmu->sec_id,
+				.cbndx = cfg->cbndx,
+			},
+			.iommu_dev	= smmu->dev,
+		};
+		fmt = ARM_MSM_SECURE;
+	} else {
+
+		smmu_domain->pgtbl_cfg = (struct io_pgtable_cfg) {
+			.quirks		= quirks,
 		.pgsize_bitmap	= arm_smmu_ops.pgsize_bitmap,
 		.ias		= ias,
 		.oas		= oas,
+			.sep		= sep,
 		.tlb		= &arm_smmu_gather_ops,
 		.iommu_dev	= smmu->dev,
+			.iova_base	= domain->geometry.aperture_start,
+			.iova_end	= domain->geometry.aperture_end,
 	};
+	}
 
-	smmu_domain->smmu = smmu;
-	pgtbl_ops = alloc_io_pgtable_ops(fmt, &pgtbl_cfg, smmu_domain);
+	if (is_fast)
+		fmt = ARM_V8L_FAST;
+
+	cfg->asid = cfg->cbndx + 1;
+	cfg->vmid = cfg->cbndx + 2;
+	pgtbl_ops = alloc_io_pgtable_ops(fmt, &smmu_domain->pgtbl_cfg,
+					 smmu_domain);
 	if (!pgtbl_ops) {
 		ret = -ENOMEM;
 		goto out_clear_smmu;
 	}
-
-	/* Update our support page sizes to reflect the page table format */
-	arm_smmu_ops.pgsize_bitmap = pgtbl_cfg.pgsize_bitmap;
+	/*
+	 * assign any page table memory that might have been allocated
+	 * during alloc_io_pgtable_ops
+	 */
+	if (arm_smmu_is_master_side_secure(smmu_domain)) {
+		arm_smmu_secure_domain_lock(smmu_domain);
+		arm_smmu_assign_table(smmu_domain);
+		arm_smmu_secure_domain_unlock(smmu_domain);
+	}
 
 	/* Initialise the context bank with our page table cfg */
-	arm_smmu_init_context_bank(smmu_domain, &pgtbl_cfg);
+	arm_smmu_init_context_bank(smmu_domain, &smmu_domain->pgtbl_cfg);
 
 	/*
 	 * Request context fault interrupt. Do this last to avoid the
 	 * handler seeing a half-initialised domain state.
 	 */
 	irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
-	ret = request_irq(irq, arm_smmu_context_fault, IRQF_SHARED,
+	ret = request_threaded_irq(irq, NULL, arm_smmu_context_fault,
+				IRQF_ONESHOT | IRQF_SHARED,
 			  "arm-smmu-context-fault", domain);
 	if (IS_ERR_VALUE(ret)) {
 		dev_err(smmu->dev, "failed to request context IRQ %d (%u)\n",
@@ -909,16 +1930,13 @@
 		cfg->irptndx = INVALID_IRPTNDX;
 	}
 
-	mutex_unlock(&smmu_domain->init_mutex);
-
 	/* Publish page table ops for map/unmap */
 	smmu_domain->pgtbl_ops = pgtbl_ops;
 	return 0;
 
 out_clear_smmu:
 	smmu_domain->smmu = NULL;
-out_unlock:
-	mutex_unlock(&smmu_domain->init_mutex);
+out:
 	return ret;
 }
 
@@ -930,9 +1948,8 @@
 	void __iomem *cb_base;
 	int irq;
 
-	if (!smmu)
-		return;
-
+	if (arm_smmu_enable_clocks(smmu_domain->smmu))
+		goto free_irqs;
 	/*
 	 * Disable the context bank and free the page tables before freeing
 	 * it.
@@ -940,15 +1957,32 @@
 	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
 	writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR);
 
+	arm_smmu_disable_clocks(smmu_domain->smmu);
+
+	if (smmu_domain->pgtbl_ops) {
+		free_io_pgtable_ops(smmu_domain->pgtbl_ops);
+		/* unassign any freed page table memory */
+		if (arm_smmu_is_master_side_secure(smmu_domain)) {
+			arm_smmu_secure_domain_lock(smmu_domain);
+			arm_smmu_secure_pool_destroy(smmu_domain);
+			arm_smmu_unassign_table(smmu_domain);
+			arm_smmu_secure_domain_unlock(smmu_domain);
+		}
+		smmu_domain->pgtbl_ops = NULL;
+	}
+
+free_irqs:
 	if (cfg->irptndx != INVALID_IRPTNDX) {
 		irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
 		free_irq(irq, domain);
 	}
 
-	if (smmu_domain->pgtbl_ops)
-		free_io_pgtable_ops(smmu_domain->pgtbl_ops);
-
-	__arm_smmu_free_bitmap(smmu->context_map, cfg->cbndx);
+	arm_smmu_free_context_idx(smmu, cfg->cbndx);
+	smmu_domain->smmu = NULL;
+	cfg->cbndx = INVALID_CBNDX;
+	cfg->irptndx = INVALID_IRPTNDX;
+	cfg->asid = INVALID_ASID;
+	cfg->vmid = INVALID_VMID;
 }
 
 static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
@@ -966,8 +2000,19 @@
 	if (!smmu_domain)
 		return NULL;
 
+	smmu_domain->secure_vmid = VMID_INVAL;
+	INIT_LIST_HEAD(&smmu_domain->pte_info_list);
+	INIT_LIST_HEAD(&smmu_domain->unassign_list);
+	INIT_LIST_HEAD(&smmu_domain->secure_pool_list);
+	smmu_domain->cfg.cbndx = INVALID_CBNDX;
+	smmu_domain->cfg.irptndx = INVALID_IRPTNDX;
+	smmu_domain->cfg.asid = INVALID_ASID;
+	smmu_domain->cfg.vmid = INVALID_VMID;
+
 	mutex_init(&smmu_domain->init_mutex);
-	spin_lock_init(&smmu_domain->pgtbl_lock);
+	spin_lock_init(&smmu_domain->pgtbl_spin_lock);
+	mutex_init(&smmu_domain->assign_lock);
+	mutex_init(&smmu_domain->pgtbl_mutex_lock);
 
 	return &smmu_domain->domain;
 }
@@ -980,7 +2025,18 @@
 	 * Free the domain resources. We assume that all devices have
 	 * already been detached.
 	 */
-	arm_smmu_destroy_domain_context(domain);
+	if (smmu_domain->pgtbl_ops) {
+		free_io_pgtable_ops(smmu_domain->pgtbl_ops);
+		/* unassign any freed page table memory */
+		if (arm_smmu_is_master_side_secure(smmu_domain)) {
+			arm_smmu_secure_domain_lock(smmu_domain);
+			arm_smmu_secure_pool_destroy(smmu_domain);
+			arm_smmu_unassign_table(smmu_domain);
+			arm_smmu_secure_domain_unlock(smmu_domain);
+		}
+		smmu_domain->pgtbl_ops = NULL;
+	}
+
 	kfree(smmu_domain);
 }
 
@@ -1006,8 +2062,8 @@
 
 	/* Allocate the SMRs on the SMMU */
 	for (i = 0; i < cfg->num_streamids; ++i) {
-		int idx = __arm_smmu_alloc_bitmap(smmu->smr_map, 0,
-						  smmu->num_mapping_groups);
+		int idx = arm_smmu_alloc_smr_idx(smmu, 0,
+				smmu->num_mapping_groups, cfg->streamids[i]);
 		if (IS_ERR_VALUE(idx)) {
 			dev_err(smmu->dev, "failed to allocate free SMR\n");
 			goto err_free_smrs;
@@ -1032,7 +2088,7 @@
 
 err_free_smrs:
 	while (--i >= 0)
-		__arm_smmu_free_bitmap(smmu->smr_map, smrs[i].idx);
+		arm_smmu_free_smr_idx(smmu, smrs[i].idx);
 	kfree(smrs);
 	return -ENOSPC;
 }
@@ -1052,7 +2108,7 @@
 		u8 idx = smrs[i].idx;
 
 		writel_relaxed(~SMR_VALID, gr0_base + ARM_SMMU_GR0_SMR(idx));
-		__arm_smmu_free_bitmap(smmu->smr_map, idx);
+		arm_smmu_free_smr_idx(smmu, idx);
 	}
 
 	cfg->smrs = NULL;
@@ -1098,6 +2154,8 @@
 	 * We *must* clear the S2CR first, because freeing the SMR means
 	 * that it can be re-allocated immediately.
 	 */
+	if (arm_smmu_enable_clocks(smmu))
+		return;
 	for (i = 0; i < cfg->num_streamids; ++i) {
 		u32 idx = cfg->smrs ? cfg->smrs[i].idx : cfg->streamids[i];
 
@@ -1106,6 +2164,123 @@
 	}
 
 	arm_smmu_master_free_smrs(smmu, cfg);
+	arm_smmu_disable_clocks(smmu);
+}
+
+static void arm_smmu_impl_def_programming(struct arm_smmu_device *smmu)
+{
+	int i;
+	struct arm_smmu_impl_def_reg *regs = smmu->impl_def_attach_registers;
+
+	arm_smmu_halt(smmu);
+	for (i = 0; i < smmu->num_impl_def_attach_registers; ++i)
+		writel_relaxed(regs[i].value,
+			ARM_SMMU_GR0(smmu) + regs[i].offset);
+	arm_smmu_resume(smmu);
+}
+
+static int arm_smmu_attach_dynamic(struct iommu_domain *domain,
+					struct arm_smmu_device *smmu)
+{
+	int ret;
+	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
+	enum io_pgtable_fmt fmt;
+	struct io_pgtable_ops *pgtbl_ops = NULL;
+	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
+
+	if (!(smmu->options & ARM_SMMU_OPT_DYNAMIC)) {
+		dev_err(smmu->dev, "dynamic domains not supported\n");
+		return -EPERM;
+	}
+
+	if (smmu_domain->smmu != NULL) {
+		dev_err(smmu->dev, "domain is already attached\n");
+		return -EBUSY;
+	}
+
+	if (smmu_domain->cfg.cbndx >= smmu->num_context_banks) {
+		dev_err(smmu->dev, "invalid context bank\n");
+		return -ENODEV;
+	}
+
+	if (smmu->features & ARM_SMMU_FEAT_TRANS_NESTED) {
+		smmu_domain->cfg.cbar = CBAR_TYPE_S1_TRANS_S2_BYPASS;
+	} else if (smmu->features & ARM_SMMU_FEAT_TRANS_S1) {
+		smmu_domain->cfg.cbar = CBAR_TYPE_S1_TRANS_S2_BYPASS;
+	} else {
+		/* dynamic only makes sense for S1. */
+		return -EINVAL;
+	}
+
+	smmu_domain->pgtbl_cfg = (struct io_pgtable_cfg) {
+		.pgsize_bitmap	= arm_smmu_ops.pgsize_bitmap,
+		.ias		= smmu->va_size,
+		.oas		= smmu->ipa_size,
+		.tlb		= &arm_smmu_gather_ops,
+		.iommu_dev	= smmu->dev,
+	};
+
+	fmt = IS_ENABLED(CONFIG_64BIT) ? ARM_64_LPAE_S1 : ARM_32_LPAE_S1;
+
+	pgtbl_ops = alloc_io_pgtable_ops(fmt, &smmu_domain->pgtbl_cfg,
+					 smmu_domain);
+	if (!pgtbl_ops)
+		return -ENOMEM;
+
+	/*
+	 * assign any page table memory that might have been allocated
+	 * during alloc_io_pgtable_ops
+	 */
+	if (arm_smmu_is_master_side_secure(smmu_domain)) {
+		arm_smmu_secure_domain_lock(smmu_domain);
+		arm_smmu_assign_table(smmu_domain);
+		arm_smmu_secure_domain_unlock(smmu_domain);
+	}
+
+	cfg->vmid = cfg->cbndx + 2;
+	smmu_domain->smmu = smmu;
+
+	mutex_lock(&smmu->attach_lock);
+	/* try to avoid reusing an old ASID right away */
+	ret = idr_alloc_cyclic(&smmu->asid_idr, domain,
+				smmu->num_context_banks + 2,
+				MAX_ASID + 1, GFP_KERNEL);
+	if (ret < 0) {
+		dev_err_ratelimited(smmu->dev,
+			"dynamic ASID allocation failed: %d\n", ret);
+		goto out;
+	}
+
+	smmu_domain->cfg.asid = ret;
+	smmu_domain->pgtbl_ops = pgtbl_ops;
+	ret = 0;
+out:
+	if (ret) {
+		free_io_pgtable_ops(pgtbl_ops);
+		/* don't leave a failed domain looking attached */
+		smmu_domain->smmu = NULL;
+	}
+	mutex_unlock(&smmu->attach_lock);
+
+	return ret;
+}
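
For reference, the client-side sequence this path expects might look as follows. This is a sketch built on the DOMAIN_ATTR_* handling added in the get_attr/set_attr hunks below; the function name is illustrative and error paths are simplified:

	static int demo_make_dynamic(struct device *dev,
				     struct iommu_domain *base /* attached */)
	{
		struct iommu_domain *dyn;
		int one = 1, cb, ret;

		dyn = iommu_domain_alloc(dev->bus);
		if (!dyn)
			return -ENOMEM;

		/* must be flagged dynamic before the context bank can be set */
		ret = iommu_domain_set_attr(dyn, DOMAIN_ATTR_DYNAMIC, &one);
		if (!ret)
			ret = iommu_domain_get_attr(base, DOMAIN_ATTR_CONTEXT_BANK, &cb);
		if (!ret)
			ret = iommu_domain_set_attr(dyn, DOMAIN_ATTR_CONTEXT_BANK, &cb);
		if (!ret)
			ret = iommu_attach_device(dyn, dev); /* dynamic path above */
		if (ret)
			iommu_domain_free(dyn);
		return ret;
	}
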
+
+static int arm_smmu_populate_cb(struct arm_smmu_device *smmu,
+		struct arm_smmu_domain *smmu_domain, struct device *dev)
+{
+	struct arm_smmu_master_cfg *cfg;
+	struct arm_smmu_cfg *smmu_cfg = &smmu_domain->cfg;
+	struct static_cbndx_entry *entry;
+
+	cfg = find_smmu_master_cfg(dev);
+	if (!cfg)
+		return -ENODEV;
+
+	entry = arm_smmu_get_static_entry_from_sid(smmu, cfg->streamids[0]);
+	if (entry && entry->type == TYPE_TRANS) {
+		smmu_cfg->cbndx = entry->cbndx;
+		return 0;
+	}
+
+	return -EINVAL;
 }
 
 static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
@@ -1114,22 +2289,83 @@
 	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
 	struct arm_smmu_device *smmu;
 	struct arm_smmu_master_cfg *cfg;
+	int atomic_ctx = smmu_domain->attributes & (1 << DOMAIN_ATTR_ATOMIC);
 
+	mutex_lock(&smmu_domain->init_mutex);
 	smmu = find_smmu_for_device(dev);
 	if (!smmu) {
 		dev_err(dev, "cannot attach to SMMU, is it on the same bus?\n");
+		mutex_unlock(&smmu_domain->init_mutex);
 		return -ENXIO;
 	}
 
+	if (smmu_domain->attributes & (1 << DOMAIN_ATTR_DYNAMIC)) {
+		ret = arm_smmu_attach_dynamic(domain, smmu);
+		mutex_unlock(&smmu_domain->init_mutex);
+		return ret;
+	}
+
+	mutex_lock(&smmu->attach_lock);
+
 	if (dev->archdata.iommu) {
 		dev_err(dev, "already attached to IOMMU domain\n");
-		return -EEXIST;
+		ret = -EEXIST;
+		goto err_unlock;
+	}
+
+	if (!smmu->attach_count) {
+		/*
+		 * We need an extra power vote if we can't retain register
+		 * settings across a power collapse, or if this is an
+		 * atomic domain (since atomic domains can't sleep during
+		 * unmap, so regulators already need to be on to enable tlb
+		 * invalidation).  The result (due to regulator
+		 * refcounting) is that we never disable regulators while a
+		 * client is attached in these cases.
+		 */
+		if (!(smmu->options & ARM_SMMU_OPT_REGISTER_SAVE)) {
+			ret = arm_smmu_enable_regulators(smmu);
+			if (ret)
+				goto err_unlock;
+		}
+		ret = arm_smmu_enable_clocks(smmu);
+		if (ret)
+			goto err_disable_regulators;
+		arm_smmu_device_reset(smmu);
+		arm_smmu_impl_def_programming(smmu);
+	} else {
+		ret = arm_smmu_enable_clocks(smmu);
+		if (ret)
+			goto err_unlock;
+	}
+	smmu->attach_count++;
+
+	if (atomic_ctx) {
+		ret = arm_smmu_enable_regulators(smmu);
+		if (ret)
+			goto err_disable_clocks;
+	}
+
+	if (arm_smmu_is_static_cb(smmu)) {
+		ret = arm_smmu_populate_cb(smmu, smmu_domain, dev);
+
+		if (ret) {
+			dev_err(dev, "Failed to get valid context bank\n");
+			goto err_atomic_ctx;
+		}
+		smmu_domain->slave_side_secure = true;
+	}
+
+	cfg = find_smmu_master_cfg(dev);
+	if (!cfg) {
+		ret = -ENODEV;
+		goto err_atomic_ctx;
 	}
 
 	/* Ensure that the domain is finalised */
-	ret = arm_smmu_init_domain_context(domain, smmu);
+	ret = arm_smmu_init_domain_context(domain, smmu, cfg);
 	if (IS_ERR_VALUE(ret))
-		return ret;
+		goto err_atomic_ctx;
 
 	/*
 	 * Sanity check the domain. We don't support domains across
@@ -1139,31 +2375,196 @@
 		dev_err(dev,
 			"cannot attach to SMMU %s whilst already attached to domain on SMMU %s\n",
 			dev_name(smmu_domain->smmu->dev), dev_name(smmu->dev));
-		return -EINVAL;
+		ret = -EINVAL;
+		goto err_destroy_domain_context;
 	}
 
 	/* Looks ok, so add the device to the domain */
-	cfg = find_smmu_master_cfg(dev);
-	if (!cfg)
-		return -ENODEV;
-
 	ret = arm_smmu_domain_add_master(smmu_domain, cfg);
-	if (!ret)
+	if (ret)
+		goto err_destroy_domain_context;
 		dev->archdata.iommu = domain;
+	arm_smmu_disable_clocks(smmu);
+	mutex_unlock(&smmu->attach_lock);
+	mutex_unlock(&smmu_domain->init_mutex);
+	return ret;
+
+err_destroy_domain_context:
+	arm_smmu_destroy_domain_context(domain);
+err_atomic_ctx:
+	if (atomic_ctx)
+		arm_smmu_disable_regulators(smmu);
+err_disable_clocks:
+	arm_smmu_disable_clocks(smmu);
+	--smmu->attach_count;
+err_disable_regulators:
+	if (!smmu->attach_count &&
+	    (!(smmu->options & ARM_SMMU_OPT_REGISTER_SAVE)))
+		arm_smmu_disable_regulators(smmu);
+err_unlock:
+	mutex_unlock(&smmu->attach_lock);
+	mutex_unlock(&smmu_domain->init_mutex);
 	return ret;
 }
 
+static void arm_smmu_power_off(struct arm_smmu_device *smmu)
+{
+	/* Turn the thing off */
+	if (arm_smmu_enable_clocks(smmu))
+		return;
+	writel_relaxed(sCR0_CLIENTPD,
+		ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
+	arm_smmu_disable_clocks(smmu);
+	if (!(smmu->options & ARM_SMMU_OPT_REGISTER_SAVE))
+		arm_smmu_disable_regulators(smmu);
+}
+
+static void arm_smmu_detach_dynamic(struct iommu_domain *domain,
+					struct arm_smmu_device *smmu)
+{
+	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
+
+	mutex_lock(&smmu->attach_lock);
+	if (smmu->attach_count > 0) {
+		if (arm_smmu_enable_clocks(smmu_domain->smmu))
+			goto idr_remove;
+		arm_smmu_tlb_inv_context(smmu_domain);
+		arm_smmu_disable_clocks(smmu_domain->smmu);
+	}
+idr_remove:
+	idr_remove(&smmu->asid_idr, smmu_domain->cfg.asid);
+	smmu_domain->cfg.asid = INVALID_ASID;
+	smmu_domain->smmu = NULL;
+	mutex_unlock(&smmu->attach_lock);
+}
+
 static void arm_smmu_detach_dev(struct iommu_domain *domain, struct device *dev)
 {
 	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
 	struct arm_smmu_master_cfg *cfg;
+	struct arm_smmu_device *smmu;
+	int atomic_ctx = smmu_domain->attributes & (1 << DOMAIN_ATTR_ATOMIC);
+
+	mutex_lock(&smmu_domain->init_mutex);
+	smmu = smmu_domain->smmu;
+	if (!smmu) {
+		dev_err(dev, "Domain already detached!\n");
+		mutex_unlock(&smmu_domain->init_mutex);
+		return;
+	}
+
+	if (smmu_domain->attributes & (1 << DOMAIN_ATTR_DYNAMIC)) {
+		arm_smmu_detach_dynamic(domain, smmu);
+		mutex_unlock(&smmu_domain->init_mutex);
+		if (atomic_ctx)
+			arm_smmu_disable_regulators(smmu);
+		return;
+	}
+
+	mutex_lock(&smmu->attach_lock);
 
 	cfg = find_smmu_master_cfg(dev);
 	if (!cfg)
-		return;
+		goto unlock;
 
 	dev->archdata.iommu = NULL;
 	arm_smmu_domain_remove_master(smmu_domain, cfg);
+	arm_smmu_destroy_domain_context(domain);
+	if (!--smmu->attach_count)
+		arm_smmu_power_off(smmu);
+	if (atomic_ctx)
+		arm_smmu_disable_regulators(smmu);
+unlock:
+	mutex_unlock(&smmu->attach_lock);
+	mutex_unlock(&smmu_domain->init_mutex);
+}
+
+static int arm_smmu_assign_table(struct arm_smmu_domain *smmu_domain)
+{
+	int ret = 0;
+	int dest_vmids[2] = {VMID_HLOS, smmu_domain->secure_vmid};
+	int dest_perms[2] = {PERM_READ | PERM_WRITE, PERM_READ};
+	int source_vmid = VMID_HLOS;
+	struct arm_smmu_pte_info *pte_info, *temp;
+
+	if (!arm_smmu_is_master_side_secure(smmu_domain))
+		return ret;
+
+	list_for_each_entry(pte_info, &smmu_domain->pte_info_list, entry) {
+		ret = hyp_assign_phys(virt_to_phys(pte_info->virt_addr),
+					PAGE_SIZE, &source_vmid, 1,
+					dest_vmids, dest_perms, 2);
+		if (WARN_ON(ret))
+			break;
+	}
+
+	list_for_each_entry_safe(pte_info, temp, &smmu_domain->pte_info_list,
+							entry) {
+		list_del(&pte_info->entry);
+		kfree(pte_info);
+	}
+	return ret;
+}
+
+static void arm_smmu_unassign_table(struct arm_smmu_domain *smmu_domain)
+{
+	int ret;
+	int dest_vmids = VMID_HLOS;
+	int dest_perms = PERM_READ | PERM_WRITE | PERM_EXEC;
+	int source_vmlist[2] = {smmu_domain->secure_vmid, VMID_HLOS};
+	struct arm_smmu_pte_info *pte_info, *temp;
+
+	if (!arm_smmu_is_master_side_secure(smmu_domain))
+		return;
+
+	list_for_each_entry(pte_info, &smmu_domain->unassign_list, entry) {
+		ret = hyp_assign_phys(virt_to_phys(pte_info->virt_addr),
+				PAGE_SIZE, source_vmlist, 2,
+				&dest_vmids, &dest_perms, 1);
+		if (WARN_ON(ret))
+			break;
+
+		free_pages_exact(pte_info->virt_addr, pte_info->size);
+	}
+
+	list_for_each_entry_safe(pte_info, temp, &smmu_domain->unassign_list,
+							entry) {
+		list_del(&pte_info->entry);
+		kfree(pte_info);
+	}
+}
+
+static void arm_smmu_unprepare_pgtable(void *cookie, void *addr, size_t size)
+{
+	struct arm_smmu_domain *smmu_domain = cookie;
+	struct arm_smmu_pte_info *pte_info;
+
+	BUG_ON(!arm_smmu_is_master_side_secure(smmu_domain));
+
+	pte_info = kzalloc(sizeof(struct arm_smmu_pte_info), GFP_ATOMIC);
+	if (!pte_info)
+		return;
+
+	pte_info->virt_addr = addr;
+	pte_info->size = size;
+	list_add_tail(&pte_info->entry, &smmu_domain->unassign_list);
+}
+
+static void arm_smmu_prepare_pgtable(void *addr, void *cookie)
+{
+	struct arm_smmu_domain *smmu_domain = cookie;
+	struct arm_smmu_pte_info *pte_info;
+
+	BUG_ON(!arm_smmu_is_master_side_secure(smmu_domain));
+
+	pte_info = kzalloc(sizeof(struct arm_smmu_pte_info), GFP_ATOMIC);
+	if (!pte_info)
+		return;
+	pte_info->virt_addr = addr;
+	list_add_tail(&pte_info->entry, &smmu_domain->pte_info_list);
 }
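
Taken together, the four helpers above implement a deferred assign/unassign pipeline; the intended flow (editor's reading of the code, not vendor documentation) is:

	/*
	 *   map path:
	 *     io-pgtable allocates a table page
	 *       -> arm_smmu_prepare_pgtable()    queue on pte_info_list
	 *     arm_smmu_map()/map_sg()/unmap()
	 *       -> arm_smmu_assign_table()       hyp-assign queued pages to
	 *                                        {VMID_HLOS, secure_vmid}
	 *
	 *   unmap path:
	 *     io-pgtable frees a table page
	 *       -> arm_smmu_unprepare_pgtable()  queue on unassign_list
	 *     arm_smmu_unmap()/domain teardown
	 *       -> arm_smmu_unassign_table()     return pages to VMID_HLOS,
	 *                                        then free_pages_exact()
	 */
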
 
 static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
@@ -1177,9 +2578,87 @@
 	if (!ops)
 		return -ENODEV;
 
-	spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
+	arm_smmu_secure_domain_lock(smmu_domain);
+
+	flags = arm_smmu_pgtbl_lock(smmu_domain);
 	ret = ops->map(ops, iova, paddr, size, prot);
-	spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
+	arm_smmu_pgtbl_unlock(smmu_domain, flags);
+
+	if (!ret)
+		ret = arm_smmu_assign_table(smmu_domain);
+
+	arm_smmu_secure_domain_unlock(smmu_domain);
+
+	return ret;
+}
+
+static uint64_t arm_smmu_iova_to_pte(struct iommu_domain *domain,
+	      dma_addr_t iova)
+{
+	uint64_t ret;
+	unsigned long flags;
+	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
+	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
+
+	if (!ops)
+		return 0;
+
+	flags = arm_smmu_pgtbl_lock(smmu_domain);
+	ret = ops->iova_to_pte(ops, iova);
+	arm_smmu_pgtbl_unlock(smmu_domain, flags);
+	return ret;
+}
+
+static size_t arm_smmu_map_sg(struct iommu_domain *domain, unsigned long iova,
+			   struct scatterlist *sg, unsigned int nents, int prot)
+{
+	int ret;
+	size_t size;
+	unsigned long flags;
+	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
+	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
+	struct arm_smmu_device *smmu = smmu_domain->smmu;
+	int atomic_ctx = smmu_domain->attributes & (1 << DOMAIN_ATTR_ATOMIC);
+
+	if (!ops)
+		return -ENODEV;
+
+	if (arm_smmu_is_slave_side_secure(smmu_domain) && atomic_ctx) {
+		dev_err(smmu->dev, "Slave side atomic context not supported\n");
+		return 0;
+	}
+
+	if (arm_smmu_is_slave_side_secure(smmu_domain)) {
+		mutex_lock(&smmu_domain->init_mutex);
+
+		if (arm_smmu_enable_clocks(smmu)) {
+			mutex_unlock(&smmu_domain->init_mutex);
+			return 0;
+		}
+	}
+
+	arm_smmu_secure_domain_lock(smmu_domain);
+
+	flags = arm_smmu_pgtbl_lock(smmu_domain);
+	ret = ops->map_sg(ops, iova, sg, nents, prot, &size);
+	arm_smmu_pgtbl_unlock(smmu_domain, flags);
+
+	if (ret) {
+		if (arm_smmu_assign_table(smmu_domain)) {
+			ret = 0;
+			goto out;
+		}
+	} else {
+		/* unmap() takes the secure-domain lock itself */
+		arm_smmu_secure_domain_unlock(smmu_domain);
+		arm_smmu_unmap(domain, iova, size);
+		arm_smmu_secure_domain_lock(smmu_domain);
+	}
+
+out:
+	arm_smmu_secure_domain_unlock(smmu_domain);
+	if (arm_smmu_is_slave_side_secure(smmu_domain)) {
+		arm_smmu_disable_clocks(smmu_domain->smmu);
+		mutex_unlock(&smmu_domain->init_mutex);
+	}
 	return ret;
 }
 
@@ -1190,31 +2669,206 @@
 	unsigned long flags;
 	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
 	struct io_pgtable_ops *ops= smmu_domain->pgtbl_ops;
+	int atomic_ctx = smmu_domain->attributes & (1 << DOMAIN_ATTR_ATOMIC);
 
 	if (!ops)
 		return 0;
 
-	spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
+	if (arm_smmu_is_slave_side_secure(smmu_domain) && atomic_ctx) {
+		dev_err(smmu_domain->smmu->dev,
+				"Slave side atomic context not supported\n");
+		return 0;
+	}
+
+	/*
+	 * domain *must* be attached to an SMMU during unmap.  This
+	 * domain *must* must be attached an SMMU during unmap.  This
+	 * function calls other functions that try to use smmu_domain->smmu
+	 * if it's not NULL (like the tlb invalidation routines).  So if
+	 * the client sets DOMAIN_ATTR_ATOMIC and detaches in the middle of
+	 * the unmap the smmu instance could go away and we could
+	 * dereference NULL.  This little BUG_ON should catch most gross
+	 * offenders but if atomic clients violate this contract then this
+	 * code is racy.
+	 */
+	BUG_ON(atomic_ctx && !smmu_domain->smmu);
+
+	if (atomic_ctx) {
+		if (arm_smmu_enable_clocks_atomic(smmu_domain->smmu))
+			return 0;
+	} else {
+		mutex_lock(&smmu_domain->init_mutex);
+		arm_smmu_secure_domain_lock(smmu_domain);
+		if (smmu_domain->smmu &&
+		    arm_smmu_enable_clocks(smmu_domain->smmu)) {
+			arm_smmu_secure_domain_unlock(smmu_domain);
+			mutex_unlock(&smmu_domain->init_mutex);
+			return 0;
+		}
+	}
+
+	flags = arm_smmu_pgtbl_lock(smmu_domain);
 	ret = ops->unmap(ops, iova, size);
-	spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
+	arm_smmu_pgtbl_unlock(smmu_domain, flags);
+
+	/*
+	 * While splitting up block mappings, we might allocate page table
+	 * memory during unmap, so the VMIDs need to be assigned to the
+	 * memory here as well.
+	 */
+	if (arm_smmu_assign_table(smmu_domain)) {
+		arm_smmu_unassign_table(smmu_domain);
+		arm_smmu_secure_domain_unlock(smmu_domain);
+		mutex_unlock(&smmu_domain->init_mutex);
+		return 0;
+	}
+
+	/* Also unassign any pages that were freed during unmap */
+	arm_smmu_unassign_table(smmu_domain);
+
+	if (atomic_ctx) {
+		arm_smmu_disable_clocks_atomic(smmu_domain->smmu);
+	} else {
+		if (smmu_domain->smmu)
+			arm_smmu_disable_clocks(smmu_domain->smmu);
+		arm_smmu_secure_domain_unlock(smmu_domain);
+		mutex_unlock(&smmu_domain->init_mutex);
+	}
+
 	return ret;
 }
 
-static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
+static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
 					      dma_addr_t iova)
 {
+	phys_addr_t ret;
+	unsigned long flags;
+	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
+	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
+
+	if (!ops)
+		return 0;
+
+	flags = arm_smmu_pgtbl_lock(smmu_domain);
+	ret = ops->iova_to_phys(ops, iova);
+	arm_smmu_pgtbl_unlock(smmu_domain, flags);
+	return ret;
+}
+
+static bool arm_smmu_is_iova_coherent(struct iommu_domain *domain,
+					 dma_addr_t iova)
+{
+	bool ret;
+	unsigned long flags;
+	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
+	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
+
+	if (!ops)
+		return false;
+
+	flags = arm_smmu_pgtbl_lock(smmu_domain);
+	ret = ops->is_iova_coherent(ops, iova);
+	arm_smmu_pgtbl_unlock(smmu_domain, flags);
+	return ret;
+}
+
+static int arm_smmu_wait_for_halt(struct arm_smmu_device *smmu)
+{
+	void __iomem *impl_def1_base = ARM_SMMU_IMPL_DEF1(smmu);
+	u32 tmp;
+
+	if (readl_poll_timeout_atomic(impl_def1_base + IMPL_DEF1_MICRO_MMU_CTRL,
+				      tmp, (tmp & MICRO_MMU_CTRL_IDLE),
+				      0, 30000)) {
+		dev_err(smmu->dev, "Couldn't halt SMMU!\n");
+		return -EBUSY;
+	}
+
+	return 0;
+}
+
+static int __arm_smmu_halt(struct arm_smmu_device *smmu, bool wait)
+{
+	u32 reg;
+	void __iomem *impl_def1_base = ARM_SMMU_IMPL_DEF1(smmu);
+
+	reg = readl_relaxed(impl_def1_base + IMPL_DEF1_MICRO_MMU_CTRL);
+	reg |= MICRO_MMU_CTRL_LOCAL_HALT_REQ;
+
+	if (arm_smmu_is_static_cb(smmu)) {
+		phys_addr_t impl_def1_base_phys = impl_def1_base - smmu->base +
+							smmu->phys_addr;
+
+		if (scm_io_write(impl_def1_base_phys +
+				IMPL_DEF1_MICRO_MMU_CTRL, reg)) {
+			dev_err(smmu->dev,
+				"scm_io_write failed; SMMU might not be halted\n");
+			return -EINVAL;
+		}
+	} else {
+		writel_relaxed(reg, impl_def1_base + IMPL_DEF1_MICRO_MMU_CTRL);
+	}
+
+	return wait ? arm_smmu_wait_for_halt(smmu) : 0;
+}
+
+static int arm_smmu_halt(struct arm_smmu_device *smmu)
+{
+	return __arm_smmu_halt(smmu, true);
+}
+
+static int arm_smmu_halt_nowait(struct arm_smmu_device *smmu)
+{
+	return __arm_smmu_halt(smmu, false);
+}
+
+static void arm_smmu_resume(struct arm_smmu_device *smmu)
+{
+	void __iomem *impl_def1_base = ARM_SMMU_IMPL_DEF1(smmu);
+	u32 reg;
+
+	if (arm_smmu_restore_sec_cfg(smmu))
+		return;
+
+	reg = readl_relaxed(impl_def1_base + IMPL_DEF1_MICRO_MMU_CTRL);
+	reg &= ~MICRO_MMU_CTRL_LOCAL_HALT_REQ;
+
+	if (arm_smmu_is_static_cb(smmu)) {
+		phys_addr_t impl_def1_base_phys = impl_def1_base - smmu->base +
+							smmu->phys_addr;
+
+		if (scm_io_write(impl_def1_base_phys +
+				IMPL_DEF1_MICRO_MMU_CTRL, reg))
+			dev_err(smmu->dev,
+				"scm_io_write failed; SMMU might not be resumed\n");
+	} else {
+		writel_relaxed(reg, impl_def1_base + IMPL_DEF1_MICRO_MMU_CTRL);
+	}
+}
+
+static phys_addr_t __arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
+						dma_addr_t iova, bool do_halt)
+{
 	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
 	struct arm_smmu_device *smmu = smmu_domain->smmu;
 	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
-	struct io_pgtable_ops *ops= smmu_domain->pgtbl_ops;
 	struct device *dev = smmu->dev;
 	void __iomem *cb_base;
 	u32 tmp;
 	u64 phys;
 	unsigned long va;
+	unsigned long flags;
+
+	if (arm_smmu_enable_clocks(smmu))
+		return 0;
 
 	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
 
+	spin_lock_irqsave(&smmu->atos_lock, flags);
+
+	if (do_halt && arm_smmu_halt(smmu))
+		goto err_unlock;
+
 	/* ATS1 registers can only be written atomically */
 	va = iova & ~0xfffUL;
 	if (smmu->version == ARM_SMMU_V2)
@@ -1224,46 +2878,115 @@
 
 	if (readl_poll_timeout_atomic(cb_base + ARM_SMMU_CB_ATSR, tmp,
 				      !(tmp & ATSR_ACTIVE), 5, 50)) {
-		dev_err(dev,
-			"iova to phys timed out on %pad. Falling back to software table walk.\n",
-			&iova);
-		return ops->iova_to_phys(ops, iova);
+		dev_err(dev, "iova to phys timed out\n");
+		goto err_resume;
 	}
 
 	phys = readl_relaxed(cb_base + ARM_SMMU_CB_PAR_LO);
 	phys |= ((u64)readl_relaxed(cb_base + ARM_SMMU_CB_PAR_HI)) << 32;
 
+	if (do_halt)
+		arm_smmu_resume(smmu);
+	spin_unlock_irqrestore(&smmu->atos_lock, flags);
+
 	if (phys & CB_PAR_F) {
-		dev_err(dev, "translation fault!\n");
+		dev_err(dev, "translation fault on %s!\n", dev_name(dev));
 		dev_err(dev, "PAR = 0x%llx\n", phys);
-		return 0;
+		phys = 0;
+	} else {
+		phys = (phys & (PHYS_MASK & ~0xfffULL)) | (iova & 0xfff);
 	}
 
-	return (phys & GENMASK_ULL(39, 12)) | (iova & 0xfff);
+	arm_smmu_disable_clocks(smmu);
+	return phys;
+
+err_resume:
+	if (do_halt)
+		arm_smmu_resume(smmu);
+err_unlock:
+	spin_unlock_irqrestore(&smmu->atos_lock, flags);
+	arm_smmu_disable_clocks(smmu);
+	phys = arm_smmu_iova_to_phys(domain, iova);
+	dev_err(dev,
+		"iova to phys failed 0x%pa. software table walk result=%pa.\n",
+		"iova to phys failed at %pa. software table walk result=%pa.\n",
+	return 0;
 }
 
-static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
+static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
 					dma_addr_t iova)
 {
-	phys_addr_t ret;
-	unsigned long flags;
+	return __arm_smmu_iova_to_phys_hard(domain, iova, true);
+}
+
+static phys_addr_t arm_smmu_iova_to_phys_hard_no_halt(
+	struct iommu_domain *domain, dma_addr_t iova)
+{
+	return __arm_smmu_iova_to_phys_hard(domain, iova, false);
+}
+
+static unsigned long arm_smmu_reg_read(struct iommu_domain *domain,
+				       unsigned long offset)
+{
 	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
-	struct io_pgtable_ops *ops= smmu_domain->pgtbl_ops;
+	struct arm_smmu_device *smmu;
+	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
+	void __iomem *cb_base;
+	unsigned long val;
 
-	if (!ops)
+	if (offset >= SZ_4K) {
+		pr_err("Invalid offset: 0x%lx\n", offset);
 		return 0;
+	}
 
-	spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
-	if (smmu_domain->smmu->features & ARM_SMMU_FEAT_TRANS_OPS &&
-			smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
-		ret = arm_smmu_iova_to_phys_hard(domain, iova);
-	} else {
-		ret = ops->iova_to_phys(ops, iova);
+	mutex_lock(&smmu_domain->init_mutex);
+	smmu = smmu_domain->smmu;
+	if (!smmu) {
+		WARN(1, "Can't read registers of a detached domain\n");
+		val = 0;
+		goto unlock;
 	}
 
-	spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
+	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
+	if (arm_smmu_enable_clocks(smmu)) {
+		val = 0;
+		goto unlock;
+	}
+	val = readl_relaxed(cb_base + offset);
+	arm_smmu_disable_clocks(smmu);
 
-	return ret;
+unlock:
+	mutex_unlock(&smmu_domain->init_mutex);
+	return val;
+}
+
+static void arm_smmu_reg_write(struct iommu_domain *domain,
+			       unsigned long offset, unsigned long val)
+{
+	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
+	struct arm_smmu_device *smmu;
+	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
+	void __iomem *cb_base;
+
+	if (offset >= SZ_4K) {
+		pr_err("Invalid offset: 0x%lx\n", offset);
+		return;
+	}
+
+	mutex_lock(&smmu_domain->init_mutex);
+	smmu = smmu_domain->smmu;
+	if (!smmu) {
+		WARN(1, "Can't write registers of a detached domain\n");
+		goto unlock;
+	}
+
+	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
+	if (arm_smmu_enable_clocks(smmu))
+		goto unlock;
+	writel_relaxed(val, cb_base + offset);
+	arm_smmu_disable_clocks(smmu);
+unlock:
+	mutex_unlock(&smmu_domain->init_mutex);
 }
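
As a usage illustration, a client with debug access could poll and clear a context bank's fault status through these two ops. A sketch only; FSR_FAULT and ARM_SMMU_CB_FSR are the driver-internal constants used above, reproduced here for clarity:

	static void demo_clear_cb_faults(struct iommu_domain *domain)
	{
		unsigned long fsr = domain->ops->reg_read(domain, ARM_SMMU_CB_FSR);

		if (fsr & FSR_FAULT)		/* FSR is write-1-to-clear */
			domain->ops->reg_write(domain, ARM_SMMU_CB_FSR, fsr);
	}
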
 
 static bool arm_smmu_capable(enum iommu_cap cap)
@@ -1284,12 +3007,6 @@
 	}
 }
 
-static int __arm_smmu_get_pci_sid(struct pci_dev *pdev, u16 alias, void *data)
-{
-	*((u16 *)data) = alias;
-	return 0; /* Continue walking */
-}
-
 static void __arm_smmu_release_pci_iommudata(void *data)
 {
 	kfree(data);
@@ -1299,8 +3016,9 @@
 				    struct iommu_group *group)
 {
 	struct arm_smmu_master_cfg *cfg;
-	u16 sid;
-	int i;
+	u32 sid;
+	int tmp, ret;
+	struct device *dev = &pdev->dev;
 
 	cfg = iommu_group_get_iommudata(group);
 	if (!cfg) {
@@ -1315,17 +3033,13 @@
 	if (cfg->num_streamids >= MAX_MASTER_STREAMIDS)
 		return -ENOSPC;
 
-	/*
-	 * Assume Stream ID == Requester ID for now.
-	 * We need a way to describe the ID mappings in FDT.
-	 */
-	pci_for_each_dma_alias(pdev, __arm_smmu_get_pci_sid, &sid);
-	for (i = 0; i < cfg->num_streamids; ++i)
-		if (cfg->streamids[i] == sid)
-			break;
-
-	/* Avoid duplicate SIDs, as this can lead to SMR conflicts */
-	if (i == cfg->num_streamids)
+	ret = msm_pcie_configure_sid(dev, &sid, &tmp);
+	if (ret) {
+		dev_err(dev,
+			"Couldn't configure SID through PCI-e driver: %d\n",
+			ret);
+		return ret;
+	}
 		cfg->streamids[cfg->num_streamids++] = sid;
 
 	return 0;
@@ -1370,12 +3084,18 @@
 	struct iommu_group *group;
 	int ret;
 
-	if (dev_is_pci(dev))
-		group = pci_device_group(dev);
-	else
+	/*
+	 * We used to call pci_device_group here for dev_is_pci(dev)
+	 * devices.  However, that causes the root complex device to be
+	 * placed in the same group as endpoint devices (and probably puts
+	 * all endpoint devices in the same group as well), which makes
+	 * things tricky in the DMA layer since we don't actually want to
+	 * attach *everybody* in the group when one client calls attach.
+	 * Instead, we'll just allocate a new group for everybody here.
+	 */
 		group = generic_device_group(dev);
 
-	if (IS_ERR(group))
+	if (IS_ERR_OR_NULL(group))
 		return group;
 
 	if (dev_is_pci(dev))
@@ -1394,15 +3114,120 @@
 static int arm_smmu_domain_get_attr(struct iommu_domain *domain,
 				    enum iommu_attr attr, void *data)
 {
+	int ret;
 	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
 
+	mutex_lock(&smmu_domain->init_mutex);
 	switch (attr) {
 	case DOMAIN_ATTR_NESTING:
 		*(int *)data = (smmu_domain->stage == ARM_SMMU_DOMAIN_NESTED);
-		return 0;
-	default:
+		ret = 0;
+		break;
+	case DOMAIN_ATTR_SECURE_VMID:
+		*((int *)data) = smmu_domain->secure_vmid;
+		ret = 0;
+		break;
+	case DOMAIN_ATTR_PT_BASE_ADDR:
+		*((phys_addr_t *)data) =
+			smmu_domain->pgtbl_cfg.arm_lpae_s1_cfg.ttbr[0];
+		ret = 0;
+		break;
+	case DOMAIN_ATTR_CONTEXT_BANK:
+		/* context bank index isn't valid until we are attached */
+		if (smmu_domain->smmu == NULL) {
+			ret = -ENODEV;	/* don't return with init_mutex held */
+			break;
+		}
+
+		*((unsigned int *) data) = smmu_domain->cfg.cbndx;
+		ret = 0;
+		break;
+	case DOMAIN_ATTR_TTBR0: {
+		u64 val;
+		/* not valid until we are attached */
+		if (smmu_domain->smmu == NULL) {
+			ret = -ENODEV;
+			break;
+		}
+
+		val = smmu_domain->pgtbl_cfg.arm_lpae_s1_cfg.ttbr[0];
+		if (smmu_domain->cfg.cbar != CBAR_TYPE_S2_TRANS)
+			val |= (u64)ARM_SMMU_CB_ASID(&smmu_domain->cfg)
+					<< TTBRn_ASID_SHIFT;
+		*((u64 *)data) = val;
+		ret = 0;
+		break;
 	}
+	case DOMAIN_ATTR_CONTEXTIDR:
+		/* not valid until attached */
+		if (smmu_domain->smmu == NULL) {
+			ret = -ENODEV;
+			break;
+		}
+		*((u32 *)data) = smmu_domain->cfg.procid;
+		ret = 0;
+		break;
+	case DOMAIN_ATTR_PROCID:
+		*((u32 *)data) = smmu_domain->cfg.procid;
+		ret = 0;
+		break;
+	case DOMAIN_ATTR_DYNAMIC:
+		*((int *)data) = !!(smmu_domain->attributes
+					& (1 << DOMAIN_ATTR_DYNAMIC));
+		ret = 0;
+		break;
+	case DOMAIN_ATTR_NON_FATAL_FAULTS:
+		*((int *)data) = !!(smmu_domain->attributes
+				    & (1 << DOMAIN_ATTR_NON_FATAL_FAULTS));
+		ret = 0;
+		break;
+	case DOMAIN_ATTR_S1_BYPASS:
+		*((int *)data) = !!(smmu_domain->attributes
+				    & (1 << DOMAIN_ATTR_S1_BYPASS));
+		ret = 0;
+		break;
+	case DOMAIN_ATTR_FAST:
+		*((int *)data) = !!(smmu_domain->attributes
+					& (1 << DOMAIN_ATTR_FAST));
+		ret = 0;
+		break;
+	case DOMAIN_ATTR_PGTBL_INFO: {
+		struct iommu_pgtbl_info *info = data;
+
+		if (!(smmu_domain->attributes & (1 << DOMAIN_ATTR_FAST))) {
+			ret = -ENODEV;
+			break;
+		}
+		info->pmds = smmu_domain->pgtbl_cfg.av8l_fast_cfg.pmds;
+		ret = 0;
+		break;
+	}
+	case DOMAIN_ATTR_EARLY_MAP:
+		*((int *)data) = !!(smmu_domain->attributes
+				    & (1 << DOMAIN_ATTR_EARLY_MAP));
+		ret = 0;
+		break;
+	case DOMAIN_ATTR_PAGE_TABLE_IS_COHERENT:
+		if (!smmu_domain->smmu) {
+			ret = -ENODEV;
+			break;
+		}
+		*((int *)data) = is_iommu_pt_coherent(smmu_domain);
+		ret = 0;
+		break;
+	case DOMAIN_ATTR_PAGE_TABLE_FORCE_COHERENT:
+		*((int *)data) = !!(smmu_domain->attributes
+			& (1 << DOMAIN_ATTR_PAGE_TABLE_FORCE_COHERENT));
+		ret = 0;
+		break;
+	case DOMAIN_ATTR_ENABLE_TTBR1:
+		*((int *)data) = !!(smmu_domain->attributes
+					& (1 << DOMAIN_ATTR_ENABLE_TTBR1));
+		ret = 0;
+		break;
+	case DOMAIN_ATTR_CB_STALL_DISABLE:
+		*((int *)data) = !!(smmu_domain->attributes
+			& (1 << DOMAIN_ATTR_CB_STALL_DISABLE));
+		ret = 0;
+		break;
+	default:
+		ret = -ENODEV;
+		break;
+	}
+	mutex_unlock(&smmu_domain->init_mutex);
+	return ret;
 }
 
 static int arm_smmu_domain_set_attr(struct iommu_domain *domain,
@@ -1424,10 +3249,174 @@
 			smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED;
 		else
 			smmu_domain->stage = ARM_SMMU_DOMAIN_S1;
+		break;
+	case DOMAIN_ATTR_SECURE_VMID:
+		BUG_ON(smmu_domain->secure_vmid != VMID_INVAL);
+		smmu_domain->secure_vmid = *((int *)data);
+		break;
+	case DOMAIN_ATTR_ATOMIC:
+	{
+		int atomic_ctx = *((int *)data);
+		if (atomic_ctx)
+			smmu_domain->attributes |= (1 << DOMAIN_ATTR_ATOMIC);
+		else
+			smmu_domain->attributes &= ~(1 << DOMAIN_ATTR_ATOMIC);
+		break;
+	}
+	case DOMAIN_ATTR_PROCID:
+		if (smmu_domain->smmu != NULL) {
+			dev_err(smmu_domain->smmu->dev,
+			  "cannot change procid attribute while attached\n");
+			ret = -EBUSY;
+			break;
+		}
+		smmu_domain->cfg.procid = *((u32 *)data);
+		ret = 0;
+		break;
+	case DOMAIN_ATTR_DYNAMIC: {
+		int dynamic = *((int *)data);
+
+		if (smmu_domain->smmu != NULL) {
+			dev_err(smmu_domain->smmu->dev,
+			  "cannot change dynamic attribute while attached\n");
+			ret = -EBUSY;
+			break;
+		}
+
+		if (dynamic)
+			smmu_domain->attributes |= 1 << DOMAIN_ATTR_DYNAMIC;
+		else
+			smmu_domain->attributes &= ~(1 << DOMAIN_ATTR_DYNAMIC);
+		ret = 0;
+		break;
+	}
+	case DOMAIN_ATTR_CONTEXT_BANK:
+		/* context bank can't be set while attached */
+		if (smmu_domain->smmu != NULL) {
+			ret = -EBUSY;
+			break;
+		}
+		/* ... and it can only be set for dynamic contexts. */
+		if (!(smmu_domain->attributes & (1 << DOMAIN_ATTR_DYNAMIC))) {
+			ret = -EINVAL;
+			break;
+		}
+
+		/* this will be validated during attach */
+		smmu_domain->cfg.cbndx = *((unsigned int *)data);
+		ret = 0;
+		break;
+	case DOMAIN_ATTR_NON_FATAL_FAULTS:
+		smmu_domain->non_fatal_faults = *((int *)data);
+		ret = 0;
+		break;
+	case DOMAIN_ATTR_S1_BYPASS: {
+		int bypass = *((int *)data);
 
+		if (bypass)
+			smmu_domain->attributes |= 1 << DOMAIN_ATTR_S1_BYPASS;
+		else
+			smmu_domain->attributes &=
+					~(1 << DOMAIN_ATTR_S1_BYPASS);
+
+		ret = 0;
+		break;
+	}
+	case DOMAIN_ATTR_FAST:
+		if (*((int *)data))
+			smmu_domain->attributes |= 1 << DOMAIN_ATTR_FAST;
+		ret = 0;
+		break;
+	case DOMAIN_ATTR_EARLY_MAP: {
+		int early_map = *((int *)data);
+
+		ret = 0;
+		if (early_map) {
+			smmu_domain->attributes |=
+						1 << DOMAIN_ATTR_EARLY_MAP;
+		} else {
+			if (smmu_domain->smmu)
+				ret = arm_smmu_enable_s1_translations(
+								smmu_domain);
+
+			if (!ret)
+				smmu_domain->attributes &=
+					~(1 << DOMAIN_ATTR_EARLY_MAP);
+		}
+		break;
+	}
+	case DOMAIN_ATTR_PAGE_TABLE_FORCE_COHERENT: {
+		int force_coherent = *((int *)data);
+
+		if (smmu_domain->smmu != NULL) {
+			dev_err(smmu_domain->smmu->dev,
+			  "cannot change force coherent attribute while attached\n");
+			ret = -EBUSY;
+			break;
+		}
+
+		if (force_coherent)
+			smmu_domain->attributes |=
+			    1 << DOMAIN_ATTR_PAGE_TABLE_FORCE_COHERENT;
+		else
+			smmu_domain->attributes &=
+			    ~(1 << DOMAIN_ATTR_PAGE_TABLE_FORCE_COHERENT);
+
+		ret = 0;
+		break;
+	}
+	case DOMAIN_ATTR_ENABLE_TTBR1:
+		if (*((int *)data))
+			smmu_domain->attributes |=
+				1 << DOMAIN_ATTR_ENABLE_TTBR1;
+		ret = 0;
+		break;
+	case DOMAIN_ATTR_GEOMETRY: {
+		struct iommu_domain_geometry *geometry =
+				(struct iommu_domain_geometry *)data;
+
+		if (smmu_domain->smmu != NULL) {
+			dev_err(smmu_domain->smmu->dev,
+			  "cannot set geometry attribute while attached\n");
+			ret = -EBUSY;
+			break;
+		}
+
+		if (geometry->aperture_start >= SZ_1G * 4ULL ||
+		    geometry->aperture_end >= SZ_1G * 4ULL) {
+			pr_err("fastmap does not support IOVAs >= 4GB\n");
+			ret = -EINVAL;
+			break;
+		}
+		if (smmu_domain->attributes
+			  & (1 << DOMAIN_ATTR_GEOMETRY)) {
+			if (geometry->aperture_start
+					< domain->geometry.aperture_start)
+				domain->geometry.aperture_start =
+					geometry->aperture_start;
+
+			if (geometry->aperture_end
+					> domain->geometry.aperture_end)
+				domain->geometry.aperture_end =
+					geometry->aperture_end;
+		} else {
+			smmu_domain->attributes |= 1 << DOMAIN_ATTR_GEOMETRY;
+			domain->geometry.aperture_start =
+						geometry->aperture_start;
+			domain->geometry.aperture_end = geometry->aperture_end;
+		}
+		ret = 0;
+		break;
+	}
+	case DOMAIN_ATTR_CB_STALL_DISABLE:
+		if (*((int *)data))
+			smmu_domain->attributes |=
+				1 << DOMAIN_ATTR_CB_STALL_DISABLE;
+		ret = 0;
 		break;
 	default:
 		ret = -ENODEV;
+		break;
 	}
 
 out_unlock:
@@ -1435,6 +3424,66 @@
 	return ret;
 }
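
Client side, the attribute plumbing composes as below; a sketch of configuring an atomic fastmap domain before attach (attribute names come from the cases above, the function name is illustrative, and error handling is elided):

	static void demo_configure_domain(struct iommu_domain *domain)
	{
		int one = 1;
		struct iommu_domain_geometry geo = {
			.aperture_start	= SZ_4K,
			.aperture_end	= SZ_1G - 1,	/* must stay below 4 GB */
		};

		iommu_domain_set_attr(domain, DOMAIN_ATTR_ATOMIC, &one);
		iommu_domain_set_attr(domain, DOMAIN_ATTR_FAST, &one);
		iommu_domain_set_attr(domain, DOMAIN_ATTR_GEOMETRY, &geo);
		/* attach now takes the fastmap/atomic paths seen earlier */
	}
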
 
+static int arm_smmu_enable_s1_translations(struct arm_smmu_domain *smmu_domain)
+{
+	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
+	struct arm_smmu_device *smmu = smmu_domain->smmu;
+	void __iomem *cb_base;
+	u32 reg;
+	int ret;
+
+	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
+	ret = arm_smmu_enable_clocks(smmu);
+	if (ret)
+		return ret;
+
+	reg = readl_relaxed(cb_base + ARM_SMMU_CB_SCTLR);
+	reg |= SCTLR_M;
+
+	writel_relaxed(reg, cb_base + ARM_SMMU_CB_SCTLR);
+	arm_smmu_disable_clocks(smmu);
+	return ret;
+}
+
+static int arm_smmu_dma_supported(struct iommu_domain *domain,
+				  struct device *dev, u64 mask)
+{
+	struct arm_smmu_device *smmu;
+	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
+	int ret;
+
+	mutex_lock(&smmu_domain->init_mutex);
+	smmu = smmu_domain->smmu;
+	if (!smmu) {
+		dev_err(dev,
+			"Can't call dma_supported on an unattached domain\n");
+		mutex_unlock(&smmu_domain->init_mutex);
+		return 0;
+	}
+
+	ret = ((1ULL << smmu->va_size) - 1) <= mask ? 0 : 1;
+	mutex_unlock(&smmu_domain->init_mutex);
+	return ret;
+}
+
+static unsigned long arm_smmu_get_pgsize_bitmap(struct iommu_domain *domain)
+{
+	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
+
+	/*
+	 * if someone is calling map before attach just return the
+	 * supported page sizes for the hardware itself.
+	 */
+	if (!smmu_domain->pgtbl_cfg.pgsize_bitmap)
+		return arm_smmu_ops.pgsize_bitmap;
+	/*
+	 * otherwise return the page sizes supported by this specific page
+	 * table configuration
+	 */
+	return smmu_domain->pgtbl_cfg.pgsize_bitmap;
+}
+
 static struct iommu_ops arm_smmu_ops = {
 	.capable		= arm_smmu_capable,
 	.domain_alloc		= arm_smmu_domain_alloc,
@@ -1443,14 +3492,25 @@
 	.detach_dev		= arm_smmu_detach_dev,
 	.map			= arm_smmu_map,
 	.unmap			= arm_smmu_unmap,
-	.map_sg			= default_iommu_map_sg,
+	.map_sg			= arm_smmu_map_sg,
 	.iova_to_phys		= arm_smmu_iova_to_phys,
+	.iova_to_phys_hard	= arm_smmu_iova_to_phys_hard,
 	.add_device		= arm_smmu_add_device,
 	.remove_device		= arm_smmu_remove_device,
 	.device_group		= arm_smmu_device_group,
 	.domain_get_attr	= arm_smmu_domain_get_attr,
 	.domain_set_attr	= arm_smmu_domain_set_attr,
 	.pgsize_bitmap		= -1UL, /* Restricted during device attach */
+	.get_pgsize_bitmap	= arm_smmu_get_pgsize_bitmap,
+	.dma_supported		= arm_smmu_dma_supported,
+	.trigger_fault		= arm_smmu_trigger_fault,
+	.reg_read		= arm_smmu_reg_read,
+	.reg_write		= arm_smmu_reg_write,
+	.tlbi_domain		= arm_smmu_tlbi_domain,
+	.enable_config_clocks	= arm_smmu_enable_config_clocks,
+	.disable_config_clocks	= arm_smmu_disable_config_clocks,
+	.is_iova_coherent	= arm_smmu_is_iova_coherent,
+	.iova_to_pte		= arm_smmu_iova_to_pte,
 };
 
 static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
@@ -1464,9 +3524,11 @@
 	reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR);
 	writel(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR);
 
+	if (!(smmu->options & ARM_SMMU_OPT_SKIP_INIT)) {
 	/* Mark all SMRn as invalid and all S2CRn as bypass */
 	for (i = 0; i < smmu->num_mapping_groups; ++i) {
-		writel_relaxed(0, gr0_base + ARM_SMMU_GR0_SMR(i));
+			writel_relaxed(0,
+				gr0_base + ARM_SMMU_GR0_SMR(i));
 		writel_relaxed(S2CR_TYPE_BYPASS,
 			gr0_base + ARM_SMMU_GR0_S2CR(i));
 	}
@@ -1477,6 +3539,7 @@
 		writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR);
 		writel_relaxed(FSR_FAULT, cb_base + ARM_SMMU_CB_FSR);
 	}
+	}
 
 	/* Invalidate the TLB, just in case */
 	writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLH);
@@ -1490,8 +3553,11 @@
 	/* Disable TLB broadcasting. */
 	reg |= (sCR0_VMIDPNE | sCR0_PTM);
 
-	/* Enable client access, but bypass when no mapping is found */
-	reg &= ~(sCR0_CLIENTPD | sCR0_USFCFG);
+	/* Enable client access */
+	reg &= ~sCR0_CLIENTPD;
+
+	/* Raise an unidentified stream fault on unmapped access */
+	reg |= sCR0_USFCFG;
 
 	/* Disable forced broadcasting */
 	reg &= ~sCR0_FB;
@@ -1523,6 +3589,203 @@
 	}
 }
 
+static int regulator_notifier(struct notifier_block *nb,
+		unsigned long event, void *data)
+{
+	int ret = 0;
+	struct arm_smmu_device *smmu = container_of(nb,
+					struct arm_smmu_device, regulator_nb);
+
+	/* Ignore EVENT_DISABLE: the regulator is already off by then,
+	 * so no clocks could be turned on for this notification.
+	 */
+	if (event != REGULATOR_EVENT_PRE_DISABLE &&
+				event != REGULATOR_EVENT_ENABLE)
+		return NOTIFY_OK;
+
+	ret = arm_smmu_prepare_clocks(smmu);
+	if (ret)
+		goto out;
+
+	ret = arm_smmu_enable_clocks_atomic(smmu);
+	if (ret)
+		goto unprepare_clock;
+
+	if (event == REGULATOR_EVENT_PRE_DISABLE)
+		arm_smmu_halt(smmu);
+	else if (event == REGULATOR_EVENT_ENABLE)
+		arm_smmu_resume(smmu);
+
+	arm_smmu_disable_clocks_atomic(smmu);
+unprepare_clock:
+	arm_smmu_unprepare_clocks(smmu);
+out:
+	return NOTIFY_OK;
+}
+
+static int register_regulator_notifier(struct arm_smmu_device *smmu)
+{
+	struct device *dev = smmu->dev;
+	int ret = 0;
+
+	if (smmu->options & ARM_SMMU_OPT_HALT) {
+		smmu->regulator_nb.notifier_call = regulator_notifier;
+		ret = regulator_register_notifier(smmu->gdsc,
+						&smmu->regulator_nb);
+
+		if (ret)
+			dev_err(dev, "Regulator notifier request failed\n");
+	}
+	return ret;
+}
+
+static int arm_smmu_init_regulators(struct arm_smmu_device *smmu)
+{
+	struct device *dev = smmu->dev;
+
+	if (!of_get_property(dev->of_node, "vdd-supply", NULL))
+		return 0;
+
+	if (!of_property_read_u32(dev->of_node,
+				  "qcom,deferred-regulator-disable-delay",
+				  &(smmu->regulator_defer)))
+		dev_info(dev, "regulator defer delay %d\n",
+			smmu->regulator_defer);
+
+	smmu->gdsc = devm_regulator_get(dev, "vdd");
+	if (IS_ERR(smmu->gdsc))
+		return PTR_ERR(smmu->gdsc);
+
+	return 0;
+}
+
+static int arm_smmu_init_clocks(struct arm_smmu_device *smmu)
+{
+	const char *cname;
+	struct property *prop;
+	int i;
+	struct device *dev = smmu->dev;
+
+	smmu->num_clocks =
+		of_property_count_strings(dev->of_node, "clock-names");
+
+	if (smmu->num_clocks < 1) {
+		smmu->num_clocks = 0;
+		return 0;
+	}
+
+	smmu->clocks = devm_kzalloc(
+		dev, sizeof(*smmu->clocks) * smmu->num_clocks,
+		GFP_KERNEL);
+
+	if (!smmu->clocks) {
+		dev_err(dev,
+			"Failed to allocate memory for clocks\n");
+		return -ENODEV;
+	}
+
+	i = 0;
+	of_property_for_each_string(dev->of_node, "clock-names",
+				prop, cname) {
+		struct clk *c = devm_clk_get(dev, cname);
+		if (IS_ERR(c)) {
+			dev_err(dev, "Couldn't get clock: %s",
+				cname);
+			return PTR_ERR(c);
+		}
+
+		if (clk_get_rate(c) == 0) {
+			long rate = clk_round_rate(c, 1000);
+			clk_set_rate(c, rate);
+		}
+
+		smmu->clocks[i] = c;
+
+		++i;
+	}
+	return 0;
+}
+
+static int arm_smmu_init_bus_scaling(struct platform_device *pdev,
+				     struct arm_smmu_device *smmu)
+{
+	if (!of_find_property(pdev->dev.of_node, "qcom,msm-bus,name", NULL)) {
+		dev_dbg(&pdev->dev, "No bus scaling info\n");
+		return 0;
+	}
+
+	smmu->bus_pdata = msm_bus_cl_get_pdata(pdev);
+	if (!smmu->bus_pdata) {
+		dev_err(&pdev->dev, "Unable to read bus-scaling from DT\n");
+		return -EINVAL;
+	}
+
+	smmu->bus_client = msm_bus_scale_register_client(smmu->bus_pdata);
+	if (!smmu->bus_client) {
+		dev_err(&pdev->dev, "Bus client registration failed\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static void arm_smmu_exit_bus_scaling(struct arm_smmu_device *smmu)
+{
+	if (smmu->bus_client)
+		msm_bus_scale_unregister_client(smmu->bus_client);
+	if (smmu->bus_pdata)
+		msm_bus_cl_clear_pdata(smmu->bus_pdata);
+
+	smmu->bus_client = 0;
+	smmu->bus_pdata = NULL;
+}
+
+static int arm_smmu_parse_impl_def_registers(struct arm_smmu_device *smmu)
+{
+	struct device *dev = smmu->dev;
+	int i, ntuples, ret;
+	u32 *tuples;
+	struct arm_smmu_impl_def_reg *regs, *regit;
+
+	if (!of_find_property(dev->of_node, "attach-impl-defs", &ntuples))
+		return 0;
+
+	ntuples /= sizeof(u32);
+	if (ntuples % 2) {
+		dev_err(dev,
+			"Invalid number of attach-impl-defs registers: %d\n",
+			ntuples);
+		return -EINVAL;
+	}
+
+	regs = devm_kmalloc(
+		dev, sizeof(*smmu->impl_def_attach_registers) * ntuples,
+		GFP_KERNEL);
+	if (!regs)
+		return -ENOMEM;
+
+	tuples = devm_kmalloc(dev, sizeof(u32) * ntuples * 2, GFP_KERNEL);
+	if (!tuples)
+		return -ENOMEM;
+
+	ret = of_property_read_u32_array(dev->of_node, "attach-impl-defs",
+					tuples, ntuples);
+	if (ret)
+		return ret;
+
+	for (i = 0, regit = regs; i < ntuples; i += 2, ++regit) {
+		regit->offset = tuples[i];
+		regit->value = tuples[i + 1];
+	}
+
+	devm_kfree(dev, tuples);
+
+	smmu->impl_def_attach_registers = regs;
+	smmu->num_impl_def_attach_registers = ntuples / 2;
+
+	return 0;
+}
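
For illustration, the attach-impl-defs parser above consumes a flat list of u32 cells and pairs them up as (offset, value) tuples that are replayed as register writes at attach time. A minimal userspace sketch of the same pairing; the property layout follows the code above, but the offsets and values here are made up:

#include <stdint.h>
#include <stdio.h>

struct impl_def_reg {
	uint32_t offset;
	uint32_t value;
};

int main(void)
{
	/* Hypothetical flat cell list, as it might appear in DT:
	 *   attach-impl-defs = <0x6000 0x270>, <0x6060 0x1055>;
	 */
	uint32_t tuples[] = { 0x6000, 0x270, 0x6060, 0x1055 };
	int ntuples = sizeof(tuples) / sizeof(tuples[0]);
	struct impl_def_reg regs[2];
	int i;

	/* Same pairing as the parser: even cells are register offsets,
	 * odd cells are the values written to them */
	for (i = 0; i < ntuples; i += 2) {
		regs[i / 2].offset = tuples[i];
		regs[i / 2].value = tuples[i + 1];
	}

	for (i = 0; i < ntuples / 2; i++)
		printf("write 0x%x to offset 0x%x at attach\n",
		       regs[i].value, regs[i].offset);
	return 0;
}
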
+
 static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
 {
 	unsigned long size;
@@ -1530,8 +3793,11 @@
 	u32 id;
 	bool cttw_dt, cttw_reg;
 
-	dev_notice(smmu->dev, "probing hardware configuration...\n");
-	dev_notice(smmu->dev, "SMMUv%d with:\n", smmu->version);
+	if (arm_smmu_restore_sec_cfg(smmu))
+		return -ENODEV;
+
+	dev_dbg(smmu->dev, "probing hardware configuration...\n");
+	dev_dbg(smmu->dev, "SMMUv%d with:\n", smmu->version);
 
 	/* ID0 */
 	id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID0);
@@ -1544,28 +3810,28 @@
 
 	if (id & ID0_S1TS) {
 		smmu->features |= ARM_SMMU_FEAT_TRANS_S1;
-		dev_notice(smmu->dev, "\tstage 1 translation\n");
+		dev_dbg(smmu->dev, "\tstage 1 translation\n");
 	}
 
 	if (id & ID0_S2TS) {
 		smmu->features |= ARM_SMMU_FEAT_TRANS_S2;
-		dev_notice(smmu->dev, "\tstage 2 translation\n");
+		dev_dbg(smmu->dev, "\tstage 2 translation\n");
 	}
 
 	if (id & ID0_NTS) {
 		smmu->features |= ARM_SMMU_FEAT_TRANS_NESTED;
-		dev_notice(smmu->dev, "\tnested translation\n");
+		dev_dbg(smmu->dev, "\tnested translation\n");
 	}
 
 	if (!(smmu->features &
 		(ARM_SMMU_FEAT_TRANS_S1 | ARM_SMMU_FEAT_TRANS_S2))) {
-		dev_err(smmu->dev, "\tno translation support!\n");
+		dev_err(smmu->dev, "\tno translation support (id0=%x)!\n", id);
 		return -ENODEV;
 	}
 
 	if ((id & ID0_S1TS) && ((smmu->version == 1) || !(id & ID0_ATOSNS))) {
 		smmu->features |= ARM_SMMU_FEAT_TRANS_OPS;
-		dev_notice(smmu->dev, "\taddress translation ops\n");
+		dev_dbg(smmu->dev, "\taddress translation ops\n");
 	}
 
 	/*
@@ -1579,14 +3845,14 @@
 	if (cttw_dt)
 		smmu->features |= ARM_SMMU_FEAT_COHERENT_WALK;
 	if (cttw_dt || cttw_reg)
-		dev_notice(smmu->dev, "\t%scoherent table walk\n",
+		dev_dbg(smmu->dev, "\t%scoherent table walk\n",
 			   cttw_dt ? "" : "non-");
 	if (cttw_dt != cttw_reg)
-		dev_notice(smmu->dev,
+		dev_dbg(smmu->dev,
 			   "\t(IDR0.CTTW overridden by dma-coherent property)\n");
 
 	if (id & ID0_SMS) {
-		u32 smr, sid, mask;
+		u32 smr, sid, mask = 0;
 
 		smmu->features |= ARM_SMMU_FEAT_STREAM_MATCH;
 		smmu->num_mapping_groups = (id >> ID0_NUMSMRG_SHIFT) &
@@ -1597,6 +3863,7 @@
 			return -ENODEV;
 		}
 
+		if (!(smmu->options & ARM_SMMU_OPT_NO_SMR_CHECK)) {
 		smr = SMR_MASK_MASK << SMR_MASK_SHIFT;
 		smr |= (SMR_ID_MASK << SMR_ID_SHIFT);
 		writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(0));
@@ -1610,8 +3877,9 @@
 				mask, sid);
 			return -ENODEV;
 		}
+		}
 
-		dev_notice(smmu->dev,
+		dev_dbg(smmu->dev,
 			   "\tstream matching with %u register groups, mask 0x%x",
 			   smmu->num_mapping_groups, mask);
 	} else {
@@ -1637,7 +3905,7 @@
 		dev_err(smmu->dev, "impossible number of S2 context banks!\n");
 		return -ENODEV;
 	}
-	dev_notice(smmu->dev, "\t%u context banks (%u stage-2 only)\n",
+	dev_dbg(smmu->dev, "\t%u context banks (%u stage-2 only)\n",
 		   smmu->num_context_banks, smmu->num_s2_context_banks);
 
 	/* ID2 */
@@ -1662,11 +3930,13 @@
 		smmu->va_size = smmu->ipa_size;
 		size = SZ_4K | SZ_2M | SZ_1G;
 	} else {
-		size = (id >> ID2_UBS_SHIFT) & ID2_UBS_MASK;
-		smmu->va_size = arm_smmu_id_size_to_bits(size);
+		smmu->ubs = (id >> ID2_UBS_SHIFT) & ID2_UBS_MASK;
+
+		smmu->va_size = arm_smmu_id_size_to_bits(smmu->ubs);
 #ifndef CONFIG_64BIT
 		smmu->va_size = min(32UL, smmu->va_size);
 #endif
+		smmu->va_size = min(39UL, smmu->va_size);
 		size = 0;
 		if (id & ID2_PTFS_4K)
 			size |= SZ_4K | SZ_2M | SZ_1G;
@@ -1677,25 +3947,82 @@
 	}
 
 	arm_smmu_ops.pgsize_bitmap &= size;
-	dev_notice(smmu->dev, "\tSupported page sizes: 0x%08lx\n", size);
+	dev_dbg(smmu->dev, "\tSupported page sizes: 0x%08lx\n", size);
 
 	if (smmu->features & ARM_SMMU_FEAT_TRANS_S1)
-		dev_notice(smmu->dev, "\tStage-1: %lu-bit VA -> %lu-bit IPA\n",
+		dev_dbg(smmu->dev, "\tStage-1: %lu-bit VA -> %lu-bit IPA\n",
 			   smmu->va_size, smmu->ipa_size);
 
 	if (smmu->features & ARM_SMMU_FEAT_TRANS_S2)
-		dev_notice(smmu->dev, "\tStage-2: %lu-bit IPA -> %lu-bit PA\n",
+		dev_dbg(smmu->dev, "\tStage-2: %lu-bit IPA -> %lu-bit PA\n",
 			   smmu->ipa_size, smmu->pa_size);
 
 	return 0;
 }
 
+static int arm_smmu_add_static_cbndx(struct arm_smmu_device *smmu, int sid,
+		int smr_idx)
+{
+	void __iomem *gr0_base;
+	u32 s2cr_reg;
+	struct static_cbndx_entry *entry;
+
+	entry = devm_kzalloc(smmu->dev, sizeof(*entry), GFP_KERNEL);
+	if (!entry)
+		return -ENOMEM;
+
+	gr0_base = ARM_SMMU_GR0(smmu);
+	s2cr_reg = readl_relaxed(gr0_base + ARM_SMMU_GR0_S2CR(smr_idx));
+	entry->type = (s2cr_reg >> S2CR_TYPE_SHIFT) & S2CR_TYPE_MASK;
+	entry->smr_idx = smr_idx;
+	entry->sid = sid;
+
+	if (entry->type == TYPE_TRANS) {
+		entry->cbndx = (s2cr_reg >> S2CR_CBNDX_SHIFT) &
+					S2CR_CBNDX_MASK;
+		__arm_smmu_set_bitmap(smmu->context_map, entry->cbndx);
+		pr_debug("Static context bank: smr:%d, sid:%d, cbndx:%d\n",
+			smr_idx, sid, entry->cbndx);
+	}
+	__arm_smmu_set_bitmap(smmu->smr_map, smr_idx);
+	list_add(&entry->list, &smmu->static_cbndx_list);
+
+	return 0;
+}
+
+static int arm_smmu_init_static_cbndx_list(struct arm_smmu_device *smmu)
+{
+	int i, ret = 0;
+	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
+
+	for (i = 0; i < smmu->num_mapping_groups; ++i) {
+		u32 smr_reg, sid;
+
+		smr_reg = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(i));
+		if (smr_reg & SMR_VALID) {
+			u32 smr_mask = (smr_reg >> SMR_MASK_SHIFT) &
+					SMR_MASK_MASK;
+
+			if (smr_mask != 0)
+				dev_warn(smmu->dev,
+					"Static smr mask not supported\n");
+			sid = ((smr_reg >> SMR_ID_SHIFT) & SMR_ID_MASK);
+			ret = arm_smmu_add_static_cbndx(smmu, sid, i);
+			if (ret)
+				break;
+		}
+	}
+
+	return ret;
+}
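
A standalone sketch of the SMR decode used when building the static cbndx list. The field positions mirror the SMMUv2 SMRn layout assumed by the driver's SMR_* defines (VALID in bit 31, mask in [30:16], stream ID in [14:0]); the raw register value is hypothetical:

#include <stdint.h>
#include <stdio.h>

#define SMR_VALID	(1u << 31)
#define SMR_MASK_SHIFT	16
#define SMR_MASK_MASK	0x7fffu
#define SMR_ID_SHIFT	0
#define SMR_ID_MASK	0x7fffu

int main(void)
{
	uint32_t smr = 0x80000842;	/* hypothetical raw SMR0 read */

	if (smr & SMR_VALID) {
		uint32_t mask = (smr >> SMR_MASK_SHIFT) & SMR_MASK_MASK;
		uint32_t sid = (smr >> SMR_ID_SHIFT) & SMR_ID_MASK;

		/* a nonzero mask would draw the warning above */
		printf("static SMR: sid=0x%x mask=0x%x\n", sid, mask);
	}
	return 0;
}
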
+
 static const struct of_device_id arm_smmu_of_match[] = {
 	{ .compatible = "arm,smmu-v1", .data = (void *)ARM_SMMU_V1 },
 	{ .compatible = "arm,smmu-v2", .data = (void *)ARM_SMMU_V2 },
 	{ .compatible = "arm,mmu-400", .data = (void *)ARM_SMMU_V1 },
 	{ .compatible = "arm,mmu-401", .data = (void *)ARM_SMMU_V1 },
 	{ .compatible = "arm,mmu-500", .data = (void *)ARM_SMMU_V2 },
+	{ .compatible = "qcom,smmu-v2", .data = (void *)ARM_SMMU_V2 },
 	{ },
 };
 MODULE_DEVICE_TABLE(of, arm_smmu_of_match);
@@ -1707,7 +4034,6 @@
 	struct arm_smmu_device *smmu;
 	struct device *dev = &pdev->dev;
 	struct rb_node *node;
-	struct of_phandle_args masterspec;
 	int num_irqs, i, err;
 
 	smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
@@ -1716,11 +4042,19 @@
 		return -ENOMEM;
 	}
 	smmu->dev = dev;
+	mutex_init(&smmu->attach_lock);
+	mutex_init(&smmu->power_lock);
+	spin_lock_init(&smmu->atos_lock);
+	spin_lock_init(&smmu->clock_refs_lock);
+	INIT_LIST_HEAD(&smmu->static_cbndx_list);
 
 	of_id = of_match_node(arm_smmu_of_match, dev->of_node);
+	if (!of_id)
+		return -ENODEV;
 	smmu->version = (enum arm_smmu_arch_version)of_id->data;
 
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	smmu->phys_addr = res->start;
 	smmu->base = devm_ioremap_resource(dev, res);
 	if (IS_ERR(smmu->base))
 		return PTR_ERR(smmu->base);
@@ -1762,43 +4096,62 @@
 		smmu->irqs[i] = irq;
 	}
 
-	err = arm_smmu_device_cfg_probe(smmu);
+	i = 0;
+
+	err = arm_smmu_parse_impl_def_registers(smmu);
 	if (err)
-		return err;
+		goto out;
 
-	i = 0;
+	err = arm_smmu_init_regulators(smmu);
+	if (err)
+		goto out;
+
+	err = arm_smmu_init_clocks(smmu);
+	if (err)
+		goto out;
+
+	err = arm_smmu_init_bus_scaling(pdev, smmu);
+	if (err)
+		goto out;
+
+	parse_driver_options(smmu);
+
+	err = arm_smmu_enable_clocks(smmu);
+	if (err)
+		goto out;
+
+	/* No probe deferral occurred! Proceed with iommu property parsing. */
 	smmu->masters = RB_ROOT;
-	while (!of_parse_phandle_with_args(dev->of_node, "mmu-masters",
-					   "#stream-id-cells", i,
-					   &masterspec)) {
-		err = register_smmu_master(smmu, dev, &masterspec);
-		if (err) {
-			dev_err(dev, "failed to add master %s\n",
-				masterspec.np->name);
+	err = arm_smmu_parse_iommus_properties(smmu);
+	if (err)
 			goto out_put_masters;
-		}
 
-		i++;
-	}
-	dev_notice(dev, "registered %d master devices\n", i);
+	smmu->sec_id = msm_dev_to_device_id(dev);
+	err = arm_smmu_device_cfg_probe(smmu);
+	if (!err)
+		err = arm_smmu_init_static_cbndx_list(smmu);
 
-	parse_driver_options(smmu);
+	arm_smmu_disable_clocks(smmu);
+	if (err)
+		goto out_put_masters;
+
+	if (of_device_is_compatible(dev->of_node, "qcom,smmu-v2"))
+		smmu->model = SMMU_MODEL_QCOM_V2;
 
 	if (smmu->version > ARM_SMMU_V1 &&
 	    smmu->num_context_banks != smmu->num_context_irqs) {
 		dev_err(dev,
-			"found only %d context interrupt(s) but %d required\n",
-			smmu->num_context_irqs, smmu->num_context_banks);
-		err = -ENODEV;
-		goto out_put_masters;
+			"found %d context interrupt(s) but have %d context banks. assuming %d context interrupts.\n",
+			smmu->num_context_irqs, smmu->num_context_banks,
+			smmu->num_context_banks);
+		smmu->num_context_irqs = smmu->num_context_banks;
 	}
 
 	for (i = 0; i < smmu->num_global_irqs; ++i) {
-		err = request_irq(smmu->irqs[i],
-				  arm_smmu_global_fault,
-				  IRQF_SHARED,
-				  "arm-smmu global fault",
-				  smmu);
+		err = request_threaded_irq(smmu->irqs[i],
+					NULL, arm_smmu_global_fault,
+					IRQF_ONESHOT | IRQF_SHARED,
+					"arm-smmu global fault", smmu);
 		if (err) {
 			dev_err(dev, "failed to request global IRQ %d (%u)\n",
 				i, smmu->irqs[i]);
@@ -1806,12 +4159,17 @@
 		}
 	}
 
+	idr_init(&smmu->asid_idr);
+
+	err = register_regulator_notifier(smmu);
+	if (err)
+		goto out_free_irqs;
+
 	INIT_LIST_HEAD(&smmu->list);
 	spin_lock(&arm_smmu_devices_lock);
 	list_add(&smmu->list, &arm_smmu_devices);
 	spin_unlock(&arm_smmu_devices_lock);
 
-	arm_smmu_device_reset(smmu);
 	return 0;
 
 out_free_irqs:
@@ -1819,12 +4177,13 @@
 		free_irq(smmu->irqs[i], smmu);
 
 out_put_masters:
+	arm_smmu_exit_bus_scaling(smmu);
 	for (node = rb_first(&smmu->masters); node; node = rb_next(node)) {
 		struct arm_smmu_master *master
 			= container_of(node, struct arm_smmu_master, node);
 		of_node_put(master->of_node);
 	}
-
+out:
 	return err;
 }
 
@@ -1860,11 +4219,73 @@
 	for (i = 0; i < smmu->num_global_irqs; ++i)
 		free_irq(smmu->irqs[i], smmu);
 
-	/* Turn the thing off */
-	writel(sCR0_CLIENTPD, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
+	mutex_lock(&smmu->attach_lock);
+	idr_destroy(&smmu->asid_idr);
+	/*
+	 * If all devices weren't detached for some reason, we're
+	 * still powered on. Power off now.
+	 */
+	if (smmu->attach_count)
+		arm_smmu_power_off(smmu);
+	mutex_unlock(&smmu->attach_lock);
+
+	arm_smmu_exit_bus_scaling(smmu);
+
 	return 0;
 }
 
+static void arm_smmu_free_master_nodes(void)
+{
+	struct arm_iommus_node *node, *nex;
+	struct arm_iommus_spec *entry, *n;
+
+	list_for_each_entry_safe(node, nex, &iommus_nodes, list) {
+		list_for_each_entry_safe(entry, n,
+				&node->iommuspec_list, list) {
+			list_del(&entry->list);
+			kfree(entry);
+		}
+		list_del(&node->list);
+		kfree(node);
+	}
+}
+
+static int arm_smmu_get_master_nodes(void)
+{
+	struct arm_iommus_node *node;
+	struct device_node *master;
+	struct of_phandle_args iommuspec;
+	struct arm_iommus_spec *entry;
+
+	for_each_node_with_property(master, "iommus") {
+		int arg_ind = 0;
+
+		node = kzalloc(sizeof(*node), GFP_KERNEL);
+		if (!node)
+			goto release_memory;
+		node->master = master;
+		list_add(&node->list, &iommus_nodes);
+
+		INIT_LIST_HEAD(&node->iommuspec_list);
+
+		while (!of_parse_phandle_with_args(master, "iommus",
+				"#iommu-cells", arg_ind, &iommuspec)) {
+			entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+			if (!entry)
+				goto release_memory;
+			entry->iommu_spec = iommuspec;
+			list_add(&entry->list, &node->iommuspec_list);
+			arg_ind++;
+		}
+	}
+
+	return 0;
+
+release_memory:
+	arm_smmu_free_master_nodes();
+	return -ENOMEM;
+}
+
 static struct platform_driver arm_smmu_driver = {
 	.driver	= {
 		.name		= "arm-smmu",
@@ -1890,10 +4311,15 @@
 
 	of_node_put(np);
 
-	ret = platform_driver_register(&arm_smmu_driver);
+	ret = arm_smmu_get_master_nodes();
 	if (ret)
 		return ret;
 
+	ret = platform_driver_register(&arm_smmu_driver);
+	if (ret) {
+		arm_smmu_free_master_nodes();
+		return ret;
+	}
 	/* Oh, for a proper bus abstraction */
 	if (!iommu_present(&platform_bus_type))
 		bus_set_iommu(&platform_bus_type, &arm_smmu_ops);
diff -ruw linux-4.4.115/drivers/iommu/iommu.c linux-4.4.115-fbx/drivers/iommu/iommu.c
--- linux-4.4.115/drivers/iommu/iommu.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/iommu/iommu.c	2019-01-22 16:16:24.063251478 +0100
@@ -31,8 +31,11 @@
 #include <linux/err.h>
 #include <linux/pci.h>
 #include <linux/bitops.h>
+#include <linux/debugfs.h>
 #include <trace/events/iommu.h>
 
+#include "iommu-debug.h"
+
 static struct kset *iommu_group_kset;
 static struct ida iommu_group_ida;
 static struct mutex iommu_group_mutex;
@@ -1034,6 +1037,8 @@
 
 bool iommu_present(struct bus_type *bus)
 {
+	if (!bus)
+		return false;
 	return bus->iommu_ops != NULL;
 }
 EXPORT_SYMBOL_GPL(iommu_present);
@@ -1070,6 +1075,45 @@
 }
 EXPORT_SYMBOL_GPL(iommu_set_fault_handler);
 
+/**
+ * iommu_trigger_fault() - trigger an IOMMU fault
+ * @domain: iommu domain
+ * @flags: implementation-specific flags describing the fault to trigger
+ *
+ * Triggers a fault on the device to which this domain is attached.
+ *
+ * This function should only be used for debugging purposes, for obvious
+ * reasons.
+ */
+void iommu_trigger_fault(struct iommu_domain *domain, unsigned long flags)
+{
+	if (domain->ops->trigger_fault)
+		domain->ops->trigger_fault(domain, flags);
+}
+
+/**
+ * iommu_reg_read() - read an IOMMU register
+ * @domain: iommu domain
+ * @offset: offset of the register within the IOMMU register space
+ *
+ * Reads the IOMMU register at the given offset. Returns 0 when the
+ * attached IOMMU driver does not provide a reg_read operation.
+ */
+unsigned long iommu_reg_read(struct iommu_domain *domain, unsigned long offset)
+{
+	if (domain->ops->reg_read)
+		return domain->ops->reg_read(domain, offset);
+	return 0;
+}
+
+/**
+ * iommu_reg_write() - write an IOMMU register
+ * @domain: iommu domain
+ * @offset: offset of the register within the IOMMU register space
+ * @val: value to write
+ *
+ * Writes the given value to the IOMMU register at the given offset,
+ * provided the attached IOMMU driver implements a reg_write operation.
+ */
+void iommu_reg_write(struct iommu_domain *domain, unsigned long offset,
+		     unsigned long val)
+{
+	if (domain->ops->reg_write)
+		domain->ops->reg_write(domain, offset, val);
+}
+
 static struct iommu_domain *__iommu_domain_alloc(struct bus_type *bus,
 						 unsigned type)
 {
@@ -1096,6 +1140,7 @@
 
 void iommu_domain_free(struct iommu_domain *domain)
 {
+	iommu_debug_domain_remove(domain);
 	domain->ops->domain_free(domain);
 }
 EXPORT_SYMBOL_GPL(iommu_domain_free);
@@ -1108,8 +1153,10 @@
 		return -ENODEV;
 
 	ret = domain->ops->attach_dev(domain, dev);
-	if (!ret)
+	if (!ret) {
 		trace_attach_device_to_domain(dev);
+		iommu_debug_attach_device(domain, dev);
+	}
 	return ret;
 }
 
@@ -1288,7 +1335,39 @@
 }
 EXPORT_SYMBOL_GPL(iommu_iova_to_phys);
 
-static size_t iommu_pgsize(struct iommu_domain *domain,
+phys_addr_t iommu_iova_to_phys_hard(struct iommu_domain *domain,
+				    dma_addr_t iova)
+{
+	if (unlikely(domain->ops->iova_to_phys_hard == NULL))
+		return 0;
+
+	return domain->ops->iova_to_phys_hard(domain, iova);
+}
+
+uint64_t iommu_iova_to_pte(struct iommu_domain *domain,
+				    dma_addr_t iova)
+{
+	if (unlikely(domain->ops->iova_to_pte == NULL))
+		return 0;
+
+	return domain->ops->iova_to_pte(domain, iova);
+}
+
+bool iommu_is_iova_coherent(struct iommu_domain *domain, dma_addr_t iova)
+{
+	if (unlikely(domain->ops->is_iova_coherent == NULL))
+		return false;
+
+	return domain->ops->is_iova_coherent(domain, iova);
+}
+
+static unsigned long iommu_get_pgsize_bitmap(struct iommu_domain *domain)
+{
+	if (domain->ops->get_pgsize_bitmap)
+		return domain->ops->get_pgsize_bitmap(domain);
+	return domain->ops->pgsize_bitmap;
+}
+
+size_t iommu_pgsize(unsigned long pgsize_bitmap,
 			   unsigned long addr_merge, size_t size)
 {
 	unsigned int pgsize_idx;
@@ -1308,10 +1387,14 @@
 	pgsize = (1UL << (pgsize_idx + 1)) - 1;
 
 	/* throw away page sizes not supported by the hardware */
-	pgsize &= domain->ops->pgsize_bitmap;
+	pgsize &= pgsize_bitmap;
 
 	/* make sure we're still sane */
-	BUG_ON(!pgsize);
+	if (!pgsize) {
+		pr_err("invalid pgsize/addr/size! 0x%lx 0x%lx 0x%zx\n",
+		       pgsize_bitmap, addr_merge, size);
+		BUG();
+	}
 
 	/* pick the biggest page */
 	pgsize_idx = __fls(pgsize);
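
iommu_pgsize() picks the largest hardware-supported page size that fits both the remaining length and the alignment of addr_merge (iova | paddr). A minimal standalone sketch of that arithmetic, using GCC builtins in place of the kernel's __fls()/__ffs(); the bitmap and sizes are hypothetical:

#include <stdio.h>
#include <stddef.h>

static size_t pick_pgsize(unsigned long bitmap, unsigned long addr_merge,
			  size_t size)
{
	unsigned int idx = 63 - __builtin_clzl(size);	/* __fls(size) */
	unsigned long mask;

	if (addr_merge) {
		unsigned int align = __builtin_ctzl(addr_merge); /* __ffs */

		if (align < idx)
			idx = align;
	}

	mask = (1UL << (idx + 1)) - 1;	/* every page size that fits... */
	mask &= bitmap;			/* ...and that the hardware has */
	if (!mask)
		return 0;		/* the kernel BUG()s here instead */

	return 1UL << (63 - __builtin_clzl(mask));	/* biggest one */
}

int main(void)
{
	/* 4K | 2M | 1G bitmap; iova|paddr = 0x200000, 3M left to map */
	unsigned long bitmap = 0x1000 | 0x200000 | 0x40000000;

	printf("0x%zx\n", pick_pgsize(bitmap, 0x200000, 0x300000));
	/* prints 0x200000: a 2M block fits and is suitably aligned */
	return 0;
}
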
@@ -1323,20 +1406,25 @@
 int iommu_map(struct iommu_domain *domain, unsigned long iova,
 	      phys_addr_t paddr, size_t size, int prot)
 {
-	unsigned long orig_iova = iova;
+	unsigned long orig_iova = iova, pgsize_bitmap;
 	unsigned int min_pagesz;
 	size_t orig_size = size;
 	int ret = 0;
 
+	trace_map_start(iova, paddr, size);
 	if (unlikely(domain->ops->map == NULL ||
-		     domain->ops->pgsize_bitmap == 0UL))
+		     (domain->ops->pgsize_bitmap == 0UL &&
+		      !domain->ops->get_pgsize_bitmap))) {
+		trace_map_end(iova, paddr, size);
 		return -ENODEV;
+	}
 
 	if (unlikely(!(domain->type & __IOMMU_DOMAIN_PAGING)))
 		return -EINVAL;
 
+	pgsize_bitmap = iommu_get_pgsize_bitmap(domain);
 	/* find out the minimum page size supported */
-	min_pagesz = 1 << __ffs(domain->ops->pgsize_bitmap);
+	min_pagesz = 1 << __ffs(pgsize_bitmap);
 
 	/*
 	 * both the virtual address and the physical one, as well as
@@ -1346,13 +1434,14 @@
 	if (!IS_ALIGNED(iova | paddr | size, min_pagesz)) {
 		pr_err("unaligned: iova 0x%lx pa %pa size 0x%zx min_pagesz 0x%x\n",
 		       iova, &paddr, size, min_pagesz);
+		trace_map_end(iova, paddr, size);
 		return -EINVAL;
 	}
 
 	pr_debug("map: iova 0x%lx pa %pa size 0x%zx\n", iova, &paddr, size);
 
 	while (size) {
-		size_t pgsize = iommu_pgsize(domain, iova | paddr, size);
+		size_t pgsize = iommu_pgsize(pgsize_bitmap, iova | paddr, size);
 
 		pr_debug("mapping: iova 0x%lx pa %pa pgsize 0x%zx\n",
 			 iova, &paddr, pgsize);
@@ -1372,6 +1461,7 @@
 	else
 		trace_map(orig_iova, paddr, orig_size);
 
+	trace_map_end(iova, paddr, size);
 	return ret;
 }
 EXPORT_SYMBOL_GPL(iommu_map);
@@ -1382,15 +1472,21 @@
 	unsigned int min_pagesz;
 	unsigned long orig_iova = iova;
 
+	trace_unmap_start(iova, 0, size);
 	if (unlikely(domain->ops->unmap == NULL ||
-		     domain->ops->pgsize_bitmap == 0UL))
+		     (domain->ops->pgsize_bitmap == 0UL &&
+		      !domain->ops->get_pgsize_bitmap))) {
+		trace_unmap_end(iova, 0, size);
 		return -ENODEV;
+	}
 
-	if (unlikely(!(domain->type & __IOMMU_DOMAIN_PAGING)))
+	if (unlikely(!(domain->type & __IOMMU_DOMAIN_PAGING))) {
+		trace_unmap_end(iova, 0, size);
 		return -EINVAL;
+	}
 
 	/* find out the minimum page size supported */
-	min_pagesz = 1 << __ffs(domain->ops->pgsize_bitmap);
+	min_pagesz = 1 << __ffs(iommu_get_pgsize_bitmap(domain));
 
 	/*
 	 * The virtual address, as well as the size of the mapping, must be
@@ -1400,6 +1496,7 @@
 	if (!IS_ALIGNED(iova | size, min_pagesz)) {
 		pr_err("unaligned: iova 0x%lx size 0x%zx min_pagesz 0x%x\n",
 		       iova, size, min_pagesz);
+		trace_unmap_end(iova, 0, size);
 		return -EINVAL;
 	}
 
@@ -1410,9 +1507,9 @@
 	 * or we hit an area that isn't mapped.
 	 */
 	while (unmapped < size) {
-		size_t pgsize = iommu_pgsize(domain, iova, size - unmapped);
+		size_t left = size - unmapped;
 
-		unmapped_page = domain->ops->unmap(domain, iova, pgsize);
+		unmapped_page = domain->ops->unmap(domain, iova, left);
 		if (!unmapped_page)
 			break;
 
@@ -1424,6 +1521,7 @@
 	}
 
 	trace_unmap(orig_iova, size, unmapped);
+	trace_unmap_end(orig_iova, 0, size);
 	return unmapped;
 }
 EXPORT_SYMBOL_GPL(iommu_unmap);
@@ -1435,11 +1533,14 @@
 	size_t mapped = 0;
 	unsigned int i, min_pagesz;
 	int ret;
+	unsigned long pgsize_bitmap;
 
-	if (unlikely(domain->ops->pgsize_bitmap == 0UL))
+	if (unlikely(domain->ops->pgsize_bitmap == 0UL &&
+		     !domain->ops->get_pgsize_bitmap))
 		return 0;
 
-	min_pagesz = 1 << __ffs(domain->ops->pgsize_bitmap);
+	pgsize_bitmap = iommu_get_pgsize_bitmap(domain);
+	min_pagesz = 1 << __ffs(pgsize_bitmap);
 
 	for_each_sg(sg, s, nents, i) {
 		phys_addr_t phys = page_to_phys(sg_page(s)) + s->offset;
@@ -1471,6 +1572,20 @@
 }
 EXPORT_SYMBOL_GPL(default_iommu_map_sg);
 
+/* DEPRECATED */
+int iommu_map_range(struct iommu_domain *domain, unsigned int iova,
+		    struct scatterlist *sg, unsigned int len, int opt)
+{
+	return -ENODEV;
+}
+
+/* DEPRECATED */
+int iommu_unmap_range(struct iommu_domain *domain, unsigned int iova,
+		      unsigned int len)
+{
+	return -ENODEV;
+}
+
 int iommu_domain_window_enable(struct iommu_domain *domain, u32 wnd_nr,
 			       phys_addr_t paddr, u64 size, int prot)
 {
@@ -1491,6 +1606,8 @@
 }
 EXPORT_SYMBOL_GPL(iommu_domain_window_disable);
 
+struct dentry *iommu_debugfs_top;
+
 static int __init iommu_init(void)
 {
 	iommu_group_kset = kset_create_and_add("iommu_groups",
@@ -1500,6 +1617,12 @@
 
 	BUG_ON(!iommu_group_kset);
 
+	iommu_debugfs_top = debugfs_create_dir("iommu", NULL);
+	if (!iommu_debugfs_top) {
+		pr_err("Couldn't create iommu debugfs directory\n");
+		return -ENODEV;
+	}
+
 	return 0;
 }
 core_initcall(iommu_init);
@@ -1520,7 +1643,7 @@
 		break;
 	case DOMAIN_ATTR_PAGING:
 		paging  = data;
-		*paging = (domain->ops->pgsize_bitmap != 0UL);
+		*paging = (iommu_get_pgsize_bitmap(domain) != 0UL);
 		break;
 	case DOMAIN_ATTR_WINDOWS:
 		count = data;
@@ -1569,6 +1692,14 @@
 }
 EXPORT_SYMBOL_GPL(iommu_domain_set_attr);
 
+int iommu_dma_supported(struct iommu_domain *domain, struct device *dev,
+								u64 mask)
+{
+	if (domain->ops->dma_supported)
+		return domain->ops->dma_supported(domain, dev, mask);
+	return 0;
+}
+
 void iommu_get_dm_regions(struct device *dev, struct list_head *list)
 {
 	const struct iommu_ops *ops = dev->bus->iommu_ops;
diff -ruw linux-4.4.115/drivers/iommu/io-pgtable-arm.c linux-4.4.115-fbx/drivers/iommu/io-pgtable-arm.c
--- linux-4.4.115/drivers/iommu/io-pgtable-arm.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/iommu/io-pgtable-arm.c	2019-01-22 16:16:24.059251441 +0100
@@ -22,6 +22,7 @@
 
 #include <linux/iommu.h>
 #include <linux/kernel.h>
+#include <linux/scatterlist.h>
 #include <linux/sizes.h>
 #include <linux/slab.h>
 #include <linux/types.h>
@@ -68,9 +69,12 @@
 #define ARM_LPAE_PGD_IDX(l,d)						\
 	((l) == ARM_LPAE_START_LVL(d) ? ilog2(ARM_LPAE_PAGES_PER_PGD(d)) : 0)
 
+#define ARM_LPAE_LVL_MASK(l, d)						\
+	((l) == ARM_LPAE_START_LVL(d) ?	(1 << (d)->pgd_bits) - 1 :	\
+					(1 << (d)->bits_per_level) - 1)
 #define ARM_LPAE_LVL_IDX(a,l,d)						\
 	(((u64)(a) >> ARM_LPAE_LVL_SHIFT(l,d)) &			\
-	 ((1 << ((d)->bits_per_level + ARM_LPAE_PGD_IDX(l,d))) - 1))
+	 ARM_LPAE_LVL_MASK(l, d))
 
 /* Calculate the block/page mapping size at level l for pagetable in d. */
 #define ARM_LPAE_BLOCK_SIZE(l,d)					\
@@ -85,6 +89,7 @@
 #define ARM_LPAE_PTE_TYPE_TABLE		3
 #define ARM_LPAE_PTE_TYPE_PAGE		3
 
+#define ARM_LPAE_PTE_SH_MASK		(((arm_lpae_iopte)0x3) << 8)
 #define ARM_LPAE_PTE_NSTABLE		(((arm_lpae_iopte)1) << 63)
 #define ARM_LPAE_PTE_XN			(((arm_lpae_iopte)3) << 53)
 #define ARM_LPAE_PTE_AF			(((arm_lpae_iopte)1) << 10)
@@ -101,8 +106,11 @@
 					 ARM_LPAE_PTE_ATTR_HI_MASK)
 
 /* Stage-1 PTE */
-#define ARM_LPAE_PTE_AP_UNPRIV		(((arm_lpae_iopte)1) << 6)
-#define ARM_LPAE_PTE_AP_RDONLY		(((arm_lpae_iopte)2) << 6)
+#define ARM_LPAE_PTE_AP_PRIV_RW		(((arm_lpae_iopte)0) << 6)
+#define ARM_LPAE_PTE_AP_RW		(((arm_lpae_iopte)1) << 6)
+#define ARM_LPAE_PTE_AP_PRIV_RO		(((arm_lpae_iopte)2) << 6)
+#define ARM_LPAE_PTE_AP_RO		(((arm_lpae_iopte)3) << 6)
+#define ARM_LPAE_PTE_ATTRINDX_MASK	0x7
 #define ARM_LPAE_PTE_ATTRINDX_SHIFT	2
 #define ARM_LPAE_PTE_nG			(((arm_lpae_iopte)1) << 11)
 
@@ -124,14 +132,21 @@
 #define ARM_LPAE_TCR_TG0_64K		(1 << 14)
 #define ARM_LPAE_TCR_TG0_16K		(2 << 14)
 
+#define ARM_LPAE_TCR_TG1_16K            1ULL
+#define ARM_LPAE_TCR_TG1_4K             2ULL
+#define ARM_LPAE_TCR_TG1_64K            3ULL
+
 #define ARM_LPAE_TCR_SH0_SHIFT		12
 #define ARM_LPAE_TCR_SH0_MASK		0x3
+#define ARM_LPAE_TCR_SH1_SHIFT		28
 #define ARM_LPAE_TCR_SH_NS		0
 #define ARM_LPAE_TCR_SH_OS		2
 #define ARM_LPAE_TCR_SH_IS		3
 
 #define ARM_LPAE_TCR_ORGN0_SHIFT	10
+#define ARM_LPAE_TCR_ORGN1_SHIFT	26
 #define ARM_LPAE_TCR_IRGN0_SHIFT	8
+#define ARM_LPAE_TCR_IRGN1_SHIFT	24
 #define ARM_LPAE_TCR_RGN_MASK		0x3
 #define ARM_LPAE_TCR_RGN_NC		0
 #define ARM_LPAE_TCR_RGN_WBWA		1
@@ -144,6 +159,9 @@
 #define ARM_LPAE_TCR_T0SZ_SHIFT		0
 #define ARM_LPAE_TCR_SZ_MASK		0xf
 
+#define ARM_LPAE_TCR_T1SZ_SHIFT         16
+#define ARM_LPAE_TCR_T1SZ_MASK          0x3f
+
 #define ARM_LPAE_TCR_PS_SHIFT		16
 #define ARM_LPAE_TCR_PS_MASK		0x7
 
@@ -157,6 +175,19 @@
 #define ARM_LPAE_TCR_PS_44_BIT		0x4ULL
 #define ARM_LPAE_TCR_PS_48_BIT		0x5ULL
 
+#define ARM_LPAE_TCR_EPD1_SHIFT		23
+#define ARM_LPAE_TCR_EPD1_FAULT		1
+
+#define ARM_LPAE_TCR_SEP_SHIFT		(15 + 32)
+
+#define ARM_LPAE_TCR_SEP_31		0ULL
+#define ARM_LPAE_TCR_SEP_35		1ULL
+#define ARM_LPAE_TCR_SEP_39		2ULL
+#define ARM_LPAE_TCR_SEP_41		3ULL
+#define ARM_LPAE_TCR_SEP_43		4ULL
+#define ARM_LPAE_TCR_SEP_47		5ULL
+#define ARM_LPAE_TCR_SEP_UPSTREAM	7ULL
+
 #define ARM_LPAE_MAIR_ATTR_SHIFT(n)	((n) << 3)
 #define ARM_LPAE_MAIR_ATTR_MASK		0xff
 #define ARM_LPAE_MAIR_ATTR_DEVICE	0x04
@@ -168,7 +199,7 @@
 
 /* IOPTE accessors */
 #define iopte_deref(pte,d)					\
-	(__va((pte) & ((1ULL << ARM_LPAE_MAX_ADDR_BITS) - 1)	\
+	(__va(iopte_val(pte) & ((1ULL << ARM_LPAE_MAX_ADDR_BITS) - 1)	\
 	& ~((1ULL << (d)->pg_shift) - 1)))
 
 #define iopte_type(pte,l)					\
@@ -191,28 +222,113 @@
 	struct io_pgtable	iop;
 
 	int			levels;
+	unsigned int		pgd_bits;
 	size_t			pgd_size;
 	unsigned long		pg_shift;
 	unsigned long		bits_per_level;
 
-	void			*pgd;
+	void			*pgd[2];
 };
 
 typedef u64 arm_lpae_iopte;
 
 static bool selftest_running = false;
 
+/*
+ * We'll use some ignored bits in table entries to keep track of the number
+ * of page mappings beneath the table.  The maximum number of entries
+ * beneath any table mapping in armv8 is 8192 (which is possible at the
+ * 2nd- and 3rd-level when using a 64K granule size).  The bits at our
+ * disposal are:
+ *
+ *     4k granule: [58..52], [11..2]
+ *    64k granule: [58..52], [15..2]
+ *
+ * [58..52], [11..2] is enough bits for tracking table mappings at any
+ * level for any granule, so we'll use those.
+ */
+#define BOTTOM_IGNORED_MASK 0x3ff
+#define BOTTOM_IGNORED_SHIFT 2
+#define BOTTOM_IGNORED_NUM_BITS 10
+#define TOP_IGNORED_MASK 0x7fULL
+#define TOP_IGNORED_SHIFT 52
+#define IOPTE_RESERVED_MASK ((BOTTOM_IGNORED_MASK << BOTTOM_IGNORED_SHIFT) | \
+			     (TOP_IGNORED_MASK << TOP_IGNORED_SHIFT))
+
+static arm_lpae_iopte iopte_val(arm_lpae_iopte table_pte)
+{
+	return table_pte & ~IOPTE_RESERVED_MASK;
+}
+
+static arm_lpae_iopte _iopte_bottom_ignored_val(arm_lpae_iopte table_pte)
+{
+	return (table_pte & (BOTTOM_IGNORED_MASK << BOTTOM_IGNORED_SHIFT))
+		>> BOTTOM_IGNORED_SHIFT;
+}
+
+static arm_lpae_iopte _iopte_top_ignored_val(arm_lpae_iopte table_pte)
+{
+	return (table_pte & (TOP_IGNORED_MASK << TOP_IGNORED_SHIFT))
+		>> TOP_IGNORED_SHIFT;
+}
+
+static int iopte_tblcnt(arm_lpae_iopte table_pte)
+{
+	return (_iopte_bottom_ignored_val(table_pte) |
+		(_iopte_top_ignored_val(table_pte) << BOTTOM_IGNORED_NUM_BITS));
+}
+
+static void iopte_tblcnt_set(arm_lpae_iopte *table_pte, int val)
+{
+	arm_lpae_iopte pte = iopte_val(*table_pte);
+
+	pte |= ((val & BOTTOM_IGNORED_MASK) << BOTTOM_IGNORED_SHIFT) |
+		 (((val & (TOP_IGNORED_MASK << BOTTOM_IGNORED_NUM_BITS))
+		   >> BOTTOM_IGNORED_NUM_BITS) << TOP_IGNORED_SHIFT);
+	*table_pte = pte;
+}
+
+static void iopte_tblcnt_sub(arm_lpae_iopte *table_ptep, int cnt)
+{
+	arm_lpae_iopte current_cnt = iopte_tblcnt(*table_ptep);
+
+	current_cnt -= cnt;
+	iopte_tblcnt_set(table_ptep, current_cnt);
+}
+
+static void iopte_tblcnt_add(arm_lpae_iopte *table_ptep, int cnt)
+{
+	arm_lpae_iopte current_cnt = iopte_tblcnt(*table_ptep);
+
+	current_cnt += cnt;
+	iopte_tblcnt_set(table_ptep, current_cnt);
+}
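
The tblcnt helpers above split a per-table mapping count across the PTE's ignored bits: the low 10 bits of the count land in pte[11:2] and the next 7 bits in pte[58:52]. A userspace round-trip sketch of the same packing; names are shortened, the layout follows the defines above:

#include <stdint.h>
#include <stdio.h>

#define BOT_MASK	0x3ffULL	/* count [9:0]   -> pte [11:2]  */
#define BOT_SHIFT	2
#define BOT_BITS	10
#define TOP_MASK	0x7fULL		/* count [16:10] -> pte [58:52] */
#define TOP_SHIFT	52

static uint64_t encode(uint64_t pte, unsigned int cnt)
{
	pte &= ~((BOT_MASK << BOT_SHIFT) | (TOP_MASK << TOP_SHIFT));
	pte |= ((uint64_t)cnt & BOT_MASK) << BOT_SHIFT;
	pte |= (((uint64_t)cnt >> BOT_BITS) & TOP_MASK) << TOP_SHIFT;
	return pte;
}

static unsigned int decode(uint64_t pte)
{
	unsigned int bot = (pte >> BOT_SHIFT) & BOT_MASK;
	unsigned int top = (pte >> TOP_SHIFT) & TOP_MASK;

	return bot | (top << BOT_BITS);
}

int main(void)
{
	uint64_t pte = 0;
	unsigned int cnt;

	/* 8192 is the largest count needed (64K granule tables) */
	for (cnt = 0; cnt <= 8192; cnt += 1024) {
		pte = encode(pte, cnt);
		printf("%u -> %u\n", cnt, decode(pte));	/* round-trips */
	}
	return 0;
}
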
+
+static bool suppress_map_failures;
+
 static dma_addr_t __arm_lpae_dma_addr(void *pages)
 {
 	return (dma_addr_t)virt_to_phys(pages);
 }
 
+static inline void pgtable_dma_sync_single_for_device(
+				struct io_pgtable_cfg *cfg,
+				dma_addr_t addr, size_t size,
+				enum dma_data_direction dir)
+{
+	if (!(cfg->quirks & IO_PGTABLE_QUIRK_PAGE_TABLE_COHERENT))
+		dma_sync_single_for_device(cfg->iommu_dev, addr, size,
+								dir);
+}
+
 static void *__arm_lpae_alloc_pages(size_t size, gfp_t gfp,
-				    struct io_pgtable_cfg *cfg)
+				    struct io_pgtable_cfg *cfg,
+					void *cookie)
 {
 	struct device *dev = cfg->iommu_dev;
 	dma_addr_t dma;
-	void *pages = alloc_pages_exact(size, gfp | __GFP_ZERO);
+	void *pages = io_pgtable_alloc_pages_exact(cfg, cookie,
+					     size, gfp | __GFP_ZERO);
 
 	if (!pages)
 		return NULL;
@@ -236,17 +352,17 @@
 	dev_err(dev, "Cannot accommodate DMA translation for IOMMU page tables\n");
 	dma_unmap_single(dev, dma, size, DMA_TO_DEVICE);
 out_free:
-	free_pages_exact(pages, size);
+	io_pgtable_free_pages_exact(cfg, cookie, pages, size);
 	return NULL;
 }
 
 static void __arm_lpae_free_pages(void *pages, size_t size,
-				  struct io_pgtable_cfg *cfg)
+				  struct io_pgtable_cfg *cfg, void *cookie)
 {
 	if (!selftest_running)
 		dma_unmap_single(cfg->iommu_dev, __arm_lpae_dma_addr(pages),
 				 size, DMA_TO_DEVICE);
-	free_pages_exact(pages, size);
+	io_pgtable_free_pages_exact(cfg, cookie, pages, size);
 }
 
 static void __arm_lpae_set_pte(arm_lpae_iopte *ptep, arm_lpae_iopte pte,
@@ -255,38 +371,24 @@
 	*ptep = pte;
 
 	if (!selftest_running)
-		dma_sync_single_for_device(cfg->iommu_dev,
+		pgtable_dma_sync_single_for_device(cfg,
 					   __arm_lpae_dma_addr(ptep),
 					   sizeof(pte), DMA_TO_DEVICE);
 }
 
-static int __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
-			    unsigned long iova, size_t size, int lvl,
-			    arm_lpae_iopte *ptep);
-
 static int arm_lpae_init_pte(struct arm_lpae_io_pgtable *data,
 			     unsigned long iova, phys_addr_t paddr,
 			     arm_lpae_iopte prot, int lvl,
-			     arm_lpae_iopte *ptep)
+			     arm_lpae_iopte *ptep, arm_lpae_iopte *prev_ptep,
+			     bool flush)
 {
 	arm_lpae_iopte pte = prot;
 	struct io_pgtable_cfg *cfg = &data->iop.cfg;
 
-	if (iopte_leaf(*ptep, lvl)) {
 		/* We require an unmap first */
-		WARN_ON(!selftest_running);
+	if (*ptep & ARM_LPAE_PTE_VALID) {
+		BUG_ON(!suppress_map_failures);
 		return -EEXIST;
-	} else if (iopte_type(*ptep, lvl) == ARM_LPAE_PTE_TYPE_TABLE) {
-		/*
-		 * We need to unmap and free the old table before
-		 * overwriting it with a block entry.
-		 */
-		arm_lpae_iopte *tblp;
-		size_t sz = ARM_LPAE_BLOCK_SIZE(lvl, data);
-
-		tblp = ptep - ARM_LPAE_LVL_IDX(iova, lvl, data);
-		if (WARN_ON(__arm_lpae_unmap(data, iova, sz, lvl, tblp) != sz))
-			return -EINVAL;
 	}
 
 	if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_NS)
@@ -297,27 +399,82 @@
 	else
 		pte |= ARM_LPAE_PTE_TYPE_BLOCK;
 
-	pte |= ARM_LPAE_PTE_AF | ARM_LPAE_PTE_SH_IS;
+	pte |= ARM_LPAE_PTE_AF | ARM_LPAE_PTE_SH_OS;
 	pte |= pfn_to_iopte(paddr >> data->pg_shift, data);
 
+	*ptep = pte;
+
+	if (flush)
 	__arm_lpae_set_pte(ptep, pte, cfg);
+
+	if (prev_ptep)
+		iopte_tblcnt_add(prev_ptep, 1);
+
 	return 0;
 }
 
+struct map_state {
+	unsigned long iova_end;
+	unsigned int pgsize;
+	arm_lpae_iopte *pgtable;
+	arm_lpae_iopte *prev_pgtable;
+	arm_lpae_iopte *pte_start;
+	unsigned int num_pte;
+};
+
+/* The map state optimization works at level 3, the last (page) level */
+#define MAP_STATE_LVL 3
+
 static int __arm_lpae_map(struct arm_lpae_io_pgtable *data, unsigned long iova,
 			  phys_addr_t paddr, size_t size, arm_lpae_iopte prot,
-			  int lvl, arm_lpae_iopte *ptep)
+			  int lvl, arm_lpae_iopte *ptep,
+			  arm_lpae_iopte *prev_ptep, struct map_state *ms)
 {
 	arm_lpae_iopte *cptep, pte;
+	void *cookie = data->iop.cookie;
 	size_t block_size = ARM_LPAE_BLOCK_SIZE(lvl, data);
+	arm_lpae_iopte *pgtable = ptep;
 	struct io_pgtable_cfg *cfg = &data->iop.cfg;
 
 	/* Find our entry at the current level */
 	ptep += ARM_LPAE_LVL_IDX(iova, lvl, data);
 
 	/* If we can install a leaf entry at this level, then do so */
-	if (size == block_size && (size & cfg->pgsize_bitmap))
-		return arm_lpae_init_pte(data, iova, paddr, prot, lvl, ptep);
+	if (size == block_size && (size & cfg->pgsize_bitmap)) {
+		if (!ms)
+			return arm_lpae_init_pte(data, iova, paddr, prot, lvl,
+						 ptep, prev_ptep, true);
+
+		if (lvl == MAP_STATE_LVL) {
+			if (ms->pgtable)
+				pgtable_dma_sync_single_for_device(cfg,
+					__arm_lpae_dma_addr(ms->pte_start),
+					ms->num_pte * sizeof(*ptep),
+					DMA_TO_DEVICE);
+
+			ms->iova_end = round_down(iova, SZ_2M) + SZ_2M;
+			ms->pgtable = pgtable;
+			ms->prev_pgtable = prev_ptep;
+			ms->pgsize = size;
+			ms->pte_start = ptep;
+			ms->num_pte = 1;
+		} else {
+			/*
+			 * We have some map state from previous page
+			 * mappings, but we're about to set up a block
+			 * mapping.  Flush out the previous page mappings.
+			 */
+			if (ms->pgtable)
+				pgtable_dma_sync_single_for_device(cfg,
+					__arm_lpae_dma_addr(ms->pte_start),
+					ms->num_pte * sizeof(*ptep),
+					DMA_TO_DEVICE);
+			memset(ms, 0, sizeof(*ms));
+			ms = NULL;
+		}
+
+		return arm_lpae_init_pte(data, iova, paddr, prot, lvl, ptep,
+			prev_ptep, ms == NULL);
+	}
 
 	/* We can't allocate tables at the final level */
 	if (WARN_ON(lvl >= ARM_LPAE_MAX_LEVELS - 1))
@@ -327,7 +484,7 @@
 	pte = *ptep;
 	if (!pte) {
 		cptep = __arm_lpae_alloc_pages(1UL << data->pg_shift,
-					       GFP_ATOMIC, cfg);
+					       GFP_ATOMIC, cfg, cookie);
 		if (!cptep)
 			return -ENOMEM;
 
@@ -344,7 +501,8 @@
 	}
 
 	/* Rinse, repeat */
-	return __arm_lpae_map(data, iova, paddr, size, prot, lvl + 1, cptep);
+	return __arm_lpae_map(data, iova, paddr, size, prot, lvl + 1, cptep,
+			      ptep, ms);
 }
 
 static arm_lpae_iopte arm_lpae_prot_to_pte(struct arm_lpae_io_pgtable *data,
@@ -354,14 +512,22 @@
 
 	if (data->iop.fmt == ARM_64_LPAE_S1 ||
 	    data->iop.fmt == ARM_32_LPAE_S1) {
-		pte = ARM_LPAE_PTE_AP_UNPRIV | ARM_LPAE_PTE_nG;
+		pte = ARM_LPAE_PTE_nG;
 
-		if (!(prot & IOMMU_WRITE) && (prot & IOMMU_READ))
-			pte |= ARM_LPAE_PTE_AP_RDONLY;
+		if (prot & IOMMU_WRITE)
+			pte |= (prot & IOMMU_PRIV) ? ARM_LPAE_PTE_AP_PRIV_RW
+					: ARM_LPAE_PTE_AP_RW;
+		else
+			pte |= (prot & IOMMU_PRIV) ? ARM_LPAE_PTE_AP_PRIV_RO
+					: ARM_LPAE_PTE_AP_RO;
 
 		if (prot & IOMMU_CACHE)
 			pte |= (ARM_LPAE_MAIR_ATTR_IDX_CACHE
 				<< ARM_LPAE_PTE_ATTRINDX_SHIFT);
+
+		if (prot & IOMMU_DEVICE)
+			pte |= (ARM_LPAE_MAIR_ATTR_IDX_DEV <<
+				ARM_LPAE_PTE_ATTRINDX_SHIFT);
 	} else {
 		pte = ARM_LPAE_PTE_HAP_FAULT;
 		if (prot & IOMMU_READ)
@@ -372,6 +538,9 @@
 			pte |= ARM_LPAE_PTE_MEMATTR_OIWB;
 		else
 			pte |= ARM_LPAE_PTE_MEMATTR_NC;
+
+		if (prot & IOMMU_DEVICE)
+			pte |= ARM_LPAE_PTE_MEMATTR_DEV;
 	}
 
 	if (prot & IOMMU_NOEXEC)
@@ -380,20 +549,42 @@
 	return pte;
 }
 
+static inline arm_lpae_iopte *arm_lpae_get_table(
+		struct arm_lpae_io_pgtable *data, unsigned long iova)
+{
+	struct io_pgtable_cfg *cfg = &data->iop.cfg;
+
+	/*
+	 * iovas destined for TTBR1 have all bits set between the top of
+	 * the input address range (ias) and the sign-extension bit (sep).
+	 */
+	if (unlikely(cfg->quirks & IO_PGTABLE_QUIRK_ARM_TTBR1)) {
+		unsigned long mask = GENMASK(cfg->sep, cfg->ias);
+
+		if ((iova & mask) == mask)
+			return data->pgd[1];
+	}
+
+	return data->pgd[0];
+}
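
A TTBR1 iova is thus recognized by checking that every bit from the top of the input address range (ias) up to the sign-extension bit (sep) is set. A small sketch of that routing check, assuming 64-bit longs; the ias/sep values and the iova are hypothetical:

#include <stdio.h>

/* GENMASK(h, l) for 64-bit longs, as in the kernel */
#define GENMASK(h, l) \
	(((~0UL) << (l)) & (~0UL >> (63 - (h))))

int main(void)
{
	unsigned long ias = 36, sep = 39;	/* hypothetical cfg values */
	unsigned long mask = GENMASK(sep, ias);
	unsigned long iova = 0xfffffffffffff000UL;

	/* all bits [sep:ias] set -> the iova sign-extends into TTBR1 */
	printf("%s\n", (iova & mask) == mask ? "TTBR1" : "TTBR0");
	return 0;
}
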
+
 static int arm_lpae_map(struct io_pgtable_ops *ops, unsigned long iova,
 			phys_addr_t paddr, size_t size, int iommu_prot)
 {
 	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
-	arm_lpae_iopte *ptep = data->pgd;
+	arm_lpae_iopte *ptep;
 	int ret, lvl = ARM_LPAE_START_LVL(data);
 	arm_lpae_iopte prot;
 
+	ptep = arm_lpae_get_table(data, iova);
+
 	/* If no access, then nothing to do */
 	if (!(iommu_prot & (IOMMU_READ | IOMMU_WRITE)))
 		return 0;
 
 	prot = arm_lpae_prot_to_pte(data, iommu_prot);
-	ret = __arm_lpae_map(data, iova, paddr, size, prot, lvl, ptep);
+	ret = __arm_lpae_map(data, iova, paddr, size, prot, lvl, ptep, NULL,
+				NULL);
 	/*
 	 * Synchronise all PTE updates for the new mapping before there's
 	 * a chance for anything to kick off a table walk for the new iova.
@@ -403,6 +594,91 @@
 	return ret;
 }
 
+static int arm_lpae_map_sg(struct io_pgtable_ops *ops, unsigned long iova,
+			   struct scatterlist *sg, unsigned int nents,
+			   int iommu_prot, size_t *size)
+{
+	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
+	struct io_pgtable_cfg *cfg = &data->iop.cfg;
+	arm_lpae_iopte *ptep;
+	int lvl = ARM_LPAE_START_LVL(data);
+	arm_lpae_iopte prot;
+	struct scatterlist *s;
+	size_t mapped = 0;
+	int i, ret;
+	unsigned int min_pagesz;
+	struct map_state ms;
+
+	ptep = arm_lpae_get_table(data, iova);
+
+	/* If no access, then nothing to do */
+	if (!(iommu_prot & (IOMMU_READ | IOMMU_WRITE)))
+		goto out_err;
+
+	prot = arm_lpae_prot_to_pte(data, iommu_prot);
+
+	min_pagesz = 1 << __ffs(data->iop.cfg.pgsize_bitmap);
+
+	memset(&ms, 0, sizeof(ms));
+
+	for_each_sg(sg, s, nents, i) {
+		phys_addr_t phys = page_to_phys(sg_page(s)) + s->offset;
+		size_t size = s->length;
+
+		/*
+		 * We are mapping on IOMMU page boundaries, so offset within
+		 * the page must be 0. However, the IOMMU may support pages
+		 * smaller than PAGE_SIZE, so s->offset may still represent
+		 * an offset of that boundary within the CPU page.
+		 */
+		if (!IS_ALIGNED(s->offset, min_pagesz))
+			goto out_err;
+
+		while (size) {
+			size_t pgsize = iommu_pgsize(
+				data->iop.cfg.pgsize_bitmap, iova | phys, size);
+
+			if (ms.pgtable && (iova < ms.iova_end)) {
+				arm_lpae_iopte *ptep = ms.pgtable +
+					ARM_LPAE_LVL_IDX(iova, MAP_STATE_LVL,
+							 data);
+				arm_lpae_init_pte(
+					data, iova, phys, prot, MAP_STATE_LVL,
+					ptep, ms.prev_pgtable, false);
+				ms.num_pte++;
+			} else {
+				ret = __arm_lpae_map(data, iova, phys, pgsize,
+						prot, lvl, ptep, NULL, &ms);
+				if (ret)
+					goto out_err;
+			}
+
+			iova += pgsize;
+			mapped += pgsize;
+			phys += pgsize;
+			size -= pgsize;
+		}
+	}
+
+	if (ms.pgtable)
+		pgtable_dma_sync_single_for_device(cfg,
+			__arm_lpae_dma_addr(ms.pte_start),
+			ms.num_pte * sizeof(*ms.pte_start),
+			DMA_TO_DEVICE);
+	/*
+	 * Synchronise all PTE updates for the new mapping before there's
+	 * a chance for anything to kick off a table walk for the new iova.
+	 */
+	wmb();
+
+	return mapped;
+
+out_err:
+	/* Return the size of the partial mapping so that it can be undone */
+	*size = mapped;
+	return 0;
+}
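
map_sg batches the PTE cache maintenance: while consecutive pages fall inside the same 2M-aligned window (one last-level table), arm_lpae_init_pte() is called with flush disabled and the whole run of PTEs is synced in a single DMA sync. A sketch of the window computation, with round_down() open-coded and a hypothetical iova:

#include <stdio.h>

#define SZ_2M	(2UL << 20)

int main(void)
{
	unsigned long iova = 0x1234000;	/* hypothetical next iova to map */

	/* the window ends at the next 2M boundary, i.e. the end of the
	 * level-3 table currently being filled */
	unsigned long end = (iova & ~(SZ_2M - 1)) + SZ_2M;

	printf("batch window: [0x%lx, 0x%lx)\n", iova, end);
	return 0;
}
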
+
 static void __arm_lpae_free_pgtable(struct arm_lpae_io_pgtable *data, int lvl,
 				    arm_lpae_iopte *ptep)
 {
@@ -422,6 +698,10 @@
 	else
 		end = (void *)ptep + table_size;
 
+	/* Only leaf entries at the last level */
+	if (lvl == ARM_LPAE_MAX_LEVELS - 1)
+		goto end;
+
 	while (ptep != end) {
 		arm_lpae_iopte pte = *ptep++;
 
@@ -431,21 +711,27 @@
 		__arm_lpae_free_pgtable(data, lvl + 1, iopte_deref(pte, data));
 	}
 
-	__arm_lpae_free_pages(start, table_size, &data->iop.cfg);
+end:
+	__arm_lpae_free_pages(start, table_size, &data->iop.cfg,
+				data->iop.cookie);
 }
 
 static void arm_lpae_free_pgtable(struct io_pgtable *iop)
 {
 	struct arm_lpae_io_pgtable *data = io_pgtable_to_data(iop);
 
-	__arm_lpae_free_pgtable(data, ARM_LPAE_START_LVL(data), data->pgd);
+	__arm_lpae_free_pgtable(data, ARM_LPAE_START_LVL(data), data->pgd[0]);
+	if (data->pgd[1])
+		__arm_lpae_free_pgtable(data, ARM_LPAE_START_LVL(data),
+			data->pgd[1]);
 	kfree(data);
 }
 
 static int arm_lpae_split_blk_unmap(struct arm_lpae_io_pgtable *data,
 				    unsigned long iova, size_t size,
 				    arm_lpae_iopte prot, int lvl,
-				    arm_lpae_iopte *ptep, size_t blk_size)
+				    arm_lpae_iopte *ptep,
+				    arm_lpae_iopte *prev_ptep, size_t blk_size)
 {
 	unsigned long blk_start, blk_end;
 	phys_addr_t blk_paddr;
@@ -455,6 +741,7 @@
 	blk_start = iova & ~(blk_size - 1);
 	blk_end = blk_start + blk_size;
 	blk_paddr = iopte_to_pfn(*ptep, data) << data->pg_shift;
+	size = ARM_LPAE_BLOCK_SIZE(lvl + 1, data);
 
 	for (; blk_start < blk_end; blk_start += size, blk_paddr += size) {
 		arm_lpae_iopte *tablep;
@@ -466,7 +753,7 @@
 		/* __arm_lpae_map expects a pointer to the start of the table */
 		tablep = &table - ARM_LPAE_LVL_IDX(blk_start, lvl, data);
 		if (__arm_lpae_map(data, blk_start, blk_paddr, size, prot, lvl,
-				   tablep) < 0) {
+				   tablep, prev_ptep, NULL) < 0) {
 			if (table) {
 				/* Free the table we allocated */
 				tablep = iopte_deref(table, data);
@@ -477,17 +764,15 @@
 	}
 
 	__arm_lpae_set_pte(ptep, table, cfg);
-	iova &= ~(blk_size - 1);
-	cfg->tlb->tlb_add_flush(iova, blk_size, true, data->iop.cookie);
 	return size;
 }
 
 static int __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
 			    unsigned long iova, size_t size, int lvl,
-			    arm_lpae_iopte *ptep)
+			    arm_lpae_iopte *ptep, arm_lpae_iopte *prev_ptep)
 {
 	arm_lpae_iopte pte;
-	const struct iommu_gather_ops *tlb = data->iop.cfg.tlb;
+	struct io_pgtable_cfg *cfg = &data->iop.cfg;
 	void *cookie = data->iop.cookie;
 	size_t blk_size = ARM_LPAE_BLOCK_SIZE(lvl, data);
 
@@ -504,15 +789,45 @@
 
 		if (!iopte_leaf(pte, lvl)) {
 			/* Also flush any partial walks */
-			tlb->tlb_add_flush(iova, size, false, cookie);
-			tlb->tlb_sync(cookie);
 			ptep = iopte_deref(pte, data);
 			__arm_lpae_free_pgtable(data, lvl + 1, ptep);
-		} else {
-			tlb->tlb_add_flush(iova, size, true, cookie);
 		}
 
 		return size;
+	} else if ((lvl == ARM_LPAE_MAX_LEVELS - 2) && !iopte_leaf(pte, lvl)) {
+		arm_lpae_iopte *table = iopte_deref(pte, data);
+		arm_lpae_iopte *table_base = table;
+		int tl_offset = ARM_LPAE_LVL_IDX(iova, lvl + 1, data);
+		int entry_size = (1 << data->pg_shift);
+		int max_entries = ARM_LPAE_BLOCK_SIZE(lvl, data) / entry_size;
+		int entries = min_t(int, size / entry_size,
+			max_entries - tl_offset);
+		int table_len = entries * sizeof(*table);
+
+		/*
+		 * This isn't a block mapping, so it must be a table
+		 * mapping; since it's the 2nd-to-last level, the next
+		 * level has to be all page mappings. Zero them all out
+		 * in one fell swoop.
+		 */
+
+		table += tl_offset;
+
+		memset(table, 0, table_len);
+		pgtable_dma_sync_single_for_device(cfg,
+					   __arm_lpae_dma_addr(table),
+					   table_len, DMA_TO_DEVICE);
+
+		iopte_tblcnt_sub(ptep, entries);
+		if (!iopte_tblcnt(*ptep)) {
+			/* no valid mappings left under this table. free it. */
+			__arm_lpae_set_pte(ptep, 0, cfg);
+			io_pgtable_free_pages_exact(
+				&data->iop.cfg, cookie, table_base,
+				max_entries * sizeof(*table_base));
+		}
+
+		return entries * entry_size;
 	} else if (iopte_leaf(pte, lvl)) {
 		/*
 		 * Insert a table at the next level to map the old region,
@@ -520,63 +835,150 @@
 		 */
 		return arm_lpae_split_blk_unmap(data, iova, size,
 						iopte_prot(pte), lvl, ptep,
+						prev_ptep,
 						blk_size);
 	}
 
 	/* Keep on walkin' */
+	prev_ptep = ptep;
 	ptep = iopte_deref(pte, data);
-	return __arm_lpae_unmap(data, iova, size, lvl + 1, ptep);
+	return __arm_lpae_unmap(data, iova, size, lvl + 1, ptep, prev_ptep);
 }
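
The 2nd-to-last-level fast path above clears a run of last-level PTEs with one memset instead of recursing per page; the run length is bounded by both the requested size and the space left in the table. A worked example with hypothetical numbers:

#include <stdio.h>

int main(void)
{
	int pg_shift = 12;			/* 4K granule */
	int entry_size = 1 << pg_shift;		/* one PTE maps 4K */
	int max_entries = 512;			/* 2M block / 4K entries */
	int tl_offset = 300;			/* hypothetical start index */
	long size = 4L << 20;			/* 4M requested */

	long by_size = size / entry_size;	/* 1024 entries span it */
	int room = max_entries - tl_offset;	/* 212 left in this table */
	int entries = by_size < room ? (int)by_size : room;

	printf("memset %d entries, unmap returns %ld bytes\n",
	       entries, (long)entries * entry_size);
	return 0;
}
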
 
-static int arm_lpae_unmap(struct io_pgtable_ops *ops, unsigned long iova,
+static size_t arm_lpae_unmap(struct io_pgtable_ops *ops, unsigned long iova,
 			  size_t size)
 {
-	size_t unmapped;
+	size_t unmapped = 0;
 	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
 	struct io_pgtable *iop = &data->iop;
-	arm_lpae_iopte *ptep = data->pgd;
+	arm_lpae_iopte *ptep;
 	int lvl = ARM_LPAE_START_LVL(data);
 
-	unmapped = __arm_lpae_unmap(data, iova, size, lvl, ptep);
+	ptep = arm_lpae_get_table(data, iova);
+
+	while (unmapped < size) {
+		size_t ret, size_to_unmap, remaining;
+
+		remaining = (size - unmapped);
+		size_to_unmap = remaining < SZ_2M
+			? remaining
+			: iommu_pgsize(data->iop.cfg.pgsize_bitmap, iova,
+								remaining);
+		ret = __arm_lpae_unmap(data, iova, size_to_unmap, lvl, ptep,
+				       NULL);
+		if (ret == 0)
+			break;
+		unmapped += ret;
+		iova += ret;
+	}
 	if (unmapped)
-		iop->cfg.tlb->tlb_sync(iop->cookie);
+		iop->cfg.tlb->tlb_flush_all(iop->cookie);
 
 	return unmapped;
 }
 
-static phys_addr_t arm_lpae_iova_to_phys(struct io_pgtable_ops *ops,
-					 unsigned long iova)
+static int arm_lpae_iova_to_pte(struct arm_lpae_io_pgtable *data,
+				unsigned long iova, int *plvl_ret,
+				arm_lpae_iopte *ptep_ret)
 {
-	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
-	arm_lpae_iopte pte, *ptep = data->pgd;
-	int lvl = ARM_LPAE_START_LVL(data);
+	arm_lpae_iopte pte, *ptep;
+
+	ptep = arm_lpae_get_table(data, iova);
+
+	*plvl_ret = ARM_LPAE_START_LVL(data);
+	*ptep_ret = 0;
 
 	do {
 		/* Valid IOPTE pointer? */
 		if (!ptep)
-			return 0;
+			return -EINVAL;
 
 		/* Grab the IOPTE we're interested in */
-		pte = *(ptep + ARM_LPAE_LVL_IDX(iova, lvl, data));
+		pte = *(ptep + ARM_LPAE_LVL_IDX(iova, *plvl_ret, data));
 
 		/* Valid entry? */
 		if (!pte)
-			return 0;
+			return -EINVAL;
 
 		/* Leaf entry? */
-		if (iopte_leaf(pte,lvl))
+		if (iopte_leaf(pte, *plvl_ret))
 			goto found_translation;
 
 		/* Take it to the next level */
 		ptep = iopte_deref(pte, data);
-	} while (++lvl < ARM_LPAE_MAX_LEVELS);
+	} while (++(*plvl_ret) < ARM_LPAE_MAX_LEVELS);
 
 	/* Ran out of page tables to walk */
-	return 0;
+	return -EINVAL;
 
 found_translation:
-	iova &= ((1 << data->pg_shift) - 1);
-	return ((phys_addr_t)iopte_to_pfn(pte,data) << data->pg_shift) | iova;
+	*ptep_ret = pte;
+	return 0;
+}
+
+static uint64_t arm_lpae_iova_get_pte(struct io_pgtable_ops *ops,
+					 unsigned long iova)
+{
+	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
+	arm_lpae_iopte pte;
+	int lvl;
+
+	if (!arm_lpae_iova_to_pte(data, iova, &lvl, &pte))
+		return pte;
+
+	return 0;
+}
+
+static phys_addr_t arm_lpae_iova_to_phys(struct io_pgtable_ops *ops,
+					 unsigned long iova)
+{
+	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
+	arm_lpae_iopte pte;
+	int lvl;
+	phys_addr_t phys = 0;
+
+	if (!arm_lpae_iova_to_pte(data, iova, &lvl, &pte)) {
+		iova &= ((1 << ARM_LPAE_LVL_SHIFT(lvl, data)) - 1);
+		phys = ((phys_addr_t)iopte_to_pfn(pte, data)
+				<< data->pg_shift) | iova;
+	}
+
+	return phys;
+}
+
+static bool __arm_lpae_is_iova_coherent(struct arm_lpae_io_pgtable *data,
+				    arm_lpae_iopte *ptep)
+{
+	if (data->iop.fmt == ARM_64_LPAE_S1 ||
+	    data->iop.fmt == ARM_32_LPAE_S1) {
+		int attr_idx = (*ptep & (ARM_LPAE_PTE_ATTRINDX_MASK <<
+					ARM_LPAE_PTE_ATTRINDX_SHIFT)) >>
+					ARM_LPAE_PTE_ATTRINDX_SHIFT;
+		if ((attr_idx == ARM_LPAE_MAIR_ATTR_IDX_CACHE) &&
+		   (((*ptep & ARM_LPAE_PTE_SH_MASK) == ARM_LPAE_PTE_SH_IS)
+		     ||
+		     (*ptep & ARM_LPAE_PTE_SH_MASK) == ARM_LPAE_PTE_SH_OS))
+			return true;
+	} else {
+		if (*ptep & ARM_LPAE_PTE_MEMATTR_OIWB)
+			return true;
+	}
+
+	return false;
+}
+
+static bool arm_lpae_is_iova_coherent(struct io_pgtable_ops *ops,
+					 unsigned long iova)
+{
+	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
+	arm_lpae_iopte pte;
+	int lvl;
+	bool ret = false;
+
+	if (!arm_lpae_iova_to_pte(data, iova, &lvl, &pte))
+		ret = __arm_lpae_is_iova_coherent(data, &pte);
+
+	return ret;
 }
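
For stage-1 tables, an iova counts as coherent when its PTE selects the cached MAIR index and an inner- or outer-shareable attribute. A standalone sketch of the attribute decode; the constants mirror the ARM_LPAE_PTE_* defines above and the PTE value is hypothetical:

#include <stdint.h>
#include <stdio.h>

#define ATTRINDX_SHIFT	2
#define ATTRINDX_MASK	0x7ULL
#define SH_SHIFT	8
#define SH_MASK		0x3ULL
#define SH_OS		2
#define SH_IS		3
#define ATTR_IDX_CACHE	1	/* MAIR slot programmed write-back */

int main(void)
{
	/* hypothetical S1 page PTE: cached attr index, inner shareable */
	uint64_t pte = ((uint64_t)ATTR_IDX_CACHE << ATTRINDX_SHIFT) |
		       ((uint64_t)SH_IS << SH_SHIFT);
	unsigned int attr = (pte >> ATTRINDX_SHIFT) & ATTRINDX_MASK;
	unsigned int sh = (pte >> SH_SHIFT) & SH_MASK;

	printf("coherent: %d\n",
	       attr == ATTR_IDX_CACHE && (sh == SH_IS || sh == SH_OS));
	return 0;
}
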
 
 static void arm_lpae_restrict_pgsizes(struct io_pgtable_cfg *cfg)
@@ -648,17 +1050,86 @@
 
 	/* Calculate the actual size of our pgd (without concatenation) */
 	pgd_bits = va_bits - (data->bits_per_level * (data->levels - 1));
+	data->pgd_bits = pgd_bits;
 	data->pgd_size = 1UL << (pgd_bits + ilog2(sizeof(arm_lpae_iopte)));
 
 	data->iop.ops = (struct io_pgtable_ops) {
 		.map		= arm_lpae_map,
+		.map_sg		= arm_lpae_map_sg,
 		.unmap		= arm_lpae_unmap,
 		.iova_to_phys	= arm_lpae_iova_to_phys,
+		.is_iova_coherent = arm_lpae_is_iova_coherent,
+		.iova_to_pte	= arm_lpae_iova_get_pte,
 	};
 
 	return data;
 }
 
+static u64 arm64_lpae_setup_ttbr1(struct io_pgtable_cfg *cfg,
+		struct arm_lpae_io_pgtable *data)
+{
+	u64 reg;
+
+	/* If TTBR1 is disabled, disable speculative walks through the TTBR1 */
+	if (!(cfg->quirks & IO_PGTABLE_QUIRK_ARM_TTBR1)) {
+		reg = ARM_LPAE_TCR_EPD1;
+		reg |= (ARM_LPAE_TCR_SEP_UPSTREAM << ARM_LPAE_TCR_SEP_SHIFT);
+		return reg;
+	}
+
+	if (cfg->iommu_dev && cfg->iommu_dev->archdata.dma_coherent)
+		reg = (ARM_LPAE_TCR_SH_OS << ARM_LPAE_TCR_SH1_SHIFT) |
+			(ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_IRGN1_SHIFT) |
+			(ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_ORGN1_SHIFT);
+	else
+		reg = (ARM_LPAE_TCR_SH_OS << ARM_LPAE_TCR_SH1_SHIFT) |
+			(ARM_LPAE_TCR_RGN_NC << ARM_LPAE_TCR_IRGN1_SHIFT) |
+			(ARM_LPAE_TCR_RGN_NC << ARM_LPAE_TCR_ORGN1_SHIFT);
+
+	switch (1 << data->pg_shift) {
+	case SZ_4K:
+		reg |= (ARM_LPAE_TCR_TG1_4K << 30);
+		break;
+	case SZ_16K:
+		reg |= (ARM_LPAE_TCR_TG1_16K << 30);
+		break;
+	case SZ_64K:
+		reg |= (ARM_LPAE_TCR_TG1_64K << 30);
+		break;
+	}
+
+	/* Set T1SZ */
+	reg |= (64ULL - cfg->ias) << ARM_LPAE_TCR_T1SZ_SHIFT;
+
+	switch (cfg->sep) {
+	case 31:
+		reg |= (ARM_LPAE_TCR_SEP_31 << ARM_LPAE_TCR_SEP_SHIFT);
+		break;
+	case 35:
+		reg |= (ARM_LPAE_TCR_SEP_35 << ARM_LPAE_TCR_SEP_SHIFT);
+		break;
+	case 39:
+		reg |= (ARM_LPAE_TCR_SEP_39 << ARM_LPAE_TCR_SEP_SHIFT);
+		break;
+	case 41:
+		reg |= (ARM_LPAE_TCR_SEP_41 << ARM_LPAE_TCR_SEP_SHIFT);
+		break;
+	case 43:
+		reg |= (ARM_LPAE_TCR_SEP_43 << ARM_LPAE_TCR_SEP_SHIFT);
+		break;
+	case 47:
+		reg |= (ARM_LPAE_TCR_SEP_47 << ARM_LPAE_TCR_SEP_SHIFT);
+		break;
+	case 48:
+	default:
+		reg |= (ARM_LPAE_TCR_SEP_UPSTREAM << ARM_LPAE_TCR_SEP_SHIFT);
+		break;
+	}
+
+	return reg;
+}
+
 static struct io_pgtable *
 arm_64_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
 {
@@ -669,9 +1140,14 @@
 		return NULL;
 
 	/* TCR */
-	reg = (ARM_LPAE_TCR_SH_IS << ARM_LPAE_TCR_SH0_SHIFT) |
+	if (cfg->quirks & IO_PGTABLE_QUIRK_PAGE_TABLE_COHERENT)
+		reg = (ARM_LPAE_TCR_SH_OS << ARM_LPAE_TCR_SH0_SHIFT) |
 	      (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_IRGN0_SHIFT) |
 	      (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_ORGN0_SHIFT);
+	else
+		reg = (ARM_LPAE_TCR_SH_OS << ARM_LPAE_TCR_SH0_SHIFT) |
+			(ARM_LPAE_TCR_RGN_NC << ARM_LPAE_TCR_IRGN0_SHIFT) |
+			(ARM_LPAE_TCR_RGN_NC << ARM_LPAE_TCR_ORGN0_SHIFT);
 
 	switch (1 << data->pg_shift) {
 	case SZ_4K:
@@ -710,8 +1186,9 @@
 
 	reg |= (64ULL - cfg->ias) << ARM_LPAE_TCR_T0SZ_SHIFT;
 
-	/* Disable speculative walks through TTBR1 */
-	reg |= ARM_LPAE_TCR_EPD1;
+	/* Bring in the TTBR1 configuration */
+	reg |= arm64_lpae_setup_ttbr1(cfg, data);
+
 	cfg->arm_lpae_s1_cfg.tcr = reg;
 
 	/* MAIRs */
@@ -726,16 +1203,33 @@
 	cfg->arm_lpae_s1_cfg.mair[1] = 0;
 
 	/* Looking good; allocate a pgd */
-	data->pgd = __arm_lpae_alloc_pages(data->pgd_size, GFP_KERNEL, cfg);
-	if (!data->pgd)
+	data->pgd[0] = __arm_lpae_alloc_pages(data->pgd_size, GFP_KERNEL, cfg,
+		cookie);
+	if (!data->pgd[0])
 		goto out_free_data;
 
+
+	if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_TTBR1) {
+		data->pgd[1] = __arm_lpae_alloc_pages(data->pgd_size,
+			GFP_KERNEL, cfg, cookie);
+		if (!data->pgd[1]) {
+			__arm_lpae_free_pages(data->pgd[0], data->pgd_size, cfg,
+				cookie);
+			goto out_free_data;
+		}
+	} else {
+		data->pgd[1] = NULL;
+	}
+
 	/* Ensure the empty pgd is visible before any actual TTBR write */
 	wmb();
 
 	/* TTBRs */
-	cfg->arm_lpae_s1_cfg.ttbr[0] = virt_to_phys(data->pgd);
-	cfg->arm_lpae_s1_cfg.ttbr[1] = 0;
+	cfg->arm_lpae_s1_cfg.ttbr[0] = virt_to_phys(data->pgd[0]);
+
+	if (data->pgd[1])
+		cfg->arm_lpae_s1_cfg.ttbr[1] = virt_to_phys(data->pgd[1]);
+
 	return &data->iop;
 
 out_free_data:
@@ -815,15 +1309,16 @@
 	cfg->arm_lpae_s2_cfg.vtcr = reg;
 
 	/* Allocate pgd pages */
-	data->pgd = __arm_lpae_alloc_pages(data->pgd_size, GFP_KERNEL, cfg);
-	if (!data->pgd)
+	data->pgd[0] = __arm_lpae_alloc_pages(data->pgd_size, GFP_KERNEL, cfg,
+		cookie);
+	if (!data->pgd[0])
 		goto out_free_data;
 
 	/* Ensure the empty pgd is visible before any actual TTBR write */
 	wmb();
 
 	/* VTTBR */
-	cfg->arm_lpae_s2_cfg.vttbr = virt_to_phys(data->pgd);
+	cfg->arm_lpae_s2_cfg.vttbr = virt_to_phys(data->pgd[0]);
 	return &data->iop;
 
 out_free_data:
@@ -921,16 +1416,54 @@
 		cfg->pgsize_bitmap, cfg->ias);
 	pr_err("data: %d levels, 0x%zx pgd_size, %lu pg_shift, %lu bits_per_level, pgd @ %p\n",
 		data->levels, data->pgd_size, data->pg_shift,
-		data->bits_per_level, data->pgd);
+		data->bits_per_level, data->pgd[0]);
 }
 
 #define __FAIL(ops, i)	({						\
 		WARN(1, "selftest: test failed for fmt idx %d\n", (i));	\
 		arm_lpae_dump_ops(ops);					\
 		selftest_running = false;				\
+		suppress_map_failures = false;				\
 		-EFAULT;						\
 })
 
+/*
+ * Returns true if there's any mapping in the given iova range in ops.
+ */
+static bool arm_lpae_range_has_mapping(struct io_pgtable_ops *ops,
+				       unsigned long iova_start, size_t size)
+{
+	unsigned long iova = iova_start;
+
+	while (iova < (iova_start + size)) {
+		if (ops->iova_to_phys(ops, iova + 42))
+			return true;
+		iova += SZ_4K;
+	}
+	return false;
+}
+
+/*
+ * Returns true if the iova range is successfully mapped to the contiguous
+ * phys range in ops.
+ */
+static bool arm_lpae_range_has_specific_mapping(struct io_pgtable_ops *ops,
+						const unsigned long iova_start,
+						const phys_addr_t phys_start,
+						const size_t size)
+{
+	unsigned long iova = iova_start;
+	phys_addr_t phys = phys_start;
+
+	while (iova < (iova_start + size)) {
+		if (ops->iova_to_phys(ops, iova + 42) != (phys + 42))
+			return false;
+		iova += SZ_4K;
+		phys += SZ_4K;
+	}
+	return true;
+}
+
 static int __init arm_lpae_run_tests(struct io_pgtable_cfg *cfg)
 {
 	static const enum io_pgtable_fmt fmts[] = {
@@ -938,7 +1471,7 @@
 		ARM_64_LPAE_S2,
 	};
 
-	int i, j;
+	int i, j, k;
 	unsigned long iova;
 	size_t size;
 	struct io_pgtable_ops *ops;
@@ -946,6 +1479,9 @@
 	selftest_running = true;
 
 	for (i = 0; i < ARRAY_SIZE(fmts); ++i) {
+		unsigned long test_sg_sizes[] = { SZ_4K, SZ_64K, SZ_2M,
+						  SZ_1M * 12, SZ_1M * 20 };
+
 		cfg_cookie = cfg;
 		ops = alloc_io_pgtable_ops(fmts[i], cfg, cfg);
 		if (!ops) {
@@ -954,16 +1490,11 @@
 		}
 
 		/*
-		 * Initial sanity checks.
-		 * Empty page tables shouldn't provide any translations.
+		 * Initial sanity checks.  Empty page tables shouldn't
+		 * provide any translations.  TODO: check entire supported
+		 * range for these ops rather than first 2G
 		 */
-		if (ops->iova_to_phys(ops, 42))
-			return __FAIL(ops, i);
-
-		if (ops->iova_to_phys(ops, SZ_1G + 42))
-			return __FAIL(ops, i);
-
-		if (ops->iova_to_phys(ops, SZ_2G + 42))
+		if (arm_lpae_range_has_mapping(ops, 0, SZ_2G))
 			return __FAIL(ops, i);
 
 		/*
@@ -980,12 +1511,15 @@
 							    IOMMU_CACHE))
 				return __FAIL(ops, i);
 
+			suppress_map_failures = true;
 			/* Overlapping mappings */
 			if (!ops->map(ops, iova, iova + size, size,
 				      IOMMU_READ | IOMMU_NOEXEC))
 				return __FAIL(ops, i);
+			suppress_map_failures = false;
 
-			if (ops->iova_to_phys(ops, iova + 42) != (iova + 42))
+			if (!arm_lpae_range_has_specific_mapping(ops, iova,
+								 iova, size))
 				return __FAIL(ops, i);
 
 			iova += SZ_1G;
@@ -998,11 +1532,15 @@
 		if (ops->unmap(ops, SZ_1G + size, size) != size)
 			return __FAIL(ops, i);
 
+		if (arm_lpae_range_has_mapping(ops, SZ_1G + size, size))
+			return __FAIL(ops, i);
+
 		/* Remap of partial unmap */
 		if (ops->map(ops, SZ_1G + size, size, size, IOMMU_READ))
 			return __FAIL(ops, i);
 
-		if (ops->iova_to_phys(ops, SZ_1G + size + 42) != (size + 42))
+		if (!arm_lpae_range_has_specific_mapping(ops, SZ_1G + size,
+							 size, size))
 			return __FAIL(ops, i);
 
 		/* Full unmap */
@@ -1024,15 +1562,107 @@
 			if (ops->iova_to_phys(ops, iova + 42) != (iova + 42))
 				return __FAIL(ops, i);
 
+			if (ops->unmap(ops, iova, size) != size)
+				return __FAIL(ops, i);
+
 			iova += SZ_1G;
 			j++;
 			j = find_next_bit(&cfg->pgsize_bitmap, BITS_PER_LONG, j);
 		}
 
+		if (arm_lpae_range_has_mapping(ops, 0, SZ_2G))
+			return __FAIL(ops, i);
+
+		if ((cfg->pgsize_bitmap & SZ_2M) &&
+		    (cfg->pgsize_bitmap & SZ_4K)) {
+			/* mixed block + page mappings */
+			iova = 0;
+			if (ops->map(ops, iova, iova, SZ_2M, IOMMU_READ))
+				return __FAIL(ops, i);
+
+			if (ops->map(ops, iova + SZ_2M, iova + SZ_2M, SZ_4K,
+				     IOMMU_READ))
+				return __FAIL(ops, i);
+
+			if (ops->iova_to_phys(ops, iova + 42) != (iova + 42))
+				return __FAIL(ops, i);
+
+			if (ops->iova_to_phys(ops, iova + SZ_2M + 42) !=
+			    (iova + SZ_2M + 42))
+				return __FAIL(ops, i);
+
+			/* unmap both mappings at once */
+			if (ops->unmap(ops, iova, SZ_2M + SZ_4K) !=
+			    (SZ_2M + SZ_4K))
+				return __FAIL(ops, i);
+
+			if (arm_lpae_range_has_mapping(ops, 0, SZ_2G))
+				return __FAIL(ops, i);
+		}
+
+		/* map_sg */
+		for (j = 0; j < ARRAY_SIZE(test_sg_sizes); ++j) {
+			size_t mapped;
+			size_t unused;
+			struct page *page;
+			phys_addr_t page_phys;
+			struct sg_table table;
+			struct scatterlist *sg;
+			unsigned long total_size = test_sg_sizes[j];
+			int chunk_size = 1UL << find_first_bit(
+				&cfg->pgsize_bitmap, BITS_PER_LONG);
+			int nents = total_size / chunk_size;
+
+			if (total_size < chunk_size)
+				continue;
+
+			page = alloc_pages(GFP_KERNEL, get_order(chunk_size));
+			BUG_ON(!page);
+			page_phys = page_to_phys(page);
+
+			iova = 0;
+			BUG_ON(sg_alloc_table(&table, nents, GFP_KERNEL));
+			for_each_sg(table.sgl, sg, table.nents, k)
+				sg_set_page(sg, page, chunk_size, 0);
+
+			mapped = ops->map_sg(ops, iova, table.sgl, table.nents,
+					     IOMMU_READ | IOMMU_WRITE, &unused);
+
+			if (mapped != total_size)
+				return __FAIL(ops, i);
+
+			if (!arm_lpae_range_has_mapping(ops, iova, total_size))
+				return __FAIL(ops, i);
+
+			if (arm_lpae_range_has_mapping(ops, iova + total_size,
+					      SZ_2G - (iova + total_size)))
+				return __FAIL(ops, i);
+
+			for_each_sg(table.sgl, sg, table.nents, k) {
+				dma_addr_t newphys =
+					ops->iova_to_phys(ops, iova + 42);
+				if (newphys != (page_phys + 42))
+					return __FAIL(ops, i);
+				iova += chunk_size;
+			}
+
+			if (ops->unmap(ops, 0, total_size) != total_size)
+				return __FAIL(ops, i);
+
+			if (arm_lpae_range_has_mapping(ops, 0, SZ_2G))
+				return __FAIL(ops, i);
+
+			sg_free_table(&table);
+			__free_pages(page, get_order(chunk_size));
+		}
+
+		if (arm_lpae_range_has_mapping(ops, 0, SZ_2G))
+			return __FAIL(ops, i);
+
 		free_io_pgtable_ops(ops);
 	}
 
-	selftest_running = false;
+	suppress_map_failures = false;
 	return 0;
 }
 
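Note on the SEP field programmed in the TCR switch above: once IO_PGTABLE_QUIRK_ARM_TTBR1 is in play, the separation bit decides whether a given IOVA is translated through TTBR0 or TTBR1. A minimal standalone sketch of that selection rule, assuming the usual convention that addresses with the chosen bit set are steered to TTBR1 (the function and constant names here are illustrative, not the driver's):

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	/* With separation bit N: bit-N-clear IOVAs walk TTBR0, bit-N-set walk TTBR1. */
	static bool uses_ttbr1(uint64_t iova, unsigned int sep_bit)
	{
		return (iova >> sep_bit) & 1;
	}

	int main(void)
	{
		/* sep = 35 mirrors the ARM_LPAE_TCR_SEP_35 case in the switch above */
		printf("ttbr%d\n", uses_ttbr1(0x123456789ULL, 35)); /* ttbr0 */
		printf("ttbr%d\n", uses_ttbr1(1ULL << 35, 35));     /* ttbr1 */
		return 0;
	}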
diff -ruw linux-4.4.115/drivers/iommu/io-pgtable.c linux-4.4.115-fbx/drivers/iommu/io-pgtable.c
--- linux-4.4.115/drivers/iommu/io-pgtable.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/iommu/io-pgtable.c	2019-01-22 16:16:24.063251478 +0100
@@ -18,12 +18,25 @@
  * Author: Will Deacon <will.deacon@arm.com>
  */
 
+#define pr_fmt(fmt)	"io-pgtable: " fmt
+
 #include <linux/bug.h>
 #include <linux/kernel.h>
 #include <linux/types.h>
+#include <linux/iommu.h>
+#include <linux/debugfs.h>
+#include <linux/atomic.h>
+#include <linux/module.h>
 
 #include "io-pgtable.h"
 
+extern struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s1_init_fns;
+extern struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s2_init_fns;
+extern struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s1_init_fns;
+extern struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s2_init_fns;
+extern struct io_pgtable_init_fns io_pgtable_arm_msm_secure_init_fns;
+extern struct io_pgtable_init_fns io_pgtable_av8l_fast_init_fns;
+
 static const struct io_pgtable_init_fns *
 io_pgtable_init_table[IO_PGTABLE_NUM_FMTS] =
 {
@@ -33,8 +46,16 @@
 	[ARM_64_LPAE_S1] = &io_pgtable_arm_64_lpae_s1_init_fns,
 	[ARM_64_LPAE_S2] = &io_pgtable_arm_64_lpae_s2_init_fns,
 #endif
+#ifdef CONFIG_MSM_TZ_SMMU
+	[ARM_MSM_SECURE] = &io_pgtable_arm_msm_secure_init_fns,
+#endif
+#ifdef CONFIG_IOMMU_IO_PGTABLE_FAST
+	[ARM_V8L_FAST] = &io_pgtable_av8l_fast_init_fns,
+#endif
 };
 
+static struct dentry *io_pgtable_top;
+
 struct io_pgtable_ops *alloc_io_pgtable_ops(enum io_pgtable_fmt fmt,
 					    struct io_pgtable_cfg *cfg,
 					    void *cookie)
@@ -72,6 +93,58 @@
 		return;
 
 	iop = container_of(ops, struct io_pgtable, ops);
-	iop->cfg.tlb->tlb_flush_all(iop->cookie);
 	io_pgtable_init_table[iop->fmt]->free(iop);
 }
+
+static atomic_t pages_allocated;
+
+void *io_pgtable_alloc_pages_exact(struct io_pgtable_cfg *cfg, void *cookie,
+				   size_t size, gfp_t gfp_mask)
+{
+	void *ret;
+
+	if (cfg->tlb->alloc_pages_exact)
+		ret = cfg->tlb->alloc_pages_exact(cookie, size, gfp_mask);
+	else
+		ret = alloc_pages_exact(size, gfp_mask);
+
+	if (likely(ret))
+		atomic_add(1 << get_order(size), &pages_allocated);
+
+	return ret;
+}
+
+void io_pgtable_free_pages_exact(struct io_pgtable_cfg *cfg, void *cookie,
+				 void *virt, size_t size)
+{
+	if (cfg->tlb->free_pages_exact)
+		cfg->tlb->free_pages_exact(cookie, virt, size);
+	else
+		free_pages_exact(virt, size);
+
+	atomic_sub(1 << get_order(size), &pages_allocated);
+}
+
+static int io_pgtable_init(void)
+{
+	io_pgtable_top = debugfs_create_dir("io-pgtable", iommu_debugfs_top);
+
+	if (!io_pgtable_top)
+		return -ENODEV;
+
+	if (!debugfs_create_atomic_t("pages", 0600,
+				     io_pgtable_top, &pages_allocated)) {
+		debugfs_remove_recursive(io_pgtable_top);
+		return -ENODEV;
+	}
+
+	return 0;
+}
+
+static void io_pgtable_exit(void)
+{
+	debugfs_remove_recursive(io_pgtable_top);
+}
+
+module_init(io_pgtable_init);
+module_exit(io_pgtable_exit);
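The two wrappers added above follow one pattern: try an optional per-driver hook, fall back to the generic allocator, and keep a global atomic counter that the new debugfs node exposes (the kernel code counts pages, 1 << get_order(size), per call). A self-contained userspace sketch of the same pattern, with illustrative names:

	#include <stdatomic.h>
	#include <stdio.h>
	#include <stdlib.h>

	/* Optional per-driver hooks; either pointer may be NULL. */
	struct alloc_ops {
		void *(*alloc)(size_t size);
		void  (*free)(void *virt, size_t size);
	};

	static atomic_long outstanding;

	static void *accounted_alloc(const struct alloc_ops *ops, size_t size)
	{
		void *ret = (ops && ops->alloc) ? ops->alloc(size) : malloc(size);

		if (ret)                /* only count successful allocations */
			atomic_fetch_add(&outstanding, 1);
		return ret;
	}

	static void accounted_free(const struct alloc_ops *ops, void *virt,
				   size_t size)
	{
		if (ops && ops->free)
			ops->free(virt, size);
		else
			free(virt);     /* size unused in the fallback path */
		atomic_fetch_sub(&outstanding, 1);
	}

	int main(void)
	{
		void *p = accounted_alloc(NULL, 4096);  /* falls back to malloc() */

		printf("outstanding: %ld\n", atomic_load(&outstanding));
		accounted_free(NULL, p, 4096);
		printf("outstanding: %ld\n", atomic_load(&outstanding));
		return 0;
	}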
diff -ruw linux-4.4.115/drivers/iommu/io-pgtable.h linux-4.4.115-fbx/drivers/iommu/io-pgtable.h
--- linux-4.4.115/drivers/iommu/io-pgtable.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/iommu/io-pgtable.h	2019-01-22 16:16:24.063251478 +0100
@@ -1,6 +1,9 @@
 #ifndef __IO_PGTABLE_H
 #define __IO_PGTABLE_H
 
+#include <linux/scatterlist.h>
+#include <soc/qcom/msm_tz_smmu.h>
+
 /*
  * Public API for use by IOMMU drivers
  */
@@ -9,6 +12,8 @@
 	ARM_32_LPAE_S2,
 	ARM_64_LPAE_S1,
 	ARM_64_LPAE_S2,
+	ARM_MSM_SECURE,
+	ARM_V8L_FAST,
 	IO_PGTABLE_NUM_FMTS,
 };
 
@@ -20,6 +25,10 @@
  * @tlb_sync:      Ensure any queued TLB invalidation has taken effect, and
  *                 any corresponding page table updates are visible to the
  *                 IOMMU.
+ * @alloc_pages_exact: Allocate page table memory (optional, defaults to
+ *                     alloc_pages_exact)
+ * @free_pages_exact:  Free page table memory (optional, defaults to
+ *                     free_pages_exact)
  *
  * Note that these can all be called in atomic context and must therefore
  * not block.
@@ -29,6 +38,8 @@
 	void (*tlb_add_flush)(unsigned long iova, size_t size, bool leaf,
 			      void *cookie);
 	void (*tlb_sync)(void *cookie);
+	void *(*alloc_pages_exact)(void *cookie, size_t size, gfp_t gfp_mask);
+	void (*free_pages_exact)(void *cookie, void *virt, size_t size);
 };
 
 /**
@@ -45,13 +56,22 @@
  *                 page table walker.
  */
 struct io_pgtable_cfg {
+	/*
+	 * IO_PGTABLE_QUIRK_PAGE_TABLE_COHERENT: Set the page table as
+	 * coherent.
+	 */
 	#define IO_PGTABLE_QUIRK_ARM_NS	(1 << 0)	/* Set NS bit in PTEs */
+	#define IO_PGTABLE_QUIRK_PAGE_TABLE_COHERENT (1 << 1)
+	#define IO_PGTABLE_QUIRK_ARM_TTBR1 (1 << 2)     /* Allocate TTBR1 PT */
 	int				quirks;
 	unsigned long			pgsize_bitmap;
 	unsigned int			ias;
 	unsigned int			oas;
+	int				sep;
 	const struct iommu_gather_ops	*tlb;
 	struct device			*iommu_dev;
+	dma_addr_t			iova_base;
+	dma_addr_t			iova_end;
 
 	/* Low-level data specific to the table format */
 	union {
@@ -65,6 +85,18 @@
 			u64	vttbr;
 			u64	vtcr;
 		} arm_lpae_s2_cfg;
+
+		struct {
+			enum tz_smmu_device_id sec_id;
+			int cbndx;
+		} arm_msm_secure_cfg;
+
+		struct {
+			u64	ttbr[2];
+			u64	tcr;
+			u64	mair[2];
+			void	*pmds;
+		} av8l_fast_cfg;
 	};
 };
 
@@ -72,6 +104,9 @@
  * struct io_pgtable_ops - Page table manipulation API for IOMMU drivers.
  *
  * @map:          Map a physically contiguous memory region.
+ * @map_sg:	  Map a scatterlist.  Returns the number of bytes mapped,
+ *		  or 0 on failure.  The size parameter contains the size
+ *		  of the partial mapping in case of failure.
  * @unmap:        Unmap a physically contiguous memory region.
  * @iova_to_phys: Translate iova to physical address.
  *
@@ -81,10 +116,18 @@
 struct io_pgtable_ops {
 	int (*map)(struct io_pgtable_ops *ops, unsigned long iova,
 		   phys_addr_t paddr, size_t size, int prot);
-	int (*unmap)(struct io_pgtable_ops *ops, unsigned long iova,
+	int (*map_sg)(struct io_pgtable_ops *ops, unsigned long iova,
+			 struct scatterlist *sg, unsigned int nents,
+			 int prot, size_t *size);
+	size_t (*unmap)(struct io_pgtable_ops *ops, unsigned long iova,
 		     size_t size);
 	phys_addr_t (*iova_to_phys)(struct io_pgtable_ops *ops,
 				    unsigned long iova);
+	bool (*is_iova_coherent)(struct io_pgtable_ops *ops,
+				unsigned long iova);
+	uint64_t (*iova_to_pte)(struct io_pgtable_ops *ops,
+		    unsigned long iova);
+
 };
 
 /**
@@ -143,6 +186,28 @@
 	void (*free)(struct io_pgtable *iop);
 };
 
+/**
+ * io_pgtable_alloc_pages_exact - allocate an exact number of physically-contiguous pages.
+ * @cfg: page table configuration, which may supply an alloc_pages_exact hook
+ * @cookie: opaque cookie passed through to that hook
+ * @size: the number of bytes to allocate
+ * @gfp_mask: GFP flags for the allocation
+ *
+ * Like alloc_pages_exact(), but with some additional accounting for debug
+ * purposes.
+ */
+void *io_pgtable_alloc_pages_exact(struct io_pgtable_cfg *cfg, void *cookie,
+				   size_t size, gfp_t gfp_mask);
+
+/**
+ * io_pgtable_free_pages_exact - release memory allocated via io_pgtable_alloc_pages_exact()
+ * @cfg: page table configuration, which may supply a free_pages_exact hook
+ * @cookie: opaque cookie passed through to that hook
+ * @virt: the value returned by io_pgtable_alloc_pages_exact()
+ * @size: size of the allocation, same value as passed when allocating
+ *
+ * Like free_pages_exact(), but with some additional accounting for debug
+ * purposes.
+ */
+void io_pgtable_free_pages_exact(struct io_pgtable_cfg *cfg, void *cookie,
+				 void *virt, size_t size);
+
 extern struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s1_init_fns;
 extern struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s2_init_fns;
 extern struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s1_init_fns;
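The @map_sg kernel-doc above defines a two-part contract: the return value is the number of bytes mapped (0 on failure), and *size reports how far a failed mapping got. Callers are therefore expected to unwind the partial mapping themselves; a hedged caller-side sketch (function and variable names are illustrative):

	static int map_buffer(struct io_pgtable_ops *ops, unsigned long iova,
			      struct scatterlist *sgl, unsigned int nents,
			      int prot)
	{
		size_t partial = 0;
		int mapped = ops->map_sg(ops, iova, sgl, nents, prot, &partial);

		if (!mapped) {          /* failure: remove the partial mapping */
			if (partial)
				ops->unmap(ops, iova, partial);
			return -ENOMEM;
		}
		return 0;
	}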
diff -ruw linux-4.4.115/drivers/iommu/Kconfig linux-4.4.115-fbx/drivers/iommu/Kconfig
--- linux-4.4.115/drivers/iommu/Kconfig	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/iommu/Kconfig	2019-01-22 16:16:24.051251369 +0100
@@ -39,6 +39,42 @@
 
 	  If unsure, say N here.
 
+config IOMMU_IO_PGTABLE_FAST
+	bool "Fast ARMv7/v8 Long Descriptor Format"
+	depends on ARM64_DMA_USE_IOMMU
+	help
+	  Enable support for a subset of the ARM long descriptor pagetable
+	  format.  This allocator achieves fast performance by
+	  pre-allocating and pre-populating page table memory up front.
+	  It only supports a 32-bit virtual address space.
+
+	  This implementation is mainly optimized for use cases where the
+	  buffers are small (<= 64K), since it only supports 4K page sizes.
+
+config IOMMU_IO_PGTABLE_FAST_SELFTEST
+	bool "Fast IO pgtable selftests"
+	depends on IOMMU_IO_PGTABLE_FAST
+	help
+	  Enable self-tests for "fast" page table allocator. This performs
+	  a series of page-table consistency checks during boot.
+
+	  If unsure, say N here.
+
+config IOMMU_IO_PGTABLE_FAST_PROVE_TLB
+	bool "Prove correctness of TLB maintenance in the Fast DMA mapper"
+	depends on IOMMU_IO_PGTABLE_FAST
+	help
+	  Enables some debug features that help prove correctness of TLB
+	  maintenance routines in the Fast DMA mapper.  This option will
+	  slow things down considerably, so it should only be used in a
+	  debug configuration.  It relies on the ability to set bits in an
+	  invalid page table entry, which is disallowed on some hardware
+	  due to errata.  If you're running on such a platform then this
+	  option can only be used with unit tests.  It will break real use
+	  cases.
+
+	  If unsure, say N here.
+
 endmenu
 
 config IOMMU_IOVA
@@ -66,24 +102,6 @@
 	  PAMU can authorize memory access, remap the memory address, and remap I/O
 	  transaction types.
 
-# MSM IOMMU support
-config MSM_IOMMU
-	bool "MSM IOMMU Support"
-	depends on ARM
-	depends on ARCH_MSM8X60 || ARCH_MSM8960 || COMPILE_TEST
-	depends on BROKEN
-	select IOMMU_API
-	help
-	  Support for the IOMMUs found on certain Qualcomm SOCs.
-	  These IOMMUs allow virtualization of the address space used by most
-	  cores within the multimedia subsystem.
-
-	  If unsure, say N here.
-
-config IOMMU_PGTABLES_L2
-	def_bool y
-	depends on MSM_IOMMU && MMU && SMP && CPU_DCACHE_DISABLE=n
-
 # AMD IOMMU support
 config AMD_IOMMU
 	bool "AMD IOMMU support"
@@ -366,6 +384,7 @@
 	select IOMMU_API
 	select IOMMU_IO_PGTABLE_LPAE
 	select ARM_DMA_USE_IOMMU if ARM
+	select ARM64_DMA_USE_IOMMU if ARM64
 	help
 	  Support for implementations of the ARM System MMU architecture
 	  versions 1 and 2.
@@ -393,4 +412,34 @@
 	help
 	  Support for the IOMMU API for s390 PCI devices.
 
+menuconfig IOMMU_DEBUG
+	bool "IOMMU Profiling and Debugging"
+	help
+	  Makes available some additional IOMMU profiling and debugging
+	  options.
+
+if IOMMU_DEBUG
+
+config IOMMU_DEBUG_TRACKING
+	bool "Track key IOMMU events"
+	select IOMMU_API
+	help
+	  Enables additional debug tracking in the IOMMU framework code.
+	  Tracking information and tests can be accessed through various
+	  debugfs files.
+
+	  Say Y here if you need to debug IOMMU issues and are okay with
+	  the performance penalty of the tracking.
+
+config IOMMU_TESTS
+	bool "Interactive IOMMU performance/functional tests"
+	select IOMMU_API
+	help
+	  Enables a suite of IOMMU unit tests.  The tests are runnable
+	  through debugfs.  Unlike the IOMMU_DEBUG_TRACKING option, the
+	  impact of enabling this option on overall system performance
+	  should be minimal.
+
+endif # IOMMU_DEBUG
+
 endif # IOMMU_SUPPORT
diff -ruw linux-4.4.115/drivers/iommu/Makefile linux-4.4.115-fbx/drivers/iommu/Makefile
--- linux-4.4.115/drivers/iommu/Makefile	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/iommu/Makefile	2019-01-22 16:16:24.051251369 +0100
@@ -2,11 +2,14 @@
 obj-$(CONFIG_IOMMU_API) += iommu-traces.o
 obj-$(CONFIG_IOMMU_API) += iommu-sysfs.o
 obj-$(CONFIG_IOMMU_DMA) += dma-iommu.o
+obj-$(CONFIG_IOMMU_API) += msm_dma_iommu_mapping.o
 obj-$(CONFIG_IOMMU_IO_PGTABLE) += io-pgtable.o
 obj-$(CONFIG_IOMMU_IO_PGTABLE_LPAE) += io-pgtable-arm.o
 obj-$(CONFIG_IOMMU_IOVA) += iova.o
+obj-$(CONFIG_MSM_TZ_SMMU) += io-pgtable-msm-secure.o
+obj-$(CONFIG_IOMMU_IO_PGTABLE_FAST) += io-pgtable-fast.o dma-mapping-fast.o
 obj-$(CONFIG_OF_IOMMU)	+= of_iommu.o
-obj-$(CONFIG_MSM_IOMMU) += msm_iommu.o msm_iommu_dev.o
+obj-$(CONFIG_IOMMU_DEBUG) += iommu-debug.o
 obj-$(CONFIG_AMD_IOMMU) += amd_iommu.o amd_iommu_init.o
 obj-$(CONFIG_AMD_IOMMU_V2) += amd_iommu_v2.o
 obj-$(CONFIG_ARM_SMMU) += arm-smmu.o
diff -ruw linux-4.4.115/drivers/irqchip/irq-gic.c linux-4.4.115-fbx/drivers/irqchip/irq-gic.c
--- linux-4.4.115/drivers/irqchip/irq-gic.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/irqchip/irq-gic.c	2019-10-29 09:26:23.825204959 +0100
@@ -98,6 +98,11 @@
 static u8 gic_cpu_map[NR_GIC_CPU_IF] __read_mostly;
 
 static struct static_key supports_deactivate = STATIC_KEY_INIT_TRUE;
+/*
+ * Arch-specific GIC irq extension hooks; each callback defaults to NULL
+ * (see irq-gic-common.c).
+ */
+extern struct irq_chip gic_arch_extn;
 
 #ifndef MAX_GIC_NR
 #define MAX_GIC_NR	1
@@ -182,7 +187,13 @@
 
 static void gic_mask_irq(struct irq_data *d)
 {
+	unsigned long flags;
+
+	raw_spin_lock_irqsave(&irq_controller_lock, flags);
 	gic_poke_irq(d, GIC_DIST_ENABLE_CLEAR);
+	if (gic_arch_extn.irq_mask)
+		gic_arch_extn.irq_mask(d);
+	raw_spin_unlock_irqrestore(&irq_controller_lock, flags);
 }
 
 static void gic_eoimode1_mask_irq(struct irq_data *d)
@@ -202,11 +213,23 @@
 
 static void gic_unmask_irq(struct irq_data *d)
 {
+	unsigned long flags;
+
+	raw_spin_lock_irqsave(&irq_controller_lock, flags);
+	if (gic_arch_extn.irq_unmask)
+		gic_arch_extn.irq_unmask(d);
 	gic_poke_irq(d, GIC_DIST_ENABLE_SET);
+	raw_spin_unlock_irqrestore(&irq_controller_lock, flags);
 }
 
 static void gic_eoi_irq(struct irq_data *d)
 {
+	if (gic_arch_extn.irq_eoi) {
+		raw_spin_lock(&irq_controller_lock);
+		gic_arch_extn.irq_eoi(d);
+		raw_spin_unlock(&irq_controller_lock);
+	}
+
 	writel_relaxed(gic_irq(d), gic_cpu_base(d) + GIC_CPU_EOI);
 }
 
@@ -272,6 +295,8 @@
 {
 	void __iomem *base = gic_dist_base(d);
 	unsigned int gicirq = gic_irq(d);
+	unsigned long flags;
+	int ret;
 
 	/* Interrupt configuration for SGIs can't be changed */
 	if (gicirq < 16)
@@ -282,7 +307,25 @@
 			    type != IRQ_TYPE_EDGE_RISING)
 		return -EINVAL;
 
-	return gic_configure_irq(gicirq, type, base, NULL);
+	raw_spin_lock_irqsave(&irq_controller_lock, flags);
+
+	if (gic_arch_extn.irq_set_type)
+		gic_arch_extn.irq_set_type(d, type);
+
+	ret = gic_configure_irq(gicirq, type, base, NULL);
+
+	raw_spin_unlock_irqrestore(&irq_controller_lock, flags);
+
+	return ret;
+}
+
+static int gic_retrigger(struct irq_data *d)
+{
+	if (gic_arch_extn.irq_retrigger)
+		return gic_arch_extn.irq_retrigger(d);
+
+	/* the genirq layer expects 0 if we can't retrigger in hardware */
+	return 0;
 }
 
 static int gic_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu)
@@ -326,6 +369,21 @@
 }
 #endif
 
+#ifdef CONFIG_PM
+static int gic_set_wake(struct irq_data *d, unsigned int on)
+{
+	int ret = -ENXIO;
+
+	if (gic_arch_extn.irq_set_wake)
+		ret = gic_arch_extn.irq_set_wake(d, on);
+
+	return ret;
+}
+
+#else
+#define gic_set_wake	NULL
+#endif
+
 static void __exception_irq_entry gic_handle_irq(struct pt_regs *regs)
 {
 	u32 irqstat, irqnr;
@@ -396,9 +454,11 @@
 	.irq_unmask		= gic_unmask_irq,
 	.irq_eoi		= gic_eoi_irq,
 	.irq_set_type		= gic_set_type,
+	.irq_retrigger		= gic_retrigger,
 #ifdef CONFIG_SMP
 	.irq_set_affinity	= gic_set_affinity,
 #endif
+	.irq_set_wake		= gic_set_wake,
 	.irq_get_irqchip_state	= gic_irq_get_irqchip_state,
 	.irq_set_irqchip_state	= gic_irq_set_irqchip_state,
 	.flags			= IRQCHIP_SET_TYPE_MASKED |
@@ -707,7 +767,8 @@
 	gic_cpu_if_up(&gic_data[gic_nr]);
 }
 
-static int gic_notifier(struct notifier_block *self, unsigned long cmd,	void *v)
+static int gic_notifier(struct notifier_block *self, unsigned long cmd,
+			void *aff_level)
 {
 	int i;
 
@@ -726,10 +787,19 @@
 			gic_cpu_restore(i);
 			break;
 		case CPU_CLUSTER_PM_ENTER:
+			/*
+			 * Affinity level of the node
+			 * eg:
+			 *    cpu level = 0
+			 *    l2 level  = 1
+			 *    cci level = 2
+			 */
+			if (!(unsigned long)aff_level)
 			gic_dist_save(i);
 			break;
 		case CPU_CLUSTER_PM_ENTER_FAILED:
 		case CPU_CLUSTER_PM_EXIT:
+			if (!(unsigned long)aff_level)
 			gic_dist_restore(i);
 			break;
 		}
@@ -1147,6 +1217,7 @@
 			pr_info("GIC: Using split EOI/Deactivate mode\n");
 	}
 
+	gic_chip.flags |= gic_arch_extn.flags;
 	gic_dist_init(gic);
 	gic_cpu_init(gic);
 	gic_pm_init(gic);
diff -ruw linux-4.4.115/drivers/irqchip/irq-gic-common.c linux-4.4.115-fbx/drivers/irqchip/irq-gic-common.c
--- linux-4.4.115/drivers/irqchip/irq-gic-common.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/irqchip/irq-gic-common.c	2019-01-22 16:16:24.071251550 +0100
@@ -32,6 +32,19 @@
 	}
 }
 
+/*
+ * Arch-specific GIC irq extension hooks. Platform code may override
+ * them; each callback defaults to NULL.
+ */
+struct irq_chip gic_arch_extn = {
+	.irq_eoi	= NULL,
+	.irq_mask	= NULL,
+	.irq_unmask	= NULL,
+	.irq_retrigger	= NULL,
+	.irq_set_type	= NULL,
+	.irq_set_wake	= NULL,
+};
+
 int gic_configure_irq(unsigned int irq, unsigned int type,
 		       void __iomem *base, void (*sync_access)(void))
 {
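gic_arch_extn is a struct of optional hooks that the GIC mask/unmask/EOI/set_type/set_wake paths call only when non-NULL. A hedged sketch of how platform code might install hooks before the GIC initializes; the MPM-style helpers here are hypothetical stand-ins, not real kernel APIs:

	static void example_mpm_mask(struct irq_data *d)
	{
		/* mirror the mask into an always-on wakeup interrupt controller */
		example_mpm_set_pin(d->hwirq, false);           /* hypothetical helper */
	}

	static int example_mpm_set_wake(struct irq_data *d, unsigned int on)
	{
		return example_mpm_mark_wakeup(d->hwirq, on);   /* hypothetical helper */
	}

	static void __init example_irq_init(void)
	{
		gic_arch_extn.irq_mask     = example_mpm_mask;
		gic_arch_extn.irq_set_wake = example_mpm_set_wake;
		gic_arch_extn.flags        = IRQCHIP_MASK_ON_SUSPEND;
	}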
diff -ruw linux-4.4.115/drivers/irqchip/irq-gic-common.h linux-4.4.115-fbx/drivers/irqchip/irq-gic-common.h
--- linux-4.4.115/drivers/irqchip/irq-gic-common.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/irqchip/irq-gic-common.h	2019-01-22 16:16:24.071251550 +0100
@@ -26,6 +26,9 @@
 	u32 iidr;
 	u32 mask;
 };
+extern bool from_suspend;
+extern struct irq_chip gic_arch_extn;
+extern int msm_show_resume_irq_mask;
 
 int gic_configure_irq(unsigned int irq, unsigned int type,
                        void __iomem *base, void (*sync_access)(void));
diff -ruw linux-4.4.115/drivers/irqchip/irq-gic-v3.c linux-4.4.115-fbx/drivers/irqchip/irq-gic-v3.c
--- linux-4.4.115/drivers/irqchip/irq-gic-v3.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/irqchip/irq-gic-v3.c	2019-10-29 09:26:23.821204920 +0100
@@ -24,9 +24,12 @@
 #include <linux/of_irq.h>
 #include <linux/percpu.h>
 #include <linux/slab.h>
+#include <linux/module.h>
 
 #include <linux/irqchip.h>
 #include <linux/irqchip/arm-gic-v3.h>
+#include <linux/syscore_ops.h>
+#include <linux/irqchip/msm-mpm-irq.h>
 
 #include <asm/cputype.h>
 #include <asm/exception.h>
@@ -48,6 +51,14 @@
 	u64			redist_stride;
 	u32			nr_redist_regions;
 	unsigned int		irq_nr;
+#ifdef CONFIG_PM
+	unsigned int wakeup_irqs[32];
+	unsigned int enabled_irqs[32];
+#endif
+#ifdef CONFIG_ARM_GIC_PANIC_HANDLER
+	u32 saved_dist_regs[0x400];
+	u32 saved_router_regs[0x800];
+#endif
 };
 
 static struct gic_chip_data gic_data __read_mostly;
@@ -85,7 +96,7 @@
 {
 	u32 count = 1000000;	/* 1s! */
 
-	while (readl_relaxed(base + GICD_CTLR) & GICD_CTLR_RWP) {
+	while (readl_relaxed_no_log(base + GICD_CTLR) & GICD_CTLR_RWP) {
 		count--;
 		if (!count) {
 			pr_err_ratelimited("RWP timeout, gone fishing\n");
@@ -120,6 +131,7 @@
 }
 #endif
 
+#ifdef CONFIG_ARM_GIC_V3_NO_ACCESS_CONTROL
 static void gic_enable_redist(bool enable)
 {
 	void __iomem *rbase;
@@ -153,6 +165,9 @@
 		pr_err_ratelimited("redistributor failed to %s...\n",
 				   enable ? "wakeup" : "sleep");
 }
+#else
+static void gic_enable_redist(bool enable) { }
+#endif
 
 /*
  * Routines to disable, enable, EOI and route interrupts
@@ -167,7 +182,7 @@
 	else
 		base = gic_data.dist_base;
 
-	return !!(readl_relaxed(base + offset + (gic_irq(d) / 32) * 4) & mask);
+	return !!(readl_relaxed_no_log(base + offset + (gic_irq(d) / 32) * 4) & mask);
 }
 
 static void gic_poke_irq(struct irq_data *d, u32 offset)
@@ -184,12 +199,15 @@
 		rwp_wait = gic_dist_wait_for_rwp;
 	}
 
-	writel_relaxed(mask, base + offset + (gic_irq(d) / 32) * 4);
+	writel_relaxed_no_log(mask, base + offset + (gic_irq(d) / 32) * 4);
 	rwp_wait();
 }
 
 static void gic_mask_irq(struct irq_data *d)
 {
+	if (gic_arch_extn.irq_mask)
+		gic_arch_extn.irq_mask(d);
+
 	gic_poke_irq(d, GICD_ICENABLER);
 }
 
@@ -210,6 +228,8 @@
 
 static void gic_unmask_irq(struct irq_data *d)
 {
+	if (gic_arch_extn.irq_unmask)
+		gic_arch_extn.irq_unmask(d);
 	gic_poke_irq(d, GICD_ISENABLER);
 }
 
@@ -267,9 +287,20 @@
 
 	return 0;
 }
+static void gic_disable_irq(struct irq_data *d)
+{
+	/* don't lazy-disable PPIs */
+	if (gic_irq(d) < 32)
+		gic_mask_irq(d);
+	if (gic_arch_extn.irq_disable)
+		gic_arch_extn.irq_disable(d);
+}
 
 static void gic_eoi_irq(struct irq_data *d)
 {
+	if (gic_arch_extn.irq_eoi)
+		gic_arch_extn.irq_eoi(d);
+
 	gic_write_eoir(gic_irq(d));
 }
 
@@ -307,6 +338,9 @@
 		rwp_wait = gic_dist_wait_for_rwp;
 	}
 
+	if (gic_arch_extn.irq_set_type)
+		gic_arch_extn.irq_set_type(d, type);
+
 	return gic_configure_irq(irq, type, base, rwp_wait);
 }
 
@@ -319,6 +353,134 @@
 	return 0;
 }
 
+static int gic_retrigger(struct irq_data *d)
+{
+	if (gic_arch_extn.irq_retrigger)
+		return gic_arch_extn.irq_retrigger(d);
+
+	/* the genirq layer expects 0 if we can't retrigger in hardware */
+	return 0;
+}
+
+static inline void __iomem *gic_data_dist_base(struct gic_chip_data *data)
+{
+	return data->dist_base;
+}
+
+#ifdef CONFIG_ARM_GIC_PANIC_HANDLER
+static int gic_panic_handler(struct notifier_block *this,
+			unsigned long event, void *ptr)
+{
+	int i;
+	void __iomem *base;
+
+	base = gic_data.dist_base;
+	for (i = 0; i < 0x400; i += 1)
+		gic_data.saved_dist_regs[i] = readl_relaxed(base + 4 * i);
+
+	base = gic_data.dist_base + GICD_IROUTER;
+	for (i = 0; i < 0x800; i += 1)
+		gic_data.saved_router_regs[i] = readl_relaxed(base + 4 * i);
+
+	return NOTIFY_DONE;
+}
+
+static struct notifier_block gic_panic_blk = {
+	.notifier_call = gic_panic_handler,
+};
+#endif
+
+#ifdef CONFIG_PM
+static int gic_suspend_one(struct gic_chip_data *gic)
+{
+	unsigned int i;
+	void __iomem *base = gic_data_dist_base(gic);
+
+	for (i = 0; i * 32 < gic->irq_nr; i++) {
+		gic->enabled_irqs[i]
+			= readl_relaxed(base + GICD_ISENABLER + i * 4);
+		/* disable all of them */
+		writel_relaxed(0xffffffff, base + GICD_ICENABLER + i * 4);
+		/* enable the wakeup set */
+		writel_relaxed(gic->wakeup_irqs[i],
+			base + GICD_ISENABLER + i * 4);
+	}
+	return 0;
+}
+
+static int gic_suspend(void)
+{
+	gic_suspend_one(&gic_data);
+	return 0;
+}
+
+static void gic_show_resume_irq(struct gic_chip_data *gic)
+{
+	unsigned int i;
+	u32 enabled;
+	u32 pending[32];
+	void __iomem *base = gic_data_dist_base(gic);
+
+	if (!msm_show_resume_irq_mask)
+		return;
+
+	for (i = 0; i * 32 < gic->irq_nr; i++) {
+		enabled = readl_relaxed(base + GICD_ICENABLER + i * 4);
+		pending[i] = readl_relaxed(base + GICD_ISPENDR + i * 4);
+		pending[i] &= enabled;
+	}
+
+	for (i = find_first_bit((unsigned long *)pending, gic->irq_nr);
+	     i < gic->irq_nr;
+	     i = find_next_bit((unsigned long *)pending, gic->irq_nr, i+1)) {
+		unsigned int irq = irq_find_mapping(gic->domain, i);
+		struct irq_desc *desc = irq_to_desc(irq);
+		const char *name = "null";
+
+		if (desc == NULL)
+			name = "stray irq";
+		else if (desc->action && desc->action->name)
+			name = desc->action->name;
+
+		pr_warn("%s: %d triggered %s\n", __func__, irq, name);
+	}
+}
+
+static void gic_resume_one(struct gic_chip_data *gic)
+{
+	unsigned int i;
+	void __iomem *base = gic_data_dist_base(gic);
+
+	gic_show_resume_irq(gic);
+
+	for (i = 0; i * 32 < gic->irq_nr; i++) {
+		/* disable all of them */
+		writel_relaxed(0xffffffff, base + GICD_ICENABLER + i * 4);
+		/* enable the enabled set */
+		writel_relaxed(gic->enabled_irqs[i],
+			base + GICD_ISENABLER + i * 4);
+	}
+}
+
+static void gic_resume(void)
+{
+	gic_resume_one(&gic_data);
+}
+
+static struct syscore_ops gic_syscore_ops = {
+	.suspend = gic_suspend,
+	.resume = gic_resume,
+};
+
+static int __init gic_init_sys(void)
+{
+	register_syscore_ops(&gic_syscore_ops);
+	return 0;
+}
+arch_initcall(gic_init_sys);
+
+#endif
+
 static u64 gic_mpidr_to_affinity(unsigned long mpidr)
 {
 	u64 aff;
@@ -340,7 +502,7 @@
 
 		if (likely(irqnr > 15 && irqnr < 1020) || irqnr >= 8192) {
 			int err;
-
+			uncached_logk(LOGK_IRQ, (void *)(uintptr_t)irqnr);
 			if (static_key_true(&supports_deactivate))
 				gic_write_eoir(irqnr);
 
@@ -357,6 +519,7 @@
 			continue;
 		}
 		if (irqnr < 16) {
+			uncached_logk(LOGK_IRQ, (void *)(uintptr_t)irqnr);
 			gic_write_eoir(irqnr);
 			if (static_key_true(&supports_deactivate))
 				gic_write_dir(irqnr);
@@ -444,9 +607,6 @@
 				u64 offset = ptr - gic_data.redist_regions[i].redist_base;
 				gic_data_rdist_rd_base() = ptr;
 				gic_data_rdist()->phys_base = gic_data.redist_regions[i].phys_base + offset;
-				pr_info("CPU%d: found redistributor %lx region %d:%pa\n",
-					smp_processor_id(), mpidr, i,
-					&gic_data_rdist()->phys_base);
 				return 0;
 			}
 
@@ -516,7 +676,8 @@
 	gic_cpu_config(rbase, gic_redist_wait_for_rwp);
 
 	/* Give LPIs a spin */
-	if (IS_ENABLED(CONFIG_ARM_GIC_V3_ITS) && gic_dist_supports_lpis())
+	if (IS_ENABLED(CONFIG_ARM_GIC_V3_ITS) && gic_dist_supports_lpis() &&
+					!IS_ENABLED(CONFIG_ARM_GIC_V3_ACL))
 		its_cpu_init();
 
 	/* initialise system registers */
@@ -649,6 +810,14 @@
 	gic_write_irouter(val, reg);
 
 	/*
+	 * It is possible that the irq is disabled only from the software
+	 * point of view, because the kernel takes a lazy-disable approach.
+	 * Therefore, check the irq descriptor to see whether it should be
+	 * kept disabled.
+	 */
+	if (irqd_irq_disabled(d))
+		enabled = 0;
+
+	/*
 	 * If the interrupt was enabled, enable it again. Otherwise,
 	 * just wait for the distributor to have digested our changes.
 	 */
@@ -664,10 +833,44 @@
 #define gic_smp_init()		do { } while(0)
 #endif
 
+#ifdef CONFIG_PM
+int gic_set_wake(struct irq_data *d, unsigned int on)
+{
+	int ret = -ENXIO;
+	unsigned int reg_offset, bit_offset;
+	unsigned int gicirq = gic_irq(d);
+	struct gic_chip_data *gic_data = irq_data_get_irq_chip_data(d);
+
+	/* per-cpu interrupts cannot be wakeup interrupts */
+	WARN_ON(gicirq < 32);
+
+	reg_offset = gicirq / 32;
+	bit_offset = gicirq % 32;
+
+	if (on)
+		gic_data->wakeup_irqs[reg_offset] |=  1 << bit_offset;
+	else
+		gic_data->wakeup_irqs[reg_offset] &=  ~(1 << bit_offset);
+
+	if (gic_arch_extn.irq_set_wake)
+		ret = gic_arch_extn.irq_set_wake(d, on);
+	else
+		pr_err("mpm: set wake is null\n");
+
+	return ret;
+}
+
+#else
+#define gic_set_wake	NULL
+#endif
+
 #ifdef CONFIG_CPU_PM
 static int gic_cpu_pm_notifier(struct notifier_block *self,
 			       unsigned long cmd, void *v)
 {
+	if (from_suspend)
+		return NOTIFY_OK;
+
 	if (cmd == CPU_PM_EXIT) {
 		gic_enable_redist(true);
 		gic_cpu_sys_reg_init();
@@ -691,13 +894,16 @@
 static inline void gic_cpu_pm_init(void) { }
 #endif /* CONFIG_CPU_PM */
 
-static struct irq_chip gic_chip = {
+struct irq_chip gic_chip = {
 	.name			= "GICv3",
 	.irq_mask		= gic_mask_irq,
 	.irq_unmask		= gic_unmask_irq,
 	.irq_eoi		= gic_eoi_irq,
 	.irq_set_type		= gic_set_type,
+	.irq_retrigger		= gic_retrigger,
 	.irq_set_affinity	= gic_set_affinity,
+	.irq_disable		= gic_disable_irq,
+	.irq_set_wake		= gic_set_wake,
 	.irq_get_irqchip_state	= gic_irq_get_irqchip_state,
 	.irq_set_irqchip_state	= gic_irq_set_irqchip_state,
 	.flags			= IRQCHIP_SET_TYPE_MASKED,
@@ -714,6 +920,7 @@
 	.irq_set_irqchip_state	= gic_irq_set_irqchip_state,
 	.irq_set_vcpu_affinity	= gic_irq_set_vcpu_affinity,
 	.flags			= IRQCHIP_SET_TYPE_MASKED,
+	.irq_set_wake		= gic_set_wake,
 };
 
 #define GIC_ID_NR		(1U << gic_data.rdists.id_bits)
@@ -923,13 +1130,20 @@
 
 	set_handle_irq(gic_handle_irq);
 
-	if (IS_ENABLED(CONFIG_ARM_GIC_V3_ITS) && gic_dist_supports_lpis())
+	if (IS_ENABLED(CONFIG_ARM_GIC_V3_ITS) && gic_dist_supports_lpis() &&
+					!IS_ENABLED(CONFIG_ARM_GIC_V3_ACL))
 		its_init(node, &gic_data.rdists, gic_data.domain);
 
+	gic_chip.flags |= gic_arch_extn.flags;
 	gic_smp_init();
 	gic_dist_init();
 	gic_cpu_init();
 	gic_cpu_pm_init();
+	of_mpm_init();
+
+#ifdef CONFIG_ARM_GIC_PANIC_HANDLER
+	atomic_notifier_chain_register(&panic_notifier_list, &gic_panic_blk);
+#endif
 
 	return 0;
 
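gic_set_wake() above records wakeup interrupts in an array of 32-bit words, and gic_suspend_one() later writes those words straight into GICD_ISENABLER (one enable bit per interrupt ID, 32 IDs per register). The index math is plain bitmap arithmetic; a standalone illustration:

	#include <stdint.h>
	#include <stdio.h>

	static uint32_t wakeup_irqs[32];        /* covers interrupt IDs 0..1023 */

	static void set_wake(unsigned int gicirq, int on)
	{
		unsigned int reg = gicirq / 32; /* which 32-bit word */
		unsigned int bit = gicirq % 32; /* which bit inside it */

		if (on)
			wakeup_irqs[reg] |= 1u << bit;
		else
			wakeup_irqs[reg] &= ~(1u << bit);
	}

	int main(void)
	{
		set_wake(47, 1);        /* IRQ 47 -> word 1, bit 15 */
		printf("word 1 = %#x\n", wakeup_irqs[1]);       /* prints 0x8000 */
		return 0;
	}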
diff -ruw linux-4.4.115/drivers/irqchip/Kconfig linux-4.4.115-fbx/drivers/irqchip/Kconfig
--- linux-4.4.115/drivers/irqchip/Kconfig	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/irqchip/Kconfig	2019-10-29 09:26:23.821204920 +0100
@@ -7,6 +7,7 @@
 	select IRQ_DOMAIN
 	select IRQ_DOMAIN_HIERARCHY
 	select MULTI_IRQ_HANDLER
+	select QCOM_SHOW_RESUME_IRQ
 
 config ARM_GIC_V2M
 	bool
@@ -27,6 +28,25 @@
 	bool
 	select PCI_MSI_IRQ_DOMAIN
 
+config ARM_GIC_V3_ACL
+	bool "GICv3 Access control"
+	depends on ARM_GIC_V3
+	help
+	  Access to the GIC ITS address space is controlled by EL2.
+	  The kernel has no permission to access the ITS.
+
+config ARM_GIC_V3_NO_ACCESS_CONTROL
+	bool "GICv3 No Access Control Configuration"
+	depends on ARM_GIC_V3
+	help
+	  On some SoCs with access control configurations, a certain
+	  set of GIC registers may not be accessed from the non-secure
+	  world. Provide a common flag to protect those functionalities
+	  and compile them out for such configurations, so that the
+	  affected registers are not touched.
+
+	  For production kernels, you should say 'N' here.
+
 config ARM_NVIC
 	bool
 	select IRQ_DOMAIN
@@ -80,6 +100,16 @@
 	select GENERIC_IRQ_CHIP
 	select IRQ_DOMAIN
 
+config QCOM_SHOW_RESUME_IRQ
+	bool "Enable logging of interrupts that could have caused resume"
+	depends on ARM_GIC
+	default n
+	help
+	  This option logs wakeup interrupts that triggered just before
+	  the resume path runs. It helps debugging by identifying unnecessary
+	  wakeup interrupts that cause the system to come out of low power modes.
+	  Say Y if you want to debug why the system resumed.
+
 config DW_APB_ICTL
 	bool
 	select GENERIC_IRQ_CHIP
@@ -193,3 +223,7 @@
 	def_bool y if MACH_ASM9260 || ARCH_MXS
 	select IRQ_DOMAIN
 	select STMP_DEVICE
+
+config MSM_IRQ
+	bool
+	select IRQ_DOMAIN
diff -ruw linux-4.4.115/drivers/irqchip/Makefile linux-4.4.115-fbx/drivers/irqchip/Makefile
--- linux-4.4.115/drivers/irqchip/Makefile	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/irqchip/Makefile	2019-10-29 09:26:23.821204920 +0100
@@ -55,3 +55,4 @@
 obj-$(CONFIG_ARCH_SA1100)		+= irq-sa11x0.o
 obj-$(CONFIG_INGENIC_IRQ)		+= irq-ingenic.o
 obj-$(CONFIG_IMX_GPCV2)			+= irq-imx-gpcv2.o
+obj-$(CONFIG_QCOM_SHOW_RESUME_IRQ)       += msm_show_resume_irq.o
diff -ruw linux-4.4.115/drivers/Kconfig linux-4.4.115-fbx/drivers/Kconfig
--- linux-4.4.115/drivers/Kconfig	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/Kconfig	2019-01-22 16:16:22.767239742 +0100
@@ -8,6 +8,8 @@
 
 source "drivers/connector/Kconfig"
 
+source "drivers/fbxprocfs/Kconfig"
+
 source "drivers/mtd/Kconfig"
 
 source "drivers/of/Kconfig"
@@ -54,6 +56,10 @@
 
 source "drivers/i2c/Kconfig"
 
+source "drivers/slimbus/Kconfig"
+
+source "drivers/soundwire/Kconfig"
+
 source "drivers/spi/Kconfig"
 
 source "drivers/spmi/Kconfig"
@@ -68,6 +74,8 @@
 
 source "drivers/gpio/Kconfig"
 
+source "drivers/fbxgpio/Kconfig"
+
 source "drivers/w1/Kconfig"
 
 source "drivers/power/Kconfig"
@@ -104,6 +112,8 @@
 
 source "drivers/leds/Kconfig"
 
+source "drivers/switch/Kconfig"
+
 source "drivers/accessibility/Kconfig"
 
 source "drivers/infiniband/Kconfig"
@@ -112,6 +122,8 @@
 
 source "drivers/rtc/Kconfig"
 
+source "drivers/esoc/Kconfig"
+
 source "drivers/dma/Kconfig"
 
 source "drivers/dca/Kconfig"
@@ -198,4 +210,12 @@
 
 source "drivers/fpga/Kconfig"
 
+source "drivers/firmware/Kconfig"
+
+source "drivers/bif/Kconfig"
+
+source "drivers/sensors/Kconfig"
+
+source "drivers/tee/Kconfig"
+
 endmenu
diff -ruw linux-4.4.115/drivers/leds/Kconfig linux-4.4.115-fbx/drivers/leds/Kconfig
--- linux-4.4.115/drivers/leds/Kconfig	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/leds/Kconfig	2019-10-29 09:26:23.841205116 +0100
@@ -587,6 +587,43 @@
 	  To compile this driver as a module, choose 'm' here: the module
 	  will be called leds-powernv.
 
+config LEDS_QPNP
+	tristate "Support for QPNP LEDs"
+	depends on LEDS_CLASS && SPMI
+	help
+	  This driver supports the LED functionality of Qualcomm Technologies,
+	  Inc. QPNP PMICs.  It primarily supports controlling tri-color RGB
+	  LEDs in both PWM and light pattern generator (LPG) modes.  For older
+	  PMICs, it also supports WLEDs and flash LEDs.
+
+config LEDS_QPNP_FLASH
+	tristate "Support for QPNP Flash LEDs"
+	depends on LEDS_CLASS && SPMI
+	help
+	  This driver supports the flash LED functionality of Qualcomm
+	  Technologies, Inc. QPNP PMICs.  This driver supports PMICs up through
+	  PM8994.  It can configure the flash LED target current for several
+	  independent channels.
+
+config LEDS_QPNP_FLASH_V2
+	tristate "Support for QPNP V2 Flash LEDs"
+	depends on LEDS_CLASS && MFD_SPMI_PMIC && !LEDS_QPNP_FLASH
+	help
+	  This driver supports the flash V2 LED functionality of Qualcomm
+	  Technologies, Inc. QPNP PMICs.  This driver supports PMICs starting
+	  from PMI8998.  It can configure the flash LED target current for
+	  several independent channels.  It also supports various over current
+	  and over temperature mitigation features.
+
+config LEDS_QPNP_WLED
+	tristate "Support for QPNP WLED"
+	depends on LEDS_CLASS && SPMI
+	help
+	  This driver supports the WLED (White LED) functionality of Qualcomm
+	  Technologies, Inc. QPNP PMICs.  WLED is used for LCD backlight with
+	  variable brightness.  It also supports outputting the Avdd supply for
+	  AMOLED displays.
+
 config LEDS_SYSCON
 	bool "LED support for LEDs on system controllers"
 	depends on LEDS_CLASS=y
diff -ruw linux-4.4.115/drivers/leds/led-class.c linux-4.4.115-fbx/drivers/leds/led-class.c
--- linux-4.4.115/drivers/leds/led-class.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/leds/led-class.c	2019-10-29 09:26:23.841205116 +0100
@@ -53,9 +53,10 @@
 	if (ret)
 		goto unlock;
 
-	if (state == LED_OFF)
+	if (state == LED_OFF && !(led_cdev->flags & LED_KEEP_TRIGGER))
 		led_trigger_remove(led_cdev);
 	led_set_brightness(led_cdev, state);
+	led_cdev->usr_brightness_req = state;
 
 	ret = size;
 unlock:
@@ -71,7 +72,24 @@
 
 	return sprintf(buf, "%u\n", led_cdev->max_brightness);
 }
-static DEVICE_ATTR_RO(max_brightness);
+
+static ssize_t max_brightness_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t size)
+{
+	struct led_classdev *led_cdev = dev_get_drvdata(dev);
+	unsigned long state;
+	ssize_t ret = -EINVAL;
+
+	ret = kstrtoul(buf, 10, &state);
+	if (ret)
+		return ret;
+
+	led_cdev->max_brightness = state;
+	led_set_brightness(led_cdev, led_cdev->usr_brightness_req);
+
+	return size;
+}
+static DEVICE_ATTR_RW(max_brightness);
 
 #ifdef CONFIG_LEDS_TRIGGERS
 static DEVICE_ATTR(trigger, 0644, led_trigger_show, led_trigger_store);
diff -ruw linux-4.4.115/drivers/leds/leds.h linux-4.4.115-fbx/drivers/leds/leds.h
--- linux-4.4.115/drivers/leds/leds.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/leds/leds.h	2019-01-22 16:16:24.151252275 +0100
@@ -44,6 +44,22 @@
 	return led_cdev->brightness;
 }
 
+static inline struct led_classdev *trigger_to_lcdev(struct led_trigger *trig)
+{
+	struct led_classdev *led_cdev;
+
+	read_lock(&trig->leddev_list_lock);
+	list_for_each_entry(led_cdev, &trig->led_cdevs, trig_list) {
+		if (led_cdev->default_trigger &&
+		    !strcmp(led_cdev->default_trigger, trig->name)) {
+			read_unlock(&trig->leddev_list_lock);
+			return led_cdev;
+		}
+	}
+
+	read_unlock(&trig->leddev_list_lock);
+	return NULL;
+}
+
 void led_init_core(struct led_classdev *led_cdev);
 void led_stop_software_blink(struct led_classdev *led_cdev);
 
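trigger_to_lcdev() above walks the LEDs attached to a trigger, under the trigger's read lock, and returns the one that names this trigger as its default. A hedged sketch of how a trigger driver might use it; the callback body and brightness choice are illustrative:

	static void example_trig_activate(struct led_classdev *led_cdev)
	{
		struct led_classdev *def = trigger_to_lcdev(led_cdev->trigger);

		/* light the LED that adopted us as its default trigger */
		if (def)
			led_set_brightness(def, LED_FULL);
	}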
diff -ruw linux-4.4.115/drivers/leds/Makefile linux-4.4.115-fbx/drivers/leds/Makefile
--- linux-4.4.115/drivers/leds/Makefile	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/leds/Makefile	2019-10-29 09:26:23.841205116 +0100
@@ -60,6 +60,10 @@
 obj-$(CONFIG_LEDS_MAX8997)		+= leds-max8997.o
 obj-$(CONFIG_LEDS_LM355x)		+= leds-lm355x.o
 obj-$(CONFIG_LEDS_BLINKM)		+= leds-blinkm.o
+obj-$(CONFIG_LEDS_QPNP)			+= leds-qpnp.o
+obj-$(CONFIG_LEDS_QPNP_FLASH)		+= leds-qpnp-flash.o
+obj-$(CONFIG_LEDS_QPNP_FLASH_V2)        += leds-qpnp-flash-v2.o
+obj-$(CONFIG_LEDS_QPNP_WLED)		+= leds-qpnp-wled.o
 obj-$(CONFIG_LEDS_SYSCON)		+= leds-syscon.o
 obj-$(CONFIG_LEDS_VERSATILE)		+= leds-versatile.o
 obj-$(CONFIG_LEDS_MENF21BMC)		+= leds-menf21bmc.o
diff -ruw linux-4.4.115/drivers/Makefile linux-4.4.115-fbx/drivers/Makefile
--- linux-4.4.115/drivers/Makefile	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/Makefile	2019-10-29 09:26:23.369200497 +0100
@@ -50,8 +50,6 @@
 obj-y				+= tty/
 obj-y				+= char/
 
-# iommu/ comes before gpu as gpu are using iommu controllers
-obj-$(CONFIG_IOMMU_SUPPORT)	+= iommu/
 
 # gpu/ comes after char for AGP vs DRM startup and after iommu
 obj-y				+= gpu/
@@ -64,12 +62,13 @@
 
 obj-$(CONFIG_PARPORT)		+= parport/
 obj-$(CONFIG_NVM)		+= lightnvm/
-obj-y				+= base/ block/ misc/ mfd/ nfc/
+obj-y				+= base/ block/ misc/ mfd/ nfc/ soc/
 obj-$(CONFIG_LIBNVDIMM)		+= nvdimm/
 obj-$(CONFIG_DMA_SHARED_BUFFER) += dma-buf/
 obj-$(CONFIG_NUBUS)		+= nubus/
 obj-y				+= macintosh/
 obj-$(CONFIG_IDE)		+= ide/
+obj-$(CONFIG_CRYPTO)		+= crypto/
 obj-$(CONFIG_SCSI)		+= scsi/
 obj-y				+= nvme/
 obj-$(CONFIG_ATA)		+= ata/
@@ -77,6 +76,8 @@
 obj-$(CONFIG_MTD)		+= mtd/
 obj-$(CONFIG_SPI)		+= spi/
 obj-$(CONFIG_SPMI)		+= spmi/
+obj-$(CONFIG_SLIMBUS)		+= slimbus/
+obj-$(CONFIG_SOUNDWIRE)		+= soundwire/
 obj-y				+= hsi/
 obj-y				+= net/
 obj-$(CONFIG_ATM)		+= atm/
@@ -123,10 +124,10 @@
 obj-y				+= mmc/
 obj-$(CONFIG_MEMSTICK)		+= memstick/
 obj-y				+= leds/
+obj-$(CONFIG_SWITCH)		+= switch/
 obj-$(CONFIG_INFINIBAND)	+= infiniband/
 obj-$(CONFIG_SGI_SN)		+= sn/
 obj-y				+= firmware/
-obj-$(CONFIG_CRYPTO)		+= crypto/
 obj-$(CONFIG_SUPERH)		+= sh/
 obj-$(CONFIG_ARCH_SHMOBILE)	+= sh/
 ifndef CONFIG_ARCH_USES_GETTIMEOFFSET
@@ -145,6 +146,9 @@
 #common clk code
 obj-y				+= clk/
 
+# iommu/ comes before gpu as gpu are using iommu controllers
+obj-$(CONFIG_IOMMU_SUPPORT)	+= iommu/
+
 obj-$(CONFIG_MAILBOX)		+= mailbox/
 obj-$(CONFIG_HWSPINLOCK)	+= hwspinlock/
 obj-$(CONFIG_REMOTEPROC)	+= remoteproc/
@@ -173,3 +177,12 @@
 obj-$(CONFIG_ANDROID)		+= android/
 obj-$(CONFIG_NVMEM)		+= nvmem/
 obj-$(CONFIG_FPGA)		+= fpga/
+
+obj-$(CONFIG_BIF)		+= bif/
+
+obj-$(CONFIG_SENSORS_SSC)	+= sensors/
+obj-$(CONFIG_ESOC)              += esoc/
+obj-$(CONFIG_TEE)		+= tee/
+
+obj-$(CONFIG_FREEBOX_GPIO)	+= fbxgpio/
+obj-$(CONFIG_FREEBOX_PROCFS)	+= fbxprocfs/
diff -ruw linux-4.4.115/drivers/md/dm.c linux-4.4.115-fbx/drivers/md/dm.c
--- linux-4.4.115/drivers/md/dm.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/md/dm.c	2019-10-29 09:26:23.869205390 +0100
@@ -1146,7 +1146,7 @@
  * Must be called without clone's queue lock held,
  * see end_clone_request() for more details.
  */
-static void dm_end_request(struct request *clone, int error)
+void dm_end_request(struct request *clone, int error)
 {
 	int rw = rq_data_dir(clone);
 	struct dm_rq_target_io *tio = clone->end_io_data;
@@ -1344,7 +1344,7 @@
  * Target's rq_end_io() function isn't called.
  * This may be used when the target's map_rq() or clone_and_map_rq() functions fail.
  */
-static void dm_kill_unmapped_request(struct request *rq, int error)
+void dm_kill_unmapped_request(struct request *rq, int error)
 {
 	rq->cmd_flags |= REQ_FAILED;
 	dm_complete_request(rq, error);
@@ -1861,6 +1861,13 @@
 		dm_complete_request(rq, r);
 }
 
+void dm_dispatch_request(struct request *rq)
+{
+	struct dm_rq_target_io *tio = tio_from_request(rq);
+
+	dm_dispatch_clone_request(tio->clone, rq);
+}
+
 static int dm_rq_bio_constructor(struct bio *bio, struct bio *bio_orig,
 				 void *data)
 {
@@ -2184,8 +2191,11 @@
 		tio = tio_from_request(rq);
 		/* Establish tio->ti before queuing work (map_tio_request) */
 		tio->ti = ti;
-		queue_kthread_work(&md->kworker, &tio->work);
+		spin_unlock(q->queue_lock);
+		if (map_request(tio, rq, md) == DM_MAPIO_REQUEUE)
+			dm_requeue_original_request(md, rq);
 		BUG_ON(!irqs_disabled());
+		spin_lock(q->queue_lock);
 	}
 
 	goto out;
@@ -2210,7 +2220,7 @@
 			 * the query about congestion status of request_queue
 			 */
 			if (dm_request_based(md))
-				r = md->queue->backing_dev_info.wb.state &
+				r = md->queue->backing_dev_info->wb.state &
 				    bdi_bits;
 			else
 				r = dm_table_any_congested(map, bdi_bits);
@@ -2292,7 +2302,7 @@
 	 * - must do so here (in alloc_dev callchain) before queue is used
 	 */
 	md->queue->queuedata = md;
-	md->queue->backing_dev_info.congested_data = md;
+	md->queue->backing_dev_info->congested_data = md;
 }
 
 static void dm_init_old_md_queue(struct mapped_device *md)
@@ -2303,7 +2313,7 @@
 	/*
 	 * Initialize aspects of queue that aren't relevant for blk-mq
 	 */
-	md->queue->backing_dev_info.congested_fn = dm_any_congested;
+	md->queue->backing_dev_info->congested_fn = dm_any_congested;
 	blk_queue_bounce_limit(md->queue, BLK_BOUNCE_ANY);
 }
 
diff -ruw linux-4.4.115/drivers/md/dm-crypt.c linux-4.4.115-fbx/drivers/md/dm-crypt.c
--- linux-4.4.115/drivers/md/dm-crypt.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/md/dm-crypt.c	2019-01-22 16:16:24.175252492 +0100
@@ -1863,16 +1863,24 @@
 	}
 
 	ret = -ENOMEM;
-	cc->io_queue = alloc_workqueue("kcryptd_io", WQ_MEM_RECLAIM, 1);
+	cc->io_queue = alloc_workqueue("kcryptd_io",
+				       WQ_HIGHPRI |
+				       WQ_MEM_RECLAIM,
+				       1);
 	if (!cc->io_queue) {
 		ti->error = "Couldn't create kcryptd io queue";
 		goto bad;
 	}
 
 	if (test_bit(DM_CRYPT_SAME_CPU, &cc->flags))
-		cc->crypt_queue = alloc_workqueue("kcryptd", WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM, 1);
+		cc->crypt_queue = alloc_workqueue("kcryptd",
+						  WQ_HIGHPRI |
+						  WQ_MEM_RECLAIM, 1);
 	else
-		cc->crypt_queue = alloc_workqueue("kcryptd", WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM | WQ_UNBOUND,
+		cc->crypt_queue = alloc_workqueue("kcryptd",
+						  WQ_HIGHPRI |
+						  WQ_MEM_RECLAIM |
+						  WQ_UNBOUND,
 						  num_online_cpus());
 	if (!cc->crypt_queue) {
 		ti->error = "Couldn't create kcryptd queue";
diff -ruw linux-4.4.115/drivers/md/dm-ioctl.c linux-4.4.115-fbx/drivers/md/dm-ioctl.c
--- linux-4.4.115/drivers/md/dm-ioctl.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/md/dm-ioctl.c	2019-10-29 09:26:23.861205311 +0100
@@ -1923,6 +1923,45 @@
 	dm_hash_exit();
 }
 
+
+/**
+ * dm_ioctl_export - Permanently export a mapped device via the ioctl interface
+ * @md: Pointer to mapped_device
+ * @name: Buffer (size DM_NAME_LEN) for name
+ * @uuid: Buffer (size DM_UUID_LEN) for uuid or NULL if not desired
+ */
+int dm_ioctl_export(struct mapped_device *md, const char *name,
+		    const char *uuid)
+{
+	int r = 0;
+	struct hash_cell *hc;
+
+	if (!md) {
+		r = -ENXIO;
+		goto out;
+	}
+
+	/* The name and uuid can only be set once. */
+	mutex_lock(&dm_hash_cells_mutex);
+	hc = dm_get_mdptr(md);
+	mutex_unlock(&dm_hash_cells_mutex);
+	if (hc) {
+		DMERR("%s: already exported", dm_device_name(md));
+		r = -ENXIO;
+		goto out;
+	}
+
+	r = dm_hash_insert(name, uuid, md);
+	if (r) {
+		DMERR("%s: could not bind to '%s'", dm_device_name(md), name);
+		goto out;
+	}
+
+	/* Let udev know we've changed. */
+	dm_kobject_uevent(md, KOBJ_CHANGE, dm_get_event_nr(md));
+out:
+	return r;
+}
 /**
  * dm_copy_name_and_uuid - Copy mapped device name & uuid into supplied buffers
  * @md: Pointer to mapped_device
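dm_ioctl_export() gives in-kernel code the equivalent of the DM_DEV_CREATE naming path: it binds a permanent name (and optional uuid) to an existing mapped_device and emits a change uevent. A hedged sketch of a caller, in the spirit of a boot-time device creator; dm_create() is the internal helper declared in drivers/md/dm.h, and error handling is trimmed:

	struct mapped_device *md;
	int r;

	r = dm_create(DM_ANY_MINOR, &md);
	if (r)
		return r;

	r = dm_ioctl_export(md, "system", NULL);  /* name can be set only once */
	if (r) {
		dm_put(md);
		return r;
	}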
diff -ruw linux-4.4.115/drivers/md/dm-linear.c linux-4.4.115-fbx/drivers/md/dm-linear.c
--- linux-4.4.115/drivers/md/dm-linear.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/md/dm-linear.c	2019-01-22 16:16:24.179252528 +0100
@@ -25,7 +25,7 @@
 /*
  * Construct a linear mapping: <dev_path> <offset>
  */
-static int linear_ctr(struct dm_target *ti, unsigned int argc, char **argv)
+int dm_linear_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 {
 	struct linear_c *lc;
 	unsigned long long tmp;
@@ -66,14 +66,16 @@
 	kfree(lc);
 	return ret;
 }
+EXPORT_SYMBOL_GPL(dm_linear_ctr);
 
-static void linear_dtr(struct dm_target *ti)
+void dm_linear_dtr(struct dm_target *ti)
 {
 	struct linear_c *lc = (struct linear_c *) ti->private;
 
 	dm_put_device(ti, lc->dev);
 	kfree(lc);
 }
+EXPORT_SYMBOL_GPL(dm_linear_dtr);
 
 static sector_t linear_map_sector(struct dm_target *ti, sector_t bi_sector)
 {
@@ -92,14 +94,15 @@
 			linear_map_sector(ti, bio->bi_iter.bi_sector);
 }
 
-static int linear_map(struct dm_target *ti, struct bio *bio)
+int dm_linear_map(struct dm_target *ti, struct bio *bio)
 {
 	linear_map_bio(ti, bio);
 
 	return DM_MAPIO_REMAPPED;
 }
+EXPORT_SYMBOL_GPL(dm_linear_map);
 
-static void linear_status(struct dm_target *ti, status_type_t type,
+void dm_linear_status(struct dm_target *ti, status_type_t type,
 			  unsigned status_flags, char *result, unsigned maxlen)
 {
 	struct linear_c *lc = (struct linear_c *) ti->private;
@@ -115,8 +118,9 @@
 		break;
 	}
 }
+EXPORT_SYMBOL_GPL(dm_linear_status);
 
-static int linear_prepare_ioctl(struct dm_target *ti,
+int dm_linear_prepare_ioctl(struct dm_target *ti,
 		struct block_device **bdev, fmode_t *mode)
 {
 	struct linear_c *lc = (struct linear_c *) ti->private;
@@ -132,25 +136,27 @@
 		return 1;
 	return 0;
 }
+EXPORT_SYMBOL_GPL(dm_linear_prepare_ioctl);
 
-static int linear_iterate_devices(struct dm_target *ti,
+int dm_linear_iterate_devices(struct dm_target *ti,
 				  iterate_devices_callout_fn fn, void *data)
 {
 	struct linear_c *lc = ti->private;
 
 	return fn(ti, lc->dev, lc->start, ti->len, data);
 }
+EXPORT_SYMBOL_GPL(dm_linear_iterate_devices);
 
 static struct target_type linear_target = {
 	.name   = "linear",
 	.version = {1, 2, 1},
 	.module = THIS_MODULE,
-	.ctr    = linear_ctr,
-	.dtr    = linear_dtr,
-	.map    = linear_map,
-	.status = linear_status,
-	.prepare_ioctl = linear_prepare_ioctl,
-	.iterate_devices = linear_iterate_devices,
+	.ctr    = dm_linear_ctr,
+	.dtr    = dm_linear_dtr,
+	.map    = dm_linear_map,
+	.status = dm_linear_status,
+	.prepare_ioctl  = dm_linear_prepare_ioctl,
+	.iterate_devices = dm_linear_iterate_devices,
 };
 
 int __init dm_linear_init(void)
diff -ruw linux-4.4.115/drivers/md/dm-table.c linux-4.4.115-fbx/drivers/md/dm-table.c
--- linux-4.4.115/drivers/md/dm-table.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/md/dm-table.c	2019-10-29 09:26:23.865205351 +0100
@@ -11,6 +11,7 @@
 #include <linux/vmalloc.h>
 #include <linux/blkdev.h>
 #include <linux/namei.h>
+#include <linux/mount.h>
 #include <linux/ctype.h>
 #include <linux/string.h>
 #include <linux/slab.h>
@@ -1659,7 +1660,7 @@
 		char b[BDEVNAME_SIZE];
 
 		if (likely(q))
-			r |= bdi_congested(&q->backing_dev_info, bdi_bits);
+			r |= bdi_congested(q->backing_dev_info, bdi_bits);
 		else
 			DMWARN_LIMIT("%s: any_congested: nonexistent device %s",
 				     dm_device_name(t->md),
diff -ruw linux-4.4.115/drivers/md/Kconfig linux-4.4.115-fbx/drivers/md/Kconfig
--- linux-4.4.115/drivers/md/Kconfig	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/md/Kconfig	2019-10-29 09:26:23.849205194 +0100
@@ -267,6 +267,23 @@
 
 	  If unsure, say N.
 
+config DM_REQ_CRYPT
+	tristate "Req Crypt target support"
+	depends on BLK_DEV_DM
+	select XTS
+	select CRYPTO_XTS
+	---help---
+	  This request-based device-mapper target allows you to create a device
+	  that transparently encrypts the data on it. You'll need to activate
+	  the ciphers you're going to use in the cryptoapi configuration.
+	  DM_REQ_CRYPT operates on whole requests (bigger payloads) to make
+	  better use of crypto hardware.
+
+	  To compile this code as a module, choose M here: the module will
+	  be called dm-req-crypt.
+
+	  If unsure, say N.
+
 config DM_SNAPSHOT
        tristate "Snapshot target"
        depends on BLK_DEV_DM
@@ -459,6 +476,33 @@
 
 	  If unsure, say N.
 
+config DM_VERITY_HASH_PREFETCH_MIN_SIZE_128
+	bool "Prefetch size 128"
+
+config DM_VERITY_HASH_PREFETCH_MIN_SIZE
+	int "Verity hash prefetch minimum size"
+	depends on DM_VERITY
+	range 1 4096
+	default 128 if DM_VERITY_HASH_PREFETCH_MIN_SIZE_128
+	default 1
+	---help---
+	  This sets the minimum number of hash blocks to prefetch for dm-verity.
+	  For devices like eMMC, a larger prefetch size such as 128 can improve
+	  performance, at the cost of the additional memory used to keep more
+	  hashes in RAM.
+
+config DM_VERITY_FEC
+	bool "Verity forward error correction support"
+	depends on DM_VERITY
+	select REED_SOLOMON
+	select REED_SOLOMON_DEC8
+	---help---
+	  Add forward error correction support to dm-verity. This option
+	  makes it possible to use pre-generated error correction data to
+	  recover from corrupted blocks.
+
+	  If unsure, say N.
+
 config DM_SWITCH
 	tristate "Switch target support (EXPERIMENTAL)"
 	depends on BLK_DEV_DM
@@ -489,4 +533,33 @@
 
 	  If unsure, say N.
 
+config DM_VERITY_AVB
+	tristate "Support AVB specific verity error behavior"
+	depends on DM_VERITY
+	---help---
+	  Enables Android Verified Boot platform-specific error
+	  behavior. In particular, it will modify the vbmeta partition
+	  specified on the kernel command-line when a non-transient error
+	  occurs (followed by a panic).
+
+	  If unsure, say N.
+
+config DM_ANDROID_VERITY
+	bool "Android verity target support"
+	depends on DM_VERITY=y
+	depends on X509_CERTIFICATE_PARSER
+	depends on SYSTEM_TRUSTED_KEYRING
+	depends on PUBLIC_KEY_ALGO_RSA
+	depends on KEYS
+	depends on ASYMMETRIC_KEY_TYPE
+	depends on ASYMMETRIC_PUBLIC_KEY_SUBTYPE
+	depends on MD_LINEAR=y
+	select DM_VERITY_HASH_PREFETCH_MIN_SIZE_128
+	---help---
+	  This device-mapper target is virtually a VERITY target. This
+	  target is set up by reading the metadata contents piggybacked
+	  onto the actual data blocks in the block device. The signature
+	  of the metadata contents is verified against the key included
+	  in the system keyring. Upon success, the underlying verity
+	  target is set up.
 endif # MD
diff -ruw linux-4.4.115/drivers/md/Makefile linux-4.4.115-fbx/drivers/md/Makefile
--- linux-4.4.115/drivers/md/Makefile	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/md/Makefile	2019-01-22 16:16:24.167252419 +0100
@@ -16,6 +16,7 @@
 dm-cache-smq-y   += dm-cache-policy-smq.o
 dm-cache-cleaner-y += dm-cache-policy-cleaner.o
 dm-era-y	+= dm-era-target.o
+dm-verity-y	+= dm-verity-target.o
 md-mod-y	+= md.o bitmap.o
 raid456-y	+= raid5.o raid5-cache.o
 
@@ -59,7 +60,17 @@
 obj-$(CONFIG_DM_CACHE_CLEANER)	+= dm-cache-cleaner.o
 obj-$(CONFIG_DM_ERA)		+= dm-era.o
 obj-$(CONFIG_DM_LOG_WRITES)	+= dm-log-writes.o
+obj-$(CONFIG_DM_REQ_CRYPT)	+= dm-req-crypt.o
+obj-$(CONFIG_DM_ANDROID_VERITY) += dm-android-verity.o
 
 ifeq ($(CONFIG_DM_UEVENT),y)
 dm-mod-objs			+= dm-uevent.o
 endif
+
+ifeq ($(CONFIG_DM_VERITY_FEC),y)
+dm-verity-objs			+= dm-verity-fec.o
+endif
+
+ifeq ($(CONFIG_DM_VERITY_AVB),y)
+dm-verity-objs			+= dm-verity-avb.o
+endif
diff -ruw linux-4.4.115/drivers/media/dvb-frontends/si2168.c linux-4.4.115-fbx/drivers/media/dvb-frontends/si2168.c
--- linux-4.4.115/drivers/media/dvb-frontends/si2168.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/media/dvb-frontends/si2168.c	2019-01-22 16:16:24.267253325 +0100
@@ -14,6 +14,8 @@
  *    GNU General Public License for more details.
  */
 
+#include <linux/delay.h>
+
 #include "si2168_priv.h"
 
 static const struct dvb_frontend_ops si2168_ops;
@@ -120,12 +122,37 @@
 	return ret;
 }
 
+static int si2168_ts_bus_ctrl(struct dvb_frontend *fe, int acquire)
+{
+	struct i2c_client *client = fe->demodulator_priv;
+	struct si2168_dev *dev = i2c_get_clientdata(client);
+	struct si2168_cmd cmd;
+	int ret = 0;
+
+	dev_dbg(&client->dev, "%s acquire: %d\n", __func__, acquire);
+
+	/* set TS_MODE property */
+	memcpy(cmd.args, "\x14\x00\x01\x10\x10\x00", 6);
+	if (acquire)
+		cmd.args[4] |= dev->ts_mode;
+	else
+		cmd.args[4] |= SI2168_TS_TRISTATE;
+	if (dev->ts_clock_gapped)
+		cmd.args[4] |= 0x40;
+	cmd.wlen = 6;
+	cmd.rlen = 4;
+	ret = si2168_cmd_execute(client, &cmd);
+
+	return ret;
+}
+
 static int si2168_read_status(struct dvb_frontend *fe, enum fe_status *status)
 {
 	struct i2c_client *client = fe->demodulator_priv;
 	struct si2168_dev *dev = i2c_get_clientdata(client);
 	struct dtv_frontend_properties *c = &fe->dtv_property_cache;
-	int ret;
+	int ret, i;
+	unsigned int utmp, utmp1, utmp2;
 	struct si2168_cmd cmd;
 
 	*status = 0;
@@ -184,6 +211,61 @@
 	dev_dbg(&client->dev, "status=%02x args=%*ph\n",
 			*status, cmd.rlen, cmd.args);
 
+	/* BER */
+	if (*status & FE_HAS_VITERBI) {
+		memcpy(cmd.args, "\x82\x00", 2);
+		cmd.wlen = 2;
+		cmd.rlen = 3;
+		ret = si2168_cmd_execute(client, &cmd);
+		if (ret)
+			goto err;
+
+		/*
+		 * Firmware returns [0, 255] mantissa and [0, 8] exponent.
+		 * Convert to DVB API: mantissa * 10^(8 - exponent) / 10^8
+		 */
+		utmp = clamp(8 - cmd.args[1], 0, 8);
+		for (i = 0, utmp1 = 1; i < utmp; i++)
+			utmp1 = utmp1 * 10;
+
+		utmp1 = cmd.args[2] * utmp1;
+		utmp2 = 100000000; /* 10^8 */
+
+		dev_dbg(&client->dev,
+			"post_bit_error=%u post_bit_count=%u ber=%u*10^-%u\n",
+			utmp1, utmp2, cmd.args[2], cmd.args[1]);
+
+		c->post_bit_error.stat[0].scale = FE_SCALE_COUNTER;
+		c->post_bit_error.stat[0].uvalue += utmp1;
+		c->post_bit_count.stat[0].scale = FE_SCALE_COUNTER;
+		c->post_bit_count.stat[0].uvalue += utmp2;
+	} else {
+		c->post_bit_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+		c->post_bit_count.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+	}
+
+	/* UCB */
+	if (*status & FE_HAS_SYNC) {
+		memcpy(cmd.args, "\x84\x01", 2);
+		cmd.wlen = 2;
+		cmd.rlen = 3;
+		ret = si2168_cmd_execute(client, &cmd);
+		if (ret)
+			goto err;
+
+		utmp1 = cmd.args[2] << 8 | cmd.args[1] << 0;
+		dev_dbg(&client->dev, "block_error=%u\n", utmp1);
+
+		/* Sometimes firmware returns bogus value */
+		if (utmp1 == 0xffff)
+			utmp1 = 0;
+
+		c->block_error.stat[0].scale = FE_SCALE_COUNTER;
+		c->block_error.stat[0].uvalue += utmp1;
+	} else {
+		c->block_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+	}
+
 	return 0;
 err:
 	dev_dbg(&client->dev, "failed=%d\n", ret);
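A worked example of the mantissa/exponent conversion in the BER branch above, as standalone C with sample values only:

#include <stdio.h>

int main(void)
{
	unsigned int exp = 3, mant = 42;	/* sample firmware readout */
	unsigned int scale = 1, i;

	for (i = 0; i < 8 - exp; i++)
		scale *= 10;

	/* BER = 42 * 10^-3, accumulated as 42 * 10^5 errors per 10^8 bits */
	printf("post_bit_error += %u, post_bit_count += %u\n",
	       mant * scale, 100000000u);
	return 0;
}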
@@ -321,6 +403,8 @@
 
 	memcpy(cmd.args, "\x14\x00\x0a\x10\x00\x00", 6);
 	cmd.args[4] = delivery_system | bandwidth;
+	if (dev->spectral_inversion)
+		cmd.args[5] |= 1;
 	cmd.wlen = 6;
 	cmd.rlen = 4;
 	ret = si2168_cmd_execute(client, &cmd);
@@ -385,6 +469,11 @@
 
 	dev->delivery_system = c->delivery_system;
 
+	/* enable ts bus */
+	ret = si2168_ts_bus_ctrl(fe, 1);
+	if (ret)
+		goto err;
+
 	return 0;
 err:
 	dev_dbg(&client->dev, "failed=%d\n", ret);
@@ -395,11 +484,10 @@
 {
 	struct i2c_client *client = fe->demodulator_priv;
 	struct si2168_dev *dev = i2c_get_clientdata(client);
+	struct dtv_frontend_properties *c = &fe->dtv_property_cache;
 	int ret, len, remaining;
 	const struct firmware *fw;
-	const char *fw_name;
 	struct si2168_cmd cmd;
-	unsigned int chip_id;
 
 	dev_dbg(&client->dev, "\n");
 
@@ -411,7 +499,7 @@
 	if (ret)
 		goto err;
 
-	if (dev->fw_loaded) {
+	if (dev->warm) {
 		/* resume */
 		memcpy(cmd.args, "\xc0\x06\x08\x0f\x00\x20\x21\x01", 8);
 		cmd.wlen = 8;
@@ -420,6 +508,7 @@
 		if (ret)
 			goto err;
 
+		udelay(100);
 		memcpy(cmd.args, "\x85", 1);
 		cmd.wlen = 1;
 		cmd.rlen = 1;
@@ -438,49 +527,14 @@
 	if (ret)
 		goto err;
 
-	/* query chip revision */
-	memcpy(cmd.args, "\x02", 1);
-	cmd.wlen = 1;
-	cmd.rlen = 13;
-	ret = si2168_cmd_execute(client, &cmd);
-	if (ret)
-		goto err;
-
-	chip_id = cmd.args[1] << 24 | cmd.args[2] << 16 | cmd.args[3] << 8 |
-			cmd.args[4] << 0;
-
-	#define SI2168_A20 ('A' << 24 | 68 << 16 | '2' << 8 | '0' << 0)
-	#define SI2168_A30 ('A' << 24 | 68 << 16 | '3' << 8 | '0' << 0)
-	#define SI2168_B40 ('B' << 24 | 68 << 16 | '4' << 8 | '0' << 0)
-
-	switch (chip_id) {
-	case SI2168_A20:
-		fw_name = SI2168_A20_FIRMWARE;
-		break;
-	case SI2168_A30:
-		fw_name = SI2168_A30_FIRMWARE;
-		break;
-	case SI2168_B40:
-		fw_name = SI2168_B40_FIRMWARE;
-		break;
-	default:
-		dev_err(&client->dev, "unknown chip version Si21%d-%c%c%c\n",
-				cmd.args[2], cmd.args[1],
-				cmd.args[3], cmd.args[4]);
-		ret = -EINVAL;
-		goto err;
-	}
-
-	dev_info(&client->dev, "found a 'Silicon Labs Si21%d-%c%c%c'\n",
-			cmd.args[2], cmd.args[1], cmd.args[3], cmd.args[4]);
-
 	/* request the firmware, this will block and timeout */
-	ret = request_firmware(&fw, fw_name, &client->dev);
+	ret = request_firmware(&fw, dev->firmware_name, &client->dev);
 	if (ret) {
 		/* fallback mechanism to handle old name for Si2168 B40 fw */
-		if (chip_id == SI2168_B40) {
-			fw_name = SI2168_B40_FIRMWARE_FALLBACK;
-			ret = request_firmware(&fw, fw_name, &client->dev);
+		if (dev->chip_id == SI2168_CHIP_ID_B40) {
+			dev->firmware_name = SI2168_B40_FIRMWARE_FALLBACK;
+			ret = request_firmware(&fw, dev->firmware_name,
+					       &client->dev);
 		}
 
 		if (ret == 0) {
@@ -490,13 +544,13 @@
 		} else {
 			dev_err(&client->dev,
 					"firmware file '%s' not found\n",
-					fw_name);
+					dev->firmware_name);
 			goto err_release_firmware;
 		}
 	}
 
 	dev_info(&client->dev, "downloading firmware from file '%s'\n",
-			fw_name);
+			dev->firmware_name);
 
 	if ((fw->size % 17 == 0) && (fw->data[0] > 5)) {
 		/* firmware is in the new format */
@@ -551,26 +605,32 @@
 	if (ret)
 		goto err;
 
-	dev_info(&client->dev, "firmware version: %c.%c.%d\n",
-			cmd.args[6], cmd.args[7], cmd.args[8]);
+	dev->version = (cmd.args[9] + '@') << 24 | (cmd.args[6] - '0') << 16 |
+		       (cmd.args[7] - '0') << 8 | (cmd.args[8]) << 0;
+	dev_info(&client->dev, "firmware version: %c %d.%d.%d\n",
+		 dev->version >> 24 & 0xff, dev->version >> 16 & 0xff,
+		 dev->version >> 8 & 0xff, dev->version >> 0 & 0xff);
 
 	/* set ts mode */
-	memcpy(cmd.args, "\x14\x00\x01\x10\x10\x00", 6);
-	cmd.args[4] |= dev->ts_mode;
-	if (dev->ts_clock_gapped)
-		cmd.args[4] |= 0x40;
-	cmd.wlen = 6;
-	cmd.rlen = 4;
-	ret = si2168_cmd_execute(client, &cmd);
+	ret = si2168_ts_bus_ctrl(fe, 1);
 	if (ret)
 		goto err;
 
-	dev->fw_loaded = true;
+	dev->warm = true;
 warm:
+	/* Init stats here to indicate which stats are supported */
+	c->cnr.len = 1;
+	c->cnr.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+	c->post_bit_error.len = 1;
+	c->post_bit_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+	c->post_bit_count.len = 1;
+	c->post_bit_count.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+	c->block_error.len = 1;
+	c->block_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+
 	dev->active = true;
 
 	return 0;
-
 err_release_firmware:
 	release_firmware(fw);
 err:
@@ -589,6 +649,15 @@
 
 	dev->active = false;
 
+	/* tri-state data bus */
+	ret = si2168_ts_bus_ctrl(fe, 0);
+	if (ret)
+		goto err;
+
+	/* Firmware B 4.0-11 or later loses warm state during sleep */
+	if (dev->version > ('B' << 24 | 4 << 16 | 0 << 8 | 11 << 0))
+		dev->warm = false;
+
 	memcpy(cmd.args, "\x13", 1);
 	cmd.wlen = 1;
 	cmd.rlen = 0;
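The sleep path above relies on the packed firmware version set in si2168_init(): one byte each for hardware letter, major, minor and build, so a plain integer compare orders releases. A standalone illustration:

#include <stdio.h>

int main(void)
{
	/* "B 4.0-12" packed as in the driver */
	unsigned int v = 'B' << 24 | 4 << 16 | 0 << 8 | 12;
	unsigned int b40_11 = 'B' << 24 | 4 << 16 | 0 << 8 | 11;

	printf("loses warm state in sleep: %s\n", v > b40_11 ? "yes" : "no");
	return 0;
}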
@@ -698,6 +767,7 @@
 	struct si2168_config *config = client->dev.platform_data;
 	struct si2168_dev *dev;
 	int ret;
+	struct si2168_cmd cmd;
 
 	dev_dbg(&client->dev, "\n");
 
@@ -708,6 +778,58 @@
 		goto err;
 	}
 
+	i2c_set_clientdata(client, dev);
+
+	/* Initialize */
+	memcpy(cmd.args, "\xc0\x12\x00\x0c\x00\x0d\x16\x00\x00\x00\x00\x00\x00", 13);
+	cmd.wlen = 13;
+	cmd.rlen = 0;
+	ret = si2168_cmd_execute(client, &cmd);
+	if (ret)
+		goto err_kfree;
+
+	/* Power up */
+	memcpy(cmd.args, "\xc0\x06\x01\x0f\x00\x20\x20\x01", 8);
+	cmd.wlen = 8;
+	cmd.rlen = 1;
+	ret = si2168_cmd_execute(client, &cmd);
+	if (ret)
+		goto err_kfree;
+
+	/* Query chip revision */
+	memcpy(cmd.args, "\x02", 1);
+	cmd.wlen = 1;
+	cmd.rlen = 13;
+	ret = si2168_cmd_execute(client, &cmd);
+	if (ret)
+		goto err_kfree;
+
+	dev->chip_id = cmd.args[1] << 24 | cmd.args[2] << 16 |
+		       cmd.args[3] << 8 | cmd.args[4] << 0;
+
+	switch (dev->chip_id) {
+	case SI2168_CHIP_ID_A20:
+		dev->firmware_name = SI2168_A20_FIRMWARE;
+		break;
+	case SI2168_CHIP_ID_A30:
+		dev->firmware_name = SI2168_A30_FIRMWARE;
+		break;
+	case SI2168_CHIP_ID_B40:
+		dev->firmware_name = SI2168_B40_FIRMWARE;
+		break;
+	case SI2168_CHIP_ID_D60:
+		dev->firmware_name = SI2168_D60_FIRMWARE;
+		break;
+	default:
+		dev_dbg(&client->dev, "unknown chip version Si21%d-%c%c%c\n",
+			cmd.args[2], cmd.args[1], cmd.args[3], cmd.args[4]);
+		ret = -ENODEV;
+		goto err_kfree;
+	}
+
+	dev->version = (cmd.args[1]) << 24 | (cmd.args[3] - '0') << 16 |
+		       (cmd.args[4] - '0') << 8 | (cmd.args[5]) << 0;
+
 	/* create mux i2c adapter for tuner */
 	dev->adapter = i2c_add_mux_adapter(client->adapter, &client->dev,
 			client, 0, 0, 0, si2168_select, si2168_deselect);
@@ -724,16 +846,20 @@
 	dev->ts_mode = config->ts_mode;
 	dev->ts_clock_inv = config->ts_clock_inv;
 	dev->ts_clock_gapped = config->ts_clock_gapped;
-	dev->fw_loaded = false;
+	dev->spectral_inversion = config->spectral_inversion;
 
-	i2c_set_clientdata(client, dev);
+	dev_info(&client->dev, "Silicon Labs Si2168-%c%d%d successfully identified\n",
+		 dev->version >> 24 & 0xff, dev->version >> 16 & 0xff,
+		 dev->version >> 8 & 0xff);
+	dev_info(&client->dev, "firmware version: %c %d.%d.%d\n",
+		 dev->version >> 24 & 0xff, dev->version >> 16 & 0xff,
+		 dev->version >> 8 & 0xff, dev->version >> 0 & 0xff);
 
-	dev_info(&client->dev, "Silicon Labs Si2168 successfully attached\n");
 	return 0;
 err_kfree:
 	kfree(dev);
 err:
-	dev_dbg(&client->dev, "failed=%d\n", ret);
+	dev_warn(&client->dev, "probe failed = %d\n", ret);
 	return ret;
 }
 
@@ -762,6 +888,7 @@
 static struct i2c_driver si2168_driver = {
 	.driver = {
 		.name	= "si2168",
+		.suppress_bind_attrs = true,
 	},
 	.probe		= si2168_probe,
 	.remove		= si2168_remove,
@@ -776,3 +903,4 @@
 MODULE_FIRMWARE(SI2168_A20_FIRMWARE);
 MODULE_FIRMWARE(SI2168_A30_FIRMWARE);
 MODULE_FIRMWARE(SI2168_B40_FIRMWARE);
+MODULE_FIRMWARE(SI2168_D60_FIRMWARE);
diff -ruw linux-4.4.115/drivers/media/dvb-frontends/si2168.h linux-4.4.115-fbx/drivers/media/dvb-frontends/si2168.h
--- linux-4.4.115/drivers/media/dvb-frontends/si2168.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/media/dvb-frontends/si2168.h	2019-01-22 16:16:24.267253325 +0100
@@ -38,6 +38,7 @@
 	/* TS mode */
 #define SI2168_TS_PARALLEL	0x06
 #define SI2168_TS_SERIAL	0x03
+#define SI2168_TS_TRISTATE	0x00
 	u8 ts_mode;
 
 	/* TS clock inverted */
@@ -45,6 +46,9 @@
 
 	/* TS clock gapped */
 	bool ts_clock_gapped;
+
+	/* Inverted spectrum */
+	bool spectral_inversion;
 };
 
 #endif
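For context, a hedged sketch of how a bridge driver might fill the extended config above before attaching the demod; the i2c board-info plumbing and the .fe/.i2c_adapter back-pointers are omitted, and the values are illustrative:

static struct si2168_config demod_cfg = {
	.ts_mode		= SI2168_TS_SERIAL,
	.ts_clock_inv		= false,
	.ts_clock_gapped	= true,
	.spectral_inversion	= false,	/* new field from this patch */
};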
diff -ruw linux-4.4.115/drivers/media/dvb-frontends/si2168_priv.h linux-4.4.115-fbx/drivers/media/dvb-frontends/si2168_priv.h
--- linux-4.4.115/drivers/media/dvb-frontends/si2168_priv.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/media/dvb-frontends/si2168_priv.h	2019-01-22 16:16:24.267253325 +0100
@@ -21,10 +21,12 @@
 #include "dvb_frontend.h"
 #include <linux/firmware.h>
 #include <linux/i2c-mux.h>
+#include <linux/kernel.h>
 
 #define SI2168_A20_FIRMWARE "dvb-demod-si2168-a20-01.fw"
 #define SI2168_A30_FIRMWARE "dvb-demod-si2168-a30-01.fw"
 #define SI2168_B40_FIRMWARE "dvb-demod-si2168-b40-01.fw"
+#define SI2168_D60_FIRMWARE "dvb-demod-si2168-d60-01.fw"
 #define SI2168_B40_FIRMWARE_FALLBACK "dvb-demod-si2168-02.fw"
 
 /* state struct */
@@ -33,11 +35,19 @@
 	struct dvb_frontend fe;
 	enum fe_delivery_system delivery_system;
 	enum fe_status fe_status;
+	#define SI2168_CHIP_ID_A20 ('A' << 24 | 68 << 16 | '2' << 8 | '0' << 0)
+	#define SI2168_CHIP_ID_A30 ('A' << 24 | 68 << 16 | '3' << 8 | '0' << 0)
+	#define SI2168_CHIP_ID_B40 ('B' << 24 | 68 << 16 | '4' << 8 | '0' << 0)
+	#define SI2168_CHIP_ID_D60 ('D' << 24 | 68 << 16 | '6' << 8 | '0' << 0)
+	unsigned int chip_id;
+	unsigned int version;
+	const char *firmware_name;
 	bool active;
-	bool fw_loaded;
+	bool warm;
 	u8 ts_mode;
 	bool ts_clock_inv;
 	bool ts_clock_gapped;
+	bool spectral_inversion;
 };
 
 /* firmware command struct */
diff -ruw linux-4.4.115/drivers/media/i2c/Kconfig linux-4.4.115-fbx/drivers/media/i2c/Kconfig
--- linux-4.4.115/drivers/media/i2c/Kconfig	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/media/i2c/Kconfig	2019-01-22 16:16:24.283253470 +0100
@@ -221,6 +221,30 @@
 	  To compile this driver as a module, choose M here: the
 	  module will be called adv7842.
 
+config VIDEO_ADV7481
+	tristate "Analog Devices ADV7481 decoder"
+	depends on VIDEO_V4L2 && I2C && VIDEO_V4L2_SUBDEV_API && MEDIA_CONTROLLER
+	---help---
+	  Support for the Analog Devices ADV7481 video decoder.
+
+	  This is an Analog Devices Component/Graphics/SD Digitizer
+	  with HDMI Receiver.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called adv7481.
+
+config VIDEO_TVTUNER
+	tristate "Analog TV tuner driver"
+	depends on VIDEO_V4L2 && I2C && VIDEO_V4L2_SUBDEV_API
+	---help---
+	  Support for the dummy TV tuner.
+
+	  This is a dummy TV tuner driver used to validate the call flow
+	  from the tv_input_test unit-test app.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called tvtuner.
+
 config VIDEO_BT819
 	tristate "BT819A VideoStream decoder"
 	depends on VIDEO_V4L2 && I2C
@@ -726,6 +750,7 @@
 menu "Sensors used on soc_camera driver"
 
 if SOC_CAMERA
+
 	source "drivers/media/i2c/soc_camera/Kconfig"
 endif
 
diff -ruw linux-4.4.115/drivers/media/i2c/Makefile linux-4.4.115-fbx/drivers/media/i2c/Makefile
--- linux-4.4.115/drivers/media/i2c/Makefile	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/media/i2c/Makefile	2019-10-29 09:26:23.901205703 +0100
@@ -27,6 +27,19 @@
 obj-$(CONFIG_VIDEO_ADV7393) += adv7393.o
 obj-$(CONFIG_VIDEO_ADV7604) += adv7604.o
 obj-$(CONFIG_VIDEO_ADV7842) += adv7842.o
+ifeq ($(CONFIG_MSM_AIS),y)
+ccflags-y += -Idrivers/media/platform/msm/ais
+ccflags-y += -Idrivers/media/platform/msm/ais/common
+ccflags-y += -Idrivers/media/platform/msm/ais/sensor/io
+ccflags-y += -Idrivers/media/platform/msm/ais/sensor/cci
+else
+ccflags-y += -Idrivers/media/platform/msm/camera_v2
+ccflags-y += -Idrivers/media/platform/msm/camera_v2/common
+ccflags-y += -Idrivers/media/platform/msm/camera_v2/sensor/io
+ccflags-y += -Idrivers/media/platform/msm/camera_v2/sensor/cci
+endif
+obj-$(CONFIG_VIDEO_ADV7481) += adv7481.o
+obj-$(CONFIG_VIDEO_TVTUNER) += tvtuner.o
 obj-$(CONFIG_VIDEO_AD9389B) += ad9389b.o
 obj-$(CONFIG_VIDEO_ADV7511) += adv7511.o
 obj-$(CONFIG_VIDEO_VPX3220) += vpx3220.o
diff -ruw linux-4.4.115/drivers/media/Kconfig linux-4.4.115-fbx/drivers/media/Kconfig
--- linux-4.4.115/drivers/media/Kconfig	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/media/Kconfig	2019-10-29 09:26:23.885205546 +0100
@@ -2,6 +2,12 @@
 # Multimedia device configuration
 #
 
+config CEC_CORE
+	tristate
+
+config CEC_NOTIFIER
+	bool
+
 menuconfig MEDIA_SUPPORT
 	tristate "Multimedia support"
 	depends on HAS_IOMEM
@@ -80,6 +86,17 @@
 
 	  Say Y when you have a TV or an IR device.
 
+config MEDIA_CEC_SUPPORT
+	bool "HDMI CEC support"
+	---help---
+	  Enable support for HDMI CEC (Consumer Electronics Control),
+	  which is an optional HDMI feature.
+
+	  Say Y when you have an HDMI receiver, transmitter or a USB CEC
+	  adapter that supports HDMI CEC.
+
+source "drivers/media/cec/Kconfig"
+
 #
 # Media controller
 #	Selectable only for webcam/grabbers, as other drivers don't use it
diff -ruw linux-4.4.115/drivers/media/Makefile linux-4.4.115-fbx/drivers/media/Makefile
--- linux-4.4.115/drivers/media/Makefile	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/media/Makefile	2019-10-29 09:26:23.885205546 +0100
@@ -24,6 +24,8 @@
 # There are both core and drivers at RC subtree - merge before drivers
 obj-y += rc/
 
+obj-$(CONFIG_CEC_CORE) += cec/
+
 #
 # Finally, merge the drivers that require the core
 #
diff -ruw linux-4.4.115/drivers/media/platform/Kconfig linux-4.4.115-fbx/drivers/media/platform/Kconfig
--- linux-4.4.115/drivers/media/platform/Kconfig	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/media/platform/Kconfig	2019-01-22 16:16:24.387254412 +0100
@@ -303,3 +303,5 @@
 if DVB_PLATFORM_DRIVERS
 source "drivers/media/platform/sti/c8sectpfe/Kconfig"
 endif #DVB_PLATFORM_DRIVERS
+
+source "drivers/media/platform/msm/Kconfig"
diff -ruw linux-4.4.115/drivers/media/platform/Makefile linux-4.4.115-fbx/drivers/media/platform/Makefile
--- linux-4.4.115/drivers/media/platform/Makefile	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/media/platform/Makefile	2019-01-22 16:16:24.387254412 +0100
@@ -55,3 +55,4 @@
 obj-$(CONFIG_VIDEO_XILINX)		+= xilinx/
 
 ccflags-y += -I$(srctree)/drivers/media/i2c
+obj-y	+= msm/
diff -ruw linux-4.4.115/drivers/media/rc/ir-rc6-decoder.c linux-4.4.115-fbx/drivers/media/rc/ir-rc6-decoder.c
--- linux-4.4.115/drivers/media/rc/ir-rc6-decoder.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/media/rc/ir-rc6-decoder.c	2019-01-22 16:16:24.523255643 +0100
@@ -260,7 +260,6 @@
 				if ((scancode & RC6_6A_LCC_MASK) == RC6_6A_MCE_CC) {
 					protocol = RC_TYPE_RC6_MCE;
 					toggle = !!(scancode & RC6_6A_MCE_TOGGLE_MASK);
-					scancode &= ~RC6_6A_MCE_TOGGLE_MASK;
 				} else {
 					protocol = RC_BIT_RC6_6A_32;
 					toggle = 0;
@@ -271,6 +270,7 @@
 				goto out;
 			}
 
+			scancode &= ~RC6_6A_MCE_TOGGLE_MASK;
 			IR_dprintk(1, "RC6(6A) proto 0x%04x, scancode 0x%08x (toggle: %u)\n",
 				   protocol, scancode, toggle);
 			break;
diff -ruw linux-4.4.115/drivers/media/rc/keymaps/Makefile linux-4.4.115-fbx/drivers/media/rc/keymaps/Makefile
--- linux-4.4.115/drivers/media/rc/keymaps/Makefile	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/media/rc/keymaps/Makefile	2019-10-29 09:26:23.977206447 +0100
@@ -18,6 +18,7 @@
 			rc-behold.o \
 			rc-behold-columbus.o \
 			rc-budget-ci-old.o \
+			rc-cec.o \
 			rc-cinergy-1400.o \
 			rc-cinergy.o \
 			rc-delock-61959.o \
@@ -79,6 +80,7 @@
 			rc-pv951.o \
 			rc-hauppauge.o \
 			rc-rc6-mce.o \
+			rc-rc6-freebox.o \
 			rc-real-audio-220-32-keys.o \
 			rc-reddo.o \
 			rc-snapstream-firefly.o \
diff -ruw linux-4.4.115/drivers/media/rc/mceusb.c linux-4.4.115-fbx/drivers/media/rc/mceusb.c
--- linux-4.4.115/drivers/media/rc/mceusb.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/media/rc/mceusb.c	2019-10-29 09:26:23.981206486 +0100
@@ -200,6 +200,7 @@
 	TIVO_KIT,
 	MCE_GEN2_NO_TX,
 	HAUPPAUGE_CX_HYBRID_TV,
+	MCE_FREEBOX,
 };
 
 struct mceusb_model {
@@ -259,6 +260,11 @@
 		.mce_gen2 = 1,
 		.rc_map = RC_MAP_TIVO,
 	},
+	[MCE_FREEBOX] = {
+		.mce_gen2 = 1,
+		.no_tx = 1,
+		.rc_map = "rc-rc6-freebox",
+	}
 };
 
 static struct usb_device_id mceusb_dev_table[] = {
@@ -354,7 +360,8 @@
 	/* Formosa Industrial Computing AIM IR605/A */
 	{ USB_DEVICE(VENDOR_FORMOSA, 0xe03c) },
 	/* Formosa Industrial Computing */
-	{ USB_DEVICE(VENDOR_FORMOSA, 0xe03e) },
+	{ USB_DEVICE(VENDOR_FORMOSA, 0xe03e),
+	  .driver_info = MCE_FREEBOX },
 	/* Formosa Industrial Computing */
 	{ USB_DEVICE(VENDOR_FORMOSA, 0xe042) },
 	/* Fintek eHome Infrared Transceiver (HP branded) */
@@ -953,7 +960,7 @@
 
 	/* 2-byte return value commands */
 	case MCE_RSP_EQIRTIMEOUT:
-		ir->rc->timeout = US_TO_NS((hi << 8 | lo) * MCE_TIME_UNIT);
+		ir->rc->timeout = US_TO_NS((hi << 8 | lo) * MCE_TIME_UNIT / 1000);
 		break;
 	case MCE_RSP_EQIRNUMPORTS:
 		ir->num_txports = hi;
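For reference, the arithmetic in the EQIRTIMEOUT change above, assuming the driver's MCE_TIME_UNIT of 50 µs and a sample 0x03e8 (1000-tick) reply (standalone C, with a local stand-in for US_TO_NS):

#include <stdio.h>

#define MCE_TIME_UNIT 50		/* microseconds per MCE tick */
#define US_TO_NS(x) ((x) * 1000)

int main(void)
{
	unsigned char hi = 0x03, lo = 0xe8;	/* sample EQIRTIMEOUT reply */
	unsigned int ticks = hi << 8 | lo;	/* 1000 ticks = 50000 us */

	printf("upstream: %u ns\n", US_TO_NS(ticks * MCE_TIME_UNIT));
	printf("patched : %u ns\n", US_TO_NS(ticks * MCE_TIME_UNIT / 1000));
	return 0;
}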
@@ -1231,7 +1238,7 @@
 	rc->priv = ir;
 	rc->driver_type = RC_DRIVER_IR_RAW;
 	rc->allowed_protocols = RC_BIT_ALL;
-	rc->timeout = MS_TO_NS(100);
+	rc->timeout = MS_TO_NS(50);
 	if (!ir->flags.no_tx) {
 		rc->s_tx_mask = mceusb_set_tx_mask;
 		rc->s_tx_carrier = mceusb_set_tx_carrier;
diff -ruw linux-4.4.115/drivers/media/rc/rc-main.c linux-4.4.115-fbx/drivers/media/rc/rc-main.c
--- linux-4.4.115/drivers/media/rc/rc-main.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/media/rc/rc-main.c	2019-10-29 09:26:23.981206486 +0100
@@ -801,6 +801,7 @@
 	{ RC_BIT_SHARP,		"sharp"		},
 	{ RC_BIT_MCE_KBD,	"mce_kbd"	},
 	{ RC_BIT_XMP,		"xmp"		},
+	{ RC_BIT_CEC,		"cec"		},
 };
 
 /**
diff -ruw linux-4.4.115/drivers/media/tuners/si2157.c linux-4.4.115-fbx/drivers/media/tuners/si2157.c
--- linux-4.4.115/drivers/media/tuners/si2157.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/media/tuners/si2157.c	2019-01-22 16:16:24.543255824 +0100
@@ -84,17 +84,31 @@
 	struct si2157_cmd cmd;
 	const struct firmware *fw;
 	const char *fw_name;
-	unsigned int chip_id;
+	unsigned int uitmp, chip_id;
 
 	dev_dbg(&client->dev, "\n");
 
-	if (dev->fw_loaded)
+	/* Returned IF frequency is garbage when firmware is not running */
+	memcpy(cmd.args, "\x15\x00\x06\x07", 4);
+	cmd.wlen = 4;
+	cmd.rlen = 4;
+	ret = si2157_cmd_execute(client, &cmd);
+	if (ret)
+		goto err;
+
+	uitmp = cmd.args[2] << 0 | cmd.args[3] << 8;
+	dev_dbg(&client->dev, "if_frequency kHz=%u\n", uitmp);
+
+	if (uitmp == dev->if_frequency / 1000)
 		goto warm;
 
 	/* power up */
 	if (dev->chiptype == SI2157_CHIPTYPE_SI2146) {
 		memcpy(cmd.args, "\xc0\x05\x01\x00\x00\x0b\x00\x00\x01", 9);
 		cmd.wlen = 9;
+	} else if (dev->chiptype == SI2157_CHIPTYPE_SI2141) {
+		memcpy(cmd.args, "\xc0\x00\x0d\x0e\x00\x01\x01\x01\x01\x03", 10);
+		cmd.wlen = 10;
 	} else {
 		memcpy(cmd.args, "\xc0\x00\x0c\x00\x00\x01\x01\x01\x01\x01\x01\x02\x00\x00\x01", 15);
 		cmd.wlen = 15;
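The warm-detection above replaces the fw_loaded flag: it reads property 0x0706 and treats a matching IF frequency as proof the firmware is running. A standalone illustration of the little-endian decode and the comparison:

#include <stdio.h>

int main(void)
{
	/* sample reply to the 0x0706 property read: 0x1388 = 5000 kHz */
	unsigned char args[4] = { 0x15, 0x00, 0x88, 0x13 };
	unsigned int khz = args[2] << 0 | args[3] << 8;
	unsigned int if_frequency = 5000000;	/* driver default, in Hz */

	printf("reported %u kHz -> %s\n", khz,
	       khz == if_frequency / 1000 ? "warm" : "cold");
	return 0;
}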
@@ -104,6 +118,15 @@
 	if (ret)
 		goto err;
 
+	/* Si2141 needs a second command before it answers the revision query */
+	if (dev->chiptype == SI2157_CHIPTYPE_SI2141) {
+		memcpy(cmd.args, "\xc0\x08\x01\x02\x00\x00\x01", 7);
+		cmd.wlen = 7;
+		ret = si2157_cmd_execute(client, &cmd);
+		if (ret)
+			goto err;
+	}
+
 	/* query chip revision */
 	memcpy(cmd.args, "\x02", 1);
 	cmd.wlen = 1;
@@ -120,12 +143,16 @@
 	#define SI2157_A30 ('A' << 24 | 57 << 16 | '3' << 8 | '0' << 0)
 	#define SI2147_A30 ('A' << 24 | 47 << 16 | '3' << 8 | '0' << 0)
 	#define SI2146_A10 ('A' << 24 | 46 << 16 | '1' << 8 | '0' << 0)
+	#define SI2141_A10 ('A' << 24 | 41 << 16 | '1' << 8 | '0' << 0)
 
 	switch (chip_id) {
 	case SI2158_A20:
 	case SI2148_A20:
 		fw_name = SI2158_A20_FIRMWARE;
 		break;
+	case SI2141_A10:
+		fw_name = SI2141_A10_FIRMWARE;
+		break;
 	case SI2157_A30:
 	case SI2147_A30:
 	case SI2146_A10:
@@ -203,9 +230,6 @@
 
 	dev_info(&client->dev, "firmware version: %c.%c.%d\n",
 			cmd.args[6], cmd.args[7], cmd.args[8]);
-
-	dev->fw_loaded = true;
-
 warm:
 	/* init statistics in order signal app which are supported */
 	c->strength.len = 1;
@@ -363,9 +387,9 @@
 
 static const struct dvb_tuner_ops si2157_ops = {
 	.info = {
-		.name           = "Silicon Labs Si2146/2147/2148/2157/2158",
-		.frequency_min  = 55000000,
-		.frequency_max  = 862000000,
+		.name           = "Silicon Labs Si2141/Si2146/2147/2148/2157/2158",
+		.frequency_min  = 42000000,
+		.frequency_max  = 870000000,
 	},
 
 	.init = si2157_init,
@@ -422,7 +446,6 @@
 	dev->fe = cfg->fe;
 	dev->inversion = cfg->inversion;
 	dev->if_port = cfg->if_port;
-	dev->fw_loaded = false;
 	dev->chiptype = (u8)id->driver_data;
 	dev->if_frequency = 5000000; /* default value of property 0x0706 */
 	mutex_init(&dev->i2c_mutex);
@@ -439,6 +462,7 @@
 	fe->tuner_priv = client;
 
 	dev_info(&client->dev, "Silicon Labs %s successfully attached\n",
+			dev->chiptype == SI2157_CHIPTYPE_SI2141 ?  "Si2141" :
 			dev->chiptype == SI2157_CHIPTYPE_SI2146 ?
 			"Si2146" : "Si2147/2148/2157/2158");
 
@@ -458,6 +482,9 @@
 
 	dev_dbg(&client->dev, "\n");
 
+	/* stop statistics polling */
+	cancel_delayed_work_sync(&dev->stat_work);
+
 	memset(&fe->ops.tuner_ops, 0, sizeof(struct dvb_tuner_ops));
 	fe->tuner_priv = NULL;
 	kfree(dev);
@@ -468,6 +495,7 @@
 static const struct i2c_device_id si2157_id_table[] = {
 	{"si2157", SI2157_CHIPTYPE_SI2157},
 	{"si2146", SI2157_CHIPTYPE_SI2146},
+	{"si2141", SI2157_CHIPTYPE_SI2141},
 	{}
 };
 MODULE_DEVICE_TABLE(i2c, si2157_id_table);
@@ -475,6 +503,7 @@
 static struct i2c_driver si2157_driver = {
 	.driver = {
 		.name	= "si2157",
+		.suppress_bind_attrs = true,
 	},
 	.probe		= si2157_probe,
 	.remove		= si2157_remove,
@@ -483,7 +512,8 @@
 
 module_i2c_driver(si2157_driver);
 
-MODULE_DESCRIPTION("Silicon Labs Si2146/2147/2148/2157/2158 silicon tuner driver");
+MODULE_DESCRIPTION("Silicon Labs Si2141/Si2146/2147/2148/2157/2158 silicon tuner driver");
 MODULE_AUTHOR("Antti Palosaari <crope@iki.fi>");
 MODULE_LICENSE("GPL");
 MODULE_FIRMWARE(SI2158_A20_FIRMWARE);
+MODULE_FIRMWARE(SI2141_A10_FIRMWARE);
diff -ruw linux-4.4.115/drivers/media/tuners/si2157_priv.h linux-4.4.115-fbx/drivers/media/tuners/si2157_priv.h
--- linux-4.4.115/drivers/media/tuners/si2157_priv.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/media/tuners/si2157_priv.h	2019-01-22 16:16:24.543255824 +0100
@@ -25,7 +25,6 @@
 	struct mutex i2c_mutex;
 	struct dvb_frontend *fe;
 	bool active;
-	bool fw_loaded;
 	bool inversion;
 	u8 chiptype;
 	u8 if_port;
@@ -35,6 +34,7 @@
 
 #define SI2157_CHIPTYPE_SI2157 0
 #define SI2157_CHIPTYPE_SI2146 1
+#define SI2157_CHIPTYPE_SI2141 2
 
 /* firmware command struct */
 #define SI2157_ARGLEN      30
@@ -45,5 +45,6 @@
 };
 
 #define SI2158_A20_FIRMWARE "dvb-tuner-si2158-a20-01.fw"
+#define SI2141_A10_FIRMWARE "dvb-tuner-si2141-a10-01.fw"
 
 #endif
diff -ruw linux-4.4.115/drivers/media/v4l2-core/v4l2-compat-ioctl32.c linux-4.4.115-fbx/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
--- linux-4.4.115/drivers/media/v4l2-core/v4l2-compat-ioctl32.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/media/v4l2-core/v4l2-compat-ioctl32.c	2019-10-29 09:26:24.009206760 +0100
@@ -20,6 +20,13 @@
 #include <media/v4l2-dev.h>
 #include <media/v4l2-ioctl.h>
 
+#define convert_in_user(srcptr, dstptr)			\
+({							\
+	typeof(*srcptr) val;				\
+							\
+	get_user(val, srcptr) || put_user(val, dstptr);	\
+})
+
 static long native_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 {
 	long ret = -ENOIOCTLCMD;
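The convert_in_user() helper added above moves one scalar between two userspace locations through a kernel temporary, returning nonzero on fault. A userspace mock of the same pattern (the get_user/put_user stand-ins below are illustrative and always "succeed"):

#include <stdio.h>

#define get_user(val, ptr) ((val) = *(ptr), 0)
#define put_user(val, ptr) (*(ptr) = (val), 0)

#define convert_in_user(srcptr, dstptr)			\
({							\
	__typeof__(*(srcptr)) val;			\
							\
	get_user(val, srcptr) || put_user(val, dstptr);	\
})

int main(void)
{
	unsigned int src = 42, dst = 0;

	if (convert_in_user(&src, &dst))	/* nonzero would mean -EFAULT */
		return 1;
	printf("dst = %u\n", dst);
	return 0;
}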
@@ -45,27 +52,45 @@
 	compat_caddr_t		bitmap;
 };
 
-static int get_v4l2_window32(struct v4l2_window *kp, struct v4l2_window32 __user *up)
+static int bufsize_v4l2_window32(struct v4l2_window32 __user *up)
 {
+	__u32 clipcount;
+
+	if (get_user(clipcount, &up->clipcount))
+		return -EFAULT;
+	if (clipcount > 2048)
+		return -EINVAL;
+	return clipcount * sizeof(struct v4l2_clip);
+}
+
+static int get_v4l2_window32(struct v4l2_window __user *kp, struct
+		v4l2_window32 __user *up, void __user *aux_buf, int aux_space)
+{
+	__u32 clipcount;
+
 	if (!access_ok(VERIFY_READ, up, sizeof(struct v4l2_window32)) ||
-		copy_from_user(&kp->w, &up->w, sizeof(up->w)) ||
-		get_user(kp->field, &up->field) ||
-		get_user(kp->chromakey, &up->chromakey) ||
-		get_user(kp->clipcount, &up->clipcount))
+		copy_in_user(&kp->w, &up->w, sizeof(up->w)) ||
+		convert_in_user(&up->field, &kp->field) ||
+		convert_in_user(&up->chromakey, &kp->chromakey) ||
+		get_user(clipcount, &up->clipcount) ||
+		put_user(clipcount, &kp->clipcount))
 			return -EFAULT;
-	if (kp->clipcount > 2048)
+	if (clipcount > 2048)
 		return -EINVAL;
-	if (kp->clipcount) {
+	if (clipcount) {
 		struct v4l2_clip32 __user *uclips;
 		struct v4l2_clip __user *kclips;
-		int n = kp->clipcount;
+		int n = clipcount;
 		compat_caddr_t p;
 
 		if (get_user(p, &up->clips))
 			return -EFAULT;
 		uclips = compat_ptr(p);
-		kclips = compat_alloc_user_space(n * sizeof(struct v4l2_clip));
-		kp->clips = kclips;
+		if (aux_space < n * sizeof(struct v4l2_clip))
+			return -EFAULT;
+		kclips = aux_buf;
+		if (put_user(kclips, &kp->clips))
+			return -EFAULT;
 		while (--n >= 0) {
 			if (copy_in_user(&kclips->c, &uclips->c, sizeof(uclips->c)))
 				return -EFAULT;
@@ -74,89 +99,91 @@
 			uclips += 1;
 			kclips += 1;
 		}
-	} else
-		kp->clips = NULL;
+	} else {
+		if (put_user(NULL, &kp->clips))
+			return -EFAULT;
+	}
 	return 0;
 }
 
-static int put_v4l2_window32(struct v4l2_window *kp, struct v4l2_window32 __user *up)
+static int put_v4l2_window32(struct v4l2_window __user *kp, struct v4l2_window32 __user *up)
 {
-	if (copy_to_user(&up->w, &kp->w, sizeof(kp->w)) ||
-		put_user(kp->field, &up->field) ||
-		put_user(kp->chromakey, &up->chromakey) ||
-		put_user(kp->clipcount, &up->clipcount))
+	if (copy_in_user(&up->w, &kp->w, sizeof(kp->w)) ||
+			convert_in_user(&kp->field, &up->field) ||
+			convert_in_user(&kp->chromakey, &up->chromakey) ||
+			convert_in_user(&kp->clipcount, &up->clipcount))
 			return -EFAULT;
 	return 0;
 }
 
-static inline int get_v4l2_pix_format(struct v4l2_pix_format *kp, struct v4l2_pix_format __user *up)
+static inline int get_v4l2_pix_format(struct v4l2_pix_format __user *kp, struct v4l2_pix_format __user *up)
 {
-	if (copy_from_user(kp, up, sizeof(struct v4l2_pix_format)))
+	if (copy_in_user(kp, up, sizeof(struct v4l2_pix_format)))
 		return -EFAULT;
 	return 0;
 }
 
-static inline int get_v4l2_pix_format_mplane(struct v4l2_pix_format_mplane *kp,
+static inline int get_v4l2_pix_format_mplane(struct v4l2_pix_format_mplane __user *kp,
 				struct v4l2_pix_format_mplane __user *up)
 {
-	if (copy_from_user(kp, up, sizeof(struct v4l2_pix_format_mplane)))
+	if (copy_in_user(kp, up, sizeof(struct v4l2_pix_format_mplane)))
 		return -EFAULT;
 	return 0;
 }
 
-static inline int put_v4l2_pix_format(struct v4l2_pix_format *kp, struct v4l2_pix_format __user *up)
+static inline int put_v4l2_pix_format(struct v4l2_pix_format __user *kp, struct v4l2_pix_format __user *up)
 {
-	if (copy_to_user(up, kp, sizeof(struct v4l2_pix_format)))
+	if (copy_in_user(up, kp, sizeof(struct v4l2_pix_format)))
 		return -EFAULT;
 	return 0;
 }
 
-static inline int put_v4l2_pix_format_mplane(struct v4l2_pix_format_mplane *kp,
+static inline int put_v4l2_pix_format_mplane(struct v4l2_pix_format_mplane __user *kp,
 				struct v4l2_pix_format_mplane __user *up)
 {
-	if (copy_to_user(up, kp, sizeof(struct v4l2_pix_format_mplane)))
+	if (copy_in_user(up, kp, sizeof(struct v4l2_pix_format_mplane)))
 		return -EFAULT;
 	return 0;
 }
 
-static inline int get_v4l2_vbi_format(struct v4l2_vbi_format *kp, struct v4l2_vbi_format __user *up)
+static inline int get_v4l2_vbi_format(struct v4l2_vbi_format __user *kp, struct v4l2_vbi_format __user *up)
 {
-	if (copy_from_user(kp, up, sizeof(struct v4l2_vbi_format)))
+	if (copy_in_user(kp, up, sizeof(struct v4l2_vbi_format)))
 		return -EFAULT;
 	return 0;
 }
 
-static inline int put_v4l2_vbi_format(struct v4l2_vbi_format *kp, struct v4l2_vbi_format __user *up)
+static inline int put_v4l2_vbi_format(struct v4l2_vbi_format __user *kp, struct v4l2_vbi_format __user *up)
 {
-	if (copy_to_user(up, kp, sizeof(struct v4l2_vbi_format)))
+	if (copy_in_user(up, kp, sizeof(struct v4l2_vbi_format)))
 		return -EFAULT;
 	return 0;
 }
 
-static inline int get_v4l2_sliced_vbi_format(struct v4l2_sliced_vbi_format *kp, struct v4l2_sliced_vbi_format __user *up)
+static inline int get_v4l2_sliced_vbi_format(struct v4l2_sliced_vbi_format __user *kp, struct v4l2_sliced_vbi_format __user *up)
 {
-	if (copy_from_user(kp, up, sizeof(struct v4l2_sliced_vbi_format)))
+	if (copy_in_user(kp, up, sizeof(struct v4l2_sliced_vbi_format)))
 		return -EFAULT;
 	return 0;
 }
 
-static inline int put_v4l2_sliced_vbi_format(struct v4l2_sliced_vbi_format *kp, struct v4l2_sliced_vbi_format __user *up)
+static inline int put_v4l2_sliced_vbi_format(struct v4l2_sliced_vbi_format __user *kp, struct v4l2_sliced_vbi_format __user *up)
 {
-	if (copy_to_user(up, kp, sizeof(struct v4l2_sliced_vbi_format)))
+	if (copy_in_user(up, kp, sizeof(struct v4l2_sliced_vbi_format)))
 		return -EFAULT;
 	return 0;
 }
 
-static inline int get_v4l2_sdr_format(struct v4l2_sdr_format *kp, struct v4l2_sdr_format __user *up)
+static inline int get_v4l2_sdr_format(struct v4l2_sdr_format __user *kp, struct v4l2_sdr_format __user *up)
 {
-	if (copy_from_user(kp, up, sizeof(struct v4l2_sdr_format)))
+	if (copy_in_user(kp, up, sizeof(struct v4l2_sdr_format)))
 		return -EFAULT;
 	return 0;
 }
 
-static inline int put_v4l2_sdr_format(struct v4l2_sdr_format *kp, struct v4l2_sdr_format __user *up)
+static inline int put_v4l2_sdr_format(struct v4l2_sdr_format __user *kp, struct v4l2_sdr_format __user *up)
 {
-	if (copy_to_user(up, kp, sizeof(struct v4l2_sdr_format)))
+	if (copy_in_user(up, kp, sizeof(struct v4l2_sdr_format)))
 		return -EFAULT;
 	return 0;
 }
@@ -191,12 +218,31 @@
 	__u32			reserved[8];
 };
 
-static int __get_v4l2_format32(struct v4l2_format *kp, struct v4l2_format32 __user *up)
+static int __bufsize_v4l2_format32(struct v4l2_format32 __user *up)
 {
-	if (get_user(kp->type, &up->type))
+	__u32 type;
+
+	if (get_user(type, &up->type))
 		return -EFAULT;
 
-	switch (kp->type) {
+	switch (type) {
+	case V4L2_BUF_TYPE_VIDEO_OVERLAY:
+	case V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY:
+		return bufsize_v4l2_window32(&up->fmt.win);
+	default:
+		return 0;
+	}
+}
+
+static int __get_v4l2_format32(struct v4l2_format __user *kp, struct
+		v4l2_format32 __user *up, void __user *aux_buf, int aux_space)
+{
+	__u32 type;
+
+	if (get_user(type, &up->type) || put_user(type, &kp->type))
+		return -EFAULT;
+
+	switch (type) {
 	case V4L2_BUF_TYPE_VIDEO_CAPTURE:
 	case V4L2_BUF_TYPE_VIDEO_OUTPUT:
 		return get_v4l2_pix_format(&kp->fmt.pix, &up->fmt.pix);
@@ -206,7 +252,7 @@
 						  &up->fmt.pix_mp);
 	case V4L2_BUF_TYPE_VIDEO_OVERLAY:
 	case V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY:
-		return get_v4l2_window32(&kp->fmt.win, &up->fmt.win);
+		return get_v4l2_window32(&kp->fmt.win, &up->fmt.win, aux_buf, aux_space);
 	case V4L2_BUF_TYPE_VBI_CAPTURE:
 	case V4L2_BUF_TYPE_VBI_OUTPUT:
 		return get_v4l2_vbi_format(&kp->fmt.vbi, &up->fmt.vbi);
@@ -223,27 +269,51 @@
 	}
 }
 
-static int get_v4l2_format32(struct v4l2_format *kp, struct v4l2_format32 __user *up)
+static int bufsize_v4l2_format32(struct v4l2_format32 __user *up)
 {
 	if (!access_ok(VERIFY_READ, up, sizeof(struct v4l2_format32)))
 		return -EFAULT;
-	return __get_v4l2_format32(kp, up);
+	return __bufsize_v4l2_format32(up);
 }
 
-static int get_v4l2_create32(struct v4l2_create_buffers *kp, struct v4l2_create_buffers32 __user *up)
+static int get_v4l2_format32(struct v4l2_format __user *kp, struct
+		v4l2_format32 __user *up, void __user *aux_buf, int aux_space)
+{
+	if (!access_ok(VERIFY_READ, up, sizeof(struct v4l2_format32)))
+		return -EFAULT;
+	return __get_v4l2_format32(kp, up, aux_buf, aux_space);
+}
+
+static int bufsize_v4l2_create32(struct v4l2_create_buffers32 __user *up)
+{
+	if (!access_ok(VERIFY_READ, up, sizeof(struct v4l2_create_buffers32)))
+		return -EFAULT;
+	return __bufsize_v4l2_format32(&up->format);
+}
+
+static int get_v4l2_create32(struct v4l2_create_buffers __user *kp, struct
+		v4l2_create_buffers32 __user *up, void __user *aux_buf,
+		int aux_space)
 {
 	if (!access_ok(VERIFY_READ, up, sizeof(struct v4l2_create_buffers32)) ||
-	    copy_from_user(kp, up, offsetof(struct v4l2_create_buffers32, format)))
+	    copy_in_user(kp, up, offsetof(struct v4l2_create_buffers32, format)))
 		return -EFAULT;
-	return __get_v4l2_format32(&kp->format, &up->format);
+	return __get_v4l2_format32(&kp->format, &up->format, aux_buf, aux_space);
 }
 
-static int __put_v4l2_format32(struct v4l2_format *kp, struct v4l2_format32 __user *up)
+static int __put_v4l2_format32(struct v4l2_format __user *kp, struct v4l2_format32 __user *up)
 {
-	if (put_user(kp->type, &up->type))
+	__u32 type;
+
+	if (kp == NULL)
+		return -EFAULT;
+
+	if (get_user(type, &kp->type))
+		return -EFAULT;
+	if (put_user(type, &up->type))
 		return -EFAULT;
 
-	switch (kp->type) {
+	switch (type) {
 	case V4L2_BUF_TYPE_VIDEO_CAPTURE:
 	case V4L2_BUF_TYPE_VIDEO_OUTPUT:
 		return put_v4l2_pix_format(&kp->fmt.pix, &up->fmt.pix);
@@ -270,18 +340,18 @@
 	}
 }
 
-static int put_v4l2_format32(struct v4l2_format *kp, struct v4l2_format32 __user *up)
+static int put_v4l2_format32(struct v4l2_format __user *kp, struct v4l2_format32 __user *up)
 {
 	if (!access_ok(VERIFY_WRITE, up, sizeof(struct v4l2_format32)))
 		return -EFAULT;
 	return __put_v4l2_format32(kp, up);
 }
 
-static int put_v4l2_create32(struct v4l2_create_buffers *kp, struct v4l2_create_buffers32 __user *up)
+static int put_v4l2_create32(struct v4l2_create_buffers __user *kp, struct v4l2_create_buffers32 __user *up)
 {
 	if (!access_ok(VERIFY_WRITE, up, sizeof(struct v4l2_create_buffers32)) ||
-	    copy_to_user(up, kp, offsetof(struct v4l2_create_buffers32, format)) ||
-	    copy_to_user(up->reserved, kp->reserved, sizeof(kp->reserved)))
+	    copy_in_user(up, kp, offsetof(struct v4l2_create_buffers32, format)) ||
+	    copy_in_user(up->reserved, kp->reserved, sizeof(kp->reserved)))
 		return -EFAULT;
 	return __put_v4l2_format32(&kp->format, &up->format);
 }
@@ -295,24 +365,24 @@
 	__u32		     reserved[4];
 };
 
-static int get_v4l2_standard32(struct v4l2_standard *kp, struct v4l2_standard32 __user *up)
+static int get_v4l2_standard32(struct v4l2_standard __user *kp, struct v4l2_standard32 __user *up)
 {
 	/* other fields are not set by the user, nor used by the driver */
 	if (!access_ok(VERIFY_READ, up, sizeof(struct v4l2_standard32)) ||
-		get_user(kp->index, &up->index))
+		convert_in_user(&up->index, &kp->index))
 		return -EFAULT;
 	return 0;
 }
 
-static int put_v4l2_standard32(struct v4l2_standard *kp, struct v4l2_standard32 __user *up)
+static int put_v4l2_standard32(struct v4l2_standard __user *kp, struct v4l2_standard32 __user *up)
 {
 	if (!access_ok(VERIFY_WRITE, up, sizeof(struct v4l2_standard32)) ||
-		put_user(kp->index, &up->index) ||
-		put_user(kp->id, &up->id) ||
-		copy_to_user(up->name, kp->name, 24) ||
-		copy_to_user(&up->frameperiod, &kp->frameperiod, sizeof(kp->frameperiod)) ||
-		put_user(kp->framelines, &up->framelines) ||
-		copy_to_user(up->reserved, kp->reserved, 4 * sizeof(__u32)))
+		convert_in_user(&kp->index, &up->index) ||
+		copy_in_user(&up->id, &kp->id, sizeof(__u64)) ||
+		copy_in_user(up->name, kp->name, 24) ||
+		copy_in_user(&up->frameperiod, &kp->frameperiod, sizeof(kp->frameperiod)) ||
+		convert_in_user(&kp->framelines, &up->framelines) ||
+		copy_in_user(up->reserved, kp->reserved, 4 * sizeof(__u32)))
 			return -EFAULT;
 	return 0;
 }
@@ -355,19 +425,21 @@
 static int get_v4l2_plane32(struct v4l2_plane __user *up, struct v4l2_plane32 __user *up32,
 				enum v4l2_memory memory)
 {
-	void __user *up_pln;
 	compat_long_t p;
 
 	if (copy_in_user(up, up32, 2 * sizeof(__u32)) ||
 		copy_in_user(&up->data_offset, &up32->data_offset,
+				sizeof(__u32)) ||
+		copy_in_user(up->reserved, up32->reserved,
+				sizeof(up->reserved)) ||
+		copy_in_user(&up->length, &up32->length,
 				sizeof(__u32)))
 		return -EFAULT;
 
 	if (memory == V4L2_MEMORY_USERPTR) {
-		if (get_user(p, &up32->m.userptr))
-			return -EFAULT;
-		up_pln = compat_ptr(p);
-		if (put_user((unsigned long)up_pln, &up->m.userptr))
+		if (get_user(p, &up32->m.userptr) ||
+			put_user((unsigned long) compat_ptr(p),
+				&up->m.userptr))
 			return -EFAULT;
 	} else if (memory == V4L2_MEMORY_DMABUF) {
 		if (copy_in_user(&up->m.fd, &up32->m.fd, sizeof(int)))
@@ -385,6 +457,8 @@
 				enum v4l2_memory memory)
 {
 	if (copy_in_user(up32, up, 2 * sizeof(__u32)) ||
+		copy_in_user(up32->reserved, up->reserved,
+				sizeof(up32->reserved)) ||
 		copy_in_user(&up32->data_offset, &up->data_offset,
 				sizeof(__u32)))
 		return -EFAULT;
@@ -400,12 +474,42 @@
 		if (copy_in_user(&up32->m.fd, &up->m.fd,
 					sizeof(int)))
 			return -EFAULT;
+	if (memory == V4L2_MEMORY_USERPTR)
+		if (copy_in_user(&up32->m.userptr, &up->m.userptr,
+					sizeof(compat_long_t)))
+			return -EFAULT;
 
 	return 0;
 }
 
-static int get_v4l2_buffer32(struct v4l2_buffer *kp, struct v4l2_buffer32 __user *up)
+static int bufsize_v4l2_buffer32(struct v4l2_buffer32 __user *up)
 {
+	__u32 type;
+	__u32 length;
+
+	if (!access_ok(VERIFY_READ, up, sizeof(struct v4l2_buffer32)) ||
+			get_user(type, &up->type) ||
+			get_user(length, &up->length))
+		return -EFAULT;
+
+	if (V4L2_TYPE_IS_MULTIPLANAR(type)) {
+		if (length > VIDEO_MAX_PLANES)
+			return -EINVAL;
+
+		/* length is already bounded by VIDEO_MAX_PLANES above */
+		return length * sizeof(struct v4l2_plane);
+	}
+	return 0;
+}
+
+static int get_v4l2_buffer32(struct v4l2_buffer __user *kp, struct
+		v4l2_buffer32 __user *up, void __user *aux_buf, int aux_space)
+{
+	__u32 type;
+	__u32 length;
+	enum v4l2_memory memory;
 	struct v4l2_plane32 __user *uplane32;
 	struct v4l2_plane __user *uplane;
 	compat_caddr_t p;
@@ -413,28 +517,39 @@
 	int ret;
 
 	if (!access_ok(VERIFY_READ, up, sizeof(struct v4l2_buffer32)) ||
-		get_user(kp->index, &up->index) ||
-		get_user(kp->type, &up->type) ||
-		get_user(kp->flags, &up->flags) ||
-		get_user(kp->memory, &up->memory) ||
-		get_user(kp->length, &up->length))
-			return -EFAULT;
-
-	if (V4L2_TYPE_IS_OUTPUT(kp->type))
-		if (get_user(kp->bytesused, &up->bytesused) ||
-			get_user(kp->field, &up->field) ||
-			get_user(kp->timestamp.tv_sec, &up->timestamp.tv_sec) ||
-			get_user(kp->timestamp.tv_usec,
-					&up->timestamp.tv_usec))
+		convert_in_user(&up->index, &kp->index) ||
+		get_user(type, &up->type) ||
+		put_user(type, &kp->type) ||
+		convert_in_user(&up->flags, &kp->flags) ||
+		get_user(memory, &up->memory) ||
+		put_user(memory, &kp->memory) ||
+		get_user(length, &up->length) ||
+		put_user(length, &kp->length))
+			return -EFAULT;
+
+	if (V4L2_TYPE_IS_OUTPUT(type))
+		if (convert_in_user(&up->bytesused, &kp->bytesused) ||
+			convert_in_user(&up->field, &kp->field) ||
+			convert_in_user(&up->timestamp.tv_sec, &kp->timestamp.tv_sec) ||
+			convert_in_user(&up->timestamp.tv_usec,
+					&kp->timestamp.tv_usec))
 			return -EFAULT;
 
-	if (V4L2_TYPE_IS_MULTIPLANAR(kp->type)) {
-		num_planes = kp->length;
+	if (type == V4L2_BUF_TYPE_PRIVATE) {
+		compat_long_t tmp;
+
+		if (get_user(tmp, &up->m.userptr) ||
+				put_user((unsigned long) compat_ptr(tmp),
+					&kp->m.userptr))
+			return -EFAULT;
+		/* m is a union; don't let the code below clobber it */
+		return 0;
+	}
+	if (V4L2_TYPE_IS_MULTIPLANAR(type)) {
+		num_planes = length;
 		if (num_planes == 0) {
-			kp->m.planes = NULL;
 			/* num_planes == 0 is legal, e.g. when userspace doesn't
 			 * need planes array on DQBUF*/
-			return 0;
+			return put_user(NULL, &kp->m.planes);
 		}
 
 		if (get_user(p, &up->m.planes))
@@ -447,39 +562,44 @@
 
 		/* We don't really care if userspace decides to kill itself
 		 * by passing a very big num_planes value */
-		uplane = compat_alloc_user_space(num_planes *
-						sizeof(struct v4l2_plane));
-		kp->m.planes = (__force struct v4l2_plane *)uplane;
+		if (aux_space < num_planes * sizeof(struct v4l2_plane))
+			return -EFAULT;
+
+		uplane = aux_buf;
+		if (put_user((__force struct v4l2_plane *)uplane,
+					&kp->m.planes))
+			return -EFAULT;
 
 		while (--num_planes >= 0) {
-			ret = get_v4l2_plane32(uplane, uplane32, kp->memory);
+			ret = get_v4l2_plane32(uplane, uplane32, memory);
 			if (ret)
 				return ret;
 			++uplane;
 			++uplane32;
 		}
 	} else {
-		switch (kp->memory) {
+		switch (memory) {
 		case V4L2_MEMORY_MMAP:
-			if (get_user(kp->m.offset, &up->m.offset))
+			if (convert_in_user(&up->m.offset, &kp->m.offset))
 				return -EFAULT;
 			break;
 		case V4L2_MEMORY_USERPTR:
 			{
 			compat_long_t tmp;
 
-			if (get_user(tmp, &up->m.userptr))
+			if (get_user(tmp, &up->m.userptr) ||
+			    put_user((unsigned long)compat_ptr(tmp),
+				     &kp->m.userptr))
 				return -EFAULT;
-
-			kp->m.userptr = (unsigned long)compat_ptr(tmp);
 			}
 			break;
 		case V4L2_MEMORY_OVERLAY:
-			if (get_user(kp->m.offset, &up->m.offset))
+			if (convert_in_user(&up->m.offset, &kp->m.offset))
 				return -EFAULT;
 			break;
 		case V4L2_MEMORY_DMABUF:
-			if (get_user(kp->m.fd, &up->m.fd))
+			if (convert_in_user(&up->m.fd, &kp->m.fd))
 				return -EFAULT;
 			break;
 		}
@@ -488,8 +608,11 @@
 	return 0;
 }
 
-static int put_v4l2_buffer32(struct v4l2_buffer *kp, struct v4l2_buffer32 __user *up)
+static int put_v4l2_buffer32(struct v4l2_buffer __user *kp, struct v4l2_buffer32 __user *up)
 {
+	__u32 type;
+	__u32 length;
+	enum v4l2_memory memory;
 	struct v4l2_plane32 __user *uplane32;
 	struct v4l2_plane __user *uplane;
 	compat_caddr_t p;
@@ -497,56 +620,63 @@
 	int ret;
 
 	if (!access_ok(VERIFY_WRITE, up, sizeof(struct v4l2_buffer32)) ||
-		put_user(kp->index, &up->index) ||
-		put_user(kp->type, &up->type) ||
-		put_user(kp->flags, &up->flags) ||
-		put_user(kp->memory, &up->memory))
-			return -EFAULT;
-
-	if (put_user(kp->bytesused, &up->bytesused) ||
-		put_user(kp->field, &up->field) ||
-		put_user(kp->timestamp.tv_sec, &up->timestamp.tv_sec) ||
-		put_user(kp->timestamp.tv_usec, &up->timestamp.tv_usec) ||
-		copy_to_user(&up->timecode, &kp->timecode, sizeof(struct v4l2_timecode)) ||
-		put_user(kp->sequence, &up->sequence) ||
-		put_user(kp->reserved2, &up->reserved2) ||
-		put_user(kp->reserved, &up->reserved) ||
-		put_user(kp->length, &up->length))
+		convert_in_user(&kp->index, &up->index) ||
+		get_user(type, &kp->type) ||
+		put_user(type, &up->type) ||
+		convert_in_user(&kp->flags, &up->flags) ||
+		get_user(memory, &kp->memory) ||
+		put_user(memory, &up->memory))
+			return -EFAULT;
+
+	if (convert_in_user(&kp->bytesused, &up->bytesused) ||
+		convert_in_user(&kp->field, &up->field) ||
+		convert_in_user(&kp->timestamp.tv_sec, &up->timestamp.tv_sec) ||
+		convert_in_user(&kp->timestamp.tv_usec, &up->timestamp.tv_usec) ||
+		copy_in_user(&up->timecode, &kp->timecode, sizeof(struct v4l2_timecode)) ||
+		convert_in_user(&kp->sequence, &up->sequence) ||
+		convert_in_user(&kp->reserved2, &up->reserved2) ||
+		convert_in_user(&kp->reserved, &up->reserved) ||
+		get_user(length, &kp->length) ||
+		put_user(length, &up->length))
+			return -EFAULT;
+	if (type == V4L2_BUF_TYPE_PRIVATE)
+		if (convert_in_user(&kp->m.userptr, &up->m.userptr))
 			return -EFAULT;
 
-	if (V4L2_TYPE_IS_MULTIPLANAR(kp->type)) {
-		num_planes = kp->length;
+	if (V4L2_TYPE_IS_MULTIPLANAR(type)) {
+		num_planes = length;
 		if (num_planes == 0)
 			return 0;
 
-		uplane = (__force struct v4l2_plane __user *)kp->m.planes;
+		if (get_user(uplane, ((__force struct v4l2_plane __user **)&kp->m.planes)))
+			return -EFAULT;
 		if (get_user(p, &up->m.planes))
 			return -EFAULT;
 		uplane32 = compat_ptr(p);
 
 		while (--num_planes >= 0) {
-			ret = put_v4l2_plane32(uplane, uplane32, kp->memory);
+			ret = put_v4l2_plane32(uplane, uplane32, memory);
 			if (ret)
 				return ret;
 			++uplane;
 			++uplane32;
 		}
 	} else {
-		switch (kp->memory) {
+		switch (memory) {
 		case V4L2_MEMORY_MMAP:
-			if (put_user(kp->m.offset, &up->m.offset))
+			if (convert_in_user(&kp->m.offset, &up->m.offset))
 				return -EFAULT;
 			break;
 		case V4L2_MEMORY_USERPTR:
-			if (put_user(kp->m.userptr, &up->m.userptr))
+			if (convert_in_user(&kp->m.userptr, &up->m.userptr))
 				return -EFAULT;
 			break;
 		case V4L2_MEMORY_OVERLAY:
-			if (put_user(kp->m.offset, &up->m.offset))
+			if (convert_in_user(&kp->m.offset, &up->m.offset))
 				return -EFAULT;
 			break;
 		case V4L2_MEMORY_DMABUF:
-			if (put_user(kp->m.fd, &up->m.fd))
+			if (convert_in_user(&kp->m.fd, &up->m.fd))
 				return -EFAULT;
 			break;
 		}
@@ -571,33 +701,35 @@
 	} fmt;
 };
 
-static int get_v4l2_framebuffer32(struct v4l2_framebuffer *kp, struct v4l2_framebuffer32 __user *up)
+static int get_v4l2_framebuffer32(struct v4l2_framebuffer __user *kp, struct v4l2_framebuffer32 __user *up)
 {
-	u32 tmp;
+	compat_caddr_t tmp;
 
 	if (!access_ok(VERIFY_READ, up, sizeof(struct v4l2_framebuffer32)) ||
 		get_user(tmp, &up->base) ||
-		get_user(kp->capability, &up->capability) ||
-		get_user(kp->flags, &up->flags) ||
-		copy_from_user(&kp->fmt, &up->fmt, sizeof(up->fmt)))
+		put_user((__force void *)compat_ptr(tmp), &kp->base) ||
+		convert_in_user(&up->capability, &kp->capability) ||
+		convert_in_user(&up->flags, &kp->flags) ||
+		get_v4l2_pix_format((struct v4l2_pix_format __user *)&kp->fmt, (struct v4l2_pix_format __user *)&up->fmt))
 			return -EFAULT;
-	kp->base = (__force void *)compat_ptr(tmp);
 	return 0;
 }
 
-static int put_v4l2_framebuffer32(struct v4l2_framebuffer *kp, struct v4l2_framebuffer32 __user *up)
+static int put_v4l2_framebuffer32(struct v4l2_framebuffer __user *kp, struct v4l2_framebuffer32 __user *up)
 {
-	u32 tmp = (u32)((unsigned long)kp->base);
+	void *base;
 
 	if (!access_ok(VERIFY_WRITE, up, sizeof(struct v4l2_framebuffer32)) ||
-		put_user(tmp, &up->base) ||
-		put_user(kp->capability, &up->capability) ||
-		put_user(kp->flags, &up->flags) ||
-		copy_to_user(&up->fmt, &kp->fmt, sizeof(up->fmt)))
+		get_user(base, &kp->base) ||
+		put_user(ptr_to_compat(base), &up->base) ||
+		convert_in_user(&kp->capability, &up->capability) ||
+		convert_in_user(&kp->flags, &up->flags) ||
+		put_v4l2_pix_format((struct v4l2_pix_format __user *)&kp->fmt, (struct v4l2_pix_format __user *)&up->fmt))
 			return -EFAULT;
 	return 0;
 }
 
 struct v4l2_input32 {
 	__u32	     index;		/*  Which input */
 	__u8	     name[32];		/*  Label */
@@ -611,16 +743,16 @@
 
 /* The 64-bit v4l2_input struct has extra padding at the end of the struct.
    Otherwise it is identical to the 32-bit version. */
-static inline int get_v4l2_input32(struct v4l2_input *kp, struct v4l2_input32 __user *up)
+static inline int get_v4l2_input32(struct v4l2_input __user *kp, struct v4l2_input32 __user *up)
 {
-	if (copy_from_user(kp, up, sizeof(struct v4l2_input32)))
+	if (copy_in_user(kp, up, sizeof(struct v4l2_input32)))
 		return -EFAULT;
 	return 0;
 }
 
-static inline int put_v4l2_input32(struct v4l2_input *kp, struct v4l2_input32 __user *up)
+static inline int put_v4l2_input32(struct v4l2_input __user *kp, struct v4l2_input32 __user *up)
 {
-	if (copy_to_user(up, kp, sizeof(struct v4l2_input32)))
+	if (copy_in_user(up, kp, sizeof(struct v4l2_input32)))
 		return -EFAULT;
 	return 0;
 }
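The framebuffer helpers above round-trip the base pointer through a 32-bit handle. A standalone illustration of the compat_ptr()/ptr_to_compat() conversion (userspace stand-ins for the kernel helpers):

#include <stdio.h>
#include <stdint.h>

typedef uint32_t compat_caddr_t;

static void *compat_ptr(compat_caddr_t p)
{
	return (void *)(uintptr_t)p;
}

static compat_caddr_t ptr_to_compat(void *p)
{
	return (compat_caddr_t)(uintptr_t)p;
}

int main(void)
{
	compat_caddr_t base32 = 0x12345678;	/* as held in v4l2_framebuffer32 */
	void *base = compat_ptr(base32);

	printf("round trip ok: %d\n", ptr_to_compat(base) == base32);
	return 0;
}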
@@ -661,34 +793,50 @@
 	}
 }
 
-static int get_v4l2_ext_controls32(struct v4l2_ext_controls *kp, struct v4l2_ext_controls32 __user *up)
+static int bufsize_v4l2_ext_controls32(struct v4l2_ext_controls32 __user *up)
+{
+	__u32 count;
+
+	if (!access_ok(VERIFY_READ, up, sizeof(struct v4l2_ext_controls32)) ||
+			get_user(count, &up->count))
+		return -EFAULT;
+	if (count > V4L2_CID_MAX_CTRLS)
+		return -EINVAL;
+	return count * sizeof(struct v4l2_ext_control);
+}
+
+static int get_v4l2_ext_controls32(struct v4l2_ext_controls __user *kp, struct
+		v4l2_ext_controls32 __user *up, void __user *aux_buf,
+		int aux_space)
 {
 	struct v4l2_ext_control32 __user *ucontrols;
 	struct v4l2_ext_control __user *kcontrols;
-	int n;
+	__u32 count;
+	unsigned int n;
 	compat_caddr_t p;
 
 	if (!access_ok(VERIFY_READ, up, sizeof(struct v4l2_ext_controls32)) ||
-		get_user(kp->ctrl_class, &up->ctrl_class) ||
-		get_user(kp->count, &up->count) ||
-		get_user(kp->error_idx, &up->error_idx) ||
-		copy_from_user(kp->reserved, up->reserved,
-			       sizeof(kp->reserved)))
-			return -EFAULT;
-	n = kp->count;
-	if (n == 0) {
-		kp->controls = NULL;
-		return 0;
-	}
+		convert_in_user(&up->ctrl_class, &kp->ctrl_class) ||
+		get_user(count, &up->count) ||
+		put_user(count, &kp->count) ||
+		convert_in_user(&up->error_idx, &kp->error_idx) ||
+		copy_in_user(kp->reserved, up->reserved, sizeof(kp->reserved)))
+			return -EFAULT;
+	if (count == 0)
+		return put_user(NULL, &kp->controls);
 	if (get_user(p, &up->controls))
 		return -EFAULT;
 	ucontrols = compat_ptr(p);
 	if (!access_ok(VERIFY_READ, ucontrols,
-			n * sizeof(struct v4l2_ext_control32)))
+			count * sizeof(struct v4l2_ext_control32)))
 		return -EFAULT;
-	kcontrols = compat_alloc_user_space(n * sizeof(struct v4l2_ext_control));
-	kp->controls = (__force struct v4l2_ext_control *)kcontrols;
-	while (--n >= 0) {
+	if (aux_space < count * sizeof(struct v4l2_ext_control))
+		return -EFAULT;
+	kcontrols = aux_buf;
+	if (put_user((__force struct v4l2_ext_control *)kcontrols,
+				&kp->controls))
+		return -EFAULT;
+	for (n = 0; n < count; n++) {
 		u32 id;
 
 		if (copy_in_user(kcontrols, ucontrols, sizeof(*ucontrols)))
@@ -710,31 +858,33 @@
 	return 0;
 }
 
-static int put_v4l2_ext_controls32(struct v4l2_ext_controls *kp, struct v4l2_ext_controls32 __user *up)
+static int put_v4l2_ext_controls32(struct v4l2_ext_controls __user *kp, struct v4l2_ext_controls32 __user *up)
 {
 	struct v4l2_ext_control32 __user *ucontrols;
-	struct v4l2_ext_control __user *kcontrols =
-		(__force struct v4l2_ext_control __user *)kp->controls;
-	int n = kp->count;
+	struct v4l2_ext_control __user *kcontrols;
+	__u32 count;
+	unsigned int n;
 	compat_caddr_t p;
 
 	if (!access_ok(VERIFY_WRITE, up, sizeof(struct v4l2_ext_controls32)) ||
-		put_user(kp->ctrl_class, &up->ctrl_class) ||
-		put_user(kp->count, &up->count) ||
-		put_user(kp->error_idx, &up->error_idx) ||
-		copy_to_user(up->reserved, kp->reserved, sizeof(up->reserved)))
+		get_user(kcontrols, &kp->controls) ||
+		convert_in_user(&kp->ctrl_class, &up->ctrl_class) ||
+		get_user(count, &kp->count) ||
+		put_user(count, &up->count) ||
+		convert_in_user(&kp->error_idx, &up->error_idx) ||
+		copy_in_user(up->reserved, kp->reserved, sizeof(up->reserved)))
 			return -EFAULT;
-	if (!kp->count)
+	if (!count)
 		return 0;
 
 	if (get_user(p, &up->controls))
 		return -EFAULT;
 	ucontrols = compat_ptr(p);
 	if (!access_ok(VERIFY_WRITE, ucontrols,
-			n * sizeof(struct v4l2_ext_control32)))
+			count * sizeof(struct v4l2_ext_control32)))
 		return -EFAULT;
 
-	while (--n >= 0) {
+	for (n = 0; n < count; n++) {
 		unsigned size = sizeof(*ucontrols);
 		u32 id;
 
@@ -756,6 +906,11 @@
 struct v4l2_event32 {
 	__u32				type;
 	union {
+		struct v4l2_event_vsync		vsync;
+		struct v4l2_event_ctrl		ctrl;
+		struct v4l2_event_frame_sync	frame_sync;
+		struct v4l2_event_src_change	src_change;
+		struct v4l2_event_motion_det	motion_det;
 		compat_s64		value64;
 		__u8			data[64];
 	} u;
@@ -769,14 +924,14 @@
 static int put_v4l2_event32(struct v4l2_event *kp, struct v4l2_event32 __user *up)
 {
 	if (!access_ok(VERIFY_WRITE, up, sizeof(struct v4l2_event32)) ||
-		put_user(kp->type, &up->type) ||
-		copy_to_user(&up->u, &kp->u, sizeof(kp->u)) ||
-		put_user(kp->pending, &up->pending) ||
-		put_user(kp->sequence, &up->sequence) ||
-		put_user(kp->timestamp.tv_sec, &up->timestamp.tv_sec) ||
-		put_user(kp->timestamp.tv_nsec, &up->timestamp.tv_nsec) ||
-		put_user(kp->id, &up->id) ||
-		copy_to_user(up->reserved, kp->reserved, 8 * sizeof(__u32)))
+		convert_in_user(&kp->type, &up->type) ||
+		copy_in_user(&up->u, &kp->u, sizeof(kp->u)) ||
+		convert_in_user(&kp->pending, &up->pending) ||
+		convert_in_user(&kp->sequence, &up->sequence) ||
+		convert_in_user(&kp->timestamp.tv_sec, &up->timestamp.tv_sec) ||
+		convert_in_user(&kp->timestamp.tv_nsec, &up->timestamp.tv_nsec) ||
+		convert_in_user(&kp->id, &up->id) ||
+		copy_in_user(up->reserved, kp->reserved, 8 * sizeof(__u32)))
 			return -EFAULT;
 	return 0;
 }
@@ -789,31 +944,34 @@
 	compat_caddr_t edid;
 };
 
-static int get_v4l2_edid32(struct v4l2_edid *kp, struct v4l2_edid32 __user *up)
+#define v4l2_subdev_edid32 v4l2_edid32
+
+static int get_v4l2_subdev_edid32(struct v4l2_subdev_edid __user *kp, struct v4l2_subdev_edid32 __user *up)
 {
-	u32 tmp;
+	compat_uptr_t tmp;
 
-	if (!access_ok(VERIFY_READ, up, sizeof(struct v4l2_edid32)) ||
-		get_user(kp->pad, &up->pad) ||
-		get_user(kp->start_block, &up->start_block) ||
-		get_user(kp->blocks, &up->blocks) ||
+	if (!access_ok(VERIFY_READ, up, sizeof(struct v4l2_subdev_edid32)) ||
+		convert_in_user(&up->pad, &kp->pad) ||
+		convert_in_user(&up->start_block, &kp->start_block) ||
+		convert_in_user(&up->blocks, &kp->blocks) ||
 		get_user(tmp, &up->edid) ||
-		copy_from_user(kp->reserved, up->reserved, sizeof(kp->reserved)))
+		put_user(compat_ptr(tmp), &kp->edid) ||
+		copy_in_user(kp->reserved, up->reserved, sizeof(kp->reserved)))
 			return -EFAULT;
-	kp->edid = (__force u8 *)compat_ptr(tmp);
 	return 0;
 }
 
-static int put_v4l2_edid32(struct v4l2_edid *kp, struct v4l2_edid32 __user *up)
+static int put_v4l2_subdev_edid32(struct v4l2_subdev_edid __user *kp, struct v4l2_subdev_edid32 __user *up)
 {
-	u32 tmp = (u32)((unsigned long)kp->edid);
+	void *edid;
 
-	if (!access_ok(VERIFY_WRITE, up, sizeof(struct v4l2_edid32)) ||
-		put_user(kp->pad, &up->pad) ||
-		put_user(kp->start_block, &up->start_block) ||
-		put_user(kp->blocks, &up->blocks) ||
-		put_user(tmp, &up->edid) ||
-		copy_to_user(up->reserved, kp->reserved, sizeof(up->reserved)))
+	if (!access_ok(VERIFY_WRITE, up, sizeof(struct v4l2_subdev_edid32)) ||
+		convert_in_user(&kp->pad, &up->pad) ||
+		convert_in_user(&kp->start_block, &up->start_block) ||
+		convert_in_user(&kp->blocks, &up->blocks) ||
+		get_user(edid, &kp->edid) ||
+		put_user(ptr_to_compat(edid), &up->edid) ||
+		copy_in_user(up->reserved, kp->reserved, sizeof(up->reserved)))
 			return -EFAULT;
 	return 0;
 }
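Both helpers above move the edid pointer across the 32/64-bit boundary: get_user() reads the narrow compat_uptr_t, compat_ptr() widens it, and ptr_to_compat() narrows it again on the way back. A condensed, hypothetical sketch of the widening half (names invented for illustration):

static int widen_edid_ptr(compat_uptr_t __user *p32,
			  u8 __user * __user *p64)
{
	compat_uptr_t tmp;

	/* read the 32-bit pointer value, then store it widened */
	if (get_user(tmp, p32) || put_user(compat_ptr(tmp), p64))
		return -EFAULT;
	return 0;
}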
@@ -846,22 +1004,37 @@
 #define VIDIOC_G_OUTPUT32	_IOR ('V', 46, s32)
 #define VIDIOC_S_OUTPUT32	_IOWR('V', 47, s32)
 
+/*
+ * Note that these macros contain return statements to avoid the need for the
+ * "caller" to check return values.
+ */
+#define ALLOC_USER_SPACE(size) \
+({ \
+	void __user *up_native; \
+	up_native = compat_alloc_user_space(size); \
+	if (!up_native) \
+		return -ENOMEM; \
+	if (clear_user(up_native, size)) \
+		return -EFAULT; \
+	up_native; \
+})
+
+#define ALLOC_AND_GET(bufsizefunc, getfunc, structname) \
+	do { \
+		aux_space = bufsizefunc(up); \
+		if (aux_space < 0) \
+			return aux_space; \
+		up_native = ALLOC_USER_SPACE(sizeof(struct structname) + aux_space); \
+		aux_buf = up_native + sizeof(struct structname); \
+		err = getfunc(up_native, up, aux_buf, aux_space); \
+	} while (0)
+
 static long do_video_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 {
-	union {
-		struct v4l2_format v2f;
-		struct v4l2_buffer v2b;
-		struct v4l2_framebuffer v2fb;
-		struct v4l2_input v2i;
-		struct v4l2_standard v2s;
-		struct v4l2_ext_controls v2ecs;
-		struct v4l2_event v2ev;
-		struct v4l2_create_buffers v2crt;
-		struct v4l2_edid v2edid;
-		unsigned long vx;
-		int vi;
-	} karg;
 	void __user *up = compat_ptr(arg);
+	void __user *up_native = NULL;
+	void __user *aux_buf;
+	int aux_space;
 	int compatible_arg = 1;
 	long err = 0;
 
@@ -900,30 +1073,35 @@
 	case VIDIOC_STREAMOFF:
 	case VIDIOC_S_INPUT:
 	case VIDIOC_S_OUTPUT:
-		err = get_user(karg.vi, (s32 __user *)up);
+		up_native = ALLOC_USER_SPACE(sizeof(unsigned __user));
+		if (convert_in_user((compat_uint_t __user *)up,
+					(unsigned __user *) up_native))
+			return -EFAULT;
 		compatible_arg = 0;
 		break;
 
 	case VIDIOC_G_INPUT:
 	case VIDIOC_G_OUTPUT:
+		up_native = ALLOC_USER_SPACE(sizeof(unsigned __user));
 		compatible_arg = 0;
 		break;
 
 	case VIDIOC_G_EDID:
 	case VIDIOC_S_EDID:
-		err = get_v4l2_edid32(&karg.v2edid, up);
+		up_native = ALLOC_USER_SPACE(sizeof(struct v4l2_subdev_edid));
+		err = get_v4l2_subdev_edid32(up_native, up);
 		compatible_arg = 0;
 		break;
 
 	case VIDIOC_G_FMT:
 	case VIDIOC_S_FMT:
 	case VIDIOC_TRY_FMT:
-		err = get_v4l2_format32(&karg.v2f, up);
+		ALLOC_AND_GET(bufsize_v4l2_format32, get_v4l2_format32, v4l2_format);
 		compatible_arg = 0;
 		break;
 
 	case VIDIOC_CREATE_BUFS:
-		err = get_v4l2_create32(&karg.v2crt, up);
+		ALLOC_AND_GET(bufsize_v4l2_create32, get_v4l2_create32, v4l2_create_buffers);
 		compatible_arg = 0;
 		break;
 
@@ -931,36 +1109,41 @@
 	case VIDIOC_QUERYBUF:
 	case VIDIOC_QBUF:
 	case VIDIOC_DQBUF:
-		err = get_v4l2_buffer32(&karg.v2b, up);
+		ALLOC_AND_GET(bufsize_v4l2_buffer32, get_v4l2_buffer32, v4l2_buffer);
 		compatible_arg = 0;
 		break;
 
 	case VIDIOC_S_FBUF:
-		err = get_v4l2_framebuffer32(&karg.v2fb, up);
+		up_native = ALLOC_USER_SPACE(sizeof(struct v4l2_framebuffer));
+		err = get_v4l2_framebuffer32(up_native, up);
 		compatible_arg = 0;
 		break;
 
 	case VIDIOC_G_FBUF:
+		up_native = ALLOC_USER_SPACE(sizeof(struct v4l2_framebuffer));
 		compatible_arg = 0;
 		break;
 
 	case VIDIOC_ENUMSTD:
-		err = get_v4l2_standard32(&karg.v2s, up);
+		up_native = ALLOC_USER_SPACE(sizeof(struct v4l2_standard));
+		err = get_v4l2_standard32(up_native, up);
 		compatible_arg = 0;
 		break;
 
 	case VIDIOC_ENUMINPUT:
-		err = get_v4l2_input32(&karg.v2i, up);
+		up_native = ALLOC_USER_SPACE(sizeof(struct v4l2_input));
+		err = get_v4l2_input32(up_native, up);
 		compatible_arg = 0;
 		break;
 
 	case VIDIOC_G_EXT_CTRLS:
 	case VIDIOC_S_EXT_CTRLS:
 	case VIDIOC_TRY_EXT_CTRLS:
-		err = get_v4l2_ext_controls32(&karg.v2ecs, up);
+		ALLOC_AND_GET(bufsize_v4l2_ext_controls32, get_v4l2_ext_controls32, v4l2_ext_controls);
 		compatible_arg = 0;
 		break;
 	case VIDIOC_DQEVENT:
+		up_native = ALLOC_USER_SPACE(sizeof(struct v4l2_event));
 		compatible_arg = 0;
 		break;
 	}
@@ -969,13 +1152,8 @@
 
 	if (compatible_arg)
 		err = native_ioctl(file, cmd, (unsigned long)up);
-	else {
-		mm_segment_t old_fs = get_fs();
-
-		set_fs(KERNEL_DS);
-		err = native_ioctl(file, cmd, (unsigned long)&karg);
-		set_fs(old_fs);
-	}
+	else
+		err = native_ioctl(file, cmd, (unsigned long)up_native);
 
 	/* Special case: even after an error we need to put the
 	   results back for these ioctls since the error_idx will
@@ -984,7 +1162,7 @@
 	case VIDIOC_G_EXT_CTRLS:
 	case VIDIOC_S_EXT_CTRLS:
 	case VIDIOC_TRY_EXT_CTRLS:
-		if (put_v4l2_ext_controls32(&karg.v2ecs, up))
+		if (put_v4l2_ext_controls32(up_native, up))
 			err = -EFAULT;
 		break;
 	}
@@ -996,44 +1174,44 @@
 	case VIDIOC_S_OUTPUT:
 	case VIDIOC_G_INPUT:
 	case VIDIOC_G_OUTPUT:
-		err = put_user(((s32)karg.vi), (s32 __user *)up);
+		err = convert_in_user(((unsigned __user *)up_native),
+							(compat_uint_t __user *)up);
 		break;
-
 	case VIDIOC_G_FBUF:
-		err = put_v4l2_framebuffer32(&karg.v2fb, up);
+		err = put_v4l2_framebuffer32(up_native, up);
 		break;
 
 	case VIDIOC_DQEVENT:
-		err = put_v4l2_event32(&karg.v2ev, up);
+		err = put_v4l2_event32(up_native, up);
 		break;
 
 	case VIDIOC_G_EDID:
 	case VIDIOC_S_EDID:
-		err = put_v4l2_edid32(&karg.v2edid, up);
+		err = put_v4l2_subdev_edid32(up_native, up);
 		break;
 
 	case VIDIOC_G_FMT:
 	case VIDIOC_S_FMT:
 	case VIDIOC_TRY_FMT:
-		err = put_v4l2_format32(&karg.v2f, up);
+		err = put_v4l2_format32(up_native, up);
 		break;
 
 	case VIDIOC_CREATE_BUFS:
-		err = put_v4l2_create32(&karg.v2crt, up);
+		err = put_v4l2_create32(up_native, up);
 		break;
 
 	case VIDIOC_QUERYBUF:
 	case VIDIOC_QBUF:
 	case VIDIOC_DQBUF:
-		err = put_v4l2_buffer32(&karg.v2b, up);
+		err = put_v4l2_buffer32(up_native, up);
 		break;
 
 	case VIDIOC_ENUMSTD:
-		err = put_v4l2_standard32(&karg.v2s, up);
+		err = put_v4l2_standard32(up_native, up);
 		break;
 
 	case VIDIOC_ENUMINPUT:
-		err = put_v4l2_input32(&karg.v2i, up);
+		err = put_v4l2_input32(up_native, up);
 		break;
 	}
 	return err;
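For reference, the ALLOC_AND_GET() invocation in the VIDIOC_G_FMT/VIDIOC_S_FMT/VIDIOC_TRY_FMT case expands to the following (bufsize_v4l2_format32() and the four-argument get_v4l2_format32() come from earlier hunks of this patch):

	aux_space = bufsize_v4l2_format32(up);
	if (aux_space < 0)
		return aux_space;
	/* ALLOC_USER_SPACE() itself returns -ENOMEM/-EFAULT on failure */
	up_native = ALLOC_USER_SPACE(sizeof(struct v4l2_format) + aux_space);
	aux_buf = up_native + sizeof(struct v4l2_format);
	err = get_v4l2_format32(up_native, up, aux_buf, aux_space);

The auxiliary buffer trails the native struct inside the same compat_alloc_user_space() allocation, so variable-length payloads (plane arrays, the ext_controls array) need no separate kernel bounce buffer.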
diff -ruw linux-4.4.115/drivers/media/v4l2-core/v4l2-ctrls.c linux-4.4.115-fbx/drivers/media/v4l2-core/v4l2-ctrls.c
--- linux-4.4.115/drivers/media/v4l2-core/v4l2-ctrls.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/media/v4l2-core/v4l2-ctrls.c	2019-10-29 09:26:24.009206760 +0100
@@ -307,12 +307,14 @@
 	static const char * const header_mode[] = {
 		"Separate Buffer",
 		"Joined With 1st Frame",
+		"Joined With I frame",
 		NULL,
 	};
 	static const char * const multi_slice[] = {
 		"Single",
 		"Max Macroblocks",
 		"Max Bytes",
+		"GOB",
 		NULL,
 	};
 	static const char * const entropy_mode[] = {
@@ -337,6 +339,7 @@
 		"4.2",
 		"5",
 		"5.1",
+		"5.2",
 		NULL,
 	};
 	static const char * const h264_loop_filter[] = {
@@ -361,7 +364,9 @@
 		"Scalable Baseline",
 		"Scalable High",
 		"Scalable High Intra",
+		"Stereo High",
 		"Multiview High",
+		"Constrained High",
 		NULL,
 	};
 	static const char * const vui_sar_idc[] = {
diff -ruw linux-4.4.115/drivers/media/v4l2-core/v4l2-ioctl.c linux-4.4.115-fbx/drivers/media/v4l2-core/v4l2-ioctl.c
--- linux-4.4.115/drivers/media/v4l2-core/v4l2-ioctl.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/media/v4l2-core/v4l2-ioctl.c	2019-10-29 09:26:24.013206799 +0100
@@ -1229,6 +1229,84 @@
 	case V4L2_SDR_FMT_CS8:		descr = "Complex S8"; break;
 	case V4L2_SDR_FMT_CS14LE:	descr = "Complex S14LE"; break;
 	case V4L2_SDR_FMT_RU12LE:	descr = "Real U12LE"; break;
+	case V4L2_PIX_FMT_NV12_UBWC:
+		descr = "NV12 UBWC"; break;
+	case V4L2_PIX_FMT_RGBA8888_UBWC:
+		descr = "RGBA8888 UBWC"; break;
+	case V4L2_PIX_FMT_SDE_ABGR_8888:
+					descr = "32-bit ABGR 8-8-8-8"; break;
+	case V4L2_PIX_FMT_SDE_RGBA_8888:
+					descr = "32-bit RGBA 8-8-8-8"; break;
+	case V4L2_PIX_FMT_SDE_RGBX_8888:
+					descr = "32-bit RGBX 8-8-8-8"; break;
+	case V4L2_PIX_FMT_SDE_XBGR_8888:
+					descr = "32-bit XBGR 8-8-8-8"; break;
+	case V4L2_PIX_FMT_SDE_RGBA_5551:
+					descr = "16-bit RGBA 5-5-5-1"; break;
+	case V4L2_PIX_FMT_SDE_ABGR_1555:
+					descr = "16-bit ABGR 1-5-5-5"; break;
+	case V4L2_PIX_FMT_SDE_BGRA_5551:
+					descr = "16-bit BGRA 5-5-5-1"; break;
+	case V4L2_PIX_FMT_SDE_BGRX_5551:
+					descr = "16-bit BGRX 5-5-5-1"; break;
+	case V4L2_PIX_FMT_SDE_RGBX_5551:
+					descr = "16-bit RGBX 5-5-5-1"; break;
+	case V4L2_PIX_FMT_SDE_XBGR_1555:
+					descr = "16-bit XBGR 1-5-5-5"; break;
+	case V4L2_PIX_FMT_SDE_RGBA_4444:
+					descr = "16-bit RGBA 4-4-4-4"; break;
+	case V4L2_PIX_FMT_SDE_BGRA_4444:
+					descr = "16-bit BGRA 4-4-4-4"; break;
+	case V4L2_PIX_FMT_SDE_ABGR_4444:
+					descr = "16-bit ABGR 4-4-4-4"; break;
+	case V4L2_PIX_FMT_SDE_RGBX_4444:
+					descr = "16-bit RGBX 4-4-4-4"; break;
+	case V4L2_PIX_FMT_SDE_BGRX_4444:
+					descr = "16-bit BGRX 4-4-4-4"; break;
+	case V4L2_PIX_FMT_SDE_XBGR_4444:
+					descr = "16-bit XBGR 4-4-4-4"; break;
+	case V4L2_PIX_FMT_SDE_BGR_565:
+					descr = "16-bit BGR 5-6-5"; break;
+	case V4L2_PIX_FMT_SDE_Y_CR_CB_GH2V2:
+					descr = "Planar YVU 4:2:0 A16"; break;
+	case V4L2_PIX_FMT_SDE_Y_CBCR_H1V2:
+					descr = "Y/CbCr 4:2:2"; break;
+	case V4L2_PIX_FMT_SDE_Y_CRCB_H1V2:
+					descr = "Y/CrCb 4:2:2"; break;
+	case V4L2_PIX_FMT_SDE_Y_CBCR_H2V2_VENUS:
+					descr = "Y/CbCr 4:2:0 Venus"; break;
+	case V4L2_PIX_FMT_SDE_Y_CRCB_H2V2_VENUS:
+					descr = "Y/CrCb 4:2:0 Venus"; break;
+	case V4L2_PIX_FMT_SDE_RGBX_8888_UBWC:
+					descr = "RGBX 8:8:8:8 UBWC"; break;
+	case V4L2_PIX_FMT_SDE_RGB_565_UBWC:
+					descr = "RGB 5:6:5 UBWC"; break;
+	case V4L2_PIX_FMT_SDE_RGBA_1010102:
+					descr = "RGBA 10:10:10:2"; break;
+	case V4L2_PIX_FMT_SDE_RGBX_1010102:
+					descr = "RGBX 10:10:10:2"; break;
+	case V4L2_PIX_FMT_SDE_ARGB_2101010:
+					descr = "ARGB 2:10:10:10"; break;
+	case V4L2_PIX_FMT_SDE_XRGB_2101010:
+					descr = "XRGB 2:10:10:10"; break;
+	case V4L2_PIX_FMT_SDE_BGRA_1010102:
+					descr = "BGRA 10:10:10:2"; break;
+	case V4L2_PIX_FMT_SDE_BGRX_1010102:
+					descr = "BGRX 10:10:10:2"; break;
+	case V4L2_PIX_FMT_SDE_ABGR_2101010:
+					descr = "ABGR 2:10:10:10"; break;
+	case V4L2_PIX_FMT_SDE_XBGR_2101010:
+					descr = "XBGR 2:10:10:10"; break;
+	case V4L2_PIX_FMT_SDE_RGBA_1010102_UBWC:
+					descr = "RGBA 10:10:10:2 UBWC"; break;
+	case V4L2_PIX_FMT_SDE_RGBX_1010102_UBWC:
+					descr = "RGBX 10:10:10:2 UBWC"; break;
+	case V4L2_PIX_FMT_SDE_Y_CBCR_H2V2_TP10:
+					descr = "Y/CbCr 4:2:0 TP10"; break;
+	case V4L2_PIX_FMT_SDE_Y_CBCR_H2V2_P010:
+					descr = "Y/CbCr 4:2:0 P10"; break;
+	case V4L2_PIX_FMT_NV12_TP10_UBWC:
+					descr = "Y/CbCr 4:2:0 TP10 UBWC"; break;
 
 	default:
 		/* Compressed formats */
@@ -1268,6 +1346,16 @@
 		case V4L2_PIX_FMT_JPGL:		descr = "JPEG Lite"; break;
 		case V4L2_PIX_FMT_SE401:	descr = "GSPCA SE401"; break;
 		case V4L2_PIX_FMT_S5C_UYVY_JPG:	descr = "S5C73MX interleaved UYVY/JPEG"; break;
+		case V4L2_PIX_FMT_HEVC:
+			descr = "HEVC"; break;
+		case V4L2_PIX_FMT_HEVC_HYBRID:
+			descr = "HEVC Hybrid"; break;
+		case V4L2_PIX_FMT_VP9:
+			descr = "VP9"; break;
+		case V4L2_PIX_FMT_DIVX_311:
+			descr = "DIVX311"; break;
+		case V4L2_PIX_FMT_DIVX:
+			descr = "DIVX"; break;
 		default:
 			WARN(1, "Unknown pixelformat 0x%08x\n", fmt->pixelformat);
 			if (fmt->description[0])
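The description strings added above reach userspace through VIDIOC_ENUM_FMT. A minimal, hypothetical consumer (vfd is assumed to be an open V4L2 capture node):

#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

static void dump_formats(int vfd)
{
	struct v4l2_fmtdesc desc;

	memset(&desc, 0, sizeof(desc));
	desc.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	/* the kernel fills desc.description from the tables above */
	while (ioctl(vfd, VIDIOC_ENUM_FMT, &desc) == 0) {
		printf("%u: %.32s\n", desc.index, desc.description);
		desc.index++;
	}
}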
diff -ruw linux-4.4.115/drivers/media/v4l2-core/videobuf2-v4l2.c linux-4.4.115-fbx/drivers/media/v4l2-core/videobuf2-v4l2.c
--- linux-4.4.115/drivers/media/v4l2-core/videobuf2-v4l2.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/media/v4l2-core/videobuf2-v4l2.c	2019-10-29 09:26:24.017206838 +0100
@@ -133,23 +133,6 @@
 	return 0;
 };
 
-static void vb2_warn_zero_bytesused(struct vb2_buffer *vb)
-{
-	static bool check_once;
-
-	if (check_once)
-		return;
-
-	check_once = true;
-	WARN_ON(1);
-
-	pr_warn("use of bytesused == 0 is deprecated and will be removed in the future,\n");
-	if (vb->vb2_queue->allow_zero_bytesused)
-		pr_warn("use VIDIOC_DECODER_CMD(V4L2_DEC_CMD_STOP) instead.\n");
-	else
-		pr_warn("use the actual size instead.\n");
-}
-
 static int vb2_queue_or_prepare_buf(struct vb2_queue *q, struct v4l2_buffer *b,
 				    const char *opname)
 {
@@ -357,9 +340,6 @@
 				struct vb2_plane *pdst = &planes[plane];
 				struct v4l2_plane *psrc = &b->m.planes[plane];
 
-				if (psrc->bytesused == 0)
-					vb2_warn_zero_bytesused(vb);
-
 				if (vb->vb2_queue->allow_zero_bytesused)
 					pdst->bytesused = psrc->bytesused;
 				else
@@ -394,9 +374,6 @@
 		}
 
 		if (V4L2_TYPE_IS_OUTPUT(b->type)) {
-			if (b->bytesused == 0)
-				vb2_warn_zero_bytesused(vb);
-
 			if (vb->vb2_queue->allow_zero_bytesused)
 				planes[0].bytesused = b->bytesused;
 			else
diff -ruw linux-4.4.115/drivers/mfd/Kconfig linux-4.4.115-fbx/drivers/mfd/Kconfig
--- linux-4.4.115/drivers/mfd/Kconfig	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/mfd/Kconfig	2019-01-22 16:16:24.663256911 +0100
@@ -708,7 +708,7 @@
 
 config MFD_SPMI_PMIC
 	tristate "Qualcomm SPMI PMICs"
-	depends on ARCH_QCOM || COMPILE_TEST
+	depends on ARCH_QCOM || COMPILE_TEST || ARCH_MSM
 	depends on OF
 	depends on SPMI
 	select REGMAP_SPMI
@@ -721,6 +721,20 @@
 	  Say M here if you want to include support for the SPMI PMIC
 	  series as a module.  The module will be called "qcom-spmi-pmic".
 
+config MFD_I2C_PMIC
+	tristate "QTI I2C PMICs"
+	depends on OF
+	depends on I2C
+	select IRQ_DOMAIN
+	select REGMAP_I2C
+	help
+	  This enables support for controlling Qualcomm Technologies, Inc.
+	  PMICs over I2C. The driver controls interrupts, and provides register
+	  access for all of the device's peripherals.
+
+	  Say M here if you want to include support for the I2C PMIC series as
+	  a module. The module will be called "qcom-i2c-pmic".
+
 config MFD_RDC321X
 	tristate "RDC R-321x southbridge"
 	select MFD_CORE
@@ -1325,6 +1339,11 @@
 	  additional drivers must be enabled in order to use the
 	  functionality of the device.
 
+config MFD_FBX7HD_TOP_PSOC
+	tristate "Freebox fbx7hd top psoc"
+	depends on SPI_MASTER
+	depends on OF
+
 config MFD_TMIO
 	bool
 	default n
@@ -1493,6 +1512,77 @@
 	  in various ST Microelectronics and ST-Ericsson embedded
 	  Nomadik series.
 
+config MSM_CDC_PINCTRL
+	tristate "MSM Codec Pinctrl"
+	select MFD_CORE
+	help
+	  Enables the MSM codec pinctrl driver. The driver provides
+	  support for handling WCD and WSA MSM GPIOs and manages their
+	  pinctrl states. It acts as the interface between the codec
+	  and the pinctrl framework.
+
+config MSM_CDC_SUPPLY
+	tristate "MSM Codec Power Supply"
+	help
+	  Enables the MSM codec power supply driver. The driver
+	  provides APIs for enabling or disabling the WCD and WSA
+	  codec power supplies and acts as the interface between the
+	  codec and the regulator framework.
+
+config WCD9XXX_CODEC_UTIL
+	tristate "WCD9XXX Codec Utils"
+	select MFD_CORE
+	help
+	  The WCD9XXX util driver provides APIs for WCD drivers
+	  covering reset, suspend/resume, regmap bus callbacks and
+	  read/write accessors. It also hides the underlying
+	  bus-specific functionality.
+
+config WCD9330_CODEC
+	tristate "WCD9330 Codec"
+	select SLIMBUS
+	select MFD_CORE
+	select WCD9XXX_CODEC_UTIL
+	select MSM_CDC_SUPPLY
+	help
+	  Enables the WCD9xxx codec core driver. The core driver provides
+	  read/write capability to registers which are part of the
+	  WCD9330 core and gives the ability to use the WCD9330 codec.
+	  The WCD9330 codec supports either I2C/I2S or Slimbus for
+	  control and data exchange with the master processor.
+
+config WCD9335_CODEC
+	tristate "WCD9335 Codec"
+	select SLIMBUS
+	select SOUNDWIRE_WCD_CTRL
+	select MFD_CORE
+	select WCD9XXX_CODEC_UTIL
+	select MSM_CDC_SUPPLY
+	select MSM_CDC_PINCTRL
+	help
+	  Enables the WCD9xxx codec core driver. The core driver provides
+	  read/write capability to registers which are part of the
+	  WCD9335 core and gives the ability to use the WCD9335 codec.
+	  The WCD9335 codec supports either I2C/I2S or Slimbus for
+	  control and data exchange with the master processor.
+
+config WCD934X_CODEC
+	tristate "WCD934X Codec"
+	depends on SLIMBUS
+	select SOUNDWIRE_WCD_CTRL
+	select MFD_CORE
+	select WCD9XXX_CODEC_UTIL
+	select MSM_CDC_SUPPLY
+	select MSM_CDC_PINCTRL
+	select PINCTRL_WCD
+	help
+	  Enables the WCD9xxx codec core driver. The core driver provides
+	  read/write capability to registers which are part of the
+	  WCD934X core and gives the ability to use the WCD934X codec.
+	  The WCD934X codec supports either I2C/I2S or Slimbus for
+	  control and data exchange with master processor.
+
 menu "Multimedia Capabilities Port drivers"
 	depends on ARCH_SA1100
 
diff -ruw linux-4.4.115/drivers/mfd/Makefile linux-4.4.115-fbx/drivers/mfd/Makefile
--- linux-4.4.115/drivers/mfd/Makefile	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/mfd/Makefile	2019-01-22 16:16:24.663256911 +0100
@@ -63,6 +63,8 @@
 obj-$(CONFIG_MFD_WM8350_I2C)	+= wm8350-i2c.o
 obj-$(CONFIG_MFD_WM8994)	+= wm8994-core.o wm8994-irq.o wm8994-regmap.o
 
+obj-$(CONFIG_MFD_FBX7HD_TOP_PSOC)	+= fbx7hd-top-psoc.o
+
 obj-$(CONFIG_TPS6105X)		+= tps6105x.o
 obj-$(CONFIG_TPS65010)		+= tps65010.o
 obj-$(CONFIG_TPS6507X)		+= tps6507x.o
@@ -161,6 +163,7 @@
 obj-$(CONFIG_MFD_PM8921_CORE) 	+= pm8921-core.o ssbi.o
 obj-$(CONFIG_MFD_QCOM_RPM)	+= qcom_rpm.o
 obj-$(CONFIG_MFD_SPMI_PMIC)	+= qcom-spmi-pmic.o
+obj-$(CONFIG_MFD_I2C_PMIC)	+= qcom-i2c-pmic.o
 obj-$(CONFIG_TPS65911_COMPARATOR)	+= tps65911-comparator.o
 obj-$(CONFIG_MFD_TPS65090)	+= tps65090.o
 obj-$(CONFIG_MFD_AAT2870_CORE)	+= aat2870-core.o
@@ -189,7 +192,15 @@
 obj-$(CONFIG_MFD_DLN2)		+= dln2.o
 obj-$(CONFIG_MFD_RT5033)	+= rt5033.o
 obj-$(CONFIG_MFD_SKY81452)	+= sky81452.o
-
+obj-$(CONFIG_MSM_CDC_PINCTRL)	+= msm-cdc-pinctrl.o
+obj-$(CONFIG_MSM_CDC_SUPPLY) += msm-cdc-supply.o
+obj-$(CONFIG_WCD9XXX_CODEC_UTIL) += wcd9xxx-utils.o
+obj-$(CONFIG_WCD9330_CODEC)	+= wcd9xxx-core.o wcd9xxx-irq.o wcd9xxx-slimslave.o\
+						wcd9330-regmap.o
+obj-$(CONFIG_WCD9335_CODEC)	+= wcd9xxx-core.o wcd9xxx-irq.o wcd9xxx-slimslave.o\
+					wcd9335-regmap.o wcd9335-tables.o
+obj-$(CONFIG_WCD934X_CODEC)	+= wcd9xxx-core.o wcd9xxx-irq.o wcd9xxx-slimslave.o\
+					wcd934x-regmap.o wcd934x-tables.o
 intel-soc-pmic-objs		:= intel_soc_pmic_core.o intel_soc_pmic_crc.o
 intel-soc-pmic-$(CONFIG_INTEL_PMC_IPC)	+= intel_soc_pmic_bxtwc.o
 obj-$(CONFIG_INTEL_SOC_PMIC)	+= intel-soc-pmic.o
diff -ruw linux-4.4.115/drivers/mfd/qcom-spmi-pmic.c linux-4.4.115-fbx/drivers/mfd/qcom-spmi-pmic.c
--- linux-4.4.115/drivers/mfd/qcom-spmi-pmic.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/mfd/qcom-spmi-pmic.c	2019-01-22 16:16:24.679257056 +0100
@@ -147,7 +147,12 @@
 		.of_match_table = pmic_spmi_id_table,
 	},
 };
-module_spmi_driver(pmic_spmi_driver);
+
+int __init pmic_spmi_init(void)
+{
+	return spmi_driver_register(&pmic_spmi_driver);
+}
+arch_initcall(pmic_spmi_init);
 
 MODULE_DESCRIPTION("Qualcomm SPMI PMIC driver");
 MODULE_ALIAS("spmi:spmi-pmic");
diff -ruw linux-4.4.115/drivers/misc/Kconfig linux-4.4.115-fbx/drivers/misc/Kconfig
--- linux-4.4.115/drivers/misc/Kconfig	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/misc/Kconfig	2019-05-31 20:58:54.922934968 +0200
@@ -487,6 +487,11 @@
 	  To compile this driver as a module, choose M here: the module will
 	  be called pch_phub.
 
+config FBXSERIAL_OF
+	bool "read fbxserial through DT chosen node"
+	depends on OF
+	select ARCH_HAS_FBXSERIAL
+
 config USB_SWITCH_FSA9480
 	tristate "FSA9480 USB Switch"
 	depends on I2C
@@ -516,6 +521,30 @@
 	  the genalloc API. It is supposed to be used for small on-chip SRAM
 	  areas found on many SoCs.
 
+config QSEECOM
+        tristate "QTI Secure Execution Communicator driver"
+        help
+          Provides a communication interface between userspace and
+          QTI Secure Execution Environment (QSEE) using Secure Channel
+          Manager (SCM) interface. It exposes APIs for both userspace and
+          kernel clients.
+
+config HDCP_QSEECOM
+	tristate "QTI High-Bandwidth Digital Content Protection Module"
+	help
+	  This module implements HDCP 2.2 features over HDMI. It exposes APIs
+	  for HDMI driver to communicate with QTI Secure Execution
+	  Environment (QSEE) via the QSEECOM Driver and also calls the APIs
+	  exposed by the HDMI driver to communicate with the Receiver.
+
+config PROFILER
+	tristate "Qualcomm Technologies, Inc. trustzone Communicator driver"
+	help
+	  Provides a communication interface between userspace and
+	  trustzone using Secure Channel Manager (SCM) interface.
+	  It exposes APIs for userspace to get system profiling
+	  information.
+
 config VEXPRESS_SYSCFG
 	bool "Versatile Express System Configuration driver"
 	depends on VEXPRESS_CONFIG
@@ -525,6 +554,36 @@
 	  bus. System Configuration interface is one of the possible means
 	  of generating transactions on this bus.
 
+config UID_SYS_STATS
+	bool "Per-UID statistics"
+	depends on PROFILING && TASK_XACCT && TASK_IO_ACCOUNTING
+	help
+	  Per-UID CPU time statistics exported to /proc/uid_cputime
+	  Per-UID I/O statistics exported to /proc/uid_io
+	  Per-UID procstat control in /proc/uid_procstat
+
+config QPNP_MISC
+	tristate "QPNP Misc Peripheral"
+	depends on SPMI || MSM_SPMI
+	help
+	  Say 'y' here to include support for the QTI QPNP MISC
+	  peripheral. The MISC peripheral holds the USB ID interrupt
+	  and the driver provides an API to check if this interrupt
+	  is available on the current PMIC chip.
+
+config UID_SYS_STATS_DEBUG
+	bool "Per-TASK statistics"
+	depends on UID_SYS_STATS
+	default n
+	help
+	  Per-task I/O statistics exported to /proc/uid_io
+
+config MEMORY_STATE_TIME
+	tristate "Memory freq/bandwidth time statistics"
+	depends on PROFILING
+	help
+	  Memory time statistics exported to /sys/kernel/memory_state_time
+
 source "drivers/misc/c2port/Kconfig"
 source "drivers/misc/eeprom/Kconfig"
 source "drivers/misc/cb710/Kconfig"
@@ -533,8 +592,11 @@
 source "drivers/misc/altera-stapl/Kconfig"
 source "drivers/misc/mei/Kconfig"
 source "drivers/misc/vmw_vmci/Kconfig"
+source "drivers/misc/qcom/Kconfig"
 source "drivers/misc/mic/Kconfig"
 source "drivers/misc/genwqe/Kconfig"
 source "drivers/misc/echo/Kconfig"
 source "drivers/misc/cxl/Kconfig"
+source "drivers/misc/freebox/Kconfig"
+source "drivers/misc/hdmi-cec/Kconfig"
 endmenu
diff -ruw linux-4.4.115/drivers/misc/Makefile linux-4.4.115-fbx/drivers/misc/Makefile
--- linux-4.4.115/drivers/misc/Makefile	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/misc/Makefile	2019-05-31 20:58:54.922934968 +0200
@@ -25,6 +25,7 @@
 obj-$(CONFIG_SGI_IOC4)		+= ioc4.o
 obj-$(CONFIG_ENCLOSURE_SERVICES) += enclosure.o
 obj-$(CONFIG_KGDB_TESTS)	+= kgdbts.o
+obj-$(CONFIG_FBXSERIAL_OF)	+= fbxserial_of.o
 obj-$(CONFIG_SGI_XP)		+= sgi-xp/
 obj-$(CONFIG_SGI_GRU)		+= sgi-gru/
 obj-$(CONFIG_CS5535_MFGPT)	+= cs5535-mfgpt.o
@@ -53,6 +54,18 @@
 obj-$(CONFIG_SRAM)		+= sram.o
 obj-y				+= mic/
 obj-$(CONFIG_GENWQE)		+= genwqe/
+obj-$(CONFIG_QSEECOM) += qseecom.o
+obj-$(CONFIG_PROFILER) += profiler.o
+obj-$(CONFIG_HDCP_QSEECOM) += hdcp.o
+ifdef CONFIG_COMPAT
+obj-$(CONFIG_QSEECOM) += compat_qseecom.o
+endif
 obj-$(CONFIG_ECHO)		+= echo/
 obj-$(CONFIG_VEXPRESS_SYSCFG)	+= vexpress-syscfg.o
 obj-$(CONFIG_CXL_BASE)		+= cxl/
+obj-$(CONFIG_UID_SYS_STATS) += uid_sys_stats.o
+obj-y				+= qcom/
+obj-$(CONFIG_QPNP_MISC) 	+= qpnp-misc.o
+obj-$(CONFIG_MEMORY_STATE_TIME) += memory_state_time.o
+obj-y				+= freebox/
+obj-y				+= hdmi-cec/
diff -ruw linux-4.4.115/drivers/mmc/card/block.c linux-4.4.115-fbx/drivers/mmc/card/block.c
--- linux-4.4.115/drivers/mmc/card/block.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/mmc/card/block.c	2019-10-29 09:26:24.053207190 +0100
@@ -30,14 +30,19 @@
 #include <linux/blkdev.h>
 #include <linux/mutex.h>
 #include <linux/scatterlist.h>
+#include <linux/bitops.h>
 #include <linux/string_helpers.h>
 #include <linux/delay.h>
 #include <linux/capability.h>
 #include <linux/compat.h>
 #include <linux/pm_runtime.h>
+#include <linux/ioprio.h>
+
+#include <trace/events/mmc.h>
 
 #include <linux/mmc/ioctl.h>
 #include <linux/mmc/card.h>
+#include <linux/mmc/core.h>
 #include <linux/mmc/host.h>
 #include <linux/mmc/mmc.h>
 #include <linux/mmc/sd.h>
@@ -47,13 +52,10 @@
 #include "queue.h"
 
 MODULE_ALIAS("mmc:block");
-
-#ifdef KERNEL
 #ifdef MODULE_PARAM_PREFIX
 #undef MODULE_PARAM_PREFIX
 #endif
 #define MODULE_PARAM_PREFIX "mmcblk."
-#endif
 
 #define INAND_CMD38_ARG_EXT_CSD  113
 #define INAND_CMD38_ARG_ERASE    0x00
@@ -61,15 +63,33 @@
 #define INAND_CMD38_ARG_SECERASE 0x80
 #define INAND_CMD38_ARG_SECTRIM1 0x81
 #define INAND_CMD38_ARG_SECTRIM2 0x88
-#define MMC_BLK_TIMEOUT_MS  (10 * 60 * 1000)        /* 10 minute timeout */
+#define MMC_BLK_TIMEOUT_MS  (30 * 1000)        /* 30 sec timeout */
 #define MMC_SANITIZE_REQ_TIMEOUT 240000
 #define MMC_EXTRACT_INDEX_FROM_ARG(x) ((x & 0x00FF0000) >> 16)
+#define MMC_CMDQ_STOP_TIMEOUT_MS 100
 
 #define mmc_req_rel_wr(req)	((req->cmd_flags & REQ_FUA) && \
 				  (rq_data_dir(req) == WRITE))
 #define PACKED_CMD_VER	0x01
 #define PACKED_CMD_WR	0x02
+#define PACKED_TRIGGER_MAX_ELEMENTS	5000
+
+#define MMC_BLK_MAX_RETRIES 5 /* max # of retries before aborting a command */
+#define MMC_BLK_UPDATE_STOP_REASON(stats, reason)			\
+	do {								\
+		if (stats->enabled)					\
+			stats->pack_stop_reason[reason]++;		\
+	} while (0)
+
+#define MAX_RETRIES 5
+#define PCKD_TRGR_INIT_MEAN_POTEN	17
+#define PCKD_TRGR_POTEN_LOWER_BOUND	5
+#define PCKD_TRGR_URGENT_PENALTY	2
+#define PCKD_TRGR_LOWER_BOUND		5
+#define PCKD_TRGR_PRECISION_MULTIPLIER	100
 
+static struct mmc_cmdq_req *mmc_cmdq_prep_dcmd(
+		struct mmc_queue_req *mqrq, struct mmc_queue *mq);
 static DEFINE_MUTEX(block_mutex);
 
 /*
@@ -104,6 +124,7 @@
 #define MMC_BLK_CMD23	(1 << 0)	/* Can do SET_BLOCK_COUNT for multiblock */
 #define MMC_BLK_REL_WR	(1 << 1)	/* MMC Reliable write support */
 #define MMC_BLK_PACKED_CMD	(1 << 2)	/* MMC packed command support */
+#define MMC_BLK_CMD_QUEUE	(1 << 3) /* MMC command queue support */
 
 	unsigned int	usage;
 	unsigned int	read_only;
@@ -114,6 +135,8 @@
 #define MMC_BLK_WRITE		BIT(1)
 #define MMC_BLK_DISCARD		BIT(2)
 #define MMC_BLK_SECDISCARD	BIT(3)
+#define MMC_BLK_FLUSH		BIT(4)
+#define MMC_BLK_PARTSWITCH	BIT(5)
 
 	/*
 	 * Only set in main mmc_blk_data associated
@@ -123,6 +146,8 @@
 	unsigned int	part_curr;
 	struct device_attribute force_ro;
 	struct device_attribute power_ro_lock;
+	struct device_attribute num_wr_reqs_to_start_packing;
+	struct device_attribute no_pack_for_random;
 	int	area_type;
 };
 
@@ -140,6 +165,8 @@
 static inline int mmc_blk_part_switch(struct mmc_card *card,
 				      struct mmc_blk_data *md);
 static int get_card_status(struct mmc_card *card, u32 *status, int retries);
+static int mmc_blk_cmdq_switch(struct mmc_card *card,
+			       struct mmc_blk_data *md, bool enable);
 
 static inline void mmc_blk_clear_packed(struct mmc_queue_req *mqrq)
 {
@@ -171,11 +198,7 @@
 
 static inline int mmc_get_devidx(struct gendisk *disk)
 {
-	int devmaj = MAJOR(disk_devt(disk));
-	int devidx = MINOR(disk_devt(disk)) / perdev_minors;
-
-	if (!devmaj)
-		devidx = disk->first_minor / perdev_minors;
+	int devidx = disk->first_minor / perdev_minors;
 	return devidx;
 }
 
@@ -200,9 +223,13 @@
 {
 	int ret;
 	struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
-	struct mmc_card *card = md->queue.card;
+	struct mmc_card *card;
 	int locked = 0;
 
+	if (!md)
+		return -EINVAL;
+
+	card = md->queue.card;
 	if (card->ext_csd.boot_ro_lock & EXT_CSD_BOOT_WP_B_PERM_WP_EN)
 		locked = 2;
 	else if (card->ext_csd.boot_ro_lock & EXT_CSD_BOOT_WP_B_PWR_WP_EN)
@@ -230,6 +257,8 @@
 		return count;
 
 	md = mmc_blk_get(dev_to_disk(dev));
+	if (!md)
+		return -EINVAL;
 	card = md->queue.card;
 
 	mmc_get_card(card);
@@ -267,6 +296,9 @@
 	int ret;
 	struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
 
+	if (!md)
+		return -EINVAL;
+
 	ret = snprintf(buf, PAGE_SIZE, "%d\n",
 		       get_disk_ro(dev_to_disk(dev)) ^
 		       md->read_only);
@@ -281,6 +313,10 @@
 	char *end;
 	struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
 	unsigned long set = simple_strtoul(buf, &end, 0);
+
+	if (!md)
+		return -EINVAL;
+
 	if (end == buf) {
 		ret = -EINVAL;
 		goto out;
@@ -293,6 +329,362 @@
 	return ret;
 }
 
+#ifdef CONFIG_MMC_SIMULATE_MAX_SPEED
+
+static int max_read_speed, max_write_speed, cache_size = 4;
+
+module_param(max_read_speed, int, S_IRUSR | S_IRGRP);
+MODULE_PARM_DESC(max_read_speed, "maximum KB/s read speed 0=off");
+module_param(max_write_speed, int, S_IRUSR | S_IRGRP);
+MODULE_PARM_DESC(max_write_speed, "maximum KB/s write speed 0=off");
+module_param(cache_size, int, S_IRUSR | S_IRGRP);
+MODULE_PARM_DESC(cache_size, "MB high speed memory or SLC cache");
+
+/*
+ * helper macros and expectations:
+ *  size    - unsigned long number of bytes
+ *  jiffies - unsigned long HZ timestamp difference
+ *  speed   - unsigned KB/s transfer rate
+ */
+#define size_and_speed_to_jiffies(size, speed) \
+		((size) * HZ / (speed) / 1024UL)
+#define jiffies_and_speed_to_size(jiffies, speed) \
+		(((speed) * (jiffies) * 1024UL) / HZ)
+#define jiffies_and_size_to_speed(jiffies, size) \
+		((size) * HZ / (jiffies) / 1024UL)
+
+/* Limits to report warning */
+/* jiffies_and_size_to_speed(10*HZ, queue_max_hw_sectors(q) * 512UL) ~ 25 */
+#define MIN_SPEED(q) 250 /* 10 times faster than a floppy disk */
+#define MAX_SPEED(q) jiffies_and_size_to_speed(1, queue_max_sectors(q) * 512UL)
+
+#define speed_valid(speed) ((speed) > 0)
+
+static const char off[] = "off\n";
+
+static int max_speed_show(int speed, char *buf)
+{
+	if (speed)
+		return scnprintf(buf, PAGE_SIZE, "%uKB/s\n", speed);
+	else
+		return scnprintf(buf, PAGE_SIZE, off);
+}
+
+static int max_speed_store(const char *buf, struct request_queue *q)
+{
+	unsigned int limit, set = 0;
+
+	if (!strncasecmp(off, buf, sizeof(off) - 2))
+		return set;
+	if (kstrtouint(buf, 0, &set) || (set > INT_MAX))
+		return -EINVAL;
+	if (set == 0)
+		return set;
+	limit = MAX_SPEED(q);
+	if (set > limit)
+		pr_warn("max speed %u ineffective above %u\n", set, limit);
+	limit = MIN_SPEED(q);
+	if (set < limit)
+		pr_warn("max speed %u painful below %u\n", set, limit);
+	return set;
+}
+
+static ssize_t max_write_speed_show(struct device *dev,
+				 struct device_attribute *attr, char *buf)
+{
+	struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
+	int ret = max_speed_show(atomic_read(&md->queue.max_write_speed), buf);
+
+	mmc_blk_put(md);
+	return ret;
+}
+
+static ssize_t max_write_speed_store(struct device *dev,
+				  struct device_attribute *attr,
+				  const char *buf, size_t count)
+{
+	struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
+	int set = max_speed_store(buf, md->queue.queue);
+
+	if (set < 0) {
+		mmc_blk_put(md);
+		return set;
+	}
+
+	atomic_set(&md->queue.max_write_speed, set);
+	mmc_blk_put(md);
+	return count;
+}
+
+static const DEVICE_ATTR(max_write_speed, S_IRUGO | S_IWUSR,
+	max_write_speed_show, max_write_speed_store);
+
+static ssize_t max_read_speed_show(struct device *dev,
+				 struct device_attribute *attr, char *buf)
+{
+	struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
+	int ret = max_speed_show(atomic_read(&md->queue.max_read_speed), buf);
+
+	mmc_blk_put(md);
+	return ret;
+}
+
+static ssize_t max_read_speed_store(struct device *dev,
+				  struct device_attribute *attr,
+				  const char *buf, size_t count)
+{
+	struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
+	int set = max_speed_store(buf, md->queue.queue);
+
+	if (set < 0) {
+		mmc_blk_put(md);
+		return set;
+	}
+
+	atomic_set(&md->queue.max_read_speed, set);
+	mmc_blk_put(md);
+	return count;
+}
+
+static const DEVICE_ATTR(max_read_speed, S_IRUGO | S_IWUSR,
+	max_read_speed_show, max_read_speed_store);
+
+static ssize_t cache_size_show(struct device *dev,
+			       struct device_attribute *attr, char *buf)
+{
+	struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
+	struct mmc_queue *mq = &md->queue;
+	int cache_size = atomic_read(&mq->cache_size);
+	int ret;
+
+	if (!cache_size)
+		ret = scnprintf(buf, PAGE_SIZE, off);
+	else {
+		int speed = atomic_read(&mq->max_write_speed);
+
+		if (!speed_valid(speed))
+			ret = scnprintf(buf, PAGE_SIZE, "%uMB\n", cache_size);
+		else { /* We accept race between cache_jiffies and cache_used */
+			unsigned long size = jiffies_and_speed_to_size(
+				jiffies - mq->cache_jiffies, speed);
+			long used = atomic_long_read(&mq->cache_used);
+
+			if (size >= used)
+				size = 0;
+			else
+				size = (used - size) * 100 / cache_size
+					/ 1024UL / 1024UL;
+
+			ret = scnprintf(buf, PAGE_SIZE, "%uMB %lu%% used\n",
+				cache_size, size);
+		}
+	}
+
+	mmc_blk_put(md);
+	return ret;
+}
+
+static ssize_t cache_size_store(struct device *dev,
+				  struct device_attribute *attr,
+				  const char *buf, size_t count)
+{
+	struct mmc_blk_data *md;
+	unsigned int set = 0;
+
+	if (strncasecmp(off, buf, sizeof(off) - 2)
+	 && (kstrtouint(buf, 0, &set) || (set > INT_MAX)))
+		return -EINVAL;
+
+	md = mmc_blk_get(dev_to_disk(dev));
+	atomic_set(&md->queue.cache_size, set);
+	mmc_blk_put(md);
+	return count;
+}
+
+static const DEVICE_ATTR(cache_size, S_IRUGO | S_IWUSR,
+	cache_size_show, cache_size_store);
+
+/* correct for write-back */
+static long mmc_blk_cache_used(struct mmc_queue *mq, unsigned long waitfor)
+{
+	long used = 0;
+	int speed = atomic_read(&mq->max_write_speed);
+
+	if (speed_valid(speed)) {
+		unsigned long size = jiffies_and_speed_to_size(
+					waitfor - mq->cache_jiffies, speed);
+		used = atomic_long_read(&mq->cache_used);
+
+		if (size >= used)
+			used = 0;
+		else
+			used -= size;
+	}
+
+	atomic_long_set(&mq->cache_used, used);
+	mq->cache_jiffies = waitfor;
+
+	return used;
+}
+
+static void mmc_blk_simulate_delay(
+	struct mmc_queue *mq,
+	struct request *req,
+	unsigned long waitfor)
+{
+	int max_speed;
+
+	if (!req)
+		return;
+
+	max_speed = (rq_data_dir(req) == READ)
+		? atomic_read(&mq->max_read_speed)
+		: atomic_read(&mq->max_write_speed);
+	if (speed_valid(max_speed)) {
+		unsigned long bytes = blk_rq_bytes(req);
+
+		if (rq_data_dir(req) != READ) {
+			int cache_size = atomic_read(&mq->cache_size);
+
+			if (cache_size) {
+				unsigned long size = cache_size * 1024L * 1024L;
+				long used = mmc_blk_cache_used(mq, waitfor);
+
+				used += bytes;
+				atomic_long_set(&mq->cache_used, used);
+				bytes = 0;
+				if (used > size)
+					bytes = used - size;
+			}
+		}
+		waitfor += size_and_speed_to_jiffies(bytes, max_speed);
+		if (time_is_after_jiffies(waitfor)) {
+			long msecs = jiffies_to_msecs(waitfor - jiffies);
+
+			if (likely(msecs > 0))
+				msleep(msecs);
+		}
+	}
+}
+
+#else
+
+#define mmc_blk_simulate_delay(mq, req, waitfor)
+
+#endif
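+
+/*
+ * Worked example of the throttle arithmetic above, assuming HZ == 100:
+ * a 1 MiB request capped at a max speed of 1024 KB/s gives
+ * size_and_speed_to_jiffies(1048576, 1024)
+ *	== 1048576 * 100 / 1024 / 1024 == 100 jiffies == one second.
+ */
+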
+static ssize_t
+num_wr_reqs_to_start_packing_show(struct device *dev,
+				  struct device_attribute *attr, char *buf)
+{
+	struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
+	int num_wr_reqs_to_start_packing;
+	int ret;
+
+	if (!md)
+		return -EINVAL;
+	num_wr_reqs_to_start_packing = md->queue.num_wr_reqs_to_start_packing;
+
+	ret = snprintf(buf, PAGE_SIZE, "%d\n", num_wr_reqs_to_start_packing);
+
+	mmc_blk_put(md);
+	return ret;
+}
+
+static ssize_t
+num_wr_reqs_to_start_packing_store(struct device *dev,
+				 struct device_attribute *attr,
+				 const char *buf, size_t count)
+{
+	int value;
+	struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
+	struct mmc_card *card;
+	int ret = count;
+
+	if (!md)
+		return -EINVAL;
+
+	card = md->queue.card;
+	if (!card) {
+		ret = -EINVAL;
+		goto exit;
+	}
+
+	sscanf(buf, "%d", &value);
+
+	if (value >= 0) {
+		md->queue.num_wr_reqs_to_start_packing =
+		    min_t(int, value, (int)card->ext_csd.max_packed_writes);
+
+		pr_debug("%s: trigger to pack: new value = %d",
+			mmc_hostname(card->host),
+			md->queue.num_wr_reqs_to_start_packing);
+	} else {
+		pr_err("%s: value %d is not valid. old value remains = %d",
+			mmc_hostname(card->host), value,
+			md->queue.num_wr_reqs_to_start_packing);
+		ret = -EINVAL;
+	}
+
+exit:
+	mmc_blk_put(md);
+	return ret;
+}
+
+static ssize_t
+no_pack_for_random_show(struct device *dev,
+				  struct device_attribute *attr, char *buf)
+{
+	struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
+	int ret;
+
+	if (!md)
+		return -EINVAL;
+	ret = snprintf(buf, PAGE_SIZE, "%d\n", md->queue.no_pack_for_random);
+
+	mmc_blk_put(md);
+	return ret;
+}
+
+static ssize_t
+no_pack_for_random_store(struct device *dev,
+				 struct device_attribute *attr,
+				 const char *buf, size_t count)
+{
+	int value;
+	struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
+	struct mmc_card *card;
+	int ret = count;
+
+	if (!md)
+		return -EINVAL;
+
+	card = md->queue.card;
+	if (!card) {
+		ret = -EINVAL;
+		goto exit;
+	}
+
+	sscanf(buf, "%d", &value);
+
+	if (value < 0) {
+		pr_err("%s: value %d is not valid. old value remains = %d",
+			mmc_hostname(card->host), value,
+			md->queue.no_pack_for_random);
+		ret = -EINVAL;
+		goto exit;
+	}
+
+	md->queue.no_pack_for_random = (value > 0) ?  true : false;
+
+	pr_debug("%s: no_pack_for_random: new value = %d",
+		mmc_hostname(card->host),
+		md->queue.no_pack_for_random);
+
+exit:
+	mmc_blk_put(md);
+	return ret;
+}
+
 static int mmc_blk_open(struct block_device *bdev, fmode_t mode)
 {
 	struct mmc_blk_data *md = mmc_blk_get(bdev->bd_disk);
@@ -440,7 +832,8 @@
 {
 	int err;
 
-	if (!mmc_can_sanitize(card)) {
+	if (!mmc_can_sanitize(card) &&
+			(card->host->caps2 & MMC_CAP2_SANITIZE)) {
 			pr_warn("%s: %s - SANITIZE is not supported\n",
 				mmc_hostname(card->host), __func__);
 			err = -EOPNOTSUPP;
@@ -450,9 +843,11 @@
 	pr_debug("%s: %s - SANITIZE IN PROGRESS...\n",
 		mmc_hostname(card->host), __func__);
 
+	trace_mmc_blk_erase_start(EXT_CSD_SANITIZE_START, 0, 0);
 	err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
 					EXT_CSD_SANITIZE_START, 1,
 					MMC_SANITIZE_REQ_TIMEOUT);
+	trace_mmc_blk_erase_end(EXT_CSD_SANITIZE_START, 0, 0);
 
 	if (err)
 		pr_err("%s: %s - EXT_CSD_SANITIZE_START failed. err=%d\n",
@@ -472,19 +867,22 @@
 	struct mmc_request mrq = {NULL};
 	struct scatterlist sg;
 	int err;
-	int is_rpmb = false;
-	u32 status = 0;
 
 	if (!card || !md || !idata)
 		return -EINVAL;
 
-	if (md->area_type & MMC_BLK_DATA_AREA_RPMB)
-		is_rpmb = true;
-
 	cmd.opcode = idata->ic.opcode;
 	cmd.arg = idata->ic.arg;
 	cmd.flags = idata->ic.flags;
 
+	if (idata->ic.postsleep_max_us < idata->ic.postsleep_min_us) {
+		pr_err("%s: min value: %u must not be greater than max value: %u\n",
+			__func__, idata->ic.postsleep_min_us,
+			idata->ic.postsleep_max_us);
+		WARN_ON(1);
+		return -EPERM;
+	}
+
 	if (idata->buf_bytes) {
 		data.sg = &sg;
 		data.sg_len = 1;
@@ -523,6 +921,15 @@
 
 	mrq.cmd = &cmd;
 
+	if (mmc_card_doing_bkops(card)) {
+		err = mmc_stop_bkops(card);
+		if (err) {
+			dev_err(mmc_dev(card->host),
+				"%s: stop_bkops failed %d\n", __func__, err);
+			return err;
+		}
+	}
+
 	err = mmc_blk_part_switch(card, md);
 	if (err)
 		return err;
@@ -533,13 +940,6 @@
 			return err;
 	}
 
-	if (is_rpmb) {
-		err = mmc_set_blockcount(card, data.blocks,
-			idata->ic.write_flag & (1 << 31));
-		if (err)
-			return err;
-	}
-
 	if ((MMC_EXTRACT_INDEX_FROM_ARG(cmd.arg) == EXT_CSD_SANITIZE_START) &&
 	    (cmd.opcode == MMC_SWITCH)) {
 		err = ioctl_do_sanitize(card);
@@ -573,7 +973,183 @@
 
 	memcpy(&(idata->ic.response), cmd.resp, sizeof(cmd.resp));
 
-	if (is_rpmb) {
+	return err;
+}
+
+struct mmc_blk_ioc_rpmb_data {
+	struct mmc_blk_ioc_data *data[MMC_IOC_MAX_RPMB_CMD];
+};
+
+static struct mmc_blk_ioc_rpmb_data *mmc_blk_ioctl_rpmb_copy_from_user(
+	struct mmc_ioc_rpmb __user *user)
+{
+	struct mmc_blk_ioc_rpmb_data *idata;
+	int err, i;
+
+	idata = kzalloc(sizeof(*idata), GFP_KERNEL);
+	if (!idata) {
+		err = -ENOMEM;
+		goto out;
+	}
+
+	for (i = 0; i < MMC_IOC_MAX_RPMB_CMD; i++) {
+		idata->data[i] = mmc_blk_ioctl_copy_from_user(&(user->cmds[i]));
+		if (IS_ERR(idata->data[i])) {
+			err = PTR_ERR(idata->data[i]);
+			goto copy_err;
+		}
+	}
+
+	return idata;
+
+copy_err:
+	while (--i >= 0) {
+		kfree(idata->data[i]->buf);
+		kfree(idata->data[i]);
+	}
+	kfree(idata);
+out:
+	return ERR_PTR(err);
+}
+
+static int mmc_blk_ioctl_rpmb_cmd(struct block_device *bdev,
+	struct mmc_ioc_rpmb __user *ic_ptr)
+{
+	struct mmc_blk_ioc_rpmb_data *idata;
+	struct mmc_blk_data *md;
+	struct mmc_card *card = NULL;
+	struct mmc_command cmd = {0};
+	struct mmc_data data = {0};
+	struct mmc_request mrq = {NULL};
+	struct scatterlist sg;
+	int err = 0, i = 0;
+	u32 status = 0;
+
+	/* The caller must have CAP_SYS_RAWIO */
+	if (!capable(CAP_SYS_RAWIO))
+		return -EPERM;
+
+	md = mmc_blk_get(bdev->bd_disk);
+	if (!md)
+		return -EINVAL;
+	/* make sure this is an RPMB partition */
+	if (!(md->area_type & MMC_BLK_DATA_AREA_RPMB)) {
+		err = -EINVAL;
+		goto cmd_done;
+	}
+
+	idata = mmc_blk_ioctl_rpmb_copy_from_user(ic_ptr);
+	if (IS_ERR(idata)) {
+		err = PTR_ERR(idata);
+		goto cmd_done;
+	}
+
+	card = md->queue.card;
+	if (IS_ERR(card)) {
+		err = PTR_ERR(card);
+		goto idata_free;
+	}
+
+	mmc_get_card(card);
+
+	if (mmc_card_doing_bkops(card)) {
+		if (mmc_card_cmdq(card)) {
+			err = mmc_cmdq_halt(card->host, true);
+			if (err)
+				goto cmd_rel_host;
+		}
+		err = mmc_stop_bkops(card);
+		if (err) {
+			dev_err(mmc_dev(card->host),
+				"%s: stop_bkops failed %d\n", __func__, err);
+			goto cmd_rel_host;
+		}
+		if (mmc_card_cmdq(card)) {
+			err = mmc_cmdq_halt(card->host, false);
+			if (err)
+				goto cmd_rel_host;
+		}
+	}
+
+	err = mmc_blk_part_switch(card, md);
+	if (err)
+		goto cmd_rel_host;
+
+	for (i = 0; i < MMC_IOC_MAX_RPMB_CMD; i++) {
+		struct mmc_blk_ioc_data *curr_data;
+		struct mmc_ioc_cmd *curr_cmd;
+
+		curr_data = idata->data[i];
+		curr_cmd = &curr_data->ic;
+		if (!curr_cmd->opcode)
+			break;
+
+		cmd.opcode = curr_cmd->opcode;
+		cmd.arg = curr_cmd->arg;
+		cmd.flags = curr_cmd->flags;
+
+		if (curr_data->buf_bytes) {
+			data.sg = &sg;
+			data.sg_len = 1;
+			data.blksz = curr_cmd->blksz;
+			data.blocks = curr_cmd->blocks;
+
+			sg_init_one(data.sg, curr_data->buf,
+					curr_data->buf_bytes);
+
+			if (curr_cmd->write_flag)
+				data.flags = MMC_DATA_WRITE;
+			else
+				data.flags = MMC_DATA_READ;
+
+			/* data.flags must already be set before doing this. */
+			mmc_set_data_timeout(&data, card);
+
+			/*
+			 * Allow overriding the timeout_ns for empirical tuning.
+			 */
+			if (curr_cmd->data_timeout_ns)
+				data.timeout_ns = curr_cmd->data_timeout_ns;
+
+			mrq.data = &data;
+		}
+
+		mrq.cmd = &cmd;
+
+		err = mmc_set_blockcount(card, data.blocks,
+				curr_cmd->write_flag & (1 << 31));
+		if (err)
+			goto cmd_rel_host;
+
+		mmc_wait_for_req(card->host, &mrq);
+
+		if (cmd.error) {
+			dev_err(mmc_dev(card->host), "%s: cmd error %d\n",
+					__func__, cmd.error);
+			err = cmd.error;
+			goto cmd_rel_host;
+		}
+		if (data.error) {
+			dev_err(mmc_dev(card->host), "%s: data error %d\n",
+					__func__, data.error);
+			err = data.error;
+			goto cmd_rel_host;
+		}
+
+		if (copy_to_user(&(ic_ptr->cmds[i].response), cmd.resp,
+					sizeof(cmd.resp))) {
+			err = -EFAULT;
+			goto cmd_rel_host;
+		}
+
+		if (!curr_cmd->write_flag) {
+			if (copy_to_user((void __user *)(unsigned long)
+						curr_cmd->data_ptr,
+						curr_data->buf,
+						curr_data->buf_bytes)) {
+				err = -EFAULT;
+				goto cmd_rel_host;
+			}
+		}
+
 		/*
 		 * Ensure RPMB command has completed by polling CMD13
 		 * "Send Status".
@@ -585,6 +1161,20 @@
 					__func__, status, err);
 	}
 
+cmd_rel_host:
+	mmc_put_card(card);
+
+idata_free:
+	for (i = 0; i < MMC_IOC_MAX_RPMB_CMD; i++) {
+		kfree(idata->data[i]->buf);
+		kfree(idata->data[i]);
+	}
+	kfree(idata);
+
+cmd_done:
+	mmc_blk_put(md);
+	if (card && card->cmdq_init)
+		wake_up(&card->host->cmdq_ctx.wait);
 	return err;
 }
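The MMC_IOC_RPMB_CMD path above batches up to MMC_IOC_MAX_RPMB_CMD commands per call and stops early at the first zero opcode. A hedged userspace sketch, assuming struct mmc_ioc_rpmb is simply an array of struct mmc_ioc_cmd as the copy loop suggests:

#include <string.h>
#include <sys/ioctl.h>
#include <linux/mmc/ioctl.h>

/* fd: an open RPMB block device, e.g. /dev/mmcblk0rpmb */
static int send_rpmb(int fd, const struct mmc_ioc_cmd *cmds, int ncmds)
{
	struct mmc_ioc_rpmb rpmb;
	int i;

	memset(&rpmb, 0, sizeof(rpmb));	/* zero opcode == end of list */
	for (i = 0; i < ncmds && i < MMC_IOC_MAX_RPMB_CMD; i++)
		rpmb.cmds[i] = cmds[i];
	return ioctl(fd, MMC_IOC_RPMB_CMD, &rpmb);
}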
 
@@ -605,9 +1195,8 @@
 		return -EPERM;
 
 	idata = mmc_blk_ioctl_copy_from_user(ic_ptr);
-	if (IS_ERR(idata))
+	if (IS_ERR_OR_NULL(idata))
 		return PTR_ERR(idata);
-
 	md = mmc_blk_get(bdev->bd_disk);
 	if (!md) {
 		err = -EINVAL;
@@ -615,15 +1204,32 @@
 	}
 
 	card = md->queue.card;
-	if (IS_ERR(card)) {
+	if (IS_ERR_OR_NULL(card)) {
 		err = PTR_ERR(card);
 		goto cmd_done;
 	}
 
 	mmc_get_card(card);
 
+	if (mmc_card_cmdq(card)) {
+		err = mmc_cmdq_halt_on_empty_queue(card->host);
+		if (err) {
+			pr_err("%s: halt failed while doing %s err (%d)\n",
+					mmc_hostname(card->host),
+					__func__, err);
+			mmc_put_card(card);
+			goto cmd_done;
+		}
+	}
+
 	ioc_err = __mmc_blk_ioctl_cmd(card, md, idata);
 
+	if (mmc_card_cmdq(card)) {
+		if (mmc_cmdq_halt(card->host, false))
+			pr_err("%s: %s: cmdq unhalt failed\n",
+			       mmc_hostname(card->host), __func__);
+	}
+
 	mmc_put_card(card);
 
 	err = mmc_blk_ioctl_copy_to_user(ic_ptr, idata);
@@ -713,6 +1319,9 @@
 	case MMC_IOC_CMD:
 		return mmc_blk_ioctl_cmd(bdev,
 				(struct mmc_ioc_cmd __user *)arg);
+	case MMC_IOC_RPMB_CMD:
+		return mmc_blk_ioctl_rpmb_cmd(bdev,
+				(struct mmc_ioc_rpmb __user *)arg);
 	case MMC_IOC_MULTI_CMD:
 		return mmc_blk_ioctl_multi_cmd(bdev,
 				(struct mmc_ioc_multi_cmd __user *)arg);
@@ -740,28 +1349,92 @@
 #endif
 };
 
+static int mmc_blk_cmdq_switch(struct mmc_card *card,
+			       struct mmc_blk_data *md, bool enable)
+{
+	int ret = 0;
+	bool cmdq_mode = !!mmc_card_cmdq(card);
+	struct mmc_host *host = card->host;
+	struct mmc_cmdq_context_info *ctx = &host->cmdq_ctx;
+
+	if (!(card->host->caps2 & MMC_CAP2_CMD_QUEUE) ||
+	    !card->ext_csd.cmdq_support ||
+	    (enable && !(md->flags & MMC_BLK_CMD_QUEUE)) ||
+	    (cmdq_mode == enable))
+		return 0;
+
+	if (enable) {
+		ret = mmc_set_blocklen(card, MMC_CARD_CMDQ_BLK_SIZE);
+		if (ret) {
+			pr_err("%s: failed (%d) to set block-size to %d\n",
+			       __func__, ret, MMC_CARD_CMDQ_BLK_SIZE);
+			goto out;
+		}
+
+	} else {
+		if (!test_bit(CMDQ_STATE_HALT, &ctx->curr_state)) {
+			ret = mmc_cmdq_halt(host, true);
+			if (ret) {
+				pr_err("%s: halt: failed: %d\n",
+					mmc_hostname(host), ret);
+				goto out;
+			}
+		}
+	}
+
+	ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+			 EXT_CSD_CMDQ, enable,
+			 card->ext_csd.generic_cmd6_time);
+	if (ret) {
+		pr_err("%s: cmdq mode %sable failed %d\n",
+		       md->disk->disk_name, enable ? "en" : "dis", ret);
+		goto out;
+	}
+
+	if (enable)
+		mmc_card_set_cmdq(card);
+	else
+		mmc_card_clr_cmdq(card);
+out:
+	return ret;
+}
+
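+/*
+ * Ordering in mmc_blk_cmdq_switch() above: on disable, the queue is
+ * halted first so no tags are in flight while EXT_CSD_CMDQ is cleared;
+ * on enable, the block length is set to MMC_CARD_CMDQ_BLK_SIZE before
+ * the mode switch, since command-queue transfers assume a fixed block
+ * size.
+ */
+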
 static inline int mmc_blk_part_switch(struct mmc_card *card,
 				      struct mmc_blk_data *md)
 {
 	int ret;
 	struct mmc_blk_data *main_md = dev_get_drvdata(&card->dev);
 
-	if (main_md->part_curr == md->part_type)
+	if ((main_md->part_curr == md->part_type) &&
+	    (card->part_curr == md->part_type))
 		return 0;
 
 	if (mmc_card_mmc(card)) {
 		u8 part_config = card->ext_csd.part_config;
 
+		if (md->part_type) {
+			/* disable CQ mode for non-user data partitions */
+			ret = mmc_blk_cmdq_switch(card, md, false);
+			if (ret)
+				return ret;
+		}
+
 		part_config &= ~EXT_CSD_PART_CONFIG_ACC_MASK;
 		part_config |= md->part_type;
 
 		ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
 				 EXT_CSD_PART_CONFIG, part_config,
 				 card->ext_csd.part_time);
-		if (ret)
+
+		if (ret) {
+			pr_err("%s: mmc_blk_part_switch failure, %d -> %d\n",
+				mmc_hostname(card->host), main_md->part_curr,
+					md->part_type);
 			return ret;
+		}
 
 		card->ext_csd.part_config = part_config;
+		card->part_curr = md->part_type;
 	}
 
 	main_md->part_curr = md->part_type;
@@ -942,33 +1615,45 @@
 	switch (error) {
 	case -EILSEQ:
 		/* response crc error, retry the r/w cmd */
-		pr_err("%s: %s sending %s command, card status %#x\n",
-			req->rq_disk->disk_name, "response CRC error",
+		pr_err_ratelimited(
+			"%s: response CRC error sending %s command, card status %#x\n",
+			req->rq_disk->disk_name,
 			name, status);
 		return ERR_RETRY;
 
 	case -ETIMEDOUT:
-		pr_err("%s: %s sending %s command, card status %#x\n",
-			req->rq_disk->disk_name, "timed out", name, status);
+		pr_err_ratelimited(
+			"%s: timed out sending %s command, card status %#x\n",
+			req->rq_disk->disk_name, name, status);
 
 		/* If the status cmd initially failed, retry the r/w cmd */
-		if (!status_valid)
+		if (!status_valid) {
+			pr_err_ratelimited("%s: status not valid, retrying timeout\n",
+				req->rq_disk->disk_name);
 			return ERR_RETRY;
-
+		}
 		/*
 		 * If it was a r/w cmd crc error, or illegal command
 		 * (eg, issued in wrong state) then retry - we should
 		 * have corrected the state problem above.
 		 */
-		if (status & (R1_COM_CRC_ERROR | R1_ILLEGAL_COMMAND))
+		if (status & (R1_COM_CRC_ERROR | R1_ILLEGAL_COMMAND)) {
+			pr_err_ratelimited(
+				"%s: command error, retrying timeout\n",
+				req->rq_disk->disk_name);
 			return ERR_RETRY;
+		}
 
 		/* Otherwise abort the command */
+		pr_err_ratelimited(
+			"%s: not retrying timeout\n",
+			req->rq_disk->disk_name);
 		return ERR_ABORT;
 
 	default:
 		/* We don't understand the error code the driver gave us */
-		pr_err("%s: unknown error %d sending read/write command, card status %#x\n",
+		pr_err_ratelimited(
+			"%s: unknown error %d sending read/write command, card status %#x\n",
 		       req->rq_disk->disk_name, error, status);
 		return ERR_ABORT;
 	}
@@ -1016,12 +1701,14 @@
 		mmc_retune_recheck(card->host);
 
 		prev_cmd_status_valid = false;
-		pr_err("%s: error %d sending status command, %sing\n",
+		pr_err_ratelimited("%s: error %d sending status command, %sing\n",
 		       req->rq_disk->disk_name, err, retry ? "retry" : "abort");
 	}
 
 	/* We couldn't get a response from the card.  Give up. */
 	if (err) {
+		if (card->err_in_sdr104)
+			return ERR_RETRY;
 		/* Check if the card is removed */
 		if (mmc_detect_card_removed(card->host))
 			return ERR_NOMEDIUM;
@@ -1107,8 +1794,15 @@
 
 	md->reset_done |= type;
 	err = mmc_hw_reset(host);
+	if (err && err != -EOPNOTSUPP) {
+		/* We failed to reset so we need to abort the request */
+		pr_err("%s: %s: failed to reset %d\n", mmc_hostname(host),
+				__func__, err);
+		return -ENODEV;
+	}
+
 	/* Ensure we switch back to the correct partition */
-	if (err != -EOPNOTSUPP) {
+	if (host->card) {
 		struct mmc_blk_data *main_md =
 			dev_get_drvdata(&host->card->dev);
 		int part_err;
@@ -1143,6 +1837,77 @@
 	return false;
 }
 
+static struct mmc_cmdq_req *mmc_blk_cmdq_prep_discard_req(struct mmc_queue *mq,
+						struct request *req)
+{
+	struct mmc_blk_data *md = mq->data;
+	struct mmc_card *card = md->queue.card;
+	struct mmc_host *host = card->host;
+	struct mmc_cmdq_context_info *ctx_info = &host->cmdq_ctx;
+	struct mmc_cmdq_req *cmdq_req;
+	struct mmc_queue_req *active_mqrq;
+
+	BUG_ON(req->tag > card->ext_csd.cmdq_depth);
+	BUG_ON(test_and_set_bit(req->tag, &host->cmdq_ctx.active_reqs));
+
+	set_bit(CMDQ_STATE_DCMD_ACTIVE, &ctx_info->curr_state);
+
+	active_mqrq = &mq->mqrq_cmdq[req->tag];
+	active_mqrq->req = req;
+
+	cmdq_req = mmc_cmdq_prep_dcmd(active_mqrq, mq);
+	cmdq_req->cmdq_req_flags |= QBR;
+	cmdq_req->mrq.cmd = &cmdq_req->cmd;
+	cmdq_req->tag = req->tag;
+	return cmdq_req;
+}
+
+static int mmc_blk_cmdq_issue_discard_rq(struct mmc_queue *mq,
+					struct request *req)
+{
+	struct mmc_blk_data *md = mq->data;
+	struct mmc_card *card = md->queue.card;
+	struct mmc_cmdq_req *cmdq_req = NULL;
+	unsigned int from, nr, arg;
+	int err = 0;
+
+	if (!mmc_can_erase(card)) {
+		err = -EOPNOTSUPP;
+		blk_end_request(req, err, blk_rq_bytes(req));
+		goto out;
+	}
+
+	from = blk_rq_pos(req);
+	nr = blk_rq_sectors(req);
+
+	if (mmc_can_discard(card))
+		arg = MMC_DISCARD_ARG;
+	else if (mmc_can_trim(card))
+		arg = MMC_TRIM_ARG;
+	else
+		arg = MMC_ERASE_ARG;
+
+	cmdq_req = mmc_blk_cmdq_prep_discard_req(mq, req);
+	if (card->quirks & MMC_QUIRK_INAND_CMD38) {
+		__mmc_switch_cmdq_mode(cmdq_req->mrq.cmd,
+				EXT_CSD_CMD_SET_NORMAL,
+				INAND_CMD38_ARG_EXT_CSD,
+				arg == MMC_TRIM_ARG ?
+				INAND_CMD38_ARG_TRIM :
+				INAND_CMD38_ARG_ERASE,
+				0, true, false);
+		err = mmc_cmdq_wait_for_dcmd(card->host, cmdq_req);
+		if (err)
+			goto clear_dcmd;
+	}
+	err = mmc_cmdq_erase(cmdq_req, card, from, nr, arg);
+clear_dcmd:
+	mmc_host_clk_hold(card->host);
+	blk_complete_request(req);
+out:
+	return err ? 1 : 0;
+}
+
 static int mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req)
 {
 	struct mmc_blk_data *md = mq->data;
@@ -1186,6 +1951,69 @@
 	return err ? 0 : 1;
 }
 
+static int mmc_blk_cmdq_issue_secdiscard_rq(struct mmc_queue *mq,
+				       struct request *req)
+{
+	struct mmc_blk_data *md = mq->data;
+	struct mmc_card *card = md->queue.card;
+	struct mmc_cmdq_req *cmdq_req = NULL;
+	unsigned int from, nr, arg;
+	int err = 0;
+
+	if (!(mmc_can_secure_erase_trim(card))) {
+		err = -EOPNOTSUPP;
+		blk_end_request(req, err, blk_rq_bytes(req));
+		goto out;
+	}
+
+	from = blk_rq_pos(req);
+	nr = blk_rq_sectors(req);
+
+	if (mmc_can_trim(card) && !mmc_erase_group_aligned(card, from, nr))
+		arg = MMC_SECURE_TRIM1_ARG;
+	else
+		arg = MMC_SECURE_ERASE_ARG;
+
+	cmdq_req = mmc_blk_cmdq_prep_discard_req(mq, req);
+	if (card->quirks & MMC_QUIRK_INAND_CMD38) {
+		__mmc_switch_cmdq_mode(cmdq_req->mrq.cmd,
+				EXT_CSD_CMD_SET_NORMAL,
+				INAND_CMD38_ARG_EXT_CSD,
+				arg == MMC_SECURE_TRIM1_ARG ?
+				INAND_CMD38_ARG_SECTRIM1 :
+				INAND_CMD38_ARG_SECERASE,
+				0, true, false);
+		err = mmc_cmdq_wait_for_dcmd(card->host, cmdq_req);
+		if (err)
+			goto clear_dcmd;
+	}
+
+	err = mmc_cmdq_erase(cmdq_req, card, from, nr, arg);
+	if (err)
+		goto clear_dcmd;
+
+	if (arg == MMC_SECURE_TRIM1_ARG) {
+		if (card->quirks & MMC_QUIRK_INAND_CMD38) {
+			__mmc_switch_cmdq_mode(cmdq_req->mrq.cmd,
+					EXT_CSD_CMD_SET_NORMAL,
+					INAND_CMD38_ARG_EXT_CSD,
+					INAND_CMD38_ARG_SECTRIM2,
+					0, true, false);
+			err = mmc_cmdq_wait_for_dcmd(card->host, cmdq_req);
+			if (err)
+				goto clear_dcmd;
+		}
+
+		err = mmc_cmdq_erase(cmdq_req, card, from, nr,
+				MMC_SECURE_TRIM2_ARG);
+	}
+clear_dcmd:
+	mmc_host_clk_hold(card->host);
+	blk_complete_request(req);
+out:
+	return err ? 1 : 0;
+}
+
 static int mmc_blk_issue_secdiscard_rq(struct mmc_queue *mq,
 				       struct request *req)
 {
@@ -1259,10 +2087,64 @@
 	struct mmc_card *card = md->queue.card;
 	int ret = 0;
 
-	ret = mmc_flush_cache(card);
+	if (!req)
+		return 0;
+
+	if (req->cmd_flags & REQ_BARRIER) {
+		/*
+		 * If eMMC cache flush policy is set to 1, then the device
+		 * shall flush the requests in First-In-First-Out (FIFO) order.
+		 * In this case, as per spec, the host must not send any cache
+		 * barrier requests as they are redundant and add unnecessary
+		 * overhead to both device and host.
+		 */
+		if (card->ext_csd.cache_flush_policy & 1)
+			goto end_req;
+
+		/*
+		 * In case barrier is not supported or enabled in the device,
+		 * use flush as a fallback option.
+		 */
+		ret = mmc_cache_barrier(card);
 	if (ret)
+			ret = mmc_flush_cache(card);
+	} else if (req->cmd_flags & REQ_FLUSH) {
+		ret = mmc_flush_cache(card);
+	}
+	if (ret == -ENODEV) {
+		pr_err("%s: %s: restart mmc card",
+				req->rq_disk->disk_name, __func__);
+		if (mmc_blk_reset(md, card->host, MMC_BLK_FLUSH))
+			pr_err("%s: %s: fail to restart mmc",
+				req->rq_disk->disk_name, __func__);
+		else
+			mmc_blk_reset_success(md, MMC_BLK_FLUSH);
+	}
+
+	if (ret) {
+		pr_err("%s: %s: notify flush error to upper layers",
+				req->rq_disk->disk_name, __func__);
 		ret = -EIO;
+	}
 
+end_req:
+#ifdef CONFIG_MMC_SIMULATE_MAX_SPEED
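+	/*
+	 * Reached via the end_req label as well, so this must stand on
+	 * its own rather than being an "else" of the if (ret) above.
+	 */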
+	if (!ret && atomic_read(&mq->cache_size)) {
+		long used = mmc_blk_cache_used(mq, jiffies);
+
+		if (used) {
+			int speed = atomic_read(&mq->max_write_speed);
+
+			if (speed_valid(speed)) {
+				unsigned long msecs = jiffies_to_msecs(
+					size_and_speed_to_jiffies(
+						used, speed));
+				if (msecs)
+					msleep(msecs);
+			}
+		}
+	}
+#endif
 	blk_end_request_all(req, ret);
 
 	return ret ? 0 : 1;
@@ -1309,6 +2191,18 @@
 	int need_retune = card->host->need_retune;
 	int ecc_err = 0, gen_err = 0;
 
+	if (card->host->sdr104_wa && mmc_card_sd(card) &&
+	    (card->host->ios.timing == MMC_TIMING_UHS_SDR104) &&
+	    !card->sdr104_blocked &&
+	    (brq->data.error == -EILSEQ ||
+	     brq->data.error == -EIO ||
+	     brq->data.error == -ETIMEDOUT ||
+	     brq->cmd.error == -EILSEQ ||
+	     brq->cmd.error == -EIO ||
+	     brq->cmd.error == -ETIMEDOUT ||
+	     brq->sbc.error))
+		card->err_in_sdr104 = true;
+
 	/*
 	 * sbc.error indicates a problem with the set block count
 	 * command.  No data will have been transferred.
@@ -1493,6 +2387,7 @@
 	brq->stop.arg = 0;
 	brq->data.blocks = blk_rq_sectors(req);
 
+	brq->data.fault_injected = false;
 	/*
 	 * The block layer doesn't support all sector count
 	 * restrictions, so we need to be prepared for too big
@@ -1616,6 +2511,7 @@
 	}
 
 	mqrq->mmc_active.mrq = &brq->mrq;
+	mqrq->mmc_active.mrq->req = mqrq->req;
 	mqrq->mmc_active.err_check = mmc_blk_err_check;
 
 	mmc_queue_bounce_pre(mqrq);
@@ -1637,6 +2533,178 @@
 	return nr_segs;
 }
 
+/**
+ * mmc_blk_disable_wr_packing() - disables packing mode
+ * @mq:	MMC queue.
+ *
+ */
+void mmc_blk_disable_wr_packing(struct mmc_queue *mq)
+{
+	if (mq) {
+		mq->wr_packing_enabled = false;
+		mq->num_of_potential_packed_wr_reqs = 0;
+	}
+}
+EXPORT_SYMBOL(mmc_blk_disable_wr_packing);
+
+static int get_packed_trigger(int potential, struct mmc_card *card,
+			      struct request *req, int curr_trigger)
+{
+	static int num_mean_elements = 1;
+	static unsigned long mean_potential = PCKD_TRGR_INIT_MEAN_POTEN;
+	unsigned int trigger = curr_trigger;
+	unsigned int pckd_trgr_upper_bound = card->ext_csd.max_packed_writes;
+
+	/* scale down the upper bound to 75% */
+	pckd_trgr_upper_bound = (pckd_trgr_upper_bound * 3) / 4;
+
+	/*
+	 * Most calls to this function come with small potential write counts,
+	 * and we don't want those calls to affect the packed trigger; set a
+	 * lower bound and ignore calls whose potential is below it.
+	 */
+	if (potential <= PCKD_TRGR_POTEN_LOWER_BOUND)
+		return trigger;
+
+	/*
+	 * To prevent integer overflow in the following calculation, reset
+	 * the algorithm once every PACKED_TRIGGER_MAX_ELEMENTS calls.
+	 */
+	if (num_mean_elements > PACKED_TRIGGER_MAX_ELEMENTS) {
+		num_mean_elements = 1;
+		mean_potential = PCKD_TRGR_INIT_MEAN_POTEN;
+	}
+
+	/*
+	 * get next mean value based on previous mean value and current
+	 * potential packed writes. Calculation is as follows:
+	 * mean_pot[i+1] =
+	 *	((mean_pot[i] * num_mean_elem) + potential)/(num_mean_elem + 1)
+	 */
+	mean_potential *= num_mean_elements;
+	/*
+	 * add num_mean_elements so that the division of two integers doesn't
+	 * lower mean_potential too much
+	 */
+	if (potential > mean_potential)
+		mean_potential += num_mean_elements;
+	mean_potential += potential;
+	/* this is for gaining more precision when dividing two integers */
+	mean_potential *= PCKD_TRGR_PRECISION_MULTIPLIER;
+	/* this completes the mean calculation */
+	mean_potential /= ++num_mean_elements;
+	mean_potential /= PCKD_TRGR_PRECISION_MULTIPLIER;
+
+	/*
+	 * If the current potential packed-write count exceeds the mean, the
+	 * heuristic is that the following workload will contain many write
+	 * requests, so we lower the packed trigger. In the opposite case we
+	 * increase the trigger in order to get fewer packing events.
+	 */
+	if (potential >= mean_potential)
+		trigger = (trigger <= PCKD_TRGR_LOWER_BOUND) ?
+				PCKD_TRGR_LOWER_BOUND : trigger - 1;
+	else
+		trigger = (trigger >= pckd_trgr_upper_bound) ?
+				pckd_trgr_upper_bound : trigger + 1;
+
+	/*
+	 * An urgent read request indicates that a packed list was interrupted
+	 * by this read; we therefore aim for less packing, so the trigger
+	 * gets increased.
+	 */
+	if (req && (req->cmd_flags & REQ_URGENT) && (rq_data_dir(req) == READ))
+		trigger += PCKD_TRGR_URGENT_PENALTY;
+
+	return trigger;
+}
+
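The incremental mean can be exercised in isolation. A standalone sketch with the static state made explicit (PRECISION_MULT is an assumed stand-in for PCKD_TRGR_PRECISION_MULTIPLIER):

	#define PRECISION_MULT	1024	/* assumed stand-in value */

	/* Incremental integer mean: mean[i+1] = (mean[i]*n + sample)/(n+1) */
	static unsigned long update_mean(unsigned long mean, int *n,
					 unsigned long sample)
	{
		mean *= *n;
		if (sample > mean)
			mean += *n;	/* bias against rounding the mean down */
		mean += sample;
		mean *= PRECISION_MULT;	/* mirrors the precision-multiplier step */
		mean /= ++(*n);
		mean /= PRECISION_MULT;
		return mean;
	}

Starting from mean 17 with n = 1, a sample of 20 yields (17 + 1 + 20) / 2 = 19; without the rounding compensation the result would be 18.
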
+static void mmc_blk_write_packing_control(struct mmc_queue *mq,
+					  struct request *req)
+{
+	struct mmc_host *host = mq->card->host;
+	int data_dir;
+
+	if (!(host->caps2 & MMC_CAP2_PACKED_WR))
+		return;
+
+	/* Support for the write packing on eMMC 4.5 or later */
+	if (mq->card->ext_csd.rev <= 5)
+		return;
+
+	/*
+	 * If the host does not support packing control, packing control must
+	 * not affect write packing; therefore, enable write packing
+	 * unconditionally.
+	 */
+	if (!(host->caps2 & MMC_CAP2_PACKED_WR_CONTROL)) {
+		mq->wr_packing_enabled = true;
+		return;
+	}
+
+	if (!req || (req->cmd_flags & REQ_FLUSH)) {
+		if (mq->num_of_potential_packed_wr_reqs >
+				mq->num_wr_reqs_to_start_packing)
+			mq->wr_packing_enabled = true;
+		mq->num_wr_reqs_to_start_packing =
+			get_packed_trigger(mq->num_of_potential_packed_wr_reqs,
+					   mq->card, req,
+					   mq->num_wr_reqs_to_start_packing);
+		mq->num_of_potential_packed_wr_reqs = 0;
+		return;
+	}
+
+	data_dir = rq_data_dir(req);
+
+	if (data_dir == READ) {
+		mmc_blk_disable_wr_packing(mq);
+		mq->num_wr_reqs_to_start_packing =
+			get_packed_trigger(mq->num_of_potential_packed_wr_reqs,
+					   mq->card, req,
+					   mq->num_wr_reqs_to_start_packing);
+		mq->num_of_potential_packed_wr_reqs = 0;
+		mq->wr_packing_enabled = false;
+		return;
+	} else if (data_dir == WRITE) {
+		mq->num_of_potential_packed_wr_reqs++;
+	}
+
+	if (mq->num_of_potential_packed_wr_reqs >
+			mq->num_wr_reqs_to_start_packing)
+		mq->wr_packing_enabled = true;
+}
+
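Stripped of the capability checks, the control loop is a small state machine: a read disables packing and re-tunes the trigger, while consecutive writes beyond the trigger enable it. A hypothetical reduction with a fixed trigger (the driver's trigger adapts via get_packed_trigger()):

	static bool packing_enabled;
	static unsigned int potential, trigger = 17;	/* default trigger */

	static void on_request(bool is_write)
	{
		if (!is_write) {	/* a read interrupts the write stream */
			packing_enabled = false;
			potential = 0;
			return;
		}

		if (++potential > trigger)	/* enough consecutive writes */
			packing_enabled = true;
	}
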
+struct mmc_wr_pack_stats *mmc_blk_get_packed_statistics(struct mmc_card *card)
+{
+	if (!card)
+		return NULL;
+
+	return &card->wr_pack_stats;
+}
+EXPORT_SYMBOL(mmc_blk_get_packed_statistics);
+
+void mmc_blk_init_packed_statistics(struct mmc_card *card)
+{
+	int max_num_of_packed_reqs = 0;
+
+	if (!card || !card->wr_pack_stats.packing_events)
+		return;
+
+	max_num_of_packed_reqs = card->ext_csd.max_packed_writes;
+
+	spin_lock(&card->wr_pack_stats.lock);
+	memset(card->wr_pack_stats.packing_events, 0,
+		(max_num_of_packed_reqs + 1) *
+	       sizeof(*card->wr_pack_stats.packing_events));
+	memset(&card->wr_pack_stats.pack_stop_reason, 0,
+		sizeof(card->wr_pack_stats.pack_stop_reason));
+	card->wr_pack_stats.enabled = true;
+	spin_unlock(&card->wr_pack_stats.lock);
+}
+EXPORT_SYMBOL(mmc_blk_init_packed_statistics);
+
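Readers of these statistics are expected to take the same spinlock and use the same indexing, where packing_events[n] counts packed writes of exactly n requests. A hypothetical consumer:

	static void dump_pack_stats(struct mmc_card *card)
	{
		struct mmc_wr_pack_stats *stats =
			mmc_blk_get_packed_statistics(card);
		int i;

		if (!stats || !stats->packing_events)
			return;

		spin_lock(&stats->lock);
		for (i = 1; i <= card->ext_csd.max_packed_writes; i++)
			if (stats->packing_events[i])
				pr_info("packed %d reqs - %u times\n",
					i, stats->packing_events[i]);
		spin_unlock(&stats->lock);
	}
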
 static u8 mmc_blk_prep_packed_list(struct mmc_queue *mq, struct request *req)
 {
 	struct request_queue *q = mq->queue;
@@ -1650,10 +2718,14 @@
 	bool put_back = true;
 	u8 max_packed_rw = 0;
 	u8 reqs = 0;
+	struct mmc_wr_pack_stats *stats = &card->wr_pack_stats;
 
 	if (!(md->flags & MMC_BLK_PACKED_CMD))
 		goto no_packed;
 
+	if (!mq->wr_packing_enabled)
+		goto no_packed;
+
 	if ((rq_data_dir(cur) == WRITE) &&
 	    mmc_host_packed_wr(card->host))
 		max_packed_rw = card->ext_csd.max_packed_writes;
@@ -1669,6 +2741,9 @@
 	    !IS_ALIGNED(blk_rq_sectors(cur), 8))
 		goto no_packed;
 
+	if (cur->cmd_flags & REQ_FUA)
+		goto no_packed;
+
 	mmc_blk_clear_packed(mqrq);
 
 	max_blk_count = min(card->host->max_blk_count,
@@ -1685,6 +2760,7 @@
 		phys_segments += mmc_calc_packed_hdr_segs(q, card);
 	}
 
+	spin_lock(&stats->lock);
 	do {
 		if (reqs >= max_packed_rw - 1) {
 			put_back = false;
@@ -1695,33 +2771,63 @@
 		next = blk_fetch_request(q);
 		spin_unlock_irq(q->queue_lock);
 		if (!next) {
+			MMC_BLK_UPDATE_STOP_REASON(stats, EMPTY_QUEUE);
 			put_back = false;
 			break;
 		}
 
 		if (mmc_large_sector(card) &&
-		    !IS_ALIGNED(blk_rq_sectors(next), 8))
+		    !IS_ALIGNED(blk_rq_sectors(next), 8)) {
+			MMC_BLK_UPDATE_STOP_REASON(stats, LARGE_SEC_ALIGN);
 			break;
+		}
 
 		if (next->cmd_flags & REQ_DISCARD ||
-		    next->cmd_flags & REQ_FLUSH)
+		    next->cmd_flags & REQ_FLUSH) {
+			MMC_BLK_UPDATE_STOP_REASON(stats, FLUSH_OR_DISCARD);
+			break;
+		}
+
+		if (next->cmd_flags & REQ_FUA) {
+			MMC_BLK_UPDATE_STOP_REASON(stats, FUA);
 			break;
+		}
 
-		if (rq_data_dir(cur) != rq_data_dir(next))
+		if (rq_data_dir(cur) != rq_data_dir(next)) {
+			MMC_BLK_UPDATE_STOP_REASON(stats, WRONG_DATA_DIR);
 			break;
+		}
 
 		if (mmc_req_rel_wr(next) &&
-		    (md->flags & MMC_BLK_REL_WR) && !en_rel_wr)
+		    (md->flags & MMC_BLK_REL_WR) && !en_rel_wr) {
+			MMC_BLK_UPDATE_STOP_REASON(stats, REL_WRITE);
 			break;
+		}
 
 		req_sectors += blk_rq_sectors(next);
-		if (req_sectors > max_blk_count)
+		if (req_sectors > max_blk_count) {
+			if (stats->enabled)
+				stats->pack_stop_reason[EXCEEDS_SECTORS]++;
 			break;
+		}
 
 		phys_segments +=  next->nr_phys_segments;
-		if (phys_segments > max_phys_segs)
+		if (phys_segments > max_phys_segs) {
+			MMC_BLK_UPDATE_STOP_REASON(stats, EXCEEDS_SEGMENTS);
+			break;
+		}
+
+		if (mq->no_pack_for_random) {
+			if ((blk_rq_pos(cur) + blk_rq_sectors(cur)) !=
+			    blk_rq_pos(next)) {
+				MMC_BLK_UPDATE_STOP_REASON(stats, RANDOM);
+				put_back = true;
 			break;
+			}
+		}
 
+		if (rq_data_dir(next) == WRITE)
+			mq->num_of_potential_packed_wr_reqs++;
 		list_add_tail(&next->queuelist, &mqrq->packed->list);
 		cur = next;
 		reqs++;
@@ -1733,6 +2839,15 @@
 		spin_unlock_irq(q->queue_lock);
 	}
 
+	if (stats->enabled) {
+		if (reqs + 1 <= card->ext_csd.max_packed_writes)
+			stats->packing_events[reqs + 1]++;
+		if (reqs + 1 == max_packed_rw)
+			MMC_BLK_UPDATE_STOP_REASON(stats, THRESHOLD);
+	}
+
+	spin_unlock(&stats->lock);
+
 	if (reqs > 0) {
 		list_add(&req->queuelist, &mqrq->packed->list);
 		mqrq->packed->nr_entries = ++reqs;
@@ -1813,6 +2928,7 @@
 	brq->data.blksz = 512;
 	brq->data.blocks = packed->blocks + hdr_blocks;
 	brq->data.flags |= MMC_DATA_WRITE;
+	brq->data.fault_injected = false;
 
 	brq->stop.opcode = MMC_STOP_TRANSMISSION;
 	brq->stop.arg = 0;
@@ -1824,8 +2940,19 @@
 	brq->data.sg_len = mmc_queue_map_sg(mq, mqrq);
 
 	mqrq->mmc_active.mrq = &brq->mrq;
+
+	/*
+	 * This is intended for packed commands tests usage - in case these
+	 * functions are not in use the respective pointers are NULL
+	 */
+	if (mq->err_check_fn)
+		mqrq->mmc_active.err_check = mq->err_check_fn;
+	else
 	mqrq->mmc_active.err_check = mmc_blk_packed_err_check;
 
+	if (mq->packed_test_fn)
+		mq->packed_test_fn(mq->queue, mqrq);
+
 	mmc_queue_bounce_pre(mqrq);
 }
 
@@ -1846,11 +2973,12 @@
 	 */
 	if (mmc_card_sd(card)) {
 		u32 blocks;
-
+		if (!brq->data.fault_injected) {
 		blocks = mmc_sd_num_wr_blocks(card);
-		if (blocks != (u32)-1) {
+			if (blocks != (u32)-1)
 			ret = blk_end_request(req, 0, blocks << 9);
-		}
+		} else
+			ret = blk_end_request(req, 0, brq->data.bytes_xfered);
 	} else {
 		if (!mmc_packed_cmd(mq_rq->cmd_type))
 			ret = blk_end_request(req, 0, brq->data.bytes_xfered);
@@ -1930,6 +3058,614 @@
 	mmc_blk_clear_packed(mq_rq);
 }
 
+static int mmc_blk_cmdq_start_req(struct mmc_host *host,
+				  struct mmc_cmdq_req *cmdq_req)
+{
+	struct mmc_request *mrq = &cmdq_req->mrq;
+
+	mrq->done = mmc_blk_cmdq_req_done;
+	return mmc_cmdq_start_req(host, cmdq_req);
+}
+
+/* prepare for non-data commands */
+static struct mmc_cmdq_req *mmc_cmdq_prep_dcmd(
+		struct mmc_queue_req *mqrq, struct mmc_queue *mq)
+{
+	struct request *req = mqrq->req;
+	struct mmc_cmdq_req *cmdq_req = &mqrq->cmdq_req;
+
+	memset(&mqrq->cmdq_req, 0, sizeof(struct mmc_cmdq_req));
+
+	cmdq_req->mrq.data = NULL;
+	cmdq_req->cmd_flags = req->cmd_flags;
+	cmdq_req->mrq.req = mqrq->req;
+	req->special = mqrq;
+	cmdq_req->cmdq_req_flags |= DCMD;
+	cmdq_req->mrq.cmdq_req = cmdq_req;
+
+	return &mqrq->cmdq_req;
+}
+
+
+#define IS_RT_CLASS_REQ(x)     \
+	(IOPRIO_PRIO_CLASS(req_get_ioprio(x)) == IOPRIO_CLASS_RT)
+
+static struct mmc_cmdq_req *mmc_blk_cmdq_rw_prep(
+		struct mmc_queue_req *mqrq, struct mmc_queue *mq)
+{
+	struct mmc_card *card = mq->card;
+	struct request *req = mqrq->req;
+	struct mmc_blk_data *md = mq->data;
+	bool do_rel_wr = mmc_req_rel_wr(req) && (md->flags & MMC_BLK_REL_WR);
+	bool do_data_tag;
+	bool read_dir = (rq_data_dir(req) == READ);
+	bool prio = IS_RT_CLASS_REQ(req);
+	struct mmc_cmdq_req *cmdq_rq = &mqrq->cmdq_req;
+
+	memset(&mqrq->cmdq_req, 0, sizeof(struct mmc_cmdq_req));
+
+	cmdq_rq->tag = req->tag;
+	if (read_dir) {
+		cmdq_rq->cmdq_req_flags |= DIR;
+		cmdq_rq->data.flags = MMC_DATA_READ;
+	} else {
+		cmdq_rq->data.flags = MMC_DATA_WRITE;
+	}
+	if (prio)
+		cmdq_rq->cmdq_req_flags |= PRIO;
+
+	if (do_rel_wr)
+		cmdq_rq->cmdq_req_flags |= REL_WR;
+
+	cmdq_rq->data.blocks = blk_rq_sectors(req);
+	cmdq_rq->blk_addr = blk_rq_pos(req);
+	cmdq_rq->data.blksz = MMC_CARD_CMDQ_BLK_SIZE;
+
+	mmc_set_data_timeout(&cmdq_rq->data, card);
+
+	do_data_tag = (card->ext_csd.data_tag_unit_size) &&
+		(req->cmd_flags & REQ_META) &&
+		(rq_data_dir(req) == WRITE) &&
+		((cmdq_rq->data.blocks * cmdq_rq->data.blksz) >=
+		 card->ext_csd.data_tag_unit_size);
+	if (do_data_tag)
+		cmdq_rq->cmdq_req_flags |= DAT_TAG;
+	cmdq_rq->data.sg = mqrq->sg;
+	cmdq_rq->data.sg_len = mmc_queue_map_sg(mq, mqrq);
+
+	/*
+	 * Adjust the sg list so it is the same size as the
+	 * request.
+	 */
+	if (cmdq_rq->data.blocks > card->host->max_blk_count)
+		cmdq_rq->data.blocks = card->host->max_blk_count;
+
+	if (cmdq_rq->data.blocks != blk_rq_sectors(req)) {
+		int i, data_size = cmdq_rq->data.blocks << 9;
+		struct scatterlist *sg;
+
+		for_each_sg(cmdq_rq->data.sg, sg, cmdq_rq->data.sg_len, i) {
+			data_size -= sg->length;
+			if (data_size <= 0) {
+				sg->length += data_size;
+				i++;
+				break;
+			}
+		}
+		cmdq_rq->data.sg_len = i;
+	}
+
+	mqrq->cmdq_req.cmd_flags = req->cmd_flags;
+	mqrq->cmdq_req.mrq.req = mqrq->req;
+	mqrq->cmdq_req.mrq.cmdq_req = &mqrq->cmdq_req;
+	mqrq->cmdq_req.mrq.data = &mqrq->cmdq_req.data;
+	mqrq->req->special = mqrq;
+
+	pr_debug("%s: %s: mrq: 0x%p req: 0x%p mqrq: 0x%p bytes to xf: %d mmc_cmdq_req: 0x%p card-addr: 0x%08x dir(r-1/w-0): %d\n",
+		 mmc_hostname(card->host), __func__, &mqrq->cmdq_req.mrq,
+		 mqrq->req, mqrq, (cmdq_rq->data.blocks * cmdq_rq->data.blksz),
+		 cmdq_rq, cmdq_rq->blk_addr,
+		 (cmdq_rq->cmdq_req_flags & DIR) ? 1 : 0);
+
+	return &mqrq->cmdq_req;
+}
+
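The scatterlist adjustment at the end of the function is self-contained enough to sketch separately: walk the list, and shrink the entry that crosses the byte budget so the total matches exactly. A hypothetical helper (budget stands in for data.blocks << 9):

	static unsigned int sg_trim_to_budget(struct scatterlist *sgl,
					      unsigned int sg_len, int budget)
	{
		struct scatterlist *sg;
		int i;

		for_each_sg(sgl, sg, sg_len, i) {
			budget -= sg->length;
			if (budget <= 0) {
				/* shrink the last entry to land on budget */
				sg->length += budget;
				i++;
				break;
			}
		}

		return i;	/* new number of sg entries */
	}
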
+static void mmc_blk_cmdq_requeue_rw_rq(struct mmc_queue *mq,
+				struct request *req)
+{
+	struct mmc_card *card = mq->card;
+	struct mmc_host *host = card->host;
+
+	blk_requeue_request(req->q, req);
+	mmc_put_card(host->card);
+}
+
+static int mmc_blk_cmdq_issue_rw_rq(struct mmc_queue *mq, struct request *req)
+{
+	struct mmc_queue_req *active_mqrq;
+	struct mmc_card *card = mq->card;
+	struct mmc_host *host = card->host;
+	struct mmc_cmdq_context_info *ctx = &host->cmdq_ctx;
+	struct mmc_cmdq_req *mc_rq;
+	u8 active_small_sector_read = 0;
+	int ret = 0;
+
+	mmc_deferred_scaling(host);
+	mmc_cmdq_clk_scaling_start_busy(host, true);
+
+	BUG_ON((req->tag < 0) || (req->tag > card->ext_csd.cmdq_depth));
+	BUG_ON(test_and_set_bit(req->tag, &host->cmdq_ctx.data_active_reqs));
+	BUG_ON(test_and_set_bit(req->tag, &host->cmdq_ctx.active_reqs));
+
+	active_mqrq = &mq->mqrq_cmdq[req->tag];
+	active_mqrq->req = req;
+
+	mc_rq = mmc_blk_cmdq_rw_prep(active_mqrq, mq);
+
+	if (card->quirks & MMC_QUIRK_CMDQ_EMPTY_BEFORE_DCMD) {
+		unsigned int sectors = blk_rq_sectors(req);
+
+		if (((sectors > 0) && (sectors < 8))
+		    && (rq_data_dir(req) == READ))
+			active_small_sector_read = 1;
+	}
+	ret = mmc_blk_cmdq_start_req(card->host, mc_rq);
+	if (!ret && active_small_sector_read)
+		host->cmdq_ctx.active_small_sector_read_reqs++;
+	/*
+	 * When in SVS2 under low load with many requests queued for CMDQ,
+	 * we must wait until the queue is empty before scaling back up to
+	 * Nominal, even on a sudden increase in load. This hurts performance,
+	 * since lots of I/O then executes at the SVS2 frequency while the
+	 * queue is full. As SVS2 is a low-load use case, we can serialize
+	 * the requests rather than queue them in parallel without impacting
+	 * other use cases. This drains the queue faster, so we can scale up
+	 * to the Nominal frequency when needed.
+	 */
+	if (!ret && (host->clk_scaling.state == MMC_LOAD_LOW))
+		wait_event_interruptible(ctx->queue_empty_wq,
+					(!ctx->active_reqs));
+
+	if (ret) {
+		/* clear pending request */
+		WARN_ON(!test_and_clear_bit(req->tag,
+				&host->cmdq_ctx.data_active_reqs));
+		WARN_ON(!test_and_clear_bit(req->tag,
+				&host->cmdq_ctx.active_reqs));
+		mmc_cmdq_clk_scaling_stop_busy(host, true, false);
+	}
+
+	return ret;
+}
+
+/*
+ * Issues a flush (dcmd) request
+ */
+int mmc_blk_cmdq_issue_flush_rq(struct mmc_queue *mq, struct request *req)
+{
+	int err;
+	struct mmc_queue_req *active_mqrq;
+	struct mmc_card *card = mq->card;
+	struct mmc_host *host;
+	struct mmc_cmdq_req *cmdq_req;
+	struct mmc_cmdq_context_info *ctx_info;
+
+	BUG_ON(!card);
+	host = card->host;
+	BUG_ON(!host);
+	BUG_ON(req->tag > card->ext_csd.cmdq_depth);
+	BUG_ON(test_and_set_bit(req->tag, &host->cmdq_ctx.active_reqs));
+
+	ctx_info = &host->cmdq_ctx;
+
+	set_bit(CMDQ_STATE_DCMD_ACTIVE, &ctx_info->curr_state);
+
+	active_mqrq = &mq->mqrq_cmdq[req->tag];
+	active_mqrq->req = req;
+
+	cmdq_req = mmc_cmdq_prep_dcmd(active_mqrq, mq);
+	cmdq_req->cmdq_req_flags |= QBR;
+	cmdq_req->mrq.cmd = &cmdq_req->cmd;
+	cmdq_req->tag = req->tag;
+
+	err = mmc_cmdq_prepare_flush(cmdq_req->mrq.cmd);
+	if (err) {
+		pr_err("%s: failed (%d) preparing flush req\n",
+		       mmc_hostname(host), err);
+		return err;
+	}
+	err = mmc_blk_cmdq_start_req(card->host, cmdq_req);
+	return err;
+}
+EXPORT_SYMBOL(mmc_blk_cmdq_issue_flush_rq);
+
+static void mmc_blk_cmdq_reset(struct mmc_host *host, bool clear_all)
+{
+	int err = 0;
+
+	if (mmc_cmdq_halt(host, true)) {
+		pr_err("%s: halt failed\n", mmc_hostname(host));
+		goto reset;
+	}
+
+	if (clear_all)
+		mmc_cmdq_discard_queue(host, 0);
+reset:
+	mmc_host_clk_hold(host);
+	host->cmdq_ops->disable(host, true);
+	mmc_host_clk_release(host);
+	err = mmc_cmdq_hw_reset(host);
+	if (err && err != -EOPNOTSUPP) {
+		pr_err("%s: failed to cmdq_hw_reset err = %d\n",
+				mmc_hostname(host), err);
+		mmc_host_clk_hold(host);
+		host->cmdq_ops->enable(host);
+		mmc_host_clk_release(host);
+		mmc_cmdq_halt(host, false);
+		goto out;
+	}
+	/*
+	 * CMDQ HW reset would have already made CQE
+	 * in unhalted state, but reflect the same
+	 * in software state of cmdq_ctx.
+	 */
+	mmc_host_clr_halt(host);
+out:
+	return;
+}
+
+/**
+ * is_cmdq_dcmd_req - Checks if tag belongs to DCMD request.
+ * @q:		request_queue pointer.
+ * @tag:	tag number of request to check.
+ *
+ * This function checks if the request with tag number "tag"
+ * is a DCMD request or not based on cmdq_req_flags set.
+ *
+ * Returns 1 if it is a DCMD request, 0 if not, or -ENOENT if the
+ * tag cannot be resolved to a request.
+ */
+static int is_cmdq_dcmd_req(struct request_queue *q, int tag)
+{
+	struct request *req;
+	struct mmc_queue_req *mq_rq;
+	struct mmc_cmdq_req *cmdq_req;
+
+	req = blk_queue_find_tag(q, tag);
+	if (WARN_ON(!req))
+		goto out;
+	mq_rq = req->special;
+	if (WARN_ON(!mq_rq))
+		goto out;
+	cmdq_req = &(mq_rq->cmdq_req);
+	return !!(cmdq_req->cmdq_req_flags & DCMD);
+out:
+	return -ENOENT;
+}
+
+/**
+ * mmc_blk_cmdq_reset_all - Reset everything for CMDQ block request.
+ * @host:	mmc_host pointer.
+ * @err:	error for which reset is performed.
+ *
+ * This function implements reset_all functionality for
+ * cmdq. It resets the controller, power cycle the card,
+ * and invalidate all busy tags(requeue all request back to
+ * elevator).
+ */
+static void mmc_blk_cmdq_reset_all(struct mmc_host *host, int err)
+{
+	struct mmc_request *mrq = host->err_mrq;
+	struct mmc_card *card = host->card;
+	struct mmc_cmdq_context_info *ctx_info = &host->cmdq_ctx;
+	struct request_queue *q;
+	int itag = 0;
+	int ret = 0;
+
+	if (WARN_ON(!mrq))
+		return;
+
+	q = mrq->req->q;
+	WARN_ON(!test_bit(CMDQ_STATE_ERR, &ctx_info->curr_state));
+
+	#ifdef CONFIG_MMC_CLKGATE
+	pr_debug("%s: %s: active_reqs = %lu, clk_requests = %d\n",
+			mmc_hostname(host), __func__,
+			ctx_info->active_reqs, host->clk_requests);
+	#endif
+
+	mmc_blk_cmdq_reset(host, false);
+
+	for_each_set_bit(itag, &ctx_info->active_reqs,
+			host->num_cq_slots) {
+		ret = is_cmdq_dcmd_req(q, itag);
+		if (WARN_ON(ret == -ENOENT))
+			continue;
+		if (!ret) {
+			WARN_ON(!test_and_clear_bit(itag,
+				 &ctx_info->data_active_reqs));
+			mmc_cmdq_post_req(host, itag, err);
+		} else {
+			clear_bit(CMDQ_STATE_DCMD_ACTIVE,
+					&ctx_info->curr_state);
+		}
+		WARN_ON(!test_and_clear_bit(itag,
+					&ctx_info->active_reqs));
+		mmc_host_clk_release(host);
+		mmc_put_card(card);
+	}
+
+	spin_lock_irq(q->queue_lock);
+	blk_queue_invalidate_tags(q);
+	spin_unlock_irq(q->queue_lock);
+}
+
+static void mmc_blk_cmdq_shutdown(struct mmc_queue *mq)
+{
+	int err;
+	struct mmc_card *card = mq->card;
+	struct mmc_host *host = card->host;
+
+	mmc_get_card(card);
+	mmc_host_clk_hold(host);
+	err = mmc_cmdq_halt(host, true);
+	if (err) {
+		pr_err("%s: halt: failed: %d\n", __func__, err);
+		goto out;
+	}
+
+	/* disable CQ mode in card */
+	if (mmc_card_cmdq(card)) {
+		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+				 EXT_CSD_CMDQ, 0,
+				 card->ext_csd.generic_cmd6_time);
+		if (err) {
+			pr_err("%s: failed to switch card to legacy mode: %d\n",
+			       __func__, err);
+			goto out;
+		}
+		mmc_card_clr_cmdq(card);
+	}
+	host->cmdq_ops->disable(host, false);
+	host->card->cmdq_init = false;
+out:
+	mmc_host_clk_release(host);
+	mmc_put_card(card);
+}
+
+static enum blk_eh_timer_return mmc_blk_cmdq_req_timed_out(struct request *req)
+{
+	struct mmc_queue *mq = req->q->queuedata;
+	struct mmc_host *host = mq->card->host;
+	struct mmc_queue_req *mq_rq = req->special;
+	struct mmc_request *mrq;
+	struct mmc_cmdq_req *cmdq_req;
+	struct mmc_cmdq_context_info *ctx_info = &host->cmdq_ctx;
+
+	BUG_ON(!host);
+
+	/*
+	 * The mmc_queue_req is present only if the request has been issued
+	 * to the LLD. The request could have been fetched from the block
+	 * layer queue but still be waiting to be issued (e.g. clock scaling
+	 * is waiting for an empty cmdq queue). Reset the timer in such
+	 * cases to give the LLD more time.
+	 */
+	if (!mq_rq) {
+		pr_warn("%s: restart timer for tag: %d\n", __func__, req->tag);
+		return BLK_EH_RESET_TIMER;
+	}
+
+	mrq = &mq_rq->cmdq_req.mrq;
+	cmdq_req = &mq_rq->cmdq_req;
+
+	BUG_ON(!mrq || !cmdq_req);
+
+	if (cmdq_req->cmdq_req_flags & DCMD)
+		mrq->cmd->error = -ETIMEDOUT;
+	else
+		mrq->data->error = -ETIMEDOUT;
+
+	if (mrq->cmd && mrq->cmd->error) {
+		if (!(mrq->req->cmd_flags & REQ_FLUSH)) {
+			/*
+			 * Notify completion for non-flush commands like
+			 * discard, which wait for the DCMD to finish.
+			 */
+			set_bit(CMDQ_STATE_REQ_TIMED_OUT,
+					&ctx_info->curr_state);
+			complete(&mrq->completion);
+			return BLK_EH_NOT_HANDLED;
+		}
+	}
+
+	if (test_bit(CMDQ_STATE_REQ_TIMED_OUT, &ctx_info->curr_state) ||
+		test_bit(CMDQ_STATE_ERR, &ctx_info->curr_state))
+		return BLK_EH_NOT_HANDLED;
+
+	set_bit(CMDQ_STATE_REQ_TIMED_OUT, &ctx_info->curr_state);
+	return BLK_EH_HANDLED;
+}
+
+/*
+ * mmc_blk_cmdq_err: error handling for cmdq requests.
+ * Must be called in the context of the errored-out request, with the
+ * host claimed and runtime PM acquired.
+ * This may be called with the CQ engine halted; make sure to
+ * unhalt it after error recovery.
+ *
+ * TODO: the cmdq error handler currently does reset_all on any
+ * error. Error handling needs to be optimized.
+ */
+static void mmc_blk_cmdq_err(struct mmc_queue *mq)
+{
+	struct mmc_host *host = mq->card->host;
+	struct mmc_request *mrq = host->err_mrq;
+	struct mmc_cmdq_context_info *ctx_info = &host->cmdq_ctx;
+	struct request_queue *q;
+	int err, ret;
+	u32 status = 0;
+
+	mmc_host_clk_hold(host);
+	host->cmdq_ops->dumpstate(host);
+	mmc_host_clk_release(host);
+
+	if (WARN_ON(!mrq))
+		return;
+
+	q = mrq->req->q;
+	err = mmc_cmdq_halt(host, true);
+	if (err) {
+		pr_err("halt: failed: %d\n", err);
+		goto reset;
+	}
+
+	/* RED error - Fatal: requires reset */
+	if (mrq->cmdq_req->resp_err) {
+		err = mrq->cmdq_req->resp_err;
+		goto reset;
+	}
+
+	/*
+	 * TIMEOUT errors can happen because of an execution error in
+	 * the previous command, so send CMD13 to read the device status.
+	 */
+	if ((mrq->cmd && (mrq->cmd->error == -ETIMEDOUT)) ||
+			(mrq->data && (mrq->data->error == -ETIMEDOUT))) {
+		if (mmc_host_halt(host) || mmc_host_cq_disable(host)) {
+			ret = get_card_status(host->card, &status, 0);
+			if (ret)
+				pr_err("%s: CMD13 failed with err %d\n",
+						mmc_hostname(host), ret);
+		}
+		pr_err("%s: Timeout error detected with device status 0x%08x\n",
+			mmc_hostname(host), status);
+	}
+
+	/*
+	 * On a software request time-out, the error work is scheduled only
+	 * for the first errored-out request; all other requests in flight
+	 * are handled here.
+	 */
+	if (test_bit(CMDQ_STATE_REQ_TIMED_OUT, &ctx_info->curr_state)) {
+		err = -ETIMEDOUT;
+	} else if (mrq->data && mrq->data->error) {
+		err = mrq->data->error;
+	} else if (mrq->cmd && mrq->cmd->error) {
+		/* DCMD commands */
+		err = mrq->cmd->error;
+	}
+
+reset:
+	mmc_blk_cmdq_reset_all(host, err);
+	if (mrq->cmdq_req->resp_err)
+		mrq->cmdq_req->resp_err = false;
+	mmc_cmdq_halt(host, false);
+
+	host->err_mrq = NULL;
+	clear_bit(CMDQ_STATE_REQ_TIMED_OUT, &ctx_info->curr_state);
+	WARN_ON(!test_and_clear_bit(CMDQ_STATE_ERR, &ctx_info->curr_state));
+	wake_up(&ctx_info->wait);
+}
+
+/* invoked by block layer in softirq context */
+void mmc_blk_cmdq_complete_rq(struct request *rq)
+{
+	struct mmc_queue_req *mq_rq = rq->special;
+	struct mmc_request *mrq = &mq_rq->cmdq_req.mrq;
+	struct mmc_host *host = mrq->host;
+	struct mmc_cmdq_context_info *ctx_info = &host->cmdq_ctx;
+	struct mmc_cmdq_req *cmdq_req = &mq_rq->cmdq_req;
+	struct mmc_queue *mq = (struct mmc_queue *)rq->q->queuedata;
+	int err = 0;
+	bool is_dcmd = false;
+
+	if (mrq->cmd && mrq->cmd->error)
+		err = mrq->cmd->error;
+	else if (mrq->data && mrq->data->error)
+		err = mrq->data->error;
+
+	if ((err || cmdq_req->resp_err) && !cmdq_req->skip_err_handling) {
+		pr_err("%s: %s: txfr error(%d)/resp_err(%d)\n",
+				mmc_hostname(mrq->host), __func__, err,
+				cmdq_req->resp_err);
+		if (test_bit(CMDQ_STATE_ERR, &ctx_info->curr_state)) {
+			pr_err("%s: CQ in error state, ending current req: %d\n",
+				__func__, err);
+		} else {
+			set_bit(CMDQ_STATE_ERR, &ctx_info->curr_state);
+			BUG_ON(host->err_mrq != NULL);
+			host->err_mrq = mrq;
+			schedule_work(&mq->cmdq_err_work);
+		}
+		goto out;
+	}
+	/*
+	 * In case of error, CMDQ is expected to be in either the halted or
+	 * the disabled state, so it cannot receive completions for other
+	 * requests.
+	 */
+	BUG_ON(test_bit(CMDQ_STATE_ERR, &ctx_info->curr_state));
+
+	/* clear pending request */
+	BUG_ON(!test_and_clear_bit(cmdq_req->tag,
+				   &ctx_info->active_reqs));
+	if (cmdq_req->cmdq_req_flags & DCMD)
+		is_dcmd = true;
+	else
+		BUG_ON(!test_and_clear_bit(cmdq_req->tag,
+					 &ctx_info->data_active_reqs));
+	if (!is_dcmd)
+		mmc_cmdq_post_req(host, cmdq_req->tag, err);
+	if (cmdq_req->cmdq_req_flags & DCMD) {
+		clear_bit(CMDQ_STATE_DCMD_ACTIVE, &ctx_info->curr_state);
+		blk_end_request_all(rq, err);
+		goto out;
+	}
+	/*
+	 * In case of error, cmdq_req->data.bytes_xfered is set to 0.
+	 * If we call blk_end_request() with nr_bytes as 0 then the request
+	 * never gets completed. So in case of error, to complete a request
+	 * with error we should use blk_end_request_all().
+	 */
+	if (err && cmdq_req->skip_err_handling) {
+		cmdq_req->skip_err_handling = false;
+		blk_end_request_all(rq, err);
+		goto out;
+	}
+
+	blk_end_request(rq, err, cmdq_req->data.bytes_xfered);
+
+out:
+
+	mmc_cmdq_clk_scaling_stop_busy(host, true, is_dcmd);
+	if (!test_bit(CMDQ_STATE_ERR, &ctx_info->curr_state)) {
+		mmc_host_clk_release(host);
+		wake_up(&ctx_info->wait);
+		mmc_put_card(host->card);
+	}
+
+	if (!ctx_info->active_reqs)
+		wake_up_interruptible(&host->cmdq_ctx.queue_empty_wq);
+
+	if (blk_queue_stopped(mq->queue) && !ctx_info->active_reqs)
+		complete(&mq->cmdq_shutdown_complete);
+
+	return;
+}
+
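The zero-byte caveat above generalizes to a simple completion pattern against this kernel's legacy block API; a hypothetical wrapper:

	/* blk_end_request() with nr_bytes == 0 completes nothing, so an
	 * errored request with no bytes transferred must be finished with
	 * blk_end_request_all().
	 */
	static void complete_rq(struct request *rq, int err,
				unsigned int xfered)
	{
		if (err && !xfered)
			blk_end_request_all(rq, err);
		else
			blk_end_request(rq, err, xfered);
	}
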
+/*
+ * Invoked in irq context; defers completion of the request to the
+ * block layer softirq context.
+ */
+void mmc_blk_cmdq_req_done(struct mmc_request *mrq)
+{
+	struct request *req = mrq->req;
+
+	blk_complete_request(req);
+}
+EXPORT_SYMBOL(mmc_blk_cmdq_req_done);
+
 static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
 {
 	struct mmc_blk_data *md = mq->data;
@@ -1942,6 +3678,10 @@
 	struct mmc_async_req *areq;
 	const u8 packed_nr = 2;
 	u8 reqs = 0;
+	bool reset = false;
+#ifdef CONFIG_MMC_SIMULATE_MAX_SPEED
+	unsigned long waitfor = jiffies;
+#endif
 
 	if (!rqc && !mq->mqrq_prev->req)
 		return 0;
@@ -1974,7 +3714,7 @@
 		areq = mmc_start_req(card->host, areq, (int *) &status);
 		if (!areq) {
 			if (status == MMC_BLK_NEW_REQUEST)
-				mq->flags |= MMC_QUEUE_NEW_REQUEST;
+				set_bit(MMC_QUEUE_NEW_REQUEST, &mq->flags);
 			return 0;
 		}
 
@@ -1984,6 +3724,26 @@
 		type = rq_data_dir(req) == READ ? MMC_BLK_READ : MMC_BLK_WRITE;
 		mmc_queue_bounce_post(mq_rq);
 
+		if (card->err_in_sdr104) {
+			/*
+			 * Data CRC/timeout errors will manifest as CMD/DATA
+			 * ERR, but we'd like to retry these too. There is no
+			 * harm if this fails multiple times; we reduce the
+			 * bus-speed and retry the same request anyway. If
+			 * that fails too, we don't override this status.
+			 */
+			if (status == MMC_BLK_ABORT ||
+			    status == MMC_BLK_CMD_ERR ||
+			    status == MMC_BLK_DATA_ERR ||
+			    status == MMC_BLK_RETRY)
+				/* reset on all of these errors and retry */
+				reset = true;
+
+			status = MMC_BLK_RETRY;
+			card->err_in_sdr104 = false;
+		}
+
 		switch (status) {
 		case MMC_BLK_SUCCESS:
 		case MMC_BLK_PARTIAL:
@@ -1992,6 +3752,8 @@
 			 */
 			mmc_blk_reset_success(md, type);
 
+			mmc_blk_simulate_delay(mq, rqc, waitfor);
+
 			if (mmc_packed_cmd(mq_rq->cmd_type)) {
 				ret = mmc_blk_end_packed_req(mq_rq);
 				break;
@@ -2022,11 +3784,36 @@
 			break;
 		case MMC_BLK_RETRY:
 			retune_retry_done = brq->retune_retry_done;
-			if (retry++ < 5)
+			if (retry++ < MMC_BLK_MAX_RETRIES) {
+				break;
+			} else if (reset) {
+				reset = false;
+				/*
+				 * If we exhaust all the retries due to
+				 * CRC/timeout errors in SDR104 mode with UHS
+				 * SD cards, re-configure the card in SDR50
+				 * bus-speed mode.
+				 * All subsequent re-inits of this card will be
+				 * in SDR50 mode, unless it is removed and
+				 * re-inserted. When a new UHS SD card is
+				 * inserted, it may start in SDR104 mode if
+				 * supported by the card.
+				 */
+				pr_err("%s: blocked SDR104, lower the bus-speed (SDR50 / DDR50)\n",
+					req->rq_disk->disk_name);
+				mmc_host_clear_sdr104(card->host);
+				mmc_suspend_clk_scaling(card->host);
+				mmc_blk_reset(md, card->host, type);
+				/* SDR104 mode is blocked from now on */
+				card->sdr104_blocked = true;
+				/* retry 5 times again */
+				retry = 0;
 				break;
+			}
 			/* Fall through */
 		case MMC_BLK_ABORT:
-			if (!mmc_blk_reset(md, card->host, type))
+			if (!mmc_blk_reset(md, card->host, type) &&
+				(retry++ < (MMC_BLK_MAX_RETRIES + 1)))
 				break;
 			goto cmd_abort;
 		case MMC_BLK_DATA_ERR: {
@@ -2035,10 +3822,7 @@
 			err = mmc_blk_reset(md, card->host, type);
 			if (!err)
 				break;
-			if (err == -ENODEV ||
-				mmc_packed_cmd(mq_rq->cmd_type))
 				goto cmd_abort;
-			/* Fall through */
 		}
 		case MMC_BLK_ECC_ERR:
 			if (brq->data.blocks > 1) {
@@ -2122,6 +3906,181 @@
 	return 0;
 }
 
+static inline int mmc_blk_cmdq_part_switch(struct mmc_card *card,
+				      struct mmc_blk_data *md)
+{
+	struct mmc_blk_data *main_md = mmc_get_drvdata(card);
+	struct mmc_host *host = card->host;
+	struct mmc_cmdq_context_info *ctx = &host->cmdq_ctx;
+	u8 part_config = card->ext_csd.part_config;
+	int ret = 0, err = 0;
+
+	if ((main_md->part_curr == md->part_type) &&
+	    (card->part_curr == md->part_type))
+		return 0;
+
+	WARN_ON(!((card->host->caps2 & MMC_CAP2_CMD_QUEUE) &&
+		 card->ext_csd.cmdq_support &&
+		 (md->flags & MMC_BLK_CMD_QUEUE)));
+
+	if (!test_bit(CMDQ_STATE_HALT, &ctx->curr_state)) {
+		ret = mmc_cmdq_halt(host, true);
+		if (ret) {
+			pr_err("%s: %s: halt: failed: %d\n",
+				mmc_hostname(host), __func__,  ret);
+			goto out;
+		}
+	}
+
+	/* disable CQ mode in card */
+	if (mmc_card_cmdq(card)) {
+		ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+				 EXT_CSD_CMDQ, 0,
+				 card->ext_csd.generic_cmd6_time);
+		if (ret) {
+			pr_err("%s: %s: cmdq mode disable failed %d\n",
+				mmc_hostname(host), __func__, ret);
+			goto cmdq_unhalt;
+		}
+		mmc_card_clr_cmdq(card);
+	}
+
+	part_config &= ~EXT_CSD_PART_CONFIG_ACC_MASK;
+	part_config |= md->part_type;
+
+	ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+			 EXT_CSD_PART_CONFIG, part_config,
+			 card->ext_csd.part_time);
+	if (ret) {
+		pr_err("%s: %s: mmc_switch failure, %d -> %d , err = %d\n",
+			mmc_hostname(host), __func__, main_md->part_curr,
+			md->part_type, ret);
+		goto cmdq_switch;
+	}
+
+	card->ext_csd.part_config = part_config;
+	card->part_curr = md->part_type;
+
+	main_md->part_curr = md->part_type;
+
+cmdq_switch:
+	err = mmc_blk_cmdq_switch(card, md, true);
+	if (err) {
+		pr_err("%s: %s: mmc_blk_cmdq_switch failed: %d\n",
+			mmc_hostname(host), __func__,  err);
+		ret = err;
+	}
+cmdq_unhalt:
+	err = mmc_cmdq_halt(host, false);
+	if (err) {
+		pr_err("%s: %s: unhalt: failed: %d\n",
+			mmc_hostname(host), __func__,  err);
+		ret = err;
+	}
+out:
+	return ret;
+}
+
+static int mmc_blk_cmdq_issue_rq(struct mmc_queue *mq, struct request *req)
+{
+	int ret, err = 0;
+	struct mmc_blk_data *md = mq->data;
+	struct mmc_card *card = md->queue.card;
+	struct mmc_host *host = card->host;
+	unsigned int cmd_flags = req ? req->cmd_flags : 0;
+
+	mmc_get_card(card);
+
+	if (!card->host->cmdq_ctx.active_reqs && mmc_card_doing_bkops(card)) {
+		ret = mmc_cmdq_halt(card->host, true);
+		if (ret)
+			goto out;
+		ret = mmc_stop_bkops(card);
+		if (ret) {
+			pr_err("%s: %s: mmc_stop_bkops failed %d\n",
+					md->disk->disk_name, __func__, ret);
+			goto out;
+		}
+		ret = mmc_cmdq_halt(card->host, false);
+		if (ret)
+			goto out;
+	}
+
+	ret = mmc_blk_cmdq_part_switch(card, md);
+	if (ret) {
+		pr_err("%s: %s: partition switch failed %d, resetting cmdq\n",
+				md->disk->disk_name, __func__, ret);
+
+		mmc_blk_cmdq_reset(host, false);
+		err = mmc_blk_cmdq_part_switch(card, md);
+		if (!err) {
+			pr_err("%s: %s: partition switch success err = %d\n",
+				md->disk->disk_name, __func__, err);
+		} else {
+			pr_err("%s: %s: partition switch failed err = %d\n",
+				md->disk->disk_name, __func__, err);
+			ret = err;
+			goto out;
+		}
+	}
+
+	if (req) {
+		struct mmc_cmdq_context_info *ctx = &host->cmdq_ctx;
+
+		if ((cmd_flags & (REQ_FLUSH | REQ_DISCARD)) &&
+		    (card->quirks & MMC_QUIRK_CMDQ_EMPTY_BEFORE_DCMD) &&
+		    ctx->active_small_sector_read_reqs) {
+			ret = wait_event_interruptible(ctx->queue_empty_wq,
+						      !ctx->active_reqs);
+			if (ret) {
+				pr_err("%s: failed while waiting for the CMDQ to be empty %s err (%d)\n",
+					mmc_hostname(host),
+					__func__, ret);
+				BUG_ON(1);
+			}
+			/* clear the counter now */
+			ctx->active_small_sector_read_reqs = 0;
+			/*
+			 * If small-sector (fewer than 8 sectors) read
+			 * operations were in progress, we must wait for the
+			 * outstanding requests to finish and also delay at
+			 * least 6 microseconds before queuing the DCMD
+			 * request.
+			 */
+			udelay(MMC_QUIRK_CMDQ_DELAY_BEFORE_DCMD);
+		}
+
+		if (cmd_flags & REQ_DISCARD) {
+			if (cmd_flags & REQ_SECURE &&
+			   !(card->quirks & MMC_QUIRK_SEC_ERASE_TRIM_BROKEN))
+				ret = mmc_blk_cmdq_issue_secdiscard_rq(mq, req);
+			else
+				ret = mmc_blk_cmdq_issue_discard_rq(mq, req);
+		} else if (cmd_flags & REQ_FLUSH) {
+			ret = mmc_blk_cmdq_issue_flush_rq(mq, req);
+		} else {
+			ret = mmc_blk_cmdq_issue_rw_rq(mq, req);
+			/*
+			 * If issuing the request fails with either an EBUSY
+			 * or an EAGAIN error, re-queue the request.
+			 * This case would occur with ICE calls.
+			 */
+			if (ret == -EBUSY || ret == -EAGAIN)
+				mmc_blk_cmdq_requeue_rw_rq(mq, req);
+		}
+	}
+
+	return ret;
+
+out:
+	if (req)
+		blk_end_request_all(req, ret);
+	mmc_put_card(card);
+
+	return ret;
+}
+
 static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
 {
 	int ret;
@@ -2130,13 +4089,31 @@
 	struct mmc_host *host = card->host;
 	unsigned long flags;
 	unsigned int cmd_flags = req ? req->cmd_flags : 0;
+	int err;
 
-	if (req && !mq->mqrq_prev->req)
+	if (req && !mq->mqrq_prev->req) {
 		/* claim host only for the first request */
 		mmc_get_card(card);
 
+		if (mmc_card_doing_bkops(host->card)) {
+			ret = mmc_stop_bkops(host->card);
+			if (ret)
+				goto out;
+		}
+	}
+
 	ret = mmc_blk_part_switch(card, md);
+
 	if (ret) {
+		err = mmc_blk_reset(md, card->host, MMC_BLK_PARTSWITCH);
+		if (!err) {
+			pr_err("%s: mmc_blk_reset(MMC_BLK_PARTSWITCH) succeeded.\n",
+					mmc_hostname(host));
+			mmc_blk_reset_success(md, MMC_BLK_PARTSWITCH);
+		} else
+			pr_err("%s: mmc_blk_reset(MMC_BLK_PARTSWITCH) failed.\n",
+				mmc_hostname(host));
+
 		if (req) {
 			blk_end_request_all(req, -EIO);
 		}
@@ -2144,16 +4121,19 @@
 		goto out;
 	}
 
-	mq->flags &= ~MMC_QUEUE_NEW_REQUEST;
+	mmc_blk_write_packing_control(mq, req);
+
+	clear_bit(MMC_QUEUE_NEW_REQUEST, &mq->flags);
 	if (cmd_flags & REQ_DISCARD) {
 		/* complete ongoing async transfer before issuing discard */
 		if (card->host->areq)
 			mmc_blk_issue_rw_rq(mq, NULL);
-		if (req->cmd_flags & REQ_SECURE)
+		if (cmd_flags & REQ_SECURE &&
+			!(card->quirks & MMC_QUIRK_SEC_ERASE_TRIM_BROKEN))
 			ret = mmc_blk_issue_secdiscard_rq(mq, req);
 		else
 			ret = mmc_blk_issue_discard_rq(mq, req);
-	} else if (cmd_flags & REQ_FLUSH) {
+	} else if (cmd_flags & (REQ_FLUSH | REQ_BARRIER)) {
 		/* complete ongoing async transfer before issuing flush */
 		if (card->host->areq)
 			mmc_blk_issue_rw_rq(mq, NULL);
@@ -2168,7 +4148,7 @@
 	}
 
 out:
-	if ((!req && !(mq->flags & MMC_QUEUE_NEW_REQUEST)) ||
+	if ((!req && !(test_bit(MMC_QUEUE_NEW_REQUEST, &mq->flags))) ||
 	     (cmd_flags & MMC_REQ_SPECIAL_MASK))
 		/*
 		 * Release host when there are no more requests
@@ -2238,7 +4218,7 @@
 	INIT_LIST_HEAD(&md->part);
 	md->usage = 1;
 
-	ret = mmc_init_queue(&md->queue, card, &md->lock, subname);
+	ret = mmc_init_queue(&md->queue, card, NULL, subname, area_type);
 	if (ret)
 		goto err_putdisk;
 
@@ -2252,6 +4232,7 @@
 	md->disk->queue = md->queue.queue;
 	md->disk->driverfs_dev = parent;
 	set_disk_ro(md->disk, md->read_only || default_ro);
+	md->disk->flags = GENHD_FL_EXT_DEVT;
 	if (area_type & (MMC_BLK_DATA_AREA_RPMB | MMC_BLK_DATA_AREA_BOOT))
 		md->disk->flags |= GENHD_FL_NO_PART_SCAN;
 
@@ -2294,7 +4275,16 @@
 		blk_queue_flush(md->queue.queue, REQ_FLUSH | REQ_FUA);
 	}
 
-	if (mmc_card_mmc(card) &&
+	if (card->cmdq_init) {
+		md->flags |= MMC_BLK_CMD_QUEUE;
+		md->queue.cmdq_complete_fn = mmc_blk_cmdq_complete_rq;
+		md->queue.cmdq_issue_fn = mmc_blk_cmdq_issue_rq;
+		md->queue.cmdq_error_fn = mmc_blk_cmdq_err;
+		md->queue.cmdq_req_timed_out = mmc_blk_cmdq_req_timed_out;
+		md->queue.cmdq_shutdown = mmc_blk_cmdq_shutdown;
+	}
+
+	if (mmc_card_mmc(card) && !card->cmdq_init &&
 	    (area_type == MMC_BLK_DATA_AREA_MAIN) &&
 	    (md->flags & MMC_BLK_CMD23) &&
 	    card->ext_csd.packed_event_en) {
@@ -2307,8 +4297,11 @@
  err_putdisk:
 	put_disk(md->disk);
  err_kfree:
+	if (!subname)
+		__clear_bit(md->name_idx, name_use);
 	kfree(md);
  out:
+	__clear_bit(devidx, dev_use);
 	return ERR_PTR(ret);
 }
 
@@ -2404,12 +4397,24 @@
 		mmc_cleanup_queue(&md->queue);
 		if (md->flags & MMC_BLK_PACKED_CMD)
 			mmc_packed_clean(&md->queue);
+		if (md->flags & MMC_BLK_CMD_QUEUE)
+			mmc_cmdq_clean(&md->queue, card);
+		device_remove_file(disk_to_dev(md->disk),
+				   &md->num_wr_reqs_to_start_packing);
 		if (md->disk->flags & GENHD_FL_UP) {
 			device_remove_file(disk_to_dev(md->disk), &md->force_ro);
 			if ((md->area_type & MMC_BLK_DATA_AREA_BOOT) &&
 					card->ext_csd.boot_ro_lockable)
 				device_remove_file(disk_to_dev(md->disk),
 					&md->power_ro_lock);
+#ifdef CONFIG_MMC_SIMULATE_MAX_SPEED
+			device_remove_file(disk_to_dev(md->disk),
+						&dev_attr_max_write_speed);
+			device_remove_file(disk_to_dev(md->disk),
+						&dev_attr_max_read_speed);
+			device_remove_file(disk_to_dev(md->disk),
+						&dev_attr_cache_size);
+#endif
 
 			del_gendisk(md->disk);
 		}
@@ -2445,6 +4450,24 @@
 	ret = device_create_file(disk_to_dev(md->disk), &md->force_ro);
 	if (ret)
 		goto force_ro_fail;
+#ifdef CONFIG_MMC_SIMULATE_MAX_SPEED
+	atomic_set(&md->queue.max_write_speed, max_write_speed);
+	ret = device_create_file(disk_to_dev(md->disk),
+			&dev_attr_max_write_speed);
+	if (ret)
+		goto max_write_speed_fail;
+	atomic_set(&md->queue.max_read_speed, max_read_speed);
+	ret = device_create_file(disk_to_dev(md->disk),
+			&dev_attr_max_read_speed);
+	if (ret)
+		goto max_read_speed_fail;
+	atomic_set(&md->queue.cache_size, cache_size);
+	atomic_long_set(&md->queue.cache_used, 0);
+	md->queue.cache_jiffies = jiffies;
+	ret = device_create_file(disk_to_dev(md->disk), &dev_attr_cache_size);
+	if (ret)
+		goto cache_size_fail;
+#endif
 
 	if ((md->area_type & MMC_BLK_DATA_AREA_BOOT) &&
 	     card->ext_csd.boot_ro_lockable) {
@@ -2466,9 +4489,46 @@
 		if (ret)
 			goto power_ro_lock_fail;
 	}
+
+	md->num_wr_reqs_to_start_packing.show =
+		num_wr_reqs_to_start_packing_show;
+	md->num_wr_reqs_to_start_packing.store =
+		num_wr_reqs_to_start_packing_store;
+	sysfs_attr_init(&md->num_wr_reqs_to_start_packing.attr);
+	md->num_wr_reqs_to_start_packing.attr.name =
+		"num_wr_reqs_to_start_packing";
+	md->num_wr_reqs_to_start_packing.attr.mode = S_IRUGO | S_IWUSR;
+	ret = device_create_file(disk_to_dev(md->disk),
+				 &md->num_wr_reqs_to_start_packing);
+	if (ret)
+		goto num_wr_reqs_to_start_packing_fail;
+
+	md->no_pack_for_random.show = no_pack_for_random_show;
+	md->no_pack_for_random.store = no_pack_for_random_store;
+	sysfs_attr_init(&md->no_pack_for_random.attr);
+	md->no_pack_for_random.attr.name = "no_pack_for_random";
+	md->no_pack_for_random.attr.mode = S_IRUGO | S_IWUSR;
+	ret = device_create_file(disk_to_dev(md->disk),
+				 &md->no_pack_for_random);
+	if (ret)
+		goto no_pack_for_random_fails;
+
 	return ret;
 
+no_pack_for_random_fails:
+	device_remove_file(disk_to_dev(md->disk),
+			   &md->num_wr_reqs_to_start_packing);
+num_wr_reqs_to_start_packing_fail:
+	device_remove_file(disk_to_dev(md->disk), &md->power_ro_lock);
 power_ro_lock_fail:
+#ifdef CONFIG_MMC_SIMULATE_MAX_SPEED
+	device_remove_file(disk_to_dev(md->disk), &dev_attr_cache_size);
+cache_size_fail:
+	device_remove_file(disk_to_dev(md->disk), &dev_attr_max_read_speed);
+max_read_speed_fail:
+	device_remove_file(disk_to_dev(md->disk), &dev_attr_max_write_speed);
+max_write_speed_fail:
+#endif
 	device_remove_file(disk_to_dev(md->disk), &md->force_ro);
 force_ro_fail:
 	del_gendisk(md->disk);
@@ -2482,6 +4542,11 @@
 #define CID_MANFID_SAMSUNG	0x15
 #define CID_MANFID_KINGSTON	0x70
 
 static const struct mmc_fixup blk_fixups[] =
 {
 	MMC_FIXUP("SEM02G", CID_MANFID_SANDISK, 0x100, add_quirk,
@@ -2513,6 +4578,8 @@
 		  MMC_QUIRK_BLK_NO_CMD23),
 	MMC_FIXUP("MMC32G", CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc,
 		  MMC_QUIRK_BLK_NO_CMD23),
+	MMC_FIXUP(CID_NAME_ANY, CID_MANFID_TOSHIBA, CID_OEMID_ANY,
+		  add_quirk_mmc, MMC_QUIRK_CMDQ_EMPTY_BEFORE_DCMD),
 
 	/*
 	 * Some MMC cards need longer data read timeout than indicated in CSD.
@@ -2523,6 +4590,20 @@
 		  MMC_QUIRK_LONG_READ_TIME),
 
 	/*
+	 * Some Samsung MMC cards need longer data read timeout than
+	 * indicated in CSD.
+	 */
+	MMC_FIXUP("Q7XSAB", CID_MANFID_SAMSUNG, 0x100, add_quirk_mmc,
+		  MMC_QUIRK_LONG_READ_TIME),
+
+	/*
+	 * Hynix eMMC cards need longer data read timeout than
+	 * indicated in CSD.
+	 */
+	MMC_FIXUP(CID_NAME_ANY, CID_MANFID_HYNIX, CID_OEMID_ANY, add_quirk_mmc,
+		  MMC_QUIRK_LONG_READ_TIME),
+
+	/*
 	 * On these Samsung MoviNAND parts, performing secure erase or
 	 * secure trim can result in unrecoverable corruption due to a
 	 * firmware bug.
@@ -2553,6 +4634,32 @@
 	MMC_FIXUP("V10016", CID_MANFID_KINGSTON, CID_OEMID_ANY, add_quirk_mmc,
 		  MMC_QUIRK_TRIM_BROKEN),
 
+	/* Some INAND MCP devices advertise incorrect timeout values */
+	MMC_FIXUP("SEM04G", 0x45, CID_OEMID_ANY, add_quirk_mmc,
+		  MMC_QUIRK_INAND_DATA_TIMEOUT),
+
 	END_FIXUP
 };
 
@@ -2584,6 +4691,10 @@
 
 	dev_set_drvdata(&card->dev, md);
 
+#ifdef CONFIG_MMC_BLOCK_DEFERRED_RESUME
+	mmc_set_bus_resume_policy(card->host, 1);
+#endif
+
 	if (mmc_add_disk(md))
 		goto out;
 
@@ -2592,7 +4703,8 @@
 			goto out;
 	}
 
-	pm_runtime_set_autosuspend_delay(&card->dev, 3000);
+	pm_runtime_set_autosuspend_delay(&card->dev, MMC_AUTOSUSPEND_DELAY_MS);
 	pm_runtime_use_autosuspend(&card->dev);
 
 	/*
@@ -2626,25 +4738,41 @@
 	pm_runtime_put_noidle(&card->dev);
 	mmc_blk_remove_req(md);
 	dev_set_drvdata(&card->dev, NULL);
+#ifdef CONFIG_MMC_BLOCK_DEFERRED_RESUME
+	mmc_set_bus_resume_policy(card->host, 0);
+#endif
 }
 
-static int _mmc_blk_suspend(struct mmc_card *card)
+static int _mmc_blk_suspend(struct mmc_card *card, bool wait)
 {
 	struct mmc_blk_data *part_md;
 	struct mmc_blk_data *md = dev_get_drvdata(&card->dev);
+	int rc = 0;
 
 	if (md) {
-		mmc_queue_suspend(&md->queue);
+		rc = mmc_queue_suspend(&md->queue, wait);
+		if (rc)
+			goto out;
 		list_for_each_entry(part_md, &md->part, part) {
-			mmc_queue_suspend(&part_md->queue);
+			rc = mmc_queue_suspend(&part_md->queue, wait);
+			if (rc)
+				goto out_resume;
 		}
 	}
-	return 0;
+	goto out;
+
+ out_resume:
+	mmc_queue_resume(&md->queue);
+	list_for_each_entry(part_md, &md->part, part) {
+		mmc_queue_resume(&part_md->queue);
+	}
+ out:
+	return rc;
 }
 
 static void mmc_blk_shutdown(struct mmc_card *card)
 {
-	_mmc_blk_suspend(card);
+	_mmc_blk_suspend(card, 1);
 }
 
 #ifdef CONFIG_PM_SLEEP
@@ -2652,7 +4780,7 @@
 {
 	struct mmc_card *card = mmc_dev_to_card(dev);
 
-	return _mmc_blk_suspend(card);
+	return _mmc_blk_suspend(card, 0);
 }
 
 static int mmc_blk_resume(struct device *dev)
diff -ruw linux-4.4.115/drivers/mmc/card/Kconfig linux-4.4.115-fbx/drivers/mmc/card/Kconfig
--- linux-4.4.115/drivers/mmc/card/Kconfig	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/mmc/card/Kconfig	2019-01-22 16:16:24.763257817 +0100
@@ -50,6 +50,17 @@
 
 	  If unsure, say Y here.
 
+config MMC_BLOCK_DEFERRED_RESUME
+	bool "Defer MMC layer resume until I/O is requested"
+	depends on MMC_BLOCK
+	default n
+	help
+	  Say Y here to defer MMC layer resume until I/O
+	  is requested.
+
+	  This will reduce overall resume latency and
+	  save power when there is an SD card inserted but not being used.
+
 config SDIO_UART
 	tristate "SDIO UART/GPS class support"
 	depends on TTY
@@ -68,3 +79,15 @@
 
 	  This driver is only of interest to those developing or
 	  testing a host driver. Most people should say N here.
+
+config MMC_SIMULATE_MAX_SPEED
+	bool "Turn on maximum speed control per block device"
+	depends on MMC_BLOCK
+	help
+	  Say Y here to enable MMC device speed limiting. Used to test and
+	  simulate the behavior of the system when confronted with a slow MMC.
+
+	  Enables max_read_speed, max_write_speed and cache_size attributes to
+	  control the write or read maximum KB/second speed behaviors.
+
+	  If unsure, say N here.
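The throttle this option enables boils down to converting a byte count and a KB/s cap into a minimum duration and sleeping off the remainder. A standalone sketch of the arithmetic (illustrative name, not the driver's size_and_speed_to_jiffies()):

	/* Minimum time, in ms, a transfer of 'bytes' must take to respect
	 * a 'kbps' KB/s cap (0 means unlimited).
	 */
	static unsigned long min_duration_ms(unsigned long bytes,
					     unsigned long kbps)
	{
		if (!kbps)
			return 0;

		return (bytes * 1000) / (kbps * 1024);
	}

For example, a 4 MiB write capped at 2048 KB/s must take at least 2000 ms; the driver sleeps for whatever part of that budget the hardware did not already consume.
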
diff -ruw linux-4.4.115/drivers/mmc/card/Makefile linux-4.4.115-fbx/drivers/mmc/card/Makefile
--- linux-4.4.115/drivers/mmc/card/Makefile	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/mmc/card/Makefile	2019-01-22 16:16:24.763257817 +0100
@@ -8,3 +8,4 @@
 
 obj-$(CONFIG_SDIO_UART)		+= sdio_uart.o
 
+obj-$(CONFIG_MMC_BLOCK_TEST)		+= mmc_block_test.o
diff -ruw linux-4.4.115/drivers/mmc/card/queue.c linux-4.4.115-fbx/drivers/mmc/card/queue.c
--- linux-4.4.115/drivers/mmc/card/queue.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/mmc/card/queue.c	2019-10-29 09:26:24.053207190 +0100
@@ -16,14 +16,24 @@
 #include <linux/kthread.h>
 #include <linux/scatterlist.h>
 #include <linux/dma-mapping.h>
+#include <linux/bitops.h>
+#include <linux/delay.h>
 
 #include <linux/mmc/card.h>
 #include <linux/mmc/host.h>
+#include <linux/sched/rt.h>
 #include "queue.h"
 
 #define MMC_QUEUE_BOUNCESZ	65536
 
 /*
+ * The default number of requests that triggers write packing was
+ * determined by benchmark tests: it keeps read latency as low as
+ * possible while maintaining high write throughput.
+ */
+#define DEFAULT_NUM_REQS_TO_START_PACK 17
+
+/*
  * Prepare a MMC request. This just filters out odd stuff.
  */
 static int mmc_prep_request(struct request_queue *q, struct request *req)
@@ -46,12 +56,102 @@
 	return BLKPREP_OK;
 }
 
+static struct request *mmc_peek_request(struct mmc_queue *mq)
+{
+	struct request_queue *q = mq->queue;
+
+	mq->cmdq_req_peeked = NULL;
+
+	spin_lock_irq(q->queue_lock);
+	if (!blk_queue_stopped(q))
+		mq->cmdq_req_peeked = blk_peek_request(q);
+	spin_unlock_irq(q->queue_lock);
+
+	return mq->cmdq_req_peeked;
+}
+
+static bool mmc_check_blk_queue_start_tag(struct request_queue *q,
+					  struct request *req)
+{
+	int ret;
+
+	spin_lock_irq(q->queue_lock);
+	ret = blk_queue_start_tag(q, req);
+	spin_unlock_irq(q->queue_lock);
+
+	return !!ret;
+}
+
+static inline void mmc_cmdq_ready_wait(struct mmc_host *host,
+					struct mmc_queue *mq)
+{
+	struct mmc_cmdq_context_info *ctx = &host->cmdq_ctx;
+	struct request_queue *q = mq->queue;
+
+	/*
+	 * Wait until all of the following conditions are true:
+	 * 1. There is a request pending in the block layer queue
+	 *    to be processed.
+	 * 2. If the peeked request is a flush/discard, no other direct
+	 *    command may be active.
+	 * 3. The cmdq state is unhalted.
+	 * 4. The cmdq state is not in error.
+	 * 5. A free tag is available to process the new request.
+	 */
+	wait_event(ctx->wait, kthread_should_stop()
+		|| (mmc_peek_request(mq) &&
+		!((mq->cmdq_req_peeked->cmd_flags & (REQ_FLUSH | REQ_DISCARD))
+		  && test_bit(CMDQ_STATE_DCMD_ACTIVE, &ctx->curr_state))
+		&& !(!host->card->part_curr && !mmc_card_suspended(host->card)
+		     && mmc_host_halt(host))
+		&& !(!host->card->part_curr && mmc_host_cq_disable(host) &&
+			!mmc_card_suspended(host->card))
+		&& !test_bit(CMDQ_STATE_ERR, &ctx->curr_state)
+		&& !mmc_check_blk_queue_start_tag(q, mq->cmdq_req_peeked)));
+}
+
+static int mmc_cmdq_thread(void *d)
+{
+	struct mmc_queue *mq = d;
+	struct mmc_card *card = mq->card;
+	struct mmc_host *host = card->host;
+
+	current->flags |= PF_MEMALLOC;
+	if (card->host->wakeup_on_idle)
+		set_wake_up_idle(true);
+
+	while (1) {
+		int ret = 0;
+
+		mmc_cmdq_ready_wait(host, mq);
+		if (kthread_should_stop())
+			break;
+
+		ret = mq->cmdq_issue_fn(mq, mq->cmdq_req_peeked);
+		/*
+		 * Don't requeue if issue_fn fails; recovery will come from
+		 * the completion softirq. We also end the request on a
+		 * partition switch error, so it must not be requeued here.
+		 */
+	} /* loop */
+
+	return 0;
+}
+
 static int mmc_queue_thread(void *d)
 {
 	struct mmc_queue *mq = d;
 	struct request_queue *q = mq->queue;
+	struct mmc_card *card = mq->card;
+	struct sched_param scheduler_params = {0};
+
+	scheduler_params.sched_priority = 1;
+
+	sched_setscheduler(current, SCHED_FIFO, &scheduler_params);
 
 	current->flags |= PF_MEMALLOC;
+	if (card->host->wakeup_on_idle)
+		set_wake_up_idle(true);
 
 	down(&mq->thread_sem);
 	do {
@@ -69,8 +169,8 @@
 			cmd_flags = req ? req->cmd_flags : 0;
 			mq->issue_fn(mq, req);
 			cond_resched();
-			if (mq->flags & MMC_QUEUE_NEW_REQUEST) {
-				mq->flags &= ~MMC_QUEUE_NEW_REQUEST;
+			if (test_bit(MMC_QUEUE_NEW_REQUEST, &mq->flags)) {
+				clear_bit(MMC_QUEUE_NEW_REQUEST, &mq->flags);
 				continue; /* fetch again */
 			}
 
@@ -102,6 +202,13 @@
 	return 0;
 }
 
+static void mmc_cmdq_dispatch_req(struct request_queue *q)
+{
+	struct mmc_queue *mq = q->queuedata;
+
+	wake_up(&mq->card->host->cmdq_ctx.wait);
+}
+
 /*
  * Generic MMC request handler.  This is called for any queue on a
  * particular host.  When the host is not busy, we look for a request
@@ -177,6 +284,32 @@
 }
 
 /**
+ * mmc_cmdq_setup_queue
+ * @mq: mmc queue
+ * @card: card to attach to this queue
+ *
+ * Set up the queue for a CMDQ-capable MMC card
+ */
+void mmc_cmdq_setup_queue(struct mmc_queue *mq, struct mmc_card *card)
+{
+	u64 limit = BLK_BOUNCE_HIGH;
+	struct mmc_host *host = card->host;
+
+	if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
+		limit = *mmc_dev(host)->dma_mask;
+
+	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mq->queue);
+	if (mmc_can_erase(card))
+		mmc_queue_setup_discard(mq->queue, card);
+
+	blk_queue_bounce_limit(mq->queue, limit);
+	blk_queue_max_hw_sectors(mq->queue, min(host->max_blk_count,
+						host->max_req_size / 512));
+	blk_queue_max_segment_size(mq->queue, host->max_seg_size);
+	blk_queue_max_segments(mq->queue, host->max_segs);
+}
+
+/**
  * mmc_init_queue - initialise a queue structure.
  * @mq: mmc queue
  * @card: mmc card to attach this queue
@@ -186,7 +319,7 @@
  * Initialise a MMC card request queue.
  */
 int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
-		   spinlock_t *lock, const char *subname)
+		   spinlock_t *lock, const char *subname, int area_type)
 {
 	struct mmc_host *host = card->host;
 	u64 limit = BLK_BOUNCE_HIGH;
@@ -198,6 +331,37 @@
 		limit = (u64)dma_max_pfn(mmc_dev(host)) << PAGE_SHIFT;
 
 	mq->card = card;
+	if (card->ext_csd.cmdq_support &&
+	    (area_type == MMC_BLK_DATA_AREA_MAIN)) {
+		mq->queue = blk_init_queue(mmc_cmdq_dispatch_req, lock);
+		if (!mq->queue)
+			return -ENOMEM;
+		mmc_cmdq_setup_queue(mq, card);
+		ret = mmc_cmdq_init(mq, card);
+		if (ret) {
+			pr_err("%s: %d: cmdq: unable to set-up\n",
+			       mmc_hostname(card->host), ret);
+			blk_cleanup_queue(mq->queue);
+		} else {
+			sema_init(&mq->thread_sem, 1);
+			/* hook for pm qos cmdq init */
+			if (card->host->cmdq_ops->init)
+				card->host->cmdq_ops->init(card->host);
+			mq->queue->queuedata = mq;
+			mq->thread = kthread_run(mmc_cmdq_thread, mq,
+						 "mmc-cmdqd/%d%s",
+						 host->index,
+						 subname ? subname : "");
+			if (IS_ERR(mq->thread)) {
+				pr_err("%s: %d: cmdq: failed to start mmc-cmdqd thread\n",
+						mmc_hostname(card->host), ret);
+				ret = PTR_ERR(mq->thread);
+			}
+
+			return ret;
+		}
+	}
+
 	mq->queue = blk_init_queue(mmc_request_fn, lock);
 	if (!mq->queue)
 		return -ENOMEM;
@@ -205,6 +369,9 @@
 	mq->mqrq_cur = mqrq_cur;
 	mq->mqrq_prev = mqrq_prev;
 	mq->queue->queuedata = mq;
+	mq->num_wr_reqs_to_start_packing =
+		min_t(int, (int)card->ext_csd.max_packed_writes,
+		     DEFAULT_NUM_REQS_TO_START_PACK);
 
 	blk_queue_prep_rq(mq->queue, mmc_prep_request);
 	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mq->queue);
@@ -270,24 +437,49 @@
 #endif
 
 	if (!mqrq_cur->bounce_buf && !mqrq_prev->bounce_buf) {
+		unsigned int max_segs = host->max_segs;
+
 		blk_queue_bounce_limit(mq->queue, limit);
 		blk_queue_max_hw_sectors(mq->queue,
 			min(host->max_blk_count, host->max_req_size / 512));
-		blk_queue_max_segments(mq->queue, host->max_segs);
 		blk_queue_max_segment_size(mq->queue, host->max_seg_size);
+retry:
+		blk_queue_max_segments(mq->queue, host->max_segs);
 
 		mqrq_cur->sg = mmc_alloc_sg(host->max_segs, &ret);
-		if (ret)
+		if (ret == -ENOMEM)
+			goto cur_sg_alloc_failed;
+		else if (ret)
 			goto cleanup_queue;
 
-
 		mqrq_prev->sg = mmc_alloc_sg(host->max_segs, &ret);
-		if (ret)
+		if (ret == -ENOMEM)
+			goto prev_sg_alloc_failed;
+		else if (ret)
+			goto cleanup_queue;
+
+		goto success;
+
+prev_sg_alloc_failed:
+		kfree(mqrq_cur->sg);
+		mqrq_cur->sg = NULL;
+cur_sg_alloc_failed:
+		host->max_segs /= 2;
+		if (host->max_segs) {
+			goto retry;
+		} else {
+			host->max_segs = max_segs;
 			goto cleanup_queue;
 	}
+	}
 
+success:
 	sema_init(&mq->thread_sem, 1);
 
+	/* hook for pm qos legacy init */
+	if (card->host->ops->init)
+		card->host->ops->init(card->host);
+
 	mq->thread = kthread_run(mmc_queue_thread, mq, "mmcqd/%d%s",
 		host->index, subname ? subname : "");
 
@@ -402,29 +594,193 @@
 	mqrq_prev->packed = NULL;
 }
 
+static void mmc_cmdq_softirq_done(struct request *rq)
+{
+	struct mmc_queue *mq = rq->q->queuedata;
+	mq->cmdq_complete_fn(rq);
+}
+
+static void mmc_cmdq_error_work(struct work_struct *work)
+{
+	struct mmc_queue *mq = container_of(work, struct mmc_queue,
+					    cmdq_err_work);
+
+	mq->cmdq_error_fn(mq);
+}
+
+enum blk_eh_timer_return mmc_cmdq_rq_timed_out(struct request *req)
+{
+	struct mmc_queue *mq = req->q->queuedata;
+
+	pr_err("%s: request with tag: %d flags: 0x%llx timed out\n",
+	       mmc_hostname(mq->card->host), req->tag, req->cmd_flags);
+
+	return mq->cmdq_req_timed_out(req);
+}
+
+int mmc_cmdq_init(struct mmc_queue *mq, struct mmc_card *card)
+{
+	int i, ret = 0;
+	/* one slot is reserved for dcmd requests */
+	int q_depth = card->ext_csd.cmdq_depth - 1;
+
+	card->cmdq_init = false;
+	if (!(card->host->caps2 & MMC_CAP2_CMD_QUEUE)) {
+		ret = -ENOTSUPP;
+		goto out;
+	}
+
+	init_waitqueue_head(&card->host->cmdq_ctx.queue_empty_wq);
+	init_waitqueue_head(&card->host->cmdq_ctx.wait);
+
+	mq->mqrq_cmdq = kzalloc(
+			sizeof(struct mmc_queue_req) * q_depth, GFP_KERNEL);
+	if (!mq->mqrq_cmdq) {
+		pr_warn("%s: unable to allocate mqrq's for q_depth %d\n",
+			mmc_card_name(card), q_depth);
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	/* sg is allocated for data request slots only */
+	for (i = 0; i < q_depth; i++) {
+		mq->mqrq_cmdq[i].sg = mmc_alloc_sg(card->host->max_segs, &ret);
+		if (ret) {
+			pr_warn("%s: unable to allocate cmdq sg of size %d\n",
+				mmc_card_name(card),
+				card->host->max_segs);
+			goto free_mqrq_sg;
+		}
+	}
+
+	ret = blk_queue_init_tags(mq->queue, q_depth, NULL, BLK_TAG_ALLOC_FIFO);
+	if (ret) {
+		pr_warn("%s: unable to allocate cmdq tags %d\n",
+				mmc_card_name(card), q_depth);
+		goto free_mqrq_sg;
+	}
+
+	blk_queue_softirq_done(mq->queue, mmc_cmdq_softirq_done);
+	INIT_WORK(&mq->cmdq_err_work, mmc_cmdq_error_work);
+	init_completion(&mq->cmdq_shutdown_complete);
+	init_completion(&mq->cmdq_pending_req_done);
+
+	blk_queue_rq_timed_out(mq->queue, mmc_cmdq_rq_timed_out);
+	blk_queue_rq_timeout(mq->queue, 120 * HZ);
+	card->cmdq_init = true;
+
+	goto out;
+
+free_mqrq_sg:
+	for (i = 0; i < q_depth; i++)
+		kfree(mq->mqrq_cmdq[i].sg);
+	kfree(mq->mqrq_cmdq);
+	mq->mqrq_cmdq = NULL;
+out:
+	return ret;
+}
+
+void mmc_cmdq_clean(struct mmc_queue *mq, struct mmc_card *card)
+{
+	int i;
+	int q_depth = card->ext_csd.cmdq_depth - 1;
+
+	blk_free_tags(mq->queue->queue_tags);
+	mq->queue->queue_tags = NULL;
+	blk_queue_free_tags(mq->queue);
+
+	for (i = 0; i < q_depth; i++)
+		kfree(mq->mqrq_cmdq[i].sg);
+	kfree(mq->mqrq_cmdq);
+	mq->mqrq_cmdq = NULL;
+}
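/*
 * Illustrative sketch (not part of the patch): the expected pairing of
 * the two helpers above over a queue's lifetime. One hardware slot is
 * reserved for DCMD, so only (ext_csd.cmdq_depth - 1) data slots get an
 * sg table.
 *
 *	ret = mmc_cmdq_init(mq, card);
 *	if (ret)
 *		return ret;		// caller falls back to legacy queue
 *	...
 *	mmc_cmdq_clean(mq, card);	// on teardown/removal
 */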
+
 /**
  * mmc_queue_suspend - suspend a MMC request queue
  * @mq: MMC queue to suspend
+ * @wait: wait until the MMC request queue is empty
  *
  * Stop the block request queue, and wait for our thread to
  * complete any outstanding requests.  This ensures that we
  * won't suspend while a request is being processed.
  */
-void mmc_queue_suspend(struct mmc_queue *mq)
+int mmc_queue_suspend(struct mmc_queue *mq, int wait)
 {
 	struct request_queue *q = mq->queue;
 	unsigned long flags;
+	int rc = 0;
+	struct mmc_card *card = mq->card;
+	struct request *req;
+
+	if (card->cmdq_init && blk_queue_tagged(q)) {
+		struct mmc_host *host = card->host;
+
+		if (test_and_set_bit(MMC_QUEUE_SUSPENDED, &mq->flags))
+			goto out;
+
+		if (wait) {
+			/*
+			 * After blk_cleanup_queue() is called, wait for all
+			 * active_reqs to complete, then wait for the cmdq
+			 * thread to exit before calling cmdq shutdown, to
+			 * avoid a race between issuing requests and shutting
+			 * down cmdq.
+			 */
+			blk_cleanup_queue(q);
 
-	if (!(mq->flags & MMC_QUEUE_SUSPENDED)) {
-		mq->flags |= MMC_QUEUE_SUSPENDED;
+			if (host->cmdq_ctx.active_reqs)
+				wait_for_completion(
+						&mq->cmdq_shutdown_complete);
+			kthread_stop(mq->thread);
+			mq->cmdq_shutdown(mq);
+		} else {
+			spin_lock_irqsave(q->queue_lock, flags);
+			blk_stop_queue(q);
+			wake_up(&host->cmdq_ctx.wait);
+			req = blk_peek_request(q);
+			if (req || mq->cmdq_req_peeked ||
+			    host->cmdq_ctx.active_reqs) {
+				clear_bit(MMC_QUEUE_SUSPENDED, &mq->flags);
+				blk_start_queue(q);
+				rc = -EBUSY;
+			}
+			spin_unlock_irqrestore(q->queue_lock, flags);
+		}
+
+		goto out;
+	}
 
+	if (!(test_and_set_bit(MMC_QUEUE_SUSPENDED, &mq->flags))) {
+		if (!wait) {
+			/* suspend/stop the queue in case of suspend */
 			spin_lock_irqsave(q->queue_lock, flags);
 			blk_stop_queue(q);
 			spin_unlock_irqrestore(q->queue_lock, flags);
+		} else {
+			/* shutdown the queue in case of shutdown/reboot */
+			blk_cleanup_queue(q);
+		}
 
+		rc = down_trylock(&mq->thread_sem);
+		if (rc && !wait) {
+			/*
+			 * Failed to take the lock so better to abort the
+			 * suspend because mmcqd thread is processing requests.
+			 */
+			clear_bit(MMC_QUEUE_SUSPENDED, &mq->flags);
+			spin_lock_irqsave(q->queue_lock, flags);
+			blk_start_queue(q);
+			spin_unlock_irqrestore(q->queue_lock, flags);
+			rc = -EBUSY;
+		} else if (rc && wait) {
 			down(&mq->thread_sem);
+			rc = 0;
 		}
 	}
+out:
+	return rc;
+}
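/*
 * Illustrative sketch (not part of the patch): the calling convention of
 * the reworked mmc_queue_suspend(). With wait == 0 the call is
 * best-effort and may return -EBUSY; with wait != 0 it blocks until the
 * queue drains (and, for cmdq, cleans the queue up), so it only suits
 * shutdown/reboot paths.
 *
 *	rc = mmc_queue_suspend(mq, 0);	// runtime/system suspend
 *	if (rc == -EBUSY)
 *		return rc;		// abort, requests still pending
 *	...
 *	mmc_queue_suspend(mq, 1);	// shutdown: wait for drain
 */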
 
 /**
  * mmc_queue_resume - resume a previously suspended MMC request queue
@@ -433,11 +789,12 @@
 void mmc_queue_resume(struct mmc_queue *mq)
 {
 	struct request_queue *q = mq->queue;
+	struct mmc_card *card = mq->card;
 	unsigned long flags;
 
-	if (mq->flags & MMC_QUEUE_SUSPENDED) {
-		mq->flags &= ~MMC_QUEUE_SUSPENDED;
+	if (test_and_clear_bit(MMC_QUEUE_SUSPENDED, &mq->flags)) {
 
+		if (!(card->cmdq_init && blk_queue_tagged(q)))
 		up(&mq->thread_sem);
 
 		spin_lock_irqsave(q->queue_lock, flags);
diff -ruw linux-4.4.115/drivers/mmc/card/queue.h linux-4.4.115-fbx/drivers/mmc/card/queue.h
--- linux-4.4.115/drivers/mmc/card/queue.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/mmc/card/queue.h	2019-01-22 16:16:24.763257817 +0100
@@ -42,28 +42,55 @@
 	struct mmc_async_req	mmc_active;
 	enum mmc_packed_type	cmd_type;
 	struct mmc_packed	*packed;
+	struct mmc_cmdq_req	cmdq_req;
 };
 
 struct mmc_queue {
 	struct mmc_card		*card;
 	struct task_struct	*thread;
 	struct semaphore	thread_sem;
-	unsigned int		flags;
-#define MMC_QUEUE_SUSPENDED	(1 << 0)
-#define MMC_QUEUE_NEW_REQUEST	(1 << 1)
+	unsigned long		flags;
+#define MMC_QUEUE_SUSPENDED		0
+#define MMC_QUEUE_NEW_REQUEST		1
 
 	int			(*issue_fn)(struct mmc_queue *, struct request *);
+	int (*cmdq_issue_fn)(struct mmc_queue *,
+			     struct request *);
+	void (*cmdq_complete_fn)(struct request *);
+	void (*cmdq_error_fn)(struct mmc_queue *);
+	enum blk_eh_timer_return (*cmdq_req_timed_out)(struct request *);
 	void			*data;
 	struct request_queue	*queue;
 	struct mmc_queue_req	mqrq[2];
 	struct mmc_queue_req	*mqrq_cur;
 	struct mmc_queue_req	*mqrq_prev;
+	struct mmc_queue_req	*mqrq_cmdq;
+	bool			wr_packing_enabled;
+	int			num_of_potential_packed_wr_reqs;
+	int			num_wr_reqs_to_start_packing;
+	bool			no_pack_for_random;
+	struct work_struct	cmdq_err_work;
+
+	struct completion	cmdq_pending_req_done;
+	struct completion	cmdq_shutdown_complete;
+	struct request		*cmdq_req_peeked;
+	int (*err_check_fn) (struct mmc_card *, struct mmc_async_req *);
+	void (*packed_test_fn) (struct request_queue *, struct mmc_queue_req *);
+	void (*cmdq_shutdown)(struct mmc_queue *);
+#ifdef CONFIG_MMC_SIMULATE_MAX_SPEED
+	atomic_t max_write_speed;
+	atomic_t max_read_speed;
+	atomic_t cache_size;
+	/* i/o tracking */
+	atomic_long_t cache_used;
+	unsigned long cache_jiffies;
+#endif
 };
 
 extern int mmc_init_queue(struct mmc_queue *, struct mmc_card *, spinlock_t *,
-			  const char *);
+			  const char *, int);
 extern void mmc_cleanup_queue(struct mmc_queue *);
-extern void mmc_queue_suspend(struct mmc_queue *);
+extern int mmc_queue_suspend(struct mmc_queue *, int);
 extern void mmc_queue_resume(struct mmc_queue *);
 
 extern unsigned int mmc_queue_map_sg(struct mmc_queue *,
@@ -76,4 +103,9 @@
 
 extern int mmc_access_rpmb(struct mmc_queue *);
 
+extern void print_mmc_packing_stats(struct mmc_card *card);
+
+extern int mmc_cmdq_init(struct mmc_queue *mq, struct mmc_card *card);
+extern void mmc_cmdq_clean(struct mmc_queue *mq, struct mmc_card *card);
+
 #endif
diff -ruw linux-4.4.115/drivers/mmc/core/bus.c linux-4.4.115-fbx/drivers/mmc/core/bus.c
--- linux-4.4.115/drivers/mmc/core/bus.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/mmc/core/bus.c	2019-10-29 09:26:24.057207230 +0100
@@ -132,6 +132,16 @@
 	struct mmc_host *host = card->host;
 	int ret;
 
+	if (!drv) {
+		pr_debug("%s: %s: drv is NULL\n", dev_name(dev), __func__);
+		return;
+	}
+
+	if (!card) {
+		pr_debug("%s: %s: card is NULL\n", dev_name(dev), __func__);
+		return;
+	}
+
 	if (dev->driver && drv->shutdown)
 		drv->shutdown(card);
 
@@ -154,10 +164,22 @@
 	if (ret)
 		return ret;
 
+	if (mmc_bus_needs_resume(host))
+		return 0;
 	ret = host->bus_ops->suspend(host);
+
+	/*
+	 * bus_ops->suspend may fail for some reason. If we returned the
+	 * error to the PM framework here without calling
+	 * pm_generic_resume(), mmc requests could get stuck, since the PM
+	 * framework would assume the mmc bus is not suspended (because of
+	 * the error) and would never call resume again.
+	 *
+	 * So, in case of error, call pm_generic_resume().
+	 */
 	if (ret)
 		pm_generic_resume(dev);
-
 	return ret;
 }
 
@@ -167,11 +189,17 @@
 	struct mmc_host *host = card->host;
 	int ret;
 
+	if (mmc_bus_manual_resume(host)) {
+		host->bus_resume_flags |= MMC_BUSRESUME_NEEDS_RESUME;
+		goto skip_full_resume;
+	}
+
 	ret = host->bus_ops->resume(host);
 	if (ret)
 		pr_warn("%s: error %d during resume (card was removed?)\n",
 			mmc_hostname(host), ret);
 
+skip_full_resume:
 	ret = pm_generic_resume(dev);
 	return ret;
 }
@@ -183,6 +211,9 @@
 	struct mmc_card *card = mmc_dev_to_card(dev);
 	struct mmc_host *host = card->host;
 
+	if (mmc_bus_needs_resume(host))
+		return 0;
+
 	return host->bus_ops->runtime_suspend(host);
 }
 
@@ -191,8 +222,12 @@
 	struct mmc_card *card = mmc_dev_to_card(dev);
 	struct mmc_host *host = card->host;
 
+	if (mmc_bus_needs_resume(host))
+		host->bus_resume_flags &= ~MMC_BUSRESUME_NEEDS_RESUME;
+
 	return host->bus_ops->runtime_resume(host);
 }
+
 #endif /* !CONFIG_PM */
 
 static const struct dev_pm_ops mmc_bus_pm_ops = {
@@ -276,6 +311,9 @@
 	card->dev.release = mmc_release_card;
 	card->dev.type = type;
 
+	spin_lock_init(&card->wr_pack_stats.lock);
+	spin_lock_init(&card->bkops.stats.lock);
+
 	return card;
 }
 
@@ -352,11 +390,18 @@
 
 	card->dev.of_node = mmc_of_find_child_device(card->host, 0);
 
+	if (mmc_card_sdio(card)) {
+		ret = device_init_wakeup(&card->dev, true);
+		if (ret)
+			pr_err("%s: %s: failed to init wakeup: %d\n",
+			       mmc_hostname(card->host), __func__, ret);
+	}
 	ret = device_add(&card->dev);
 	if (ret)
 		return ret;
 
 	mmc_card_set_present(card);
+	device_enable_async_suspend(&card->dev);
 
 	return 0;
 }
@@ -383,6 +428,8 @@
 		of_node_put(card->dev.of_node);
 	}
 
+	kfree(card->wr_pack_stats.packing_events);
+
 	put_device(&card->dev);
 }
 
diff -ruw linux-4.4.115/drivers/mmc/core/bus.h linux-4.4.115-fbx/drivers/mmc/core/bus.h
--- linux-4.4.115/drivers/mmc/core/bus.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/mmc/core/bus.h	2019-01-22 16:16:24.767257853 +0100
@@ -15,7 +15,7 @@
 static ssize_t mmc_##name##_show (struct device *dev, struct device_attribute *attr, char *buf)	\
 {										\
 	struct mmc_card *card = mmc_dev_to_card(dev);				\
-	return sprintf(buf, fmt, args);						\
+	return snprintf(buf, PAGE_SIZE, fmt, args);			\
 }										\
 static DEVICE_ATTR(name, S_IRUGO, mmc_##name##_show, NULL)
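/*
 * Illustrative note (not part of the patch): a sysfs show() buffer is
 * exactly one page, so the switch to snprintf() bounds the write, e.g.:
 *
 *	return snprintf(buf, PAGE_SIZE, "0x%08x\n", card->raw_cid[0]);
 *
 * sprintf() would trust the format/args pair to stay under PAGE_SIZE.
 */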
 
diff -ruw linux-4.4.115/drivers/mmc/core/core.c linux-4.4.115-fbx/drivers/mmc/core/core.c
--- linux-4.4.115/drivers/mmc/core/core.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/mmc/core/core.c	2019-10-29 09:26:24.057207230 +0100
@@ -14,6 +14,7 @@
 #include <linux/init.h>
 #include <linux/interrupt.h>
 #include <linux/completion.h>
+#include <linux/devfreq.h>
 #include <linux/device.h>
 #include <linux/delay.h>
 #include <linux/pagemap.h>
@@ -29,6 +30,11 @@
 #include <linux/random.h>
 #include <linux/slab.h>
 #include <linux/of.h>
+#include <linux/pm.h>
+#include <linux/jiffies.h>
+
+#define CREATE_TRACE_POINTS
+#include <trace/events/mmc.h>
 
 #include <linux/mmc/card.h>
 #include <linux/mmc/host.h>
@@ -46,6 +52,11 @@
 #include "sd_ops.h"
 #include "sdio_ops.h"
 
+EXPORT_TRACEPOINT_SYMBOL_GPL(mmc_blk_erase_start);
+EXPORT_TRACEPOINT_SYMBOL_GPL(mmc_blk_erase_end);
+EXPORT_TRACEPOINT_SYMBOL_GPL(mmc_blk_rw_start);
+EXPORT_TRACEPOINT_SYMBOL_GPL(mmc_blk_rw_end);
+
 /* If the device is not responding */
 #define MMC_CORE_TIMEOUT_MS	(10 * 60 * 1000) /* 10 minute timeout */
 
@@ -109,6 +120,7 @@
 
 	data->error = data_errors[prandom_u32() % ARRAY_SIZE(data_errors)];
 	data->bytes_xfered = (prandom_u32() % (data->bytes_xfered >> 9)) << 9;
+	data->fault_injected = true;
 }
 
 #else /* CONFIG_FAIL_MMC_REQUEST */
@@ -120,6 +132,816 @@
 
 #endif /* CONFIG_FAIL_MMC_REQUEST */
 
+static bool mmc_is_data_request(struct mmc_request *mmc_request)
+{
+	switch (mmc_request->cmd->opcode) {
+	case MMC_READ_SINGLE_BLOCK:
+	case MMC_READ_MULTIPLE_BLOCK:
+	case MMC_WRITE_BLOCK:
+	case MMC_WRITE_MULTIPLE_BLOCK:
+		return true;
+	default:
+		return false;
+	}
+}
+
+static void mmc_clk_scaling_start_busy(struct mmc_host *host, bool lock_needed)
+{
+	struct mmc_devfeq_clk_scaling *clk_scaling = &host->clk_scaling;
+
+	if (!clk_scaling->enable)
+		return;
+
+	if (lock_needed)
+		spin_lock_bh(&clk_scaling->lock);
+
+	clk_scaling->start_busy = ktime_get();
+	clk_scaling->is_busy_started = true;
+
+	if (lock_needed)
+		spin_unlock_bh(&clk_scaling->lock);
+}
+
+static void mmc_clk_scaling_stop_busy(struct mmc_host *host, bool lock_needed)
+{
+	struct mmc_devfeq_clk_scaling *clk_scaling = &host->clk_scaling;
+
+	if (!clk_scaling->enable)
+		return;
+
+	if (lock_needed)
+		spin_lock_bh(&clk_scaling->lock);
+
+	if (!clk_scaling->is_busy_started) {
+		WARN_ON(1);
+		goto out;
+	}
+
+	clk_scaling->total_busy_time_us +=
+		ktime_to_us(ktime_sub(ktime_get(),
+			clk_scaling->start_busy));
+	pr_debug("%s: accumulated busy time is %lu usec\n",
+		mmc_hostname(host), clk_scaling->total_busy_time_us);
+	clk_scaling->is_busy_started = false;
+
+out:
+	if (lock_needed)
+		spin_unlock_bh(&clk_scaling->lock);
+}
+
+/**
+ * mmc_cmdq_clk_scaling_start_busy() - start busy timer for data requests
+ * @host: pointer to mmc host structure
+ * @lock_needed: flag indicating whether locking is needed
+ *
+ * This function starts the busy timer if it was not already started.
+ */
+void mmc_cmdq_clk_scaling_start_busy(struct mmc_host *host,
+	bool lock_needed)
+{
+	if (!host->clk_scaling.enable)
+		return;
+
+	if (lock_needed)
+		spin_lock_bh(&host->clk_scaling.lock);
+
+	if (!host->clk_scaling.is_busy_started &&
+		!test_bit(CMDQ_STATE_DCMD_ACTIVE,
+			&host->cmdq_ctx.curr_state)) {
+		host->clk_scaling.start_busy = ktime_get();
+		host->clk_scaling.is_busy_started = true;
+	}
+
+	if (lock_needed)
+		spin_unlock_bh(&host->clk_scaling.lock);
+}
+EXPORT_SYMBOL(mmc_cmdq_clk_scaling_start_busy);
+
+/**
+ * mmc_cmdq_clk_scaling_stop_busy() - stop busy timer for last data requests
+ * @host: pointer to mmc host structure
+ * @lock_needed: flag indicating whether locking is needed
+ * @is_cmdq_dcmd: true if the completed request is a CMDQ DCMD request
+ *
+ * This function stops the busy timer if this is the last data request.
+ * If the current request is not the last one, the busy time accumulated
+ * so far is recorded and the counter is restarted.
+ */
+void mmc_cmdq_clk_scaling_stop_busy(struct mmc_host *host,
+	bool lock_needed, bool is_cmdq_dcmd)
+{
+	if (!host->clk_scaling.enable)
+		return;
+
+	if (lock_needed)
+		spin_lock_bh(&host->clk_scaling.lock);
+
+	/*
+	 * For CQ mode: on completion of a DCMD request, restart the busy
+	 * timer if there are pending data requests.
+	 */
+	if (is_cmdq_dcmd) {
+		if (host->cmdq_ctx.data_active_reqs) {
+			host->clk_scaling.is_busy_started = true;
+			host->clk_scaling.start_busy = ktime_get();
+		}
+		goto out;
+	}
+
+	host->clk_scaling.total_busy_time_us +=
+		ktime_to_us(ktime_sub(ktime_get(),
+			host->clk_scaling.start_busy));
+
+	if (host->cmdq_ctx.data_active_reqs) {
+		host->clk_scaling.is_busy_started = true;
+		host->clk_scaling.start_busy = ktime_get();
+	} else {
+		host->clk_scaling.is_busy_started = false;
+	}
+out:
+	if (lock_needed)
+		spin_unlock_bh(&host->clk_scaling.lock);
+}
+EXPORT_SYMBOL(mmc_cmdq_clk_scaling_stop_busy);
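/*
 * Illustrative note (not part of the patch): the busy/total bookkeeping
 * above feeds devfreq, whose simple_ondemand governor effectively
 * evaluates
 *
 *	load % = (busy_time * 100) / total_time
 *
 * and scales up when the load exceeds 'upthreshold', or down when it
 * falls below 'upthreshold - downdifferential' (both are configured in
 * mmc_init_clk_scaling() below).
 */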
+
+/**
+ * mmc_can_scale_clk() - Check clock scaling capability
+ * @host: pointer to mmc host structure
+ */
+bool mmc_can_scale_clk(struct mmc_host *host)
+{
+	if (!host) {
+		pr_err("bad host parameter\n");
+		WARN_ON(1);
+		return false;
+	}
+
+	return host->caps2 & MMC_CAP2_CLK_SCALE;
+}
+EXPORT_SYMBOL(mmc_can_scale_clk);
+
+static int mmc_devfreq_get_dev_status(struct device *dev,
+		struct devfreq_dev_status *status)
+{
+	struct mmc_host *host = container_of(dev, struct mmc_host, class_dev);
+	struct mmc_devfeq_clk_scaling *clk_scaling;
+
+	if (!host) {
+		pr_err("bad host parameter\n");
+		WARN_ON(1);
+		return -EINVAL;
+	}
+
+	clk_scaling = &host->clk_scaling;
+
+	if (!clk_scaling->enable)
+		return 0;
+
+	spin_lock_bh(&clk_scaling->lock);
+
+	/* accumulate the busy time of ongoing work */
+	memset(status, 0, sizeof(*status));
+	if (clk_scaling->is_busy_started) {
+		if (mmc_card_cmdq(host->card)) {
+			/*
+			 * the "busy-timer" will be restarted in case
+			 * there are pending data requests
+			 */
+			mmc_cmdq_clk_scaling_stop_busy(host, false, false);
+		} else {
+			mmc_clk_scaling_stop_busy(host, false);
+			mmc_clk_scaling_start_busy(host, false);
+		}
+	}
+
+	status->busy_time = clk_scaling->total_busy_time_us;
+	status->total_time = ktime_to_us(ktime_sub(ktime_get(),
+		clk_scaling->measure_interval_start));
+	clk_scaling->total_busy_time_us = 0;
+	status->current_frequency = clk_scaling->curr_freq;
+	clk_scaling->measure_interval_start = ktime_get();
+
+	pr_debug("%s: status: load = %lu%% - total_time=%lu busy_time = %lu, clk=%lu\n",
+		mmc_hostname(host),
+		(status->busy_time*100)/status->total_time,
+		status->total_time, status->busy_time,
+		status->current_frequency);
+
+	spin_unlock_bh(&clk_scaling->lock);
+
+	return 0;
+}
+
+static bool mmc_is_valid_state_for_clk_scaling(struct mmc_host *host)
+{
+	struct mmc_card *card = host->card;
+	u32 status;
+
+	/*
+	 * If the current partition type is RPMB, clock switching may not
+	 * work properly as sending tuning command (CMD21) is illegal in
+	 * this mode.
+	 */
+	if (!card || (mmc_card_mmc(card) &&
+			(card->part_curr == EXT_CSD_PART_CONFIG_ACC_RPMB ||
+			mmc_card_doing_bkops(card))))
+		return false;
+
+	if (mmc_send_status(card, &status)) {
+		pr_err("%s: failed to get card status\n", mmc_hostname(card->host));
+		return false;
+	}
+
+	return R1_CURRENT_STATE(status) == R1_STATE_TRAN;
+}
+
+int mmc_cmdq_halt_on_empty_queue(struct mmc_host *host)
+{
+	int err = 0;
+
+	err = wait_event_interruptible(host->cmdq_ctx.queue_empty_wq,
+				(!host->cmdq_ctx.active_reqs));
+	if (host->cmdq_ctx.active_reqs) {
+		pr_err("%s: %s: unexpected active requests (%lu)\n",
+			mmc_hostname(host), __func__,
+			host->cmdq_ctx.active_reqs);
+		return -EPERM;
+	}
+
+	err = mmc_cmdq_halt(host, true);
+	if (err) {
+		pr_err("%s: %s: mmc_cmdq_halt failed (%d)\n",
+		       mmc_hostname(host), __func__, err);
+		goto out;
+	}
+
+out:
+	return err;
+}
+EXPORT_SYMBOL(mmc_cmdq_halt_on_empty_queue);
+
+int mmc_clk_update_freq(struct mmc_host *host,
+		unsigned long freq, enum mmc_load state)
+{
+	int err = 0;
+	bool cmdq_mode;
+
+	if (!host) {
+		pr_err("bad host parameter\n");
+		WARN_ON(1);
+		return -EINVAL;
+	}
+
+	mmc_host_clk_hold(host);
+	cmdq_mode = mmc_card_cmdq(host->card);
+
+	/* make sure the card supports the frequency we want */
+	if (unlikely(freq > host->card->clk_scaling_highest)) {
+		freq = host->card->clk_scaling_highest;
+		pr_warn("%s: %s: frequency was overridden to %lu\n",
+				mmc_hostname(host), __func__,
+				host->card->clk_scaling_highest);
+	}
+
+	if (unlikely(freq < host->card->clk_scaling_lowest)) {
+		freq = host->card->clk_scaling_lowest;
+		pr_warn("%s: %s: frequency was overridden to %lu\n",
+			mmc_hostname(host), __func__,
+			host->card->clk_scaling_lowest);
+	}
+
+	if (freq == host->clk_scaling.curr_freq)
+		goto out;
+
+	if (host->ops->notify_load) {
+		err = host->ops->notify_load(host, state);
+		if (err) {
+			pr_err("%s: %s: fail on notify_load\n",
+				mmc_hostname(host), __func__);
+			goto out;
+		}
+	}
+
+	if (cmdq_mode) {
+		err = mmc_cmdq_halt_on_empty_queue(host);
+		if (err) {
+			pr_err("%s: %s: failed halting queue (%d)\n",
+				mmc_hostname(host), __func__, err);
+			goto halt_failed;
+		}
+	}
+
+	if (!mmc_is_valid_state_for_clk_scaling(host)) {
+		pr_debug("%s: invalid state for clock scaling - skipping\n",
+			mmc_hostname(host));
+		goto invalid_state;
+	}
+
+	err = host->bus_ops->change_bus_speed(host, &freq);
+	if (!err)
+		host->clk_scaling.curr_freq = freq;
+	else
+		pr_err("%s: %s: failed (%d) at freq=%lu\n",
+			mmc_hostname(host), __func__, err, freq);
+
+invalid_state:
+	if (cmdq_mode) {
+		if (mmc_cmdq_halt(host, false))
+			pr_err("%s: %s: cmdq unhalt failed\n",
+			mmc_hostname(host), __func__);
+	}
+
+halt_failed:
+	if (err) {
+		/* restore previous state */
+		if (host->ops->notify_load)
+			if (host->ops->notify_load(host,
+				host->clk_scaling.state))
+				pr_err("%s: %s: fail on notify_load restore\n",
+					mmc_hostname(host), __func__);
+	}
+out:
+	mmc_host_clk_release(host);
+	return err;
+}
+EXPORT_SYMBOL(mmc_clk_update_freq);
+
+int mmc_recovery_fallback_lower_speed(struct mmc_host *host)
+{
+	int err = 0;
+
+	if (!host->card)
+		return -EINVAL;
+
+	if (host->sdr104_wa && mmc_card_sd(host->card) &&
+	    (host->ios.timing == MMC_TIMING_UHS_SDR104) &&
+	    !host->card->sdr104_blocked) {
+		pr_err("%s: %s: blocked SDR104, lowering the bus speed (SDR50 / DDR50)\n",
+			mmc_hostname(host), __func__);
+		mmc_host_clear_sdr104(host);
+		err = mmc_hw_reset(host);
+		host->card->sdr104_blocked = true;
+	} else {
+		/* If sdr104_wa is not present, just return status */
+		err = host->bus_ops->alive(host);
+	}
+	if (err)
+		pr_err("%s: %s: Fallback to lower speed mode failed with err=%d\n",
+			mmc_hostname(host), __func__, err);
+
+	return err;
+}
+
+static int mmc_devfreq_set_target(struct device *dev,
+				unsigned long *freq, u32 devfreq_flags)
+{
+	struct mmc_host *host = container_of(dev, struct mmc_host, class_dev);
+	struct mmc_devfeq_clk_scaling *clk_scaling;
+	int err = 0;
+	int abort;
+	unsigned long pflags = current->flags;
+
+	/* Ensure scaling would happen even in memory pressure conditions */
+	current->flags |= PF_MEMALLOC;
+
+	if (!(host && freq)) {
+		pr_err("%s: unexpected host/freq parameter\n", __func__);
+		err = -EINVAL;
+		goto out;
+	}
+
+	clk_scaling = &host->clk_scaling;
+
+	if (!clk_scaling->enable)
+		goto out;
+
+	pr_debug("%s: target freq = %lu (%s)\n", mmc_hostname(host),
+		*freq, current->comm);
+
+	if ((clk_scaling->curr_freq == *freq) ||
+		clk_scaling->skip_clk_scale_freq_update)
+		goto out;
+
+	/* No need to scale the clocks if they are gated */
+	if (!host->ios.clock)
+		goto out;
+
+	spin_lock_bh(&clk_scaling->lock);
+	if (clk_scaling->clk_scaling_in_progress) {
+		pr_debug("%s: clocks scaling is already in-progress by mmc thread\n",
+			mmc_hostname(host));
+		spin_unlock_bh(&clk_scaling->lock);
+		goto out;
+	}
+	clk_scaling->need_freq_change = true;
+	clk_scaling->target_freq = *freq;
+	clk_scaling->state = *freq < clk_scaling->curr_freq ?
+		MMC_LOAD_LOW : MMC_LOAD_HIGH;
+	spin_unlock_bh(&clk_scaling->lock);
+
+	abort = __mmc_claim_host(host, &clk_scaling->devfreq_abort);
+	if (abort)
+		goto out;
+
+	if (mmc_card_sd(host->card) && host->card->sdr104_blocked)
+		goto rel_host;
+
+	/*
+	 * Since we were able to claim the host, there is no need to
+	 * defer the frequency change; it will be done now.
+	 */
+	clk_scaling->need_freq_change = false;
+
+	mmc_host_clk_hold(host);
+	err = mmc_clk_update_freq(host, *freq, clk_scaling->state);
+	if (err && err != -EAGAIN) {
+		pr_err("%s: clock scale to %lu failed with error %d\n",
+			mmc_hostname(host), *freq, err);
+		err = mmc_recovery_fallback_lower_speed(host);
+	} else {
+		pr_debug("%s: clock change to %lu finished successfully (%s)\n",
+			mmc_hostname(host), *freq, current->comm);
+	}
+
+	mmc_host_clk_release(host);
+rel_host:
+	mmc_release_host(host);
+out:
+	tsk_restore_flags(current, pflags, PF_MEMALLOC);
+	return err;
+}
+
+/**
+ * mmc_deferred_scaling() - scale clocks from data path (mmc thread context)
+ * @host: pointer to mmc host structure
+ *
+ * This function does clock scaling in case "need_freq_change" flag was set
+ * by the clock scaling logic.
+ */
+void mmc_deferred_scaling(struct mmc_host *host)
+{
+	unsigned long target_freq;
+	int err;
+
+	if (!host->clk_scaling.enable)
+		return;
+
+	if (mmc_card_sd(host->card) && host->card->sdr104_blocked)
+		return;
+
+	spin_lock_bh(&host->clk_scaling.lock);
+
+	if (host->clk_scaling.clk_scaling_in_progress ||
+		!(host->clk_scaling.need_freq_change)) {
+		spin_unlock_bh(&host->clk_scaling.lock);
+		return;
+	}
+
+	atomic_inc(&host->clk_scaling.devfreq_abort);
+	target_freq = host->clk_scaling.target_freq;
+	host->clk_scaling.clk_scaling_in_progress = true;
+	host->clk_scaling.need_freq_change = false;
+	spin_unlock_bh(&host->clk_scaling.lock);
+	pr_debug("%s: doing deferred frequency change (%lu) (%s)\n",
+				mmc_hostname(host),
+				target_freq, current->comm);
+
+	err = mmc_clk_update_freq(host, target_freq,
+		host->clk_scaling.state);
+	if (err && err != -EAGAIN) {
+		pr_err("%s: failed on deferred scale clocks (%d)\n",
+			mmc_hostname(host), err);
+		mmc_recovery_fallback_lower_speed(host);
+	} else {
+		pr_debug("%s: clocks were successfully scaled to %lu (%s)\n",
+			mmc_hostname(host),
+			target_freq, current->comm);
+	}
+	host->clk_scaling.clk_scaling_in_progress = false;
+	atomic_dec(&host->clk_scaling.devfreq_abort);
+}
+EXPORT_SYMBOL(mmc_deferred_scaling);
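/*
 * Illustrative sketch (not part of the patch): the deferred-scaling
 * handshake. When mmc_devfreq_set_target() cannot claim the host it only
 * records target_freq/need_freq_change, and the data path applies the
 * change before issuing the next data request (as done below before
 * __mmc_start_request()):
 *
 *	mmc_deferred_scaling(host);		// apply pending change
 *	mmc_clk_scaling_start_busy(host, true);	// then account busy time
 */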
+
+static int mmc_devfreq_create_freq_table(struct mmc_host *host)
+{
+	int i;
+	struct mmc_devfeq_clk_scaling *clk_scaling = &host->clk_scaling;
+
+	pr_debug("%s: supported: lowest=%lu, highest=%lu\n",
+		mmc_hostname(host),
+		host->card->clk_scaling_lowest,
+		host->card->clk_scaling_highest);
+
+	/*
+	 * Create the frequency table and initialize it with default values.
+	 * Initialize it with platform specific frequencies if the frequency
+	 * table supplied by platform driver is present, otherwise initialize
+	 * it with min and max frequencies supported by the card.
+	 */
+	if (!clk_scaling->freq_table) {
+		if (clk_scaling->pltfm_freq_table_sz)
+			clk_scaling->freq_table_sz =
+				clk_scaling->pltfm_freq_table_sz;
+		else
+			clk_scaling->freq_table_sz = 2;
+
+		clk_scaling->freq_table = kzalloc(
+			(clk_scaling->freq_table_sz *
+			sizeof(*(clk_scaling->freq_table))), GFP_KERNEL);
+		if (!clk_scaling->freq_table)
+			return -ENOMEM;
+
+		if (clk_scaling->pltfm_freq_table) {
+			memcpy(clk_scaling->freq_table,
+				clk_scaling->pltfm_freq_table,
+				(clk_scaling->pltfm_freq_table_sz *
+				sizeof(*(clk_scaling->pltfm_freq_table))));
+		} else {
+			pr_debug("%s: no frequency table defined - setting default\n",
+				mmc_hostname(host));
+			clk_scaling->freq_table[0] =
+				host->card->clk_scaling_lowest;
+			clk_scaling->freq_table[1] =
+				host->card->clk_scaling_highest;
+			goto out;
+		}
+	}
+
+	if (host->card->clk_scaling_lowest >
+		clk_scaling->freq_table[0])
+		pr_debug("%s: frequency table undershot possible freq\n",
+			mmc_hostname(host));
+
+	for (i = 0; i < clk_scaling->freq_table_sz; i++) {
+		if (clk_scaling->freq_table[i] <=
+			host->card->clk_scaling_highest)
+			continue;
+		clk_scaling->freq_table[i] =
+			host->card->clk_scaling_highest;
+		clk_scaling->freq_table_sz = i + 1;
+		pr_debug("%s: frequency table overshot possible freq (%d)\n",
+				mmc_hostname(host), clk_scaling->freq_table[i]);
+		break;
+	}
+
+out:
+	clk_scaling->devfreq_profile.freq_table = clk_scaling->freq_table;
+	clk_scaling->devfreq_profile.max_state = clk_scaling->freq_table_sz;
+
+	for (i = 0; i < clk_scaling->freq_table_sz; i++)
+		pr_debug("%s: freq[%d] = %u\n",
+			mmc_hostname(host), i, clk_scaling->freq_table[i]);
+
+	return 0;
+}
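/*
 * Illustrative note (not part of the patch): with no platform table this
 * degenerates to a two-point table spanning what the card supports
 * (values below are only an example):
 *
 *	freq_table[0] = card->clk_scaling_lowest;	// e.g. 52 MHz
 *	freq_table[1] = card->clk_scaling_highest;	// e.g. 200 MHz
 */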
+
+/**
+ * mmc_init_clk_scaling() - Initialize clock scaling
+ * @host: pointer to mmc host structure
+ *
+ * Initialize clock scaling for supported hosts. It is assumed that the
+ * caller ensures the clock is running at the maximum possible frequency
+ * before calling this function. The simple_ondemand governor is
+ * configured via struct devfreq_simple_ondemand_data.
+ */
+int mmc_init_clk_scaling(struct mmc_host *host)
+{
+	int err;
+
+	if (!host || !host->card) {
+		pr_err("%s: unexpected host/card parameters\n",
+			__func__);
+		return -EINVAL;
+	}
+
+	if (!mmc_can_scale_clk(host) ||
+		!host->bus_ops->change_bus_speed) {
+		pr_debug("%s: clock scaling is not supported\n",
+			mmc_hostname(host));
+		return 0;
+	}
+
+	pr_debug("registering %s dev (%p) to devfreq\n",
+		mmc_hostname(host),
+		mmc_classdev(host));
+
+	if (host->clk_scaling.devfreq) {
+		pr_err("%s: dev is already registered for dev %p\n",
+			mmc_hostname(host),
+			mmc_dev(host));
+		return -EPERM;
+	}
+	spin_lock_init(&host->clk_scaling.lock);
+	atomic_set(&host->clk_scaling.devfreq_abort, 0);
+	host->clk_scaling.curr_freq = host->ios.clock;
+	host->clk_scaling.clk_scaling_in_progress = false;
+	host->clk_scaling.need_freq_change = false;
+	host->clk_scaling.is_busy_started = false;
+
+	host->clk_scaling.devfreq_profile.polling_ms =
+		host->clk_scaling.polling_delay_ms;
+	host->clk_scaling.devfreq_profile.get_dev_status =
+		mmc_devfreq_get_dev_status;
+	host->clk_scaling.devfreq_profile.target = mmc_devfreq_set_target;
+	host->clk_scaling.devfreq_profile.initial_freq = host->ios.clock;
+
+	host->clk_scaling.ondemand_gov_data.simple_scaling = true;
+	host->clk_scaling.ondemand_gov_data.upthreshold =
+		host->clk_scaling.upthreshold;
+	host->clk_scaling.ondemand_gov_data.downdifferential =
+		host->clk_scaling.upthreshold - host->clk_scaling.downthreshold;
+
+	err = mmc_devfreq_create_freq_table(host);
+	if (err) {
+		pr_err("%s: failed to create devfreq frequency table\n",
+			mmc_hostname(host));
+		return err;
+	}
+
+	pr_debug("%s: adding devfreq with: upthreshold=%u downthreshold=%u polling=%u\n",
+		mmc_hostname(host),
+		host->clk_scaling.ondemand_gov_data.upthreshold,
+		host->clk_scaling.ondemand_gov_data.downdifferential,
+		host->clk_scaling.devfreq_profile.polling_ms);
+	host->clk_scaling.devfreq = devfreq_add_device(
+		mmc_classdev(host),
+		&host->clk_scaling.devfreq_profile,
+		"simple_ondemand",
+		&host->clk_scaling.ondemand_gov_data);
+	if (IS_ERR(host->clk_scaling.devfreq)) {
+		pr_err("%s: unable to register with devfreq\n",
+			mmc_hostname(host));
+		return PTR_ERR(host->clk_scaling.devfreq);
+	}
+
+	pr_debug("%s: clk scaling is enabled for device %s (%p) with devfreq %p (clock = %uHz)\n",
+		mmc_hostname(host),
+		dev_name(mmc_classdev(host)),
+		mmc_classdev(host),
+		host->clk_scaling.devfreq,
+		host->ios.clock);
+
+	host->clk_scaling.enable = true;
+
+	return err;
+}
+EXPORT_SYMBOL(mmc_init_clk_scaling);
+
+/**
+ * mmc_suspend_clk_scaling() - suspend clock scaling
+ * @host: pointer to mmc host structure
+ *
+ * This API will suspend the devfreq feature for the specific host.
+ * The statistics collected by mmc will be cleared.
+ * This function is intended to be called by the pm callbacks
+ * (e.g. runtime_suspend, suspend) of the mmc device.
+ */
+int mmc_suspend_clk_scaling(struct mmc_host *host)
+{
+	int err;
+
+	if (!host) {
+		WARN(1, "bad host parameter\n");
+		return -EINVAL;
+	}
+
+	if (!mmc_can_scale_clk(host) || !host->clk_scaling.enable)
+		return 0;
+
+	if (!host->clk_scaling.devfreq) {
+		pr_err("%s: %s: no devfreq is associated with this device\n",
+			mmc_hostname(host), __func__);
+		return -EPERM;
+	}
+
+	atomic_inc(&host->clk_scaling.devfreq_abort);
+	wake_up(&host->wq);
+	err = devfreq_suspend_device(host->clk_scaling.devfreq);
+	if (err) {
+		pr_err("%s: %s: failed to suspend devfreq\n",
+			mmc_hostname(host), __func__);
+		return err;
+	}
+	host->clk_scaling.enable = false;
+
+	host->clk_scaling.total_busy_time_us = 0;
+
+	pr_debug("%s: devfreq suspended\n", mmc_hostname(host));
+
+	return 0;
+}
+EXPORT_SYMBOL(mmc_suspend_clk_scaling);
+
+/**
+ * mmc_resume_clk_scaling() - resume clock scaling
+ * @host: pointer to mmc host structure
+ *
+ * This API will resume the devfreq feature for the specific host.
+ * This API is intended to be called by the pm callbacks
+ * (e.g. runtime_resume, resume) of the mmc device.
+ */
+int mmc_resume_clk_scaling(struct mmc_host *host)
+{
+	int err = 0;
+	u32 max_clk_idx = 0;
+	u32 devfreq_max_clk = 0;
+	u32 devfreq_min_clk = 0;
+
+	if (!host) {
+		WARN(1, "bad host parameter\n");
+		return -EINVAL;
+	}
+
+	if (!mmc_can_scale_clk(host))
+		return 0;
+
+	/*
+	 * If clock scaling has already been exited when resume is called,
+	 * e.g. during mmc shutdown, it is not an error and should not fail
+	 * the caller.
+	 */
+	if (!host->clk_scaling.devfreq) {
+		pr_warn("%s: %s: no devfreq is associated with this device\n",
+			mmc_hostname(host), __func__);
+		return 0;
+	}
+
+	atomic_set(&host->clk_scaling.devfreq_abort, 0);
+
+	max_clk_idx = host->clk_scaling.freq_table_sz - 1;
+	devfreq_max_clk = host->clk_scaling.freq_table[max_clk_idx];
+	devfreq_min_clk = host->clk_scaling.freq_table[0];
+
+	host->clk_scaling.curr_freq = devfreq_max_clk;
+	if (host->ios.clock < host->clk_scaling.freq_table[max_clk_idx])
+		host->clk_scaling.curr_freq = devfreq_min_clk;
+
+	host->clk_scaling.clk_scaling_in_progress = false;
+	host->clk_scaling.need_freq_change = false;
+
+	err = devfreq_resume_device(host->clk_scaling.devfreq);
+	if (err) {
+		pr_err("%s: %s: failed to resume devfreq (%d)\n",
+			mmc_hostname(host), __func__, err);
+	} else {
+		host->clk_scaling.enable = true;
+		pr_debug("%s: devfreq resumed\n", mmc_hostname(host));
+	}
+
+	return err;
+}
+EXPORT_SYMBOL(mmc_resume_clk_scaling);
+
+/**
+ * mmc_exit_clk_scaling() - Disable clock scaling
+ * @host: pointer to mmc host structure
+ *
+ * Disable clock scaling permanently.
+ */
+int mmc_exit_clk_scaling(struct mmc_host *host)
+{
+	int err;
+
+	if (!host) {
+		pr_err("%s: bad host parameter\n", __func__);
+		WARN_ON(1);
+		return -EINVAL;
+	}
+
+	if (!mmc_can_scale_clk(host))
+		return 0;
+
+	if (!host->clk_scaling.devfreq) {
+		pr_err("%s: %s: no devfreq is associated with this device\n",
+			mmc_hostname(host), __func__);
+		return -EPERM;
+	}
+
+	err = mmc_suspend_clk_scaling(host);
+	if (err) {
+		pr_err("%s: %s: failed to suspend clock scaling (%d)\n",
+			mmc_hostname(host), __func__, err);
+		return err;
+	}
+
+	err = devfreq_remove_device(host->clk_scaling.devfreq);
+	if (err) {
+		pr_err("%s: remove devfreq failed (%d)\n",
+			mmc_hostname(host), err);
+		return err;
+	}
+
+	host->clk_scaling.devfreq = NULL;
+	atomic_set(&host->clk_scaling.devfreq_abort, 1);
+
+	kfree(host->clk_scaling.freq_table);
+	host->clk_scaling.freq_table = NULL;
+
+	pr_debug("%s: devfreq was removed\n", mmc_hostname(host));
+
+	return 0;
+}
+EXPORT_SYMBOL(mmc_exit_clk_scaling);
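/*
 * Illustrative sketch (not part of the patch): the lifecycle of these
 * hooks for a host with MMC_CAP2_CLK_SCALE set:
 *
 *	mmc_init_clk_scaling(host);	// after card init, at max frequency
 *	...
 *	mmc_suspend_clk_scaling(host);	// from (runtime_)suspend
 *	mmc_resume_clk_scaling(host);	// from (runtime_)resume
 *	...
 *	mmc_exit_clk_scaling(host);	// at shutdown/removal
 */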
+
 /**
  *	mmc_request_done - finish processing an MMC request
  *	@host: MMC host which completed request
@@ -132,6 +954,12 @@
 {
 	struct mmc_command *cmd = mrq->cmd;
 	int err = cmd->error;
+#ifdef CONFIG_MMC_PERF_PROFILING
+	ktime_t diff;
+#endif
+
+	if (host->clk_scaling.is_busy_started)
+		mmc_clk_scaling_stop_busy(host, true);
 
 	/* Flag re-tuning needed on CRC errors */
 	if ((cmd->opcode != MMC_SEND_TUNING_BLOCK &&
@@ -172,9 +1000,42 @@
 			cmd->resp[2], cmd->resp[3]);
 
 		if (mrq->data) {
+#ifdef CONFIG_MMC_PERF_PROFILING
+			if (host->perf_enable) {
+				diff = ktime_sub(ktime_get(), host->perf.start);
+				if (mrq->data->flags == MMC_DATA_READ) {
+					host->perf.rbytes_drv +=
+							mrq->data->bytes_xfered;
+					host->perf.rtime_drv =
+						ktime_add(host->perf.rtime_drv,
+							diff);
+				} else {
+					host->perf.wbytes_drv +=
+						mrq->data->bytes_xfered;
+					host->perf.wtime_drv =
+						ktime_add(host->perf.wtime_drv,
+							diff);
+				}
+			}
+#endif
 			pr_debug("%s:     %d bytes transferred: %d\n",
 				mmc_hostname(host),
 				mrq->data->bytes_xfered, mrq->data->error);
+#ifdef CONFIG_BLOCK
+			if (mrq->lat_hist_enabled) {
+				ktime_t completion;
+				u_int64_t delta_us;
+
+				completion = ktime_get();
+				delta_us = ktime_us_delta(completion,
+							  mrq->io_start);
+				blk_update_latency_hist(
+					(mrq->data->flags & MMC_DATA_READ) ?
+					&host->io_lat_read :
+					&host->io_lat_write, delta_us);
+			}
+#endif
+			trace_mmc_blk_rw_end(cmd->opcode, cmd->arg, mrq->data);
 		}
 
 		if (mrq->stop) {
@@ -187,6 +1048,8 @@
 
 		if (mrq->done)
 			mrq->done(mrq);
+
+		mmc_host_clk_release(host);
 	}
 }
 
@@ -289,32 +1152,206 @@
 			mrq->stop->error = 0;
 			mrq->stop->mrq = mrq;
 		}
+#ifdef CONFIG_MMC_PERF_PROFILING
+		if (host->perf_enable)
+			host->perf.start = ktime_get();
+#endif
 	}
+	mmc_host_clk_hold(host);
 	led_trigger_event(host->led, LED_FULL);
+
+	if (mmc_is_data_request(mrq)) {
+		mmc_deferred_scaling(host);
+		mmc_clk_scaling_start_busy(host, true);
+	}
+
 	__mmc_start_request(host, mrq);
 
 	return 0;
 }
 
+static int mmc_start_cmdq_request(struct mmc_host *host,
+				   struct mmc_request *mrq)
+{
+	int ret = 0;
+
+	if (mrq->data) {
+		pr_debug("%s:     blksz %d blocks %d flags %08x tsac %lu ms nsac %d\n",
+			mmc_hostname(host), mrq->data->blksz,
+			mrq->data->blocks, mrq->data->flags,
+			mrq->data->timeout_ns / NSEC_PER_MSEC,
+			mrq->data->timeout_clks);
+
+		BUG_ON(mrq->data->blksz > host->max_blk_size);
+		BUG_ON(mrq->data->blocks > host->max_blk_count);
+		BUG_ON(mrq->data->blocks * mrq->data->blksz >
+			host->max_req_size);
+		mrq->data->error = 0;
+		mrq->data->mrq = mrq;
+	}
+
+	if (mrq->cmd) {
+		mrq->cmd->error = 0;
+		mrq->cmd->mrq = mrq;
+	}
+
+	mmc_host_clk_hold(host);
+	if (likely(host->cmdq_ops->request)) {
+		ret = host->cmdq_ops->request(host, mrq);
+	} else {
+		ret = -ENOENT;
+		pr_err("%s: %s: cmdq request host op is not available\n",
+			mmc_hostname(host), __func__);
+	}
+
+	if (ret) {
+		mmc_host_clk_release(host);
+		pr_err("%s: %s: issue request failed, err=%d\n",
+			mmc_hostname(host), __func__, ret);
+	}
+
+	return ret;
+}
+
+/**
+ *	mmc_blk_init_bkops_statistics - initialize bkops statistics
+ *	@card: MMC card to start BKOPS
+ *
+ *	Initialize and enable the bkops statistics
+ */
+void mmc_blk_init_bkops_statistics(struct mmc_card *card)
+{
+	int i;
+	struct mmc_bkops_stats *stats;
+
+	if (!card)
+		return;
+
+	stats = &card->bkops.stats;
+	spin_lock(&stats->lock);
+
+	stats->manual_start = 0;
+	stats->hpi = 0;
+	stats->auto_start = 0;
+	stats->auto_stop = 0;
+	for (i = 0 ; i < MMC_BKOPS_NUM_SEVERITY_LEVELS ; i++)
+		stats->level[i] = 0;
+	stats->enabled = true;
+
+	spin_unlock(&stats->lock);
+}
+EXPORT_SYMBOL(mmc_blk_init_bkops_statistics);
+
+static void mmc_update_bkops_hpi(struct mmc_bkops_stats *stats)
+{
+	spin_lock_irq(&stats->lock);
+	if (stats->enabled)
+		stats->hpi++;
+	spin_unlock_irq(&stats->lock);
+}
+
+static void mmc_update_bkops_start(struct mmc_bkops_stats *stats)
+{
+	spin_lock_irq(&stats->lock);
+	if (stats->enabled)
+		stats->manual_start++;
+	spin_unlock_irq(&stats->lock);
+}
+
+static void mmc_update_bkops_auto_on(struct mmc_bkops_stats *stats)
+{
+	spin_lock_irq(&stats->lock);
+	if (stats->enabled)
+		stats->auto_start++;
+	spin_unlock_irq(&stats->lock);
+}
+
+static void mmc_update_bkops_auto_off(struct mmc_bkops_stats *stats)
+{
+	spin_lock_irq(&stats->lock);
+	if (stats->enabled)
+		stats->auto_stop++;
+	spin_unlock_irq(&stats->lock);
+}
+
+static void mmc_update_bkops_level(struct mmc_bkops_stats *stats,
+					unsigned level)
+{
+	BUG_ON(level >= MMC_BKOPS_NUM_SEVERITY_LEVELS);
+	spin_lock_irq(&stats->lock);
+	if (stats->enabled)
+		stats->level[level]++;
+	spin_unlock_irq(&stats->lock);
+}
+
 /**
- *	mmc_start_bkops - start BKOPS for supported cards
+ *	mmc_set_auto_bkops - set auto BKOPS for supported cards
  *	@card: MMC card to start BKOPS
- *	@form_exception: A flag to indicate if this function was
- *			 called due to an exception raised by the card
+ *	@enable: enable/disable flag
+ *
+ *	Configure the card to run automatic BKOPS.
+ *
+ *	Should be called with the host claimed.
+ */
+int mmc_set_auto_bkops(struct mmc_card *card, bool enable)
+{
+	int ret = 0;
+	u8 bkops_en;
+
+	BUG_ON(!card);
+	enable = !!enable;
+
+	if (unlikely(!mmc_card_support_auto_bkops(card))) {
+		pr_err("%s: %s: card doesn't support auto bkops\n",
+				mmc_hostname(card->host), __func__);
+		return -EPERM;
+	}
+
+	if (enable) {
+		if (mmc_card_doing_auto_bkops(card))
+			goto out;
+		bkops_en = card->ext_csd.bkops_en | EXT_CSD_BKOPS_AUTO_EN;
+	} else {
+		if (!mmc_card_doing_auto_bkops(card))
+			goto out;
+		bkops_en = card->ext_csd.bkops_en & ~EXT_CSD_BKOPS_AUTO_EN;
+	}
+
+	ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BKOPS_EN,
+			bkops_en, 0);
+	if (ret) {
+		pr_err("%s: %s: error in setting auto bkops to %d (%d)\n",
+			mmc_hostname(card->host), __func__, enable, ret);
+	} else {
+		if (enable) {
+			mmc_card_set_auto_bkops(card);
+			mmc_update_bkops_auto_on(&card->bkops.stats);
+		} else {
+			mmc_card_clr_auto_bkops(card);
+			mmc_update_bkops_auto_off(&card->bkops.stats);
+		}
+		card->ext_csd.bkops_en = bkops_en;
+		pr_debug("%s: %s: bkops state %x\n",
+				mmc_hostname(card->host), __func__, bkops_en);
+	}
+out:
+	return ret;
+}
+EXPORT_SYMBOL(mmc_set_auto_bkops);
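/*
 * Illustrative sketch (not part of the patch): toggling auto bkops with
 * the host claimed, as the kernel-doc above requires:
 *
 *	mmc_claim_host(card->host);
 *	err = mmc_set_auto_bkops(card, true);
 *	mmc_release_host(card->host);
 */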
+
+/**
+ *	mmc_check_bkops - check BKOPS for supported cards
+ *	@card: MMC card to check BKOPS
  *
- *	Start background operations whenever requested.
- *	When the urgent BKOPS bit is set in a R1 command response
- *	then background operations should be started immediately.
+ *	Read the BKOPS status in order to determine whether the
+ *	card requires bkops to be started.
 */
-void mmc_start_bkops(struct mmc_card *card, bool from_exception)
+void mmc_check_bkops(struct mmc_card *card)
 {
 	int err;
-	int timeout;
-	bool use_busy_signal;
 
 	BUG_ON(!card);
 
-	if (!card->ext_csd.man_bkops_en || mmc_card_doing_bkops(card))
+	if (mmc_card_doing_bkops(card))
 		return;
 
 	err = mmc_read_bkops_status(card);
@@ -324,47 +1361,50 @@
 		return;
 	}
 
-	if (!card->ext_csd.raw_bkops_status)
-		return;
+	card->bkops.needs_check = false;
 
-	if (card->ext_csd.raw_bkops_status < EXT_CSD_BKOPS_LEVEL_2 &&
-	    from_exception)
-		return;
+	mmc_update_bkops_level(&card->bkops.stats,
+				card->ext_csd.raw_bkops_status);
 
-	mmc_claim_host(card->host);
-	if (card->ext_csd.raw_bkops_status >= EXT_CSD_BKOPS_LEVEL_2) {
-		timeout = MMC_BKOPS_MAX_TIMEOUT;
-		use_busy_signal = true;
-	} else {
-		timeout = 0;
-		use_busy_signal = false;
+	card->bkops.needs_bkops = card->ext_csd.raw_bkops_status > 0;
 }
+EXPORT_SYMBOL(mmc_check_bkops);
+
+/**
+ *	mmc_start_manual_bkops - start BKOPS for supported cards
+ *	@card: MMC card to start BKOPS
+ *
+ *	Send START_BKOPS to the card.
+ *	The function should be called with the host claimed.
+ */
+void mmc_start_manual_bkops(struct mmc_card *card)
+{
+	int err;
+
+	BUG_ON(!card);
+
+	if (unlikely(!mmc_card_configured_manual_bkops(card)))
+		return;
+
+	if (mmc_card_doing_bkops(card))
+		return;
 
 	mmc_retune_hold(card->host);
 
-	err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
-			EXT_CSD_BKOPS_START, 1, timeout,
-			use_busy_signal, true, false);
+	err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BKOPS_START,
+				1, 0, false, true, false);
 	if (err) {
-		pr_warn("%s: Error %d starting bkops\n",
+		pr_err("%s: Error %d starting manual bkops\n",
 			mmc_hostname(card->host), err);
-		mmc_retune_release(card->host);
-		goto out;
+	} else {
+		mmc_card_set_doing_bkops(card);
+		mmc_update_bkops_start(&card->bkops.stats);
+		card->bkops.needs_bkops = false;
 	}
 
-	/*
-	 * For urgent bkops status (LEVEL_2 and more)
-	 * bkops executed synchronously, otherwise
-	 * the operation is in progress
-	 */
-	if (!use_busy_signal)
-		mmc_card_set_doing_bkops(card);
-	else
 	mmc_retune_release(card->host);
-out:
-	mmc_release_host(card->host);
 }
-EXPORT_SYMBOL(mmc_start_bkops);
+EXPORT_SYMBOL(mmc_start_manual_bkops);
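/*
 * Illustrative sketch (not part of the patch): the split that replaces
 * the old mmc_start_bkops(). An idle path first polls the card, then
 * starts bkops only if the card asked for it:
 *
 *	mmc_check_bkops(card);			// reads BKOPS_STATUS
 *	if (card->bkops.needs_bkops)
 *		mmc_start_manual_bkops(card);	// host must be claimed
 */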
 
 /*
  * mmc_wait_data_done() - done callback for data request
@@ -374,10 +1414,13 @@
  */
 static void mmc_wait_data_done(struct mmc_request *mrq)
 {
+	unsigned long flags;
 	struct mmc_context_info *context_info = &mrq->host->context_info;
 
+	spin_lock_irqsave(&context_info->lock, flags);
 	context_info->is_done_rcv = true;
 	wake_up_interruptible(&context_info->wait);
+	spin_unlock_irqrestore(&context_info->lock, flags);
 }
 
 static void mmc_wait_done(struct mmc_request *mrq)
@@ -443,6 +1486,7 @@
 	struct mmc_command *cmd;
 	struct mmc_context_info *context_info = &host->context_info;
 	int err;
+	bool is_done_rcv = false;
 	unsigned long flags;
 
 	while (1) {
@@ -450,9 +1494,10 @@
 				(context_info->is_done_rcv ||
 				 context_info->is_new_req));
 		spin_lock_irqsave(&context_info->lock, flags);
+		is_done_rcv = context_info->is_done_rcv;
 		context_info->is_waiting_last_req = false;
 		spin_unlock_irqrestore(&context_info->lock, flags);
-		if (context_info->is_done_rcv) {
+		if (is_done_rcv) {
 			context_info->is_done_rcv = false;
 			context_info->is_new_req = false;
 			cmd = mrq->cmd;
@@ -488,19 +1533,19 @@
 	struct mmc_command *cmd;
 
 	while (1) {
-		wait_for_completion(&mrq->completion);
+		wait_for_completion_io(&mrq->completion);
 
 		cmd = mrq->cmd;
 
 		/*
-		 * If host has timed out waiting for the sanitize
+		 * If host has timed out waiting for the sanitize/bkops
 		 * to complete, card might be still in programming state
 		 * so let's try to bring the card out of programming
 		 * state.
 		 */
-		if (cmd->sanitize_busy && cmd->error == -ETIMEDOUT) {
+		if ((cmd->bkops_busy || cmd->sanitize_busy) &&
+		    cmd->error == -ETIMEDOUT) {
 			if (!mmc_interrupt_hpi(host->card)) {
-				pr_warn("%s: %s: Interrupted sanitize\n",
+				pr_warn("%s: %s: Interrupted sanitize/bkops\n",
 					mmc_hostname(host), __func__);
 				cmd->error = 0;
 				break;
@@ -510,8 +1555,13 @@
 			}
 		}
 		if (!cmd->error || !cmd->retries ||
-		    mmc_card_removed(host->card))
+		    mmc_card_removed(host->card)) {
+			if (cmd->error && !cmd->retries &&
+			     cmd->opcode != MMC_SEND_STATUS &&
+			     cmd->opcode != MMC_SEND_TUNING_BLOCK)
+				mmc_recovery_fallback_lower_speed(host);
 			break;
+		}
 
 		mmc_retune_recheck(host);
 
@@ -539,8 +1589,11 @@
 static void mmc_pre_req(struct mmc_host *host, struct mmc_request *mrq,
 		 bool is_first_req)
 {
-	if (host->ops->pre_req)
+	if (host->ops->pre_req) {
+		mmc_host_clk_hold(host);
 		host->ops->pre_req(host, mrq, is_first_req);
+		mmc_host_clk_release(host);
+	}
 }
 
 /**
@@ -555,10 +1608,140 @@
 static void mmc_post_req(struct mmc_host *host, struct mmc_request *mrq,
 			 int err)
 {
-	if (host->ops->post_req)
+	if (host->ops->post_req) {
+		mmc_host_clk_hold(host);
 		host->ops->post_req(host, mrq, err);
+		mmc_host_clk_release(host);
+	}
+}
+
+/**
+ *	mmc_cmdq_discard_queue - discard the task[s] in the device
+ *	@host: host instance
+ *	@tasks: mask of tasks to be knocked off
+ *		0: remove all queued tasks
+ */
+int mmc_cmdq_discard_queue(struct mmc_host *host, u32 tasks)
+{
+	return mmc_discard_queue(host, tasks);
+}
+EXPORT_SYMBOL(mmc_cmdq_discard_queue);
+
+/**
+ *	mmc_cmdq_post_req - post process of a completed request
+ *	@host: host instance
+ *	@tag: the request tag.
+ *	@err: non-zero is error, success otherwise
+ */
+void mmc_cmdq_post_req(struct mmc_host *host, int tag, int err)
+{
+	if (likely(host->cmdq_ops->post_req))
+		host->cmdq_ops->post_req(host, tag, err);
+}
+EXPORT_SYMBOL(mmc_cmdq_post_req);
+
+/**
+ *	mmc_cmdq_halt - halt/un-halt the command queue engine
+ *	@host: host instance
+ *	@halt: true - halt, un-halt otherwise
+ *
+ *	Host halts the command queue engine. It should complete
+ *	the ongoing transfer and release the bus.
+ *	All legacy commands can be sent upon successful
+ *	completion of this function.
+ *	Returns 0 on success, negative otherwise
+ */
+int mmc_cmdq_halt(struct mmc_host *host, bool halt)
+{
+	int err = 0;
+
+	if (mmc_host_cq_disable(host)) {
+		pr_debug("%s: %s: CQE is already disabled\n",
+				mmc_hostname(host), __func__);
+		return 0;
+	}
+
+	if ((halt && mmc_host_halt(host)) ||
+			(!halt && !mmc_host_halt(host))) {
+		pr_debug("%s: %s: CQE is already %s\n", mmc_hostname(host),
+				__func__, halt ? "halted" : "un-halted");
+		return 0;
 	}
 
+	mmc_host_clk_hold(host);
+	if (host->cmdq_ops->halt) {
+		err = host->cmdq_ops->halt(host, halt);
+		if (!err && host->ops->notify_halt)
+			host->ops->notify_halt(host, halt);
+		if (!err && halt)
+			mmc_host_set_halt(host);
+		else if (!err && !halt) {
+			mmc_host_clr_halt(host);
+			wake_up(&host->cmdq_ctx.wait);
+		}
+	} else {
+		err = -ENOSYS;
+	}
+	mmc_host_clk_release(host);
+	return err;
+}
+EXPORT_SYMBOL(mmc_cmdq_halt);
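/*
 * Illustrative sketch (not part of the patch): legacy commands may only
 * be issued while the CQ engine is halted, so callers bracket them:
 *
 *	err = mmc_cmdq_halt(host, true);
 *	if (!err) {
 *		...				// issue legacy command(s)
 *		mmc_cmdq_halt(host, false);	// un-halt, wakes waiters
 *	}
 */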
+
+int mmc_cmdq_start_req(struct mmc_host *host, struct mmc_cmdq_req *cmdq_req)
+{
+	struct mmc_request *mrq = &cmdq_req->mrq;
+
+	mrq->host = host;
+	if (mmc_card_removed(host->card)) {
+		mrq->cmd->error = -ENOMEDIUM;
+		return -ENOMEDIUM;
+	}
+	return mmc_start_cmdq_request(host, mrq);
+}
+EXPORT_SYMBOL(mmc_cmdq_start_req);
+
+static void mmc_cmdq_dcmd_req_done(struct mmc_request *mrq)
+{
+	mmc_host_clk_release(mrq->host);
+	complete(&mrq->completion);
+}
+
+int mmc_cmdq_wait_for_dcmd(struct mmc_host *host,
+			struct mmc_cmdq_req *cmdq_req)
+{
+	struct mmc_request *mrq = &cmdq_req->mrq;
+	struct mmc_command *cmd = mrq->cmd;
+	int err = 0;
+
+	init_completion(&mrq->completion);
+	mrq->done = mmc_cmdq_dcmd_req_done;
+	err = mmc_cmdq_start_req(host, cmdq_req);
+	if (err)
+		return err;
+
+	wait_for_completion_io(&mrq->completion);
+	if (cmd->error) {
+		pr_err("%s: DCMD %d failed with err %d\n",
+				mmc_hostname(host), cmd->opcode,
+				cmd->error);
+		err = cmd->error;
+		mmc_host_clk_hold(host);
+		host->cmdq_ops->dumpstate(host);
+		mmc_host_clk_release(host);
+	}
+	return err;
+}
+EXPORT_SYMBOL(mmc_cmdq_wait_for_dcmd);
+
+int mmc_cmdq_prepare_flush(struct mmc_command *cmd)
+{
+	return __mmc_switch_cmdq_mode(cmd, EXT_CSD_CMD_SET_NORMAL,
+				     EXT_CSD_FLUSH_CACHE, 1,
+				     0, true, true);
+}
+EXPORT_SYMBOL(mmc_cmdq_prepare_flush);
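/*
 * Illustrative sketch (not part of the patch): issuing a cache flush as
 * a DCMD through the helpers above, assuming a caller-owned cmdq_req
 * whose mrq.cmd points at an embedded struct mmc_command:
 *
 *	mmc_cmdq_prepare_flush(cmdq_req->mrq.cmd);
 *	err = mmc_cmdq_wait_for_dcmd(card->host, cmdq_req);
 */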
+
 /**
  *	mmc_start_req - start a non-blocking request
  *	@host: MMC host to start command
@@ -579,7 +1762,6 @@
 				    struct mmc_async_req *areq, int *error)
 {
 	int err = 0;
-	int start_err = 0;
 	struct mmc_async_req *data = host->areq;
 
 	/* Prepare a new request */
@@ -609,7 +1791,7 @@
 			if (areq)
 				mmc_post_req(host, areq->mrq, -EINVAL);
 
-			mmc_start_bkops(host->card, true);
+			mmc_check_bkops(host->card);
 
 			/* prepare the request again */
 			if (areq)
@@ -617,14 +1799,24 @@
 		}
 	}
 
-	if (!err && areq)
-		start_err = __mmc_start_data_req(host, areq->mrq);
+	if (!err && areq) {
+#ifdef CONFIG_BLOCK
+		if (host->latency_hist_enabled) {
+			areq->mrq->io_start = ktime_get();
+			areq->mrq->lat_hist_enabled = 1;
+		} else {
+			areq->mrq->lat_hist_enabled = 0;
+		}
+#endif
+		trace_mmc_blk_rw_start(areq->mrq->cmd->opcode,
+				       areq->mrq->cmd->arg,
+				       areq->mrq->data);
+		__mmc_start_data_req(host, areq->mrq);
+	}
 
 	if (host->areq)
 		mmc_post_req(host, host->areq->mrq, 0);
 
-	 /* Cancel a prepared request if it was not started. */
-	if ((err || start_err) && areq)
+	if (err && areq)
 		mmc_post_req(host, areq->mrq, -EINVAL);
 
 	if (err)
@@ -649,6 +1841,10 @@
  */
 void mmc_wait_for_req(struct mmc_host *host, struct mmc_request *mrq)
 {
+#ifdef CONFIG_MMC_BLOCK_DEFERRED_RESUME
+	if (mmc_bus_needs_resume(host))
+		mmc_resume_bus(host);
+#endif
 	__mmc_start_req(host, mrq);
 	mmc_wait_for_req_done(host, mrq);
 }
@@ -702,8 +1898,6 @@
 	}
 
 	err = mmc_send_hpi_cmd(card, &status);
-	if (err)
-		goto out;
 
 	prg_wait = jiffies + msecs_to_jiffies(card->ext_csd.out_of_int_time);
 	do {
@@ -711,8 +1905,13 @@
 
 		if (!err && R1_CURRENT_STATE(status) == R1_STATE_TRAN)
 			break;
-		if (time_after(jiffies, prg_wait))
+		if (time_after(jiffies, prg_wait)) {
+			err = mmc_send_status(card, &status);
+			if (!err && R1_CURRENT_STATE(status) != R1_STATE_TRAN)
 				err = -ETIMEDOUT;
+			else
+				break;
+		}
 	} while (!err);
 
 out:
@@ -764,6 +1963,11 @@
 	int err = 0;
 
 	BUG_ON(!card);
+	if (unlikely(!mmc_card_configured_manual_bkops(card)))
+		goto out;
+	if (!mmc_card_doing_bkops(card))
+		goto out;
+
 	err = mmc_interrupt_hpi(card);
 
 	/*
@@ -772,10 +1976,11 @@
 	 */
 	if (!err || (err == -EINVAL)) {
 		mmc_card_clr_doing_bkops(card);
+		mmc_update_bkops_hpi(&card->bkops.stats);
 		mmc_retune_release(card->host);
 		err = 0;
 	}
-
+out:
 	return err;
 }
 EXPORT_SYMBOL(mmc_stop_bkops);
@@ -791,8 +1996,14 @@
 	if (err)
 		return err;
 
-	card->ext_csd.raw_bkops_status = ext_csd[EXT_CSD_BKOPS_STATUS];
-	card->ext_csd.raw_exception_status = ext_csd[EXT_CSD_EXP_EVENTS_STATUS];
+	card->ext_csd.raw_bkops_status = ext_csd[EXT_CSD_BKOPS_STATUS] &
+		MMC_BKOPS_URGENCY_MASK;
+	card->ext_csd.raw_exception_status =
+		ext_csd[EXT_CSD_EXP_EVENTS_STATUS] & (EXT_CSD_URGENT_BKOPS |
+						      EXT_CSD_DYNCAP_NEEDED |
+						      EXT_CSD_SYSPOOL_EXHAUSTED |
+						      EXT_CSD_PACKED_FAILURE);
+
 	kfree(ext_csd);
 	return 0;
 }
@@ -810,6 +2021,10 @@
 {
 	unsigned int mult;
 
+	if (!card) {
+		WARN_ON(1);
+		return;
+	}
 	/*
 	 * SDIO cards only define an upper 1 s limit on access.
 	 */
@@ -841,9 +2056,9 @@
 		unsigned int timeout_us, limit_us;
 
 		timeout_us = data->timeout_ns / 1000;
-		if (card->host->ios.clock)
+		if (mmc_host_clk_rate(card->host))
 			timeout_us += data->timeout_clks * 1000 /
-				(card->host->ios.clock / 1000);
+				(mmc_host_clk_rate(card->host) / 1000);
 
 		if (data->flags & MMC_DATA_WRITE)
 			/*
@@ -876,9 +2091,11 @@
 	 * Address this by setting the read timeout to a "reasonably high"
 	 * value. For the cards tested, 600ms has proven enough. If necessary,
 	 * this value can be increased if other problematic cards require this.
+	 * Certain Hynix 5.x cards give read timeouts even with 300 ms.
+	 * Increase the timeout further, to the maximum value (4 s).
 	 */
 	if (mmc_card_long_read_time(card) && data->flags & MMC_DATA_READ) {
-		data->timeout_ns = 600000000;
+		data->timeout_ns = 4000000000u;
 		data->timeout_clks = 0;
 	}
 
@@ -897,6 +2114,11 @@
 				data->timeout_ns =  100000000;	/* 100ms */
 		}
 	}
+	/* Increase the timeout values for some bad INAND MCP devices */
+	if (card->quirks & MMC_QUIRK_INAND_DATA_TIMEOUT) {
+		data->timeout_ns = 4000000000u; /* 4s */
+		data->timeout_clks = 0;
+	}
 }
 EXPORT_SYMBOL(mmc_set_data_timeout);
 
@@ -947,6 +2169,7 @@
 	might_sleep();
 
 	add_wait_queue(&host->wq, &wait);
+
 	spin_lock_irqsave(&host->lock, flags);
 	while (1) {
 		set_current_state(TASK_UNINTERRUPTIBLE);
@@ -972,11 +2195,53 @@
 	if (pm)
 		pm_runtime_get_sync(mmc_dev(host));
 
+	if (host->ops->enable && !stop && host->claim_cnt == 1)
+		host->ops->enable(host);
+
 	return stop;
 }
 EXPORT_SYMBOL(__mmc_claim_host);
 
 /**
+ *     mmc_try_claim_host - try exclusively to claim a host,
+ *        retrying for the given time with a gap of 10 ms
+ *     @host: mmc host to claim
+ *     @delay_ms: delay in ms
+ *
+ *     Returns %1 if the host is claimed, %0 otherwise.
+ */
+int mmc_try_claim_host(struct mmc_host *host, unsigned int delay_ms)
+{
+	int claimed_host = 0;
+	unsigned long flags;
+	int retry_cnt = delay_ms / 10;
+	bool pm = false;
+
+	do {
+		spin_lock_irqsave(&host->lock, flags);
+		if (!host->claimed || host->claimer == current) {
+			host->claimed = 1;
+			host->claimer = current;
+			host->claim_cnt += 1;
+			claimed_host = 1;
+			if (host->claim_cnt == 1)
+				pm = true;
+		}
+		spin_unlock_irqrestore(&host->lock, flags);
+		if (!claimed_host)
+			mmc_delay(10);
+	} while (!claimed_host && retry_cnt--);
+
+	if (pm)
+		pm_runtime_get_sync(mmc_dev(host));
+
+	if (host->ops->enable && claimed_host && host->claim_cnt == 1)
+		host->ops->enable(host);
+	return claimed_host;
+}
+EXPORT_SYMBOL(mmc_try_claim_host);
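/*
 * Illustrative sketch (not part of the patch): unlike __mmc_claim_host()
 * this variant polls in 10 ms steps and can give up, so the return value
 * must be checked:
 *
 *	if (!mmc_try_claim_host(host, 200))	// retry for ~200 ms
 *		return -EBUSY;
 *	...
 *	mmc_release_host(host);
 */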
+
+/**
  *	mmc_release_host - release a host
  *	@host: mmc host to release
  *
@@ -989,6 +2254,9 @@
 
 	WARN_ON(!host->claimed);
 
+	if (host->ops->disable && host->claim_cnt == 1)
+		host->ops->disable(host);
+
 	spin_lock_irqsave(&host->lock, flags);
 	if (--host->claim_cnt) {
 		/* Release for nested claim */
@@ -1012,9 +2280,14 @@
 {
 	pm_runtime_get_sync(&card->dev);
 	mmc_claim_host(card->host);
+#ifdef CONFIG_MMC_BLOCK_DEFERRED_RESUME
+	if (mmc_bus_needs_resume(card->host))
+		mmc_resume_bus(card->host);
+#endif
 }
 EXPORT_SYMBOL(mmc_get_card);
 
+
 /*
  * This is a helper function, which releases the host and drops the runtime
  * pm reference for the card device.
@@ -1031,7 +2304,7 @@
  * Internal function that does the actual ios call to the host driver,
  * optionally printing some debug output.
  */
-static inline void mmc_set_ios(struct mmc_host *host)
+void mmc_set_ios(struct mmc_host *host)
 {
 	struct mmc_ios *ios = &host->ios;
 
@@ -1041,23 +2314,41 @@
 		 ios->power_mode, ios->chip_select, ios->vdd,
 		 ios->bus_width, ios->timing);
 
+	if (ios->clock > 0)
+		mmc_set_ungated(host);
 	host->ops->set_ios(host, ios);
+	if (ios->old_rate != ios->clock) {
+		if (likely(ios->clk_ts)) {
+			char trace_info[80];
+			snprintf(trace_info, 80,
+				"%s: freq_KHz %d --> %d | t = %d",
+				mmc_hostname(host), ios->old_rate / 1000,
+				ios->clock / 1000, jiffies_to_msecs(
+					(long)jiffies - (long)ios->clk_ts));
+			trace_mmc_clk(trace_info);
+		}
+		ios->old_rate = ios->clock;
+		ios->clk_ts = jiffies;
+	}
 }
+EXPORT_SYMBOL(mmc_set_ios);
 
 /*
  * Control chip select pin on a host.
  */
 void mmc_set_chip_select(struct mmc_host *host, int mode)
 {
+	mmc_host_clk_hold(host);
 	host->ios.chip_select = mode;
 	mmc_set_ios(host);
+	mmc_host_clk_release(host);
 }
 
 /*
  * Sets the host clock to the highest possible frequency that
  * is below "hz".
  */
-void mmc_set_clock(struct mmc_host *host, unsigned int hz)
+static void __mmc_set_clock(struct mmc_host *host, unsigned int hz)
 {
 	WARN_ON(hz && hz < host->f_min);
 
@@ -1068,6 +2359,81 @@
 	mmc_set_ios(host);
 }
 
+void mmc_set_clock(struct mmc_host *host, unsigned int hz)
+{
+	mmc_host_clk_hold(host);
+	__mmc_set_clock(host, hz);
+	mmc_host_clk_release(host);
+}
+
+#ifdef CONFIG_MMC_CLKGATE
+/*
+ * This gates the clock by setting it to 0 Hz.
+ */
+void mmc_gate_clock(struct mmc_host *host)
+{
+	unsigned long flags;
+
+	WARN_ON(!host->ios.clock);
+
+	spin_lock_irqsave(&host->clk_lock, flags);
+	host->clk_old = host->ios.clock;
+	host->ios.clock = 0;
+	host->clk_gated = true;
+	spin_unlock_irqrestore(&host->clk_lock, flags);
+	mmc_set_ios(host);
+}
+
+/*
+ * This restores the clock from gating by using the cached
+ * clock value.
+ */
+void mmc_ungate_clock(struct mmc_host *host)
+{
+	/*
+	 * We should previously have gated the clock, so the clock shall
+	 * be 0 here! The clock may however be 0 during initialization,
+	 * when some request operations are performed before setting
+	 * the frequency. When ungate is requested in that situation
+	 * we just ignore the call.
+	 */
+	if (host->clk_old) {
+		WARN_ON(host->ios.clock);
+		/* This call will also set host->clk_gated to false */
+		__mmc_set_clock(host, host->clk_old);
+		/*
+		 * We have seen that host controller's clock tuning circuit may
+		 * go out of sync if controller clocks are gated.
+		 * To workaround this issue, we are triggering retuning of the
+		 * tuning circuit after ungating the controller clocks.
+		 */
+		mmc_retune_needed(host);
+	}
+}
+
+void mmc_set_ungated(struct mmc_host *host)
+{
+	unsigned long flags;
+
+	/*
+	 * We've been given a new frequency while the clock is gated,
+	 * so make sure we regard this as ungating it.
+	 */
+	spin_lock_irqsave(&host->clk_lock, flags);
+	host->clk_gated = false;
+	spin_unlock_irqrestore(&host->clk_lock, flags);
+}
+
+#else
+void mmc_set_ungated(struct mmc_host *host)
+{
+}
+
+void mmc_gate_clock(struct mmc_host *host)
+{
+}
+#endif
+
 int mmc_execute_tuning(struct mmc_card *card)
 {
 	struct mmc_host *host = card->host;
@@ -1082,7 +2448,9 @@
 	else
 		opcode = MMC_SEND_TUNING_BLOCK;
 
+	mmc_host_clk_hold(host);
 	err = host->ops->execute_tuning(host, opcode);
+	mmc_host_clk_release(host);
 
 	if (err)
 		pr_err("%s: tuning execution failed\n", mmc_hostname(host));
@@ -1097,8 +2465,10 @@
  */
 void mmc_set_bus_mode(struct mmc_host *host, unsigned int mode)
 {
+	mmc_host_clk_hold(host);
 	host->ios.bus_mode = mode;
 	mmc_set_ios(host);
+	mmc_host_clk_release(host);
 }
 
 /*
@@ -1106,8 +2476,10 @@
  */
 void mmc_set_bus_width(struct mmc_host *host, unsigned int width)
 {
+	mmc_host_clk_hold(host);
 	host->ios.bus_width = width;
 	mmc_set_ios(host);
+	mmc_host_clk_release(host);
 }
 
 /*
@@ -1119,9 +2491,10 @@
 
 	if (mmc_host_is_spi(host))
 		host->ios.chip_select = MMC_CS_HIGH;
-	else
+	else {
 		host->ios.chip_select = MMC_CS_DONTCARE;
-	host->ios.bus_mode = MMC_BUSMODE_PUSHPULL;
+		host->ios.bus_mode = MMC_BUSMODE_OPENDRAIN;
+	}
 	host->ios.bus_width = MMC_BUS_WIDTH_1;
 	host->ios.timing = MMC_TIMING_LEGACY;
 	host->ios.drv_type = 0;
@@ -1548,8 +2921,11 @@
 	int old_signal_voltage = host->ios.signal_voltage;
 
 	host->ios.signal_voltage = signal_voltage;
-	if (host->ops->start_signal_voltage_switch)
+	if (host->ops->start_signal_voltage_switch) {
+		mmc_host_clk_hold(host);
 		err = host->ops->start_signal_voltage_switch(host, &host->ios);
+		mmc_host_clk_release(host);
+	}
 
 	if (err)
 		host->ios.signal_voltage = old_signal_voltage;
@@ -1587,13 +2963,27 @@
 	cmd.arg = 0;
 	cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
 
+	/*
+	 * Hold the clock reference so clock doesn't get auto gated during this
+	 * voltage switch sequence.
+	 */
+	mmc_host_clk_hold(host);
 	err = mmc_wait_for_cmd(host, &cmd, 0);
-	if (err)
-		return err;
-
-	if (!mmc_host_is_spi(host) && (cmd.resp[0] & R1_ERROR))
-		return -EIO;
+	if (err) {
+		if (err == -ETIMEDOUT) {
+			pr_debug("%s: voltage switching failed with err %d\n",
+				mmc_hostname(host), err);
+			err = -EAGAIN;
+			goto power_cycle;
+		} else {
+			goto err_command;
+		}
+	}
 
+	if (!mmc_host_is_spi(host) && (cmd.resp[0] & R1_ERROR)) {
+		err = -EIO;
+		goto err_command;
+	}
 	/*
 	 * The card should drive cmd and dat[0:3] low immediately
 	 * after the response of cmd11, but wait 1 ms to be sure
@@ -1607,6 +2997,7 @@
 	 * During a signal voltage level switch, the clock must be gated
 	 * for 5 ms according to the SD spec
 	 */
+	host->card_clock_off = true;
 	clock = host->ios.clock;
 	host->ios.clock = 0;
 	mmc_set_ios(host);
@@ -1617,6 +3008,9 @@
 		 * sent CMD11, so a power cycle is required anyway
 		 */
 		err = -EAGAIN;
+		host->ios.clock = clock;
+		mmc_set_ios(host);
+		host->card_clock_off = false;
 		goto power_cycle;
 	}
 
@@ -1625,6 +3019,7 @@
 	host->ios.clock = clock;
 	mmc_set_ios(host);
 
+	host->card_clock_off = false;
 	/* Wait for at least 1 ms according to spec */
 	mmc_delay(1);
 
@@ -1642,6 +3037,9 @@
 		mmc_power_cycle(host, ocr);
 	}
 
+err_command:
+	mmc_host_clk_release(host);
+
 	return err;
 }
 
@@ -1650,8 +3048,10 @@
  */
 void mmc_set_timing(struct mmc_host *host, unsigned int timing)
 {
+	mmc_host_clk_hold(host);
 	host->ios.timing = timing;
 	mmc_set_ios(host);
+	mmc_host_clk_release(host);
 }
 
 /*
@@ -1659,8 +3059,10 @@
  */
 void mmc_set_driver_type(struct mmc_host *host, unsigned int drv_type)
 {
+	mmc_host_clk_hold(host);
 	host->ios.drv_type = drv_type;
 	mmc_set_ios(host);
+	mmc_host_clk_release(host);
 }
 
 int mmc_select_drive_strength(struct mmc_card *card, unsigned int max_dtr,
@@ -1668,6 +3070,7 @@
 {
 	struct mmc_host *host = card->host;
 	int host_drv_type = SD_DRIVER_TYPE_B;
+	int drive_strength;
 
 	*drv_type = 0;
 
@@ -1690,10 +3093,14 @@
 	 * information and let the hardware specific code
 	 * return what is possible given the options
 	 */
-	return host->ops->select_drive_strength(card, max_dtr,
+	mmc_host_clk_hold(host);
+	drive_strength = host->ops->select_drive_strength(card, max_dtr,
 						host_drv_type,
 						card_drv_type,
 						drv_type);
+	mmc_host_clk_release(host);
+
+	return drive_strength;
 }
 
 /*
@@ -1712,6 +3119,8 @@
 	if (host->ios.power_mode == MMC_POWER_ON)
 		return;
 
+	mmc_host_clk_hold(host);
+
 	mmc_pwrseq_pre_power_on(host);
 
 	host->ios.vdd = fls(ocr) - 1;
@@ -1745,6 +3154,8 @@
 	 * time required to reach a stable voltage.
 	 */
 	mmc_delay(10);
+
+	mmc_host_clk_release(host);
 }
 
 void mmc_power_off(struct mmc_host *host)
@@ -1752,6 +3163,8 @@
 	if (host->ios.power_mode == MMC_POWER_OFF)
 		return;
 
+	mmc_host_clk_hold(host);
+
 	mmc_pwrseq_power_off(host);
 
 	host->ios.clock = 0;
@@ -1767,6 +3180,8 @@
 	 * can be successfully turned on again.
 	 */
 	mmc_delay(1);
+
+	mmc_host_clk_release(host);
 }
 
 void mmc_power_cycle(struct mmc_host *host, u32 ocr)
@@ -1816,6 +3231,51 @@
 	spin_unlock_irqrestore(&host->lock, flags);
 }
 
+int mmc_resume_bus(struct mmc_host *host)
+{
+	unsigned long flags;
+	int err = 0;
+
+	if (!mmc_bus_needs_resume(host))
+		return -EINVAL;
+
+	pr_debug("%s: Starting deferred resume\n", mmc_hostname(host));
+	spin_lock_irqsave(&host->lock, flags);
+	host->bus_resume_flags &= ~MMC_BUSRESUME_NEEDS_RESUME;
+	spin_unlock_irqrestore(&host->lock, flags);
+
+	mmc_bus_get(host);
+	if (host->bus_ops && !host->bus_dead && host->card) {
+		mmc_power_up(host, host->card->ocr);
+		BUG_ON(!host->bus_ops->resume);
+		err = host->bus_ops->resume(host);
+		if (err) {
+			pr_err("%s: bus resume: failed: %d\n",
+			       mmc_hostname(host), err);
+			err = mmc_hw_reset(host);
+			if (err) {
+				pr_err("%s: reset: failed: %d\n",
+				       mmc_hostname(host), err);
+				goto err_reset;
+			} else {
+				mmc_card_clr_suspended(host->card);
+			}
+		}
+		if (mmc_card_cmdq(host->card)) {
+			err = mmc_cmdq_halt(host, false);
+			if (err)
+				pr_err("%s: %s: unhalt failed: %d\n",
+				       mmc_hostname(host), __func__, err);
+		}
+	}
+
+err_reset:
+	mmc_bus_put(host);
+	pr_debug("%s: Deferred resume completed\n", mmc_hostname(host));
+	return err;
+}
+EXPORT_SYMBOL(mmc_resume_bus);
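+
+/*
+ * Sketch of how a caller might combine host claiming with deferred
+ * resume (mirrors what mmc_get_card() does above when
+ * CONFIG_MMC_BLOCK_DEFERRED_RESUME is set):
+ *
+ *	mmc_claim_host(host);
+ *	if (mmc_bus_needs_resume(host))
+ *		mmc_resume_bus(host);
+ *	... issue requests ...
+ *	mmc_release_host(host);
+ */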
+
 /*
  * Assign a mmc bus handler to a host. Only one bus handler may control a
  * host at any given time.
@@ -1881,6 +3341,13 @@
 		pm_wakeup_event(mmc_dev(host), 5000);
 
 	host->detect_change = 1;
+	/*
+	 * Change in cd_gpio state, so make sure the detection path is
+	 * not overridden because of manual resume.
+	 */
+	if (cd_irq && mmc_bus_manual_resume(host))
+		host->ignore_bus_resume_flags = true;
+
 	mmc_schedule_delayed_work(&host->detect, delay);
 }
 
@@ -1982,7 +3449,7 @@
 		 */
 		timeout_clks <<= 1;
 		timeout_us += (timeout_clks * 1000) /
-			      (card->host->ios.clock / 1000);
+			      (mmc_host_clk_rate(card->host) / 1000);
 
 		erase_timeout = timeout_us / 1000;
 
@@ -2049,15 +3516,9 @@
 		return mmc_mmc_erase_timeout(card, arg, qty);
 }
 
-static int mmc_do_erase(struct mmc_card *card, unsigned int from,
-			unsigned int to, unsigned int arg)
+static u32 mmc_get_erase_qty(struct mmc_card *card, u32 from, u32 to)
 {
-	struct mmc_command cmd = {0};
-	unsigned int qty = 0;
-	unsigned long timeout;
-	int err;
-
-	mmc_retune_hold(card->host);
+	u32 qty = 0;
 
 	/*
 	 * qty is used to calculate the erase timeout which depends on how many
@@ -2083,12 +3544,122 @@
 	else
 		qty += ((to / card->erase_size) -
 			(from / card->erase_size)) + 1;
+	return qty;
+}
+
+static int mmc_cmdq_send_erase_cmd(struct mmc_cmdq_req *cmdq_req,
+		struct mmc_card *card, u32 opcode, u32 arg, u32 qty)
+{
+	struct mmc_command *cmd = cmdq_req->mrq.cmd;
+	int err;
+
+	memset(cmd, 0, sizeof(struct mmc_command));
+
+	cmd->opcode = opcode;
+	cmd->arg = arg;
+	if (cmd->opcode == MMC_ERASE) {
+		cmd->flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
+		cmd->busy_timeout = mmc_erase_timeout(card, arg, qty);
+	} else {
+		cmd->flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
+	}
+
+	err = mmc_cmdq_wait_for_dcmd(card->host, cmdq_req);
+	if (err) {
+		pr_err("mmc_erase: group start error %d, status %#x\n",
+				err, cmd->resp[0]);
+		return -EIO;
+	}
+	return 0;
+}
+
+static int mmc_cmdq_do_erase(struct mmc_cmdq_req *cmdq_req,
+			struct mmc_card *card, unsigned int from,
+			unsigned int to, unsigned int arg)
+{
+	struct mmc_command *cmd = cmdq_req->mrq.cmd;
+	unsigned int qty = 0;
+	unsigned long timeout;
+	unsigned int fr, nr;
+	int err;
+
+	fr = from;
+	nr = to - from + 1;
+	trace_mmc_blk_erase_start(arg, fr, nr);
+
+	qty = mmc_get_erase_qty(card, from, to);
+
+	if (!mmc_card_blockaddr(card)) {
+		from <<= 9;
+		to <<= 9;
+	}
+
+	err = mmc_cmdq_send_erase_cmd(cmdq_req, card, MMC_ERASE_GROUP_START,
+			from, qty);
+	if (err)
+		goto out;
+
+	err = mmc_cmdq_send_erase_cmd(cmdq_req, card, MMC_ERASE_GROUP_END,
+			to, qty);
+	if (err)
+		goto out;
+
+	err = mmc_cmdq_send_erase_cmd(cmdq_req, card, MMC_ERASE,
+			arg, qty);
+	if (err)
+		goto out;
+
+	timeout = jiffies + msecs_to_jiffies(MMC_CORE_TIMEOUT_MS);
+	do {
+		memset(cmd, 0, sizeof(struct mmc_command));
+		cmd->opcode = MMC_SEND_STATUS;
+		cmd->arg = card->rca << 16;
+		cmd->flags = MMC_RSP_R1 | MMC_CMD_AC;
+		/* Do not retry else we can't see errors */
+		err = mmc_cmdq_wait_for_dcmd(card->host, cmdq_req);
+		if (err || (cmd->resp[0] & 0xFDF92000)) {
+			pr_err("error %d requesting status %#x\n",
+				err, cmd->resp[0]);
+			err = -EIO;
+			goto out;
+		}
+		/* Timeout if the device never becomes ready for data and
+		 * never leaves the program state.
+		 */
+		if (time_after(jiffies, timeout)) {
+			pr_err("%s: Card stuck in programming state! %s\n",
+				mmc_hostname(card->host), __func__);
+			err = -EIO;
+			goto out;
+		}
+	} while (!(cmd->resp[0] & R1_READY_FOR_DATA) ||
+		 (R1_CURRENT_STATE(cmd->resp[0]) == R1_STATE_PRG));
+out:
+	trace_mmc_blk_erase_end(arg, fr, nr);
+	return err;
+}
+
+static int mmc_do_erase(struct mmc_card *card, unsigned int from,
+			unsigned int to, unsigned int arg)
+{
+	struct mmc_command cmd = {0};
+	unsigned int qty = 0;
+	unsigned long timeout;
+	unsigned int fr, nr;
+	int err;
+
+	fr = from;
+	nr = to - from + 1;
+	trace_mmc_blk_erase_start(arg, fr, nr);
+
+	qty = mmc_get_erase_qty(card, from, to);
 
 	if (!mmc_card_blockaddr(card)) {
 		from <<= 9;
 		to <<= 9;
 	}
 
+	mmc_retune_hold(card->host);
 	if (mmc_card_sd(card))
 		cmd.opcode = SD_ERASE_WR_BLK_START;
 	else
@@ -2163,24 +3734,13 @@
 		 (R1_CURRENT_STATE(cmd.resp[0]) == R1_STATE_PRG));
 out:
 	mmc_retune_release(card->host);
+	trace_mmc_blk_erase_end(arg, fr, nr);
 	return err;
 }
 
-/**
- * mmc_erase - erase sectors.
- * @card: card to erase
- * @from: first sector to erase
- * @nr: number of sectors to erase
- * @arg: erase command argument (SD supports only %MMC_ERASE_ARG)
- *
- * Caller must claim host before calling this function.
- */
-int mmc_erase(struct mmc_card *card, unsigned int from, unsigned int nr,
-	      unsigned int arg)
+int mmc_erase_sanity_check(struct mmc_card *card, unsigned int from,
+		unsigned int nr, unsigned int arg)
 {
-	unsigned int rem, to = from + nr;
-	int err;
-
 	if (!(card->host->caps & MMC_CAP_ERASE) ||
 	    !(card->csd.cmdclass & CCC_ERASE))
 		return -EOPNOTSUPP;
@@ -2203,6 +3763,68 @@
 		if (from % card->erase_size || nr % card->erase_size)
 			return -EINVAL;
 	}
+	return 0;
+}
+
+int mmc_cmdq_erase(struct mmc_cmdq_req *cmdq_req,
+	      struct mmc_card *card, unsigned int from, unsigned int nr,
+	      unsigned int arg)
+{
+	unsigned int rem, to = from + nr;
+	int ret;
+
+	ret = mmc_erase_sanity_check(card, from, nr, arg);
+	if (ret)
+		return ret;
+
+	if (arg == MMC_ERASE_ARG) {
+		rem = from % card->erase_size;
+		if (rem) {
+			rem = card->erase_size - rem;
+			from += rem;
+			if (nr > rem)
+				nr -= rem;
+			else
+				return 0;
+		}
+		rem = nr % card->erase_size;
+		if (rem)
+			nr -= rem;
+	}
+
+	if (nr == 0)
+		return 0;
+
+	to = from + nr;
+
+	if (to <= from)
+		return -EINVAL;
+
+	/* 'from' and 'to' are inclusive */
+	to -= 1;
+
+	return mmc_cmdq_do_erase(cmdq_req, card, from, to, arg);
+}
+EXPORT_SYMBOL(mmc_cmdq_erase);
+
+/**
+ * mmc_erase - erase sectors.
+ * @card: card to erase
+ * @from: first sector to erase
+ * @nr: number of sectors to erase
+ * @arg: erase command argument (SD supports only %MMC_ERASE_ARG)
+ *
+ * Caller must claim host before calling this function.
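+ *
+ * For %MMC_ERASE_ARG, @from is rounded up and @nr is rounded down to
+ * erase-group boundaries. A worked example (values illustrative): with
+ * an erase size of 1024 sectors, from=1000, nr=3000 becomes from=1024,
+ * nr=2048 before the erase is issued.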
+ */
+int mmc_erase(struct mmc_card *card, unsigned int from, unsigned int nr,
+	      unsigned int arg)
+{
+	unsigned int rem, to = from + nr;
+	int ret;
+
+	ret = mmc_erase_sanity_check(card, from, nr, arg);
+	if (ret)
+		return ret;
 
 	if (arg == MMC_ERASE_ARG) {
 		rem = from % card->erase_size;
@@ -2240,10 +3862,10 @@
 	 */
 	rem = card->erase_size - (from % card->erase_size);
 	if ((arg & MMC_TRIM_ARGS) && (card->eg_boundary) && (nr > rem)) {
-		err = mmc_do_erase(card, from, from + rem - 1, arg);
+		ret = mmc_do_erase(card, from, from + rem - 1, arg);
 		from += rem;
-		if ((err) || (to <= from))
-			return err;
+		if ((ret) || (to <= from))
+			return ret;
 	}
 
 	return mmc_do_erase(card, from, to, arg);
@@ -2373,7 +3995,8 @@
 	struct mmc_host *host = card->host;
 	unsigned int max_discard, max_trim;
 
-	if (!host->max_busy_timeout)
+	if (!host->max_busy_timeout ||
+			(host->caps2 & MMC_CAP2_MAX_DISCARD_SIZE))
 		return UINT_MAX;
 
 	/*
@@ -2430,9 +4053,26 @@
 {
 	if (!(host->caps & MMC_CAP_HW_RESET) || !host->ops->hw_reset)
 		return;
+	mmc_host_clk_hold(host);
 	host->ops->hw_reset(host);
+	mmc_host_clk_release(host);
 }
 
+/*
+ * mmc_cmdq_hw_reset: helper API that resets the host and reinitializes
+ * the card. This must be called with the host claim held
+ * (mmc_claim_host()).
+ */
+int mmc_cmdq_hw_reset(struct mmc_host *host)
+{
+	if (!host->bus_ops->reset)
+		return -EOPNOTSUPP;
+
+	return host->bus_ops->reset(host);
+}
+EXPORT_SYMBOL(mmc_cmdq_hw_reset);
+
 int mmc_hw_reset(struct mmc_host *host)
 {
 	int ret;
@@ -2449,8 +4089,9 @@
 	ret = host->bus_ops->reset(host);
 	mmc_bus_put(host);
 
-	if (ret != -EOPNOTSUPP)
-		pr_warn("%s: tried to reset card\n", mmc_hostname(host));
+	if (ret)
+		pr_warn("%s: tried to reset card, got error %d\n",
+			mmc_hostname(host), ret);
 
 	return ret;
 }
@@ -2519,8 +4160,17 @@
 	}
 
 	if (ret) {
+		if (host->ops->get_cd && host->ops->get_cd(host)) {
+			ret = mmc_recovery_fallback_lower_speed(host);
+		} else {
-		mmc_card_set_removed(host->card);
+			mmc_card_set_removed(host->card);
-		pr_debug("%s: card remove detected\n", mmc_hostname(host));
+			if (host->card->sdr104_blocked) {
+				mmc_host_set_sdr104(host);
+				host->card->sdr104_blocked = false;
+			}
+			pr_debug("%s: card remove detected\n",
+					mmc_hostname(host));
+		}
 	}
 
 	return ret;
@@ -2561,19 +4211,35 @@
 }
 EXPORT_SYMBOL(mmc_detect_card_removed);
 
+/*
+ * This should be called to make sure that the detect work (mmc_rescan)
+ * has completed. Drivers may use this function from async schedule/probe
+ * contexts to make sure that boot-device detection has finished by the
+ * time the async_schedule completes.
+ */
+void mmc_flush_detect_work(struct mmc_host *host)
+{
+	flush_delayed_work(&host->detect);
+}
+EXPORT_SYMBOL(mmc_flush_detect_work);
+
 void mmc_rescan(struct work_struct *work)
 {
+	unsigned long flags;
 	struct mmc_host *host =
 		container_of(work, struct mmc_host, detect.work);
-	int i;
 
 	if (host->trigger_card_event && host->ops->card_event) {
 		host->ops->card_event(host);
 		host->trigger_card_event = false;
 	}
 
-	if (host->rescan_disable)
+	spin_lock_irqsave(&host->lock, flags);
+	if (host->rescan_disable) {
+		spin_unlock_irqrestore(&host->lock, flags);
 		return;
+	}
+	spin_unlock_irqrestore(&host->lock, flags);
 
 	/* If there is a non-removable card registered, only scan once */
 	if ((host->caps & MMC_CAP_NONREMOVABLE) && host->rescan_entered)
@@ -2591,6 +4257,8 @@
 		host->bus_ops->detect(host);
 
 	host->detect_change = 0;
+	if (host->ignore_bus_resume_flags)
+		host->ignore_bus_resume_flags = false;
 
 	/*
 	 * Let mmc_bus_put() free the bus/bus_ops if we've found that
@@ -2620,12 +4288,7 @@
 	}
 
 	mmc_claim_host(host);
-	for (i = 0; i < ARRAY_SIZE(freqs); i++) {
-		if (!mmc_rescan_try_freq(host, max(freqs[i], host->f_min)))
-			break;
-		if (freqs[i] <= host->f_min)
-			break;
-	}
+	mmc_rescan_try_freq(host, host->f_min);
 	mmc_release_host(host);
 
  out:
@@ -2635,18 +4298,18 @@
 
 void mmc_start_host(struct mmc_host *host)
 {
+	mmc_claim_host(host);
 	host->f_init = max(freqs[0], host->f_min);
 	host->rescan_disable = 0;
 	host->ios.power_mode = MMC_POWER_UNDEFINED;
 
-	mmc_claim_host(host);
 	if (host->caps2 & MMC_CAP2_NO_PRESCAN_POWERUP)
 		mmc_power_off(host);
 	else
 		mmc_power_up(host, host->ocr_avail);
-	mmc_release_host(host);
 
 	mmc_gpiod_request_cd_irq(host);
+	mmc_release_host(host);
 	_mmc_detect_change(host, 0, false);
 }
 
@@ -2730,7 +4393,9 @@
 	}
 
 	mmc_power_up(host, host->card->ocr);
+	mmc_claim_host(host);
 	ret = host->bus_ops->power_restore(host);
+	mmc_release_host(host);
 
 	mmc_bus_put(host);
 
@@ -2739,6 +4404,40 @@
 EXPORT_SYMBOL(mmc_power_restore_host);
 
 /*
+ * Add barrier request to the requests in cache
+ */
+int mmc_cache_barrier(struct mmc_card *card)
+{
+	struct mmc_host *host = card->host;
+	int err = 0;
+
+	if (!card->ext_csd.cache_ctrl ||
+	     (card->quirks & MMC_QUIRK_CACHE_DISABLE))
+		goto out;
+
+	if (!mmc_card_mmc(card))
+		goto out;
+
+	if (!card->ext_csd.barrier_en)
+		return -ENOTSUPP;
+
+	/*
+	 * If a device has received the maximum number of barrier
+	 * requests it supports, a barrier command is treated as a
+	 * flush command. Hence, it is better to use the flush
+	 * timeout instead of a generic CMD6 timeout.
+	 */
+	err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+			EXT_CSD_FLUSH_CACHE, 0x2, 0);
+	if (err)
+		pr_err("%s: cache barrier error %d\n",
+				mmc_hostname(host), err);
+out:
+	return err;
+}
+EXPORT_SYMBOL(mmc_cache_barrier);
+
+/*
  * Flush the cache to the non-volatile storage.
  */
 int mmc_flush_cache(struct mmc_card *card)
@@ -2747,13 +4446,24 @@
 
 	if (mmc_card_mmc(card) &&
 			(card->ext_csd.cache_size > 0) &&
-			(card->ext_csd.cache_ctrl & 1)) {
+			(card->ext_csd.cache_ctrl & 1) &&
+			(!(card->quirks & MMC_QUIRK_CACHE_DISABLE))) {
 		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
 				EXT_CSD_FLUSH_CACHE, 1, 0);
-		if (err)
+		if (err == -ETIMEDOUT) {
+			pr_err("%s: cache flush timeout\n",
+					mmc_hostname(card->host));
+			err = mmc_interrupt_hpi(card);
+			if (err) {
+				pr_err("%s: mmc_interrupt_hpi() failed (%d)\n",
+						mmc_hostname(card->host), err);
+				err = -ENODEV;
+			}
+		} else if (err) {
 			pr_err("%s: cache flush error %d\n",
 					mmc_hostname(card->host), err);
 	}
+	}
 
 	return err;
 }
@@ -2806,6 +4516,11 @@
 
 		spin_lock_irqsave(&host->lock, flags);
 		host->rescan_disable = 0;
+		if (mmc_bus_manual_resume(host) &&
+				!host->ignore_bus_resume_flags) {
+			spin_unlock_irqrestore(&host->lock, flags);
+			break;
+		}
 		spin_unlock_irqrestore(&host->lock, flags);
 		_mmc_detect_change(host, 0, false);
 
@@ -2832,6 +4547,22 @@
 	init_waitqueue_head(&host->context_info.wait);
 }
 
+#ifdef CONFIG_MMC_EMBEDDED_SDIO
+void mmc_set_embedded_sdio_data(struct mmc_host *host,
+				struct sdio_cis *cis,
+				struct sdio_cccr *cccr,
+				struct sdio_embedded_func *funcs,
+				int num_funcs)
+{
+	host->embedded_sdio_data.cis = cis;
+	host->embedded_sdio_data.cccr = cccr;
+	host->embedded_sdio_data.funcs = funcs;
+	host->embedded_sdio_data.num_funcs = num_funcs;
+}
+
+EXPORT_SYMBOL(mmc_set_embedded_sdio_data);
+#endif
+
 static int __init mmc_init(void)
 {
 	int ret;
@@ -2872,6 +4603,63 @@
 	destroy_workqueue(workqueue);
 }
 
+#ifdef CONFIG_BLOCK
+static ssize_t
+latency_hist_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	struct mmc_host *host = cls_dev_to_mmc_host(dev);
+	size_t written_bytes;
+
+	written_bytes = blk_latency_hist_show("Read", &host->io_lat_read,
+			buf, PAGE_SIZE);
+	written_bytes += blk_latency_hist_show("Write", &host->io_lat_write,
+			buf + written_bytes, PAGE_SIZE - written_bytes);
+
+	return written_bytes;
+}
+
+/*
+ * Values permitted 0, 1, 2.
+ * 0 -> Disable IO latency histograms (default)
+ * 1 -> Enable IO latency histograms
+ * 2 -> Zero out IO latency histograms
+ */
+static ssize_t
+latency_hist_store(struct device *dev, struct device_attribute *attr,
+		   const char *buf, size_t count)
+{
+	struct mmc_host *host = cls_dev_to_mmc_host(dev);
+	long value;
+
+	if (kstrtol(buf, 0, &value))
+		return -EINVAL;
+	if (value == BLK_IO_LAT_HIST_ZERO) {
+		memset(&host->io_lat_read, 0, sizeof(host->io_lat_read));
+		memset(&host->io_lat_write, 0, sizeof(host->io_lat_write));
+	} else if (value == BLK_IO_LAT_HIST_ENABLE ||
+		 value == BLK_IO_LAT_HIST_DISABLE)
+		host->latency_hist_enabled = value;
+	return count;
+}
+
+static DEVICE_ATTR(latency_hist, S_IRUGO | S_IWUSR,
+		   latency_hist_show, latency_hist_store);
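+
+/*
+ * Illustrative usage from userspace (the mmc0 path is an example, not
+ * a guaranteed name):
+ *   echo 1 > /sys/class/mmc_host/mmc0/latency_hist	(enable)
+ *   cat /sys/class/mmc_host/mmc0/latency_hist		(dump histograms)
+ *   echo 2 > /sys/class/mmc_host/mmc0/latency_hist	(zero out)
+ */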
+
+void
+mmc_latency_hist_sysfs_init(struct mmc_host *host)
+{
+	if (device_create_file(&host->class_dev, &dev_attr_latency_hist))
+		dev_err(&host->class_dev,
+			"Failed to create latency_hist sysfs entry\n");
+}
+
+void
+mmc_latency_hist_sysfs_exit(struct mmc_host *host)
+{
+	device_remove_file(&host->class_dev, &dev_attr_latency_hist);
+}
+#endif
+
 subsys_initcall(mmc_init);
 module_exit(mmc_exit);
 
diff -ruw linux-4.4.115/drivers/mmc/core/core.h linux-4.4.115-fbx/drivers/mmc/core/core.h
--- linux-4.4.115/drivers/mmc/core/core.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/mmc/core/core.h	2019-01-22 16:16:24.767257853 +0100
@@ -15,21 +15,6 @@
 
 #define MMC_CMD_RETRIES        3
 
-struct mmc_bus_ops {
-	void (*remove)(struct mmc_host *);
-	void (*detect)(struct mmc_host *);
-	int (*pre_suspend)(struct mmc_host *);
-	int (*suspend)(struct mmc_host *);
-	int (*resume)(struct mmc_host *);
-	int (*runtime_suspend)(struct mmc_host *);
-	int (*runtime_resume)(struct mmc_host *);
-	int (*power_save)(struct mmc_host *);
-	int (*power_restore)(struct mmc_host *);
-	int (*alive)(struct mmc_host *);
-	int (*shutdown)(struct mmc_host *);
-	int (*reset)(struct mmc_host *);
-};
-
 void mmc_attach_bus(struct mmc_host *host, const struct mmc_bus_ops *ops);
 void mmc_detach_bus(struct mmc_host *host);
 
@@ -40,6 +25,11 @@
 
 void mmc_set_chip_select(struct mmc_host *host, int mode);
 void mmc_set_clock(struct mmc_host *host, unsigned int hz);
+int mmc_clk_update_freq(struct mmc_host *host,
+		unsigned long freq, enum mmc_load state);
+void mmc_gate_clock(struct mmc_host *host);
+void mmc_ungate_clock(struct mmc_host *host);
+void mmc_set_ungated(struct mmc_host *host);
 void mmc_set_bus_mode(struct mmc_host *host, unsigned int mode);
 void mmc_set_bus_width(struct mmc_host *host, unsigned int width);
 u32 mmc_select_voltage(struct mmc_host *host, u32 ocr);
@@ -59,6 +49,8 @@
 	if (ms < 1000 / HZ) {
 		cond_resched();
 		mdelay(ms);
+	} else if (ms < jiffies_to_msecs(2)) {
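+		/*
+		 * For waits shorter than two jiffies, msleep() can
+		 * overshoot by a whole scheduler tick, so usleep_range()
+		 * is used to bound the wait instead.
+		 */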
+		usleep_range(ms * 1000, (ms + 1) * 1000);
 	} else {
 		msleep(ms);
 	}
@@ -86,6 +78,12 @@
 
 void mmc_init_context_info(struct mmc_host *host);
 
+extern bool mmc_can_scale_clk(struct mmc_host *host);
+extern int mmc_init_clk_scaling(struct mmc_host *host);
+extern int mmc_resume_clk_scaling(struct mmc_host *host);
+extern int mmc_exit_clk_scaling(struct mmc_host *host);
+extern unsigned long mmc_get_max_frequency(struct mmc_host *host);
+
 int mmc_execute_tuning(struct mmc_card *card);
 int mmc_hs200_to_hs400(struct mmc_card *card);
 int mmc_hs400_to_hs200(struct mmc_card *card);
diff -ruw linux-4.4.115/drivers/mmc/core/debugfs.c linux-4.4.115-fbx/drivers/mmc/core/debugfs.c
--- linux-4.4.115/drivers/mmc/core/debugfs.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/mmc/core/debugfs.c	2019-10-29 09:26:24.057207230 +0100
@@ -15,6 +15,7 @@
 #include <linux/slab.h>
 #include <linux/stat.h>
 #include <linux/fault-inject.h>
+#include <linux/uaccess.h>
 
 #include <linux/mmc/card.h>
 #include <linux/mmc/host.h>
@@ -31,6 +32,26 @@
 #endif /* CONFIG_FAIL_MMC_REQUEST */
 
 /* The debugfs functions are optimized away when CONFIG_DEBUG_FS isn't set. */
+static int mmc_ring_buffer_show(struct seq_file *s, void *data)
+{
+	struct mmc_host *mmc = s->private;
+
+	mmc_dump_trace_buffer(mmc, s);
+	return 0;
+}
+
+static int mmc_ring_buffer_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, mmc_ring_buffer_show, inode->i_private);
+}
+
+static const struct file_operations mmc_ring_buffer_fops = {
+	.open		= mmc_ring_buffer_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+
 static int mmc_ios_show(struct seq_file *s, void *data)
 {
 	static const char *vdd_str[] = {
@@ -233,6 +254,132 @@
 DEFINE_SIMPLE_ATTRIBUTE(mmc_clock_fops, mmc_clock_opt_get, mmc_clock_opt_set,
 	"%llu\n");
 
+#include <linux/delay.h>
+
+static int mmc_scale_get(void *data, u64 *val)
+{
+	struct mmc_host *host = data;
+
+	*val = host->clk_scaling.curr_freq;
+
+	return 0;
+}
+
+static int mmc_scale_set(void *data, u64 val)
+{
+	int err = 0;
+	struct mmc_host *host = data;
+
+	mmc_claim_host(host);
+	mmc_host_clk_hold(host);
+
+	/* change frequency manually from debugfs */
+	err = mmc_clk_update_freq(host, val, host->clk_scaling.state);
+	if (err == -EAGAIN)
+		err = 0;
+	else if (err)
+		pr_err("%s: clock scale to %llu failed with error %d\n",
+			mmc_hostname(host), val, err);
+	else
+		pr_debug("%s: clock change to %llu finished successfully (%s)\n",
+			mmc_hostname(host), val, current->comm);
+
+	mmc_host_clk_release(host);
+	mmc_release_host(host);
+
+	return err;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(mmc_scale_fops, mmc_scale_get, mmc_scale_set,
+	"%llu\n");
+
+static int mmc_max_clock_get(void *data, u64 *val)
+{
+	struct mmc_host *host = data;
+
+	if (!host)
+		return -EINVAL;
+
+	*val = host->f_max;
+
+	return 0;
+}
+
+static int mmc_max_clock_set(void *data, u64 val)
+{
+	struct mmc_host *host = data;
+	int err = -EINVAL;
+	unsigned long freq = val;
+	unsigned int old_freq;
+
+	if (!host || (val < host->f_min))
+		goto out;
+
+	mmc_claim_host(host);
+	if (host->bus_ops && host->bus_ops->change_bus_speed) {
+		old_freq = host->f_max;
+		host->f_max = freq;
+
+		err = host->bus_ops->change_bus_speed(host, &freq);
+
+		if (err)
+			host->f_max = old_freq;
+	}
+	mmc_release_host(host);
+out:
+	return err;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(mmc_max_clock_fops, mmc_max_clock_get,
+		mmc_max_clock_set, "%llu\n");
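+
+/*
+ * Illustrative use of the "max_clock" debugfs file added below (the
+ * mmc0 path is an example only):
+ *   echo 100000000 > /sys/kernel/debug/mmc0/max_clock
+ *   cat /sys/kernel/debug/mmc0/max_clock
+ */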
+
+static int mmc_force_err_set(void *data, u64 val)
+{
+	struct mmc_host *host = data;
+
+	if (host && host->card && host->ops &&
+			host->ops->force_err_irq) {
+		/*
+		 * To access the force error irq reg, we need to make
+		 * sure the host is powered up and host clock is ticking.
+		 */
+		mmc_get_card(host->card);
+		host->ops->force_err_irq(host, val);
+		mmc_put_card(host->card);
+	}
+
+	return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(mmc_force_err_fops, NULL, mmc_force_err_set, "%llu\n");
+
+static int mmc_err_state_get(void *data, u64 *val)
+{
+	struct mmc_host *host = data;
+
+	if (!host)
+		return -EINVAL;
+
+	*val = host->err_occurred ? 1 : 0;
+
+	return 0;
+}
+
+static int mmc_err_state_clear(void *data, u64 val)
+{
+	struct mmc_host *host = data;
+
+	if (!host)
+		return -EINVAL;
+
+	host->err_occurred = false;
+
+	return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(mmc_err_state, mmc_err_state_get,
+		mmc_err_state_clear, "%llu\n");
+
 void mmc_add_host_debugfs(struct mmc_host *host)
 {
 	struct dentry *root;
@@ -255,6 +402,38 @@
 			&mmc_clock_fops))
 		goto err_node;
 
+	if (!debugfs_create_file("max_clock", S_IRUSR | S_IWUSR, root, host,
+		&mmc_max_clock_fops))
+		goto err_node;
+
+	if (!debugfs_create_file("scale", S_IRUSR | S_IWUSR, root, host,
+		&mmc_scale_fops))
+		goto err_node;
+
+	if (!debugfs_create_bool("skip_clk_scale_freq_update",
+		S_IRUSR | S_IWUSR, root,
+		&host->clk_scaling.skip_clk_scale_freq_update))
+		goto err_node;
+
+	if (!debugfs_create_bool("cmdq_task_history",
+		S_IRUSR | S_IWUSR, root,
+		&host->cmdq_thist_enabled))
+		goto err_node;
+
+#ifdef CONFIG_MMC_RING_BUFFER
+	if (!debugfs_create_file("ring_buffer", S_IRUSR,
+				root, host, &mmc_ring_buffer_fops))
+		goto err_node;
+#endif
+	if (!debugfs_create_file("err_state", S_IRUSR | S_IWUSR, root, host,
+		&mmc_err_state))
+		goto err_node;
+
+#ifdef CONFIG_MMC_CLKGATE
+	if (!debugfs_create_u32("clk_delay", (S_IRUSR | S_IWUSR),
+				root, &host->clk_delay))
+		goto err_node;
+#endif
 #ifdef CONFIG_FAIL_MMC_REQUEST
 	if (fail_request)
 		setup_fault_attr(&fail_default_attr, fail_request);
@@ -264,6 +443,10 @@
 					     &host->fail_mmc_request)))
 		goto err_node;
 #endif
+	if (!debugfs_create_file("force_error", S_IWUSR, root, host,
+		&mmc_force_err_fops))
+		goto err_node;
+
 	return;
 
 err_node:
@@ -285,11 +468,26 @@
 	int		ret;
 
 	mmc_get_card(card);
+	if (mmc_card_cmdq(card)) {
+		ret = mmc_cmdq_halt_on_empty_queue(card->host);
+		if (ret) {
+			pr_err("%s: halt failed while doing %s err (%d)\n",
+					mmc_hostname(card->host), __func__,
+					ret);
+			goto out;
+		}
+	}
 
 	ret = mmc_send_status(data, &status);
 	if (!ret)
 		*val = status;
 
+	if (mmc_card_cmdq(card)) {
+		if (mmc_cmdq_halt(card->host, false))
+			pr_err("%s: %s: cmdq unhalt failed\n",
+			       mmc_hostname(card->host), __func__);
+	}
+out:
 	mmc_put_card(card);
 
 	return ret;
@@ -312,8 +510,18 @@
 		return -ENOMEM;
 
 	mmc_get_card(card);
-	err = mmc_get_ext_csd(card, &ext_csd);
+	if (mmc_card_cmdq(card)) {
+		err = mmc_cmdq_halt_on_empty_queue(card->host);
+		if (err) {
+			pr_err("%s: halt failed while doing %s err (%d)\n",
+					mmc_hostname(card->host), __func__,
+					err);
-	mmc_put_card(card);
+			mmc_put_card(card);
+			goto out_free_halt;
+		}
+	}
+
+	err = mmc_get_ext_csd(card, &ext_csd);
 	if (err)
 		goto out_free;
 
@@ -323,10 +531,25 @@
 	BUG_ON(n != EXT_CSD_STR_LEN);
 
 	filp->private_data = buf;
+
+	if (mmc_card_cmdq(card)) {
+		if (mmc_cmdq_halt(card->host, false))
+			pr_err("%s: %s: cmdq unhalt failed\n",
+			       mmc_hostname(card->host), __func__);
+	}
+
+	mmc_put_card(card);
 	kfree(ext_csd);
 	return 0;
 
 out_free:
+	if (mmc_card_cmdq(card)) {
+		if (mmc_cmdq_halt(card->host, false))
+			pr_err("%s: %s: cmdq unhalt failed\n",
+			       mmc_hostname(card->host), __func__);
+	}
+	mmc_put_card(card);
+out_free_halt:
 	kfree(buf);
 	return err;
 }
@@ -353,6 +576,296 @@
 	.llseek		= default_llseek,
 };
 
+static int mmc_wr_pack_stats_open(struct inode *inode, struct file *filp)
+{
+	struct mmc_card *card = inode->i_private;
+
+	filp->private_data = card;
+	card->wr_pack_stats.print_in_read = 1;
+	return 0;
+}
+
+#define TEMP_BUF_SIZE 256
+static ssize_t mmc_wr_pack_stats_read(struct file *filp, char __user *ubuf,
+				size_t cnt, loff_t *ppos)
+{
+	struct mmc_card *card = filp->private_data;
+	struct mmc_wr_pack_stats *pack_stats;
+	int i, ret = 0;
+	int max_num_of_packed_reqs = 0;
+	char *temp_buf, *temp_ubuf;
+	size_t tubuf_cnt = 0;
+
+	if (!card)
+		return cnt;
+
+	if (!access_ok(VERIFY_WRITE, ubuf, cnt))
+		return cnt;
+
+	if (!card->wr_pack_stats.print_in_read)
+		return 0;
+
+	if (!card->wr_pack_stats.enabled) {
+		pr_info("%s: write packing statistics are disabled\n",
+			 mmc_hostname(card->host));
+		goto exit;
+	}
+
+	pack_stats = &card->wr_pack_stats;
+
+	if (!pack_stats->packing_events) {
+		pr_info("%s: NULL packing_events\n", mmc_hostname(card->host));
+		goto exit;
+	}
+
+	max_num_of_packed_reqs = card->ext_csd.max_packed_writes;
+
+	if (cnt <= (strlen_user(ubuf) + 1))
+		goto exit;
+
+	temp_buf = kzalloc(TEMP_BUF_SIZE, GFP_KERNEL);
+	if (!temp_buf)
+		goto exit;
+
+	tubuf_cnt = cnt - strlen_user(ubuf) - 1;
+
+	temp_ubuf = kzalloc(tubuf_cnt, GFP_KERNEL);
+	if (!temp_ubuf)
+		goto cleanup;
+
+	spin_lock(&pack_stats->lock);
+
+	snprintf(temp_buf, TEMP_BUF_SIZE, "%s: write packing statistics:\n",
+		mmc_hostname(card->host));
+	strlcat(temp_ubuf, temp_buf, tubuf_cnt);
+
+	for (i = 1 ; i <= max_num_of_packed_reqs ; ++i) {
+		if (pack_stats->packing_events[i]) {
+			snprintf(temp_buf, TEMP_BUF_SIZE,
+				 "%s: Packed %d reqs - %d times\n",
+				mmc_hostname(card->host), i,
+				pack_stats->packing_events[i]);
+			strlcat(temp_ubuf, temp_buf, tubuf_cnt);
+		}
+	}
+
+	snprintf(temp_buf, TEMP_BUF_SIZE,
+		 "%s: stopped packing due to the following reasons:\n",
+		 mmc_hostname(card->host));
+	strlcat(temp_ubuf, temp_buf, tubuf_cnt);
+
+	if (pack_stats->pack_stop_reason[EXCEEDS_SEGMENTS]) {
+		snprintf(temp_buf, TEMP_BUF_SIZE,
+			 "%s: %d times: exceed max num of segments\n",
+			 mmc_hostname(card->host),
+			 pack_stats->pack_stop_reason[EXCEEDS_SEGMENTS]);
+		strlcat(temp_ubuf, temp_buf, tubuf_cnt);
+	}
+	if (pack_stats->pack_stop_reason[EXCEEDS_SECTORS]) {
+		snprintf(temp_buf, TEMP_BUF_SIZE,
+			 "%s: %d times: exceed max num of sectors\n",
+			mmc_hostname(card->host),
+			pack_stats->pack_stop_reason[EXCEEDS_SECTORS]);
+		strlcat(temp_ubuf, temp_buf, tubuf_cnt);
+	}
+	if (pack_stats->pack_stop_reason[WRONG_DATA_DIR]) {
+		snprintf(temp_buf, TEMP_BUF_SIZE,
+			 "%s: %d times: wrong data direction\n",
+			mmc_hostname(card->host),
+			pack_stats->pack_stop_reason[WRONG_DATA_DIR]);
+		strlcat(temp_ubuf, temp_buf, tubuf_cnt);
+	}
+	if (pack_stats->pack_stop_reason[FLUSH_OR_DISCARD]) {
+		snprintf(temp_buf, TEMP_BUF_SIZE,
+			 "%s: %d times: flush or discard\n",
+			mmc_hostname(card->host),
+			pack_stats->pack_stop_reason[FLUSH_OR_DISCARD]);
+		strlcat(temp_ubuf, temp_buf, tubuf_cnt);
+	}
+	if (pack_stats->pack_stop_reason[EMPTY_QUEUE]) {
+		snprintf(temp_buf, TEMP_BUF_SIZE,
+			 "%s: %d times: empty queue\n",
+			mmc_hostname(card->host),
+			pack_stats->pack_stop_reason[EMPTY_QUEUE]);
+		strlcat(temp_ubuf, temp_buf, tubuf_cnt);
+	}
+	if (pack_stats->pack_stop_reason[REL_WRITE]) {
+		snprintf(temp_buf, TEMP_BUF_SIZE,
+			 "%s: %d times: rel write\n",
+			mmc_hostname(card->host),
+			pack_stats->pack_stop_reason[REL_WRITE]);
+		strlcat(temp_ubuf, temp_buf, tubuf_cnt);
+	}
+	if (pack_stats->pack_stop_reason[THRESHOLD]) {
+		snprintf(temp_buf, TEMP_BUF_SIZE,
+			 "%s: %d times: Threshold\n",
+			mmc_hostname(card->host),
+			pack_stats->pack_stop_reason[THRESHOLD]);
+		strlcat(temp_ubuf, temp_buf, tubuf_cnt);
+	}
+
+	if (pack_stats->pack_stop_reason[LARGE_SEC_ALIGN]) {
+		snprintf(temp_buf, TEMP_BUF_SIZE,
+			 "%s: %d times: Large sector alignment\n",
+			mmc_hostname(card->host),
+			pack_stats->pack_stop_reason[LARGE_SEC_ALIGN]);
+		strlcat(temp_ubuf, temp_buf, tubuf_cnt);
+	}
+	if (pack_stats->pack_stop_reason[RANDOM]) {
+		snprintf(temp_buf, TEMP_BUF_SIZE,
+			 "%s: %d times: random request\n",
+			mmc_hostname(card->host),
+			pack_stats->pack_stop_reason[RANDOM]);
+		strlcat(temp_ubuf, temp_buf, tubuf_cnt);
+	}
+	if (pack_stats->pack_stop_reason[FUA]) {
+		snprintf(temp_buf, TEMP_BUF_SIZE,
+			 "%s: %d times: fua request\n",
+			mmc_hostname(card->host),
+			pack_stats->pack_stop_reason[FUA]);
+		strlcat(temp_ubuf, temp_buf, tubuf_cnt);
+	}
+	if (strlen_user(ubuf) < cnt - strlen(temp_ubuf))
+		ret = copy_to_user((ubuf + strlen_user(ubuf)),
+				temp_ubuf, tubuf_cnt);
+	else
+		ret = -EFAULT;
+	if (ret)
+		pr_err("%s: %s: Copy to userspace failed: %s\n",
+				mmc_hostname(card->host), __func__, ubuf);
+
+	spin_unlock(&pack_stats->lock);
+
+	kfree(temp_ubuf);
+
+cleanup:
+	kfree(temp_buf);
+
+	pr_info("%s", ubuf);
+
+exit:
+	if (card->wr_pack_stats.print_in_read == 1) {
+		card->wr_pack_stats.print_in_read = 0;
+		return strnlen(ubuf, cnt);
+	}
+
+	return 0;
+}
+
+static ssize_t mmc_wr_pack_stats_write(struct file *filp,
+				       const char __user *ubuf, size_t cnt,
+				       loff_t *ppos)
+{
+	struct mmc_card *card = filp->private_data;
+	int value;
+
+	if (!card)
+		return cnt;
+
+	if (!access_ok(VERIFY_READ, ubuf, cnt))
+		return cnt;
+
+	sscanf(ubuf, "%d", &value);
+	if (value) {
+		mmc_blk_init_packed_statistics(card);
+	} else {
+		spin_lock(&card->wr_pack_stats.lock);
+		card->wr_pack_stats.enabled = false;
+		spin_unlock(&card->wr_pack_stats.lock);
+	}
+
+	return cnt;
+}
+
+static const struct file_operations mmc_dbg_wr_pack_stats_fops = {
+	.open		= mmc_wr_pack_stats_open,
+	.read		= mmc_wr_pack_stats_read,
+	.write		= mmc_wr_pack_stats_write,
+};
+
+static int mmc_bkops_stats_read(struct seq_file *file, void *data)
+{
+	struct mmc_card *card = file->private;
+	struct mmc_bkops_stats *stats;
+	int i;
+
+	if (!card)
+		return -EINVAL;
+
+	stats = &card->bkops.stats;
+
+	if (!stats->enabled) {
+		pr_info("%s: bkops statistics are disabled\n",
+			 mmc_hostname(card->host));
+		goto exit;
+	}
+
+	spin_lock(&stats->lock);
+
+	seq_printf(file, "%s: bkops statistics:\n",
+			mmc_hostname(card->host));
+	seq_printf(file, "%s: BKOPS: sent START_BKOPS to device: %u\n",
+			mmc_hostname(card->host), stats->manual_start);
+	seq_printf(file, "%s: BKOPS: stopped due to HPI: %u\n",
+			mmc_hostname(card->host), stats->hpi);
+	seq_printf(file, "%s: BKOPS: sent AUTO_EN set to 1: %u\n",
+			mmc_hostname(card->host), stats->auto_start);
+	seq_printf(file, "%s: BKOPS: sent AUTO_EN set to 0: %u\n",
+			mmc_hostname(card->host), stats->auto_stop);
+
+	for (i = 0 ; i < MMC_BKOPS_NUM_SEVERITY_LEVELS ; ++i)
+		seq_printf(file, "%s: BKOPS: due to level %d: %u\n",
+			 mmc_hostname(card->host), i, stats->level[i]);
+
+	spin_unlock(&stats->lock);
+
+exit:
+
+	return 0;
+}
+
+static ssize_t mmc_bkops_stats_write(struct file *filp,
+				      const char __user *ubuf, size_t cnt,
+				      loff_t *ppos)
+{
+	struct mmc_card *card = filp->f_mapping->host->i_private;
+	int value;
+	struct mmc_bkops_stats *stats;
+	int err;
+
+	if (!card)
+		return cnt;
+
+	stats = &card->bkops.stats;
+
+	err = kstrtoint_from_user(ubuf, cnt, 0, &value);
+	if (err) {
+		pr_err("%s: %s: error parsing input from user (%d)\n",
+				mmc_hostname(card->host), __func__, err);
+		return err;
+	}
+	if (value) {
+		mmc_blk_init_bkops_statistics(card);
+	} else {
+		spin_lock(&stats->lock);
+		stats->enabled = false;
+		spin_unlock(&stats->lock);
+	}
+
+	return cnt;
+}
+
+static int mmc_bkops_stats_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, mmc_bkops_stats_read, inode->i_private);
+}
+
+static const struct file_operations mmc_dbg_bkops_stats_fops = {
+	.open		= mmc_bkops_stats_open,
+	.read		= seq_read,
+	.write		= mmc_bkops_stats_write,
+};
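+
+/*
+ * Illustrative usage of the "bkops_stats" debugfs file created in
+ * mmc_add_card_debugfs() below (the card path is an example only):
+ *   cat /sys/kernel/debug/mmc0/mmc0:0001/bkops_stats      (dump counters)
+ *   echo 0 > /sys/kernel/debug/mmc0/mmc0:0001/bkops_stats (disable)
+ */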
+
 void mmc_add_card_debugfs(struct mmc_card *card)
 {
 	struct mmc_host	*host = card->host;
@@ -385,6 +898,19 @@
 					&mmc_dbg_ext_csd_fops))
 			goto err;
 
+	if (mmc_card_mmc(card) && (card->ext_csd.rev >= 6) &&
+	    (card->host->caps2 & MMC_CAP2_PACKED_WR))
+		if (!debugfs_create_file("wr_pack_stats", S_IRUSR, root, card,
+					 &mmc_dbg_wr_pack_stats_fops))
+			goto err;
+
+	if (mmc_card_mmc(card) && (card->ext_csd.rev >= 5) &&
+	    (mmc_card_configured_auto_bkops(card) ||
+	     mmc_card_configured_manual_bkops(card)))
+		if (!debugfs_create_file("bkops_stats", S_IRUSR, root, card,
+					 &mmc_dbg_bkops_stats_fops))
+			goto err;
+
 	return;
 
 err:
diff -ruw linux-4.4.115/drivers/mmc/core/host.c linux-4.4.115-fbx/drivers/mmc/core/host.c
--- linux-4.4.115/drivers/mmc/core/host.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/mmc/core/host.c	2019-10-29 09:26:24.057207230 +0100
@@ -4,6 +4,7 @@
  *  Copyright (C) 2003 Russell King, All Rights Reserved.
  *  Copyright (C) 2007-2008 Pierre Ossman
  *  Copyright (C) 2010 Linus Walleij
+ *  Copyright (c) 2012, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -25,6 +26,8 @@
 
 #include <linux/mmc/host.h>
 #include <linux/mmc/card.h>
+#include <linux/mmc/ring_buffer.h>
+
 #include <linux/mmc/slot-gpio.h>
 
 #include "core.h"
@@ -33,6 +36,9 @@
 #include "pwrseq.h"
 
 #define cls_dev_to_mmc_host(d)	container_of(d, struct mmc_host, class_dev)
+#define MMC_DEVFRQ_DEFAULT_UP_THRESHOLD 35
+#define MMC_DEVFRQ_DEFAULT_DOWN_THRESHOLD 5
+#define MMC_DEVFRQ_DEFAULT_POLLING_MSEC 100
 
 static DEFINE_IDR(mmc_host_idr);
 static DEFINE_SPINLOCK(mmc_host_lock);
@@ -46,9 +52,28 @@
 	kfree(host);
 }
 
+static int mmc_host_prepare(struct device *dev)
+{
+	/*
+	 * Since mmc_host is a virtual device, we don't have to do anything.
+	 * If we return a positive value, the pm framework will consider that
+	 * the runtime suspend and system suspend of this device are the same
+	 * and will set the direct_complete flag to true. We don't want this,
+	 * as the mmc_host always has a positive disable_depth, and setting
+	 * the flag will not speed up the suspend process.
+	 * So return 0.
+	 */
+	return 0;
+}
+
+static const struct dev_pm_ops mmc_pm_ops = {
+	.prepare = mmc_host_prepare,
+};
+
 static struct class mmc_host_class = {
 	.name		= "mmc_host",
 	.dev_release	= mmc_host_classdev_release,
+	.pm		= &mmc_pm_ops,
 };
 
 int mmc_register_host_class(void)
@@ -61,6 +86,259 @@
 	class_unregister(&mmc_host_class);
 }
 
+#ifdef CONFIG_MMC_CLKGATE
+static ssize_t clkgate_delay_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct mmc_host *host = cls_dev_to_mmc_host(dev);
+	return snprintf(buf, PAGE_SIZE, "%lu\n", host->clkgate_delay);
+}
+
+static ssize_t clkgate_delay_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct mmc_host *host = cls_dev_to_mmc_host(dev);
+	unsigned long flags, value;
+
+	if (kstrtoul(buf, 0, &value))
+		return -EINVAL;
+
+	spin_lock_irqsave(&host->clk_lock, flags);
+	host->clkgate_delay = value;
+	spin_unlock_irqrestore(&host->clk_lock, flags);
+	return count;
+}
+
+/*
+ * Enabling clock gating will make the core call out to the host
+ * once up and once down when it performs a request or card operation
+ * intermingled in any fashion. The driver will see this through
+ * set_ios() operations with ios.clock field set to 0 to gate (disable)
+ * the block clock, and to the old frequency to enable it again.
+ */
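+/*
+ * Sketch of the set_ios() sequence a host driver might observe with
+ * gating active (the frequency is illustrative):
+ *
+ *	set_ios(ios.clock = 50000000)	ungated via mmc_host_clk_hold()
+ *	... request runs ...
+ *	set_ios(ios.clock = 0)		gated after clkgate_delay expires
+ */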
+static void mmc_host_clk_gate_delayed(struct mmc_host *host)
+{
+	unsigned long tick_ns;
+	unsigned long freq = host->ios.clock;
+	unsigned long flags;
+
+	if (!freq) {
+		pr_debug("%s: frequency set to 0 in disable function, "
+			 "this means the clock is already disabled.\n",
+			 mmc_hostname(host));
+		return;
+	}
+	/*
+	 * New requests may have appeared while we were scheduling,
+	 * then there is no reason to delay the check before
+	 * clk_disable().
+	 */
+	spin_lock_irqsave(&host->clk_lock, flags);
+
+	/*
+	 * Delay n bus cycles (at least 8 from MMC spec) before attempting
+	 * to disable the MCI block clock. The reference count may have
+	 * gone up again after this delay due to rescheduling!
+	 */
+	if (!host->clk_requests) {
+		spin_unlock_irqrestore(&host->clk_lock, flags);
+		tick_ns = DIV_ROUND_UP(1000000000, freq);
+		ndelay(host->clk_delay * tick_ns);
+	} else {
+		/* New users appeared while waiting for this work */
+		spin_unlock_irqrestore(&host->clk_lock, flags);
+		return;
+	}
+	mutex_lock(&host->clk_gate_mutex);
+	spin_lock_irqsave(&host->clk_lock, flags);
+	if (!host->clk_requests) {
+		spin_unlock_irqrestore(&host->clk_lock, flags);
+		/* This will set host->ios.clock to 0 */
+		mmc_gate_clock(host);
+		spin_lock_irqsave(&host->clk_lock, flags);
+		pr_debug("%s: gated MCI clock\n", mmc_hostname(host));
+	}
+	spin_unlock_irqrestore(&host->clk_lock, flags);
+	mutex_unlock(&host->clk_gate_mutex);
+}
+
+/*
+ * Internal work. Work to disable the clock at some later point.
+ */
+static void mmc_host_clk_gate_work(struct work_struct *work)
+{
+	struct mmc_host *host = container_of(work, struct mmc_host,
+					      clk_gate_work.work);
+
+	mmc_host_clk_gate_delayed(host);
+}
+
+/**
+ *	mmc_host_clk_hold - ungate hardware MCI clocks
+ *	@host: host to ungate.
+ *
+ *	Makes sure the host ios.clock is restored to a non-zero value
+ *	past this call.	Increase clock reference count and ungate clock
+ *	if we're the first user.
+ */
+void mmc_host_clk_hold(struct mmc_host *host)
+{
+	unsigned long flags;
+
+	/* cancel any clock gating work scheduled by mmc_host_clk_release() */
+	cancel_delayed_work_sync(&host->clk_gate_work);
+	mutex_lock(&host->clk_gate_mutex);
+	spin_lock_irqsave(&host->clk_lock, flags);
+	if (host->clk_gated) {
+		spin_unlock_irqrestore(&host->clk_lock, flags);
+		mmc_ungate_clock(host);
+
+		spin_lock_irqsave(&host->clk_lock, flags);
+		pr_debug("%s: ungated MCI clock\n", mmc_hostname(host));
+	}
+	host->clk_requests++;
+	spin_unlock_irqrestore(&host->clk_lock, flags);
+	mutex_unlock(&host->clk_gate_mutex);
+}
+
+/**
+ *	mmc_host_may_gate_card - check if this card may be gated
+ *	@card: card to check.
+ */
+bool mmc_host_may_gate_card(struct mmc_card *card)
+{
+	/* If there is no card we may gate it */
+	if (!card)
+		return true;
+
+	/*
+	 * An SDIO 3.0 card allows the clock to be gated off, so check
+	 * whether that is the case.
+	 */
+	if (mmc_card_sdio(card) && card->cccr.async_intr_sup)
+		return true;
+
+	/*
+	 * Don't gate SDIO cards! These need to be clocked at all times
+	 * since they may be independent systems generating interrupts
+	 * and other events. The clock requests counter from the core will
+	 * go down to zero since the core does not need it, but we will not
+	 * gate the clock, because there is somebody out there that may still
+	 * be using it.
+	 */
+	return !(card->quirks & MMC_QUIRK_BROKEN_CLK_GATING);
+}
+
+/**
+ *	mmc_host_clk_release - gate off hardware MCI clocks
+ *	@host: host to gate.
+ *
+ *	Calls the host driver with ios.clock set to zero as often as possible
+ *	in order to gate off hardware MCI clocks. Decrease clock reference
+ *	count and schedule disabling of clock.
+ */
+void mmc_host_clk_release(struct mmc_host *host)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&host->clk_lock, flags);
+	host->clk_requests--;
+	if (mmc_host_may_gate_card(host->card) &&
+	    !host->clk_requests)
+		schedule_delayed_work(&host->clk_gate_work,
+				      msecs_to_jiffies(host->clkgate_delay));
+	spin_unlock_irqrestore(&host->clk_lock, flags);
+}
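+
+/*
+ * Typical pairing in the core, as done by e.g. mmc_set_timing() in
+ * core.c: hold the clock around an ios update so the controller clock
+ * cannot be gated mid-update, then release it:
+ *
+ *	mmc_host_clk_hold(host);
+ *	host->ios.timing = timing;
+ *	mmc_set_ios(host);
+ *	mmc_host_clk_release(host);
+ */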
+
+/**
+ *	mmc_host_clk_rate - get current clock frequency setting
+ *	@host: host to get the clock frequency for.
+ *
+ *	Returns current clock frequency regardless of gating.
+ */
+unsigned int mmc_host_clk_rate(struct mmc_host *host)
+{
+	unsigned long freq;
+	unsigned long flags;
+
+	spin_lock_irqsave(&host->clk_lock, flags);
+	if (host->clk_gated)
+		freq = host->clk_old;
+	else
+		freq = host->ios.clock;
+	spin_unlock_irqrestore(&host->clk_lock, flags);
+	return freq;
+}
+
+/**
+ *	mmc_host_clk_init - set up clock gating code
+ *	@host: host with potential clock to control
+ */
+static inline void mmc_host_clk_init(struct mmc_host *host)
+{
+	host->clk_requests = 0;
+	/* Hold MCI clock for 8 cycles by default */
+	host->clk_delay = 8;
+	/*
+	 * Default clock gating delay is 0ms to avoid wasting power.
+	 * This value can be tuned by writing into sysfs entry.
+	 */
+	host->clkgate_delay = 0;
+	host->clk_gated = false;
+	INIT_DELAYED_WORK(&host->clk_gate_work, mmc_host_clk_gate_work);
+	spin_lock_init(&host->clk_lock);
+	mutex_init(&host->clk_gate_mutex);
+}
+
+/**
+ *	mmc_host_clk_exit - shut down clock gating code
+ *	@host: host with potential clock to control
+ */
+static inline void mmc_host_clk_exit(struct mmc_host *host)
+{
+	/*
+	 * Wait for any outstanding gate and then make sure we're
+	 * ungated before exiting.
+	 */
+	if (cancel_delayed_work_sync(&host->clk_gate_work))
+		mmc_host_clk_gate_delayed(host);
+	if (host->clk_gated)
+		mmc_host_clk_hold(host);
+	/* There should be only one user now */
+	WARN_ON(host->clk_requests > 1);
+}
+
+static inline void mmc_host_clk_sysfs_init(struct mmc_host *host)
+{
+	host->clkgate_delay_attr.show = clkgate_delay_show;
+	host->clkgate_delay_attr.store = clkgate_delay_store;
+	sysfs_attr_init(&host->clkgate_delay_attr.attr);
+	host->clkgate_delay_attr.attr.name = "clkgate_delay";
+	host->clkgate_delay_attr.attr.mode = S_IRUGO | S_IWUSR;
+	if (device_create_file(&host->class_dev, &host->clkgate_delay_attr))
+		pr_err("%s: Failed to create clkgate_delay sysfs entry\n",
+				mmc_hostname(host));
+}
+#else
+
+static inline void mmc_host_clk_init(struct mmc_host *host)
+{
+}
+
+static inline void mmc_host_clk_exit(struct mmc_host *host)
+{
+}
+
+static inline void mmc_host_clk_sysfs_init(struct mmc_host *host)
+{
+}
+
+bool mmc_host_may_gate_card(struct mmc_card *card)
+{
+	return false;
+}
+#endif
+
 void mmc_retune_enable(struct mmc_host *host)
 {
 	host->can_retune = 1;
@@ -68,6 +346,7 @@
 		mod_timer(&host->retune_timer,
 			  jiffies + host->retune_period * HZ);
 }
+EXPORT_SYMBOL(mmc_retune_enable);
 
 void mmc_retune_disable(struct mmc_host *host)
 {
@@ -76,6 +355,7 @@
 	host->retune_now = 0;
 	host->need_retune = 0;
 }
+EXPORT_SYMBOL(mmc_retune_disable);
 
 void mmc_retune_timer_stop(struct mmc_host *host)
 {
@@ -345,6 +625,8 @@
 		return NULL;
 	}
 
+	mmc_host_clk_init(host);
+
 	spin_lock_init(&host->lock);
 	init_waitqueue_head(&host->wq);
 	INIT_DELAYED_WORK(&host->detect, mmc_rescan);
@@ -369,6 +651,217 @@
 
 EXPORT_SYMBOL(mmc_alloc_host);
 
+static ssize_t show_enable(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct mmc_host *host = cls_dev_to_mmc_host(dev);
+
+	if (!host)
+		return -EINVAL;
+
+	return snprintf(buf, PAGE_SIZE, "%d\n", mmc_can_scale_clk(host));
+}
+
+static ssize_t store_enable(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct mmc_host *host = cls_dev_to_mmc_host(dev);
+	unsigned long value;
+
+	if (!host || !host->card || kstrtoul(buf, 0, &value))
+		return -EINVAL;
+
+	mmc_get_card(host->card);
+
+	if (!value) {
+		/* Suspend the clock scaling and mask host capability */
+		if (host->clk_scaling.enable)
+			mmc_suspend_clk_scaling(host);
+		host->caps2 &= ~MMC_CAP2_CLK_SCALE;
+		host->clk_scaling.state = MMC_LOAD_HIGH;
+		/* Set to max. frequency when disabling */
+		mmc_clk_update_freq(host, host->card->clk_scaling_highest,
+					host->clk_scaling.state);
+	} else {
+		/* Unmask host capability and resume scaling */
+		host->caps2 |= MMC_CAP2_CLK_SCALE;
+		if (!host->clk_scaling.enable)
+			mmc_resume_clk_scaling(host);
+	}
+
+	mmc_put_card(host->card);
+
+	return count;
+}
+
+static ssize_t show_up_threshold(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct mmc_host *host = cls_dev_to_mmc_host(dev);
+
+	if (!host)
+		return -EINVAL;
+
+	return snprintf(buf, PAGE_SIZE, "%d\n", host->clk_scaling.upthreshold);
+}
+
+#define MAX_PERCENTAGE	100
+static ssize_t store_up_threshold(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct mmc_host *host = cls_dev_to_mmc_host(dev);
+	unsigned long value;
+
+	if (!host || kstrtoul(buf, 0, &value) || (value > MAX_PERCENTAGE))
+		return -EINVAL;
+
+	host->clk_scaling.upthreshold = value;
+
+	pr_debug("%s: clkscale_up_thresh set to %lu\n",
+			mmc_hostname(host), value);
+	return count;
+}
+
+static ssize_t show_down_threshold(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct mmc_host *host = cls_dev_to_mmc_host(dev);
+
+	if (!host)
+		return -EINVAL;
+
+	return snprintf(buf, PAGE_SIZE, "%d\n",
+			host->clk_scaling.downthreshold);
+}
+
+static ssize_t store_down_threshold(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct mmc_host *host = cls_dev_to_mmc_host(dev);
+	unsigned long value;
+
+	if (!host || kstrtoul(buf, 0, &value) || (value > MAX_PERCENTAGE))
+		return -EINVAL;
+
+	host->clk_scaling.downthreshold = value;
+
+	pr_debug("%s: clkscale_down_thresh set to %lu\n",
+			mmc_hostname(host), value);
+	return count;
+}
+
+static ssize_t show_polling(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct mmc_host *host = cls_dev_to_mmc_host(dev);
+
+	if (!host)
+		return -EINVAL;
+
+	return snprintf(buf, PAGE_SIZE, "%lu milliseconds\n",
+			host->clk_scaling.polling_delay_ms);
+}
+
+static ssize_t store_polling(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct mmc_host *host = cls_dev_to_mmc_host(dev);
+	unsigned long value;
+
+	if (!host || kstrtoul(buf, 0, &value))
+		return -EINVAL;
+
+	host->clk_scaling.polling_delay_ms = value;
+
+	pr_debug("%s: clkscale_polling_delay_ms set to %lu\n",
+			mmc_hostname(host), value);
+	return count;
+}
+
+DEVICE_ATTR(enable, S_IRUGO | S_IWUSR,
+		show_enable, store_enable);
+DEVICE_ATTR(polling_interval, S_IRUGO | S_IWUSR,
+		show_polling, store_polling);
+DEVICE_ATTR(up_threshold, S_IRUGO | S_IWUSR,
+		show_up_threshold, store_up_threshold);
+DEVICE_ATTR(down_threshold, S_IRUGO | S_IWUSR,
+		show_down_threshold, store_down_threshold);
+
+static struct attribute *clk_scaling_attrs[] = {
+	&dev_attr_enable.attr,
+	&dev_attr_up_threshold.attr,
+	&dev_attr_down_threshold.attr,
+	&dev_attr_polling_interval.attr,
+	NULL,
+};
+
+static struct attribute_group clk_scaling_attr_grp = {
+	.name = "clk_scaling",
+	.attrs = clk_scaling_attrs,
+};
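+
+/*
+ * The group lands under the host's class device; illustrative usage
+ * (the mmc0 path is an example only):
+ *   echo 35 > /sys/class/mmc_host/mmc0/clk_scaling/up_threshold
+ *   echo 5 > /sys/class/mmc_host/mmc0/clk_scaling/down_threshold
+ *   echo 100 > /sys/class/mmc_host/mmc0/clk_scaling/polling_interval
+ */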
+
+#ifdef CONFIG_MMC_PERF_PROFILING
+static ssize_t
+show_perf(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	struct mmc_host *host = cls_dev_to_mmc_host(dev);
+	int64_t rtime_drv, wtime_drv;
+	unsigned long rbytes_drv, wbytes_drv, flags;
+
+	spin_lock_irqsave(&host->lock, flags);
+
+	rbytes_drv = host->perf.rbytes_drv;
+	wbytes_drv = host->perf.wbytes_drv;
+
+	rtime_drv = ktime_to_us(host->perf.rtime_drv);
+	wtime_drv = ktime_to_us(host->perf.wtime_drv);
+
+	spin_unlock_irqrestore(&host->lock, flags);
+
+	return snprintf(buf, PAGE_SIZE, "Write performance at driver Level:"
+					"%lu bytes in %lld microseconds\n"
+					"Read performance at driver Level:"
+					"%lu bytes in %lld microseconds\n",
+					wbytes_drv, wtime_drv,
+					rbytes_drv, rtime_drv);
+}
+
+static ssize_t
+set_perf(struct device *dev, struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	struct mmc_host *host = cls_dev_to_mmc_host(dev);
+	int64_t value;
+	unsigned long flags;
+
+	sscanf(buf, "%lld", &value);
+	spin_lock_irqsave(&host->lock, flags);
+	if (!value) {
+		memset(&host->perf, 0, sizeof(host->perf));
+		host->perf_enable = false;
+	} else {
+		host->perf_enable = true;
+	}
+	spin_unlock_irqrestore(&host->lock, flags);
+
+	return count;
+}
+
+static DEVICE_ATTR(perf, S_IRUGO | S_IWUSR,
+		show_perf, set_perf);
+
+#endif
+
+static struct attribute *dev_attrs[] = {
+#ifdef CONFIG_MMC_PERF_PROFILING
+	&dev_attr_perf.attr,
+#endif
+	NULL,
+};
+static struct attribute_group dev_attr_grp = {
+	.attrs = dev_attrs,
+};
+
 /**
  *	mmc_add_host - initialise host hardware
  *	@host: mmc host
@@ -390,11 +883,33 @@
 
 	led_trigger_register_simple(dev_name(&host->class_dev), &host->led);
 
+	host->clk_scaling.upthreshold = MMC_DEVFRQ_DEFAULT_UP_THRESHOLD;
+	host->clk_scaling.downthreshold = MMC_DEVFRQ_DEFAULT_DOWN_THRESHOLD;
+	host->clk_scaling.polling_delay_ms = MMC_DEVFRQ_DEFAULT_POLLING_MSEC;
+	host->clk_scaling.skip_clk_scale_freq_update = false;
+
 #ifdef CONFIG_DEBUG_FS
 	mmc_add_host_debugfs(host);
 #endif
+	mmc_host_clk_sysfs_init(host);
+	mmc_trace_init(host);
+
+	err = sysfs_create_group(&host->class_dev.kobj, &clk_scaling_attr_grp);
+	if (err)
+		pr_err("%s: failed to create clk scale sysfs group with err %d\n",
+				__func__, err);
+
+	err = sysfs_create_group(&host->class_dev.kobj, &dev_attr_grp);
+	if (err)
+		pr_err("%s: failed to create sysfs group with err %d\n",
+				__func__, err);
+
+#ifdef CONFIG_BLOCK
+	mmc_latency_hist_sysfs_init(host);
+#endif
 
 	mmc_start_host(host);
+	if (!(host->pm_flags & MMC_PM_IGNORE_PM_NOTIFY))
 	register_pm_notifier(&host->pm_notify);
 
 	return 0;
@@ -412,16 +927,26 @@
  */
 void mmc_remove_host(struct mmc_host *host)
 {
+	if (!(host->pm_flags & MMC_PM_IGNORE_PM_NOTIFY))
 	unregister_pm_notifier(&host->pm_notify);
+
 	mmc_stop_host(host);
 
 #ifdef CONFIG_DEBUG_FS
 	mmc_remove_host_debugfs(host);
 #endif
+	sysfs_remove_group(&host->parent->kobj, &dev_attr_grp);
+	sysfs_remove_group(&host->class_dev.kobj, &clk_scaling_attr_grp);
+
+#ifdef CONFIG_BLOCK
+	mmc_latency_hist_sysfs_exit(host);
+#endif
 
 	device_del(&host->class_dev);
 
 	led_trigger_unregister_simple(host->led);
+
+	mmc_host_clk_exit(host);
 }
 
 EXPORT_SYMBOL(mmc_remove_host);
diff -ruw linux-4.4.115/drivers/mmc/core/host.h linux-4.4.115-fbx/drivers/mmc/core/host.h
--- linux-4.4.115/drivers/mmc/core/host.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/mmc/core/host.h	2019-01-22 16:16:24.767257853 +0100
@@ -12,6 +12,8 @@
 #define _MMC_CORE_HOST_H
 #include <linux/mmc/host.h>
 
+#define cls_dev_to_mmc_host(d)	container_of(d, struct mmc_host, class_dev)
+
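
cls_dev_to_mmc_host() is the usual container_of idiom: the sysfs show()/store() callbacks above receive the embedded struct device, and the macro walks back to the enclosing mmc_host. A self-contained illustration with stand-in types (not the kernel's definitions):

	#include <stddef.h>
	#include <stdio.h>

	struct device { int id; };		/* stand-in type */
	struct mmc_host {			/* stand-in type */
		int index;
		struct device class_dev;
	};

	#define cls_dev_to_mmc_host(d) \
		((struct mmc_host *)((char *)(d) - \
				     offsetof(struct mmc_host, class_dev)))

	int main(void)
	{
		struct mmc_host host = { .index = 1, .class_dev = { .id = 7 } };
		struct device *dev = &host.class_dev; /* what a callback gets */

		printf("host index: %d\n", cls_dev_to_mmc_host(dev)->index);
		return 0;
	}
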
 int mmc_register_host_class(void);
 void mmc_unregister_host_class(void);
 
@@ -21,5 +23,8 @@
 void mmc_retune_release(struct mmc_host *host);
 int mmc_retune(struct mmc_host *host);
 
+void mmc_latency_hist_sysfs_init(struct mmc_host *host);
+void mmc_latency_hist_sysfs_exit(struct mmc_host *host);
+
 #endif
 
diff -ruw linux-4.4.115/drivers/mmc/core/Kconfig linux-4.4.115-fbx/drivers/mmc/core/Kconfig
--- linux-4.4.115/drivers/mmc/core/Kconfig	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/mmc/core/Kconfig	2019-01-22 16:16:24.767257853 +0100
@@ -1,3 +1,39 @@
 #
 # MMC core configuration
 #
+
+config MMC_RING_BUFFER
+	bool "MMC_RING_BUFFER"
+	depends on MMC
+	default n
+	help
+	  This enables ring-buffer tracing of significant MMC driver
+	  events, providing a command history for debugging purposes.
+
+	  If unsure, say N.
+
+config MMC_EMBEDDED_SDIO
+	bool "MMC embedded SDIO device support (EXPERIMENTAL)"
+	help
+	  If you say Y here, support will be added for embedded SDIO
+	  devices which do not contain the necessary enumeration
+	  support in hardware to be properly detected.
+
+config MMC_PARANOID_SD_INIT
+	bool "Enable paranoid SD card initialization (EXPERIMENTAL)"
+	help
+	  If you say Y here, the MMC layer will be extra paranoid
+	  about re-trying SD init requests. This can be a useful
+	  work-around for buggy controllers and hardware. Enable
+	  if you are experiencing issues with SD detection.
+
+config MMC_CLKGATE
+	bool "MMC host clock gating"
+	help
+	  This will attempt to aggressively gate the clock to the MMC card.
+	  This saves power by gating off the logic and bus noise when
+	  the MMC card is not in use. Your host driver has to support
+	  handling this in order for it to be of any use.
+
+	  If unsure, say N.
diff -ruw linux-4.4.115/drivers/mmc/core/Makefile linux-4.4.115-fbx/drivers/mmc/core/Makefile
--- linux-4.4.115/drivers/mmc/core/Makefile	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/mmc/core/Makefile	2019-01-22 16:16:24.767257853 +0100
@@ -10,3 +10,4 @@
 				   quirks.o slot-gpio.o
 mmc_core-$(CONFIG_OF)		+= pwrseq.o pwrseq_simple.o pwrseq_emmc.o
 mmc_core-$(CONFIG_DEBUG_FS)	+= debugfs.o
+obj-$(CONFIG_MMC_RING_BUFFER)	+= ring_buffer.o
diff -ruw linux-4.4.115/drivers/mmc/core/mmc.c linux-4.4.115-fbx/drivers/mmc/core/mmc.c
--- linux-4.4.115/drivers/mmc/core/mmc.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/mmc/core/mmc.c	2019-10-29 09:26:24.057207230 +0100
@@ -19,6 +19,8 @@
 #include <linux/mmc/host.h>
 #include <linux/mmc/card.h>
 #include <linux/mmc/mmc.h>
+#include <linux/reboot.h>
+#include <trace/events/mmc.h>
 
 #include "core.h"
 #include "host.h"
@@ -59,6 +61,7 @@
 		__res & __mask;						\
 	})
 
+static int mmc_switch_status(struct mmc_card *card, bool ignore_crc);
 /*
  * Given the decoded CSD structure, decode the raw CID to our CID structure.
  */
@@ -124,6 +127,19 @@
 	mmc_init_erase(card);
 }
 
+static const struct mmc_fixup mmc_fixups[] = {
+
+	/* avoid HPI for specific cards */
+	MMC_FIXUP_EXT_CSD_REV("MMC16G", CID_MANFID_KINGSTON, CID_OEMID_ANY,
+		add_quirk, MMC_QUIRK_BROKEN_HPI, MMC_V4_41),
+
+	/* Disable cache for specific cards */
+	MMC_FIXUP("MMC16G", CID_MANFID_KINGSTON, CID_OEMID_ANY,
+		add_quirk_mmc, MMC_QUIRK_CACHE_DISABLE),
+
+	END_FIXUP
+};
+
 /*
  * Given a 128-bit response, decode to our card CSD structure.
  */
@@ -494,22 +510,41 @@
 			ext_csd[EXT_CSD_PWR_CL_DDR_200_360];
 	}
 
+	/* check whether the eMMC card supports HPI */
+	if ((ext_csd[EXT_CSD_HPI_FEATURES] & 0x1) &&
+		!(card->quirks & MMC_QUIRK_BROKEN_HPI)) {
+		card->ext_csd.hpi = 1;
+		if (ext_csd[EXT_CSD_HPI_FEATURES] & 0x2)
+			card->ext_csd.hpi_cmd = MMC_STOP_TRANSMISSION;
+		else
+			card->ext_csd.hpi_cmd = MMC_SEND_STATUS;
+		/*
+		 * Indicate the maximum timeout to close
+		 * a command interrupted by HPI
+		 */
+		card->ext_csd.out_of_int_time =
+			ext_csd[EXT_CSD_OUT_OF_INTERRUPT_TIME] * 10;
+		pr_info("%s: Out-of-interrupt timeout is %d[ms]\n",
+				mmc_hostname(card->host),
+				card->ext_csd.out_of_int_time);
+	}
+
 	if (card->ext_csd.rev >= 5) {
 		/* Adjust production date as per JEDEC JESD84-B451 */
 		if (card->cid.year < 2010)
 			card->cid.year += 16;
 
 		/* check whether the eMMC card supports BKOPS */
-		if (ext_csd[EXT_CSD_BKOPS_SUPPORT] & 0x1) {
+		if ((ext_csd[EXT_CSD_BKOPS_SUPPORT] & 0x1) &&
+				card->ext_csd.hpi) {
 			card->ext_csd.bkops = 1;
-			card->ext_csd.man_bkops_en =
-					(ext_csd[EXT_CSD_BKOPS_EN] &
-						EXT_CSD_MANUAL_BKOPS_MASK);
+			card->ext_csd.bkops_en = ext_csd[EXT_CSD_BKOPS_EN];
 			card->ext_csd.raw_bkops_status =
 				ext_csd[EXT_CSD_BKOPS_STATUS];
-			if (!card->ext_csd.man_bkops_en)
-				pr_info("%s: MAN_BKOPS_EN bit is not set\n",
-					mmc_hostname(card->host));
+			if (!card->ext_csd.bkops_en)
+				pr_info("%s: BKOPS_EN equals 0x%x\n",
+					mmc_hostname(card->host),
+					card->ext_csd.bkops_en);
 		}
 
 		/* check whether the eMMC card supports HPI */
@@ -531,6 +566,19 @@
 		card->ext_csd.rst_n_function = ext_csd[EXT_CSD_RST_N_FUNCTION];
 
 		/*
+		 * Some eMMC vendors violate the eMMC 5.0 spec and set the
+		 * REL_WR_SEC_C register to 0x10 to indicate improved RPMB
+		 * throughput; this leads to failures when the TZ module
+		 * writes data to the RPMB partition. So check bit[4] of
+		 * EXT_CSD[166], and if it is not set, force REL_WR_SEC_C
+		 * to 0x1, ignoring the value of EXT_CSD[222].
+		 */
+		if (!(card->ext_csd.rel_param &
+					EXT_CSD_WR_REL_PARAM_EN_RPMB_REL_WR))
+			card->ext_csd.rel_sectors = 0x1;
+
+		/*
 		 * RPMB regions are defined in multiples of 128K.
 		 */
 		card->ext_csd.raw_rpmb_size_mult = ext_csd[EXT_CSD_RPMB_MULT];
@@ -585,6 +633,46 @@
 		card->ext_csd.data_sector_size = 512;
 	}
 
+	if (card->ext_csd.rev >= 7) {
+		/* Enhanced Strobe is defined from v5.1, where the EXT_CSD
+		 * rev should be 8, but some eMMC devices support it while
+		 * reporting rev 7, so handle Enhanced Strobe here.
+		 */
+		card->ext_csd.strobe_support = ext_csd[EXT_CSD_STROBE_SUPPORT];
+		card->ext_csd.cmdq_support = ext_csd[EXT_CSD_CMDQ_SUPPORT];
+		card->ext_csd.fw_version = ext_csd[EXT_CSD_FIRMWARE_VERSION];
+		pr_info("%s: eMMC FW version: 0x%02x\n",
+			mmc_hostname(card->host),
+			card->ext_csd.fw_version);
+		if (card->ext_csd.cmdq_support) {
+			/*
+			 * Queue Depth = N + 1,
+			 * see JEDEC JESD84-B51 section 7.4.19
+			 */
+			card->ext_csd.cmdq_depth =
+				ext_csd[EXT_CSD_CMDQ_DEPTH] + 1;
+			pr_info("%s: CMDQ supported: depth: %d\n",
+				mmc_hostname(card->host),
+				card->ext_csd.cmdq_depth);
+		}
+		card->ext_csd.barrier_support =
+			ext_csd[EXT_CSD_BARRIER_SUPPORT];
+		card->ext_csd.cache_flush_policy =
+			ext_csd[EXT_CSD_CACHE_FLUSH_POLICY];
+		pr_info("%s: cache barrier support %d flush policy %d\n",
+				mmc_hostname(card->host),
+				card->ext_csd.barrier_support,
+				card->ext_csd.cache_flush_policy);
+		card->ext_csd.enhanced_rpmb_supported =
+			(card->ext_csd.rel_param &
+			 EXT_CSD_WR_REL_PARAM_EN_RPMB_REL_WR);
+	} else {
+		card->ext_csd.cmdq_support = 0;
+		card->ext_csd.cmdq_depth = 0;
+		card->ext_csd.barrier_support = 0;
+		card->ext_csd.cache_flush_policy = 0;
+	}
+
 	/* eMMC v5 or later */
 	if (card->ext_csd.rev >= 7) {
 		memcpy(card->ext_csd.fwrev, &ext_csd[EXT_CSD_FIRMWARE_VERSION],
@@ -592,6 +680,12 @@
 		card->ext_csd.ffu_capable =
 			(ext_csd[EXT_CSD_SUPPORTED_MODE] & 0x1) &&
 			!(ext_csd[EXT_CSD_FW_CONFIG] & 0x1);
+
+		card->ext_csd.pre_eol_info = ext_csd[EXT_CSD_PRE_EOL_INFO];
+		card->ext_csd.device_life_time_est_typ_a =
+			ext_csd[EXT_CSD_DEVICE_LIFE_TIME_EST_TYP_A];
+		card->ext_csd.device_life_time_est_typ_b =
+			ext_csd[EXT_CSD_DEVICE_LIFE_TIME_EST_TYP_B];
 	}
 out:
 	return err;
@@ -599,6 +693,7 @@
 
 static int mmc_read_ext_csd(struct mmc_card *card)
 {
+	struct mmc_host *host = card->host;
 	u8 *ext_csd;
 	int err;
 
@@ -607,6 +702,9 @@
 
 	err = mmc_get_ext_csd(card, &ext_csd);
 	if (err) {
+		pr_err("%s: %s: mmc_get_ext_csd() fails %d\n",
+				mmc_hostname(host), __func__, err);
+
 		/* If the host or the card can't do the switch,
 		 * fail more gracefully. */
 		if ((err != -EINVAL)
@@ -721,11 +819,18 @@
 MMC_DEV_ATTR(name, "%s\n", card->cid.prod_name);
 MMC_DEV_ATTR(oemid, "0x%04x\n", card->cid.oemid);
 MMC_DEV_ATTR(prv, "0x%x\n", card->cid.prv);
+MMC_DEV_ATTR(rev, "0x%x\n", card->ext_csd.rev);
+MMC_DEV_ATTR(pre_eol_info, "%02x\n", card->ext_csd.pre_eol_info);
+MMC_DEV_ATTR(life_time, "0x%02x 0x%02x\n",
+	card->ext_csd.device_life_time_est_typ_a,
+	card->ext_csd.device_life_time_est_typ_b);
 MMC_DEV_ATTR(serial, "0x%08x\n", card->cid.serial);
 MMC_DEV_ATTR(enhanced_area_offset, "%llu\n",
 		card->ext_csd.enhanced_area_offset);
 MMC_DEV_ATTR(enhanced_area_size, "%u\n", card->ext_csd.enhanced_area_size);
 MMC_DEV_ATTR(raw_rpmb_size_mult, "%#x\n", card->ext_csd.raw_rpmb_size_mult);
+MMC_DEV_ATTR(enhanced_rpmb_supported, "%#x\n",
+		card->ext_csd.enhanced_rpmb_supported);
 MMC_DEV_ATTR(rel_sectors, "%#x\n", card->ext_csd.rel_sectors);
 
 static ssize_t mmc_fwrev_show(struct device *dev,
@@ -757,10 +862,14 @@
 	&dev_attr_name.attr,
 	&dev_attr_oemid.attr,
 	&dev_attr_prv.attr,
+	&dev_attr_rev.attr,
+	&dev_attr_pre_eol_info.attr,
+	&dev_attr_life_time.attr,
 	&dev_attr_serial.attr,
 	&dev_attr_enhanced_area_offset.attr,
 	&dev_attr_enhanced_area_size.attr,
 	&dev_attr_raw_rpmb_size_mult.attr,
+	&dev_attr_enhanced_rpmb_supported.attr,
 	&dev_attr_rel_sectors.attr,
 	NULL,
 };
@@ -895,11 +1004,11 @@
  */
 static int mmc_select_bus_width(struct mmc_card *card)
 {
-	static unsigned ext_csd_bits[] = {
+	static const unsigned ext_csd_bits[] = {
 		EXT_CSD_BUS_WIDTH_8,
 		EXT_CSD_BUS_WIDTH_4,
 	};
-	static unsigned bus_widths[] = {
+	static const unsigned bus_widths[] = {
 		MMC_BUS_WIDTH_8,
 		MMC_BUS_WIDTH_4,
 	};
@@ -969,9 +1078,11 @@
 	err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
 			   EXT_CSD_HS_TIMING, EXT_CSD_TIMING_HS,
 			   card->ext_csd.generic_cmd6_time,
-			   true, true, true);
-	if (!err)
+			   true, false, true);
+	if (!err) {
 		mmc_set_timing(card->host, MMC_TIMING_MMC_HS);
+		err = mmc_switch_status(card, false);
+	}
 
 	return err;
 }
@@ -995,10 +1106,11 @@
 	ext_csd_bits = (bus_width == MMC_BUS_WIDTH_8) ?
 		EXT_CSD_DDR_BUS_WIDTH_8 : EXT_CSD_DDR_BUS_WIDTH_4;
 
-	err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+	err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
 			EXT_CSD_BUS_WIDTH,
 			ext_csd_bits,
-			card->ext_csd.generic_cmd6_time);
+			card->ext_csd.generic_cmd6_time,
+			true, false, false);
 	if (err) {
 		pr_err("%s: switch to bus width %d ddr failed\n",
 			mmc_hostname(host), 1 << bus_width);
@@ -1041,19 +1153,21 @@
 	if (err)
 		err = __mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_330);
 
-	if (!err)
+	if (!err) {
 		mmc_set_timing(host, MMC_TIMING_MMC_DDR52);
+		err = mmc_switch_status(card, false);
+	}
 
 	return err;
 }
 
 /* Caller must hold re-tuning */
-static int mmc_switch_status(struct mmc_card *card)
+static int mmc_switch_status(struct mmc_card *card, bool ignore_crc)
 {
 	u32 status;
 	int err;
 
-	err = mmc_send_status(card, &status);
+	err = __mmc_send_status(card, &status, ignore_crc);
 	if (err)
 		return err;
 
@@ -1071,17 +1185,32 @@
 	/*
 	 * HS400 mode requires 8-bit bus width
 	 */
+	if (card->ext_csd.strobe_support) {
+		if (!(card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS400 &&
+		    host->caps & MMC_CAP_8_BIT_DATA))
+			return 0;
+
+		/* Enhanced Strobe flow: for non-Enhanced Strobe, the
+		 * signal voltage will not be set.
+		 */
+		if (card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS200_1_2V)
+			err = __mmc_set_signal_voltage(host,
+					MMC_SIGNAL_VOLTAGE_120);
+
+		if (err && card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS200_1_8V)
+			err = __mmc_set_signal_voltage(host,
+					MMC_SIGNAL_VOLTAGE_180);
+		if (err)
+			return err;
+	} else {
 	if (!(card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS400 &&
 	      host->ios.bus_width == MMC_BUS_WIDTH_8))
 		return 0;
+	}
 
 	if (host->caps & MMC_CAP_WAIT_WHILE_BUSY)
 		send_status = false;
 
-	/* Reduce frequency to HS frequency */
-	max_dtr = card->ext_csd.hs_max_dtr;
-	mmc_set_clock(host, max_dtr);
-
 	/* Switch card to HS mode */
 	val = EXT_CSD_TIMING_HS;
 	err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
@@ -1097,16 +1226,28 @@
 	/* Set host controller to HS timing */
 	mmc_set_timing(card->host, MMC_TIMING_MMC_HS);
 
+	/* Reduce frequency to HS frequency */
+	max_dtr = card->ext_csd.hs_max_dtr;
+	mmc_set_clock(host, max_dtr);
+
 	if (!send_status) {
-		err = mmc_switch_status(card);
+		err = mmc_switch_status(card, false);
 		if (err)
 			goto out_err;
 	}
 
+	val = EXT_CSD_DDR_BUS_WIDTH_8;
+	if (card->ext_csd.strobe_support) {
+		err = mmc_select_bus_width(card);
+		if (IS_ERR_VALUE(err))
+			return err;
+		val |= EXT_CSD_BUS_WIDTH_STROBE;
+	}
+
 	/* Switch card to DDR */
 	err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
 			 EXT_CSD_BUS_WIDTH,
-			 EXT_CSD_DDR_BUS_WIDTH_8,
+			 val,
 			 card->ext_csd.generic_cmd6_time);
 	if (err) {
 		pr_err("%s: switch to bus width for hs400 failed, err:%d\n",
@@ -1131,8 +1272,29 @@
 	mmc_set_timing(host, MMC_TIMING_MMC_HS400);
 	mmc_set_bus_speed(card);
 
+	if (card->ext_csd.strobe_support && host->ops->enhanced_strobe) {
+		mmc_host_clk_hold(host);
+		err = host->ops->enhanced_strobe(host);
+		mmc_host_clk_release(host);
+	} else if ((host->caps2 & MMC_CAP2_HS400_POST_TUNING) &&
+			host->ops->execute_tuning) {
+		mmc_host_clk_hold(host);
+		err = host->ops->execute_tuning(host,
+				MMC_SEND_TUNING_BLOCK_HS200);
+		mmc_host_clk_release(host);
+
+		if (err)
+			pr_warn("%s: tuning execution failed\n",
+				mmc_hostname(host));
+	}
+
+	/*
+	 * CMD13 should be sent only after the host calibration for
+	 * enhanced_strobe or HS400 mode has completed; otherwise we
+	 * may see CMD13 timeouts or CRC errors.
+	 */
 	if (!send_status) {
-		err = mmc_switch_status(card);
+		err = mmc_switch_status(card, false);
 		if (err)
 			goto out_err;
 	}
@@ -1161,10 +1323,6 @@
 	if (host->caps & MMC_CAP_WAIT_WHILE_BUSY)
 		send_status = false;
 
-	/* Reduce frequency to HS */
-	max_dtr = card->ext_csd.hs_max_dtr;
-	mmc_set_clock(host, max_dtr);
-
 	/* Switch HS400 to HS DDR */
 	val = EXT_CSD_TIMING_HS;
 	err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_HS_TIMING,
@@ -1175,8 +1333,12 @@
 
 	mmc_set_timing(host, MMC_TIMING_MMC_DDR52);
 
+	/* Reduce frequency to HS */
+	max_dtr = card->ext_csd.hs_max_dtr;
+	mmc_set_clock(host, max_dtr);
+
 	if (!send_status) {
-		err = mmc_switch_status(card);
+		err = mmc_switch_status(card, false);
 		if (err)
 			goto out_err;
 	}
@@ -1191,7 +1353,7 @@
 	mmc_set_timing(host, MMC_TIMING_MMC_HS);
 
 	if (!send_status) {
-		err = mmc_switch_status(card);
+		err = mmc_switch_status(card, false);
 		if (err)
 			goto out_err;
 	}
@@ -1208,7 +1370,7 @@
 	mmc_set_timing(host, MMC_TIMING_MMC_HS200);
 
 	if (!send_status) {
-		err = mmc_switch_status(card);
+		err = mmc_switch_status(card, false);
 		if (err)
 			goto out_err;
 	}
@@ -1287,7 +1449,12 @@
 		old_timing = host->ios.timing;
 		mmc_set_timing(host, MMC_TIMING_MMC_HS200);
 		if (!send_status) {
-			err = mmc_switch_status(card);
+			/*
+			 * After switching to HS200, CRC errors might occur
+			 * for commands sent before tuning completes, so
+			 * ignore CRC errors for CMD13.
+			 */
+			err = mmc_switch_status(card, true);
 			/*
 			 * mmc_select_timing() assumes timing has not changed if
 			 * it is a switch error.
@@ -1303,6 +1470,17 @@
 	return err;
 }
 
+static int mmc_reboot_notify(struct notifier_block *notify_block,
+		unsigned long event, void *unused)
+{
+	struct mmc_card *card = container_of(
+			notify_block, struct mmc_card, reboot_notify);
+
+	card->pon_type = (event != SYS_RESTART) ? MMC_LONG_PON : MMC_SHRT_PON;
+
+	return NOTIFY_OK;
+}
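
The notifier only records which power-off notification mmc_send_pon() (added further down) should issue: a short PON for a restart, a long PON for halt/power-off. A standalone sketch of that mapping, with SYS_* values mirroring <linux/reboot.h>:

	#include <stdio.h>

	#define SYS_RESTART	0x0001	/* values as in <linux/reboot.h> */
	#define SYS_HALT	0x0002
	#define SYS_POWER_OFF	0x0003

	int main(void)
	{
		unsigned long events[] = { SYS_RESTART, SYS_HALT, SYS_POWER_OFF };
		int i;

		for (i = 0; i < 3; i++)
			printf("event 0x%04lx -> %s\n", events[i],
			       events[i] != SYS_RESTART ?
					"MMC_LONG_PON" : "MMC_SHRT_PON");
		return 0;
	}
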
+
 /*
  * Activate High Speed or HS200 mode if supported.
  */
@@ -1313,7 +1491,12 @@
 	if (!mmc_can_ext_csd(card))
 		goto bus_speed;
 
-	if (card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS200)
+	/* For the Enhanced Strobe HS400 flow */
+	if (card->ext_csd.strobe_support &&
+	    card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS400 &&
+	    card->host->caps & MMC_CAP_8_BIT_DATA)
+		err = mmc_select_hs400(card);
+	else if (card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS200)
 		err = mmc_select_hs200(card);
 	else if (card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS)
 		err = mmc_select_hs(card);
@@ -1352,12 +1535,242 @@
 	 */
 	if (card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS400 &&
 	    host->ios.bus_width == MMC_BUS_WIDTH_8)
-		if (host->ops->prepare_hs400_tuning)
-			host->ops->prepare_hs400_tuning(host, &host->ios);
+		mmc_set_timing(host, MMC_TIMING_MMC_HS400);
 
 	return mmc_execute_tuning(card);
 }
 
+static int mmc_select_cmdq(struct mmc_card *card)
+{
+	struct mmc_host *host = card->host;
+	int ret = 0;
+
+	if (!host->cmdq_ops) {
+		pr_err("%s: host controller doesn't support CMDQ\n",
+		       mmc_hostname(host));
+		return 0;
+	}
+
+	ret = mmc_set_blocklen(card, MMC_CARD_CMDQ_BLK_SIZE);
+	if (ret)
+		goto out;
+
+	ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_CMDQ, 1,
+			 card->ext_csd.generic_cmd6_time);
+	if (ret)
+		goto out;
+
+	mmc_card_set_cmdq(card);
+	mmc_host_clk_hold(card->host);
+	ret = host->cmdq_ops->enable(card->host);
+	if (ret) {
+		mmc_host_clk_release(card->host);
+		pr_err("%s: failed (%d) enabling CMDQ on host\n",
+			mmc_hostname(host), ret);
+		mmc_card_clr_cmdq(card);
+		ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_CMDQ, 0,
+				 card->ext_csd.generic_cmd6_time);
+		goto out;
+	}
+
+	mmc_host_clk_release(card->host);
+	pr_info_once("%s: CMDQ enabled on card\n", mmc_hostname(host));
+out:
+	return ret;
+}
+
+static int mmc_select_hs_ddr52(struct mmc_host *host)
+{
+	int err;
+
+	mmc_select_hs(host->card);
+	err = mmc_select_bus_width(host->card);
+	if (err < 0) {
+		pr_err("%s: %s: select_bus_width failed(%d)\n",
+			mmc_hostname(host), __func__, err);
+		return err;
+	}
+
+	err = mmc_select_hs_ddr(host->card);
+	mmc_set_clock(host, MMC_HIGH_52_MAX_DTR);
+
+	return err;
+}
+
+/*
+ * Scale down from HS400 to HS in order to allow frequency change.
+ * This is needed for cards that don't support changing the frequency in HS400.
+ */
+static int mmc_scale_low(struct mmc_host *host, unsigned long freq)
+{
+	int err = 0;
+
+	mmc_set_timing(host, MMC_TIMING_LEGACY);
+	mmc_set_clock(host, MMC_HIGH_26_MAX_DTR);
+
+	if (host->clk_scaling.lower_bus_speed_mode &
+	    MMC_SCALING_LOWER_DDR52_MODE) {
+		err = mmc_select_hs_ddr52(host);
+		if (err)
+			pr_err("%s: %s: failed to switch to DDR52: err: %d\n",
+			       mmc_hostname(host), __func__, err);
+		else
+			return err;
+	}
+
+	err = mmc_select_hs(host->card);
+	if (err) {
+		pr_err("%s: %s: scaling low: failed (%d)\n",
+		       mmc_hostname(host), __func__, err);
+		return err;
+	}
+
+	err = mmc_select_bus_width(host->card);
+	if (err < 0) {
+		pr_err("%s: %s: select_bus_width failed(%d)\n",
+			mmc_hostname(host), __func__, err);
+		return err;
+	}
+
+	mmc_set_clock(host, freq);
+
+	return 0;
+}
+
+/*
+ * Scale up from HS to HS200/HS400.
+ */
+static int mmc_scale_high(struct mmc_host *host)
+{
+	int err = 0;
+
+	if (mmc_card_ddr52(host->card)) {
+		mmc_set_timing(host, MMC_TIMING_LEGACY);
+		mmc_set_clock(host, MMC_HIGH_26_MAX_DTR);
+	}
+
+	if (!host->card->ext_csd.strobe_support) {
+		if (!(host->card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS200)) {
+			pr_err("%s: %s: card does not support HS200\n",
+				mmc_hostname(host), __func__);
+			WARN_ON(1);
+			return -EPERM;
+		}
+
+		err = mmc_select_hs200(host->card);
+		if (err) {
+			pr_err("%s: %s: selecting HS200 failed (%d)\n",
+				mmc_hostname(host), __func__, err);
+			return err;
+		}
+
+		mmc_set_bus_speed(host->card);
+
+		err = mmc_hs200_tuning(host->card);
+		if (err) {
+			pr_err("%s: %s: hs200 tuning failed (%d)\n",
+				mmc_hostname(host), __func__, err);
+			return err;
+		}
+
+		if (!(host->card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS400)) {
+			pr_debug("%s: card does not support HS400\n",
+				mmc_hostname(host));
+			return 0;
+		}
+	}
+
+	err = mmc_select_hs400(host->card);
+	if (err) {
+		pr_err("%s: %s: select hs400 failed (%d)\n",
+			mmc_hostname(host), __func__, err);
+		return err;
+	}
+
+	return err;
+}
+
+static int mmc_set_clock_bus_speed(struct mmc_card *card, unsigned long freq)
+{
+	int err = 0;
+
+	if (freq == MMC_HS200_MAX_DTR)
+		err = mmc_scale_high(card->host);
+	else
+		err = mmc_scale_low(card->host, freq);
+
+	return err;
+}
+
+static inline unsigned long mmc_ddr_freq_accommodation(unsigned long freq)
+{
+	if (freq == MMC_HIGH_DDR_MAX_DTR)
+		return freq;
+
+	return freq/2;
+}
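
mmc_ddr_freq_accommodation() above halves any requested frequency except the defined DDR52 maximum, which is passed through unchanged. A standalone sketch of the rule (MMC_HIGH_DDR_MAX_DTR is 52 MHz in the kernel headers):

	#include <stdio.h>

	#define MMC_HIGH_DDR_MAX_DTR	52000000UL  /* as in <linux/mmc/card.h> */

	static unsigned long ddr_freq_accommodation(unsigned long freq)
	{
		if (freq == MMC_HIGH_DDR_MAX_DTR)
			return freq;
		return freq / 2;	/* DDR moves data on both clock edges */
	}

	int main(void)
	{
		printf("%lu -> %lu\n", 40000000UL,
		       ddr_freq_accommodation(40000000UL));
		printf("%lu -> %lu\n", MMC_HIGH_DDR_MAX_DTR,
		       ddr_freq_accommodation(MMC_HIGH_DDR_MAX_DTR));
		return 0;
	}
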
+
+/**
+ * mmc_change_bus_speed() - Change MMC card bus frequency at runtime
+ * @host: pointer to mmc host structure
+ * @freq: pointer to desired frequency to be set
+ *
+ * Change the MMC card bus frequency at runtime after the card is
+ * initialized. Callers are expected to make sure of the card's
+ * state (DATA/RCV/TRANSFER) before changing the frequency at runtime.
+ *
+ * If the requested frequency is greater than the maximum supported by
+ * the card, *freq is clamped to the card's maximum. If it is less than
+ * the minimum supported by the host, *freq is clamped to the host's
+ * minimum. The host is assumed to be claimed while calling this function.
+ */
+static int mmc_change_bus_speed(struct mmc_host *host, unsigned long *freq)
+{
+	int err = 0;
+	struct mmc_card *card;
+	unsigned long actual_freq;
+
+	card = host->card;
+
+	if (!card || !freq) {
+		err = -EINVAL;
+		goto out;
+	}
+	actual_freq = *freq;
+
+	WARN_ON(!host->claimed);
+
+	/*
+	 * For scaling up/down HS400 we'll need special handling,
+	 * for other timings we can simply do clock frequency change
+	 */
+	if (mmc_card_hs400(card) ||
+		(!mmc_card_hs200(host->card) && *freq == MMC_HS200_MAX_DTR)) {
+		err = mmc_set_clock_bus_speed(card, *freq);
+		if (err) {
+			pr_err("%s: %s: failed (%d)to set bus and clock speed (freq=%lu)\n",
+				mmc_hostname(host), __func__, err, *freq);
+			goto out;
+		}
+	} else if (mmc_card_hs200(host->card)) {
+		mmc_set_clock(host, *freq);
+		err = mmc_hs200_tuning(host->card);
+		if (err) {
+			pr_warn("%s: %s: tuning execution failed %d\n",
+				mmc_hostname(card->host),
+				__func__, err);
+			mmc_set_clock(host, host->clk_scaling.curr_freq);
+		}
+	} else {
+		if (mmc_card_ddr52(host->card))
+			actual_freq = mmc_ddr_freq_accommodation(*freq);
+		mmc_set_clock(host, actual_freq);
+	}
+
+out:
+	return err;
+}
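
Nothing in this hunk shows mmc_change_bus_speed()'s caller; it is reached through the change_bus_speed bus op wired into mmc_bus_ops further down. A hedged sketch of such a call site — illustrative only, the real clock-scaling core may differ — honoring the claimed-host requirement from the kerneldoc:

	/* Hypothetical caller; everything except mmc_claim_host(),
	 * mmc_release_host() and the bus_ops hook is illustrative. */
	static int mmc_scale_to_freq_sketch(struct mmc_host *host,
					    unsigned long target_hz)
	{
		unsigned long freq = target_hz;
		int err = -ENOTSUPP;

		mmc_claim_host(host);
		if (host->bus_ops && host->bus_ops->change_bus_speed)
			err = host->bus_ops->change_bus_speed(host, &freq);
		mmc_release_host(host);

		return err;
	}
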
+
 /*
  * Handle the detection and initialisation of a card.
  *
@@ -1386,21 +1799,28 @@
 	 * respond.
 	 * mmc_go_idle is needed for eMMC that are asleep
 	 */
+reinit:
 	mmc_go_idle(host);
 
 	/* The extra bit indicates that we support high capacity */
 	err = mmc_send_op_cond(host, ocr | (1 << 30), &rocr);
-	if (err)
+	if (err) {
+		pr_err("%s: %s: mmc_send_op_cond() fails %d\n",
+				mmc_hostname(host), __func__, err);
 		goto err;
+	}
 
 	/*
 	 * For SPI, enable CRC as appropriate.
 	 */
 	if (mmc_host_is_spi(host)) {
 		err = mmc_spi_set_crc(host, use_spi_crc);
-		if (err)
+		if (err) {
+			pr_err("%s: %s: mmc_spi_set_crc() fails %d\n",
+					mmc_hostname(host), __func__, err);
 			goto err;
 	}
+	}
 
 	/*
 	 * Fetch CID from card.
@@ -1409,12 +1829,17 @@
 		err = mmc_send_cid(host, cid);
 	else
 		err = mmc_all_send_cid(host, cid);
-	if (err)
+	if (err) {
+		pr_err("%s: %s: mmc_send_cid() fails %d\n",
+				mmc_hostname(host), __func__, err);
 		goto err;
+	}
 
 	if (oldcard) {
 		if (memcmp(cid, oldcard->raw_cid, sizeof(cid)) != 0) {
 			err = -ENOENT;
+			pr_err("%s: %s: CID memcmp failed %d\n",
+					mmc_hostname(host), __func__, err);
 			goto err;
 		}
 
@@ -1426,6 +1851,8 @@
 		card = mmc_alloc_card(host, &mmc_type);
 		if (IS_ERR(card)) {
 			err = PTR_ERR(card);
+			pr_err("%s: %s: no memory to allocate for card %d\n",
+					mmc_hostname(host), __func__, err);
 			goto err;
 		}
 
@@ -1433,6 +1860,8 @@
 		card->type = MMC_TYPE_MMC;
 		card->rca = 1;
 		memcpy(card->raw_cid, cid, sizeof(card->raw_cid));
+		host->card = card;
+		card->reboot_notify.notifier_call = mmc_reboot_notify;
 	}
 
 	/*
@@ -1446,8 +1875,11 @@
 	 */
 	if (!mmc_host_is_spi(host)) {
 		err = mmc_set_relative_addr(card);
-		if (err)
+		if (err) {
+			pr_err("%s: %s: mmc_set_relative_addr() fails %d\n",
+					mmc_hostname(host), __func__, err);
 			goto free_card;
+		}
 
 		mmc_set_bus_mode(host, MMC_BUSMODE_PUSHPULL);
 	}
@@ -1457,16 +1889,25 @@
 		 * Fetch CSD from card.
 		 */
 		err = mmc_send_csd(card, card->raw_csd);
-		if (err)
+		if (err) {
+			pr_err("%s: %s: mmc_send_csd() fails %d\n",
+					mmc_hostname(host), __func__, err);
 			goto free_card;
+		}
 
 		err = mmc_decode_csd(card);
-		if (err)
+		if (err) {
+			pr_err("%s: %s: mmc_decode_csd() fails %d\n",
+					mmc_hostname(host), __func__, err);
 			goto free_card;
+		}
 		err = mmc_decode_cid(card);
-		if (err)
+		if (err) {
+			pr_err("%s: %s: mmc_decode_cid() fails %d\n",
+					mmc_hostname(host), __func__, err);
 			goto free_card;
 	}
+	}
 
 	/*
 	 * handling only for cards supporting DSR and hosts requesting
@@ -1480,15 +1921,21 @@
 	 */
 	if (!mmc_host_is_spi(host)) {
 		err = mmc_select_card(card);
-		if (err)
+		if (err) {
+			pr_err("%s: %s: mmc_select_card() fails %d\n",
+					mmc_hostname(host), __func__, err);
 			goto free_card;
 	}
+	}
 
 	if (!oldcard) {
 		/* Read extended CSD. */
 		err = mmc_read_ext_csd(card);
-		if (err)
+		if (err) {
+			pr_err("%s: %s: mmc_read_ext_csd() fails %d\n",
+					mmc_hostname(host), __func__, err);
 			goto free_card;
+		}
 
 		/* If doing byte addressing, check if required to do sector
 		 * addressing.  Handle the case of <2GB cards needing sector
@@ -1500,6 +1947,9 @@
 
 		/* Erase size depends on CSD and Extended CSD */
 		mmc_set_erase_size(card);
+
+		if (card->ext_csd.sectors && (rocr & MMC_CARD_SECTOR_ADDR))
+			mmc_card_set_blockaddr(card);
 	}
 
 	/*
@@ -1512,8 +1962,11 @@
 				 EXT_CSD_ERASE_GROUP_DEF, 1,
 				 card->ext_csd.generic_cmd6_time);
 
-		if (err && err != -EBADMSG)
+		if (err && err != -EBADMSG) {
+			pr_err("%s: %s: mmc_switch() for ERASE_GRP_DEF fails %d\n",
+					mmc_hostname(host), __func__, err);
 			goto free_card;
+		}
 
 		if (err) {
 			err = 0;
@@ -1543,9 +1996,14 @@
 		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_PART_CONFIG,
 				 card->ext_csd.part_config,
 				 card->ext_csd.part_time);
-		if (err && err != -EBADMSG)
+		if (err && err != -EBADMSG) {
+			pr_err("%s: %s: mmc_switch() for PART_CONFIG fails %d\n",
+					mmc_hostname(host), __func__, err);
 			goto free_card;
 	}
+		card->part_curr = card->ext_csd.part_config &
+				  EXT_CSD_PART_CONFIG_ACC_MASK;
+	}
 
 	/*
 	 * Enable power_off_notification byte in the ext_csd register
@@ -1555,8 +2013,11 @@
 				 EXT_CSD_POWER_OFF_NOTIFICATION,
 				 EXT_CSD_POWER_ON,
 				 card->ext_csd.generic_cmd6_time);
-		if (err && err != -EBADMSG)
+		if (err && err != -EBADMSG) {
+			pr_err("%s: %s: mmc_switch() for POWER_ON PON fails %d\n",
+					mmc_hostname(host), __func__, err);
 			goto free_card;
+		}
 
 		/*
 		 * The err can be -EBADMSG or 0,
@@ -1570,8 +2031,11 @@
 	 * Select timing interface
 	 */
 	err = mmc_select_timing(card);
-	if (err)
+	if (err) {
+		pr_err("%s: %s: mmc_select_timing() fails %d\n",
+					mmc_hostname(host), __func__, err);
 		goto free_card;
+	}
 
 	if (mmc_card_hs200(card)) {
 		err = mmc_hs200_tuning(card);
@@ -1581,7 +2045,7 @@
 		err = mmc_select_hs400(card);
 		if (err)
 			goto free_card;
-	} else {
+	} else if (!mmc_card_hs400(card)) {
 		/* Select the desired bus width optionally */
 		err = mmc_select_bus_width(card);
 		if (!IS_ERR_VALUE(err) && mmc_card_hs(card)) {
@@ -1591,6 +2055,16 @@
 		}
 	}
 
+	card->clk_scaling_lowest = host->f_min;
+	if ((card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS400) ||
+			(card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS200))
+		card->clk_scaling_highest = card->ext_csd.hs200_max_dtr;
+	else if ((card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS) ||
+			(card->mmc_avail_type & EXT_CSD_CARD_TYPE_DDR_52))
+		card->clk_scaling_highest = card->ext_csd.hs_max_dtr;
+	else
+		card->clk_scaling_highest = card->csd.max_dtr;
+
 	/*
 	 * Choose the power class with selected bus interface
 	 */
@@ -1603,8 +2077,11 @@
 		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
 				EXT_CSD_HPI_MGMT, 1,
 				card->ext_csd.generic_cmd6_time);
-		if (err && err != -EBADMSG)
+		if (err && err != -EBADMSG) {
+			pr_err("%s: %s: mmc_switch() for HPI_MGMT fails %d\n",
+					mmc_hostname(host), __func__, err);
 			goto free_card;
+		}
 		if (err) {
 			pr_warn("%s: Enabling HPI failed\n",
 				mmc_hostname(card->host));
@@ -1616,13 +2093,19 @@
 	/*
 	 * If cache size is higher than 0, this indicates
 	 * the existence of cache and it can be turned on.
+	 * If HPI is not supported then cache shouldn't be enabled.
 	 */
 	if (card->ext_csd.cache_size > 0) {
+		if (card->ext_csd.hpi_en &&
+			(!(card->quirks & MMC_QUIRK_CACHE_DISABLE))) {
 		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
 				EXT_CSD_CACHE_CTRL, 1,
 				card->ext_csd.generic_cmd6_time);
-		if (err && err != -EBADMSG)
+			if (err && err != -EBADMSG) {
+				pr_err("%s: %s: fail on CACHE_CTRL ON %d\n",
+					mmc_hostname(host), __func__, err);
 			goto free_card;
+			}
 
 		/*
 		 * Only if no error, cache is turned on successfully.
@@ -1635,6 +2118,45 @@
 		} else {
 			card->ext_csd.cache_ctrl = 1;
 		}
+			/* enable cache barrier if supported by the device */
+			if (card->ext_csd.cache_ctrl &&
+					card->ext_csd.barrier_support) {
+				err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+					EXT_CSD_BARRIER_CTRL, 1,
+					card->ext_csd.generic_cmd6_time);
+				if (err && err != -EBADMSG) {
+					pr_err("%s: %s: mmc_switch() for BARRIER_CTRL fails %d\n",
+						mmc_hostname(host), __func__,
+						err);
+					goto free_card;
+				}
+				if (err) {
+					pr_warn("%s: Barrier is supported but failed to turn on (%d)\n",
+						mmc_hostname(card->host), err);
+					card->ext_csd.barrier_en = 0;
+					err = 0;
+				} else {
+					card->ext_csd.barrier_en = 1;
+				}
+			}
+		} else {
+			/*
+			 * The MMC standard doesn't specify the card's
+			 * default value for EXT_CSD_CACHE_CTRL, so vendors
+			 * may ship cards with the cache enabled by default.
+			 * It is therefore best to disable the cache
+			 * explicitly when we want to avoid it.
+			 */
+			err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+					EXT_CSD_CACHE_CTRL, 0,
+					card->ext_csd.generic_cmd6_time);
+			if (err) {
+				pr_err("%s: %s: fail on CACHE_CTRL OFF %d\n",
+					mmc_hostname(host), __func__, err);
+				goto free_card;
+			}
+		}
 	}
 
 	/*
@@ -1648,8 +2170,11 @@
 				EXT_CSD_EXP_EVENTS_CTRL,
 				EXT_CSD_PACKED_EVENT_EN,
 				card->ext_csd.generic_cmd6_time);
-		if (err && err != -EBADMSG)
+		if (err && err != -EBADMSG) {
+			pr_err("%s: %s: mmc_switch() for EXP_EVENTS_CTRL fails %d\n",
+					mmc_hostname(host), __func__, err);
 			goto free_card;
+		}
 		if (err) {
 			pr_warn("%s: Enabling packed event failed\n",
 				mmc_hostname(card->host));
@@ -1658,41 +2183,124 @@
 		} else {
 			card->ext_csd.packed_event_en = 1;
 		}
+
 	}
 
-	if (!oldcard)
-		host->card = card;
+	if (!oldcard) {
+		if ((host->caps2 & MMC_CAP2_PACKED_CMD) &&
+		    (card->ext_csd.max_packed_writes > 0)) {
+			/*
+			 * We would like to keep the statistics in an index
+			 * that equals the num of packed requests
+			 * (1 to max_packed_writes)
+			 */
+			card->wr_pack_stats.packing_events = kzalloc(
+				(card->ext_csd.max_packed_writes + 1) *
+				sizeof(*card->wr_pack_stats.packing_events),
+				GFP_KERNEL);
+			if (!card->wr_pack_stats.packing_events) {
+				pr_err("%s: %s: no memory for packing events\n",
+						mmc_hostname(host), __func__);
+				goto free_card;
+			}
+		}
+	}
+
+	/*
+	 * Start auto bkops, if supported.
+	 *
+	 * Note: This leaves the possibility of having both manual and
+	 * auto bkops running in parallel. The runtime implementation
+	 * will allow this, but ignore bkops exceptions on the premise
+	 * that auto bkops will eventually kick in and the device will
+	 * handle bkops without START_BKOPS from the host.
+	 */
+	if (mmc_card_support_auto_bkops(card)) {
+		/*
+		 * Ignore the return value of setting auto bkops.
+		 * If it failed, will run in backward compatible mode.
+		 */
+		(void)mmc_set_auto_bkops(card, true);
+	}
+
+	if (card->ext_csd.cmdq_support && (card->host->caps2 &
+					   MMC_CAP2_CMD_QUEUE)) {
+		err = mmc_select_cmdq(card);
+		if (err) {
+			pr_err("%s: selecting CMDQ mode: failed: %d\n",
+					   mmc_hostname(card->host), err);
+			card->ext_csd.cmdq_support = 0;
+			oldcard = card;
+			goto reinit;
+		}
+	}
 
 	return 0;
 
 free_card:
-	if (!oldcard)
+	if (!oldcard) {
+		host->card = NULL;
 		mmc_remove_card(card);
+	}
 err:
 	return err;
 }
 
-static int mmc_can_sleep(struct mmc_card *card)
+static int mmc_can_sleepawake(struct mmc_host *host)
 {
-	return (card && card->ext_csd.rev >= 3);
+	return host && (host->caps2 & MMC_CAP2_SLEEP_AWAKE) && host->card &&
+		(host->card->ext_csd.rev >= 3);
 }
 
-static int mmc_sleep(struct mmc_host *host)
+static int mmc_sleepawake(struct mmc_host *host, bool sleep)
 {
 	struct mmc_command cmd = {0};
 	struct mmc_card *card = host->card;
-	unsigned int timeout_ms = DIV_ROUND_UP(card->ext_csd.sa_timeout, 10000);
+	unsigned int timeout_ms;
 	int err;
 
+	if (!card) {
+		pr_err("%s: %s: invalid card\n", mmc_hostname(host), __func__);
+		return -EINVAL;
+	}
+
+	timeout_ms = DIV_ROUND_UP(card->ext_csd.sa_timeout, 10000);
+	if (card->ext_csd.rev >= 3 &&
+		card->part_curr == EXT_CSD_PART_CONFIG_ACC_RPMB) {
+		u8 part_config = card->ext_csd.part_config;
+
+		/*
+		 * If the last access before suspend was an RPMB access, then
+		 * switch to default part config so that sleep command CMD5
+		 * and deselect CMD7 can be sent to the card.
+		 */
+		part_config &= ~EXT_CSD_PART_CONFIG_ACC_MASK;
+		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+				 EXT_CSD_PART_CONFIG,
+				 part_config,
+				 card->ext_csd.part_time);
+		if (err) {
+			pr_err("%s: %s: failed to switch to default part config %x\n",
+				mmc_hostname(host), __func__, part_config);
+			return err;
+		}
+		card->ext_csd.part_config = part_config;
+		card->part_curr = card->ext_csd.part_config &
+				  EXT_CSD_PART_CONFIG_ACC_MASK;
+	}
+
 	/* Re-tuning can't be done once the card is deselected */
 	mmc_retune_hold(host);
 
+	if (sleep) {
 	err = mmc_deselect_cards(host);
 	if (err)
 		goto out_release;
+	}
 
 	cmd.opcode = MMC_SLEEP_AWAKE;
 	cmd.arg = card->rca << 16;
+	if (sleep)
 	cmd.arg |= 1 << 15;
 
 	/*
@@ -1721,6 +2329,9 @@
 	if (!cmd.busy_timeout || !(host->caps & MMC_CAP_WAIT_WHILE_BUSY))
 		mmc_delay(timeout_ms);
 
+	if (!sleep)
+		err = mmc_select_card(card);
+
 out_release:
 	mmc_retune_release(host);
 	return err;
@@ -1755,6 +2366,27 @@
 	return err;
 }
 
+int mmc_send_pon(struct mmc_card *card)
+{
+	int err = 0;
+	struct mmc_host *host = card->host;
+
+	if (!mmc_can_poweroff_notify(card))
+		goto out;
+
+	mmc_get_card(card);
+	if (card->pon_type & MMC_LONG_PON)
+		err = mmc_poweroff_notify(host->card, EXT_CSD_POWER_OFF_LONG);
+	else if (card->pon_type & MMC_SHRT_PON)
+		err = mmc_poweroff_notify(host->card, EXT_CSD_POWER_OFF_SHORT);
+	if (err)
+		pr_warn("%s: error %d sending PON type %u",
+			mmc_hostname(host), err, card->pon_type);
+	mmc_put_card(card);
+out:
+	return err;
+}
+
 /*
  * Host is being removed. Free up the current card.
  */
@@ -1763,8 +2395,14 @@
 	BUG_ON(!host);
 	BUG_ON(!host->card);
 
+	unregister_reboot_notifier(&host->card->reboot_notify);
+
+	mmc_exit_clk_scaling(host);
 	mmc_remove_card(host->card);
+
+	mmc_claim_host(host);
 	host->card = NULL;
+	mmc_release_host(host);
 }
 
 /*
@@ -1804,44 +2442,228 @@
 	}
 }
 
+static int mmc_cache_card_ext_csd(struct mmc_host *host)
+{
+	int err;
+	u8 *ext_csd;
+	struct mmc_card *card = host->card;
+
+	err = mmc_get_ext_csd(card, &ext_csd);
+	if (err || !ext_csd) {
+		pr_err("%s: %s: mmc_get_ext_csd failed (%d)\n",
+			mmc_hostname(host), __func__, err);
+		return err;
+	}
+
+	/* only cache read/write fields that the sw changes */
+	card->ext_csd.raw_ext_csd_cmdq = ext_csd[EXT_CSD_CMDQ];
+	card->ext_csd.raw_ext_csd_cache_ctrl = ext_csd[EXT_CSD_CACHE_CTRL];
+	card->ext_csd.raw_ext_csd_bus_width = ext_csd[EXT_CSD_BUS_WIDTH];
+	card->ext_csd.raw_ext_csd_hs_timing = ext_csd[EXT_CSD_HS_TIMING];
+
+	kfree(ext_csd);
+
+	return 0;
+}
+
+static int mmc_test_awake_ext_csd(struct mmc_host *host)
+{
+	int err;
+	u8 *ext_csd;
+	struct mmc_card *card = host->card;
+
+	err = mmc_get_ext_csd(card, &ext_csd);
+	if (err || !ext_csd) {
+		pr_err("%s: %s: mmc_get_ext_csd failed (%d)\n",
+			mmc_hostname(host), __func__, err);
+		return err;
+	}
+
+	/* only compare read/write fields that the sw changes */
+	pr_debug("%s: %s: type(cached:current) cmdq(%d:%d) cache_ctrl(%d:%d) bus_width (%d:%d) timing(%d:%d)\n",
+		mmc_hostname(host), __func__,
+		card->ext_csd.raw_ext_csd_cmdq,
+		ext_csd[EXT_CSD_CMDQ],
+		card->ext_csd.raw_ext_csd_cache_ctrl,
+		ext_csd[EXT_CSD_CACHE_CTRL],
+		card->ext_csd.raw_ext_csd_bus_width,
+		ext_csd[EXT_CSD_BUS_WIDTH],
+		card->ext_csd.raw_ext_csd_hs_timing,
+		ext_csd[EXT_CSD_HS_TIMING]);
+
+	err = !((card->ext_csd.raw_ext_csd_cmdq ==
+			ext_csd[EXT_CSD_CMDQ]) &&
+		(card->ext_csd.raw_ext_csd_cache_ctrl ==
+			ext_csd[EXT_CSD_CACHE_CTRL]) &&
+		(card->ext_csd.raw_ext_csd_bus_width ==
+			ext_csd[EXT_CSD_BUS_WIDTH]) &&
+		(card->ext_csd.raw_ext_csd_hs_timing ==
+			ext_csd[EXT_CSD_HS_TIMING]));
+
+	kfree(ext_csd);
+
+	return err;
+}
+
 static int _mmc_suspend(struct mmc_host *host, bool is_suspend)
 {
-	int err = 0;
-	unsigned int notify_type = is_suspend ? EXT_CSD_POWER_OFF_SHORT :
-					EXT_CSD_POWER_OFF_LONG;
+	int err = 0, ret;
 
 	BUG_ON(!host);
 	BUG_ON(!host->card);
 
+	err = mmc_suspend_clk_scaling(host);
+	if (err) {
+		pr_err("%s: %s: fail to suspend clock scaling (%d)\n",
+			mmc_hostname(host), __func__, err);
+		if (host->card->cmdq_init)
+			wake_up(&host->cmdq_ctx.wait);
+		return err;
+	}
+
 	mmc_claim_host(host);
 
 	if (mmc_card_suspended(host->card))
 		goto out;
 
+	if (host->card->cmdq_init) {
+		BUG_ON(host->cmdq_ctx.active_reqs);
+
+		err = mmc_cmdq_halt(host, true);
+		if (err) {
+			pr_err("%s: halt: failed: %d\n", __func__, err);
+			goto out;
+		}
+		mmc_host_clk_hold(host);
+		host->cmdq_ops->disable(host, true);
+		mmc_host_clk_release(host);
+	}
+
 	if (mmc_card_doing_bkops(host->card)) {
 		err = mmc_stop_bkops(host->card);
 		if (err)
-			goto out;
+			goto out_err;
 	}
 
 	err = mmc_flush_cache(host->card);
 	if (err)
-		goto out;
+		goto out_err;
 
-	if (mmc_can_poweroff_notify(host->card) &&
-		((host->caps2 & MMC_CAP2_FULL_PWR_CYCLE) || !is_suspend))
-		err = mmc_poweroff_notify(host->card, notify_type);
-	else if (mmc_can_sleep(host->card))
-		err = mmc_sleep(host);
-	else if (!mmc_host_is_spi(host))
+	if (mmc_can_sleepawake(host)) {
+		/*
+		 * For caching host->ios to cached_ios we need to
+		 * make sure that clocks are not gated otherwise
+		 * cached_ios->clock will be 0.
+		 */
+		mmc_host_clk_hold(host);
+		memcpy(&host->cached_ios, &host->ios, sizeof(host->cached_ios));
+		mmc_cache_card_ext_csd(host);
+		err = mmc_sleepawake(host, true);
+		mmc_host_clk_release(host);
+	} else if (!mmc_host_is_spi(host)) {
 		err = mmc_deselect_cards(host);
+	}
 
-	if (!err) {
+	if (err)
+		goto out_err;
 		mmc_power_off(host);
 		mmc_card_set_suspended(host->card);
+
+	goto out;
+
+out_err:
+	/*
+	 * On error, put the controller back in CMDQ mode and unhalt it.
+	 * We expect cmdq enable and unhalt not to return any error,
+	 * since they only program a few registers.
+	 */
+	if (host->card->cmdq_init) {
+		mmc_host_clk_hold(host);
+		ret = host->cmdq_ops->enable(host);
+		if (ret)
+			pr_err("%s: %s: enabling CMDQ mode failed (%d)\n",
+				mmc_hostname(host), __func__, ret);
+		mmc_host_clk_release(host);
+		mmc_cmdq_halt(host, false);
 	}
+
 out:
+	/* Kick CMDQ thread to process any requests that came in while suspending */
+	if (host->card->cmdq_init)
+		wake_up(&host->cmdq_ctx.wait);
+
 	mmc_release_host(host);
+	if (err)
+		mmc_resume_clk_scaling(host);
+	return err;
+}
+
+static int mmc_partial_init(struct mmc_host *host)
+{
+	int err = 0;
+	struct mmc_card *card = host->card;
+
+	pr_debug("%s: %s: starting partial init\n",
+		mmc_hostname(host), __func__);
+
+	mmc_set_bus_width(host, host->cached_ios.bus_width);
+	mmc_set_timing(host, host->cached_ios.timing);
+	mmc_set_clock(host, host->cached_ios.clock);
+	mmc_set_bus_mode(host, host->cached_ios.bus_mode);
+
+	mmc_host_clk_hold(host);
+
+	if (mmc_card_hs400(card)) {
+		if (card->ext_csd.strobe_support && host->ops->enhanced_strobe)
+			err = host->ops->enhanced_strobe(host);
+		else if (host->ops->execute_tuning)
+			err = host->ops->execute_tuning(host,
+				MMC_SEND_TUNING_BLOCK_HS200);
+	} else if (mmc_card_hs200(card) && host->ops->execute_tuning) {
+		err = host->ops->execute_tuning(host,
+			MMC_SEND_TUNING_BLOCK_HS200);
+		if (err)
+			pr_warn("%s: %s: tuning execution failed (%d)\n",
+				mmc_hostname(host), __func__, err);
+	}
+
+	/*
+	 * The ext_csd is read to make sure the card did not go through a
+	 * power failure during the sleep period.
+	 * A subset of the W/E_P, W/C_P registers is tested. If these
+	 * register values differ from the values cached during suspend,
+	 * we conclude that a power failure occurred and fall back to the
+	 * full initialization sequence.
+	 * In addition, the full init sequence transfers the ext_csd before
+	 * moving to CMDQ mode, which has a side effect of configuring SDHCI
+	 * registers that must be set up before entering CMDQ mode. The same
+	 * registers need to be configured for partial init.
+	 */
+	err = mmc_test_awake_ext_csd(host);
+	if (err) {
+		pr_debug("%s: %s: fail on ext_csd read (%d)\n",
+			mmc_hostname(host), __func__, err);
+		goto out;
+	}
+	pr_debug("%s: %s: reading and comparing ext_csd successful\n",
+		mmc_hostname(host), __func__);
+
+	if (card->ext_csd.cmdq_support && (card->host->caps2 &
+					   MMC_CAP2_CMD_QUEUE)) {
+		err = mmc_select_cmdq(card);
+		if (err) {
+			pr_warn("%s: %s: enabling CMDQ mode failed (%d)\n",
+					mmc_hostname(card->host),
+					__func__, err);
+		}
+	}
+out:
+	mmc_host_clk_release(host);
+
+	pr_debug("%s: %s: done partial init (%d)\n",
+		mmc_hostname(host), __func__, err);
+
 	return err;
 }
 
@@ -1851,13 +2673,18 @@
 static int mmc_suspend(struct mmc_host *host)
 {
 	int err;
+	ktime_t start = ktime_get();
 
+	MMC_TRACE(host, "%s: Enter\n", __func__);
 	err = _mmc_suspend(host, true);
 	if (!err) {
 		pm_runtime_disable(&host->card->dev);
 		pm_runtime_set_suspended(&host->card->dev);
 	}
 
+	trace_mmc_suspend(mmc_hostname(host), err,
+			ktime_to_us(ktime_sub(ktime_get(), start)));
+	MMC_TRACE(host, "%s: Exit err: %d\n", __func__, err);
 	return err;
 }
 
@@ -1867,43 +2694,61 @@
  */
 static int _mmc_resume(struct mmc_host *host)
 {
-	int err = 0;
+	int err = -ENOSYS;
+	int retries;
 
 	BUG_ON(!host);
 	BUG_ON(!host->card);
 
 	mmc_claim_host(host);
 
-	if (!mmc_card_suspended(host->card))
+	if (!mmc_card_suspended(host->card)) {
+		mmc_release_host(host);
 		goto out;
+	}
 
 	mmc_power_up(host, host->card->ocr);
+	retries = 3;
+	while (retries) {
+		if (mmc_can_sleepawake(host)) {
+			err = mmc_sleepawake(host, false);
+			if (!err)
+				err = mmc_partial_init(host);
+			if (err)
+				pr_err("%s: %s: awake failed (%d), fallback to full init\n",
+					mmc_hostname(host), __func__, err);
+		}
+
+		if (err)
 	err = mmc_init_card(host, host->card->ocr, host->card);
-	mmc_card_clr_suspended(host->card);
 
-out:
-	mmc_release_host(host);
-	return err;
+		if (err) {
+			pr_err("%s: MMC card re-init failed rc = %d (retries = %d)\n",
+			       mmc_hostname(host), err, retries);
+			retries--;
+			mmc_power_off(host);
+			usleep_range(5000, 5500);
+			mmc_power_up(host, host->card->ocr);
+			mmc_select_voltage(host, host->card->ocr);
+			continue;
 }
+		break;
+	}
+	if (!err && mmc_card_cmdq(host->card)) {
+		err = mmc_cmdq_halt(host, false);
+		if (err)
+			pr_err("%s: un-halt: failed: %d\n", __func__, err);
+	}
+	mmc_card_clr_suspended(host->card);
 
-/*
- * Shutdown callback
- */
-static int mmc_shutdown(struct mmc_host *host)
-{
-	int err = 0;
-
-	/*
-	 * In a specific case for poweroff notify, we need to resume the card
-	 * before we can shutdown it properly.
-	 */
-	if (mmc_can_poweroff_notify(host->card) &&
-		!(host->caps2 & MMC_CAP2_FULL_PWR_CYCLE))
-		err = _mmc_resume(host);
+	mmc_release_host(host);
 
-	if (!err)
-		err = _mmc_suspend(host, false);
+	err = mmc_resume_clk_scaling(host);
+	if (err)
+		pr_err("%s: %s: fail to resume clock scaling (%d)\n",
+			mmc_hostname(host), __func__, err);
 
+out:
 	return err;
 }
 
@@ -1913,7 +2758,9 @@
 static int mmc_resume(struct mmc_host *host)
 {
 	int err = 0;
+	ktime_t start = ktime_get();
 
+	MMC_TRACE(host, "%s: Enter\n", __func__);
 	if (!(host->caps & MMC_CAP_RUNTIME_RESUME)) {
 		err = _mmc_resume(host);
 		pm_runtime_set_active(&host->card->dev);
@@ -1921,24 +2768,95 @@
 	}
 	pm_runtime_enable(&host->card->dev);
 
+	trace_mmc_resume(mmc_hostname(host), err,
+			ktime_to_us(ktime_sub(ktime_get(), start)));
+	MMC_TRACE(host, "%s: Exit err: %d\n", __func__, err);
 	return err;
 }
 
+#define MAX_DEFER_SUSPEND_COUNTER 20
+static bool mmc_process_bkops(struct mmc_host *host)
+{
+	int err = 0;
+	bool is_running = false;
+	u32 status;
+
+	mmc_claim_host(host);
+	if (mmc_card_cmdq(host->card)) {
+		BUG_ON(host->cmdq_ctx.active_reqs);
+
+		err = mmc_cmdq_halt(host, true);
+		if (err) {
+			pr_err("%s: halt: failed: %d\n", __func__, err);
+			goto unhalt;
+		}
+	}
+
+	if (mmc_card_doing_bkops(host->card)) {
+		/* check that manual bkops finished */
+		err = mmc_send_status(host->card, &status);
+		if (err) {
+			pr_err("%s: Get card status fail\n", __func__);
+			goto unhalt;
+		}
+		if (R1_CURRENT_STATE(status) != R1_STATE_PRG) {
+			mmc_card_clr_doing_bkops(host->card);
+			goto unhalt;
+		}
+	} else {
+		mmc_check_bkops(host->card);
+	}
+
+	if (host->card->bkops.needs_bkops &&
+			!mmc_card_support_auto_bkops(host->card))
+		mmc_start_manual_bkops(host->card);
+
+unhalt:
+	if (mmc_card_cmdq(host->card)) {
+		err = mmc_cmdq_halt(host, false);
+		if (err)
+			pr_err("%s: unhalt: failed: %d\n", __func__, err);
+	}
+	mmc_release_host(host);
+
+	if (host->card->bkops.needs_bkops ||
+			mmc_card_doing_bkops(host->card)) {
+		if (host->card->bkops.retry_counter++ <
+				MAX_DEFER_SUSPEND_COUNTER) {
+			host->card->bkops.needs_check = true;
+			is_running = true;
+		} else {
+			host->card->bkops.retry_counter = 0;
+		}
+	}
+	return is_running;
+}
+
 /*
  * Callback for runtime_suspend.
  */
 static int mmc_runtime_suspend(struct mmc_host *host)
 {
 	int err;
+	ktime_t start = ktime_get();
 
 	if (!(host->caps & MMC_CAP_AGGRESSIVE_PM))
 		return 0;
 
+	if (mmc_process_bkops(host)) {
+		pm_runtime_mark_last_busy(&host->card->dev);
+		pr_debug("%s: defered, need bkops\n", __func__);
+		return -EBUSY;
+	}
+
+	MMC_TRACE(host, "%s\n", __func__);
 	err = _mmc_suspend(host, true);
 	if (err)
 		pr_err("%s: error %d doing aggressive suspend\n",
 			mmc_hostname(host), err);
 
+	trace_mmc_runtime_suspend(mmc_hostname(host), err,
+			ktime_to_us(ktime_sub(ktime_get(), start)));
 	return err;
 }
 
@@ -1948,16 +2866,21 @@
 static int mmc_runtime_resume(struct mmc_host *host)
 {
 	int err;
+	ktime_t start = ktime_get();
 
 	if (!(host->caps & (MMC_CAP_AGGRESSIVE_PM | MMC_CAP_RUNTIME_RESUME)))
 		return 0;
 
+	MMC_TRACE(host, "%s\n", __func__);
 	err = _mmc_resume(host);
 	if (err)
 		pr_err("%s: error %d doing aggressive resume\n",
 			mmc_hostname(host), err);
 
-	return 0;
+	trace_mmc_runtime_resume(mmc_hostname(host), err,
+			ktime_to_us(ktime_sub(ktime_get(), start)));
+
+	return err;
 }
 
 int mmc_can_reset(struct mmc_card *card)
@@ -1974,21 +2897,58 @@
 static int mmc_reset(struct mmc_host *host)
 {
 	struct mmc_card *card = host->card;
+	int ret;
 
-	if (!(host->caps & MMC_CAP_HW_RESET) || !host->ops->hw_reset)
-		return -EOPNOTSUPP;
-
-	if (!mmc_can_reset(card))
-		return -EOPNOTSUPP;
-
+	if ((host->caps & MMC_CAP_HW_RESET) && host->ops->hw_reset &&
+	     mmc_can_reset(card)) {
+		/* If the card accepts the RST_n signal, send it. */
 	mmc_set_clock(host, host->f_init);
-
 	host->ops->hw_reset(host);
-
 	/* Set initial state and call mmc_set_ios */
 	mmc_set_initial_state(host);
+	} else {
+		/* Do a brute force power cycle */
+		mmc_power_cycle(host, card->ocr);
+	}
 
-	return mmc_init_card(host, card->ocr, card);
+	/* Suspend clk scaling to avoid switching frequencies intermittently */
+
+	ret = mmc_suspend_clk_scaling(host);
+	if (ret) {
+		pr_err("%s: %s: fail to suspend clock scaling (%d)\n",
+			mmc_hostname(host), __func__, ret);
+		return ret;
+	}
+
+	ret = mmc_init_card(host, host->card->ocr, host->card);
+	if (ret) {
+		pr_err("%s: %s: mmc_init_card failed (%d)\n",
+			mmc_hostname(host), __func__, ret);
+		return ret;
+	}
+
+	ret = mmc_resume_clk_scaling(host);
+	if (ret)
+		pr_err("%s: %s: fail to resume clock scaling (%d)\n",
+			mmc_hostname(host), __func__, ret);
+
+	return ret;
+}
+
+static int mmc_shutdown(struct mmc_host *host)
+{
+	struct mmc_card *card = host->card;
+
+	/*
+	 * Exit clock scaling so that it doesn't kick in after
+	 * power off notification is sent
+	 */
+	if (host->caps2 & MMC_CAP2_CLK_SCALE)
+		mmc_exit_clk_scaling(card->host);
+	/* send power off notification */
+	if (mmc_card_mmc(card))
+		mmc_send_pon(card);
+	return 0;
 }
 
 static const struct mmc_bus_ops mmc_ops = {
@@ -1999,8 +2959,9 @@
 	.runtime_suspend = mmc_runtime_suspend,
 	.runtime_resume = mmc_runtime_resume,
 	.alive = mmc_alive,
-	.shutdown = mmc_shutdown,
+	.change_bus_speed = mmc_change_bus_speed,
 	.reset = mmc_reset,
+	.shutdown = mmc_shutdown,
 };
 
 /*
@@ -2058,6 +3019,14 @@
 		goto remove_card;
 
 	mmc_claim_host(host);
+	err = mmc_init_clk_scaling(host);
+	if (err) {
+		mmc_release_host(host);
+		goto remove_card;
+	}
+
+	register_reboot_notifier(&host->card->reboot_notify);
+
 	return 0;
 
 remove_card:
diff -ruw linux-4.4.115/drivers/mmc/core/mmc_ops.c linux-4.4.115-fbx/drivers/mmc/core/mmc_ops.c
--- linux-4.4.115/drivers/mmc/core/mmc_ops.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/mmc/core/mmc_ops.c	2019-10-29 09:26:24.061207269 +0100
@@ -54,7 +54,7 @@
 	0xff, 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee,
 };
 
-static inline int __mmc_send_status(struct mmc_card *card, u32 *status,
+int __mmc_send_status(struct mmc_card *card, u32 *status,
 				    bool ignore_crc)
 {
 	int err;
@@ -466,6 +466,45 @@
 }
 
 /**
+ *	mmc_prepare_switch - helper; prepare to modify EXT_CSD register
+ *	@cmd: command to be populated
+ *	@index: EXT_CSD register index
+ *	@value: value to program into EXT_CSD register
+ *	@set: cmd set values
+ *	@tout_ms: timeout (ms) for operation performed by register write,
+ *		  timeout of zero implies maximum possible timeout
+ *	@use_busy_signal: use the busy signal as response type
+ *
+ *	Helper to prepare a CMD6 (SWITCH) command that modifies an EXT_CSD
+ *	register of the selected card.
+ */
+
+static inline void mmc_prepare_switch(struct mmc_command *cmd, u8 index,
+				      u8 value, u8 set, unsigned int tout_ms,
+				      bool use_busy_signal)
+{
+	cmd->opcode = MMC_SWITCH;
+	cmd->arg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
+		  (index << 16) |
+		  (value << 8) |
+		  set;
+	cmd->flags = MMC_CMD_AC;
+	cmd->busy_timeout = tout_ms;
+	if (use_busy_signal)
+		cmd->flags |= MMC_RSP_SPI_R1B | MMC_RSP_R1B;
+	else
+		cmd->flags |= MMC_RSP_SPI_R1 | MMC_RSP_R1;
+}
+
+int __mmc_switch_cmdq_mode(struct mmc_command *cmd, u8 set, u8 index, u8 value,
+			   unsigned int timeout_ms, bool use_busy_signal,
+			   bool ignore_timeout)
+{
+	mmc_prepare_switch(cmd, index, value, set, timeout_ms, use_busy_signal);
+	return 0;
+}
+EXPORT_SYMBOL(__mmc_switch_cmdq_mode);
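
mmc_prepare_switch() packs the JEDEC CMD6 argument: access mode in bits [25:24], EXT_CSD index in [23:16], value in [15:8], command set in [2:0]. A standalone worked example, with constants copied from <linux/mmc/mmc.h>:

	#include <stdio.h>

	#define MMC_SWITCH_MODE_WRITE_BYTE	0x03	/* as in <linux/mmc/mmc.h> */
	#define EXT_CSD_HS_TIMING		185
	#define EXT_CSD_TIMING_HS200		2

	int main(void)
	{
		unsigned int arg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
				   (EXT_CSD_HS_TIMING << 16) |
				   (EXT_CSD_TIMING_HS200 << 8) |
				   0;	/* command set */

		printf("CMD6 arg = 0x%08x\n", arg);	/* prints 0x03b90200 */
		return 0;
	}
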
+
+/**
  *	__mmc_switch - modify EXT_CSD register
  *	@card: the MMC card associated with the data transfer
  *	@set: cmd set values
@@ -489,6 +528,7 @@
 	unsigned long timeout;
 	u32 status = 0;
 	bool use_r1b_resp = use_busy_signal;
+	int retries = 5;
 
 	mmc_retune_hold(host);
 
@@ -502,12 +542,8 @@
 		(timeout_ms > host->max_busy_timeout))
 		use_r1b_resp = false;
 
-	cmd.opcode = MMC_SWITCH;
-	cmd.arg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
-		  (index << 16) |
-		  (value << 8) |
-		  set;
-	cmd.flags = MMC_CMD_AC;
+	mmc_prepare_switch(&cmd, index, value, set, timeout_ms,
+			   use_r1b_resp);
 	if (use_r1b_resp) {
 		cmd.flags |= MMC_RSP_SPI_R1B | MMC_RSP_R1B;
 		/*
@@ -521,6 +557,8 @@
 
 	if (index == EXT_CSD_SANITIZE_START)
 		cmd.sanitize_busy = true;
+	else if (index == EXT_CSD_BKOPS_START)
+		cmd.bkops_busy = true;
 
 	err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
 	if (err)
@@ -566,11 +604,18 @@
 
 		/* Timeout if the device never leaves the program state. */
 		if (time_after(jiffies, timeout)) {
-			pr_err("%s: Card stuck in programming state! %s\n",
-				mmc_hostname(host), __func__);
+			pr_err("%s: Card stuck in programming state! %s, timeout:%ums, retries:%d\n",
+				mmc_hostname(host), __func__,
+				timeout_ms, retries);
+			if (retries)
+				timeout = jiffies +
+						msecs_to_jiffies(timeout_ms);
+			else {
 			err = -ETIMEDOUT;
 			goto out;
 		}
+			retries--;
+		}
 	} while (R1_CURRENT_STATE(status) == R1_STATE_PRG);
 
 	err = mmc_switch_status_error(host, status);
@@ -713,7 +758,10 @@
 
 	data.sg = &sg;
 	data.sg_len = 1;
+	data.timeout_ns = 1000000;
+	data.timeout_clks = 0;
 	mmc_set_data_timeout(&data, card);
+
 	sg_init_one(&sg, data_buf, len);
 	mmc_wait_for_req(host, &mrq);
 	err = 0;
@@ -762,7 +810,7 @@
 	unsigned int opcode;
 	int err;
 
-	if (!card->ext_csd.hpi) {
+	if (!card->ext_csd.hpi_en) {
 		pr_warn("%s: Card didn't support HPI command\n",
 			mmc_hostname(card->host));
 		return -EINVAL;
@@ -779,7 +827,7 @@
 
 	err = mmc_wait_for_cmd(card->host, &cmd, 0);
 	if (err) {
-		pr_warn("%s: error %d interrupting operation. "
+		pr_debug("%s: error %d interrupting operation. "
 			"HPI command response %#x\n", mmc_hostname(card->host),
 			err, cmd.resp[0]);
 		return err;
@@ -794,3 +842,21 @@
 {
 	return (card && card->csd.mmca_vsn > CSD_SPEC_VER_3);
 }
+
+int mmc_discard_queue(struct mmc_host *host, u32 tasks)
+{
+	struct mmc_command cmd = {0};
+
+	cmd.opcode = MMC_CMDQ_TASK_MGMT;
+	if (tasks) {
+		cmd.arg = DISCARD_TASK;
+		cmd.arg |= (tasks << 16);
+	} else {
+		cmd.arg = DISCARD_QUEUE;
+	}
+
+	cmd.flags = MMC_RSP_R1B | MMC_CMD_AC;
+
+	return mmc_wait_for_cmd(host, &cmd, 0);
+}
+EXPORT_SYMBOL(mmc_discard_queue);
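
mmc_discard_queue() only assembles the CMD48 argument. A sketch of that
packing, under the assumption that DISCARD_QUEUE/DISCARD_TASK carry the
eMMC 5.1 task-management op-codes 0x1/0x2 and that the task ID occupies
bits [20:16]:

#include <stdint.h>
#include <stdio.h>

#define DISCARD_QUEUE	0x1	/* discard the whole queue */
#define DISCARD_TASK	0x2	/* discard one task, id in bits [20:16] */

static uint32_t cmdq_tm_arg(uint32_t tasks)
{
	if (tasks)
		return DISCARD_TASK | (tasks << 16);
	return DISCARD_QUEUE;
}

int main(void)
{
	printf("discard all    : 0x%08x\n", cmdq_tm_arg(0));
	printf("discard task 5 : 0x%08x\n", cmdq_tm_arg(5));
	return 0;
}
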
diff -ruw linux-4.4.115/drivers/mmc/core/mmc_ops.h linux-4.4.115-fbx/drivers/mmc/core/mmc_ops.h
--- linux-4.4.115/drivers/mmc/core/mmc_ops.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/mmc/core/mmc_ops.h	2019-01-22 16:16:24.771257889 +0100
@@ -27,10 +27,12 @@
 int mmc_bus_test(struct mmc_card *card, u8 bus_width);
 int mmc_send_hpi_cmd(struct mmc_card *card, u32 *status);
 int mmc_can_ext_csd(struct mmc_card *card);
+int mmc_discard_queue(struct mmc_host *host, u32 tasks);
 int mmc_switch_status_error(struct mmc_host *host, u32 status);
 int __mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
 		unsigned int timeout_ms, bool use_busy_signal, bool send_status,
 		bool ignore_crc);
-
+int __mmc_send_status(struct mmc_card *card, u32 *status,
+				    bool ignore_crc);
 #endif
 
diff -ruw linux-4.4.115/drivers/mmc/core/quirks.c linux-4.4.115-fbx/drivers/mmc/core/quirks.c
--- linux-4.4.115/drivers/mmc/core/quirks.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/mmc/core/quirks.c	2019-01-22 16:16:24.771257889 +0100
@@ -35,7 +35,85 @@
 #define SDIO_DEVICE_ID_MARVELL_8797_F0	0x9128
 #endif
 
+#ifndef SDIO_VENDOR_ID_MSM
+#define SDIO_VENDOR_ID_MSM		0x0070
+#endif
+
+#ifndef SDIO_DEVICE_ID_MSM_WCN1314
+#define SDIO_DEVICE_ID_MSM_WCN1314	0x2881
+#endif
+
+#ifndef SDIO_VENDOR_ID_MSM_QCA
+#define SDIO_VENDOR_ID_MSM_QCA		0x271
+#endif
+
+#ifndef SDIO_DEVICE_ID_MSM_QCA_AR6003_1
+#define SDIO_DEVICE_ID_MSM_QCA_AR6003_1	0x300
+#endif
+
+#ifndef SDIO_DEVICE_ID_MSM_QCA_AR6003_2
+#define SDIO_DEVICE_ID_MSM_QCA_AR6003_2	0x301
+#endif
+
+#ifndef SDIO_DEVICE_ID_MSM_QCA_AR6004_1
+#define SDIO_DEVICE_ID_MSM_QCA_AR6004_1	0x400
+#endif
+
+#ifndef SDIO_DEVICE_ID_MSM_QCA_AR6004_2
+#define SDIO_DEVICE_ID_MSM_QCA_AR6004_2	0x401
+#endif
+
+#ifndef SDIO_VENDOR_ID_QCA6574
+#define SDIO_VENDOR_ID_QCA6574		0x271
+#endif
+
+#ifndef SDIO_DEVICE_ID_QCA6574
+#define SDIO_DEVICE_ID_QCA6574		0x50a
+#endif
+
+#ifndef SDIO_VENDOR_ID_QCA9377
+#define SDIO_VENDOR_ID_QCA9377		0x271
+#endif
+
+#ifndef SDIO_DEVICE_ID_QCA9377
+#define SDIO_DEVICE_ID_QCA9377		0x701
+#endif
+
+
+/*
+ * This hook just adds a quirk for all SDIO devices.
+ */
+static void add_quirk_for_sdio_devices(struct mmc_card *card, int data)
+{
+	if (mmc_card_sdio(card))
+		card->quirks |= data;
+}
+
 static const struct mmc_fixup mmc_fixup_methods[] = {
+	/* By default, SDIO devices are considered broken for clock gating; */
+	/* known-good cards are whitelisted below as they are tested. */
+	SDIO_FIXUP(SDIO_ANY_ID, SDIO_ANY_ID,
+		   add_quirk_for_sdio_devices,
+		   MMC_QUIRK_BROKEN_CLK_GATING),
+
+	SDIO_FIXUP(SDIO_VENDOR_ID_TI, SDIO_DEVICE_ID_TI_WL1271,
+		   remove_quirk, MMC_QUIRK_BROKEN_CLK_GATING),
+
+	SDIO_FIXUP(SDIO_VENDOR_ID_MSM, SDIO_DEVICE_ID_MSM_WCN1314,
+		   remove_quirk, MMC_QUIRK_BROKEN_CLK_GATING),
+
+	SDIO_FIXUP(SDIO_VENDOR_ID_MSM_QCA, SDIO_DEVICE_ID_MSM_QCA_AR6003_1,
+		   remove_quirk, MMC_QUIRK_BROKEN_CLK_GATING),
+
+	SDIO_FIXUP(SDIO_VENDOR_ID_MSM_QCA, SDIO_DEVICE_ID_MSM_QCA_AR6003_2,
+		   remove_quirk, MMC_QUIRK_BROKEN_CLK_GATING),
+
+	SDIO_FIXUP(SDIO_VENDOR_ID_MSM_QCA, SDIO_DEVICE_ID_MSM_QCA_AR6004_1,
+		   remove_quirk, MMC_QUIRK_BROKEN_CLK_GATING),
+
+	SDIO_FIXUP(SDIO_VENDOR_ID_MSM_QCA, SDIO_DEVICE_ID_MSM_QCA_AR6004_2,
+		   remove_quirk, MMC_QUIRK_BROKEN_CLK_GATING),
+
 	SDIO_FIXUP(SDIO_VENDOR_ID_TI, SDIO_DEVICE_ID_TI_WL1271,
 		   add_quirk, MMC_QUIRK_NONSTD_FUNC_IF),
 
@@ -48,6 +126,11 @@
 	SDIO_FIXUP(SDIO_VENDOR_ID_MARVELL, SDIO_DEVICE_ID_MARVELL_8797_F0,
 		   add_quirk, MMC_QUIRK_BROKEN_IRQ_POLLING),
 
+	SDIO_FIXUP(SDIO_VENDOR_ID_QCA6574, SDIO_DEVICE_ID_QCA6574,
+		   add_quirk, MMC_QUIRK_QCA6574_SETTINGS),
+
+	SDIO_FIXUP(SDIO_VENDOR_ID_QCA9377, SDIO_DEVICE_ID_QCA9377,
+		add_quirk, MMC_QUIRK_QCA9377_SETTINGS),
 	END_FIXUP
 };
 
@@ -68,6 +151,8 @@
 		    (f->name == CID_NAME_ANY ||
 		     !strncmp(f->name, card->cid.prod_name,
 			      sizeof(card->cid.prod_name))) &&
+		    (f->ext_csd_rev == EXT_CSD_REV_ANY ||
+		     f->ext_csd_rev == card->ext_csd.rev) &&
 		    (f->cis_vendor == card->cis.vendor ||
 		     f->cis_vendor == (u16) SDIO_ANY_ID) &&
 		    (f->cis_device == card->cis.device ||
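
The fixup table above works add-then-remove: every SDIO card is first marked
broken for clock gating, and each tested vendor/device pair then clears the
flag again. A minimal standalone model of that matching pass; the WL1271 IDs
match the kernel's sdio_ids.h, the rest is illustrative.

#include <stdio.h>

#define QUIRK_BROKEN_CLK_GATING	(1 << 0)
#define ANY_ID			(~0u)

struct card { unsigned vendor, device, quirks; };
struct fixup {
	unsigned vendor, device;
	void (*hook)(struct card *, unsigned);
	unsigned data;
};

static void add_quirk(struct card *c, unsigned q)    { c->quirks |= q; }
static void remove_quirk(struct card *c, unsigned q) { c->quirks &= ~q; }

static const struct fixup fixups[] = {
	/* blanket quirk first, whitelist entries after it */
	{ ANY_ID, ANY_ID, add_quirk,    QUIRK_BROKEN_CLK_GATING },
	{ 0x0097, 0x4076, remove_quirk, QUIRK_BROKEN_CLK_GATING },
	{ 0, 0, NULL, 0 },
};

static void fixup_card(struct card *c)
{
	const struct fixup *f;

	for (f = fixups; f->hook; f++)
		if ((f->vendor == c->vendor || f->vendor == ANY_ID) &&
		    (f->device == c->device || f->device == ANY_ID))
			f->hook(c, f->data);
}

int main(void)
{
	struct card wl1271 = { 0x0097, 0x4076, 0 };	/* TI WL1271 */
	struct card other  = { 0x1234, 0x5678, 0 };

	fixup_card(&wl1271);
	fixup_card(&other);
	printf("wl1271 quirks: 0x%x, other quirks: 0x%x\n",
	       wl1271.quirks, other.quirks);
	return 0;
}
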
diff -ruw linux-4.4.115/drivers/mmc/core/sd.c linux-4.4.115-fbx/drivers/mmc/core/sd.c
--- linux-4.4.115/drivers/mmc/core/sd.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/mmc/core/sd.c	2019-10-29 09:26:24.061207269 +0100
@@ -27,6 +27,12 @@
 #include "sd.h"
 #include "sd_ops.h"
 
+#define UHS_SDR104_MIN_DTR	(100 * 1000 * 1000)
+#define UHS_DDR50_MIN_DTR	(50 * 1000 * 1000)
+#define UHS_SDR50_MIN_DTR	(50 * 1000 * 1000)
+#define UHS_SDR25_MIN_DTR	(25 * 1000 * 1000)
+#define UHS_SDR12_MIN_DTR	(12.5 * 1000 * 1000)
+
 static const unsigned int tran_exp[] = {
 	10000,		100000,		1000000,	10000000,
 	0,		0,		0,		0
@@ -369,9 +375,9 @@
 		goto out;
 
 	if ((status[16] & 0xF) != 1) {
-		pr_warn("%s: Problem switching card into high-speed mode!\n",
-			mmc_hostname(card->host));
-		err = 0;
+		pr_warn("%s: Problem switching card into high-speed mode! status:%x\n",
+			mmc_hostname(card->host), (status[16] & 0xF));
+		err = -EBUSY;
 	} else {
 		err = 1;
 	}
@@ -425,18 +431,22 @@
 	}
 
 	if ((card->host->caps & MMC_CAP_UHS_SDR104) &&
-	    (card->sw_caps.sd3_bus_mode & SD_MODE_UHS_SDR104)) {
+	    (card->sw_caps.sd3_bus_mode & SD_MODE_UHS_SDR104) &&
+	    (card->host->f_max > UHS_SDR104_MIN_DTR)) {
 			card->sd_bus_speed = UHS_SDR104_BUS_SPEED;
-	} else if ((card->host->caps & MMC_CAP_UHS_DDR50) &&
-		   (card->sw_caps.sd3_bus_mode & SD_MODE_UHS_DDR50)) {
-			card->sd_bus_speed = UHS_DDR50_BUS_SPEED;
 	} else if ((card->host->caps & (MMC_CAP_UHS_SDR104 |
 		    MMC_CAP_UHS_SDR50)) && (card->sw_caps.sd3_bus_mode &
-		    SD_MODE_UHS_SDR50)) {
+		    SD_MODE_UHS_SDR50) &&
+		    (card->host->f_max > UHS_SDR50_MIN_DTR)) {
 			card->sd_bus_speed = UHS_SDR50_BUS_SPEED;
+	} else if ((card->host->caps & MMC_CAP_UHS_DDR50) &&
+		   (card->sw_caps.sd3_bus_mode & SD_MODE_UHS_DDR50) &&
+		    (card->host->f_max > UHS_DDR50_MIN_DTR)) {
+		card->sd_bus_speed = UHS_DDR50_BUS_SPEED;
 	} else if ((card->host->caps & (MMC_CAP_UHS_SDR104 |
 		    MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR25)) &&
-		   (card->sw_caps.sd3_bus_mode & SD_MODE_UHS_SDR25)) {
+		   (card->sw_caps.sd3_bus_mode & SD_MODE_UHS_SDR25) &&
+		 (card->host->f_max > UHS_SDR25_MIN_DTR)) {
 			card->sd_bus_speed = UHS_SDR25_BUS_SPEED;
 	} else if ((card->host->caps & (MMC_CAP_UHS_SDR104 |
 		    MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR25 |
@@ -480,15 +490,17 @@
 	if (err)
 		return err;
 
-	if ((status[16] & 0xF) != card->sd_bus_speed)
-		pr_warn("%s: Problem setting bus speed mode!\n",
-			mmc_hostname(card->host));
-	else {
+	if ((status[16] & 0xF) != card->sd_bus_speed) {
+		pr_warn("%s: Problem setting bus speed mode(%u)! max_dtr:%u, timing:%u, status:%x\n",
+			mmc_hostname(card->host), card->sd_bus_speed,
+			card->sw_caps.uhs_max_dtr, timing, (status[16] & 0xF));
+		err = -EBUSY;
+	} else {
 		mmc_set_timing(card->host, timing);
 		mmc_set_clock(card->host, card->sw_caps.uhs_max_dtr);
 	}
 
-	return 0;
+	return err;
 }
 
 /* Get host's max current setting at its current voltage */
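
The reordering above now prefers SDR50 over DDR50 and additionally gates
every mode on host->f_max exceeding the mode's minimum data rate. A condensed
standalone model of the resulting priority chain; note the comparisons are
strict, so a host capped at exactly 50 MHz falls through to SDR25.

#include <stdio.h>

enum uhs { NONE, SDR25, DDR50, SDR50, SDR104 };

static const unsigned min_dtr[] = {
	[SDR25] = 25000000, [DDR50] = 50000000,
	[SDR50] = 50000000, [SDR104] = 100000000,
};

static enum uhs pick_bus_speed(unsigned host_caps, unsigned card_modes,
			       unsigned f_max)
{
	/* a mode is chosen only if host and card advertise it AND the
	 * host clock can actually exceed its minimum data rate */
	static const enum uhs order[] = { SDR104, SDR50, DDR50, SDR25 };
	int i;

	for (i = 0; i < 4; i++) {
		enum uhs m = order[i];

		if ((host_caps & (1u << m)) && (card_modes & (1u << m)) &&
		    f_max > min_dtr[m])
			return m;
	}
	return NONE;
}

int main(void)
{
	unsigned all = (1u << SDR104) | (1u << SDR50) |
		       (1u << DDR50) | (1u << SDR25);

	/* f_max of exactly 50 MHz: SDR104/SDR50/DDR50 are all skipped,
	 * SDR25 (1) is picked */
	printf("picked mode: %d\n", pick_bus_speed(all, all, 50000000));
	return 0;
}
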
@@ -569,6 +581,64 @@
 	return 0;
 }
 
+/**
+ * mmc_sd_change_bus_speed() - Change SD card bus frequency at runtime
+ * @host: pointer to mmc host structure
+ * @freq: pointer to desired frequency to be set
+ *
+ * Change the SD card bus frequency at runtime after the card has been
+ * initialized. Callers are expected to ensure the card is in a valid
+ * state (DATA/RCV/TRANSFER) before changing the frequency at runtime.
+ *
+ * If the requested frequency is greater than the maximum supported by the
+ * card, *freq is clamped to the card's maximum; if it is less than the
+ * minimum supported by the host, *freq is clamped to the host's minimum.
+ */
+static int mmc_sd_change_bus_speed(struct mmc_host *host, unsigned long *freq)
+{
+	int err = 0;
+	struct mmc_card *card;
+
+	mmc_claim_host(host);
+	/*
+	 * Assign card pointer after claiming host to avoid race
+	 * conditions that may arise during removal of the card.
+	 */
+	card = host->card;
+
+	/* sanity checks */
+	if (!card || !freq) {
+		err = -EINVAL;
+		goto out;
+	}
+
+	mmc_set_clock(host, (unsigned int) (*freq));
+
+	if (!mmc_host_is_spi(card->host) && mmc_card_uhs(card)
+			&& card->host->ops->execute_tuning) {
+		/*
+		 * We try to probe host driver for tuning for any
+		 * frequency, it is host driver responsibility to
+		 * perform actual tuning only when required.
+		 */
+		mmc_host_clk_hold(card->host);
+		err = card->host->ops->execute_tuning(card->host,
+				MMC_SEND_TUNING_BLOCK);
+		mmc_host_clk_release(card->host);
+
+		if (err) {
+			pr_warn("%s: %s: tuning execution failed %d. Restoring to previous clock %lu\n",
+				   mmc_hostname(card->host), __func__, err,
+				   host->clk_scaling.curr_freq);
+			mmc_set_clock(host, host->clk_scaling.curr_freq);
+		}
+	}
+
+out:
+	mmc_release_host(host);
+	return err;
+}
+
 /*
  * UHS-I specific initialization procedure
  */
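
The kernel-doc above documents two-sided clamping of *freq; the body shown
defers the actual limiting to mmc_set_clock(), but the documented contract
amounts to the following standalone sketch.

#include <stdio.h>

/* Card maximum wins on the high side, host minimum on the low side. */
static unsigned long clamp_freq(unsigned long freq, unsigned long host_min,
				unsigned long card_max)
{
	if (freq > card_max)
		freq = card_max;
	if (freq < host_min)
		freq = host_min;
	return freq;
}

int main(void)
{
	printf("%lu\n", clamp_freq(250000000UL, 400000UL, 208000000UL));
	printf("%lu\n", clamp_freq(100000UL, 400000UL, 208000000UL));
	return 0;
}
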
@@ -800,7 +870,9 @@
 	if (!host->ops->get_ro)
 		return -1;
 
+	mmc_host_clk_hold(host);
 	ro = host->ops->get_ro(host);
+	mmc_host_clk_release(host);
 
 	return ro;
 }
@@ -809,6 +881,9 @@
 	bool reinit)
 {
 	int err;
+#ifdef CONFIG_MMC_PARANOID_SD_INIT
+	int retries;
+#endif
 
 	if (!reinit) {
 		/*
@@ -835,7 +910,26 @@
 		/*
 		 * Fetch switch information from card.
 		 */
+#ifdef CONFIG_MMC_PARANOID_SD_INIT
+		for (retries = 1; retries <= 3; retries++) {
+			err = mmc_read_switch(card);
+			if (!err) {
+				if (retries > 1) {
+					printk(KERN_WARNING
+					       "%s: recovered\n",
+					       mmc_hostname(host));
+				}
+				break;
+			} else {
+				printk(KERN_WARNING
+				       "%s: read switch failed (attempt %d)\n",
+				       mmc_hostname(host), retries);
+			}
+		}
+#else
 		err = mmc_read_switch(card);
+#endif
+
 		if (err)
 			return err;
 	}
@@ -873,7 +967,10 @@
 {
 	unsigned max_dtr = (unsigned int)-1;
 
-	if (mmc_card_hs(card)) {
+	if (mmc_card_uhs(card)) {
+		if (max_dtr > card->sw_caps.uhs_max_dtr)
+			max_dtr = card->sw_caps.uhs_max_dtr;
+	} else if (mmc_card_hs(card)) {
 		if (max_dtr > card->sw_caps.hs_max_dtr)
 			max_dtr = card->sw_caps.hs_max_dtr;
 	} else if (max_dtr > card->csd.max_dtr) {
@@ -935,6 +1032,7 @@
 		err = mmc_send_relative_addr(host, &card->rca);
 		if (err)
 			goto free_card;
+		host->card = card;
 	}
 
 	if (!oldcard) {
@@ -998,12 +1096,16 @@
 		}
 	}
 
-	host->card = card;
+	card->clk_scaling_highest = mmc_sd_get_max_clock(card);
+	card->clk_scaling_lowest = host->f_min;
+
 	return 0;
 
 free_card:
-	if (!oldcard)
+	if (!oldcard) {
+		host->card = NULL;
 		mmc_remove_card(card);
+	}
 
 	return err;
 }
@@ -1016,8 +1118,12 @@
 	BUG_ON(!host);
 	BUG_ON(!host->card);
 
+	mmc_exit_clk_scaling(host);
 	mmc_remove_card(host->card);
+
+	mmc_claim_host(host);
 	host->card = NULL;
+	mmc_release_host(host);
 }
 
 /*
@@ -1033,17 +1139,49 @@
  */
 static void mmc_sd_detect(struct mmc_host *host)
 {
-	int err;
+	int err = 0;
+#ifdef CONFIG_MMC_PARANOID_SD_INIT
+	int retries = 5;
+#endif
 
 	BUG_ON(!host);
 	BUG_ON(!host->card);
 
-	mmc_get_card(host->card);
+	/*
+	 * Try to claim the host. If we fail to get the lock within 2 sec,
+	 * just return; this ensures that when this call is invoked as part
+	 * of pm_suspend, we do not block suspend for a long duration.
+	 */
+	pm_runtime_get_sync(&host->card->dev);
+	if (!mmc_try_claim_host(host, 2000)) {
+		pm_runtime_mark_last_busy(&host->card->dev);
+		pm_runtime_put_autosuspend(&host->card->dev);
+		return;
+	}
+
+	mmc_power_up(host, host->ocr_avail);
 
 	/*
 	 * Just check if our card has been removed.
 	 */
+#ifdef CONFIG_MMC_PARANOID_SD_INIT
+	while (retries) {
+		err = mmc_send_status(host->card, NULL);
+		if (err) {
+			retries--;
+			udelay(5);
+			continue;
+		}
+		break;
+	}
+	if (!retries) {
+		printk(KERN_ERR "%s(%s): Unable to re-detect card (%d)\n",
+		       __func__, mmc_hostname(host), err);
+		err = _mmc_detect_card_removed(host);
+	}
+#else
 	err = _mmc_detect_card_removed(host);
+#endif
 
 	mmc_put_card(host->card);
 
@@ -1064,6 +1202,13 @@
 	BUG_ON(!host);
 	BUG_ON(!host->card);
 
+	err = mmc_suspend_clk_scaling(host);
+	if (err) {
+		pr_err("%s: %s: fail to suspend clock scaling (%d)\n",
+			mmc_hostname(host), __func__,  err);
+		return err;
+	}
+
 	mmc_claim_host(host);
 
 	if (mmc_card_suspended(host->card))
@@ -1089,11 +1234,16 @@
 {
 	int err;
 
+	MMC_TRACE(host, "%s: Enter\n", __func__);
 	err = _mmc_sd_suspend(host);
 	if (!err) {
 		pm_runtime_disable(&host->card->dev);
 		pm_runtime_set_suspended(&host->card->dev);
-	}
+	/* if suspend fails, force mmc_detect_change during resume */
+	} else if (mmc_bus_manual_resume(host))
+		host->ignore_bus_resume_flags = true;
+
+	MMC_TRACE(host, "%s: Exit err: %d\n", __func__, err);
 
 	return err;
 }
@@ -1105,6 +1255,9 @@
 static int _mmc_sd_resume(struct mmc_host *host)
 {
 	int err = 0;
+#ifdef CONFIG_MMC_PARANOID_SD_INIT
+	int retries;
+#endif
 
 	BUG_ON(!host);
 	BUG_ON(!host->card);
@@ -1115,9 +1268,43 @@
 		goto out;
 
 	mmc_power_up(host, host->card->ocr);
+#ifdef CONFIG_MMC_PARANOID_SD_INIT
+	retries = 5;
+	while (retries) {
 	err = mmc_sd_init_card(host, host->card->ocr, host->card);
+
+		if (err) {
+			printk(KERN_ERR "%s: Re-init card rc = %d (retries = %d)\n",
+			       mmc_hostname(host), err, retries);
+			retries--;
+			mmc_power_off(host);
+			usleep_range(5000, 5500);
+			mmc_power_up(host, host->card->ocr);
+			mmc_select_voltage(host, host->card->ocr);
+			continue;
+		}
+		break;
+	}
+#else
+	err = mmc_sd_init_card(host, host->card->ocr, host->card);
+#endif
+	if (err) {
+		pr_err("%s: %s: mmc_sd_init_card failed (%d)\n",
+				mmc_hostname(host), __func__, err);
+		mmc_power_off(host);
+		goto out;
+	}
 	mmc_card_clr_suspended(host->card);
 
+	if (host->card->sdr104_blocked)
+		goto out;
+	err = mmc_resume_clk_scaling(host);
+	if (err) {
+		pr_err("%s: %s: fail to resume clock scaling (%d)\n",
+			mmc_hostname(host), __func__, err);
+		goto out;
+	}
+
 out:
 	mmc_release_host(host);
 	return err;
@@ -1130,12 +1317,14 @@
 {
 	int err = 0;
 
+	MMC_TRACE(host, "%s: Enter\n", __func__);
 	if (!(host->caps & MMC_CAP_RUNTIME_RESUME)) {
 		err = _mmc_sd_resume(host);
 		pm_runtime_set_active(&host->card->dev);
 		pm_runtime_mark_last_busy(&host->card->dev);
 	}
 	pm_runtime_enable(&host->card->dev);
+	MMC_TRACE(host, "%s: Exit err: %d\n", __func__, err);
 
 	return err;
 }
@@ -1190,7 +1379,7 @@
 	.suspend = mmc_sd_suspend,
 	.resume = mmc_sd_resume,
 	.alive = mmc_sd_alive,
-	.shutdown = mmc_sd_suspend,
+	.change_bus_speed = mmc_sd_change_bus_speed,
 	.reset = mmc_sd_reset,
 };
 
@@ -1201,6 +1390,9 @@
 {
 	int err;
 	u32 ocr, rocr;
+#ifdef CONFIG_MMC_PARANOID_SD_INIT
+	int retries;
+#endif
 
 	BUG_ON(!host);
 	WARN_ON(!host->claimed);
@@ -1237,9 +1429,31 @@
 	/*
 	 * Detect and init the card.
 	 */
+#ifdef CONFIG_MMC_PARANOID_SD_INIT
+	retries = 5;
+	while (retries) {
+		err = mmc_sd_init_card(host, rocr, NULL);
+		if (err) {
+			retries--;
+			mmc_power_off(host);
+			usleep_range(5000, 5500);
+			mmc_power_up(host, rocr);
+			mmc_select_voltage(host, rocr);
+			continue;
+		}
+		break;
+	}
+
+	if (!retries) {
+		printk(KERN_ERR "%s: mmc_sd_init_card() failure (err = %d)\n",
+		       mmc_hostname(host), err);
+		goto err;
+	}
+#else
 	err = mmc_sd_init_card(host, rocr, NULL);
 	if (err)
 		goto err;
+#endif
 
 	mmc_release_host(host);
 	err = mmc_add_card(host->card);
@@ -1247,6 +1461,13 @@
 		goto remove_card;
 
 	mmc_claim_host(host);
+
+	err = mmc_init_clk_scaling(host);
+	if (err) {
+		mmc_release_host(host);
+		goto remove_card;
+	}
+
 	return 0;
 
 remove_card:
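
Several CONFIG_MMC_PARANOID_SD_INIT paths in this file share one shape: retry
initialization up to five times, power-cycling the card between attempts. A
condensed standalone model of the pattern; power_cycle() and init_card() are
stand-ins for the mmc_power_off/up and mmc_sd_init_card calls.

#include <stdio.h>

#define EIO 5

static int attempts_needed = 3;	/* fake card: succeeds on 3rd try */

static void power_cycle(void) { /* mmc_power_off/up + voltage select */ }

static int init_card(void)
{
	return --attempts_needed > 0 ? -EIO : 0;
}

static int paranoid_init(void)
{
	int retries = 5, err = 0;

	while (retries) {
		err = init_card();
		if (!err)
			break;
		fprintf(stderr, "re-init rc = %d (retries = %d)\n",
			err, retries);
		retries--;
		power_cycle();
	}
	return retries ? 0 : err;
}

int main(void)
{
	printf("paranoid_init() = %d\n", paranoid_init());
	return 0;
}
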
diff -ruw linux-4.4.115/drivers/mmc/core/sdio_bus.c linux-4.4.115-fbx/drivers/mmc/core/sdio_bus.c
--- linux-4.4.115/drivers/mmc/core/sdio_bus.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/mmc/core/sdio_bus.c	2019-01-22 16:16:24.775257925 +0100
@@ -28,6 +28,10 @@
 #include "sdio_cis.h"
 #include "sdio_bus.h"
 
+#ifdef CONFIG_MMC_EMBEDDED_SDIO
+#include <linux/mmc/host.h>
+#endif
+
 #define to_sdio_driver(d)	container_of(d, struct sdio_driver, drv)
 
 /* show configuration fields */
@@ -263,6 +267,13 @@
 {
 	struct sdio_func *func = dev_to_sdio_func(dev);
 
+#ifdef CONFIG_MMC_EMBEDDED_SDIO
+	/*
+	 * If this device is embedded, then we never allocated
+	 * CIS tables for this func.
+	 */
+	if (!func->card->host->embedded_sdio_data.funcs)
+#endif
 	sdio_free_func_cis(func);
 
 	kfree(func->info);
diff -ruw linux-4.4.115/drivers/mmc/core/sdio.c linux-4.4.115-fbx/drivers/mmc/core/sdio.c
--- linux-4.4.115/drivers/mmc/core/sdio.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/mmc/core/sdio.c	2019-01-22 16:16:24.775257925 +0100
@@ -10,6 +10,7 @@
  */
 
 #include <linux/err.h>
+#include <linux/module.h>
 #include <linux/pm_runtime.h>
 
 #include <linux/mmc/host.h>
@@ -21,6 +22,7 @@
 
 #include "core.h"
 #include "bus.h"
+#include "host.h"
 #include "sd.h"
 #include "sdio_bus.h"
 #include "mmc_ops.h"
@@ -28,6 +30,10 @@
 #include "sdio_ops.h"
 #include "sdio_cis.h"
 
+#ifdef CONFIG_MMC_EMBEDDED_SDIO
+#include <linux/mmc/sdio_ids.h>
+#endif
+
 static int sdio_read_fbr(struct sdio_func *func)
 {
 	int ret;
@@ -182,6 +188,23 @@
 				card->sw_caps.sd3_drv_type |= SD_DRIVER_TYPE_C;
 			if (data & SDIO_DRIVE_SDTD)
 				card->sw_caps.sd3_drv_type |= SD_DRIVER_TYPE_D;
+
+			ret = mmc_io_rw_direct(card, 0, 0,
+				SDIO_CCCR_INTERRUPT_EXTENSION, 0, &data);
+			if (ret)
+				goto out;
+			if (data & SDIO_SUPPORT_ASYNC_INTR) {
+				if (card->host->caps2 &
+				    MMC_CAP2_ASYNC_SDIO_IRQ_4BIT_MODE) {
+					data |= SDIO_ENABLE_ASYNC_INTR;
+					ret = mmc_io_rw_direct(card, 1, 0,
+						SDIO_CCCR_INTERRUPT_EXTENSION,
+						data, NULL);
+					if (ret)
+						goto out;
+					card->cccr.async_intr_sup = 1;
+				}
+			}
 		}
 
 		/* if no uhs mode ensure we check for high speed */
@@ -200,12 +223,60 @@
 	return ret;
 }
 
+static void sdio_enable_vendor_specific_settings(struct mmc_card *card)
+{
+	int ret;
+	u8 settings;
+
+	if (mmc_enable_qca6574_settings(card) ||
+		mmc_enable_qca9377_settings(card)) {
+		ret = mmc_io_rw_direct(card, 1, 0, 0xF2, 0x0F, NULL);
+		if (ret) {
+			pr_crit("%s: failed to write to fn 0xf2 %d\n",
+					mmc_hostname(card->host), ret);
+			goto out;
+		}
+
+		ret = mmc_io_rw_direct(card, 0, 0, 0xF1, 0, &settings);
+		if (ret) {
+			pr_crit("%s: failed to read fn 0xf1 %d\n",
+					mmc_hostname(card->host), ret);
+			goto out;
+		}
+
+		settings |= 0x80;
+		ret = mmc_io_rw_direct(card, 1, 0, 0xF1, settings, NULL);
+		if (ret) {
+			pr_crit("%s: failed to write to fn 0xf1 %d\n",
+					mmc_hostname(card->host), ret);
+			goto out;
+		}
+
+		ret = mmc_io_rw_direct(card, 0, 0, 0xF0, 0, &settings);
+		if (ret) {
+			pr_crit("%s: failed to read fn 0xf0 %d\n",
+					mmc_hostname(card->host), ret);
+			goto out;
+		}
+
+		settings |= 0x20;
+		ret = mmc_io_rw_direct(card, 1, 0, 0xF0, settings, NULL);
+		if (ret) {
+			pr_crit("%s: failed to write to fn 0xf0 %d\n",
+					mmc_hostname(card->host), ret);
+			goto out;
+		}
+	}
+out:
+	return;
+}
+
 static int sdio_enable_wide(struct mmc_card *card)
 {
 	int ret;
 	u8 ctrl;
 
-	if (!(card->host->caps & MMC_CAP_4_BIT_DATA))
+	if (!(card->host->caps & (MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA)))
 		return 0;
 
 	if (card->cccr.low_speed && !card->cccr.wide_bus)
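
sdio_enable_vendor_specific_settings() boils down to three CCCR accesses:
write 0x0F to register 0xF2, then read-modify-write bit 7 of 0xF1 and bit 5
of 0xF0. The register meanings are vendor-specific and undocumented here;
the sequence itself is modeled below against a fake register file.

#include <stdint.h>
#include <stdio.h>

static uint8_t cccr[256];	/* fake function-0 register file */

static int io_rw_direct(int write, unsigned addr, uint8_t in, uint8_t *out)
{
	if (write)
		cccr[addr] = in;
	else
		*out = cccr[addr];
	return 0;	/* a real transfer can fail; the driver checks */
}

static int enable_vendor_settings(void)
{
	uint8_t v;
	int ret;

	ret = io_rw_direct(1, 0xF2, 0x0F, NULL);	/* magic enable */
	if (ret)
		return ret;
	ret = io_rw_direct(0, 0xF1, 0, &v);		/* set bit 7 */
	if (ret)
		return ret;
	ret = io_rw_direct(1, 0xF1, v | 0x80, NULL);
	if (ret)
		return ret;
	ret = io_rw_direct(0, 0xF0, 0, &v);		/* set bit 5 */
	if (ret)
		return ret;
	return io_rw_direct(1, 0xF0, v | 0x20, NULL);
}

int main(void)
{
	enable_vendor_settings();
	printf("F0=0x%02x F1=0x%02x F2=0x%02x\n",
	       cccr[0xF0], cccr[0xF1], cccr[0xF2]);
	return 0;
}
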
@@ -221,6 +292,9 @@
 
 	/* set as 4-bit bus width */
 	ctrl &= ~SDIO_BUS_WIDTH_MASK;
+	if (card->host->caps & MMC_CAP_8_BIT_DATA)
+		ctrl |= SDIO_BUS_WIDTH_8BIT;
+	else if (card->host->caps & MMC_CAP_4_BIT_DATA)
 	ctrl |= SDIO_BUS_WIDTH_4BIT;
 
 	ret = mmc_io_rw_direct(card, 1, 0, SDIO_CCCR_IF, ctrl, NULL);
@@ -262,7 +336,7 @@
 	int ret;
 	u8 ctrl;
 
-	if (!(card->host->caps & MMC_CAP_4_BIT_DATA))
+	if (!(card->host->caps & (MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA)))
 		return 0;
 
 	if (card->cccr.low_speed && !card->cccr.wide_bus)
@@ -272,10 +346,10 @@
 	if (ret)
 		return ret;
 
-	if (!(ctrl & SDIO_BUS_WIDTH_4BIT))
+	if (!(ctrl & (SDIO_BUS_WIDTH_4BIT | SDIO_BUS_WIDTH_8BIT)))
 		return 0;
 
-	ctrl &= ~SDIO_BUS_WIDTH_4BIT;
+	ctrl &= ~(SDIO_BUS_WIDTH_4BIT | SDIO_BUS_WIDTH_8BIT);
 	ctrl |= SDIO_BUS_ASYNC_INT;
 
 	ret = mmc_io_rw_direct(card, 1, 0, SDIO_CCCR_IF, ctrl, NULL);
@@ -492,6 +566,9 @@
 	if (err)
 		return err;
 
+	/* Vendor specific settings based on card quirks */
+	sdio_enable_vendor_specific_settings(card);
+
 	speed &= ~SDIO_SPEED_BSS_MASK;
 	speed |= bus_speed;
 	err = mmc_io_rw_direct(card, 1, 0, SDIO_CCCR_SPEED, speed, NULL);
@@ -618,8 +695,11 @@
 	/*
 	 * Call the optional HC's init_card function to handle quirks.
 	 */
-	if (host->ops->init_card)
+	if (host->ops->init_card) {
+		mmc_host_clk_hold(host);
 		host->ops->init_card(host, card);
+		mmc_host_clk_release(host);
+	}
 
 	/*
 	 * If the host and card support UHS-I mode request the card
@@ -699,19 +779,35 @@
 		goto finish;
 	}
 
+#ifdef CONFIG_MMC_EMBEDDED_SDIO
+	if (host->embedded_sdio_data.cccr)
+		memcpy(&card->cccr, host->embedded_sdio_data.cccr, sizeof(struct sdio_cccr));
+	else {
+#endif
 	/*
 	 * Read the common registers.
 	 */
 	err = sdio_read_cccr(card, ocr);
 	if (err)
 		goto remove;
+#ifdef CONFIG_MMC_EMBEDDED_SDIO
+	}
+#endif
 
+#ifdef CONFIG_MMC_EMBEDDED_SDIO
+	if (host->embedded_sdio_data.cis)
+		memcpy(&card->cis, host->embedded_sdio_data.cis, sizeof(struct sdio_cis));
+	else {
+#endif
 	/*
 	 * Read the common CIS tuples.
 	 */
 	err = sdio_read_common_cis(card);
 	if (err)
 		goto remove;
+#ifdef CONFIG_MMC_EMBEDDED_SDIO
+	}
+#endif
 
 	if (oldcard) {
 		int same = (card->cis.vendor == oldcard->cis.vendor &&
@@ -770,7 +866,12 @@
 		 * Switch to wider bus (if supported).
 		 */
 		err = sdio_enable_4bit_bus(card);
-		if (err)
+		if (err > 0) {
+			if (card->host->caps & MMC_CAP_8_BIT_DATA)
+				mmc_set_bus_width(card->host, MMC_BUS_WIDTH_8);
+			else if (card->host->caps & MMC_CAP_4_BIT_DATA)
+				mmc_set_bus_width(card->host, MMC_BUS_WIDTH_4);
+		} else if (err)
 			goto remove;
 	}
 finish:
@@ -897,6 +998,7 @@
  */
 static int mmc_sdio_suspend(struct mmc_host *host)
 {
+	MMC_TRACE(host, "%s: Enter\n", __func__);
 	mmc_claim_host(host);
 
 	if (mmc_card_keep_power(host) && mmc_card_wake_sdio_irq(host))
@@ -904,13 +1006,15 @@
 
 	if (!mmc_card_keep_power(host)) {
 		mmc_power_off(host);
+	} else if (host->ios.clock) {
+		mmc_gate_clock(host);
 	} else if (host->retune_period) {
 		mmc_retune_timer_stop(host);
 		mmc_retune_needed(host);
 	}
 
 	mmc_release_host(host);
-
+	MMC_TRACE(host, "%s: Exit\n", __func__);
 	return 0;
 }
 
@@ -921,6 +1025,7 @@
 	BUG_ON(!host);
 	BUG_ON(!host->card);
 
+	MMC_TRACE(host, "%s: Enter\n", __func__);
 	/* Basic card reinitialization. */
 	mmc_claim_host(host);
 
@@ -953,18 +1058,30 @@
 	} else if (mmc_card_keep_power(host) && mmc_card_wake_sdio_irq(host)) {
 		/* We may have switched to 1-bit mode during suspend */
 		err = sdio_enable_4bit_bus(host->card);
+		if (err > 0) {
+			if (host->caps & MMC_CAP_8_BIT_DATA)
+				mmc_set_bus_width(host, MMC_BUS_WIDTH_8);
+			else if (host->caps & MMC_CAP_4_BIT_DATA)
+				mmc_set_bus_width(host, MMC_BUS_WIDTH_4);
+			err = 0;
+		}
 	}
 
 	if (!err && host->sdio_irqs) {
-		if (!(host->caps2 & MMC_CAP2_SDIO_IRQ_NOTHREAD))
+		if (!(host->caps2 & MMC_CAP2_SDIO_IRQ_NOTHREAD)) {
 			wake_up_process(host->sdio_irq_thread);
-		else if (host->caps & MMC_CAP_SDIO_IRQ)
+		} else if (host->caps & MMC_CAP_SDIO_IRQ) {
+			mmc_host_clk_hold(host);
 			host->ops->enable_sdio_irq(host, 1);
+			mmc_host_clk_release(host);
+		}
 	}
 
 	mmc_release_host(host);
 
 	host->pm_flags &= ~MMC_PM_KEEP_POWER;
+	host->pm_flags &= ~MMC_PM_WAKE_SDIO_IRQ;
+	MMC_TRACE(host, "%s: Exit err: %d\n", __func__, err);
 	return err;
 }
 
@@ -1120,14 +1237,36 @@
 	funcs = (ocr & 0x70000000) >> 28;
 	card->sdio_funcs = 0;
 
+#ifdef CONFIG_MMC_EMBEDDED_SDIO
+	if (host->embedded_sdio_data.funcs)
+		card->sdio_funcs = funcs = host->embedded_sdio_data.num_funcs;
+#endif
+
 	/*
 	 * Initialize (but don't add) all present functions.
 	 */
 	for (i = 0; i < funcs; i++, card->sdio_funcs++) {
+#ifdef CONFIG_MMC_EMBEDDED_SDIO
+		if (host->embedded_sdio_data.funcs) {
+			struct sdio_func *tmp;
+
+			tmp = sdio_alloc_func(host->card);
+			if (IS_ERR(tmp))
+				goto remove;
+			tmp->num = (i + 1);
+			card->sdio_func[i] = tmp;
+			tmp->class = host->embedded_sdio_data.funcs[i].f_class;
+			tmp->max_blksize = host->embedded_sdio_data.funcs[i].f_maxblksize;
+			tmp->vendor = card->cis.vendor;
+			tmp->device = card->cis.device;
+		} else {
+#endif
 		err = sdio_init_func(host->card, i + 1);
 		if (err)
 			goto remove;
-
+#ifdef CONFIG_MMC_EMBEDDED_SDIO
+		}
+#endif
 		/*
 		 * Enable Runtime PM for this func (if supported)
 		 */
@@ -1175,3 +1314,8 @@
 	return err;
 }
 
+int sdio_reset_comm(struct mmc_card *card)
+{
+	return mmc_power_restore_host(card->host);
+}
+EXPORT_SYMBOL(sdio_reset_comm);
diff -ruw linux-4.4.115/drivers/mmc/core/sdio_cis.c linux-4.4.115-fbx/drivers/mmc/core/sdio_cis.c
--- linux-4.4.115/drivers/mmc/core/sdio_cis.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/mmc/core/sdio_cis.c	2019-01-22 16:16:24.775257925 +0100
@@ -55,7 +55,7 @@
 
 	for (i = 0; i < nr_strings; i++) {
 		buffer[i] = string;
-		strcpy(string, buf);
+		strlcpy(string, buf, strlen(buf) + 1);
 		string += strlen(string) + 1;
 		buf += strlen(buf) + 1;
 	}
@@ -270,8 +270,16 @@
 			break;
 
 		/* null entries have no link field or data */
-		if (tpl_code == 0x00)
+		if (tpl_code == 0x00) {
+			if (card->cis.vendor == 0x70 &&
+				(card->cis.device == 0x2460 ||
+				 card->cis.device == 0x0460 ||
+				 card->cis.device == 0x23F1 ||
+				 card->cis.device == 0x23F0))
+				break;
+			else
 			continue;
+		}
 
 		ret = mmc_io_rw_direct(card, 0, 0, ptr++, 0, &tpl_link);
 		if (ret)
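
The strlcpy() change keeps the semantics of the strcpy() it replaces, since
the destination was sized from the same strlen() walk; it only satisfies
hardening checks. The surrounding loop splits one buffer of back-to-back
NUL-terminated strings, as found in a CISTPL_VERS_1 tuple, into a pointer
array; a standalone model, with memcpy() of strlen()+1 bytes standing in for
strlcpy():

#include <stdio.h>
#include <string.h>

static void split_strings(char **out, char *storage,
			  const char *buf, int nr)
{
	int i;

	for (i = 0; i < nr; i++) {
		size_t len = strlen(buf) + 1;	/* include the NUL */

		out[i] = storage;
		memcpy(storage, buf, len);
		storage += len;
		buf += len;
	}
}

int main(void)
{
	const char raw[] = "Vendor\0Product\0Rev 1.0\0";
	char storage[sizeof(raw)];
	char *strings[3];
	int i;

	split_strings(strings, storage, raw, 3);
	for (i = 0; i < 3; i++)
		printf("string %d: %s\n", i, strings[i]);
	return 0;
}
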
diff -ruw linux-4.4.115/drivers/mmc/core/sdio_io.c linux-4.4.115-fbx/drivers/mmc/core/sdio_io.c
--- linux-4.4.115/drivers/mmc/core/sdio_io.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/mmc/core/sdio_io.c	2019-01-22 16:16:24.775257925 +0100
@@ -384,6 +384,39 @@
 EXPORT_SYMBOL_GPL(sdio_readb);
 
 /**
+ *	sdio_readb_ext - read a single byte from a SDIO function
+ *	@func: SDIO function to access
+ *	@addr: address to read
+ *	@err_ret: optional status value from transfer
+ *	@in: value to add to argument
+ *
+ *	Reads a single byte from the address space of a given SDIO
+ *	function. If there is a problem reading the address, 0xff
+ *	is returned and @err_ret will contain the error code.
+ */
+unsigned char sdio_readb_ext(struct sdio_func *func, unsigned int addr,
+	int *err_ret, unsigned in)
+{
+	int ret;
+	unsigned char val;
+
+	BUG_ON(!func);
+
+	if (err_ret)
+		*err_ret = 0;
+
+	ret = mmc_io_rw_direct(func->card, 0, func->num, addr, (u8)in, &val);
+	if (ret) {
+		if (err_ret)
+			*err_ret = ret;
+		return 0xFF;
+	}
+
+	return val;
+}
+EXPORT_SYMBOL_GPL(sdio_readb_ext);
+
+/**
  *	sdio_writeb - write a single byte to a SDIO function
  *	@func: SDIO function to access
  *	@b: byte to write
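
A hypothetical function-driver caller of the new sdio_readb_ext() export;
unlike sdio_readb(), the extra 'in' byte is forwarded as the CMD52
write-data field even on a read, which some vendor cards interpret as a
hint. The helper name below is illustrative, not part of the patch.

#include <linux/mmc/sdio_func.h>

/* Hypothetical helper: read a vendor register, yielding 0xFF on failure
 * exactly as sdio_readb_ext() does. Call with the function claimed via
 * sdio_claim_host(). */
static u8 read_vendor_reg(struct sdio_func *func, unsigned int addr)
{
	int err;
	u8 val;

	val = sdio_readb_ext(func, addr, &err, 0 /* 'in' hint byte */);
	if (err)
		pr_warn("%s: read of 0x%x failed: %d\n",
			sdio_func_id(func), addr, err);
	return val;
}
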
diff -ruw linux-4.4.115/drivers/mmc/core/sdio_irq.c linux-4.4.115-fbx/drivers/mmc/core/sdio_irq.c
--- linux-4.4.115/drivers/mmc/core/sdio_irq.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/mmc/core/sdio_irq.c	2019-01-22 16:16:24.775257925 +0100
@@ -93,7 +93,9 @@
 {
 	mmc_claim_host(host);
 	host->sdio_irq_pending = true;
+	mmc_host_clk_hold(host);
 	process_sdio_pending_irqs(host);
+	mmc_host_clk_release(host);
 	mmc_release_host(host);
 }
 EXPORT_SYMBOL_GPL(sdio_run_irqs);
@@ -104,6 +106,7 @@
 	struct sched_param param = { .sched_priority = 1 };
 	unsigned long period, idle_period;
 	int ret;
+	bool ws;
 
 	sched_setscheduler(current, SCHED_FIFO, &param);
 
@@ -137,6 +140,17 @@
 		ret = __mmc_claim_host(host, &host->sdio_irq_thread_abort);
 		if (ret)
 			break;
+		ws = false;
+		/*
+		 * Prevent suspend if it had started by the time we were
+		 * scheduled; 100 msec (approximate) should be enough for the
+		 * system to resume and attend to the card's request.
+		 */
+		if ((host->dev_status == DEV_SUSPENDING) ||
+		    (host->dev_status == DEV_SUSPENDED)) {
+			pm_wakeup_event(&host->card->dev, 100);
+			ws = true;
+		}
 		ret = process_sdio_pending_irqs(host);
 		host->sdio_irq_pending = false;
 		mmc_release_host(host);
@@ -168,15 +182,27 @@
 		}
 
 		set_current_state(TASK_INTERRUPTIBLE);
-		if (host->caps & MMC_CAP_SDIO_IRQ)
+		if (host->caps & MMC_CAP_SDIO_IRQ) {
+			mmc_host_clk_hold(host);
 			host->ops->enable_sdio_irq(host, 1);
+			mmc_host_clk_release(host);
+		}
+		/*
+		 * Function drivers will have processed the event from the
+		 * card unless we are suspended, so release the wakeup source.
+		 */
+		if (ws && (host->dev_status == DEV_RESUMED))
+			pm_relax(&host->card->dev);
 		if (!kthread_should_stop())
 			schedule_timeout(period);
 		set_current_state(TASK_RUNNING);
 	} while (!kthread_should_stop());
 
-	if (host->caps & MMC_CAP_SDIO_IRQ)
+	if (host->caps & MMC_CAP_SDIO_IRQ) {
+		mmc_host_clk_hold(host);
 		host->ops->enable_sdio_irq(host, 0);
+		mmc_host_clk_release(host);
+	}
 
 	pr_debug("%s: IRQ thread exiting with code %d\n",
 		 mmc_hostname(host), ret);
@@ -202,7 +228,9 @@
 				return err;
 			}
 		} else if (host->caps & MMC_CAP_SDIO_IRQ) {
+			mmc_host_clk_hold(host);
 			host->ops->enable_sdio_irq(host, 1);
+			mmc_host_clk_release(host);
 		}
 	}
 
@@ -221,7 +249,9 @@
 			atomic_set(&host->sdio_irq_thread_abort, 1);
 			kthread_stop(host->sdio_irq_thread);
 		} else if (host->caps & MMC_CAP_SDIO_IRQ) {
+			mmc_host_clk_hold(host);
 			host->ops->enable_sdio_irq(host, 0);
+			mmc_host_clk_release(host);
 		}
 	}
 
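The IRQ-thread change brackets interrupt processing with a wakeup source: if
the host is suspending or suspended when the thread runs, it arms a 100 ms
wakeup event, and releases it early once processing finishes with the device
resumed again. The shape of that logic, reduced to a standalone sketch:

#include <stdio.h>

enum dev_status { DEV_RESUMED, DEV_SUSPENDING, DEV_SUSPENDED };

static enum dev_status dev_status;

static void pm_wakeup_event(int ms) { printf("hold wakeup %d ms\n", ms); }
static void pm_relax(void)          { printf("release wakeup\n"); }
static void process_pending_irqs(void) { printf("process card IRQs\n"); }

static void irq_thread_iteration(void)
{
	int ws = 0;

	/* keep the system awake long enough to service the card */
	if (dev_status == DEV_SUSPENDING || dev_status == DEV_SUSPENDED) {
		pm_wakeup_event(100);
		ws = 1;
	}

	process_pending_irqs();

	/* drop the hold early if we are already resumed again */
	if (ws && dev_status == DEV_RESUMED)
		pm_relax();
}

int main(void)
{
	dev_status = DEV_SUSPENDING;
	irq_thread_iteration();
	dev_status = DEV_RESUMED;
	irq_thread_iteration();
	return 0;
}
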
diff -ruw linux-4.4.115/drivers/mmc/host/Kconfig linux-4.4.115-fbx/drivers/mmc/host/Kconfig
--- linux-4.4.115/drivers/mmc/host/Kconfig	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/mmc/host/Kconfig	2019-01-22 16:16:24.775257925 +0100
@@ -406,18 +406,39 @@
 	  If unsure, say N.
 
 config MMC_SDHCI_MSM
-	tristate "Qualcomm SDHCI Controller Support"
-	depends on ARCH_QCOM || (ARM && COMPILE_TEST)
+	tristate "Qualcomm Technologies, Inc. SDHCI Controller Support"
+	depends on ARCH_QCOM || ARCH_MSM || (ARM && COMPILE_TEST)
 	depends on MMC_SDHCI_PLTFM
+	select PM_DEVFREQ
+	select DEVFREQ_GOV_SIMPLE_ONDEMAND
 	help
 	  This selects the Secure Digital Host Controller Interface (SDHCI)
-	  support present in Qualcomm SOCs. The controller supports
-	  SD/MMC/SDIO devices.
+	  support present in Qualcomm Technologies, Inc. SOCs. The controller
+	  supports SD/MMC/SDIO devices.
 
 	  If you have a controller with this interface, say Y or M here.
 
 	  If unsure, say N.
 
+config MMC_SDHCI_MSM_ICE
+	bool "Qualcomm Technologies, Inc Inline Crypto Engine for SDHCI core"
+	depends on MMC_SDHCI_MSM && CRYPTO_DEV_QCOM_ICE
+	help
+	  This selects the QTI specific additions to support Inline Crypto
+	  Engine (ICE). ICE accelerates the crypto operations and maintains
+	  the high SDHCI performance.
+
+	  Select this if you have ICE supported for SDHCI on QTI chipset.
+	  If unsure, say N.
+
+config MMC_MSM
+	tristate "Qualcomm SDCC Controller Support"
+	depends on MMC && (ARCH_MSM7X00A || ARCH_MSM7X30 || ARCH_QSD8X50)
+	help
+	  This provides support for the SD/MMC cell found in the
+	  MSM and QSD SOCs from Qualcomm. The controller also has
+	  support for SDIO devices.
+
 config MMC_MXC
 	tristate "Freescale i.MX21/27/31 or MPC512x Multimedia Card support"
 	depends on ARCH_MXC || PPC_MPC512x
@@ -773,6 +794,19 @@
 	  This selects support for the SD/MMC Host Controller on
 	  Allwinner sunxi SoCs.
 
+config MMC_CQ_HCI
+	tristate "Command Queue Support"
+	depends on HAS_DMA
+	help
+	  This selects the Command Queue Host Controller Interface (CQHCI)
+	  support present in host controllers of Qualcomm Technologies, Inc
+	  amongst others.
+	  This controller supports eMMC devices with command queue support.
+
+	  If you have a controller with this interface, say Y or M here.
+
+	  If unsure, say N.
+
 config MMC_TOSHIBA_PCI
 	tristate "Toshiba Type A SD/MMC Card Interface Driver"
 	depends on PCI
diff -ruw linux-4.4.115/drivers/mmc/host/Makefile linux-4.4.115-fbx/drivers/mmc/host/Makefile
--- linux-4.4.115/drivers/mmc/host/Makefile	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/mmc/host/Makefile	2019-01-22 16:16:24.775257925 +0100
@@ -72,9 +72,11 @@
 obj-$(CONFIG_MMC_SDHCI_OF_HLWD)		+= sdhci-of-hlwd.o
 obj-$(CONFIG_MMC_SDHCI_BCM_KONA)	+= sdhci-bcm-kona.o
 obj-$(CONFIG_MMC_SDHCI_BCM2835)		+= sdhci-bcm2835.o
-obj-$(CONFIG_MMC_SDHCI_IPROC)		+= sdhci-iproc.o
 obj-$(CONFIG_MMC_SDHCI_MSM)		+= sdhci-msm.o
+obj-$(CONFIG_MMC_SDHCI_MSM_ICE)		+= sdhci-msm-ice.o
+obj-$(CONFIG_MMC_SDHCI_IPROC)		+= sdhci-iproc.o
 obj-$(CONFIG_MMC_SDHCI_ST)		+= sdhci-st.o
+obj-$(CONFIG_MMC_CQ_HCI)		+= cmdq_hci.o
 
 ifeq ($(CONFIG_CB710_DEBUG),y)
 	CFLAGS-cb710-mmc	+= -DDEBUG
diff -ruw linux-4.4.115/drivers/mmc/host/sdhci.c linux-4.4.115-fbx/drivers/mmc/host/sdhci.c
--- linux-4.4.115/drivers/mmc/host/sdhci.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/mmc/host/sdhci.c	2019-10-29 09:26:24.077207425 +0100
@@ -30,8 +30,12 @@
 #include <linux/mmc/card.h>
 #include <linux/mmc/sdio.h>
 #include <linux/mmc/slot-gpio.h>
+#include <linux/mmc/sdio.h>
+
+#include <trace/events/mmc.h>
 
 #include "sdhci.h"
+#include "cmdq_hci.h"
 
 #define DRIVER_NAME "sdhci"
 
@@ -45,6 +49,9 @@
 
 #define MAX_TUNING_LOOP 40
 
+#define SDHCI_DBG_DUMP_RS_INTERVAL (10 * HZ)
+#define SDHCI_DBG_DUMP_RS_BURST 2
+
 static unsigned int debug_quirks = 0;
 static unsigned int debug_quirks2;
 
@@ -52,10 +59,13 @@
 
 static void sdhci_finish_command(struct sdhci_host *);
 static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode);
+static int sdhci_enhanced_strobe(struct mmc_host *mmc);
 static void sdhci_enable_preset_value(struct sdhci_host *host, bool enable);
 static int sdhci_pre_dma_transfer(struct sdhci_host *host,
 					struct mmc_data *data);
 static int sdhci_do_get_cd(struct sdhci_host *host);
+static bool sdhci_check_state(struct sdhci_host *);
+static void sdhci_enable_sdio_irq_nolock(struct sdhci_host *host, int enable);
 
 #ifdef CONFIG_PM
 static int sdhci_runtime_pm_get(struct sdhci_host *host);
@@ -79,60 +89,102 @@
 }
 #endif
 
+static void sdhci_dump_state(struct sdhci_host *host)
+{
+	struct mmc_host *mmc = host->mmc;
+
+	#ifdef CONFIG_MMC_CLKGATE
+	pr_info("%s: clk: %d clk-gated: %d claimer: %s pwr: %d irq-enabled: %d\n",
+		mmc_hostname(mmc), host->clock, mmc->clk_gated,
+		mmc->claimer->comm, host->pwr,
+		!!(host->flags & SDHCI_HOST_IRQ_STATUS));
+	#else
+	pr_info("%s: clk: %d claimer: %s pwr: %d\n",
+		mmc_hostname(mmc), host->clock,
+		mmc->claimer->comm, host->pwr);
+	#endif
+	pr_info("%s: rpmstatus[pltfm](runtime-suspend:usage_count:disable_depth)(%d:%d:%d)\n",
+		mmc_hostname(mmc), mmc->parent->power.runtime_status,
+		atomic_read(&mmc->parent->power.usage_count),
+		mmc->parent->power.disable_depth);
+}
+
 static void sdhci_dumpregs(struct sdhci_host *host)
 {
-	pr_debug(DRIVER_NAME ": =========== REGISTER DUMP (%s)===========\n",
+	MMC_TRACE(host->mmc,
+		"%s: 0x04=0x%08x 0x06=0x%08x 0x0E=0x%08x 0x30=0x%08x 0x34=0x%08x 0x38=0x%08x\n",
+		__func__,
+		sdhci_readw(host, SDHCI_BLOCK_SIZE),
+		sdhci_readw(host, SDHCI_BLOCK_COUNT),
+		sdhci_readw(host, SDHCI_COMMAND),
+		sdhci_readl(host, SDHCI_INT_STATUS),
+		sdhci_readl(host, SDHCI_INT_ENABLE),
+		sdhci_readl(host, SDHCI_SIGNAL_ENABLE));
+	mmc_stop_tracing(host->mmc);
+
+	pr_info(DRIVER_NAME ": =========== REGISTER DUMP (%s)===========\n",
 		mmc_hostname(host->mmc));
 
-	pr_debug(DRIVER_NAME ": Sys addr: 0x%08x | Version:  0x%08x\n",
+	pr_info(DRIVER_NAME ": Sys addr: 0x%08x | Version:  0x%08x\n",
 		sdhci_readl(host, SDHCI_DMA_ADDRESS),
 		sdhci_readw(host, SDHCI_HOST_VERSION));
-	pr_debug(DRIVER_NAME ": Blk size: 0x%08x | Blk cnt:  0x%08x\n",
+	pr_info(DRIVER_NAME ": Blk size: 0x%08x | Blk cnt:  0x%08x\n",
 		sdhci_readw(host, SDHCI_BLOCK_SIZE),
 		sdhci_readw(host, SDHCI_BLOCK_COUNT));
-	pr_debug(DRIVER_NAME ": Argument: 0x%08x | Trn mode: 0x%08x\n",
+	pr_info(DRIVER_NAME ": Argument: 0x%08x | Trn mode: 0x%08x\n",
 		sdhci_readl(host, SDHCI_ARGUMENT),
 		sdhci_readw(host, SDHCI_TRANSFER_MODE));
-	pr_debug(DRIVER_NAME ": Present:  0x%08x | Host ctl: 0x%08x\n",
+	pr_info(DRIVER_NAME ": Present:  0x%08x | Host ctl: 0x%08x\n",
 		sdhci_readl(host, SDHCI_PRESENT_STATE),
 		sdhci_readb(host, SDHCI_HOST_CONTROL));
-	pr_debug(DRIVER_NAME ": Power:    0x%08x | Blk gap:  0x%08x\n",
+	pr_info(DRIVER_NAME ": Power:    0x%08x | Blk gap:  0x%08x\n",
 		sdhci_readb(host, SDHCI_POWER_CONTROL),
 		sdhci_readb(host, SDHCI_BLOCK_GAP_CONTROL));
-	pr_debug(DRIVER_NAME ": Wake-up:  0x%08x | Clock:    0x%08x\n",
+	pr_info(DRIVER_NAME ": Wake-up:  0x%08x | Clock:    0x%08x\n",
 		sdhci_readb(host, SDHCI_WAKE_UP_CONTROL),
 		sdhci_readw(host, SDHCI_CLOCK_CONTROL));
-	pr_debug(DRIVER_NAME ": Timeout:  0x%08x | Int stat: 0x%08x\n",
+	pr_info(DRIVER_NAME ": Timeout:  0x%08x | Int stat: 0x%08x\n",
 		sdhci_readb(host, SDHCI_TIMEOUT_CONTROL),
 		sdhci_readl(host, SDHCI_INT_STATUS));
-	pr_debug(DRIVER_NAME ": Int enab: 0x%08x | Sig enab: 0x%08x\n",
+	pr_info(DRIVER_NAME ": Int enab: 0x%08x | Sig enab: 0x%08x\n",
 		sdhci_readl(host, SDHCI_INT_ENABLE),
 		sdhci_readl(host, SDHCI_SIGNAL_ENABLE));
-	pr_debug(DRIVER_NAME ": AC12 err: 0x%08x | Slot int: 0x%08x\n",
-		sdhci_readw(host, SDHCI_ACMD12_ERR),
+	pr_info(DRIVER_NAME ": AC12 err: 0x%08x | Slot int: 0x%08x\n",
+		host->auto_cmd_err_sts,
 		sdhci_readw(host, SDHCI_SLOT_INT_STATUS));
-	pr_debug(DRIVER_NAME ": Caps:     0x%08x | Caps_1:   0x%08x\n",
+	pr_info(DRIVER_NAME ": Caps:     0x%08x | Caps_1:   0x%08x\n",
 		sdhci_readl(host, SDHCI_CAPABILITIES),
 		sdhci_readl(host, SDHCI_CAPABILITIES_1));
-	pr_debug(DRIVER_NAME ": Cmd:      0x%08x | Max curr: 0x%08x\n",
+	pr_info(DRIVER_NAME ": Cmd:      0x%08x | Max curr: 0x%08x\n",
 		sdhci_readw(host, SDHCI_COMMAND),
 		sdhci_readl(host, SDHCI_MAX_CURRENT));
-	pr_debug(DRIVER_NAME ": Host ctl2: 0x%08x\n",
+	pr_info(DRIVER_NAME ": Resp 1:   0x%08x | Resp 0:   0x%08x\n",
+		sdhci_readl(host, SDHCI_RESPONSE + 0x4),
+		sdhci_readl(host, SDHCI_RESPONSE));
+	pr_info(DRIVER_NAME ": Resp 3:   0x%08x | Resp 2:   0x%08x\n",
+		sdhci_readl(host, SDHCI_RESPONSE + 0xC),
+		sdhci_readl(host, SDHCI_RESPONSE + 0x8));
+	pr_info(DRIVER_NAME ": Host ctl2: 0x%08x\n",
 		sdhci_readw(host, SDHCI_HOST_CONTROL2));
 
 	if (host->flags & SDHCI_USE_ADMA) {
 		if (host->flags & SDHCI_USE_64_BIT_DMA)
-			pr_debug(DRIVER_NAME ": ADMA Err: 0x%08x | ADMA Ptr: 0x%08x%08x\n",
+			pr_info(DRIVER_NAME ": ADMA Err: 0x%08x | ADMA Ptr: 0x%08x%08x\n",
 				 readl(host->ioaddr + SDHCI_ADMA_ERROR),
 				 readl(host->ioaddr + SDHCI_ADMA_ADDRESS_HI),
 				 readl(host->ioaddr + SDHCI_ADMA_ADDRESS));
 		else
-			pr_debug(DRIVER_NAME ": ADMA Err: 0x%08x | ADMA Ptr: 0x%08x\n",
+			pr_info(DRIVER_NAME ": ADMA Err: 0x%08x | ADMA Ptr: 0x%08x\n",
 				 readl(host->ioaddr + SDHCI_ADMA_ERROR),
 				 readl(host->ioaddr + SDHCI_ADMA_ADDRESS));
 	}
 
-	pr_debug(DRIVER_NAME ": ===========================================\n");
+	host->mmc->err_occurred = true;
+
+	if (host->ops->dump_vendor_regs)
+		host->ops->dump_vendor_regs(host);
+	sdhci_dump_state(host);
+	pr_info(DRIVER_NAME ": ===========================================\n");
 }
 
 /*****************************************************************************\
@@ -177,6 +229,7 @@
 {
 	unsigned long timeout;
 
+retry_reset:
 	sdhci_writeb(host, mask, SDHCI_SOFTWARE_RESET);
 
 	if (mask & SDHCI_RESET_ALL) {
@@ -187,19 +240,60 @@
 	}
 
 	/* Wait max 100 ms */
-	timeout = 100;
+	timeout = 100000;
+
+	if (host->ops->check_power_status && host->pwr &&
+	    (mask & SDHCI_RESET_ALL))
+		host->ops->check_power_status(host, REQ_BUS_OFF);
+
+	/* clear pending normal/error interrupt status */
+	sdhci_writel(host, sdhci_readl(host, SDHCI_INT_STATUS),
+			SDHCI_INT_STATUS);
 
 	/* hw clears the bit when it's done */
 	while (sdhci_readb(host, SDHCI_SOFTWARE_RESET) & mask) {
 		if (timeout == 0) {
 			pr_err("%s: Reset 0x%x never completed.\n",
 				mmc_hostname(host->mmc), (int)mask);
+			MMC_TRACE(host->mmc, "%s: Reset 0x%x never completed\n",
+					__func__, (int)mask);
+			if ((host->quirks2 & SDHCI_QUIRK2_USE_RESET_WORKAROUND)
+				&& host->ops->reset_workaround) {
+				if (!host->reset_wa_applied) {
+					/*
+					 * apply the workaround and issue
+					 * reset again.
+					 */
+					host->ops->reset_workaround(host, 1);
+					host->reset_wa_applied = 1;
+					host->reset_wa_cnt++;
+					goto retry_reset;
+				} else {
+					pr_err("%s: Reset 0x%x failed with workaround\n",
+						mmc_hostname(host->mmc),
+						(int)mask);
+					/* clear the workaround */
+					host->ops->reset_workaround(host, 0);
+					host->reset_wa_applied = 0;
+				}
+			}
+
 			sdhci_dumpregs(host);
 			return;
 		}
 		timeout--;
-		mdelay(1);
+		udelay(1);
 	}
+
+	if ((host->quirks2 & SDHCI_QUIRK2_USE_RESET_WORKAROUND) &&
+			host->ops->reset_workaround && host->reset_wa_applied) {
+		pr_info("%s: Reset 0x%x successful with workaround\n",
+				mmc_hostname(host->mmc), (int)mask);
+		/* clear the workaround */
+		host->ops->reset_workaround(host, 0);
+		host->reset_wa_applied = 0;
+	}
+
 }
 EXPORT_SYMBOL_GPL(sdhci_reset);
 
@@ -221,6 +315,8 @@
 		/* Resetting the controller clears many */
 		host->preset_enabled = false;
 	}
+	if (host->is_crypto_en)
+		host->crypto_reset_reqd = true;
 }
 
 static void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios);
@@ -236,7 +332,7 @@
 		    SDHCI_INT_DATA_CRC | SDHCI_INT_DATA_TIMEOUT |
 		    SDHCI_INT_INDEX | SDHCI_INT_END_BIT | SDHCI_INT_CRC |
 		    SDHCI_INT_TIMEOUT | SDHCI_INT_DATA_END |
-		    SDHCI_INT_RESPONSE;
+		    SDHCI_INT_RESPONSE | SDHCI_INT_AUTO_CMD_ERR;
 
 	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
 	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
@@ -279,9 +375,12 @@
 	struct sdhci_host *host = container_of(led, struct sdhci_host, led);
 	unsigned long flags;
 
+	if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION)
+		return;
+
 	spin_lock_irqsave(&host->lock, flags);
 
-	if (host->runtime_suspended)
+	if (host->runtime_suspended || sdhci_check_state(host))
 		goto out;
 
 	if (brightness == LED_OFF)
@@ -598,7 +697,10 @@
 	void *align;
 	char *buffer;
 	unsigned long flags;
-	bool has_unaligned;
+	bool has_unaligned = false;
+	u32 command = SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND));
+
+	trace_mmc_adma_table_post(command, data->sg_len);
 
 	if (data->flags & MMC_DATA_READ)
 		direction = DMA_FROM_DEVICE;
@@ -648,6 +750,7 @@
 	u8 count;
 	struct mmc_data *data = cmd->data;
 	unsigned target_timeout, current_timeout;
+	u32 curr_clk = 0; /* In KHz */
 
 	/*
 	 * If the host controller provides us with an incorrect timeout
@@ -693,7 +796,14 @@
 	 *     (1) / (2) > 2^6
 	 */
 	count = 0;
+	if (host->quirks2 & SDHCI_QUIRK2_ALWAYS_USE_BASE_CLOCK) {
+		curr_clk = host->clock / 1000;
+		if (host->quirks2 & SDHCI_QUIRK2_DIVIDE_TOUT_BY_4)
+			curr_clk /= 4;
+		current_timeout = (1 << 13) * 1000 / curr_clk;
+	} else {
 	current_timeout = (1 << 13) * 1000 / host->timeout_clk;
+	}
 	while (current_timeout < target_timeout) {
 		count++;
 		current_timeout <<= 1;
@@ -701,11 +811,13 @@
 			break;
 	}
 
+	if (!(host->quirks2 & SDHCI_QUIRK2_USE_RESERVED_MAX_TIMEOUT)) {
 	if (count >= 0xF) {
 		DBG("%s: Too large timeout 0x%x requested for CMD%d!\n",
 		    mmc_hostname(host->mmc), count, cmd->opcode);
 		count = 0xE;
 	}
+	}
 
 	return count;
 }
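
The timeout count programmed into the controller is the exponent n such that
2^(13+n) timeout-clock cycles cover the target; the new quirk path merely
substitutes the base clock, optionally divided by 4, for timeout_clk. A
standalone version of the computation, including the 0xE clamp that the
second quirk bypasses:

#include <stdio.h>

/* Smallest count such that 2^(13+count) clock cycles cover target_us,
 * mirroring sdhci_calc_timeout(); clk_khz is the timeout clock in kHz. */
static unsigned calc_timeout_count(unsigned target_us, unsigned clk_khz,
				   int use_reserved_max)
{
	unsigned count = 0;
	unsigned current_us = (1 << 13) * 1000 / clk_khz;

	while (current_us < target_us) {
		count++;
		current_us <<= 1;
		if (count >= 0xF)
			break;
	}
	/* without the quirk, 0xF is reserved and clamped to 0xE */
	if (!use_reserved_max && count >= 0xF)
		count = 0xE;
	return count;
}

int main(void)
{
	/* 100 ms at a 200 MHz base clock divided by 4 (50000 kHz) */
	printf("count = %u\n", calc_timeout_count(100000, 50000, 0));
	return 0;
}
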
@@ -736,6 +848,17 @@
 	}
 }
 
+static void sdhci_set_blk_size_reg(struct sdhci_host *host, unsigned int blksz,
+				   unsigned int sdma_boundary)
+{
+	if (host->flags & SDHCI_USE_ADMA)
+		sdhci_writew(host, SDHCI_MAKE_BLKSZ(0, blksz),
+			     SDHCI_BLOCK_SIZE);
+	else
+		sdhci_writew(host, SDHCI_MAKE_BLKSZ(sdma_boundary, blksz),
+			     SDHCI_BLOCK_SIZE);
+}
+
 static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_command *cmd)
 {
 	u8 ctrl;
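
sdhci_set_blk_size_reg() zeroes the SDMA boundary field whenever ADMA is in
use, where it is meaningless. For reference, the block-size register packs a
3-bit boundary exponent next to a 12-bit block size; SDHCI_MAKE_BLKSZ() in
sdhci.h is equivalent to the following, with boundary 7 being the 512K
default (SDHCI_DEFAULT_BOUNDARY_ARG).

#include <stdint.h>
#include <stdio.h>

/* Mirrors SDHCI_MAKE_BLKSZ(dma, blksz): boundary exponent in bits
 * [14:12], transfer block size in bits [11:0]. */
static uint16_t make_blksz(unsigned boundary, unsigned blksz)
{
	return ((boundary & 0x7) << 12) | (blksz & 0xFFF);
}

int main(void)
{
	/* 512-byte blocks; boundary 0 (ADMA) vs 7 (512K SDMA boundary) */
	printf("ADMA: 0x%04x  SDMA: 0x%04x\n",
	       make_blksz(0, 512), make_blksz(7, 512));
	return 0;
}
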
@@ -751,7 +874,7 @@
 		return;
 
 	/* Sanity checks */
-	BUG_ON(data->blksz * data->blocks > 524288);
+	BUG_ON(data->blksz * data->blocks > host->mmc->max_req_size);
 	BUG_ON(data->blksz > host->mmc->max_blk_size);
 	BUG_ON(data->blocks > 65535);
 
@@ -762,6 +885,10 @@
 	if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA))
 		host->flags |= SDHCI_REQ_USE_DMA;
 
+	if ((host->quirks2 & SDHCI_QUIRK2_USE_PIO_FOR_EMMC_TUNING) &&
+		cmd->opcode == MMC_SEND_TUNING_BLOCK_HS200)
+		host->flags &= ~SDHCI_REQ_USE_DMA;
+
 	/*
 	 * FIXME: This doesn't account for merging when mapping the
 	 * scatterlist.
@@ -828,6 +955,7 @@
 
 	if (host->flags & SDHCI_REQ_USE_DMA) {
 		if (host->flags & SDHCI_USE_ADMA) {
+			trace_mmc_adma_table_pre(cmd->opcode, data->sg_len);
 			ret = sdhci_adma_table_pre(host, data);
 			if (ret) {
 				/*
@@ -898,9 +1026,13 @@
 	sdhci_set_transfer_irqs(host);
 
 	/* Set the DMA boundary value and block size */
-	sdhci_writew(host, SDHCI_MAKE_BLKSZ(SDHCI_DEFAULT_BOUNDARY_ARG,
-		data->blksz), SDHCI_BLOCK_SIZE);
+	sdhci_set_blk_size_reg(host, data->blksz, SDHCI_DEFAULT_BOUNDARY_ARG);
 	sdhci_writew(host, data->blocks, SDHCI_BLOCK_COUNT);
+	MMC_TRACE(host->mmc,
+		"%s: 0x28=0x%08x 0x3E=0x%08x 0x06=0x%08x\n", __func__,
+		sdhci_readb(host, SDHCI_HOST_CONTROL),
+		sdhci_readw(host, SDHCI_HOST_CONTROL2),
+		sdhci_readw(host, SDHCI_BLOCK_COUNT));
 }
 
 static void sdhci_set_transfer_mode(struct sdhci_host *host,
@@ -942,12 +1074,26 @@
 		}
 	}
 
-	if (data->flags & MMC_DATA_READ)
+	if (data->flags & MMC_DATA_READ) {
 		mode |= SDHCI_TRNS_READ;
+		if (host->ops->toggle_cdr) {
+			if ((cmd->opcode == MMC_SEND_TUNING_BLOCK_HS200) ||
+				(cmd->opcode == MMC_SEND_TUNING_BLOCK_HS400) ||
+				(cmd->opcode == MMC_SEND_TUNING_BLOCK))
+				host->ops->toggle_cdr(host, false);
+			else
+				host->ops->toggle_cdr(host, true);
+		}
+	}
+	if (host->ops->toggle_cdr && (data->flags & MMC_DATA_WRITE))
+		host->ops->toggle_cdr(host, false);
 	if (host->flags & SDHCI_REQ_USE_DMA)
 		mode |= SDHCI_TRNS_DMA;
 
 	sdhci_writew(host, mode, SDHCI_TRANSFER_MODE);
+	MMC_TRACE(host->mmc, "%s: 0x00=0x%08x 0x0C=0x%08x\n", __func__,
+		sdhci_readw(host, SDHCI_ARGUMENT2),
+		sdhci_readw(host, SDHCI_TRANSFER_MODE));
 }
 
 static void sdhci_finish_data(struct sdhci_host *host)
@@ -959,6 +1105,8 @@
 	data = host->data;
 	host->data = NULL;
 
+	MMC_TRACE(host->mmc, "%s: 0x24=0x%08x\n", __func__,
+		sdhci_readl(host, SDHCI_PRESENT_STATE));
 	if (host->flags & SDHCI_REQ_USE_DMA) {
 		if (host->flags & SDHCI_USE_ADMA)
 			sdhci_adma_table_post(host, data);
@@ -1017,7 +1165,7 @@
 	WARN_ON(host->cmd);
 
 	/* Wait max 10 ms */
-	timeout = 10;
+	timeout = 10000;
 
 	mask = SDHCI_CMD_INHIBIT;
 	if ((cmd->data != NULL) || (cmd->flags & MMC_RSP_BUSY))
@@ -1032,13 +1180,16 @@
 		if (timeout == 0) {
 			pr_err("%s: Controller never released "
 				"inhibit bit(s).\n", mmc_hostname(host->mmc));
+			MMC_TRACE(host->mmc,
+			"%s: Controller never released inhibit bit(s)\n",
+			__func__);
 			sdhci_dumpregs(host);
 			cmd->error = -EIO;
 			tasklet_schedule(&host->finish_tasklet);
 			return;
 		}
 		timeout--;
-		mdelay(1);
+		udelay(1);
 	}
 
 	timeout = jiffies;
@@ -1084,7 +1235,15 @@
 	    cmd->opcode == MMC_SEND_TUNING_BLOCK_HS200)
 		flags |= SDHCI_CMD_DATA;
 
+	if (cmd->data)
+		host->data_start_time = ktime_get();
+	trace_mmc_cmd_rw_start(cmd->opcode, cmd->arg, cmd->flags);
 	sdhci_writew(host, SDHCI_MAKE_CMD(cmd->opcode, flags), SDHCI_COMMAND);
+	MMC_TRACE(host->mmc,
+		"%s: updated 0x8=0x%08x 0xC=0x%08x 0xE=0x%08x\n", __func__,
+		sdhci_readl(host, SDHCI_ARGUMENT),
+		sdhci_readw(host, SDHCI_TRANSFER_MODE),
+		sdhci_readw(host, SDHCI_COMMAND));
 }
 EXPORT_SYMBOL_GPL(sdhci_send_command);
 
@@ -1105,15 +1264,20 @@
 						sdhci_readb(host,
 						SDHCI_RESPONSE + (3-i)*4-1);
 			}
+			MMC_TRACE(host->mmc,
+			"%s: resp 0: 0x%08x resp 1: 0x%08x resp 2: 0x%08x resp 3: 0x%08x\n",
+			__func__, host->cmd->resp[0], host->cmd->resp[1],
+			host->cmd->resp[2], host->cmd->resp[3]);
 		} else {
 			host->cmd->resp[0] = sdhci_readl(host, SDHCI_RESPONSE);
+			MMC_TRACE(host->mmc, "%s: resp 0: 0x%08x\n",
+				__func__, host->cmd->resp[0]);
 		}
 	}
 
-	host->cmd->error = 0;
-
 	/* Finished CMD23, now send actual command. */
 	if (host->cmd == host->mrq->sbc) {
+		host->cmd->error = 0;
 		host->cmd = NULL;
 		sdhci_send_command(host, host->mrq->cmd);
 	} else {
@@ -1173,6 +1337,7 @@
 
 	host->mmc->actual_clock = 0;
 
+	if (host->clock)
 	sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);
 	if (host->quirks2 & SDHCI_QUIRK2_NEED_DELAY_AFTER_INT_CLK_RST)
 		mdelay(1);
@@ -1257,6 +1422,10 @@
 clock_set:
 	if (real_div)
 		host->mmc->actual_clock = (host->max_clk * clk_mul) / real_div;
+
+	if (host->quirks2 & SDHCI_QUIRK2_ALWAYS_USE_BASE_CLOCK)
+		div = 0;
+
 	clk |= (div & SDHCI_DIV_MASK) << SDHCI_DIVIDER_SHIFT;
 	clk |= ((div & SDHCI_DIV_HI_MASK) >> SDHCI_DIV_MASK_LEN)
 		<< SDHCI_DIVIDER_HI_SHIFT;
@@ -1264,19 +1433,19 @@
 	sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
 
 	/* Wait max 20 ms */
-	timeout = 20;
+	timeout = 20000;
 	while (!((clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL))
 		& SDHCI_CLOCK_INT_STABLE)) {
 		if (timeout == 0) {
 			pr_err("%s: Internal clock never "
 				"stabilised.\n", mmc_hostname(host->mmc));
+			MMC_TRACE(host->mmc,
+			"%s: Internal clock never stabilised.\n", __func__);
 			sdhci_dumpregs(host);
 			return;
 		}
 		timeout--;
-		spin_unlock_irq(&host->lock);
-		usleep_range(900, 1100);
-		spin_lock_irq(&host->lock);
+		udelay(1);
 	}
 
 	clk |= SDHCI_CLOCK_CARD_EN;
@@ -1330,6 +1499,8 @@
 
 	if (pwr == 0) {
 		sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
+		if (host->ops->check_power_status)
+			host->ops->check_power_status(host, REQ_BUS_OFF);
 		if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
 			sdhci_runtime_pm_bus_off(host);
 		vdd = 0;
@@ -1338,20 +1509,27 @@
 		 * Spec says that we should clear the power reg before setting
 		 * a new value. Some controllers don't seem to like this though.
 		 */
-		if (!(host->quirks & SDHCI_QUIRK_SINGLE_POWER_WRITE))
+		if (!(host->quirks & SDHCI_QUIRK_SINGLE_POWER_WRITE)) {
 			sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
-
+			if (host->ops->check_power_status)
+				host->ops->check_power_status(host, REQ_BUS_OFF);
+		}
 		/*
 		 * At least the Marvell CaFe chip gets confused if we set the
 		 * voltage and set turn on power at the same time, so set the
 		 * voltage first.
 		 */
-		if (host->quirks & SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER)
+		if (host->quirks & SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER) {
 			sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);
+			if (host->ops->check_power_status)
+				host->ops->check_power_status(host, REQ_BUS_ON);
+		}
 
 		pwr |= SDHCI_POWER_ON;
 
 		sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);
+		if (host->ops->check_power_status)
+			host->ops->check_power_status(host, REQ_BUS_ON);
 
 		if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
 			sdhci_runtime_pm_bus_on(host);
@@ -1371,6 +1549,148 @@
  *                                                                           *
 \*****************************************************************************/
 
+static int sdhci_enable(struct mmc_host *mmc)
+{
+	struct sdhci_host *host = mmc_priv(mmc);
+
+	if (host->ops->platform_bus_voting)
+		host->ops->platform_bus_voting(host, 1);
+
+	return 0;
+}
+
+static int sdhci_disable(struct mmc_host *mmc)
+{
+	struct sdhci_host *host = mmc_priv(mmc);
+
+	if (host->ops->platform_bus_voting)
+		host->ops->platform_bus_voting(host, 0);
+
+	return 0;
+}
+
+static void sdhci_notify_halt(struct mmc_host *mmc, bool halt)
+{
+	struct sdhci_host *host = mmc_priv(mmc);
+
+	pr_debug("%s: halt notification was sent, halt=%d\n",
+		mmc_hostname(mmc), halt);
+	if (host->flags & SDHCI_USE_64_BIT_DMA) {
+		if (halt)
+			host->desc_sz = 16;
+		else
+			host->desc_sz = SDHCI_ADMA2_64_DESC_SZ;
+	}
+}
+
+static inline void sdhci_update_power_policy(struct sdhci_host *host,
+		enum sdhci_power_policy policy)
+{
+	host->power_policy = policy;
+}
+
+static int sdhci_notify_load(struct mmc_host *mmc, enum mmc_load state)
+{
+	int err = 0;
+	struct sdhci_host *host = mmc_priv(mmc);
+
+	switch (state) {
+	case MMC_LOAD_HIGH:
+		sdhci_update_power_policy(host, SDHCI_PERFORMANCE_MODE);
+		break;
+	case MMC_LOAD_LOW:
+		sdhci_update_power_policy(host, SDHCI_POWER_SAVE_MODE);
+		break;
+	default:
+		err = -EINVAL;
+		break;
+	}
+
+	if (host->ops->notify_load)
+		err = host->ops->notify_load(host, state);
+
+	return err;
+}
+
+static bool sdhci_check_state(struct sdhci_host *host)
+{
+	if (!host->clock || !host->pwr)
+		return true;
+	else
+		return false;
+}
+
+static bool sdhci_check_auto_tuning(struct sdhci_host *host,
+				  struct mmc_command *cmd)
+{
+	if (((cmd->opcode != MMC_READ_SINGLE_BLOCK) &&
+	     (cmd->opcode != MMC_READ_MULTIPLE_BLOCK) &&
+	     (cmd->opcode != SD_IO_RW_EXTENDED)) || (host->clock < 100000000))
+		return false;
+	else if (host->mmc->ios.timing == MMC_TIMING_MMC_HS200 ||
+		 host->mmc->ios.timing == MMC_TIMING_UHS_SDR104)
+		return true;
+	else
+		return false;
+}
+
+static int sdhci_get_tuning_cmd(struct sdhci_host *host)
+{
+	if (!host->mmc || !host->mmc->card)
+		return 0;
+	/*
+	 * If we are here, all preconditions have already been met, and
+	 * the card is either an eMMC or an SD/SDIO card.
+	 */
+	if (mmc_card_mmc(host->mmc->card))
+		return MMC_SEND_TUNING_BLOCK_HS200;
+	else
+		return MMC_SEND_TUNING_BLOCK;
+}
+
+static int sdhci_crypto_cfg(struct sdhci_host *host, struct mmc_request *mrq,
+		u32 slot)
+{
+	int err = 0;
+
+	if (host->crypto_reset_reqd && host->ops->crypto_engine_reset) {
+		err = host->ops->crypto_engine_reset(host);
+		if (err) {
+			pr_err("%s: crypto reset failed\n",
+					mmc_hostname(host->mmc));
+			goto out;
+		}
+		host->crypto_reset_reqd = false;
+	}
+
+	if (host->ops->crypto_engine_cfg) {
+		err = host->ops->crypto_engine_cfg(host, mrq, slot);
+		if (err) {
+			pr_err("%s: failed to configure crypto\n",
+					mmc_hostname(host->mmc));
+			goto out;
+		}
+	}
+out:
+	return err;
+}
+
+static int sdhci_crypto_cfg_end(struct sdhci_host *host,
+				struct mmc_request *mrq)
+{
+	int err = 0;
+
+	if (host->ops->crypto_engine_cfg_end) {
+		err = host->ops->crypto_engine_cfg_end(host, mrq);
+		if (err) {
+			pr_err("%s: failed to configure crypto\n",
+					mmc_hostname(host->mmc));
+			return err;
+		}
+	}
+	return 0;
+}
+
 static void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
 {
 	struct sdhci_host *host;
@@ -1380,15 +1700,41 @@
 	host = mmc_priv(mmc);
 
 	sdhci_runtime_pm_get(host);
+	if (sdhci_check_state(host)) {
+		sdhci_dump_state(host);
+		pr_err("%s: sdhci in bad state\n",
+			mmc_hostname(host->mmc));
+		mrq->cmd->error = -EIO;
+		if (mrq->data)
+			mrq->data->error = -EIO;
+		mmc_request_done(host->mmc, mrq);
+		sdhci_runtime_pm_put(host);
+		return;
+	}
 
-	/* Firstly check card presence */
-	present = mmc->ops->get_cd(mmc);
+	/*
+	 * Firstly check card presence from cd-gpio.  The return could
+	 * be one of the following possibilities:
+	 *     negative: cd-gpio is not available
+	 *     zero: cd-gpio is used, and card is removed
+	 *     one: cd-gpio is used, and card is present
+	 */
+	present = sdhci_do_get_cd(host);
+	if (present < 0) {
+		/* If polling, assume that the card is always present. */
+		if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION)
+			present = 1;
+		else
+			present = sdhci_readl(host, SDHCI_PRESENT_STATE) &
+					SDHCI_CARD_PRESENT;
+	}
 
 	spin_lock_irqsave(&host->lock, flags);
 
 	WARN_ON(host->mrq != NULL);
 
 #ifndef SDHCI_USE_LEDS_CLASS
+	if (!(host->quirks2 & SDHCI_QUIRK2_BROKEN_LED_CONTROL))
 	sdhci_activate_led(host);
 #endif
 
@@ -1409,6 +1755,22 @@
 		host->mrq->cmd->error = -ENOMEDIUM;
 		tasklet_schedule(&host->finish_tasklet);
 	} else {
+		if (host->ops->config_auto_tuning_cmd) {
+			if (sdhci_check_auto_tuning(host, mrq->cmd))
+				host->ops->config_auto_tuning_cmd(host, true,
+					sdhci_get_tuning_cmd(host));
+			else
+				host->ops->config_auto_tuning_cmd(host, false,
+					sdhci_get_tuning_cmd(host));
+		}
+
+		if (host->is_crypto_en) {
+			spin_unlock_irqrestore(&host->lock, flags);
+			if (sdhci_crypto_cfg(host, mrq, 0))
+				goto end_req;
+			spin_lock_irqsave(&host->lock, flags);
+		}
+
 		if (mrq->sbc && !(host->flags & SDHCI_AUTO_CMD23))
 			sdhci_send_command(host, mrq->sbc);
 		else
@@ -1417,6 +1779,16 @@
 
 	mmiowb();
 	spin_unlock_irqrestore(&host->lock, flags);
+	return;
+end_req:
+	mrq->cmd->error = -EIO;
+	if (mrq->data)
+		mrq->data->error = -EIO;
+	host->mrq = NULL;
+	MMC_TRACE(host->mmc, "Request failed due to ice config\n");
+	sdhci_dumpregs(host);
+	mmc_request_done(host->mmc, mrq);
+	sdhci_runtime_pm_put(host);
 }
 
 void sdhci_set_bus_width(struct sdhci_host *host, int width)
@@ -1465,38 +1837,50 @@
 }
 EXPORT_SYMBOL_GPL(sdhci_set_uhs_signaling);
 
+void sdhci_cfg_irq(struct sdhci_host *host, bool enable, bool sync)
+{
+	if (enable && !(host->flags & SDHCI_HOST_IRQ_STATUS)) {
+		enable_irq(host->irq);
+		host->flags |= SDHCI_HOST_IRQ_STATUS;
+	} else if (!enable && (host->flags & SDHCI_HOST_IRQ_STATUS)) {
+		if (sync)
+			disable_irq(host->irq);
+		else
+			disable_irq_nosync(host->irq);
+		host->flags &= ~SDHCI_HOST_IRQ_STATUS;
+	}
+}
+EXPORT_SYMBOL(sdhci_cfg_irq);
+
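
sdhci_cfg_irq() is idempotent thanks to the SDHCI_HOST_IRQ_STATUS flag, so repeated calls cannot unbalance the IRQ disable depth. A sketch of the intended use, bracketing a window in which the controller clocks are gated (the callers in this patch hold host->lock, hence the nosync variant):

    /* Sketch: keep the level-triggered host IRQ off while unclocked. */
    spin_lock_irqsave(&host->lock, flags);
    sdhci_cfg_irq(host, false, false);  /* nosync: called under host->lock */
    spin_unlock_irqrestore(&host->lock, flags);

    /* ... gate clocks, do work, ungate clocks ... */

    spin_lock_irqsave(&host->lock, flags);
    sdhci_cfg_irq(host, true, false);
    spin_unlock_irqrestore(&host->lock, flags);
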
 static void sdhci_do_set_ios(struct sdhci_host *host, struct mmc_ios *ios)
 {
 	unsigned long flags;
 	u8 ctrl;
 	struct mmc_host *mmc = host->mmc;
-
-	spin_lock_irqsave(&host->lock, flags);
+	int ret;
 
 	if (host->flags & SDHCI_DEVICE_DEAD) {
-		spin_unlock_irqrestore(&host->lock, flags);
 		if (!IS_ERR(mmc->supply.vmmc) &&
 		    ios->power_mode == MMC_POWER_OFF)
 			mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);
 		return;
 	}
 
-	/*
-	 * Reset the chip on each power off.
-	 * Should clear out any weird states.
-	 */
-	if (ios->power_mode == MMC_POWER_OFF) {
-		sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
-		sdhci_reinit(host);
-	}
-
 	if (host->version >= SDHCI_SPEC_300 &&
 		(ios->power_mode == MMC_POWER_UP) &&
 		!(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN))
 		sdhci_enable_preset_value(host, false);
 
-	if (!ios->clock || ios->clock != host->clock) {
+	spin_lock_irqsave(&host->lock, flags);
+	if (host->mmc && host->mmc->card &&
+			mmc_card_sdio(host->mmc->card))
+		sdhci_cfg_irq(host, false, false);
+
+	if (ios->clock &&
+	    ((ios->clock != host->clock) || (ios->timing != host->timing))) {
+		spin_unlock_irqrestore(&host->lock, flags);
 		host->ops->set_clock(host, ios->clock);
+		spin_lock_irqsave(&host->lock, flags);
 		host->clock = ios->clock;
 
 		if (host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK &&
@@ -1511,8 +1895,45 @@
 			host->mmc->max_busy_timeout /= host->timeout_clk;
 		}
 	}
+	if (ios->clock && host->sdio_irq_async_status)
+		sdhci_enable_sdio_irq_nolock(host, false);
+	spin_unlock_irqrestore(&host->lock, flags);
 
+	/*
+	 * The controller clocks may be off during power-up and we may end up
+	 * enabling the card clock before giving power to the card. Hence,
+	 * during MMC_POWER_UP enable the controller clock and turn on the
+	 * regulators. mmc_power_up() provides the necessary delay before
+	 * turning on the clocks to the card.
+	 */
+	if (ios->power_mode & MMC_POWER_UP) {
+		if (host->ops->enable_controller_clock) {
+			ret = host->ops->enable_controller_clock(host);
+			if (ret) {
+				pr_err("%s: enabling controller clock: failed: %d\n",
+				       mmc_hostname(host->mmc), ret);
+			} else {
 	sdhci_set_power(host, ios->power_mode, ios->vdd);
+			}
+		}
+	}
+
+	spin_lock_irqsave(&host->lock, flags);
+	if (!host->clock) {
+		if (host->mmc && host->mmc->card &&
+				mmc_card_sdio(host->mmc->card))
+			sdhci_cfg_irq(host, true, false);
+		spin_unlock_irqrestore(&host->lock, flags);
+		return;
+	}
+	spin_unlock_irqrestore(&host->lock, flags);
+
+	if (!host->ops->enable_controller_clock && (ios->power_mode &
+						    (MMC_POWER_UP |
+						     MMC_POWER_ON)))
+		sdhci_set_power(host, ios->power_mode, ios->vdd);
+
+	spin_lock_irqsave(&host->lock, flags);
 
 	if (host->ops->platform_send_init_74_clocks)
 		host->ops->platform_send_init_74_clocks(host, ios->power_mode);
@@ -1580,7 +2001,11 @@
 			sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
 
 			/* Re-enable SD Clock */
+			if (ios->clock) {
+				spin_unlock_irqrestore(&host->lock, flags);
 			host->ops->set_clock(host, host->clock);
+				spin_lock_irqsave(&host->lock, flags);
+			}
 		}
 
 		/* Reset SD Clock Enable */
@@ -1607,10 +2032,15 @@
 		}
 
 		/* Re-enable SD Clock */
+		if (ios->clock) {
+			spin_unlock_irqrestore(&host->lock, flags);
 		host->ops->set_clock(host, host->clock);
+			spin_lock_irqsave(&host->lock, flags);
+		}
 	} else
 		sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
 
+	spin_unlock_irqrestore(&host->lock, flags);
 	/*
 	 * Some (ENE) controllers go apeshit on some ios operation,
 	 * signalling timeout and CRC errors even on CMD0. Resetting
@@ -1619,8 +2049,25 @@
 	if (host->quirks & SDHCI_QUIRK_RESET_CMD_DATA_ON_IOS)
 		sdhci_do_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
 
-	mmiowb();
+	/*
+	 * Reset the chip on each power off.
+	 * Should clear out any weird states.
+	 */
+	if (ios->power_mode == MMC_POWER_OFF) {
+		sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
+		sdhci_reinit(host);
+		sdhci_set_power(host, ios->power_mode, ios->vdd);
+	}
+	if (!ios->clock)
+		host->ops->set_clock(host, ios->clock);
+
+	spin_lock_irqsave(&host->lock, flags);
+	if (host->mmc && host->mmc->card &&
+			mmc_card_sdio(host->mmc->card))
+		sdhci_cfg_irq(host, true, false);
 	spin_unlock_irqrestore(&host->lock, flags);
+
+	mmiowb();
 }
 
 static void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
@@ -1732,7 +2179,20 @@
 
 static void sdhci_enable_sdio_irq_nolock(struct sdhci_host *host, int enable)
 {
-	if (!(host->flags & SDHCI_DEVICE_DEAD)) {
+	u16 ctrl = 0;
+
+	if (host->flags & SDHCI_DEVICE_DEAD)
+		return;
+
+	if (mmc_card_and_host_support_async_int(host->mmc)) {
+		ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
+		if (enable)
+			ctrl |= SDHCI_CTRL_ASYNC_INT_ENABLE;
+		else
+			ctrl &= ~SDHCI_CTRL_ASYNC_INT_ENABLE;
+		sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
+	}
+
 		if (enable)
 			host->ier |= SDHCI_INT_CARD_INT;
 		else
@@ -1742,7 +2202,6 @@
 		sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
 		mmiowb();
 	}
-}
 
 static void sdhci_enable_sdio_irq(struct mmc_host *mmc, int enable)
 {
@@ -1784,6 +2243,8 @@
 		/* Set 1.8V Signal Enable in the Host Control2 register to 0 */
 		ctrl &= ~SDHCI_CTRL_VDD_180;
 		sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
+		if (host->ops->check_power_status)
+			host->ops->check_power_status(host, REQ_IO_HIGH);
 
 		if (!IS_ERR(mmc->supply.vqmmc)) {
 			ret = regulator_set_voltage(mmc->supply.vqmmc, 2700000,
@@ -1823,6 +2284,8 @@
 		 */
 		ctrl |= SDHCI_CTRL_VDD_180;
 		sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
+		if (host->ops->check_power_status)
+			host->ops->check_power_status(host, REQ_IO_LOW);
 
 		/* Some controller need to do more when switching */
 		if (host->ops->voltage_switch)
@@ -1893,6 +2356,19 @@
 	return 0;
 }
 
+static int sdhci_enhanced_strobe(struct mmc_host *mmc)
+{
+	struct sdhci_host *host = mmc_priv(mmc);
+	int err = 0;
+
+	sdhci_runtime_pm_get(host);
+	if (host->ops->enhanced_strobe)
+		err = host->ops->enhanced_strobe(host);
+	sdhci_runtime_pm_put(host);
+
+	return err;
+}
+
 static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode)
 {
 	struct sdhci_host *host = mmc_priv(mmc);
@@ -1922,9 +2398,10 @@
 	switch (host->timing) {
 	/* HS400 tuning is done in HS200 mode */
 	case MMC_TIMING_MMC_HS400:
+		if (!(mmc->caps2 & MMC_CAP2_HS400_POST_TUNING)) {
 		err = -EINVAL;
 		goto out_unlock;
-
+		}
 	case MMC_TIMING_MMC_HS200:
 		/*
 		 * Periodic re-tuning for HS400 is not expected to be needed, so
@@ -1950,7 +2427,13 @@
 
 	if (host->ops->platform_execute_tuning) {
 		spin_unlock_irqrestore(&host->lock, flags);
+		/*
+		 * Make sure re-tuning won't get triggered for CRC errors
+		 * that occur while executing tuning.
+		 */
+		mmc_retune_disable(mmc);
 		err = host->ops->platform_execute_tuning(host, opcode);
+		mmc_retune_enable(mmc);
 		sdhci_runtime_pm_put(host);
 		return err;
 	}
@@ -2002,14 +2485,11 @@
 		 */
 		if (cmd.opcode == MMC_SEND_TUNING_BLOCK_HS200) {
 			if (mmc->ios.bus_width == MMC_BUS_WIDTH_8)
-				sdhci_writew(host, SDHCI_MAKE_BLKSZ(7, 128),
-					     SDHCI_BLOCK_SIZE);
+				sdhci_set_blk_size_reg(host, 128, 7);
 			else if (mmc->ios.bus_width == MMC_BUS_WIDTH_4)
-				sdhci_writew(host, SDHCI_MAKE_BLKSZ(7, 64),
-					     SDHCI_BLOCK_SIZE);
+				sdhci_set_blk_size_reg(host, 64, 7);
 		} else {
-			sdhci_writew(host, SDHCI_MAKE_BLKSZ(7, 64),
-				     SDHCI_BLOCK_SIZE);
+			sdhci_set_blk_size_reg(host, 64, 7);
 		}
 
 		/*
@@ -2132,6 +2612,9 @@
 	if (host->version < SDHCI_SPEC_300)
 		return;
 
+	if (host->quirks2 & SDHCI_QUIRK2_BROKEN_PRESET_VALUE)
+		return;
+
 	/*
 	 * We only enable or disable Preset Value if they are not already
 	 * enabled or disabled respectively. Otherwise, we bail out.
@@ -2169,6 +2652,8 @@
 					 DMA_TO_DEVICE : DMA_FROM_DEVICE);
 		data->host_cookie = COOKIE_UNMAPPED;
 	}
+	if (host->ops->post_req)
+		host->ops->post_req(host, mrq);
 }
 
 static int sdhci_pre_dma_transfer(struct sdhci_host *host,
@@ -2205,6 +2690,9 @@
 
 	if (host->flags & SDHCI_REQ_USE_DMA)
 		sdhci_pre_dma_transfer(host, mrq->data);
+
+	if (host->ops->pre_req)
+		host->ops->pre_req(host, mrq);
 }
 
 static void sdhci_card_event(struct mmc_host *mmc)
@@ -2238,7 +2726,29 @@
 	spin_unlock_irqrestore(&host->lock, flags);
 }
 
+static int sdhci_late_init(struct mmc_host *mmc)
+{
+	struct sdhci_host *host = mmc_priv(mmc);
+
+	if (host->ops->init)
+		host->ops->init(host);
+
+	return 0;
+}
+
+static void sdhci_force_err_irq(struct mmc_host *mmc, u64 errmask)
+{
+	struct sdhci_host *host = mmc_priv(mmc);
+	u16 mask = errmask & 0xFFFF;
+
+	pr_err("%s: Force raise error mask:0x%04x\n", __func__, mask);
+	sdhci_runtime_pm_get(host);
+	sdhci_writew(host, mask, SDHCI_SET_INT_ERROR);
+	sdhci_runtime_pm_put(host);
+}
+
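
SDHCI_SET_INT_ERROR (offset 0x52) is the standard Force Event register for the error interrupt status, whose bits mirror the upper 16 bits of the interrupt mask; callers therefore pass the error bits shifted down by 16. A hedged usage sketch:

    /* Sketch: force a Data CRC error interrupt to exercise the error
     * path. SDHCI_INT_DATA_CRC lives in the upper half of the interrupt
     * mask, so shift it into the 16-bit error-register layout first.
     */
    mmc->ops->force_err_irq(mmc, SDHCI_INT_DATA_CRC >> 16);
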
 static const struct mmc_host_ops sdhci_ops = {
+	.init           = sdhci_late_init,
 	.request	= sdhci_request,
 	.post_req	= sdhci_post_req,
 	.pre_req	= sdhci_pre_req,
@@ -2250,9 +2760,15 @@
 	.start_signal_voltage_switch	= sdhci_start_signal_voltage_switch,
 	.prepare_hs400_tuning		= sdhci_prepare_hs400_tuning,
 	.execute_tuning			= sdhci_execute_tuning,
+	.enhanced_strobe		= sdhci_enhanced_strobe,
 	.select_drive_strength		= sdhci_select_drive_strength,
 	.card_event			= sdhci_card_event,
 	.card_busy	= sdhci_card_busy,
+	.enable		= sdhci_enable,
+	.disable	= sdhci_disable,
+	.notify_load	= sdhci_notify_load,
+	.notify_halt	= sdhci_notify_halt,
+	.force_err_irq	= sdhci_force_err_irq,
 };
 
 /*****************************************************************************\
@@ -2304,19 +2820,25 @@
 		   controllers do not like that. */
 		sdhci_do_reset(host, SDHCI_RESET_CMD);
 		sdhci_do_reset(host, SDHCI_RESET_DATA);
+	} else {
+		if (host->quirks2 & SDHCI_QUIRK2_RDWR_TX_ACTIVE_EOT)
+			sdhci_reset(host, SDHCI_RESET_DATA);
 	}
 
 	host->mrq = NULL;
 	host->cmd = NULL;
 	host->data = NULL;
+	host->auto_cmd_err_sts = 0;
 
 #ifndef SDHCI_USE_LEDS_CLASS
+	if (!(host->quirks2 & SDHCI_QUIRK2_BROKEN_LED_CONTROL))
 	sdhci_deactivate_led(host);
 #endif
 
 	mmiowb();
 	spin_unlock_irqrestore(&host->lock, flags);
 
+	sdhci_crypto_cfg_end(host, mrq);
 	mmc_request_done(host->mmc, mrq);
 	sdhci_runtime_pm_put(host);
 }
@@ -2333,9 +2855,15 @@
 	if (host->mrq) {
 		pr_err("%s: Timeout waiting for hardware "
 			"interrupt.\n", mmc_hostname(host->mmc));
+		MMC_TRACE(host->mmc, "Timeout waiting for h/w interrupt\n");
 		sdhci_dumpregs(host);
 
 		if (host->data) {
+			pr_info("%s: bytes to transfer: %d transferred: %d\n",
+				mmc_hostname(host->mmc),
+				(host->data->blksz * host->data->blocks),
+				(sdhci_readw(host, SDHCI_BLOCK_SIZE) & 0xFFF) *
+				sdhci_readw(host, SDHCI_BLOCK_COUNT));
 			host->data->error = -ETIMEDOUT;
 			sdhci_finish_data(host);
 		} else {
@@ -2360,23 +2888,63 @@
 
 static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask, u32 *mask)
 {
+	u16 auto_cmd_status;
 	BUG_ON(intmask == 0);
 
 	if (!host->cmd) {
 		pr_err("%s: Got command interrupt 0x%08x even "
 			"though no command operation was in progress.\n",
 			mmc_hostname(host->mmc), (unsigned)intmask);
+		MMC_TRACE(host->mmc,
+		"Got command interrupt 0x%08x even though no command operation was in progress.\n",
+		(unsigned)intmask);
 		sdhci_dumpregs(host);
 		return;
 	}
 
+	trace_mmc_cmd_rw_end(host->cmd->opcode, intmask,
+				sdhci_readl(host, SDHCI_RESPONSE));
+
 	if (intmask & SDHCI_INT_TIMEOUT)
 		host->cmd->error = -ETIMEDOUT;
 	else if (intmask & (SDHCI_INT_CRC | SDHCI_INT_END_BIT |
 			SDHCI_INT_INDEX))
 		host->cmd->error = -EILSEQ;
 
+	if (intmask & SDHCI_INT_AUTO_CMD_ERR) {
+		auto_cmd_status = host->auto_cmd_err_sts;
+		pr_err_ratelimited("%s: %s: AUTO CMD err sts 0x%08x\n",
+			mmc_hostname(host->mmc), __func__, auto_cmd_status);
+		if (auto_cmd_status & (SDHCI_AUTO_CMD12_NOT_EXEC |
+				       SDHCI_AUTO_CMD_INDEX_ERR |
+				       SDHCI_AUTO_CMD_ENDBIT_ERR))
+			host->cmd->error = -EIO;
+		else if (auto_cmd_status & SDHCI_AUTO_CMD_TIMEOUT_ERR)
+			host->cmd->error = -ETIMEDOUT;
+		else if (auto_cmd_status & SDHCI_AUTO_CMD_CRC_ERR)
+			host->cmd->error = -EILSEQ;
+	}
+
 	if (host->cmd->error) {
+		/*
+		 * If this command initiates a data phase and a response
+		 * CRC error is signalled, the card can start transferring
+		 * data - the card may have received the command without
+		 * error.  We must not terminate the mmc_request early.
+		 *
+		 * If the card did not receive the command or returned an
+		 * error which prevented it sending data, the data phase
+		 * will time out.
+		 *
+		 * Even in the case of a command INDEX or ENDBIT error, we
+		 * handle it the same way.
+		 */
+		if (host->cmd->data &&
+		    (((intmask & (SDHCI_INT_CRC | SDHCI_INT_TIMEOUT)) ==
+		     SDHCI_INT_CRC) || (host->cmd->error == -EILSEQ))) {
+			host->cmd = NULL;
+			return;
+		}
 		tasklet_schedule(&host->finish_tasklet);
 		return;
 	}
@@ -2426,13 +2994,13 @@
 		struct sdhci_adma2_64_desc *dma_desc = desc;
 
 		if (host->flags & SDHCI_USE_64_BIT_DMA)
-			DBG("%s: %p: DMA 0x%08x%08x, LEN 0x%04x, Attr=0x%02x\n",
+			DBG("%s: %pK: DMA 0x%08x%08x, LEN 0x%04x,Attr=0x%02x\n",
 			    name, desc, le32_to_cpu(dma_desc->addr_hi),
 			    le32_to_cpu(dma_desc->addr_lo),
 			    le16_to_cpu(dma_desc->len),
 			    le16_to_cpu(dma_desc->cmd));
 		else
-			DBG("%s: %p: DMA 0x%08x, LEN 0x%04x, Attr=0x%02x\n",
+			DBG("%s: %pK: DMA 0x%08x, LEN 0x%04x, Attr=0x%02x\n",
 			    name, desc, le32_to_cpu(dma_desc->addr_lo),
 			    le16_to_cpu(dma_desc->len),
 			    le16_to_cpu(dma_desc->cmd));
@@ -2450,13 +3018,17 @@
 static void sdhci_data_irq(struct sdhci_host *host, u32 intmask)
 {
 	u32 command;
+	bool pr_msg = false;
 	BUG_ON(intmask == 0);
 
+	command = SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND));
+	trace_mmc_data_rw_end(command, intmask);
+
 	/* CMD19 generates _only_ Buffer Read Ready interrupt */
 	if (intmask & SDHCI_INT_DATA_AVAIL) {
-		command = SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND));
-		if (command == MMC_SEND_TUNING_BLOCK ||
-		    command == MMC_SEND_TUNING_BLOCK_HS200) {
+		if (!(host->quirks2 & SDHCI_QUIRK2_NON_STANDARD_TUNING) &&
+			(command == MMC_SEND_TUNING_BLOCK ||
+			command == MMC_SEND_TUNING_BLOCK_HS200)) {
 			host->tuning_done = 1;
 			wake_up(&host->buf_ready_int);
 			return;
@@ -2470,11 +3042,6 @@
 		 * above in sdhci_cmd_irq().
 		 */
 		if (host->cmd && (host->cmd->flags & MMC_RSP_BUSY)) {
-			if (intmask & SDHCI_INT_DATA_TIMEOUT) {
-				host->cmd->error = -ETIMEDOUT;
-				tasklet_schedule(&host->finish_tasklet);
-				return;
-			}
 			if (intmask & SDHCI_INT_DATA_END) {
 				/*
 				 * Some cards handle busy-end interrupt
@@ -2487,11 +3054,29 @@
 					host->busy_handle = 1;
 				return;
 			}
+			if (host->quirks2 &
+				SDHCI_QUIRK2_IGNORE_DATATOUT_FOR_R1BCMD) {
+				pr_err_ratelimited("%s: %s: ignoring interrupt: 0x%08x due to DATATOUT_FOR_R1B quirk\n",
+						mmc_hostname(host->mmc),
+						__func__, intmask);
+				MMC_TRACE(host->mmc,
+					"%s: Quirk ignoring intr: 0x%08x\n",
+						__func__, intmask);
+				return;
+			}
+			if (intmask & SDHCI_INT_DATA_TIMEOUT) {
+				host->cmd->error = -ETIMEDOUT;
+				tasklet_schedule(&host->finish_tasklet);
+				return;
+			}
 		}
 
 		pr_err("%s: Got data interrupt 0x%08x even "
 			"though no data operation was in progress.\n",
 			mmc_hostname(host->mmc), (unsigned)intmask);
+		MMC_TRACE(host->mmc,
+		"Got data interrupt 0x%08x even though no data operation was in progress.\n",
+		(unsigned)intmask);
 		sdhci_dumpregs(host);
 
 		return;
@@ -2502,8 +3087,7 @@
 	else if (intmask & SDHCI_INT_DATA_END_BIT)
 		host->data->error = -EILSEQ;
 	else if ((intmask & SDHCI_INT_DATA_CRC) &&
-		SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND))
-			!= MMC_BUS_TEST_R)
+		(command != MMC_BUS_TEST_R))
 		host->data->error = -EILSEQ;
 	else if (intmask & SDHCI_INT_ADMA_ERROR) {
 		pr_err("%s: ADMA error\n", mmc_hostname(host->mmc));
@@ -2512,10 +3096,34 @@
 		if (host->ops->adma_workaround)
 			host->ops->adma_workaround(host, intmask);
 	}
+	if (host->data->error) {
+		if (intmask & (SDHCI_INT_DATA_CRC | SDHCI_INT_DATA_TIMEOUT
+					| SDHCI_INT_DATA_END_BIT)) {
+			command = SDHCI_GET_CMD(sdhci_readw(host,
+							    SDHCI_COMMAND));
+			if ((command != MMC_SEND_TUNING_BLOCK_HS200) &&
+			    (command != MMC_SEND_TUNING_BLOCK))
+				pr_msg = true;
+		} else {
+			pr_msg = true;
+		}
+		if (pr_msg && __ratelimit(&host->dbg_dump_rs)) {
+			pr_err("%s: data txfr (0x%08x) error: %d after %lld ms\n",
+			       mmc_hostname(host->mmc), intmask,
+			       host->data->error, ktime_to_ms(ktime_sub(
+			       ktime_get(), host->data_start_time)));
+			MMC_TRACE(host->mmc,
+				"data txfr (0x%08x) error: %d after %lld ms\n",
+				intmask, host->data->error,
+				ktime_to_ms(ktime_sub(ktime_get(),
+				host->data_start_time)));
 
-	if (host->data->error)
+			if (!host->mmc->sdr104_wa ||
+			    (host->mmc->ios.timing != MMC_TIMING_UHS_SDR104))
+				sdhci_dumpregs(host);
+		}
 		sdhci_finish_data(host);
-	else {
+	} else {
 		if (intmask & (SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL))
 			sdhci_transfer_pio(host);
 
@@ -2561,6 +3169,58 @@
 	}
 }
 
+#ifdef CONFIG_MMC_CQ_HCI
+static int sdhci_get_cmd_err(u32 intmask)
+{
+	if (intmask & SDHCI_INT_TIMEOUT)
+		return -ETIMEDOUT;
+	else if (intmask & (SDHCI_INT_CRC | SDHCI_INT_END_BIT |
+			    SDHCI_INT_INDEX))
+		return -EILSEQ;
+	return 0;
+}
+
+static int sdhci_get_data_err(u32 intmask)
+{
+	if (intmask & SDHCI_INT_DATA_TIMEOUT)
+		return -ETIMEDOUT;
+	else if (intmask & (SDHCI_INT_DATA_END_BIT | SDHCI_INT_DATA_CRC))
+		return -EILSEQ;
+	else if (intmask & SDHCI_INT_ADMA_ERROR)
+		return -EIO;
+	return 0;
+}
+
+static irqreturn_t sdhci_cmdq_irq(struct sdhci_host *host, u32 intmask)
+{
+	int err = 0;
+	u32 mask = 0;
+	irqreturn_t ret;
+
+	if (intmask & SDHCI_INT_CMD_MASK)
+		err = sdhci_get_cmd_err(intmask);
+	else if (intmask & SDHCI_INT_DATA_MASK)
+		err = sdhci_get_data_err(intmask);
+
+	ret = cmdq_irq(host->mmc, err);
+	if (err) {
+		/* Clear the error interrupts */
+		mask = intmask & SDHCI_INT_ERROR_MASK;
+		sdhci_writel(host, mask, SDHCI_INT_STATUS);
+	}
+	return ret;
+
+}
+
+#else
+static irqreturn_t sdhci_cmdq_irq(struct sdhci_host *host, u32 intmask)
+{
+	pr_err("%s: Received cmdq-irq when disabled !!!!\n",
+		mmc_hostname(host->mmc));
+	return IRQ_NONE;
+}
+#endif
+
 static irqreturn_t sdhci_irq(int irq, void *dev_id)
 {
 	irqreturn_t result = IRQ_NONE;
@@ -2575,6 +3235,31 @@
 		return IRQ_NONE;
 	}
 
+	if (!host->clock && host->mmc->card &&
+			mmc_card_sdio(host->mmc->card)) {
+		if (!mmc_card_and_host_support_async_int(host->mmc)) {
+			spin_unlock(&host->lock);
+			return IRQ_NONE;
+		}
+		/*
+		 * The async card interrupt is level sensitive and can be
+		 * received while clocks are off.
+		 * If the SDIO card has asserted an async interrupt, we
+		 * need to disable host->irq here; later we can disable
+		 * the card interrupt and re-enable host->irq.
+		 */
+
+		pr_debug("%s: %s: sdio_async intr. received\n",
+				mmc_hostname(host->mmc), __func__);
+		sdhci_cfg_irq(host, false, false);
+		host->sdio_irq_async_status = true;
+		host->thread_isr |= SDHCI_INT_CARD_INT;
+		result = IRQ_WAKE_THREAD;
+		spin_unlock(&host->lock);
+		return result;
+	}
+
 	intmask = sdhci_readl(host, SDHCI_INT_STATUS);
 	if (!intmask || intmask == 0xffffffff) {
 		result = IRQ_NONE;
@@ -2582,6 +3267,22 @@
 	}
 
 	do {
+		if (host->mmc->card && mmc_card_cmdq(host->mmc->card) &&
+		!mmc_host_halt(host->mmc) && !mmc_host_cq_disable(host->mmc)) {
+			pr_debug("*** %s: cmdq intr: 0x%08x\n",
+					mmc_hostname(host->mmc),
+					intmask);
+			result = sdhci_cmdq_irq(host, intmask);
+			if (result == IRQ_HANDLED)
+				goto out;
+		}
+
+		MMC_TRACE(host->mmc,
+			"%s: intmask: 0x%x\n", __func__, intmask);
+
+		if (intmask & SDHCI_INT_AUTO_CMD_ERR)
+			host->auto_cmd_err_sts = sdhci_readw(host,
+					SDHCI_AUTO_CMD_ERR);
 		/* Clear selected interrupts. */
 		mask = intmask & (SDHCI_INT_CMD_MASK | SDHCI_INT_DATA_MASK |
 				  SDHCI_INT_BUS_POWER);
@@ -2620,12 +3321,20 @@
 			result = IRQ_WAKE_THREAD;
 		}
 
-		if (intmask & SDHCI_INT_CMD_MASK)
+		if (intmask & SDHCI_INT_CMD_MASK) {
+			if ((host->quirks2 & SDHCI_QUIRK2_SLOW_INT_CLR) &&
+				(host->clock <= 400000))
+				udelay(40);
 			sdhci_cmd_irq(host, intmask & SDHCI_INT_CMD_MASK,
 				      &intmask);
+		}
 
-		if (intmask & SDHCI_INT_DATA_MASK)
+		if (intmask & SDHCI_INT_DATA_MASK) {
+			if ((host->quirks2 & SDHCI_QUIRK2_SLOW_INT_CLR) &&
+			    (host->clock <= 400000))
+				udelay(40);
 			sdhci_data_irq(host, intmask & SDHCI_INT_DATA_MASK);
+		}
 
 		if (intmask & SDHCI_INT_BUS_POWER)
 			pr_err("%s: Card is consuming too much power!\n",
@@ -2659,6 +3368,8 @@
 	if (unexpected) {
 		pr_err("%s: Unexpected interrupt 0x%08x.\n",
 			   mmc_hostname(host->mmc), unexpected);
+		MMC_TRACE(host->mmc, "Unexpected interrupt 0x%08x.\n",
+				unexpected);
 		sdhci_dumpregs(host);
 	}
 
@@ -2685,8 +3396,11 @@
 		sdio_run_irqs(host->mmc);
 
 		spin_lock_irqsave(&host->lock, flags);
-		if (host->flags & SDHCI_SDIO_IRQ_ENABLED)
+		if (host->flags & SDHCI_SDIO_IRQ_ENABLED) {
+			if (host->sdio_irq_async_status)
+				host->sdio_irq_async_status = false;
 			sdhci_enable_sdio_irq_nolock(host, true);
+		}
 		spin_unlock_irqrestore(&host->lock, flags);
 	}
 
@@ -2903,11 +3617,255 @@
 	host->mmc_host_ops = sdhci_ops;
 	mmc->ops = &host->mmc_host_ops;
 
+	spin_lock_init(&host->lock);
+	ratelimit_state_init(&host->dbg_dump_rs, SDHCI_DBG_DUMP_RS_INTERVAL,
+			SDHCI_DBG_DUMP_RS_BURST);
+
 	return host;
 }
 
 EXPORT_SYMBOL_GPL(sdhci_alloc_host);
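
The ratelimit_state initialized above is what gates the register dumps in sdhci_data_irq(); the underlying idiom as a standalone sketch (the interval and burst values here are illustrative, not the SDHCI_DBG_DUMP_RS_* constants this patch uses):

    #include <linux/ratelimit.h>

    /* Allow at most 2 dumps per 5 seconds; excess calls return false. */
    static DEFINE_RATELIMIT_STATE(dbg_dump_rs, 5 * HZ, 2);

    static void maybe_dump(struct sdhci_host *host)
    {
            if (__ratelimit(&dbg_dump_rs))
                    sdhci_dumpregs(host);
    }
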
 
+#ifdef CONFIG_MMC_CQ_HCI
+static void sdhci_cmdq_set_transfer_params(struct mmc_host *mmc)
+{
+	struct sdhci_host *host = mmc_priv(mmc);
+	u8 ctrl;
+
+	if (host->version >= SDHCI_SPEC_200) {
+		ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
+		ctrl &= ~SDHCI_CTRL_DMA_MASK;
+		if (host->flags & SDHCI_USE_64_BIT_DMA)
+			ctrl |= SDHCI_CTRL_ADMA64;
+		else
+			ctrl |= SDHCI_CTRL_ADMA32;
+		sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
+	}
+	if (host->ops->toggle_cdr && !host->cdr_support)
+		host->ops->toggle_cdr(host, false);
+}
+
+static void sdhci_cmdq_clear_set_irqs(struct mmc_host *mmc, bool clear)
+{
+	struct sdhci_host *host = mmc_priv(mmc);
+	u32 ier = 0;
+
+	ier &= ~SDHCI_INT_ALL_MASK;
+
+	if (clear) {
+		ier = SDHCI_INT_CMDQ_EN | SDHCI_INT_ERROR_MASK;
+		sdhci_writel(host, ier, SDHCI_INT_ENABLE);
+		sdhci_writel(host, ier, SDHCI_SIGNAL_ENABLE);
+	} else {
+		ier = SDHCI_INT_BUS_POWER | SDHCI_INT_DATA_END_BIT |
+			     SDHCI_INT_DATA_CRC | SDHCI_INT_DATA_TIMEOUT |
+			     SDHCI_INT_INDEX | SDHCI_INT_END_BIT |
+			     SDHCI_INT_CRC | SDHCI_INT_TIMEOUT |
+			     SDHCI_INT_DATA_END | SDHCI_INT_RESPONSE |
+			     SDHCI_INT_AUTO_CMD_ERR;
+		sdhci_writel(host, ier, SDHCI_INT_ENABLE);
+		sdhci_writel(host, ier, SDHCI_SIGNAL_ENABLE);
+	}
+}
+
+static void sdhci_cmdq_set_data_timeout(struct mmc_host *mmc, u32 val)
+{
+	struct sdhci_host *host = mmc_priv(mmc);
+
+	sdhci_writeb(host, val, SDHCI_TIMEOUT_CONTROL);
+}
+
+static void sdhci_cmdq_dump_vendor_regs(struct mmc_host *mmc)
+{
+	struct sdhci_host *host = mmc_priv(mmc);
+
+	sdhci_dumpregs(host);
+}
+
+static int sdhci_cmdq_init(struct sdhci_host *host, struct mmc_host *mmc,
+			   bool dma64)
+{
+	return cmdq_init(host->cq_host, mmc, dma64);
+}
+
+static void sdhci_cmdq_set_block_size(struct mmc_host *mmc)
+{
+	struct sdhci_host *host = mmc_priv(mmc);
+
+	sdhci_set_blk_size_reg(host, 512, 0);
+}
+
+static void sdhci_enhanced_strobe_mask(struct mmc_host *mmc, bool set)
+{
+	struct sdhci_host *host = mmc_priv(mmc);
+
+	if (host->ops->enhanced_strobe_mask)
+		host->ops->enhanced_strobe_mask(host, set);
+}
+
+static void sdhci_cmdq_clear_set_dumpregs(struct mmc_host *mmc, bool set)
+{
+	struct sdhci_host *host = mmc_priv(mmc);
+
+	if (host->ops->clear_set_dumpregs)
+		host->ops->clear_set_dumpregs(host, set);
+}
+static int sdhci_cmdq_crypto_cfg(struct mmc_host *mmc,
+		struct mmc_request *mrq, u32 slot, u64 *ice_ctx)
+{
+	struct sdhci_host *host = mmc_priv(mmc);
+	int err = 0;
+
+	if (!host->is_crypto_en)
+		return 0;
+
+	if (host->crypto_reset_reqd && host->ops->crypto_engine_reset) {
+		err = host->ops->crypto_engine_reset(host);
+		if (err) {
+			pr_err("%s: crypto reset failed\n",
+					mmc_hostname(host->mmc));
+			goto out;
+		}
+		host->crypto_reset_reqd = false;
+	}
+
+	if (host->ops->crypto_engine_cmdq_cfg) {
+		err = host->ops->crypto_engine_cmdq_cfg(host, mrq,
+				slot, ice_ctx);
+		if (err) {
+			pr_err("%s: failed to configure crypto\n",
+					mmc_hostname(host->mmc));
+			goto out;
+		}
+	}
+out:
+	return err;
+}
+
+static int sdhci_cmdq_crypto_cfg_end(struct mmc_host *mmc,
+					struct mmc_request *mrq)
+{
+	struct sdhci_host *host = mmc_priv(mmc);
+
+	if (!host->is_crypto_en)
+		return 0;
+
+	return sdhci_crypto_cfg_end(host, mrq);
+}
+
+static void sdhci_cmdq_crypto_cfg_reset(struct mmc_host *mmc, unsigned int slot)
+{
+	struct sdhci_host *host = mmc_priv(mmc);
+
+	if (!host->is_crypto_en)
+		return;
+
+	if (host->ops->crypto_cfg_reset)
+		host->ops->crypto_cfg_reset(host, slot);
+}
+
+static void sdhci_cmdq_post_cqe_halt(struct mmc_host *mmc)
+{
+	struct sdhci_host *host = mmc_priv(mmc);
+
+	sdhci_writel(host, sdhci_readl(host, SDHCI_INT_ENABLE) |
+			SDHCI_INT_RESPONSE, SDHCI_INT_ENABLE);
+	sdhci_writel(host, SDHCI_INT_RESPONSE, SDHCI_INT_STATUS);
+}
+#else
+static void sdhci_cmdq_set_transfer_params(struct mmc_host *mmc)
+{
+
+}
+static void sdhci_cmdq_clear_set_irqs(struct mmc_host *mmc, bool clear)
+{
+
+}
+
+static void sdhci_cmdq_set_data_timeout(struct mmc_host *mmc, u32 val)
+{
+
+}
+
+static void sdhci_cmdq_dump_vendor_regs(struct mmc_host *mmc)
+{
+
+}
+
+static int sdhci_cmdq_init(struct sdhci_host *host, struct mmc_host *mmc,
+			   bool dma64)
+{
+	return -ENOSYS;
+}
+
+static void sdhci_cmdq_set_block_size(struct mmc_host *mmc)
+{
+
+}
+
+static void sdhci_enhanced_strobe_mask(struct mmc_host *mmc, bool set)
+{
+
+}
+
+static void sdhci_cmdq_clear_set_dumpregs(struct mmc_host *mmc, bool set)
+{
+
+}
+static int sdhci_cmdq_crypto_cfg(struct mmc_host *mmc,
+		struct mmc_request *mrq, u32 slot, u64 *ice_ctx)
+{
+	return 0;
+}
+
+static int sdhci_cmdq_crypto_cfg_end(struct mmc_host *mmc,
+				struct mmc_request *mrq)
+{
+	return 0;
+}
+
+static void sdhci_cmdq_crypto_cfg_reset(struct mmc_host *mmc, unsigned int slot)
+{
+
+}
+static void sdhci_cmdq_post_cqe_halt(struct mmc_host *mmc)
+{
+}
+#endif
+
+static const struct cmdq_host_ops sdhci_cmdq_ops = {
+	.clear_set_irqs = sdhci_cmdq_clear_set_irqs,
+	.set_data_timeout = sdhci_cmdq_set_data_timeout,
+	.dump_vendor_regs = sdhci_cmdq_dump_vendor_regs,
+	.set_block_size = sdhci_cmdq_set_block_size,
+	.clear_set_dumpregs = sdhci_cmdq_clear_set_dumpregs,
+	.enhanced_strobe_mask = sdhci_enhanced_strobe_mask,
+	.crypto_cfg	= sdhci_cmdq_crypto_cfg,
+	.crypto_cfg_end	= sdhci_cmdq_crypto_cfg_end,
+	.crypto_cfg_reset	= sdhci_cmdq_crypto_cfg_reset,
+	.post_cqe_halt = sdhci_cmdq_post_cqe_halt,
+	.set_transfer_params = sdhci_cmdq_set_transfer_params,
+};
+
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+static int sdhci_is_adma2_64bit(struct sdhci_host *host)
+{
+	u32 caps;
+
+	caps = (host->quirks & SDHCI_QUIRK_MISSING_CAPS) ? host->caps :
+		sdhci_readl(host, SDHCI_CAPABILITIES);
+
+	if (caps & SDHCI_CAN_64BIT)
+		return 1;
+	return 0;
+}
+#else
+static int sdhci_is_adma2_64bit(struct sdhci_host *host)
+{
+	return 0;
+}
+#endif
+
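
sdhci_is_adma2_64bit() honours SDHCI_QUIRK_MISSING_CAPS, so a glue driver whose capability register is known to be wrong can substitute a corrected value before calling sdhci_add_host(); a sketch:

    /* Sketch: advertise corrected capabilities instead of the broken
     * hardware register, e.g. to mask out 64-bit system-bus support.
     */
    host->quirks |= SDHCI_QUIRK_MISSING_CAPS;
    host->caps = sdhci_readl(host, SDHCI_CAPABILITIES) & ~SDHCI_CAN_64BIT;
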
 int sdhci_add_host(struct sdhci_host *host)
 {
 	struct mmc_host *mmc;
@@ -2980,7 +3938,7 @@
 	 * SDHCI_QUIRK2_BROKEN_64_BIT_DMA must be left to the drivers to
 	 * implement.
 	 */
-	if (sdhci_readl(host, SDHCI_CAPABILITIES) & SDHCI_CAN_64BIT)
+	if (sdhci_is_adma2_64bit(host))
 		host->flags |= SDHCI_USE_64_BIT_DMA;
 
 	if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
@@ -3135,6 +4093,9 @@
 	mmc->caps |= MMC_CAP_SDIO_IRQ | MMC_CAP_ERASE | MMC_CAP_CMD23;
 	mmc->caps2 |= MMC_CAP2_SDIO_IRQ_NOTHREAD;
 
+	if (caps[0] & SDHCI_CAN_ASYNC_INT)
+		mmc->caps2 |= MMC_CAP2_ASYNC_SDIO_IRQ_4BIT_MODE;
+
 	if (host->quirks & SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12)
 		host->flags |= SDHCI_AUTO_CMD12;
 
@@ -3167,7 +4128,8 @@
 
 	if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) &&
 	    !(mmc->caps & MMC_CAP_NONREMOVABLE) &&
-	    IS_ERR_VALUE(mmc_gpio_get_cd(host->mmc)))
+	    (IS_ERR_VALUE(mmc_gpio_get_cd(host->mmc)) &&
+	    !(mmc->caps2 & MMC_CAP2_NONHOTPLUG)))
 		mmc->caps |= MMC_CAP_NEEDS_POLL;
 
 	/* If there are external regulators, get them */
@@ -3264,10 +4226,15 @@
 	 * value.
 	 */
 	max_current_caps = sdhci_readl(host, SDHCI_MAX_CURRENT);
-	if (!max_current_caps && !IS_ERR(mmc->supply.vmmc)) {
-		int curr = regulator_get_current_limit(mmc->supply.vmmc);
-		if (curr > 0) {
+	if (!max_current_caps) {
+		u32 curr = 0;
+
+		if (!IS_ERR(mmc->supply.vmmc))
+			curr = regulator_get_current_limit(mmc->supply.vmmc);
+		else if (host->ops->get_current_limit)
+			curr = host->ops->get_current_limit(host);
 
+		if (curr > 0) {
 			/* convert to SDHCI_MAX_CURRENT format */
 			curr = curr/1000;  /* convert to mA */
 			curr = curr/SDHCI_MAX_CURRENT_MULTIPLIER;
@@ -3332,8 +4299,6 @@
 		return -ENODEV;
 	}
 
-	spin_lock_init(&host->lock);
-
 	/*
 	 * Maximum number of segments. Depends on if the hardware
 	 * can do scatter/gather or not.
@@ -3399,6 +4364,8 @@
 
 	init_waitqueue_head(&host->buf_ready_int);
 
+	host->flags |= SDHCI_HOST_IRQ_STATUS;
+
 	sdhci_init(host, 0);
 
 	ret = request_threaded_irq(host->irq, sdhci_irq, sdhci_thread_irq,
@@ -3414,6 +4381,7 @@
 #endif
 
 #ifdef SDHCI_USE_LEDS_CLASS
+	if (!(host->quirks2 & SDHCI_QUIRK2_BROKEN_LED_CONTROL)) {
 	snprintf(host->led_name, sizeof(host->led_name),
 		"%s::", mmc_hostname(mmc));
 	host->led.name = host->led_name;
@@ -3427,20 +4395,40 @@
 		       mmc_hostname(mmc), ret);
 		goto reset;
 	}
+	}
 #endif
 
 	mmiowb();
 
-	mmc_add_host(mmc);
+	if (host->quirks2 & SDHCI_QUIRK2_IGN_DATA_END_BIT_ERROR) {
+		host->ier = (host->ier & ~SDHCI_INT_DATA_END_BIT);
+		sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
+		sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
+	}
+
+	if (mmc->caps2 & MMC_CAP2_CMD_QUEUE) {
+		bool dma64 = (host->flags & SDHCI_USE_64_BIT_DMA) ?
+			true : false;
+		ret = sdhci_cmdq_init(host, mmc, dma64);
+		if (ret)
+			pr_err("%s: CMDQ init: failed (%d)\n",
+			       mmc_hostname(host->mmc), ret);
+		else
+			host->cq_host->ops = &sdhci_cmdq_ops;
+	}
 
-	pr_info("%s: SDHCI controller on %s [%s] using %s\n",
+	pr_info("%s: SDHCI controller on %s [%s] using %s in %s mode\n",
 		mmc_hostname(mmc), host->hw_name, dev_name(mmc_dev(mmc)),
 		(host->flags & SDHCI_USE_ADMA) ?
-		(host->flags & SDHCI_USE_64_BIT_DMA) ? "ADMA 64-bit" : "ADMA" :
-		(host->flags & SDHCI_USE_SDMA) ? "DMA" : "PIO");
+		((host->flags & SDHCI_USE_64_BIT_DMA) ?
+		"64-bit ADMA" : "32-bit ADMA") :
+		((host->flags & SDHCI_USE_SDMA) ? "DMA" : "PIO"),
+		((mmc->caps2 & MMC_CAP2_CMD_QUEUE) && !ret) ?
+		"CMDQ" : "legacy");
 
 	sdhci_enable_card_detection(host);
 
+	mmc_add_host(mmc);
 	return 0;
 
 #ifdef SDHCI_USE_LEDS_CLASS
@@ -3481,9 +4469,10 @@
 
 	sdhci_disable_card_detection(host);
 
-	mmc_remove_host(mmc);
+	mmc_remove_host(host->mmc);
 
 #ifdef SDHCI_USE_LEDS_CLASS
+	if (!(host->quirks2 & SDHCI_QUIRK2_BROKEN_LED_CONTROL))
 	led_classdev_unregister(&host->led);
 #endif
 
diff -ruw linux-4.4.115/drivers/mmc/host/sdhci.h linux-4.4.115-fbx/drivers/mmc/host/sdhci.h
--- linux-4.4.115/drivers/mmc/host/sdhci.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/mmc/host/sdhci.h	2019-10-29 09:26:24.077207425 +0100
@@ -17,7 +17,7 @@
 #include <linux/compiler.h>
 #include <linux/types.h>
 #include <linux/io.h>
-
+#include <linux/ratelimit.h>
 #include <linux/mmc/host.h>
 
 /*
@@ -137,22 +137,32 @@
 #define  SDHCI_INT_DATA_CRC	0x00200000
 #define  SDHCI_INT_DATA_END_BIT	0x00400000
 #define  SDHCI_INT_BUS_POWER	0x00800000
-#define  SDHCI_INT_ACMD12ERR	0x01000000
+#define  SDHCI_INT_AUTO_CMD_ERR	0x01000000
 #define  SDHCI_INT_ADMA_ERROR	0x02000000
 
 #define  SDHCI_INT_NORMAL_MASK	0x00007FFF
 #define  SDHCI_INT_ERROR_MASK	0xFFFF8000
 
 #define  SDHCI_INT_CMD_MASK	(SDHCI_INT_RESPONSE | SDHCI_INT_TIMEOUT | \
-		SDHCI_INT_CRC | SDHCI_INT_END_BIT | SDHCI_INT_INDEX)
+		SDHCI_INT_CRC | SDHCI_INT_END_BIT | SDHCI_INT_INDEX | \
+				 SDHCI_INT_AUTO_CMD_ERR)
+
 #define  SDHCI_INT_DATA_MASK	(SDHCI_INT_DATA_END | SDHCI_INT_DMA_END | \
 		SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL | \
 		SDHCI_INT_DATA_TIMEOUT | SDHCI_INT_DATA_CRC | \
 		SDHCI_INT_DATA_END_BIT | SDHCI_INT_ADMA_ERROR | \
 		SDHCI_INT_BLK_GAP)
+
+#define SDHCI_INT_CMDQ_EN	(0x1 << 14)
 #define SDHCI_INT_ALL_MASK	((unsigned int)-1)
 
-#define SDHCI_ACMD12_ERR	0x3C
+#define SDHCI_AUTO_CMD_ERR		0x3C
+#define SDHCI_AUTO_CMD12_NOT_EXEC	0x0001
+#define SDHCI_AUTO_CMD_TIMEOUT_ERR	0x0002
+#define SDHCI_AUTO_CMD_CRC_ERR		0x0004
+#define SDHCI_AUTO_CMD_ENDBIT_ERR	0x0008
+#define SDHCI_AUTO_CMD_INDEX_ERR	0x0010
+#define SDHCI_AUTO_CMD12_NOT_ISSUED	0x0080
 
 #define SDHCI_HOST_CONTROL2		0x3E
 #define  SDHCI_CTRL_UHS_MASK		0x0007
@@ -170,6 +180,7 @@
 #define   SDHCI_CTRL_DRV_TYPE_D		0x0030
 #define  SDHCI_CTRL_EXEC_TUNING		0x0040
 #define  SDHCI_CTRL_TUNED_CLK		0x0080
+#define  SDHCI_CTRL_ASYNC_INT_ENABLE	0x4000
 #define  SDHCI_CTRL_PRESET_VAL_ENABLE	0x8000
 
 #define SDHCI_CAPABILITIES	0x40
@@ -190,6 +201,7 @@
 #define  SDHCI_CAN_VDD_300	0x02000000
 #define  SDHCI_CAN_VDD_180	0x04000000
 #define  SDHCI_CAN_64BIT	0x10000000
+#define  SDHCI_CAN_ASYNC_INT	0x20000000
 
 #define  SDHCI_SUPPORT_SDR50	0x00000001
 #define  SDHCI_SUPPORT_SDR104	0x00000002
@@ -320,6 +332,12 @@
 	COOKIE_GIVEN,
 };
 
+enum sdhci_power_policy {
+	SDHCI_PERFORMANCE_MODE,
+	SDHCI_POWER_SAVE_MODE,
+	SDHCI_POWER_POLICY_NUM /* Always keep this one last */
+};
+
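
The power policy presumably feeds the notify_load hook declared later in this header; one plausible mapping, sketched under the assumption that this tree's enum mmc_load provides MMC_LOAD_HIGH/MMC_LOAD_LOW values:

    /* Sketch: map the MMC core's load hint onto a power policy.
     * MMC_LOAD_HIGH is assumed from this tree's enum mmc_load.
     */
    static int my_notify_load(struct sdhci_host *host, enum mmc_load state)
    {
            host->power_policy = (state == MMC_LOAD_HIGH) ?
                    SDHCI_PERFORMANCE_MODE : SDHCI_POWER_SAVE_MODE;
            return 0;
    }
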
 struct sdhci_host {
 	/* Data set by hardware interface driver */
 	const char *hw_name;	/* Hardware bus name */
@@ -423,6 +441,84 @@
  */
 #define SDHCI_QUIRK2_NEED_DELAY_AFTER_INT_CLK_RST	(1<<16)
 
+/*
+ * Read Transfer Active/Write Transfer Active may not be de-asserted
+ * after the end of a transaction. Issue a reset for the DAT line.
+ */
+#define SDHCI_QUIRK2_RDWR_TX_ACTIVE_EOT                 (1<<17)
+/*
+ * Slow interrupt clearance at 400KHz may cause the host controller
+ * driver's interrupt handler to be called twice.
+ */
+#define SDHCI_QUIRK2_SLOW_INT_CLR			(1<<18)
+
+/*
+ * If the base clock is scalable, then there should be no further
+ * clock division, as the input clock itself will be scaled down to
+ * the required frequency.
+ */
+#define SDHCI_QUIRK2_ALWAYS_USE_BASE_CLOCK		(1<<19)
+
+/*
+ * Ignore data timeout errors for R1B commands as there will be no
+ * data associated, and the busy timeout value for these commands
+ * could be larger than the maximum timeout value that the controller
+ * can handle.
+ */
+#define SDHCI_QUIRK2_IGNORE_DATATOUT_FOR_R1BCMD		(1<<20)
+
+/*
+ * The preset value registers are not properly initialized by
+ * some hardware and hence preset value must not be enabled for
+ * such controllers.
+ */
+#define SDHCI_QUIRK2_BROKEN_PRESET_VALUE		(1<<21)
+/*
+ * Some controllers define the use of 0xF in the data timeout counter
+ * register (0x2E), which is actually a reserved value as per the
+ * specification.
+ */
+#define SDHCI_QUIRK2_USE_RESERVED_MAX_TIMEOUT		(1<<22)
+/*
+ * This is applicable to controllers that advertise a timeout clock
+ * value in the capabilities register (bits 5-0) of just 50MHz whereas
+ * the base clock frequency is 200MHz. The controller internally
+ * multiplies the value in the timeout control register by 4 on the
+ * assumption that the driver always uses the fixed timeout clock value
+ * from the capabilities register to calculate the timeout. But when the
+ * driver uses SDHCI_QUIRK2_ALWAYS_USE_BASE_CLOCK, the base clock frequency
+ * is directly controlled by the driver and its rate varies up to a maximum
+ * of 200MHz. This quirk is used in such cases to avoid the controller's
+ * multiplication when the timeout is calculated from the base clock.
+ */
+#define SDHCI_QUIRK2_DIVIDE_TOUT_BY_4 (1 << 23)
+
+/*
+ * Some SDHC controllers are unable to handle a data-end bit error in
+ * 1-bit SDIO mode.
+ */
+#define SDHCI_QUIRK2_IGN_DATA_END_BIT_ERROR             (1<<24)
+
+/* Controller has nonstandard clock management */
+#define SDHCI_QUIRK_NONSTANDARD_CLOCK			(1<<25)
+/* Use reset workaround in case sdhci reset timeouts */
+#define SDHCI_QUIRK2_USE_RESET_WORKAROUND		(1<<26)
+/* Some controllers don't have any LED control */
+#define SDHCI_QUIRK2_BROKEN_LED_CONTROL			(1<<27)
+/*
+ * Some controllers don't follow the tuning procedure defined in the spec.
+ * The tuning data has to be compared by the SW driver to validate the
+ * correct phase.
+ */
+#define SDHCI_QUIRK2_NON_STANDARD_TUNING (1 << 28)
+/*
+ * Some controllers may use PIO mode to workaround HW issues in ADMA for
+ * eMMC tuning commands.
+ */
+#define SDHCI_QUIRK2_USE_PIO_FOR_EMMC_TUNING (1 << 29)
+
+
 	int irq;		/* Device IRQ */
 	void __iomem *ioaddr;	/* Mapped address */
 
@@ -432,6 +528,7 @@
 	struct mmc_host *mmc;	/* MMC structure */
 	struct mmc_host_ops mmc_host_ops;	/* MMC host ops */
 	u64 dma_mask;		/* custom DMA mask */
+	u64 coherent_dma_mask;
 
 #if defined(CONFIG_LEDS_CLASS) || defined(CONFIG_LEDS_CLASS_MODULE)
 	struct led_classdev led;	/* LED control */
@@ -453,6 +550,7 @@
 #define SDHCI_SDR104_NEEDS_TUNING (1<<10)	/* SDR104/HS200 needs tuning */
 #define SDHCI_USE_64_BIT_DMA	(1<<12)	/* Use 64-bit DMA */
 #define SDHCI_HS400_TUNING	(1<<13)	/* Tuning for HS400 */
+#define SDHCI_HOST_IRQ_STATUS	 (1<<14) /* host->irq status */
 
 	unsigned int version;	/* SDHCI spec. version */
 
@@ -466,6 +564,7 @@
 	bool runtime_suspended;	/* Host is runtime suspended */
 	bool bus_on;		/* Bus power prevents runtime suspend */
 	bool preset_enabled;	/* Preset is enabled */
+	bool cdr_support;
 
 	struct mmc_request *mrq;	/* Current request */
 	struct mmc_command *cmd;	/* Current command */
@@ -514,6 +613,20 @@
 	unsigned int		tuning_count;	/* Timer count for re-tuning */
 	unsigned int		tuning_mode;	/* Re-tuning mode supported by host */
 #define SDHCI_TUNING_MODE_1	0
+	ktime_t data_start_time;
+
+	enum sdhci_power_policy power_policy;
+
+	bool is_crypto_en;
+	bool crypto_reset_reqd;
+	bool sdio_irq_async_status;
+
+	u32 auto_cmd_err_sts;
+	struct ratelimit_state dbg_dump_rs;
+	struct cmdq_host *cq_host;
+	int reset_wa_applied; /* reset workaround status */
+	ktime_t reset_wa_t; /* time when the reset workaround is applied */
+	int reset_wa_cnt; /* total number of times workaround is used */
 
 	unsigned long private[0] ____cacheline_aligned;
 };
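
The zero-length private[] member is the usual co-allocation trick: sdhci_alloc_host() is passed the size of the glue driver's private struct, and sdhci_priv() returns a pointer to that trailing, cacheline-aligned area. A sketch:

    struct my_priv {                /* hypothetical glue-driver state */
            struct clk *core_clk;
    };

    struct sdhci_host *host = sdhci_alloc_host(dev, sizeof(struct my_priv));
    struct my_priv *priv = sdhci_priv(host);   /* points at host->private */
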
@@ -543,16 +656,46 @@
 	unsigned int    (*get_ro)(struct sdhci_host *host);
 	void		(*reset)(struct sdhci_host *host, u8 mask);
 	int	(*platform_execute_tuning)(struct sdhci_host *host, u32 opcode);
+	int	(*crypto_engine_cfg)(struct sdhci_host *host,
+				struct mmc_request *mrq, u32 slot);
+	int	(*crypto_engine_cmdq_cfg)(struct sdhci_host *host,
+			struct mmc_request *mrq, u32 slot, u64 *ice_ctx);
+	int	(*crypto_engine_cfg_end)(struct sdhci_host *host,
+					struct mmc_request *mrq);
+	int	(*crypto_engine_reset)(struct sdhci_host *host);
+	void	(*crypto_cfg_reset)(struct sdhci_host *host, unsigned int slot);
 	void	(*set_uhs_signaling)(struct sdhci_host *host, unsigned int uhs);
 	void	(*hw_reset)(struct sdhci_host *host);
 	void    (*adma_workaround)(struct sdhci_host *host, u32 intmask);
+	unsigned int	(*get_max_segments)(void);
 	void	(*platform_init)(struct sdhci_host *host);
+#define REQ_BUS_OFF	(1 << 0)
+#define REQ_BUS_ON	(1 << 1)
+#define REQ_IO_LOW	(1 << 2)
+#define REQ_IO_HIGH	(1 << 3)
 	void    (*card_event)(struct sdhci_host *host);
+	int	(*enhanced_strobe)(struct sdhci_host *host);
+	void	(*platform_bus_voting)(struct sdhci_host *host, u32 enable);
+	void	(*check_power_status)(struct sdhci_host *host, u32 req_type);
+	int	(*config_auto_tuning_cmd)(struct sdhci_host *host,
+					  bool enable,
+					  u32 type);
+	int	(*enable_controller_clock)(struct sdhci_host *host);
+	void	(*clear_set_dumpregs)(struct sdhci_host *host, bool set);
+	void	(*enhanced_strobe_mask)(struct sdhci_host *host, bool set);
+	void	(*dump_vendor_regs)(struct sdhci_host *host);
+	void	(*toggle_cdr)(struct sdhci_host *host, bool enable);
 	void	(*voltage_switch)(struct sdhci_host *host);
 	int	(*select_drive_strength)(struct sdhci_host *host,
 					 struct mmc_card *card,
 					 unsigned int max_dtr, int host_drv,
 					 int card_drv, int *drv_type);
+	int	(*notify_load)(struct sdhci_host *host, enum mmc_load state);
+	void	(*reset_workaround)(struct sdhci_host *host, u32 enable);
+	void	(*init)(struct sdhci_host *host);
+	void	(*pre_req)(struct sdhci_host *host, struct mmc_request *req);
+	void	(*post_req)(struct sdhci_host *host, struct mmc_request *req);
+	unsigned int	(*get_current_limit)(struct sdhci_host *host);
 };
 
 #ifdef CONFIG_MMC_SDHCI_IO_ACCESSORS
@@ -672,4 +815,5 @@
 extern int sdhci_runtime_resume_host(struct sdhci_host *host);
 #endif
 
+void sdhci_cfg_irq(struct sdhci_host *host, bool enable, bool sync);
 #endif /* __SDHCI_HW_H */
diff -ruw linux-4.4.115/drivers/mmc/host/sdhci-msm.c linux-4.4.115-fbx/drivers/mmc/host/sdhci-msm.c
--- linux-4.4.115/drivers/mmc/host/sdhci-msm.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/mmc/host/sdhci-msm.c	2019-10-29 09:26:24.073207386 +0100
@@ -1,7 +1,8 @@
 /*
- * drivers/mmc/host/sdhci-msm.c - Qualcomm SDHCI Platform driver
+ * drivers/mmc/host/sdhci-msm.c - Qualcomm Technologies, Inc. MSM SDHCI Platform
+ * driver source file
  *
- * Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -15,97 +16,498 @@
  */
 
 #include <linux/module.h>
+#include <linux/mmc/host.h>
+#include <linux/mmc/card.h>
+#include <linux/mmc/sdio_func.h>
+#include <linux/gfp.h>
+#include <linux/of.h>
 #include <linux/of_device.h>
+#include <linux/of_gpio.h>
+#include <linux/regulator/consumer.h>
+#include <linux/types.h>
+#include <linux/input.h>
+#include <linux/platform_device.h>
+#include <linux/wait.h>
+#include <linux/io.h>
 #include <linux/delay.h>
-#include <linux/mmc/mmc.h>
+#include <linux/scatterlist.h>
 #include <linux/slab.h>
+#include <linux/mmc/slot-gpio.h>
+#include <linux/dma-mapping.h>
+#include <linux/iopoll.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/msm-bus.h>
+#include <linux/pm_runtime.h>
+#include <trace/events/mmc.h>
+
+#include "sdhci-msm.h"
+#include "sdhci-msm-ice.h"
+#include "cmdq_hci.h"
 
-#include "sdhci-pltfm.h"
+#define QOS_REMOVE_DELAY_MS	10
+#define CORE_POWER		0x0
+#define CORE_SW_RST		(1 << 7)
+
+#define SDHCI_VER_100		0x2B
 
-#define CORE_MCI_VERSION		0x50
+#define CORE_VERSION_STEP_MASK		0x0000FFFF
+#define CORE_VERSION_MINOR_MASK		0x0FFF0000
+#define CORE_VERSION_MINOR_SHIFT	16
+#define CORE_VERSION_MAJOR_MASK		0xF0000000
 #define CORE_VERSION_MAJOR_SHIFT	28
-#define CORE_VERSION_MAJOR_MASK		(0xf << CORE_VERSION_MAJOR_SHIFT)
-#define CORE_VERSION_MINOR_MASK		0xff
+#define CORE_VERSION_TARGET_MASK	0x000000FF
+#define SDHCI_MSM_VER_420               0x49
+
+#define SWITCHABLE_SIGNALLING_VOL	(1 << 29)
 
 #define CORE_HC_MODE		0x78
 #define HC_MODE_EN		0x1
-#define CORE_POWER		0x0
-#define CORE_SW_RST		BIT(7)
+#define FF_CLK_SW_RST_DIS	(1 << 13)
+
+#define CORE_PWRCTL_BUS_OFF	0x01
+#define CORE_PWRCTL_BUS_ON	(1 << 1)
+#define CORE_PWRCTL_IO_LOW	(1 << 2)
+#define CORE_PWRCTL_IO_HIGH	(1 << 3)
+
+#define CORE_PWRCTL_BUS_SUCCESS	0x01
+#define CORE_PWRCTL_BUS_FAIL	(1 << 1)
+#define CORE_PWRCTL_IO_SUCCESS	(1 << 2)
+#define CORE_PWRCTL_IO_FAIL	(1 << 3)
 
+#define INT_MASK		0xF
 #define MAX_PHASES		16
-#define CORE_DLL_LOCK		BIT(7)
-#define CORE_DLL_EN		BIT(16)
-#define CORE_CDR_EN		BIT(17)
-#define CORE_CK_OUT_EN		BIT(18)
-#define CORE_CDR_EXT_EN		BIT(19)
-#define CORE_DLL_PDN		BIT(29)
-#define CORE_DLL_RST		BIT(30)
-#define CORE_DLL_CONFIG		0x100
-#define CORE_DLL_STATUS		0x108
-
-#define CORE_VENDOR_SPEC	0x10c
-#define CORE_CLK_PWRSAVE	BIT(1)
-
-#define CORE_VENDOR_SPEC_CAPABILITIES0	0x11c
-
-#define CDR_SELEXT_SHIFT	20
-#define CDR_SELEXT_MASK		(0xf << CDR_SELEXT_SHIFT)
-#define CMUX_SHIFT_PHASE_SHIFT	24
-#define CMUX_SHIFT_PHASE_MASK	(7 << CMUX_SHIFT_PHASE_SHIFT)
-
-struct sdhci_msm_host {
-	struct platform_device *pdev;
-	void __iomem *core_mem;	/* MSM SDCC mapped address */
-	struct clk *clk;	/* main SD/MMC bus clock */
-	struct clk *pclk;	/* SDHC peripheral bus clock */
-	struct clk *bus_clk;	/* SDHC bus voter clock */
-	struct mmc_host *mmc;
-	struct sdhci_pltfm_data sdhci_msm_pdata;
+
+#define CORE_CMD_DAT_TRACK_SEL	(1 << 0)
+#define CORE_DLL_EN		(1 << 16)
+#define CORE_CDR_EN		(1 << 17)
+#define CORE_CK_OUT_EN		(1 << 18)
+#define CORE_CDR_EXT_EN		(1 << 19)
+#define CORE_DLL_PDN		(1 << 29)
+#define CORE_DLL_RST		(1 << 30)
+
+#define CORE_DLL_LOCK		(1 << 7)
+#define CORE_DDR_DLL_LOCK	(1 << 11)
+
+#define CORE_CLK_PWRSAVE		(1 << 1)
+#define CORE_HC_MCLK_SEL_DFLT		(2 << 8)
+#define CORE_HC_MCLK_SEL_HS400		(3 << 8)
+#define CORE_HC_MCLK_SEL_MASK		(3 << 8)
+#define CORE_HC_AUTO_CMD21_EN		(1 << 6)
+#define CORE_IO_PAD_PWR_SWITCH_EN	(1 << 15)
+#define CORE_IO_PAD_PWR_SWITCH	(1 << 16)
+#define CORE_HC_SELECT_IN_EN	(1 << 18)
+#define CORE_HC_SELECT_IN_HS400	(6 << 19)
+#define CORE_HC_SELECT_IN_MASK	(7 << 19)
+#define CORE_VENDOR_SPEC_POR_VAL	0xA1C
+
+#define HC_SW_RST_WAIT_IDLE_DIS	(1 << 20)
+#define HC_SW_RST_REQ (1 << 21)
+#define CORE_ONE_MID_EN     (1 << 25)
+
+#define CORE_8_BIT_SUPPORT		(1 << 18)
+#define CORE_3_3V_SUPPORT		(1 << 24)
+#define CORE_3_0V_SUPPORT		(1 << 25)
+#define CORE_1_8V_SUPPORT		(1 << 26)
+#define CORE_SYS_BUS_SUPPORT_64_BIT	BIT(28)
+
+#define CORE_CSR_CDC_CTLR_CFG0		0x130
+#define CORE_SW_TRIG_FULL_CALIB		(1 << 16)
+#define CORE_HW_AUTOCAL_ENA		(1 << 17)
+
+#define CORE_CSR_CDC_CTLR_CFG1		0x134
+#define CORE_CSR_CDC_CAL_TIMER_CFG0	0x138
+#define CORE_TIMER_ENA			(1 << 16)
+
+#define CORE_CSR_CDC_CAL_TIMER_CFG1	0x13C
+#define CORE_CSR_CDC_REFCOUNT_CFG	0x140
+#define CORE_CSR_CDC_COARSE_CAL_CFG	0x144
+#define CORE_CDC_OFFSET_CFG		0x14C
+#define CORE_CSR_CDC_DELAY_CFG		0x150
+#define CORE_CDC_SLAVE_DDA_CFG		0x160
+#define CORE_CSR_CDC_STATUS0		0x164
+#define CORE_CALIBRATION_DONE		(1 << 0)
+
+#define CORE_CDC_ERROR_CODE_MASK	0x7000000
+
+#define CQ_CMD_DBG_RAM	                0x110
+#define CQ_CMD_DBG_RAM_WA               0x150
+#define CQ_CMD_DBG_RAM_OL               0x154
+
+#define CORE_CSR_CDC_GEN_CFG		0x178
+#define CORE_CDC_SWITCH_BYPASS_OFF	(1 << 0)
+#define CORE_CDC_SWITCH_RC_EN		(1 << 1)
+
+#define CORE_CDC_T4_DLY_SEL		(1 << 0)
+#define CORE_CMDIN_RCLK_EN		(1 << 1)
+#define CORE_START_CDC_TRAFFIC		(1 << 6)
+
+#define CORE_PWRSAVE_DLL	(1 << 3)
+#define CORE_FIFO_ALT_EN	(1 << 10)
+#define CORE_CMDEN_HS400_INPUT_MASK_CNT (1 << 13)
+
+#define CORE_DDR_CAL_EN		(1 << 0)
+#define CORE_FLL_CYCLE_CNT	(1 << 18)
+#define CORE_DLL_CLOCK_DISABLE	(1 << 21)
+
+#define DDR_CONFIG_POR_VAL		0x80040853
+#define DDR_CONFIG_PRG_RCLK_DLY_MASK	0x1FF
+#define DDR_CONFIG_PRG_RCLK_DLY		115
+#define DDR_CONFIG_2_POR_VAL		0x80040873
+
+/* 512 descriptors */
+#define SDHCI_MSM_MAX_SEGMENTS  (1 << 9)
+#define SDHCI_MSM_MMC_CLK_GATE_DELAY	200 /* msecs */
+
+#define CORE_FREQ_100MHZ	(100 * 1000 * 1000)
+#define TCXO_FREQ		19200000
+
+#define INVALID_TUNING_PHASE	-1
+#define sdhci_is_valid_gpio_wakeup_int(_h) ((_h)->pdata->sdiowakeup_irq >= 0)
+
+#define NUM_TUNING_PHASES		16
+#define MAX_DRV_TYPES_SUPPORTED_HS200	4
+#define MSM_AUTOSUSPEND_DELAY_MS 100
+
+struct sdhci_msm_offset {
+	u32 CORE_MCI_DATA_CNT;
+	u32 CORE_MCI_STATUS;
+	u32 CORE_MCI_FIFO_CNT;
+	u32 CORE_MCI_VERSION;
+	u32 CORE_GENERICS;
+	u32 CORE_TESTBUS_CONFIG;
+	u32 CORE_TESTBUS_SEL2_BIT;
+	u32 CORE_TESTBUS_ENA;
+	u32 CORE_TESTBUS_SEL2;
+	u32 CORE_PWRCTL_STATUS;
+	u32 CORE_PWRCTL_MASK;
+	u32 CORE_PWRCTL_CLEAR;
+	u32 CORE_PWRCTL_CTL;
+	u32 CORE_SDCC_DEBUG_REG;
+	u32 CORE_DLL_CONFIG;
+	u32 CORE_DLL_STATUS;
+	u32 CORE_VENDOR_SPEC;
+	u32 CORE_VENDOR_SPEC_ADMA_ERR_ADDR0;
+	u32 CORE_VENDOR_SPEC_ADMA_ERR_ADDR1;
+	u32 CORE_VENDOR_SPEC_FUNC2;
+	u32 CORE_VENDOR_SPEC_CAPABILITIES0;
+	u32 CORE_DDR_200_CFG;
+	u32 CORE_VENDOR_SPEC3;
+	u32 CORE_DLL_CONFIG_2;
+	u32 CORE_DDR_CONFIG;
+	u32 CORE_DDR_CONFIG_2;
 };
 
-/* Platform specific tuning */
-static inline int msm_dll_poll_ck_out_en(struct sdhci_host *host, u8 poll)
+struct sdhci_msm_offset sdhci_msm_offset_mci_removed = {
+	.CORE_MCI_DATA_CNT = 0x35C,
+	.CORE_MCI_STATUS = 0x324,
+	.CORE_MCI_FIFO_CNT = 0x308,
+	.CORE_MCI_VERSION = 0x318,
+	.CORE_GENERICS = 0x320,
+	.CORE_TESTBUS_CONFIG = 0x32C,
+	.CORE_TESTBUS_SEL2_BIT = 3,
+	.CORE_TESTBUS_ENA = (1 << 31),
+	.CORE_TESTBUS_SEL2 = (1 << 3),
+	.CORE_PWRCTL_STATUS = 0x240,
+	.CORE_PWRCTL_MASK = 0x244,
+	.CORE_PWRCTL_CLEAR = 0x248,
+	.CORE_PWRCTL_CTL = 0x24C,
+	.CORE_SDCC_DEBUG_REG = 0x358,
+	.CORE_DLL_CONFIG = 0x200,
+	.CORE_DLL_STATUS = 0x208,
+	.CORE_VENDOR_SPEC = 0x20C,
+	.CORE_VENDOR_SPEC_ADMA_ERR_ADDR0 = 0x214,
+	.CORE_VENDOR_SPEC_ADMA_ERR_ADDR1 = 0x218,
+	.CORE_VENDOR_SPEC_FUNC2 = 0x210,
+	.CORE_VENDOR_SPEC_CAPABILITIES0 = 0x21C,
+	.CORE_DDR_200_CFG = 0x224,
+	.CORE_VENDOR_SPEC3 = 0x250,
+	.CORE_DLL_CONFIG_2 = 0x254,
+	.CORE_DDR_CONFIG = 0x258,
+	.CORE_DDR_CONFIG_2 = 0x25C,
+};
+
+struct sdhci_msm_offset sdhci_msm_offset_mci_present = {
+	.CORE_MCI_DATA_CNT = 0x30,
+	.CORE_MCI_STATUS = 0x34,
+	.CORE_MCI_FIFO_CNT = 0x44,
+	.CORE_MCI_VERSION = 0x050,
+	.CORE_GENERICS = 0x70,
+	.CORE_TESTBUS_CONFIG = 0x0CC,
+	.CORE_TESTBUS_SEL2_BIT = 4,
+	.CORE_TESTBUS_ENA = (1 << 3),
+	.CORE_TESTBUS_SEL2 = (1 << 4),
+	.CORE_PWRCTL_STATUS = 0xDC,
+	.CORE_PWRCTL_MASK = 0xE0,
+	.CORE_PWRCTL_CLEAR = 0xE4,
+	.CORE_PWRCTL_CTL = 0xE8,
+	.CORE_SDCC_DEBUG_REG = 0x124,
+	.CORE_DLL_CONFIG = 0x100,
+	.CORE_DLL_STATUS = 0x108,
+	.CORE_VENDOR_SPEC = 0x10C,
+	.CORE_VENDOR_SPEC_ADMA_ERR_ADDR0 = 0x114,
+	.CORE_VENDOR_SPEC_ADMA_ERR_ADDR1 = 0x118,
+	.CORE_VENDOR_SPEC_FUNC2 = 0x110,
+	.CORE_VENDOR_SPEC_CAPABILITIES0 = 0x11C,
+	.CORE_DDR_200_CFG = 0x184,
+	.CORE_VENDOR_SPEC3 = 0x1B0,
+	.CORE_DLL_CONFIG_2 = 0x1B4,
+	.CORE_DDR_CONFIG = 0x1B8,
+	.CORE_DDR_CONFIG_2 = 0x1BC,
+};
+
+u8 sdhci_msm_readb_relaxed(struct sdhci_host *host, u32 offset)
+{
+	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+	struct sdhci_msm_host *msm_host = pltfm_host->priv;
+	void __iomem *base_addr;
+
+	if (msm_host->mci_removed)
+		base_addr = host->ioaddr;
+	else
+		base_addr = msm_host->core_mem;
+
+	return readb_relaxed(base_addr + offset);
+}
+
+u32 sdhci_msm_readl_relaxed(struct sdhci_host *host, u32 offset)
+{
+	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+	struct sdhci_msm_host *msm_host = pltfm_host->priv;
+	void __iomem *base_addr;
+
+	if (msm_host->mci_removed)
+		base_addr = host->ioaddr;
+	else
+		base_addr = msm_host->core_mem;
+
+	return readl_relaxed(base_addr + offset);
+}
+
+void sdhci_msm_writeb_relaxed(u8 val, struct sdhci_host *host, u32 offset)
+{
+	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+	struct sdhci_msm_host *msm_host = pltfm_host->priv;
+	void __iomem *base_addr;
+
+	if (msm_host->mci_removed)
+		base_addr = host->ioaddr;
+	else
+		base_addr = msm_host->core_mem;
+
+	writeb_relaxed(val, base_addr + offset);
+}
+
+void sdhci_msm_writel_relaxed(u32 val, struct sdhci_host *host, u32 offset)
 {
+	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+	struct sdhci_msm_host *msm_host = pltfm_host->priv;
+	void __iomem *base_addr;
+
+	if (msm_host->mci_removed)
+		base_addr = host->ioaddr;
+	else
+		base_addr = msm_host->core_mem;
+
+	writel_relaxed(val, base_addr + offset);
+}
+
+/* Timeout value to avoid infinite waiting for pwr_irq */
+#define MSM_PWR_IRQ_TIMEOUT_MS 5000
+
+static const u32 tuning_block_64[] = {
+	0x00FF0FFF, 0xCCC3CCFF, 0xFFCC3CC3, 0xEFFEFFFE,
+	0xDDFFDFFF, 0xFBFFFBFF, 0xFF7FFFBF, 0xEFBDF777,
+	0xF0FFF0FF, 0x3CCCFC0F, 0xCFCC33CC, 0xEEFFEFFF,
+	0xFDFFFDFF, 0xFFBFFFDF, 0xFFF7FFBB, 0xDE7B7FF7
+};
+
+static const u32 tuning_block_128[] = {
+	0xFF00FFFF, 0x0000FFFF, 0xCCCCFFFF, 0xCCCC33CC,
+	0xCC3333CC, 0xFFFFCCCC, 0xFFFFEEFF, 0xFFEEEEFF,
+	0xFFDDFFFF, 0xDDDDFFFF, 0xBBFFFFFF, 0xBBFFFFFF,
+	0xFFFFFFBB, 0xFFFFFF77, 0x77FF7777, 0xFFEEDDBB,
+	0x00FFFFFF, 0x00FFFFFF, 0xCCFFFF00, 0xCC33CCCC,
+	0x3333CCCC, 0xFFCCCCCC, 0xFFEEFFFF, 0xEEEEFFFF,
+	0xDDFFFFFF, 0xDDFFFFFF, 0xFFFFFFDD, 0xFFFFFFBB,
+	0xFFFFBBBB, 0xFFFF77FF, 0xFF7777FF, 0xEEDDBB77
+};
+
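
With SDHCI_QUIRK2_NON_STANDARD_TUNING the driver itself validates a candidate phase by comparing the block read back against these patterns; a minimal sketch of that check (data_buf and rc as they would appear inside a tuning loop):

    /* Sketch: pick the expected pattern by bus width and compare. */
    const u32 *pattern = tuning_block_64;
    size_t size = sizeof(tuning_block_64);

    if (mmc->ios.bus_width == MMC_BUS_WIDTH_8) {
            pattern = tuning_block_128;
            size = sizeof(tuning_block_128);
    }
    if (memcmp(data_buf, pattern, size))
            rc = -EIO;      /* this phase failed; try the next one */
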
+/* global to hold each slot instance for debug */
+static struct sdhci_msm_host *sdhci_slot[2];
+
+static int disable_slots;
+/* root can write, others read */
+module_param(disable_slots, int, S_IRUGO|S_IWUSR);
+
+static bool nocmdq;
+module_param(nocmdq, bool, S_IRUGO|S_IWUSR);
+
+enum vdd_io_level {
+	/* set vdd_io_data->low_vol_level */
+	VDD_IO_LOW,
+	/* set vdd_io_data->high_vol_level */
+	VDD_IO_HIGH,
+	/*
+	 * set to whatever is in voltage_level (the third argument) of
+	 * sdhci_msm_set_vdd_io_vol().
+	 */
+	VDD_IO_SET_LEVEL,
+};
+
+/* MSM platform specific tuning */
+static inline int msm_dll_poll_ck_out_en(struct sdhci_host *host,
+						u8 poll)
+{
+	int rc = 0;
 	u32 wait_cnt = 50;
-	u8 ck_out_en;
+	u8 ck_out_en = 0;
 	struct mmc_host *mmc = host->mmc;
+	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+	struct sdhci_msm_host *msm_host = pltfm_host->priv;
+	const struct sdhci_msm_offset *msm_host_offset =
+					msm_host->offset;
 
-	/* Poll for CK_OUT_EN bit.  max. poll time = 50us */
-	ck_out_en = !!(readl_relaxed(host->ioaddr + CORE_DLL_CONFIG) &
-			CORE_CK_OUT_EN);
+	/* poll for CK_OUT_EN bit.  max. poll time = 50us */
+	ck_out_en = !!(readl_relaxed(host->ioaddr +
+		msm_host_offset->CORE_DLL_CONFIG) & CORE_CK_OUT_EN);
 
 	while (ck_out_en != poll) {
 		if (--wait_cnt == 0) {
-			dev_err(mmc_dev(mmc), "%s: CK_OUT_EN bit is not %d\n",
-			       mmc_hostname(mmc), poll);
-			return -ETIMEDOUT;
+			pr_err("%s: %s: CK_OUT_EN bit is not %d\n",
+				mmc_hostname(mmc), __func__, poll);
+			rc = -ETIMEDOUT;
+			goto out;
 		}
 		udelay(1);
 
-		ck_out_en = !!(readl_relaxed(host->ioaddr + CORE_DLL_CONFIG) &
-				CORE_CK_OUT_EN);
+		ck_out_en = !!(readl_relaxed(host->ioaddr +
+			msm_host_offset->CORE_DLL_CONFIG) & CORE_CK_OUT_EN);
+	}
+out:
+	return rc;
 	}
 
+/*
+ * Enable CDR to track changes of DAT lines and adjust sampling
+ * point according to voltage/temperature variations
+ */
+static int msm_enable_cdr_cm_sdc4_dll(struct sdhci_host *host)
+{
+	int rc = 0;
+	u32 config;
+	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+	struct sdhci_msm_host *msm_host = pltfm_host->priv;
+	const struct sdhci_msm_offset *msm_host_offset =
+					msm_host->offset;
+
+	config = readl_relaxed(host->ioaddr +
+		msm_host_offset->CORE_DLL_CONFIG);
+	config |= CORE_CDR_EN;
+	config &= ~(CORE_CDR_EXT_EN | CORE_CK_OUT_EN);
+	writel_relaxed(config, host->ioaddr +
+		msm_host_offset->CORE_DLL_CONFIG);
+
+	rc = msm_dll_poll_ck_out_en(host, 0);
+	if (rc)
+		goto err;
+
+	writel_relaxed((readl_relaxed(host->ioaddr +
+		msm_host_offset->CORE_DLL_CONFIG) | CORE_CK_OUT_EN),
+		host->ioaddr + msm_host_offset->CORE_DLL_CONFIG);
+
+	rc = msm_dll_poll_ck_out_en(host, 1);
+	if (rc)
+		goto err;
+	goto out;
+err:
+	pr_err("%s: %s: failed\n", mmc_hostname(host->mmc), __func__);
+out:
+	return rc;
+}
+
+static ssize_t store_auto_cmd21(struct device *dev, struct device_attribute
+				*attr, const char *buf, size_t count)
+{
+	struct sdhci_host *host = dev_get_drvdata(dev);
+	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+	struct sdhci_msm_host *msm_host = pltfm_host->priv;
+	u32 tmp;
+	unsigned long flags;
+
+	if (!kstrtou32(buf, 0, &tmp)) {
+		spin_lock_irqsave(&host->lock, flags);
+		msm_host->en_auto_cmd21 = !!tmp;
+		spin_unlock_irqrestore(&host->lock, flags);
+	}
+	return count;
+}
+
+static ssize_t show_auto_cmd21(struct device *dev,
+			       struct device_attribute *attr, char *buf)
+{
+	struct sdhci_host *host = dev_get_drvdata(dev);
+	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+	struct sdhci_msm_host *msm_host = pltfm_host->priv;
+
+	return snprintf(buf, PAGE_SIZE, "%d\n", msm_host->en_auto_cmd21);
+}
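
The two handlers above are a standard sysfs show/store pair. A minimal sketch of how such a pair is typically wired up follows; the attribute name and the registration call site are illustrative assumptions, not taken from this patch:

	/* declare the attribute (name assumed for illustration) */
	static DEVICE_ATTR(auto_cmd21, S_IRUGO | S_IWUSR,
			   show_auto_cmd21, store_auto_cmd21);

	/* in probe(), once drvdata points at the sdhci_host */
	ret = device_create_file(&pdev->dev, &dev_attr_auto_cmd21);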
+
+/* MSM auto-tuning handler */
+static int sdhci_msm_config_auto_tuning_cmd(struct sdhci_host *host,
+					    bool enable,
+					    u32 type)
+{
+	int rc = 0;
+	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+	struct sdhci_msm_host *msm_host = pltfm_host->priv;
+	const struct sdhci_msm_offset *msm_host_offset =
+					msm_host->offset;
+	u32 val = 0;
+
+	if (!msm_host->en_auto_cmd21)
+		return 0;
+
+	if (type == MMC_SEND_TUNING_BLOCK_HS200)
+		val = CORE_HC_AUTO_CMD21_EN;
+	else
 	return 0;
+
+	if (enable) {
+		rc = msm_enable_cdr_cm_sdc4_dll(host);
+		writel_relaxed(readl_relaxed(host->ioaddr +
+			msm_host_offset->CORE_VENDOR_SPEC) | val,
+			host->ioaddr + msm_host_offset->CORE_VENDOR_SPEC);
+	} else {
+		writel_relaxed(readl_relaxed(host->ioaddr +
+			msm_host_offset->CORE_VENDOR_SPEC) & ~val,
+			host->ioaddr + msm_host_offset->CORE_VENDOR_SPEC);
+	}
+	return rc;
 }
 
 static int msm_config_cm_dll_phase(struct sdhci_host *host, u8 phase)
 {
-	int rc;
-	static const u8 grey_coded_phase_table[] = {
-		0x0, 0x1, 0x3, 0x2, 0x6, 0x7, 0x5, 0x4,
-		0xc, 0xd, 0xf, 0xe, 0xa, 0xb, 0x9, 0x8
-	};
+	int rc = 0;
+	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+	struct sdhci_msm_host *msm_host = pltfm_host->priv;
+	const struct sdhci_msm_offset *msm_host_offset =
+					msm_host->offset;
+	static const u8 grey_coded_phase_table[] = {
+		0x0, 0x1, 0x3, 0x2, 0x6, 0x7, 0x5, 0x4,
+		0xC, 0xD, 0xF, 0xE, 0xA, 0xB, 0x9, 0x8
+	};
 	unsigned long flags;
 	u32 config;
 	struct mmc_host *mmc = host->mmc;
 
+	pr_debug("%s: Enter %s\n", mmc_hostname(mmc), __func__);
 	spin_lock_irqsave(&host->lock, flags);
 
-	config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG);
+	config = readl_relaxed(host->ioaddr +
+		msm_host_offset->CORE_DLL_CONFIG);
 	config &= ~(CORE_CDR_EN | CORE_CK_OUT_EN);
 	config |= (CORE_CDR_EXT_EN | CORE_DLL_EN);
-	writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG);
+	writel_relaxed(config, host->ioaddr +
+		msm_host_offset->CORE_DLL_CONFIG);
 
 	/* Wait until CK_OUT_EN bit of DLL_CONFIG register becomes '0' */
 	rc = msm_dll_poll_ck_out_en(host, 0);
@@ -116,31 +518,36 @@
 	 * Write the selected DLL clock output phase (0 ... 15)
 	 * to CDR_SELEXT bit field of DLL_CONFIG register.
 	 */
-	config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG);
-	config &= ~CDR_SELEXT_MASK;
-	config |= grey_coded_phase_table[phase] << CDR_SELEXT_SHIFT;
-	writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG);
+	writel_relaxed(((readl_relaxed(host->ioaddr +
+			msm_host_offset->CORE_DLL_CONFIG)
+			& ~(0xF << 20))
+			| (grey_coded_phase_table[phase] << 20)),
+			host->ioaddr + msm_host_offset->CORE_DLL_CONFIG);
 
 	/* Set CK_OUT_EN bit of DLL_CONFIG register to 1. */
-	writel_relaxed((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG)
-			| CORE_CK_OUT_EN), host->ioaddr + CORE_DLL_CONFIG);
+	writel_relaxed((readl_relaxed(host->ioaddr +
+		msm_host_offset->CORE_DLL_CONFIG) | CORE_CK_OUT_EN),
+		host->ioaddr + msm_host_offset->CORE_DLL_CONFIG);
 
 	/* Wait until CK_OUT_EN bit of DLL_CONFIG register becomes '1' */
 	rc = msm_dll_poll_ck_out_en(host, 1);
 	if (rc)
 		goto err_out;
 
-	config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG);
+	config = readl_relaxed(host->ioaddr +
+		msm_host_offset->CORE_DLL_CONFIG);
 	config |= CORE_CDR_EN;
 	config &= ~CORE_CDR_EXT_EN;
-	writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG);
+	writel_relaxed(config, host->ioaddr +
+		msm_host_offset->CORE_DLL_CONFIG);
 	goto out;
 
 err_out:
-	dev_err(mmc_dev(mmc), "%s: Failed to set DLL phase: %d\n",
-	       mmc_hostname(mmc), phase);
+	pr_err("%s: %s: Failed to set DLL phase: %d\n",
+		mmc_hostname(mmc), __func__, phase);
 out:
 	spin_unlock_irqrestore(&host->lock, flags);
+	pr_debug("%s: Exit %s\n", mmc_hostname(mmc), __func__);
 	return rc;
 }
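
grey_coded_phase_table in the function above is a 4-bit Gray code: cyclically adjacent entries differ in exactly one bit, so sweeping the CDR_SELEXT field through the table moves the selected phase one bit-flip at a time. A self-contained userspace check of that property:

	#include <assert.h>
	#include <stdio.h>

	int main(void)
	{
		static const unsigned char tbl[16] = {
			0x0, 0x1, 0x3, 0x2, 0x6, 0x7, 0x5, 0x4,
			0xC, 0xD, 0xF, 0xE, 0xA, 0xB, 0x9, 0x8
		};
		int i;

		for (i = 0; i < 16; i++) {
			unsigned int d = tbl[i] ^ tbl[(i + 1) % 16];

			/* exactly one bit set <=> non-zero power of two */
			assert(d && (d & (d - 1)) == 0);
		}
		printf("Gray code property holds\n");
		return 0;
	}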
 
@@ -148,8 +555,8 @@
  * Find out the greatest range of consecutive selected
  * DLL clock output phases that can be used as sampling
  * setting for SD3.0 UHS-I card read operation (in SDR104
- * timing mode) or for eMMC4.5 card read operation (in HS200
- * timing mode).
+ * timing mode) or for eMMC4.5 card read operation (in
+ * HS400/HS200 timing mode).
  * Select the 3/4 of the range and configure the DLL with the
  * selected DLL clock output phase.
  */
@@ -165,9 +572,10 @@
 	bool phase_0_found = false, phase_15_found = false;
 	struct mmc_host *mmc = host->mmc;
 
+	pr_debug("%s: Enter %s\n", mmc_hostname(mmc), __func__);
 	if (!total_phases || (total_phases > MAX_PHASES)) {
-		dev_err(mmc_dev(mmc), "%s: Invalid argument: total_phases=%d\n",
-		       mmc_hostname(mmc), total_phases);
+		pr_err("%s: %s: invalid argument: total_phases=%d\n",
+			mmc_hostname(mmc), __func__, total_phases);
 		return -EINVAL;
 	}
 
@@ -241,24 +649,29 @@
 		}
 	}
 
-	i = (curr_max * 3) / 4;
+	i = ((curr_max * 3) / 4);
 	if (i)
 		i--;
 
-	ret = ranges[selected_row_index][i];
+	ret = (int)ranges[selected_row_index][i];
 
 	if (ret >= MAX_PHASES) {
 		ret = -EINVAL;
-		dev_err(mmc_dev(mmc), "%s: Invalid phase selected=%d\n",
-		       mmc_hostname(mmc), ret);
+		pr_err("%s: %s: invalid phase selected=%d\n",
+			mmc_hostname(mmc), __func__, ret);
 	}
 
+	pr_debug("%s: Exit %s\n", mmc_hostname(mmc), __func__);
 	return ret;
 }
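
As a worked example of the 3/4 rule above: if the longest run of passing phases is 8 entries, say phases 5..12, then i = (8 * 3) / 4 = 6, decremented to 5, and ranges[row][5] = phase 10 is selected, i.e. roughly three quarters into the passing window and away from both edges. A standalone sketch of just the selection step:

	/*
	 * Sketch of the selection rule, not the driver function itself:
	 * pick the phase ~3/4 of the way into a run of good phases.
	 */
	static unsigned char pick_from_run(const unsigned char *run, int len)
	{
		int i = (len * 3) / 4;

		if (i)
			i--;
		return run[i];
	}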
 
 static inline void msm_cm_dll_set_freq(struct sdhci_host *host)
 {
-	u32 mclk_freq = 0, config;
+	u32 mclk_freq = 0;
+	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+	struct sdhci_msm_host *msm_host = pltfm_host->priv;
+	const struct sdhci_msm_offset *msm_host_offset =
+					msm_host->offset;
 
 	/* Program the MCLK value to MCLK_FREQ bit field */
 	if (host->clock <= 112000000)
@@ -278,117 +691,639 @@
 	else if (host->clock <= 200000000)
 		mclk_freq = 7;
 
-	config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG);
-	config &= ~CMUX_SHIFT_PHASE_MASK;
-	config |= mclk_freq << CMUX_SHIFT_PHASE_SHIFT;
-	writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG);
+	writel_relaxed(((readl_relaxed(host->ioaddr +
+			msm_host_offset->CORE_DLL_CONFIG)
+			& ~(7 << 24)) | (mclk_freq << 24)),
+			host->ioaddr + msm_host_offset->CORE_DLL_CONFIG);
 }
 
 /* Initialize the DLL (Programmable Delay Line) */
 static int msm_init_cm_dll(struct sdhci_host *host)
 {
+	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+	struct sdhci_msm_host *msm_host = pltfm_host->priv;
+	const struct sdhci_msm_offset *msm_host_offset =
+					msm_host->offset;
 	struct mmc_host *mmc = host->mmc;
-	int wait_cnt = 50;
+	int rc = 0;
 	unsigned long flags;
+	u32 wait_cnt;
+	bool prev_pwrsave, curr_pwrsave;
 
+	pr_debug("%s: Enter %s\n", mmc_hostname(mmc), __func__);
 	spin_lock_irqsave(&host->lock, flags);
-
+	prev_pwrsave = !!(readl_relaxed(host->ioaddr +
+		msm_host_offset->CORE_VENDOR_SPEC) & CORE_CLK_PWRSAVE);
+	curr_pwrsave = prev_pwrsave;
 	/*
 	 * Make sure that clock is always enabled when DLL
 	 * tuning is in progress. Keeping PWRSAVE ON may
-	 * turn off the clock.
+	 * turn off the clock. So let's disable the PWRSAVE
+	 * here and re-enable it once tuning is completed.
 	 */
-	writel_relaxed((readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC)
-			& ~CORE_CLK_PWRSAVE), host->ioaddr + CORE_VENDOR_SPEC);
+	if (prev_pwrsave) {
+		writel_relaxed((readl_relaxed(host->ioaddr +
+			msm_host_offset->CORE_VENDOR_SPEC)
+			& ~CORE_CLK_PWRSAVE), host->ioaddr +
+			msm_host_offset->CORE_VENDOR_SPEC);
+		curr_pwrsave = false;
+	}
+
+	if (msm_host->use_updated_dll_reset) {
+		/* Disable the DLL clock */
+		writel_relaxed((readl_relaxed(host->ioaddr +
+				msm_host_offset->CORE_DLL_CONFIG)
+				& ~CORE_CK_OUT_EN), host->ioaddr +
+				msm_host_offset->CORE_DLL_CONFIG);
+
+		writel_relaxed((readl_relaxed(host->ioaddr +
+				msm_host_offset->CORE_DLL_CONFIG_2)
+				| CORE_DLL_CLOCK_DISABLE), host->ioaddr +
+				msm_host_offset->CORE_DLL_CONFIG_2);
+	}
 
 	/* Write 1 to DLL_RST bit of DLL_CONFIG register */
-	writel_relaxed((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG)
-			| CORE_DLL_RST), host->ioaddr + CORE_DLL_CONFIG);
+	writel_relaxed((readl_relaxed(host->ioaddr +
+		msm_host_offset->CORE_DLL_CONFIG) | CORE_DLL_RST),
+		host->ioaddr + msm_host_offset->CORE_DLL_CONFIG);
 
 	/* Write 1 to DLL_PDN bit of DLL_CONFIG register */
-	writel_relaxed((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG)
-			| CORE_DLL_PDN), host->ioaddr + CORE_DLL_CONFIG);
+	writel_relaxed((readl_relaxed(host->ioaddr +
+		msm_host_offset->CORE_DLL_CONFIG) | CORE_DLL_PDN),
+		host->ioaddr + msm_host_offset->CORE_DLL_CONFIG);
 	msm_cm_dll_set_freq(host);
 
+	if (msm_host->use_updated_dll_reset) {
+		u32 mclk_freq = 0;
+
+		if ((readl_relaxed(host->ioaddr +
+					msm_host_offset->CORE_DLL_CONFIG_2)
+					& CORE_FLL_CYCLE_CNT))
+			mclk_freq = (u32) ((host->clock / TCXO_FREQ) * 8);
+		else
+			mclk_freq = (u32) ((host->clock / TCXO_FREQ) * 4);
+
+		writel_relaxed(((readl_relaxed(host->ioaddr +
+			msm_host_offset->CORE_DLL_CONFIG_2)
+			& ~(0xFF << 10)) | (mclk_freq << 10)),
+			host->ioaddr + msm_host_offset->CORE_DLL_CONFIG_2);
+		/* wait for 5us before enabling DLL clock */
+		udelay(5);
+	}
+
 	/* Write 0 to DLL_RST bit of DLL_CONFIG register */
-	writel_relaxed((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG)
-			& ~CORE_DLL_RST), host->ioaddr + CORE_DLL_CONFIG);
+	writel_relaxed((readl_relaxed(host->ioaddr +
+			msm_host_offset->CORE_DLL_CONFIG) & ~CORE_DLL_RST),
+			host->ioaddr + msm_host_offset->CORE_DLL_CONFIG);
 
 	/* Write 0 to DLL_PDN bit of DLL_CONFIG register */
-	writel_relaxed((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG)
-			& ~CORE_DLL_PDN), host->ioaddr + CORE_DLL_CONFIG);
+	writel_relaxed((readl_relaxed(host->ioaddr +
+			msm_host_offset->CORE_DLL_CONFIG) & ~CORE_DLL_PDN),
+			host->ioaddr + msm_host_offset->CORE_DLL_CONFIG);
+
+	if (msm_host->use_updated_dll_reset) {
+		msm_cm_dll_set_freq(host);
+		/* Enable the DLL clock */
+		writel_relaxed((readl_relaxed(host->ioaddr +
+				msm_host_offset->CORE_DLL_CONFIG_2)
+				& ~CORE_DLL_CLOCK_DISABLE), host->ioaddr +
+				msm_host_offset->CORE_DLL_CONFIG_2);
+	}
 
 	/* Set DLL_EN bit to 1. */
-	writel_relaxed((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG)
-			| CORE_DLL_EN), host->ioaddr + CORE_DLL_CONFIG);
+	writel_relaxed((readl_relaxed(host->ioaddr +
+			msm_host_offset->CORE_DLL_CONFIG) | CORE_DLL_EN),
+			host->ioaddr + msm_host_offset->CORE_DLL_CONFIG);
 
 	/* Set CK_OUT_EN bit to 1. */
-	writel_relaxed((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG)
-			| CORE_CK_OUT_EN), host->ioaddr + CORE_DLL_CONFIG);
+	writel_relaxed((readl_relaxed(host->ioaddr +
+			msm_host_offset->CORE_DLL_CONFIG)
+			| CORE_CK_OUT_EN), host->ioaddr +
+			msm_host_offset->CORE_DLL_CONFIG);
 
+	wait_cnt = 50;
 	/* Wait until DLL_LOCK bit of DLL_STATUS register becomes '1' */
-	while (!(readl_relaxed(host->ioaddr + CORE_DLL_STATUS) &
-		 CORE_DLL_LOCK)) {
+	while (!(readl_relaxed(host->ioaddr +
+		msm_host_offset->CORE_DLL_STATUS) & CORE_DLL_LOCK)) {
 		/* max. wait of 50us for LOCK bit to be set */
 		if (--wait_cnt == 0) {
-			dev_err(mmc_dev(mmc), "%s: DLL failed to LOCK\n",
-			       mmc_hostname(mmc));
-			spin_unlock_irqrestore(&host->lock, flags);
-			return -ETIMEDOUT;
+			pr_err("%s: %s: DLL failed to LOCK\n",
+				mmc_hostname(mmc), __func__);
+			rc = -ETIMEDOUT;
+			goto out;
 		}
+		/* wait for 1us before polling again */
 		udelay(1);
 	}
 
+out:
+	/* Restore the correct PWRSAVE state */
+	if (prev_pwrsave ^ curr_pwrsave) {
+		u32 reg = readl_relaxed(host->ioaddr +
+			msm_host_offset->CORE_VENDOR_SPEC);
+
+		if (prev_pwrsave)
+			reg |= CORE_CLK_PWRSAVE;
+		else
+			reg &= ~CORE_CLK_PWRSAVE;
+
+		writel_relaxed(reg, host->ioaddr +
+			msm_host_offset->CORE_VENDOR_SPEC);
+	}
+
 	spin_unlock_irqrestore(&host->lock, flags);
+	pr_debug("%s: Exit %s\n", mmc_hostname(mmc), __func__);
+	return rc;
+}
+
+static int sdhci_msm_cdclp533_calibration(struct sdhci_host *host)
+{
+	u32 calib_done;
+	int ret = 0;
+	int cdc_err = 0;
+	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+	struct sdhci_msm_host *msm_host = pltfm_host->priv;
+	const struct sdhci_msm_offset *msm_host_offset =
+					msm_host->offset;
+
+	pr_debug("%s: Enter %s\n", mmc_hostname(host->mmc), __func__);
+
+	/* Write 0 to CDC_T4_DLY_SEL field in VENDOR_SPEC_DDR200_CFG */
+	writel_relaxed((readl_relaxed(host->ioaddr +
+			msm_host_offset->CORE_DDR_200_CFG)
+			& ~CORE_CDC_T4_DLY_SEL),
+			host->ioaddr + msm_host_offset->CORE_DDR_200_CFG);
+
+	/* Write 0 to CDC_SWITCH_BYPASS_OFF field in CORE_CSR_CDC_GEN_CFG */
+	writel_relaxed((readl_relaxed(host->ioaddr + CORE_CSR_CDC_GEN_CFG)
+			& ~CORE_CDC_SWITCH_BYPASS_OFF),
+			host->ioaddr + CORE_CSR_CDC_GEN_CFG);
+
+	/* Write 1 to CDC_SWITCH_RC_EN field in CORE_CSR_CDC_GEN_CFG */
+	writel_relaxed((readl_relaxed(host->ioaddr + CORE_CSR_CDC_GEN_CFG)
+			| CORE_CDC_SWITCH_RC_EN),
+			host->ioaddr + CORE_CSR_CDC_GEN_CFG);
+
+	/* Write 0 to START_CDC_TRAFFIC field in CORE_DDR200_CFG */
+	writel_relaxed((readl_relaxed(host->ioaddr +
+			msm_host_offset->CORE_DDR_200_CFG)
+			& ~CORE_START_CDC_TRAFFIC),
+			host->ioaddr + msm_host_offset->CORE_DDR_200_CFG);
+
+	/*
+	 * Perform CDC Register Initialization Sequence
+	 *
+	 * CORE_CSR_CDC_CTLR_CFG0	0x11800EC
+	 * CORE_CSR_CDC_CTLR_CFG1	0x3011111
+	 * CORE_CSR_CDC_CAL_TIMER_CFG0	0x1201000
+	 * CORE_CSR_CDC_CAL_TIMER_CFG1	0x4
+	 * CORE_CSR_CDC_REFCOUNT_CFG	0xCB732020
+	 * CORE_CSR_CDC_COARSE_CAL_CFG	0xB19
+ * CORE_CSR_CDC_DELAY_CFG	0x4E2
+	 * CORE_CDC_OFFSET_CFG		0x0
+	 * CORE_CDC_SLAVE_DDA_CFG	0x16334
+	 */
+
+	writel_relaxed(0x11800EC, host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);
+	writel_relaxed(0x3011111, host->ioaddr + CORE_CSR_CDC_CTLR_CFG1);
+	writel_relaxed(0x1201000, host->ioaddr + CORE_CSR_CDC_CAL_TIMER_CFG0);
+	writel_relaxed(0x4, host->ioaddr + CORE_CSR_CDC_CAL_TIMER_CFG1);
+	writel_relaxed(0xCB732020, host->ioaddr + CORE_CSR_CDC_REFCOUNT_CFG);
+	writel_relaxed(0xB19, host->ioaddr + CORE_CSR_CDC_COARSE_CAL_CFG);
+	writel_relaxed(0x4E2, host->ioaddr + CORE_CSR_CDC_DELAY_CFG);
+	writel_relaxed(0x0, host->ioaddr + CORE_CDC_OFFSET_CFG);
+	writel_relaxed(0x16334, host->ioaddr + CORE_CDC_SLAVE_DDA_CFG);
+
+	/* CDC HW Calibration */
+
+	/* Write 1 to SW_TRIG_FULL_CALIB field in CORE_CSR_CDC_CTLR_CFG0 */
+	writel_relaxed((readl_relaxed(host->ioaddr + CORE_CSR_CDC_CTLR_CFG0)
+			| CORE_SW_TRIG_FULL_CALIB),
+			host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);
+
+	/* Write 0 to SW_TRIG_FULL_CALIB field in CORE_CSR_CDC_CTLR_CFG0 */
+	writel_relaxed((readl_relaxed(host->ioaddr + CORE_CSR_CDC_CTLR_CFG0)
+			& ~CORE_SW_TRIG_FULL_CALIB),
+			host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);
+
+	/* Write 1 to HW_AUTOCAL_ENA field in CORE_CSR_CDC_CTLR_CFG0 */
+	writel_relaxed((readl_relaxed(host->ioaddr + CORE_CSR_CDC_CTLR_CFG0)
+			| CORE_HW_AUTOCAL_ENA),
+			host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);
+
+	/* Write 1 to TIMER_ENA field in CORE_CSR_CDC_CAL_TIMER_CFG0 */
+	writel_relaxed((readl_relaxed(host->ioaddr +
+			CORE_CSR_CDC_CAL_TIMER_CFG0) | CORE_TIMER_ENA),
+			host->ioaddr + CORE_CSR_CDC_CAL_TIMER_CFG0);
+
+	/* ensure the CDC config writes above complete before polling */
+	mb();
+
+	/* Poll on CALIBRATION_DONE field in CORE_CSR_CDC_STATUS0 to be 1 */
+	ret = readl_poll_timeout(host->ioaddr + CORE_CSR_CDC_STATUS0,
+		 calib_done, (calib_done & CORE_CALIBRATION_DONE), 1, 50);
+
+	if (ret == -ETIMEDOUT) {
+		pr_err("%s: %s: CDC Calibration was not completed\n",
+				mmc_hostname(host->mmc), __func__);
+		goto out;
+	}
+
+	/* Verify CDC_ERROR_CODE field in CORE_CSR_CDC_STATUS0 is 0 */
+	cdc_err = readl_relaxed(host->ioaddr + CORE_CSR_CDC_STATUS0)
+			& CORE_CDC_ERROR_CODE_MASK;
+	if (cdc_err) {
+		pr_err("%s: %s: CDC Error Code %d\n",
+			mmc_hostname(host->mmc), __func__, cdc_err);
+		ret = -EINVAL;
+		goto out;
+	}
+
+	/* Write 1 to START_CDC_TRAFFIC field in CORE_DDR200_CFG */
+	writel_relaxed((readl_relaxed(host->ioaddr +
+			msm_host_offset->CORE_DDR_200_CFG)
+			| CORE_START_CDC_TRAFFIC),
+			host->ioaddr + msm_host_offset->CORE_DDR_200_CFG);
+out:
+	pr_debug("%s: Exit %s, ret:%d\n", mmc_hostname(host->mmc),
+			__func__, ret);
+	return ret;
+}
+
+static int sdhci_msm_cm_dll_sdc4_calibration(struct sdhci_host *host)
+{
+	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+	struct sdhci_msm_host *msm_host = pltfm_host->priv;
+	const struct sdhci_msm_offset *msm_host_offset =
+					msm_host->offset;
+	u32 dll_status, ddr_config;
+	int ret = 0;
+
+	pr_debug("%s: Enter %s\n", mmc_hostname(host->mmc), __func__);
+
+	/*
+	 * Reprogram the value in case it has been modified by the
+	 * bootloader.
+	 */
+	if (msm_host->rclk_delay_fix) {
+		writel_relaxed(DDR_CONFIG_2_POR_VAL, host->ioaddr +
+			msm_host_offset->CORE_DDR_CONFIG_2);
+	} else {
+		ddr_config = DDR_CONFIG_POR_VAL &
+				~DDR_CONFIG_PRG_RCLK_DLY_MASK;
+		ddr_config |= DDR_CONFIG_PRG_RCLK_DLY;
+		writel_relaxed(ddr_config, host->ioaddr +
+			msm_host_offset->CORE_DDR_CONFIG);
+	}
+
+	if (msm_host->enhanced_strobe && mmc_card_strobe(msm_host->mmc->card))
+		writel_relaxed((readl_relaxed(host->ioaddr +
+				msm_host_offset->CORE_DDR_200_CFG)
+				| CORE_CMDIN_RCLK_EN), host->ioaddr +
+				msm_host_offset->CORE_DDR_200_CFG);
+
+	/* Write 1 to DDR_CAL_EN field in CORE_DLL_CONFIG_2 */
+	writel_relaxed((readl_relaxed(host->ioaddr +
+			msm_host_offset->CORE_DLL_CONFIG_2)
+			| CORE_DDR_CAL_EN),
+			host->ioaddr + msm_host_offset->CORE_DLL_CONFIG_2);
+
+	/* Poll on DDR_DLL_LOCK bit in CORE_DLL_STATUS to be set */
+	ret = readl_poll_timeout(host->ioaddr +
+		 msm_host_offset->CORE_DLL_STATUS,
+		 dll_status, (dll_status & CORE_DDR_DLL_LOCK), 10, 1000);
+
+	if (ret == -ETIMEDOUT) {
+		pr_err("%s: %s: CM_DLL_SDC4 Calibration was not completed\n",
+				mmc_hostname(host->mmc), __func__);
+		goto out;
+	}
+
+	/*
+	 * Set the CORE_PWRSAVE_DLL bit in CORE_VENDOR_SPEC3.
+	 * When MCLK is gated OFF, it is not gated for less than 0.5us,
+	 * and MCLK must be switched on for at least 1us before DATA
+	 * starts coming. Controllers with the 14lpp tech DLL cannot
+	 * guarantee the above requirement, so PWRSAVE_DLL should not
+	 * be turned on for host controllers using this DLL.
+	 */
+	if (!msm_host->use_14lpp_dll)
+		writel_relaxed((readl_relaxed(host->ioaddr +
+				msm_host_offset->CORE_VENDOR_SPEC3)
+				| CORE_PWRSAVE_DLL), host->ioaddr +
+				msm_host_offset->CORE_VENDOR_SPEC3);
+	/* ensure the PWRSAVE_DLL write completes before returning */
+	mb();
+out:
+	pr_debug("%s: Exit %s, ret:%d\n", mmc_hostname(host->mmc),
+			__func__, ret);
+	return ret;
+}
+
+static int sdhci_msm_enhanced_strobe(struct sdhci_host *host)
+{
+	int ret = 0;
+	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+	struct sdhci_msm_host *msm_host = pltfm_host->priv;
+	struct mmc_host *mmc = host->mmc;
+
+	pr_debug("%s: Enter %s\n", mmc_hostname(host->mmc), __func__);
+
+	if (!msm_host->enhanced_strobe || !mmc_card_strobe(mmc->card)) {
+		pr_debug("%s: host/card does not support hs400 enhanced strobe\n",
+				mmc_hostname(mmc));
+		return -EINVAL;
+	}
+
+	if (msm_host->calibration_done ||
+		!(mmc->ios.timing == MMC_TIMING_MMC_HS400)) {
 	return 0;
 }
 
-static int sdhci_msm_execute_tuning(struct sdhci_host *host, u32 opcode)
+	/*
+	 * Reset the tuning block.
+	 */
+	ret = msm_init_cm_dll(host);
+	if (ret)
+		goto out;
+
+	ret = sdhci_msm_cm_dll_sdc4_calibration(host);
+out:
+	if (!ret)
+		msm_host->calibration_done = true;
+	pr_debug("%s: Exit %s, ret:%d\n", mmc_hostname(host->mmc),
+			__func__, ret);
+	return ret;
+}
+
+static int sdhci_msm_hs400_dll_calibration(struct sdhci_host *host)
+{
+	int ret = 0;
+	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+	struct sdhci_msm_host *msm_host = pltfm_host->priv;
+	const struct sdhci_msm_offset *msm_host_offset =
+					msm_host->offset;
+
+	pr_debug("%s: Enter %s\n", mmc_hostname(host->mmc), __func__);
+
+	/*
+	 * Re-tuning in HS400 (DDR) mode will fail; instead, reset the
+	 * tuning block and restore the previously saved tuning phase.
+	 */
+	ret = msm_init_cm_dll(host);
+	if (ret)
+		goto out;
+
+	/* Set the selected phase in delay line hw block */
+	ret = msm_config_cm_dll_phase(host, msm_host->saved_tuning_phase);
+	if (ret)
+		goto out;
+
+	/* Write 1 to CMD_DAT_TRACK_SEL field in DLL_CONFIG */
+	writel_relaxed((readl_relaxed(host->ioaddr +
+				msm_host_offset->CORE_DLL_CONFIG)
+				| CORE_CMD_DAT_TRACK_SEL), host->ioaddr +
+				msm_host_offset->CORE_DLL_CONFIG);
+
+	if (msm_host->use_cdclp533)
+		/* Calibrate CDCLP533 DLL HW */
+		ret = sdhci_msm_cdclp533_calibration(host);
+	else
+		/* Calibrate CM_DLL_SDC4 HW */
+		ret = sdhci_msm_cm_dll_sdc4_calibration(host);
+out:
+	pr_debug("%s: Exit %s, ret:%d\n", mmc_hostname(host->mmc),
+			__func__, ret);
+	return ret;
+}
+
+static void sdhci_msm_set_mmc_drv_type(struct sdhci_host *host, u32 opcode,
+		u8 drv_type)
 {
+	struct mmc_command cmd = {0};
+	struct mmc_request mrq = {NULL};
+	struct mmc_host *mmc = host->mmc;
+	u8 val = ((drv_type << 4) | 2);
+
+	cmd.opcode = MMC_SWITCH;
+	cmd.arg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
+		(EXT_CSD_HS_TIMING << 16) |
+		(val << 8) |
+		EXT_CSD_CMD_SET_NORMAL;
+	cmd.flags = MMC_CMD_AC | MMC_RSP_R1B;
+	/* busy detect timeout, in ms */
+	cmd.busy_timeout = 1000 * 1000;
+
+	memset(cmd.resp, 0, sizeof(cmd.resp));
+	cmd.retries = 3;
+
+	mrq.cmd = &cmd;
+	cmd.data = NULL;
+
+	mmc_wait_for_req(mmc, &mrq);
+	pr_debug("%s: %s: set card drive type to %d\n",
+			mmc_hostname(mmc), __func__,
+			drv_type);
+}
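
The val computed above packs the EXT_CSD HS_TIMING byte: bits [7:4] carry the selected driver strength and bits [3:0] the timing interface, where 0x2 means HS200, so e.g. drv_type 1 yields 0x12. Read as a sketch:

	/* HS_TIMING byte: [7:4] driver strength, [3:0] timing (0x2 = HS200) */
	u8 hs_timing = (drv_type << 4) | 0x2;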
+
+int sdhci_msm_execute_tuning(struct sdhci_host *host, u32 opcode)
+{
+	unsigned long flags;
 	int tuning_seq_cnt = 3;
-	u8 phase, tuned_phases[16], tuned_phase_cnt = 0;
+	u8 phase, *data_buf, tuned_phases[NUM_TUNING_PHASES], tuned_phase_cnt;
+	const u32 *tuning_block_pattern = tuning_block_64;
+	int size = sizeof(tuning_block_64); /* Tuning pattern size in bytes */
 	int rc;
 	struct mmc_host *mmc = host->mmc;
 	struct mmc_ios ios = host->mmc->ios;
+	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+	struct sdhci_msm_host *msm_host = pltfm_host->priv;
+	u8 drv_type = 0;
+	bool drv_type_changed = false;
+	struct mmc_card *card = host->mmc->card;
+	int sts_retry;
+	u8 last_good_phase = 0;
 
 	/*
 	 * Tuning is required for SDR104, HS200 and HS400 cards and
 	 * if clock frequency is greater than 100MHz in these modes.
 	 */
-	if (host->clock <= 100 * 1000 * 1000 ||
-	    !((ios.timing == MMC_TIMING_MMC_HS200) ||
+	if (host->clock <= CORE_FREQ_100MHZ ||
+		!((ios.timing == MMC_TIMING_MMC_HS400) ||
+		(ios.timing == MMC_TIMING_MMC_HS200) ||
 	      (ios.timing == MMC_TIMING_UHS_SDR104)))
 		return 0;
 
+	/*
+	 * Don't allow re-tuning for CRC errors observed on any command
+	 * sent during the tuning sequence itself.
+	 */
+	if (msm_host->tuning_in_progress)
+		return 0;
+	msm_host->tuning_in_progress = true;
+	pr_debug("%s: Enter %s\n", mmc_hostname(mmc), __func__);
+
+	/* CDC/SDC4 DLL HW calibration is only required for HS400 mode */
+	if (msm_host->tuning_done && !msm_host->calibration_done &&
+		(mmc->ios.timing == MMC_TIMING_MMC_HS400)) {
+		rc = sdhci_msm_hs400_dll_calibration(host);
+		spin_lock_irqsave(&host->lock, flags);
+		if (!rc)
+			msm_host->calibration_done = true;
+		spin_unlock_irqrestore(&host->lock, flags);
+		goto out;
+	}
+
+	spin_lock_irqsave(&host->lock, flags);
+
+	if ((opcode == MMC_SEND_TUNING_BLOCK_HS200) &&
+		(mmc->ios.bus_width == MMC_BUS_WIDTH_8)) {
+		tuning_block_pattern = tuning_block_128;
+		size = sizeof(tuning_block_128);
+	}
+	spin_unlock_irqrestore(&host->lock, flags);
+
+	data_buf = kmalloc(size, GFP_KERNEL);
+	if (!data_buf) {
+		rc = -ENOMEM;
+		goto out;
+	}
+
 retry:
-	/* First of all reset the tuning block */
+	tuned_phase_cnt = 0;
+
+	/* first of all reset the tuning block */
 	rc = msm_init_cm_dll(host);
 	if (rc)
-		return rc;
+		goto kfree;
 
 	phase = 0;
 	do {
-		/* Set the phase in delay line hw block */
+		struct mmc_command cmd = {0};
+		struct mmc_data data = {0};
+		struct mmc_request mrq = {
+			.cmd = &cmd,
+			.data = &data
+		};
+		struct scatterlist sg;
+		struct mmc_command sts_cmd = {0};
+
+		/* set the phase in delay line hw block */
 		rc = msm_config_cm_dll_phase(host, phase);
 		if (rc)
-			return rc;
+			goto kfree;
+
+		cmd.opcode = opcode;
+		cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;
+
+		data.blksz = size;
+		data.blocks = 1;
+		data.flags = MMC_DATA_READ;
+		data.timeout_ns = 1000 * 1000 * 1000; /* 1 sec */
+
+		data.sg = &sg;
+		data.sg_len = 1;
+		sg_init_one(&sg, data_buf, size);
+		memset(data_buf, 0, size);
+		mmc_wait_for_req(mmc, &mrq);
+
+		if (card && (cmd.error || data.error)) {
+			/*
+			 * Set the dll to last known good phase while sending
+			 * status command to ensure that status command won't
+			 * fail due to bad phase.
+			 */
+			if (tuned_phase_cnt)
+				last_good_phase =
+					tuned_phases[tuned_phase_cnt-1];
+			else if (msm_host->saved_tuning_phase !=
+					INVALID_TUNING_PHASE)
+				last_good_phase = msm_host->saved_tuning_phase;
+
+			rc = msm_config_cm_dll_phase(host, last_good_phase);
+			if (rc)
+				goto kfree;
+
+			sts_cmd.opcode = MMC_SEND_STATUS;
+			sts_cmd.arg = card->rca << 16;
+			sts_cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
+			sts_retry = 5;
+			while (sts_retry) {
+				mmc_wait_for_cmd(mmc, &sts_cmd, 0);
+
+				if (sts_cmd.error ||
+				   (R1_CURRENT_STATE(sts_cmd.resp[0])
+				   != R1_STATE_TRAN)) {
+					sts_retry--;
+					/*
+					 * Wait for at least 146 MCLK cycles
+					 * for the card to move to TRANS
+					 * state. As the MCLK would be at
+					 * least 200MHz during tuning, at
+					 * most a 0.73us delay is needed; 1ms
+					 * is used to be on the safer side.
+					 */
+					usleep_range(1000, 1200);
+					pr_debug("%s: phase %d sts cmd err %d resp 0x%x\n",
+						mmc_hostname(mmc), phase,
+						sts_cmd.error, sts_cmd.resp[0]);
+					continue;
+				}
+				break;
+			}
+		}
 
-		rc = mmc_send_tuning(mmc, opcode, NULL);
-		if (!rc) {
-			/* Tuning is successful at this tuning point */
+		if (!cmd.error && !data.error &&
+			!memcmp(data_buf, tuning_block_pattern, size)) {
+			/* tuning is successful at this tuning point */
 			tuned_phases[tuned_phase_cnt++] = phase;
-			dev_dbg(mmc_dev(mmc), "%s: Found good phase = %d\n",
-				 mmc_hostname(mmc), phase);
+			pr_debug("%s: %s: found *** good *** phase = %d\n",
+				mmc_hostname(mmc), __func__, phase);
+		} else {
+			pr_debug("%s: %s: found ## bad ## phase = %d\n",
+				mmc_hostname(mmc), __func__, phase);
 		}
-	} while (++phase < ARRAY_SIZE(tuned_phases));
+	} while (++phase < NUM_TUNING_PHASES);
+
+	if ((tuned_phase_cnt == NUM_TUNING_PHASES) &&
+			card && mmc_card_mmc(card)) {
+		/*
+		 * If all phases pass then it's a problem. So change the card's
+		 * drive type to a different value, if supported, and repeat
+		 * tuning until at least one phase fails. Then set the original
+		 * drive type back.
+		 *
+		 * If all the phases still pass after trying all possible
+		 * drive types, then one of those 16 phases will be picked.
+		 * This is no different from what was going on before the
+		 * modification to change drive type and retune.
+		 */
+		pr_debug("%s: tuned phases count: %d\n", mmc_hostname(mmc),
+				tuned_phase_cnt);
+
+		/* set drive type to another value; the default is 0x0 */
+		while (++drv_type <= MAX_DRV_TYPES_SUPPORTED_HS200) {
+			pr_debug("%s: trying different drive strength (%d)\n",
+				mmc_hostname(mmc), drv_type);
+			if (card->ext_csd.raw_driver_strength &
+					(1 << drv_type)) {
+				sdhci_msm_set_mmc_drv_type(host, opcode,
+						drv_type);
+				drv_type_changed = true;
+				goto retry;
+			}
+		}
+	}
+
+	/* reset drive type to default (50 ohm) if changed */
+	if (drv_type_changed)
+		sdhci_msm_set_mmc_drv_type(host, opcode, 0);
 
 	if (tuned_phase_cnt) {
 		rc = msm_find_most_appropriate_phase(host, tuned_phases,
 						     tuned_phase_cnt);
 		if (rc < 0)
-			return rc;
+			goto kfree;
 		else
-			phase = rc;
+			phase = (u8)rc;
 
 		/*
 		 * Finally set the selected phase in delay
@@ -396,70 +1331,3025 @@
 		 */
 		rc = msm_config_cm_dll_phase(host, phase);
 		if (rc)
-			return rc;
-		dev_dbg(mmc_dev(mmc), "%s: Setting the tuning phase to %d\n",
-			 mmc_hostname(mmc), phase);
+			goto kfree;
+		msm_host->saved_tuning_phase = phase;
+		pr_debug("%s: %s: finally setting the tuning phase to %d\n",
+				mmc_hostname(mmc), __func__, phase);
 	} else {
 		if (--tuning_seq_cnt)
 			goto retry;
-		/* Tuning failed */
-		dev_dbg(mmc_dev(mmc), "%s: No tuning point found\n",
-		       mmc_hostname(mmc));
+		/* tuning failed */
+		pr_err("%s: %s: no tuning point found\n",
+			mmc_hostname(mmc), __func__);
 		rc = -EIO;
 	}
 
+kfree:
+	kfree(data_buf);
+out:
+	spin_lock_irqsave(&host->lock, flags);
+	if (!rc)
+		msm_host->tuning_done = true;
+	spin_unlock_irqrestore(&host->lock, flags);
+	msm_host->tuning_in_progress = false;
+	pr_debug("%s: Exit %s, err(%d)\n", mmc_hostname(mmc), __func__, rc);
 	return rc;
 }
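
For reference, the 1 ms wait in the status-polling loop follows from: 146 MCLK cycles at the minimum 200 MHz tuning clock take 146 / 200e6 s = 730 ns, so even a 1 us sleep covers the worst case and 1 ms only adds margin. The same arithmetic as a self-contained sketch:

	/*
	 * Worst-case wait (in ns) for the card to reach TRAN state:
	 * trans_wait_ns(146, 200000000) == 730.
	 */
	static unsigned long long trans_wait_ns(unsigned int cycles,
						unsigned int hz)
	{
		return (unsigned long long)cycles * 1000000000ULL / hz;
	}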
 
-static const struct of_device_id sdhci_msm_dt_match[] = {
-	{ .compatible = "qcom,sdhci-msm-v4" },
-	{},
-};
+static int sdhci_msm_setup_gpio(struct sdhci_msm_pltfm_data *pdata, bool enable)
+{
+	struct sdhci_msm_gpio_data *curr;
+	int i, ret = 0;
 
-MODULE_DEVICE_TABLE(of, sdhci_msm_dt_match);
+	curr = pdata->pin_data->gpio_data;
+	for (i = 0; i < curr->size; i++) {
+		if (!gpio_is_valid(curr->gpio[i].no)) {
+			ret = -EINVAL;
+			pr_err("%s: Invalid gpio = %d\n", __func__,
+					curr->gpio[i].no);
+			goto free_gpios;
+		}
+		if (enable) {
+			ret = gpio_request(curr->gpio[i].no,
+						curr->gpio[i].name);
+			if (ret) {
+				pr_err("%s: gpio_request(%d, %s) failed %d\n",
+					__func__, curr->gpio[i].no,
+					curr->gpio[i].name, ret);
+				goto free_gpios;
+			}
+			curr->gpio[i].is_enabled = true;
+		} else {
+			gpio_free(curr->gpio[i].no);
+			curr->gpio[i].is_enabled = false;
+		}
+	}
+	return ret;
+
+free_gpios:
+	for (i--; i >= 0; i--) {
+		gpio_free(curr->gpio[i].no);
+		curr->gpio[i].is_enabled = false;
+	}
+	return ret;
+}
+
+static int sdhci_msm_setup_pinctrl(struct sdhci_msm_pltfm_data *pdata,
+		bool enable)
+{
+	int ret = 0;
+
+	if (enable)
+		ret = pinctrl_select_state(pdata->pctrl_data->pctrl,
+			pdata->pctrl_data->pins_active);
+	else
+		ret = pinctrl_select_state(pdata->pctrl_data->pctrl,
+			pdata->pctrl_data->pins_sleep);
+
+	if (ret < 0)
+		pr_err("%s state for pinctrl failed with %d\n",
+			enable ? "Enabling" : "Disabling", ret);
+
+	return ret;
+}
+
+static int sdhci_msm_setup_pins(struct sdhci_msm_pltfm_data *pdata, bool enable)
+{
+	int ret = 0;
+
+	if (pdata->pin_cfg_sts == enable) {
+		return 0;
+	} else if (pdata->pctrl_data) {
+		ret = sdhci_msm_setup_pinctrl(pdata, enable);
+		goto out;
+	} else if (!pdata->pin_data) {
+		return 0;
+	}
+	if (pdata->pin_data->is_gpio)
+		ret = sdhci_msm_setup_gpio(pdata, enable);
+out:
+	if (!ret)
+		pdata->pin_cfg_sts = enable;
+
+	return ret;
+}
+
+static int sdhci_msm_dt_get_array(struct device *dev, const char *prop_name,
+				 u32 **out, int *len, u32 size)
+{
+	int ret = 0;
+	struct device_node *np = dev->of_node;
+	size_t sz;
+	u32 *arr = NULL;
+
+	if (!of_get_property(np, prop_name, len)) {
+		ret = -EINVAL;
+		goto out;
+	}
+	sz = *len = *len / sizeof(*arr);
+	if (!sz || (size > 0 && (sz > size))) {
+		dev_err(dev, "%s invalid size\n", prop_name);
+		ret = -EINVAL;
+		goto out;
+	}
+
+	arr = devm_kzalloc(dev, sz * sizeof(*arr), GFP_KERNEL);
+	if (!arr) {
+		dev_err(dev, "%s failed allocating memory\n", prop_name);
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	ret = of_property_read_u32_array(np, prop_name, arr, sz);
+	if (ret < 0) {
+		dev_err(dev, "%s failed reading array %d\n", prop_name, ret);
+		goto out;
+	}
+	*out = arr;
+out:
+	if (ret)
+		*len = 0;
+	return ret;
+}
+
+#define MAX_PROP_SIZE 32
+static int sdhci_msm_dt_parse_vreg_info(struct device *dev,
+		struct sdhci_msm_reg_data **vreg_data, const char *vreg_name)
+{
+	int len, ret = 0;
+	const __be32 *prop;
+	char prop_name[MAX_PROP_SIZE];
+	struct sdhci_msm_reg_data *vreg;
+	struct device_node *np = dev->of_node;
+
+	snprintf(prop_name, MAX_PROP_SIZE, "%s-supply", vreg_name);
+	if (!of_parse_phandle(np, prop_name, 0)) {
+		dev_info(dev, "No vreg data found for %s\n", vreg_name);
+		return ret;
+	}
+
+	vreg = devm_kzalloc(dev, sizeof(*vreg), GFP_KERNEL);
+	if (!vreg) {
+		dev_err(dev, "No memory for vreg: %s\n", vreg_name);
+		ret = -ENOMEM;
+		return ret;
+	}
+
+	vreg->name = vreg_name;
+
+	snprintf(prop_name, MAX_PROP_SIZE,
+			"qcom,%s-always-on", vreg_name);
+	if (of_get_property(np, prop_name, NULL))
+		vreg->is_always_on = true;
+
+	snprintf(prop_name, MAX_PROP_SIZE,
+			"qcom,%s-lpm-sup", vreg_name);
+	if (of_get_property(np, prop_name, NULL))
+		vreg->lpm_sup = true;
+
+	snprintf(prop_name, MAX_PROP_SIZE,
+			"qcom,%s-voltage-level", vreg_name);
+	prop = of_get_property(np, prop_name, &len);
+	if (!prop || (len != (2 * sizeof(__be32)))) {
+		dev_warn(dev, "%s %s property\n",
+			prop ? "invalid format" : "no", prop_name);
+	} else {
+		vreg->low_vol_level = be32_to_cpup(&prop[0]);
+		vreg->high_vol_level = be32_to_cpup(&prop[1]);
+	}
+
+	snprintf(prop_name, MAX_PROP_SIZE,
+			"qcom,%s-current-level", vreg_name);
+	prop = of_get_property(np, prop_name, &len);
+	if (!prop || (len != (2 * sizeof(__be32)))) {
+		dev_warn(dev, "%s %s property\n",
+			prop ? "invalid format" : "no", prop_name);
+	} else {
+		vreg->lpm_uA = be32_to_cpup(&prop[0]);
+		vreg->hpm_uA = be32_to_cpup(&prop[1]);
+	}
+
+	*vreg_data = vreg;
+	dev_dbg(dev, "%s: %s %s vol=[%d %d]uV, curr=[%d %d]uA\n",
+		vreg->name, vreg->is_always_on ? "always_on," : "",
+		vreg->lpm_sup ? "lpm_sup," : "", vreg->low_vol_level,
+		vreg->high_vol_level, vreg->lpm_uA, vreg->hpm_uA);
+
+	return ret;
+}
+
+/* GPIO/Pad data extraction */
+static int sdhci_msm_parse_pinctrl_info(struct device *dev,
+		struct sdhci_msm_pltfm_data *pdata)
+{
+	struct sdhci_pinctrl_data *pctrl_data;
+	struct pinctrl *pctrl;
+	int ret = 0;
+
+	/* Try to obtain pinctrl handle */
+	pctrl = devm_pinctrl_get(dev);
+	if (IS_ERR(pctrl)) {
+		ret = PTR_ERR(pctrl);
+		goto out;
+	}
+	pctrl_data = devm_kzalloc(dev, sizeof(*pctrl_data), GFP_KERNEL);
+	if (!pctrl_data) {
+		dev_err(dev, "No memory for sdhci_pinctrl_data\n");
+		ret = -ENOMEM;
+		goto out;
+	}
+	pctrl_data->pctrl = pctrl;
+	/* Look-up and keep the states handy to be used later */
+	pctrl_data->pins_active = pinctrl_lookup_state(
+			pctrl_data->pctrl, "active");
+	if (IS_ERR(pctrl_data->pins_active)) {
+		ret = PTR_ERR(pctrl_data->pins_active);
+		dev_err(dev, "Could not get active pinstates, err:%d\n", ret);
+		goto out;
+	}
+	pctrl_data->pins_sleep = pinctrl_lookup_state(
+			pctrl_data->pctrl, "sleep");
+	if (IS_ERR(pctrl_data->pins_sleep)) {
+		ret = PTR_ERR(pctrl_data->pins_sleep);
+		dev_err(dev, "Could not get sleep pinstates, err:%d\n", ret);
+		goto out;
+	}
+	pdata->pctrl_data = pctrl_data;
+out:
+	return ret;
+}
+
+#define GPIO_NAME_MAX_LEN 32
+static int sdhci_msm_dt_parse_gpio_info(struct device *dev,
+		struct sdhci_msm_pltfm_data *pdata)
+{
+	int ret = 0, cnt, i;
+	struct sdhci_msm_pin_data *pin_data;
+	struct device_node *np = dev->of_node;
+
+	ret = sdhci_msm_parse_pinctrl_info(dev, pdata);
+	if (!ret) {
+		goto out;
+	} else if (ret == -EPROBE_DEFER) {
+		dev_err(dev, "Pinctrl framework not registered, err:%d\n", ret);
+		goto out;
+	} else {
+		dev_err(dev, "Parsing Pinctrl failed with %d, falling back on GPIO lib\n",
+			ret);
+		ret = 0;
+	}
+	pin_data = devm_kzalloc(dev, sizeof(*pin_data), GFP_KERNEL);
+	if (!pin_data) {
+		dev_err(dev, "No memory for pin_data\n");
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	cnt = of_gpio_count(np);
+	if (cnt > 0) {
+		pin_data->is_gpio = true;
+		pin_data->gpio_data = devm_kzalloc(dev,
+				sizeof(struct sdhci_msm_gpio_data), GFP_KERNEL);
+		if (!pin_data->gpio_data) {
+			dev_err(dev, "No memory for gpio_data\n");
+			ret = -ENOMEM;
+			goto out;
+		}
+		pin_data->gpio_data->size = cnt;
+		pin_data->gpio_data->gpio = devm_kzalloc(dev, cnt *
+				sizeof(struct sdhci_msm_gpio), GFP_KERNEL);
+
+		if (!pin_data->gpio_data->gpio) {
+			dev_err(dev, "No memory for gpio\n");
+			ret = -ENOMEM;
+			goto out;
+		}
+		for (i = 0; i < cnt; i++) {
+			const char *name = NULL;
+
+			pin_data->gpio_data->gpio[i].no = of_get_gpio(np, i);
+			of_property_read_string_index(np,
+					"qcom,gpio-names", i, &name);
+
+			/*
+			 * Allocate the name with device-managed memory; a
+			 * stack buffer here would dangle once the loop
+			 * iteration ends while gpio[i].name is still used
+			 * by gpio_request() later.
+			 */
+			pin_data->gpio_data->gpio[i].name = devm_kasprintf(dev,
+					GFP_KERNEL, "%s-%s", dev_name(dev),
+					name ? name : "?");
+			dev_dbg(dev, "%s: gpio[%s] = %d\n", __func__,
+				pin_data->gpio_data->gpio[i].name,
+				pin_data->gpio_data->gpio[i].no);
+		}
+	}
+	pdata->pin_data = pin_data;
+out:
+	if (ret)
+		dev_err(dev, "%s failed with err %d\n", __func__, ret);
+	return ret;
+}
+
+#ifdef CONFIG_SMP
+static inline void parse_affine_irq(struct sdhci_msm_pltfm_data *pdata)
+{
+	pdata->pm_qos_data.irq_req_type = PM_QOS_REQ_AFFINE_IRQ;
+}
+#else
+static inline void parse_affine_irq(struct sdhci_msm_pltfm_data *pdata) { }
+#endif
+
+static int sdhci_msm_pm_qos_parse_irq(struct device *dev,
+		struct sdhci_msm_pltfm_data *pdata)
+{
+	struct device_node *np = dev->of_node;
+	const char *str;
+	u32 cpu;
+	int ret = 0;
+	int i;
+
+	pdata->pm_qos_data.irq_valid = false;
+	pdata->pm_qos_data.irq_req_type = PM_QOS_REQ_AFFINE_CORES;
+	if (!of_property_read_string(np, "qcom,pm-qos-irq-type", &str) &&
+		!strcmp(str, "affine_irq")) {
+		parse_affine_irq(pdata);
+	}
+
+	/* must specify cpu for "affine_cores" type */
+	if (pdata->pm_qos_data.irq_req_type == PM_QOS_REQ_AFFINE_CORES) {
+		pdata->pm_qos_data.irq_cpu = -1;
+		ret = of_property_read_u32(np, "qcom,pm-qos-irq-cpu", &cpu);
+		if (ret) {
+			dev_err(dev, "%s: error %d reading irq cpu\n", __func__,
+				ret);
+			goto out;
+		}
+		if (cpu >= num_possible_cpus()) {
+			dev_err(dev, "%s: invalid irq cpu %d (NR_CPUS=%d)\n",
+				__func__, cpu, num_possible_cpus());
+			ret = -EINVAL;
+			goto out;
+		}
+		pdata->pm_qos_data.irq_cpu = cpu;
+	}
+
+	if (of_property_count_u32_elems(np, "qcom,pm-qos-irq-latency") !=
+		SDHCI_POWER_POLICY_NUM) {
+		dev_err(dev, "%s: could not read %d values for 'qcom,pm-qos-irq-latency'\n",
+			__func__, SDHCI_POWER_POLICY_NUM);
+		ret = -EINVAL;
+		goto out;
+	}
+
+	for (i = 0; i < SDHCI_POWER_POLICY_NUM; i++)
+		of_property_read_u32_index(np, "qcom,pm-qos-irq-latency", i,
+			&pdata->pm_qos_data.irq_latency.latency[i]);
+
+	pdata->pm_qos_data.irq_valid = true;
+out:
+	return ret;
+}
+
+static int sdhci_msm_pm_qos_parse_cpu_groups(struct device *dev,
+		struct sdhci_msm_pltfm_data *pdata)
+{
+	struct device_node *np = dev->of_node;
+	u32 mask;
+	int nr_groups;
+	int ret;
+	int i;
+
+	/* Read cpu group mapping */
+	nr_groups = of_property_count_u32_elems(np, "qcom,pm-qos-cpu-groups");
+	if (nr_groups <= 0) {
+		ret = -EINVAL;
+		goto out;
+	}
+	pdata->pm_qos_data.cpu_group_map.nr_groups = nr_groups;
+	pdata->pm_qos_data.cpu_group_map.mask =
+		kcalloc(nr_groups, sizeof(cpumask_t), GFP_KERNEL);
+	if (!pdata->pm_qos_data.cpu_group_map.mask) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	for (i = 0; i < nr_groups; i++) {
+		of_property_read_u32_index(np, "qcom,pm-qos-cpu-groups",
+			i, &mask);
+
+		pdata->pm_qos_data.cpu_group_map.mask[i].bits[0] = mask;
+		if (!cpumask_subset(&pdata->pm_qos_data.cpu_group_map.mask[i],
+			cpu_possible_mask)) {
+			dev_err(dev, "%s: invalid mask 0x%x of cpu group #%d\n",
+				__func__, mask, i);
+			ret = -EINVAL;
+			goto free_res;
+		}
+	}
+	return 0;
+
+free_res:
+	kfree(pdata->pm_qos_data.cpu_group_map.mask);
+out:
+	return ret;
+}
+
+static int sdhci_msm_pm_qos_parse_latency(struct device *dev, const char *name,
+		int nr_groups, struct sdhci_msm_pm_qos_latency **latency)
+{
+	struct device_node *np = dev->of_node;
+	struct sdhci_msm_pm_qos_latency *values;
+	int ret;
+	int i;
+	int group;
+	int cfg;
+
+	ret = of_property_count_u32_elems(np, name);
+	if (ret > 0 && ret != SDHCI_POWER_POLICY_NUM * nr_groups) {
+		dev_err(dev, "%s: invalid number of values for property %s: expected=%d actual=%d\n",
+			__func__, name,	SDHCI_POWER_POLICY_NUM * nr_groups,
+			ret);
+		return -EINVAL;
+	} else if (ret < 0) {
+		return ret;
+	}
+
+	values = kcalloc(nr_groups, sizeof(struct sdhci_msm_pm_qos_latency),
+			GFP_KERNEL);
+	if (!values)
+		return -ENOMEM;
+
+	for (i = 0; i < SDHCI_POWER_POLICY_NUM * nr_groups; i++) {
+		group = i / SDHCI_POWER_POLICY_NUM;
+		cfg = i % SDHCI_POWER_POLICY_NUM;
+		of_property_read_u32_index(np, name, i,
+				&(values[group].latency[cfg]));
+	}
+
+	*latency = values;
+	return 0;
+}
+
+static void sdhci_msm_pm_qos_parse(struct device *dev,
+				struct sdhci_msm_pltfm_data *pdata)
+{
+	if (sdhci_msm_pm_qos_parse_irq(dev, pdata))
+		dev_notice(dev, "%s: PM QoS voting for IRQ will be disabled\n",
+			__func__);
+
+	if (!sdhci_msm_pm_qos_parse_cpu_groups(dev, pdata)) {
+		pdata->pm_qos_data.cmdq_valid =
+			!sdhci_msm_pm_qos_parse_latency(dev,
+				"qcom,pm-qos-cmdq-latency-us",
+				pdata->pm_qos_data.cpu_group_map.nr_groups,
+				&pdata->pm_qos_data.cmdq_latency);
+		pdata->pm_qos_data.legacy_valid =
+			!sdhci_msm_pm_qos_parse_latency(dev,
+				"qcom,pm-qos-legacy-latency-us",
+				pdata->pm_qos_data.cpu_group_map.nr_groups,
+				&pdata->pm_qos_data.latency);
+		if (!pdata->pm_qos_data.cmdq_valid &&
+			!pdata->pm_qos_data.legacy_valid) {
+			/* clean-up previously allocated arrays */
+			kfree(pdata->pm_qos_data.latency);
+			kfree(pdata->pm_qos_data.cmdq_latency);
+			dev_err(dev, "%s: invalid PM QoS latency values. Voting for cpu group will be disabled\n",
+				__func__);
+		}
+	} else {
+		dev_notice(dev, "%s: PM QoS voting for cpu group will be disabled\n",
+			__func__);
+	}
+}
+
+/* Parse platform data */
+static
+struct sdhci_msm_pltfm_data *sdhci_msm_populate_pdata(struct device *dev,
+						struct sdhci_msm_host *msm_host)
+{
+	struct sdhci_msm_pltfm_data *pdata = NULL;
+	struct device_node *np = dev->of_node;
+	u32 bus_width = 0;
+	int len, i;
+	int clk_table_len;
+	u32 *clk_table = NULL;
+	int ice_clk_table_len;
+	u32 *ice_clk_table = NULL;
+	enum of_gpio_flags flags = OF_GPIO_ACTIVE_LOW;
+	const char *lower_bus_speed = NULL;
+
+	pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
+	if (!pdata) {
+		dev_err(dev, "failed to allocate memory for platform data\n");
+		goto out;
+	}
+
+	pdata->status_gpio = of_get_named_gpio_flags(np, "cd-gpios", 0, &flags);
+	if (gpio_is_valid(pdata->status_gpio) && !(flags & OF_GPIO_ACTIVE_LOW))
+		pdata->caps2 |= MMC_CAP2_CD_ACTIVE_HIGH;
+
+	of_property_read_u32(np, "qcom,bus-width", &bus_width);
+	if (bus_width == 8)
+		pdata->mmc_bus_width = MMC_CAP_8_BIT_DATA;
+	else if (bus_width == 4)
+		pdata->mmc_bus_width = MMC_CAP_4_BIT_DATA;
+	else {
+		dev_notice(dev, "invalid bus-width, default to 1-bit mode\n");
+		pdata->mmc_bus_width = 0;
+	}
+
+	if (sdhci_msm_dt_get_array(dev, "qcom,devfreq,freq-table",
+			&msm_host->mmc->clk_scaling.pltfm_freq_table,
+			&msm_host->mmc->clk_scaling.pltfm_freq_table_sz, 0))
+		pr_debug("%s: no clock scaling frequencies were supplied\n",
+			dev_name(dev));
+	else if (!msm_host->mmc->clk_scaling.pltfm_freq_table ||
+			!msm_host->mmc->clk_scaling.pltfm_freq_table_sz)
+		dev_err(dev, "bad dts clock scaling frequencies\n");
+
+	/*
+	 * A few hosts can support DDR52 mode at the same lower
+	 * system voltage corner as high-speed mode. In such cases,
+	 * it is always better to put them in DDR mode, which will
+	 * improve performance without any power impact.
+	 */
+	if (!of_property_read_string(np, "qcom,scaling-lower-bus-speed-mode",
+				&lower_bus_speed)) {
+		if (!strcmp(lower_bus_speed, "DDR52"))
+			msm_host->mmc->clk_scaling.lower_bus_speed_mode |=
+				MMC_SCALING_LOWER_DDR52_MODE;
+	}
+
+	if (sdhci_msm_dt_get_array(dev, "qcom,clk-rates",
+			&clk_table, &clk_table_len, 0)) {
+		dev_err(dev, "failed parsing supported clock rates\n");
+		goto out;
+	}
+	if (!clk_table || !clk_table_len) {
+		dev_err(dev, "Invalid clock table\n");
+		goto out;
+	}
+	pdata->sup_clk_table = clk_table;
+	pdata->sup_clk_cnt = clk_table_len;
+
+	if (msm_host->ice.pdev) {
+		if (sdhci_msm_dt_get_array(dev, "qcom,ice-clk-rates",
+				&ice_clk_table, &ice_clk_table_len, 0)) {
+			dev_err(dev, "failed parsing supported ice clock rates\n");
+			goto out;
+		}
+		if (!ice_clk_table || !ice_clk_table_len) {
+			dev_err(dev, "Invalid ICE clock table\n");
+			goto out;
+		}
+		if (ice_clk_table_len != 2) {
+			dev_err(dev, "Need max and min frequencies in the table\n");
+			goto out;
+		}
+		pdata->sup_ice_clk_table = ice_clk_table;
+		pdata->sup_ice_clk_cnt = ice_clk_table_len;
+		pdata->ice_clk_max = pdata->sup_ice_clk_table[0];
+		pdata->ice_clk_min = pdata->sup_ice_clk_table[1];
+		dev_dbg(dev, "supported ICE clock rates (Hz): max: %u min: %u\n",
+				pdata->ice_clk_max, pdata->ice_clk_min);
+	}
+
+	pdata->vreg_data = devm_kzalloc(dev,
+			sizeof(struct sdhci_msm_slot_reg_data), GFP_KERNEL);
+	if (!pdata->vreg_data) {
+		dev_err(dev, "failed to allocate memory for vreg data\n");
+		goto out;
+	}
+
+	if (sdhci_msm_dt_parse_vreg_info(dev, &pdata->vreg_data->vdd_data,
+					 "vdd")) {
+		dev_err(dev, "failed parsing vdd data\n");
+		goto out;
+	}
+	if (sdhci_msm_dt_parse_vreg_info(dev,
+					 &pdata->vreg_data->vdd_io_data,
+					 "vdd-io")) {
+		dev_err(dev, "failed parsing vdd-io data\n");
+		goto out;
+	}
+
+	if (sdhci_msm_dt_parse_gpio_info(dev, pdata)) {
+		dev_err(dev, "failed parsing gpio data\n");
+		goto out;
+	}
+
+	len = of_property_count_strings(np, "qcom,bus-speed-mode");
+
+	for (i = 0; i < len; i++) {
+		const char *name = NULL;
+
+		of_property_read_string_index(np,
+			"qcom,bus-speed-mode", i, &name);
+		if (!name)
+			continue;
+
+		if (!strncmp(name, "HS400_1p8v", sizeof("HS400_1p8v")))
+			pdata->caps2 |= MMC_CAP2_HS400_1_8V;
+		else if (!strncmp(name, "HS400_1p2v", sizeof("HS400_1p2v")))
+			pdata->caps2 |= MMC_CAP2_HS400_1_2V;
+		else if (!strncmp(name, "HS200_1p8v", sizeof("HS200_1p8v")))
+			pdata->caps2 |= MMC_CAP2_HS200_1_8V_SDR;
+		else if (!strncmp(name, "HS200_1p2v", sizeof("HS200_1p2v")))
+			pdata->caps2 |= MMC_CAP2_HS200_1_2V_SDR;
+		else if (!strncmp(name, "DDR_1p8v", sizeof("DDR_1p8v")))
+			pdata->caps |= MMC_CAP_1_8V_DDR
+						| MMC_CAP_UHS_DDR50;
+		else if (!strncmp(name, "DDR_1p2v", sizeof("DDR_1p2v")))
+			pdata->caps |= MMC_CAP_1_2V_DDR
+						| MMC_CAP_UHS_DDR50;
+	}
+
+	if (of_get_property(np, "qcom,nonremovable", NULL))
+		pdata->nonremovable = true;
+
+	if (of_get_property(np, "qcom,nonhotplug", NULL))
+		pdata->nonhotplug = true;
+
+	pdata->largeaddressbus =
+		of_property_read_bool(np, "qcom,large-address-bus");
+
+	if (of_property_read_bool(np, "qcom,wakeup-on-idle"))
+		msm_host->mmc->wakeup_on_idle = true;
+
+	sdhci_msm_pm_qos_parse(dev, pdata);
+
+	if (of_get_property(np, "qcom,core_3_0v_support", NULL))
+		msm_host->core_3_0v_support = true;
+
+	pdata->sdr104_wa = of_property_read_bool(np, "qcom,sdr104-wa");
+
+	return pdata;
+out:
+	return NULL;
+}
+
+/* Returns required bandwidth in Bytes per Sec */
+static unsigned int sdhci_get_bw_required(struct sdhci_host *host,
+					struct mmc_ios *ios)
+{
+	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+	struct sdhci_msm_host *msm_host = pltfm_host->priv;
+
+	unsigned int bw;
+
+	bw = msm_host->clk_rate;
+	/*
+	 * In DDR mode, the SDCC controller clock runs at double
+	 * the rate of the actual clock that goes to the card.
+	 */
+	if (ios->bus_width == MMC_BUS_WIDTH_4)
+		bw /= 2;
+	else if (ios->bus_width == MMC_BUS_WIDTH_1)
+		bw /= 8;
+
+	return bw;
+}
+
+static int sdhci_msm_bus_get_vote_for_bw(struct sdhci_msm_host *host,
+					   unsigned int bw)
+{
+	unsigned int *table = host->pdata->voting_data->bw_vecs;
+	unsigned int size = host->pdata->voting_data->bw_vecs_size;
+	int i;
+
+	if (host->msm_bus_vote.is_max_bw_needed && bw)
+		return host->msm_bus_vote.max_bw_vote;
+
+	for (i = 0; i < size; i++) {
+		if (bw <= table[i])
+			break;
+	}
+
+	if (i && (i == size))
+		i--;
+
+	return i;
+}
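
The lookup above returns the index of the first bw_vecs entry that covers the requested bandwidth, clamping to the last entry when nothing fits. With a hypothetical table of {0, 50000000, 100000000, 200000000}, a request for 60000000 B/s picks index 2, and anything above 200000000 clamps to index 3. A standalone sketch of the same walk:

	/*
	 * First index whose capacity covers bw, clamped to the last
	 * entry; mirrors the lookup above, table values hypothetical.
	 */
	static int vote_for_bw(const unsigned int *table, int size,
			       unsigned int bw)
	{
		int i;

		for (i = 0; i < size; i++)
			if (bw <= table[i])
				break;
		if (i && i == size)
			i--;
		return i;
	}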
+
+/*
+ * This function must be called with the host lock held.
+ * The caller must also ensure that the msm bus client handle
+ * is not NULL.
+ */
+static inline int sdhci_msm_bus_set_vote(struct sdhci_msm_host *msm_host,
+					     int vote,
+					     unsigned long *flags)
+{
+	struct sdhci_host *host =  platform_get_drvdata(msm_host->pdev);
+	int rc = 0;
+
+	BUG_ON(!flags);
+
+	if (vote != msm_host->msm_bus_vote.curr_vote) {
+		spin_unlock_irqrestore(&host->lock, *flags);
+		rc = msm_bus_scale_client_update_request(
+				msm_host->msm_bus_vote.client_handle, vote);
+		spin_lock_irqsave(&host->lock, *flags);
+		if (rc) {
+			pr_err("%s: msm_bus_scale_client_update_request() failed: bus_client_handle=0x%x, vote=%d, err=%d\n",
+				mmc_hostname(host->mmc),
+				msm_host->msm_bus_vote.client_handle, vote, rc);
+			goto out;
+		}
+		msm_host->msm_bus_vote.curr_vote = vote;
+	}
+out:
+	return rc;
+}
+
+/*
+ * Internal work item: sets the msm bus bandwidth vote to 0.
+ */
+static void sdhci_msm_bus_work(struct work_struct *work)
+{
+	struct sdhci_msm_host *msm_host;
+	struct sdhci_host *host;
+	unsigned long flags;
+
+	msm_host = container_of(work, struct sdhci_msm_host,
+				msm_bus_vote.vote_work.work);
+	host =  platform_get_drvdata(msm_host->pdev);
+
+	if (!msm_host->msm_bus_vote.client_handle)
+		return;
+
+	spin_lock_irqsave(&host->lock, flags);
+	/* don't vote for 0 bandwidth if any request is in progress */
+	if (!host->mrq) {
+		sdhci_msm_bus_set_vote(msm_host,
+			msm_host->msm_bus_vote.min_bw_vote, &flags);
+	} else {
+		pr_warning("%s: %s: transfer in progress, skipping bus vote for 0 bandwidth\n",
+			   mmc_hostname(host->mmc), __func__);
+	}
+	spin_unlock_irqrestore(&host->lock, flags);
+}
+
+/*
+ * This function cancels any scheduled delayed work and sets the bus
+ * vote based on bw (bandwidth) argument.
+ */
+static void sdhci_msm_bus_cancel_work_and_set_vote(struct sdhci_host *host,
+						unsigned int bw)
+{
+	int vote;
+	unsigned long flags;
+	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+	struct sdhci_msm_host *msm_host = pltfm_host->priv;
+
+	cancel_delayed_work_sync(&msm_host->msm_bus_vote.vote_work);
+	spin_lock_irqsave(&host->lock, flags);
+	vote = sdhci_msm_bus_get_vote_for_bw(msm_host, bw);
+	sdhci_msm_bus_set_vote(msm_host, vote, &flags);
+	spin_unlock_irqrestore(&host->lock, flags);
+}
+
+#define MSM_MMC_BUS_VOTING_DELAY	200 /* msecs */
+
+/* This function queues work that will set the bandwidth requirement to 0 */
+static void sdhci_msm_bus_queue_work(struct sdhci_host *host)
+{
+	unsigned long flags;
+	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+	struct sdhci_msm_host *msm_host = pltfm_host->priv;
+
+	spin_lock_irqsave(&host->lock, flags);
+	if (msm_host->msm_bus_vote.min_bw_vote !=
+		msm_host->msm_bus_vote.curr_vote)
+		queue_delayed_work(system_wq,
+				   &msm_host->msm_bus_vote.vote_work,
+				   msecs_to_jiffies(MSM_MMC_BUS_VOTING_DELAY));
+	spin_unlock_irqrestore(&host->lock, flags);
+}
+
+static int sdhci_msm_bus_register(struct sdhci_msm_host *host,
+				struct platform_device *pdev)
+{
+	int rc = 0;
+	struct msm_bus_scale_pdata *bus_pdata;
+
+	struct sdhci_msm_bus_voting_data *data;
+	struct device *dev = &pdev->dev;
+
+	data = devm_kzalloc(dev,
+		sizeof(struct sdhci_msm_bus_voting_data), GFP_KERNEL);
+	if (!data) {
+		dev_err(&pdev->dev,
+			"%s: failed to allocate memory\n", __func__);
+		rc = -ENOMEM;
+		goto out;
+	}
+	data->bus_pdata = msm_bus_cl_get_pdata(pdev);
+	if (data->bus_pdata) {
+		rc = sdhci_msm_dt_get_array(dev, "qcom,bus-bw-vectors-bps",
+				&data->bw_vecs, &data->bw_vecs_size, 0);
+		if (rc) {
+			dev_err(&pdev->dev,
+				"%s: Failed to get bus-bw-vectors-bps\n",
+				__func__);
+			goto out;
+		}
+		host->pdata->voting_data = data;
+	}
+	if (host->pdata->voting_data &&
+		host->pdata->voting_data->bus_pdata &&
+		host->pdata->voting_data->bw_vecs &&
+		host->pdata->voting_data->bw_vecs_size) {
+
+		bus_pdata = host->pdata->voting_data->bus_pdata;
+		host->msm_bus_vote.client_handle =
+				msm_bus_scale_register_client(bus_pdata);
+		if (!host->msm_bus_vote.client_handle) {
+			dev_err(&pdev->dev,
+				"msm_bus_scale_register_client() failed\n");
+			rc = -EFAULT;
+			goto out;
+		}
+		/* cache the vote index for minimum and maximum bandwidth */
+		host->msm_bus_vote.min_bw_vote =
+				sdhci_msm_bus_get_vote_for_bw(host, 0);
+		host->msm_bus_vote.max_bw_vote =
+				sdhci_msm_bus_get_vote_for_bw(host, UINT_MAX);
+	} else {
+		devm_kfree(dev, data);
+	}
+
+out:
+	return rc;
+}
+
+static void sdhci_msm_bus_unregister(struct sdhci_msm_host *host)
+{
+	if (host->msm_bus_vote.client_handle)
+		msm_bus_scale_unregister_client(
+			host->msm_bus_vote.client_handle);
+}
+
+static void sdhci_msm_bus_voting(struct sdhci_host *host, u32 enable)
+{
+	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+	struct sdhci_msm_host *msm_host = pltfm_host->priv;
+	struct mmc_ios *ios = &host->mmc->ios;
+	unsigned int bw;
+
+	if (!msm_host->msm_bus_vote.client_handle)
+		return;
+
+	bw = sdhci_get_bw_required(host, ios);
+	if (enable) {
+		sdhci_msm_bus_cancel_work_and_set_vote(host, bw);
+	} else {
+		/*
+		 * If clock gating is enabled, then remove the vote
+		 * immediately because clocks will be disabled only
+		 * after SDHCI_MSM_MMC_CLK_GATE_DELAY and thus no
+		 * additional delay is required to remove the bus vote.
+		 */
+#ifdef CONFIG_MMC_CLKGATE
+		if (host->mmc->clkgate_delay)
+			sdhci_msm_bus_cancel_work_and_set_vote(host, 0);
+		else
+#endif
+			sdhci_msm_bus_queue_work(host);
+	}
+}
+
+/* Regulator utility functions */
+static int sdhci_msm_vreg_init_reg(struct device *dev,
+					struct sdhci_msm_reg_data *vreg)
+{
+	int ret = 0;
+
+	/* check if the regulator is already initialized */
+	if (vreg->reg)
+		goto out;
+
+	/* Get the regulator handle */
+	vreg->reg = devm_regulator_get(dev, vreg->name);
+	if (IS_ERR(vreg->reg)) {
+		ret = PTR_ERR(vreg->reg);
+		pr_err("%s: devm_regulator_get(%s) failed. ret=%d\n",
+			__func__, vreg->name, ret);
+		goto out;
+	}
+
+	if (regulator_count_voltages(vreg->reg) > 0) {
+		vreg->set_voltage_sup = true;
+		/* sanity check */
+		if (!vreg->high_vol_level || !vreg->hpm_uA) {
+			pr_err("%s: %s invalid constraints specified\n",
+			       __func__, vreg->name);
+			ret = -EINVAL;
+		}
+	}
+
+out:
+	return ret;
+}
+
+static void sdhci_msm_vreg_deinit_reg(struct sdhci_msm_reg_data *vreg)
+{
+	if (vreg->reg)
+		devm_regulator_put(vreg->reg);
+}
+
+static int sdhci_msm_vreg_set_optimum_mode(struct sdhci_msm_reg_data
+						  *vreg, int uA_load)
+{
+	int ret = 0;
+
+	/*
+	 * Regulators that do not support regulator_set_voltage also
+	 * do not support regulator_set_load.
+	 */
+	if (vreg->set_voltage_sup) {
+		ret = regulator_set_load(vreg->reg, uA_load);
+		if (ret < 0)
+			pr_err("%s: regulator_set_load(reg=%s,uA_load=%d) failed. ret=%d\n",
+			       __func__, vreg->name, uA_load, ret);
+		else
+			/*
+			 * regulator_set_load() can return a non-zero
+			 * value even on success; normalize it to zero.
+			 */
+			ret = 0;
+	}
+	return ret;
+}
+
+static int sdhci_msm_vreg_set_voltage(struct sdhci_msm_reg_data *vreg,
+					int min_uV, int max_uV)
+{
+	int ret = 0;
+	if (vreg->set_voltage_sup) {
+		ret = regulator_set_voltage(vreg->reg, min_uV, max_uV);
+		if (ret) {
+			pr_err("%s: regulator_set_voltage(%s) failed. min_uV=%d, max_uV=%d, ret=%d\n",
+			       __func__, vreg->name, min_uV, max_uV, ret);
+		}
+	}
+
+	return ret;
+}
+
+static int sdhci_msm_vreg_enable(struct sdhci_msm_reg_data *vreg)
+{
+	int ret = 0;
+
+	/* Put regulator in HPM (high power mode) */
+	ret = sdhci_msm_vreg_set_optimum_mode(vreg, vreg->hpm_uA);
+	if (ret < 0)
+		return ret;
+
+	if (!vreg->is_enabled) {
+		/* Set voltage level */
+		ret = sdhci_msm_vreg_set_voltage(vreg, vreg->high_vol_level,
+						vreg->high_vol_level);
+		if (ret)
+			return ret;
+	}
+	ret = regulator_enable(vreg->reg);
+	if (ret) {
+		pr_err("%s: regulator_enable(%s) failed. ret=%d\n",
+				__func__, vreg->name, ret);
+		return ret;
+	}
+	vreg->is_enabled = true;
+	return ret;
+}
+
+static int sdhci_msm_vreg_disable(struct sdhci_msm_reg_data *vreg)
+{
+	int ret = 0;
+
+	/* Never disable regulator marked as always_on */
+	if (vreg->is_enabled && !vreg->is_always_on) {
+		ret = regulator_disable(vreg->reg);
+		if (ret) {
+			pr_err("%s: regulator_disable(%s) failed. ret=%d\n",
+				__func__, vreg->name, ret);
+			goto out;
+		}
+		vreg->is_enabled = false;
+
+		ret = sdhci_msm_vreg_set_optimum_mode(vreg, 0);
+		if (ret < 0)
+			goto out;
+
+		/* Set min. voltage level to 0 */
+		ret = sdhci_msm_vreg_set_voltage(vreg, 0, vreg->high_vol_level);
+		if (ret)
+			goto out;
+	} else if (vreg->is_enabled && vreg->is_always_on) {
+		if (vreg->lpm_sup) {
+			/* Put always_on regulator in LPM (low power mode) */
+			ret = sdhci_msm_vreg_set_optimum_mode(vreg,
+							      vreg->lpm_uA);
+			if (ret < 0)
+				goto out;
+		}
+	}
+out:
+	return ret;
+}
+
+static int sdhci_msm_setup_vreg(struct sdhci_msm_pltfm_data *pdata,
+			bool enable, bool is_init)
+{
+	int ret = 0, i;
+	struct sdhci_msm_slot_reg_data *curr_slot;
+	struct sdhci_msm_reg_data *vreg_table[2];
+
+	curr_slot = pdata->vreg_data;
+	if (!curr_slot) {
+		pr_debug("%s: vreg info unavailable,assuming the slot is powered by always on domain\n",
+			 __func__);
+		goto out;
+	}
+
+	vreg_table[0] = curr_slot->vdd_data;
+	vreg_table[1] = curr_slot->vdd_io_data;
+
+	for (i = 0; i < ARRAY_SIZE(vreg_table); i++) {
+		if (vreg_table[i]) {
+			if (enable)
+				ret = sdhci_msm_vreg_enable(vreg_table[i]);
+			else
+				ret = sdhci_msm_vreg_disable(vreg_table[i]);
+			if (ret)
+				goto out;
+		}
+	}
+out:
+	return ret;
+}
+
+/* This init function should be called only once for each SDHC slot */
+static int sdhci_msm_vreg_init(struct device *dev,
+				struct sdhci_msm_pltfm_data *pdata,
+				bool is_init)
+{
+	int ret = 0;
+	struct sdhci_msm_slot_reg_data *curr_slot;
+	struct sdhci_msm_reg_data *curr_vdd_reg, *curr_vdd_io_reg;
+
+	curr_slot = pdata->vreg_data;
+	if (!curr_slot)
+		goto out;
+
+	curr_vdd_reg = curr_slot->vdd_data;
+	curr_vdd_io_reg = curr_slot->vdd_io_data;
+
+	if (!is_init)
+		/* Deregister all regulators from regulator framework */
+		goto vdd_io_reg_deinit;
+
+	/*
+	 * Get the regulator handle from voltage regulator framework
+	 * and then try to set the voltage level for the regulator
+	 */
+	if (curr_vdd_reg) {
+		ret = sdhci_msm_vreg_init_reg(dev, curr_vdd_reg);
+		if (ret)
+			goto out;
+	}
+	if (curr_vdd_io_reg) {
+		ret = sdhci_msm_vreg_init_reg(dev, curr_vdd_io_reg);
+		if (ret)
+			goto vdd_reg_deinit;
+	}
+
+	if (ret)
+		dev_err(dev, "vreg reset failed (%d)\n", ret);
+	goto out;
+
+vdd_io_reg_deinit:
+	if (curr_vdd_io_reg)
+		sdhci_msm_vreg_deinit_reg(curr_vdd_io_reg);
+vdd_reg_deinit:
+	if (curr_vdd_reg)
+		sdhci_msm_vreg_deinit_reg(curr_vdd_reg);
+out:
+	return ret;
+}
+
+static int sdhci_msm_set_vdd_io_vol(struct sdhci_msm_pltfm_data *pdata,
+			enum vdd_io_level level,
+			unsigned int voltage_level)
+{
+	int ret = 0;
+	int set_level;
+	struct sdhci_msm_reg_data *vdd_io_reg;
+
+	if (!pdata->vreg_data)
+		return ret;
+
+	vdd_io_reg = pdata->vreg_data->vdd_io_data;
+	if (vdd_io_reg && vdd_io_reg->is_enabled) {
+		switch (level) {
+		case VDD_IO_LOW:
+			set_level = vdd_io_reg->low_vol_level;
+			break;
+		case VDD_IO_HIGH:
+			set_level = vdd_io_reg->high_vol_level;
+			break;
+		case VDD_IO_SET_LEVEL:
+			set_level = voltage_level;
+			break;
+		default:
+			pr_err("%s: invalid argument level = %d",
+					__func__, level);
+			ret = -EINVAL;
+			return ret;
+		}
+		ret = sdhci_msm_vreg_set_voltage(vdd_io_reg, set_level,
+				set_level);
+	}
+	return ret;
+}
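+
+/*
+ * Example (levels are DT-configured; the 1.8 V / 3.3 V figures below are
+ * the typical pad supplies, not a guarantee): the power IRQ handler
+ * further down calls this with VDD_IO_HIGH on a bus-on request and with
+ * VDD_IO_LOW when 1.8 V signalling is requested, while VDD_IO_SET_LEVEL
+ * lets a caller pass an explicit microvolt value instead.
+ */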
+
+/*
+ * The caller must hold the host->lock spinlock when calling this function.
+ */
+static void sdhci_msm_cfg_sdiowakeup_gpio_irq(struct sdhci_host *host,
+					      bool enable)
+{
+	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+	struct sdhci_msm_host *msm_host = pltfm_host->priv;
+
+	if (enable && !msm_host->is_sdiowakeup_enabled)
+		enable_irq(msm_host->pdata->sdiowakeup_irq);
+	else if (!enable && msm_host->is_sdiowakeup_enabled)
+		disable_irq_nosync(msm_host->pdata->sdiowakeup_irq);
+	else
+		dev_warn(&msm_host->pdev->dev, "%s: wakeup to config: %d curr: %d\n",
+			__func__, enable, msm_host->is_sdiowakeup_enabled);
+	msm_host->is_sdiowakeup_enabled = enable;
+}
+
+static irqreturn_t sdhci_msm_sdiowakeup_irq(int irq, void *data)
+{
+	struct sdhci_host *host = (struct sdhci_host *)data;
+	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+	struct sdhci_msm_host *msm_host = pltfm_host->priv;
+
+	unsigned long flags;
+
+	pr_debug("%s: irq (%d) received\n", __func__, irq);
+
+	spin_lock_irqsave(&host->lock, flags);
+	sdhci_msm_cfg_sdiowakeup_gpio_irq(host, false);
+	spin_unlock_irqrestore(&host->lock, flags);
+	msm_host->sdio_pending_processing = true;
+
+	return IRQ_HANDLED;
+}
+
+void sdhci_msm_dump_pwr_ctrl_regs(struct sdhci_host *host)
+{
+	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+	struct sdhci_msm_host *msm_host = pltfm_host->priv;
+	const struct sdhci_msm_offset *msm_host_offset =
+					msm_host->offset;
+	unsigned int irq_flags = 0;
+	struct irq_desc *pwr_irq_desc = irq_to_desc(msm_host->pwr_irq);
+
+	if (pwr_irq_desc)
+		irq_flags = pwr_irq_desc->irq_data.common->state_use_accessors;
+
+	pr_err("%s: PWRCTL_STATUS: 0x%08x | PWRCTL_MASK: 0x%08x | PWRCTL_CTL: 0x%08x, pwr isr state=0x%x\n",
+		mmc_hostname(host->mmc),
+		sdhci_msm_readl_relaxed(host,
+			msm_host_offset->CORE_PWRCTL_STATUS),
+		sdhci_msm_readl_relaxed(host,
+			msm_host_offset->CORE_PWRCTL_MASK),
+		sdhci_msm_readl_relaxed(host,
+			msm_host_offset->CORE_PWRCTL_CTL), irq_flags);
+
+	MMC_TRACE(host->mmc,
+		"%s: Sts: 0x%08x | Mask: 0x%08x | Ctrl: 0x%08x, pwr isr state=0x%x\n",
+		__func__,
+		sdhci_msm_readb_relaxed(host,
+			msm_host_offset->CORE_PWRCTL_STATUS),
+		sdhci_msm_readb_relaxed(host,
+			msm_host_offset->CORE_PWRCTL_MASK),
+		sdhci_msm_readb_relaxed(host,
+			msm_host_offset->CORE_PWRCTL_CTL), irq_flags);
+}
+
+static irqreturn_t sdhci_msm_pwr_irq(int irq, void *data)
+{
+	struct sdhci_host *host = (struct sdhci_host *)data;
+	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+	struct sdhci_msm_host *msm_host = pltfm_host->priv;
+	const struct sdhci_msm_offset *msm_host_offset =
+					msm_host->offset;
+	u8 irq_status = 0;
+	u8 irq_ack = 0;
+	int ret = 0;
+	int pwr_state = 0, io_level = 0;
+	unsigned long flags;
+	int retry = 10;
+
+	irq_status = sdhci_msm_readb_relaxed(host,
+		msm_host_offset->CORE_PWRCTL_STATUS);
+
+	pr_debug("%s: Received IRQ(%d), status=0x%x\n",
+		mmc_hostname(msm_host->mmc), irq, irq_status);
+
+	/* Clear the interrupt */
+	sdhci_msm_writeb_relaxed(irq_status, host,
+		msm_host_offset->CORE_PWRCTL_CLEAR);
+
+	/*
+	 * SDHC has core_mem and hc_mem device memory and these memory
+	 * addresses do not fall within 1KB region. Hence, any update to
+	 * core_mem address space would require an mb() to ensure this gets
+	 * completed before its next update to registers within hc_mem.
+	 */
+	mb();
+	/*
+	 * There is a rare HW scenario where the first clear pulse could be
+	 * lost when the actual reset and the clear/read of the status
+	 * register happen at the same time. Hence, retry up to 10 times to
+	 * make sure the status register is cleared. Otherwise, this results
+	 * in a spurious power IRQ and system instability.
+	 */
+	while (irq_status & sdhci_msm_readb_relaxed(host,
+		msm_host_offset->CORE_PWRCTL_STATUS)) {
+		if (retry == 0) {
+			pr_err("%s: Timedout clearing (0x%x) pwrctl status register\n",
+				mmc_hostname(host->mmc), irq_status);
+			sdhci_msm_dump_pwr_ctrl_regs(host);
+			BUG_ON(1);
+		}
+		sdhci_msm_writeb_relaxed(irq_status, host,
+			msm_host_offset->CORE_PWRCTL_CLEAR);
+		retry--;
+		udelay(10);
+	}
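+	/*
+	 * Worked example: with 10 retries and a 10 us delay per pass, the
+	 * loop above waits at most about 10 * 10 us = 100 us before giving
+	 * up and dumping the power control registers.
+	 */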
+	if (likely(retry < 10))
+		pr_debug("%s: success clearing (0x%x) pwrctl status register, retries left %d\n",
+				mmc_hostname(host->mmc), irq_status, retry);
+
+	/* Handle BUS ON/OFF */
+	if (irq_status & CORE_PWRCTL_BUS_ON) {
+		ret = sdhci_msm_setup_vreg(msm_host->pdata, true, false);
+		if (!ret) {
+			ret = sdhci_msm_setup_pins(msm_host->pdata, true);
+			ret |= sdhci_msm_set_vdd_io_vol(msm_host->pdata,
+					VDD_IO_HIGH, 0);
+		}
+		if (ret)
+			irq_ack |= CORE_PWRCTL_BUS_FAIL;
+		else
+			irq_ack |= CORE_PWRCTL_BUS_SUCCESS;
+
+		pwr_state = REQ_BUS_ON;
+		io_level = REQ_IO_HIGH;
+	}
+	if (irq_status & CORE_PWRCTL_BUS_OFF) {
+		if (msm_host->pltfm_init_done)
+			ret = sdhci_msm_setup_vreg(msm_host->pdata,
+					false, false);
+		if (!ret) {
+			ret = sdhci_msm_setup_pins(msm_host->pdata, false);
+			ret |= sdhci_msm_set_vdd_io_vol(msm_host->pdata,
+					VDD_IO_LOW, 0);
+		}
+		if (ret)
+			irq_ack |= CORE_PWRCTL_BUS_FAIL;
+		else
+			irq_ack |= CORE_PWRCTL_BUS_SUCCESS;
+
+		pwr_state = REQ_BUS_OFF;
+		io_level = REQ_IO_LOW;
+	}
+	/* Handle IO LOW/HIGH */
+	if (irq_status & CORE_PWRCTL_IO_LOW) {
+		/* Switch voltage Low */
+		ret = sdhci_msm_set_vdd_io_vol(msm_host->pdata, VDD_IO_LOW, 0);
+		if (ret)
+			irq_ack |= CORE_PWRCTL_IO_FAIL;
+		else
+			irq_ack |= CORE_PWRCTL_IO_SUCCESS;
+
+		io_level = REQ_IO_LOW;
+	}
+	if (irq_status & CORE_PWRCTL_IO_HIGH) {
+		/* Switch voltage High */
+		ret = sdhci_msm_set_vdd_io_vol(msm_host->pdata, VDD_IO_HIGH, 0);
+		if (ret)
+			irq_ack |= CORE_PWRCTL_IO_FAIL;
+		else
+			irq_ack |= CORE_PWRCTL_IO_SUCCESS;
+
+		io_level = REQ_IO_HIGH;
+	}
+
+	/* ACK status to the core */
+	sdhci_msm_writeb_relaxed(irq_ack, host,
+			msm_host_offset->CORE_PWRCTL_CTL);
+	/*
+	 * SDHC has core_mem and hc_mem device memory and these memory
+	 * addresses do not fall within 1KB region. Hence, any update to
+	 * core_mem address space would require an mb() to ensure this gets
+	 * completed before its next update to registers within hc_mem.
+	 */
+	mb();
+
+	if ((io_level & REQ_IO_HIGH) &&
+			(msm_host->caps_0 & CORE_3_0V_SUPPORT) &&
+			!msm_host->core_3_0v_support)
+		writel_relaxed((readl_relaxed(host->ioaddr +
+				msm_host_offset->CORE_VENDOR_SPEC) &
+				~CORE_IO_PAD_PWR_SWITCH), host->ioaddr +
+				msm_host_offset->CORE_VENDOR_SPEC);
+	else if ((io_level & REQ_IO_LOW) ||
+			(msm_host->caps_0 & CORE_1_8V_SUPPORT))
+		writel_relaxed((readl_relaxed(host->ioaddr +
+				msm_host_offset->CORE_VENDOR_SPEC) |
+				CORE_IO_PAD_PWR_SWITCH), host->ioaddr +
+				msm_host_offset->CORE_VENDOR_SPEC);
+	mb();
+
+	pr_debug("%s: Handled IRQ(%d), ret=%d, ack=0x%x\n",
+		mmc_hostname(msm_host->mmc), irq, ret, irq_ack);
+	spin_lock_irqsave(&host->lock, flags);
+	if (pwr_state)
+		msm_host->curr_pwr_state = pwr_state;
+	if (io_level)
+		msm_host->curr_io_level = io_level;
+	complete(&msm_host->pwr_irq_completion);
+	spin_unlock_irqrestore(&host->lock, flags);
+
+	return IRQ_HANDLED;
+}
+
+static ssize_t
+show_polling(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	struct sdhci_host *host = dev_get_drvdata(dev);
+	int poll;
+	unsigned long flags;
+
+	spin_lock_irqsave(&host->lock, flags);
+	poll = !!(host->mmc->caps & MMC_CAP_NEEDS_POLL);
+	spin_unlock_irqrestore(&host->lock, flags);
+
+	return snprintf(buf, PAGE_SIZE, "%d\n", poll);
+}
+
+static ssize_t
+store_polling(struct device *dev, struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	struct sdhci_host *host = dev_get_drvdata(dev);
+	int value;
+	unsigned long flags;
+
+	if (!kstrtou32(buf, 0, &value)) {
+		spin_lock_irqsave(&host->lock, flags);
+		if (value) {
+			host->mmc->caps |= MMC_CAP_NEEDS_POLL;
+			mmc_detect_change(host->mmc, 0);
+		} else {
+			host->mmc->caps &= ~MMC_CAP_NEEDS_POLL;
+		}
+		spin_unlock_irqrestore(&host->lock, flags);
+	}
+	return count;
+}
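+
+/*
+ * Usage sketch (the sysfs node name is assumed here; the DEVICE_ATTR
+ * wiring for these handlers lives outside this hunk):
+ *
+ *   echo 1 > /sys/devices/.../polling   - poll for card presence
+ *   echo 0 > /sys/devices/.../polling   - rely on card-detect IRQs
+ *
+ * Writing 1 also kicks mmc_detect_change(), so a card inserted while
+ * polling was disabled is picked up immediately.
+ */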
+
+static ssize_t
+show_sdhci_max_bus_bw(struct device *dev, struct device_attribute *attr,
+			char *buf)
+{
+	struct sdhci_host *host = dev_get_drvdata(dev);
+	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+	struct sdhci_msm_host *msm_host = pltfm_host->priv;
+
+	return snprintf(buf, PAGE_SIZE, "%u\n",
+			msm_host->msm_bus_vote.is_max_bw_needed);
+}
+
+static ssize_t
+store_sdhci_max_bus_bw(struct device *dev, struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	struct sdhci_host *host = dev_get_drvdata(dev);
+	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+	struct sdhci_msm_host *msm_host = pltfm_host->priv;
+	uint32_t value;
+	unsigned long flags;
+
+	if (!kstrtou32(buf, 0, &value)) {
+		spin_lock_irqsave(&host->lock, flags);
+		msm_host->msm_bus_vote.is_max_bw_needed = !!value;
+		spin_unlock_irqrestore(&host->lock, flags);
+	}
+	return count;
+}
+
+static void sdhci_msm_check_power_status(struct sdhci_host *host, u32 req_type)
+{
+	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+	struct sdhci_msm_host *msm_host = pltfm_host->priv;
+	const struct sdhci_msm_offset *msm_host_offset =
+					msm_host->offset;
+	unsigned long flags;
+	bool done = false;
+	u32 io_sig_sts = SWITCHABLE_SIGNALLING_VOL;
+
+	spin_lock_irqsave(&host->lock, flags);
+	pr_debug("%s: %s: request %d curr_pwr_state %x curr_io_level %x\n",
+			mmc_hostname(host->mmc), __func__, req_type,
+			msm_host->curr_pwr_state, msm_host->curr_io_level);
+	if (!msm_host->mci_removed)
+		io_sig_sts = sdhci_msm_readl_relaxed(host,
+				msm_host_offset->CORE_GENERICS);
+
+	/*
+	 * The IRQ for request type IO High/Low is generated when:
+	 * 1. SWITCHABLE_SIGNALLING_VOL is enabled in HW, and
+	 * 2. there is a state change in the 1.8V enable bit (bit 3) of the
+	 * SDHCI_HOST_CONTROL2 register. The reset state of
+	 * that bit is 0 which indicates 3.3V IO voltage. So, when MMC core
+	 * layer tries to set it to 3.3V before card detection happens, the
+	 * IRQ doesn't get triggered as there is no state change in this bit.
+	 * The driver already handles this case by changing the IO voltage
+	 * level to high as part of controller power up sequence. Hence, check
+	 * for host->pwr to handle a case where IO voltage high request is
+	 * issued even before controller power up.
+	 */
+	if (req_type & (REQ_IO_HIGH | REQ_IO_LOW)) {
+		if (!(io_sig_sts & SWITCHABLE_SIGNALLING_VOL) ||
+				((req_type & REQ_IO_HIGH) && !host->pwr)) {
+			pr_debug("%s: do not wait for power IRQ that never comes\n",
+					mmc_hostname(host->mmc));
+			spin_unlock_irqrestore(&host->lock, flags);
+			return;
+		}
+	}
+
+	if ((req_type & msm_host->curr_pwr_state) ||
+			(req_type & msm_host->curr_io_level))
+		done = true;
+	spin_unlock_irqrestore(&host->lock, flags);
+
+	/*
+	 * This is needed here to handle a case where the IRQ gets
+	 * triggered even before this function is called, so that the
+	 * completion's x->done counter gets reset. Otherwise, the
+	 * next call to wait_for_completion would return immediately
+	 * without actually waiting for the IRQ to be handled.
+	 */
+	if (done)
+		init_completion(&msm_host->pwr_irq_completion);
+	else if (!wait_for_completion_timeout(&msm_host->pwr_irq_completion,
+				msecs_to_jiffies(MSM_PWR_IRQ_TIMEOUT_MS))) {
+		__WARN_printf("%s: request(%d) timed out waiting for pwr_irq\n",
+					mmc_hostname(host->mmc), req_type);
+		MMC_TRACE(host->mmc,
+			"%s: request(%d) timed out waiting for pwr_irq\n",
+			__func__, req_type);
+		sdhci_msm_dump_pwr_ctrl_regs(host);
+	}
+	pr_debug("%s: %s: request %d done\n", mmc_hostname(host->mmc),
+			__func__, req_type);
+}
+
+static void sdhci_msm_toggle_cdr(struct sdhci_host *host, bool enable)
+{
+	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+	struct sdhci_msm_host *msm_host = pltfm_host->priv;
+	const struct sdhci_msm_offset *msm_host_offset =
+					msm_host->offset;
+	u32 config = readl_relaxed(host->ioaddr +
+		msm_host_offset->CORE_DLL_CONFIG);
+
+	if (enable) {
+		config |= CORE_CDR_EN;
+		config &= ~CORE_CDR_EXT_EN;
+		writel_relaxed(config, host->ioaddr +
+			msm_host_offset->CORE_DLL_CONFIG);
+	} else {
+		config &= ~CORE_CDR_EN;
+		config |= CORE_CDR_EXT_EN;
+		writel_relaxed(config, host->ioaddr +
+			msm_host_offset->CORE_DLL_CONFIG);
+	}
+}
+
+static unsigned int sdhci_msm_max_segs(void)
+{
+	return SDHCI_MSM_MAX_SEGMENTS;
+}
+
+static unsigned int sdhci_msm_get_min_clock(struct sdhci_host *host)
+{
+	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+	struct sdhci_msm_host *msm_host = pltfm_host->priv;
+
+	return msm_host->pdata->sup_clk_table[0];
+}
+
+static unsigned int sdhci_msm_get_max_clock(struct sdhci_host *host)
+{
+	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+	struct sdhci_msm_host *msm_host = pltfm_host->priv;
+	int max_clk_index = msm_host->pdata->sup_clk_cnt;
+
+	return msm_host->pdata->sup_clk_table[max_clk_index - 1];
+}
+
+static unsigned int sdhci_msm_get_sup_clk_rate(struct sdhci_host *host,
+						u32 req_clk)
+{
+	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+	struct sdhci_msm_host *msm_host = pltfm_host->priv;
+	unsigned int sel_clk = -1;
+	unsigned char cnt;
+
+	if (req_clk < sdhci_msm_get_min_clock(host)) {
+		sel_clk = sdhci_msm_get_min_clock(host);
+		return sel_clk;
+	}
+
+	for (cnt = 0; cnt < msm_host->pdata->sup_clk_cnt; cnt++) {
+		if (msm_host->pdata->sup_clk_table[cnt] > req_clk) {
+			break;
+		} else if (msm_host->pdata->sup_clk_table[cnt] == req_clk) {
+			sel_clk = msm_host->pdata->sup_clk_table[cnt];
+			break;
+		} else {
+			sel_clk = msm_host->pdata->sup_clk_table[cnt];
+		}
+	}
+	return sel_clk;
+}
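+
+/*
+ * Worked example (illustrative table, not from any specific target):
+ * with sup_clk_table[] = { 400000, 25000000, 50000000, 100000000,
+ * 200000000 }, a request for 52 MHz returns 50 MHz (the largest entry
+ * not above the request), a request for 100 MHz returns exactly
+ * 100 MHz, and a request below 400 kHz returns the 400 kHz minimum.
+ */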
+
+static int sdhci_msm_enable_controller_clock(struct sdhci_host *host)
+{
+	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+	struct sdhci_msm_host *msm_host = pltfm_host->priv;
+	int rc = 0;
+
+	if (atomic_read(&msm_host->controller_clock))
+		return 0;
+
+	sdhci_msm_bus_voting(host, 1);
+
+	if (!IS_ERR(msm_host->pclk)) {
+		rc = clk_prepare_enable(msm_host->pclk);
+		if (rc) {
+			pr_err("%s: %s: failed to enable the pclk with error %d\n",
+			       mmc_hostname(host->mmc), __func__, rc);
+			goto remove_vote;
+		}
+	}
+
+	rc = clk_prepare_enable(msm_host->clk);
+	if (rc) {
+		pr_err("%s: %s: failed to enable the host-clk with error %d\n",
+		       mmc_hostname(host->mmc), __func__, rc);
+		goto disable_pclk;
+	}
+
+	if (!IS_ERR(msm_host->ice_clk)) {
+		rc = clk_prepare_enable(msm_host->ice_clk);
+		if (rc) {
+			pr_err("%s: %s: failed to enable the ice-clk with error %d\n",
+				mmc_hostname(host->mmc), __func__, rc);
+			goto disable_host_clk;
+		}
+	}
+	atomic_set(&msm_host->controller_clock, 1);
+	pr_debug("%s: %s: enabled controller clock\n",
+			mmc_hostname(host->mmc), __func__);
+	goto out;
+
+disable_host_clk:
+	if (!IS_ERR(msm_host->clk))
+		clk_disable_unprepare(msm_host->clk);
+disable_pclk:
+	if (!IS_ERR(msm_host->pclk))
+		clk_disable_unprepare(msm_host->pclk);
+remove_vote:
+	if (msm_host->msm_bus_vote.client_handle)
+		sdhci_msm_bus_cancel_work_and_set_vote(host, 0);
+out:
+	return rc;
+}
+
+static void sdhci_msm_disable_controller_clock(struct sdhci_host *host)
+{
+	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+	struct sdhci_msm_host *msm_host = pltfm_host->priv;
+
+	if (atomic_read(&msm_host->controller_clock)) {
+		if (!IS_ERR(msm_host->clk))
+			clk_disable_unprepare(msm_host->clk);
+		if (!IS_ERR(msm_host->pclk))
+			clk_disable_unprepare(msm_host->pclk);
+		if (!IS_ERR(msm_host->ice_clk))
+			clk_disable_unprepare(msm_host->ice_clk);
+		sdhci_msm_bus_voting(host, 0);
+		atomic_set(&msm_host->controller_clock, 0);
+		pr_debug("%s: %s: disabled controller clock\n",
+			mmc_hostname(host->mmc), __func__);
+	}
+}
+
+static int sdhci_msm_prepare_clocks(struct sdhci_host *host, bool enable)
+{
+	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+	struct sdhci_msm_host *msm_host = pltfm_host->priv;
+	int rc = 0;
+
+	if (enable && !atomic_read(&msm_host->clks_on)) {
+		pr_debug("%s: request to enable clocks\n",
+				mmc_hostname(host->mmc));
+
+		/*
+		 * The bus width or the clock rate might have changed
+		 * after the controller clocks were enabled; update the
+		 * bus vote in that case.
+		 */
+		if (atomic_read(&msm_host->controller_clock))
+			sdhci_msm_bus_voting(host, 1);
+
+		rc = sdhci_msm_enable_controller_clock(host);
+		if (rc)
+			goto remove_vote;
+
+		if (!IS_ERR_OR_NULL(msm_host->bus_clk)) {
+			rc = clk_prepare_enable(msm_host->bus_clk);
+			if (rc) {
+				pr_err("%s: %s: failed to enable the bus-clock with error %d\n",
+					mmc_hostname(host->mmc), __func__, rc);
+				goto disable_controller_clk;
+			}
+		}
+		if (!IS_ERR(msm_host->ff_clk)) {
+			rc = clk_prepare_enable(msm_host->ff_clk);
+			if (rc) {
+				pr_err("%s: %s: failed to enable the ff_clk with error %d\n",
+					mmc_hostname(host->mmc), __func__, rc);
+				goto disable_bus_clk;
+			}
+		}
+		if (!IS_ERR(msm_host->sleep_clk)) {
+			rc = clk_prepare_enable(msm_host->sleep_clk);
+			if (rc) {
+				pr_err("%s: %s: failed to enable the sleep_clk with error %d\n",
+					mmc_hostname(host->mmc), __func__, rc);
+				goto disable_ff_clk;
+			}
+		}
+		mb();
+
+	} else if (!enable && atomic_read(&msm_host->clks_on)) {
+		sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);
+		mb();
+		/*
+		 * During 1.8V signal switching the clock source must
+		 * still be ON, as the switch requires accessing SDHC
+		 * registers (SDHCI Host Control2 register bit 3 must
+		 * be written and polled after stopping the SDCLK).
+		 */
+		if (host->mmc->card_clock_off)
+			return 0;
+		pr_debug("%s: request to disable clocks\n",
+				mmc_hostname(host->mmc));
+		if (!IS_ERR_OR_NULL(msm_host->sleep_clk))
+			clk_disable_unprepare(msm_host->sleep_clk);
+		if (!IS_ERR_OR_NULL(msm_host->ff_clk))
+			clk_disable_unprepare(msm_host->ff_clk);
+		clk_disable_unprepare(msm_host->clk);
+		if (!IS_ERR(msm_host->ice_clk))
+			clk_disable_unprepare(msm_host->ice_clk);
+		if (!IS_ERR(msm_host->pclk))
+			clk_disable_unprepare(msm_host->pclk);
+		if (!IS_ERR_OR_NULL(msm_host->bus_clk))
+			clk_disable_unprepare(msm_host->bus_clk);
+
+		atomic_set(&msm_host->controller_clock, 0);
+		sdhci_msm_bus_voting(host, 0);
+	}
+	atomic_set(&msm_host->clks_on, enable);
+	goto out;
+disable_ff_clk:
+	if (!IS_ERR_OR_NULL(msm_host->ff_clk))
+		clk_disable_unprepare(msm_host->ff_clk);
+disable_bus_clk:
+	if (!IS_ERR_OR_NULL(msm_host->bus_clk))
+		clk_disable_unprepare(msm_host->bus_clk);
+disable_controller_clk:
+	if (!IS_ERR_OR_NULL(msm_host->clk))
+		clk_disable_unprepare(msm_host->clk);
+	if (!IS_ERR(msm_host->ice_clk))
+		clk_disable_unprepare(msm_host->ice_clk);
+	if (!IS_ERR_OR_NULL(msm_host->pclk))
+		clk_disable_unprepare(msm_host->pclk);
+	atomic_set(&msm_host->controller_clock, 0);
+remove_vote:
+	if (msm_host->msm_bus_vote.client_handle)
+		sdhci_msm_bus_cancel_work_and_set_vote(host, 0);
+out:
+	return rc;
+}
+
+static void sdhci_msm_set_clock(struct sdhci_host *host, unsigned int clock)
+{
+	int rc;
+	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+	struct sdhci_msm_host *msm_host = pltfm_host->priv;
+	const struct sdhci_msm_offset *msm_host_offset =
+					msm_host->offset;
+	struct mmc_card *card = host->mmc->card;
+	struct mmc_ios	curr_ios = host->mmc->ios;
+	u32 sup_clock, ddr_clock, dll_lock;
+	bool curr_pwrsave;
+
+	if (!clock) {
+		/*
+		 * Disable pwrsave to ensure the clock is not auto-gated
+		 * until the rate is above 400 kHz (initialization complete).
+		 */
+		writel_relaxed(readl_relaxed(host->ioaddr +
+			msm_host_offset->CORE_VENDOR_SPEC) &
+			~CORE_CLK_PWRSAVE, host->ioaddr +
+			msm_host_offset->CORE_VENDOR_SPEC);
+		sdhci_msm_prepare_clocks(host, false);
+		host->clock = clock;
+		goto out;
+	}
+
+	rc = sdhci_msm_prepare_clocks(host, true);
+	if (rc)
+		goto out;
+
+	curr_pwrsave = !!(readl_relaxed(host->ioaddr +
+			msm_host_offset->CORE_VENDOR_SPEC) & CORE_CLK_PWRSAVE);
+	if ((clock > 400000) &&
+	    !curr_pwrsave && card && mmc_host_may_gate_card(card))
+		writel_relaxed(readl_relaxed(host->ioaddr +
+				msm_host_offset->CORE_VENDOR_SPEC)
+				| CORE_CLK_PWRSAVE, host->ioaddr +
+				msm_host_offset->CORE_VENDOR_SPEC);
+	/*
+	 * Disable pwrsave for a newly added card if it doesn't allow
+	 * clock gating.
+	 */
+	else if (curr_pwrsave && card && !mmc_host_may_gate_card(card))
+		writel_relaxed(readl_relaxed(host->ioaddr +
+				msm_host_offset->CORE_VENDOR_SPEC)
+				& ~CORE_CLK_PWRSAVE, host->ioaddr +
+				msm_host_offset->CORE_VENDOR_SPEC);
+
+	sup_clock = sdhci_msm_get_sup_clk_rate(host, clock);
+	if ((curr_ios.timing == MMC_TIMING_UHS_DDR50) ||
+		(curr_ios.timing == MMC_TIMING_MMC_DDR52) ||
+		(curr_ios.timing == MMC_TIMING_MMC_HS400)) {
+		/*
+		 * The SDHC requires the internal clock frequency to be double
+		 * the actual clock that will be set for DDR mode. The
+		 * controller uses the faster clock (100/400 MHz) for some of
+		 * its parts and sends the actually required clock
+		 * (50/200 MHz) to the card.
+		 */
+		ddr_clock = clock * 2;
+		sup_clock = sdhci_msm_get_sup_clk_rate(host,
+				ddr_clock);
+	}
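+	/*
+	 * Worked example: DDR52 with a 52 MHz card clock needs a 104 MHz
+	 * internal clock, so the supported rate is looked up for
+	 * 2 * 52 MHz; likewise HS400 at 200 MHz looks up 400 MHz.
+	 */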
+
+	/*
+	 * In general all timing modes are controlled via UHS mode select in
+	 * Host Control2 register. eMMC specific HS200/HS400 doesn't have
+	 * their respective modes defined here, hence we use these values.
+	 *
+	 * HS200 - SDR104 (Since they both are equivalent in functionality)
+	 * HS400 - This involves multiple configurations
+	 *		Initially SDR104 - when tuning is required as HS200
+	 *		Then when switching to DDR @ 400MHz (HS400) we use
+	 *		the vendor specific HC_SELECT_IN to control the mode.
+	 *
+	 * In addition to controlling the modes we also need to select the
+	 * correct input clock for DLL depending on the mode.
+	 *
+	 * HS400 - divided clock (free running MCLK/2)
+	 * All other modes - default (free running MCLK)
+	 */
+	if (curr_ios.timing == MMC_TIMING_MMC_HS400) {
+		/* Select the divided clock (free running MCLK/2) */
+		writel_relaxed(((readl_relaxed(host->ioaddr +
+				msm_host_offset->CORE_VENDOR_SPEC)
+				& ~CORE_HC_MCLK_SEL_MASK)
+				| CORE_HC_MCLK_SEL_HS400), host->ioaddr +
+				msm_host_offset->CORE_VENDOR_SPEC);
+		/*
+		 * Select HS400 mode using the HC_SELECT_IN from VENDOR SPEC
+		 * register
+		 */
+		if ((msm_host->tuning_done ||
+				(card && mmc_card_strobe(card) &&
+				 msm_host->enhanced_strobe)) &&
+				!msm_host->calibration_done) {
+			/*
+			 * Write 0x6 to HC_SELECT_IN and 1 to HC_SELECT_IN_EN
+			 * field in VENDOR_SPEC_FUNC
+			 */
+			writel_relaxed((readl_relaxed(host->ioaddr +
+					msm_host_offset->CORE_VENDOR_SPEC)
+					| CORE_HC_SELECT_IN_HS400
+					| CORE_HC_SELECT_IN_EN), host->ioaddr +
+					msm_host_offset->CORE_VENDOR_SPEC);
+		}
+		if (!host->mmc->ios.old_rate && !msm_host->use_cdclp533) {
+			/*
+			 * Poll for the DLL_LOCK and DDR_DLL_LOCK bits in
+			 * CORE_DLL_STATUS to be set. They should get set
+			 * within 15 us at 200 MHz.
+			 */
+			rc = readl_poll_timeout(host->ioaddr +
+					msm_host_offset->CORE_DLL_STATUS,
+					dll_lock, (dll_lock & (CORE_DLL_LOCK |
+					CORE_DDR_DLL_LOCK)), 10, 1000);
+			if (rc == -ETIMEDOUT)
+				pr_err("%s: Unable to get DLL_LOCK/DDR_DLL_LOCK, dll_status: 0x%08x\n",
+						mmc_hostname(host->mmc),
+						dll_lock);
+		}
+	} else {
+		if (!msm_host->use_cdclp533)
+			/* set CORE_PWRSAVE_DLL bit in CORE_VENDOR_SPEC3 */
+			writel_relaxed((readl_relaxed(host->ioaddr +
+					msm_host_offset->CORE_VENDOR_SPEC3)
+					& ~CORE_PWRSAVE_DLL), host->ioaddr +
+					msm_host_offset->CORE_VENDOR_SPEC3);
+
+		/* Select the default clock (free running MCLK) */
+		writel_relaxed(((readl_relaxed(host->ioaddr +
+					msm_host_offset->CORE_VENDOR_SPEC)
+					& ~CORE_HC_MCLK_SEL_MASK)
+					| CORE_HC_MCLK_SEL_DFLT), host->ioaddr +
+					msm_host_offset->CORE_VENDOR_SPEC);
+
+		/*
+		 * Disable HC_SELECT_IN to be able to use the UHS mode select
+		 * configuration from Host Control2 register for all other
+		 * modes.
+		 *
+		 * Write 0 to HC_SELECT_IN and HC_SELECT_IN_EN field
+		 * in VENDOR_SPEC_FUNC
+		 */
+		writel_relaxed((readl_relaxed(host->ioaddr +
+				msm_host_offset->CORE_VENDOR_SPEC)
+				& ~CORE_HC_SELECT_IN_EN
+				& ~CORE_HC_SELECT_IN_MASK), host->ioaddr +
+				msm_host_offset->CORE_VENDOR_SPEC);
+	}
+	mb();
+
+	if (sup_clock != msm_host->clk_rate) {
+		pr_debug("%s: %s: setting clk rate to %u\n",
+				mmc_hostname(host->mmc), __func__, sup_clock);
+		rc = clk_set_rate(msm_host->clk, sup_clock);
+		if (rc) {
+			pr_err("%s: %s: Failed to set rate %u for host-clk : %d\n",
+					mmc_hostname(host->mmc), __func__,
+					sup_clock, rc);
+			goto out;
+		}
+		msm_host->clk_rate = sup_clock;
+		host->clock = clock;
+		/*
+		 * Update the bus vote in case of frequency change due to
+		 * clock scaling.
+		 */
+		sdhci_msm_bus_voting(host, 1);
+	}
+out:
+	sdhci_set_clock(host, clock);
+}
+
+static void sdhci_msm_set_uhs_signaling(struct sdhci_host *host,
+					unsigned int uhs)
+{
+	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+	struct sdhci_msm_host *msm_host = pltfm_host->priv;
+	const struct sdhci_msm_offset *msm_host_offset =
+					msm_host->offset;
+	u16 ctrl_2;
+
+	ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
+	/* Select Bus Speed Mode for host */
+	ctrl_2 &= ~SDHCI_CTRL_UHS_MASK;
+	if ((uhs == MMC_TIMING_MMC_HS400) ||
+		(uhs == MMC_TIMING_MMC_HS200) ||
+		(uhs == MMC_TIMING_UHS_SDR104))
+		ctrl_2 |= SDHCI_CTRL_UHS_SDR104;
+	else if (uhs == MMC_TIMING_UHS_SDR12)
+		ctrl_2 |= SDHCI_CTRL_UHS_SDR12;
+	else if (uhs == MMC_TIMING_UHS_SDR25)
+		ctrl_2 |= SDHCI_CTRL_UHS_SDR25;
+	else if (uhs == MMC_TIMING_UHS_SDR50)
+		ctrl_2 |= SDHCI_CTRL_UHS_SDR50;
+	else if ((uhs == MMC_TIMING_UHS_DDR50) ||
+		 (uhs == MMC_TIMING_MMC_DDR52))
+		ctrl_2 |= SDHCI_CTRL_UHS_DDR50;
+	/*
+	 * When the clock frequency is less than 100 MHz, the feedback clock
+	 * must be provided and the DLL must not be used, so that tuning can
+	 * be skipped. To provide the feedback clock, the mode selection can
+	 * be any value less than 3'b011 in bits [2:0] of the HOST CONTROL2
+	 * register.
+	 */
+	if (host->clock <= CORE_FREQ_100MHZ) {
+		if ((uhs == MMC_TIMING_MMC_HS400) ||
+		    (uhs == MMC_TIMING_MMC_HS200) ||
+		    (uhs == MMC_TIMING_UHS_SDR104))
+			ctrl_2 &= ~SDHCI_CTRL_UHS_MASK;
+
+		/*
+		 * Make sure DLL is disabled when not required
+		 *
+		 * Write 1 to DLL_RST bit of DLL_CONFIG register
+		 */
+		writel_relaxed((readl_relaxed(host->ioaddr +
+				msm_host_offset->CORE_DLL_CONFIG)
+				| CORE_DLL_RST), host->ioaddr +
+				msm_host_offset->CORE_DLL_CONFIG);
+
+		/* Write 1 to DLL_PDN bit of DLL_CONFIG register */
+		writel_relaxed((readl_relaxed(host->ioaddr +
+				msm_host_offset->CORE_DLL_CONFIG)
+				| CORE_DLL_PDN), host->ioaddr +
+				msm_host_offset->CORE_DLL_CONFIG);
+		mb();
+
+		/*
+		 * The DLL needs to be restored and CDCLP533 recalibrated
+		 * when the clock frequency is set back to 400 MHz.
+		 */
+		msm_host->calibration_done = false;
+	}
+
+	pr_debug("%s: %s-clock:%u uhs mode:%u ctrl_2:0x%x\n",
+		mmc_hostname(host->mmc), __func__, host->clock, uhs, ctrl_2);
+	sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
+
+}
+
+#define MAX_TEST_BUS 60
+#define DRV_NAME "cmdq-host"
+static void sdhci_msm_cmdq_dump_debug_ram(struct sdhci_host *host)
+{
+	int i = 0;
+	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+	struct sdhci_msm_host *msm_host = pltfm_host->priv;
+	const struct sdhci_msm_offset *msm_host_offset =
+					msm_host->offset;
+	struct cmdq_host *cq_host = host->cq_host;
+
+	u32 version = sdhci_msm_readl_relaxed(host,
+		msm_host_offset->CORE_MCI_VERSION);
+	u16 minor = version & CORE_VERSION_TARGET_MASK;
+	/* register offsets changed starting from version 4.2.0 */
+	int offset = minor >= SDHCI_MSM_VER_420 ? 0 : 0x48;
+
+	if (cq_host->offset_changed)
+		offset += CQ_V5_VENDOR_CFG;
+	pr_err("---- Debug RAM dump ----\n");
+	pr_err(DRV_NAME ": Debug RAM wrap-around: 0x%08x | Debug RAM overlap: 0x%08x\n",
+	       cmdq_readl(cq_host, CQ_CMD_DBG_RAM_WA + offset),
+	       cmdq_readl(cq_host, CQ_CMD_DBG_RAM_OL + offset));
+
+	while (i < 16) {
+		pr_err(DRV_NAME ": Debug RAM dump [%d]: 0x%08x\n", i,
+		       cmdq_readl(cq_host, CQ_CMD_DBG_RAM + offset + (4 * i)));
+		i++;
+	}
+	pr_err("-------------------------\n");
+}
+
+static void sdhci_msm_cache_debug_data(struct sdhci_host *host)
+{
+	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+	struct sdhci_msm_host *msm_host = pltfm_host->priv;
+	struct sdhci_msm_debug_data *cached_data = &msm_host->cached_data;
+
+	memcpy(&cached_data->copy_mmc, msm_host->mmc,
+		sizeof(struct mmc_host));
+	if (msm_host->mmc->card)
+		memcpy(&cached_data->copy_card, msm_host->mmc->card,
+			sizeof(struct mmc_card));
+	memcpy(&cached_data->copy_host, host,
+		sizeof(struct sdhci_host));
+}
+
+void sdhci_msm_dump_vendor_regs(struct sdhci_host *host)
+{
+	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+	struct sdhci_msm_host *msm_host = pltfm_host->priv;
+	const struct sdhci_msm_offset *msm_host_offset =
+					msm_host->offset;
+	int tbsel, tbsel2;
+	int i, index = 0;
+	u32 test_bus_val = 0;
+	u32 debug_reg[MAX_TEST_BUS] = {0};
+	u32 sts = 0;
+
+	sdhci_msm_cache_debug_data(host);
+	pr_info("----------- VENDOR REGISTER DUMP -----------\n");
+	if (host->cq_host)
+		sdhci_msm_cmdq_dump_debug_ram(host);
+
+	MMC_TRACE(host->mmc, "Data cnt: 0x%08x | Fifo cnt: 0x%08x\n",
+		sdhci_msm_readl_relaxed(host,
+			msm_host_offset->CORE_MCI_DATA_CNT),
+		sdhci_msm_readl_relaxed(host,
+			msm_host_offset->CORE_MCI_FIFO_CNT));
+	pr_info("Data cnt: 0x%08x | Fifo cnt: 0x%08x | Int sts: 0x%08x\n",
+		sdhci_msm_readl_relaxed(host,
+			msm_host_offset->CORE_MCI_DATA_CNT),
+		sdhci_msm_readl_relaxed(host,
+			msm_host_offset->CORE_MCI_FIFO_CNT),
+		sdhci_msm_readl_relaxed(host,
+			msm_host_offset->CORE_MCI_STATUS));
+	pr_info("DLL cfg:  0x%08x | DLL sts:  0x%08x | SDCC ver: 0x%08x\n",
+		readl_relaxed(host->ioaddr +
+			msm_host_offset->CORE_DLL_CONFIG),
+		readl_relaxed(host->ioaddr +
+			msm_host_offset->CORE_DLL_STATUS),
+		sdhci_msm_readl_relaxed(host,
+			msm_host_offset->CORE_MCI_VERSION));
+	pr_info("Vndr func: 0x%08x | Vndr adma err : addr0: 0x%08x addr1: 0x%08x\n",
+		readl_relaxed(host->ioaddr +
+			msm_host_offset->CORE_VENDOR_SPEC),
+		readl_relaxed(host->ioaddr +
+			msm_host_offset->CORE_VENDOR_SPEC_ADMA_ERR_ADDR0),
+		readl_relaxed(host->ioaddr +
+			msm_host_offset->CORE_VENDOR_SPEC_ADMA_ERR_ADDR1));
+	pr_info("Vndr func2: 0x%08x\n",
+		readl_relaxed(host->ioaddr +
+			msm_host_offset->CORE_VENDOR_SPEC_FUNC2));
+
+	/*
+	 * tbsel indicates [2:0] bits and tbsel2 indicates [7:4] bits
+	 * of CORE_TESTBUS_CONFIG register.
+	 *
+	 * To select test buses 0 to 7 use tbsel; to select any test bus
+	 * above 7 use (tbsel2 | tbsel) to form the test bus number. For
+	 * example, to select test bus 14, write 0x1E to the
+	 * CORE_TESTBUS_CONFIG register,
+	 * i.e., tbsel2[7:4] = 0001, tbsel[2:0] = 110.
+	 */
+	for (tbsel2 = 0; tbsel2 < 7; tbsel2++) {
+		for (tbsel = 0; tbsel < 8; tbsel++) {
+			if (index >= MAX_TEST_BUS)
+				break;
+			test_bus_val =
+			(tbsel2 << msm_host_offset->CORE_TESTBUS_SEL2_BIT) |
+				tbsel | msm_host_offset->CORE_TESTBUS_ENA;
+			sdhci_msm_writel_relaxed(test_bus_val, host,
+				msm_host_offset->CORE_TESTBUS_CONFIG);
+			debug_reg[index++] = sdhci_msm_readl_relaxed(host,
+				msm_host_offset->CORE_SDCC_DEBUG_REG);
+		}
+	}
+	for (i = 0; i < MAX_TEST_BUS; i = i + 4)
+		pr_info(" Test bus[%d to %d]: 0x%08x 0x%08x 0x%08x 0x%08x\n",
+				i, i + 3, debug_reg[i], debug_reg[i+1],
+				debug_reg[i+2], debug_reg[i+3]);
+	if (host->is_crypto_en) {
+		sdhci_msm_ice_get_status(host, &sts);
+		pr_info("%s: ICE status %x\n", mmc_hostname(host->mmc), sts);
+		sdhci_msm_ice_print_regs(host);
+	}
+}
+
+void sdhci_msm_reset(struct sdhci_host *host, u8 mask)
+{
+	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+	struct sdhci_msm_host *msm_host = pltfm_host->priv;
+
+	/* Set ICE core to be reset in sync with SDHC core */
+	if (msm_host->ice.pdev) {
+		if (msm_host->ice_hci_support)
+			writel_relaxed(1, host->ioaddr +
+						HC_VENDOR_SPECIFIC_ICE_CTRL);
+		else
+			writel_relaxed(1,
+				host->ioaddr + CORE_VENDOR_SPEC_ICE_CTRL);
+	}
+
+	sdhci_reset(host, mask);
+}
+
+/*
+ * sdhci_msm_enhanced_strobe_mask :-
+ * Before running CMDQ transfers in HS400 Enhanced Strobe mode,
+ * SW should write 3 to
+ * HC_VENDOR_SPECIFIC_FUNC3.CMDEN_HS400_INPUT_MASK_CNT register.
+ * The default reset value of this register is 2.
+ */
+static void sdhci_msm_enhanced_strobe_mask(struct sdhci_host *host, bool set)
+{
+	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+	struct sdhci_msm_host *msm_host = pltfm_host->priv;
+	const struct sdhci_msm_offset *msm_host_offset =
+					msm_host->offset;
+
+	if (!msm_host->enhanced_strobe ||
+			!mmc_card_strobe(msm_host->mmc->card)) {
+		pr_debug("%s: host/card does not support hs400 enhanced strobe\n",
+				mmc_hostname(host->mmc));
+		return;
+	}
+
+	if (set) {
+		writel_relaxed((readl_relaxed(host->ioaddr +
+			msm_host_offset->CORE_VENDOR_SPEC3)
+			| CORE_CMDEN_HS400_INPUT_MASK_CNT),
+			host->ioaddr + msm_host_offset->CORE_VENDOR_SPEC3);
+	} else {
+		writel_relaxed((readl_relaxed(host->ioaddr +
+			msm_host_offset->CORE_VENDOR_SPEC3)
+			& ~CORE_CMDEN_HS400_INPUT_MASK_CNT),
+			host->ioaddr + msm_host_offset->CORE_VENDOR_SPEC3);
+	}
+}
+
+static void sdhci_msm_clear_set_dumpregs(struct sdhci_host *host, bool set)
+{
+	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+	struct sdhci_msm_host *msm_host = pltfm_host->priv;
+	const struct sdhci_msm_offset *msm_host_offset =
+					msm_host->offset;
+
+	if (set) {
+		sdhci_msm_writel_relaxed(msm_host_offset->CORE_TESTBUS_ENA,
+			host, msm_host_offset->CORE_TESTBUS_CONFIG);
+	} else {
+		u32 value;
+		value = sdhci_msm_readl_relaxed(host,
+			msm_host_offset->CORE_TESTBUS_CONFIG);
+		value &= ~(msm_host_offset->CORE_TESTBUS_ENA);
+		sdhci_msm_writel_relaxed(value, host,
+			msm_host_offset->CORE_TESTBUS_CONFIG);
+	}
+}
+
+int sdhci_msm_notify_load(struct sdhci_host *host, enum mmc_load state)
+{
+	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+	struct sdhci_msm_host *msm_host = pltfm_host->priv;
+	int ret = 0;
+	u32 clk_rate = 0;
+
+	if (!IS_ERR(msm_host->ice_clk)) {
+		clk_rate = (state == MMC_LOAD_LOW) ?
+			msm_host->pdata->ice_clk_min :
+			msm_host->pdata->ice_clk_max;
+		if (msm_host->ice_clk_rate == clk_rate)
+			return 0;
+		pr_debug("%s: changing ICE clk rate to %u\n",
+				mmc_hostname(host->mmc), clk_rate);
+		ret = clk_set_rate(msm_host->ice_clk, clk_rate);
+		if (ret) {
+			pr_err("%s: ICE_CLK rate set failed (%d) for %u\n",
+				mmc_hostname(host->mmc), ret, clk_rate);
+			return ret;
+		}
+		msm_host->ice_clk_rate = clk_rate;
+	}
+	return 0;
+}
+
+void sdhci_msm_reset_workaround(struct sdhci_host *host, u32 enable)
+{
+	u32 vendor_func2;
+	unsigned long timeout;
+	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+	struct sdhci_msm_host *msm_host = pltfm_host->priv;
+	const struct sdhci_msm_offset *msm_host_offset =
+					msm_host->offset;
+
+	vendor_func2 = readl_relaxed(host->ioaddr +
+		msm_host_offset->CORE_VENDOR_SPEC_FUNC2);
+
+	if (enable) {
+		writel_relaxed(vendor_func2 | HC_SW_RST_REQ, host->ioaddr +
+				msm_host_offset->CORE_VENDOR_SPEC_FUNC2);
+		timeout = 10000;
+		while (readl_relaxed(host->ioaddr +
+		msm_host_offset->CORE_VENDOR_SPEC_FUNC2) & HC_SW_RST_REQ) {
+			if (timeout == 0) {
+				pr_info("%s: Applying wait idle disable workaround\n",
+					mmc_hostname(host->mmc));
+				/*
+				 * Apply the reset workaround to not wait for
+				 * pending data transfers on AXI before
+				 * resetting the controller. This could be
+				 * risky if the transfers were stuck on the
+				 * AXI bus.
+				 */
+				vendor_func2 = readl_relaxed(host->ioaddr +
+				msm_host_offset->CORE_VENDOR_SPEC_FUNC2);
+				writel_relaxed(vendor_func2 |
+				HC_SW_RST_WAIT_IDLE_DIS, host->ioaddr +
+				msm_host_offset->CORE_VENDOR_SPEC_FUNC2);
+				host->reset_wa_t = ktime_get();
+				return;
+			}
+			timeout--;
+			udelay(10);
+		}
+		pr_info("%s: waiting for SW_RST_REQ is successful\n",
+				mmc_hostname(host->mmc));
+	} else {
+		writel_relaxed(vendor_func2 & ~HC_SW_RST_WAIT_IDLE_DIS,
+			host->ioaddr + msm_host_offset->CORE_VENDOR_SPEC_FUNC2);
+	}
+}
+
+static void sdhci_msm_pm_qos_irq_unvote_work(struct work_struct *work)
+{
+	struct sdhci_msm_pm_qos_irq *pm_qos_irq =
+		container_of(work, struct sdhci_msm_pm_qos_irq,
+			     unvote_work.work);
+
+	if (atomic_read(&pm_qos_irq->counter))
+		return;
+
+	pm_qos_irq->latency = PM_QOS_DEFAULT_VALUE;
+	pm_qos_update_request(&pm_qos_irq->req, pm_qos_irq->latency);
+}
+
+void sdhci_msm_pm_qos_irq_vote(struct sdhci_host *host)
+{
+	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+	struct sdhci_msm_host *msm_host = pltfm_host->priv;
+	struct sdhci_msm_pm_qos_latency *latency =
+		&msm_host->pdata->pm_qos_data.irq_latency;
+	int counter;
+
+	if (!msm_host->pm_qos_irq.enabled)
+		return;
+
+	counter = atomic_inc_return(&msm_host->pm_qos_irq.counter);
+	/* Make sure to update the voting in case power policy has changed */
+	if (msm_host->pm_qos_irq.latency == latency->latency[host->power_policy]
+		&& counter > 1)
+		return;
+
+	cancel_delayed_work_sync(&msm_host->pm_qos_irq.unvote_work);
+	msm_host->pm_qos_irq.latency = latency->latency[host->power_policy];
+	pm_qos_update_request(&msm_host->pm_qos_irq.req,
+				msm_host->pm_qos_irq.latency);
+}
+
+void sdhci_msm_pm_qos_irq_unvote(struct sdhci_host *host, bool async)
+{
+	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+	struct sdhci_msm_host *msm_host = pltfm_host->priv;
+	int counter;
+
+	if (!msm_host->pm_qos_irq.enabled)
+		return;
+
+	if (atomic_read(&msm_host->pm_qos_irq.counter)) {
+		counter = atomic_dec_return(&msm_host->pm_qos_irq.counter);
+	} else {
+		WARN(1, "attempt to decrement pm_qos_irq.counter when it's 0");
+		return;
+	}
+
+	if (counter)
+		return;
+
+	if (async) {
+		schedule_delayed_work(&msm_host->pm_qos_irq.unvote_work,
+				      msecs_to_jiffies(QOS_REMOVE_DELAY_MS));
+		return;
+	}
+
+	msm_host->pm_qos_irq.latency = PM_QOS_DEFAULT_VALUE;
+	pm_qos_update_request(&msm_host->pm_qos_irq.req,
+			msm_host->pm_qos_irq.latency);
+}
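+
+/*
+ * Note: the vote/unvote pair is reference counted. sdhci_msm_pre_req()
+ * votes before a request is issued and sdhci_msm_post_req() unvotes when
+ * it completes (see below), so overlapping requests keep the latency
+ * vote active until the last one finishes.
+ */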
+
+static ssize_t
+sdhci_msm_pm_qos_irq_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct sdhci_host *host = dev_get_drvdata(dev);
+	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+	struct sdhci_msm_host *msm_host = pltfm_host->priv;
+	struct sdhci_msm_pm_qos_irq *irq = &msm_host->pm_qos_irq;
+
+	return snprintf(buf, PAGE_SIZE,
+		"IRQ PM QoS: enabled=%d, counter=%d, latency=%d\n",
+		irq->enabled, atomic_read(&irq->counter), irq->latency);
+}
+
+static ssize_t
+sdhci_msm_pm_qos_irq_enable_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct sdhci_host *host = dev_get_drvdata(dev);
+	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+	struct sdhci_msm_host *msm_host = pltfm_host->priv;
+
+	return snprintf(buf, PAGE_SIZE, "%u\n", msm_host->pm_qos_irq.enabled);
+}
+
+static ssize_t
+sdhci_msm_pm_qos_irq_enable_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct sdhci_host *host = dev_get_drvdata(dev);
+	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+	struct sdhci_msm_host *msm_host = pltfm_host->priv;
+	uint32_t value;
+	bool enable;
+	int ret;
+
+	ret = kstrtou32(buf, 0, &value);
+	if (ret)
+		goto out;
+	enable = !!value;
+
+	if (enable == msm_host->pm_qos_irq.enabled)
+		goto out;
+
+	msm_host->pm_qos_irq.enabled = enable;
+	if (!enable) {
+		cancel_delayed_work_sync(&msm_host->pm_qos_irq.unvote_work);
+		atomic_set(&msm_host->pm_qos_irq.counter, 0);
+		msm_host->pm_qos_irq.latency = PM_QOS_DEFAULT_VALUE;
+		pm_qos_update_request(&msm_host->pm_qos_irq.req,
+				msm_host->pm_qos_irq.latency);
+	}
+
+out:
+	return count;
+}
+
+#ifdef CONFIG_SMP
+static inline void set_affine_irq(struct sdhci_msm_host *msm_host,
+				struct sdhci_host *host)
+{
+	msm_host->pm_qos_irq.req.irq = host->irq;
+}
+#else
+static inline void set_affine_irq(struct sdhci_msm_host *msm_host,
+				struct sdhci_host *host) { }
+#endif
+
+void sdhci_msm_pm_qos_irq_init(struct sdhci_host *host)
+{
+	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+	struct sdhci_msm_host *msm_host = pltfm_host->priv;
+	struct sdhci_msm_pm_qos_latency *irq_latency;
+	int ret;
+
+	if (!msm_host->pdata->pm_qos_data.irq_valid)
+		return;
+
+	/* Initialize only once as this gets called per partition */
+	if (msm_host->pm_qos_irq.enabled)
+		return;
+
+	atomic_set(&msm_host->pm_qos_irq.counter, 0);
+	msm_host->pm_qos_irq.req.type =
+			msm_host->pdata->pm_qos_data.irq_req_type;
+	if ((msm_host->pm_qos_irq.req.type != PM_QOS_REQ_AFFINE_CORES) &&
+		(msm_host->pm_qos_irq.req.type != PM_QOS_REQ_ALL_CORES))
+		set_affine_irq(msm_host, host);
+	else
+		cpumask_copy(&msm_host->pm_qos_irq.req.cpus_affine,
+			cpumask_of(msm_host->pdata->pm_qos_data.irq_cpu));
+
+	INIT_DELAYED_WORK(&msm_host->pm_qos_irq.unvote_work,
+		sdhci_msm_pm_qos_irq_unvote_work);
+	/* For initialization phase, set the performance latency */
+	irq_latency = &msm_host->pdata->pm_qos_data.irq_latency;
+	msm_host->pm_qos_irq.latency =
+		irq_latency->latency[SDHCI_PERFORMANCE_MODE];
+	pm_qos_add_request(&msm_host->pm_qos_irq.req, PM_QOS_CPU_DMA_LATENCY,
+			msm_host->pm_qos_irq.latency);
+	msm_host->pm_qos_irq.enabled = true;
+
+	/* sysfs */
+	msm_host->pm_qos_irq.enable_attr.show =
+		sdhci_msm_pm_qos_irq_enable_show;
+	msm_host->pm_qos_irq.enable_attr.store =
+		sdhci_msm_pm_qos_irq_enable_store;
+	sysfs_attr_init(&msm_host->pm_qos_irq.enable_attr.attr);
+	msm_host->pm_qos_irq.enable_attr.attr.name = "pm_qos_irq_enable";
+	msm_host->pm_qos_irq.enable_attr.attr.mode = S_IRUGO | S_IWUSR;
+	ret = device_create_file(&msm_host->pdev->dev,
+		&msm_host->pm_qos_irq.enable_attr);
+	if (ret)
+		pr_err("%s: fail to create pm_qos_irq_enable (%d)\n",
+			__func__, ret);
+
+	msm_host->pm_qos_irq.status_attr.show = sdhci_msm_pm_qos_irq_show;
+	msm_host->pm_qos_irq.status_attr.store = NULL;
+	sysfs_attr_init(&msm_host->pm_qos_irq.status_attr.attr);
+	msm_host->pm_qos_irq.status_attr.attr.name = "pm_qos_irq_status";
+	msm_host->pm_qos_irq.status_attr.attr.mode = S_IRUGO;
+	ret = device_create_file(&msm_host->pdev->dev,
+			&msm_host->pm_qos_irq.status_attr);
+	if (ret)
+		pr_err("%s: fail to create pm_qos_irq_status (%d)\n",
+			__func__, ret);
+}
+
+static ssize_t sdhci_msm_pm_qos_group_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct sdhci_host *host = dev_get_drvdata(dev);
+	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+	struct sdhci_msm_host *msm_host = pltfm_host->priv;
+	struct sdhci_msm_pm_qos_group *group;
+	int i;
+	int nr_groups = msm_host->pdata->pm_qos_data.cpu_group_map.nr_groups;
+	int offset = 0;
+
+	for (i = 0; i < nr_groups; i++) {
+		group = &msm_host->pm_qos[i];
+		offset += snprintf(&buf[offset], PAGE_SIZE - offset,
+			"Group #%d (mask=0x%lx) PM QoS: enabled=%d, counter=%d, latency=%d\n",
+			i, group->req.cpus_affine.bits[0],
+			msm_host->pm_qos_group_enable,
+			atomic_read(&group->counter),
+			group->latency);
+	}
+
+	return offset;
+}
+
+static ssize_t sdhci_msm_pm_qos_group_enable_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct sdhci_host *host = dev_get_drvdata(dev);
+	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+	struct sdhci_msm_host *msm_host = pltfm_host->priv;
+
+	return snprintf(buf, PAGE_SIZE, "%s\n",
+		msm_host->pm_qos_group_enable ? "enabled" : "disabled");
+}
+
+static ssize_t sdhci_msm_pm_qos_group_enable_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct sdhci_host *host = dev_get_drvdata(dev);
+	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+	struct sdhci_msm_host *msm_host = pltfm_host->priv;
+	int nr_groups = msm_host->pdata->pm_qos_data.cpu_group_map.nr_groups;
+	uint32_t value;
+	bool enable;
+	int ret;
+	int i;
+
+	ret = kstrtou32(buf, 0, &value);
+	if (ret)
+		goto out;
+	enable = !!value;
+
+	if (enable == msm_host->pm_qos_group_enable)
+		goto out;
+
+	msm_host->pm_qos_group_enable = enable;
+	if (!enable) {
+		for (i = 0; i < nr_groups; i++) {
+			cancel_delayed_work_sync(
+				&msm_host->pm_qos[i].unvote_work);
+			atomic_set(&msm_host->pm_qos[i].counter, 0);
+			msm_host->pm_qos[i].latency = PM_QOS_DEFAULT_VALUE;
+			pm_qos_update_request(&msm_host->pm_qos[i].req,
+				msm_host->pm_qos[i].latency);
+		}
+	}
+
+out:
+	return count;
+}
+
+static int sdhci_msm_get_cpu_group(struct sdhci_msm_host *msm_host, int cpu)
+{
+	int i;
+	struct sdhci_msm_cpu_group_map *map =
+			&msm_host->pdata->pm_qos_data.cpu_group_map;
+
+	if (cpu < 0)
+		goto not_found;
+
+	for (i = 0; i < map->nr_groups; i++)
+		if (cpumask_test_cpu(cpu, &map->mask[i]))
+			return i;
+
+not_found:
+	return -EINVAL;
+}
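+
+/*
+ * Example (hypothetical grouping): with nr_groups = 2, mask[0] covering
+ * CPUs 0-3 and mask[1] covering CPUs 4-7, a request issued on CPU 5 maps
+ * to group 1, while cpu < 0 (no previous vote) yields -EINVAL.
+ */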
+
+void sdhci_msm_pm_qos_cpu_vote(struct sdhci_host *host,
+		struct sdhci_msm_pm_qos_latency *latency, int cpu)
+{
+	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+	struct sdhci_msm_host *msm_host = pltfm_host->priv;
+	int group = sdhci_msm_get_cpu_group(msm_host, cpu);
+	struct sdhci_msm_pm_qos_group *pm_qos_group;
+	int counter;
+
+	if (!msm_host->pm_qos_group_enable || group < 0)
+		return;
+
+	pm_qos_group = &msm_host->pm_qos[group];
+	counter = atomic_inc_return(&pm_qos_group->counter);
+
+	/* Make sure to update the voting in case power policy has changed */
+	if (pm_qos_group->latency == latency->latency[host->power_policy]
+		&& counter > 1)
+		return;
+
+	cancel_delayed_work_sync(&pm_qos_group->unvote_work);
+
+	pm_qos_group->latency = latency->latency[host->power_policy];
+	pm_qos_update_request(&pm_qos_group->req, pm_qos_group->latency);
+}
+
+static void sdhci_msm_pm_qos_cpu_unvote_work(struct work_struct *work)
+{
+	struct sdhci_msm_pm_qos_group *group =
+		container_of(work, struct sdhci_msm_pm_qos_group,
+			     unvote_work.work);
+
+	if (atomic_read(&group->counter))
+		return;
+
+	group->latency = PM_QOS_DEFAULT_VALUE;
+	pm_qos_update_request(&group->req, group->latency);
+}
+
+bool sdhci_msm_pm_qos_cpu_unvote(struct sdhci_host *host, int cpu, bool async)
+{
+	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+	struct sdhci_msm_host *msm_host = pltfm_host->priv;
+	int group = sdhci_msm_get_cpu_group(msm_host, cpu);
+
+	if (!msm_host->pm_qos_group_enable || group < 0 ||
+		atomic_dec_return(&msm_host->pm_qos[group].counter))
+		return false;
+
+	if (async) {
+		schedule_delayed_work(&msm_host->pm_qos[group].unvote_work,
+				      msecs_to_jiffies(QOS_REMOVE_DELAY_MS));
+		return true;
+	}
+
+	msm_host->pm_qos[group].latency = PM_QOS_DEFAULT_VALUE;
+	pm_qos_update_request(&msm_host->pm_qos[group].req,
+				msm_host->pm_qos[group].latency);
+	return true;
+}
+
+void sdhci_msm_pm_qos_cpu_init(struct sdhci_host *host,
+		struct sdhci_msm_pm_qos_latency *latency)
+{
+	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+	struct sdhci_msm_host *msm_host = pltfm_host->priv;
+	int nr_groups = msm_host->pdata->pm_qos_data.cpu_group_map.nr_groups;
+	struct sdhci_msm_pm_qos_group *group;
+	int i;
+	int ret;
+
+	if (msm_host->pm_qos_group_enable)
+		return;
+
+	msm_host->pm_qos = kcalloc(nr_groups, sizeof(*msm_host->pm_qos),
+			GFP_KERNEL);
+	if (!msm_host->pm_qos)
+		return;
+
+	for (i = 0; i < nr_groups; i++) {
+		group = &msm_host->pm_qos[i];
+		INIT_DELAYED_WORK(&group->unvote_work,
+			sdhci_msm_pm_qos_cpu_unvote_work);
+		atomic_set(&group->counter, 0);
+		group->req.type = PM_QOS_REQ_AFFINE_CORES;
+		cpumask_copy(&group->req.cpus_affine,
+			&msm_host->pdata->pm_qos_data.cpu_group_map.mask[i]);
+		/* We set default latency here for all pm_qos cpu groups. */
+		group->latency = PM_QOS_DEFAULT_VALUE;
+		pm_qos_add_request(&group->req, PM_QOS_CPU_DMA_LATENCY,
+			group->latency);
+		pr_info("%s (): voted for group #%d (mask=0x%lx) latency=%d\n",
+			__func__, i,
+			group->req.cpus_affine.bits[0],
+			group->latency);
+	}
+	msm_host->pm_qos_prev_cpu = -1;
+	msm_host->pm_qos_group_enable = true;
+
+	/* sysfs */
+	msm_host->pm_qos_group_status_attr.show = sdhci_msm_pm_qos_group_show;
+	msm_host->pm_qos_group_status_attr.store = NULL;
+	sysfs_attr_init(&msm_host->pm_qos_group_status_attr.attr);
+	msm_host->pm_qos_group_status_attr.attr.name =
+			"pm_qos_cpu_groups_status";
+	msm_host->pm_qos_group_status_attr.attr.mode = S_IRUGO;
+	ret = device_create_file(&msm_host->pdev->dev,
+			&msm_host->pm_qos_group_status_attr);
+	if (ret)
+		dev_err(&msm_host->pdev->dev, "%s: fail to create pm_qos_group_status_attr (%d)\n",
+			__func__, ret);
+	msm_host->pm_qos_group_enable_attr.show =
+			sdhci_msm_pm_qos_group_enable_show;
+	msm_host->pm_qos_group_enable_attr.store =
+			sdhci_msm_pm_qos_group_enable_store;
+	sysfs_attr_init(&msm_host->pm_qos_group_enable_attr.attr);
+	msm_host->pm_qos_group_enable_attr.attr.name =
+			"pm_qos_cpu_groups_enable";
+	msm_host->pm_qos_group_enable_attr.attr.mode = S_IRUGO | S_IWUSR;
+	ret = device_create_file(&msm_host->pdev->dev,
+			&msm_host->pm_qos_group_enable_attr);
+	if (ret)
+		dev_err(&msm_host->pdev->dev, "%s: fail to create pm_qos_group_enable_attr (%d)\n",
+			__func__, ret);
+}
+
+static void sdhci_msm_pre_req(struct sdhci_host *host,
+		struct mmc_request *mmc_req)
+{
+	int cpu;
+	int group;
+	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+	struct sdhci_msm_host *msm_host = pltfm_host->priv;
+	int prev_group = sdhci_msm_get_cpu_group(msm_host,
+			msm_host->pm_qos_prev_cpu);
+
+	sdhci_msm_pm_qos_irq_vote(host);
+
+	cpu = get_cpu();
+	put_cpu();
+	group = sdhci_msm_get_cpu_group(msm_host, cpu);
+	if (group < 0)
+		return;
+
+	if (group != prev_group && prev_group >= 0) {
+		sdhci_msm_pm_qos_cpu_unvote(host,
+				msm_host->pm_qos_prev_cpu, false);
+		prev_group = -1; /* make sure to vote for new group */
+	}
+
+	if (prev_group < 0) {
+		sdhci_msm_pm_qos_cpu_vote(host,
+				msm_host->pdata->pm_qos_data.latency, cpu);
+		msm_host->pm_qos_prev_cpu = cpu;
+	}
+}
+
+static void sdhci_msm_post_req(struct sdhci_host *host,
+				struct mmc_request *mmc_req)
+{
+	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+	struct sdhci_msm_host *msm_host = pltfm_host->priv;
+
+	sdhci_msm_pm_qos_irq_unvote(host, false);
+
+	if (sdhci_msm_pm_qos_cpu_unvote(host, msm_host->pm_qos_prev_cpu, false))
+		msm_host->pm_qos_prev_cpu = -1;
+}
+
+static void sdhci_msm_init(struct sdhci_host *host)
+{
+	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+	struct sdhci_msm_host *msm_host = pltfm_host->priv;
+
+	sdhci_msm_pm_qos_irq_init(host);
+
+	if (msm_host->pdata->pm_qos_data.legacy_valid)
+		sdhci_msm_pm_qos_cpu_init(host,
+				msm_host->pdata->pm_qos_data.latency);
+}
+
+static unsigned int sdhci_msm_get_current_limit(struct sdhci_host *host)
+{
+	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+	struct sdhci_msm_host *msm_host = pltfm_host->priv;
+	struct sdhci_msm_slot_reg_data *curr_slot = msm_host->pdata->vreg_data;
+	u32 max_curr = 0;
+
+	if (curr_slot && curr_slot->vdd_data)
+		max_curr = curr_slot->vdd_data->hpm_uA;
+
+	return max_curr;
+}
 
 static struct sdhci_ops sdhci_msm_ops = {
+	.crypto_engine_cfg = sdhci_msm_ice_cfg,
+	.crypto_engine_cmdq_cfg = sdhci_msm_ice_cmdq_cfg,
+	.crypto_engine_cfg_end = sdhci_msm_ice_cfg_end,
+	.crypto_cfg_reset = sdhci_msm_ice_cfg_reset,
+	.crypto_engine_reset = sdhci_msm_ice_reset,
+	.set_uhs_signaling = sdhci_msm_set_uhs_signaling,
+	.check_power_status = sdhci_msm_check_power_status,
 	.platform_execute_tuning = sdhci_msm_execute_tuning,
-	.reset = sdhci_reset,
-	.set_clock = sdhci_set_clock,
+	.enhanced_strobe = sdhci_msm_enhanced_strobe,
+	.toggle_cdr = sdhci_msm_toggle_cdr,
+	.get_max_segments = sdhci_msm_max_segs,
+	.set_clock = sdhci_msm_set_clock,
+	.get_min_clock = sdhci_msm_get_min_clock,
+	.get_max_clock = sdhci_msm_get_max_clock,
+	.dump_vendor_regs = sdhci_msm_dump_vendor_regs,
+	.config_auto_tuning_cmd = sdhci_msm_config_auto_tuning_cmd,
+	.enable_controller_clock = sdhci_msm_enable_controller_clock,
 	.set_bus_width = sdhci_set_bus_width,
-	.set_uhs_signaling = sdhci_set_uhs_signaling,
+	.reset = sdhci_msm_reset,
+	.clear_set_dumpregs = sdhci_msm_clear_set_dumpregs,
+	.enhanced_strobe_mask = sdhci_msm_enhanced_strobe_mask,
+	.notify_load = sdhci_msm_notify_load,
+	.reset_workaround = sdhci_msm_reset_workaround,
+	.init = sdhci_msm_init,
+	.pre_req = sdhci_msm_pre_req,
+	.post_req = sdhci_msm_post_req,
+	.get_current_limit = sdhci_msm_get_current_limit,
 };
 
+static void sdhci_set_default_hw_caps(struct sdhci_msm_host *msm_host,
+		struct sdhci_host *host)
+{
+	u32 version, caps = 0;
+	u16 minor;
+	u8 major;
+	u32 val;
+	const struct sdhci_msm_offset *msm_host_offset =
+					msm_host->offset;
+
+	version = sdhci_msm_readl_relaxed(host,
+		msm_host_offset->CORE_MCI_VERSION);
+	major = (version & CORE_VERSION_MAJOR_MASK) >>
+			CORE_VERSION_MAJOR_SHIFT;
+	minor = version & CORE_VERSION_TARGET_MASK;
+
+	caps = readl_relaxed(host->ioaddr + SDHCI_CAPABILITIES);
+
+	/*
+	 * Starting with the SDCC 5 controller (core major version = 1),
+	 * the controller won't advertise 3.0 V, 1.8 V and 8-bit bus width
+	 * support, except on some targets.
+	 */
+	if (major >= 1 && minor != 0x11 && minor != 0x12) {
+		struct sdhci_msm_reg_data *vdd_io_reg;
+		/*
+		 * Enable 1.8V support capability on controllers that
+		 * support dual voltage
+		 */
+		vdd_io_reg = msm_host->pdata->vreg_data->vdd_io_data;
+		if (vdd_io_reg && (vdd_io_reg->high_vol_level > 2700000))
+			caps |= CORE_3_0V_SUPPORT;
+		if (vdd_io_reg && (vdd_io_reg->low_vol_level < 1950000))
+			caps |= CORE_1_8V_SUPPORT;
+		if (msm_host->pdata->mmc_bus_width == MMC_CAP_8_BIT_DATA)
+			caps |= CORE_8_BIT_SUPPORT;
+	}
+
+	/*
+	 * Enable one-MID mode for SDCC5 (major 1) on 8916/8939 (minor 0x2e)
+	 * and on 8992 (minor 0x3e) as a workaround for the data-stuck issue
+	 * seen on reset.
+	 */
+	if (major == 1 && (minor == 0x2e || minor == 0x3e)) {
+		host->quirks2 |= SDHCI_QUIRK2_USE_RESET_WORKAROUND;
+		val = readl_relaxed(host->ioaddr +
+			msm_host_offset->CORE_VENDOR_SPEC_FUNC2);
+		writel_relaxed((val | CORE_ONE_MID_EN),
+			host->ioaddr + msm_host_offset->CORE_VENDOR_SPEC_FUNC2);
+	}
+	/*
+	 * SDCC 5 controllers (major version 1) with minor version 0x34 or
+	 * later that support HS400 use the CM DLL instead of the CDC LP 533
+	 * DLL; older versions keep using CDCLP533.
+	 */
+	if ((major == 1) && (minor < 0x34))
+		msm_host->use_cdclp533 = true;
+
+	/*
+	 * SDCC 5 controllers with major version 1, minor version 0x42 and
+	 * later require additional steps when resetting the DLL.
+	 * They also support HS400 enhanced strobe mode.
+	 */
+	if ((major == 1) && (minor >= 0x42)) {
+		msm_host->use_updated_dll_reset = true;
+		msm_host->enhanced_strobe = true;
+	}
+
+	/*
+	 * SDCC 5 controllers with major version 1 and minor version 0x42,
+	 * 0x46, 0x49, or 0x6b and later use the 14lpp tech DLL, whose
+	 * internal gating cannot guarantee the MCLK timing requirements:
+	 * when MCLK is gated off, it must stay off for at least 0.5us, and
+	 * MCLK must be switched on for at least 1us before DATA starts
+	 * coming.
+	 */
+	if ((major == 1) && ((minor == 0x42) || (minor == 0x46) ||
+				(minor == 0x49) || (minor >= 0x6b)))
+		msm_host->use_14lpp_dll = true;
+
+	/* Fake 3.0V support for SDIO devices which require such voltage */
+	if (msm_host->core_3_0v_support) {
+		caps |= CORE_3_0V_SUPPORT;
+		writel_relaxed((readl_relaxed(host->ioaddr +
+			SDHCI_CAPABILITIES) | caps), host->ioaddr +
+			msm_host_offset->CORE_VENDOR_SPEC_CAPABILITIES0);
+	}
+
+	if ((major == 1) && (minor >= 0x49))
+		msm_host->rclk_delay_fix = true;
+	/*
+	 * Mask out 64-bit support on controllers with a 32-bit address bus
+	 * so that the smaller descriptor format is used, reducing memory
+	 * consumption.
+	 */
+	if (!msm_host->pdata->largeaddressbus)
+		caps &= ~CORE_SYS_BUS_SUPPORT_64_BIT;
+
+	writel_relaxed(caps, host->ioaddr +
+		msm_host_offset->CORE_VENDOR_SPEC_CAPABILITIES0);
+	/* keep track of the value in SDHCI_CAPABILITIES */
+	msm_host->caps_0 = caps;
+
+	if ((major == 1) && (minor >= 0x6b)) {
+		msm_host->ice_hci_support = true;
+		host->cdr_support = true;
+	}
+}
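
The version decode at the top of sdhci_set_default_hw_caps() is a plain mask-and-shift. A standalone sketch, assuming the usual CORE_MCI_VERSION field layout (major in bits 31:28, minor in bits 7:0; the real mask values come from the driver headers, which this hunk does not show):

	#include <stdint.h>
	#include <stdio.h>

	/* Assumed field layout; the real masks are defined in the driver. */
	#define CORE_VERSION_MAJOR_MASK   0xF0000000u
	#define CORE_VERSION_MAJOR_SHIFT  28
	#define CORE_VERSION_TARGET_MASK  0x000000FFu

	int main(void)
	{
		uint32_t version = 0x1002002E;	/* sample register value */
		uint8_t major = (version & CORE_VERSION_MAJOR_MASK) >>
				CORE_VERSION_MAJOR_SHIFT;
		uint16_t minor = version & CORE_VERSION_TARGET_MASK;

		printf("major=%u minor=0x%02x\n", major, minor); /* major=1 minor=0x2e */
		return 0;
	}
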
+
+#ifdef CONFIG_MMC_CQ_HCI
+static void sdhci_msm_cmdq_init(struct sdhci_host *host,
+				struct platform_device *pdev)
+{
+	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+	struct sdhci_msm_host *msm_host = pltfm_host->priv;
+
+	if (nocmdq) {
+		dev_dbg(&pdev->dev, "CMDQ disabled via cmdline\n");
+		return;
+	}
+
+	host->cq_host = cmdq_pltfm_init(pdev);
+	if (IS_ERR(host->cq_host)) {
+		dev_dbg(&pdev->dev, "cmdq-pltfm init: failed: %ld\n",
+			PTR_ERR(host->cq_host));
+		host->cq_host = NULL;
+	} else {
+		msm_host->mmc->caps2 |= MMC_CAP2_CMD_QUEUE;
+	}
+}
+#else
+static void sdhci_msm_cmdq_init(struct sdhci_host *host,
+				struct platform_device *pdev)
+{
+}
+#endif
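
The nocmdq flag tested above comes from the kernel command line; its declaration lives elsewhere in this patch. A sketch of the usual module-parameter idiom it presumably follows (not the literal declaration):

	static bool nocmdq;
	module_param(nocmdq, bool, 0444);
	MODULE_PARM_DESC(nocmdq, "Disable eMMC command queuing (CMDQ) support");
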
+
+static bool sdhci_msm_is_bootdevice(struct device *dev)
+{
+	if (strnstr(saved_command_line, "androidboot.bootdevice=",
+		    strlen(saved_command_line))) {
+		char search_string[50];
+
+		snprintf(search_string, ARRAY_SIZE(search_string),
+			"androidboot.bootdevice=%s", dev_name(dev));
+		if (strnstr(saved_command_line, search_string,
+		    strlen(saved_command_line)))
+			return true;
+		else
+			return false;
+	}
+
+	/*
+	 * If the "androidboot.bootdevice=" argument is not present, return
+	 * true since we cannot tell what the boot device is anyway.
+	 */
+	return true;
+}
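
The matching above is a plain substring search: the device probes only if "androidboot.bootdevice=<dev_name>" appears on the kernel command line, or the argument is absent altogether. A userspace sketch of the same logic (strstr() standing in for the kernel's strnstr(); the device name is a made-up example):

	#include <stdbool.h>
	#include <stdio.h>
	#include <string.h>

	/* Mirrors sdhci_msm_is_bootdevice() with plain strstr(). */
	static bool is_bootdevice(const char *cmdline, const char *dev_name)
	{
		char search[50];

		if (!strstr(cmdline, "androidboot.bootdevice="))
			return true;	/* argument absent: boot device unknown */

		snprintf(search, sizeof(search),
			 "androidboot.bootdevice=%s", dev_name);
		return strstr(cmdline, search) != NULL;
	}

	int main(void)
	{
		const char *cmdline =
			"console=ttyMSM0 androidboot.bootdevice=7464900.sdhci";

		printf("%d\n", is_bootdevice(cmdline, "7464900.sdhci"));  /* 1 */
		printf("%d\n", is_bootdevice(cmdline, "7864900.sdhci"));  /* 0 */
		return 0;
	}
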
+
 static int sdhci_msm_probe(struct platform_device *pdev)
 {
+	const struct sdhci_msm_offset *msm_host_offset;
 	struct sdhci_host *host;
 	struct sdhci_pltfm_host *pltfm_host;
 	struct sdhci_msm_host *msm_host;
-	struct resource *core_memres;
-	int ret;
-	u16 host_version, core_minor;
-	u32 core_version, caps;
-	u8 core_major;
+	struct resource *core_memres = NULL;
+	int ret = 0, dead = 0;
+	u16 host_version;
+	u32 irq_status, irq_ctl;
+	struct resource *tlmm_memres = NULL;
+	void __iomem *tlmm_mem;
+	unsigned long flags;
+	bool force_probe;
 
-	msm_host = devm_kzalloc(&pdev->dev, sizeof(*msm_host), GFP_KERNEL);
-	if (!msm_host)
-		return -ENOMEM;
+	pr_debug("%s: Enter %s\n", dev_name(&pdev->dev), __func__);
+	msm_host = devm_kzalloc(&pdev->dev, sizeof(struct sdhci_msm_host),
+				GFP_KERNEL);
+	if (!msm_host) {
+		ret = -ENOMEM;
+		goto out;
+	}
 
+	if (of_find_compatible_node(NULL, NULL, "qcom,sdhci-msm-v5")) {
+		msm_host->mci_removed = true;
+		msm_host->offset = &sdhci_msm_offset_mci_removed;
+	} else {
+		msm_host->mci_removed = false;
+		msm_host->offset = &sdhci_msm_offset_mci_present;
+	}
+	msm_host_offset = msm_host->offset;
 	msm_host->sdhci_msm_pdata.ops = &sdhci_msm_ops;
 	host = sdhci_pltfm_init(pdev, &msm_host->sdhci_msm_pdata, 0);
-	if (IS_ERR(host))
-		return PTR_ERR(host);
+	if (IS_ERR(host)) {
+		ret = PTR_ERR(host);
+		goto out_host_free;
+	}
 
 	pltfm_host = sdhci_priv(host);
 	pltfm_host->priv = msm_host;
 	msm_host->mmc = host->mmc;
 	msm_host->pdev = pdev;
 
-	ret = mmc_of_parse(host->mmc);
-	if (ret)
+	/* get the ice device vops if present */
+	ret = sdhci_msm_ice_get_dev(host);
+	if (ret == -EPROBE_DEFER) {
+		/*
+		 * The SDHCI driver might be probed before the ICE driver.
+		 * In that case, return -EPROBE_DEFER so that this probe is
+		 * retried once the ICE driver is available.
+		 */
+		dev_err(&pdev->dev, "%s: required ICE device not probed yet err = %d\n",
+			__func__, ret);
 		goto pltfm_free;
 
-	sdhci_get_of_property(pdev);
+	} else if (ret == -ENODEV) {
+		/*
+		 * The ICE device is not enabled in the DTS file; no further
+		 * ICE driver initialization is needed.
+		 */
+		dev_warn(&pdev->dev, "%s: ICE device is not enabled\n",
+			__func__);
+	} else if (ret) {
+		dev_err(&pdev->dev, "%s: sdhci_msm_ice_get_dev failed %d\n",
+			__func__, ret);
+		goto pltfm_free;
+	}
+
+	/* Extract platform data */
+	if (pdev->dev.of_node) {
+		ret = of_alias_get_id(pdev->dev.of_node, "sdhc");
+		if (ret <= 0) {
+			dev_err(&pdev->dev, "Failed to get slot index %d\n",
+				ret);
+			goto pltfm_free;
+		}
+
+		/* Read property to determine if the probe is forced */
+		force_probe = of_find_property(pdev->dev.of_node,
+			"qcom,force-sdhc1-probe", NULL);
+
+		/* Skip the probe if the eMMC isn't the boot device */
+		if ((ret == 1) && !sdhci_msm_is_bootdevice(&pdev->dev)
+		    && !force_probe) {
+			ret = -ENODEV;
+			goto pltfm_free;
+		}
+
+		if (disable_slots & (1 << (ret - 1))) {
+			dev_info(&pdev->dev, "%s: Slot %d disabled\n", __func__,
+				ret);
+			ret = -ENODEV;
+			goto pltfm_free;
+		}
+
+		if (ret <= 2)
+			sdhci_slot[ret-1] = msm_host;
+
+		msm_host->pdata = sdhci_msm_populate_pdata(&pdev->dev,
+							   msm_host);
+		if (!msm_host->pdata) {
+			dev_err(&pdev->dev, "DT parsing error\n");
+			ret = -EINVAL;
+			goto pltfm_free;
+		}
+	} else {
+		dev_err(&pdev->dev, "No device tree node\n");
+		ret = -ENOENT;
+		goto pltfm_free;
+	}
+
+	/* Setup Clocks */
 
 	/* Setup SDCC bus voter clock. */
-	msm_host->bus_clk = devm_clk_get(&pdev->dev, "bus");
-	if (!IS_ERR(msm_host->bus_clk)) {
+	msm_host->bus_clk = devm_clk_get(&pdev->dev, "bus_clk");
+	if (!IS_ERR_OR_NULL(msm_host->bus_clk)) {
 		/* Vote for max. clk rate for max. performance */
 		ret = clk_set_rate(msm_host->bus_clk, INT_MAX);
 		if (ret)
@@ -470,99 +4360,429 @@
 	}
 
 	/* Setup main peripheral bus clock */
-	msm_host->pclk = devm_clk_get(&pdev->dev, "iface");
-	if (IS_ERR(msm_host->pclk)) {
-		ret = PTR_ERR(msm_host->pclk);
-		dev_err(&pdev->dev, "Perpheral clk setup failed (%d)\n", ret);
+	msm_host->pclk = devm_clk_get(&pdev->dev, "iface_clk");
+	if (!IS_ERR(msm_host->pclk)) {
+		ret = clk_prepare_enable(msm_host->pclk);
+		if (ret)
-		goto bus_clk_disable;
+			goto bus_clk_disable;
 	}
+	atomic_set(&msm_host->controller_clock, 1);
 
-	ret = clk_prepare_enable(msm_host->pclk);
+	if (msm_host->ice.pdev) {
+		/* Setup SDC ICE clock */
+		msm_host->ice_clk = devm_clk_get(&pdev->dev, "ice_core_clk");
+		if (!IS_ERR(msm_host->ice_clk)) {
+			/* ICE core has only one clock frequency for now */
+			ret = clk_set_rate(msm_host->ice_clk,
+					msm_host->pdata->ice_clk_max);
+			if (ret) {
+				dev_err(&pdev->dev, "ICE_CLK rate set failed (%d) for %u\n",
+					ret,
+					msm_host->pdata->ice_clk_max);
+				goto pclk_disable;
+			}
+			ret = clk_prepare_enable(msm_host->ice_clk);
-	if (ret)
-		goto bus_clk_disable;
+			if (ret)
+				goto pclk_disable;
+
+			msm_host->ice_clk_rate =
+				msm_host->pdata->ice_clk_max;
+		}
+	}
 
 	/* Setup SDC MMC clock */
-	msm_host->clk = devm_clk_get(&pdev->dev, "core");
+	msm_host->clk = devm_clk_get(&pdev->dev, "core_clk");
 	if (IS_ERR(msm_host->clk)) {
 		ret = PTR_ERR(msm_host->clk);
-		dev_err(&pdev->dev, "SDC MMC clk setup failed (%d)\n", ret);
 		goto pclk_disable;
 	}
 
-	/* Vote for maximum clock rate for maximum performance */
-	ret = clk_set_rate(msm_host->clk, INT_MAX);
-	if (ret)
-		dev_warn(&pdev->dev, "core clock boost failed\n");
-
+	/* Set to the minimum supported clock frequency */
+	ret = clk_set_rate(msm_host->clk, sdhci_msm_get_min_clock(host));
+	if (ret) {
+		dev_err(&pdev->dev, "MClk rate set failed (%d)\n", ret);
+		goto pclk_disable;
+	}
 	ret = clk_prepare_enable(msm_host->clk);
 	if (ret)
 		goto pclk_disable;
 
-	core_memres = platform_get_resource(pdev, IORESOURCE_MEM, 1);
-	msm_host->core_mem = devm_ioremap_resource(&pdev->dev, core_memres);
+	msm_host->clk_rate = sdhci_msm_get_min_clock(host);
+	atomic_set(&msm_host->clks_on, 1);
 
-	if (IS_ERR(msm_host->core_mem)) {
-		dev_err(&pdev->dev, "Failed to remap registers\n");
-		ret = PTR_ERR(msm_host->core_mem);
+	/* Setup CDC calibration fixed feedback clock */
+	msm_host->ff_clk = devm_clk_get(&pdev->dev, "cal_clk");
+	if (!IS_ERR(msm_host->ff_clk)) {
+		ret = clk_prepare_enable(msm_host->ff_clk);
+		if (ret)
-		goto clk_disable;
+			goto clk_disable;
 	}
 
+	/* Setup CDC calibration sleep clock */
+	msm_host->sleep_clk = devm_clk_get(&pdev->dev, "sleep_clk");
+	if (!IS_ERR(msm_host->sleep_clk)) {
+		ret = clk_prepare_enable(msm_host->sleep_clk);
+		if (ret)
+			goto ff_clk_disable;
+	}
+
+	msm_host->saved_tuning_phase = INVALID_TUNING_PHASE;
+
+	ret = sdhci_msm_bus_register(msm_host, pdev);
+	if (ret)
+		goto sleep_clk_disable;
+
+	if (msm_host->msm_bus_vote.client_handle)
+		INIT_DELAYED_WORK(&msm_host->msm_bus_vote.vote_work,
+				  sdhci_msm_bus_work);
+	sdhci_msm_bus_voting(host, 1);
+
+	/* Setup regulators */
+	ret = sdhci_msm_vreg_init(&pdev->dev, msm_host->pdata, true);
+	if (ret) {
+		dev_err(&pdev->dev, "Regulator setup failed (%d)\n", ret);
+		goto bus_unregister;
+	}
+
 	/* Reset the core and Enable SDHC mode */
-	writel_relaxed(readl_relaxed(msm_host->core_mem + CORE_POWER) |
-		       CORE_SW_RST, msm_host->core_mem + CORE_POWER);
+	core_memres = platform_get_resource_byname(pdev,
+				IORESOURCE_MEM, "core_mem");
+	if (!msm_host->mci_removed) {
+		if (!core_memres) {
+			dev_err(&pdev->dev, "Failed to get iomem resource\n");
+			ret = -ENODEV;
+			goto vreg_deinit;
+		}
+		msm_host->core_mem = devm_ioremap(&pdev->dev,
+			core_memres->start, resource_size(core_memres));
 
-	/* SW reset can take upto 10HCLK + 15MCLK cycles. (min 40us) */
-	usleep_range(1000, 5000);
-	if (readl(msm_host->core_mem + CORE_POWER) & CORE_SW_RST) {
-		dev_err(&pdev->dev, "Stuck in reset\n");
-		ret = -ETIMEDOUT;
-		goto clk_disable;
+		if (!msm_host->core_mem) {
+			dev_err(&pdev->dev, "Failed to remap registers\n");
+			ret = -ENOMEM;
+			goto vreg_deinit;
+		}
+	}
+
+	tlmm_memres = platform_get_resource_byname(pdev,
+				IORESOURCE_MEM, "tlmm_mem");
+	if (tlmm_memres) {
+		tlmm_mem = devm_ioremap(&pdev->dev, tlmm_memres->start,
+						resource_size(tlmm_memres));
+
+		if (!tlmm_mem) {
+			dev_err(&pdev->dev, "Failed to remap tlmm registers\n");
+			ret = -ENOMEM;
+			goto vreg_deinit;
-	}
+		}
+		writel_relaxed(readl_relaxed(tlmm_mem) | 0x2, tlmm_mem);
+	}
+
+	/*
+	 * Reset the vendor spec register to its power-on reset state.
+	 */
+	writel_relaxed(CORE_VENDOR_SPEC_POR_VAL,
+		host->ioaddr + msm_host_offset->CORE_VENDOR_SPEC);
+
+	/*
+	 * Ensure the SDHCI FIFO is enabled by disabling the alternative FIFO.
+	 */
+	writel_relaxed((readl_relaxed(host->ioaddr +
+			msm_host_offset->CORE_VENDOR_SPEC3) &
+			~CORE_FIFO_ALT_EN), host->ioaddr +
+			msm_host_offset->CORE_VENDOR_SPEC3);
 
+	if (!msm_host->mci_removed) {
-	/* Set HC_MODE_EN bit in HC_MODE register */
-	writel_relaxed(HC_MODE_EN, (msm_host->core_mem + CORE_HC_MODE));
+		/* Set HC_MODE_EN bit in HC_MODE register */
+		writel_relaxed(HC_MODE_EN, (msm_host->core_mem + CORE_HC_MODE));
 
+		/* Set FF_CLK_SW_RST_DIS bit in HC_MODE register */
+		writel_relaxed(readl_relaxed(msm_host->core_mem +
+				CORE_HC_MODE) | FF_CLK_SW_RST_DIS,
+				msm_host->core_mem + CORE_HC_MODE);
+	}
+	sdhci_set_default_hw_caps(msm_host, host);
+
+	/*
+	 * Set the PAD_PWR_SWITCH_EN bit so that the PAD_PWR_SWITCH bit can
+	 * be used as required later on.
+	 */
+	writel_relaxed((readl_relaxed(host->ioaddr +
+			msm_host_offset->CORE_VENDOR_SPEC) |
+			CORE_IO_PAD_PWR_SWITCH_EN), host->ioaddr +
+			msm_host_offset->CORE_VENDOR_SPEC);
+	/*
+	 * CORE_SW_RST above may trigger a power irq if the previous PWRCTL
+	 * status was either BUS_ON or IO_HIGH_V. So before we enable the
+	 * power irq interrupt in the GIC (by registering the interrupt
+	 * handler), we need to ensure that any pending power irq status is
+	 * acknowledged; otherwise the power irq handler would fire
+	 * prematurely.
+	 */
+	irq_status = sdhci_msm_readl_relaxed(host,
+		msm_host_offset->CORE_PWRCTL_STATUS);
+	sdhci_msm_writel_relaxed(irq_status, host,
+		msm_host_offset->CORE_PWRCTL_CLEAR);
+	irq_ctl = sdhci_msm_readl_relaxed(host,
+		msm_host_offset->CORE_PWRCTL_CTL);
+
+	if (irq_status & (CORE_PWRCTL_BUS_ON | CORE_PWRCTL_BUS_OFF))
+		irq_ctl |= CORE_PWRCTL_BUS_SUCCESS;
+	if (irq_status & (CORE_PWRCTL_IO_HIGH | CORE_PWRCTL_IO_LOW))
+		irq_ctl |= CORE_PWRCTL_IO_SUCCESS;
+	sdhci_msm_writel_relaxed(irq_ctl, host,
+		msm_host_offset->CORE_PWRCTL_CTL);
+
+	/*
+	 * Ensure that the above writes are propagated before interrupt
+	 * enablement in the GIC.
+	 */
+	mb();
+
+	/*
+	 * The following are deviations from the SDHC spec v3.0:
+	 * 1. Card detection is handled by a separate GPIO.
+	 * 2. Bus power control is handled by interacting with the PMIC.
+	 */
 	host->quirks |= SDHCI_QUIRK_BROKEN_CARD_DETECTION;
 	host->quirks |= SDHCI_QUIRK_SINGLE_POWER_WRITE;
+	host->quirks |= SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN;
+	host->quirks |= SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC;
+	host->quirks2 |= SDHCI_QUIRK2_ALWAYS_USE_BASE_CLOCK;
+	host->quirks2 |= SDHCI_QUIRK2_IGNORE_DATATOUT_FOR_R1BCMD;
+	host->quirks2 |= SDHCI_QUIRK2_BROKEN_PRESET_VALUE;
+	host->quirks2 |= SDHCI_QUIRK2_USE_RESERVED_MAX_TIMEOUT;
+	host->quirks2 |= SDHCI_QUIRK2_NON_STANDARD_TUNING;
+	host->quirks2 |= SDHCI_QUIRK2_USE_PIO_FOR_EMMC_TUNING;
+
+	if (host->quirks2 & SDHCI_QUIRK2_ALWAYS_USE_BASE_CLOCK)
+		host->quirks2 |= SDHCI_QUIRK2_DIVIDE_TOUT_BY_4;
 
 	host_version = readw_relaxed((host->ioaddr + SDHCI_HOST_VERSION));
 	dev_dbg(&pdev->dev, "Host Version: 0x%x Vendor Version 0x%x\n",
 		host_version, ((host_version & SDHCI_VENDOR_VER_MASK) >>
 			       SDHCI_VENDOR_VER_SHIFT));
+	if (((host_version & SDHCI_VENDOR_VER_MASK) >>
+		SDHCI_VENDOR_VER_SHIFT) == SDHCI_VER_100) {
+		/*
+		 * Add a 40us delay in the interrupt handler when
+		 * operating at the initialization frequency (400 kHz).
+		 */
+		host->quirks2 |= SDHCI_QUIRK2_SLOW_INT_CLR;
+		/*
+		 * Set Software Reset for DAT line in Software
+		 * Reset Register (Bit 2).
+		 */
+		host->quirks2 |= SDHCI_QUIRK2_RDWR_TX_ACTIVE_EOT;
+	}
 
-	core_version = readl_relaxed(msm_host->core_mem + CORE_MCI_VERSION);
-	core_major = (core_version & CORE_VERSION_MAJOR_MASK) >>
-		      CORE_VERSION_MAJOR_SHIFT;
-	core_minor = core_version & CORE_VERSION_MINOR_MASK;
-	dev_dbg(&pdev->dev, "MCI Version: 0x%08x, major: 0x%04x, minor: 0x%02x\n",
-		core_version, core_major, core_minor);
+	host->quirks2 |= SDHCI_QUIRK2_IGN_DATA_END_BIT_ERROR;
+
+	/* Setup PWRCTL irq */
+	msm_host->pwr_irq = platform_get_irq_byname(pdev, "pwr_irq");
+	if (msm_host->pwr_irq < 0) {
+		dev_err(&pdev->dev, "Failed to get pwr_irq by name (%d)\n",
+				msm_host->pwr_irq);
+		ret = msm_host->pwr_irq;
+		goto vreg_deinit;
+	}
+	ret = devm_request_threaded_irq(&pdev->dev, msm_host->pwr_irq, NULL,
+					sdhci_msm_pwr_irq, IRQF_ONESHOT,
+					dev_name(&pdev->dev), host);
+	if (ret) {
+		dev_err(&pdev->dev, "Request threaded irq(%d) failed (%d)\n",
+				msm_host->pwr_irq, ret);
+		goto vreg_deinit;
+	}
+
+	/* Enable pwr irq interrupts */
+	sdhci_msm_writel_relaxed(INT_MASK, host,
+		msm_host_offset->CORE_PWRCTL_MASK);
+
+#ifdef CONFIG_MMC_CLKGATE
+	/* Set clock gating delay to be used when CONFIG_MMC_CLKGATE is set */
+	msm_host->mmc->clkgate_delay = SDHCI_MSM_MMC_CLK_GATE_DELAY;
+#endif
+
+	/* Set host capabilities */
+	msm_host->mmc->caps |= msm_host->pdata->mmc_bus_width;
+	msm_host->mmc->caps |= msm_host->pdata->caps;
+	msm_host->mmc->caps |= MMC_CAP_AGGRESSIVE_PM;
+	msm_host->mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY;
+	msm_host->mmc->caps2 |= msm_host->pdata->caps2;
+	msm_host->mmc->caps2 |= MMC_CAP2_BOOTPART_NOACC;
+	msm_host->mmc->caps2 |= MMC_CAP2_HS400_POST_TUNING;
+	msm_host->mmc->caps2 |= MMC_CAP2_CLK_SCALE;
+	msm_host->mmc->caps2 |= MMC_CAP2_SANITIZE;
+	msm_host->mmc->caps2 |= MMC_CAP2_MAX_DISCARD_SIZE;
+	msm_host->mmc->caps2 |= MMC_CAP2_SLEEP_AWAKE;
+	msm_host->mmc->pm_caps |= MMC_PM_KEEP_POWER | MMC_PM_WAKE_SDIO_IRQ;
+
+	if (msm_host->pdata->nonremovable)
+		msm_host->mmc->caps |= MMC_CAP_NONREMOVABLE;
+
+	if (msm_host->pdata->nonhotplug)
+		msm_host->mmc->caps2 |= MMC_CAP2_NONHOTPLUG;
+
+	msm_host->mmc->sdr104_wa = msm_host->pdata->sdr104_wa;
+
+	/* Initialize ICE if present */
+	if (msm_host->ice.pdev) {
+		ret = sdhci_msm_ice_init(host);
+		if (ret) {
+			dev_err(&pdev->dev, "%s: SDHCI ICE init failed (%d)\n",
+					mmc_hostname(host->mmc), ret);
+			ret = -EINVAL;
+			goto vreg_deinit;
+		}
+		host->is_crypto_en = true;
+		/* Packed commands cannot be encrypted/decrypted using ICE */
+		msm_host->mmc->caps2 &= ~(MMC_CAP2_PACKED_WR |
+				MMC_CAP2_PACKED_WR_CONTROL);
+	}
 
+	init_completion(&msm_host->pwr_irq_completion);
+
+	if (gpio_is_valid(msm_host->pdata->status_gpio)) {
-	/*
-	 * Support for some capabilities is not advertised by newer
-	 * controller versions and must be explicitly enabled.
-	 */
+		/*
+		 * Set up the card detect GPIO in the active configuration
+		 * before configuring it as an IRQ. Otherwise, it can be in a
+		 * weird/inconsistent state, resulting in a flood of interrupts.
+		 */
-	if (core_major >= 1 && core_minor != 0x11 && core_minor != 0x12) {
-		caps = readl_relaxed(host->ioaddr + SDHCI_CAPABILITIES);
-		caps |= SDHCI_CAN_VDD_300 | SDHCI_CAN_DO_8BIT;
-		writel_relaxed(caps, host->ioaddr +
-			       CORE_VENDOR_SPEC_CAPABILITIES0);
+		sdhci_msm_setup_pins(msm_host->pdata, true);
+
+		/*
+		 * This delay is needed to stabilize the card detect GPIO
+		 * line after changing the pull configuration.
+		 */
+		usleep_range(10000, 10500);
+		ret = mmc_gpio_request_cd(msm_host->mmc,
+				msm_host->pdata->status_gpio, 0);
+		if (ret) {
+			dev_err(&pdev->dev, "%s: Failed to request card detection IRQ %d\n",
+					__func__, ret);
+			goto vreg_deinit;
+		}
+	}
+
+	if ((sdhci_readl(host, SDHCI_CAPABILITIES) & SDHCI_CAN_64BIT) &&
+		(dma_supported(mmc_dev(host->mmc), DMA_BIT_MASK(64)))) {
+		host->dma_mask = DMA_BIT_MASK(64);
+		mmc_dev(host->mmc)->dma_mask = &host->dma_mask;
+		mmc_dev(host->mmc)->coherent_dma_mask  = host->dma_mask;
+	} else if (dma_supported(mmc_dev(host->mmc), DMA_BIT_MASK(32))) {
+		host->dma_mask = DMA_BIT_MASK(32);
+		mmc_dev(host->mmc)->dma_mask = &host->dma_mask;
+		mmc_dev(host->mmc)->coherent_dma_mask  = host->dma_mask;
+	} else {
+		dev_err(&pdev->dev, "%s: Failed to set dma mask\n", __func__);
 	}
 
+	msm_host->pdata->sdiowakeup_irq = platform_get_irq_byname(pdev,
+							  "sdiowakeup_irq");
+	if (sdhci_is_valid_gpio_wakeup_int(msm_host)) {
+		dev_info(&pdev->dev, "%s: sdiowakeup_irq = %d\n", __func__,
+				msm_host->pdata->sdiowakeup_irq);
+		msm_host->is_sdiowakeup_enabled = true;
+		ret = request_irq(msm_host->pdata->sdiowakeup_irq,
+				  sdhci_msm_sdiowakeup_irq,
+				  IRQF_SHARED | IRQF_TRIGGER_HIGH,
+				  "sdhci-msm sdiowakeup", host);
+		if (ret) {
+			dev_err(&pdev->dev, "%s: request sdiowakeup IRQ %d: failed: %d\n",
+				__func__, msm_host->pdata->sdiowakeup_irq, ret);
+			msm_host->pdata->sdiowakeup_irq = -1;
+			msm_host->is_sdiowakeup_enabled = false;
+			goto vreg_deinit;
+		} else {
+			spin_lock_irqsave(&host->lock, flags);
+			sdhci_msm_cfg_sdiowakeup_gpio_irq(host, false);
+			msm_host->sdio_pending_processing = false;
+			spin_unlock_irqrestore(&host->lock, flags);
+		}
+	}
+
+	sdhci_msm_cmdq_init(host, pdev);
 	ret = sdhci_add_host(host);
+	if (ret) {
+		dev_err(&pdev->dev, "Add host failed (%d)\n", ret);
+		goto vreg_deinit;
+	}
+
+	msm_host->pltfm_init_done = true;
+
+	pm_runtime_set_active(&pdev->dev);
+	pm_runtime_enable(&pdev->dev);
+	pm_runtime_set_autosuspend_delay(&pdev->dev, MSM_AUTOSUSPEND_DELAY_MS);
+	pm_runtime_use_autosuspend(&pdev->dev);
+
+	msm_host->msm_bus_vote.max_bus_bw.show = show_sdhci_max_bus_bw;
+	msm_host->msm_bus_vote.max_bus_bw.store = store_sdhci_max_bus_bw;
+	sysfs_attr_init(&msm_host->msm_bus_vote.max_bus_bw.attr);
+	msm_host->msm_bus_vote.max_bus_bw.attr.name = "max_bus_bw";
+	msm_host->msm_bus_vote.max_bus_bw.attr.mode = S_IRUGO | S_IWUSR;
+	ret = device_create_file(&pdev->dev,
+			&msm_host->msm_bus_vote.max_bus_bw);
 	if (ret)
-		goto clk_disable;
+		goto remove_host;
 
-	return 0;
+	if (!gpio_is_valid(msm_host->pdata->status_gpio)) {
+		msm_host->polling.show = show_polling;
+		msm_host->polling.store = store_polling;
+		sysfs_attr_init(&msm_host->polling.attr);
+		msm_host->polling.attr.name = "polling";
+		msm_host->polling.attr.mode = S_IRUGO | S_IWUSR;
+		ret = device_create_file(&pdev->dev, &msm_host->polling);
+		if (ret)
+			goto remove_max_bus_bw_file;
+	}
+
+	msm_host->auto_cmd21_attr.show = show_auto_cmd21;
+	msm_host->auto_cmd21_attr.store = store_auto_cmd21;
+	sysfs_attr_init(&msm_host->auto_cmd21_attr.attr);
+	msm_host->auto_cmd21_attr.attr.name = "enable_auto_cmd21";
+	msm_host->auto_cmd21_attr.attr.mode = S_IRUGO | S_IWUSR;
+	ret = device_create_file(&pdev->dev, &msm_host->auto_cmd21_attr);
+	if (ret) {
+		pr_err("%s: %s: failed creating auto-cmd21 attr: %d\n",
+		       mmc_hostname(host->mmc), __func__, ret);
+		device_remove_file(&pdev->dev, &msm_host->auto_cmd21_attr);
+	}
+	if (sdhci_msm_is_bootdevice(&pdev->dev))
+		mmc_flush_detect_work(host->mmc);
+
+	/* Successful initialization */
+	goto out;
 
+remove_max_bus_bw_file:
+	device_remove_file(&pdev->dev, &msm_host->msm_bus_vote.max_bus_bw);
+remove_host:
+	dead = (readl_relaxed(host->ioaddr + SDHCI_INT_STATUS) == 0xffffffff);
+	pm_runtime_disable(&pdev->dev);
+	sdhci_remove_host(host, dead);
+vreg_deinit:
+	sdhci_msm_vreg_init(&pdev->dev, msm_host->pdata, false);
+bus_unregister:
+	if (msm_host->msm_bus_vote.client_handle)
+		sdhci_msm_bus_cancel_work_and_set_vote(host, 0);
+	sdhci_msm_bus_unregister(msm_host);
+sleep_clk_disable:
+	if (!IS_ERR(msm_host->sleep_clk))
+		clk_disable_unprepare(msm_host->sleep_clk);
+ff_clk_disable:
+	if (!IS_ERR(msm_host->ff_clk))
+		clk_disable_unprepare(msm_host->ff_clk);
 clk_disable:
-	clk_disable_unprepare(msm_host->clk);
+	if (!IS_ERR(msm_host->clk))
+		clk_disable_unprepare(msm_host->clk);
 pclk_disable:
-	clk_disable_unprepare(msm_host->pclk);
+	if (!IS_ERR(msm_host->pclk))
+		clk_disable_unprepare(msm_host->pclk);
 bus_clk_disable:
-	if (!IS_ERR(msm_host->bus_clk))
+	if (!IS_ERR_OR_NULL(msm_host->bus_clk))
 		clk_disable_unprepare(msm_host->bus_clk);
 pltfm_free:
 	sdhci_pltfm_free(pdev);
+out_host_free:
+	devm_kfree(&pdev->dev, msm_host);
+out:
+	pr_debug("%s: Exit %s\n", dev_name(&pdev->dev), __func__);
 	return ret;
 }
 
@@ -571,28 +4791,270 @@
 	struct sdhci_host *host = platform_get_drvdata(pdev);
 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
 	struct sdhci_msm_host *msm_host = pltfm_host->priv;
+	struct sdhci_msm_pltfm_data *pdata = msm_host->pdata;
 	int dead = (readl_relaxed(host->ioaddr + SDHCI_INT_STATUS) ==
 		    0xffffffff);
 
+	pr_debug("%s: %s\n", dev_name(&pdev->dev), __func__);
+	if (!gpio_is_valid(msm_host->pdata->status_gpio))
+		device_remove_file(&pdev->dev, &msm_host->polling);
+	device_remove_file(&pdev->dev, &msm_host->msm_bus_vote.max_bus_bw);
+	pm_runtime_disable(&pdev->dev);
 	sdhci_remove_host(host, dead);
 	sdhci_pltfm_free(pdev);
-	clk_disable_unprepare(msm_host->clk);
-	clk_disable_unprepare(msm_host->pclk);
-	if (!IS_ERR(msm_host->bus_clk))
-		clk_disable_unprepare(msm_host->bus_clk);
+
+	sdhci_msm_vreg_init(&pdev->dev, msm_host->pdata, false);
+
+	sdhci_msm_setup_pins(pdata, true);
+	sdhci_msm_setup_pins(pdata, false);
+
+	if (msm_host->msm_bus_vote.client_handle) {
+		sdhci_msm_bus_cancel_work_and_set_vote(host, 0);
+		sdhci_msm_bus_unregister(msm_host);
+	}
 	return 0;
 }
 
+#ifdef CONFIG_PM
+static int sdhci_msm_cfg_sdio_wakeup(struct sdhci_host *host, bool enable)
+{
+	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+	struct sdhci_msm_host *msm_host = pltfm_host->priv;
+	unsigned long flags;
+	int ret = 0;
+
+	if (!(host->mmc->card && mmc_card_sdio(host->mmc->card) &&
+	      sdhci_is_valid_gpio_wakeup_int(msm_host) &&
+	      mmc_card_wake_sdio_irq(host->mmc))) {
+		msm_host->sdio_pending_processing = false;
+		return 1;
+	}
+
+	spin_lock_irqsave(&host->lock, flags);
+	if (enable) {
+		/* configure DAT1 gpio if applicable */
+		if (sdhci_is_valid_gpio_wakeup_int(msm_host)) {
+			msm_host->sdio_pending_processing = false;
+			ret = enable_irq_wake(msm_host->pdata->sdiowakeup_irq);
+			if (!ret)
+				sdhci_msm_cfg_sdiowakeup_gpio_irq(host, true);
+			goto out;
+		} else {
+			pr_err("%s: sdiowakeup_irq invalid (enable=%d)\n",
+					mmc_hostname(host->mmc), enable);
+		}
+	} else {
+		if (sdhci_is_valid_gpio_wakeup_int(msm_host)) {
+			ret = disable_irq_wake(msm_host->pdata->sdiowakeup_irq);
+			sdhci_msm_cfg_sdiowakeup_gpio_irq(host, false);
+			msm_host->sdio_pending_processing = false;
+		} else {
+			pr_err("%s: sdiowakeup_irq invalid (enable=%d)\n",
+					mmc_hostname(host->mmc), enable);
+		}
+	}
+out:
+	if (ret)
+		pr_err("%s: %s: %sable wakeup: failed: %d gpio: %d\n",
+		       mmc_hostname(host->mmc), __func__, enable ? "en" : "dis",
+		       ret, msm_host->pdata->sdiowakeup_irq);
+	spin_unlock_irqrestore(&host->lock, flags);
+	return ret;
+}
+
+
+static int sdhci_msm_runtime_suspend(struct device *dev)
+{
+	struct sdhci_host *host = dev_get_drvdata(dev);
+	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+	struct sdhci_msm_host *msm_host = pltfm_host->priv;
+	ktime_t start = ktime_get();
+	int ret;
+
+	if (host->mmc->card && mmc_card_sdio(host->mmc->card))
+		goto defer_disable_host_irq;
+
+	sdhci_cfg_irq(host, false, true);
+
+defer_disable_host_irq:
+	disable_irq(msm_host->pwr_irq);
+
+	/*
+	 * Remove the bus vote immediately, but only if the clocks are
+	 * already off: work to drop the vote may have been queued, and it
+	 * might not complete before runtime suspend or system suspend.
+	 */
+	if (!atomic_read(&msm_host->clks_on)) {
+		if (msm_host->msm_bus_vote.client_handle)
+			sdhci_msm_bus_cancel_work_and_set_vote(host, 0);
+	}
+
+	if (host->is_crypto_en) {
+		ret = sdhci_msm_ice_suspend(host);
+		if (ret < 0)
+			pr_err("%s: failed to suspend crypto engine %d\n",
+					mmc_hostname(host->mmc), ret);
+	}
+	trace_sdhci_msm_runtime_suspend(mmc_hostname(host->mmc), 0,
+			ktime_to_us(ktime_sub(ktime_get(), start)));
+	return 0;
+}
+
+static int sdhci_msm_runtime_resume(struct device *dev)
+{
+	struct sdhci_host *host = dev_get_drvdata(dev);
+	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+	struct sdhci_msm_host *msm_host = pltfm_host->priv;
+	ktime_t start = ktime_get();
+	int ret;
+
+	if (host->is_crypto_en) {
+		ret = sdhci_msm_enable_controller_clock(host);
+		if (ret) {
+			pr_err("%s: Failed to enable required clocks\n",
+					mmc_hostname(host->mmc));
+			goto skip_ice_resume;
+		}
+		ret = sdhci_msm_ice_resume(host);
+		if (ret)
+			pr_err("%s: failed to resume crypto engine %d\n",
+					mmc_hostname(host->mmc), ret);
+	}
+skip_ice_resume:
+	if (host->mmc->card && mmc_card_sdio(host->mmc->card))
+		goto defer_enable_host_irq;
+
+	sdhci_cfg_irq(host, true, true);
+
+defer_enable_host_irq:
+	enable_irq(msm_host->pwr_irq);
+
+	trace_sdhci_msm_runtime_resume(mmc_hostname(host->mmc), 0,
+			ktime_to_us(ktime_sub(ktime_get(), start)));
+	return 0;
+}
+
+static int sdhci_msm_suspend(struct device *dev)
+{
+	struct sdhci_host *host = dev_get_drvdata(dev);
+	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+	struct sdhci_msm_host *msm_host = pltfm_host->priv;
+	int ret = 0;
+	int sdio_cfg = 0;
+	ktime_t start = ktime_get();
+
+	if (gpio_is_valid(msm_host->pdata->status_gpio) &&
+	    (msm_host->mmc->slot.cd_irq >= 0))
+		disable_irq(msm_host->mmc->slot.cd_irq);
+
+	if (pm_runtime_suspended(dev)) {
+		pr_debug("%s: %s: already runtime suspended\n",
+			mmc_hostname(host->mmc), __func__);
+		goto out;
+	}
+	ret = sdhci_msm_runtime_suspend(dev);
+out:
+	sdhci_msm_disable_controller_clock(host);
+	if (host->mmc->card && mmc_card_sdio(host->mmc->card)) {
+		sdio_cfg = sdhci_msm_cfg_sdio_wakeup(host, true);
+		if (sdio_cfg)
+			sdhci_cfg_irq(host, false, true);
+	}
+
+	trace_sdhci_msm_suspend(mmc_hostname(host->mmc), ret,
+			ktime_to_us(ktime_sub(ktime_get(), start)));
+	return ret;
+}
+
+static int sdhci_msm_resume(struct device *dev)
+{
+	struct sdhci_host *host = dev_get_drvdata(dev);
+	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+	struct sdhci_msm_host *msm_host = pltfm_host->priv;
+	int ret = 0;
+	int sdio_cfg = 0;
+	ktime_t start = ktime_get();
+
+	if (gpio_is_valid(msm_host->pdata->status_gpio) &&
+	    (msm_host->mmc->slot.cd_irq >= 0))
+		enable_irq(msm_host->mmc->slot.cd_irq);
+
+	if (pm_runtime_suspended(dev)) {
+		pr_debug("%s: %s: runtime suspended, defer system resume\n",
+			mmc_hostname(host->mmc), __func__);
+		goto out;
+	}
+
+	ret = sdhci_msm_runtime_resume(dev);
+out:
+	if (host->mmc->card && mmc_card_sdio(host->mmc->card)) {
+		sdio_cfg = sdhci_msm_cfg_sdio_wakeup(host, false);
+		if (sdio_cfg)
+			sdhci_cfg_irq(host, true, true);
+	}
+
+	trace_sdhci_msm_resume(mmc_hostname(host->mmc), ret,
+			ktime_to_us(ktime_sub(ktime_get(), start)));
+	return ret;
+}
+
+static int sdhci_msm_suspend_noirq(struct device *dev)
+{
+	struct sdhci_host *host = dev_get_drvdata(dev);
+	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+	struct sdhci_msm_host *msm_host = pltfm_host->priv;
+	int ret = 0;
+
+	/*
+	 * ksdioirqd may still be running, so retry the suspend if the
+	 * clocks are still on.
+	 */
+	if (atomic_read(&msm_host->clks_on)) {
+		pr_warn("%s: %s: clock ON after suspend, aborting suspend\n",
+			mmc_hostname(host->mmc), __func__);
+		ret = -EAGAIN;
+	}
+
+	if (host->mmc->card && mmc_card_sdio(host->mmc->card))
+		if (msm_host->sdio_pending_processing)
+			ret = -EBUSY;
+
+	return ret;
+}
+
+static const struct dev_pm_ops sdhci_msm_pmops = {
+	SET_LATE_SYSTEM_SLEEP_PM_OPS(sdhci_msm_suspend, sdhci_msm_resume)
+	SET_RUNTIME_PM_OPS(sdhci_msm_runtime_suspend, sdhci_msm_runtime_resume,
+			   NULL)
+	.suspend_noirq = sdhci_msm_suspend_noirq,
+};
+
+#define SDHCI_MSM_PMOPS (&sdhci_msm_pmops)
+
+#else
+#define SDHCI_MSM_PMOPS NULL
+#endif
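
For reference, with CONFIG_PM_SLEEP and CONFIG_PM set, the two helper macros above wire the callbacks into the late system-sleep and runtime PM slots (SET_LATE_SYSTEM_SLEEP_PM_OPS also fills the matching hibernation hooks, omitted here for brevity). The structure is roughly equivalent to:

	/* Approximate expansion of the macros above; illustrative only. */
	static const struct dev_pm_ops sdhci_msm_pmops = {
		.suspend_late	 = sdhci_msm_suspend,
		.resume_early	 = sdhci_msm_resume,
		.runtime_suspend = sdhci_msm_runtime_suspend,
		.runtime_resume	 = sdhci_msm_runtime_resume,
		.suspend_noirq	 = sdhci_msm_suspend_noirq,
	};
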
+static const struct of_device_id sdhci_msm_dt_match[] = {
+	{.compatible = "qcom,sdhci-msm"},
+	{.compatible = "qcom,sdhci-msm-v5"},
+	{},
+};
+MODULE_DEVICE_TABLE(of, sdhci_msm_dt_match);
+
 static struct platform_driver sdhci_msm_driver = {
 	.probe = sdhci_msm_probe,
 	.remove = sdhci_msm_remove,
 	.driver = {
 		   .name = "sdhci_msm",
+		.owner	= THIS_MODULE,
+		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
 		   .of_match_table = sdhci_msm_dt_match,
+		.pm	= SDHCI_MSM_PMOPS,
 	},
 };
 
 module_platform_driver(sdhci_msm_driver);
 
-MODULE_DESCRIPTION("Qualcomm Secure Digital Host Controller Interface driver");
+MODULE_DESCRIPTION("Qualcomm Technologies, Inc. Secure Digital Host Controller Interface driver");
 MODULE_LICENSE("GPL v2");
diff -ruw linux-4.4.115/drivers/mmc/Kconfig linux-4.4.115-fbx/drivers/mmc/Kconfig
--- linux-4.4.115/drivers/mmc/Kconfig	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/mmc/Kconfig	2019-01-22 16:16:24.763257817 +0100
@@ -19,6 +19,14 @@
 	  This is an option for use by developers; most people should
 	  say N here.  This enables MMC core and driver debugging.
 
+config MMC_PERF_PROFILING
+	bool "MMC performance profiling"
+	depends on MMC != n
+	default n
+	help
+	  If you say Y here, support will be added for collecting
+	  performance numbers at the MMC Queue and Host layers.
+
 if MMC
 
 source "drivers/mmc/core/Kconfig"
diff -ruw linux-4.4.115/drivers/mtd/nand/Kconfig linux-4.4.115-fbx/drivers/mtd/nand/Kconfig
--- linux-4.4.115/drivers/mtd/nand/Kconfig	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/mtd/nand/Kconfig	2019-01-22 16:16:24.819258324 +0100
@@ -1,3 +1,10 @@
+config MTD_NAND_IDS
+	tristate "Include chip ids for known NAND devices."
+	depends on MTD
+	help
+	  Useful for NAND drivers that do not use the NAND subsystem but
+	  Useful for NAND drivers that do not use the NAND subsystem but
+	  would still like to take advantage of the known chip information.
 config MTD_NAND_ECC
 	tristate
 
@@ -108,9 +115,6 @@
 config MTD_NAND_OMAP_BCH_BUILD
 	def_tristate MTD_NAND_OMAP2 && MTD_NAND_OMAP_BCH
 
-config MTD_NAND_IDS
-	tristate
-
 config MTD_NAND_RICOH
 	tristate "Ricoh xD card reader"
 	default n
diff -ruw linux-4.4.115/drivers/net/can/spi/Kconfig linux-4.4.115-fbx/drivers/net/can/spi/Kconfig
--- linux-4.4.115/drivers/net/can/spi/Kconfig	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/net/can/spi/Kconfig	2019-10-29 09:26:24.121207856 +0100
@@ -7,4 +7,15 @@
 	---help---
 	  Driver for the Microchip MCP251x SPI CAN controllers.
 
+config CAN_RH850
+	tristate "Renesas RH850 SPI CAN controller"
+	depends on HAS_DMA
+	---help---
+	  Driver for the Renesas RH850 SPI CAN controller.
+
+config CAN_K61
+	tristate "Freescale K61 SPI CAN controllers"
+	depends on SPI
+	---help---
+	  Driver for the Freescale K61 SPI CAN controllers.
 endmenu
diff -ruw linux-4.4.115/drivers/net/can/spi/Makefile linux-4.4.115-fbx/drivers/net/can/spi/Makefile
--- linux-4.4.115/drivers/net/can/spi/Makefile	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/net/can/spi/Makefile	2019-10-29 09:26:24.121207856 +0100
@@ -4,3 +4,5 @@
 
 
 obj-$(CONFIG_CAN_MCP251X)	+= mcp251x.o
+obj-$(CONFIG_CAN_RH850)		+= rh850.o
+obj-${CONFIG_CAN_K61}		+= k61.o
diff -ruw linux-4.4.115/drivers/net/ethernet/atheros/atl1c/Makefile linux-4.4.115-fbx/drivers/net/ethernet/atheros/atl1c/Makefile
--- linux-4.4.115/drivers/net/ethernet/atheros/atl1c/Makefile	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/net/ethernet/atheros/atl1c/Makefile	2019-10-29 09:26:24.141208052 +0100
@@ -1,2 +1,3 @@
 obj-$(CONFIG_ATL1C) += atl1c.o
 atl1c-objs := atl1c_main.o atl1c_hw.o atl1c_ethtool.o
+ccflags-$(CONFIG_ARCH_MSM8998) := -DAPQ_PLATFORM
diff -ruw linux-4.4.115/drivers/net/ethernet/atheros/Kconfig linux-4.4.115-fbx/drivers/net/ethernet/atheros/Kconfig
--- linux-4.4.115/drivers/net/ethernet/atheros/Kconfig	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/net/ethernet/atheros/Kconfig	2019-01-22 16:16:24.923259266 +0100
@@ -78,4 +78,11 @@
 	  To compile this driver as a module, choose M here.  The module
 	  will be called alx.
 
+
+config ALX_PROP
+	tristate "Qualcomm eth prop"
+	depends on PCI
+	select CRC32
+	select MDIO
+
 endif # NET_VENDOR_ATHEROS
diff -ruw linux-4.4.115/drivers/net/ethernet/atheros/Makefile linux-4.4.115-fbx/drivers/net/ethernet/atheros/Makefile
--- linux-4.4.115/drivers/net/ethernet/atheros/Makefile	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/net/ethernet/atheros/Makefile	2019-01-22 16:16:24.923259266 +0100
@@ -7,3 +7,4 @@
 obj-$(CONFIG_ATL1E) += atl1e/
 obj-$(CONFIG_ATL1C) += atl1c/
 obj-$(CONFIG_ALX) += alx/
+obj-$(CONFIG_ALX_PROP) += alx_prop/
diff -ruw linux-4.4.115/drivers/net/ethernet/Kconfig linux-4.4.115-fbx/drivers/net/ethernet/Kconfig
--- linux-4.4.115/drivers/net/ethernet/Kconfig	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/net/ethernet/Kconfig	2019-01-22 16:16:24.899259048 +0100
@@ -110,6 +110,7 @@
 source "drivers/net/ethernet/micrel/Kconfig"
 source "drivers/net/ethernet/microchip/Kconfig"
 source "drivers/net/ethernet/moxa/Kconfig"
+source "drivers/net/ethernet/msm/Kconfig"
 source "drivers/net/ethernet/myricom/Kconfig"
 
 config FEALNX
diff -ruw linux-4.4.115/drivers/net/ethernet/Makefile linux-4.4.115-fbx/drivers/net/ethernet/Makefile
--- linux-4.4.115/drivers/net/ethernet/Makefile	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/net/ethernet/Makefile	2019-01-22 16:16:24.899259048 +0100
@@ -50,6 +50,7 @@
 obj-$(CONFIG_NET_VENDOR_MICREL) += micrel/
 obj-$(CONFIG_NET_VENDOR_MICROCHIP) += microchip/
 obj-$(CONFIG_NET_VENDOR_MOXART) += moxa/
+obj-$(CONFIG_ARCH_QCOM) += msm/
 obj-$(CONFIG_NET_VENDOR_MYRI) += myricom/
 obj-$(CONFIG_FEALNX) += fealnx.o
 obj-$(CONFIG_NET_VENDOR_NATSEMI) += natsemi/
diff -ruw linux-4.4.115/drivers/net/Kconfig linux-4.4.115-fbx/drivers/net/Kconfig
--- linux-4.4.115/drivers/net/Kconfig	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/net/Kconfig	2019-10-29 09:26:24.101207660 +0100
@@ -425,4 +425,6 @@
 
 source "drivers/net/hyperv/Kconfig"
 
+source "drivers/net/rmnet/Kconfig"
+
 endif # NETDEVICES
diff -ruw linux-4.4.115/drivers/net/Makefile linux-4.4.115-fbx/drivers/net/Makefile
--- linux-4.4.115/drivers/net/Makefile	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/net/Makefile	2019-01-22 16:16:24.851258614 +0100
@@ -70,3 +70,4 @@
 obj-$(CONFIG_NTB_NETDEV) += ntb_netdev.o
 
 obj-$(CONFIG_FUJITSU_ES) += fjes/
+obj-$(CONFIG_RMNET) += rmnet/
diff -ruw linux-4.4.115/drivers/net/ppp/Kconfig linux-4.4.115-fbx/drivers/net/ppp/Kconfig
--- linux-4.4.115/drivers/net/ppp/Kconfig	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/net/ppp/Kconfig	2019-01-22 16:16:25.363263250 +0100
@@ -149,6 +149,23 @@
 	  tunnels. L2TP is replacing PPTP for VPN uses.
 if TTY
 
+config PPPOLAC
+	tristate "PPP on L2TP Access Concentrator"
+	depends on PPP && INET
+	help
+	  L2TP (RFC 2661) is a tunneling protocol widely used in virtual private
+	  networks. This driver handles L2TP data packets between a UDP socket
+	  and a PPP channel, but only permits one session per socket. Thus it is
+	  fairly simple and suited for clients.
+
+config PPPOPNS
+	tristate "PPP on PPTP Network Server"
+	depends on PPP && INET
+	help
+	  PPTP (RFC 2637) is a tunneling protocol widely used in virtual private
+	  networks. This driver handles PPTP data packets between a RAW socket
+	  and a PPP channel. It is fairly simple and easy to use.
+
 config PPP_ASYNC
 	tristate "PPP support for async serial ports"
 	depends on PPP
diff -ruw linux-4.4.115/drivers/net/ppp/Makefile linux-4.4.115-fbx/drivers/net/ppp/Makefile
--- linux-4.4.115/drivers/net/ppp/Makefile	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/net/ppp/Makefile	2019-01-22 16:16:25.363263250 +0100
@@ -11,3 +11,5 @@
 obj-$(CONFIG_PPPOE) += pppox.o pppoe.o
 obj-$(CONFIG_PPPOL2TP) += pppox.o
 obj-$(CONFIG_PPTP) += pppox.o pptp.o
+obj-$(CONFIG_PPPOLAC) += pppox.o pppolac.o
+obj-$(CONFIG_PPPOPNS) += pppox.o pppopns.o
diff -ruw linux-4.4.115/drivers/net/tun.c linux-4.4.115-fbx/drivers/net/tun.c
--- linux-4.4.115/drivers/net/tun.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/net/tun.c	2019-10-29 09:26:24.425210831 +0100
@@ -71,6 +71,10 @@
 #include <net/sock.h>
 #include <linux/seq_file.h>
 #include <linux/uio.h>
+#include <linux/ip.h>
+#include <linux/udp.h>
+#include <linux/tcp.h>
+#include <net/ip.h>
 
 #include <asm/uaccess.h>
 
@@ -169,6 +173,31 @@
 	unsigned long updated;
 };
 
+/*
+ * smalltun definitions
+ */
+#define SMALLTUN_MAGIC			0x6660
+#define SMALLTUN_VERSION		0x1
+
+#define TYPE_MASK			0xf
+#define TYPE_CLT			(1 << 3)
+
+#define TYPE_TRIGGER			0x0
+#define TYPE_CHALLENGE			0x1
+#define TYPE_CLIENT_HELLO		0x2
+#define TYPE_SERVER_HELLO		0x3
+
+#define TYPE_CLT_DATA			(TYPE_CLT | 0x0)
+#define TYPE_CLT_GET_PARAMS		(TYPE_CLT | 0x1)
+#define TYPE_CLT_PARAMS			(TYPE_CLT | 0x2)
+
+struct smalltun_pkt_hdr {
+	u16		magic;
+	u8		version;
+	u8		flag_type;
+	u8		data[0];
+};
+
 #define TUN_NUM_FLOW_ENTRIES 1024
 
 /* Since the socket were moved to tun_file, to preserve the behavior of persist
@@ -182,6 +211,11 @@
 	kuid_t			owner;
 	kgid_t			group;
 
+	struct smalltun_fp	smalltun_fps[4];
+	unsigned int		smalltun_valid_count;
+	unsigned int		smalltun_valid[4];
+	struct rtable		*smalltun_rt_cache[4];
+
 	struct net_device	*dev;
 	netdev_features_t	set_features;
 #define TUN_USER_FEATURES (NETIF_F_HW_CSUM|NETIF_F_TSO_ECN|NETIF_F_TSO| \
@@ -806,6 +840,184 @@
 	return 0;
 }
 
+static int smalltun_is_fastpath(struct tun_struct *tun,
+				struct sk_buff *skb)
+{
+	struct iphdr *iph;
+	const struct smalltun_fp *fp;
+	struct rtable **prt_cache, *rt_cache;
+	struct flowi4 fl;
+	bool match;
+	size_t i;
+
+	if (!tun->smalltun_valid_count)
+		return 0;
+
+	if (skb->protocol != htons(ETH_P_IP))
+		return 0;
+
+	if (!pskb_may_pull(skb, sizeof(struct iphdr)))
+		return 0;
+
+	iph = ip_hdr(skb);
+
+	/* lookup smalltun fastpath */
+	fp = NULL;
+	rt_cache = NULL;
+	for (i = 0; i < ARRAY_SIZE(tun->smalltun_fps); i++) {
+		if (!tun->smalltun_valid[i])
+			continue;
+
+		if (iph->daddr == tun->smalltun_fps[i].inner_dst) {
+			fp = &tun->smalltun_fps[i];
+			prt_cache = &tun->smalltun_rt_cache[i];
+			break;
+		}
+	}
+
+	if (!fp)
+		return 0;
+
+	if (fp->af != AF_INET) {
+		/* FIXME: implement IPv6 transport */
+		return 0;
+	}
+
+	if (!pskb_may_pull(skb, iph->ihl * 4))
+		return 0;
+
+	match = false;
+	for (i = 0; i < fp->rule_count; i++) {
+		const struct smalltun_rule *r = &fp->rules[i];
+		unsigned int sport = 0, dport = 0;
+
+		if (iph->protocol != r->proto)
+			continue;
+
+		switch (iph->protocol) {
+		case IPPROTO_UDP:
+		{
+			const struct udphdr *udp;
+			udp = (struct udphdr *)((u8 *)iph + (iph->ihl << 2));
+			sport = udp->source;
+			dport = udp->dest;
+			break;
+		}
+		case IPPROTO_TCP:
+		{
+			const struct tcphdr *tcp;
+			tcp = (struct tcphdr *)((u8 *)iph + (iph->ihl << 2));
+			sport = tcp->source;
+			dport = tcp->dest;
+			break;
+		}
+		default:
+			match = true;
+			break;
+		}
+
+		if (r->src_port_start && r->src_port_end) {
+			if (sport < r->src_port_start ||
+			    sport > r->src_port_end)
+				continue;
+		}
+
+		if (r->dst_port_start && r->dst_port_end) {
+			if (dport < r->dst_port_start ||
+			    dport > r->dst_port_end)
+				continue;
+		}
+		match = true;
+	}
+
+	if (!match)
+		return 0;
+
+	if (fp->af == AF_INET) {
+		struct iphdr *oiph;
+		struct udphdr *oudph;
+		struct smalltun_pkt_hdr *pkt;
+		unsigned int payload_len;
+
+		payload_len = skb->len;
+
+		if (skb_cow_head(skb,
+				 sizeof (struct iphdr) +
+				 sizeof (struct udphdr) +
+				 sizeof (struct smalltun_pkt_hdr)))
+			return 0;
+
+		pkt = (struct smalltun_pkt_hdr *)
+			skb_push(skb, sizeof (struct smalltun_pkt_hdr));
+		oudph = (struct udphdr *)
+			skb_push(skb, sizeof (struct udphdr));
+		skb_reset_transport_header(skb);
+		oiph = (struct iphdr *)
+			skb_push(skb, sizeof (struct iphdr));
+		skb_reset_network_header(skb);
+
+		/* ip */
+		oiph->version = 4;
+		oiph->tos = 0;
+		oiph->id = 0;
+		oiph->ihl = 5;
+		oiph->frag_off = 0;
+		oiph->ttl = 64;
+		oiph->protocol = IPPROTO_UDP;
+		memcpy(&oiph->saddr, fp->outer_src, 4);
+		memcpy(&oiph->daddr, fp->outer_dst, 4);
+
+		/* udp */
+		oudph->source = fp->outer_src_port;
+		oudph->dest = fp->outer_dst_port;
+		oudph->len = htons(payload_len + sizeof (*oudph) +
+				   sizeof (*pkt));
+		oudph->check = 0;
+
+		/* smalltun */
+		pkt->magic = htons(SMALLTUN_MAGIC);
+		pkt->version = SMALLTUN_VERSION;
+		pkt->flag_type = TYPE_CLT_DATA;
+
+		memset(&fl, 0x00, sizeof (fl));
+		memcpy(&fl.saddr, fp->outer_src, 4);
+		memcpy(&fl.daddr, fp->outer_dst, 4);
+
+		if (*prt_cache && (*prt_cache)->dst.obsolete > 0) {
+			rt_cache = *prt_cache;
+			*prt_cache = NULL;
+			ip_rt_put(rt_cache);
+		}
+
+		rt_cache = *prt_cache;
+		if (!rt_cache) {
+			rt_cache = ip_route_output_key(&init_net, &fl);
+			if (IS_ERR(rt_cache)) {
+				pr_err("ip_route_output_key(%pI4): %li\n",
+				       &fl.daddr, PTR_ERR(rt_cache));
+				return 0;
+			}
+
+			if (!rt_cache->dst.dev) {
+				pr_err("ip_route_output_key(%pI4): no dev\n",
+				       &fl.daddr);
+				return 0;
+			}
+
+			*prt_cache = rt_cache;
+		}
+
+		skb_dst_set(skb, dst_clone(&rt_cache->dst));
+		skb->dev = skb_dst(skb)->dev;
+		ip_local_out(&init_net, NULL, skb);
+		return 1;
+	}
+
+	/* find route */
+
+	return 0;
+}
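
The fast path prepends three headers to each matched packet: an outer IPv4 header (20 bytes with ihl=5), a UDP header (8 bytes), and the 4-byte smalltun header defined above, for 32 bytes of fixed overhead. ip_local_out() later fills in the outer tot_len and IP checksum, which is why the code above never sets them; the UDP checksum is deliberately left zero. A standalone sketch of the overhead arithmetic:

	#include <assert.h>
	#include <stdint.h>
	#include <stdio.h>

	/* Copy of the header defined above; 4 bytes, no padding. */
	struct smalltun_pkt_hdr {
		uint16_t magic;
		uint8_t version;
		uint8_t flag_type;
	};

	int main(void)
	{
		/* 20 B outer IPv4 (ihl=5) + 8 B UDP + smalltun header. */
		size_t overhead = 20 + 8 + sizeof(struct smalltun_pkt_hdr);

		assert(sizeof(struct smalltun_pkt_hdr) == 4);
		printf("fast-path encapsulation overhead: %zu bytes\n",
		       overhead);	/* 32 */
		return 0;
	}
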
+
 /* Net device start xmit */
 static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
 {
@@ -862,16 +1074,18 @@
 	if (unlikely(skb_orphan_frags(skb, GFP_ATOMIC)))
 		goto drop;
 
-	if (skb->sk && sk_fullsock(skb->sk)) {
-		sock_tx_timestamp(skb->sk, &skb_shinfo(skb)->tx_flags);
-		sw_tx_timestamp(skb);
-	}
+	skb_tx_timestamp(skb);
 
 	/* Orphan the skb - required as we might hang on to it
 	 * for indefinite time.
 	 */
 	skb_orphan(skb);
 
+	if (smalltun_is_fastpath(tun, skb)) {
+		rcu_read_unlock();
+		return NETDEV_TX_OK;
+	}
+
 	nf_reset(skb);
 
 	/* Enqueue packet */
@@ -1192,6 +1406,10 @@
 		}
 	}
 
+	if (!(tun->flags & IFF_NO_PI))
+		if (pi.flags & htons(CHECKSUM_UNNECESSARY))
+			skb->ip_summed = CHECKSUM_UNNECESSARY;
+
 	switch (tun->flags & TUN_TYPE_MASK) {
 	case IFF_TUN:
 		if (tun->flags & IFF_NO_PI) {
@@ -1895,6 +2113,12 @@
 	int le;
 	int ret;
 
+#ifdef CONFIG_ANDROID_PARANOID_NETWORK
+	if (cmd != TUNGETIFF && !capable(CAP_NET_ADMIN)) {
+		return -EPERM;
+	}
+#endif
+
 	if (cmd == TUNSETIFF || cmd == TUNSETQUEUE || _IOC_TYPE(cmd) == 0x89) {
 		if (copy_from_user(&ifr, argp, ifreq_len))
 			return -EFAULT;
@@ -2152,6 +2376,100 @@
 		ret = 0;
 		break;
 
+	case TUNSMALLTUNSETFP:
+	{
+		struct smalltun_fp fp;
+		unsigned int i;
+		int free_idx;
+
+		ret = -EFAULT;
+		if (copy_from_user(&fp, argp, sizeof(fp)))
+			break;
+
+		/* look for duplicate */
+		ret = 0;
+		free_idx = -1;
+		for (i = 0; i < ARRAY_SIZE(tun->smalltun_fps); i++) {
+			if (!tun->smalltun_valid[i]) {
+				if (free_idx == -1)
+					free_idx = i;
+				continue;
+			}
+
+			if (fp.inner_src == tun->smalltun_fps[i].inner_src &&
+			    fp.inner_dst == tun->smalltun_fps[i].inner_dst) {
+				ret = -EEXIST;
+				break;
+			}
+		}
+
+		if (ret)
+			break;
+
+		if (free_idx == -1) {
+			ret = -ENOSPC;
+			break;
+		}
+
+		memcpy(&tun->smalltun_fps[free_idx], &fp, sizeof (fp));
+		tun->smalltun_valid[free_idx] = 1;
+		tun->smalltun_valid_count++;
+		tun_debug(KERN_INFO, tun, "new fp rule for %pI4 <=> %pI4 (%u rules)\n",
+			  &fp.inner_src,
+			  &fp.inner_dst,
+			  fp.rule_count);
+
+		if (fp.af == AF_INET) {
+			tun_debug(KERN_INFO, tun, "outer %pI4:%u <=> %pI4:%u\n",
+				  fp.outer_src,
+				  ntohs(fp.outer_src_port),
+				  fp.outer_dst,
+				  ntohs(fp.outer_dst_port));
+		} else {
+			tun_debug(KERN_INFO, tun, "outer %pI6:%u <=> %pI6:%u\n",
+				  fp.outer_src,
+				  ntohs(fp.outer_src_port),
+				  fp.outer_dst,
+				  ntohs(fp.outer_dst_port));
+		}
+		break;
+	}
+
+	case TUNSMALLTUNDELFP:
+	{
+		struct smalltun_fp fp;
+		unsigned int i;
+
+		ret = -EFAULT;
+		if (copy_from_user(&fp, argp, sizeof(fp)))
+			break;
+
+		/* lookup */
+		ret = -ENOENT;
+		for (i = 0; i < ARRAY_SIZE(tun->smalltun_fps); i++) {
+			if (fp.inner_src == tun->smalltun_fps[i].inner_src &&
+			    fp.inner_dst == tun->smalltun_fps[i].inner_dst) {
+				ret = 0;
+				break;
+			}
+		}
+
+		if (ret)
+			break;
+
+		tun->smalltun_valid[i] = 0;
+		tun->smalltun_valid_count--;
+		if (tun->smalltun_rt_cache[i]) {
+			ip_rt_put(tun->smalltun_rt_cache[i]);
+			tun->smalltun_rt_cache[i] = NULL;
+		}
+
+		tun_debug(KERN_INFO, tun, "removed fp rule for %pI4 <=> %pI4\n",
+			  &fp.inner_src,
+			  &fp.inner_dst);
+		break;
+	}
+
 	default:
 		ret = -EINVAL;
 		break;
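
A userspace sketch of driving the new ioctls. The TUNSMALLTUNSETFP/TUNSMALLTUNDELFP numbers and the full struct smalltun_fp layout come from a UAPI header added elsewhere in this patchset and not shown in this hunk, so the include and field names below are assumptions based on the driver code above:

	#include <fcntl.h>
	#include <stdio.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <sys/socket.h>
	#include <linux/if_tun.h>  /* assumed to export TUNSMALLTUNSETFP, struct smalltun_fp */

	int main(void)
	{
		struct smalltun_fp fp;
		int fd = open("/dev/net/tun", O_RDWR);

		if (fd < 0)
			return 1;

		/* ...attach the fd to a tun device with TUNSETIFF first... */

		memset(&fp, 0, sizeof(fp));
		fp.af = AF_INET;
		/* inner_src/inner_dst, outer addresses/ports and rules[]
		 * would be filled in here; field names as used by the
		 * driver code above. */

		if (ioctl(fd, TUNSMALLTUNSETFP, &fp) < 0)
			perror("TUNSMALLTUNSETFP"); /* EEXIST: duplicate, ENOSPC: table full */
		return 0;
	}
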
diff -ruw linux-4.4.115/drivers/net/wireless/ath/ath10k/bmi.c linux-4.4.115-fbx/drivers/net/wireless/ath/ath10k/bmi.c
--- linux-4.4.115/drivers/net/wireless/ath/ath10k/bmi.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/net/wireless/ath/ath10k/bmi.c	2019-01-22 16:16:25.411263685 +0100
@@ -221,7 +221,7 @@
 	u32 txlen;
 	int ret;
 
-	ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi lz data buffer 0x%p length %d\n",
+	ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi lz data buffer 0x%pK length %d\n",
 		   buffer, length);
 
 	if (ar->bmi.done_sent) {
@@ -287,7 +287,7 @@
 	int ret;
 
 	ath10k_dbg(ar, ATH10K_DBG_BMI,
-		   "bmi fast download address 0x%x buffer 0x%p length %d\n",
+		   "bmi fast download address 0x%x buffer 0x%pK length %d\n",
 		   address, buffer, length);
 
 	ret = ath10k_bmi_lz_stream_start(ar, address);
diff -ruw linux-4.4.115/drivers/net/wireless/ath/ath10k/ce.c linux-4.4.115-fbx/drivers/net/wireless/ath/ath10k/ce.c
--- linux-4.4.115/drivers/net/wireless/ath/ath10k/ce.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/net/wireless/ath/ath10k/ce.c	2019-01-22 16:16:25.411263685 +0100
@@ -16,13 +16,12 @@
  */
 
 #include "hif.h"
-#include "pci.h"
 #include "ce.h"
 #include "debug.h"
 
 /*
  * Support for Copy Engine hardware, which is mainly used for
- * communication between Host and Target over a PCIe interconnect.
+ * communication between Host and Target over a PCIe/SNOC/AHB interconnect.
  */
 
 /*
@@ -33,13 +32,13 @@
  * Each ring consists of a number of descriptors which specify
  * an address, length, and meta-data.
  *
- * Typically, one side of the PCIe interconnect (Host or Target)
+ * Typically, one side of the PCIe/AHB/SNOC interconnect (Host or Target)
  * controls one ring and the other side controls the other ring.
  * The source side chooses when to initiate a transfer and it
  * chooses what to send (buffer address, length). The destination
  * side keeps a supply of "anonymous receive buffers" available and
  * it handles incoming data as it arrives (when the destination
- * recieves an interrupt).
+ * receives an interrupt).
  *
  * The sender may send a simple buffer (address/length) or it may
  * send a small list of buffers.  When a small list is sent, hardware
@@ -63,201 +62,401 @@
 						       u32 ce_ctrl_addr,
 						       unsigned int n)
 {
-	ath10k_pci_write32(ar, ce_ctrl_addr + DST_WR_INDEX_ADDRESS, n);
+	struct bus_opaque *ar_opaque = ath10k_bus_priv(ar);
+
+	ar_opaque->bus_ops->write32(ar,
+		ce_ctrl_addr + ar->hw_ce_regs->dst_wr_index_addr, n);
 }
 
 static inline u32 ath10k_ce_dest_ring_write_index_get(struct ath10k *ar,
 						      u32 ce_ctrl_addr)
 {
-	return ath10k_pci_read32(ar, ce_ctrl_addr + DST_WR_INDEX_ADDRESS);
+	struct bus_opaque *ar_opaque = ath10k_bus_priv(ar);
+
+	return ar_opaque->bus_ops->read32(ar,
+			ce_ctrl_addr + ar->hw_ce_regs->dst_wr_index_addr);
 }
 
 static inline void ath10k_ce_src_ring_write_index_set(struct ath10k *ar,
 						      u32 ce_ctrl_addr,
 						      unsigned int n)
 {
-	ath10k_pci_write32(ar, ce_ctrl_addr + SR_WR_INDEX_ADDRESS, n);
+	struct bus_opaque *ar_opaque = ath10k_bus_priv(ar);
+
+	ar_opaque->bus_ops->write32(ar,
+		ce_ctrl_addr + ar->hw_ce_regs->sr_wr_index_addr, n);
 }
 
 static inline u32 ath10k_ce_src_ring_write_index_get(struct ath10k *ar,
 						     u32 ce_ctrl_addr)
 {
-	return ath10k_pci_read32(ar, ce_ctrl_addr + SR_WR_INDEX_ADDRESS);
+	struct bus_opaque *ar_opaque = ath10k_bus_priv(ar);
+
+	return ar_opaque->bus_ops->read32(ar,
+			ce_ctrl_addr + ar->hw_ce_regs->sr_wr_index_addr);
+}
+
+static inline u32 ath10k_ce_src_ring_read_index_get_from_ddr(
+				struct ath10k *ar, u32 ce_id)
+{
+	struct bus_opaque *ar_opaque = ath10k_bus_priv(ar);
+
+	return ar_opaque->vaddr_rri_on_ddr[ce_id] & CE_DDR_RRI_MASK;
 }
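
These accessors all follow one pattern: the direct ath10k_pci_read32()/ath10k_pci_write32() calls are replaced by indirection through a per-bus ops table, so the same copy-engine code can drive PCIe, SNOC or AHB transports. The table itself is defined elsewhere in the patch; judging from the call sites here, its shape is roughly as follows (the struct name is assumed):

	/* Assumed shape of the per-bus accessor table used via
	 * ar_opaque->bus_ops; defined elsewhere in this patchset. */
	struct ath10k_bus_ops {
		u32 (*read32)(struct ath10k *ar, u32 offset);
		void (*write32)(struct ath10k *ar, u32 offset, u32 value);
	};
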
 
 static inline u32 ath10k_ce_src_ring_read_index_get(struct ath10k *ar,
 						    u32 ce_ctrl_addr)
 {
-	return ath10k_pci_read32(ar, ce_ctrl_addr + CURRENT_SRRI_ADDRESS);
+	struct bus_opaque *ar_opaque = ath10k_bus_priv(ar);
+	u32 ce_id = COPY_ENGINE_ID(ce_ctrl_addr);
+	struct ath10k_ce_pipe *ce_state = &ar_opaque->ce_states[ce_id];
+	u32 index;
+
+	if (ar->rri_on_ddr && (ce_state->attr_flags & CE_ATTR_DIS_INTR))
+		index = ath10k_ce_src_ring_read_index_get_from_ddr(ar, ce_id);
+	else
+		index = ar_opaque->bus_ops->read32(ar,
+			ce_ctrl_addr + ar->hw_ce_regs->current_srri_addr);
+
+	return index;
+}
+
+static inline void ath10k_ce_shadow_src_ring_write_index_set(struct ath10k *ar,
+							     u32 ce_ctrl_addr,
+							     unsigned int n)
+{
+	struct bus_opaque *ar_opaque = ath10k_bus_priv(ar);
+
+	ar_opaque->bus_ops->write32(ar, shadow_sr_wr_ind_addr(ar,
+							      ce_ctrl_addr), n);
+}
+
+static inline void ath10k_ce_shadow_dest_ring_write_index_set(struct ath10k *ar,
+							      u32 ce_ctrl_addr,
+							      unsigned int n)
+{
+	struct bus_opaque *ar_opaque = ath10k_bus_priv(ar);
+
+	ar_opaque->bus_ops->write32(ar, shadow_dst_wr_ind_addr(ar,
+							       ce_ctrl_addr),
+							       n);
 }
 
 static inline void ath10k_ce_src_ring_base_addr_set(struct ath10k *ar,
 						    u32 ce_ctrl_addr,
 						    unsigned int addr)
 {
-	ath10k_pci_write32(ar, ce_ctrl_addr + SR_BA_ADDRESS, addr);
+	struct bus_opaque *ar_opaque = ath10k_bus_priv(ar);
+
+	ar_opaque->bus_ops->write32(ar,
+		ce_ctrl_addr + ar->hw_ce_regs->sr_base_addr, addr);
 }
 
 static inline void ath10k_ce_src_ring_size_set(struct ath10k *ar,
 					       u32 ce_ctrl_addr,
 					       unsigned int n)
 {
-	ath10k_pci_write32(ar, ce_ctrl_addr + SR_SIZE_ADDRESS, n);
+	struct bus_opaque *ar_opaque = ath10k_bus_priv(ar);
+
+	ar_opaque->bus_ops->write32(ar,
+		ce_ctrl_addr + ar->hw_ce_regs->sr_size_addr, n);
 }
 
 static inline void ath10k_ce_src_ring_dmax_set(struct ath10k *ar,
 					       u32 ce_ctrl_addr,
 					       unsigned int n)
 {
-	u32 ctrl1_addr = ath10k_pci_read32((ar),
-					   (ce_ctrl_addr) + CE_CTRL1_ADDRESS);
+	struct bus_opaque *ar_opaque = ath10k_bus_priv(ar);
+	struct ath10k_hw_ce_ctrl1 *ctrl_regs = ar->hw_ce_regs->ctrl1_regs;
 
-	ath10k_pci_write32(ar, ce_ctrl_addr + CE_CTRL1_ADDRESS,
-			   (ctrl1_addr &  ~CE_CTRL1_DMAX_LENGTH_MASK) |
-			   CE_CTRL1_DMAX_LENGTH_SET(n));
+	u32 ctrl1_addr = ar_opaque->bus_ops->read32((ar),
+				(ce_ctrl_addr) + ctrl_regs->addr);
+
+	ar_opaque->bus_ops->write32(ar, ce_ctrl_addr + ctrl_regs->addr,
+			   (ctrl1_addr &  ~(ctrl_regs->dmax->mask)) |
+			   ctrl_regs->dmax->set(n, ctrl_regs->dmax));
 }
 
 static inline void ath10k_ce_src_ring_byte_swap_set(struct ath10k *ar,
 						    u32 ce_ctrl_addr,
 						    unsigned int n)
 {
-	u32 ctrl1_addr = ath10k_pci_read32(ar, ce_ctrl_addr + CE_CTRL1_ADDRESS);
+	struct bus_opaque *ar_opaque = ath10k_bus_priv(ar);
+	struct ath10k_hw_ce_ctrl1 *ctrl_regs = ar->hw_ce_regs->ctrl1_regs;
+
+	u32 ctrl1_addr = ar_opaque->bus_ops->read32(ar, ce_ctrl_addr +
+						    ctrl_regs->addr);
 
-	ath10k_pci_write32(ar, ce_ctrl_addr + CE_CTRL1_ADDRESS,
-			   (ctrl1_addr & ~CE_CTRL1_SRC_RING_BYTE_SWAP_EN_MASK) |
-			   CE_CTRL1_SRC_RING_BYTE_SWAP_EN_SET(n));
+	ar_opaque->bus_ops->write32(ar, ce_ctrl_addr + ctrl_regs->addr,
+			   (ctrl1_addr & ~(ctrl_regs->src_ring->mask)) |
+			   ctrl_regs->src_ring->set(n, ctrl_regs->src_ring));
 }
 
 static inline void ath10k_ce_dest_ring_byte_swap_set(struct ath10k *ar,
 						     u32 ce_ctrl_addr,
 						     unsigned int n)
 {
-	u32 ctrl1_addr = ath10k_pci_read32(ar, ce_ctrl_addr + CE_CTRL1_ADDRESS);
+	struct bus_opaque *ar_opaque = ath10k_bus_priv(ar);
+	struct ath10k_hw_ce_ctrl1 *ctrl_regs = ar->hw_ce_regs->ctrl1_regs;
 
-	ath10k_pci_write32(ar, ce_ctrl_addr + CE_CTRL1_ADDRESS,
-			   (ctrl1_addr & ~CE_CTRL1_DST_RING_BYTE_SWAP_EN_MASK) |
-			   CE_CTRL1_DST_RING_BYTE_SWAP_EN_SET(n));
+	u32 ctrl1_addr = ar_opaque->bus_ops->read32(ar, ce_ctrl_addr +
+						    ctrl_regs->addr);
+
+	ar_opaque->bus_ops->write32(ar, ce_ctrl_addr + ctrl_regs->addr,
+			   (ctrl1_addr & ~(ctrl_regs->dst_ring->mask)) |
+			   ctrl_regs->dst_ring->set(n, ctrl_regs->dst_ring));
 }
 
 static inline u32 ath10k_ce_dest_ring_read_index_get(struct ath10k *ar,
 						     u32 ce_ctrl_addr)
 {
-	return ath10k_pci_read32(ar, ce_ctrl_addr + CURRENT_DRRI_ADDRESS);
+	struct bus_opaque *ar_opaque = ath10k_bus_priv(ar);
+	u32 ce_id = COPY_ENGINE_ID(ce_ctrl_addr);
+	struct ath10k_ce_pipe *ce_state = &ar_opaque->ce_states[ce_id];
+	u32 index;
+
+	if (ar->rri_on_ddr && (ce_state->attr_flags & CE_ATTR_DIS_INTR))
+		index = (ar_opaque->vaddr_rri_on_ddr[ce_id] >>
+			  CE_DDR_RRI_SHIFT) &
+			  CE_DDR_RRI_MASK;
+	else
+		index = ar_opaque->bus_ops->read32(ar,
+			ce_ctrl_addr + ar->hw_ce_regs->current_drri_addr);
+
+	return index;
 }
 
 static inline void ath10k_ce_dest_ring_base_addr_set(struct ath10k *ar,
 						     u32 ce_ctrl_addr,
 						     u32 addr)
 {
-	ath10k_pci_write32(ar, ce_ctrl_addr + DR_BA_ADDRESS, addr);
+	struct bus_opaque *ar_opaque = ath10k_bus_priv(ar);
+
+	ar_opaque->bus_ops->write32(ar,
+		ce_ctrl_addr + ar->hw_ce_regs->dr_base_addr, addr);
 }
 
 static inline void ath10k_ce_dest_ring_size_set(struct ath10k *ar,
 						u32 ce_ctrl_addr,
 						unsigned int n)
 {
-	ath10k_pci_write32(ar, ce_ctrl_addr + DR_SIZE_ADDRESS, n);
+	struct bus_opaque *ar_opaque = ath10k_bus_priv(ar);
+
+	ar_opaque->bus_ops->write32(ar,
+		ce_ctrl_addr + ar->hw_ce_regs->dr_size_addr, n);
 }
 
 static inline void ath10k_ce_src_ring_highmark_set(struct ath10k *ar,
 						   u32 ce_ctrl_addr,
 						   unsigned int n)
 {
-	u32 addr = ath10k_pci_read32(ar, ce_ctrl_addr + SRC_WATERMARK_ADDRESS);
-
-	ath10k_pci_write32(ar, ce_ctrl_addr + SRC_WATERMARK_ADDRESS,
-			   (addr & ~SRC_WATERMARK_HIGH_MASK) |
-			   SRC_WATERMARK_HIGH_SET(n));
+	struct bus_opaque *ar_opaque = ath10k_bus_priv(ar);
+	struct ath10k_hw_ce_dst_src_wm_regs *srcr_wm = ar->hw_ce_regs->wm_srcr;
+	u32 addr = ar_opaque->bus_ops->read32(ar, ce_ctrl_addr + srcr_wm->addr);
+
+	ar_opaque->bus_ops->write32(ar, ce_ctrl_addr + srcr_wm->addr,
+			   (addr & ~(srcr_wm->wm_high->mask)) |
+			   (srcr_wm->wm_high->set(n, srcr_wm->wm_high)));
 }
 
 static inline void ath10k_ce_src_ring_lowmark_set(struct ath10k *ar,
 						  u32 ce_ctrl_addr,
 						  unsigned int n)
 {
-	u32 addr = ath10k_pci_read32(ar, ce_ctrl_addr + SRC_WATERMARK_ADDRESS);
-
-	ath10k_pci_write32(ar, ce_ctrl_addr + SRC_WATERMARK_ADDRESS,
-			   (addr & ~SRC_WATERMARK_LOW_MASK) |
-			   SRC_WATERMARK_LOW_SET(n));
+	struct bus_opaque *ar_opaque = ath10k_bus_priv(ar);
+	struct ath10k_hw_ce_dst_src_wm_regs *srcr_wm = ar->hw_ce_regs->wm_srcr;
+	u32 addr = ar_opaque->bus_ops->read32(ar, ce_ctrl_addr + srcr_wm->addr);
+
+	ar_opaque->bus_ops->write32(ar, ce_ctrl_addr + srcr_wm->addr,
+			   (addr & ~(srcr_wm->wm_low->mask)) |
+			   (srcr_wm->wm_low->set(n, srcr_wm->wm_low)));
 }
 
 static inline void ath10k_ce_dest_ring_highmark_set(struct ath10k *ar,
 						    u32 ce_ctrl_addr,
 						    unsigned int n)
 {
-	u32 addr = ath10k_pci_read32(ar, ce_ctrl_addr + DST_WATERMARK_ADDRESS);
-
-	ath10k_pci_write32(ar, ce_ctrl_addr + DST_WATERMARK_ADDRESS,
-			   (addr & ~DST_WATERMARK_HIGH_MASK) |
-			   DST_WATERMARK_HIGH_SET(n));
+	struct bus_opaque *ar_opaque = ath10k_bus_priv(ar);
+	struct ath10k_hw_ce_dst_src_wm_regs *dstr_wm = ar->hw_ce_regs->wm_dstr;
+	u32 addr = ar_opaque->bus_ops->read32(ar, ce_ctrl_addr + dstr_wm->addr);
+
+	ar_opaque->bus_ops->write32(ar, ce_ctrl_addr + dstr_wm->addr,
+			   (addr & ~(dstr_wm->wm_high->mask)) |
+			   (dstr_wm->wm_high->set(n, dstr_wm->wm_high)));
 }
 
 static inline void ath10k_ce_dest_ring_lowmark_set(struct ath10k *ar,
 						   u32 ce_ctrl_addr,
 						   unsigned int n)
 {
-	u32 addr = ath10k_pci_read32(ar, ce_ctrl_addr + DST_WATERMARK_ADDRESS);
-
-	ath10k_pci_write32(ar, ce_ctrl_addr + DST_WATERMARK_ADDRESS,
-			   (addr & ~DST_WATERMARK_LOW_MASK) |
-			   DST_WATERMARK_LOW_SET(n));
+	struct bus_opaque *ar_opaque = ath10k_bus_priv(ar);
+	struct ath10k_hw_ce_dst_src_wm_regs *dstr_wm = ar->hw_ce_regs->wm_dstr;
+	u32 addr = ar_opaque->bus_ops->read32(ar, ce_ctrl_addr + dstr_wm->addr);
+
+	ar_opaque->bus_ops->write32(ar, ce_ctrl_addr + dstr_wm->addr,
+			   (addr & ~(dstr_wm->wm_low->mask)) |
+			   (dstr_wm->wm_low->set(n, dstr_wm->wm_low)));
 }
 
 static inline void ath10k_ce_copy_complete_inter_enable(struct ath10k *ar,
 							u32 ce_ctrl_addr)
 {
-	u32 host_ie_addr = ath10k_pci_read32(ar,
-					     ce_ctrl_addr + HOST_IE_ADDRESS);
+	struct bus_opaque *ar_opaque = ath10k_bus_priv(ar);
+	struct ath10k_hw_ce_host_ie *host_ie = ar->hw_ce_regs->host_ie;
 
-	ath10k_pci_write32(ar, ce_ctrl_addr + HOST_IE_ADDRESS,
-			   host_ie_addr | HOST_IE_COPY_COMPLETE_MASK);
+	u32 host_ie_addr = ar_opaque->bus_ops->read32(ar,
+				ce_ctrl_addr + ar->hw_ce_regs->host_ie_addr);
+
+	ar_opaque->bus_ops->write32(ar,
+			ce_ctrl_addr + ar->hw_ce_regs->host_ie_addr,
+			host_ie_addr | host_ie->copy_complete->mask);
 }
 
 static inline void ath10k_ce_copy_complete_intr_disable(struct ath10k *ar,
 							u32 ce_ctrl_addr)
 {
-	u32 host_ie_addr = ath10k_pci_read32(ar,
-					     ce_ctrl_addr + HOST_IE_ADDRESS);
+	struct bus_opaque *ar_opaque = ath10k_bus_priv(ar);
+	struct ath10k_hw_ce_host_ie *host_ie = ar->hw_ce_regs->host_ie;
+
+	u32 host_ie_addr = ar_opaque->bus_ops->read32(ar,
+				ce_ctrl_addr + ar->hw_ce_regs->host_ie_addr);
 
-	ath10k_pci_write32(ar, ce_ctrl_addr + HOST_IE_ADDRESS,
-			   host_ie_addr & ~HOST_IE_COPY_COMPLETE_MASK);
+	ar_opaque->bus_ops->write32(ar,
+			ce_ctrl_addr + ar->hw_ce_regs->host_ie_addr,
+			host_ie_addr & ~(host_ie->copy_complete->mask));
 }
 
 static inline void ath10k_ce_watermark_intr_disable(struct ath10k *ar,
 						    u32 ce_ctrl_addr)
 {
-	u32 host_ie_addr = ath10k_pci_read32(ar,
-					     ce_ctrl_addr + HOST_IE_ADDRESS);
+	struct bus_opaque *ar_opaque = ath10k_bus_priv(ar);
+	struct ath10k_hw_ce_host_wm_regs *wm_regs = ar->hw_ce_regs->wm_regs;
 
-	ath10k_pci_write32(ar, ce_ctrl_addr + HOST_IE_ADDRESS,
-			   host_ie_addr & ~CE_WATERMARK_MASK);
+	u32 host_ie_addr = ar_opaque->bus_ops->read32(ar,
+				ce_ctrl_addr + ar->hw_ce_regs->host_ie_addr);
+
+	ar_opaque->bus_ops->write32(ar,
+			ce_ctrl_addr + ar->hw_ce_regs->host_ie_addr,
+			host_ie_addr & ~(wm_regs->wm_mask));
 }
 
 static inline void ath10k_ce_error_intr_enable(struct ath10k *ar,
 					       u32 ce_ctrl_addr)
 {
-	u32 misc_ie_addr = ath10k_pci_read32(ar,
-					     ce_ctrl_addr + MISC_IE_ADDRESS);
+	struct bus_opaque *ar_opaque = ath10k_bus_priv(ar);
+	struct ath10k_hw_ce_misc_regs *misc_regs = ar->hw_ce_regs->misc_regs;
+
+	u32 misc_ie_addr = ar_opaque->bus_ops->read32(ar,
+			ce_ctrl_addr + ar->hw_ce_regs->misc_ie_addr);
 
-	ath10k_pci_write32(ar, ce_ctrl_addr + MISC_IE_ADDRESS,
-			   misc_ie_addr | CE_ERROR_MASK);
+	ar_opaque->bus_ops->write32(ar,
+			ce_ctrl_addr + ar->hw_ce_regs->misc_ie_addr,
+			misc_ie_addr | misc_regs->err_mask);
 }
 
 static inline void ath10k_ce_error_intr_disable(struct ath10k *ar,
 						u32 ce_ctrl_addr)
 {
-	u32 misc_ie_addr = ath10k_pci_read32(ar,
-					     ce_ctrl_addr + MISC_IE_ADDRESS);
+	struct bus_opaque *ar_opaque = ath10k_bus_priv(ar);
+	struct ath10k_hw_ce_misc_regs *misc_regs = ar->hw_ce_regs->misc_regs;
 
-	ath10k_pci_write32(ar, ce_ctrl_addr + MISC_IE_ADDRESS,
-			   misc_ie_addr & ~CE_ERROR_MASK);
+	u32 misc_ie_addr = ar_opaque->bus_ops->read32(ar,
+			ce_ctrl_addr + ar->hw_ce_regs->misc_ie_addr);
+
+	ar_opaque->bus_ops->write32(ar,
+			ce_ctrl_addr + ar->hw_ce_regs->misc_ie_addr,
+			misc_ie_addr & ~(misc_regs->err_mask));
 }
 
 static inline void ath10k_ce_engine_int_status_clear(struct ath10k *ar,
 						     u32 ce_ctrl_addr,
 						     unsigned int mask)
 {
-	ath10k_pci_write32(ar, ce_ctrl_addr + HOST_IS_ADDRESS, mask);
+	struct bus_opaque *ar_opaque = ath10k_bus_priv(ar);
+	struct ath10k_hw_ce_host_wm_regs *wm_regs = ar->hw_ce_regs->wm_regs;
+
+	ar_opaque->bus_ops->write32(ar, ce_ctrl_addr + wm_regs->addr, mask);
+}
+
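+/*
+ * Map a CE id to its source-ring write-index shadow register offset.
+ * Only the CEs listed below are driven through shadow registers here;
+ * SHADOW_VALUE* resolve to hw-specific offsets in ar->shadow_reg_value
+ * (see ce.h). E.g. a non-gather send on CE3 updates its write index
+ * through SHADOW_VALUE3.
+ */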
+u32 shadow_sr_wr_ind_addr(struct ath10k *ar, u32 ctrl_addr)
+{
+	u32 addr = 0;
+	u32 ce = COPY_ENGINE_ID(ctrl_addr);
+
+	switch (ce) {
+	case 0:
+		addr = SHADOW_VALUE0;
+		break;
+	case 3:
+		addr = SHADOW_VALUE3;
+		break;
+	case 4:
+		addr = SHADOW_VALUE4;
+		break;
+	case 5:
+		addr = SHADOW_VALUE5;
+		break;
+	case 7:
+		addr = SHADOW_VALUE7;
+		break;
+	default:
+		ath10k_err(ar, "invalid CE ctrl_addr (CE=%d)", ce);
+		WARN_ON(1);
+	}
+	return addr;
+}
+
+u32 shadow_dst_wr_ind_addr(struct ath10k *ar, u32 ctrl_addr)
+{
+	u32 addr = 0;
+	u32 ce = COPY_ENGINE_ID(ctrl_addr);
+
+	switch (ce) {
+	case 1:
+		addr = SHADOW_VALUE13;
+		break;
+	case 2:
+		addr = SHADOW_VALUE14;
+		break;
+	case 5:
+		addr = SHADOW_VALUE17;
+		break;
+	case 7:
+		addr = SHADOW_VALUE19;
+		break;
+	case 8:
+		addr = SHADOW_VALUE20;
+		break;
+	case 9:
+		addr = SHADOW_VALUE21;
+		break;
+	case 10:
+		addr = SHADOW_VALUE22;
+		break;
+	case 11:
+		addr = SHADOW_VALUE23;
+		break;
+	default:
+		ath10k_err(ar, "invalid CE ctrl_addr (CE=%d)", ce);
+		WARN_ON(1);
+	}
+
+	return addr;
+}
+
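+/*
+ * WCN3990 descriptors carry a 37-bit DMA address: bits [31:0] of the
+ * buffer land in the low word, and bits [36:32] (CE_DESC_FLAGS_GET_MASK
+ * is 0x1f) are folded into the flags word, whose BIT(31) doubles as the
+ * gather flag. Layout sketch for a 37-bit paddr:
+ *
+ *	addr[0] = lower_32_bits(paddr);
+ *	addr[1] = (upper_32_bits(paddr) & 0x1f) | flags;  (bit 31 = gather)
+ */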
+static inline void ath10k_ce_snoc_addr_config(struct ce_desc *sdesc,
+					      dma_addr_t buffer,
+					      unsigned int flags)
+{
+	__le32 *addr = (__le32 *)&sdesc->addr;
+
+	flags |= upper_32_bits(buffer) & CE_DESC_FLAGS_GET_MASK;
+	addr[0] = __cpu_to_le32(buffer);
+	addr[1] = flags;
+	if (flags & CE_SEND_FLAG_GATHER)
+		addr[1] |= CE_WCN3990_DESC_FLAGS_GATHER;
+	else
+		addr[1] &= ~CE_WCN3990_DESC_FLAGS_GATHER;
 }
 
 /*
@@ -267,7 +466,7 @@
  */
 int ath10k_ce_send_nolock(struct ath10k_ce_pipe *ce_state,
 			  void *per_transfer_context,
-			  u32 buffer,
+			  dma_addr_t buffer,
 			  unsigned int nbytes,
 			  unsigned int transfer_id,
 			  unsigned int flags)
@@ -276,16 +475,20 @@
 	struct ath10k_ce_ring *src_ring = ce_state->src_ring;
 	struct ce_desc *desc, sdesc;
 	unsigned int nentries_mask = src_ring->nentries_mask;
-	unsigned int sw_index = src_ring->sw_index;
+	unsigned int sw_index;
 	unsigned int write_index = src_ring->write_index;
 	u32 ctrl_addr = ce_state->ctrl_addr;
 	u32 desc_flags = 0;
 	int ret = 0;
 
+	if (test_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags))
+		return -ESHUTDOWN;
+
 	if (nbytes > ce_state->src_sz_max)
 		ath10k_warn(ar, "%s: send more we can (nbytes: %d, max: %d)\n",
 			    __func__, nbytes, ce_state->src_sz_max);
 
+	sw_index = ath10k_ce_src_ring_read_index_get_from_ddr(ar, ce_state->id);
 	if (unlikely(CE_RING_DELTA(nentries_mask,
 				   write_index, sw_index - 1) <= 0)) {
 		ret = -ENOSR;
@@ -299,10 +502,15 @@
 
 	if (flags & CE_SEND_FLAG_GATHER)
 		desc_flags |= CE_DESC_FLAGS_GATHER;
+
 	if (flags & CE_SEND_FLAG_BYTE_SWAP)
 		desc_flags |= CE_DESC_FLAGS_BYTE_SWAP;
 
-	sdesc.addr   = __cpu_to_le32(buffer);
+	if (QCA_REV_WCN3990(ar))
+		ath10k_ce_snoc_addr_config(&sdesc, buffer, flags);
+	else
+		sdesc.addr   = __cpu_to_le32(buffer);
+
 	sdesc.nbytes = __cpu_to_le16(nbytes);
 	sdesc.flags  = __cpu_to_le16(desc_flags);
 
@@ -314,8 +522,14 @@
 	write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
 
 	/* WORKAROUND */
-	if (!(flags & CE_SEND_FLAG_GATHER))
-		ath10k_ce_src_ring_write_index_set(ar, ctrl_addr, write_index);
+	if (!(flags & CE_SEND_FLAG_GATHER)) {
+		if (QCA_REV_WCN3990(ar))
+			ath10k_ce_shadow_src_ring_write_index_set(ar, ctrl_addr,
+								  write_index);
+		else
+			ath10k_ce_src_ring_write_index_set(ar, ctrl_addr,
+							   write_index);
+	}
 
 	src_ring->write_index = write_index;
 exit:
@@ -325,11 +539,11 @@
 void __ath10k_ce_send_revert(struct ath10k_ce_pipe *pipe)
 {
 	struct ath10k *ar = pipe->ar;
-	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+	struct bus_opaque *ar_opaque = ath10k_bus_priv(ar);
 	struct ath10k_ce_ring *src_ring = pipe->src_ring;
 	u32 ctrl_addr = pipe->ctrl_addr;
 
-	lockdep_assert_held(&ar_pci->ce_lock);
+	lockdep_assert_held(&ar_opaque->ce_lock);
 
 	/*
 	 * This function must be called only if there is an incomplete
@@ -351,19 +565,19 @@
 
 int ath10k_ce_send(struct ath10k_ce_pipe *ce_state,
 		   void *per_transfer_context,
-		   u32 buffer,
+		   dma_addr_t buffer,
 		   unsigned int nbytes,
 		   unsigned int transfer_id,
 		   unsigned int flags)
 {
 	struct ath10k *ar = ce_state->ar;
-	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+	struct bus_opaque *ar_opaque = ath10k_bus_priv(ar);
 	int ret;
 
-	spin_lock_bh(&ar_pci->ce_lock);
+	spin_lock_bh(&ar_opaque->ce_lock);
 	ret = ath10k_ce_send_nolock(ce_state, per_transfer_context,
 				    buffer, nbytes, transfer_id, flags);
-	spin_unlock_bh(&ar_pci->ce_lock);
+	spin_unlock_bh(&ar_opaque->ce_lock);
 
 	return ret;
 }
@@ -371,14 +585,14 @@
 int ath10k_ce_num_free_src_entries(struct ath10k_ce_pipe *pipe)
 {
 	struct ath10k *ar = pipe->ar;
-	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+	struct bus_opaque *ar_opaque = ath10k_bus_priv(ar);
 	int delta;
 
-	spin_lock_bh(&ar_pci->ce_lock);
+	spin_lock_bh(&ar_opaque->ce_lock);
 	delta = CE_RING_DELTA(pipe->src_ring->nentries_mask,
 			      pipe->src_ring->write_index,
 			      pipe->src_ring->sw_index - 1);
-	spin_unlock_bh(&ar_pci->ce_lock);
+	spin_unlock_bh(&ar_opaque->ce_lock);
 
 	return delta;
 }
@@ -386,21 +600,22 @@
 int __ath10k_ce_rx_num_free_bufs(struct ath10k_ce_pipe *pipe)
 {
 	struct ath10k *ar = pipe->ar;
-	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+	struct bus_opaque *ar_opaque = ath10k_bus_priv(ar);
 	struct ath10k_ce_ring *dest_ring = pipe->dest_ring;
 	unsigned int nentries_mask = dest_ring->nentries_mask;
 	unsigned int write_index = dest_ring->write_index;
 	unsigned int sw_index = dest_ring->sw_index;
 
-	lockdep_assert_held(&ar_pci->ce_lock);
+	lockdep_assert_held(&ar_opaque->ce_lock);
 
 	return CE_RING_DELTA(nentries_mask, write_index, sw_index - 1);
 }
 
-int __ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx, u32 paddr)
+int __ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx,
+			    dma_addr_t paddr)
 {
 	struct ath10k *ar = pipe->ar;
-	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+	struct bus_opaque *ar_opaque = ath10k_bus_priv(ar);
 	struct ath10k_ce_ring *dest_ring = pipe->dest_ring;
 	unsigned int nentries_mask = dest_ring->nentries_mask;
 	unsigned int write_index = dest_ring->write_index;
@@ -409,12 +624,19 @@
 	struct ce_desc *desc = CE_DEST_RING_TO_DESC(base, write_index);
 	u32 ctrl_addr = pipe->ctrl_addr;
 
-	lockdep_assert_held(&ar_pci->ce_lock);
+	lockdep_assert_held(&ar_opaque->ce_lock);
 
-	if (CE_RING_DELTA(nentries_mask, write_index, sw_index - 1) == 0)
+	if ((pipe->id != 5) &&
+	    CE_RING_DELTA(nentries_mask, write_index, sw_index - 1) == 0)
 		return -ENOSPC;
 
-	desc->addr = __cpu_to_le32(paddr);
+	if (QCA_REV_WCN3990(ar)) {
+		desc->addr = paddr;
+		desc->addr &= CE_DESC_37BIT_ADDR_MASK;
+	} else {
+		desc->addr = __cpu_to_le32(paddr);
+	}
+
 	desc->nbytes = 0;
 
 	dest_ring->per_transfer_context[write_index] = ctx;
@@ -425,15 +647,29 @@
 	return 0;
 }
 
-int ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx, u32 paddr)
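+/*
+ * Batched variant of the rx write-index update: the CE5 (HTT Rx) fill
+ * path posts several buffers and then advances the hardware write
+ * index once, i.e. CE_RING_IDX_ADD(mask, write_index, nentries).
+ */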
+void ath10k_ce_rx_update_write_idx(struct ath10k_ce_pipe *pipe, u32 nentries)
 {
 	struct ath10k *ar = pipe->ar;
-	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+	struct ath10k_ce_ring *dest_ring = pipe->dest_ring;
+	unsigned int nentries_mask = dest_ring->nentries_mask;
+	unsigned int write_index = dest_ring->write_index;
+	u32 ctrl_addr = pipe->ctrl_addr;
+
+	write_index = CE_RING_IDX_ADD(nentries_mask, write_index, nentries);
+	ath10k_ce_dest_ring_write_index_set(ar, ctrl_addr, write_index);
+	dest_ring->write_index = write_index;
+}
+
+int ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx,
+			  dma_addr_t paddr)
+{
+	struct ath10k *ar = pipe->ar;
+	struct bus_opaque *ar_opaque = ath10k_bus_priv(ar);
 	int ret;
 
-	spin_lock_bh(&ar_pci->ce_lock);
+	spin_lock_bh(&ar_opaque->ce_lock);
 	ret = __ath10k_ce_rx_post_buf(pipe, ctx, paddr);
-	spin_unlock_bh(&ar_pci->ce_lock);
+	spin_unlock_bh(&ar_opaque->ce_lock);
 
 	return ret;
 }
@@ -444,14 +680,10 @@
  */
 int ath10k_ce_completed_recv_next_nolock(struct ath10k_ce_pipe *ce_state,
 					 void **per_transfer_contextp,
-					 u32 *bufferp,
-					 unsigned int *nbytesp,
-					 unsigned int *transfer_idp,
-					 unsigned int *flagsp)
+					 unsigned int *nbytesp)
 {
 	struct ath10k_ce_ring *dest_ring = ce_state->dest_ring;
 	unsigned int nentries_mask = dest_ring->nentries_mask;
-	struct ath10k *ar = ce_state->ar;
 	unsigned int sw_index = dest_ring->sw_index;
 
 	struct ce_desc *base = dest_ring->base_addr_owner_space;
@@ -476,20 +708,16 @@
 	desc->nbytes = 0;
 
 	/* Return data from completed destination descriptor */
-	*bufferp = __le32_to_cpu(sdesc.addr);
 	*nbytesp = nbytes;
-	*transfer_idp = MS(__le16_to_cpu(sdesc.flags), CE_DESC_FLAGS_META_DATA);
-
-	if (__le16_to_cpu(sdesc.flags) & CE_DESC_FLAGS_BYTE_SWAP)
-		*flagsp = CE_RECV_FLAG_SWAPPED;
-	else
-		*flagsp = 0;
 
 	if (per_transfer_contextp)
 		*per_transfer_contextp =
 			dest_ring->per_transfer_context[sw_index];
 
-	/* sanity */
-	dest_ring->per_transfer_context[sw_index] = NULL;
+	/* Copy engine 5 (HTT Rx) reuses the same transfer context, so
+	 * clear the transfer context for all CEs except CE5.
+	 */
+	if (ce_state->id != 5)
+		dest_ring->per_transfer_context[sw_index] = NULL;
 
 	/* Update sw_index */
@@ -501,21 +729,17 @@
 
 int ath10k_ce_completed_recv_next(struct ath10k_ce_pipe *ce_state,
 				  void **per_transfer_contextp,
-				  u32 *bufferp,
-				  unsigned int *nbytesp,
-				  unsigned int *transfer_idp,
-				  unsigned int *flagsp)
+				  unsigned int *nbytesp)
 {
 	struct ath10k *ar = ce_state->ar;
-	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+	struct bus_opaque *ar_opaque = ath10k_bus_priv(ar);
 	int ret;
 
-	spin_lock_bh(&ar_pci->ce_lock);
+	spin_lock_bh(&ar_opaque->ce_lock);
 	ret = ath10k_ce_completed_recv_next_nolock(ce_state,
 						   per_transfer_contextp,
-						   bufferp, nbytesp,
-						   transfer_idp, flagsp);
-	spin_unlock_bh(&ar_pci->ce_lock);
+						   nbytesp);
+	spin_unlock_bh(&ar_opaque->ce_lock);
 
 	return ret;
 }
@@ -530,7 +754,7 @@
 	unsigned int write_index;
 	int ret;
 	struct ath10k *ar;
-	struct ath10k_pci *ar_pci;
+	struct bus_opaque *ar_opaque;
 
 	dest_ring = ce_state->dest_ring;
 
@@ -538,9 +762,9 @@
 		return -EIO;
 
 	ar = ce_state->ar;
-	ar_pci = ath10k_pci_priv(ar);
+	ar_opaque = ath10k_bus_priv(ar);
 
-	spin_lock_bh(&ar_pci->ce_lock);
+	spin_lock_bh(&ar_opaque->ce_lock);
 
 	nentries_mask = dest_ring->nentries_mask;
 	sw_index = dest_ring->sw_index;
@@ -568,7 +792,7 @@
 		ret = -EIO;
 	}
 
-	spin_unlock_bh(&ar_pci->ce_lock);
+	spin_unlock_bh(&ar_opaque->ce_lock);
 
 	return ret;
 }
@@ -636,7 +860,7 @@
 	unsigned int write_index;
 	int ret;
 	struct ath10k *ar;
-	struct ath10k_pci *ar_pci;
+	struct bus_opaque *ar_opaque;
 
 	src_ring = ce_state->src_ring;
 
@@ -644,9 +868,9 @@
 		return -EIO;
 
 	ar = ce_state->ar;
-	ar_pci = ath10k_pci_priv(ar);
+	ar_opaque = ath10k_bus_priv(ar);
 
-	spin_lock_bh(&ar_pci->ce_lock);
+	spin_lock_bh(&ar_opaque->ce_lock);
 
 	nentries_mask = src_ring->nentries_mask;
 	sw_index = src_ring->sw_index;
@@ -677,7 +901,7 @@
 		ret = -EIO;
 	}
 
-	spin_unlock_bh(&ar_pci->ce_lock);
+	spin_unlock_bh(&ar_opaque->ce_lock);
 
 	return ret;
 }
@@ -686,13 +910,13 @@
 				  void **per_transfer_contextp)
 {
 	struct ath10k *ar = ce_state->ar;
-	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+	struct bus_opaque *ar_opaque = ath10k_bus_priv(ar);
 	int ret;
 
-	spin_lock_bh(&ar_pci->ce_lock);
+	spin_lock_bh(&ar_opaque->ce_lock);
 	ret = ath10k_ce_completed_send_next_nolock(ce_state,
 						   per_transfer_contextp);
-	spin_unlock_bh(&ar_pci->ce_lock);
+	spin_unlock_bh(&ar_opaque->ce_lock);
 
 	return ret;
 }
@@ -705,17 +929,18 @@
  */
 void ath10k_ce_per_engine_service(struct ath10k *ar, unsigned int ce_id)
 {
-	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
-	struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id];
+	struct bus_opaque *ar_opaque = ath10k_bus_priv(ar);
+	struct ath10k_ce_pipe *ce_state = &ar_opaque->ce_states[ce_id];
+	struct ath10k_hw_ce_host_wm_regs *wm_regs = ar->hw_ce_regs->wm_regs;
 	u32 ctrl_addr = ce_state->ctrl_addr;
 
-	spin_lock_bh(&ar_pci->ce_lock);
+	spin_lock_bh(&ar_opaque->ce_lock);
 
 	/* Clear the copy-complete interrupts that will be handled here. */
 	ath10k_ce_engine_int_status_clear(ar, ctrl_addr,
-					  HOST_IS_COPY_COMPLETE_MASK);
+					  wm_regs->cc_mask);
 
-	spin_unlock_bh(&ar_pci->ce_lock);
+	spin_unlock_bh(&ar_opaque->ce_lock);
 
 	if (ce_state->recv_cb)
 		ce_state->recv_cb(ce_state);
@@ -723,15 +948,15 @@
 	if (ce_state->send_cb)
 		ce_state->send_cb(ce_state);
 
-	spin_lock_bh(&ar_pci->ce_lock);
+	spin_lock_bh(&ar_opaque->ce_lock);
 
 	/*
 	 * Misc CE interrupts are not being handled, but still need
 	 * to be cleared.
 	 */
-	ath10k_ce_engine_int_status_clear(ar, ctrl_addr, CE_WATERMARK_MASK);
+	ath10k_ce_engine_int_status_clear(ar, ctrl_addr, wm_regs->wm_mask);
 
-	spin_unlock_bh(&ar_pci->ce_lock);
+	spin_unlock_bh(&ar_opaque->ce_lock);
 }
 
 /*
@@ -744,8 +969,16 @@
 {
 	int ce_id;
 	u32 intr_summary;
+	struct ath10k_ce_pipe *ce_state;
+	struct bus_opaque *ar_opaque = ath10k_bus_priv(ar);
 
-	intr_summary = CE_INTERRUPT_SUMMARY(ar);
+	if (test_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags))
+		return;
+
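+	/* WCN3990 apparently has no usable CE wrapper interrupt summary
+	 * here, so assume all 12 CEs (0xFFF) may be pending and let the
+	 * per-CE callback check below filter out idle pipes.
+	 */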
+	if (ar->target_version == ATH10K_HW_WCN3990)
+		intr_summary = 0xFFF;
+	else
+		intr_summary = CE_INTERRUPT_SUMMARY(ar, ar_opaque);
 
 	for (ce_id = 0; intr_summary && (ce_id < CE_COUNT); ce_id++) {
 		if (intr_summary & (1 << ce_id))
@@ -754,8 +987,11 @@
 			/* no intr pending on this CE */
 			continue;
 
+		ce_state = &ar_opaque->ce_states[ce_id];
+		if (ce_state->send_cb || ce_state->recv_cb)
 		ath10k_ce_per_engine_service(ar, ce_id);
 	}
+
 }
 
 /*
@@ -797,22 +1033,53 @@
 
 void ath10k_ce_enable_interrupts(struct ath10k *ar)
 {
-	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+	struct bus_opaque *ar_opaque = ath10k_bus_priv(ar);
 	int ce_id;
+	struct ath10k_ce_pipe *ce_state;
+	u8 ce_count;
 
-	/* Skip the last copy engine, CE7 the diagnostic window, as that
-	 * uses polling and isn't initialized for interrupts.
-	 */
-	for (ce_id = 0; ce_id < CE_COUNT - 1; ce_id++)
-		ath10k_ce_per_engine_handler_adjust(&ar_pci->ce_states[ce_id]);
+	if (QCA_REV_WCN3990(ar)) {
+		ce_count = CE_COUNT;
+	} else {
+		/* Skip the last copy engine, CE7 the diagnostic window, as
+		 * that uses polling and isn't initialized for interrupts.
+		 */
+		ce_count = CE_COUNT - 1;
+	}
+
+	for (ce_id = 0; ce_id < ce_count; ce_id++) {
+		ce_state  = &ar_opaque->ce_states[ce_id];
+		ath10k_ce_per_engine_handler_adjust(ce_state);
+	}
+}
+
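+/*
+ * Per-CE copy-complete interrupt enable/disable. The write is followed
+ * by a read back of the same register so the update is posted to the
+ * device before the caller proceeds.
+ */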
+void ath10k_ce_enable_per_ce_interrupts(struct ath10k *ar, unsigned int ce_id)
+{
+	u32 offset;
+	u32 ctrl_addr = ath10k_ce_base_address(ar, ce_id);
+	struct bus_opaque *ar_opaque = ath10k_bus_priv(ar);
+
+	offset = ar->hw_ce_regs->host_ie_addr + ctrl_addr;
+	ar_opaque->bus_ops->write32(ar, offset, 1);
+	ar_opaque->bus_ops->read32(ar, offset);
+}
+
+void ath10k_ce_disable_per_ce_interrupts(struct ath10k *ar, unsigned int ce_id)
+{
+	u32 offset;
+	u32 ctrl_addr = ath10k_ce_base_address(ar, ce_id);
+	struct bus_opaque *ar_opaque = ath10k_bus_priv(ar);
+
+	offset = ar->hw_ce_regs->host_ie_addr + ctrl_addr;
+	ar_opaque->bus_ops->write32(ar, offset, 0);
+	ar_opaque->bus_ops->read32(ar, offset);
 }
 
 static int ath10k_ce_init_src_ring(struct ath10k *ar,
 				   unsigned int ce_id,
 				   const struct ce_attr *attr)
 {
-	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
-	struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id];
+	struct bus_opaque *ar_opaque = ath10k_bus_priv(ar);
+	struct ath10k_ce_pipe *ce_state = &ar_opaque->ce_states[ce_id];
 	struct ath10k_ce_ring *src_ring = ce_state->src_ring;
 	u32 nentries, ctrl_addr = ath10k_ce_base_address(ar, ce_id);
 
@@ -838,7 +1105,7 @@
 	ath10k_ce_src_ring_highmark_set(ar, ctrl_addr, nentries);
 
 	ath10k_dbg(ar, ATH10K_DBG_BOOT,
-		   "boot init ce src ring id %d entries %d base_addr %p\n",
+		   "boot init ce src ring id %d entries %d base_addr %pK\n",
 		   ce_id, nentries, src_ring->base_addr_owner_space);
 
 	return 0;
@@ -848,8 +1115,8 @@
 				    unsigned int ce_id,
 				    const struct ce_attr *attr)
 {
-	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
-	struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id];
+	struct bus_opaque *ar_opaque = ath10k_bus_priv(ar);
+	struct ath10k_ce_pipe *ce_state = &ar_opaque->ce_states[ce_id];
 	struct ath10k_ce_ring *dest_ring = ce_state->dest_ring;
 	u32 nentries, ctrl_addr = ath10k_ce_base_address(ar, ce_id);
 
@@ -872,7 +1139,7 @@
 	ath10k_ce_dest_ring_highmark_set(ar, ctrl_addr, nentries);
 
 	ath10k_dbg(ar, ATH10K_DBG_BOOT,
-		   "boot ce dest ring id %d entries %d base_addr %p\n",
+		   "boot ce dest ring id %d entries %d base_addr %pK\n",
 		   ce_id, nentries, dest_ring->base_addr_owner_space);
 
 	return 0;
@@ -921,6 +1188,24 @@
 			src_ring->base_addr_ce_space_unaligned,
 			CE_DESC_RING_ALIGN);
 
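+	/*
+	 * Host-side shadow copy of the source ring, presumably used to
+	 * stage descriptors when the write index is driven through the
+	 * shadow registers (see shadow_sr_wr_ind_addr()).
+	 */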
+	src_ring->shadow_base_unaligned =
+		kzalloc(nentries * sizeof(struct ce_desc), GFP_KERNEL);
+
+	if (!src_ring->shadow_base_unaligned) {
+		dma_free_coherent(ar->dev,
+				  (nentries * sizeof(struct ce_desc) +
+				   CE_DESC_RING_ALIGN),
+				   src_ring->base_addr_owner_space_unaligned,
+				   base_addr);
+		kfree(src_ring);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	src_ring->shadow_base = (struct ce_desc *)PTR_ALIGN(
+				src_ring->shadow_base_unaligned,
+				CE_DESC_RING_ALIGN);
+
 	return src_ring;
 }
 
@@ -977,6 +1262,52 @@
 	return dest_ring;
 }
 
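+/*
+ * "RRI on DDR": allocate one u32 per CE in coherent memory and point
+ * the hardware at it via ce_rri_low/high, so the target mirrors its
+ * read indices to DDR. The host can then poll this snapshot instead of
+ * doing a register read per lookup; the dest read index is packed in
+ * bits [31:16] of each slot (CE_DDR_RRI_SHIFT/CE_DDR_RRI_MASK), with
+ * the source index presumably in the low half.
+ */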
+void ce_config_rri_on_ddr(struct ath10k *ar)
+{
+	struct bus_opaque *ar_opaque = ath10k_bus_priv(ar);
+	u32 hi_paddr, low_paddr;
+	u32 ce_base_addr;
+	u32 ctrl1_regs;
+	int i;
+
+	ar_opaque->vaddr_rri_on_ddr =
+		(u32 *)dma_alloc_coherent(ar->dev,
+		(CE_COUNT * sizeof(u32)),
+		&ar_opaque->paddr_rri_on_ddr, GFP_KERNEL);
+
+	if (!ar_opaque->vaddr_rri_on_ddr)
+		return;
+
+	low_paddr  = lower_32_bits(ar_opaque->paddr_rri_on_ddr);
+	hi_paddr = upper_32_bits(ar_opaque->paddr_rri_on_ddr) &
+					CE_DESC_FLAGS_GET_MASK;
+
+	ar_opaque->bus_ops->write32(ar, ar->hw_ce_regs->ce_rri_low, low_paddr);
+	ar_opaque->bus_ops->write32(ar, ar->hw_ce_regs->ce_rri_high, hi_paddr);
+
+	for (i = 0; i < CE_COUNT; i++) {
+		ctrl1_regs = ar->hw_ce_regs->ctrl1_regs->addr;
+		ce_base_addr = ath10k_ce_base_address(ar, i);
+		ar_opaque->bus_ops->write32(ar, ce_base_addr + ctrl1_regs,
+		ar_opaque->bus_ops->read32(ar, ce_base_addr + ctrl1_regs) |
+		ar->hw_ce_regs->upd->mask);
+	}
+
+	memset(ar_opaque->vaddr_rri_on_ddr, 0, CE_COUNT * sizeof(u32));
+}
+
+void ce_remove_rri_on_ddr(struct ath10k *ar)
+{
+	struct bus_opaque *ar_opaque = ath10k_bus_priv(ar);
+
+	if (!ar_opaque->vaddr_rri_on_ddr)
+		return;
+
+	dma_free_coherent(ar->dev, (CE_COUNT * sizeof(u32)),
+			  ar_opaque->vaddr_rri_on_ddr,
+			  ar_opaque->paddr_rri_on_ddr);
+}
+
 /*
  * Initialize a Copy Engine based on caller-supplied attributes.
  * This may be called once to initialize both source and destination
@@ -1038,8 +1369,8 @@
 int ath10k_ce_alloc_pipe(struct ath10k *ar, int ce_id,
 			 const struct ce_attr *attr)
 {
-	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
-	struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id];
+	struct bus_opaque *ar_opaque = ath10k_bus_priv(ar);
+	struct ath10k_ce_pipe *ce_state = &ar_opaque->ce_states[ce_id];
 	int ret;
 
 	/*
@@ -1095,10 +1426,11 @@
 
 void ath10k_ce_free_pipe(struct ath10k *ar, int ce_id)
 {
-	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
-	struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id];
+	struct bus_opaque *ar_opaque = ath10k_bus_priv(ar);
+	struct ath10k_ce_pipe *ce_state = &ar_opaque->ce_states[ce_id];
 
 	if (ce_state->src_ring) {
+		kfree(ce_state->src_ring->shadow_base_unaligned);
 		dma_free_coherent(ar->dev,
 				  (ce_state->src_ring->nentries *
 				   sizeof(struct ce_desc) +
diff -ruw linux-4.4.115/drivers/net/wireless/ath/ath10k/ce.h linux-4.4.115-fbx/drivers/net/wireless/ath/ath10k/ce.h
--- linux-4.4.115/drivers/net/wireless/ath/ath10k/ce.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/net/wireless/ath/ath10k/ce.h	2019-01-22 16:16:25.411263685 +0100
@@ -22,7 +22,7 @@
 
-/* Maximum number of Copy Engine's supported */
+/* Maximum number of Copy Engines supported */
 #define CE_COUNT_MAX 12
-#define CE_HTT_H2T_MSG_SRC_NENTRIES 4096
+#define CE_HTT_H2T_MSG_SRC_NENTRIES 8192
 
 /* Descriptor rings must be aligned to this boundary */
 #define CE_DESC_RING_ALIGN	8
@@ -38,6 +38,12 @@
 
 #define CE_DESC_FLAGS_GATHER         (1 << 0)
 #define CE_DESC_FLAGS_BYTE_SWAP      (1 << 1)
+#define CE_WCN3990_DESC_FLAGS_GATHER BIT(31)
+
+#define CE_DESC_FLAGS_GET_MASK		0x1F
+#define CE_DESC_37BIT_ADDR_MASK		0x1FFFFFFFFF
+#define CE_DDR_RRI_MASK			0xFFFF
+#define CE_DDR_RRI_SHIFT		16
 
 /* Following desc flags are used in QCA99X0 */
 #define CE_DESC_FLAGS_HOST_INT_DIS	(1 << 2)
@@ -46,11 +52,20 @@
 #define CE_DESC_FLAGS_META_DATA_MASK ar->hw_values->ce_desc_meta_data_mask
 #define CE_DESC_FLAGS_META_DATA_LSB  ar->hw_values->ce_desc_meta_data_lsb
 
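+/*
+ * Two descriptor layouts: legacy targets use a packed 8-byte descriptor
+ * with a 32-bit address, while SNOC (WCN3990) uses a 16-byte descriptor
+ * with a 64-bit address field (37 significant bits) plus a toeplitz
+ * hash slot, selected at build time via CONFIG_ATH10K_SNOC.
+ */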
+#ifndef CONFIG_ATH10K_SNOC
 struct ce_desc {
 	__le32 addr;
 	__le16 nbytes;
 	__le16 flags; /* %CE_DESC_FLAGS_ */
 };
+#else
+struct ce_desc {
+	__le64 addr;
+	u16 nbytes; /* length in register map */
+	u16 flags; /* fw_metadata_high */
+	u32 toeplitz_hash_result;
+};
+#endif
 
 struct ath10k_ce_ring {
 	/* Number of entries in this ring; must be power of 2 */
@@ -101,6 +116,9 @@
 	/* CE address space */
 	u32 base_addr_ce_space;
 
+	char *shadow_base_unaligned;
+	struct ce_desc *shadow_base;
+
 	/* keep last */
 	void *per_transfer_context[0];
 };
@@ -124,6 +142,81 @@
 /* Copy Engine settable attributes */
 struct ce_attr;
 
+#define SHADOW_VALUE0       (ar->shadow_reg_value->shadow_reg_value_0)
+#define SHADOW_VALUE1       (ar->shadow_reg_value->shadow_reg_value_1)
+#define SHADOW_VALUE2       (ar->shadow_reg_value->shadow_reg_value_2)
+#define SHADOW_VALUE3       (ar->shadow_reg_value->shadow_reg_value_3)
+#define SHADOW_VALUE4       (ar->shadow_reg_value->shadow_reg_value_4)
+#define SHADOW_VALUE5       (ar->shadow_reg_value->shadow_reg_value_5)
+#define SHADOW_VALUE6       (ar->shadow_reg_value->shadow_reg_value_6)
+#define SHADOW_VALUE7       (ar->shadow_reg_value->shadow_reg_value_7)
+#define SHADOW_VALUE8       (ar->shadow_reg_value->shadow_reg_value_8)
+#define SHADOW_VALUE9       (ar->shadow_reg_value->shadow_reg_value_9)
+#define SHADOW_VALUE10      (ar->shadow_reg_value->shadow_reg_value_10)
+#define SHADOW_VALUE11      (ar->shadow_reg_value->shadow_reg_value_11)
+#define SHADOW_VALUE12      (ar->shadow_reg_value->shadow_reg_value_12)
+#define SHADOW_VALUE13      (ar->shadow_reg_value->shadow_reg_value_13)
+#define SHADOW_VALUE14      (ar->shadow_reg_value->shadow_reg_value_14)
+#define SHADOW_VALUE15      (ar->shadow_reg_value->shadow_reg_value_15)
+#define SHADOW_VALUE16      (ar->shadow_reg_value->shadow_reg_value_16)
+#define SHADOW_VALUE17      (ar->shadow_reg_value->shadow_reg_value_17)
+#define SHADOW_VALUE18      (ar->shadow_reg_value->shadow_reg_value_18)
+#define SHADOW_VALUE19      (ar->shadow_reg_value->shadow_reg_value_19)
+#define SHADOW_VALUE20      (ar->shadow_reg_value->shadow_reg_value_20)
+#define SHADOW_VALUE21      (ar->shadow_reg_value->shadow_reg_value_21)
+#define SHADOW_VALUE22      (ar->shadow_reg_value->shadow_reg_value_22)
+#define SHADOW_VALUE23      (ar->shadow_reg_value->shadow_reg_value_23)
+#define SHADOW_ADDRESS0     (ar->shadow_reg_address->shadow_reg_address_0)
+#define SHADOW_ADDRESS1     (ar->shadow_reg_address->shadow_reg_address_1)
+#define SHADOW_ADDRESS2     (ar->shadow_reg_address->shadow_reg_address_2)
+#define SHADOW_ADDRESS3     (ar->shadow_reg_address->shadow_reg_address_3)
+#define SHADOW_ADDRESS4     (ar->shadow_reg_address->shadow_reg_address_4)
+#define SHADOW_ADDRESS5     (ar->shadow_reg_address->shadow_reg_address_5)
+#define SHADOW_ADDRESS6     (ar->shadow_reg_address->shadow_reg_address_6)
+#define SHADOW_ADDRESS7     (ar->shadow_reg_address->shadow_reg_address_7)
+#define SHADOW_ADDRESS8     (ar->shadow_reg_address->shadow_reg_address_8)
+#define SHADOW_ADDRESS9     (ar->shadow_reg_address->shadow_reg_address_9)
+#define SHADOW_ADDRESS10    (ar->shadow_reg_address->shadow_reg_address_10)
+#define SHADOW_ADDRESS11    (ar->shadow_reg_address->shadow_reg_address_11)
+#define SHADOW_ADDRESS12    (ar->shadow_reg_address->shadow_reg_address_12)
+#define SHADOW_ADDRESS13    (ar->shadow_reg_address->shadow_reg_address_13)
+#define SHADOW_ADDRESS14    (ar->shadow_reg_address->shadow_reg_address_14)
+#define SHADOW_ADDRESS15    (ar->shadow_reg_address->shadow_reg_address_15)
+#define SHADOW_ADDRESS16    (ar->shadow_reg_address->shadow_reg_address_16)
+#define SHADOW_ADDRESS17    (ar->shadow_reg_address->shadow_reg_address_17)
+#define SHADOW_ADDRESS18    (ar->shadow_reg_address->shadow_reg_address_18)
+#define SHADOW_ADDRESS19    (ar->shadow_reg_address->shadow_reg_address_19)
+#define SHADOW_ADDRESS20    (ar->shadow_reg_address->shadow_reg_address_20)
+#define SHADOW_ADDRESS21    (ar->shadow_reg_address->shadow_reg_address_21)
+#define SHADOW_ADDRESS22    (ar->shadow_reg_address->shadow_reg_address_22)
+#define SHADOW_ADDRESS23    (ar->shadow_reg_address->shadow_reg_address_23)
+
+#define SHADOW_ADDRESS(i) (SHADOW_ADDRESS0 + \
+			   (i) * (SHADOW_ADDRESS1 - SHADOW_ADDRESS0))
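+/* Assumes uniformly spaced shadow registers, so e.g. SHADOW_ADDRESS(2)
+ * == SHADOW_ADDRESS0 + 2 * stride == SHADOW_ADDRESS2.
+ */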
+
+u32 shadow_sr_wr_ind_addr(struct ath10k *ar, u32 ctrl_addr);
+u32 shadow_dst_wr_ind_addr(struct ath10k *ar, u32 ctrl_addr);
+
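+/*
+ * Bus-agnostic register accessors: ce.c used to call
+ * ath10k_pci_{read,write}32() directly, and now goes through these ops
+ * so the same CE code can run over PCI or SNOC. A PCI backend would be
+ * wired up roughly like this (sketch, helper names assumed):
+ *
+ *	static const struct ath10k_bus_ops ath10k_pci_bus_ops = {
+ *		.read32		= ath10k_pci_read32,
+ *		.write32	= ath10k_pci_write32,
+ *		.get_num_banks	= ath10k_pci_get_num_banks,
+ *	};
+ */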
+struct ath10k_bus_ops {
+	u32 (*read32)(struct ath10k *ar, u32 offset);
+	void (*write32)(struct ath10k *ar, u32 offset, u32 value);
+	int (*get_num_banks)(struct ath10k *ar);
+};
+
+static inline struct bus_opaque *ath10k_bus_priv(struct ath10k *ar)
+{
+	return (struct bus_opaque *)ar->drv_priv;
+}
+
+struct bus_opaque {
+	/* protects CE info */
+	spinlock_t ce_lock;
+	const struct ath10k_bus_ops *bus_ops;
+	struct ath10k_ce_pipe ce_states[CE_COUNT_MAX];
+	u32 *vaddr_rri_on_ddr;
+	dma_addr_t paddr_rri_on_ddr;
+};
+
 /*==================Send====================*/
 
 /* ath10k_ce_send flags */
@@ -144,7 +237,7 @@
  */
 int ath10k_ce_send(struct ath10k_ce_pipe *ce_state,
 		   void *per_transfer_send_context,
-		   u32 buffer,
+		   dma_addr_t buffer,
 		   unsigned int nbytes,
 		   /* 14 bits */
 		   unsigned int transfer_id,
@@ -152,7 +245,7 @@
 
 int ath10k_ce_send_nolock(struct ath10k_ce_pipe *ce_state,
 			  void *per_transfer_context,
-			  u32 buffer,
+			  dma_addr_t buffer,
 			  unsigned int nbytes,
 			  unsigned int transfer_id,
 			  unsigned int flags);
@@ -164,8 +257,11 @@
 /*==================Recv=======================*/
 
 int __ath10k_ce_rx_num_free_bufs(struct ath10k_ce_pipe *pipe);
-int __ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx, u32 paddr);
-int ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx, u32 paddr);
+int __ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx,
+			    dma_addr_t paddr);
+int ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx,
+			  dma_addr_t paddr);
+void ath10k_ce_rx_update_write_idx(struct ath10k_ce_pipe *pipe, u32 nentries);
 
 /* recv flags */
 /* Data is byte-swapped */
@@ -177,10 +273,7 @@
  */
 int ath10k_ce_completed_recv_next(struct ath10k_ce_pipe *ce_state,
 				  void **per_transfer_contextp,
-				  u32 *bufferp,
-				  unsigned int *nbytesp,
-				  unsigned int *transfer_idp,
-				  unsigned int *flagsp);
+				  unsigned int *nbytesp);
 /*
  * Supply data for the next completed unprocessed send descriptor.
  * Pops 1 completed send buffer from Source ring.
@@ -199,6 +292,8 @@
 int ath10k_ce_alloc_pipe(struct ath10k *ar, int ce_id,
 			 const struct ce_attr *attr);
 void ath10k_ce_free_pipe(struct ath10k *ar, int ce_id);
+void ce_config_rri_on_ddr(struct ath10k *ar);
+void ce_remove_rri_on_ddr(struct ath10k *ar);
 
 /*==================CE Engine Shutdown=======================*/
 /*
@@ -212,10 +307,7 @@
 
 int ath10k_ce_completed_recv_next_nolock(struct ath10k_ce_pipe *ce_state,
 					 void **per_transfer_contextp,
-					 u32 *bufferp,
-					 unsigned int *nbytesp,
-					 unsigned int *transfer_idp,
-					 unsigned int *flagsp);
+					 unsigned int *nbytesp);
 
 /*
  * Support clean shutdown by allowing the caller to cancel
@@ -233,6 +325,8 @@
 void ath10k_ce_per_engine_service(struct ath10k *ar, unsigned int ce_id);
 int ath10k_ce_disable_interrupts(struct ath10k *ar);
 void ath10k_ce_enable_interrupts(struct ath10k *ar);
+void ath10k_ce_disable_per_ce_interrupts(struct ath10k *ar, unsigned int ce_id);
+void ath10k_ce_enable_per_ce_interrupts(struct ath10k *ar, unsigned int ce_id);
 
 /* ce_attr.flags values */
 /* Use NonSnooping PCIe accesses? */
@@ -268,143 +362,14 @@
 	void (*recv_cb)(struct ath10k_ce_pipe *);
 };
 
-#define SR_BA_ADDRESS		0x0000
-#define SR_SIZE_ADDRESS		0x0004
-#define DR_BA_ADDRESS		0x0008
-#define DR_SIZE_ADDRESS		0x000c
-#define CE_CMD_ADDRESS		0x0018
-
-#define CE_CTRL1_DST_RING_BYTE_SWAP_EN_MSB	17
-#define CE_CTRL1_DST_RING_BYTE_SWAP_EN_LSB	17
-#define CE_CTRL1_DST_RING_BYTE_SWAP_EN_MASK	0x00020000
-#define CE_CTRL1_DST_RING_BYTE_SWAP_EN_SET(x) \
-	(((0 | (x)) << CE_CTRL1_DST_RING_BYTE_SWAP_EN_LSB) & \
-	CE_CTRL1_DST_RING_BYTE_SWAP_EN_MASK)
-
-#define CE_CTRL1_SRC_RING_BYTE_SWAP_EN_MSB	16
-#define CE_CTRL1_SRC_RING_BYTE_SWAP_EN_LSB	16
-#define CE_CTRL1_SRC_RING_BYTE_SWAP_EN_MASK	0x00010000
-#define CE_CTRL1_SRC_RING_BYTE_SWAP_EN_GET(x) \
-	(((x) & CE_CTRL1_SRC_RING_BYTE_SWAP_EN_MASK) >> \
-	 CE_CTRL1_SRC_RING_BYTE_SWAP_EN_LSB)
-#define CE_CTRL1_SRC_RING_BYTE_SWAP_EN_SET(x) \
-	(((0 | (x)) << CE_CTRL1_SRC_RING_BYTE_SWAP_EN_LSB) & \
-	 CE_CTRL1_SRC_RING_BYTE_SWAP_EN_MASK)
-
-#define CE_CTRL1_DMAX_LENGTH_MSB		15
-#define CE_CTRL1_DMAX_LENGTH_LSB		0
-#define CE_CTRL1_DMAX_LENGTH_MASK		0x0000ffff
-#define CE_CTRL1_DMAX_LENGTH_GET(x) \
-	(((x) & CE_CTRL1_DMAX_LENGTH_MASK) >> CE_CTRL1_DMAX_LENGTH_LSB)
-#define CE_CTRL1_DMAX_LENGTH_SET(x) \
-	(((0 | (x)) << CE_CTRL1_DMAX_LENGTH_LSB) & CE_CTRL1_DMAX_LENGTH_MASK)
-
-#define CE_CTRL1_ADDRESS			0x0010
-#define CE_CTRL1_HW_MASK			0x0007ffff
-#define CE_CTRL1_SW_MASK			0x0007ffff
-#define CE_CTRL1_HW_WRITE_MASK			0x00000000
-#define CE_CTRL1_SW_WRITE_MASK			0x0007ffff
-#define CE_CTRL1_RSTMASK			0xffffffff
-#define CE_CTRL1_RESET				0x00000080
-
-#define CE_CMD_HALT_STATUS_MSB			3
-#define CE_CMD_HALT_STATUS_LSB			3
-#define CE_CMD_HALT_STATUS_MASK			0x00000008
-#define CE_CMD_HALT_STATUS_GET(x) \
-	(((x) & CE_CMD_HALT_STATUS_MASK) >> CE_CMD_HALT_STATUS_LSB)
-#define CE_CMD_HALT_STATUS_SET(x) \
-	(((0 | (x)) << CE_CMD_HALT_STATUS_LSB) & CE_CMD_HALT_STATUS_MASK)
-#define CE_CMD_HALT_STATUS_RESET		0
-#define CE_CMD_HALT_MSB				0
-#define CE_CMD_HALT_MASK			0x00000001
-
-#define HOST_IE_COPY_COMPLETE_MSB		0
-#define HOST_IE_COPY_COMPLETE_LSB		0
-#define HOST_IE_COPY_COMPLETE_MASK		0x00000001
-#define HOST_IE_COPY_COMPLETE_GET(x) \
-	(((x) & HOST_IE_COPY_COMPLETE_MASK) >> HOST_IE_COPY_COMPLETE_LSB)
-#define HOST_IE_COPY_COMPLETE_SET(x) \
-	(((0 | (x)) << HOST_IE_COPY_COMPLETE_LSB) & HOST_IE_COPY_COMPLETE_MASK)
-#define HOST_IE_COPY_COMPLETE_RESET		0
-#define HOST_IE_ADDRESS				0x002c
-
-#define HOST_IS_DST_RING_LOW_WATERMARK_MASK	0x00000010
-#define HOST_IS_DST_RING_HIGH_WATERMARK_MASK	0x00000008
-#define HOST_IS_SRC_RING_LOW_WATERMARK_MASK	0x00000004
-#define HOST_IS_SRC_RING_HIGH_WATERMARK_MASK	0x00000002
-#define HOST_IS_COPY_COMPLETE_MASK		0x00000001
-#define HOST_IS_ADDRESS				0x0030
-
-#define MISC_IE_ADDRESS				0x0034
-
-#define MISC_IS_AXI_ERR_MASK			0x00000400
-
-#define MISC_IS_DST_ADDR_ERR_MASK		0x00000200
-#define MISC_IS_SRC_LEN_ERR_MASK		0x00000100
-#define MISC_IS_DST_MAX_LEN_VIO_MASK		0x00000080
-#define MISC_IS_DST_RING_OVERFLOW_MASK		0x00000040
-#define MISC_IS_SRC_RING_OVERFLOW_MASK		0x00000020
-
-#define MISC_IS_ADDRESS				0x0038
-
-#define SR_WR_INDEX_ADDRESS			0x003c
-
-#define DST_WR_INDEX_ADDRESS			0x0040
-
-#define CURRENT_SRRI_ADDRESS			0x0044
-
-#define CURRENT_DRRI_ADDRESS			0x0048
-
-#define SRC_WATERMARK_LOW_MSB			31
-#define SRC_WATERMARK_LOW_LSB			16
-#define SRC_WATERMARK_LOW_MASK			0xffff0000
-#define SRC_WATERMARK_LOW_GET(x) \
-	(((x) & SRC_WATERMARK_LOW_MASK) >> SRC_WATERMARK_LOW_LSB)
-#define SRC_WATERMARK_LOW_SET(x) \
-	(((0 | (x)) << SRC_WATERMARK_LOW_LSB) & SRC_WATERMARK_LOW_MASK)
-#define SRC_WATERMARK_LOW_RESET			0
-#define SRC_WATERMARK_HIGH_MSB			15
-#define SRC_WATERMARK_HIGH_LSB			0
-#define SRC_WATERMARK_HIGH_MASK			0x0000ffff
-#define SRC_WATERMARK_HIGH_GET(x) \
-	(((x) & SRC_WATERMARK_HIGH_MASK) >> SRC_WATERMARK_HIGH_LSB)
-#define SRC_WATERMARK_HIGH_SET(x) \
-	(((0 | (x)) << SRC_WATERMARK_HIGH_LSB) & SRC_WATERMARK_HIGH_MASK)
-#define SRC_WATERMARK_HIGH_RESET		0
-#define SRC_WATERMARK_ADDRESS			0x004c
-
-#define DST_WATERMARK_LOW_LSB			16
-#define DST_WATERMARK_LOW_MASK			0xffff0000
-#define DST_WATERMARK_LOW_SET(x) \
-	(((0 | (x)) << DST_WATERMARK_LOW_LSB) & DST_WATERMARK_LOW_MASK)
-#define DST_WATERMARK_LOW_RESET			0
-#define DST_WATERMARK_HIGH_MSB			15
-#define DST_WATERMARK_HIGH_LSB			0
-#define DST_WATERMARK_HIGH_MASK			0x0000ffff
-#define DST_WATERMARK_HIGH_GET(x) \
-	(((x) & DST_WATERMARK_HIGH_MASK) >> DST_WATERMARK_HIGH_LSB)
-#define DST_WATERMARK_HIGH_SET(x) \
-	(((0 | (x)) << DST_WATERMARK_HIGH_LSB) & DST_WATERMARK_HIGH_MASK)
-#define DST_WATERMARK_HIGH_RESET		0
-#define DST_WATERMARK_ADDRESS			0x0050
+#define COPY_ENGINE_ID(COPY_ENGINE_BASE_ADDRESS) (((COPY_ENGINE_BASE_ADDRESS) \
+		- CE0_BASE_ADDRESS) / (CE1_BASE_ADDRESS - CE0_BASE_ADDRESS))
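+/* E.g. ctrl_addr == CE0_BASE_ADDRESS + 2 * (CE1_BASE_ADDRESS -
+ * CE0_BASE_ADDRESS) yields CE id 2.
+ */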
 
 static inline u32 ath10k_ce_base_address(struct ath10k *ar, unsigned int ce_id)
 {
 	return CE0_BASE_ADDRESS + (CE1_BASE_ADDRESS - CE0_BASE_ADDRESS) * ce_id;
 }
 
-#define CE_WATERMARK_MASK (HOST_IS_SRC_RING_LOW_WATERMARK_MASK  | \
-			   HOST_IS_SRC_RING_HIGH_WATERMARK_MASK | \
-			   HOST_IS_DST_RING_LOW_WATERMARK_MASK  | \
-			   HOST_IS_DST_RING_HIGH_WATERMARK_MASK)
-
-#define CE_ERROR_MASK	(MISC_IS_AXI_ERR_MASK           | \
-			 MISC_IS_DST_ADDR_ERR_MASK      | \
-			 MISC_IS_SRC_LEN_ERR_MASK       | \
-			 MISC_IS_DST_MAX_LEN_VIO_MASK   | \
-			 MISC_IS_DST_RING_OVERFLOW_MASK | \
-			 MISC_IS_SRC_RING_OVERFLOW_MASK)
-
 #define CE_SRC_RING_TO_DESC(baddr, idx) \
 	(&(((struct ce_desc *)baddr)[idx]))
 
@@ -416,6 +381,8 @@
 	(((int)(toidx)-(int)(fromidx)) & (nentries_mask))
 
 #define CE_RING_IDX_INCR(nentries_mask, idx) (((idx) + 1) & (nentries_mask))
+#define CE_RING_IDX_ADD(nentries_mask, idx, num) \
+		(((idx) + (num)) & (nentries_mask))
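+/* E.g. with 512 entries (mask 0x1ff), CE_RING_IDX_ADD(0x1ff, 510, 4)
+ * wraps to 2.
+ */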
 
 #define CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_LSB \
 				ar->regs->ce_wrap_intr_sum_host_msi_lsb
@@ -426,9 +393,9 @@
 		CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_LSB)
 #define CE_WRAPPER_INTERRUPT_SUMMARY_ADDRESS			0x0000
 
-#define CE_INTERRUPT_SUMMARY(ar) \
+#define CE_INTERRUPT_SUMMARY(ar, ar_opaque) \
 	CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_GET( \
-		ath10k_pci_read32((ar), CE_WRAPPER_BASE_ADDRESS + \
+		ar_opaque->bus_ops->read32((ar), CE_WRAPPER_BASE_ADDRESS + \
 		CE_WRAPPER_INTERRUPT_SUMMARY_ADDRESS))
 
 #endif /* _CE_H_ */
diff -ruw linux-4.4.115/drivers/net/wireless/ath/ath10k/core.c linux-4.4.115-fbx/drivers/net/wireless/ath/ath10k/core.c
--- linux-4.4.115/drivers/net/wireless/ath/ath10k/core.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/net/wireless/ath/ath10k/core.c	2019-01-22 16:16:25.415263721 +0100
@@ -1,6 +1,6 @@
 /*
  * Copyright (c) 2005-2011 Atheros Communications Inc.
- * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ * Copyright (c) 2011-2013, 2017 Qualcomm Atheros, Inc.
  *
  * Permission to use, copy, modify, and/or distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above
@@ -18,6 +18,7 @@
 #include <linux/module.h>
 #include <linux/firmware.h>
 #include <linux/of.h>
+#include <asm/byteorder.h>
 
 #include "core.h"
 #include "mac.h"
@@ -55,18 +56,39 @@
 		.name = "qca988x hw2.0",
 		.patch_load_addr = QCA988X_HW_2_0_PATCH_LOAD_ADDR,
 		.uart_pin = 7,
-		.has_shifted_cc_wraparound = true,
+		.cc_wraparound_type = ATH10K_HW_CC_WRAP_SHIFTED_ALL,
 		.otp_exe_param = 0,
 		.channel_counters_freq_hz = 88000,
 		.max_probe_resp_desc_thres = 0,
+		.cal_data_len = 2116,
 		.fw = {
 			.dir = QCA988X_HW_2_0_FW_DIR,
-			.fw = QCA988X_HW_2_0_FW_FILE,
-			.otp = QCA988X_HW_2_0_OTP_FILE,
 			.board = QCA988X_HW_2_0_BOARD_DATA_FILE,
 			.board_size = QCA988X_BOARD_DATA_SZ,
 			.board_ext_size = QCA988X_BOARD_EXT_DATA_SZ,
 		},
+		.hw_ops = &qca988x_ops,
+		.decap_align_bytes = 4,
+	},
+	{
+		.id = QCA9887_HW_1_0_VERSION,
+		.dev_id = QCA9887_1_0_DEVICE_ID,
+		.name = "qca9887 hw1.0",
+		.patch_load_addr = QCA9887_HW_1_0_PATCH_LOAD_ADDR,
+		.uart_pin = 7,
+		.cc_wraparound_type = ATH10K_HW_CC_WRAP_SHIFTED_ALL,
+		.otp_exe_param = 0,
+		.channel_counters_freq_hz = 88000,
+		.max_probe_resp_desc_thres = 0,
+		.cal_data_len = 2116,
+		.fw = {
+			.dir = QCA9887_HW_1_0_FW_DIR,
+			.board = QCA9887_HW_1_0_BOARD_DATA_FILE,
+			.board_size = QCA9887_BOARD_DATA_SZ,
+			.board_ext_size = QCA9887_BOARD_EXT_DATA_SZ,
+		},
+		.hw_ops = &qca988x_ops,
+		.decap_align_bytes = 4,
 	},
 	{
 		.id = QCA6174_HW_2_1_VERSION,
@@ -77,14 +99,15 @@
 		.otp_exe_param = 0,
 		.channel_counters_freq_hz = 88000,
 		.max_probe_resp_desc_thres = 0,
+		.cal_data_len = 8124,
 		.fw = {
 			.dir = QCA6174_HW_2_1_FW_DIR,
-			.fw = QCA6174_HW_2_1_FW_FILE,
-			.otp = QCA6174_HW_2_1_OTP_FILE,
 			.board = QCA6174_HW_2_1_BOARD_DATA_FILE,
 			.board_size = QCA6174_BOARD_DATA_SZ,
 			.board_ext_size = QCA6174_BOARD_EXT_DATA_SZ,
 		},
+		.hw_ops = &qca988x_ops,
+		.decap_align_bytes = 4,
 	},
 	{
 		.id = QCA6174_HW_2_1_VERSION,
@@ -95,14 +118,15 @@
 		.otp_exe_param = 0,
 		.channel_counters_freq_hz = 88000,
 		.max_probe_resp_desc_thres = 0,
+		.cal_data_len = 8124,
 		.fw = {
 			.dir = QCA6174_HW_2_1_FW_DIR,
-			.fw = QCA6174_HW_2_1_FW_FILE,
-			.otp = QCA6174_HW_2_1_OTP_FILE,
 			.board = QCA6174_HW_2_1_BOARD_DATA_FILE,
 			.board_size = QCA6174_BOARD_DATA_SZ,
 			.board_ext_size = QCA6174_BOARD_EXT_DATA_SZ,
 		},
+		.hw_ops = &qca988x_ops,
+		.decap_align_bytes = 4,
 	},
 	{
 		.id = QCA6174_HW_3_0_VERSION,
@@ -113,14 +137,15 @@
 		.otp_exe_param = 0,
 		.channel_counters_freq_hz = 88000,
 		.max_probe_resp_desc_thres = 0,
+		.cal_data_len = 8124,
 		.fw = {
 			.dir = QCA6174_HW_3_0_FW_DIR,
-			.fw = QCA6174_HW_3_0_FW_FILE,
-			.otp = QCA6174_HW_3_0_OTP_FILE,
 			.board = QCA6174_HW_3_0_BOARD_DATA_FILE,
 			.board_size = QCA6174_BOARD_DATA_SZ,
 			.board_ext_size = QCA6174_BOARD_EXT_DATA_SZ,
 		},
+		.hw_ops = &qca988x_ops,
+		.decap_align_bytes = 4,
 	},
 	{
 		.id = QCA6174_HW_3_2_VERSION,
@@ -131,15 +156,16 @@
 		.otp_exe_param = 0,
 		.channel_counters_freq_hz = 88000,
 		.max_probe_resp_desc_thres = 0,
+		.cal_data_len = 8124,
 		.fw = {
 			/* uses same binaries as hw3.0 */
 			.dir = QCA6174_HW_3_0_FW_DIR,
-			.fw = QCA6174_HW_3_0_FW_FILE,
-			.otp = QCA6174_HW_3_0_OTP_FILE,
 			.board = QCA6174_HW_3_0_BOARD_DATA_FILE,
 			.board_size = QCA6174_BOARD_DATA_SZ,
 			.board_ext_size = QCA6174_BOARD_EXT_DATA_SZ,
 		},
+		.hw_ops = &qca988x_ops,
+		.decap_align_bytes = 4,
 	},
 	{
 		.id = QCA99X0_HW_2_0_DEV_VERSION,
@@ -149,16 +175,71 @@
 		.uart_pin = 7,
 		.otp_exe_param = 0x00000700,
 		.continuous_frag_desc = true,
+		.cck_rate_map_rev2 = true,
 		.channel_counters_freq_hz = 150000,
 		.max_probe_resp_desc_thres = 24,
+		.tx_chain_mask = 0xf,
+		.rx_chain_mask = 0xf,
+		.max_spatial_stream = 4,
+		.cal_data_len = 12064,
 		.fw = {
 			.dir = QCA99X0_HW_2_0_FW_DIR,
-			.fw = QCA99X0_HW_2_0_FW_FILE,
-			.otp = QCA99X0_HW_2_0_OTP_FILE,
 			.board = QCA99X0_HW_2_0_BOARD_DATA_FILE,
 			.board_size = QCA99X0_BOARD_DATA_SZ,
 			.board_ext_size = QCA99X0_BOARD_EXT_DATA_SZ,
 		},
+		.sw_decrypt_mcast_mgmt = true,
+		.hw_ops = &qca99x0_ops,
+		.decap_align_bytes = 1,
+	},
+	{
+		.id = QCA9984_HW_1_0_DEV_VERSION,
+		.dev_id = QCA9984_1_0_DEVICE_ID,
+		.name = "qca9984/qca9994 hw1.0",
+		.patch_load_addr = QCA9984_HW_1_0_PATCH_LOAD_ADDR,
+		.uart_pin = 7,
+		.otp_exe_param = 0x00000700,
+		.continuous_frag_desc = true,
+		.cck_rate_map_rev2 = true,
+		.channel_counters_freq_hz = 150000,
+		.max_probe_resp_desc_thres = 24,
+		.tx_chain_mask = 0xf,
+		.rx_chain_mask = 0xf,
+		.max_spatial_stream = 4,
+		.cal_data_len = 12064,
+		.fw = {
+			.dir = QCA9984_HW_1_0_FW_DIR,
+			.board = QCA9984_HW_1_0_BOARD_DATA_FILE,
+			.board_size = QCA99X0_BOARD_DATA_SZ,
+			.board_ext_size = QCA99X0_BOARD_EXT_DATA_SZ,
+		},
+		.sw_decrypt_mcast_mgmt = true,
+		.hw_ops = &qca99x0_ops,
+		.decap_align_bytes = 1,
+	},
+	{
+		.id = QCA9888_HW_2_0_DEV_VERSION,
+		.dev_id = QCA9888_2_0_DEVICE_ID,
+		.name = "qca9888 hw2.0",
+		.patch_load_addr = QCA9888_HW_2_0_PATCH_LOAD_ADDR,
+		.uart_pin = 7,
+		.otp_exe_param = 0x00000700,
+		.continuous_frag_desc = true,
+		.channel_counters_freq_hz = 150000,
+		.max_probe_resp_desc_thres = 24,
+		.tx_chain_mask = 3,
+		.rx_chain_mask = 3,
+		.max_spatial_stream = 2,
+		.cal_data_len = 12064,
+		.fw = {
+			.dir = QCA9888_HW_2_0_FW_DIR,
+			.board = QCA9888_HW_2_0_BOARD_DATA_FILE,
+			.board_size = QCA99X0_BOARD_DATA_SZ,
+			.board_ext_size = QCA99X0_BOARD_EXT_DATA_SZ,
+		},
+		.sw_decrypt_mcast_mgmt = true,
+		.hw_ops = &qca99x0_ops,
+		.decap_align_bytes = 1,
 	},
 	{
 		.id = QCA9377_HW_1_0_DEV_VERSION,
@@ -169,14 +250,15 @@
 		.otp_exe_param = 0,
 		.channel_counters_freq_hz = 88000,
 		.max_probe_resp_desc_thres = 0,
+		.cal_data_len = 8124,
 		.fw = {
 			.dir = QCA9377_HW_1_0_FW_DIR,
-			.fw = QCA9377_HW_1_0_FW_FILE,
-			.otp = QCA9377_HW_1_0_OTP_FILE,
 			.board = QCA9377_HW_1_0_BOARD_DATA_FILE,
 			.board_size = QCA9377_BOARD_DATA_SZ,
 			.board_ext_size = QCA9377_BOARD_EXT_DATA_SZ,
 		},
+		.hw_ops = &qca988x_ops,
+		.decap_align_bytes = 4,
 	},
 	{
 		.id = QCA9377_HW_1_1_DEV_VERSION,
@@ -187,14 +269,56 @@
 		.otp_exe_param = 0,
 		.channel_counters_freq_hz = 88000,
 		.max_probe_resp_desc_thres = 0,
+		.cal_data_len = 8124,
 		.fw = {
 			.dir = QCA9377_HW_1_0_FW_DIR,
-			.fw = QCA9377_HW_1_0_FW_FILE,
-			.otp = QCA9377_HW_1_0_OTP_FILE,
 			.board = QCA9377_HW_1_0_BOARD_DATA_FILE,
 			.board_size = QCA9377_BOARD_DATA_SZ,
 			.board_ext_size = QCA9377_BOARD_EXT_DATA_SZ,
 		},
+		.hw_ops = &qca988x_ops,
+		.decap_align_bytes = 4,
+	},
+	{
+		.id = QCA4019_HW_1_0_DEV_VERSION,
+		.dev_id = 0,
+		.name = "qca4019 hw1.0",
+		.patch_load_addr = QCA4019_HW_1_0_PATCH_LOAD_ADDR,
+		.uart_pin = 7,
+		.cc_wraparound_type = ATH10K_HW_CC_WRAP_SHIFTED_EACH,
+		.otp_exe_param = 0x0010000,
+		.continuous_frag_desc = true,
+		.cck_rate_map_rev2 = true,
+		.channel_counters_freq_hz = 125000,
+		.max_probe_resp_desc_thres = 24,
+		.tx_chain_mask = 0x3,
+		.rx_chain_mask = 0x3,
+		.max_spatial_stream = 2,
+		.cal_data_len = 12064,
+		.fw = {
+			.dir = QCA4019_HW_1_0_FW_DIR,
+			.board = QCA4019_HW_1_0_BOARD_DATA_FILE,
+			.board_size = QCA4019_BOARD_DATA_SZ,
+			.board_ext_size = QCA4019_BOARD_EXT_DATA_SZ,
+		},
+		.sw_decrypt_mcast_mgmt = true,
+		.hw_ops = &qca99x0_ops,
+		.decap_align_bytes = 1,
+	},
+	{
+		.id = ATH10K_HW_WCN3990,
+		.dev_id = 0,
+		.name = "wcn3990 hw1.0",
+		.continuous_frag_desc = true,
+		.tx_chain_mask = 0x7,
+		.rx_chain_mask = 0x7,
+		.max_spatial_stream = 4,
+		.fw = {
+			.dir = WCN3990_HW_1_0_FW_DIR,
+		},
+		.sw_decrypt_mcast_mgmt = true,
+		.hw_ops = &wcn3990_ops,
+		.decap_align_bytes = 1,
 	},
 };
 
@@ -211,6 +335,10 @@
 	[ATH10K_FW_FEATURE_SUPPORTS_SKIP_CLOCK_INIT] = "skip-clock-init",
 	[ATH10K_FW_FEATURE_RAW_MODE_SUPPORT] = "raw-mode",
 	[ATH10K_FW_FEATURE_SUPPORTS_ADAPTIVE_CCA] = "adaptive-cca",
+	[ATH10K_FW_FEATURE_MFP_SUPPORT] = "mfp",
+	[ATH10K_FW_FEATURE_PEER_FLOW_CONTROL] = "peer-flow-ctrl",
+	[ATH10K_FW_FEATURE_BTCOEX_PARAM] = "btcoex-param",
+	[ATH10K_FW_FEATURE_SKIP_NULL_FUNC_WAR] = "skip-null-func-war",
 };
 
 static unsigned int ath10k_core_get_fw_feature_str(char *buf,
@@ -237,7 +365,7 @@
 	int i;
 
 	for (i = 0; i < ATH10K_FW_FEATURE_COUNT; i++) {
-		if (test_bit(i, ar->fw_features)) {
+		if (test_bit(i, ar->normal_mode_fw.fw_file.fw_features)) {
 			if (len > 0)
 				len += scnprintf(buf + len, buf_len - len, ",");
 
@@ -429,18 +557,18 @@
 	return ret;
 }
 
-static int ath10k_download_cal_file(struct ath10k *ar)
+static int ath10k_download_cal_file(struct ath10k *ar,
+				    const struct firmware *file)
 {
 	int ret;
 
-	if (!ar->cal_file)
+	if (!file)
 		return -ENOENT;
 
-	if (IS_ERR(ar->cal_file))
-		return PTR_ERR(ar->cal_file);
+	if (IS_ERR(file))
+		return PTR_ERR(file);
 
-	ret = ath10k_download_board_data(ar, ar->cal_file->data,
-					 ar->cal_file->size);
+	ret = ath10k_download_board_data(ar, file->data, file->size);
 	if (ret) {
 		ath10k_err(ar, "failed to download cal_file data: %d\n", ret);
 		return ret;
@@ -451,7 +579,7 @@
 	return 0;
 }
 
-static int ath10k_download_cal_dt(struct ath10k *ar)
+static int ath10k_download_cal_dt(struct ath10k *ar, const char *dt_name)
 {
 	struct device_node *node;
 	int data_len;
@@ -465,13 +593,12 @@
 		 */
 		return -ENOENT;
 
-	if (!of_get_property(node, "qcom,ath10k-calibration-data",
-			     &data_len)) {
+	if (!of_get_property(node, dt_name, &data_len)) {
 		/* The calibration data node is optional */
 		return -ENOENT;
 	}
 
-	if (data_len != QCA988X_CAL_DATA_LEN) {
+	if (data_len != ar->hw_params.cal_data_len) {
 		ath10k_warn(ar, "invalid calibration data length in DT: %d\n",
 			    data_len);
 		ret = -EMSGSIZE;
@@ -484,8 +611,7 @@
 		goto out;
 	}
 
-	ret = of_property_read_u8_array(node, "qcom,ath10k-calibration-data",
-					data, data_len);
+	ret = of_property_read_u8_array(node, dt_name, data, data_len);
 	if (ret) {
 		ath10k_warn(ar, "failed to read calibration data from DT: %d\n",
 			    ret);
@@ -508,6 +634,35 @@
 	return ret;
 }
 
+static int ath10k_download_cal_eeprom(struct ath10k *ar)
+{
+	size_t data_len;
+	void *data = NULL;
+	int ret;
+
+	ret = ath10k_hif_fetch_cal_eeprom(ar, &data, &data_len);
+	if (ret) {
+		if (ret != -EOPNOTSUPP)
+			ath10k_warn(ar, "failed to read calibration data from EEPROM: %d\n",
+				    ret);
+		goto out_free;
+	}
+
+	ret = ath10k_download_board_data(ar, data, data_len);
+	if (ret) {
+		ath10k_warn(ar, "failed to download calibration data from EEPROM: %d\n",
+			    ret);
+		goto out_free;
+	}
+
+	ret = 0;
+
+out_free:
+	kfree(data);
+
+	return ret;
+}
+
 static int ath10k_core_get_board_id_from_otp(struct ath10k *ar)
 {
 	u32 result, address;
@@ -516,7 +671,8 @@
 
 	address = ar->hw_params.patch_load_addr;
 
-	if (!ar->otp_data || !ar->otp_len) {
+	if (!ar->normal_mode_fw.fw_file.otp_data ||
+	    !ar->normal_mode_fw.fw_file.otp_len) {
 		ath10k_warn(ar,
 			    "failed to retrieve board id because of invalid otp\n");
 		return -ENODATA;
@@ -524,9 +680,11 @@
 
 	ath10k_dbg(ar, ATH10K_DBG_BOOT,
 		   "boot upload otp to 0x%x len %zd for board id\n",
-		   address, ar->otp_len);
+		   address, ar->normal_mode_fw.fw_file.otp_len);
 
-	ret = ath10k_bmi_fast_download(ar, address, ar->otp_data, ar->otp_len);
+	ret = ath10k_bmi_fast_download(ar, address,
+				       ar->normal_mode_fw.fw_file.otp_data,
+				       ar->normal_mode_fw.fw_file.otp_len);
 	if (ret) {
 		ath10k_err(ar, "could not write otp for board id check: %d\n",
 			   ret);
@@ -567,7 +725,9 @@
 	u32 bmi_otp_exe_param = ar->hw_params.otp_exe_param;
 	int ret;
 
-	ret = ath10k_download_board_data(ar, ar->board_data, ar->board_len);
+	ret = ath10k_download_board_data(ar,
+					 ar->running_fw->board_data,
+					 ar->running_fw->board_len);
 	if (ret) {
 		ath10k_err(ar, "failed to download board data: %d\n", ret);
 		return ret;
@@ -575,16 +735,20 @@
 
 	/* OTP is optional */
 
-	if (!ar->otp_data || !ar->otp_len) {
-		ath10k_warn(ar, "Not running otp, calibration will be incorrect (otp-data %p otp_len %zd)!\n",
-			    ar->otp_data, ar->otp_len);
+	if (!ar->running_fw->fw_file.otp_data ||
+	    !ar->running_fw->fw_file.otp_len) {
+		ath10k_warn(ar, "Not running otp, calibration will be incorrect (otp-data %pK otp_len %zd)!\n",
+			    ar->running_fw->fw_file.otp_data,
+			    ar->running_fw->fw_file.otp_len);
 		return 0;
 	}
 
 	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot upload otp to 0x%x len %zd\n",
-		   address, ar->otp_len);
+		   address, ar->running_fw->fw_file.otp_len);
 
-	ret = ath10k_bmi_fast_download(ar, address, ar->otp_data, ar->otp_len);
+	ret = ath10k_bmi_fast_download(ar, address,
+				       ar->running_fw->fw_file.otp_data,
+				       ar->running_fw->fw_file.otp_len);
 	if (ret) {
 		ath10k_err(ar, "could not write otp (%d)\n", ret);
 		return ret;
@@ -599,7 +763,7 @@
 	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot otp execute result %d\n", result);
 
 	if (!(skip_otp || test_bit(ATH10K_FW_FEATURE_IGNORE_OTP_RESULT,
-				   ar->fw_features)) &&
+				   ar->running_fw->fw_file.fw_features)) &&
 	    result != 0) {
 		ath10k_err(ar, "otp calibration failed: %d", result);
 		return -EINVAL;
@@ -608,46 +772,32 @@
 	return 0;
 }
 
-static int ath10k_download_fw(struct ath10k *ar, enum ath10k_firmware_mode mode)
+static int ath10k_download_fw(struct ath10k *ar)
 {
 	u32 address, data_len;
-	const char *mode_name;
 	const void *data;
 	int ret;
 
 	address = ar->hw_params.patch_load_addr;
 
-	switch (mode) {
-	case ATH10K_FIRMWARE_MODE_NORMAL:
-		data = ar->firmware_data;
-		data_len = ar->firmware_len;
-		mode_name = "normal";
-		ret = ath10k_swap_code_seg_configure(ar,
-						     ATH10K_SWAP_CODE_SEG_BIN_TYPE_FW);
+	data = ar->running_fw->fw_file.firmware_data;
+	data_len = ar->running_fw->fw_file.firmware_len;
+
+	ret = ath10k_swap_code_seg_configure(ar, &ar->running_fw->fw_file);
-		if (ret) {
-			ath10k_err(ar, "failed to configure fw code swap: %d\n",
-				   ret);
-			return ret;
-		}
+	if (ret) {
+		ath10k_err(ar, "failed to configure fw code swap: %d\n",
+			   ret);
+		return ret;
+	}
-		break;
-	case ATH10K_FIRMWARE_MODE_UTF:
-		data = ar->testmode.utf_firmware_data;
-		data_len = ar->testmode.utf_firmware_len;
-		mode_name = "utf";
-		break;
-	default:
-		ath10k_err(ar, "unknown firmware mode: %d\n", mode);
-		return -EINVAL;
-	}
 
 	ath10k_dbg(ar, ATH10K_DBG_BOOT,
-		   "boot uploading firmware image %p len %d mode %s\n",
-		   data, data_len, mode_name);
+		   "boot uploading firmware image %pK len %d\n",
+		   data, data_len);
 
 	ret = ath10k_bmi_fast_download(ar, address, data, data_len);
 	if (ret) {
-		ath10k_err(ar, "failed to download %s firmware: %d\n",
-			   mode_name, ret);
+		ath10k_err(ar, "failed to download firmware: %d\n",
+			   ret);
 		return ret;
 	}
 
@@ -656,42 +806,50 @@
 
 static void ath10k_core_free_board_files(struct ath10k *ar)
 {
-	if (!IS_ERR(ar->board))
-		release_firmware(ar->board);
+	if (!IS_ERR(ar->normal_mode_fw.board))
+		release_firmware(ar->normal_mode_fw.board);
 
-	ar->board = NULL;
-	ar->board_data = NULL;
-	ar->board_len = 0;
+	ar->normal_mode_fw.board = NULL;
+	ar->normal_mode_fw.board_data = NULL;
+	ar->normal_mode_fw.board_len = 0;
 }
 
 static void ath10k_core_free_firmware_files(struct ath10k *ar)
 {
-	if (!IS_ERR(ar->otp))
-		release_firmware(ar->otp);
-
-	if (!IS_ERR(ar->firmware))
-		release_firmware(ar->firmware);
+	if (!IS_ERR(ar->normal_mode_fw.fw_file.firmware))
+		release_firmware(ar->normal_mode_fw.fw_file.firmware);
 
 	if (!IS_ERR(ar->cal_file))
 		release_firmware(ar->cal_file);
 
-	ath10k_swap_code_seg_release(ar);
+	if (!IS_ERR(ar->pre_cal_file))
+		release_firmware(ar->pre_cal_file);
 
-	ar->otp = NULL;
-	ar->otp_data = NULL;
-	ar->otp_len = 0;
-
-	ar->firmware = NULL;
-	ar->firmware_data = NULL;
-	ar->firmware_len = 0;
+	ath10k_swap_code_seg_release(ar, &ar->normal_mode_fw.fw_file);
+
+	ar->normal_mode_fw.fw_file.otp_data = NULL;
+	ar->normal_mode_fw.fw_file.otp_len = 0;
+
+	ar->normal_mode_fw.fw_file.firmware = NULL;
+	ar->normal_mode_fw.fw_file.firmware_data = NULL;
+	ar->normal_mode_fw.fw_file.firmware_len = 0;
 
 	ar->cal_file = NULL;
+	ar->pre_cal_file = NULL;
 }
 
 static int ath10k_fetch_cal_file(struct ath10k *ar)
 {
 	char filename[100];
 
+	/* pre-cal-<bus>-<id>.bin */
+	scnprintf(filename, sizeof(filename), "pre-cal-%s-%s.bin",
+		  ath10k_bus_str(ar->hif.bus), dev_name(ar->dev));
+
+	ar->pre_cal_file = ath10k_fetch_fw_file(ar, ATH10K_FW_DIR, filename);
+	if (!IS_ERR(ar->pre_cal_file))
+		goto success;
+
 	/* cal-<bus>-<id>.bin */
 	scnprintf(filename, sizeof(filename), "cal-%s-%s.bin",
 		  ath10k_bus_str(ar->hif.bus), dev_name(ar->dev));
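
The calibration blobs are looked up by names derived from the bus type and the
device name. A minimal user-space sketch of the same name construction,
assuming a hypothetical PCI device at 0000:01:00.0 (the bus and device strings
below are illustrative values, not taken from the patch):

    #include <stdio.h>

    int main(void)
    {
        char filename[100];
        const char *bus = "pci";          /* as ath10k_bus_str() would return */
        const char *dev = "0000:01:00.0"; /* hypothetical dev_name() value */

        /* same patterns as ath10k_fetch_cal_file() above */
        snprintf(filename, sizeof(filename), "pre-cal-%s-%s.bin", bus, dev);
        printf("%s\n", filename); /* pre-cal-pci-0000:01:00.0.bin */

        snprintf(filename, sizeof(filename), "cal-%s-%s.bin", bus, dev);
        printf("%s\n", filename); /* cal-pci-0000:01:00.0.bin */
        return 0;
    }
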
@@ -700,7 +858,7 @@
 	if (IS_ERR(ar->cal_file))
 		/* calibration file is optional, don't print any warnings */
 		return PTR_ERR(ar->cal_file);
-
+success:
 	ath10k_dbg(ar, ATH10K_DBG_BOOT, "found calibration file %s/%s\n",
 		   ATH10K_FW_DIR, filename);
 
@@ -714,14 +872,14 @@
 		return -EINVAL;
 	}
 
-	ar->board = ath10k_fetch_fw_file(ar,
+	ar->normal_mode_fw.board = ath10k_fetch_fw_file(ar,
 					 ar->hw_params.fw.dir,
 					 ar->hw_params.fw.board);
-	if (IS_ERR(ar->board))
-		return PTR_ERR(ar->board);
+	if (IS_ERR(ar->normal_mode_fw.board))
+		return PTR_ERR(ar->normal_mode_fw.board);
 
-	ar->board_data = ar->board->data;
-	ar->board_len = ar->board->size;
+	ar->normal_mode_fw.board_data = ar->normal_mode_fw.board->data;
+	ar->normal_mode_fw.board_len = ar->normal_mode_fw.board->size;
 
 	return 0;
 }
@@ -781,8 +939,8 @@
 				   "boot found board data for '%s'",
 				   boardname);
 
-			ar->board_data = board_ie_data;
-			ar->board_len = board_ie_len;
+			ar->normal_mode_fw.board_data = board_ie_data;
+			ar->normal_mode_fw.board_len = board_ie_len;
 
 			ret = 0;
 			goto out;
@@ -815,12 +973,14 @@
 	const u8 *data;
 	int ret, ie_id;
 
-	ar->board = ath10k_fetch_fw_file(ar, ar->hw_params.fw.dir, filename);
-	if (IS_ERR(ar->board))
-		return PTR_ERR(ar->board);
+	ar->normal_mode_fw.board = ath10k_fetch_fw_file(ar,
+							ar->hw_params.fw.dir,
+							filename);
+	if (IS_ERR(ar->normal_mode_fw.board))
+		return PTR_ERR(ar->normal_mode_fw.board);
 
-	data = ar->board->data;
-	len = ar->board->size;
+	data = ar->normal_mode_fw.board->data;
+	len = ar->normal_mode_fw.board->size;
 
 	/* magic has extra null byte padded */
 	magic_len = strlen(ATH10K_BOARD_MAGIC) + 1;
@@ -887,10 +1047,10 @@
 	}
 
 out:
-	if (!ar->board_data || !ar->board_len) {
+	if (!ar->normal_mode_fw.board_data || !ar->normal_mode_fw.board_len) {
 		ath10k_err(ar,
 			   "failed to fetch board data for %s from %s/%s\n",
-			   ar->hw_params.fw.dir, boardname, filename);
+			   boardname, ar->hw_params.fw.dir, filename);
 		ret = -ENODATA;
 		goto err;
 	}
@@ -955,51 +1115,8 @@
 	return 0;
 }
 
-static int ath10k_core_fetch_firmware_api_1(struct ath10k *ar)
-{
-	int ret = 0;
-
-	if (ar->hw_params.fw.fw == NULL) {
-		ath10k_err(ar, "firmware file not defined\n");
-		return -EINVAL;
-	}
-
-	ar->firmware = ath10k_fetch_fw_file(ar,
-					    ar->hw_params.fw.dir,
-					    ar->hw_params.fw.fw);
-	if (IS_ERR(ar->firmware)) {
-		ret = PTR_ERR(ar->firmware);
-		ath10k_err(ar, "could not fetch firmware (%d)\n", ret);
-		goto err;
-	}
-
-	ar->firmware_data = ar->firmware->data;
-	ar->firmware_len = ar->firmware->size;
-
-	/* OTP may be undefined. If so, don't fetch it at all */
-	if (ar->hw_params.fw.otp == NULL)
-		return 0;
-
-	ar->otp = ath10k_fetch_fw_file(ar,
-				       ar->hw_params.fw.dir,
-				       ar->hw_params.fw.otp);
-	if (IS_ERR(ar->otp)) {
-		ret = PTR_ERR(ar->otp);
-		ath10k_err(ar, "could not fetch otp (%d)\n", ret);
-		goto err;
-	}
-
-	ar->otp_data = ar->otp->data;
-	ar->otp_len = ar->otp->size;
-
-	return 0;
-
-err:
-	ath10k_core_free_firmware_files(ar);
-	return ret;
-}
-
-static int ath10k_core_fetch_firmware_api_n(struct ath10k *ar, const char *name)
+int ath10k_core_fetch_firmware_api_n(struct ath10k *ar, const char *name,
+				     struct ath10k_fw_file *fw_file)
 {
 	size_t magic_len, len, ie_len;
 	int ie_id, i, index, bit, ret;
@@ -1008,15 +1125,17 @@
 	__le32 *timestamp, *version;
 
 	/* first fetch the firmware file (firmware-*.bin) */
-	ar->firmware = ath10k_fetch_fw_file(ar, ar->hw_params.fw.dir, name);
-	if (IS_ERR(ar->firmware)) {
+	fw_file->firmware = ath10k_fetch_fw_file(ar, ar->hw_params.fw.dir,
+						 name);
+	if (IS_ERR(fw_file->firmware)) {
 		ath10k_err(ar, "could not fetch firmware file '%s/%s': %ld\n",
-			   ar->hw_params.fw.dir, name, PTR_ERR(ar->firmware));
-		return PTR_ERR(ar->firmware);
+			   ar->hw_params.fw.dir, name,
+			   PTR_ERR(fw_file->firmware));
+		return PTR_ERR(fw_file->firmware);
 	}
 
-	data = ar->firmware->data;
-	len = ar->firmware->size;
+	data = fw_file->firmware->data;
+	len = fw_file->firmware->size;
 
 	/* magic also includes the null byte, check that as well */
 	magic_len = strlen(ATH10K_FIRMWARE_MAGIC) + 1;
@@ -1059,15 +1178,15 @@
 
 		switch (ie_id) {
 		case ATH10K_FW_IE_FW_VERSION:
-			if (ie_len > sizeof(ar->hw->wiphy->fw_version) - 1)
+			if (ie_len > sizeof(fw_file->fw_version) - 1)
 				break;
 
-			memcpy(ar->hw->wiphy->fw_version, data, ie_len);
-			ar->hw->wiphy->fw_version[ie_len] = '\0';
+			memcpy(fw_file->fw_version, data, ie_len);
+			fw_file->fw_version[ie_len] = '\0';
 
 			ath10k_dbg(ar, ATH10K_DBG_BOOT,
 				   "found fw version %s\n",
-				    ar->hw->wiphy->fw_version);
+				    fw_file->fw_version);
 			break;
 		case ATH10K_FW_IE_TIMESTAMP:
 			if (ie_len != sizeof(u32))
@@ -1094,21 +1213,21 @@
 					ath10k_dbg(ar, ATH10K_DBG_BOOT,
 						   "Enabling feature bit: %i\n",
 						   i);
-					__set_bit(i, ar->fw_features);
+					__set_bit(i, fw_file->fw_features);
 				}
 			}
 
 			ath10k_dbg_dump(ar, ATH10K_DBG_BOOT, "features", "",
-					ar->fw_features,
-					sizeof(ar->fw_features));
+					fw_file->fw_features,
+					sizeof(fw_file->fw_features));
 			break;
 		case ATH10K_FW_IE_FW_IMAGE:
 			ath10k_dbg(ar, ATH10K_DBG_BOOT,
 				   "found fw image ie (%zd B)\n",
 				   ie_len);
 
-			ar->firmware_data = data;
-			ar->firmware_len = ie_len;
+			fw_file->firmware_data = data;
+			fw_file->firmware_len = ie_len;
 
 			break;
 		case ATH10K_FW_IE_OTP_IMAGE:
@@ -1116,8 +1235,8 @@
 				   "found otp image ie (%zd B)\n",
 				   ie_len);
 
-			ar->otp_data = data;
-			ar->otp_len = ie_len;
+			fw_file->otp_data = data;
+			fw_file->otp_len = ie_len;
 
 			break;
 		case ATH10K_FW_IE_WMI_OP_VERSION:
@@ -1126,10 +1245,10 @@
 
 			version = (__le32 *)data;
 
-			ar->wmi.op_version = le32_to_cpup(version);
+			fw_file->wmi_op_version = le32_to_cpup(version);
 
 			ath10k_dbg(ar, ATH10K_DBG_BOOT, "found fw ie wmi op version %d\n",
-				   ar->wmi.op_version);
+				   fw_file->wmi_op_version);
 			break;
 		case ATH10K_FW_IE_HTT_OP_VERSION:
 			if (ie_len != sizeof(u32))
@@ -1137,17 +1256,17 @@
 
 			version = (__le32 *)data;
 
-			ar->htt.op_version = le32_to_cpup(version);
+			fw_file->htt_op_version = le32_to_cpup(version);
 
 			ath10k_dbg(ar, ATH10K_DBG_BOOT, "found fw ie htt op version %d\n",
-				   ar->htt.op_version);
+				   fw_file->htt_op_version);
 			break;
 		case ATH10K_FW_IE_FW_CODE_SWAP_IMAGE:
 			ath10k_dbg(ar, ATH10K_DBG_BOOT,
 				   "found fw code swap image ie (%zd B)\n",
 				   ie_len);
-			ar->swap.firmware_codeswap_data = data;
-			ar->swap.firmware_codeswap_len = ie_len;
+			fw_file->codeswap_data = data;
+			fw_file->codeswap_len = ie_len;
 			break;
 		default:
 			ath10k_warn(ar, "Unknown FW IE: %u\n",
@@ -1162,12 +1281,15 @@
 		data += ie_len;
 	}
 
-	if (!ar->firmware_data || !ar->firmware_len) {
+	if (ar->is_bmi) {
+		if (!fw_file->firmware_data ||
+		    !fw_file->firmware_len) {
 		ath10k_warn(ar, "No ATH10K_FW_IE_FW_IMAGE found from '%s/%s', skipping\n",
 			    ar->hw_params.fw.dir, name);
 		ret = -ENOMEDIUM;
 		goto err;
 	}
+	}
 
 	return 0;
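
The parser above treats the firmware file as a sequence of IEs, each a 32-bit
id and a 32-bit length followed by a payload padded to a 4-byte boundary. A
standalone sketch of that walk, with assumed field names and host-endian reads
for brevity (the driver reads __le32 fields via le32_to_cpup()):

    #include <stdint.h>
    #include <stddef.h>
    #include <string.h>

    struct fw_ie {
        uint32_t id;   /* IE type, little-endian in the file */
        uint32_t len;  /* payload length, unpadded */
    };

    static void walk_fw_ies(const uint8_t *data, size_t len)
    {
        while (len >= sizeof(struct fw_ie)) {
            struct fw_ie hdr;
            size_t ie_len, padded;

            memcpy(&hdr, data, sizeof(hdr));
            ie_len = hdr.len;                    /* host-endian shortcut */
            data += sizeof(hdr);
            len -= sizeof(hdr);

            padded = (ie_len + 3) & ~(size_t)3;  /* jump over the padding */
            if (padded < ie_len || padded > len)
                break;                           /* truncated file */

            /* dispatch on hdr.id here, consuming ie_len payload bytes */

            data += padded;
            len -= padded;
        }
    }
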
 
@@ -1179,47 +1301,117 @@
 static int ath10k_core_fetch_firmware_files(struct ath10k *ar)
 {
 	int ret;
+	struct ath10k_fw_file *fw_file;
 
+	if (!ar->is_bmi) {
+		fw_file = &ar->normal_mode_fw.fw_file;
+		fw_file->wmi_op_version = ATH10K_FW_WMI_OP_VERSION_TLV;
+		fw_file->htt_op_version = ATH10K_FW_HTT_OP_VERSION_TLV;
+		__set_bit(ATH10K_FW_FEATURE_WOWLAN_SUPPORT,
+			  fw_file->fw_features);
+		__set_bit(WMI_SERVICE_WOW, ar->wmi.svc_map);
+		__set_bit(ATH10K_FW_FEATURE_NO_NWIFI_DECAP_4ADDR_PADDING,
+			  fw_file->fw_features);
+		return 0;
+	}
+
+	if (ar->is_bmi) {
 	/* calibration file is optional, don't check for any errors */
 	ath10k_fetch_cal_file(ar);
+	}
 
 	ar->fw_api = 5;
 	ath10k_dbg(ar, ATH10K_DBG_BOOT, "trying fw api %d\n", ar->fw_api);
 
-	ret = ath10k_core_fetch_firmware_api_n(ar, ATH10K_FW_API5_FILE);
+	ret = ath10k_core_fetch_firmware_api_n(ar, ATH10K_FW_API5_FILE,
+					       &ar->normal_mode_fw.fw_file);
 	if (ret == 0)
 		goto success;
 
 	ar->fw_api = 4;
 	ath10k_dbg(ar, ATH10K_DBG_BOOT, "trying fw api %d\n", ar->fw_api);
 
-	ret = ath10k_core_fetch_firmware_api_n(ar, ATH10K_FW_API4_FILE);
+	ret = ath10k_core_fetch_firmware_api_n(ar, ATH10K_FW_API4_FILE,
+					       &ar->normal_mode_fw.fw_file);
 	if (ret == 0)
 		goto success;
 
 	ar->fw_api = 3;
 	ath10k_dbg(ar, ATH10K_DBG_BOOT, "trying fw api %d\n", ar->fw_api);
 
-	ret = ath10k_core_fetch_firmware_api_n(ar, ATH10K_FW_API3_FILE);
+	ret = ath10k_core_fetch_firmware_api_n(ar, ATH10K_FW_API3_FILE,
+					       &ar->normal_mode_fw.fw_file);
 	if (ret == 0)
 		goto success;
 
 	ar->fw_api = 2;
 	ath10k_dbg(ar, ATH10K_DBG_BOOT, "trying fw api %d\n", ar->fw_api);
 
-	ret = ath10k_core_fetch_firmware_api_n(ar, ATH10K_FW_API2_FILE);
-	if (ret == 0)
+	ret = ath10k_core_fetch_firmware_api_n(ar, ATH10K_FW_API2_FILE,
+					       &ar->normal_mode_fw.fw_file);
+	if (ret)
+		return ret;
+
+success:
+	ath10k_dbg(ar, ATH10K_DBG_BOOT, "using fw api %d\n", ar->fw_api);
+
+	return 0;
+}
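
The chain above tries each firmware API file in descending order and keeps the
first one that loads. A table-driven sketch of the same idea (a standalone demo
with made-up fetch logic; the firmware-N.bin names follow the driver's
ATH10K_FW_APIn_FILE convention):

    #include <stdio.h>
    #include <string.h>

    #define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

    /* pretend fetch: in this demo only the API 4 file exists */
    static int fetch(const char *name)
    {
        return strcmp(name, "firmware-4.bin") ? -1 : 0;
    }

    int main(void)
    {
        static const struct { int api; const char *file; } fw_try[] = {
            { 5, "firmware-5.bin" }, { 4, "firmware-4.bin" },
            { 3, "firmware-3.bin" }, { 2, "firmware-2.bin" },
        };
        size_t i;

        for (i = 0; i < ARRAY_SIZE(fw_try); i++) {
            if (fetch(fw_try[i].file) == 0) {
                printf("using fw api %d\n", fw_try[i].api);
                break;  /* highest supported API wins */
            }
        }
        return 0;
    }
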
+
+static int ath10k_core_pre_cal_download(struct ath10k *ar)
+{
+	int ret;
+
+	ret = ath10k_download_cal_file(ar, ar->pre_cal_file);
+	if (ret == 0) {
+		ar->cal_mode = ATH10K_PRE_CAL_MODE_FILE;
 		goto success;
+	}
 
-	ar->fw_api = 1;
-	ath10k_dbg(ar, ATH10K_DBG_BOOT, "trying fw api %d\n", ar->fw_api);
+	ath10k_dbg(ar, ATH10K_DBG_BOOT,
+		   "boot did not find a pre calibration file, try DT next: %d\n",
+		   ret);
 
-	ret = ath10k_core_fetch_firmware_api_1(ar);
-	if (ret)
+	ret = ath10k_download_cal_dt(ar, "qcom,ath10k-pre-calibration-data");
+	if (ret) {
+		ath10k_dbg(ar, ATH10K_DBG_BOOT,
+			   "unable to load pre cal data from DT: %d\n", ret);
 		return ret;
+	}
+	ar->cal_mode = ATH10K_PRE_CAL_MODE_DT;
 
 success:
-	ath10k_dbg(ar, ATH10K_DBG_BOOT, "using fw api %d\n", ar->fw_api);
+	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot using calibration mode %s\n",
+		   ath10k_cal_mode_str(ar->cal_mode));
+
+	return 0;
+}
+
+static int ath10k_core_pre_cal_config(struct ath10k *ar)
+{
+	int ret;
+
+	ret = ath10k_core_pre_cal_download(ar);
+	if (ret) {
+		ath10k_dbg(ar, ATH10K_DBG_BOOT,
+			   "failed to load pre cal data: %d\n", ret);
+		return ret;
+	}
+
+	ret = ath10k_core_get_board_id_from_otp(ar);
+	if (ret) {
+		ath10k_err(ar, "failed to get board id: %d\n", ret);
+		return ret;
+	}
+
+	ret = ath10k_download_and_run_otp(ar);
+	if (ret) {
+		ath10k_err(ar, "failed to run otp: %d\n", ret);
+		return ret;
+	}
+
+	ath10k_dbg(ar, ATH10K_DBG_BOOT,
+		   "pre cal configuration done successfully\n");
 
 	return 0;
 }
@@ -1228,7 +1420,15 @@
 {
 	int ret;
 
-	ret = ath10k_download_cal_file(ar);
+	ret = ath10k_core_pre_cal_config(ar);
+	if (ret == 0)
+		return 0;
+
+	ath10k_dbg(ar, ATH10K_DBG_BOOT,
+		   "pre cal download procedure failed, try cal file: %d\n",
+		   ret);
+
+	ret = ath10k_download_cal_file(ar, ar->cal_file);
 	if (ret == 0) {
 		ar->cal_mode = ATH10K_CAL_MODE_FILE;
 		goto done;
@@ -1238,14 +1438,24 @@
 		   "boot did not find a calibration file, try DT next: %d\n",
 		   ret);
 
-	ret = ath10k_download_cal_dt(ar);
+	ret = ath10k_download_cal_dt(ar, "qcom,ath10k-calibration-data");
 	if (ret == 0) {
 		ar->cal_mode = ATH10K_CAL_MODE_DT;
 		goto done;
 	}
 
 	ath10k_dbg(ar, ATH10K_DBG_BOOT,
-		   "boot did not find DT entry, try OTP next: %d\n",
+		   "boot did not find DT entry, try target EEPROM next: %d\n",
+		   ret);
+
+	ret = ath10k_download_cal_eeprom(ar);
+	if (ret == 0) {
+		ar->cal_mode = ATH10K_CAL_MODE_EEPROM;
+		goto done;
+	}
+
+	ath10k_dbg(ar, ATH10K_DBG_BOOT,
+		   "boot did not find target EEPROM entry, try OTP next: %d\n",
 		   ret);
 
 	ret = ath10k_download_and_run_otp(ar);
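
Together with the pre-cal path above, the calibration sources now form a fixed
priority chain with OTP as the last resort. A summary sketch (the strings
mirror ath10k_cal_mode_str(); the array itself is only illustrative):

    /* highest priority first */
    static const char * const cal_priority[] = {
        "pre-cal-file",  /* ATH10K_PRE_CAL_MODE_FILE */
        "pre-cal-dt",    /* ATH10K_PRE_CAL_MODE_DT   */
        "file",          /* ATH10K_CAL_MODE_FILE     */
        "dt",            /* ATH10K_CAL_MODE_DT       */
        "eeprom",        /* ATH10K_CAL_MODE_EEPROM   */
        "otp",           /* ATH10K_CAL_MODE_OTP      */
    };
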
@@ -1342,13 +1552,15 @@
 
 	ieee80211_stop_queues(ar->hw);
 	ath10k_drain_tx(ar);
-	complete_all(&ar->scan.started);
-	complete_all(&ar->scan.completed);
-	complete_all(&ar->scan.on_channel);
-	complete_all(&ar->offchan_tx_completed);
-	complete_all(&ar->install_key_done);
-	complete_all(&ar->vdev_setup_done);
-	complete_all(&ar->thermal.wmi_sync);
+	complete(&ar->scan.started);
+	complete(&ar->scan.completed);
+	complete(&ar->scan.on_channel);
+	complete(&ar->offchan_tx_completed);
+	complete(&ar->install_key_done);
+	complete(&ar->vdev_setup_done);
+	complete(&ar->vdev_delete_done);
+	complete(&ar->thermal.wmi_sync);
+	complete(&ar->bss_survey_done);
 	wake_up(&ar->htt.empty_tx_wq);
 	wake_up(&ar->wmi.tx_credits_wq);
 	wake_up(&ar->peer_mapping_wq);
@@ -1386,15 +1598,17 @@
 
 static int ath10k_core_init_firmware_features(struct ath10k *ar)
 {
-	if (test_bit(ATH10K_FW_FEATURE_WMI_10_2, ar->fw_features) &&
-	    !test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features)) {
+	struct ath10k_fw_file *fw_file = &ar->normal_mode_fw.fw_file;
+
+	if (test_bit(ATH10K_FW_FEATURE_WMI_10_2, fw_file->fw_features) &&
+	    !test_bit(ATH10K_FW_FEATURE_WMI_10X, fw_file->fw_features)) {
 		ath10k_err(ar, "feature bits corrupted: 10.2 feature requires 10.x feature to be set as well");
 		return -EINVAL;
 	}
 
-	if (ar->wmi.op_version >= ATH10K_FW_WMI_OP_VERSION_MAX) {
+	if (fw_file->wmi_op_version >= ATH10K_FW_WMI_OP_VERSION_MAX) {
 		ath10k_err(ar, "unsupported WMI OP version (max %d): %d\n",
-			   ATH10K_FW_WMI_OP_VERSION_MAX, ar->wmi.op_version);
+			   ATH10K_FW_WMI_OP_VERSION_MAX, fw_file->wmi_op_version);
 		return -EINVAL;
 	}
 
@@ -1406,7 +1620,7 @@
 		break;
 	case ATH10K_CRYPT_MODE_SW:
 		if (!test_bit(ATH10K_FW_FEATURE_RAW_MODE_SUPPORT,
-			      ar->fw_features)) {
+			      fw_file->fw_features)) {
 			ath10k_err(ar, "cryptmode > 0 requires raw mode support from firmware");
 			return -EINVAL;
 		}
@@ -1425,7 +1639,7 @@
 
 	if (rawmode) {
 		if (!test_bit(ATH10K_FW_FEATURE_RAW_MODE_SUPPORT,
-			      ar->fw_features)) {
+			      fw_file->fw_features)) {
 			ath10k_err(ar, "rawmode = 1 requires support from firmware");
 			return -EINVAL;
 		}
@@ -1450,19 +1664,19 @@
 	/* Backwards compatibility for firmwares without
 	 * ATH10K_FW_IE_WMI_OP_VERSION.
 	 */
-	if (ar->wmi.op_version == ATH10K_FW_WMI_OP_VERSION_UNSET) {
-		if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features)) {
+	if (fw_file->wmi_op_version == ATH10K_FW_WMI_OP_VERSION_UNSET) {
+		if (test_bit(ATH10K_FW_FEATURE_WMI_10X, fw_file->fw_features)) {
 			if (test_bit(ATH10K_FW_FEATURE_WMI_10_2,
-				     ar->fw_features))
-				ar->wmi.op_version = ATH10K_FW_WMI_OP_VERSION_10_2;
+				     fw_file->fw_features))
+				fw_file->wmi_op_version = ATH10K_FW_WMI_OP_VERSION_10_2;
 			else
-				ar->wmi.op_version = ATH10K_FW_WMI_OP_VERSION_10_1;
+				fw_file->wmi_op_version = ATH10K_FW_WMI_OP_VERSION_10_1;
 		} else {
-			ar->wmi.op_version = ATH10K_FW_WMI_OP_VERSION_MAIN;
+			fw_file->wmi_op_version = ATH10K_FW_WMI_OP_VERSION_MAIN;
 		}
 	}
 
-	switch (ar->wmi.op_version) {
+	switch (fw_file->wmi_op_version) {
 	case ATH10K_FW_WMI_OP_VERSION_MAIN:
 		ar->max_num_peers = TARGET_NUM_PEERS;
 		ar->max_num_stations = TARGET_NUM_STATIONS;
@@ -1475,8 +1689,13 @@
 	case ATH10K_FW_WMI_OP_VERSION_10_1:
 	case ATH10K_FW_WMI_OP_VERSION_10_2:
 	case ATH10K_FW_WMI_OP_VERSION_10_2_4:
+		if (ath10k_peer_stats_enabled(ar)) {
+			ar->max_num_peers = TARGET_10X_TX_STATS_NUM_PEERS;
+			ar->max_num_stations = TARGET_10X_TX_STATS_NUM_STATIONS;
+		} else {
 		ar->max_num_peers = TARGET_10X_NUM_PEERS;
 		ar->max_num_stations = TARGET_10X_NUM_STATIONS;
+		}
 		ar->max_num_vdevs = TARGET_10X_NUM_VDEVS;
 		ar->htt.max_num_pending_tx = TARGET_10X_NUM_MSDU_DESC;
 		ar->fw_stats_req_mask = WMI_STAT_PEER;
@@ -1487,11 +1706,16 @@
 		ar->max_num_stations = TARGET_TLV_NUM_STATIONS;
 		ar->max_num_vdevs = TARGET_TLV_NUM_VDEVS;
 		ar->max_num_tdls_vdevs = TARGET_TLV_NUM_TDLS_VDEVS;
+		if (QCA_REV_WCN3990(ar))
+			ar->htt.max_num_pending_tx =
+						TARGET_HL_1_0_NUM_MSDU_DESC;
+		else
 		ar->htt.max_num_pending_tx = TARGET_TLV_NUM_MSDU_DESC;
 		ar->wow.max_num_patterns = TARGET_TLV_NUM_WOW_PATTERNS;
 		ar->fw_stats_req_mask = WMI_STAT_PDEV | WMI_STAT_VDEV |
 			WMI_STAT_PEER;
 		ar->max_spatial_stream = WMI_MAX_SPATIAL_STREAM;
+		ar->wmi.mgmt_max_num_pending_tx = TARGET_TLV_MGMT_NUM_MSDU_DESC;
 		break;
 	case ATH10K_FW_WMI_OP_VERSION_10_4:
 		ar->max_num_peers = TARGET_10_4_NUM_PEERS;
@@ -1499,9 +1723,15 @@
 		ar->num_active_peers = TARGET_10_4_ACTIVE_PEERS;
 		ar->max_num_vdevs = TARGET_10_4_NUM_VDEVS;
 		ar->num_tids = TARGET_10_4_TGT_NUM_TIDS;
+		ar->fw_stats_req_mask = WMI_10_4_STAT_PEER |
+					WMI_10_4_STAT_PEER_EXTD;
+		ar->max_spatial_stream = ar->hw_params.max_spatial_stream;
+
+		if (test_bit(ATH10K_FW_FEATURE_PEER_FLOW_CONTROL,
+			     fw_file->fw_features))
+			ar->htt.max_num_pending_tx = TARGET_10_4_NUM_MSDU_DESC_PFC;
+		else
 		ar->htt.max_num_pending_tx = TARGET_10_4_NUM_MSDU_DESC;
-		ar->fw_stats_req_mask = WMI_STAT_PEER;
-		ar->max_spatial_stream = WMI_10_4_MAX_SPATIAL_STREAM;
 		break;
 	case ATH10K_FW_WMI_OP_VERSION_UNSET:
 	case ATH10K_FW_WMI_OP_VERSION_MAX:
@@ -1512,23 +1742,23 @@
 	/* Backwards compatibility for firmwares without
 	 * ATH10K_FW_IE_HTT_OP_VERSION.
 	 */
-	if (ar->htt.op_version == ATH10K_FW_HTT_OP_VERSION_UNSET) {
-		switch (ar->wmi.op_version) {
+	if (fw_file->htt_op_version == ATH10K_FW_HTT_OP_VERSION_UNSET) {
+		switch (fw_file->wmi_op_version) {
 		case ATH10K_FW_WMI_OP_VERSION_MAIN:
-			ar->htt.op_version = ATH10K_FW_HTT_OP_VERSION_MAIN;
+			fw_file->htt_op_version = ATH10K_FW_HTT_OP_VERSION_MAIN;
 			break;
 		case ATH10K_FW_WMI_OP_VERSION_10_1:
 		case ATH10K_FW_WMI_OP_VERSION_10_2:
 		case ATH10K_FW_WMI_OP_VERSION_10_2_4:
-			ar->htt.op_version = ATH10K_FW_HTT_OP_VERSION_10_1;
+			fw_file->htt_op_version = ATH10K_FW_HTT_OP_VERSION_10_1;
 			break;
 		case ATH10K_FW_WMI_OP_VERSION_TLV:
-			ar->htt.op_version = ATH10K_FW_HTT_OP_VERSION_TLV;
+			fw_file->htt_op_version = ATH10K_FW_HTT_OP_VERSION_TLV;
 			break;
 		case ATH10K_FW_WMI_OP_VERSION_10_4:
 		case ATH10K_FW_WMI_OP_VERSION_UNSET:
 		case ATH10K_FW_WMI_OP_VERSION_MAX:
-			WARN_ON(1);
+			ath10k_err(ar, "htt op version not found from fw meta data");
 			return -EINVAL;
 		}
 	}
@@ -1536,14 +1766,69 @@
 	return 0;
 }
 
-int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode)
+static int ath10k_core_reset_rx_filter(struct ath10k *ar)
+{
+	int ret;
+	int vdev_id;
+	int vdev_type;
+	int vdev_subtype;
+	const u8 *vdev_addr;
+
+	vdev_id = 0;
+	vdev_type = WMI_VDEV_TYPE_STA;
+	vdev_subtype = ath10k_wmi_get_vdev_subtype(ar, WMI_VDEV_SUBTYPE_NONE);
+	vdev_addr = ar->mac_addr;
+
+	ret = ath10k_wmi_vdev_create(ar, vdev_id, vdev_type, vdev_subtype,
+				     vdev_addr);
+	if (ret) {
+		ath10k_err(ar, "failed to create dummy vdev: %d\n", ret);
+		return ret;
+	}
+
+	ret = ath10k_wmi_vdev_delete(ar, vdev_id);
+	if (ret) {
+		ath10k_err(ar, "failed to delete dummy vdev: %d\n", ret);
+		return ret;
+	}
+
+	/* WMI and HTT may use separate HIF pipes and are not guaranteed to be
+	 * serialized properly implicitly.
+	 *
+	 * Moreover (most) WMI commands have no explicit acknowledgements. It
+	 * is possible to infer it implicitly by poking firmware with an echo
+	 * command - getting a reply means all preceding commands have been
+	 * (mostly) processed.
+	 *
+	 * In case of vdev create/delete this is sufficient.
+	 *
+	 * Without this it's possible to end up with a race when HTT Rx ring is
+	 * started before vdev create/delete hack is complete allowing a short
+	 * window of opportunity to receive (and Tx ACK) a bunch of frames.
+	 */
+	ret = ath10k_wmi_barrier(ar);
+	if (ret) {
+		ath10k_err(ar, "failed to ping firmware: %d\n", ret);
+		return ret;
+	}
+
+	return 0;
+}
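
A minimal sketch of that echo-barrier idea, assuming a hypothetical send helper
and that the echo reply handler calls complete() on ar->wmi.barrier (the
completion this patch adds to struct ath10k_wmi); this is a sketch, not the
driver's actual ath10k_wmi_barrier() implementation:

    #include <linux/completion.h>

    /* hypothetical helper, assumed to post a WMI echo request */
    int ath10k_wmi_echo_sketch(struct ath10k *ar, u32 cookie);

    static int wmi_barrier_sketch(struct ath10k *ar)
    {
        int ret;

        reinit_completion(&ar->wmi.barrier);

        ret = ath10k_wmi_echo_sketch(ar, 0x1234); /* arbitrary cookie */
        if (ret)
            return ret;

        /* the reply implies all earlier WMI commands were consumed */
        if (!wait_for_completion_timeout(&ar->wmi.barrier, 3 * HZ))
            return -ETIMEDOUT;

        return 0;
    }
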
+
+int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode,
+		      const struct ath10k_fw_components *fw)
 {
 	int status;
+	u32 val;
 
 	lockdep_assert_held(&ar->conf_mutex);
 
 	clear_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags);
 
+	ar->running_fw = fw;
+
+	if (ar->is_bmi) {
+
 	ath10k_bmi_start(ar);
 
 	if (ath10k_init_configure_target(ar)) {
@@ -1558,26 +1843,28 @@
 	/* Some of the qca988x solutions have a global reset issue
 	 * during target initialization. Bypassing PLL setting before
 	 * downloading firmware and letting the SoC run on REF_CLK is
-	 * fixing the problem. Corresponding firmware change is also needed
-	 * to set the clock source once the target is initialized.
+		 * fixing the problem. A corresponding firmware change is
+		 * also needed to set the clock source once the target is
+		 * initialized.
 	 */
 	if (test_bit(ATH10K_FW_FEATURE_SUPPORTS_SKIP_CLOCK_INIT,
-		     ar->fw_features)) {
+			     ar->running_fw->fw_file.fw_features)) {
 		status = ath10k_bmi_write32(ar, hi_skip_clock_init, 1);
 		if (status) {
-			ath10k_err(ar, "could not write to skip_clock_init: %d\n",
+				ath10k_err(ar, "skip_clock_init failed: %d\n",
 				   status);
 			goto err;
 		}
 	}
 
-	status = ath10k_download_fw(ar, mode);
+		status = ath10k_download_fw(ar);
 	if (status)
 		goto err;
 
 	status = ath10k_init_uart(ar);
 	if (status)
 		goto err;
+	}
 
 	ar->htc.htc_ops.target_send_suspend_complete =
 		ath10k_send_suspend_complete;
@@ -1588,9 +1875,11 @@
 		goto err;
 	}
 
+	if (ar->is_bmi) {
 	status = ath10k_bmi_done(ar);
 	if (status)
 		goto err;
+	}
 
 	status = ath10k_wmi_attach(ar);
 	if (status) {
@@ -1648,6 +1937,12 @@
 		goto err_hif_stop;
 	}
 
+	status = ath10k_pktlog_connect(ar);
+	if (status) {
+		ath10k_err(ar, "could not connect pktlog: %d\n", status);
+		goto err_hif_stop;
+	}
+
 	status = ath10k_htc_start(&ar->htc);
 	if (status) {
 		ath10k_err(ar, "failed to start htc: %d\n", status);
@@ -1665,6 +1960,33 @@
 	ath10k_dbg(ar, ATH10K_DBG_BOOT, "firmware %s booted\n",
 		   ar->hw->wiphy->fw_version);
 
+	if (test_bit(WMI_SERVICE_EXT_RES_CFG_SUPPORT, ar->wmi.svc_map)) {
+		val = 0;
+		if (ath10k_peer_stats_enabled(ar))
+			val = WMI_10_4_PEER_STATS;
+
+		if (test_bit(WMI_SERVICE_BSS_CHANNEL_INFO_64, ar->wmi.svc_map))
+			val |= WMI_10_4_BSS_CHANNEL_INFO_64;
+
+		/* 10.4 firmware supports BT-Coex without reloading firmware
+		 * via pdev param. To support Bluetooth coexistence pdev param,
+		 * WMI_COEX_GPIO_SUPPORT of extended resource config should be
+		 * enabled always.
+		 */
+		if (test_bit(WMI_SERVICE_COEX_GPIO, ar->wmi.svc_map) &&
+		    test_bit(ATH10K_FW_FEATURE_BTCOEX_PARAM,
+			     ar->running_fw->fw_file.fw_features))
+			val |= WMI_10_4_COEX_GPIO_SUPPORT;
+
+		status = ath10k_mac_ext_resource_config(ar, val);
+		if (status) {
+			ath10k_err(ar,
+				   "failed to send ext resource cfg command : %d\n",
+				   status);
+			goto err_hif_stop;
+		}
+	}
+
 	status = ath10k_wmi_cmd_init(ar);
 	if (status) {
 		ath10k_err(ar, "could not send WMI init command (%d)\n",
@@ -1678,12 +2000,37 @@
 		goto err_hif_stop;
 	}
 
+	/* Some firmware revisions do not properly set up hardware rx filter
+	 * registers.
+	 *
+	 * A known example from QCA9880 and 10.2.4 is that MAC_PCU_ADDR1_MASK
+	 * is filled with 0s instead of 1s allowing HW to respond with ACKs to
+	 * any frames that matches MAC_PCU_RX_FILTER which is also
+	 * misconfigured to accept anything.
+	 *
+	 * The ADDR1 is programmed using internal firmware structure field and
+	 * can't be (easily/sanely) reached from the driver explicitly. It is
+	 * possible to implicitly make it correct by creating a dummy vdev and
+	 * then deleting it.
+	 */
+	if (!QCA_REV_WCN3990(ar)) {
+		status = ath10k_core_reset_rx_filter(ar);
+		if (status) {
+			ath10k_err(ar, "failed to reset rx filter: %d\n",
+				   status);
+			goto err_hif_stop;
+		}
+	}
+
 	status = ath10k_htt_rx_ring_refill(ar);
 	if (status) {
 		ath10k_err(ar, "failed to refill htt rx ring: %d\n", status);
 		goto err_hif_stop;
 	}
 
+	if (ar->max_num_vdevs >= 64)
+		ar->free_vdev_map = 0xFFFFFFFFFFFFFFFFLL;
+	else
 	ar->free_vdev_map = (1LL << ar->max_num_vdevs) - 1;
 
 	INIT_LIST_HEAD(&ar->arvifs);
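
The free_vdev_map branch above exists because shifting a 64-bit value by 64 or
more is undefined behaviour in C, so the all-ones mask has to be special-cased.
A standalone sketch of the same computation:

    #include <stdio.h>

    static unsigned long long vdev_mask(unsigned int n)
    {
        if (n >= 64)
            return ~0ULL;            /* every vdev slot free */
        return (1ULL << n) - 1;      /* low n bits set */
    }

    int main(void)
    {
        printf("%llx\n", vdev_mask(16)); /* ffff */
        printf("%llx\n", vdev_mask(64)); /* ffffffffffffffff */
        return 0;
    }
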
@@ -1747,7 +2094,7 @@
 	/* try to suspend target */
 	if (ar->state != ATH10K_STATE_RESTARTING &&
 	    ar->state != ATH10K_STATE_UTF)
-		ath10k_wait_for_suspend(ar, WMI_PDEV_SUSPEND_AND_DISABLE_INTR);
+		ath10k_wait_for_suspend(ar, ar->hw_values->pdev_suspend_option);
 
 	ath10k_hif_stop(ar);
 	ath10k_htt_tx_free(&ar->htt);
@@ -1771,15 +2118,16 @@
 		return ret;
 	}
 
+	if (ar->is_bmi) {
 	memset(&target_info, 0, sizeof(target_info));
 	ret = ath10k_bmi_get_target_info(ar, &target_info);
 	if (ret) {
 		ath10k_err(ar, "could not get target info (%d)\n", ret);
 		goto err_power_down;
 	}
-
 	ar->target_version = target_info.version;
 	ar->hw->wiphy->hw_version = target_info.version;
+	}
 
 	ret = ath10k_init_hw_params(ar);
 	if (ret) {
@@ -1793,11 +2141,28 @@
 		goto err_power_down;
 	}
 
+	BUILD_BUG_ON(sizeof(ar->hw->wiphy->fw_version) !=
+			sizeof(ar->normal_mode_fw.fw_file.fw_version));
+	memcpy(ar->hw->wiphy->fw_version,
+	       ar->normal_mode_fw.fw_file.fw_version,
+	       sizeof(ar->hw->wiphy->fw_version));
+	ath10k_debug_print_hwfw_info(ar);
+
+	if (ar->is_bmi) {
+		ret = ath10k_core_pre_cal_download(ar);
+		if (ret) {
+			/* pre calibration data download is not necessary
+			 * for all the chipsets. Ignore failures and continue.
+			 */
+			ath10k_dbg(ar, ATH10K_DBG_BOOT,
+				   "could not load pre cal data: %d\n", ret);
+		}
+
 	ret = ath10k_core_get_board_id_from_otp(ar);
 	if (ret && ret != -EOPNOTSUPP) {
-		ath10k_err(ar, "failed to get board id from otp for qca99x0: %d\n",
+			ath10k_err(ar, "failed to get board id from otp: %d\n",
 			   ret);
-		return ret;
+			goto err_free_firmware_files;
 	}
 
 	ret = ath10k_core_fetch_board_file(ar);
@@ -1806,6 +2171,9 @@
 		goto err_free_firmware_files;
 	}
 
+		ath10k_debug_print_board_info(ar);
+	}
+
 	ret = ath10k_core_init_firmware_features(ar);
 	if (ret) {
 		ath10k_err(ar, "fatal problem with firmware features: %d\n",
@@ -1813,22 +2181,26 @@
 		goto err_free_firmware_files;
 	}
 
-	ret = ath10k_swap_code_seg_init(ar);
+	if (ar->is_bmi) {
+		ret = ath10k_swap_code_seg_init(ar,
+						&ar->normal_mode_fw.fw_file);
 	if (ret) {
-		ath10k_err(ar, "failed to initialize code swap segment: %d\n",
+			ath10k_err(ar, "failed to init code swap segment: %d\n",
 			   ret);
 		goto err_free_firmware_files;
 	}
+	}
 
 	mutex_lock(&ar->conf_mutex);
 
-	ret = ath10k_core_start(ar, ATH10K_FIRMWARE_MODE_NORMAL);
+	ret = ath10k_core_start(ar, ATH10K_FIRMWARE_MODE_NORMAL,
+				&ar->normal_mode_fw);
 	if (ret) {
 		ath10k_err(ar, "could not init core (%d)\n", ret);
 		goto err_unlock;
 	}
 
-	ath10k_print_driver_info(ar);
+	ath10k_debug_print_boot_info(ar);
 	ath10k_core_stop(ar);
 
 	mutex_unlock(&ar->conf_mutex);
@@ -1853,6 +2225,9 @@
 	struct ath10k *ar = container_of(work, struct ath10k, register_work);
 	int status;
 
+	/* peer stats are enabled by default */
+	set_bit(ATH10K_FLAG_PEER_STATS, &ar->dev_flags);
+
 	status = ath10k_core_probe_fw(ar);
 	if (status) {
 		ath10k_err(ar, "could not probe fw (%d)\n", status);
@@ -1958,20 +2333,52 @@
 	ar->hif.ops = hif_ops;
 	ar->hif.bus = bus;
 
+	/* By default, assume the chip uses BMI, as most of the
+	 * existing chipsets do; set this to false explicitly for
+	 * chipsets that do not support it.
+	 */
+	ar->is_bmi = true;
+
 	switch (hw_rev) {
 	case ATH10K_HW_QCA988X:
+	case ATH10K_HW_QCA9887:
 		ar->regs = &qca988x_regs;
+		ar->hw_ce_regs = &qcax_ce_regs;
 		ar->hw_values = &qca988x_values;
 		break;
 	case ATH10K_HW_QCA6174:
 	case ATH10K_HW_QCA9377:
 		ar->regs = &qca6174_regs;
+		ar->hw_ce_regs = &qcax_ce_regs;
 		ar->hw_values = &qca6174_values;
 		break;
 	case ATH10K_HW_QCA99X0:
+	case ATH10K_HW_QCA9984:
 		ar->regs = &qca99x0_regs;
+		ar->hw_ce_regs = &qcax_ce_regs;
 		ar->hw_values = &qca99x0_values;
 		break;
+	case ATH10K_HW_QCA9888:
+		ar->regs = &qca99x0_regs;
+		ar->hw_ce_regs = &qcax_ce_regs;
+		ar->hw_values = &qca9888_values;
+		break;
+	case ATH10K_HW_QCA4019:
+		ar->regs = &qca4019_regs;
+		ar->hw_ce_regs = &qcax_ce_regs;
+		ar->hw_values = &qca4019_values;
+		break;
+	case ATH10K_HW_WCN3990:
+		ar->regs = &wcn3990_regs;
+		ar->hw_ce_regs = &wcn3990_ce_regs;
+		ar->hw_values = &wcn3990_values;
+		/* The WCN3990 chipset is not BMI based */
+		ar->is_bmi = false;
+		ar->fw_flags = &wcn3990_fw_flags;
+		ar->shadow_reg_value = &wcn3990_shadow_reg_value;
+		ar->shadow_reg_address = &wcn3990_shadow_reg_address;
+		ar->rri_on_ddr = true;
+		break;
 	default:
 		ath10k_err(ar, "unsupported core hardware revision %d\n",
 			   hw_rev);
@@ -1987,7 +2394,10 @@
 
 	init_completion(&ar->install_key_done);
 	init_completion(&ar->vdev_setup_done);
+	init_completion(&ar->vdev_delete_done);
 	init_completion(&ar->thermal.wmi_sync);
+	init_completion(&ar->bss_survey_done);
+	init_completion(&ar->peer_delete_done);
 
 	INIT_DELAYED_WORK(&ar->scan.timeout, ath10k_scan_timeout_work);
 
@@ -2001,7 +2411,10 @@
 
 	mutex_init(&ar->conf_mutex);
 	spin_lock_init(&ar->data_lock);
+	spin_lock_init(&ar->txqs_lock);
+	spin_lock_init(&ar->datapath_rx_stat_lock);
 
+	INIT_LIST_HEAD(&ar->txqs);
 	INIT_LIST_HEAD(&ar->peers);
 	init_waitqueue_head(&ar->peer_mapping_wq);
 	init_waitqueue_head(&ar->htt.empty_tx_wq);
@@ -2017,6 +2430,8 @@
 	INIT_WORK(&ar->register_work, ath10k_core_register_work);
 	INIT_WORK(&ar->restart_work, ath10k_core_restart);
 
+	init_dummy_netdev(&ar->napi_dev);
+
 	ret = ath10k_debug_create(ar);
 	if (ret)
 		goto err_free_aux_wq;
@@ -2050,5 +2465,5 @@
 EXPORT_SYMBOL(ath10k_core_destroy);
 
 MODULE_AUTHOR("Qualcomm Atheros");
-MODULE_DESCRIPTION("Core module for QCA988X PCIe devices.");
+MODULE_DESCRIPTION("Core module for Qualcomm Atheros 802.11ac wireless LAN cards.");
 MODULE_LICENSE("Dual BSD/GPL");
diff -ruw linux-4.4.115/drivers/net/wireless/ath/ath10k/core.h linux-4.4.115-fbx/drivers/net/wireless/ath/ath10k/core.h
--- linux-4.4.115/drivers/net/wireless/ath/ath10k/core.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/net/wireless/ath/ath10k/core.h	2019-01-22 16:16:25.415263721 +0100
@@ -1,6 +1,6 @@
 /*
  * Copyright (c) 2005-2011 Atheros Communications Inc.
- * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ * Copyright (c) 2011-2013, 2017 Qualcomm Atheros, Inc.
  *
  * Permission to use, copy, modify, and/or distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above
@@ -24,6 +24,8 @@
 #include <linux/pci.h>
 #include <linux/uuid.h>
 #include <linux/time.h>
+#include <linux/inetdevice.h>
+#include <soc/qcom/socinfo.h>
 
 #include "htt.h"
 #include "htc.h"
@@ -65,10 +67,30 @@
 #define ATH10K_KEEPALIVE_MAX_IDLE 3895
 #define ATH10K_KEEPALIVE_MAX_UNRESPONSIVE 3900
 
+/* NAPI poll budget */
+#define ATH10K_NAPI_BUDGET      64
+#define ATH10K_NAPI_QUOTA_LIMIT 60
+
+#define ATH10K_RX_MCS_MIN           0
+#define ATH10K_RX_HT_MCS_MAX        32
+#define ATH10K_RX_VHT_RATEIDX_MAX   9
+#define ATH10K_RX_VHT_MCS_MAX       20  /* For 2x2 */
+#define ATH10K_RX_NSS_MIN           0
+#define ATH10K_RX_NSS_MAX           5
+
+enum ath10k_datapath_rx_band {
+	ATH10K_BAND_MIN,
+	ATH10K_BAND_2GHZ = ATH10K_BAND_MIN,
+	ATH10K_BAND_5GHZ,
+	ATH10K_BAND_MAX,
+};
+
 struct ath10k;
 
 enum ath10k_bus {
 	ATH10K_BUS_PCI,
+	ATH10K_BUS_AHB,
+	ATH10K_BUS_SNOC,
 };
 
 static inline const char *ath10k_bus_str(enum ath10k_bus bus)
@@ -76,31 +98,30 @@
 	switch (bus) {
 	case ATH10K_BUS_PCI:
 		return "pci";
+	case ATH10K_BUS_AHB:
+		return "ahb";
+	case ATH10K_BUS_SNOC:
+		return "snoc";
 	}
 
 	return "unknown";
 }
 
+enum ath10k_skb_flags {
+	ATH10K_SKB_F_NO_HWCRYPT = BIT(0),
+	ATH10K_SKB_F_DTIM_ZERO = BIT(1),
+	ATH10K_SKB_F_DELIVER_CAB = BIT(2),
+	ATH10K_SKB_F_MGMT = BIT(3),
+	ATH10K_SKB_F_QOS = BIT(4),
+};
+
 struct ath10k_skb_cb {
 	dma_addr_t paddr;
+	u8 flags;
 	u8 eid;
-	u8 vdev_id;
-	enum ath10k_hw_txrx_mode txmode;
-	bool is_protected;
-
-	struct {
-		u8 tid;
-		u16 freq;
-		bool is_offchan;
-		bool nohwcrypt;
-		struct ath10k_htt_txbuf *txbuf;
-		u32 txbuf_paddr;
-	} __packed htt;
-
-	struct {
-		bool dtim_zero;
-		bool deliver_cab;
-	} bcn;
+	u16 msdu_id;
+	struct ieee80211_vif *vif;
+	struct ieee80211_txq *txq;
 } __packed;
 
 struct ath10k_skb_rxcb {
@@ -141,17 +162,22 @@
 };
 
 struct ath10k_wmi {
-	enum ath10k_fw_wmi_op_version op_version;
 	enum ath10k_htc_ep_id eid;
 	struct completion service_ready;
 	struct completion unified_ready;
+	struct completion barrier;
 	wait_queue_head_t tx_credits_wq;
 	DECLARE_BITMAP(svc_map, WMI_SERVICE_MAX);
 	struct wmi_cmd_map *cmd;
 	struct wmi_vdev_param_map *vdev_param;
 	struct wmi_pdev_param_map *pdev_param;
 	const struct wmi_ops *ops;
+	const struct wmi_peer_flags_map *peer_flags;
 
+	u32 mgmt_max_num_pending_tx;
+	struct idr mgmt_pending_tx;
+	/* Protects access to mgmt_pending_tx, mgmt_max_num_pending_tx */
+	spinlock_t mgmt_tx_lock;
 	u32 num_mem_chunks;
 	u32 rx_decap_mode;
 	struct ath10k_mem_chunk mem_chunks[WMI_MAX_MEM_REQS];
@@ -164,6 +190,14 @@
 	u32 peer_rssi;
 	u32 peer_tx_rate;
 	u32 peer_rx_rate; /* 10x only */
+	u32 rx_duration;
+};
+
+struct ath10k_fw_extd_stats_peer {
+	struct list_head list;
+
+	u8 peer_macaddr[ETH_ALEN];
+	u32 rx_duration;
 };
 
 struct ath10k_fw_stats_vdev {
@@ -190,10 +224,10 @@
 
 	/* PDEV stats */
 	s32 ch_noise_floor;
-	u32 tx_frame_count;
-	u32 rx_frame_count;
-	u32 rx_clear_count;
-	u32 cycle_count;
+	u32 tx_frame_count; /* Cycles spent transmitting frames */
+	u32 rx_frame_count; /* Cycles spent receiving frames */
+	u32 rx_clear_count; /* Total channel busy time, evidently */
+	u32 cycle_count; /* Total on-channel time */
 	u32 phy_err_count;
 	u32 chan_tx_power;
 	u32 ack_rx_bad;
@@ -257,9 +291,11 @@
 };
 
 struct ath10k_fw_stats {
+	bool extended;
 	struct list_head pdevs;
 	struct list_head vdevs;
 	struct list_head peers;
+	struct list_head peers_extd;
 };
 
 #define ATH10K_TPC_TABLE_TYPE_FLAG	1
@@ -298,6 +334,9 @@
 
 struct ath10k_peer {
 	struct list_head list;
+	struct ieee80211_vif *vif;
+	struct ieee80211_sta *sta;
+
 	int vdev_id;
 	u8 addr[ETH_ALEN];
 	DECLARE_BITMAP(peer_ids, ATH10K_MAX_NUM_PEER_IDS);
@@ -306,6 +345,12 @@
 	struct ieee80211_key_conf *keys[WMI_MAX_KEY_INDEX + 1];
 };
 
+struct ath10k_txq {
+	struct list_head list;
+	unsigned long num_fw_queued;
+	unsigned long num_push_allowed;
+};
+
 struct ath10k_sta {
 	struct ath10k_vif *arvif;
 
@@ -314,16 +359,19 @@
 	u32 bw;
 	u32 nss;
 	u32 smps;
+	u16 peer_id;
 
 	struct work_struct update_wk;
 
 #ifdef CONFIG_MAC80211_DEBUGFS
 	/* protected by conf_mutex */
 	bool aggr_mode;
+	u64 rx_duration;
 #endif
 };
 
 #define ATH10K_VDEV_SETUP_TIMEOUT_HZ (5*HZ)
+#define ATH10K_VDEV_DELETE_TIMEOUT_HZ	(5 * HZ)
 
 enum ath10k_beacon_state {
 	ATH10K_BEACON_SCHEDULED = 0,
@@ -335,6 +383,7 @@
 	struct list_head list;
 
 	u32 vdev_id;
+	u16 peer_id;
 	enum wmi_vdev_type vdev_type;
 	enum wmi_vdev_subtype vdev_subtype;
 	u32 beacon_interval;
@@ -386,6 +435,9 @@
 	struct work_struct ap_csa_work;
 	struct delayed_work connection_loss_work;
 	struct cfg80211_bitrate_mask bitrate_mask;
+	struct wmi_ns_arp_offload_req arp_offload;
+	struct wmi_ns_arp_offload_req ns_offload;
+	struct wmi_gtk_rekey_data gtk_rekey_data;
 };
 
 struct ath10k_vif_iter {
@@ -420,11 +472,13 @@
 	struct completion tpc_complete;
 
 	/* protected by conf_mutex */
-	u32 fw_dbglog_mask;
+	u64 fw_dbglog_mask;
 	u32 fw_dbglog_level;
 	u32 pktlog_filter;
+	enum ath10k_htc_ep_id eid;
 	u32 reg_addr;
 	u32 nf_cal_period;
+	void *cal_data;
 
 	struct ath10k_fw_crash_data *fw_crash_data;
 };
@@ -512,6 +566,32 @@
 	/* Firmware Supports Adaptive CCA*/
 	ATH10K_FW_FEATURE_SUPPORTS_ADAPTIVE_CCA = 11,
 
+	/* Firmware supports management frame protection */
+	ATH10K_FW_FEATURE_MFP_SUPPORT = 12,
+
+	/* Firmware supports pull-push model where host shares its software
+	 * queue state with firmware and firmware generates fetch requests
+	 * telling host which queues to dequeue tx from.
+	 *
+	 * Primary function of this is improved MU-MIMO performance with
+	 * multiple clients.
+	 */
+	ATH10K_FW_FEATURE_PEER_FLOW_CONTROL = 13,
+
+	/* Firmware supports BT-Coex without reloading firmware via pdev param.
+	 * To support Bluetooth coexistence pdev param, WMI_COEX_GPIO_SUPPORT of
+	 * extended resource config should be enabled always. This firmware IE
+	 * is used to configure WMI_COEX_GPIO_SUPPORT.
+	 */
+	ATH10K_FW_FEATURE_BTCOEX_PARAM = 14,
+
+	/* Older firmware with HTT delivers incorrect tx status for null func
+	 * frames to driver, but this is fixed in 10.2 and 10.4 firmware versions.
+	 * Also this workaround results in reporting of incorrect null func
+	 * status for 10.4. This flag is used to skip the workaround.
+	 */
+	ATH10K_FW_FEATURE_SKIP_NULL_FUNC_WAR = 15,
+
 	/* keep last */
 	ATH10K_FW_FEATURE_COUNT,
 };
@@ -534,12 +614,21 @@
 
 	/* Disable HW crypto engine */
 	ATH10K_FLAG_HW_CRYPTO_DISABLED,
+
+	/* Bluetooth coexistence enabled */
+	ATH10K_FLAG_BTCOEX,
+
+	/* Per Station statistics service */
+	ATH10K_FLAG_PEER_STATS,
 };
 
 enum ath10k_cal_mode {
 	ATH10K_CAL_MODE_FILE,
 	ATH10K_CAL_MODE_OTP,
 	ATH10K_CAL_MODE_DT,
+	ATH10K_PRE_CAL_MODE_FILE,
+	ATH10K_PRE_CAL_MODE_DT,
+	ATH10K_CAL_MODE_EEPROM,
 };
 
 enum ath10k_crypt_mode {
@@ -558,6 +647,12 @@
 		return "otp";
 	case ATH10K_CAL_MODE_DT:
 		return "dt";
+	case ATH10K_PRE_CAL_MODE_FILE:
+		return "pre-cal-file";
+	case ATH10K_PRE_CAL_MODE_DT:
+		return "pre-cal-dt";
+	case ATH10K_CAL_MODE_EEPROM:
+		return "eeprom";
 	}
 
 	return "unknown";
@@ -591,11 +686,68 @@
 	ATH10K_TX_PAUSE_MAX,
 };
 
+struct fw_flag {
+	u32 flags;
+};
+
+struct ath10k_fw_file {
+	const struct firmware *firmware;
+
+	char fw_version[ETHTOOL_FWVERS_LEN];
+
+	DECLARE_BITMAP(fw_features, ATH10K_FW_FEATURE_COUNT);
+
+	enum ath10k_fw_wmi_op_version wmi_op_version;
+	enum ath10k_fw_htt_op_version htt_op_version;
+
+	const void *firmware_data;
+	size_t firmware_len;
+
+	const void *otp_data;
+	size_t otp_len;
+
+	const void *codeswap_data;
+	size_t codeswap_len;
+
+	/* The original idea of struct ath10k_fw_file was that it only
+	 * contains struct firmware and pointers to various parts (actual
+	 * firmware binary, otp, metadata etc) of the file. This seg_info
+	 * is actually created separately, but as it is used similarly to
+	 * the other firmware components it's more convenient to have it
+	 * here.
+	 */
+	struct ath10k_swap_code_seg_info *firmware_swap_code_seg_info;
+};
+
+struct ath10k_fw_components {
+	const struct firmware *board;
+	const void *board_data;
+	size_t board_len;
+
+	struct ath10k_fw_file fw_file;
+};
+
+struct datapath_rx_stats {
+	u32 no_of_packets;
+	u32 short_gi_pkts;
+	u32 ht_rate_indx[ATH10K_RX_HT_MCS_MAX + 1];
+	u32 vht_rate_indx[ATH10K_RX_VHT_MCS_MAX + 1];
+	u32 ht_rate_packets;
+	u32 vht_rate_packets;
+	u32 legacy_pkt;
+	u32 nss[ATH10K_RX_NSS_MAX + 1];
+	u32 num_pkts_40Mhz;
+	u32 num_pkts_80Mhz;
+	u32 band[ATH10K_BAND_MAX + 1];
+};
+
 struct ath10k {
 	struct ath_common ath_common;
 	struct ieee80211_hw *hw;
+	struct ieee80211_ops *ops;
 	struct device *dev;
 	u8 mac_addr[ETH_ALEN];
+	u8 base_mac_addr[ETH_ALEN];
 
 	enum ath10k_hw_rev hw_rev;
 	u16 dev_id;
@@ -615,9 +767,7 @@
 	u32 max_spatial_stream;
 	/* protected by conf_mutex */
 	bool ani_enabled;
-
-	DECLARE_BITMAP(fw_features, ATH10K_FW_FEATURE_COUNT);
-
+	bool sifs_burst_enabled;
 	bool p2p;
 
 	struct {
@@ -628,71 +778,29 @@
 	struct completion target_suspend;
 
 	const struct ath10k_hw_regs *regs;
+	const struct ath10k_hw_ce_regs *hw_ce_regs;
 	const struct ath10k_hw_values *hw_values;
+	struct ath10k_shadow_reg_value *shadow_reg_value;
+	struct ath10k_shadow_reg_address *shadow_reg_address;
 	struct ath10k_bmi bmi;
 	struct ath10k_wmi wmi;
 	struct ath10k_htc htc;
 	struct ath10k_htt htt;
 
-	struct ath10k_hw_params {
-		u32 id;
-		u16 dev_id;
-		const char *name;
-		u32 patch_load_addr;
-		int uart_pin;
-		u32 otp_exe_param;
-
-		/* This is true if given HW chip has a quirky Cycle Counter
-		 * wraparound which resets to 0x7fffffff instead of 0. All
-		 * other CC related counters (e.g. Rx Clear Count) are divided
-		 * by 2 so they never wraparound themselves.
-		 */
-		bool has_shifted_cc_wraparound;
-
-		/* Some of chip expects fragment descriptor to be continuous
-		 * memory for any TX operation. Set continuous_frag_desc flag
-		 * for the hardware which have such requirement.
-		 */
-		bool continuous_frag_desc;
-
-		u32 channel_counters_freq_hz;
-
-		/* Mgmt tx descriptors threshold for limiting probe response
-		 * frames.
-		 */
-		u32 max_probe_resp_desc_thres;
-
-		struct ath10k_hw_params_fw {
-			const char *dir;
-			const char *fw;
-			const char *otp;
-			const char *board;
-			size_t board_size;
-			size_t board_ext_size;
-		} fw;
-	} hw_params;
-
-	const struct firmware *board;
-	const void *board_data;
-	size_t board_len;
+	struct ath10k_hw_params hw_params;
 
-	const struct firmware *otp;
-	const void *otp_data;
-	size_t otp_len;
+	/* contains the firmware images used with ATH10K_FIRMWARE_MODE_NORMAL */
+	struct ath10k_fw_components normal_mode_fw;
 
-	const struct firmware *firmware;
-	const void *firmware_data;
-	size_t firmware_len;
+	/* READ-ONLY images of the running firmware, which can be either
+	 * normal or UTF. Do not modify, release etc!
+	 */
+	const struct ath10k_fw_components *running_fw;
 
+	const struct firmware *pre_cal_file;
 	const struct firmware *cal_file;
 
 	struct {
-		const void *firmware_codeswap_data;
-		size_t firmware_codeswap_len;
-		struct ath10k_swap_code_seg_info *firmware_swap_code_seg_info;
-	} swap;
-
-	struct {
 		u32 vendor;
 		u32 device;
 		u32 subsystem_vendor;
@@ -732,6 +840,9 @@
 	/* current operating channel definition */
 	struct cfg80211_chan_def chandef;
 
+	/* currently configured operating channel in firmware */
+	struct ieee80211_channel *tgt_oper_chan;
+
 	unsigned long long free_vdev_map;
 	struct ath10k_vif *monitor_arvif;
 	bool monitor;
@@ -752,6 +863,7 @@
 	struct completion install_key_done;
 
 	struct completion vdev_setup_done;
+	struct completion vdev_delete_done;
 
 	struct workqueue_struct *workqueue;
 	/* Auxiliary workqueue */
@@ -762,9 +874,13 @@
 
 	/* protects shared structure data */
 	spinlock_t data_lock;
+	/* protects: ar->txqs, artxq->list */
+	spinlock_t txqs_lock;
 
+	struct list_head txqs;
 	struct list_head arvifs;
 	struct list_head peers;
+	struct ath10k_peer *peer_map[ATH10K_MAX_NUM_PEER_IDS];
 	wait_queue_head_t peer_mapping_wq;
 
 	/* protected by conf_mutex */
@@ -807,6 +923,7 @@
 	 * avoid reporting garbage data.
 	 */
 	bool ch_info_can_report_survey;
+	struct completion bss_survey_done;
 
 	struct dfs_pattern_detector *dfs_detector;
 
@@ -814,8 +931,6 @@
 
 #ifdef CONFIG_ATH10K_DEBUGFS
 	struct ath10k_debug debug;
-#endif
-
 	struct {
 		/* relay(fs) channel for spectral scan */
 		struct rchan *rfs_chan_spec_scan;
@@ -824,16 +939,15 @@
 		enum ath10k_spectral_mode mode;
 		struct ath10k_spec_scan config;
 	} spectral;
+	struct datapath_rx_stats *rx_stats;
+#endif
+	/* protects the rx data-packet histogram from concurrent access */
+	spinlock_t datapath_rx_stat_lock;
 
 	struct {
 		/* protected by conf_mutex */
-		const struct firmware *utf;
-		char utf_version[32];
-		const void *utf_firmware_data;
-		size_t utf_firmware_len;
-		DECLARE_BITMAP(orig_fw_features, ATH10K_FW_FEATURE_COUNT);
-		enum ath10k_fw_wmi_op_version orig_wmi_op_version;
-		enum ath10k_fw_wmi_op_version op_version;
+		struct ath10k_fw_components utf_mode_fw;
+
 		/* protected by data_lock */
 		bool utf_monitor;
 	} testmode;
@@ -848,10 +962,29 @@
 	struct ath10k_thermal thermal;
 	struct ath10k_wow wow;
 
+	/* NAPI */
+	struct net_device napi_dev;
+	struct napi_struct napi;
+
+	struct fw_flag *fw_flags;
+	/* set for bmi chip sets */
+	struct completion peer_delete_done;
+	bool is_bmi;
+	enum ieee80211_sta_state sta_state;
+	bool rri_on_ddr;
 	/* must be last */
 	u8 drv_priv[0] __aligned(sizeof(void *));
 };
 
+static inline bool ath10k_peer_stats_enabled(struct ath10k *ar)
+{
+	if (test_bit(ATH10K_FLAG_PEER_STATS, &ar->dev_flags) &&
+	    test_bit(WMI_SERVICE_PEER_STATS, ar->wmi.svc_map))
+		return true;
+
+	return false;
+}
+
 struct ath10k *ath10k_core_create(size_t priv_size, struct device *dev,
 				  enum ath10k_bus bus,
 				  enum ath10k_hw_rev hw_rev,
@@ -860,8 +993,11 @@
 void ath10k_core_get_fw_features_str(struct ath10k *ar,
 				     char *buf,
 				     size_t max_len);
+int ath10k_core_fetch_firmware_api_n(struct ath10k *ar, const char *name,
+				     struct ath10k_fw_file *fw_file);
 
-int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode);
+int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode,
+		      const struct ath10k_fw_components *fw_components);
 int ath10k_wait_for_suspend(struct ath10k *ar, u32 suspend_opt);
 void ath10k_core_stop(struct ath10k *ar);
 int ath10k_core_register(struct ath10k *ar, u32 chip_id);
diff -ruw linux-4.4.115/drivers/net/wireless/ath/ath10k/debug.c linux-4.4.115-fbx/drivers/net/wireless/ath/ath10k/debug.c
--- linux-4.4.115/drivers/net/wireless/ath/ath10k/debug.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/net/wireless/ath/ath10k/debug.c	2019-10-29 09:26:24.457211144 +0100
@@ -19,15 +19,20 @@
 #include <linux/debugfs.h>
 #include <linux/vmalloc.h>
 #include <linux/utsname.h>
+#include <linux/crc32.h>
+#include <linux/firmware.h>
 
 #include "core.h"
 #include "debug.h"
 #include "hif.h"
+#include "htt.h"
 #include "wmi-ops.h"
 
 /* ms */
 #define ATH10K_DEBUG_HTT_STATS_INTERVAL 1000
 
+#define ATH10K_DEBUG_CAL_DATA_LEN 12064
+
 #define ATH10K_FW_CRASH_DUMP_VERSION 1
 
 /**
@@ -122,43 +127,73 @@
 }
 EXPORT_SYMBOL(ath10k_info);
 
-void ath10k_print_driver_info(struct ath10k *ar)
+void ath10k_debug_print_hwfw_info(struct ath10k *ar)
 {
+	const struct firmware *firmware;
 	char fw_features[128] = {};
-	char boardinfo[100];
+	u32 crc = 0;
 
 	ath10k_core_get_fw_features_str(ar, fw_features, sizeof(fw_features));
 
-	if (ar->id.bmi_ids_valid)
-		scnprintf(boardinfo, sizeof(boardinfo), "bmi %d:%d",
-			  ar->id.bmi_chip_id, ar->id.bmi_board_id);
-	else
-		scnprintf(boardinfo, sizeof(boardinfo), "sub %04x:%04x",
-			  ar->id.subsystem_vendor, ar->id.subsystem_device);
-
-	ath10k_info(ar, "%s (0x%08x, 0x%08x %s) fw %s fwapi %d bdapi %d htt-ver %d.%d wmi-op %d htt-op %d cal %s max-sta %d raw %d hwcrypto %d features %s\n",
+	ath10k_info(ar, "%s target 0x%08x chip_id 0x%08x sub %04x:%04x",
 		    ar->hw_params.name,
 		    ar->target_version,
 		    ar->chip_id,
-		    boardinfo,
+		    ar->id.subsystem_vendor, ar->id.subsystem_device);
+
+	ath10k_info(ar, "kconfig debug %d debugfs %d tracing %d dfs %d testmode %d\n",
+		    IS_ENABLED(CONFIG_ATH10K_DEBUG),
+		    IS_ENABLED(CONFIG_ATH10K_DEBUGFS),
+		    IS_ENABLED(CONFIG_ATH10K_TRACING),
+		    IS_ENABLED(CONFIG_ATH10K_DFS_CERTIFIED),
+		    IS_ENABLED(CONFIG_NL80211_TESTMODE));
+
+	firmware = ar->normal_mode_fw.fw_file.firmware;
+	if (firmware)
+		crc = crc32_le(0, firmware->data, firmware->size);
+
+	ath10k_info(ar, "firmware ver %s api %d features %s crc32 %08x\n",
 		    ar->hw->wiphy->fw_version,
 		    ar->fw_api,
+		    fw_features,
+		    crc);
+}
+
+void ath10k_debug_print_board_info(struct ath10k *ar)
+{
+	char boardinfo[100];
+
+	if (ar->id.bmi_ids_valid)
+		scnprintf(boardinfo, sizeof(boardinfo), "%d:%d",
+			  ar->id.bmi_chip_id, ar->id.bmi_board_id);
+	else
+		scnprintf(boardinfo, sizeof(boardinfo), "N/A");
+
+	ath10k_info(ar, "board_file api %d bmi_id %s crc32 %08x",
 		    ar->bd_api,
+		    boardinfo,
+		    crc32_le(0, ar->normal_mode_fw.board->data,
+			     ar->normal_mode_fw.board->size));
+}
+
+void ath10k_debug_print_boot_info(struct ath10k *ar)
+{
+	ath10k_info(ar, "htt-ver %d.%d wmi-op %d htt-op %d cal %s max-sta %d raw %d hwcrypto %d\n",
 		    ar->htt.target_version_major,
 		    ar->htt.target_version_minor,
-		    ar->wmi.op_version,
-		    ar->htt.op_version,
+		    ar->normal_mode_fw.fw_file.wmi_op_version,
+		    ar->normal_mode_fw.fw_file.htt_op_version,
 		    ath10k_cal_mode_str(ar->cal_mode),
 		    ar->max_num_stations,
 		    test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags),
-		    !test_bit(ATH10K_FLAG_HW_CRYPTO_DISABLED, &ar->dev_flags),
-		    fw_features);
-	ath10k_info(ar, "debug %d debugfs %d tracing %d dfs %d testmode %d\n",
-		    config_enabled(CONFIG_ATH10K_DEBUG),
-		    config_enabled(CONFIG_ATH10K_DEBUGFS),
-		    config_enabled(CONFIG_ATH10K_TRACING),
-		    config_enabled(CONFIG_ATH10K_DFS_CERTIFIED),
-		    config_enabled(CONFIG_NL80211_TESTMODE));
+		    !test_bit(ATH10K_FLAG_HW_CRYPTO_DISABLED, &ar->dev_flags));
+}
+
+void ath10k_print_driver_info(struct ath10k *ar)
+{
+	ath10k_debug_print_hwfw_info(ar);
+	ath10k_debug_print_board_info(ar);
+	ath10k_debug_print_boot_info(ar);
 }
 EXPORT_SYMBOL(ath10k_print_driver_info);
 
@@ -251,7 +286,7 @@
 	.llseek = default_llseek,
 };
 
-static void ath10k_debug_fw_stats_pdevs_free(struct list_head *head)
+static void ath10k_fw_stats_pdevs_free(struct list_head *head)
 {
 	struct ath10k_fw_stats_pdev *i, *tmp;
 
@@ -261,7 +296,7 @@
 	}
 }
 
-static void ath10k_debug_fw_stats_vdevs_free(struct list_head *head)
+static void ath10k_fw_stats_vdevs_free(struct list_head *head)
 {
 	struct ath10k_fw_stats_vdev *i, *tmp;
 
@@ -271,7 +306,7 @@
 	}
 }
 
-static void ath10k_debug_fw_stats_peers_free(struct list_head *head)
+static void ath10k_fw_stats_peers_free(struct list_head *head)
 {
 	struct ath10k_fw_stats_peer *i, *tmp;
 
@@ -281,13 +316,25 @@
 	}
 }
 
+static void ath10k_fw_extd_stats_peers_free(struct list_head *head)
+{
+	struct ath10k_fw_extd_stats_peer *i, *tmp;
+
+	list_for_each_entry_safe(i, tmp, head, list) {
+		list_del(&i->list);
+		kfree(i);
+	}
+}
+
 static void ath10k_debug_fw_stats_reset(struct ath10k *ar)
 {
 	spin_lock_bh(&ar->data_lock);
 	ar->debug.fw_stats_done = false;
-	ath10k_debug_fw_stats_pdevs_free(&ar->debug.fw_stats.pdevs);
-	ath10k_debug_fw_stats_vdevs_free(&ar->debug.fw_stats.vdevs);
-	ath10k_debug_fw_stats_peers_free(&ar->debug.fw_stats.peers);
+	ar->debug.fw_stats.extended = false;
+	ath10k_fw_stats_pdevs_free(&ar->debug.fw_stats.pdevs);
+	ath10k_fw_stats_vdevs_free(&ar->debug.fw_stats.vdevs);
+	ath10k_fw_stats_peers_free(&ar->debug.fw_stats.peers);
+	ath10k_fw_extd_stats_peers_free(&ar->debug.fw_stats.peers_extd);
 	spin_unlock_bh(&ar->data_lock);
 }
 
@@ -302,6 +349,7 @@
 	INIT_LIST_HEAD(&stats.pdevs);
 	INIT_LIST_HEAD(&stats.vdevs);
 	INIT_LIST_HEAD(&stats.peers);
+	INIT_LIST_HEAD(&stats.peers_extd);
 
 	spin_lock_bh(&ar->data_lock);
 	ret = ath10k_wmi_pull_fw_stats(ar, skb, &stats);
@@ -321,9 +369,13 @@
 	 *  b) consume stat update events until another one with pdev stats is
 	 *     delivered which is treated as end-of-data and is itself discarded
 	 */
+	if (ath10k_peer_stats_enabled(ar))
+		ath10k_sta_update_rx_duration(ar, &stats);
 
 	if (ar->debug.fw_stats_done) {
+		if (!ath10k_peer_stats_enabled(ar))
 		ath10k_warn(ar, "received unsolicited stats update event\n");
+
 		goto free;
 	}
 
@@ -347,17 +399,21 @@
 			/* Although this is unlikely, impose a sane limit to
 			 * prevent firmware from DoS-ing the host.
 			 */
+			ath10k_fw_stats_peers_free(&ar->debug.fw_stats.peers);
 			ath10k_warn(ar, "dropping fw peer stats\n");
 			goto free;
 		}
 
 		if (num_vdevs >= BITS_PER_LONG) {
+			ath10k_fw_stats_vdevs_free(&ar->debug.fw_stats.vdevs);
 			ath10k_warn(ar, "dropping fw vdev stats\n");
 			goto free;
 		}
 
 		list_splice_tail_init(&stats.peers, &ar->debug.fw_stats.peers);
 		list_splice_tail_init(&stats.vdevs, &ar->debug.fw_stats.vdevs);
+		list_splice_tail_init(&stats.peers_extd,
+				      &ar->debug.fw_stats.peers_extd);
 	}
 
 	complete(&ar->debug.fw_stats_complete);
@@ -366,9 +422,10 @@
 	/* In some cases lists have been spliced and cleared. Free up
 	 * resources if that is not the case.
 	 */
-	ath10k_debug_fw_stats_pdevs_free(&stats.pdevs);
-	ath10k_debug_fw_stats_vdevs_free(&stats.vdevs);
-	ath10k_debug_fw_stats_peers_free(&stats.peers);
+	ath10k_fw_stats_pdevs_free(&stats.pdevs);
+	ath10k_fw_stats_vdevs_free(&stats.vdevs);
+	ath10k_fw_stats_peers_free(&stats.peers);
+	ath10k_fw_extd_stats_peers_free(&stats.peers_extd);
 
 	spin_unlock_bh(&ar->data_lock);
 }
@@ -481,6 +538,230 @@
 	.llseek = default_llseek,
 };
 
+static inline int is_vht_rate_valid(u32 rate_indx)
+{
+	if ((rate_indx >= ATH10K_RX_MCS_MIN) &&
+	    (rate_indx <= ATH10K_RX_VHT_RATEIDX_MAX))
+		return 1;
+	else
+		return 0;
+}
+
+void fill_datapath_stats(struct ath10k *ar, struct ieee80211_rx_status *status)
+{
+	struct datapath_rx_stats *stat_cnt = ar->rx_stats;
+
+	spin_lock_bh(&ar->datapath_rx_stat_lock);
+
+	stat_cnt->no_of_packets += 1;
+	if (!(stat_cnt->no_of_packets)) {
+		memset(stat_cnt, 0, sizeof(*stat_cnt));
+		stat_cnt->no_of_packets += 1;
+	}
+
+	if (status->flag & RX_FLAG_SHORT_GI)
+		stat_cnt->short_gi_pkts += 1;
+
+	if ((status->vht_nss >= ATH10K_RX_NSS_MIN) &&
+	    (status->vht_nss < ATH10K_RX_NSS_MAX)) {
+		stat_cnt->nss[status->vht_nss] += 1;
+		if (status->flag & RX_FLAG_VHT) {
+			stat_cnt->vht_rate_packets += 1;
+			if (is_vht_rate_valid(status->rate_idx)) {
+				stat_cnt->vht_rate_indx[((status->vht_nss - 1) *
+				10) + status->rate_idx] += 1;
+			} else {
+			    /* if we get an index outside [0, 9] */
+			    stat_cnt->vht_rate_indx[ATH10K_RX_VHT_MCS_MAX] += 1;
+			}
+		} else if (status->flag & RX_FLAG_HT) {
+			stat_cnt->ht_rate_packets += 1;
+			if ((status->rate_idx >= ATH10K_RX_MCS_MIN) &&
+			    (status->rate_idx < ATH10K_RX_HT_MCS_MAX))
+				stat_cnt->ht_rate_indx[status->rate_idx] += 1;
+			else {
+			    /* if we get an index outside [0, 31] */
+			    stat_cnt->ht_rate_indx[ATH10K_RX_HT_MCS_MAX] += 1;
+			}
+		} else {
+			/* if pkt is other than HT and VHT */
+			stat_cnt->legacy_pkt += 1;
+		}
+	} else {
+		stat_cnt->nss[ATH10K_RX_NSS_MAX] += 1;
+	}
+
+	if (status->flag & RX_FLAG_40MHZ)
+		stat_cnt->num_pkts_40Mhz += 1;
+	if (status->vht_flag & RX_VHT_FLAG_80MHZ)
+		stat_cnt->num_pkts_80Mhz += 1;
+	if ((status->band >= ATH10K_BAND_MIN) &&
+	    (status->band < ATH10K_BAND_MAX)) {
+		stat_cnt->band[status->band] += 1;
+	} else {
+		/* if band is other than 0 or 1 */
+		stat_cnt->band[ATH10K_BAND_MAX] += 1;
+	}
+
+	spin_unlock_bh(&ar->datapath_rx_stat_lock);
+}
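
fill_datapath_stats() flattens the (nss, rate_idx) pair into a single
histogram slot: each spatial stream owns ten VHT MCS slots, so a 2x2 chip
covers flat indices 0..19 and slot 20 (ATH10K_RX_VHT_MCS_MAX) counts
out-of-range samples. A standalone sketch of the mapping (note the array is
sized for 2x2, so the sketch clamps nss accordingly):

    static unsigned int vht_flat_idx(unsigned int nss, unsigned int mcs)
    {
        if (nss < 1 || nss > 2 || mcs > 9)
            return 20;               /* out-of-range bucket */
        return (nss - 1) * 10 + mcs; /* e.g. nss=2, mcs=9 -> 19 */
    }
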
+
+size_t get_datapath_stat(char *buf, struct ath10k *ar)
+{
+	u8 i;
+	struct datapath_rx_stats *stat_cnt = ar->rx_stats;
+	size_t j = 0;
+
+	spin_lock(&ar->datapath_rx_stat_lock);
+
+	j = snprintf(buf, ATH10K_DATAPATH_BUF_SIZE, "\nNo of packets: %u\t"
+				 "No of short_gi packets: %u\n"
+				 "\nHT Packets: %u \t VHT Packets: %u\n"
+				 "\n40Mhz Packets: %u \t 80Mhz Packets: %u\n"
+				 "\n2.4GHz: %u \t 5GHz: %u \t band-error: %u\n\n",
+				 stat_cnt->no_of_packets,
+				 stat_cnt->short_gi_pkts,
+				 stat_cnt->ht_rate_packets,
+				 stat_cnt->vht_rate_packets,
+				 stat_cnt->num_pkts_40Mhz,
+				 stat_cnt->num_pkts_80Mhz,
+				 stat_cnt->band[ATH10K_BAND_2GHZ],
+				 stat_cnt->band[ATH10K_BAND_5GHZ],
+				 stat_cnt->band[ATH10K_BAND_MAX]);
+
+	for (i = 0; i <= ATH10K_RX_NSS_MAX; i++) {
+		j += snprintf(buf + j, (ATH10K_DATAPATH_BUF_SIZE - j),
+			      "NSS-%u: %u\t", i, stat_cnt->nss[i]);
+	}
+
+	j += snprintf(buf + j, (ATH10K_DATAPATH_BUF_SIZE - j),
+					"\n\n----HT Rate index------\n");
+
+	for (i = ATH10K_RX_MCS_MIN; i < ATH10K_RX_HT_MCS_MAX;
+		 i += 4) {
+		j += snprintf(buf + j, (ATH10K_DATAPATH_BUF_SIZE - j),
+			      "ht_rate_indx[%02u]: %10u\tht_rate_indx[%02u]: %10u\t"
+			      "ht_rate_indx[%02u]: %10u\tht_rate_indx[%02u]: %10u\n",
+			      i, stat_cnt->ht_rate_indx[i],
+			      i + 1, stat_cnt->ht_rate_indx[i + 1],
+			      i + 2, stat_cnt->ht_rate_indx[i + 2],
+			      i + 3, stat_cnt->ht_rate_indx[i + 3]);
+	}
+
+	j += snprintf(buf + j, (ATH10K_DATAPATH_BUF_SIZE - j),
+		  "ht_rate_indx[OOB]: %10u\n",
+		  stat_cnt->ht_rate_indx[ATH10K_RX_HT_MCS_MAX]);
+
+	j += snprintf(buf + j, (ATH10K_DATAPATH_BUF_SIZE - j),
+					"\n----VHT Rate index------\n");
+
+	for (i = ATH10K_RX_MCS_MIN;
+			i <= ATH10K_RX_VHT_RATEIDX_MAX; i++) {
+		j += snprintf(buf + j, (ATH10K_DATAPATH_BUF_SIZE - j),
+			      "vht_rate_indx[%02u]: %10u\tvht_rate_indx[%02u]: %10u\n",
+			       i, stat_cnt->vht_rate_indx[i],
+			       i + 10, stat_cnt->vht_rate_indx[i + 10]);
+	}
+
+	j += snprintf(buf + j, (ATH10K_DATAPATH_BUF_SIZE - j),
+		      "vht_rate_indx[OOB]: %10u\n",
+		      stat_cnt->vht_rate_indx[ATH10K_RX_VHT_MCS_MAX]);
+
+	j += snprintf(buf + j, (ATH10K_DATAPATH_BUF_SIZE - j),
+					"\nnumber of packets other than HT and VHT (legacy): %u\n"
+					"----------------------\n",
+					stat_cnt->legacy_pkt);
+
+	spin_unlock_bh(&ar->datapath_rx_stat_lock);
+
+	return j;
+}
+
+static int ath10k_datapath_stats_open(struct inode *inode, struct file *file)
+{
+	struct ath10k *ar = inode->i_private;
+	int ret;
+
+	spin_lock_bh(&ar->datapath_rx_stat_lock);
+
+	if (ar->state != ATH10K_STATE_ON) {
+		ret = -ENETDOWN;
+		goto err_unlock;
+	}
+
+	file->private_data = ar;
+
+	spin_unlock_bh(&ar->datapath_rx_stat_lock);
+	return 0;
+
+err_unlock:
+	spin_unlock_bh(&ar->datapath_rx_stat_lock);
+	return ret;
+}
+
+static ssize_t ath10k_datapath_stats_read(struct file *file,
+					  char __user *user_buf,
+					  size_t count, loff_t *ppos)
+{
+	struct ath10k *ar = file->private_data;
+	size_t buf_len;
+	ssize_t ret;
+	void *buf = NULL;
+
+	buf = vmalloc(ATH10K_DATAPATH_BUF_SIZE);
+	if (!buf)
+		return -ENOMEM;
+
+	buf_len = get_datapath_stat(buf, ar);
+
+	ret = simple_read_from_buffer(user_buf, count, ppos, buf, buf_len);
+	vfree(buf);
+
+	return ret;
+}
+
+static ssize_t ath10k_datapath_stats_write(struct file *file,
+					   const char __user *ubuf,
+					   size_t count, loff_t *ppos)
+{
+	struct ath10k *ar = file->private_data;
+	u32 filter;
+	int ret;
+
+	if (kstrtouint_from_user(ubuf, count, 0, &filter))
+		return -EINVAL;
+
+	spin_lock_bh(&ar->datapath_rx_stat_lock);
+
+	if (ar->state != ATH10K_STATE_ON) {
+		ret = count;
+		goto err_unlock;
+	}
+
+	if (!filter)
+		memset(ar->rx_stats, 0, sizeof(*ar->rx_stats));
+
+	ret = count;
+
+err_unlock:
+	spin_unlock_bh(&ar->datapath_rx_stat_lock);
+	return ret;
+}
+
+static int ath10k_datapath_stats_release(struct inode *inode, struct file *file)
+{
+	return 0;
+}
+
+static const struct file_operations fops_datapath_stats = {
+	.open = ath10k_datapath_stats_open,
+	.read = ath10k_datapath_stats_read,
+	.write = ath10k_datapath_stats_write,
+	.release = ath10k_datapath_stats_release,
+	.owner = THIS_MODULE,
+	.llseek = default_llseek,
+};
+
 static ssize_t ath10k_debug_fw_reset_stats_read(struct file *file,
 						char __user *user_buf,
 						size_t count, loff_t *ppos)
@@ -571,25 +852,23 @@
 	char buf[32];
 	int ret;
 
-	mutex_lock(&ar->conf_mutex);
-
 	simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, user_buf, count);
 
 	/* make sure that buf is null terminated */
 	buf[sizeof(buf) - 1] = 0;
 
+	/* drop the possible '\n' from the end */
+	if (buf[count - 1] == '\n')
+		buf[count - 1] = 0;
+
+	mutex_lock(&ar->conf_mutex);
+
 	if (ar->state != ATH10K_STATE_ON &&
 	    ar->state != ATH10K_STATE_RESTARTED) {
 		ret = -ENETDOWN;
 		goto exit;
 	}
 
-	/* drop the possible '\n' from the end */
-	if (buf[count - 1] == '\n') {
-		buf[count - 1] = 0;
-		count--;
-	}
-
 	if (!strcmp(buf, "soft")) {
 		ath10k_info(ar, "simulating soft firmware crash\n");
 		ret = ath10k_wmi_force_fw_hang(ar, WMI_FORCE_FW_HANG_ASSERT, 0);
@@ -1114,7 +1393,7 @@
 {
 	struct ath10k *ar = file->private_data;
 	char buf[64];
-	u8 amsdu = 3, ampdu = 64;
+	u8 amsdu, ampdu;
 	unsigned int len;
 
 	mutex_lock(&ar->conf_mutex);
@@ -1176,9 +1455,9 @@
 {
 	struct ath10k *ar = file->private_data;
 	unsigned int len;
-	char buf[64];
+	char buf[96];
 
-	len = scnprintf(buf, sizeof(buf), "0x%08x %u\n",
+	len = scnprintf(buf, sizeof(buf), "0x%16llx %u\n",
 			ar->debug.fw_dbglog_mask, ar->debug.fw_dbglog_level);
 
 	return simple_read_from_buffer(user_buf, count, ppos, buf, len);
@@ -1190,15 +1469,16 @@
 {
 	struct ath10k *ar = file->private_data;
 	int ret;
-	char buf[64];
-	unsigned int log_level, mask;
+	char buf[96];
+	unsigned int log_level;
+	u64 mask;
 
 	simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, user_buf, count);
 
 	/* make sure that buf is null terminated */
 	buf[sizeof(buf) - 1] = 0;
 
-	ret = sscanf(buf, "%x %u", &mask, &log_level);
+	ret = sscanf(buf, "%llx %u", &mask, &log_level);
 
 	if (!ret)
 		return -EINVAL;
@@ -1398,74 +1678,68 @@
 	.llseek = default_llseek,
 };
 
-static int ath10k_debug_cal_data_open(struct inode *inode, struct file *file)
+static int ath10k_debug_cal_data_fetch(struct ath10k *ar)
 {
-	struct ath10k *ar = inode->i_private;
-	void *buf;
 	u32 hi_addr;
 	__le32 addr;
 	int ret;
 
-	mutex_lock(&ar->conf_mutex);
-
-	if (ar->state != ATH10K_STATE_ON &&
-	    ar->state != ATH10K_STATE_UTF) {
-		ret = -ENETDOWN;
-		goto err;
-	}
+	lockdep_assert_held(&ar->conf_mutex);
 
-	buf = vmalloc(QCA988X_CAL_DATA_LEN);
-	if (!buf) {
-		ret = -ENOMEM;
-		goto err;
-	}
+	if (WARN_ON(ar->hw_params.cal_data_len > ATH10K_DEBUG_CAL_DATA_LEN))
+		return -EINVAL;
 
 	hi_addr = host_interest_item_address(HI_ITEM(hi_board_data));
 
 	ret = ath10k_hif_diag_read(ar, hi_addr, &addr, sizeof(addr));
 	if (ret) {
-		ath10k_warn(ar, "failed to read hi_board_data address: %d\n", ret);
-		goto err_vfree;
+		ath10k_warn(ar, "failed to read hi_board_data address: %d\n",
+			    ret);
+		return ret;
 	}
 
-	ret = ath10k_hif_diag_read(ar, le32_to_cpu(addr), buf,
-				   QCA988X_CAL_DATA_LEN);
+	ret = ath10k_hif_diag_read(ar, le32_to_cpu(addr), ar->debug.cal_data,
+				   ar->hw_params.cal_data_len);
 	if (ret) {
 		ath10k_warn(ar, "failed to read calibration data: %d\n", ret);
-		goto err_vfree;
+		return ret;
 	}
 
-	file->private_data = buf;
+	return 0;
+}
 
-	mutex_unlock(&ar->conf_mutex);
+static int ath10k_debug_cal_data_open(struct inode *inode, struct file *file)
+{
+	struct ath10k *ar = inode->i_private;
 
-	return 0;
+	mutex_lock(&ar->conf_mutex);
 
-err_vfree:
-	vfree(buf);
+	if (ar->state == ATH10K_STATE_ON ||
+	    ar->state == ATH10K_STATE_UTF) {
+		ath10k_debug_cal_data_fetch(ar);
+	}
 
-err:
+	file->private_data = ar;
 	mutex_unlock(&ar->conf_mutex);
 
-	return ret;
+	return 0;
 }
 
 static ssize_t ath10k_debug_cal_data_read(struct file *file,
 					  char __user *user_buf,
 					  size_t count, loff_t *ppos)
 {
-	void *buf = file->private_data;
+	struct ath10k *ar = file->private_data;
 
-	return simple_read_from_buffer(user_buf, count, ppos,
-				       buf, QCA988X_CAL_DATA_LEN);
-}
+	mutex_lock(&ar->conf_mutex);
 
-static int ath10k_debug_cal_data_release(struct inode *inode,
-					 struct file *file)
-{
-	vfree(file->private_data);
+	count = simple_read_from_buffer(user_buf, count, ppos,
+					ar->debug.cal_data,
+					ar->hw_params.cal_data_len);
 
-	return 0;
+	mutex_unlock(&ar->conf_mutex);
+
+	return count;
 }
 
 static ssize_t ath10k_write_ani_enable(struct file *file,
@@ -1523,10 +1797,67 @@
 	.llseek = default_llseek,
 };
 
+static ssize_t ath10k_write_sifs_burst_enable(struct file *file,
+					      const char __user *user_buf,
+					      size_t count, loff_t *ppos)
+{
+	struct ath10k *ar = file->private_data;
+	int ret;
+	u8 enable;
+
+	if (kstrtou8_from_user(user_buf, count, 0, &enable))
+		return -EINVAL;
+
+	mutex_lock(&ar->conf_mutex);
+
+	if (ar->sifs_burst_enabled == enable) {
+		ret = count;
+		goto exit;
+	}
+
+	ret = ath10k_wmi_pdev_set_param(ar, ar->wmi.pdev_param->burst_enable,
+					enable);
+	if (ret) {
+		ath10k_warn(ar, "sifs_burst_enable failed: %d\n", ret);
+		goto exit;
+	}
+	ar->sifs_burst_enabled = enable;
+
+	ret = count;
+
+exit:
+	mutex_unlock(&ar->conf_mutex);
+
+	return ret;
+}
+
+static ssize_t ath10k_read_sifs_burst_enable(struct file *file,
+					     char __user *user_buf,
+					     size_t count, loff_t *ppos)
+{
+	struct ath10k *ar = file->private_data;
+	char buf[32];
+	int len;
+
+	len = scnprintf(buf, sizeof(buf), "%d\n",
+			ar->sifs_burst_enabled ? 1 : 0);
+	return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static const struct file_operations fops_sifs_burst_enable = {
+	.read = ath10k_read_sifs_burst_enable,
+	.write = ath10k_write_sifs_burst_enable,
+	.open = simple_open,
+	.owner = THIS_MODULE,
+	.llseek = default_llseek,
+};
+
 static const struct file_operations fops_cal_data = {
 	.open = ath10k_debug_cal_data_open,
 	.read = ath10k_debug_cal_data_read,
-	.release = ath10k_debug_cal_data_release,
 	.owner = THIS_MODULE,
 	.llseek = default_llseek,
 };
@@ -1861,7 +2192,7 @@
 			ath10k_warn(ar, "failed to disable pktlog: %d\n", ret);
 	}
 
-	if (ar->debug.nf_cal_period) {
+	if (ar->debug.nf_cal_period && !QCA_REV_WCN3990(ar)) {
 		ret = ath10k_wmi_pdev_set_param(ar,
 						ar->wmi.pdev_param->cal_period,
 						ar->debug.nf_cal_period);
@@ -1878,6 +2209,9 @@
 {
 	lockdep_assert_held(&ar->conf_mutex);
 
+	if (!QCA_REV_WCN3990(ar))
+		ath10k_debug_cal_data_fetch(ar);
+
 	/* Must not use _sync to avoid deadlock, we do that in
 	 * ath10k_debug_destroy(). The check for htt_stats_mask is to avoid
 	 * warning from del_timer(). */
@@ -2079,17 +2413,410 @@
 	.open = simple_open
 };
 
+static ssize_t ath10k_write_btcoex(struct file *file,
+				   const char __user *ubuf,
+				   size_t count, loff_t *ppos)
+{
+	struct ath10k *ar = file->private_data;
+	char buf[32];
+	size_t buf_size;
+	int ret;
+	bool val;
+	u32 pdev_param;
+
+	buf_size = min(count, (sizeof(buf) - 1));
+	if (copy_from_user(buf, ubuf, buf_size))
+		return -EFAULT;
+
+	buf[buf_size] = '\0';
+
+	if (strtobool(buf, &val) != 0)
+		return -EINVAL;
+
+	mutex_lock(&ar->conf_mutex);
+
+	if (ar->state != ATH10K_STATE_ON &&
+	    ar->state != ATH10K_STATE_RESTARTED) {
+		ret = -ENETDOWN;
+		goto exit;
+	}
+
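+	/* the XOR is zero when the requested value already matches the
+	 * current flag state, so treat the write as a no-op
+	 */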
+	if (!(test_bit(ATH10K_FLAG_BTCOEX, &ar->dev_flags) ^ val)) {
+		ret = count;
+		goto exit;
+	}
+
+	pdev_param = ar->wmi.pdev_param->enable_btcoex;
+	if (test_bit(ATH10K_FW_FEATURE_BTCOEX_PARAM,
+		     ar->running_fw->fw_file.fw_features)) {
+		ret = ath10k_wmi_pdev_set_param(ar, pdev_param, val);
+		if (ret) {
+			ath10k_warn(ar, "failed to enable btcoex: %d\n", ret);
+			ret = count;
+			goto exit;
+		}
+	} else {
+		ath10k_info(ar, "restarting firmware due to btcoex change");
+		queue_work(ar->workqueue, &ar->restart_work);
+	}
+
+	if (val)
+		set_bit(ATH10K_FLAG_BTCOEX, &ar->dev_flags);
+	else
+		clear_bit(ATH10K_FLAG_BTCOEX, &ar->dev_flags);
+
+	ret = count;
+
+exit:
+	mutex_unlock(&ar->conf_mutex);
+
+	return ret;
+}
+
+static ssize_t ath10k_read_btcoex(struct file *file, char __user *ubuf,
+				  size_t count, loff_t *ppos)
+{
+	char buf[32];
+	struct ath10k *ar = file->private_data;
+	int len = 0;
+
+	mutex_lock(&ar->conf_mutex);
+	len = scnprintf(buf, sizeof(buf) - len, "%d\n",
+			test_bit(ATH10K_FLAG_BTCOEX, &ar->dev_flags));
+	mutex_unlock(&ar->conf_mutex);
+
+	return simple_read_from_buffer(ubuf, count, ppos, buf, len);
+}
+
+static const struct file_operations fops_btcoex = {
+	.read = ath10k_read_btcoex,
+	.write = ath10k_write_btcoex,
+	.open = simple_open
+};
+
+static ssize_t ath10k_write_peer_stats(struct file *file,
+				       const char __user *ubuf,
+				       size_t count, loff_t *ppos)
+{
+	struct ath10k *ar = file->private_data;
+	char buf[32];
+	size_t buf_size;
+	int ret;
+	bool val;
+
+	buf_size = min(count, (sizeof(buf) - 1));
+	if (copy_from_user(buf, ubuf, buf_size))
+		return -EFAULT;
+
+	buf[buf_size] = '\0';
+
+	if (strtobool(buf, &val) != 0)
+		return -EINVAL;
+
+	mutex_lock(&ar->conf_mutex);
+
+	if (ar->state != ATH10K_STATE_ON &&
+	    ar->state != ATH10K_STATE_RESTARTED) {
+		ret = -ENETDOWN;
+		goto exit;
+	}
+
+	if (!(test_bit(ATH10K_FLAG_PEER_STATS, &ar->dev_flags) ^ val)) {
+		ret = count;
+		goto exit;
+	}
+
+	if (val)
+		set_bit(ATH10K_FLAG_PEER_STATS, &ar->dev_flags);
+	else
+		clear_bit(ATH10K_FLAG_PEER_STATS, &ar->dev_flags);
+
+	ath10k_info(ar, "restarting firmware due to peer stats change");
+
+	queue_work(ar->workqueue, &ar->restart_work);
+	ret = count;
+
+exit:
+	mutex_unlock(&ar->conf_mutex);
+	return ret;
+}
+
+static ssize_t ath10k_read_peer_stats(struct file *file, char __user *ubuf,
+				      size_t count, loff_t *ppos)
+
+{
+	char buf[32];
+	struct ath10k *ar = file->private_data;
+	int len = 0;
+
+	mutex_lock(&ar->conf_mutex);
+	len = scnprintf(buf, sizeof(buf) - len, "%d\n",
+			test_bit(ATH10K_FLAG_PEER_STATS, &ar->dev_flags));
+	mutex_unlock(&ar->conf_mutex);
+
+	return simple_read_from_buffer(ubuf, count, ppos, buf, len);
+}
+
+static const struct file_operations fops_peer_stats = {
+	.read = ath10k_read_peer_stats,
+	.write = ath10k_write_peer_stats,
+	.open = simple_open
+};
+
+static ssize_t ath10k_debug_fw_checksums_read(struct file *file,
+					      char __user *user_buf,
+					      size_t count, loff_t *ppos)
+{
+	struct ath10k *ar = file->private_data;
+	unsigned int len = 0, buf_len = 4096;
+	ssize_t ret_cnt;
+	char *buf;
+
+	buf = kzalloc(buf_len, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	mutex_lock(&ar->conf_mutex);
+
+	len += scnprintf(buf + len, buf_len - len,
+			 "firmware-N.bin\t\t%08x\n",
+			 crc32_le(0, ar->normal_mode_fw.fw_file.firmware->data,
+				  ar->normal_mode_fw.fw_file.firmware->size));
+	len += scnprintf(buf + len, buf_len - len,
+			 "athwlan\t\t\t%08x\n",
+			 crc32_le(0, ar->normal_mode_fw.fw_file.firmware_data,
+				  ar->normal_mode_fw.fw_file.firmware_len));
+	len += scnprintf(buf + len, buf_len - len,
+			 "otp\t\t\t%08x\n",
+			 crc32_le(0, ar->normal_mode_fw.fw_file.otp_data,
+				  ar->normal_mode_fw.fw_file.otp_len));
+	len += scnprintf(buf + len, buf_len - len,
+			 "codeswap\t\t%08x\n",
+			 crc32_le(0, ar->normal_mode_fw.fw_file.codeswap_data,
+				  ar->normal_mode_fw.fw_file.codeswap_len));
+	len += scnprintf(buf + len, buf_len - len,
+			 "board-N.bin\t\t%08x\n",
+			 crc32_le(0, ar->normal_mode_fw.board->data,
+				  ar->normal_mode_fw.board->size));
+	len += scnprintf(buf + len, buf_len - len,
+			 "board\t\t\t%08x\n",
+			 crc32_le(0, ar->normal_mode_fw.board_data,
+				  ar->normal_mode_fw.board_len));
+
+	ret_cnt = simple_read_from_buffer(user_buf, count, ppos, buf, len);
+
+	mutex_unlock(&ar->conf_mutex);
+
+	kfree(buf);
+	return ret_cnt;
+}
+
+static const struct file_operations fops_fw_checksums = {
+	.read = ath10k_debug_fw_checksums_read,
+	.open = simple_open,
+	.owner = THIS_MODULE,
+	.llseek = default_llseek,
+};
+
+static struct txctl_frm_hdr frm_hdr;
+
+static void ath10k_extract_frame_header(u8 *addr1, u8 *addr2, u8 *addr3)
+{
+	frm_hdr.bssid_tail = (addr1[IEEE80211_ADDR_LEN - 2] << BITS_PER_BYTE)
+			      | (addr1[IEEE80211_ADDR_LEN - 1]);
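+	/* e.g. addr1 = 00:11:22:33:44:55 yields bssid_tail = 0x4455 */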
+	frm_hdr.sa_tail = (addr2[IEEE80211_ADDR_LEN - 2] << BITS_PER_BYTE)
+			   | (addr2[IEEE80211_ADDR_LEN - 1]);
+	frm_hdr.da_tail = (addr3[IEEE80211_ADDR_LEN - 2] << BITS_PER_BYTE)
+			   | (addr3[IEEE80211_ADDR_LEN - 1]);
+}
+
+static void ath10k_process_ieee_hdr(void *data)
+{
+	u8 dir;
+	struct ieee80211_frame *wh;
+
+	if (!data)
+		return;
+
+	wh = (struct ieee80211_frame *)(data);
+	frm_hdr.framectrl = *(u_int16_t *)(wh->i_fc);
+	frm_hdr.seqctrl   = *(u_int16_t *)(wh->i_seq);
+	dir = (wh->i_fc[1] & IEEE80211_FC1_DIR_MASK);
+
+	if (dir == IEEE80211_FC1_DIR_TODS)
+		ath10k_extract_frame_header(wh->i_addr1, wh->i_addr2,
+					    wh->i_addr3);
+	else if (dir == IEEE80211_FC1_DIR_FROMDS)
+		ath10k_extract_frame_header(wh->i_addr2, wh->i_addr3,
+					    wh->i_addr1);
+	else
+		ath10k_extract_frame_header(wh->i_addr3, wh->i_addr2,
+					    wh->i_addr1);
+}
+
+static void ath10k_pktlog_process_rx(struct ath10k *ar, struct sk_buff *skb)
+{
+	struct ath10k_pktlog_hdr *hdr = (void *)skb->data;
+	struct ath_pktlog_txctl pktlog_tx_ctrl;
+
+	switch (hdr->log_type) {
+	case ATH10K_PKTLOG_TYPE_TX_CTRL: {
+		spin_lock_bh(&ar->htt.tx_lock);
+
+		memcpy((void *)(&pktlog_tx_ctrl.hdr), (void *)hdr,
+		       sizeof(pktlog_tx_ctrl.hdr));
+		pktlog_tx_ctrl.frm_hdr = frm_hdr;
+		memcpy((void *)pktlog_tx_ctrl.txdesc_ctl, (void *)hdr->payload,
+		       __le16_to_cpu(hdr->size));
+		pktlog_tx_ctrl.hdr.size = sizeof(pktlog_tx_ctrl) -
+			sizeof(pktlog_tx_ctrl.hdr);
+
+		spin_unlock_bh(&ar->htt.tx_lock);
+
+		trace_ath10k_htt_pktlog(ar, (void *)&pktlog_tx_ctrl,
+					sizeof(pktlog_tx_ctrl));
+		break;
+		}
+	case ATH10K_PKTLOG_TYPE_TX_MSDU_ID:
+		break;
+	case ATH10K_PKTLOG_TYPE_TX_FRM_HDR: {
+		ath10k_process_ieee_hdr((void *)(hdr->payload));
+		trace_ath10k_htt_pktlog(ar, hdr, sizeof(*hdr) +
+					__le16_to_cpu(hdr->size));
+		break;
+		}
+	case ATH10K_PKTLOG_TYPE_RX_STAT:
+	case ATH10K_PKTLOG_TYPE_RC_FIND:
+	case ATH10K_PKTLOG_TYPE_RC_UPDATE:
+	case ATH10K_PKTLOG_TYPE_DBG_PRINT:
+	case ATH10K_PKTLOG_TYPE_TX_STAT:
+	case ATH10K_PKTLOG_TYPE_SW_EVENT:
+		trace_ath10k_htt_pktlog(ar, hdr, sizeof(*hdr) +
+					__le16_to_cpu(hdr->size));
+		break;
+	case ATH10K_PKTLOG_TYPE_TX_VIRT_ADDR: {
+		u32 desc_id = (u32)*((u32 *)(hdr->payload));
+		struct sk_buff *msdu;
+
+		spin_lock_bh(&ar->htt.tx_lock);
+		msdu = ath10k_htt_tx_find_msdu_by_id(&ar->htt, desc_id);
+
+		if (!msdu) {
+			ath10k_info(ar,
+				    "failed to get msdu, id: %d\n",
+				    desc_id);
+			spin_unlock_bh(&ar->htt.tx_lock);
+			return;
+		}
+		ath10k_process_ieee_hdr((void *)msdu->data);
+		spin_unlock_bh(&ar->htt.tx_lock);
+		trace_ath10k_htt_pktlog(ar, hdr, sizeof(*hdr) +
+					__le16_to_cpu(hdr->size));
+		break;
+		}
+	}
+}
+
+int ath10k_rx_record_pktlog(struct ath10k *ar, struct sk_buff *skb)
+{
+	struct sk_buff *pktlog_skb;
+	struct ath_pktlog_hdr *pl_hdr;
+	struct ath_pktlog_rx_info *pktlog_rx_info;
+	struct htt_rx_desc *rx_desc = (void *)skb->data - sizeof(*rx_desc);
+
+	if (!ar->debug.pktlog_filter)
+		return 0;
+
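+	/* the allocation below is sized to mirror struct ath_pktlog_rx_info:
+	 * the pktlog header plus the rx descriptor stripped of its fw_desc
+	 * prefix
+	 */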
+	pktlog_skb = dev_alloc_skb(sizeof(struct ath_pktlog_hdr) +
+				   sizeof(struct htt_rx_desc) -
+				   sizeof(struct htt_host_fw_desc_base));
+	if (!pktlog_skb)
+		return -ENOMEM;
+
+	pktlog_rx_info = (struct ath_pktlog_rx_info *)pktlog_skb->data;
+	pl_hdr = &pktlog_rx_info->pl_hdr;
+
+	pl_hdr->flags = (1 << ATH10K_PKTLOG_FLG_FRM_TYPE_REMOTE_S);
+	pl_hdr->missed_cnt = 0;
+	pl_hdr->mac_id = 0;
+	pl_hdr->log_type = ATH10K_PKTLOG_TYPE_RX_STAT;
+	pl_hdr->flags |= ATH10K_PKTLOG_HDR_SIZE_16;
+	pl_hdr->size = sizeof(*rx_desc) -
+		       sizeof(struct htt_host_fw_desc_base);
+
+	pl_hdr->timestamp =
+	cpu_to_le32(rx_desc->ppdu_end.wcn3990.rx_pkt_end.phy_timestamp_1);
+
+	pl_hdr->type_specific_data = 0xDEADAA;
+	memcpy((void *)pktlog_rx_info + sizeof(struct ath_pktlog_hdr),
+	       (void *)rx_desc + sizeof(struct htt_host_fw_desc_base),
+	       pl_hdr->size);
+
+	ath10k_pktlog_process_rx(ar, pktlog_skb);
+	dev_kfree_skb_any(pktlog_skb);
+	return 0;
+}
+
+static void ath10k_pktlog_htc_tx_complete(struct ath10k *ar,
+					  struct sk_buff *skb)
+{
+	ath10k_info(ar, "PKTLOG htc completed\n");
+}
+
+int ath10k_pktlog_connect(struct ath10k *ar)
+{
+	int status;
+	struct ath10k_htc_svc_conn_req conn_req;
+	struct ath10k_htc_svc_conn_resp conn_resp;
+
+	memset(&conn_req, 0, sizeof(conn_req));
+	memset(&conn_resp, 0, sizeof(conn_resp));
+
+	conn_req.ep_ops.ep_tx_complete = ath10k_pktlog_htc_tx_complete;
+	conn_req.ep_ops.ep_rx_complete = ath10k_pktlog_process_rx;
+	conn_req.ep_ops.ep_tx_credits = NULL;
+
+	/* connect to the pktlog service */
+	conn_req.service_id = ATH10K_HTC_SVC_ID_HTT_LOG_MSG;
+	status = ath10k_htc_connect_service(&ar->htc, &conn_req, &conn_resp);
+	if (status) {
+		ath10k_warn(ar, "failed to connect to PKTLOG service: %d\n",
+			    status);
+		return status;
+	}
+
+	ar->debug.eid = conn_resp.eid;
+
+	return 0;
+}
+
 int ath10k_debug_create(struct ath10k *ar)
 {
 	ar->debug.fw_crash_data = vzalloc(sizeof(*ar->debug.fw_crash_data));
 	if (!ar->debug.fw_crash_data)
 		return -ENOMEM;
 
+	ar->debug.cal_data = vzalloc(ATH10K_DEBUG_CAL_DATA_LEN);
+	if (!ar->debug.cal_data)
+		goto err_cal_data;
+
+	ar->rx_stats = vzalloc(sizeof(*ar->rx_stats));
+	if (!ar->rx_stats)
+		goto err_rx_stats;
+
 	INIT_LIST_HEAD(&ar->debug.fw_stats.pdevs);
 	INIT_LIST_HEAD(&ar->debug.fw_stats.vdevs);
 	INIT_LIST_HEAD(&ar->debug.fw_stats.peers);
+	INIT_LIST_HEAD(&ar->debug.fw_stats.peers_extd);
 
 	return 0;
+
+err_rx_stats:
+	vfree(ar->debug.cal_data);
+
+err_cal_data:
+	vfree(ar->debug.fw_crash_data);
+	return -ENOMEM;
 }
 
 void ath10k_debug_destroy(struct ath10k *ar)
@@ -2097,6 +2824,12 @@
 	vfree(ar->debug.fw_crash_data);
 	ar->debug.fw_crash_data = NULL;
 
+	vfree(ar->debug.cal_data);
+	ar->debug.cal_data = NULL;
+
+	vfree(ar->rx_stats);
+	ar->rx_stats = NULL;
+
 	ath10k_debug_fw_stats_reset(ar);
 
 	kfree(ar->debug.tpc_stats);
@@ -2119,6 +2852,9 @@
 	init_completion(&ar->debug.tpc_complete);
 	init_completion(&ar->debug.fw_stats_complete);
 
+	debugfs_create_file("datapath_rx_stats", S_IRUSR | S_IWUSR,
+			    ar->debug.debugfs_phy, ar, &fops_datapath_stats);
+
 	debugfs_create_file("fw_stats", S_IRUSR, ar->debug.debugfs_phy, ar,
 			    &fops_fw_stats);
 
@@ -2128,8 +2864,8 @@
 	debugfs_create_file("wmi_services", S_IRUSR, ar->debug.debugfs_phy, ar,
 			    &fops_wmi_services);
 
-	debugfs_create_file("simulate_fw_crash", S_IRUSR, ar->debug.debugfs_phy,
-			    ar, &fops_simulate_fw_crash);
+	debugfs_create_file("simulate_fw_crash", S_IRUSR | S_IWUSR,
+			    ar->debug.debugfs_phy, ar, &fops_simulate_fw_crash);
 
 	debugfs_create_file("fw_crash_dump", S_IRUSR, ar->debug.debugfs_phy,
 			    ar, &fops_fw_crash_dump);
@@ -2146,26 +2882,32 @@
 	debugfs_create_file("chip_id", S_IRUSR, ar->debug.debugfs_phy,
 			    ar, &fops_chip_id);
 
-	debugfs_create_file("htt_stats_mask", S_IRUSR, ar->debug.debugfs_phy,
-			    ar, &fops_htt_stats_mask);
+	debugfs_create_file("htt_stats_mask", S_IRUSR | S_IWUSR,
+			    ar->debug.debugfs_phy, ar, &fops_htt_stats_mask);
 
 	debugfs_create_file("htt_max_amsdu_ampdu", S_IRUSR | S_IWUSR,
 			    ar->debug.debugfs_phy, ar,
 			    &fops_htt_max_amsdu_ampdu);
 
-	debugfs_create_file("fw_dbglog", S_IRUSR, ar->debug.debugfs_phy,
-			    ar, &fops_fw_dbglog);
+	debugfs_create_file("fw_dbglog", S_IRUSR | S_IWUSR,
+			    ar->debug.debugfs_phy, ar, &fops_fw_dbglog);
 
+	if (!QCA_REV_WCN3990(ar)) {
-	debugfs_create_file("cal_data", S_IRUSR, ar->debug.debugfs_phy,
-			    ar, &fops_cal_data);
+		debugfs_create_file("cal_data", S_IRUSR,
+				    ar->debug.debugfs_phy, ar,
+				    &fops_cal_data);
 
+		debugfs_create_file("nf_cal_period", S_IRUSR | S_IWUSR,
+				    ar->debug.debugfs_phy, ar,
+				    &fops_nf_cal_period);
+	}
+
 	debugfs_create_file("ani_enable", S_IRUSR | S_IWUSR,
 			    ar->debug.debugfs_phy, ar, &fops_ani_enable);
 
-	debugfs_create_file("nf_cal_period", S_IRUSR | S_IWUSR,
-			    ar->debug.debugfs_phy, ar, &fops_nf_cal_period);
+	debugfs_create_file("sifs_burst_enable", S_IRUSR | S_IWUSR,
+			    ar->debug.debugfs_phy, ar, &fops_sifs_burst_enable);
 
-	if (config_enabled(CONFIG_ATH10K_DFS_CERTIFIED)) {
+	if (IS_ENABLED(CONFIG_ATH10K_DFS_CERTIFIED)) {
 		debugfs_create_file("dfs_simulate_radar", S_IWUSR,
 				    ar->debug.debugfs_phy, ar,
 				    &fops_simulate_radar);
@@ -2188,6 +2930,18 @@
 	debugfs_create_file("tpc_stats", S_IRUSR,
 			    ar->debug.debugfs_phy, ar, &fops_tpc_stats);
 
+	if (test_bit(WMI_SERVICE_COEX_GPIO, ar->wmi.svc_map))
+		debugfs_create_file("btcoex", S_IRUGO | S_IWUSR,
+				    ar->debug.debugfs_phy, ar, &fops_btcoex);
+
+	if (test_bit(WMI_SERVICE_PEER_STATS, ar->wmi.svc_map))
+		debugfs_create_file("peer_stats", S_IRUGO | S_IWUSR,
+				    ar->debug.debugfs_phy, ar,
+				    &fops_peer_stats);
+
+	debugfs_create_file("fw_checksums", S_IRUSR,
+			    ar->debug.debugfs_phy, ar, &fops_fw_checksums);
+
 	return 0;
 }
 
diff -ruw linux-4.4.115/drivers/net/wireless/ath/ath10k/debug.h linux-4.4.115-fbx/drivers/net/wireless/ath/ath10k/debug.h
--- linux-4.4.115/drivers/net/wireless/ath/ath10k/debug.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/net/wireless/ath/ath10k/debug.h	2019-01-22 16:16:25.415263721 +0100
@@ -37,6 +37,8 @@
 	ATH10K_DBG_TESTMODE	= 0x00001000,
 	ATH10K_DBG_WMI_PRINT	= 0x00002000,
 	ATH10K_DBG_PCI_PS	= 0x00004000,
+	ATH10K_DBG_AHB		= 0x00008000,
+	ATH10K_DBG_SNOC		= 0x00010000,
 	ATH10K_DBG_ANY		= 0xffffffff,
 };
 
@@ -55,14 +57,97 @@
 	ATH10K_DBG_AGGR_MODE_MAX,
 };
 
+#define IEEE80211_FC1_DIR_MASK              0x03
+#define IEEE80211_FC1_DIR_NODS              0x00    /* STA->STA */
+#define IEEE80211_FC1_DIR_TODS              0x01    /* STA->AP  */
+#define IEEE80211_FC1_DIR_FROMDS            0x02    /* AP ->STA */
+#define IEEE80211_FC1_DIR_DSTODS            0x03    /* AP ->AP  */
+#define IEEE80211_ADDR_LEN  6                       /* size of 802.11 address */
+
+#define MAX_PKT_INFO_MSDU_ID 192
+#define MSDU_ID_INFO_ID_OFFSET  \
+	((MAX_PKT_INFO_MSDU_ID >> 3) + 4)
+
+#define PKTLOG_MAX_TXCTL_WORDS 57 /* +2 words for bitmap */
+#define HTT_TX_MSDU_LEN_MASK 0xffff
+
+struct txctl_frm_hdr {
+	__le16 framectrl;       /* frame control field from header */
+	__le16 seqctrl;         /* sequence control field from header */
+	__le16 bssid_tail;      /* last two octets of bssid */
+	__le16 sa_tail;         /* last two octets of SA */
+	__le16 da_tail;         /* last two octets of DA */
+	__le16 resvd;
+} __packed;
+
+struct ath_pktlog_hdr {
+	__le16 flags;
+	__le16 missed_cnt;
+	u8 log_type;
+	u8 mac_id;
+	__le16 size;
+	__le32 timestamp;
+	__le32 type_specific_data;
+} __packed;
+
+/* generic definitions for IEEE 802.11 frames */
+struct ieee80211_frame {
+	u8 i_fc[2];
+	u8 i_dur[2];
+	union {
+		struct {
+			u8 i_addr1[IEEE80211_ADDR_LEN];
+			u8 i_addr2[IEEE80211_ADDR_LEN];
+			u8 i_addr3[IEEE80211_ADDR_LEN];
+		};
+		u8 i_addr_all[3 * IEEE80211_ADDR_LEN];
+	};
+	u8 i_seq[2];
+} __packed;
+
+struct fw_pktlog_msdu_info {
+	__le32 num_msdu;
+	u8 bound_bmap[MAX_PKT_INFO_MSDU_ID >> 3];
+	__le16 id[MAX_PKT_INFO_MSDU_ID];
+} __packed;
+
+struct ath_pktlog_txctl {
+	struct ath_pktlog_hdr hdr;
+	struct txctl_frm_hdr frm_hdr;
+	__le32 txdesc_ctl[PKTLOG_MAX_TXCTL_WORDS];
+} __packed;
+
+struct ath_pktlog_msdu_id {
+	struct ath_pktlog_hdr hdr;
+	struct fw_pktlog_msdu_info msdu_info;
+} __packed;
+
+struct ath_pktlog_rx_info {
+	struct ath_pktlog_hdr pl_hdr;
+	struct rx_attention attention;
+	struct rx_frag_info frag_info;
+	struct rx_mpdu_start mpdu_start;
+	struct rx_msdu_start msdu_start;
+	struct rx_msdu_end msdu_end;
+	struct rx_mpdu_end mpdu_end;
+	struct rx_ppdu_start ppdu_start;
+	struct rx_ppdu_end ppdu_end;
+	u8 rx_hdr_status[RX_HTT_HDR_STATUS_LEN];
+} __packed;
+
 /* FIXME: How to calculate the buffer size sanely? */
 #define ATH10K_FW_STATS_BUF_SIZE (1024*1024)
+#define ATH10K_DATAPATH_BUF_SIZE (1024 * 1024)
 
 extern unsigned int ath10k_debug_mask;
 
 __printf(2, 3) void ath10k_info(struct ath10k *ar, const char *fmt, ...);
 __printf(2, 3) void ath10k_err(struct ath10k *ar, const char *fmt, ...);
 __printf(2, 3) void ath10k_warn(struct ath10k *ar, const char *fmt, ...);
+
+void ath10k_debug_print_hwfw_info(struct ath10k *ar);
+void ath10k_debug_print_board_info(struct ath10k *ar);
+void ath10k_debug_print_boot_info(struct ath10k *ar);
 void ath10k_print_driver_info(struct ath10k *ar);
 
 #ifdef CONFIG_ATH10K_DEBUGFS
@@ -79,6 +164,7 @@
 ath10k_debug_get_new_fw_crash_data(struct ath10k *ar);
 
 void ath10k_debug_dbglog_add(struct ath10k *ar, u8 *buffer, int len);
+int ath10k_rx_record_pktlog(struct ath10k *ar, struct sk_buff *skb);
 #define ATH10K_DFS_STAT_INC(ar, c) (ar->debug.dfs_stats.c++)
 
 void ath10k_debug_get_et_strings(struct ieee80211_hw *hw,
@@ -89,6 +175,9 @@
 void ath10k_debug_get_et_stats(struct ieee80211_hw *hw,
 			       struct ieee80211_vif *vif,
 			       struct ethtool_stats *stats, u64 *data);
+void fill_datapath_stats(struct ath10k *ar, struct ieee80211_rx_status *status);
+size_t get_datapath_stat(char *buf, struct ath10k *ar);
+int ath10k_pktlog_connect(struct ath10k *ar);
 #else
 static inline int ath10k_debug_start(struct ath10k *ar)
 {
@@ -133,12 +222,32 @@
 {
 }
 
+static inline int ath10k_rx_record_pktlog(struct ath10k *ar,
+					  struct sk_buff *skb)
+{
+	return 0;
+}
+
 static inline struct ath10k_fw_crash_data *
 ath10k_debug_get_new_fw_crash_data(struct ath10k *ar)
 {
 	return NULL;
 }
 
+static inline void fill_datapath_stats(struct ath10k *ar,
+				       struct ieee80211_rx_status *status)
+{
+}
+
+static inline size_t get_datapath_stat(char *buf, struct ath10k *ar)
+{
+	return 0;
+}
+
+static inline int ath10k_pktlog_connect(struct ath10k *ar)
+{
+	return 0;
+}
+
 #define ATH10K_DFS_STAT_INC(ar, c) do { } while (0)
 
 #define ath10k_debug_get_et_strings NULL
@@ -149,6 +258,17 @@
 #ifdef CONFIG_MAC80211_DEBUGFS
 void ath10k_sta_add_debugfs(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
 			    struct ieee80211_sta *sta, struct dentry *dir);
+void ath10k_sta_update_rx_duration(struct ath10k *ar,
+				   struct ath10k_fw_stats *stats);
+void ath10k_sta_statistics(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+			   struct ieee80211_sta *sta,
+			   struct station_info *sinfo);
+#else
+static inline
+void ath10k_sta_update_rx_duration(struct ath10k *ar,
+				   struct ath10k_fw_stats *stats)
+{
+}
 #endif /* CONFIG_MAC80211_DEBUGFS */
 
 #ifdef CONFIG_ATH10K_DEBUG
diff -ruw linux-4.4.115/drivers/net/wireless/ath/ath10k/hif.h linux-4.4.115-fbx/drivers/net/wireless/ath/ath10k/hif.h
--- linux-4.4.115/drivers/net/wireless/ath/ath10k/hif.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/net/wireless/ath/ath10k/hif.h	2019-01-22 16:16:25.415263721 +0100
@@ -26,7 +26,7 @@
 	u16 transfer_id;
 	void *transfer_context; /* NULL = tx completion callback not called */
 	void *vaddr; /* for debugging mostly */
-	u32 paddr;
+	dma_addr_t paddr;
 	u16 len;
 };
 
@@ -87,6 +87,10 @@
 
 	int (*suspend)(struct ath10k *ar);
 	int (*resume)(struct ath10k *ar);
+
+	/* fetch calibration data from target eeprom */
+	int (*fetch_cal_eeprom)(struct ath10k *ar, void **data,
+				size_t *data_len);
 };
 
 static inline int ath10k_hif_tx_sg(struct ath10k *ar, u8 pipe_id,
@@ -202,4 +206,14 @@
 	ar->hif.ops->write32(ar, address, data);
 }
 
+static inline int ath10k_hif_fetch_cal_eeprom(struct ath10k *ar,
+					      void **data,
+					      size_t *data_len)
+{
+	if (!ar->hif.ops->fetch_cal_eeprom)
+		return -EOPNOTSUPP;
+
+	return ar->hif.ops->fetch_cal_eeprom(ar, data, data_len);
+}
+
 #endif /* _HIF_H_ */
diff -ruw linux-4.4.115/drivers/net/wireless/ath/ath10k/htc.c linux-4.4.115-fbx/drivers/net/wireless/ath/ath10k/htc.c
--- linux-4.4.115/drivers/net/wireless/ath/ath10k/htc.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/net/wireless/ath/ath10k/htc.c	2019-01-22 16:16:25.415263721 +0100
@@ -44,7 +44,7 @@
 	skb_cb = ATH10K_SKB_CB(skb);
 	memset(skb_cb, 0, sizeof(*skb_cb));
 
-	ath10k_dbg(ar, ATH10K_DBG_HTC, "%s: skb %p\n", __func__, skb);
+	ath10k_dbg(ar, ATH10K_DBG_HTC, "%s: skb %pK\n", __func__, skb);
 	return skb;
 }
 
@@ -62,7 +62,7 @@
 {
 	struct ath10k *ar = ep->htc->ar;
 
-	ath10k_dbg(ar, ATH10K_DBG_HTC, "%s: ep %d skb %p\n", __func__,
+	ath10k_dbg(ar, ATH10K_DBG_HTC, "%s: ep %d skb %pK\n", __func__,
 		   ep->eid, skb);
 
 	ath10k_htc_restore_tx_skb(ep->htc, skb);
@@ -86,6 +86,7 @@
 	hdr->eid = ep->eid;
 	hdr->len = __cpu_to_le16(skb->len - sizeof(*hdr));
 	hdr->flags = 0;
+	if (ep->tx_credit_flow_enabled)
-	hdr->flags |= ATH10K_HTC_FLAG_NEED_CREDIT_UPDATE;
+		hdr->flags |= ATH10K_HTC_FLAG_NEED_CREDIT_UPDATE;
 
 	spin_lock_bh(&ep->htc->tx_lock);
@@ -404,7 +405,7 @@
 		goto out;
 	}
 
-	ath10k_dbg(ar, ATH10K_DBG_HTC, "htc rx completion ep %d skb %p\n",
+	ath10k_dbg(ar, ATH10K_DBG_HTC, "htc rx completion ep %d skb %pK\n",
 		   eid, skb);
 	ep->ep_ops.ep_rx_complete(ar, skb);
 
@@ -451,8 +452,16 @@
 		return "NMI Data";
 	case ATH10K_HTC_SVC_ID_HTT_DATA_MSG:
 		return "HTT Data";
+	case ATH10K_HTC_SVC_ID_HTT_DATA2_MSG:
+		return "HTT Data";
+	case ATH10K_HTC_SVC_ID_HTT_DATA3_MSG:
+		return "HTT Data";
+	case ATH10K_HTC_SVC_ID_HTT_IPA_MSG:
+		return "IPA";
 	case ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS:
 		return "RAW";
+	case ATH10K_HTC_SVC_ID_HTT_LOG_MSG:
+		return "PKTLOG";
 	}
 
 	return "Unknown";
diff -ruw linux-4.4.115/drivers/net/wireless/ath/ath10k/htc.h linux-4.4.115-fbx/drivers/net/wireless/ath/ath10k/htc.h
--- linux-4.4.115/drivers/net/wireless/ath/ath10k/htc.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/net/wireless/ath/ath10k/htc.h	2019-01-22 16:16:25.415263721 +0100
@@ -22,7 +22,6 @@
 #include <linux/list.h>
 #include <linux/bug.h>
 #include <linux/skbuff.h>
-#include <linux/semaphore.h>
 #include <linux/timer.h>
 
 struct ath10k;
@@ -223,7 +222,8 @@
 	ATH10K_HTC_SVC_GRP_WMI = 1,
 	ATH10K_HTC_SVC_GRP_NMI = 2,
 	ATH10K_HTC_SVC_GRP_HTT = 3,
-
+	ATH10K_IPA_SERVICE_GROUP = 5,
+	ATH10K_LOG_SERVICE_GROUP = 6,
 	ATH10K_HTC_SVC_GRP_TEST = 254,
 	ATH10K_HTC_SVC_GRP_LAST = 255,
 };
@@ -247,7 +247,10 @@
 	ATH10K_HTC_SVC_ID_NMI_DATA	= SVC(ATH10K_HTC_SVC_GRP_NMI, 1),
 
 	ATH10K_HTC_SVC_ID_HTT_DATA_MSG	= SVC(ATH10K_HTC_SVC_GRP_HTT, 0),
-
+	ATH10K_HTC_SVC_ID_HTT_DATA2_MSG = SVC(ATH10K_HTC_SVC_GRP_HTT, 1),
+	ATH10K_HTC_SVC_ID_HTT_DATA3_MSG = SVC(ATH10K_HTC_SVC_GRP_HTT, 2),
+	ATH10K_HTC_SVC_ID_HTT_IPA_MSG = SVC(ATH10K_IPA_SERVICE_GROUP, 0),
+	ATH10K_HTC_SVC_ID_HTT_LOG_MSG = SVC(ATH10K_LOG_SERVICE_GROUP, 0),
 	/* raw stream service (i.e. flash, tcmd, calibration apps) */
 	ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS = SVC(ATH10K_HTC_SVC_GRP_TEST, 0),
 };
diff -ruw linux-4.4.115/drivers/net/wireless/ath/ath10k/htt.c linux-4.4.115-fbx/drivers/net/wireless/ath/ath10k/htt.c
--- linux-4.4.115/drivers/net/wireless/ath/ath10k/htt.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/net/wireless/ath/ath10k/htt.c	2019-01-22 16:16:25.415263721 +0100
@@ -131,12 +131,12 @@
 	[HTT_10_4_T2H_MSG_TYPE_AGGR_CONF] = HTT_T2H_MSG_TYPE_AGGR_CONF,
 	[HTT_10_4_T2H_MSG_TYPE_TX_FETCH_IND] =
 				HTT_T2H_MSG_TYPE_TX_FETCH_IND,
-	[HTT_10_4_T2H_MSG_TYPE_TX_FETCH_CONF] =
-				HTT_T2H_MSG_TYPE_TX_FETCH_CONF,
+	[HTT_10_4_T2H_MSG_TYPE_TX_FETCH_CONFIRM] =
+				HTT_T2H_MSG_TYPE_TX_FETCH_CONFIRM,
 	[HTT_10_4_T2H_MSG_TYPE_STATS_NOUPLOAD] =
 				HTT_T2H_MSG_TYPE_STATS_NOUPLOAD,
-	[HTT_10_4_T2H_MSG_TYPE_TX_LOW_LATENCY_IND] =
-				HTT_T2H_MSG_TYPE_TX_LOW_LATENCY_IND,
+	[HTT_10_4_T2H_MSG_TYPE_TX_MODE_SWITCH_IND] =
+				HTT_T2H_MSG_TYPE_TX_MODE_SWITCH_IND,
 };
 
 int ath10k_htt_connect(struct ath10k_htt *htt)
@@ -149,7 +149,7 @@
 	memset(&conn_resp, 0, sizeof(conn_resp));
 
 	conn_req.ep_ops.ep_tx_complete = ath10k_htt_htc_tx_complete;
-	conn_req.ep_ops.ep_rx_complete = ath10k_htt_t2h_msg_handler;
+	conn_req.ep_ops.ep_rx_complete = ath10k_htt_htc_t2h_msg_handler;
 
 	/* connect to control service */
 	conn_req.service_id = ATH10K_HTC_SVC_ID_HTT_DATA_MSG;
@@ -183,7 +183,7 @@
 		8 + /* llc snap */
 		2; /* ip4 dscp or ip6 priority */
 
-	switch (ar->htt.op_version) {
+	switch (ar->running_fw->fw_file.htt_op_version) {
 	case ATH10K_FW_HTT_OP_VERSION_10_4:
 		ar->htt.t2h_msg_types = htt_10_4_t2h_msg_types;
 		ar->htt.t2h_msg_types_max = HTT_10_4_T2H_NUM_MSGS;
diff -ruw linux-4.4.115/drivers/net/wireless/ath/ath10k/htt.h linux-4.4.115-fbx/drivers/net/wireless/ath/ath10k/htt.h
--- linux-4.4.115/drivers/net/wireless/ath/ath10k/htt.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/net/wireless/ath/ath10k/htt.h	2019-01-22 16:16:25.415263721 +0100
@@ -22,6 +22,7 @@
 #include <linux/interrupt.h>
 #include <linux/dmapool.h>
 #include <linux/hashtable.h>
+#include <linux/kfifo.h>
 #include <net/mac80211.h>
 
 #include "htc.h"
@@ -52,6 +53,7 @@
 	/* This command is used for sending management frames in HTT < 3.0.
 	 * HTT >= 3.0 uses TX_FRM for everything. */
 	HTT_H2T_MSG_TYPE_MGMT_TX            = 7,
+	HTT_H2T_MSG_TYPE_TX_FETCH_RESP      = 11,
 
 	HTT_H2T_NUM_MSGS /* keep this last */
 };
@@ -97,7 +99,11 @@
 } __packed;
 
 struct htt_msdu_ext_desc {
+#ifdef CONFIG_ATH10K_SNOC
+	__le32 tso_flag[5];
+#else
 	__le32 tso_flag[3];
+#endif
 	__le16 ip_identification;
 	u8 flags;
 	u8 reserved;
@@ -116,6 +122,19 @@
 				 | HTT_MSDU_EXT_DESC_FLAG_TCP_IPV4_CSUM_ENABLE \
 				 | HTT_MSDU_EXT_DESC_FLAG_TCP_IPV6_CSUM_ENABLE)
 
+#define HTT_TX_IPV4_CSUM_EN	BIT(16)
+#define HTT_TX_UDP_IPV4_CSUM_EN	BIT(17)
+#define HTT_TX_UDP_IPV6_CSUM_EN	BIT(18)
+#define HTT_TX_TCP_IPV4_CSUM_EN	BIT(19)
+#define HTT_TX_TCP_IPV6_CSUM_EN	BIT(20)
+#define HTT_TX_PARTIAL_CSUM_EN	BIT(21)
+
+#define HTT_TX_CHECKSUM_ENABLE	(HTT_TX_IPV4_CSUM_EN \
+				| HTT_TX_UDP_IPV4_CSUM_EN \
+				| HTT_TX_UDP_IPV6_CSUM_EN \
+				| HTT_TX_TCP_IPV4_CSUM_EN \
+				| HTT_TX_TCP_IPV6_CSUM_EN)
+
 enum htt_data_tx_desc_flags0 {
 	HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT = 1 << 0,
 	HTT_DATA_TX_DESC_FLAGS0_NO_AGGR         = 1 << 1,
@@ -165,9 +184,19 @@
 	__le16 flags1; /* %HTT_DATA_TX_DESC_FLAGS1_ */
 	__le16 len;
 	__le16 id;
+#ifdef CONFIG_ATH10K_SNOC
+	__le32 frags_paddr_lo;
+	__le32 frags_paddr_hi;
+#else
 	__le32 frags_paddr;
+#endif
+	union {
+		__le32 peerid;
+		struct {
-	__le16 peerid;
-	__le16 freq;
+			__le16 peerid;
+			__le16 freq;
+		} __packed offchan_tx;
+	} __packed;
 	u8 prefetch[0]; /* start of frame, for FW classification engine */
 } __packed;
 
@@ -194,8 +223,15 @@
 #define HTT_RX_RING_SIZE_MAX 2048
 
 struct htt_rx_ring_setup_ring {
+#ifdef CONFIG_ATH10K_SNOC
+	__le32 fw_idx_shadow_reg_paddr_low;
+	__le32 fw_idx_shadow_reg_paddr_high;
+	__le32 rx_ring_base_paddr_low;
+	__le32 rx_ring_base_paddr_high;
+#else
 	__le32 fw_idx_shadow_reg_paddr;
 	__le32 rx_ring_base_paddr;
+#endif
 	__le16 rx_ring_len; /* in 4-byte words */
 	__le16 rx_ring_bufsize; /* rx skb size - in bytes */
 	__le16 flags; /* %HTT_RX_RING_FLAGS_ */
@@ -408,10 +444,10 @@
 	HTT_10_4_T2H_MSG_TYPE_EN_STATS               = 0x14,
 	HTT_10_4_T2H_MSG_TYPE_AGGR_CONF              = 0x15,
 	HTT_10_4_T2H_MSG_TYPE_TX_FETCH_IND           = 0x16,
-	HTT_10_4_T2H_MSG_TYPE_TX_FETCH_CONF          = 0x17,
+	HTT_10_4_T2H_MSG_TYPE_TX_FETCH_CONFIRM       = 0x17,
 	HTT_10_4_T2H_MSG_TYPE_STATS_NOUPLOAD         = 0x18,
 	/* 0x19 to 0x2f are reserved */
-	HTT_10_4_T2H_MSG_TYPE_TX_LOW_LATENCY_IND     = 0x30,
+	HTT_10_4_T2H_MSG_TYPE_TX_MODE_SWITCH_IND     = 0x30,
 	/* keep this last */
 	HTT_10_4_T2H_NUM_MSGS
 };
@@ -444,8 +480,8 @@
 	HTT_T2H_MSG_TYPE_TEST,
 	HTT_T2H_MSG_TYPE_EN_STATS,
 	HTT_T2H_MSG_TYPE_TX_FETCH_IND,
-	HTT_T2H_MSG_TYPE_TX_FETCH_CONF,
-	HTT_T2H_MSG_TYPE_TX_LOW_LATENCY_IND,
+	HTT_T2H_MSG_TYPE_TX_FETCH_CONFIRM,
+	HTT_T2H_MSG_TYPE_TX_MODE_SWITCH_IND,
 	/* keep this last */
 	HTT_T2H_NUM_MSGS
 };
@@ -478,10 +514,10 @@
 	__le32 status;
 } __packed;
 
-#define HTT_RX_INDICATION_INFO0_EXT_TID_MASK  (0x3F)
+#define HTT_RX_INDICATION_INFO0_EXT_TID_MASK  (0x1F)
 #define HTT_RX_INDICATION_INFO0_EXT_TID_LSB   (0)
-#define HTT_RX_INDICATION_INFO0_FLUSH_VALID   (1 << 6)
-#define HTT_RX_INDICATION_INFO0_RELEASE_VALID (1 << 7)
+#define HTT_RX_INDICATION_INFO0_FLUSH_VALID   (1 << 5)
+#define HTT_RX_INDICATION_INFO0_RELEASE_VALID (1 << 6)
 
 #define HTT_RX_INDICATION_INFO1_FLUSH_START_SEQNO_MASK   0x0000003F
 #define HTT_RX_INDICATION_INFO1_FLUSH_START_SEQNO_LSB    0
@@ -517,6 +553,9 @@
 #define HTT_RX_INDICATION_INFO2_SERVICE_MASK    0xFF000000
 #define HTT_RX_INDICATION_INFO2_SERVICE_LSB     24
 
+#define HTT_WCN3990_PADDR_MASK 0x1F
+#define HTT_WCN3990_ARCH_PADDR_MASK 0x1FFFFFFFFF
+
 enum htt_rx_legacy_rate {
 	HTT_RX_OFDM_48 = 0,
 	HTT_RX_OFDM_24 = 1,
@@ -588,7 +627,7 @@
 	/* only accept EAPOL frames */
 	HTT_RX_IND_MPDU_STATUS_UNAUTH_PEER,
 	HTT_RX_IND_MPDU_STATUS_OUT_OF_SYNC,
-	/* Non-data in promiscous mode */
+	/* Non-data in promiscuous mode */
 	HTT_RX_IND_MPDU_STATUS_MGMT_CTRL,
 	HTT_RX_IND_MPDU_STATUS_TKIP_MIC_ERR,
 	HTT_RX_IND_MPDU_STATUS_DECRYPT_ERR,
@@ -839,7 +878,11 @@
 } __packed;
 
 struct htt_rx_in_ord_msdu_desc {
+#ifdef CONFIG_ATH10K_SNOC
+	__le64 msdu_paddr;
+#else
 	__le32 msdu_paddr;
+#endif
 	__le16 msdu_len;
 	u8 fw_desc;
 	u8 reserved;
@@ -893,7 +936,7 @@
  *     Purpose: indicate how many 32-bit integers follow the message header
  *   - NUM_CHARS
  *     Bits 31:16
- *     Purpose: indicate how many 8-bit charaters follow the series of integers
+ *     Purpose: indicate how many 8-bit characters follow the series of integers
  */
 struct htt_rx_test {
 	u8 num_ints;
@@ -1035,10 +1078,10 @@
 	/* illegal rate phy errors  */
 	__le32 illgl_rate_phy_err;
 
-	/* wal pdev continous xretry */
+	/* wal pdev continuous xretry */
 	__le32 pdev_cont_xretry;
 
-	/* wal pdev continous xretry */
+	/* wal pdev continuous xretry */
 	__le32 pdev_tx_timeout;
 
 	/* wal pdev resets  */
@@ -1303,14 +1346,173 @@
 
 #define HTT_FRAG_DESC_BANK_CFG_INFO_PDEV_ID_MASK 0x03
 #define HTT_FRAG_DESC_BANK_CFG_INFO_PDEV_ID_LSB  0
-#define HTT_FRAG_DESC_BANK_CFG_INFO_SWAP         (1 << 2)
+#define HTT_FRAG_DESC_BANK_CFG_INFO_SWAP			BIT(2)
+#define HTT_FRAG_DESC_BANK_CFG_INFO_Q_STATE_VALID		BIT(3)
+#define HTT_FRAG_DESC_BANK_CFG_INFO_Q_STATE_DEPTH_TYPE_MASK	BIT(4)
+#define HTT_FRAG_DESC_BANK_CFG_INFO_Q_STATE_DEPTH_TYPE_LSB	4
+
+enum htt_q_depth_type {
+	HTT_Q_DEPTH_TYPE_BYTES = 0,
+	HTT_Q_DEPTH_TYPE_MSDUS = 1,
+};
+
+#define HTT_TX_Q_STATE_NUM_PEERS		(TARGET_10_4_NUM_QCACHE_PEERS_MAX + \
+						 TARGET_10_4_NUM_VDEVS)
+#define HTT_TX_Q_STATE_NUM_TIDS			8
+#define HTT_TX_Q_STATE_ENTRY_SIZE		1
+#define HTT_TX_Q_STATE_ENTRY_MULTIPLIER		0
+
+/**
+ * htt_q_state_conf - part of htt_frag_desc_bank_cfg for host q state config
+ *
+ * Defines host q state format and behavior. See htt_q_state.
+ *
+ * @record_size: Defines the size of each host q entry in bytes. In practice,
+ *	however, firmware (at least 10.4.3-00191) ignores this host
+ *	configuration value and uses a hardcoded value of 1.
+ * @record_multiplier: This is valid only when q depth type is MSDUs. It
+ *	defines the exponent for the power of 2 multiplication.
+ */
+struct htt_q_state_conf {
+	__le32 paddr;
+	__le16 num_peers;
+	__le16 num_tids;
+	u8 record_size;
+	u8 record_multiplier;
+	u8 pad[2];
+} __packed;
+
+struct bank_base_addr {
+	__le32 low;
+	__le32 high;
+};
 
 struct htt_frag_desc_bank_cfg {
 	u8 info; /* HTT_FRAG_DESC_BANK_CFG_INFO_ */
 	u8 num_banks;
 	u8 desc_size;
+#ifdef CONFIG_ATH10K_SNOC
+	struct bank_base_addr bank_base_addrs[HTT_FRAG_DESC_BANK_MAX];
+#else
 	__le32 bank_base_addrs[HTT_FRAG_DESC_BANK_MAX];
+#endif
 	struct htt_frag_desc_bank_id bank_id[HTT_FRAG_DESC_BANK_MAX];
+	struct htt_q_state_conf q_state;
+} __packed;
+
+#define HTT_TX_Q_STATE_ENTRY_COEFFICIENT	128
+#define HTT_TX_Q_STATE_ENTRY_FACTOR_MASK	0x3f
+#define HTT_TX_Q_STATE_ENTRY_FACTOR_LSB		0
+#define HTT_TX_Q_STATE_ENTRY_EXP_MASK		0xc0
+#define HTT_TX_Q_STATE_ENTRY_EXP_LSB		6
+
+/**
+ * htt_q_state - shared between host and firmware via DMA
+ *
+ * This structure is used for the host to expose its software queue state to
+ * firmware so that its rate control can schedule fetch requests for optimized
+ * performance. This is most notably used for MU-MIMO aggregation when multiple
+ * MU clients are connected.
+ *
+ * @count: Each element defines the host queue depth. When q depth type was
+ *	configured as HTT_Q_DEPTH_TYPE_BYTES then each entry is defined as:
+ *	FACTOR * 128 * 8^EXP (see HTT_TX_Q_STATE_ENTRY_FACTOR_MASK and
+ *	HTT_TX_Q_STATE_ENTRY_EXP_MASK). When q depth type was configured as
+ *	HTT_Q_DEPTH_TYPE_MSDUS the number of packets is scaled by 2 **
+ *	record_multiplier (see htt_q_state_conf).
+ * @map: Used by firmware to quickly check which host queues are not empty.
+ *	It is simply a bitmap, one bit per queue.
+ * @seq: Used by firmware to quickly check if the host queues were updated
+ *	since it last checked.
+ *
+ * FIXME: Is the q_state map[] size calculation really correct?
+ */
+struct htt_q_state {
+	u8 count[HTT_TX_Q_STATE_NUM_TIDS][HTT_TX_Q_STATE_NUM_PEERS];
+	u32 map[HTT_TX_Q_STATE_NUM_TIDS][(HTT_TX_Q_STATE_NUM_PEERS + 31) / 32];
+	__le32 seq;
+} __packed;
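+
+/*
+ * Illustrative decoder (a sketch, not part of the patch; the helper name is
+ * hypothetical) for one count[] entry when the queue depth type is
+ * HTT_Q_DEPTH_TYPE_BYTES: the byte packs FACTOR in bits 5:0 and EXP in bits
+ * 7:6, and the depth is FACTOR * 128 * 8^EXP bytes.
+ */
+static inline u32 htt_q_state_depth_bytes(u8 entry)
+{
+	u32 factor = (entry & HTT_TX_Q_STATE_ENTRY_FACTOR_MASK) >>
+		     HTT_TX_Q_STATE_ENTRY_FACTOR_LSB;
+	u32 exp = (entry & HTT_TX_Q_STATE_ENTRY_EXP_MASK) >>
+		  HTT_TX_Q_STATE_ENTRY_EXP_LSB;
+
+	/* 128 * 8^exp == HTT_TX_Q_STATE_ENTRY_COEFFICIENT << (3 * exp) */
+	return (factor * HTT_TX_Q_STATE_ENTRY_COEFFICIENT) << (3 * exp);
+}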
+
+#define HTT_TX_FETCH_RECORD_INFO_PEER_ID_MASK	0x0fff
+#define HTT_TX_FETCH_RECORD_INFO_PEER_ID_LSB	0
+#define HTT_TX_FETCH_RECORD_INFO_TID_MASK	0xf000
+#define HTT_TX_FETCH_RECORD_INFO_TID_LSB	12
+
+struct htt_tx_fetch_record {
+	__le16 info; /* HTT_TX_FETCH_IND_RECORD_INFO_ */
+	__le16 num_msdus;
+	__le32 num_bytes;
+} __packed;
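+
+/* Sketch (hypothetical helper, not in the patch): unpacking a fetch
+ * record's peer id and tid from the packed info field.
+ */
+static inline void htt_tx_fetch_record_unpack(const struct htt_tx_fetch_record *record,
+					      u16 *peer_id, u8 *tid)
+{
+	u16 info = __le16_to_cpu(record->info);
+
+	*peer_id = (info & HTT_TX_FETCH_RECORD_INFO_PEER_ID_MASK) >>
+		   HTT_TX_FETCH_RECORD_INFO_PEER_ID_LSB;
+	*tid = (info & HTT_TX_FETCH_RECORD_INFO_TID_MASK) >>
+	       HTT_TX_FETCH_RECORD_INFO_TID_LSB;
+}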
+
+struct htt_tx_fetch_ind {
+	u8 pad0;
+	__le16 fetch_seq_num;
+	__le32 token;
+	__le16 num_resp_ids;
+	__le16 num_records;
+	struct htt_tx_fetch_record records[0];
+	__le32 resp_ids[0]; /* ath10k_htt_get_tx_fetch_ind_resp_ids() */
+} __packed;
+
+static inline void *
+ath10k_htt_get_tx_fetch_ind_resp_ids(struct htt_tx_fetch_ind *ind)
+{
+	return (void *)&ind->records[le16_to_cpu(ind->num_records)];
+}
+
+struct htt_tx_fetch_resp {
+	u8 pad0;
+	__le16 resp_id;
+	__le16 fetch_seq_num;
+	__le16 num_records;
+	__le32 token;
+	struct htt_tx_fetch_record records[0];
+} __packed;
+
+struct htt_tx_fetch_confirm {
+	u8 pad0;
+	__le16 num_resp_ids;
+	__le32 resp_ids[0];
+} __packed;
+
+enum htt_tx_mode_switch_mode {
+	HTT_TX_MODE_SWITCH_PUSH = 0,
+	HTT_TX_MODE_SWITCH_PUSH_PULL = 1,
+};
+
+#define HTT_TX_MODE_SWITCH_IND_INFO0_ENABLE		BIT(0)
+#define HTT_TX_MODE_SWITCH_IND_INFO0_NUM_RECORDS_MASK	0xfffe
+#define HTT_TX_MODE_SWITCH_IND_INFO0_NUM_RECORDS_LSB	1
+
+#define HTT_TX_MODE_SWITCH_IND_INFO1_MODE_MASK		0x0003
+#define HTT_TX_MODE_SWITCH_IND_INFO1_MODE_LSB		0
+#define HTT_TX_MODE_SWITCH_IND_INFO1_THRESHOLD_MASK	0xfffc
+#define HTT_TX_MODE_SWITCH_IND_INFO1_THRESHOLD_LSB	2
+
+#define HTT_TX_MODE_SWITCH_RECORD_INFO0_PEER_ID_MASK	0x0fff
+#define HTT_TX_MODE_SWITCH_RECORD_INFO0_PEER_ID_LSB	0
+#define HTT_TX_MODE_SWITCH_RECORD_INFO0_TID_MASK	0xf000
+#define HTT_TX_MODE_SWITCH_RECORD_INFO0_TID_LSB		12
+
+struct htt_tx_mode_switch_record {
+	__le16 info0; /* HTT_TX_MODE_SWITCH_RECORD_INFO0_ */
+	__le16 num_max_msdus;
+} __packed;
+
+struct htt_tx_mode_switch_ind {
+	u8 pad0;
+	__le16 info0; /* HTT_TX_MODE_SWITCH_IND_INFO0_ */
+	__le16 info1; /* HTT_TX_MODE_SWITCH_IND_INFO1_ */
+	u8 pad1[2];
+	struct htt_tx_mode_switch_record records[0];
+} __packed;
+
+struct htt_channel_change {
+	u8 pad[3];
+	__le32 freq;
+	__le32 center_freq1;
+	__le32 center_freq2;
+	__le32 phymode;
 } __packed;
 
 union htt_rx_pn_t {
@@ -1318,10 +1520,10 @@
 	u32 pn24;
 
 	/* TKIP or CCMP: 48-bit PN */
-	u_int64_t pn48;
+	u64 pn48;
 
 	/* WAPI: 128-bit PN */
-	u_int64_t pn128[2];
+	u64 pn128[2];
 };
 
 struct htt_cmd {
@@ -1335,6 +1537,7 @@
 		struct htt_oob_sync_req oob_sync_req;
 		struct htt_aggr_conf aggr_conf;
 		struct htt_frag_desc_bank_cfg frag_desc_bank_cfg;
+		struct htt_tx_fetch_resp tx_fetch_resp;
 	};
 } __packed;
 
@@ -1359,16 +1562,25 @@
 		struct htt_rx_pn_ind rx_pn_ind;
 		struct htt_rx_offload_ind rx_offload_ind;
 		struct htt_rx_in_ord_ind rx_in_ord_ind;
+		struct htt_tx_fetch_ind tx_fetch_ind;
+		struct htt_tx_fetch_confirm tx_fetch_confirm;
+		struct htt_tx_mode_switch_ind tx_mode_switch_ind;
+		struct htt_channel_change chan_change;
 	};
 } __packed;
 
 /*** host side structures follow ***/
 
 struct htt_tx_done {
-	u32 msdu_id;
-	bool discard;
-	bool no_ack;
-	bool success;
+	u16 msdu_id;
+	u16 status;
+};
+
+enum htt_tx_compl_state {
+	HTT_TX_COMPL_STATE_NONE,
+	HTT_TX_COMPL_STATE_ACK,
+	HTT_TX_COMPL_STATE_NOACK,
+	HTT_TX_COMPL_STATE_DISCARD,
 };
 
 struct htt_peer_map_event {
@@ -1395,7 +1607,6 @@
 	u8 target_version_major;
 	u8 target_version_minor;
 	struct completion target_version_received;
-	enum ath10k_fw_htt_op_version op_version;
 	u8 max_num_amsdu;
 	u8 max_num_ampdu;
 
@@ -1433,7 +1644,11 @@
 		 * rx buffers the host SW provides for the MAC HW to
 		 * fill.
 		 */
+#ifdef CONFIG_ATH10K_SNOC
+		__le64 *paddrs_ring;
+#else
 		__le32 *paddrs_ring;
+#endif
 
 		/*
 		 * Base address of ring, as a "physical" device address
@@ -1489,17 +1704,19 @@
 	struct idr pending_tx;
 	wait_queue_head_t empty_tx_wq;
 
+	/* FIFO for storing tx done status {ack, no-ack, discard} and msdu id */
+	DECLARE_KFIFO_PTR(txdone_fifo, struct htt_tx_done);
+
 	/* set if host-fw communication goes haywire
 	 * used to avoid further failures */
 	bool rx_confused;
-	struct tasklet_struct rx_replenish_task;
+	atomic_t num_mpdus_ready;
 
 	/* This is used to group tx/rx completions separately and process them
 	 * in batches to reduce cache stalls */
-	struct tasklet_struct txrx_compl_task;
-	struct sk_buff_head tx_compl_q;
 	struct sk_buff_head rx_compl_q;
 	struct sk_buff_head rx_in_ord_compl_q;
+	struct sk_buff_head tx_fetch_ind_q;
 
 	/* rx_status template */
 	struct ieee80211_rx_status rx_status;
@@ -1513,20 +1730,36 @@
 		dma_addr_t paddr;
 		struct ath10k_htt_txbuf *vaddr;
 	} txbuf;
+
+	struct {
+		bool enabled;
+		struct htt_q_state *vaddr;
+		dma_addr_t paddr;
+		u16 num_push_allowed;
+		u16 num_peers;
+		u16 num_tids;
+		enum htt_tx_mode_switch_mode mode;
+		enum htt_q_depth_type type;
+	} tx_q_state;
 };
 
 #define RX_HTT_HDR_STATUS_LEN 64
 
-/* This structure layout is programmed via rx ring setup
- * so that FW knows how to transfer the rx descriptor to the host.
- * Buffers like this are placed on the rx ring. */
-struct htt_rx_desc {
+struct htt_host_fw_desc_base {
 	union {
 		/* This field is filled on the host using the msdu buffer
 		 * from htt_rx_indication */
 		struct fw_rx_desc_base fw_desc;
 		u32 pad;
 	} __packed;
+};
+
+/* This structure layout is programmed via rx ring setup
+ * so that FW knows how to transfer the rx descriptor to the host.
+ * Buffers like this are placed on the rx ring.
+ */
+struct htt_rx_desc {
+	struct htt_host_fw_desc_base fw_desc_base;
 	struct {
 		struct rx_attention attention;
 		struct rx_frag_info frag_info;
@@ -1555,7 +1788,7 @@
 
 /* Refill a bunch of RX buffers for each refill round so that FW/HW can handle
  * aggregated traffic more nicely. */
-#define ATH10K_HTT_MAX_NUM_REFILL 16
+#define ATH10K_HTT_MAX_NUM_REFILL 100
 
 /*
  * DMA_MAP expects the buffer to be an integral number of cache lines.
@@ -1583,7 +1816,8 @@
 void ath10k_htt_rx_free(struct ath10k_htt *htt);
 
 void ath10k_htt_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb);
-void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb);
+void ath10k_htt_htc_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb);
+bool ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb);
 int ath10k_htt_h2t_ver_req_msg(struct ath10k_htt *htt);
 int ath10k_htt_h2t_stats_req(struct ath10k_htt *htt, u8 mask, u64 cookie);
 int ath10k_htt_send_frag_desc_bank_cfg(struct ath10k_htt *htt);
@@ -1592,11 +1826,33 @@
 				u8 max_subfrms_ampdu,
 				u8 max_subfrms_amsdu);
 void ath10k_htt_hif_tx_complete(struct ath10k *ar, struct sk_buff *skb);
+int ath10k_htt_tx_fetch_resp(struct ath10k *ar,
+			     __le32 token,
+			     __le16 fetch_seq_num,
+			     struct htt_tx_fetch_record *records,
+			     size_t num_records);
+
+void ath10k_htt_tx_txq_update(struct ieee80211_hw *hw,
+			      struct ieee80211_txq *txq);
+void ath10k_htt_tx_txq_recalc(struct ieee80211_hw *hw,
+			      struct ieee80211_txq *txq);
+void ath10k_htt_tx_txq_sync(struct ath10k *ar);
+void ath10k_htt_tx_dec_pending(struct ath10k_htt *htt);
+int ath10k_htt_tx_inc_pending(struct ath10k_htt *htt);
+void ath10k_htt_tx_mgmt_dec_pending(struct ath10k_htt *htt);
+int ath10k_htt_tx_mgmt_inc_pending(struct ath10k_htt *htt, bool is_mgmt,
+				   bool is_presp);
 
-void __ath10k_htt_tx_dec_pending(struct ath10k_htt *htt, bool limit_mgmt_desc);
 int ath10k_htt_tx_alloc_msdu_id(struct ath10k_htt *htt, struct sk_buff *skb);
 void ath10k_htt_tx_free_msdu_id(struct ath10k_htt *htt, u16 msdu_id);
 int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *);
-int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *);
+int ath10k_htt_tx(struct ath10k_htt *htt,
+		  enum ath10k_hw_txrx_mode txmode,
+		  struct sk_buff *msdu);
+void ath10k_htt_rx_pktlog_completion_handler(struct ath10k *ar,
+					     struct sk_buff *skb);
+int ath10k_htt_txrx_compl_task(struct ath10k *ar, int budget);
+struct sk_buff *ath10k_htt_tx_find_msdu_by_id(struct ath10k_htt *htt,
+					      u16 msdu_id);
 
 #endif
diff -ruw linux-4.4.115/drivers/net/wireless/ath/ath10k/htt_rx.c linux-4.4.115-fbx/drivers/net/wireless/ath/ath10k/htt_rx.c
--- linux-4.4.115/drivers/net/wireless/ath/ath10k/htt_rx.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/net/wireless/ath/ath10k/htt_rx.c	2019-10-29 09:26:24.457211144 +0100
@@ -27,15 +27,17 @@
 
 #define HTT_RX_RING_SIZE HTT_RX_RING_SIZE_MAX
 #define HTT_RX_RING_FILL_LEVEL (((HTT_RX_RING_SIZE) / 2) - 1)
+#define HTT_RX_RING_FILL_LEVEL_DUAL_MAC (HTT_RX_RING_SIZE - 1)
 
 /* when under memory pressure rx ring refill may fail and needs a retry */
 #define HTT_RX_RING_REFILL_RETRY_MS 50
 
+#define HTT_RX_RING_REFILL_RESCHED_MS 5
+
 static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb);
-static void ath10k_htt_txrx_compl_task(unsigned long ptr);
 
 static struct sk_buff *
-ath10k_htt_rx_find_skb_paddr(struct ath10k *ar, u32 paddr)
+ath10k_htt_rx_find_skb_paddr(struct ath10k *ar, u64 paddr)
 {
 	struct ath10k_skb_rxcb *rxcb;
 
@@ -128,13 +130,16 @@
 		rxcb = ATH10K_SKB_RXCB(skb);
 		rxcb->paddr = paddr;
 		htt->rx_ring.netbufs_ring[idx] = skb;
+		if (QCA_REV_WCN3990(htt->ar))
+			htt->rx_ring.paddrs_ring[idx] = __cpu_to_le64(paddr);
+		else
-		htt->rx_ring.paddrs_ring[idx] = __cpu_to_le32(paddr);
+			htt->rx_ring.paddrs_ring[idx] = __cpu_to_le32(paddr);
 		htt->rx_ring.fill_cnt++;
 
 		if (htt->rx_ring.in_ord_rx) {
 			hash_add(htt->rx_ring.skb_table,
 				 &ATH10K_SKB_RXCB(skb)->hlist,
-				 (u32)paddr);
+				 paddr);
 		}
 
 		num--;
@@ -192,7 +197,8 @@
 		mod_timer(&htt->rx_ring.refill_retry_timer, jiffies +
 			  msecs_to_jiffies(HTT_RX_RING_REFILL_RETRY_MS));
 	} else if (num_deficit > 0) {
-		tasklet_schedule(&htt->rx_replenish_task);
+		mod_timer(&htt->rx_ring.refill_retry_timer, jiffies +
+			  msecs_to_jiffies(HTT_RX_RING_REFILL_RESCHED_MS));
 	}
 	spin_unlock_bh(&htt->rx_ring.lock);
 }
@@ -223,12 +229,10 @@
 void ath10k_htt_rx_free(struct ath10k_htt *htt)
 {
 	del_timer_sync(&htt->rx_ring.refill_retry_timer);
-	tasklet_kill(&htt->rx_replenish_task);
-	tasklet_kill(&htt->txrx_compl_task);
 
-	skb_queue_purge(&htt->tx_compl_q);
 	skb_queue_purge(&htt->rx_compl_q);
 	skb_queue_purge(&htt->rx_in_ord_compl_q);
+	skb_queue_purge(&htt->tx_fetch_ind_q);
 
 	ath10k_htt_rx_ring_free(htt);
 
@@ -281,7 +285,6 @@
 
 /* return: < 0 fatal error, 0 - non chained msdu, 1 chained msdu */
 static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
-				   u8 **fw_desc, int *fw_desc_len,
 				   struct sk_buff_head *amsdu)
 {
 	struct ath10k *ar = htt->ar;
@@ -323,48 +326,6 @@
 			return -EIO;
 		}
 
-		/*
-		 * Copy the FW rx descriptor for this MSDU from the rx
-		 * indication message into the MSDU's netbuf. HL uses the
-		 * same rx indication message definition as LL, and simply
-		 * appends new info (fields from the HW rx desc, and the
-		 * MSDU payload itself). So, the offset into the rx
-		 * indication message only has to account for the standard
-		 * offset of the per-MSDU FW rx desc info within the
-		 * message, and how many bytes of the per-MSDU FW rx desc
-		 * info have already been consumed. (And the endianness of
-		 * the host, since for a big-endian host, the rx ind
-		 * message contents, including the per-MSDU rx desc bytes,
-		 * were byteswapped during upload.)
-		 */
-		if (*fw_desc_len > 0) {
-			rx_desc->fw_desc.info0 = **fw_desc;
-			/*
-			 * The target is expected to only provide the basic
-			 * per-MSDU rx descriptors. Just to be sure, verify
-			 * that the target has not attached extension data
-			 * (e.g. LRO flow ID).
-			 */
-
-			/* or more, if there's extension data */
-			(*fw_desc)++;
-			(*fw_desc_len)--;
-		} else {
-			/*
-			 * When an oversized AMSDU happened, FW will lost
-			 * some of MSDU status - in this case, the FW
-			 * descriptors provided will be less than the
-			 * actual MSDUs inside this MPDU. Mark the FW
-			 * descriptors so that it will still deliver to
-			 * upper stack, if no CRC error for this MPDU.
-			 *
-			 * FIX THIS - the FW descriptors are actually for
-			 * MSDUs in the end of this A-MSDU instead of the
-			 * beginning.
-			 */
-			rx_desc->fw_desc.info0 = 0;
-		}
-
 		msdu_len_invalid = !!(__le32_to_cpu(rx_desc->attention.flags)
 					& (RX_ATTENTION_FLAGS_MPDU_LENGTH_ERR |
 					   RX_ATTENTION_FLAGS_MSDU_LENGTH_ERR));
@@ -423,15 +384,8 @@
 	return msdu_chaining;
 }
 
-static void ath10k_htt_rx_replenish_task(unsigned long ptr)
-{
-	struct ath10k_htt *htt = (struct ath10k_htt *)ptr;
-
-	ath10k_htt_rx_msdu_buff_replenish(htt);
-}
-
 static struct sk_buff *ath10k_htt_rx_pop_paddr(struct ath10k_htt *htt,
-					       u32 paddr)
+					       u64 paddr)
 {
 	struct ath10k *ar = htt->ar;
 	struct ath10k_skb_rxcb *rxcb;
@@ -466,7 +420,7 @@
 	struct sk_buff *msdu;
 	int msdu_count;
 	bool is_offload;
-	u32 paddr;
+	u64 paddr;
 
 	lockdep_assert_held(&htt->rx_ring.lock);
 
@@ -474,8 +428,12 @@
 	is_offload = !!(ev->info & HTT_RX_IN_ORD_IND_INFO_OFFLOAD_MASK);
 
 	while (msdu_count--) {
+#ifdef CONFIG_ATH10K_SNOC
+		paddr = __le64_to_cpu(msdu_desc->msdu_paddr);
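+		/* keep only the 37 address bits covered by
+		 * HTT_WCN3990_ARCH_PADDR_MASK
+		 */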
+		paddr &= HTT_WCN3990_ARCH_PADDR_MASK;
+#else
 		paddr = __le32_to_cpu(msdu_desc->msdu_paddr);
-
+#endif
 		msdu = ath10k_htt_rx_pop_paddr(htt, paddr);
 		if (!msdu) {
 			__skb_queue_purge(list);
@@ -521,7 +479,15 @@
 	 */
 	htt->rx_ring.size = HTT_RX_RING_SIZE;
 	htt->rx_ring.size_mask = htt->rx_ring.size - 1;
+
+	switch (ar->hw_rev) {
+	case ATH10K_HW_WCN3990:
+		htt->rx_ring.fill_level = HTT_RX_RING_FILL_LEVEL_DUAL_MAC;
+		break;
+	default:
 	htt->rx_ring.fill_level = HTT_RX_RING_FILL_LEVEL;
+		break;
+	}
 
 	if (!is_power_of_2(htt->rx_ring.size)) {
 		ath10k_warn(ar, "htt rx ring size is not power of 2\n");
@@ -536,7 +502,7 @@
 
 	size = htt->rx_ring.size * sizeof(htt->rx_ring.paddrs_ring);
 
-	vaddr = dma_alloc_coherent(htt->ar->dev, size, &paddr, GFP_DMA);
+	vaddr = dma_alloc_coherent(htt->ar->dev, size, &paddr, GFP_KERNEL);
 	if (!vaddr)
 		goto err_dma_ring;
 
@@ -545,7 +511,7 @@
 
 	vaddr = dma_alloc_coherent(htt->ar->dev,
 				   sizeof(*htt->rx_ring.alloc_idx.vaddr),
-				   &paddr, GFP_DMA);
+				   &paddr, GFP_KERNEL);
 	if (!vaddr)
 		goto err_dma_idx;
 
@@ -563,15 +529,10 @@
 	htt->rx_ring.sw_rd_idx.msdu_payld = 0;
 	hash_init(htt->rx_ring.skb_table);
 
-	tasklet_init(&htt->rx_replenish_task, ath10k_htt_rx_replenish_task,
-		     (unsigned long)htt);
-
-	skb_queue_head_init(&htt->tx_compl_q);
 	skb_queue_head_init(&htt->rx_compl_q);
 	skb_queue_head_init(&htt->rx_in_ord_compl_q);
-
-	tasklet_init(&htt->txrx_compl_task, ath10k_htt_txrx_compl_task,
-		     (unsigned long)htt);
+	skb_queue_head_init(&htt->tx_fetch_ind_q);
+	atomic_set(&htt->num_mpdus_ready, 0);
 
 	ath10k_dbg(ar, ATH10K_DBG_BOOT, "htt rx ring size %d fill_level %d\n",
 		   htt->rx_ring.size, htt->rx_ring.fill_level);
@@ -674,7 +635,7 @@
 		rate &= ~RX_PPDU_START_RATE_FLAG;
 
 		sband = &ar->mac.sbands[status->band];
-		status->rate_idx = ath10k_mac_hw_rate_to_idx(sband, rate);
+		status->rate_idx = ath10k_mac_hw_rate_to_idx(sband, rate, cck);
 		break;
 	case HTT_RX_HT:
 	case HTT_RX_HT_WITH_TXBF:
@@ -798,7 +759,7 @@
 	if (WARN_ON_ONCE(!arvif))
 		return NULL;
 
-	if (WARN_ON(ath10k_mac_vif_chan(arvif->vif, &def)))
+	if (ath10k_mac_vif_chan(arvif->vif, &def))
 		return NULL;
 
 	return def.chan;
@@ -860,6 +821,8 @@
 		ch = ath10k_htt_rx_h_vdev_channel(ar, vdev_id);
 	if (!ch)
 		ch = ath10k_htt_rx_h_any_channel(ar);
+	if (!ch)
+		ch = ar->tgt_oper_chan;
 	spin_unlock_bh(&ar->data_lock);
 
 	if (!ch)
@@ -977,9 +940,9 @@
 
 	status = IEEE80211_SKB_RXCB(skb);
 	*status = *rx_status;
-
+	fill_datapath_stats(ar, status);
 	ath10k_dbg(ar, ATH10K_DBG_DATA,
-		   "rx skb %p len %u peer %pM %s %s sn %u %s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i mic-err %i amsdu-more %i\n",
+		   "rx skb %pK len %u peer %pM %s %s sn %u %s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i mic-err %i amsdu-more %i\n",
 		   skb,
 		   skb->len,
 		   ieee80211_get_SA(hdr),
@@ -987,7 +950,8 @@
 		   is_multicast_ether_addr(ieee80211_get_DA(hdr)) ?
 							"mcast" : "ucast",
 		   (__le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4,
-		   status->flag == 0 ? "legacy" : "",
+		   (status->flag & (RX_FLAG_HT | RX_FLAG_VHT)) == 0 ?
+							"legacy" : "",
 		   status->flag & RX_FLAG_HT ? "ht" : "",
 		   status->flag & RX_FLAG_VHT ? "vht" : "",
 		   status->flag & RX_FLAG_40MHZ ? "40" : "",
@@ -1005,7 +969,8 @@
 	trace_ath10k_rx_hdr(ar, skb->data, skb->len);
 	trace_ath10k_rx_payload(ar, skb->data, skb->len);
 
-	ieee80211_rx(ar->hw, skb);
+	ath10k_rx_record_pktlog(ar, skb);
+	ieee80211_rx_napi(ar->hw, NULL, skb, &ar->napi);
 }
 
 static int ath10k_htt_rx_nwifi_hdrlen(struct ath10k *ar,
@@ -1014,7 +979,7 @@
 	int len = ieee80211_hdrlen(hdr->frame_control);
 
 	if (!test_bit(ATH10K_FW_FEATURE_NO_NWIFI_DECAP_4ADDR_PADDING,
-		      ar->fw_features))
+		      ar->running_fw->fw_file.fw_features))
 		len = round_up(len, 4);
 
 	return len;
@@ -1076,14 +1041,18 @@
 	hdr = (void *)msdu->data;
 
 	/* Tail */
-	skb_trim(msdu, msdu->len - ath10k_htt_rx_crypto_tail_len(ar, enctype));
+	if (status->flag & RX_FLAG_IV_STRIPPED)
+		skb_trim(msdu, msdu->len -
+			 ath10k_htt_rx_crypto_tail_len(ar, enctype));
 
 	/* MMIC */
-	if (!ieee80211_has_morefrags(hdr->frame_control) &&
+	if ((status->flag & RX_FLAG_MMIC_STRIPPED) &&
+	    !ieee80211_has_morefrags(hdr->frame_control) &&
 	    enctype == HTT_RX_MPDU_ENCRYPT_TKIP_WPA)
 		skb_trim(msdu, msdu->len - 8);
 
 	/* Head */
+	if (status->flag & RX_FLAG_IV_STRIPPED) {
 	hdr_len = ieee80211_hdrlen(hdr->frame_control);
 	crypto_len = ath10k_htt_rx_crypto_param_len(ar, enctype);
 
@@ -1091,6 +1060,7 @@
 		(void *)msdu->data, hdr_len);
 	skb_pull(msdu, crypto_len);
 }
+}
 
 static void ath10k_htt_rx_h_undecap_nwifi(struct ath10k *ar,
 					  struct sk_buff *msdu,
@@ -1098,9 +1068,11 @@
 					  const u8 first_hdr[64])
 {
 	struct ieee80211_hdr *hdr;
+	struct htt_rx_desc *rxd;
 	size_t hdr_len;
 	u8 da[ETH_ALEN];
 	u8 sa[ETH_ALEN];
+	int l3_pad_bytes;
 
 	/* Delivered decapped frame:
 	 * [nwifi 802.11 header] <-- replaced with 802.11 hdr
@@ -1114,7 +1086,13 @@
 	 */
 
 	/* pull decapped header and copy SA & DA */
-	hdr = (struct ieee80211_hdr *)msdu->data;
+	rxd = (void *)msdu->data - sizeof(*rxd);
+
+	l3_pad_bytes = ath10k_rx_desc_get_l3_pad_bytes(&ar->hw_params, rxd);
+	skb_put(msdu, l3_pad_bytes);
+
+	hdr = (struct ieee80211_hdr *)(msdu->data + l3_pad_bytes);
+
 	hdr_len = ath10k_htt_rx_nwifi_hdrlen(ar, hdr);
 	ether_addr_copy(da, ieee80211_get_DA(hdr));
 	ether_addr_copy(sa, ieee80211_get_SA(hdr));
@@ -1142,6 +1120,7 @@
 	size_t hdr_len, crypto_len;
 	void *rfc1042;
 	bool is_first, is_last, is_amsdu;
+	int bytes_aligned = ar->hw_params.decap_align_bytes;
 
 	rxd = (void *)msdu->data - sizeof(*rxd);
 	hdr = (void *)rxd->rx_hdr_status;
@@ -1158,8 +1137,8 @@
 		hdr_len = ieee80211_hdrlen(hdr->frame_control);
 		crypto_len = ath10k_htt_rx_crypto_param_len(ar, enctype);
 
-		rfc1042 += round_up(hdr_len, 4) +
-			   round_up(crypto_len, 4);
+		rfc1042 += round_up(hdr_len, bytes_aligned) +
+			   round_up(crypto_len, bytes_aligned);
 	}
 
 	if (is_amsdu)
@@ -1180,6 +1159,8 @@
 	void *rfc1042;
 	u8 da[ETH_ALEN];
 	u8 sa[ETH_ALEN];
+	int l3_pad_bytes;
+	struct htt_rx_desc *rxd;
 
 	/* Delivered decapped frame:
 	 * [eth header] <-- replaced with 802.11 hdr & rfc1042/llc
@@ -1190,6 +1171,11 @@
 	if (WARN_ON_ONCE(!rfc1042))
 		return;
 
+	rxd = (void *)msdu->data - sizeof(*rxd);
+	l3_pad_bytes = ath10k_rx_desc_get_l3_pad_bytes(&ar->hw_params, rxd);
+	skb_put(msdu, l3_pad_bytes);
+	skb_pull(msdu, l3_pad_bytes);
+
 	/* pull decapped header and copy SA & DA */
 	eth = (struct ethhdr *)msdu->data;
 	ether_addr_copy(da, eth->h_dest);
@@ -1220,6 +1206,8 @@
 {
 	struct ieee80211_hdr *hdr;
 	size_t hdr_len;
+	int l3_pad_bytes;
+	struct htt_rx_desc *rxd;
 
 	/* Delivered decapped frame:
 	 * [amsdu header] <-- replaced with 802.11 hdr
@@ -1227,7 +1215,11 @@
 	 * [payload]
 	 */
 
-	skb_pull(msdu, sizeof(struct amsdu_subframe_hdr));
+	rxd = (void *)msdu->data - sizeof(*rxd);
+	l3_pad_bytes = ath10k_rx_desc_get_l3_pad_bytes(&ar->hw_params, rxd);
+
+	skb_put(msdu, l3_pad_bytes);
+	skb_pull(msdu, sizeof(struct amsdu_subframe_hdr) + l3_pad_bytes);
 
 	hdr = (struct ieee80211_hdr *)first_hdr;
 	hdr_len = ieee80211_hdrlen(hdr->frame_control);
@@ -1330,6 +1322,7 @@
 	bool has_tkip_err;
 	bool has_peer_idx_invalid;
 	bool is_decrypted;
+	bool is_mgmt;
 	u32 attention;
 
 	if (skb_queue_empty(amsdu))
@@ -1338,6 +1331,9 @@
 	first = skb_peek(amsdu);
 	rxd = (void *)first->data - sizeof(*rxd);
 
+	is_mgmt = !!(rxd->attention.flags &
+		     __cpu_to_le32(RX_ATTENTION_FLAGS_MGMT_TYPE));
+
 	enctype = MS(__le32_to_cpu(rxd->mpdu_start.info0),
 		     RX_MPDU_START_INFO0_ENCRYPT_TYPE);
 
@@ -1379,6 +1375,7 @@
 			  RX_FLAG_MMIC_ERROR |
 			  RX_FLAG_DECRYPTED |
 			  RX_FLAG_IV_STRIPPED |
+			  RX_FLAG_ONLY_MONITOR |
 			  RX_FLAG_MMIC_STRIPPED);
 
 	if (has_fcs_err)
@@ -1387,10 +1384,21 @@
 	if (has_tkip_err)
 		status->flag |= RX_FLAG_MMIC_ERROR;
 
-	if (is_decrypted)
-		status->flag |= RX_FLAG_DECRYPTED |
-				RX_FLAG_IV_STRIPPED |
+	/* Firmware reports all necessary management frames via WMI already.
+	 * They are not reported to monitor interfaces at all so pass the ones
+	 * coming via HTT to monitor interfaces instead. This simplifies
+	 * matters a lot.
+	 */
+	if (is_mgmt)
+		status->flag |= RX_FLAG_ONLY_MONITOR;
+
+	if (is_decrypted) {
+		status->flag |= RX_FLAG_DECRYPTED;
+
+		if (likely(!is_mgmt))
+			status->flag |= RX_FLAG_IV_STRIPPED |
 				RX_FLAG_MMIC_STRIPPED;
+	}
 
 	skb_queue_walk(amsdu, msdu) {
 		ath10k_htt_rx_h_csum_offload(msdu);
@@ -1403,6 +1411,8 @@
 		 */
 		if (!is_decrypted)
 			continue;
+		if (is_mgmt)
+			continue;
 
 		hdr = (void *)msdu->data;
 		hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED);
@@ -1503,14 +1513,6 @@
 					struct sk_buff_head *amsdu,
 					struct ieee80211_rx_status *rx_status)
 {
-	struct sk_buff *msdu;
-	struct htt_rx_desc *rxd;
-	bool is_mgmt;
-	bool has_fcs_err;
-
-	msdu = skb_peek(amsdu);
-	rxd = (void *)msdu->data - sizeof(*rxd);
-
 	/* FIXME: It might be a good idea to do some fuzzy-testing to drop
 	 * invalid/dangerous frames.
 	 */
@@ -1520,23 +1522,6 @@
 		return false;
 	}
 
-	is_mgmt = !!(rxd->attention.flags &
-		     __cpu_to_le32(RX_ATTENTION_FLAGS_MGMT_TYPE));
-	has_fcs_err = !!(rxd->attention.flags &
-			 __cpu_to_le32(RX_ATTENTION_FLAGS_FCS_ERR));
-
-	/* Management frames are handled via WMI events. The pros of such
-	 * approach is that channel is explicitly provided in WMI events
-	 * whereas HTT doesn't provide channel information for Rxed frames.
-	 *
-	 * However some firmware revisions don't report corrupted frames via
-	 * WMI so don't drop them.
-	 */
-	if (is_mgmt && !has_fcs_err) {
-		ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx mgmt ctrl\n");
-		return false;
-	}
-
 	if (test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags)) {
 		ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx cac running\n");
 		return false;
@@ -1558,42 +1543,23 @@
 	__skb_queue_purge(amsdu);
 }
 
-static void ath10k_htt_rx_handler(struct ath10k_htt *htt,
-				  struct htt_rx_indication *rx)
+static int ath10k_htt_rx_handle_amsdu(struct ath10k_htt *htt)
 {
 	struct ath10k *ar = htt->ar;
 	struct ieee80211_rx_status *rx_status = &htt->rx_status;
-	struct htt_rx_indication_mpdu_range *mpdu_ranges;
 	struct sk_buff_head amsdu;
-	int num_mpdu_ranges;
-	int fw_desc_len;
-	u8 *fw_desc;
-	int i, ret, mpdu_count = 0;
+	int ret, num_msdus;
 
-	lockdep_assert_held(&htt->rx_ring.lock);
-
-	if (htt->rx_confused)
-		return;
-
-	fw_desc_len = __le16_to_cpu(rx->prefix.fw_rx_desc_bytes);
-	fw_desc = (u8 *)&rx->fw_desc;
-
-	num_mpdu_ranges = MS(__le32_to_cpu(rx->hdr.info1),
-			     HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES);
-	mpdu_ranges = htt_rx_ind_get_mpdu_ranges(rx);
-
-	ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx ind: ",
-			rx, sizeof(*rx) +
-			(sizeof(struct htt_rx_indication_mpdu_range) *
-				num_mpdu_ranges));
+	__skb_queue_head_init(&amsdu);
 
-	for (i = 0; i < num_mpdu_ranges; i++)
-		mpdu_count += mpdu_ranges[i].mpdu_count;
+	spin_lock_bh(&htt->rx_ring.lock);
+	if (htt->rx_confused) {
+		spin_unlock_bh(&htt->rx_ring.lock);
+		return -EIO;
+	}
+	ret = ath10k_htt_rx_amsdu_pop(htt, &amsdu);
+	spin_unlock_bh(&htt->rx_ring.lock);
 
-	while (mpdu_count--) {
-		__skb_queue_head_init(&amsdu);
-		ret = ath10k_htt_rx_amsdu_pop(htt, &fw_desc,
-					      &fw_desc_len, &amsdu);
 		if (ret < 0) {
 			ath10k_warn(ar, "rx ring became corrupted: %d\n", ret);
 			__skb_queue_purge(&amsdu);
@@ -1601,69 +1567,43 @@
 			 * device instead of leaving it inoperable.
 			 */
 			htt->rx_confused = true;
-			break;
+		return ret;
 		}
 
+	num_msdus = skb_queue_len(&amsdu);
 		ath10k_htt_rx_h_ppdu(ar, &amsdu, rx_status, 0xffff);
 		ath10k_htt_rx_h_unchain(ar, &amsdu, ret > 0);
 		ath10k_htt_rx_h_filter(ar, &amsdu, rx_status);
 		ath10k_htt_rx_h_mpdu(ar, &amsdu, rx_status);
 		ath10k_htt_rx_h_deliver(ar, &amsdu, rx_status);
-	}
 
-	tasklet_schedule(&htt->rx_replenish_task);
+	return num_msdus;
 }
 
-static void ath10k_htt_rx_frag_handler(struct ath10k_htt *htt,
-				       struct htt_rx_fragment_indication *frag)
+static void ath10k_htt_rx_proc_rx_ind(struct ath10k_htt *htt,
+				      struct htt_rx_indication *rx)
 {
 	struct ath10k *ar = htt->ar;
-	struct ieee80211_rx_status *rx_status = &htt->rx_status;
-	struct sk_buff_head amsdu;
-	int ret;
-	u8 *fw_desc;
-	int fw_desc_len;
-
-	fw_desc_len = __le16_to_cpu(frag->fw_rx_desc_bytes);
-	fw_desc = (u8 *)frag->fw_msdu_rx_desc;
-
-	__skb_queue_head_init(&amsdu);
-
-	spin_lock_bh(&htt->rx_ring.lock);
-	ret = ath10k_htt_rx_amsdu_pop(htt, &fw_desc, &fw_desc_len,
-				      &amsdu);
-	spin_unlock_bh(&htt->rx_ring.lock);
-
-	tasklet_schedule(&htt->rx_replenish_task);
-
-	ath10k_dbg(ar, ATH10K_DBG_HTT_DUMP, "htt rx frag ahead\n");
+	struct htt_rx_indication_mpdu_range *mpdu_ranges;
+	int num_mpdu_ranges;
+	int i, mpdu_count = 0;
 
-	if (ret) {
-		ath10k_warn(ar, "failed to pop amsdu from httr rx ring for fragmented rx %d\n",
-			    ret);
-		__skb_queue_purge(&amsdu);
-		return;
-	}
+	num_mpdu_ranges = MS(__le32_to_cpu(rx->hdr.info1),
+			     HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES);
+	mpdu_ranges = htt_rx_ind_get_mpdu_ranges(rx);
 
-	if (skb_queue_len(&amsdu) != 1) {
-		ath10k_warn(ar, "failed to pop frag amsdu: too many msdus\n");
-		__skb_queue_purge(&amsdu);
-		return;
-	}
+	ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx ind: ",
+			rx, sizeof(*rx) +
+			(sizeof(struct htt_rx_indication_mpdu_range) *
+				num_mpdu_ranges));
 
-	ath10k_htt_rx_h_ppdu(ar, &amsdu, rx_status, 0xffff);
-	ath10k_htt_rx_h_filter(ar, &amsdu, rx_status);
-	ath10k_htt_rx_h_mpdu(ar, &amsdu, rx_status);
-	ath10k_htt_rx_h_deliver(ar, &amsdu, rx_status);
+	for (i = 0; i < num_mpdu_ranges; i++)
+		mpdu_count += mpdu_ranges[i].mpdu_count;
 
-	if (fw_desc_len > 0) {
-		ath10k_dbg(ar, ATH10K_DBG_HTT,
-			   "expecting more fragmented rx in one indication %d\n",
-			   fw_desc_len);
-	}
+	atomic_add(mpdu_count, &htt->num_mpdus_ready);
 }
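
The new ath10k_htt_rx_proc_rx_ind() defers all real work: the interrupt path only sums MPDU counts into an atomic counter, which the NAPI poll later drains one MPDU at a time. A hedged sketch of that split; it is only safe because there is a single consumer (the poll loop), as in the driver, so read-then-decrement cannot underflow:

#include <linux/atomic.h>
#include <linux/types.h>

static atomic_t num_mpdus_ready = ATOMIC_INIT(0);

/* producer: indication handler, interrupt context */
static void on_rx_ind(int mpdu_count)
{
	atomic_add(mpdu_count, &num_mpdus_ready);
}

/* consumer: the single poll context */
static bool take_one_mpdu(void)
{
	if (!atomic_read(&num_mpdus_ready))
		return false;

	atomic_dec(&num_mpdus_ready);
	return true;
}
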
 
-static void ath10k_htt_rx_frm_tx_compl(struct ath10k *ar,
+static void ath10k_htt_rx_tx_compl_ind(struct ath10k *ar,
 				       struct sk_buff *skb)
 {
 	struct ath10k_htt *htt = &ar->htt;
@@ -1675,19 +1615,19 @@
 
 	switch (status) {
 	case HTT_DATA_TX_STATUS_NO_ACK:
-		tx_done.no_ack = true;
+		tx_done.status = HTT_TX_COMPL_STATE_NOACK;
 		break;
 	case HTT_DATA_TX_STATUS_OK:
-		tx_done.success = true;
+		tx_done.status = HTT_TX_COMPL_STATE_ACK;
 		break;
 	case HTT_DATA_TX_STATUS_DISCARD:
 	case HTT_DATA_TX_STATUS_POSTPONE:
 	case HTT_DATA_TX_STATUS_DOWNLOAD_FAIL:
-		tx_done.discard = true;
+		tx_done.status = HTT_TX_COMPL_STATE_DISCARD;
 		break;
 	default:
 		ath10k_warn(ar, "unhandled tx completion status %d\n", status);
-		tx_done.discard = true;
+		tx_done.status = HTT_TX_COMPL_STATE_DISCARD;
 		break;
 	}
 
@@ -1697,9 +1637,22 @@
 	for (i = 0; i < resp->data_tx_completion.num_msdus; i++) {
 		msdu_id = resp->data_tx_completion.msdus[i];
 		tx_done.msdu_id = __le16_to_cpu(msdu_id);
+
+		/* kfifo_put: In practice firmware shouldn't fire off per-CE
+		 * interrupt and main interrupt (MSI/-X range case) for the same
+		 * HTC service so it should be safe to use kfifo_put w/o lock.
+		 *
+		 * From kfifo_put() documentation:
+		 *  Note that with only one concurrent reader and one concurrent
+		 *  writer, you don't need extra locking to use these macros.
+		 */
+		if (!kfifo_put(&htt->txdone_fifo, tx_done)) {
+			ath10k_warn(ar, "txdone fifo overrun, msdu_id %d status %d\n",
+				    tx_done.msdu_id, tx_done.status);
 		ath10k_txrx_tx_unref(htt, &tx_done);
 	}
 }
+}
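
The kfifo_put() comment above leans on kfifo's lockless guarantee for a single producer and a single consumer. A self-contained sketch of the same arrangement; the record type and fifo size are assumptions for illustration, not the driver's definitions:

#include <linux/kfifo.h>
#include <linux/printk.h>
#include <linux/types.h>

struct tx_done_rec {
	u16 msdu_id;
	u8 status;
};

/* statically initialized fifo; size must be a power of two */
static DEFINE_KFIFO(txdone_fifo, struct tx_done_rec, 128);

/* producer: completion interrupt context, no lock taken */
static void on_tx_compl(struct tx_done_rec rec)
{
	if (!kfifo_put(&txdone_fifo, rec))	/* returns 0 when full */
		pr_warn("txdone fifo overrun\n");
}

/* consumer: poll context, also lockless */
static void drain_tx_compl(void)
{
	struct tx_done_rec rec;

	while (kfifo_get(&txdone_fifo, &rec))
		;	/* complete rec.msdu_id here */
}
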
 
 static void ath10k_htt_rx_addba(struct ath10k *ar, struct htt_resp *resp)
 {
@@ -1781,7 +1734,8 @@
 	spin_unlock_bh(&ar->data_lock);
 }
 
-static int ath10k_htt_rx_extract_amsdu(struct sk_buff_head *list,
+static int ath10k_htt_rx_extract_amsdu(struct ath10k *ar,
+				       struct sk_buff_head *list,
 				       struct sk_buff_head *amsdu)
 {
 	struct sk_buff *msdu;
@@ -1802,6 +1756,9 @@
 			break;
 	}
 
+	if (QCA_REV_WCN3990(ar))
+		return 0;
+
 	msdu = skb_peek_tail(amsdu);
 	rxd = (void *)msdu->data - sizeof(*rxd);
 	if (!(rxd->msdu_end.common.info0 &
@@ -1832,7 +1789,7 @@
 			RX_FLAG_MMIC_STRIPPED;
 }
 
-static void ath10k_htt_rx_h_rx_offload(struct ath10k *ar,
+static int ath10k_htt_rx_h_rx_offload(struct ath10k *ar,
 				       struct sk_buff_head *list)
 {
 	struct ath10k_htt *htt = &ar->htt;
@@ -1840,6 +1797,7 @@
 	struct htt_rx_offload_msdu *rx;
 	struct sk_buff *msdu;
 	size_t offset;
+	int num_msdu = 0;
 
 	while ((msdu = __skb_dequeue(list))) {
 		/* Offloaded frames don't have Rx descriptor. Instead they have
@@ -1879,10 +1837,12 @@
 		ath10k_htt_rx_h_rx_offload_prot(status, msdu);
 		ath10k_htt_rx_h_channel(ar, status, NULL, rx->vdev_id);
 		ath10k_process_rx(ar, status, msdu);
+		num_msdu++;
 	}
+	return num_msdu;
 }
 
-static void ath10k_htt_rx_in_ord_ind(struct ath10k *ar, struct sk_buff *skb)
+static int ath10k_htt_rx_in_ord_ind(struct ath10k *ar, struct sk_buff *skb)
 {
 	struct ath10k_htt *htt = &ar->htt;
 	struct htt_resp *resp = (void *)skb->data;
@@ -1895,12 +1855,12 @@
 	u8 tid;
 	bool offload;
 	bool frag;
-	int ret;
+	int ret, num_msdus = 0;
 
 	lockdep_assert_held(&htt->rx_ring.lock);
 
 	if (htt->rx_confused)
-		return;
+		return -EIO;
 
 	skb_pull(skb, sizeof(resp->hdr));
 	skb_pull(skb, sizeof(resp->rx_in_ord_ind));
@@ -1919,7 +1879,7 @@
 
 	if (skb->len < msdu_count * sizeof(*resp->rx_in_ord_ind.msdu_descs)) {
 		ath10k_warn(ar, "dropping invalid in order rx indication\n");
-		return;
+		return -EINVAL;
 	}
 
 	/* The event can deliver more than 1 A-MSDU. Each A-MSDU is later
@@ -1930,18 +1890,18 @@
 	if (ret < 0) {
 		ath10k_warn(ar, "failed to pop paddr list: %d\n", ret);
 		htt->rx_confused = true;
-		return;
+		return -EIO;
 	}
 
 	/* Offloaded frames are very different and need to be handled
 	 * separately.
 	 */
 	if (offload)
-		ath10k_htt_rx_h_rx_offload(ar, &list);
+		num_msdus = ath10k_htt_rx_h_rx_offload(ar, &list);
 
 	while (!skb_queue_empty(&list)) {
 		__skb_queue_head_init(&amsdu);
-		ret = ath10k_htt_rx_extract_amsdu(&list, &amsdu);
+		ret = ath10k_htt_rx_extract_amsdu(ar, &list, &amsdu);
 		switch (ret) {
 		case 0:
 			/* Note: The in-order indication may report interleaved
@@ -1950,6 +1910,7 @@
 			 * better to report something than nothing though. This
 			 * should still give an idea about rx rate to the user.
 			 */
+			num_msdus += skb_queue_len(&amsdu);
 			ath10k_htt_rx_h_ppdu(ar, &amsdu, status, vdev_id);
 			ath10k_htt_rx_h_filter(ar, &amsdu, status);
 			ath10k_htt_rx_h_mpdu(ar, &amsdu, status);
@@ -1962,14 +1923,299 @@
 			ath10k_warn(ar, "failed to extract amsdu: %d\n", ret);
 			htt->rx_confused = true;
 			__skb_queue_purge(&list);
+			return -EIO;
+		}
+	}
+	return num_msdus;
+}
+
+static void ath10k_htt_rx_tx_fetch_resp_id_confirm(struct ath10k *ar,
+						   const __le32 *resp_ids,
+						   int num_resp_ids)
+{
+	int i;
+	u32 resp_id;
+
+	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch confirm num_resp_ids %d\n",
+		   num_resp_ids);
+
+	for (i = 0; i < num_resp_ids; i++) {
+		resp_id = le32_to_cpu(resp_ids[i]);
+
+		ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch confirm resp_id %u\n",
+			   resp_id);
+
+		/* TODO: free resp_id */
+	}
+}
+
+static void ath10k_htt_rx_tx_fetch_ind(struct ath10k *ar, struct sk_buff *skb)
+{
+	struct ieee80211_hw *hw = ar->hw;
+	struct ieee80211_txq *txq;
+	struct htt_resp *resp = (struct htt_resp *)skb->data;
+	struct htt_tx_fetch_record *record;
+	size_t len;
+	size_t max_num_bytes;
+	size_t max_num_msdus;
+	size_t num_bytes;
+	size_t num_msdus;
+	const __le32 *resp_ids;
+	u16 num_records;
+	u16 num_resp_ids;
+	u16 peer_id;
+	u8 tid;
+	int ret;
+	int i;
+
+	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch ind\n");
+
+	len = sizeof(resp->hdr) + sizeof(resp->tx_fetch_ind);
+	if (unlikely(skb->len < len)) {
+		ath10k_warn(ar, "received corrupted tx_fetch_ind event: buffer too short\n");
 			return;
 		}
+
+	num_records = le16_to_cpu(resp->tx_fetch_ind.num_records);
+	num_resp_ids = le16_to_cpu(resp->tx_fetch_ind.num_resp_ids);
+
+	len += sizeof(resp->tx_fetch_ind.records[0]) * num_records;
+	len += sizeof(resp->tx_fetch_ind.resp_ids[0]) * num_resp_ids;
+
+	if (unlikely(skb->len < len)) {
+		ath10k_warn(ar, "received corrupted tx_fetch_ind event: too many records/resp_ids\n");
+		return;
+	}
+
+	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch ind num records %hu num resps %hu seq %hu\n",
+		   num_records, num_resp_ids,
+		   le16_to_cpu(resp->tx_fetch_ind.fetch_seq_num));
+
+	if (!ar->htt.tx_q_state.enabled) {
+		ath10k_warn(ar, "received unexpected tx_fetch_ind event: not enabled\n");
+		return;
+	}
+
+	if (ar->htt.tx_q_state.mode == HTT_TX_MODE_SWITCH_PUSH) {
+		ath10k_warn(ar, "received unexpected tx_fetch_ind event: in push mode\n");
+		return;
+	}
+
+	rcu_read_lock();
+
+	for (i = 0; i < num_records; i++) {
+		record = &resp->tx_fetch_ind.records[i];
+		peer_id = MS(le16_to_cpu(record->info),
+			     HTT_TX_FETCH_RECORD_INFO_PEER_ID);
+		tid = MS(le16_to_cpu(record->info),
+			 HTT_TX_FETCH_RECORD_INFO_TID);
+		max_num_msdus = le16_to_cpu(record->num_msdus);
+		max_num_bytes = le32_to_cpu(record->num_bytes);
+
+		ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch record %i peer_id %hu tid %hhu msdus %zu bytes %zu\n",
+			   i, peer_id, tid, max_num_msdus, max_num_bytes);
+
+		if (unlikely(peer_id >= ar->htt.tx_q_state.num_peers) ||
+		    unlikely(tid >= ar->htt.tx_q_state.num_tids)) {
+			ath10k_warn(ar, "received out of range peer_id %hu tid %hhu\n",
+				    peer_id, tid);
+			continue;
+		}
+
+		spin_lock_bh(&ar->data_lock);
+		txq = ath10k_mac_txq_lookup(ar, peer_id, tid);
+		spin_unlock_bh(&ar->data_lock);
+
+		/* It is okay to release the lock and use txq because RCU read
+		 * lock is held.
+		 */
+
+		if (unlikely(!txq)) {
+			ath10k_warn(ar, "failed to lookup txq for peer_id %hu tid %hhu\n",
+				    peer_id, tid);
+			continue;
+		}
+
+		num_msdus = 0;
+		num_bytes = 0;
+
+		while (num_msdus < max_num_msdus &&
+		       num_bytes < max_num_bytes) {
+			ret = ath10k_mac_tx_push_txq(hw, txq);
+			if (ret < 0)
+				break;
+
+			num_msdus++;
+			num_bytes += ret;
+		}
+
+		record->num_msdus = cpu_to_le16(num_msdus);
+		record->num_bytes = cpu_to_le32(num_bytes);
+
+		ath10k_htt_tx_txq_recalc(hw, txq);
+	}
+
+	rcu_read_unlock();
+
+	resp_ids = ath10k_htt_get_tx_fetch_ind_resp_ids(&resp->tx_fetch_ind);
+	ath10k_htt_rx_tx_fetch_resp_id_confirm(ar, resp_ids, num_resp_ids);
+
+	ret = ath10k_htt_tx_fetch_resp(ar,
+				       resp->tx_fetch_ind.token,
+				       resp->tx_fetch_ind.fetch_seq_num,
+				       resp->tx_fetch_ind.records,
+				       num_records);
+	if (unlikely(ret)) {
+		ath10k_warn(ar, "failed to submit tx fetch resp for token 0x%08x: %d\n",
+			    le32_to_cpu(resp->tx_fetch_ind.token), ret);
+		/* FIXME: request fw restart */
+	}
+
+	ath10k_htt_tx_txq_sync(ar);
+}
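
The record loop in ath10k_htt_rx_tx_fetch_ind() pushes frames until either the MSDU or the byte budget supplied by firmware is exhausted, then writes the amounts actually consumed back into the record for the fetch response. A hedged sketch of that bounded-push idiom, with push_one() as a hypothetical stand-in for ath10k_mac_tx_push_txq():

#include <linux/errno.h>
#include <linux/types.h>

/* stand-in: returns bytes pushed, or a negative errno once drained */
static int push_one(void)
{
	return -ENOENT;
}

static void push_record(size_t max_msdus, size_t max_bytes,
			u16 *out_msdus, u32 *out_bytes)
{
	size_t num_msdus = 0, num_bytes = 0;
	int ret;

	/* stop at whichever budget runs out first */
	while (num_msdus < max_msdus && num_bytes < max_bytes) {
		ret = push_one();
		if (ret < 0)
			break;

		num_msdus++;
		num_bytes += ret;
	}

	/* echo the real totals back, as the handler does in the record */
	*out_msdus = num_msdus;
	*out_bytes = num_bytes;
}
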
+
+static void ath10k_htt_rx_tx_fetch_confirm(struct ath10k *ar,
+					   struct sk_buff *skb)
+{
+	const struct htt_resp *resp = (void *)skb->data;
+	size_t len;
+	int num_resp_ids;
+
+	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch confirm\n");
+
+	len = sizeof(resp->hdr) + sizeof(resp->tx_fetch_confirm);
+	if (unlikely(skb->len < len)) {
+		ath10k_warn(ar, "received corrupted tx_fetch_confirm event: buffer too short\n");
+		return;
+	}
+
+	num_resp_ids = le16_to_cpu(resp->tx_fetch_confirm.num_resp_ids);
+	len += sizeof(resp->tx_fetch_confirm.resp_ids[0]) * num_resp_ids;
+
+	if (unlikely(skb->len < len)) {
+		ath10k_warn(ar, "received corrupted tx_fetch_confirm event: resp_ids buffer overflow\n");
+		return;
+	}
+
+	ath10k_htt_rx_tx_fetch_resp_id_confirm(ar,
+					       resp->tx_fetch_confirm.resp_ids,
+					       num_resp_ids);
+}
+
+static void ath10k_htt_rx_tx_mode_switch_ind(struct ath10k *ar,
+					     struct sk_buff *skb)
+{
+	const struct htt_resp *resp = (void *)skb->data;
+	const struct htt_tx_mode_switch_record *record;
+	struct ieee80211_txq *txq;
+	struct ath10k_txq *artxq;
+	size_t len;
+	size_t num_records;
+	enum htt_tx_mode_switch_mode mode;
+	bool enable;
+	u16 info0;
+	u16 info1;
+	u16 threshold;
+	u16 peer_id;
+	u8 tid;
+	int i;
+
+	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx mode switch ind\n");
+
+	len = sizeof(resp->hdr) + sizeof(resp->tx_mode_switch_ind);
+	if (unlikely(skb->len < len)) {
+		ath10k_warn(ar, "received corrupted tx_mode_switch_ind event: buffer too short\n");
+		return;
+	}
+
+	info0 = le16_to_cpu(resp->tx_mode_switch_ind.info0);
+	info1 = le16_to_cpu(resp->tx_mode_switch_ind.info1);
+
+	enable = !!(info0 & HTT_TX_MODE_SWITCH_IND_INFO0_ENABLE);
+	num_records = MS(info0, HTT_TX_MODE_SWITCH_IND_INFO0_NUM_RECORDS);
+	mode = MS(info1, HTT_TX_MODE_SWITCH_IND_INFO1_MODE);
+	threshold = MS(info1, HTT_TX_MODE_SWITCH_IND_INFO1_THRESHOLD);
+
+	ath10k_dbg(ar, ATH10K_DBG_HTT,
+		   "htt rx tx mode switch ind info0 0x%04hx info1 0x%04hx enable %d num records %zd mode %d threshold %hu\n",
+		   info0, info1, enable, num_records, mode, threshold);
+
+	len += sizeof(resp->tx_mode_switch_ind.records[0]) * num_records;
+
+	if (unlikely(skb->len < len)) {
+		ath10k_warn(ar, "received corrupted tx_mode_switch_ind event: too many records\n");
+		return;
+	}
+
+	switch (mode) {
+	case HTT_TX_MODE_SWITCH_PUSH:
+	case HTT_TX_MODE_SWITCH_PUSH_PULL:
+		break;
+	default:
+		ath10k_warn(ar, "received invalid tx_mode_switch_ind mode %d, ignoring\n",
+			    mode);
+		return;
+	}
+
+	if (!enable)
+		return;
+
+	ar->htt.tx_q_state.enabled = enable;
+	ar->htt.tx_q_state.mode = mode;
+	ar->htt.tx_q_state.num_push_allowed = threshold;
+
+	rcu_read_lock();
+
+	for (i = 0; i < num_records; i++) {
+		record = &resp->tx_mode_switch_ind.records[i];
+		info0 = le16_to_cpu(record->info0);
+		peer_id = MS(info0, HTT_TX_MODE_SWITCH_RECORD_INFO0_PEER_ID);
+		tid = MS(info0, HTT_TX_MODE_SWITCH_RECORD_INFO0_TID);
+
+		if (unlikely(peer_id >= ar->htt.tx_q_state.num_peers) ||
+		    unlikely(tid >= ar->htt.tx_q_state.num_tids)) {
+			ath10k_warn(ar, "received out of range peer_id %hu tid %hhu\n",
+				    peer_id, tid);
+			continue;
+		}
+
+		spin_lock_bh(&ar->data_lock);
+		txq = ath10k_mac_txq_lookup(ar, peer_id, tid);
+		spin_unlock_bh(&ar->data_lock);
+
+		/* It is okay to release the lock and use txq because RCU read
+		 * lock is held.
+		 */
+
+		if (unlikely(!txq)) {
+			ath10k_warn(ar, "failed to lookup txq for peer_id %hu tid %hhu\n",
+				    peer_id, tid);
+			continue;
 	}
 
-	tasklet_schedule(&htt->rx_replenish_task);
+		spin_lock_bh(&ar->htt.tx_lock);
+		artxq = (void *)txq->drv_priv;
+		artxq->num_push_allowed = le16_to_cpu(record->num_max_msdus);
+		spin_unlock_bh(&ar->htt.tx_lock);
 }
 
-void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
+	rcu_read_unlock();
+
+	ath10k_mac_tx_push_pending(ar);
+}
+
+void ath10k_htt_htc_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
+{
+	bool release;
+
+	release = ath10k_htt_t2h_msg_handler(ar, skb);
+
+	/* Free the indication buffer */
+	if (release)
+		dev_kfree_skb_any(skb);
+}
+
+bool ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
 {
 	struct ath10k_htt *htt = &ar->htt;
 	struct htt_resp *resp = (struct htt_resp *)skb->data;
@@ -1985,8 +2231,7 @@
 	if (resp->hdr.msg_type >= ar->htt.t2h_msg_types_max) {
 		ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx, unsupported msg_type: 0x%0X\n max: 0x%0X",
 			   resp->hdr.msg_type, ar->htt.t2h_msg_types_max);
-		dev_kfree_skb_any(skb);
-		return;
+		return true;
 	}
 	type = ar->htt.t2h_msg_types[resp->hdr.msg_type];
 
@@ -1998,11 +2243,8 @@
 		break;
 	}
 	case HTT_T2H_MSG_TYPE_RX_IND:
-		spin_lock_bh(&htt->rx_ring.lock);
-		__skb_queue_tail(&htt->rx_compl_q, skb);
-		spin_unlock_bh(&htt->rx_ring.lock);
-		tasklet_schedule(&htt->txrx_compl_task);
-		return;
+		ath10k_htt_rx_proc_rx_ind(htt, &resp->rx_ind);
+		break;
 	case HTT_T2H_MSG_TYPE_PEER_MAP: {
 		struct htt_peer_map_event ev = {
 			.vdev_id = resp->peer_map.vdev_id,
@@ -2023,28 +2265,31 @@
 		struct htt_tx_done tx_done = {};
 		int status = __le32_to_cpu(resp->mgmt_tx_completion.status);
 
-		tx_done.msdu_id =
-			__le32_to_cpu(resp->mgmt_tx_completion.desc_id);
+		tx_done.msdu_id = __le32_to_cpu(resp->mgmt_tx_completion.desc_id);
 
 		switch (status) {
 		case HTT_MGMT_TX_STATUS_OK:
-			tx_done.success = true;
+			tx_done.status = HTT_TX_COMPL_STATE_ACK;
 			break;
 		case HTT_MGMT_TX_STATUS_RETRY:
-			tx_done.no_ack = true;
+			tx_done.status = HTT_TX_COMPL_STATE_NOACK;
 			break;
 		case HTT_MGMT_TX_STATUS_DROP:
-			tx_done.discard = true;
+			tx_done.status = HTT_TX_COMPL_STATE_DISCARD;
 			break;
 		}
 
-		ath10k_txrx_tx_unref(htt, &tx_done);
+		status = ath10k_txrx_tx_unref(htt, &tx_done);
+		if (!status) {
+			spin_lock_bh(&htt->tx_lock);
+			ath10k_htt_tx_mgmt_dec_pending(htt);
+			spin_unlock_bh(&htt->tx_lock);
+		}
 		break;
 	}
 	case HTT_T2H_MSG_TYPE_TX_COMPL_IND:
-		skb_queue_tail(&htt->tx_compl_q, skb);
-		tasklet_schedule(&htt->txrx_compl_task);
-		return;
+		ath10k_htt_rx_tx_compl_ind(htt->ar, skb);
+		break;
 	case HTT_T2H_MSG_TYPE_SEC_IND: {
 		struct ath10k *ar = htt->ar;
 		struct htt_security_indication *ev = &resp->security_indication;
@@ -2060,7 +2305,7 @@
 	case HTT_T2H_MSG_TYPE_RX_FRAG_IND: {
 		ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt event: ",
 				skb->data, skb->len);
-		ath10k_htt_rx_frag_handler(htt, &resp->rx_frag_ind);
+		atomic_inc(&htt->num_mpdus_ready);
 		break;
 	}
 	case HTT_T2H_MSG_TYPE_TEST:
@@ -2083,12 +2328,10 @@
 		ath10k_htt_rx_delba(ar, resp);
 		break;
 	case HTT_T2H_MSG_TYPE_PKTLOG: {
-		struct ath10k_pktlog_hdr *hdr =
-			(struct ath10k_pktlog_hdr *)resp->pktlog_msg.payload;
-
 		trace_ath10k_htt_pktlog(ar, resp->pktlog_msg.payload,
-					sizeof(*hdr) +
-					__le16_to_cpu(hdr->size));
+					skb->len -
+					offsetof(struct htt_resp,
+						 pktlog_msg.payload));
 		break;
 	}
 	case HTT_T2H_MSG_TYPE_RX_FLUSH: {
@@ -2098,22 +2341,41 @@
 		break;
 	}
 	case HTT_T2H_MSG_TYPE_RX_IN_ORD_PADDR_IND: {
-		spin_lock_bh(&htt->rx_ring.lock);
 		__skb_queue_tail(&htt->rx_in_ord_compl_q, skb);
-		spin_unlock_bh(&htt->rx_ring.lock);
-		tasklet_schedule(&htt->txrx_compl_task);
-		return;
+		return false;
 	}
 	case HTT_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND:
 		break;
-	case HTT_T2H_MSG_TYPE_CHAN_CHANGE:
+	case HTT_T2H_MSG_TYPE_CHAN_CHANGE: {
+		u32 phymode = __le32_to_cpu(resp->chan_change.phymode);
+		u32 freq = __le32_to_cpu(resp->chan_change.freq);
+
+		ar->tgt_oper_chan =
+			__ieee80211_get_channel(ar->hw->wiphy, freq);
+		ath10k_dbg(ar, ATH10K_DBG_HTT,
+			   "htt chan change freq %u phymode %s\n",
+			   freq, ath10k_wmi_phymode_str(phymode));
 		break;
+	}
 	case HTT_T2H_MSG_TYPE_AGGR_CONF:
 		break;
+	case HTT_T2H_MSG_TYPE_TX_FETCH_IND: {
+		struct sk_buff *tx_fetch_ind = skb_copy(skb, GFP_ATOMIC);
+
+		if (!tx_fetch_ind) {
+			ath10k_warn(ar, "failed to copy htt tx fetch ind\n");
+			break;
+		}
+		skb_queue_tail(&htt->tx_fetch_ind_q, tx_fetch_ind);
+		break;
+	}
+	case HTT_T2H_MSG_TYPE_TX_FETCH_CONFIRM:
+		ath10k_htt_rx_tx_fetch_confirm(ar, skb);
+		break;
+	case HTT_T2H_MSG_TYPE_TX_MODE_SWITCH_IND:
+		ath10k_htt_rx_tx_mode_switch_ind(ar, skb);
+		break;
 	case HTT_T2H_MSG_TYPE_EN_STATS:
-	case HTT_T2H_MSG_TYPE_TX_FETCH_IND:
-	case HTT_T2H_MSG_TYPE_TX_FETCH_CONF:
-	case HTT_T2H_MSG_TYPE_TX_LOW_LATENCY_IND:
 	default:
 		ath10k_warn(ar, "htt event (%d) not handled\n",
 			    resp->hdr.msg_type);
@@ -2121,34 +2383,116 @@
 				skb->data, skb->len);
 		break;
 	};
+	return true;
+}
+EXPORT_SYMBOL(ath10k_htt_t2h_msg_handler);
 
-	/* Free the indication buffer */
+void ath10k_htt_rx_pktlog_completion_handler(struct ath10k *ar,
+					     struct sk_buff *skb)
+{
+	trace_ath10k_htt_pktlog(ar, skb->data, skb->len);
 	dev_kfree_skb_any(skb);
 }
-EXPORT_SYMBOL(ath10k_htt_t2h_msg_handler);
+EXPORT_SYMBOL(ath10k_htt_rx_pktlog_completion_handler);
 
-static void ath10k_htt_txrx_compl_task(unsigned long ptr)
+int ath10k_htt_txrx_compl_task(struct ath10k *ar, int budget)
 {
-	struct ath10k_htt *htt = (struct ath10k_htt *)ptr;
-	struct ath10k *ar = htt->ar;
-	struct htt_resp *resp;
+	struct ath10k_htt *htt = &ar->htt;
+	struct htt_tx_done tx_done = {};
+	struct sk_buff_head tx_ind_q;
 	struct sk_buff *skb;
+	unsigned long flags;
+	int quota = 0, done, num_rx_msdus;
+	bool resched_napi = false;
 
-	while ((skb = skb_dequeue(&htt->tx_compl_q))) {
-		ath10k_htt_rx_frm_tx_compl(htt->ar, skb);
-		dev_kfree_skb_any(skb);
+	__skb_queue_head_init(&tx_ind_q);
+
+	/* Since an in-order indication can deliver more than one A-MSDU in a
+	 * single event, process it first to utilize the full available quota.
+	 */
+	while (quota < budget) {
+		if (skb_queue_empty(&htt->rx_in_ord_compl_q))
+			break;
+
+		skb = __skb_dequeue(&htt->rx_in_ord_compl_q);
+		if (!skb) {
+			resched_napi = true;
+			goto exit;
 	}
 
 	spin_lock_bh(&htt->rx_ring.lock);
-	while ((skb = __skb_dequeue(&htt->rx_compl_q))) {
-		resp = (struct htt_resp *)skb->data;
-		ath10k_htt_rx_handler(htt, &resp->rx_ind);
+		num_rx_msdus = ath10k_htt_rx_in_ord_ind(ar, skb);
+		spin_unlock_bh(&htt->rx_ring.lock);
+		if (num_rx_msdus < 0) {
+			resched_napi = true;
+			goto exit;
+		}
+
 		dev_kfree_skb_any(skb);
+		if (num_rx_msdus > 0)
+			quota += num_rx_msdus;
+
+		if ((quota > ATH10K_NAPI_QUOTA_LIMIT) &&
+		    !skb_queue_empty(&htt->rx_in_ord_compl_q)) {
+			resched_napi = true;
+			goto exit;
 	}
+	}
+
+	while (quota < budget) {
+		/* no more data to receive */
+		if (!atomic_read(&htt->num_mpdus_ready))
+			break;
+
+		num_rx_msdus = ath10k_htt_rx_handle_amsdu(htt);
+		if (num_rx_msdus < 0) {
+			resched_napi = true;
+			goto exit;
+		}
+
+		quota += num_rx_msdus;
+		atomic_dec(&htt->num_mpdus_ready);
+		if ((quota > ATH10K_NAPI_QUOTA_LIMIT) &&
+		    atomic_read(&htt->num_mpdus_ready)) {
+			resched_napi = true;
+			goto exit;
+		}
+	}
+
+	/* From NAPI documentation:
+	 *  The napi poll() function may also process TX completions, in which
+	 *  case if it processes the entire TX ring then it should count that
+	 *  work as the rest of the budget.
+	 */
+	if ((quota < budget) && !kfifo_is_empty(&htt->txdone_fifo))
+		quota = budget;
 
-	while ((skb = __skb_dequeue(&htt->rx_in_ord_compl_q))) {
-		ath10k_htt_rx_in_ord_ind(ar, skb);
+	/* kfifo_get: called only from the NAPI poll context, so it's neatly
+	 * serialized. From kfifo_get() documentation:
+	 *  Note that with only one concurrent reader and one concurrent writer,
+	 *  you don't need extra locking to use these macros.
+	 */
+	while (kfifo_get(&htt->txdone_fifo, &tx_done))
+		ath10k_txrx_tx_unref(htt, &tx_done);
+
+	ath10k_mac_tx_push_pending(ar);
+
+	spin_lock_irqsave(&htt->tx_fetch_ind_q.lock, flags);
+	skb_queue_splice_init(&htt->tx_fetch_ind_q, &tx_ind_q);
+	spin_unlock_irqrestore(&htt->tx_fetch_ind_q.lock, flags);
+
+	while ((skb = __skb_dequeue(&tx_ind_q))) {
+		ath10k_htt_rx_tx_fetch_ind(ar, skb);
 		dev_kfree_skb_any(skb);
 	}
-	spin_unlock_bh(&htt->rx_ring.lock);
+
+exit:
+	ath10k_htt_rx_msdu_buff_replenish(htt);
+	/* In case of rx failure, or if there is more data to process, report
+	 * the full budget so the NAPI poll gets rescheduled.
+	 */
+	done = resched_napi ? budget : quota;
+
+	return done;
 }
+EXPORT_SYMBOL(ath10k_htt_txrx_compl_task);
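
ath10k_htt_txrx_compl_task() is now effectively a NAPI poll body: it counts delivered MSDUs against the budget and returns the full budget whenever more work remains, so the core schedules another poll. A generic hedged skeleton of that contract, with process_one() as a hypothetical event handler:

#include <linux/netdevice.h>

/* stand-in: returns the number of MSDUs handled, 0 when idle */
static int process_one(void)
{
	return 0;
}

static int example_poll(struct napi_struct *napi, int budget)
{
	int done = 0;

	while (done < budget) {
		int n = process_one();

		if (n <= 0)
			break;		/* no more work queued */
		done += n;
	}

	/* Returning budget keeps polling; completing re-arms interrupts. */
	if (done < budget)
		napi_complete(napi);	/* 4.4 API */

	return done;
}
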
diff -ruw linux-4.4.115/drivers/net/wireless/ath/ath10k/htt_tx.c linux-4.4.115-fbx/drivers/net/wireless/ath/ath10k/htt_tx.c
--- linux-4.4.115/drivers/net/wireless/ath/ath10k/htt_tx.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/net/wireless/ath/ath10k/htt_tx.c	2019-01-22 16:16:25.415263721 +0100
@@ -22,53 +22,185 @@
 #include "txrx.h"
 #include "debug.h"
 
-void __ath10k_htt_tx_dec_pending(struct ath10k_htt *htt, bool limit_mgmt_desc)
+static u8 ath10k_htt_tx_txq_calc_size(size_t count)
 {
-	if (limit_mgmt_desc)
-		htt->num_pending_mgmt_tx--;
+	int exp;
+	int factor;
 
-	htt->num_pending_tx--;
-	if (htt->num_pending_tx == htt->max_num_pending_tx - 1)
-		ath10k_mac_tx_unlock(htt->ar, ATH10K_TX_PAUSE_Q_FULL);
+	exp = 0;
+	factor = count >> 7;
+
+	while (factor >= 64 && exp < 4) {
+		factor >>= 3;
+		exp++;
+	}
+
+	if (exp == 4)
+		return 0xff;
+
+	if (count > 0)
+		factor = max(1, factor);
+
+	return SM(exp, HTT_TX_Q_STATE_ENTRY_EXP) |
+	       SM(factor, HTT_TX_Q_STATE_ENTRY_FACTOR);
 }
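
ath10k_htt_tx_txq_calc_size() compresses a byte count into a single byte, roughly count ≈ factor << (7 + 3 * exp), saturating at 0xff. A hedged decode helper (field layout inferred from the encoder's bounds, exp < 4 and factor < 64, not from the driver headers) together with a worked value:

#include <linux/types.h>

static u32 txq_decode_size(u8 value)
{
	u8 exp = value >> 6;		/* assumed 2-bit exponent field */
	u8 factor = value & 0x3f;	/* assumed 6-bit factor field */

	return (u32)factor << (7 + 3 * exp);
}

/* Worked example: 1000000 bytes -> 1000000 >> 7 = 7812, then three
 * rounds of >> 3 give factor = 15 with exp = 3; decoding returns
 * 15 << 16 = 983040 -- a deliberately coarse approximation.
 */
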
 
-static void ath10k_htt_tx_dec_pending(struct ath10k_htt *htt,
-				      bool limit_mgmt_desc)
+static void __ath10k_htt_tx_txq_recalc(struct ieee80211_hw *hw,
+				       struct ieee80211_txq *txq)
 {
-	spin_lock_bh(&htt->tx_lock);
-	__ath10k_htt_tx_dec_pending(htt, limit_mgmt_desc);
-	spin_unlock_bh(&htt->tx_lock);
+	struct ath10k *ar = hw->priv;
+	struct ath10k_sta *arsta;
+	struct ath10k_vif *arvif = (void *)txq->vif->drv_priv;
+	unsigned long frame_cnt;
+	unsigned long byte_cnt;
+	int idx;
+	u32 bit;
+	u16 peer_id;
+	u8 tid;
+	u8 count;
+
+	lockdep_assert_held(&ar->htt.tx_lock);
+
+	if (!ar->htt.tx_q_state.enabled)
+		return;
+
+	if (ar->htt.tx_q_state.mode != HTT_TX_MODE_SWITCH_PUSH_PULL)
+		return;
+
+	if (txq->sta) {
+		arsta = (void *)txq->sta->drv_priv;
+		peer_id = arsta->peer_id;
+	} else {
+		peer_id = arvif->peer_id;
+	}
+
+	tid = txq->tid;
+	bit = BIT(peer_id % 32);
+	idx = peer_id / 32;
+
+	ieee80211_txq_get_depth(txq, &frame_cnt, &byte_cnt);
+	count = ath10k_htt_tx_txq_calc_size(byte_cnt);
+
+	if (unlikely(peer_id >= ar->htt.tx_q_state.num_peers) ||
+	    unlikely(tid >= ar->htt.tx_q_state.num_tids)) {
+		ath10k_warn(ar, "refusing to update txq for peer_id %hu tid %hhu due to out of bounds\n",
+			    peer_id, tid);
+		return;
+	}
+
+	ar->htt.tx_q_state.vaddr->count[tid][peer_id] = count;
+	ar->htt.tx_q_state.vaddr->map[tid][idx] &= ~bit;
+	ar->htt.tx_q_state.vaddr->map[tid][idx] |= count ? bit : 0;
+
+	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx txq state update peer_id %hu tid %hhu count %hhu\n",
+		   peer_id, tid, count);
 }
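
__ath10k_htt_tx_txq_recalc() keeps a per-tid peer bitmap alongside the count table: 32 peers per word, with a peer's bit set only while its queue is non-empty. The update idiom in isolation, as a hedged sketch with illustrative map sizing:

#include <linux/bitops.h>
#include <linux/types.h>

/* Clear the peer's bit, then set it again only if there is data. */
static void update_peer_map(u32 *map, u16 peer_id, bool nonempty)
{
	u32 bit = BIT(peer_id % 32);
	int idx = peer_id / 32;

	map[idx] &= ~bit;
	map[idx] |= nonempty ? bit : 0;
}
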
 
-static int ath10k_htt_tx_inc_pending(struct ath10k_htt *htt,
-				     bool limit_mgmt_desc, bool is_probe_resp)
+static void __ath10k_htt_tx_txq_sync(struct ath10k *ar)
 {
-	struct ath10k *ar = htt->ar;
-	int ret = 0;
+	u32 seq;
+	size_t size;
 
-	spin_lock_bh(&htt->tx_lock);
+	lockdep_assert_held(&ar->htt.tx_lock);
+
+	if (!ar->htt.tx_q_state.enabled)
+		return;
 
-	if (htt->num_pending_tx >= htt->max_num_pending_tx) {
-		ret = -EBUSY;
-		goto exit;
+	if (ar->htt.tx_q_state.mode != HTT_TX_MODE_SWITCH_PUSH_PULL)
+		return;
+
+	seq = le32_to_cpu(ar->htt.tx_q_state.vaddr->seq);
+	seq++;
+	ar->htt.tx_q_state.vaddr->seq = cpu_to_le32(seq);
+
+	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx txq state update commit seq %u\n",
+		   seq);
+
+	size = sizeof(*ar->htt.tx_q_state.vaddr);
+	dma_sync_single_for_device(ar->dev,
+				   ar->htt.tx_q_state.paddr,
+				   size,
+				   DMA_TO_DEVICE);
 	}
 
-	if (limit_mgmt_desc) {
-		if (is_probe_resp && (htt->num_pending_mgmt_tx >
-		    ar->hw_params.max_probe_resp_desc_thres)) {
-			ret = -EBUSY;
-			goto exit;
+void ath10k_htt_tx_txq_recalc(struct ieee80211_hw *hw,
+			      struct ieee80211_txq *txq)
+{
+	struct ath10k *ar = hw->priv;
+
+	spin_lock_bh(&ar->htt.tx_lock);
+	__ath10k_htt_tx_txq_recalc(hw, txq);
+	spin_unlock_bh(&ar->htt.tx_lock);
 		}
-		htt->num_pending_mgmt_tx++;
+
+void ath10k_htt_tx_txq_sync(struct ath10k *ar)
+{
+	spin_lock_bh(&ar->htt.tx_lock);
+	__ath10k_htt_tx_txq_sync(ar);
+	spin_unlock_bh(&ar->htt.tx_lock);
 	}
 
+void ath10k_htt_tx_txq_update(struct ieee80211_hw *hw,
+			      struct ieee80211_txq *txq)
+{
+	struct ath10k *ar = hw->priv;
+
+	spin_lock_bh(&ar->htt.tx_lock);
+	__ath10k_htt_tx_txq_recalc(hw, txq);
+	__ath10k_htt_tx_txq_sync(ar);
+	spin_unlock_bh(&ar->htt.tx_lock);
+}
+
+void ath10k_htt_tx_dec_pending(struct ath10k_htt *htt)
+{
+	lockdep_assert_held(&htt->tx_lock);
+
+	htt->num_pending_tx--;
+	if (htt->num_pending_tx == htt->max_num_pending_tx - 1)
+		ath10k_mac_tx_unlock(htt->ar, ATH10K_TX_PAUSE_Q_FULL);
+}
+
+int ath10k_htt_tx_inc_pending(struct ath10k_htt *htt)
+{
+	lockdep_assert_held(&htt->tx_lock);
+
+	if (htt->num_pending_tx >= htt->max_num_pending_tx)
+		return -EBUSY;
+
 	htt->num_pending_tx++;
 	if (htt->num_pending_tx == htt->max_num_pending_tx)
 		ath10k_mac_tx_lock(htt->ar, ATH10K_TX_PAUSE_Q_FULL);
 
-exit:
-	spin_unlock_bh(&htt->tx_lock);
-	return ret;
+	return 0;
+}
+
+int ath10k_htt_tx_mgmt_inc_pending(struct ath10k_htt *htt, bool is_mgmt,
+				   bool is_presp)
+{
+	struct ath10k *ar = htt->ar;
+
+	lockdep_assert_held(&htt->tx_lock);
+
+	if (!is_mgmt || !ar->hw_params.max_probe_resp_desc_thres)
+		return 0;
+
+	if (is_presp &&
+	    ar->hw_params.max_probe_resp_desc_thres < htt->num_pending_mgmt_tx)
+		return -EBUSY;
+
+	htt->num_pending_mgmt_tx++;
+
+	return 0;
+}
+
+void ath10k_htt_tx_mgmt_dec_pending(struct ath10k_htt *htt)
+{
+	lockdep_assert_held(&htt->tx_lock);
+
+	if (!htt->ar->hw_params.max_probe_resp_desc_thres)
+		return;
+
+	htt->num_pending_mgmt_tx--;
 }
 
 int ath10k_htt_tx_alloc_msdu_id(struct ath10k_htt *htt, struct sk_buff *skb)
@@ -86,6 +218,27 @@
 	return ret;
 }
 
+struct sk_buff *ath10k_htt_tx_find_msdu_by_id(struct ath10k_htt *htt,
+					      u16 msdu_id)
+{
+	struct ath10k *ar;
+	struct sk_buff *ret;
+
+	if (!htt)
+		return NULL;
+
+	ar = htt->ar;
+
+	lockdep_assert_held(&htt->tx_lock);
+
+	ret = (struct sk_buff *)idr_find(&htt->pending_tx, msdu_id);
+
+	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx find msdu by msdu_id %s\n",
+		   !ret ? "Failed" : "Success");
+
+	return ret;
+}
+
 void ath10k_htt_tx_free_msdu_id(struct ath10k_htt *htt, u16 msdu_id)
 {
 	struct ath10k *ar = htt->ar;
@@ -97,6 +250,87 @@
 	idr_remove(&htt->pending_tx, msdu_id);
 }
 
+static void ath10k_htt_tx_free_cont_frag_desc(struct ath10k_htt *htt)
+{
+	size_t size;
+
+	if (!htt->frag_desc.vaddr)
+		return;
+
+	size = htt->max_num_pending_tx * sizeof(struct htt_msdu_ext_desc);
+
+	dma_free_coherent(htt->ar->dev,
+			  size,
+			  htt->frag_desc.vaddr,
+			  htt->frag_desc.paddr);
+}
+
+static int ath10k_htt_tx_alloc_cont_frag_desc(struct ath10k_htt *htt)
+{
+	struct ath10k *ar = htt->ar;
+	size_t size;
+
+	if (!ar->hw_params.continuous_frag_desc)
+		return 0;
+
+	size = htt->max_num_pending_tx * sizeof(struct htt_msdu_ext_desc);
+	htt->frag_desc.vaddr = dma_alloc_coherent(ar->dev, size,
+						  &htt->frag_desc.paddr,
+						  GFP_KERNEL);
+	if (!htt->frag_desc.vaddr) {
+		ath10k_err(ar, "failed to alloc fragment desc memory\n");
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+static void ath10k_htt_tx_free_txq(struct ath10k_htt *htt)
+{
+	struct ath10k *ar = htt->ar;
+	size_t size;
+
+	if (!test_bit(ATH10K_FW_FEATURE_PEER_FLOW_CONTROL,
+		      ar->running_fw->fw_file.fw_features))
+		return;
+
+	size = sizeof(*htt->tx_q_state.vaddr);
+
+	dma_unmap_single(ar->dev, htt->tx_q_state.paddr, size, DMA_TO_DEVICE);
+	kfree(htt->tx_q_state.vaddr);
+}
+
+static int ath10k_htt_tx_alloc_txq(struct ath10k_htt *htt)
+{
+	struct ath10k *ar = htt->ar;
+	size_t size;
+	int ret;
+
+	if (!test_bit(ATH10K_FW_FEATURE_PEER_FLOW_CONTROL,
+		      ar->running_fw->fw_file.fw_features))
+		return 0;
+
+	htt->tx_q_state.num_peers = HTT_TX_Q_STATE_NUM_PEERS;
+	htt->tx_q_state.num_tids = HTT_TX_Q_STATE_NUM_TIDS;
+	htt->tx_q_state.type = HTT_Q_DEPTH_TYPE_BYTES;
+
+	size = sizeof(*htt->tx_q_state.vaddr);
+	htt->tx_q_state.vaddr = kzalloc(size, GFP_KERNEL);
+	if (!htt->tx_q_state.vaddr)
+		return -ENOMEM;
+
+	htt->tx_q_state.paddr = dma_map_single(ar->dev, htt->tx_q_state.vaddr,
+					       size, DMA_TO_DEVICE);
+	ret = dma_mapping_error(ar->dev, htt->tx_q_state.paddr);
+	if (ret) {
+		ath10k_warn(ar, "failed to dma map tx_q_state: %d\n", ret);
+		kfree(htt->tx_q_state.vaddr);
+		return -EIO;
+	}
+
+	return 0;
+}
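
ath10k_htt_tx_alloc_txq() streams a kzalloc'd buffer to the device with dma_map_single() and must check dma_mapping_error() before trusting the returned handle. The same pattern in isolation, as a hedged sketch rather than driver API:

#include <linux/dma-mapping.h>
#include <linux/slab.h>

static void *map_shared_state(struct device *dev, size_t size,
			      dma_addr_t *paddr)
{
	void *vaddr = kzalloc(size, GFP_KERNEL);

	if (!vaddr)
		return NULL;

	*paddr = dma_map_single(dev, vaddr, size, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, *paddr)) {	/* mandatory check */
		kfree(vaddr);
		return NULL;
	}

	return vaddr;
}
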
+
 int ath10k_htt_tx_alloc(struct ath10k_htt *htt)
 {
 	struct ath10k *ar = htt->ar;
@@ -111,36 +345,49 @@
 	size = htt->max_num_pending_tx * sizeof(struct ath10k_htt_txbuf);
 	htt->txbuf.vaddr = dma_alloc_coherent(ar->dev, size,
 						  &htt->txbuf.paddr,
-						  GFP_DMA);
+						  GFP_KERNEL);
 	if (!htt->txbuf.vaddr) {
 		ath10k_err(ar, "failed to alloc tx buffer\n");
 		ret = -ENOMEM;
 		goto free_idr_pending_tx;
 	}
 
-	if (!ar->hw_params.continuous_frag_desc)
-		goto skip_frag_desc_alloc;
-
-	size = htt->max_num_pending_tx * sizeof(struct htt_msdu_ext_desc);
-	htt->frag_desc.vaddr = dma_alloc_coherent(ar->dev, size,
-						  &htt->frag_desc.paddr,
-						  GFP_DMA);
-	if (!htt->frag_desc.vaddr) {
-		ath10k_warn(ar, "failed to alloc fragment desc memory\n");
-		ret = -ENOMEM;
+	ret = ath10k_htt_tx_alloc_cont_frag_desc(htt);
+	if (ret) {
+		ath10k_err(ar, "failed to alloc cont frag desc: %d\n", ret);
 		goto free_txbuf;
 	}
 
-skip_frag_desc_alloc:
+	ret = ath10k_htt_tx_alloc_txq(htt);
+	if (ret) {
+		ath10k_err(ar, "failed to alloc txq: %d\n", ret);
+		goto free_frag_desc;
+	}
+
+	size = roundup_pow_of_two(htt->max_num_pending_tx);
+	ret = kfifo_alloc(&htt->txdone_fifo, size, GFP_KERNEL);
+	if (ret) {
+		ath10k_err(ar, "failed to alloc txdone fifo: %d\n", ret);
+		goto free_txq;
+	}
+
 	return 0;
 
+free_txq:
+	ath10k_htt_tx_free_txq(htt);
+
+free_frag_desc:
+	ath10k_htt_tx_free_cont_frag_desc(htt);
+
 free_txbuf:
 	size = htt->max_num_pending_tx *
 			  sizeof(struct ath10k_htt_txbuf);
 	dma_free_coherent(htt->ar->dev, size, htt->txbuf.vaddr,
 			  htt->txbuf.paddr);
+
 free_idr_pending_tx:
 	idr_destroy(&htt->pending_tx);
+
 	return ret;
 }
 
@@ -152,8 +399,8 @@
 
 	ath10k_dbg(ar, ATH10K_DBG_HTT, "force cleanup msdu_id %hu\n", msdu_id);
 
-	tx_done.discard = 1;
 	tx_done.msdu_id = msdu_id;
+	tx_done.status = HTT_TX_COMPL_STATE_DISCARD;
 
 	ath10k_txrx_tx_unref(htt, &tx_done);
 
@@ -174,12 +421,10 @@
 				  htt->txbuf.paddr);
 	}
 
-	if (htt->frag_desc.vaddr) {
-		size = htt->max_num_pending_tx *
-				  sizeof(struct htt_msdu_ext_desc);
-		dma_free_coherent(htt->ar->dev, size, htt->frag_desc.vaddr,
-				  htt->frag_desc.paddr);
-	}
+	ath10k_htt_tx_free_txq(htt);
+	ath10k_htt_tx_free_cont_frag_desc(htt);
+	WARN_ON(!kfifo_is_empty(&htt->txdone_fifo));
+	kfifo_free(&htt->txdone_fifo);
 }
 
 void ath10k_htt_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb)
@@ -263,12 +508,103 @@
 	return 0;
 }
 
+#ifdef CONFIG_ATH10K_SNOC
+static inline
+void ath10k_htt_fill_rx_ring_cfg(struct ath10k_htt *htt,
+				 struct htt_rx_ring_setup_ring *ring)
+{
+	ring->fw_idx_shadow_reg_paddr_low =
+		__cpu_to_le32(htt->rx_ring.alloc_idx.paddr);
+	ring->fw_idx_shadow_reg_paddr_high = 0;
+	ring->rx_ring_base_paddr_low = __cpu_to_le32(htt->rx_ring.base_paddr);
+	ring->rx_ring_base_paddr_high = upper_32_bits(htt->rx_ring.base_paddr) &
+						      HTT_WCN3990_PADDR_MASK;
+}
+
+static inline void ath10k_htt_fill_frags(struct htt_data_tx_desc_frag *frags,
+					 struct sk_buff *msdu,
+					 struct ath10k_skb_cb *skb_cb)
+{
+	frags[0].tword_addr.paddr_lo = __cpu_to_le32(skb_cb->paddr);
+	frags[0].tword_addr.paddr_hi = upper_32_bits(skb_cb->paddr) &
+					HTT_WCN3990_PADDR_MASK;
+	frags[0].tword_addr.len_16 = __cpu_to_le16(msdu->len);
+	frags[1].tword_addr.paddr_lo = 0;
+	frags[1].tword_addr.paddr_hi = 0;
+	frags[1].tword_addr.len_16 = 0;
+}
+
+static inline void ath10k_htt_fill_frag_desc(struct ath10k_htt_txbuf *txbuf,
+					     dma_addr_t frags_paddr)
+{
+	txbuf->cmd_tx.frags_paddr_lo = __cpu_to_le32(frags_paddr);
+	txbuf->cmd_tx.frags_paddr_hi = upper_32_bits(frags_paddr) &
+					HTT_WCN3990_PADDR_MASK;
+}
+
+static inline
+void ath10k_htt_set_bank_base_addr(struct htt_frag_desc_bank_cfg *cfg,
+				   dma_addr_t paddr)
+{
+	cfg->bank_base_addrs[0].low = __cpu_to_le32(paddr);
+	cfg->bank_base_addrs[0].high = upper_32_bits(paddr) &
+						HTT_WCN3990_PADDR_MASK;
+}
+
+static inline
+u32 ath10k_htt_get_paddr_hi(dma_addr_t paddr)
+{
+	return upper_32_bits(paddr) & HTT_WCN3990_PADDR_MASK;
+}
+#else
+static inline void ath10k_htt_fill_frags(struct htt_data_tx_desc_frag *frags,
+					 struct sk_buff *msdu,
+					 struct ath10k_skb_cb *skb_cb)
+{
+	frags[0].dword_addr.paddr = __cpu_to_le32(skb_cb->paddr);
+	frags[0].dword_addr.len = __cpu_to_le32(msdu->len);
+	frags[1].dword_addr.paddr = 0;
+	frags[1].dword_addr.len = 0;
+}
+
+static inline void ath10k_htt_fill_frag_desc(struct ath10k_htt_txbuf *txbuf,
+					     dma_addr_t frags_paddr)
+{
+	txbuf->cmd_tx.frags_paddr = __cpu_to_le32(frags_paddr);
+}
+
+static inline
+void ath10k_htt_set_bank_base_addr(struct htt_frag_desc_bank_cfg *cfg,
+				   dma_addr_t paddr)
+{
+	cfg->bank_base_addrs[0] = __cpu_to_le32(paddr);
+}
+
+static inline
+void ath10k_htt_fill_rx_ring_cfg(struct ath10k_htt *htt,
+				 struct htt_rx_ring_setup_ring *ring)
+{
+	ring->fw_idx_shadow_reg_paddr =
+			__cpu_to_le32(htt->rx_ring.alloc_idx.paddr);
+	ring->rx_ring_base_paddr = __cpu_to_le32(htt->rx_ring.base_paddr);
+}
+
+static inline
+u32 ath10k_htt_get_paddr_hi(dma_addr_t paddr)
+{
+	return 0;
+}
+#endif
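
The CONFIG_ATH10K_SNOC helpers above all split a dma_addr_t the same way: the low 32 bits go out verbatim, the upper bits are masked to the width WCN3990 can address. A hedged sketch of the split; EXAMPLE_PADDR_HI_MASK stands in for HTT_WCN3990_PADDR_MASK, whose real value lives in the driver headers:

#include <linux/kernel.h>	/* upper_32_bits(), lower_32_bits() */
#include <linux/types.h>
#include <asm/byteorder.h>

#define EXAMPLE_PADDR_HI_MASK	0x1f	/* assumed width of the high word */

static void split_paddr(dma_addr_t paddr, __le32 *lo, __le32 *hi)
{
	*lo = __cpu_to_le32(lower_32_bits(paddr));
	*hi = __cpu_to_le32(upper_32_bits(paddr) & EXAMPLE_PADDR_HI_MASK);
}
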
+
 int ath10k_htt_send_frag_desc_bank_cfg(struct ath10k_htt *htt)
 {
 	struct ath10k *ar = htt->ar;
 	struct sk_buff *skb;
 	struct htt_cmd *cmd;
+	struct htt_frag_desc_bank_cfg *cfg;
 	int ret, size;
+	u8 info;
 
 	if (!ar->hw_params.continuous_frag_desc)
 		return 0;
@@ -286,14 +622,31 @@
 	skb_put(skb, size);
 	cmd = (struct htt_cmd *)skb->data;
 	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_FRAG_DESC_BANK_CFG;
-	cmd->frag_desc_bank_cfg.info = 0;
-	cmd->frag_desc_bank_cfg.num_banks = 1;
-	cmd->frag_desc_bank_cfg.desc_size = sizeof(struct htt_msdu_ext_desc);
-	cmd->frag_desc_bank_cfg.bank_base_addrs[0] =
-				__cpu_to_le32(htt->frag_desc.paddr);
-	cmd->frag_desc_bank_cfg.bank_id[0].bank_min_id = 0;
-	cmd->frag_desc_bank_cfg.bank_id[0].bank_max_id =
-				__cpu_to_le16(htt->max_num_pending_tx - 1);
+
+	info = 0;
+	info |= SM(htt->tx_q_state.type,
+		   HTT_FRAG_DESC_BANK_CFG_INFO_Q_STATE_DEPTH_TYPE);
+
+	if (test_bit(ATH10K_FW_FEATURE_PEER_FLOW_CONTROL,
+		     ar->running_fw->fw_file.fw_features))
+		info |= HTT_FRAG_DESC_BANK_CFG_INFO_Q_STATE_VALID;
+
+	cfg = &cmd->frag_desc_bank_cfg;
+	cfg->info = info;
+	cfg->num_banks = 1;
+	cfg->desc_size = sizeof(struct htt_msdu_ext_desc);
+	ath10k_htt_set_bank_base_addr(cfg, htt->frag_desc.paddr);
+	cfg->bank_id[0].bank_min_id = 0;
+	cfg->bank_id[0].bank_max_id = __cpu_to_le16(htt->max_num_pending_tx -
+						    1);
+
+	cfg->q_state.paddr = cpu_to_le32(htt->tx_q_state.paddr);
+	cfg->q_state.num_peers = cpu_to_le16(htt->tx_q_state.num_peers);
+	cfg->q_state.num_tids = cpu_to_le16(htt->tx_q_state.num_tids);
+	cfg->q_state.record_size = HTT_TX_Q_STATE_ENTRY_SIZE;
+	cfg->q_state.record_multiplier = HTT_TX_Q_STATE_ENTRY_MULTIPLIER;
+
+	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt frag desc bank cmd\n");
 
 	ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
 	if (ret) {
@@ -360,9 +713,7 @@
 
 	fw_idx = __le32_to_cpu(*htt->rx_ring.alloc_idx.vaddr);
 
-	ring->fw_idx_shadow_reg_paddr =
-		__cpu_to_le32(htt->rx_ring.alloc_idx.paddr);
-	ring->rx_ring_base_paddr = __cpu_to_le32(htt->rx_ring.base_paddr);
+	ath10k_htt_fill_rx_ring_cfg(htt, ring);
 	ring->rx_ring_len = __cpu_to_le16(htt->rx_ring.size);
 	ring->rx_ring_bufsize = __cpu_to_le16(HTT_RX_BUF_SIZE);
 	ring->flags = __cpu_to_le16(flags);
@@ -439,6 +790,86 @@
 	return 0;
 }
 
+int ath10k_htt_tx_fetch_resp(struct ath10k *ar,
+			     __le32 token,
+			     __le16 fetch_seq_num,
+			     struct htt_tx_fetch_record *records,
+			     size_t num_records)
+{
+	struct sk_buff *skb;
+	struct htt_cmd *cmd;
+	const u16 resp_id = 0;
+	int len = 0;
+	int ret;
+
+	/* Response IDs are echoed back only for host driver convenience
+	 * purposes. They aren't used for anything in the driver yet so use 0.
+	 */
+
+	len += sizeof(cmd->hdr);
+	len += sizeof(cmd->tx_fetch_resp);
+	len += sizeof(cmd->tx_fetch_resp.records[0]) * num_records;
+
+	skb = ath10k_htc_alloc_skb(ar, len);
+	if (!skb)
+		return -ENOMEM;
+
+	skb_put(skb, len);
+	cmd = (struct htt_cmd *)skb->data;
+	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_TX_FETCH_RESP;
+	cmd->tx_fetch_resp.resp_id = cpu_to_le16(resp_id);
+	cmd->tx_fetch_resp.fetch_seq_num = fetch_seq_num;
+	cmd->tx_fetch_resp.num_records = cpu_to_le16(num_records);
+	cmd->tx_fetch_resp.token = token;
+
+	memcpy(cmd->tx_fetch_resp.records, records,
+	       sizeof(records[0]) * num_records);
+
+	ret = ath10k_htc_send(&ar->htc, ar->htt.eid, skb);
+	if (ret) {
+		ath10k_warn(ar, "failed to submit htc command: %d\n", ret);
+		goto err_free_skb;
+	}
+
+	return 0;
+
+err_free_skb:
+	dev_kfree_skb_any(skb);
+
+	return ret;
+}
+
+static u8 ath10k_htt_tx_get_vdev_id(struct ath10k *ar, struct sk_buff *skb)
+{
+	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+	struct ath10k_skb_cb *cb = ATH10K_SKB_CB(skb);
+	struct ath10k_vif *arvif;
+
+	if (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN) {
+		return ar->scan.vdev_id;
+	} else if (cb->vif) {
+		arvif = (void *)cb->vif->drv_priv;
+		return arvif->vdev_id;
+	} else if (ar->monitor_started) {
+		return ar->monitor_vdev_id;
+	} else {
+		return 0;
+	}
+}
+
+static u8 ath10k_htt_tx_get_tid(struct sk_buff *skb, bool is_eth)
+{
+	struct ieee80211_hdr *hdr = (void *)skb->data;
+	struct ath10k_skb_cb *cb = ATH10K_SKB_CB(skb);
+
+	if (!is_eth && ieee80211_is_mgmt(hdr->frame_control))
+		return HTT_DATA_TX_EXT_TID_MGMT;
+	else if (cb->flags & ATH10K_SKB_F_QOS)
+		return skb->priority % IEEE80211_QOS_CTL_TID_MASK;
+	else
+		return HTT_DATA_TX_EXT_TID_NON_QOS_MCAST_BCAST;
+}
+
 int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
 {
 	struct ath10k *ar = htt->ar;
@@ -446,25 +877,11 @@
 	struct sk_buff *txdesc = NULL;
 	struct htt_cmd *cmd;
 	struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu);
-	u8 vdev_id = skb_cb->vdev_id;
+	u8 vdev_id = ath10k_htt_tx_get_vdev_id(ar, msdu);
 	int len = 0;
 	int msdu_id = -1;
 	int res;
 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)msdu->data;
-	bool limit_mgmt_desc = false;
-	bool is_probe_resp = false;
-
-	if (ar->hw_params.max_probe_resp_desc_thres) {
-		limit_mgmt_desc = true;
-
-		if (ieee80211_is_probe_resp(hdr->frame_control))
-			is_probe_resp = true;
-	}
-
-	res = ath10k_htt_tx_inc_pending(htt, limit_mgmt_desc, is_probe_resp);
-
-	if (res)
-		goto err;
 
 	len += sizeof(cmd->hdr);
 	len += sizeof(cmd->mgmt_tx);
@@ -473,10 +890,17 @@
 	res = ath10k_htt_tx_alloc_msdu_id(htt, msdu);
 	spin_unlock_bh(&htt->tx_lock);
 	if (res < 0)
-		goto err_tx_dec;
+		goto err;
 
 	msdu_id = res;
 
+	if ((ieee80211_is_action(hdr->frame_control) ||
+	     ieee80211_is_deauth(hdr->frame_control) ||
+	     ieee80211_is_disassoc(hdr->frame_control)) &&
+	     ieee80211_has_protected(hdr->frame_control)) {
+		skb_put(msdu, IEEE80211_CCMP_MIC_LEN);
+	}
+
 	txdesc = ath10k_htc_alloc_skb(ar, len);
 	if (!txdesc) {
 		res = -ENOMEM;
@@ -503,8 +927,6 @@
 	memcpy(cmd->mgmt_tx.hdr, msdu->data,
 	       min_t(int, msdu->len, HTT_MGMT_FRM_HDR_DOWNLOAD_LEN));
 
-	skb_cb->htt.txbuf = NULL;
-
 	res = ath10k_htc_send(&htt->ar->htc, htt->eid, txdesc);
 	if (res)
 		goto err_unmap_msdu;
@@ -519,56 +941,46 @@
 	spin_lock_bh(&htt->tx_lock);
 	ath10k_htt_tx_free_msdu_id(htt, msdu_id);
 	spin_unlock_bh(&htt->tx_lock);
-err_tx_dec:
-	ath10k_htt_tx_dec_pending(htt, limit_mgmt_desc);
 err:
 	return res;
 }
 
-int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
+int ath10k_htt_tx(struct ath10k_htt *htt, enum ath10k_hw_txrx_mode txmode,
+		  struct sk_buff *msdu)
 {
 	struct ath10k *ar = htt->ar;
 	struct device *dev = ar->dev;
 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)msdu->data;
+	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(msdu);
 	struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu);
 	struct ath10k_hif_sg_item sg_items[2];
+	struct ath10k_htt_txbuf *txbuf;
 	struct htt_data_tx_desc_frag *frags;
-	u8 vdev_id = skb_cb->vdev_id;
-	u8 tid = skb_cb->htt.tid;
+	bool is_eth = (txmode == ATH10K_HW_TXRX_ETHERNET);
+	u8 vdev_id = ath10k_htt_tx_get_vdev_id(ar, msdu);
+	u8 tid = ath10k_htt_tx_get_tid(msdu, is_eth);
 	int prefetch_len;
 	int res;
 	u8 flags0 = 0;
 	u16 msdu_id, flags1 = 0;
-	u32 frags_paddr = 0;
+	u16 freq = 0;
+	dma_addr_t frags_paddr = 0;
+	u32 txbuf_paddr;
 	struct htt_msdu_ext_desc *ext_desc = NULL;
-	bool limit_mgmt_desc = false;
-	bool is_probe_resp = false;
-
-	if (unlikely(ieee80211_is_mgmt(hdr->frame_control)) &&
-	    ar->hw_params.max_probe_resp_desc_thres) {
-		limit_mgmt_desc = true;
-
-		if (ieee80211_is_probe_resp(hdr->frame_control))
-			is_probe_resp = true;
-	}
-
-	res = ath10k_htt_tx_inc_pending(htt, limit_mgmt_desc, is_probe_resp);
-	if (res)
-		goto err;
 
 	spin_lock_bh(&htt->tx_lock);
 	res = ath10k_htt_tx_alloc_msdu_id(htt, msdu);
 	spin_unlock_bh(&htt->tx_lock);
 	if (res < 0)
-		goto err_tx_dec;
+		goto err;
 
 	msdu_id = res;
 
 	prefetch_len = min(htt->prefetch_len, msdu->len);
 	prefetch_len = roundup(prefetch_len, 4);
 
-	skb_cb->htt.txbuf = &htt->txbuf.vaddr[msdu_id];
-	skb_cb->htt.txbuf_paddr = htt->txbuf.paddr +
+	txbuf = &htt->txbuf.vaddr[msdu_id];
+	txbuf_paddr = htt->txbuf.paddr +
 		(sizeof(struct ath10k_htt_txbuf) * msdu_id);
 
 	if ((ieee80211_is_action(hdr->frame_control) ||
@@ -576,8 +988,8 @@
 	     ieee80211_is_disassoc(hdr->frame_control)) &&
 	     ieee80211_has_protected(hdr->frame_control)) {
 		skb_put(msdu, IEEE80211_CCMP_MIC_LEN);
-	} else if (!skb_cb->htt.nohwcrypt &&
-		   skb_cb->txmode == ATH10K_HW_TXRX_RAW &&
+	} else if (!(skb_cb->flags & ATH10K_SKB_F_NO_HWCRYPT) &&
+		   txmode == ATH10K_HW_TXRX_RAW &&
 		   ieee80211_has_protected(hdr->frame_control)) {
 		skb_put(msdu, IEEE80211_CCMP_MIC_LEN);
 	}
@@ -590,7 +1002,10 @@
 		goto err_free_msdu_id;
 	}
 
-	switch (skb_cb->txmode) {
+	if (unlikely(info->flags & IEEE80211_TX_CTL_TX_OFFCHAN))
+		freq = ar->scan.roc_freq;
+
+	switch (txmode) {
 	case ATH10K_HW_TXRX_RAW:
 	case ATH10K_HW_TXRX_NATIVE_WIFI:
 		flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;
@@ -604,22 +1019,18 @@
 			ext_desc = &htt->frag_desc.vaddr[msdu_id];
 			frags[0].tword_addr.paddr_lo =
 				__cpu_to_le32(skb_cb->paddr);
-			frags[0].tword_addr.paddr_hi = 0;
+			frags[0].tword_addr.paddr_hi =
+				ath10k_htt_get_paddr_hi(skb_cb->paddr);
 			frags[0].tword_addr.len_16 = __cpu_to_le16(msdu->len);
 
 			frags_paddr =  htt->frag_desc.paddr +
 				(sizeof(struct htt_msdu_ext_desc) * msdu_id);
 		} else {
-			frags = skb_cb->htt.txbuf->frags;
-			frags[0].dword_addr.paddr =
-				__cpu_to_le32(skb_cb->paddr);
-			frags[0].dword_addr.len = __cpu_to_le32(msdu->len);
-			frags[1].dword_addr.paddr = 0;
-			frags[1].dword_addr.len = 0;
-
-			frags_paddr = skb_cb->htt.txbuf_paddr;
+			frags = txbuf->frags;
+			ath10k_htt_fill_frags(frags, msdu, skb_cb);
+			frags_paddr = txbuf_paddr;
 		}
-		flags0 |= SM(skb_cb->txmode, HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
+		flags0 |= SM(txmode, HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
 		break;
 	case ATH10K_HW_TXRX_MGMT:
 		flags0 |= SM(ATH10K_HW_TXRX_MGMT,
@@ -646,17 +1057,13 @@
 	 * avoid extra memory allocations, compress data structures and thus
 	 * improve performance. */
 
-	skb_cb->htt.txbuf->htc_hdr.eid = htt->eid;
-	skb_cb->htt.txbuf->htc_hdr.len = __cpu_to_le16(
-			sizeof(skb_cb->htt.txbuf->cmd_hdr) +
-			sizeof(skb_cb->htt.txbuf->cmd_tx) +
+	txbuf->htc_hdr.eid = htt->eid;
+	txbuf->htc_hdr.len = __cpu_to_le16(sizeof(txbuf->cmd_hdr) +
+					   sizeof(txbuf->cmd_tx) +
 			prefetch_len);
-	skb_cb->htt.txbuf->htc_hdr.flags = 0;
-
-	if (skb_cb->htt.nohwcrypt)
-		flags0 |= HTT_DATA_TX_DESC_FLAGS0_NO_ENCRYPT;
+	txbuf->htc_hdr.flags = 0;
 
-	if (!skb_cb->is_protected)
+	if (skb_cb->flags & ATH10K_SKB_F_NO_HWCRYPT)
 		flags0 |= HTT_DATA_TX_DESC_FLAGS0_NO_ENCRYPT;
 
 	flags1 |= SM((u16)vdev_id, HTT_DATA_TX_DESC_FLAGS1_VDEV_ID);
@@ -665,8 +1072,14 @@
 	    !test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) {
 		flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L3_OFFLOAD;
 		flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L4_OFFLOAD;
-		if (ar->hw_params.continuous_frag_desc)
+		if (ar->hw_params.continuous_frag_desc) {
 			ext_desc->flags |= HTT_MSDU_CHECKSUM_ENABLE;
+			if (QCA_REV_WCN3990(ar)) {
+				memset(ext_desc->tso_flag, 0,
+				       sizeof(ext_desc->tso_flag));
+				ext_desc->tso_flag[3] |= HTT_TX_CHECKSUM_ENABLE;
+			}
+		}
 	}
 
 	/* Prevent firmware from sending up tx inspection requests. There's
@@ -675,20 +1088,29 @@
 	 */
 	flags1 |= HTT_DATA_TX_DESC_FLAGS1_POSTPONED;
 
-	skb_cb->htt.txbuf->cmd_hdr.msg_type = HTT_H2T_MSG_TYPE_TX_FRM;
-	skb_cb->htt.txbuf->cmd_tx.flags0 = flags0;
-	skb_cb->htt.txbuf->cmd_tx.flags1 = __cpu_to_le16(flags1);
-	skb_cb->htt.txbuf->cmd_tx.len = __cpu_to_le16(msdu->len);
-	skb_cb->htt.txbuf->cmd_tx.id = __cpu_to_le16(msdu_id);
-	skb_cb->htt.txbuf->cmd_tx.frags_paddr = __cpu_to_le32(frags_paddr);
-	skb_cb->htt.txbuf->cmd_tx.peerid = __cpu_to_le16(HTT_INVALID_PEERID);
-	skb_cb->htt.txbuf->cmd_tx.freq = __cpu_to_le16(skb_cb->htt.freq);
+	txbuf->cmd_hdr.msg_type = HTT_H2T_MSG_TYPE_TX_FRM;
+	txbuf->cmd_tx.flags0 = flags0;
+	txbuf->cmd_tx.flags1 = __cpu_to_le16(flags1);
+	txbuf->cmd_tx.len = __cpu_to_le16(msdu->len);
+	txbuf->cmd_tx.id = __cpu_to_le16(msdu_id);
+
+	/* fill fragment descriptor */
+	ath10k_htt_fill_frag_desc(txbuf, frags_paddr);
+	if (ath10k_mac_tx_frm_has_freq(ar)) {
+		txbuf->cmd_tx.offchan_tx.peerid =
+				__cpu_to_le16(HTT_INVALID_PEERID);
+		txbuf->cmd_tx.offchan_tx.freq =
+				__cpu_to_le16(freq);
+	} else {
+		txbuf->cmd_tx.peerid =
+				__cpu_to_le32(HTT_INVALID_PEERID);
+	}
 
 	trace_ath10k_htt_tx(ar, msdu_id, msdu->len, vdev_id, tid);
 	ath10k_dbg(ar, ATH10K_DBG_HTT,
-		   "htt tx flags0 %hhu flags1 %hu len %d id %hu frags_paddr %08x, msdu_paddr %08x vdev %hhu tid %hhu freq %hu\n",
-		   flags0, flags1, msdu->len, msdu_id, frags_paddr,
-		   (u32)skb_cb->paddr, vdev_id, tid, skb_cb->htt.freq);
+		   "htt tx flags0 %hhu flags1 %hu len %d id %hu frags_paddr %pad, msdu_paddr %pad vdev %hhu tid %hhu freq %hu\n",
+		   flags0, flags1, msdu->len, msdu_id, &frags_paddr,
+		   &skb_cb->paddr, vdev_id, tid, freq);
 	ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt tx msdu: ",
 			msdu->data, msdu->len);
 	trace_ath10k_tx_hdr(ar, msdu->data, msdu->len);
@@ -696,12 +1118,12 @@
 
 	sg_items[0].transfer_id = 0;
 	sg_items[0].transfer_context = NULL;
-	sg_items[0].vaddr = &skb_cb->htt.txbuf->htc_hdr;
-	sg_items[0].paddr = skb_cb->htt.txbuf_paddr +
-			    sizeof(skb_cb->htt.txbuf->frags);
-	sg_items[0].len = sizeof(skb_cb->htt.txbuf->htc_hdr) +
-			  sizeof(skb_cb->htt.txbuf->cmd_hdr) +
-			  sizeof(skb_cb->htt.txbuf->cmd_tx);
+	sg_items[0].vaddr = &txbuf->htc_hdr;
+	sg_items[0].paddr = txbuf_paddr +
+			    sizeof(txbuf->frags);
+	sg_items[0].len = sizeof(txbuf->htc_hdr) +
+			  sizeof(txbuf->cmd_hdr) +
+			  sizeof(txbuf->cmd_tx);
 
 	sg_items[1].transfer_id = 0;
 	sg_items[1].transfer_context = NULL;
@@ -720,11 +1142,7 @@
 err_unmap_msdu:
 	dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
 err_free_msdu_id:
-	spin_lock_bh(&htt->tx_lock);
 	ath10k_htt_tx_free_msdu_id(htt, msdu_id);
-	spin_unlock_bh(&htt->tx_lock);
-err_tx_dec:
-	ath10k_htt_tx_dec_pending(htt, limit_mgmt_desc);
 err:
 	return res;
 }
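
The skb_put(msdu, IEEE80211_CCMP_MIC_LEN) hunks above reserve 8 bytes of tailroom because the firmware appends a CCMP MIC to protected action, deauth and disassoc frames. A minimal userspace sketch of the same frame-control test, assuming the standard 802.11 frame-control bit layout (constants are restated here rather than pulled from the kernel headers):

#include <stdint.h>
#include <stdio.h>

/* IEEE 802.11 frame-control fields (host order), restated for the demo */
#define FCTL_FTYPE      0x000c
#define FCTL_STYPE      0x00f0
#define FCTL_PROTECTED  0x4000
#define FTYPE_MGMT      0x0000
#define STYPE_DISASSOC  0x00a0
#define STYPE_DEAUTH    0x00c0
#define STYPE_ACTION    0x00d0
#define CCMP_MIC_LEN    8	/* bytes the firmware appends for CCMP */

static int is_mgmt_subtype(uint16_t fc, uint16_t stype)
{
	return (fc & (FCTL_FTYPE | FCTL_STYPE)) == (FTYPE_MGMT | stype);
}

/* Mirrors the reservation decision made before queueing the frame */
static int needs_mic_tailroom(uint16_t fc)
{
	return (is_mgmt_subtype(fc, STYPE_ACTION) ||
		is_mgmt_subtype(fc, STYPE_DEAUTH) ||
		is_mgmt_subtype(fc, STYPE_DISASSOC)) &&
	       (fc & FCTL_PROTECTED);
}

int main(void)
{
	uint16_t fc = FTYPE_MGMT | STYPE_DEAUTH | FCTL_PROTECTED;

	if (needs_mic_tailroom(fc))
		printf("reserve %d bytes of tailroom\n", CCMP_MIC_LEN);
	return 0;
}

Compiled as plain C this prints the reservation decision for a protected deauth frame.
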
diff -ruw linux-4.4.115/drivers/net/wireless/ath/ath10k/hw.c linux-4.4.115-fbx/drivers/net/wireless/ath/ath10k/hw.c
--- linux-4.4.115/drivers/net/wireless/ath/ath10k/hw.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/net/wireless/ath/ath10k/hw.c	2019-10-29 09:26:24.457211144 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014-2015 Qualcomm Atheros, Inc.
+ * Copyright (c) 2014-2015, 2017 Qualcomm Atheros, Inc.
  *
  * Permission to use, copy, modify, and/or distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above
@@ -19,7 +19,6 @@
 #include "hw.h"
 
 const struct ath10k_hw_regs qca988x_regs = {
-	.rtc_state_cold_reset_mask	= 0x00000400,
 	.rtc_soc_base_address		= 0x00004000,
 	.rtc_wmac_base_address		= 0x00005000,
 	.soc_core_base_address		= 0x00009000,
@@ -46,7 +45,6 @@
 };
 
 const struct ath10k_hw_regs qca6174_regs = {
-	.rtc_state_cold_reset_mask		= 0x00002000,
 	.rtc_soc_base_address			= 0x00000800,
 	.rtc_wmac_base_address			= 0x00001000,
 	.soc_core_base_address			= 0x0003a000,
@@ -73,7 +71,6 @@
 };
 
 const struct ath10k_hw_regs qca99x0_regs = {
-	.rtc_state_cold_reset_mask		= 0x00000400,
 	.rtc_soc_base_address			= 0x00080000,
 	.rtc_wmac_base_address			= 0x00000000,
 	.soc_core_base_address			= 0x00082000,
@@ -88,7 +85,7 @@
 	.ce7_base_address			= 0x0004bc00,
 	/* Note: qca99x0 supports up to 12 Copy Engines. Other than address of
 	 * CE0 and CE1 no other copy engine is directly referred in the code.
-	 * It is not really neccessary to assign address for newly supported
+	 * It is not really necessary to assign address for newly supported
 	 * CEs in this address table.
 	 *	Copy Engine		Address
 	 *	CE8			0x0004c000
@@ -109,7 +106,368 @@
 	.pcie_intr_clr_address			= 0x00000010,
 };
 
+const struct ath10k_hw_regs qca4019_regs = {
+	.rtc_soc_base_address                   = 0x00080000,
+	.soc_core_base_address                  = 0x00082000,
+	.ce_wrapper_base_address                = 0x0004d000,
+	.ce0_base_address                       = 0x0004a000,
+	.ce1_base_address                       = 0x0004a400,
+	.ce2_base_address                       = 0x0004a800,
+	.ce3_base_address                       = 0x0004ac00,
+	.ce4_base_address                       = 0x0004b000,
+	.ce5_base_address                       = 0x0004b400,
+	.ce6_base_address                       = 0x0004b800,
+	.ce7_base_address                       = 0x0004bc00,
+	/* qca4019 supports up to 12 copy engines. Since the base addresses
+	 * of ce8 to ce11 are not directly referenced in the code, there is
+	 * no need to have them as separate members in this table.
+	 *      Copy Engine             Address
+	 *      CE8                     0x0004c000
+	 *      CE9                     0x0004c400
+	 *      CE10                    0x0004c800
+	 *      CE11                    0x0004cc00
+	 */
+	.soc_reset_control_si0_rst_mask         = 0x00000001,
+	.soc_reset_control_ce_rst_mask          = 0x00000100,
+	.soc_chip_id_address                    = 0x000000ec,
+	.fw_indicator_address                   = 0x0004f00c,
+	.ce_wrap_intr_sum_host_msi_lsb          = 0x0000000c,
+	.ce_wrap_intr_sum_host_msi_mask         = 0x00fff000,
+	.pcie_intr_fw_mask                      = 0x00100000,
+	.pcie_intr_ce_mask_all                  = 0x000fff00,
+	.pcie_intr_clr_address                  = 0x00000010,
+};
+
+const struct ath10k_hw_regs wcn3990_regs = {
+	.rtc_soc_base_address			= 0x00000000,
+	.rtc_wmac_base_address			= 0x00000000,
+	.soc_core_base_address			= 0x00000000,
+	.ce_wrapper_base_address		= 0x0024C000,
+	.soc_global_reset_address		= 0x00000008,
+	.ce0_base_address			= 0x00240000,
+	.ce1_base_address			= 0x00241000,
+	.ce2_base_address			= 0x00242000,
+	.ce3_base_address			= 0x00243000,
+	.ce4_base_address			= 0x00244000,
+	.ce5_base_address			= 0x00245000,
+	.ce6_base_address			= 0x00246000,
+	.ce7_base_address			= 0x00247000,
+	.ce8_base_address			= 0x00248000,
+	.ce9_base_address			= 0x00249000,
+	.ce10_base_address			= 0x0024A000,
+	.ce11_base_address			= 0x0024B000,
+	.soc_chip_id_address			= 0x000000f0,
+	.soc_reset_control_si0_rst_mask		= 0x00000001,
+	.soc_reset_control_ce_rst_mask		= 0x00000100,
+	.ce_wrap_intr_sum_host_msi_lsb		= 0x0000000c,
+	.ce_wrap_intr_sum_host_msi_mask		= 0x00fff000,
+	.pcie_intr_fw_mask			= 0x00100000,
+};
+
+static unsigned int
+ath10k_set_ring_byte(unsigned int offset,
+		     struct ath10k_hw_ce_regs_addr_map *addr_map)
+{
+	return ((offset << addr_map->lsb) & addr_map->mask);
+}
+
+static unsigned int
+ath10k_get_ring_byte(unsigned int offset,
+		     struct ath10k_hw_ce_regs_addr_map *addr_map)
+{
+	return ((offset & addr_map->mask) >> addr_map->lsb);
+}
+
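
ath10k_set_ring_byte()/ath10k_get_ring_byte() above are generic mask-and-shift accessors driven by the {lsb, mask} pair in each ath10k_hw_ce_regs_addr_map (the msb member is unused by them). A self-contained sketch of the same packing logic, with an arbitrary demo field layout:

#include <stdint.h>
#include <stdio.h>
#include <assert.h>

struct field { unsigned lsb; uint32_t mask; };

static uint32_t field_set(uint32_t val, const struct field *f)
{
	return (val << f->lsb) & f->mask;
}

static uint32_t field_get(uint32_t reg, const struct field *f)
{
	return (reg & f->mask) >> f->lsb;
}

int main(void)
{
	/* e.g. a 16-bit watermark living in bits 31:16 of a register */
	const struct field wm_low = { .lsb = 16, .mask = 0xffff0000 };
	uint32_t reg = 0;

	reg |= field_set(0x1234, &wm_low);
	assert(field_get(reg, &wm_low) == 0x1234);
	printf("reg = 0x%08x\n", reg);
	return 0;
}
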
+struct ath10k_hw_ce_regs_addr_map wcn3990_src_ring = {
+	.msb	= 0x00000010,
+	.lsb	= 0x00000010,
+	.mask	= 0x00020000,
+	.set	= &ath10k_set_ring_byte,
+	.get	= &ath10k_get_ring_byte,
+};
+
+struct ath10k_hw_ce_regs_addr_map wcn3990_dst_ring = {
+	.msb	= 0x00000012,
+	.lsb	= 0x00000012,
+	.mask	= 0x00040000,
+	.set	= &ath10k_set_ring_byte,
+	.get	= &ath10k_get_ring_byte,
+};
+
+struct ath10k_hw_ce_regs_addr_map wcn3990_dmax = {
+	.msb	= 0x00000000,
+	.lsb	= 0x00000000,
+	.mask	= 0x0000ffff,
+	.set	= &ath10k_set_ring_byte,
+	.get	= &ath10k_get_ring_byte,
+};
+
+struct ath10k_hw_ce_ctrl1 wcn3990_ctrl1 = {
+	.addr		= 0x00000018,
+	.src_ring	= &wcn3990_src_ring,
+	.dst_ring	= &wcn3990_dst_ring,
+	.dmax		= &wcn3990_dmax,
+};
+
+struct ath10k_hw_ce_regs_addr_map wcn3990_host_ie_cc = {
+	.mask	= 0x00000001,
+};
+
+struct ath10k_hw_ce_host_ie wcn3990_host_ie = {
+	.copy_complete	= &wcn3990_host_ie_cc,
+};
+
+struct ath10k_hw_ce_host_wm_regs wcn3990_wm_reg = {
+	.dstr_lmask	= 0x00000010,
+	.dstr_hmask	= 0x00000008,
+	.srcr_lmask	= 0x00000004,
+	.srcr_hmask	= 0x00000002,
+	.cc_mask	= 0x00000001,
+	.wm_mask	= 0x0000001E,
+	.addr		= 0x00000030,
+};
+
+struct ath10k_hw_ce_misc_regs wcn3990_misc_reg = {
+	.axi_err	= 0x00000100,
+	.dstr_add_err	= 0x00000200,
+	.srcr_len_err	= 0x00000100,
+	.dstr_mlen_vio	= 0x00000080,
+	.dstr_overflow	= 0x00000040,
+	.srcr_overflow	= 0x00000020,
+	.err_mask	= 0x000003E0,
+	.addr		= 0x00000038,
+};
+
+struct ath10k_hw_ce_regs_addr_map wcn3990_src_wm_low = {
+	.msb	= 0x00000000,
+	.lsb	= 0x00000010,
+	.mask	= 0xffff0000,
+	.set	= &ath10k_set_ring_byte,
+	.get	= &ath10k_get_ring_byte,
+};
+
+struct ath10k_hw_ce_regs_addr_map wcn3990_src_wm_high = {
+	.msb	= 0x0000000f,
+	.lsb	= 0x00000000,
+	.mask	= 0x0000ffff,
+	.set	= &ath10k_set_ring_byte,
+	.get	= &ath10k_get_ring_byte,
+};
+
+struct ath10k_hw_ce_dst_src_wm_regs wcn3990_wm_src_ring = {
+	.addr		= 0x0000004c,
+	.low_rst	= 0x00000000,
+	.high_rst	= 0x00000000,
+	.wm_low		= &wcn3990_src_wm_low,
+	.wm_high	= &wcn3990_src_wm_high,
+};
+
+struct ath10k_hw_ce_regs_addr_map wcn3990_dst_wm_low = {
+	.lsb	= 0x00000010,
+	.mask	= 0xffff0000,
+	.set	= &ath10k_set_ring_byte,
+};
+
+struct ath10k_hw_ce_regs_addr_map wcn3990_dst_wm_high = {
+	.msb	= 0x0000000f,
+	.lsb	= 0x00000000,
+	.mask	= 0x0000ffff,
+	.set	= &ath10k_set_ring_byte,
+	.get	= &ath10k_get_ring_byte,
+};
+
+struct ath10k_hw_ce_dst_src_wm_regs wcn3990_wm_dst_ring = {
+	.addr		= 0x00000050,
+	.low_rst	= 0x00000000,
+	.high_rst	= 0x00000000,
+	.wm_low		= &wcn3990_dst_wm_low,
+	.wm_high	= &wcn3990_dst_wm_high,
+};
+
+static struct ath10k_hw_ce_ctrl1_upd wcn3990_ctrl1_upd = {
+	.shift = 19,
+	.mask = 0x00080000,
+	.enable = 0x00000000,
+};
+
+struct ath10k_hw_ce_regs wcn3990_ce_regs = {
+	.sr_base_addr		= 0x00000000,
+	.sr_size_addr		= 0x00000008,
+	.dr_base_addr		= 0x0000000c,
+	.dr_size_addr		= 0x00000014,
+	.misc_ie_addr		= 0x00000034,
+	.sr_wr_index_addr	= 0x0000003c,
+	.dst_wr_index_addr	= 0x00000040,
+	.current_srri_addr	= 0x00000044,
+	.current_drri_addr	= 0x00000048,
+	.ddr_addr_for_rri_low	= 0x00000004,
+	.ddr_addr_for_rri_high	= 0x00000008,
+	.ce_rri_low		= 0x0024C004,
+	.ce_rri_high		= 0x0024C008,
+	.host_ie_addr		= 0x0000002c,
+	.ctrl1_regs		= &wcn3990_ctrl1,
+	.host_ie		= &wcn3990_host_ie,
+	.wm_regs		= &wcn3990_wm_reg,
+	.misc_regs		= &wcn3990_misc_reg,
+	.wm_srcr		= &wcn3990_wm_src_ring,
+	.wm_dstr		= &wcn3990_wm_dst_ring,
+	.upd			= &wcn3990_ctrl1_upd,
+};
+
+struct ath10k_hw_ce_regs_addr_map qcax_src_ring = {
+	.msb	= 0x00000010,
+	.lsb	= 0x00000010,
+	.mask	= 0x00010000,
+	.set	= &ath10k_set_ring_byte,
+};
+
+struct ath10k_hw_ce_regs_addr_map qcax_dst_ring = {
+	.msb	= 0x00000011,
+	.lsb	= 0x00000011,
+	.mask	= 0x00020000,
+	.set	= &ath10k_set_ring_byte,
+	.get	= &ath10k_get_ring_byte,
+};
+
+struct ath10k_hw_ce_regs_addr_map qcax_dmax = {
+	.msb	= 0x0000000f,
+	.lsb	= 0x00000000,
+	.mask	= 0x0000ffff,
+	.set	= &ath10k_set_ring_byte,
+	.get    = &ath10k_get_ring_byte,
+};
+
+struct ath10k_hw_ce_ctrl1 qcax_ctrl1 = {
+	.addr		= 0x00000010,
+	.hw_mask	= 0x0007ffff,
+	.sw_mask	= 0x0007ffff,
+	.hw_wr_mask	= 0x00000000,
+	.sw_wr_mask	= 0x0007ffff,
+	.reset_mask	= 0xffffffff,
+	.reset		= 0x00000080,
+	.src_ring	= &qcax_src_ring,
+	.dst_ring	= &qcax_dst_ring,
+	.dmax		= &qcax_dmax,
+};
+
+struct ath10k_hw_ce_regs_addr_map qcax_cmd_halt_status = {
+	.msb	= 0x00000003,
+	.lsb	= 0x00000003,
+	.mask	= 0x00000008,
+	.set	= &ath10k_set_ring_byte,
+	.get	= &ath10k_get_ring_byte,
+};
+
+struct ath10k_hw_ce_cmd_halt qcax_cmd_halt = {
+	.msb		= 0x00000000,
+	.mask		= 0x00000001,
+	.status_reset	= 0x00000000,
+	.status		= &qcax_cmd_halt_status,
+};
+
+struct ath10k_hw_ce_regs_addr_map qcax_host_ie_cc = {
+	.msb	= 0x00000000,
+	.lsb	= 0x00000000,
+	.mask	= 0x00000001,
+	.set	= &ath10k_set_ring_byte,
+	.get	= &ath10k_get_ring_byte,
+};
+
+struct ath10k_hw_ce_host_ie qcax_host_ie = {
+	.copy_complete_reset	= 0x00000000,
+	.copy_complete		= &qcax_host_ie_cc,
+};
+
+struct ath10k_hw_ce_host_wm_regs qcax_wm_reg = {
+	.dstr_lmask	= 0x00000010,
+	.dstr_hmask	= 0x00000008,
+	.srcr_lmask	= 0x00000004,
+	.srcr_hmask	= 0x00000002,
+	.cc_mask	= 0x00000001,
+	.wm_mask	= 0x0000001E,
+	.addr		= 0x00000030,
+};
+
+struct ath10k_hw_ce_misc_regs qcax_misc_reg = {
+	.axi_err	= 0x00000400,
+	.dstr_add_err	= 0x00000200,
+	.srcr_len_err	= 0x00000100,
+	.dstr_mlen_vio	= 0x00000080,
+	.dstr_overflow	= 0x00000040,
+	.srcr_overflow	= 0x00000020,
+	.err_mask	= 0x000007E0,
+	.addr		= 0x00000038,
+};
+
+struct ath10k_hw_ce_regs_addr_map qcax_src_wm_low = {
+	.msb    = 0x0000001f,
+	.lsb	= 0x00000010,
+	.mask	= 0xffff0000,
+	.set	= &ath10k_set_ring_byte,
+	.get	= &ath10k_get_ring_byte,
+};
+
+struct ath10k_hw_ce_regs_addr_map qcax_src_wm_high = {
+	.msb	= 0x0000000f,
+	.lsb	= 0x00000000,
+	.mask	= 0x0000ffff,
+	.set	= &ath10k_set_ring_byte,
+	.get	= &ath10k_get_ring_byte,
+};
+
+struct ath10k_hw_ce_dst_src_wm_regs qcax_wm_src_ring = {
+	.addr		= 0x0000004c,
+	.low_rst	= 0x00000000,
+	.high_rst	= 0x00000000,
+	.wm_low		= &qcax_src_wm_low,
+	.wm_high        = &qcax_src_wm_high,
+};
+
+struct ath10k_hw_ce_regs_addr_map qcax_dst_wm_low = {
+	.lsb	= 0x00000010,
+	.mask	= 0xffff0000,
+	.set	= &ath10k_set_ring_byte,
+};
+
+struct ath10k_hw_ce_regs_addr_map qcax_dst_wm_high = {
+	.msb	= 0x0000000f,
+	.lsb	= 0x00000000,
+	.mask	= 0x0000ffff,
+	.set	= &ath10k_set_ring_byte,
+	.get	= &ath10k_get_ring_byte,
+};
+
+struct ath10k_hw_ce_dst_src_wm_regs qcax_wm_dst_ring = {
+	.addr		= 0x00000050,
+	.low_rst	= 0x00000000,
+	.high_rst	= 0x00000000,
+	.wm_low		= &qcax_dst_wm_low,
+	.wm_high	= &qcax_dst_wm_high,
+};
+
+struct ath10k_hw_ce_regs qcax_ce_regs = {
+	.sr_base_addr		= 0x00000000,
+	.sr_size_addr		= 0x00000004,
+	.dr_base_addr		= 0x00000008,
+	.dr_size_addr		= 0x0000000c,
+	.ce_cmd_addr		= 0x00000018,
+	.misc_ie_addr		= 0x00000034,
+	.sr_wr_index_addr	= 0x0000003c,
+	.dst_wr_index_addr	= 0x00000040,
+	.current_srri_addr	= 0x00000044,
+	.current_drri_addr	= 0x00000048,
+	.host_ie_addr		= 0x0000002c,
+	.ctrl1_regs		= &qcax_ctrl1,
+	.cmd_halt		= &qcax_cmd_halt,
+	.host_ie		= &qcax_host_ie,
+	.wm_regs		= &qcax_wm_reg,
+	.misc_regs		= &qcax_misc_reg,
+	.wm_srcr		= &qcax_wm_src_ring,
+	.wm_dstr                = &qcax_wm_dst_ring,
+};
+
 const struct ath10k_hw_values qca988x_values = {
+	.pdev_suspend_option		= WMI_PDEV_SUSPEND_AND_DISABLE_INTR,
 	.rtc_state_val_on		= 3,
 	.ce_count			= 8,
 	.msi_assign_ce_max		= 7,
@@ -119,6 +477,7 @@
 };
 
 const struct ath10k_hw_values qca6174_values = {
+	.pdev_suspend_option		= WMI_PDEV_SUSPEND_AND_DISABLE_INTR,
 	.rtc_state_val_on		= 3,
 	.ce_count			= 8,
 	.msi_assign_ce_max		= 7,
@@ -128,6 +487,7 @@
 };
 
 const struct ath10k_hw_values qca99x0_values = {
+	.pdev_suspend_option		= WMI_PDEV_SUSPEND_AND_DISABLE_INTR,
 	.rtc_state_val_on		= 5,
 	.ce_count			= 12,
 	.msi_assign_ce_max		= 12,
@@ -136,22 +496,143 @@
 	.ce_desc_meta_data_lsb		= 4,
 };
 
+const struct ath10k_hw_values qca9888_values = {
+	.pdev_suspend_option		= WMI_PDEV_SUSPEND_AND_DISABLE_INTR,
+	.rtc_state_val_on		= 3,
+	.ce_count			= 12,
+	.msi_assign_ce_max		= 12,
+	.num_target_ce_config_wlan	= 10,
+	.ce_desc_meta_data_mask		= 0xFFF0,
+	.ce_desc_meta_data_lsb		= 4,
+};
+
+const struct ath10k_hw_values qca4019_values = {
+	.pdev_suspend_option		= WMI_PDEV_SUSPEND_AND_DISABLE_INTR,
+	.ce_count			= 12,
+	.num_target_ce_config_wlan	= 10,
+	.ce_desc_meta_data_mask		= 0xFFF0,
+	.ce_desc_meta_data_lsb		= 4,
+};
+
+const struct ath10k_hw_values wcn3990_values = {
+	.pdev_suspend_option		= WMI_PDEV_SUSPEND,
+	.rtc_state_val_on		= 5,
+	.ce_count			= 12,
+	.msi_assign_ce_max		= 12,
+	.num_target_ce_config_wlan	= 12,
+	.ce_desc_meta_data_mask		= 0xFFF0,
+	.ce_desc_meta_data_lsb		= 4,
+	.default_listen_interval	= 1,
+};
+
+struct fw_flag wcn3990_fw_flags = {
+	.flags = 0x82E,
+};
+
+struct ath10k_shadow_reg_value wcn3990_shadow_reg_value = {
+	.shadow_reg_value_0  = 0x00032000,
+	.shadow_reg_value_1  = 0x00032004,
+	.shadow_reg_value_2  = 0x00032008,
+	.shadow_reg_value_3  = 0x0003200C,
+	.shadow_reg_value_4  = 0x00032010,
+	.shadow_reg_value_5  = 0x00032014,
+	.shadow_reg_value_6  = 0x00032018,
+	.shadow_reg_value_7  = 0x0003201C,
+	.shadow_reg_value_8  = 0x00032020,
+	.shadow_reg_value_9  = 0x00032024,
+	.shadow_reg_value_10 = 0x00032028,
+	.shadow_reg_value_11 = 0x0003202C,
+	.shadow_reg_value_12 = 0x00032030,
+	.shadow_reg_value_13 = 0x00032034,
+	.shadow_reg_value_14 = 0x00032038,
+	.shadow_reg_value_15 = 0x0003203C,
+	.shadow_reg_value_16 = 0x00032040,
+	.shadow_reg_value_17 = 0x00032044,
+	.shadow_reg_value_18 = 0x00032048,
+	.shadow_reg_value_19 = 0x0003204C,
+	.shadow_reg_value_20 = 0x00032050,
+	.shadow_reg_value_21 = 0x00032054,
+	.shadow_reg_value_22 = 0x00032058,
+	.shadow_reg_value_23 = 0x0003205C
+};
+
+struct ath10k_shadow_reg_address wcn3990_shadow_reg_address = {
+	.shadow_reg_address_0  = 0x00030020,
+	.shadow_reg_address_1  = 0x00030024,
+	.shadow_reg_address_2  = 0x00030028,
+	.shadow_reg_address_3  = 0x0003002C,
+	.shadow_reg_address_4  = 0x00030030,
+	.shadow_reg_address_5  = 0x00030034,
+	.shadow_reg_address_6  = 0x00030038,
+	.shadow_reg_address_7  = 0x0003003C,
+	.shadow_reg_address_8  = 0x00030040,
+	.shadow_reg_address_9  = 0x00030044,
+	.shadow_reg_address_10 = 0x00030048,
+	.shadow_reg_address_11 = 0x0003004C,
+	.shadow_reg_address_12 = 0x00030050,
+	.shadow_reg_address_13 = 0x00030054,
+	.shadow_reg_address_14 = 0x00030058,
+	.shadow_reg_address_15 = 0x0003005C,
+	.shadow_reg_address_16 = 0x00030060,
+	.shadow_reg_address_17 = 0x00030064,
+	.shadow_reg_address_18 = 0x00030068,
+	.shadow_reg_address_19 = 0x0003006C,
+	.shadow_reg_address_20 = 0x00030070,
+	.shadow_reg_address_21 = 0x00030074,
+	.shadow_reg_address_22 = 0x00030078,
+	.shadow_reg_address_23 = 0x0003007C
+};
+
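
Both shadow-register tables above follow a fixed 4-byte stride from their first entry (addresses 0x00030020..0x0003007C, value registers 0x00032000..0x0003205C). A short sketch that derives the same 24 pairs arithmetically, restating only the two base addresses:

#include <stdio.h>

#define SHADOW_REG_ADDR_BASE  0x00030020	/* shadow_reg_address_0 */
#define SHADOW_REG_VALUE_BASE 0x00032000	/* shadow_reg_value_0 */
#define SHADOW_REG_COUNT      24

int main(void)
{
	/* both tables advance in 4-byte steps from their base */
	for (int i = 0; i < SHADOW_REG_COUNT; i++)
		printf("shadow reg %2d: addr 0x%08x value reg 0x%08x\n",
		       i, SHADOW_REG_ADDR_BASE + 4 * i,
		       SHADOW_REG_VALUE_BASE + 4 * i);
	return 0;
}
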
 void ath10k_hw_fill_survey_time(struct ath10k *ar, struct survey_info *survey,
 				u32 cc, u32 rcc, u32 cc_prev, u32 rcc_prev)
 {
 	u32 cc_fix = 0;
+	u32 rcc_fix = 0;
+	enum ath10k_hw_cc_wraparound_type wraparound_type;
 
 	survey->filled |= SURVEY_INFO_TIME |
 			  SURVEY_INFO_TIME_BUSY;
 
-	if (ar->hw_params.has_shifted_cc_wraparound && cc < cc_prev) {
+	wraparound_type = ar->hw_params.cc_wraparound_type;
+
+	if (cc < cc_prev || rcc < rcc_prev) {
+		switch (wraparound_type) {
+		case ATH10K_HW_CC_WRAP_SHIFTED_ALL:
+			if (cc < cc_prev) {
 		cc_fix = 0x7fffffff;
 		survey->filled &= ~SURVEY_INFO_TIME_BUSY;
 	}
+			break;
+		case ATH10K_HW_CC_WRAP_SHIFTED_EACH:
+			if (cc < cc_prev)
+				cc_fix = 0x7fffffff;
+
+			if (rcc < rcc_prev)
+				rcc_fix = 0x7fffffff;
+			break;
+		case ATH10K_HW_CC_WRAP_DISABLED:
+			break;
+		}
+	}
 
 	cc -= cc_prev - cc_fix;
-	rcc -= rcc_prev;
+	rcc -= rcc_prev - rcc_fix;
 
 	survey->time = CCNT_TO_MSEC(ar, cc);
 	survey->time_busy = CCNT_TO_MSEC(ar, rcc);
 }
+
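
The correction in ath10k_hw_fill_survey_time() recovers a usable cycle delta across the quirky wraparound by adding 0x7fffffff back before subtracting the previous sample, all in unsigned 32-bit arithmetic. A sketch of the same formula, with made-up sample values:

#include <stdint.h>
#include <inttypes.h>
#include <stdio.h>

/* on affected chips the cycle counter wraps to 0x7fffffff, not 0 */
#define CC_WRAP_FIX 0x7fffffffu

static uint32_t cc_delta(uint32_t cc, uint32_t cc_prev)
{
	uint32_t fix = 0;

	if (cc < cc_prev)	/* counter wrapped since the last sample */
		fix = CC_WRAP_FIX;

	/* same u32 arithmetic as the driver: cc -= cc_prev - fix */
	return cc - (cc_prev - fix);
}

int main(void)
{
	/* counter sampled just before and just after a wrap */
	uint32_t prev = 0xfffffff0u;
	uint32_t now  = 0x80000010u;

	printf("delta = 0x%" PRIx32 " cycles\n", cc_delta(now, prev));
	return 0;
}
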
+const struct ath10k_hw_ops qca988x_ops = {
+};
+
+static int ath10k_qca99x0_rx_desc_get_l3_pad_bytes(struct htt_rx_desc *rxd)
+{
+	return MS(__le32_to_cpu(rxd->msdu_end.qca99x0.info1),
+		  RX_MSDU_END_INFO1_L3_HDR_PAD);
+}
+
+const struct ath10k_hw_ops qca99x0_ops = {
+	.rx_desc_get_l3_pad_bytes = ath10k_qca99x0_rx_desc_get_l3_pad_bytes,
+};
+
+const struct ath10k_hw_ops wcn3990_ops = {};
diff -ruw linux-4.4.115/drivers/net/wireless/ath/ath10k/hw.h linux-4.4.115-fbx/drivers/net/wireless/ath/ath10k/hw.h
--- linux-4.4.115/drivers/net/wireless/ath/ath10k/hw.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/net/wireless/ath/ath10k/hw.h	2019-01-22 16:16:25.419263757 +0100
@@ -1,6 +1,6 @@
 /*
  * Copyright (c) 2005-2011 Atheros Communications Inc.
- * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ * Copyright (c) 2011-2013,2017 Qualcomm Atheros, Inc.
  *
  * Permission to use, copy, modify, and/or distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above
@@ -26,7 +26,10 @@
 #define QCA6164_2_1_DEVICE_ID   (0x0041)
 #define QCA6174_2_1_DEVICE_ID   (0x003e)
 #define QCA99X0_2_0_DEVICE_ID   (0x0040)
+#define QCA9888_2_0_DEVICE_ID	(0x0056)
+#define QCA9984_1_0_DEVICE_ID	(0x0046)
 #define QCA9377_1_0_DEVICE_ID   (0x0042)
+#define QCA9887_1_0_DEVICE_ID   (0x0050)
 
 /* QCA988X 1.0 definitions (unsupported) */
 #define QCA988X_HW_1_0_CHIP_ID_REV	0x0
@@ -35,11 +38,16 @@
 #define QCA988X_HW_2_0_VERSION		0x4100016c
 #define QCA988X_HW_2_0_CHIP_ID_REV	0x2
 #define QCA988X_HW_2_0_FW_DIR		ATH10K_FW_DIR "/QCA988X/hw2.0"
-#define QCA988X_HW_2_0_FW_FILE		"firmware.bin"
-#define QCA988X_HW_2_0_OTP_FILE		"otp.bin"
 #define QCA988X_HW_2_0_BOARD_DATA_FILE	"board.bin"
 #define QCA988X_HW_2_0_PATCH_LOAD_ADDR	0x1234
 
+/* QCA9887 1.0 definitions */
+#define QCA9887_HW_1_0_VERSION		0x4100016d
+#define QCA9887_HW_1_0_CHIP_ID_REV	0
+#define QCA9887_HW_1_0_FW_DIR		ATH10K_FW_DIR "/QCA9887/hw1.0"
+#define QCA9887_HW_1_0_BOARD_DATA_FILE	"board.bin"
+#define QCA9887_HW_1_0_PATCH_LOAD_ADDR	0x1234
+
 /* QCA6174 target BMI version signatures */
 #define QCA6174_HW_1_0_VERSION		0x05000000
 #define QCA6174_HW_1_1_VERSION		0x05000001
@@ -76,14 +84,10 @@
 };
 
 #define QCA6174_HW_2_1_FW_DIR		"ath10k/QCA6174/hw2.1"
-#define QCA6174_HW_2_1_FW_FILE		"firmware.bin"
-#define QCA6174_HW_2_1_OTP_FILE		"otp.bin"
 #define QCA6174_HW_2_1_BOARD_DATA_FILE	"board.bin"
 #define QCA6174_HW_2_1_PATCH_LOAD_ADDR	0x1234
 
 #define QCA6174_HW_3_0_FW_DIR		"ath10k/QCA6174/hw3.0"
-#define QCA6174_HW_3_0_FW_FILE		"firmware.bin"
-#define QCA6174_HW_3_0_OTP_FILE		"otp.bin"
 #define QCA6174_HW_3_0_BOARD_DATA_FILE	"board.bin"
 #define QCA6174_HW_3_0_PATCH_LOAD_ADDR	0x1234
 
@@ -94,18 +98,40 @@
 #define QCA99X0_HW_2_0_DEV_VERSION     0x01000000
 #define QCA99X0_HW_2_0_CHIP_ID_REV     0x1
 #define QCA99X0_HW_2_0_FW_DIR          ATH10K_FW_DIR "/QCA99X0/hw2.0"
-#define QCA99X0_HW_2_0_FW_FILE         "firmware.bin"
-#define QCA99X0_HW_2_0_OTP_FILE        "otp.bin"
 #define QCA99X0_HW_2_0_BOARD_DATA_FILE "board.bin"
 #define QCA99X0_HW_2_0_PATCH_LOAD_ADDR	0x1234
 
+/* QCA9984 1.0 defines */
+#define QCA9984_HW_1_0_DEV_VERSION	0x1000000
+#define QCA9984_HW_DEV_TYPE		0xa
+#define QCA9984_HW_1_0_CHIP_ID_REV	0x0
+#define QCA9984_HW_1_0_FW_DIR		ATH10K_FW_DIR "/QCA9984/hw1.0"
+#define QCA9984_HW_1_0_BOARD_DATA_FILE "board.bin"
+#define QCA9984_HW_1_0_PATCH_LOAD_ADDR	0x1234
+
+/* QCA9888 2.0 defines */
+#define QCA9888_HW_2_0_DEV_VERSION	0x1000000
+#define QCA9888_HW_DEV_TYPE		0xc
+#define QCA9888_HW_2_0_CHIP_ID_REV	0x0
+#define QCA9888_HW_2_0_FW_DIR		ATH10K_FW_DIR "/QCA9888/hw2.0"
+#define QCA9888_HW_2_0_BOARD_DATA_FILE "board.bin"
+#define QCA9888_HW_2_0_PATCH_LOAD_ADDR	0x1234
+
 /* QCA9377 1.0 definitions */
 #define QCA9377_HW_1_0_FW_DIR          ATH10K_FW_DIR "/QCA9377/hw1.0"
-#define QCA9377_HW_1_0_FW_FILE         "firmware.bin"
-#define QCA9377_HW_1_0_OTP_FILE        "otp.bin"
 #define QCA9377_HW_1_0_BOARD_DATA_FILE "board.bin"
 #define QCA9377_HW_1_0_PATCH_LOAD_ADDR	0x1234
 
+/* QCA4019 1.0 definitions */
+#define QCA4019_HW_1_0_DEV_VERSION     0x01000000
+#define QCA4019_HW_1_0_FW_DIR          ATH10K_FW_DIR "/QCA4019/hw1.0"
+#define QCA4019_HW_1_0_BOARD_DATA_FILE "board.bin"
+#define QCA4019_HW_1_0_PATCH_LOAD_ADDR  0x1234
+
+/* WCN3990 1.0 definitions */
+#define WCN3990_HW_1_0_DEV_VERSION     ATH10K_HW_WCN3990
+#define WCN3990_HW_1_0_FW_DIR          "/etc/firmware"
+
 #define ATH10K_FW_API2_FILE		"firmware-2.bin"
 #define ATH10K_FW_API3_FILE		"firmware-3.bin"
 
@@ -126,8 +152,6 @@
 
 #define REG_DUMP_COUNT_QCA988X 60
 
-#define QCA988X_CAL_DATA_LEN		2116
-
 struct ath10k_fw_ie {
 	__le32 id;
 	__le32 len;
@@ -199,14 +223,19 @@
 	ATH10K_HW_QCA988X,
 	ATH10K_HW_QCA6174,
 	ATH10K_HW_QCA99X0,
+	ATH10K_HW_QCA9888,
+	ATH10K_HW_QCA9984,
 	ATH10K_HW_QCA9377,
+	ATH10K_HW_QCA4019,
+	ATH10K_HW_QCA9887,
+	ATH10K_HW_WCN3990,
 };
 
 struct ath10k_hw_regs {
-	u32 rtc_state_cold_reset_mask;
 	u32 rtc_soc_base_address;
 	u32 rtc_wmac_base_address;
 	u32 soc_core_base_address;
+	u32 soc_global_reset_address;
 	u32 ce_wrapper_base_address;
 	u32 ce0_base_address;
 	u32 ce1_base_address;
@@ -216,6 +245,10 @@
 	u32 ce5_base_address;
 	u32 ce6_base_address;
 	u32 ce7_base_address;
+	u32 ce8_base_address;
+	u32 ce9_base_address;
+	u32 ce10_base_address;
+	u32 ce11_base_address;
 	u32 soc_reset_control_si0_rst_mask;
 	u32 soc_reset_control_ce_rst_mask;
 	u32 soc_chip_id_address;
@@ -232,29 +265,142 @@
 extern const struct ath10k_hw_regs qca988x_regs;
 extern const struct ath10k_hw_regs qca6174_regs;
 extern const struct ath10k_hw_regs qca99x0_regs;
+extern const struct ath10k_hw_regs qca4019_regs;
+extern const struct ath10k_hw_regs wcn3990_regs;
+
+struct ath10k_hw_ce_regs_addr_map {
+	u32 msb;
+	u32 lsb;
+	u32 mask;
+	unsigned int (*set)(unsigned int offset,
+			    struct ath10k_hw_ce_regs_addr_map *addr_map);
+	unsigned int (*get)(unsigned int offset,
+			    struct ath10k_hw_ce_regs_addr_map *addr_map);
+};
+
+struct ath10k_hw_ce_ctrl1 {
+	u32 addr;
+	u32 hw_mask;
+	u32 sw_mask;
+	u32 hw_wr_mask;
+	u32 sw_wr_mask;
+	u32 reset_mask;
+	u32 reset;
+	struct ath10k_hw_ce_regs_addr_map *src_ring;
+	struct ath10k_hw_ce_regs_addr_map *dst_ring;
+	struct ath10k_hw_ce_regs_addr_map *dmax;
+};
+
+struct ath10k_hw_ce_cmd_halt {
+	u32 status_reset;
+	u32 msb;
+	u32 mask;
+	struct ath10k_hw_ce_regs_addr_map *status;
+};
+
+struct ath10k_hw_ce_host_ie {
+	u32 copy_complete_reset;
+	struct ath10k_hw_ce_regs_addr_map *copy_complete;
+};
+
+struct ath10k_hw_ce_host_wm_regs {
+	u32 dstr_lmask;
+	u32 dstr_hmask;
+	u32 srcr_lmask;
+	u32 srcr_hmask;
+	u32 cc_mask;
+	u32 wm_mask;
+	u32 addr;
+};
+
+struct ath10k_hw_ce_misc_regs {
+	u32 axi_err;
+	u32 dstr_add_err;
+	u32 srcr_len_err;
+	u32 dstr_mlen_vio;
+	u32 dstr_overflow;
+	u32 srcr_overflow;
+	u32 err_mask;
+	u32 addr;
+};
+
+struct ath10k_hw_ce_dst_src_wm_regs {
+	u32 addr;
+	u32 low_rst;
+	u32 high_rst;
+	struct ath10k_hw_ce_regs_addr_map *wm_low;
+	struct ath10k_hw_ce_regs_addr_map *wm_high;
+};
+
+struct ath10k_hw_ce_ctrl1_upd {
+	u32 shift;
+	u32 mask;
+	u32 enable;
+};
+
+struct ath10k_hw_ce_regs {
+	u32 sr_base_addr;
+	u32 sr_size_addr;
+	u32 dr_base_addr;
+	u32 dr_size_addr;
+	u32 ce_cmd_addr;
+	u32 misc_ie_addr;
+	u32 sr_wr_index_addr;
+	u32 dst_wr_index_addr;
+	u32 current_srri_addr;
+	u32 current_drri_addr;
+	u32 ddr_addr_for_rri_low;
+	u32 ddr_addr_for_rri_high;
+	u32 ce_rri_low;
+	u32 ce_rri_high;
+	u32 host_ie_addr;
+	struct ath10k_hw_ce_host_wm_regs *wm_regs;
+	struct ath10k_hw_ce_misc_regs *misc_regs;
+	struct ath10k_hw_ce_ctrl1 *ctrl1_regs;
+	struct ath10k_hw_ce_cmd_halt *cmd_halt;
+	struct ath10k_hw_ce_host_ie *host_ie;
+	struct ath10k_hw_ce_dst_src_wm_regs *wm_srcr;
+	struct ath10k_hw_ce_dst_src_wm_regs *wm_dstr;
+	struct ath10k_hw_ce_ctrl1_upd *upd;
+};
+
+extern struct ath10k_hw_ce_regs wcn3990_ce_regs;
+extern struct ath10k_hw_ce_regs qcax_ce_regs;
+
+extern struct fw_flag wcn3990_fw_flags;
 
 struct ath10k_hw_values {
+	u32 pdev_suspend_option;
 	u32 rtc_state_val_on;
 	u8 ce_count;
 	u8 msi_assign_ce_max;
 	u8 num_target_ce_config_wlan;
 	u16 ce_desc_meta_data_mask;
 	u8 ce_desc_meta_data_lsb;
+	u8 default_listen_interval;
 };
 
 extern const struct ath10k_hw_values qca988x_values;
 extern const struct ath10k_hw_values qca6174_values;
 extern const struct ath10k_hw_values qca99x0_values;
+extern const struct ath10k_hw_values qca9888_values;
+extern const struct ath10k_hw_values qca4019_values;
+extern const struct ath10k_hw_values wcn3990_values;
 
 void ath10k_hw_fill_survey_time(struct ath10k *ar, struct survey_info *survey,
 				u32 cc, u32 rcc, u32 cc_prev, u32 rcc_prev);
 
 #define QCA_REV_988X(ar) ((ar)->hw_rev == ATH10K_HW_QCA988X)
+#define QCA_REV_9887(ar) ((ar)->hw_rev == ATH10K_HW_QCA9887)
 #define QCA_REV_6174(ar) ((ar)->hw_rev == ATH10K_HW_QCA6174)
 #define QCA_REV_99X0(ar) ((ar)->hw_rev == ATH10K_HW_QCA99X0)
+#define QCA_REV_9888(ar) ((ar)->hw_rev == ATH10K_HW_QCA9888)
+#define QCA_REV_9984(ar) ((ar)->hw_rev == ATH10K_HW_QCA9984)
 #define QCA_REV_9377(ar) ((ar)->hw_rev == ATH10K_HW_QCA9377)
+#define QCA_REV_40XX(ar) ((ar)->hw_rev == ATH10K_HW_QCA4019)
+#define QCA_REV_WCN3990(ar) ((ar)->hw_rev == ATH10K_HW_WCN3990)
 
-/* Known pecularities:
+/* Known peculiarities:
  *  - raw appears in nwifi decap, raw and nwifi appear in ethernet decap
  *  - raw have FCS, nwifi doesn't
  *  - ethernet frames have 802.11 header decapped and parts (base hdr, cipher
@@ -277,15 +423,6 @@
 	ATH10K_MCAST2UCAST_ENABLED = 1,
 };
 
-struct ath10k_pktlog_hdr {
-	__le16 flags;
-	__le16 missed_cnt;
-	__le16 log_type;
-	__le16 size;
-	__le32 timestamp;
-	u8 payload[0];
-} __packed;
-
 enum ath10k_hw_rate_ofdm {
 	ATH10K_HW_RATE_OFDM_48M = 0,
 	ATH10K_HW_RATE_OFDM_24M,
@@ -307,6 +444,111 @@
 	ATH10K_HW_RATE_CCK_SP_2M,
 };
 
+enum ath10k_hw_rate_rev2_cck {
+	ATH10K_HW_RATE_REV2_CCK_LP_1M = 1,
+	ATH10K_HW_RATE_REV2_CCK_LP_2M,
+	ATH10K_HW_RATE_REV2_CCK_LP_5_5M,
+	ATH10K_HW_RATE_REV2_CCK_LP_11M,
+	ATH10K_HW_RATE_REV2_CCK_SP_2M,
+	ATH10K_HW_RATE_REV2_CCK_SP_5_5M,
+	ATH10K_HW_RATE_REV2_CCK_SP_11M,
+};
+
+enum ath10k_hw_cc_wraparound_type {
+	ATH10K_HW_CC_WRAP_DISABLED = 0,
+
+	/* This type is when the HW chip has a quirky Cycle Counter
+	 * wraparound which resets to 0x7fffffff instead of 0. All
+	 * other CC related counters (e.g. Rx Clear Count) are divided
+	 * by 2 so they never wrap around themselves.
+	 */
+	ATH10K_HW_CC_WRAP_SHIFTED_ALL = 1,
+
+	/* Each hw counter wraps around independently. When a
+	 * counter overflows, the respective counter is right shifted
+	 * by 1, i.e. reset to 0x7fffffff, and the other counters keep
+	 * running unaffected. With this type of wraparound it should
+	 * be possible to report accurate Rx busy time, unlike with
+	 * the first type.
+	 */
+	ATH10K_HW_CC_WRAP_SHIFTED_EACH = 2,
+};
+
+struct ath10k_hw_params {
+	u32 id;
+	u16 dev_id;
+	const char *name;
+	u32 patch_load_addr;
+	int uart_pin;
+	u32 otp_exe_param;
+
+	/* Type of hw cycle counter wraparound logic, for more info
+	 * refer to enum ath10k_hw_cc_wraparound_type.
+	 */
+	enum ath10k_hw_cc_wraparound_type cc_wraparound_type;
+
+	/* Some chips expect the fragment descriptor to be contiguous
+	 * memory for any TX operation. Set the continuous_frag_desc flag
+	 * for hardware that has this requirement.
+	 */
+	bool continuous_frag_desc;
+
+	/* The CCK hardware rate table mapping was revised for newer
+	 * chipsets such as QCA99X0 and QCA4019. The CCK h/w rate values
+	 * are now in proper order with respect to rate/preamble.
+	 */
+	bool cck_rate_map_rev2;
+
+	u32 channel_counters_freq_hz;
+
+	/* Threshold on mgmt tx descriptors, used to limit probe response
+	 * frames.
+	 */
+	u32 max_probe_resp_desc_thres;
+
+	u32 tx_chain_mask;
+	u32 rx_chain_mask;
+	u32 max_spatial_stream;
+	u32 cal_data_len;
+
+	struct ath10k_hw_params_fw {
+		const char *dir;
+		const char *board;
+		size_t board_size;
+		size_t board_ext_size;
+	} fw;
+
+	/* qca99x0 family chips deliver broadcast/multicast management
+	 * frames encrypted and expect software to do the decryption.
+	 */
+	bool sw_decrypt_mcast_mgmt;
+
+	const struct ath10k_hw_ops *hw_ops;
+
+	/* Number of bytes used for alignment in rx_hdr_status of rx desc. */
+	int decap_align_bytes;
+};
+
+struct htt_rx_desc;
+
+/* Defines needed for Rx descriptor abstraction */
+struct ath10k_hw_ops {
+	int (*rx_desc_get_l3_pad_bytes)(struct htt_rx_desc *rxd);
+};
+
+extern const struct ath10k_hw_ops qca988x_ops;
+extern const struct ath10k_hw_ops qca99x0_ops;
+extern const struct ath10k_hw_ops wcn3990_ops;
+
+static inline int
+ath10k_rx_desc_get_l3_pad_bytes(struct ath10k_hw_params *hw,
+				struct htt_rx_desc *rxd)
+{
+	if (hw->hw_ops->rx_desc_get_l3_pad_bytes)
+		return hw->hw_ops->rx_desc_get_l3_pad_bytes(rxd);
+	return 0;
+}
+
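
The per-chip ops tables above let ath10k_rx_desc_get_l3_pad_bytes() fall back to 0 whenever a chip leaves the hook NULL (as qca988x_ops and wcn3990_ops do). A minimal userspace sketch of the same optional-hook pattern; the names and the placeholder pad value are illustrative only:

#include <stdio.h>

struct rx_desc;			/* opaque, as in the driver */

struct hw_ops {
	/* optional hook; NULL means "no L3 padding on this chip" */
	int (*get_l3_pad_bytes)(struct rx_desc *rxd);
};

static int demo_qca99x0_l3_pad(struct rx_desc *rxd)
{
	(void)rxd;
	return 2;		/* placeholder value for the demo */
}

static const struct hw_ops demo_qca988x_ops = { 0 };	/* no hook */
static const struct hw_ops demo_qca99x0_ops = {
	.get_l3_pad_bytes = demo_qca99x0_l3_pad,
};

static int rx_desc_l3_pad_bytes(const struct hw_ops *ops, struct rx_desc *rxd)
{
	if (ops->get_l3_pad_bytes)
		return ops->get_l3_pad_bytes(rxd);
	return 0;		/* default when the chip needs nothing */
}

int main(void)
{
	printf("qca988x pad: %d\n", rx_desc_l3_pad_bytes(&demo_qca988x_ops, NULL));
	printf("qca99x0 pad: %d\n", rx_desc_l3_pad_bytes(&demo_qca99x0_ops, NULL));
	return 0;
}
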
 /* Target specific defines for MAIN firmware */
 #define TARGET_NUM_VDEVS			8
 #define TARGET_NUM_PEER_AST			2
@@ -348,14 +590,19 @@
 #define TARGET_10X_MAC_AGGR_DELIM		0
 #define TARGET_10X_AST_SKID_LIMIT		128
 #define TARGET_10X_NUM_STATIONS			128
+#define TARGET_10X_TX_STATS_NUM_STATIONS	118
 #define TARGET_10X_NUM_PEERS			((TARGET_10X_NUM_STATIONS) + \
 						 (TARGET_10X_NUM_VDEVS))
+#define TARGET_10X_TX_STATS_NUM_PEERS		((TARGET_10X_TX_STATS_NUM_STATIONS) + \
+						 (TARGET_10X_NUM_VDEVS))
 #define TARGET_10X_NUM_OFFLOAD_PEERS		0
 #define TARGET_10X_NUM_OFFLOAD_REORDER_BUFS	0
 #define TARGET_10X_NUM_PEER_KEYS		2
 #define TARGET_10X_NUM_TIDS_MAX			256
 #define TARGET_10X_NUM_TIDS			min((TARGET_10X_NUM_TIDS_MAX), \
 						    (TARGET_10X_NUM_PEERS) * 2)
+#define TARGET_10X_TX_STATS_NUM_TIDS		min((TARGET_10X_NUM_TIDS_MAX), \
+						    (TARGET_10X_TX_STATS_NUM_PEERS) * 2)
 #define TARGET_10X_TX_CHAIN_MASK		(BIT(0) | BIT(1) | BIT(2))
 #define TARGET_10X_RX_CHAIN_MASK		(BIT(0) | BIT(1) | BIT(2))
 #define TARGET_10X_RX_TIMEOUT_LO_PRI		100
@@ -385,6 +632,14 @@
 #define TARGET_TLV_NUM_TIDS			((TARGET_TLV_NUM_PEERS) * 2)
 #define TARGET_TLV_NUM_MSDU_DESC		(1024 + 32)
 #define TARGET_TLV_NUM_WOW_PATTERNS		22
+/* FW supports max 50 outstanding mgmt cmds */
+#define TARGET_TLV_MGMT_NUM_MSDU_DESC		(50)
+
+/* Target specific defines for WMI-HL-1.0 firmware */
+#define TARGET_HL_10_TLV_NUM_PEERS		14
+#define TARGET_HL_10_TLV_AST_SKID_LIMIT		6
+#define TARGET_HL_10_TLV_NUM_WDS_ENTRIES	2
+#define TARGET_HL_1_0_NUM_MSDU_DESC		(3600)
 
 /* Diagnostic Window */
 #define CE_DIAG_PIPE	7
@@ -400,15 +655,14 @@
 
 #define TARGET_10_4_NUM_QCACHE_PEERS_MAX	512
 #define TARGET_10_4_QCACHE_ACTIVE_PEERS		50
+#define TARGET_10_4_QCACHE_ACTIVE_PEERS_PFC	35
 #define TARGET_10_4_NUM_OFFLOAD_PEERS		0
 #define TARGET_10_4_NUM_OFFLOAD_REORDER_BUFFS	0
 #define TARGET_10_4_NUM_PEER_KEYS		2
 #define TARGET_10_4_TGT_NUM_TIDS		((TARGET_10_4_NUM_PEERS) * 2)
+#define TARGET_10_4_NUM_MSDU_DESC		(1024 + 400)
+#define TARGET_10_4_NUM_MSDU_DESC_PFC		2500
 #define TARGET_10_4_AST_SKID_LIMIT		32
-#define TARGET_10_4_TX_CHAIN_MASK		(BIT(0) | BIT(1) | \
-						 BIT(2) | BIT(3))
-#define TARGET_10_4_RX_CHAIN_MASK		(BIT(0) | BIT(1) | \
-						 BIT(2) | BIT(3))
 
 /* 100 ms for video, best-effort, and background */
 #define TARGET_10_4_RX_TIMEOUT_LO_PRI		100
@@ -434,7 +688,6 @@
 #define TARGET_10_4_RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK 1
 #define TARGET_10_4_VOW_CONFIG			0
 #define TARGET_10_4_GTK_OFFLOAD_MAX_VDEV	3
-#define TARGET_10_4_NUM_MSDU_DESC		(1024 + 400)
 #define TARGET_10_4_11AC_TX_MAX_FRAGS		2
 #define TARGET_10_4_MAX_PEER_EXT_STATS		16
 #define TARGET_10_4_SMART_ANT_CAP		0
@@ -468,7 +721,6 @@
 /* as of IP3.7.1 */
 #define RTC_STATE_V_ON				ar->hw_values->rtc_state_val_on
 
-#define RTC_STATE_COLD_RESET_MASK		ar->regs->rtc_state_cold_reset_mask
 #define RTC_STATE_V_LSB				0
 #define RTC_STATE_V_MASK			0x00000007
 #define RTC_STATE_ADDRESS			0x0000
@@ -531,7 +783,10 @@
 #define WLAN_SYSTEM_SLEEP_DISABLE_MASK		0x00000001
 
 #define WLAN_GPIO_PIN0_ADDRESS			0x00000028
+#define WLAN_GPIO_PIN0_CONFIG_LSB		11
 #define WLAN_GPIO_PIN0_CONFIG_MASK		0x00007800
+#define WLAN_GPIO_PIN0_PAD_PULL_LSB		5
+#define WLAN_GPIO_PIN0_PAD_PULL_MASK		0x00000060
 #define WLAN_GPIO_PIN1_ADDRESS			0x0000002c
 #define WLAN_GPIO_PIN1_CONFIG_MASK		0x00007800
 #define WLAN_GPIO_PIN10_ADDRESS			0x00000050
@@ -544,6 +799,8 @@
 #define CLOCK_GPIO_BT_CLK_OUT_EN_MASK		0
 
 #define SI_CONFIG_OFFSET			0x00000000
+#define SI_CONFIG_ERR_INT_LSB			19
+#define SI_CONFIG_ERR_INT_MASK			0x00080000
 #define SI_CONFIG_BIDIR_OD_DATA_LSB		18
 #define SI_CONFIG_BIDIR_OD_DATA_MASK		0x00040000
 #define SI_CONFIG_I2C_LSB			16
@@ -557,7 +814,9 @@
 #define SI_CONFIG_DIVIDER_LSB			0
 #define SI_CONFIG_DIVIDER_MASK			0x0000000f
 #define SI_CS_OFFSET				0x00000004
+#define SI_CS_DONE_ERR_LSB			10
 #define SI_CS_DONE_ERR_MASK			0x00000400
+#define SI_CS_DONE_INT_LSB			9
 #define SI_CS_DONE_INT_MASK			0x00000200
 #define SI_CS_START_LSB				8
 #define SI_CS_START_MASK			0x00000100
@@ -586,6 +845,7 @@
 #define FW_INDICATOR_ADDRESS			ar->regs->fw_indicator_address
 #define FW_IND_EVENT_PENDING			1
 #define FW_IND_INITIALIZED			2
+#define FW_IND_HOST_READY			0x80000000
 
 /* HOST_REG interrupt from firmware */
 #define PCIE_INTR_FIRMWARE_MASK			ar->regs->pcie_intr_fw_mask
@@ -607,7 +867,10 @@
 #define GPIO_BASE_ADDRESS			WLAN_GPIO_BASE_ADDRESS
 #define GPIO_PIN0_OFFSET			WLAN_GPIO_PIN0_ADDRESS
 #define GPIO_PIN1_OFFSET			WLAN_GPIO_PIN1_ADDRESS
+#define GPIO_PIN0_CONFIG_LSB			WLAN_GPIO_PIN0_CONFIG_LSB
 #define GPIO_PIN0_CONFIG_MASK			WLAN_GPIO_PIN0_CONFIG_MASK
+#define GPIO_PIN0_PAD_PULL_LSB			WLAN_GPIO_PIN0_PAD_PULL_LSB
+#define GPIO_PIN0_PAD_PULL_MASK			WLAN_GPIO_PIN0_PAD_PULL_MASK
 #define GPIO_PIN1_CONFIG_MASK			WLAN_GPIO_PIN1_CONFIG_MASK
 #define SI_BASE_ADDRESS				WLAN_SI_BASE_ADDRESS
 #define SCRATCH_BASE_ADDRESS			SOC_CORE_BASE_ADDRESS
@@ -662,6 +925,108 @@
 #define WINDOW_READ_ADDR_ADDRESS		MISSING
 #define WINDOW_WRITE_ADDR_ADDRESS		MISSING
 
+#define QCA9887_1_0_I2C_SDA_GPIO_PIN		5
+#define QCA9887_1_0_I2C_SDA_PIN_CONFIG		3
+#define QCA9887_1_0_SI_CLK_GPIO_PIN		17
+#define QCA9887_1_0_SI_CLK_PIN_CONFIG		3
+#define QCA9887_1_0_GPIO_ENABLE_W1TS_LOW_ADDRESS 0x00000010
+
+#define QCA9887_EEPROM_SELECT_READ		0xa10000a0
+#define QCA9887_EEPROM_ADDR_HI_MASK		0x0000ff00
+#define QCA9887_EEPROM_ADDR_HI_LSB		8
+#define QCA9887_EEPROM_ADDR_LO_MASK		0x00ff0000
+#define QCA9887_EEPROM_ADDR_LO_LSB		16
+
 #define RTC_STATE_V_GET(x) (((x) & RTC_STATE_V_MASK) >> RTC_STATE_V_LSB)
 
+struct ath10k_shadow_reg_value {
+	u32 shadow_reg_value_0;
+	u32 shadow_reg_value_1;
+	u32 shadow_reg_value_2;
+	u32 shadow_reg_value_3;
+	u32 shadow_reg_value_4;
+	u32 shadow_reg_value_5;
+	u32 shadow_reg_value_6;
+	u32 shadow_reg_value_7;
+	u32 shadow_reg_value_8;
+	u32 shadow_reg_value_9;
+	u32 shadow_reg_value_10;
+	u32 shadow_reg_value_11;
+	u32 shadow_reg_value_12;
+	u32 shadow_reg_value_13;
+	u32 shadow_reg_value_14;
+	u32 shadow_reg_value_15;
+	u32 shadow_reg_value_16;
+	u32 shadow_reg_value_17;
+	u32 shadow_reg_value_18;
+	u32 shadow_reg_value_19;
+	u32 shadow_reg_value_20;
+	u32 shadow_reg_value_21;
+	u32 shadow_reg_value_22;
+	u32 shadow_reg_value_23;
+};
+
+struct ath10k_shadow_reg_address {
+	u32 shadow_reg_address_0;
+	u32 shadow_reg_address_1;
+	u32 shadow_reg_address_2;
+	u32 shadow_reg_address_3;
+	u32 shadow_reg_address_4;
+	u32 shadow_reg_address_5;
+	u32 shadow_reg_address_6;
+	u32 shadow_reg_address_7;
+	u32 shadow_reg_address_8;
+	u32 shadow_reg_address_9;
+	u32 shadow_reg_address_10;
+	u32 shadow_reg_address_11;
+	u32 shadow_reg_address_12;
+	u32 shadow_reg_address_13;
+	u32 shadow_reg_address_14;
+	u32 shadow_reg_address_15;
+	u32 shadow_reg_address_16;
+	u32 shadow_reg_address_17;
+	u32 shadow_reg_address_18;
+	u32 shadow_reg_address_19;
+	u32 shadow_reg_address_20;
+	u32 shadow_reg_address_21;
+	u32 shadow_reg_address_22;
+	u32 shadow_reg_address_23;
+};
+
+extern struct ath10k_shadow_reg_value wcn3990_shadow_reg_value;
+extern struct ath10k_shadow_reg_address wcn3990_shadow_reg_address;
+
+#define ATH10K_PKTLOG_HDR_SIZE_16      0x8000
+
+enum {
+	ATH10k_PKTLOG_FLG_FRM_TYPE_LOCAL_S = 0,
+	ATH10K_PKTLOG_FLG_FRM_TYPE_REMOTE_S,
+	ATH10K_PKTLOG_FLG_FRM_TYPE_CLONE_S,
+	ATH10K_PKTLOG_FLG_FRM_TYPE_CBF_S,
+	ATH10K_PKTLOG_FLG_FRM_TYPE_UNKNOWN_S
+};
+
+enum ath10k_pktlog_type {
+	ATH10K_PKTLOG_TYPE_TX_CTRL = 1,
+	ATH10K_PKTLOG_TYPE_TX_STAT,
+	ATH10K_PKTLOG_TYPE_TX_MSDU_ID,
+	ATH10K_PKTLOG_TYPE_TX_FRM_HDR,
+	ATH10K_PKTLOG_TYPE_RX_STAT,
+	ATH10K_PKTLOG_TYPE_RC_FIND,
+	ATH10K_PKTLOG_TYPE_RC_UPDATE,
+	ATH10K_PKTLOG_TYPE_TX_VIRT_ADDR,
+	ATH10K_PKTLOG_TYPE_DBG_PRINT,
+	ATH10K_PKTLOG_TYPE_SW_EVENT,
+	ATH10K_PKTLOG_TYPE_MAX,
+};
+
+struct ath10k_pktlog_hdr {
+	__le16 flags;
+	__le16 missed_cnt;
+	__le16 log_type;
+	__le16 size;
+	__le32 timestamp;
+	u8 payload[0];
+} __packed;
+
 #endif /* _HW_H_ */
diff -ruw linux-4.4.115/drivers/net/wireless/ath/ath10k/Kconfig linux-4.4.115-fbx/drivers/net/wireless/ath/ath10k/Kconfig
--- linux-4.4.115/drivers/net/wireless/ath/ath10k/Kconfig	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/net/wireless/ath/ath10k/Kconfig	2019-01-22 16:16:25.411263685 +0100
@@ -2,6 +2,7 @@
         tristate "Atheros 802.11ac wireless cards support"
         depends on MAC80211 && HAS_DMA
 	select ATH_COMMON
+	select CRC32
         ---help---
           This module adds support for wireless adapters based on
           Atheros IEEE 802.11ac family of chipsets.
@@ -14,6 +15,32 @@
 	---help---
 	  This module adds support for PCIE bus
 
+config ATH10K_AHB
+	bool "Atheros ath10k AHB support"
+	depends on ATH10K_PCI && OF && RESET_CONTROLLER
+	---help---
+	  This module adds support for the AHB bus.
+
+config ATH10K_TARGET_SNOC
+	tristate "Atheros ath10k SNOC support"
+	depends on ATH10K
+	---help---
+	  This module adds support for the integrated WCN3990 WLAN module.
+	  The WCN3990 is an integrated 802.11ac chipset with an SNOC bus
+	  interface. This module also registers the WCN3990 WLAN module
+	  with the MAC80211 network subsystem.
+
+config ATH10K_SNOC
+	bool "Enable/disable Atheros ath10k SNOC bus interface support"
+	depends on ATH10K
+	depends on ATH10K_TARGET_SNOC
+	---help---
+	  This option adds support for WLAN SNOC bus registration, WLAN
+	  copy engine configuration for the WCN3990 chipset, WLAN hardware
+	  shadow register configuration, a host-to-target communication
+	  interface for interacting with the WLAN firmware, WLAN module
+	  interface control, and data receive (RX)/transmit (TX) control.
+
 config ATH10K_DEBUG
 	bool "Atheros ath10k debugging"
 	depends on ATH10K
diff -ruw linux-4.4.115/drivers/net/wireless/ath/ath10k/mac.c linux-4.4.115-fbx/drivers/net/wireless/ath/ath10k/mac.c
--- linux-4.4.115/drivers/net/wireless/ath/ath10k/mac.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/net/wireless/ath/ath10k/mac.c	2019-10-29 09:26:24.461211183 +0100
@@ -1,6 +1,6 @@
 /*
  * Copyright (c) 2005-2011 Atheros Communications Inc.
- * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ * Copyright (c) 2011-2013, 2017 Qualcomm Atheros, Inc.
  *
  * Permission to use, copy, modify, and/or distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above
@@ -32,6 +32,11 @@
 #include "wmi-ops.h"
 #include "wow.h"
 
+static u8 base_mac_addr[18] = "00:00:00:00:00:00";
+module_param_string(base_mac_addr, base_mac_addr, 18, 0);
+MODULE_PARM_DESC(base_mac_addr,
+		 "Override EEPROM defined base MAC address");
+
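
base_mac_addr is declared as an 18-byte buffer because a colon-separated MAC string is 17 characters plus the terminating NUL. How the driver converts the string is not shown in this hunk; a hypothetical sscanf-based parse of such a string looks like:

#include <stdio.h>
#include <stdint.h>

/* Parse "aa:bb:cc:dd:ee:ff" into 6 octets; returns 0 on success */
static int parse_mac(const char *s, uint8_t mac[6])
{
	int n = sscanf(s, "%2hhx:%2hhx:%2hhx:%2hhx:%2hhx:%2hhx",
		       &mac[0], &mac[1], &mac[2],
		       &mac[3], &mac[4], &mac[5]);
	return n == 6 ? 0 : -1;
}

int main(void)
{
	uint8_t mac[6];

	if (parse_mac("00:03:7f:12:34:56", mac) == 0)
		printf("OUI %02x:%02x:%02x\n", mac[0], mac[1], mac[2]);
	return 0;
}

With the parameter registered this way it would be supplied on the module command line, e.g. base_mac_addr=00:03:7f:12:34:56 (the exact module name depends on how mac.c is linked).
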
 /*********/
 /* Rates */
 /*********/
@@ -62,6 +67,32 @@
 	{ .bitrate = 540, .hw_value = ATH10K_HW_RATE_OFDM_54M },
 };
 
+static struct ieee80211_rate ath10k_rates_rev2[] = {
+	{ .bitrate = 10,
+	  .hw_value = ATH10K_HW_RATE_REV2_CCK_LP_1M },
+	{ .bitrate = 20,
+	  .hw_value = ATH10K_HW_RATE_REV2_CCK_LP_2M,
+	  .hw_value_short = ATH10K_HW_RATE_REV2_CCK_SP_2M,
+	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
+	{ .bitrate = 55,
+	  .hw_value = ATH10K_HW_RATE_REV2_CCK_LP_5_5M,
+	  .hw_value_short = ATH10K_HW_RATE_REV2_CCK_SP_5_5M,
+	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
+	{ .bitrate = 110,
+	  .hw_value = ATH10K_HW_RATE_REV2_CCK_LP_11M,
+	  .hw_value_short = ATH10K_HW_RATE_REV2_CCK_SP_11M,
+	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
+
+	{ .bitrate = 60, .hw_value = ATH10K_HW_RATE_OFDM_6M },
+	{ .bitrate = 90, .hw_value = ATH10K_HW_RATE_OFDM_9M },
+	{ .bitrate = 120, .hw_value = ATH10K_HW_RATE_OFDM_12M },
+	{ .bitrate = 180, .hw_value = ATH10K_HW_RATE_OFDM_18M },
+	{ .bitrate = 240, .hw_value = ATH10K_HW_RATE_OFDM_24M },
+	{ .bitrate = 360, .hw_value = ATH10K_HW_RATE_OFDM_36M },
+	{ .bitrate = 480, .hw_value = ATH10K_HW_RATE_OFDM_48M },
+	{ .bitrate = 540, .hw_value = ATH10K_HW_RATE_OFDM_54M },
+};
+
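
In these rate tables .bitrate is in units of 100 kbps (10 is 1 Mbit/s, 55 is 5.5 Mbit/s), and the rev2 CCK hardware values start at 1 per enum ath10k_hw_rate_rev2_cck. A tiny sketch converting the first CCK entries back to Mbit/s, with the values restated for the demo:

#include <stdio.h>

struct rate { int bitrate; int hw_value; };	/* bitrate in 100 kbps */

int main(void)
{
	/* first CCK entries of the rev2 table, restated for the demo */
	const struct rate cck[] = {
		{ 10, 1 }, { 20, 2 }, { 55, 3 }, { 110, 4 },
	};

	for (unsigned i = 0; i < sizeof(cck) / sizeof(cck[0]); i++)
		printf("hw %d -> %d.%d Mbit/s\n", cck[i].hw_value,
		       cck[i].bitrate / 10, cck[i].bitrate % 10);
	return 0;
}
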
 #define ATH10K_MAC_FIRST_OFDM_RATE_IDX 4
 
 #define ath10k_a_rates (ath10k_rates + ATH10K_MAC_FIRST_OFDM_RATE_IDX)
@@ -70,6 +101,9 @@
 #define ath10k_g_rates (ath10k_rates + 0)
 #define ath10k_g_rates_size (ARRAY_SIZE(ath10k_rates))
 
+#define ath10k_g_rates_rev2 (ath10k_rates_rev2 + 0)
+#define ath10k_g_rates_rev2_size (ARRAY_SIZE(ath10k_rates_rev2))
+
 static bool ath10k_mac_bitrate_is_cck(int bitrate)
 {
 	switch (bitrate) {
@@ -90,7 +124,7 @@
 }
 
 u8 ath10k_mac_hw_rate_to_idx(const struct ieee80211_supported_band *sband,
-			     u8 hw_rate)
+			     u8 hw_rate, bool cck)
 {
 	const struct ieee80211_rate *rate;
 	int i;
@@ -98,6 +132,9 @@
 	for (i = 0; i < sband->n_bitrates; i++) {
 		rate = &sband->bitrates[i];
 
+		if (ath10k_mac_bitrate_is_cck(rate->bitrate) != cck)
+			continue;
+
 		if (rate->hw_value == hw_rate)
 			return i;
 		else if (rate->flags & IEEE80211_RATE_SHORT_PREAMBLE &&
@@ -154,6 +191,26 @@
 	return 1;
 }
 
+int ath10k_mac_ext_resource_config(struct ath10k *ar, u32 val)
+{
+	enum wmi_host_platform_type platform_type;
+	int ret;
+
+	if (test_bit(WMI_SERVICE_TX_MODE_DYNAMIC, ar->wmi.svc_map))
+		platform_type = WMI_HOST_PLATFORM_LOW_PERF;
+	else
+		platform_type = WMI_HOST_PLATFORM_HIGH_PERF;
+
+	ret = ath10k_wmi_ext_resource_config(ar, platform_type, val);
+
+	if (ret && ret != -EOPNOTSUPP) {
+		ath10k_warn(ar, "failed to configure ext resource: %d\n", ret);
+		return ret;
+	}
+
+	return 0;
+}
+
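
ath10k_mac_ext_resource_config() above treats -EOPNOTSUPP as success, so firmware that lacks the WMI ext-resource command keeps working and only unexpected errors propagate. A minimal sketch of that best-effort pattern (the stub command is invented for the demo; EOPNOTSUPP is taken from the Linux errno.h):

#include <errno.h>
#include <stdio.h>

/* stand-in for a firmware command that older firmware rejects */
static int send_ext_resource_config(int platform_type, int val)
{
	(void)platform_type;
	(void)val;
	return -EOPNOTSUPP;	/* pretend the firmware lacks the command */
}

/* best-effort configuration: "not supported" is not a failure */
static int configure_ext_resources(int platform_type, int val)
{
	int ret = send_ext_resource_config(platform_type, val);

	if (ret && ret != -EOPNOTSUPP) {
		fprintf(stderr, "ext resource config failed: %d\n", ret);
		return ret;
	}
	return 0;
}

int main(void)
{
	printf("result: %d\n", configure_ext_resources(1, 0));
	return 0;
}
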
 /**********/
 /* Crypto */
 /**********/
@@ -247,7 +304,8 @@
 	lockdep_assert_held(&ar->conf_mutex);
 
 	if (WARN_ON(arvif->vif->type != NL80211_IFTYPE_AP &&
-		    arvif->vif->type != NL80211_IFTYPE_ADHOC))
+		    arvif->vif->type != NL80211_IFTYPE_ADHOC &&
+		    arvif->vif->type != NL80211_IFTYPE_MESH_POINT))
 		return -EINVAL;
 
 	spin_lock_bh(&ar->data_lock);
@@ -445,10 +503,10 @@
 	lockdep_assert_held(&ar->conf_mutex);
 
 	list_for_each_entry(peer, &ar->peers, list) {
-		if (!memcmp(peer->addr, arvif->vif->addr, ETH_ALEN))
+		if (ether_addr_equal(peer->addr, arvif->vif->addr))
 			continue;
 
-		if (!memcmp(peer->addr, arvif->bssid, ETH_ALEN))
+		if (ether_addr_equal(peer->addr, arvif->bssid))
 			continue;
 
 		if (peer->keys[key->keyidx] == key)
@@ -614,10 +672,45 @@
 	*def = &conf->def;
 }
 
-static int ath10k_peer_create(struct ath10k *ar, u32 vdev_id, const u8 *addr,
+static int ath10k_peer_delete(struct ath10k *ar, u32 vdev_id, const u8 *addr)
+{
+	int ret;
+	unsigned long time_left;
+
+	lockdep_assert_held(&ar->conf_mutex);
+
+	ret = ath10k_wmi_peer_delete(ar, vdev_id, addr);
+	if (ret)
+		return ret;
+
+	ret = ath10k_wait_for_peer_deleted(ar, vdev_id, addr);
+	if (ret)
+		return ret;
+
+	if (QCA_REV_WCN3990(ar)) {
+		time_left = wait_for_completion_timeout(&ar->peer_delete_done,
+							50 * HZ);
+
+		if (time_left == 0) {
+			ath10k_warn(ar, "Timeout in receiving peer delete response\n");
+			return -ETIMEDOUT;
+		}
+	}
+
+	ar->num_peers--;
+
+	return 0;
+}
+
+static int ath10k_peer_create(struct ath10k *ar,
+			      struct ieee80211_vif *vif,
+			      struct ieee80211_sta *sta,
+			      u32 vdev_id,
+			      const u8 *addr,
 			      enum wmi_peer_type peer_type)
 {
 	struct ath10k_vif *arvif;
+	struct ath10k_peer *peer;
 	int num_peers = 0;
 	int ret;
 
@@ -646,6 +739,22 @@
 		return ret;
 	}
 
+	spin_lock_bh(&ar->data_lock);
+
+	peer = ath10k_peer_find(ar, vdev_id, addr);
+	if (!peer) {
+		spin_unlock_bh(&ar->data_lock);
+		ath10k_warn(ar, "failed to find peer %pM on vdev %i after creation\n",
+			    addr, vdev_id);
+		ath10k_peer_delete(ar, vdev_id, addr);
+		return -ENOENT;
+	}
+
+	peer->vif = vif;
+	peer->sta = sta;
+
+	spin_unlock_bh(&ar->data_lock);
+
 	ar->num_peers++;
 
 	return 0;
@@ -705,28 +814,11 @@
 	return ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, value);
 }
 
-static int ath10k_peer_delete(struct ath10k *ar, u32 vdev_id, const u8 *addr)
-{
-	int ret;
-
-	lockdep_assert_held(&ar->conf_mutex);
-
-	ret = ath10k_wmi_peer_delete(ar, vdev_id, addr);
-	if (ret)
-		return ret;
-
-	ret = ath10k_wait_for_peer_deleted(ar, vdev_id, addr);
-	if (ret)
-		return ret;
-
-	ar->num_peers--;
-
-	return 0;
-}
-
 static void ath10k_peer_cleanup(struct ath10k *ar, u32 vdev_id)
 {
 	struct ath10k_peer *peer, *tmp;
+	int peer_id;
+	int i;
 
 	lockdep_assert_held(&ar->conf_mutex);
 
@@ -738,6 +830,22 @@
 		ath10k_warn(ar, "removing stale peer %pM from vdev_id %d\n",
 			    peer->addr, vdev_id);
 
+		for_each_set_bit(peer_id, peer->peer_ids,
+				 ATH10K_MAX_NUM_PEER_IDS) {
+			ar->peer_map[peer_id] = NULL;
+		}
+
+		/* Double check that peer is properly un-referenced from
+		 * the peer_map
+		 */
+		for (i = 0; i < ARRAY_SIZE(ar->peer_map); i++) {
+			if (ar->peer_map[i] == peer) {
+				ath10k_warn(ar, "removing stale peer_map entry for %pM (ptr %pK idx %d)\n",
+					    peer->addr, peer, i);
+				ar->peer_map[i] = NULL;
+			}
+		}
+
 		list_del(&peer->list);
 		kfree(peer);
 		ar->num_peers--;
@@ -748,6 +856,7 @@
 static void ath10k_peer_cleanup_all(struct ath10k *ar)
 {
 	struct ath10k_peer *peer, *tmp;
+	int i;
 
 	lockdep_assert_held(&ar->conf_mutex);
 
@@ -756,6 +865,10 @@
 		list_del(&peer->list);
 		kfree(peer);
 	}
+
+	for (i = 0; i < ARRAY_SIZE(ar->peer_map); i++)
+		ar->peer_map[i] = NULL;
+
 	spin_unlock_bh(&ar->data_lock);
 
 	ar->num_peers = 0;
@@ -886,6 +999,7 @@
 	arg.channel.max_antenna_gain = channel->max_antenna_gain * 2;
 
 	reinit_completion(&ar->vdev_setup_done);
+	reinit_completion(&ar->vdev_delete_done);
 
 	ret = ath10k_wmi_vdev_start(ar, &arg);
 	if (ret) {
@@ -935,6 +1049,7 @@
 			    ar->monitor_vdev_id, ret);
 
 	reinit_completion(&ar->vdev_setup_done);
+	reinit_completion(&ar->vdev_delete_done);
 
 	ret = ath10k_wmi_vdev_stop(ar, ar->monitor_vdev_id);
 	if (ret)
@@ -1187,15 +1302,18 @@
 
 	set_bit(ATH10K_CAC_RUNNING, &ar->dev_flags);
 
+	if (!QCA_REV_WCN3990(ar)) {
 	ret = ath10k_monitor_recalc(ar);
 	if (ret) {
-		ath10k_warn(ar, "failed to start monitor (cac): %d\n", ret);
+			ath10k_warn(ar, "failed to start monitor (cac): %d\n",
+				    ret);
 		clear_bit(ATH10K_CAC_RUNNING, &ar->dev_flags);
 		return ret;
 	}
 
 	ath10k_dbg(ar, ATH10K_DBG_MAC, "mac cac start monitor vdev %d\n",
 		   ar->monitor_vdev_id);
+	}
 
 	return 0;
 }
@@ -1209,6 +1327,7 @@
 		return 0;
 
 	clear_bit(ATH10K_CAC_RUNNING, &ar->dev_flags);
+	if (!QCA_REV_WCN3990(ar))
 	ath10k_monitor_stop(ar);
 
 	ath10k_dbg(ar, ATH10K_DBG_MAC, "mac cac finished\n");
@@ -1271,6 +1390,7 @@
 	lockdep_assert_held(&ar->conf_mutex);
 
 	reinit_completion(&ar->vdev_setup_done);
+	reinit_completion(&ar->vdev_delete_done);
 
 	ret = ath10k_wmi_vdev_stop(ar, arvif->vdev_id);
 	if (ret) {
@@ -1306,7 +1426,12 @@
 
 	lockdep_assert_held(&ar->conf_mutex);
 
+	/* Clear ARP and NS offload cache */
+	memset(&arvif->arp_offload, 0, sizeof(arvif->arp_offload));
+	memset(&arvif->ns_offload, 0, sizeof(arvif->ns_offload));
+
 	reinit_completion(&ar->vdev_setup_done);
+	reinit_completion(&ar->vdev_delete_done);
 
 	arg.vdev_id = arvif->vdev_id;
 	arg.dtim_period = arvif->dtim_period;
@@ -1384,10 +1509,7 @@
 	const u8 *p2p_ie;
 	int ret;
 
-	if (arvif->vdev_type != WMI_VDEV_TYPE_AP)
-		return 0;
-
-	if (arvif->vdev_subtype != WMI_VDEV_SUBTYPE_P2P_GO)
+	if (arvif->vif->type != NL80211_IFTYPE_AP || !arvif->vif->p2p)
 		return 0;
 
 	mgmt = (void *)bcn->data;
@@ -1754,7 +1876,7 @@
 
 	if (enable_ps && ath10k_mac_num_vifs_started(ar) > 1 &&
 	    !test_bit(ATH10K_FW_FEATURE_MULTI_VIF_PS_SUPPORT,
-		      ar->fw_features)) {
+		      ar->running_fw->fw_file.fw_features)) {
 		ath10k_warn(ar, "refusing to enable ps on vdev %i: not supported by fw\n",
 			    arvif->vdev_id);
 		enable_ps = false;
@@ -1990,7 +2112,7 @@
 	ether_addr_copy(arg->addr, sta->addr);
 	arg->vdev_id = arvif->vdev_id;
 	arg->peer_aid = aid;
-	arg->peer_flags |= WMI_PEER_AUTH;
+	arg->peer_flags |= arvif->ar->wmi.peer_flags->auth;
 	arg->peer_listen_intval = ath10k_peer_assoc_h_listen_intval(ar, vif);
 	arg->peer_num_spatial_streams = 1;
 	arg->peer_caps = vif->bss_conf.assoc_capability;
@@ -1998,6 +2120,7 @@
 
 static void ath10k_peer_assoc_h_crypto(struct ath10k *ar,
 				       struct ieee80211_vif *vif,
+				       struct ieee80211_sta *sta,
 				       struct wmi_peer_assoc_complete_arg *arg)
 {
 	struct ieee80211_bss_conf *info = &vif->bss_conf;
@@ -2032,12 +2155,18 @@
 	/* FIXME: base on RSN IE/WPA IE is a correct idea? */
 	if (rsnie || wpaie) {
 		ath10k_dbg(ar, ATH10K_DBG_WMI, "%s: rsn ie found\n", __func__);
-		arg->peer_flags |= WMI_PEER_NEED_PTK_4_WAY;
+		arg->peer_flags |= ar->wmi.peer_flags->need_ptk_4_way;
 	}
 
 	if (wpaie) {
 		ath10k_dbg(ar, ATH10K_DBG_WMI, "%s: wpa ie found\n", __func__);
-		arg->peer_flags |= WMI_PEER_NEED_GTK_2_WAY;
+		arg->peer_flags |= ar->wmi.peer_flags->need_gtk_2_way;
+	}
+
+	if (sta->mfp &&
+	    test_bit(ATH10K_FW_FEATURE_MFP_SUPPORT,
+		     ar->running_fw->fw_file.fw_features)) {
+		arg->peer_flags |= ar->wmi.peer_flags->pmf;
 	}
 }
 
@@ -2134,7 +2263,7 @@
 	    ath10k_peer_assoc_h_vht_masked(vht_mcs_mask))
 		return;
 
-	arg->peer_flags |= WMI_PEER_HT;
+	arg->peer_flags |= ar->wmi.peer_flags->ht;
 	arg->peer_max_mpdu = (1 << (IEEE80211_HT_MAX_AMPDU_FACTOR +
 				    ht_cap->ampdu_factor)) - 1;
 
@@ -2145,10 +2274,10 @@
 	arg->peer_rate_caps |= WMI_RC_HT_FLAG;
 
 	if (ht_cap->cap & IEEE80211_HT_CAP_LDPC_CODING)
-		arg->peer_flags |= WMI_PEER_LDPC;
+		arg->peer_flags |= ar->wmi.peer_flags->ldbc;
 
 	if (sta->bandwidth >= IEEE80211_STA_RX_BW_40) {
-		arg->peer_flags |= WMI_PEER_40MHZ;
+		arg->peer_flags |= ar->wmi.peer_flags->bw40;
 		arg->peer_rate_caps |= WMI_RC_CW40_FLAG;
 	}
 
@@ -2162,7 +2291,7 @@
 
 	if (ht_cap->cap & IEEE80211_HT_CAP_TX_STBC) {
 		arg->peer_rate_caps |= WMI_RC_TX_STBC_FLAG;
-		arg->peer_flags |= WMI_PEER_STBC;
+		arg->peer_flags |= ar->wmi.peer_flags->stbc;
 	}
 
 	if (ht_cap->cap & IEEE80211_HT_CAP_RX_STBC) {
@@ -2170,7 +2299,7 @@
 		stbc = stbc >> IEEE80211_HT_CAP_RX_STBC_SHIFT;
 		stbc = stbc << WMI_RC_RX_STBC_FLAG_S;
 		arg->peer_rate_caps |= stbc;
-		arg->peer_flags |= WMI_PEER_STBC;
+		arg->peer_flags |= ar->wmi.peer_flags->stbc;
 	}
 
 	if (ht_cap->mcs.rx_mask[1] && ht_cap->mcs.rx_mask[2])
@@ -2351,10 +2480,10 @@
 	if (ath10k_peer_assoc_h_vht_masked(vht_mcs_mask))
 		return;
 
-	arg->peer_flags |= WMI_PEER_VHT;
+	arg->peer_flags |= ar->wmi.peer_flags->vht;
 
 	if (def.chan->band == IEEE80211_BAND_2GHZ)
-		arg->peer_flags |= WMI_PEER_VHT_2G;
+		arg->peer_flags |= ar->wmi.peer_flags->vht_2g;
 
 	arg->peer_vht_caps = vht_cap->cap;
 
@@ -2371,7 +2500,7 @@
 					ampdu_factor)) - 1);
 
 	if (sta->bandwidth == IEEE80211_STA_RX_BW_80)
-		arg->peer_flags |= WMI_PEER_80MHZ;
+		arg->peer_flags |= ar->wmi.peer_flags->bw80;
 
 	arg->peer_vht_rates.rx_max_rate =
 		__le16_to_cpu(vht_cap->vht_mcs.rx_highest);
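
The peer-assoc hunks above stop OR-ing compile-time WMI_PEER_* constants into arg->peer_flags and instead read each bit from ar->wmi.peer_flags, a per-firmware-ABI map chosen at attach time (the driver's field really is spelled "ldbc"). A minimal user-space sketch of the pattern, with all flag values invented for illustration:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical per-ABI bit positions; the real maps are filled in by the
 * main and TLV WMI implementations at attach time. */
struct peer_flags_map {
	uint32_t auth, qos, ht, vht, bw40, bw80, stbc, ldbc;
};

static const struct peer_flags_map main_map = {
	.auth = 1u << 0, .qos = 1u << 1, .ht = 1u << 2, .vht = 1u << 3,
	.bw40 = 1u << 4, .bw80 = 1u << 5, .stbc = 1u << 6, .ldbc = 1u << 7,
};

static const struct peer_flags_map tlv_map = {
	.auth = 1u << 5, .qos = 1u << 6, .ht = 1u << 1, .vht = 1u << 2,
	.bw40 = 1u << 3, .bw80 = 1u << 4, .stbc = 1u << 7, .ldbc = 1u << 8,
};

int main(void)
{
	int fw_is_tlv = 1; /* decided once, when the firmware ABI is known */
	const struct peer_flags_map *m = fw_is_tlv ? &tlv_map : &main_map;
	uint32_t peer_flags = 0;

	/* call sites stay ABI-agnostic, exactly like the hunks above */
	peer_flags |= m->auth;
	peer_flags |= m->ht | m->bw40;
	printf("peer_flags = 0x%x\n", (unsigned)peer_flags);
	return 0;
}

Keeping the bit positions in a table means only the map initialization differs between WMI flavors; every call site survives an ABI change untouched.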
@@ -2396,27 +2525,28 @@
 	switch (arvif->vdev_type) {
 	case WMI_VDEV_TYPE_AP:
 		if (sta->wme)
-			arg->peer_flags |= WMI_PEER_QOS;
+			arg->peer_flags |= arvif->ar->wmi.peer_flags->qos;
 
 		if (sta->wme && sta->uapsd_queues) {
-			arg->peer_flags |= WMI_PEER_APSD;
+			arg->peer_flags |= arvif->ar->wmi.peer_flags->apsd;
 			arg->peer_rate_caps |= WMI_RC_UAPSD_FLAG;
 		}
 		break;
 	case WMI_VDEV_TYPE_STA:
 		if (vif->bss_conf.qos)
-			arg->peer_flags |= WMI_PEER_QOS;
+			arg->peer_flags |= arvif->ar->wmi.peer_flags->qos;
 		break;
 	case WMI_VDEV_TYPE_IBSS:
 		if (sta->wme)
-			arg->peer_flags |= WMI_PEER_QOS;
+			arg->peer_flags |= arvif->ar->wmi.peer_flags->qos;
 		break;
 	default:
 		break;
 	}
 
 	ath10k_dbg(ar, ATH10K_DBG_MAC, "mac peer %pM qos %d\n",
-		   sta->addr, !!(arg->peer_flags & WMI_PEER_QOS));
+		   sta->addr, !!(arg->peer_flags &
+		   arvif->ar->wmi.peer_flags->qos));
 }
 
 static bool ath10k_mac_sta_has_ofdm_only(struct ieee80211_sta *sta)
@@ -2509,7 +2639,7 @@
 	memset(arg, 0, sizeof(*arg));
 
 	ath10k_peer_assoc_h_basic(ar, vif, sta, arg);
-	ath10k_peer_assoc_h_crypto(ar, vif, arg);
+	ath10k_peer_assoc_h_crypto(ar, vif, sta, arg);
 	ath10k_peer_assoc_h_rates(ar, vif, sta, arg);
 	ath10k_peer_assoc_h_ht(ar, vif, sta, arg);
 	ath10k_peer_assoc_h_vht(ar, vif, sta, arg);
@@ -2721,17 +2851,19 @@
 
 	ret = ath10k_wmi_vdev_down(ar, arvif->vdev_id);
 	if (ret)
-		ath10k_warn(ar, "faield to down vdev %i: %d\n",
+		ath10k_warn(ar, "failed to down vdev %i: %d\n",
 			    arvif->vdev_id, ret);
 
 	arvif->def_wep_key_idx = -1;
 
+	if (!QCA_REV_WCN3990(ar)) {
 	ret = ath10k_mac_vif_recalc_txbf(ar, vif, vht_cap);
 	if (ret) {
 		ath10k_warn(ar, "failed to recalc txbf for vdev %i: %d\n",
 			    arvif->vdev_id, ret);
 		return;
 	}
+	}
 
 	arvif->is_up = false;
 
@@ -2967,7 +3099,7 @@
 
 	regpair = ar->ath_common.regulatory.regpair;
 
-	if (config_enabled(CONFIG_ATH10K_DFS_CERTIFIED) && ar->dfs_detector) {
+	if (IS_ENABLED(CONFIG_ATH10K_DFS_CERTIFIED) && ar->dfs_detector) {
 		nl_dfs_reg = ar->dfs_detector->region;
 		wmi_dfs_reg = ath10k_mac_get_dfs_region(nl_dfs_reg);
 	} else {
@@ -2996,7 +3128,7 @@
 
 	ath_reg_notifier_apply(wiphy, request, &ar->ath_common.regulatory);
 
-	if (config_enabled(CONFIG_ATH10K_DFS_CERTIFIED) && ar->dfs_detector) {
+	if (IS_ENABLED(CONFIG_ATH10K_DFS_CERTIFIED) && ar->dfs_detector) {
 		ath10k_dbg(ar, ATH10K_DBG_REGULATORY, "dfs region 0x%x\n",
 			   request->dfs_region);
 		result = ar->dfs_detector->set_dfs_domain(ar->dfs_detector,
@@ -3016,6 +3148,13 @@
 /* TX handlers */
 /***************/
 
+enum ath10k_mac_tx_path {
+	ATH10K_MAC_TX_HTT,
+	ATH10K_MAC_TX_HTT_MGMT,
+	ATH10K_MAC_TX_WMI_MGMT,
+	ATH10K_MAC_TX_UNKNOWN,
+};
+
 void ath10k_mac_tx_lock(struct ath10k *ar, int reason)
 {
 	lockdep_assert_held(&ar->htt.tx_lock);
@@ -3142,35 +3281,11 @@
 	spin_unlock_bh(&ar->htt.tx_lock);
 }
 
-static u8 ath10k_tx_h_get_tid(struct ieee80211_hdr *hdr)
-{
-	if (ieee80211_is_mgmt(hdr->frame_control))
-		return HTT_DATA_TX_EXT_TID_MGMT;
-
-	if (!ieee80211_is_data_qos(hdr->frame_control))
-		return HTT_DATA_TX_EXT_TID_NON_QOS_MCAST_BCAST;
-
-	if (!is_unicast_ether_addr(ieee80211_get_DA(hdr)))
-		return HTT_DATA_TX_EXT_TID_NON_QOS_MCAST_BCAST;
-
-	return ieee80211_get_qos_ctl(hdr)[0] & IEEE80211_QOS_CTL_TID_MASK;
-}
-
-static u8 ath10k_tx_h_get_vdev_id(struct ath10k *ar, struct ieee80211_vif *vif)
-{
-	if (vif)
-		return ath10k_vif_to_arvif(vif)->vdev_id;
-
-	if (ar->monitor_started)
-		return ar->monitor_vdev_id;
-
-	ath10k_warn(ar, "failed to resolve vdev id\n");
-	return 0;
-}
-
 static enum ath10k_hw_txrx_mode
-ath10k_tx_h_get_txmode(struct ath10k *ar, struct ieee80211_vif *vif,
-		       struct ieee80211_sta *sta, struct sk_buff *skb)
+ath10k_mac_tx_h_get_txmode(struct ath10k *ar,
+			   struct ieee80211_vif *vif,
+			   struct ieee80211_sta *sta,
+			   struct sk_buff *skb)
 {
 	const struct ieee80211_hdr *hdr = (void *)skb->data;
 	__le16 fc = hdr->frame_control;
@@ -3199,7 +3314,10 @@
 	 */
 	if (ar->htt.target_version_major < 3 &&
 	    (ieee80211_is_nullfunc(fc) || ieee80211_is_qos_nullfunc(fc)) &&
-	    !test_bit(ATH10K_FW_FEATURE_HAS_WMI_MGMT_TX, ar->fw_features))
+	    !test_bit(ATH10K_FW_FEATURE_HAS_WMI_MGMT_TX,
+		      ar->running_fw->fw_file.fw_features) &&
+	    !test_bit(ATH10K_FW_FEATURE_SKIP_NULL_FUNC_WAR,
+		      ar->running_fw->fw_file.fw_features))
 		return ATH10K_HW_TXRX_MGMT;
 
 	/* Workaround:
@@ -3220,14 +3338,22 @@
 }
 
 static bool ath10k_tx_h_use_hwcrypto(struct ieee80211_vif *vif,
-				     struct sk_buff *skb) {
-	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+				     struct sk_buff *skb)
+{
+	const struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+	const struct ieee80211_hdr *hdr = (void *)skb->data;
 	const u32 mask = IEEE80211_TX_INTFL_DONT_ENCRYPT |
 			 IEEE80211_TX_CTL_INJECTED;
+
+	if (!ieee80211_has_protected(hdr->frame_control))
+		return false;
+
 	if ((info->flags & mask) == mask)
 		return false;
+
 	if (vif)
 		return !ath10k_vif_to_arvif(vif)->nohwcrypt;
+
 	return true;
 }
 
@@ -3254,7 +3380,7 @@
 	 */
 	hdr = (void *)skb->data;
 	if (ieee80211_is_qos_nullfunc(hdr->frame_control))
-		cb->htt.tid = HTT_DATA_TX_EXT_TID_NON_QOS_MCAST_BCAST;
+		cb->flags &= ~ATH10K_SKB_F_QOS;
 
 	hdr->frame_control &= ~__cpu_to_le16(IEEE80211_STYPE_QOS_DATA);
 }
@@ -3294,8 +3420,7 @@
 	struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
 
 	/* This is case only for P2P_GO */
-	if (arvif->vdev_type != WMI_VDEV_TYPE_AP ||
-	    arvif->vdev_subtype != WMI_VDEV_SUBTYPE_P2P_GO)
+	if (vif->type != NL80211_IFTYPE_AP || !vif->p2p)
 		return;
 
 	if (unlikely(ieee80211_is_probe_resp(hdr->frame_control))) {
@@ -3310,7 +3435,29 @@
 	}
 }
 
-static bool ath10k_mac_need_offchan_tx_work(struct ath10k *ar)
+static void ath10k_mac_tx_h_fill_cb(struct ath10k *ar,
+				    struct ieee80211_vif *vif,
+				    struct ieee80211_txq *txq,
+				    struct sk_buff *skb)
+{
+	struct ieee80211_hdr *hdr = (void *)skb->data;
+	struct ath10k_skb_cb *cb = ATH10K_SKB_CB(skb);
+
+	cb->flags = 0;
+	if (!ath10k_tx_h_use_hwcrypto(vif, skb))
+		cb->flags |= ATH10K_SKB_F_NO_HWCRYPT;
+
+	if (ieee80211_is_mgmt(hdr->frame_control))
+		cb->flags |= ATH10K_SKB_F_MGMT;
+
+	if (ieee80211_is_data_qos(hdr->frame_control))
+		cb->flags |= ATH10K_SKB_F_QOS;
+
+	cb->vif = vif;
+	cb->txq = txq;
+}
+
+bool ath10k_mac_tx_frm_has_freq(struct ath10k *ar)
 {
 	/* FIXME: Not really sure since when the behaviour changed. At some
 	 * point new firmware stopped requiring creation of peer entries for
@@ -3318,8 +3465,9 @@
 	 * tx credit replenishment and reliability). Assuming it's at least 3.4
 	 * because that's when the `freq` was introduced to TX_FRM HTT command.
 	 */
-	return !(ar->htt.target_version_major >= 3 &&
-		 ar->htt.target_version_minor >= 4);
+	return (ar->htt.target_version_major >= 3 &&
+		ar->htt.target_version_minor >= 4 &&
+		ar->running_fw->fw_file.htt_op_version == ATH10K_FW_HTT_OP_VERSION_TLV);
 }
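
Note the rename also flips the predicate's sense: ath10k_mac_need_offchan_tx_work() answered "must the offchannel worker handle this frame?", while ath10k_mac_tx_frm_has_freq() answers "can the HTT TX_FRM command carry the channel frequency?", and callers negate it. A tiny sketch of that relationship, with illustrative version inputs (the TLV condition is this patch's addition):

#include <stdbool.h>
#include <stdio.h>

/* TLV firmware >= 3.4 can embed the channel frequency in the HTT TX_FRM
 * command; the version numbers below are illustrative inputs. */
static bool tx_frm_has_freq(int major, int minor, bool htt_op_is_tlv)
{
	return major >= 3 && minor >= 4 && htt_op_is_tlv;
}

int main(void)
{
	/* The offchannel worker is only needed when freq can't be embedded. */
	bool need_offchan_worker = !tx_frm_has_freq(3, 4, true);

	printf("offchannel worker needed: %s\n",
	       need_offchan_worker ? "yes" : "no");
	return 0;
}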
 
 static int ath10k_mac_tx_wmi_mgmt(struct ath10k *ar, struct sk_buff *skb)
@@ -3344,27 +3492,53 @@
 	return ret;
 }
 
-static void ath10k_mac_tx(struct ath10k *ar, struct sk_buff *skb)
+static enum ath10k_mac_tx_path
+ath10k_mac_tx_h_get_txpath(struct ath10k *ar,
+			   struct sk_buff *skb,
+			   enum ath10k_hw_txrx_mode txmode)
 {
-	struct ath10k_skb_cb *cb = ATH10K_SKB_CB(skb);
-	struct ath10k_htt *htt = &ar->htt;
-	int ret = 0;
-
-	switch (cb->txmode) {
+	switch (txmode) {
 	case ATH10K_HW_TXRX_RAW:
 	case ATH10K_HW_TXRX_NATIVE_WIFI:
 	case ATH10K_HW_TXRX_ETHERNET:
-		ret = ath10k_htt_tx(htt, skb);
-		break;
+		return ATH10K_MAC_TX_HTT;
 	case ATH10K_HW_TXRX_MGMT:
 		if (test_bit(ATH10K_FW_FEATURE_HAS_WMI_MGMT_TX,
-			     ar->fw_features))
-			ret = ath10k_mac_tx_wmi_mgmt(ar, skb);
+			     ar->running_fw->fw_file.fw_features) ||
+			     test_bit(WMI_SERVICE_MGMT_TX_WMI,
+				      ar->wmi.svc_map))
+			return ATH10K_MAC_TX_WMI_MGMT;
 		else if (ar->htt.target_version_major >= 3)
-			ret = ath10k_htt_tx(htt, skb);
+			return ATH10K_MAC_TX_HTT;
 		else
+			return ATH10K_MAC_TX_HTT_MGMT;
+	}
+
+	return ATH10K_MAC_TX_UNKNOWN;
+}
+
+static int ath10k_mac_tx_submit(struct ath10k *ar,
+				enum ath10k_hw_txrx_mode txmode,
+				enum ath10k_mac_tx_path txpath,
+				struct sk_buff *skb)
+{
+	struct ath10k_htt *htt = &ar->htt;
+	int ret = -EINVAL;
+
+	switch (txpath) {
+	case ATH10K_MAC_TX_HTT:
+		ret = ath10k_htt_tx(htt, txmode, skb);
+		break;
+	case ATH10K_MAC_TX_HTT_MGMT:
-			ret = ath10k_htt_mgmt_tx(htt, skb);
+		ret = ath10k_htt_mgmt_tx(htt, skb);
 		break;
+	case ATH10K_MAC_TX_WMI_MGMT:
+		ret = ath10k_mac_tx_wmi_mgmt(ar, skb);
+		break;
+	case ATH10K_MAC_TX_UNKNOWN:
+		WARN_ON_ONCE(1);
+		ret = -EINVAL;
+		break;
 	}
 
 	if (ret) {
@@ -3372,6 +3546,65 @@
 			    ret);
 		ieee80211_free_txskb(ar->hw, skb);
 	}
+
+	return ret;
+}
+
+/* This function consumes the sk_buff regardless of return value as far as
+ * caller is concerned so no freeing is necessary afterwards.
+ */
+static int ath10k_mac_tx(struct ath10k *ar,
+			 struct ieee80211_vif *vif,
+			 struct ieee80211_sta *sta,
+			 enum ath10k_hw_txrx_mode txmode,
+			 enum ath10k_mac_tx_path txpath,
+			 struct sk_buff *skb)
+{
+	struct ieee80211_hw *hw = ar->hw;
+	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+	int ret;
+
+	skb_orphan(skb);
+	/* We should disable CCK RATE due to P2P */
+	if (info->flags & IEEE80211_TX_CTL_NO_CCK_RATE)
+		ath10k_dbg(ar, ATH10K_DBG_MAC, "IEEE80211_TX_CTL_NO_CCK_RATE\n");
+
+	switch (txmode) {
+	case ATH10K_HW_TXRX_MGMT:
+	case ATH10K_HW_TXRX_NATIVE_WIFI:
+		ath10k_tx_h_nwifi(hw, skb);
+		ath10k_tx_h_add_p2p_noa_ie(ar, vif, skb);
+		ath10k_tx_h_seq_no(vif, skb);
+		break;
+	case ATH10K_HW_TXRX_ETHERNET:
+		ath10k_tx_h_8023(skb);
+		break;
+	case ATH10K_HW_TXRX_RAW:
+		if (!test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) {
+			WARN_ON_ONCE(1);
+			ieee80211_free_txskb(hw, skb);
+			return -ENOTSUPP;
+		}
+	}
+
+	if (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN) {
+		if (!ath10k_mac_tx_frm_has_freq(ar)) {
+			ath10k_dbg(ar, ATH10K_DBG_MAC, "queued offchannel skb %pK\n",
+				   skb);
+
+			skb_queue_tail(&ar->offchan_tx_queue, skb);
+			ieee80211_queue_work(hw, &ar->offchan_tx_work);
+			return 0;
+		}
+	}
+
+	ret = ath10k_mac_tx_submit(ar, txmode, txpath, skb);
+	if (ret) {
+		ath10k_warn(ar, "failed to submit frame: %d\n", ret);
+		return ret;
+	}
+
+	return 0;
 }
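
ath10k_mac_tx() is being split into stages: classify the frame into a txmode (how it is encapsulated), map the txmode to a txpath (which firmware interface carries it), then submit. A compact, self-contained model of the two-level dispatch; enum names are abbreviated from the hunks above and everything else is invented:

#include <stdio.h>

enum txmode { TXRX_RAW, TXRX_NATIVE_WIFI, TXRX_ETHERNET, TXRX_MGMT };
enum txpath { TX_HTT, TX_HTT_MGMT, TX_WMI_MGMT, TX_UNKNOWN };

/* Stage 2 of the pipeline: pick the carrier for an already-classified frame.
 * Mirrors ath10k_mac_tx_h_get_txpath(); inputs are plain ints here. */
static enum txpath get_txpath(enum txmode m, int htt_major, int has_wmi_mgmt)
{
	switch (m) {
	case TXRX_RAW:
	case TXRX_NATIVE_WIFI:
	case TXRX_ETHERNET:
		return TX_HTT;
	case TXRX_MGMT:
		if (has_wmi_mgmt)
			return TX_WMI_MGMT;
		return htt_major >= 3 ? TX_HTT : TX_HTT_MGMT;
	}
	return TX_UNKNOWN; /* unreachable for valid enum values */
}

int main(void)
{
	/* mgmt frame, HTT v2 target, no WMI mgmt service -> TX_HTT_MGMT */
	printf("txpath = %d\n", get_txpath(TXRX_MGMT, 2, 0));
	return 0;
}

Separating classification from submission lets the offchannel worker and the new txq push path reuse the exact same dispatch instead of duplicating it.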
 
 void ath10k_offchan_tx_purge(struct ath10k *ar)
@@ -3391,7 +3624,12 @@
 {
 	struct ath10k *ar = container_of(work, struct ath10k, offchan_tx_work);
 	struct ath10k_peer *peer;
+	struct ath10k_vif *arvif;
+	enum ath10k_hw_txrx_mode txmode;
+	enum ath10k_mac_tx_path txpath;
 	struct ieee80211_hdr *hdr;
+	struct ieee80211_vif *vif;
+	struct ieee80211_sta *sta;
 	struct sk_buff *skb;
 	const u8 *peer_addr;
 	int vdev_id;
@@ -3413,14 +3651,14 @@
 
 		mutex_lock(&ar->conf_mutex);
 
-		ath10k_dbg(ar, ATH10K_DBG_MAC, "mac offchannel skb %p\n",
+		ath10k_dbg(ar, ATH10K_DBG_MAC, "mac offchannel skb %pK\n",
 			   skb);
 
 		hdr = (struct ieee80211_hdr *)skb->data;
 		peer_addr = ieee80211_get_DA(hdr);
-		vdev_id = ATH10K_SKB_CB(skb)->vdev_id;
 
 		spin_lock_bh(&ar->data_lock);
+		vdev_id = ar->scan.vdev_id;
 		peer = ath10k_peer_find(ar, vdev_id, peer_addr);
 		spin_unlock_bh(&ar->data_lock);
 
@@ -3430,7 +3668,8 @@
 				   peer_addr, vdev_id);
 
 		if (!peer) {
-			ret = ath10k_peer_create(ar, vdev_id, peer_addr,
+			ret = ath10k_peer_create(ar, NULL, NULL, vdev_id,
+						 peer_addr,
 						 WMI_PEER_TYPE_DEFAULT);
 			if (ret)
 				ath10k_warn(ar, "failed to create peer %pM on vdev %d: %d\n",
@@ -3443,12 +3682,33 @@
 		ar->offchan_tx_skb = skb;
 		spin_unlock_bh(&ar->data_lock);
 
-		ath10k_mac_tx(ar, skb);
+		/* It's safe to access vif and sta - conf_mutex guarantees that
+		 * sta_state() and remove_interface() are serialized with
+		 * respect to this offchannel worker.
+		 */
+		arvif = ath10k_get_arvif(ar, vdev_id);
+		if (arvif) {
+			vif = arvif->vif;
+			sta = ieee80211_find_sta(vif, peer_addr);
+		} else {
+			vif = NULL;
+			sta = NULL;
+		}
+
+		txmode = ath10k_mac_tx_h_get_txmode(ar, vif, sta, skb);
+		txpath = ath10k_mac_tx_h_get_txpath(ar, skb, txmode);
+
+		ret = ath10k_mac_tx(ar, vif, sta, txmode, txpath, skb);
+		if (ret) {
+			ath10k_warn(ar, "failed to transmit offchannel frame: %d\n",
+				    ret);
+			/* not serious */
+		}
 
 		time_left =
 		wait_for_completion_timeout(&ar->offchan_tx_completed, 3 * HZ);
 		if (time_left == 0)
-			ath10k_warn(ar, "timed out waiting for offchannel skb %p\n",
+			ath10k_warn(ar, "timed out waiting for offchannel skb %pK\n",
 				    skb);
 
 		if (!peer && tmp_peer_created) {
@@ -3479,6 +3739,7 @@
 {
 	struct ath10k *ar = container_of(work, struct ath10k, wmi_mgmt_tx_work);
 	struct sk_buff *skb;
+	dma_addr_t paddr;
 	int ret;
 
 	for (;;) {
@@ -3486,6 +3747,20 @@
 		if (!skb)
 			break;
 
+		if (QCA_REV_WCN3990(ar)) {
+			paddr = dma_map_single(ar->dev, skb->data,
+					       skb->len, DMA_TO_DEVICE);
+			if (dma_mapping_error(ar->dev, paddr)) {
+				ieee80211_free_txskb(ar->hw, skb);
+				continue;
+			}
+			ret = ath10k_wmi_mgmt_tx_send(ar, skb, paddr);
+			if (ret) {
+				ath10k_warn(ar, "failed to transmit management frame by ref via WMI: %d\n",
+					    ret);
+				dma_unmap_single(ar->dev, paddr, skb->len,
+						 DMA_TO_DEVICE);
+				ieee80211_free_txskb(ar->hw, skb);
+			}
+		} else {
 		ret = ath10k_wmi_mgmt_tx(ar, skb);
 		if (ret) {
 			ath10k_warn(ar, "failed to transmit management frame via WMI: %d\n",
@@ -3494,6 +3769,201 @@
 		}
 	}
 }
+}
+
+static void ath10k_mac_txq_init(struct ieee80211_txq *txq)
+{
+	struct ath10k_txq *artxq;
+
+	if (!txq)
+		return;
+
+	artxq = (void *)txq->drv_priv;
+	INIT_LIST_HEAD(&artxq->list);
+}
+
+static void ath10k_mac_txq_unref(struct ath10k *ar, struct ieee80211_txq *txq)
+{
+	struct ath10k_txq *artxq;
+	struct ath10k_skb_cb *cb;
+	struct sk_buff *msdu;
+	int msdu_id;
+
+	if (!txq)
+		return;
+
+	artxq = (void *)txq->drv_priv;
+	spin_lock_bh(&ar->txqs_lock);
+	if (!list_empty(&artxq->list))
+		list_del_init(&artxq->list);
+	spin_unlock_bh(&ar->txqs_lock);
+
+	spin_lock_bh(&ar->htt.tx_lock);
+	idr_for_each_entry(&ar->htt.pending_tx, msdu, msdu_id) {
+		cb = ATH10K_SKB_CB(msdu);
+		if (cb->txq == txq)
+			cb->txq = NULL;
+	}
+	spin_unlock_bh(&ar->htt.tx_lock);
+}
+
+struct ieee80211_txq *ath10k_mac_txq_lookup(struct ath10k *ar,
+					    u16 peer_id,
+					    u8 tid)
+{
+	struct ath10k_peer *peer;
+
+	lockdep_assert_held(&ar->data_lock);
+
+	peer = ar->peer_map[peer_id];
+	if (!peer)
+		return NULL;
+
+	if (peer->sta)
+		return peer->sta->txq[tid];
+	else if (peer->vif)
+		return peer->vif->txq;
+	else
+		return NULL;
+}
+
+static bool ath10k_mac_tx_can_push(struct ieee80211_hw *hw,
+				   struct ieee80211_txq *txq)
+{
+	struct ath10k *ar = hw->priv;
+	struct ath10k_txq *artxq = (void *)txq->drv_priv;
+
+	/* No need to get locks */
+
+	if (ar->htt.tx_q_state.mode == HTT_TX_MODE_SWITCH_PUSH)
+		return true;
+
+	if (ar->htt.num_pending_tx < ar->htt.tx_q_state.num_push_allowed)
+		return true;
+
+	if (artxq->num_fw_queued < artxq->num_push_allowed)
+		return true;
+
+	return false;
+}
+
+int ath10k_mac_tx_push_txq(struct ieee80211_hw *hw,
+			   struct ieee80211_txq *txq)
+{
+	struct ath10k *ar = hw->priv;
+	struct ath10k_htt *htt = &ar->htt;
+	struct ath10k_txq *artxq = (void *)txq->drv_priv;
+	struct ieee80211_vif *vif = txq->vif;
+	struct ieee80211_sta *sta = txq->sta;
+	enum ath10k_hw_txrx_mode txmode;
+	enum ath10k_mac_tx_path txpath;
+	struct sk_buff *skb;
+	struct ieee80211_hdr *hdr;
+	size_t skb_len;
+	bool is_mgmt, is_presp;
+	int ret;
+
+	spin_lock_bh(&ar->htt.tx_lock);
+	ret = ath10k_htt_tx_inc_pending(htt);
+	spin_unlock_bh(&ar->htt.tx_lock);
+
+	if (ret)
+		return ret;
+
+	skb = ieee80211_tx_dequeue(hw, txq);
+	if (!skb) {
+		spin_lock_bh(&ar->htt.tx_lock);
+		ath10k_htt_tx_dec_pending(htt);
+		spin_unlock_bh(&ar->htt.tx_lock);
+
+		return -ENOENT;
+	}
+
+	ath10k_mac_tx_h_fill_cb(ar, vif, txq, skb);
+
+	skb_len = skb->len;
+	txmode = ath10k_mac_tx_h_get_txmode(ar, vif, sta, skb);
+	txpath = ath10k_mac_tx_h_get_txpath(ar, skb, txmode);
+	is_mgmt = (txpath == ATH10K_MAC_TX_HTT_MGMT);
+
+	if (is_mgmt) {
+		hdr = (struct ieee80211_hdr *)skb->data;
+		is_presp = ieee80211_is_probe_resp(hdr->frame_control);
+
+		spin_lock_bh(&ar->htt.tx_lock);
+		ret = ath10k_htt_tx_mgmt_inc_pending(htt, is_mgmt, is_presp);
+
+		if (ret) {
+			ath10k_htt_tx_dec_pending(htt);
+			spin_unlock_bh(&ar->htt.tx_lock);
+			return ret;
+		}
+		spin_unlock_bh(&ar->htt.tx_lock);
+	}
+
+	ret = ath10k_mac_tx(ar, vif, sta, txmode, txpath, skb);
+	if (unlikely(ret)) {
+		ath10k_warn(ar, "failed to push frame: %d\n", ret);
+
+		spin_lock_bh(&ar->htt.tx_lock);
+		ath10k_htt_tx_dec_pending(htt);
+		if (is_mgmt)
+			ath10k_htt_tx_mgmt_dec_pending(htt);
+		spin_unlock_bh(&ar->htt.tx_lock);
+
+		return ret;
+	}
+
+	spin_lock_bh(&ar->htt.tx_lock);
+	artxq->num_fw_queued++;
+	spin_unlock_bh(&ar->htt.tx_lock);
+
+	return skb_len;
+}
+
+void ath10k_mac_tx_push_pending(struct ath10k *ar)
+{
+	struct ieee80211_hw *hw = ar->hw;
+	struct ieee80211_txq *txq;
+	struct ath10k_txq *artxq;
+	struct ath10k_txq *last;
+	int ret;
+	int max;
+
+	if (ar->htt.num_pending_tx >= (ar->htt.max_num_pending_tx / 2))
+		return;
+
+	spin_lock_bh(&ar->txqs_lock);
+	rcu_read_lock();
+
+	last = list_last_entry(&ar->txqs, struct ath10k_txq, list);
+	while (!list_empty(&ar->txqs)) {
+		artxq = list_first_entry(&ar->txqs, struct ath10k_txq, list);
+		txq = container_of((void *)artxq, struct ieee80211_txq,
+				   drv_priv);
+
+		/* Prevent aggressive sta/tid taking over tx queue */
+		max = 16;
+		ret = 0;
+		while (ath10k_mac_tx_can_push(hw, txq) && max--) {
+			ret = ath10k_mac_tx_push_txq(hw, txq);
+			if (ret < 0)
+				break;
+		}
+
+		list_del_init(&artxq->list);
+		if (ret != -ENOENT)
+			list_add_tail(&artxq->list, &ar->txqs);
+
+		ath10k_htt_tx_txq_update(hw, txq);
+
+		if (artxq == last || (ret < 0 && ret != -ENOENT))
+			break;
+	}
+
+	rcu_read_unlock();
+	spin_unlock_bh(&ar->txqs_lock);
+}
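
ath10k_mac_tx_push_pending() drains the per-sta/tid queues round-robin: serve the head queue for at most 16 frames, move it back to the tail unless it ran dry (-ENOENT), and bail out once the queue that was last when the pass started has been served. A simplified stand-alone model of the rotation (it keeps looping until everything drains, whereas the driver stops after one pass using its `last` marker):

#include <stdio.h>

#define NQ 3

static int backlog[NQ] = { 40, 2, 5 }; /* invented per-queue frame counts */

static int push_one(int q) /* -1 models -ENOENT: nothing left to dequeue */
{
	if (backlog[q] == 0)
		return -1;
	backlog[q]--;
	return 0;
}

int main(void)
{
	int order[NQ] = { 0, 1, 2 }; /* ar->txqs, as a flat array */
	int n = NQ;

	while (n > 0) {
		int q = order[0], i, budget = 16, ret = 0;

		/* serve the head queue, but cap the burst like the driver */
		while (budget-- && (ret = push_one(q)) == 0)
			;

		/* rotate: remove from head, re-queue at tail unless empty */
		for (i = 0; i < n - 1; i++)
			order[i] = order[i + 1];
		if (ret == 0)
			order[n - 1] = q;
		else
			n--;

		printf("served queue %d, backlog now %d\n", q, backlog[q]);
	}
	return 0;
}

The 16-frame burst cap is the same "prevent aggressive sta/tid taking over tx queue" guard that appears in the hunk.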
 
 /************/
 /* Scanning */
@@ -3518,9 +3988,10 @@
 	case ATH10K_SCAN_STARTING:
 		ar->scan.state = ATH10K_SCAN_IDLE;
 		ar->scan_channel = NULL;
+		ar->scan.roc_freq = 0;
 		ath10k_offchan_tx_purge(ar);
 		cancel_delayed_work(&ar->scan.timeout);
-		complete_all(&ar->scan.completed);
+		complete(&ar->scan.completed);
 		break;
 	}
 }
@@ -3656,67 +4127,100 @@
 /* mac80211 callbacks */
 /**********************/
 
-static void ath10k_tx(struct ieee80211_hw *hw,
+static void ath10k_mac_op_tx(struct ieee80211_hw *hw,
 		      struct ieee80211_tx_control *control,
 		      struct sk_buff *skb)
 {
 	struct ath10k *ar = hw->priv;
+	struct ath10k_htt *htt = &ar->htt;
 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
 	struct ieee80211_vif *vif = info->control.vif;
 	struct ieee80211_sta *sta = control->sta;
-	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
-	__le16 fc = hdr->frame_control;
+	struct ieee80211_txq *txq = NULL;
+	struct ieee80211_hdr *hdr = (void *)skb->data;
+	enum ath10k_hw_txrx_mode txmode;
+	enum ath10k_mac_tx_path txpath;
+	bool is_htt;
+	bool is_mgmt;
+	bool is_presp;
+	int ret;
+
+	ath10k_mac_tx_h_fill_cb(ar, vif, txq, skb);
+
+	txmode = ath10k_mac_tx_h_get_txmode(ar, vif, sta, skb);
+	txpath = ath10k_mac_tx_h_get_txpath(ar, skb, txmode);
+	is_htt = (txpath == ATH10K_MAC_TX_HTT ||
+		  txpath == ATH10K_MAC_TX_HTT_MGMT);
+	is_mgmt = (txpath == ATH10K_MAC_TX_HTT_MGMT);
 
-	/* We should disable CCK RATE due to P2P */
-	if (info->flags & IEEE80211_TX_CTL_NO_CCK_RATE)
-		ath10k_dbg(ar, ATH10K_DBG_MAC, "IEEE80211_TX_CTL_NO_CCK_RATE\n");
+	if (is_htt) {
+		spin_lock_bh(&ar->htt.tx_lock);
+		is_presp = ieee80211_is_probe_resp(hdr->frame_control);
 
-	ATH10K_SKB_CB(skb)->htt.is_offchan = false;
-	ATH10K_SKB_CB(skb)->htt.freq = 0;
-	ATH10K_SKB_CB(skb)->htt.tid = ath10k_tx_h_get_tid(hdr);
-	ATH10K_SKB_CB(skb)->htt.nohwcrypt = !ath10k_tx_h_use_hwcrypto(vif, skb);
-	ATH10K_SKB_CB(skb)->vdev_id = ath10k_tx_h_get_vdev_id(ar, vif);
-	ATH10K_SKB_CB(skb)->txmode = ath10k_tx_h_get_txmode(ar, vif, sta, skb);
-	ATH10K_SKB_CB(skb)->is_protected = ieee80211_has_protected(fc);
+		ret = ath10k_htt_tx_inc_pending(htt);
+		if (ret) {
+			ath10k_warn(ar, "failed to increase tx pending count: %d, dropping\n",
+				    ret);
+			spin_unlock_bh(&ar->htt.tx_lock);
+			ieee80211_free_txskb(ar->hw, skb);
+			return;
+		}
 
-	switch (ATH10K_SKB_CB(skb)->txmode) {
-	case ATH10K_HW_TXRX_MGMT:
-	case ATH10K_HW_TXRX_NATIVE_WIFI:
-		ath10k_tx_h_nwifi(hw, skb);
-		ath10k_tx_h_add_p2p_noa_ie(ar, vif, skb);
-		ath10k_tx_h_seq_no(vif, skb);
-		break;
-	case ATH10K_HW_TXRX_ETHERNET:
-		ath10k_tx_h_8023(skb);
-		break;
-	case ATH10K_HW_TXRX_RAW:
-		if (!test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) {
-			WARN_ON_ONCE(1);
-			ieee80211_free_txskb(hw, skb);
+		ret = ath10k_htt_tx_mgmt_inc_pending(htt, is_mgmt, is_presp);
+		if (ret) {
+			ath10k_dbg(ar, ATH10K_DBG_MAC, "failed to increase tx mgmt pending count: %d, dropping\n",
+				   ret);
+			ath10k_htt_tx_dec_pending(htt);
+			spin_unlock_bh(&ar->htt.tx_lock);
+			ieee80211_free_txskb(ar->hw, skb);
 			return;
 		}
+		spin_unlock_bh(&ar->htt.tx_lock);
 	}
 
-	if (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN) {
-		spin_lock_bh(&ar->data_lock);
-		ATH10K_SKB_CB(skb)->htt.freq = ar->scan.roc_freq;
-		ATH10K_SKB_CB(skb)->vdev_id = ar->scan.vdev_id;
-		spin_unlock_bh(&ar->data_lock);
+	ret = ath10k_mac_tx(ar, vif, sta, txmode, txpath, skb);
+	if (ret) {
+		ath10k_warn(ar, "failed to transmit frame: %d\n", ret);
+		if (is_htt) {
+			spin_lock_bh(&ar->htt.tx_lock);
+			ath10k_htt_tx_dec_pending(htt);
+			if (is_mgmt)
+				ath10k_htt_tx_mgmt_dec_pending(htt);
+			spin_unlock_bh(&ar->htt.tx_lock);
+		}
+		return;
+	}
+}
 
-		if (ath10k_mac_need_offchan_tx_work(ar)) {
-			ATH10K_SKB_CB(skb)->htt.freq = 0;
-			ATH10K_SKB_CB(skb)->htt.is_offchan = true;
+static void ath10k_mac_op_wake_tx_queue(struct ieee80211_hw *hw,
+					struct ieee80211_txq *txq)
+{
+	struct ath10k *ar = hw->priv;
+	struct ath10k_txq *artxq = (void *)txq->drv_priv;
+	struct ieee80211_txq *f_txq;
+	struct ath10k_txq *f_artxq;
+	int ret = 0;
+	int max = 16;
 
-			ath10k_dbg(ar, ATH10K_DBG_MAC, "queued offchannel skb %p\n",
-				   skb);
+	spin_lock_bh(&ar->txqs_lock);
+	if (list_empty(&artxq->list))
+		list_add_tail(&artxq->list, &ar->txqs);
 
-			skb_queue_tail(&ar->offchan_tx_queue, skb);
-			ieee80211_queue_work(hw, &ar->offchan_tx_work);
-			return;
-		}
+	f_artxq = list_first_entry(&ar->txqs, struct ath10k_txq, list);
+	f_txq = container_of((void *)f_artxq, struct ieee80211_txq, drv_priv);
+	list_del_init(&f_artxq->list);
+
+	while (ath10k_mac_tx_can_push(hw, f_txq) && max--) {
+		ret = ath10k_mac_tx_push_txq(hw, f_txq);
+		if (ret)
+			break;
 	}
+	if (ret != -ENOENT)
+		list_add_tail(&f_artxq->list, &ar->txqs);
+	spin_unlock_bh(&ar->txqs_lock);
 
-	ath10k_mac_tx(ar, skb);
+	ath10k_htt_tx_txq_update(hw, f_txq);
+	ath10k_htt_tx_txq_update(hw, txq);
 }
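
In the rewritten op_tx above, HTT-bound frames increment ar->htt.num_pending_tx (plus a separate management counter) before submission, and every error path decrements them again under htt.tx_lock. The discipline is plain two-counter accounting with rollback; a sketch using a pthread mutex in place of the spinlock, with the cap invented:

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t tx_lock = PTHREAD_MUTEX_INITIALIZER;
static int num_pending_tx, num_pending_mgmt_tx;
static const int max_pending = 1424; /* invented cap */

static int tx_inc_pending(int is_mgmt)
{
	int ret = 0;

	pthread_mutex_lock(&tx_lock);
	if (num_pending_tx >= max_pending) {
		ret = -EBUSY;
	} else {
		num_pending_tx++;
		if (is_mgmt)
			num_pending_mgmt_tx++;
	}
	pthread_mutex_unlock(&tx_lock);
	return ret;
}

static void tx_dec_pending(int is_mgmt)
{
	pthread_mutex_lock(&tx_lock);
	num_pending_tx--;
	if (is_mgmt)
		num_pending_mgmt_tx--;
	pthread_mutex_unlock(&tx_lock);
}

static int submit(void) { return -EIO; /* pretend the ring was full */ }

int main(void)
{
	if (tx_inc_pending(1) == 0 && submit() != 0)
		tx_dec_pending(1); /* every failure path must roll back */
	printf("pending %d, mgmt pending %d\n",
	       num_pending_tx, num_pending_mgmt_tx);
	return 0;
}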
 
 /* Must not be called with conf_mutex held as workers can use that also. */
@@ -3856,6 +4360,9 @@
 			mcs_map |= IEEE80211_VHT_MCS_NOT_SUPPORTED << (i * 2);
 	}
 
+	if (ar->cfg_tx_chainmask <= 1)
+		vht_cap.cap &= ~IEEE80211_VHT_CAP_TXSTBC;
+
 	vht_cap.vht_mcs.rx_mcs_map = cpu_to_le16(mcs_map);
 	vht_cap.vht_mcs.tx_mcs_map = cpu_to_le16(mcs_map);
 
@@ -3875,7 +4382,8 @@
 	ht_cap.ampdu_density = IEEE80211_HT_MPDU_DENSITY_8;
 	ht_cap.cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40;
 	ht_cap.cap |= IEEE80211_HT_CAP_DSSSCCK40;
-	ht_cap.cap |= WLAN_HT_CAP_SM_PS_STATIC << IEEE80211_HT_CAP_SM_PS_SHIFT;
+	ht_cap.cap |=
+		WLAN_HT_CAP_SM_PS_DISABLED << IEEE80211_HT_CAP_SM_PS_SHIFT;
 
 	if (ar->ht_cap_info & WMI_HT_CAP_HT20_SGI)
 		ht_cap.cap |= IEEE80211_HT_CAP_SGI_20;
@@ -3892,7 +4400,7 @@
 		ht_cap.cap |= smps;
 	}
 
-	if (ar->ht_cap_info & WMI_HT_CAP_TX_STBC)
+	if (ar->ht_cap_info & WMI_HT_CAP_TX_STBC && (ar->cfg_tx_chainmask > 1))
 		ht_cap.cap |= IEEE80211_HT_CAP_TX_STBC;
 
 	if (ar->ht_cap_info & WMI_HT_CAP_RX_STBC) {
@@ -3939,9 +4447,6 @@
 	if (ar->phy_capability & WHAL_WLAN_11G_CAPABILITY) {
 		band = &ar->mac.sbands[IEEE80211_BAND_2GHZ];
 		band->ht_cap = ht_cap;
-
-		/* Enable the VHT support at 2.4 GHz */
-		band->vht_cap = vht_cap;
 	}
 	if (ar->phy_capability & WHAL_WLAN_11A_CAPABILITY) {
 		band = &ar->mac.sbands[IEEE80211_BAND_5GHZ];
@@ -4002,12 +4507,12 @@
 static int ath10k_start(struct ieee80211_hw *hw)
 {
 	struct ath10k *ar = hw->priv;
-	u32 burst_enable;
+	u32 param;
 	int ret = 0;
 
 	/*
 	 * This makes sense only when restarting hw. It is harmless to call
-	 * uncoditionally. This is necessary to make sure no HTT/WMI tx
+	 * unconditionally. This is necessary to make sure no HTT/WMI tx
 	 * commands will be submitted while restarting.
 	 */
 	ath10k_drain_tx(ar);
@@ -4019,6 +4524,7 @@
 		ar->state = ATH10K_STATE_ON;
 		break;
 	case ATH10K_STATE_RESTARTING:
+		if (!test_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags))
-		ath10k_halt(ar);
+			ath10k_halt(ar);
 		ar->state = ATH10K_STATE_RESTARTED;
 		break;
@@ -4039,24 +4545,34 @@
 		goto err_off;
 	}
 
-	ret = ath10k_core_start(ar, ATH10K_FIRMWARE_MODE_NORMAL);
+	ret = ath10k_core_start(ar, ATH10K_FIRMWARE_MODE_NORMAL,
+				&ar->normal_mode_fw);
 	if (ret) {
 		ath10k_err(ar, "Could not init core: %d\n", ret);
 		goto err_power_down;
 	}
 
-	ret = ath10k_wmi_pdev_set_param(ar, ar->wmi.pdev_param->pmf_qos, 1);
+	param = ar->wmi.pdev_param->pmf_qos;
+	ret = ath10k_wmi_pdev_set_param(ar, param, 1);
 	if (ret) {
 		ath10k_warn(ar, "failed to enable PMF QOS: %d\n", ret);
 		goto err_core_stop;
 	}
 
-	ret = ath10k_wmi_pdev_set_param(ar, ar->wmi.pdev_param->dynamic_bw, 1);
+	param = ar->wmi.pdev_param->dynamic_bw;
+	ret = ath10k_wmi_pdev_set_param(ar, param, 1);
 	if (ret) {
 		ath10k_warn(ar, "failed to enable dynamic BW: %d\n", ret);
 		goto err_core_stop;
 	}
 
+	param = ar->wmi.pdev_param->idle_ps_config;
+	ret = ath10k_wmi_pdev_set_param(ar, param, 1);
+	if (ret) {
+		ath10k_warn(ar, "failed to enable idle_ps_config: %d\n", ret);
+		goto err_core_stop;
+	}
+
 	if (test_bit(WMI_SERVICE_ADAPTIVE_OCS, ar->wmi.svc_map)) {
 		ret = ath10k_wmi_adaptive_qcs(ar, true);
 		if (ret) {
@@ -4067,8 +4583,8 @@
 	}
 
 	if (test_bit(WMI_SERVICE_BURST, ar->wmi.svc_map)) {
-		burst_enable = ar->wmi.pdev_param->burst_enable;
-		ret = ath10k_wmi_pdev_set_param(ar, burst_enable, 0);
+		param = ar->wmi.pdev_param->burst_enable;
+		ret = ath10k_wmi_pdev_set_param(ar, param, 0);
 		if (ret) {
 			ath10k_warn(ar, "failed to disable burst: %d\n", ret);
 			goto err_core_stop;
@@ -4086,8 +4602,8 @@
 	 * this problem.
 	 */
 
-	ret = ath10k_wmi_pdev_set_param(ar,
-					ar->wmi.pdev_param->arp_ac_override, 0);
+	param = ar->wmi.pdev_param->arp_ac_override;
+	ret = ath10k_wmi_pdev_set_param(ar, param, 0);
 	if (ret) {
 		ath10k_warn(ar, "failed to set arp ac override parameter: %d\n",
 			    ret);
@@ -4095,7 +4611,7 @@
 	}
 
 	if (test_bit(ATH10K_FW_FEATURE_SUPPORTS_ADAPTIVE_CCA,
-		     ar->fw_features)) {
+		     ar->running_fw->fw_file.fw_features)) {
 		ret = ath10k_wmi_pdev_enable_adaptive_cca(ar, 1,
 							  WMI_CCA_DETECT_LEVEL_AUTO,
 							  WMI_CCA_DETECT_MARGIN_AUTO);
@@ -4104,10 +4620,11 @@
 				    ret);
 			goto err_core_stop;
 		}
+		ar->sifs_burst_enabled = false;
 	}
 
-	ret = ath10k_wmi_pdev_set_param(ar,
-					ar->wmi.pdev_param->ani_enable, 1);
+	param = ar->wmi.pdev_param->ani_enable;
+	ret = ath10k_wmi_pdev_set_param(ar, param, 1);
 	if (ret) {
 		ath10k_warn(ar, "failed to enable ani by default: %d\n",
 			    ret);
@@ -4116,6 +4633,31 @@
 
 	ar->ani_enabled = true;
 
+	if (ath10k_peer_stats_enabled(ar)) {
+		param = ar->wmi.pdev_param->peer_stats_update_period;
+		ret = ath10k_wmi_pdev_set_param(ar, param,
+						PEER_DEFAULT_STATS_UPDATE_PERIOD);
+		if (ret) {
+			ath10k_warn(ar,
+				    "failed to set peer stats period : %d\n",
+				    ret);
+			goto err_core_stop;
+		}
+	}
+
+	param = ar->wmi.pdev_param->enable_btcoex;
+	if (test_bit(WMI_SERVICE_COEX_GPIO, ar->wmi.svc_map) &&
+	    test_bit(ATH10K_FW_FEATURE_BTCOEX_PARAM,
+		     ar->running_fw->fw_file.fw_features)) {
+		ret = ath10k_wmi_pdev_set_param(ar, param, 0);
+		if (ret) {
+			ath10k_warn(ar,
+				    "failed to set btcoex param: %d\n", ret);
+			goto err_core_stop;
+		}
+		clear_bit(ATH10K_FLAG_BTCOEX, &ar->dev_flags);
+	}
+
 	ar->num_started_vdevs = 0;
 	ath10k_regd_update(ar);
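
ath10k_start() now funnels every boot-time knob through the same shape: param = ar->wmi.pdev_param->...; ret = ath10k_wmi_pdev_set_param(ar, param, val); bail to err_core_stop on failure. Under that shape the sequence is effectively a table walk; a sketch with invented parameter IDs:

#include <stdio.h>

/* Stub: real code calls ath10k_wmi_pdev_set_param(ar, param, val). */
static int wmi_pdev_set_param(unsigned int param, unsigned int val)
{
	printf("pdev param %u = %u\n", param, val);
	return 0;
}

int main(void)
{
	/* Invented IDs; real ones come from the per-ABI ar->wmi.pdev_param. */
	static const struct {
		unsigned int param, val;
		const char *name;
	} boot[] = {
		{ 1, 1, "pmf_qos" },
		{ 2, 1, "dynamic_bw" },
		{ 3, 1, "idle_ps_config" },
		{ 4, 1, "ani_enable" },
	};
	unsigned int i;

	for (i = 0; i < sizeof(boot) / sizeof(boot[0]); i++) {
		printf("%s: ", boot[i].name);
		if (wmi_pdev_set_param(boot[i].param, boot[i].val))
			return 1; /* ~ goto err_core_stop */
	}
	return 0;
}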
 
@@ -4318,6 +4860,7 @@
 {
 	struct ath10k *ar = hw->priv;
 	struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+	struct ath10k_peer *peer;
 	enum wmi_sta_powersave_param param;
 	int ret = 0;
 	u32 value;
@@ -4330,6 +4873,7 @@
 	mutex_lock(&ar->conf_mutex);
 
 	memset(arvif, 0, sizeof(*arvif));
+	ath10k_mac_txq_init(vif->txq);
 
 	arvif->ar = ar;
 	arvif->vif = vif;
@@ -4364,24 +4908,30 @@
 		   bit, ar->free_vdev_map);
 
 	arvif->vdev_id = bit;
-	arvif->vdev_subtype = WMI_VDEV_SUBTYPE_NONE;
+	arvif->vdev_subtype =
+		ath10k_wmi_get_vdev_subtype(ar, WMI_VDEV_SUBTYPE_NONE);
 
 	switch (vif->type) {
 	case NL80211_IFTYPE_P2P_DEVICE:
 		arvif->vdev_type = WMI_VDEV_TYPE_STA;
-		arvif->vdev_subtype = WMI_VDEV_SUBTYPE_P2P_DEVICE;
+		arvif->vdev_subtype = ath10k_wmi_get_vdev_subtype
+					(ar, WMI_VDEV_SUBTYPE_P2P_DEVICE);
 		break;
 	case NL80211_IFTYPE_UNSPECIFIED:
 	case NL80211_IFTYPE_STATION:
 		arvif->vdev_type = WMI_VDEV_TYPE_STA;
 		if (vif->p2p)
-			arvif->vdev_subtype = WMI_VDEV_SUBTYPE_P2P_CLIENT;
+			arvif->vdev_subtype = ath10k_wmi_get_vdev_subtype
+					(ar, WMI_VDEV_SUBTYPE_P2P_CLIENT);
 		break;
 	case NL80211_IFTYPE_ADHOC:
 		arvif->vdev_type = WMI_VDEV_TYPE_IBSS;
 		break;
 	case NL80211_IFTYPE_MESH_POINT:
-		if (!test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) {
+		if (test_bit(WMI_SERVICE_MESH_11S, ar->wmi.svc_map)) {
+			arvif->vdev_subtype = ath10k_wmi_get_vdev_subtype
+						(ar, WMI_VDEV_SUBTYPE_MESH_11S);
+		} else if (!test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) {
 			ret = -EINVAL;
 			ath10k_warn(ar, "must load driver with rawmode=1 to add mesh interfaces\n");
 			goto err;
@@ -4392,7 +4942,8 @@
 		arvif->vdev_type = WMI_VDEV_TYPE_AP;
 
 		if (vif->p2p)
-			arvif->vdev_subtype = WMI_VDEV_SUBTYPE_P2P_GO;
+			arvif->vdev_subtype = ath10k_wmi_get_vdev_subtype
+						(ar, WMI_VDEV_SUBTYPE_P2P_GO);
 		break;
 	case NL80211_IFTYPE_MONITOR:
 		arvif->vdev_type = WMI_VDEV_TYPE_MONITOR;
@@ -4462,6 +5013,15 @@
 		goto err;
 	}
 
+	if ((arvif->vdev_type == WMI_VDEV_TYPE_STA) && QCA_REV_WCN3990(ar)) {
+		ret = ath10k_wmi_csa_offload(ar, arvif->vdev_id, true);
+		if (ret) {
+			ath10k_err(ar, "CSA offload failed for vdev %i: %d\n",
+				   arvif->vdev_id, ret);
+			goto err_vdev_delete;
+		}
+	}
+
 	ar->free_vdev_map &= ~(1LL << arvif->vdev_id);
 	list_add(&arvif->list, &ar->arvifs);
 
@@ -4506,13 +5066,31 @@
 
 	if (arvif->vdev_type == WMI_VDEV_TYPE_AP ||
 	    arvif->vdev_type == WMI_VDEV_TYPE_IBSS) {
-		ret = ath10k_peer_create(ar, arvif->vdev_id, vif->addr,
-					 WMI_PEER_TYPE_DEFAULT);
+		ret = ath10k_peer_create(ar, vif, NULL, arvif->vdev_id,
+					 vif->addr, WMI_PEER_TYPE_DEFAULT);
 		if (ret) {
 			ath10k_warn(ar, "failed to create vdev %i peer for AP/IBSS: %d\n",
 				    arvif->vdev_id, ret);
 			goto err_vdev_delete;
 		}
+
+		spin_lock_bh(&ar->data_lock);
+
+		peer = ath10k_peer_find(ar, arvif->vdev_id, vif->addr);
+		if (!peer) {
+			ath10k_warn(ar, "failed to lookup peer %pM on vdev %i\n",
+				    vif->addr, arvif->vdev_id);
+			spin_unlock_bh(&ar->data_lock);
+			ret = -ENOENT;
+			goto err_peer_delete;
+		}
+
+		arvif->peer_id = find_first_bit(peer->peer_ids,
+						ATH10K_MAX_NUM_PEER_IDS);
+
+		spin_unlock_bh(&ar->data_lock);
+	} else {
+		arvif->peer_id = HTT_INVALID_PEERID;
 	}
 
 	if (arvif->vdev_type == WMI_VDEV_TYPE_AP) {
@@ -4591,7 +5169,7 @@
 err_peer_delete:
 	if (arvif->vdev_type == WMI_VDEV_TYPE_AP ||
 	    arvif->vdev_type == WMI_VDEV_TYPE_IBSS)
-		ath10k_wmi_peer_delete(ar, arvif->vdev_id, vif->addr);
+		ath10k_peer_delete(ar, arvif->vdev_id, vif->addr);
 
 err_vdev_delete:
 	ath10k_wmi_vdev_delete(ar, arvif->vdev_id);
@@ -4623,7 +5201,10 @@
 {
 	struct ath10k *ar = hw->priv;
 	struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+	struct ath10k_peer *peer;
+	unsigned long time_left;
 	int ret;
+	int i;
 
 	cancel_work_sync(&arvif->ap_csa_work);
 	cancel_delayed_work_sync(&arvif->connection_loss_work);
@@ -4644,7 +5225,7 @@
 
 	if (arvif->vdev_type == WMI_VDEV_TYPE_AP ||
 	    arvif->vdev_type == WMI_VDEV_TYPE_IBSS) {
-		ret = ath10k_wmi_peer_delete(arvif->ar, arvif->vdev_id,
+		ret = ath10k_peer_delete(arvif->ar, arvif->vdev_id,
 					     vif->addr);
 		if (ret)
 			ath10k_warn(ar, "failed to submit AP/IBSS self-peer removal on vdev %i: %d\n",
@@ -4653,6 +5234,9 @@
 		kfree(arvif->u.ap.noa_data);
 	}
 
+	if ((arvif->vdev_type == WMI_VDEV_TYPE_STA) && QCA_REV_WCN3990(ar))
+		ath10k_wmi_csa_offload(ar, arvif->vdev_id, false);
+
 	ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %i delete (remove interface)\n",
 		   arvif->vdev_id);
 
@@ -4661,6 +5245,16 @@
 		ath10k_warn(ar, "failed to delete WMI vdev %i: %d\n",
 			    arvif->vdev_id, ret);
 
+	if (QCA_REV_WCN3990(ar)) {
+		time_left = wait_for_completion_timeout(
+						&ar->vdev_delete_done,
+						ATH10K_VDEV_DELETE_TIMEOUT_HZ);
+		if (time_left == 0) {
+			ath10k_warn(ar, "Timeout in receiving vdev delete resp\n");
+			return;
+		}
+	}
+
 	/* Some firmware revisions don't notify host about self-peer removal
 	 * until after associated vdev is deleted.
 	 */
@@ -4677,7 +5271,22 @@
 		spin_unlock_bh(&ar->data_lock);
 	}
 
+	spin_lock_bh(&ar->data_lock);
+	for (i = 0; i < ARRAY_SIZE(ar->peer_map); i++) {
+		peer = ar->peer_map[i];
+		if (!peer)
+			continue;
+
+		if (peer->vif == vif) {
+			ath10k_warn(ar, "found vif peer %pM entry on vdev %i after it was supposedly removed\n",
+				    vif->addr, arvif->vdev_id);
+			peer->vif = NULL;
+		}
+	}
+	spin_unlock_bh(&ar->data_lock);
+
 	ath10k_peer_cleanup(ar, arvif->vdev_id);
+	ath10k_mac_txq_unref(ar, vif->txq);
 
 	if (vif->type == NL80211_IFTYPE_MONITOR) {
 		ar->monitor_arvif = NULL;
@@ -4686,13 +5295,39 @@
 			ath10k_warn(ar, "failed to recalc monitor: %d\n", ret);
 	}
 
+	ret = ath10k_mac_txpower_recalc(ar);
+	if (ret)
+		ath10k_warn(ar, "failed to recalc tx power: %d\n", ret);
+
 	spin_lock_bh(&ar->htt.tx_lock);
 	ath10k_mac_vif_tx_unlock_all(arvif);
 	spin_unlock_bh(&ar->htt.tx_lock);
 
+	ath10k_mac_txq_unref(ar, vif->txq);
+
 	mutex_unlock(&ar->conf_mutex);
 }
 
+static int ath10k_change_interface(struct ieee80211_hw *hw,
+				   struct ieee80211_vif *vif,
+				   enum nl80211_iftype new_type, bool p2p)
+{
+	struct ath10k *ar = hw->priv;
+	int ret = 0;
+
+	ath10k_dbg(ar, ATH10K_DBG_MAC,
+		   "change_interface new: %d (%d), old: %d (%d)\n", new_type,
+		   p2p, vif->type, vif->p2p);
+
+	if (new_type != vif->type || vif->p2p != p2p) {
+		ath10k_remove_interface(hw, vif);
+		vif->type = new_type;
+		vif->p2p = p2p;
+		ret = ath10k_add_interface(hw, vif);
+	}
+	return ret;
+}
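
change_interface is implemented as remove + mutate + add because a vdev's type and subtype are fixed at WMI vdev-create time; the firmware cannot morph a running vdev in place. A stand-alone sketch of the recreate pattern, with all types and helpers invented:

#include <stdio.h>

struct iface { int type; int p2p; int vdev_created; };

static void remove_iface(struct iface *v) { v->vdev_created = 0; }
static int add_iface(struct iface *v) { v->vdev_created = 1; return 0; }

/* Recreate rather than mutate: the vdev type is fixed at creation time. */
static int change_iface(struct iface *v, int new_type, int p2p)
{
	if (v->type == new_type && v->p2p == p2p)
		return 0; /* nothing to do */
	remove_iface(v);
	v->type = new_type;
	v->p2p = p2p;
	return add_iface(v);
}

int main(void)
{
	struct iface v = { .type = 2, .p2p = 0, .vdev_created = 1 };

	change_iface(&v, 3, 1); /* e.g. STA -> P2P GO */
	printf("type %d p2p %d created %d\n", v.type, v.p2p, v.vdev_created);
	return 0;
}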
+
 /*
  * FIXME: Has to be verified.
  */
@@ -4721,7 +5356,7 @@
 
 	ret = ath10k_monitor_recalc(ar);
 	if (ret)
-		ath10k_warn(ar, "failed to recalc montior: %d\n", ret);
+		ath10k_warn(ar, "failed to recalc monitor: %d\n", ret);
 
 	mutex_unlock(&ar->conf_mutex);
 }
@@ -4913,7 +5548,8 @@
 	struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
 	struct cfg80211_scan_request *req = &hw_req->req;
 	struct wmi_start_scan_arg arg;
-	int ret = 0;
+	const u8 *ptr;
+	int ret = 0, ie_skip_len = 0;
 	int i;
 
 	mutex_lock(&ar->conf_mutex);
@@ -4945,8 +5581,16 @@
 	arg.scan_id = ATH10K_SCAN_ID;
 
 	if (req->ie_len) {
-		arg.ie_len = req->ie_len;
-		memcpy(arg.ie, req->ie, arg.ie_len);
+		if (QCA_REV_WCN3990(ar)) {
+			ptr = req->ie;
+			while (ie_skip_len + 2 <= req->ie_len &&
+			       (ptr[0] == WLAN_EID_SUPP_RATES ||
+				ptr[0] == WLAN_EID_EXT_SUPP_RATES)) {
+				ie_skip_len += ptr[1] + 2;
+				ptr = req->ie + ie_skip_len;
+			}
+			if (ie_skip_len > req->ie_len)
+				ie_skip_len = req->ie_len;
+		}
+		arg.ie_len = req->ie_len - ie_skip_len;
+		memcpy(arg.ie, req->ie + ie_skip_len, arg.ie_len);
 	}
 
 	if (req->n_ssids) {
@@ -4955,6 +5599,11 @@
 			arg.ssids[i].len  = req->ssids[i].ssid_len;
 			arg.ssids[i].ssid = req->ssids[i].ssid;
 		}
+		if (QCA_REV_WCN3990(ar)) {
+			arg.scan_ctrl_flags &=
+					~(WMI_SCAN_ADD_BCAST_PROBE_REQ |
+					  WMI_SCAN_CHAN_STAT_EVENT);
+		}
 	} else {
 		arg.scan_ctrl_flags |= WMI_SCAN_FLAG_PASSIVE;
 	}
@@ -5037,6 +5686,22 @@
 			    arvif->vdev_id, ret);
 }
 
+static void ath10k_set_rekey_data(struct ieee80211_hw *hw,
+				  struct ieee80211_vif *vif,
+				  struct cfg80211_gtk_rekey_data *data)
+{
+	struct ath10k *ar = hw->priv;
+	struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+
+	mutex_lock(&ar->conf_mutex);
+	memcpy(&arvif->gtk_rekey_data.kek, data->kek, NL80211_KEK_LEN);
+	memcpy(&arvif->gtk_rekey_data.kck, data->kck, NL80211_KCK_LEN);
+	arvif->gtk_rekey_data.replay_ctr =
+			be64_to_cpup((__be64 *)data->replay_ctr);
+	arvif->gtk_rekey_data.valid = true;
+	mutex_unlock(&ar->conf_mutex);
+}
+
 static int ath10k_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
 			  struct ieee80211_vif *vif, struct ieee80211_sta *sta,
 			  struct ieee80211_key_conf *key)
@@ -5392,13 +6057,18 @@
 	struct ath10k *ar = hw->priv;
 	struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
 	struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
+	struct ath10k_peer *peer;
 	int ret = 0;
+	int i;
 
 	if (old_state == IEEE80211_STA_NOTEXIST &&
 	    new_state == IEEE80211_STA_NONE) {
 		memset(arsta, 0, sizeof(*arsta));
 		arsta->arvif = arvif;
 		INIT_WORK(&arsta->update_wk, ath10k_sta_rc_update_wk);
+
+		for (i = 0; i < ARRAY_SIZE(sta->txq); i++)
+			ath10k_mac_txq_init(sta->txq[i]);
 	}
 
 	/* cancel must be done outside the mutex to avoid deadlock */
@@ -5406,6 +6076,9 @@
 	     new_state == IEEE80211_STA_NOTEXIST))
 		cancel_work_sync(&arsta->update_wk);
 
+	if (vif->type == NL80211_IFTYPE_STATION && new_state > ar->sta_state)
+		ar->sta_state = new_state;
+
 	mutex_lock(&ar->conf_mutex);
 
 	if (old_state == IEEE80211_STA_NOTEXIST &&
@@ -5433,8 +6106,8 @@
 		if (sta->tdls)
 			peer_type = WMI_PEER_TYPE_TDLS;
 
-		ret = ath10k_peer_create(ar, arvif->vdev_id, sta->addr,
-					 peer_type);
+		ret = ath10k_peer_create(ar, vif, sta, arvif->vdev_id,
+					 sta->addr, peer_type);
 		if (ret) {
 			ath10k_warn(ar, "failed to add peer %pM for vdev %d when adding a new sta: %i\n",
 				    sta->addr, arvif->vdev_id, ret);
@@ -5442,6 +6115,24 @@
 			goto exit;
 		}
 
+		spin_lock_bh(&ar->data_lock);
+
+		peer = ath10k_peer_find(ar, arvif->vdev_id, sta->addr);
+		if (!peer) {
+			ath10k_warn(ar, "failed to lookup peer %pM on vdev %i\n",
+				    vif->addr, arvif->vdev_id);
+			spin_unlock_bh(&ar->data_lock);
+			ath10k_peer_delete(ar, arvif->vdev_id, sta->addr);
+			ath10k_mac_dec_num_stations(arvif, sta);
+			ret = -ENOENT;
+			goto exit;
+		}
+
+		arsta->peer_id = find_first_bit(peer->peer_ids,
+						ATH10K_MAX_NUM_PEER_IDS);
+
+		spin_unlock_bh(&ar->data_lock);
+
 		if (!sta->tdls)
 			goto exit;
 
@@ -5494,8 +6185,8 @@
 		 * Existing station deletion.
 		 */
 		ath10k_dbg(ar, ATH10K_DBG_MAC,
-			   "mac vdev %d peer delete %pM (sta gone)\n",
-			   arvif->vdev_id, sta->addr);
+			   "mac vdev %d peer delete %pM sta %pK (sta gone)\n",
+			   arvif->vdev_id, sta->addr, sta);
 
 		ret = ath10k_peer_delete(ar, arvif->vdev_id, sta->addr);
 		if (ret)
@@ -5504,6 +6195,31 @@
 
 		ath10k_mac_dec_num_stations(arvif, sta);
 
+		spin_lock_bh(&ar->data_lock);
+		for (i = 0; i < ARRAY_SIZE(ar->peer_map); i++) {
+			peer = ar->peer_map[i];
+			if (!peer)
+				continue;
+
+			if (peer->sta == sta) {
+				ath10k_warn(ar, "found sta peer %pM (ptr %pK id %d) entry on vdev %i after it was supposedly removed\n",
+					    sta->addr, peer, i, arvif->vdev_id);
+				peer->sta = NULL;
+
+				/* Clean up the peer object as well since we
+				 * must have failed to do this above.
+				 */
+				list_del(&peer->list);
+				ar->peer_map[i] = NULL;
+				kfree(peer);
+				ar->num_peers--;
+			}
+		}
+		spin_unlock_bh(&ar->data_lock);
+
+		for (i = 0; i < ARRAY_SIZE(sta->txq); i++)
+			ath10k_mac_txq_unref(ar, sta->txq[i]);
+
 		if (!sta->tdls)
 			goto exit;
 
@@ -5802,7 +6518,13 @@
 	arg.dwell_time_passive = scan_time_msec;
 	arg.max_scan_time = scan_time_msec;
 	arg.scan_ctrl_flags |= WMI_SCAN_FLAG_PASSIVE;
+	if (QCA_REV_WCN3990(ar)) {
+		arg.scan_ctrl_flags &= ~(WMI_SCAN_FILTER_PROBE_REQ |
+					  WMI_SCAN_CHAN_STAT_EVENT |
+					  WMI_SCAN_ADD_BCAST_PROBE_REQ);
+	} else {
 	arg.scan_ctrl_flags |= WMI_SCAN_FILTER_PROBE_REQ;
+	}
 	arg.burst_duration_ms = duration;
 
 	ret = ath10k_start_scan(ar, &arg);
@@ -5966,6 +6688,39 @@
 	mutex_unlock(&ar->conf_mutex);
 }
 
+static void
+ath10k_mac_update_bss_chan_survey(struct ath10k *ar,
+				  struct ieee80211_channel *channel)
+{
+	int ret;
+	enum wmi_bss_survey_req_type type = WMI_BSS_SURVEY_REQ_TYPE_READ_CLEAR;
+
+	lockdep_assert_held(&ar->conf_mutex);
+
+	if (!test_bit(WMI_SERVICE_BSS_CHANNEL_INFO_64, ar->wmi.svc_map) ||
+	    (ar->rx_channel != channel))
+		return;
+
+	if (ar->scan.state != ATH10K_SCAN_IDLE) {
+		ath10k_dbg(ar, ATH10K_DBG_MAC, "ignoring bss chan info request while scanning..\n");
+		return;
+	}
+
+	reinit_completion(&ar->bss_survey_done);
+
+	ret = ath10k_wmi_pdev_bss_chan_info_request(ar, type);
+	if (ret) {
+		ath10k_warn(ar, "failed to send pdev bss chan info request\n");
+		return;
+	}
+
+	ret = wait_for_completion_timeout(&ar->bss_survey_done, 3 * HZ);
+	if (!ret) {
+		ath10k_warn(ar, "bss channel survey timed out\n");
+		return;
+	}
+}
+
 static int ath10k_get_survey(struct ieee80211_hw *hw, int idx,
 			     struct survey_info *survey)
 {
@@ -5990,6 +6745,9 @@
 		goto exit;
 	}
 
+	if (!QCA_REV_WCN3990(ar))
+		ath10k_mac_update_bss_chan_survey(ar, &sband->channels[idx]);
+
 	spin_lock_bh(&ar->data_lock);
 	memcpy(survey, ar_survey, sizeof(*survey));
 	spin_unlock_bh(&ar->data_lock);
@@ -6378,6 +7136,32 @@
 	return 0;
 }
 
+static void ath10k_set_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+			   u64 tsf)
+{
+	struct ath10k *ar = hw->priv;
+	struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+	u32 tsf_offset, vdev_param = ar->wmi.vdev_param->set_tsf;
+	int ret;
+
+	/* Workaround:
+	 *
+	 * The given tsf argument is an absolute TSF value, but the firmware
+	 * accepts only an offset relative to the current TSF.
+	 *
+	 * get_tsf() is used to derive that offset, but ath10k_get_tsf() is
+	 * not implemented properly and always returns 0. Luckily, all the
+	 * current callers of set_tsf also build the absolute TSF from
+	 * get_tsf(), i.e. as get_tsf() + tsf_delta, so the final offset sent
+	 * to firmware is still arithmetically correct.
+	 */
+	tsf_offset = tsf - ath10k_get_tsf(hw, vif);
+	ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id,
+					vdev_param, tsf_offset);
+	if (ret && ret != -EOPNOTSUPP)
+		ath10k_warn(ar, "failed to set tsf offset: %d\n", ret);
+}
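
The workaround is easiest to see with numbers: firmware wants a relative offset, callers pass an absolute TSF they built from get_tsf(), and since get_tsf() currently returns 0 the subtraction reproduces exactly the caller's intended delta. A worked example with an invented delta:

#include <stdint.h>
#include <stdio.h>

static uint64_t get_tsf(void) { return 0; } /* as in today's driver */

int main(void)
{
	uint64_t tsf_delta = 1000;	       /* the shift a caller wants */
	uint64_t tsf = get_tsf() + tsf_delta;  /* how callers build `tsf` */
	uint32_t tsf_offset = (uint32_t)(tsf - get_tsf());

	/* (0 + 1000) - 0 == 1000: firmware still gets the right offset */
	printf("offset sent to firmware: %u\n", (unsigned)tsf_offset);
	return 0;
}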
+
 static int ath10k_ampdu_action(struct ieee80211_hw *hw,
 			       struct ieee80211_vif *vif,
 			       struct ieee80211_ampdu_params *params)
@@ -6496,7 +7280,19 @@
 
 		if (WARN_ON(!arvif->is_up))
 			continue;
+		if (QCA_REV_WCN3990(ar)) {
+			/* For the wcn3990 WLAN module we send only a
+			 * vdev restart; there is no need to send vdev
+			 * down first.
+			 */
 
+			ret = ath10k_vdev_restart(arvif, &vifs[i].new_ctx->def);
+			if (ret) {
+				ath10k_warn(ar,
+					    "failed to restart vdev %d: %d\n",
+					    arvif->vdev_id, ret);
+				continue;
+			}
+		} else {
 		ret = ath10k_wmi_vdev_down(ar, arvif->vdev_id);
 		if (ret) {
 			ath10k_warn(ar, "failed to down vdev %d: %d\n",
@@ -6504,6 +7300,7 @@
 			continue;
 		}
 	}
+	}
 
 	/* All relevant vdevs are downed and associated channel resources
 	 * should be available for the channel switch now.
@@ -6532,12 +7329,18 @@
 			ath10k_warn(ar, "failed to update prb tmpl during csa: %d\n",
 				    ret);
 
+		if (!QCA_REV_WCN3990(ar)) {
+			/* For modules other than wcn3990 we send both
+			 * vdev down and vdev restart to the firmware.
+			 */
+
 		ret = ath10k_vdev_restart(arvif, &vifs[i].new_ctx->def);
 		if (ret) {
 			ath10k_warn(ar, "failed to restart vdev %d: %d\n",
 				    arvif->vdev_id, ret);
 			continue;
 		}
+		}
 
 		ret = ath10k_wmi_vdev_up(arvif->ar, arvif->vdev_id, arvif->aid,
 					 arvif->bssid);
@@ -6558,7 +7361,7 @@
 	struct ath10k *ar = hw->priv;
 
 	ath10k_dbg(ar, ATH10K_DBG_MAC,
-		   "mac chanctx add freq %hu width %d ptr %p\n",
+		   "mac chanctx add freq %hu width %d ptr %pK\n",
 		   ctx->def.chan->center_freq, ctx->def.width, ctx);
 
 	mutex_lock(&ar->conf_mutex);
@@ -6582,7 +7385,7 @@
 	struct ath10k *ar = hw->priv;
 
 	ath10k_dbg(ar, ATH10K_DBG_MAC,
-		   "mac chanctx remove freq %hu width %d ptr %p\n",
+		   "mac chanctx remove freq %hu width %d ptr %pK\n",
 		   ctx->def.chan->center_freq, ctx->def.width, ctx);
 
 	mutex_lock(&ar->conf_mutex);
@@ -6647,7 +7450,7 @@
 	mutex_lock(&ar->conf_mutex);
 
 	ath10k_dbg(ar, ATH10K_DBG_MAC,
-		   "mac chanctx change freq %hu width %d ptr %p changed %x\n",
+		   "mac chanctx change freq %hu width %d ptr %pK changed %x\n",
 		   ctx->def.chan->center_freq, ctx->def.width, ctx, changed);
 
 	/* This shouldn't really happen because channel switching should use
@@ -6705,7 +7508,7 @@
 	mutex_lock(&ar->conf_mutex);
 
 	ath10k_dbg(ar, ATH10K_DBG_MAC,
-		   "mac chanctx assign ptr %p vdev_id %i\n",
+		   "mac chanctx assign ptr %pK vdev_id %i\n",
 		   ctx, arvif->vdev_id);
 
 	if (WARN_ON(arvif->is_started)) {
@@ -6773,12 +7576,13 @@
 	mutex_lock(&ar->conf_mutex);
 
 	ath10k_dbg(ar, ATH10K_DBG_MAC,
-		   "mac chanctx unassign ptr %p vdev_id %i\n",
+		   "mac chanctx unassign ptr %pK vdev_id %i\n",
 		   ctx, arvif->vdev_id);
 
 	WARN_ON(!arvif->is_started);
-
-	if (vif->type == NL80211_IFTYPE_MONITOR) {
+	if (vif->type == NL80211_IFTYPE_MONITOR ||
+	    (vif->type == NL80211_IFTYPE_STATION &&
+	     ar->sta_state < IEEE80211_STA_ASSOC)) {
 		WARN_ON(!arvif->is_up);
 
 		ret = ath10k_wmi_vdev_down(ar, arvif->vdev_id);
@@ -6794,6 +7598,7 @@
 		ath10k_warn(ar, "failed to stop vdev %i: %d\n",
 			    arvif->vdev_id, ret);
 
+	ar->sta_state = IEEE80211_STA_NOTEXIST;
 	arvif->is_started = false;
 
 	mutex_unlock(&ar->conf_mutex);
@@ -6819,16 +7624,19 @@
 }
 
 static const struct ieee80211_ops ath10k_ops = {
-	.tx				= ath10k_tx,
+	.tx				= ath10k_mac_op_tx,
+	.wake_tx_queue			= ath10k_mac_op_wake_tx_queue,
 	.start				= ath10k_start,
 	.stop				= ath10k_stop,
 	.config				= ath10k_config,
 	.add_interface			= ath10k_add_interface,
+	.change_interface		= ath10k_change_interface,
 	.remove_interface		= ath10k_remove_interface,
 	.configure_filter		= ath10k_configure_filter,
 	.bss_info_changed		= ath10k_bss_info_changed,
 	.hw_scan			= ath10k_hw_scan,
 	.cancel_hw_scan			= ath10k_cancel_hw_scan,
+	.set_rekey_data			= ath10k_set_rekey_data,
 	.set_key			= ath10k_set_key,
 	.set_default_unicast_key        = ath10k_set_default_unicast_key,
 	.sta_state			= ath10k_sta_state,
@@ -6846,6 +7654,7 @@
 	.set_bitrate_mask		= ath10k_mac_op_set_bitrate_mask,
 	.sta_rc_update			= ath10k_sta_rc_update,
 	.get_tsf			= ath10k_get_tsf,
+	.set_tsf			= ath10k_set_tsf,
 	.ampdu_action			= ath10k_ampdu_action,
 	.get_et_sset_count		= ath10k_debug_get_et_sset_count,
 	.get_et_stats			= ath10k_debug_get_et_stats,
@@ -6862,9 +7671,11 @@
 #ifdef CONFIG_PM
 	.suspend			= ath10k_wow_op_suspend,
 	.resume				= ath10k_wow_op_resume,
+	.set_wakeup			= ath10k_wow_op_set_wakeup,
 #endif
 #ifdef CONFIG_MAC80211_DEBUGFS
 	.sta_add_debugfs		= ath10k_sta_add_debugfs,
+	.sta_statistics			= ath10k_sta_statistics,
 #endif
 };
 
@@ -6934,21 +7745,32 @@
 struct ath10k *ath10k_mac_create(size_t priv_size)
 {
 	struct ieee80211_hw *hw;
+	struct ieee80211_ops *ops;
 	struct ath10k *ar;
 
-	hw = ieee80211_alloc_hw(sizeof(struct ath10k) + priv_size, &ath10k_ops);
-	if (!hw)
+	ops = kmemdup(&ath10k_ops, sizeof(ath10k_ops), GFP_KERNEL);
+	if (!ops)
 		return NULL;
 
+	hw = ieee80211_alloc_hw(sizeof(struct ath10k) + priv_size, ops);
+	if (!hw) {
+		kfree(ops);
+		return NULL;
+	}
+
 	ar = hw->priv;
 	ar->hw = hw;
+	ar->ops = ops;
 
 	return ar;
 }
 
 void ath10k_mac_destroy(struct ath10k *ar)
 {
+	struct ieee80211_ops *ops = ar->ops;
+
 	ieee80211_free_hw(ar->hw);
+	kfree(ops);
 }
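
ath10k_mac_create() now kmemdup()s the const ops table per device so that ath10k_mac_register() can clear .wake_tx_queue on firmware without pull-push flow control (see the hunk further down); patching a shared static table would affect every ath10k instance in the system. A user-space sketch of the clone-and-patch pattern:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct ops {
	void (*tx)(void);
	void (*wake_tx_queue)(void);
};

static void tx_fn(void) { }
static void wake_fn(void) { }

static const struct ops template_ops = {
	.tx = tx_fn,
	.wake_tx_queue = wake_fn,
};

int main(void)
{
	/* one mutable copy per device, like the kmemdup() in the hunk */
	struct ops *ops = malloc(sizeof(*ops));

	if (!ops)
		return 1;
	memcpy(ops, &template_ops, sizeof(*ops));
	ops->wake_tx_queue = NULL; /* this firmware lacks pull-push support */

	printf("wake_tx_queue is %s\n", ops->wake_tx_queue ? "set" : "off");
	free(ops);
	return 0;
}

The copy also explains why ath10k_mac_destroy() must free ar->ops after ieee80211_free_hw().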
 
 static const struct ieee80211_iface_limit ath10k_if_limits[] = {
@@ -6982,6 +7804,10 @@
 		| BIT(NL80211_IFTYPE_MESH_POINT)
 #endif
 	},
+	{
+		.max	= 1,
+		.types	= BIT(NL80211_IFTYPE_STATION)
+	},
 };
 
 static const struct ieee80211_iface_combination ath10k_if_comb[] = {
@@ -7103,6 +7929,85 @@
 	},
 };
 
+static const struct ieee80211_iface_limit ath10k_wcn3990_if_limit[] = {
+	{
+		.max = 2,
+		.types = BIT(NL80211_IFTYPE_STATION),
+	},
+	{
+		.max = 2,
+		.types = BIT(NL80211_IFTYPE_AP) |
+#ifdef CONFIG_MAC80211_MESH
+			 BIT(NL80211_IFTYPE_MESH_POINT) |
+#endif
+			 BIT(NL80211_IFTYPE_P2P_CLIENT) |
+			 BIT(NL80211_IFTYPE_P2P_GO),
+	},
+};
+
+static const struct ieee80211_iface_limit ath10k_wcn3990_qcs_if_limit[] = {
+	{
+		.max = 2,
+		.types = BIT(NL80211_IFTYPE_STATION),
+	},
+	{
+		.max = 2,
+		.types = BIT(NL80211_IFTYPE_P2P_CLIENT),
+	},
+	{
+		.max = 1,
+		.types = BIT(NL80211_IFTYPE_AP) |
+#ifdef CONFIG_MAC80211_MESH
+			 BIT(NL80211_IFTYPE_MESH_POINT) |
+#endif
+			 BIT(NL80211_IFTYPE_P2P_GO),
+	},
+};
+
+static const struct ieee80211_iface_limit ath10k_wcn3990_if_limit_ibss[] = {
+	{
+		.max = 1,
+		.types = BIT(NL80211_IFTYPE_STATION),
+	},
+	{
+		.max = 1,
+		.types = BIT(NL80211_IFTYPE_ADHOC),
+	},
+};
+
+static struct ieee80211_iface_combination ath10k_wcn3990_qcs_if_comb[] = {
+	{
+		.limits = ath10k_wcn3990_if_limit,
+		.num_different_channels = 1,
+		.max_interfaces = 4,
+		.n_limits = ARRAY_SIZE(ath10k_wcn3990_if_limit),
+#ifdef CONFIG_ATH10K_DFS_CERTIFIED
+		.radar_detect_widths =  BIT(NL80211_CHAN_WIDTH_20_NOHT) |
+				       BIT(NL80211_CHAN_WIDTH_20) |
+				       BIT(NL80211_CHAN_WIDTH_40) |
+				       BIT(NL80211_CHAN_WIDTH_80),
+#endif
+	},
+	{
+		.limits = ath10k_wcn3990_qcs_if_limit,
+		.num_different_channels = 2,
+		.max_interfaces = 4,
+		.n_limits = ARRAY_SIZE(ath10k_wcn3990_qcs_if_limit),
+	},
+	{
+		.limits = ath10k_wcn3990_if_limit_ibss,
+		.num_different_channels = 1,
+		.max_interfaces = 2,
+		.n_limits = ARRAY_SIZE(ath10k_wcn3990_if_limit_ibss),
+#ifdef CONFIG_ATH10K_DFS_CERTIFIED
+		.radar_detect_widths =  BIT(NL80211_CHAN_WIDTH_20_NOHT) |
+				       BIT(NL80211_CHAN_WIDTH_20) |
+				       BIT(NL80211_CHAN_WIDTH_40) |
+				       BIT(NL80211_CHAN_WIDTH_80),
+#endif
+	},
+};
+
 static const struct ieee80211_iface_limit ath10k_10_4_if_limits[] = {
 	{
 		.max = 1,
@@ -7176,7 +8081,11 @@
 	struct ieee80211_supported_band *band;
 	void *channels;
 	int ret;
+	u8 base_mac[ETH_ALEN];
 
+	if (mac_pton(base_mac_addr, base_mac) && !is_zero_ether_addr(base_mac))
+		SET_IEEE80211_PERM_ADDR(ar->hw, base_mac);
+	else
-	SET_IEEE80211_PERM_ADDR(ar->hw, ar->mac_addr);
+		SET_IEEE80211_PERM_ADDR(ar->hw, ar->mac_addr);
 
 	SET_IEEE80211_DEV(ar->hw, ar->dev);
@@ -7197,8 +8106,14 @@
 		band = &ar->mac.sbands[IEEE80211_BAND_2GHZ];
 		band->n_channels = ARRAY_SIZE(ath10k_2ghz_channels);
 		band->channels = channels;
+
+		if (ar->hw_params.cck_rate_map_rev2) {
+			band->n_bitrates = ath10k_g_rates_rev2_size;
+			band->bitrates = ath10k_g_rates_rev2;
+		} else {
 		band->n_bitrates = ath10k_g_rates_size;
 		band->bitrates = ath10k_g_rates;
+		}
 
 		ar->hw->wiphy->bands[IEEE80211_BAND_2GHZ] = band;
 	}
@@ -7230,12 +8145,16 @@
 	ar->hw->wiphy->available_antennas_rx = ar->cfg_rx_chainmask;
 	ar->hw->wiphy->available_antennas_tx = ar->cfg_tx_chainmask;
 
-	if (!test_bit(ATH10K_FW_FEATURE_NO_P2P, ar->fw_features))
+	if (!test_bit(ATH10K_FW_FEATURE_NO_P2P, ar->normal_mode_fw.fw_file.fw_features))
 		ar->hw->wiphy->interface_modes |=
 			BIT(NL80211_IFTYPE_P2P_DEVICE) |
 			BIT(NL80211_IFTYPE_P2P_CLIENT) |
 			BIT(NL80211_IFTYPE_P2P_GO);
 
+	if (QCA_REV_WCN3990(ar))
+		ar->hw->wiphy->interface_modes &=
+			~BIT(NL80211_IFTYPE_P2P_DEVICE);
+
 	ieee80211_hw_set(ar->hw, SIGNAL_DBM);
 	ieee80211_hw_set(ar->hw, SUPPORTS_PS);
 	ieee80211_hw_set(ar->hw, SUPPORTS_DYNAMIC_PS);
@@ -7270,6 +8189,7 @@
 
 	ar->hw->vif_data_size = sizeof(struct ath10k_vif);
 	ar->hw->sta_data_size = sizeof(struct ath10k_sta);
+	ar->hw->txq_data_size = sizeof(struct ath10k_txq);
 
 	ar->hw->max_listen_interval = ATH10K_MAX_HW_LISTEN_INTERVAL;
 
@@ -7294,7 +8214,8 @@
 	ar->hw->wiphy->max_remain_on_channel_duration = 5000;
 
 	ar->hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD;
-	ar->hw->wiphy->features |= NL80211_FEATURE_AP_MODE_CHAN_WIDTH_CHANGE;
+	ar->hw->wiphy->features |= NL80211_FEATURE_AP_MODE_CHAN_WIDTH_CHANGE |
+				   NL80211_FEATURE_AP_SCAN;
 
 	ar->hw->wiphy->max_ap_assoc_sta = ar->max_num_stations;
 
@@ -7318,7 +8239,7 @@
 	 */
 	ar->hw->offchannel_tx_hw_queue = IEEE80211_MAX_QUEUES - 1;
 
-	switch (ar->wmi.op_version) {
+	switch (ar->running_fw->fw_file.wmi_op_version) {
 	case ATH10K_FW_WMI_OP_VERSION_MAIN:
 		ar->hw->wiphy->iface_combinations = ath10k_if_comb;
 		ar->hw->wiphy->n_iface_combinations =
@@ -7326,6 +8247,14 @@
 		ar->hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_ADHOC);
 		break;
 	case ATH10K_FW_WMI_OP_VERSION_TLV:
+		ar->hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_ADHOC);
+		if (QCA_REV_WCN3990(ar)) {
+			ar->hw->wiphy->iface_combinations =
+				ath10k_wcn3990_qcs_if_comb;
+			ar->hw->wiphy->n_iface_combinations =
+				ARRAY_SIZE(ath10k_wcn3990_qcs_if_comb);
+			break;
+		}
 		if (test_bit(WMI_SERVICE_ADAPTIVE_OCS, ar->wmi.svc_map)) {
 			ar->hw->wiphy->iface_combinations =
 				ath10k_tlv_qcs_if_comb;
@@ -7336,7 +8265,6 @@
 			ar->hw->wiphy->n_iface_combinations =
 				ARRAY_SIZE(ath10k_tlv_if_comb);
 		}
-		ar->hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_ADHOC);
 		break;
 	case ATH10K_FW_WMI_OP_VERSION_10_1:
 	case ATH10K_FW_WMI_OP_VERSION_10_2:
@@ -7360,7 +8288,7 @@
 	if (!test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags))
 		ar->hw->netdev_features = NETIF_F_HW_CSUM;
 
-	if (config_enabled(CONFIG_ATH10K_DFS_CERTIFIED)) {
+	if (IS_ENABLED(CONFIG_ATH10K_DFS_CERTIFIED)) {
 		/* Init ath dfs pattern detector */
 		ar->ath_common.debug_mask = ATH_DBG_DFS;
 		ar->dfs_detector = dfs_pattern_detector_init(&ar->ath_common,
@@ -7370,6 +8298,15 @@
 			ath10k_warn(ar, "failed to initialise DFS pattern detector\n");
 	}
 
+	/* Current wake_tx_queue implementation imposes a significant
+	 * performance penalty in some setups. The tx scheduling code needs
+	 * more work anyway so disable the wake_tx_queue unless firmware
+	 * supports the pull-push mechanism.
+	 */
+	if (!test_bit(ATH10K_FW_FEATURE_PEER_FLOW_CONTROL,
+		      ar->running_fw->fw_file.fw_features))
+		ar->ops->wake_tx_queue = NULL;
+
 	ret = ath_regd_init(&ar->ath_common.regulatory, ar->hw->wiphy,
 			    ath10k_reg_notifier);
 	if (ret) {
@@ -7399,7 +8336,7 @@
 	ieee80211_unregister_hw(ar->hw);
 
 err_dfs_detector_exit:
-	if (config_enabled(CONFIG_ATH10K_DFS_CERTIFIED) && ar->dfs_detector)
+	if (IS_ENABLED(CONFIG_ATH10K_DFS_CERTIFIED) && ar->dfs_detector)
 		ar->dfs_detector->exit(ar->dfs_detector);
 
 err_free:
@@ -7412,9 +8349,10 @@
 
 void ath10k_mac_unregister(struct ath10k *ar)
 {
+	ath10k_wow_deinit(ar);
 	ieee80211_unregister_hw(ar->hw);
 
-	if (config_enabled(CONFIG_ATH10K_DFS_CERTIFIED) && ar->dfs_detector)
+	if (IS_ENABLED(CONFIG_ATH10K_DFS_CERTIFIED) && ar->dfs_detector)
 		ar->dfs_detector->exit(ar->dfs_detector);
 
 	kfree(ar->mac.sbands[IEEE80211_BAND_2GHZ].channels);
diff -ruw linux-4.4.115/drivers/net/wireless/ath/ath10k/mac.h linux-4.4.115-fbx/drivers/net/wireless/ath/ath10k/mac.h
--- linux-4.4.115/drivers/net/wireless/ath/ath10k/mac.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/net/wireless/ath/ath10k/mac.h	2019-01-22 16:16:25.419263757 +0100
@@ -66,7 +66,7 @@
 				     enum wmi_tlv_tx_pause_action action);
 
 u8 ath10k_mac_hw_rate_to_idx(const struct ieee80211_supported_band *sband,
-			     u8 hw_rate);
+			     u8 hw_rate, bool cck);
 u8 ath10k_mac_bitrate_to_idx(const struct ieee80211_supported_band *sband,
 			     u32 bitrate);
 
@@ -74,6 +74,14 @@
 void ath10k_mac_tx_unlock(struct ath10k *ar, int reason);
 void ath10k_mac_vif_tx_lock(struct ath10k_vif *arvif, int reason);
 void ath10k_mac_vif_tx_unlock(struct ath10k_vif *arvif, int reason);
+bool ath10k_mac_tx_frm_has_freq(struct ath10k *ar);
+void ath10k_mac_tx_push_pending(struct ath10k *ar);
+int ath10k_mac_tx_push_txq(struct ieee80211_hw *hw,
+			   struct ieee80211_txq *txq);
+struct ieee80211_txq *ath10k_mac_txq_lookup(struct ath10k *ar,
+					    u16 peer_id,
+					    u8 tid);
+int ath10k_mac_ext_resource_config(struct ath10k *ar, u32 val);
 
 static inline struct ath10k_vif *ath10k_vif_to_arvif(struct ieee80211_vif *vif)
 {
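
The cck flag added to ath10k_mac_hw_rate_to_idx() is needed because CCK and OFDM rates can share the same hardware rate value. A hedged sketch of such a lookup, close to but not necessarily identical to the mac.c implementation (ath10k_mac_bitrate_is_cck() is assumed to classify a bitrate as CCK):

static u8 hw_rate_to_idx(const struct ieee80211_supported_band *sband,
			 u8 hw_rate, bool cck)
{
	const struct ieee80211_rate *rate;
	int i;

	for (i = 0; i < sband->n_bitrates; i++) {
		rate = &sband->bitrates[i];

		/* Skip the wrong modulation family; the same hw_value
		 * may be reused by both a CCK and an OFDM rate.
		 */
		if (ath10k_mac_bitrate_is_cck(rate->bitrate) != cck)
			continue;

		if (rate->hw_value == hw_rate ||
		    ((rate->flags & IEEE80211_RATE_SHORT_PREAMBLE) &&
		     rate->hw_value_short == hw_rate))
			return i;
	}

	return 0;
}
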
diff -ruw linux-4.4.115/drivers/net/wireless/ath/ath10k/Makefile linux-4.4.115-fbx/drivers/net/wireless/ath/ath10k/Makefile
--- linux-4.4.115/drivers/net/wireless/ath/ath10k/Makefile	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/net/wireless/ath/ath10k/Makefile	2019-01-22 16:16:25.411263685 +0100
@@ -24,6 +24,13 @@
 obj-$(CONFIG_ATH10K_PCI) += ath10k_pci.o
 ath10k_pci-y += pci.o \
 		ce.o
+obj-$(CONFIG_ATH10K_TARGET_SNOC) += ath10k_snoc.o
+ath10k_snoc-y += snoc.o \
+		qmi.o \
+		wcn3990_qmi_service_v01.o \
+		ce.o
+
+ath10k_pci-$(CONFIG_ATH10K_AHB) += ahb.o
 
 # for tracing framework to find trace.h
 CFLAGS_trace.o := -I$(src)
diff -ruw linux-4.4.115/drivers/net/wireless/ath/ath10k/pci.h linux-4.4.115-fbx/drivers/net/wireless/ath/ath10k/pci.h
--- linux-4.4.115/drivers/net/wireless/ath/ath10k/pci.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/net/wireless/ath/ath10k/pci.h	2019-01-22 16:16:25.423263793 +0100
@@ -22,6 +22,7 @@
 
 #include "hw.h"
 #include "ce.h"
+#include "ahb.h"
 
 /*
  * maximum number of bytes that can be handled atomically by DiagRead/DiagWrite
@@ -147,9 +148,6 @@
 
 	/* protects compl_free and num_send_allowed */
 	spinlock_t pipe_lock;
-
-	struct ath10k_pci *ar_pci;
-	struct tasklet_struct intr;
 };
 
 struct ath10k_pci_supp_chip {
@@ -157,32 +155,28 @@
 	u32 rev_id;
 };
 
+enum ath10k_pci_irq_mode {
+	ATH10K_PCI_IRQ_AUTO = 0,
+	ATH10K_PCI_IRQ_LEGACY = 1,
+	ATH10K_PCI_IRQ_MSI = 2,
+};
+
 struct ath10k_pci {
+	struct bus_opaque opaque_ctx;
 	struct pci_dev *pdev;
 	struct device *dev;
 	struct ath10k *ar;
 	void __iomem *mem;
 	size_t mem_len;
 
-	/*
-	 * Number of MSI interrupts granted, 0 --> using legacy PCI line
-	 * interrupts.
-	 */
-	int num_msi_intrs;
-
-	struct tasklet_struct intr_tq;
-	struct tasklet_struct msi_fw_err;
+	/* Operating interrupt mode */
+	enum ath10k_pci_irq_mode oper_irq_mode;
 
 	struct ath10k_pci_pipe pipe_info[CE_COUNT_MAX];
 
 	/* Copy Engine used for Diagnostic Accesses */
 	struct ath10k_ce_pipe *ce_diag;
 
-	/* FIXME: document what this really protects */
-	spinlock_t ce_lock;
-
-	/* Map CE id to ce_state */
-	struct ath10k_ce_pipe ce_states[CE_COUNT_MAX];
 	struct timer_list rx_post_retry;
 
 	/* Due to HW quirks it is recommended to disable ASPM during device
@@ -225,6 +219,18 @@
 	 * on MMIO read/write.
 	 */
 	bool pci_ps;
+
+	/* Chip specific pci reset routine used to do a safe reset */
+	int (*pci_soft_reset)(struct ath10k *ar);
+
+	/* Chip specific pci full reset function */
+	int (*pci_hard_reset)(struct ath10k *ar);
+
+	/* Keep this entry last; memory for struct ath10k_ahb is
+	 * allocated (when AHB support is enabled) in the continuation
+	 * of this struct.
+	 */
+	struct ath10k_ahb ahb[0];
 };
 
 static inline struct ath10k_pci *ath10k_pci_priv(struct ath10k *ar)
@@ -253,6 +259,40 @@
 u32 ath10k_pci_soc_read32(struct ath10k *ar, u32 addr);
 u32 ath10k_pci_reg_read32(struct ath10k *ar, u32 addr);
 
+int ath10k_pci_hif_tx_sg(struct ath10k *ar, u8 pipe_id,
+			 struct ath10k_hif_sg_item *items, int n_items);
+int ath10k_pci_hif_diag_read(struct ath10k *ar, u32 address, void *buf,
+			     size_t buf_len);
+int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
+			      const void *data, int nbytes);
+int ath10k_pci_hif_exchange_bmi_msg(struct ath10k *ar, void *req, u32 req_len,
+				    void *resp, u32 *resp_len);
+int ath10k_pci_hif_map_service_to_pipe(struct ath10k *ar, u16 service_id,
+				       u8 *ul_pipe, u8 *dl_pipe);
+void ath10k_pci_hif_get_default_pipe(struct ath10k *ar, u8 *ul_pipe,
+				     u8 *dl_pipe);
+void ath10k_pci_hif_send_complete_check(struct ath10k *ar, u8 pipe,
+					int force);
+u16 ath10k_pci_hif_get_free_queue_number(struct ath10k *ar, u8 pipe);
+void ath10k_pci_hif_power_down(struct ath10k *ar);
+int ath10k_pci_alloc_pipes(struct ath10k *ar);
+void ath10k_pci_free_pipes(struct ath10k *ar);
+void ath10k_pci_rx_replenish_retry(unsigned long ptr);
+void ath10k_pci_ce_deinit(struct ath10k *ar);
+void ath10k_pci_init_napi(struct ath10k *ar);
+int ath10k_pci_init_pipes(struct ath10k *ar);
+int ath10k_pci_init_config(struct ath10k *ar);
+void ath10k_pci_rx_post(struct ath10k *ar);
+void ath10k_pci_flush(struct ath10k *ar);
+void ath10k_pci_enable_legacy_irq(struct ath10k *ar);
+bool ath10k_pci_irq_pending(struct ath10k *ar);
+void ath10k_pci_disable_and_clear_legacy_irq(struct ath10k *ar);
+void ath10k_pci_irq_msi_fw_mask(struct ath10k *ar);
+int ath10k_pci_wait_for_target_init(struct ath10k *ar);
+int ath10k_pci_setup_resource(struct ath10k *ar);
+void ath10k_pci_release_resource(struct ath10k *ar);
+
 /* QCA6174 is known to have Tx/Rx issues when SOC_WAKE register is poked too
  * frequently. To avoid this put SoC to sleep after a very conservative grace
  * period. Adjust with great care.
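
The zero-length ahb[0] member added to struct ath10k_pci lets the PCI and AHB paths share one driver-private allocation, which is why the comment insists it stays last. A minimal sketch of the pattern, illustrative only since the driver actually sizes the private area when the ath10k instance is created:

#include <linux/slab.h>

static struct ath10k_pci *ath10k_pci_alloc_with_ahb(void)
{
	struct ath10k_pci *ar_pci;

	/* One allocation covers both structures; because ahb[] is the
	 * final member, the AHB state lands directly behind the PCI
	 * state and is reachable as &ar_pci->ahb[0].
	 */
	ar_pci = kzalloc(sizeof(*ar_pci) + sizeof(struct ath10k_ahb),
			 GFP_KERNEL);
	return ar_pci;
}
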
diff -ruw linux-4.4.115/drivers/net/wireless/ath/ath10k/rx_desc.h linux-4.4.115-fbx/drivers/net/wireless/ath/ath10k/rx_desc.h
--- linux-4.4.115/drivers/net/wireless/ath/ath10k/rx_desc.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/net/wireless/ath/ath10k/rx_desc.h	2019-10-29 09:26:24.461211183 +0100
@@ -205,12 +205,25 @@
  *		descriptor.
  */
 
+#ifndef CONFIG_ATH10K_SNOC
 struct rx_frag_info {
 	u8 ring0_more_count;
 	u8 ring1_more_count;
 	u8 ring2_more_count;
 	u8 ring3_more_count;
 } __packed;
+#else
+struct rx_frag_info {
+	u8 ring0_more_count;
+	u8 ring1_more_count;
+	u8 ring2_more_count;
+	u8 ring3_more_count;
+	u8 ring4_more_count;
+	u8 ring5_more_count;
+	u8 ring6_more_count;
+	u8 ring7_more_count;
+} __packed;
+#endif
 
 /*
  * ring0_more_count
@@ -465,11 +478,23 @@
 	__le32 info2; /* %RX_MSDU_START_INFO2_ */
 } __packed;
 
+#ifdef CONFIG_ATH10K_SNOC
+struct rx_msdu_start_wcn3990 {
+	__le32 info3;
+} __packed;
+#else
+struct rx_msdu_start_wcn3990 {
+} __packed;
+#endif
+
 struct rx_msdu_start {
 	struct rx_msdu_start_common common;
 	union {
 		struct rx_msdu_start_qca99x0 qca99x0;
 	} __packed;
+	union {
+		struct rx_msdu_start_wcn3990 wcn3990;
+	} __packed;
 } __packed;
 
 /*
@@ -589,11 +614,21 @@
 	__le32 info2;
 } __packed;
 
+struct rx_msdu_end_wcn3990 {
+	__le32 rule_indication_0;
+	__le32 rule_indication_1;
+	__le32 rule_indication_2;
+	__le32 rule_indication_3;
+} __packed;
+
 struct rx_msdu_end {
 	struct rx_msdu_end_common common;
 	union {
 		struct rx_msdu_end_qca99x0 qca99x0;
 	} __packed;
+#ifdef CONFIG_ATH10K_SNOC
+	struct rx_msdu_end_wcn3990 wcn3990;
+#endif
 } __packed;
 
 /*
@@ -656,26 +691,6 @@
  *		Reserved: HW should fill with zero.  FW should ignore.
  */
 
-#define RX_PPDU_START_SIG_RATE_SELECT_OFDM 0
-#define RX_PPDU_START_SIG_RATE_SELECT_CCK  1
-
-#define RX_PPDU_START_SIG_RATE_OFDM_48 0
-#define RX_PPDU_START_SIG_RATE_OFDM_24 1
-#define RX_PPDU_START_SIG_RATE_OFDM_12 2
-#define RX_PPDU_START_SIG_RATE_OFDM_6  3
-#define RX_PPDU_START_SIG_RATE_OFDM_54 4
-#define RX_PPDU_START_SIG_RATE_OFDM_36 5
-#define RX_PPDU_START_SIG_RATE_OFDM_18 6
-#define RX_PPDU_START_SIG_RATE_OFDM_9  7
-
-#define RX_PPDU_START_SIG_RATE_CCK_LP_11  0
-#define RX_PPDU_START_SIG_RATE_CCK_LP_5_5 1
-#define RX_PPDU_START_SIG_RATE_CCK_LP_2   2
-#define RX_PPDU_START_SIG_RATE_CCK_LP_1   3
-#define RX_PPDU_START_SIG_RATE_CCK_SP_11  4
-#define RX_PPDU_START_SIG_RATE_CCK_SP_5_5 5
-#define RX_PPDU_START_SIG_RATE_CCK_SP_2   6
-
 #define HTT_RX_PPDU_START_PREAMBLE_LEGACY        0x04
 #define HTT_RX_PPDU_START_PREAMBLE_HT            0x08
 #define HTT_RX_PPDU_START_PREAMBLE_HT_WITH_TXBF  0x09
@@ -711,25 +726,6 @@
 /* No idea what this flag means. It seems to be always set in rate. */
 #define RX_PPDU_START_RATE_FLAG BIT(3)
 
-enum rx_ppdu_start_rate {
-	RX_PPDU_START_RATE_OFDM_48M = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_OFDM_48M,
-	RX_PPDU_START_RATE_OFDM_24M = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_OFDM_24M,
-	RX_PPDU_START_RATE_OFDM_12M = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_OFDM_12M,
-	RX_PPDU_START_RATE_OFDM_6M  = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_OFDM_6M,
-	RX_PPDU_START_RATE_OFDM_54M = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_OFDM_54M,
-	RX_PPDU_START_RATE_OFDM_36M = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_OFDM_36M,
-	RX_PPDU_START_RATE_OFDM_18M = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_OFDM_18M,
-	RX_PPDU_START_RATE_OFDM_9M  = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_OFDM_9M,
-
-	RX_PPDU_START_RATE_CCK_LP_11M  = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_CCK_LP_11M,
-	RX_PPDU_START_RATE_CCK_LP_5_5M = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_CCK_LP_5_5M,
-	RX_PPDU_START_RATE_CCK_LP_2M   = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_CCK_LP_2M,
-	RX_PPDU_START_RATE_CCK_LP_1M   = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_CCK_LP_1M,
-	RX_PPDU_START_RATE_CCK_SP_11M  = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_CCK_SP_11M,
-	RX_PPDU_START_RATE_CCK_SP_5_5M = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_CCK_SP_5_5M,
-	RX_PPDU_START_RATE_CCK_SP_2M   = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_CCK_SP_2M,
-};
-
 struct rx_ppdu_start {
 	struct {
 		u8 pri20_mhz;
@@ -992,9 +988,51 @@
 
 struct rx_pkt_end {
 	__le32 info0; /* %RX_PKT_END_INFO0_ */
+#ifndef CONFIG_ATH10K_SNOC
 	__le32 phy_timestamp_1;
 	__le32 phy_timestamp_2;
-	__le32 rx_location_info; /* %RX_LOCATION_INFO_ */
+#else
+	__le64 phy_timestamp_1;
+	__le64 phy_timestamp_2;
+#endif
+} __packed;
+
+#define RX_LOCATION_INFO0_RTT_FAC_LEGACY_MASK		0x00003fff
+#define RX_LOCATION_INFO0_RTT_FAC_LEGACY_LSB		0
+#define RX_LOCATION_INFO0_RTT_FAC_VHT_MASK		0x1fff8000
+#define RX_LOCATION_INFO0_RTT_FAC_VHT_LSB		15
+#define RX_LOCATION_INFO0_RTT_STRONGEST_CHAIN_MASK	0xc0000000
+#define RX_LOCATION_INFO0_RTT_STRONGEST_CHAIN_LSB	30
+#define RX_LOCATION_INFO0_RTT_FAC_LEGACY_STATUS		BIT(14)
+#define RX_LOCATION_INFO0_RTT_FAC_VHT_STATUS		BIT(29)
+
+#define RX_LOCATION_INFO1_RTT_PREAMBLE_TYPE_MASK	0x0000000c
+#define RX_LOCATION_INFO1_RTT_PREAMBLE_TYPE_LSB		2
+#define RX_LOCATION_INFO1_PKT_BW_MASK			0x00000030
+#define RX_LOCATION_INFO1_PKT_BW_LSB			4
+#define RX_LOCATION_INFO1_SKIP_P_SKIP_BTCF_MASK		0x0000ff00
+#define RX_LOCATION_INFO1_SKIP_P_SKIP_BTCF_LSB		8
+#define RX_LOCATION_INFO1_RTT_MSC_RATE_MASK		0x000f0000
+#define RX_LOCATION_INFO1_RTT_MSC_RATE_LSB		16
+#define RX_LOCATION_INFO1_RTT_PBD_LEG_BW_MASK		0x00300000
+#define RX_LOCATION_INFO1_RTT_PBD_LEG_BW_LSB		20
+#define RX_LOCATION_INFO1_TIMING_BACKOFF_MASK		0x07c00000
+#define RX_LOCATION_INFO1_TIMING_BACKOFF_LSB		22
+#define RX_LOCATION_INFO1_RTT_TX_FRAME_PHASE_MASK	0x18000000
+#define RX_LOCATION_INFO1_RTT_TX_FRAME_PHASE_LSB	27
+#define RX_LOCATION_INFO1_RTT_CFR_STATUS		BIT(0)
+#define RX_LOCATION_INFO1_RTT_CIR_STATUS		BIT(1)
+#define RX_LOCATION_INFO1_RTT_GI_TYPE			BIT(7)
+#define RX_LOCATION_INFO1_RTT_MAC_PHY_PHASE		BIT(29)
+#define RX_LOCATION_INFO1_RTT_TX_DATA_START_X_PHASE	BIT(30)
+#define RX_LOCATION_INFO1_RX_LOCATION_VALID		BIT(31)
+
+struct rx_location_info {
+	__le32 rx_location_info0; /* %RX_LOCATION_INFO0_ */
+	__le32 rx_location_info1; /* %RX_LOCATION_INFO1_ */
+#ifdef CONFIG_ATH10K_SNOC
+	__le32 rx_location_info2; /* %RX_LOCATION_INFO2_ */
+#endif
 } __packed;
 
 enum rx_phy_ppdu_end_info0 {
@@ -1067,6 +1105,17 @@
 
 struct rx_ppdu_end_qca99x0 {
 	struct rx_pkt_end rx_pkt_end;
+	__le32 rx_location_info; /* %RX_LOCATION_INFO_ */
+	struct rx_phy_ppdu_end rx_phy_ppdu_end;
+	__le32 rx_timing_offset; /* %RX_PPDU_END_RX_TIMING_OFFSET_ */
+	__le32 rx_info; /* %RX_PPDU_END_RX_INFO_ */
+	__le16 bb_length;
+	__le16 info1; /* %RX_PPDU_END_INFO1_ */
+} __packed;
+
+struct rx_ppdu_end_qca9984 {
+	struct rx_pkt_end rx_pkt_end;
+	struct rx_location_info rx_location_info;
 	struct rx_phy_ppdu_end rx_phy_ppdu_end;
 	__le32 rx_timing_offset; /* %RX_PPDU_END_RX_TIMING_OFFSET_ */
 	__le32 rx_info; /* %RX_PPDU_END_RX_INFO_ */
@@ -1074,12 +1123,34 @@
 	__le16 info1; /* %RX_PPDU_END_INFO1_ */
 } __packed;
 
+struct rx_timing_offset {
+	__le32 timing_offset;
+};
+
+struct rx_ppdu_end_wcn3990 {
+	__le32 reserved_info_0;
+	__le32 reserved_info_1;
+	__le32 rx_antenna_info;
+	__le32 rx_coex_info;
+	__le32 rx_mpdu_cnt_info;
+	__le32 rx_bb_length;
+	__le64 phy_timestamp_tx;
+	struct rx_pkt_end rx_pkt_end;
+	struct rx_phy_ppdu_end rx_phy_ppdu_end;
+	struct rx_timing_offset rx_timing_offset;
+	struct rx_location_info rx_location_info;
+} __packed;
+
 struct rx_ppdu_end {
 	struct rx_ppdu_end_common common;
 	union {
 		struct rx_ppdu_end_qca988x qca988x;
 		struct rx_ppdu_end_qca6174 qca6174;
 		struct rx_ppdu_end_qca99x0 qca99x0;
+		struct rx_ppdu_end_qca9984 qca9984;
+#ifdef CONFIG_ATH10K_SNOC
+		struct rx_ppdu_end_wcn3990 wcn3990;
+#endif
 	} __packed;
 } __packed;
 
diff -ruw linux-4.4.115/drivers/net/wireless/ath/ath10k/swap.c linux-4.4.115-fbx/drivers/net/wireless/ath/ath10k/swap.c
--- linux-4.4.115/drivers/net/wireless/ath/ath10k/swap.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/net/wireless/ath/ath10k/swap.c	2019-01-22 16:16:25.423263793 +0100
@@ -135,26 +135,17 @@
 }
 
 int ath10k_swap_code_seg_configure(struct ath10k *ar,
-				   enum ath10k_swap_code_seg_bin_type type)
+				   const struct ath10k_fw_file *fw_file)
 {
 	int ret;
 	struct ath10k_swap_code_seg_info *seg_info = NULL;
 
-	switch (type) {
-	case ATH10K_SWAP_CODE_SEG_BIN_TYPE_FW:
-		if (!ar->swap.firmware_swap_code_seg_info)
+	if (!fw_file->firmware_swap_code_seg_info)
 			return 0;
 
 		ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot found firmware code swap binary\n");
-		seg_info = ar->swap.firmware_swap_code_seg_info;
-		break;
-	default:
-	case ATH10K_SWAP_CODE_SEG_BIN_TYPE_OTP:
-	case ATH10K_SWAP_CODE_SEG_BIN_TYPE_UTF:
-		ath10k_warn(ar, "ignoring unknown code swap binary type %d\n",
-			    type);
-		return 0;
-	}
+
+	seg_info = fw_file->firmware_swap_code_seg_info;
 
 	ret = ath10k_bmi_write_memory(ar, seg_info->target_addr,
 				      &seg_info->seg_hw_info,
@@ -168,32 +159,41 @@
 	return 0;
 }
 
-void ath10k_swap_code_seg_release(struct ath10k *ar)
+void ath10k_swap_code_seg_release(struct ath10k *ar,
+				  struct ath10k_fw_file *fw_file)
 {
-	ath10k_swap_code_seg_free(ar, ar->swap.firmware_swap_code_seg_info);
-	ar->swap.firmware_codeswap_data = NULL;
-	ar->swap.firmware_codeswap_len = 0;
-	ar->swap.firmware_swap_code_seg_info = NULL;
+	ath10k_swap_code_seg_free(ar, fw_file->firmware_swap_code_seg_info);
+
+	/* FIXME: these two assignments look to be in the wrong place! Shouldn't
+	 * they be in ath10k_core_free_firmware_files() like the rest?
+	 */
+	fw_file->codeswap_data = NULL;
+	fw_file->codeswap_len = 0;
+
+	fw_file->firmware_swap_code_seg_info = NULL;
 }
 
-int ath10k_swap_code_seg_init(struct ath10k *ar)
+int ath10k_swap_code_seg_init(struct ath10k *ar, struct ath10k_fw_file *fw_file)
 {
 	int ret;
 	struct ath10k_swap_code_seg_info *seg_info;
+	const void *codeswap_data;
+	size_t codeswap_len;
+
+	codeswap_data = fw_file->codeswap_data;
+	codeswap_len = fw_file->codeswap_len;
 
-	if (!ar->swap.firmware_codeswap_len || !ar->swap.firmware_codeswap_data)
+	if (!codeswap_len || !codeswap_data)
 		return 0;
 
-	seg_info = ath10k_swap_code_seg_alloc(ar,
-					      ar->swap.firmware_codeswap_len);
+	seg_info = ath10k_swap_code_seg_alloc(ar, codeswap_len);
 	if (!seg_info) {
 		ath10k_err(ar, "failed to allocate fw code swap segment\n");
 		return -ENOMEM;
 	}
 
 	ret = ath10k_swap_code_seg_fill(ar, seg_info,
-					ar->swap.firmware_codeswap_data,
-					ar->swap.firmware_codeswap_len);
+					codeswap_data, codeswap_len);
 
 	if (ret) {
 		ath10k_warn(ar, "failed to initialize fw code swap segment: %d\n",
@@ -202,7 +202,7 @@
 		return ret;
 	}
 
-	ar->swap.firmware_swap_code_seg_info = seg_info;
+	fw_file->firmware_swap_code_seg_info = seg_info;
 
 	return 0;
 }
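
With the refactor above, the swap helpers operate on an explicit struct ath10k_fw_file instead of ar->swap, so the same code serves both normal and UTF firmware images. A hedged sketch of the resulting call flow, with error handling trimmed and names taken from the prototypes in swap.h:

static int fw_file_swap_setup(struct ath10k *ar,
			      struct ath10k_fw_file *fw_file)
{
	int ret;

	/* Parse and allocate the swap segment from fw_file, if any. */
	ret = ath10k_swap_code_seg_init(ar, fw_file);
	if (ret)
		return ret;

	/* Push the segment descriptor to the target over BMI. */
	ret = ath10k_swap_code_seg_configure(ar, fw_file);
	if (ret) {
		ath10k_swap_code_seg_release(ar, fw_file);
		return ret;
	}

	return 0;
}
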
diff -ruw linux-4.4.115/drivers/net/wireless/ath/ath10k/swap.h linux-4.4.115-fbx/drivers/net/wireless/ath/ath10k/swap.h
--- linux-4.4.115/drivers/net/wireless/ath/ath10k/swap.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/net/wireless/ath/ath10k/swap.h	2019-01-22 16:16:25.423263793 +0100
@@ -23,6 +23,8 @@
 /* Currently only one swap segment is supported */
 #define ATH10K_SWAP_CODE_SEG_NUM_SUPPORTED	1
 
+struct ath10k_fw_file;
+
 struct ath10k_swap_code_seg_tlv {
 	__le32 address;
 	__le32 length;
@@ -39,12 +41,6 @@
 	struct ath10k_swap_code_seg_tail tail;
 } __packed;
 
-enum ath10k_swap_code_seg_bin_type {
-	 ATH10K_SWAP_CODE_SEG_BIN_TYPE_OTP,
-	 ATH10K_SWAP_CODE_SEG_BIN_TYPE_FW,
-	 ATH10K_SWAP_CODE_SEG_BIN_TYPE_UTF,
-};
-
 struct ath10k_swap_code_seg_hw_info {
 	/* Swap binary image size */
 	__le32 swap_size;
@@ -65,8 +61,10 @@
 };
 
 int ath10k_swap_code_seg_configure(struct ath10k *ar,
-				   enum ath10k_swap_code_seg_bin_type type);
-void ath10k_swap_code_seg_release(struct ath10k *ar);
-int ath10k_swap_code_seg_init(struct ath10k *ar);
+				   const struct ath10k_fw_file *fw_file);
+void ath10k_swap_code_seg_release(struct ath10k *ar,
+				  struct ath10k_fw_file *fw_file);
+int ath10k_swap_code_seg_init(struct ath10k *ar,
+			      struct ath10k_fw_file *fw_file);
 
 #endif
diff -ruw linux-4.4.115/drivers/net/wireless/ath/ath10k/targaddrs.h linux-4.4.115-fbx/drivers/net/wireless/ath/ath10k/targaddrs.h
--- linux-4.4.115/drivers/net/wireless/ath/ath10k/targaddrs.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/net/wireless/ath/ath10k/targaddrs.h	2019-01-22 16:16:25.423263793 +0100
@@ -405,7 +405,7 @@
  * 1. target firmware would check magic number and if it's a match, firmware
  *    would consider the bits[0:15] are valid and base on that to calculate
  *    the end of DRAM. Early allocation would be located at that area and
- *    may be reclaimed when necesary
+ *    may be reclaimed when necessary
  * 2. if no magic number is found, early allocation would happen at "_end"
  *    symbol of ROM which is located before the app-data and might NOT be
  *    re-claimable. If this is adopted, link script should keep this in
@@ -447,6 +447,9 @@
 #define QCA988X_BOARD_DATA_SZ     7168
 #define QCA988X_BOARD_EXT_DATA_SZ 0
 
+#define QCA9887_BOARD_DATA_SZ     7168
+#define QCA9887_BOARD_EXT_DATA_SZ 0
+
 #define QCA6174_BOARD_DATA_SZ     8192
 #define QCA6174_BOARD_EXT_DATA_SZ 0
 
@@ -456,4 +459,7 @@
 #define QCA99X0_BOARD_DATA_SZ	  12288
 #define QCA99X0_BOARD_EXT_DATA_SZ 0
 
+#define QCA4019_BOARD_DATA_SZ	  12064
+#define QCA4019_BOARD_EXT_DATA_SZ 0
+
 #endif /* __TARGADDRS_H__ */
diff -ruw linux-4.4.115/drivers/net/wireless/ath/ath10k/testmode.c linux-4.4.115-fbx/drivers/net/wireless/ath/ath10k/testmode.c
--- linux-4.4.115/drivers/net/wireless/ath/ath10k/testmode.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/net/wireless/ath/ath10k/testmode.c	2019-01-22 16:16:25.423263793 +0100
@@ -23,6 +23,7 @@
 #include "wmi.h"
 #include "hif.h"
 #include "hw.h"
+#include "core.h"
 
 #include "testmode_i.h"
 
@@ -45,7 +46,7 @@
 	int ret;
 
 	ath10k_dbg(ar, ATH10K_DBG_TESTMODE,
-		   "testmode event wmi cmd_id %d skb %p skb->len %d\n",
+		   "testmode event wmi cmd_id %d skb %pK skb->len %d\n",
 		   cmd_id, skb, skb->len);
 
 	ath10k_dbg_dump(ar, ATH10K_DBG_TESTMODE, NULL, "", skb->data, skb->len);
@@ -136,130 +137,18 @@
 		return ret;
 	}
 
-	return cfg80211_testmode_reply(skb);
-}
-
-static int ath10k_tm_fetch_utf_firmware_api_2(struct ath10k *ar)
-{
-	size_t len, magic_len, ie_len;
-	struct ath10k_fw_ie *hdr;
-	char filename[100];
-	__le32 *version;
-	const u8 *data;
-	int ie_id, ret;
-
-	snprintf(filename, sizeof(filename), "%s/%s",
-		 ar->hw_params.fw.dir, ATH10K_FW_UTF_API2_FILE);
-
-	/* load utf firmware image */
-	ret = request_firmware(&ar->testmode.utf, filename, ar->dev);
+	ret = nla_put_u32(skb, ATH10K_TM_ATTR_WMI_OP_VERSION,
+			  ar->normal_mode_fw.fw_file.wmi_op_version);
 	if (ret) {
-		ath10k_warn(ar, "failed to retrieve utf firmware '%s': %d\n",
-			    filename, ret);
+		kfree_skb(skb);
 		return ret;
 	}
 
-	data = ar->testmode.utf->data;
-	len = ar->testmode.utf->size;
-
-	/* FIXME: call release_firmware() in error cases */
-
-	/* magic also includes the null byte, check that as well */
-	magic_len = strlen(ATH10K_FIRMWARE_MAGIC) + 1;
-
-	if (len < magic_len) {
-		ath10k_err(ar, "utf firmware file is too small to contain magic\n");
-		ret = -EINVAL;
-		goto err;
-	}
-
-	if (memcmp(data, ATH10K_FIRMWARE_MAGIC, magic_len) != 0) {
-		ath10k_err(ar, "invalid firmware magic\n");
-		ret = -EINVAL;
-		goto err;
-	}
-
-	/* jump over the padding */
-	magic_len = ALIGN(magic_len, 4);
-
-	len -= magic_len;
-	data += magic_len;
-
-	/* loop elements */
-	while (len > sizeof(struct ath10k_fw_ie)) {
-		hdr = (struct ath10k_fw_ie *)data;
-
-		ie_id = le32_to_cpu(hdr->id);
-		ie_len = le32_to_cpu(hdr->len);
-
-		len -= sizeof(*hdr);
-		data += sizeof(*hdr);
-
-		if (len < ie_len) {
-			ath10k_err(ar, "invalid length for FW IE %d (%zu < %zu)\n",
-				   ie_id, len, ie_len);
-			ret = -EINVAL;
-			goto err;
-		}
-
-		switch (ie_id) {
-		case ATH10K_FW_IE_FW_VERSION:
-			if (ie_len > sizeof(ar->testmode.utf_version) - 1)
-				break;
-
-			memcpy(ar->testmode.utf_version, data, ie_len);
-			ar->testmode.utf_version[ie_len] = '\0';
-
-			ath10k_dbg(ar, ATH10K_DBG_TESTMODE,
-				   "testmode found fw utf version %s\n",
-				   ar->testmode.utf_version);
-			break;
-		case ATH10K_FW_IE_TIMESTAMP:
-			/* ignore timestamp, but don't warn about it either */
-			break;
-		case ATH10K_FW_IE_FW_IMAGE:
-			ath10k_dbg(ar, ATH10K_DBG_TESTMODE,
-				   "testmode found fw image ie (%zd B)\n",
-				   ie_len);
-
-			ar->testmode.utf_firmware_data = data;
-			ar->testmode.utf_firmware_len = ie_len;
-			break;
-		case ATH10K_FW_IE_WMI_OP_VERSION:
-			if (ie_len != sizeof(u32))
-				break;
-			version = (__le32 *)data;
-			ar->testmode.op_version = le32_to_cpup(version);
-			ath10k_dbg(ar, ATH10K_DBG_TESTMODE, "testmode found fw ie wmi op version %d\n",
-				   ar->testmode.op_version);
-			break;
-		default:
-			ath10k_warn(ar, "Unknown testmode FW IE: %u\n",
-				    le32_to_cpu(hdr->id));
-			break;
-		}
-		/* jump over the padding */
-		ie_len = ALIGN(ie_len, 4);
-
-		len -= ie_len;
-		data += ie_len;
-	}
-
-	if (!ar->testmode.utf_firmware_data || !ar->testmode.utf_firmware_len) {
-		ath10k_err(ar, "No ATH10K_FW_IE_FW_IMAGE found\n");
-		ret = -EINVAL;
-		goto err;
-	}
-
-	return 0;
-
-err:
-	release_firmware(ar->testmode.utf);
-
-	return ret;
+	return cfg80211_testmode_reply(skb);
 }
 
-static int ath10k_tm_fetch_utf_firmware_api_1(struct ath10k *ar)
+static int ath10k_tm_fetch_utf_firmware_api_1(struct ath10k *ar,
+					      struct ath10k_fw_file *fw_file)
 {
 	char filename[100];
 	int ret;
@@ -268,7 +157,7 @@
 		 ar->hw_params.fw.dir, ATH10K_FW_UTF_FILE);
 
 	/* load utf firmware image */
-	ret = request_firmware(&ar->testmode.utf, filename, ar->dev);
+	ret = request_firmware(&fw_file->firmware, filename, ar->dev);
 	if (ret) {
 		ath10k_warn(ar, "failed to retrieve utf firmware '%s': %d\n",
 			    filename, ret);
@@ -281,24 +170,34 @@
 	 * correct WMI interface.
 	 */
 
-	ar->testmode.op_version = ATH10K_FW_WMI_OP_VERSION_10_1;
-	ar->testmode.utf_firmware_data = ar->testmode.utf->data;
-	ar->testmode.utf_firmware_len = ar->testmode.utf->size;
+	fw_file->wmi_op_version = ATH10K_FW_WMI_OP_VERSION_10_1;
+	fw_file->htt_op_version = ATH10K_FW_HTT_OP_VERSION_10_1;
+	fw_file->firmware_data = fw_file->firmware->data;
+	fw_file->firmware_len = fw_file->firmware->size;
 
 	return 0;
 }
 
 static int ath10k_tm_fetch_firmware(struct ath10k *ar)
 {
+	struct ath10k_fw_components *utf_mode_fw;
+	struct ath10k_fw_file *fw_file;
 	int ret;
 
-	ret = ath10k_tm_fetch_utf_firmware_api_2(ar);
+	if (!ar->is_bmi) {
+		fw_file = &ar->testmode.utf_mode_fw.fw_file;
+		fw_file->wmi_op_version = ATH10K_FW_WMI_OP_VERSION_TLV;
+		fw_file->htt_op_version = ATH10K_FW_HTT_OP_VERSION_TLV;
+		return 0;
+	}
+	ret = ath10k_core_fetch_firmware_api_n(ar, ATH10K_FW_UTF_API2_FILE,
+					       &ar->testmode.utf_mode_fw.fw_file);
 	if (ret == 0) {
 		ath10k_dbg(ar, ATH10K_DBG_TESTMODE, "testmode using fw utf api 2");
-		return 0;
+		goto out;
 	}
 
-	ret = ath10k_tm_fetch_utf_firmware_api_1(ar);
+	ret = ath10k_tm_fetch_utf_firmware_api_1(ar, &ar->testmode.utf_mode_fw.fw_file);
 	if (ret) {
 		ath10k_err(ar, "failed to fetch utf firmware binary: %d", ret);
 		return ret;
@@ -306,6 +205,21 @@
 
 	ath10k_dbg(ar, ATH10K_DBG_TESTMODE, "testmode using utf api 1");
 
+out:
+	utf_mode_fw = &ar->testmode.utf_mode_fw;
+
+	/* Use the same board data file as the normal firmware uses (but
+	 * it's still "owned" by normal_mode_fw so we shouldn't free it).
+	 */
+	utf_mode_fw->board_data = ar->normal_mode_fw.board_data;
+	utf_mode_fw->board_len = ar->normal_mode_fw.board_len;
+
+	if (!utf_mode_fw->fw_file.otp_data) {
+		ath10k_info(ar, "utf.bin didn't contain otp binary, taking it from the normal mode firmware");
+		utf_mode_fw->fw_file.otp_data = ar->normal_mode_fw.fw_file.otp_data;
+		utf_mode_fw->fw_file.otp_len = ar->normal_mode_fw.fw_file.otp_len;
+	}
+
 	return 0;
 }
 
@@ -329,7 +243,7 @@
 		goto err;
 	}
 
-	if (WARN_ON(ar->testmode.utf != NULL)) {
+	if (WARN_ON(ar->testmode.utf_mode_fw.fw_file.firmware != NULL)) {
 		/* utf image is already downloaded, it shouldn't be */
 		ret = -EEXIST;
 		goto err;
@@ -341,30 +255,34 @@
 		goto err;
 	}
 
+	if (ar->testmode.utf_mode_fw.fw_file.codeswap_data &&
+	    ar->testmode.utf_mode_fw.fw_file.codeswap_len) {
+		ret = ath10k_swap_code_seg_init(ar,
+						&ar->testmode.utf_mode_fw.fw_file);
+		if (ret) {
+			ath10k_warn(ar,
+				    "failed to init utf code swap segment: %d\n",
+				    ret);
+			goto err_release_utf_mode_fw;
+		}
+	}
+
 	spin_lock_bh(&ar->data_lock);
 	ar->testmode.utf_monitor = true;
 	spin_unlock_bh(&ar->data_lock);
-	BUILD_BUG_ON(sizeof(ar->fw_features) !=
-		     sizeof(ar->testmode.orig_fw_features));
-
-	memcpy(ar->testmode.orig_fw_features, ar->fw_features,
-	       sizeof(ar->fw_features));
-	ar->testmode.orig_wmi_op_version = ar->wmi.op_version;
-	memset(ar->fw_features, 0, sizeof(ar->fw_features));
-
-	ar->wmi.op_version = ar->testmode.op_version;
 
 	ath10k_dbg(ar, ATH10K_DBG_TESTMODE, "testmode wmi version %d\n",
-		   ar->wmi.op_version);
+		   ar->testmode.utf_mode_fw.fw_file.wmi_op_version);
 
 	ret = ath10k_hif_power_up(ar);
 	if (ret) {
 		ath10k_err(ar, "failed to power up hif (testmode): %d\n", ret);
 		ar->state = ATH10K_STATE_OFF;
-		goto err_fw_features;
+		goto err_release_utf_mode_fw;
 	}
 
-	ret = ath10k_core_start(ar, ATH10K_FIRMWARE_MODE_UTF);
+	ret = ath10k_core_start(ar, ATH10K_FIRMWARE_MODE_UTF,
+				&ar->testmode.utf_mode_fw);
 	if (ret) {
 		ath10k_err(ar, "failed to start core (testmode): %d\n", ret);
 		ar->state = ATH10K_STATE_OFF;
@@ -373,8 +291,8 @@
 
 	ar->state = ATH10K_STATE_UTF;
 
-	if (strlen(ar->testmode.utf_version) > 0)
-		ver = ar->testmode.utf_version;
+	if (strlen(ar->testmode.utf_mode_fw.fw_file.fw_version) > 0)
+		ver = ar->testmode.utf_mode_fw.fw_file.fw_version;
 	else
 		ver = "API 1";
 
@@ -387,14 +305,14 @@
 err_power_down:
 	ath10k_hif_power_down(ar);
 
-err_fw_features:
-	/* return the original firmware features */
-	memcpy(ar->fw_features, ar->testmode.orig_fw_features,
-	       sizeof(ar->fw_features));
-	ar->wmi.op_version = ar->testmode.orig_wmi_op_version;
+err_release_utf_mode_fw:
+	if (ar->testmode.utf_mode_fw.fw_file.codeswap_data &&
+	    ar->testmode.utf_mode_fw.fw_file.codeswap_len)
+		ath10k_swap_code_seg_release(ar,
+					     &ar->testmode.utf_mode_fw.fw_file);
 
-	release_firmware(ar->testmode.utf);
-	ar->testmode.utf = NULL;
+	release_firmware(ar->testmode.utf_mode_fw.fw_file.firmware);
+	ar->testmode.utf_mode_fw.fw_file.firmware = NULL;
 
 err:
 	mutex_unlock(&ar->conf_mutex);
@@ -415,13 +333,13 @@
 
 	spin_unlock_bh(&ar->data_lock);
 
-	/* return the original firmware features */
-	memcpy(ar->fw_features, ar->testmode.orig_fw_features,
-	       sizeof(ar->fw_features));
-	ar->wmi.op_version = ar->testmode.orig_wmi_op_version;
+	if (ar->testmode.utf_mode_fw.fw_file.codeswap_data &&
+	    ar->testmode.utf_mode_fw.fw_file.codeswap_len)
+		ath10k_swap_code_seg_release(ar,
+					     &ar->testmode.utf_mode_fw.fw_file);
 
-	release_firmware(ar->testmode.utf);
-	ar->testmode.utf = NULL;
+	release_firmware(ar->testmode.utf_mode_fw.fw_file.firmware);
+	ar->testmode.utf_mode_fw.fw_file.firmware = NULL;
 
 	ar->state = ATH10K_STATE_OFF;
 }
@@ -479,7 +397,7 @@
 	cmd_id = nla_get_u32(tb[ATH10K_TM_ATTR_WMI_CMDID]);
 
 	ath10k_dbg(ar, ATH10K_DBG_TESTMODE,
-		   "testmode cmd wmi cmd_id %d buf %p buf_len %d\n",
+		   "testmode cmd wmi cmd_id %d buf %pK buf_len %d\n",
 		   cmd_id, buf, buf_len);
 
 	ath10k_dbg_dump(ar, ATH10K_DBG_TESTMODE, NULL, "", buf, buf_len);
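
The reworked UTF reply path above attaches the WMI op version attribute before completing the testmode request. A minimal sketch of that cfg80211 testmode reply pattern, with the helper name being hypothetical:

static int tm_reply_u32(struct ieee80211_hw *hw, u32 attr, u32 val)
{
	struct sk_buff *skb;
	int ret;

	skb = cfg80211_testmode_alloc_reply_skb(hw->wiphy,
						nla_total_size(sizeof(u32)));
	if (!skb)
		return -ENOMEM;

	ret = nla_put_u32(skb, attr, val);
	if (ret) {
		kfree_skb(skb);
		return ret;
	}

	/* Consumes skb and completes the pending testmode request. */
	return cfg80211_testmode_reply(skb);
}
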
diff -ruw linux-4.4.115/drivers/net/wireless/ath/ath10k/testmode_i.h linux-4.4.115-fbx/drivers/net/wireless/ath/ath10k/testmode_i.h
--- linux-4.4.115/drivers/net/wireless/ath/ath10k/testmode_i.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/net/wireless/ath/ath10k/testmode_i.h	2019-01-22 16:16:25.423263793 +0100
@@ -33,6 +33,7 @@
 	ATH10K_TM_ATTR_WMI_CMDID	= 3,
 	ATH10K_TM_ATTR_VERSION_MAJOR	= 4,
 	ATH10K_TM_ATTR_VERSION_MINOR	= 5,
+	ATH10K_TM_ATTR_WMI_OP_VERSION	= 6,
 
 	/* keep last */
 	__ATH10K_TM_ATTR_AFTER_LAST,
diff -ruw linux-4.4.115/drivers/net/wireless/ath/ath10k/thermal.c linux-4.4.115-fbx/drivers/net/wireless/ath/ath10k/thermal.c
--- linux-4.4.115/drivers/net/wireless/ath/ath10k/thermal.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/net/wireless/ath/ath10k/thermal.c	2019-01-22 16:16:25.423263793 +0100
@@ -187,12 +187,12 @@
 	/* Do not register hwmon device when temperature reading is not
 	 * supported by firmware
 	 */
-	if (ar->wmi.op_version != ATH10K_FW_WMI_OP_VERSION_10_2_4)
+	if (!(ar->wmi.ops->gen_pdev_get_temperature))
 		return 0;
 
 	/* Avoid linking error on devm_hwmon_device_register_with_groups, I
 	 * guess linux/hwmon.h is missing proper stubs. */
-	if (!config_enabled(CONFIG_HWMON))
+	if (!IS_REACHABLE(CONFIG_HWMON))
 		return 0;
 
 	hwmon_dev = devm_hwmon_device_register_with_groups(ar->dev,
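
IS_REACHABLE() is stricter than IS_ENABLED(): it is only true when the option's symbols can actually be linked from the current object, which is precisely the hwmon linking concern in the comment above. Simplified from include/linux/kconfig.h, a sketch rather than the verbatim definition:

/* True when the option is built in, or when both the option and the
 * code referencing it are modules -- otherwise the call would fail to
 * link even though the option is enabled.
 */
#define IS_REACHABLE(option) (IS_BUILTIN(option) || \
			      (IS_MODULE(option) && __is_defined(MODULE)))
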
diff -ruw linux-4.4.115/drivers/net/wireless/ath/ath10k/thermal.h linux-4.4.115-fbx/drivers/net/wireless/ath/ath10k/thermal.h
--- linux-4.4.115/drivers/net/wireless/ath/ath10k/thermal.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/net/wireless/ath/ath10k/thermal.h	2019-01-22 16:16:25.423263793 +0100
@@ -36,7 +36,7 @@
 	int temperature;
 };
 
-#ifdef CONFIG_THERMAL
+#if IS_REACHABLE(CONFIG_THERMAL)
 int ath10k_thermal_register(struct ath10k *ar);
 void ath10k_thermal_unregister(struct ath10k *ar);
 void ath10k_thermal_event_temperature(struct ath10k *ar, int temperature);
diff -ruw linux-4.4.115/drivers/net/wireless/ath/ath10k/trace.h linux-4.4.115-fbx/drivers/net/wireless/ath/ath10k/trace.h
--- linux-4.4.115/drivers/net/wireless/ath/ath10k/trace.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/net/wireless/ath/ath10k/trace.h	2019-10-29 09:26:24.461211183 +0100
@@ -250,6 +250,7 @@
 	TP_STRUCT__entry(
 		__string(device, dev_name(ar->dev))
 		__string(driver, dev_driver_string(ar->dev))
+		__field(u8, hw_type);
 		__field(size_t, buf_len)
 		__dynamic_array(u8, buf, buf_len)
 	),
@@ -257,14 +258,16 @@
 	TP_fast_assign(
 		__assign_str(device, dev_name(ar->dev));
 		__assign_str(driver, dev_driver_string(ar->dev));
+		__entry->hw_type = ar->hw_rev;
 		__entry->buf_len = buf_len;
 		memcpy(__get_dynamic_array(buf), buf, buf_len);
 	),
 
 	TP_printk(
-		"%s %s len %zu",
+		"%s %s %d len %zu",
 		__get_str(driver),
 		__get_str(device),
+		__entry->hw_type,
 		__entry->buf_len
 	)
 );
@@ -277,6 +280,7 @@
 	TP_STRUCT__entry(
 		__string(device, dev_name(ar->dev))
 		__string(driver, dev_driver_string(ar->dev))
+		__field(u8, hw_type);
 		__field(u16, buf_len)
 		__dynamic_array(u8, pktlog, buf_len)
 	),
@@ -284,14 +288,16 @@
 	TP_fast_assign(
 		__assign_str(device, dev_name(ar->dev));
 		__assign_str(driver, dev_driver_string(ar->dev));
+		__entry->hw_type = ar->hw_rev;
 		__entry->buf_len = buf_len;
 		memcpy(__get_dynamic_array(pktlog), buf, buf_len);
 	),
 
 	TP_printk(
-		"%s %s size %hu",
+		"%s %s %d size %hu",
 		__get_str(driver),
 		__get_str(device),
+		__entry->hw_type,
 		__entry->buf_len
 	 )
 );
@@ -440,6 +446,7 @@
 	TP_STRUCT__entry(
 		__string(device, dev_name(ar->dev))
 		__string(driver, dev_driver_string(ar->dev))
+		__field(u8, hw_type);
 		__field(u16, len)
 		__dynamic_array(u8, rxdesc, len)
 	),
@@ -447,14 +454,16 @@
 	TP_fast_assign(
 		__assign_str(device, dev_name(ar->dev));
 		__assign_str(driver, dev_driver_string(ar->dev));
+		__entry->hw_type = ar->hw_rev;
 		__entry->len = len;
 		memcpy(__get_dynamic_array(rxdesc), data, len);
 	),
 
 	TP_printk(
-		"%s %s rxdesc len %d",
+		"%s %s %d rxdesc len %d",
 		__get_str(driver),
 		__get_str(device),
+		__entry->hw_type,
 		__entry->len
 	 )
 );
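
All three trace.h hunks follow the same tracepoint recipe: declare the field in TP_STRUCT__entry(), capture it in TP_fast_assign(), and print it in TP_printk(). A toy event isolating just the added hw_type plumbing, illustrative and not part of the driver:

#include <linux/tracepoint.h>

TRACE_EVENT(ath10k_hw_type_demo,
	TP_PROTO(struct ath10k *ar),
	TP_ARGS(ar),

	TP_STRUCT__entry(
		__field(u8, hw_type)
	),

	TP_fast_assign(
		__entry->hw_type = ar->hw_rev;
	),

	TP_printk("hw_type %d", __entry->hw_type)
);
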
diff -ruw linux-4.4.115/drivers/net/wireless/ath/ath10k/txrx.c linux-4.4.115-fbx/drivers/net/wireless/ath/ath10k/txrx.c
--- linux-4.4.115/drivers/net/wireless/ath/ath10k/txrx.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/net/wireless/ath/ath10k/txrx.c	2019-01-22 16:16:25.427263830 +0100
@@ -23,7 +23,12 @@
 
 static void ath10k_report_offchan_tx(struct ath10k *ar, struct sk_buff *skb)
 {
-	if (!ATH10K_SKB_CB(skb)->htt.is_offchan)
+	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+
+	if (likely(!(info->flags & IEEE80211_TX_CTL_TX_OFFCHAN)))
+		return;
+
+	if (ath10k_mac_tx_frm_has_freq(ar))
 		return;
 
 	/* If the original wait_for_completion() timed out before
@@ -39,32 +44,30 @@
 	complete(&ar->offchan_tx_completed);
 	ar->offchan_tx_skb = NULL; /* just for sanity */
 
-	ath10k_dbg(ar, ATH10K_DBG_HTT, "completed offchannel skb %p\n", skb);
+	ath10k_dbg(ar, ATH10K_DBG_HTT, "completed offchannel skb %pK\n", skb);
 out:
 	spin_unlock_bh(&ar->data_lock);
 }
 
-void ath10k_txrx_tx_unref(struct ath10k_htt *htt,
+int ath10k_txrx_tx_unref(struct ath10k_htt *htt,
 			  const struct htt_tx_done *tx_done)
 {
 	struct ath10k *ar = htt->ar;
 	struct device *dev = ar->dev;
 	struct ieee80211_tx_info *info;
+	struct ieee80211_txq *txq;
 	struct ath10k_skb_cb *skb_cb;
+	struct ath10k_txq *artxq;
 	struct sk_buff *msdu;
-	struct ieee80211_hdr *hdr;
-	__le16 fc;
-	bool limit_mgmt_desc = false;
 
 	ath10k_dbg(ar, ATH10K_DBG_HTT,
-		   "htt tx completion msdu_id %u discard %d no_ack %d success %d\n",
-		   tx_done->msdu_id, !!tx_done->discard,
-		   !!tx_done->no_ack, !!tx_done->success);
+		   "htt tx completion msdu_id %u status %d\n",
+		   tx_done->msdu_id, tx_done->status);
 
 	if (tx_done->msdu_id >= htt->max_num_pending_tx) {
 		ath10k_warn(ar, "warning: msdu_id %d too big, ignoring\n",
 			    tx_done->msdu_id);
-		return;
+		return -EINVAL;
 	}
 
 	spin_lock_bh(&htt->tx_lock);
@@ -73,23 +76,23 @@
 		ath10k_warn(ar, "received tx completion for invalid msdu_id: %d\n",
 			    tx_done->msdu_id);
 		spin_unlock_bh(&htt->tx_lock);
-		return;
+		return -ENOENT;
 	}
 
-	hdr = (struct ieee80211_hdr *)msdu->data;
-	fc = hdr->frame_control;
+	skb_cb = ATH10K_SKB_CB(msdu);
+	txq = skb_cb->txq;
 
-	if (unlikely(ieee80211_is_mgmt(fc)) &&
-	    ar->hw_params.max_probe_resp_desc_thres)
-		limit_mgmt_desc = true;
+	if (txq) {
+		artxq = (void *)txq->drv_priv;
+		artxq->num_fw_queued--;
+	}
 
 	ath10k_htt_tx_free_msdu_id(htt, tx_done->msdu_id);
-	__ath10k_htt_tx_dec_pending(htt, limit_mgmt_desc);
+	ath10k_htt_tx_dec_pending(htt);
 	if (htt->num_pending_tx == 0)
 		wake_up(&htt->empty_tx_wq);
 	spin_unlock_bh(&htt->tx_lock);
 
-	skb_cb = ATH10K_SKB_CB(msdu);
 	dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
 
 	ath10k_report_offchan_tx(htt->ar, msdu);
@@ -98,22 +101,25 @@
 	memset(&info->status, 0, sizeof(info->status));
 	trace_ath10k_txrx_tx_unref(ar, tx_done->msdu_id);
 
-	if (tx_done->discard) {
+	if (tx_done->status == HTT_TX_COMPL_STATE_DISCARD) {
 		ieee80211_free_txskb(htt->ar->hw, msdu);
-		return;
+		return 0;
 	}
 
 	if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
 		info->flags |= IEEE80211_TX_STAT_ACK;
 
-	if (tx_done->no_ack)
+	if (tx_done->status == HTT_TX_COMPL_STATE_NOACK)
 		info->flags &= ~IEEE80211_TX_STAT_ACK;
 
-	if (tx_done->success && (info->flags & IEEE80211_TX_CTL_NO_ACK))
+	if ((tx_done->status == HTT_TX_COMPL_STATE_ACK) &&
+	    (info->flags & IEEE80211_TX_CTL_NO_ACK))
 		info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED;
 
 	ieee80211_tx_status(htt->ar->hw, msdu);
 	/* we do not own the msdu anymore */
+
+	return 0;
 }
 
 struct ath10k_peer *ath10k_peer_find(struct ath10k *ar, int vdev_id,
@@ -126,7 +132,7 @@
 	list_for_each_entry(peer, &ar->peers, list) {
 		if (peer->vdev_id != vdev_id)
 			continue;
-		if (memcmp(peer->addr, addr, ETH_ALEN))
+		if (!ether_addr_equal(peer->addr, addr))
 			continue;
 
 		return peer;
@@ -186,6 +192,13 @@
 	struct ath10k *ar = htt->ar;
 	struct ath10k_peer *peer;
 
+	if (ev->peer_id >= ATH10K_MAX_NUM_PEER_IDS) {
+		ath10k_warn(ar,
+			    "received htt peer map event with idx out of bounds: %hu\n",
+			    ev->peer_id);
+		return;
+	}
+
 	spin_lock_bh(&ar->data_lock);
 	peer = ath10k_peer_find(ar, ev->vdev_id, ev->addr);
 	if (!peer) {
@@ -202,6 +215,8 @@
 	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt peer map vdev %d peer %pM id %d\n",
 		   ev->vdev_id, ev->addr, ev->peer_id);
 
+	WARN_ON(ar->peer_map[ev->peer_id] && (ar->peer_map[ev->peer_id] != peer));
+	ar->peer_map[ev->peer_id] = peer;
 	set_bit(ev->peer_id, peer->peer_ids);
 exit:
 	spin_unlock_bh(&ar->data_lock);
@@ -213,6 +228,13 @@
 	struct ath10k *ar = htt->ar;
 	struct ath10k_peer *peer;
 
+	if (ev->peer_id >= ATH10K_MAX_NUM_PEER_IDS) {
+		ath10k_warn(ar,
+			    "received htt peer unmap event with idx out of bounds: %hu\n",
+			    ev->peer_id);
+		return;
+	}
+
 	spin_lock_bh(&ar->data_lock);
 	peer = ath10k_peer_find_by_id(ar, ev->peer_id);
 	if (!peer) {
@@ -224,6 +246,7 @@
 	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt peer unmap vdev %d peer %pM id %d\n",
 		   peer->vdev_id, peer->addr, ev->peer_id);
 
+	ar->peer_map[ev->peer_id] = NULL;
 	clear_bit(ev->peer_id, peer->peer_ids);
 
 	if (bitmap_empty(peer->peer_ids, ATH10K_MAX_NUM_PEER_IDS)) {
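
The bounds checks and ar->peer_map[] bookkeeping added above make constant-time lookup by firmware peer id possible, instead of walking ar->peers for every event. A hedged sketch of such a lookup helper (hypothetical name; the list-based ath10k_peer_find_by_id() remains in the driver):

static struct ath10k_peer *peer_map_lookup(struct ath10k *ar, u16 peer_id)
{
	lockdep_assert_held(&ar->data_lock);

	/* Same bounds check as the map/unmap handlers above. */
	if (peer_id >= ATH10K_MAX_NUM_PEER_IDS)
		return NULL;

	return ar->peer_map[peer_id];
}
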
diff -ruw linux-4.4.115/drivers/net/wireless/ath/ath10k/txrx.h linux-4.4.115-fbx/drivers/net/wireless/ath/ath10k/txrx.h
--- linux-4.4.115/drivers/net/wireless/ath/ath10k/txrx.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/net/wireless/ath/ath10k/txrx.h	2019-01-22 16:16:25.427263830 +0100
@@ -19,7 +19,7 @@
 
 #include "htt.h"
 
-void ath10k_txrx_tx_unref(struct ath10k_htt *htt,
+int ath10k_txrx_tx_unref(struct ath10k_htt *htt,
 			  const struct htt_tx_done *tx_done);
 
 struct ath10k_peer *ath10k_peer_find(struct ath10k *ar, int vdev_id,
diff -ruw linux-4.4.115/drivers/net/wireless/ath/ath10k/wmi.c linux-4.4.115-fbx/drivers/net/wireless/ath/ath10k/wmi.c
--- linux-4.4.115/drivers/net/wireless/ath/ath10k/wmi.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/net/wireless/ath/ath10k/wmi.c	2019-10-29 09:26:24.465211222 +0100
@@ -29,6 +29,9 @@
 #include "p2p.h"
 #include "hw.h"
 
+#define ATH10K_WMI_BARRIER_ECHO_ID 0xBA991E9
+#define ATH10K_WMI_BARRIER_TIMEOUT_HZ (3 * HZ)
+
 /* MAIN WMI cmd track */
 static struct wmi_cmd_map wmi_cmd_map = {
 	.init_cmdid = WMI_INIT_CMDID,
@@ -521,7 +524,8 @@
 	.vdev_filter_neighbor_rx_packets_cmdid = WMI_CMD_UNSUPPORTED,
 	.mu_cal_start_cmdid = WMI_CMD_UNSUPPORTED,
 	.set_cca_params_cmdid = WMI_CMD_UNSUPPORTED,
-	.pdev_bss_chan_info_request_cmdid = WMI_CMD_UNSUPPORTED,
+	.pdev_bss_chan_info_request_cmdid =
+		WMI_10_2_PDEV_BSS_CHAN_INFO_REQUEST_CMDID,
 };
 
 /* 10.4 WMI cmd track */
@@ -705,6 +709,7 @@
 	.set_cca_params_cmdid = WMI_10_4_SET_CCA_PARAMS_CMDID,
 	.pdev_bss_chan_info_request_cmdid =
 			WMI_10_4_PDEV_BSS_CHAN_INFO_REQUEST_CMDID,
+	.ext_resource_cfg_cmdid = WMI_10_4_EXT_RESOURCE_CFG_CMDID,
 };
 
 /* MAIN WMI VDEV param map */
@@ -780,6 +785,7 @@
 	.meru_vc = WMI_VDEV_PARAM_UNSUPPORTED,
 	.rx_decap_type = WMI_VDEV_PARAM_UNSUPPORTED,
 	.bw_nss_ratemask = WMI_VDEV_PARAM_UNSUPPORTED,
+	.set_tsf = WMI_VDEV_PARAM_UNSUPPORTED,
 };
 
 /* 10.X WMI VDEV param map */
@@ -855,6 +861,7 @@
 	.meru_vc = WMI_VDEV_PARAM_UNSUPPORTED,
 	.rx_decap_type = WMI_VDEV_PARAM_UNSUPPORTED,
 	.bw_nss_ratemask = WMI_VDEV_PARAM_UNSUPPORTED,
+	.set_tsf = WMI_VDEV_PARAM_UNSUPPORTED,
 };
 
 static struct wmi_vdev_param_map wmi_10_2_4_vdev_param_map = {
@@ -929,6 +936,7 @@
 	.meru_vc = WMI_VDEV_PARAM_UNSUPPORTED,
 	.rx_decap_type = WMI_VDEV_PARAM_UNSUPPORTED,
 	.bw_nss_ratemask = WMI_VDEV_PARAM_UNSUPPORTED,
+	.set_tsf = WMI_10X_VDEV_PARAM_TSF_INCREMENT,
 };
 
 static struct wmi_vdev_param_map wmi_10_4_vdev_param_map = {
@@ -1004,6 +1012,7 @@
 	.meru_vc = WMI_10_4_VDEV_PARAM_MERU_VC,
 	.rx_decap_type = WMI_10_4_VDEV_PARAM_RX_DECAP_TYPE,
 	.bw_nss_ratemask = WMI_10_4_VDEV_PARAM_BW_NSS_RATEMASK,
+	.set_tsf = WMI_10_4_VDEV_PARAM_TSF_INCREMENT,
 };
 
 static struct wmi_pdev_param_map wmi_pdev_param_map = {
@@ -1098,6 +1107,7 @@
 	.wapi_mbssid_offset = WMI_PDEV_PARAM_UNSUPPORTED,
 	.arp_srcaddr = WMI_PDEV_PARAM_UNSUPPORTED,
 	.arp_dstaddr = WMI_PDEV_PARAM_UNSUPPORTED,
+	.enable_btcoex = WMI_PDEV_PARAM_UNSUPPORTED,
 };
 
 static struct wmi_pdev_param_map wmi_10x_pdev_param_map = {
@@ -1193,6 +1203,7 @@
 	.wapi_mbssid_offset = WMI_PDEV_PARAM_UNSUPPORTED,
 	.arp_srcaddr = WMI_PDEV_PARAM_UNSUPPORTED,
 	.arp_dstaddr = WMI_PDEV_PARAM_UNSUPPORTED,
+	.enable_btcoex = WMI_PDEV_PARAM_UNSUPPORTED,
 };
 
 static struct wmi_pdev_param_map wmi_10_2_4_pdev_param_map = {
@@ -1288,6 +1299,7 @@
 	.wapi_mbssid_offset = WMI_PDEV_PARAM_UNSUPPORTED,
 	.arp_srcaddr = WMI_PDEV_PARAM_UNSUPPORTED,
 	.arp_dstaddr = WMI_PDEV_PARAM_UNSUPPORTED,
+	.enable_btcoex = WMI_PDEV_PARAM_UNSUPPORTED,
 };
 
 /* firmware 10.2 specific mappings */
@@ -1544,6 +1556,62 @@
 	.wapi_mbssid_offset = WMI_10_4_PDEV_PARAM_WAPI_MBSSID_OFFSET,
 	.arp_srcaddr = WMI_10_4_PDEV_PARAM_ARP_SRCADDR,
 	.arp_dstaddr = WMI_10_4_PDEV_PARAM_ARP_DSTADDR,
+	.enable_btcoex = WMI_10_4_PDEV_PARAM_ENABLE_BTCOEX,
+};
+
+static const struct wmi_peer_flags_map wmi_peer_flags_map = {
+	.auth = WMI_PEER_AUTH,
+	.qos = WMI_PEER_QOS,
+	.need_ptk_4_way = WMI_PEER_NEED_PTK_4_WAY,
+	.need_gtk_2_way = WMI_PEER_NEED_GTK_2_WAY,
+	.apsd = WMI_PEER_APSD,
+	.ht = WMI_PEER_HT,
+	.bw40 = WMI_PEER_40MHZ,
+	.stbc = WMI_PEER_STBC,
+	.ldbc = WMI_PEER_LDPC,
+	.dyn_mimops = WMI_PEER_DYN_MIMOPS,
+	.static_mimops = WMI_PEER_STATIC_MIMOPS,
+	.spatial_mux = WMI_PEER_SPATIAL_MUX,
+	.vht = WMI_PEER_VHT,
+	.bw80 = WMI_PEER_80MHZ,
+	.vht_2g = WMI_PEER_VHT_2G,
+	.pmf = WMI_PEER_PMF,
+};
+
+static const struct wmi_peer_flags_map wmi_10x_peer_flags_map = {
+	.auth = WMI_10X_PEER_AUTH,
+	.qos = WMI_10X_PEER_QOS,
+	.need_ptk_4_way = WMI_10X_PEER_NEED_PTK_4_WAY,
+	.need_gtk_2_way = WMI_10X_PEER_NEED_GTK_2_WAY,
+	.apsd = WMI_10X_PEER_APSD,
+	.ht = WMI_10X_PEER_HT,
+	.bw40 = WMI_10X_PEER_40MHZ,
+	.stbc = WMI_10X_PEER_STBC,
+	.ldbc = WMI_10X_PEER_LDPC,
+	.dyn_mimops = WMI_10X_PEER_DYN_MIMOPS,
+	.static_mimops = WMI_10X_PEER_STATIC_MIMOPS,
+	.spatial_mux = WMI_10X_PEER_SPATIAL_MUX,
+	.vht = WMI_10X_PEER_VHT,
+	.bw80 = WMI_10X_PEER_80MHZ,
+};
+
+static const struct wmi_peer_flags_map wmi_10_2_peer_flags_map = {
+	.auth = WMI_10_2_PEER_AUTH,
+	.qos = WMI_10_2_PEER_QOS,
+	.need_ptk_4_way = WMI_10_2_PEER_NEED_PTK_4_WAY,
+	.need_gtk_2_way = WMI_10_2_PEER_NEED_GTK_2_WAY,
+	.apsd = WMI_10_2_PEER_APSD,
+	.ht = WMI_10_2_PEER_HT,
+	.bw40 = WMI_10_2_PEER_40MHZ,
+	.stbc = WMI_10_2_PEER_STBC,
+	.ldbc = WMI_10_2_PEER_LDPC,
+	.dyn_mimops = WMI_10_2_PEER_DYN_MIMOPS,
+	.static_mimops = WMI_10_2_PEER_STATIC_MIMOPS,
+	.spatial_mux = WMI_10_2_PEER_SPATIAL_MUX,
+	.vht = WMI_10_2_PEER_VHT,
+	.bw80 = WMI_10_2_PEER_80MHZ,
+	.vht_2g = WMI_10_2_PEER_VHT_2G,
+	.pmf = WMI_10_2_PEER_PMF,
 };
 
 void ath10k_wmi_put_wmi_channel(struct wmi_channel *ch,
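
The per-ABI wmi_peer_flags_map tables above exist because the same logical peer capability sits at different bit positions in each firmware branch. A hedged sketch of how callers are expected to go through the map instead of hard-coding bits (assuming the driver publishes the active map as ar->wmi.peer_flags):

static u32 peer_assoc_base_flags(struct ath10k *ar, bool qos, bool ht)
{
	/* Always authorize; the numeric value differs per firmware. */
	u32 flags = ar->wmi.peer_flags->auth;

	if (qos)
		flags |= ar->wmi.peer_flags->qos;
	if (ht)
		flags |= ar->wmi.peer_flags->ht;

	return flags;
}
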
@@ -1573,6 +1641,7 @@
 	ch->max_power = arg->max_power;
 	ch->reg_power = arg->max_reg_power;
 	ch->antenna_max = arg->max_antenna_gain;
+	ch->max_tx_power = arg->max_power;
 
 	/* mode & flags share storage */
 	ch->mode = arg->mode;
@@ -1660,6 +1729,8 @@
 	struct ath10k *ar = arvif->ar;
 	struct ath10k_skb_cb *cb;
 	struct sk_buff *bcn;
+	bool dtim_zero;
+	bool deliver_cab;
 	int ret;
 
 	spin_lock_bh(&ar->data_lock);
@@ -1679,12 +1750,14 @@
 		arvif->beacon_state = ATH10K_BEACON_SENDING;
 		spin_unlock_bh(&ar->data_lock);
 
+		dtim_zero = !!(cb->flags & ATH10K_SKB_F_DTIM_ZERO);
+		deliver_cab = !!(cb->flags & ATH10K_SKB_F_DELIVER_CAB);
 		ret = ath10k_wmi_beacon_send_ref_nowait(arvif->ar,
 							arvif->vdev_id,
 							bcn->data, bcn->len,
 							cb->paddr,
-							cb->bcn.dtim_zero,
-							cb->bcn.deliver_cab);
+							dtim_zero,
+							deliver_cab);
 
 		spin_lock_bh(&ar->data_lock);
 
@@ -1726,6 +1799,9 @@
 {
 	int ret = -EOPNOTSUPP;
 
+	if (test_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags))
+		return -ESHUTDOWN;
+
 	might_sleep();
 
 	if (cmd_id == WMI_CMD_UNSUPPORTED) {
@@ -1755,16 +1831,26 @@
 static struct sk_buff *
 ath10k_wmi_op_gen_mgmt_tx(struct ath10k *ar, struct sk_buff *msdu)
 {
+	struct ath10k_skb_cb *cb = ATH10K_SKB_CB(msdu);
+	struct ath10k_vif *arvif;
 	struct wmi_mgmt_tx_cmd *cmd;
 	struct ieee80211_hdr *hdr;
 	struct sk_buff *skb;
 	int len;
+	u32 vdev_id;
 	u32 buf_len = msdu->len;
 	u16 fc;
 
 	hdr = (struct ieee80211_hdr *)msdu->data;
 	fc = le16_to_cpu(hdr->frame_control);
 
+	if (cb->vif) {
+		arvif = (void *)cb->vif->drv_priv;
+		vdev_id = arvif->vdev_id;
+	} else {
+		vdev_id = 0;
+	}
+
 	if (WARN_ON_ONCE(!ieee80211_is_mgmt(hdr->frame_control)))
 		return ERR_PTR(-EINVAL);
 
@@ -1786,7 +1872,7 @@
 
 	cmd = (struct wmi_mgmt_tx_cmd *)skb->data;
 
-	cmd->hdr.vdev_id = __cpu_to_le32(ATH10K_SKB_CB(msdu)->vdev_id);
+	cmd->hdr.vdev_id = __cpu_to_le32(vdev_id);
 	cmd->hdr.tx_rate = 0;
 	cmd->hdr.tx_power = 0;
 	cmd->hdr.buf_len = __cpu_to_le32(buf_len);
@@ -1794,7 +1880,7 @@
 	ether_addr_copy(cmd->hdr.peer_macaddr.addr, ieee80211_get_DA(hdr));
 	memcpy(cmd->buf, msdu->data, msdu->len);
 
-	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi mgmt tx skb %p len %d ftype %02x stype %02x\n",
+	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi mgmt tx skb %pK len %d ftype %02x stype %02x\n",
 		   msdu, skb->len, fc & IEEE80211_FCTL_FTYPE,
 		   fc & IEEE80211_FCTL_STYPE);
 	trace_ath10k_tx_hdr(ar, skb->data, skb->len);
@@ -2032,34 +2118,6 @@
 	return 0;
 }
 
-static inline enum ieee80211_band phy_mode_to_band(u32 phy_mode)
-{
-	enum ieee80211_band band;
-
-	switch (phy_mode) {
-	case MODE_11A:
-	case MODE_11NA_HT20:
-	case MODE_11NA_HT40:
-	case MODE_11AC_VHT20:
-	case MODE_11AC_VHT40:
-	case MODE_11AC_VHT80:
-		band = IEEE80211_BAND_5GHZ;
-		break;
-	case MODE_11G:
-	case MODE_11B:
-	case MODE_11GONLY:
-	case MODE_11NG_HT20:
-	case MODE_11NG_HT40:
-	case MODE_11AC_VHT20_2G:
-	case MODE_11AC_VHT40_2G:
-	case MODE_11AC_VHT80_2G:
-	default:
-		band = IEEE80211_BAND_2GHZ;
-	}
-
-	return band;
-}
-
 /* If keys are configured, HW decrypts all frames
  * with protected bit set. Mark such frames as decrypted.
  */
@@ -2100,10 +2158,13 @@
 	struct wmi_mgmt_rx_event_v1 *ev_v1;
 	struct wmi_mgmt_rx_event_v2 *ev_v2;
 	struct wmi_mgmt_rx_hdr_v1 *ev_hdr;
+	struct wmi_mgmt_rx_ext_info *ext_info;
 	size_t pull_len;
 	u32 msdu_len;
+	u32 len;
 
-	if (test_bit(ATH10K_FW_FEATURE_EXT_WMI_MGMT_RX, ar->fw_features)) {
+	if (test_bit(ATH10K_FW_FEATURE_EXT_WMI_MGMT_RX,
+		     ar->running_fw->fw_file.fw_features)) {
 		ev_v2 = (struct wmi_mgmt_rx_event_v2 *)skb->data;
 		ev_hdr = &ev_v2->hdr.v1;
 		pull_len = sizeof(*ev_v2);
@@ -2128,6 +2189,12 @@
 	if (skb->len < msdu_len)
 		return -EPROTO;
 
+	if (le32_to_cpu(arg->status) & WMI_RX_STATUS_EXT_INFO) {
+		len = ALIGN(le32_to_cpu(arg->buf_len), 4);
+		ext_info = (struct wmi_mgmt_rx_ext_info *)(skb->data + len);
+		memcpy(&arg->ext_info, ext_info,
+		       sizeof(struct wmi_mgmt_rx_ext_info));
+	}
 	/* the WMI buffer might've ended up being padded to 4 bytes due to HTC
 	 * trailer with credit update. Trim the excess garbage.
 	 */
@@ -2144,6 +2211,8 @@
 	struct wmi_10_4_mgmt_rx_hdr *ev_hdr;
 	size_t pull_len;
 	u32 msdu_len;
+	struct wmi_mgmt_rx_ext_info *ext_info;
+	u32 len;
 
 	ev = (struct wmi_10_4_mgmt_rx_event *)skb->data;
 	ev_hdr = &ev->hdr;
@@ -2164,12 +2233,113 @@
 	if (skb->len < msdu_len)
 		return -EPROTO;
 
+	if (le32_to_cpu(arg->status) & WMI_RX_STATUS_EXT_INFO) {
+		len = ALIGN(le32_to_cpu(arg->buf_len), 4);
+		ext_info = (struct wmi_mgmt_rx_ext_info *)(skb->data + len);
+		memcpy(&arg->ext_info, ext_info,
+		       sizeof(struct wmi_mgmt_rx_ext_info));
+	}
+
 	/* Make sure bytes added for padding are removed. */
 	skb_trim(skb, msdu_len);
 
 	return 0;
 }
 
+static bool ath10k_wmi_rx_is_decrypted(struct ath10k *ar,
+				       struct ieee80211_hdr *hdr)
+{
+	if (!ieee80211_has_protected(hdr->frame_control))
+		return false;
+
+	/* FW delivers WEP Shared Auth frame with Protected Bit set and
+	 * encrypted payload. However in case of PMF it delivers decrypted
+	 * frames with Protected Bit set.
+	 */
+	if (ieee80211_is_auth(hdr->frame_control))
+		return false;
+
+	/* qca99x0 based FW delivers broadcast or multicast management frames
+	 * (ex: group privacy action frames in mesh) as encrypted payload.
+	 */
+	if (is_multicast_ether_addr(ieee80211_get_DA(hdr)) &&
+	    ar->hw_params.sw_decrypt_mcast_mgmt)
+		return false;
+
+	return true;
+}
+
+int ath10k_wmi_tlv_event_peer_delete_resp(struct ath10k *ar,
+					  struct sk_buff *skb)
+{
+	int ret;
+	struct wmi_peer_delete_resp_ev_arg arg = {};
+
+	ret = ath10k_wmi_pull_peer_delete_resp(ar, skb, &arg);
+	if (ret) {
+		ath10k_warn(ar, "failed to parse peer delete resp: %d\n", ret);
+		dev_kfree_skb(skb);
+		return ret;
+	}
+	ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_TLV_PEER_DELETE_RESP_EVENTID\n");
+	complete(&ar->peer_delete_done);
+
+	return 0;
+}
+
+static int wmi_tlv_process_mgmt_tx_comp(struct ath10k *ar, u32 desc_id,
+					u32 status)
+{
+	struct ath10k_mgmt_tx_pkt_addr *pkt_addr;
+	struct ath10k_wmi *wmi = &ar->wmi;
+	struct ieee80211_tx_info *info;
+	struct sk_buff *msdu;
+	int ret = 0;
+
+	spin_lock_bh(&wmi->mgmt_tx_lock);
+	pkt_addr = idr_find(&wmi->mgmt_pending_tx, desc_id);
+	if (!pkt_addr) {
+		ath10k_warn(ar, "received mgmt tx completion for invalid msdu_id: %d\n",
+			    desc_id);
+		ret = -ENOENT;
+		goto tx_comp_process_done;
+	}
+
+	msdu = pkt_addr->vaddr;
+	dma_unmap_single(ar->dev, pkt_addr->paddr,
+			 msdu->len, DMA_FROM_DEVICE);
+	info = IEEE80211_SKB_CB(msdu);
+	if (!status)
+		info->flags |= IEEE80211_TX_STAT_ACK;
+	else
+		info->flags |= status;
+	ieee80211_tx_status_irqsafe(ar->hw, msdu);
+	ret = 0;
+
+tx_comp_process_done:
+	idr_remove(&wmi->mgmt_pending_tx, desc_id);
+	spin_unlock_bh(&wmi->mgmt_tx_lock);
+
+	return ret;
+}
+
+int ath10k_wmi_tlv_event_mgmt_tx_compl(struct ath10k *ar, struct sk_buff *skb)
+{
+	int ret;
+	struct wmi_tlv_mgmt_tx_compl_ev_arg arg;
+
+	ret = ath10k_wmi_pull_mgmt_tx_compl(ar, skb, &arg);
+	if (ret) {
+		ath10k_warn(ar, "failed to parse mgmt comp event: %d\n", ret);
+		return ret;
+	}
+
+	wmi_tlv_process_mgmt_tx_comp(ar, arg.desc_id, arg.status);
+	ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_TLV_MGMT_TX_COMPLETION_EVENTID\n");
+
+	return 0;
+}
+
 int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb)
 {
 	struct wmi_mgmt_rx_ev_arg arg = {};
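
wmi_tlv_process_mgmt_tx_comp() above resolves a descriptor id back to the skb through the mgmt_pending_tx IDR. Its transmit-side counterpart presumably registers the frame before sending; a hedged sketch of that registration (hypothetical helper, field names taken from the completion path):

static int mgmt_tx_register(struct ath10k *ar, struct sk_buff *msdu,
			    dma_addr_t paddr)
{
	struct ath10k_wmi *wmi = &ar->wmi;
	struct ath10k_mgmt_tx_pkt_addr *pkt_addr;
	int desc_id;

	pkt_addr = kmalloc(sizeof(*pkt_addr), GFP_ATOMIC);
	if (!pkt_addr)
		return -ENOMEM;

	pkt_addr->vaddr = msdu;
	pkt_addr->paddr = paddr;

	/* The returned id travels to firmware and comes back in the
	 * tx completion event as desc_id.
	 */
	spin_lock_bh(&wmi->mgmt_tx_lock);
	desc_id = idr_alloc(&wmi->mgmt_pending_tx, pkt_addr, 0,
			    0x10000, GFP_ATOMIC);
	spin_unlock_bh(&wmi->mgmt_tx_lock);

	if (desc_id < 0)
		kfree(pkt_addr);

	return desc_id;
}
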
@@ -2204,22 +2374,9 @@
 	ath10k_dbg(ar, ATH10K_DBG_MGMT,
 		   "event mgmt rx status %08x\n", rx_status);
 
-	if (test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags)) {
-		dev_kfree_skb(skb);
-		return 0;
-	}
-
-	if (rx_status & WMI_RX_STATUS_ERR_DECRYPT) {
-		dev_kfree_skb(skb);
-		return 0;
-	}
-
-	if (rx_status & WMI_RX_STATUS_ERR_KEY_CACHE_MISS) {
-		dev_kfree_skb(skb);
-		return 0;
-	}
-
-	if (rx_status & WMI_RX_STATUS_ERR_CRC) {
+	if ((test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags)) ||
+	    (rx_status & (WMI_RX_STATUS_ERR_DECRYPT |
+	    WMI_RX_STATUS_ERR_KEY_CACHE_MISS | WMI_RX_STATUS_ERR_CRC))) {
 		dev_kfree_skb(skb);
 		return 0;
 	}
@@ -2227,6 +2384,11 @@
 	if (rx_status & WMI_RX_STATUS_ERR_MIC)
 		status->flag |= RX_FLAG_MMIC_ERROR;
 
+	if (rx_status & WMI_RX_STATUS_EXT_INFO) {
+		status->mactime =
+			__le64_to_cpu(arg.ext_info.rx_mac_timestamp);
+		status->flag |= RX_FLAG_MACTIME_END;
+	}
 	/* Hardware can Rx CCK rates on 5GHz. In that case phy_mode is set to
 	 * MODE_11B. This means phy_mode is not a reliable source for the band
 	 * of mgmt rx.
@@ -2256,13 +2418,15 @@
 	hdr = (struct ieee80211_hdr *)skb->data;
 	fc = le16_to_cpu(hdr->frame_control);
 
+	/* Firmware is guaranteed to report all essential management frames via
+	 * WMI while it can deliver some extra via HTT. Since there can be
+	 * duplicates, split the reporting wrt monitor/sniffing.
+	 */
+	status->flag |= RX_FLAG_SKIP_MONITOR;
+
 	ath10k_wmi_handle_wep_reauth(ar, skb, status);
 
-	/* FW delivers WEP Shared Auth frame with Protected Bit set and
-	 * encrypted payload. However in case of PMF it delivers decrypted
-	 * frames with Protected Bit set. */
-	if (ieee80211_has_protected(hdr->frame_control) &&
-	    !ieee80211_is_auth(hdr->frame_control)) {
+	if (ath10k_wmi_rx_is_decrypted(ar, hdr)) {
 		status->flag |= RX_FLAG_DECRYPTED;
 
 		if (!ieee80211_is_action(hdr->frame_control) &&
@@ -2279,7 +2443,7 @@
 		ath10k_mac_handle_beacon(ar, skb);
 
 	ath10k_dbg(ar, ATH10K_DBG_MGMT,
-		   "event mgmt rx skb %p len %d ftype %02x stype %02x\n",
+		   "event mgmt rx skb %pK len %d ftype %02x stype %02x\n",
 		   skb, skb->len,
 		   fc & IEEE80211_FCTL_FTYPE, fc & IEEE80211_FCTL_STYPE);
 
@@ -2353,6 +2517,31 @@
 	return 0;
 }
 
+static void wlan_fill_survey_result(struct ath10k *ar,
+				    struct survey_info *survey,
+				    struct wmi_ch_info_ev_arg arg)
+{
+	u64 clock_freq;
+
+	if (!arg.mac_clk_mhz || !survey)
+		return;
+
+	clock_freq = arg.mac_clk_mhz * 1000;
+
+	memset(survey, 0, sizeof(*survey));
+
+	survey->noise = __le32_to_cpu(arg.noise_floor);
+	survey->time = __le32_to_cpu(arg.cycle_count) / clock_freq;
+	survey->time_busy = __le32_to_cpu(arg.rx_clear_count) / clock_freq;
+	survey->time_tx = __le32_to_cpu(arg.rx_clear_count) / clock_freq;
+
+	survey->filled = SURVEY_INFO_NOISE_DBM;
+	ar->ch_info_can_report_survey = true;
+
+	survey->filled |= (SURVEY_INFO_TIME | SURVEY_INFO_TIME_BUSY |
+			   SURVEY_INFO_TIME_TX);
+}
+
 void ath10k_wmi_event_chan_info(struct ath10k *ar, struct sk_buff *skb)
 {
 	struct wmi_ch_info_ev_arg arg = {};
@@ -2397,6 +2586,12 @@
 		goto exit;
 	}
 
+	if (QCA_REV_WCN3990(ar)) {
+		survey = &ar->survey[idx];
+		wlan_fill_survey_result(ar, survey, arg);
+		goto exit;
+	}
+
 	if (cmd_flags & WMI_CHAN_INFO_FLAG_COMPLETE) {
 		if (ar->ch_info_can_report_survey) {
 			survey = &ar->survey[idx];
@@ -2427,7 +2622,21 @@
 
 void ath10k_wmi_event_echo(struct ath10k *ar, struct sk_buff *skb)
 {
-	ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_ECHO_EVENTID\n");
+	struct wmi_echo_ev_arg arg = {};
+	int ret;
+
+	ret = ath10k_wmi_pull_echo_ev(ar, skb, &arg);
+	if (ret) {
+		ath10k_warn(ar, "failed to parse echo: %d\n", ret);
+		return;
+	}
+
+	ath10k_dbg(ar, ATH10K_DBG_WMI,
+		   "wmi event echo value 0x%08x\n",
+		   le32_to_cpu(arg.value));
+
+	if (le32_to_cpu(arg.value) == ATH10K_WMI_BARRIER_ECHO_ID)
+		complete(&ar->wmi.barrier);
 }
 
 int ath10k_wmi_event_debug_mesg(struct ath10k *ar, struct sk_buff *skb)
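
The echo handler above completes ar->wmi.barrier when the reserved ATH10K_WMI_BARRIER_ECHO_ID comes back, turning the echo command into a command-queue barrier. A hedged sketch of the waiting side, assuming an ath10k_wmi_echo() helper that sends WMI_ECHO_CMDID with the given value:

static int wmi_barrier(struct ath10k *ar)
{
	int ret;

	reinit_completion(&ar->wmi.barrier);

	ret = ath10k_wmi_echo(ar, ATH10K_WMI_BARRIER_ECHO_ID);
	if (ret)
		return ret;

	/* All commands queued before the echo have been consumed once
	 * the firmware echoes the magic value back.
	 */
	if (!wait_for_completion_timeout(&ar->wmi.barrier,
					 ATH10K_WMI_BARRIER_TIMEOUT_HZ))
		return -ETIMEDOUT;

	return 0;
}
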
@@ -2558,6 +2767,16 @@
 	dst->peer_tx_rate = __le32_to_cpu(src->peer_tx_rate);
 }
 
+static void
+ath10k_wmi_10_4_pull_peer_stats(const struct wmi_10_4_peer_stats *src,
+				struct ath10k_fw_stats_peer *dst)
+{
+	ether_addr_copy(dst->peer_macaddr, src->peer_macaddr.addr);
+	dst->peer_rssi = __le32_to_cpu(src->peer_rssi);
+	dst->peer_tx_rate = __le32_to_cpu(src->peer_tx_rate);
+	dst->peer_rx_rate = __le32_to_cpu(src->peer_rx_rate);
+}
+
 static int ath10k_wmi_main_op_pull_fw_stats(struct ath10k *ar,
 					    struct sk_buff *skb,
 					    struct ath10k_fw_stats *stats)
@@ -2808,11 +3027,17 @@
 	/* fw doesn't implement vdev stats */
 
 	for (i = 0; i < num_peer_stats; i++) {
-		const struct wmi_10_2_4_peer_stats *src;
+		const struct wmi_10_2_4_ext_peer_stats *src;
 		struct ath10k_fw_stats_peer *dst;
+		int stats_len;
+
+		if (test_bit(WMI_SERVICE_PEER_STATS, ar->wmi.svc_map))
+			stats_len = sizeof(struct wmi_10_2_4_ext_peer_stats);
+		else
+			stats_len = sizeof(struct wmi_10_2_4_peer_stats);
 
 		src = (void *)skb->data;
-		if (!skb_pull(skb, sizeof(*src)))
+		if (!skb_pull(skb, stats_len))
 			return -EPROTO;
 
 		dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
@@ -2822,6 +3047,9 @@
 		ath10k_wmi_pull_peer_stats(&src->common.old, dst);
 
 		dst->peer_rx_rate = __le32_to_cpu(src->common.peer_rx_rate);
+
+		if (ath10k_peer_stats_enabled(ar))
+			dst->rx_duration = __le32_to_cpu(src->rx_duration);
 		/* FIXME: expose 10.2 specific values */
 
 		list_add_tail(&dst->list, &stats->peers);
@@ -2839,6 +3067,8 @@
 	u32 num_pdev_ext_stats;
 	u32 num_vdev_stats;
 	u32 num_peer_stats;
+	u32 num_bcnflt_stats;
+	u32 stats_id;
 	int i;
 
 	if (!skb_pull(skb, sizeof(*ev)))
@@ -2848,6 +3078,8 @@
 	num_pdev_ext_stats = __le32_to_cpu(ev->num_pdev_ext_stats);
 	num_vdev_stats = __le32_to_cpu(ev->num_vdev_stats);
 	num_peer_stats = __le32_to_cpu(ev->num_peer_stats);
+	num_bcnflt_stats = __le32_to_cpu(ev->num_bcnflt_stats);
+	stats_id = __le32_to_cpu(ev->stats_id);
 
 	for (i = 0; i < num_pdev_stats; i++) {
 		const struct wmi_10_4_pdev_stats *src;
@@ -2898,15 +3130,46 @@
 		if (!dst)
 			continue;
 
-		ether_addr_copy(dst->peer_macaddr, src->peer_macaddr.addr);
-		dst->peer_rssi = __le32_to_cpu(src->peer_rssi);
-		dst->peer_tx_rate = __le32_to_cpu(src->peer_tx_rate);
-		dst->peer_rx_rate = __le32_to_cpu(src->peer_rx_rate);
-		/* FIXME: expose 10.4 specific values */
-
+		ath10k_wmi_10_4_pull_peer_stats(src, dst);
 		list_add_tail(&dst->list, &stats->peers);
 	}
 
+	for (i = 0; i < num_bcnflt_stats; i++) {
+		const struct wmi_10_4_bss_bcn_filter_stats *src;
+
+		src = (void *)skb->data;
+		if (!skb_pull(skb, sizeof(*src)))
+			return -EPROTO;
+
+		/* FIXME: expose values to userspace
+		 *
+		 * Note: even though this loop seems to do nothing, it is
+		 * required to parse the following sub-structures properly.
+		 */
+	}
+
+	if ((stats_id & WMI_10_4_STAT_PEER_EXTD) == 0)
+		return 0;
+
+	stats->extended = true;
+
+	for (i = 0; i < num_peer_stats; i++) {
+		const struct wmi_10_4_peer_extd_stats *src;
+		struct ath10k_fw_extd_stats_peer *dst;
+
+		src = (void *)skb->data;
+		if (!skb_pull(skb, sizeof(*src)))
+			return -EPROTO;
+
+		dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
+		if (!dst)
+			continue;
+
+		ether_addr_copy(dst->peer_macaddr, src->peer_macaddr.addr);
+		dst->rx_duration = __le32_to_cpu(src->rx_duration);
+		list_add_tail(&dst->list, &stats->peers_extd);
+	}
+
 	return 0;
 }
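The stats parsers above all share one shape: the event payload is a
sequence of fixed-size records concatenated back to back, so each record
must be pulled off the front of the skb even when its contents are ignored
(which is all the beacon-filter loop above does), otherwise every following
record would be misaligned. The recurring pattern, for reference:

	src = (void *)skb->data;
	if (!skb_pull(skb, sizeof(*src)))
		return -EPROTO;	/* event shorter than advertised */
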
 
@@ -2959,6 +3222,12 @@
 	complete(&ar->vdev_setup_done);
 }
 
+void ath10k_wmi_event_vdev_delete_resp(struct ath10k *ar, struct sk_buff *skb)
+{
+	ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_VDEV_DELETE_RESP_EVENTID\n");
+	complete(&ar->vdev_delete_done);
+}
+
 static int
 ath10k_wmi_op_pull_peer_kick_ev(struct ath10k *ar, struct sk_buff *skb,
 				struct wmi_peer_kick_ev_arg *arg)
@@ -3115,10 +3384,10 @@
 	memcpy(tim->virtual_map, arvif->u.ap.tim_bitmap, pvm_len);
 
 	if (tim->dtim_count == 0) {
-		ATH10K_SKB_CB(bcn)->bcn.dtim_zero = true;
+		ATH10K_SKB_CB(bcn)->flags |= ATH10K_SKB_F_DTIM_ZERO;
 
 		if (__le32_to_cpu(tim_info->tim_mcast) == 1)
-			ATH10K_SKB_CB(bcn)->bcn.deliver_cab = true;
+			ATH10K_SKB_CB(bcn)->flags |= ATH10K_SKB_F_DELIVER_CAB;
 	}
 
 	ath10k_dbg(ar, ATH10K_DBG_MGMT, "dtim %d/%d mcast %d pvmlen %d\n",
@@ -3130,7 +3399,7 @@
 				  struct sk_buff *bcn,
 				  const struct wmi_p2p_noa_info *noa)
 {
-	if (arvif->vdev_subtype != WMI_VDEV_SUBTYPE_P2P_GO)
+	if (!arvif->vif->p2p)
 		return;
 
 	ath10k_dbg(ar, ATH10K_DBG_MGMT, "noa changed: %d\n", noa->changed);
@@ -3190,6 +3459,50 @@
 	return 0;
 }
 
+static int ath10k_wmi_10_2_4_op_pull_swba_ev(struct ath10k *ar,
+					     struct sk_buff *skb,
+					     struct wmi_swba_ev_arg *arg)
+{
+	struct wmi_10_2_4_host_swba_event *ev = (void *)skb->data;
+	u32 map;
+	size_t i;
+
+	if (skb->len < sizeof(*ev))
+		return -EPROTO;
+
+	skb_pull(skb, sizeof(*ev));
+	arg->vdev_map = ev->vdev_map;
+
+	for (i = 0, map = __le32_to_cpu(ev->vdev_map); map; map >>= 1) {
+		if (!(map & BIT(0)))
+			continue;
+
+		/* If this happens, the firmware has changed and ath10k
+		 * should update the max size of the tim_info array.
+		 */
+		if (WARN_ON_ONCE(i == ARRAY_SIZE(arg->tim_info)))
+			break;
+
+		if (__le32_to_cpu(ev->bcn_info[i].tim_info.tim_len) >
+		     sizeof(ev->bcn_info[i].tim_info.tim_bitmap)) {
+			ath10k_warn(ar, "refusing to parse invalid swba structure\n");
+			return -EPROTO;
+		}
+
+		arg->tim_info[i].tim_len = ev->bcn_info[i].tim_info.tim_len;
+		arg->tim_info[i].tim_mcast = ev->bcn_info[i].tim_info.tim_mcast;
+		arg->tim_info[i].tim_bitmap =
+				ev->bcn_info[i].tim_info.tim_bitmap;
+		arg->tim_info[i].tim_changed =
+				ev->bcn_info[i].tim_info.tim_changed;
+		arg->tim_info[i].tim_num_ps_pending =
+				ev->bcn_info[i].tim_info.tim_num_ps_pending;
+		i++;
+	}
+
+	return 0;
+}
+
 static int ath10k_wmi_10_4_op_pull_swba_ev(struct ath10k *ar,
 					   struct sk_buff *skb,
 					   struct wmi_swba_ev_arg *arg)
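The vdev_map walk in the 10.2.4 SWBA parser above is worth spelling out:
the map is shifted right once per iteration, but the bcn_info index i only
advances for vdevs whose bit is set, because (as the indexing assumes) the
firmware packs one bcn_info entry per set bit rather than one per vdev id.
Reduced to its skeleton:

	for (i = 0, map = vdev_map; map; map >>= 1) {
		if (!(map & BIT(0)))
			continue;	/* no beacon for this vdev */
		/* consume bcn_info[i] */
		i++;
	}
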
@@ -3312,6 +3625,12 @@
 			continue;
 		}
 
+		/* mac80211 would have already asked us to stop beaconing and
+		 * bring the vdev down, so continue in that case
+		 */
+		if (!arvif->is_up)
+			continue;
+
 		/* There are no completions for beacons so wait for next SWBA
 		 * before telling mac80211 to decrement CSA counter
 		 *
@@ -3361,7 +3680,6 @@
 				ath10k_warn(ar, "failed to map beacon: %d\n",
 					    ret);
 				dev_kfree_skb_any(bcn);
-				ret = -EIO;
 				goto skip;
 			}
 
@@ -3538,7 +3856,7 @@
 		   phyerr->tsf_timestamp, tsf, buf_len);
 
 	/* Skip event if DFS disabled */
-	if (!config_enabled(CONFIG_ATH10K_DFS_CERTIFIED))
+	if (!IS_ENABLED(CONFIG_ATH10K_DFS_CERTIFIED))
 		return;
 
 	ATH10K_DFS_STAT_INC(ar, pulses_total);
@@ -3811,19 +4129,20 @@
 
 		left_len -= buf_len;
 
-		switch (phy_err_code) {
-		case PHY_ERROR_RADAR:
+		if ((phy_err_code == PHY_ERROR_RADAR) ||
+		    (hdr_arg.phy_err_mask0 &
+				WMI_PHY_ERROR_MASK0_RADAR)) {
 			ath10k_wmi_event_dfs(ar, &phyerr_arg, tsf);
-			break;
-		case PHY_ERROR_SPECTRAL_SCAN:
+		} else if ((phy_err_code ==
+				PHY_ERROR_SPECTRAL_SCAN) ||
+			   (hdr_arg.phy_err_mask0 &
+				WMI_PHY_ERROR_MASK0_SPECTRAL_SCAN)) {
 			ath10k_wmi_event_spectral_scan(ar, &phyerr_arg, tsf);
-			break;
-		case PHY_ERROR_FALSE_RADAR_EXT:
+		} else if ((phy_err_code == PHY_ERROR_FALSE_RADAR_EXT) ||
+			   (hdr_arg.phy_err_mask0 &
+				WMI_PHY_ERROR_MASK0_FALSE_RADAR_EXT)) {
 			ath10k_wmi_event_dfs(ar, &phyerr_arg, tsf);
 			ath10k_wmi_event_spectral_scan(ar, &phyerr_arg, tsf);
-			break;
-		default:
-			break;
 		}
 
 		phyerr = phyerr + phyerr_arg.hdr_len + buf_len;
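Unlike the old switch on a single phy_err_code, 10.4-class firmware can
report the error class as a bit mask, so each branch above accepts either
encoding and fans out to the same handlers. For illustration:

	/* a radar report from 10.4-style firmware might carry:
	 *   hdr_arg.phy_err_mask0 == WMI_PHY_ERROR_MASK0_RADAR
	 * while older firmware sets phy_err_code == PHY_ERROR_RADAR;
	 * both must reach ath10k_wmi_event_dfs()
	 */
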
@@ -3949,6 +4268,7 @@
 		return;
 	}
 
+	ar->wow.wakeup_reason = ev.wake_reason;
 	ath10k_dbg(ar, ATH10K_DBG_WMI, "wow wakeup host reason %s\n",
 		   wow_reason(ev.wake_reason));
 }
@@ -4258,34 +4578,58 @@
 	ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_VDEV_RESUME_REQ_EVENTID\n");
 }
 
-static int ath10k_wmi_alloc_host_mem(struct ath10k *ar, u32 req_id,
+static int ath10k_wmi_alloc_chunk(struct ath10k *ar, u32 req_id,
 				     u32 num_units, u32 unit_len)
 {
 	dma_addr_t paddr;
-	u32 pool_size;
+	u32 pool_size = 0;
 	int idx = ar->wmi.num_mem_chunks;
+	void *vaddr = NULL;
 
-	pool_size = num_units * round_up(unit_len, 4);
+	if (ar->wmi.num_mem_chunks == ARRAY_SIZE(ar->wmi.mem_chunks))
+		return -ENOMEM;
 
+	while (!vaddr && num_units) {
+		pool_size = num_units * round_up(unit_len, 4);
 	if (!pool_size)
 		return -EINVAL;
 
-	ar->wmi.mem_chunks[idx].vaddr = dma_alloc_coherent(ar->dev,
-							   pool_size,
-							   &paddr,
-							   GFP_KERNEL);
-	if (!ar->wmi.mem_chunks[idx].vaddr) {
-		ath10k_warn(ar, "failed to allocate memory chunk\n");
-		return -ENOMEM;
+		vaddr = kzalloc(pool_size, GFP_KERNEL | __GFP_NOWARN);
+		if (!vaddr)
+			num_units /= 2;
 	}
 
-	memset(ar->wmi.mem_chunks[idx].vaddr, 0, pool_size);
+	if (!num_units)
+		return -ENOMEM;
+
+	paddr = dma_map_single(ar->dev, vaddr, pool_size, DMA_TO_DEVICE);
+	if (dma_mapping_error(ar->dev, paddr)) {
+		kfree(vaddr);
+		return -ENOMEM;
+	}
 
+	ar->wmi.mem_chunks[idx].vaddr = vaddr;
 	ar->wmi.mem_chunks[idx].paddr = paddr;
 	ar->wmi.mem_chunks[idx].len = pool_size;
 	ar->wmi.mem_chunks[idx].req_id = req_id;
 	ar->wmi.num_mem_chunks++;
 
+	return num_units;
+}
+
+static int ath10k_wmi_alloc_host_mem(struct ath10k *ar, u32 req_id,
+				     u32 num_units, u32 unit_len)
+{
+	int ret;
+
+	while (num_units) {
+		ret = ath10k_wmi_alloc_chunk(ar, req_id, num_units, unit_len);
+		if (ret < 0)
+			return ret;
+
+		num_units -= ret;
+	}
+
 	return 0;
 }
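Taken together, the two functions above replace one large
dma_alloc_coherent() with a retrying scheme: ath10k_wmi_alloc_chunk()
halves num_units until a kzalloc() succeeds, maps the buffer for DMA, and
returns how many units the chunk actually covers, while
ath10k_wmi_alloc_host_mem() loops until the firmware's full request is
satisfied. In caller's-eye pseudocode (names as above):

	while (num_units) {
		ret = ath10k_wmi_alloc_chunk(ar, req_id, num_units, unit_len);
		if (ret < 0)
			return ret;	/* nothing allocatable at all */
		num_units -= ret;	/* ret = units placed in this chunk */
	}
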
 
@@ -4450,10 +4794,6 @@
 	ath10k_dbg_dump(ar, ATH10K_DBG_WMI, NULL, "wmi svc: ",
 			arg.service_map, arg.service_map_len);
 
-	/* only manually set fw features when not using FW IE format */
-	if (ar->fw_api == 1 && ar->fw_version_build > 636)
-		set_bit(ATH10K_FW_FEATURE_EXT_WMI_MGMT_RX, ar->fw_features);
-
 	if (ar->num_rf_chains > ar->max_spatial_stream) {
 		ath10k_warn(ar, "hardware advertises support for more spatial streams than it should (%d > %d)\n",
 			    ar->num_rf_chains, ar->max_spatial_stream);
@@ -4483,10 +4823,16 @@
 	}
 
 	if (test_bit(WMI_SERVICE_PEER_CACHING, ar->wmi.svc_map)) {
-		ar->max_num_peers = TARGET_10_4_NUM_QCACHE_PEERS_MAX +
-				    TARGET_10_4_NUM_VDEVS;
+		if (test_bit(ATH10K_FW_FEATURE_PEER_FLOW_CONTROL,
+			     ar->running_fw->fw_file.fw_features))
+			ar->num_active_peers = TARGET_10_4_QCACHE_ACTIVE_PEERS_PFC +
+					       ar->max_num_vdevs;
+		else
 		ar->num_active_peers = TARGET_10_4_QCACHE_ACTIVE_PEERS +
-				       TARGET_10_4_NUM_VDEVS;
+					       ar->max_num_vdevs;
+
+		ar->max_num_peers = TARGET_10_4_NUM_QCACHE_PEERS_MAX +
+				    ar->max_num_vdevs;
 		ar->num_tids = ar->num_active_peers * 2;
 		ar->max_num_stations = TARGET_10_4_NUM_QCACHE_PEERS_MAX;
 	}
@@ -4600,6 +4946,50 @@
 	return 0;
 }
 
+static int ath10k_wmi_op_pull_echo_ev(struct ath10k *ar,
+				      struct sk_buff *skb,
+				      struct wmi_echo_ev_arg *arg)
+{
+	struct wmi_echo_event *ev = (void *)skb->data;
+
+	arg->value = ev->value;
+
+	return 0;
+}
+
+void
+ath10k_generate_mac_addr_auto(struct ath10k *ar, struct wmi_rdy_ev_arg *arg)
+{
+	unsigned int soc_serial_num;
+	u8 bdata_mac_addr[ETH_ALEN];
+	u8 udef_mac_addr[] = {0x00, 0x0A, 0xF5, 0x00, 0x00, 0x00};
+
+	soc_serial_num = socinfo_get_serial_number();
+	if (!soc_serial_num)
+		return;
+
+	if (arg->mac_addr) {
+		ether_addr_copy(ar->base_mac_addr, arg->mac_addr);
+		ether_addr_copy(bdata_mac_addr, arg->mac_addr);
+		soc_serial_num &= 0x00ffffff;
+		bdata_mac_addr[3] = (soc_serial_num >> 16) & 0xff;
+		bdata_mac_addr[4] = (soc_serial_num >> 8) & 0xff;
+		bdata_mac_addr[5] = soc_serial_num & 0xff;
+		ether_addr_copy(ar->mac_addr, bdata_mac_addr);
+	} else {
+		/* If mac address not encoded in wlan board data,
+		 * Auto-generate mac address using device serial
+		 * number and user defined mac address 'udef_mac_addr'.
+		 */
+		udef_mac_addr[3] = (soc_serial_num >> 16) & 0xff;
+		udef_mac_addr[4] = (soc_serial_num >> 8) & 0xff;
+		udef_mac_addr[5] = soc_serial_num & 0xff;
+		ether_addr_copy(ar->base_mac_addr, udef_mac_addr);
+		udef_mac_addr[2] = (soc_serial_num >> 24) & 0xff;
+		ether_addr_copy(ar->mac_addr, udef_mac_addr);
+	}
+}
+
 int ath10k_wmi_event_ready(struct ath10k *ar, struct sk_buff *skb)
 {
 	struct wmi_rdy_ev_arg arg = {};
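For ath10k_generate_mac_addr_auto() above, a worked example of both
branches, assuming SoC serial number 0x00abcdef:

	/* board data present, MAC 11:22:33:44:55:66:
	 *   bytes 3..5 <- serial bits 23..0   ->  11:22:33:ab:cd:ef
	 * no board-data MAC, fixed 00:0a:f5 prefix:
	 *   bytes 3..5 <- serial bits 23..0   (base: 00:0a:f5:ab:cd:ef)
	 *   byte  2    <- serial bits 31..24, zero here
	 *                                     ->  00:0a:00:ab:cd:ef
	 */
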
@@ -4618,7 +5008,11 @@
 		   arg.mac_addr,
 		   __le32_to_cpu(arg.status));
 
+	if (QCA_REV_WCN3990(ar))
+		ath10k_generate_mac_addr_auto(ar, &arg);
+	else
 	ether_addr_copy(ar->mac_addr, arg.mac_addr);
+
 	complete(&ar->wmi.unified_ready);
 	return 0;
 }
@@ -4635,6 +5029,58 @@
 	return 0;
 }
 
+static int ath10k_wmi_event_pdev_bss_chan_info(struct ath10k *ar,
+					       struct sk_buff *skb)
+{
+	struct wmi_pdev_bss_chan_info_event *ev;
+	struct survey_info *survey;
+	u64 busy, total, tx, rx, rx_bss;
+	u32 freq, noise_floor;
+	u32 cc_freq_hz = ar->hw_params.channel_counters_freq_hz;
+	int idx;
+
+	ev = (struct wmi_pdev_bss_chan_info_event *)skb->data;
+	if (WARN_ON(skb->len < sizeof(*ev)))
+		return -EPROTO;
+
+	freq        = __le32_to_cpu(ev->freq);
+	noise_floor = __le32_to_cpu(ev->noise_floor);
+	busy        = __le64_to_cpu(ev->cycle_busy);
+	total       = __le64_to_cpu(ev->cycle_total);
+	tx          = __le64_to_cpu(ev->cycle_tx);
+	rx          = __le64_to_cpu(ev->cycle_rx);
+	rx_bss      = __le64_to_cpu(ev->cycle_rx_bss);
+
+	ath10k_dbg(ar, ATH10K_DBG_WMI,
+		   "wmi event pdev bss chan info:\n freq: %d noise: %d cycle: busy %llu total %llu tx %llu rx %llu rx_bss %llu\n",
+		   freq, noise_floor, busy, total, tx, rx, rx_bss);
+
+	spin_lock_bh(&ar->data_lock);
+	idx = freq_to_idx(ar, freq);
+	if (idx >= ARRAY_SIZE(ar->survey)) {
+		ath10k_warn(ar, "bss chan info: invalid frequency %d (idx %d out of bounds)\n",
+			    freq, idx);
+		goto exit;
+	}
+
+	survey = &ar->survey[idx];
+
+	survey->noise     = noise_floor;
+	survey->time      = div_u64(total, cc_freq_hz);
+	survey->time_busy = div_u64(busy, cc_freq_hz);
+	survey->time_rx   = div_u64(rx_bss, cc_freq_hz);
+	survey->time_tx   = div_u64(tx, cc_freq_hz);
+	survey->filled   |= (SURVEY_INFO_NOISE_DBM |
+			     SURVEY_INFO_TIME |
+			     SURVEY_INFO_TIME_BUSY |
+			     SURVEY_INFO_TIME_RX |
+			     SURVEY_INFO_TIME_TX);
+exit:
+	spin_unlock_bh(&ar->data_lock);
+	complete(&ar->bss_survey_done);
+	return 0;
+}
+
 static void ath10k_wmi_op_rx(struct ath10k *ar, struct sk_buff *skb)
 {
 	struct wmi_cmd_hdr *cmd_hdr;
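In the BSS channel info handler above the cycle counters are 64-bit, so the
conversions go through div_u64(); a plain '/' on u64 operands would fail to
link on 32-bit hosts, where the compiler emits a call to a division helper
the kernel does not provide. The per-field conversion is the same one used
for the 32-bit chan-info counters, e.g.:

	survey->time_busy = div_u64(busy, cc_freq_hz);
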
@@ -4880,6 +5326,7 @@
 {
 	struct wmi_cmd_hdr *cmd_hdr;
 	enum wmi_10_2_event_id id;
+	bool consumed;
 
 	cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
 	id = MS(__le32_to_cpu(cmd_hdr->cmd_id), WMI_CMD_HDR_CMD_ID);
@@ -4889,6 +5336,18 @@
 
 	trace_ath10k_wmi_event(ar, id, skb->data, skb->len);
 
+	consumed = ath10k_tm_event_wmi(ar, id, skb);
+
+	/* The ready event must be handled normally even in UTF mode so
+	 * that we know the UTF firmware has booted; all other WMI events
+	 * are simply passed through to testmode.
+	 */
+	if (consumed && id != WMI_10_2_READY_EVENTID) {
+		ath10k_dbg(ar, ATH10K_DBG_WMI,
+			   "wmi testmode consumed 0x%x\n", id);
+		goto out;
+	}
+
 	switch (id) {
 	case WMI_10_2_MGMT_RX_EVENTID:
 		ath10k_wmi_event_mgmt_rx(ar, skb);
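The consumed/forwarding logic above assumes the usual ath10k testmode
contract: ath10k_tm_event_wmi() returns true only when UTF (factory test)
mode is active and testmode took ownership of the event, in which case the
normal handlers must be skipped, with the single exception of the ready
event, which also has to complete the driver's boot wait.
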
@@ -4978,6 +5437,9 @@
 	case WMI_10_2_PDEV_TEMPERATURE_EVENTID:
 		ath10k_wmi_event_temperature(ar, skb);
 		break;
+	case WMI_10_2_PDEV_BSS_CHAN_INFO_EVENTID:
+		ath10k_wmi_event_pdev_bss_chan_info(ar, skb);
+		break;
 	case WMI_10_2_RTT_KEEPALIVE_EVENTID:
 	case WMI_10_2_GPIO_INPUT_EVENTID:
 	case WMI_10_2_PEER_RATECODE_LIST_EVENTID:
@@ -5001,6 +5463,7 @@
 {
 	struct wmi_cmd_hdr *cmd_hdr;
 	enum wmi_10_4_event_id id;
+	bool consumed;
 
 	cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
 	id = MS(__le32_to_cpu(cmd_hdr->cmd_id), WMI_CMD_HDR_CMD_ID);
@@ -5010,6 +5473,18 @@
 
 	trace_ath10k_wmi_event(ar, id, skb->data, skb->len);
 
+	consumed = ath10k_tm_event_wmi(ar, id, skb);
+
+	/* The ready event must be handled normally even in UTF mode so
+	 * that we know the UTF firmware has booted; all other WMI events
+	 * are simply passed through to testmode.
+	 */
+	if (consumed && id != WMI_10_4_READY_EVENTID) {
+		ath10k_dbg(ar, ATH10K_DBG_WMI,
+			   "wmi testmode consumed 0x%x\n", id);
+		goto out;
+	}
+
 	switch (id) {
 	case WMI_10_4_MGMT_RX_EVENTID:
 		ath10k_wmi_event_mgmt_rx(ar, skb);
@@ -5039,6 +5514,9 @@
 	case WMI_10_4_PEER_STA_KICKOUT_EVENTID:
 		ath10k_wmi_event_peer_sta_kickout(ar, skb);
 		break;
+	case WMI_10_4_ROAM_EVENTID:
+		ath10k_wmi_event_roam(ar, skb);
+		break;
 	case WMI_10_4_HOST_SWBA_EVENTID:
 		ath10k_wmi_event_host_swba(ar, skb);
 		break;
@@ -5055,12 +5533,20 @@
 		ath10k_wmi_event_vdev_stopped(ar, skb);
 		break;
 	case WMI_10_4_WOW_WAKEUP_HOST_EVENTID:
+	case WMI_10_4_PEER_RATECODE_LIST_EVENTID:
+	case WMI_10_4_WDS_PEER_EVENTID:
 		ath10k_dbg(ar, ATH10K_DBG_WMI,
 			   "received event id %d not implemented\n", id);
 		break;
 	case WMI_10_4_UPDATE_STATS_EVENTID:
 		ath10k_wmi_event_update_stats(ar, skb);
 		break;
+	case WMI_10_4_PDEV_TEMPERATURE_EVENTID:
+		ath10k_wmi_event_temperature(ar, skb);
+		break;
+	case WMI_10_4_PDEV_BSS_CHAN_INFO_EVENTID:
+		ath10k_wmi_event_pdev_bss_chan_info(ar, skb);
+		break;
 	default:
 		ath10k_warn(ar, "Unknown eventid: %d\n", id);
 		break;
@@ -5379,9 +5865,16 @@
 	u32 len, val, features;
 
 	config.num_vdevs = __cpu_to_le32(TARGET_10X_NUM_VDEVS);
-	config.num_peers = __cpu_to_le32(TARGET_10X_NUM_PEERS);
 	config.num_peer_keys = __cpu_to_le32(TARGET_10X_NUM_PEER_KEYS);
+
+	if (ath10k_peer_stats_enabled(ar)) {
+		config.num_peers = __cpu_to_le32(TARGET_10X_TX_STATS_NUM_PEERS);
+		config.num_tids = __cpu_to_le32(TARGET_10X_TX_STATS_NUM_TIDS);
+	} else {
+		config.num_peers = __cpu_to_le32(TARGET_10X_NUM_PEERS);
 	config.num_tids = __cpu_to_le32(TARGET_10X_NUM_TIDS);
+	}
+
 	config.ast_skid_limit = __cpu_to_le32(TARGET_10X_AST_SKID_LIMIT);
 	config.tx_chain_mask = __cpu_to_le32(TARGET_10X_TX_CHAIN_MASK);
 	config.rx_chain_mask = __cpu_to_le32(TARGET_10X_RX_CHAIN_MASK);
@@ -5431,8 +5924,17 @@
 	cmd = (struct wmi_init_cmd_10_2 *)buf->data;
 
 	features = WMI_10_2_RX_BATCH_MODE;
-	if (test_bit(WMI_SERVICE_COEX_GPIO, ar->wmi.svc_map))
+
+	if (test_bit(ATH10K_FLAG_BTCOEX, &ar->dev_flags) &&
+	    test_bit(WMI_SERVICE_COEX_GPIO, ar->wmi.svc_map))
 		features |= WMI_10_2_COEX_GPIO;
+
+	if (ath10k_peer_stats_enabled(ar))
+		features |= WMI_10_2_PEER_STATS;
+
+	if (test_bit(WMI_SERVICE_BSS_CHANNEL_INFO_64, ar->wmi.svc_map))
+		features |= WMI_10_2_BSS_CHAN_INFO;
+
 	cmd->resource_config.feature_mask = __cpu_to_le32(features);
 
 	memcpy(&cmd->resource_config.common, &config, sizeof(config));
@@ -5459,8 +5961,8 @@
 			__cpu_to_le32(TARGET_10_4_NUM_OFFLOAD_REORDER_BUFFS);
 	config.num_peer_keys  = __cpu_to_le32(TARGET_10_4_NUM_PEER_KEYS);
 	config.ast_skid_limit = __cpu_to_le32(TARGET_10_4_AST_SKID_LIMIT);
-	config.tx_chain_mask  = __cpu_to_le32(TARGET_10_4_TX_CHAIN_MASK);
-	config.rx_chain_mask  = __cpu_to_le32(TARGET_10_4_RX_CHAIN_MASK);
+	config.tx_chain_mask  = __cpu_to_le32(ar->hw_params.tx_chain_mask);
+	config.rx_chain_mask  = __cpu_to_le32(ar->hw_params.rx_chain_mask);
 
 	config.rx_timeout_pri[0] = __cpu_to_le32(TARGET_10_4_RX_TIMEOUT_LO_PRI);
 	config.rx_timeout_pri[1] = __cpu_to_le32(TARGET_10_4_RX_TIMEOUT_LO_PRI);
@@ -5491,7 +5993,7 @@
 	config.vow_config = __cpu_to_le32(TARGET_10_4_VOW_CONFIG);
 	config.gtk_offload_max_vdev =
 			__cpu_to_le32(TARGET_10_4_GTK_OFFLOAD_MAX_VDEV);
-	config.num_msdu_desc = __cpu_to_le32(TARGET_10_4_NUM_MSDU_DESC);
+	config.num_msdu_desc = __cpu_to_le32(ar->htt.max_num_pending_tx);
 	config.max_frag_entries = __cpu_to_le32(TARGET_10_4_11AC_TX_MAX_FRAGS);
 	config.max_peer_ext_stats =
 			__cpu_to_le32(TARGET_10_4_MAX_PEER_EXT_STATS);
@@ -5651,9 +6153,8 @@
 		bssids->num_bssid = __cpu_to_le32(arg->n_bssids);
 
 		for (i = 0; i < arg->n_bssids; i++)
-			memcpy(&bssids->bssid_list[i],
-			       arg->bssids[i].bssid,
-			       ETH_ALEN);
+			ether_addr_copy(bssids->bssid_list[i].addr,
+					arg->bssids[i].bssid);
 
 		ptr += sizeof(*bssids);
 		ptr += sizeof(struct wmi_mac_addr) * arg->n_bssids;
@@ -5746,6 +6247,8 @@
 		| WMI_SCAN_EVENT_BSS_CHANNEL
 		| WMI_SCAN_EVENT_FOREIGN_CHANNEL
 		| WMI_SCAN_EVENT_DEQUEUED;
+	if (QCA_REV_WCN3990(ar))
+		arg->scan_ctrl_flags = ar->fw_flags->flags;
 	arg->scan_ctrl_flags |= WMI_SCAN_CHAN_STAT_EVENT;
 	arg->n_bssids = 1;
 	arg->bssids[0].bssid = "\xFF\xFF\xFF\xFF\xFF\xFF";
@@ -6328,6 +6831,16 @@
 	cmd->info0 = __cpu_to_le32(info0);
 }
 
+static void
+ath10k_wmi_peer_assoc_fill_10_4(struct ath10k *ar, void *buf,
+				const struct wmi_peer_assoc_complete_arg *arg)
+{
+	struct wmi_10_4_peer_assoc_complete_cmd *cmd = buf;
+
+	ath10k_wmi_peer_assoc_fill_10_2(ar, buf, arg);
+	cmd->peer_bw_rxnss_override = 0;
+}
+
 static int
 ath10k_wmi_peer_assoc_check_arg(const struct wmi_peer_assoc_complete_arg *arg)
 {
@@ -6417,6 +6930,31 @@
 }
 
 static struct sk_buff *
+ath10k_wmi_10_4_op_gen_peer_assoc(struct ath10k *ar,
+				  const struct wmi_peer_assoc_complete_arg *arg)
+{
+	size_t len = sizeof(struct wmi_10_4_peer_assoc_complete_cmd);
+	struct sk_buff *skb;
+	int ret;
+
+	ret = ath10k_wmi_peer_assoc_check_arg(arg);
+	if (ret)
+		return ERR_PTR(ret);
+
+	skb = ath10k_wmi_alloc_skb(ar, len);
+	if (!skb)
+		return ERR_PTR(-ENOMEM);
+
+	ath10k_wmi_peer_assoc_fill_10_4(ar, skb->data, arg);
+
+	ath10k_dbg(ar, ATH10K_DBG_WMI,
+		   "wmi peer assoc vdev %d addr %pM (%s)\n",
+		   arg->vdev_id, arg->addr,
+		   arg->peer_reassoc ? "reassociate" : "new");
+	return skb;
+}
+
+static struct sk_buff *
 ath10k_wmi_10_2_op_gen_pdev_get_temperature(struct ath10k *ar)
 {
 	struct sk_buff *skb;
@@ -6429,6 +6967,26 @@
 	return skb;
 }
 
+static struct sk_buff *
+ath10k_wmi_10_2_op_gen_pdev_bss_chan_info(struct ath10k *ar,
+					  enum wmi_bss_survey_req_type type)
+{
+	struct wmi_pdev_chan_info_req_cmd *cmd;
+	struct sk_buff *skb;
+
+	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
+	if (!skb)
+		return ERR_PTR(-ENOMEM);
+
+	cmd = (struct wmi_pdev_chan_info_req_cmd *)skb->data;
+	cmd->type = __cpu_to_le32(type);
+
+	ath10k_dbg(ar, ATH10K_DBG_WMI,
+		   "wmi pdev bss info request type %d\n", type);
+
+	return skb;
+}
+
 /* This function assumes the beacon is already DMA mapped */
 static struct sk_buff *
 ath10k_wmi_op_gen_beacon_dma(struct ath10k *ar, u32 vdev_id, const void *bcn,
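The request generator above pairs with the event handler added earlier
(ath10k_wmi_event_pdev_bss_chan_info(), which completes
ar->bss_survey_done). A plausible caller, assuming a wmi-ops wrapper named
ath10k_wmi_pdev_bss_chan_info_request() exists for this op:

	reinit_completion(&ar->bss_survey_done);
	ret = ath10k_wmi_pdev_bss_chan_info_request(ar,
				WMI_BSS_SURVEY_REQ_TYPE_READ_CLEAR);
	if (!ret)
		wait_for_completion_timeout(&ar->bss_survey_done, 3 * HZ);

READ_CLEAR resets the firmware counters after reporting, so consecutive
surveys measure disjoint intervals; plain READ leaves them accumulating.
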
@@ -6536,7 +7094,7 @@
 }
 
 static struct sk_buff *
-ath10k_wmi_op_gen_dbglog_cfg(struct ath10k *ar, u32 module_enable,
+ath10k_wmi_op_gen_dbglog_cfg(struct ath10k *ar, u64 module_enable,
 			     u32 log_level)
 {
 	struct wmi_dbglog_cfg_cmd *cmd;
@@ -6574,6 +7132,44 @@
 }
 
 static struct sk_buff *
+ath10k_wmi_10_4_op_gen_dbglog_cfg(struct ath10k *ar, u64 module_enable,
+				  u32 log_level)
+{
+	struct wmi_10_4_dbglog_cfg_cmd *cmd;
+	struct sk_buff *skb;
+	u32 cfg;
+
+	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
+	if (!skb)
+		return ERR_PTR(-ENOMEM);
+
+	cmd = (struct wmi_10_4_dbglog_cfg_cmd *)skb->data;
+
+	if (module_enable) {
+		cfg = SM(log_level,
+			 ATH10K_DBGLOG_CFG_LOG_LVL);
+	} else {
+		/* set back defaults, all modules with WARN level */
+		cfg = SM(ATH10K_DBGLOG_LEVEL_WARN,
+			 ATH10K_DBGLOG_CFG_LOG_LVL);
+		module_enable = ~0;
+	}
+
+	cmd->module_enable = __cpu_to_le64(module_enable);
+	cmd->module_valid = __cpu_to_le64(~0);
+	cmd->config_enable = __cpu_to_le32(cfg);
+	cmd->config_valid = __cpu_to_le32(ATH10K_DBGLOG_CFG_LOG_LVL_MASK);
+
+	ath10k_dbg(ar, ATH10K_DBG_WMI,
+		   "wmi dbglog cfg modules 0x%016llx 0x%016llx config %08x %08x\n",
+		   __le64_to_cpu(cmd->module_enable),
+		   __le64_to_cpu(cmd->module_valid),
+		   __le32_to_cpu(cmd->config_enable),
+		   __le32_to_cpu(cmd->config_valid));
+	return skb;
+}
+
+static struct sk_buff *
 ath10k_wmi_op_gen_pktlog_enable(struct ath10k *ar, u32 ev_bitmap)
 {
 	struct wmi_pdev_pktlog_enable_cmd *cmd;
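In the 10.4 dbglog generator above, SM() is assumed to be ath10k's usual
shift-and-mask helper, roughly #define SM(v, f) (((v) << f##_LSB) &
f##_MASK), so the log level is placed into the LOG_LVL bit-field of
config_enable while config_valid marks which bits the firmware should
honor:

	/* e.g. request WARN level for all modules (the fallback path) */
	cfg = SM(ATH10K_DBGLOG_LEVEL_WARN, ATH10K_DBGLOG_CFG_LOG_LVL);
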
@@ -7007,6 +7603,9 @@
 			"Peer TX rate", peer->peer_tx_rate);
 	len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
 			"Peer RX rate", peer->peer_rx_rate);
+	len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+			"Peer RX duration", peer->rx_duration);
+
 	len += scnprintf(buf + len, buf_len - len, "\n");
 	*length = len;
 }
@@ -7232,6 +7831,135 @@
 		buf[len] = 0;
 }
 
+int ath10k_wmi_op_get_vdev_subtype(struct ath10k *ar,
+				   enum wmi_vdev_subtype subtype)
+{
+	switch (subtype) {
+	case WMI_VDEV_SUBTYPE_NONE:
+		return WMI_VDEV_SUBTYPE_LEGACY_NONE;
+	case WMI_VDEV_SUBTYPE_P2P_DEVICE:
+		return WMI_VDEV_SUBTYPE_LEGACY_P2P_DEV;
+	case WMI_VDEV_SUBTYPE_P2P_CLIENT:
+		return WMI_VDEV_SUBTYPE_LEGACY_P2P_CLI;
+	case WMI_VDEV_SUBTYPE_P2P_GO:
+		return WMI_VDEV_SUBTYPE_LEGACY_P2P_GO;
+	case WMI_VDEV_SUBTYPE_PROXY_STA:
+		return WMI_VDEV_SUBTYPE_LEGACY_PROXY_STA;
+	case WMI_VDEV_SUBTYPE_MESH_11S:
+	case WMI_VDEV_SUBTYPE_MESH_NON_11S:
+		return -ENOTSUPP;
+	}
+	return -ENOTSUPP;
+}
+
+static int ath10k_wmi_10_2_4_op_get_vdev_subtype(struct ath10k *ar,
+						 enum wmi_vdev_subtype subtype)
+{
+	switch (subtype) {
+	case WMI_VDEV_SUBTYPE_NONE:
+		return WMI_VDEV_SUBTYPE_10_2_4_NONE;
+	case WMI_VDEV_SUBTYPE_P2P_DEVICE:
+		return WMI_VDEV_SUBTYPE_10_2_4_P2P_DEV;
+	case WMI_VDEV_SUBTYPE_P2P_CLIENT:
+		return WMI_VDEV_SUBTYPE_10_2_4_P2P_CLI;
+	case WMI_VDEV_SUBTYPE_P2P_GO:
+		return WMI_VDEV_SUBTYPE_10_2_4_P2P_GO;
+	case WMI_VDEV_SUBTYPE_PROXY_STA:
+		return WMI_VDEV_SUBTYPE_10_2_4_PROXY_STA;
+	case WMI_VDEV_SUBTYPE_MESH_11S:
+		return WMI_VDEV_SUBTYPE_10_2_4_MESH_11S;
+	case WMI_VDEV_SUBTYPE_MESH_NON_11S:
+		return -ENOTSUPP;
+	}
+	return -ENOTSUPP;
+}
+
+static int ath10k_wmi_10_4_op_get_vdev_subtype(struct ath10k *ar,
+					       enum wmi_vdev_subtype subtype)
+{
+	switch (subtype) {
+	case WMI_VDEV_SUBTYPE_NONE:
+		return WMI_VDEV_SUBTYPE_10_4_NONE;
+	case WMI_VDEV_SUBTYPE_P2P_DEVICE:
+		return WMI_VDEV_SUBTYPE_10_4_P2P_DEV;
+	case WMI_VDEV_SUBTYPE_P2P_CLIENT:
+		return WMI_VDEV_SUBTYPE_10_4_P2P_CLI;
+	case WMI_VDEV_SUBTYPE_P2P_GO:
+		return WMI_VDEV_SUBTYPE_10_4_P2P_GO;
+	case WMI_VDEV_SUBTYPE_PROXY_STA:
+		return WMI_VDEV_SUBTYPE_10_4_PROXY_STA;
+	case WMI_VDEV_SUBTYPE_MESH_11S:
+		return WMI_VDEV_SUBTYPE_10_4_MESH_11S;
+	case WMI_VDEV_SUBTYPE_MESH_NON_11S:
+		return WMI_VDEV_SUBTYPE_10_4_MESH_NON_11S;
+	}
+	return -ENOTSUPP;
+}
+
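The three mappers above exist because the firmware branches renumbered the
vdev subtypes (mesh 11s is 5 on 10.2.4 but 6 on 10.4, which uses 5 for
non-11s mesh instead). Callers are presumably routed through a wmi-ops
wrapper such as ath10k_wmi_get_vdev_subtype(), e.g.:

	subtype = ath10k_wmi_get_vdev_subtype(ar, WMI_VDEV_SUBTYPE_MESH_11S);
	if (subtype < 0)
		return subtype;	/* -ENOTSUPP on branches without mesh */
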
+static struct sk_buff *
+ath10k_wmi_10_4_ext_resource_config(struct ath10k *ar,
+				    enum wmi_host_platform_type type,
+				    u32 fw_feature_bitmap)
+{
+	struct wmi_ext_resource_config_10_4_cmd *cmd;
+	struct sk_buff *skb;
+
+	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
+	if (!skb)
+		return ERR_PTR(-ENOMEM);
+
+	cmd = (struct wmi_ext_resource_config_10_4_cmd *)skb->data;
+	cmd->host_platform_config = __cpu_to_le32(type);
+	cmd->fw_feature_bitmap = __cpu_to_le32(fw_feature_bitmap);
+
+	ath10k_dbg(ar, ATH10K_DBG_WMI,
+		   "wmi ext resource config host type %d firmware feature bitmap %08x\n",
+		   type, fw_feature_bitmap);
+	return skb;
+}
+
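A sketch of how the ext-resource-config command above might be sent during
setup, gated on the matching service bit (ath10k_wmi_cmd_send() and the
cmdid field are the ones added elsewhere in this diff; the feature choice
is illustrative):

	if (test_bit(WMI_SERVICE_EXT_RES_CFG_SUPPORT, ar->wmi.svc_map)) {
		skb = ath10k_wmi_10_4_ext_resource_config(ar,
					WMI_HOST_PLATFORM_HIGH_PERF,
					WMI_10_4_PEER_STATS);
		if (!IS_ERR(skb))
			ret = ath10k_wmi_cmd_send(ar, skb,
					ar->wmi.cmd->ext_resource_cfg_cmdid);
	}
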
+static struct sk_buff *
+ath10k_wmi_op_gen_echo(struct ath10k *ar, u32 value)
+{
+	struct wmi_echo_cmd *cmd;
+	struct sk_buff *skb;
+
+	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
+	if (!skb)
+		return ERR_PTR(-ENOMEM);
+
+	cmd = (struct wmi_echo_cmd *)skb->data;
+	cmd->value = cpu_to_le32(value);
+
+	ath10k_dbg(ar, ATH10K_DBG_WMI,
+		   "wmi echo value 0x%08x\n", value);
+	return skb;
+}
+
+int
+ath10k_wmi_barrier(struct ath10k *ar)
+{
+	int ret;
+	int time_left;
+
+	spin_lock_bh(&ar->data_lock);
+	reinit_completion(&ar->wmi.barrier);
+	spin_unlock_bh(&ar->data_lock);
+
+	ret = ath10k_wmi_echo(ar, ATH10K_WMI_BARRIER_ECHO_ID);
+	if (ret) {
+		ath10k_warn(ar, "failed to submit wmi echo: %d\n", ret);
+		return ret;
+	}
+
+	time_left = wait_for_completion_timeout(&ar->wmi.barrier,
+						ATH10K_WMI_BARRIER_TIMEOUT_HZ);
+	if (!time_left)
+		return -ETIMEDOUT;
+
+	return 0;
+}
+
 static const struct wmi_ops wmi_ops = {
 	.rx = ath10k_wmi_op_rx,
 	.map_svc = wmi_main_svc_map,
@@ -7248,6 +7976,7 @@
 	.pull_rdy = ath10k_wmi_op_pull_rdy_ev,
 	.pull_fw_stats = ath10k_wmi_main_op_pull_fw_stats,
 	.pull_roam_ev = ath10k_wmi_op_pull_roam_ev,
+	.pull_echo_ev = ath10k_wmi_op_pull_echo_ev,
 
 	.gen_pdev_suspend = ath10k_wmi_op_gen_pdev_suspend,
 	.gen_pdev_resume = ath10k_wmi_op_gen_pdev_resume,
@@ -7291,6 +8020,8 @@
 	.gen_addba_set_resp = ath10k_wmi_op_gen_addba_set_resp,
 	.gen_delba_send = ath10k_wmi_op_gen_delba_send,
 	.fw_stats_fill = ath10k_wmi_main_op_fw_stats_fill,
+	.get_vdev_subtype = ath10k_wmi_op_get_vdev_subtype,
+	.gen_echo = ath10k_wmi_op_gen_echo,
 	/* .gen_bcn_tmpl not implemented */
 	/* .gen_prb_tmpl not implemented */
 	/* .gen_p2p_go_bcn_ie not implemented */
@@ -7320,6 +8051,7 @@
 	.pull_phyerr = ath10k_wmi_op_pull_phyerr_ev,
 	.pull_rdy = ath10k_wmi_op_pull_rdy_ev,
 	.pull_roam_ev = ath10k_wmi_op_pull_roam_ev,
+	.pull_echo_ev = ath10k_wmi_op_pull_echo_ev,
 
 	.gen_pdev_suspend = ath10k_wmi_op_gen_pdev_suspend,
 	.gen_pdev_resume = ath10k_wmi_op_gen_pdev_resume,
@@ -7358,6 +8090,8 @@
 	.gen_addba_set_resp = ath10k_wmi_op_gen_addba_set_resp,
 	.gen_delba_send = ath10k_wmi_op_gen_delba_send,
 	.fw_stats_fill = ath10k_wmi_10x_op_fw_stats_fill,
+	.get_vdev_subtype = ath10k_wmi_op_get_vdev_subtype,
+	.gen_echo = ath10k_wmi_op_gen_echo,
 	/* .gen_bcn_tmpl not implemented */
 	/* .gen_prb_tmpl not implemented */
 	/* .gen_p2p_go_bcn_ie not implemented */
@@ -7377,6 +8111,7 @@
 	.pull_svc_rdy = ath10k_wmi_10x_op_pull_svc_rdy_ev,
 	.gen_pdev_set_rd = ath10k_wmi_10x_op_gen_pdev_set_rd,
 	.gen_start_scan = ath10k_wmi_10x_op_gen_start_scan,
+	.gen_echo = ath10k_wmi_op_gen_echo,
 
 	.pull_scan = ath10k_wmi_op_pull_scan_ev,
 	.pull_mgmt_rx = ath10k_wmi_op_pull_mgmt_rx_ev,
@@ -7388,6 +8123,7 @@
 	.pull_phyerr = ath10k_wmi_op_pull_phyerr_ev,
 	.pull_rdy = ath10k_wmi_op_pull_rdy_ev,
 	.pull_roam_ev = ath10k_wmi_op_pull_roam_ev,
+	.pull_echo_ev = ath10k_wmi_op_pull_echo_ev,
 
 	.gen_pdev_suspend = ath10k_wmi_op_gen_pdev_suspend,
 	.gen_pdev_resume = ath10k_wmi_op_gen_pdev_resume,
@@ -7426,6 +8162,7 @@
 	.gen_addba_set_resp = ath10k_wmi_op_gen_addba_set_resp,
 	.gen_delba_send = ath10k_wmi_op_gen_delba_send,
 	.fw_stats_fill = ath10k_wmi_10x_op_fw_stats_fill,
+	.get_vdev_subtype = ath10k_wmi_op_get_vdev_subtype,
 	/* .gen_pdev_enable_adaptive_cca not implemented */
 };
 
@@ -7435,23 +8172,26 @@
 	.gen_init = ath10k_wmi_10_2_op_gen_init,
 	.gen_peer_assoc = ath10k_wmi_10_2_op_gen_peer_assoc,
 	.gen_pdev_get_temperature = ath10k_wmi_10_2_op_gen_pdev_get_temperature,
+	.gen_pdev_bss_chan_info_req = ath10k_wmi_10_2_op_gen_pdev_bss_chan_info,
 
 	/* shared with 10.1 */
 	.map_svc = wmi_10x_svc_map,
 	.pull_svc_rdy = ath10k_wmi_10x_op_pull_svc_rdy_ev,
 	.gen_pdev_set_rd = ath10k_wmi_10x_op_gen_pdev_set_rd,
 	.gen_start_scan = ath10k_wmi_10x_op_gen_start_scan,
+	.gen_echo = ath10k_wmi_op_gen_echo,
 
 	.pull_scan = ath10k_wmi_op_pull_scan_ev,
 	.pull_mgmt_rx = ath10k_wmi_op_pull_mgmt_rx_ev,
 	.pull_ch_info = ath10k_wmi_op_pull_ch_info_ev,
 	.pull_vdev_start = ath10k_wmi_op_pull_vdev_start_ev,
 	.pull_peer_kick = ath10k_wmi_op_pull_peer_kick_ev,
-	.pull_swba = ath10k_wmi_op_pull_swba_ev,
+	.pull_swba = ath10k_wmi_10_2_4_op_pull_swba_ev,
 	.pull_phyerr_hdr = ath10k_wmi_op_pull_phyerr_ev_hdr,
 	.pull_phyerr = ath10k_wmi_op_pull_phyerr_ev,
 	.pull_rdy = ath10k_wmi_op_pull_rdy_ev,
 	.pull_roam_ev = ath10k_wmi_op_pull_roam_ev,
+	.pull_echo_ev = ath10k_wmi_op_pull_echo_ev,
 
 	.gen_pdev_suspend = ath10k_wmi_op_gen_pdev_suspend,
 	.gen_pdev_resume = ath10k_wmi_op_gen_pdev_resume,
@@ -7492,6 +8232,7 @@
 	.fw_stats_fill = ath10k_wmi_10x_op_fw_stats_fill,
 	.gen_pdev_enable_adaptive_cca =
 		ath10k_wmi_op_gen_pdev_enable_adaptive_cca,
+	.get_vdev_subtype = ath10k_wmi_10_2_4_op_get_vdev_subtype,
 	/* .gen_bcn_tmpl not implemented */
 	/* .gen_prb_tmpl not implemented */
 	/* .gen_p2p_go_bcn_ie not implemented */
@@ -7513,6 +8254,7 @@
 	.pull_phyerr = ath10k_wmi_10_4_op_pull_phyerr_ev,
 	.pull_svc_rdy = ath10k_wmi_main_op_pull_svc_rdy_ev,
 	.pull_rdy = ath10k_wmi_op_pull_rdy_ev,
+	.pull_roam_ev = ath10k_wmi_op_pull_roam_ev,
 	.get_txbf_conf_scheme = ath10k_wmi_10_4_txbf_conf_scheme,
 
 	.gen_pdev_suspend = ath10k_wmi_op_gen_pdev_suspend,
@@ -7536,6 +8278,7 @@
 	.gen_peer_delete = ath10k_wmi_op_gen_peer_delete,
 	.gen_peer_flush = ath10k_wmi_op_gen_peer_flush,
 	.gen_peer_set_param = ath10k_wmi_op_gen_peer_set_param,
+	.gen_peer_assoc = ath10k_wmi_10_4_op_gen_peer_assoc,
 	.gen_set_psmode = ath10k_wmi_op_gen_set_psmode,
 	.gen_set_sta_ps = ath10k_wmi_op_gen_set_sta_ps,
 	.gen_set_ap_ps = ath10k_wmi_op_gen_set_ap_ps,
@@ -7544,7 +8287,7 @@
 	.gen_pdev_set_wmm = ath10k_wmi_op_gen_pdev_set_wmm,
 	.gen_force_fw_hang = ath10k_wmi_op_gen_force_fw_hang,
 	.gen_mgmt_tx = ath10k_wmi_op_gen_mgmt_tx,
-	.gen_dbglog_cfg = ath10k_wmi_op_gen_dbglog_cfg,
+	.gen_dbglog_cfg = ath10k_wmi_10_4_op_gen_dbglog_cfg,
 	.gen_pktlog_enable = ath10k_wmi_op_gen_pktlog_enable,
 	.gen_pktlog_disable = ath10k_wmi_op_gen_pktlog_disable,
 	.gen_pdev_set_quiet_mode = ath10k_wmi_op_gen_pdev_set_quiet_mode,
@@ -7553,44 +8296,54 @@
 	.gen_addba_set_resp = ath10k_wmi_op_gen_addba_set_resp,
 	.gen_delba_send = ath10k_wmi_op_gen_delba_send,
 	.fw_stats_fill = ath10k_wmi_10_4_op_fw_stats_fill,
+	.ext_resource_config = ath10k_wmi_10_4_ext_resource_config,
 
 	/* shared with 10.2 */
-	.gen_peer_assoc = ath10k_wmi_10_2_op_gen_peer_assoc,
+	.pull_echo_ev = ath10k_wmi_op_pull_echo_ev,
 	.gen_request_stats = ath10k_wmi_op_gen_request_stats,
+	.gen_pdev_get_temperature = ath10k_wmi_10_2_op_gen_pdev_get_temperature,
+	.get_vdev_subtype = ath10k_wmi_10_4_op_get_vdev_subtype,
+	.gen_pdev_bss_chan_info_req = ath10k_wmi_10_2_op_gen_pdev_bss_chan_info,
+	.gen_echo = ath10k_wmi_op_gen_echo,
 };
 
 int ath10k_wmi_attach(struct ath10k *ar)
 {
-	switch (ar->wmi.op_version) {
+	switch (ar->running_fw->fw_file.wmi_op_version) {
 	case ATH10K_FW_WMI_OP_VERSION_10_4:
 		ar->wmi.ops = &wmi_10_4_ops;
 		ar->wmi.cmd = &wmi_10_4_cmd_map;
 		ar->wmi.vdev_param = &wmi_10_4_vdev_param_map;
 		ar->wmi.pdev_param = &wmi_10_4_pdev_param_map;
+		ar->wmi.peer_flags = &wmi_10_2_peer_flags_map;
 		break;
 	case ATH10K_FW_WMI_OP_VERSION_10_2_4:
 		ar->wmi.cmd = &wmi_10_2_4_cmd_map;
 		ar->wmi.ops = &wmi_10_2_4_ops;
 		ar->wmi.vdev_param = &wmi_10_2_4_vdev_param_map;
 		ar->wmi.pdev_param = &wmi_10_2_4_pdev_param_map;
+		ar->wmi.peer_flags = &wmi_10_2_peer_flags_map;
 		break;
 	case ATH10K_FW_WMI_OP_VERSION_10_2:
 		ar->wmi.cmd = &wmi_10_2_cmd_map;
 		ar->wmi.ops = &wmi_10_2_ops;
 		ar->wmi.vdev_param = &wmi_10x_vdev_param_map;
 		ar->wmi.pdev_param = &wmi_10x_pdev_param_map;
+		ar->wmi.peer_flags = &wmi_10_2_peer_flags_map;
 		break;
 	case ATH10K_FW_WMI_OP_VERSION_10_1:
 		ar->wmi.cmd = &wmi_10x_cmd_map;
 		ar->wmi.ops = &wmi_10_1_ops;
 		ar->wmi.vdev_param = &wmi_10x_vdev_param_map;
 		ar->wmi.pdev_param = &wmi_10x_pdev_param_map;
+		ar->wmi.peer_flags = &wmi_10x_peer_flags_map;
 		break;
 	case ATH10K_FW_WMI_OP_VERSION_MAIN:
 		ar->wmi.cmd = &wmi_cmd_map;
 		ar->wmi.ops = &wmi_ops;
 		ar->wmi.vdev_param = &wmi_vdev_param_map;
 		ar->wmi.pdev_param = &wmi_pdev_param_map;
+		ar->wmi.peer_flags = &wmi_peer_flags_map;
 		break;
 	case ATH10K_FW_WMI_OP_VERSION_TLV:
 		ath10k_wmi_tlv_attach(ar);
@@ -7598,15 +8351,21 @@
 	case ATH10K_FW_WMI_OP_VERSION_UNSET:
 	case ATH10K_FW_WMI_OP_VERSION_MAX:
 		ath10k_err(ar, "unsupported WMI op version: %d\n",
-			   ar->wmi.op_version);
+			   ar->running_fw->fw_file.wmi_op_version);
 		return -EINVAL;
 	}
 
 	init_completion(&ar->wmi.service_ready);
 	init_completion(&ar->wmi.unified_ready);
+	init_completion(&ar->wmi.barrier);
 
 	INIT_WORK(&ar->svc_rdy_work, ath10k_wmi_event_service_ready_work);
 
+	if (QCA_REV_WCN3990(ar)) {
+		spin_lock_init(&ar->wmi.mgmt_tx_lock);
+		idr_init(&ar->wmi.mgmt_pending_tx);
+	}
+
 	return 0;
 }
 
@@ -7616,17 +8375,42 @@
 
 	/* free the host memory chunks requested by firmware */
 	for (i = 0; i < ar->wmi.num_mem_chunks; i++) {
-		dma_free_coherent(ar->dev,
+		dma_unmap_single(ar->dev,
+				 ar->wmi.mem_chunks[i].paddr,
 				  ar->wmi.mem_chunks[i].len,
-				  ar->wmi.mem_chunks[i].vaddr,
-				  ar->wmi.mem_chunks[i].paddr);
+				 DMA_TO_DEVICE);
+		kfree(ar->wmi.mem_chunks[i].vaddr);
 	}
 
 	ar->wmi.num_mem_chunks = 0;
 }
 
+static int ath10k_wmi_mgmt_tx_clean_up_pending(int msdu_id, void *ptr,
+					       void *ctx)
+{
+	struct ath10k_mgmt_tx_pkt_addr *pkt_addr = ptr;
+	struct ath10k *ar = ctx;
+	struct sk_buff *msdu;
+
+	ath10k_dbg(ar, ATH10K_DBG_WMI,
+		   "force cleanup mgmt msdu_id %d\n", msdu_id);
+
+	msdu = pkt_addr->vaddr;
+	dma_unmap_single(ar->dev, pkt_addr->paddr,
+			 msdu->len, DMA_TO_DEVICE);
+	ieee80211_free_txskb(ar->hw, msdu);
+
+	return 0;
+}
+
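The cleanup callback above implies a tx-side producer; a hypothetical
sketch of how entries would enter mgmt_pending_tx on the WCN3990
management-tx path (the lock and idr fields are from this diff, the rest is
assumed):

	spin_lock_bh(&ar->wmi.mgmt_tx_lock);
	msdu_id = idr_alloc(&ar->wmi.mgmt_pending_tx, pkt_addr,
			    0, 0x10000, GFP_ATOMIC);
	spin_unlock_bh(&ar->wmi.mgmt_tx_lock);

On a normal completion the entry is removed again with idr_remove(); the
idr_for_each() pass in ath10k_wmi_detach() only reaps frames whose
completion never arrived.
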
 void ath10k_wmi_detach(struct ath10k *ar)
 {
+	if (QCA_REV_WCN3990(ar)) {
+		idr_for_each(&ar->wmi.mgmt_pending_tx,
+			     ath10k_wmi_mgmt_tx_clean_up_pending, ar);
+		idr_destroy(&ar->wmi.mgmt_pending_tx);
+	}
+
 	cancel_work_sync(&ar->svc_rdy_work);
 
 	if (ar->svc_rdy_skb)
diff -ruw linux-4.4.115/drivers/net/wireless/ath/ath10k/wmi.h linux-4.4.115-fbx/drivers/net/wireless/ath/ath10k/wmi.h
--- linux-4.4.115/drivers/net/wireless/ath/ath10k/wmi.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/net/wireless/ath/ath10k/wmi.h	2019-10-29 09:26:24.465211222 +0100
@@ -1,6 +1,6 @@
 /*
  * Copyright (c) 2005-2011 Atheros Communications Inc.
- * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ * Copyright (c) 2011-2013, 2017 Qualcomm Atheros, Inc.
  *
  * Permission to use, copy, modify, and/or distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above
@@ -20,6 +20,9 @@
 
 #include <linux/types.h>
 #include <net/mac80211.h>
+#include <linux/ipv6.h>
+#include <net/ipv6.h>
+#include <linux/in.h>
 
 /*
  * This file specifies the WMI interface for the Unified Software
@@ -55,7 +58,7 @@
  *    type.
  *
  * 6. Comment each parameter part of the WMI command/event structure by
- *    using the 2 stars at the begining of C comment instead of one star to
+ *    using the 2 stars at the beginning of C comment instead of one star to
  *    enable HTML document generation using Doxygen.
  *
  */
@@ -175,6 +178,16 @@
 	WMI_SERVICE_AUX_SPECTRAL_INTF,
 	WMI_SERVICE_AUX_CHAN_LOAD_INTF,
 	WMI_SERVICE_BSS_CHANNEL_INFO_64,
+	WMI_SERVICE_EXT_RES_CFG_SUPPORT,
+	WMI_SERVICE_MESH_11S,
+	WMI_SERVICE_MESH_NON_11S,
+	WMI_SERVICE_PEER_STATS,
+	WMI_SERVICE_RESTRT_CHNL_SUPPORT,
+	WMI_SERVICE_PERIODIC_CHAN_STAT_SUPPORT,
+	WMI_SERVICE_TX_MODE_PUSH_ONLY,
+	WMI_SERVICE_TX_MODE_PUSH_PULL,
+	WMI_SERVICE_TX_MODE_DYNAMIC,
+	WMI_SERVICE_MGMT_TX_WMI,
 
 	/* keep last */
 	WMI_SERVICE_MAX,
@@ -206,6 +219,12 @@
 	WMI_10X_SERVICE_SMART_ANTENNA_HW_SUPPORT,
 	WMI_10X_SERVICE_ATF,
 	WMI_10X_SERVICE_COEX_GPIO,
+	WMI_10X_SERVICE_AUX_SPECTRAL_INTF,
+	WMI_10X_SERVICE_AUX_CHAN_LOAD_INTF,
+	WMI_10X_SERVICE_BSS_CHANNEL_INFO_64,
+	WMI_10X_SERVICE_MESH,
+	WMI_10X_SERVICE_EXT_RES_CFG_SUPPORT,
+	WMI_10X_SERVICE_PEER_STATS,
 };
 
 enum wmi_main_service {
@@ -286,6 +305,15 @@
 	WMI_10_4_SERVICE_AUX_SPECTRAL_INTF,
 	WMI_10_4_SERVICE_AUX_CHAN_LOAD_INTF,
 	WMI_10_4_SERVICE_BSS_CHANNEL_INFO_64,
+	WMI_10_4_SERVICE_EXT_RES_CFG_SUPPORT,
+	WMI_10_4_SERVICE_MESH_NON_11S,
+	WMI_10_4_SERVICE_RESTRT_CHNL_SUPPORT,
+	WMI_10_4_SERVICE_PEER_STATS,
+	WMI_10_4_SERVICE_MESH_11S,
+	WMI_10_4_SERVICE_PERIODIC_CHAN_STAT_SUPPORT,
+	WMI_10_4_SERVICE_TX_MODE_PUSH_ONLY,
+	WMI_10_4_SERVICE_TX_MODE_PUSH_PULL,
+	WMI_10_4_SERVICE_TX_MODE_DYNAMIC,
 };
 
 static inline char *wmi_service_name(int service_id)
@@ -375,6 +403,15 @@
 	SVCSTR(WMI_SERVICE_AUX_SPECTRAL_INTF);
 	SVCSTR(WMI_SERVICE_AUX_CHAN_LOAD_INTF);
 	SVCSTR(WMI_SERVICE_BSS_CHANNEL_INFO_64);
+	SVCSTR(WMI_SERVICE_EXT_RES_CFG_SUPPORT);
+	SVCSTR(WMI_SERVICE_MESH_11S);
+	SVCSTR(WMI_SERVICE_MESH_NON_11S);
+	SVCSTR(WMI_SERVICE_PEER_STATS);
+	SVCSTR(WMI_SERVICE_RESTRT_CHNL_SUPPORT);
+	SVCSTR(WMI_SERVICE_PERIODIC_CHAN_STAT_SUPPORT);
+	SVCSTR(WMI_SERVICE_TX_MODE_PUSH_ONLY);
+	SVCSTR(WMI_SERVICE_TX_MODE_PUSH_PULL);
+	SVCSTR(WMI_SERVICE_TX_MODE_DYNAMIC);
 	default:
 		return NULL;
 	}
@@ -442,6 +479,18 @@
 	       WMI_SERVICE_ATF, len);
 	SVCMAP(WMI_10X_SERVICE_COEX_GPIO,
 	       WMI_SERVICE_COEX_GPIO, len);
+	SVCMAP(WMI_10X_SERVICE_AUX_SPECTRAL_INTF,
+	       WMI_SERVICE_AUX_SPECTRAL_INTF, len);
+	SVCMAP(WMI_10X_SERVICE_AUX_CHAN_LOAD_INTF,
+	       WMI_SERVICE_AUX_CHAN_LOAD_INTF, len);
+	SVCMAP(WMI_10X_SERVICE_BSS_CHANNEL_INFO_64,
+	       WMI_SERVICE_BSS_CHANNEL_INFO_64, len);
+	SVCMAP(WMI_10X_SERVICE_MESH,
+	       WMI_SERVICE_MESH_11S, len);
+	SVCMAP(WMI_10X_SERVICE_EXT_RES_CFG_SUPPORT,
+	       WMI_SERVICE_EXT_RES_CFG_SUPPORT, len);
+	SVCMAP(WMI_10X_SERVICE_PEER_STATS,
+	       WMI_SERVICE_PEER_STATS, len);
 }
 
 static inline void wmi_main_svc_map(const __le32 *in, unsigned long *out,
@@ -600,6 +649,24 @@
 	       WMI_SERVICE_AUX_CHAN_LOAD_INTF, len);
 	SVCMAP(WMI_10_4_SERVICE_BSS_CHANNEL_INFO_64,
 	       WMI_SERVICE_BSS_CHANNEL_INFO_64, len);
+	SVCMAP(WMI_10_4_SERVICE_EXT_RES_CFG_SUPPORT,
+	       WMI_SERVICE_EXT_RES_CFG_SUPPORT, len);
+	SVCMAP(WMI_10_4_SERVICE_MESH_NON_11S,
+	       WMI_SERVICE_MESH_NON_11S, len);
+	SVCMAP(WMI_10_4_SERVICE_RESTRT_CHNL_SUPPORT,
+	       WMI_SERVICE_RESTRT_CHNL_SUPPORT, len);
+	SVCMAP(WMI_10_4_SERVICE_PEER_STATS,
+	       WMI_SERVICE_PEER_STATS, len);
+	SVCMAP(WMI_10_4_SERVICE_MESH_11S,
+	       WMI_SERVICE_MESH_11S, len);
+	SVCMAP(WMI_10_4_SERVICE_PERIODIC_CHAN_STAT_SUPPORT,
+	       WMI_SERVICE_PERIODIC_CHAN_STAT_SUPPORT, len);
+	SVCMAP(WMI_10_4_SERVICE_TX_MODE_PUSH_ONLY,
+	       WMI_SERVICE_TX_MODE_PUSH_ONLY, len);
+	SVCMAP(WMI_10_4_SERVICE_TX_MODE_PUSH_PULL,
+	       WMI_SERVICE_TX_MODE_PUSH_PULL, len);
+	SVCMAP(WMI_10_4_SERVICE_TX_MODE_DYNAMIC,
+	       WMI_SERVICE_TX_MODE_DYNAMIC, len);
 }
 
 #undef SVCMAP
@@ -657,6 +724,7 @@
 	u32 bcn_filter_rx_cmdid;
 	u32 prb_req_filter_rx_cmdid;
 	u32 mgmt_tx_cmdid;
+	u32 mgmt_tx_send_cmdid;
 	u32 prb_tmpl_cmdid;
 	u32 addba_clear_resp_cmdid;
 	u32 addba_send_cmdid;
@@ -773,6 +841,7 @@
 	u32 set_cca_params_cmdid;
 	u32 pdev_bss_chan_info_request_cmdid;
 	u32 pdev_enable_adaptive_cca_cmdid;
+	u32 ext_resource_cfg_cmdid;
 };
 
 /*
@@ -1385,6 +1454,7 @@
 	WMI_10_2_MU_CAL_START_CMDID,
 	WMI_10_2_SET_LTEU_CONFIG_CMDID,
 	WMI_10_2_SET_CCA_PARAMS,
+	WMI_10_2_PDEV_BSS_CHAN_INFO_REQUEST_CMDID,
 	WMI_10_2_PDEV_UTF_CMDID = WMI_10_2_END_CMDID - 1,
 };
 
@@ -1428,6 +1498,8 @@
 	WMI_10_2_WDS_PEER_EVENTID,
 	WMI_10_2_PEER_STA_PS_STATECHG_EVENTID,
 	WMI_10_2_PDEV_TEMPERATURE_EVENTID,
+	WMI_10_2_MU_REPORT_EVENTID,
+	WMI_10_2_PDEV_BSS_CHAN_INFO_EVENTID,
 	WMI_10_2_PDEV_UTF_EVENTID = WMI_10_2_END_EVENTID - 1,
 };
 
@@ -1576,6 +1648,9 @@
 	WMI_10_4_MU_CAL_START_CMDID,
 	WMI_10_4_SET_CCA_PARAMS_CMDID,
 	WMI_10_4_PDEV_BSS_CHAN_INFO_REQUEST_CMDID,
+	WMI_10_4_EXT_RESOURCE_CFG_CMDID,
+	WMI_10_4_VDEV_SET_IE_CMDID,
+	WMI_10_4_SET_LTEU_CONFIG_CMDID,
 	WMI_10_4_PDEV_UTF_CMDID = WMI_10_4_END_CMDID - 1,
 };
 
@@ -1638,6 +1713,7 @@
 	WMI_10_4_PDEV_TEMPERATURE_EVENTID,
 	WMI_10_4_PDEV_NFCAL_POWER_ALL_CHANNELS_EVENTID,
 	WMI_10_4_PDEV_BSS_CHAN_INFO_EVENTID,
+	WMI_10_4_MU_REPORT_EVENTID,
 	WMI_10_4_PDEV_UTF_EVENTID = WMI_10_4_END_EVENTID - 1,
 };
 
@@ -1732,6 +1808,7 @@
 		__le32 reginfo1;
 		struct {
 			u8 antenna_max;
+			u8 max_tx_power;
 		} __packed;
 	} __packed;
 } __packed;
@@ -1771,7 +1848,6 @@
 #define WMI_CHANNEL_CHANGE_CAUSE_CSA (1 << 13)
 
 #define WMI_MAX_SPATIAL_STREAM        3 /* default max ss */
-#define WMI_10_4_MAX_SPATIAL_STREAM   4
 
 /* HT Capabilities*/
 #define WMI_HT_CAP_ENABLED                0x0001   /* HT Enabled/ disabled */
@@ -2016,7 +2092,7 @@
 	 * In offload mode target supports features like WOW, chatter and
 	 * other protocol offloads. In order to support them some
 	 * functionalities like reorder buffering, PN checking need to be
-	 * done in target. This determines maximum number of peers suported
+	 * done in target. This determines maximum number of peers supported
 	 * by target in offload mode
 	 */
 	__le32 num_offload_peers;
@@ -2197,7 +2273,7 @@
 	 * Max. number of Tx fragments per MSDU
 	 *  This parameter controls the max number of Tx fragments per MSDU.
 	 *  This is sent by the target as part of the WMI_SERVICE_READY event
-	 *  and is overriden by the OS shim as required.
+	 *  and is overridden by the OS shim as required.
 	 */
 	__le32 max_frag_entries;
 } __packed;
@@ -2379,7 +2455,7 @@
 	 * Max. number of Tx fragments per MSDU
 	 *  This parameter controls the max number of Tx fragments per MSDU.
 	 *  This is sent by the target as part of the WMI_SERVICE_READY event
-	 *  and is overriden by the OS shim as required.
+	 *  and is overridden by the OS shim as required.
 	 */
 	__le32 max_frag_entries;
 } __packed;
@@ -2388,6 +2464,8 @@
 	WMI_10_2_RX_BATCH_MODE = BIT(0),
 	WMI_10_2_ATF_CONFIG    = BIT(1),
 	WMI_10_2_COEX_GPIO     = BIT(3),
+	WMI_10_2_BSS_CHAN_INFO = BIT(6),
+	WMI_10_2_PEER_STATS    = BIT(7),
 };
 
 struct wmi_resource_config_10_2 {
@@ -2613,13 +2691,43 @@
 	 */
 	__le32 iphdr_pad_config;
 
-	/* qwrap configuration
+	/* qwrap configuration (bits 15-0)
 	 * 1  - This is qwrap configuration
 	 * 0  - This is not qwrap
+	 *
+	 * Bits 31-16 are alloc_frag_desc_for_data_pkt (1 enables, 0 disables).
+	 * In order to get ack-RSSI reporting and to specify the tx-rate for
+	 * individual frames, this option must be enabled.  This uses an extra
+	 * 4 bytes per tx-msdu descriptor, so don't enable it unless you need it.
 	 */
 	__le32 qwrap_config;
 } __packed;
 
+/**
+ * enum wmi_10_4_feature_mask - WMI 10.4 feature enable/disable flags
+ * @WMI_10_4_LTEU_SUPPORT: LTEU config
+ * @WMI_10_4_COEX_GPIO_SUPPORT: COEX GPIO config
+ * @WMI_10_4_AUX_RADIO_SPECTRAL_INTF: AUX Radio Enhancement for spectral scan
+ * @WMI_10_4_AUX_RADIO_CHAN_LOAD_INTF: AUX Radio Enhancement for chan load scan
+ * @WMI_10_4_BSS_CHANNEL_INFO_64: BSS channel info stats
+ * @WMI_10_4_PEER_STATS: Per station stats
+ */
+enum wmi_10_4_feature_mask {
+	WMI_10_4_LTEU_SUPPORT			= BIT(0),
+	WMI_10_4_COEX_GPIO_SUPPORT		= BIT(1),
+	WMI_10_4_AUX_RADIO_SPECTRAL_INTF	= BIT(2),
+	WMI_10_4_AUX_RADIO_CHAN_LOAD_INTF	= BIT(3),
+	WMI_10_4_BSS_CHANNEL_INFO_64		= BIT(4),
+	WMI_10_4_PEER_STATS			= BIT(5),
+};
+
+struct wmi_ext_resource_config_10_4_cmd {
+	/* contains enum wmi_host_platform_type */
+	__le32 host_platform_config;
+	/* see enum wmi_10_4_feature_mask */
+	__le32 fw_feature_bitmap;
+} __packed;
+
 /* strucutre describing host memory chunk. */
 struct host_memory_chunk {
 	/* id of the request that is passed up in service ready */
@@ -2641,7 +2749,7 @@
 	struct wmi_host_mem_chunks mem_chunks;
 } __packed;
 
-/* _10x stucture is from 10.X FW API */
+/* _10x structure is from 10.X FW API */
 struct wmi_init_cmd_10x {
 	struct wmi_resource_config_10x resource_config;
 	struct wmi_host_mem_chunks mem_chunks;
@@ -2779,6 +2887,66 @@
 	__le32 scan_ctrl_flags;
 } __packed;
 
+/* ARP-NS offload data structure */
+#define WMI_NS_ARP_OFFLOAD		2
+#define WMI_ARP_NS_OFF_FLAGS_VALID	BIT(0)
+#define WMI_IPV4_ARP_REPLY_OFFLOAD	0
+#define WMI_ARP_NS_OFFLOAD_DISABLE	0
+#define WMI_ARP_NS_OFFLOAD_ENABLE	1
+#define WMI_NSOFF_IPV6_ANYCAST		BIT(3)
+
+struct wmi_ns_offload_info {
+	struct in6_addr src_addr;
+	struct in6_addr self_addr[TARGET_NUM_STATIONS];
+	struct in6_addr target_addr[TARGET_NUM_STATIONS];
+	struct wmi_mac_addr self_macaddr;
+	u8 src_ipv6_addr_valid;
+	struct in6_addr target_addr_valid;
+	struct in6_addr target_ipv6_ac;
+	u8 slot_idx;
+} __packed;
+
+struct wmi_ns_arp_offload_req {
+	u8 offload_type;
+	u8 enable_offload;
+	__le32 num_ns_offload_count;
+	union {
+		struct in_addr ipv4_addr;
+		struct in6_addr ipv6_addr;
+	} params;
+	struct wmi_ns_offload_info info;
+	struct wmi_mac_addr bssid;
+} __packed;
+
+struct wmi_ns_offload {
+	__le32 flags;
+	struct in6_addr target_ipaddr[WMI_NS_ARP_OFFLOAD];
+	struct in6_addr solicitation_ipaddr;
+	struct in6_addr remote_ipaddr;
+	struct wmi_mac_addr target_mac;
+} __packed;
+
+struct wmi_arp_offload {
+	__le32 flags;
+	struct in_addr target_ipaddr;
+	struct in_addr remote_ipaddr;
+	struct wmi_mac_addr target_mac;
+} __packed;
+
+/* GTK offload data structure */
+#define WMI_GTK_OFFLOAD_ENABLE_OPCODE	BIT(24)
+#define WMI_GTK_OFFLOAD_DISABLE_OPCODE	BIT(25)
+#define WMI_GTK_OFFLOAD_ENABLE	1
+#define WMI_GTK_OFFLOAD_DISABLE	0
+
+struct wmi_gtk_rekey_data {
+	bool valid;
+	bool enable_offload;
+	u8 kck[NL80211_KCK_LEN];
+	u8 kek[NL80211_KEK_LEN];
+	__le64 replay_ctr;
+} __packed;
+
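wmi_gtk_rekey_data above mirrors what mac80211 hands the driver in the
set_rekey_data callback; a hypothetical producer (the hook wiring and the
rekey/data locals are assumptions):

	/* in the assumed ieee80211_ops .set_rekey_data hook, with
	 * data being the struct cfg80211_gtk_rekey_data from mac80211
	 */
	memcpy(rekey->kck, data->kck, NL80211_KCK_LEN);
	memcpy(rekey->kek, data->kek, NL80211_KEK_LEN);
	memcpy(&rekey->replay_ctr, data->replay_ctr,
	       sizeof(rekey->replay_ctr));
	rekey->valid = true;
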
 struct wmi_start_scan_tlvs {
 	/* TLV parameters. These includes channel list, ssid list, bssid list,
 	 * extra ies.
@@ -2855,6 +3023,8 @@
 /* Different FW scan engine may choose to bail out on errors.
  * Allow the driver to have influence over that. */
 #define WMI_SCAN_CONTINUE_ON_ERROR 0x80
+/** add DS content in probe req frame */
+#define WMI_SCAN_ADD_DS_IE_IN_PROBE_REQ   0x800
 
 /* WMI_SCAN_CLASS_MASK must be the same value as IEEE80211_SCAN_CLASS_MASK */
 #define WMI_SCAN_CLASS_MASK 0xFF000000
@@ -2990,11 +3160,17 @@
 	u8 buf[0];
 } __packed;
 
+struct wmi_mgmt_rx_ext_info {
+	__le64 rx_mac_timestamp;
+} __packed __aligned(4);
+
 #define WMI_RX_STATUS_OK			0x00
 #define WMI_RX_STATUS_ERR_CRC			0x01
 #define WMI_RX_STATUS_ERR_DECRYPT		0x08
 #define WMI_RX_STATUS_ERR_MIC			0x10
 #define WMI_RX_STATUS_ERR_KEY_CACHE_MISS	0x20
+/* Extension data at the end of mgmt frame */
+#define WMI_RX_STATUS_EXT_INFO		0x40
 
 #define PHY_ERROR_GEN_SPECTRAL_SCAN		0x26
 #define PHY_ERROR_GEN_FALSE_RADAR_EXT		0x24
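WMI_RX_STATUS_EXT_INFO above signals that a wmi_mgmt_rx_ext_info trailer
follows the frame body inside the event buffer; the pull parser is
presumably extended along these lines (the offset expression is an
assumption):

	/* after the fixed mgmt-rx header has been pulled */
	if (arg->status & __cpu_to_le32(WMI_RX_STATUS_EXT_INFO))
		memcpy(&arg->ext_info, skb->data + buf_len,
		       sizeof(arg->ext_info));
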
@@ -3003,6 +3179,10 @@
 #define PHY_ERROR_10_4_RADAR_MASK               0x4
 #define PHY_ERROR_10_4_SPECTRAL_SCAN_MASK       0x4000000
 
+#define WMI_PHY_ERROR_MASK0_RADAR		BIT(2)
+#define WMI_PHY_ERROR_MASK0_FALSE_RADAR_EXT	BIT(24)
+#define WMI_PHY_ERROR_MASK0_SPECTRAL_SCAN	BIT(26)
+
 enum phy_err_type {
 	PHY_ERROR_UNKNOWN,
 	PHY_ERROR_SPECTRAL_SCAN,
@@ -3343,6 +3523,7 @@
 	u32 wapi_mbssid_offset;
 	u32 arp_srcaddr;
 	u32 arp_dstaddr;
+	u32 enable_btcoex;
 };
 
 #define WMI_PDEV_PARAM_UNSUPPORTED 0
@@ -3650,6 +3831,15 @@
 	WMI_10_4_PDEV_PARAM_WAPI_MBSSID_OFFSET,
 	WMI_10_4_PDEV_PARAM_ARP_SRCADDR,
 	WMI_10_4_PDEV_PARAM_ARP_DSTADDR,
+	WMI_10_4_PDEV_PARAM_TXPOWER_DECR_DB,
+	WMI_10_4_PDEV_PARAM_RX_BATCHMODE,
+	WMI_10_4_PDEV_PARAM_PACKET_AGGR_DELAY,
+	WMI_10_4_PDEV_PARAM_ATF_OBSS_NOISE_SCH,
+	WMI_10_4_PDEV_PARAM_ATF_OBSS_NOISE_SCALING_FACTOR,
+	WMI_10_4_PDEV_PARAM_CUST_TXPOWER_SCALE,
+	WMI_10_4_PDEV_PARAM_ATF_DYNAMIC_ENABLE,
+	WMI_10_4_PDEV_PARAM_ATF_SSID_GROUP_POLICY,
+	WMI_10_4_PDEV_PARAM_ENABLE_BTCOEX,
 };
 
 struct wmi_pdev_set_param_cmd {
@@ -3848,7 +4038,7 @@
 	/* illegal rate phy errors  */
 	__le32 illgl_rate_phy_err;
 
-	/* wal pdev continous xretry */
+	/* wal pdev continuous xretry */
 	__le32 pdev_cont_xretry;
 
 	/* wal pdev continous xretry */
@@ -4019,6 +4209,13 @@
 	WMI_STAT_VDEV_RATE = BIT(5),
 };
 
+enum wmi_10_4_stats_id {
+	WMI_10_4_STAT_PEER		= BIT(0),
+	WMI_10_4_STAT_AP		= BIT(1),
+	WMI_10_4_STAT_INST		= BIT(2),
+	WMI_10_4_STAT_PEER_EXTD		= BIT(3),
+};
+
 struct wlan_inst_rssi_args {
 	__le16 cfg_retry_count;
 	__le16 retry_count;
@@ -4096,10 +4293,10 @@
  */
 struct wmi_pdev_stats_base {
 	__le32 chan_nf;
-	__le32 tx_frame_count;
-	__le32 rx_frame_count;
-	__le32 rx_clear_count;
-	__le32 cycle_count;
+	__le32 tx_frame_count; /* Cycles spent transmitting frames */
+	__le32 rx_frame_count; /* Cycles spent receiving frames */
+	__le32 rx_clear_count; /* Total channel busy time, evidently */
+	__le32 cycle_count; /* Total on-channel time */
 	__le32 phy_err_count;
 	__le32 chan_tx_pwr;
 } __packed;
@@ -4192,7 +4389,13 @@
 
 struct wmi_10_2_4_peer_stats {
 	struct wmi_10_2_peer_stats common;
-	__le32 unknown_value; /* FIXME: what is this word? */
+	__le32 peer_rssi_changed;
+} __packed;
+
+struct wmi_10_2_4_ext_peer_stats {
+	struct wmi_10_2_peer_stats common;
+	__le32 peer_rssi_changed;
+	__le32 rx_duration;
 } __packed;
 
 struct wmi_10_4_peer_stats {
@@ -4212,6 +4415,27 @@
 	__le32 peer_rssi_changed;
 } __packed;
 
+struct wmi_10_4_peer_extd_stats {
+	struct wmi_mac_addr peer_macaddr;
+	__le32 inactive_time;
+	__le32 peer_chain_rssi;
+	__le32 rx_duration;
+	__le32 reserved[10];
+} __packed;
+
+struct wmi_10_4_bss_bcn_stats {
+	__le32 vdev_id;
+	__le32 bss_bcns_dropped;
+	__le32 bss_bcn_delivered;
+} __packed;
+
+struct wmi_10_4_bss_bcn_filter_stats {
+	__le32 bcns_dropped;
+	__le32 bcns_delivered;
+	__le32 active_filters;
+	struct wmi_10_4_bss_bcn_stats bss_stats;
+} __packed;
+
 struct wmi_10_2_pdev_ext_stats {
 	__le32 rx_rssi_comb;
 	__le32 rx_rssi[4];
@@ -4235,10 +4459,40 @@
 };
 
 enum wmi_vdev_subtype {
-	WMI_VDEV_SUBTYPE_NONE       = 0,
-	WMI_VDEV_SUBTYPE_P2P_DEVICE = 1,
-	WMI_VDEV_SUBTYPE_P2P_CLIENT = 2,
-	WMI_VDEV_SUBTYPE_P2P_GO     = 3,
+	WMI_VDEV_SUBTYPE_NONE,
+	WMI_VDEV_SUBTYPE_P2P_DEVICE,
+	WMI_VDEV_SUBTYPE_P2P_CLIENT,
+	WMI_VDEV_SUBTYPE_P2P_GO,
+	WMI_VDEV_SUBTYPE_PROXY_STA,
+	WMI_VDEV_SUBTYPE_MESH_11S,
+	WMI_VDEV_SUBTYPE_MESH_NON_11S,
+};
+
+enum wmi_vdev_subtype_legacy {
+	WMI_VDEV_SUBTYPE_LEGACY_NONE      = 0,
+	WMI_VDEV_SUBTYPE_LEGACY_P2P_DEV   = 1,
+	WMI_VDEV_SUBTYPE_LEGACY_P2P_CLI   = 2,
+	WMI_VDEV_SUBTYPE_LEGACY_P2P_GO    = 3,
+	WMI_VDEV_SUBTYPE_LEGACY_PROXY_STA = 4,
+};
+
+enum wmi_vdev_subtype_10_2_4 {
+	WMI_VDEV_SUBTYPE_10_2_4_NONE      = 0,
+	WMI_VDEV_SUBTYPE_10_2_4_P2P_DEV   = 1,
+	WMI_VDEV_SUBTYPE_10_2_4_P2P_CLI   = 2,
+	WMI_VDEV_SUBTYPE_10_2_4_P2P_GO    = 3,
+	WMI_VDEV_SUBTYPE_10_2_4_PROXY_STA = 4,
+	WMI_VDEV_SUBTYPE_10_2_4_MESH_11S  = 5,
+};
+
+enum wmi_vdev_subtype_10_4 {
+	WMI_VDEV_SUBTYPE_10_4_NONE         = 0,
+	WMI_VDEV_SUBTYPE_10_4_P2P_DEV      = 1,
+	WMI_VDEV_SUBTYPE_10_4_P2P_CLI      = 2,
+	WMI_VDEV_SUBTYPE_10_4_P2P_GO       = 3,
+	WMI_VDEV_SUBTYPE_10_4_PROXY_STA    = 4,
+	WMI_VDEV_SUBTYPE_10_4_MESH_NON_11S = 5,
+	WMI_VDEV_SUBTYPE_10_4_MESH_11S     = 6,
 };
 
 /* values for vdev_subtype */
@@ -4278,9 +4532,9 @@
 	__le32 flags;
 	/* ssid field. Only valid for AP/GO/IBSS/BTAmp VDEV type. */
 	struct wmi_ssid ssid;
-	/* beacon/probe reponse xmit rate. Applicable for SoftAP. */
+	/* beacon/probe response xmit rate. Applicable for SoftAP. */
 	__le32 bcn_tx_rate;
-	/* beacon/probe reponse xmit power. Applicable for SoftAP. */
+	/* beacon/probe response xmit power. Applicable for SoftAP. */
 	__le32 bcn_tx_power;
 	/* number of p2p NOA descriptor(s) from scan entry */
 	__le32 num_noa_descriptors;
@@ -4493,6 +4747,7 @@
 	u32 meru_vc;
 	u32 rx_decap_type;
 	u32 bw_nss_ratemask;
+	u32 set_tsf;
 };
 
 #define WMI_VDEV_PARAM_UNSUPPORTED 0
@@ -4507,7 +4762,7 @@
 	WMI_VDEV_PARAM_BEACON_INTERVAL,
 	/* Listen interval in TUs */
 	WMI_VDEV_PARAM_LISTEN_INTERVAL,
-	/* muticast rate in Mbps */
+	/* multicast rate in Mbps */
 	WMI_VDEV_PARAM_MULTICAST_RATE,
 	/* management frame rate in Mbps */
 	WMI_VDEV_PARAM_MGMT_TX_RATE,
@@ -4638,7 +4893,7 @@
 	WMI_10X_VDEV_PARAM_BEACON_INTERVAL,
 	/* Listen interval in TUs */
 	WMI_10X_VDEV_PARAM_LISTEN_INTERVAL,
-	/* muticast rate in Mbps */
+	/* multicast rate in Mbps */
 	WMI_10X_VDEV_PARAM_MULTICAST_RATE,
 	/* management frame rate in Mbps */
 	WMI_10X_VDEV_PARAM_MGMT_TX_RATE,
@@ -4749,6 +5004,7 @@
 	WMI_10X_VDEV_PARAM_RTS_FIXED_RATE,
 	WMI_10X_VDEV_PARAM_VHT_SGIMASK,
 	WMI_10X_VDEV_PARAM_VHT80_RATEMASK,
+	WMI_10X_VDEV_PARAM_TSF_INCREMENT,
 };
 
 enum wmi_10_4_vdev_param {
@@ -4818,6 +5074,12 @@
 	WMI_10_4_VDEV_PARAM_MERU_VC,
 	WMI_10_4_VDEV_PARAM_RX_DECAP_TYPE,
 	WMI_10_4_VDEV_PARAM_BW_NSS_RATEMASK,
+	WMI_10_4_VDEV_PARAM_SENSOR_AP,
+	WMI_10_4_VDEV_PARAM_BEACON_RATE,
+	WMI_10_4_VDEV_PARAM_DTIM_ENABLE_CTS,
+	WMI_10_4_VDEV_PARAM_STA_KICKOUT,
+	WMI_10_4_VDEV_PARAM_CAPABILITIES,
+	WMI_10_4_VDEV_PARAM_TSF_INCREMENT,
 };
 
 #define WMI_VDEV_PARAM_TXBF_SU_TX_BFEE BIT(0)
@@ -4876,7 +5138,7 @@
 } __packed;
 
 /* VDEV start response status codes */
-/* VDEV succesfully started */
+/* VDEV successfully started */
 #define WMI_INIFIED_VDEV_START_RESPONSE_STATUS_SUCCESS	0x0
 
 /* requested VDEV not found */
@@ -5192,7 +5454,7 @@
 #define WMI_UAPSD_AC_TYPE_TRIG 1
 
 #define WMI_UAPSD_AC_BIT_MASK(ac, type) \
-	((type ==  WMI_UAPSD_AC_TYPE_DELI) ? (1<<(ac<<1)) : (1<<((ac<<1)+1)))
+	(type == WMI_UAPSD_AC_TYPE_DELI ? 1 << (ac << 1) : 1 << ((ac << 1) + 1))
 
 enum wmi_sta_ps_param_uapsd {
 	WMI_STA_PS_UAPSD_AC0_DELIVERY_EN = (1 << 0),
@@ -5405,6 +5667,16 @@
 	struct wmi_bcn_info bcn_info[0];
 } __packed;
 
+struct wmi_10_2_4_bcn_info {
+	struct wmi_tim_info tim_info;
+	/* The 10.2.4 FW doesn't have p2p NOA info */
+} __packed;
+
+struct wmi_10_2_4_host_swba_event {
+	__le32 vdev_map;
+	struct wmi_10_2_4_bcn_info bcn_info[0];
+} __packed;
+
 /* 16 words = 512 client + 1 word = for guard */
 #define WMI_10_4_TIM_BITMAP_ARRAY_SIZE 17
 
@@ -5641,21 +5913,79 @@
 	__le32 callback_enable;
 } __packed;
 
-#define WMI_PEER_AUTH           0x00000001
-#define WMI_PEER_QOS            0x00000002
-#define WMI_PEER_NEED_PTK_4_WAY 0x00000004
-#define WMI_PEER_NEED_GTK_2_WAY 0x00000010
-#define WMI_PEER_APSD           0x00000800
-#define WMI_PEER_HT             0x00001000
-#define WMI_PEER_40MHZ          0x00002000
-#define WMI_PEER_STBC           0x00008000
-#define WMI_PEER_LDPC           0x00010000
-#define WMI_PEER_DYN_MIMOPS     0x00020000
-#define WMI_PEER_STATIC_MIMOPS  0x00040000
-#define WMI_PEER_SPATIAL_MUX    0x00200000
-#define WMI_PEER_VHT            0x02000000
-#define WMI_PEER_80MHZ          0x04000000
-#define WMI_PEER_VHT_2G         0x08000000
+struct wmi_peer_flags_map {
+	u32 auth;
+	u32 qos;
+	u32 need_ptk_4_way;
+	u32 need_gtk_2_way;
+	u32 apsd;
+	u32 ht;
+	u32 bw40;
+	u32 stbc;
+	u32 ldbc;
+	u32 dyn_mimops;
+	u32 static_mimops;
+	u32 spatial_mux;
+	u32 vht;
+	u32 bw80;
+	u32 vht_2g;
+	u32 pmf;
+};
+
+enum wmi_peer_flags {
+	WMI_PEER_AUTH = 0x00000001,
+	WMI_PEER_QOS = 0x00000002,
+	WMI_PEER_NEED_PTK_4_WAY = 0x00000004,
+	WMI_PEER_NEED_GTK_2_WAY = 0x00000010,
+	WMI_PEER_APSD = 0x00000800,
+	WMI_PEER_HT = 0x00001000,
+	WMI_PEER_40MHZ = 0x00002000,
+	WMI_PEER_STBC = 0x00008000,
+	WMI_PEER_LDPC = 0x00010000,
+	WMI_PEER_DYN_MIMOPS = 0x00020000,
+	WMI_PEER_STATIC_MIMOPS = 0x00040000,
+	WMI_PEER_SPATIAL_MUX = 0x00200000,
+	WMI_PEER_VHT = 0x02000000,
+	WMI_PEER_80MHZ = 0x04000000,
+	WMI_PEER_VHT_2G = 0x08000000,
+	WMI_PEER_PMF = 0x10000000,
+};
+
+enum wmi_10x_peer_flags {
+	WMI_10X_PEER_AUTH = 0x00000001,
+	WMI_10X_PEER_QOS = 0x00000002,
+	WMI_10X_PEER_NEED_PTK_4_WAY = 0x00000004,
+	WMI_10X_PEER_NEED_GTK_2_WAY = 0x00000010,
+	WMI_10X_PEER_APSD = 0x00000800,
+	WMI_10X_PEER_HT = 0x00001000,
+	WMI_10X_PEER_40MHZ = 0x00002000,
+	WMI_10X_PEER_STBC = 0x00008000,
+	WMI_10X_PEER_LDPC = 0x00010000,
+	WMI_10X_PEER_DYN_MIMOPS = 0x00020000,
+	WMI_10X_PEER_STATIC_MIMOPS = 0x00040000,
+	WMI_10X_PEER_SPATIAL_MUX = 0x00200000,
+	WMI_10X_PEER_VHT = 0x02000000,
+	WMI_10X_PEER_80MHZ = 0x04000000,
+};
+
+enum wmi_10_2_peer_flags {
+	WMI_10_2_PEER_AUTH = 0x00000001,
+	WMI_10_2_PEER_QOS = 0x00000002,
+	WMI_10_2_PEER_NEED_PTK_4_WAY = 0x00000004,
+	WMI_10_2_PEER_NEED_GTK_2_WAY = 0x00000010,
+	WMI_10_2_PEER_APSD = 0x00000800,
+	WMI_10_2_PEER_HT = 0x00001000,
+	WMI_10_2_PEER_40MHZ = 0x00002000,
+	WMI_10_2_PEER_STBC = 0x00008000,
+	WMI_10_2_PEER_LDPC = 0x00010000,
+	WMI_10_2_PEER_DYN_MIMOPS = 0x00020000,
+	WMI_10_2_PEER_STATIC_MIMOPS = 0x00040000,
+	WMI_10_2_PEER_SPATIAL_MUX = 0x00200000,
+	WMI_10_2_PEER_VHT = 0x02000000,
+	WMI_10_2_PEER_80MHZ = 0x04000000,
+	WMI_10_2_PEER_VHT_2G = 0x08000000,
+	WMI_10_2_PEER_PMF = 0x10000000,
+};
 
 /*
  * Peer rate capabilities.
@@ -5721,6 +6051,11 @@
 	__le32 info0; /* WMI_PEER_ASSOC_INFO0_ */
 } __packed;
 
+struct wmi_10_4_peer_assoc_complete_cmd {
+	struct wmi_10_2_peer_assoc_complete_cmd cmd;
+	__le32 peer_bw_rxnss_override;
+} __packed;
+
 struct wmi_peer_assoc_complete_arg {
 	u8 addr[ETH_ALEN];
 	u32 vdev_id;
@@ -5768,6 +6103,13 @@
 	__le32 noise_floor;
 	__le32 rx_clear_count;
 	__le32 cycle_count;
+	__le32 chan_tx_pwr_range;
+	__le32 chan_tx_pwr_tp;
+	__le32 rx_frame_count;
+	__le32 my_bss_rx_cycle_count;
+	__le32 rx_11b_mode_data_duration;
+	__le32 tx_frame_cnt;
+	__le32 mac_clk_mhz;
 } __packed;
 
 struct wmi_10_4_chan_info_event {
@@ -5910,6 +6252,20 @@
 	__le32 config_valid;
 } __packed;
 
+struct wmi_10_4_dbglog_cfg_cmd {
+	/* bitmask to hold mod id config */
+	__le64 module_enable;
+
+	/* see ATH10K_DBGLOG_CFG_ */
+	__le32 config_enable;
+
+	/* mask of module id bits to be changed */
+	__le64 module_valid;
+
+	/* mask of config bits to be changed, see ATH10K_DBGLOG_CFG_ */
+	__le32 config_valid;
+} __packed;
+
 enum wmi_roam_reason {
 	WMI_ROAM_REASON_BETTER_AP = 1,
 	WMI_ROAM_REASON_BEACON_MISS = 2,
@@ -5947,6 +6303,17 @@
 	__le32 vdev_id;
 };
 
+struct wmi_peer_delete_resp_ev_arg {
+	__le32 vdev_id;
+	struct wmi_mac_addr peer_addr;
+};
+
+struct wmi_tlv_mgmt_tx_compl_ev_arg {
+	__le32 desc_id;
+	__le32 status;
+	__le32 pdev_id;
+};
+
 struct wmi_mgmt_rx_ev_arg {
 	__le32 channel;
 	__le32 snr;
@@ -5954,6 +6321,7 @@
 	__le32 phy_mode;
 	__le32 buf_len;
 	__le32 status; /* %WMI_RX_STATUS_ */
+	struct wmi_mgmt_rx_ext_info ext_info;
 };
 
 struct wmi_ch_info_ev_arg {
@@ -5966,6 +6334,10 @@
 	__le32 chan_tx_pwr_range;
 	__le32 chan_tx_pwr_tp;
 	__le32 rx_frame_count;
+	__le32 my_bss_rx_cycle_count;
+	__le32 rx_11b_mode_data_duration;
+	__le32 tx_frame_cnt;
+	__le32 mac_clk_mhz;
 };
 
 struct wmi_vdev_start_ev_arg {
@@ -6004,6 +6376,8 @@
 	u32 tsf_u32;
 	u32 buf_len;
 	const void *phyerrs;
+	u32 phy_err_mask0;
+	u32 phy_err_mask1;
 };
 
 struct wmi_svc_rdy_ev_arg {
@@ -6036,11 +6410,26 @@
 	__le32 rssi;
 };
 
+struct wmi_echo_ev_arg {
+	__le32 value;
+};
+
 struct wmi_pdev_temperature_event {
 	/* temperature value in degrees Celsius */
 	__le32 temperature;
 } __packed;
 
+struct wmi_pdev_bss_chan_info_event {
+	__le32 freq;
+	__le32 noise_floor;
+	__le64 cycle_busy;
+	__le64 cycle_total;
+	__le64 cycle_tx;
+	__le64 cycle_rx;
+	__le64 cycle_rx_bss;
+	__le32 reserved;
+} __packed;
+
 /* WOW structures */
 enum wmi_wow_wakeup_event {
 	WOW_BMISS_EVENT = 0,
@@ -6239,6 +6628,21 @@
 	__le32 cca_detect_margin;
 } __packed;
 
+enum wmi_host_platform_type {
+	WMI_HOST_PLATFORM_HIGH_PERF,
+	WMI_HOST_PLATFORM_LOW_PERF,
+};
+
+enum wmi_bss_survey_req_type {
+	WMI_BSS_SURVEY_REQ_TYPE_READ = 1,
+	WMI_BSS_SURVEY_REQ_TYPE_READ_CLEAR,
+};
+
+struct wmi_pdev_chan_info_req_cmd {
+	__le32 type;
+	__le32 reserved;
+} __packed;
+
 struct ath10k;
 struct ath10k_vif;
 struct ath10k_fw_stats_pdev;
@@ -6281,13 +6685,17 @@
 int ath10k_wmi_start_scan_verify(const struct wmi_start_scan_arg *arg);
 
 int ath10k_wmi_event_scan(struct ath10k *ar, struct sk_buff *skb);
+int ath10k_wmi_tlv_event_peer_delete_resp(struct ath10k *ar,
+					  struct sk_buff *skb);
 int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb);
+int ath10k_wmi_tlv_event_mgmt_tx_compl(struct ath10k *ar, struct sk_buff *skb);
 void ath10k_wmi_event_chan_info(struct ath10k *ar, struct sk_buff *skb);
 void ath10k_wmi_event_echo(struct ath10k *ar, struct sk_buff *skb);
 int ath10k_wmi_event_debug_mesg(struct ath10k *ar, struct sk_buff *skb);
 void ath10k_wmi_event_update_stats(struct ath10k *ar, struct sk_buff *skb);
 void ath10k_wmi_event_vdev_start_resp(struct ath10k *ar, struct sk_buff *skb);
 void ath10k_wmi_event_vdev_stopped(struct ath10k *ar, struct sk_buff *skb);
+void ath10k_wmi_event_vdev_delete_resp(struct ath10k *ar, struct sk_buff *skb);
 void ath10k_wmi_event_peer_sta_kickout(struct ath10k *ar, struct sk_buff *skb);
 void ath10k_wmi_event_host_swba(struct ath10k *ar, struct sk_buff *skb);
 void ath10k_wmi_event_tbttoffset_update(struct ath10k *ar, struct sk_buff *skb);
@@ -6336,5 +6744,8 @@
 void ath10k_wmi_10_4_op_fw_stats_fill(struct ath10k *ar,
 				      struct ath10k_fw_stats *fw_stats,
 				      char *buf);
+int ath10k_wmi_op_get_vdev_subtype(struct ath10k *ar,
+				   enum wmi_vdev_subtype subtype);
+int ath10k_wmi_barrier(struct ath10k *ar);
 
 #endif /* _WMI_H_ */
diff -ruw linux-4.4.115/drivers/net/wireless/ath/ath10k/wmi-ops.h linux-4.4.115-fbx/drivers/net/wireless/ath/ath10k/wmi-ops.h
--- linux-4.4.115/drivers/net/wireless/ath/ath10k/wmi-ops.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/net/wireless/ath/ath10k/wmi-ops.h	2019-01-22 16:16:25.427263830 +0100
@@ -1,6 +1,6 @@
 /*
  * Copyright (c) 2005-2011 Atheros Communications Inc.
- * Copyright (c) 2011-2014 Qualcomm Atheros, Inc.
+ * Copyright (c) 2011-2014, 2017 Qualcomm Atheros, Inc.
  *
  * Permission to use, copy, modify, and/or distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above
@@ -29,8 +29,12 @@
 			 struct wmi_scan_ev_arg *arg);
 	int (*pull_mgmt_rx)(struct ath10k *ar, struct sk_buff *skb,
 			    struct wmi_mgmt_rx_ev_arg *arg);
+	int (*pull_mgmt_tx_compl)(struct ath10k *ar, struct sk_buff *skb,
+				  struct wmi_tlv_mgmt_tx_compl_ev_arg *arg);
 	int (*pull_ch_info)(struct ath10k *ar, struct sk_buff *skb,
 			    struct wmi_ch_info_ev_arg *arg);
+	int (*pull_peer_delete_resp)(struct ath10k *ar, struct sk_buff *skb,
+				     struct wmi_peer_delete_resp_ev_arg *arg);
 	int (*pull_vdev_start)(struct ath10k *ar, struct sk_buff *skb,
 			       struct wmi_vdev_start_ev_arg *arg);
 	int (*pull_peer_kick)(struct ath10k *ar, struct sk_buff *skb,
@@ -51,6 +55,8 @@
 			    struct wmi_roam_ev_arg *arg);
 	int (*pull_wow_event)(struct ath10k *ar, struct sk_buff *skb,
 			      struct wmi_wow_ev_arg *arg);
+	int (*pull_echo_ev)(struct ath10k *ar, struct sk_buff *skb,
+			    struct wmi_echo_ev_arg *arg);
 	enum wmi_txbf_conf (*get_txbf_conf_scheme)(struct ath10k *ar);
 
 	struct sk_buff *(*gen_pdev_suspend)(struct ath10k *ar, u32 suspend_opt);
@@ -123,7 +129,10 @@
 					     enum wmi_force_fw_hang_type type,
 					     u32 delay_ms);
 	struct sk_buff *(*gen_mgmt_tx)(struct ath10k *ar, struct sk_buff *skb);
-	struct sk_buff *(*gen_dbglog_cfg)(struct ath10k *ar, u32 module_enable,
+	struct sk_buff *(*gen_mgmt_tx_send)(struct ath10k *ar,
+					    struct sk_buff *skb,
+					    dma_addr_t paddr);
+	struct sk_buff *(*gen_dbglog_cfg)(struct ath10k *ar, u64 module_enable,
 					  u32 log_level);
 	struct sk_buff *(*gen_pktlog_enable)(struct ath10k *ar, u32 filter);
 	struct sk_buff *(*gen_pktlog_disable)(struct ath10k *ar);
@@ -156,6 +165,10 @@
 					      u32 num_ac);
 	struct sk_buff *(*gen_sta_keepalive)(struct ath10k *ar,
 					     const struct wmi_sta_keepalive_arg *arg);
+	struct sk_buff *(*gen_set_arp_ns_offload)(struct ath10k *ar,
+						  struct ath10k_vif *arvif);
+	struct sk_buff *(*gen_gtk_offload)(struct ath10k *ar,
+					   struct ath10k_vif *arvif);
 	struct sk_buff *(*gen_wow_enable)(struct ath10k *ar);
 	struct sk_buff *(*gen_wow_add_wakeup_event)(struct ath10k *ar, u32 vdev_id,
 						    enum wmi_wow_wakeup_event event,
@@ -186,6 +199,20 @@
 							u8 enable,
 							u32 detect_level,
 							u32 detect_margin);
+	struct sk_buff *(*gen_set_pdev_mac_addr)(struct ath10k *ar, u32 pdev_id,
+						 u8 *mac_addr);
+
+	struct sk_buff *(*ext_resource_config)(struct ath10k *ar,
+					       enum wmi_host_platform_type type,
+					       u32 fw_feature_bitmap);
+	int (*get_vdev_subtype)(struct ath10k *ar,
+				enum wmi_vdev_subtype subtype);
+	struct sk_buff *(*gen_pdev_bss_chan_info_req)
+					(struct ath10k *ar,
+					 enum wmi_bss_survey_req_type type);
+	struct sk_buff *(*gen_echo)(struct ath10k *ar, u32 value);
+	struct sk_buff *(*gen_csa_offload)(struct ath10k *ar,
+					   u32 vdev_id, bool enable);
 };
 
 int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb, u32 cmd_id);
@@ -222,6 +249,16 @@
 }
 
 static inline int
+ath10k_wmi_pull_mgmt_tx_compl(struct ath10k *ar, struct sk_buff *skb,
+			      struct wmi_tlv_mgmt_tx_compl_ev_arg *arg)
+{
+	if (!ar->wmi.ops->pull_mgmt_tx_compl)
+		return -EOPNOTSUPP;
+
+	return ar->wmi.ops->pull_mgmt_tx_compl(ar, skb, arg);
+}
+
+static inline int
 ath10k_wmi_pull_mgmt_rx(struct ath10k *ar, struct sk_buff *skb,
 			struct wmi_mgmt_rx_ev_arg *arg)
 {
@@ -232,6 +269,16 @@
 }
 
 static inline int
+ath10k_wmi_pull_peer_delete_resp(struct ath10k *ar, struct sk_buff *skb,
+				 struct wmi_peer_delete_resp_ev_arg *arg)
+{
+	if (!ar->wmi.ops->pull_peer_delete_resp)
+		return -EOPNOTSUPP;
+
+	return ar->wmi.ops->pull_peer_delete_resp(ar, skb, arg);
+}
+
+static inline int
 ath10k_wmi_pull_ch_info(struct ath10k *ar, struct sk_buff *skb,
 			struct wmi_ch_info_ev_arg *arg)
 {
@@ -341,6 +388,16 @@
 	return ar->wmi.ops->pull_wow_event(ar, skb, arg);
 }
 
+static inline int
+ath10k_wmi_pull_echo_ev(struct ath10k *ar, struct sk_buff *skb,
+			struct wmi_echo_ev_arg *arg)
+{
+	if (!ar->wmi.ops->pull_echo_ev)
+		return -EOPNOTSUPP;
+
+	return ar->wmi.ops->pull_echo_ev(ar, skb, arg);
+}
+
 static inline enum wmi_txbf_conf
 ath10k_wmi_get_txbf_conf_scheme(struct ath10k *ar)
 {
@@ -351,6 +408,28 @@
 }
 
 static inline int
+ath10k_wmi_mgmt_tx_send(struct ath10k *ar, struct sk_buff *msdu,
+			dma_addr_t paddr)
+{
+	struct sk_buff *skb;
+	int ret;
+
+	if (!ar->wmi.ops->gen_mgmt_tx_send)
+		return -EOPNOTSUPP;
+
+	skb = ar->wmi.ops->gen_mgmt_tx_send(ar, msdu, paddr);
+	if (IS_ERR(skb))
+		return PTR_ERR(skb);
+
+	ret = ath10k_wmi_cmd_send(ar, skb,
+				  ar->wmi.cmd->mgmt_tx_send_cmdid);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+static inline int
 ath10k_wmi_mgmt_tx(struct ath10k *ar, struct sk_buff *msdu)
 {
 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(msdu);
@@ -364,7 +443,8 @@
 	if (IS_ERR(skb))
 		return PTR_ERR(skb);
 
-	ret = ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->mgmt_tx_cmdid);
+	ret = ath10k_wmi_cmd_send(ar, skb,
+				  ar->wmi.cmd->mgmt_tx_cmdid);
 	if (ret)
 		return ret;
 
@@ -930,7 +1010,7 @@
 }
 
 static inline int
-ath10k_wmi_dbglog_cfg(struct ath10k *ar, u32 module_enable, u32 log_level)
+ath10k_wmi_dbglog_cfg(struct ath10k *ar, u64 module_enable, u32 log_level)
 {
 	struct sk_buff *skb;
 
@@ -1145,6 +1225,40 @@
 }
 
 static inline int
+ath10k_wmi_set_arp_ns_offload(struct ath10k *ar, struct ath10k_vif *arvif)
+{
+	struct sk_buff *skb;
+	u32 cmd_id;
+
+	if (!ar->wmi.ops->gen_set_arp_ns_offload)
+		return -EOPNOTSUPP;
+
+	skb = ar->wmi.ops->gen_set_arp_ns_offload(ar, arvif);
+	if (IS_ERR(skb))
+		return PTR_ERR(skb);
+
+	cmd_id = ar->wmi.cmd->set_arp_ns_offload_cmdid;
+	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
+}
+
+static inline int
+ath10k_wmi_gtk_offload(struct ath10k *ar, struct ath10k_vif *arvif)
+{
+	struct sk_buff *skb;
+	u32 cmd_id;
+
+	if (!ar->wmi.ops->gen_gtk_offload)
+		return -EOPNOTSUPP;
+
+	skb = ar->wmi.ops->gen_gtk_offload(ar, arvif);
+	if (IS_ERR(skb))
+		return PTR_ERR(skb);
+
+	cmd_id = ar->wmi.cmd->gtk_offload_cmdid;
+	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
+}
+
+static inline int
 ath10k_wmi_wow_enable(struct ath10k *ar)
 {
 	struct sk_buff *skb;
@@ -1333,4 +1447,105 @@
 				   ar->wmi.cmd->pdev_enable_adaptive_cca_cmdid);
 }
 
+static inline int
+ath10k_wmi_ext_resource_config(struct ath10k *ar,
+			       enum wmi_host_platform_type type,
+			       u32 fw_feature_bitmap)
+{
+	struct sk_buff *skb;
+
+	if (!ar->wmi.ops->ext_resource_config)
+		return -EOPNOTSUPP;
+
+	skb = ar->wmi.ops->ext_resource_config(ar, type,
+					       fw_feature_bitmap);
+
+	if (IS_ERR(skb))
+		return PTR_ERR(skb);
+
+	return ath10k_wmi_cmd_send(ar, skb,
+				   ar->wmi.cmd->ext_resource_cfg_cmdid);
+}
+
+static inline int
+ath10k_wmi_get_vdev_subtype(struct ath10k *ar, enum wmi_vdev_subtype subtype)
+{
+	if (!ar->wmi.ops->get_vdev_subtype)
+		return -EOPNOTSUPP;
+
+	return ar->wmi.ops->get_vdev_subtype(ar, subtype);
+}
+
+static inline int
+ath10k_wmi_pdev_bss_chan_info_request(struct ath10k *ar,
+				      enum wmi_bss_survey_req_type type)
+{
+	struct ath10k_wmi *wmi = &ar->wmi;
+	struct sk_buff *skb;
+
+	if (!wmi->ops->gen_pdev_bss_chan_info_req)
+		return -EOPNOTSUPP;
+
+	skb = wmi->ops->gen_pdev_bss_chan_info_req(ar, type);
+	if (IS_ERR(skb))
+		return PTR_ERR(skb);
+
+	return ath10k_wmi_cmd_send(ar, skb,
+				   wmi->cmd->pdev_bss_chan_info_request_cmdid);
+}
+
+static inline int
+ath10k_wmi_csa_offload(struct ath10k *ar, u32 vdev_id, bool enable)
+{
+	struct sk_buff *skb;
+	u32 cmd_id;
+
+	if (!ar->wmi.ops->gen_csa_offload)
+		return -EOPNOTSUPP;
+
+	skb = ar->wmi.ops->gen_csa_offload(ar, vdev_id, enable);
+	if (IS_ERR(skb))
+		return PTR_ERR(skb);
+
+	cmd_id = ar->wmi.cmd->csa_offload_enable_cmdid;
+	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
+}
+
+static inline int
+ath10k_wmi_echo(struct ath10k *ar, u32 value)
+{
+	struct ath10k_wmi *wmi = &ar->wmi;
+	struct sk_buff *skb;
+
+	if (!wmi->ops->gen_echo)
+		return -EOPNOTSUPP;
+
+	skb = wmi->ops->gen_echo(ar, value);
+	if (IS_ERR(skb))
+		return PTR_ERR(skb);
+
+	return ath10k_wmi_cmd_send(ar, skb, wmi->cmd->echo_cmdid);
+}
+
+static inline int
+ath10k_gen_set_base_mac_addr(struct ath10k *ar, u8 *mac)
+{
+	struct sk_buff *skb;
+	int ret;
+
+	if (!ar->wmi.ops->gen_set_pdev_mac_addr)
+		return -EOPNOTSUPP;
+
+	skb = ar->wmi.ops->gen_set_pdev_mac_addr(ar, 0, mac);
+	if (IS_ERR(skb))
+		return PTR_ERR(skb);
+
+	ret = ath10k_wmi_cmd_send(ar, skb,
+				  ar->wmi.cmd->pdev_set_base_macaddr_cmdid);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
 #endif
diff -ruw linux-4.4.115/drivers/net/wireless/ath/ath10k/wmi-tlv.c linux-4.4.115-fbx/drivers/net/wireless/ath/ath10k/wmi-tlv.c
--- linux-4.4.115/drivers/net/wireless/ath/ath10k/wmi-tlv.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/net/wireless/ath/ath10k/wmi-tlv.c	2019-10-29 09:26:24.461211183 +0100
@@ -1,6 +1,6 @@
 /*
  * Copyright (c) 2005-2011 Atheros Communications Inc.
- * Copyright (c) 2011-2014 Qualcomm Atheros, Inc.
+ * Copyright (c) 2011-2014, 2017 Qualcomm Atheros, Inc.
  *
  * Permission to use, copy, modify, and/or distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above
@@ -468,6 +468,9 @@
 	case WMI_TLV_VDEV_STOPPED_EVENTID:
 		ath10k_wmi_event_vdev_stopped(ar, skb);
 		break;
+	case WMI_TLV_VDEV_DELETE_RESP_EVENTID:
+		ath10k_wmi_event_vdev_delete_resp(ar, skb);
+		break;
 	case WMI_TLV_PEER_STA_KICKOUT_EVENTID:
 		ath10k_wmi_event_peer_sta_kickout(ar, skb);
 		break;
@@ -552,8 +555,14 @@
 	case WMI_TLV_TX_PAUSE_EVENTID:
 		ath10k_wmi_tlv_event_tx_pause(ar, skb);
 		break;
+	case WMI_TLV_PEER_DELETE_RESP_EVENTID:
+		ath10k_wmi_tlv_event_peer_delete_resp(ar, skb);
+		break;
+	case WMI_TLV_MGMT_TX_COMPLETION_EVENTID:
+		ath10k_wmi_tlv_event_mgmt_tx_compl(ar, skb);
+		break;
 	default:
-		ath10k_warn(ar, "Unknown eventid: %d\n", id);
+		ath10k_dbg(ar, ATH10K_DBG_WMI, "Unknown eventid: %d\n", id);
 		break;
 	}
 
@@ -593,6 +602,31 @@
 	return 0;
 }
 
+static int ath10k_wmi_tlv_op_pull_mgmt_tx_compl_ev(
+				struct ath10k *ar, struct sk_buff *skb,
+				struct wmi_tlv_mgmt_tx_compl_ev_arg *arg)
+{
+	const void **tb;
+	const struct wmi_tlv_mgmt_tx_compl_ev *ev;
+	int ret;
+
+	tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
+	if (IS_ERR(tb)) {
+		ret = PTR_ERR(tb);
+		ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
+		return ret;
+	}
+
+	ev = tb[WMI_TLV_TAG_STRUCT_MGMT_TX_COMPL];
+	if (!ev) {
+		kfree(tb);
+		return -EPROTO;
+	}
+
+	arg->desc_id = ev->desc_id;
+	arg->status = ev->status;
+	arg->pdev_id = ev->pdev_id;
+
+	kfree(tb);
+	return 0;
+}
+
 static int ath10k_wmi_tlv_op_pull_mgmt_rx_ev(struct ath10k *ar,
 					     struct sk_buff *skb,
 					     struct wmi_mgmt_rx_ev_arg *arg)
@@ -642,6 +676,34 @@
 	return 0;
 }
 
+static int ath10k_wmi_tlv_op_pull_peer_delete_ev(
+			struct ath10k *ar, struct sk_buff *skb,
+			struct wmi_peer_delete_resp_ev_arg *arg)
+{
+	const void **tb;
+	const struct wmi_peer_delete_resp_ev_arg *ev;
+	int ret;
+
+	tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
+	if (IS_ERR(tb)) {
+		ret = PTR_ERR(tb);
+		ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
+		return ret;
+	}
+
+	ev = tb[WMI_TLV_TAG_STRUCT_PEER_DELETE_RESP_EVENT];
+	if (!ev) {
+		kfree(tb);
+		return -EPROTO;
+	}
+
+	arg->vdev_id = ev->vdev_id;
+	arg->peer_addr = ev->peer_addr;
+
+	kfree(tb);
+	return 0;
+}
+
 static int ath10k_wmi_tlv_op_pull_ch_info_ev(struct ath10k *ar,
 					     struct sk_buff *skb,
 					     struct wmi_ch_info_ev_arg *arg)
@@ -669,6 +731,13 @@
 	arg->noise_floor = ev->noise_floor;
 	arg->rx_clear_count = ev->rx_clear_count;
 	arg->cycle_count = ev->cycle_count;
+	arg->chan_tx_pwr_range = ev->chan_tx_pwr_range;
+	arg->chan_tx_pwr_tp = ev->chan_tx_pwr_tp;
+	arg->rx_frame_count = ev->rx_frame_count;
+	arg->my_bss_rx_cycle_count = ev->my_bss_rx_cycle_count;
+	arg->rx_11b_mode_data_duration = ev->rx_11b_mode_data_duration;
+	arg->tx_frame_cnt = ev->tx_frame_cnt;
+	arg->mac_clk_mhz = ev->mac_clk_mhz;
 
 	kfree(tb);
 	return 0;
@@ -882,6 +951,11 @@
 	arg->buf_len = __le32_to_cpu(ev->buf_len);
 	arg->phyerrs = phyerrs;
 
+	if (QCA_REV_WCN3990(ar)) {
+		arg->phy_err_mask0 = __le32_to_cpu(ev->rs_phy_err_mask0);
+		arg->phy_err_mask1 = __le32_to_cpu(ev->rs_phy_err_mask1);
+	}
+
 	kfree(tb);
 	return 0;
 }
@@ -937,7 +1011,12 @@
 
 	ev = tb[WMI_TLV_TAG_STRUCT_SERVICE_READY_EVENT];
 	reg = tb[WMI_TLV_TAG_STRUCT_HAL_REG_CAPABILITIES];
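+	/* WCN3990 firmware reports the service bitmap at a fixed offset in
+	 * the event rather than as a TLV-parsed array
+	 */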
+	if (QCA_REV_WCN3990(ar)) {
+		svc_bmap = (__le32 *)(skb->data +
+			WMI_TLV_TAG_STRUCT_HL_1_0_SVC_OFFSET);
+	} else {
-	svc_bmap = tb[WMI_TLV_TAG_ARRAY_UINT32];
+		svc_bmap = tb[WMI_TLV_TAG_ARRAY_UINT32];
+	}
 	mem_reqs = tb[WMI_TLV_TAG_ARRAY_STRUCT];
 
 	if (!ev || !reg || !svc_bmap || !mem_reqs) {
@@ -1229,6 +1308,33 @@
 	return 0;
 }
 
+static int ath10k_wmi_tlv_op_pull_echo_ev(struct ath10k *ar,
+					  struct sk_buff *skb,
+					  struct wmi_echo_ev_arg *arg)
+{
+	const void **tb;
+	const struct wmi_echo_event *ev;
+	int ret;
+
+	tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
+	if (IS_ERR(tb)) {
+		ret = PTR_ERR(tb);
+		ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
+		return ret;
+	}
+
+	ev = tb[WMI_TLV_TAG_STRUCT_ECHO_EVENT];
+	if (!ev) {
+		kfree(tb);
+		return -EPROTO;
+	}
+
+	arg->value = ev->value;
+
+	kfree(tb);
+	return 0;
+}
+
 static struct sk_buff *
 ath10k_wmi_tlv_op_gen_pdev_suspend(struct ath10k *ar, u32 opt)
 {
@@ -1379,7 +1485,18 @@
 	cmd->num_host_mem_chunks = __cpu_to_le32(ar->wmi.num_mem_chunks);
 
 	cfg->num_vdevs = __cpu_to_le32(TARGET_TLV_NUM_VDEVS);
+	if (QCA_REV_WCN3990(ar)) {
+		cfg->num_peers = __cpu_to_le32(TARGET_HL_10_TLV_NUM_PEERS);
+		cfg->ast_skid_limit =
+				__cpu_to_le32(TARGET_HL_10_TLV_AST_SKID_LIMIT);
+		cfg->num_wds_entries =
+				__cpu_to_le32(TARGET_HL_10_TLV_NUM_WDS_ENTRIES);
+	} else {
-	cfg->num_peers = __cpu_to_le32(TARGET_TLV_NUM_PEERS);
+		cfg->num_peers = __cpu_to_le32(TARGET_TLV_NUM_PEERS);
+		cfg->ast_skid_limit = __cpu_to_le32(0x10);
+		cfg->num_wds_entries = __cpu_to_le32(0x20);
+	}
 
 	if (test_bit(WMI_SERVICE_RX_FULL_REORDER, ar->wmi.svc_map)) {
 		cfg->num_offload_peers = __cpu_to_le32(TARGET_TLV_NUM_VDEVS);
@@ -1391,7 +1508,6 @@
 
 	cfg->num_peer_keys = __cpu_to_le32(2);
 	cfg->num_tids = __cpu_to_le32(TARGET_TLV_NUM_TIDS);
-	cfg->ast_skid_limit = __cpu_to_le32(0x10);
 	cfg->tx_chain_mask = __cpu_to_le32(0x7);
 	cfg->rx_chain_mask = __cpu_to_le32(0x7);
 	cfg->rx_timeout_pri[0] = __cpu_to_le32(0x64);
@@ -1407,7 +1523,6 @@
 	cfg->num_mcast_table_elems = __cpu_to_le32(0);
 	cfg->mcast2ucast_mode = __cpu_to_le32(0);
 	cfg->tx_dbg_log_size = __cpu_to_le32(0x400);
-	cfg->num_wds_entries = __cpu_to_le32(0x20);
 	cfg->dma_burst_size = __cpu_to_le32(0);
 	cfg->mac_aggr_delim = __cpu_to_le32(0);
 	cfg->rx_skip_defrag_timeout_dup_detection_check = __cpu_to_le32(0);
@@ -1424,6 +1539,12 @@
 	cfg->keep_alive_pattern_size = __cpu_to_le32(0);
 	cfg->max_tdls_concurrent_sleep_sta = __cpu_to_le32(1);
 	cfg->max_tdls_concurrent_buffer_sta = __cpu_to_le32(1);
+	cfg->wmi_send_separate = __cpu_to_le32(0);
+	cfg->num_ocb_vdevs = __cpu_to_le32(0);
+	cfg->num_ocb_channels = __cpu_to_le32(0);
+	cfg->num_ocb_schedules = __cpu_to_le32(0);
+	cfg->host_capab =
+		__cpu_to_le32(WMI_TLV_TX_MSDU_ID_NEW_PARTITION_SUPPORT);
 
 	ath10k_wmi_put_host_mem_chunks(ar, chunks);
 
@@ -1477,10 +1598,10 @@
 	cmd->ie_len = __cpu_to_le32(arg->ie_len);
 	cmd->num_probes = __cpu_to_le32(3);
 
-	/* FIXME: There are some scan flag inconsistencies across firmwares,
-	 * e.g. WMI-TLV inverts the logic behind the following flag.
-	 */
-	cmd->common.scan_ctrl_flags ^= __cpu_to_le32(WMI_SCAN_FILTER_PROBE_REQ);
+	if (!QCA_REV_WCN3990(ar)) {
+		cmd->common.scan_ctrl_flags ^=
+			__cpu_to_le32(WMI_SCAN_FILTER_PROBE_REQ);
+	}
 
 	ptr += sizeof(*tlv);
 	ptr += sizeof(*cmd);
@@ -2401,6 +2522,30 @@
 	return skb;
 }
 
+static int
+ath10k_wmi_mgmt_tx_alloc_msdu_id(struct ath10k *ar, struct sk_buff *skb,
+				 dma_addr_t paddr)
+{
+	struct ath10k_wmi *wmi = &ar->wmi;
+	struct ath10k_mgmt_tx_pkt_addr *pkt_addr;
+	int ret;
+
+	pkt_addr = kmalloc(sizeof(*pkt_addr), GFP_ATOMIC);
+	if (!pkt_addr)
+		return -ENOMEM;
+
+	pkt_addr->vaddr = skb;
+	pkt_addr->paddr = paddr;
+
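+	/* the id allocated here is carried as desc_id in the tx command and
+	 * echoed back by the firmware in the tx completion event
+	 */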
+	spin_lock_bh(&wmi->mgmt_tx_lock);
+	ret = idr_alloc(&wmi->mgmt_pending_tx, pkt_addr, 0,
+			wmi->mgmt_max_num_pending_tx, GFP_ATOMIC);
+	spin_unlock_bh(&wmi->mgmt_tx_lock);
+
+	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi mgmt tx alloc msdu_id %d\n", ret);
+	return ret;
+}
+
 static struct sk_buff *
 ath10k_wmi_tlv_op_gen_request_stats(struct ath10k *ar, u32 stats_mask)
 {
@@ -2423,6 +2568,84 @@
 }
 
 static struct sk_buff *
+ath10k_wmi_tlv_op_gen_mgmt_tx_send(struct ath10k *ar, struct sk_buff *msdu,
+				   dma_addr_t paddr)
+{
+	struct ath10k_skb_cb *cb = ATH10K_SKB_CB(msdu);
+	struct wmi_tlv_mgmt_tx_cmd *cmd;
+	struct ieee80211_hdr *hdr;
+	struct ath10k_vif *arvif;
+	u32 buf_len = msdu->len;
+	struct wmi_tlv *tlv;
+	struct sk_buff *skb;
+	int desc_id, len;
+	u32 vdev_id;
+	void *ptr;
+
+	hdr = (struct ieee80211_hdr *)msdu->data;
+
+	if (!cb->vif)
+		return ERR_PTR(-EINVAL);
+
+	arvif = (void *)cb->vif->drv_priv;
+	vdev_id = arvif->vdev_id;
+
+	if (WARN_ON_ONCE(!ieee80211_is_mgmt(hdr->frame_control)))
+		return ERR_PTR(-EINVAL);
+
+	len = sizeof(*cmd) + buf_len;
+
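+	/* protected robust management frames need room for the CCMP MIC
+	 * appended during encryption
+	 */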
+	if ((ieee80211_is_action(hdr->frame_control) ||
+	     ieee80211_is_deauth(hdr->frame_control) ||
+	     ieee80211_is_disassoc(hdr->frame_control)) &&
+	     ieee80211_has_protected(hdr->frame_control)) {
+		len += IEEE80211_CCMP_MIC_LEN;
+		buf_len += IEEE80211_CCMP_MIC_LEN;
+	}
+
+	len += sizeof(*tlv);
+	len = round_up(len, 4);
+	skb = ath10k_wmi_alloc_skb(ar, len);
+	if (!skb)
+		return ERR_PTR(-ENOMEM);
+
+	desc_id = ath10k_wmi_mgmt_tx_alloc_msdu_id(ar, msdu, paddr);
+	if (desc_id < 0)
+		goto msdu_id_alloc_fail;
+
+	ptr = (void *)skb->data;
+	tlv = ptr;
+	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_MGMT_TX_CMD);
+	tlv->len = __cpu_to_le16(sizeof(cmd->hdr));
+	cmd = (void *)tlv->value;
+	cmd->hdr.vdev_id = __cpu_to_le32(vdev_id);
+	cmd->hdr.desc_id = __cpu_to_le32(desc_id);
+	cmd->hdr.chanfreq = 0;
+	cmd->hdr.buf_len = __cpu_to_le32(buf_len);
+	cmd->hdr.frame_len = __cpu_to_le32(msdu->len);
+	cmd->hdr.paddr = __cpu_to_le64(paddr);
+	cmd->data_len = __cpu_to_le16(buf_len);
+	cmd->data_tag = __cpu_to_le16(0x11);
+
+	/* cmd->data_len and cmd->data_tag already form the TLV header
+	 * (len, tag) for the frame bytes placed in cmd->buf, so no extra
+	 * wmi_tlv is written here
+	 */
+	memcpy(cmd->buf, msdu->data, buf_len);
+
+	return skb;
+
+msdu_id_alloc_fail:
+	dev_kfree_skb(skb);
+	return ERR_PTR(desc_id);
+}
+
+static struct sk_buff *
 ath10k_wmi_tlv_op_gen_force_fw_hang(struct ath10k *ar,
 				    enum wmi_force_fw_hang_type type,
 				    u32 delay_ms)
@@ -2447,7 +2670,7 @@
 }
 
 static struct sk_buff *
-ath10k_wmi_tlv_op_gen_dbglog_cfg(struct ath10k *ar, u32 module_enable,
+ath10k_wmi_tlv_op_gen_dbglog_cfg(struct ath10k *ar, u64 module_enable,
 				 u32 log_level) {
 	struct wmi_tlv_dbglog_cmd *cmd;
 	struct wmi_tlv *tlv;
@@ -2856,6 +3079,165 @@
 }
 
 static struct sk_buff *
+ath10k_wmi_tlv_op_gen_csa_offload(struct ath10k *ar, u32 vdev_id, bool enable)
+{
+	struct wmi_csa_offload_enable_cmd *cmd;
+	struct wmi_tlv *tlv;
+	struct sk_buff *skb;
+	int len;
+
+	len = sizeof(*cmd) + sizeof(*tlv);
+	skb = ath10k_wmi_alloc_skb(ar, len);
+	if (!skb)
+		return ERR_PTR(-ENOMEM);
+
+	tlv = (void *)skb->data;
+	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_CSA_OFFLOAD_ENABLE_CMD);
+	tlv->len = __cpu_to_le16(sizeof(*cmd));
+	cmd = (void *)tlv->value;
+
+	cmd->vdev_id = __cpu_to_le32(vdev_id);
+	if (enable)
+		cmd->csa_offload_enable |=
+			 __cpu_to_le32(WMI_CSA_OFFLOAD_ENABLE);
+	else
+		cmd->csa_offload_enable |=
+			__cpu_to_le32(WMI_CSA_OFFLOAD_DISABLE);
+
+	ath10k_dbg(ar, ATH10K_DBG_WMI,
+		   "wmi CSA offload for vdev: %d\n", vdev_id);
+	return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_op_gen_gtk_offload(struct ath10k *ar, struct ath10k_vif *arvif)
+{
+	struct wmi_tlv_gtk_offload_cmd *cmd;
+	struct wmi_tlv *tlv;
+	struct sk_buff *skb;
+	struct wmi_gtk_rekey_data *rekey_data = &arvif->gtk_rekey_data;
+	int len;
+
+	len = sizeof(*cmd) + sizeof(*tlv);
+	skb = ath10k_wmi_alloc_skb(ar, len);
+	if (!skb)
+		return ERR_PTR(-ENOMEM);
+
+	tlv = (void *)skb->data;
+	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_GTK_OFFLOAD_CMD);
+	tlv->len = __cpu_to_le16(sizeof(*cmd));
+	cmd = (void *)tlv->value;
+	if (rekey_data->enable_offload) {
+		cmd->vdev_id = __cpu_to_le32(arvif->vdev_id);
+		cmd->flags |= __cpu_to_le32(WMI_GTK_OFFLOAD_ENABLE_OPCODE);
+		memcpy(cmd->kek, rekey_data->kek, NL80211_KEK_LEN);
+		memcpy(cmd->kck, rekey_data->kck, NL80211_KCK_LEN);
+		cmd->replay_ctr = __cpu_to_le64(rekey_data->replay_ctr);
+	} else {
+		cmd->vdev_id = __cpu_to_le32(arvif->vdev_id);
+		cmd->flags |= __cpu_to_le32(WMI_GTK_OFFLOAD_DISABLE_OPCODE);
+	}
+
+	ath10k_dbg(ar, ATH10K_DBG_WMI,
+		   "wmi GTK offload for vdev: %d\n", arvif->vdev_id);
+	return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_set_arp_ns_offload(struct ath10k *ar,
+					 struct ath10k_vif *arvif)
+{
+	struct wmi_tlv_arp_ns_offload_cmd *cmd;
+	struct wmi_tlv *tlv;
+	struct sk_buff *skb;
+	size_t len;
+	void *ptr;
+	int i;
+	struct wmi_ns_arp_offload_req *arp = &arvif->arp_offload;
+	struct wmi_ns_arp_offload_req *ns = &arvif->ns_offload;
+	struct wmi_ns_offload *ns_tuple;
+	struct wmi_arp_offload *arp_tuple;
+
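+	/* layout: cmd TLV, then an array TLV of WMI_NS_ARP_OFFLOAD NS
+	 * tuples, then an array TLV of WMI_NS_ARP_OFFLOAD ARP tuples
+	 */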
+	len = sizeof(*cmd) + sizeof(*tlv) +
+		sizeof(*tlv) + WMI_NS_ARP_OFFLOAD *
+		(sizeof(struct wmi_ns_offload) + sizeof(*tlv)) +
+		sizeof(*tlv) + WMI_NS_ARP_OFFLOAD *
+		(sizeof(struct wmi_arp_offload) + sizeof(*tlv));
+
+	skb = ath10k_wmi_alloc_skb(ar, len);
+	if (!skb)
+		return ERR_PTR(-ENOMEM);
+
+	ptr = (void *)skb->data;
+	tlv = ptr;
+	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_SET_ARP_NS_OFFLOAD_CMD);
+	tlv->len = __cpu_to_le16(sizeof(*cmd));
+	cmd = (struct wmi_tlv_arp_ns_offload_cmd *)tlv->value;
+	cmd->flags = __cpu_to_le32(0);
+	cmd->vdev_id = __cpu_to_le32(arvif->vdev_id);
+
+	ptr += (sizeof(*tlv) + sizeof(*cmd));
+	tlv = ptr;
+	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT);
+	tlv->len = __cpu_to_le16(WMI_NS_ARP_OFFLOAD *
+		(sizeof(struct wmi_ns_offload) + sizeof(*tlv)));
+	ptr += sizeof(*tlv);
+	tlv = ptr;
+
+	for (i = 0; i < WMI_NS_ARP_OFFLOAD; i++) {
+		tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_NS_OFFLOAD_TUPLE);
+		tlv->len = __cpu_to_le16(sizeof(struct wmi_ns_offload));
+		ns_tuple = (struct wmi_ns_offload *)tlv->value;
+		if (ns->enable_offload) {
+			ns_tuple->flags |=
+				__cpu_to_le32(WMI_ARP_NS_OFF_FLAGS_VALID);
+			if (ns->info.target_addr_valid.s6_addr[i]) {
+				memcpy(&ns_tuple->target_ipaddr[0],
+				       &ns->info.target_addr[i],
+				       sizeof(struct in6_addr));
+			}
+			memcpy(&ns_tuple->solicitation_ipaddr,
+			       &ns->info.self_addr[i], sizeof(struct in6_addr));
+			if (ns->info.target_ipv6_ac.s6_addr[i] == IPV6_ADDR_ANY)
+				ns_tuple->flags |=
+					__cpu_to_le32(WMI_NSOFF_IPV6_ANYCAST);
+		} else {
+			ns_tuple->flags |=
+				__cpu_to_le32(WMI_ARP_NS_OFFLOAD_DISABLE);
+		}
+		ptr += (sizeof(*tlv) + sizeof(struct wmi_ns_offload));
+		tlv = ptr;
+	}
+
+	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT);
+	tlv->len = __cpu_to_le16(WMI_NS_ARP_OFFLOAD *
+		(sizeof(struct wmi_arp_offload) + sizeof(*tlv)));
+	ptr += sizeof(*tlv);
+	tlv = ptr;
+
+	for (i = 0; i < WMI_NS_ARP_OFFLOAD; i++) {
+		tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_ARP_OFFLOAD_TUPLE);
+		tlv->len = __cpu_to_le16(sizeof(struct wmi_arp_offload));
+		arp_tuple = (struct wmi_arp_offload *)tlv->value;
+		if (arp->enable_offload && (i == 0)) {
+			arp_tuple->flags |=
+				__cpu_to_le32(WMI_ARP_NS_OFF_FLAGS_VALID);
+			memcpy(&arp_tuple->target_ipaddr,
+			       &arp->params.ipv4_addr,
+			       sizeof(arp_tuple->target_ipaddr));
+		} else {
+			arp_tuple->flags |=
+				__cpu_to_le32(WMI_ARP_NS_OFFLOAD_DISABLE);
+		}
+		ptr += (sizeof(*tlv) + sizeof(struct wmi_arp_offload));
+		tlv = ptr;
+	}
+
+	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv set arp ns offload\n");
+	return skb;
+}
+
+static struct sk_buff *
 ath10k_wmi_tlv_op_gen_wow_enable(struct ath10k *ar)
 {
 	struct wmi_tlv_wow_enable_cmd *cmd;
@@ -2874,6 +3256,8 @@
 	cmd = (void *)tlv->value;
 
 	cmd->enable = __cpu_to_le32(1);
+	if (QCA_REV_WCN3990(ar))
+		cmd->pause_iface_config = __cpu_to_le32(1);
 
 	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv wow enable\n");
 	return skb;
@@ -3060,6 +3444,33 @@
 }
 
 static struct sk_buff *
+ath10k_wmi_tlv_op_gen_set_base_mac_addr(struct ath10k *ar, u32 pdev_id,
+					u8 *mac_addr)
+{
+	struct wmi_tlv_mac_addr_cmd *cmd;
+	struct wmi_tlv *tlv;
+	struct sk_buff *skb;
+	size_t len;
+
+	len = sizeof(*tlv) + sizeof(*cmd);
+	skb = ath10k_wmi_alloc_skb(ar, len);
+	if (!skb)
+		return ERR_PTR(-ENOMEM);
+
+	tlv = (struct wmi_tlv *)skb->data;
+	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PDEV_SET_BASE_MACADDR_CMD);
+	tlv->len = __cpu_to_le16(sizeof(*cmd));
+	cmd = (void *)tlv->value;
+
+	cmd->pdev_id = __cpu_to_le32(pdev_id);
+	ether_addr_copy(cmd->mac_addr.addr, mac_addr);
+
+	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv set_base_mac addr pdev_id %d mac addr %pM\n",
+		   pdev_id, mac_addr);
+	return skb;
+}
+
+static struct sk_buff *
 ath10k_wmi_tlv_op_gen_adaptive_qcs(struct ath10k *ar, bool enable)
 {
 	struct wmi_tlv_adaptive_qcs *cmd;
@@ -3087,6 +3498,34 @@
 	return skb;
 }
 
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_echo(struct ath10k *ar, u32 value)
+{
+	struct wmi_echo_cmd *cmd;
+	struct wmi_tlv *tlv;
+	struct sk_buff *skb;
+	void *ptr;
+	size_t len;
+
+	len = sizeof(*tlv) + sizeof(*cmd);
+	skb = ath10k_wmi_alloc_skb(ar, len);
+	if (!skb)
+		return ERR_PTR(-ENOMEM);
+
+	ptr = (void *)skb->data;
+	tlv = ptr;
+	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_ECHO_CMD);
+	tlv->len = __cpu_to_le16(sizeof(*cmd));
+	cmd = (void *)tlv->value;
+	cmd->value = cpu_to_le32(value);
+
+	ptr += sizeof(*tlv);
+	ptr += sizeof(*cmd);
+
+	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv echo value 0x%08x\n", value);
+	return skb;
+}
+
 /****************/
 /* TLV mappings */
 /****************/
@@ -3133,6 +3572,7 @@
 	.bcn_filter_rx_cmdid = WMI_TLV_BCN_FILTER_RX_CMDID,
 	.prb_req_filter_rx_cmdid = WMI_TLV_PRB_REQ_FILTER_RX_CMDID,
 	.mgmt_tx_cmdid = WMI_TLV_MGMT_TX_CMDID,
+	.mgmt_tx_send_cmdid = WMI_TLV_MGMT_TX_SEND_CMD,
 	.prb_tmpl_cmdid = WMI_TLV_PRB_TMPL_CMDID,
 	.addba_clear_resp_cmdid = WMI_TLV_ADDBA_CLEAR_RESP_CMDID,
 	.addba_send_cmdid = WMI_TLV_ADDBA_SEND_CMDID,
@@ -3415,6 +3855,7 @@
 	.meru_vc = WMI_VDEV_PARAM_UNSUPPORTED,
 	.rx_decap_type = WMI_VDEV_PARAM_UNSUPPORTED,
 	.bw_nss_ratemask = WMI_VDEV_PARAM_UNSUPPORTED,
+	.set_tsf = WMI_VDEV_PARAM_UNSUPPORTED,
 };
 
 static const struct wmi_ops wmi_tlv_ops = {
@@ -3423,7 +3864,9 @@
 
 	.pull_scan = ath10k_wmi_tlv_op_pull_scan_ev,
 	.pull_mgmt_rx = ath10k_wmi_tlv_op_pull_mgmt_rx_ev,
+	.pull_mgmt_tx_compl = ath10k_wmi_tlv_op_pull_mgmt_tx_compl_ev,
 	.pull_ch_info = ath10k_wmi_tlv_op_pull_ch_info_ev,
+	.pull_peer_delete_resp = ath10k_wmi_tlv_op_pull_peer_delete_ev,
 	.pull_vdev_start = ath10k_wmi_tlv_op_pull_vdev_start_ev,
 	.pull_peer_kick = ath10k_wmi_tlv_op_pull_peer_kick_ev,
 	.pull_swba = ath10k_wmi_tlv_op_pull_swba_ev,
@@ -3434,6 +3877,7 @@
 	.pull_fw_stats = ath10k_wmi_tlv_op_pull_fw_stats,
 	.pull_roam_ev = ath10k_wmi_tlv_op_pull_roam_ev,
 	.pull_wow_event = ath10k_wmi_tlv_op_pull_wow_ev,
+	.pull_echo_ev = ath10k_wmi_tlv_op_pull_echo_ev,
 	.get_txbf_conf_scheme = ath10k_wmi_tlv_txbf_conf_scheme,
 
 	.gen_pdev_suspend = ath10k_wmi_tlv_op_gen_pdev_suspend,
@@ -3465,7 +3909,7 @@
 	.gen_pdev_set_wmm = ath10k_wmi_tlv_op_gen_pdev_set_wmm,
 	.gen_request_stats = ath10k_wmi_tlv_op_gen_request_stats,
 	.gen_force_fw_hang = ath10k_wmi_tlv_op_gen_force_fw_hang,
-	/* .gen_mgmt_tx = not implemented; HTT is used */
+	.gen_mgmt_tx_send = ath10k_wmi_tlv_op_gen_mgmt_tx_send,
 	.gen_dbglog_cfg = ath10k_wmi_tlv_op_gen_dbglog_cfg,
 	.gen_pktlog_enable = ath10k_wmi_tlv_op_gen_pktlog_enable,
 	.gen_pktlog_disable = ath10k_wmi_tlv_op_gen_pktlog_disable,
@@ -3480,6 +3924,9 @@
 	.gen_p2p_go_bcn_ie = ath10k_wmi_tlv_op_gen_p2p_go_bcn_ie,
 	.gen_vdev_sta_uapsd = ath10k_wmi_tlv_op_gen_vdev_sta_uapsd,
 	.gen_sta_keepalive = ath10k_wmi_tlv_op_gen_sta_keepalive,
+	.gen_set_arp_ns_offload = ath10k_wmi_tlv_op_gen_set_arp_ns_offload,
+	.gen_gtk_offload = ath10k_wmi_op_gen_gtk_offload,
+	.gen_csa_offload = ath10k_wmi_tlv_op_gen_csa_offload,
 	.gen_wow_enable = ath10k_wmi_tlv_op_gen_wow_enable,
 	.gen_wow_add_wakeup_event = ath10k_wmi_tlv_op_gen_wow_add_wakeup_event,
 	.gen_wow_host_wakeup_ind = ath10k_wmi_tlv_gen_wow_host_wakeup_ind,
@@ -3489,9 +3936,29 @@
 	.gen_tdls_peer_update = ath10k_wmi_tlv_op_gen_tdls_peer_update,
 	.gen_adaptive_qcs = ath10k_wmi_tlv_op_gen_adaptive_qcs,
 	.fw_stats_fill = ath10k_wmi_main_op_fw_stats_fill,
+	.get_vdev_subtype = ath10k_wmi_op_get_vdev_subtype,
+	.gen_echo = ath10k_wmi_tlv_op_gen_echo,
+	.gen_set_pdev_mac_addr = ath10k_wmi_tlv_op_gen_set_base_mac_addr,
+};
+
+static const struct wmi_peer_flags_map wmi_tlv_peer_flags_map = {
+	.auth = WMI_TLV_PEER_AUTH,
+	.qos = WMI_TLV_PEER_QOS,
+	.need_ptk_4_way = WMI_TLV_PEER_NEED_PTK_4_WAY,
+	.need_gtk_2_way = WMI_TLV_PEER_NEED_GTK_2_WAY,
+	.apsd = WMI_TLV_PEER_APSD,
+	.ht = WMI_TLV_PEER_HT,
+	.bw40 = WMI_TLV_PEER_40MHZ,
+	.stbc = WMI_TLV_PEER_STBC,
+	.ldbc = WMI_TLV_PEER_LDPC,
+	.dyn_mimops = WMI_TLV_PEER_DYN_MIMOPS,
+	.static_mimops = WMI_TLV_PEER_STATIC_MIMOPS,
+	.spatial_mux = WMI_TLV_PEER_SPATIAL_MUX,
+	.vht = WMI_TLV_PEER_VHT,
+	.bw80 = WMI_TLV_PEER_80MHZ,
+	.pmf = WMI_TLV_PEER_PMF,
 };
 
 /************/
 /* TLV init */
 /************/
 
@@ -3501,4 +3968,5 @@
 	ar->wmi.vdev_param = &wmi_tlv_vdev_param_map;
 	ar->wmi.pdev_param = &wmi_tlv_pdev_param_map;
 	ar->wmi.ops = &wmi_tlv_ops;
+	ar->wmi.peer_flags = &wmi_tlv_peer_flags_map;
 }
diff -ruw linux-4.4.115/drivers/net/wireless/ath/ath10k/wmi-tlv.h linux-4.4.115-fbx/drivers/net/wireless/ath/ath10k/wmi-tlv.h
--- linux-4.4.115/drivers/net/wireless/ath/ath10k/wmi-tlv.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/net/wireless/ath/ath10k/wmi-tlv.h	2019-01-22 16:16:25.427263830 +0100
@@ -1,6 +1,6 @@
 /*
  * Copyright (c) 2005-2011 Atheros Communications Inc.
- * Copyright (c) 2011-2014 Qualcomm Atheros, Inc.
+ * Copyright (c) 2011-2014, 2017 Qualcomm Atheros, Inc.
  *
  * Permission to use, copy, modify, and/or distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above
@@ -22,6 +22,7 @@
 #define WMI_TLV_CMD_UNSUPPORTED 0
 #define WMI_TLV_PDEV_PARAM_UNSUPPORTED 0
 #define WMI_TLV_VDEV_PARAM_UNSUPPORTED 0
+#define WMI_TX_DL_FRM_LEN	64
 
 enum wmi_tlv_grp_id {
 	WMI_TLV_GRP_START = 0x3,
@@ -132,6 +133,7 @@
 	WMI_TLV_PRB_REQ_FILTER_RX_CMDID,
 	WMI_TLV_MGMT_TX_CMDID,
 	WMI_TLV_PRB_TMPL_CMDID,
+	WMI_TLV_MGMT_TX_SEND_CMD,
 	WMI_TLV_ADDBA_CLEAR_RESP_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_BA_NEG),
 	WMI_TLV_ADDBA_SEND_CMDID,
 	WMI_TLV_ADDBA_STATUS_CMDID,
@@ -306,16 +308,21 @@
 	WMI_TLV_VDEV_STOPPED_EVENTID,
 	WMI_TLV_VDEV_INSTALL_KEY_COMPLETE_EVENTID,
 	WMI_TLV_VDEV_MCC_BCN_INTERVAL_CHANGE_REQ_EVENTID,
+	WMI_TLV_VDEV_TSF_REPORT_EVENTID,
+	WMI_TLV_VDEV_DELETE_RESP_EVENTID,
 	WMI_TLV_PEER_STA_KICKOUT_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_PEER),
 	WMI_TLV_PEER_INFO_EVENTID,
 	WMI_TLV_PEER_TX_FAIL_CNT_THR_EVENTID,
 	WMI_TLV_PEER_ESTIMATED_LINKSPEED_EVENTID,
 	WMI_TLV_PEER_STATE_EVENTID,
+	WMI_TLV_PEER_ASSOC_CONF_EVENTID,
+	WMI_TLV_PEER_DELETE_RESP_EVENTID,
 	WMI_TLV_MGMT_RX_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_MGMT),
 	WMI_TLV_HOST_SWBA_EVENTID,
 	WMI_TLV_TBTTOFFSET_UPDATE_EVENTID,
 	WMI_TLV_OFFLOAD_BCN_TX_STATUS_EVENTID,
 	WMI_TLV_OFFLOAD_PROB_RESP_TX_STATUS_EVENTID,
+	WMI_TLV_MGMT_TX_COMPLETION_EVENTID,
 	WMI_TLV_TX_DELBA_COMPLETE_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_BA_NEG),
 	WMI_TLV_TX_ADDBA_COMPLETE_EVENTID,
 	WMI_TLV_BA_RSP_SSN_EVENTID,
@@ -527,6 +534,24 @@
 	WMI_TLV_VDEV_PARAM_IBSS_PS_1RX_CHAIN_IN_ATIM_WINDOW_ENABLE,
 };
 
+enum wmi_tlv_peer_flags {
+	WMI_TLV_PEER_AUTH = 0x00000001,
+	WMI_TLV_PEER_QOS = 0x00000002,
+	WMI_TLV_PEER_NEED_PTK_4_WAY = 0x00000004,
+	WMI_TLV_PEER_NEED_GTK_2_WAY = 0x00000010,
+	WMI_TLV_PEER_APSD = 0x00000800,
+	WMI_TLV_PEER_HT = 0x00001000,
+	WMI_TLV_PEER_40MHZ = 0x00002000,
+	WMI_TLV_PEER_STBC = 0x00008000,
+	WMI_TLV_PEER_LDPC = 0x00010000,
+	WMI_TLV_PEER_DYN_MIMOPS = 0x00020000,
+	WMI_TLV_PEER_STATIC_MIMOPS = 0x00040000,
+	WMI_TLV_PEER_SPATIAL_MUX = 0x00200000,
+	WMI_TLV_PEER_VHT = 0x02000000,
+	WMI_TLV_PEER_80MHZ = 0x04000000,
+	WMI_TLV_PEER_PMF = 0x08000000,
+};
+
 enum wmi_tlv_tag {
 	WMI_TLV_TAG_LAST_RESERVED = 15,
 
@@ -871,6 +896,11 @@
 	WMI_TLV_TAG_STRUCT_SAP_OFL_DEL_STA_EVENT,
 	WMI_TLV_TAG_STRUCT_APFIND_CMD_PARAM,
 	WMI_TLV_TAG_STRUCT_APFIND_EVENT_HDR,
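+
+	/* tags below use explicit values to stay aligned with the
+	 * firmware's TLV tag numbering
+	 */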
+	WMI_TLV_TAG_STRUCT_HL_1_0_SVC_OFFSET = 176,
+
+	WMI_TLV_TAG_STRUCT_MGMT_TX_CMD = 0x1A6,
+	WMI_TLV_TAG_STRUCT_MGMT_TX_COMPL,
+	WMI_TLV_TAG_STRUCT_PEER_DELETE_RESP_EVENT = 0x1C3,
 
 	WMI_TLV_TAG_MAX
 };
@@ -946,6 +976,50 @@
 	WMI_TLV_SERVICE_STA_RX_IPA_OFFLOAD_SUPPORT,
 	WMI_TLV_SERVICE_MDNS_OFFLOAD,
 	WMI_TLV_SERVICE_SAP_AUTH_OFFLOAD,
+	WMI_TLV_SERVICE_DUAL_BAND_SIMULTANEOUS_SUPPORT,
+	WMI_TLV_SERVICE_OCB,
+	WMI_TLV_SERVICE_AP_ARPNS_OFFLOAD,
+	WMI_TLV_SERVICE_PER_BAND_CHAINMASK_SUPPORT,
+	WMI_TLV_SERVICE_PACKET_FILTER_OFFLOAD,
+	WMI_TLV_SERVICE_MGMT_TX_HTT,
+	WMI_TLV_SERVICE_MGMT_TX_WMI,
+	WMI_TLV_SERVICE_EXT_MSG,
+	WMI_TLV_SERVICE_MAWC,
+	WMI_TLV_SERVICE_PEER_ASSOC_CONF,
+	WMI_TLV_SERVICE_EGAP,
+	WMI_TLV_SERVICE_STA_PMF_OFFLOAD,
+	WMI_TLV_SERVICE_UNIFIED_WOW_CAPABILITY,
+	WMI_TLV_SERVICE_ENHANCED_PROXY_STA,
+	WMI_TLV_SERVICE_ATF,
+	WMI_TLV_SERVICE_COEX_GPIO,
+	WMI_TLV_SERVICE_AUX_SPECTRAL_INTF,
+	WMI_TLV_SERVICE_AUX_CHAN_LOAD_INTF,
+	WMI_TLV_SERVICE_BSS_CHANNEL_INFO_64,
+	WMI_TLV_SERVICE_ENTERPRISE_MESH,
+	WMI_TLV_SERVICE_RESTRT_CHNL_SUPPORT,
+	WMI_TLV_SERVICE_BPF_OFFLOAD,
+	WMI_TLV_SERVICE_SYNC_DELETE_CMDS,
+	WMI_TLV_SERVICE_SMART_ANTENNA_SW_SUPPORT,
+	WMI_TLV_SERVICE_SMART_ANTENNA_HW_SUPPORT,
+	WMI_TLV_SERVICE_RATECTRL_LIMIT_MAX_MIN_RATES,
+	WMI_TLV_SERVICE_NAN_DATA,
+	WMI_TLV_SERVICE_NAN_RTT,
+	WMI_TLV_SERVICE_11AX,
+	WMI_TLV_SERVICE_DEPRECATED_REPLACE,
+	WMI_TLV_SERVICE_TDLS_CONN_TRACKER_IN_HOST_MODE,
+	WMI_TLV_SERVICE_ENHANCED_MCAST_FILTER,
+	WMI_TLV_SERVICE_PERIODIC_CHAN_STAT_SUPPORT,
+	WMI_TLV_SERVICE_MESH_11S,
+	WMI_TLV_SERVICE_HALF_RATE_QUARTER_RATE_SUPPORT,
+	WMI_TLV_SERVICE_VDEV_RX_FILTER,
+	WMI_TLV_SERVICE_P2P_LISTEN_OFFLOAD_SUPPORT,
+	WMI_TLV_SERVICE_MARK_FIRST_WAKEUP_PACKET,
+	WMI_TLV_SERVICE_MULTIPLE_MCAST_FILTER_SET,
+	WMI_TLV_SERVICE_HOST_MANAGED_RX_REORDER,
+	WMI_TLV_SERVICE_FLASH_RDWR_SUPPORT,
+	WMI_TLV_SERVICE_WLAN_STATS_REPORT,
+	WMI_TLV_SERVICE_TX_MSDU_ID_NEW_PARTITION_SUPPORT,
+	WMI_TLV_SERVICE_DFS_PHYERR_OFFLOAD,
 };
 
 #define WMI_SERVICE_IS_ENABLED(wmi_svc_bmap, svc_id, len) \
@@ -1102,6 +1176,8 @@
 	       WMI_SERVICE_MDNS_OFFLOAD, len);
 	SVCMAP(WMI_TLV_SERVICE_SAP_AUTH_OFFLOAD,
 	       WMI_SERVICE_SAP_AUTH_OFFLOAD, len);
+	SVCMAP(WMI_TLV_SERVICE_MGMT_TX_WMI,
+	       WMI_SERVICE_MGMT_TX_WMI, len);
 }
 
 #undef SVCMAP
@@ -1112,6 +1188,17 @@
 	u8 value[0];
 } __packed;
 
+struct ath10k_mgmt_tx_pkt_addr {
+	void *vaddr;
+	dma_addr_t paddr;
+};
+
+struct wmi_tlv_mgmt_tx_compl_ev {
+	__le32 desc_id;
+	__le32 status;
+	__le32 pdev_id;
+} __packed;
+
 #define WMI_TLV_MGMT_RX_NUM_RSSI 4
 
 struct wmi_tlv_mgmt_rx_ev {
@@ -1164,6 +1251,14 @@
 	__le32 max_num_scan_chans;
 	__le32 hw_bd_id; /* 0 means hw_bd_info is invalid */
 	struct wmi_tlv_hw_bd_info hw_bd_info[5];
+#ifdef CONFIG_ATH10K_SNOC
+	__le32 max_supported_macs;
+	__le32 wmi_fw_sub_feat_caps;
+	__le32 num_dbs_hw_modes;
+	__le32 txrx_chainmask;
+	__le32 default_dbs_hw_mode_index;
+	__le32 num_msdu_desc;
+#endif
 } __packed;
 
 struct wmi_tlv_rdy_ev {
@@ -1172,6 +1267,8 @@
 	__le32 status;
 } __packed;
 
+#define WMI_TLV_TX_MSDU_ID_NEW_PARTITION_SUPPORT  BIT(10)
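+/* advertised to firmware via host_capab in the TLV init command */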
+
 struct wmi_tlv_resource_config {
 	__le32 num_vdevs;
 	__le32 num_peers;
@@ -1209,6 +1306,11 @@
 	__le32 keep_alive_pattern_size;
 	__le32 max_tdls_concurrent_sleep_sta;
 	__le32 max_tdls_concurrent_buffer_sta;
+	__le32 wmi_send_separate;
+	__le32 num_ocb_vdevs;
+	__le32 num_ocb_channels;
+	__le32 num_ocb_schedules;
+	__le32 host_capab;
 } __packed;
 
 struct wmi_tlv_init_cmd {
@@ -1317,6 +1419,10 @@
 	__le32 tsf_l32;
 	__le32 tsf_u32;
 	__le32 buf_len;
+	__le32 pdev_id;
+	__le32 rs_phy_err_mask0;
+	__le32 rs_phy_err_mask1;
+	__le32 rs_phy_err_mask2;
 } __packed;
 
 enum wmi_tlv_dbglog_param {
@@ -1472,6 +1578,21 @@
 
 struct wmi_tlv_wow_enable_cmd {
 	__le32 enable;
+	__le32 pause_iface_config;
+} __packed;
+
+struct wmi_tlv_arp_ns_offload_cmd {
+	__le32 flags;
+	__le32 vdev_id;
+	__le32 num_ns_ext_tuples;
+} __packed;
+
+struct wmi_tlv_gtk_offload_cmd {
+	__le32 vdev_id;
+	__le32 flags;
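+	/* KEK/KCK from the EAPOL handshake let the firmware perform GTK
+	 * rekeying while the host is asleep
+	 */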
+	u8 kek[NL80211_KEK_LEN];
+	u8 kck[NL80211_KCK_LEN];
+	__le64 replay_ctr;
 } __packed;
 
 struct wmi_tlv_wow_host_wakeup_ind {
@@ -1624,4 +1745,24 @@
 
 void ath10k_wmi_tlv_attach(struct ath10k *ar);
 
+struct wmi_tlv_mgmt_tx_hdr {
+	__le32 vdev_id;
+	__le32 desc_id;
+	__le32 chanfreq;
+	__le64 paddr;
+	__le32 frame_len;
+	__le32 buf_len;
+} __packed;
+
+struct wmi_tlv_mgmt_tx_cmd {
+	struct wmi_tlv_mgmt_tx_hdr hdr;
+	__le16 data_len;
+	__le16 data_tag;
+	u8 buf[0];
+} __packed;
+
+struct wmi_tlv_mac_addr_cmd {
+	__le32 pdev_id;
+	struct wmi_mac_addr mac_addr;
+} __packed;
 #endif
diff -ruw linux-4.4.115/drivers/net/wireless/ath/ath10k/wow.c linux-4.4.115-fbx/drivers/net/wireless/ath/ath10k/wow.c
--- linux-4.4.115/drivers/net/wireless/ath/ath10k/wow.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/net/wireless/ath/ath10k/wow.c	2019-01-22 16:16:25.431263866 +0100
@@ -17,6 +17,7 @@
 #include "mac.h"
 
 #include <net/mac80211.h>
+#include <net/addrconf.h>
 #include "hif.h"
 #include "core.h"
 #include "debug.h"
@@ -25,7 +26,9 @@
 
 static const struct wiphy_wowlan_support ath10k_wowlan_support = {
 	.flags = WIPHY_WOWLAN_DISCONNECT |
-		 WIPHY_WOWLAN_MAGIC_PKT,
+		WIPHY_WOWLAN_MAGIC_PKT |
+		WIPHY_WOWLAN_SUPPORTS_GTK_REKEY |
+		WIPHY_WOWLAN_GTK_REKEY_FAILURE,
 	.pattern_min_len = WOW_MIN_PATTERN_SIZE,
 	.pattern_max_len = WOW_MAX_PATTERN_SIZE,
 	.max_pkt_offset = WOW_MAX_PKT_OFFSET,
@@ -82,6 +85,7 @@
 	int ret, i;
 	unsigned long wow_mask = 0;
 	struct ath10k *ar = arvif->ar;
+	struct ieee80211_bss_conf *bss = &arvif->vif->bss_conf;
 	const struct cfg80211_pkt_pattern *patterns = wowlan->patterns;
 	int pattern_id = 0;
 
@@ -100,6 +104,7 @@
 		__set_bit(WOW_RA_MATCH_EVENT, &wow_mask);
 		break;
 	case WMI_VDEV_TYPE_STA:
+		if (arvif->is_up && bss->assoc) {
 		if (wowlan->disconnect) {
 			__set_bit(WOW_DEAUTH_RECVD_EVENT, &wow_mask);
 			__set_bit(WOW_DISASSOC_RECVD_EVENT, &wow_mask);
@@ -109,6 +114,9 @@
 
 		if (wowlan->magic_pkt)
 			__set_bit(WOW_MAGIC_PKT_RECVD_EVENT, &wow_mask);
+			if (wowlan->gtk_rekey_failure)
+				__set_bit(WOW_GTK_ERR_EVENT, &wow_mask);
+		}
 		break;
 	default:
 		break;
@@ -224,6 +232,253 @@
 	return 0;
 }
 
+static int
+ath10k_wow_fill_vdev_ns_offload_struct(struct ath10k_vif *arvif,
+				       bool enable_offload)
+{
+	struct in6_addr addr[TARGET_NUM_STATIONS];
+	struct wmi_ns_arp_offload_req *ns;
+	struct wireless_dev *wdev;
+	struct inet6_dev *in6_dev;
+	struct in6_addr addr_type;
+	struct inet6_ifaddr *ifa;
+	struct ifacaddr6 *ifaca;
+	struct list_head *addr_list;
+	u32 scope, count = 0;
+	int i;
+
+	ns = &arvif->ns_offload;
+	if (!enable_offload) {
+		ns->offload_type = __cpu_to_le16(WMI_NS_ARP_OFFLOAD);
+		ns->enable_offload = __cpu_to_le16(WMI_ARP_NS_OFFLOAD_DISABLE);
+		return 0;
+	}
+
+	wdev = ieee80211_vif_to_wdev(arvif->vif);
+	if (!wdev)
+		return -ENODEV;
+
+	in6_dev = __in6_dev_get(wdev->netdev);
+	if (!in6_dev)
+		return -ENODEV;
+
+	memset(&addr, 0, TARGET_NUM_STATIONS * sizeof(struct in6_addr));
+	memset(&addr_type, 0, sizeof(struct in6_addr));
+
+	/* Unicast Addresses */
+	read_lock_bh(&in6_dev->lock);
+	list_for_each(addr_list, &in6_dev->addr_list) {
+		if (count >= TARGET_NUM_STATIONS) {
+			read_unlock_bh(&in6_dev->lock);
+			return -EINVAL;
+		}
+
+		ifa = list_entry(addr_list, struct inet6_ifaddr, if_list);
+		if (ifa->flags & IFA_F_DADFAILED)
+			continue;
+		scope = ipv6_addr_src_scope(&ifa->addr);
+		switch (scope) {
+		case IPV6_ADDR_SCOPE_GLOBAL:
+		case IPV6_ADDR_SCOPE_LINKLOCAL:
+			memcpy(&addr[count], &ifa->addr.s6_addr,
+			       sizeof(ifa->addr.s6_addr));
+			addr_type.s6_addr[count] = IPV6_ADDR_UNICAST;
+			count += 1;
+			break;
+		}
+	}
+
+	/* Anycast Addresses */
+	for (ifaca = in6_dev->ac_list; ifaca; ifaca = ifaca->aca_next) {
+		if (count >= TARGET_NUM_STATIONS) {
+			read_unlock_bh(&in6_dev->lock);
+			return -EINVAL;
+		}
+
+		scope = ipv6_addr_src_scope(&ifaca->aca_addr);
+		switch (scope) {
+		case IPV6_ADDR_SCOPE_GLOBAL:
+		case IPV6_ADDR_SCOPE_LINKLOCAL:
+			memcpy(&addr[count], &ifaca->aca_addr,
+			       sizeof(ifaca->aca_addr));
+			addr_type.s6_addr[count] = IPV6_ADDR_ANY;
+			count += 1;
+			break;
+		}
+	}
+	read_unlock_bh(&in6_dev->lock);
+
+	/* Fill up the request structure.
+	 * self_addr is set to the solicited-node multicast address,
+	 * which is formed by taking the last 24 bits of a unicast or
+	 * anycast address and appending them to the prefix:
+	 *
+	 * FF02:0000:0000:0000:0000:0001:FFXX:XXXX
+	 *
+	 * where XX:XXXX are those last 24 bits.
+	 */
+	for (i = 0; i < count; i++) {
+		ns->info.self_addr[i].s6_addr[0] = 0xFF;
+		ns->info.self_addr[i].s6_addr[1] = 0x02;
+		ns->info.self_addr[i].s6_addr[11] = 0x01;
+		ns->info.self_addr[i].s6_addr[12] = 0xFF;
+		ns->info.self_addr[i].s6_addr[13] = addr[i].s6_addr[13];
+		ns->info.self_addr[i].s6_addr[14] = addr[i].s6_addr[14];
+		ns->info.self_addr[i].s6_addr[15] = addr[i].s6_addr[15];
+		ns->info.slot_idx = i;
+		memcpy(&ns->info.target_addr[i], &addr[i],
+		       sizeof(struct in6_addr));
+		ns->info.target_addr_valid.s6_addr[i] = 1;
+		ns->info.target_ipv6_ac.s6_addr[i] = addr_type.s6_addr[i];
+		memcpy(&ns->params.ipv6_addr, &ns->info.target_addr[i],
+		       sizeof(struct in6_addr));
+	}
+
+	ns->offload_type = __cpu_to_le16(WMI_NS_ARP_OFFLOAD);
+	ns->enable_offload = __cpu_to_le16(WMI_ARP_NS_OFFLOAD_ENABLE);
+	ns->num_ns_offload_count = __cpu_to_le16(count);
+
+	return 0;
+}
+
+static int
+ath10k_wow_fill_vdev_arp_offload_struct(struct ath10k_vif *arvif,
+					bool enable_offload)
+{
+	struct in_device *in_dev;
+	struct in_ifaddr *ifa;
+	bool offload_params_found = false;
+	struct wireless_dev *wdev = ieee80211_vif_to_wdev(arvif->vif);
+	struct wmi_ns_arp_offload_req *arp = &arvif->arp_offload;
+
+	if (!enable_offload) {
+		arp->offload_type = __cpu_to_le16(WMI_IPV4_ARP_REPLY_OFFLOAD);
+		arp->enable_offload = __cpu_to_le16(WMI_ARP_NS_OFFLOAD_DISABLE);
+		return 0;
+	}
+
+	if (!wdev)
+		return -ENODEV;
+	if (!wdev->netdev)
+		return -ENODEV;
+	in_dev = __in_dev_get_rtnl(wdev->netdev);
+	if (!in_dev)
+		return -ENODEV;
+
+	arp->offload_type = __cpu_to_le16(WMI_IPV4_ARP_REPLY_OFFLOAD);
+	arp->enable_offload = __cpu_to_le16(WMI_ARP_NS_OFFLOAD_ENABLE);
+	for (ifa = in_dev->ifa_list; ifa; ifa = ifa->ifa_next) {
+		if (!memcmp(ifa->ifa_label, wdev->netdev->name, IFNAMSIZ)) {
+			offload_params_found = true;
+			break;
+		}
+	}
+
+	if (!offload_params_found)
+		return -ENODEV;
+	memcpy(&arp->params.ipv4_addr, &ifa->ifa_local,
+	       sizeof(arp->params.ipv4_addr));
+
+	return 0;
+}
+
+static int ath10k_wow_enable_ns_arp_offload(struct ath10k *ar, bool offload)
+{
+	struct ath10k_vif *arvif;
+	int ret;
+
+	list_for_each_entry(arvif, &ar->arvifs, list) {
+		if (arvif->vdev_type != WMI_VDEV_TYPE_STA)
+			continue;
+
+		if (!arvif->is_up)
+			continue;
+
+		ret = ath10k_wow_fill_vdev_arp_offload_struct(arvif, offload);
+		if (ret) {
+			ath10k_err(ar, "ARP-offload config failed, vdev: %d\n",
+				   arvif->vdev_id);
+			return ret;
+		}
+
+		ret = ath10k_wow_fill_vdev_ns_offload_struct(arvif, offload);
+		if (ret) {
+			ath10k_err(ar, "NS-offload config failed, vdev: %d\n",
+				   arvif->vdev_id);
+			return ret;
+		}
+
+		ret = ath10k_wmi_set_arp_ns_offload(ar, arvif);
+		if (ret) {
+			ath10k_err(ar, "failed to send offload cmd, vdev: %d\n",
+				   arvif->vdev_id);
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
+static int ath10k_config_wow_listen_interval(struct ath10k *ar)
+{
+	int ret;
+	u32 param = ar->wmi.vdev_param->listen_interval;
+	u8 listen_interval = ar->hw_values->default_listen_interval;
+	struct ath10k_vif *arvif;
+
+	if (!listen_interval)
+		return 0;
+
+	list_for_each_entry(arvif, &ar->arvifs, list) {
+		if (arvif->vdev_type != WMI_VDEV_TYPE_STA)
+			continue;
+		ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id,
+						param, listen_interval);
+		if (ret) {
+			ath10k_err(ar, "failed to config LI for vdev_id: %d\n",
+				   arvif->vdev_id);
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
+static int ath10k_wow_config_gtk_offload(struct ath10k *ar, bool gtk_offload)
+{
+	struct ath10k_vif *arvif;
+	struct ieee80211_bss_conf *bss;
+	struct wmi_gtk_rekey_data *rekey_data;
+	int ret;
+
+	list_for_each_entry(arvif, &ar->arvifs, list) {
+		if (arvif->vdev_type != WMI_VDEV_TYPE_STA)
+			continue;
+
+		bss = &arvif->vif->bss_conf;
+		if (!arvif->is_up || !bss->assoc)
+			continue;
+
+		rekey_data = &arvif->gtk_rekey_data;
+		if (!rekey_data->valid)
+			continue;
+
+		if (gtk_offload)
+			rekey_data->enable_offload = WMI_GTK_OFFLOAD_ENABLE;
+		else
+			rekey_data->enable_offload = WMI_GTK_OFFLOAD_DISABLE;
+		ret = ath10k_wmi_gtk_offload(ar, arvif);
+		if (ret) {
+			ath10k_err(ar, "GTK offload failed for vdev_id: %d\n",
+				   arvif->vdev_id);
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
 int ath10k_wow_op_suspend(struct ieee80211_hw *hw,
 			  struct cfg80211_wowlan *wowlan)
 {
@@ -233,7 +488,7 @@
 	mutex_lock(&ar->conf_mutex);
 
 	if (WARN_ON(!test_bit(ATH10K_FW_FEATURE_WOWLAN_SUPPORT,
-			      ar->fw_features))) {
+			      ar->running_fw->fw_file.fw_features))) {
 		ret = 1;
 		goto exit;
 	}
@@ -245,10 +500,29 @@
 		goto exit;
 	}
 
+	ret = ath10k_wow_config_gtk_offload(ar, true);
+	if (ret) {
+		ath10k_warn(ar, "failed to enable GTK offload: %d\n", ret);
+		goto exit;
+	}
+
+	ret = ath10k_wow_enable_ns_arp_offload(ar, true);
+	if (ret) {
+		ath10k_warn(ar, "failed to enable ARP-NS offload: %d\n", ret);
+		goto disable_gtk_offload;
+	}
+
 	ret = ath10k_wow_set_wakeups(ar, wowlan);
 	if (ret) {
 		ath10k_warn(ar, "failed to set wow wakeup events: %d\n",
 			    ret);
+		goto disable_ns_arp_offload;
+	}
+
+	ret = ath10k_config_wow_listen_interval(ar);
+	if (ret) {
+		ath10k_warn(ar, "failed to config wow listen interval: %d\n",
+			    ret);
 		goto cleanup;
 	}
 
@@ -272,11 +546,68 @@
 cleanup:
 	ath10k_wow_cleanup(ar);
 
+disable_ns_arp_offload:
+	ath10k_wow_enable_ns_arp_offload(ar, false);
+
+disable_gtk_offload:
+	ath10k_wow_config_gtk_offload(ar, false);
 exit:
 	mutex_unlock(&ar->conf_mutex);
 	return ret ? 1 : 0;
 }
 
+void ath10k_wow_op_set_wakeup(struct ieee80211_hw *hw, bool enabled)
+{
+	struct ath10k *ar = hw->priv;
+
+	mutex_lock(&ar->conf_mutex);
+	if (test_bit(ATH10K_FW_FEATURE_WOWLAN_SUPPORT,
+		     ar->running_fw->fw_file.fw_features)) {
+		device_set_wakeup_enable(ar->dev, enabled);
+	}
+	mutex_unlock(&ar->conf_mutex);
+}
+
+static void ath10k_wow_op_report_wakeup_reason(struct ath10k *ar)
+{
+	struct cfg80211_wowlan_wakeup *wakeup = &ar->wow.wakeup;
+	struct ath10k_vif *arvif;
+
+	memset(wakeup, 0, sizeof(struct cfg80211_wowlan_wakeup));
+	switch (ar->wow.wakeup_reason) {
+	case WOW_REASON_UNSPECIFIED:
+		wakeup = NULL;
+		break;
+	case WOW_REASON_RECV_MAGIC_PATTERN:
+		wakeup->magic_pkt = true;
+		break;
+	case WOW_REASON_DEAUTH_RECVD:
+	case WOW_REASON_DISASSOC_RECVD:
+	case WOW_REASON_AP_ASSOC_LOST:
+	case WOW_REASON_CSA_EVENT:
+		wakeup->disconnect = true;
+		break;
+	case WOW_REASON_GTK_HS_ERR:
+		wakeup->gtk_rekey_failure = true;
+		break;
+	}
+	ar->wow.wakeup_reason = WOW_REASON_UNSPECIFIED;
+
+	if (wakeup) {
+		wakeup->pattern_idx = -1;
+		list_for_each_entry(arvif, &ar->arvifs, list) {
+			ieee80211_report_wowlan_wakeup(arvif->vif,
+						       wakeup, GFP_KERNEL);
+			if (wakeup->disconnect)
+				ieee80211_resume_disconnect(arvif->vif);
+		}
+	} else {
+		list_for_each_entry(arvif, &ar->arvifs, list)
+			ieee80211_report_wowlan_wakeup(arvif->vif,
+						       NULL, GFP_KERNEL);
+	}
+}
+
 int ath10k_wow_op_resume(struct ieee80211_hw *hw)
 {
 	struct ath10k *ar = hw->priv;
@@ -285,7 +616,7 @@
 	mutex_lock(&ar->conf_mutex);
 
 	if (WARN_ON(!test_bit(ATH10K_FW_FEATURE_WOWLAN_SUPPORT,
-			      ar->fw_features))) {
+			      ar->running_fw->fw_file.fw_features))) {
 		ret = 1;
 		goto exit;
 	}
@@ -297,8 +628,20 @@
 	}
 
 	ret = ath10k_wow_wakeup(ar);
-	if (ret)
+	if (ret) {
 		ath10k_warn(ar, "failed to wakeup from wow: %d\n", ret);
+		goto exit;
+	}
+
+	ret = ath10k_wow_enable_ns_arp_offload(ar, false);
+	if (ret) {
+		ath10k_warn(ar, "failed to disable ARP-NS offload: %d\n", ret);
+		goto exit;
+	}
+
+	ret = ath10k_wow_config_gtk_offload(ar, false);
+	if (ret)
+		ath10k_warn(ar, "failed to disable GTK offload: %d\n", ret);
 
 exit:
 	if (ret) {
@@ -319,13 +662,15 @@
 		}
 	}
 
+	ath10k_wow_op_report_wakeup_reason(ar);
 	mutex_unlock(&ar->conf_mutex);
 	return ret;
 }
 
 int ath10k_wow_init(struct ath10k *ar)
 {
-	if (!test_bit(ATH10K_FW_FEATURE_WOWLAN_SUPPORT, ar->fw_features))
+	if (!test_bit(ATH10K_FW_FEATURE_WOWLAN_SUPPORT,
+		      ar->running_fw->fw_file.fw_features))
 		return 0;
 
 	if (WARN_ON(!test_bit(WMI_SERVICE_WOW, ar->wmi.svc_map)))
@@ -334,6 +679,15 @@
 	ar->wow.wowlan_support = ath10k_wowlan_support;
 	ar->wow.wowlan_support.n_patterns = ar->wow.max_num_patterns;
 	ar->hw->wiphy->wowlan = &ar->wow.wowlan_support;
+	device_init_wakeup(ar->dev, true);
 
 	return 0;
 }
+
+void ath10k_wow_deinit(struct ath10k *ar)
+{
+	if (test_bit(ATH10K_FW_FEATURE_WOWLAN_SUPPORT,
+		     ar->running_fw->fw_file.fw_features) &&
+		test_bit(WMI_SERVICE_WOW, ar->wmi.svc_map))
+		device_init_wakeup(ar->dev, false);
+}
diff -ruw linux-4.4.115/drivers/net/wireless/ath/ath10k/wow.h linux-4.4.115-fbx/drivers/net/wireless/ath/ath10k/wow.h
--- linux-4.4.115/drivers/net/wireless/ath/ath10k/wow.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/net/wireless/ath/ath10k/wow.h	2019-01-22 16:16:25.431263866 +0100
@@ -17,18 +17,21 @@
 #define _WOW_H_
 
 struct ath10k_wow {
+	u32 wakeup_reason;
 	u32 max_num_patterns;
 	struct completion wakeup_completed;
+	struct cfg80211_wowlan_wakeup wakeup;
 	struct wiphy_wowlan_support wowlan_support;
 };
 
 #ifdef CONFIG_PM
 
 int ath10k_wow_init(struct ath10k *ar);
+void ath10k_wow_deinit(struct ath10k *ar);
 int ath10k_wow_op_suspend(struct ieee80211_hw *hw,
 			  struct cfg80211_wowlan *wowlan);
 int ath10k_wow_op_resume(struct ieee80211_hw *hw);
-
+void ath10k_wow_op_set_wakeup(struct ieee80211_hw *hw, bool enabled);
 #else
 
 static inline int ath10k_wow_init(struct ath10k *ar)
@@ -36,5 +39,8 @@
 	return 0;
 }
 
+static inline void ath10k_wow_deinit(struct ath10k *ar)
+{
+}
 #endif /* CONFIG_PM */
 #endif /* _WOW_H_ */
diff -ruw linux-4.4.115/drivers/net/wireless/ath/dfs_pattern_detector.c linux-4.4.115-fbx/drivers/net/wireless/ath/dfs_pattern_detector.c
--- linux-4.4.115/drivers/net/wireless/ath/dfs_pattern_detector.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/net/wireless/ath/dfs_pattern_detector.c	2019-10-29 09:26:24.473211301 +0100
@@ -37,6 +37,11 @@
 #define MIN_PPB_THRESH	50
 #define PPB_THRESH_RATE(PPB, RATE) ((PPB * RATE + 100 - RATE) / 100)
 #define PPB_THRESH(PPB) PPB_THRESH_RATE(PPB, MIN_PPB_THRESH)
+/* percentage on ppb threshold, for ETSI type4, to trigger detection */
+#define MIN_PPB_THRESH_TYPE4	30
+#define PPB_THRESH_RATE_TYPE4(PPB, RATE) ((PPB * RATE + 100 - RATE) / 100)
+#define PPB_THRESH_TYPE4(PPB) PPB_THRESH_RATE_TYPE4(PPB, MIN_PPB_THRESH_TYPE4)
+
 #define PRF2PRI(PRF) ((1000000 + PRF / 2) / PRF)
 /* percentage of pulse width tolerance */
 #define WIDTH_TOLERANCE 5
@@ -51,13 +56,21 @@
 	PPB_THRESH(PPB), PRI_TOLERANCE,	CHIRP			\
 }
 
+#define ETSI_PATTERN_TYPE4(ID, WMIN, WMAX, PMIN, PMAX, PRF, PPB, CHIRP)	\
+{								\
+	ID, WIDTH_LOWER(WMIN), WIDTH_UPPER(WMAX),		\
+	(PRF2PRI(PMAX) - PRI_TOLERANCE),			\
+	(PRF2PRI(PMIN) * PRF + PRI_TOLERANCE), PRF, PPB * PRF,	\
+	PPB_THRESH_TYPE4(PPB), PRI_TOLERANCE,	CHIRP			\
+}
+
 /* radar types as defined by ETSI EN-301-893 v1.5.1 */
 static const struct radar_detector_specs etsi_radar_ref_types_v15[] = {
 	ETSI_PATTERN(0,  0,  1,  700,  700, 1, 18, false),
 	ETSI_PATTERN(1,  0,  5,  200, 1000, 1, 10, false),
 	ETSI_PATTERN(2,  0, 15,  200, 1600, 1, 15, false),
 	ETSI_PATTERN(3,  0, 15, 2300, 4000, 1, 25, false),
-	ETSI_PATTERN(4, 20, 30, 2000, 4000, 1, 20, false),
+	ETSI_PATTERN_TYPE4(4, 20, 30, 2000, 4000, 1, 20, false),
 	ETSI_PATTERN(5,  0,  2,  300,  400, 3, 10, false),
 	ETSI_PATTERN(6,  0,  2,  400, 1200, 3, 15, false),
 };
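
The TYPE4 variant only lowers the detection percentage from the default 50%
(MIN_PPB_THRESH) to 30% for ETSI type-4 radar; the round-up integer division
is unchanged. A minimal standalone sketch of the threshold arithmetic
(illustration only, not part of the patch):

	#include <assert.h>

	/* same round-up percentage computation as PPB_THRESH_RATE above */
	#define PPB_THRESH_RATE(PPB, RATE) ((PPB * RATE + 100 - RATE) / 100)

	int main(void)
	{
		assert(PPB_THRESH_RATE(25, 50) == 13); /* default: 50% of 25 */
		assert(PPB_THRESH_RATE(20, 30) == 6);  /* type 4: 30% of 20 */
		return 0;
	}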
diff -ruw linux-4.4.115/drivers/net/wireless/ath/regd.c linux-4.4.115-fbx/drivers/net/wireless/ath/regd.c
--- linux-4.4.115/drivers/net/wireless/ath/regd.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/net/wireless/ath/regd.c	2019-10-29 09:26:24.473211301 +0100
@@ -38,28 +38,28 @@
 /* We enable active scan on these a case by case basis by regulatory domain */
 #define ATH9K_2GHZ_CH12_13	REG_RULE(2467-10, 2472+10, 40, 0, 20,\
 					 NL80211_RRF_NO_IR)
-#define ATH9K_2GHZ_CH14		REG_RULE(2484-10, 2484+10, 40, 0, 20,\
+#define ATH9K_2GHZ_CH14		REG_RULE(2484 - 10, 2484 + 10, 20, 0, 20,\
 					 NL80211_RRF_NO_IR | \
 					 NL80211_RRF_NO_OFDM)
 
 /* We allow IBSS on these on a case by case basis by regulatory domain */
-#define ATH9K_5GHZ_5150_5350	REG_RULE(5150-10, 5350+10, 80, 0, 30,\
+#define ATH9K_5GHZ_5180_5320	REG_RULE(5180 - 10, 5320 + 10, 160, 0, 20,\
 					 NL80211_RRF_NO_IR)
-#define ATH9K_5GHZ_5470_5850	REG_RULE(5470-10, 5850+10, 80, 0, 30,\
+#define ATH9K_5GHZ_5500_5825	REG_RULE(5500 - 10, 5825 + 10, 80, 0, 20,\
 					 NL80211_RRF_NO_IR)
-#define ATH9K_5GHZ_5725_5850	REG_RULE(5725-10, 5850+10, 80, 0, 30,\
+#define ATH9K_5GHZ_5745_5825	REG_RULE(5745 - 10, 5825 + 10, 80, 0, 20,\
 					 NL80211_RRF_NO_IR)
 
 #define ATH9K_2GHZ_ALL		ATH9K_2GHZ_CH01_11, \
 				ATH9K_2GHZ_CH12_13, \
 				ATH9K_2GHZ_CH14
 
-#define ATH9K_5GHZ_ALL		ATH9K_5GHZ_5150_5350, \
-				ATH9K_5GHZ_5470_5850
+#define ATH9K_5GHZ_ALL		ATH9K_5GHZ_5180_5320, \
+				ATH9K_5GHZ_5500_5825
 
 /* This one skips what we call "mid band" */
-#define ATH9K_5GHZ_NO_MIDBAND	ATH9K_5GHZ_5150_5350, \
-				ATH9K_5GHZ_5725_5850
+#define ATH9K_5GHZ_NO_MIDBAND	ATH9K_5GHZ_5180_5320, \
+				ATH9K_5GHZ_5745_5825
 
 /* Can be used for:
  * 0x60, 0x61, 0x62 */
@@ -256,7 +256,7 @@
 /* Frequency is one where radar detection is required */
 static bool ath_is_radar_freq(u16 center_freq)
 {
-	return (center_freq >= 5260 && center_freq <= 5700);
+	return (center_freq >= 5260 && center_freq <= 5720);
 }
 
 static void ath_force_clear_no_ir_chan(struct wiphy *wiphy,
@@ -631,6 +631,8 @@
 					 struct regulatory_request *request))
 {
 	const struct ieee80211_regdomain *regd;
+	u32 chan_num;
+	struct ieee80211_channel *chan;
 
 	wiphy->reg_notifier = reg_notifier;
 	wiphy->regulatory_flags |= REGULATORY_STRICT_REG |
@@ -653,6 +655,20 @@
 	}
 
 	wiphy_apply_custom_regulatory(wiphy, regd);
+
+	/* For regulatory rules similar to the following:
+	 * REG_RULE(2412-10, 2462+10, 40, 0, 20, 0), channels 12/13 are enabled
+	 * due to their 5/10 MHz support.
+	 * Therefore, disable 2.4 GHz channels that don't have 20 MHz bandwidth.
+	 */
+	for (chan_num = 0;
+	     chan_num < wiphy->bands[IEEE80211_BAND_2GHZ]->n_channels;
+	     chan_num++) {
+		chan = &wiphy->bands[IEEE80211_BAND_2GHZ]->channels[chan_num];
+		if (chan->flags & IEEE80211_CHAN_NO_20MHZ)
+			chan->flags |= IEEE80211_CHAN_DISABLED;
+	}
+
 	ath_reg_apply_radar_flags(wiphy);
 	ath_reg_apply_world_flags(wiphy, NL80211_REGDOM_SET_BY_DRIVER, reg);
 	return 0;
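
Raising the ath_is_radar_freq() upper bound from 5700 to 5720 MHz keeps the
DFS flags on channel 144, which the widened 5500-5825 rule above now covers.
A hedged helper showing the standard 5 GHz frequency-to-channel mapping this
relies on (illustration only):

	/* standard 5 GHz mapping: channel = (center_freq - 5000) / 5 */
	static inline int chan_5ghz(int center_freq_mhz)
	{
		return (center_freq_mhz - 5000) / 5; /* 5720 MHz -> ch 144 */
	}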
diff -ruw linux-4.4.115/drivers/net/wireless/ath/wil6210/Kconfig linux-4.4.115-fbx/drivers/net/wireless/ath/wil6210/Kconfig
--- linux-4.4.115/drivers/net/wireless/ath/wil6210/Kconfig	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/net/wireless/ath/wil6210/Kconfig	2019-01-22 16:16:25.491264409 +0100
@@ -40,3 +40,35 @@
 	  option if you are interested in debugging the driver.
 
 	  If unsure, say Y to make it easier to debug problems.
+
+config WIL6210_WRITE_IOCTL
+	bool "wil6210 write ioctl to the device"
+	depends on WIL6210
+	default y
+	---help---
+	  Say Y here to allow write-access from user-space to
+	  the device memory through ioctl. This is useful for
+	  debugging purposes only.
+
+	  If unsure, say N.
+
+config WIL6210_PLATFORM_MSM
+	bool "wil6210 MSM platform specific support"
+	depends on WIL6210
+	depends on ARCH_QCOM
+	default y
+	---help---
+	  Say Y here to enable wil6210 driver support for MSM
+	  platform-specific features.
+
+config WIL6210_DEBUGFS
+	bool "wil6210 debugfs support"
+	depends on WIL6210
+	depends on DEBUG_FS
+	default y
+	---help---
+	  Say Y here to enable wil6210 debugfs support, using the
+	  kernel debugfs infrastructure. Select this
+	  option if you are interested in debugging the driver.
+
+	  If unsure, say Y to make it easier to debug problems.
diff -ruw linux-4.4.115/drivers/net/wireless/ath/wil6210/Makefile linux-4.4.115-fbx/drivers/net/wireless/ath/wil6210/Makefile
--- linux-4.4.115/drivers/net/wireless/ath/wil6210/Makefile	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/net/wireless/ath/wil6210/Makefile	2019-01-22 16:16:25.491264409 +0100
@@ -4,7 +4,8 @@
 wil6210-y += netdev.o
 wil6210-y += cfg80211.o
 wil6210-y += pcie_bus.o
-wil6210-y += debugfs.o
+wil6210-$(CONFIG_WIL6210_DEBUGFS) += debugfs.o
+wil6210-y += sysfs.o
 wil6210-y += wmi.o
 wil6210-y += interrupt.o
 wil6210-y += txrx.o
@@ -18,8 +19,13 @@
 wil6210-y += wil_platform.o
 wil6210-y += ethtool.o
 wil6210-y += wil_crash_dump.o
+wil6210-y += p2p.o
+wil6210-y += ftm.o
 
 # for tracing framework to find trace.h
 CFLAGS_trace.o := -I$(src)
 
 subdir-ccflags-y += -D__CHECK_ENDIAN__
+
+MSM_11AD_PATH = drivers/platform/msm/msm_11ad
+CFLAGS_wil_platform.o := -I$(MSM_11AD_PATH)
diff -ruw linux-4.4.115/drivers/net/wireless/Kconfig linux-4.4.115-fbx/drivers/net/wireless/Kconfig
--- linux-4.4.115/drivers/net/wireless/Kconfig	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/net/wireless/Kconfig	2019-10-29 09:26:24.453211105 +0100
@@ -265,6 +265,60 @@
 	  To compile this driver as a module, choose M here: the module
 	  will be called mwl8k.  If unsure, say N.
 
+config WIFI_CONTROL_FUNC
+	bool "Enable WiFi control function abstraction"
+	help
+	  Enables Power/Reset/Carddetect function abstraction
+
+config WCNSS_CORE
+	tristate "Qualcomm WCNSS CORE driver"
+	select WIRELESS_EXT
+	select WEXT_PRIV
+	select WEXT_CORE
+	select WEXT_SPY
+	---help---
+	  Core driver for the Qualcomm WCNSS triple play connectivity subsystem
+
+config WCNSS_CORE_PRONTO
+	tristate "Qualcomm WCNSS Pronto Support"
+	depends on WCNSS_CORE
+	---help---
+	  Pronto Support for the Qualcomm WCNSS triple play connectivity subsystem
+
+config WCNSS_REGISTER_DUMP_ON_BITE
+	bool "Enable/disable WCNSS register dump when there is a WCNSS bite"
+	depends on WCNSS_CORE_PRONTO
+	---help---
+	  When the Apps processor receives a WDOG bite from WCNSS, collecting
+	  a register dump of WCNSS is helpful for root-causing the failure.
+	  WCNSS may not be properly clocked in some bite cases, which can
+	  cause unclocked register access failures. This option enables or
+	  disables the register dump on a WCNSS WDOG bite.
+
+config WCNSS_MEM_PRE_ALLOC
+	tristate "WCNSS pre-alloc memory support"
+	---help---
+	  Pre-allocate memory for the WLAN driver module.
+	  This feature enables the cld WLAN driver to use pre-allocated
+	  memory for its internal usage and to release it back to the
+	  pre-allocated pool. This memory is allocated at cold boot time.
+
+config CNSS_CRYPTO
+	tristate "Enable CNSS crypto support"
+	---help---
+	  Add crypto support for the WLAN driver module.
+	  This feature enables the WLAN driver to use the crypto APIs
+	  exported from the cnss platform driver. These APIs are used to
+	  generate cipher keys and to support the WLAN driver module's
+	  security protocols.
+
+config CNSS_QCA6290
+	bool "Enable CNSS QCA6290 chipset specific changes"
+	---help---
+	  This enables the WLAN host driver changes that are specific to
+	  the CNSS QCA6290 chipset.
+	  These changes are needed to support the new hardware architecture
+	  of the CNSS QCA6290 chipset.
+
 source "drivers/net/wireless/ath/Kconfig"
 source "drivers/net/wireless/b43/Kconfig"
 source "drivers/net/wireless/b43legacy/Kconfig"
@@ -285,5 +339,9 @@
 source "drivers/net/wireless/mwifiex/Kconfig"
 source "drivers/net/wireless/cw1200/Kconfig"
 source "drivers/net/wireless/rsi/Kconfig"
+source "drivers/net/wireless/cnss/Kconfig"
+source "drivers/net/wireless/cnss2/Kconfig"
+source "drivers/net/wireless/cnss_genl/Kconfig"
+source "drivers/net/wireless/cnss_utils/Kconfig"
 
 endif # WLAN
diff -ruw linux-4.4.115/drivers/net/wireless/Makefile linux-4.4.115-fbx/drivers/net/wireless/Makefile
--- linux-4.4.115/drivers/net/wireless/Makefile	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/net/wireless/Makefile	2019-10-29 09:26:24.453211105 +0100
@@ -60,3 +60,12 @@
 
 obj-$(CONFIG_CW1200)	+= cw1200/
 obj-$(CONFIG_RSI_91X)	+= rsi/
+
+obj-$(CONFIG_WCNSS_CORE)	+= wcnss/
+
+obj-$(CONFIG_CNSS)	+= cnss/
+obj-$(CONFIG_CNSS2)	+= cnss2/
+obj-$(CONFIG_WCNSS_MEM_PRE_ALLOC) += cnss_prealloc/
+obj-$(CONFIG_CNSS_CRYPTO)	+= cnss_crypto/
+obj-$(CONFIG_CNSS_GENL) += cnss_genl/
+obj-$(CONFIG_CNSS_UTILS) += cnss_utils/
diff -ruw linux-4.4.115/drivers/nfc/Kconfig linux-4.4.115-fbx/drivers/nfc/Kconfig
--- linux-4.4.115/drivers/nfc/Kconfig	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/nfc/Kconfig	2019-01-22 16:16:25.999269009 +0100
@@ -77,3 +77,11 @@
 source "drivers/nfc/nxp-nci/Kconfig"
 source "drivers/nfc/s3fwrn5/Kconfig"
 endmenu
+
+config NFC_NQ
+	tristate "QTI NCI based NFC Controller Driver for NQx"
+	depends on I2C
+	help
+	  This enables the NFC driver for NQx-based devices.
+	  This is for the I2C-connected version. The NCI protocol logic
+	  resides in user mode and has no other NFC dependencies.
diff -ruw linux-4.4.115/drivers/nfc/Makefile linux-4.4.115-fbx/drivers/nfc/Makefile
--- linux-4.4.115/drivers/nfc/Makefile	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/nfc/Makefile	2019-01-22 16:16:25.999269009 +0100
@@ -16,3 +16,4 @@
 obj-$(CONFIG_NFC_ST_NCI)	+= st-nci/
 obj-$(CONFIG_NFC_NXP_NCI)	+= nxp-nci/
 obj-$(CONFIG_NFC_S3FWRN5)	+= s3fwrn5/
+obj-$(CONFIG_NFC_NQ)		+= nq-nci.o
diff -ruw linux-4.4.115/drivers/of/address.c linux-4.4.115-fbx/drivers/of/address.c
--- linux-4.4.115/drivers/of/address.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/of/address.c	2019-01-22 16:16:26.015269154 +0100
@@ -788,6 +788,22 @@
 #endif
 }
 
+const __be32 *of_get_address_by_name(struct device_node *dev, const char *name,
+		u64 *size, unsigned int *flags)
+{
+	int index;
+
+	if (!name)
+		return NULL;
+
+	/* Try to read "reg-names" property and get the index by name */
+	index = of_property_match_string(dev, "reg-names", name);
+	if (index < 0)
+		return NULL;
+
+	return of_get_address(dev, index, size, flags);
+}
+EXPORT_SYMBOL(of_get_address_by_name);
+
 static int __of_address_to_resource(struct device_node *dev,
 		const __be32 *addrp, u64 size, unsigned int flags,
 		const char *name, struct resource *r)
@@ -1026,3 +1042,19 @@
 	return false;
 }
 EXPORT_SYMBOL_GPL(of_dma_is_coherent);
+
+void __iomem *of_iomap_by_name(struct device_node *np, const char *name)
+{
+	int index;
+
+	if (!name)
+		return NULL;
+
+	/* Try to read "reg-names" property and get the index by name */
+	index = of_property_match_string(np, "reg-names", name);
+	if (index < 0)
+		return NULL;
+
+	return of_iomap(np, index);
+}
+EXPORT_SYMBOL(of_iomap_by_name);
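
A sketch of how a driver would consume the new name-based accessors; the
node layout, names, and probe function are hypothetical, not taken from
this patch:

	/* DT fragment (hypothetical):
	 *	uart@f9960000 {
	 *		reg = <0xf9960000 0x100>, <0xf9961000 0x40>;
	 *		reg-names = "core", "dma";
	 *	};
	 */
	#include <linux/of_address.h>
	#include <linux/platform_device.h>

	static int example_probe(struct platform_device *pdev)
	{
		void __iomem *base;

		/* map the "dma" register window by name, not by index */
		base = of_iomap_by_name(pdev->dev.of_node, "dma");
		if (!base)
			return -ENOMEM;
		/* ... */
		return 0;
	}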
diff -ruw linux-4.4.115/drivers/of/fdt.c linux-4.4.115-fbx/drivers/of/fdt.c
--- linux-4.4.115/drivers/of/fdt.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/of/fdt.c	2019-01-22 16:16:26.015269154 +0100
@@ -496,7 +496,7 @@
 
 		if (size &&
 		    early_init_dt_reserve_memory_arch(base, size, nomap) == 0)
-			pr_debug("Reserved memory: reserved region for node '%s': base %pa, size %ld MiB\n",
+			pr_info("Reserved memory: reserved region for node '%s': base %pa, size %ld MiB\n",
 				uname, &base, (unsigned long)size / SZ_1M);
 		else
 			pr_info("Reserved memory: failed to reserve memory for node '%s': base %pa, size %ld MiB\n",
@@ -763,6 +763,16 @@
 }
 
 #ifdef CONFIG_BLK_DEV_INITRD
+#ifndef __early_init_dt_declare_initrd
+static void __early_init_dt_declare_initrd(unsigned long start,
+					   unsigned long end)
+{
+	initrd_start = (unsigned long)__va(start);
+	initrd_end = (unsigned long)__va(end);
+	initrd_below_start_ok = 1;
+}
+#endif
+
 /**
  * early_init_dt_check_for_initrd - Decode initrd location from flat tree
  * @node: reference to node containing initrd location ('chosen')
@@ -785,9 +795,7 @@
 		return;
 	end = of_read_number(prop, len/4);
 
-	initrd_start = (unsigned long)__va(start);
-	initrd_end = (unsigned long)__va(end);
-	initrd_below_start_ok = 1;
+	__early_init_dt_declare_initrd(start, end);
 
 	pr_debug("initrd_start=0x%llx  initrd_end=0x%llx\n",
 		 (unsigned long long)start, (unsigned long long)end);
@@ -799,14 +807,13 @@
 #endif /* CONFIG_BLK_DEV_INITRD */
 
 #ifdef CONFIG_SERIAL_EARLYCON
-extern struct of_device_id __earlycon_of_table[];
 
 static int __init early_init_dt_scan_chosen_serial(void)
 {
 	int offset;
 	const char *p;
 	int l;
-	const struct of_device_id *match = __earlycon_of_table;
+	const struct earlycon_id *match;
 	const void *fdt = initial_boot_params;
 
 	offset = fdt_path_offset(fdt, "/chosen");
@@ -829,19 +836,20 @@
 	if (offset < 0)
 		return -ENODEV;
 
-	while (match->compatible[0]) {
+	for (match = __earlycon_table; match < __earlycon_table_end; match++) {
 		u64 addr;
 
-		if (fdt_node_check_compatible(fdt, offset, match->compatible)) {
-			match++;
+		if (!match->compatible[0])
+			continue;
+
+		if (fdt_node_check_compatible(fdt, offset, match->compatible))
 			continue;
-		}
 
 		addr = fdt_translate_address(fdt, offset);
 		if (addr == OF_BAD_ADDR)
 			return -ENXIO;
 
-		of_setup_earlycon(addr, match->data);
+		of_setup_earlycon(addr, match->setup);
 		return 0;
 	}
 	return -ENODEV;
@@ -941,36 +949,66 @@
 	return 0;
 }
 
+/*
+ * Convert configs to something easy to use in C code
+ */
+#if defined(CONFIG_CMDLINE_FORCE)
+static const int overwrite_incoming_cmdline = 1;
+static const int read_dt_cmdline;
+static const int concat_cmdline;
+#elif defined(CONFIG_CMDLINE_EXTEND)
+static const int overwrite_incoming_cmdline;
+static const int read_dt_cmdline = 1;
+static const int concat_cmdline = 1;
+#else /* CMDLINE_FROM_BOOTLOADER */
+static const int overwrite_incoming_cmdline;
+static const int read_dt_cmdline = 1;
+static const int concat_cmdline;
+#endif
+
+#ifdef CONFIG_CMDLINE
+static const char *config_cmdline = CONFIG_CMDLINE;
+#else
+static const char *config_cmdline = "";
+#endif
+
 int __init early_init_dt_scan_chosen(unsigned long node, const char *uname,
 				     int depth, void *data)
 {
-	int l;
-	const char *p;
+	int l = 0;
+	const char *p = NULL;
+	char *cmdline = data;
 
 	pr_debug("search \"chosen\", depth: %d, uname: %s\n", depth, uname);
 
-	if (depth != 1 || !data ||
+	if (depth != 1 || !cmdline ||
 	    (strcmp(uname, "chosen") != 0 && strcmp(uname, "chosen@0") != 0))
 		return 0;
 
 	early_init_dt_check_for_initrd(node);
 
-	/* Retrieve command line */
+	/* Put CONFIG_CMDLINE in if forced or if data had nothing in it to start */
+	if (overwrite_incoming_cmdline || !cmdline[0])
+		strlcpy(cmdline, config_cmdline, COMMAND_LINE_SIZE);
+
+	/* Retrieve command line unless forcing */
+	if (read_dt_cmdline)
 	p = of_get_flat_dt_prop(node, "bootargs", &l);
-	if (p != NULL && l > 0)
-		strlcpy(data, p, min((int)l, COMMAND_LINE_SIZE));
 
-	/*
-	 * CONFIG_CMDLINE is meant to be a default in case nothing else
-	 * managed to set the command line, unless CONFIG_CMDLINE_FORCE
-	 * is set in which case we override whatever was found earlier.
-	 */
-#ifdef CONFIG_CMDLINE
-#ifndef CONFIG_CMDLINE_FORCE
-	if (!((char *)data)[0])
-#endif
-		strlcpy(data, CONFIG_CMDLINE, COMMAND_LINE_SIZE);
-#endif /* CONFIG_CMDLINE */
+	if (p != NULL && l > 0) {
+		if (concat_cmdline) {
+			int cmdline_len;
+			int copy_len;
+			strlcat(cmdline, " ", COMMAND_LINE_SIZE);
+			cmdline_len = strlen(cmdline);
+			copy_len = COMMAND_LINE_SIZE - cmdline_len - 1;
+			copy_len = min((int)l, copy_len);
+			strncpy(cmdline + cmdline_len, p, copy_len);
+			cmdline[cmdline_len + copy_len] = '\0';
+		} else {
+			strlcpy(cmdline, p, min((int)l, COMMAND_LINE_SIZE));
+		}
+	}
 
 	pr_debug("Command line is: %s\n", (char*)data);
 
@@ -979,13 +1017,16 @@
 }
 
 #ifdef CONFIG_HAVE_MEMBLOCK
+#ifndef MIN_MEMBLOCK_ADDR
+#define MIN_MEMBLOCK_ADDR	__pa(PAGE_OFFSET)
+#endif
 #ifndef MAX_MEMBLOCK_ADDR
 #define MAX_MEMBLOCK_ADDR	((phys_addr_t)~0)
 #endif
 
 void __init __weak early_init_dt_add_memory_arch(u64 base, u64 size)
 {
-	const u64 phys_offset = __pa(PAGE_OFFSET);
+	const u64 phys_offset = MIN_MEMBLOCK_ADDR;
 
 	if (!PAGE_ALIGNED(base)) {
 		if (size < PAGE_SIZE - (base & ~PAGE_MASK)) {
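
The three command-line Kconfig policies now map onto the constant flags
introduced above. A worked example with hypothetical values:

	CONFIG_CMDLINE      = "console=ttyMSM0,115200"
	DT /chosen/bootargs = "root=/dev/sda1"

	CMDLINE_FROM_BOOTLOADER -> "root=/dev/sda1"
	    (CONFIG_CMDLINE survives only if bootargs and the incoming
	    command line are both empty)
	CMDLINE_EXTEND          -> "console=ttyMSM0,115200 root=/dev/sda1"
	CMDLINE_FORCE           -> "console=ttyMSM0,115200"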
diff -ruw linux-4.4.115/drivers/of/Kconfig linux-4.4.115-fbx/drivers/of/Kconfig
--- linux-4.4.115/drivers/of/Kconfig	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/of/Kconfig	2019-01-22 16:16:26.015269154 +0100
@@ -102,6 +102,12 @@
 config OF_RESOLVE
 	bool
 
+config OF_SLIMBUS
+	def_tristate SLIMBUS
+	depends on SLIMBUS
+	help
+	  OpenFirmware SLIMBUS accessors
+
 config OF_OVERLAY
 	bool "Device Tree overlays"
 	select OF_DYNAMIC
@@ -112,4 +118,9 @@
 	  While this option is selected automatically when needed, you can
 	  enable it manually to improve device tree unit test coverage.
 
+config OF_BATTERYDATA
+	def_bool y
+	help
+	  OpenFirmware BatteryData accessors
+
 endif # OF
diff -ruw linux-4.4.115/drivers/of/Makefile linux-4.4.115-fbx/drivers/of/Makefile
--- linux-4.4.115/drivers/of/Makefile	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/of/Makefile	2019-01-22 16:16:26.015269154 +0100
@@ -14,5 +14,7 @@
 obj-$(CONFIG_OF_RESERVED_MEM) += of_reserved_mem.o
 obj-$(CONFIG_OF_RESOLVE)  += resolver.o
 obj-$(CONFIG_OF_OVERLAY) += overlay.o
+obj-$(CONFIG_OF_SLIMBUS)        += of_slimbus.o
+obj-$(CONFIG_OF_BATTERYDATA) += of_batterydata.o
 
 obj-$(CONFIG_OF_UNITTEST) += unittest-data/
diff -ruw linux-4.4.115/drivers/of/of_reserved_mem.c linux-4.4.115-fbx/drivers/of/of_reserved_mem.c
--- linux-4.4.115/drivers/of/of_reserved_mem.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/of/of_reserved_mem.c	2019-01-22 16:16:26.019269190 +0100
@@ -153,7 +153,7 @@
 			ret = early_init_dt_alloc_reserved_memory_arch(size,
 					align, start, end, nomap, &base);
 			if (ret == 0) {
-				pr_debug("Reserved memory: allocated memory for '%s' node: base %pa, size %ld MiB\n",
+				pr_info("Reserved memory: allocated memory for '%s' node: base %pa, size %ld MiB\n",
 					uname, &base,
 					(unsigned long)size / SZ_1M);
 				break;
@@ -165,7 +165,7 @@
 		ret = early_init_dt_alloc_reserved_memory_arch(size, align,
 							0, 0, nomap, &base);
 		if (ret == 0)
-			pr_debug("Reserved memory: allocated memory for '%s' node: base %pa, size %ld MiB\n",
+			pr_info("Reserved memory: allocated memory for '%s' node: base %pa, size %ld MiB\n",
 				uname, &base, (unsigned long)size / SZ_1M);
 	}
 
diff -ruw linux-4.4.115/drivers/of/platform.c linux-4.4.115-fbx/drivers/of/platform.c
--- linux-4.4.115/drivers/of/platform.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/of/platform.c	2019-01-22 16:16:26.019269190 +0100
@@ -21,6 +21,7 @@
 #include <linux/of_device.h>
 #include <linux/of_irq.h>
 #include <linux/of_platform.h>
+#include <linux/of_reserved_mem.h>
 #include <linux/platform_device.h>
 
 const struct of_device_id of_default_bus_match_table[] = {
@@ -139,7 +140,7 @@
 	}
 
 	dev->dev.of_node = of_node_get(np);
-	dev->dev.parent = parent ? : &platform_bus;
+	dev->dev.parent = parent;
 
 	if (bus_id)
 		dev_set_name(&dev->dev, "%s", bus_id);
@@ -185,6 +186,7 @@
 	dev->dev.platform_data = platform_data;
 	of_dma_configure(&dev->dev, dev->dev.of_node);
 	of_msi_configure(&dev->dev, dev->dev.of_node);
+	of_reserved_mem_device_init(&dev->dev);
 
 	if (of_device_add(dev) != 0) {
 		of_dma_deconfigure(&dev->dev);
@@ -241,7 +243,7 @@
 
 	/* setup generic device info */
 	dev->dev.of_node = of_node_get(node);
-	dev->dev.parent = parent ? : &platform_bus;
+	dev->dev.parent = parent;
 	dev->dev.platform_data = platform_data;
 	if (bus_id)
 		dev_set_name(&dev->dev, "%s", bus_id);
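
With of_reserved_mem_device_init() called at platform-device creation, a
node carrying a memory-region phandle gets its reserved-memory pool
attached automatically. A DT sketch (names and addresses hypothetical):

	fb_mem: fb-region@90000000 {
		compatible = "shared-dma-pool";
		reg = <0x90000000 0x800000>;
	};

	display@a0000000 {
		memory-region = <&fb_mem>;
	};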
diff -ruw linux-4.4.115/drivers/pci/host/Makefile linux-4.4.115-fbx/drivers/pci/host/Makefile
--- linux-4.4.115/drivers/pci/host/Makefile	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/pci/host/Makefile	2019-01-22 16:16:26.031269299 +0100
@@ -4,6 +4,7 @@
 obj-$(CONFIG_PCI_IMX6) += pci-imx6.o
 obj-$(CONFIG_PCI_MVEBU) += pci-mvebu.o
 obj-$(CONFIG_PCI_TEGRA) += pci-tegra.o
+obj-$(CONFIG_PCI_MSM)	+= pci-msm.o
 obj-$(CONFIG_PCI_RCAR_GEN2) += pci-rcar-gen2.o
 obj-$(CONFIG_PCI_RCAR_GEN2_PCIE) += pcie-rcar.o
 obj-$(CONFIG_PCI_HOST_GENERIC) += pci-host-generic.o
diff -ruw linux-4.4.115/drivers/pci/Kconfig linux-4.4.115-fbx/drivers/pci/Kconfig
--- linux-4.4.115/drivers/pci/Kconfig	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/pci/Kconfig	2019-01-22 16:16:26.031269299 +0100
@@ -114,6 +114,20 @@
 
 	  If unsure, say N.
 
+config PCI_MSM
+	bool "MSM PCIe Controller driver"
+	depends on ARCH_QCOM && PCI
+	select PCI_DOMAINS
+	select PCI_DOMAINS_GENERIC
+	select PCI_MSI
+	help
+	  Enables the PCIe functionality by configuring PCIe core on
+	  MSM chipset and by enabling the ARM PCI framework extension.
+	  The PCIe core is essential for communication between the host
+	  and an endpoint.
+
+	  If unsure, say N.
+
 config PCI_LABEL
 	def_bool y if (DMI || ACPI)
 	select NLS
diff -ruw linux-4.4.115/drivers/perf/arm_pmu.c linux-4.4.115-fbx/drivers/perf/arm_pmu.c
--- linux-4.4.115/drivers/perf/arm_pmu.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/perf/arm_pmu.c	2019-10-29 09:26:24.577212318 +0100
@@ -13,6 +13,7 @@
 
 #include <linux/bitmap.h>
 #include <linux/cpumask.h>
+#include <linux/cpu_pm.h>
 #include <linux/export.h>
 #include <linux/kernel.h>
 #include <linux/of_device.h>
@@ -22,6 +23,7 @@
 #include <linux/spinlock.h>
 #include <linux/irq.h>
 #include <linux/irqdesc.h>
+#include <linux/debugfs.h>
 
 #include <asm/cputype.h>
 #include <asm/irq_regs.h>
@@ -365,6 +367,8 @@
 		return err;
 	}
 
+	armpmu->pmu_state = ARM_PMU_STATE_RUNNING;
+
 	return 0;
 }
 
@@ -548,17 +552,10 @@
 		.stop		= armpmu_stop,
 		.read		= armpmu_read,
 		.filter_match	= armpmu_filter_match,
+		.events_across_hotplug = 1,
 	};
 }
 
-int armpmu_register(struct arm_pmu *armpmu, int type)
-{
-	armpmu_init(armpmu);
-	pr_info("enabled with %s PMU driver, %d counters available\n",
-			armpmu->name, armpmu->num_events);
-	return perf_pmu_register(&armpmu->pmu, armpmu->name, type);
-}
-
 /* Set at runtime when we know what CPU type we are. */
 static struct arm_pmu *__oprofile_cpu_pmu;
 
@@ -606,10 +603,12 @@
 	struct platform_device *pmu_device = cpu_pmu->plat_device;
 	struct pmu_hw_events __percpu *hw_events = cpu_pmu->hw_events;
 
+	cpu_pmu->pmu_state = ARM_PMU_STATE_GOING_DOWN;
+
 	irqs = min(pmu_device->num_resources, num_possible_cpus());
 
 	irq = platform_get_irq(pmu_device, 0);
-	if (irq >= 0 && irq_is_percpu(irq)) {
+	if (irq > 0 && irq_is_percpu(irq)) {
 		on_each_cpu(cpu_pmu_disable_percpu_irq, &irq, 1);
 		free_percpu_irq(irq, &hw_events->percpu_pmu);
 	} else {
@@ -622,10 +621,11 @@
 			if (!cpumask_test_and_clear_cpu(cpu, &cpu_pmu->active_irqs))
 				continue;
 			irq = platform_get_irq(pmu_device, i);
-			if (irq >= 0)
+			if (irq > 0)
 				free_irq(irq, per_cpu_ptr(&hw_events->percpu_pmu, cpu));
 		}
 	}
+	cpu_pmu->pmu_state = ARM_PMU_STATE_OFF;
 }
 
 static int cpu_pmu_request_irq(struct arm_pmu *cpu_pmu, irq_handler_t handler)
@@ -644,7 +644,7 @@
 	}
 
 	irq = platform_get_irq(pmu_device, 0);
-	if (irq >= 0 && irq_is_percpu(irq)) {
+	if (irq > 0 && irq_is_percpu(irq)) {
 		err = request_percpu_irq(irq, handler, "arm-pmu",
 					 &hw_events->percpu_pmu);
 		if (err) {
@@ -653,6 +653,7 @@
 			return err;
 		}
 		on_each_cpu(cpu_pmu_enable_percpu_irq, &irq, 1);
+		cpu_pmu->percpu_irq = irq;
 	} else {
 		for (i = 0; i < irqs; ++i) {
 			int cpu = i;
@@ -692,6 +693,134 @@
 	return 0;
 }
 
+struct cpu_pm_pmu_args {
+	struct arm_pmu	*armpmu;
+	unsigned long	cmd;
+	int		cpu;
+	int		ret;
+};
+
+#ifdef CONFIG_CPU_PM
+static void cpu_pm_pmu_setup(struct arm_pmu *armpmu, unsigned long cmd)
+{
+	struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
+	struct perf_event *event;
+	int idx;
+
+	for (idx = 0; idx < armpmu->num_events; idx++) {
+		/*
+		 * If the counter is not used skip it, there is no
+		 * need of stopping/restarting it.
+		 */
+		if (!test_bit(idx, hw_events->used_mask))
+			continue;
+
+		event = hw_events->events[idx];
+
+		switch (cmd) {
+		case CPU_PM_ENTER:
+			/*
+			 * Stop and update the counter
+			 */
+			armpmu_stop(event, PERF_EF_UPDATE);
+			break;
+		case CPU_PM_EXIT:
+		case CPU_PM_ENTER_FAILED:
+			 /*
+			  * Restore and enable the counter.
+			  * armpmu_start() indirectly calls
+			  *
+			  * perf_event_update_userpage()
+			  *
+			  * that requires RCU read locking to be functional,
+			  * wrap the call within RCU_NONIDLE to make the
+			  * RCU subsystem aware this cpu is not idle from
+			  * an RCU perspective for the armpmu_start() call
+			  * duration.
+			  */
+			RCU_NONIDLE(armpmu_start(event, PERF_EF_RELOAD));
+			break;
+		default:
+			break;
+		}
+	}
+}
+
+static void cpu_pm_pmu_common(void *info)
+{
+	struct cpu_pm_pmu_args *data	= info;
+	struct arm_pmu *armpmu		= data->armpmu;
+	unsigned long cmd		= data->cmd;
+	int cpu				= data->cpu;
+	struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
+	int enabled = bitmap_weight(hw_events->used_mask, armpmu->num_events);
+
+	if (!cpumask_test_cpu(cpu, &armpmu->supported_cpus)) {
+		data->ret = NOTIFY_DONE;
+		return;
+	}
+
+	if (!enabled) {
+		data->ret = NOTIFY_OK;
+		return;
+	}
+
+	data->ret = NOTIFY_OK;
+
+	switch (cmd) {
+	case CPU_PM_ENTER:
+		armpmu->stop(armpmu);
+		cpu_pm_pmu_setup(armpmu, cmd);
+		break;
+	case CPU_PM_EXIT:
+	case CPU_PM_ENTER_FAILED:
+		cpu_pm_pmu_setup(armpmu, cmd);
+		armpmu->start(armpmu);
+		break;
+	default:
+		data->ret = NOTIFY_DONE;
+		break;
+	}
+}
+
+static int cpu_pm_pmu_notify(struct notifier_block *b, unsigned long cmd,
+			     void *v)
+{
+	struct cpu_pm_pmu_args data = {
+		.armpmu	= container_of(b, struct arm_pmu, cpu_pm_nb),
+		.cmd	= cmd,
+		.cpu	= smp_processor_id(),
+	};
+
+	/*
+	 * Always reset the PMU registers on power-up even if
+	 * there are no events running.
+	 */
+	if (cmd == CPU_PM_EXIT && data.armpmu->reset)
+		data.armpmu->reset(data.armpmu);
+
+	cpu_pm_pmu_common(&data);
+	return data.ret;
+}
+
+static int cpu_pm_pmu_register(struct arm_pmu *cpu_pmu)
+{
+	cpu_pmu->cpu_pm_nb.notifier_call = cpu_pm_pmu_notify;
+	return cpu_pm_register_notifier(&cpu_pmu->cpu_pm_nb);
+}
+
+static void cpu_pm_pmu_unregister(struct arm_pmu *cpu_pmu)
+{
+	cpu_pm_unregister_notifier(&cpu_pmu->cpu_pm_nb);
+}
+#else
+static inline int cpu_pm_pmu_register(struct arm_pmu *cpu_pmu) { return 0; }
+static inline void cpu_pm_pmu_unregister(struct arm_pmu *cpu_pmu) { }
+static inline void cpu_pm_pmu_common(void *info) { }
+#endif
+
 /*
  * PMU hardware loses all context when a CPU goes offline.
  * When a CPU is hotplugged back in, since some hardware registers are
@@ -701,21 +830,49 @@
 static int cpu_pmu_notify(struct notifier_block *b, unsigned long action,
 			  void *hcpu)
 {
-	int cpu = (unsigned long)hcpu;
-	struct arm_pmu *pmu = container_of(b, struct arm_pmu, hotplug_nb);
-
-	if ((action & ~CPU_TASKS_FROZEN) != CPU_STARTING)
-		return NOTIFY_DONE;
-
-	if (!cpumask_test_cpu(cpu, &pmu->supported_cpus))
-		return NOTIFY_DONE;
+	int irq = -1;
+	unsigned long masked_action = (action & ~CPU_TASKS_FROZEN);
+	struct cpu_pm_pmu_args data = {
+		.armpmu	= container_of(b, struct arm_pmu, hotplug_nb),
+		.cpu	= (unsigned long)hcpu,
+	};
 
-	if (pmu->reset)
-		pmu->reset(pmu);
-	else
+	if (!cpumask_test_cpu(data.cpu, &data.armpmu->supported_cpus))
 		return NOTIFY_DONE;
 
+	switch (masked_action) {
+	case CPU_STARTING:
+	case CPU_DOWN_FAILED:
+		/*
+		 * Always reset the PMU registers on power-up even if
+		 * there are no events running.
+		 */
+		if (data.armpmu->reset)
+			data.armpmu->reset(data.armpmu);
+		if (data.armpmu->pmu_state == ARM_PMU_STATE_RUNNING) {
+			if (data.armpmu->plat_device)
+				irq = data.armpmu->percpu_irq;
+			/* Arm the PMU IRQ before appearing. */
+			if (irq > 0 && irq_is_percpu(irq))
+				cpu_pmu_enable_percpu_irq(&irq);
+			data.cmd = CPU_PM_EXIT;
+			cpu_pm_pmu_common(&data);
+		}
+		return NOTIFY_OK;
+	case CPU_DYING:
+		if (data.armpmu->pmu_state != ARM_PMU_STATE_OFF) {
+			data.cmd = CPU_PM_ENTER;
+			cpu_pm_pmu_common(&data);
+			/* Disarm the PMU IRQ before disappearing. */
+			if (data.armpmu->plat_device)
+				irq = data.armpmu->percpu_irq;
+			if (irq > 0 && irq_is_percpu(irq))
+				cpu_pmu_disable_percpu_irq(&irq);
+		}
 	return NOTIFY_OK;
+	default:
+		return NOTIFY_DONE;
+	}
 }
 
 static int cpu_pmu_init(struct arm_pmu *cpu_pmu)
@@ -733,6 +890,10 @@
 	if (err)
 		goto out_hw_events;
 
+	err = cpu_pm_pmu_register(cpu_pmu);
+	if (err)
+		goto out_unregister;
+
 	for_each_possible_cpu(cpu) {
 		struct pmu_hw_events *events = per_cpu_ptr(cpu_hw_events, cpu);
 		raw_spin_lock_init(&events->pmu_lock);
@@ -754,6 +915,8 @@
 
 	return 0;
 
+out_unregister:
+	unregister_cpu_notifier(&cpu_pmu->hotplug_nb);
 out_hw_events:
 	free_percpu(cpu_hw_events);
 	return err;
@@ -761,6 +924,7 @@
 
 static void cpu_pmu_destroy(struct arm_pmu *cpu_pmu)
 {
+	cpu_pm_pmu_unregister(cpu_pmu);
 	unregister_cpu_notifier(&cpu_pmu->hotplug_nb);
 	free_percpu(cpu_pmu->hw_events);
 }
@@ -809,7 +973,7 @@
 
 		/* Check the IRQ type and prohibit a mix of PPIs and SPIs */
 		irq = platform_get_irq(pdev, i);
-		if (irq >= 0) {
+		if (irq > 0) {
 			bool spi = !irq_is_percpu(irq);
 
 			if (i > 0 && spi != using_spi) {
@@ -888,14 +1052,29 @@
 		return -ENOMEM;
 	}
 
+	armpmu_init(pmu);
+
 	if (!__oprofile_cpu_pmu)
 		__oprofile_cpu_pmu = pmu;
 
 	pmu->plat_device = pdev;
 
+	ret = cpu_pmu_init(pmu);
+	if (ret)
+		goto out_free;
+
 	if (node && (of_id = of_match_node(of_table, pdev->dev.of_node))) {
 		init_fn = of_id->data;
 
+		pmu->secure_access = of_property_read_bool(pdev->dev.of_node,
+							   "secure-reg-access");
+
+		/* arm64 systems boot only as non-secure */
+		if (IS_ENABLED(CONFIG_ARM64) && pmu->secure_access) {
+			pr_warn("ignoring \"secure-reg-access\" property for arm64\n");
+			pmu->secure_access = false;
+		}
+
 		ret = of_pmu_irq_cfg(pmu);
 		if (!ret)
 			ret = init_fn(pmu);
@@ -905,24 +1084,98 @@
 	}
 
 	if (ret) {
-		pr_info("failed to probe PMU!\n");
-		goto out_free;
+		pr_info("%s: failed to probe PMU!\n", of_node_full_name(node));
+		goto out_destroy;
 	}
 
-	ret = cpu_pmu_init(pmu);
-	if (ret)
-		goto out_free;
-
-	ret = armpmu_register(pmu, -1);
+	ret = perf_pmu_register(&pmu->pmu, pmu->name, -1);
 	if (ret)
 		goto out_destroy;
 
+	pmu->pmu_state  = ARM_PMU_STATE_OFF;
+	pmu->percpu_irq = -1;
+
+	pr_info("enabled with %s PMU driver, %d counters available\n",
+			pmu->name, pmu->num_events);
+
 	return 0;
 
 out_destroy:
 	cpu_pmu_destroy(pmu);
 out_free:
-	pr_info("failed to register PMU devices!\n");
+	pr_info("%s: failed to register PMU devices!\n",
+		of_node_full_name(node));
 	kfree(pmu);
 	return ret;
 }
+
+static struct dentry *perf_debug_dir;
+
+struct dentry *perf_create_debug_dir(void)
+{
+	if (!perf_debug_dir)
+		perf_debug_dir = debugfs_create_dir("msm_perf", NULL);
+	return perf_debug_dir;
+}
+
+#ifdef CONFIG_PERF_EVENTS_RESET_PMU_DEBUGFS
+static __ref void reset_pmu_force(void)
+{
+	int cpu, ret;
+	u32 save_online_mask = 0;
+
+	for_each_possible_cpu(cpu) {
+		if (!cpu_online(cpu)) {
+			save_online_mask |= BIT(cpu);
+			ret = cpu_up(cpu);
+			if (ret)
+				pr_err("Failed to bring up CPU: %d, ret: %d\n",
+				       cpu, ret);
+		}
+	}
+	if (cpu_pmu && cpu_pmu->reset)
+		on_each_cpu(cpu_pmu->reset, NULL, 1);
+	if (cpu_pmu && cpu_pmu->plat_device)
+		armpmu_release_hardware(cpu_pmu);
+	for_each_possible_cpu(cpu) {
+		if ((save_online_mask & BIT(cpu)) && cpu_online(cpu)) {
+			ret = cpu_down(cpu);
+			if (ret)
+				pr_err("Failed to bring down CPU: %d, ret: %d\n",
+						cpu, ret);
+		}
+	}
+}
+
+static int write_enabled_perfpmu_action(void *data, u64 val)
+{
+	if (val != 0)
+		reset_pmu_force();
+	return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(fops_pmuaction,
+		NULL, write_enabled_perfpmu_action, "%llu\n");
+
+int __init init_pmu_actions(void)
+{
+	struct dentry *dir;
+	struct dentry *file;
+	unsigned int value = 1;
+
+	dir = perf_create_debug_dir();
+	if (!dir)
+		return -ENOMEM;
+	file = debugfs_create_file("resetpmu", 0220, dir,
+		&value, &fops_pmuaction);
+	if (!file)
+		return -ENOMEM;
+	return 0;
+}
+#else
+int __init init_pmu_actions(void)
+{
+	return 0;
+}
+#endif
+late_initcall(init_pmu_actions);
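
The CPU_PM path is driven by platform idle code: cpu_pm_enter() and
cpu_pm_exit() walk the notifier chain that cpu_pm_pmu_register() hooks
into. A simplified sketch of the call flow (the idle function itself is
hypothetical):

	static int example_idle_enter(void)
	{
		cpu_pm_enter();	/* CPU_PM_ENTER: stop and save live counters */
		/* ... power-collapsing idle state; PMU registers are lost ... */
		cpu_pm_exit();	/* CPU_PM_EXIT: reset PMU, restore, restart */
		return 0;
	}

The debugfs hook is exercised by writing a nonzero value to the new
msm_perf/resetpmu file under the debugfs mount point, which runs
reset_pmu_force() across all CPUs.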
diff -ruw linux-4.4.115/drivers/perf/Makefile linux-4.4.115-fbx/drivers/perf/Makefile
--- linux-4.4.115/drivers/perf/Makefile	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/perf/Makefile	2019-01-22 16:16:26.075269698 +0100
@@ -1 +1,2 @@
 obj-$(CONFIG_ARM_PMU) += arm_pmu.o
+obj-$(CONFIG_HW_PERF_EVENTS) += perf_event_armv8.o
diff -ruw linux-4.4.115/drivers/phy/Makefile linux-4.4.115-fbx/drivers/phy/Makefile
--- linux-4.4.115/drivers/phy/Makefile	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/phy/Makefile	2019-01-22 16:16:26.075269698 +0100
@@ -44,6 +44,9 @@
 obj-$(CONFIG_PHY_QCOM_UFS) 	+= phy-qcom-ufs.o
 obj-$(CONFIG_PHY_QCOM_UFS) 	+= phy-qcom-ufs-qmp-20nm.o
 obj-$(CONFIG_PHY_QCOM_UFS) 	+= phy-qcom-ufs-qmp-14nm.o
+obj-$(CONFIG_PHY_QCOM_UFS) 	+= phy-qcom-ufs-qmp-v3.o
+obj-$(CONFIG_PHY_QCOM_UFS) 	+= phy-qcom-ufs-qrbtc-v2.o
+obj-$(CONFIG_PHY_QCOM_UFS) 	+= phy-qcom-ufs-qmp-v3-660.o
 obj-$(CONFIG_PHY_TUSB1210)		+= phy-tusb1210.o
 obj-$(CONFIG_PHY_BRCMSTB_SATA)		+= phy-brcmstb-sata.o
 obj-$(CONFIG_PHY_PISTACHIO_USB)		+= phy-pistachio-usb.o
diff -ruw linux-4.4.115/drivers/phy/phy-qcom-ufs.c linux-4.4.115-fbx/drivers/phy/phy-qcom-ufs.c
--- linux-4.4.115/drivers/phy/phy-qcom-ufs.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/phy/phy-qcom-ufs.c	2019-01-22 16:16:26.079269734 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013-2015, Linux Foundation. All rights reserved.
+ * Copyright (c) 2013-2016, Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -15,9 +15,9 @@
 #include "phy-qcom-ufs-i.h"
 
 #define MAX_PROP_NAME              32
-#define VDDA_PHY_MIN_UV            1000000
-#define VDDA_PHY_MAX_UV            1000000
-#define VDDA_PLL_MIN_UV            1800000
+#define VDDA_PHY_MIN_UV            800000
+#define VDDA_PHY_MAX_UV            925000
+#define VDDA_PLL_MIN_UV            1200000
 #define VDDA_PLL_MAX_UV            1800000
 #define VDDP_REF_CLK_MIN_UV        1200000
 #define VDDP_REF_CLK_MAX_UV        1200000
@@ -29,13 +29,24 @@
 static int ufs_qcom_phy_base_init(struct platform_device *pdev,
 				  struct ufs_qcom_phy *phy_common);
 
+void ufs_qcom_phy_write_tbl(struct ufs_qcom_phy *ufs_qcom_phy,
+			   struct ufs_qcom_phy_calibration *tbl,
+			   int tbl_size)
+{
+	int i;
+
+	for (i = 0; i < tbl_size; i++)
+		writel_relaxed(tbl[i].cfg_value,
+			       ufs_qcom_phy->mmio + tbl[i].reg_offset);
+}
+EXPORT_SYMBOL(ufs_qcom_phy_write_tbl);
+
 int ufs_qcom_phy_calibrate(struct ufs_qcom_phy *ufs_qcom_phy,
 			   struct ufs_qcom_phy_calibration *tbl_A,
 			   int tbl_size_A,
 			   struct ufs_qcom_phy_calibration *tbl_B,
 			   int tbl_size_B, bool is_rate_B)
 {
-	int i;
 	int ret = 0;
 
 	if (!tbl_A) {
@@ -44,9 +55,7 @@
 		goto out;
 	}
 
-	for (i = 0; i < tbl_size_A; i++)
-		writel_relaxed(tbl_A[i].cfg_value,
-			       ufs_qcom_phy->mmio + tbl_A[i].reg_offset);
+	ufs_qcom_phy_write_tbl(ufs_qcom_phy, tbl_A, tbl_size_A);
 
 	/*
 	 * In case we would like to work in rate B, we need
@@ -62,9 +71,7 @@
 			goto out;
 		}
 
-		for (i = 0; i < tbl_size_B; i++)
-			writel_relaxed(tbl_B[i].cfg_value,
-				ufs_qcom_phy->mmio + tbl_B[i].reg_offset);
+		ufs_qcom_phy_write_tbl(ufs_qcom_phy, tbl_B, tbl_size_B);
 	}
 
 	/* flush buffered writes */
@@ -106,6 +113,14 @@
 		goto out;
 	}
 
+	/*
+	 * UFS PHY power management is managed by its parent (the UFS host
+	 * controller), hence set the "no runtime PM callbacks" flag on the
+	 * UFS PHY device to avoid any accidental attempt to call the PM
+	 * callbacks for the PHY device.
+	 */
+	pm_runtime_no_callbacks(&generic_phy->dev);
+
 	common_cfg->phy_spec_ops = phy_spec_ops;
 	common_cfg->dev = dev;
 
@@ -135,23 +150,21 @@
 	int err = 0;
 
 	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "phy_mem");
+	if (!res) {
+		dev_err(dev, "%s: phy_mem resource not found\n", __func__);
+		err = -ENOMEM;
+		goto out;
+	}
+
 	phy_common->mmio = devm_ioremap_resource(dev, res);
 	if (IS_ERR((void const *)phy_common->mmio)) {
 		err = PTR_ERR((void const *)phy_common->mmio);
 		phy_common->mmio = NULL;
 		dev_err(dev, "%s: ioremap for phy_mem resource failed %d\n",
 			__func__, err);
-		return err;
 	}
-
-	/* "dev_ref_clk_ctrl_mem" is optional resource */
-	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
-					   "dev_ref_clk_ctrl_mem");
-	phy_common->dev_ref_clk_ctrl_mmio = devm_ioremap_resource(dev, res);
-	if (IS_ERR((void const *)phy_common->dev_ref_clk_ctrl_mmio))
-		phy_common->dev_ref_clk_ctrl_mmio = NULL;
-
-	return 0;
+out:
+	return err;
 }
 
 static int __ufs_qcom_phy_clk_get(struct phy *phy,
@@ -187,15 +200,19 @@
 {
 	int err;
 
-	err = ufs_qcom_phy_clk_get(generic_phy, "tx_iface_clk",
-				   &phy_common->tx_iface_clk);
-	if (err)
-		goto out;
+	/*
+	 * tx_iface_clk does not exist in newer versions of the UFS PHY HW,
+	 * so don't return an error if it is not found
+	 */
+	__ufs_qcom_phy_clk_get(generic_phy, "tx_iface_clk",
+				   &phy_common->tx_iface_clk, false);
 
-	err = ufs_qcom_phy_clk_get(generic_phy, "rx_iface_clk",
-				   &phy_common->rx_iface_clk);
-	if (err)
-		goto out;
+	/*
+	 * rx_iface_clk does not exist in newer versions of the UFS PHY HW,
+	 * so don't return an error if it is not found
+	 */
+	__ufs_qcom_phy_clk_get(generic_phy, "rx_iface_clk",
+				   &phy_common->rx_iface_clk, false);
 
 	err = ufs_qcom_phy_clk_get(generic_phy, "ref_clk_src",
 				   &phy_common->ref_clk_src);
@@ -211,7 +228,15 @@
 
 	err = ufs_qcom_phy_clk_get(generic_phy, "ref_clk",
 				   &phy_common->ref_clk);
+	if (err)
+		goto out;
 
+	/*
+	 * "ref_aux_clk" is optional and only supported by certain
+	 * phy versions, don't abort init if it's not found.
+	 */
+	 __ufs_qcom_phy_clk_get(generic_phy, "ref_aux_clk",
+				   &phy_common->ref_aux_clk, false);
 out:
 	return err;
 }
@@ -230,7 +255,6 @@
 
 	err = ufs_qcom_phy_init_vreg(generic_phy, &phy_common->vdda_phy,
 		"vdda-phy");
-
 	if (err)
 		goto out;
 
@@ -251,6 +275,14 @@
 
 	char prop_name[MAX_PROP_NAME];
 
+	if (dev->of_node) {
+		snprintf(prop_name, MAX_PROP_NAME, "%s-supply", name);
+		if (!of_parse_phandle(dev->of_node, prop_name, 0)) {
+			dev_dbg(dev, "No vreg data found for %s\n", prop_name);
+			return optional ? err : -ENODATA;
+		}
+	}
+
 	vreg->name = kstrdup(name, GFP_KERNEL);
 	if (!vreg->name) {
 		err = -ENOMEM;
@@ -421,9 +453,26 @@
 		goto out_disable_parent;
 	}
 
+	/*
+	 * "ref_aux_clk" is an optional clock, only supported by certain
+	 * PHY versions, hence make sure that the clk reference is
+	 * available before trying to enable the clock.
+	 */
+	if (phy->ref_aux_clk) {
+		ret = clk_prepare_enable(phy->ref_aux_clk);
+		if (ret) {
+			dev_err(phy->dev, "%s: ref_aux_clk enable failed %d\n",
+					__func__, ret);
+			goto out_disable_ref;
+		}
+	}
+
 	phy->is_ref_clk_enabled = true;
 	goto out;
 
+out_disable_ref:
+	if (phy->ref_clk)
+		clk_disable_unprepare(phy->ref_clk);
 out_disable_parent:
 	if (phy->ref_clk_parent)
 		clk_disable_unprepare(phy->ref_clk_parent);
@@ -464,6 +513,13 @@
 	struct ufs_qcom_phy *phy = get_ufs_qcom_phy(generic_phy);
 
 	if (phy->is_ref_clk_enabled) {
+		/*
+		 * "ref_aux_clk" is an optional clock, only supported by
+		 * certain PHY versions, hence make sure that the clk
+		 * reference is available before trying to disable the clock.
+		 */
+		if (phy->ref_aux_clk)
+			clk_disable_unprepare(phy->ref_aux_clk);
 		clk_disable_unprepare(phy->ref_clk);
 		/*
 		 * "ref_clk_parent" is optional clock hence make sure that clk
@@ -477,56 +533,6 @@
 }
 EXPORT_SYMBOL_GPL(ufs_qcom_phy_disable_ref_clk);
 
-#define UFS_REF_CLK_EN	(1 << 5)
-
-static void ufs_qcom_phy_dev_ref_clk_ctrl(struct phy *generic_phy, bool enable)
-{
-	struct ufs_qcom_phy *phy = get_ufs_qcom_phy(generic_phy);
-
-	if (phy->dev_ref_clk_ctrl_mmio &&
-	    (enable ^ phy->is_dev_ref_clk_enabled)) {
-		u32 temp = readl_relaxed(phy->dev_ref_clk_ctrl_mmio);
-
-		if (enable)
-			temp |= UFS_REF_CLK_EN;
-		else
-			temp &= ~UFS_REF_CLK_EN;
-
-		/*
-		 * If we are here to disable this clock immediately after
-		 * entering into hibern8, we need to make sure that device
-		 * ref_clk is active atleast 1us after the hibern8 enter.
-		 */
-		if (!enable)
-			udelay(1);
-
-		writel_relaxed(temp, phy->dev_ref_clk_ctrl_mmio);
-		/* ensure that ref_clk is enabled/disabled before we return */
-		wmb();
-		/*
-		 * If we call hibern8 exit after this, we need to make sure that
-		 * device ref_clk is stable for atleast 1us before the hibern8
-		 * exit command.
-		 */
-		if (enable)
-			udelay(1);
-
-		phy->is_dev_ref_clk_enabled = enable;
-	}
-}
-
-void ufs_qcom_phy_enable_dev_ref_clk(struct phy *generic_phy)
-{
-	ufs_qcom_phy_dev_ref_clk_ctrl(generic_phy, true);
-}
-EXPORT_SYMBOL_GPL(ufs_qcom_phy_enable_dev_ref_clk);
-
-void ufs_qcom_phy_disable_dev_ref_clk(struct phy *generic_phy)
-{
-	ufs_qcom_phy_dev_ref_clk_ctrl(generic_phy, false);
-}
-EXPORT_SYMBOL_GPL(ufs_qcom_phy_disable_dev_ref_clk);
-
 /* Turn ON M-PHY RMMI interface clocks */
 int ufs_qcom_phy_enable_iface_clk(struct phy *generic_phy)
 {
@@ -536,6 +542,9 @@
 	if (phy->is_iface_clk_enabled)
 		goto out;
 
+	if (!phy->tx_iface_clk)
+		goto out;
+
 	ret = clk_prepare_enable(phy->tx_iface_clk);
 	if (ret) {
 		dev_err(phy->dev, "%s: tx_iface_clk enable failed %d\n",
@@ -561,6 +570,9 @@
 {
 	struct ufs_qcom_phy *phy = get_ufs_qcom_phy(generic_phy);
 
+	if (!phy->tx_iface_clk)
+		return;
+
 	if (phy->is_iface_clk_enabled) {
 		clk_disable_unprepare(phy->tx_iface_clk);
 		clk_disable_unprepare(phy->rx_iface_clk);
@@ -591,19 +603,26 @@
 	struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(generic_phy);
 	int ret = 0;
 
-	if (!ufs_qcom_phy->phy_spec_ops->set_tx_lane_enable) {
-		dev_err(ufs_qcom_phy->dev, "%s: set_tx_lane_enable() callback is not supported\n",
-			__func__);
-		ret = -ENOTSUPP;
-	} else {
+	if (ufs_qcom_phy->phy_spec_ops->set_tx_lane_enable)
 		ufs_qcom_phy->phy_spec_ops->set_tx_lane_enable(ufs_qcom_phy,
 							       tx_lanes);
-	}
 
 	return ret;
 }
 EXPORT_SYMBOL_GPL(ufs_qcom_phy_set_tx_lane_enable);
 
+int ufs_qcom_phy_ctrl_rx_linecfg(struct phy *generic_phy, bool ctrl)
+{
+	struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(generic_phy);
+	int ret = 0;
+
+	if (ufs_qcom_phy->phy_spec_ops->ctrl_rx_linecfg)
+		ufs_qcom_phy->phy_spec_ops->ctrl_rx_linecfg(ufs_qcom_phy, ctrl);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(ufs_qcom_phy_ctrl_rx_linecfg);
+
 void ufs_qcom_phy_save_controller_version(struct phy *generic_phy,
 					  u8 major, u16 minor, u16 step)
 {
@@ -636,6 +655,14 @@
 }
 EXPORT_SYMBOL_GPL(ufs_qcom_phy_calibrate_phy);
 
+const char *ufs_qcom_phy_name(struct phy *phy)
+{
+	struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(phy);
+
+	return ufs_qcom_phy->name;
+}
+EXPORT_SYMBOL(ufs_qcom_phy_name);
+
 int ufs_qcom_phy_remove(struct phy *generic_phy,
 			struct ufs_qcom_phy *ufs_qcom_phy)
 {
@@ -747,3 +774,39 @@
 	return 0;
 }
 EXPORT_SYMBOL_GPL(ufs_qcom_phy_power_off);
+
+int ufs_qcom_phy_configure_lpm(struct phy *generic_phy, bool enable)
+{
+	struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(generic_phy);
+	int ret = 0;
+
+	if (ufs_qcom_phy->phy_spec_ops->configure_lpm) {
+		ret = ufs_qcom_phy->phy_spec_ops->
+				configure_lpm(ufs_qcom_phy, enable);
+		if (ret)
+			dev_err(ufs_qcom_phy->dev,
+				"%s: configure_lpm(%s) failed %d\n",
+				__func__, enable ? "enable" : "disable", ret);
+	}
+
+	return ret;
+}
+EXPORT_SYMBOL(ufs_qcom_phy_configure_lpm);
+
+void ufs_qcom_phy_dump_regs(struct ufs_qcom_phy *phy, int offset,
+				int len, char *prefix)
+{
+	print_hex_dump(KERN_ERR, prefix,
+			len > 4 ? DUMP_PREFIX_OFFSET : DUMP_PREFIX_NONE,
+			16, 4, phy->mmio + offset, len, false);
+}
+EXPORT_SYMBOL(ufs_qcom_phy_dump_regs);
+
+void ufs_qcom_phy_dbg_register_dump(struct phy *generic_phy)
+{
+	struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(generic_phy);
+
+	if (ufs_qcom_phy->phy_spec_ops->dbg_register_dump)
+		ufs_qcom_phy->phy_spec_ops->dbg_register_dump(ufs_qcom_phy);
+}
+EXPORT_SYMBOL(ufs_qcom_phy_dbg_register_dump);
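
The new ufs_qcom_phy_write_tbl() helper factors out the table-write loop so
version-specific code can push extra register sets (as the 14nm SVS-mode
quirk does). A hedged usage sketch; the table contents are made up, though
the register names and UFS_QCOM_PHY_CAL_ENTRY() come from the 14nm headers:

	static struct ufs_qcom_phy_calibration example_tbl[] = {
		UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_CMN_CONFIG, 0x0e),
		UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_CLK_SELECT, 0x30),
	};

	ufs_qcom_phy_write_tbl(phy, example_tbl, ARRAY_SIZE(example_tbl));
	mb();	/* flush buffered writes, as the calibrate path does */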
diff -ruw linux-4.4.115/drivers/phy/phy-qcom-ufs-i.h linux-4.4.115-fbx/drivers/phy/phy-qcom-ufs-i.h
--- linux-4.4.115/drivers/phy/phy-qcom-ufs-i.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/phy/phy-qcom-ufs-i.h	2019-01-22 16:16:26.079269734 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013-2015, Linux Foundation. All rights reserved.
+ * Copyright (c) 2013-2016, Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -91,6 +91,7 @@
 	struct clk *ref_clk_src;
 	struct clk *ref_clk_parent;
 	struct clk *ref_clk;
+	struct clk *ref_aux_clk;
 	bool is_ref_clk_enabled;
 	bool is_dev_ref_clk_enabled;
 	struct ufs_qcom_phy_vreg vdda_pll;
@@ -107,6 +108,23 @@
 	*/
 	#define UFS_QCOM_PHY_QUIRK_HIBERN8_EXIT_AFTER_PHY_PWR_COLLAPSE	BIT(0)
 
+	/*
+	 * On some UFS PHY HW revisions, the UFS PHY power-up calibration
+	 * sequence cannot include the SVS mode configuration, otherwise the
+	 * calibration result cannot be used in HS-G3. So additional register
+	 * writes must be done after the PHY is initialized but before the
+	 * controller requests hibernate exit.
+	 */
+	#define UFS_QCOM_PHY_QUIRK_SVS_MODE	BIT(1)
+
+	/*
+	 * On some UFS PHY HW revisions, the UFS PHY power-up calibration
+	 * sequence requires a manual VCO tuning code, and it's better to rely
+	 * on the VCO tuning code programmed by the boot loader. Enable this
+	 * quirk to program the manually tuned VCO code.
+	 */
+	#define UFS_QCOM_PHY_QUIRK_VCO_MANUAL_TUNING	BIT(2)
+
 	u8 host_ctrl_rev_major;
 	u16 host_ctrl_rev_minor;
 	u16 host_ctrl_rev_step;
@@ -116,6 +134,7 @@
 	int cached_regs_table_size;
 	bool is_powered_on;
 	struct ufs_qcom_phy_specific_ops *phy_spec_ops;
+	u32 vco_tune1_mode1;
 };
 
 /**
@@ -127,15 +146,23 @@
  * @is_physical_coding_sublayer_ready: pointer to a function that
  * checks pcs readiness. returns 0 for success and non-zero for error.
  * @set_tx_lane_enable: pointer to a function that enable tx lanes
+ * @ctrl_rx_linecfg: pointer to a function that controls the Host Rx LineCfg
+ * state.
  * @power_control: pointer to a function that controls analog rail of phy
  * and writes to QSERDES_RX_SIGDET_CNTRL attribute
+ * @configure_lpm: pointer to a function that configures the phy
+ * for low power mode.
+ * @dbg_register_dump: pointer to a function that dumps phy registers for debug.
  */
 struct ufs_qcom_phy_specific_ops {
 	int (*calibrate_phy)(struct ufs_qcom_phy *phy, bool is_rate_B);
 	void (*start_serdes)(struct ufs_qcom_phy *phy);
 	int (*is_physical_coding_sublayer_ready)(struct ufs_qcom_phy *phy);
 	void (*set_tx_lane_enable)(struct ufs_qcom_phy *phy, u32 val);
+	void (*ctrl_rx_linecfg)(struct ufs_qcom_phy *phy, bool ctrl);
 	void (*power_control)(struct ufs_qcom_phy *phy, bool val);
+	int (*configure_lpm)(struct ufs_qcom_phy *phy, bool enable);
+	void (*dbg_register_dump)(struct ufs_qcom_phy *phy);
 };
 
 struct ufs_qcom_phy *get_ufs_qcom_phy(struct phy *generic_phy);
@@ -156,4 +183,9 @@
 			struct ufs_qcom_phy_calibration *tbl_A, int tbl_size_A,
 			struct ufs_qcom_phy_calibration *tbl_B, int tbl_size_B,
 			bool is_rate_B);
+void ufs_qcom_phy_write_tbl(struct ufs_qcom_phy *ufs_qcom_phy,
+				struct ufs_qcom_phy_calibration *tbl,
+				int tbl_size);
+void ufs_qcom_phy_dump_regs(struct ufs_qcom_phy *phy,
+			    int offset, int len, char *prefix);
 #endif
diff -ruw linux-4.4.115/drivers/phy/phy-qcom-ufs-qmp-14nm.c linux-4.4.115-fbx/drivers/phy/phy-qcom-ufs-qmp-14nm.c
--- linux-4.4.115/drivers/phy/phy-qcom-ufs-qmp-14nm.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/phy/phy-qcom-ufs-qmp-14nm.c	2019-01-22 16:16:26.079269734 +0100
@@ -15,19 +15,49 @@
 #include "phy-qcom-ufs-qmp-14nm.h"
 
 #define UFS_PHY_NAME "ufs_phy_qmp_14nm"
-#define UFS_PHY_VDDA_PHY_UV	(925000)
 
 static
 int ufs_qcom_phy_qmp_14nm_phy_calibrate(struct ufs_qcom_phy *ufs_qcom_phy,
 					bool is_rate_B)
 {
-	int tbl_size_A = ARRAY_SIZE(phy_cal_table_rate_A);
-	int tbl_size_B = ARRAY_SIZE(phy_cal_table_rate_B);
 	int err;
+	int tbl_size_A, tbl_size_B;
+	struct ufs_qcom_phy_calibration *tbl_A, *tbl_B;
+	u8 major = ufs_qcom_phy->host_ctrl_rev_major;
+	u16 minor = ufs_qcom_phy->host_ctrl_rev_minor;
+	u16 step = ufs_qcom_phy->host_ctrl_rev_step;
+
+	tbl_size_B = ARRAY_SIZE(phy_cal_table_rate_B);
+	tbl_B = phy_cal_table_rate_B;
+
+	if ((major == 0x2) && (minor == 0x000) && (step == 0x0000)) {
+		tbl_A = phy_cal_table_rate_A_2_0_0;
+		tbl_size_A = ARRAY_SIZE(phy_cal_table_rate_A_2_0_0);
+	} else if ((major == 0x2) && (minor == 0x001) && (step == 0x0000)) {
+		tbl_A = phy_cal_table_rate_A_2_1_0;
+		tbl_size_A = ARRAY_SIZE(phy_cal_table_rate_A_2_1_0);
+	} else if ((major == 0x2) && (minor == 0x002) && (step == 0x0000)) {
+		tbl_A = phy_cal_table_rate_A_2_2_0;
+		tbl_size_A = ARRAY_SIZE(phy_cal_table_rate_A_2_2_0);
+		tbl_B = phy_cal_table_rate_B_2_2_0;
+		tbl_size_B = ARRAY_SIZE(phy_cal_table_rate_B_2_2_0);
+	} else {
+		dev_err(ufs_qcom_phy->dev,
+			"%s: Unknown UFS-PHY version (major 0x%x minor 0x%x step 0x%x), no calibration values\n",
+			__func__, major, minor, step);
+		err = -ENODEV;
+		goto out;
+	}
 
-	err = ufs_qcom_phy_calibrate(ufs_qcom_phy, phy_cal_table_rate_A,
-		tbl_size_A, phy_cal_table_rate_B, tbl_size_B, is_rate_B);
-
+	err = ufs_qcom_phy_calibrate(ufs_qcom_phy,
+				     tbl_A, tbl_size_A,
+				     tbl_B, tbl_size_B,
+				     is_rate_B);
+
+	if (ufs_qcom_phy->quirks & UFS_QCOM_PHY_QUIRK_VCO_MANUAL_TUNING)
+		writel_relaxed(ufs_qcom_phy->vco_tune1_mode1,
+			ufs_qcom_phy->mmio + QSERDES_COM_VCO_TUNE1_MODE1);
+out:
 	if (err)
 		dev_err(ufs_qcom_phy->dev,
 			"%s: ufs_qcom_phy_calibrate() failed %d\n",
@@ -38,8 +68,15 @@
 static
 void ufs_qcom_phy_qmp_14nm_advertise_quirks(struct ufs_qcom_phy *phy_common)
 {
+	u8 major = phy_common->host_ctrl_rev_major;
+	u16 minor = phy_common->host_ctrl_rev_minor;
+	u16 step = phy_common->host_ctrl_rev_step;
+
+	if ((major == 0x2) && (minor == 0x000) && (step == 0x0000))
 	phy_common->quirks =
-		UFS_QCOM_PHY_QUIRK_HIBERN8_EXIT_AFTER_PHY_PWR_COLLAPSE;
+			UFS_QCOM_PHY_QUIRK_HIBERN8_EXIT_AFTER_PHY_PWR_COLLAPSE |
+			UFS_QCOM_PHY_QUIRK_SVS_MODE |
+			UFS_QCOM_PHY_QUIRK_VCO_MANUAL_TUNING;
 }
 
 static int ufs_qcom_phy_qmp_14nm_init(struct phy *generic_phy)
@@ -61,24 +98,66 @@
 			__func__, err);
 		goto out;
 	}
-	phy_common->vdda_phy.max_uV = UFS_PHY_VDDA_PHY_UV;
-	phy_common->vdda_phy.min_uV = UFS_PHY_VDDA_PHY_UV;
 
 	ufs_qcom_phy_qmp_14nm_advertise_quirks(phy_common);
 
+	if (phy_common->quirks & UFS_QCOM_PHY_QUIRK_VCO_MANUAL_TUNING) {
+		phy_common->vco_tune1_mode1 = readl_relaxed(phy_common->mmio +
+						QSERDES_COM_VCO_TUNE1_MODE1);
+		dev_info(phy_common->dev, "%s: vco_tune1_mode1 0x%x\n",
+			__func__, phy_common->vco_tune1_mode1);
+	}
+
 out:
 	return err;
 }
 
 static
-void ufs_qcom_phy_qmp_14nm_power_control(struct ufs_qcom_phy *phy, bool val)
+void ufs_qcom_phy_qmp_14nm_power_control(struct ufs_qcom_phy *phy,
+					 bool power_ctrl)
 {
-	writel_relaxed(val ? 0x1 : 0x0, phy->mmio + UFS_PHY_POWER_DOWN_CONTROL);
+	bool is_workaround_req = false;
+
+	if (phy->quirks &
+	    UFS_QCOM_PHY_QUIRK_HIBERN8_EXIT_AFTER_PHY_PWR_COLLAPSE)
+		is_workaround_req = true;
+
+	if (!power_ctrl) {
+		/* apply PHY analog power collapse */
+		if (is_workaround_req) {
+			/* assert common reset before analog power collapse */
+			writel_relaxed(0x1, phy->mmio + QSERDES_COM_SW_RESET);
+			/*
+			 * make sure that reset is propogated before analog
+			 * power collapse
+			 */
+			mb();
+		}
+		/* apply analog power collapse */
+		writel_relaxed(0x0, phy->mmio + UFS_PHY_POWER_DOWN_CONTROL);
+		/*
+		 * Make sure that PHY knows its analog rail is going to be
+		 * powered OFF.
+		 */
+		mb();
+	} else {
+		/* bring PHY out of analog power collapse */
+		writel_relaxed(0x1, phy->mmio + UFS_PHY_POWER_DOWN_CONTROL);
 	/*
 	 * Before any transactions involving PHY, ensure PHY knows
-	 * that it's analog rail is powered ON (or OFF).
+		 * that its analog rail is powered ON.
 	 */
 	mb();
+		if (is_workaround_req) {
+			/*
+			 * de-assert common reset after coming out of analog
+			 * power collapse
+			 */
+			writel_relaxed(0x0, phy->mmio + QSERDES_COM_SW_RESET);
+			/* make sure common reset is de-asserted before proceeding */
+			mb();
+		}
+	}
 }
 
 static inline
@@ -90,6 +169,23 @@
 	 */
 }
 
+static
+void ufs_qcom_phy_qmp_14nm_ctrl_rx_linecfg(struct ufs_qcom_phy *phy, bool ctrl)
+{
+	u32 temp;
+
+	temp = readl_relaxed(phy->mmio + UFS_PHY_LINECFG_DISABLE);
+
+	if (ctrl) /* enable RX LineCfg */
+		temp &= ~UFS_PHY_RX_LINECFG_DISABLE_BIT;
+	else /* disable RX LineCfg */
+		temp |= UFS_PHY_RX_LINECFG_DISABLE_BIT;
+
+	writel_relaxed(temp, phy->mmio + UFS_PHY_LINECFG_DISABLE);
+	/* make sure that RX LineCfg config applied before we return */
+	mb();
+}
+
 static inline void ufs_qcom_phy_qmp_14nm_start_serdes(struct ufs_qcom_phy *phy)
 {
 	u32 tmp;
@@ -109,9 +205,24 @@
 
 	err = readl_poll_timeout(phy_common->mmio + UFS_PHY_PCS_READY_STATUS,
 		val, (val & MASK_PCS_READY), 10, 1000000);
-	if (err)
+	if (err) {
 		dev_err(phy_common->dev, "%s: poll for pcs failed err = %d\n",
 			__func__, err);
+		goto out;
+	}
+
+	if (phy_common->quirks & UFS_QCOM_PHY_QUIRK_SVS_MODE) {
+		int i;
+
+		for (i = 0; i < ARRAY_SIZE(phy_svs_mode_config_2_0_0); i++)
+			writel_relaxed(phy_svs_mode_config_2_0_0[i].cfg_value,
+				(phy_common->mmio +
+				phy_svs_mode_config_2_0_0[i].reg_offset));
+		/* apply above configuration immediately */
+		mb();
+	}
+
+out:
 	return err;
 }
 
@@ -128,6 +239,7 @@
 	.start_serdes		= ufs_qcom_phy_qmp_14nm_start_serdes,
 	.is_physical_coding_sublayer_ready = ufs_qcom_phy_qmp_14nm_is_pcs_ready,
 	.set_tx_lane_enable	= ufs_qcom_phy_qmp_14nm_set_tx_lane_enable,
+	.ctrl_rx_linecfg	= ufs_qcom_phy_qmp_14nm_ctrl_rx_linecfg,
 	.power_control		= ufs_qcom_phy_qmp_14nm_power_control,
 };
 
diff -ruw linux-4.4.115/drivers/phy/phy-qcom-ufs-qmp-14nm.h linux-4.4.115-fbx/drivers/phy/phy-qcom-ufs-qmp-14nm.h
--- linux-4.4.115/drivers/phy/phy-qcom-ufs-qmp-14nm.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/phy/phy-qcom-ufs-qmp-14nm.h	2019-01-22 16:16:26.079269734 +0100
@@ -27,12 +27,14 @@
 #define QSERDES_COM_BG_TIMER			COM_OFF(0x0C)
 #define QSERDES_COM_BIAS_EN_CLKBUFLR_EN		COM_OFF(0x34)
 #define QSERDES_COM_SYS_CLK_CTRL		COM_OFF(0x3C)
+#define QSERDES_COM_PLL_IVCO			COM_OFF(0x48)
 #define QSERDES_COM_LOCK_CMP1_MODE0		COM_OFF(0x4C)
 #define QSERDES_COM_LOCK_CMP2_MODE0		COM_OFF(0x50)
 #define QSERDES_COM_LOCK_CMP3_MODE0		COM_OFF(0x54)
 #define QSERDES_COM_LOCK_CMP1_MODE1		COM_OFF(0x58)
 #define QSERDES_COM_LOCK_CMP2_MODE1		COM_OFF(0x5C)
 #define QSERDES_COM_LOCK_CMP3_MODE1		COM_OFF(0x60)
+#define QSERDES_COM_BG_TRIM			COM_OFF(0x70)
 #define QSERDES_COM_CP_CTRL_MODE0		COM_OFF(0x78)
 #define QSERDES_COM_CP_CTRL_MODE1		COM_OFF(0x7C)
 #define QSERDES_COM_PLL_RCTRL_MODE0		COM_OFF(0x84)
@@ -41,6 +43,7 @@
 #define QSERDES_COM_PLL_CCTRL_MODE1		COM_OFF(0x94)
 #define QSERDES_COM_SYSCLK_EN_SEL		COM_OFF(0xAC)
 #define QSERDES_COM_RESETSM_CNTRL		COM_OFF(0xB4)
+#define QSERDES_COM_RESCODE_DIV_NUM		COM_OFF(0xC4)
 #define QSERDES_COM_LOCK_CMP_EN			COM_OFF(0xC8)
 #define QSERDES_COM_LOCK_CMP_CFG		COM_OFF(0xCC)
 #define QSERDES_COM_DEC_START_MODE0		COM_OFF(0xD0)
@@ -61,19 +64,35 @@
 #define QSERDES_COM_VCO_TUNE2_MODE0		COM_OFF(0x130)
 #define QSERDES_COM_VCO_TUNE1_MODE1		COM_OFF(0x134)
 #define QSERDES_COM_VCO_TUNE2_MODE1		COM_OFF(0x138)
+#define QSERDES_COM_VCO_TUNE_INITVAL1		COM_OFF(0x13C)
+#define QSERDES_COM_VCO_TUNE_INITVAL2		COM_OFF(0x140)
 #define QSERDES_COM_VCO_TUNE_TIMER1		COM_OFF(0x144)
 #define QSERDES_COM_VCO_TUNE_TIMER2		COM_OFF(0x148)
 #define QSERDES_COM_CLK_SELECT			COM_OFF(0x174)
 #define QSERDES_COM_HSCLK_SEL			COM_OFF(0x178)
 #define QSERDES_COM_CORECLK_DIV			COM_OFF(0x184)
+#define QSERDES_COM_SW_RESET			COM_OFF(0x188)
 #define QSERDES_COM_CORE_CLK_EN			COM_OFF(0x18C)
 #define QSERDES_COM_CMN_CONFIG			COM_OFF(0x194)
 #define QSERDES_COM_SVS_MODE_CLK_SEL		COM_OFF(0x19C)
+#define QSERDES_COM_DEBUG_BUS0			COM_OFF(0x1A0)
+#define QSERDES_COM_DEBUG_BUS1			COM_OFF(0x1A4)
+#define QSERDES_COM_DEBUG_BUS2			COM_OFF(0x1A8)
+#define QSERDES_COM_DEBUG_BUS3			COM_OFF(0x1AC)
+#define QSERDES_COM_DEBUG_BUS_SEL		COM_OFF(0x1B0)
+#define QSERDES_COM_CMN_MISC2			COM_OFF(0x1B8)
 #define QSERDES_COM_CORECLK_DIV_MODE1		COM_OFF(0x1BC)
 
 /* UFS PHY registers */
 #define UFS_PHY_PHY_START			PHY_OFF(0x00)
 #define UFS_PHY_POWER_DOWN_CONTROL		PHY_OFF(0x04)
+#define UFS_PHY_TX_LARGE_AMP_DRV_LVL		PHY_OFF(0x34)
+#define UFS_PHY_TX_SMALL_AMP_DRV_LVL		PHY_OFF(0x3C)
+#define UFS_PHY_RX_MIN_STALL_NOCONFIG_TIME_CAP	PHY_OFF(0xCC)
+#define UFS_PHY_LINECFG_DISABLE			PHY_OFF(0x138)
+#define UFS_PHY_RX_SYM_RESYNC_CTRL		PHY_OFF(0x13C)
+#define UFS_PHY_RX_SIGDET_CTRL2			PHY_OFF(0x148)
+#define UFS_PHY_RX_PWM_GEAR_BAND		PHY_OFF(0x154)
 #define UFS_PHY_PCS_READY_STATUS		PHY_OFF(0x168)
 
 /* UFS PHY TX registers */
@@ -81,7 +100,12 @@
 #define QSERDES_TX_LANE_MODE				TX_OFF(0, 0x94)
 
 /* UFS PHY RX registers */
+#define QSERDES_RX_UCDR_SVS_SO_GAIN_HALF	RX_OFF(0, 0x30)
+#define QSERDES_RX_UCDR_SVS_SO_GAIN_QUARTER	RX_OFF(0, 0x34)
+#define QSERDES_RX_UCDR_SVS_SO_GAIN_EIGHTH	RX_OFF(0, 0x38)
+#define QSERDES_RX_UCDR_SVS_SO_GAIN		RX_OFF(0, 0x3C)
 #define QSERDES_RX_UCDR_FASTLOCK_FO_GAIN	RX_OFF(0, 0x40)
+#define QSERDES_RX_UCDR_SO_SATURATION_ENABLE	RX_OFF(0, 0x48)
 #define QSERDES_RX_RX_TERM_BW			RX_OFF(0, 0x90)
 #define QSERDES_RX_RX_EQ_GAIN1_LSB		RX_OFF(0, 0xC4)
 #define QSERDES_RX_RX_EQ_GAIN1_MSB		RX_OFF(0, 0xC8)
@@ -93,6 +117,8 @@
 #define QSERDES_RX_SIGDET_DEGLITCH_CNTRL	RX_OFF(0, 0x11C)
 #define QSERDES_RX_RX_INTERFACE_MODE		RX_OFF(0, 0x12C)
 
+#define UFS_PHY_RX_LINECFG_DISABLE_BIT		BIT(1)
+
 /*
  * This structure represents the 14nm specific phy.
  * common_cfg MUST remain the first field in this structure
@@ -105,12 +131,102 @@
 	struct ufs_qcom_phy common_cfg;
 };
 
-static struct ufs_qcom_phy_calibration phy_cal_table_rate_A[] = {
+static struct ufs_qcom_phy_calibration phy_cal_table_rate_A_2_0_0[] = {
 	UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_POWER_DOWN_CONTROL, 0x01),
 	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_CMN_CONFIG, 0x0e),
-	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_SYSCLK_EN_SEL, 0xd7),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_SYSCLK_EN_SEL, 0x17),
 	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_CLK_SELECT, 0x30),
-	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_SYS_CLK_CTRL, 0x06),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_SYS_CLK_CTRL, 0x02),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_BIAS_EN_CLKBUFLR_EN, 0x08),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_BG_TIMER, 0x0a),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_HSCLK_SEL, 0x05),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_CORECLK_DIV, 0x0a),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_CORECLK_DIV_MODE1, 0x0a),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_LOCK_CMP_EN, 0x01),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_VCO_TUNE_CTRL, 0x1C),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_RESETSM_CNTRL, 0x20),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_CORE_CLK_EN, 0x00),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_LOCK_CMP_CFG, 0x00),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_VCO_TUNE_TIMER1, 0xff),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_VCO_TUNE_TIMER2, 0x3f),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_VCO_TUNE_MAP, 0x14),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_SVS_MODE_CLK_SEL, 0x05),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_DEC_START_MODE0, 0x82),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_DIV_FRAC_START1_MODE0, 0x00),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_DIV_FRAC_START2_MODE0, 0x00),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_DIV_FRAC_START3_MODE0, 0x00),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_CP_CTRL_MODE0, 0x0b),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_RCTRL_MODE0, 0x16),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_CCTRL_MODE0, 0x28),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_INTEGLOOP_GAIN0_MODE0, 0x80),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_INTEGLOOP_GAIN1_MODE0, 0x00),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_VCO_TUNE1_MODE0, 0x3F),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_VCO_TUNE2_MODE0, 0x01),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_LOCK_CMP1_MODE0, 0xff),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_LOCK_CMP2_MODE0, 0x0c),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_LOCK_CMP3_MODE0, 0x00),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_DEC_START_MODE1, 0x98),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_DIV_FRAC_START1_MODE1, 0x00),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_DIV_FRAC_START2_MODE1, 0x00),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_DIV_FRAC_START3_MODE1, 0x00),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_CP_CTRL_MODE1, 0x0b),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_RCTRL_MODE1, 0x16),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_CCTRL_MODE1, 0x28),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_INTEGLOOP_GAIN0_MODE1, 0x80),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_INTEGLOOP_GAIN1_MODE1, 0x00),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_VCO_TUNE2_MODE1, 0x00),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_LOCK_CMP1_MODE1, 0x32),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_LOCK_CMP2_MODE1, 0x0f),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_LOCK_CMP3_MODE1, 0x00),
+
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_TX_HIGHZ_TRANSCEIVER_BIAS_DRVR_EN, 0x45),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_TX_LANE_MODE, 0x06),
+
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_SIGDET_LVL, 0x24),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_SIGDET_CNTRL, 0x0F),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_INTERFACE_MODE, 0x00),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_SIGDET_DEGLITCH_CNTRL, 0x1E),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_UCDR_FASTLOCK_FO_GAIN, 0x0B),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_TERM_BW, 0x5B),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_EQ_GAIN1_LSB, 0xFF),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_EQ_GAIN1_MSB, 0x3F),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_EQ_GAIN2_LSB, 0xFF),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_EQ_GAIN2_MSB, 0x3F),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_EQU_ADAPTOR_CNTRL2, 0x0D),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_IVCO, 0x0F),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_BG_TRIM, 0x0F),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_RESCODE_DIV_NUM, 0x15),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_CMN_MISC2, 0x1F),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_UCDR_SVS_SO_GAIN_HALF, 0x04),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_UCDR_SVS_SO_GAIN_QUARTER, 0x04),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_UCDR_SVS_SO_GAIN, 0x04),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_UCDR_SO_SATURATION_ENABLE, 0x4B),
+	UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_RX_SIGDET_CTRL2, 0x6c),
+	UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_TX_LARGE_AMP_DRV_LVL, 0x12),
+	UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_TX_SMALL_AMP_DRV_LVL, 0x06),
+	UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_RX_MIN_STALL_NOCONFIG_TIME_CAP, 0x28),
+	UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_RX_SYM_RESYNC_CTRL, 0x03),
+
+	/*
+	 * UFS_PHY_RX_PWM_GEAR_BAND configuration is changed after the power-up
+	 * sequence, so make sure this register is set back to its power-on
+	 * reset value here. This is required in case a power-up sequence is
+	 * initiated after the register was changed to something other than
+	 * its power-on reset value.
+	 */
+	UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_RX_PWM_GEAR_BAND, 0x55),
+};
+
+/*
+ * For 2.1.0 revision, SVS mode configuration can be part of PHY power
+ * up sequence itself.
+ */
+static struct ufs_qcom_phy_calibration phy_cal_table_rate_A_2_1_0[] = {
+	UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_POWER_DOWN_CONTROL, 0x01),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_CMN_CONFIG, 0x0e),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_SYSCLK_EN_SEL, 0x14),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_CLK_SELECT, 0x30),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_SYS_CLK_CTRL, 0x02),
 	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_BIAS_EN_CLKBUFLR_EN, 0x08),
 	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_BG_TIMER, 0x0a),
 	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_HSCLK_SEL, 0x05),
@@ -155,23 +271,133 @@
 	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_LOCK_CMP3_MODE1, 0x00),
 
 	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_TX_HIGHZ_TRANSCEIVER_BIAS_DRVR_EN, 0x45),
-	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_TX_LANE_MODE, 0x02),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_TX_LANE_MODE, 0x06),
 
 	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_SIGDET_LVL, 0x24),
-	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_SIGDET_CNTRL, 0x02),
-	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_INTERFACE_MODE, 0x00),
-	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_SIGDET_DEGLITCH_CNTRL, 0x18),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_SIGDET_CNTRL, 0x0F),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_INTERFACE_MODE, 0x40),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_SIGDET_DEGLITCH_CNTRL, 0x1E),
 	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_UCDR_FASTLOCK_FO_GAIN, 0x0B),
 	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_TERM_BW, 0x5B),
 	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_EQ_GAIN1_LSB, 0xFF),
 	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_EQ_GAIN1_MSB, 0x3F),
 	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_EQ_GAIN2_LSB, 0xFF),
-	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_EQ_GAIN2_MSB, 0x0F),
-	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_EQU_ADAPTOR_CNTRL2, 0x0E),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_EQ_GAIN2_MSB, 0x3F),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_EQU_ADAPTOR_CNTRL2, 0x0D),
+
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_IVCO, 0x0F),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_BG_TRIM, 0x0F),
+	UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_RX_PWM_GEAR_BAND, 0x15),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_RESCODE_DIV_NUM, 0x15),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_CMN_MISC2, 0x1F),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_UCDR_SVS_SO_GAIN_HALF, 0x04),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_UCDR_SVS_SO_GAIN_QUARTER, 0x04),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_UCDR_SVS_SO_GAIN, 0x04),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_UCDR_SO_SATURATION_ENABLE, 0x4B),
+	UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_RX_SIGDET_CTRL2, 0x6c),
+	UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_TX_LARGE_AMP_DRV_LVL, 0x12),
+	UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_TX_SMALL_AMP_DRV_LVL, 0x06),
+	UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_RX_MIN_STALL_NOCONFIG_TIME_CAP, 0x28),
+	UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_RX_SYM_RESYNC_CTRL, 0x03),
+};
+
+static struct ufs_qcom_phy_calibration phy_cal_table_rate_A_2_2_0[] = {
+	UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_POWER_DOWN_CONTROL, 0x01),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_CMN_CONFIG, 0x0e),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_SYSCLK_EN_SEL, 0x14),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_CLK_SELECT, 0x30),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_SYS_CLK_CTRL, 0x02),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_BIAS_EN_CLKBUFLR_EN, 0x08),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_BG_TIMER, 0x0a),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_HSCLK_SEL, 0x00),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_CORECLK_DIV, 0x0a),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_CORECLK_DIV_MODE1, 0x0a),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_LOCK_CMP_EN, 0x01),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_VCO_TUNE_CTRL, 0x00),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_RESETSM_CNTRL, 0x20),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_CORE_CLK_EN, 0x00),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_LOCK_CMP_CFG, 0x00),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_VCO_TUNE_TIMER1, 0xff),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_VCO_TUNE_TIMER2, 0x3f),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_VCO_TUNE_MAP, 0x04),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_SVS_MODE_CLK_SEL, 0x05),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_DEC_START_MODE0, 0x82),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_DIV_FRAC_START1_MODE0, 0x00),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_DIV_FRAC_START2_MODE0, 0x00),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_DIV_FRAC_START3_MODE0, 0x00),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_CP_CTRL_MODE0, 0x0b),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_RCTRL_MODE0, 0x16),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_CCTRL_MODE0, 0x28),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_INTEGLOOP_GAIN0_MODE0, 0x80),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_INTEGLOOP_GAIN1_MODE0, 0x00),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_VCO_TUNE1_MODE0, 0x28),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_VCO_TUNE2_MODE0, 0x02),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_LOCK_CMP1_MODE0, 0xff),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_LOCK_CMP2_MODE0, 0x0c),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_LOCK_CMP3_MODE0, 0x00),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_DEC_START_MODE1, 0x98),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_DIV_FRAC_START1_MODE1, 0x00),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_DIV_FRAC_START2_MODE1, 0x00),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_DIV_FRAC_START3_MODE1, 0x00),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_CP_CTRL_MODE1, 0x0b),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_RCTRL_MODE1, 0x16),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_CCTRL_MODE1, 0x28),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_INTEGLOOP_GAIN0_MODE1, 0x80),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_INTEGLOOP_GAIN1_MODE1, 0x00),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_VCO_TUNE1_MODE1, 0xd6),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_VCO_TUNE2_MODE1, 0x00),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_LOCK_CMP1_MODE1, 0x32),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_LOCK_CMP2_MODE1, 0x0f),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_LOCK_CMP3_MODE1, 0x00),
+
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_TX_HIGHZ_TRANSCEIVER_BIAS_DRVR_EN, 0x45),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_TX_LANE_MODE, 0x06),
+
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_SIGDET_LVL, 0x24),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_SIGDET_CNTRL, 0x0F),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_INTERFACE_MODE, 0x40),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_SIGDET_DEGLITCH_CNTRL, 0x1E),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_UCDR_FASTLOCK_FO_GAIN, 0x0B),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_TERM_BW, 0x5B),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_EQ_GAIN1_LSB, 0xFF),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_EQ_GAIN1_MSB, 0x3F),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_EQ_GAIN2_LSB, 0xFF),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_EQ_GAIN2_MSB, 0x3F),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_EQU_ADAPTOR_CNTRL2, 0x0D),
+
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_IVCO, 0x0F),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_BG_TRIM, 0x0F),
+	UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_RX_PWM_GEAR_BAND, 0x15),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_RESCODE_DIV_NUM, 0x40),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_CMN_MISC2, 0x63),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_UCDR_SVS_SO_GAIN_HALF, 0x04),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_UCDR_SVS_SO_GAIN_QUARTER, 0x04),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_UCDR_SVS_SO_GAIN, 0x04),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_UCDR_SO_SATURATION_ENABLE, 0x4B),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_VCO_TUNE_INITVAL1, 0xFF),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_VCO_TUNE_INITVAL2, 0x00),
+	UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_RX_SIGDET_CTRL2, 0x6c),
+	UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_TX_LARGE_AMP_DRV_LVL, 0x0A),
+	UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_TX_SMALL_AMP_DRV_LVL, 0x02),
+	UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_RX_MIN_STALL_NOCONFIG_TIME_CAP, 0x28),
+	UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_RX_SYM_RESYNC_CTRL, 0x03),
 };
 
 static struct ufs_qcom_phy_calibration phy_cal_table_rate_B[] = {
 	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_VCO_TUNE_MAP, 0x54),
 };
 
+static struct ufs_qcom_phy_calibration phy_cal_table_rate_B_2_2_0[] = {
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_VCO_TUNE_MAP, 0x44),
+};
+
+/*
+ * For 2.0.0 revision, apply this SVS mode configuration after PHY power
+ * up sequence is completed.
+ */
+static struct ufs_qcom_phy_calibration phy_svs_mode_config_2_0_0[] = {
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_INTERFACE_MODE, 0x40),
+	UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_RX_PWM_GEAR_BAND, 0x15),
+};
+
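
Each calibration table is applied by a plain write loop over its (reg_offset, cfg_value) pairs, as in the SVS-mode fixup earlier in this patch. A hedged sketch of how the generic layer might consume one table (the helper name is assumed, not taken from this patch):

	static void ufs_qcom_phy_write_tbl(struct ufs_qcom_phy *phy,
			struct ufs_qcom_phy_calibration *tbl, int tbl_size)
	{
		int i;

		for (i = 0; i < tbl_size; i++)
			writel_relaxed(tbl[i].cfg_value,
				       phy->mmio + tbl[i].reg_offset);
		/* flush the whole configuration before starting the serdes */
		mb();
	}
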
 #endif
diff -ruw linux-4.4.115/drivers/phy/phy-qcom-ufs-qmp-20nm.c linux-4.4.115-fbx/drivers/phy/phy-qcom-ufs-qmp-20nm.c
--- linux-4.4.115/drivers/phy/phy-qcom-ufs-qmp-20nm.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/phy/phy-qcom-ufs-qmp-20nm.c	2019-01-22 16:16:26.079269734 +0100
@@ -147,6 +147,23 @@
 	mb();
 }
 
+static
+void ufs_qcom_phy_qmp_20nm_ctrl_rx_linecfg(struct ufs_qcom_phy *phy, bool ctrl)
+{
+	u32 temp;
+
+	temp = readl_relaxed(phy->mmio + UFS_PHY_LINECFG_DISABLE);
+
+	if (ctrl) /* enable RX LineCfg */
+		temp &= ~UFS_PHY_RX_LINECFG_DISABLE_BIT;
+	else /* disable RX LineCfg */
+		temp |= UFS_PHY_RX_LINECFG_DISABLE_BIT;
+
+	writel_relaxed(temp, phy->mmio + UFS_PHY_LINECFG_DISABLE);
+	/* make sure that RX LineCfg config applied before we return */
+	mb();
+}
+
 static inline void ufs_qcom_phy_qmp_20nm_start_serdes(struct ufs_qcom_phy *phy)
 {
 	u32 tmp;
@@ -171,7 +188,7 @@
 	return err;
 }
 
-static const struct phy_ops ufs_qcom_phy_qmp_20nm_phy_ops = {
+struct phy_ops ufs_qcom_phy_qmp_20nm_phy_ops = {
 	.init		= ufs_qcom_phy_qmp_20nm_init,
 	.exit		= ufs_qcom_phy_exit,
 	.power_on	= ufs_qcom_phy_power_on,
@@ -179,11 +196,12 @@
 	.owner		= THIS_MODULE,
 };
 
-static struct ufs_qcom_phy_specific_ops phy_20nm_ops = {
+struct ufs_qcom_phy_specific_ops phy_20nm_ops = {
 	.calibrate_phy		= ufs_qcom_phy_qmp_20nm_phy_calibrate,
 	.start_serdes		= ufs_qcom_phy_qmp_20nm_start_serdes,
 	.is_physical_coding_sublayer_ready = ufs_qcom_phy_qmp_20nm_is_pcs_ready,
 	.set_tx_lane_enable	= ufs_qcom_phy_qmp_20nm_set_tx_lane_enable,
+	.ctrl_rx_linecfg	= ufs_qcom_phy_qmp_20nm_ctrl_rx_linecfg,
 	.power_control		= ufs_qcom_phy_qmp_20nm_power_control,
 };
 
diff -ruw linux-4.4.115/drivers/phy/phy-qcom-ufs-qmp-20nm.h linux-4.4.115-fbx/drivers/phy/phy-qcom-ufs-qmp-20nm.h
--- linux-4.4.115/drivers/phy/phy-qcom-ufs-qmp-20nm.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/phy/phy-qcom-ufs-qmp-20nm.h	2019-01-22 16:16:26.079269734 +0100
@@ -101,6 +101,7 @@
 #define UFS_PHY_RX_MIN_SAVE_CONFIG_TIME_CAPABILITY	PHY_OFF(0xE8)
 #define UFS_PHY_RX_PWM_BURST_CLOSURE_LENGTH_CAPABILITY	PHY_OFF(0xFC)
 #define UFS_PHY_RX_MIN_ACTIVATETIME_CAPABILITY		PHY_OFF(0x100)
+#define UFS_PHY_LINECFG_DISABLE				PHY_OFF(0x134)
 #define UFS_PHY_RX_SIGDET_CTRL3				PHY_OFF(0x14c)
 #define UFS_PHY_RMMI_ATTR_CTRL			PHY_OFF(0x160)
 #define UFS_PHY_RMMI_RX_CFGUPDT_L1	(1 << 7)
@@ -118,6 +119,7 @@
 #define UFS_PHY_PCS_READY_STATUS		PHY_OFF(0x174)
 
 #define UFS_PHY_TX_LANE_ENABLE_MASK		0x3
+#define UFS_PHY_RX_LINECFG_DISABLE_BIT		BIT(1)
 
 /*
  * This structure represents the 20nm specific phy.
diff -ruw linux-4.4.115/drivers/pinctrl/devicetree.c linux-4.4.115-fbx/drivers/pinctrl/devicetree.c
--- linux-4.4.115/drivers/pinctrl/devicetree.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/pinctrl/devicetree.c	2019-01-22 16:16:26.087269806 +0100
@@ -195,8 +195,13 @@
 		propname = kasprintf(GFP_KERNEL, "pinctrl-%d", state);
 		prop = of_find_property(np, propname, &size);
 		kfree(propname);
-		if (!prop)
+		if (!prop) {
+			if (!state) {
+				ret = -EINVAL;
+				goto err;
+			}
 			break;
+		}
 		list = prop->value;
 		size /= sizeof(*list);
 
diff -ruw linux-4.4.115/drivers/pinctrl/qcom/Kconfig linux-4.4.115-fbx/drivers/pinctrl/qcom/Kconfig
--- linux-4.4.115/drivers/pinctrl/qcom/Kconfig	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/pinctrl/qcom/Kconfig	2019-01-22 16:16:26.111270024 +0100
@@ -96,4 +96,42 @@
          which are using SSBI for communication with SoC. Example PMIC's
          devices are pm8058 and pm8921.
 
+config PINCTRL_MSM8998
+	tristate "Qualcomm MSM8998 pin controller driver"
+	depends on GPIOLIB && OF
+	select PINCTRL_MSM
+	help
+	  This is the pinctrl, pinmux, pinconf and gpiolib driver for the
+	  Qualcomm TLMM block found in the Qualcomm MSM8998 platform.
+
+config PINCTRL_MSM8996
+	tristate "Qualcomm MSM8996 pin controller driver"
+	depends on GPIOLIB && OF
+	select PINCTRL_MSM
+	help
+	  This is the pinctrl, pinmux, pinconf and gpiolib driver for the
+	  Qualcomm TLMM block found in the Qualcomm MSM8996 platform.
+
+config PINCTRL_SDM660
+	tristate "Qualcomm SDM660 pin controller driver"
+	depends on GPIOLIB && OF
+	select PINCTRL_MSM
+	help
+	  This is the pinctrl, pinmux, pinconf and gpiolib driver for the
+	  Qualcomm TLMM block found in the Qualcomm SDM660 platform.
+
+config PINCTRL_WCD
+	tristate "Qualcomm Technologies, Inc WCD pin controller driver"
+	depends on WCD934X_CODEC
+	help
+	  This is the pinctrl, pinmux, pinconf and gpiolib driver for the
+	  WCD gpio controller block.
+
+config PINCTRL_LPI
+	tristate "Qualcomm Technologies, Inc LPI pin controller driver"
+	depends on GPIOLIB && OF
+	help
+	  This is the pinctrl, pinmux, pinconf and gpiolib driver for the
+	  LPI gpio controller block.
+
 endif
diff -ruw linux-4.4.115/drivers/pinctrl/qcom/Makefile linux-4.4.115-fbx/drivers/pinctrl/qcom/Makefile
--- linux-4.4.115/drivers/pinctrl/qcom/Makefile	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/pinctrl/qcom/Makefile	2019-01-22 16:16:26.111270024 +0100
@@ -12,3 +12,8 @@
 obj-$(CONFIG_PINCTRL_QCOM_SPMI_PMIC) += pinctrl-spmi-mpp.o
 obj-$(CONFIG_PINCTRL_QCOM_SSBI_PMIC) += pinctrl-ssbi-gpio.o
 obj-$(CONFIG_PINCTRL_QCOM_SSBI_PMIC) += pinctrl-ssbi-mpp.o
+obj-$(CONFIG_PINCTRL_MSM8996)	+= pinctrl-msm8996.o
+obj-$(CONFIG_PINCTRL_MSM8998)	+= pinctrl-msm8998.o
+obj-$(CONFIG_PINCTRL_SDM660)	+= pinctrl-sdm660.o
+obj-$(CONFIG_PINCTRL_WCD)	+= pinctrl-wcd.o
+obj-$(CONFIG_PINCTRL_LPI)	+= pinctrl-lpi.o
diff -ruw linux-4.4.115/drivers/pinctrl/qcom/pinctrl-msm.c linux-4.4.115-fbx/drivers/pinctrl/qcom/pinctrl-msm.c
--- linux-4.4.115/drivers/pinctrl/qcom/pinctrl-msm.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/pinctrl/qcom/pinctrl-msm.c	2019-10-29 09:26:24.585212397 +0100
@@ -1,6 +1,6 @@
 /*
  * Copyright (c) 2013, Sony Mobile Communications AB.
- * Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -27,9 +27,9 @@
 #include <linux/gpio.h>
 #include <linux/interrupt.h>
 #include <linux/spinlock.h>
+#include <linux/syscore_ops.h>
 #include <linux/reboot.h>
-#include <linux/pm.h>
-
+#include <linux/irqchip/msm-mpm-irq.h>
 #include "../core.h"
 #include "../pinconf.h"
 #include "pinctrl-msm.h"
@@ -44,6 +44,7 @@
  * @pctrl:          pinctrl handle.
  * @chip:           gpiochip handle.
  * @restart_nb:     restart notifier block.
+ * @irq_chip_extn:  MPM extension of TLMM irqchip.
  * @irq:            parent irq for the TLMM irq_chip.
  * @lock:           Spinlock to protect register resources as well
  *                  as msm_pinctrl data structures.
@@ -58,6 +59,7 @@
 	struct pinctrl_dev *pctrl;
 	struct gpio_chip chip;
 	struct notifier_block restart_nb;
+	struct irq_chip *irq_chip_extn;
 	int irq;
 
 	spinlock_t lock;
@@ -69,6 +71,8 @@
 	void __iomem *regs;
 };
 
+static struct msm_pinctrl *msm_pinctrl_data;
+
 static inline struct msm_pinctrl *to_msm_pinctrl(struct gpio_chip *gc)
 {
 	return container_of(gc, struct msm_pinctrl, chip);
@@ -421,7 +425,6 @@
 	writel(val, pctrl->regs + g->ctl_reg);
 
 	spin_unlock_irqrestore(&pctrl->lock, flags);
-
 	return 0;
 }
 
@@ -583,6 +586,8 @@
 	clear_bit(d->hwirq, pctrl->enabled_irqs);
 
 	spin_unlock_irqrestore(&pctrl->lock, flags);
+	if (pctrl->irq_chip_extn->irq_mask)
+		pctrl->irq_chip_extn->irq_mask(d);
 }
 
 static void msm_gpio_irq_unmask(struct irq_data *d)
@@ -597,6 +602,10 @@
 
 	spin_lock_irqsave(&pctrl->lock, flags);
 
+	val = readl(pctrl->regs + g->intr_status_reg);
+	val &= ~BIT(g->intr_status_bit);
+	writel(val, pctrl->regs + g->intr_status_reg);
+
 	val = readl(pctrl->regs + g->intr_cfg_reg);
 	val |= BIT(g->intr_enable_bit);
 	writel(val, pctrl->regs + g->intr_cfg_reg);
@@ -604,6 +613,8 @@
 	set_bit(d->hwirq, pctrl->enabled_irqs);
 
 	spin_unlock_irqrestore(&pctrl->lock, flags);
+	if (pctrl->irq_chip_extn->irq_unmask)
+		pctrl->irq_chip_extn->irq_unmask(d);
 }
 
 static void msm_gpio_irq_ack(struct irq_data *d)
@@ -722,6 +733,9 @@
 	else if (type & (IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING))
 		irq_set_handler_locked(d, handle_edge_irq);
 
+	if (pctrl->irq_chip_extn->irq_set_type)
+		pctrl->irq_chip_extn->irq_set_type(d, type);
+
 	return 0;
 }
 
@@ -737,6 +751,9 @@
 
 	spin_unlock_irqrestore(&pctrl->lock, flags);
 
+	if (pctrl->irq_chip_extn->irq_set_wake)
+		pctrl->irq_chip_extn->irq_set_wake(d, on);
+
 	return 0;
 }
 
@@ -783,6 +800,22 @@
 	chained_irq_exit(chip, desc);
 }
 
+/*
+ * MPM extension for the TLMM irqchip.
+ * It lets the MPM driver hook the mask/unmask, set_type and set_wake
+ * calls so that GPIO interrupts can wake the system from suspend.
+ */
+
+struct irq_chip mpm_pinctrl_extn = {
+	.irq_eoi	= NULL,
+	.irq_mask	= NULL,
+	.irq_unmask	= NULL,
+	.irq_retrigger	= NULL,
+	.irq_set_type	= NULL,
+	.irq_set_wake	= NULL,
+	.irq_disable	= NULL,
+};
+
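
The extension starts out with every callback NULL; the TLMM paths above only delegate when a callback is non-NULL, so the MPM driver is expected to fill in the hooks it implements at init time. A hedged sketch with hypothetical MPM function names:

	/* hypothetical registration from the MPM driver */
	void msm_mpm_register_pinctrl_extn(void)
	{
		mpm_pinctrl_extn.irq_mask     = msm_mpm_irq_mask;
		mpm_pinctrl_extn.irq_unmask   = msm_mpm_irq_unmask;
		mpm_pinctrl_extn.irq_set_type = msm_mpm_irq_set_type;
		mpm_pinctrl_extn.irq_set_wake = msm_mpm_irq_set_wake;
	}
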
 static int msm_gpio_init(struct msm_pinctrl *pctrl)
 {
 	struct gpio_chip *chip;
@@ -826,6 +859,7 @@
 
 	gpiochip_set_chained_irqchip(chip, &msm_gpio_irq_chip, pctrl->irq,
 				     msm_gpio_irq_handler);
+	of_mpm_init();
 
 	return 0;
 }
@@ -865,6 +899,52 @@
 		}
 }
 
+#ifdef CONFIG_PM
+static int msm_pinctrl_suspend(void)
+{
+	return 0;
+}
+
+static void msm_pinctrl_resume(void)
+{
+	int i, irq;
+	u32 val;
+	unsigned long flags;
+	struct irq_desc *desc;
+	const struct msm_pingroup *g;
+	const char *name = "null";
+	struct msm_pinctrl *pctrl = msm_pinctrl_data;
+
+	if (!msm_show_resume_irq_mask)
+		return;
+
+	spin_lock_irqsave(&pctrl->lock, flags);
+	for_each_set_bit(i, pctrl->enabled_irqs, pctrl->chip.ngpio) {
+		g = &pctrl->soc->groups[i];
+		val = readl_relaxed(pctrl->regs + g->intr_status_reg);
+		if (val & BIT(g->intr_status_bit)) {
+			irq = irq_find_mapping(pctrl->chip.irqdomain, i);
+			desc = irq_to_desc(irq);
+			if (desc == NULL)
+				name = "stray irq";
+			else if (desc->action && desc->action->name)
+				name = desc->action->name;
+
+			pr_warn("%s: %d triggered %s\n", __func__, irq, name);
+		}
+	}
+	spin_unlock_irqrestore(&pctrl->lock, flags);
+}
+#else
+#define msm_pinctrl_suspend NULL
+#define msm_pinctrl_resume NULL
+#endif
+
+static struct syscore_ops msm_pinctrl_pm_ops = {
+	.suspend = msm_pinctrl_suspend,
+	.resume = msm_pinctrl_resume,
+};
+
 int msm_pinctrl_probe(struct platform_device *pdev,
 		      const struct msm_pinctrl_soc_data *soc_data)
 {
@@ -872,7 +952,8 @@
 	struct resource *res;
 	int ret;
 
-	pctrl = devm_kzalloc(&pdev->dev, sizeof(*pctrl), GFP_KERNEL);
+	msm_pinctrl_data = pctrl = devm_kzalloc(&pdev->dev,
+				sizeof(*pctrl), GFP_KERNEL);
 	if (!pctrl) {
 		dev_err(&pdev->dev, "Can't allocate msm_pinctrl\n");
 		return -ENOMEM;
@@ -910,9 +991,10 @@
 		pinctrl_unregister(pctrl->pctrl);
 		return ret;
 	}
-
+	pctrl->irq_chip_extn = &mpm_pinctrl_extn;
 	platform_set_drvdata(pdev, pctrl);
 
+	register_syscore_ops(&msm_pinctrl_pm_ops);
 	dev_dbg(&pdev->dev, "Probed Qualcomm pinctrl driver\n");
 
 	return 0;
@@ -927,6 +1009,7 @@
 	pinctrl_unregister(pctrl->pctrl);
 
 	unregister_restart_handler(&pctrl->restart_nb);
+	unregister_syscore_ops(&msm_pinctrl_pm_ops);
 
 	return 0;
 }
diff -ruw linux-4.4.115/drivers/pinctrl/qcom/pinctrl-msm.h linux-4.4.115-fbx/drivers/pinctrl/qcom/pinctrl-msm.h
--- linux-4.4.115/drivers/pinctrl/qcom/pinctrl-msm.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/pinctrl/qcom/pinctrl-msm.h	2019-01-22 16:16:26.111270024 +0100
@@ -121,4 +121,5 @@
 		      const struct msm_pinctrl_soc_data *soc_data);
 int msm_pinctrl_remove(struct platform_device *pdev);
 
+extern int msm_show_resume_irq_mask;
 #endif
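
msm_show_resume_irq_mask gates the wake-IRQ print in msm_pinctrl_resume() and is defined elsewhere in the MSM tree; a sketch of the usual definition, assuming it is exposed as a module parameter as in other MSM kernels:

	/* assumed definition, typically in the MSM PM/irq code */
	int msm_show_resume_irq_mask;
	module_param_named(debug_mask, msm_show_resume_irq_mask, int, 0664);
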
diff -ruw linux-4.4.115/drivers/platform/goldfish/Kconfig linux-4.4.115-fbx/drivers/platform/goldfish/Kconfig
--- linux-4.4.115/drivers/platform/goldfish/Kconfig	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/platform/goldfish/Kconfig	2019-01-22 16:16:26.143270313 +0100
@@ -1,5 +1,23 @@
+menuconfig GOLDFISH
+	bool "Platform support for Goldfish virtual devices"
+	depends on X86_32 || X86_64 || ARM || ARM64 || MIPS
+	---help---
+	  Say Y here to get to see options for the Goldfish virtual platform.
+	  This option alone does not add any kernel code.
+
+	  Unless you are building for the Android Goldfish emulator say N here.
+
+if GOLDFISH
+
+config GOLDFISH_BUS
+	bool "Goldfish platform bus"
+	---help---
+	  This is a virtual bus to host Goldfish Android Virtual Devices.
+
 config GOLDFISH_PIPE
 	tristate "Goldfish virtual device for QEMU pipes"
 	---help---
 	  This is a virtual device to drive the QEMU pipe interface used by
 	  the Goldfish Android Virtual Device.
+
+endif # GOLDFISH
diff -ruw linux-4.4.115/drivers/platform/goldfish/Makefile linux-4.4.115-fbx/drivers/platform/goldfish/Makefile
--- linux-4.4.115/drivers/platform/goldfish/Makefile	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/platform/goldfish/Makefile	2019-01-22 16:16:26.143270313 +0100
@@ -1,5 +1,6 @@
 #
 # Makefile for Goldfish platform specific drivers
 #
-obj-$(CONFIG_GOLDFISH)	+=	pdev_bus.o
-obj-$(CONFIG_GOLDFISH_PIPE)	+= goldfish_pipe.o
+obj-$(CONFIG_GOLDFISH_BUS)	+= pdev_bus.o
+obj-$(CONFIG_GOLDFISH_PIPE)	+= goldfish_pipe_all.o
+goldfish_pipe_all-objs := goldfish_pipe.o goldfish_pipe_v2.o
diff -ruw linux-4.4.115/drivers/platform/Kconfig linux-4.4.115-fbx/drivers/platform/Kconfig
--- linux-4.4.115/drivers/platform/Kconfig	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/platform/Kconfig	2019-01-22 16:16:26.143270313 +0100
@@ -4,8 +4,8 @@
 if MIPS
 source "drivers/platform/mips/Kconfig"
 endif
-if GOLDFISH
 source "drivers/platform/goldfish/Kconfig"
-endif
 
 source "drivers/platform/chrome/Kconfig"
+
+source "drivers/platform/msm/Kconfig"
diff -ruw linux-4.4.115/drivers/platform/Makefile linux-4.4.115-fbx/drivers/platform/Makefile
--- linux-4.4.115/drivers/platform/Makefile	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/platform/Makefile	2019-01-22 16:16:26.143270313 +0100
@@ -7,3 +7,5 @@
 obj-$(CONFIG_OLPC)		+= olpc/
 obj-$(CONFIG_GOLDFISH)		+= goldfish/
 obj-$(CONFIG_CHROME_PLATFORMS)	+= chrome/
+obj-$(CONFIG_ARCH_QCOM)          += msm/
+
diff -ruw linux-4.4.115/drivers/power/Kconfig linux-4.4.115-fbx/drivers/power/Kconfig
--- linux-4.4.115/drivers/power/Kconfig	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/power/Kconfig	2019-10-29 09:26:24.625212788 +0100
@@ -449,6 +449,18 @@
 	  Say Y to include support for Summit Microelectronics SMB347
 	  Battery Charger.
 
+config BATTERY_BQ28400
+	tristate "BQ28400 battery driver"
+	depends on I2C
+	default n
+	help
+	  Say Y here to enable support for batteries with BQ28400 (I2C) chips.
+	  The BQ28400 is a Texas Instruments device that monitors the battery
+	  charging/discharging status via an Rsens resistor (typically 10 mohm)
+	  and the battery temperature via a thermistor. It also monitors the
+	  battery level (Relative State Of Charge). The device is SBS
+	  compliant, providing battery info over I2C.
+
 config CHARGER_TPS65090
 	tristate "TPS65090 battery charger driver"
 	depends on MFD_TPS65090
@@ -509,7 +521,9 @@
 	  AXP20x PMIC.
 
 source "drivers/power/reset/Kconfig"
+source "drivers/power/supply/Kconfig"
 
 endif # POWER_SUPPLY
 
 source "drivers/power/avs/Kconfig"
+source "drivers/power/qcom/Kconfig"
diff -ruw linux-4.4.115/drivers/power/Makefile linux-4.4.115-fbx/drivers/power/Makefile
--- linux-4.4.115/drivers/power/Makefile	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/power/Makefile	2019-01-22 16:16:26.211270929 +0100
@@ -72,3 +72,5 @@
 obj-$(CONFIG_POWER_RESET)	+= reset/
 obj-$(CONFIG_AXP288_FUEL_GAUGE) += axp288_fuel_gauge.o
 obj-$(CONFIG_AXP288_CHARGER)	+= axp288_charger.o
+obj-$(CONFIG_ARCH_QCOM)		+= qcom/
+obj-$(CONFIG_POWER_SUPPLY)	+= supply/
diff -ruw linux-4.4.115/drivers/power/power_supply_sysfs.c linux-4.4.115-fbx/drivers/power/power_supply_sysfs.c
--- linux-4.4.115/drivers/power/power_supply_sysfs.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/power/power_supply_sysfs.c	2019-10-29 09:26:24.629212827 +0100
@@ -44,19 +44,23 @@
 					  struct device_attribute *attr,
 					  char *buf) {
 	static char *type_text[] = {
-		"Unknown", "Battery", "UPS", "Mains", "USB",
-		"USB_DCP", "USB_CDP", "USB_ACA"
+		"Unknown", "Battery", "UPS", "Mains", "USB", "USB_DCP",
+		"USB_CDP", "USB_ACA", "USB_HVDCP", "USB_HVDCP_3", "USB_PD",
+		"Wireless", "USB_FLOAT", "BMS", "Parallel", "Main", "Wipower",
+		"TYPEC", "TYPEC_UFP", "TYPEC_DFP"
 	};
 	static char *status_text[] = {
 		"Unknown", "Charging", "Discharging", "Not charging", "Full"
 	};
 	static char *charge_type[] = {
-		"Unknown", "N/A", "Trickle", "Fast"
+		"Unknown", "N/A", "Trickle", "Fast",
+		"Taper"
 	};
 	static char *health_text[] = {
 		"Unknown", "Good", "Overheat", "Dead", "Over voltage",
 		"Unspecified failure", "Cold", "Watchdog timer expire",
-		"Safety timer expire"
+		"Safety timer expire",
+		"Warm", "Cool", "Hot"
 	};
 	static char *technology_text[] = {
 		"Unknown", "NiMH", "Li-ion", "Li-poly", "LiFe", "NiCd",
@@ -68,6 +72,17 @@
 	static char *scope_text[] = {
 		"Unknown", "System", "Device"
 	};
+	static char *typec_text[] = {
+		"Nothing attached", "Sink attached", "Powered cable w/ sink",
+		"Debug Accessory", "Audio Adapter", "Powered cable w/o sink",
+		"Source attached (default current)",
+		"Source attached (medium current)",
+		"Source attached (high current)",
+		"Non compliant",
+	};
+	static char *typec_pr_text[] = {
+		"none", "dual power role", "sink", "source"
+	};
 	ssize_t ret = 0;
 	struct power_supply *psy = dev_get_drvdata(dev);
 	const ptrdiff_t off = attr - power_supply_attrs;
@@ -99,13 +114,25 @@
 		return sprintf(buf, "%s\n", technology_text[value.intval]);
 	else if (off == POWER_SUPPLY_PROP_CAPACITY_LEVEL)
 		return sprintf(buf, "%s\n", capacity_level_text[value.intval]);
-	else if (off == POWER_SUPPLY_PROP_TYPE)
+	else if (off == POWER_SUPPLY_PROP_TYPE ||
+			off == POWER_SUPPLY_PROP_REAL_TYPE)
 		return sprintf(buf, "%s\n", type_text[value.intval]);
 	else if (off == POWER_SUPPLY_PROP_SCOPE)
 		return sprintf(buf, "%s\n", scope_text[value.intval]);
+	else if (off == POWER_SUPPLY_PROP_TYPEC_MODE)
+		return sprintf(buf, "%s\n", typec_text[value.intval]);
+	else if (off == POWER_SUPPLY_PROP_TYPEC_POWER_ROLE)
+		return sprintf(buf, "%s\n", typec_pr_text[value.intval]);
+	else if (off == POWER_SUPPLY_PROP_DIE_HEALTH)
+		return sprintf(buf, "%s\n", health_text[value.intval]);
+	else if (off == POWER_SUPPLY_PROP_CONNECTOR_HEALTH)
+		return sprintf(buf, "%s\n", health_text[value.intval]);
 	else if (off >= POWER_SUPPLY_PROP_MODEL_NAME)
 		return sprintf(buf, "%s\n", value.strval);
 
+	if (off == POWER_SUPPLY_PROP_CHARGE_COUNTER_EXT)
+		return sprintf(buf, "%lld\n", value.int64val);
+	else
 	return sprintf(buf, "%d\n", value.intval);
 }
 
@@ -162,6 +189,8 @@
 	POWER_SUPPLY_ATTR(charge_full),
 	POWER_SUPPLY_ATTR(charge_empty),
 	POWER_SUPPLY_ATTR(charge_now),
+	POWER_SUPPLY_ATTR(charge_now_raw),
+	POWER_SUPPLY_ATTR(charge_now_error),
 	POWER_SUPPLY_ATTR(charge_avg),
 	POWER_SUPPLY_ATTR(charge_counter),
 	POWER_SUPPLY_ATTR(constant_charge_current),
@@ -181,6 +210,7 @@
 	POWER_SUPPLY_ATTR(capacity_alert_min),
 	POWER_SUPPLY_ATTR(capacity_alert_max),
 	POWER_SUPPLY_ATTR(capacity_level),
+	POWER_SUPPLY_ATTR(capacity_raw),
 	POWER_SUPPLY_ATTR(temp),
 	POWER_SUPPLY_ATTR(temp_max),
 	POWER_SUPPLY_ATTR(temp_min),
@@ -197,10 +227,91 @@
 	POWER_SUPPLY_ATTR(scope),
 	POWER_SUPPLY_ATTR(charge_term_current),
 	POWER_SUPPLY_ATTR(calibrate),
+	/* Local extensions */
+	POWER_SUPPLY_ATTR(usb_hc),
+	POWER_SUPPLY_ATTR(usb_otg),
+	POWER_SUPPLY_ATTR(battery_charging_enabled),
+	POWER_SUPPLY_ATTR(charging_enabled),
+	POWER_SUPPLY_ATTR(step_charging_enabled),
+	POWER_SUPPLY_ATTR(step_charging_step),
+	POWER_SUPPLY_ATTR(pin_enabled),
+	POWER_SUPPLY_ATTR(input_suspend),
+	POWER_SUPPLY_ATTR(input_voltage_regulation),
+	POWER_SUPPLY_ATTR(input_current_max),
+	POWER_SUPPLY_ATTR(input_current_trim),
+	POWER_SUPPLY_ATTR(input_current_settled),
+	POWER_SUPPLY_ATTR(input_voltage_settled),
+	POWER_SUPPLY_ATTR(bypass_vchg_loop_debouncer),
+	POWER_SUPPLY_ATTR(charge_counter_shadow),
+	POWER_SUPPLY_ATTR(hi_power),
+	POWER_SUPPLY_ATTR(low_power),
+	POWER_SUPPLY_ATTR(temp_cool),
+	POWER_SUPPLY_ATTR(temp_warm),
+	POWER_SUPPLY_ATTR(temp_cold),
+	POWER_SUPPLY_ATTR(temp_hot),
+	POWER_SUPPLY_ATTR(system_temp_level),
+	POWER_SUPPLY_ATTR(resistance),
+	POWER_SUPPLY_ATTR(resistance_capacitive),
+	POWER_SUPPLY_ATTR(resistance_id),
+	POWER_SUPPLY_ATTR(resistance_now),
+	POWER_SUPPLY_ATTR(flash_current_max),
+	POWER_SUPPLY_ATTR(update_now),
+	POWER_SUPPLY_ATTR(esr_count),
+	POWER_SUPPLY_ATTR(buck_freq),
+	POWER_SUPPLY_ATTR(boost_current),
+	POWER_SUPPLY_ATTR(safety_timer_enabled),
+	POWER_SUPPLY_ATTR(charge_done),
+	POWER_SUPPLY_ATTR(flash_active),
+	POWER_SUPPLY_ATTR(flash_trigger),
+	POWER_SUPPLY_ATTR(force_tlim),
+	POWER_SUPPLY_ATTR(dp_dm),
+	POWER_SUPPLY_ATTR(input_current_limited),
+	POWER_SUPPLY_ATTR(input_current_now),
+	POWER_SUPPLY_ATTR(charge_qnovo_enable),
+	POWER_SUPPLY_ATTR(current_qnovo),
+	POWER_SUPPLY_ATTR(voltage_qnovo),
+	POWER_SUPPLY_ATTR(rerun_aicl),
+	POWER_SUPPLY_ATTR(cycle_count_id),
+	POWER_SUPPLY_ATTR(safety_timer_expired),
+	POWER_SUPPLY_ATTR(restricted_charging),
+	POWER_SUPPLY_ATTR(current_capability),
+	POWER_SUPPLY_ATTR(typec_mode),
+	POWER_SUPPLY_ATTR(typec_cc_orientation),
+	POWER_SUPPLY_ATTR(typec_power_role),
+	POWER_SUPPLY_ATTR(pd_allowed),
+	POWER_SUPPLY_ATTR(pd_active),
+	POWER_SUPPLY_ATTR(pd_in_hard_reset),
+	POWER_SUPPLY_ATTR(pd_current_max),
+	POWER_SUPPLY_ATTR(pd_usb_suspend_supported),
+	POWER_SUPPLY_ATTR(charger_temp),
+	POWER_SUPPLY_ATTR(charger_temp_max),
+	POWER_SUPPLY_ATTR(parallel_disable),
+	POWER_SUPPLY_ATTR(pe_start),
+	POWER_SUPPLY_ATTR(set_ship_mode),
+	POWER_SUPPLY_ATTR(soc_reporting_ready),
+	POWER_SUPPLY_ATTR(debug_battery),
+	POWER_SUPPLY_ATTR(fcc_delta),
+	POWER_SUPPLY_ATTR(icl_reduction),
+	POWER_SUPPLY_ATTR(parallel_mode),
+	POWER_SUPPLY_ATTR(die_health),
+	POWER_SUPPLY_ATTR(connector_health),
+	POWER_SUPPLY_ATTR(ctm_current_max),
+	POWER_SUPPLY_ATTR(hw_current_max),
+	POWER_SUPPLY_ATTR(real_type),
+	POWER_SUPPLY_ATTR(pr_swap),
+	POWER_SUPPLY_ATTR(cc_step),
+	POWER_SUPPLY_ATTR(cc_step_sel),
+	POWER_SUPPLY_ATTR(sw_jeita_enabled),
+	POWER_SUPPLY_ATTR(pd_voltage_max),
+	POWER_SUPPLY_ATTR(pd_voltage_min),
+	POWER_SUPPLY_ATTR(sdp_current_max),
+	/* Local extensions of type int64_t */
+	POWER_SUPPLY_ATTR(charge_counter_ext),
 	/* Properties of type `const char *' */
 	POWER_SUPPLY_ATTR(model_name),
 	POWER_SUPPLY_ATTR(manufacturer),
 	POWER_SUPPLY_ATTR(serial_number),
+	POWER_SUPPLY_ATTR(battery_type),
 };
 
 static struct attribute *
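
Each POWER_SUPPLY_ATTR(name) entry becomes a sysfs file of the same name under /sys/class/power_supply/<supply>/, wired to power_supply_show_property() above. In this kernel version the macro, defined near the top of this file, is roughly:

	#define POWER_SUPPLY_ATTR(_name)				\
	{								\
		.attr = { .name = #_name },				\
		.show = power_supply_show_property,			\
		.store = power_supply_store_property,			\
	}
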
diff -ruw linux-4.4.115/drivers/power/reset/Kconfig linux-4.4.115-fbx/drivers/power/reset/Kconfig
--- linux-4.4.115/drivers/power/reset/Kconfig	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/power/reset/Kconfig	2019-01-22 16:16:26.223271038 +0100
@@ -80,12 +80,6 @@
 	  say N here or disable in dts to make sure pm_power_off never be
 	  overwrote wrongly by this driver.
 
-config POWER_RESET_MSM
-	bool "Qualcomm MSM power-off driver"
-	depends on ARCH_QCOM
-	help
-	  Power off and restart support for Qualcomm boards.
-
 config POWER_RESET_LTC2952
 	bool "LTC2952 PowerPath power-off driver"
 	depends on OF_GPIO
@@ -93,6 +87,22 @@
 	  This driver supports an external powerdown trigger and board power
 	  down via the LTC2952. Bindings are made in the device tree.
 
+config POWER_RESET_QCOM
+	bool "Qualcomm MSM power-off driver"
+	depends on ARCH_MSM || ARCH_QCOM
+	depends on POWER_RESET
+	help
+	  Power off and restart support for Qualcomm boards.
+
+config QCOM_DLOAD_MODE
+	bool "Qualcomm download mode"
+	depends on POWER_RESET_QCOM
+	help
+	  This makes the SoC enter download mode when it resets
+	  due to a kernel panic. Note that this doesn't by itself
+	  make the kernel reboot on a kernel panic - that must be
+	  enabled via another mechanism.
+
 config POWER_RESET_QNAP
 	bool "QNAP power-off driver"
 	depends on OF_GPIO && PLAT_ORION
@@ -173,5 +183,19 @@
 	help
 	  Reboot support for ZTE SoCs.
 
+config REBOOT_MODE
+	tristate
+
+config SYSCON_REBOOT_MODE
+	tristate "Generic SYSCON regmap reboot mode driver"
+	depends on OF
+	select REBOOT_MODE
+	select MFD_SYSCON
+	help
+	  Say Y here to enable the reboot mode driver. It stores the
+	  reboot mode argument in a SYSCON-mapped register so that the
+	  bootloader can read it and take a different action according
+	  to the mode.
+
 endif
 
diff -ruw linux-4.4.115/drivers/power/reset/Makefile linux-4.4.115-fbx/drivers/power/reset/Makefile
--- linux-4.4.115/drivers/power/reset/Makefile	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/power/reset/Makefile	2019-01-22 16:16:26.223271038 +0100
@@ -7,7 +7,7 @@
 obj-$(CONFIG_POWER_RESET_GPIO_RESTART) += gpio-restart.o
 obj-$(CONFIG_POWER_RESET_HISI) += hisi-reboot.o
 obj-$(CONFIG_POWER_RESET_IMX) += imx-snvs-poweroff.o
-obj-$(CONFIG_POWER_RESET_MSM) += msm-poweroff.o
+obj-$(CONFIG_POWER_RESET_QCOM) += msm-poweroff.o
 obj-$(CONFIG_POWER_RESET_LTC2952) += ltc2952-poweroff.o
 obj-$(CONFIG_POWER_RESET_QNAP) += qnap-poweroff.o
 obj-$(CONFIG_POWER_RESET_RESTART) += restart-poweroff.o
@@ -20,3 +20,5 @@
 obj-$(CONFIG_POWER_RESET_SYSCON_POWEROFF) += syscon-poweroff.o
 obj-$(CONFIG_POWER_RESET_RMOBILE) += rmobile-reset.o
 obj-$(CONFIG_POWER_RESET_ZX) += zx-reboot.o
+obj-$(CONFIG_REBOOT_MODE) += reboot-mode.o
+obj-$(CONFIG_SYSCON_REBOOT_MODE) += syscon-reboot-mode.o
diff -ruw linux-4.4.115/drivers/power/reset/msm-poweroff.c linux-4.4.115-fbx/drivers/power/reset/msm-poweroff.c
--- linux-4.4.115/drivers/power/reset/msm-poweroff.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/power/reset/msm-poweroff.c	2019-07-17 21:24:56.793296145 +0200
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -15,49 +15,686 @@
 #include <linux/err.h>
 #include <linux/init.h>
 #include <linux/kernel.h>
+
 #include <linux/io.h>
 #include <linux/of.h>
 #include <linux/platform_device.h>
 #include <linux/module.h>
 #include <linux/reboot.h>
 #include <linux/pm.h>
+#include <linux/delay.h>
+#include <linux/input/qpnp-power-on.h>
+#include <linux/of_address.h>
 
+#include <asm/cacheflush.h>
+#include <asm/system_misc.h>
+#include <asm/memory.h>
+
+#include <soc/qcom/scm.h>
+#include <soc/qcom/restart.h>
+#include <soc/qcom/watchdog.h>
+#include <soc/qcom/minidump.h>
+
+#define EMERGENCY_DLOAD_MAGIC1    0x322A4F99
+#define EMERGENCY_DLOAD_MAGIC2    0xC67E4350
+#define EMERGENCY_DLOAD_MAGIC3    0x77777777
+#define EMMC_DLOAD_TYPE		0x2
+
+#define SCM_IO_DISABLE_PMIC_ARBITER	1
+#define SCM_IO_DEASSERT_PS_HOLD		2
+#define SCM_WDOG_DEBUG_BOOT_PART	0x9
+#define SCM_DLOAD_FULLDUMP		0x10
+#define SCM_EDLOAD_MODE			0x01
+#define SCM_DLOAD_CMD			0x10
+#define SCM_DLOAD_MINIDUMP		0x20
+#define SCM_DLOAD_BOTHDUMPS	(SCM_DLOAD_MINIDUMP | SCM_DLOAD_FULLDUMP)
+
+static int restart_mode;
+static void *restart_reason;
+static bool scm_pmic_arbiter_disable_supported;
+static bool scm_deassert_ps_hold_supported;
+/* Download mode master kill-switch */
 static void __iomem *msm_ps_hold;
-static int do_msm_restart(struct notifier_block *nb, unsigned long action,
-			   void *data)
+static phys_addr_t tcsr_boot_misc_detect;
+static void scm_disable_sdi(void);
+
+/*
+ * This can only be changed once at runtime: there is no API from TZ
+ * to re-enable the registers, so SDI cannot be re-enabled once it has
+ * been bypassed.
+ */
+
+#ifdef CONFIG_QCOM_DLOAD_MODE
+#define EDL_MODE_PROP "qcom,msm-imem-emergency_download_mode"
+#define DL_MODE_PROP "qcom,msm-imem-download_mode"
+#ifdef CONFIG_RANDOMIZE_BASE
+#define KASLR_OFFSET_PROP "qcom,msm-imem-kaslr_offset"
+#endif
+
+static int dload_type = SCM_DLOAD_FULLDUMP;
+static int download_mode = 1;
+static struct kobject dload_kobj;
+static void *dload_mode_addr, *dload_type_addr;
+static bool dload_mode_enabled;
+static void *emergency_dload_mode_addr;
+#ifdef CONFIG_RANDOMIZE_BASE
+static void *kaslr_imem_addr;
+#endif
+static bool scm_dload_supported;
+
+static int dload_set(const char *val, struct kernel_param *kp);
+/* interface for exporting attributes */
+struct reset_attribute {
+	struct attribute        attr;
+	ssize_t (*show)(struct kobject *kobj, struct attribute *attr,
+			char *buf);
+	size_t (*store)(struct kobject *kobj, struct attribute *attr,
+			const char *buf, size_t count);
+};
+#define to_reset_attr(_attr) \
+	container_of(_attr, struct reset_attribute, attr)
+#define RESET_ATTR(_name, _mode, _show, _store)	\
+	static struct reset_attribute reset_attr_##_name = \
+			__ATTR(_name, _mode, _show, _store)
+
+module_param_call(download_mode, dload_set, param_get_int,
+			&download_mode, 0644);
+
+
+int scm_set_dload_mode(int arg1, int arg2)
 {
-	writel(0, msm_ps_hold);
-	mdelay(10000);
+	struct scm_desc desc = {
+		.args[0] = arg1,
+		.args[1] = arg2,
+		.arginfo = SCM_ARGS(2),
+	};
+
+	if (!scm_dload_supported) {
+		if (tcsr_boot_misc_detect)
+			return scm_io_write(tcsr_boot_misc_detect, arg1);
+
+		return 0;
+	}
+
+	if (!is_scm_armv8())
+		return scm_call_atomic2(SCM_SVC_BOOT, SCM_DLOAD_CMD, arg1,
+					arg2);
+
+	return scm_call2_atomic(SCM_SIP_FNID(SCM_SVC_BOOT, SCM_DLOAD_CMD),
+				&desc);
+}
+
+static void set_dload_mode(int on)
+{
+	int ret;
+
+	if (dload_mode_addr) {
+		__raw_writel(on ? 0xE47B337D : 0, dload_mode_addr);
+		__raw_writel(on ? 0xCE14091A : 0,
+		       dload_mode_addr + sizeof(unsigned int));
+		mb();
+	}
+
+	ret = scm_set_dload_mode(on ? dload_type : 0, 0);
+	if (ret)
+		pr_err("Failed to set secure DLOAD mode: %d\n", ret);
+
+	dload_mode_enabled = on;
+}
+
+static bool get_dload_mode(void)
+{
+	return dload_mode_enabled;
+}
+
+static void enable_emergency_dload_mode(void)
+{
+	int ret;
+
+	if (emergency_dload_mode_addr) {
+		__raw_writel(EMERGENCY_DLOAD_MAGIC1,
+				emergency_dload_mode_addr);
+		__raw_writel(EMERGENCY_DLOAD_MAGIC2,
+				emergency_dload_mode_addr +
+				sizeof(unsigned int));
+		__raw_writel(EMERGENCY_DLOAD_MAGIC3,
+				emergency_dload_mode_addr +
+				(2 * sizeof(unsigned int)));
+
+		/*
+		 * Disable the PMIC watchdog so that emergency dload mode
+		 * does not auto-reset.
+		 */
+		qpnp_pon_wd_config(0);
+		mb();
+	}
+
+	ret = scm_set_dload_mode(SCM_EDLOAD_MODE, 0);
+	if (ret)
+		pr_err("Failed to set secure EDLOAD mode: %d\n", ret);
+}
+
+static int dload_set(const char *val, struct kernel_param *kp)
+{
+	int ret;
+	int old_val = download_mode;
+
+	ret = param_set_int(val, kp);
+	if (ret)
+		return ret;
+
+	/* If download_mode is not zero or one, reject it. */
+	if (download_mode >> 1) {
+		download_mode = old_val;
+		return -EINVAL;
+	}
+
+	set_dload_mode(download_mode);
+
+	return 0;
+}
+#else
+static void set_dload_mode(int on)
+{
+	return;
+}
+
+static void enable_emergency_dload_mode(void)
+{
+	pr_err("dload mode is not enabled on target\n");
+}
 
+static bool get_dload_mode(void)
+{
+	return false;
+}
+#endif
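
Since module_param_call() above registers dload_set() as the setter, the knob is writable at runtime as well as on the kernel command line; the download_mode >> 1 test rejects every value except 0 and 1. A hedged usage sketch (the sysfs path assumes this file is built in as msm-poweroff):

	/*
	 * Assumed interface:
	 *   kernel cmdline:  msm_poweroff.download_mode=0
	 *   runtime:         echo 1 > /sys/module/msm_poweroff/parameters/download_mode
	 * dload_set() validates the value and immediately reprograms the
	 * dload cookies via set_dload_mode().
	 */
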
+
+static int in_panic;
+
+static int panic_prep_restart(struct notifier_block *this,
+			      unsigned long event, void *ptr)
+{
+	in_panic = 1;
 	return NOTIFY_DONE;
 }
 
-static struct notifier_block restart_nb = {
-	.notifier_call = do_msm_restart,
-	.priority = 128,
+static struct notifier_block panic_blk = {
+	.notifier_call	= panic_prep_restart,
+};
+
+static void scm_disable_sdi(void)
+{
+	int ret;
+	struct scm_desc desc = {
+		.args[0] = 1,
+		.args[1] = 0,
+		.arginfo = SCM_ARGS(2),
+	};
+
+	/* Needed to bypass debug image on some chips */
+	if (!is_scm_armv8())
+		ret = scm_call_atomic2(SCM_SVC_BOOT,
+			       SCM_WDOG_DEBUG_BOOT_PART, 1, 0);
+	else
+		ret = scm_call2_atomic(SCM_SIP_FNID(SCM_SVC_BOOT,
+			  SCM_WDOG_DEBUG_BOOT_PART), &desc);
+	if (ret)
+		pr_err("Failed to disable secure wdog debug: %d\n", ret);
+}
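
The is_scm_armv8() split above recurs throughout this file: legacy targets use the scm_call_atomicN() convention, while ARMv8 targets marshal arguments through struct scm_desc and scm_call2_atomic(). A hedged helper that factors out the pattern for two-argument SCM_SVC_BOOT calls (the wrapper name is invented):

	static int scm_boot_call2(u32 cmd, u32 arg1, u32 arg2)
	{
		struct scm_desc desc = {
			.args[0] = arg1,
			.args[1] = arg2,
			.arginfo = SCM_ARGS(2),
		};

		if (!is_scm_armv8())
			return scm_call_atomic2(SCM_SVC_BOOT, cmd, arg1, arg2);

		return scm_call2_atomic(SCM_SIP_FNID(SCM_SVC_BOOT, cmd),
					&desc);
	}
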
+
+void msm_set_restart_mode(int mode)
+{
+	restart_mode = mode;
+}
+EXPORT_SYMBOL(msm_set_restart_mode);
+
+/*
+ * Force the SPMI PMIC arbiter to shutdown so that no more SPMI transactions
+ * are sent from the MSM to the PMIC.  This is required in order to avoid an
+ * SPMI lockup on certain PMIC chips if PS_HOLD is lowered in the middle of
+ * an SPMI transaction.
+ */
+static void halt_spmi_pmic_arbiter(void)
+{
+	struct scm_desc desc = {
+		.args[0] = 0,
+		.arginfo = SCM_ARGS(1),
+	};
+
+	if (scm_pmic_arbiter_disable_supported) {
+		pr_crit("Calling SCM to disable SPMI PMIC arbiter\n");
+		if (!is_scm_armv8())
+			scm_call_atomic1(SCM_SVC_PWR,
+					 SCM_IO_DISABLE_PMIC_ARBITER, 0);
+		else
+			scm_call2_atomic(SCM_SIP_FNID(SCM_SVC_PWR,
+				  SCM_IO_DISABLE_PMIC_ARBITER), &desc);
+	}
+}
+
+static void msm_restart_prepare(const char *cmd)
+{
+	bool need_warm_reset = false;
+
+#ifdef CONFIG_QCOM_DLOAD_MODE
+
+	/*
+	 * Write download mode flags if we're panicking or if restart_mode
+	 * says so; kill download mode if the master kill switch is set.
+	 */
+
+	set_dload_mode(download_mode &&
+			(in_panic || restart_mode == RESTART_DLOAD));
+#endif
+
+	if (qpnp_pon_check_hard_reset_stored()) {
+		/* Set warm reset as true when device is in dload mode */
+		if (get_dload_mode() ||
+			((cmd != NULL && cmd[0] != '\0') &&
+			!strcmp(cmd, "edl")))
+			need_warm_reset = true;
+	} else {
+		need_warm_reset = (get_dload_mode() ||
+				(cmd != NULL && cmd[0] != '\0'));
+	}
+
+#ifdef CONFIG_PSTORE
+	if (in_panic) {
+		pr_warn("disable SDI due to in_panic && CONFIG_PSTORE\n");
+		mdelay(200);
+		need_warm_reset = true;
+		scm_disable_sdi();
+	}
+#endif
+
+	/* Hard reset the PMIC unless memory contents must be maintained. */
+	if (need_warm_reset) {
+		qpnp_pon_system_pwr_off(PON_POWER_OFF_WARM_RESET);
+	} else {
+		qpnp_pon_system_pwr_off(PON_POWER_OFF_HARD_RESET);
+	}
+
+	if (cmd != NULL) {
+		if (!strncmp(cmd, "bootloader", 10)) {
+			qpnp_pon_set_restart_reason(
+				PON_RESTART_REASON_BOOTLOADER);
+			__raw_writel(0x77665500, restart_reason);
+		} else if (!strncmp(cmd, "recovery", 8)) {
+			qpnp_pon_set_restart_reason(
+				PON_RESTART_REASON_RECOVERY);
+			__raw_writel(0x77665502, restart_reason);
+		} else if (!strcmp(cmd, "rtc")) {
+			qpnp_pon_set_restart_reason(
+				PON_RESTART_REASON_RTC);
+			__raw_writel(0x77665503, restart_reason);
+		} else if (!strcmp(cmd, "dm-verity device corrupted")) {
+			qpnp_pon_set_restart_reason(
+				PON_RESTART_REASON_DMVERITY_CORRUPTED);
+			__raw_writel(0x77665508, restart_reason);
+		} else if (!strcmp(cmd, "dm-verity enforcing")) {
+			qpnp_pon_set_restart_reason(
+				PON_RESTART_REASON_DMVERITY_ENFORCE);
+			__raw_writel(0x77665509, restart_reason);
+		} else if (!strcmp(cmd, "keys clear")) {
+			qpnp_pon_set_restart_reason(
+				PON_RESTART_REASON_KEYS_CLEAR);
+			__raw_writel(0x7766550a, restart_reason);
+		} else if (!strncmp(cmd, "oem-", 4)) {
+			unsigned long code;
+			unsigned long reset_reason;
+			int ret;
+			ret = kstrtoul(cmd + 4, 16, &code);
+			if (!ret) {
+				/* Bit-2 to bit-7 of SOFT_RB_SPARE for hard
+				 * reset reason:
+				 * Value 0 to 31 for common defined features
+				 * Value 32 to 63 for oem specific features
+				 */
+				reset_reason = code +
+						PON_RESTART_REASON_OEM_MIN;
+				if (reset_reason > PON_RESTART_REASON_OEM_MAX ||
+				   reset_reason < PON_RESTART_REASON_OEM_MIN) {
+					pr_err("Invalid oem reset reason: %lx\n",
+						reset_reason);
+				} else {
+					qpnp_pon_set_restart_reason(
+						reset_reason);
+				}
+				__raw_writel(0x6f656d00 | (code & 0xff),
+					     restart_reason);
+			}
+		} else if (!strncmp(cmd, "edl", 3)) {
+			enable_emergency_dload_mode();
+		} else {
+			__raw_writel(0x77665501, restart_reason);
+		}
+	}
+
+	flush_cache_all();
+
+	/* outer_flush_all is not supported by the 64-bit kernel */
+#ifndef CONFIG_ARM64
+	outer_flush_all();
+#endif
+
+}
+
+/*
+ * Deassert PS_HOLD to signal the PMIC that we are ready to power down or reset.
+ * Do this by calling into the secure environment, if available, or by directly
+ * writing to a hardware register.
+ *
+ * This function should never return.
+ */
+static void deassert_ps_hold(void)
+{
+	struct scm_desc desc = {
+		.args[0] = 0,
+		.arginfo = SCM_ARGS(1),
 };
 
+	if (scm_deassert_ps_hold_supported) {
+		/* This call will be available on ARMv8 only */
+		scm_call2_atomic(SCM_SIP_FNID(SCM_SVC_PWR,
+				 SCM_IO_DEASSERT_PS_HOLD), &desc);
+	}
+
+	/* Fall-through to the direct write in case the scm_call "returns" */
+	__raw_writel(0, msm_ps_hold);
+}
+
+static void do_msm_restart(enum reboot_mode reboot_mode, const char *cmd)
+{
+	pr_notice("Going down for restart now\n");
+
+	msm_restart_prepare(cmd);
+
+#ifdef CONFIG_QCOM_DLOAD_MODE
+	/*
+	 * Trigger a watchdog bite here and if this fails,
+	 * device will take the usual restart path.
+	 */
+
+	if (WDOG_BITE_ON_PANIC && in_panic)
+		msm_trigger_wdog_bite();
+#endif
+
+	scm_disable_sdi();
+	halt_spmi_pmic_arbiter();
+	deassert_ps_hold();
+
+	mdelay(10000);
+}
+
 static void do_msm_poweroff(void)
 {
-	/* TODO: Add poweroff capability */
-	do_msm_restart(&restart_nb, 0, NULL);
+	pr_notice("Powering off the SoC\n");
+
+	set_dload_mode(0);
+	scm_disable_sdi();
+	qpnp_pon_system_pwr_off(PON_POWER_OFF_SHUTDOWN);
+
+	halt_spmi_pmic_arbiter();
+	deassert_ps_hold();
+
+	mdelay(10000);
+	pr_err("Powering off has failed\n");
+	return;
+}
+
+#ifdef CONFIG_QCOM_DLOAD_MODE
+static ssize_t attr_show(struct kobject *kobj, struct attribute *attr,
+				char *buf)
+{
+	struct reset_attribute *reset_attr = to_reset_attr(attr);
+	ssize_t ret = -EIO;
+
+	if (reset_attr->show)
+		ret = reset_attr->show(kobj, attr, buf);
+
+	return ret;
+}
+
+static ssize_t attr_store(struct kobject *kobj, struct attribute *attr,
+				const char *buf, size_t count)
+{
+	struct reset_attribute *reset_attr = to_reset_attr(attr);
+	ssize_t ret = -EIO;
+
+	if (reset_attr->store)
+		ret = reset_attr->store(kobj, attr, buf, count);
+
+	return ret;
+}
+
+static const struct sysfs_ops reset_sysfs_ops = {
+	.show	= attr_show,
+	.store	= attr_store,
+};
+
+static struct kobj_type reset_ktype = {
+	.sysfs_ops	= &reset_sysfs_ops,
+};
+
+static ssize_t show_emmc_dload(struct kobject *kobj, struct attribute *attr,
+				char *buf)
+{
+	uint32_t read_val, show_val;
+
+	read_val = __raw_readl(dload_type_addr);
+	if (read_val == EMMC_DLOAD_TYPE)
+		show_val = 1;
+	else
+		show_val = 0;
+
+	return scnprintf(buf, PAGE_SIZE, "%u\n", show_val);
+}
+
+static size_t store_emmc_dload(struct kobject *kobj, struct attribute *attr,
+				const char *buf, size_t count)
+{
+	uint32_t enabled;
+	int ret;
+
+	ret = kstrtouint(buf, 0, &enabled);
+	if (ret < 0)
+		return ret;
+
+	if (!((enabled == 0) || (enabled == 1)))
+		return -EINVAL;
+
+	if (enabled == 1)
+		__raw_writel(EMMC_DLOAD_TYPE, dload_type_addr);
+	else
+		__raw_writel(0, dload_type_addr);
+
+	return count;
+}
+
+#ifdef CONFIG_QCOM_MINIDUMP
+
+static DEFINE_MUTEX(tcsr_lock);
+
+static ssize_t show_dload_mode(struct kobject *kobj, struct attribute *attr,
+				char *buf)
+{
+	return scnprintf(buf, PAGE_SIZE, "DLOAD dump type: %s\n",
+		(dload_type == SCM_DLOAD_BOTHDUMPS) ? "both" :
+		((dload_type == SCM_DLOAD_MINIDUMP) ? "mini" : "full"));
+}
+
+static size_t store_dload_mode(struct kobject *kobj, struct attribute *attr,
+				const char *buf, size_t count)
+{
+	if (sysfs_streq(buf, "full")) {
+		dload_type = SCM_DLOAD_FULLDUMP;
+	} else if (sysfs_streq(buf, "mini")) {
+		if (!minidump_enabled) {
+			pr_err("Minidump is not enabled\n");
+			return -ENODEV;
+		}
+		dload_type = SCM_DLOAD_MINIDUMP;
+	} else if (sysfs_streq(buf, "both")) {
+		if (!minidump_enabled) {
+			pr_err("Minidump not enabled, setting fulldump only\n");
+			dload_type = SCM_DLOAD_FULLDUMP;
+			return count;
+		}
+		dload_type = SCM_DLOAD_BOTHDUMPS;
+	} else {
+		pr_err("Invalid dump setup request\n");
+		pr_err("Supported dumps: 'full', 'mini', or 'both'\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&tcsr_lock);
+	/* Overwrite TCSR reg */
+	set_dload_mode(dload_type);
+	mutex_unlock(&tcsr_lock);
+	return count;
 }
+RESET_ATTR(dload_mode, 0644, show_dload_mode, store_dload_mode);
+#endif
+
+RESET_ATTR(emmc_dload, 0644, show_emmc_dload, store_emmc_dload);
+
+static struct attribute *reset_attrs[] = {
+	&reset_attr_emmc_dload.attr,
+#ifdef CONFIG_QCOM_MINIDUMP
+	&reset_attr_dload_mode.attr,
+#endif
+	NULL
+};
+
+static struct attribute_group reset_attr_group = {
+	.attrs = reset_attrs,
+};
+#endif
 
 static int msm_restart_probe(struct platform_device *pdev)
 {
 	struct device *dev = &pdev->dev;
 	struct resource *mem;
+	struct device_node *np;
+	int ret = 0;
 
-	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
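+	/* hook the panic notifier chain (handled by panic_blk) */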
+	atomic_notifier_chain_register(&panic_notifier_list, &panic_blk);
+
+#ifdef CONFIG_QCOM_DLOAD_MODE
+	if (scm_is_call_available(SCM_SVC_BOOT, SCM_DLOAD_CMD) > 0)
+		scm_dload_supported = true;
+
+	np = of_find_compatible_node(NULL, NULL, DL_MODE_PROP);
+	if (!np) {
+		pr_err("unable to find DT imem DLOAD mode node\n");
+	} else {
+		dload_mode_addr = of_iomap(np, 0);
+		if (!dload_mode_addr)
+			pr_err("unable to map imem DLOAD offset\n");
+	}
+
+	np = of_find_compatible_node(NULL, NULL, EDL_MODE_PROP);
+	if (!np) {
+		pr_err("unable to find DT imem EDLOAD mode node\n");
+	} else {
+		emergency_dload_mode_addr = of_iomap(np, 0);
+		if (!emergency_dload_mode_addr)
+			pr_err("unable to map imem EDLOAD mode offset\n");
+	}
+
+#ifdef CONFIG_RANDOMIZE_BASE
+#define KASLR_OFFSET_BIT_MASK	0x00000000FFFFFFFF
+	np = of_find_compatible_node(NULL, NULL, KASLR_OFFSET_PROP);
+	if (!np) {
+		pr_err("unable to find DT imem KASLR_OFFSET node\n");
+	} else {
+		kaslr_imem_addr = of_iomap(np, 0);
+		if (!kaslr_imem_addr)
+			pr_err("unable to map imem KASLR offset\n");
+	}
+
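+	/*
+	 * Publish a magic cookie followed by the 64-bit KASLR offset, split
+	 * into two 32-bit words, presumably for post-mortem/ramdump tools.
+	 */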
+	if (kaslr_imem_addr) {
+		__raw_writel(0xdead4ead, kaslr_imem_addr);
+		__raw_writel(KASLR_OFFSET_BIT_MASK &
+			     (kimage_vaddr - KIMAGE_VADDR), kaslr_imem_addr + 4);
+		__raw_writel(KASLR_OFFSET_BIT_MASK &
+			((kimage_vaddr - KIMAGE_VADDR) >> 32),
+			kaslr_imem_addr + 8);
+		iounmap(kaslr_imem_addr);
+	}
+#endif
+
+	np = of_find_compatible_node(NULL, NULL,
+				"qcom,msm-imem-dload-type");
+	if (!np) {
+		pr_err("unable to find DT imem dload-type node\n");
+		goto skip_sysfs_create;
+	}
+
+	dload_type_addr = of_iomap(np, 0);
+	if (!dload_type_addr) {
+		pr_err("unable to map imem dload-type offset\n");
+		goto skip_sysfs_create;
+	}
+
+	ret = kobject_init_and_add(&dload_kobj, &reset_ktype,
+			kernel_kobj, "%s", "dload");
+	if (ret) {
+		pr_err("%s:Error in creation kobject_add\n", __func__);
+		kobject_put(&dload_kobj);
+		goto skip_sysfs_create;
+	}
+
+	ret = sysfs_create_group(&dload_kobj, &reset_attr_group);
+	if (ret) {
+		pr_err("%s:Error in creation sysfs_create_group\n", __func__);
+		kobject_del(&dload_kobj);
+	}
+skip_sysfs_create:
+#endif
+	np = of_find_compatible_node(NULL, NULL,
+				"qcom,msm-imem-restart_reason");
+	if (!np) {
+		pr_err("unable to find DT imem restart reason node\n");
+	} else {
+		restart_reason = of_iomap(np, 0);
+		if (!restart_reason) {
+			pr_err("unable to map imem restart reason offset\n");
+			ret = -ENOMEM;
+			goto err_restart_reason;
+		}
+	}
+
+	mem = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pshold-base");
 	msm_ps_hold = devm_ioremap_resource(dev, mem);
 	if (IS_ERR(msm_ps_hold))
 		return PTR_ERR(msm_ps_hold);
 
-	register_restart_handler(&restart_nb);
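+	/* optional: tcsr_boot_misc_detect stays 0 if the resource is absent */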
+	mem = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+					   "tcsr-boot-misc-detect");
+	if (mem)
+		tcsr_boot_misc_detect = mem->start;
 
 	pm_power_off = do_msm_poweroff;
+	arm_pm_restart = do_msm_restart;
+
+	if (scm_is_call_available(SCM_SVC_PWR, SCM_IO_DISABLE_PMIC_ARBITER) > 0)
+		scm_pmic_arbiter_disable_supported = true;
 
+	if (scm_is_call_available(SCM_SVC_PWR, SCM_IO_DEASSERT_PS_HOLD) > 0)
+		scm_deassert_ps_hold_supported = true;
+
+#ifdef CONFIG_QCOM_DLOAD_MODE
+	set_dload_mode(download_mode);
+	if (!download_mode)
+		scm_disable_sdi();
+#endif
 	return 0;
+
+err_restart_reason:
+#ifdef CONFIG_QCOM_DLOAD_MODE
+	iounmap(emergency_dload_mode_addr);
+	iounmap(dload_mode_addr);
+#endif
+	return ret;
 }
 
 static const struct of_device_id of_msm_restart_match[] = {
@@ -78,4 +715,4 @@
 {
 	return platform_driver_register(&msm_restart_driver);
 }
-device_initcall(msm_restart_init);
+pure_initcall(msm_restart_init);
diff -ruw linux-4.4.115/drivers/pwm/Kconfig linux-4.4.115-fbx/drivers/pwm/Kconfig
--- linux-4.4.115/drivers/pwm/Kconfig	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/pwm/Kconfig	2019-01-22 16:16:26.243271219 +0100
@@ -309,6 +309,16 @@
 	  To compile this driver as a module, choose M here: the module
 	  will be called pwm-rcar.
 
+config PWM_QPNP
+	tristate "Qualcomm Technologies, Inc. QPNP LPG/PWM support"
+	depends on SPMI
+	help
+	  This driver supports PWM/LPG devices in Qualcomm Technologies, Inc.
+	  PMIC chips which comply with QPNP.  QPNP is an SPMI based PMIC
+	  implementation.  These devices support Pulse Width Modulation output
+	  with user-generated patterns. They share a lookup table with
+	  64 entries.
+
 config PWM_RENESAS_TPU
 	tristate "Renesas TPU PWM support"
 	depends on ARCH_SHMOBILE || COMPILE_TEST
diff -ruw linux-4.4.115/drivers/pwm/Makefile linux-4.4.115-fbx/drivers/pwm/Makefile
--- linux-4.4.115/drivers/pwm/Makefile	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/pwm/Makefile	2019-01-22 16:16:26.243271219 +0100
@@ -28,6 +28,7 @@
 obj-$(CONFIG_PWM_PUV3)		+= pwm-puv3.o
 obj-$(CONFIG_PWM_PXA)		+= pwm-pxa.o
 obj-$(CONFIG_PWM_RCAR)		+= pwm-rcar.o
+obj-$(CONFIG_PWM_QPNP)		+= pwm-qpnp.o
 obj-$(CONFIG_PWM_RENESAS_TPU)	+= pwm-renesas-tpu.o
 obj-$(CONFIG_PWM_ROCKCHIP)	+= pwm-rockchip.o
 obj-$(CONFIG_PWM_SAMSUNG)	+= pwm-samsung.o
diff -ruw linux-4.4.115/drivers/regulator/core.c linux-4.4.115-fbx/drivers/regulator/core.c
--- linux-4.4.115/drivers/regulator/core.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/regulator/core.c	2019-01-22 16:16:26.259271364 +0100
@@ -27,6 +27,8 @@
 #include <linux/gpio/consumer.h>
 #include <linux/of.h>
 #include <linux/regmap.h>
+#include <linux/seq_file.h>
+#include <linux/uaccess.h>
 #include <linux/regulator/of_regulator.h>
 #include <linux/regulator/consumer.h>
 #include <linux/regulator/driver.h>
@@ -224,6 +226,15 @@
 		return -EPERM;
 	}
 
+	/* check if requested voltage range actually overlaps the constraints */
+	if (*max_uV < rdev->constraints->min_uV ||
+	    *min_uV > rdev->constraints->max_uV) {
+		rdev_err(rdev, "requested voltage range [%d, %d] does not fit "
+			"within constraints: [%d, %d]\n", *min_uV, *max_uV,
+			rdev->constraints->min_uV, rdev->constraints->max_uV);
+		return -EINVAL;
+	}
+
 	if (*max_uV > rdev->constraints->max_uV)
 		*max_uV = rdev->constraints->max_uV;
 	if (*min_uV < rdev->constraints->min_uV)
@@ -245,6 +256,8 @@
 				     int *min_uV, int *max_uV)
 {
 	struct regulator *regulator;
+	int init_min_uV = *min_uV;
+	int init_max_uV = *max_uV;
 
 	list_for_each_entry(regulator, &rdev->consumer_list, list) {
 		/*
@@ -254,6 +267,13 @@
 		if (!regulator->min_uV && !regulator->max_uV)
 			continue;
 
+		if (init_max_uV < regulator->min_uV
+		    || init_min_uV > regulator->max_uV)
+			rdev_err(rdev, "requested voltage range [%d, %d] does "
+				"not fit within previously voted range: "
+				"[%d, %d]\n", init_min_uV, init_max_uV,
+				regulator->min_uV, regulator->max_uV);
+
 		if (*max_uV > regulator->max_uV)
 			*max_uV = regulator->max_uV;
 		if (*min_uV < regulator->min_uV)
@@ -337,7 +357,7 @@
 static int regulator_check_drms(struct regulator_dev *rdev)
 {
 	if (!rdev->constraints) {
-		rdev_err(rdev, "no constraints\n");
+		rdev_dbg(rdev, "no constraints\n");
 		return -ENODEV;
 	}
 	if (!(rdev->constraints->valid_ops_mask & REGULATOR_CHANGE_DRMS)) {
@@ -684,7 +704,7 @@
 {
 	struct regulator *sibling;
 	int current_uA = 0, output_uV, input_uV, err;
-	unsigned int mode;
+	unsigned int regulator_curr_mode, mode;
 
 	lockdep_assert_held_once(&rdev->mutex);
 
@@ -745,6 +765,14 @@
 				 current_uA, input_uV, output_uV);
 			return err;
 		}
+		/* return if the same mode is requested */
+		if (rdev->desc->ops->get_mode) {
+			regulator_curr_mode = rdev->desc->ops->get_mode(rdev);
+			if (regulator_curr_mode == mode)
+				return 0;
+		} else {
+			return 0;
+		}
 
 		err = rdev->desc->ops->set_mode(rdev, mode);
 		if (err < 0)
@@ -1514,16 +1542,6 @@
 		return ret;
 	}
 
-	/* Cascade always-on state to supply */
-	if (_regulator_is_enabled(rdev) && rdev->supply) {
-		ret = regulator_enable(rdev->supply);
-		if (ret < 0) {
-			_regulator_put(rdev->supply);
-			rdev->supply = NULL;
-			return ret;
-		}
-	}
-
 	return 0;
 }
 
@@ -2110,6 +2128,8 @@
 			if (ret < 0)
 				return ret;
 
+			_notifier_call_chain(rdev, REGULATOR_EVENT_ENABLE,
+						NULL);
 		} else if (ret < 0) {
 			rdev_err(rdev, "is_enabled() failed: %d\n", ret);
 			return ret;
@@ -2148,7 +2168,11 @@
 	}
 
 	mutex_lock(&rdev->mutex);
+
 	ret = _regulator_enable(rdev);
+	if (ret == 0)
+		regulator->enabled++;
+
 	mutex_unlock(&rdev->mutex);
 
 	if (ret != 0 && rdev->supply)
@@ -2260,6 +2284,8 @@
 
 	mutex_lock(&rdev->mutex);
 	ret = _regulator_disable(rdev);
+	if (ret == 0)
+		regulator->enabled--;
 	mutex_unlock(&rdev->mutex);
 
 	if (ret == 0 && rdev->supply)
@@ -2335,6 +2361,14 @@
 	count = rdev->deferred_disables;
 	rdev->deferred_disables = 0;
 
+	/*
+	 * Workqueue functions queue the new work instance while the previous
+	 * work instance is being processed. Cancel the queued work instance
+	 * as the work instance under processing does the job of the queued
+	 * work instance.
+	 */
+	cancel_delayed_work(&rdev->disable_work);
+
 	for (i = 0; i < count; i++) {
 		ret = _regulator_disable(rdev);
 		if (ret != 0)
@@ -2369,7 +2403,6 @@
 int regulator_disable_deferred(struct regulator *regulator, int ms)
 {
 	struct regulator_dev *rdev = regulator->rdev;
-	int ret;
 
 	if (regulator->always_on)
 		return 0;
@@ -2379,14 +2412,10 @@
 
 	mutex_lock(&rdev->mutex);
 	rdev->deferred_disables++;
+	mod_delayed_work(system_power_efficient_wq, &rdev->disable_work,
+			 msecs_to_jiffies(ms));
 	mutex_unlock(&rdev->mutex);
 
-	ret = queue_delayed_work(system_power_efficient_wq,
-				 &rdev->disable_work,
-				 msecs_to_jiffies(ms));
-	if (ret < 0)
-		return ret;
-	else
 		return 0;
 }
 EXPORT_SYMBOL_GPL(regulator_disable_deferred);
@@ -2603,6 +2632,40 @@
 EXPORT_SYMBOL_GPL(regulator_list_hardware_vsel);
 
 /**
+ * regulator_list_corner_voltage - return the maximum voltage in microvolts that
+ *	can be physically configured for the regulator when operating at the
+ *	specified voltage corner
+ * @regulator: regulator source
+ * @corner: voltage corner value
+ * Context: can sleep
+ *
+ * This function can be used for regulators which allow scaling between
+ * different voltage corners as opposed to between absolute voltages.  The
+ * absolute voltage for a given corner may vary part-to-part or for a given part
+ * at runtime based upon various factors.
+ *
+ * Returns a voltage corresponding to the specified voltage corner or a negative
+ * errno if the corner value can't be used on this system.
+ */
+int regulator_list_corner_voltage(struct regulator *regulator, int corner)
+{
+	struct regulator_dev *rdev = regulator->rdev;
+	int ret;
+
+	if (corner < rdev->constraints->min_uV ||
+	    corner > rdev->constraints->max_uV ||
+	    !rdev->desc->ops->list_corner_voltage)
+		return -EINVAL;
+
+	mutex_lock(&rdev->mutex);
+	ret = rdev->desc->ops->list_corner_voltage(rdev, corner);
+	mutex_unlock(&rdev->mutex);
+
+	return ret;
+}
+EXPORT_SYMBOL(regulator_list_corner_voltage);
+
+/**
  * regulator_get_linear_step - return the voltage step size between VSEL values
  * @regulator: regulator source
  *
@@ -2871,7 +2934,8 @@
 		goto out2;
 
 	if (rdev->supply && (rdev->desc->min_dropout_uV ||
-				!rdev->desc->ops->get_voltage)) {
+				!(rdev->desc->ops->get_voltage ||
+					rdev->desc->ops->get_voltage_sel))) {
 		int current_supply_uV;
 		int selector;
 
@@ -3363,7 +3427,8 @@
 	if (enable && !regulator->bypass) {
 		rdev->bypass_count++;
 
-		if (rdev->bypass_count == rdev->open_count) {
+		if (rdev->bypass_count == rdev->open_count -
+		    rdev->open_offset) {
 			ret = rdev->desc->ops->set_bypass(rdev, enable);
 			if (ret != 0)
 				rdev->bypass_count--;
@@ -3372,7 +3437,8 @@
 	} else if (!enable && regulator->bypass) {
 		rdev->bypass_count--;
 
-		if (rdev->bypass_count != rdev->open_count) {
+		if (rdev->bypass_count != rdev->open_count -
+		    rdev->open_offset) {
 			ret = rdev->desc->ops->set_bypass(rdev, enable);
 			if (ret != 0)
 				rdev->bypass_count++;
@@ -3533,6 +3599,42 @@
 EXPORT_SYMBOL_GPL(regulator_bulk_enable);
 
 /**
+ * regulator_bulk_set_voltage - set voltage for multiple regulator consumers
+ *
+ * @num_consumers: Number of consumers
+ * @consumers:     Consumer data; clients are stored here.
+ * @return         0 on success, an errno on failure
+ *
+ * This convenience API allows the voted voltage ranges of multiple regulator
+ * clients to be set in a single API call. If any consumers cannot have their
+ * voltages set, this function returns WITHOUT withdrawing votes for any
+ * consumers that have already been set.
+ */
+int regulator_bulk_set_voltage(int num_consumers,
+			       struct regulator_bulk_data *consumers)
+{
+	int i;
+	int rc;
+
+	for (i = 0; i < num_consumers; i++) {
+		if (!consumers[i].min_uV && !consumers[i].max_uV)
+			continue;
+		rc = regulator_set_voltage(consumers[i].consumer,
+				consumers[i].min_uV,
+				consumers[i].max_uV);
+		if (rc)
+			goto err;
+	}
+
+	return 0;
+
+err:
+	pr_err("Failed to set voltage for %s: %d\n", consumers[i].supply, rc);
+	return rc;
+}
+EXPORT_SYMBOL_GPL(regulator_bulk_set_voltage);
+
+/**
  * regulator_bulk_disable - disable multiple regulator consumers
  *
  * @num_consumers: Number of consumers
@@ -3800,11 +3902,269 @@
 	.dev_groups = regulator_dev_groups,
 };
 
+#ifdef CONFIG_DEBUG_FS
+
+static int reg_debug_enable_set(void *data, u64 val)
+{
+	struct regulator *regulator = data;
+	int ret;
+
+	if (val) {
+		ret = regulator_enable(regulator);
+		if (ret)
+			rdev_err(regulator->rdev, "enable failed, ret=%d\n",
+				ret);
+	} else {
+		ret = regulator_disable(regulator);
+		if (ret)
+			rdev_err(regulator->rdev, "disable failed, ret=%d\n",
+				ret);
+	}
+
+	return ret;
+}
+
+static int reg_debug_enable_get(void *data, u64 *val)
+{
+	struct regulator *regulator = data;
+
+	*val = regulator_is_enabled(regulator);
+
+	return 0;
+}
+DEFINE_SIMPLE_ATTRIBUTE(reg_enable_fops, reg_debug_enable_get,
+			reg_debug_enable_set, "%llu\n");
+
+static int reg_debug_bypass_enable_get(void *data, u64 *val)
+{
+	struct regulator *regulator = data;
+	struct regulator_dev *rdev = regulator->rdev;
+	bool enable = false;
+	int ret = 0;
+
+	mutex_lock(&rdev->mutex);
+	if (rdev->desc->ops->get_bypass) {
+		ret = rdev->desc->ops->get_bypass(rdev, &enable);
+		if (ret)
+			rdev_err(rdev, "get_bypass() failed, ret=%d\n", ret);
+	} else {
+		enable = (rdev->bypass_count == rdev->open_count
+			  - rdev->open_offset);
+	}
+	mutex_unlock(&rdev->mutex);
+
+	*val = enable;
+
+	return ret;
+}
+
+static int reg_debug_bypass_enable_set(void *data, u64 val)
+{
+	struct regulator *regulator = data;
+	struct regulator_dev *rdev = regulator->rdev;
+	int ret = 0;
+
+	mutex_lock(&rdev->mutex);
+	rdev->open_offset = 0;
+	mutex_unlock(&rdev->mutex);
+
+	ret = regulator_allow_bypass(data, val);
+
+	return ret;
+}
+DEFINE_SIMPLE_ATTRIBUTE(reg_bypass_enable_fops, reg_debug_bypass_enable_get,
+			reg_debug_bypass_enable_set, "%llu\n");
+
+static int reg_debug_force_disable_set(void *data, u64 val)
+{
+	struct regulator *regulator = data;
+	int ret = 0;
+
+	if (val > 0) {
+		ret = regulator_force_disable(regulator);
+		if (ret)
+			rdev_err(regulator->rdev, "force_disable failed, ret=%d\n",
+				ret);
+	}
+
+	return ret;
+}
+DEFINE_SIMPLE_ATTRIBUTE(reg_force_disable_fops, reg_debug_enable_get,
+			reg_debug_force_disable_set, "%llu\n");
+
+#define MAX_DEBUG_BUF_LEN 50
+
+static ssize_t reg_debug_voltage_write(struct file *file,
+			const char __user *ubuf, size_t count, loff_t *ppos)
+{
+	struct regulator *regulator = file->private_data;
+	char buf[MAX_DEBUG_BUF_LEN];
+	int ret, filled;
+	int min_uV, max_uV = -1;
+
+	if (count < MAX_DEBUG_BUF_LEN) {
+		if (copy_from_user(buf, ubuf, count))
+			return -EFAULT;
+
+		buf[count] = '\0';
+		filled = sscanf(buf, "%d %d", &min_uV, &max_uV);
+
+		/* Check that both min and max voltage were specified. */
+		if (filled < 2 || min_uV < 0 || max_uV < min_uV) {
+			rdev_err(regulator->rdev, "incorrect values specified: \"%s\"; should be: \"min_uV max_uV\"\n",
+				buf);
+			return -EINVAL;
+		}
+
+		ret = regulator_set_voltage(regulator, min_uV, max_uV);
+		if (ret) {
+			rdev_err(regulator->rdev, "set voltage(%d, %d) failed, ret=%d\n",
+				min_uV, max_uV, ret);
+			return ret;
+		}
+	} else {
+		rdev_err(regulator->rdev, "voltage request string exceeds maximum buffer size\n");
+		return -EINVAL;
+	}
+
+	return count;
+}
+
+static ssize_t reg_debug_voltage_read(struct file *file, char __user *ubuf,
+					size_t count, loff_t *ppos)
+{
+	struct regulator *regulator = file->private_data;
+	char buf[MAX_DEBUG_BUF_LEN];
+	int voltage, ret;
+
+	voltage = regulator_get_voltage(regulator);
+
+	ret = snprintf(buf, MAX_DEBUG_BUF_LEN - 1, "%d\n", voltage);
+
+	return simple_read_from_buffer(ubuf, count, ppos, buf, ret);
+}
+
+static int reg_debug_voltage_open(struct inode *inode, struct file *file)
+{
+	file->private_data = inode->i_private;
+
+	return 0;
+}
+
+static const struct file_operations reg_voltage_fops = {
+	.write	= reg_debug_voltage_write,
+	.open   = reg_debug_voltage_open,
+	.read	= reg_debug_voltage_read,
+};
+
+static int reg_debug_mode_set(void *data, u64 val)
+{
+	struct regulator *regulator = data;
+	unsigned int mode = val;
+	int ret;
+
+	ret = regulator_set_mode(regulator, mode);
+	if (ret)
+		rdev_err(regulator->rdev, "set mode=%u failed, ret=%d\n",
+			mode, ret);
+
+	return ret;
+}
+
+static int reg_debug_mode_get(void *data, u64 *val)
+{
+	struct regulator *regulator = data;
+	int mode;
+
+	mode = regulator_get_mode(regulator);
+	if (mode < 0) {
+		rdev_err(regulator->rdev, "get mode failed, ret=%d\n", mode);
+		return mode;
+	}
+
+	*val = mode;
+
+	return 0;
+}
+DEFINE_SIMPLE_ATTRIBUTE(reg_mode_fops, reg_debug_mode_get, reg_debug_mode_set,
+			"%llu\n");
+
+static int reg_debug_set_load(void *data, u64 val)
+{
+	struct regulator *regulator = data;
+	int load = val;
+	int ret;
+
+	ret = regulator_set_load(regulator, load);
+	if (ret)
+		rdev_err(regulator->rdev, "set load=%d failed, ret=%d\n",
+			load, ret);
+
+	return ret;
+}
+DEFINE_SIMPLE_ATTRIBUTE(reg_set_load_fops, reg_debug_mode_get,
+			reg_debug_set_load, "%llu\n");
+
+static int reg_debug_consumers_show(struct seq_file *m, void *v)
+{
+	struct regulator_dev *rdev = m->private;
+	struct regulator *reg;
+	char *supply_name;
+
+	mutex_lock(&rdev->mutex);
+
+	/* Print a header if there are consumers. */
+	if (rdev->open_count)
+		seq_printf(m, "%-32s EN    Min_uV   Max_uV  load_uA\n",
+			"Device-Supply");
+
+	list_for_each_entry(reg, &rdev->consumer_list, list) {
+		if (reg->supply_name)
+			supply_name = reg->supply_name;
+		else
+			supply_name = "(null)-(null)";
+
+		seq_printf(m, "%-32s %c   %8d %8d %8d\n", supply_name,
+			(reg->enabled ? 'Y' : 'N'), reg->min_uV, reg->max_uV,
+			reg->uA_load);
+	}
+
+	mutex_unlock(&rdev->mutex);
+
+	return 0;
+}
+
+static int reg_debug_consumers_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, reg_debug_consumers_show, inode->i_private);
+}
+
+static const struct file_operations reg_consumers_fops = {
+	.owner		= THIS_MODULE,
+	.open		= reg_debug_consumers_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+
+static void rdev_deinit_debugfs(struct regulator_dev *rdev)
+{
+	if (!IS_ERR_OR_NULL(rdev)) {
+		debugfs_remove_recursive(rdev->debugfs);
+		if (rdev->debug_consumer)
+			rdev->debug_consumer->debugfs = NULL;
+		regulator_put(rdev->debug_consumer);
+	}
+}
+
 static void rdev_init_debugfs(struct regulator_dev *rdev)
 {
 	struct device *parent = rdev->dev.parent;
 	const char *rname = rdev_get_name(rdev);
 	char name[NAME_MAX];
+	struct regulator *regulator;
+	const struct regulator_ops *ops;
+	mode_t mode;
 
 	/* Avoid duplicate debugfs directory names */
 	if (parent && rname == rdev->desc->name) {
@@ -3825,8 +4185,75 @@
 			   &rdev->open_count);
 	debugfs_create_u32("bypass_count", 0444, rdev->debugfs,
 			   &rdev->bypass_count);
+	debugfs_create_file("consumers", 0444, rdev->debugfs, rdev,
+			    &reg_consumers_fops);
+
+	regulator = regulator_get(NULL, rdev_get_name(rdev));
+	if (IS_ERR(regulator)) {
+		rdev_err(rdev, "regulator get failed, ret=%ld\n",
+			PTR_ERR(regulator));
+		return;
+	}
+	rdev->debug_consumer = regulator;
+
+	rdev->open_offset = 1;
+	ops = rdev->desc->ops;
+
+	debugfs_create_file("enable", 0644, rdev->debugfs, regulator,
+				&reg_enable_fops);
+	if (ops->set_bypass)
+		debugfs_create_file("bypass", 0644, rdev->debugfs, regulator,
+					&reg_bypass_enable_fops);
+
+	mode = 0;
+	if (ops->is_enabled)
+		mode |= 0444;
+	if (ops->disable)
+		mode |= 0200;
+	if (mode)
+		debugfs_create_file("force_disable", mode, rdev->debugfs,
+					regulator, &reg_force_disable_fops);
+
+	mode = 0;
+	if (ops->get_voltage || ops->get_voltage_sel)
+		mode |= 0444;
+	if (ops->set_voltage || ops->set_voltage_sel)
+		mode |= 0200;
+	if (mode)
+		debugfs_create_file("voltage", mode, rdev->debugfs, regulator,
+					&reg_voltage_fops);
+
+	mode = 0;
+	if (ops->get_mode)
+		mode |= 0444;
+	if (ops->set_mode)
+		mode |= 0200;
+	if (mode)
+		debugfs_create_file("mode", mode, rdev->debugfs, regulator,
+					&reg_mode_fops);
+
+	mode = 0;
+	if (ops->get_mode)
+		mode |= 0444;
+	if (ops->set_load || (ops->get_optimum_mode && ops->set_mode))
+		mode |= 0200;
+	if (mode)
+		debugfs_create_file("load", mode, rdev->debugfs, regulator,
+					&reg_set_load_fops);
+}
+
+#else
+
+static inline void rdev_deinit_debugfs(struct regulator_dev *rdev)
+{
+}
+
+static inline void rdev_init_debugfs(struct regulator_dev *rdev)
+{
 }
 
+#endif
+
 /**
  * regulator_register - register regulator
  * @regulator_desc: regulator to register
@@ -3972,7 +4399,13 @@
 		}
 	}
 
+	mutex_unlock(&regulator_list_mutex);
 	rdev_init_debugfs(rdev);
+	rdev->proxy_consumer = regulator_proxy_consumer_register(dev,
+							config->of_node);
+	kfree(config);
+	return rdev;
+
 out:
 	mutex_unlock(&regulator_list_mutex);
 	kfree(config);
@@ -4013,8 +4446,9 @@
 			regulator_disable(rdev->supply);
 		regulator_put(rdev->supply);
 	}
+	regulator_proxy_consumer_unregister(rdev->proxy_consumer);
+	rdev_deinit_debugfs(rdev);
 	mutex_lock(&regulator_list_mutex);
-	debugfs_remove_recursive(rdev->debugfs);
 	flush_work(&rdev->disable_work.work);
 	WARN_ON(rdev->open_count);
 	unset_regulator_supplies(rdev);
diff -ruw linux-4.4.115/drivers/regulator/internal.h linux-4.4.115-fbx/drivers/regulator/internal.h
--- linux-4.4.115/drivers/regulator/internal.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/regulator/internal.h	2019-01-22 16:16:26.267271436 +0100
@@ -29,6 +29,7 @@
 	int uA_load;
 	int min_uV;
 	int max_uV;
+	int enabled;
 	char *supply_name;
 	struct device_attribute dev_attr;
 	struct regulator_dev *rdev;
diff -ruw linux-4.4.115/drivers/regulator/Kconfig linux-4.4.115-fbx/drivers/regulator/Kconfig
--- linux-4.4.115/drivers/regulator/Kconfig	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/regulator/Kconfig	2019-01-22 16:16:26.255271328 +0100
@@ -241,6 +241,16 @@
 	  input voltage supply of 2.5V to 5.5V. The output voltage is
 	  programmed through an I2C interface.
 
+config REGULATOR_MSM_GFX_LDO
+	tristate "MSM GFX LDO Regulator"
+	depends on OF
+	help
+	  This driver supports the MSM GFX (Graphics) LDO regulator. The
+	  GFX core is powered either by an internal MSM LDO or by BHS.
+	  Typically the lower voltage corners are powered by LDO and
+	  the higher ones by BHS. This driver allows for configuration of
+	  the rail between the LDO/BHS as well as the LDO voltage.
+
 config REGULATOR_GPIO
 	tristate "GPIO regulator support"
 	depends on GPIOLIB || COMPILE_TEST
@@ -334,6 +344,15 @@
 	  regulator via I2C bus. The provided regulator is suitable
 	  for PXA27x chips to control VCC_CORE and VCC_USIM voltages.
 
+config REGULATOR_MAX20010
+	tristate "Maxim MAX20010 regulator support"
+	depends on I2C
+	help
+	  This driver supports the Maxim MAX20010 switching voltage regulator
+	  (buck converter). The regulator is controlled using an I2C interface
+	  and supports two programmable voltage ranges: 0.5V to 1.27V in 10mV
+	  steps and 0.625V to 1.5875V in 12.5mV steps.
+
 config REGULATOR_MAX8649
 	tristate "Maxim 8649 voltage regulator"
 	depends on I2C
@@ -462,6 +481,15 @@
 	  This driver supports the control of different power rails of device
 	  through regulator interface.
 
+config REGULATOR_ONSEMI_NCP6335D
+	tristate "OnSemi NCP6335D regulator support"
+	depends on I2C
+	help
+	  This driver supports the OnSemi NCP6335D switching voltage regulator
+	  (buck converter). The regulator is controlled using an I2C interface
+	  and supports a programmable voltage range from 0.6V to 1.4V in steps
+	  of 6.25mV.
+
 config REGULATOR_PALMAS
 	tristate "TI Palmas PMIC Regulators"
 	depends on MFD_PALMAS
@@ -785,5 +813,195 @@
 	  This driver provides support for the voltage regulators on the
 	  WM8994 CODEC.
 
+config REGULATOR_RPM_SMD
+	bool "RPM SMD regulator driver"
+	depends on OF
+	depends on MSM_RPM_SMD
+	help
+	  Compile in support for the RPM SMD regulator driver which is used for
+	  setting voltages and other parameters of the various power rails
+	  supplied by some Qualcomm PMICs. The RPM SMD regulator driver should
+	  be used on systems which contain an RPM which communicates with the
+	  application processor over SMD.
+
+config REGULATOR_QPNP
+	tristate "Qualcomm Technologies, Inc. QPNP regulator support"
+	depends on SPMI
+	help
+	  This driver supports voltage regulators in Qualcomm Technologies, Inc.
+	  PMIC chips which comply with QPNP.  QPNP is an SPMI based PMIC
+	  implementation.  These chips provide several different varieties of
+	  LDO and switching regulators.  They also provide voltage switches and
+	  boost regulators.
+
+config REGULATOR_QPNP_LABIBB
+	tristate "Qualcomm Technologies, Inc. QPNP LAB/IBB regulator support"
+	depends on SPMI
+	help
+	  This driver supports voltage regulators in Qualcomm Technologies, Inc.
+	  PMIC chips which comply with QPNP LAB/IBB regulators. QPNP LAB and IBB
+	  are SPMI based PMIC implementations. LAB regulator can be used as a
+	  regular positive boost regulator. IBB can be used as a regular
+	  negative boost regulator. LAB/IBB regulators can also be used
+	  together for LCD or AMOLED.
+
+config REGULATOR_QPNP_LCDB
+	tristate "Qualcomm Technologies, Inc. QPNP LCDB support"
+	depends on SPMI
+	help
+	  Supports the LCDB module in the Qualcomm Technologies, Inc.
+	  QPNP PMICs. Exposes regulators to control the positive and
+	  negative voltage bias for the LCD display panel. It also
+	  allows configurability for the various bias-voltage parameters.
+
+config REGULATOR_QPNP_OLEDB
+	tristate "Qualcomm Technologies, Inc. QPNP OLEDB regulator support"
+	depends on SPMI
+	help
+	  This driver supports the OLEDB (AVDD bias) signal for AMOLED panel in
+	  Qualcomm Technologies, Inc. QPNP PMICs. It exposes the OLED voltage
+	  configuration via the regulator framework. The configurable range of
+	  this bias is 5 V to 8.1 V.
+
+config REGULATOR_SPM
+	bool "SPM regulator driver"
+	depends on SPMI
+	help
+	  Enable support for the SPM regulator driver which is used for
+	  setting voltages of processor supply regulators via the SPM module
+	  found inside of Qualcomm Technologies (QTI) chips.  The SPM regulator
+	  driver can be used on QTI SoCs where the APSS processor cores are
+	  supplied by their own PMIC regulator.
+
+config REGULATOR_CPR
+	bool "RBCPR regulator driver for APC"
+	depends on OF
+	help
+	  Compile in RBCPR (RapidBridge Core Power Reduction) driver to support
+	  the corner vote for the APC power rail. The driver takes the PTE
+	  process voltage suggestions stored in efuses as initial settings and
+	  converts each corner vote to a voltage value before writing to a
+	  voltage regulator API, such as that of the spm-regulator driver.
+
+config REGULATOR_CPR2_GFX
+	bool "RBCPR regulator driver for GFX"
+	depends on OF
+	help
+	  This driver supports the CPR (core power reduction) controller for the
+	  graphics (GFX) rail. The GFX CPR2 controller monitors the graphics voltage
+	  requirements. This driver reads initial voltage values out of hardware
+	  fuses and CPR target quotient values out of device tree.
+
+config REGULATOR_CPR3
+	bool "CPR3 regulator core support"
+	help
+	  This driver supports Core Power Reduction (CPR) version 3 controllers
+	  which are used by some Qualcomm Technologies, Inc. (QTI) SoCs to
+	  manage important voltage regulators.  CPR3 controllers are capable of
+	  monitoring several ring oscillator sensing loops simultaneously.  The
+	  CPR3 controller informs software when the silicon conditions require
+	  the supply voltage to be increased or decreased.  On certain supply
+	  rails, the CPR3 controller is able to propagate the voltage increase
+	  or decrease requests all the way to the PMIC without software
+	  involvement.
+
+config REGULATOR_CPR3_HMSS
+	bool "CPR3 regulator for HMSS"
+	depends on OF
+	select REGULATOR_CPR3
+	help
+	  This driver supports Qualcomm Technologies, Inc. HMSS application
+	  processor specific features including memory array power mux (APM)
+	  switching, two CPR3 threads which monitor the two HMSS clusters that
+	  are both powered by a shared supply, and hardware closed-loop auto
+	  voltage stepping.  This driver reads both initial voltage and CPR
+	  target quotient values out of hardware fuses.
+
+config REGULATOR_CPR3_MMSS
+	bool "RBCPR3 regulator for MMSS"
+	depends on OF
+	select REGULATOR_CPR3
+	help
+	  This driver supports Qualcomm Technologies, Inc. MMSS graphics
+	  processor specific features.  The MMSS CPR3 controller only uses one
+	  thread to monitor the MMSS voltage requirements.  This driver reads
+	  initial voltage values out of hardware fuses and CPR target quotient
+	  values out of device tree.
+
+config REGULATOR_CPR4_APSS
+	bool "CPR4 regulator for APSS"
+	depends on OF
+	select REGULATOR_CPR3
+	help
+	  This driver supports Qualcomm Technologies, Inc. APSS application
+	  processor specific features including memory array power mux (APM)
+	  switching, one CPR4 thread which monitors the two APSS clusters that
+	  are both powered by a shared supply, hardware closed-loop auto
+	  voltage stepping, voltage adjustments based on online core count,
+	  voltage adjustments based on temperature readings, and voltage
+	  adjustments for performance boost mode. This driver reads both initial
+	  voltage and CPR target quotient values out of hardware fuses.
+
+config REGULATOR_CPRH_KBSS
+	bool "CPRH regulator for KBSS"
+	depends on OF
+	select REGULATOR_CPR3
+	help
+	  This driver supports Qualcomm Technologies, Inc. KBSS application
+	  processor specific features including CPR hardening (CPRh) and two
+	  CPRh controllers which monitor the two KBSS clusters each powered by
+	  independent voltage supplies. This driver reads both initial voltage
+	  and CPR target quotient values out of hardware fuses.
+
+config REGULATOR_CPR4_MMSS_LDO
+	bool "RBCPR3 regulator for MMSS LDO"
+	depends on OF
+	select REGULATOR_CPR3
+	help
+	  This driver supports Qualcomm Technologies, Inc. MMSS graphics
+	  processor specific features.  The MMSS CPR3 controller only uses one
+	  thread to monitor the MMSS LDO voltage requirements. This driver reads
+	  initial voltage values out of hardware fuses and CPR target quotient
+	  values out of device tree.
+
+config REGULATOR_KRYO
+	bool "Kryo regulator driver"
+	depends on OF
+	help
+	  Some MSM designs have CPUs that can be directly powered from a common
+	  voltage rail via a Block Head Switch (BHS) or an LDO whose output
+	  voltage can be configured for use when certain power constraints are
+	  met.  Say yes to support management of LDO and BHS modes for the
+	  clusters in the CPU subsystem.
+
+config REGULATOR_MEM_ACC
+	tristate "QTI Memory accelerator regulator driver"
+	help
+	  Say y here to enable the memory accelerator driver for Qualcomm
+	  Technologies, Inc. (QTI) chips. The accelerator controls delays
+	  applied for memory accesses.  This driver configures the power-mode
+	  (corner) for the memory accelerator.
+
+config REGULATOR_PROXY_CONSUMER
+	bool "Boot time regulator proxy consumer support"
+	help
+	  This driver provides support for boot time regulator proxy requests.
+	  It can enforce a specified voltage range, set a minimum current,
+	  and/or keep a regulator enabled.  It is needed in circumstances where
+	  reducing one or more of these three quantities will cause hardware to
+	  stop working if performed before the driver managing the hardware has
+	  probed.
+
+config REGULATOR_STUB
+	tristate "Stub Regulator"
+	help
+	  This driver adds stub regulator support. The driver has no real
+	  hardware-based implementation. It allows clients to register
+	  their regulator device constraints and use all of the standard
+	  regulator interfaces. This is useful for bringing up new platforms
+	  when the real hardware based implementation may not be yet available.
+	  Clients can use the real regulator device names with proper
+	  constraint checking while the real driver is being developed.
+
 endif
 
diff -ruw linux-4.4.115/drivers/regulator/Makefile linux-4.4.115-fbx/drivers/regulator/Makefile
--- linux-4.4.115/drivers/regulator/Makefile	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/regulator/Makefile	2019-01-22 16:16:26.255271328 +0100
@@ -33,6 +33,7 @@
 obj-$(CONFIG_REGULATOR_DB8500_PRCMU) += db8500-prcmu.o
 obj-$(CONFIG_REGULATOR_FAN53555) += fan53555.o
 obj-$(CONFIG_REGULATOR_GPIO) += gpio-regulator.o
+obj-$(CONFIG_REGULATOR_MSM_GFX_LDO) += msm_gfx_ldo.o
 obj-$(CONFIG_REGULATOR_HI6421) += hi6421-regulator.o
 obj-$(CONFIG_REGULATOR_ISL6271A) += isl6271a-regulator.o
 obj-$(CONFIG_REGULATOR_ISL9305) += isl9305.o
@@ -45,6 +46,7 @@
 obj-$(CONFIG_REGULATOR_LTC3589) += ltc3589.o
 obj-$(CONFIG_REGULATOR_MAX14577) += max14577.o
 obj-$(CONFIG_REGULATOR_MAX1586) += max1586.o
+obj-$(CONFIG_REGULATOR_MAX20010) += max20010-regulator.o
 obj-$(CONFIG_REGULATOR_MAX8649)	+= max8649.o
 obj-$(CONFIG_REGULATOR_MAX8660) += max8660.o
 obj-$(CONFIG_REGULATOR_MAX8907) += max8907-regulator.o
@@ -64,6 +66,7 @@
 obj-$(CONFIG_REGULATOR_QCOM_RPM) += qcom_rpm-regulator.o
 obj-$(CONFIG_REGULATOR_QCOM_SMD_RPM) += qcom_smd-regulator.o
 obj-$(CONFIG_REGULATOR_QCOM_SPMI) += qcom_spmi-regulator.o
+obj-$(CONFIG_REGULATOR_ONSEMI_NCP6335D) += onsemi-ncp6335d.o
 obj-$(CONFIG_REGULATOR_PALMAS) += palmas-regulator.o
 obj-$(CONFIG_REGULATOR_PFUZE100) += pfuze100-regulator.o
 obj-$(CONFIG_REGULATOR_PWM) += pwm-regulator.o
@@ -102,5 +105,23 @@
 obj-$(CONFIG_REGULATOR_WM8400) += wm8400-regulator.o
 obj-$(CONFIG_REGULATOR_WM8994) += wm8994-regulator.o
 
+obj-$(CONFIG_REGULATOR_PROXY_CONSUMER) += proxy-consumer.o
+obj-$(CONFIG_REGULATOR_MEM_ACC) += mem-acc-regulator.o
+obj-$(CONFIG_REGULATOR_RPM_SMD) += rpm-smd-regulator.o
+obj-$(CONFIG_REGULATOR_QPNP) += qpnp-regulator.o
+obj-$(CONFIG_REGULATOR_SPM) += spm-regulator.o
+obj-$(CONFIG_REGULATOR_CPR) += cpr-regulator.o
+obj-$(CONFIG_REGULATOR_CPR3) += cpr3-regulator.o cpr3-util.o
+obj-$(CONFIG_REGULATOR_CPR3_HMSS) += cpr3-hmss-regulator.o
+obj-$(CONFIG_REGULATOR_CPR3_MMSS) += cpr3-mmss-regulator.o
+obj-$(CONFIG_REGULATOR_CPR4_APSS) += cpr4-apss-regulator.o
+obj-$(CONFIG_REGULATOR_CPRH_KBSS) += cprh-kbss-regulator.o
+obj-$(CONFIG_REGULATOR_CPR4_MMSS_LDO) += cpr4-mmss-ldo-regulator.o
+obj-$(CONFIG_REGULATOR_QPNP_LABIBB) += qpnp-labibb-regulator.o
+obj-$(CONFIG_REGULATOR_QPNP_LCDB) += qpnp-lcdb-regulator.o
+obj-$(CONFIG_REGULATOR_QPNP_OLEDB) += qpnp-oledb-regulator.o
+obj-$(CONFIG_REGULATOR_STUB) += stub-regulator.o
+obj-$(CONFIG_REGULATOR_KRYO) += kryo-regulator.o
+obj-$(CONFIG_REGULATOR_CPR2_GFX) += cpr2-gfx-regulator.o
 
 ccflags-$(CONFIG_REGULATOR_DEBUG) += -DDEBUG
diff -ruw linux-4.4.115/drivers/rtc/interface.c linux-4.4.115-fbx/drivers/rtc/interface.c
--- linux-4.4.115/drivers/rtc/interface.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/rtc/interface.c	2019-10-29 09:26:24.653213062 +0100
@@ -362,6 +362,14 @@
 }
 EXPORT_SYMBOL_GPL(rtc_set_alarm);
 
+static void rtc_alarm_disable(struct rtc_device *rtc)
+{
+	if (!rtc->ops || !rtc->ops->alarm_irq_enable)
+		return;
+
+	rtc->ops->alarm_irq_enable(rtc->dev.parent, false);
+}
+
 /* Called once per device from rtc_device_register */
 int rtc_initialize_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
 {
@@ -389,7 +397,12 @@
 
 		rtc->aie_timer.enabled = 1;
 		timerqueue_add(&rtc->timerqueue, &rtc->aie_timer.node);
+	} else if (alarm->enabled && (rtc_tm_to_ktime(now).tv64 >=
+			rtc->aie_timer.node.expires.tv64)){
+		rtc_alarm_disable(rtc);
 	}
+
 	mutex_unlock(&rtc->ops_lock);
 	return err;
 }
@@ -782,14 +794,6 @@
 	return 0;
 }
 
-static void rtc_alarm_disable(struct rtc_device *rtc)
-{
-	if (!rtc->ops || !rtc->ops->alarm_irq_enable)
-		return;
-
-	rtc->ops->alarm_irq_enable(rtc->dev.parent, false);
-}
-
 /**
  * rtc_timer_remove - Removes a rtc_timer from the rtc_device timerqueue
  * @rtc rtc device
diff -ruw linux-4.4.115/drivers/rtc/Kconfig linux-4.4.115-fbx/drivers/rtc/Kconfig
--- linux-4.4.115/drivers/rtc/Kconfig	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/rtc/Kconfig	2019-10-29 09:26:24.653213062 +0100
@@ -1591,6 +1591,15 @@
 	   This driver can also be built as a module. If so, the module
 	   will be called rtc-moxart
 
+config RTC_DRV_QPNP
+	tristate "Qualcomm Technologies, Inc. QPNP PMIC RTC"
+	depends on SPMI
+	help
+	  This enables support for the RTC found on Qualcomm Technologies, Inc.
+	  QPNP PMIC chips.  This driver supports using the PMIC RTC peripheral
+	  to wake a mobile device up from suspend or to wake it up from power-
+	  off.
+
 config RTC_DRV_MT6397
 	tristate "Mediatek Real Time Clock driver"
 	depends on MFD_MT6397 || COMPILE_TEST
diff -ruw linux-4.4.115/drivers/rtc/Makefile linux-4.4.115-fbx/drivers/rtc/Makefile
--- linux-4.4.115/drivers/rtc/Makefile	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/rtc/Makefile	2019-10-29 09:26:24.653213062 +0100
@@ -118,6 +118,7 @@
 obj-$(CONFIG_RTC_DRV_PS3)	+= rtc-ps3.o
 obj-$(CONFIG_RTC_DRV_PUV3)	+= rtc-puv3.o
 obj-$(CONFIG_RTC_DRV_PXA)	+= rtc-pxa.o
+obj-$(CONFIG_RTC_DRV_QPNP)	+= qpnp-rtc.o
 obj-$(CONFIG_RTC_DRV_R9701)	+= rtc-r9701.o
 obj-$(CONFIG_RTC_DRV_RC5T583)	+= rtc-rc5t583.o
 obj-$(CONFIG_RTC_DRV_RK808)	+= rtc-rk808.o
diff -ruw linux-4.4.115/drivers/scsi/scsi_pm.c linux-4.4.115-fbx/drivers/scsi/scsi_pm.c
--- linux-4.4.115/drivers/scsi/scsi_pm.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/scsi/scsi_pm.c	2019-01-22 16:16:26.607274515 +0100
@@ -16,6 +16,9 @@
 
 #include "scsi_priv.h"
 
+static int do_scsi_runtime_resume(struct device *dev,
+				   const struct dev_pm_ops *pm);
+
 #ifdef CONFIG_PM_SLEEP
 
 static int do_scsi_suspend(struct device *dev, const struct dev_pm_ops *pm)
@@ -77,10 +80,22 @@
 	scsi_device_resume(to_scsi_device(dev));
 	dev_dbg(dev, "scsi resume: %d\n", err);
 
-	if (err == 0) {
+	if (err == 0 && (cb != do_scsi_runtime_resume)) {
 		pm_runtime_disable(dev);
-		pm_runtime_set_active(dev);
+		err = pm_runtime_set_active(dev);
 		pm_runtime_enable(dev);
+
+		if (!err && scsi_is_sdev_device(dev)) {
+			struct scsi_device *sdev = to_scsi_device(dev);
+
+			/*
+			 * If scsi device runtime PM is managed by block layer
+			 * then we should update request queue's runtime status
+			 * as well.
+			 */
+			if (sdev->request_queue->dev)
+				blk_post_runtime_resume(sdev->request_queue, 0);
+		}
 	}
 
 	return err;
@@ -213,12 +228,33 @@
 
 #endif /* CONFIG_PM_SLEEP */
 
+static int do_scsi_runtime_suspend(struct device *dev,
+				   const struct dev_pm_ops *pm)
+{
+	return pm && pm->runtime_suspend ? pm->runtime_suspend(dev) : 0;
+}
+
+static int do_scsi_runtime_resume(struct device *dev,
+				   const struct dev_pm_ops *pm)
+{
+	return pm && pm->runtime_resume ? pm->runtime_resume(dev) : 0;
+}
+
 static int sdev_runtime_suspend(struct device *dev)
 {
 	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
 	struct scsi_device *sdev = to_scsi_device(dev);
 	int err = 0;
 
+	if (!sdev->request_queue->dev) {
+		err = scsi_dev_type_suspend(dev, do_scsi_runtime_suspend);
+		if (err == -EAGAIN)
+			pm_schedule_suspend(dev, jiffies_to_msecs(
+					round_jiffies_up_relative(HZ/10)));
+		return err;
+	}
+
 	err = blk_pre_runtime_suspend(sdev->request_queue);
 	if (err)
 		return err;
@@ -248,6 +283,9 @@
 	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
 	int err = 0;
 
+	if (!sdev->request_queue->dev)
+		return scsi_dev_type_resume(dev, do_scsi_runtime_resume);
+
 	blk_pre_runtime_resume(sdev->request_queue);
 	if (pm && pm->runtime_resume)
 		err = pm->runtime_resume(dev);
diff -ruw linux-4.4.115/drivers/scsi/scsi_scan.c linux-4.4.115-fbx/drivers/scsi/scsi_scan.c
--- linux-4.4.115/drivers/scsi/scsi_scan.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/scsi/scsi_scan.c	2019-10-29 09:26:24.785214354 +0100
@@ -820,15 +820,10 @@
 		 * well-known logical units. Force well-known type
 		 * to enumerate them correctly.
 		 */
-		if (scsi_is_wlun(sdev->lun) && sdev->type != TYPE_WLUN) {
-			sdev_printk(KERN_WARNING, sdev,
-				"%s: correcting incorrect peripheral device type 0x%x for W-LUN 0x%16xhN\n",
-				__func__, sdev->type, (unsigned int)sdev->lun);
+		if (scsi_is_wlun(sdev->lun) && sdev->type != TYPE_WLUN)
 			sdev->type = TYPE_WLUN;
 		}
 
-	}
-
 	if (sdev->type == TYPE_RBC || sdev->type == TYPE_ROM) {
 		/* RBC and MMC devices can return SCSI-3 compliance and yet
 		 * still not support REPORT LUNS, so make them act as
@@ -971,6 +966,10 @@
 
 	transport_configure_device(&sdev->sdev_gendev);
 
+	/* The LLD can override auto suspend tunables in ->slave_configure() */
+	sdev->use_rpm_auto = 0;
+	sdev->autosuspend_delay = SCSI_DEFAULT_AUTOSUSPEND_DELAY;
+
 	if (sdev->host->hostt->slave_configure) {
 		ret = sdev->host->hostt->slave_configure(sdev);
 		if (ret) {
diff -ruw linux-4.4.115/drivers/scsi/scsi_sysfs.c linux-4.4.115-fbx/drivers/scsi/scsi_sysfs.c
--- linux-4.4.115/drivers/scsi/scsi_sysfs.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/scsi/scsi_sysfs.c	2019-10-29 09:26:24.785214354 +0100
@@ -1040,6 +1040,7 @@
 	device_enable_async_suspend(&sdev->sdev_gendev);
 	scsi_autopm_get_target(starget);
 	pm_runtime_set_active(&sdev->sdev_gendev);
+	if (!sdev->use_rpm_auto)
 	pm_runtime_forbid(&sdev->sdev_gendev);
 	pm_runtime_enable(&sdev->sdev_gendev);
 	scsi_autopm_put_target(starget);
diff -ruw linux-4.4.115/drivers/scsi/sd.c linux-4.4.115-fbx/drivers/scsi/sd.c
--- linux-4.4.115/drivers/scsi/sd.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/scsi/sd.c	2019-10-29 09:26:24.789214393 +0100
@@ -601,6 +601,35 @@
 	mutex_unlock(&sd_ref_mutex);
 }
 
+struct gendisk *scsi_gendisk_get_from_dev(struct device *dev)
+{
+	struct scsi_disk *sdkp;
+
+	mutex_lock(&sd_ref_mutex);
+	sdkp = dev_get_drvdata(dev);
+	if (sdkp)
+		sdkp = scsi_disk_get(sdkp->disk);
+	mutex_unlock(&sd_ref_mutex);
+	return !sdkp ? NULL : sdkp->disk;
+}
+EXPORT_SYMBOL(scsi_gendisk_get_from_dev);
+
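+/*
+ * scsi_gendisk_put - drop the scsi_disk and scsi_device references taken by
+ * scsi_gendisk_get_from_dev()
+ */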
+void scsi_gendisk_put(struct device *dev)
+{
+	struct scsi_disk *sdkp = dev_get_drvdata(dev);
+	struct scsi_device *sdev = sdkp->device;
+
+	mutex_lock(&sd_ref_mutex);
+	put_device(&sdkp->dev);
+	scsi_device_put(sdev);
+	mutex_unlock(&sd_ref_mutex);
+}
+EXPORT_SYMBOL(scsi_gendisk_put);
+
 static unsigned char sd_setup_protect_cmnd(struct scsi_cmnd *scmd,
 					   unsigned int dix, unsigned int dif)
 {
@@ -1395,86 +1420,6 @@
 	return 0;
 }
 
-/**
- *	sd_check_events - check media events
- *	@disk: kernel device descriptor
- *	@clearing: disk events currently being cleared
- *
- *	Returns mask of DISK_EVENT_*.
- *
- *	Note: this function is invoked from the block subsystem.
- **/
-static unsigned int sd_check_events(struct gendisk *disk, unsigned int clearing)
-{
-	struct scsi_disk *sdkp = scsi_disk_get(disk);
-	struct scsi_device *sdp;
-	struct scsi_sense_hdr *sshdr = NULL;
-	int retval;
-
-	if (!sdkp)
-		return 0;
-
-	sdp = sdkp->device;
-	SCSI_LOG_HLQUEUE(3, sd_printk(KERN_INFO, sdkp, "sd_check_events\n"));
-
-	/*
-	 * If the device is offline, don't send any commands - just pretend as
-	 * if the command failed.  If the device ever comes back online, we
-	 * can deal with it then.  It is only because of unrecoverable errors
-	 * that we would ever take a device offline in the first place.
-	 */
-	if (!scsi_device_online(sdp)) {
-		set_media_not_present(sdkp);
-		goto out;
-	}
-
-	/*
-	 * Using TEST_UNIT_READY enables differentiation between drive with
-	 * no cartridge loaded - NOT READY, drive with changed cartridge -
-	 * UNIT ATTENTION, or with same cartridge - GOOD STATUS.
-	 *
-	 * Drives that auto spin down. eg iomega jaz 1G, will be started
-	 * by sd_spinup_disk() from sd_revalidate_disk(), which happens whenever
-	 * sd_revalidate() is called.
-	 */
-	retval = -ENODEV;
-
-	if (scsi_block_when_processing_errors(sdp)) {
-		sshdr  = kzalloc(sizeof(*sshdr), GFP_KERNEL);
-		retval = scsi_test_unit_ready(sdp, SD_TIMEOUT, SD_MAX_RETRIES,
-					      sshdr);
-	}
-
-	/* failed to execute TUR, assume media not present */
-	if (host_byte(retval)) {
-		set_media_not_present(sdkp);
-		goto out;
-	}
-
-	if (media_not_present(sdkp, sshdr))
-		goto out;
-
-	/*
-	 * For removable scsi disk we have to recognise the presence
-	 * of a disk in the drive.
-	 */
-	if (!sdkp->media_present)
-		sdp->changed = 1;
-	sdkp->media_present = 1;
-out:
-	/*
-	 * sdp->changed is set under the following conditions:
-	 *
-	 *	Medium present state has changed in either direction.
-	 *	Device has indicated UNIT_ATTENTION.
-	 */
-	kfree(sshdr);
-	retval = sdp->changed ? DISK_EVENT_MEDIA_CHANGE : 0;
-	sdp->changed = 0;
-	scsi_disk_put(sdkp);
-	return retval;
-}
-
 static int sd_sync_cache(struct scsi_disk *sdkp)
 {
 	int retries, res;
@@ -1667,7 +1612,6 @@
 #ifdef CONFIG_COMPAT
 	.compat_ioctl		= sd_compat_ioctl,
 #endif
-	.check_events		= sd_check_events,
 	.revalidate_disk	= sd_revalidate_disk,
 	.unlock_native_capacity	= sd_unlock_native_capacity,
 	.pr_ops			= &sd_pr_ops,
@@ -2352,11 +2296,6 @@
 				sizeof(cap_str_10));
 
 		if (sdkp->first_scan || old_capacity != sdkp->capacity) {
-			sd_printk(KERN_NOTICE, sdkp,
-				  "%llu %d-byte logical blocks: (%s/%s)\n",
-				  (unsigned long long)sdkp->capacity,
-				  sector_size, cap_str_10, cap_str_2);
-
 			if (sdkp->physical_block_size != sector_size)
 				sd_printk(KERN_NOTICE, sdkp,
 					  "%u-byte physical blocks\n",
@@ -2393,7 +2332,6 @@
 	int res;
 	struct scsi_device *sdp = sdkp->device;
 	struct scsi_mode_data data;
-	int old_wp = sdkp->write_prot;
 
 	set_disk_ro(sdkp->disk, 0);
 	if (sdp->skip_ms_page_3f) {
@@ -2434,13 +2372,6 @@
 	} else {
 		sdkp->write_prot = ((data.device_specific & 0x80) != 0);
 		set_disk_ro(sdkp->disk, sdkp->write_prot);
-		if (sdkp->first_scan || old_wp != sdkp->write_prot) {
-			sd_printk(KERN_NOTICE, sdkp, "Write Protect is %s\n",
-				  sdkp->write_prot ? "on" : "off");
-			sd_printk(KERN_DEBUG, sdkp,
-				  "Mode Sense: %02x %02x %02x %02x\n",
-				  buffer[0], buffer[1], buffer[2], buffer[3]);
-		}
 	}
 }
 
@@ -2453,16 +2384,13 @@
 {
 	int len = 0, res;
 	struct scsi_device *sdp = sdkp->device;
+	struct Scsi_Host *host = sdp->host;
 
 	int dbd;
 	int modepage;
 	int first_len;
 	struct scsi_mode_data data;
 	struct scsi_sense_hdr sshdr;
-	int old_wce = sdkp->WCE;
-	int old_rcd = sdkp->RCD;
-	int old_dpofua = sdkp->DPOFUA;
-
 
 	if (sdkp->cache_override)
 		return;
@@ -2484,6 +2412,9 @@
 		dbd = 8;
 	} else {
 		modepage = 8;
+		if (host->set_dbd_for_caching)
+			dbd = 8;
+		else
 		dbd = 0;
 	}
 
@@ -2585,15 +2516,6 @@
 		if (sdkp->WCE && sdkp->write_prot)
 			sdkp->WCE = 0;
 
-		if (sdkp->first_scan || old_wce != sdkp->WCE ||
-		    old_rcd != sdkp->RCD || old_dpofua != sdkp->DPOFUA)
-			sd_printk(KERN_NOTICE, sdkp,
-				  "Write cache: %s, read cache: %s, %s\n",
-				  sdkp->WCE ? "enabled" : "disabled",
-				  sdkp->RCD ? "disabled" : "enabled",
-				  sdkp->DPOFUA ? "supports DPO and FUA"
-				  : "doesn't support DPO or FUA");
-
 		return;
 	}
 
@@ -2907,10 +2829,10 @@
 	if (sdkp->opt_xfer_blocks &&
 	    sdkp->opt_xfer_blocks <= dev_max &&
 	    sdkp->opt_xfer_blocks <= SD_DEF_XFER_BLOCKS &&
-	    logical_to_bytes(sdp, sdkp->opt_xfer_blocks) >= PAGE_CACHE_SIZE) {
-		q->limits.io_opt = logical_to_bytes(sdp, sdkp->opt_xfer_blocks);
-		rw_max = logical_to_sectors(sdp, sdkp->opt_xfer_blocks);
-	} else
+	    sdkp->opt_xfer_blocks * sdp->sector_size >= PAGE_CACHE_SIZE)
+		rw_max = q->limits.io_opt =
+			sdkp->opt_xfer_blocks * sdp->sector_size;
+	else
 		rw_max = min_not_zero(logical_to_sectors(sdp, dev_max),
 				      (sector_t)BLK_DEF_MAX_SECTORS);
 
@@ -3048,14 +2970,15 @@
 	}
 
 	blk_pm_runtime_init(sdp->request_queue, dev);
+	if (sdp->autosuspend_delay >= 0)
+		pm_runtime_set_autosuspend_delay(dev, sdp->autosuspend_delay);
+
 	add_disk(gd);
 	if (sdkp->capacity)
 		sd_dif_config_host(sdkp);
 
 	sd_revalidate_disk(gd);
 
-	sd_printk(KERN_NOTICE, sdkp, "Attached SCSI %sdisk\n",
-		  sdp->removable ? "removable " : "");
 	scsi_autopm_put_device(sdp);
 	put_device(&sdkp->dev);
 }
@@ -3300,7 +3223,6 @@
 		return 0;
 
 	if (sdkp->WCE && sdkp->media_present) {
-		sd_printk(KERN_NOTICE, sdkp, "Synchronizing SCSI cache\n");
 		ret = sd_sync_cache(sdkp);
 		if (ret) {
 			/* ignore OFFLINE device */
@@ -3311,7 +3233,7 @@
 	}
 
 	if (sdkp->device->manage_start_stop) {
-		sd_printk(KERN_NOTICE, sdkp, "Stopping disk\n");
+		sd_printk(KERN_DEBUG, sdkp, "Stopping disk\n");
 		/* an error is not worth aborting a system sleep */
 		ret = sd_start_stop_device(sdkp, 0);
 		if (ignore_stop_errors)
@@ -3342,7 +3264,7 @@
 	if (!sdkp->device->manage_start_stop)
 		return 0;
 
-	sd_printk(KERN_NOTICE, sdkp, "Starting disk\n");
+	sd_printk(KERN_DEBUG, sdkp, "Starting disk\n");
 	return sd_start_stop_device(sdkp, 1);
 }
 
diff -ruw linux-4.4.115/drivers/scsi/sd.h linux-4.4.115-fbx/drivers/scsi/sd.h
--- linux-4.4.115/drivers/scsi/sd.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/scsi/sd.h	2019-01-22 16:16:26.611274551 +0100
@@ -151,11 +151,6 @@
 	return blocks << (ilog2(sdev->sector_size) - 9);
 }
 
-static inline unsigned int logical_to_bytes(struct scsi_device *sdev, sector_t blocks)
-{
-	return blocks * sdev->sector_size;
-}
-
 /*
  * A DIF-capable target device can be formatted with different
  * protection schemes.  Currently 0 through 3 are defined:
diff -ruw linux-4.4.115/drivers/scsi/sg.c linux-4.4.115-fbx/drivers/scsi/sg.c
--- linux-4.4.115/drivers/scsi/sg.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/scsi/sg.c	2019-10-29 09:26:24.793214432 +0100
@@ -897,8 +897,10 @@
 			return -ENXIO;
 		if (!access_ok(VERIFY_WRITE, p, SZ_SG_IO_HDR))
 			return -EFAULT;
+		mutex_lock(&sfp->parentdp->open_rel_lock);
 		result = sg_new_write(sfp, filp, p, SZ_SG_IO_HDR,
 				 1, read_only, 1, &srp);
+		mutex_unlock(&sfp->parentdp->open_rel_lock);
 		if (result < 0)
 			return result;
 		result = wait_event_interruptible(sfp->read_wait,
@@ -1009,9 +1011,10 @@
 				mutex_unlock(&sfp->f_mutex);
 				return -EBUSY;
 			}
-
+			mutex_lock(&sfp->parentdp->open_rel_lock);
 			sg_remove_scat(sfp, &sfp->reserve);
 			sg_build_reserve(sfp, val);
+			mutex_unlock(&sfp->parentdp->open_rel_lock);
 		}
 		mutex_unlock(&sfp->f_mutex);
 		return 0;
@@ -1531,9 +1534,6 @@
 	} else
 		pr_warn("%s: sg_sys Invalid\n", __func__);
 
-	sdev_printk(KERN_NOTICE, scsidp, "Attached scsi generic sg%d "
-		    "type %d\n", sdp->index, scsidp->type);
-
 	dev_set_drvdata(cl_dev, sdp);
 
 	return 0;
@@ -2072,11 +2072,12 @@
 		if ((1 == resp->done) && (!resp->sg_io_owned) &&
 		    ((-1 == pack_id) || (resp->header.pack_id == pack_id))) {
 			resp->done = 2;	/* guard against other readers */
-			break;
+			write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
+			return resp;
 		}
 	}
 	write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
-	return resp;
+	return NULL;
 }
 
 /* always adds to end of list */
diff -ruw linux-4.4.115/drivers/scsi/ufs/Kconfig linux-4.4.115-fbx/drivers/scsi/ufs/Kconfig
--- linux-4.4.115/drivers/scsi/ufs/Kconfig	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/scsi/ufs/Kconfig	2019-01-22 16:16:26.627274696 +0100
@@ -83,3 +83,38 @@
 
 	  Select this if you have UFS controller on QCOM chipset.
 	  If unsure, say N.
+
+config SCSI_UFS_QCOM_ICE
+	bool "QCOM specific hooks to Inline Crypto Engine for UFS driver"
+	depends on SCSI_UFS_QCOM && CRYPTO_DEV_QCOM_ICE
+	help
+	  This selects the QCOM specific additions to support Inline Crypto
+	  Engine (ICE).
+	  ICE accelerates crypto operations and maintains high UFS
+	  performance.
+
+	  Select this if you have ICE supported for UFS on QCOM chipset.
+	  If unsure, say N.
+
+
+config SCSI_UFS_TEST
+	tristate "Universal Flash Storage host controller driver unit-tests"
+	depends on SCSI_UFSHCD && IOSCHED_TEST
+	default m
+	help
+	  This adds the UFS host controller unit-test framework.
+	  The UFS unit-tests register as a block device test utility with
+	  test-iosched and are initiated when test-iosched is selected as
+	  the active I/O scheduler.
+
+config SCSI_UFSHCD_CMD_LOGGING
+	bool "Universal Flash Storage host controller driver layer command logging support"
+	depends on SCSI_UFSHCD
+	help
+	  This selects the UFS host controller driver layer command logging.
+	  UFS host controller driver layer command logging records all the
+	  command information sent from the UFS host controller, for
+	  debugging purposes.
+
+	  Select this if you want the above debug information captured.
+	  If unsure, say N.
diff -ruw linux-4.4.115/drivers/scsi/ufs/Makefile linux-4.4.115-fbx/drivers/scsi/ufs/Makefile
--- linux-4.4.115/drivers/scsi/ufs/Makefile	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/scsi/ufs/Makefile	2019-01-22 16:16:26.627274696 +0100
@@ -1,5 +1,8 @@
 # UFSHCD makefile
 obj-$(CONFIG_SCSI_UFS_QCOM) += ufs-qcom.o
-obj-$(CONFIG_SCSI_UFSHCD) += ufshcd.o
+obj-$(CONFIG_SCSI_UFS_QCOM_ICE) += ufs-qcom-ice.o
+obj-$(CONFIG_SCSI_UFSHCD) += ufshcd.o ufs_quirks.o
 obj-$(CONFIG_SCSI_UFSHCD_PCI) += ufshcd-pci.o
 obj-$(CONFIG_SCSI_UFSHCD_PLATFORM) += ufshcd-pltfrm.o
+obj-$(CONFIG_SCSI_UFS_TEST) += ufs_test.o
+obj-$(CONFIG_DEBUG_FS) += ufs-debugfs.o ufs-qcom-debugfs.o
diff -ruw linux-4.4.115/drivers/scsi/ufs/ufs.h linux-4.4.115-fbx/drivers/scsi/ufs/ufs.h
--- linux-4.4.115/drivers/scsi/ufs/ufs.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/scsi/ufs/ufs.h	2019-10-29 09:26:24.797214471 +0100
@@ -38,13 +38,16 @@
 
 #include <linux/mutex.h>
 #include <linux/types.h>
+#include <scsi/ufs/ufs.h>
 
 #define MAX_CDB_SIZE	16
 #define GENERAL_UPIU_REQUEST_SIZE 32
 #define QUERY_DESC_MAX_SIZE       255
 #define QUERY_DESC_MIN_SIZE       2
+#define QUERY_DESC_HDR_SIZE       2
 #define QUERY_OSF_SIZE            (GENERAL_UPIU_REQUEST_SIZE - \
 					(sizeof(struct utp_upiu_header)))
+#define RESPONSE_UPIU_SENSE_DATA_LENGTH	18
 
 #define UPIU_HEADER_DWORD(byte3, byte2, byte1, byte0)\
 			cpu_to_be32((byte3 << 24) | (byte2 << 16) |\
@@ -71,6 +74,16 @@
 	UFS_UPIU_RPMB_WLUN		= 0xC4,
 };
 
+/**
+ * ufs_is_valid_unit_desc_lun - checks if the given LUN has a unit descriptor
+ * @lun: LU number to check
+ * @return: true if the lun has a matching unit descriptor, false otherwise
+ */
+static inline bool ufs_is_valid_unit_desc_lun(u8 lun)
+{
+	return lun == UFS_UPIU_RPMB_WLUN || (lun < UFS_UPIU_MAX_GENERAL_LUN);
+}
+
 /*
  * UFS Protocol Information Unit related definitions
  */
@@ -126,42 +139,13 @@
 	UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST          = 0x81,
 };
 
-/* Flag idn for Query Requests*/
-enum flag_idn {
-	QUERY_FLAG_IDN_FDEVICEINIT      = 0x01,
-	QUERY_FLAG_IDN_PWR_ON_WPE	= 0x03,
-	QUERY_FLAG_IDN_BKOPS_EN         = 0x04,
-};
-
-/* Attribute idn for Query requests */
-enum attr_idn {
-	QUERY_ATTR_IDN_ACTIVE_ICC_LVL	= 0x03,
-	QUERY_ATTR_IDN_BKOPS_STATUS	= 0x05,
-	QUERY_ATTR_IDN_EE_CONTROL	= 0x0D,
-	QUERY_ATTR_IDN_EE_STATUS	= 0x0E,
-};
-
-/* Descriptor idn for Query requests */
-enum desc_idn {
-	QUERY_DESC_IDN_DEVICE		= 0x0,
-	QUERY_DESC_IDN_CONFIGURAION	= 0x1,
-	QUERY_DESC_IDN_UNIT		= 0x2,
-	QUERY_DESC_IDN_RFU_0		= 0x3,
-	QUERY_DESC_IDN_INTERCONNECT	= 0x4,
-	QUERY_DESC_IDN_STRING		= 0x5,
-	QUERY_DESC_IDN_RFU_1		= 0x6,
-	QUERY_DESC_IDN_GEOMETRY		= 0x7,
-	QUERY_DESC_IDN_POWER		= 0x8,
-	QUERY_DESC_IDN_MAX,
-};
-
 enum desc_header_offset {
 	QUERY_DESC_LENGTH_OFFSET	= 0x00,
 	QUERY_DESC_DESC_TYPE_OFFSET	= 0x01,
 };
 
 enum ufs_desc_max_size {
-	QUERY_DESC_DEVICE_MAX_SIZE		= 0x1F,
+	QUERY_DESC_DEVICE_MAX_SIZE		= 0x40,
 	QUERY_DESC_CONFIGURAION_MAX_SIZE	= 0x90,
 	QUERY_DESC_UNIT_MAX_SIZE		= 0x23,
 	QUERY_DESC_INTERCONNECT_MAX_SIZE	= 0x06,
@@ -195,6 +179,36 @@
 	UNIT_DESC_PARAM_LARGE_UNIT_SIZE_M1	= 0x22,
 };
 
+/* Device descriptor parameter offsets in bytes */
+enum device_desc_param {
+	DEVICE_DESC_PARAM_LEN			= 0x0,
+	DEVICE_DESC_PARAM_TYPE			= 0x1,
+	DEVICE_DESC_PARAM_DEVICE_TYPE		= 0x2,
+	DEVICE_DESC_PARAM_DEVICE_CLASS		= 0x3,
+	DEVICE_DESC_PARAM_DEVICE_SUB_CLASS	= 0x4,
+	DEVICE_DESC_PARAM_PRTCL			= 0x5,
+	DEVICE_DESC_PARAM_NUM_LU		= 0x6,
+	DEVICE_DESC_PARAM_NUM_WLU		= 0x7,
+	DEVICE_DESC_PARAM_BOOT_ENBL		= 0x8,
+	DEVICE_DESC_PARAM_DESC_ACCSS_ENBL	= 0x9,
+	DEVICE_DESC_PARAM_INIT_PWR_MODE		= 0xA,
+	DEVICE_DESC_PARAM_HIGH_PR_LUN		= 0xB,
+	DEVICE_DESC_PARAM_SEC_RMV_TYPE		= 0xC,
+	DEVICE_DESC_PARAM_SEC_LU		= 0xD,
+	DEVICE_DESC_PARAM_BKOP_TERM_LT		= 0xE,
+	DEVICE_DESC_PARAM_ACTVE_ICC_LVL		= 0xF,
+	DEVICE_DESC_PARAM_SPEC_VER		= 0x10,
+	DEVICE_DESC_PARAM_MANF_DATE		= 0x12,
+	DEVICE_DESC_PARAM_MANF_NAME		= 0x14,
+	DEVICE_DESC_PARAM_PRDCT_NAME		= 0x15,
+	DEVICE_DESC_PARAM_SN			= 0x16,
+	DEVICE_DESC_PARAM_OEM_ID		= 0x17,
+	DEVICE_DESC_PARAM_MANF_ID		= 0x18,
+	DEVICE_DESC_PARAM_UD_OFFSET		= 0x1A,
+	DEVICE_DESC_PARAM_UD_LEN		= 0x1B,
+	DEVICE_DESC_PARAM_RTT_CAP		= 0x1C,
+	DEVICE_DESC_PARAM_FRQ_RTC		= 0x1D,
+};
 /*
  * Logical Unit Write Protect
  * 00h: LU not write protected
@@ -247,19 +261,6 @@
 	BKOPS_STATUS_MAX		 = BKOPS_STATUS_CRITICAL,
 };
 
-/* UTP QUERY Transaction Specific Fields OpCode */
-enum query_opcode {
-	UPIU_QUERY_OPCODE_NOP		= 0x0,
-	UPIU_QUERY_OPCODE_READ_DESC	= 0x1,
-	UPIU_QUERY_OPCODE_WRITE_DESC	= 0x2,
-	UPIU_QUERY_OPCODE_READ_ATTR	= 0x3,
-	UPIU_QUERY_OPCODE_WRITE_ATTR	= 0x4,
-	UPIU_QUERY_OPCODE_READ_FLAG	= 0x5,
-	UPIU_QUERY_OPCODE_SET_FLAG	= 0x6,
-	UPIU_QUERY_OPCODE_CLEAR_FLAG	= 0x7,
-	UPIU_QUERY_OPCODE_TOGGLE_FLAG	= 0x8,
-};
-
 /* Query response result code */
 enum {
 	QUERY_RESULT_SUCCESS                    = 0x00,
@@ -383,7 +384,7 @@
 	__be32 residual_transfer_count;
 	__be32 reserved[4];
 	__be16 sense_data_len;
-	u8 sense_data[18];
+	u8 sense_data[RESPONSE_UPIU_SENSE_DATA_LENGTH];
 };
 
 /**
@@ -469,6 +470,7 @@
 	struct regulator *reg;
 	const char *name;
 	bool enabled;
+	bool unused;
 	int min_uV;
 	int max_uV;
 	int min_uA;
diff -ruw linux-4.4.115/drivers/scsi/ufs/ufshcd.c linux-4.4.115-fbx/drivers/scsi/ufs/ufshcd.c
--- linux-4.4.115/drivers/scsi/ufs/ufshcd.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/scsi/ufs/ufshcd.c	2019-10-29 09:26:24.801214510 +0100
@@ -3,7 +3,7 @@
  *
  * This code is based on drivers/scsi/ufs/ufshcd.c
  * Copyright (C) 2011-2013 Samsung India Software Operations
- * Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
  *
  * Authors:
  *	Santosh Yaraganavi <santosh.sy@samsung.com>
@@ -38,10 +38,145 @@
  */
 
 #include <linux/async.h>
+#include <scsi/ufs/ioctl.h>
 #include <linux/devfreq.h>
+#include <linux/nls.h>
+#include <linux/of.h>
+#include <linux/blkdev.h>
 
 #include "ufshcd.h"
-#include "unipro.h"
+#include "ufshci.h"
+#include "ufs_quirks.h"
+#include "ufs-debugfs.h"
+#include "ufs-qcom.h"
+
+#define CREATE_TRACE_POINTS
+#include <trace/events/ufs.h>
+
+#ifdef CONFIG_DEBUG_FS
+
+static int ufshcd_tag_req_type(struct request *rq)
+{
+	int rq_type = TS_WRITE;
+
+	if (!rq || !(rq->cmd_type & REQ_TYPE_FS))
+		rq_type = TS_NOT_SUPPORTED;
+	else if (rq->cmd_flags & REQ_FLUSH)
+		rq_type = TS_FLUSH;
+	else if (rq_data_dir(rq) == READ)
+		rq_type = (rq->cmd_flags & REQ_URGENT) ?
+			TS_URGENT_READ : TS_READ;
+	else if (rq->cmd_flags & REQ_URGENT)
+		rq_type = TS_URGENT_WRITE;
+
+	return rq_type;
+}
+
+static void ufshcd_update_error_stats(struct ufs_hba *hba, int type)
+{
+	ufsdbg_set_err_state(hba);
+	if (type < UFS_ERR_MAX)
+		hba->ufs_stats.err_stats[type]++;
+}
+
+static void ufshcd_update_tag_stats(struct ufs_hba *hba, int tag)
+{
+	struct request *rq =
+		hba->lrb[tag].cmd ? hba->lrb[tag].cmd->request : NULL;
+	u64 **tag_stats = hba->ufs_stats.tag_stats;
+	int rq_type;
+
+	if (!hba->ufs_stats.enabled)
+		return;
+
+	tag_stats[tag][TS_TAG]++;
+	if (!rq || !(rq->cmd_type & REQ_TYPE_FS))
+		return;
+
+	WARN_ON(hba->ufs_stats.q_depth > hba->nutrs);
+	rq_type = ufshcd_tag_req_type(rq);
+	if (!(rq_type < 0 || rq_type > TS_NUM_STATS))
+		tag_stats[hba->ufs_stats.q_depth++][rq_type]++;
+}
+
+static void ufshcd_update_tag_stats_completion(struct ufs_hba *hba,
+		struct scsi_cmnd *cmd)
+{
+	struct request *rq = cmd ? cmd->request : NULL;
+
+	if (rq && rq->cmd_type & REQ_TYPE_FS)
+		hba->ufs_stats.q_depth--;
+}
+
+static void update_req_stats(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
+{
+	int rq_type;
+	struct request *rq = lrbp->cmd ? lrbp->cmd->request : NULL;
+	s64 delta = ktime_us_delta(lrbp->complete_time_stamp,
+		lrbp->issue_time_stamp);
+
+	/* update general request statistics */
+	if (hba->ufs_stats.req_stats[TS_TAG].count == 0)
+		hba->ufs_stats.req_stats[TS_TAG].min = delta;
+	hba->ufs_stats.req_stats[TS_TAG].count++;
+	hba->ufs_stats.req_stats[TS_TAG].sum += delta;
+	if (delta > hba->ufs_stats.req_stats[TS_TAG].max)
+		hba->ufs_stats.req_stats[TS_TAG].max = delta;
+	if (delta < hba->ufs_stats.req_stats[TS_TAG].min)
+		hba->ufs_stats.req_stats[TS_TAG].min = delta;
+
+	rq_type = ufshcd_tag_req_type(rq);
+	if (rq_type == TS_NOT_SUPPORTED)
+		return;
+
+	/* update request type specific statistics */
+	if (hba->ufs_stats.req_stats[rq_type].count == 0)
+		hba->ufs_stats.req_stats[rq_type].min = delta;
+	hba->ufs_stats.req_stats[rq_type].count++;
+	hba->ufs_stats.req_stats[rq_type].sum += delta;
+	if (delta > hba->ufs_stats.req_stats[rq_type].max)
+		hba->ufs_stats.req_stats[rq_type].max = delta;
+	if (delta < hba->ufs_stats.req_stats[rq_type].min)
+		hba->ufs_stats.req_stats[rq_type].min = delta;
+}
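The request statistics above use a standard streaming min/max/sum/count accumulator; the mean latency is recovered later as sum/count. A minimal, self-contained sketch of the same accounting in plain C (names are illustrative, not the driver's):

#include <stdint.h>
#include <stdio.h>

struct req_stat {
	uint64_t count;
	int64_t min, max, sum;		/* latencies in microseconds */
};

static void req_stat_update(struct req_stat *s, int64_t delta_us)
{
	if (s->count == 0)
		s->min = delta_us;	/* seed min with the first sample */
	s->count++;
	s->sum += delta_us;
	if (delta_us > s->max)
		s->max = delta_us;
	if (delta_us < s->min)
		s->min = delta_us;
}

int main(void)
{
	struct req_stat s = { 0 };
	int64_t samples[] = { 120, 80, 450, 95 };

	for (unsigned int i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		req_stat_update(&s, samples[i]);
	printf("count=%llu min=%lld max=%lld avg=%lld us\n",
	       (unsigned long long)s.count, (long long)s.min,
	       (long long)s.max, (long long)(s.sum / (int64_t)s.count));
	return 0;
}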
+
+static void
+ufshcd_update_query_stats(struct ufs_hba *hba, enum query_opcode opcode, u8 idn)
+{
+	if (opcode < UPIU_QUERY_OPCODE_MAX && idn < MAX_QUERY_IDN)
+		hba->ufs_stats.query_stats_arr[opcode][idn]++;
+}
+
+#else
+static inline void ufshcd_update_tag_stats(struct ufs_hba *hba, int tag)
+{
+}
+
+static inline void ufshcd_update_tag_stats_completion(struct ufs_hba *hba,
+		struct scsi_cmnd *cmd)
+{
+}
+
+static inline void ufshcd_update_error_stats(struct ufs_hba *hba, int type)
+{
+}
+
+static inline
+void update_req_stats(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
+{
+}
+
+static inline
+void ufshcd_update_query_stats(struct ufs_hba *hba,
+			       enum query_opcode opcode, u8 idn)
+{
+}
+#endif
+
+#define PWR_INFO_MASK	0xF
+#define PWR_RX_OFFSET	4
+
+#define UFSHCD_REQ_SENSE_SIZE	18
 
 #define UFSHCD_ENABLE_INTRS	(UTP_TRANSFER_REQ_COMPL |\
 				 UTP_TASK_REQ_COMPL |\
@@ -55,16 +190,22 @@
 #define NOP_OUT_TIMEOUT    30 /* msecs */
 
 /* Query request retries */
-#define QUERY_REQ_RETRIES 10
+#define QUERY_REQ_RETRIES 3
 /* Query request timeout */
-#define QUERY_REQ_TIMEOUT 30 /* msec */
+#define QUERY_REQ_TIMEOUT 1500 /* 1.5 seconds */
 
 /* Task management command timeout */
 #define TM_CMD_TIMEOUT	100 /* msecs */
 
+/* maximum number of retries for a general UIC command  */
+#define UFS_UIC_COMMAND_RETRIES 3
+
 /* maximum number of link-startup retries */
 #define DME_LINKSTARTUP_RETRIES 3
 
+/* Maximum retries for Hibern8 enter */
+#define UIC_HIBERN8_ENTER_RETRIES 3
+
 /* maximum number of reset retries before giving up */
 #define MAX_HOST_RESET_RETRIES 5
 
@@ -74,6 +215,17 @@
 /* Interrupt aggregation default timeout, unit: 40us */
 #define INT_AGGR_DEF_TO	0x02
 
+/* default value of auto suspend is 3 seconds */
+#define UFSHCD_AUTO_SUSPEND_DELAY_MS 3000 /* millisecs */
+
+#define UFSHCD_CLK_GATING_DELAY_MS_PWR_SAVE	10
+#define UFSHCD_CLK_GATING_DELAY_MS_PERF		50
+
+/* IOCTL opcode for command - ufs set device read only */
+#define UFS_IOCTL_BLKROSET      BLKROSET
+
+#define UFSHCD_DEFAULT_LANES_PER_DIRECTION		2
+
 #define ufshcd_toggle_vreg(_dev, _vreg, _on)				\
 	({                                                              \
 		int _ret;                                               \
@@ -84,6 +236,9 @@
 		_ret;                                                   \
 	})
 
+#define ufshcd_hex_dump(prefix_str, buf, len) \
+print_hex_dump(KERN_ERR, prefix_str, DUMP_PREFIX_OFFSET, 16, 4, buf, len, false)
+
 static u32 ufs_query_desc_max_size[] = {
 	QUERY_DESC_DEVICE_MAX_SIZE,
 	QUERY_DESC_CONFIGURAION_MAX_SIZE,
@@ -119,9 +274,11 @@
 /* UFSHCD UIC layer error flags */
 enum {
 	UFSHCD_UIC_DL_PA_INIT_ERROR = (1 << 0), /* Data link layer error */
-	UFSHCD_UIC_NL_ERROR = (1 << 1), /* Network layer error */
-	UFSHCD_UIC_TL_ERROR = (1 << 2), /* Transport Layer error */
-	UFSHCD_UIC_DME_ERROR = (1 << 3), /* DME error */
+	UFSHCD_UIC_DL_NAC_RECEIVED_ERROR = (1 << 1), /* Data link layer error */
+	UFSHCD_UIC_DL_TCx_REPLAY_ERROR = (1 << 2), /* Data link layer error */
+	UFSHCD_UIC_NL_ERROR = (1 << 3), /* Network layer error */
+	UFSHCD_UIC_TL_ERROR = (1 << 4), /* Transport Layer error */
+	UFSHCD_UIC_DME_ERROR = (1 << 5), /* DME error */
 };
 
 /* Interrupt configuration options */
@@ -131,6 +288,8 @@
 	UFSHCD_INT_CLEAR,
 };
 
+#define DEFAULT_UFSHCD_DBG_PRINT_EN	UFSHCD_DBG_PRINT_ALL
+
 #define ufshcd_set_eh_in_progress(h) \
 	(h->eh_flags |= UFSHCD_EH_IN_PROGRESS)
 #define ufshcd_eh_in_progress(h) \
@@ -172,49 +331,590 @@
 	return ufs_pm_lvl_states[lvl].link_state;
 }
 
-static void ufshcd_tmc_handler(struct ufs_hba *hba);
+static inline enum ufs_pm_level
+ufs_get_desired_pm_lvl_for_dev_link_state(enum ufs_dev_pwr_mode dev_state,
+					enum uic_link_state link_state)
+{
+	enum ufs_pm_level lvl;
+
+	for (lvl = UFS_PM_LVL_0; lvl < UFS_PM_LVL_MAX; lvl++) {
+		if ((ufs_pm_lvl_states[lvl].dev_state == dev_state) &&
+			(ufs_pm_lvl_states[lvl].link_state == link_state))
+			return lvl;
+	}
+
+	/* if no match found, return the level 0 */
+	return UFS_PM_LVL_0;
+}
+
+static inline bool ufshcd_is_valid_pm_lvl(int lvl)
+{
+	if (lvl >= 0 && lvl < ARRAY_SIZE(ufs_pm_lvl_states))
+		return true;
+	else
+		return false;
+}
+
+static irqreturn_t ufshcd_intr(int irq, void *__hba);
+static irqreturn_t ufshcd_tmc_handler(struct ufs_hba *hba);
 static void ufshcd_async_scan(void *data, async_cookie_t cookie);
 static int ufshcd_reset_and_restore(struct ufs_hba *hba);
+static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd);
 static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag);
 static void ufshcd_hba_exit(struct ufs_hba *hba);
 static int ufshcd_probe_hba(struct ufs_hba *hba);
-static int __ufshcd_setup_clocks(struct ufs_hba *hba, bool on,
-				 bool skip_ref_clk);
-static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on);
-static int ufshcd_uic_hibern8_exit(struct ufs_hba *hba);
-static int ufshcd_uic_hibern8_enter(struct ufs_hba *hba);
+static int ufshcd_enable_clocks(struct ufs_hba *hba);
+static int ufshcd_disable_clocks(struct ufs_hba *hba,
+				 bool is_gating_context);
+static int ufshcd_disable_clocks_skip_ref_clk(struct ufs_hba *hba,
+					      bool is_gating_context);
+static void ufshcd_hold_all(struct ufs_hba *hba);
+static void ufshcd_release_all(struct ufs_hba *hba);
+static int ufshcd_set_vccq_rail_unused(struct ufs_hba *hba, bool unused);
 static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba);
+static inline void ufshcd_save_tstamp_of_last_dme_cmd(struct ufs_hba *hba);
 static int ufshcd_host_reset_and_restore(struct ufs_hba *hba);
-static irqreturn_t ufshcd_intr(int irq, void *__hba);
-static int ufshcd_config_pwr_mode(struct ufs_hba *hba,
-		struct ufs_pa_layer_attr *desired_pwr_mode);
-static int ufshcd_change_power_mode(struct ufs_hba *hba,
-			     struct ufs_pa_layer_attr *pwr_mode);
+static void ufshcd_resume_clkscaling(struct ufs_hba *hba);
+static void ufshcd_suspend_clkscaling(struct ufs_hba *hba);
+static void __ufshcd_suspend_clkscaling(struct ufs_hba *hba);
+static void ufshcd_release_all(struct ufs_hba *hba);
+static void ufshcd_hba_vreg_set_lpm(struct ufs_hba *hba);
+static void ufshcd_hba_vreg_set_hpm(struct ufs_hba *hba);
+static int ufshcd_devfreq_target(struct device *dev,
+				unsigned long *freq, u32 flags);
+static int ufshcd_devfreq_get_dev_status(struct device *dev,
+		struct devfreq_dev_status *stat);
+
+#if IS_ENABLED(CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND)
+static struct devfreq_simple_ondemand_data ufshcd_ondemand_data = {
+	.upthreshold = 35,
+	.downdifferential = 30,
+	.simple_scaling = 1,
+};
+
+static void *gov_data = &ufshcd_ondemand_data;
+#else
+static void *gov_data;
+#endif
+
+static struct devfreq_dev_profile ufs_devfreq_profile = {
+	.polling_ms	= 40,
+	.target		= ufshcd_devfreq_target,
+	.get_dev_status	= ufshcd_devfreq_get_dev_status,
+};
 
-static inline int ufshcd_enable_irq(struct ufs_hba *hba)
+static inline bool ufshcd_valid_tag(struct ufs_hba *hba, int tag)
 {
-	int ret = 0;
+	return tag >= 0 && tag < hba->nutrs;
+}
 
+static inline void ufshcd_enable_irq(struct ufs_hba *hba)
+{
 	if (!hba->is_irq_enabled) {
-		ret = request_irq(hba->irq, ufshcd_intr, IRQF_SHARED, UFSHCD,
-				hba);
-		if (ret)
-			dev_err(hba->dev, "%s: request_irq failed, ret=%d\n",
-				__func__, ret);
+		enable_irq(hba->irq);
 		hba->is_irq_enabled = true;
 	}
-
-	return ret;
 }
 
 static inline void ufshcd_disable_irq(struct ufs_hba *hba)
 {
 	if (hba->is_irq_enabled) {
-		free_irq(hba->irq, hba);
+		disable_irq(hba->irq);
 		hba->is_irq_enabled = false;
 	}
 }
 
+void ufshcd_scsi_unblock_requests(struct ufs_hba *hba)
+{
+	unsigned long flags;
+	bool unblock = false;
+
+	spin_lock_irqsave(hba->host->host_lock, flags);
+	hba->scsi_block_reqs_cnt--;
+	unblock = !hba->scsi_block_reqs_cnt;
+	spin_unlock_irqrestore(hba->host->host_lock, flags);
+	if (unblock)
+		scsi_unblock_requests(hba->host);
+}
+EXPORT_SYMBOL(ufshcd_scsi_unblock_requests);
+
+static inline void __ufshcd_scsi_block_requests(struct ufs_hba *hba)
+{
+	if (!hba->scsi_block_reqs_cnt++)
+		scsi_block_requests(hba->host);
+}
+
+void ufshcd_scsi_block_requests(struct ufs_hba *hba)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(hba->host->host_lock, flags);
+	__ufshcd_scsi_block_requests(hba);
+	spin_unlock_irqrestore(hba->host->host_lock, flags);
+}
+EXPORT_SYMBOL(ufshcd_scsi_block_requests);
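ufshcd_scsi_block_requests() and ufshcd_scsi_unblock_requests() nest via scsi_block_reqs_cnt: only the 0->1 transition blocks the SCSI midlayer and only the 1->0 transition unblocks it, so overlapping callers compose safely. A simplified userspace sketch of that counting discipline, with a mutex standing in for the host spinlock (all names hypothetical):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int block_cnt;

static void backend_block(bool on)	/* stands in for scsi_(un)block_requests */
{
	printf("backend %s\n", on ? "blocked" : "unblocked");
}

static void block_requests(void)
{
	pthread_mutex_lock(&lock);
	if (!block_cnt++)		/* only the first holder blocks */
		backend_block(true);
	pthread_mutex_unlock(&lock);
}

static void unblock_requests(void)
{
	bool unblock;

	pthread_mutex_lock(&lock);
	unblock = (--block_cnt == 0);	/* only the last holder unblocks */
	pthread_mutex_unlock(&lock);
	if (unblock)
		backend_block(false);
}

int main(void)
{
	block_requests();	/* blocks */
	block_requests();	/* nested: no-op */
	unblock_requests();	/* still one holder: no-op */
	unblock_requests();	/* unblocks */
	return 0;
}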
+
+static int ufshcd_device_reset_ctrl(struct ufs_hba *hba, bool ctrl)
+{
+	int ret = 0;
+
+	if (!hba->pctrl)
+		return 0;
+
+	/* Assert reset if ctrl == true */
+	if (ctrl)
+		ret = pinctrl_select_state(hba->pctrl,
+			pinctrl_lookup_state(hba->pctrl, "dev-reset-assert"));
+	else
+		ret = pinctrl_select_state(hba->pctrl,
+			pinctrl_lookup_state(hba->pctrl, "dev-reset-deassert"));
+
+	if (ret < 0)
+		dev_err(hba->dev, "%s: %s failed with err %d\n",
+			__func__, ctrl ? "Assert" : "Deassert", ret);
+
+	return ret;
+}
+
+static inline int ufshcd_assert_device_reset(struct ufs_hba *hba)
+{
+	return ufshcd_device_reset_ctrl(hba, true);
+}
+
+static inline int ufshcd_deassert_device_reset(struct ufs_hba *hba)
+{
+	return ufshcd_device_reset_ctrl(hba, false);
+}
+
+static int ufshcd_reset_device(struct ufs_hba *hba)
+{
+	int ret;
+
+	/* reset the connected UFS device */
+	ret = ufshcd_assert_device_reset(hba);
+	if (ret)
+		goto out;
+	/*
+	 * The reset signal is active low.
+	 * The UFS device shall detect a positive or negative RST_n pulse
+	 * width of 1us or more.
+	 * To be on the safe side, keep the reset low for at least 10us.
+	 */
+	usleep_range(10, 15);
+
+	ret = ufshcd_deassert_device_reset(hba);
+	if (ret)
+		goto out;
+	/* same as assert, wait for at least 10us after deassert */
+	usleep_range(10, 15);
+out:
+	return ret;
+}
+
+/* replace a non-printable or non-ASCII character with a space */
+static inline void ufshcd_remove_non_printable(char *val)
+{
+	if (!val || !*val)
+		return;
+
+	if (*val < 0x20 || *val > 0x7e)
+		*val = ' ';
+}
+
+#define UFSHCD_MAX_CMD_LOGGING	200
+
+#ifdef CONFIG_TRACEPOINTS
+static inline void ufshcd_add_command_trace(struct ufs_hba *hba,
+			struct ufshcd_cmd_log_entry *entry, u8 opcode)
+{
+	if (trace_ufshcd_command_enabled()) {
+		u32 intr = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
+
+		trace_ufshcd_command(dev_name(hba->dev), entry->str, entry->tag,
+				     entry->doorbell, entry->transfer_len, intr,
+				     entry->lba, opcode);
+	}
+}
+#else
+static inline void ufshcd_add_command_trace(struct ufs_hba *hba,
+			struct ufshcd_cmd_log_entry *entry, u8 opcode)
+{
+}
+#endif
+
+#ifdef CONFIG_SCSI_UFSHCD_CMD_LOGGING
+static void ufshcd_cmd_log_init(struct ufs_hba *hba)
+{
+	/* Allocate log entries */
+	if (!hba->cmd_log.entries) {
+		hba->cmd_log.entries = kzalloc(UFSHCD_MAX_CMD_LOGGING *
+			sizeof(struct ufshcd_cmd_log_entry), GFP_KERNEL);
+		if (!hba->cmd_log.entries)
+			return;
+		dev_dbg(hba->dev, "%s: cmd_log.entries initialized\n",
+				__func__);
+	}
+}
+
+static void __ufshcd_cmd_log(struct ufs_hba *hba, char *str, char *cmd_type,
+			     unsigned int tag, u8 cmd_id, u8 idn, u8 lun,
+			     sector_t lba, int transfer_len, u8 opcode)
+{
+	struct ufshcd_cmd_log_entry *entry;
+
+	if (!hba->cmd_log.entries)
+		return;
+
+	entry = &hba->cmd_log.entries[hba->cmd_log.pos];
+	entry->lun = lun;
+	entry->str = str;
+	entry->cmd_type = cmd_type;
+	entry->cmd_id = cmd_id;
+	entry->lba = lba;
+	entry->transfer_len = transfer_len;
+	entry->idn = idn;
+	entry->doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
+	entry->tag = tag;
+	entry->tstamp = ktime_get();
+	entry->outstanding_reqs = hba->outstanding_reqs;
+	entry->seq_num = hba->cmd_log.seq_num;
+	hba->cmd_log.seq_num++;
+	hba->cmd_log.pos =
+			(hba->cmd_log.pos + 1) % UFSHCD_MAX_CMD_LOGGING;
+
+	ufshcd_add_command_trace(hba, entry, opcode);
+}
+
+static void ufshcd_cmd_log(struct ufs_hba *hba, char *str, char *cmd_type,
+	unsigned int tag, u8 cmd_id, u8 idn)
+{
+	__ufshcd_cmd_log(hba, str, cmd_type, tag, cmd_id, idn,
+			 0xff, (sector_t)-1, -1, -1);
+}
+
+static void ufshcd_dme_cmd_log(struct ufs_hba *hba, char *str, u8 cmd_id)
+{
+	ufshcd_cmd_log(hba, str, "dme", 0xff, cmd_id, 0xff);
+}
+
+static void ufshcd_print_cmd_log(struct ufs_hba *hba)
+{
+	int i;
+	int pos;
+	struct ufshcd_cmd_log_entry *p;
+
+	if (!hba->cmd_log.entries)
+		return;
+
+	pos = hba->cmd_log.pos;
+	for (i = 0; i < UFSHCD_MAX_CMD_LOGGING; i++) {
+		p = &hba->cmd_log.entries[pos];
+		pos = (pos + 1) % UFSHCD_MAX_CMD_LOGGING;
+
+		if (ktime_to_us(p->tstamp)) {
+			pr_err("%s: %s: seq_no=%u lun=0x%x cmd_id=0x%02x lba=0x%llx txfer_len=%d tag=%u, doorbell=0x%x outstanding=0x%x idn=%d time=%lld us\n",
+				p->cmd_type, p->str, p->seq_num,
+				p->lun, p->cmd_id, (unsigned long long)p->lba,
+				p->transfer_len, p->tag, p->doorbell,
+				p->outstanding_reqs, p->idn,
+				ktime_to_us(p->tstamp));
+			usleep_range(1000, 1100);
+		}
+	}
+}
+#else
+static void ufshcd_cmd_log_init(struct ufs_hba *hba)
+{
+}
+
+static void __ufshcd_cmd_log(struct ufs_hba *hba, char *str, char *cmd_type,
+			     unsigned int tag, u8 cmd_id, u8 idn, u8 lun,
+			     sector_t lba, int transfer_len, u8 opcode)
+{
+	struct ufshcd_cmd_log_entry entry;
+
+	entry.str = str;
+	entry.lba = lba;
+	entry.transfer_len = transfer_len;
+	entry.doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
+	entry.tag = tag;
+
+	ufshcd_add_command_trace(hba, &entry, opcode);
+}
+
+static void ufshcd_dme_cmd_log(struct ufs_hba *hba, char *str, u8 cmd_id)
+{
+}
+
+static void ufshcd_print_cmd_log(struct ufs_hba *hba)
+{
+}
+#endif
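The command log above is a fixed-size circular buffer: cmd_log.pos always indexes the oldest slot, so ufshcd_print_cmd_log() can start at pos and walk UFSHCD_MAX_CMD_LOGGING entries forward to print the history in chronological order. A standalone sketch of that ring discipline, with the entry trimmed to two fields (sizes and names are illustrative):

#include <stdio.h>

#define LOG_SIZE 8

struct log_entry {
	unsigned int seq;
	char op;
};

static struct log_entry ring[LOG_SIZE];
static unsigned int pos, seq_num;

static void log_cmd(char op)
{
	ring[pos].seq = seq_num++;
	ring[pos].op = op;
	pos = (pos + 1) % LOG_SIZE;	/* overwrite the oldest when full */
}

static void dump_log(void)
{
	unsigned int p = pos;		/* the oldest entry lives at pos */

	for (int i = 0; i < LOG_SIZE; i++) {
		if (ring[p].op)		/* skip never-written slots */
			printf("seq=%u op=%c\n", ring[p].seq, ring[p].op);
		p = (p + 1) % LOG_SIZE;
	}
}

int main(void)
{
	for (char op = 'a'; op <= 'l'; op++)	/* 12 writes into 8 slots */
		log_cmd(op);
	dump_log();				/* prints e..l, oldest first */
	return 0;
}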
+
+#ifdef CONFIG_TRACEPOINTS
+static inline void ufshcd_cond_add_cmd_trace(struct ufs_hba *hba,
+					unsigned int tag, const char *str)
+{
+	struct ufshcd_lrb *lrbp;
+	char *cmd_type = NULL;
+	u8 opcode = 0;
+	u8 cmd_id = 0, idn = 0;
+	sector_t lba = -1;
+	int transfer_len = -1;
+
+	lrbp = &hba->lrb[tag];
+
+	if (lrbp->cmd) { /* data phase exists */
+		opcode = (u8)(*lrbp->cmd->cmnd);
+		if ((opcode == READ_10) || (opcode == WRITE_10)) {
+			/*
+			 * Currently we only fully trace read(10) and write(10)
+			 * commands
+			 */
+			if (lrbp->cmd->request && lrbp->cmd->request->bio)
+				lba =
+				lrbp->cmd->request->bio->bi_iter.bi_sector;
+			transfer_len = be32_to_cpu(
+				lrbp->ucd_req_ptr->sc.exp_data_transfer_len);
+		}
+	}
+
+	if (lrbp->cmd && (lrbp->command_type == UTP_CMD_TYPE_SCSI)) {
+		cmd_type = "scsi";
+		cmd_id = (u8)(*lrbp->cmd->cmnd);
+	} else if (lrbp->command_type == UTP_CMD_TYPE_DEV_MANAGE) {
+		if (hba->dev_cmd.type == DEV_CMD_TYPE_NOP) {
+			cmd_type = "nop";
+			cmd_id = 0;
+		} else if (hba->dev_cmd.type == DEV_CMD_TYPE_QUERY) {
+			cmd_type = "query";
+			cmd_id = hba->dev_cmd.query.request.upiu_req.opcode;
+			idn = hba->dev_cmd.query.request.upiu_req.idn;
+		}
+	}
+
+	__ufshcd_cmd_log(hba, (char *) str, cmd_type, tag, cmd_id, idn,
+			 lrbp->lun, lba, transfer_len, opcode);
+}
+#else
+static inline void ufshcd_cond_add_cmd_trace(struct ufs_hba *hba,
+					unsigned int tag, const char *str)
+{
+}
+#endif
+
+static void ufshcd_print_clk_freqs(struct ufs_hba *hba)
+{
+	struct ufs_clk_info *clki;
+	struct list_head *head = &hba->clk_list_head;
+
+	if (!(hba->ufshcd_dbg_print & UFSHCD_DBG_PRINT_CLK_FREQ_EN))
+		return;
+
+	if (!head || list_empty(head))
+		return;
+
+	list_for_each_entry(clki, head, list) {
+		if (!IS_ERR_OR_NULL(clki->clk) && clki->min_freq &&
+				clki->max_freq)
+			dev_err(hba->dev, "clk: %s, rate: %u\n",
+					clki->name, clki->curr_freq);
+	}
+}
+
+static void ufshcd_print_uic_err_hist(struct ufs_hba *hba,
+		struct ufs_uic_err_reg_hist *err_hist, char *err_name)
+{
+	int i;
+
+	if (!(hba->ufshcd_dbg_print & UFSHCD_DBG_PRINT_UIC_ERR_HIST_EN))
+		return;
+
+	for (i = 0; i < UIC_ERR_REG_HIST_LENGTH; i++) {
+		int p = (i + err_hist->pos - 1) % UIC_ERR_REG_HIST_LENGTH;
+
+		if (err_hist->reg[p] == 0)
+			continue;
+		dev_err(hba->dev, "%s[%d] = 0x%x at %lld us", err_name, i,
+			err_hist->reg[p], ktime_to_us(err_hist->tstamp[p]));
+	}
+}
+
+static inline void __ufshcd_print_host_regs(struct ufs_hba *hba, bool no_sleep)
+{
+	if (!(hba->ufshcd_dbg_print & UFSHCD_DBG_PRINT_HOST_REGS_EN))
+		return;
+
+	/*
+	 * hex_dump reads its data without the readl macro. This might
+	 * cause inconsistency issues on some platforms, as the printed
+	 * values may come from the cache rather than being the most recent.
+	 * To know whether you are looking at an un-cached version, verify
+	 * that the IORESOURCE_MEM flag is set when xxx_get_resource() is
+	 * invoked during the platform/PCI probe function.
+	 */
+	ufshcd_hex_dump("host regs: ", hba->mmio_base, UFSHCI_REG_SPACE_SIZE);
+	dev_err(hba->dev, "hba->ufs_version = 0x%x, hba->capabilities = 0x%x",
+		hba->ufs_version, hba->capabilities);
+	dev_err(hba->dev,
+		"hba->outstanding_reqs = 0x%x, hba->outstanding_tasks = 0x%x",
+		(u32)hba->outstanding_reqs, (u32)hba->outstanding_tasks);
+	dev_err(hba->dev,
+		"last_hibern8_exit_tstamp at %lld us, hibern8_exit_cnt = %d",
+		ktime_to_us(hba->ufs_stats.last_hibern8_exit_tstamp),
+		hba->ufs_stats.hibern8_exit_cnt);
+
+	ufshcd_print_uic_err_hist(hba, &hba->ufs_stats.pa_err, "pa_err");
+	ufshcd_print_uic_err_hist(hba, &hba->ufs_stats.dl_err, "dl_err");
+	ufshcd_print_uic_err_hist(hba, &hba->ufs_stats.nl_err, "nl_err");
+	ufshcd_print_uic_err_hist(hba, &hba->ufs_stats.tl_err, "tl_err");
+	ufshcd_print_uic_err_hist(hba, &hba->ufs_stats.dme_err, "dme_err");
+
+	ufshcd_print_clk_freqs(hba);
+
+	ufshcd_vops_dbg_register_dump(hba, no_sleep);
+}
+
+static void ufshcd_print_host_regs(struct ufs_hba *hba)
+{
+	__ufshcd_print_host_regs(hba, false);
+}
+
+static
+void ufshcd_print_trs(struct ufs_hba *hba, unsigned long bitmap, bool pr_prdt)
+{
+	struct ufshcd_lrb *lrbp;
+	int prdt_length;
+	int tag;
+
+	if (!(hba->ufshcd_dbg_print & UFSHCD_DBG_PRINT_TRS_EN))
+		return;
+
+	for_each_set_bit(tag, &bitmap, hba->nutrs) {
+		lrbp = &hba->lrb[tag];
+
+		dev_err(hba->dev, "UPIU[%d] - issue time %lld us",
+				tag, ktime_to_us(lrbp->issue_time_stamp));
+		dev_err(hba->dev,
+			"UPIU[%d] - Transfer Request Descriptor phys@0x%llx",
+			tag, (u64)lrbp->utrd_dma_addr);
+		ufshcd_hex_dump("UPIU TRD: ", lrbp->utr_descriptor_ptr,
+				sizeof(struct utp_transfer_req_desc));
+		dev_err(hba->dev, "UPIU[%d] - Request UPIU phys@0x%llx", tag,
+			(u64)lrbp->ucd_req_dma_addr);
+		ufshcd_hex_dump("UPIU REQ: ", lrbp->ucd_req_ptr,
+				sizeof(struct utp_upiu_req));
+		dev_err(hba->dev, "UPIU[%d] - Response UPIU phys@0x%llx", tag,
+			(u64)lrbp->ucd_rsp_dma_addr);
+		ufshcd_hex_dump("UPIU RSP: ", lrbp->ucd_rsp_ptr,
+				sizeof(struct utp_upiu_rsp));
+		prdt_length =
+			le16_to_cpu(lrbp->utr_descriptor_ptr->prd_table_length);
+		dev_err(hba->dev, "UPIU[%d] - PRDT - %d entries  phys@0x%llx",
+			tag, prdt_length, (u64)lrbp->ucd_prdt_dma_addr);
+		if (pr_prdt)
+			ufshcd_hex_dump("UPIU PRDT: ", lrbp->ucd_prdt_ptr,
+				sizeof(struct ufshcd_sg_entry) * prdt_length);
+	}
+}
+
+static void ufshcd_print_tmrs(struct ufs_hba *hba, unsigned long bitmap)
+{
+	struct utp_task_req_desc *tmrdp;
+	int tag;
+
+	if (!(hba->ufshcd_dbg_print & UFSHCD_DBG_PRINT_TMRS_EN))
+		return;
+
+	for_each_set_bit(tag, &bitmap, hba->nutmrs) {
+		tmrdp = &hba->utmrdl_base_addr[tag];
+		dev_err(hba->dev, "TM[%d] - Task Management Header", tag);
+		ufshcd_hex_dump("TM TRD: ", &tmrdp->header,
+				sizeof(struct request_desc_header));
+		dev_err(hba->dev, "TM[%d] - Task Management Request UPIU",
+				tag);
+		ufshcd_hex_dump("TM REQ: ", tmrdp->task_req_upiu,
+				sizeof(struct utp_upiu_req));
+		dev_err(hba->dev, "TM[%d] - Task Management Response UPIU",
+				tag);
+		ufshcd_hex_dump("TM RSP: ", tmrdp->task_rsp_upiu,
+				sizeof(struct utp_task_req_desc));
+	}
+}
+
+static void ufshcd_print_fsm_state(struct ufs_hba *hba)
+{
+	int err = 0, tx_fsm_val = 0, rx_fsm_val = 0;
+
+	err = ufshcd_dme_get(hba,
+			UIC_ARG_MIB_SEL(MPHY_TX_FSM_STATE,
+			UIC_ARG_MPHY_TX_GEN_SEL_INDEX(0)),
+			&tx_fsm_val);
+	dev_err(hba->dev, "%s: TX_FSM_STATE = %u, err = %d\n", __func__,
+			tx_fsm_val, err);
+	err = ufshcd_dme_get(hba,
+			UIC_ARG_MIB_SEL(MPHY_RX_FSM_STATE,
+			UIC_ARG_MPHY_RX_GEN_SEL_INDEX(0)),
+			&rx_fsm_val);
+	dev_err(hba->dev, "%s: RX_FSM_STATE = %u, err = %d\n", __func__,
+			rx_fsm_val, err);
+}
+
+static void ufshcd_print_host_state(struct ufs_hba *hba)
+{
+	if (!(hba->ufshcd_dbg_print & UFSHCD_DBG_PRINT_HOST_STATE_EN))
+		return;
+
+	dev_err(hba->dev, "UFS Host state=%d\n", hba->ufshcd_state);
+	dev_err(hba->dev, "lrb in use=0x%lx, outstanding reqs=0x%lx tasks=0x%lx\n",
+		hba->lrb_in_use, hba->outstanding_tasks, hba->outstanding_reqs);
+	dev_err(hba->dev, "saved_err=0x%x, saved_uic_err=0x%x, saved_ce_err=0x%x\n",
+		hba->saved_err, hba->saved_uic_err, hba->saved_ce_err);
+	dev_err(hba->dev, "Device power mode=%d, UIC link state=%d\n",
+		hba->curr_dev_pwr_mode, hba->uic_link_state);
+	dev_err(hba->dev, "PM in progress=%d, sys. suspended=%d\n",
+		hba->pm_op_in_progress, hba->is_sys_suspended);
+	dev_err(hba->dev, "Auto BKOPS=%d, Host self-block=%d\n",
+		hba->auto_bkops_enabled, hba->host->host_self_blocked);
+	dev_err(hba->dev, "Clk gate=%d, hibern8 on idle=%d\n",
+		hba->clk_gating.state, hba->hibern8_on_idle.state);
+	dev_err(hba->dev, "error handling flags=0x%x, req. abort count=%d\n",
+		hba->eh_flags, hba->req_abort_count);
+	dev_err(hba->dev, "Host capabilities=0x%x, caps=0x%x\n",
+		hba->capabilities, hba->caps);
+	dev_err(hba->dev, "quirks=0x%x, dev. quirks=0x%x\n", hba->quirks,
+		hba->dev_quirks);
+}
+
+/**
+ * ufshcd_print_pwr_info - print power params as saved in hba
+ * power info
+ * @hba: per-adapter instance
+ */
+static void ufshcd_print_pwr_info(struct ufs_hba *hba)
+{
+	char *names[] = {
+		"INVALID MODE",
+		"FAST MODE",
+		"SLOW_MODE",
+		"INVALID MODE",
+		"FASTAUTO_MODE",
+		"SLOWAUTO_MODE",
+		"INVALID MODE",
+	};
+
+	if (!(hba->ufshcd_dbg_print & UFSHCD_DBG_PRINT_PWR_EN))
+		return;
+
+	dev_err(hba->dev, "%s:[RX, TX]: gear=[%d, %d], lane[%d, %d], pwr[%s, %s], rate = %d\n",
+		 __func__,
+		 hba->pwr_info.gear_rx, hba->pwr_info.gear_tx,
+		 hba->pwr_info.lane_rx, hba->pwr_info.lane_tx,
+		 names[hba->pwr_info.pwr_rx],
+		 names[hba->pwr_info.pwr_tx],
+		 hba->pwr_info.hs_rate);
+}
+
 /*
  * ufshcd_wait_for_register - wait for register value to change
  * @hba - per-adapter interface
@@ -223,11 +923,12 @@
  * @val - wait condition
  * @interval_us - polling interval in microsecs
  * @timeout_ms - timeout in millisecs
- *
+ * @can_sleep - perform sleep or just spin
  * Returns -ETIMEDOUT on error, zero on success
  */
-static int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask,
-		u32 val, unsigned long interval_us, unsigned long timeout_ms)
+int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask,
+				u32 val, unsigned long interval_us,
+				unsigned long timeout_ms, bool can_sleep)
 {
 	int err = 0;
 	unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);
@@ -236,9 +937,10 @@
 	val = val & mask;
 
 	while ((ufshcd_readl(hba, reg) & mask) != val) {
-		/* wakeup within 50us of expiry */
+		if (can_sleep)
 		usleep_range(interval_us, interval_us + 50);
-
+		else
+			udelay(interval_us);
 		if (time_after(jiffies, timeout)) {
 			if ((ufshcd_readl(hba, reg) & mask) != val)
 				err = -ETIMEDOUT;
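ufshcd_wait_for_register() is the usual poll-with-deadline idiom, with can_sleep selecting a sleeping wait or a busy wait for atomic context. A userspace approximation of the same shape, with a volatile variable standing in for the MMIO register (all names are stand-ins):

#include <stdbool.h>
#include <stdint.h>
#include <time.h>
#include <errno.h>

static volatile uint32_t fake_reg;	/* stands in for the MMIO register */

static uint64_t now_ms(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000 + ts.tv_nsec / 1000000;
}

/* Poll until (reg & mask) == (val & mask) or timeout_ms elapses. */
static int wait_for_reg(uint32_t mask, uint32_t val,
			unsigned int interval_us, unsigned int timeout_ms,
			bool can_sleep)
{
	uint64_t deadline = now_ms() + timeout_ms;

	val &= mask;
	while ((fake_reg & mask) != val) {
		if (can_sleep) {
			struct timespec d = { 0, interval_us * 1000L };
			nanosleep(&d, NULL);	/* sleeping wait */
		}
		/* else: busy-spin; in the kernel this is udelay() */
		if (now_ms() > deadline)
			return ((fake_reg & mask) != val) ? -ETIMEDOUT : 0;
	}
	return 0;
}

int main(void)
{
	fake_reg = 0x5;
	return wait_for_reg(0x1, 0x1, 100, 10, true);	/* already set: 0 */
}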
@@ -257,10 +959,27 @@
  */
 static inline u32 ufshcd_get_intr_mask(struct ufs_hba *hba)
 {
-	if (hba->ufs_version == UFSHCI_VERSION_10)
-		return INTERRUPT_MASK_ALL_VER_10;
-	else
-		return INTERRUPT_MASK_ALL_VER_11;
+	u32 intr_mask = 0;
+
+	switch (hba->ufs_version) {
+	case UFSHCI_VERSION_10:
+		intr_mask = INTERRUPT_MASK_ALL_VER_10;
+		break;
+	case UFSHCI_VERSION_11:
+	case UFSHCI_VERSION_20:
+		intr_mask = INTERRUPT_MASK_ALL_VER_11;
+		break;
+	case UFSHCI_VERSION_21:
+	default:
+		intr_mask = INTERRUPT_MASK_ALL_VER_21;
+	}
+
+	if (!ufshcd_is_crypto_supported(hba))
+		intr_mask &= ~CRYPTO_ENGINE_FATAL_ERROR;
+
+	return intr_mask;
 }
 
 /**
@@ -360,6 +1079,16 @@
 }
 
 /**
+ * ufshcd_outstanding_req_clear - Clear a bit in outstanding request field
+ * @hba: per adapter instance
+ * @tag: position of the bit to be cleared
+ */
+static inline void ufshcd_outstanding_req_clear(struct ufs_hba *hba, int tag)
+{
+	__clear_bit(tag, &hba->outstanding_reqs);
+}
+
+/**
  * ufshcd_get_lists_status - Check UCRDY, UTRLRDY and UTMRLRDY
  * @reg: Register value of host controller status
  *
@@ -374,11 +1103,9 @@
 	 *  1		UTRLRDY
 	 *  2		UTMRLRDY
 	 *  3		UCRDY
-	 *  4		HEI
-	 *  5		DEI
-	 * 6-7		reserved
+	 * 4-7		reserved
 	 */
-	return (((reg) & (0xFF)) >> 1) ^ (0x07);
+	return ((reg & 0xFF) >> 1) ^ 0x07;
 }
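The simplified return expression deserves a note: bits 1..3 of the status register are UTRLRDY, UTMRLRDY and UCRDY, so the shift moves them down to bits 0..2 and the XOR with 0x07 inverts them; the result is zero exactly when all three ready bits are set (and the reserved bits are clear). A quick standalone check of that bit math:

#include <assert.h>
#include <stdint.h>

/* Same expression as ufshcd_get_lists_status(): 0 means all lists ready. */
static uint32_t lists_not_ready(uint32_t reg)
{
	return ((reg & 0xFF) >> 1) ^ 0x07;
}

int main(void)
{
	uint32_t ready = (1u << 1) | (1u << 2) | (1u << 3);	/* UTRLRDY|UTMRLRDY|UCRDY */

	assert(lists_not_ready(ready) == 0);		/* everything ready */
	assert(lists_not_ready(ready & ~(1u << 2)));	/* UTMRLRDY missing */
	assert(lists_not_ready(0) == 0x07);		/* nothing ready */
	return 0;
}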
 
 /**
@@ -514,7 +1241,11 @@
  */
 static inline void ufshcd_hba_start(struct ufs_hba *hba)
 {
-	ufshcd_writel(hba, CONTROLLER_ENABLE, REG_CONTROLLER_ENABLE);
+	u32 val = CONTROLLER_ENABLE;
+
+	if (ufshcd_is_crypto_supported(hba))
+		val |= CRYPTO_GENERAL_ENABLE;
+	ufshcd_writel(hba, val, REG_CONTROLLER_ENABLE);
 }
 
 /**
@@ -528,6 +1259,153 @@
 	return (ufshcd_readl(hba, REG_CONTROLLER_ENABLE) & 0x1) ? 0 : 1;
 }
 
+static const char *ufschd_uic_link_state_to_string(
+			enum uic_link_state state)
+{
+	switch (state) {
+	case UIC_LINK_OFF_STATE:	return "OFF";
+	case UIC_LINK_ACTIVE_STATE:	return "ACTIVE";
+	case UIC_LINK_HIBERN8_STATE:	return "HIBERN8";
+	default:			return "UNKNOWN";
+	}
+}
+
+static const char *ufschd_ufs_dev_pwr_mode_to_string(
+			enum ufs_dev_pwr_mode state)
+{
+	switch (state) {
+	case UFS_ACTIVE_PWR_MODE:	return "ACTIVE";
+	case UFS_SLEEP_PWR_MODE:	return "SLEEP";
+	case UFS_POWERDOWN_PWR_MODE:	return "POWERDOWN";
+	default:			return "UNKNOWN";
+	}
+}
+
+u32 ufshcd_get_local_unipro_ver(struct ufs_hba *hba)
+{
+	/* HCI versions 1.0 and 1.1 support UniPro 1.41 */
+	if ((hba->ufs_version == UFSHCI_VERSION_10) ||
+	    (hba->ufs_version == UFSHCI_VERSION_11))
+		return UFS_UNIPRO_VER_1_41;
+	else
+		return UFS_UNIPRO_VER_1_6;
+}
+EXPORT_SYMBOL(ufshcd_get_local_unipro_ver);
+
+static bool ufshcd_is_unipro_pa_params_tuning_req(struct ufs_hba *hba)
+{
+	/*
+	 * If both host and device support UniPro ver1.6 or later, PA layer
+	 * parameters tuning happens during link startup itself.
+	 *
+	 * We can manually tune PA layer parameters if either host or device
+	 * doesn't support UniPro ver 1.6 or later. But to keep manual tuning
+	 * logic simple, we will only do manual tuning if local unipro version
+	 * doesn't support ver1.6 or later.
+	 */
+	if (ufshcd_get_local_unipro_ver(hba) < UFS_UNIPRO_VER_1_6)
+		return true;
+	else
+		return false;
+}
+
+/**
+ * ufshcd_set_clk_freq - set UFS controller clock frequencies
+ * @hba: per adapter instance
+ * @scale_up: if true, set the maximum possible frequency, otherwise set the
+ *	      minimum frequency
+ *
+ * Returns 0 if successful
+ * Returns < 0 on error
+ */
+static int ufshcd_set_clk_freq(struct ufs_hba *hba, bool scale_up)
+{
+	int ret = 0;
+	struct ufs_clk_info *clki;
+	struct list_head *head = &hba->clk_list_head;
+
+	if (!head || list_empty(head))
+		goto out;
+
+	list_for_each_entry(clki, head, list) {
+		if (!IS_ERR_OR_NULL(clki->clk)) {
+			if (scale_up && clki->max_freq) {
+				if (clki->curr_freq == clki->max_freq)
+					continue;
+
+				ret = clk_set_rate(clki->clk, clki->max_freq);
+				if (ret) {
+					dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
+						__func__, clki->name,
+						clki->max_freq, ret);
+					break;
+				}
+				trace_ufshcd_clk_scaling(dev_name(hba->dev),
+						"scaled up", clki->name,
+						clki->curr_freq,
+						clki->max_freq);
+				clki->curr_freq = clki->max_freq;
+
+			} else if (!scale_up && clki->min_freq) {
+				if (clki->curr_freq == clki->min_freq)
+					continue;
+
+				ret = clk_set_rate(clki->clk, clki->min_freq);
+				if (ret) {
+					dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
+						__func__, clki->name,
+						clki->min_freq, ret);
+					break;
+				}
+				trace_ufshcd_clk_scaling(dev_name(hba->dev),
+						"scaled down", clki->name,
+						clki->curr_freq,
+						clki->min_freq);
+				clki->curr_freq = clki->min_freq;
+			}
+		}
+		dev_dbg(hba->dev, "%s: clk: %s, rate: %lu\n", __func__,
+				clki->name, clk_get_rate(clki->clk));
+	}
+
+out:
+	return ret;
+}
+
+/**
+ * ufshcd_scale_clks - scale up or scale down UFS controller clocks
+ * @hba: per adapter instance
+ * @scale_up: True if scaling up and false if scaling down
+ *
+ * Returns 0 if successful
+ * Returns < 0 on error
+ */
+static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up)
+{
+	int ret = 0;
+
+	ret = ufshcd_vops_clk_scale_notify(hba, scale_up, PRE_CHANGE);
+	if (ret)
+		return ret;
+
+	ret = ufshcd_set_clk_freq(hba, scale_up);
+	if (ret)
+		return ret;
+
+	ret = ufshcd_vops_clk_scale_notify(hba, scale_up, POST_CHANGE);
+	if (ret) {
+		ufshcd_set_clk_freq(hba, !scale_up);
+		return ret;
+	}
+
+	return ret;
+}
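ufshcd_scale_clks() brackets the frequency change with PRE_CHANGE/POST_CHANGE vendor notifications and reverts the frequencies if the POST_CHANGE hook fails. A compact sketch of this notify/change/rollback pattern (hook bodies are placeholders):

#include <stdbool.h>
#include <stdio.h>

enum notify_stage { PRE_CHANGE, POST_CHANGE };

static int vops_notify(bool scale_up, enum notify_stage stage)
{
	/* vendor hook: return nonzero to veto the transition */
	return 0;
}

static int set_freq(bool scale_up)
{
	printf("clocks -> %s\n", scale_up ? "max" : "min");
	return 0;
}

static int scale_clks(bool scale_up)
{
	int ret = vops_notify(scale_up, PRE_CHANGE);

	if (ret)
		return ret;	/* vetoed before touching the hardware */

	ret = set_freq(scale_up);
	if (ret)
		return ret;

	ret = vops_notify(scale_up, POST_CHANGE);
	if (ret)
		set_freq(!scale_up);	/* roll back on post-change failure */
	return ret;
}

int main(void)
{
	return scale_clks(true);
}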
+
+static inline void ufshcd_cancel_gate_work(struct ufs_hba *hba)
+{
+	hrtimer_cancel(&hba->clk_gating.gate_hrtimer);
+	cancel_work_sync(&hba->clk_gating.gate_work);
+}
+
 static void ufshcd_ungate_work(struct work_struct *work)
 {
 	int ret;
@@ -535,7 +1413,7 @@
 	struct ufs_hba *hba = container_of(work, struct ufs_hba,
 			clk_gating.ungate_work);
 
-	cancel_delayed_work_sync(&hba->clk_gating.gate_work);
+	ufshcd_cancel_gate_work(hba);
 
 	spin_lock_irqsave(hba->host->host_lock, flags);
 	if (hba->clk_gating.state == CLKS_ON) {
@@ -544,7 +1422,8 @@
 	}
 
 	spin_unlock_irqrestore(hba->host->host_lock, flags);
-	ufshcd_setup_clocks(hba, true);
+	ufshcd_hba_vreg_set_hpm(hba);
+	ufshcd_enable_clocks(hba);
 
 	/* Exit from hibern8 */
 	if (ufshcd_can_hibern8_during_gating(hba)) {
@@ -561,9 +1440,7 @@
 		hba->clk_gating.is_suspended = false;
 	}
 unblock_reqs:
-	if (ufshcd_is_clkscaling_enabled(hba))
-		devfreq_resume_device(hba->devfreq);
-	scsi_unblock_requests(hba->host);
+	ufshcd_scsi_unblock_requests(hba);
 }
 
 /**
@@ -582,24 +1459,53 @@
 	spin_lock_irqsave(hba->host->host_lock, flags);
 	hba->clk_gating.active_reqs++;
 
+	if (ufshcd_eh_in_progress(hba)) {
+		spin_unlock_irqrestore(hba->host->host_lock, flags);
+		return 0;
+	}
+
 start:
 	switch (hba->clk_gating.state) {
 	case CLKS_ON:
+		/*
+		 * Wait for the ungate work to complete if in progress.
+		 * Though the clocks may be in ON state, the link could
+		 * still be in hibner8 state if hibern8 is allowed
+		 * during clock gating.
+		 * Make sure we exit hibern8 state also in addition to
+		 * clocks being ON.
+		 */
+		if (ufshcd_can_hibern8_during_gating(hba) &&
+		    ufshcd_is_link_hibern8(hba)) {
+			spin_unlock_irqrestore(hba->host->host_lock, flags);
+			flush_work(&hba->clk_gating.ungate_work);
+			spin_lock_irqsave(hba->host->host_lock, flags);
+			goto start;
+		}
 		break;
 	case REQ_CLKS_OFF:
-		if (cancel_delayed_work(&hba->clk_gating.gate_work)) {
+		/*
+		 * If the timer was active but the callback was not running
+		 * we have nothing to do, just change state and return.
+		 */
+		if (hrtimer_try_to_cancel(&hba->clk_gating.gate_hrtimer) == 1) {
 			hba->clk_gating.state = CLKS_ON;
+			trace_ufshcd_clk_gating(dev_name(hba->dev),
+				hba->clk_gating.state);
 			break;
 		}
 		/*
-		 * If we here, it means gating work is either done or
+		 * If we are here, it means gating work is either done or
 		 * currently running. Hence, fall through to cancel gating
 		 * work and to enable clocks.
 		 */
 	case CLKS_OFF:
-		scsi_block_requests(hba->host);
+		__ufshcd_scsi_block_requests(hba);
 		hba->clk_gating.state = REQ_CLKS_ON;
-		schedule_work(&hba->clk_gating.ungate_work);
+		trace_ufshcd_clk_gating(dev_name(hba->dev),
+			hba->clk_gating.state);
+		queue_work(hba->clk_gating.clk_gating_workq,
+				&hba->clk_gating.ungate_work);
 		/*
 		 * fall through to check if we should wait for this
 		 * work to be done or not.
@@ -623,6 +1529,7 @@
 	}
 	spin_unlock_irqrestore(hba->host->host_lock, flags);
 out:
+	hba->ufs_stats.clk_hold.ts = ktime_get();
 	return rc;
 }
 EXPORT_SYMBOL_GPL(ufshcd_hold);
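ufshcd_hold() drives one half of a four-state gating machine: CLKS_ON, REQ_CLKS_OFF (gate timer armed), CLKS_OFF, and REQ_CLKS_ON (ungate work queued). A stripped-down sketch of the hold-side transitions, ignoring locking, hibern8 and the async path (state names mirror the driver; everything else is illustrative):

#include <stdio.h>

enum clk_state { CLKS_OFF, CLKS_ON, REQ_CLKS_OFF, REQ_CLKS_ON };

static enum clk_state state = CLKS_ON;
static int active_reqs;		/* pairs with a release() not shown here */

static int cancel_gate_timer(void) { return 1; }	/* 1: cancelled in time */
static void queue_ungate_work(void) { printf("ungate work queued\n"); }

/* Returns 0 when clocks are usable now, 1 when the caller must wait. */
static int hold(void)
{
	active_reqs++;
	switch (state) {
	case CLKS_ON:
		return 0;			/* nothing to do */
	case REQ_CLKS_OFF:
		if (cancel_gate_timer()) {
			state = CLKS_ON;	/* gate aborted before it ran */
			return 0;
		}
		/* gate work already running: fall through and re-ungate */
	case CLKS_OFF:
		state = REQ_CLKS_ON;
		queue_ungate_work();
		/* fall through: the caller waits for the ungate work */
	case REQ_CLKS_ON:
		return 1;
	}
	return 1;
}

int main(void)
{
	state = CLKS_OFF;
	printf("must wait: %d, active_reqs: %d\n", hold(), active_reqs);
	return 0;
}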
@@ -630,12 +1537,21 @@
 static void ufshcd_gate_work(struct work_struct *work)
 {
 	struct ufs_hba *hba = container_of(work, struct ufs_hba,
-			clk_gating.gate_work.work);
+						clk_gating.gate_work);
 	unsigned long flags;
 
 	spin_lock_irqsave(hba->host->host_lock, flags);
-	if (hba->clk_gating.is_suspended) {
+	/*
+	 * In case you are here to cancel this work the gating state
+	 * would be marked as REQ_CLKS_ON. In this case save time by
+	 * skipping the gating work and exiting after changing the clock
+	 * state to CLKS_ON.
+	 */
+	if (hba->clk_gating.is_suspended ||
+		(hba->clk_gating.state != REQ_CLKS_OFF)) {
 		hba->clk_gating.state = CLKS_ON;
+		trace_ufshcd_clk_gating(dev_name(hba->dev),
+			hba->clk_gating.state);
 		goto rel_lock;
 	}
 
@@ -647,25 +1563,38 @@
 
 	spin_unlock_irqrestore(hba->host->host_lock, flags);
 
+	if (ufshcd_is_hibern8_on_idle_allowed(hba) &&
+	    hba->hibern8_on_idle.is_enabled)
+		/*
+		 * Hibern8 enter work (on Idle) needs clocks to be ON hence
+		 * make sure that it is flushed before turning off the clocks.
+		 */
+		flush_delayed_work(&hba->hibern8_on_idle.enter_work);
+
 	/* put the link into hibern8 mode before turning off clocks */
 	if (ufshcd_can_hibern8_during_gating(hba)) {
 		if (ufshcd_uic_hibern8_enter(hba)) {
 			hba->clk_gating.state = CLKS_ON;
+			trace_ufshcd_clk_gating(dev_name(hba->dev),
+				hba->clk_gating.state);
 			goto out;
 		}
 		ufshcd_set_link_hibern8(hba);
 	}
 
-	if (ufshcd_is_clkscaling_enabled(hba)) {
-		devfreq_suspend_device(hba->devfreq);
-		hba->clk_scaling.window_start_t = 0;
-	}
-
-	if (!ufshcd_is_link_active(hba))
-		ufshcd_setup_clocks(hba, false);
+	/*
+	 * If auto hibern8 is supported then the link will already
+	 * be in hibern8 state and the ref clock can be gated.
+	 */
+	if ((ufshcd_is_auto_hibern8_supported(hba) ||
+	     !ufshcd_is_link_active(hba)) && !hba->no_ref_clk_gating)
+		ufshcd_disable_clocks(hba, true);
 	else
 		/* If link is active, device ref_clk can't be switched off */
-		__ufshcd_setup_clocks(hba, false, true);
+		ufshcd_disable_clocks_skip_ref_clk(hba, true);
+
+	/* Put the host controller in low power mode if possible */
+	ufshcd_hba_vreg_set_lpm(hba);
 
 	/*
 	 * In case you are here to cancel this work the gating state
@@ -677,9 +1606,11 @@
 	 * new requests arriving before the current cancel work is done.
 	 */
 	spin_lock_irqsave(hba->host->host_lock, flags);
-	if (hba->clk_gating.state == REQ_CLKS_OFF)
+	if (hba->clk_gating.state == REQ_CLKS_OFF) {
 		hba->clk_gating.state = CLKS_OFF;
-
+		trace_ufshcd_clk_gating(dev_name(hba->dev),
+			hba->clk_gating.state);
+	}
 rel_lock:
 	spin_unlock_irqrestore(hba->host->host_lock, flags);
 out:
@@ -687,7 +1618,7 @@
 }
 
 /* host lock must be held before calling this variant */
-static void __ufshcd_release(struct ufs_hba *hba)
+static void __ufshcd_release(struct ufs_hba *hba, bool no_sched)
 {
 	if (!ufshcd_is_clkgating_allowed(hba))
 		return;
@@ -697,20 +1628,25 @@
 	if (hba->clk_gating.active_reqs || hba->clk_gating.is_suspended
 		|| hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL
 		|| hba->lrb_in_use || hba->outstanding_tasks
-		|| hba->active_uic_cmd || hba->uic_async_done)
+		|| hba->active_uic_cmd || hba->uic_async_done
+		|| ufshcd_eh_in_progress(hba) || no_sched)
 		return;
 
 	hba->clk_gating.state = REQ_CLKS_OFF;
-	schedule_delayed_work(&hba->clk_gating.gate_work,
-			msecs_to_jiffies(hba->clk_gating.delay_ms));
+	trace_ufshcd_clk_gating(dev_name(hba->dev), hba->clk_gating.state);
+	hba->ufs_stats.clk_rel.ts = ktime_get();
+
+	hrtimer_start(&hba->clk_gating.gate_hrtimer,
+			ms_to_ktime(hba->clk_gating.delay_ms),
+			HRTIMER_MODE_REL);
 }
 
-void ufshcd_release(struct ufs_hba *hba)
+void ufshcd_release(struct ufs_hba *hba, bool no_sched)
 {
 	unsigned long flags;
 
 	spin_lock_irqsave(hba->host->host_lock, flags);
-	__ufshcd_release(hba);
+	__ufshcd_release(hba, no_sched);
 	spin_unlock_irqrestore(hba->host->host_lock, flags);
 }
 EXPORT_SYMBOL_GPL(ufshcd_release);
@@ -738,15 +1674,177 @@
 	return count;
 }
 
+static ssize_t ufshcd_clkgate_delay_pwr_save_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct ufs_hba *hba = dev_get_drvdata(dev);
+
+	return snprintf(buf, PAGE_SIZE, "%lu\n",
+			hba->clk_gating.delay_ms_pwr_save);
+}
+
+static ssize_t ufshcd_clkgate_delay_pwr_save_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct ufs_hba *hba = dev_get_drvdata(dev);
+	unsigned long flags, value;
+
+	if (kstrtoul(buf, 0, &value))
+		return -EINVAL;
+
+	spin_lock_irqsave(hba->host->host_lock, flags);
+
+	hba->clk_gating.delay_ms_pwr_save = value;
+	if (ufshcd_is_clkscaling_supported(hba) &&
+	    !hba->clk_scaling.is_scaled_up)
+		hba->clk_gating.delay_ms = hba->clk_gating.delay_ms_pwr_save;
+
+	spin_unlock_irqrestore(hba->host->host_lock, flags);
+	return count;
+}
+
+static ssize_t ufshcd_clkgate_delay_perf_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct ufs_hba *hba = dev_get_drvdata(dev);
+
+	return snprintf(buf, PAGE_SIZE, "%lu\n", hba->clk_gating.delay_ms_perf);
+}
+
+static ssize_t ufshcd_clkgate_delay_perf_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct ufs_hba *hba = dev_get_drvdata(dev);
+	unsigned long flags, value;
+
+	if (kstrtoul(buf, 0, &value))
+		return -EINVAL;
+
+	spin_lock_irqsave(hba->host->host_lock, flags);
+
+	hba->clk_gating.delay_ms_perf = value;
+	if (ufshcd_is_clkscaling_supported(hba) &&
+	    hba->clk_scaling.is_scaled_up)
+		hba->clk_gating.delay_ms = hba->clk_gating.delay_ms_perf;
+
+	spin_unlock_irqrestore(hba->host->host_lock, flags);
+	return count;
+}
+
+static ssize_t ufshcd_clkgate_enable_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct ufs_hba *hba = dev_get_drvdata(dev);
+
+	return snprintf(buf, PAGE_SIZE, "%d\n", hba->clk_gating.is_enabled);
+}
+
+static ssize_t ufshcd_clkgate_enable_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct ufs_hba *hba = dev_get_drvdata(dev);
+	unsigned long flags;
+	u32 value;
+
+	if (kstrtou32(buf, 0, &value))
+		return -EINVAL;
+
+	value = !!value;
+	if (value == hba->clk_gating.is_enabled)
+		goto out;
+
+	if (value) {
+		ufshcd_release(hba, false);
+	} else {
+		spin_lock_irqsave(hba->host->host_lock, flags);
+		hba->clk_gating.active_reqs++;
+		spin_unlock_irqrestore(hba->host->host_lock, flags);
+	}
+
+	hba->clk_gating.is_enabled = value;
+out:
+	return count;
+}
+
+static enum hrtimer_restart ufshcd_clkgate_hrtimer_handler(
+					struct hrtimer *timer)
+{
+	struct ufs_hba *hba = container_of(timer, struct ufs_hba,
+					   clk_gating.gate_hrtimer);
+
+	queue_work(hba->clk_gating.clk_gating_workq,
+				&hba->clk_gating.gate_work);
+
+	return HRTIMER_NORESTART;
+}
+
 static void ufshcd_init_clk_gating(struct ufs_hba *hba)
 {
+	struct ufs_clk_gating *gating = &hba->clk_gating;
+	char wq_name[sizeof("ufs_clk_gating_00")];
+
+	hba->clk_gating.state = CLKS_ON;
+
 	if (!ufshcd_is_clkgating_allowed(hba))
 		return;
 
-	hba->clk_gating.delay_ms = 150;
-	INIT_DELAYED_WORK(&hba->clk_gating.gate_work, ufshcd_gate_work);
-	INIT_WORK(&hba->clk_gating.ungate_work, ufshcd_ungate_work);
+	/*
+	 * Disable hibern8 during clk gating if
+	 * auto hibern8 is supported
+	 */
+	if (ufshcd_is_auto_hibern8_supported(hba))
+		hba->caps &= ~UFSHCD_CAP_HIBERN8_WITH_CLK_GATING;
+
+	INIT_WORK(&gating->gate_work, ufshcd_gate_work);
+	INIT_WORK(&gating->ungate_work, ufshcd_ungate_work);
+	/*
+	 * Clock gating work must be executed only after auto hibern8
+	 * timeout has expired in the hardware or after aggressive
+	 * hibern8 on idle software timeout. Jiffy-based low-resolution
+	 * delayed work cannot reliably guarantee this, hence use a
+	 * high-resolution timer to make sure the gate work is scheduled
+	 * strictly after the hibern8 timeout.
+	 *
+	 * Always make sure gating->delay_ms > hibern8_on_idle->delay_ms
+	 */
+	hrtimer_init(&gating->gate_hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+	gating->gate_hrtimer.function = ufshcd_clkgate_hrtimer_handler;
+
+	snprintf(wq_name, ARRAY_SIZE(wq_name), "ufs_clk_gating_%d",
+			hba->host->host_no);
+	hba->clk_gating.clk_gating_workq =
+		create_singlethread_workqueue(wq_name);
+
+	gating->is_enabled = true;
+
+	gating->delay_ms_pwr_save = UFSHCD_CLK_GATING_DELAY_MS_PWR_SAVE;
+	gating->delay_ms_perf = UFSHCD_CLK_GATING_DELAY_MS_PERF;
+
+	/* start with performance mode */
+	gating->delay_ms = gating->delay_ms_perf;
+
+	if (!ufshcd_is_clkscaling_supported(hba))
+		goto scaling_not_supported;
+
+	gating->delay_pwr_save_attr.show = ufshcd_clkgate_delay_pwr_save_show;
+	gating->delay_pwr_save_attr.store = ufshcd_clkgate_delay_pwr_save_store;
+	sysfs_attr_init(&gating->delay_pwr_save_attr.attr);
+	gating->delay_pwr_save_attr.attr.name = "clkgate_delay_ms_pwr_save";
+	gating->delay_pwr_save_attr.attr.mode = S_IRUGO | S_IWUSR;
+	if (device_create_file(hba->dev, &gating->delay_pwr_save_attr))
+		dev_err(hba->dev, "Failed to create sysfs for clkgate_delay_ms_pwr_save\n");
+
+	gating->delay_perf_attr.show = ufshcd_clkgate_delay_perf_show;
+	gating->delay_perf_attr.store = ufshcd_clkgate_delay_perf_store;
+	sysfs_attr_init(&gating->delay_perf_attr.attr);
+	gating->delay_perf_attr.attr.name = "clkgate_delay_ms_perf";
+	gating->delay_perf_attr.attr.mode = S_IRUGO | S_IWUSR;
+	if (device_create_file(hba->dev, &gating->delay_perf_attr))
+		dev_err(hba->dev, "Failed to create sysfs for clkgate_delay_ms_perf\n");
 
+	goto add_clkgate_enable;
+
+scaling_not_supported:
 	hba->clk_gating.delay_attr.show = ufshcd_clkgate_delay_show;
 	hba->clk_gating.delay_attr.store = ufshcd_clkgate_delay_store;
 	sysfs_attr_init(&hba->clk_gating.delay_attr.attr);
@@ -754,23 +1852,456 @@
 	hba->clk_gating.delay_attr.attr.mode = S_IRUGO | S_IWUSR;
 	if (device_create_file(hba->dev, &hba->clk_gating.delay_attr))
 		dev_err(hba->dev, "Failed to create sysfs for clkgate_delay\n");
+
+add_clkgate_enable:
+	gating->enable_attr.show = ufshcd_clkgate_enable_show;
+	gating->enable_attr.store = ufshcd_clkgate_enable_store;
+	sysfs_attr_init(&gating->enable_attr.attr);
+	gating->enable_attr.attr.name = "clkgate_enable";
+	gating->enable_attr.attr.mode = S_IRUGO | S_IWUSR;
+	if (device_create_file(hba->dev, &gating->enable_attr))
+		dev_err(hba->dev, "Failed to create sysfs for clkgate_enable\n");
 }
 
 static void ufshcd_exit_clk_gating(struct ufs_hba *hba)
 {
 	if (!ufshcd_is_clkgating_allowed(hba))
 		return;
+	if (ufshcd_is_clkscaling_supported(hba)) {
+		device_remove_file(hba->dev,
+				   &hba->clk_gating.delay_pwr_save_attr);
+		device_remove_file(hba->dev, &hba->clk_gating.delay_perf_attr);
+	} else {
 	device_remove_file(hba->dev, &hba->clk_gating.delay_attr);
+	}
+	device_remove_file(hba->dev, &hba->clk_gating.enable_attr);
+	ufshcd_cancel_gate_work(hba);
 	cancel_work_sync(&hba->clk_gating.ungate_work);
-	cancel_delayed_work_sync(&hba->clk_gating.gate_work);
+	destroy_workqueue(hba->clk_gating.clk_gating_workq);
+}
+
+static void ufshcd_set_auto_hibern8_timer(struct ufs_hba *hba, u32 delay)
+{
+	ufshcd_rmwl(hba, AUTO_HIBERN8_TIMER_SCALE_MASK |
+			 AUTO_HIBERN8_IDLE_TIMER_MASK,
+			AUTO_HIBERN8_TIMER_SCALE_1_MS | delay,
+			REG_AUTO_HIBERN8_IDLE_TIMER);
+	/* Make sure the timer gets applied before further operations */
+	mb();
+}
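ufshcd_rmwl() above is a read-modify-write across two packed register fields: clear the scale and idle-timer masks, then OR in the 1 ms scale together with the new delay. The core is the classic (old & ~mask) | (new & mask); a minimal sketch with an invented field layout (the real register layout is not reproduced here):

#include <assert.h>
#include <stdint.h>

/* Hypothetical layout: bits 0-9 idle timer, bits 10-12 timer scale. */
#define IDLE_TIMER_MASK		0x3FFu
#define TIMER_SCALE_MASK	(0x7u << 10)
#define TIMER_SCALE_1_MS	(0x3u << 10)	/* illustrative encoding */

static uint32_t rmwl(uint32_t reg, uint32_t mask, uint32_t val)
{
	return (reg & ~mask) | (val & mask);	/* untouched bits survive */
}

int main(void)
{
	uint32_t reg = 0xF0000000u | (0x5u << 10) | 250;	/* old scale+delay */

	reg = rmwl(reg, TIMER_SCALE_MASK | IDLE_TIMER_MASK,
		   TIMER_SCALE_1_MS | 100);

	assert((reg & IDLE_TIMER_MASK) == 100);		/* new delay */
	assert((reg & TIMER_SCALE_MASK) == TIMER_SCALE_1_MS);
	assert((reg & 0xF0000000u) == 0xF0000000u);	/* other bits intact */
	return 0;
}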
+
+/**
+ * ufshcd_hibern8_hold - Make sure that link is not in hibern8.
+ *
+ * @hba: per adapter instance
+ * @async: This indicates whether caller wants to exit hibern8 asynchronously.
+ *
+ * Exit from hibern8 mode and set the link as active.
+ *
+ * Return 0 on success, non-zero on failure.
+ */
+static int ufshcd_hibern8_hold(struct ufs_hba *hba, bool async)
+{
+	int rc = 0;
+	unsigned long flags;
+
+	if (!ufshcd_is_hibern8_on_idle_allowed(hba))
+		goto out;
+
+	spin_lock_irqsave(hba->host->host_lock, flags);
+	hba->hibern8_on_idle.active_reqs++;
+
+	if (ufshcd_eh_in_progress(hba)) {
+		spin_unlock_irqrestore(hba->host->host_lock, flags);
+		return 0;
+	}
+
+start:
+	switch (hba->hibern8_on_idle.state) {
+	case HIBERN8_EXITED:
+		break;
+	case REQ_HIBERN8_ENTER:
+		if (cancel_delayed_work(&hba->hibern8_on_idle.enter_work)) {
+			hba->hibern8_on_idle.state = HIBERN8_EXITED;
+			trace_ufshcd_hibern8_on_idle(dev_name(hba->dev),
+				hba->hibern8_on_idle.state);
+			break;
+		}
+		/*
+		 * If we are here, it means hibern8 enter work is either done or
+		 * currently running. Hence, fall through to cancel hibern8
+		 * work and exit hibern8.
+		 */
+	case HIBERN8_ENTERED:
+		__ufshcd_scsi_block_requests(hba);
+		hba->hibern8_on_idle.state = REQ_HIBERN8_EXIT;
+		trace_ufshcd_hibern8_on_idle(dev_name(hba->dev),
+			hba->hibern8_on_idle.state);
+		schedule_work(&hba->hibern8_on_idle.exit_work);
+		/*
+		 * fall through to check if we should wait for this
+		 * work to be done or not.
+		 */
+	case REQ_HIBERN8_EXIT:
+		if (async) {
+			rc = -EAGAIN;
+			hba->hibern8_on_idle.active_reqs--;
+			break;
+		} else {
+			spin_unlock_irqrestore(hba->host->host_lock, flags);
+			flush_work(&hba->hibern8_on_idle.exit_work);
+			/* Make sure state is HIBERN8_EXITED before returning */
+			spin_lock_irqsave(hba->host->host_lock, flags);
+			goto start;
+		}
+	default:
+		dev_err(hba->dev, "%s: H8 is in invalid state %d\n",
+				__func__, hba->hibern8_on_idle.state);
+		break;
+	}
+	spin_unlock_irqrestore(hba->host->host_lock, flags);
+out:
+	return rc;
+}
+
+/* host lock must be held before calling this variant */
+static void __ufshcd_hibern8_release(struct ufs_hba *hba, bool no_sched)
+{
+	unsigned long delay_in_jiffies;
+
+	if (!ufshcd_is_hibern8_on_idle_allowed(hba))
+		return;
+
+	hba->hibern8_on_idle.active_reqs--;
+	BUG_ON(hba->hibern8_on_idle.active_reqs < 0);
+
+	if (hba->hibern8_on_idle.active_reqs
+		|| hba->hibern8_on_idle.is_suspended
+		|| hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL
+		|| hba->lrb_in_use || hba->outstanding_tasks
+		|| hba->active_uic_cmd || hba->uic_async_done
+		|| ufshcd_eh_in_progress(hba) || no_sched)
+		return;
+
+	hba->hibern8_on_idle.state = REQ_HIBERN8_ENTER;
+	trace_ufshcd_hibern8_on_idle(dev_name(hba->dev),
+		hba->hibern8_on_idle.state);
+	/*
+	 * Scheduling the delayed work after 1 jiffy will make the work
+	 * run any time between 0 ms and 1000/HZ ms, which is not desirable
+	 * for hibern8 enter work as it may impact performance if it gets
+	 * scheduled almost immediately. Hence make sure that hibern8 enter
+	 * work gets scheduled at least 2 jiffies out (any time between
+	 * 1000/HZ ms and 2000/HZ ms).
+	 */
+	delay_in_jiffies = msecs_to_jiffies(hba->hibern8_on_idle.delay_ms);
+	if (delay_in_jiffies == 1)
+		delay_in_jiffies++;
+
+	schedule_delayed_work(&hba->hibern8_on_idle.enter_work,
+			      delay_in_jiffies);
+}
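The "at least 2 jiffies" bump exists because a timer armed for 1 jiffy expires on the very next tick, which may be almost immediately. A tiny arithmetic sketch of the rounding, with HZ assumed for illustration:

#include <stdio.h>

#define HZ 100	/* assumed tick rate: 1 jiffy = 10 ms */

/* Round-up conversion, same spirit as the kernel's msecs_to_jiffies(). */
static unsigned long msecs_to_jiffies(unsigned int ms)
{
	return (ms * HZ + 999) / 1000;
}

int main(void)
{
	unsigned long j = msecs_to_jiffies(10);	/* 10 ms -> 1 jiffy */

	/*
	 * A timer armed for 1 jiffy expires on the next tick, which may
	 * be 0..10 ms away; bumping to 2 jiffies guarantees 10..20 ms.
	 */
	if (j == 1)
		j++;
	printf("delay = %lu jiffies (%lu-%lu ms)\n",
	       j, (j - 1) * 1000 / HZ, j * 1000 / HZ);
	return 0;
}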
+
+static void ufshcd_hibern8_release(struct ufs_hba *hba, bool no_sched)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(hba->host->host_lock, flags);
+	__ufshcd_hibern8_release(hba, no_sched);
+	spin_unlock_irqrestore(hba->host->host_lock, flags);
+}
+
+static void ufshcd_hibern8_enter_work(struct work_struct *work)
+{
+	struct ufs_hba *hba = container_of(work, struct ufs_hba,
+					   hibern8_on_idle.enter_work.work);
+	unsigned long flags;
+
+	spin_lock_irqsave(hba->host->host_lock, flags);
+	if (hba->hibern8_on_idle.is_suspended) {
+		hba->hibern8_on_idle.state = HIBERN8_EXITED;
+		trace_ufshcd_hibern8_on_idle(dev_name(hba->dev),
+			hba->hibern8_on_idle.state);
+		goto rel_lock;
+	}
+
+	if (hba->hibern8_on_idle.active_reqs
+		|| hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL
+		|| hba->lrb_in_use || hba->outstanding_tasks
+		|| hba->active_uic_cmd || hba->uic_async_done)
+		goto rel_lock;
+
+	spin_unlock_irqrestore(hba->host->host_lock, flags);
+
+	if (ufshcd_is_link_active(hba) && ufshcd_uic_hibern8_enter(hba)) {
+		/* Enter failed */
+		hba->hibern8_on_idle.state = HIBERN8_EXITED;
+		trace_ufshcd_hibern8_on_idle(dev_name(hba->dev),
+			hba->hibern8_on_idle.state);
+		goto out;
+	}
+	ufshcd_set_link_hibern8(hba);
+
+	/*
+	 * In case you are here to cancel this work the hibern8_on_idle.state
+	 * would be marked as REQ_HIBERN8_EXIT. In this case keep the state
+	 * as REQ_HIBERN8_EXIT which would anyway imply that we are in hibern8
+	 * and a request to exit from it is pending. This keeps the state
+	 * machine intact and ultimately prevents the cancel work from
+	 * running multiple times when new requests arrive before the
+	 * current cancel work is done.
+	 */
+	spin_lock_irqsave(hba->host->host_lock, flags);
+	if (hba->hibern8_on_idle.state == REQ_HIBERN8_ENTER) {
+		hba->hibern8_on_idle.state = HIBERN8_ENTERED;
+		trace_ufshcd_hibern8_on_idle(dev_name(hba->dev),
+			hba->hibern8_on_idle.state);
+	}
+rel_lock:
+	spin_unlock_irqrestore(hba->host->host_lock, flags);
+out:
+	return;
+}
+
+static void __ufshcd_set_auto_hibern8_timer(struct ufs_hba *hba,
+					    unsigned long delay_ms)
+{
+	pm_runtime_get_sync(hba->dev);
+	ufshcd_hold_all(hba);
+	ufshcd_scsi_block_requests(hba);
+	down_write(&hba->lock);
+	/* wait for all the outstanding requests to finish */
+	ufshcd_wait_for_doorbell_clr(hba, U64_MAX);
+	ufshcd_set_auto_hibern8_timer(hba, delay_ms);
+	up_write(&hba->lock);
+	ufshcd_scsi_unblock_requests(hba);
+	ufshcd_release_all(hba);
+	pm_runtime_put_sync(hba->dev);
+}
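__ufshcd_set_auto_hibern8_timer() is a quiesce-modify-resume sequence for reprogramming a live controller: pin power and clocks, block new I/O, drain outstanding requests, write the register, then undo everything in reverse order. A schematic sketch of that shape (every name here is a stand-in):

#include <stdio.h>

static void power_get(void)   { printf("pm get\n"); }
static void power_put(void)   { printf("pm put\n"); }
static void block_io(void)    { printf("block new I/O\n"); }
static void unblock_io(void)  { printf("unblock I/O\n"); }
static void drain_io(void)    { printf("wait for doorbell clear\n"); }
static void program(unsigned int ms) { printf("timer = %u ms\n", ms); }

static void set_live_timer(unsigned int delay_ms)
{
	power_get();		/* keep the device and clocks up */
	block_io();		/* no new requests from here on */
	drain_io();		/* outstanding requests finish */
	program(delay_ms);	/* safe: the controller is idle */
	unblock_io();		/* reverse order on the way out */
	power_put();
}

int main(void)
{
	set_live_timer(5);
	return 0;
}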
+
+static void ufshcd_hibern8_exit_work(struct work_struct *work)
+{
+	int ret;
+	unsigned long flags;
+	struct ufs_hba *hba = container_of(work, struct ufs_hba,
+					   hibern8_on_idle.exit_work);
+
+	cancel_delayed_work_sync(&hba->hibern8_on_idle.enter_work);
+
+	spin_lock_irqsave(hba->host->host_lock, flags);
+	if ((hba->hibern8_on_idle.state == HIBERN8_EXITED)
+	     || ufshcd_is_link_active(hba)) {
+		hba->hibern8_on_idle.state = HIBERN8_EXITED;
+		spin_unlock_irqrestore(hba->host->host_lock, flags);
+		goto unblock_reqs;
+	}
+	spin_unlock_irqrestore(hba->host->host_lock, flags);
+
+	/* Exit from hibern8 */
+	if (ufshcd_is_link_hibern8(hba)) {
+		hba->ufs_stats.clk_hold.ctx = H8_EXIT_WORK;
+		ufshcd_hold(hba, false);
+		ret = ufshcd_uic_hibern8_exit(hba);
+		hba->ufs_stats.clk_rel.ctx = H8_EXIT_WORK;
+		ufshcd_release(hba, false);
+		if (!ret) {
+			spin_lock_irqsave(hba->host->host_lock, flags);
+			ufshcd_set_link_active(hba);
+			hba->hibern8_on_idle.state = HIBERN8_EXITED;
+			trace_ufshcd_hibern8_on_idle(dev_name(hba->dev),
+				hba->hibern8_on_idle.state);
+			spin_unlock_irqrestore(hba->host->host_lock, flags);
+		}
+	}
+unblock_reqs:
+	ufshcd_scsi_unblock_requests(hba);
+}
+
+static ssize_t ufshcd_hibern8_on_idle_delay_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct ufs_hba *hba = dev_get_drvdata(dev);
+
+	return snprintf(buf, PAGE_SIZE, "%lu\n", hba->hibern8_on_idle.delay_ms);
+}
+
+static ssize_t ufshcd_hibern8_on_idle_delay_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct ufs_hba *hba = dev_get_drvdata(dev);
+	unsigned long flags, value;
+	bool change = true;
+
+	if (kstrtoul(buf, 0, &value))
+		return -EINVAL;
+
+	spin_lock_irqsave(hba->host->host_lock, flags);
+	if (hba->hibern8_on_idle.delay_ms == value)
+		change = false;
+
+	if (value >= hba->clk_gating.delay_ms_pwr_save ||
+	    value >= hba->clk_gating.delay_ms_perf) {
+		dev_err(hba->dev, "hibern8_on_idle_delay (%lu) can not be >= to clkgate_delay_ms_pwr_save (%lu) and clkgate_delay_ms_perf (%lu)\n",
+			value, hba->clk_gating.delay_ms_pwr_save,
+			hba->clk_gating.delay_ms_perf);
+		spin_unlock_irqrestore(hba->host->host_lock, flags);
+		return -EINVAL;
+	}
+
+	hba->hibern8_on_idle.delay_ms = value;
+	spin_unlock_irqrestore(hba->host->host_lock, flags);
+
+	/* Update auto hibern8 timer value if supported */
+	if (change && ufshcd_is_auto_hibern8_supported(hba) &&
+	    hba->hibern8_on_idle.is_enabled)
+		__ufshcd_set_auto_hibern8_timer(hba,
+						hba->hibern8_on_idle.delay_ms);
+
+	return count;
+}
+
+static ssize_t ufshcd_hibern8_on_idle_enable_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct ufs_hba *hba = dev_get_drvdata(dev);
+
+	return snprintf(buf, PAGE_SIZE, "%d\n",
+			hba->hibern8_on_idle.is_enabled);
+}
+
+static ssize_t ufshcd_hibern8_on_idle_enable_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct ufs_hba *hba = dev_get_drvdata(dev);
+	unsigned long flags;
+	u32 value;
+
+	if (kstrtou32(buf, 0, &value))
+		return -EINVAL;
+
+	value = !!value;
+	if (value == hba->hibern8_on_idle.is_enabled)
+		goto out;
+
+	/* Update auto hibern8 timer value if supported */
+	if (ufshcd_is_auto_hibern8_supported(hba)) {
+		__ufshcd_set_auto_hibern8_timer(hba,
+			value ? hba->hibern8_on_idle.delay_ms : 0);
+		goto update;
+	}
+
+	if (value) {
+		/*
+		 * As clock gating work would wait for the hibern8 enter work
+		 * to finish, clocks would remain on during hibern8 enter work.
+		 */
+		ufshcd_hold(hba, false);
+		ufshcd_release_all(hba);
+	} else {
+		spin_lock_irqsave(hba->host->host_lock, flags);
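+		/*
+		 * Hold an artificial active request so that the idle path
+		 * can never queue hibern8 enter work while disabled.
+		 */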
+		hba->hibern8_on_idle.active_reqs++;
+		spin_unlock_irqrestore(hba->host->host_lock, flags);
+	}
+
+update:
+	hba->hibern8_on_idle.is_enabled = value;
+out:
+	return count;
+}
+
+static void ufshcd_init_hibern8_on_idle(struct ufs_hba *hba)
+{
+	/* initialize the state variable here */
+	hba->hibern8_on_idle.state = HIBERN8_EXITED;
+
+	if (!ufshcd_is_hibern8_on_idle_allowed(hba) &&
+	    !ufshcd_is_auto_hibern8_supported(hba))
+		return;
+
+	if (ufshcd_is_auto_hibern8_supported(hba)) {
+		hba->hibern8_on_idle.delay_ms = 1;
+		hba->hibern8_on_idle.state = AUTO_HIBERN8;
+		/*
+		 * Disable SW hibern8 enter on idle in case
+		 * auto hibern8 is supported
+		 */
+		hba->caps &= ~UFSHCD_CAP_HIBERN8_ENTER_ON_IDLE;
+	} else {
+		hba->hibern8_on_idle.delay_ms = 10;
+		INIT_DELAYED_WORK(&hba->hibern8_on_idle.enter_work,
+				  ufshcd_hibern8_enter_work);
+		INIT_WORK(&hba->hibern8_on_idle.exit_work,
+			  ufshcd_hibern8_exit_work);
+	}
+
+	hba->hibern8_on_idle.is_enabled = true;
+
+	hba->hibern8_on_idle.delay_attr.show =
+					ufshcd_hibern8_on_idle_delay_show;
+	hba->hibern8_on_idle.delay_attr.store =
+					ufshcd_hibern8_on_idle_delay_store;
+	sysfs_attr_init(&hba->hibern8_on_idle.delay_attr.attr);
+	hba->hibern8_on_idle.delay_attr.attr.name = "hibern8_on_idle_delay_ms";
+	hba->hibern8_on_idle.delay_attr.attr.mode = S_IRUGO | S_IWUSR;
+	if (device_create_file(hba->dev, &hba->hibern8_on_idle.delay_attr))
+		dev_err(hba->dev, "Failed to create sysfs for hibern8_on_idle_delay\n");
+
+	hba->hibern8_on_idle.enable_attr.show =
+					ufshcd_hibern8_on_idle_enable_show;
+	hba->hibern8_on_idle.enable_attr.store =
+					ufshcd_hibern8_on_idle_enable_store;
+	sysfs_attr_init(&hba->hibern8_on_idle.enable_attr.attr);
+	hba->hibern8_on_idle.enable_attr.attr.name = "hibern8_on_idle_enable";
+	hba->hibern8_on_idle.enable_attr.attr.mode = S_IRUGO | S_IWUSR;
+	if (device_create_file(hba->dev, &hba->hibern8_on_idle.enable_attr))
+		dev_err(hba->dev, "Failed to create sysfs for hibern8_on_idle_enable\n");
+}
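+
+/*
+ * Usage sketch for the two knobs registered above (the sysfs path is
+ * hypothetical and depends on the platform device name):
+ *
+ *	echo 5 > /sys/devices/.../ufshc/hibern8_on_idle_delay_ms
+ *	echo 0 > /sys/devices/.../ufshc/hibern8_on_idle_enable
+ *
+ * The delay must stay below both clkgate_delay_ms_pwr_save and
+ * clkgate_delay_ms_perf, or the delay store callback returns -EINVAL.
+ */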
+
+static void ufshcd_exit_hibern8_on_idle(struct ufs_hba *hba)
+{
+	if (!ufshcd_is_hibern8_on_idle_allowed(hba) &&
+	    !ufshcd_is_auto_hibern8_supported(hba))
+		return;
+	device_remove_file(hba->dev, &hba->hibern8_on_idle.delay_attr);
+	device_remove_file(hba->dev, &hba->hibern8_on_idle.enable_attr);
+}
+
+static void ufshcd_hold_all(struct ufs_hba *hba)
+{
+	ufshcd_hold(hba, false);
+	ufshcd_hibern8_hold(hba, false);
+}
+
+static void ufshcd_release_all(struct ufs_hba *hba)
+{
+	ufshcd_hibern8_release(hba, false);
+	ufshcd_release(hba, false);
 }
 
 /* Must be called with host lock acquired */
 static void ufshcd_clk_scaling_start_busy(struct ufs_hba *hba)
 {
-	if (!ufshcd_is_clkscaling_enabled(hba))
+	bool queue_resume_work = false;
+
+	if (!ufshcd_is_clkscaling_supported(hba))
+		return;
+
+	if (!hba->clk_scaling.active_reqs++)
+		queue_resume_work = true;
+
+	if (!hba->clk_scaling.is_allowed || hba->pm_op_in_progress)
 		return;
 
+	if (queue_resume_work)
+		queue_work(hba->clk_scaling.workq,
+			   &hba->clk_scaling.resume_work);
+
+	if (!hba->clk_scaling.window_start_t) {
+		hba->clk_scaling.window_start_t = jiffies;
+		hba->clk_scaling.tot_busy_t = 0;
+		hba->clk_scaling.is_busy_started = false;
+	}
+
 	if (!hba->clk_scaling.is_busy_started) {
 		hba->clk_scaling.busy_start_t = ktime_get();
 		hba->clk_scaling.is_busy_started = true;
@@ -781,7 +2312,7 @@
 {
 	struct ufs_clk_scaling *scaling = &hba->clk_scaling;
 
-	if (!ufshcd_is_clkscaling_enabled(hba))
+	if (!ufshcd_is_clkscaling_supported(hba))
 		return;
 
 	if (!hba->outstanding_reqs && scaling->is_busy_started) {
@@ -791,17 +2322,27 @@
 		scaling->is_busy_started = false;
 	}
 }
+
 /**
  * ufshcd_send_command - Send SCSI or device management commands
  * @hba: per adapter instance
  * @task_tag: Task tag of the command
  */
 static inline
-void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag)
+int ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag)
 {
+	int ret = 0;
+
+	hba->lrb[task_tag].issue_time_stamp = ktime_get();
+	hba->lrb[task_tag].complete_time_stamp = ktime_set(0, 0);
 	ufshcd_clk_scaling_start_busy(hba);
 	__set_bit(task_tag, &hba->outstanding_reqs);
 	ufshcd_writel(hba, 1 << task_tag, REG_UTP_TRANSFER_REQ_DOOR_BELL);
+	/* Make sure that doorbell is committed immediately */
+	wmb();
+	ufshcd_cond_add_cmd_trace(hba, task_tag, "send");
+	ufshcd_update_tag_stats(hba, task_tag);
+	return ret;
 }
 
 /**
@@ -813,10 +2354,14 @@
 	int len;
 	if (lrbp->sense_buffer &&
 	    ufshcd_get_rsp_upiu_data_seg_len(lrbp->ucd_rsp_ptr)) {
+		int len_to_copy;
+
 		len = be16_to_cpu(lrbp->ucd_rsp_ptr->sr.sense_data_len);
+		len_to_copy = min_t(int, RESPONSE_UPIU_SENSE_DATA_LENGTH, len);
+
 		memcpy(lrbp->sense_buffer,
 			lrbp->ucd_rsp_ptr->sr.sense_data,
-			min_t(int, len, SCSI_SENSE_BUFFERSIZE));
+			min_t(int, len_to_copy, UFSHCD_REQ_SENSE_SIZE));
 	}
 }
 
@@ -912,6 +2457,7 @@
 
 	hba->active_uic_cmd = uic_cmd;
 
+	ufshcd_dme_cmd_log(hba, "send", hba->active_uic_cmd->command);
 	/* Write Args */
 	ufshcd_writel(hba, uic_cmd->argument1, REG_UIC_COMMAND_ARG_1);
 	ufshcd_writel(hba, uic_cmd->argument2, REG_UIC_COMMAND_ARG_2);
@@ -942,6 +2488,11 @@
 	else
 		ret = -ETIMEDOUT;
 
+	if (ret)
+		ufsdbg_set_err_state(hba);
+
+	ufshcd_dme_cmd_log(hba, "cmp1", hba->active_uic_cmd->command);
+
 	spin_lock_irqsave(hba->host->host_lock, flags);
 	hba->active_uic_cmd = NULL;
 	spin_unlock_irqrestore(hba->host->host_lock, flags);
@@ -953,13 +2504,15 @@
  * __ufshcd_send_uic_cmd - Send UIC commands and retrieve the result
  * @hba: per adapter instance
  * @uic_cmd: UIC command
+ * @completion: initialize the completion only if this is set to true
  *
- * Identical to ufshcd_send_uic_cmd() expect mutex. Must be called
+ * Identical to ufshcd_send_uic_cmd() except for the mutex. Must be called
  * with mutex held and host_lock locked.
  * Returns 0 only if success.
  */
 static int
-__ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
+__ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd,
+		      bool completion)
 {
 	if (!ufshcd_ready_for_uic_cmd(hba)) {
 		dev_err(hba->dev,
@@ -967,6 +2520,7 @@
 		return -EIO;
 	}
 
-	init_completion(&uic_cmd->done);
+	if (completion)
+		init_completion(&uic_cmd->done);
 
 	ufshcd_dispatch_uic_cmd(hba, uic_cmd);
@@ -987,19 +2541,25 @@
 	int ret;
 	unsigned long flags;
 
-	ufshcd_hold(hba, false);
+	hba->ufs_stats.clk_hold.ctx = UIC_CMD_SEND;
+	ufshcd_hold_all(hba);
 	mutex_lock(&hba->uic_cmd_mutex);
 	ufshcd_add_delay_before_dme_cmd(hba);
 
 	spin_lock_irqsave(hba->host->host_lock, flags);
-	ret = __ufshcd_send_uic_cmd(hba, uic_cmd);
+	ret = __ufshcd_send_uic_cmd(hba, uic_cmd, true);
 	spin_unlock_irqrestore(hba->host->host_lock, flags);
 	if (!ret)
 		ret = ufshcd_wait_for_uic_cmd(hba, uic_cmd);
 
+	ufshcd_save_tstamp_of_last_dme_cmd(hba);
 	mutex_unlock(&hba->uic_cmd_mutex);
+	ufshcd_release_all(hba);
+	hba->ufs_stats.clk_rel.ctx = UIC_CMD_SEND;
+
+	ufsdbg_error_inject_dispatcher(hba,
+		ERR_INJECT_UIC, 0, &ret);
 
-	ufshcd_release(hba);
 	return ret;
 }
 
@@ -1035,6 +2595,7 @@
 				cpu_to_le32(lower_32_bits(sg->dma_address));
 			prd_table[i].upper_addr =
 				cpu_to_le32(upper_32_bits(sg->dma_address));
+			prd_table[i].reserved = 0;
 		}
 	} else {
 		lrbp->utr_descriptor_ptr->prd_table_length = 0;
@@ -1085,15 +2646,52 @@
 	ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE);
 }
 
+static int ufshcd_prepare_crypto_utrd(struct ufs_hba *hba,
+		struct ufshcd_lrb *lrbp)
+{
+	struct utp_transfer_req_desc *req_desc = lrbp->utr_descriptor_ptr;
+	u8 cc_index = 0;
+	bool enable = false;
+	u64 dun = 0;
+	int ret;
+
+	/*
+	 * Call vendor specific code to get crypto info for this request:
+	 * enable, crypto config. index, DUN.
+	 * If bypass is set, don't bother setting the other fields.
+	 */
+	ret = ufshcd_vops_crypto_req_setup(hba, lrbp, &cc_index, &enable, &dun);
+	if (ret) {
+		if (ret != -EAGAIN) {
+			dev_err(hba->dev,
+				"%s: failed to setup crypto request (%d)\n",
+				__func__, ret);
+		}
+
+		return ret;
+	}
+
+	if (!enable)
+		goto out;
+
+	req_desc->header.dword_0 |= cc_index | UTRD_CRYPTO_ENABLE;
+	req_desc->header.dword_1 = (u32)(dun & 0xFFFFFFFF);
+	req_desc->header.dword_3 = (u32)((dun >> 32) & 0xFFFFFFFF);
+out:
+	return 0;
+}
+
 /**
  * ufshcd_prepare_req_desc_hdr() - Fills the requests header
  * descriptor according to request
+ * @hba: per adapter instance
  * @lrbp: pointer to local reference block
  * @upiu_flags: flags required in the header
  * @cmd_dir: requests data direction
  */
-static void ufshcd_prepare_req_desc_hdr(struct ufshcd_lrb *lrbp,
-		u32 *upiu_flags, enum dma_data_direction cmd_dir)
+static int ufshcd_prepare_req_desc_hdr(struct ufs_hba *hba,
+	struct ufshcd_lrb *lrbp, u32 *upiu_flags,
+	enum dma_data_direction cmd_dir)
 {
 	struct utp_transfer_req_desc *req_desc = lrbp->utr_descriptor_ptr;
 	u32 data_direction;
@@ -1117,7 +2715,8 @@
 
 	/* Transfer request descriptor header fields */
 	req_desc->header.dword_0 = cpu_to_le32(dword_0);
-
+	/* dword_1 is reserved, hence it is set to 0 */
+	req_desc->header.dword_1 = 0;
 	/*
 	 * assigning invalid value for command status. Controller
 	 * updates OCS on command completion, with the command
@@ -1125,6 +2724,15 @@
 	 */
 	req_desc->header.dword_2 =
 		cpu_to_le32(OCS_INVALID_COMMAND_STATUS);
+	/* dword_3 is reserved, hence it is set to 0 */
+	req_desc->header.dword_3 = 0;
+
+	req_desc->prd_table_length = 0;
+
+	if (ufshcd_is_crypto_supported(hba))
+		return ufshcd_prepare_crypto_utrd(hba, lrbp);
+
+	return 0;
 }
 
 /**
@@ -1137,6 +2745,7 @@
 void ufshcd_prepare_utp_scsi_cmd_upiu(struct ufshcd_lrb *lrbp, u32 upiu_flags)
 {
 	struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
+	unsigned short cdb_len;
 
 	/* command descriptor fields */
 	ucd_req_ptr->header.dword_0 = UPIU_HEADER_DWORD(
@@ -1151,8 +2760,12 @@
 	ucd_req_ptr->sc.exp_data_transfer_len =
 		cpu_to_be32(lrbp->cmd->sdb.length);
 
-	memcpy(ucd_req_ptr->sc.cdb, lrbp->cmd->cmnd,
-		(min_t(unsigned short, lrbp->cmd->cmd_len, MAX_CDB_SIZE)));
+	cdb_len = min_t(unsigned short, lrbp->cmd->cmd_len, MAX_CDB_SIZE);
+	memcpy(ucd_req_ptr->sc.cdb, lrbp->cmd->cmnd, cdb_len);
+	if (cdb_len < MAX_CDB_SIZE)
+		memset(ucd_req_ptr->sc.cdb + cdb_len, 0,
+		       (MAX_CDB_SIZE - cdb_len));
+	memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
 }
 
 /**
@@ -1189,6 +2802,7 @@
 	if (query->request.upiu_req.opcode == UPIU_QUERY_OPCODE_WRITE_DESC)
 		memcpy(descp, query->descriptor, len);
 
+	memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
 }
 
 static inline void ufshcd_prepare_utp_nop_upiu(struct ufshcd_lrb *lrbp)
@@ -1201,6 +2815,11 @@
 	ucd_req_ptr->header.dword_0 =
 		UPIU_HEADER_DWORD(
 			UPIU_TRANSACTION_NOP_OUT, 0, 0, lrbp->task_tag);
+	/* clear rest of the fields of basic header */
+	ucd_req_ptr->header.dword_1 = 0;
+	ucd_req_ptr->header.dword_2 = 0;
+
+	memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
 }
 
 /**
@@ -1216,15 +2835,16 @@
 	switch (lrbp->command_type) {
 	case UTP_CMD_TYPE_SCSI:
 		if (likely(lrbp->cmd)) {
-			ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags,
-					lrbp->cmd->sc_data_direction);
+			ret = ufshcd_prepare_req_desc_hdr(hba, lrbp,
+				&upiu_flags, lrbp->cmd->sc_data_direction);
 			ufshcd_prepare_utp_scsi_cmd_upiu(lrbp, upiu_flags);
 		} else {
 			ret = -EINVAL;
 		}
 		break;
 	case UTP_CMD_TYPE_DEV_MANAGE:
-		ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, DMA_NONE);
+		ret = ufshcd_prepare_req_desc_hdr(hba, lrbp, &upiu_flags,
+			DMA_NONE);
 		if (hba->dev_cmd.type == DEV_CMD_TYPE_QUERY)
 			ufshcd_prepare_utp_query_req_upiu(
 					hba, lrbp, upiu_flags);
@@ -1276,6 +2896,61 @@
 }
 
 /**
+ * ufshcd_get_write_lock - synchronize between shutdown, scaling &
+ * arrival of requests
+ * @hba: ufs host
+ *
+ * The lock is predominantly held by the shutdown context, thus ensuring
+ * that no requests from any other context may sneak through.
+ */
+static inline void ufshcd_get_write_lock(struct ufs_hba *hba)
+{
+	down_write(&hba->lock);
+}
+
+/**
+ * ufshcd_get_read_lock - synchronize between shutdown, scaling &
+ * arrival of requests
+ * @hba: ufs host
+ *
+ * Returns 1 if the lock was acquired, 0 if the request may proceed
+ * without it, or < 0 on contention
+ *
+ * After shutdown is initiated, only requests directed to the well known
+ * device lun are allowed through. The sync between scaling & issue is
+ * maintained as is, and this restructuring syncs shutdown with these too.
+ */
+static int ufshcd_get_read_lock(struct ufs_hba *hba, u64 lun)
+{
+	int err = 0;
+
+	err = down_read_trylock(&hba->lock);
+	if (err > 0)
+		goto out;
+	/* let requests for the well known device lun go through */
+	if (ufshcd_scsi_to_upiu_lun(lun) == UFS_UPIU_UFS_DEVICE_WLUN)
+		return 0;
+	else if (!ufshcd_is_shutdown_ongoing(hba))
+		return -EAGAIN;
+	else
+		return -EPERM;
+
+out:
+	return err;
+}
+
+/**
+ * ufshcd_put_read_lock - synchronize between shutdown, scaling &
+ * arrival of requests
+ * @hba: ufs host
+ *
+ * Returns none
+ */
+static inline void ufshcd_put_read_lock(struct ufs_hba *hba)
+{
+	up_read(&hba->lock);
+}
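+
+/*
+ * A minimal sketch of the rwsem protocol implemented by the helpers above
+ * (ufshcd_queuecommand() below is the real issue-path user):
+ *
+ *	ret = ufshcd_get_read_lock(hba, lun);
+ *	if (ret > 0)
+ *		has_read_lock = true;
+ *	...
+ *	if (has_read_lock)
+ *		ufshcd_put_read_lock(hba);
+ *
+ * Shutdown and clock scaling take the exclusive side via
+ * ufshcd_get_write_lock(hba) and release it with up_write(&hba->lock).
+ */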
+
+/**
  * ufshcd_queuecommand - main entry point for SCSI requests
  * @cmd: command from SCSI Midlayer
  * @done: call back function
@@ -1289,12 +2964,42 @@
 	unsigned long flags;
 	int tag;
 	int err = 0;
+	bool has_read_lock = false;
 
 	hba = shost_priv(host);
 
+	if (!cmd || !cmd->request || !hba)
+		return -EINVAL;
+
 	tag = cmd->request->tag;
+	if (!ufshcd_valid_tag(hba, tag)) {
+		dev_err(hba->dev,
+			"%s: invalid command tag %d: cmd=0x%p, cmd->request=0x%p",
+			__func__, tag, cmd, cmd->request);
+		BUG();
+	}
+
+	err = ufshcd_get_read_lock(hba, cmd->device->lun);
+	if (unlikely(err < 0)) {
+		if (err == -EPERM) {
+			set_host_byte(cmd, DID_ERROR);
+			cmd->scsi_done(cmd);
+			return 0;
+		}
+		if (err == -EAGAIN)
+			return SCSI_MLQUEUE_HOST_BUSY;
+	} else if (err == 1) {
+		has_read_lock = true;
+	}
 
 	spin_lock_irqsave(hba->host->host_lock, flags);
+
+	/* if error handling is in progress, return host busy */
+	if (ufshcd_eh_in_progress(hba)) {
+		err = SCSI_MLQUEUE_HOST_BUSY;
+		goto out_unlock;
+	}
+
 	switch (hba->ufshcd_state) {
 	case UFSHCD_STATE_OPERATIONAL:
 		break;
@@ -1314,6 +3019,8 @@
 	}
 	spin_unlock_irqrestore(hba->host->host_lock, flags);
 
+	hba->req_abort_count = 0;
+
 	/* acquire the tag to make sure device cmds don't use it */
 	if (test_and_set_bit_lock(tag, &hba->lrb_in_use)) {
 		/*
@@ -1326,40 +3033,118 @@
 		goto out;
 	}
 
+	hba->ufs_stats.clk_hold.ctx = QUEUE_CMD;
 	err = ufshcd_hold(hba, true);
 	if (err) {
 		err = SCSI_MLQUEUE_HOST_BUSY;
 		clear_bit_unlock(tag, &hba->lrb_in_use);
 		goto out;
 	}
+	if (ufshcd_is_clkgating_allowed(hba))
+		WARN_ON(hba->clk_gating.state != CLKS_ON);
+
+	err = ufshcd_hibern8_hold(hba, true);
+	if (err) {
+		clear_bit_unlock(tag, &hba->lrb_in_use);
+		err = SCSI_MLQUEUE_HOST_BUSY;
+		hba->ufs_stats.clk_rel.ctx = QUEUE_CMD;
+		ufshcd_release(hba, true);
+		goto out;
+	}
+	if (ufshcd_is_hibern8_on_idle_allowed(hba))
+		WARN_ON(hba->hibern8_on_idle.state != HIBERN8_EXITED);
+
+	/* Vote PM QoS for the request */
+	ufshcd_vops_pm_qos_req_start(hba, cmd->request);
+
+	/* IO svc time latency histogram */
+	if (hba->latency_hist_enabled &&
+	    (cmd->request->cmd_type == REQ_TYPE_FS)) {
+		cmd->request->lat_hist_io_start = ktime_get();
+		cmd->request->lat_hist_enabled = 1;
+	} else {
+		cmd->request->lat_hist_enabled = 0;
+	}
+
 	WARN_ON(hba->clk_gating.state != CLKS_ON);
 
 	lrbp = &hba->lrb[tag];
 
 	WARN_ON(lrbp->cmd);
 	lrbp->cmd = cmd;
-	lrbp->sense_bufflen = SCSI_SENSE_BUFFERSIZE;
+	lrbp->sense_bufflen = UFSHCD_REQ_SENSE_SIZE;
 	lrbp->sense_buffer = cmd->sense_buffer;
 	lrbp->task_tag = tag;
 	lrbp->lun = ufshcd_scsi_to_upiu_lun(cmd->device->lun);
 	lrbp->intr_cmd = !ufshcd_is_intr_aggr_allowed(hba) ? true : false;
 	lrbp->command_type = UTP_CMD_TYPE_SCSI;
+	lrbp->req_abort_skip = false;
 
 	/* form UPIU before issuing the command */
-	ufshcd_compose_upiu(hba, lrbp);
+	err = ufshcd_compose_upiu(hba, lrbp);
+	if (err) {
+		if (err != -EAGAIN)
+			dev_err(hba->dev,
+				"%s: failed to compose upiu %d\n",
+				__func__, err);
+
+		lrbp->cmd = NULL;
+		clear_bit_unlock(tag, &hba->lrb_in_use);
+		ufshcd_release_all(hba);
+		ufshcd_vops_pm_qos_req_end(hba, cmd->request, true);
+		goto out;
+	}
+
 	err = ufshcd_map_sg(lrbp);
 	if (err) {
 		lrbp->cmd = NULL;
 		clear_bit_unlock(tag, &hba->lrb_in_use);
+		ufshcd_release_all(hba);
+		ufshcd_vops_pm_qos_req_end(hba, cmd->request, true);
+		goto out;
+	}
+
+	err = ufshcd_vops_crypto_engine_cfg_start(hba, tag);
+	if (err) {
+		if (err != -EAGAIN)
+			dev_err(hba->dev,
+				"%s: failed to configure crypto engine %d\n",
+				__func__, err);
+
+		scsi_dma_unmap(lrbp->cmd);
+		lrbp->cmd = NULL;
+		clear_bit_unlock(tag, &hba->lrb_in_use);
+		ufshcd_release_all(hba);
+		ufshcd_vops_pm_qos_req_end(hba, cmd->request, true);
+
 		goto out;
 	}
 
+	/* Make sure descriptors are ready before ringing the doorbell */
+	wmb();
 	/* issue command to the controller */
 	spin_lock_irqsave(hba->host->host_lock, flags);
-	ufshcd_send_command(hba, tag);
+
+	err = ufshcd_send_command(hba, tag);
+	if (err) {
+		spin_unlock_irqrestore(hba->host->host_lock, flags);
+		scsi_dma_unmap(lrbp->cmd);
+		lrbp->cmd = NULL;
+		clear_bit_unlock(tag, &hba->lrb_in_use);
+		ufshcd_release_all(hba);
+		ufshcd_vops_pm_qos_req_end(hba, cmd->request, true);
+		ufshcd_vops_crypto_engine_cfg_end(hba, lrbp, cmd->request);
+		dev_err(hba->dev, "%s: failed sending command, %d\n",
+							__func__, err);
+		err = DID_ERROR;
+		goto out;
+	}
+
 out_unlock:
 	spin_unlock_irqrestore(hba->host->host_lock, flags);
 out:
+	if (has_read_lock)
+		ufshcd_put_read_lock(hba);
 	return err;
 }
 
@@ -1396,7 +3181,7 @@
 	 */
 	err = ufshcd_wait_for_register(hba,
 			REG_UTP_TRANSFER_REQ_DOOR_BELL,
-			mask, ~mask, 1000, 1000);
+			mask, ~mask, 1000, 1000, true);
 
 	return err;
 }
@@ -1423,6 +3208,7 @@
 	int resp;
 	int err = 0;
 
+	hba->ufs_stats.last_hibern8_exit_tstamp = ktime_set(0, 0);
 	resp = ufshcd_get_req_rsp(lrbp->ucd_rsp_ptr);
 
 	switch (resp) {
@@ -1475,11 +3261,22 @@
 
 	if (!time_left) {
 		err = -ETIMEDOUT;
+		dev_dbg(hba->dev, "%s: dev_cmd request timed out, tag %d\n",
+			__func__, lrbp->task_tag);
 		if (!ufshcd_clear_cmd(hba, lrbp->task_tag))
-			/* sucessfully cleared the command, retry if needed */
+			/* successfully cleared the command, retry if needed */
 			err = -EAGAIN;
+		/*
+		 * in case of an error, after clearing the doorbell,
+		 * we also need to clear the outstanding_request
+		 * field in hba
+		 */
+		ufshcd_outstanding_req_clear(hba, lrbp->task_tag);
 	}
 
+	if (err)
+		ufsdbg_set_err_state(hba);
+
 	return err;
 }
 
@@ -1540,6 +3337,15 @@
 	unsigned long flags;
 
 	/*
+	 * May get invoked from shutdown, IOCTL and error recovery contexts.
+	 * In shutdown context, it comes in with lock acquired.
+	 * In error recovery context, it may come with lock acquired.
+	 */
+
+	if (!ufshcd_is_shutdown_ongoing(hba) && !ufshcd_eh_in_progress(hba))
+		down_read(&hba->lock);
+
+	/*
 	 * Get free slot, sleep if slots are unavailable.
 	 * Even though we use wait_event() which sleeps indefinitely,
 	 * the maximum wait time is bounded by SCSI request timeout.
@@ -1555,15 +3361,23 @@
 
 	hba->dev_cmd.complete = &wait;
 
+	/* Make sure descriptors are ready before ringing the doorbell */
+	wmb();
 	spin_lock_irqsave(hba->host->host_lock, flags);
-	ufshcd_send_command(hba, tag);
+	err = ufshcd_send_command(hba, tag);
 	spin_unlock_irqrestore(hba->host->host_lock, flags);
-
+	if (err) {
+		dev_err(hba->dev, "%s: failed sending command, %d\n",
+							__func__, err);
+		goto out_put_tag;
+	}
 	err = ufshcd_wait_for_dev_cmd(hba, lrbp, timeout);
 
 out_put_tag:
 	ufshcd_put_dev_cmd_tag(hba, tag);
 	wake_up(&hba->dev_cmd.tag_wq);
+	if (!ufshcd_is_shutdown_ongoing(hba) && !ufshcd_eh_in_progress(hba))
+		up_read(&hba->lock);
 	return err;
 }
 
@@ -1581,6 +3395,12 @@
 		struct ufs_query_req **request, struct ufs_query_res **response,
 		enum query_opcode opcode, u8 idn, u8 index, u8 selector)
 {
+	int idn_t = (int)idn;
+
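+	/* the ufsdbg error injection hook below may rewrite idn for testing */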
+	ufsdbg_error_inject_dispatcher(hba,
+		ERR_INJECT_QUERY, idn_t, (int *)&idn_t);
+	idn = idn_t;
+
 	*request = &hba->dev_cmd.query.request;
 	*response = &hba->dev_cmd.query.response;
 	memset(*request, 0, sizeof(struct ufs_query_req));
@@ -1589,6 +3409,31 @@
 	(*request)->upiu_req.idn = idn;
 	(*request)->upiu_req.index = index;
 	(*request)->upiu_req.selector = selector;
+
+	ufshcd_update_query_stats(hba, opcode, idn);
+}
+
+static int ufshcd_query_flag_retry(struct ufs_hba *hba,
+	enum query_opcode opcode, enum flag_idn idn, bool *flag_res)
+{
+	int ret;
+	int retries;
+
+	for (retries = 0; retries < QUERY_REQ_RETRIES; retries++) {
+		ret = ufshcd_query_flag(hba, opcode, idn, flag_res);
+		if (ret)
+			dev_dbg(hba->dev,
+				"%s: failed with error %d, retries %d\n",
+				__func__, ret, retries);
+		else
+			break;
+	}
+
+	if (ret)
+		dev_err(hba->dev,
+			"%s: query attribute, opcode %d, idn %d, failed with error %d after %d retires\n",
+			__func__, opcode, idn, ret, retries);
+	return ret;
 }
 
 /**
@@ -1600,16 +3445,17 @@
  *
  * Returns 0 for success, non-zero in case of failure
  */
-static int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
+int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
 			enum flag_idn idn, bool *flag_res)
 {
 	struct ufs_query_req *request = NULL;
 	struct ufs_query_res *response = NULL;
 	int err, index = 0, selector = 0;
+	int timeout = QUERY_REQ_TIMEOUT;
 
 	BUG_ON(!hba);
 
-	ufshcd_hold(hba, false);
+	ufshcd_hold_all(hba);
 	mutex_lock(&hba->dev_cmd.lock);
 	ufshcd_init_query(hba, &request, &response, opcode, idn, index,
 			selector);
@@ -1638,12 +3484,12 @@
 		goto out_unlock;
 	}
 
-	err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);
+	err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, timeout);
 
 	if (err) {
 		dev_err(hba->dev,
 			"%s: Sending flag query for idn %d failed, err = %d\n",
-			__func__, idn, err);
+			__func__, request->upiu_req.idn, err);
 		goto out_unlock;
 	}
 
@@ -1653,9 +3499,10 @@
 
 out_unlock:
 	mutex_unlock(&hba->dev_cmd.lock);
-	ufshcd_release(hba);
+	ufshcd_release_all(hba);
 	return err;
 }
+EXPORT_SYMBOL(ufshcd_query_flag);
 
 /**
  * ufshcd_query_attr - API function for sending attribute requests
@@ -1668,7 +3515,7 @@
  *
  * Returns 0 for success, non-zero in case of failure
 */
-static int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
+int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
 			enum attr_idn idn, u8 index, u8 selector, u32 *attr_val)
 {
 	struct ufs_query_req *request = NULL;
@@ -1677,7 +3524,7 @@
 
 	BUG_ON(!hba);
 
-	ufshcd_hold(hba, false);
+	ufshcd_hold_all(hba);
 	if (!attr_val) {
 		dev_err(hba->dev, "%s: attribute value required for opcode 0x%x\n",
 				__func__, opcode);
@@ -1707,8 +3554,9 @@
 	err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);
 
 	if (err) {
-		dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, err = %d\n",
-				__func__, opcode, idn, err);
+		dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, index %d, err = %d\n",
+				__func__, opcode,
+				request->upiu_req.idn, index, err);
 		goto out_unlock;
 	}
 
@@ -1717,25 +3565,49 @@
 out_unlock:
 	mutex_unlock(&hba->dev_cmd.lock);
 out:
-	ufshcd_release(hba);
+	ufshcd_release_all(hba);
 	return err;
 }
+EXPORT_SYMBOL(ufshcd_query_attr);
 
 /**
- * ufshcd_query_descriptor - API function for sending descriptor requests
- * hba: per-adapter instance
- * opcode: attribute opcode
- * idn: attribute idn to access
- * index: index field
- * selector: selector field
- * desc_buf: the buffer that contains the descriptor
- * buf_len: length parameter passed to the device
+ * ufshcd_query_attr_retry() - API function for sending query
+ * attribute with retries
+ * @hba: per-adapter instance
+ * @opcode: attribute opcode
+ * @idn: attribute idn to access
+ * @index: index field
+ * @selector: selector field
+ * @attr_val: the attribute value after the query request
+ * completes
  *
- * Returns 0 for success, non-zero in case of failure.
- * The buf_len parameter will contain, on return, the length parameter
- * received on the response.
+ * Returns 0 for success, non-zero in case of failure
  */
-static int ufshcd_query_descriptor(struct ufs_hba *hba,
+static int ufshcd_query_attr_retry(struct ufs_hba *hba,
+	enum query_opcode opcode, enum attr_idn idn, u8 index, u8 selector,
+	u32 *attr_val)
+{
+	int ret = 0;
+	u32 retries;
+
+	for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
+		ret = ufshcd_query_attr(hba, opcode, idn, index,
+						selector, attr_val);
+		if (ret)
+			dev_dbg(hba->dev, "%s: failed with error %d, retries %d\n",
+				__func__, ret, retries);
+		else
+			break;
+	}
+
+	if (ret)
+		dev_err(hba->dev,
+			"%s: query attribute, idn %d, failed with error %d after %d retires\n",
+			__func__, idn, ret, retries);
+	return ret;
+}
+
+static int __ufshcd_query_descriptor(struct ufs_hba *hba,
 			enum query_opcode opcode, enum desc_idn idn, u8 index,
 			u8 selector, u8 *desc_buf, int *buf_len)
 {
@@ -1745,7 +3617,7 @@
 
 	BUG_ON(!hba);
 
-	ufshcd_hold(hba, false);
+	ufshcd_hold_all(hba);
 	if (!desc_buf) {
 		dev_err(hba->dev, "%s: descriptor buffer required for opcode 0x%x\n",
 				__func__, opcode);
@@ -1784,8 +3656,9 @@
 	err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);
 
 	if (err) {
-		dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, err = %d\n",
-				__func__, opcode, idn, err);
+		dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, index %d, err = %d\n",
+				__func__, opcode,
+				request->upiu_req.idn, index, err);
 		goto out_unlock;
 	}
 
@@ -1795,9 +3668,41 @@
 out_unlock:
 	mutex_unlock(&hba->dev_cmd.lock);
 out:
-	ufshcd_release(hba);
+	ufshcd_release_all(hba);
+	return err;
+}
+
+/**
+ * ufshcd_query_descriptor - API function for sending descriptor requests
+ * @hba: per-adapter instance
+ * @opcode: attribute opcode
+ * @idn: attribute idn to access
+ * @index: index field
+ * @selector: selector field
+ * @desc_buf: the buffer that contains the descriptor
+ * @buf_len: length parameter passed to the device
+ *
+ * Returns 0 for success, non-zero in case of failure.
+ * The buf_len parameter will contain, on return, the length parameter
+ * received on the response.
+ */
+int ufshcd_query_descriptor(struct ufs_hba *hba,
+			enum query_opcode opcode, enum desc_idn idn, u8 index,
+			u8 selector, u8 *desc_buf, int *buf_len)
+{
+	int err;
+	int retries;
+
+	for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
+		err = __ufshcd_query_descriptor(hba, opcode, idn, index,
+						selector, desc_buf, buf_len);
+		if (!err || err == -EINVAL)
+			break;
+	}
+
 	return err;
 }
+EXPORT_SYMBOL(ufshcd_query_descriptor);
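+
+/*
+ * Example call (a sketch; the buffer sizing is illustrative):
+ *
+ *	u8 desc_buf[255];
+ *	int buf_len = sizeof(desc_buf);
+ *	int err = ufshcd_query_descriptor(hba, UPIU_QUERY_OPCODE_READ_DESC,
+ *					  QUERY_DESC_IDN_DEVICE, 0, 0,
+ *					  desc_buf, &buf_len);
+ */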
 
 /**
  * ufshcd_read_desc_param - read the specified descriptor parameter
@@ -1845,15 +3750,38 @@
 				      desc_id, desc_index, 0, desc_buf,
 				      &buff_len);
 
-	if (ret || (buff_len < ufs_query_desc_max_size[desc_id]) ||
-	    (desc_buf[QUERY_DESC_LENGTH_OFFSET] !=
-	     ufs_query_desc_max_size[desc_id])
-	    || (desc_buf[QUERY_DESC_DESC_TYPE_OFFSET] != desc_id)) {
-		dev_err(hba->dev, "%s: Failed reading descriptor. desc_id %d param_offset %d buff_len %d ret %d",
-			__func__, desc_id, param_offset, buff_len, ret);
-		if (!ret)
+	if (ret) {
+		dev_err(hba->dev, "%s: Failed reading descriptor. desc_id %d, desc_index %d, param_offset %d, ret %d",
+			__func__, desc_id, desc_index, param_offset, ret);
+
+		goto out;
+	}
+
+	/* Sanity check */
+	if (desc_buf[QUERY_DESC_DESC_TYPE_OFFSET] != desc_id) {
+		dev_err(hba->dev, "%s: invalid desc_id %d in descriptor header",
+			__func__, desc_buf[QUERY_DESC_DESC_TYPE_OFFSET]);
-			ret = -EINVAL;
+		ret = -EINVAL;
+		goto out;
+	}
 
+	/*
+	 * While reading variable size descriptors (like string descriptor),
+	 * some UFS devices may report the "LENGTH" (field in "Transaction
+	 * Specific fields" of Query Response UPIU) same as what was requested
+	 * in Query Request UPIU instead of reporting the actual size of the
+	 * variable size descriptor.
+	 * It's safe to ignore the "LENGTH" field for variable size descriptors
+	 * because we can always derive the length from the descriptor header
+	 * fields. Hence this change imposes the length match check only on
+	 * fixed size descriptors (for which we always request the correct
+	 * size as part of Query Request UPIU).
+	 */
+	if ((desc_id != QUERY_DESC_IDN_STRING) &&
+	    (buff_len != desc_buf[QUERY_DESC_LENGTH_OFFSET])) {
+		dev_err(hba->dev, "%s: desc_buf length mismatch: buff_len %d, buff_len(desc_header) %d",
+			__func__, buff_len, desc_buf[QUERY_DESC_LENGTH_OFFSET]);
+		ret = -EINVAL;
 		goto out;
 	}
 
@@ -1881,6 +3809,82 @@
 	return ufshcd_read_desc(hba, QUERY_DESC_IDN_POWER, 0, buf, size);
 }
 
+int ufshcd_read_device_desc(struct ufs_hba *hba, u8 *buf, u32 size)
+{
+	return ufshcd_read_desc(hba, QUERY_DESC_IDN_DEVICE, 0, buf, size);
+}
+
+/**
+ * ufshcd_read_string_desc - read string descriptor
+ * @hba: pointer to adapter instance
+ * @desc_index: descriptor index
+ * @buf: pointer to buffer where descriptor would be read
+ * @size: size of buf
+ * @ascii: if true convert from unicode to ascii characters
+ *
+ * Return 0 in case of success, non-zero otherwise
+ */
+int ufshcd_read_string_desc(struct ufs_hba *hba, int desc_index, u8 *buf,
+				u32 size, bool ascii)
+{
+	int err = 0;
+
+	err = ufshcd_read_desc(hba,
+				QUERY_DESC_IDN_STRING, desc_index, buf, size);
+
+	if (err) {
+		dev_err(hba->dev, "%s: reading String Desc failed after %d retries. err = %d\n",
+			__func__, QUERY_REQ_RETRIES, err);
+		goto out;
+	}
+
+	if (ascii) {
+		int desc_len;
+		int ascii_len;
+		int i;
+		char *buff_ascii;
+
+		desc_len = buf[0];
+		/* remove header and divide by 2 to move from UTF16 to UTF8 */
+		ascii_len = (desc_len - QUERY_DESC_HDR_SIZE) / 2 + 1;
+		if (size < ascii_len + QUERY_DESC_HDR_SIZE) {
+			dev_err(hba->dev, "%s: buffer allocated size is too small\n",
+					__func__);
+			err = -ENOMEM;
+			goto out;
+		}
+
+		buff_ascii = kzalloc(ascii_len, GFP_KERNEL);
+		if (!buff_ascii) {
+			dev_err(hba->dev, "%s: Failed allocating %d bytes\n",
+					__func__, ascii_len);
+			err = -ENOMEM;
+			goto out_free_buff;
+		}
+
+		/*
+		 * The descriptor contains a string in UTF-16 format.
+		 * Convert it to UTF-8 so it can be displayed.
+		 */
+		utf16s_to_utf8s((wchar_t *)&buf[QUERY_DESC_HDR_SIZE],
+				desc_len - QUERY_DESC_HDR_SIZE,
+				UTF16_BIG_ENDIAN, buff_ascii, ascii_len);
+
+		/* replace non-printable or non-ASCII characters with spaces */
+		for (i = 0; i < ascii_len; i++)
+			ufshcd_remove_non_printable(&buff_ascii[i]);
+
+		memset(buf + QUERY_DESC_HDR_SIZE, 0,
+				size - QUERY_DESC_HDR_SIZE);
+		memcpy(buf + QUERY_DESC_HDR_SIZE, buff_ascii, ascii_len);
+		buf[QUERY_DESC_LENGTH_OFFSET] = ascii_len + QUERY_DESC_HDR_SIZE;
+out_free_buff:
+		kfree(buff_ascii);
+	}
+out:
+	return err;
+}
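+
+/*
+ * Typical usage (a sketch; desc_index usually comes from a string index
+ * field in the device descriptor, e.g. the product name index, and the
+ * names below are illustrative):
+ *
+ *	err = ufshcd_read_string_desc(hba, model_index, str_buf,
+ *				      sizeof(str_buf), true);
+ */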
+
 /**
  * ufshcd_read_unit_desc_param - read the specified unit descriptor parameter
  * @hba: Pointer to adapter instance
@@ -1901,7 +3905,7 @@
 	 * Unit descriptors are only available for general purpose LUs (LUN id
 	 * from 0 to 7) and RPMB Well known LU.
 	 */
-	if (lun != UFS_UPIU_RPMB_WLUN && (lun >= UFS_UPIU_MAX_GENERAL_LUN))
+	if (!ufs_is_valid_unit_desc_lun(lun))
 		return -EOPNOTSUPP;
 
 	return ufshcd_read_desc_param(hba, QUERY_DESC_IDN_UNIT, lun,
@@ -2043,12 +4047,19 @@
 				cpu_to_le16(ALIGNED_UPIU_SIZE >> 2);
 
 		hba->lrb[i].utr_descriptor_ptr = (utrdlp + i);
+		hba->lrb[i].utrd_dma_addr = hba->utrdl_dma_addr +
+				(i * sizeof(struct utp_transfer_req_desc));
 		hba->lrb[i].ucd_req_ptr =
 			(struct utp_upiu_req *)(cmd_descp + i);
+		hba->lrb[i].ucd_req_dma_addr = cmd_desc_element_addr;
 		hba->lrb[i].ucd_rsp_ptr =
 			(struct utp_upiu_rsp *)cmd_descp[i].response_upiu;
+		hba->lrb[i].ucd_rsp_dma_addr = cmd_desc_element_addr +
+				response_offset;
 		hba->lrb[i].ucd_prdt_ptr =
 			(struct ufshcd_sg_entry *)cmd_descp[i].prd_table;
+		hba->lrb[i].ucd_prdt_dma_addr = cmd_desc_element_addr +
+				prdt_offset;
 	}
 }
 
@@ -2072,7 +4083,7 @@
 
 	ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
 	if (ret)
-		dev_err(hba->dev,
+		dev_dbg(hba->dev,
 			"dme-link-startup: error code %d\n", ret);
 	return ret;
 }
@@ -2108,6 +4119,13 @@
 	usleep_range(min_sleep_time_us, min_sleep_time_us + 50);
 }
 
+static inline void ufshcd_save_tstamp_of_last_dme_cmd(
+			struct ufs_hba *hba)
+{
+	if (hba->quirks & UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS)
+		hba->last_dme_cmd_tstamp = ktime_get();
+}
+
 /**
  * ufshcd_dme_set_attr - UIC command for DME_SET, DME_PEER_SET
  * @hba: per adapter instance
@@ -2128,6 +4146,10 @@
 	};
 	const char *set = action[!!peer];
 	int ret;
+	int retries = UFS_UIC_COMMAND_RETRIES;
+
+	ufsdbg_error_inject_dispatcher(hba,
+		ERR_INJECT_DME_ATTR, attr_sel, &attr_sel);
 
 	uic_cmd.command = peer ?
 		UIC_CMD_DME_PEER_SET : UIC_CMD_DME_SET;
@@ -2135,10 +4157,18 @@
 	uic_cmd.argument2 = UIC_ARG_ATTR_TYPE(attr_set);
 	uic_cmd.argument3 = mib_val;
 
-	ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
-	if (ret)
-		dev_err(hba->dev, "%s: attr-id 0x%x val 0x%x error code %d\n",
-			set, UIC_GET_ATTR_ID(attr_sel), mib_val, ret);
+	do {
+		/* for peer attributes we retry upon failure */
+		ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
+		if (ret)
+			dev_dbg(hba->dev, "%s: attr-id 0x%x val 0x%x error code %d\n",
+				set, UIC_GET_ATTR_ID(attr_sel), mib_val, ret);
+	} while (ret && peer && --retries);
+
+	if (ret)
+		dev_err(hba->dev, "%s: attr-id 0x%x val 0x%x failed %d retries\n",
+			set, UIC_GET_ATTR_ID(attr_sel), mib_val,
+			UFS_UIC_COMMAND_RETRIES - retries);
 
 	return ret;
 }
@@ -2163,6 +4193,7 @@
 	};
 	const char *get = action[!!peer];
 	int ret;
+	int retries = UFS_UIC_COMMAND_RETRIES;
 	struct ufs_pa_layer_attr orig_pwr_info;
 	struct ufs_pa_layer_attr temp_pwr_info;
 	bool pwr_mode_change = false;
@@ -2191,16 +4222,26 @@
 
 	uic_cmd.command = peer ?
 		UIC_CMD_DME_PEER_GET : UIC_CMD_DME_GET;
+
+	ufsdbg_error_inject_dispatcher(hba,
+		ERR_INJECT_DME_ATTR, attr_sel, &attr_sel);
+
 	uic_cmd.argument1 = attr_sel;
 
-	ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
-	if (ret) {
-		dev_err(hba->dev, "%s: attr-id 0x%x error code %d\n",
-			get, UIC_GET_ATTR_ID(attr_sel), ret);
-		goto out;
-	}
+	do {
+		/* for peer attributes we retry upon failure */
+		ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
+		if (ret)
+			dev_dbg(hba->dev, "%s: attr-id 0x%x error code %d\n",
+				get, UIC_GET_ATTR_ID(attr_sel), ret);
+	} while (ret && peer && --retries);
+
+	if (ret)
+		dev_err(hba->dev, "%s: attr-id 0x%x failed %d retries\n",
+			get, UIC_GET_ATTR_ID(attr_sel),
+			UFS_UIC_COMMAND_RETRIES - retries);
 
-	if (mib_val)
+	if (mib_val && !ret)
 		*mib_val = uic_cmd.argument3;
 
 	if (peer && (hba->quirks & UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE)
@@ -2233,6 +4274,7 @@
 	unsigned long flags;
 	u8 status;
 	int ret;
+	bool reenable_intr = false;
 
 	mutex_lock(&hba->uic_cmd_mutex);
 	init_completion(&uic_async_done);
@@ -2240,15 +4282,17 @@
 
 	spin_lock_irqsave(hba->host->host_lock, flags);
 	hba->uic_async_done = &uic_async_done;
-	ret = __ufshcd_send_uic_cmd(hba, cmd);
-	spin_unlock_irqrestore(hba->host->host_lock, flags);
-	if (ret) {
-		dev_err(hba->dev,
-			"pwr ctrl cmd 0x%x with mode 0x%x uic error %d\n",
-			cmd->command, cmd->argument3, ret);
-		goto out;
+	if (ufshcd_readl(hba, REG_INTERRUPT_ENABLE) & UIC_COMMAND_COMPL) {
+		ufshcd_disable_intr(hba, UIC_COMMAND_COMPL);
+		/*
+		 * Make sure UIC command completion interrupt is disabled before
+		 * issuing UIC command.
+		 */
+		wmb();
+		reenable_intr = true;
 	}
-	ret = ufshcd_wait_for_uic_cmd(hba, cmd);
+	ret = __ufshcd_send_uic_cmd(hba, cmd, false);
+	spin_unlock_irqrestore(hba->host->host_lock, flags);
 	if (ret) {
 		dev_err(hba->dev,
 			"pwr ctrl cmd 0x%x with mode 0x%x uic error %d\n",
@@ -2272,12 +4316,83 @@
 			cmd->command, status);
 		ret = (status != PWR_OK) ? status : -1;
 	}
+	ufshcd_dme_cmd_log(hba, "cmp2", hba->active_uic_cmd->command);
+
 out:
+	if (ret) {
+		ufsdbg_set_err_state(hba);
+		ufshcd_print_host_state(hba);
+		ufshcd_print_pwr_info(hba);
+		ufshcd_print_host_regs(hba);
+		ufshcd_print_cmd_log(hba);
+	}
+
+	ufshcd_save_tstamp_of_last_dme_cmd(hba);
 	spin_lock_irqsave(hba->host->host_lock, flags);
+	hba->active_uic_cmd = NULL;
 	hba->uic_async_done = NULL;
+	if (reenable_intr)
+		ufshcd_enable_intr(hba, UIC_COMMAND_COMPL);
 	spin_unlock_irqrestore(hba->host->host_lock, flags);
 	mutex_unlock(&hba->uic_cmd_mutex);
+	return ret;
+}
 
+int ufshcd_wait_for_doorbell_clr(struct ufs_hba *hba, u64 wait_timeout_us)
+{
+	unsigned long flags;
+	int ret = 0;
+	u32 tm_doorbell;
+	u32 tr_doorbell;
+	bool timeout = false, do_last_check = false;
+	ktime_t start;
+
+	ufshcd_hold_all(hba);
+	spin_lock_irqsave(hba->host->host_lock, flags);
+	/*
+	 * Wait for all the outstanding tasks/transfer requests.
+	 * Verify by checking the doorbell registers are clear.
+	 */
+	start = ktime_get();
+	do {
+		if (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL) {
+			ret = -EBUSY;
+			goto out;
+		}
+
+		tm_doorbell = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL);
+		tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
+		if (!tm_doorbell && !tr_doorbell) {
+			timeout = false;
+			break;
+		} else if (do_last_check) {
+			break;
+		}
+
+		spin_unlock_irqrestore(hba->host->host_lock, flags);
+		schedule();
+		if (ktime_to_us(ktime_sub(ktime_get(), start)) >
+		    wait_timeout_us) {
+			timeout = true;
+			/*
+			 * We might have been scheduled out for a long time,
+			 * so check whether the doorbells have cleared by
+			 * now.
+			 */
+			do_last_check = true;
+		}
+		spin_lock_irqsave(hba->host->host_lock, flags);
+	} while (tm_doorbell || tr_doorbell);
+
+	if (timeout) {
+		dev_err(hba->dev,
+			"%s: timedout waiting for doorbell to clear (tm=0x%x, tr=0x%x)\n",
+			__func__, tm_doorbell, tr_doorbell);
+		ret = -EBUSY;
+	}
+out:
+	spin_unlock_irqrestore(hba->host->host_lock, flags);
+	ufshcd_release_all(hba);
 	return ret;
 }
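+
+/*
+ * Note: a wait_timeout_us of U64_MAX effectively means "wait forever";
+ * __ufshcd_set_auto_hibern8_timer() relies on this to fully drain the
+ * doorbells before reprogramming the auto-hibern8 timer.
+ */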
 
@@ -2307,33 +4422,149 @@
 	uic_cmd.command = UIC_CMD_DME_SET;
 	uic_cmd.argument1 = UIC_ARG_MIB(PA_PWRMODE);
 	uic_cmd.argument3 = mode;
-	ufshcd_hold(hba, false);
+	hba->ufs_stats.clk_hold.ctx = PWRCTL_CMD_SEND;
+	ufshcd_hold_all(hba);
 	ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
-	ufshcd_release(hba);
-
+	hba->ufs_stats.clk_rel.ctx = PWRCTL_CMD_SEND;
+	ufshcd_release_all(hba);
 out:
 	return ret;
 }
 
-static int ufshcd_uic_hibern8_enter(struct ufs_hba *hba)
+static int ufshcd_link_recovery(struct ufs_hba *hba)
 {
+	int ret = 0;
+	unsigned long flags;
+
+	/*
+	 * Check if there is any race with fatal error handling.
+	 * If so, wait for it to complete. Even though fatal error
+	 * handling does reset and restore in some cases, don't assume
+	 * anything out of it. We are just avoiding race here.
+	 */
+	do {
+		spin_lock_irqsave(hba->host->host_lock, flags);
+		if (!(work_pending(&hba->eh_work) ||
+				hba->ufshcd_state == UFSHCD_STATE_RESET))
+			break;
+		spin_unlock_irqrestore(hba->host->host_lock, flags);
+		dev_dbg(hba->dev, "%s: reset in progress\n", __func__);
+		flush_work(&hba->eh_work);
+	} while (1);
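+
+	/*
+	 * Note: the loop above exits with host_lock still held; the state
+	 * update and the wait loop below run under that same lock until the
+	 * final unlock at the end of this function.
+	 */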
+
+	/*
+	 * We don't know whether the previous reset actually reset the host
+	 * controller, so force a reset here to be sure.
+	 */
+	hba->ufshcd_state = UFSHCD_STATE_ERROR;
+	hba->force_host_reset = true;
+	schedule_work(&hba->eh_work);
+
+	/* wait for the reset work to finish */
+	do {
+		if (!(work_pending(&hba->eh_work) ||
+				hba->ufshcd_state == UFSHCD_STATE_RESET))
+			break;
+		spin_unlock_irqrestore(hba->host->host_lock, flags);
+		dev_dbg(hba->dev, "%s: reset in progress\n", __func__);
+		flush_work(&hba->eh_work);
+		spin_lock_irqsave(hba->host->host_lock, flags);
+	} while (1);
+
+	if (!((hba->ufshcd_state == UFSHCD_STATE_OPERATIONAL) &&
+	      ufshcd_is_link_active(hba)))
+		ret = -ENOLINK;
+	spin_unlock_irqrestore(hba->host->host_lock, flags);
+
+	return ret;
+}
+
+static int __ufshcd_uic_hibern8_enter(struct ufs_hba *hba)
+{
+	int ret;
 	struct uic_command uic_cmd = {0};
+	ktime_t start = ktime_get();
 
 	uic_cmd.command = UIC_CMD_DME_HIBER_ENTER;
+	ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
+	trace_ufshcd_profile_hibern8(dev_name(hba->dev), "enter",
+			     ktime_to_us(ktime_sub(ktime_get(), start)), ret);
+
+	/*
+	 * Do full reinit if enter failed or if LINERESET was detected during
+	 * Hibern8 operation. After LINERESET, link moves to default PWM-G1
+	 * mode hence full reinit is required to move link to HS speeds.
+	 */
+	if (ret || hba->full_init_linereset) {
+		int err;
+
+		hba->full_init_linereset = false;
+		ufshcd_update_error_stats(hba, UFS_ERR_HIBERN8_ENTER);
+		dev_err(hba->dev, "%s: hibern8 enter failed. ret = %d",
+			__func__, ret);
+		/*
+		 * If link recovery fails then return the error code (-ENOLINK)
+		 * returned by ufshcd_link_recovery().
+		 * If link recovery succeeds then return -EAGAIN to attempt
+		 * hibern8 enter retry again.
+		 */
+		err = ufshcd_link_recovery(hba);
+		if (err) {
+			dev_err(hba->dev, "%s: link recovery failed", __func__);
+			ret = err;
+		} else {
+			ret = -EAGAIN;
+		}
+	} else {
+		dev_dbg(hba->dev, "%s: Hibern8 Enter at %lld us", __func__,
+			ktime_to_us(ktime_get()));
+	}
+
+	return ret;
+}
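+
+/*
+ * -EAGAIN from the helper above means the link was recovered and hibern8
+ * enter may be retried; any other error is treated as fatal by the retry
+ * wrapper below.
+ */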
 
-	return ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
+int ufshcd_uic_hibern8_enter(struct ufs_hba *hba)
+{
+	int ret = 0, retries;
+
+	for (retries = UIC_HIBERN8_ENTER_RETRIES; retries > 0; retries--) {
+		ret = __ufshcd_uic_hibern8_enter(hba);
+		if (!ret)
+			goto out;
+		else if (ret != -EAGAIN)
+			/* Unable to recover the link, so no point proceeding */
+			BUG();
+	}
+out:
+	return ret;
 }
 
-static int ufshcd_uic_hibern8_exit(struct ufs_hba *hba)
+int ufshcd_uic_hibern8_exit(struct ufs_hba *hba)
 {
 	struct uic_command uic_cmd = {0};
 	int ret;
+	ktime_t start = ktime_get();
 
 	uic_cmd.command = UIC_CMD_DME_HIBER_EXIT;
 	ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
+	trace_ufshcd_profile_hibern8(dev_name(hba->dev), "exit",
+			     ktime_to_us(ktime_sub(ktime_get(), start)), ret);
+
+	/* Do full reinit if exit failed */
 	if (ret) {
-		ufshcd_set_link_off(hba);
-		ret = ufshcd_host_reset_and_restore(hba);
+		ufshcd_update_error_stats(hba, UFS_ERR_HIBERN8_EXIT);
+		dev_err(hba->dev, "%s: hibern8 exit failed. ret = %d",
+			__func__, ret);
+		ret = ufshcd_link_recovery(hba);
+		/* Unable to recover the link, so no point proceeding */
+		if (ret)
+			BUG();
+	} else {
+		dev_dbg(hba->dev, "%s: Hibern8 Exit at %lld us", __func__,
+			ktime_to_us(ktime_get()));
+		hba->ufs_stats.last_hibern8_exit_tstamp = ktime_get();
+		hba->ufs_stats.hibern8_exit_cnt++;
 	}
 
 	return ret;
@@ -2366,8 +4597,8 @@
 	if (hba->max_pwr_info.is_valid)
 		return 0;
 
-	pwr_info->pwr_tx = FASTAUTO_MODE;
-	pwr_info->pwr_rx = FASTAUTO_MODE;
+	pwr_info->pwr_tx = FAST_MODE;
+	pwr_info->pwr_rx = FAST_MODE;
 	pwr_info->hs_rate = PA_HS_MODE_B;
 
 	/* Get the connected lane count */
@@ -2398,7 +4629,7 @@
 				__func__, pwr_info->gear_rx);
 			return -EINVAL;
 		}
-		pwr_info->pwr_rx = SLOWAUTO_MODE;
+		pwr_info->pwr_rx = SLOW_MODE;
 	}
 
 	ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR),
@@ -2411,20 +4642,21 @@
 				__func__, pwr_info->gear_tx);
 			return -EINVAL;
 		}
-		pwr_info->pwr_tx = SLOWAUTO_MODE;
+		pwr_info->pwr_tx = SLOW_MODE;
 	}
 
 	hba->max_pwr_info.is_valid = true;
 	return 0;
 }
 
-static int ufshcd_change_power_mode(struct ufs_hba *hba,
+int ufshcd_change_power_mode(struct ufs_hba *hba,
 			     struct ufs_pa_layer_attr *pwr_mode)
 {
-	int ret;
+	int ret = 0;
 
 	/* if already configured to the requested pwr_mode */
-	if (pwr_mode->gear_rx == hba->pwr_info.gear_rx &&
+	if (!hba->restore_needed &&
+		pwr_mode->gear_rx == hba->pwr_info.gear_rx &&
 	    pwr_mode->gear_tx == hba->pwr_info.gear_tx &&
 	    pwr_mode->lane_rx == hba->pwr_info.lane_rx &&
 	    pwr_mode->lane_tx == hba->pwr_info.lane_tx &&
@@ -2435,6 +4667,10 @@
 		return 0;
 	}
 
+	ufsdbg_error_inject_dispatcher(hba, ERR_INJECT_PWR_CHANGE, 0, &ret);
+	if (ret)
+		return ret;
+
 	/*
 	 * Configure attributes for power mode change with below.
 	 * - PA_RXGEAR, PA_ACTIVERXDATALANES, PA_RXTERMINATION,
@@ -2466,10 +4702,25 @@
 		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HSSERIES),
 						pwr_mode->hs_rate);
 
+	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA0),
+			DL_FC0ProtectionTimeOutVal_Default);
+	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA1),
+			DL_TC0ReplayTimeOutVal_Default);
+	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA2),
+			DL_AFC0ReqTimeOutVal_Default);
+
+	ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalFC0ProtectionTimeOutVal),
+			DL_FC0ProtectionTimeOutVal_Default);
+	ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalTC0ReplayTimeOutVal),
+			DL_TC0ReplayTimeOutVal_Default);
+	ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalAFC0ReqTimeOutVal),
+			DL_AFC0ReqTimeOutVal_Default);
+
 	ret = ufshcd_uic_change_pwr_mode(hba, pwr_mode->pwr_rx << 4
 			| pwr_mode->pwr_tx);
 
 	if (ret) {
+		ufshcd_update_error_stats(hba, UFS_ERR_POWER_MODE_CHANGE);
 		dev_err(hba->dev,
 			"%s: power mode change failed %d\n", __func__, ret);
 	} else {
@@ -2478,6 +4729,7 @@
 
 		memcpy(&hba->pwr_info, pwr_mode,
 			sizeof(struct ufs_pa_layer_attr));
+		hba->ufs_stats.power_mode_change_cnt++;
 	}
 
 	return ret;
@@ -2501,6 +4753,8 @@
 		memcpy(&final_params, desired_pwr_mode, sizeof(final_params));
 
 	ret = ufshcd_change_power_mode(hba, &final_params);
+	if (!ret)
+		ufshcd_print_pwr_info(hba);
 
 	return ret;
 }
@@ -2513,17 +4767,12 @@
  */
 static int ufshcd_complete_dev_init(struct ufs_hba *hba)
 {
-	int i, retries, err = 0;
+	int i;
+	int err;
 	bool flag_res = 1;
 
-	for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
-		/* Set the fDeviceInit flag */
-		err = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_SET_FLAG,
+	err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG,
 					QUERY_FLAG_IDN_FDEVICEINIT, NULL);
-		if (!err || err == -ETIMEDOUT)
-			break;
-		dev_dbg(hba->dev, "%s: error %d retrying\n", __func__, err);
-	}
 	if (err) {
 		dev_err(hba->dev,
 			"%s setting fDeviceInit flag failed with error %d\n",
@@ -2531,18 +4780,11 @@
 		goto out;
 	}
 
-	/* poll for max. 100 iterations for fDeviceInit flag to clear */
-	for (i = 0; i < 100 && !err && flag_res; i++) {
-		for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
-			err = ufshcd_query_flag(hba,
-					UPIU_QUERY_OPCODE_READ_FLAG,
+	/* poll for max. 1000 iterations for fDeviceInit flag to clear */
+	for (i = 0; i < 1000 && !err && flag_res; i++)
+		err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_READ_FLAG,
 					QUERY_FLAG_IDN_FDEVICEINIT, &flag_res);
-			if (!err || err == -ETIMEDOUT)
-				break;
-			dev_dbg(hba->dev, "%s: error %d retrying\n", __func__,
-					err);
-		}
-	}
+
 	if (err)
 		dev_err(hba->dev,
 			"%s reading fDeviceInit flag failed with error %d\n",
@@ -2563,7 +4805,7 @@
  * To bring UFS host controller to operational state,
  * 1. Enable required interrupts
  * 2. Configure interrupt aggregation
- * 3. Program UTRL and UTMRL base addres
+ * 3. Program UTRL and UTMRL base address
  * 4. Configure run-stop-registers
  *
  * Returns 0 on success, non-zero value on failure
@@ -2593,8 +4835,13 @@
 			REG_UTP_TASK_REQ_LIST_BASE_H);
 
 	/*
+	 * Make sure base address and interrupt setup are updated before
+	 * enabling the run/stop registers below.
+	 */
+	wmb();
+
+	/*
 	 * UCRDY, UTMRLDY and UTRLRDY bits must be 1
-	 * DEI, HEI bits must be 0
 	 */
 	reg = ufshcd_readl(hba, REG_CONTROLLER_STATUS);
 	if (!(ufshcd_get_lists_status(reg))) {
@@ -2611,6 +4858,23 @@
 }
 
 /**
+ * ufshcd_hba_stop - Send controller to reset state
+ * @hba: per adapter instance
+ * @can_sleep: perform sleep or just spin
+ */
+static inline void ufshcd_hba_stop(struct ufs_hba *hba, bool can_sleep)
+{
+	int err;
+
+	ufshcd_writel(hba, CONTROLLER_DISABLE,  REG_CONTROLLER_ENABLE);
+	err = ufshcd_wait_for_register(hba, REG_CONTROLLER_ENABLE,
+					CONTROLLER_ENABLE, CONTROLLER_DISABLE,
+					10, 1, can_sleep);
+	if (err)
+		dev_err(hba->dev, "%s: Controller disable failed\n", __func__);
+}
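+
+/*
+ * The wait above polls REG_CONTROLLER_ENABLE every 10 us for at most 1 ms
+ * (the interval_us/timeout_ms arguments); can_sleep selects sleeping vs.
+ * busy-wait polling inside ufshcd_wait_for_register().
+ */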
+
+/**
  * ufshcd_hba_enable - initialize the controller
  * @hba: per adapter instance
  *
@@ -2630,18 +4894,9 @@
 	 * development and testing of this driver. msleep can be changed to
 	 * mdelay and retry count can be reduced based on the controller.
 	 */
-	if (!ufshcd_is_hba_active(hba)) {
-
+	if (!ufshcd_is_hba_active(hba))
 		/* change controller state to "reset state" */
-		ufshcd_hba_stop(hba);
-
-		/*
-		 * This delay is based on the testing done with UFS host
-		 * controller FPGA. The delay can be changed based on the
-		 * host controller used.
-		 */
-		msleep(5);
-	}
+		ufshcd_hba_stop(hba, true);
 
 	/* UniPro link is disabled at this point */
 	ufshcd_set_link_off(hba);
@@ -2715,6 +4970,11 @@
 	return err;
 }
 
+static inline int ufshcd_disable_host_tx_lcc(struct ufs_hba *hba)
+{
+	return ufshcd_disable_tx_lcc(hba, false);
+}
+
 static inline int ufshcd_disable_device_tx_lcc(struct ufs_hba *hba)
 {
 	return ufshcd_disable_tx_lcc(hba, true);
@@ -2730,14 +4990,26 @@
 {
 	int ret;
 	int retries = DME_LINKSTARTUP_RETRIES;
+	bool link_startup_again = false;
+
+	/*
+	 * If UFS device isn't active then we will have to issue link startup
+	 * 2 times to make sure the device state move to active.
+	 */
+	if (!ufshcd_is_ufs_dev_active(hba))
+		link_startup_again = true;
 
+link_startup:
 	do {
 		ufshcd_vops_link_startup_notify(hba, PRE_CHANGE);
 
 		ret = ufshcd_dme_link_startup(hba);
+		if (ret)
+			ufshcd_update_error_stats(hba, UFS_ERR_LINKSTARTUP);
 
 		/* check if device is detected by inter-connect layer */
 		if (!ret && !ufshcd_is_device_present(hba)) {
+			ufshcd_update_error_stats(hba, UFS_ERR_LINKSTARTUP);
 			dev_err(hba->dev, "%s: Device not present\n", __func__);
 			ret = -ENXIO;
 			goto out;
@@ -2756,12 +5028,28 @@
 		/* failed to get the link up... retire */
 		goto out;
 
+	if (link_startup_again) {
+		link_startup_again = false;
+		retries = DME_LINKSTARTUP_RETRIES;
+		goto link_startup;
+	}
+
+	/* Mark that link is up in PWM-G1, 1-lane, SLOW-AUTO mode */
+	ufshcd_init_pwr_info(hba);
+	ufshcd_print_pwr_info(hba);
+
 	if (hba->quirks & UFSHCD_QUIRK_BROKEN_LCC) {
 		ret = ufshcd_disable_device_tx_lcc(hba);
 		if (ret)
 			goto out;
 	}
 
+	if (hba->dev_quirks & UFS_DEVICE_QUIRK_BROKEN_LCC) {
+		ret = ufshcd_disable_host_tx_lcc(hba);
+		if (ret)
+			goto out;
+	}
+
 	/* Include any host controller configuration via UIC commands */
 	ret = ufshcd_vops_link_startup_notify(hba, POST_CHANGE);
 	if (ret)
@@ -2769,8 +5057,12 @@
 
 	ret = ufshcd_make_hba_operational(hba);
 out:
-	if (ret)
+	if (ret) {
 		dev_err(hba->dev, "link startup failed %d\n", ret);
+		ufshcd_print_host_state(hba);
+		ufshcd_print_pwr_info(hba);
+		ufshcd_print_host_regs(hba);
+	}
 	return ret;
 }
 
@@ -2789,7 +5081,7 @@
 	int err = 0;
 	int retries;
 
-	ufshcd_hold(hba, false);
+	ufshcd_hold_all(hba);
 	mutex_lock(&hba->dev_cmd.lock);
 	for (retries = NOP_OUT_RETRIES; retries > 0; retries--) {
 		err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_NOP,
@@ -2801,7 +5093,7 @@
 		dev_dbg(hba->dev, "%s: error %d retrying\n", __func__, err);
 	}
 	mutex_unlock(&hba->dev_cmd.lock);
-	ufshcd_release(hba);
+	ufshcd_release_all(hba);
 
 	if (err)
 		dev_err(hba->dev, "%s: NOP OUT failed %d\n", __func__, err);
@@ -2923,6 +5215,8 @@
 	/* REPORT SUPPORTED OPERATION CODES is not supported */
 	sdev->no_report_opcodes = 1;
 
+	/* WRITE_SAME command is not supported */
+	sdev->no_write_same = 1;
 
 	ufshcd_set_queue_depth(sdev);
 
@@ -2958,6 +5252,9 @@
 	blk_queue_update_dma_pad(q, PRDT_DATA_BYTE_COUNT_PAD - 1);
 	blk_queue_max_segment_size(q, PRDT_DATA_BYTE_COUNT_MAX);
 
+	sdev->autosuspend_delay = UFSHCD_AUTO_SUSPEND_DELAY_MS;
+	sdev->use_rpm_auto = 1;
+
 	return 0;
 }
 
@@ -3067,6 +5364,7 @@
 	int result = 0;
 	int scsi_status;
 	int ocs;
+	bool print_prdt;
 
 	/* overall command status of utrd */
 	ocs = ufshcd_get_tr_ocs(lrbp);
@@ -3074,7 +5372,7 @@
 	switch (ocs) {
 	case OCS_SUCCESS:
 		result = ufshcd_get_req_rsp(lrbp->ucd_rsp_ptr);
-
+		hba->ufs_stats.last_hibern8_exit_tstamp = ktime_set(0, 0);
 		switch (result) {
 		case UPIU_TRANSACTION_RESPONSE:
 			/*
@@ -3090,7 +5388,20 @@
 			scsi_status = result & MASK_SCSI_STATUS;
 			result = ufshcd_scsi_cmd_status(lrbp, scsi_status);
 
-			if (ufshcd_is_exception_event(lrbp->ucd_rsp_ptr))
+			/*
+			 * We currently only support BKOPs exception events,
+			 * so BKOPs exception events can be ignored during
+			 * power management callbacks. A BKOPs exception event
+			 * is not expected to be raised in the runtime suspend
+			 * callback, as that path allows urgent bkops anyway.
+			 * During system suspend we forcefully disable bkops;
+			 * if urgent bkops is needed it will be enabled on
+			 * system resume. A long term solution could be to
+			 * abort the system suspend if the UFS device needs
+			 * urgent BKOPs.
+			 */
+			if (!hba->pm_op_in_progress &&
+			    ufshcd_is_exception_event(lrbp->ucd_rsp_ptr))
 				schedule_work(&hba->eeh_work);
 			break;
 		case UPIU_TRANSACTION_REJECT_UPIU:
@@ -3119,13 +5430,34 @@
 	case OCS_MISMATCH_RESP_UPIU_SIZE:
 	case OCS_PEER_COMM_FAILURE:
 	case OCS_FATAL_ERROR:
+	case OCS_DEVICE_FATAL_ERROR:
+	case OCS_INVALID_CRYPTO_CONFIG:
+	case OCS_GENERAL_CRYPTO_ERROR:
 	default:
 		result |= DID_ERROR << 16;
 		dev_err(hba->dev,
-		"OCS error from controller = %x\n", ocs);
+				"OCS error from controller = %x for tag %d\n",
+				ocs, lrbp->task_tag);
+		/*
+		 * This is called in interrupt context, hence avoid sleep
+		 * while printing debug registers. Also print only the minimum
+		 * debug registers needed to debug OCS failure.
+		 */
+		__ufshcd_print_host_regs(hba, true);
+		ufshcd_print_host_state(hba);
 		break;
 	} /* end of switch */
 
+	if ((host_byte(result) != DID_OK) && !hba->silence_err_logs) {
+		print_prdt = (ocs == OCS_INVALID_PRDT_ATTR ||
+			ocs == OCS_MISMATCH_DATA_BUF_SIZE);
+		ufshcd_print_trs(hba, 1 << lrbp->task_tag, print_prdt);
+	}
+
+	if ((host_byte(result) == DID_ERROR) ||
+	    (host_byte(result) == DID_ABORT))
+		ufsdbg_set_err_state(hba);
+
 	return result;
 }
 
@@ -3133,65 +5465,180 @@
  * ufshcd_uic_cmd_compl - handle completion of uic command
  * @hba: per adapter instance
  * @intr_status: interrupt status generated by the controller
+ *
+ * Returns
+ *  IRQ_HANDLED - If interrupt is valid
+ *  IRQ_NONE    - If invalid interrupt
  */
-static void ufshcd_uic_cmd_compl(struct ufs_hba *hba, u32 intr_status)
+static irqreturn_t ufshcd_uic_cmd_compl(struct ufs_hba *hba, u32 intr_status)
 {
+	irqreturn_t retval = IRQ_NONE;
+
 	if ((intr_status & UIC_COMMAND_COMPL) && hba->active_uic_cmd) {
 		hba->active_uic_cmd->argument2 |=
 			ufshcd_get_uic_cmd_result(hba);
 		hba->active_uic_cmd->argument3 =
 			ufshcd_get_dme_attr_val(hba);
 		complete(&hba->active_uic_cmd->done);
+		retval = IRQ_HANDLED;
 	}
 
-	if ((intr_status & UFSHCD_UIC_PWR_MASK) && hba->uic_async_done)
+	if (intr_status & UFSHCD_UIC_PWR_MASK) {
+		if (hba->uic_async_done) {
 		complete(hba->uic_async_done);
+			retval = IRQ_HANDLED;
+		} else if (ufshcd_is_auto_hibern8_supported(hba)) {
+			/*
+			 * If uic_async_done flag is not set then this
+			 * is an Auto hibern8 err interrupt.
+			 * Perform a host reset followed by a full
+			 * link recovery.
+			 */
+			hba->ufshcd_state = UFSHCD_STATE_ERROR;
+			hba->force_host_reset = true;
+			dev_err(hba->dev, "%s: Auto Hibern8 %s failed - status: 0x%08x, upmcrs: 0x%08x\n",
+				__func__, (intr_status & UIC_HIBERNATE_ENTER) ?
+				"Enter" : "Exit",
+				intr_status, ufshcd_get_upmcrs(hba));
+			__ufshcd_print_host_regs(hba, true);
+			ufshcd_print_host_state(hba);
+			schedule_work(&hba->eh_work);
+			retval = IRQ_HANDLED;
+		}
+	}
+	return retval;
 }
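The conversion of these completion helpers to irqreturn_t relies on IRQ_NONE being 0, which is what makes the "retval |= ..." accumulation in the callers valid. A self-contained sketch of the pattern, with hypothetical interrupt sources:

	#include <linux/bitops.h>
	#include <linux/interrupt.h>

	static irqreturn_t demo_handle_sources(u32 status)
	{
		irqreturn_t ret = IRQ_NONE;	/* IRQ_NONE == 0 */

		if (status & BIT(0))
			ret |= IRQ_HANDLED;	/* source A consumed */
		if (status & BIT(1))
			ret |= IRQ_HANDLED;	/* source B consumed */

		/* IRQ_NONE survives only if no source claimed the interrupt,
		 * letting the caller detect a spurious interrupt. */
		return ret;
	}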
 
 /**
- * ufshcd_transfer_req_compl - handle SCSI and query command completion
+ * ufshcd_abort_outstanding_transfer_requests - abort all outstanding transfer requests.
  * @hba: per adapter instance
+ * @result: error result to inform scsi layer about
  */
-static void ufshcd_transfer_req_compl(struct ufs_hba *hba)
+void ufshcd_abort_outstanding_transfer_requests(struct ufs_hba *hba, int result)
 {
+	u8 index;
 	struct ufshcd_lrb *lrbp;
 	struct scsi_cmnd *cmd;
-	unsigned long completed_reqs;
-	u32 tr_doorbell;
-	int result;
-	int index;
 
-	/* Resetting interrupt aggregation counters first and reading the
-	 * DOOR_BELL afterward allows us to handle all the completed requests.
-	 * In order to prevent other interrupts starvation the DB is read once
-	 * after reset. The down side of this solution is the possibility of
-	 * false interrupt if device completes another request after resetting
-	 * aggregation and before reading the DB.
-	 */
-	if (ufshcd_is_intr_aggr_allowed(hba))
-		ufshcd_reset_intr_aggr(hba);
+	if (!hba->outstanding_reqs)
+		return;
 
-	tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
-	completed_reqs = tr_doorbell ^ hba->outstanding_reqs;
+	for_each_set_bit(index, &hba->outstanding_reqs, hba->nutrs) {
+		lrbp = &hba->lrb[index];
+		cmd = lrbp->cmd;
+		if (cmd) {
+			ufshcd_cond_add_cmd_trace(hba, index, "failed");
+			ufshcd_update_error_stats(hba,
+					UFS_ERR_INT_FATAL_ERRORS);
+			scsi_dma_unmap(cmd);
+			cmd->result = result;
+			/* Clear pending transfer requests */
+			ufshcd_clear_cmd(hba, index);
+			ufshcd_outstanding_req_clear(hba, index);
+			clear_bit_unlock(index, &hba->lrb_in_use);
+			lrbp->complete_time_stamp = ktime_get();
+			update_req_stats(hba, lrbp);
+			/* Mark completed command as NULL in LRB */
+			lrbp->cmd = NULL;
+			ufshcd_release_all(hba);
+			if (cmd->request) {
+				/*
+				 * As we are accessing the "request" structure,
+				 * this must be called before calling
+				 * ->scsi_done() callback.
+				 */
+				ufshcd_vops_pm_qos_req_end(hba, cmd->request,
+					true);
+				ufshcd_vops_crypto_engine_cfg_end(hba,
+						lrbp, cmd->request);
+			}
+			/* Do not touch lrbp after scsi done */
+			cmd->scsi_done(cmd);
+		} else if (lrbp->command_type == UTP_CMD_TYPE_DEV_MANAGE) {
+			if (hba->dev_cmd.complete) {
+				ufshcd_cond_add_cmd_trace(hba, index,
+							"dev_failed");
+				ufshcd_outstanding_req_clear(hba, index);
+				complete(hba->dev_cmd.complete);
+			}
+		}
+		if (ufshcd_is_clkscaling_supported(hba))
+			hba->clk_scaling.active_reqs--;
+	}
+}
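A quick illustration of the for_each_set_bit() walk used above, treating outstanding_reqs as a bitmap of busy doorbell slots (values are made up):

	#include <linux/bitops.h>
	#include <linux/printk.h>

	static void demo_walk_outstanding(void)
	{
		unsigned long outstanding = 0x13;	/* tags 0, 1 and 4 busy */
		int nutrs = 32;				/* queue depth */
		int tag;

		for_each_set_bit(tag, &outstanding, nutrs)
			pr_info("tag %d is outstanding\n", tag); /* 0, 1, 4 */
	}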
+
+/**
+ * __ufshcd_transfer_req_compl - handle SCSI and query command completion
+ * @hba: per adapter instance
+ * @completed_reqs: requests to complete
+ */
+static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
+					unsigned long completed_reqs)
+{
+	struct ufshcd_lrb *lrbp;
+	struct scsi_cmnd *cmd;
+	int result;
+	int index;
+	struct request *req;
 
 	for_each_set_bit(index, &completed_reqs, hba->nutrs) {
 		lrbp = &hba->lrb[index];
 		cmd = lrbp->cmd;
 		if (cmd) {
+			ufshcd_cond_add_cmd_trace(hba, index, "complete");
+			ufshcd_update_tag_stats_completion(hba, cmd);
 			result = ufshcd_transfer_rsp_status(hba, lrbp);
 			scsi_dma_unmap(cmd);
 			cmd->result = result;
+			clear_bit_unlock(index, &hba->lrb_in_use);
+			lrbp->complete_time_stamp = ktime_get();
+			update_req_stats(hba, lrbp);
 			/* Mark completed command as NULL in LRB */
 			lrbp->cmd = NULL;
+			hba->ufs_stats.clk_rel.ctx = XFR_REQ_COMPL;
+			__ufshcd_release(hba, false);
+			__ufshcd_hibern8_release(hba, false);
+			if (cmd->request) {
+				/*
+				 * As we are accessing the "request" structure,
+				 * this must be called before calling
+				 * ->scsi_done() callback.
+				 */
+				ufshcd_vops_pm_qos_req_end(hba, cmd->request,
+					false);
+				ufshcd_vops_crypto_engine_cfg_end(hba,
+					lrbp, cmd->request);
+			}
+
 			clear_bit_unlock(index, &hba->lrb_in_use);
+			req = cmd->request;
+			if (req) {
+				/* Update IO svc time latency histogram */
+				if (req->lat_hist_enabled) {
+					ktime_t completion;
+					u_int64_t delta_us;
+
+					completion = ktime_get();
+					delta_us = ktime_us_delta(completion,
+						  req->lat_hist_io_start);
+					blk_update_latency_hist(
+						(rq_data_dir(req) == READ) ?
+						&hba->io_lat_read :
+						&hba->io_lat_write, delta_us);
+				}
+			}
 			/* Do not touch lrbp after scsi done */
 			cmd->scsi_done(cmd);
-			__ufshcd_release(hba);
 		} else if (lrbp->command_type == UTP_CMD_TYPE_DEV_MANAGE) {
-			if (hba->dev_cmd.complete)
+			if (hba->dev_cmd.complete) {
+				ufshcd_cond_add_cmd_trace(hba, index,
+						"dcmp");
 				complete(hba->dev_cmd.complete);
 		}
 	}
+		if (ufshcd_is_clkscaling_supported(hba))
+			hba->clk_scaling.active_reqs--;
+	}
 
 	/* clear corresponding bits of completed commands */
 	hba->outstanding_reqs ^= completed_reqs;
@@ -3203,6 +5650,40 @@
 }
 
 /**
+ * ufshcd_transfer_req_compl - handle SCSI and query command completion
+ * @hba: per adapter instance
+ *
+ * Returns
+ *  IRQ_HANDLED - If interrupt is valid
+ *  IRQ_NONE    - If invalid interrupt
+ */
+static irqreturn_t ufshcd_transfer_req_compl(struct ufs_hba *hba)
+{
+	unsigned long completed_reqs;
+	u32 tr_doorbell;
+
+	/* Resetting interrupt aggregation counters first and reading the
+	 * DOOR_BELL afterward allows us to handle all the completed requests.
+	 * In order to prevent other interrupts starvation the DB is read once
+	 * after reset. The down side of this solution is the possibility of
+	 * false interrupt if device completes another request after resetting
+	 * aggregation and before reading the DB.
+	 */
+	if (ufshcd_is_intr_aggr_allowed(hba))
+		ufshcd_reset_intr_aggr(hba);
+
+	tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
+	completed_reqs = tr_doorbell ^ hba->outstanding_reqs;
+
+	if (completed_reqs) {
+		__ufshcd_transfer_req_compl(hba, completed_reqs);
+		return IRQ_HANDLED;
+	} else {
+		return IRQ_NONE;
+	}
+}
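The XOR above works because a tag whose doorbell bit the controller has already cleared, while software still marks it outstanding, is by definition a completed request. A worked example with a made-up 4-slot queue:

	/*
	 * outstanding_reqs = 0b1011 -> tags 0, 1 and 3 issued by software
	 * tr_doorbell      = 0b1000 -> controller still owns only tag 3
	 * completed_reqs   = 0b1011 ^ 0b1000 = 0b0011 -> tags 0 and 1 done
	 */
	unsigned long outstanding = 0xb, tr_doorbell = 0x8;
	unsigned long completed = tr_doorbell ^ outstanding;	/* 0x3 */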
+
+/**
  * ufshcd_disable_ee - disable exception event
  * @hba: per-adapter instance
  * @mask: exception event to disable
@@ -3222,7 +5703,7 @@
 
 	val = hba->ee_ctrl_mask & ~mask;
 	val &= 0xFFFF; /* 2 bytes */
-	err = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
+	err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
 			QUERY_ATTR_IDN_EE_CONTROL, 0, 0, &val);
 	if (!err)
 		hba->ee_ctrl_mask &= ~mask;
@@ -3250,7 +5731,7 @@
 
 	val = hba->ee_ctrl_mask | mask;
 	val &= 0xFFFF; /* 2 bytes */
-	err = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
+	err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
 			QUERY_ATTR_IDN_EE_CONTROL, 0, 0, &val);
 	if (!err)
 		hba->ee_ctrl_mask |= mask;
@@ -3276,7 +5757,7 @@
 	if (hba->auto_bkops_enabled)
 		goto out;
 
-	err = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_SET_FLAG,
+	err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG,
 			QUERY_FLAG_IDN_BKOPS_EN, NULL);
 	if (err) {
 		dev_err(hba->dev, "%s: failed to enable bkops %d\n",
@@ -3285,6 +5766,7 @@
 	}
 
 	hba->auto_bkops_enabled = true;
+	trace_ufshcd_auto_bkops_state(dev_name(hba->dev), 1);
 
 	/* No need of URGENT_BKOPS exception from the device */
 	err = ufshcd_disable_ee(hba, MASK_EE_URGENT_BKOPS);
@@ -3325,7 +5807,7 @@
 		goto out;
 	}
 
-	err = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_CLEAR_FLAG,
+	err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_CLEAR_FLAG,
 			QUERY_FLAG_IDN_BKOPS_EN, NULL);
 	if (err) {
 		dev_err(hba->dev, "%s: failed to disable bkops %d\n",
@@ -3335,6 +5817,7 @@
 	}
 
 	hba->auto_bkops_enabled = false;
+	trace_ufshcd_auto_bkops_state(dev_name(hba->dev), 0);
 out:
 	return err;
 }
@@ -3363,7 +5846,7 @@
 
 static inline int ufshcd_get_bkops_status(struct ufs_hba *hba, u32 *status)
 {
-	return ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_READ_ATTR,
+	return ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
 			QUERY_ATTR_IDN_BKOPS_STATUS, 0, 0, status);
 }
 
@@ -3421,15 +5904,52 @@
  */
 static int ufshcd_urgent_bkops(struct ufs_hba *hba)
 {
-	return ufshcd_bkops_ctrl(hba, BKOPS_STATUS_PERF_IMPACT);
+	return ufshcd_bkops_ctrl(hba, hba->urgent_bkops_lvl);
 }
 
 static inline int ufshcd_get_ee_status(struct ufs_hba *hba, u32 *status)
 {
-	return ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_READ_ATTR,
+	return ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
 			QUERY_ATTR_IDN_EE_STATUS, 0, 0, status);
 }
 
+static void ufshcd_bkops_exception_event_handler(struct ufs_hba *hba)
+{
+	int err;
+	u32 curr_status = 0;
+
+	if (hba->is_urgent_bkops_lvl_checked)
+		goto enable_auto_bkops;
+
+	err = ufshcd_get_bkops_status(hba, &curr_status);
+	if (err) {
+		dev_err(hba->dev, "%s: failed to get BKOPS status %d\n",
+				__func__, err);
+		goto out;
+	}
+
+	/*
+	 * We are seeing that some devices raise the urgent bkops exception
+	 * events even when the BKOPS status doesn't indicate performance
+	 * impacted or critical. Handle such devices by determining their
+	 * urgent bkops status at runtime.
+	 */
+	if (curr_status < BKOPS_STATUS_PERF_IMPACT) {
+		dev_err(hba->dev, "%s: device raised urgent BKOPS exception for bkops status %d\n",
+				__func__, curr_status);
+		/* update the current status as the urgent bkops level */
+		hba->urgent_bkops_lvl = curr_status;
+		hba->is_urgent_bkops_lvl_checked = true;
+	}
+
+enable_auto_bkops:
+	err = ufshcd_enable_auto_bkops(hba);
+out:
+	if (err < 0)
+		dev_err(hba->dev, "%s: failed to handle urgent bkops %d\n",
+				__func__, err);
+}
+
 /**
  * ufshcd_exception_event_handler - handle exceptions raised by device
  * @work: pointer to work data
@@ -3445,6 +5965,7 @@
 	hba = container_of(work, struct ufs_hba, eeh_work);
 
 	pm_runtime_get_sync(hba->dev);
+	ufshcd_scsi_block_requests(hba);
 	err = ufshcd_get_ee_status(hba, &status);
 	if (err) {
 		dev_err(hba->dev, "%s: failed to get exception status %d\n",
@@ -3453,17 +5974,117 @@
 	}
 
 	status &= hba->ee_ctrl_mask;
-	if (status & MASK_EE_URGENT_BKOPS) {
-		err = ufshcd_urgent_bkops(hba);
-		if (err < 0)
-			dev_err(hba->dev, "%s: failed to handle urgent bkops %d\n",
-					__func__, err);
-	}
+
+	if (status & MASK_EE_URGENT_BKOPS)
+		ufshcd_bkops_exception_event_handler(hba);
+
 out:
-	pm_runtime_put_sync(hba->dev);
+	ufshcd_scsi_unblock_requests(hba);
+	pm_runtime_put(hba->dev);
 	return;
 }
 
+/* Complete requests that have door-bell cleared */
+static void ufshcd_complete_requests(struct ufs_hba *hba)
+{
+	ufshcd_transfer_req_compl(hba);
+	ufshcd_tmc_handler(hba);
+}
+
+/**
+ * ufshcd_quirk_dl_nac_errors - checks whether error handling is required
+ *				to recover from DL NAC errors.
+ * @hba: per-adapter instance
+ *
+ * Returns true if error handling is required, false otherwise
+ */
+static bool ufshcd_quirk_dl_nac_errors(struct ufs_hba *hba)
+{
+	unsigned long flags;
+	bool err_handling = true;
+
+	spin_lock_irqsave(hba->host->host_lock, flags);
+	/*
+	 * UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS only works around
+	 * device fatal errors and/or DL NAC & REPLAY timeout errors.
+	 */
+	if (hba->saved_err & (CONTROLLER_FATAL_ERROR | SYSTEM_BUS_FATAL_ERROR))
+		goto out;
+
+	if ((hba->saved_err & DEVICE_FATAL_ERROR) ||
+	    ((hba->saved_err & UIC_ERROR) &&
+	     (hba->saved_uic_err & UFSHCD_UIC_DL_TCx_REPLAY_ERROR))) {
+		/*
+		 * We have to do error recovery, but at least silence the
+		 * error logs.
+		 */
+		hba->silence_err_logs = true;
+		goto out;
+	}
+
+	if ((hba->saved_err & UIC_ERROR) &&
+	    (hba->saved_uic_err & UFSHCD_UIC_DL_NAC_RECEIVED_ERROR)) {
+		int err;
+		/*
+		 * wait for 50ms to see if any other errors show up.
+		 */
+		spin_unlock_irqrestore(hba->host->host_lock, flags);
+		msleep(50);
+		spin_lock_irqsave(hba->host->host_lock, flags);
+
+		/*
+		 * Now check whether we have received any severe errors other
+		 * than the DL NAC error.
+		 */
+		if ((hba->saved_err & INT_FATAL_ERRORS) ||
+		    ((hba->saved_err & UIC_ERROR) &&
+		    (hba->saved_uic_err & ~UFSHCD_UIC_DL_NAC_RECEIVED_ERROR))) {
+			if (((hba->saved_err & INT_FATAL_ERRORS) ==
+				DEVICE_FATAL_ERROR) || (hba->saved_uic_err &
+					~UFSHCD_UIC_DL_NAC_RECEIVED_ERROR))
+				hba->silence_err_logs = true;
+			goto out;
+		}
+
+		/*
+		 * As DL NAC is the only error received so far, send out NOP
+		 * command to confirm if link is still active or not.
+		 *   - If we don't get any response then do error recovery.
+		 *   - If we get response then clear the DL NAC error bit.
+		 */
+
+		/* silence the error logs from NOP command */
+		hba->silence_err_logs = true;
+		spin_unlock_irqrestore(hba->host->host_lock, flags);
+		err = ufshcd_verify_dev_init(hba);
+		spin_lock_irqsave(hba->host->host_lock, flags);
+		hba->silence_err_logs = false;
+
+		if (err) {
+			hba->silence_err_logs = true;
+			goto out;
+		}
+
+		/* Link seems to be alive hence ignore the DL NAC errors */
+		if (hba->saved_uic_err == UFSHCD_UIC_DL_NAC_RECEIVED_ERROR)
+			hba->saved_err &= ~UIC_ERROR;
+		/* clear NAC error */
+		hba->saved_uic_err &= ~UFSHCD_UIC_DL_NAC_RECEIVED_ERROR;
+		if (!hba->saved_uic_err) {
+			err_handling = false;
+			goto out;
+		}
+		/*
+		 * There seem to be errors other than NAC, so do error
+		 * recovery.
+		 */
+		hba->silence_err_logs = true;
+	}
+out:
+	spin_unlock_irqrestore(hba->host->host_lock, flags);
+	return err_handling;
+}
+
 /**
  * ufshcd_err_handler - handle UFS errors that require s/w attention
  * @work: pointer to work structure
@@ -3472,51 +6093,149 @@
 {
 	struct ufs_hba *hba;
 	unsigned long flags;
-	u32 err_xfer = 0;
-	u32 err_tm = 0;
+	bool err_xfer = false, err_tm = false;
 	int err = 0;
 	int tag;
+	bool needs_reset = false;
+	bool clks_enabled = false;
 
 	hba = container_of(work, struct ufs_hba, eh_work);
 
-	pm_runtime_get_sync(hba->dev);
-	ufshcd_hold(hba, false);
-
 	spin_lock_irqsave(hba->host->host_lock, flags);
-	if (hba->ufshcd_state == UFSHCD_STATE_RESET) {
-		spin_unlock_irqrestore(hba->host->host_lock, flags);
+	ufsdbg_set_err_state(hba);
+
+	if (hba->ufshcd_state == UFSHCD_STATE_RESET)
 		goto out;
+
+	/*
+	 * Make sure the clocks are ON before we proceed with err
+	 * handling. In the majority of cases the err handler runs with
+	 * clocks ON. There is a possibility that the err handler was
+	 * scheduled due to an auto hibern8 error interrupt, in which case
+	 * the clocks could be gated, or in the process of being gated,
+	 * when the err handler runs.
+	 */
+	if (unlikely((hba->clk_gating.state != CLKS_ON) &&
+	    ufshcd_is_auto_hibern8_supported(hba))) {
+		spin_unlock_irqrestore(hba->host->host_lock, flags);
+		hba->ufs_stats.clk_hold.ctx = ERR_HNDLR_WORK;
+		ufshcd_hold(hba, false);
+		spin_lock_irqsave(hba->host->host_lock, flags);
+		clks_enabled = true;
 	}
 
 	hba->ufshcd_state = UFSHCD_STATE_RESET;
 	ufshcd_set_eh_in_progress(hba);
 
 	/* Complete requests that have door-bell cleared by h/w */
-	ufshcd_transfer_req_compl(hba);
-	ufshcd_tmc_handler(hba);
+	ufshcd_complete_requests(hba);
+
+	if (hba->dev_quirks & UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS) {
+		bool ret;
+
+		spin_unlock_irqrestore(hba->host->host_lock, flags);
+		/* release the lock as ufshcd_quirk_dl_nac_errors() may sleep */
+		ret = ufshcd_quirk_dl_nac_errors(hba);
+		spin_lock_irqsave(hba->host->host_lock, flags);
+		if (!ret)
+			goto skip_err_handling;
+	}
+
+	/*
+	 * Dump the controller state before resetting. The transfer request
+	 * state will be dumped as part of request completion.
+	 */
+	if (hba->saved_err & (INT_FATAL_ERRORS | UIC_ERROR)) {
+		dev_err(hba->dev, "%s: saved_err 0x%x saved_uic_err 0x%x",
+			__func__, hba->saved_err, hba->saved_uic_err);
+		if (!hba->silence_err_logs) {
+			/* release lock as print host regs sleeps */
 	spin_unlock_irqrestore(hba->host->host_lock, flags);
+			ufshcd_print_host_regs(hba);
+			ufshcd_print_host_state(hba);
+			ufshcd_print_pwr_info(hba);
+			ufshcd_print_tmrs(hba, hba->outstanding_tasks);
+			ufshcd_print_cmd_log(hba);
+			spin_lock_irqsave(hba->host->host_lock, flags);
+		}
+	}
+
+	if ((hba->saved_err & INT_FATAL_ERRORS)
+	    || hba->saved_ce_err || hba->force_host_reset ||
+	    ((hba->saved_err & UIC_ERROR) &&
+	    (hba->saved_uic_err & (UFSHCD_UIC_DL_PA_INIT_ERROR |
+				   UFSHCD_UIC_DL_NAC_RECEIVED_ERROR |
+				   UFSHCD_UIC_DL_TCx_REPLAY_ERROR))))
+		needs_reset = true;
+
+	/*
+	 * if host reset is required then skip clearing the pending
+	 * transfers forcefully because they will automatically get
+	 * cleared after link startup.
+	 */
+	if (needs_reset)
+		goto skip_pending_xfer_clear;
 
+	/* release lock as clear command might sleep */
+	spin_unlock_irqrestore(hba->host->host_lock, flags);
 	/* Clear pending transfer requests */
-	for_each_set_bit(tag, &hba->outstanding_reqs, hba->nutrs)
-		if (ufshcd_clear_cmd(hba, tag))
-			err_xfer |= 1 << tag;
+	for_each_set_bit(tag, &hba->outstanding_reqs, hba->nutrs) {
+		if (ufshcd_clear_cmd(hba, tag)) {
+			err_xfer = true;
+			goto lock_skip_pending_xfer_clear;
+		}
+	}
 
 	/* Clear pending task management requests */
-	for_each_set_bit(tag, &hba->outstanding_tasks, hba->nutmrs)
-		if (ufshcd_clear_tm_cmd(hba, tag))
-			err_tm |= 1 << tag;
+	for_each_set_bit(tag, &hba->outstanding_tasks, hba->nutmrs) {
+		if (ufshcd_clear_tm_cmd(hba, tag)) {
+			err_tm = true;
+			goto lock_skip_pending_xfer_clear;
+		}
+	}
 
-	/* Complete the requests that are cleared by s/w */
+lock_skip_pending_xfer_clear:
 	spin_lock_irqsave(hba->host->host_lock, flags);
-	ufshcd_transfer_req_compl(hba);
-	ufshcd_tmc_handler(hba);
-	spin_unlock_irqrestore(hba->host->host_lock, flags);
 
+	/* Complete the requests that are cleared by s/w */
+	ufshcd_complete_requests(hba);
+
+	if (err_xfer || err_tm)
+		needs_reset = true;
+
+skip_pending_xfer_clear:
 	/* Fatal errors need reset */
-	if (err_xfer || err_tm || (hba->saved_err & INT_FATAL_ERRORS) ||
-			((hba->saved_err & UIC_ERROR) &&
-			 (hba->saved_uic_err & UFSHCD_UIC_DL_PA_INIT_ERROR))) {
+	if (needs_reset) {
+		unsigned long max_doorbells = (1UL << hba->nutrs) - 1;
+
+		if (hba->saved_err & INT_FATAL_ERRORS)
+			ufshcd_update_error_stats(hba,
+						  UFS_ERR_INT_FATAL_ERRORS);
+		if (hba->saved_ce_err)
+			ufshcd_update_error_stats(hba, UFS_ERR_CRYPTO_ENGINE);
+
+		if (hba->saved_err & UIC_ERROR)
+			ufshcd_update_error_stats(hba,
+						  UFS_ERR_INT_UIC_ERROR);
+
+		if (err_xfer || err_tm)
+			ufshcd_update_error_stats(hba,
+						  UFS_ERR_CLEAR_PEND_XFER_TM);
+
+		/*
+		 * ufshcd_reset_and_restore() does the link reinitialization,
+		 * which needs at least one empty doorbell slot to send the
+		 * device management commands (NOP and query commands).
+		 * If no slot is empty at this moment, forcefully free up
+		 * the last slot.
+		 */
+		if (hba->outstanding_reqs == max_doorbells)
+			__ufshcd_transfer_req_compl(hba,
+						    (1UL << (hba->nutrs - 1)));
+
+		spin_unlock_irqrestore(hba->host->host_lock, flags);
 		err = ufshcd_reset_and_restore(hba);
+		spin_lock_irqsave(hba->host->host_lock, flags);
 		if (err) {
 			dev_err(hba->dev, "%s: reset and restore failed\n",
 					__func__);
@@ -3529,76 +6248,223 @@
 		scsi_report_bus_reset(hba->host, 0);
 		hba->saved_err = 0;
 		hba->saved_uic_err = 0;
+		hba->saved_ce_err = 0;
+		hba->force_host_reset = false;
+	}
+
+skip_err_handling:
+	if (!needs_reset) {
+		hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
+		if (hba->saved_err || hba->saved_uic_err)
+			dev_err_ratelimited(hba->dev, "%s: exit: saved_err 0x%x saved_uic_err 0x%x",
+			    __func__, hba->saved_err, hba->saved_uic_err);
+	}
+
+	hba->silence_err_logs = false;
+
+	if (clks_enabled) {
+		__ufshcd_release(hba, false);
+		hba->ufs_stats.clk_rel.ctx = ERR_HNDLR_WORK;
 	}
+out:
 	ufshcd_clear_eh_in_progress(hba);
+	spin_unlock_irqrestore(hba->host->host_lock, flags);
+}
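For reference, the doorbell-full check above reduces to simple bit math; a sketch with a typical 32-slot controller (values are illustrative):

	int nutrs = 32;					/* transfer request slots */
	/* on a 64-bit build: (1UL << 32) - 1 == 0xffffffff */
	unsigned long max_doorbells = (1UL << nutrs) - 1;
	unsigned long last_slot = 1UL << (nutrs - 1);	/* bit 31, freed forcefully */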
+
+static void ufshcd_update_uic_reg_hist(struct ufs_uic_err_reg_hist *reg_hist,
+		u32 reg)
+{
+	reg_hist->reg[reg_hist->pos] = reg;
+	reg_hist->tstamp[reg_hist->pos] = ktime_get();
+	reg_hist->pos = (reg_hist->pos + 1) % UIC_ERR_REG_HIST_LENGTH;
+}
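ufshcd_update_uic_reg_hist() keeps a fixed-size ring: once the buffer has wrapped, pos indexes the oldest entry. A sketch of dumping the history oldest-first, assuming the struct and UIC_ERR_REG_HIST_LENGTH definitions elsewhere in this patch and that unused slots still carry a zero timestamp:

	static void demo_dump_uic_hist(struct ufs_uic_err_reg_hist *h)
	{
		int i;

		for (i = 0; i < UIC_ERR_REG_HIST_LENGTH; i++) {
			int p = (h->pos + i) % UIC_ERR_REG_HIST_LENGTH;

			if (!ktime_to_us(h->tstamp[p]))
				continue;	/* slot never written */
			pr_err("hist[%d] = 0x%x at %lld us\n",
			       i, h->reg[p], ktime_to_us(h->tstamp[p]));
		}
	}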
+
+static void ufshcd_rls_handler(struct work_struct *work)
+{
+	struct ufs_hba *hba;
+	int ret = 0;
+	u32 mode;
+
+	hba = container_of(work, struct ufs_hba, rls_work);
+	ufshcd_scsi_block_requests(hba);
+	pm_runtime_get_sync(hba->dev);
+	ret = ufshcd_wait_for_doorbell_clr(hba, U64_MAX);
+	if (ret) {
+		dev_err(hba->dev,
+			"Timed out (%d) waiting for DB to clear\n",
+			ret);
+		goto out;
+	}
+
+	ufshcd_dme_get(hba, UIC_ARG_MIB(PA_PWRMODE), &mode);
+	if (hba->pwr_info.pwr_rx != ((mode >> PWR_RX_OFFSET) & PWR_INFO_MASK))
+		hba->restore_needed = true;
+
+	if (hba->pwr_info.pwr_tx != (mode & PWR_INFO_MASK))
+		hba->restore_needed = true;
+
+	ufshcd_dme_get(hba, UIC_ARG_MIB(PA_RXGEAR), &mode);
+	if (hba->pwr_info.gear_rx != mode)
+		hba->restore_needed = true;
+
+	ufshcd_dme_get(hba, UIC_ARG_MIB(PA_TXGEAR), &mode);
+	if (hba->pwr_info.gear_tx != mode)
+		hba->restore_needed = true;
+
+	if (hba->restore_needed)
+		ret = ufshcd_config_pwr_mode(hba, &(hba->pwr_info));
+
+	if (ret)
+		dev_err(hba->dev, "%s: Failed setting power mode, err = %d\n",
+			__func__, ret);
+	else
+		hba->restore_needed = false;
 
 out:
-	scsi_unblock_requests(hba->host);
-	ufshcd_release(hba);
+	ufshcd_scsi_unblock_requests(hba);
 	pm_runtime_put_sync(hba->dev);
 }
 
 /**
  * ufshcd_update_uic_error - check and set fatal UIC error flags.
  * @hba: per-adapter instance
+ *
+ * Returns
+ *  IRQ_HANDLED - If interrupt is valid
+ *  IRQ_NONE    - If invalid interrupt
  */
-static void ufshcd_update_uic_error(struct ufs_hba *hba)
+static irqreturn_t ufshcd_update_uic_error(struct ufs_hba *hba)
 {
 	u32 reg;
+	irqreturn_t retval = IRQ_NONE;
+
+	/* PHY layer lane error */
+	reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_PHY_ADAPTER_LAYER);
+	if ((reg & UIC_PHY_ADAPTER_LAYER_ERROR) &&
+	    (reg & UIC_PHY_ADAPTER_LAYER_ERROR_CODE_MASK)) {
+		/*
+		 * To know whether this error is fatal, the DB timeout
+		 * must be checked, but that error is handled separately.
+		 */
+		dev_dbg(hba->dev, "%s: UIC Lane error reported, reg 0x%x\n",
+				__func__, reg);
+		ufshcd_update_uic_reg_hist(&hba->ufs_stats.pa_err, reg);
+
+		/*
+		 * Don't ignore LINERESET indication during hibern8
+		 * enter operation.
+		 */
+		if (reg & UIC_PHY_ADAPTER_LAYER_GENERIC_ERROR) {
+			struct uic_command *cmd = hba->active_uic_cmd;
+
+			if (cmd) {
+				if (cmd->command == UIC_CMD_DME_HIBER_ENTER) {
+					dev_err(hba->dev, "%s: LINERESET during hibern8 enter, reg 0x%x\n",
+						__func__, reg);
+					hba->full_init_linereset = true;
+				}
+			}
+			if (!hba->full_init_linereset)
+				schedule_work(&hba->rls_work);
+		}
+		retval |= IRQ_HANDLED;
+	}
 
 	/* PA_INIT_ERROR is fatal and needs UIC reset */
 	reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DATA_LINK_LAYER);
-	if (reg & UIC_DATA_LINK_LAYER_ERROR_PA_INIT)
+	if ((reg & UIC_DATA_LINK_LAYER_ERROR) &&
+	    (reg & UIC_DATA_LINK_LAYER_ERROR_CODE_MASK)) {
+		ufshcd_update_uic_reg_hist(&hba->ufs_stats.dl_err, reg);
+
+		if (reg & UIC_DATA_LINK_LAYER_ERROR_PA_INIT) {
 		hba->uic_error |= UFSHCD_UIC_DL_PA_INIT_ERROR;
+		} else if (hba->dev_quirks &
+			   UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS) {
+			if (reg & UIC_DATA_LINK_LAYER_ERROR_NAC_RECEIVED)
+				hba->uic_error |=
+					UFSHCD_UIC_DL_NAC_RECEIVED_ERROR;
+			else if (reg &
+				 UIC_DATA_LINK_LAYER_ERROR_TCx_REPLAY_TIMEOUT)
+				hba->uic_error |=
+					UFSHCD_UIC_DL_TCx_REPLAY_ERROR;
+		}
+		retval |= IRQ_HANDLED;
+	}
 
 	/* UIC NL/TL/DME errors needs software retry */
 	reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_NETWORK_LAYER);
-	if (reg)
+	if ((reg & UIC_NETWORK_LAYER_ERROR) &&
+	    (reg & UIC_NETWORK_LAYER_ERROR_CODE_MASK)) {
+		ufshcd_update_uic_reg_hist(&hba->ufs_stats.nl_err, reg);
 		hba->uic_error |= UFSHCD_UIC_NL_ERROR;
+		retval |= IRQ_HANDLED;
+	}
 
 	reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_TRANSPORT_LAYER);
-	if (reg)
+	if ((reg & UIC_TRANSPORT_LAYER_ERROR) &&
+	    (reg & UIC_TRANSPORT_LAYER_ERROR_CODE_MASK)) {
+		ufshcd_update_uic_reg_hist(&hba->ufs_stats.tl_err, reg);
 		hba->uic_error |= UFSHCD_UIC_TL_ERROR;
+		retval |= IRQ_HANDLED;
+	}
 
 	reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DME);
-	if (reg)
+	if ((reg & UIC_DME_ERROR) &&
+	    (reg & UIC_DME_ERROR_CODE_MASK)) {
+		ufshcd_update_uic_reg_hist(&hba->ufs_stats.dme_err, reg);
 		hba->uic_error |= UFSHCD_UIC_DME_ERROR;
+		retval |= IRQ_HANDLED;
+	}
 
 	dev_dbg(hba->dev, "%s: UIC error flags = 0x%08x\n",
 			__func__, hba->uic_error);
+	return retval;
 }
 
 /**
  * ufshcd_check_errors - Check for errors that need s/w attention
  * @hba: per-adapter instance
+ *
+ * Returns
+ *  IRQ_HANDLED - If interrupt is valid
+ *  IRQ_NONE    - If invalid interrupt
  */
-static void ufshcd_check_errors(struct ufs_hba *hba)
+static irqreturn_t ufshcd_check_errors(struct ufs_hba *hba)
 {
 	bool queue_eh_work = false;
+	irqreturn_t retval = IRQ_NONE;
 
-	if (hba->errors & INT_FATAL_ERRORS)
+	if (hba->errors & INT_FATAL_ERRORS || hba->ce_error)
 		queue_eh_work = true;
 
 	if (hba->errors & UIC_ERROR) {
 		hba->uic_error = 0;
-		ufshcd_update_uic_error(hba);
+		retval = ufshcd_update_uic_error(hba);
 		if (hba->uic_error)
 			queue_eh_work = true;
 	}
 
 	if (queue_eh_work) {
-		/* handle fatal errors only when link is functional */
-		if (hba->ufshcd_state == UFSHCD_STATE_OPERATIONAL) {
-			/* block commands from scsi mid-layer */
-			scsi_block_requests(hba->host);
-
-			/* transfer error masks to sticky bits */
+		/*
+		 * Update the transfer error masks to sticky bits; do this
+		 * irrespective of the current ufshcd_state.
+		 */
 			hba->saved_err |= hba->errors;
 			hba->saved_uic_err |= hba->uic_error;
+		hba->saved_ce_err |= hba->ce_error;
+
+		/* handle fatal errors only when link is functional */
+		if (hba->ufshcd_state == UFSHCD_STATE_OPERATIONAL) {
+			/*
+			 * Set error handling in progress flag early so that we
+			 * don't issue new requests any more.
+			 */
+			ufshcd_set_eh_in_progress(hba);
 
 			hba->ufshcd_state = UFSHCD_STATE_ERROR;
 			schedule_work(&hba->eh_work);
 		}
+		retval |= IRQ_HANDLED;
 	}
 	/*
 	 * if (!queue_eh_work) -
@@ -3606,40 +6472,63 @@
 	 * itself without s/w intervention or errors that will be
 	 * handled by the SCSI core layer.
 	 */
+	return retval;
 }
 
 /**
  * ufshcd_tmc_handler - handle task management function completion
  * @hba: per adapter instance
+ *
+ * Returns
+ *  IRQ_HANDLED - If interrupt is valid
+ *  IRQ_NONE    - If invalid interrupt
  */
-static void ufshcd_tmc_handler(struct ufs_hba *hba)
+static irqreturn_t ufshcd_tmc_handler(struct ufs_hba *hba)
 {
 	u32 tm_doorbell;
 
 	tm_doorbell = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL);
 	hba->tm_condition = tm_doorbell ^ hba->outstanding_tasks;
+	if (hba->tm_condition) {
 	wake_up(&hba->tm_wq);
+		return IRQ_HANDLED;
+	} else {
+		return IRQ_NONE;
+	}
 }
 
 /**
  * ufshcd_sl_intr - Interrupt service routine
  * @hba: per adapter instance
  * @intr_status: contains interrupts generated by the controller
+ *
+ * Returns
+ *  IRQ_HANDLED - If interrupt is valid
+ *  IRQ_NONE    - If invalid interrupt
  */
-static void ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status)
+static irqreturn_t ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status)
 {
+	irqreturn_t retval = IRQ_NONE;
+
+	ufsdbg_error_inject_dispatcher(hba,
+		ERR_INJECT_INTR, intr_status, &intr_status);
+
+	ufshcd_vops_crypto_engine_get_status(hba, &hba->ce_error);
+
 	hba->errors = UFSHCD_ERROR_MASK & intr_status;
-	if (hba->errors)
-		ufshcd_check_errors(hba);
+	if (hba->errors || hba->ce_error)
+		retval |= ufshcd_check_errors(hba);
 
 	if (intr_status & UFSHCD_UIC_MASK)
-		ufshcd_uic_cmd_compl(hba, intr_status);
+		retval |= ufshcd_uic_cmd_compl(hba, intr_status);
 
 	if (intr_status & UTP_TASK_REQ_COMPL)
-		ufshcd_tmc_handler(hba);
+		retval |= ufshcd_tmc_handler(hba);
 
 	if (intr_status & UTP_TRANSFER_REQ_COMPL)
-		ufshcd_transfer_req_compl(hba);
+		retval |= ufshcd_transfer_req_compl(hba);
+
+	return retval;
 }
 
 /**
@@ -3647,23 +6536,45 @@
  * @irq: irq number
  * @__hba: pointer to adapter instance
  *
- * Returns IRQ_HANDLED - If interrupt is valid
+ * Returns
+ *  IRQ_HANDLED - If interrupt is valid
  *		IRQ_NONE - If invalid interrupt
  */
 static irqreturn_t ufshcd_intr(int irq, void *__hba)
 {
-	u32 intr_status;
+	u32 intr_status, enabled_intr_status;
 	irqreturn_t retval = IRQ_NONE;
 	struct ufs_hba *hba = __hba;
+	int retries = hba->nutrs;
 
 	spin_lock(hba->host->host_lock);
 	intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
-
-	if (intr_status) {
+	hba->ufs_stats.last_intr_status = intr_status;
+	hba->ufs_stats.last_intr_ts = ktime_get();
+	/*
+	 * There can be at most hba->nutrs reqs in flight. In the worst case,
+	 * if the reqs finish one by one after the interrupt status is read,
+	 * make sure we handle them by re-checking the interrupt status in a
+	 * loop until all of them are processed before returning.
+	 */
+	do {
+		enabled_intr_status =
+			intr_status & ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
+		if (intr_status)
 		ufshcd_writel(hba, intr_status, REG_INTERRUPT_STATUS);
-		ufshcd_sl_intr(hba, intr_status);
-		retval = IRQ_HANDLED;
+		if (enabled_intr_status)
+			retval |= ufshcd_sl_intr(hba, enabled_intr_status);
+
+		intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
+	} while (intr_status && --retries);
+
+	if (retval == IRQ_NONE) {
+		dev_err(hba->dev, "%s: Unhandled interrupt 0x%08x\n",
+					__func__, intr_status);
+		ufshcd_hex_dump("host regs: ", hba->mmio_base,
+					UFSHCI_REG_SPACE_SIZE);
 	}
+
 	spin_unlock(hba->host->host_lock);
 	return retval;
 }
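The reworked ISR is the classic write-one-to-clear loop: snapshot the status, ack exactly the bits seen, service them, then re-read in case more work arrived, bounded so a stuck status bit cannot livelock the CPU. A distilled sketch of that shape (register offsets and the handle() callback are hypothetical):

	#include <linux/io.h>

	static void demo_isr_loop(void __iomem *mmio, int max_inflight,
				  void (*handle)(u32))
	{
		u32 status = readl(mmio + 0x20);	/* status reg, offset made up */
		int retries = max_inflight;		/* e.g. hba->nutrs */

		do {
			u32 enabled = status & readl(mmio + 0x24); /* enable reg */

			if (status)
				writel(status, mmio + 0x20);	/* W1C ack */
			if (enabled)
				handle(enabled);
			status = readl(mmio + 0x20);	/* anything new? */
		} while (status && --retries);
	}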
@@ -3684,7 +6595,7 @@
 	/* poll for max. 1 sec to clear door bell register by h/w */
 	err = ufshcd_wait_for_register(hba,
 			REG_UTP_TASK_REQ_DOOR_BELL,
-			mask, 0, 1000, 1000);
+			mask, 0, 1000, 1000, true);
 out:
 	return err;
 }
@@ -3718,7 +6629,8 @@
 	 * the maximum wait time is bounded by %TM_CMD_TIMEOUT.
 	 */
 	wait_event(hba->tm_tag_wq, ufshcd_get_tm_free_slot(hba, &free_slot));
-	ufshcd_hold(hba, false);
+	hba->ufs_stats.clk_hold.ctx = TM_CMD_SEND;
+	ufshcd_hold_all(hba);
 
 	spin_lock_irqsave(host->host_lock, flags);
 	task_req_descp = hba->utmrdl_base_addr;
@@ -3747,7 +6659,13 @@
 
 	/* send command to the controller */
 	__set_bit(free_slot, &hba->outstanding_tasks);
+
+	/* Make sure descriptors are ready before ringing the task doorbell */
+	wmb();
+
 	ufshcd_writel(hba, 1 << free_slot, REG_UTP_TASK_REQ_DOOR_BELL);
+	/* Make sure that doorbell is committed immediately */
+	wmb();
 
 	spin_unlock_irqrestore(host->host_lock, flags);
 
@@ -3769,8 +6687,9 @@
 	clear_bit(free_slot, &hba->tm_condition);
 	ufshcd_put_tm_slot(hba, free_slot);
 	wake_up(&hba->tm_tag_wq);
+	hba->ufs_stats.clk_rel.ctx = TM_CMD_SEND;
 
-	ufshcd_release(hba);
+	ufshcd_release_all(hba);
 	return err;
 }
 
@@ -3796,6 +6715,7 @@
 	hba = shost_priv(host);
 	tag = cmd->request->tag;
 
+	ufshcd_print_cmd_log(hba);
 	lrbp = &hba->lrb[tag];
 	err = ufshcd_issue_tm_cmd(hba, lrbp->lun, 0, UFS_LOGICAL_RESET, &resp);
 	if (err || resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
@@ -3815,7 +6735,9 @@
 	spin_lock_irqsave(host->host_lock, flags);
 	ufshcd_transfer_req_compl(hba);
 	spin_unlock_irqrestore(host->host_lock, flags);
+
 out:
+	hba->req_abort_count = 0;
 	if (!err) {
 		err = SUCCESS;
 	} else {
@@ -3825,6 +6747,17 @@
 	return err;
 }
 
+static void ufshcd_set_req_abort_skip(struct ufs_hba *hba, unsigned long bitmap)
+{
+	struct ufshcd_lrb *lrbp;
+	int tag;
+
+	for_each_set_bit(tag, &bitmap, hba->nutrs) {
+		lrbp = &hba->lrb[tag];
+		lrbp->req_abort_skip = true;
+	}
+}
+
 /**
  * ufshcd_abort - abort a specific command
  * @cmd: SCSI command pointer
@@ -3852,31 +6785,87 @@
 	host = cmd->device->host;
 	hba = shost_priv(host);
 	tag = cmd->request->tag;
+	if (!ufshcd_valid_tag(hba, tag)) {
+		dev_err(hba->dev,
+			"%s: invalid command tag %d: cmd=0x%p, cmd->request=0x%p",
+			__func__, tag, cmd, cmd->request);
+		BUG();
+	}
 
-	ufshcd_hold(hba, false);
+	lrbp = &hba->lrb[tag];
+
+	ufshcd_update_error_stats(hba, UFS_ERR_TASK_ABORT);
+
+	/*
+	 * Task abort to the device W-LUN is illegal. When this command
+	 * fails due to the spec violation, the next SCSI error handling
+	 * step will be to send a LU reset which, again, is a spec violation.
+	 * To avoid these unnecessary/illegal steps we skip to the last
+	 * error handling stage: reset and restore.
+	 */
+	if (lrbp->lun == UFS_UPIU_UFS_DEVICE_WLUN)
+		return ufshcd_eh_host_reset_handler(cmd);
+
+	ufshcd_hold_all(hba);
+	reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
 	/* If command is already aborted/completed, return SUCCESS */
-	if (!(test_bit(tag, &hba->outstanding_reqs)))
+	if (!(test_bit(tag, &hba->outstanding_reqs))) {
+		dev_err(hba->dev,
+			"%s: cmd at tag %d already completed, outstanding=0x%lx, doorbell=0x%x\n",
+			__func__, tag, hba->outstanding_reqs, reg);
 		goto out;
+	}
 
-	reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
 	if (!(reg & (1 << tag))) {
 		dev_err(hba->dev,
 		"%s: cmd was completed, but without a notifying intr, tag = %d",
 		__func__, tag);
 	}
 
-	lrbp = &hba->lrb[tag];
+	/* Print Transfer Request of aborted task */
+	dev_err(hba->dev, "%s: Device abort task at tag %d", __func__, tag);
+
+	/*
+	 * Print detailed info about aborted request.
+	 * As more than one request might get aborted at the same time,
+	 * print full information only for the first aborted request in order
+	 * to reduce repeated printouts. For other aborted requests only print
+	 * basic details.
+	 */
+	scsi_print_command(cmd);
+	if (!hba->req_abort_count) {
+		ufshcd_print_fsm_state(hba);
+		ufshcd_print_host_regs(hba);
+		ufshcd_print_host_state(hba);
+		ufshcd_print_pwr_info(hba);
+		ufshcd_print_trs(hba, 1 << tag, true);
+	} else {
+		ufshcd_print_trs(hba, 1 << tag, false);
+	}
+	hba->req_abort_count++;
+
+	/* Skip task abort in case previous aborts failed and report failure */
+	if (lrbp->req_abort_skip) {
+		err = -EIO;
+		goto out;
+	}
+
 	for (poll_cnt = 100; poll_cnt; poll_cnt--) {
 		err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag,
 				UFS_QUERY_TASK, &resp);
 		if (!err && resp == UPIU_TASK_MANAGEMENT_FUNC_SUCCEEDED) {
 			/* cmd pending in the device */
+			dev_err(hba->dev, "%s: cmd pending in the device. tag = %d",
+				__func__, tag);
 			break;
 		} else if (!err && resp == UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
 			/*
 			 * cmd not pending in the device, check if it is
 			 * in transition.
 			 */
+			dev_err(hba->dev, "%s: cmd at tag %d not pending in the device.",
+				__func__, tag);
 			reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
 			if (reg & (1 << tag)) {
 				/* sleep for max. 200us to stabilize */
@@ -3884,8 +6873,13 @@
 				continue;
 			}
 			/* command completed already */
+			dev_err(hba->dev, "%s: cmd at tag %d successfully cleared from DB.",
+				__func__, tag);
 			goto out;
 		} else {
+			dev_err(hba->dev,
+				"%s: no response from device. tag = %d, err %d",
+				__func__, tag, err);
 			if (!err)
 				err = resp; /* service response error */
 			goto out;
@@ -3900,19 +6894,25 @@
 	err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag,
 			UFS_ABORT_TASK, &resp);
 	if (err || resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
-		if (!err)
+		if (!err) {
 			err = resp; /* service response error */
+			dev_err(hba->dev, "%s: issued. tag = %d, err %d",
+				__func__, tag, err);
+		}
 		goto out;
 	}
 
 	err = ufshcd_clear_cmd(hba, tag);
-	if (err)
+	if (err) {
+		dev_err(hba->dev, "%s: Failed clearing cmd at tag %d, err %d",
+			__func__, tag, err);
 		goto out;
+	}
 
 	scsi_dma_unmap(cmd);
 
 	spin_lock_irqsave(host->host_lock, flags);
-	__clear_bit(tag, &hba->outstanding_reqs);
+	ufshcd_outstanding_req_clear(hba, tag);
 	hba->lrb[tag].cmd = NULL;
 	spin_unlock_irqrestore(host->host_lock, flags);
 
@@ -3924,14 +6924,15 @@
 		err = SUCCESS;
 	} else {
 		dev_err(hba->dev, "%s: failed with err %d\n", __func__, err);
+		ufshcd_set_req_abort_skip(hba, hba->outstanding_reqs);
 		err = FAILED;
 	}
 
 	/*
-	 * This ufshcd_release() corresponds to the original scsi cmd that got
-	 * aborted here (as we won't get any IRQ for it).
+	 * This ufshcd_release_all() corresponds to the original scsi cmd that
+	 * got aborted here (as we won't get any IRQ for it).
 	 */
-	ufshcd_release(hba);
+	ufshcd_release_all(hba);
 	return err;
 }
 
@@ -3952,9 +6953,12 @@
 
 	/* Reset the host controller */
 	spin_lock_irqsave(hba->host->host_lock, flags);
-	ufshcd_hba_stop(hba);
+	ufshcd_hba_stop(hba, false);
 	spin_unlock_irqrestore(hba->host->host_lock, flags);
 
+	/* scale up clocks to max frequency before full reinitialization */
+	ufshcd_set_clk_freq(hba, true);
+
 	err = ufshcd_hba_enable(hba);
 	if (err)
 		goto out;
@@ -3962,8 +6966,21 @@
 	/* Establish the link again and restore the device */
 	err = ufshcd_probe_hba(hba);
 
-	if (!err && (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL))
+	if (!err && (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL)) {
 		err = -EIO;
+		goto out;
+	}
+
+	if (!err) {
+		err = ufshcd_vops_crypto_engine_reset(hba);
+		if (err) {
+			dev_err(hba->dev,
+				"%s: failed to reset crypto engine %d\n",
+				__func__, err);
+			goto out;
+		}
+	}
+
 out:
 	if (err)
 		dev_err(hba->dev, "%s: Host init failed %d\n", __func__, err);
@@ -3987,10 +7004,26 @@
 	int retries = MAX_HOST_RESET_RETRIES;
 
 	do {
+		err = ufshcd_vops_full_reset(hba);
+		if (err)
+			dev_warn(hba->dev, "%s: full reset returned %d\n",
+				 __func__, err);
+
+		err = ufshcd_reset_device(hba);
+		if (err)
+			dev_warn(hba->dev, "%s: device reset failed. err %d\n",
+				 __func__, err);
+
 		err = ufshcd_host_reset_and_restore(hba);
 	} while (err && --retries);
 
 	/*
+	 * There is no point in proceeding if we fail to recover
+	 * even after multiple retries.
+	 */
+	if (err)
+		BUG();
+	/*
 	 * After reset the door-bell might be cleared, complete
 	 * outstanding requests in s/w here.
 	 */
@@ -4010,13 +7043,12 @@
  */
 static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd)
 {
-	int err;
+	int err = SUCCESS;
 	unsigned long flags;
 	struct ufs_hba *hba;
 
 	hba = shost_priv(cmd->device->host);
 
-	ufshcd_hold(hba, false);
 	/*
 	 * Check if there is any race with fatal error handling.
 	 * If so, wait for it to complete. Even though fatal error
@@ -4029,28 +7061,37 @@
 				hba->ufshcd_state == UFSHCD_STATE_RESET))
 			break;
 		spin_unlock_irqrestore(hba->host->host_lock, flags);
-		dev_dbg(hba->dev, "%s: reset in progress\n", __func__);
+		dev_err(hba->dev, "%s: reset in progress - 1\n", __func__);
 		flush_work(&hba->eh_work);
 	} while (1);
 
-	hba->ufshcd_state = UFSHCD_STATE_RESET;
-	ufshcd_set_eh_in_progress(hba);
-	spin_unlock_irqrestore(hba->host->host_lock, flags);
-
-	err = ufshcd_reset_and_restore(hba);
+	/*
+	 * We don't know whether the previous reset really reset the host
+	 * controller, so force a reset here to be sure.
+	 */
+	hba->ufshcd_state = UFSHCD_STATE_ERROR;
+	hba->force_host_reset = true;
+	schedule_work(&hba->eh_work);
 
+	/* wait for the reset work to finish */
+	do {
+		if (!(work_pending(&hba->eh_work) ||
+				hba->ufshcd_state == UFSHCD_STATE_RESET))
+			break;
+		spin_unlock_irqrestore(hba->host->host_lock, flags);
+		dev_err(hba->dev, "%s: reset in progress - 2\n", __func__);
+		flush_work(&hba->eh_work);
 	spin_lock_irqsave(hba->host->host_lock, flags);
-	if (!err) {
-		err = SUCCESS;
-		hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
-	} else {
+	} while (1);
+
+	if (!((hba->ufshcd_state == UFSHCD_STATE_OPERATIONAL) &&
+	      ufshcd_is_link_active(hba))) {
 		err = FAILED;
 		hba->ufshcd_state = UFSHCD_STATE_ERROR;
 	}
-	ufshcd_clear_eh_in_progress(hba);
+
 	spin_unlock_irqrestore(hba->host->host_lock, flags);
 
-	ufshcd_release(hba);
 	return err;
 }
 
@@ -4162,7 +7203,7 @@
 	dev_dbg(hba->dev, "%s: setting icc_level 0x%x",
 			__func__, hba->init_prefetch_data.icc_level);
 
-	ret = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
+	ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
 			QUERY_ATTR_IDN_ACTIVE_ICC_LVL, 0, 0,
 			&hba->init_prefetch_data.icc_level);
 
@@ -4240,6 +7281,210 @@
 }
 
 /**
+ * ufshcd_tune_pa_tactivate - Tunes PA_TActivate of local UniPro
+ * @hba: per-adapter instance
+ *
+ * PA_TActivate parameter can be tuned manually if UniPro version is less than
+ * 1.61. PA_TActivate needs to be greater than or equal to the peer M-PHY's
+ * RX_MIN_ACTIVATETIME_CAPABILITY attribute. This optimal value can help reduce
+ * the hibern8 exit latency.
+ *
+ * Returns zero on success, non-zero error value on failure.
+ */
+static int ufshcd_tune_pa_tactivate(struct ufs_hba *hba)
+{
+	int ret = 0;
+	u32 peer_rx_min_activatetime = 0, tuned_pa_tactivate;
+
+	if (!ufshcd_is_unipro_pa_params_tuning_req(hba))
+		return 0;
+
+	ret = ufshcd_dme_peer_get(hba,
+				  UIC_ARG_MIB_SEL(
+					RX_MIN_ACTIVATETIME_CAPABILITY,
+					UIC_ARG_MPHY_RX_GEN_SEL_INDEX(0)),
+				  &peer_rx_min_activatetime);
+	if (ret)
+		goto out;
+
+	/* make sure proper unit conversion is applied */
+	tuned_pa_tactivate =
+		((peer_rx_min_activatetime * RX_MIN_ACTIVATETIME_UNIT_US)
+		 / PA_TACTIVATE_TIME_UNIT_US);
+	ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE),
+			     tuned_pa_tactivate);
+
+out:
+	return ret;
+}
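A worked example of the conversion above, assuming the unit macros defined elsewhere in this driver carry their mainline values:

	/*
	 * Assume RX_MIN_ACTIVATETIME_UNIT_US = 100 and
	 * PA_TACTIVATE_TIME_UNIT_US = 10 (mainline values, an assumption here).
	 * A peer reporting RX_MIN_ACTIVATETIME_CAPABILITY = 2 means 200 us, so:
	 *   tuned_pa_tactivate = (2 * 100) / 10 = 20 units, i.e. 200 us.
	 */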
+
+/**
+ * ufshcd_tune_pa_hibern8time - Tunes PA_Hibern8Time of local UniPro
+ * @hba: per-adapter instance
+ *
+ * PA_Hibern8Time parameter can be tuned manually if UniPro version is less than
+ * 1.61. PA_Hibern8Time needs to be the maximum of the local M-PHY's
+ * TX_HIBERN8TIME_CAPABILITY & peer M-PHY's RX_HIBERN8TIME_CAPABILITY.
+ * This optimal value can help reduce the hibern8 exit latency.
+ *
+ * Returns zero on success, non-zero error value on failure.
+ */
+static int ufshcd_tune_pa_hibern8time(struct ufs_hba *hba)
+{
+	int ret = 0;
+	u32 local_tx_hibern8_time_cap = 0, peer_rx_hibern8_time_cap = 0;
+	u32 max_hibern8_time, tuned_pa_hibern8time;
+
+	ret = ufshcd_dme_get(hba,
+			     UIC_ARG_MIB_SEL(TX_HIBERN8TIME_CAPABILITY,
+					UIC_ARG_MPHY_TX_GEN_SEL_INDEX(0)),
+				  &local_tx_hibern8_time_cap);
+	if (ret)
+		goto out;
+
+	ret = ufshcd_dme_peer_get(hba,
+				  UIC_ARG_MIB_SEL(RX_HIBERN8TIME_CAPABILITY,
+					UIC_ARG_MPHY_RX_GEN_SEL_INDEX(0)),
+				  &peer_rx_hibern8_time_cap);
+	if (ret)
+		goto out;
+
+	max_hibern8_time = max(local_tx_hibern8_time_cap,
+			       peer_rx_hibern8_time_cap);
+	/* make sure proper unit conversion is applied */
+	tuned_pa_hibern8time = ((max_hibern8_time * HIBERN8TIME_UNIT_US)
+				/ PA_HIBERN8_TIME_UNIT_US);
+	ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HIBERN8TIME),
+			     tuned_pa_hibern8time);
+out:
+	return ret;
+}
+
+/**
+ * ufshcd_quirk_tune_host_pa_tactivate - Ensures that host PA_TACTIVATE is
+ * less than device PA_TACTIVATE time.
+ * @hba: per-adapter instance
+ *
+ * Some UFS devices require host PA_TACTIVATE to be lower than device
+ * PA_TACTIVATE, we need to enable UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE quirk
+ * for such devices.
+ *
+ * Returns zero on success, non-zero error value on failure.
+ */
+static int ufshcd_quirk_tune_host_pa_tactivate(struct ufs_hba *hba)
+{
+	int ret = 0;
+	u32 granularity, peer_granularity;
+	u32 pa_tactivate, peer_pa_tactivate;
+	u32 pa_tactivate_us, peer_pa_tactivate_us;
+	u8 gran_to_us_table[] = {1, 4, 8, 16, 32, 100};
+
+	ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_GRANULARITY),
+				  &granularity);
+	if (ret)
+		goto out;
+
+	ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_GRANULARITY),
+				  &peer_granularity);
+	if (ret)
+		goto out;
+
+	if ((granularity < PA_GRANULARITY_MIN_VAL) ||
+	    (granularity > PA_GRANULARITY_MAX_VAL)) {
+		dev_err(hba->dev, "%s: invalid host PA_GRANULARITY %d",
+			__func__, granularity);
+		return -EINVAL;
+	}
+
+	if ((peer_granularity < PA_GRANULARITY_MIN_VAL) ||
+	    (peer_granularity > PA_GRANULARITY_MAX_VAL)) {
+		dev_err(hba->dev, "%s: invalid device PA_GRANULARITY %d",
+			__func__, peer_granularity);
+		return -EINVAL;
+	}
+
+	ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_TACTIVATE), &pa_tactivate);
+	if (ret)
+		goto out;
+
+	ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_TACTIVATE),
+				  &peer_pa_tactivate);
+	if (ret)
+		goto out;
+
+	pa_tactivate_us = pa_tactivate * gran_to_us_table[granularity - 1];
+	peer_pa_tactivate_us = peer_pa_tactivate *
+			     gran_to_us_table[peer_granularity - 1];
+
+	if (pa_tactivate_us > peer_pa_tactivate_us) {
+		u32 new_peer_pa_tactivate;
+
+		new_peer_pa_tactivate = pa_tactivate_us /
+				      gran_to_us_table[peer_granularity - 1];
+		new_peer_pa_tactivate++;
+		ret = ufshcd_dme_peer_set(hba, UIC_ARG_MIB(PA_TACTIVATE),
+					  new_peer_pa_tactivate);
+	}
+
+out:
+	return ret;
+}
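A worked example of the granularity math above (all attribute values made up). gran_to_us_table[] maps PA_GRANULARITY values 1..6 to step sizes of 1, 4, 8, 16, 32 and 100 us:

	/*
	 * host: PA_GRANULARITY = 3 -> 8 us steps, PA_TACTIVATE = 5  -> 40 us
	 * peer: PA_GRANULARITY = 1 -> 1 us steps, PA_TACTIVATE = 16 -> 16 us
	 *
	 * 40 us > 16 us, so the peer is bumped:
	 *   new_peer_pa_tactivate = 40 / 1 + 1 = 41 units (41 us),
	 * making the device's PA_TACTIVATE strictly larger than the host's.
	 */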
+
+static void ufshcd_tune_unipro_params(struct ufs_hba *hba)
+{
+	if (ufshcd_is_unipro_pa_params_tuning_req(hba)) {
+		ufshcd_tune_pa_tactivate(hba);
+		ufshcd_tune_pa_hibern8time(hba);
+	}
+
+	if (hba->dev_quirks & UFS_DEVICE_QUIRK_PA_TACTIVATE)
+		/* set 1ms timeout for PA_TACTIVATE */
+		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE), 10);
+
+	if (hba->dev_quirks & UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE)
+		ufshcd_quirk_tune_host_pa_tactivate(hba);
+
+	ufshcd_vops_apply_dev_quirks(hba);
+}
+
+static void ufshcd_clear_dbg_ufs_stats(struct ufs_hba *hba)
+{
+	int err_reg_hist_size = sizeof(struct ufs_uic_err_reg_hist);
+
+	memset(&hba->ufs_stats.pa_err, 0, err_reg_hist_size);
+	memset(&hba->ufs_stats.dl_err, 0, err_reg_hist_size);
+	memset(&hba->ufs_stats.nl_err, 0, err_reg_hist_size);
+	memset(&hba->ufs_stats.tl_err, 0, err_reg_hist_size);
+	memset(&hba->ufs_stats.dme_err, 0, err_reg_hist_size);
+
+	hba->req_abort_count = 0;
+}
+
+static void ufshcd_apply_pm_quirks(struct ufs_hba *hba)
+{
+	if (hba->dev_quirks & UFS_DEVICE_QUIRK_NO_LINK_OFF) {
+		if (ufs_get_pm_lvl_to_link_pwr_state(hba->rpm_lvl) ==
+		    UIC_LINK_OFF_STATE) {
+			hba->rpm_lvl =
+				ufs_get_desired_pm_lvl_for_dev_link_state(
+						UFS_SLEEP_PWR_MODE,
+						UIC_LINK_HIBERN8_STATE);
+			dev_info(hba->dev, "UFS_DEVICE_QUIRK_NO_LINK_OFF enabled, changed rpm_lvl to %d\n",
+				hba->rpm_lvl);
+		}
+		if (ufs_get_pm_lvl_to_link_pwr_state(hba->spm_lvl) ==
+		    UIC_LINK_OFF_STATE) {
+			hba->spm_lvl =
+				ufs_get_desired_pm_lvl_for_dev_link_state(
+						UFS_SLEEP_PWR_MODE,
+						UIC_LINK_HIBERN8_STATE);
+			dev_info(hba->dev, "UFS_DEVICE_QUIRK_NO_LINK_OFF enabled, changed spm_lvl to %d\n",
+				hba->spm_lvl);
+		}
+	}
+}
+
+/**
  * ufshcd_probe_hba - probe hba to detect device and initialize
  * @hba: per-adapter instance
  *
@@ -4248,12 +7493,17 @@
 static int ufshcd_probe_hba(struct ufs_hba *hba)
 {
 	int ret;
+	ktime_t start = ktime_get();
 
 	ret = ufshcd_link_startup(hba);
 	if (ret)
 		goto out;
 
-	ufshcd_init_pwr_info(hba);
+	/* Debug counters initialization */
+	ufshcd_clear_dbg_ufs_stats(hba);
+	/* set the default level for urgent bkops */
+	hba->urgent_bkops_lvl = BKOPS_STATUS_PERF_IMPACT;
+	hba->is_urgent_bkops_lvl_checked = false;
 
 	/* UniPro link is active now */
 	ufshcd_set_link_active(hba);
@@ -4266,10 +7516,18 @@
 	if (ret)
 		goto out;
 
+	ufs_advertise_fixup_device(hba);
+	ufshcd_tune_unipro_params(hba);
+
+	ufshcd_apply_pm_quirks(hba);
+	ret = ufshcd_set_vccq_rail_unused(hba,
+		(hba->dev_quirks & UFS_DEVICE_NO_VCCQ) ? true : false);
+	if (ret)
+		goto out;
+
 	/* UFS device is also active now */
 	ufshcd_set_ufs_dev_active(hba);
 	ufshcd_force_reset_auto_bkops(hba);
-	hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
 	hba->wlun_dev_clr_ua = true;
 
 	if (ufshcd_get_max_pwr_mode(hba)) {
@@ -4278,11 +7536,15 @@
 			__func__);
 	} else {
 		ret = ufshcd_config_pwr_mode(hba, &hba->max_pwr_info.info);
-		if (ret)
+		if (ret) {
 			dev_err(hba->dev, "%s: Failed setting power mode, err = %d\n",
 					__func__, ret);
+			goto out;
+		}
 	}
 
+	/* set the state as operational after switching to desired gear */
+	hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
 	/*
 	 * If we are in error handling context or in power management callbacks
 	 * context, no need to scan the host
@@ -4292,7 +7554,7 @@
 
 		/* clear any previous UFS device information */
 		memset(&hba->dev_info, 0, sizeof(hba->dev_info));
-		if (!ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_READ_FLAG,
+		if (!ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_READ_FLAG,
 				       QUERY_FLAG_IDN_PWR_ON_WPE, &flag))
 			hba->dev_info.f_power_on_wp_en = flag;
 
@@ -4303,6 +7565,27 @@
 		if (ufshcd_scsi_add_wlus(hba))
 			goto out;
 
+		/* Initialize devfreq after UFS device is detected */
+		if (ufshcd_is_clkscaling_supported(hba)) {
+			memcpy(&hba->clk_scaling.saved_pwr_info.info,
+			    &hba->pwr_info, sizeof(struct ufs_pa_layer_attr));
+			hba->clk_scaling.saved_pwr_info.is_valid = true;
+			hba->clk_scaling.is_scaled_up = true;
+			if (!hba->devfreq) {
+				hba->devfreq = devfreq_add_device(hba->dev,
+							&ufs_devfreq_profile,
+							"simple_ondemand",
+							gov_data);
+				if (IS_ERR(hba->devfreq)) {
+					ret = PTR_ERR(hba->devfreq);
+					dev_err(hba->dev, "Unable to register with devfreq %d\n",
+						ret);
+					goto out;
+				}
+			}
+			hba->clk_scaling.is_allowed = true;
+		}
+
 		scsi_scan_host(hba->host);
 		pm_runtime_put_sync(hba->dev);
 	}
@@ -4310,10 +7593,13 @@
 	if (!hba->is_init_prefetch)
 		hba->is_init_prefetch = true;
 
-	/* Resume devfreq after UFS device is detected */
-	if (ufshcd_is_clkscaling_enabled(hba))
-		devfreq_resume_device(hba->devfreq);
-
+	/*
+	 * Enable auto hibern8 if supported, after full host and
+	 * device initialization.
+	 */
+	if (ufshcd_is_auto_hibern8_supported(hba))
+		ufshcd_set_auto_hibern8_timer(hba,
+				      hba->hibern8_on_idle.delay_ms);
 out:
 	/*
 	 * If we failed to initialize the device or the device is not
@@ -4324,6 +7610,9 @@
 		ufshcd_hba_exit(hba);
 	}
 
+	trace_ufshcd_init(dev_name(hba->dev), ret,
+		ktime_to_us(ktime_sub(ktime_get(), start)),
+		hba->curr_dev_pwr_mode, hba->uic_link_state);
 	return ret;
 }
 
@@ -4336,7 +7625,296 @@
 {
 	struct ufs_hba *hba = (struct ufs_hba *)data;
 
+	/*
+	 * Don't allow clock gating and hibern8 entry, so that device
+	 * detection is faster.
+	 */
+	ufshcd_hold_all(hba);
 	ufshcd_probe_hba(hba);
+	ufshcd_release_all(hba);
+}
+
+/**
+ * ufshcd_query_ioctl - perform user read queries
+ * @hba: per-adapter instance
+ * @lun: used for lun specific queries
+ * @buffer: user space buffer for reading and submitting query data and params
+ * @return: 0 on success, negative error code otherwise
+ *
+ * Expected/Submitted buffer structure is struct ufs_ioctl_query_data.
+ * It will read the opcode, idn and buf_size parameters, and put the
+ * response in the buffer field while updating the used size in buf_size.
+ */
+static int ufshcd_query_ioctl(struct ufs_hba *hba, u8 lun, void __user *buffer)
+{
+	struct ufs_ioctl_query_data *ioctl_data;
+	int err = 0;
+	int length = 0;
+	void *data_ptr;
+	bool flag;
+	u32 att;
+	u8 index;
+	u8 *desc = NULL;
+
+	ioctl_data = kzalloc(sizeof(struct ufs_ioctl_query_data), GFP_KERNEL);
+	if (!ioctl_data) {
+		dev_err(hba->dev, "%s: Failed allocating %zu bytes\n", __func__,
+				sizeof(struct ufs_ioctl_query_data));
+		err = -ENOMEM;
+		goto out;
+	}
+
+	/* extract params from user buffer */
+	err = copy_from_user(ioctl_data, buffer,
+			sizeof(struct ufs_ioctl_query_data));
+	if (err) {
+		dev_err(hba->dev,
+			"%s: Failed copying buffer from user, err %d\n",
+			__func__, err);
+		goto out_release_mem;
+	}
+
+	/* verify legal parameters & send query */
+	switch (ioctl_data->opcode) {
+	case UPIU_QUERY_OPCODE_READ_DESC:
+		switch (ioctl_data->idn) {
+		case QUERY_DESC_IDN_DEVICE:
+		case QUERY_DESC_IDN_CONFIGURAION:
+		case QUERY_DESC_IDN_INTERCONNECT:
+		case QUERY_DESC_IDN_GEOMETRY:
+		case QUERY_DESC_IDN_POWER:
+			index = 0;
+			break;
+		case QUERY_DESC_IDN_UNIT:
+			if (!ufs_is_valid_unit_desc_lun(lun)) {
+				dev_err(hba->dev,
+					"%s: No unit descriptor for lun 0x%x\n",
+					__func__, lun);
+				err = -EINVAL;
+				goto out_release_mem;
+			}
+			index = lun;
+			break;
+		default:
+			goto out_einval;
+		}
+		length = min_t(int, QUERY_DESC_MAX_SIZE,
+				ioctl_data->buf_size);
+		desc = kzalloc(length, GFP_KERNEL);
+		if (!desc) {
+			dev_err(hba->dev, "%s: Failed allocating %d bytes\n",
+					__func__, length);
+			err = -ENOMEM;
+			goto out_release_mem;
+		}
+		err = ufshcd_query_descriptor(hba, ioctl_data->opcode,
+				ioctl_data->idn, index, 0, desc, &length);
+		break;
+	case UPIU_QUERY_OPCODE_READ_ATTR:
+		switch (ioctl_data->idn) {
+		case QUERY_ATTR_IDN_BOOT_LU_EN:
+		case QUERY_ATTR_IDN_POWER_MODE:
+		case QUERY_ATTR_IDN_ACTIVE_ICC_LVL:
+		case QUERY_ATTR_IDN_OOO_DATA_EN:
+		case QUERY_ATTR_IDN_BKOPS_STATUS:
+		case QUERY_ATTR_IDN_PURGE_STATUS:
+		case QUERY_ATTR_IDN_MAX_DATA_IN:
+		case QUERY_ATTR_IDN_MAX_DATA_OUT:
+		case QUERY_ATTR_IDN_REF_CLK_FREQ:
+		case QUERY_ATTR_IDN_CONF_DESC_LOCK:
+		case QUERY_ATTR_IDN_MAX_NUM_OF_RTT:
+		case QUERY_ATTR_IDN_EE_CONTROL:
+		case QUERY_ATTR_IDN_EE_STATUS:
+		case QUERY_ATTR_IDN_SECONDS_PASSED:
+			index = 0;
+			break;
+		case QUERY_ATTR_IDN_DYN_CAP_NEEDED:
+		case QUERY_ATTR_IDN_CORR_PRG_BLK_NUM:
+			index = lun;
+			break;
+		default:
+			goto out_einval;
+		}
+		err = ufshcd_query_attr(hba, ioctl_data->opcode, ioctl_data->idn,
+					index, 0, &att);
+		break;
+
+	case UPIU_QUERY_OPCODE_WRITE_ATTR:
+		err = copy_from_user(&att,
+				buffer + sizeof(struct ufs_ioctl_query_data),
+				sizeof(u32));
+		if (err) {
+			dev_err(hba->dev,
+				"%s: Failed copying buffer from user, err %d\n",
+				__func__, err);
+			goto out_release_mem;
+		}
+
+		switch (ioctl_data->idn) {
+		case QUERY_ATTR_IDN_BOOT_LU_EN:
+			index = 0;
+			if (att > QUERY_ATTR_IDN_BOOT_LU_EN_MAX) {
+				dev_err(hba->dev,
+					"%s: Illegal ufs query ioctl data, opcode 0x%x, idn 0x%x, att 0x%x\n",
+					__func__, ioctl_data->opcode,
+					(unsigned int)ioctl_data->idn, att);
+				err = -EINVAL;
+				goto out_release_mem;
+			}
+			break;
+		default:
+			goto out_einval;
+		}
+		err = ufshcd_query_attr(hba, ioctl_data->opcode,
+					ioctl_data->idn, index, 0, &att);
+		break;
+
+	case UPIU_QUERY_OPCODE_READ_FLAG:
+		switch (ioctl_data->idn) {
+		case QUERY_FLAG_IDN_FDEVICEINIT:
+		case QUERY_FLAG_IDN_PERMANENT_WPE:
+		case QUERY_FLAG_IDN_PWR_ON_WPE:
+		case QUERY_FLAG_IDN_BKOPS_EN:
+		case QUERY_FLAG_IDN_PURGE_ENABLE:
+		case QUERY_FLAG_IDN_FPHYRESOURCEREMOVAL:
+		case QUERY_FLAG_IDN_BUSY_RTC:
+			break;
+		default:
+			goto out_einval;
+		}
+		err = ufshcd_query_flag_retry(hba, ioctl_data->opcode,
+				ioctl_data->idn, &flag);
+		break;
+	default:
+		goto out_einval;
+	}
+
+	if (err) {
+		dev_err(hba->dev, "%s: Query for idn %d failed\n", __func__,
+				ioctl_data->idn);
+		goto out_release_mem;
+	}
+
+	/*
+	 * Copy the response data.
+	 * As we might end up reading less data than what is specified in
+	 * "ioctl_data->buf_size", update "ioctl_data->buf_size" to exactly
+	 * what we have read.
+	 */
+	switch (ioctl_data->opcode) {
+	case UPIU_QUERY_OPCODE_READ_DESC:
+		ioctl_data->buf_size = min_t(int, ioctl_data->buf_size, length);
+		data_ptr = desc;
+		break;
+	case UPIU_QUERY_OPCODE_READ_ATTR:
+		ioctl_data->buf_size = sizeof(u32);
+		data_ptr = &att;
+		break;
+	case UPIU_QUERY_OPCODE_READ_FLAG:
+		ioctl_data->buf_size = 1;
+		data_ptr = &flag;
+		break;
+	case UPIU_QUERY_OPCODE_WRITE_ATTR:
+		goto out_release_mem;
+	default:
+		goto out_einval;
+	}
+
+	/* copy to user */
+	err = copy_to_user(buffer, ioctl_data,
+			sizeof(struct ufs_ioctl_query_data));
+	if (err)
+		dev_err(hba->dev, "%s: Failed copying back to user.\n",
+			__func__);
+	err = copy_to_user(buffer + sizeof(struct ufs_ioctl_query_data),
+			data_ptr, ioctl_data->buf_size);
+	if (err)
+		dev_err(hba->dev, "%s: err %d copying back to user.\n",
+				__func__, err);
+	goto out_release_mem;
+
+out_einval:
+	dev_err(hba->dev,
+		"%s: illegal ufs query ioctl data, opcode 0x%x, idn 0x%x\n",
+		__func__, ioctl_data->opcode, (unsigned int)ioctl_data->idn);
+	err = -EINVAL;
+out_release_mem:
+	kfree(ioctl_data);
+	kfree(desc);
+out:
+	return err;
+}
+
+/**
+ * ufshcd_ioctl - ufs ioctl callback registered in scsi_host
+ * @dev: scsi device required for per LUN queries
+ * @cmd: command opcode
+ * @buffer: user space buffer for transferring data
+ *
+ * Supported commands:
+ * UFS_IOCTL_QUERY
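+ *
+ * A rough userspace sketch (the fd is hypothetical and error handling is
+ * omitted; note that the response payload is returned right after the
+ * header in the same buffer, matching the copy_to_user() calls above):
+ *
+ *	char buf[sizeof(struct ufs_ioctl_query_data) + sizeof(__u32)];
+ *	struct ufs_ioctl_query_data *qd = (void *)buf;
+ *
+ *	qd->opcode = UPIU_QUERY_OPCODE_READ_ATTR;
+ *	qd->idn = QUERY_ATTR_IDN_BKOPS_STATUS;
+ *	qd->buf_size = sizeof(__u32);
+ *	ioctl(fd, UFS_IOCTL_QUERY, buf);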
+ */
+static int ufshcd_ioctl(struct scsi_device *dev, int cmd, void __user *buffer)
+{
+	struct ufs_hba *hba = shost_priv(dev->host);
+	int err = 0;
+
+	BUG_ON(!hba);
+	if (!buffer) {
+		dev_err(hba->dev, "%s: User buffer is NULL!\n", __func__);
+		return -EINVAL;
+	}
+
+	switch (cmd) {
+	case UFS_IOCTL_QUERY:
+		pm_runtime_get_sync(hba->dev);
+		err = ufshcd_query_ioctl(hba, ufshcd_scsi_to_upiu_lun(dev->lun),
+				buffer);
+		pm_runtime_put_sync(hba->dev);
+		break;
+	default:
+		err = -ENOIOCTLCMD;
+		dev_dbg(hba->dev, "%s: Unsupported ioctl cmd %d\n", __func__,
+			cmd);
+		break;
+	}
+
+	return err;
+}
+
+static enum blk_eh_timer_return ufshcd_eh_timed_out(struct scsi_cmnd *scmd)
+{
+	unsigned long flags;
+	struct Scsi_Host *host;
+	struct ufs_hba *hba;
+	int index;
+	bool found = false;
+
+	if (!scmd || !scmd->device || !scmd->device->host)
+		return BLK_EH_NOT_HANDLED;
+
+	host = scmd->device->host;
+	hba = shost_priv(host);
+	if (!hba)
+		return BLK_EH_NOT_HANDLED;
+
+	spin_lock_irqsave(host->host_lock, flags);
+
+	for_each_set_bit(index, &hba->outstanding_reqs, hba->nutrs) {
+		if (hba->lrb[index].cmd == scmd) {
+			found = true;
+			break;
+		}
+	}
+
+	spin_unlock_irqrestore(host->host_lock, flags);
+
+	/*
+	 * Bypass SCSI error handling and reset the block layer timer if this
+	 * SCSI command was not actually dispatched to the UFS driver;
+	 * otherwise let the SCSI layer handle the error as usual.
+	 */
+	return found ? BLK_EH_NOT_HANDLED : BLK_EH_RESET_TIMER;
 }
 
 static struct scsi_host_template ufshcd_driver_template = {
@@ -4351,6 +7929,11 @@
 	.eh_abort_handler	= ufshcd_abort,
 	.eh_device_reset_handler = ufshcd_eh_device_reset_handler,
 	.eh_host_reset_handler   = ufshcd_eh_host_reset_handler,
+	.eh_timed_out		= ufshcd_eh_timed_out,
+	.ioctl			= ufshcd_ioctl,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl		= ufshcd_ioctl,
+#endif
 	.this_id		= -1,
 	.sg_tablesize		= SG_ALL,
 	.cmd_per_lun		= UFSHCD_CMD_PER_LUN,
@@ -4379,12 +7962,23 @@
 static inline int ufshcd_config_vreg_lpm(struct ufs_hba *hba,
 					 struct ufs_vreg *vreg)
 {
-	return ufshcd_config_vreg_load(hba->dev, vreg, UFS_VREG_LPM_LOAD_UA);
+	if (!vreg)
+		return 0;
+	else if (vreg->unused)
+		return 0;
+	else
+		return ufshcd_config_vreg_load(hba->dev, vreg,
+					       UFS_VREG_LPM_LOAD_UA);
 }
 
 static inline int ufshcd_config_vreg_hpm(struct ufs_hba *hba,
 					 struct ufs_vreg *vreg)
 {
+	if (!vreg)
+		return 0;
+	else if (vreg->unused)
+		return 0;
+	else
 	return ufshcd_config_vreg_load(hba->dev, vreg, vreg->max_uA);
 }
 
@@ -4423,7 +8017,9 @@
 {
 	int ret = 0;
 
-	if (!vreg || vreg->enabled)
+	if (!vreg)
+		goto out;
+	else if (vreg->enabled || vreg->unused)
 		goto out;
 
 	ret = ufshcd_config_vreg(dev, vreg, true);
@@ -4443,7 +8039,9 @@
 {
 	int ret = 0;
 
-	if (!vreg || !vreg->enabled)
+	if (!vreg)
+		goto out;
+	else if (!vreg->enabled || vreg->unused)
 		goto out;
 
 	ret = regulator_disable(vreg->reg);
@@ -4493,11 +8091,16 @@
 static int ufshcd_setup_hba_vreg(struct ufs_hba *hba, bool on)
 {
 	struct ufs_vreg_info *info = &hba->vreg_info;
+	int ret = 0;
 
-	if (info)
-		return ufshcd_toggle_vreg(hba->dev, info->vdd_hba, on);
+	if (info->vdd_hba) {
+		ret = ufshcd_toggle_vreg(hba->dev, info->vdd_hba, on);
 
-	return 0;
+		if (!ret)
+			ufshcd_vops_update_sec_cfg(hba, on);
+	}
+
+	return ret;
 }
 
 static int ufshcd_get_vreg(struct device *dev, struct ufs_vreg *vreg)
@@ -4549,22 +8152,73 @@
 	return 0;
 }
 
-static int __ufshcd_setup_clocks(struct ufs_hba *hba, bool on,
-					bool skip_ref_clk)
+static int ufshcd_set_vccq_rail_unused(struct ufs_hba *hba, bool unused)
+{
+	int ret = 0;
+	struct ufs_vreg_info *info = &hba->vreg_info;
+
+	if (!info)
+		goto out;
+	else if (!info->vccq)
+		goto out;
+
+	if (unused) {
+		/* shut off the rail here */
+		ret = ufshcd_toggle_vreg(hba->dev, info->vccq, false);
+		/*
+		 * Mark this rail as no longer used, so it doesn't get enabled
+		 * later by mistake
+		 */
+		if (!ret)
+			info->vccq->unused = true;
+	} else {
+		/*
+		 * The rail should already be enabled, so just make sure
+		 * the unused flag is cleared.
+		 */
+		info->vccq->unused = false;
+	}
+out:
+	return ret;
+}
+
+static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on,
+			       bool skip_ref_clk, bool is_gating_context)
 {
 	int ret = 0;
 	struct ufs_clk_info *clki;
 	struct list_head *head = &hba->clk_list_head;
 	unsigned long flags;
+	ktime_t start = ktime_get();
+	bool clk_state_changed = false;
 
 	if (!head || list_empty(head))
 		goto out;
 
+	/* call vendor specific bus vote before enabling the clocks */
+	if (on) {
+		ret = ufshcd_vops_set_bus_vote(hba, on);
+		if (ret)
+			return ret;
+	}
+
+	/*
+	 * The vendor specific setup_clocks ops may depend on clocks managed
+	 * by this standard driver, hence call the vendor specific
+	 * setup_clocks before disabling the clocks managed here.
+	 */
+	if (!on) {
+		ret = ufshcd_vops_setup_clocks(hba, on, is_gating_context);
+		if (ret)
+			return ret;
+	}
+
 	list_for_each_entry(clki, head, list) {
 		if (!IS_ERR_OR_NULL(clki->clk)) {
 			if (skip_ref_clk && !strcmp(clki->name, "ref_clk"))
 				continue;
 
+			clk_state_changed = on ^ clki->enabled;
 			if (on && !clki->enabled) {
 				ret = clk_prepare_enable(clki->clk);
 				if (ret) {
@@ -4581,24 +8235,65 @@
 		}
 	}
 
-	ret = ufshcd_vops_setup_clocks(hba, on);
+	/*
+	 * The vendor specific setup_clocks ops may depend on clocks managed
+	 * by this standard driver, hence call the vendor specific
+	 * setup_clocks after enabling the clocks managed here.
+	 */
+	if (on) {
+		ret = ufshcd_vops_setup_clocks(hba, on, is_gating_context);
+		if (ret)
+			goto out;
+	}
+
+	/*
+	 * Call the vendor specific bus vote op to remove the vote after
+	 * the clocks have been disabled.
+	 */
+	if (!on)
+		ret = ufshcd_vops_set_bus_vote(hba, on);
+
 out:
 	if (ret) {
+		if (on)
+			/* Can't do much if this fails */
+			(void) ufshcd_vops_set_bus_vote(hba, false);
 		list_for_each_entry(clki, head, list) {
 			if (!IS_ERR_OR_NULL(clki->clk) && clki->enabled)
 				clk_disable_unprepare(clki->clk);
 		}
-	} else if (on) {
+	} else if (!ret && on) {
 		spin_lock_irqsave(hba->host->host_lock, flags);
 		hba->clk_gating.state = CLKS_ON;
+		trace_ufshcd_clk_gating(dev_name(hba->dev),
+			hba->clk_gating.state);
 		spin_unlock_irqrestore(hba->host->host_lock, flags);
+		/* restore the secure configuration as clocks are enabled */
+		ufshcd_vops_update_sec_cfg(hba, true);
 	}
+
+	if (clk_state_changed)
+		trace_ufshcd_profile_clk_gating(dev_name(hba->dev),
+			(on ? "on" : "off"),
+			ktime_to_us(ktime_sub(ktime_get(), start)), ret);
 	return ret;
 }
 
-static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on)
+static int ufshcd_enable_clocks(struct ufs_hba *hba)
 {
-	return  __ufshcd_setup_clocks(hba, on, false);
+	return  ufshcd_setup_clocks(hba, true, false, false);
+}
+
+static int ufshcd_disable_clocks(struct ufs_hba *hba,
+				 bool is_gating_context)
+{
+	return  ufshcd_setup_clocks(hba, false, false, is_gating_context);
+}
+
+static int ufshcd_disable_clocks_skip_ref_clk(struct ufs_hba *hba,
+					      bool is_gating_context)
+{
+	return  ufshcd_setup_clocks(hba, false, true, is_gating_context);
 }
 
 static int ufshcd_init_clocks(struct ufs_hba *hba)
@@ -4644,7 +8339,7 @@
 {
 	int err = 0;
 
-	if (!hba->vops)
+	if (!hba->var || !hba->var->vops)
 		goto out;
 
 	err = ufshcd_vops_init(hba);
@@ -4668,11 +8363,9 @@
 
 static void ufshcd_variant_hba_exit(struct ufs_hba *hba)
 {
-	if (!hba->vops)
+	if (!hba->var || !hba->var->vops)
 		return;
 
-	ufshcd_vops_setup_clocks(hba, false);
-
 	ufshcd_vops_setup_regulators(hba, false);
 
 	ufshcd_vops_exit(hba);
@@ -4701,7 +8394,7 @@
 	if (err)
 		goto out_disable_hba_vreg;
 
-	err = ufshcd_setup_clocks(hba, true);
+	err = ufshcd_enable_clocks(hba);
 	if (err)
 		goto out_disable_hba_vreg;
 
@@ -4723,7 +8416,7 @@
 out_disable_vreg:
 	ufshcd_setup_vreg(hba, false);
 out_disable_clks:
-	ufshcd_setup_clocks(hba, false);
+	ufshcd_disable_clocks(hba, false);
 out_disable_hba_vreg:
 	ufshcd_setup_hba_vreg(hba, false);
 out:
@@ -4735,7 +8428,12 @@
 	if (hba->is_powered) {
 		ufshcd_variant_hba_exit(hba);
 		ufshcd_setup_vreg(hba, false);
-		ufshcd_setup_clocks(hba, false);
+		if (ufshcd_is_clkscaling_supported(hba)) {
+			if (hba->devfreq)
+				ufshcd_suspend_clkscaling(hba);
+			destroy_workqueue(hba->clk_scaling.workq);
+		}
+		ufshcd_disable_clocks(hba, false);
 		ufshcd_setup_hba_vreg(hba, false);
 		hba->is_powered = false;
 	}
@@ -4748,19 +8446,19 @@
 				0,
 				0,
 				0,
-				SCSI_SENSE_BUFFERSIZE,
+				UFSHCD_REQ_SENSE_SIZE,
 				0};
 	char *buffer;
 	int ret;
 
-	buffer = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_KERNEL);
+	buffer = kzalloc(UFSHCD_REQ_SENSE_SIZE, GFP_KERNEL);
 	if (!buffer) {
 		ret = -ENOMEM;
 		goto out;
 	}
 
 	ret = scsi_execute_req_flags(sdp, cmd, DMA_FROM_DEVICE, buffer,
-				SCSI_SENSE_BUFFERSIZE, NULL,
+				UFSHCD_REQ_SENSE_SIZE, NULL,
 				msecs_to_jiffies(1000), 3, NULL, REQ_PM);
 	if (ret)
 		pr_err("%s: failed with err %d\n", __func__, ret);
@@ -4868,10 +8566,20 @@
 		   (!check_for_bkops || (check_for_bkops &&
 		    !hba->auto_bkops_enabled))) {
 		/*
+		 * Make sure the link is in a low power mode; we currently do
+		 * this by putting the link in Hibern8. The other way to put
+		 * the link in low power mode is to send a DME end point reset
+		 * to the device and then a DME reset to the local UniPro, but
+		 * putting the link in Hibern8 is much faster.
+		 */
+		ret = ufshcd_uic_hibern8_enter(hba);
+		if (ret)
+			goto out;
+		/*
 		 * Change controller state to "reset state" which
 		 * should also put the link in off/reset state
 		 */
-		ufshcd_hba_stop(hba);
+		ufshcd_hba_stop(hba, true);
 		/*
 		 * TODO: Check if we need any delay to make sure that
 		 * controller is reset
@@ -4886,6 +8594,15 @@
 static void ufshcd_vreg_set_lpm(struct ufs_hba *hba)
 {
 	/*
+	 * It seems some UFS devices may keep drawing more than the sleep
+	 * current (at least for 500us) from the UFS rails (especially from
+	 * the VCCQ rail). To avoid this, add a 2ms delay before putting
+	 * these UFS rails in LPM mode.
+	 */
+	if (!ufshcd_is_link_active(hba))
+		usleep_range(2000, 2100);
+
+	/*
 	 * If UFS device is either in UFS_Sleep turn off VCC rail to save some
 	 * power.
 	 *
@@ -4917,7 +8634,6 @@
 	    !hba->dev_info.is_lu_power_on_wp) {
 		ret = ufshcd_setup_vreg(hba, true);
 	} else if (!ufshcd_is_ufs_dev_active(hba)) {
-		ret = ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, true);
 		if (!ret && !ufshcd_is_link_active(hba)) {
 			ret = ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq);
 			if (ret)
@@ -4926,6 +8642,7 @@
 			if (ret)
 				goto vccq_lpm;
 		}
+		ret = ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, true);
 	}
 	goto out;
 
@@ -4939,13 +8656,17 @@
 
 static void ufshcd_hba_vreg_set_lpm(struct ufs_hba *hba)
 {
-	if (ufshcd_is_link_off(hba))
+	if (ufshcd_is_link_off(hba) ||
+	    (ufshcd_is_link_hibern8(hba)
+	     && ufshcd_is_power_collapse_during_hibern8_allowed(hba)))
 		ufshcd_setup_hba_vreg(hba, false);
 }
 
 static void ufshcd_hba_vreg_set_hpm(struct ufs_hba *hba)
 {
-	if (ufshcd_is_link_off(hba))
+	if (ufshcd_is_link_off(hba) ||
+	    (ufshcd_is_link_hibern8(hba)
+	     && ufshcd_is_power_collapse_during_hibern8_allowed(hba)))
 		ufshcd_setup_hba_vreg(hba, true);
 }
 
@@ -4987,8 +8708,17 @@
 	 * If we can't transition into any of the low power modes
 	 * just gate the clocks.
 	 */
-	ufshcd_hold(hba, false);
+	WARN_ON(hba->hibern8_on_idle.is_enabled &&
+		hba->hibern8_on_idle.active_reqs);
+	ufshcd_hold_all(hba);
 	hba->clk_gating.is_suspended = true;
+	hba->hibern8_on_idle.is_suspended = true;
+
+	if (hba->clk_scaling.is_allowed) {
+		cancel_work_sync(&hba->clk_scaling.suspend_work);
+		cancel_work_sync(&hba->clk_scaling.resume_work);
+		ufshcd_suspend_clkscaling(hba);
+	}
 
 	if (req_dev_pwr_mode == UFS_ACTIVE_PWR_MODE &&
 			req_link_state == UIC_LINK_ACTIVE_STATE) {
@@ -4997,12 +8727,12 @@
 
 	if ((req_dev_pwr_mode == hba->curr_dev_pwr_mode) &&
 	    (req_link_state == hba->uic_link_state))
-		goto out;
+		goto enable_gating;
 
 	/* UFS device & link must be active before we enter in this function */
 	if (!ufshcd_is_ufs_dev_active(hba) || !ufshcd_is_link_active(hba)) {
 		ret = -EINVAL;
-		goto out;
+		goto enable_gating;
 	}
 
 	if (ufshcd_is_runtime_pm(pm_op)) {
@@ -5035,19 +8765,14 @@
 	if (ret)
 		goto set_dev_active;
 
+	if (ufshcd_is_link_hibern8(hba) &&
+	    ufshcd_is_hibern8_on_idle_allowed(hba))
+		hba->hibern8_on_idle.state = HIBERN8_ENTERED;
+
 	ufshcd_vreg_set_lpm(hba);
 
 disable_clks:
 	/*
-	 * The clock scaling needs access to controller registers. Hence, Wait
-	 * for pending clock scaling work to be done before clocks are
-	 * turned off.
-	 */
-	if (ufshcd_is_clkscaling_enabled(hba)) {
-		devfreq_suspend_device(hba->devfreq);
-		hba->clk_scaling.window_start_t = 0;
-	}
-	/*
 	 * Call vendor specific suspend callback. As these callbacks may access
 	 * vendor specific host controller register space call them before the
 	 * host clocks are ON.
@@ -5056,17 +8781,19 @@
 	if (ret)
 		goto set_link_active;
 
-	ret = ufshcd_vops_setup_clocks(hba, false);
-	if (ret)
-		goto vops_resume;
-
 	if (!ufshcd_is_link_active(hba))
-		ufshcd_setup_clocks(hba, false);
+		ret = ufshcd_disable_clocks(hba, false);
 	else
 		/* If link is active, device ref_clk can't be switched off */
-		__ufshcd_setup_clocks(hba, false, true);
+		ret = ufshcd_disable_clocks_skip_ref_clk(hba, false);
+	if (ret)
+		goto set_link_active;
 
+	if (ufshcd_is_clkgating_allowed(hba)) {
 	hba->clk_gating.state = CLKS_OFF;
+		trace_ufshcd_clk_gating(dev_name(hba->dev),
+					hba->clk_gating.state);
+	}
 	/*
 	 * Disable the host irq as host controller as there won't be any
 	 * host controller transaction expected till resume.
@@ -5076,22 +8803,31 @@
 	ufshcd_hba_vreg_set_lpm(hba);
 	goto out;
 
-vops_resume:
-	ufshcd_vops_resume(hba, pm_op);
 set_link_active:
+	if (hba->clk_scaling.is_allowed)
+		ufshcd_resume_clkscaling(hba);
 	ufshcd_vreg_set_hpm(hba);
-	if (ufshcd_is_link_hibern8(hba) && !ufshcd_uic_hibern8_exit(hba))
+	if (ufshcd_is_link_hibern8(hba) && !ufshcd_uic_hibern8_exit(hba)) {
 		ufshcd_set_link_active(hba);
-	else if (ufshcd_is_link_off(hba))
+	} else if (ufshcd_is_link_off(hba)) {
+		ufshcd_update_error_stats(hba, UFS_ERR_VOPS_SUSPEND);
 		ufshcd_host_reset_and_restore(hba);
+	}
 set_dev_active:
 	if (!ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE))
 		ufshcd_disable_auto_bkops(hba);
 enable_gating:
+	if (hba->clk_scaling.is_allowed)
+		ufshcd_resume_clkscaling(hba);
+	hba->hibern8_on_idle.is_suspended = false;
 	hba->clk_gating.is_suspended = false;
-	ufshcd_release(hba);
+	ufshcd_release_all(hba);
 out:
 	hba->pm_op_in_progress = 0;
+
+	if (ret)
+		ufshcd_update_error_stats(hba, UFS_ERR_SUSPEND);
+
 	return ret;
 }
 
@@ -5115,14 +8851,12 @@
 
 	ufshcd_hba_vreg_set_hpm(hba);
 	/* Make sure clocks are enabled before accessing controller */
-	ret = ufshcd_setup_clocks(hba, true);
+	ret = ufshcd_enable_clocks(hba);
 	if (ret)
 		goto out;
 
 	/* enable the host irq as host controller would be active soon */
-	ret = ufshcd_enable_irq(hba);
-	if (ret)
-		goto disable_irq_and_vops_clks;
+	ufshcd_enable_irq(hba);
 
 	ret = ufshcd_vreg_set_hpm(hba);
 	if (ret)
@@ -5139,18 +8873,28 @@
 
 	if (ufshcd_is_link_hibern8(hba)) {
 		ret = ufshcd_uic_hibern8_exit(hba);
-		if (!ret)
+		if (!ret) {
 			ufshcd_set_link_active(hba);
-		else
+			if (ufshcd_is_hibern8_on_idle_allowed(hba))
+				hba->hibern8_on_idle.state = HIBERN8_EXITED;
+		} else {
 			goto vendor_suspend;
+		}
 	} else if (ufshcd_is_link_off(hba)) {
-		ret = ufshcd_host_reset_and_restore(hba);
 		/*
-		 * ufshcd_host_reset_and_restore() should have already
+		 * A full initialization of the host and the device is required
+		 * since the link was put into the off state during suspend.
+		 */
+		ret = ufshcd_reset_and_restore(hba);
+		/*
+		 * ufshcd_reset_and_restore() should have already
 		 * set the link state as active
 		 */
 		if (ret || !ufshcd_is_link_active(hba))
 			goto vendor_suspend;
+		/* mark link state as hibern8 exited */
+		if (ufshcd_is_hibern8_on_idle_allowed(hba))
+			hba->hibern8_on_idle.state = HIBERN8_EXITED;
 	}
 
 	if (!ufshcd_is_ufs_dev_active(hba)) {
@@ -5169,25 +8913,37 @@
 		ufshcd_urgent_bkops(hba);
 
 	hba->clk_gating.is_suspended = false;
+	hba->hibern8_on_idle.is_suspended = false;
 
-	if (ufshcd_is_clkscaling_enabled(hba))
-		devfreq_resume_device(hba->devfreq);
+	if (hba->clk_scaling.is_allowed)
+		ufshcd_resume_clkscaling(hba);
 
 	/* Schedule clock gating in case of no access to UFS device yet */
-	ufshcd_release(hba);
+	ufshcd_release_all(hba);
 	goto out;
 
 set_old_link_state:
 	ufshcd_link_state_transition(hba, old_link_state, 0);
+	if (ufshcd_is_link_hibern8(hba) &&
+	    ufshcd_is_hibern8_on_idle_allowed(hba))
+		hba->hibern8_on_idle.state = HIBERN8_ENTERED;
 vendor_suspend:
 	ufshcd_vops_suspend(hba, pm_op);
 disable_vreg:
 	ufshcd_vreg_set_lpm(hba);
 disable_irq_and_vops_clks:
 	ufshcd_disable_irq(hba);
-	ufshcd_setup_clocks(hba, false);
+	if (hba->clk_scaling.is_allowed)
+		ufshcd_suspend_clkscaling(hba);
+	ufshcd_disable_clocks(hba, false);
+	if (ufshcd_is_clkgating_allowed(hba))
+		hba->clk_gating.state = CLKS_OFF;
 out:
 	hba->pm_op_in_progress = 0;
+
+	if (ret)
+		ufshcd_update_error_stats(hba, UFS_ERR_RESUME);
+
 	return ret;
 }
 
@@ -5203,20 +8959,18 @@
 int ufshcd_system_suspend(struct ufs_hba *hba)
 {
 	int ret = 0;
+	ktime_t start = ktime_get();
 
 	if (!hba || !hba->is_powered)
 		return 0;
 
-	if (pm_runtime_suspended(hba->dev)) {
-		if (hba->rpm_lvl == hba->spm_lvl)
-			/*
-			 * There is possibility that device may still be in
-			 * active state during the runtime suspend.
-			 */
 			if ((ufs_get_pm_lvl_to_dev_pwr_mode(hba->spm_lvl) ==
-			    hba->curr_dev_pwr_mode) && !hba->auto_bkops_enabled)
+	     hba->curr_dev_pwr_mode) &&
+	    (ufs_get_pm_lvl_to_link_pwr_state(hba->spm_lvl) ==
+	     hba->uic_link_state))
 				goto out;
 
+	if (pm_runtime_suspended(hba->dev)) {
 		/*
 		 * UFS device and/or UFS link low power states during runtime
 		 * suspend seems to be different than what is expected during
@@ -5232,6 +8986,9 @@
 
 	ret = ufshcd_suspend(hba, UFS_SYSTEM_PM);
 out:
+	trace_ufshcd_system_suspend(dev_name(hba->dev), ret,
+		ktime_to_us(ktime_sub(ktime_get(), start)),
+		hba->curr_dev_pwr_mode, hba->uic_link_state);
 	if (!ret)
 		hba->is_sys_suspended = true;
 	return ret;
@@ -5247,14 +9004,25 @@
 
 int ufshcd_system_resume(struct ufs_hba *hba)
 {
-	if (!hba || !hba->is_powered || pm_runtime_suspended(hba->dev))
+	int ret = 0;
+	ktime_t start = ktime_get();
+
+	if (!hba)
+		return -EINVAL;
+
+	if (!hba->is_powered || pm_runtime_suspended(hba->dev))
 		/*
 		 * Let the runtime resume take care of resuming
 		 * if runtime suspended.
 		 */
-		return 0;
-
-	return ufshcd_resume(hba, UFS_SYSTEM_PM);
+		goto out;
+	else
+		ret = ufshcd_resume(hba, UFS_SYSTEM_PM);
+out:
+	trace_ufshcd_system_resume(dev_name(hba->dev), ret,
+		ktime_to_us(ktime_sub(ktime_get(), start)),
+		hba->curr_dev_pwr_mode, hba->uic_link_state);
+	return ret;
 }
 EXPORT_SYMBOL(ufshcd_system_resume);
 
@@ -5268,10 +9036,23 @@
  */
 int ufshcd_runtime_suspend(struct ufs_hba *hba)
 {
-	if (!hba || !hba->is_powered)
-		return 0;
+	int ret = 0;
+	ktime_t start = ktime_get();
+
+	if (!hba)
+		return -EINVAL;
+
+	if (!hba->is_powered)
+		goto out;
+	else
+		ret = ufshcd_suspend(hba, UFS_RUNTIME_PM);
+out:
+	trace_ufshcd_runtime_suspend(dev_name(hba->dev), ret,
+		ktime_to_us(ktime_sub(ktime_get(), start)),
+		hba->curr_dev_pwr_mode,
+		hba->uic_link_state);
+	return ret;
 
-	return ufshcd_suspend(hba, UFS_RUNTIME_PM);
 }
 EXPORT_SYMBOL(ufshcd_runtime_suspend);
 
@@ -5298,10 +9079,22 @@
  */
 int ufshcd_runtime_resume(struct ufs_hba *hba)
 {
-	if (!hba || !hba->is_powered)
-		return 0;
+	int ret = 0;
+	ktime_t start = ktime_get();
+
+	if (!hba)
+		return -EINVAL;
+
+	if (!hba->is_powered)
+		goto out;
 	else
-		return ufshcd_resume(hba, UFS_RUNTIME_PM);
+		ret = ufshcd_resume(hba, UFS_RUNTIME_PM);
+out:
+	trace_ufshcd_runtime_resume(dev_name(hba->dev), ret,
+		ktime_to_us(ktime_sub(ktime_get(), start)),
+		hba->curr_dev_pwr_mode,
+		hba->uic_link_state);
+	return ret;
 }
 EXPORT_SYMBOL(ufshcd_runtime_resume);
 
@@ -5311,6 +9104,157 @@
 }
 EXPORT_SYMBOL(ufshcd_runtime_idle);
 
+static inline ssize_t ufshcd_pm_lvl_store(struct device *dev,
+					   struct device_attribute *attr,
+					   const char *buf, size_t count,
+					   bool rpm)
+{
+	struct ufs_hba *hba = dev_get_drvdata(dev);
+	unsigned long flags, value;
+
+	if (kstrtoul(buf, 0, &value))
+		return -EINVAL;
+
+	if (value >= UFS_PM_LVL_MAX)
+		return -EINVAL;
+
+	spin_lock_irqsave(hba->host->host_lock, flags);
+	if (rpm)
+		hba->rpm_lvl = value;
+	else
+		hba->spm_lvl = value;
+	ufshcd_apply_pm_quirks(hba);
+	spin_unlock_irqrestore(hba->host->host_lock, flags);
+	return count;
+}
+
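+/*
+ * The rpm_lvl/spm_lvl nodes created below take an index into
+ * ufs_pm_lvl_states[]. A shell sketch (the sysfs path is hypothetical
+ * and platform dependent):
+ *
+ *	cat /sys/.../rpm_lvl		# dump current and available levels
+ *	echo 4 > /sys/.../rpm_lvl	# select a deeper power level
+ */
+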
+static ssize_t ufshcd_rpm_lvl_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct ufs_hba *hba = dev_get_drvdata(dev);
+	int curr_len;
+	u8 lvl;
+
+	curr_len = snprintf(buf, PAGE_SIZE,
+			    "\nCurrent Runtime PM level [%d] => dev_state [%s] link_state [%s]\n",
+			    hba->rpm_lvl,
+			    ufschd_ufs_dev_pwr_mode_to_string(
+				ufs_pm_lvl_states[hba->rpm_lvl].dev_state),
+			    ufschd_uic_link_state_to_string(
+				ufs_pm_lvl_states[hba->rpm_lvl].link_state));
+
+	curr_len += snprintf((buf + curr_len), (PAGE_SIZE - curr_len),
+			     "\nAll available Runtime PM levels info:\n");
+	for (lvl = UFS_PM_LVL_0; lvl < UFS_PM_LVL_MAX; lvl++)
+		curr_len += snprintf((buf + curr_len), (PAGE_SIZE - curr_len),
+				     "\tRuntime PM level [%d] => dev_state [%s] link_state [%s]\n",
+				    lvl,
+				    ufschd_ufs_dev_pwr_mode_to_string(
+					ufs_pm_lvl_states[lvl].dev_state),
+				    ufschd_uic_link_state_to_string(
+					ufs_pm_lvl_states[lvl].link_state));
+
+	return curr_len;
+}
+
+static ssize_t ufshcd_rpm_lvl_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	return ufshcd_pm_lvl_store(dev, attr, buf, count, true);
+}
+
+static void ufshcd_add_rpm_lvl_sysfs_nodes(struct ufs_hba *hba)
+{
+	hba->rpm_lvl_attr.show = ufshcd_rpm_lvl_show;
+	hba->rpm_lvl_attr.store = ufshcd_rpm_lvl_store;
+	sysfs_attr_init(&hba->rpm_lvl_attr.attr);
+	hba->rpm_lvl_attr.attr.name = "rpm_lvl";
+	hba->rpm_lvl_attr.attr.mode = S_IRUGO | S_IWUSR;
+	if (device_create_file(hba->dev, &hba->rpm_lvl_attr))
+		dev_err(hba->dev, "Failed to create sysfs for rpm_lvl\n");
+}
+
+static ssize_t ufshcd_spm_lvl_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct ufs_hba *hba = dev_get_drvdata(dev);
+	int curr_len;
+	u8 lvl;
+
+	curr_len = snprintf(buf, PAGE_SIZE,
+			    "\nCurrent System PM level [%d] => dev_state [%s] link_state [%s]\n",
+			    hba->spm_lvl,
+			    ufschd_ufs_dev_pwr_mode_to_string(
+				ufs_pm_lvl_states[hba->spm_lvl].dev_state),
+			    ufschd_uic_link_state_to_string(
+				ufs_pm_lvl_states[hba->spm_lvl].link_state));
+
+	curr_len += snprintf((buf + curr_len), (PAGE_SIZE - curr_len),
+			     "\nAll available System PM levels info:\n");
+	for (lvl = UFS_PM_LVL_0; lvl < UFS_PM_LVL_MAX; lvl++)
+		curr_len += snprintf((buf + curr_len), (PAGE_SIZE - curr_len),
+				     "\tSystem PM level [%d] => dev_state [%s] link_state [%s]\n",
+				    lvl,
+				    ufschd_ufs_dev_pwr_mode_to_string(
+					ufs_pm_lvl_states[lvl].dev_state),
+				    ufschd_uic_link_state_to_string(
+					ufs_pm_lvl_states[lvl].link_state));
+
+	return curr_len;
+}
+
+static ssize_t ufshcd_spm_lvl_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	return ufshcd_pm_lvl_store(dev, attr, buf, count, false);
+}
+
+static void ufshcd_add_spm_lvl_sysfs_nodes(struct ufs_hba *hba)
+{
+	hba->spm_lvl_attr.show = ufshcd_spm_lvl_show;
+	hba->spm_lvl_attr.store = ufshcd_spm_lvl_store;
+	sysfs_attr_init(&hba->spm_lvl_attr.attr);
+	hba->spm_lvl_attr.attr.name = "spm_lvl";
+	hba->spm_lvl_attr.attr.mode = S_IRUGO | S_IWUSR;
+	if (device_create_file(hba->dev, &hba->spm_lvl_attr))
+		dev_err(hba->dev, "Failed to create sysfs for spm_lvl\n");
+}
+
+static inline void ufshcd_add_sysfs_nodes(struct ufs_hba *hba)
+{
+	ufshcd_add_rpm_lvl_sysfs_nodes(hba);
+	ufshcd_add_spm_lvl_sysfs_nodes(hba);
+}
+
+static void ufshcd_shutdown_clkscaling(struct ufs_hba *hba)
+{
+	bool suspend = false;
+	unsigned long flags;
+
+	spin_lock_irqsave(hba->host->host_lock, flags);
+	if (hba->clk_scaling.is_allowed) {
+		hba->clk_scaling.is_allowed = false;
+		suspend = true;
+	}
+	spin_unlock_irqrestore(hba->host->host_lock, flags);
+
+	/*
+	 * Clock scaling work may have been scheduled earlier; make sure
+	 * it doesn't race with shutdown.
+	 */
+	if (ufshcd_is_clkscaling_supported(hba)) {
+		device_remove_file(hba->dev, &hba->clk_scaling.enable_attr);
+		cancel_work_sync(&hba->clk_scaling.suspend_work);
+		cancel_work_sync(&hba->clk_scaling.resume_work);
+		if (suspend)
+			ufshcd_suspend_clkscaling(hba);
+	}
+
+	/* Unregister so that devfreq_monitor can't race with shutdown */
+	if (hba->devfreq)
+		devfreq_remove_device(hba->devfreq);
+}
+
 /**
  * ufshcd_shutdown - shutdown routine
  * @hba: per adapter instance
@@ -5326,12 +9270,25 @@
 	if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba))
 		goto out;
 
-	if (pm_runtime_suspended(hba->dev)) {
-		ret = ufshcd_runtime_resume(hba);
+	pm_runtime_get_sync(hba->dev);
+	ufshcd_hold_all(hba);
+	ufshcd_mark_shutdown_ongoing(hba);
+	ufshcd_shutdown_clkscaling(hba);
+	/*
+	 * (1) Acquire the lock to stop any more requests
+	 * (2) Wait for all issued requests to complete
+	 */
+	ufshcd_get_write_lock(hba);
+	ufshcd_scsi_block_requests(hba);
+	ret = ufshcd_wait_for_doorbell_clr(hba, U64_MAX);
 		if (ret)
-			goto out;
-	}
-
+		dev_err(hba->dev, "%s: waiting for DB clear: failed: %d\n",
+			__func__, ret);
+	/* Requests may have errored out above; let them be handled */
+	flush_work(&hba->eh_work);
+	/* reqs issued from contexts other than shutdown will fail from now on */
+	ufshcd_scsi_unblock_requests(hba);
+	ufshcd_release_all(hba);
 	ret = ufshcd_suspend(hba, UFS_SHUTDOWN_PM);
 out:
 	if (ret)
@@ -5341,6 +9298,61 @@
 }
 EXPORT_SYMBOL(ufshcd_shutdown);
 
+/*
+ * Permitted values: 0, 1, 2.
+ * 0 -> Disable IO latency histograms (default)
+ * 1 -> Enable IO latency histograms
+ * 2 -> Zero out IO latency histograms
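+ *
+ * A typical interaction (sketch; the sysfs path is hypothetical):
+ *	echo 1 > /sys/.../latency_hist	(start collecting)
+ *	cat /sys/.../latency_hist	(dump read/write histograms)
+ *	echo 2 > /sys/.../latency_hist	(zero the histograms)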
+ */
+static ssize_t
+latency_hist_store(struct device *dev, struct device_attribute *attr,
+		   const char *buf, size_t count)
+{
+	struct ufs_hba *hba = dev_get_drvdata(dev);
+	long value;
+
+	if (kstrtol(buf, 0, &value))
+		return -EINVAL;
+	if (value == BLK_IO_LAT_HIST_ZERO) {
+		memset(&hba->io_lat_read, 0, sizeof(hba->io_lat_read));
+		memset(&hba->io_lat_write, 0, sizeof(hba->io_lat_write));
+	} else if (value == BLK_IO_LAT_HIST_ENABLE ||
+		 value == BLK_IO_LAT_HIST_DISABLE)
+		hba->latency_hist_enabled = value;
+	return count;
+}
+
+ssize_t
+latency_hist_show(struct device *dev, struct device_attribute *attr,
+		  char *buf)
+{
+	struct ufs_hba *hba = dev_get_drvdata(dev);
+	size_t written_bytes;
+
+	written_bytes = blk_latency_hist_show("Read", &hba->io_lat_read,
+			buf, PAGE_SIZE);
+	written_bytes += blk_latency_hist_show("Write", &hba->io_lat_write,
+			buf + written_bytes, PAGE_SIZE - written_bytes);
+
+	return written_bytes;
+}
+
+static DEVICE_ATTR(latency_hist, S_IRUGO | S_IWUSR,
+		   latency_hist_show, latency_hist_store);
+
+static void
+ufshcd_init_latency_hist(struct ufs_hba *hba)
+{
+	if (device_create_file(hba->dev, &dev_attr_latency_hist))
+		dev_err(hba->dev, "Failed to create latency_hist sysfs entry\n");
+}
+
+static void
+ufshcd_exit_latency_hist(struct ufs_hba *hba)
+{
+	device_remove_file(hba->dev, &dev_attr_latency_hist);
+}
+
 /**
  * ufshcd_remove - de-allocate SCSI host and host memory space
  *		data structure memory
@@ -5351,14 +9363,17 @@
 	scsi_remove_host(hba->host);
 	/* disable interrupts */
 	ufshcd_disable_intr(hba, hba->intr_mask);
-	ufshcd_hba_stop(hba);
-
-	scsi_host_put(hba->host);
+	ufshcd_hba_stop(hba, true);
 
 	ufshcd_exit_clk_gating(hba);
-	if (ufshcd_is_clkscaling_enabled(hba))
+	ufshcd_exit_hibern8_on_idle(hba);
+	if (ufshcd_is_clkscaling_supported(hba)) {
+		device_remove_file(hba->dev, &hba->clk_scaling.enable_attr);
+		ufshcd_exit_latency_hist(hba);
 		devfreq_remove_device(hba->devfreq);
+	}
 	ufshcd_hba_exit(hba);
+	ufsdbg_remove_debugfs(hba);
 }
 EXPORT_SYMBOL_GPL(ufshcd_remove);
 
@@ -5424,71 +9439,400 @@
 }
 EXPORT_SYMBOL(ufshcd_alloc_host);
 
-static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up)
+/**
+ * ufshcd_is_devfreq_scaling_required - check if scaling is required or not
+ * @hba: per adapter instance
+ * @scale_up: True if scaling up and false if scaling down
+ *
+ * Returns true if scaling is required, false otherwise.
+ */
+static bool ufshcd_is_devfreq_scaling_required(struct ufs_hba *hba,
+					       bool scale_up)
 {
-	int ret = 0;
 	struct ufs_clk_info *clki;
 	struct list_head *head = &hba->clk_list_head;
 
 	if (!head || list_empty(head))
-		goto out;
-
-	ret = ufshcd_vops_clk_scale_notify(hba, scale_up, PRE_CHANGE);
-	if (ret)
-		return ret;
+		return false;
 
 	list_for_each_entry(clki, head, list) {
 		if (!IS_ERR_OR_NULL(clki->clk)) {
 			if (scale_up && clki->max_freq) {
 				if (clki->curr_freq == clki->max_freq)
 					continue;
-				ret = clk_set_rate(clki->clk, clki->max_freq);
-				if (ret) {
-					dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
-						__func__, clki->name,
-						clki->max_freq, ret);
-					break;
-				}
-				clki->curr_freq = clki->max_freq;
-
+				return true;
 			} else if (!scale_up && clki->min_freq) {
 				if (clki->curr_freq == clki->min_freq)
 					continue;
-				ret = clk_set_rate(clki->clk, clki->min_freq);
-				if (ret) {
-					dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
-						__func__, clki->name,
-						clki->min_freq, ret);
-					break;
+				return true;
 				}
-				clki->curr_freq = clki->min_freq;
 			}
 		}
-		dev_dbg(hba->dev, "%s: clk: %s, rate: %lu\n", __func__,
-				clki->name, clk_get_rate(clki->clk));
+
+	return false;
 	}
 
-	ret = ufshcd_vops_clk_scale_notify(hba, scale_up, POST_CHANGE);
+/**
+ * ufshcd_scale_gear - scale up/down UFS gear
+ * @hba: per adapter instance
+ * @scale_up: True for scaling up gear and false for scaling down
+ *
+ * Returns 0 for success,
+ * Returns -EBUSY if scaling can't happen at this time
+ * Returns non-zero for any other errors
+ */
+static int ufshcd_scale_gear(struct ufs_hba *hba, bool scale_up)
+{
+	int ret = 0;
+	struct ufs_pa_layer_attr new_pwr_info;
+	u32 scale_down_gear = ufshcd_vops_get_scale_down_gear(hba);
+
+	BUG_ON(!hba->clk_scaling.saved_pwr_info.is_valid);
+
+	if (scale_up) {
+		memcpy(&new_pwr_info, &hba->clk_scaling.saved_pwr_info.info,
+		       sizeof(struct ufs_pa_layer_attr));
+		/*
+		 * Some UFS devices may stop responding after switching from
+		 * HS-G1 to HS-G3. These devices have been found to work fine
+		 * if we do a 2-step switch: HS-G1 to HS-G2 followed by HS-G2
+		 * to HS-G3. If the UFS_DEVICE_QUIRK_HS_G1_TO_HS_G3_SWITCH
+		 * quirk is enabled for such devices, this 2-step gear switch
+		 * workaround is applied.
+		 */
+		if ((hba->dev_quirks & UFS_DEVICE_QUIRK_HS_G1_TO_HS_G3_SWITCH)
+		    && (hba->pwr_info.gear_tx == UFS_HS_G1)
+		    && (new_pwr_info.gear_tx == UFS_HS_G3)) {
+			/* scale up to G2 first */
+			new_pwr_info.gear_tx = UFS_HS_G2;
+			new_pwr_info.gear_rx = UFS_HS_G2;
+			ret = ufshcd_change_power_mode(hba, &new_pwr_info);
+			if (ret)
+				goto out;
+
+			/* scale up to G3 now */
+			new_pwr_info.gear_tx = UFS_HS_G3;
+			new_pwr_info.gear_rx = UFS_HS_G3;
+			/* now, fall through to set the HS-G3 */
+		}
+		ret = ufshcd_change_power_mode(hba, &new_pwr_info);
+		if (ret)
+			goto out;
+	} else {
+		memcpy(&new_pwr_info, &hba->pwr_info,
+		       sizeof(struct ufs_pa_layer_attr));
+
+		if (hba->pwr_info.gear_tx > scale_down_gear
+		    || hba->pwr_info.gear_rx > scale_down_gear) {
+			/* save the current power mode */
+			memcpy(&hba->clk_scaling.saved_pwr_info.info,
+				&hba->pwr_info,
+				sizeof(struct ufs_pa_layer_attr));
+
+			/* scale down gear */
+			new_pwr_info.gear_tx = scale_down_gear;
+			new_pwr_info.gear_rx = scale_down_gear;
+			if (!(hba->dev_quirks & UFS_DEVICE_NO_FASTAUTO)) {
+				new_pwr_info.pwr_tx = FASTAUTO_MODE;
+				new_pwr_info.pwr_rx = FASTAUTO_MODE;
+			}
+		}
+		ret = ufshcd_change_power_mode(hba, &new_pwr_info);
+	}
+
+out:
+	if (ret)
+		dev_err(hba->dev, "%s: failed err %d, old gear: (tx %d rx %d), new gear: (tx %d rx %d), scale_up = %d",
+			__func__, ret,
+			hba->pwr_info.gear_tx, hba->pwr_info.gear_rx,
+			new_pwr_info.gear_tx, new_pwr_info.gear_rx,
+			scale_up);
+
+	return ret;
+}
+
+static int ufshcd_clock_scaling_prepare(struct ufs_hba *hba)
+{
+	#define DOORBELL_CLR_TOUT_US		(1000 * 1000) /* 1 sec */
+	int ret = 0;
+	/*
+	 * make sure that there are no outstanding requests when
+	 * clock scaling is in progress
+	 */
+	ufshcd_scsi_block_requests(hba);
+	down_write(&hba->lock);
+	if (ufshcd_wait_for_doorbell_clr(hba, DOORBELL_CLR_TOUT_US)) {
+		ret = -EBUSY;
+		up_write(&hba->lock);
+		ufshcd_scsi_unblock_requests(hba);
+	}
+
+	return ret;
+}
+
+static void ufshcd_clock_scaling_unprepare(struct ufs_hba *hba)
+{
+	up_write(&hba->lock);
+	ufshcd_scsi_unblock_requests(hba);
+}
+
+/**
+ * ufshcd_devfreq_scale - scale up/down UFS clocks and gear
+ * @hba: per adapter instance
+ * @scale_up: True for scaling up and false for scaling down
+ *
+ * Returns 0 for success,
+ * Returns -EBUSY if scaling can't happen at this time
+ * Returns non-zero for any other errors
+ */
+static int ufshcd_devfreq_scale(struct ufs_hba *hba, bool scale_up)
+{
+	int ret = 0;
+
+	/* let's not get into low power until clock scaling is completed */
+	hba->ufs_stats.clk_hold.ctx = CLK_SCALE_WORK;
+	ufshcd_hold_all(hba);
+
+	ret = ufshcd_clock_scaling_prepare(hba);
+	if (ret)
+		goto out;
+
+	/* scale down the gear before scaling down clocks */
+	if (!scale_up) {
+		ret = ufshcd_scale_gear(hba, false);
+		if (ret)
+			goto clk_scaling_unprepare;
+	}
+
+	/*
+	 * If auto hibern8 is supported then put the link in hibern8
+	 * manually; this avoids auto hibern8 racing with the clock
+	 * frequency scaling sequence.
+	 */
+	if (ufshcd_is_auto_hibern8_supported(hba)) {
+		ret = ufshcd_uic_hibern8_enter(hba);
+		if (ret)
+			/* the link is in a bad state, no need to scale up the gear */
+			return ret;
+	}
+
+	ret = ufshcd_scale_clks(hba, scale_up);
+	if (ret)
+		goto scale_up_gear;
 
+	if (ufshcd_is_auto_hibern8_supported(hba)) {
+		ret = ufshcd_uic_hibern8_exit(hba);
+		if (ret)
+			/* the link is in a bad state, no need to scale up the gear */
+			return ret;
+	}
+
+	/* scale up the gear after scaling up clocks */
+	if (scale_up) {
+		ret = ufshcd_scale_gear(hba, true);
+		if (ret) {
+			ufshcd_scale_clks(hba, false);
+			goto clk_scaling_unprepare;
+		}
+	}
+
+	if (!ret) {
+		hba->clk_scaling.is_scaled_up = scale_up;
+		if (scale_up)
+			hba->clk_gating.delay_ms =
+				hba->clk_gating.delay_ms_perf;
+		else
+			hba->clk_gating.delay_ms =
+				hba->clk_gating.delay_ms_pwr_save;
+	}
+
+	goto clk_scaling_unprepare;
+
+scale_up_gear:
+	if (!scale_up)
+		ufshcd_scale_gear(hba, true);
+clk_scaling_unprepare:
+	ufshcd_clock_scaling_unprepare(hba);
 out:
+	hba->ufs_stats.clk_rel.ctx = CLK_SCALE_WORK;
+	ufshcd_release_all(hba);
 	return ret;
 }
 
+static void __ufshcd_suspend_clkscaling(struct ufs_hba *hba)
+{
+	unsigned long flags;
+
+	devfreq_suspend_device(hba->devfreq);
+	spin_lock_irqsave(hba->host->host_lock, flags);
+	hba->clk_scaling.window_start_t = 0;
+	spin_unlock_irqrestore(hba->host->host_lock, flags);
+}
+
+static void ufshcd_suspend_clkscaling(struct ufs_hba *hba)
+{
+	unsigned long flags;
+	bool suspend = false;
+
+	if (!ufshcd_is_clkscaling_supported(hba))
+		return;
+
+	spin_lock_irqsave(hba->host->host_lock, flags);
+	if (!hba->clk_scaling.is_suspended) {
+		suspend = true;
+		hba->clk_scaling.is_suspended = true;
+	}
+	spin_unlock_irqrestore(hba->host->host_lock, flags);
+
+	if (suspend)
+		__ufshcd_suspend_clkscaling(hba);
+}
+
+static void ufshcd_resume_clkscaling(struct ufs_hba *hba)
+{
+	unsigned long flags;
+	bool resume = false;
+
+	if (!ufshcd_is_clkscaling_supported(hba))
+		return;
+
+	spin_lock_irqsave(hba->host->host_lock, flags);
+	if (hba->clk_scaling.is_suspended) {
+		resume = true;
+		hba->clk_scaling.is_suspended = false;
+	}
+	spin_unlock_irqrestore(hba->host->host_lock, flags);
+
+	if (resume)
+		devfreq_resume_device(hba->devfreq);
+}
+
+static ssize_t ufshcd_clkscale_enable_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct ufs_hba *hba = dev_get_drvdata(dev);
+
+	return snprintf(buf, PAGE_SIZE, "%d\n", hba->clk_scaling.is_allowed);
+}
+
+static ssize_t ufshcd_clkscale_enable_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct ufs_hba *hba = dev_get_drvdata(dev);
+	u32 value;
+	int err;
+
+	if (kstrtou32(buf, 0, &value))
+		return -EINVAL;
+
+	value = !!value;
+	if (value == hba->clk_scaling.is_allowed)
+		goto out;
+
+	pm_runtime_get_sync(hba->dev);
+	ufshcd_hold(hba, false);
+
+	cancel_work_sync(&hba->clk_scaling.suspend_work);
+	cancel_work_sync(&hba->clk_scaling.resume_work);
+
+	hba->clk_scaling.is_allowed = value;
+
+	if (value) {
+		ufshcd_resume_clkscaling(hba);
+	} else {
+		ufshcd_suspend_clkscaling(hba);
+		err = ufshcd_devfreq_scale(hba, true);
+		if (err)
+			dev_err(hba->dev, "%s: failed to scale clocks up %d\n",
+					__func__, err);
+	}
+
+	ufshcd_release(hba, false);
+	pm_runtime_put_sync(hba->dev);
+out:
+	return count;
+}
+
+static void ufshcd_clk_scaling_suspend_work(struct work_struct *work)
+{
+	struct ufs_hba *hba = container_of(work, struct ufs_hba,
+					   clk_scaling.suspend_work);
+	unsigned long irq_flags;
+
+	spin_lock_irqsave(hba->host->host_lock, irq_flags);
+	if (hba->clk_scaling.active_reqs || hba->clk_scaling.is_suspended) {
+		spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
+		return;
+	}
+	hba->clk_scaling.is_suspended = true;
+	spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
+
+	__ufshcd_suspend_clkscaling(hba);
+}
+
+static void ufshcd_clk_scaling_resume_work(struct work_struct *work)
+{
+	struct ufs_hba *hba = container_of(work, struct ufs_hba,
+					   clk_scaling.resume_work);
+	unsigned long irq_flags;
+
+	spin_lock_irqsave(hba->host->host_lock, irq_flags);
+	if (!hba->clk_scaling.is_suspended) {
+		spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
+		return;
+	}
+	hba->clk_scaling.is_suspended = false;
+	spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
+
+	devfreq_resume_device(hba->devfreq);
+}
+
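+/*
+ * devfreq ->target() hook. From the validation below, the expected
+ * mapping is:
+ *	*freq == UINT_MAX -> scale clocks/gear up
+ *	*freq == 0        -> scale clocks/gear down
+ * and any other requested frequency is rejected with -EINVAL.
+ */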
 static int ufshcd_devfreq_target(struct device *dev,
 				unsigned long *freq, u32 flags)
 {
-	int err = 0;
+	int ret = 0;
 	struct ufs_hba *hba = dev_get_drvdata(dev);
+	unsigned long irq_flags;
+	ktime_t start;
+	bool scale_up, sched_clk_scaling_suspend_work = false;
 
-	if (!ufshcd_is_clkscaling_enabled(hba))
+	if (!ufshcd_is_clkscaling_supported(hba))
 		return -EINVAL;
 
-	if (*freq == UINT_MAX)
-		err = ufshcd_scale_clks(hba, true);
-	else if (*freq == 0)
-		err = ufshcd_scale_clks(hba, false);
+	if ((*freq > 0) && (*freq < UINT_MAX)) {
+		dev_err(hba->dev, "%s: invalid freq = %lu\n", __func__, *freq);
+		return -EINVAL;
+	}
 
-	return err;
+	spin_lock_irqsave(hba->host->host_lock, irq_flags);
+	if (ufshcd_eh_in_progress(hba)) {
+		spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
+		return 0;
+	}
+
+	if (!hba->clk_scaling.active_reqs)
+		sched_clk_scaling_suspend_work = true;
+
+	scale_up = (*freq == UINT_MAX) ? true : false;
+	if (!ufshcd_is_devfreq_scaling_required(hba, scale_up)) {
+		spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
+		ret = 0;
+		goto out; /* no state change required */
+	}
+	spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
+
+	start = ktime_get();
+	ret = ufshcd_devfreq_scale(hba, scale_up);
+	trace_ufshcd_profile_clk_scaling(dev_name(hba->dev),
+		(scale_up ? "up" : "down"),
+		ktime_to_us(ktime_sub(ktime_get(), start)), ret);
+
+out:
+	if (sched_clk_scaling_suspend_work)
+		queue_work(hba->clk_scaling.workq,
+			   &hba->clk_scaling.suspend_work);
+
+	return ret;
 }
 
 static int ufshcd_devfreq_get_dev_status(struct device *dev,
@@ -5498,7 +9842,7 @@
 	struct ufs_clk_scaling *scaling = &hba->clk_scaling;
 	unsigned long flags;
 
-	if (!ufshcd_is_clkscaling_enabled(hba))
+	if (!ufshcd_is_clkscaling_supported(hba))
 		return -EINVAL;
 
 	memset(stat, 0, sizeof(*stat));
@@ -5529,12 +9873,31 @@
 	return 0;
 }
 
-static struct devfreq_dev_profile ufs_devfreq_profile = {
-	.polling_ms	= 100,
-	.target		= ufshcd_devfreq_target,
-	.get_dev_status	= ufshcd_devfreq_get_dev_status,
-};
+static void ufshcd_clkscaling_init_sysfs(struct ufs_hba *hba)
+{
+	hba->clk_scaling.enable_attr.show = ufshcd_clkscale_enable_show;
+	hba->clk_scaling.enable_attr.store = ufshcd_clkscale_enable_store;
+	sysfs_attr_init(&hba->clk_scaling.enable_attr.attr);
+	hba->clk_scaling.enable_attr.attr.name = "clkscale_enable";
+	hba->clk_scaling.enable_attr.attr.mode = S_IRUGO | S_IWUSR;
+	if (device_create_file(hba->dev, &hba->clk_scaling.enable_attr))
+		dev_err(hba->dev, "Failed to create sysfs for clkscale_enable\n");
+}
+
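+/*
+ * Device tree sketch for the optional property read below (node name
+ * and value are illustrative only; two lanes per direction shown):
+ *
+ *	ufshc@0 {
+ *		lanes-per-direction = <2>;
+ *	};
+ */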
+static void ufshcd_init_lanes_per_dir(struct ufs_hba *hba)
+{
+	struct device *dev = hba->dev;
+	int ret;
 
+	ret = of_property_read_u32(dev->of_node, "lanes-per-direction",
+		&hba->lanes_per_direction);
+	if (ret) {
+		dev_dbg(hba->dev,
+			"%s: failed to read lanes-per-direction, ret=%d\n",
+			__func__, ret);
+		hba->lanes_per_direction = UFSHCD_DEFAULT_LANES_PER_DIRECTION;
+	}
+}
+
 /**
  * ufshcd_init - Driver initialization routine
  * @hba: per-adapter instance
@@ -5558,6 +9921,8 @@
 	hba->mmio_base = mmio_base;
 	hba->irq = irq;
 
+	ufshcd_init_lanes_per_dir(hba);
+
 	err = ufshcd_hba_init(hba);
 	if (err)
 		goto out_error;
@@ -5568,9 +9933,20 @@
 	/* Get UFS version supported by the controller */
 	hba->ufs_version = ufshcd_get_ufs_version(hba);
 
+	/* print error message if ufs_version is not valid */
+	if ((hba->ufs_version != UFSHCI_VERSION_10) &&
+	    (hba->ufs_version != UFSHCI_VERSION_11) &&
+	    (hba->ufs_version != UFSHCI_VERSION_20) &&
+	    (hba->ufs_version != UFSHCI_VERSION_21))
+		dev_err(hba->dev, "invalid UFS version 0x%x\n",
+			hba->ufs_version);
+
 	/* Get Interrupt bit mask per version */
 	hba->intr_mask = ufshcd_get_intr_mask(hba);
 
+	/* Enable debug prints */
+	hba->ufshcd_dbg_print = DEFAULT_UFSHCD_DBG_PRINT_EN;
+
 	err = ufshcd_set_dma_mask(hba);
 	if (err) {
 		dev_err(hba->dev, "set dma mask failed\n");
@@ -5594,6 +9970,7 @@
 	host->max_channel = UFSHCD_MAX_CHANNEL;
 	host->unique_id = host->host_no;
 	host->max_cmd_len = MAX_CDB_SIZE;
+	host->set_dbd_for_caching = 1;
 
 	hba->max_pwr_info.is_valid = false;
 
@@ -5604,6 +9981,7 @@
 	/* Initialize work queues */
 	INIT_WORK(&hba->eh_work, ufshcd_err_handler);
 	INIT_WORK(&hba->eeh_work, ufshcd_exception_event_handler);
+	INIT_WORK(&hba->rls_work, ufshcd_rls_handler);
 
 	/* Initialize UIC command mutex */
 	mutex_init(&hba->uic_cmd_mutex);
@@ -5611,10 +9989,28 @@
 	/* Initialize mutex for device management commands */
 	mutex_init(&hba->dev_cmd.lock);
 
+	init_rwsem(&hba->lock);
+
 	/* Initialize device management tag acquire wait queue */
 	init_waitqueue_head(&hba->dev_cmd.tag_wq);
 
 	ufshcd_init_clk_gating(hba);
+	ufshcd_init_hibern8_on_idle(hba);
+
+	/*
+	 * In order to avoid any spurious interrupt immediately after
+	 * registering the UFS controller interrupt handler, clear any
+	 * pending UFS interrupt status and disable all the UFS interrupts.
+	 */
+	ufshcd_writel(hba, ufshcd_readl(hba, REG_INTERRUPT_STATUS),
+		      REG_INTERRUPT_STATUS);
+	ufshcd_writel(hba, 0, REG_INTERRUPT_ENABLE);
+	/*
+	 * Make sure that UFS interrupts are disabled and any pending interrupt
+	 * status is cleared before registering UFS interrupt handler.
+	 */
+	mb();
+
 	/* IRQ registration */
 	err = devm_request_irq(dev, irq, ufshcd_intr, IRQF_SHARED, UFSHCD, hba);
 	if (err) {
@@ -5630,46 +10026,84 @@
 		goto exit_gating;
 	}
 
+	/* Reset controller to power on reset (POR) state */
+	ufshcd_vops_full_reset(hba);
+
+	/* reset connected UFS device */
+	err = ufshcd_reset_device(hba);
+	if (err)
+		dev_warn(hba->dev, "%s: device reset failed. err %d\n",
+			 __func__, err);
+
 	/* Host controller enable */
 	err = ufshcd_hba_enable(hba);
 	if (err) {
 		dev_err(hba->dev, "Host controller enable failed\n");
+		ufshcd_print_host_regs(hba);
+		ufshcd_print_host_state(hba);
 		goto out_remove_scsi_host;
 	}
 
-	if (ufshcd_is_clkscaling_enabled(hba)) {
-		hba->devfreq = devfreq_add_device(dev, &ufs_devfreq_profile,
-						   "simple_ondemand", NULL);
-		if (IS_ERR(hba->devfreq)) {
-			dev_err(hba->dev, "Unable to register with devfreq %ld\n",
-					PTR_ERR(hba->devfreq));
-			goto out_remove_scsi_host;
-		}
-		/* Suspend devfreq until the UFS device is detected */
-		devfreq_suspend_device(hba->devfreq);
-		hba->clk_scaling.window_start_t = 0;
+	if (ufshcd_is_clkscaling_supported(hba)) {
+		char wq_name[sizeof("ufs_clkscaling_00")];
+
+		INIT_WORK(&hba->clk_scaling.suspend_work,
+			  ufshcd_clk_scaling_suspend_work);
+		INIT_WORK(&hba->clk_scaling.resume_work,
+			  ufshcd_clk_scaling_resume_work);
+
+		snprintf(wq_name, ARRAY_SIZE(wq_name), "ufs_clkscaling_%d",
+			 host->host_no);
+		hba->clk_scaling.workq = create_singlethread_workqueue(wq_name);
+
+		ufshcd_clkscaling_init_sysfs(hba);
 	}
 
+	/*
+	 * If rpm_lvl and spm_lvl are not already set to valid levels, set the
+	 * default power management level for UFS runtime and system suspend.
+	 * The default power saving mode keeps the UFS link in the Hibern8
+	 * state and the UFS device in Sleep.
+	 */
+	if (!ufshcd_is_valid_pm_lvl(hba->rpm_lvl))
+		hba->rpm_lvl = ufs_get_desired_pm_lvl_for_dev_link_state(
+							UFS_SLEEP_PWR_MODE,
+							UIC_LINK_HIBERN8_STATE);
+	if (!ufshcd_is_valid_pm_lvl(hba->spm_lvl))
+		hba->spm_lvl = ufs_get_desired_pm_lvl_for_dev_link_state(
+							UFS_SLEEP_PWR_MODE,
+							UIC_LINK_HIBERN8_STATE);
+
 	/* Hold auto suspend until async scan completes */
 	pm_runtime_get_sync(dev);
 
+	ufshcd_init_latency_hist(hba);
+
 	/*
-	 * The device-initialize-sequence hasn't been invoked yet.
-	 * Set the device to power-off state
+	 * We assume the device wasn't put into the sleep/power-down state
+	 * during the boot stage before the kernel took over. This
+	 * assumption helps avoid doing link startup twice during
+	 * ufshcd_probe_hba().
 	 */
-	ufshcd_set_ufs_dev_poweroff(hba);
+	ufshcd_set_ufs_dev_active(hba);
+
+	ufshcd_cmd_log_init(hba);
 
 	async_schedule(ufshcd_async_scan, hba);
 
+	ufsdbg_add_debugfs(hba);
+
+	ufshcd_add_sysfs_nodes(hba);
+
 	return 0;
 
 out_remove_scsi_host:
 	scsi_remove_host(hba->host);
 exit_gating:
 	ufshcd_exit_clk_gating(hba);
+	ufshcd_exit_latency_hist(hba);
 out_disable:
 	hba->is_irq_enabled = false;
-	scsi_host_put(host);
 	ufshcd_hba_exit(hba);
 out_error:
 	return err;
diff -ruw linux-4.4.115/drivers/scsi/ufs/ufshcd.h linux-4.4.115-fbx/drivers/scsi/ufs/ufshcd.h
--- linux-4.4.115/drivers/scsi/ufs/ufshcd.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/scsi/ufs/ufshcd.h	2019-01-22 16:16:26.635274769 +0100
@@ -3,6 +3,7 @@
  *
  * This code is based on drivers/scsi/ufs/ufshcd.h
  * Copyright (C) 2011-2013 Samsung India Software Operations
+ * Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
  *
  * Authors:
  *	Santosh Yaraganavi <santosh.sy@samsung.com>
@@ -38,12 +39,14 @@
 
 #include <linux/module.h>
 #include <linux/kernel.h>
+#include <linux/hrtimer.h>
 #include <linux/init.h>
 #include <linux/interrupt.h>
 #include <linux/io.h>
 #include <linux/delay.h>
 #include <linux/slab.h>
 #include <linux/spinlock.h>
+#include <linux/rwsem.h>
 #include <linux/workqueue.h>
 #include <linux/errno.h>
 #include <linux/types.h>
@@ -53,6 +56,8 @@
 #include <linux/clk.h>
 #include <linux/completion.h>
 #include <linux/regulator/consumer.h>
+#include <linux/reset.h>
+#include "unipro.h"
 
 #include <asm/irq.h>
 #include <asm/byteorder.h>
@@ -63,11 +68,15 @@
 #include <scsi/scsi_dbg.h>
 #include <scsi/scsi_eh.h>
 
+#include <linux/fault-inject.h>
 #include "ufs.h"
 #include "ufshci.h"
 
 #define UFSHCD "ufshcd"
-#define UFSHCD_DRIVER_VERSION "0.2"
+#define UFSHCD_DRIVER_VERSION "0.3"
+
+#define UFS_BIT(x)	BIT(x)
+#define UFS_MASK(x, y)	(x << ((y) % BITS_PER_LONG))
 
 struct ufs_hba;
 
@@ -125,6 +134,26 @@
 #define ufshcd_set_link_hibern8(hba) ((hba)->uic_link_state = \
 				    UIC_LINK_HIBERN8_STATE)
 
+enum {
+	/* errors which require the host controller reset for recovery */
+	UFS_ERR_HIBERN8_EXIT,
+	UFS_ERR_VOPS_SUSPEND,
+	UFS_ERR_EH,
+	UFS_ERR_CLEAR_PEND_XFER_TM,
+	UFS_ERR_INT_FATAL_ERRORS,
+	UFS_ERR_INT_UIC_ERROR,
+	UFS_ERR_CRYPTO_ENGINE,
+
+	/* other errors */
+	UFS_ERR_HIBERN8_ENTER,
+	UFS_ERR_RESUME,
+	UFS_ERR_SUSPEND,
+	UFS_ERR_LINKSTARTUP,
+	UFS_ERR_POWER_MODE_CHANGE,
+	UFS_ERR_TASK_ABORT,
+	UFS_ERR_MAX,
+};
+
 /*
  * UFS Power management levels.
  * Each level is in increasing order of power savings.
@@ -150,6 +179,10 @@
  * @ucd_req_ptr: UCD address of the command
  * @ucd_rsp_ptr: Response UPIU address for this command
  * @ucd_prdt_ptr: PRDT address of the command
+ * @utrd_dma_addr: UTRD dma address for debug
+ * @ucd_prdt_dma_addr: PRDT dma address for debug
+ * @ucd_rsp_dma_addr: UPIU response dma address for debug
+ * @ucd_req_dma_addr: UPIU request dma address for debug
  * @cmd: pointer to SCSI command
  * @sense_buffer: pointer to sense buffer address of the SCSI command
  * @sense_bufflen: Length of the sense buffer
@@ -158,6 +191,9 @@
  * @task_tag: Task tag of the command
  * @lun: LUN of the command
  * @intr_cmd: Interrupt command (doesn't participate in interrupt aggregation)
+ * @issue_time_stamp: time stamp for debug purposes
+ * @complete_time_stamp: time stamp for statistics
+ * @req_abort_skip: skip request abort task flag
  */
 struct ufshcd_lrb {
 	struct utp_transfer_req_desc *utr_descriptor_ptr;
@@ -165,6 +201,11 @@
 	struct utp_upiu_rsp *ucd_rsp_ptr;
 	struct ufshcd_sg_entry *ucd_prdt_ptr;
 
+	dma_addr_t utrd_dma_addr;
+	dma_addr_t ucd_req_dma_addr;
+	dma_addr_t ucd_rsp_dma_addr;
+	dma_addr_t ucd_prdt_dma_addr;
+
 	struct scsi_cmnd *cmd;
 	u8 *sense_buffer;
 	unsigned int sense_bufflen;
@@ -174,6 +215,10 @@
 	int task_tag;
 	u8 lun; /* UPIU LUN id field is only 8-bit wide */
 	bool intr_cmd;
+	ktime_t issue_time_stamp;
+	ktime_t complete_time_stamp;
+
+	bool req_abort_skip;
 };
 
 /**
@@ -245,7 +290,6 @@
 
 /**
  * struct ufs_hba_variant_ops - variant specific callbacks
- * @name: variant name
  * @init: called when the driver is initialized
  * @exit: called to cleanup everything done in init
  * @get_ufs_hci_version: called to get UFS HCI version
@@ -261,16 +305,23 @@
  *			to be set.
  * @suspend: called during host controller PM callback
  * @resume: called during host controller PM callback
+ * @full_reset:  called during link recovery for handling variant specific
+ *		 implementations of resetting the hci
  * @dbg_register_dump: used to dump controller debug information
+ * @update_sec_cfg: called to restore host controller secure configuration
+ * @get_scale_down_gear: called to get the minimum supported gear to
+ *			 scale down
+ * @set_bus_vote: called to vote for the required bus bandwidth
+ * @add_debugfs: used to add debugfs entries
+ * @remove_debugfs: used to remove debugfs entries
  */
 struct ufs_hba_variant_ops {
-	const char *name;
 	int	(*init)(struct ufs_hba *);
 	void    (*exit)(struct ufs_hba *);
 	u32	(*get_ufs_hci_version)(struct ufs_hba *);
 	int	(*clk_scale_notify)(struct ufs_hba *, bool,
 				    enum ufs_notify_change_status);
-	int	(*setup_clocks)(struct ufs_hba *, bool);
+	int	(*setup_clocks)(struct ufs_hba *, bool, bool);
 	int     (*setup_regulators)(struct ufs_hba *, bool);
 	int	(*hce_enable_notify)(struct ufs_hba *,
 				     enum ufs_notify_change_status);
@@ -280,9 +331,60 @@
 					enum ufs_notify_change_status status,
 					struct ufs_pa_layer_attr *,
 					struct ufs_pa_layer_attr *);
+	int	(*apply_dev_quirks)(struct ufs_hba *);
 	int     (*suspend)(struct ufs_hba *, enum ufs_pm_op);
 	int     (*resume)(struct ufs_hba *, enum ufs_pm_op);
-	void	(*dbg_register_dump)(struct ufs_hba *hba);
+	int	(*full_reset)(struct ufs_hba *);
+	void	(*dbg_register_dump)(struct ufs_hba *hba, bool no_sleep);
+	int	(*update_sec_cfg)(struct ufs_hba *hba, bool restore_sec_cfg);
+	u32	(*get_scale_down_gear)(struct ufs_hba *);
+	int	(*set_bus_vote)(struct ufs_hba *, bool);
+#ifdef CONFIG_DEBUG_FS
+	void	(*add_debugfs)(struct ufs_hba *hba, struct dentry *root);
+	void	(*remove_debugfs)(struct ufs_hba *hba);
+#endif
+};
+
+/**
+ * struct ufs_hba_crypto_variant_ops - variant specific crypto callbacks
+ * @crypto_req_setup: retrieve the necessary cryptographic arguments to set
+ *		      up a request's transfer descriptor.
+ * @crypto_engine_cfg_start: start configuring the cryptographic engine
+ *			     according to the tag parameter
+ * @crypto_engine_cfg_end: end configuring the cryptographic engine
+ *			   according to the tag parameter
+ * @crypto_engine_reset: perform reset to the cryptographic engine
+ * @crypto_engine_get_status: get errors status of the cryptographic engine
+ */
+struct ufs_hba_crypto_variant_ops {
+	int	(*crypto_req_setup)(struct ufs_hba *, struct ufshcd_lrb *lrbp,
+				    u8 *cc_index, bool *enable, u64 *dun);
+	int	(*crypto_engine_cfg_start)(struct ufs_hba *, unsigned int);
+	int	(*crypto_engine_cfg_end)(struct ufs_hba *, struct ufshcd_lrb *,
+			struct request *);
+	int	(*crypto_engine_reset)(struct ufs_hba *);
+	int	(*crypto_engine_get_status)(struct ufs_hba *, u32 *);
+};
+
+/**
+ * struct ufs_hba_pm_qos_variant_ops - variant specific PM QoS callbacks
+ */
+struct ufs_hba_pm_qos_variant_ops {
+	void		(*req_start)(struct ufs_hba *, struct request *);
+	void		(*req_end)(struct ufs_hba *, struct request *, bool);
+};
+
+/**
+ * struct ufs_hba_variant - variant specific parameters
+ * @dev: pointer to the variant specific device
+ * @name: variant name
+ * @vops: variant specific operations
+ * @crypto_vops: variant specific crypto operations
+ * @pm_qos_vops: variant specific PM QoS operations
+ */
+struct ufs_hba_variant {
+	struct device				*dev;
+	const char				*name;
+	struct ufs_hba_variant_ops		*vops;
+	struct ufs_hba_crypto_variant_ops	*crypto_vops;
+	struct ufs_hba_pm_qos_variant_ops	*pm_qos_vops;
 };
 
 /* clock gating state  */
@@ -295,33 +397,117 @@
 
 /**
  * struct ufs_clk_gating - UFS clock gating related info
- * @gate_work: worker to turn off clocks after some delay as specified in
- * delay_ms
+ * @gate_hrtimer: hrtimer to invoke @gate_work after some delay as
+ * specified in @delay_ms
+ * @gate_work: worker to turn off clocks
  * @ungate_work: worker to turn on clocks that will be used in case of
  * interrupt context
  * @state: the current clocks state
- * @delay_ms: gating delay in ms
+ * @delay_ms: current gating delay in ms
+ * @delay_ms_pwr_save: gating delay (in ms) in power save mode
+ * @delay_ms_perf: gating delay (in ms) in performance mode
  * @is_suspended: clk gating is suspended when set to 1 which can be used
  * during suspend/resume
- * @delay_attr: sysfs attribute to control delay_attr
+ * @delay_attr: sysfs attribute to control delay_ms if clock scaling is disabled
+ * @delay_pwr_save_attr: sysfs attribute to control delay_ms_pwr_save
+ * @delay_perf_attr: sysfs attribute to control delay_ms_perf
+ * @enable_attr: sysfs attribute to enable/disable clock gating
+ * @is_enabled: Indicates the current status of clock gating
  * @active_reqs: number of requests that are pending and should be waited for
  * completion before gating clocks.
  */
 struct ufs_clk_gating {
-	struct delayed_work gate_work;
+	struct hrtimer gate_hrtimer;
+	struct work_struct gate_work;
 	struct work_struct ungate_work;
 	enum clk_gating_state state;
 	unsigned long delay_ms;
+	unsigned long delay_ms_pwr_save;
+	unsigned long delay_ms_perf;
 	bool is_suspended;
 	struct device_attribute delay_attr;
+	struct device_attribute delay_pwr_save_attr;
+	struct device_attribute delay_perf_attr;
+	struct device_attribute enable_attr;
+	bool is_enabled;
+	int active_reqs;
+	struct workqueue_struct *clk_gating_workq;
+};
+
+/* Hibern8 state  */
+enum ufshcd_hibern8_on_idle_state {
+	HIBERN8_ENTERED,
+	HIBERN8_EXITED,
+	REQ_HIBERN8_ENTER,
+	REQ_HIBERN8_EXIT,
+	AUTO_HIBERN8,
+};
+
+/**
+ * struct ufs_hibern8_on_idle - UFS Hibern8 on idle related data
+ * @enter_work: worker to put UFS link in hibern8 after some delay as
+ * specified in delay_ms
+ * @exit_work: worker to bring UFS link out of hibern8
+ * @state: the current hibern8 state
+ * @delay_ms: hibern8 enter delay in ms
+ * @is_suspended: hibern8 enter is suspended when set to 1 which can be used
+ * during suspend/resume
+ * @active_reqs: number of requests that are pending and should be waited for
+ * completion before scheduling delayed "enter_work".
+ * @delay_attr: sysfs attribute to control delay_ms
+ * @enable_attr: sysfs attribute to enable/disable hibern8 on idle
+ * @is_enabled: Indicates the current status of hibern8
+ */
+struct ufs_hibern8_on_idle {
+	struct delayed_work enter_work;
+	struct work_struct exit_work;
+	enum ufshcd_hibern8_on_idle_state state;
+	unsigned long delay_ms;
+	bool is_suspended;
 	int active_reqs;
+	struct device_attribute delay_attr;
+	struct device_attribute enable_attr;
+	bool is_enabled;
+};
+
+struct ufs_saved_pwr_info {
+	struct ufs_pa_layer_attr info;
+	bool is_valid;
 };
 
+/**
+ * struct ufs_clk_scaling - UFS clock scaling related data
+ * @active_reqs: number of requests that are pending. If this is zero when
+ * devfreq->target() function is called then schedule "suspend_work" to
+ * suspend devfreq.
+ * @tot_busy_t: Total busy time in current polling window
+ * @window_start_t: Start time (in jiffies) of the current polling window
+ * @busy_start_t: Start time of current busy period
+ * @enable_attr: sysfs attribute to enable/disable clock scaling
+ * @saved_pwr_info: UFS power mode may also be changed during scaling and this
+ * one keeps track of previous power mode.
+ * @workq: workqueue to schedule devfreq suspend/resume work
+ * @suspend_work: worker to suspend devfreq
+ * @resume_work: worker to resume devfreq
+ * @is_allowed: tracks if scaling is currently allowed or not
+ * @is_busy_started: tracks if busy period has started or not
+ * @is_suspended: tracks if devfreq is suspended or not
+ * @is_scaled_up: tracks if we are currently scaled up or scaled down
+ */
 struct ufs_clk_scaling {
-	ktime_t  busy_start_t;
-	bool is_busy_started;
+	int active_reqs;
 	unsigned long  tot_busy_t;
 	unsigned long window_start_t;
+	ktime_t busy_start_t;
+	struct device_attribute enable_attr;
+	struct ufs_saved_pwr_info saved_pwr_info;
+	struct workqueue_struct *workq;
+	struct work_struct suspend_work;
+	struct work_struct resume_work;
+	bool is_allowed;
+	bool is_busy_started;
+	bool is_suspended;
+	bool is_scaled_up;
 };
 
 /**
@@ -333,6 +519,170 @@
 	u32 icc_level;
 };
 
+#define UIC_ERR_REG_HIST_LENGTH 20
+/**
+ * struct ufs_uic_err_reg_hist - keeps history of uic errors
+ * @pos: index to indicate cyclic buffer position
+ * @reg: cyclic buffer of register values
+ * @tstamp: cyclic buffer of timestamps
+ */
+struct ufs_uic_err_reg_hist {
+	int pos;
+	u32 reg[UIC_ERR_REG_HIST_LENGTH];
+	ktime_t tstamp[UIC_ERR_REG_HIST_LENGTH];
+};
+
+#ifdef CONFIG_DEBUG_FS
+struct debugfs_files {
+	struct dentry *debugfs_root;
+	struct dentry *stats_folder;
+	struct dentry *tag_stats;
+	struct dentry *err_stats;
+	struct dentry *show_hba;
+	struct dentry *host_regs;
+	struct dentry *dump_dev_desc;
+	struct dentry *power_mode;
+	struct dentry *dme_local_read;
+	struct dentry *dme_peer_read;
+	struct dentry *dbg_print_en;
+	struct dentry *req_stats;
+	struct dentry *query_stats;
+	u32 dme_local_attr_id;
+	u32 dme_peer_attr_id;
+	struct dentry *reset_controller;
+	struct dentry *err_state;
+	bool err_occurred;
+#ifdef CONFIG_UFS_FAULT_INJECTION
+	struct dentry *err_inj_scenario;
+	struct dentry *err_inj_stats;
+	u32 err_inj_scenario_mask;
+	struct fault_attr fail_attr;
+#endif
+	bool is_sys_suspended;
+};
+
+/* tag stats statistics types */
+enum ts_types {
+	TS_NOT_SUPPORTED	= -1,
+	TS_TAG			= 0,
+	TS_READ			= 1,
+	TS_WRITE		= 2,
+	TS_URGENT_READ		= 3,
+	TS_URGENT_WRITE		= 4,
+	TS_FLUSH		= 5,
+	TS_NUM_STATS		= 6,
+};
+
+/**
+ * struct ufshcd_req_stat - statistics for request handling times (in usec)
+ * @min: shortest time measured
+ * @max: longest time measured
+ * @sum: sum of all the handling times measured (used for average calculation)
+ * @count: number of measurements taken
+ */
+struct ufshcd_req_stat {
+	u64 min;
+	u64 max;
+	u64 sum;
+	u64 count;
+};
+#endif
+
+enum ufshcd_ctx {
+	QUEUE_CMD,
+	ERR_HNDLR_WORK,
+	H8_EXIT_WORK,
+	UIC_CMD_SEND,
+	PWRCTL_CMD_SEND,
+	TM_CMD_SEND,
+	XFR_REQ_COMPL,
+	CLK_SCALE_WORK,
+};
+
+struct ufshcd_clk_ctx {
+	ktime_t ts;
+	enum ufshcd_ctx ctx;
+};
+
+/**
+ * struct ufs_stats - keeps usage/err statistics
+ * @enabled: enable tag stats for debugfs
+ * @tag_stats: pointer to tag statistic counters
+ * @q_depth: current amount of busy slots
+ * @err_stats: counters to keep track of various errors
+ * @req_stats: request handling time statistics per request type
+ * @query_stats_arr: array that holds query statistics
+ * @hibern8_exit_cnt: Counter to keep track of number of exits,
+ *		reset this after link-startup.
+ * @last_hibern8_exit_tstamp: Set time after the hibern8 exit.
+ *		Clear after the first successful command completion.
+ * @pa_err: tracks pa-uic errors
+ * @dl_err: tracks dl-uic errors
+ * @nl_err: tracks nl-uic errors
+ * @tl_err: tracks tl-uic errors
+ * @dme_err: tracks dme errors
+ */
+struct ufs_stats {
+#ifdef CONFIG_DEBUG_FS
+	bool enabled;
+	u64 **tag_stats;
+	int q_depth;
+	int err_stats[UFS_ERR_MAX];
+	struct ufshcd_req_stat req_stats[TS_NUM_STATS];
+	int query_stats_arr[UPIU_QUERY_OPCODE_MAX][MAX_QUERY_IDN];
+#endif
+	u32 last_intr_status;
+	ktime_t last_intr_ts;
+	struct ufshcd_clk_ctx clk_hold;
+	struct ufshcd_clk_ctx clk_rel;
+	u32 hibern8_exit_cnt;
+	ktime_t last_hibern8_exit_tstamp;
+	u32 power_mode_change_cnt;
+	struct ufs_uic_err_reg_hist pa_err;
+	struct ufs_uic_err_reg_hist dl_err;
+	struct ufs_uic_err_reg_hist nl_err;
+	struct ufs_uic_err_reg_hist tl_err;
+	struct ufs_uic_err_reg_hist dme_err;
+};
+
+/* UFS Host Controller debug print bitmask */
+#define UFSHCD_DBG_PRINT_CLK_FREQ_EN		UFS_BIT(0)
+#define UFSHCD_DBG_PRINT_UIC_ERR_HIST_EN	UFS_BIT(1)
+#define UFSHCD_DBG_PRINT_HOST_REGS_EN		UFS_BIT(2)
+#define UFSHCD_DBG_PRINT_TRS_EN			UFS_BIT(3)
+#define UFSHCD_DBG_PRINT_TMRS_EN		UFS_BIT(4)
+#define UFSHCD_DBG_PRINT_PWR_EN			UFS_BIT(5)
+#define UFSHCD_DBG_PRINT_HOST_STATE_EN		UFS_BIT(6)
+
+#define UFSHCD_DBG_PRINT_ALL						   \
+		(UFSHCD_DBG_PRINT_CLK_FREQ_EN		|		   \
+		 UFSHCD_DBG_PRINT_UIC_ERR_HIST_EN	|		   \
+		 UFSHCD_DBG_PRINT_HOST_REGS_EN | UFSHCD_DBG_PRINT_TRS_EN | \
+		 UFSHCD_DBG_PRINT_TMRS_EN | UFSHCD_DBG_PRINT_PWR_EN |	   \
+		 UFSHCD_DBG_PRINT_HOST_STATE_EN)
+
+struct ufshcd_cmd_log_entry {
+	char *str;	/* context like "send", "complete" */
+	char *cmd_type;	/* "scsi", "query", "nop", "dme" */
+	u8 lun;
+	u8 cmd_id;
+	sector_t lba;
+	int transfer_len;
+	u8 idn;		/* used only for query idn */
+	u32 doorbell;
+	u32 outstanding_reqs;
+	u32 seq_num;
+	unsigned int tag;
+	ktime_t tstamp;
+};
+
+struct ufshcd_cmd_log {
+	struct ufshcd_cmd_log_entry *entries;
+	int pos;
+	u32 seq_num;
+};
+
 /**
  * struct ufs_hba - per adapter private structure
  * @mmio_base: UFSHCI base register address
@@ -352,7 +702,7 @@
  * @nutrs: Transfer Request Queue depth supported by controller
  * @nutmrs: Task Management Queue depth supported by controller
  * @ufs_version: UFS Version to which controller complies
- * @vops: pointer to variant specific operations
+ * @var: pointer to variant specific data
  * @priv: pointer to variant specific private data
  * @irq: Irq number of the controller
  * @active_uic_cmd: handle of active UIC command
@@ -378,10 +728,18 @@
  * @dev_cmd: ufs device management command information
  * @last_dme_cmd_tstamp: time stamp of the last completed DME command
  * @auto_bkops_enabled: to track whether bkops is enabled in device
+ * @ufs_stats: ufshcd statistics to be used via debugfs
+ * @debugfs_files: debugfs files associated with the ufs stats
+ * @ufshcd_dbg_print: Bitmask for enabling debug prints
  * @vreg_info: UFS device voltage regulator information
  * @clk_list_head: UFS host controller clocks list node head
  * @pwr_info: holds current power mode
  * @max_pwr_info: keeps the device max valid pwm
+ * @hibern8_on_idle: UFS Hibern8 on idle related data
+ * @urgent_bkops_lvl: keeps track of urgent bkops level for device
+ * @is_urgent_bkops_lvl_checked: keeps track of whether the urgent bkops
+ *  level for the device is known or not.
+ * @scsi_block_reqs_cnt: reference counting for scsi block requests
  */
 struct ufs_hba {
 	void __iomem *mmio_base;
@@ -407,9 +765,11 @@
 	enum ufs_dev_pwr_mode curr_dev_pwr_mode;
 	enum uic_link_state uic_link_state;
 	/* Desired UFS power management level during runtime PM */
-	enum ufs_pm_level rpm_lvl;
+	int rpm_lvl;
 	/* Desired UFS power management level during system PM */
-	enum ufs_pm_level spm_lvl;
+	int spm_lvl;
+	struct device_attribute rpm_lvl_attr;
+	struct device_attribute spm_lvl_attr;
 	int pm_op_in_progress;
 
 	struct ufshcd_lrb *lrb;
@@ -422,7 +782,7 @@
 	int nutrs;
 	int nutmrs;
 	u32 ufs_version;
-	struct ufs_hba_variant_ops *vops;
+	struct ufs_hba_variant *var;
 	void *priv;
 	unsigned int irq;
 	bool is_irq_enabled;
@@ -467,8 +827,14 @@
 	 */
 	#define UFSHCD_QUIRK_BROKEN_UFS_HCI_VERSION		UFS_BIT(5)
 
+	/* Auto hibern8 support is broken */
+	#define UFSHCD_QUIRK_BROKEN_AUTO_HIBERN8		UFS_BIT(6)
+
 	unsigned int quirks;	/* Deviations from standard UFSHCI spec. */
 
+	/* Device deviations from standard UFS device spec. */
+	unsigned int dev_quirks;
+
 	wait_queue_head_t tm_wq;
 	wait_queue_head_t tm_tag_wq;
 	unsigned long tm_condition;
@@ -489,12 +855,17 @@
 	/* Work Queues */
 	struct work_struct eh_work;
 	struct work_struct eeh_work;
+	struct work_struct rls_work;
 
 	/* HBA Errors */
 	u32 errors;
 	u32 uic_error;
+	u32 ce_error;	/* crypto engine errors */
 	u32 saved_err;
 	u32 saved_uic_err;
+	u32 saved_ce_err;
+	bool silence_err_logs;
+	bool force_host_reset;
 
 	/* Device management request data */
 	struct ufs_dev_cmd dev_cmd;
@@ -503,15 +874,33 @@
 	/* Keeps information of the UFS device connected to this host */
 	struct ufs_dev_info dev_info;
 	bool auto_bkops_enabled;
+
+	struct ufs_stats ufs_stats;
+#ifdef CONFIG_DEBUG_FS
+	struct debugfs_files debugfs_files;
+#endif
+
 	struct ufs_vreg_info vreg_info;
 	struct list_head clk_list_head;
 
 	bool wlun_dev_clr_ua;
 
+	/* Number of requests aborts */
+	int req_abort_count;
+
+	/* Number of lanes available (1 or 2) for Rx/Tx */
+	u32 lanes_per_direction;
+
+	/* Bitmask for enabling debug prints */
+	u32 ufshcd_dbg_print;
+
 	struct ufs_pa_layer_attr pwr_info;
 	struct ufs_pwr_mode_info max_pwr_info;
 
 	struct ufs_clk_gating clk_gating;
+	struct ufs_hibern8_on_idle hibern8_on_idle;
+	struct ufshcd_cmd_log cmd_log;
+
 	/* Control to enable/disable host capabilities */
 	u32 caps;
 	/* Allow dynamic clk gating */
@@ -528,6 +917,8 @@
 	 * CAUTION: Enabling this might reduce overall UFS throughput.
 	 */
 #define UFSHCD_CAP_INTR_AGGR (1 << 4)
+	/* Allow standalone Hibern8 enter on idle */
+#define UFSHCD_CAP_HIBERN8_ENTER_ON_IDLE (1 << 5)
 	/*
 	 * This capability allows the device auto-bkops to be always enabled
 	 * except during suspend (both runtime and suspend).
@@ -535,13 +926,49 @@
 	 * to do background operation when it's active but it might degrade
 	 * the performance of ongoing read/write operations.
 	 */
-#define UFSHCD_CAP_KEEP_AUTO_BKOPS_ENABLED_EXCEPT_SUSPEND (1 << 5)
+#define UFSHCD_CAP_KEEP_AUTO_BKOPS_ENABLED_EXCEPT_SUSPEND (1 << 6)
+	/*
+	 * If host controller hardware can be power collapsed when UFS link is
+	 * in hibern8 then enable this cap.
+	 */
+#define UFSHCD_CAP_POWER_COLLAPSE_DURING_HIBERN8 (1 << 7)
 
 	struct devfreq *devfreq;
 	struct ufs_clk_scaling clk_scaling;
 	bool is_sys_suspended;
+
+	enum bkops_status urgent_bkops_lvl;
+	bool is_urgent_bkops_lvl_checked;
+
+	/* sync b/w diff contexts */
+	struct rw_semaphore lock;
+	unsigned long shutdown_in_prog;
+
+	struct reset_control *core_reset;
+	/* If set, don't gate device ref_clk during clock gating */
+	bool no_ref_clk_gating;
+
+	int scsi_block_reqs_cnt;
+
+	bool full_init_linereset;
+	struct pinctrl *pctrl;
+
+	int			latency_hist_enabled;
+	struct io_latency_state io_lat_read;
+	struct io_latency_state io_lat_write;
+	bool restore_needed;
 };
 
+static inline void ufshcd_mark_shutdown_ongoing(struct ufs_hba *hba)
+{
+	set_bit(0, &hba->shutdown_in_prog);
+}
+
+static inline bool ufshcd_is_shutdown_ongoing(struct ufs_hba *hba)
+{
+	return !!(test_bit(0, &hba->shutdown_in_prog));
+}
+
 /* Returns true if clocks can be gated. Otherwise false */
 static inline bool ufshcd_is_clkgating_allowed(struct ufs_hba *hba)
 {
@@ -551,7 +978,7 @@
 {
 	return hba->caps & UFSHCD_CAP_HIBERN8_WITH_CLK_GATING;
 }
-static inline int ufshcd_is_clkscaling_enabled(struct ufs_hba *hba)
+static inline int ufshcd_is_clkscaling_supported(struct ufs_hba *hba)
 {
 	return hba->caps & UFSHCD_CAP_CLK_SCALING;
 }
@@ -559,6 +986,22 @@
 {
 	return hba->caps & UFSHCD_CAP_AUTO_BKOPS_SUSPEND;
 }
+static inline bool ufshcd_is_hibern8_on_idle_allowed(struct ufs_hba *hba)
+{
+	return hba->caps & UFSHCD_CAP_HIBERN8_ENTER_ON_IDLE;
+}
+
+static inline bool ufshcd_is_power_collapse_during_hibern8_allowed(
+						struct ufs_hba *hba)
+{
+	return !!(hba->caps & UFSHCD_CAP_POWER_COLLAPSE_DURING_HIBERN8);
+}
+
+static inline bool ufshcd_keep_autobkops_enabled_except_suspend(
+							struct ufs_hba *hba)
+{
+	return hba->caps & UFSHCD_CAP_KEEP_AUTO_BKOPS_ENABLED_EXCEPT_SUSPEND;
+}
 
 static inline bool ufshcd_is_intr_aggr_allowed(struct ufs_hba *hba)
 {
@@ -569,10 +1012,21 @@
 		return false;
 }
 
+static inline bool ufshcd_is_auto_hibern8_supported(struct ufs_hba *hba)
+{
+	return !!((hba->capabilities & MASK_AUTO_HIBERN8_SUPPORT) &&
+		!(hba->quirks & UFSHCD_QUIRK_BROKEN_AUTO_HIBERN8));
+}
+
+static inline bool ufshcd_is_crypto_supported(struct ufs_hba *hba)
+{
+	return !!(hba->capabilities & MASK_CRYPTO_SUPPORT);
+}
+
 #define ufshcd_writel(hba, val, reg)	\
-	writel((val), (hba)->mmio_base + (reg))
+	writel_relaxed((val), (hba)->mmio_base + (reg))
 #define ufshcd_readl(hba, reg)	\
-	readl((hba)->mmio_base + (reg))
+	readl_relaxed((hba)->mmio_base + (reg))
 
 /**
  * ufshcd_rmwl - read modify write into a register
@@ -595,21 +1049,11 @@
 void ufshcd_dealloc_host(struct ufs_hba *);
 int ufshcd_init(struct ufs_hba * , void __iomem * , unsigned int);
 void ufshcd_remove(struct ufs_hba *);
-
-/**
- * ufshcd_hba_stop - Send controller to reset state
- * @hba: per adapter instance
- */
-static inline void ufshcd_hba_stop(struct ufs_hba *hba)
-{
-	ufshcd_writel(hba, CONTROLLER_DISABLE,  REG_CONTROLLER_ENABLE);
-}
-
-static inline void check_upiu_size(void)
-{
-	BUILD_BUG_ON(ALIGNED_UPIU_SIZE <
-		GENERAL_UPIU_REQUEST_SIZE + QUERY_DESC_MAX_SIZE);
-}
+int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask,
+				u32 val, unsigned long interval_us,
+				unsigned long timeout_ms, bool can_sleep);
+int ufshcd_uic_hibern8_enter(struct ufs_hba *hba);
+int ufshcd_uic_hibern8_exit(struct ufs_hba *hba);
 
 /**
  * ufshcd_set_variant - set variant specific data to the hba
@@ -631,12 +1075,6 @@
 	BUG_ON(!hba);
 	return hba->priv;
 }
-static inline bool ufshcd_keep_autobkops_enabled_except_suspend(
-							struct ufs_hba *hba)
-{
-	return hba->caps & UFSHCD_CAP_KEEP_AUTO_BKOPS_ENABLED_EXCEPT_SUSPEND;
-}
-
 extern int ufshcd_runtime_suspend(struct ufs_hba *hba);
 extern int ufshcd_runtime_resume(struct ufs_hba *hba);
 extern int ufshcd_runtime_idle(struct ufs_hba *hba);
@@ -694,76 +1132,139 @@
 	return ufshcd_dme_get_attr(hba, attr_sel, mib_val, DME_PEER);
 }
 
+/**
+ * ufshcd_dme_rmw - read-modify-write a DME attribute
+ * @hba: per adapter instance
+ * @mask: mask to apply to the read value
+ * @val: actual value to write
+ * @attr: DME attribute
+ */
+static inline int ufshcd_dme_rmw(struct ufs_hba *hba, u32 mask,
+				 u32 val, u32 attr)
+{
+	u32 cfg = 0;
+	int err = 0;
+
+	err = ufshcd_dme_get(hba, UIC_ARG_MIB(attr), &cfg);
+	if (err)
+		goto out;
+
+	cfg &= ~mask;
+	cfg |= (val & mask);
+
+	err = ufshcd_dme_set(hba, UIC_ARG_MIB(attr), cfg);
+
+out:
+	return err;
+}
+
+int ufshcd_read_device_desc(struct ufs_hba *hba, u8 *buf, u32 size);
+
+static inline bool ufshcd_is_hs_mode(struct ufs_pa_layer_attr *pwr_info)
+{
+	return (pwr_info->pwr_rx == FAST_MODE ||
+		pwr_info->pwr_rx == FASTAUTO_MODE) &&
+		(pwr_info->pwr_tx == FAST_MODE ||
+		pwr_info->pwr_tx == FASTAUTO_MODE);
+}
+
+#ifdef CONFIG_DEBUG_FS
+static inline void ufshcd_init_req_stats(struct ufs_hba *hba)
+{
+	memset(hba->ufs_stats.req_stats, 0, sizeof(hba->ufs_stats.req_stats));
+}
+#else
+static inline void ufshcd_init_req_stats(struct ufs_hba *hba) {}
+#endif
+
+#define ASCII_STD true
+#define UTF16_STD false
+int ufshcd_read_string_desc(struct ufs_hba *hba, int desc_index, u8 *buf,
+				u32 size, bool ascii);
+
+/* Expose Query-Request API */
+int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
+	enum flag_idn idn, bool *flag_res);
+int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
+	enum attr_idn idn, u8 index, u8 selector, u32 *attr_val);
+int ufshcd_query_descriptor(struct ufs_hba *hba, enum query_opcode opcode,
+	enum desc_idn idn, u8 index, u8 selector, u8 *desc_buf, int *buf_len);
+
 int ufshcd_hold(struct ufs_hba *hba, bool async);
-void ufshcd_release(struct ufs_hba *hba);
+void ufshcd_release(struct ufs_hba *hba, bool no_sched);
+int ufshcd_wait_for_doorbell_clr(struct ufs_hba *hba, u64 wait_timeout_us);
+int ufshcd_change_power_mode(struct ufs_hba *hba,
+			     struct ufs_pa_layer_attr *pwr_mode);
+void ufshcd_abort_outstanding_transfer_requests(struct ufs_hba *hba,
+		int result);
+u32 ufshcd_get_local_unipro_ver(struct ufs_hba *hba);
+
+void ufshcd_scsi_block_requests(struct ufs_hba *hba);
+void ufshcd_scsi_unblock_requests(struct ufs_hba *hba);
 
 /* Wrapper functions for safely calling variant operations */
 static inline const char *ufshcd_get_var_name(struct ufs_hba *hba)
 {
-	if (hba->vops)
-		return hba->vops->name;
+	if (hba->var && hba->var->name)
+		return hba->var->name;
 	return "";
 }
 
 static inline int ufshcd_vops_init(struct ufs_hba *hba)
 {
-	if (hba->vops && hba->vops->init)
-		return hba->vops->init(hba);
-
+	if (hba->var && hba->var->vops && hba->var->vops->init)
+		return hba->var->vops->init(hba);
 	return 0;
 }
 
 static inline void ufshcd_vops_exit(struct ufs_hba *hba)
 {
-	if (hba->vops && hba->vops->exit)
-		return hba->vops->exit(hba);
+	if (hba->var && hba->var->vops && hba->var->vops->exit)
+		hba->var->vops->exit(hba);
 }
 
 static inline u32 ufshcd_vops_get_ufs_hci_version(struct ufs_hba *hba)
 {
-	if (hba->vops && hba->vops->get_ufs_hci_version)
-		return hba->vops->get_ufs_hci_version(hba);
-
+	if (hba->var && hba->var->vops && hba->var->vops->get_ufs_hci_version)
+		return hba->var->vops->get_ufs_hci_version(hba);
 	return ufshcd_readl(hba, REG_UFS_VERSION);
 }
 
 static inline int ufshcd_vops_clk_scale_notify(struct ufs_hba *hba,
 			bool up, enum ufs_notify_change_status status)
 {
-	if (hba->vops && hba->vops->clk_scale_notify)
-		return hba->vops->clk_scale_notify(hba, up, status);
+	if (hba->var && hba->var->vops && hba->var->vops->clk_scale_notify)
+		return hba->var->vops->clk_scale_notify(hba, up, status);
 	return 0;
 }
 
-static inline int ufshcd_vops_setup_clocks(struct ufs_hba *hba, bool on)
+static inline int ufshcd_vops_setup_clocks(struct ufs_hba *hba, bool on,
+					   bool is_gating_context)
 {
-	if (hba->vops && hba->vops->setup_clocks)
-		return hba->vops->setup_clocks(hba, on);
+	if (hba->var && hba->var->vops && hba->var->vops->setup_clocks)
+		return hba->var->vops->setup_clocks(hba, on, is_gating_context);
 	return 0;
 }
 
 static inline int ufshcd_vops_setup_regulators(struct ufs_hba *hba, bool status)
 {
-	if (hba->vops && hba->vops->setup_regulators)
-		return hba->vops->setup_regulators(hba, status);
-
+	if (hba->var && hba->var->vops && hba->var->vops->setup_regulators)
+		return hba->var->vops->setup_regulators(hba, status);
 	return 0;
 }
 
 static inline int ufshcd_vops_hce_enable_notify(struct ufs_hba *hba,
 						bool status)
 {
-	if (hba->vops && hba->vops->hce_enable_notify)
-		return hba->vops->hce_enable_notify(hba, status);
-
+	if (hba->var && hba->var->vops && hba->var->vops->hce_enable_notify)
+		return hba->var->vops->hce_enable_notify(hba, status);
 	return 0;
 }
 static inline int ufshcd_vops_link_startup_notify(struct ufs_hba *hba,
 						bool status)
 {
-	if (hba->vops && hba->vops->link_startup_notify)
-		return hba->vops->link_startup_notify(hba, status);
-
+	if (hba->var && hba->var->vops && hba->var->vops->link_startup_notify)
+		return hba->var->vops->link_startup_notify(hba, status);
 	return 0;
 }
 
@@ -772,33 +1273,156 @@
 				  struct ufs_pa_layer_attr *dev_max_params,
 				  struct ufs_pa_layer_attr *dev_req_params)
 {
-	if (hba->vops && hba->vops->pwr_change_notify)
-		return hba->vops->pwr_change_notify(hba, status,
+	if (hba->var && hba->var->vops && hba->var->vops->pwr_change_notify)
+		return hba->var->vops->pwr_change_notify(hba, status,
 					dev_max_params, dev_req_params);
-
 	return -ENOTSUPP;
 }
 
-static inline int ufshcd_vops_suspend(struct ufs_hba *hba, enum ufs_pm_op op)
+static inline int ufshcd_vops_apply_dev_quirks(struct ufs_hba *hba)
 {
-	if (hba->vops && hba->vops->suspend)
-		return hba->vops->suspend(hba, op);
+	if (hba->var && hba->var->vops && hba->var->vops->apply_dev_quirks)
+		return hba->var->vops->apply_dev_quirks(hba);
+	return 0;
+}
 
+static inline int ufshcd_vops_suspend(struct ufs_hba *hba, enum ufs_pm_op op)
+{
+	if (hba->var && hba->var->vops && hba->var->vops->suspend)
+		return hba->var->vops->suspend(hba, op);
 	return 0;
 }
 
 static inline int ufshcd_vops_resume(struct ufs_hba *hba, enum ufs_pm_op op)
 {
-	if (hba->vops && hba->vops->resume)
-		return hba->vops->resume(hba, op);
+	if (hba->var && hba->var->vops && hba->var->vops->resume)
+		return hba->var->vops->resume(hba, op);
+	return 0;
+}
+
+static inline int ufshcd_vops_full_reset(struct ufs_hba *hba)
+{
+	if (hba->var && hba->var->vops && hba->var->vops->full_reset)
+		return hba->var->vops->full_reset(hba);
+	return 0;
+}
+
+static inline void ufshcd_vops_dbg_register_dump(struct ufs_hba *hba,
+						 bool no_sleep)
+{
+	if (hba->var && hba->var->vops && hba->var->vops->dbg_register_dump)
+		hba->var->vops->dbg_register_dump(hba, no_sleep);
+}
+
+static inline int ufshcd_vops_update_sec_cfg(struct ufs_hba *hba,
+						bool restore_sec_cfg)
+{
+	if (hba->var && hba->var->vops && hba->var->vops->update_sec_cfg)
+		return hba->var->vops->update_sec_cfg(hba, restore_sec_cfg);
+	return 0;
+}
+
+static inline u32 ufshcd_vops_get_scale_down_gear(struct ufs_hba *hba)
+{
+	if (hba->var && hba->var->vops && hba->var->vops->get_scale_down_gear)
+		return hba->var->vops->get_scale_down_gear(hba);
+	/* Default to lowest high speed gear */
+	return UFS_HS_G1;
+}
 
+static inline int ufshcd_vops_set_bus_vote(struct ufs_hba *hba, bool on)
+{
+	if (hba->var && hba->var->vops && hba->var->vops->set_bus_vote)
+		return hba->var->vops->set_bus_vote(hba, on);
 	return 0;
 }
 
-static inline void ufshcd_vops_dbg_register_dump(struct ufs_hba *hba)
+#ifdef CONFIG_DEBUG_FS
+static inline void ufshcd_vops_add_debugfs(struct ufs_hba *hba,
+						struct dentry *root)
+{
+	if (hba->var && hba->var->vops && hba->var->vops->add_debugfs)
+		hba->var->vops->add_debugfs(hba, root);
+}
+
+static inline void ufshcd_vops_remove_debugfs(struct ufs_hba *hba)
+{
+	if (hba->var && hba->var->vops && hba->var->vops->remove_debugfs)
+		hba->var->vops->remove_debugfs(hba);
+}
+#else
+static inline void ufshcd_vops_add_debugfs(struct ufs_hba *hba,
+						struct dentry *root)
+{
+}
+
+static inline void ufshcd_vops_remove_debugfs(struct ufs_hba *hba)
+{
+}
+#endif
+
+static inline int ufshcd_vops_crypto_req_setup(struct ufs_hba *hba,
+	struct ufshcd_lrb *lrbp, u8 *cc_index, bool *enable, u64 *dun)
+{
+	if (hba->var && hba->var->crypto_vops &&
+		hba->var->crypto_vops->crypto_req_setup)
+		return hba->var->crypto_vops->crypto_req_setup(hba, lrbp,
+			cc_index, enable, dun);
+	return 0;
+}
+
+static inline int ufshcd_vops_crypto_engine_cfg_start(struct ufs_hba *hba,
+						unsigned int task_tag)
+{
+	if (hba->var && hba->var->crypto_vops &&
+	    hba->var->crypto_vops->crypto_engine_cfg_start)
+		return hba->var->crypto_vops->crypto_engine_cfg_start
+				(hba, task_tag);
+	return 0;
+}
+
+static inline int ufshcd_vops_crypto_engine_cfg_end(struct ufs_hba *hba,
+						struct ufshcd_lrb *lrbp,
+						struct request *req)
+{
+	if (hba->var && hba->var->crypto_vops &&
+	    hba->var->crypto_vops->crypto_engine_cfg_end)
+		return hba->var->crypto_vops->crypto_engine_cfg_end
+				(hba, lrbp, req);
+	return 0;
+}
+
+static inline int ufshcd_vops_crypto_engine_reset(struct ufs_hba *hba)
+{
+	if (hba->var && hba->var->crypto_vops &&
+	    hba->var->crypto_vops->crypto_engine_reset)
+		return hba->var->crypto_vops->crypto_engine_reset(hba);
+	return 0;
+}
+
+static inline int ufshcd_vops_crypto_engine_get_status(struct ufs_hba *hba,
+		u32 *status)
+{
+	if (hba->var && hba->var->crypto_vops &&
+	    hba->var->crypto_vops->crypto_engine_get_status)
+		return hba->var->crypto_vops->crypto_engine_get_status(hba,
+			status);
+	return 0;
+}
+
+static inline void ufshcd_vops_pm_qos_req_start(struct ufs_hba *hba,
+		struct request *req)
+{
+	if (hba->var && hba->var->pm_qos_vops &&
+		hba->var->pm_qos_vops->req_start)
+		hba->var->pm_qos_vops->req_start(hba, req);
+}
+
+static inline void ufshcd_vops_pm_qos_req_end(struct ufs_hba *hba,
+		struct request *req, bool lock)
 {
-	if (hba->vops && hba->vops->dbg_register_dump)
-		hba->vops->dbg_register_dump(hba);
+	if (hba->var && hba->var->pm_qos_vops && hba->var->pm_qos_vops->req_end)
+		hba->var->pm_qos_vops->req_end(hba, req, lock);
 }
 
 #endif /* End of Header */
diff -ruw linux-4.4.115/drivers/scsi/ufs/ufshcd-pltfrm.c linux-4.4.115-fbx/drivers/scsi/ufs/ufshcd-pltfrm.c
--- linux-4.4.115/drivers/scsi/ufs/ufshcd-pltfrm.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/scsi/ufs/ufshcd-pltfrm.c	2019-01-22 16:16:26.631274732 +0100
@@ -40,6 +40,22 @@
 #include "ufshcd.h"
 #include "ufshcd-pltfrm.h"
 
+static int ufshcd_parse_reset_info(struct ufs_hba *hba)
+{
+	int ret = 0;
+
+	hba->core_reset = devm_reset_control_get(hba->dev,
+				"core_reset");
+	if (IS_ERR(hba->core_reset)) {
+		ret = PTR_ERR(hba->core_reset);
+		dev_err(hba->dev, "core_reset unavailable,err = %d\n",
+				ret);
+		hba->core_reset = NULL;
+	}
+
+	return ret;
+}
+
 static int ufshcd_parse_clock_info(struct ufs_hba *hba)
 {
 	int ret = 0;
@@ -161,7 +177,7 @@
 	if (ret) {
 		dev_err(dev, "%s: unable to find %s err %d\n",
 				__func__, prop_name, ret);
-		goto out_free;
+		goto out;
 	}
 
 	vreg->min_uA = 0;
@@ -183,9 +199,6 @@
 
 	goto out;
 
-out_free:
-	devm_kfree(dev, vreg);
-	vreg = NULL;
 out:
 	if (!ret)
 		*out_vreg = vreg;
@@ -224,7 +237,34 @@
 	return err;
 }
 
-#ifdef CONFIG_PM
+static void ufshcd_parse_pm_levels(struct ufs_hba *hba)
+{
+	struct device *dev = hba->dev;
+	struct device_node *np = dev->of_node;
+
+	if (np) {
+		if (of_property_read_u32(np, "rpm-level", &hba->rpm_lvl))
+			hba->rpm_lvl = -1;
+		if (of_property_read_u32(np, "spm-level", &hba->spm_lvl))
+			hba->spm_lvl = -1;
+	}
+}
+
+static int ufshcd_parse_pinctrl_info(struct ufs_hba *hba)
+{
+	int ret = 0;
+
+	/* Try to obtain pinctrl handle */
+	hba->pctrl = devm_pinctrl_get(hba->dev);
+	if (IS_ERR(hba->pctrl)) {
+		ret = PTR_ERR(hba->pctrl);
+		hba->pctrl = NULL;
+	}
+
+	return ret;
+}
+
+#ifdef CONFIG_SMP
 /**
  * ufshcd_pltfrm_suspend - suspend power management function
  * @dev: pointer to device handle
@@ -280,12 +320,12 @@
 /**
  * ufshcd_pltfrm_init - probe routine of the driver
  * @pdev: pointer to Platform device handle
- * @vops: pointer to variant ops
+ * @var: pointer to variant specific data
  *
  * Returns 0 on success, non-zero value on failure
  */
 int ufshcd_pltfrm_init(struct platform_device *pdev,
-		       struct ufs_hba_variant_ops *vops)
+		       struct ufs_hba_variant *var)
 {
 	struct ufs_hba *hba;
 	void __iomem *mmio_base;
@@ -313,7 +353,7 @@
 		goto out;
 	}
 
-	hba->vops = vops;
+	hba->var = var;
 
 	err = ufshcd_parse_clock_info(hba);
 	if (err) {
@@ -328,22 +368,37 @@
 		goto dealloc_host;
 	}
 
-	pm_runtime_set_active(&pdev->dev);
-	pm_runtime_enable(&pdev->dev);
+	err = ufshcd_parse_reset_info(hba);
+	if (err) {
+		dev_err(&pdev->dev, "%s: reset parse failed %d\n",
+				__func__, err);
+		goto dealloc_host;
+	}
+
+	err = ufshcd_parse_pinctrl_info(hba);
+	if (err) {
+		dev_dbg(&pdev->dev, "%s: unable to parse pinctrl data %d\n",
+				__func__, err);
+		/* let's not fail the probe */
+	}
+
+	ufshcd_parse_pm_levels(hba);
+
+	if (!dev->dma_mask)
+		dev->dma_mask = &dev->coherent_dma_mask;
 
 	err = ufshcd_init(hba, mmio_base, irq);
 	if (err) {
 		dev_err(dev, "Intialization failed\n");
-		goto out_disable_rpm;
+		goto dealloc_host;
 	}
 
 	platform_set_drvdata(pdev, hba);
 
-	return 0;
+	pm_runtime_set_active(&pdev->dev);
+	pm_runtime_enable(&pdev->dev);
 
-out_disable_rpm:
-	pm_runtime_disable(&pdev->dev);
-	pm_runtime_set_suspended(&pdev->dev);
+	return 0;
 dealloc_host:
 	ufshcd_dealloc_host(hba);
 out:
diff -ruw linux-4.4.115/drivers/scsi/ufs/ufshcd-pltfrm.h linux-4.4.115-fbx/drivers/scsi/ufs/ufshcd-pltfrm.h
--- linux-4.4.115/drivers/scsi/ufs/ufshcd-pltfrm.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/scsi/ufs/ufshcd-pltfrm.h	2019-01-22 16:16:26.631274732 +0100
@@ -17,7 +17,7 @@
 #include "ufshcd.h"
 
 int ufshcd_pltfrm_init(struct platform_device *pdev,
-		       struct ufs_hba_variant_ops *vops);
+		       struct ufs_hba_variant *var);
 void ufshcd_pltfrm_shutdown(struct platform_device *pdev);
 
 #ifdef CONFIG_PM
diff -ruw linux-4.4.115/drivers/scsi/ufs/ufshci.h linux-4.4.115-fbx/drivers/scsi/ufs/ufshci.h
--- linux-4.4.115/drivers/scsi/ufs/ufshci.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/scsi/ufs/ufshci.h	2019-01-22 16:16:26.635274769 +0100
@@ -48,6 +48,7 @@
 	REG_UFS_VERSION				= 0x08,
 	REG_CONTROLLER_DEV_ID			= 0x10,
 	REG_CONTROLLER_PROD_ID			= 0x14,
+	REG_AUTO_HIBERN8_IDLE_TIMER		= 0x18,
 	REG_INTERRUPT_STATUS			= 0x20,
 	REG_INTERRUPT_ENABLE			= 0x24,
 	REG_CONTROLLER_STATUS			= 0x30,
@@ -72,15 +73,24 @@
 	REG_UIC_COMMAND_ARG_1			= 0x94,
 	REG_UIC_COMMAND_ARG_2			= 0x98,
 	REG_UIC_COMMAND_ARG_3			= 0x9C,
+
+	UFSHCI_REG_SPACE_SIZE			= 0xA0,
+
+	REG_UFS_CCAP				= 0x100,
+	REG_UFS_CRYPTOCAP			= 0x104,
+
+	UFSHCI_CRYPTO_REG_SPACE_SIZE		= 0x400,
 };
 
 /* Controller capability masks */
 enum {
 	MASK_TRANSFER_REQUESTS_SLOTS		= 0x0000001F,
 	MASK_TASK_MANAGEMENT_REQUEST_SLOTS	= 0x00070000,
+	MASK_AUTO_HIBERN8_SUPPORT		= 0x00800000,
 	MASK_64_ADDRESSING_SUPPORT		= 0x01000000,
 	MASK_OUT_OF_ORDER_DATA_DELIVERY_SUPPORT	= 0x02000000,
 	MASK_UIC_DME_TEST_MODE_SUPPORT		= 0x04000000,
+	MASK_CRYPTO_SUPPORT			= 0x10000000,
 };
 
 /* UFS Version 08h */
@@ -92,6 +102,7 @@
 	UFSHCI_VERSION_10 = 0x00010000, /* 1.0 */
 	UFSHCI_VERSION_11 = 0x00010100, /* 1.1 */
 	UFSHCI_VERSION_20 = 0x00000200, /* 2.0 */
+	UFSHCI_VERSION_21 = 0x00000210, /* 2.1 */
 };
 
 /*
@@ -108,8 +119,19 @@
 #define MANUFACTURE_ID_MASK	UFS_MASK(0xFFFF, 0)
 #define PRODUCT_ID_MASK		UFS_MASK(0xFFFF, 16)
 
-#define UFS_BIT(x)	(1L << (x))
+/*
+ * AHIT - Auto-Hibernate Idle Timer  18h
+ */
+#define AUTO_HIBERN8_IDLE_TIMER_MASK		UFS_MASK(0x3FF, 0)
+#define AUTO_HIBERN8_TIMER_SCALE_MASK		UFS_MASK(0x7, 10)
+#define AUTO_HIBERN8_TIMER_SCALE_1_US		UFS_MASK(0x0, 10)
+#define AUTO_HIBERN8_TIMER_SCALE_10_US		UFS_MASK(0x1, 10)
+#define AUTO_HIBERN8_TIMER_SCALE_100_US		UFS_MASK(0x2, 10)
+#define AUTO_HIBERN8_TIMER_SCALE_1_MS		UFS_MASK(0x3, 10)
+#define AUTO_HIBERN8_TIMER_SCALE_10_MS		UFS_MASK(0x4, 10)
+#define AUTO_HIBERN8_TIMER_SCALE_100_MS		UFS_MASK(0x5, 10)
 
+/* IS - Interrupt status (20h) / IE - Interrupt enable (24h) */
 #define UTP_TRANSFER_REQ_COMPL			UFS_BIT(0)
 #define UIC_DME_END_PT_RESET			UFS_BIT(1)
 #define UIC_ERROR				UFS_BIT(2)
@@ -124,6 +146,7 @@
 #define DEVICE_FATAL_ERROR			UFS_BIT(11)
 #define CONTROLLER_FATAL_ERROR			UFS_BIT(16)
 #define SYSTEM_BUS_FATAL_ERROR			UFS_BIT(17)
+#define CRYPTO_ENGINE_FATAL_ERROR		UFS_BIT(18)
 
 #define UFSHCD_UIC_PWR_MASK	(UIC_HIBERNATE_ENTER |\
 				UIC_HIBERNATE_EXIT |\
@@ -134,11 +157,13 @@
 #define UFSHCD_ERROR_MASK	(UIC_ERROR |\
 				DEVICE_FATAL_ERROR |\
 				CONTROLLER_FATAL_ERROR |\
-				SYSTEM_BUS_FATAL_ERROR)
+				SYSTEM_BUS_FATAL_ERROR |\
+				CRYPTO_ENGINE_FATAL_ERROR)
 
 #define INT_FATAL_ERRORS	(DEVICE_FATAL_ERROR |\
 				CONTROLLER_FATAL_ERROR |\
-				SYSTEM_BUS_FATAL_ERROR)
+				SYSTEM_BUS_FATAL_ERROR |\
+				CRYPTO_ENGINE_FATAL_ERROR)
 
 /* HCS - Host Controller Status 30h */
 #define DEVICE_PRESENT				UFS_BIT(0)
@@ -160,16 +185,21 @@
 
 /* HCE - Host Controller Enable 34h */
 #define CONTROLLER_ENABLE	UFS_BIT(0)
+#define CRYPTO_GENERAL_ENABLE	UFS_BIT(1)
 #define CONTROLLER_DISABLE	0x0
 
 /* UECPA - Host UIC Error Code PHY Adapter Layer 38h */
 #define UIC_PHY_ADAPTER_LAYER_ERROR			UFS_BIT(31)
+#define UIC_PHY_ADAPTER_LAYER_GENERIC_ERROR		UFS_BIT(4)
 #define UIC_PHY_ADAPTER_LAYER_ERROR_CODE_MASK		0x1F
+#define UIC_PHY_ADAPTER_LAYER_LANE_ERR_MASK		0xF
 
 /* UECDL - Host UIC Error Code Data Link Layer 3Ch */
 #define UIC_DATA_LINK_LAYER_ERROR		UFS_BIT(31)
 #define UIC_DATA_LINK_LAYER_ERROR_CODE_MASK	0x7FFF
 #define UIC_DATA_LINK_LAYER_ERROR_PA_INIT	0x2000
+#define UIC_DATA_LINK_LAYER_ERROR_NAC_RECEIVED	0x0001
+#define UIC_DATA_LINK_LAYER_ERROR_TCx_REPLAY_TIMEOUT 0x0002
 
 /* UECN - Host UIC Error Code Network Layer 40h */
 #define UIC_NETWORK_LAYER_ERROR			UFS_BIT(31)
@@ -209,6 +239,7 @@
 
 /* GenSelectorIndex calculation macros for M-PHY attributes */
 #define UIC_ARG_MPHY_TX_GEN_SEL_INDEX(lane) (lane)
+#define UIC_ARG_MPHY_RX_GEN_SEL_INDEX(lane) (PA_MAXDATALANES + (lane))
 
 #define UIC_ARG_MIB_SEL(attr, sel)	((((attr) & 0xFFFF) << 16) |\
 					 ((sel) & 0xFFFF))
@@ -262,6 +293,9 @@
 
 	/* Interrupt disable mask for UFSHCI v1.1 */
 	INTERRUPT_MASK_ALL_VER_11	= 0x31FFF,
+
+	/* Interrupt disable mask for UFSHCI v2.1 */
+	INTERRUPT_MASK_ALL_VER_21	= 0x71FFF,
 };
 
 /*
@@ -299,6 +333,9 @@
 	OCS_PEER_COMM_FAILURE		= 0x5,
 	OCS_ABORTED			= 0x6,
 	OCS_FATAL_ERROR			= 0x7,
+	OCS_DEVICE_FATAL_ERROR		= 0x8,
+	OCS_INVALID_CRYPTO_CONFIG	= 0x9,
+	OCS_GENERAL_CRYPTO_ERROR	= 0xA,
 	OCS_INVALID_COMMAND_STATUS	= 0x0F,
 	MASK_OCS			= 0x0F,
 };
@@ -334,6 +371,8 @@
 	struct ufshcd_sg_entry    prd_table[SG_ALL];
 };
 
+#define UTRD_CRYPTO_ENABLE	UFS_BIT(23)
+
 /**
  * struct request_desc_header - Descriptor Header common to both UTRD and UTMRD
  * @dword0: Descriptor Header DW0
diff -ruw linux-4.4.115/drivers/scsi/ufs/ufs-qcom.c linux-4.4.115-fbx/drivers/scsi/ufs/ufs-qcom.c
--- linux-4.4.115/drivers/scsi/ufs/ufs-qcom.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/scsi/ufs/ufs-qcom.c	2019-10-29 09:26:24.797214471 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013-2015, Linux Foundation. All rights reserved.
+ * Copyright (c) 2013-2017, Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -14,15 +14,33 @@
 
 #include <linux/time.h>
 #include <linux/of.h>
+#include <linux/iopoll.h>
 #include <linux/platform_device.h>
-#include <linux/phy/phy.h>
 
+#ifdef CONFIG_QCOM_BUS_SCALING
+#include <linux/msm-bus.h>
+#endif
+
+#include <soc/qcom/scm.h>
+#include <linux/phy/phy.h>
 #include <linux/phy/phy-qcom-ufs.h>
+
 #include "ufshcd.h"
 #include "ufshcd-pltfrm.h"
 #include "unipro.h"
 #include "ufs-qcom.h"
 #include "ufshci.h"
+#include "ufs_quirks.h"
+#include "ufs-qcom-ice.h"
+#include "ufs-qcom-debugfs.h"
+#include <linux/clk/msm-clk.h>
+
+#define MAX_PROP_SIZE		   32
+#define VDDP_REF_CLK_MIN_UV        1200000
+#define VDDP_REF_CLK_MAX_UV        1200000
+/* TODO: further tuning for this parameter may be required */
+#define UFS_QCOM_PM_QOS_UNVOTE_TIMEOUT_US	(10000) /* microseconds */
+
 #define UFS_QCOM_DEFAULT_DBG_PRINT_EN	\
 	(UFS_QCOM_DBG_PRINT_REGS_EN | UFS_QCOM_DBG_PRINT_TEST_BUS_EN)
 
@@ -44,18 +62,24 @@
 
 static struct ufs_qcom_host *ufs_qcom_hosts[MAX_UFS_QCOM_HOSTS];
 
-static int ufs_qcom_set_bus_vote(struct ufs_qcom_host *host, int vote);
+static int ufs_qcom_update_sec_cfg(struct ufs_hba *hba, bool restore_sec_cfg);
 static void ufs_qcom_get_default_testbus_cfg(struct ufs_qcom_host *host);
 static int ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(struct ufs_hba *hba,
 						       u32 clk_cycles);
+static void ufs_qcom_pm_qos_suspend(struct ufs_qcom_host *host);
 
 static void ufs_qcom_dump_regs(struct ufs_hba *hba, int offset, int len,
 		char *prefix)
 {
 	print_hex_dump(KERN_ERR, prefix,
 			len > 4 ? DUMP_PREFIX_OFFSET : DUMP_PREFIX_NONE,
-			16, 4, (void __force *)hba->mmio_base + offset,
-			len * 4, false);
+			16, 4, hba->mmio_base + offset, len * 4, false);
+}
+
+static void ufs_qcom_dump_regs_wrapper(struct ufs_hba *hba, int offset, int len,
+		char *prefix, void *priv)
+{
+	ufs_qcom_dump_regs(hba, offset, len, prefix);
 }
 
 static int ufs_qcom_get_connected_tx_lanes(struct ufs_hba *hba, u32 *tx_lanes)
@@ -78,13 +102,10 @@
 	int err = 0;
 
 	clk = devm_clk_get(dev, name);
-	if (IS_ERR(clk)) {
+	if (IS_ERR(clk))
 		err = PTR_ERR(clk);
-		dev_err(dev, "%s: failed to get %s err %d",
-				__func__, name, err);
-	} else {
+	else
 		*clk_out = clk;
-	}
 
 	return err;
 }
@@ -106,8 +127,10 @@
 	if (!host->is_lane_clks_enabled)
 		return;
 
+	if (host->tx_l1_sync_clk)
 	clk_disable_unprepare(host->tx_l1_sync_clk);
 	clk_disable_unprepare(host->tx_l0_sync_clk);
+	if (host->rx_l1_sync_clk)
 	clk_disable_unprepare(host->rx_l1_sync_clk);
 	clk_disable_unprepare(host->rx_l0_sync_clk);
 
@@ -132,21 +155,20 @@
 	if (err)
 		goto disable_rx_l0;
 
+	if (host->hba->lanes_per_direction > 1) {
 	err = ufs_qcom_host_clk_enable(dev, "rx_lane1_sync_clk",
 		host->rx_l1_sync_clk);
 	if (err)
 		goto disable_tx_l0;
 
-	err = ufs_qcom_host_clk_enable(dev, "tx_lane1_sync_clk",
+		/* The tx lane1 clk could be muxed, hence keep this optional */
+		if (host->tx_l1_sync_clk)
+			ufs_qcom_host_clk_enable(dev, "tx_lane1_sync_clk",
 		host->tx_l1_sync_clk);
-	if (err)
-		goto disable_rx_l1;
-
+	}
 	host->is_lane_clks_enabled = true;
 	goto out;
 
-disable_rx_l1:
-	clk_disable_unprepare(host->rx_l1_sync_clk);
 disable_tx_l0:
 	clk_disable_unprepare(host->tx_l0_sync_clk);
 disable_rx_l0:
@@ -162,42 +184,34 @@
 
 	err = ufs_qcom_host_clk_get(dev,
 			"rx_lane0_sync_clk", &host->rx_l0_sync_clk);
-	if (err)
+	if (err) {
+		dev_err(dev, "%s: failed to get rx_lane0_sync_clk, err %d",
+				__func__, err);
 		goto out;
+	}
 
 	err = ufs_qcom_host_clk_get(dev,
 			"tx_lane0_sync_clk", &host->tx_l0_sync_clk);
-	if (err)
+	if (err) {
+		dev_err(dev, "%s: failed to get tx_lane0_sync_clk, err %d",
+				__func__, err);
 		goto out;
+	}
 
+	/* In case of single lane per direction, don't read lane1 clocks */
+	if (host->hba->lanes_per_direction > 1) {
 	err = ufs_qcom_host_clk_get(dev, "rx_lane1_sync_clk",
 		&host->rx_l1_sync_clk);
-	if (err)
+		if (err) {
+			dev_err(dev, "%s: failed to get rx_lane1_sync_clk, err %d",
+					__func__, err);
 		goto out;
+		}
 
-	err = ufs_qcom_host_clk_get(dev, "tx_lane1_sync_clk",
+		/* The tx lane1 clk could be muxed, hence keep this optional */
+		ufs_qcom_host_clk_get(dev, "tx_lane1_sync_clk",
 		&host->tx_l1_sync_clk);
-
-out:
-	return err;
 }
-
-static int ufs_qcom_link_startup_post_change(struct ufs_hba *hba)
-{
-	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
-	struct phy *phy = host->generic_phy;
-	u32 tx_lanes;
-	int err = 0;
-
-	err = ufs_qcom_get_connected_tx_lanes(hba, &tx_lanes);
-	if (err)
-		goto out;
-
-	err = ufs_qcom_phy_set_tx_lane_enable(phy, tx_lanes);
-	if (err)
-		dev_err(hba->dev, "%s: ufs_qcom_phy_set_tx_lane_enable failed\n",
-			__func__);
-
 out:
 	return err;
 }
@@ -267,8 +281,7 @@
 	ret = ufs_qcom_phy_calibrate_phy(phy, is_rate_B);
 
 	if (ret) {
-		dev_err(hba->dev,
-		"%s: ufs_qcom_phy_calibrate_phy()failed, ret = %d\n",
+		dev_err(hba->dev, "%s: ufs_qcom_phy_calibrate_phy() failed, ret = %d\n",
 		__func__, ret);
 		goto out;
 	}
@@ -290,8 +303,7 @@
 
 	ret = ufs_qcom_phy_is_pcs_ready(phy);
 	if (ret)
-		dev_err(hba->dev,
-			"%s: is_physical_coding_sublayer_ready() failed, ret = %d\n",
+		dev_err(hba->dev, "%s: is_physical_coding_sublayer_ready() failed, ret = %d\n",
 			__func__, ret);
 
 	ufs_qcom_select_unipro_mode(host);
@@ -307,15 +319,65 @@
  * in a specific operation, UTP controller CGCs are by default disabled and
  * this function enables them (after every UFS link startup) to save some power
  * leakage.
+ *
+ * UFS host controller v3.0.0 onwards has an internal clock gating mechanism
+ * in Qunipro; enable it as well to save additional power.
  */
-static void ufs_qcom_enable_hw_clk_gating(struct ufs_hba *hba)
+static int ufs_qcom_enable_hw_clk_gating(struct ufs_hba *hba)
 {
+	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
+	int err = 0;
+
+	/* Enable UTP internal clock gating */
 	ufshcd_writel(hba,
 		ufshcd_readl(hba, REG_UFS_CFG2) | REG_UFS_CFG2_CGC_EN_ALL,
 		REG_UFS_CFG2);
 
 	/* Ensure that HW clock gating is enabled before next operations */
 	mb();
+
+	/* Enable Qunipro internal clock gating if supported */
+	if (!ufs_qcom_cap_qunipro_clk_gating(host))
+		goto out;
+
+	/* Enable all the mask bits */
+	err = ufshcd_dme_rmw(hba, DL_VS_CLK_CFG_MASK,
+				DL_VS_CLK_CFG_MASK, DL_VS_CLK_CFG);
+	if (err)
+		goto out;
+
+	err = ufshcd_dme_rmw(hba, PA_VS_CLK_CFG_REG_MASK,
+				PA_VS_CLK_CFG_REG_MASK, PA_VS_CLK_CFG_REG);
+	if (err)
+		goto out;
+
+	err = ufshcd_dme_rmw(hba, DME_VS_CORE_CLK_CTRL_DME_HW_CGC_EN,
+				DME_VS_CORE_CLK_CTRL_DME_HW_CGC_EN,
+				DME_VS_CORE_CLK_CTRL);
+out:
+	return err;
+}
+
+static void ufs_qcom_force_mem_config(struct ufs_hba *hba)
+{
+	struct ufs_clk_info *clki;
+
+	/*
+	 * Configure the behavior of ufs clocks core and peripheral
+	 * memory state when they are turned off.
+	 * This configuration is required to allow retaining
+	 * ICE crypto configuration (including keys) when
+	 * core_clk_ice is turned off, and powering down
+	 * non-ICE RAMs of host controller.
+	 */
+	list_for_each_entry(clki, &hba->clk_list_head, list) {
+		if (!strcmp(clki->name, "core_clk_ice"))
+			clk_set_flags(clki->clk, CLKFLAG_RETAIN_MEM);
+		else
+			clk_set_flags(clki->clk, CLKFLAG_NORETAIN_MEM);
+		clk_set_flags(clki->clk, CLKFLAG_NORETAIN_PERIPH);
+		clk_set_flags(clki->clk, CLKFLAG_PERIPH_OFF_CLEAR);
+	}
 }
 
 static int ufs_qcom_hce_enable_notify(struct ufs_hba *hba,
@@ -326,6 +388,7 @@
 
 	switch (status) {
 	case PRE_CHANGE:
+		ufs_qcom_force_mem_config(hba);
 		ufs_qcom_power_up_sequence(hba);
 		/*
 		 * The PHY PLL output is the source of tx/rx lane symbol
@@ -333,12 +396,19 @@
 		 * is initialized.
 		 */
 		err = ufs_qcom_enable_lane_clks(host);
+		if (!err && host->ice.pdev) {
+			err = ufs_qcom_ice_init(host);
+			if (err) {
+				dev_err(hba->dev, "%s: ICE init failed (%d)\n",
+					__func__, err);
+				err = -EINVAL;
+			}
+		}
+
 		break;
 	case POST_CHANGE:
 		/* check if UFS PHY moved from DISABLED to HIBERN8 */
 		err = ufs_qcom_check_hibern8(hba);
-		ufs_qcom_enable_hw_clk_gating(hba);
-
 		break;
 	default:
 		dev_err(hba->dev, "%s: invalid status %d\n", __func__, status);
@@ -351,8 +421,9 @@
 /**
  * Returns zero for success and non-zero in case of a failure
  */
-static int ufs_qcom_cfg_timers(struct ufs_hba *hba, u32 gear,
-			       u32 hs, u32 rate, bool update_link_startup_timer)
+static int __ufs_qcom_cfg_timers(struct ufs_hba *hba, u32 gear,
+			       u32 hs, u32 rate, bool update_link_startup_timer,
+			       bool is_pre_scale_up)
 {
 	int ret = 0;
 	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
@@ -386,9 +457,11 @@
 	 * SYS1CLK_1US_REG, TX_SYMBOL_CLK_1US_REG, CLK_NS_REG &
 	 * UFS_REG_PA_LINK_STARTUP_TIMER
 	 * But UTP controller uses SYS1CLK_1US_REG register for Interrupt
-	 * Aggregation logic.
+	 * Aggregation / Auto hibern8 logic.
 	*/
-	if (ufs_qcom_cap_qunipro(host) && !ufshcd_is_intr_aggr_allowed(hba))
+	if (ufs_qcom_cap_qunipro(host) &&
+	    (!(ufshcd_is_intr_aggr_allowed(hba) ||
+	       ufshcd_is_auto_hibern8_supported(hba))))
 		goto out;
 
 	if (gear == 0) {
@@ -397,9 +470,13 @@
 	}
 
 	list_for_each_entry(clki, &hba->clk_list_head, list) {
-		if (!strcmp(clki->name, "core_clk"))
+		if (!strcmp(clki->name, "core_clk")) {
+			if (is_pre_scale_up)
+				core_clk_rate = clki->max_freq;
+			else
 			core_clk_rate = clk_get_rate(clki->clk);
 	}
+	}
 
 	/* If frequency is smaller than 1MHz, set to 1MHz */
 	if (core_clk_rate < DEFAULT_CLK_RATE_HZ)
@@ -495,70 +572,247 @@
 	return ret;
 }
 
-static int ufs_qcom_link_startup_notify(struct ufs_hba *hba,
-					enum ufs_notify_change_status status)
+static int ufs_qcom_cfg_timers(struct ufs_hba *hba, u32 gear,
+			       u32 hs, u32 rate, bool update_link_startup_timer)
+{
+	return  __ufs_qcom_cfg_timers(hba, gear, hs, rate,
+				      update_link_startup_timer, false);
+}
+
+static int ufs_qcom_link_startup_pre_change(struct ufs_hba *hba)
 {
-	int err = 0;
 	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
+	struct phy *phy = host->generic_phy;
+	u32 unipro_ver;
+	int err = 0;
 
-	switch (status) {
-	case PRE_CHANGE:
-		if (ufs_qcom_cfg_timers(hba, UFS_PWM_G1, SLOWAUTO_MODE,
-					0, true)) {
+	if (ufs_qcom_cfg_timers(hba, UFS_PWM_G1, SLOWAUTO_MODE, 0, true)) {
 			dev_err(hba->dev, "%s: ufs_qcom_cfg_timers() failed\n",
 				__func__);
 			err = -EINVAL;
 			goto out;
 		}
 
-		if (ufs_qcom_cap_qunipro(host))
+	/* make sure RX LineCfg is enabled before link startup */
+	err = ufs_qcom_phy_ctrl_rx_linecfg(phy, true);
+	if (err)
+		goto out;
+
+	if (ufs_qcom_cap_qunipro(host)) {
 			/*
-			 * set unipro core clock cycles to 150 & clear clock
-			 * divider
+		 * set unipro core clock cycles to 150 & clear clock divider
 			 */
-			err = ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(hba,
-									  150);
+		err = ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(hba, 150);
+		if (err)
+			goto out;
+	}
+
+	err = ufs_qcom_enable_hw_clk_gating(hba);
+	if (err)
+		goto out;
+
+	/*
+	 * Some UFS devices (and possibly the host) have issues if LCC is
+	 * enabled. So we are setting PA_Local_TX_LCC_Enable to 0
+	 * before link startup which will make sure that both host
+	 * and device TX LCC are disabled once link startup is
+	 * completed.
+	 */
+	unipro_ver = ufshcd_get_local_unipro_ver(hba);
+	if (unipro_ver != UFS_UNIPRO_VER_1_41)
+		err = ufshcd_dme_set(hba,
+				     UIC_ARG_MIB(PA_LOCAL_TX_LCC_ENABLE),
+				     0);
+	if (err)
+		goto out;
+
+	if (!ufs_qcom_cap_qunipro_clk_gating(host))
+		goto out;
+
+	/* Enable all the mask bits */
+	err = ufshcd_dme_rmw(hba, SAVECONFIGTIME_MODE_MASK,
+				SAVECONFIGTIME_MODE_MASK,
+				PA_VS_CONFIG_REG1);
+out:
+	return err;
+}
+
+static int ufs_qcom_link_startup_post_change(struct ufs_hba *hba)
+{
+	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
+	struct phy *phy = host->generic_phy;
+	u32 tx_lanes;
+	int err = 0;
+
+	err = ufs_qcom_get_connected_tx_lanes(hba, &tx_lanes);
+	if (err)
+		goto out;
+
+	err = ufs_qcom_phy_set_tx_lane_enable(phy, tx_lanes);
+	if (err) {
+		dev_err(hba->dev, "%s: ufs_qcom_phy_set_tx_lane_enable failed\n",
+			__func__);
+		goto out;
+	}
 
+	/*
+	 * Some UFS devices send incorrect LineCfg data as part of power mode
+	 * change sequence which may cause host PHY to go into bad state.
+	 * Disabling Rx LineCfg of host PHY should help avoid this.
+	 */
+	if (ufshcd_get_local_unipro_ver(hba) == UFS_UNIPRO_VER_1_41)
+		err = ufs_qcom_phy_ctrl_rx_linecfg(phy, false);
+	if (err) {
+		dev_err(hba->dev, "%s: ufs_qcom_phy_ctrl_rx_linecfg failed\n",
+			__func__);
+		goto out;
+	}
+
+	/*
+	 * The UFS controller has a *clk_req output to GCC, one for each of the clocks
+	 * entering it. When *clk_req for a specific clock is de-asserted,
+	 * a corresponding clock from GCC is stopped. UFS controller de-asserts
+	 * *clk_req outputs when it is in Auto Hibernate state only if the
+	 * Clock request feature is enabled.
+	 * Enable the Clock request feature:
+	 * - Enable HW clock control for UFS clocks in GCC (handled by the
+	 *   clock driver as part of clk_prepare_enable).
+	 * - Set the AH8_CFG.*CLK_REQ register bits to 1.
+	 */
+	if (ufshcd_is_auto_hibern8_supported(hba))
+		ufshcd_writel(hba, ufshcd_readl(hba, UFS_AH8_CFG) |
+				   UFS_HW_CLK_CTRL_EN,
+				   UFS_AH8_CFG);
+	/*
+	 * Make sure clock request feature gets enabled for HW clk gating
+	 * before further operations.
+	 */
+	mb();
+
+out:
+	return err;
+}
+
+static int ufs_qcom_link_startup_notify(struct ufs_hba *hba,
+					enum ufs_notify_change_status status)
+{
+	int err = 0;
+
+	switch (status) {
+	case PRE_CHANGE:
+		err = ufs_qcom_link_startup_pre_change(hba);
 		break;
 	case POST_CHANGE:
-		ufs_qcom_link_startup_post_change(hba);
+		err = ufs_qcom_link_startup_post_change(hba);
 		break;
 	default:
 		break;
 	}
 
-out:
 	return err;
 }
 
+
+static int ufs_qcom_config_vreg(struct device *dev,
+		struct ufs_vreg *vreg, bool on)
+{
+	int ret = 0;
+	struct regulator *reg;
+	int min_uV, uA_load;
+
+	if (!vreg) {
+		WARN_ON(1);
+		ret = -EINVAL;
+		goto out;
+	}
+
+	reg = vreg->reg;
+	if (regulator_count_voltages(reg) > 0) {
+		min_uV = on ? vreg->min_uV : 0;
+		ret = regulator_set_voltage(reg, min_uV, vreg->max_uV);
+		if (ret) {
+			dev_err(dev, "%s: %s set voltage failed, err=%d\n",
+					__func__, vreg->name, ret);
+			goto out;
+		}
+
+		uA_load = on ? vreg->max_uA : 0;
+		ret = regulator_set_load(vreg->reg, uA_load);
+		if (ret)
+			goto out;
+	}
+out:
+	return ret;
+}
+
+static int ufs_qcom_enable_vreg(struct device *dev, struct ufs_vreg *vreg)
+{
+	int ret = 0;
+
+	if (vreg->enabled)
+		return ret;
+
+	ret = ufs_qcom_config_vreg(dev, vreg, true);
+	if (ret)
+		goto out;
+
+	ret = regulator_enable(vreg->reg);
+	if (ret)
+		goto out;
+
+	vreg->enabled = true;
+out:
+	return ret;
+}
+
+static int ufs_qcom_disable_vreg(struct device *dev, struct ufs_vreg *vreg)
+{
+	int ret = 0;
+
+	if (!vreg->enabled)
+		return ret;
+
+	ret = regulator_disable(vreg->reg);
+	if (ret)
+		goto out;
+
+	ret = ufs_qcom_config_vreg(dev, vreg, false);
+	if (ret)
+		goto out;
+
+	vreg->enabled = false;
+out:
+	return ret;
+}
+
 static int ufs_qcom_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
 {
 	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
 	struct phy *phy = host->generic_phy;
 	int ret = 0;
 
-	if (ufs_qcom_is_link_off(hba)) {
 		/*
-		 * Disable the tx/rx lane symbol clocks before PHY is
-		 * powered down as the PLL source should be disabled
-		 * after downstream clocks are disabled.
+	 * If UniPro link is not active or OFF, PHY ref_clk, main PHY analog
+	 * power rail and low noise analog power rail for PLL can be
+	 * switched off.
 		 */
+	if (!ufs_qcom_is_link_active(hba)) {
 		ufs_qcom_disable_lane_clks(host);
 		phy_power_off(phy);
 
+		if (host->vddp_ref_clk && ufs_qcom_is_link_off(hba))
+			ret = ufs_qcom_disable_vreg(hba->dev,
+					host->vddp_ref_clk);
+		ufs_qcom_ice_suspend(host);
+
+		if (ufs_qcom_is_link_off(hba)) {
 		/* Assert PHY soft reset */
 		ufs_qcom_assert_reset(hba);
 		goto out;
 	}
-
-	/*
-	 * If UniPro link is not active, PHY ref_clk, main PHY analog power
-	 * rail and low noise analog power rail for PLL can be switched off.
-	 */
-	if (!ufs_qcom_is_link_active(hba)) {
-		ufs_qcom_disable_lane_clks(host);
-		phy_power_off(phy);
 	}
+	/* Unvote PM QoS */
+	ufs_qcom_pm_qos_suspend(host);
 
 out:
 	return ret;
@@ -577,16 +831,146 @@
 		goto out;
 	}
 
+	if (host->vddp_ref_clk && (hba->rpm_lvl > UFS_PM_LVL_3 ||
+				   hba->spm_lvl > UFS_PM_LVL_3))
+		ufs_qcom_enable_vreg(hba->dev,
+				      host->vddp_ref_clk);
+
 	err = ufs_qcom_enable_lane_clks(host);
 	if (err)
 		goto out;
 
+	err = ufs_qcom_ice_resume(host);
+	if (err) {
+		dev_err(hba->dev, "%s: ufs_qcom_ice_resume failed, err = %d\n",
+			__func__, err);
+		goto out;
+	}
+
 	hba->is_sys_suspended = false;
 
 out:
 	return err;
 }
 
+static int ufs_qcom_full_reset(struct ufs_hba *hba)
+{
+	int ret = -ENOTSUPP;
+
+	if (!hba->core_reset) {
+		dev_err(hba->dev, "%s: failed, err = %d\n", __func__,
+				ret);
+		goto out;
+	}
+
+	ret = reset_control_assert(hba->core_reset);
+	if (ret) {
+		dev_err(hba->dev, "%s: core_reset assert failed, err = %d\n",
+				__func__, ret);
+		goto out;
+	}
+
+	/*
+	 * The hardware requirement for delay between assert/deassert
+	 * is at least 3-4 sleep clock (32.7KHz) cycles, which comes to
+	 * ~125us (4/32768). To be on the safe side add 200us delay.
+	 */
+	usleep_range(200, 210);
+
+	ret = reset_control_deassert(hba->core_reset);
+	if (ret)
+		dev_err(hba->dev, "%s: core_reset deassert failed, err = %d\n",
+				__func__, ret);
+
+out:
+	return ret;
+}
+
+#ifdef CONFIG_SCSI_UFS_QCOM_ICE
+static int ufs_qcom_crypto_req_setup(struct ufs_hba *hba,
+	struct ufshcd_lrb *lrbp, u8 *cc_index, bool *enable, u64 *dun)
+{
+	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
+	struct request *req;
+	int ret;
+
+	if (lrbp->cmd && lrbp->cmd->request)
+		req = lrbp->cmd->request;
+	else
+		return 0;
+
+	/* Use request LBA as the DUN value */
+	if (req->bio)
+		*dun = (req->bio->bi_iter.bi_sector) >>
+				UFS_QCOM_ICE_TR_DATA_UNIT_4_KB;
+
+	ret = ufs_qcom_ice_req_setup(host, lrbp->cmd, cc_index, enable);
+
+	return ret;
+}
+
+static
+int ufs_qcom_crytpo_engine_cfg_start(struct ufs_hba *hba, unsigned int task_tag)
+{
+	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
+	struct ufshcd_lrb *lrbp = &hba->lrb[task_tag];
+	int err = 0;
+
+	if (!host->ice.pdev ||
+	    !lrbp->cmd || lrbp->command_type != UTP_CMD_TYPE_SCSI)
+		goto out;
+
+	err = ufs_qcom_ice_cfg_start(host, lrbp->cmd);
+out:
+	return err;
+}
+
+static
+int ufs_qcom_crytpo_engine_cfg_end(struct ufs_hba *hba,
+		struct ufshcd_lrb *lrbp, struct request *req)
+{
+	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
+	int err = 0;
+
+	if (!host->ice.pdev || lrbp->command_type != UTP_CMD_TYPE_SCSI)
+		goto out;
+
+	err = ufs_qcom_ice_cfg_end(host, req);
+out:
+	return err;
+}
+
+static
+int ufs_qcom_crytpo_engine_reset(struct ufs_hba *hba)
+{
+	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
+	int err = 0;
+
+	if (!host->ice.pdev)
+		goto out;
+
+	err = ufs_qcom_ice_reset(host);
+out:
+	return err;
+}
+
+static int ufs_qcom_crypto_engine_get_status(struct ufs_hba *hba, u32 *status)
+{
+	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
+
+	if (!status)
+		return -EINVAL;
+
+	return ufs_qcom_ice_get_status(host, status);
+}
+#else /* !CONFIG_SCSI_UFS_QCOM_ICE */
+#define ufs_qcom_crypto_req_setup		NULL
+#define ufs_qcom_crytpo_engine_cfg_start	NULL
+#define ufs_qcom_crytpo_engine_cfg_end		NULL
+#define ufs_qcom_crytpo_engine_reset		NULL
+#define ufs_qcom_crypto_engine_get_status	NULL
+#endif /* CONFIG_SCSI_UFS_QCOM_ICE */
+
 struct ufs_qcom_dev_params {
 	u32 pwm_rx_gear;	/* pwm rx gear to work in */
 	u32 pwm_tx_gear;	/* pwm tx gear to work in */
@@ -685,7 +1069,7 @@
 	return 0;
 }
 
-#ifdef CONFIG_MSM_BUS_SCALING
+#ifdef CONFIG_QCOM_BUS_SCALING
 static int ufs_qcom_get_bus_vote(struct ufs_qcom_host *host,
 		const char *speed_mode)
 {
@@ -739,7 +1123,7 @@
 	}
 }
 
-static int ufs_qcom_set_bus_vote(struct ufs_qcom_host *host, int vote)
+static int __ufs_qcom_set_bus_vote(struct ufs_qcom_host *host, int vote)
 {
 	int err = 0;
 
@@ -770,7 +1154,7 @@
 
 	vote = ufs_qcom_get_bus_vote(host, mode);
 	if (vote >= 0)
-		err = ufs_qcom_set_bus_vote(host, vote);
+		err = __ufs_qcom_set_bus_vote(host, vote);
 	else
 		err = vote;
 
@@ -781,6 +1165,35 @@
 	return err;
 }
 
+static int ufs_qcom_set_bus_vote(struct ufs_hba *hba, bool on)
+{
+	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
+	int vote, err;
+
+	/*
+	 * In case ufs_qcom_init() is not yet done, simply ignore.
+	 * ufs_qcom_set_bus_vote() will be called again from
+	 * ufs_qcom_init() once initialization is done.
+	 */
+	if (!host)
+		return 0;
+
+	if (on) {
+		vote = host->bus_vote.saved_vote;
+		if (vote == host->bus_vote.min_bw_vote)
+			ufs_qcom_update_bus_bw_vote(host);
+	} else {
+		vote = host->bus_vote.min_bw_vote;
+	}
+
+	err = __ufs_qcom_set_bus_vote(host, vote);
+	if (err)
+		dev_err(hba->dev, "%s: set bus vote failed %d\n",
+				__func__, err);
+
+	return err;
+}
+
 static ssize_t
 show_ufs_to_mem_max_bus_bw(struct device *dev, struct device_attribute *attr,
 			char *buf)
@@ -851,13 +1264,13 @@
 out:
 	return err;
 }
-#else /* CONFIG_MSM_BUS_SCALING */
+#else /* CONFIG_QCOM_BUS_SCALING */
 static int ufs_qcom_update_bus_bw_vote(struct ufs_qcom_host *host)
 {
 	return 0;
 }
 
-static int ufs_qcom_set_bus_vote(struct ufs_qcom_host *host, int vote)
+static int ufs_qcom_set_bus_vote(struct ufs_hba *hba, bool on)
 {
 	return 0;
 }
@@ -866,7 +1279,10 @@
 {
 	return 0;
 }
-#endif /* CONFIG_MSM_BUS_SCALING */
+static inline void msm_bus_scale_unregister_client(uint32_t cl)
+{
+}
+#endif /* CONFIG_QCOM_BUS_SCALING */
 
 static void ufs_qcom_dev_ref_clk_ctrl(struct ufs_qcom_host *host, bool enable)
 {
@@ -953,6 +1369,18 @@
 				ufs_qcom_cap.hs_rx_gear = UFS_HS_G2;
 		}
 
+		/*
+		 * Platforms using QRBTCv2 phy must limit link to PWM Gear-1
+		 * and SLOW mode to successfully bring up the link.
+		 */
+		if (!strcmp(ufs_qcom_phy_name(phy), "ufs_phy_qrbtc_v2")) {
+			ufs_qcom_cap.tx_lanes = 1;
+			ufs_qcom_cap.rx_lanes = 1;
+			ufs_qcom_cap.pwm_rx_gear = UFS_PWM_G1;
+			ufs_qcom_cap.pwm_tx_gear = UFS_PWM_G1;
+			ufs_qcom_cap.desired_working_mode = SLOW;
+		}
+
 		ret = ufs_qcom_get_pwr_dev_param(&ufs_qcom_cap,
 						 dev_max_params,
 						 dev_req_params);
@@ -962,6 +1390,10 @@
 			goto out;
 		}
 
+		/* enable the device ref clock before changing to HS mode */
+		if (!ufshcd_is_hs_mode(&hba->pwr_info) &&
+			ufshcd_is_hs_mode(dev_req_params))
+			ufs_qcom_dev_ref_clk_ctrl(host, true);
 		break;
 	case POST_CHANGE:
 		if (ufs_qcom_cfg_timers(hba, dev_req_params->gear_rx,
@@ -989,6 +1421,11 @@
 		memcpy(&host->dev_req_params,
 				dev_req_params, sizeof(*dev_req_params));
 		ufs_qcom_update_bus_bw_vote(host);
+
+		/* disable the device ref clock if entered PWM mode */
+		if (ufshcd_is_hs_mode(&hba->pwr_info) &&
+			!ufshcd_is_hs_mode(dev_req_params))
+			ufs_qcom_dev_ref_clk_ctrl(host, false);
 		break;
 	default:
 		ret = -EINVAL;
@@ -998,6 +1435,34 @@
 	return ret;
 }
 
+static int ufs_qcom_quirk_host_pa_saveconfigtime(struct ufs_hba *hba)
+{
+	int err;
+	u32 pa_vs_config_reg1;
+
+	err = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_VS_CONFIG_REG1),
+			     &pa_vs_config_reg1);
+	if (err)
+		goto out;
+
+	/* Allow extension of MSB bits of PA_SaveConfigTime attribute */
+	err = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_VS_CONFIG_REG1),
+			    (pa_vs_config_reg1 | (1 << 12)));
+
+out:
+	return err;
+}
+
+static int ufs_qcom_apply_dev_quirks(struct ufs_hba *hba)
+{
+	int err = 0;
+
+	if (hba->dev_quirks & UFS_DEVICE_QUIRK_HOST_PA_SAVECONFIGTIME)
+		err = ufs_qcom_quirk_host_pa_saveconfigtime(hba);
+
+	return err;
+}
+
 static u32 ufs_qcom_get_ufs_hci_version(struct ufs_hba *hba)
 {
 	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
@@ -1021,18 +1486,18 @@
 {
 	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
 
-	if (host->hw_ver.major == 0x01) {
-		hba->quirks |= UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS
+	if (host->hw_ver.major == 0x1) {
+		hba->quirks |= (UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS
 			    | UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP
-			    | UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE;
+			      | UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE);
 
-		if (host->hw_ver.minor == 0x0001 && host->hw_ver.step == 0x0001)
+		if (host->hw_ver.minor == 0x001 && host->hw_ver.step == 0x0001)
 			hba->quirks |= UFSHCD_QUIRK_BROKEN_INTR_AGGR;
 
 		hba->quirks |= UFSHCD_QUIRK_BROKEN_LCC;
 	}
 
-	if (host->hw_ver.major >= 0x2) {
+	if (host->hw_ver.major == 0x2) {
 		hba->quirks |= UFSHCD_QUIRK_BROKEN_UFS_HCI_VERSION;
 
 		if (!ufs_qcom_cap_qunipro(host))
@@ -1041,34 +1506,59 @@
 				| UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE
 				| UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP);
 	}
+
+	if (host->disable_lpm)
+		hba->quirks |= UFSHCD_QUIRK_BROKEN_AUTO_HIBERN8;
 }
 
 static void ufs_qcom_set_caps(struct ufs_hba *hba)
 {
 	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
 
-	hba->caps |= UFSHCD_CAP_CLK_GATING | UFSHCD_CAP_HIBERN8_WITH_CLK_GATING;
+	if (!host->disable_lpm) {
+		hba->caps |= UFSHCD_CAP_CLK_GATING;
+		hba->caps |= UFSHCD_CAP_HIBERN8_WITH_CLK_GATING;
 	hba->caps |= UFSHCD_CAP_CLK_SCALING;
+	}
 	hba->caps |= UFSHCD_CAP_AUTO_BKOPS_SUSPEND;
 
 	if (host->hw_ver.major >= 0x2) {
+		if (!host->disable_lpm)
+			hba->caps |= UFSHCD_CAP_POWER_COLLAPSE_DURING_HIBERN8;
 		host->caps = UFS_QCOM_CAP_QUNIPRO |
 			     UFS_QCOM_CAP_RETAIN_SEC_CFG_AFTER_PWR_COLLAPSE;
 	}
+	if (host->hw_ver.major >= 0x3) {
+		host->caps |= UFS_QCOM_CAP_QUNIPRO_CLK_GATING;
+		/*
+		 * The UFS PHY attached to the v3.0.0 controller supports
+		 * entering the deeper SVS2 low power state, which lets the
+		 * controller run at much lower clock frequencies to save
+		 * power. Assume this and any future revisions of the
+		 * controller support this capability; revisit this assumption
+		 * if a future platform with this core doesn't support it, as
+		 * there would be no benefit in running at lower frequencies
+		 * then.
+		 */
+		host->caps |= UFS_QCOM_CAP_SVS2;
+	}
 }
 
 /**
  * ufs_qcom_setup_clocks - enables/disable clocks
  * @hba: host controller instance
  * @on: If true, enable clocks else disable them.
+ * @is_gating_context: true if called from the aggressive clock gating
+ * context, in which case only the important clocks may need to be gated off;
+ * false means all clocks must be gated off.
  *
  * Returns 0 on success, non-zero on failure.
  */
-static int ufs_qcom_setup_clocks(struct ufs_hba *hba, bool on)
+static int ufs_qcom_setup_clocks(struct ufs_hba *hba, bool on,
+				 bool is_gating_context)
 {
 	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
 	int err;
-	int vote = 0;
 
 	/*
 	 * In case ufs_qcom_init() is not yet done, simply ignore.
@@ -1090,30 +1580,426 @@
 			ufs_qcom_phy_disable_iface_clk(host->generic_phy);
 			goto out;
 		}
-		vote = host->bus_vote.saved_vote;
-		if (vote == host->bus_vote.min_bw_vote)
-			ufs_qcom_update_bus_bw_vote(host);
+		/* enable the device ref clock for HS mode */
+		if (ufshcd_is_hs_mode(&hba->pwr_info))
+			ufs_qcom_dev_ref_clk_ctrl(host, true);
 
+		err = ufs_qcom_ice_resume(host);
+		if (err)
+			goto out;
 	} else {
+		err = ufs_qcom_ice_suspend(host);
+		if (err)
+			goto out;
 
 		/* M-PHY RMMI interface clocks can be turned off */
 		ufs_qcom_phy_disable_iface_clk(host->generic_phy);
-		if (!ufs_qcom_is_link_active(hba))
+		/*
+		 * If auto hibern8 is supported then the link will already
+		 * be in hibern8 state and the ref clock can be gated.
+		 */
+		if (ufshcd_is_auto_hibern8_supported(hba) ||
+		    !ufs_qcom_is_link_active(hba)) {
+			/* turn off UFS local PHY ref_clk */
+			ufs_qcom_phy_disable_ref_clk(host->generic_phy);
 			/* disable device ref_clk */
 			ufs_qcom_dev_ref_clk_ctrl(host, false);
+		}
+	}
 
-		vote = host->bus_vote.min_bw_vote;
+out:
+	return err;
 	}
 
-	err = ufs_qcom_set_bus_vote(host, vote);
-	if (err)
-		dev_err(hba->dev, "%s: set bus vote failed %d\n",
-				__func__, err);
+#ifdef CONFIG_SMP /* CONFIG_SMP */
+static int ufs_qcom_cpu_to_group(struct ufs_qcom_host *host, int cpu)
+{
+	int i;
+
+	if (cpu >= 0 && cpu < num_possible_cpus())
+		for (i = 0; i < host->pm_qos.num_groups; i++)
+			if (cpumask_test_cpu(cpu, &host->pm_qos.groups[i].mask))
+				return i;
+
+	return host->pm_qos.default_cpu;
+}
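Each PM QoS group owns a CPU mask, and a request's CPU is mapped to the first group whose mask contains it, falling back to pm_qos.default_cpu otherwise (used as a group index). A userspace model of the mask lookup, assuming two hypothetical four-CPU clusters:

    #include <stdio.h>
    #include <stdint.h>

    #define NUM_CPUS 8

    /* Hypothetical groups: a little cluster (cpus 0-3) and a big one (4-7). */
    static const uint32_t group_mask[] = { 0x0f, 0xf0 };
    static const int num_groups = 2;
    static const int default_group = 0;

    static int cpu_to_group(int cpu)
    {
        int i;

        if (cpu >= 0 && cpu < NUM_CPUS)
            for (i = 0; i < num_groups; i++)
                if (group_mask[i] & (1u << cpu))
                    return i;

        /* Unknown CPU (e.g. a request not tied to a CPU): default group. */
        return default_group;
    }

    int main(void)
    {
        printf("cpu  2 -> group %d\n", cpu_to_group(2));  /* 0 */
        printf("cpu  5 -> group %d\n", cpu_to_group(5));  /* 1 */
        printf("cpu -1 -> group %d\n", cpu_to_group(-1)); /* default */
        return 0;
    }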
+
+static void ufs_qcom_pm_qos_req_start(struct ufs_hba *hba, struct request *req)
+{
+	unsigned long flags;
+	struct ufs_qcom_host *host;
+	struct ufs_qcom_pm_qos_cpu_group *group;
 
+	if (!hba || !req)
+		return;
+
+	host = ufshcd_get_variant(hba);
+	if (!host->pm_qos.groups)
+		return;
+
+	group = &host->pm_qos.groups[ufs_qcom_cpu_to_group(host, req->cpu)];
+
+	spin_lock_irqsave(hba->host->host_lock, flags);
+	if (!host->pm_qos.is_enabled)
+		goto out;
+
+	group->active_reqs++;
+	if (group->state != PM_QOS_REQ_VOTE &&
+			group->state != PM_QOS_VOTED) {
+		group->state = PM_QOS_REQ_VOTE;
+		queue_work(host->pm_qos.workq, &group->vote_work);
+	}
 out:
-	return err;
+	spin_unlock_irqrestore(hba->host->host_lock, flags);
+}
+
+/* hba->host->host_lock is assumed to be held by caller */
+static void __ufs_qcom_pm_qos_req_end(struct ufs_qcom_host *host, int req_cpu)
+{
+	struct ufs_qcom_pm_qos_cpu_group *group;
+
+	if (!host->pm_qos.groups || !host->pm_qos.is_enabled)
+		return;
+
+	group = &host->pm_qos.groups[ufs_qcom_cpu_to_group(host, req_cpu)];
+
+	if (--group->active_reqs)
+		return;
+	group->state = PM_QOS_REQ_UNVOTE;
+	queue_work(host->pm_qos.workq, &group->unvote_work);
+}
+
+static void ufs_qcom_pm_qos_req_end(struct ufs_hba *hba, struct request *req,
+	bool should_lock)
+{
+	unsigned long flags = 0;
+
+	if (!hba || !req)
+		return;
+
+	if (should_lock)
+		spin_lock_irqsave(hba->host->host_lock, flags);
+	__ufs_qcom_pm_qos_req_end(ufshcd_get_variant(hba), req->cpu);
+	if (should_lock)
+		spin_unlock_irqrestore(hba->host->host_lock, flags);
+}
+
+static void ufs_qcom_pm_qos_vote_work(struct work_struct *work)
+{
+	struct ufs_qcom_pm_qos_cpu_group *group =
+		container_of(work, struct ufs_qcom_pm_qos_cpu_group, vote_work);
+	struct ufs_qcom_host *host = group->host;
+	unsigned long flags;
+
+	spin_lock_irqsave(host->hba->host->host_lock, flags);
+
+	if (!host->pm_qos.is_enabled || !group->active_reqs) {
+		spin_unlock_irqrestore(host->hba->host->host_lock, flags);
+		return;
+	}
+
+	group->state = PM_QOS_VOTED;
+	spin_unlock_irqrestore(host->hba->host->host_lock, flags);
+
+	pm_qos_update_request(&group->req, group->latency_us);
+}
+
+static void ufs_qcom_pm_qos_unvote_work(struct work_struct *work)
+{
+	struct ufs_qcom_pm_qos_cpu_group *group = container_of(work,
+		struct ufs_qcom_pm_qos_cpu_group, unvote_work);
+	struct ufs_qcom_host *host = group->host;
+	unsigned long flags;
+
+	/*
+	 * Check if new requests were submitted in the meantime and do not
+	 * unvote if so.
+	 */
+	spin_lock_irqsave(host->hba->host->host_lock, flags);
+
+	if (!host->pm_qos.is_enabled || group->active_reqs) {
+		spin_unlock_irqrestore(host->hba->host->host_lock, flags);
+		return;
+	}
+
+	group->state = PM_QOS_UNVOTED;
+	spin_unlock_irqrestore(host->hba->host->host_lock, flags);
+
+	pm_qos_update_request_timeout(&group->req,
+		group->latency_us, UFS_QCOM_PM_QOS_UNVOTE_TIMEOUT_US);
+}
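Together, the request-start and request-end paths implement a small voting state machine: starting a request queues vote work unless a vote is pending or already in place, and only the last completing request queues unvote work. A condensed standalone model of those transitions (locking and the workqueue itself elided):

    #include <stdio.h>

    enum qos_state { QOS_UNVOTED, QOS_VOTED, QOS_REQ_VOTE, QOS_REQ_UNVOTE };

    struct qos_group {
        enum qos_state state;
        int active_reqs;
    };

    /* Request started on this group: vote unless one is already in flight. */
    static void req_start(struct qos_group *g)
    {
        g->active_reqs++;
        if (g->state != QOS_REQ_VOTE && g->state != QOS_VOTED) {
            g->state = QOS_REQ_VOTE;
            printf("queue vote work\n");
        }
    }

    /* Request finished: only the last one triggers the unvote work. */
    static void req_end(struct qos_group *g)
    {
        if (--g->active_reqs)
            return;
        g->state = QOS_REQ_UNVOTE;
        printf("queue unvote work\n");
    }

    int main(void)
    {
        struct qos_group g = { QOS_UNVOTED, 0 };

        req_start(&g); /* queues vote work */
        req_start(&g); /* vote already pending, nothing queued */
        req_end(&g);   /* still one active request */
        req_end(&g);   /* queues unvote work */
        return 0;
    }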
+
+static ssize_t ufs_qcom_pm_qos_enable_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct ufs_hba *hba = dev_get_drvdata(dev->parent);
+	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
+
+	return snprintf(buf, PAGE_SIZE, "%d\n", host->pm_qos.is_enabled);
+}
+
+static ssize_t ufs_qcom_pm_qos_enable_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct ufs_hba *hba = dev_get_drvdata(dev->parent);
+	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
+	unsigned long value;
+	unsigned long flags;
+	bool enable;
+	int i;
+
+	if (kstrtoul(buf, 0, &value))
+		return -EINVAL;
+
+	enable = !!value;
+
+	/*
+	 * Must take the spinlock and save irqs before changing the enabled
+	 * flag, to keep the PM QoS release logic correct.
+	 */
+	spin_lock_irqsave(hba->host->host_lock, flags);
+	if (enable == host->pm_qos.is_enabled) {
+		spin_unlock_irqrestore(hba->host->host_lock, flags);
+		return count;
+	}
+	host->pm_qos.is_enabled = enable;
+	spin_unlock_irqrestore(hba->host->host_lock, flags);
+
+	if (!enable)
+		for (i = 0; i < host->pm_qos.num_groups; i++) {
+			cancel_work_sync(&host->pm_qos.groups[i].vote_work);
+			cancel_work_sync(&host->pm_qos.groups[i].unvote_work);
+			spin_lock_irqsave(hba->host->host_lock, flags);
+			host->pm_qos.groups[i].state = PM_QOS_UNVOTED;
+			host->pm_qos.groups[i].active_reqs = 0;
+			spin_unlock_irqrestore(hba->host->host_lock, flags);
+			pm_qos_update_request(&host->pm_qos.groups[i].req,
+				PM_QOS_DEFAULT_VALUE);
+		}
+
+	return count;
 }
 
+static ssize_t ufs_qcom_pm_qos_latency_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct ufs_hba *hba = dev_get_drvdata(dev->parent);
+	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
+	int ret;
+	int i;
+	int offset = 0;
+
+	for (i = 0; i < host->pm_qos.num_groups; i++) {
+		ret = snprintf(&buf[offset], PAGE_SIZE - offset,
+			"cpu group #%d(mask=0x%lx): %d\n", i,
+			host->pm_qos.groups[i].mask.bits[0],
+			host->pm_qos.groups[i].latency_us);
+		if (ret > 0)
+			offset += ret;
+		else
+			break;
+	}
+
+	return offset;
+}
+
+static ssize_t ufs_qcom_pm_qos_latency_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct ufs_hba *hba = dev_get_drvdata(dev->parent);
+	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
+	unsigned long value;
+	unsigned long flags;
+	char *strbuf;
+	char *strbuf_copy;
+	char *token;
+	int i;
+	int ret;
+
+	/* reserve one byte for null termination */
+	strbuf = kmalloc(count + 1, GFP_KERNEL);
+	if (!strbuf)
+		return -ENOMEM;
+	strbuf_copy = strbuf;
+	strlcpy(strbuf, buf, count + 1);
+
+	for (i = 0; i < host->pm_qos.num_groups; i++) {
+		token = strsep(&strbuf, ",");
+		if (!token)
+			break;
+
+		ret = kstrtoul(token, 0, &value);
+		if (ret)
+			break;
+
+		spin_lock_irqsave(hba->host->host_lock, flags);
+		host->pm_qos.groups[i].latency_us = value;
+		spin_unlock_irqrestore(hba->host->host_lock, flags);
+	}
+
+	kfree(strbuf_copy);
+	return count;
+}
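The store handler copies the user buffer and splits it on commas with strsep(), assigning one latency value per group until either the tokens or the groups run out. A standalone model of the parsing loop, with a hypothetical group count and input:

    #define _DEFAULT_SOURCE
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    #define NUM_GROUPS 2

    int main(void)
    {
        long latency[NUM_GROUPS] = { 0 };
        /* strsep() modifies its argument, so work on a writable copy. */
        char buf[] = "44,67";
        char *cur = buf, *token;
        int i;

        for (i = 0; i < NUM_GROUPS; i++) {
            token = strsep(&cur, ",");
            if (!token)
                break;
            /* Base 0 accepts decimal, octal and hex, like kstrtoul(). */
            latency[i] = strtol(token, NULL, 0);
        }

        for (i = 0; i < NUM_GROUPS; i++)
            printf("group %d latency %ld us\n", i, latency[i]);
        return 0;
    }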
+
+static int ufs_qcom_pm_qos_init(struct ufs_qcom_host *host)
+{
+	struct device_node *node = host->hba->dev->of_node;
+	struct device_attribute *attr;
+	int ret = 0;
+	int num_groups;
+	int num_values;
+	char wq_name[sizeof("ufs_pm_qos_00")];
+	int i;
+
+	num_groups = of_property_count_u32_elems(node,
+		"qcom,pm-qos-cpu-groups");
+	if (num_groups <= 0)
+		goto no_pm_qos;
+
+	num_values = of_property_count_u32_elems(node,
+		"qcom,pm-qos-cpu-group-latency-us");
+	if (num_values <= 0)
+		goto no_pm_qos;
+
+	if (num_values != num_groups || num_groups > num_possible_cpus()) {
+		dev_err(host->hba->dev, "%s: invalid count: num_groups=%d, num_values=%d, num_possible_cpus=%d\n",
+			__func__, num_groups, num_values, num_possible_cpus());
+		goto no_pm_qos;
+	}
+
+	host->pm_qos.num_groups = num_groups;
+	host->pm_qos.groups = kcalloc(host->pm_qos.num_groups,
+			sizeof(struct ufs_qcom_pm_qos_cpu_group), GFP_KERNEL);
+	if (!host->pm_qos.groups)
+		return -ENOMEM;
+
+	for (i = 0; i < host->pm_qos.num_groups; i++) {
+		u32 mask;
+
+		ret = of_property_read_u32_index(node, "qcom,pm-qos-cpu-groups",
+			i, &mask);
+		if (ret)
+			goto free_groups;
+		host->pm_qos.groups[i].mask.bits[0] = mask;
+		if (!cpumask_subset(&host->pm_qos.groups[i].mask,
+			cpu_possible_mask)) {
+			dev_err(host->hba->dev, "%s: invalid mask 0x%x for cpu group\n",
+				__func__, mask);
+			goto free_groups;
+		}
+
+		ret = of_property_read_u32_index(node,
+			"qcom,pm-qos-cpu-group-latency-us", i,
+			&host->pm_qos.groups[i].latency_us);
+		if (ret)
+			goto free_groups;
+
+		host->pm_qos.groups[i].req.type = PM_QOS_REQ_AFFINE_CORES;
+		host->pm_qos.groups[i].req.cpus_affine =
+			host->pm_qos.groups[i].mask;
+		host->pm_qos.groups[i].state = PM_QOS_UNVOTED;
+		host->pm_qos.groups[i].active_reqs = 0;
+		host->pm_qos.groups[i].host = host;
+
+		INIT_WORK(&host->pm_qos.groups[i].vote_work,
+			ufs_qcom_pm_qos_vote_work);
+		INIT_WORK(&host->pm_qos.groups[i].unvote_work,
+			ufs_qcom_pm_qos_unvote_work);
+	}
+
+	ret = of_property_read_u32(node, "qcom,pm-qos-default-cpu",
+		&host->pm_qos.default_cpu);
+	if (ret || host->pm_qos.default_cpu > num_possible_cpus())
+		host->pm_qos.default_cpu = 0;
+
+	/*
+	 * Use a single-threaded workqueue to assure work submitted to the queue
+	 * is performed in order. Consider the following 2 possible cases:
+	 *
+	 * 1. A new request arrives and voting work is scheduled for it. Before
+	 *    the voting work is performed the request is finished and unvote
+	 *    work is also scheduled.
+	 * 2. A request is finished and unvote work is scheduled. Before the
+	 *    work is performed a new request arrives and voting work is also
+	 *    scheduled.
+	 *
+	 * In both cases a vote work and unvote work wait to be performed.
+	 * If ordering is not guaranteed, then the end state might be the
+	 * opposite of the desired state.
+	 */
+	snprintf(wq_name, ARRAY_SIZE(wq_name), "%s_%d", "ufs_pm_qos",
+		host->hba->host->host_no);
+	host->pm_qos.workq = create_singlethread_workqueue(wq_name);
+	if (!host->pm_qos.workq) {
+		dev_err(host->hba->dev, "%s: failed to create the workqueue\n",
+				__func__);
+		ret = -ENOMEM;
+		goto free_groups;
+	}
+
+	/* Initialization was ok, add all PM QoS requests */
+	for (i = 0; i < host->pm_qos.num_groups; i++)
+		pm_qos_add_request(&host->pm_qos.groups[i].req,
+			PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);
+
+	/* PM QoS latency sys-fs attribute */
+	attr = &host->pm_qos.latency_attr;
+	attr->show = ufs_qcom_pm_qos_latency_show;
+	attr->store = ufs_qcom_pm_qos_latency_store;
+	sysfs_attr_init(&attr->attr);
+	attr->attr.name = "pm_qos_latency_us";
+	attr->attr.mode = S_IRUGO | S_IWUSR;
+	if (device_create_file(host->hba->var->dev, attr))
+		dev_dbg(host->hba->dev, "Failed to create sysfs for pm_qos_latency_us\n");
+
+	/* PM QoS enable sys-fs attribute */
+	attr = &host->pm_qos.enable_attr;
+	attr->show = ufs_qcom_pm_qos_enable_show;
+	attr->store = ufs_qcom_pm_qos_enable_store;
+	sysfs_attr_init(&attr->attr);
+	attr->attr.name = "pm_qos_enable";
+	attr->attr.mode = S_IRUGO | S_IWUSR;
+	if (device_create_file(host->hba->var->dev, attr))
+		dev_dbg(host->hba->dev, "Failed to create sysfs for pm_qos enable\n");
+
+	host->pm_qos.is_enabled = true;
+
+	return 0;
+
+free_groups:
+	kfree(host->pm_qos.groups);
+no_pm_qos:
+	host->pm_qos.groups = NULL;
+	return ret ? ret : -ENOTSUPP;
+}
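Initialization expects two equal-length u32 arrays in the device tree, qcom,pm-qos-cpu-groups (one CPU mask per group) and qcom,pm-qos-cpu-group-latency-us, and every mask must be a subset of the possible CPUs. A userspace sketch of those validation rules, with hypothetical property values:

    #include <stdio.h>
    #include <stdint.h>

    #define POSSIBLE_CPUS_MASK 0xffu /* 8 possible CPUs */

    int main(void)
    {
        /* Values as they might appear in DT, e.g.
         *   qcom,pm-qos-cpu-groups = <0x0f 0xf0>;
         *   qcom,pm-qos-cpu-group-latency-us = <44 67>; */
        const uint32_t groups[] = { 0x0f, 0xf0 };
        const uint32_t latency[] = { 44, 67 };
        const int num_groups = 2, num_values = 2;
        int i;

        if (num_values != num_groups) {
            fprintf(stderr, "count mismatch\n");
            return 1;
        }

        for (i = 0; i < num_groups; i++) {
            /* Each group mask may only name possible CPUs. */
            if (groups[i] & ~POSSIBLE_CPUS_MASK) {
                fprintf(stderr, "invalid mask 0x%x\n", groups[i]);
                return 1;
            }
            printf("group %d: mask 0x%02x, latency %u us\n",
                   i, groups[i], latency[i]);
        }
        return 0;
    }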
+
+static void ufs_qcom_pm_qos_suspend(struct ufs_qcom_host *host)
+{
+	int i;
+
+	if (!host->pm_qos.groups)
+		return;
+
+	for (i = 0; i < host->pm_qos.num_groups; i++)
+		flush_work(&host->pm_qos.groups[i].unvote_work);
+}
+
+static void ufs_qcom_pm_qos_remove(struct ufs_qcom_host *host)
+{
+	int i;
+
+	if (!host->pm_qos.groups)
+		return;
+
+	for (i = 0; i < host->pm_qos.num_groups; i++)
+		pm_qos_remove_request(&host->pm_qos.groups[i].req);
+	destroy_workqueue(host->pm_qos.workq);
+
+	kfree(host->pm_qos.groups);
+	host->pm_qos.groups = NULL;
+}
+#endif /* CONFIG_SMP */
+
 #define	ANDROID_BOOT_DEV_MAX	30
 static char android_boot_dev[ANDROID_BOOT_DEV_MAX];
 
@@ -1126,6 +2012,69 @@
 __setup("androidboot.bootdevice=", get_android_boot_dev);
 #endif
 
+/*
+ * ufs_qcom_parse_lpm - read from DTS whether LPM modes should be disabled.
+ */
+static void ufs_qcom_parse_lpm(struct ufs_qcom_host *host)
+{
+	struct device_node *node = host->hba->dev->of_node;
+
+	host->disable_lpm = of_property_read_bool(node, "qcom,disable-lpm");
+	if (host->disable_lpm)
+		pr_info("%s: will disable all LPM modes\n", __func__);
+}
+
+static int ufs_qcom_parse_reg_info(struct ufs_qcom_host *host, char *name,
+				   struct ufs_vreg **out_vreg)
+{
+	int ret = 0;
+	char prop_name[MAX_PROP_SIZE];
+	struct ufs_vreg *vreg = NULL;
+	struct device *dev = host->hba->dev;
+	struct device_node *np = dev->of_node;
+
+	if (!np) {
+		dev_err(dev, "%s: non DT initialization\n", __func__);
+		goto out;
+	}
+
+	snprintf(prop_name, MAX_PROP_SIZE, "%s-supply", name);
+	if (!of_parse_phandle(np, prop_name, 0)) {
+		dev_info(dev, "%s: Unable to find %s regulator, assuming enabled\n",
+			 __func__, prop_name);
+		ret = -ENODEV;
+		goto out;
+	}
+
+	vreg = devm_kzalloc(dev, sizeof(*vreg), GFP_KERNEL);
+	if (!vreg)
+		return -ENOMEM;
+
+	vreg->name = name;
+
+	snprintf(prop_name, MAX_PROP_SIZE, "%s-max-microamp", name);
+	ret = of_property_read_u32(np, prop_name, &vreg->max_uA);
+	if (ret) {
+		dev_err(dev, "%s: unable to find %s err %d\n",
+			__func__, prop_name, ret);
+		goto out;
+	}
+
+	vreg->reg = devm_regulator_get(dev, vreg->name);
+	if (IS_ERR(vreg->reg)) {
+		ret = PTR_ERR(vreg->reg);
+		dev_err(dev, "%s: %s get failed, err=%d\n",
+			__func__, vreg->name, ret);
+	}
+	vreg->min_uV = VDDP_REF_CLK_MIN_UV;
+	vreg->max_uV = VDDP_REF_CLK_MAX_UV;
+
+out:
+	if (!ret)
+		*out_vreg = vreg;
+	return ret;
+}
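The regulator properties are derived from the supply name: "<name>-supply" must be a phandle to the regulator node and "<name>-max-microamp" gives the maximum load it must support. A small sketch of the naming convention used above:

    #include <stdio.h>

    #define MAX_PROP_SIZE 32

    int main(void)
    {
        const char *name = "qcom,vddp-ref-clk";
        char prop_name[MAX_PROP_SIZE];

        /* Phandle to the regulator node. */
        snprintf(prop_name, MAX_PROP_SIZE, "%s-supply", name);
        printf("%s\n", prop_name);

        /* Maximum load the supply must support, in microamps. */
        snprintf(prop_name, MAX_PROP_SIZE, "%s-max-microamp", name);
        printf("%s\n", prop_name);
        return 0;
    }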
+
 /**
  * ufs_qcom_init - bind phy with controller
  * @hba: host controller instance
@@ -1144,9 +2093,6 @@
 	struct ufs_qcom_host *host;
 	struct resource *res;
 
-	if (strlen(android_boot_dev) && strcmp(android_boot_dev, dev_name(dev)))
-		return -ENODEV;
-
 	host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
 	if (!host) {
 		err = -ENOMEM;
@@ -1156,21 +2102,58 @@
 
 	/* Make a two way bind between the qcom host and the hba */
 	host->hba = hba;
+	spin_lock_init(&host->ice_work_lock);
+
 	ufshcd_set_variant(hba, host);
 
+	err = ufs_qcom_ice_get_dev(host);
+	if (err == -EPROBE_DEFER) {
+		/*
+		 * The UFS driver might be probed before the ICE driver.
+		 * In that case, return EPROBE_DEFER to delay the UFS
+		 * probing until the ICE driver is available.
+		 */
+		dev_err(dev, "%s: required ICE device not probed yet err = %d\n",
+			__func__, err);
+		goto out_host_free;
+
+	} else if (err == -ENODEV) {
 	/*
-	 * voting/devoting device ref_clk source is time consuming hence
-	 * skip devoting it during aggressive clock gating. This clock
-	 * will still be gated off during runtime suspend.
+		 * ICE device is not enabled in DTS file. No need for further
+		 * initialization of ICE driver.
 	 */
+		dev_warn(dev, "%s: ICE device is not enabled",
+			__func__);
+	} else if (err) {
+		dev_err(dev, "%s: ufs_qcom_ice_get_dev failed %d\n",
+			__func__, err);
+		goto out_host_free;
+	}
+
 	host->generic_phy = devm_phy_get(dev, "ufsphy");
 
-	if (IS_ERR(host->generic_phy)) {
+	if (host->generic_phy == ERR_PTR(-EPROBE_DEFER)) {
+		/*
+		 * The UFS driver might be probed before the PHY driver.
+		 * In that case, return EPROBE_DEFER.
+		 */
+		err = -EPROBE_DEFER;
+		dev_warn(dev, "%s: required phy device hasn't probed yet, err = %d\n",
+			__func__, err);
+		goto out_host_free;
+	} else if (IS_ERR(host->generic_phy)) {
 		err = PTR_ERR(host->generic_phy);
 		dev_err(dev, "%s: PHY get failed %d\n", __func__, err);
 		goto out;
 	}
 
+	err = ufs_qcom_pm_qos_init(host);
+	if (err)
+		dev_info(dev, "%s: PM QoS will be disabled\n", __func__);
+
+	/* restore the secure configuration */
+	ufs_qcom_update_sec_cfg(hba, true);
+
 	err = ufs_qcom_bus_register(host);
 	if (err)
 		goto out_host_free;
@@ -1206,19 +2189,33 @@
 	ufs_qcom_phy_save_controller_version(host->generic_phy,
 		host->hw_ver.major, host->hw_ver.minor, host->hw_ver.step);
 
+	err = ufs_qcom_parse_reg_info(host, "qcom,vddp-ref-clk",
+				      &host->vddp_ref_clk);
 	phy_init(host->generic_phy);
 	err = phy_power_on(host->generic_phy);
 	if (err)
 		goto out_unregister_bus;
+	if (host->vddp_ref_clk) {
+		err = ufs_qcom_enable_vreg(dev, host->vddp_ref_clk);
+		if (err) {
+			dev_err(dev, "%s: failed enabling ref clk supply: %d\n",
+				__func__, err);
+			goto out_disable_phy;
+		}
+	}
 
 	err = ufs_qcom_init_lane_clks(host);
 	if (err)
-		goto out_disable_phy;
+		goto out_disable_vddp;
 
+	ufs_qcom_parse_lpm(host);
+	if (host->disable_lpm)
+		pm_runtime_forbid(host->hba->dev);
 	ufs_qcom_set_caps(hba);
 	ufs_qcom_advertise_quirks(hba);
 
-	ufs_qcom_setup_clocks(hba, true);
+	ufs_qcom_set_bus_vote(hba, true);
+	ufs_qcom_setup_clocks(hba, true, false);
 
 	if (hba->dev->id < MAX_UFS_QCOM_HOSTS)
 		ufs_qcom_hosts[hba->dev->id] = host;
@@ -1234,10 +2231,14 @@
 
 	goto out;
 
+out_disable_vddp:
+	if (host->vddp_ref_clk)
+		ufs_qcom_disable_vreg(dev, host->vddp_ref_clk);
 out_disable_phy:
 	phy_power_off(host->generic_phy);
 out_unregister_bus:
 	phy_exit(host->generic_phy);
+	msm_bus_scale_unregister_client(host->bus_vote.client_handle);
 out_host_free:
 	devm_kfree(dev, host);
 	ufshcd_set_variant(hba, NULL);
@@ -1249,8 +2250,10 @@
 {
 	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
 
+	msm_bus_scale_unregister_client(host->bus_vote.client_handle);
 	ufs_qcom_disable_lane_clks(host);
 	phy_power_off(host->generic_phy);
+	ufs_qcom_pm_qos_remove(host);
 }
 
 static int ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(struct ufs_hba *hba,
@@ -1281,105 +2284,292 @@
 	return err;
 }
 
-static int ufs_qcom_clk_scale_up_pre_change(struct ufs_hba *hba)
+static inline int ufs_qcom_configure_lpm(struct ufs_hba *hba, bool enable)
 {
-	/* nothing to do as of now */
-	return 0;
+	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
+	struct phy *phy = host->generic_phy;
+	int err = 0;
+
+	/* The default low power mode configuration is SVS2 */
+	if (!ufs_qcom_cap_svs2(host))
+		goto out;
+
+	if (!((host->hw_ver.major == 0x3) &&
+	    (host->hw_ver.minor == 0x0) &&
+	    (host->hw_ver.step == 0x0)))
+		goto out;
+
+	/*
+	 * The link should be put in hibern8 state before
+	 * configuring the PHY to enter/exit SVS2 mode.
+	 */
+	err = ufshcd_uic_hibern8_enter(hba);
+	if (err)
+		goto out;
+
+	err = ufs_qcom_phy_configure_lpm(phy, enable);
+	if (err)
+		goto out;
+
+	err = ufshcd_uic_hibern8_exit(hba);
+out:
+	return err;
 }
 
-static int ufs_qcom_clk_scale_up_post_change(struct ufs_hba *hba)
+static int ufs_qcom_clk_scale_up_pre_change(struct ufs_hba *hba)
 {
 	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
+	struct ufs_pa_layer_attr *attr = &host->dev_req_params;
+	int err = 0;
 
 	if (!ufs_qcom_cap_qunipro(host))
-		return 0;
+		goto out;
+
+	err = ufs_qcom_configure_lpm(hba, false);
+	if (err)
+		goto out;
+
+	if (attr)
+		__ufs_qcom_cfg_timers(hba, attr->gear_rx, attr->pwr_rx,
+				      attr->hs_rate, false, true);
 
 	/* set unipro core clock cycles to 150 and clear clock divider */
-	return ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(hba, 150);
+	err = ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(hba, 150);
+out:
+	return err;
 }
 
 static int ufs_qcom_clk_scale_down_pre_change(struct ufs_hba *hba)
 {
 	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
-	int err;
-	u32 core_clk_ctrl_reg;
 
 	if (!ufs_qcom_cap_qunipro(host))
 		return 0;
 
-	err = ufshcd_dme_get(hba,
-			    UIC_ARG_MIB(DME_VS_CORE_CLK_CTRL),
-			    &core_clk_ctrl_reg);
-
-	/* make sure CORE_CLK_DIV_EN is cleared */
-	if (!err &&
-	    (core_clk_ctrl_reg & DME_VS_CORE_CLK_CTRL_CORE_CLK_DIV_EN_BIT)) {
-		core_clk_ctrl_reg &= ~DME_VS_CORE_CLK_CTRL_CORE_CLK_DIV_EN_BIT;
-		err = ufshcd_dme_set(hba,
-				    UIC_ARG_MIB(DME_VS_CORE_CLK_CTRL),
-				    core_clk_ctrl_reg);
-	}
-
-	return err;
+	return ufs_qcom_configure_lpm(hba, true);
 }
 
 static int ufs_qcom_clk_scale_down_post_change(struct ufs_hba *hba)
 {
 	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
+	struct ufs_pa_layer_attr *attr = &host->dev_req_params;
+	int err = 0;
 
 	if (!ufs_qcom_cap_qunipro(host))
 		return 0;
 
-	/* set unipro core clock cycles to 75 and clear clock divider */
-	return ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(hba, 75);
+	if (attr)
+		ufs_qcom_cfg_timers(hba, attr->gear_rx, attr->pwr_rx,
+				    attr->hs_rate, false);
+
+	if (ufs_qcom_cap_svs2(host))
+		/*
+		 * For SVS2 set unipro core clock cycles to 37 and
+		 * clear clock divider
+		 */
+		err = ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(hba, 37);
+	else
+		/*
+		 * For SVS set unipro core clock cycles to 75 and
+		 * clear clock divider
+		 */
+		err = ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(hba, 75);
+
+	return err;
 }
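The argument to ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div() is the number of core clock cycles in one microsecond, which is just the core clock rate in MHz. The values 150, 75 and 37 used here would match nominal, SVS and SVS2 core clock rates of 150 MHz, 75 MHz and 37.5 MHz (the last truncated by integer division); those rates are an assumption for illustration, not stated in this patch. The arithmetic:

    #include <stdio.h>

    int main(void)
    {
        /* Cycles per microsecond equals the clock rate in MHz. */
        const unsigned long rates_hz[] = { 150000000, 75000000, 37500000 };
        int i;

        for (i = 0; i < 3; i++)
            printf("%9lu Hz -> %lu cycles/us\n",
                   rates_hz[i], rates_hz[i] / 1000000);
        return 0;
    }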
 
 static int ufs_qcom_clk_scale_notify(struct ufs_hba *hba,
 		bool scale_up, enum ufs_notify_change_status status)
 {
 	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
-	struct ufs_pa_layer_attr *dev_req_params = &host->dev_req_params;
 	int err = 0;
 
-	if (status == PRE_CHANGE) {
+	switch (status) {
+	case PRE_CHANGE:
 		if (scale_up)
 			err = ufs_qcom_clk_scale_up_pre_change(hba);
 		else
 			err = ufs_qcom_clk_scale_down_pre_change(hba);
-	} else {
-		if (scale_up)
-			err = ufs_qcom_clk_scale_up_post_change(hba);
-		else
+		break;
+	case POST_CHANGE:
+		if (!scale_up)
 			err = ufs_qcom_clk_scale_down_post_change(hba);
 
-		if (err || !dev_req_params)
+		ufs_qcom_update_bus_bw_vote(host);
+		break;
+	default:
+		dev_err(hba->dev, "%s: invalid status %d\n", __func__, status);
+		err = -EINVAL;
+		break;
+	}
+
+	return err;
+}
+
+/*
+ * This function should be called to restore the security configuration of UFS
+ * register space after coming out of UFS host core power collapse.
+ *
+ * @hba: host controller instance
+ * @restore_sec_cfg: Set "true" if secure configuration needs to be restored
+ * and set "false" when secure configuration is lost.
+ */
+static int ufs_qcom_update_sec_cfg(struct ufs_hba *hba, bool restore_sec_cfg)
+{
+	int ret = 0;
+	u64 scm_ret = 0;
+	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
+
+	/* scm command buffer structure */
+	struct msm_scm_cmd_buf {
+		unsigned int device_id;
+		unsigned int spare;
+	} cbuf = {0};
+	#define RESTORE_SEC_CFG_CMD	0x2
+	#define UFS_TZ_DEV_ID		19
+
+	if (!host || !hba->vreg_info.vdd_hba ||
+	    !(host->sec_cfg_updated ^ restore_sec_cfg)) {
+		return 0;
+	} else if (host->caps &
+		   UFS_QCOM_CAP_RETAIN_SEC_CFG_AFTER_PWR_COLLAPSE) {
+		return 0;
+	} else if (!restore_sec_cfg) {
+		/*
+		 * Clear the flag so next time when this function is called
+		 * with restore_sec_cfg set to true, we can restore the secure
+		 * configuration.
+		 */
+		host->sec_cfg_updated = false;
+		goto out;
+	} else if (hba->clk_gating.state != CLKS_ON) {
+		/*
+		 * Clocks should be ON to restore the host controller secure
+		 * configuration.
+		 */
 			goto out;
+	}
 
-		ufs_qcom_cfg_timers(hba,
-				    dev_req_params->gear_rx,
-				    dev_req_params->pwr_rx,
-				    dev_req_params->hs_rate,
-				    false);
-		ufs_qcom_update_bus_bw_vote(host);
+	/*
+	 * If we are here, the host controller clocks are running, the host
+	 * controller power collapse feature is supported, and the controller
+	 * has just come out of power collapse.
+	 */
+	cbuf.device_id = UFS_TZ_DEV_ID;
+	ret = scm_restore_sec_cfg(cbuf.device_id, cbuf.spare, &scm_ret);
+	if (ret || scm_ret) {
+		dev_dbg(hba->dev, "%s: failed, ret %d scm_ret %llu\n",
+			__func__, ret, scm_ret);
+		if (!ret)
+			ret = scm_ret;
+	} else {
+		host->sec_cfg_updated = true;
 	}
 
 out:
-	return err;
+	dev_dbg(hba->dev, "%s: ip: restore_sec_cfg %d, op: restore_sec_cfg %d, ret %d scm_ret %llu\n",
+		__func__, restore_sec_cfg, host->sec_cfg_updated, ret, scm_ret);
+	return ret;
+}
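The !(host->sec_cfg_updated ^ restore_sec_cfg) guard returns early whenever the recorded state already matches the request, so the SCM restore call is issued at most once per power collapse cycle. A compact standalone model of the guard:

    #include <stdio.h>
    #include <stdbool.h>

    static bool sec_cfg_updated;

    static void update_sec_cfg(bool restore)
    {
        /* Nothing to do when the state already matches the request. */
        if (!(sec_cfg_updated ^ restore)) {
            printf("restore=%d: no-op\n", restore);
            return;
        }
        if (!restore) {
            /* Configuration lost (power collapse): clear the flag. */
            sec_cfg_updated = false;
            printf("restore=0: marked lost\n");
            return;
        }
        /* The driver would issue the SCM restore call here. */
        sec_cfg_updated = true;
        printf("restore=1: restored via SCM\n");
    }

    int main(void)
    {
        update_sec_cfg(true);  /* restores, sets the flag */
        update_sec_cfg(true);  /* no-op: already restored */
        update_sec_cfg(false); /* power collapse: flag cleared */
        update_sec_cfg(false); /* no-op: already cleared */
        return 0;
    }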
+
+static inline u32 ufs_qcom_get_scale_down_gear(struct ufs_hba *hba)
+{
+	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
+
+	if (ufs_qcom_cap_svs2(host))
+		return UFS_HS_G1;
+	/* Default SVS support @ HS G2 frequencies */
+	return UFS_HS_G2;
+}
+
+void ufs_qcom_print_hw_debug_reg_all(struct ufs_hba *hba, void *priv,
+		void (*print_fn)(struct ufs_hba *hba, int offset, int num_regs,
+				char *str, void *priv))
+{
+	u32 reg;
+	struct ufs_qcom_host *host;
+
+	if (unlikely(!hba)) {
+		pr_err("%s: hba is NULL\n", __func__);
+		return;
+	}
+	if (unlikely(!print_fn)) {
+		dev_err(hba->dev, "%s: print_fn is NULL\n", __func__);
+		return;
+	}
+
+	host = ufshcd_get_variant(hba);
+	if (!(host->dbg_print_en & UFS_QCOM_DBG_PRINT_REGS_EN))
+		return;
+
+	reg = ufs_qcom_get_debug_reg_offset(host, UFS_UFS_DBG_RD_REG_OCSC);
+	print_fn(hba, reg, 44, "UFS_UFS_DBG_RD_REG_OCSC ", priv);
+
+	reg = ufshcd_readl(hba, REG_UFS_CFG1);
+	reg |= UFS_BIT(17);
+	ufshcd_writel(hba, reg, REG_UFS_CFG1);
+
+	reg = ufs_qcom_get_debug_reg_offset(host, UFS_UFS_DBG_RD_EDTL_RAM);
+	print_fn(hba, reg, 32, "UFS_UFS_DBG_RD_EDTL_RAM ", priv);
+
+	reg = ufs_qcom_get_debug_reg_offset(host, UFS_UFS_DBG_RD_DESC_RAM);
+	print_fn(hba, reg, 128, "UFS_UFS_DBG_RD_DESC_RAM ", priv);
+
+	reg = ufs_qcom_get_debug_reg_offset(host, UFS_UFS_DBG_RD_PRDT_RAM);
+	print_fn(hba, reg, 64, "UFS_UFS_DBG_RD_PRDT_RAM ", priv);
+
+	/* clear bit 17 - UTP_DBG_RAMS_EN */
+	ufshcd_rmwl(hba, UFS_BIT(17), 0, REG_UFS_CFG1);
+
+	reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_UAWM);
+	print_fn(hba, reg, 4, "UFS_DBG_RD_REG_UAWM ", priv);
+
+	reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_UARM);
+	print_fn(hba, reg, 4, "UFS_DBG_RD_REG_UARM ", priv);
+
+	reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_TXUC);
+	print_fn(hba, reg, 48, "UFS_DBG_RD_REG_TXUC ", priv);
+
+	reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_RXUC);
+	print_fn(hba, reg, 27, "UFS_DBG_RD_REG_RXUC ", priv);
+
+	reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_DFC);
+	print_fn(hba, reg, 19, "UFS_DBG_RD_REG_DFC ", priv);
+
+	reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_TRLUT);
+	print_fn(hba, reg, 34, "UFS_DBG_RD_REG_TRLUT ", priv);
+
+	reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_TMRLUT);
+	print_fn(hba, reg, 9, "UFS_DBG_RD_REG_TMRLUT ", priv);
+}
+
+static void ufs_qcom_enable_test_bus(struct ufs_qcom_host *host)
+{
+	if (host->dbg_print_en & UFS_QCOM_DBG_PRINT_TEST_BUS_EN) {
+		ufshcd_rmwl(host->hba, UFS_REG_TEST_BUS_EN,
+				UFS_REG_TEST_BUS_EN, REG_UFS_CFG1);
+		ufshcd_rmwl(host->hba, TEST_BUS_EN, TEST_BUS_EN, REG_UFS_CFG1);
+	} else {
+		ufshcd_rmwl(host->hba, UFS_REG_TEST_BUS_EN, 0, REG_UFS_CFG1);
+		ufshcd_rmwl(host->hba, TEST_BUS_EN, 0, REG_UFS_CFG1);
+	}
 }
 
 static void ufs_qcom_get_default_testbus_cfg(struct ufs_qcom_host *host)
 {
 	/* provide a legal default configuration */
-	host->testbus.select_major = TSTBUS_UAWM;
-	host->testbus.select_minor = 1;
+	host->testbus.select_major = TSTBUS_UNIPRO;
+	host->testbus.select_minor = 37;
 }
 
-static bool ufs_qcom_testbus_cfg_is_ok(struct ufs_qcom_host *host)
+bool ufs_qcom_testbus_cfg_is_ok(struct ufs_qcom_host *host,
+		u8 select_major, u8 select_minor)
 {
-	if (host->testbus.select_major >= TSTBUS_MAX) {
+	if (select_major >= TSTBUS_MAX) {
 		dev_err(host->hba->dev,
 			"%s: UFS_CFG1[TEST_BUS_SEL} may not equal 0x%05X\n",
-			__func__, host->testbus.select_major);
+			__func__, select_major);
 		return false;
 	}
 
@@ -1388,28 +2578,33 @@
 	 * mappings of select_minor, since there is no harm in
 	 * configuring a non-existent select_minor
 	 */
-	if (host->testbus.select_minor > 0x1F) {
+	if (select_minor > 0xFF) {
 		dev_err(host->hba->dev,
 			"%s: 0x%05X is not a legal testbus option\n",
-			__func__, host->testbus.select_minor);
+			__func__, select_minor);
 		return false;
 	}
 
 	return true;
 }
 
+/*
+ * The caller of this function must make sure that the controller
+ * is out of runtime suspend and appropriate clocks are enabled
+ * before accessing the controller registers.
+ */
 int ufs_qcom_testbus_config(struct ufs_qcom_host *host)
 {
-	int reg;
-	int offset;
+	int reg = 0;
+	int offset, ret = 0, testbus_sel_offset = 19;
 	u32 mask = TEST_BUS_SUB_SEL_MASK;
+	unsigned long flags;
+	struct ufs_hba *hba;
 
 	if (!host)
 		return -EINVAL;
-
-	if (!ufs_qcom_testbus_cfg_is_ok(host))
-		return -EPERM;
-
+	hba = host->hba;
+	spin_lock_irqsave(hba->host->host_lock, flags);
 	switch (host->testbus.select_major) {
 	case TSTBUS_UAWM:
 		reg = UFS_TEST_BUS_CTRL_0;
@@ -1457,7 +2652,8 @@
 		break;
 	case TSTBUS_UNIPRO:
 		reg = UFS_UNIPRO_CFG;
-		offset = 1;
+		offset = 20;
+		mask = 0xFFF;
 		break;
 	/*
 	 * No need for a default case, since
@@ -1466,19 +2662,27 @@
 	 */
 	}
 	mask <<= offset;
-
-	pm_runtime_get_sync(host->hba->dev);
-	ufshcd_hold(host->hba, false);
+	spin_unlock_irqrestore(hba->host->host_lock, flags);
+	if (reg) {
 	ufshcd_rmwl(host->hba, TEST_BUS_SEL,
-		    (u32)host->testbus.select_major << 19,
+		    (u32)host->testbus.select_major << testbus_sel_offset,
 		    REG_UFS_CFG1);
 	ufshcd_rmwl(host->hba, mask,
 		    (u32)host->testbus.select_minor << offset,
 		    reg);
-	ufshcd_release(host->hba);
-	pm_runtime_put_sync(host->hba->dev);
-
-	return 0;
+	} else {
+		dev_err(hba->dev, "%s: Problem setting minor\n", __func__);
+		ret = -EINVAL;
+		goto out;
+	}
+	ufs_qcom_enable_test_bus(host);
+	/*
+	 * Make sure the test bus configuration is
+	 * committed before returning.
+	 */
+	mb();
+out:
+	return ret;
 }
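Test bus selection is a two-step read-modify-write: the major selector lands in bits 22:19 of REG_UFS_CFG1 (TEST_BUS_SEL, shifted by testbus_sel_offset = 19), and the minor selector lands in a block-specific register behind a block-specific mask and offset. A standalone sketch of the rmw pattern, using the UNIPRO block's mask/offset from above and a hypothetical major selector value:

    #include <stdio.h>
    #include <stdint.h>

    /* Model of ufshcd_rmwl(): clear the masked field, then set new bits. */
    static uint32_t rmwl(uint32_t reg, uint32_t mask, uint32_t val)
    {
        return (reg & ~mask) | (val & mask);
    }

    int main(void)
    {
        const uint32_t TEST_BUS_SEL_MASK = 0x780000; /* bits 22:19 */
        const int sel_offset = 19;
        const uint32_t major = 9; /* hypothetical TSTBUS_UNIPRO value */
        uint32_t cfg1 = 0;

        cfg1 = rmwl(cfg1, TEST_BUS_SEL_MASK, major << sel_offset);
        printf("REG_UFS_CFG1   = 0x%08x\n", cfg1);

        /* Minor select: block mask (0xFFF) shifted to the block offset (20). */
        const uint32_t minor_mask = 0xFFFu << 20;
        const uint32_t minor = 37;
        uint32_t unipro_cfg = 0;

        unipro_cfg = rmwl(unipro_cfg, minor_mask, minor << 20);
        printf("UFS_UNIPRO_CFG = 0x%08x\n", unipro_cfg);
        return 0;
    }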
 
 static void ufs_qcom_testbus_read(struct ufs_hba *hba)
@@ -1486,13 +2690,50 @@
 	ufs_qcom_dump_regs(hba, UFS_TEST_BUS, 1, "UFS_TEST_BUS ");
 }
 
-static void ufs_qcom_dump_dbg_regs(struct ufs_hba *hba)
+static void ufs_qcom_print_unipro_testbus(struct ufs_hba *hba)
 {
+	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
+	u32 *testbus = NULL;
+	int i, nminor = 256, testbus_len = nminor * sizeof(u32);
+
+	testbus = kmalloc(testbus_len, GFP_KERNEL);
+	if (!testbus)
+		return;
+
+	host->testbus.select_major = TSTBUS_UNIPRO;
+	for (i = 0; i < nminor; i++) {
+		host->testbus.select_minor = i;
+		ufs_qcom_testbus_config(host);
+		testbus[i] = ufshcd_readl(hba, UFS_TEST_BUS);
+	}
+	print_hex_dump(KERN_ERR, "UNIPRO_TEST_BUS ", DUMP_PREFIX_OFFSET,
+			16, 4, testbus, testbus_len, false);
+	kfree(testbus);
+}
+
+static void ufs_qcom_dump_dbg_regs(struct ufs_hba *hba, bool no_sleep)
+{
+	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
+	struct phy *phy = host->generic_phy;
+
 	ufs_qcom_dump_regs(hba, REG_UFS_SYS1CLK_1US, 16,
 			"HCI Vendor Specific Registers ");
+	ufs_qcom_print_hw_debug_reg_all(hba, NULL, ufs_qcom_dump_regs_wrapper);
+
+	if (no_sleep)
+		return;
 
+	/* sleep a bit intermittently as we are dumping too much data */
+	usleep_range(1000, 1100);
 	ufs_qcom_testbus_read(hba);
+	usleep_range(1000, 1100);
+	ufs_qcom_print_unipro_testbus(hba);
+	usleep_range(1000, 1100);
+	ufs_qcom_phy_dbg_register_dump(phy);
+	usleep_range(1000, 1100);
+	ufs_qcom_ice_print_regs(host);
 }
+
 /**
  * struct ufs_hba_qcom_vops - UFS QCOM specific variant operations
  *
@@ -1500,7 +2741,6 @@
  * handshake during initialization.
  */
 static struct ufs_hba_variant_ops ufs_hba_qcom_vops = {
-	.name                   = "qcom",
 	.init                   = ufs_qcom_init,
 	.exit                   = ufs_qcom_exit,
 	.get_ufs_hci_version	= ufs_qcom_get_ufs_hci_version,
@@ -1509,9 +2749,37 @@
 	.hce_enable_notify      = ufs_qcom_hce_enable_notify,
 	.link_startup_notify    = ufs_qcom_link_startup_notify,
 	.pwr_change_notify	= ufs_qcom_pwr_change_notify,
+	.apply_dev_quirks	= ufs_qcom_apply_dev_quirks,
 	.suspend		= ufs_qcom_suspend,
 	.resume			= ufs_qcom_resume,
+	.full_reset		= ufs_qcom_full_reset,
+	.update_sec_cfg		= ufs_qcom_update_sec_cfg,
+	.get_scale_down_gear	= ufs_qcom_get_scale_down_gear,
+	.set_bus_vote		= ufs_qcom_set_bus_vote,
 	.dbg_register_dump	= ufs_qcom_dump_dbg_regs,
+#ifdef CONFIG_DEBUG_FS
+	.add_debugfs		= ufs_qcom_dbg_add_debugfs,
+#endif
+};
+
+static struct ufs_hba_crypto_variant_ops ufs_hba_crypto_variant_ops = {
+	.crypto_req_setup	= ufs_qcom_crypto_req_setup,
+	.crypto_engine_cfg_start	= ufs_qcom_crytpo_engine_cfg_start,
+	.crypto_engine_cfg_end	= ufs_qcom_crytpo_engine_cfg_end,
+	.crypto_engine_reset	  = ufs_qcom_crytpo_engine_reset,
+	.crypto_engine_get_status = ufs_qcom_crypto_engine_get_status,
+};
+
+static struct ufs_hba_pm_qos_variant_ops ufs_hba_pm_qos_variant_ops = {
+	.req_start	= ufs_qcom_pm_qos_req_start,
+	.req_end	= ufs_qcom_pm_qos_req_end,
+};
+
+static struct ufs_hba_variant ufs_hba_qcom_variant = {
+	.name		= "qcom",
+	.vops		= &ufs_hba_qcom_vops,
+	.crypto_vops	= &ufs_hba_crypto_variant_ops,
+	.pm_qos_vops	= &ufs_hba_pm_qos_variant_ops,
 };
 
 /**
@@ -1524,9 +2792,27 @@
 {
 	int err;
 	struct device *dev = &pdev->dev;
+	struct device_node *np = dev->of_node;
+
+	/*
+	 * On qcom platforms, bootdevice is the primary storage
+	 * device. This device can either be eMMC or UFS.
+	 * The type of device connected is detected at runtime.
+	 * So, if an eMMC device is connected, and this function
+	 * is invoked, it would turn-off the regulator if it detects
+	 * that the storage device is not ufs.
+	 * These regulators are turned ON by the bootloaders & turning
+	 * them off without sending PON may damage the connected device.
+	 * Hence, check for the connected device early-on & don't turn-off
+	 * the regulators.
+	 */
+	if (of_property_read_bool(np, "non-removable") &&
+	    strlen(android_boot_dev) &&
+	    strcmp(android_boot_dev, dev_name(dev)))
+		return -ENODEV;
 
 	/* Perform generic probe */
-	err = ufshcd_pltfrm_init(pdev, &ufs_hba_qcom_vops);
+	err = ufshcd_pltfrm_init(pdev, &ufs_hba_qcom_variant);
 	if (err)
 		dev_err(dev, "ufshcd_pltfrm_init() failed %d\n", err);
 
diff -ruw linux-4.4.115/drivers/scsi/ufs/ufs-qcom.h linux-4.4.115-fbx/drivers/scsi/ufs/ufs-qcom.h
--- linux-4.4.115/drivers/scsi/ufs/ufs-qcom.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/scsi/ufs/ufs-qcom.h	2019-01-22 16:16:26.631274732 +0100
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -14,9 +14,14 @@
 #ifndef UFS_QCOM_H_
 #define UFS_QCOM_H_
 
+#include <linux/phy/phy.h>
+#include <linux/pm_qos.h>
+#include "ufshcd.h"
+
 #define MAX_UFS_QCOM_HOSTS	1
 #define MAX_U32                 (~(u32)0)
 #define MPHY_TX_FSM_STATE       0x41
+#define MPHY_RX_FSM_STATE       0xC1
 #define TX_FSM_HIBERN8          0x1
 #define HBRN8_POLL_TOUT_MS      100
 #define DEFAULT_CLK_RATE_HZ     1000000
@@ -71,6 +76,7 @@
 	UFS_AH8_CFG				= 0xFC,
 };
 
+
 /* QCOM UFS host controller vendor specific debug registers */
 enum {
 	UFS_DBG_RD_REG_UAWM			= 0x100,
@@ -94,7 +100,8 @@
 /* bit definitions for REG_UFS_CFG1 register */
 #define QUNIPRO_SEL	UFS_BIT(0)
 #define TEST_BUS_EN		BIT(18)
-#define TEST_BUS_SEL		GENMASK(22, 19)
+#define TEST_BUS_SEL		0x780000
+#define UFS_REG_TEST_BUS_EN	BIT(30)
 
 /* bit definitions for REG_UFS_CFG2 register */
 #define UAWM_HW_CGC_EN		(1 << 0)
@@ -114,6 +121,17 @@
 				 DFC_HW_CGC_EN | TRLUT_HW_CGC_EN |\
 				 TMRLUT_HW_CGC_EN | OCSC_HW_CGC_EN)
 
+/* bit definitions for UFS_AH8_CFG register */
+#define CC_UFS_HCLK_REQ_EN		BIT(1)
+#define CC_UFS_SYS_CLK_REQ_EN		BIT(2)
+#define CC_UFS_ICE_CORE_CLK_REQ_EN	BIT(3)
+#define CC_UFS_UNIPRO_CORE_CLK_REQ_EN	BIT(4)
+#define CC_UFS_AUXCLK_REQ_EN		BIT(5)
+
+#define UFS_HW_CLK_CTRL_EN	(CC_UFS_SYS_CLK_REQ_EN |\
+				 CC_UFS_ICE_CORE_CLK_REQ_EN |\
+				 CC_UFS_UNIPRO_CORE_CLK_REQ_EN |\
+				 CC_UFS_AUXCLK_REQ_EN)
 /* bit offset */
 enum {
 	OFFSET_UFS_PHY_SOFT_RESET           = 1,
@@ -142,10 +160,20 @@
 	 UFS_QCOM_DBG_PRINT_TEST_BUS_EN)
 
 /* QUniPro Vendor specific attributes */
+#define PA_VS_CONFIG_REG1		0x9000
+#define SAVECONFIGTIME_MODE_MASK	0x6000
+
+#define PA_VS_CLK_CFG_REG	0x9004
+#define PA_VS_CLK_CFG_REG_MASK	0x1FF
+
+#define DL_VS_CLK_CFG		0xA00B
+#define DL_VS_CLK_CFG_MASK	0x3FF
+
 #define DME_VS_CORE_CLK_CTRL	0xD002
 /* bit and mask definitions for DME_VS_CORE_CLK_CTRL attribute */
-#define DME_VS_CORE_CLK_CTRL_CORE_CLK_DIV_EN_BIT		BIT(8)
 #define DME_VS_CORE_CLK_CTRL_MAX_CORE_CLK_1US_CYCLES_MASK	0xFF
+#define DME_VS_CORE_CLK_CTRL_CORE_CLK_DIV_EN_BIT		BIT(8)
+#define DME_VS_CORE_CLK_CTRL_DME_HW_CGC_EN			BIT(9)
 
 static inline void
 ufs_qcom_get_controller_revision(struct ufs_hba *hba,
@@ -192,6 +220,26 @@
 	struct device_attribute max_bus_bw;
 };
 
+/**
+ * struct ufs_qcom_ice_data - ICE related information
+ * @vops:	pointer to variant operations of ICE
+ * @async_done:	completion for supporting ICE's driver asynchronous nature
+ * @pdev:	pointer to the proper ICE platform device
+ * @state:      UFS-ICE interface's internal state (see
+ *       ufs-qcom-ice.h for possible internal states)
+ * @quirks:     UFS-ICE interface related quirks
+ * @crypto_engine_err: crypto engine errors
+ */
+struct ufs_qcom_ice_data {
+	struct qcom_ice_variant_ops *vops;
+	struct platform_device *pdev;
+	int state;
+
+	u16 quirks;
+
+	bool crypto_engine_err;
+};
+
 /* Host controller hardware version: major.minor.step */
 struct ufs_hw_version {
 	u16 step;
@@ -199,11 +247,76 @@
 	u8 major;
 };
 
+#ifdef CONFIG_DEBUG_FS
+struct qcom_debugfs_files {
+	struct dentry *debugfs_root;
+	struct dentry *dbg_print_en;
+	struct dentry *testbus;
+	struct dentry *testbus_en;
+	struct dentry *testbus_cfg;
+	struct dentry *testbus_bus;
+	struct dentry *dbg_regs;
+	struct dentry *pm_qos;
+};
+#endif
+
 struct ufs_qcom_testbus {
 	u8 select_major;
 	u8 select_minor;
 };
 
+/* PM QoS voting state  */
+enum ufs_qcom_pm_qos_state {
+	PM_QOS_UNVOTED,
+	PM_QOS_VOTED,
+	PM_QOS_REQ_VOTE,
+	PM_QOS_REQ_UNVOTE,
+};
+
+/**
+ * struct ufs_qcom_pm_qos_cpu_group - data related to cluster PM QoS voting
+ *	logic
+ * @req: request object for PM QoS
+ * @vote_work: work object for voting procedure
+ * @unvote_work: work object for un-voting procedure
+ * @host: back pointer to the main structure
+ * @state: voting state machine current state
+ * @latency_us: requested latency value used for cluster voting, in
+ *	microseconds
+ * @mask: cpu mask defined for this cluster
+ * @active_reqs: number of active requests on this cluster
+ */
+struct ufs_qcom_pm_qos_cpu_group {
+	struct pm_qos_request req;
+	struct work_struct vote_work;
+	struct work_struct unvote_work;
+	struct ufs_qcom_host *host;
+	enum ufs_qcom_pm_qos_state state;
+	s32 latency_us;
+	cpumask_t mask;
+	int active_reqs;
+};
+
+/**
+ * struct ufs_qcom_pm_qos - data related to PM QoS voting logic
+ * @groups: PM QoS cpu group state array
+ * @enable_attr: sysfs attribute to enable/disable PM QoS voting logic
+ * @latency_attr: sysfs attribute to set latency value
+ * @workq: single threaded workqueue to run PM QoS voting/unvoting
+ * @num_groups: number of cpu groups defined
+ * @default_cpu: cpu to use for voting for requests that do not specify a cpu
+ * @is_enabled: flag specifying whether voting logic is enabled
+ */
+struct ufs_qcom_pm_qos {
+	struct ufs_qcom_pm_qos_cpu_group *groups;
+	struct device_attribute enable_attr;
+	struct device_attribute latency_attr;
+	struct workqueue_struct *workq;
+	int num_groups;
+	int default_cpu;
+	bool is_enabled;
+};
+
 struct ufs_qcom_host {
 	/*
 	 * Set this capability if host controller supports the QUniPro mode
@@ -218,6 +331,17 @@
 	 * configuration even after UFS controller core power collapse.
 	 */
 	#define UFS_QCOM_CAP_RETAIN_SEC_CFG_AFTER_PWR_COLLAPSE	UFS_BIT(1)
+
+	/*
+	 * Set this capability if host controller supports Qunipro internal
+	 * clock gating.
+	 */
+	#define UFS_QCOM_CAP_QUNIPRO_CLK_GATING		UFS_BIT(2)
+
+	/*
+	 * Set this capability if host controller supports SVS2 frequencies.
+	 */
+	#define UFS_QCOM_CAP_SVS2	UFS_BIT(3)
 	u32 caps;
 
 	struct phy *generic_phy;
@@ -228,24 +352,51 @@
 	struct clk *tx_l0_sync_clk;
 	struct clk *rx_l1_sync_clk;
 	struct clk *tx_l1_sync_clk;
-	bool is_lane_clks_enabled;
 
+	/* PM Quality-of-Service (QoS) data */
+	struct ufs_qcom_pm_qos pm_qos;
+
+	bool disable_lpm;
+	bool is_lane_clks_enabled;
+	bool sec_cfg_updated;
+	struct ufs_qcom_ice_data ice;
 	void __iomem *dev_ref_clk_ctrl_mmio;
 	bool is_dev_ref_clk_enabled;
 	struct ufs_hw_version hw_ver;
-
 	u32 dev_ref_clk_en_mask;
-
+#ifdef CONFIG_DEBUG_FS
+	struct qcom_debugfs_files debugfs_files;
+#endif
 	/* Bitmask for enabling debug prints */
 	u32 dbg_print_en;
 	struct ufs_qcom_testbus testbus;
+
+	spinlock_t ice_work_lock;
+	struct work_struct ice_cfg_work;
+	struct request *req_pending;
+	struct ufs_vreg *vddp_ref_clk;
+	bool work_pending;
+};
+
+static inline u32
+ufs_qcom_get_debug_reg_offset(struct ufs_qcom_host *host, u32 reg)
+{
+	if (host->hw_ver.major <= 0x02)
+		return UFS_CNTLR_2_x_x_VEN_REGS_OFFSET(reg);
+
+	return UFS_CNTLR_3_x_x_VEN_REGS_OFFSET(reg);
 };
 
 #define ufs_qcom_is_link_off(hba) ufshcd_is_link_off(hba)
 #define ufs_qcom_is_link_active(hba) ufshcd_is_link_active(hba)
 #define ufs_qcom_is_link_hibern8(hba) ufshcd_is_link_hibern8(hba)
 
+bool ufs_qcom_testbus_cfg_is_ok(struct ufs_qcom_host *host, u8 select_major,
+		u8 select_minor);
 int ufs_qcom_testbus_config(struct ufs_qcom_host *host);
+void ufs_qcom_print_hw_debug_reg_all(struct ufs_hba *hba, void *priv,
+		void (*print_fn)(struct ufs_hba *hba, int offset, int num_regs,
+				char *str, void *priv));
 
 static inline bool ufs_qcom_cap_qunipro(struct ufs_qcom_host *host)
 {
@@ -255,4 +406,14 @@
 		return false;
 }
 
+static inline bool ufs_qcom_cap_qunipro_clk_gating(struct ufs_qcom_host *host)
+{
+	return !!(host->caps & UFS_QCOM_CAP_QUNIPRO_CLK_GATING);
+}
+
+static inline bool ufs_qcom_cap_svs2(struct ufs_qcom_host *host)
+{
+	return !!(host->caps & UFS_QCOM_CAP_SVS2);
+}
+
 #endif /* UFS_QCOM_H_ */
diff -ruw linux-4.4.115/drivers/scsi/ufs/unipro.h linux-4.4.115-fbx/drivers/scsi/ufs/unipro.h
--- linux-4.4.115/drivers/scsi/ufs/unipro.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/scsi/ufs/unipro.h	2019-01-22 16:16:26.635274769 +0100
@@ -1,6 +1,4 @@
 /*
- * drivers/scsi/ufs/unipro.h
- *
  * Copyright (C) 2013 Samsung Electronics Co., Ltd.
  *
  * This program is free software; you can redistribute it and/or modify
@@ -15,6 +13,7 @@
 /*
  * M-TX Configuration Attributes
  */
+#define TX_HIBERN8TIME_CAPABILITY		0x000F
 #define TX_MODE					0x0021
 #define TX_HSRATE_SERIES			0x0022
 #define TX_HSGEAR				0x0023
@@ -48,8 +47,16 @@
 #define RX_ENTER_HIBERN8			0x00A7
 #define RX_BYPASS_8B10B_ENABLE			0x00A8
 #define RX_TERMINATION_FORCE_ENABLE		0x0089
+#define RX_MIN_ACTIVATETIME_CAPABILITY		0x008F
+#define RX_HIBERN8TIME_CAPABILITY		0x0092
+
+#define MPHY_RX_ATTR_ADDR_START			0x81
+#define MPHY_RX_ATTR_ADDR_END			0xC1
 
 #define is_mphy_tx_attr(attr)			(attr < RX_MODE)
+#define RX_MIN_ACTIVATETIME_UNIT_US		100
+#define HIBERN8TIME_UNIT_US			100
+
 /*
  * PHY Adpater attributes
  */
@@ -70,6 +77,7 @@
 #define PA_MAXRXSPEEDFAST	0x1541
 #define PA_MAXRXSPEEDSLOW	0x1542
 #define PA_TXLINKSTARTUPHS	0x1544
+#define PA_LOCAL_TX_LCC_ENABLE	0x155E
 #define PA_TXSPEEDFAST		0x1565
 #define PA_TXSPEEDSLOW		0x1566
 #define PA_REMOTEVERINFO	0x15A0
@@ -83,6 +91,7 @@
 #define PA_MAXRXHSGEAR		0x1587
 #define PA_RXHSUNTERMCAP	0x15A5
 #define PA_RXLSTERMCAP		0x15A6
+#define PA_GRANULARITY		0x15AA
 #define PA_PACPREQTIMEOUT	0x1590
 #define PA_PACPREQEOBTIMEOUT	0x1591
 #define PA_HIBERN8TIME		0x15A7
@@ -110,6 +119,23 @@
 #define PA_STALLNOCONFIGTIME	0x15A3
 #define PA_SAVECONFIGTIME	0x15A4
 
+#define PA_TACTIVATE_TIME_UNIT_US	10
+#define PA_HIBERN8_TIME_UNIT_US		100
+
+#define PA_GRANULARITY_MIN_VAL	1
+#define PA_GRANULARITY_MAX_VAL	6
+
+/* PHY Adapter Protocol Constants */
+#define PA_MAXDATALANES	4
+
+#define DL_FC0ProtectionTimeOutVal_Default	8191
+#define DL_TC0ReplayTimeOutVal_Default		65535
+#define DL_AFC0ReqTimeOutVal_Default		32767
+
+#define DME_LocalFC0ProtectionTimeOutVal	0xD041
+#define DME_LocalTC0ReplayTimeOutVal		0xD042
+#define DME_LocalAFC0ReqTimeOutVal		0xD043
+
 /* PA power modes */
 enum {
 	FAST_MODE	= 1,
@@ -143,6 +169,16 @@
 	UFS_HS_G3,		/* HS Gear 3 */
 };
 
+enum ufs_unipro_ver {
+	UFS_UNIPRO_VER_RESERVED = 0,
+	UFS_UNIPRO_VER_1_40 = 1, /* UniPro version 1.40 */
+	UFS_UNIPRO_VER_1_41 = 2, /* UniPro version 1.41 */
+	UFS_UNIPRO_VER_1_6 = 3,  /* UniPro version 1.6 */
+	UFS_UNIPRO_VER_MAX = 4,  /* UniPro unsupported version */
+	/* UniPro version field mask in PA_LOCALVERINFO */
+	UFS_UNIPRO_VER_MASK = 0xF,
+};
+
 /*
  * Data Link Layer Attributes
  */
diff -ruw linux-4.4.115/drivers/soc/Makefile linux-4.4.115-fbx/drivers/soc/Makefile
--- linux-4.4.115/drivers/soc/Makefile	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/soc/Makefile	2019-01-22 16:16:26.643274841 +0100
@@ -7,7 +7,9 @@
 obj-$(CONFIG_ARCH_MEDIATEK)	+= mediatek/
 obj-$(CONFIG_ARCH_QCOM)		+= qcom/
 obj-$(CONFIG_ARCH_ROCKCHIP)		+= rockchip/
+obj-$(CONFIG_QCOM_SCM_QCPE)	+= qcom/
 obj-$(CONFIG_ARCH_SUNXI)	+= sunxi/
 obj-$(CONFIG_ARCH_TEGRA)	+= tegra/
 obj-$(CONFIG_SOC_TI)		+= ti/
 obj-$(CONFIG_PLAT_VERSATILE)	+= versatile/
+obj-$(CONFIG_ARCH_QCOM)		+= qcom/
diff -ruw linux-4.4.115/drivers/soc/qcom/Kconfig linux-4.4.115-fbx/drivers/soc/qcom/Kconfig
--- linux-4.4.115/drivers/soc/qcom/Kconfig	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/soc/qcom/Kconfig	2019-10-29 09:26:24.805214550 +0100
@@ -1,6 +1,223 @@
 #
 # QCOM Soc drivers
 #
+source "drivers/soc/qcom/hab/Kconfig"
+
+config MSM_INRUSH_CURRENT_MITIGATION
+	bool "Inrush-current mitigation Driver"
+	help
+	  This driver helps mitigate in-rush current on MSM
+	  chipsets which have voltage droop issues due to a sudden
+	  huge load on a rail. The driver introduces an intermediate
+	  load to mitigate the in-rush current.
+
+config MSM_PFE_WA
+	depends on HW_PERF_EVENTS
+	bool "Enable a H/W PFE WA"
+	help
+	  Sometimes the PFTLB entries get stuck in the invalid state and new
+	  prefetches get dropped. As a workaround, count L1 prefetches dropped
+	  due to PFTLB misses and reset the H/W PFE when an overflow happens.
+
+	  If unsure, say N.
+
+config QCOM_COMMON_LOG
+	bool "QCOM Common Log Support"
+	help
+	  Use this to export symbols of some log address and variables
+	  that need to parse crash dump files to a memory dump table. This
+	  table can be used by post analysis tools to extract information
+	  from memory when device crashes.
+
+config MSM_SMEM
+	depends on ARCH_QCOM
+	depends on REMOTE_SPINLOCK_MSM
+	bool "MSM Shared Memory (SMEM)"
+	help
+	  Support for the shared memory interface between the various
+	  processors in the System on a Chip (SoC) which allows basic
+	  inter-processor communication.
+
+config QPNP_HAPTIC
+	tristate "Haptic support for QPNP PMIC"
+	depends on ARCH_QCOM
+	help
+	  This option enables device driver support for the haptic peripheral
+	  found on Qualcomm Technologies, Inc. QPNP PMICs.  The haptic
+	  peripheral is capable of driving both LRA and ERM vibrators.  This
+	  module provides haptic feedback for user actions such as a long press
+	  on the touch screen.  It uses the Android timed-output framework.
+
+config QPNP_PBS
+	tristate "PBS trigger support for QPNP PMIC"
+	depends on SPMI
+	help
+	  This driver supports configuring software PBS trigger event through PBS
+	  RAM on Qualcomm Technologies, Inc. QPNP PMICs. This module provides
+	  the APIs to the client drivers that want to send the PBS trigger
+	  event to the PBS RAM.
+
+config MSM_SMD
+	depends on MSM_SMEM
+	bool "MSM Shared Memory Driver (SMD)"
+	help
+	  Support for the shared memory interprocessor communication protocol
+	  which provides virtual point-to-point serial channels between processes
+	  on the apps processor and processes on other processors in the SoC.
+	  Also includes support for the Shared Memory State Machine (SMSM)
+	  protocol which provides a mechanism to publish single bit state
+	  information to one or more processors in the SoC.
+
+config MSM_SMD_DEBUG
+	depends on MSM_SMD
+	bool "MSM SMD debug support"
+	help
+	  Support for debugging SMD and SMSM communication between apps and
+	  other processors in the SoC. Debug support primarily consists of
+	  logs consisting of information such as what interrupts were processed,
+	  what channels caused interrupt activity, and when internal state
+	  change events occur.
+
+config MSM_GLINK
+	bool "Generic Link (G-Link)"
+	help
+	  G-Link is a generic link transport that replaces SMD.  It is used
+	  within a System-on-Chip (SoC) for communication between both internal
+	  processors and external peripherals.  The actual physical transport
+	  is handled by transport plug-ins that can be individually enabled and
+	  configured separately.
+
+config MSM_GLINK_LOOPBACK_SERVER
+	bool "Generic Link (G-Link) Loopback Server"
+	help
+	  G-Link Loopback Server that enable loopback test framework to test
+	  and validate the G-Link protocol stack. It support both local and
+	  remote clients to configure the loopback server and echo back the
+	  data received from the clients.
+
+config MSM_GLINK_SMD_XPRT
+	depends on MSM_SMD
+	depends on MSM_GLINK
+	bool "Generic Link (G-Link) SMD Transport"
+	help
+	  G-Link SMD Transport is a G-Link Transport plug-in.  It allows G-Link
+	  communication to remote entities through a SMD physical transport
+	  channel.  The remote side is assumed to be pure SMD.  The nature of
+	  SMD limits this G-Link transport to only connecting with entities
+	  internal to the System-on-Chip.
+
+config MSM_GLINK_SMEM_NATIVE_XPRT
+	depends on MSM_SMEM
+	depends on MSM_GLINK
+	bool "Generic Link (G-Link) SMEM Native Transport"
+	help
+	  G-Link SMEM Native Transport is a G-Link Transport plug-in.  It allows
+	  G-Link communication to remote entities through a shared memory
+	  physical transport.  The nature of shared memory limits this G-Link
+	  transport to only connecting with entities internal to the
+	  System-on-Chip.
+
+config MSM_GLINK_SPI_XPRT
+	depends on MSM_GLINK
+	tristate "Generic Link (G-Link) SPI Transport"
+	help
+	  G-Link SPI Transport is a Transport plug-in developed over SPI
+	  bus. This transport plug-in performs marshaling of G-Link
+	  commands & data to the appropriate SPI bus wire format and
+	  allows for G-Link communication with remote subsystems that are
+	  external to the System-on-Chip.
+
+config MSM_SPCOM
+	depends on MSM_GLINK
+	bool "Secure Processor Communication over GLINK"
+	help
+	  spcom driver allows loading Secure Processor Applications and
+	  sending messages to Secure Processor Applications.
+	  spcom provides interface to both user space app and kernel driver.
+	  It is using glink as the transport layer, which provides multiple
+	  logical channels over a single physical channel.
+	  The physical layer is based on shared memory and interrupts.
+	  spcom provides a client/server API, although currently only one client
+	  or server is allowed per logical channel.
+
+config MSM_SPSS_UTILS
+	depends on MSM_PIL
+	bool "Secure Processor Utilities"
+	help
+	  The spss-utils driver selects the Secure Processor firmware file
+	  name. The firmware file name for test or production is selected
+	  based on a test fuse.
+	  A different file name is used for different SPSS HW versions,
+	  because the SPSS firmware size is too small to support multiple
+	  HW versions.
+
+config MSM_SMEM_LOGGING
+	depends on MSM_SMEM
+	bool "MSM Shared Memory Logger"
+	help
+	  Enable the shared memory logging to log the events between
+	  the various processors in the system. This option exposes
+	  the shared memory logger at /dev/smem_log and a debugfs node
+	  named smem_log.
+
+config MSM_SMP2P
+	bool "SMSM Point-to-Point (SMP2P)"
+	depends on MSM_SMEM
+	help
+	  Provide point-to-point remote signaling support.
+	  SMP2P enables transferring 32-bit values between
+	  the local and a remote system using shared
+	  memory and interrupts. A client can open multiple
+	  32-bit values by specifying a unique string and
+	  remote processor ID.
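+
+	  A minimal kernel-side sketch (hedged: the entry name and callback
+	  are invented, and the msm_smp2p_* calls are assumed from MSM
+	  kernels of this vintage rather than guaranteed by this text):
+
+	    static struct notifier_block nb = { .notifier_call = my_cb };
+	    struct msm_smp2p_out *out;
+
+	    msm_smp2p_out_open(remote_pid, "example-entry", &nb, &out);
+	    msm_smp2p_out_write(out, 0x1);  /* interrupts the remote side */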
+
+config MSM_SMP2P_TEST
+	bool "SMSM Point-to-Point Test"
+	depends on MSM_SMP2P
+	help
+	  Enables loopback and unit testing support for
+	  SMP2P. Loopback support is used by other
+	  processors to do unit testing. Unit tests
+	  are used to verify the local and remote
+	  implementations.
+
+config MSM_QMI_INTERFACE
+	depends on IPC_ROUTER
+	depends on QMI_ENCDEC
+	bool "MSM QMI Interface Library"
+	help
+	  Library to send and receive QMI messages over IPC Router.
+	  This library provides interface functions to the kernel drivers
+	  to perform QMI message marshaling and to transport the messages
+	  over IPC Router.
+
+config MSM_L2_IA_DEBUG
+	bool "Enable MSM L2 Indirect Access Debug"
+	depends on DEBUG_FS
+	default n
+	help
+	  This option enables L2 indirect access debug
+	  capability. It exposes an L2 indirect access
+	  debugfs interface to get/set data, address,
+	  and target CPUs.
+
+config MSM_RPM_SMD
+	bool "RPM driver using SMD protocol"
+	help
+	  RPM is the dedicated hardware engine for managing shared SoC
+	  resources. This config adds driver support for using SMD as a
+	  transport layer for communication with the RPM hardware. It also selects
+	  the MSM_MPM config that programs the MPM module to monitor interrupts
+	  during sleep modes.
+
+config QCOM_BUS_SCALING
+	bool "Bus scaling driver"
+	help
+	  This option enables bus scaling on MSM devices.  Bus scaling
+	  allows devices to request the clocks be set to rates sufficient
+	  for the active devices' needs without keeping the clocks at max
+	  frequency when a slower speed is sufficient.
+
 config QCOM_GSBI
         tristate "QCOM General Serial Bus Interface"
         depends on ARCH_QCOM
@@ -28,6 +245,100 @@
 	  The driver provides an interface to items in a heap shared among all
 	  processors in a Qualcomm platform.
 
+config MSM_SERVICE_LOCATOR
+	bool "Service Locator"
+	depends on MSM_QMI_INTERFACE
+	help
+	  The Service Locator provides a library to retrieve location
+	  information given a service identifier. Location here translates
+	  to which process domain exports the service, and which subsystem
+	  that process domain will execute in.
+
+config MSM_HVC
+	bool "MSM Hypervisor Call Support"
+	help
+	  This enables the Hypervisor Call module. It provides APIs to call
+	  into the hypervisor, thereby allowing access to services exposed by
+	  the hypervisor. It is primarily intended to be used for the Silicon
+	  Partner/Manufacturer function identifier subrange but supports other
+	  service call subranges as well.
+
+config QCOM_DCC
+	bool "QCOM Data Capture and Compare engine support"
+	help
+	  This option enables the driver for the Data Capture and Compare
+	  engine. The DCC driver provides an interface to configure the DCC
+	  block and read back captured data from DCC's internal SRAM.
+
+config MSM_IPC_ROUTER_SMD_XPRT
+	depends on MSM_SMD
+	depends on IPC_ROUTER
+	bool "MSM SMD XPRT Layer"
+	help
+	  SMD Transport Layer that enables IPC Router communication within
+	  a System-on-Chip (SoC). When the SMD channels become available,
+	  this layer registers a transport with IPC Router and enables
+	  message exchange.
+
+config MSM_SYSMON_GLINK_COMM
+	bool "MSM System Monitor communication support using GLINK transport"
+	depends on MSM_GLINK && MSM_SUBSYSTEM_RESTART
+	help
+	  This option adds support for MSM System Monitor APIs using the GLINK
+	  transport layer. The APIs provided may be used for notifying
+	  subsystems within the SoC about other subsystems' power-up/down
+	  state-changes.
+
+config MSM_IPC_ROUTER_HSIC_XPRT
+	depends on USB_QCOM_IPC_BRIDGE
+	depends on IPC_ROUTER
+	bool "MSM HSIC XPRT Layer"
+	help
+	  HSIC Transport Layer that enables off-chip communication of
+	  IPC Router. When the HSIC endpoint becomes available, this layer
+	  registers the transport with IPC Router and enables message
+	  exchange.
+
+config MSM_IPC_ROUTER_MHI_XPRT
+	depends on MSM_MHI
+	depends on IPC_ROUTER
+	bool "MSM MHI XPRT Layer"
+	help
+	  MHI Transport Layer that enables off-chip communication of
+	  IPC Router. When the MHI endpoint becomes available, this layer
+	  registers the transport with IPC Router and enables message
+	  exchange.
+
+config MSM_IPC_ROUTER_GLINK_XPRT
+	depends on MSM_GLINK
+	depends on IPC_ROUTER
+	bool "MSM GLINK XPRT Layer"
+	help
+	  GLINK Transport Layer that enables IPC Router communication within
+	  a System-on-Chip (SoC). When the GLINK channels become available,
+	  this layer registers a transport with IPC Router and enables
+	  message exchange.
+
+config MSM_SYSTEM_HEALTH_MONITOR
+	bool "System Health Monitor"
+	depends on MSM_QMI_INTERFACE && MSM_SUBSYSTEM_RESTART
+	help
+	  System Health Monitor (SHM) passively monitors the health of the
+	  peripherals connected to the application processor. Software
+	  components in the application processor that experience
+	  communication failure can request the SHM to perform a system-wide
+	  health check. If any failures are detected during the health-check,
+	  then a subsystem restart will be triggered for the failed subsystem.
+
+config MSM_GLINK_PKT
+	bool "Enable device interface for GLINK packet channels"
+	depends on MSM_GLINK
+	help
+	  The G-Link packet driver provides the interface for userspace
+	  clients to communicate over G-Link via device nodes.
+	  This enables userspace clients to read from and write to
+	  G-Link packet channels.
+
 config QCOM_SMD
 	tristate "Qualcomm Shared Memory Driver (SMD)"
 	depends on QCOM_SMEM
@@ -49,3 +360,611 @@
 
 	  Say M here if you want to include support for the Qualcomm RPM as a
 	  module. This will build a module called "qcom-smd-rpm".
+
+config MSM_SPM
+	bool "Driver support for SPM and AVS wrapper hardware"
+	help
+	  Enables support for the SAW and AVS wrapper hardware on MSMs. SPM
+	  hardware is used to manage the processor power during sleep. The
+	  driver allows configuring SPM to allow different low power modes for
+	  both core and L2.
+
+config MSM_L2_SPM
+	bool "SPM support for L2 cache"
+	help
+	  Enable SPM driver support for the L2 cache. Some MSM chipsets allow
+	  control of the L2 cache low power mode with a Subsystem Power Manager.
+	  Enabling this driver allows configuring the L2 SPM for low power modes
+	  on supported chipsets.
+
+config QCOM_SCM
+	bool "Secure Channel Manager (SCM) support"
+	default n
+
+config QCOM_SCM_QCPE
+	bool "Para-Virtualized Secure Channel Manager (SCM) support over QCPE"
+	default n
+
+menuconfig QCOM_SCM_XPU
+	bool "Qualcomm XPU configuration driver"
+	depends on QCOM_SCM
+
+if QCOM_SCM_XPU
+
+choice
+	prompt "XPU Violation Behavior"
+	default QCOM_XPU_ERR_FATAL
+
+config QCOM_XPU_ERR_FATAL
+	bool "Configure XPU violations as fatal errors"
+	help
+	 Select if XPU violations have to be configured as fatal errors.
+
+config QCOM_XPU_ERR_NONFATAL
+	bool "Configure XPU violations as non-fatal errors"
+	help
+	 Select if XPU violations have to be configured as non-fatal errors.
+
+endchoice
+
+endif
+
+config QCOM_SCM_ERRATA
+	depends on DEBUG_FS
+	depends on QCOM_SCM
+	bool "Support for enabling/disabling errata workarounds via debugfs"
+	help
+	  Exposes a debugfs interface intended for advanced system debugging
+	  where it may be desirable to enable or disable certain hardware
+	  errata workarounds at runtime.
+
+	  If unsure, say N.
+
+if ARCH_QCOM
+
+config QCOM_WATCHDOG_V2
+	bool "Qualcomm Watchdog Support"
+	help
+	  This enables the watchdog module. It causes a kernel panic if the
+	  watchdog times out. It allows for detection of CPU hangs and
+	  deadlocks. It does not run during the bootup process, so it will
+	  not catch any early lockups.
+
+config QCOM_IRQ_HELPER
+	bool "QCOM Irq Helper"
+	help
+	  This enables the irq helper module. It exposes two APIs,
+	  int irq_blacklist_on(void) and int irq_blacklist_off(void),
+	  to other kernel modules.
+	  These two APIs are used to control the blacklist used
+	  by the irq balancer.
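+
+	  A minimal caller sketch (hypothetical module code; only the two
+	  APIs named above come from this description):
+
+	    irq_blacklist_on();      /* activate the balancer's blacklist */
+	    do_latency_sensitive_work();
+	    irq_blacklist_off();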
+
+config QCOM_MEMORY_DUMP
+	bool "Qualcomm Memory Dump Support"
+	help
+	  This enables the memory dump feature. It allows various client
+	  subsystems to register respective dump regions. At the time
+	  of deadlocks or CPU hangs, these dump regions are captured to
+	  give a snapshot of the system at the time of the crash.
+
+config QCOM_MEMORY_DUMP_V2
+	bool "QCOM Memory Dump V2 Support"
+	help
+	  This enables the memory dump feature. It allows various client
+	  subsystems to register respective dump regions. At the time
+	  of deadlocks or CPU hangs, these dump regions are captured to
+	  give a snapshot of the system at the time of the crash.
+
+config QCOM_MINIDUMP
+	bool "QCOM Minidump Support"
+	depends on MSM_SMEM && QCOM_DLOAD_MODE
+	help
+	  This enables the minidump feature. It allows various clients to
+	  register to dump their state when the system enters a bad state
+	  (panic, WDT, etc.). This uses SMEM to store all registered client
+	  information. All registered entries are dumped only when DLOAD mode
+	  is enabled.
+
+config MINIDUMP_MAX_ENTRIES
+	int "Minidump Maximum num of entries"
+	default 200
+	depends on QCOM_MINIDUMP
+	help
+	  This defines the maximum number of entries to be allocated for the
+	  application subsystem in the Minidump SMEM table.
+
+config ICNSS
+	tristate "Platform driver for Q6 integrated connectivity"
+	select CNSS_UTILS
+	---help---
+	  This module adds support for the Q6 integrated WLAN connectivity
+	  subsystem. This module is responsible for communicating WLAN on/off
+	  control messages to the firmware over a QMI channel. It is also
+	  responsible for handling WLAN PD restart notifications.
+
+config ICNSS_DEBUG
+	bool "ICNSS debug support"
+	depends on ICNSS
+	---help---
+	  Say 'Y' here to enable ICNSS driver debug support. Debug support
+	  primarily consists of logs of information related to hardware
+	  register access, and enables BUG_ON for certain cases to aid
+	  debugging.
+
+config MSM_SECURE_BUFFER
+	bool "Helper functions for securing buffers through TZ"
+	help
+	  Say 'Y' here for targets that need to call into TZ to secure
+	  memory buffers. This ensures that only the correct clients can
+	  use this memory and no unauthorized access is made to the
+	  buffer.
+
+config MSM_TZ_SMMU
+	bool "Helper functions for SMMU configuration through TZ"
+	depends on ARCH_MSMTHULIUM
+	help
+	  Say 'Y' here for targets that need to call into TZ to configure
+	  SMMUs for any reason (for example, for errata workarounds or
+	  configuration of SMMU virtualization).
+
+	  If unsure, say N.
+
+config MSM_GLADIATOR_ERP
+	tristate "GLADIATOR coherency interconnect error reporting driver"
+	help
+	  Support dumping debug information for the GLADIATOR
+	  cache interconnect in the error interrupt handler.
+	  Meant to be used for debug scenarios only.
+
+	  If unsure, say N.
+
+config MSM_GLADIATOR_ERP_V2
+	tristate "GLADIATOR coherency interconnect error reporting driver v2"
+	help
+	  Support dumping debug information for the GLADIATOR
+	  cache interconnect in the error interrupt handler.
+	  Meant to be used for debug scenarios only.
+
+	  If unsure, say N.
+
+config PANIC_ON_GLADIATOR_ERROR_V2
+	depends on MSM_GLADIATOR_ERP_V2
+	bool "Panic on GLADIATOR error report v2"
+	help
+	  Panic upon detection of a GLADIATOR coherency interconnect error
+	  in order to support dumping debug information.
+	  Meant to be used for debug scenarios only.
+
+	  If unsure, say N.
+
+config MSM_GLADIATOR_ERROR_V2_MAIN_LOGGER_ONLY
+	depends on MSM_GLADIATOR_ERP_V2
+	bool "QCOM Gladiator error v2 main logger support only"
+	help
+	  GLADIATOR has two error loggers to report captured errors.
+	  By default, both error loggers are enabled.
+	  This option enables only the main error logger.
+	  If unsure, say N.
+
+config MSM_GLADIATOR_HANG_DETECT
+	tristate "MSM Gladiator Hang Detection Support"
+	help
+	  This enables the gladiator hang detection module.
+	  If the configured threshold is reached, it causes SoC reset on
+	  gladiator hang detection and collects the context for the
+	  gladiator hang.
+
+config MSM_CORE_HANG_DETECT
+	tristate "MSM Core Hang Detection Support"
+	help
+	  This enables the core hang detection module. It causes SoC
+	  reset on core hang detection and collects the core context
+	  for hang.
+
+config MSM_RUN_QUEUE_STATS
+	bool "Enable collection and exporting of MSM Run Queue stats to userspace"
+	help
+	  This option enables the driver to periodically collect kernel run
+	  queue statistics and calculate the load of the system. This
+	  information is exported to userspace via sysfs entries; userspace
+	  algorithms use it to decide when to turn CPU cores on or off.
+
+config MSM_JTAGV8
+	bool "Debug and ETM trace support across power collapse for ARMv8"
+	default y if CORESIGHT_SOURCE_ETM4X
+	help
+	  Enables support for debugging (specifically breakpoints) and ETM
+	  processor tracing across power collapse, both for JTAG and OS hosted
+	  software running on an ARMv8 target. Enabling this will ensure debug
+	  and ETM registers are saved and restored across power collapse.
+
+	  If unsure, say 'N' here to avoid potential power, performance and
+	  memory penalty.
+
+config MSM_BOOT_STATS
+	bool "Use MSM boot stats reporting"
+	help
+	  Use this to report MSM boot stats such as bootloader throughput,
+	  display init, and total boot time.
+	  These figures are reported in MPM sleep clock cycles and have a
+	  resolution of 31 bits, as 1 bit is used as an overflow check.
+
+config MSM_BOOT_TIME_MARKER
+	bool "Use MSM boot time marker reporting"
+	depends on MSM_BOOT_STATS
+	help
+	  Use this to mark MSM boot KPIs for measurement; it provides
+	  instrumentation for boot time measurement.
+	  To create an entry from the kernel, call the "place_marker" function.
+	  From userspace, write the marker name to
+	  "/sys/kernel/debug/bootkpi/kpi_values".
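+	  For example (a sketch; the marker strings are invented, the node
+	  name comes from the text above):
+
+	    place_marker("M - example subsystem ready");   /* kernel side */
+
+	    echo "M - example marker" > /sys/kernel/debug/bootkpi/kpi_values
+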
+	  If unsure, say N.
+
+config QCOM_CPUSS_DUMP
+	bool "CPU Subsystem Dumping support"
+	help
+	  Add support to dump various hardware entities, such as the
+	  instruction and data TLBs as well as the unified TLB, which are
+	  part of the CPU subsystem, to an allocated buffer. This allows for
+	  analysis of the entities if corruption is suspected.
+	  If unsure, say N.
+
+config MSM_QDSP6_APRV2
+	bool "Audio QDSP6 APRv2 support"
+	depends on MSM_SMD
+	help
+	  Enable APRv2 IPC protocol support between
+	  application processor and QDSP6. APR is
+	  used by audio driver to configure QDSP6's
+	  ASM, ADM and AFE.
+
+config MSM_QDSP6_APRV2_VM
+	bool "Audio QDSP6 APRv2 virtualization support"
+	depends on MSM_HAB
+	help
+	  Enable APRv2 IPC protocol support over
+	  HAB between application processor and
+	  QDSP6. APR is used by audio driver to
+	  configure QDSP6's ASM, ADM and AFE.
+
+config MSM_QDSP6_APRV3
+	bool "Audio QDSP6 APRv3 support"
+	depends on MSM_SMD
+	help
+	  Enable APRv3 IPC protocol support between
+	  application processor and QDSP6. APR is
+	  used by audio driver to configure QDSP6v2's
+	  ASM, ADM and AFE.
+
+config MSM_QDSP6_APRV2_GLINK
+	bool "Audio QDSP6 APRv2 over Glink support"
+	depends on MSM_GLINK
+	help
+	  Enable APRv2 IPC protocol support over
+	  Glink between application processor and
+	  QDSP6. APR is used by audio driver to
+	  configure QDSP6's ASM, ADM and AFE.
+
+config MSM_QDSP6_APRV3_GLINK
+	bool "Audio QDSP6 APRv3 over Glink support"
+	depends on MSM_GLINK
+	help
+	  Enable APRv3 IPC protocol support over
+	  Glink between application processor and
+	  QDSP6. APR is used by audio driver to
+	  configure QDSP6v2's ASM, ADM and AFE.
+
+config MSM_QDSP6_SSR
+	bool "Audio QDSP6 SSR support"
+	depends on MSM_QDSP6_APRV2 || MSM_QDSP6_APRV3 || \
+		MSM_QDSP6_APRV2_GLINK || MSM_QDSP6_APRV3_GLINK
+	help
+	  Enable Subsystem Restart. Reset audio
+	  clients when the ADSP subsystem is
+	  restarted. Subsystem Restart for audio
+	  is only used for processes on the ADSP
+	  and signals audio drivers through APR.
+
+
+config MSM_QDSP6_PDR
+	bool "Audio QDSP6 PDR support"
+	depends on MSM_QDSP6_APRV2 || MSM_QDSP6_APRV3 || \
+		MSM_QDSP6_APRV2_GLINK || MSM_QDSP6_APRV3_GLINK
+	help
+	  Enable Protection Domain Restart. Reset
+	  audio clients when a process on the ADSP
+	  is restarted. PDR for audio is only used
+	  for processes on the ADSP and signals
+	  audio drivers through APR.
+
+config MSM_QDSP6_NOTIFIER
+	bool "Audio QDSP6 notifier support"
+	depends on MSM_QDSP6_SSR || MSM_QDSP6_PDR
+	help
+	  Enable notifier which decides whether
+	  to use SSR or PDR and notifies all
+	  audio clients of the event. Both SSR
+	  and PDR are recovery methods when
+	  there is a crash on ADSP. Audio drivers
+	  are contacted by ADSP through APR.
+
+config MSM_ADSP_LOADER
+	tristate "ADSP loader support"
+	select SND_SOC_MSM_APRV2_INTF
+	depends on MSM_QDSP6_APRV2 || MSM_QDSP6_APRV3 || \
+		MSM_QDSP6_APRV2_GLINK || MSM_QDSP6_APRV3_GLINK
+	help
+	  Enable ADSP image loader.
+	  The ADSP loader brings ADSP out of reset
+	  for the platforms that use APRv2.
+	  Say M if you want to enable this module.
+
+config MSM_CDSP_LOADER
+	tristate "CDSP loader support"
+	help
+	  Enable CDSP image loader.
+	  The CDSP loader brings CDSP out of reset
+	  during boot.
+	  Say M if you want to enable this module.
+
+config MSM_PERFORMANCE
+	tristate "msm_performance driver to support perflock request"
+	help
+	  This driver is used to set minfreq/maxfreq for CPUs from userspace
+	  via perflock. It also adds CPU hotplug support for userspace. It
+	  ensures that no more than a user-specified number of CPUs stay
+	  online at any given point in time. It also provides CPU/IO intensive
+	  workload detection for userspace.
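+
+	  Usage sketch (the parameter names and the "cpu:freq" value format
+	  are assumptions about this driver, not guaranteed by this text):
+
+	    echo "0:1036800" > /sys/module/msm_performance/parameters/cpu_min_freq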
+
+config MSM_PERFORMANCE_HOTPLUG_ON
+	bool "Hotplug functionality through msm_performance turned on"
+	depends on MSM_PERFORMANCE
+	help
+	  Setting this flag to true enables the nodes needed for the
+	  core-control functionality of hotplugging cores through
+	  msm_performance if no default core-control driver is available.
+
+endif # ARCH_QCOM
+
+config MSM_SUBSYSTEM_RESTART
+	bool "MSM Subsystem Restart"
+	help
+	  This option enables the MSM subsystem restart framework.
+
+	  The MSM subsystem restart framework provides support to boot,
+	  shutdown, and restart subsystems with a reference counted API.
+	  It also notifies userspace of transitions between these states via
+	  sysfs.
+
+config MSM_SYSMON_COMM
+	bool "MSM System Monitor communication support"
+	depends on MSM_SMD && MSM_SUBSYSTEM_RESTART
+	help
+	  This option adds support for MSM System Monitor library, which
+	  provides an API that may be used for notifying subsystems within
+	  the SoC about other subsystems' power-up/down state-changes.
+
+config MSM_PIL
+	bool "Peripheral image loading"
+	select FW_LOADER
+	default n
+	help
+	  Some peripherals need to be loaded into memory before they can be
+	  brought out of reset.
+
+	  Say yes to support these devices.
+
+config MSM_PIL_SSR_GENERIC
+	tristate "MSM Subsystem Boot Support"
+	depends on MSM_PIL && MSM_SUBSYSTEM_RESTART
+	help
+	  Support for booting and shutting down MSM Subsystem processors.
+	  This driver also monitors the SMSM status bits and the watchdog
+	  interrupt for the subsystem and restarts it on a watchdog bite
+	  or a fatal error. Subsystems include LPASS, Venus, VPU, WCNSS and
+	  BCSS.
+
+config MSM_PIL_MSS_QDSP6V5
+	tristate "MSS QDSP6v5 (Hexagon) Boot Support"
+	depends on MSM_PIL && MSM_SUBSYSTEM_RESTART
+	help
+	  Support for booting and shutting down QDSP6v5 (Hexagon) processors
+	  in modem subsystems. If you would like to make or receive phone
+	  calls then say Y here.
+
+	  If unsure, say N.
+
+config TRACER_PKT
+	bool "Tracer Packet"
+	help
+	  Tracer Packet helps in profiling the performance of inter-
+	  processor communication protocols. The profiling information
+	  can be logged into the tracer packet itself.
+
+config QCOM_FORCE_WDOG_BITE_ON_PANIC
+	bool "QCOM force watchdog bite"
+	depends on QCOM_WATCHDOG_V2
+	help
+	  This forces a watchdog bite when the device restarts due to a
+	  kernel panic. On certain MSM SoCs, this provides additional
+	  debugging information.
+
+config MSM_MPM_OF
+	bool "Modem Power Manager"
+	depends on OF
+	help
+	  MPM is a dedicated hardware resource responsible for entering and
+	  waking up from a system wide low power mode. The MPM driver tracks
+	  the wakeup interrupts and configures the MPM to monitor the wakeup
+	  interrupts when going to a system wide sleep mode. This config
+	  option enables the MPM driver that supports initialization from a
+	  device tree.
+
+
+config MSM_EVENT_TIMER
+	bool "Event timer"
+	help
+	  This option enables a module that manages a list of event timers
+	  that need to be monitored by the PM. This enables the PM code to
+	  monitor events that require the core to be awake and ready to
+	  handle the event.
+
+config MSM_AVTIMER
+	tristate "Avtimer Driver"
+	depends on MSM_QDSP6_APRV2 || MSM_QDSP6_APRV3 || MSM_QDSP6_APRV2_GLINK || \
+		MSM_QDSP6_APRV2_VM
+	help
+	  This driver gets the Q6 out of the power-collapsed state and
+	  exposes an ioctl to read the avtimer tick.
+
+config MSM_KERNEL_PROTECT
+	bool "Protect kernel text by removing write permissions in stage-2"
+	depends on !FUNCTION_TRACER
+	help
+	  On hypervisor-enabled targets, this option will make a call into
+	  the hypervisor to request that the kernel text be remapped
+	  without write permissions.  This protects against malicious
+	  devices rewriting kernel code.
+
+	  Note that this will BREAK any runtime patching of the kernel text
+	  (i.e. anything that uses apply_alternatives,
+	  aarch64_insn_patch_text_nosync, etc. including the various CPU
+	  errata workarounds in arch/arm64/kernel/cpu_errata.c).
+
+config MSM_KERNEL_PROTECT_TEST
+	bool "Bootup test of kernel protection (INTENTIONAL CRASH)"
+	depends on MSM_KERNEL_PROTECT
+	help
+	  Attempts to write to the kernel text after making the kernel text
+	  read-only.  This test is FATAL whether it passes or fails!
+	  Success is signaled by a stage-2 fault.
+
+config QCOM_REMOTEQDSS
+	bool "Allow debug tools to enable events on other processors"
+	depends on QCOM_SCM && DEBUG_FS
+	help
+	  Other onchip processors/execution environments may support debug
+	  events. Provide a sysfs interface for debug tools to dynamically
+	  enable/disable these events. Interface located in
+	  /sys/class/remoteqdss.
+
+config MSM_SERVICE_NOTIFIER
+	bool "Service Notifier"
+	depends on MSM_SERVICE_LOCATOR && MSM_SUBSYSTEM_RESTART
+	help
+	  The Service Notifier provides a library for a kernel client to
+	  register for state change notifications regarding a remote service.
+	  A remote service here refers to a process providing certain services
+	  like audio, the identifier for which is provided by the service
+	  locator.
+
+config MSM_QBT1000
+	bool "QBT1000 Ultrasonic Fingerprint Sensor"
+	help
+	  This driver provides services for configuring the fingerprint
+	  sensor hardware and for communicating with the trusted app which
+	  uses it. It enables clocks and provides commands for loading
+	  trusted apps, unloading them and marshalling buffers to the
+	  trusted fingerprint app.
+
+config MSM_RPM_RBCPR_STATS_V2_LOG
+	tristate "MSM Resource Power Manager RPBCPR Stat Driver"
+	depends on DEBUG_FS
+	help
+	  This option enables v2 of the rpmrbcpr_stats driver, which reads RPM
+	  memory for statistics pertaining to RPM's RBCPR (Rapid Bridge Core
+	  Power Reduction) driver. The driver outputs the messages via a
+	  debugfs node.
+
+config MSM_RPM_LOG
+	tristate "MSM Resource Power Manager Log Driver"
+	depends on DEBUG_FS
+	depends on MSM_RPM_SMD
+	default n
+	help
+	  This option enables a driver which can read from a circular buffer
+	  of messages produced by the RPM. These messages provide diagnostic
+	  information about RPM operation. The driver outputs the messages
+	  via a debugfs node.
+
+config MSM_RPM_STATS_LOG
+	tristate "MSM Resource Power Manager Stat Driver"
+	depends on DEBUG_FS
+	depends on MSM_RPM_SMD
+	default n
+	help
+	  This option enables a driver which reads RPM messages from a shared
+	  memory location. These messages provide statistical information about
+	  the low power modes that RPM enters. The driver outputs the messages
+	  via a debugfs node.
+
+config QSEE_IPC_IRQ_BRIDGE
+	tristate "QSEE IPC Interrupt Bridge"
+	help
+	  This module enables bridging an Inter-Processor Communication (IPC)
+	  interrupt from a remote subsystem directed towards the Qualcomm
+	  Technologies, Inc. Secure Execution Environment (QSEE).
+
+config WCD_DSP_GLINK
+	tristate "WCD DSP GLINK Driver"
+	depends on MSM_GLINK
+	default y if SND_SOC_WCD934X=y
+	help
+	  This option enables a driver which provides a communication
+	  interface between the MSM and the WCD DSP over the G-Link transport
+	  protocol. This driver provides read and write interfaces via a
+	  char device.
+
+config QCOM_SMCINVOKE
+	bool "Secure QSEE Support"
+	help
+	  Enable SMCInvoke driver which supports capability based secure
+	  communication between QSEE and HLOS.
+
+config QCOM_EARLY_RANDOM
+	bool "Initialize random pool very early"
+	help
+	  The standard random pool may not initialize until late in the boot
+	  process which means that any calls to get random numbers before then
+	  may not be truly random. Select this option to make an early call
+	  to get some random data to put in the pool. If unsure, say N.
+
+config QCOM_CX_IPEAK
+	bool "Common driver to handle Cx iPeak limitation"
+	help
+	  The Cx iPeak HW module is used to limit the current drawn by various
+	  subsystem blocks on the Cx power rail. Each client needs to set its
+	  bit in the TCSR register if it is going to cross its own threshold.
+	  If all clients are going to cross their thresholds, the Cx iPeak HW
+	  module raises an interrupt to the cDSP block to throttle the cDSP
+	  fmax.
+
+config MSM_CACHE_M4M_ERP64
+	bool "Cache and M4M error report"
+	depends on ARCH_MSM8996
+	help
+	  Say 'Y' here to enable reporting of cache and M4M errors to the
+	  kernel log. The kernel log contains collected error syndrome and
+	  address registers. These register dumps can be used as useful
+	  information to find out possible hardware problems.
+
+config MSM_CACHE_M4M_ERP64_PANIC_ON_CE
+	bool "Panic on correctable cache/M4M errors"
+	help
+	  Say 'Y' here to cause kernel panic when correctable cache/M4M
+	  errors are detected.  Enabling this is useful when you want to dump
+	  memory and system state close to the time when the error occurred.
+
+	  If unsure, say N.
+
+config MSM_CACHE_M4M_ERP64_PANIC_ON_UE
+	bool "Panic on uncorrectable cache/M4M errors"
+	help
+	  Say 'Y' here to cause kernel panic when uncorrectable cache/M4M
+	  errors are detected.
+
+config QCOM_QDSS_BRIDGE
+	bool "Configure bridge driver for QTI/Qualcomm Technologies, Inc. MDM"
+	depends on MSM_MHI
+	help
+	  The driver helps route diag traffic from the modem side over the
+	  QDSS sub-system to USB on the APSS side. The driver acts as a bridge
+	  between the MHI and USB interfaces. If unsure, say N.
+
+source "drivers/soc/qcom/memshare/Kconfig"
diff -ruw linux-4.4.115/drivers/soc/qcom/Makefile linux-4.4.115-fbx/drivers/soc/qcom/Makefile
--- linux-4.4.115/drivers/soc/qcom/Makefile	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/soc/qcom/Makefile	2019-10-29 09:26:24.805214550 +0100
@@ -1,5 +1,111 @@
+KASAN_SANITIZE_scm.o := n
+
+CFLAGS_scm.o :=$(call as-instr,.arch_extension sec,-DREQUIRES_SEC=1)
+CFLAGS_tracer_pkt.o = -I$(src)
+
+ccflags-$(CONFIG_MSM_QBT1000) += -Idrivers/misc/
+
+obj-$(CONFIG_MSM_INRUSH_CURRENT_MITIGATION) += inrush-current-mitigation.o
+obj-$(CONFIG_MSM_SMEM)	+=	msm_smem.o smem_debug.o
+obj-$(CONFIG_MSM_SMD)	+= 	msm_smd.o smd_debug.o smd_private.o smd_init_dt.o smsm_debug.o
+obj-$(CONFIG_MSM_GLINK)	+=	glink.o glink_debugfs.o glink_ssr.o
+obj-$(CONFIG_MSM_GLINK_LOOPBACK_SERVER)	+=	glink_loopback_server.o
+obj-$(CONFIG_MSM_GLINK_SMD_XPRT)	+=	glink_smd_xprt.o
+obj-$(CONFIG_MSM_GLINK_SMEM_NATIVE_XPRT)+=	glink_smem_native_xprt.o
+obj-$(CONFIG_MSM_GLINK_SPI_XPRT)	+=	glink_spi_xprt.o
+obj-$(CONFIG_MSM_SMEM_LOGGING)	+=	smem_log.o
+obj-$(CONFIG_MSM_SYSMON_GLINK_COMM)	+= sysmon-glink.o sysmon-qmi.o
+obj-$(CONFIG_ARCH_MSM8996) +=	kryo-l2-accessors.o
+obj-$(CONFIG_MSM_SMP2P)	+=	smp2p.o smp2p_loopback.o smp2p_debug.o smp2p_sleepstate.o
+obj-$(CONFIG_MSM_QMI_INTERFACE)	+=	qmi_interface.o
+obj-$(CONFIG_MSM_RPM_SMD)	+=	rpm-smd.o
+obj-$(CONFIG_MSM_HVC) += hvc.o
+ifdef CONFIG_DEBUG_FS
+obj-$(CONFIG_MSM_RPM_SMD)	+=	rpm-smd-debug.o
+endif
+obj-$(CONFIG_MSM_IPC_ROUTER_SMD_XPRT)	+=	ipc_router_smd_xprt.o
+obj-$(CONFIG_MSM_IPC_ROUTER_HSIC_XPRT)	+=	ipc_router_hsic_xprt.o
+obj-$(CONFIG_MSM_IPC_ROUTER_MHI_XPRT)	+=	ipc_router_mhi_xprt.o
+obj-$(CONFIG_MSM_IPC_ROUTER_GLINK_XPRT)	+=	ipc_router_glink_xprt.o
+obj-$(CONFIG_MSM_SPCOM) += spcom.o
+obj-$(CONFIG_MSM_SPSS_UTILS) += spss_utils.o
+obj-y			+=	qdsp6v2/
+obj-$(CONFIG_MSM_SYSTEM_HEALTH_MONITOR)	+=	system_health_monitor_v01.o
+obj-$(CONFIG_MSM_SYSTEM_HEALTH_MONITOR)	+=	system_health_monitor.o
+obj-$(CONFIG_MSM_GLINK_PKT)	+=	msm_glink_pkt.o
+
+obj-$(CONFIG_MEM_SHARE_QMI_SERVICE)		+= memshare/
+obj-$(CONFIG_MSM_PIL_SSR_GENERIC) += subsys-pil-tz.o
+obj-$(CONFIG_MSM_PIL_MSS_QDSP6V5) += pil-q6v5.o pil-msa.o pil-q6v5-mss.o
+obj-$(CONFIG_MSM_PIL)   +=      peripheral-loader.o
+obj-$(CONFIG_MSM_PFE_WA) += pfe-wa.o
+obj-$(CONFIG_ARCH_MSM8996) += msm_cpu_voltage.o
+
+obj-$(CONFIG_MSM_PERFORMANCE) += msm_performance.o
+
+ifdef CONFIG_MSM_SUBSYSTEM_RESTART
+	obj-y += subsystem_notif.o
+	obj-y += subsystem_restart.o
+	obj-y += ramdump.o
+endif
+
+obj-$(CONFIG_QPNP_HAPTIC) += qpnp-haptic.o
 obj-$(CONFIG_QCOM_GSBI)	+=	qcom_gsbi.o
+obj-$(CONFIG_QCOM_CPUSS_DUMP) += cpuss_dump.o
+obj-$(CONFIG_QPNP_PBS) += qpnp-pbs.o
 obj-$(CONFIG_QCOM_PM)	+=	spm.o
 obj-$(CONFIG_QCOM_SMD) +=	smd.o
 obj-$(CONFIG_QCOM_SMD_RPM)	+= smd-rpm.o
 obj-$(CONFIG_QCOM_SMEM) +=	smem.o
+obj-$(CONFIG_MSM_SPM) += msm-spm.o spm_devices.o
+
+obj-$(CONFIG_QCOM_SCM_ERRATA) += scm-errata.o
+obj-$(CONFIG_QCOM_SCM)  +=      scm.o scm-boot.o
+obj-$(CONFIG_QCOM_SCM_QCPE)  += scm_qcpe.o
+obj-$(CONFIG_QCOM_SCM_XPU) += scm-xpu.o
+obj-$(CONFIG_QCOM_WATCHDOG_V2) += watchdog_v2.o
+obj-$(CONFIG_QCOM_MEMORY_DUMP) += memory_dump.o
+obj-$(CONFIG_QCOM_MEMORY_DUMP_V2) += memory_dump_v2.o
+obj-$(CONFIG_QCOM_MINIDUMP) += msm_minidump.o
+obj-$(CONFIG_QCOM_DCC) += dcc.o
+obj-$(CONFIG_QCOM_COMMON_LOG) += common_log.o
+obj-$(CONFIG_QCOM_IRQ_HELPER) += irq-helper.o
+obj-$(CONFIG_TRACER_PKT)	+=	tracer_pkt.o
+obj-$(CONFIG_ICNSS) += icnss.o wlan_firmware_service_v01.o icnss_utils.o
+obj-$(CONFIG_SOC_BUS)  +=      socinfo.o
+obj-$(CONFIG_QCOM_BUS_SCALING) += msm_bus/
+obj-$(CONFIG_MSM_SERVICE_NOTIFIER) += service-notifier.o
+obj-$(CONFIG_MSM_SECURE_BUFFER) += secure_buffer.o
+obj-$(CONFIG_MSM_MPM_OF) += mpm-of.o
+obj-$(CONFIG_MSM_EVENT_TIMER) += event_timer.o
+obj-$(CONFIG_MSM_TZ_SMMU) += msm_tz_smmu.o
+obj-$(CONFIG_MSM_GLADIATOR_ERP) += gladiator_erp.o
+obj-$(CONFIG_MSM_GLADIATOR_ERP_V2) += gladiator_erp_v2.o
+obj-$(CONFIG_MSM_CORE_HANG_DETECT) += core_hang_detect.o
+obj-$(CONFIG_MSM_GLADIATOR_HANG_DETECT) += gladiator_hang_detect.o
+obj-$(CONFIG_MSM_RUN_QUEUE_STATS) += msm_rq_stats.o
+obj-$(CONFIG_MSM_BOOT_STATS) += boot_stats.o
+obj-$(CONFIG_MSM_BOOT_TIME_MARKER) += boot_marker.o
+obj-$(CONFIG_MSM_AVTIMER) += avtimer.o
+ifdef CONFIG_ARCH_MSM8996
+obj-$(CONFIG_HW_PERF_EVENTS) += perf_event_kryo.o
+endif
+obj-$(CONFIG_MSM_JTAGV8) += jtag-fuse.o jtagv8.o jtagv8-etm.o
+obj-$(CONFIG_MSM_KERNEL_PROTECT) += kernel_protect.o
+obj-$(CONFIG_MSM_RTB) += msm_rtb-hotplug.o
+obj-$(CONFIG_QCOM_REMOTEQDSS) += remoteqdss.o
+obj-$(CONFIG_MSM_SERVICE_LOCATOR) += service-locator.o
+obj-$(CONFIG_MSM_QBT1000) += qbt1000.o
+obj-$(CONFIG_MSM_RPM_RBCPR_STATS_V2_LOG) += rpm_rbcpr_stats_v2.o
+obj-$(CONFIG_MSM_RPM_STATS_LOG) += rpm_stats.o rpm_master_stat.o rpm_rail_stats.o system_stats.o
+obj-$(CONFIG_MSM_RPM_LOG) += rpm_log.o
+obj-$(CONFIG_QSEE_IPC_IRQ_BRIDGE) += qsee_ipc_irq_bridge.o
+obj-$(CONFIG_WCD_DSP_GLINK) += wcd-dsp-glink.o
+obj-$(CONFIG_QCOM_SMCINVOKE) += smcinvoke.o
+obj-$(CONFIG_QCOM_EARLY_RANDOM)	+= early_random.o
+obj-$(CONFIG_QCOM_CX_IPEAK) += cx_ipeak.o
+obj-$(CONFIG_MSM_CACHE_M4M_ERP64) += cache_m4m_erp64.o
+obj-$(CONFIG_MSM_HAB) += hab/
+obj-$(CONFIG_QCOM_QDSS_BRIDGE) += qdss_bridge.o
diff -ruw linux-4.4.115/drivers/spi/Kconfig linux-4.4.115-fbx/drivers/spi/Kconfig
--- linux-4.4.115/drivers/spi/Kconfig	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/spi/Kconfig	2019-01-22 16:16:26.687275240 +0100
@@ -704,6 +704,18 @@
 
 endif # SPI_MASTER
 
-# (slave support would go here)
+#
+# SLAVE side ... listening to other SPI masters
+#
+
+config SPI_SLAVE
+	bool "SPI slave protocol handlers"
+	help
+	  If your system has a slave-capable SPI controller, you can enable
+	  slave protocol handlers.
+
+if SPI_SLAVE
+
+endif # SPI_SLAVE
 
 endif # SPI
diff -ruw linux-4.4.115/drivers/spi/Makefile linux-4.4.115-fbx/drivers/spi/Makefile
--- linux-4.4.115/drivers/spi/Makefile	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/spi/Makefile	2019-01-22 16:16:26.687275240 +0100
@@ -93,3 +93,7 @@
 obj-$(CONFIG_SPI_XLP)			+= spi-xlp.o
 obj-$(CONFIG_SPI_XTENSA_XTFPGA)		+= spi-xtensa-xtfpga.o
 obj-$(CONFIG_SPI_ZYNQMP_GQSPI)		+= spi-zynqmp-gqspi.o
+obj-$(CONFIG_SPI_QSD)			+= spi_qsd.o
+obj-$(CONFIG_SPI_QUP)			+= spi_qsd.o
+
+# SPI slave protocol handlers
diff -ruw linux-4.4.115/drivers/spi/spi.c linux-4.4.115-fbx/drivers/spi/spi.c
--- linux-4.4.115/drivers/spi/spi.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/spi/spi.c	2019-10-29 09:26:24.841214902 +0100
@@ -1421,39 +1421,11 @@
 /*-------------------------------------------------------------------------*/
 
 #if defined(CONFIG_OF)
-static struct spi_device *
-of_register_spi_device(struct spi_master *master, struct device_node *nc)
+static int of_spi_parse_dt(struct spi_master *master, struct spi_device *spi,
+			   struct device_node *nc)
 {
-	struct spi_device *spi;
-	int rc;
 	u32 value;
-
-	/* Alloc an spi_device */
-	spi = spi_alloc_device(master);
-	if (!spi) {
-		dev_err(&master->dev, "spi_device alloc error for %s\n",
-			nc->full_name);
-		rc = -ENOMEM;
-		goto err_out;
-	}
-
-	/* Select device driver */
-	rc = of_modalias_node(nc, spi->modalias,
-				sizeof(spi->modalias));
-	if (rc < 0) {
-		dev_err(&master->dev, "cannot find modalias for %s\n",
-			nc->full_name);
-		goto err_out;
-	}
-
-	/* Device address */
-	rc = of_property_read_u32(nc, "reg", &value);
-	if (rc) {
-		dev_err(&master->dev, "%s has no valid 'reg' property (%d)\n",
-			nc->full_name, rc);
-		goto err_out;
-	}
-	spi->chip_select = value;
+	int rc;
 
 	/* Mode (clock phase/polarity/etc.) */
 	if (of_find_property(nc, "spi-cpha", NULL))
@@ -1504,15 +1476,64 @@
 		}
 	}
 
+	if (spi_controller_is_slave(master)) {
+		if (strcmp(nc->name, "slave")) {
+			dev_err(&master->dev, "%s is not called 'slave'\n",
+				nc->full_name);
+			return -EINVAL;
+		}
+		return 0;
+	}
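+
+	/*
+	 * Illustrative DT fragment (a sketch; the compatible string is made
+	 * up, and the "spi-slave" controller property follows the mainline
+	 * convention): a slave controller carries a single subnode, and it
+	 * must be named "slave":
+	 *
+	 *	&spi0 {
+	 *		spi-slave;
+	 *		slave {
+	 *			compatible = "vendor,protocol-handler";
+	 *		};
+	 *	};
+	 */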
+
+	/* Device address */
+	rc = of_property_read_u32(nc, "reg", &value);
+	if (rc) {
+		dev_err(&master->dev, "%s has no valid 'reg' property (%d)\n",
+			nc->full_name, rc);
+		return rc;
+	}
+	spi->chip_select = value;
+
 	/* Device speed */
 	rc = of_property_read_u32(nc, "spi-max-frequency", &value);
 	if (rc) {
 		dev_err(&master->dev, "%s has no valid 'spi-max-frequency' property (%d)\n",
 			nc->full_name, rc);
-		goto err_out;
+		return rc;
 	}
 	spi->max_speed_hz = value;
 
+	return 0;
+}
+
+static struct spi_device *
+of_register_spi_device(struct spi_master *master, struct device_node *nc)
+{
+	struct spi_device *spi;
+	int rc;
+
+	/* Alloc an spi_device */
+	spi = spi_alloc_device(master);
+	if (!spi) {
+		dev_err(&master->dev, "spi_device alloc error for %s\n",
+			nc->full_name);
+		rc = -ENOMEM;
+		goto err_out;
+	}
+
+	/* Select device driver */
+	rc = of_modalias_node(nc, spi->modalias,
+				sizeof(spi->modalias));
+	if (rc < 0) {
+		dev_err(&master->dev, "cannot find modalias for %s\n",
+			nc->full_name);
+		goto err_out;
+	}
+
+	rc = of_spi_parse_dt(master, spi, nc);
+	if (rc)
+		goto err_out;
+
 	/* Store a pointer to the node in the device structure */
 	of_node_get(nc);
 	spi->dev.of_node = nc;
@@ -1536,8 +1557,8 @@
  * of_register_spi_devices() - Register child devices onto the SPI bus
  * @master:	Pointer to spi_master device
  *
- * Registers an spi_device for each child node of master node which has a 'reg'
- * property.
+ * Registers an spi_device for each child node of the controller node which
+ * represents a valid SPI slave.
  */
 static void of_register_spi_devices(struct spi_master *master)
 {
@@ -1669,28 +1690,129 @@
 	.dev_groups	= spi_master_groups,
 };
 
+#ifdef CONFIG_SPI_SLAVE
+/**
+ * spi_slave_abort - abort the ongoing transfer request on an SPI slave
+ *		     controller
+ * @spi: device used for the current transfer
+ */
+int spi_slave_abort(struct spi_device *spi)
+{
+	struct spi_master *master = spi->master;
+
+	if (spi_controller_is_slave(master) && master->slave_abort)
+		return master->slave_abort(master);
+
+	return -ENOTSUPP;
+}
+EXPORT_SYMBOL_GPL(spi_slave_abort);
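+
+/*
+ * Typical use (a sketch; the handler name is hypothetical): a slave
+ * protocol handler cancels a transfer it queued but that the master
+ * never clocked out, e.g. in its remove() path:
+ *
+ *	static int my_handler_remove(struct spi_device *spi)
+ *	{
+ *		spi_slave_abort(spi);
+ *		return 0;
+ *	}
+ */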
+
+static int match_true(struct device *dev, void *data)
+{
+	return 1;
+}
+
+static ssize_t spi_slave_show(struct device *dev,
+			      struct device_attribute *attr, char *buf)
+{
+	struct spi_master *ctlr = container_of(dev, struct spi_master, dev);
+	struct device *child;
+
+	child = device_find_child(&ctlr->dev, NULL, match_true);
+	return sprintf(buf, "%s\n",
+		       child ? to_spi_device(child)->modalias : NULL);
+}
+
+static ssize_t spi_slave_store(struct device *dev,
+			       struct device_attribute *attr, const char *buf,
+			       size_t count)
+{
+	struct spi_master *ctlr = container_of(dev, struct spi_master, dev);
+	struct spi_device *spi;
+	struct device *child;
+	char name[32];
+	int rc;
+
+	rc = sscanf(buf, "%31s", name);
+	if (rc != 1 || !name[0])
+		return -EINVAL;
+
+	child = device_find_child(&ctlr->dev, NULL, match_true);
+	if (child) {
+		/* Remove registered slave */
+		device_unregister(child);
+		put_device(child);
+	}
+
+	if (strcmp(name, "(null)")) {
+		/* Register new slave */
+		spi = spi_alloc_device(ctlr);
+		if (!spi)
+			return -ENOMEM;
+
+		strlcpy(spi->modalias, name, sizeof(spi->modalias));
+
+		rc = spi_add_device(spi);
+		if (rc) {
+			spi_dev_put(spi);
+			return rc;
+		}
+	}
+
+	return count;
+}
+
+static DEVICE_ATTR(slave, 0644, spi_slave_show, spi_slave_store);
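+
+/*
+ * Userspace sketch (device and handler names are illustrative): bind and
+ * unbind a slave protocol handler by writing its modalias to the "slave"
+ * attribute; per the store function above, writing "(null)" only
+ * unregisters the current one:
+ *
+ *	echo spi-slave-time > /sys/class/spi_slave/spi0/slave
+ *	echo '(null)'       > /sys/class/spi_slave/spi0/slave
+ */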
+
+static struct attribute *spi_slave_attrs[] = {
+	&dev_attr_slave.attr,
+	NULL,
+};
+
+static const struct attribute_group spi_slave_group = {
+	.attrs = spi_slave_attrs,
+};
+
+static const struct attribute_group *spi_slave_groups[] = {
+	&spi_master_statistics_group,
+	&spi_slave_group,
+	NULL,
+};
+
+static struct class spi_slave_class = {
+	.name		= "spi_slave",
+	.owner		= THIS_MODULE,
+	.dev_release	= spi_master_release,
+	.dev_groups	= spi_slave_groups,
+};
+#else
+extern struct class spi_slave_class;	/* dummy */
+#endif
 
 /**
- * spi_alloc_master - allocate SPI master controller
+ * __spi_alloc_controller - allocate an SPI master or slave controller
  * @dev: the controller, possibly using the platform_bus
  * @size: how much zeroed driver-private data to allocate; the pointer to this
  *	memory is in the driver_data field of the returned device,
  *	accessible with spi_master_get_devdata().
+ * @slave: flag indicating whether to allocate an SPI master (false) or SPI
+ *	slave (true) controller
  * Context: can sleep
  *
- * This call is used only by SPI master controller drivers, which are the
+ * This call is used only by SPI controller drivers, which are the
  * only ones directly touching chip registers.  It's how they allocate
  * an spi_master structure, prior to calling spi_register_master().
  *
  * This must be called from context that can sleep.
  *
- * The caller is responsible for assigning the bus number and initializing
- * the master's methods before calling spi_register_master(); and (after errors
+ * The caller is responsible for assigning the bus number and initializing the
+ * controller's methods before calling spi_register_master(); and (after errors
  * adding the device) calling spi_master_put() to prevent a memory leak.
  *
- * Return: the SPI master structure on success, else NULL.
+ * Return: the SPI controller structure on success, else NULL.
  */
-struct spi_master *spi_alloc_master(struct device *dev, unsigned size)
+struct spi_master *__spi_alloc_controller(struct device *dev,
+					  unsigned int size, bool slave)
 {
 	struct spi_master	*master;
 
@@ -1704,13 +1826,17 @@
 	device_initialize(&master->dev);
 	master->bus_num = -1;
 	master->num_chipselect = 1;
+	master->slave = slave;
+	if (IS_ENABLED(CONFIG_SPI_SLAVE) && slave)
+		master->dev.class = &spi_slave_class;
+	else
 	master->dev.class = &spi_master_class;
 	master->dev.parent = dev;
 	spi_master_set_devdata(master, &master[1]);
 
 	return master;
 }
-EXPORT_SYMBOL_GPL(spi_alloc_master);
+EXPORT_SYMBOL_GPL(__spi_alloc_controller);
 
 #ifdef CONFIG_OF
 static int of_spi_register_master(struct spi_master *master)
@@ -1786,9 +1912,11 @@
 	if (!dev)
 		return -ENODEV;
 
+	if (!spi_controller_is_slave(master)) {
 	status = of_spi_register_master(master);
 	if (status)
 		return status;
+	}
 
 	/* even if it's just one always-selected device, there must
 	 * be at least one chipselect
@@ -1824,8 +1952,9 @@
 	status = device_add(&master->dev);
 	if (status < 0)
 		goto done;
-	dev_dbg(dev, "registered master %s%s\n", dev_name(&master->dev),
-			dynamic ? " (dynamic)" : "");
+	dev_dbg(dev, "registered %s %s%s\n",
+			spi_controller_is_slave(master) ? "slave" : "master",
+			dev_name(&master->dev), dynamic ? " (dynamic)" : "");
 
 	/* If we're using a queued driver, start the queue */
 	if (master->transfer)
@@ -2613,6 +2742,9 @@
 
 	dev = class_find_device(&spi_master_class, NULL, node,
 				__spi_of_master_match);
+	if (!dev && IS_ENABLED(CONFIG_SPI_SLAVE))
+		dev = class_find_device(&spi_slave_class, NULL, node,
+					__spi_of_master_match);
 	if (!dev)
 		return NULL;
 
@@ -2685,11 +2817,19 @@
 	if (status < 0)
 		goto err2;
 
+	if (IS_ENABLED(CONFIG_SPI_SLAVE)) {
+		status = class_register(&spi_slave_class);
+		if (status < 0)
+			goto err3;
+	}
+
 	if (IS_ENABLED(CONFIG_OF_DYNAMIC))
 		WARN_ON(of_reconfig_notifier_register(&spi_of_notifier));
 
 	return 0;
 
+err3:
+	class_unregister(&spi_master_class);
 err2:
 	bus_unregister(&spi_bus_type);
 err1:
diff -ruw linux-4.4.115/drivers/spi/spidev.c linux-4.4.115-fbx/drivers/spi/spidev.c
--- linux-4.4.115/drivers/spi/spidev.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/spi/spidev.c	2019-01-22 16:16:26.707275421 +0100
@@ -695,6 +695,9 @@
 static const struct of_device_id spidev_dt_ids[] = {
 	{ .compatible = "rohm,dh2228fv" },
 	{ .compatible = "lineartechnology,ltc2488" },
+	{ .compatible = "qcom,spi-msm-codec-slave", },
+	{ .compatible = "nxp,mpc57xx", },
+	{ .compatible = "infineon,sli97", },
 	{},
 };
 MODULE_DEVICE_TABLE(of, spidev_dt_ids);
@@ -715,8 +718,10 @@
 	 */
 	if (spi->dev.of_node && !of_match_device(spidev_dt_ids, &spi->dev)) {
 		dev_err(&spi->dev, "buggy DT: spidev listed directly in DT\n");
+#if 0
 		WARN_ON(spi->dev.of_node &&
 			!of_match_device(spidev_dt_ids, &spi->dev));
+#endif
 	}
 
 	/* Allocate driver data */
diff -ruw linux-4.4.115/drivers/spmi/spmi.c linux-4.4.115-fbx/drivers/spmi/spmi.c
--- linux-4.4.115/drivers/spmi/spmi.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/spmi/spmi.c	2019-01-22 16:16:26.707275421 +0100
@@ -69,7 +69,7 @@
 	struct spmi_controller *ctrl = sdev->ctrl;
 	int err;
 
-	dev_set_name(&sdev->dev, "%d-%02x", ctrl->nr, sdev->usid);
+	dev_set_name(&sdev->dev, "spmi%d-%02x", ctrl->nr, sdev->usid);
 
 	err = device_add(&sdev->dev);
 	if (err < 0) {
diff -ruw linux-4.4.115/drivers/spmi/spmi-pmic-arb.c linux-4.4.115-fbx/drivers/spmi/spmi-pmic-arb.c
--- linux-4.4.115/drivers/spmi/spmi-pmic-arb.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/spmi/spmi-pmic-arb.c	2019-10-29 09:26:24.841214902 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -10,6 +10,7 @@
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
  */
+#include <linux/bitmap.h>
 #include <linux/delay.h>
 #include <linux/err.h>
 #include <linux/interrupt.h>
@@ -23,10 +24,13 @@
 #include <linux/platform_device.h>
 #include <linux/slab.h>
 #include <linux/spmi.h>
+#include <linux/syscore_ops.h>
 
 /* PMIC Arbiter configuration registers */
 #define PMIC_ARB_VERSION		0x0000
 #define PMIC_ARB_VERSION_V2_MIN		0x20010000
+#define PMIC_ARB_VERSION_V3_MIN		0x30000000
+#define PMIC_ARB_VERSION_V5_MIN		0x50000000
 #define PMIC_ARB_INT_EN			0x0004
 
 /* PMIC Arbiter channel registers offsets */
@@ -37,7 +41,6 @@
 #define PMIC_ARB_WDATA1			0x14
 #define PMIC_ARB_RDATA0			0x18
 #define PMIC_ARB_RDATA1			0x1C
-#define PMIC_ARB_REG_CHNL(N)		(0x800 + 0x4 * (N))
 
 /* Mapping Table */
 #define SPMI_MAPPING_TABLE_REG(N)	(0x0B00 + (4 * (N)))
@@ -47,9 +50,11 @@
 #define SPMI_MAPPING_BIT_IS_1_FLAG(X)	(((X) >> 8) & 0x1)
 #define SPMI_MAPPING_BIT_IS_1_RESULT(X)	(((X) >> 0) & 0xFF)
 
-#define SPMI_MAPPING_TABLE_LEN		255
 #define SPMI_MAPPING_TABLE_TREE_DEPTH	16	/* Maximum of 16-bits */
-#define PPID_TO_CHAN_TABLE_SZ		BIT(12)	/* PPID is 12bit chan is 1byte*/
+#define PMIC_ARB_MAX_PPID		BIT(12) /* PPID is 12bit */
+#define PMIC_ARB_CHAN_VALID		BIT(15)
+#define PMIC_ARB_CHAN_IS_IRQ_OWNER(reg)	((reg) & BIT(24))
+#define INVALID_EE			(-1)
 
 /* Ownership Table */
 #define SPMI_OWNERSHIP_TABLE_REG(N)	(0x0700 + (4 * (N)))
@@ -57,10 +62,10 @@
 
 /* Channel Status fields */
 enum pmic_arb_chnl_status {
-	PMIC_ARB_STATUS_DONE	= (1 << 0),
-	PMIC_ARB_STATUS_FAILURE	= (1 << 1),
-	PMIC_ARB_STATUS_DENIED	= (1 << 2),
-	PMIC_ARB_STATUS_DROPPED	= (1 << 3),
+	PMIC_ARB_STATUS_DONE	= BIT(0),
+	PMIC_ARB_STATUS_FAILURE	= BIT(1),
+	PMIC_ARB_STATUS_DENIED	= BIT(2),
+	PMIC_ARB_STATUS_DROPPED	= BIT(3),
 };
 
 /* Command register fields */
@@ -84,10 +89,17 @@
 	PMIC_ARB_OP_ZERO_WRITE = 16,
 };
 
+/*
+ * PMIC arbiter version 5 uses different register offsets for read/write vs
+ * observer channels.
+ */
+enum pmic_arb_channel {
+	PMIC_ARB_CHANNEL_RW,
+	PMIC_ARB_CHANNEL_OBS,
+};
+
 /* Maximum number of support PMIC peripherals */
-#define PMIC_ARB_MAX_PERIPHS		256
-#define PMIC_ARB_MAX_CHNL		128
-#define PMIC_ARB_PERIPH_ID_VALID	(1 << 15)
+#define PMIC_ARB_MAX_PERIPHS		512
 #define PMIC_ARB_TIMEOUT_US		100
 #define PMIC_ARB_MAX_TRANS_BYTES	(8)
 
@@ -97,14 +109,32 @@
 /* interrupt enable bit */
 #define SPMI_PIC_ACC_ENABLE_BIT		BIT(0)
 
+#define HWIRQ(slave_id, periph_id, irq_id, apid) \
+	((((slave_id) & 0xF)   << 28) | \
+	(((periph_id) & 0xFF)  << 20) | \
+	(((irq_id)    & 0x7)   << 16) | \
+	(((apid)      & 0x1FF) << 0))
+
+#define HWIRQ_SID(hwirq)  (((hwirq) >> 28) & 0xF)
+#define HWIRQ_PER(hwirq)  (((hwirq) >> 20) & 0xFF)
+#define HWIRQ_IRQ(hwirq)  (((hwirq) >> 16) & 0x7)
+#define HWIRQ_APID(hwirq) (((hwirq) >> 0)  & 0x1FF)
+
 struct pmic_arb_ver_ops;
 
+struct apid_data {
+	u16		ppid;
+	u8		write_owner;
+	u8		irq_owner;
+};
+
 /**
- * spmi_pmic_arb_dev - SPMI PMIC Arbiter object
+ * spmi_pmic_arb - SPMI PMIC Arbiter object
  *
  * @rd_base:		on v1 "core", on v2 "observer" register base off DT.
  * @wr_base:		on v1 "core", on v2 "chnls"    register base off DT.
  * @intr:		address of the SPMI interrupt control registers.
+ * @acc_status:		address of SPMI ACC interrupt status registers.
  * @cnfg:		address of the PMIC Arbiter configuration registers.
  * @lock:		lock to synchronize accesses.
  * @channel:		execution environment channel to use for accesses.
@@ -112,36 +142,47 @@
  * @ee:			the current Execution Environment
  * @min_apid:		minimum APID (used for bounding IRQ search)
  * @max_apid:		maximum APID
+ * @max_periph:		maximum number of PMIC peripherals supported by HW.
  * @mapping_table:	in-memory copy of PPID -> APID mapping table.
  * @domain:		irq domain object for PMIC IRQ domain
  * @spmic:		SPMI controller object
- * @apid_to_ppid:	in-memory copy of APID -> PPID mapping table.
  * @ver_ops:		version dependent operations.
- * @ppid_to_chan	in-memory copy of PPID -> channel (APID) mapping table.
+ * @ppid_to_apid:	in-memory copy of PPID -> channel (APID) mapping table.
  *			v2 only.
  */
-struct spmi_pmic_arb_dev {
+struct spmi_pmic_arb {
 	void __iomem		*rd_base;
 	void __iomem		*wr_base;
 	void __iomem		*intr;
+	void __iomem		*acc_status;
 	void __iomem		*cnfg;
+	void __iomem		*core;
+	resource_size_t		core_size;
 	raw_spinlock_t		lock;
 	u8			channel;
 	int			irq;
 	u8			ee;
-	u8			min_apid;
-	u8			max_apid;
-	u32			mapping_table[SPMI_MAPPING_TABLE_LEN];
+	u16			min_apid;
+	u16			max_apid;
+	u16			max_periph;
+	u32			*mapping_table;
+	int			reserved_chan;
+	DECLARE_BITMAP(mapping_table_valid, PMIC_ARB_MAX_PERIPHS);
 	struct irq_domain	*domain;
 	struct spmi_controller	*spmic;
-	u16			apid_to_ppid[256];
 	const struct pmic_arb_ver_ops *ver_ops;
-	u8			*ppid_to_chan;
+	u16			*ppid_to_apid;
+	u16			last_apid;
+	struct apid_data	apid_data[PMIC_ARB_MAX_PERIPHS];
 };
+static struct spmi_pmic_arb *the_pa;
 
 /**
  * pmic_arb_ver: version dependent functionality.
  *
+ * @ver_str:		version string.
+ * @ppid_to_apid:	finds the apid for a given ppid.
+ * @mode:		access rights to specified pmic peripheral.
  * @non_data_cmd:	on v1 issues an spmi non-data command.
  *			on v2 no HW support, returns -EOPNOTSUPP.
  * @offset:		on v1 offset of per-ee channel.
@@ -155,29 +196,37 @@
  *			on v2 offset of SPMI_PIC_IRQ_STATUSn.
  * @irq_clear:		on v1 offset of PMIC_ARB_SPMI_PIC_IRQ_CLEARn
  *			on v2 offset of SPMI_PIC_IRQ_CLEARn.
+ * @channel_map_offset:	offset of PMIC_ARB_REG_CHNLn
  */
 struct pmic_arb_ver_ops {
+	const char *ver_str;
+	int (*ppid_to_apid)(struct spmi_pmic_arb *pa, u8 sid, u16 addr,
+			u16 *apid);
+	int (*mode)(struct spmi_pmic_arb *dev, u8 sid, u16 addr,
+			mode_t *mode);
 	/* spmi commands (read_cmd, write_cmd, cmd) functionality */
-	u32 (*offset)(struct spmi_pmic_arb_dev *dev, u8 sid, u16 addr);
+	int (*offset)(struct spmi_pmic_arb *dev, u8 sid, u16 addr,
+			enum pmic_arb_channel ch_type, u32 *offset);
 	u32 (*fmt_cmd)(u8 opc, u8 sid, u16 addr, u8 bc);
 	int (*non_data_cmd)(struct spmi_controller *ctrl, u8 opc, u8 sid);
 	/* Interrupts controller functionality (offset of PIC registers) */
-	u32 (*owner_acc_status)(u8 m, u8 n);
-	u32 (*acc_enable)(u8 n);
-	u32 (*irq_status)(u8 n);
-	u32 (*irq_clear)(u8 n);
+	u32 (*owner_acc_status)(u8 m, u16 n);
+	u32 (*acc_enable)(u16 n);
+	u32 (*irq_status)(u16 n);
+	u32 (*irq_clear)(u16 n);
+	u32 (*channel_map_offset)(u16 n);
 };
 
-static inline void pmic_arb_base_write(struct spmi_pmic_arb_dev *dev,
+static inline void pmic_arb_base_write(struct spmi_pmic_arb *pa,
 				       u32 offset, u32 val)
 {
-	writel_relaxed(val, dev->wr_base + offset);
+	writel_relaxed(val, pa->wr_base + offset);
 }
 
-static inline void pmic_arb_set_rd_cmd(struct spmi_pmic_arb_dev *dev,
+static inline void pmic_arb_set_rd_cmd(struct spmi_pmic_arb *pa,
 				       u32 offset, u32 val)
 {
-	writel_relaxed(val, dev->rd_base + offset);
+	writel_relaxed(val, pa->rd_base + offset);
 }
 
 /**
@@ -186,9 +235,10 @@
  * @reg:	register's address
  * @buf:	output parameter, length must be bc + 1
  */
-static void pa_read_data(struct spmi_pmic_arb_dev *dev, u8 *buf, u32 reg, u8 bc)
+static void pa_read_data(struct spmi_pmic_arb *pa, u8 *buf, u32 reg, u8 bc)
 {
-	u32 data = __raw_readl(dev->rd_base + reg);
+	u32 data = __raw_readl(pa->rd_base + reg);
+
 	memcpy(buf, &data, (bc & 3) + 1);
 }
 
@@ -199,20 +249,29 @@
  * @buf:	buffer to write. length must be bc + 1.
  */
 static void
-pa_write_data(struct spmi_pmic_arb_dev *dev, const u8 *buf, u32 reg, u8 bc)
+pa_write_data(struct spmi_pmic_arb *pa, const u8 *buf, u32 reg, u8 bc)
 {
 	u32 data = 0;
+
 	memcpy(&data, buf, (bc & 3) + 1);
-	__raw_writel(data, dev->wr_base + reg);
+	pmic_arb_base_write(pa, reg, data);
 }
 
 static int pmic_arb_wait_for_done(struct spmi_controller *ctrl,
-				  void __iomem *base, u8 sid, u16 addr)
+				  void __iomem *base, u8 sid, u16 addr,
+				  enum pmic_arb_channel ch_type)
 {
-	struct spmi_pmic_arb_dev *dev = spmi_controller_get_drvdata(ctrl);
+	struct spmi_pmic_arb *pa = spmi_controller_get_drvdata(ctrl);
 	u32 status = 0;
 	u32 timeout = PMIC_ARB_TIMEOUT_US;
-	u32 offset = dev->ver_ops->offset(dev, sid, addr) + PMIC_ARB_STATUS;
+	u32 offset;
+	int rc;
+
+	rc = pa->ver_ops->offset(pa, sid, addr, ch_type, &offset);
+	if (rc)
+		return rc;
+
+	offset += PMIC_ARB_STATUS;
 
 	while (timeout--) {
 		status = readl_relaxed(base + offset);
@@ -253,18 +312,23 @@
 static int
 pmic_arb_non_data_cmd_v1(struct spmi_controller *ctrl, u8 opc, u8 sid)
 {
-	struct spmi_pmic_arb_dev *pmic_arb = spmi_controller_get_drvdata(ctrl);
+	struct spmi_pmic_arb *pa = spmi_controller_get_drvdata(ctrl);
 	unsigned long flags;
 	u32 cmd;
 	int rc;
-	u32 offset = pmic_arb->ver_ops->offset(pmic_arb, sid, 0);
+	u32 offset;
+
+	rc = pa->ver_ops->offset(pa, sid, 0, PMIC_ARB_CHANNEL_RW, &offset);
+	if (rc)
+		return rc;
 
 	cmd = ((opc | 0x40) << 27) | ((sid & 0xf) << 20);
 
-	raw_spin_lock_irqsave(&pmic_arb->lock, flags);
-	pmic_arb_base_write(pmic_arb, offset + PMIC_ARB_CMD, cmd);
-	rc = pmic_arb_wait_for_done(ctrl, pmic_arb->wr_base, sid, 0);
-	raw_spin_unlock_irqrestore(&pmic_arb->lock, flags);
+	raw_spin_lock_irqsave(&pa->lock, flags);
+	pmic_arb_base_write(pa, offset + PMIC_ARB_CMD, cmd);
+	rc = pmic_arb_wait_for_done(ctrl, pa->wr_base, sid, 0,
+				    PMIC_ARB_CHANNEL_RW);
+	raw_spin_unlock_irqrestore(&pa->lock, flags);
 
 	return rc;
 }
@@ -278,7 +342,7 @@
 /* Non-data command */
 static int pmic_arb_cmd(struct spmi_controller *ctrl, u8 opc, u8 sid)
 {
-	struct spmi_pmic_arb_dev *pmic_arb = spmi_controller_get_drvdata(ctrl);
+	struct spmi_pmic_arb *pa = spmi_controller_get_drvdata(ctrl);
 
 	dev_dbg(&ctrl->dev, "cmd op:0x%x sid:%d\n", opc, sid);
 
@@ -286,18 +350,34 @@
 	if (opc < SPMI_CMD_RESET || opc > SPMI_CMD_WAKEUP)
 		return -EINVAL;
 
-	return pmic_arb->ver_ops->non_data_cmd(ctrl, opc, sid);
+	return pa->ver_ops->non_data_cmd(ctrl, opc, sid);
 }
 
 static int pmic_arb_read_cmd(struct spmi_controller *ctrl, u8 opc, u8 sid,
 			     u16 addr, u8 *buf, size_t len)
 {
-	struct spmi_pmic_arb_dev *pmic_arb = spmi_controller_get_drvdata(ctrl);
+	struct spmi_pmic_arb *pa = spmi_controller_get_drvdata(ctrl);
 	unsigned long flags;
 	u8 bc = len - 1;
 	u32 cmd;
 	int rc;
-	u32 offset = pmic_arb->ver_ops->offset(pmic_arb, sid, addr);
+	u32 offset;
+	mode_t mode;
+
+	rc = pa->ver_ops->offset(pa, sid, addr, PMIC_ARB_CHANNEL_OBS, &offset);
+	if (rc)
+		return rc;
+
+	rc = pa->ver_ops->mode(pa, sid, addr, &mode);
+	if (rc)
+		return rc;
+
+	if (!(mode & 0400)) {
+		dev_err(&pa->spmic->dev,
+			"error: impermissible read from peripheral sid:%d addr:0x%x\n",
+			sid, addr);
+		return -ENODEV;
+	}
 
 	if (bc >= PMIC_ARB_MAX_TRANS_BYTES) {
 		dev_err(&ctrl->dev,
@@ -316,35 +396,51 @@
 	else
 		return -EINVAL;
 
-	cmd = pmic_arb->ver_ops->fmt_cmd(opc, sid, addr, bc);
+	cmd = pa->ver_ops->fmt_cmd(opc, sid, addr, bc);
 
-	raw_spin_lock_irqsave(&pmic_arb->lock, flags);
-	pmic_arb_set_rd_cmd(pmic_arb, offset + PMIC_ARB_CMD, cmd);
-	rc = pmic_arb_wait_for_done(ctrl, pmic_arb->rd_base, sid, addr);
+	raw_spin_lock_irqsave(&pa->lock, flags);
+	pmic_arb_set_rd_cmd(pa, offset + PMIC_ARB_CMD, cmd);
+	rc = pmic_arb_wait_for_done(ctrl, pa->rd_base, sid, addr,
+				    PMIC_ARB_CHANNEL_OBS);
 	if (rc)
 		goto done;
 
-	pa_read_data(pmic_arb, buf, offset + PMIC_ARB_RDATA0,
+	pa_read_data(pa, buf, offset + PMIC_ARB_RDATA0,
 		     min_t(u8, bc, 3));
 
 	if (bc > 3)
-		pa_read_data(pmic_arb, buf + 4,
-				offset + PMIC_ARB_RDATA1, bc - 4);
+		pa_read_data(pa, buf + 4, offset + PMIC_ARB_RDATA1, bc - 4);
 
 done:
-	raw_spin_unlock_irqrestore(&pmic_arb->lock, flags);
+	raw_spin_unlock_irqrestore(&pa->lock, flags);
 	return rc;
 }
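
[Editor's note — the new mode callback reuses Unix-style octal permission bits to gate bus access: 0400 marks a peripheral readable and 0200 writable by the current EE, as the checks above show. A standalone sketch of the same test, for illustration only:]

#include <stdio.h>

#define PERIPH_READABLE	0400	/* octal, as in the driver's mode checks */
#define PERIPH_WRITABLE	0200

static int check_access(unsigned int mode, int want_write)
{
	if (want_write)
		return (mode & PERIPH_WRITABLE) ? 0 : -1;
	return (mode & PERIPH_READABLE) ? 0 : -1;
}

int main(void)
{
	unsigned int mode = 0400;	/* readable, not writable (observer-only) */

	printf("read:  %s\n", check_access(mode, 0) ? "denied" : "ok");
	printf("write: %s\n", check_access(mode, 1) ? "denied" : "ok");
	return 0;
}
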
 
 static int pmic_arb_write_cmd(struct spmi_controller *ctrl, u8 opc, u8 sid,
 			      u16 addr, const u8 *buf, size_t len)
 {
-	struct spmi_pmic_arb_dev *pmic_arb = spmi_controller_get_drvdata(ctrl);
+	struct spmi_pmic_arb *pa = spmi_controller_get_drvdata(ctrl);
 	unsigned long flags;
 	u8 bc = len - 1;
 	u32 cmd;
 	int rc;
-	u32 offset = pmic_arb->ver_ops->offset(pmic_arb, sid, addr);
+	u32 offset;
+	mode_t mode;
+
+	rc = pa->ver_ops->offset(pa, sid, addr, PMIC_ARB_CHANNEL_RW, &offset);
+	if (rc)
+		return rc;
+
+	rc = pa->ver_ops->mode(pa, sid, addr, &mode);
+	if (rc)
+		return rc;
+
+	if (!(mode & 0200)) {
+		dev_err(&pa->spmic->dev,
+			"error: impermissible write to peripheral sid:%d addr:0x%x\n",
+			sid, addr);
+		return -ENODEV;
+	}
 
 	if (bc >= PMIC_ARB_MAX_TRANS_BYTES) {
 		dev_err(&ctrl->dev,
@@ -365,20 +461,19 @@
 	else
 		return -EINVAL;
 
-	cmd = pmic_arb->ver_ops->fmt_cmd(opc, sid, addr, bc);
+	cmd = pa->ver_ops->fmt_cmd(opc, sid, addr, bc);
 
 	/* Write data to FIFOs */
-	raw_spin_lock_irqsave(&pmic_arb->lock, flags);
-	pa_write_data(pmic_arb, buf, offset + PMIC_ARB_WDATA0,
-		      min_t(u8, bc, 3));
+	raw_spin_lock_irqsave(&pa->lock, flags);
+	pa_write_data(pa, buf, offset + PMIC_ARB_WDATA0, min_t(u8, bc, 3));
 	if (bc > 3)
-		pa_write_data(pmic_arb, buf + 4,
-				offset + PMIC_ARB_WDATA1, bc - 4);
+		pa_write_data(pa, buf + 4, offset + PMIC_ARB_WDATA1, bc - 4);
 
 	/* Start the transaction */
-	pmic_arb_base_write(pmic_arb, offset + PMIC_ARB_CMD, cmd);
-	rc = pmic_arb_wait_for_done(ctrl, pmic_arb->wr_base, sid, addr);
-	raw_spin_unlock_irqrestore(&pmic_arb->lock, flags);
+	pmic_arb_base_write(pa, offset + PMIC_ARB_CMD, cmd);
+	rc = pmic_arb_wait_for_done(ctrl, pa->wr_base, sid, addr,
+				    PMIC_ARB_CHANNEL_RW);
+	raw_spin_unlock_irqrestore(&pa->lock, flags);
 
 	return rc;
 }
@@ -404,9 +499,9 @@
 static void qpnpint_spmi_write(struct irq_data *d, u8 reg, void *buf,
 			       size_t len)
 {
-	struct spmi_pmic_arb_dev *pa = irq_data_get_irq_chip_data(d);
-	u8 sid = d->hwirq >> 24;
-	u8 per = d->hwirq >> 16;
+	struct spmi_pmic_arb *pa = irq_data_get_irq_chip_data(d);
+	u8 sid = HWIRQ_SID(d->hwirq);
+	u8 per = HWIRQ_PER(d->hwirq);
 
 	if (pmic_arb_write_cmd(pa->spmic, SPMI_CMD_EXT_WRITEL, sid,
 			       (per << 8) + reg, buf, len))
@@ -417,9 +512,9 @@
 
 static void qpnpint_spmi_read(struct irq_data *d, u8 reg, void *buf, size_t len)
 {
-	struct spmi_pmic_arb_dev *pa = irq_data_get_irq_chip_data(d);
-	u8 sid = d->hwirq >> 24;
-	u8 per = d->hwirq >> 16;
+	struct spmi_pmic_arb *pa = irq_data_get_irq_chip_data(d);
+	u8 sid = HWIRQ_SID(d->hwirq);
+	u8 per = HWIRQ_PER(d->hwirq);
 
 	if (pmic_arb_read_cmd(pa->spmic, SPMI_CMD_EXT_READL, sid,
 			      (per << 8) + reg, buf, len))
@@ -428,145 +523,196 @@
 				    d->irq);
 }
 
-static void periph_interrupt(struct spmi_pmic_arb_dev *pa, u8 apid)
+static void cleanup_irq(struct spmi_pmic_arb *pa, u16 apid, int id)
+{
+	u16 ppid = pa->apid_data[apid].ppid;
+	u8 sid = ppid >> 8;
+	u8 per = ppid & 0xFF;
+	u8 irq_mask = BIT(id);
+
+	dev_err_ratelimited(&pa->spmic->dev,
+		"cleanup_irq apid=%d sid=0x%x per=0x%x irq=%d\n",
+		apid, sid, per, id);
+	writel_relaxed(irq_mask, pa->intr + pa->ver_ops->irq_clear(apid));
+}
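
[Editor's note — the HWIRQ/HWIRQ_SID/HWIRQ_PER/HWIRQ_IRQ/HWIRQ_APID helpers are defined earlier in this patch, outside the excerpt. The sketch below assumes the packing that mainline later adopted (sid in bits 31:28, peripheral in 27:20, irq in 18:16, apid in 8:0); treat the exact layout as an assumption.]

#include <stdint.h>
#include <stdio.h>

/* Assumed layout; not verified against the parts of this patch not shown. */
#define HWIRQ(sid, per, irq, apid)			\
	((((uint32_t)(sid)  & 0xF)   << 28) |		\
	 (((uint32_t)(per)  & 0xFF)  << 20) |		\
	 (((uint32_t)(irq)  & 0x7)   << 16) |		\
	 (((uint32_t)(apid) & 0x1FF) << 0))

#define HWIRQ_SID(h)	(((h) >> 28) & 0xF)
#define HWIRQ_PER(h)	(((h) >> 20) & 0xFF)
#define HWIRQ_IRQ(h)	(((h) >> 16) & 0x7)
#define HWIRQ_APID(h)	((h) & 0x1FF)

int main(void)
{
	uint32_t h = HWIRQ(0x2, 0x40, 3, 137);

	printf("hwirq=0x%08x sid=%u per=0x%x irq=%u apid=%u\n",
	       h, HWIRQ_SID(h), HWIRQ_PER(h), HWIRQ_IRQ(h), HWIRQ_APID(h));
	return 0;
}
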
+
+static void periph_interrupt(struct spmi_pmic_arb *pa, u16 apid, bool show)
 {
 	unsigned int irq;
 	u32 status;
 	int id;
+	u8 sid = (pa->apid_data[apid].ppid >> 8) & 0xF;
+	u8 per = pa->apid_data[apid].ppid & 0xFF;
 
 	status = readl_relaxed(pa->intr + pa->ver_ops->irq_status(apid));
 	while (status) {
 		id = ffs(status) - 1;
-		status &= ~(1 << id);
-		irq = irq_find_mapping(pa->domain,
-				       pa->apid_to_ppid[apid] << 16
-				     | id << 8
-				     | apid);
+		status &= ~BIT(id);
+		irq = irq_find_mapping(pa->domain, HWIRQ(sid, per, id, apid));
+		if (irq == 0) {
+			cleanup_irq(pa, apid, id);
+			continue;
+		}
+		if (show) {
+			struct irq_desc *desc;
+			const char *name = "null";
+
+			desc = irq_to_desc(irq);
+			if (desc == NULL)
+				name = "stray irq";
+			else if (desc->action && desc->action->name)
+				name = desc->action->name;
+
+			pr_warn("spmi_show_resume_irq: %d triggered [0x%01x, 0x%02x, 0x%01x] %s\n",
+				irq, sid, per, id, name);
+		} else {
 		generic_handle_irq(irq);
 	}
 }
+}
 
-static void pmic_arb_chained_irq(struct irq_desc *desc)
+static void __pmic_arb_chained_irq(struct spmi_pmic_arb *pa, bool show)
 {
-	struct spmi_pmic_arb_dev *pa = irq_desc_get_handler_data(desc);
-	struct irq_chip *chip = irq_desc_get_chip(desc);
-	void __iomem *intr = pa->intr;
 	int first = pa->min_apid >> 5;
 	int last = pa->max_apid >> 5;
-	u32 status;
-	int i, id;
-
-	chained_irq_enter(chip, desc);
+	u32 status, enable;
+	int i, id, apid;
+	/* status-based dispatch */
+	bool acc_valid = false;
+	u32 irq_status = 0;
 
 	for (i = first; i <= last; ++i) {
-		status = readl_relaxed(intr +
+		status = readl_relaxed(pa->acc_status +
 				      pa->ver_ops->owner_acc_status(pa->ee, i));
+		if (status)
+			acc_valid = true;
+
 		while (status) {
 			id = ffs(status) - 1;
-			status &= ~(1 << id);
-			periph_interrupt(pa, id + i * 32);
+			status &= ~BIT(id);
+			apid = id + i * 32;
+			if (apid < pa->min_apid || apid > pa->max_apid) {
+				WARN_ONCE(true, "spurious spmi irq received for apid=%d\n",
+					apid);
+				continue;
+			}
+			enable = readl_relaxed(pa->intr +
+					pa->ver_ops->acc_enable(apid));
+			if (enable & SPMI_PIC_ACC_ENABLE_BIT)
+				periph_interrupt(pa, apid, show);
 		}
 	}
 
+	/* ACC_STATUS is empty but an IRQ fired; check IRQ_STATUS */
+	if (!acc_valid) {
+		for (i = pa->min_apid; i <= pa->max_apid; i++) {
+			/* skip if APPS is not irq owner */
+			if (pa->apid_data[i].irq_owner != pa->ee)
+				continue;
+
+			irq_status = readl_relaxed(pa->intr +
+						pa->ver_ops->irq_status(i));
+			if (irq_status) {
+				enable = readl_relaxed(pa->intr +
+						pa->ver_ops->acc_enable(i));
+				if (enable & SPMI_PIC_ACC_ENABLE_BIT) {
+					dev_dbg(&pa->spmic->dev,
+						"Dispatching IRQ for apid=%d status=%x\n",
+						i, irq_status);
+					periph_interrupt(pa, i, show);
+				}
+			}
+		}
+	}
+}
+
+static void pmic_arb_chained_irq(struct irq_desc *desc)
+{
+	struct spmi_pmic_arb *pa = irq_desc_get_handler_data(desc);
+	struct irq_chip *chip = irq_desc_get_chip(desc);
+
+	chained_irq_enter(chip, desc);
+	__pmic_arb_chained_irq(pa, false);
 	chained_irq_exit(chip, desc);
 }
 
 static void qpnpint_irq_ack(struct irq_data *d)
 {
-	struct spmi_pmic_arb_dev *pa = irq_data_get_irq_chip_data(d);
-	u8 irq  = d->hwirq >> 8;
-	u8 apid = d->hwirq;
-	unsigned long flags;
+	struct spmi_pmic_arb *pa = irq_data_get_irq_chip_data(d);
+	u8 irq = HWIRQ_IRQ(d->hwirq);
+	u16 apid = HWIRQ_APID(d->hwirq);
 	u8 data;
 
-	raw_spin_lock_irqsave(&pa->lock, flags);
-	writel_relaxed(1 << irq, pa->intr + pa->ver_ops->irq_clear(apid));
-	raw_spin_unlock_irqrestore(&pa->lock, flags);
+	writel_relaxed(BIT(irq), pa->intr + pa->ver_ops->irq_clear(apid));
 
-	data = 1 << irq;
+	data = BIT(irq);
 	qpnpint_spmi_write(d, QPNPINT_REG_LATCHED_CLR, &data, 1);
 }
 
 static void qpnpint_irq_mask(struct irq_data *d)
 {
-	struct spmi_pmic_arb_dev *pa = irq_data_get_irq_chip_data(d);
-	u8 irq  = d->hwirq >> 8;
-	u8 apid = d->hwirq;
-	unsigned long flags;
-	u32 status;
-	u8 data;
+	u8 irq = HWIRQ_IRQ(d->hwirq);
+	u8 data = BIT(irq);
 
-	raw_spin_lock_irqsave(&pa->lock, flags);
-	status = readl_relaxed(pa->intr + pa->ver_ops->acc_enable(apid));
-	if (status & SPMI_PIC_ACC_ENABLE_BIT) {
-		status = status & ~SPMI_PIC_ACC_ENABLE_BIT;
-		writel_relaxed(status, pa->intr +
-			       pa->ver_ops->acc_enable(apid));
-	}
-	raw_spin_unlock_irqrestore(&pa->lock, flags);
-
-	data = 1 << irq;
 	qpnpint_spmi_write(d, QPNPINT_REG_EN_CLR, &data, 1);
 }
 
 static void qpnpint_irq_unmask(struct irq_data *d)
 {
-	struct spmi_pmic_arb_dev *pa = irq_data_get_irq_chip_data(d);
-	u8 irq  = d->hwirq >> 8;
-	u8 apid = d->hwirq;
-	unsigned long flags;
-	u32 status;
-	u8 data;
+	struct spmi_pmic_arb *pa = irq_data_get_irq_chip_data(d);
+	u8 irq = HWIRQ_IRQ(d->hwirq);
+	u16 apid = HWIRQ_APID(d->hwirq);
+	u8 buf[2];
 
-	raw_spin_lock_irqsave(&pa->lock, flags);
-	status = readl_relaxed(pa->intr + pa->ver_ops->acc_enable(apid));
-	if (!(status & SPMI_PIC_ACC_ENABLE_BIT)) {
-		writel_relaxed(status | SPMI_PIC_ACC_ENABLE_BIT,
+	writel_relaxed(SPMI_PIC_ACC_ENABLE_BIT,
 				pa->intr + pa->ver_ops->acc_enable(apid));
-	}
-	raw_spin_unlock_irqrestore(&pa->lock, flags);
 
-	data = 1 << irq;
-	qpnpint_spmi_write(d, QPNPINT_REG_EN_SET, &data, 1);
+	qpnpint_spmi_read(d, QPNPINT_REG_EN_SET, &buf[0], 1);
+	if (!(buf[0] & BIT(irq))) {
+		/*
+		 * Since the interrupt is currently disabled, write to both the
+		 * LATCHED_CLR and EN_SET registers so that a spurious interrupt
+		 * cannot be triggered when the interrupt is enabled
+		 */
+		buf[0] = BIT(irq);
+		buf[1] = BIT(irq);
+		qpnpint_spmi_write(d, QPNPINT_REG_LATCHED_CLR, &buf, 2);
 }
-
-static void qpnpint_irq_enable(struct irq_data *d)
-{
-	u8 irq  = d->hwirq >> 8;
-	u8 data;
-
-	qpnpint_irq_unmask(d);
-
-	data = 1 << irq;
-	qpnpint_spmi_write(d, QPNPINT_REG_LATCHED_CLR, &data, 1);
 }
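
[Editor's note — why the single 2-byte write in qpnpint_irq_unmask() works: on QPNP peripherals the interrupt registers are consecutive, and — assuming the register map used upstream — LATCHED_CLR at offset 0x14 sits directly below EN_SET at 0x15, so one extended SPMI write clears the latched bit and sets the enable bit in a single bus transaction. A toy model:]

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Offsets assumed from the upstream QPNPINT register map. */
#define QPNPINT_REG_LATCHED_CLR	0x14
#define QPNPINT_REG_EN_SET	0x15

static uint8_t periph[0x100];	/* toy peripheral register file */

/* Model of an SPMI extended write: len consecutive bytes starting at reg. */
static void spmi_write(uint8_t reg, const uint8_t *buf, size_t len)
{
	memcpy(&periph[reg], buf, len);
}

int main(void)
{
	uint8_t irq = 3;
	uint8_t buf[2] = { 1u << irq, 1u << irq };

	/* buf[0] lands in LATCHED_CLR, buf[1] in the adjacent EN_SET. */
	spmi_write(QPNPINT_REG_LATCHED_CLR, buf, 2);
	printf("LATCHED_CLR=0x%02x EN_SET=0x%02x\n",
	       periph[QPNPINT_REG_LATCHED_CLR], periph[QPNPINT_REG_EN_SET]);
	return 0;
}
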
 
 static int qpnpint_irq_set_type(struct irq_data *d, unsigned int flow_type)
 {
 	struct spmi_pmic_arb_qpnpint_type type;
-	u8 irq = d->hwirq >> 8;
+	u8 irq = HWIRQ_IRQ(d->hwirq);
+	u8 bit_mask_irq = BIT(irq);
 
 	qpnpint_spmi_read(d, QPNPINT_REG_SET_TYPE, &type, sizeof(type));
 
 	if (flow_type & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING)) {
-		type.type |= 1 << irq;
+		type.type |= bit_mask_irq;
 		if (flow_type & IRQF_TRIGGER_RISING)
-			type.polarity_high |= 1 << irq;
+			type.polarity_high |= bit_mask_irq;
 		if (flow_type & IRQF_TRIGGER_FALLING)
-			type.polarity_low  |= 1 << irq;
+			type.polarity_low  |= bit_mask_irq;
 	} else {
 		if ((flow_type & (IRQF_TRIGGER_HIGH)) &&
 		    (flow_type & (IRQF_TRIGGER_LOW)))
 			return -EINVAL;
 
-		type.type &= ~(1 << irq); /* level trig */
+		type.type &= ~bit_mask_irq; /* level trig */
 		if (flow_type & IRQF_TRIGGER_HIGH)
-			type.polarity_high |= 1 << irq;
+			type.polarity_high |= bit_mask_irq;
 		else
-			type.polarity_low  |= 1 << irq;
+			type.polarity_low  |= bit_mask_irq;
 	}
 
 	qpnpint_spmi_write(d, QPNPINT_REG_SET_TYPE, &type, sizeof(type));
+
+	if (flow_type & IRQ_TYPE_EDGE_BOTH)
+		irq_set_handler_locked(d, handle_edge_irq);
+	else
+		irq_set_handler_locked(d, handle_level_irq);
+
 	return 0;
 }
 
@@ -574,7 +720,7 @@
 				     enum irqchip_irq_state which,
 				     bool *state)
 {
-	u8 irq = d->hwirq >> 8;
+	u8 irq = HWIRQ_IRQ(d->hwirq);
 	u8 status = 0;
 
 	if (which != IRQCHIP_STATE_LINE_LEVEL)
@@ -588,7 +734,6 @@
 
 static struct irq_chip pmic_arb_irqchip = {
 	.name		= "pmic_arb",
-	.irq_enable	= qpnpint_irq_enable,
 	.irq_ack	= qpnpint_irq_ack,
 	.irq_mask	= qpnpint_irq_mask,
 	.irq_unmask	= qpnpint_irq_unmask,
@@ -598,42 +743,15 @@
 			| IRQCHIP_SKIP_SET_WAKE,
 };
 
-struct spmi_pmic_arb_irq_spec {
-	unsigned slave:4;
-	unsigned per:8;
-	unsigned irq:3;
-};
-
-static int search_mapping_table(struct spmi_pmic_arb_dev *pa,
-				struct spmi_pmic_arb_irq_spec *spec,
-				u8 *apid)
+static void qpnpint_irq_domain_activate(struct irq_domain *domain,
+					struct irq_data *d)
 {
-	u16 ppid = spec->slave << 8 | spec->per;
-	u32 *mapping_table = pa->mapping_table;
-	int index = 0, i;
-	u32 data;
+	u8 irq = HWIRQ_IRQ(d->hwirq);
+	u8 buf;
 
-	for (i = 0; i < SPMI_MAPPING_TABLE_TREE_DEPTH; ++i) {
-		data = mapping_table[index];
-
-		if (ppid & (1 << SPMI_MAPPING_BIT_INDEX(data))) {
-			if (SPMI_MAPPING_BIT_IS_1_FLAG(data)) {
-				index = SPMI_MAPPING_BIT_IS_1_RESULT(data);
-			} else {
-				*apid = SPMI_MAPPING_BIT_IS_1_RESULT(data);
-				return 0;
-			}
-		} else {
-			if (SPMI_MAPPING_BIT_IS_0_FLAG(data)) {
-				index = SPMI_MAPPING_BIT_IS_0_RESULT(data);
-			} else {
-				*apid = SPMI_MAPPING_BIT_IS_0_RESULT(data);
-				return 0;
-			}
-		}
-	}
-
-	return -ENODEV;
+	buf = BIT(irq);
+	qpnpint_spmi_write(d, QPNPINT_REG_EN_CLR, &buf, 1);
+	qpnpint_spmi_write(d, QPNPINT_REG_LATCHED_CLR, &buf, 1);
 }
 
 static int qpnpint_irq_domain_dt_translate(struct irq_domain *d,
@@ -643,10 +761,9 @@
 					   unsigned long *out_hwirq,
 					   unsigned int *out_type)
 {
-	struct spmi_pmic_arb_dev *pa = d->host_data;
-	struct spmi_pmic_arb_irq_spec spec;
-	int err;
-	u8 apid;
+	struct spmi_pmic_arb *pa = d->host_data;
+	int rc;
+	u16 apid;
 
 	dev_dbg(&pa->spmic->dev,
 		"intspec[0] 0x%1x intspec[1] 0x%02x intspec[2] 0x%02x\n",
@@ -659,15 +776,21 @@
 	if (intspec[0] > 0xF || intspec[1] > 0xFF || intspec[2] > 0x7)
 		return -EINVAL;
 
-	spec.slave = intspec[0];
-	spec.per   = intspec[1];
-	spec.irq   = intspec[2];
-
-	err = search_mapping_table(pa, &spec, &apid);
-	if (err)
-		return err;
+	rc = pa->ver_ops->ppid_to_apid(pa, intspec[0],
+			(intspec[1] << 8), &apid);
+	if (rc < 0) {
+		dev_err(&pa->spmic->dev,
+		"failed to xlate sid = 0x%x, periph = 0x%x, irq = %u rc = %d\n",
+		intspec[0], intspec[1], intspec[2], rc);
+		return rc;
+	}
 
-	pa->apid_to_ppid[apid] = spec.slave << 8 | spec.per;
+	if (pa->apid_data[apid].irq_owner != pa->ee) {
+		dev_err(&pa->spmic->dev, "failed to xlate sid = 0x%x, periph = 0x%x, irq = %u: ee=%u but owner=%u\n",
+			intspec[0], intspec[1], intspec[2], pa->ee,
+			pa->apid_data[apid].irq_owner);
+		return -ENODEV;
+	}
 
 	/* Keep track of {max,min}_apid for bounding search during interrupt */
 	if (apid > pa->max_apid)
@@ -675,10 +798,7 @@
 	if (apid < pa->min_apid)
 		pa->min_apid = apid;
 
-	*out_hwirq = spec.slave << 24
-		   | spec.per   << 16
-		   | spec.irq   << 8
-		   | apid;
+	*out_hwirq = HWIRQ(intspec[0], intspec[1], intspec[2], apid);
 	*out_type  = intspec[3] & IRQ_TYPE_SENSE_MASK;
 
 	dev_dbg(&pa->spmic->dev, "out_hwirq = %lu\n", *out_hwirq);
@@ -690,7 +810,7 @@
 				  unsigned int virq,
 				  irq_hw_number_t hwirq)
 {
-	struct spmi_pmic_arb_dev *pa = d->host_data;
+	struct spmi_pmic_arb *pa = d->host_data;
 
 	dev_dbg(&pa->spmic->dev, "virq = %u, hwirq = %lu\n", virq, hwirq);
 
@@ -700,19 +820,267 @@
 	return 0;
 }
 
+static int
+pmic_arb_ppid_to_apid_v1(struct spmi_pmic_arb *pa, u8 sid, u16 addr, u16 *apid)
+{
+	u16 ppid = sid << 8 | ((addr >> 8) & 0xFF);
+	u32 *mapping_table = pa->mapping_table;
+	int index = 0, i;
+	u16 apid_valid;
+	u32 data;
+
+	apid_valid = pa->ppid_to_apid[ppid];
+	if (apid_valid & PMIC_ARB_CHAN_VALID) {
+		*apid = (apid_valid & ~PMIC_ARB_CHAN_VALID);
+		return 0;
+	}
+
+	for (i = 0; i < SPMI_MAPPING_TABLE_TREE_DEPTH; ++i) {
+		if (!test_and_set_bit(index, pa->mapping_table_valid))
+			mapping_table[index] = readl_relaxed(pa->cnfg +
+						SPMI_MAPPING_TABLE_REG(index));
+
+		data = mapping_table[index];
+
+		if (ppid & BIT(SPMI_MAPPING_BIT_INDEX(data))) {
+			if (SPMI_MAPPING_BIT_IS_1_FLAG(data)) {
+				index = SPMI_MAPPING_BIT_IS_1_RESULT(data);
+			} else {
+				*apid = SPMI_MAPPING_BIT_IS_1_RESULT(data);
+				pa->ppid_to_apid[ppid]
+					= *apid | PMIC_ARB_CHAN_VALID;
+				pa->apid_data[*apid].ppid = ppid;
+				return 0;
+			}
+		} else {
+			if (SPMI_MAPPING_BIT_IS_0_FLAG(data)) {
+				index = SPMI_MAPPING_BIT_IS_0_RESULT(data);
+			} else {
+				*apid = SPMI_MAPPING_BIT_IS_0_RESULT(data);
+				pa->ppid_to_apid[ppid]
+					= *apid | PMIC_ARB_CHAN_VALID;
+				pa->apid_data[*apid].ppid = ppid;
+				return 0;
+			}
+		}
+	}
+
+	return -ENODEV;
+}
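
[Editor's note — the cache above keys on ppid = (sid << 8) | (addr >> 8), a 12-bit value. Because APID 0 is legitimate data, a separate valid flag is OR-ed into each cached entry; PMIC_ARB_CHAN_VALID is defined outside this excerpt and is presumably a high bit such as BIT(15). A sketch of the encode/decode under that assumption:]

#include <stdint.h>
#include <stdio.h>

/* Assumed definition; the real one is outside this excerpt. */
#define PMIC_ARB_CHAN_VALID	(1u << 15)

static uint16_t ppid_to_apid_cache[1 << 12];	/* 12-bit PPID space */

static void cache_store(uint16_t ppid, uint16_t apid)
{
	ppid_to_apid_cache[ppid] = apid | PMIC_ARB_CHAN_VALID;
}

static int cache_lookup(uint16_t ppid, uint16_t *apid)
{
	uint16_t v = ppid_to_apid_cache[ppid];

	if (!(v & PMIC_ARB_CHAN_VALID))
		return -1;	/* not cached; walk the HW mapping table */
	*apid = v & ~PMIC_ARB_CHAN_VALID;
	return 0;
}

int main(void)
{
	uint16_t apid;

	cache_store(0x21A, 0);	/* APID 0 is valid data, hence the flag bit */
	printf("hit=%d apid=%u\n", cache_lookup(0x21A, &apid) == 0, apid);
	return 0;
}
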
+
+static int
+pmic_arb_mode_v1_v3(struct spmi_pmic_arb *pa, u8 sid, u16 addr, mode_t *mode)
+{
+	*mode = 0600;
+	return 0;
+}
+
 /* v1 offset per ee */
-static u32 pmic_arb_offset_v1(struct spmi_pmic_arb_dev *pa, u8 sid, u16 addr)
+static int
+pmic_arb_offset_v1(struct spmi_pmic_arb *pa, u8 sid, u16 addr,
+		   enum pmic_arb_channel ch_type, u32 *offset)
 {
-	return 0x800 + 0x80 * pa->channel;
+	*offset = 0x800 + 0x80 * pa->channel;
+	return 0;
+}
+
+static u16 pmic_arb_find_apid(struct spmi_pmic_arb *pa, u16 ppid)
+{
+	u32 regval, offset;
+	u16 apid;
+	u16 id;
+
+	/*
+	 * PMIC_ARB_REG_CHNL is a table in HW mapping channel to ppid.
+	 * ppid_to_apid is an in-memory invert of that table.
+	 */
+	for (apid = pa->last_apid; apid < pa->max_periph; apid++) {
+		/* Do not keep the reserved channel in the mapping table */
+		if (pa->reserved_chan >= 0 && apid == pa->reserved_chan)
+			continue;
+
+		regval = readl_relaxed(pa->cnfg +
+				      SPMI_OWNERSHIP_TABLE_REG(apid));
+		pa->apid_data[apid].irq_owner
+			= SPMI_OWNERSHIP_PERIPH2OWNER(regval);
+		pa->apid_data[apid].write_owner = pa->apid_data[apid].irq_owner;
+
+		offset = pa->ver_ops->channel_map_offset(apid);
+		if (offset >= pa->core_size)
+			break;
+
+		regval = readl_relaxed(pa->core + offset);
+		if (!regval)
+			continue;
+
+		id = (regval >> 8) & PMIC_ARB_PPID_MASK;
+		pa->ppid_to_apid[id] = apid | PMIC_ARB_CHAN_VALID;
+		pa->apid_data[apid].ppid = id;
+		if (id == ppid) {
+			apid |= PMIC_ARB_CHAN_VALID;
+			break;
+		}
+	}
+	pa->last_apid = apid & ~PMIC_ARB_CHAN_VALID;
+
+	return apid;
 }
 
-/* v2 offset per ppid (chan) and per ee */
-static u32 pmic_arb_offset_v2(struct spmi_pmic_arb_dev *pa, u8 sid, u16 addr)
+static int
+pmic_arb_ppid_to_apid_v2(struct spmi_pmic_arb *pa, u8 sid, u16 addr, u16 *apid)
 {
 	u16 ppid = (sid << 8) | (addr >> 8);
-	u8  chan = pa->ppid_to_chan[ppid];
+	u16 apid_valid;
+
+	apid_valid = pa->ppid_to_apid[ppid];
+	if (!(apid_valid & PMIC_ARB_CHAN_VALID))
+		apid_valid = pmic_arb_find_apid(pa, ppid);
+	if (!(apid_valid & PMIC_ARB_CHAN_VALID))
+		return -ENODEV;
+
+	*apid = (apid_valid & ~PMIC_ARB_CHAN_VALID);
+	return 0;
+}
+
+static int pmic_arb_read_apid_map_v5(struct spmi_pmic_arb *pa)
+{
+	u32 regval, offset;
+	u16 apid, prev_apid, ppid;
+	bool valid, is_irq_owner;
+
+	/*
+	 * PMIC_ARB_REG_CHNL is a table in HW mapping APID (channel) to PPID.
+	 * ppid_to_apid is an in-memory invert of that table.  In order to allow
+	 * multiple EEs to write to a single PPID in arbiter version 5, there
+	 * is more than one APID mapped to each PPID.  The owner field for each
+	 * of these mappings specifies the EE which is allowed to write to the
+	 * APID.  The owner of the last (highest) APID for a given PPID will
+	 * receive interrupts from the PPID.
+	 */
+	for (apid = 0; apid < pa->max_periph; apid++) {
+		/* Do not keep the reserved channel in the mapping table */
+		if (pa->reserved_chan >= 0 && apid == pa->reserved_chan)
+			continue;
+
+		offset = pa->ver_ops->channel_map_offset(apid);
+		if (offset >= pa->core_size)
+			break;
+
+		regval = readl_relaxed(pa->core + offset);
+		if (!regval)
+			continue;
+		ppid = (regval >> 8) & PMIC_ARB_PPID_MASK;
+		is_irq_owner = PMIC_ARB_CHAN_IS_IRQ_OWNER(regval);
+
+		regval = readl_relaxed(pa->cnfg +
+				      SPMI_OWNERSHIP_TABLE_REG(apid));
+		pa->apid_data[apid].write_owner
+			= SPMI_OWNERSHIP_PERIPH2OWNER(regval);
+
+		pa->apid_data[apid].irq_owner = is_irq_owner ?
+			pa->apid_data[apid].write_owner : INVALID_EE;
+
+		valid = pa->ppid_to_apid[ppid] & PMIC_ARB_CHAN_VALID;
+		prev_apid = pa->ppid_to_apid[ppid] & ~PMIC_ARB_CHAN_VALID;
+
+		if (valid && is_irq_owner &&
+		    pa->apid_data[prev_apid].write_owner == pa->ee) {
+			/*
+			 * Duplicate PPID mapping after the one for this EE;
+			 * override the irq owner
+			 */
+			pa->apid_data[prev_apid].irq_owner
+				= pa->apid_data[apid].irq_owner;
+		} else if (!valid || is_irq_owner) {
+			/* First PPID mapping or duplicate for another EE */
+			pa->ppid_to_apid[ppid] = apid | PMIC_ARB_CHAN_VALID;
+		}
+
+		pa->apid_data[apid].ppid = ppid;
+		pa->last_apid = apid;
+	}
+
+	/* Dump the mapping table for debug purposes. */
+	dev_dbg(&pa->spmic->dev, "PPID APID Write-EE IRQ-EE\n");
+	for (ppid = 0; ppid < PMIC_ARB_MAX_PPID; ppid++) {
+		valid = pa->ppid_to_apid[ppid] & PMIC_ARB_CHAN_VALID;
+		apid = pa->ppid_to_apid[ppid] & ~PMIC_ARB_CHAN_VALID;
+
+		if (valid)
+			dev_dbg(&pa->spmic->dev, "0x%03X %3u %2u %2u\n",
+				ppid, apid, pa->apid_data[apid].write_owner,
+				pa->apid_data[apid].irq_owner);
+	}
+
+	return 0;
+}
+
+static int
+pmic_arb_ppid_to_apid_v5(struct spmi_pmic_arb *pa, u8 sid, u16 addr, u16 *apid)
+{
+	u16 ppid = (sid << 8) | (addr >> 8);
+
+	if (!(pa->ppid_to_apid[ppid] & PMIC_ARB_CHAN_VALID))
+		return -ENODEV;
+
+	*apid = pa->ppid_to_apid[ppid] & ~PMIC_ARB_CHAN_VALID;
+
+	return 0;
+}
+
+static int
+pmic_arb_mode_v2(struct spmi_pmic_arb *pa, u8 sid, u16 addr, mode_t *mode)
+{
+	u16 apid;
+	int rc;
+
+	rc = pa->ver_ops->ppid_to_apid(pa, sid, addr, &apid);
+	if (rc < 0)
+		return rc;
+
+	*mode = 0;
+	*mode |= 0400;
+
+	if (pa->ee == pa->apid_data[apid].write_owner)
+		*mode |= 0200;
+	return 0;
+}
+
+/* v2 offset per ppid and per ee */
+static int
+pmic_arb_offset_v2(struct spmi_pmic_arb *pa, u8 sid, u16 addr,
+		   enum pmic_arb_channel ch_type, u32 *offset)
+{
+	u16 apid;
+	int rc;
+
+	rc = pmic_arb_ppid_to_apid_v2(pa, sid, addr, &apid);
+	if (rc < 0)
+		return rc;
+
+	*offset = 0x1000 * pa->ee + 0x8000 * apid;
+	return 0;
+}
+
+/*
+ * v5 offset: per ee and per apid for observer channels; per apid only for
+ * read/write channels.
+ */
+static int
+pmic_arb_offset_v5(struct spmi_pmic_arb *pa, u8 sid, u16 addr,
+		   enum pmic_arb_channel ch_type, u32 *offset)
+{
+	u16 apid;
+	int rc;
 
-	return 0x1000 * pa->ee + 0x8000 * chan;
+	rc = pmic_arb_ppid_to_apid_v5(pa, sid, addr, &apid);
+	if (rc < 0)
+		return rc;
+
+	*offset = (ch_type == PMIC_ARB_CHANNEL_OBS)
+			? 0x10000 * pa->ee + 0x80 * apid
+			: 0x10000 * apid;
+	return 0;
 }
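
[Editor's note — the per-version offset formulas differ substantially; a standalone check of the arithmetic, taken directly from the three ver_ops above:]

#include <stdint.h>
#include <stdio.h>

static uint32_t off_v1(uint32_t channel)
{
	return 0x800 + 0x80 * channel;
}

static uint32_t off_v2(uint32_t ee, uint32_t apid)
{
	return 0x1000 * ee + 0x8000 * apid;
}

static uint32_t off_v5(uint32_t ee, uint32_t apid, int observer)
{
	return observer ? 0x10000 * ee + 0x80 * apid : 0x10000 * apid;
}

int main(void)
{
	printf("v1 chan=3:          0x%x\n", off_v1(3));		/* 0x980   */
	printf("v2 ee=2 apid=5:     0x%x\n", off_v2(2, 5));	/* 0x2a000 */
	printf("v5 obs ee=2 apid=5: 0x%x\n", off_v5(2, 5, 1));	/* 0x20280 */
	printf("v5 rw  apid=5:      0x%x\n", off_v5(2, 5, 0));	/* 0x50000 */
	return 0;
}
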
 
 static u32 pmic_arb_fmt_cmd_v1(u8 opc, u8 sid, u16 addr, u8 bc)
@@ -725,47 +1093,85 @@
 	return (opc << 27) | ((addr & 0xff) << 4) | (bc & 0x7);
 }
 
-static u32 pmic_arb_owner_acc_status_v1(u8 m, u8 n)
+static u32 pmic_arb_owner_acc_status_v1(u8 m, u16 n)
 {
 	return 0x20 * m + 0x4 * n;
 }
 
-static u32 pmic_arb_owner_acc_status_v2(u8 m, u8 n)
+static u32 pmic_arb_owner_acc_status_v2(u8 m, u16 n)
 {
 	return 0x100000 + 0x1000 * m + 0x4 * n;
 }
 
-static u32 pmic_arb_acc_enable_v1(u8 n)
+static u32 pmic_arb_owner_acc_status_v3(u8 m, u16 n)
+{
+	return 0x200000 + 0x1000 * m + 0x4 * n;
+}
+
+static u32 pmic_arb_owner_acc_status_v5(u8 m, u16 n)
+{
+	return 0x10000 * m + 0x4 * n;
+}
+
+static u32 pmic_arb_acc_enable_v1(u16 n)
 {
 	return 0x200 + 0x4 * n;
 }
 
-static u32 pmic_arb_acc_enable_v2(u8 n)
+static u32 pmic_arb_acc_enable_v2(u16 n)
 {
 	return 0x1000 * n;
 }
 
-static u32 pmic_arb_irq_status_v1(u8 n)
+static u32 pmic_arb_acc_enable_v5(u16 n)
+{
+	return 0x100 + 0x10000 * n;
+}
+
+static u32 pmic_arb_irq_status_v1(u16 n)
 {
 	return 0x600 + 0x4 * n;
 }
 
-static u32 pmic_arb_irq_status_v2(u8 n)
+static u32 pmic_arb_irq_status_v2(u16 n)
 {
 	return 0x4 + 0x1000 * n;
 }
 
-static u32 pmic_arb_irq_clear_v1(u8 n)
+static u32 pmic_arb_irq_status_v5(u16 n)
+{
+	return 0x104 + 0x10000 * n;
+}
+
+static u32 pmic_arb_irq_clear_v1(u16 n)
 {
 	return 0xA00 + 0x4 * n;
 }
 
-static u32 pmic_arb_irq_clear_v2(u8 n)
+static u32 pmic_arb_irq_clear_v2(u16 n)
 {
 	return 0x8 + 0x1000 * n;
 }
 
+static u32 pmic_arb_irq_clear_v5(u16 n)
+{
+	return 0x108 + 0x10000 * n;
+}
+
+static u32 pmic_arb_channel_map_offset_v2(u16 n)
+{
+	return 0x800 + 0x4 * n;
+}
+
+static u32 pmic_arb_channel_map_offset_v5(u16 n)
+{
+	return 0x900 + 0x4 * n;
+}
+
 static const struct pmic_arb_ver_ops pmic_arb_v1 = {
+	.ver_str		= "v1",
+	.ppid_to_apid		= pmic_arb_ppid_to_apid_v1,
+	.mode			= pmic_arb_mode_v1_v3,
 	.non_data_cmd		= pmic_arb_non_data_cmd_v1,
 	.offset			= pmic_arb_offset_v1,
 	.fmt_cmd		= pmic_arb_fmt_cmd_v1,
@@ -773,9 +1179,13 @@
 	.acc_enable		= pmic_arb_acc_enable_v1,
 	.irq_status		= pmic_arb_irq_status_v1,
 	.irq_clear		= pmic_arb_irq_clear_v1,
+	.channel_map_offset	= pmic_arb_channel_map_offset_v2,
 };
 
 static const struct pmic_arb_ver_ops pmic_arb_v2 = {
+	.ver_str		= "v2",
+	.ppid_to_apid		= pmic_arb_ppid_to_apid_v2,
+	.mode			= pmic_arb_mode_v2,
 	.non_data_cmd		= pmic_arb_non_data_cmd_v2,
 	.offset			= pmic_arb_offset_v2,
 	.fmt_cmd		= pmic_arb_fmt_cmd_v2,
@@ -783,22 +1193,61 @@
 	.acc_enable		= pmic_arb_acc_enable_v2,
 	.irq_status		= pmic_arb_irq_status_v2,
 	.irq_clear		= pmic_arb_irq_clear_v2,
+	.channel_map_offset	= pmic_arb_channel_map_offset_v2,
+};
+
+static const struct pmic_arb_ver_ops pmic_arb_v3 = {
+	.ver_str		= "v3",
+	.ppid_to_apid		= pmic_arb_ppid_to_apid_v2,
+	.mode			= pmic_arb_mode_v1_v3,
+	.non_data_cmd		= pmic_arb_non_data_cmd_v2,
+	.offset			= pmic_arb_offset_v2,
+	.fmt_cmd		= pmic_arb_fmt_cmd_v2,
+	.owner_acc_status	= pmic_arb_owner_acc_status_v3,
+	.acc_enable		= pmic_arb_acc_enable_v2,
+	.irq_status		= pmic_arb_irq_status_v2,
+	.irq_clear		= pmic_arb_irq_clear_v2,
+	.channel_map_offset	= pmic_arb_channel_map_offset_v2,
+};
+
+static const struct pmic_arb_ver_ops pmic_arb_v5 = {
+	.ver_str		= "v5",
+	.ppid_to_apid		= pmic_arb_ppid_to_apid_v5,
+	.mode			= pmic_arb_mode_v2,
+	.non_data_cmd		= pmic_arb_non_data_cmd_v2,
+	.offset			= pmic_arb_offset_v5,
+	.fmt_cmd		= pmic_arb_fmt_cmd_v2,
+	.owner_acc_status	= pmic_arb_owner_acc_status_v5,
+	.acc_enable		= pmic_arb_acc_enable_v5,
+	.irq_status		= pmic_arb_irq_status_v5,
+	.irq_clear		= pmic_arb_irq_clear_v5,
+	.channel_map_offset	= pmic_arb_channel_map_offset_v5,
 };
 
 static const struct irq_domain_ops pmic_arb_irq_domain_ops = {
 	.map	= qpnpint_irq_domain_map,
 	.xlate	= qpnpint_irq_domain_dt_translate,
+	.activate	= qpnpint_irq_domain_activate,
+};
+
+static void spmi_pmic_arb_resume(void)
+{
+	if (spmi_show_resume_irq())
+		__pmic_arb_chained_irq(the_pa, true);
+}
+
+static struct syscore_ops spmi_pmic_arb_syscore_ops = {
+	.resume = spmi_pmic_arb_resume,
 };
 
 static int spmi_pmic_arb_probe(struct platform_device *pdev)
 {
-	struct spmi_pmic_arb_dev *pa;
+	struct spmi_pmic_arb *pa;
 	struct spmi_controller *ctrl;
 	struct resource *res;
 	void __iomem *core;
 	u32 channel, ee, hw_ver;
-	int err, i;
-	bool is_v1;
+	int err;
 
 	ctrl = spmi_controller_alloc(&pdev->dev, sizeof(*pa));
 	if (!ctrl)
@@ -808,6 +1257,19 @@
 	pa->spmic = ctrl;
 
 	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "core");
+	if (!res) {
+		dev_err(&pdev->dev, "core resource not specified\n");
+		err = -EINVAL;
+		goto err_put_ctrl;
+	}
+
+	pa->core_size = resource_size(res);
+	if (pa->core_size <= 0x800) {
+		dev_err(&pdev->dev, "core_size is smaller than 0x800. Failing Probe\n");
+		err = -EINVAL;
+		goto err_put_ctrl;
+	}
+
 	core = devm_ioremap_resource(&ctrl->dev, res);
 	if (IS_ERR(core)) {
 		err = PTR_ERR(core);
@@ -815,21 +1277,24 @@
 	}
 
 	hw_ver = readl_relaxed(core + PMIC_ARB_VERSION);
-	is_v1  = (hw_ver < PMIC_ARB_VERSION_V2_MIN);
-
-	dev_info(&ctrl->dev, "PMIC Arb Version-%d (0x%x)\n", (is_v1 ? 1 : 2),
-		hw_ver);
 
-	if (is_v1) {
+	if (hw_ver < PMIC_ARB_VERSION_V2_MIN) {
 		pa->ver_ops = &pmic_arb_v1;
 		pa->wr_base = core;
 		pa->rd_base = core;
 	} else {
-		u8  chan;
-		u16 ppid;
-		u32 regval;
+		pa->core = core;
 
+		if (hw_ver < PMIC_ARB_VERSION_V3_MIN)
 		pa->ver_ops = &pmic_arb_v2;
+		else if (hw_ver < PMIC_ARB_VERSION_V5_MIN)
+			pa->ver_ops = &pmic_arb_v3;
+		else
+			pa->ver_ops = &pmic_arb_v5;
+
+		/* the apid to ppid table starts at PMIC_ARB_REG_CHNL0 */
+		pa->max_periph
+		     = (pa->core_size - pa->ver_ops->channel_map_offset(0)) / 4;
 
 		res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
 						   "obsrvr");
@@ -847,32 +1312,33 @@
 			goto err_put_ctrl;
 		}
 
-		pa->ppid_to_chan = devm_kzalloc(&ctrl->dev,
-					PPID_TO_CHAN_TABLE_SZ, GFP_KERNEL);
-		if (!pa->ppid_to_chan) {
+		pa->ppid_to_apid = devm_kcalloc(&ctrl->dev,
+						PMIC_ARB_MAX_PPID,
+						sizeof(*pa->ppid_to_apid),
+						GFP_KERNEL);
+		if (!pa->ppid_to_apid) {
 			err = -ENOMEM;
 			goto err_put_ctrl;
 		}
-		/*
-		 * PMIC_ARB_REG_CHNL is a table in HW mapping channel to ppid.
-		 * ppid_to_chan is an in-memory invert of that table.
-		 */
-		for (chan = 0; chan < PMIC_ARB_MAX_CHNL; ++chan) {
-			regval = readl_relaxed(core + PMIC_ARB_REG_CHNL(chan));
-			if (!regval)
-				continue;
-
-			ppid = (regval >> 8) & 0xFFF;
-			pa->ppid_to_chan[ppid] = chan;
-		}
 	}
 
+	dev_info(&ctrl->dev, "PMIC arbiter version %s (0x%x)\n",
+		 pa->ver_ops->ver_str, hw_ver);
+
 	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "intr");
 	pa->intr = devm_ioremap_resource(&ctrl->dev, res);
 	if (IS_ERR(pa->intr)) {
 		err = PTR_ERR(pa->intr);
 		goto err_put_ctrl;
 	}
+	pa->acc_status = pa->intr;
+
+	/*
+	 * PMIC arbiter v5 groups the IRQ control registers in the same hardware
+	 * module as the read/write channels.
+	 */
+	if (hw_ver >= PMIC_ARB_VERSION_V5_MIN)
+		pa->intr = pa->wr_base;
 
 	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cnfg");
 	pa->cnfg = devm_ioremap_resource(&ctrl->dev, res);
@@ -896,6 +1362,7 @@
 	if (channel > 5) {
 		dev_err(&pdev->dev, "invalid channel (%u) specified.\n",
 			channel);
+		err = -EINVAL;
 		goto err_put_ctrl;
 	}
 
@@ -915,9 +1382,16 @@
 
 	pa->ee = ee;
 
-	for (i = 0; i < ARRAY_SIZE(pa->mapping_table); ++i)
-		pa->mapping_table[i] = readl_relaxed(
-				pa->cnfg + SPMI_MAPPING_TABLE_REG(i));
+	pa->reserved_chan = -EINVAL;
+	of_property_read_u32(pdev->dev.of_node, "qcom,reserved-chan",
+						&pa->reserved_chan);
+
+	pa->mapping_table = devm_kcalloc(&ctrl->dev, PMIC_ARB_MAX_PERIPHS - 1,
+					sizeof(*pa->mapping_table), GFP_KERNEL);
+	if (!pa->mapping_table) {
+		err = -ENOMEM;
+		goto err_put_ctrl;
+	}
 
 	/* Initialize max_apid/min_apid to the opposite bounds, during
 	 * the irq domain translation, we are sure to update these */
@@ -931,6 +1405,15 @@
 	ctrl->read_cmd = pmic_arb_read_cmd;
 	ctrl->write_cmd = pmic_arb_write_cmd;
 
+	if (hw_ver >= PMIC_ARB_VERSION_V5_MIN) {
+		err = pmic_arb_read_apid_map_v5(pa);
+		if (err) {
+			dev_err(&pdev->dev, "could not read APID->PPID mapping table, rc= %d\n",
+				err);
+			goto err_put_ctrl;
+		}
+	}
+
 	dev_dbg(&pdev->dev, "adding irq domain\n");
 	pa->domain = irq_domain_add_tree(pdev->dev.of_node,
 					 &pmic_arb_irq_domain_ops, pa);
@@ -941,11 +1424,14 @@
 	}
 
 	irq_set_chained_handler_and_data(pa->irq, pmic_arb_chained_irq, pa);
+	enable_irq_wake(pa->irq);
 
 	err = spmi_controller_add(ctrl);
 	if (err)
 		goto err_domain_remove;
 
+	the_pa = pa;
+	register_syscore_ops(&spmi_pmic_arb_syscore_ops);
 	return 0;
 
 err_domain_remove:
@@ -959,9 +1445,12 @@
 static int spmi_pmic_arb_remove(struct platform_device *pdev)
 {
 	struct spmi_controller *ctrl = platform_get_drvdata(pdev);
-	struct spmi_pmic_arb_dev *pa = spmi_controller_get_drvdata(ctrl);
+	struct spmi_pmic_arb *pa = spmi_controller_get_drvdata(ctrl);
+
 	spmi_controller_remove(ctrl);
 	irq_set_chained_handler_and_data(pa->irq, NULL, NULL);
+	unregister_syscore_ops(&spmi_pmic_arb_syscore_ops);
+	the_pa = NULL;
 	irq_domain_remove(pa->domain);
 	spmi_controller_put(ctrl);
 	return 0;
@@ -981,7 +1470,12 @@
 		.of_match_table = spmi_pmic_arb_match_table,
 	},
 };
-module_platform_driver(spmi_pmic_arb_driver);
+
+int __init spmi_pmic_arb_init(void)
+{
+	return platform_driver_register(&spmi_pmic_arb_driver);
+}
+arch_initcall(spmi_pmic_arb_init);
 
 MODULE_LICENSE("GPL v2");
 MODULE_ALIAS("platform:spmi_pmic_arb");
diff -ruw linux-4.4.115/drivers/staging/android/ashmem.c linux-4.4.115-fbx/drivers/staging/android/ashmem.c
--- linux-4.4.115/drivers/staging/android/ashmem.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/staging/android/ashmem.c	2019-10-29 09:26:24.841214902 +0100
@@ -397,21 +397,13 @@
 	}
 	get_file(asma->file);
 
-	/*
-	 * XXX - Reworked to use shmem_zero_setup() instead of
-	 * shmem_set_file while we're in staging. -jstultz
-	 */
-	if (vma->vm_flags & VM_SHARED) {
-		ret = shmem_zero_setup(vma);
-		if (ret) {
-			fput(asma->file);
-			goto out;
-		}
-	}
-
+	if (vma->vm_flags & VM_SHARED)
+		shmem_set_file(vma, asma->file);
+	else {
 	if (vma->vm_file)
 		fput(vma->vm_file);
 	vma->vm_file = asma->file;
+	}
 
 out:
 	mutex_unlock(&ashmem_mutex);
@@ -442,12 +434,14 @@
 	if (!(sc->gfp_mask & __GFP_FS))
 		return SHRINK_STOP;
 
-	mutex_lock(&ashmem_mutex);
+	if (!mutex_trylock(&ashmem_mutex))
+		return -1;
+
 	list_for_each_entry_safe(range, next, &ashmem_lru_list, lru) {
 		loff_t start = range->pgstart * PAGE_SIZE;
 		loff_t end = (range->pgend + 1) * PAGE_SIZE;
 
-		vfs_fallocate(range->asma->file,
+		range->asma->file->f_op->fallocate(range->asma->file,
 			      FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
 			      start, end - start);
 		range->purged = ASHMEM_WAS_PURGED;
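
[Editor's note on the ashmem hunk above — the switch from mutex_lock() to mutex_trylock() in the shrinker matters because reclaim can be triggered by an allocation made while ashmem_mutex is already held; taking the lock unconditionally could self-deadlock. The general pattern, sketched standalone (compile with -lpthread):]

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

/* Reclaim callback that may run in a context already holding 'lock'. */
static long shrink(void)
{
	if (pthread_mutex_trylock(&lock) != 0)
		return -1;	/* bail out instead of deadlocking */
	/* ... reclaim work ... */
	pthread_mutex_unlock(&lock);
	return 0;
}

int main(void)
{
	pthread_mutex_lock(&lock);	/* simulate the allocation path */
	printf("shrink under lock -> %ld\n", shrink());	/* -1, skipped */
	pthread_mutex_unlock(&lock);
	printf("shrink unlocked   -> %ld\n", shrink());	/* 0, ran   */
	return 0;
}
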
diff -ruw linux-4.4.115/drivers/staging/android/ion/compat_ion.h linux-4.4.115-fbx/drivers/staging/android/ion/compat_ion.h
--- linux-4.4.115/drivers/staging/android/ion/compat_ion.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/staging/android/ion/compat_ion.h	2019-01-22 16:16:26.711275457 +0100
@@ -21,6 +21,8 @@
 
 long compat_ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg);
 
+#define compat_ion_user_handle_t compat_int_t
+
 #else
 
 #define compat_ion_ioctl  NULL
diff -ruw linux-4.4.115/drivers/staging/android/ion/ion.c linux-4.4.115-fbx/drivers/staging/android/ion/ion.c
--- linux-4.4.115/drivers/staging/android/ion/ion.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/staging/android/ion/ion.c	2019-10-29 09:26:24.845214941 +0100
@@ -3,6 +3,7 @@
  * drivers/staging/android/ion/ion.c
  *
  * Copyright (C) 2011 Google, Inc.
+ * Copyright (c) 2011-2017, The Linux Foundation. All rights reserved.
  *
  * This software is licensed under the terms of the GNU General Public
  * License version 2, as published by the Free Software Foundation, and
@@ -15,7 +16,7 @@
  *
  */
 
-#include <linux/device.h>
+#include <linux/atomic.h>
 #include <linux/err.h>
 #include <linux/file.h>
 #include <linux/freezer.h>
@@ -23,6 +24,7 @@
 #include <linux/anon_inodes.h>
 #include <linux/kthread.h>
 #include <linux/list.h>
+#include <linux/list_sort.h>
 #include <linux/memblock.h>
 #include <linux/miscdevice.h>
 #include <linux/export.h>
@@ -36,6 +38,10 @@
 #include <linux/debugfs.h>
 #include <linux/dma-buf.h>
 #include <linux/idr.h>
+#include <linux/msm_ion.h>
+#include <linux/msm_dma_iommu_mapping.h>
+#include <trace/events/kmem.h>
+
 
 #include "ion.h"
 #include "ion_priv.h"
@@ -86,7 +92,7 @@
 	struct rb_root handles;
 	struct idr idr;
 	struct mutex lock;
-	const char *name;
+	char *name;
 	char *display_name;
 	int display_serial;
 	struct task_struct *task;
@@ -108,6 +114,7 @@
  */
 struct ion_handle {
 	struct kref ref;
+	unsigned int user_ref_count;
 	struct ion_client *client;
 	struct ion_buffer *buffer;
 	struct rb_node node;
@@ -207,6 +214,8 @@
 
 	buffer->dev = dev;
 	buffer->size = len;
+	buffer->flags = flags;
+	INIT_LIST_HEAD(&buffer->vmas);
 
 	table = heap->ops->map_dma(heap, buffer);
 	if (WARN_ONCE(table == NULL,
@@ -237,9 +246,6 @@
 		}
 	}
 
-	buffer->dev = dev;
-	buffer->size = len;
-	INIT_LIST_HEAD(&buffer->vmas);
 	mutex_init(&buffer->lock);
 	/*
 	 * this will set up dma addresses for the sglist -- it is not
@@ -258,6 +264,7 @@
 	mutex_lock(&dev->buffer_lock);
 	ion_buffer_add(dev, buffer);
 	mutex_unlock(&dev->buffer_lock);
+	atomic_long_add(len, &heap->total_allocated);
 	return buffer;
 
 err:
@@ -274,6 +281,8 @@
 	if (WARN_ON(buffer->kmap_cnt > 0))
 		buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
 	buffer->heap->ops->unmap_dma(buffer->heap, buffer);
+
+	atomic_long_sub(buffer->size, &buffer->heap->total_allocated);
 	buffer->heap->ops->free(buffer);
 	vfree(buffer->pages);
 	kfree(buffer);
@@ -285,6 +294,8 @@
 	struct ion_heap *heap = buffer->heap;
 	struct ion_device *dev = buffer->dev;
 
+	msm_dma_buf_freed(buffer);
+
 	mutex_lock(&dev->buffer_lock);
 	rb_erase(&buffer->node, &dev->buffers);
 	mutex_unlock(&dev->buffer_lock);
@@ -308,6 +319,9 @@
 static void ion_buffer_add_to_handle(struct ion_buffer *buffer)
 {
 	mutex_lock(&buffer->lock);
+	if (buffer->handle_count == 0)
+		atomic_long_add(buffer->size, &buffer->heap->total_handles);
+
 	buffer->handle_count++;
 	mutex_unlock(&buffer->lock);
 }
@@ -332,6 +346,7 @@
 		task = current->group_leader;
 		get_task_comm(buffer->task_comm, task);
 		buffer->pid = task_pid_nr(task);
+		atomic_long_sub(buffer->size, &buffer->heap->total_handles);
 	}
 	mutex_unlock(&buffer->lock);
 }
@@ -387,6 +402,15 @@
 	kref_get(&handle->ref);
 }
 
+/* Must hold the client lock */
+static struct ion_handle* ion_handle_get_check_overflow(struct ion_handle *handle)
+{
+	if (atomic_read(&handle->ref.refcount) + 1 == 0)
+		return ERR_PTR(-EOVERFLOW);
+	ion_handle_get(handle);
+	return handle;
+}
+
 static int ion_handle_put_nolock(struct ion_handle *handle)
 {
 	int ret;
@@ -408,6 +432,50 @@
 	return ret;
 }
 
+/* Must hold the client lock */
+static void user_ion_handle_get(struct ion_handle *handle)
+{
+	if (handle->user_ref_count++ == 0)
+		kref_get(&handle->ref);
+}
+
+/* Must hold the client lock */
+static struct ion_handle *user_ion_handle_get_check_overflow(
+	struct ion_handle *handle)
+{
+	if (handle->user_ref_count + 1 == 0)
+		return ERR_PTR(-EOVERFLOW);
+	user_ion_handle_get(handle);
+	return handle;
+}
+
+/* passes a kref to the user ref count.
+ * We know we're holding a kref to the object before and
+ * after this call, so no need to reverify handle.
+ */
+static struct ion_handle *pass_to_user(struct ion_handle *handle)
+{
+	struct ion_client *client = handle->client;
+	struct ion_handle *ret;
+
+	mutex_lock(&client->lock);
+	ret = user_ion_handle_get_check_overflow(handle);
+	ion_handle_put_nolock(handle);
+	mutex_unlock(&client->lock);
+	return ret;
+}
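
[Editor's note — the intent of user_ref_count, as the helpers above show: handle lifetime is still governed by the kref, but all userspace references collectively pin exactly one kref, taken on the 0 -> 1 transition and dropped on the 1 -> 0 transition, so a misbehaving process cannot underflow kernel-held references. pass_to_user() converts the caller's kref into that single user-held reference. A toy model of the two counters:]

#include <stdio.h>

struct handle {
	int kref;		/* kernel reference count */
	int user_ref_count;	/* userspace refs; pin one kref in total */
};

static void user_get(struct handle *h)
{
	if (h->user_ref_count++ == 0)
		h->kref++;	/* first user ref takes a single kref */
}

static void user_put(struct handle *h)
{
	if (--h->user_ref_count == 0)
		h->kref--;	/* last user ref releases that kref */
}

int main(void)
{
	struct handle h = { .kref = 1, .user_ref_count = 0 };

	user_get(&h);
	user_get(&h);
	user_put(&h);
	printf("kref=%d user=%d\n", h.kref, h.user_ref_count);	/* 2, 1 */
	return 0;
}
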
+
+/* Must hold the client lock */
+static int user_ion_handle_put_nolock(struct ion_handle *handle)
+{
+	int ret = 0;
+
+	if (--handle->user_ref_count == 0)
+		ret = ion_handle_put_nolock(handle);
+
+	return ret;
+}
+
 static struct ion_handle *ion_handle_lookup(struct ion_client *client,
 					    struct ion_buffer *buffer)
 {
@@ -433,9 +501,9 @@
 
 	handle = idr_find(&client->idr, id);
 	if (handle)
-		ion_handle_get(handle);
+		return ion_handle_get_check_overflow(handle);
 
-	return handle ? handle : ERR_PTR(-EINVAL);
+	return ERR_PTR(-EINVAL);
 }
 
 struct ion_handle *ion_handle_get_by_id(struct ion_client *client,
@@ -488,15 +556,28 @@
 	return 0;
 }
 
-struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
+static struct ion_handle *__ion_alloc(struct ion_client *client, size_t len,
 			     size_t align, unsigned int heap_id_mask,
-			     unsigned int flags)
+			     unsigned int flags, bool grab_handle)
 {
 	struct ion_handle *handle;
 	struct ion_device *dev = client->dev;
 	struct ion_buffer *buffer = NULL;
 	struct ion_heap *heap;
 	int ret;
+	const unsigned int MAX_DBG_STR_LEN = 64;
+	char dbg_str[MAX_DBG_STR_LEN];
+	unsigned int dbg_str_idx = 0;
+
+	dbg_str[0] = '\0';
+
+	/*
+	 * For now, we don't want to fault in pages individually since
+	 * clients are already doing manual cache maintenance. In
+	 * other words, the implicit caching infrastructure is in
+	 * place (in code) but should not be used.
+	 */
+	flags |= ION_FLAG_CACHED_NEEDS_SYNC;
 
 	pr_debug("%s: len %zu align %zu heap_id_mask %u flags %x\n", __func__,
 		 len, align, heap_id_mask, flags);
@@ -516,17 +597,49 @@
 		/* if the caller didn't specify this heap id */
 		if (!((1 << heap->id) & heap_id_mask))
 			continue;
+		trace_ion_alloc_buffer_start(client->name, heap->name, len,
+					     heap_id_mask, flags);
 		buffer = ion_buffer_create(heap, dev, len, align, flags);
+		trace_ion_alloc_buffer_end(client->name, heap->name, len,
+					   heap_id_mask, flags);
 		if (!IS_ERR(buffer))
 			break;
+
+		trace_ion_alloc_buffer_fallback(client->name, heap->name, len,
+					    heap_id_mask, flags,
+					    PTR_ERR(buffer));
+		if (dbg_str_idx < MAX_DBG_STR_LEN) {
+			unsigned int len_left = MAX_DBG_STR_LEN-dbg_str_idx-1;
+			int ret_value = snprintf(&dbg_str[dbg_str_idx],
+						len_left, "%s ", heap->name);
+			if (ret_value >= len_left) {
+				/* overflow */
+				dbg_str[MAX_DBG_STR_LEN-1] = '\0';
+				dbg_str_idx = MAX_DBG_STR_LEN;
+			} else if (ret_value >= 0) {
+				dbg_str_idx += ret_value;
+			} else {
+				/* error */
+				dbg_str[MAX_DBG_STR_LEN-1] = '\0';
+			}
+		}
 	}
 	up_read(&dev->lock);
 
-	if (buffer == NULL)
+	if (buffer == NULL) {
+		trace_ion_alloc_buffer_fail(client->name, dbg_str, len,
+					    heap_id_mask, flags, -ENODEV);
 		return ERR_PTR(-ENODEV);
+	}
 
-	if (IS_ERR(buffer))
+	if (IS_ERR(buffer)) {
+		trace_ion_alloc_buffer_fail(client->name, dbg_str, len,
+					    heap_id_mask, flags,
+					    PTR_ERR(buffer));
+		pr_debug("ION is unable to allocate 0x%zx bytes (alignment: 0x%zx) from heap(s) %sfor client %s\n",
+			len, align, dbg_str, client->name);
 		return ERR_CAST(buffer);
+	}
 
 	handle = ion_handle_create(client, buffer);
 
@@ -540,6 +653,8 @@
 		return handle;
 
 	mutex_lock(&client->lock);
+	if (grab_handle)
+		ion_handle_get(handle);
 	ret = ion_handle_add(client, handle);
 	mutex_unlock(&client->lock);
 	if (ret) {
@@ -549,6 +664,13 @@
 
 	return handle;
 }
+
+struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
+			     size_t align, unsigned int heap_id_mask,
+			     unsigned int flags)
+{
+	return __ion_alloc(client, len, align, heap_id_mask, flags, false);
+}
 EXPORT_SYMBOL(ion_alloc);
 
 static void ion_free_nolock(struct ion_client *client, struct ion_handle *handle)
@@ -558,7 +680,6 @@
 	BUG_ON(client != handle->client);
 
 	valid_handle = ion_handle_validate(client, handle);
-
 	if (!valid_handle) {
 		WARN(1, "%s: invalid handle passed to free.\n", __func__);
 		return;
@@ -566,6 +687,25 @@
 	ion_handle_put_nolock(handle);
 }
 
+static void user_ion_free_nolock(struct ion_client *client,
+				 struct ion_handle *handle)
+{
+	bool valid_handle;
+
+	WARN_ON(client != handle->client);
+
+	valid_handle = ion_handle_validate(client, handle);
+	if (!valid_handle) {
+		WARN(1, "%s: invalid handle passed to free.\n", __func__);
+		return;
+	}
+	if (handle->user_ref_count == 0) {
+		WARN(1, "%s: User does not have access!\n", __func__);
+		return;
+	}
+	user_ion_handle_put_nolock(handle);
+}
+
 void ion_free(struct ion_client *client, struct ion_handle *handle)
 {
 	BUG_ON(client != handle->client);
@@ -702,32 +842,66 @@
 }
 EXPORT_SYMBOL(ion_unmap_kernel);
 
+static struct mutex debugfs_mutex;
+static struct rb_root *ion_root_client;
+static int is_client_alive(struct ion_client *client)
+{
+	struct rb_node *node;
+	struct ion_client *tmp;
+	struct ion_device *dev;
+
+	node = ion_root_client->rb_node;
+	dev = container_of(ion_root_client, struct ion_device, clients);
+
+	down_read(&dev->lock);
+	while (node) {
+		tmp = rb_entry(node, struct ion_client, node);
+		if (client < tmp) {
+			node = node->rb_left;
+		} else if (client > tmp) {
+			node = node->rb_right;
+		} else {
+			up_read(&dev->lock);
+			return 1;
+		}
+	}
+
+	up_read(&dev->lock);
+	return 0;
+}
+
 static int ion_debug_client_show(struct seq_file *s, void *unused)
 {
 	struct ion_client *client = s->private;
 	struct rb_node *n;
-	size_t sizes[ION_NUM_HEAP_IDS] = {0};
-	const char *names[ION_NUM_HEAP_IDS] = {NULL};
-	int i;
+
+	mutex_lock(&debugfs_mutex);
+	if (!is_client_alive(client)) {
+		seq_printf(s, "ion_client 0x%pK dead, can't dump its buffers\n",
+			   client);
+		mutex_unlock(&debugfs_mutex);
+		return 0;
+	}
+
+	seq_printf(s, "%16.16s: %16.16s : %16.16s : %12.12s\n",
+			"heap_name", "size_in_bytes", "handle refcount",
+			"buffer");
 
 	mutex_lock(&client->lock);
 	for (n = rb_first(&client->handles); n; n = rb_next(n)) {
 		struct ion_handle *handle = rb_entry(n, struct ion_handle,
 						     node);
-		unsigned int id = handle->buffer->heap->id;
 
-		if (!names[id])
-			names[id] = handle->buffer->heap->name;
-		sizes[id] += handle->buffer->size;
-	}
-	mutex_unlock(&client->lock);
+		seq_printf(s, "%16.16s: %16zx : %16d : %12pK",
+				handle->buffer->heap->name,
+				handle->buffer->size,
+				atomic_read(&handle->ref.refcount),
+				handle->buffer);
 
-	seq_printf(s, "%16.16s: %16.16s\n", "heap_name", "size_in_bytes");
-	for (i = 0; i < ION_NUM_HEAP_IDS; i++) {
-		if (!names[i])
-			continue;
-		seq_printf(s, "%16.16s: %16zu\n", names[i], sizes[i]);
+		seq_printf(s, "\n");
 	}
+	mutex_unlock(&client->lock);
+	mutex_unlock(&debugfs_mutex);
 	return 0;
 }
 
@@ -798,6 +972,7 @@
 	client->handles = RB_ROOT;
 	idr_init(&client->idr);
 	mutex_init(&client->lock);
+
 	client->task = task;
 	client->pid = pid;
 	client->name = kstrdup(name, GFP_KERNEL);
@@ -857,6 +1032,7 @@
 	struct rb_node *n;
 
 	pr_debug("%s: %d\n", __func__, __LINE__);
+	mutex_lock(&debugfs_mutex);
 	while ((n = rb_first(&client->handles))) {
 		struct ion_handle *handle = rb_entry(n, struct ion_handle,
 						     node);
@@ -870,14 +1046,67 @@
 		put_task_struct(client->task);
 	rb_erase(&client->node, &dev->clients);
 	debugfs_remove_recursive(client->debug_root);
+
 	up_write(&dev->lock);
 
 	kfree(client->display_name);
 	kfree(client->name);
 	kfree(client);
+	mutex_unlock(&debugfs_mutex);
 }
 EXPORT_SYMBOL(ion_client_destroy);
 
+int ion_handle_get_flags(struct ion_client *client, struct ion_handle *handle,
+			unsigned long *flags)
+{
+	struct ion_buffer *buffer;
+
+	mutex_lock(&client->lock);
+	if (!ion_handle_validate(client, handle)) {
+		pr_err("%s: invalid handle passed to %s.\n",
+		       __func__, __func__);
+		mutex_unlock(&client->lock);
+		return -EINVAL;
+	}
+	buffer = handle->buffer;
+	mutex_lock(&buffer->lock);
+	*flags = buffer->flags;
+	mutex_unlock(&buffer->lock);
+	mutex_unlock(&client->lock);
+
+	return 0;
+}
+EXPORT_SYMBOL(ion_handle_get_flags);
+
+int ion_handle_get_size(struct ion_client *client, struct ion_handle *handle,
+			size_t *size)
+{
+	struct ion_buffer *buffer;
+
+	mutex_lock(&client->lock);
+	if (!ion_handle_validate(client, handle)) {
+		pr_err("%s: invalid handle passed to %s.\n",
+		       __func__, __func__);
+		mutex_unlock(&client->lock);
+		return -EINVAL;
+	}
+	buffer = handle->buffer;
+	mutex_lock(&buffer->lock);
+	*size = buffer->size;
+	mutex_unlock(&buffer->lock);
+	mutex_unlock(&client->lock);
+
+	return 0;
+}
+EXPORT_SYMBOL(ion_handle_get_size);
+
+/**
+ * ion_sg_table - get an sg_table for the buffer
+ *
+ * NOTE: most likely you should NOT being using this API.
+ * You should be using Ion as a DMA Buf exporter and using
+ * the sg_table returned by dma_buf_map_attachment.
+ */
 struct sg_table *ion_sg_table(struct ion_client *client,
 			      struct ion_handle *handle)
 {
@@ -898,6 +1127,60 @@
 }
 EXPORT_SYMBOL(ion_sg_table);
 
+struct sg_table *ion_create_chunked_sg_table(phys_addr_t buffer_base,
+					size_t chunk_size, size_t total_size)
+{
+	struct sg_table *table;
+	int i, n_chunks, ret;
+	struct scatterlist *sg;
+
+	table = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
+	if (!table)
+		return ERR_PTR(-ENOMEM);
+
+	n_chunks = DIV_ROUND_UP(total_size, chunk_size);
+	pr_debug("creating sg_table with %d chunks\n", n_chunks);
+
+	ret = sg_alloc_table(table, n_chunks, GFP_KERNEL);
+	if (ret)
+		goto err0;
+
+	for_each_sg(table->sgl, sg, table->nents, i) {
+		dma_addr_t addr = buffer_base + i * chunk_size;
+		sg_dma_address(sg) = addr;
+		sg->length = chunk_size;
+	}
+
+	return table;
+err0:
+	kfree(table);
+	return ERR_PTR(ret);
+}
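
[Editor's note — a worked example of the chunking arithmetic above, assuming the helper is fed a physically contiguous carveout: a 1 MiB buffer split into 64 KiB chunks yields DIV_ROUND_UP(0x100000, 0x10000) = 16 entries, each programmed with base + i * chunk_size. The base address below is illustrative.]

#include <stdint.h>
#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	uint64_t base = 0x80000000ull;	/* illustrative carveout base */
	uint64_t total = 0x100000;	/* 1 MiB */
	uint64_t chunk = 0x10000;	/* 64 KiB */
	unsigned int i, n = DIV_ROUND_UP(total, chunk);

	printf("%u chunks\n", n);	/* 16 */
	for (i = 0; i < n; i++)
		printf("sg[%u]: addr=0x%llx len=0x%llx\n", i,
		       (unsigned long long)(base + i * chunk),
		       (unsigned long long)chunk);
	return 0;
}
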
+
+static struct sg_table *ion_dupe_sg_table(struct sg_table *orig_table)
+{
+	int ret, i;
+	struct scatterlist *sg, *sg_orig;
+	struct sg_table *table;
+
+	table = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
+	if (!table)
+		return NULL;
+
+	ret = sg_alloc_table(table, orig_table->nents, GFP_KERNEL);
+	if (ret) {
+		kfree(table);
+		return NULL;
+	}
+
+	sg_orig = orig_table->sgl;
+	for_each_sg(table->sgl, sg, table->nents, i) {
+		memcpy(sg, sg_orig, sizeof(*sg));
+		sg_orig = sg_next(sg_orig);
+	}
+	return table;
+}
+
 static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
 				       struct device *dev,
 				       enum dma_data_direction direction);
@@ -907,15 +1190,22 @@
 {
 	struct dma_buf *dmabuf = attachment->dmabuf;
 	struct ion_buffer *buffer = dmabuf->priv;
+	struct sg_table *table;
+
+	table = ion_dupe_sg_table(buffer->sg_table);
+	if (!table)
+		return NULL;
 
 	ion_buffer_sync_for_device(buffer, attachment->dev, direction);
-	return buffer->sg_table;
+	return table;
 }
 
 static void ion_unmap_dma_buf(struct dma_buf_attachment *attachment,
 			      struct sg_table *table,
 			      enum dma_data_direction direction)
 {
+	sg_free_table(table);
+	kfree(table);
 }
 
 void ion_pages_sync_for_device(struct device *dev, struct page *page,
@@ -923,6 +1213,8 @@
 {
 	struct scatterlist sg;
 
+	WARN_ONCE(!dev, "A device is required for dma_sync\n");
+
 	sg_init_table(&sg, 1);
 	sg_set_page(&sg, page, size, 0);
 	/*
@@ -1003,7 +1295,7 @@
 	mutex_lock(&buffer->lock);
 	list_add(&vma_list->list, &buffer->vmas);
 	mutex_unlock(&buffer->lock);
-	pr_debug("%s: adding %p\n", __func__, vma);
+	pr_debug("%s: adding %pK\n", __func__, vma);
 }
 
 static void ion_vm_close(struct vm_area_struct *vma)
@@ -1018,10 +1310,13 @@
 			continue;
 		list_del(&vma_list->list);
 		kfree(vma_list);
-		pr_debug("%s: deleting %p\n", __func__, vma);
+		pr_debug("%s: deleting %pK\n", __func__, vma);
 		break;
 	}
 	mutex_unlock(&buffer->lock);
+
+	if (buffer->heap->ops->unmap_user)
+		buffer->heap->ops->unmap_user(buffer->heap, buffer);
 }
 
 static const struct vm_operations_struct ion_vma_ops = {
@@ -1046,6 +1341,7 @@
 							VM_DONTDUMP;
 		vma->vm_private_data = buffer;
 		vma->vm_ops = &ion_vma_ops;
+		vma->vm_flags |= VM_MIXEDMAP;
 		ion_vm_open(vma);
 		return 0;
 	}
@@ -1173,11 +1469,32 @@
 	fd = dma_buf_fd(dmabuf, O_CLOEXEC);
 	if (fd < 0)
 		dma_buf_put(dmabuf);
-
 	return fd;
 }
 EXPORT_SYMBOL(ion_share_dma_buf_fd);
 
+bool ion_dma_buf_is_secure(struct dma_buf *dmabuf)
+{
+	struct ion_buffer *buffer;
+	enum ion_heap_type type;
+
+	/* Return false if we didn't create the buffer */
+	if (!dmabuf || dmabuf->ops != &dma_buf_ops)
+		return false;
+
+	buffer = dmabuf->priv;
+
+	if (!buffer || !buffer->heap)
+		return false;
+
+	type = buffer->heap->type;
+
+	return (type == (enum ion_heap_type)ION_HEAP_TYPE_SECURE_DMA ||
+		type == (enum ion_heap_type)ION_HEAP_TYPE_SYSTEM_SECURE) ?
+		true : false;
+}
+EXPORT_SYMBOL(ion_dma_buf_is_secure);
+
 struct ion_handle *ion_import_dma_buf(struct ion_client *client, int fd)
 {
 	struct dma_buf *dmabuf;
@@ -1202,7 +1519,7 @@
 	/* if a handle exists for this buffer just take a reference to it */
 	handle = ion_handle_lookup(client, buffer);
 	if (!IS_ERR(handle)) {
-		ion_handle_get(handle);
+		handle = ion_handle_get_check_overflow(handle);
 		mutex_unlock(&client->lock);
 		goto end;
 	}
@@ -1244,6 +1561,11 @@
 	}
 	buffer = dmabuf->priv;
 
+	if (get_secure_vmid(buffer->flags) > 0) {
+		pr_err("%s: cannot sync a secure dmabuf\n", __func__);
+		dma_buf_put(dmabuf);
+		return -EINVAL;
+	}
 	dma_sync_sg_for_device(NULL, buffer->sg_table->sgl,
 			       buffer->sg_table->nents, DMA_BIDIRECTIONAL);
 	dma_buf_put(dmabuf);
@@ -1292,13 +1614,13 @@
 	{
 		struct ion_handle *handle;
 
-		handle = ion_alloc(client, data.allocation.len,
+		handle = __ion_alloc(client, data.allocation.len,
 						data.allocation.align,
 						data.allocation.heap_id_mask,
-						data.allocation.flags);
+						data.allocation.flags, true);
 		if (IS_ERR(handle))
 			return PTR_ERR(handle);
-
+		pass_to_user(handle);
 		data.allocation.handle = handle->id;
 
 		cleanup_handle = handle;
@@ -1314,7 +1636,7 @@
 			mutex_unlock(&client->lock);
 			return PTR_ERR(handle);
 		}
-		ion_free_nolock(client, handle);
+		user_ion_free_nolock(client, handle);
 		ion_handle_put_nolock(handle);
 		mutex_unlock(&client->lock);
 		break;
@@ -1338,10 +1660,15 @@
 		struct ion_handle *handle;
 
 		handle = ion_import_dma_buf(client, data.fd.fd);
+		if (IS_ERR(handle)) {
+			ret = PTR_ERR(handle);
+		} else {
+			handle = pass_to_user(handle);
 		if (IS_ERR(handle))
 			ret = PTR_ERR(handle);
 		else
 			data.handle.handle = handle->id;
+		}
 		break;
 	}
 	case ION_IOC_SYNC:
@@ -1357,17 +1684,32 @@
 						data.custom.arg);
 		break;
 	}
+	case ION_IOC_CLEAN_CACHES:
+		return client->dev->custom_ioctl(client,
+						ION_IOC_CLEAN_CACHES, arg);
+	case ION_IOC_INV_CACHES:
+		return client->dev->custom_ioctl(client,
+						ION_IOC_INV_CACHES, arg);
+	case ION_IOC_CLEAN_INV_CACHES:
+		return client->dev->custom_ioctl(client,
+						ION_IOC_CLEAN_INV_CACHES, arg);
 	default:
 		return -ENOTTY;
 	}
 
 	if (dir & _IOC_READ) {
 		if (copy_to_user((void __user *)arg, &data, _IOC_SIZE(cmd))) {
-			if (cleanup_handle)
-				ion_free(client, cleanup_handle);
+			if (cleanup_handle) {
+				mutex_lock(&client->lock);
+				user_ion_free_nolock(client, cleanup_handle);
+				ion_handle_put_nolock(cleanup_handle);
+				mutex_unlock(&client->lock);
+			}
 			return -EFAULT;
 		}
 	}
+	if (cleanup_handle)
+		ion_handle_put(cleanup_handle);
 	return ret;
 }
 
@@ -1423,6 +1765,106 @@
 	return size;
 }
 
+/**
+ * Create a mem_map of the heap.
+ * @param s seq_file to log error message to.
+ * @param heap The heap to create mem_map for.
+ * @param mem_map The mem map to be created.
+ */
+void ion_debug_mem_map_create(struct seq_file *s, struct ion_heap *heap,
+			      struct list_head *mem_map)
+{
+	struct ion_device *dev = heap->dev;
+	struct rb_node *cnode;
+	size_t size;
+	struct ion_client *client;
+
+	if (!heap->ops->phys)
+		return;
+
+	down_read(&dev->lock);
+	for (cnode = rb_first(&dev->clients); cnode; cnode = rb_next(cnode)) {
+		struct rb_node *hnode;
+		client = rb_entry(cnode, struct ion_client, node);
+
+		mutex_lock(&client->lock);
+		for (hnode = rb_first(&client->handles);
+		     hnode;
+		     hnode = rb_next(hnode)) {
+			struct ion_handle *handle = rb_entry(
+				hnode, struct ion_handle, node);
+			if (handle->buffer->heap == heap) {
+				struct mem_map_data *data =
+					kzalloc(sizeof(*data), GFP_KERNEL);
+				if (!data)
+					goto inner_error;
+				heap->ops->phys(heap, handle->buffer,
+							&(data->addr), &size);
+				data->size = (unsigned long) size;
+				data->addr_end = data->addr + data->size - 1;
+				data->client_name = kstrdup(client->name,
+							GFP_KERNEL);
+				if (!data->client_name) {
+					kfree(data);
+					goto inner_error;
+				}
+				list_add(&data->node, mem_map);
+			}
+		}
+		mutex_unlock(&client->lock);
+	}
+	up_read(&dev->lock);
+	return;
+
+inner_error:
+	seq_puts(s,
+		"ERROR: out of memory. Part of memory map will not be logged\n");
+	mutex_unlock(&client->lock);
+	up_read(&dev->lock);
+}
+
+/**
+ * Free the memory allocated by ion_debug_mem_map_create
+ * @param mem_map The mem map to free.
+ */
+static void ion_debug_mem_map_destroy(struct list_head *mem_map)
+{
+	if (mem_map) {
+		struct mem_map_data *data, *tmp;
+		list_for_each_entry_safe(data, tmp, mem_map, node) {
+			list_del(&data->node);
+			kfree(data->client_name);
+			kfree(data);
+		}
+	}
+}
+
+static int mem_map_cmp(void *priv, struct list_head *a, struct list_head *b)
+{
+	struct mem_map_data *d1, *d2;
+	d1 = list_entry(a, struct mem_map_data, node);
+	d2 = list_entry(b, struct mem_map_data, node);
+	if (d1->addr == d2->addr)
+		return d1->size - d2->size;
+	return d1->addr - d2->addr;
+}
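
[Editor's note — one caveat with mem_map_cmp() above: the result of d1->addr - d2->addr is truncated to int, so if addr is a 64-bit quantity, two addresses more than 2^31 apart can compare in the wrong order (or as equal). A sketch of an overflow-safe comparator with the same semantics; the field types are assumptions here:]

#include <stdint.h>
#include <stdio.h>

static int cmp_u64(uint64_t a, uint64_t b)
{
	return (a > b) - (a < b);	/* -1, 0 or 1; never truncates */
}

int main(void)
{
	/* Two addresses 2^32 apart: subtracting and truncating to int
	 * yields 0 and calls them "equal"; cmp_u64 orders them. */
	uint64_t a = 0x100000000ull, b = 0x200000000ull;

	printf("broken: %d  safe: %d\n", (int)(a - b), cmp_u64(a, b));
	return 0;
}
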
+
+/**
+ * Print heap debug information.
+ * @param s seq_file to log message to.
+ * @param heap pointer to heap that we will print debug information for.
+ */
+static void ion_heap_print_debug(struct seq_file *s, struct ion_heap *heap)
+{
+	if (heap->ops->print_debug) {
+		struct list_head mem_map = LIST_HEAD_INIT(mem_map);
+		ion_debug_mem_map_create(s, heap, &mem_map);
+		list_sort(NULL, &mem_map, mem_map_cmp);
+		heap->ops->print_debug(heap, s, &mem_map);
+		ion_debug_mem_map_destroy(&mem_map);
+	}
+}
+
 static int ion_debug_heap_show(struct seq_file *s, void *unused)
 {
 	struct ion_heap *heap = s->private;
@@ -1434,6 +1876,7 @@
 	seq_printf(s, "%16s %16s %16s\n", "client", "pid", "size");
 	seq_puts(s, "----------------------------------------------------\n");
 
+	mutex_lock(&debugfs_mutex);
 	for (n = rb_first(&dev->clients); n; n = rb_next(n)) {
 		struct ion_client *client = rb_entry(n, struct ion_client,
 						     node);
@@ -1452,6 +1895,8 @@
 				   client->pid, size);
 		}
 	}
+	mutex_unlock(&debugfs_mutex);
+
 	seq_puts(s, "----------------------------------------------------\n");
 	seq_puts(s, "orphaned allocations (info is from last known client):\n");
 	mutex_lock(&dev->buffer_lock);
@@ -1482,6 +1927,7 @@
 	if (heap->debug_show)
 		heap->debug_show(heap, s, unused);
 
+	ion_heap_print_debug(s, heap);
 	return 0;
 }
 
@@ -1497,6 +1943,31 @@
 	.release = single_release,
 };
 
+void show_ion_usage(struct ion_device *dev)
+{
+	struct ion_heap *heap;
+
+	if (!down_read_trylock(&dev->lock)) {
+		pr_err("Ion output would deadlock, can't print debug information\n");
+		return;
+	}
+
+	pr_info("%16.s %16.s %16.s\n", "Heap name", "Total heap size",
+					"Total orphaned size");
+	pr_info("---------------------------------\n");
+	plist_for_each_entry(heap, &dev->heaps, node) {
+		pr_info("%16.s 0x%16.lx 0x%16.lx\n",
+			heap->name, atomic_long_read(&heap->total_allocated),
+			atomic_long_read(&heap->total_allocated) -
+			atomic_long_read(&heap->total_handles));
+		if (heap->debug_show)
+			heap->debug_show(heap, NULL, 0);
+
+	}
+	up_read(&dev->lock);
+}
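
show_ion_usage() reports the orphaned size as total_allocated minus total_handles, that is, bytes still backing buffers that no client handle references any more. The counters are the two atomic_long_t fields added to struct ion_heap in the ion_priv.h hunk below; a hedged sketch of how they are presumably maintained (the actual update sites are outside this section, so these helper names are hypothetical):

static void example_note_buffer_alloc(struct ion_heap *heap, size_t size)
{
	atomic_long_add(size, &heap->total_allocated);
	atomic_long_add(size, &heap->total_handles);
}

static void example_note_handle_destroy(struct ion_heap *heap, size_t size)
{
	/* the buffer may outlive its last handle, becoming "orphaned" */
	atomic_long_sub(size, &heap->total_handles);
}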
+
+#ifdef DEBUG_HEAP_SHRINKER
 static int debug_shrink_set(void *data, u64 val)
 {
 	struct ion_heap *heap = data;
@@ -1531,6 +2002,7 @@
 
 DEFINE_SIMPLE_ATTRIBUTE(debug_shrink_fops, debug_shrink_get,
 			debug_shrink_set, "%llu\n");
+#endif
 
 void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap)
 {
@@ -1570,6 +2042,7 @@
 			path, heap->name);
 	}
 
+#ifdef DEBUG_HEAP_SHRINKER
 	if (heap->shrinker.count_objects && heap->shrinker.scan_objects) {
 		char debug_name[64];
 
@@ -1585,11 +2058,36 @@
 				path, debug_name);
 		}
 	}
+#endif
 
 	up_write(&dev->lock);
 }
 EXPORT_SYMBOL(ion_device_add_heap);
 
+int ion_walk_heaps(struct ion_client *client, int heap_id,
+			enum ion_heap_type type, void *data,
+			int (*f)(struct ion_heap *heap, void *data))
+{
+	int ret_val = 0;
+	struct ion_heap *heap;
+	struct ion_device *dev = client->dev;
+	/*
+	 * traverse the list of heaps available in this system
+	 * and find the heap that is specified.
+	 */
+	down_write(&dev->lock);
+	plist_for_each_entry(heap, &dev->heaps, node) {
+		if (ION_HEAP(heap->id) != heap_id ||
+			type != heap->type)
+			continue;
+		ret_val = f(heap, data);
+		break;
+	}
+	up_write(&dev->lock);
+	return ret_val;
+}
+EXPORT_SYMBOL(ion_walk_heaps);
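
ion_walk_heaps() applies a callback to at most one heap, matched on both id and type, with dev->lock held for write so the heap cannot disappear under the callback. Illustrative usage under the assumptions visible here (ION_HEAP() mirrors the conversion the walker applies to heap->id; the callback and calling context are hypothetical):

static int example_heap_bytes(struct ion_heap *heap, void *data)
{
	*(long *)data = atomic_long_read(&heap->total_allocated);
	return 0;
}

	/* e.g. from a custom ioctl handler */
	long bytes = 0;
	int ret = ion_walk_heaps(client, ION_HEAP(heap_id),
				 ION_HEAP_TYPE_DMA, &bytes,
				 example_heap_bytes);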
+
 struct ion_device *ion_device_create(long (*custom_ioctl)
 				     (struct ion_client *client,
 				      unsigned int cmd,
@@ -1636,6 +2134,8 @@
 	init_rwsem(&idev->lock);
 	plist_head_init(&idev->heaps);
 	idev->clients = RB_ROOT;
+	ion_root_client = &idev->clients;
+	mutex_init(&debugfs_mutex);
 	return idev;
 }
 EXPORT_SYMBOL(ion_device_create);
@@ -1673,13 +2173,13 @@
 			int ret = memblock_reserve(data->heaps[i].base,
 					       data->heaps[i].size);
 			if (ret)
-				pr_err("memblock reserve of %zx@%lx failed\n",
+				pr_err("memblock reserve of %zx@%pa failed\n",
 				       data->heaps[i].size,
-				       data->heaps[i].base);
+				       &data->heaps[i].base);
 		}
-		pr_info("%s: %s reserved base %lx size %zu\n", __func__,
+		pr_info("%s: %s reserved base %pa size %zu\n", __func__,
 			data->heaps[i].name,
-			data->heaps[i].base,
+			&data->heaps[i].base,
 			data->heaps[i].size);
 	}
 }
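
The printk changes in this final hunk anticipate ion_phys_addr_t becoming dma_addr_t in the ion.h hunk below: %lx silently truncates on configurations where the address type is wider than unsigned long (ARM LPAE, for example), while %pa takes a pointer to the variable and prints the full-width value. A minimal sketch using the names from this hunk:

	phys_addr_t base = data->heaps[i].base;

	pr_info("reserved base %pa size %zu\n", &base, data->heaps[i].size);
	/* pr_info("... %lx", base) would truncate a >32-bit address */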
diff -ruw linux-4.4.115/drivers/staging/android/ion/ion_carveout_heap.c linux-4.4.115-fbx/drivers/staging/android/ion/ion_carveout_heap.c
--- linux-4.4.115/drivers/staging/android/ion/ion_carveout_heap.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/staging/android/ion/ion_carveout_heap.c	2019-01-22 16:16:26.711275457 +0100
@@ -111,12 +111,14 @@
 	struct ion_heap *heap = buffer->heap;
 	struct sg_table *table = buffer->priv_virt;
 	struct page *page = sg_page(table->sgl);
+	struct device *dev = heap->priv;
+
 	ion_phys_addr_t paddr = PFN_PHYS(page_to_pfn(page));
 
 	ion_heap_buffer_zero(buffer);
 
 	if (ion_buffer_cached(buffer))
-		dma_sync_sg_for_device(NULL, table->sgl, table->nents,
+		dma_sync_sg_for_device(dev, table->sgl, table->nents,
 							DMA_BIDIRECTIONAL);
 
 	ion_carveout_free(heap, paddr, buffer->size);
@@ -153,11 +155,12 @@
 
 	struct page *page;
 	size_t size;
+	struct device *dev = heap_data->priv;
 
 	page = pfn_to_page(PFN_DOWN(heap_data->base));
 	size = heap_data->size;
 
-	ion_pages_sync_for_device(NULL, page, size, DMA_BIDIRECTIONAL);
+	ion_pages_sync_for_device(dev, page, size, DMA_BIDIRECTIONAL);
 
 	ret = ion_heap_pages_zero(page, size, pgprot_writecombine(PAGE_KERNEL));
 	if (ret)
@@ -167,7 +170,7 @@
 	if (!carveout_heap)
 		return ERR_PTR(-ENOMEM);
 
-	carveout_heap->pool = gen_pool_create(12, -1);
+	carveout_heap->pool = gen_pool_create(PAGE_SHIFT, -1);
 	if (!carveout_heap->pool) {
 		kfree(carveout_heap);
 		return ERR_PTR(-ENOMEM);
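
Replacing the literal 12 with PAGE_SHIFT keeps the carveout pool's allocation granule equal to the real page size, which matters on kernels built with pages larger than 4 KiB (PAGE_SHIFT is 16 with 64 KiB pages). For reference, the genalloc calls involved, as a sketch with hypothetical locals heap_base, heap_size and paddr:

	pool = gen_pool_create(PAGE_SHIFT, -1);	/* page granules, any node */
	if (!pool)
		return -ENOMEM;
	gen_pool_add(pool, heap_base, heap_size, -1);
	paddr = gen_pool_alloc(pool, len);	/* page-aligned offsets */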
diff -ruw linux-4.4.115/drivers/staging/android/ion/ion_chunk_heap.c linux-4.4.115-fbx/drivers/staging/android/ion/ion_chunk_heap.c
--- linux-4.4.115/drivers/staging/android/ion/ion_chunk_heap.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/staging/android/ion/ion_chunk_heap.c	2019-01-22 16:16:26.711275457 +0100
@@ -99,13 +99,14 @@
 	struct scatterlist *sg;
 	int i;
 	unsigned long allocated_size;
+	struct device *dev = heap->priv;
 
 	allocated_size = ALIGN(buffer->size, chunk_heap->chunk_size);
 
 	ion_heap_buffer_zero(buffer);
 
 	if (ion_buffer_cached(buffer))
-		dma_sync_sg_for_device(NULL, table->sgl, table->nents,
+		dma_sync_sg_for_device(dev, table->sgl, table->nents,
 							DMA_BIDIRECTIONAL);
 
 	for_each_sg(table->sgl, sg, table->nents, i) {
@@ -144,11 +145,12 @@
 	int ret;
 	struct page *page;
 	size_t size;
+	struct device *dev = heap_data->priv;
 
 	page = pfn_to_page(PFN_DOWN(heap_data->base));
 	size = heap_data->size;
 
-	ion_pages_sync_for_device(NULL, page, size, DMA_BIDIRECTIONAL);
+	ion_pages_sync_for_device(dev, page, size, DMA_BIDIRECTIONAL);
 
 	ret = ion_heap_pages_zero(page, size, pgprot_writecombine(PAGE_KERNEL));
 	if (ret)
@@ -173,8 +175,8 @@
 	chunk_heap->heap.ops = &chunk_heap_ops;
 	chunk_heap->heap.type = ION_HEAP_TYPE_CHUNK;
 	chunk_heap->heap.flags = ION_HEAP_FLAG_DEFER_FREE;
-	pr_debug("%s: base %lu size %zu align %ld\n", __func__,
-		chunk_heap->base, heap_data->size, heap_data->align);
+	pr_debug("%s: base %pad size %zu align %pad\n", __func__,
+		&chunk_heap->base, heap_data->size, &heap_data->align);
 
 	return &chunk_heap->heap;
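
Both the carveout and chunk heaps now fetch a struct device from heap_data->priv / heap->priv instead of passing NULL into the DMA sync helpers, so cache maintenance runs through the device's real dma_ops. A hedged sketch of the board-side wiring this assumes (array and device names hypothetical):

static struct ion_platform_heap example_heaps[] = {
	{
		.type = ION_HEAP_TYPE_CHUNK,
		.id   = 2,
		.name = "example-chunk",
		.priv = &example_pdev.dev,	/* consumed as dev above */
	},
};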
 
diff -ruw linux-4.4.115/drivers/staging/android/ion/ion_cma_heap.c linux-4.4.115-fbx/drivers/staging/android/ion/ion_cma_heap.c
--- linux-4.4.115/drivers/staging/android/ion/ion_cma_heap.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/staging/android/ion/ion_cma_heap.c	2019-10-29 09:26:24.845214941 +0100
@@ -20,49 +20,63 @@
 #include <linux/errno.h>
 #include <linux/err.h>
 #include <linux/dma-mapping.h>
+#include <linux/msm_ion.h>
+
+#include <asm/cacheflush.h>
+#include <soc/qcom/secure_buffer.h>
 
 #include "ion.h"
 #include "ion_priv.h"
 
 #define ION_CMA_ALLOCATE_FAILED -1
 
-struct ion_cma_heap {
-	struct ion_heap heap;
-	struct device *dev;
-};
-
-#define to_cma_heap(x) container_of(x, struct ion_cma_heap, heap)
-
 struct ion_cma_buffer_info {
 	void *cpu_addr;
 	dma_addr_t handle;
 	struct sg_table *table;
+	bool is_cached;
 };
 
+static int cma_heap_has_outer_cache;
+/*
+ * Create scatter-list for the already allocated DMA buffer.
+ * This function could be replaced by dma_common_get_sgtable
+ * once it becomes available.
+ */
+static int ion_cma_get_sgtable(struct device *dev, struct sg_table *sgt,
+			       void *cpu_addr, dma_addr_t handle, size_t size)
+{
+	struct page *page = pfn_to_page(PFN_DOWN(handle));
+	int ret;
+
+	ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
+	if (unlikely(ret))
+		return ret;
+
+	sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
+	return 0;
+}
 
 /* ION CMA heap operations functions */
 static int ion_cma_allocate(struct ion_heap *heap, struct ion_buffer *buffer,
 			    unsigned long len, unsigned long align,
 			    unsigned long flags)
 {
-	struct ion_cma_heap *cma_heap = to_cma_heap(heap);
-	struct device *dev = cma_heap->dev;
+	struct device *dev = heap->priv;
 	struct ion_cma_buffer_info *info;
 
 	dev_dbg(dev, "Request buffer allocation len %ld\n", len);
 
-	if (buffer->flags & ION_FLAG_CACHED)
-		return -EINVAL;
-
-	if (align > PAGE_SIZE)
-		return -EINVAL;
-
 	info = kzalloc(sizeof(struct ion_cma_buffer_info), GFP_KERNEL);
 	if (!info)
 		return ION_CMA_ALLOCATE_FAILED;
 
-	info->cpu_addr = dma_alloc_coherent(dev, len, &(info->handle),
-						GFP_HIGHUSER | __GFP_ZERO);
+	if (!ION_IS_CACHED(flags))
+		info->cpu_addr = dma_alloc_writecombine(dev, len,
+					&(info->handle), GFP_KERNEL);
+	else
+		info->cpu_addr = dma_alloc_nonconsistent(dev, len,
+					&(info->handle), GFP_KERNEL);
 
 	if (!info->cpu_addr) {
 		dev_err(dev, "Fail to allocate buffer\n");
@@ -71,20 +85,18 @@
 
 	info->table = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
 	if (!info->table)
-		goto free_mem;
+		goto err_free_mem;
+
+	info->is_cached = ION_IS_CACHED(flags);
+
+	if (ion_cma_get_sgtable(dev, info->table, info->cpu_addr,
+				info->handle, len))
+		goto err_free_table;
 
-	if (dma_get_sgtable(dev, info->table, info->cpu_addr, info->handle,
-			    len))
-		goto free_table;
 	/* keep this for memory release */
 	buffer->priv_virt = info;
-	dev_dbg(dev, "Allocate buffer %p\n", buffer);
+	dev_dbg(dev, "Allocate buffer %pK\n", buffer);
 	return 0;
 
-free_table:
+err_free_table:
 	kfree(info->table);
-free_mem:
+err_free_mem:
 	dma_free_coherent(dev, len, info->cpu_addr, info->handle);
 err:
 	kfree(info);
 	return ION_CMA_ALLOCATE_FAILED;
@@ -92,15 +104,14 @@
 
 static void ion_cma_free(struct ion_buffer *buffer)
 {
-	struct ion_cma_heap *cma_heap = to_cma_heap(buffer->heap);
-	struct device *dev = cma_heap->dev;
+	struct device *dev = buffer->heap->priv;
 	struct ion_cma_buffer_info *info = buffer->priv_virt;
 
-	dev_dbg(dev, "Release buffer %p\n", buffer);
+	dev_dbg(dev, "Release buffer %pK\n", buffer);
 	/* release memory */
 	dma_free_coherent(dev, buffer->size, info->cpu_addr, info->handle);
-	/* release sg table */
 	sg_free_table(info->table);
+	/* release sg table */
 	kfree(info->table);
 	kfree(info);
 }
@@ -109,11 +120,10 @@
 static int ion_cma_phys(struct ion_heap *heap, struct ion_buffer *buffer,
 			ion_phys_addr_t *addr, size_t *len)
 {
-	struct ion_cma_heap *cma_heap = to_cma_heap(buffer->heap);
-	struct device *dev = cma_heap->dev;
+	struct device *dev = heap->priv;
 	struct ion_cma_buffer_info *info = buffer->priv_virt;
 
-	dev_dbg(dev, "Return buffer %p physical address %pa\n", buffer,
+	dev_dbg(dev, "Return buffer %pK physical address %pa\n", buffer,
 		&info->handle);
 
 	*addr = info->handle;
@@ -138,25 +148,56 @@
 static int ion_cma_mmap(struct ion_heap *mapper, struct ion_buffer *buffer,
 			struct vm_area_struct *vma)
 {
-	struct ion_cma_heap *cma_heap = to_cma_heap(buffer->heap);
-	struct device *dev = cma_heap->dev;
+	struct device *dev = buffer->heap->priv;
 	struct ion_cma_buffer_info *info = buffer->priv_virt;
 
-	return dma_mmap_coherent(dev, vma, info->cpu_addr, info->handle,
-				 buffer->size);
+	if (info->is_cached)
+		return dma_mmap_nonconsistent(dev, vma, info->cpu_addr,
+				info->handle, buffer->size);
+	else
+		return dma_mmap_writecombine(dev, vma, info->cpu_addr,
+				info->handle, buffer->size);
 }
 
 static void *ion_cma_map_kernel(struct ion_heap *heap,
 				struct ion_buffer *buffer)
 {
 	struct ion_cma_buffer_info *info = buffer->priv_virt;
-	/* kernel memory mapping has been done at allocation time */
+
 	return info->cpu_addr;
 }
 
 static void ion_cma_unmap_kernel(struct ion_heap *heap,
 					struct ion_buffer *buffer)
 {
+	return;
+}
+
+static int ion_cma_print_debug(struct ion_heap *heap, struct seq_file *s,
+			const struct list_head *mem_map)
+{
+	if (mem_map) {
+		struct mem_map_data *data;
+
+		seq_printf(s, "\nMemory Map\n");
+		seq_printf(s, "%16.s %14.s %14.s %14.s\n",
+			   "client", "start address", "end address",
+			   "size");
+
+		list_for_each_entry(data, mem_map, node) {
+			const char *client_name = "(null)";
+
+
+			if (data->client_name)
+				client_name = data->client_name;
+
+			seq_printf(s, "%16.s 0x%14pa 0x%14pa %14lu (0x%lx)\n",
+				   client_name, &data->addr,
+				   &data->addr_end,
+				   data->size, data->size);
+		}
+	}
+	return 0;
 }
 
 static struct ion_heap_ops ion_cma_ops = {
@@ -168,30 +209,147 @@
 	.map_user = ion_cma_mmap,
 	.map_kernel = ion_cma_map_kernel,
 	.unmap_kernel = ion_cma_unmap_kernel,
+	.print_debug = ion_cma_print_debug,
 };
 
 struct ion_heap *ion_cma_heap_create(struct ion_platform_heap *data)
 {
-	struct ion_cma_heap *cma_heap;
+	struct ion_heap *heap;
 
-	cma_heap = kzalloc(sizeof(struct ion_cma_heap), GFP_KERNEL);
+	heap = kzalloc(sizeof(struct ion_heap), GFP_KERNEL);
 
-	if (!cma_heap)
+	if (!heap)
 		return ERR_PTR(-ENOMEM);
 
-	cma_heap->heap.ops = &ion_cma_ops;
+	heap->ops = &ion_cma_ops;
+	/*
+	 * set device as the heap's private data; later it will be
+	 * used to make the link with reserved CMA memory
+	 */
+	heap->priv = data->priv;
+	heap->type = ION_HEAP_TYPE_DMA;
+	cma_heap_has_outer_cache = data->has_outer_cache;
+	return heap;
+}
+
+void ion_cma_heap_destroy(struct ion_heap *heap)
+{
+	kfree(heap);
+}
+
+static void ion_secure_cma_free(struct ion_buffer *buffer)
+{
+	int i, ret = 0;
+	int source_vm;
+	int dest_vmid;
+	int dest_perms;
+	struct sg_table *sgt;
+	struct scatterlist *sg;
+	struct ion_cma_buffer_info *info = buffer->priv_virt;
+
+	source_vm = get_secure_vmid(buffer->flags);
+	if (source_vm < 0) {
+		pr_err("%s: Failed to get secure vmid\n", __func__);
+		return;
+	}
+	dest_vmid = VMID_HLOS;
+	dest_perms = PERM_READ | PERM_WRITE | PERM_EXEC;
+
+	sgt = info->table;
+	ret = hyp_assign_table(sgt, &source_vm, 1, &dest_vmid, &dest_perms, 1);
+	if (ret) {
+		pr_err("%s: Not freeing memory since assign failed\n",
+							__func__);
+		return;
+	}
+
+	for_each_sg(sgt->sgl, sg, sgt->nents, i)
+		ClearPagePrivate(sg_page(sg));
+
+	ion_cma_free(buffer);
+}
+
+static int ion_secure_cma_allocate(struct ion_heap *heap,
+			struct ion_buffer *buffer, unsigned long len,
+			unsigned long align, unsigned long flags)
+{
+	int i, ret = 0;
+	int source_vm;
+	int dest_vm;
+	int dest_perms;
+	struct ion_cma_buffer_info *info;
+	struct sg_table *sgt;
+	struct scatterlist *sg;
+
+	source_vm = VMID_HLOS;
+	dest_vm = get_secure_vmid(flags);
+
+	if (dest_vm < 0) {
+		pr_err("%s: Failed to get secure vmid\n", __func__);
+		return -EINVAL;
+	}
+
+	if (dest_vm == VMID_CP_SEC_DISPLAY)
+		dest_perms = PERM_READ;
+	else
+		dest_perms = PERM_READ | PERM_WRITE;
+
+	ret = ion_cma_allocate(heap, buffer, len, align, flags);
+	if (ret) {
+		dev_err(heap->priv, "Unable to allocate cma buffer");
+		return ret;
+	}
+
+	info = buffer->priv_virt;
+	sgt = info->table;
+	ret = hyp_assign_table(sgt, &source_vm, 1, &dest_vm, &dest_perms, 1);
+	if (ret) {
+		pr_err("%s: Assign call failed\n", __func__);
+		goto err;
+	}
+
+	/* Set the private bit to indicate that we've secured this */
+	for_each_sg(sgt->sgl, sg, sgt->nents, i)
+		SetPagePrivate(sg_page(sg));
+
+	return ret;
+
+err:
+	ion_secure_cma_free(buffer);
+	return ret;
+}
+
+static struct ion_heap_ops ion_secure_cma_ops = {
+	.allocate = ion_secure_cma_allocate,
+	.free = ion_secure_cma_free,
+	.map_dma = ion_cma_heap_map_dma,
+	.unmap_dma = ion_cma_heap_unmap_dma,
+	.phys = ion_cma_phys,
+	.map_user = ion_cma_mmap,
+	.map_kernel = ion_cma_map_kernel,
+	.unmap_kernel = ion_cma_unmap_kernel,
+	.print_debug = ion_cma_print_debug,
+};
+
+struct ion_heap *ion_cma_secure_heap_create(struct ion_platform_heap *data)
+{
+	struct ion_heap *heap;
+
+	heap = kzalloc(sizeof(struct ion_heap), GFP_KERNEL);
+
+	if (!heap)
+		return ERR_PTR(-ENOMEM);
+
+	heap->ops = &ion_secure_cma_ops;
 	/*
-	 * get device from private heaps data, later it will be
+	 * set device as the heap's private data; later it will be
 	 * used to make the link with reserved CMA memory
 	 */
-	cma_heap->dev = data->priv;
-	cma_heap->heap.type = ION_HEAP_TYPE_DMA;
-	return &cma_heap->heap;
+	heap->priv = data->priv;
+	heap->type = ION_HEAP_TYPE_HYP_CMA;
+	cma_heap_has_outer_cache = data->has_outer_cache;
+	return heap;
 }
 
-void ion_cma_heap_destroy(struct ion_heap *heap)
+void ion_cma_secure_heap_destroy(struct ion_heap *heap)
 {
-	struct ion_cma_heap *cma_heap = to_cma_heap(heap);
-
-	kfree(cma_heap);
+	kfree(heap);
 }
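
ion_secure_cma_allocate() tags every page it hands to the secure VM with SetPagePrivate(), and the free path only clears the bit after hyp_assign_table() has successfully returned the range to VMID_HLOS. Under that convention, an "is this buffer currently secure" predicate can reduce to checking the first page; a sketch, not part of this patch:

static bool example_buffer_is_secure(struct ion_buffer *buffer)
{
	struct sg_table *sgt = buffer->priv_virt;

	/* PG_private doubles as the "assigned to a secure VM" marker */
	return sgt && sgt->sgl && PagePrivate(sg_page(sgt->sgl));
}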
diff -ruw linux-4.4.115/drivers/staging/android/ion/ion.h linux-4.4.115-fbx/drivers/staging/android/ion/ion.h
--- linux-4.4.115/drivers/staging/android/ion/ion.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/staging/android/ion/ion.h	2019-01-22 16:16:26.711275457 +0100
@@ -2,6 +2,7 @@
  * drivers/staging/android/ion/ion.h
  *
  * Copyright (C) 2011 Google, Inc.
+ * Copyright (c) 2011-2014,2017 The Linux Foundation. All rights reserved.
  *
  * This software is licensed under the terms of the GNU General Public
  * License version 2, as published by the Free Software Foundation, and
@@ -17,8 +18,7 @@
 #ifndef _LINUX_ION_H
 #define _LINUX_ION_H
 
-#include <linux/types.h>
-
+#include <linux/err.h>
 #include "../uapi/ion.h"
 
 struct ion_handle;
@@ -28,13 +28,11 @@
 struct ion_client;
 struct ion_buffer;
 
-/*
- * This should be removed some day when phys_addr_t's are fully
- * plumbed in the kernel, and all instances of ion_phys_addr_t should
- * be converted to phys_addr_t.  For the time being many kernel interfaces
- * do not accept phys_addr_t's that would have to
- */
-#define ion_phys_addr_t unsigned long
+/* This should be removed some day when phys_addr_t's are fully
+   plumbed in the kernel, and all instances of ion_phys_addr_t should
+   be converted to phys_addr_t.  For the time being many kernel interfaces
+   do not accept phys_addr_t's that would have to */
+#define ion_phys_addr_t dma_addr_t
 
 /**
  * struct ion_platform_heap - defines a heap in the given platform
@@ -45,6 +43,9 @@
  * @name:	used for debug purposes
  * @base:	base address of heap in physical memory if applicable
  * @size:	size of the heap in bytes if applicable
+ * @has_outer_cache:	set to 1 if outer cache is used, 0 otherwise
+ * @extra_data:	extra data specific to each heap type
  * @align:	required alignment in physical memory if applicable
  * @priv:	private info passed from the board file
  *
@@ -56,22 +57,28 @@
 	const char *name;
 	ion_phys_addr_t base;
 	size_t size;
+	unsigned int has_outer_cache;
+	void *extra_data;
 	ion_phys_addr_t align;
 	void *priv;
 };
 
 /**
  * struct ion_platform_data - array of platform heaps passed from board file
+ * @has_outer_cache:    set to 1 if outer cache is used, 0 otherwise.
  * @nr:		number of structures in the array
  * @heaps:	array of platform_heap structions
  *
  * Provided by the board file in the form of platform data to a platform device.
  */
 struct ion_platform_data {
+	unsigned int has_outer_cache;
 	int nr;
 	struct ion_platform_heap *heaps;
 };
 
+#ifdef CONFIG_ION
+
 /**
  * ion_reserve() - reserve memory for ion heaps if applicable
  * @data:	platform data specifying starting physical address and
@@ -202,4 +209,83 @@
  */
 struct ion_handle *ion_import_dma_buf(struct ion_client *client, int fd);
 
+/**
+ * ion_dma_buf_is_secure() - Returns true if the dma buf is secure
+ * dmabuf
+ * @dmabuf: pointer to a dma-buf
+ *
+ * Given a dma-buf pointer, return true if ion created it and it is from
+ * a secure heap.
+ */
+bool ion_dma_buf_is_secure(struct dma_buf *dmabuf);
+
+#else
+static inline void ion_reserve(struct ion_platform_data *data)
+{
+
+}
+
+static inline struct ion_client *ion_client_create(
+	struct ion_device *dev, unsigned int heap_id_mask, const char *name)
+{
+	return ERR_PTR(-ENODEV);
+}
+
+static inline void ion_client_destroy(struct ion_client *client) { }
+
+static inline struct ion_handle *ion_alloc(struct ion_client *client,
+					size_t len, size_t align,
+					unsigned int heap_id_mask,
+					unsigned int flags)
+{
+	return ERR_PTR(-ENODEV);
+}
+
+static inline void ion_free(struct ion_client *client,
+	struct ion_handle *handle) { }
+
+
+static inline int ion_phys(struct ion_client *client,
+	struct ion_handle *handle, ion_phys_addr_t *addr, size_t *len)
+{
+	return -ENODEV;
+}
+
+static inline struct sg_table *ion_sg_table(struct ion_client *client,
+			      struct ion_handle *handle)
+{
+	return ERR_PTR(-ENODEV);
+}
+
+static inline void *ion_map_kernel(struct ion_client *client,
+	struct ion_handle *handle)
+{
+	return ERR_PTR(-ENODEV);
+}
+
+static inline void ion_unmap_kernel(struct ion_client *client,
+	struct ion_handle *handle) { }
+
+static inline int ion_share_dma_buf(struct ion_client *client, struct ion_handle *handle)
+{
+	return -ENODEV;
+}
+
+static inline struct ion_handle *ion_import_dma_buf(struct ion_client *client, int fd)
+{
+	return ERR_PTR(-ENODEV);
+}
+
+static inline int ion_handle_get_flags(struct ion_client *client,
+	struct ion_handle *handle, unsigned long *flags)
+{
+	return -ENODEV;
+}
+
+static inline bool ion_dma_buf_is_secure(struct dma_buf *dmabuf)
+{
+	return false;
+}
+
+#endif /* CONFIG_ION */
 #endif /* _LINUX_ION_H */
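
With the CONFIG_ION guard in place, callers no longer need their own ifdefs: every stub either returns -ENODEV or ERR_PTR(-ENODEV), so ordinary error handling covers an ION-less build. An illustrative caller, with the heap id bit hypothetical since the mask is board-specific:

	struct ion_client *client;
	unsigned int heap_id_mask = 1 << 0;	/* board-specific heap id */

	client = ion_client_create(idev, heap_id_mask, "example-client");
	if (IS_ERR(client))
		return PTR_ERR(client);	/* -ENODEV when ION is absent */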
diff -ruw linux-4.4.115/drivers/staging/android/ion/ion_heap.c linux-4.4.115-fbx/drivers/staging/android/ion/ion_heap.c
--- linux-4.4.115/drivers/staging/android/ion/ion_heap.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/staging/android/ion/ion_heap.c	2019-01-22 16:16:26.715275493 +0100
@@ -2,6 +2,7 @@
  * drivers/staging/android/ion/ion_heap.c
  *
  * Copyright (C) 2011 Google, Inc.
+ * Copyright (c) 2011-2016, The Linux Foundation. All rights reserved.
  *
  * This software is licensed under the terms of the GNU General Public
  * License version 2, as published by the Free Software Foundation, and
@@ -22,6 +23,9 @@
 #include <linux/sched.h>
 #include <linux/scatterlist.h>
 #include <linux/vmalloc.h>
+#include <linux/slab.h>
+#include <linux/highmem.h>
+#include <linux/dma-mapping.h>
 #include "ion.h"
 #include "ion_priv.h"
 
@@ -38,7 +42,7 @@
 	struct page **tmp = pages;
 
 	if (!pages)
-		return NULL;
+		return ERR_PTR(-ENOMEM);
 
 	if (buffer->flags & ION_FLAG_CACHED)
 		pgprot = PAGE_KERNEL;
@@ -321,8 +325,9 @@
 
 	switch (heap_data->type) {
 	case ION_HEAP_TYPE_SYSTEM_CONTIG:
-		heap = ion_system_contig_heap_create(heap_data);
-		break;
+		pr_err("%s: Heap type is disabled: %d\n", __func__,
+		       heap_data->type);
+		return ERR_PTR(-EINVAL);
 	case ION_HEAP_TYPE_SYSTEM:
 		heap = ion_system_heap_create(heap_data);
 		break;
@@ -342,14 +347,15 @@
 	}
 
 	if (IS_ERR_OR_NULL(heap)) {
-		pr_err("%s: error creating heap %s type %d base %lu size %zu\n",
+		pr_err("%s: error creating heap %s type %d base %pa size %zu\n",
 		       __func__, heap_data->name, heap_data->type,
-		       heap_data->base, heap_data->size);
+		       &heap_data->base, heap_data->size);
 		return ERR_PTR(-EINVAL);
 	}
 
 	heap->name = heap_data->name;
 	heap->id = heap_data->id;
+	heap->priv = heap_data->priv;
 	return heap;
 }
 EXPORT_SYMBOL(ion_heap_create);
@@ -361,7 +367,8 @@
 
 	switch (heap->type) {
 	case ION_HEAP_TYPE_SYSTEM_CONTIG:
-		ion_system_contig_heap_destroy(heap);
+		pr_err("%s: Heap type is disabled: %d\n", __func__,
+		       heap->type);
 		break;
 	case ION_HEAP_TYPE_SYSTEM:
 		ion_system_heap_destroy(heap);
diff -ruw linux-4.4.115/drivers/staging/android/ion/ion_page_pool.c linux-4.4.115-fbx/drivers/staging/android/ion/ion_page_pool.c
--- linux-4.4.115/drivers/staging/android/ion/ion_page_pool.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/staging/android/ion/ion_page_pool.c	2019-01-22 16:16:26.715275493 +0100
@@ -1,5 +1,5 @@
 /*
- * drivers/staging/android/ion/ion_mem_pool.c
+ * drivers/staging/android/ion/ion_page_pool.c
  *
  * Copyright (C) 2011 Google, Inc.
  *
@@ -22,22 +22,35 @@
 #include <linux/init.h>
 #include <linux/slab.h>
 #include <linux/swap.h>
+#include <linux/vmalloc.h>
 #include "ion_priv.h"
 
 static void *ion_page_pool_alloc_pages(struct ion_page_pool *pool)
 {
-	struct page *page = alloc_pages(pool->gfp_mask, pool->order);
+	struct page *page;
+
+	page = alloc_pages(pool->gfp_mask & ~__GFP_ZERO, pool->order);
 
 	if (!page)
 		return NULL;
-	ion_pages_sync_for_device(NULL, page, PAGE_SIZE << pool->order,
-						DMA_BIDIRECTIONAL);
+
+	if (pool->gfp_mask & __GFP_ZERO)
+		if (msm_ion_heap_high_order_page_zero(pool->dev, page,
+						      pool->order))
+			goto error_free_pages;
+
+	ion_page_pool_alloc_set_cache_policy(pool, page);
+
 	return page;
+error_free_pages:
+	__free_pages(page, pool->order);
+	return NULL;
 }
 
 static void ion_page_pool_free_pages(struct ion_page_pool *pool,
 				     struct page *page)
 {
+	ion_page_pool_free_set_cache_policy(pool, page);
 	__free_pages(page, pool->order);
 }
 
@@ -51,6 +64,9 @@
 		list_add_tail(&page->lru, &pool->low_items);
 		pool->low_count++;
 	}
+
+	mod_zone_page_state(page_zone(page), NR_INDIRECTLY_RECLAIMABLE_BYTES,
+			    (1 << (PAGE_SHIFT + pool->order)));
 	mutex_unlock(&pool->mutex);
 	return 0;
 }
@@ -70,24 +86,49 @@
 	}
 
 	list_del(&page->lru);
+	mod_zone_page_state(page_zone(page), NR_INDIRECTLY_RECLAIMABLE_BYTES,
+			    -(1 << (PAGE_SHIFT + pool->order)));
 	return page;
 }
 
-struct page *ion_page_pool_alloc(struct ion_page_pool *pool)
+void *ion_page_pool_alloc(struct ion_page_pool *pool, bool *from_pool)
 {
 	struct page *page = NULL;
 
 	BUG_ON(!pool);
 
-	mutex_lock(&pool->mutex);
+	*from_pool = true;
+
+	if (mutex_trylock(&pool->mutex)) {
 	if (pool->high_count)
 		page = ion_page_pool_remove(pool, true);
 	else if (pool->low_count)
 		page = ion_page_pool_remove(pool, false);
 	mutex_unlock(&pool->mutex);
-
-	if (!page)
+	}
+	if (!page) {
 		page = ion_page_pool_alloc_pages(pool);
+		*from_pool = false;
+	}
+	return page;
+}
+
+/*
+ * Tries to allocate from only the specified Pool and returns NULL otherwise
+ */
+void *ion_page_pool_alloc_pool_only(struct ion_page_pool *pool)
+{
+	struct page *page = NULL;
+
+	BUG_ON(!pool);
+
+	if (mutex_trylock(&pool->mutex)) {
+		if (pool->high_count)
+			page = ion_page_pool_remove(pool, true);
+		else if (pool->low_count)
+			page = ion_page_pool_remove(pool, false);
+		mutex_unlock(&pool->mutex);
+	}
 
 	return page;
 }
@@ -96,14 +137,17 @@
 {
 	int ret;
 
-	BUG_ON(pool->order != compound_order(page));
-
 	ret = ion_page_pool_add(pool, page);
 	if (ret)
 		ion_page_pool_free_pages(pool, page);
 }
 
-static int ion_page_pool_total(struct ion_page_pool *pool, bool high)
+void ion_page_pool_free_immediate(struct ion_page_pool *pool, struct page *page)
+{
+	ion_page_pool_free_pages(pool, page);
+}
+
+int ion_page_pool_total(struct ion_page_pool *pool, bool high)
 {
 	int count = pool->low_count;
 
@@ -147,17 +191,19 @@
 	return freed;
 }
 
-struct ion_page_pool *ion_page_pool_create(gfp_t gfp_mask, unsigned int order)
+struct ion_page_pool *ion_page_pool_create(struct device *dev, gfp_t gfp_mask,
+					   unsigned int order)
 {
 	struct ion_page_pool *pool = kmalloc(sizeof(struct ion_page_pool),
 					     GFP_KERNEL);
 	if (!pool)
 		return NULL;
+	pool->dev = dev;
 	pool->high_count = 0;
 	pool->low_count = 0;
 	INIT_LIST_HEAD(&pool->low_items);
 	INIT_LIST_HEAD(&pool->high_items);
-	pool->gfp_mask = gfp_mask | __GFP_COMP;
+	pool->gfp_mask = gfp_mask;
 	pool->order = order;
 	mutex_init(&pool->mutex);
 	plist_node_init(&pool->list, order);
diff -ruw linux-4.4.115/drivers/staging/android/ion/ion_priv.h linux-4.4.115-fbx/drivers/staging/android/ion/ion_priv.h
--- linux-4.4.115/drivers/staging/android/ion/ion_priv.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/staging/android/ion/ion_priv.h	2019-10-29 09:26:24.845214941 +0100
@@ -2,6 +2,7 @@
  * drivers/staging/android/ion/ion_priv.h
  *
  * Copyright (C) 2011 Google, Inc.
+ * Copyright (c) 2011-2017, The Linux Foundation. All rights reserved.
  *
  * This software is licensed under the terms of the GNU General Public
  * License version 2, as published by the Free Software Foundation, and
@@ -23,9 +24,16 @@
 #include <linux/mm_types.h>
 #include <linux/mutex.h>
 #include <linux/rbtree.h>
+#include <linux/seq_file.h>
+
+#include "msm_ion_priv.h"
 #include <linux/sched.h>
 #include <linux/shrinker.h>
 #include <linux/types.h>
+#ifdef CONFIG_ION_POOL_CACHE_POLICY
+#include <asm/cacheflush.h>
+#endif
+#include <linux/device.h>
 
 #include "ion.h"
 
@@ -46,9 +54,12 @@
  *			an ion_phys_addr_t (and someday a phys_addr_t)
  * @lock:		protects the buffers cnt fields
  * @kmap_cnt:		number of times the buffer is mapped to the kernel
- * @vaddr:		the kernel mapping if kmap_cnt is not zero
- * @dmap_cnt:		number of times the buffer is mapped for dma
- * @sg_table:		the sg table for the buffer if dmap_cnt is not zero
+ * @vaddr:		the kernel mapping if kmap_cnt is not zero
+ * @sg_table:		the sg table for the buffer.  Note that if you need
+ *			an sg_table for this buffer, you should likely be
+ *			using Ion as a DMA Buf exporter and using
+ *			dma_buf_map_attachment rather than trying to use this
+ *			field directly.
  * @pages:		flat array of pages in the buffer -- used by fault
  *			handler and only valid for buffers that are faulted in
  * @vmas:		list of vma's mapping this buffer
@@ -76,7 +87,6 @@
 	struct mutex lock;
 	int kmap_cnt;
 	void *vaddr;
-	int dmap_cnt;
 	struct sg_table *sg_table;
 	struct page **pages;
 	struct list_head vmas;
@@ -90,7 +100,11 @@
 /**
  * struct ion_heap_ops - ops to operate on a given heap
  * @allocate:		allocate memory
- * @free:		free memory
+ * @free:		free memory. Will be called with
+ *			ION_PRIV_FLAG_SHRINKER_FREE set in buffer flags when
+ *			called from a shrinker. In that case, the pages being
+ *			free'd must be truly free'd back to the system, not put
+ *			in a page pool or otherwise cached.
  * @phys		get physical address of a buffer (only define on
  *			physically contiguous heaps)
  * @map_dma		map the memory for dma to a scatterlist
@@ -98,6 +112,7 @@
  * @map_kernel		map memory to the kernel
  * @unmap_kernel	unmap memory to the kernel
  * @map_user		map memory to userspace
+ * @unmap_user		unmap memory from userspace
  *
  * allocate, phys, and map_user return 0 on success, -errno on error.
  * map_dma and map_kernel return pointer on success, ERR_PTR on
@@ -121,6 +136,9 @@
 	int (*map_user)(struct ion_heap *mapper, struct ion_buffer *buffer,
 			struct vm_area_struct *vma);
 	int (*shrink)(struct ion_heap *heap, gfp_t gfp_mask, int nr_to_scan);
+	void (*unmap_user) (struct ion_heap *mapper, struct ion_buffer *buffer);
+	int (*print_debug)(struct ion_heap *heap, struct seq_file *s,
+			   const struct list_head *mem_map);
 };
 
 /**
@@ -151,6 +169,7 @@
  *			MUST be unique
  * @name:		used for debugging
  * @shrinker:		a shrinker for the heap
+ * @priv:		private heap data
  * @free_list:		free list head if deferred free is used
  * @free_list_size	size of the deferred free list in bytes
  * @lock:		protects the free list
@@ -173,6 +192,7 @@
 	unsigned int id;
 	const char *name;
 	struct shrinker shrinker;
+	void *priv;
 	struct list_head free_list;
 	size_t free_list_size;
 	spinlock_t free_lock;
@@ -180,6 +200,8 @@
 	struct task_struct *task;
 
 	int (*debug_show)(struct ion_heap *heap, struct seq_file *, void *);
+	atomic_long_t total_allocated;
+	atomic_long_t total_handles;
 };
 
 /**
@@ -223,6 +245,12 @@
  */
 void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap);
 
+struct pages_mem {
+	struct page **pages;
+	u32 size;
+	void (*free_fn) (const void *);
+};
+
 /**
  * some helpers for common operations on buffers using the sg_table
  * and vaddr fields
@@ -234,6 +262,32 @@
 int ion_heap_buffer_zero(struct ion_buffer *buffer);
 int ion_heap_pages_zero(struct page *page, size_t size, pgprot_t pgprot);
 
+int msm_ion_heap_high_order_page_zero(struct device *dev, struct page *page,
+				      int order);
+struct ion_heap *get_ion_heap(int heap_id);
+int msm_ion_heap_sg_table_zero(struct device *dev, struct sg_table *,
+			       size_t size);
+int msm_ion_heap_pages_zero(struct page **pages, int num_pages);
+int msm_ion_heap_alloc_pages_mem(struct pages_mem *pages_mem);
+void msm_ion_heap_free_pages_mem(struct pages_mem *pages_mem);
+
+/**
+ * Functions to help assign/unassign sg_table for System Secure Heap
+ */
+
+int ion_system_secure_heap_unassign_sg(struct sg_table *sgt, int source_vmid);
+int ion_system_secure_heap_assign_sg(struct sg_table *sgt, int dest_vmid);
+
+/**
+ * ion_heap_init_shrinker
+ * @heap:		the heap
+ *
+ * If a heap sets the ION_HEAP_FLAG_DEFER_FREE flag or defines the shrink op
+ * this function will be called to setup a shrinker to shrink the freelists
+ * and call the heap's shrink op.
+ */
+void ion_heap_init_shrinker(struct ion_heap *heap);
+
 /**
  * ion_heap_init_shrinker
  * @heap:		the heap
@@ -276,7 +330,7 @@
 size_t ion_heap_freelist_drain(struct ion_heap *heap, size_t size);
 
 /**
- * ion_heap_freelist_shrink - drain the deferred free
+ * ion_heap_freelist_drain_from_shrinker - drain the deferred free
  *				list, skipping any heap-specific
  *				pooling or caching mechanisms
  *
@@ -292,10 +346,10 @@
  * page pools or otherwise cache the pages. Everything must be
  * genuinely free'd back to the system. If you're free'ing from a
  * shrinker you probably want to use this. Note that this relies on
- * the heap.ops.free callback honoring the ION_PRIV_FLAG_SHRINKER_FREE
- * flag.
+ * the heap.ops.free callback honoring the
+ * ION_PRIV_FLAG_SHRINKER_FREE flag.
  */
-size_t ion_heap_freelist_shrink(struct ion_heap *heap,
+size_t ion_heap_freelist_drain_from_shrinker(struct ion_heap *heap,
 					size_t size);
 
 /**
@@ -324,8 +378,16 @@
 
 struct ion_heap *ion_chunk_heap_create(struct ion_platform_heap *);
 void ion_chunk_heap_destroy(struct ion_heap *);
+#ifdef CONFIG_CMA
 struct ion_heap *ion_cma_heap_create(struct ion_platform_heap *);
 void ion_cma_heap_destroy(struct ion_heap *);
+#else
+static inline struct ion_heap *ion_cma_heap_create(struct ion_platform_heap *h)
+{
+	return NULL;
+}
+static inline void ion_cma_heap_destroy(struct ion_heap *h) {}
+#endif
 
 /**
  * kernel api to allocate/free from carveout -- used when carveout is
@@ -372,15 +434,51 @@
 	struct list_head high_items;
 	struct list_head low_items;
 	struct mutex mutex;
+	struct device *dev;
 	gfp_t gfp_mask;
 	unsigned int order;
 	struct plist_node list;
 };
 
-struct ion_page_pool *ion_page_pool_create(gfp_t gfp_mask, unsigned int order);
+struct ion_page_pool *ion_page_pool_create(struct device *dev, gfp_t gfp_mask,
+					   unsigned int order);
 void ion_page_pool_destroy(struct ion_page_pool *);
-struct page *ion_page_pool_alloc(struct ion_page_pool *);
+void *ion_page_pool_alloc(struct ion_page_pool *, bool *from_pool);
+void *ion_page_pool_alloc_pool_only(struct ion_page_pool *);
 void ion_page_pool_free(struct ion_page_pool *, struct page *);
+void ion_page_pool_free_immediate(struct ion_page_pool *, struct page *);
+int ion_page_pool_total(struct ion_page_pool *pool, bool high);
+size_t ion_system_heap_secure_page_pool_total(struct ion_heap *heap, int vmid);
+
+#ifdef CONFIG_ION_POOL_CACHE_POLICY
+static inline void ion_page_pool_alloc_set_cache_policy(
+				struct ion_page_pool *pool,
+				struct page *page)
+{
+	void *va = page_address(page);
+
+	if (va)
+		set_memory_wc((unsigned long)va, 1 << pool->order);
+}
+
+static inline void ion_page_pool_free_set_cache_policy(
+				struct ion_page_pool *pool,
+				struct page *page)
+{
+	void *va = page_address(page);
+
+	if (va)
+		set_memory_wb((unsigned long)va, 1 << pool->order);
+}
+#else
+static inline void ion_page_pool_alloc_set_cache_policy(
+				struct ion_page_pool *pool,
+				struct page *page) { }
+
+static inline void ion_page_pool_free_set_cache_policy(
+				struct ion_page_pool *pool,
+				struct page *page) { }
+#endif
+
 /** ion_page_pool_shrink - shrinks the size of the memory cached in the pool
  * @pool:		the pool
@@ -403,4 +501,13 @@
 void ion_pages_sync_for_device(struct device *dev, struct page *page,
 		size_t size, enum dma_data_direction dir);
 
+int ion_walk_heaps(struct ion_client *client, int heap_id,
+			enum ion_heap_type type, void *data,
+			int (*f)(struct ion_heap *heap, void *data));
+
+struct ion_handle *ion_handle_get_by_id(struct ion_client *client,
+					int id);
+
+int ion_handle_put(struct ion_handle *handle);
+
 #endif /* _ION_PRIV_H */
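
The expanded @free documentation and the ion_heap_freelist_drain_from_shrinker() rename both encode the same contract: when ION_PRIV_FLAG_SHRINKER_FREE is set, the pages must really go back to the system, never into a pool. A conforming free path, sketched after what free_buffer_page() does in the ion_system_heap.c hunk below:

	if (buffer->private_flags & ION_PRIV_FLAG_SHRINKER_FREE)
		ion_page_pool_free_immediate(pool, page); /* straight to buddy */
	else
		ion_page_pool_free(pool, page);		  /* recycle via pool */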
diff -ruw linux-4.4.115/drivers/staging/android/ion/ion_system_heap.c linux-4.4.115-fbx/drivers/staging/android/ion/ion_system_heap.c
--- linux-4.4.115/drivers/staging/android/ion/ion_system_heap.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/staging/android/ion/ion_system_heap.c	2019-10-29 09:26:24.845214941 +0100
@@ -2,6 +2,7 @@
  * drivers/staging/android/ion/ion_system_heap.c
  *
  * Copyright (C) 2011 Google, Inc.
+ * Copyright (c) 2011-2019, The Linux Foundation. All rights reserved.
  *
  * This software is licensed under the terms of the GNU General Public
  * License version 2, as published by the Free Software Foundation, and
@@ -19,22 +20,32 @@
 #include <linux/err.h>
 #include <linux/highmem.h>
 #include <linux/mm.h>
+#include <linux/msm_ion.h>
 #include <linux/scatterlist.h>
 #include <linux/seq_file.h>
 #include <linux/slab.h>
 #include <linux/vmalloc.h>
 #include "ion.h"
 #include "ion_priv.h"
+#include <linux/dma-mapping.h>
+#include <trace/events/kmem.h>
+#include <soc/qcom/secure_buffer.h>
+
+static gfp_t high_order_gfp_flags = (GFP_HIGHUSER | __GFP_NOWARN |
+				     __GFP_NORETRY)
+				     & ~__GFP_RECLAIM;
+static gfp_t low_order_gfp_flags  = (GFP_HIGHUSER | __GFP_NOWARN);
+
+#ifndef CONFIG_ALLOC_BUFFERS_IN_4K_CHUNKS
+static const unsigned int orders[] = {9, 8, 4, 0};
+#else
+static const unsigned int orders[] = {0};
+#endif
 
-static gfp_t high_order_gfp_flags = (GFP_HIGHUSER | __GFP_ZERO | __GFP_NOWARN |
-				     __GFP_NORETRY) & ~__GFP_DIRECT_RECLAIM;
-static gfp_t low_order_gfp_flags  = (GFP_HIGHUSER | __GFP_ZERO | __GFP_NOWARN);
-static const unsigned int orders[] = {8, 4, 0};
 static const int num_orders = ARRAY_SIZE(orders);
 static int order_to_index(unsigned int order)
 {
 	int i;
-
 	for (i = 0; i < num_orders; i++)
 		if (order == orders[i])
 			return i;
@@ -42,81 +53,284 @@
 	return -1;
 }
 
-static inline unsigned int order_to_size(int order)
+static unsigned int order_to_size(int order)
 {
 	return PAGE_SIZE << order;
 }
 
 struct ion_system_heap {
 	struct ion_heap heap;
-	struct ion_page_pool *pools[0];
+	struct ion_page_pool **uncached_pools;
+	struct ion_page_pool **cached_pools;
+	struct ion_page_pool **secure_pools[VMID_LAST];
+	/* Prevents unnecessary page splitting */
+	struct mutex split_page_mutex;
+};
+
+struct page_info {
+	struct page *page;
+	bool from_pool;
+	unsigned int order;
+	struct list_head list;
 };
 
+/*
+ * Used by ion_system_secure_heap only
+ * Since no lock is held, results are approximate.
+ */
+size_t ion_system_heap_secure_page_pool_total(struct ion_heap *heap,
+					      int vmid_flags)
+{
+	struct ion_system_heap *sys_heap;
+	struct ion_page_pool *pool;
+	size_t total = 0;
+	int vmid, i;
+
+	sys_heap = container_of(heap, struct ion_system_heap, heap);
+	vmid = get_secure_vmid(vmid_flags);
+	if (!is_secure_vmid_valid(vmid))
+		return 0;
+
+	for (i = 0; i < num_orders; i++) {
+		pool = sys_heap->secure_pools[vmid][i];
+		total += ion_page_pool_total(pool, true);
+	}
+
+	return total << PAGE_SHIFT;
+}
+
 static struct page *alloc_buffer_page(struct ion_system_heap *heap,
 				      struct ion_buffer *buffer,
-				      unsigned long order)
+				      unsigned long order,
+				      bool *from_pool)
 {
 	bool cached = ion_buffer_cached(buffer);
-	struct ion_page_pool *pool = heap->pools[order_to_index(order)];
 	struct page *page;
+	struct ion_page_pool *pool;
+	int vmid = get_secure_vmid(buffer->flags);
+	struct device *dev = heap->heap.priv;
 
-	if (!cached) {
-		page = ion_page_pool_alloc(pool);
-	} else {
-		gfp_t gfp_flags = low_order_gfp_flags;
+	if (*from_pool) {
+		if (vmid > 0)
+			pool = heap->secure_pools[vmid][order_to_index(order)];
+		else if (!cached)
+			pool = heap->uncached_pools[order_to_index(order)];
+		else
+			pool = heap->cached_pools[order_to_index(order)];
 
-		if (order > 4)
-			gfp_flags = high_order_gfp_flags;
-		page = alloc_pages(gfp_flags | __GFP_COMP, order);
-		if (!page)
-			return NULL;
-		ion_pages_sync_for_device(NULL, page, PAGE_SIZE << order,
+		page = ion_page_pool_alloc(pool, from_pool);
+	} else {
+		gfp_t gfp_mask = low_order_gfp_flags;
+		if (order)
+			gfp_mask = high_order_gfp_flags;
+
+		page = alloc_pages(gfp_mask, order);
+		if (page)
+			ion_pages_sync_for_device(dev, page, PAGE_SIZE << order,
 						DMA_BIDIRECTIONAL);
 	}
+	if (!page)
+		return NULL;
 
 	return page;
 }
 
+/*
+ * For secure pages that need to be freed and not added back to the pool,
+ * hyp_unassign should be called before calling this function.
+ */
 static void free_buffer_page(struct ion_system_heap *heap,
-			     struct ion_buffer *buffer, struct page *page)
+			     struct ion_buffer *buffer, struct page *page,
+			     unsigned int order)
 {
-	unsigned int order = compound_order(page);
 	bool cached = ion_buffer_cached(buffer);
+	int vmid = get_secure_vmid(buffer->flags);
 
-	if (!cached && !(buffer->private_flags & ION_PRIV_FLAG_SHRINKER_FREE)) {
-		struct ion_page_pool *pool = heap->pools[order_to_index(order)];
-
+	if (!(buffer->flags & ION_FLAG_POOL_FORCE_ALLOC)) {
+		struct ion_page_pool *pool;
+		if (vmid > 0)
+			pool = heap->secure_pools[vmid][order_to_index(order)];
+		else if (cached)
+			pool = heap->cached_pools[order_to_index(order)];
+		else
+			pool = heap->uncached_pools[order_to_index(order)];
+
+		if (buffer->private_flags & ION_PRIV_FLAG_SHRINKER_FREE)
+			ion_page_pool_free_immediate(pool, page);
+		else
 		ion_page_pool_free(pool, page);
 	} else {
 		__free_pages(page, order);
 	}
 }
 
+static struct page *alloc_from_secure_pool_order(struct ion_system_heap *heap,
+						 struct ion_buffer *buffer,
+						 unsigned long order)
+{
+	int vmid = get_secure_vmid(buffer->flags);
+	struct ion_page_pool *pool;
 
-static struct page *alloc_largest_available(struct ion_system_heap *heap,
+	if (!is_secure_vmid_valid(vmid))
+		return NULL;
+
+	pool = heap->secure_pools[vmid][order_to_index(order)];
+	return ion_page_pool_alloc_pool_only(pool);
+}
+
+static struct page *split_page_from_secure_pool(struct ion_system_heap *heap,
+						struct ion_buffer *buffer)
+{
+	int i, j;
+	struct page *page;
+	unsigned int order;
+
+	mutex_lock(&heap->split_page_mutex);
+
+	/*
+	 * Someone may have just split a page and returned the unused portion
+	 * back to the pool, so try allocating from the pool one more time
+	 * before splitting. We want to maintain large page sizes when
+	 * possible.
+	 */
+	page = alloc_from_secure_pool_order(heap, buffer, 0);
+	if (page)
+		goto got_page;
+
+	for (i = num_orders - 2; i >= 0; i--) {
+		order = orders[i];
+		page = alloc_from_secure_pool_order(heap, buffer, order);
+		if (!page)
+			continue;
+
+		split_page(page, order);
+		break;
+	}
+	/*
+	 * Return the remaining order-0 pages to the pool.
+	 * SetPagePrivate flag to mark memory as secure.
+	 */
+	if (page) {
+		for (j = 1; j < (1 << order); j++) {
+			SetPagePrivate(page + j);
+			free_buffer_page(heap, buffer, page + j, 0);
+		}
+	}
+got_page:
+	mutex_unlock(&heap->split_page_mutex);
+
+	return page;
+}
+
+static struct page_info *alloc_largest_available(struct ion_system_heap *heap,
 					    struct ion_buffer *buffer,
 					    unsigned long size,
 					    unsigned int max_order)
 {
 	struct page *page;
+	struct page_info *info;
 	int i;
+	bool from_pool;
+
+	info = kmalloc(sizeof(struct page_info), GFP_KERNEL);
+	if (!info)
+		return NULL;
 
 	for (i = 0; i < num_orders; i++) {
 		if (size < order_to_size(orders[i]))
 			continue;
 		if (max_order < orders[i])
 			continue;
-
-		page = alloc_buffer_page(heap, buffer, orders[i]);
+		from_pool = !(buffer->flags & ION_FLAG_POOL_FORCE_ALLOC);
+		page = alloc_buffer_page(heap, buffer, orders[i], &from_pool);
 		if (!page)
 			continue;
 
-		return page;
+		info->page = page;
+		info->order = orders[i];
+		info->from_pool = from_pool;
+		INIT_LIST_HEAD(&info->list);
+		return info;
 	}
+	kfree(info);
 
 	return NULL;
 }
 
+static struct page_info *alloc_from_pool_preferred(
+		struct ion_system_heap *heap, struct ion_buffer *buffer,
+		unsigned long size, unsigned int max_order)
+{
+	struct page *page;
+	struct page_info *info;
+	int i;
+
+	if (buffer->flags & ION_FLAG_POOL_FORCE_ALLOC)
+		goto force_alloc;
+
+	info = kmalloc(sizeof(*info), GFP_KERNEL);
+	if (!info)
+		return NULL;
+
+	for (i = 0; i < num_orders; i++) {
+		if (size < order_to_size(orders[i]))
+			continue;
+		if (max_order < orders[i])
+			continue;
+
+		page = alloc_from_secure_pool_order(heap, buffer, orders[i]);
+		if (!page)
+			continue;
+
+		info->page = page;
+		info->order = orders[i];
+		info->from_pool = true;
+		INIT_LIST_HEAD(&info->list);
+		return info;
+	}
+
+	page = split_page_from_secure_pool(heap, buffer);
+	if (page) {
+		info->page = page;
+		info->order = 0;
+		info->from_pool = true;
+		INIT_LIST_HEAD(&info->list);
+		return info;
+	}
+
+	kfree(info);
+force_alloc:
+	return alloc_largest_available(heap, buffer, size, max_order);
+}
+
+static unsigned int process_info(struct page_info *info,
+				 struct scatterlist *sg,
+				 struct scatterlist *sg_sync,
+				 struct pages_mem *data, unsigned int i)
+{
+	struct page *page = info->page;
+	unsigned int j;
+
+	if (sg_sync) {
+		sg_set_page(sg_sync, page, (1 << info->order) * PAGE_SIZE, 0);
+		sg_dma_address(sg_sync) = page_to_phys(page);
+	}
+	sg_set_page(sg, page, (1 << info->order) * PAGE_SIZE, 0);
+	/*
+	 * This is not correct - sg_dma_address needs a dma_addr_t
+	 * that is valid for the targeted device, but this works
+	 * on the currently targeted hardware.
+	 */
+	sg_dma_address(sg) = page_to_phys(page);
+	if (data) {
+		for (j = 0; j < (1 << info->order); ++j)
+			data->pages[i++] = nth_page(page, j);
+	}
+	list_del(&info->list);
+	kfree(info);
+	return i;
+}
+
 static int ion_system_heap_allocate(struct ion_heap *heap,
 				     struct ion_buffer *buffer,
 				     unsigned long size, unsigned long align,
@@ -126,12 +340,21 @@
 							struct ion_system_heap,
 							heap);
 	struct sg_table *table;
+	struct sg_table table_sync = {0};
 	struct scatterlist *sg;
+	struct scatterlist *sg_sync;
+	int ret;
 	struct list_head pages;
-	struct page *page, *tmp_page;
+	struct list_head pages_from_pool;
+	struct page_info *info, *tmp_info;
 	int i = 0;
+	unsigned int nents_sync = 0;
 	unsigned long size_remaining = PAGE_ALIGN(size);
 	unsigned int max_order = orders[0];
+	struct pages_mem data;
+	unsigned int sz;
+	int vmid = get_secure_vmid(buffer->flags);
+	struct device *dev = heap->priv;
 
 	if (align > PAGE_SIZE)
 		return -EINVAL;
@@ -139,84 +362,253 @@
 	if (size / PAGE_SIZE > totalram_pages / 2)
 		return -ENOMEM;
 
+	data.size = 0;
 	INIT_LIST_HEAD(&pages);
+	INIT_LIST_HEAD(&pages_from_pool);
+
 	while (size_remaining > 0) {
-		page = alloc_largest_available(sys_heap, buffer, size_remaining,
+		if (is_secure_vmid_valid(vmid))
+			info = alloc_from_pool_preferred(
+					sys_heap, buffer, size_remaining,
 						max_order);
-		if (!page)
-			goto free_pages;
-		list_add_tail(&page->lru, &pages);
-		size_remaining -= PAGE_SIZE << compound_order(page);
-		max_order = compound_order(page);
+		else
+			info = alloc_largest_available(
+					sys_heap, buffer, size_remaining,
+					max_order);
+
+		if (!info)
+			goto err;
+
+		sz = (1 << info->order) * PAGE_SIZE;
+
+		if (info->from_pool) {
+			list_add_tail(&info->list, &pages_from_pool);
+		} else {
+			list_add_tail(&info->list, &pages);
+			data.size += sz;
+			++nents_sync;
+		}
+		size_remaining -= sz;
+		max_order = info->order;
 		i++;
 	}
-	table = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
+
+	ret = msm_ion_heap_alloc_pages_mem(&data);
+
+	if (ret)
+		goto err;
+
+	table = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
 	if (!table)
-		goto free_pages;
+		goto err_free_data_pages;
 
-	if (sg_alloc_table(table, i, GFP_KERNEL))
-		goto free_table;
+	ret = sg_alloc_table(table, i, GFP_KERNEL);
+	if (ret)
+		goto err1;
 
+	if (nents_sync) {
+		ret = sg_alloc_table(&table_sync, nents_sync, GFP_KERNEL);
+		if (ret)
+			goto err_free_sg;
+	}
+
+	i = 0;
 	sg = table->sgl;
-	list_for_each_entry_safe(page, tmp_page, &pages, lru) {
-		sg_set_page(sg, page, PAGE_SIZE << compound_order(page), 0);
+	sg_sync = table_sync.sgl;
+
+	/*
+	 * We now have two separate lists. One list contains pages from the
+	 * pool and the other pages from buddy. We want to merge these
+	 * together while preserving the ordering of the pages (higher order
+	 * first).
+	 */
+	do {
+		info = list_first_entry_or_null(&pages, struct page_info, list);
+		tmp_info = list_first_entry_or_null(&pages_from_pool,
+							struct page_info, list);
+		if (info && tmp_info) {
+			if (info->order >= tmp_info->order) {
+				i = process_info(info, sg, sg_sync, &data, i);
+				sg_sync = sg_next(sg_sync);
+			} else {
+				i = process_info(tmp_info, sg, NULL, NULL, i);
+			}
+		} else if (info) {
+			i = process_info(info, sg, sg_sync, &data, i);
+			sg_sync = sg_next(sg_sync);
+		} else if (tmp_info) {
+			i = process_info(tmp_info, sg, NULL, NULL, i);
+		} else {
+			BUG();
+		}
 		sg = sg_next(sg);
-		list_del(&page->lru);
+
+	} while (sg);
+
+	ret = msm_ion_heap_pages_zero(data.pages, data.size >> PAGE_SHIFT);
+	if (ret) {
+		pr_err("Unable to zero pages\n");
+		goto err_free_sg2;
+	}
+
+	if (nents_sync) {
+		dma_sync_sg_for_device(dev, table_sync.sgl, table_sync.nents,
+				       DMA_BIDIRECTIONAL);
+		if (vmid > 0) {
+			ret = ion_system_secure_heap_assign_sg(&table_sync,
+							       vmid);
+			if (ret)
+				goto err_free_sg2;
+		}
 	}
 
 	buffer->priv_virt = table;
+	if (nents_sync)
+		sg_free_table(&table_sync);
+	msm_ion_heap_free_pages_mem(&data);
 	return 0;
 
-free_table:
+err_free_sg2:
+	/* We failed to zero buffers. Bypass pool */
+	buffer->private_flags |= ION_PRIV_FLAG_SHRINKER_FREE;
+
+	if (vmid > 0)
+		ion_system_secure_heap_unassign_sg(table, vmid);
+
+	for_each_sg(table->sgl, sg, table->nents, i)
+		free_buffer_page(sys_heap, buffer, sg_page(sg),
+				get_order(sg->length));
+	if (nents_sync)
+		sg_free_table(&table_sync);
+err_free_sg:
+	sg_free_table(table);
+err1:
 	kfree(table);
-free_pages:
-	list_for_each_entry_safe(page, tmp_page, &pages, lru)
-		free_buffer_page(sys_heap, buffer, page);
+err_free_data_pages:
+	msm_ion_heap_free_pages_mem(&data);
+err:
+	list_for_each_entry_safe(info, tmp_info, &pages, list) {
+		free_buffer_page(sys_heap, buffer, info->page, info->order);
+		kfree(info);
+	}
+	list_for_each_entry_safe(info, tmp_info, &pages_from_pool, list) {
+		free_buffer_page(sys_heap, buffer, info->page, info->order);
+		kfree(info);
+	}
 	return -ENOMEM;
 }
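
The do/while merge above interleaves two lists that are each already ordered by descending page order: fresh buddy pages, which also need an entry in table_sync for cache maintenance, and pool pages, which are already clean and need no sync entry. Restated as a standalone sketch, with take_buddy()/take_pool() as hypothetical stand-ins for the process_info() bookkeeping:

	while (!list_empty(&pages) || !list_empty(&pages_from_pool)) {
		struct page_info *a = list_first_entry_or_null(&pages,
						struct page_info, list);
		struct page_info *b = list_first_entry_or_null(
				&pages_from_pool, struct page_info, list);

		if (a && (!b || a->order >= b->order))
			take_buddy(a);	/* fills both sg and sg_sync */
		else
			take_pool(b);	/* fills sg only */
	}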
 
-static void ion_system_heap_free(struct ion_buffer *buffer)
+void ion_system_heap_free(struct ion_buffer *buffer)
 {
-	struct ion_system_heap *sys_heap = container_of(buffer->heap,
+	struct ion_heap *heap = buffer->heap;
+	struct ion_system_heap *sys_heap = container_of(heap,
 							struct ion_system_heap,
 							heap);
-	struct sg_table *table = buffer->sg_table;
-	bool cached = ion_buffer_cached(buffer);
+	struct sg_table *table = buffer->priv_virt;
 	struct scatterlist *sg;
+	LIST_HEAD(pages);
 	int i;
+	int vmid = get_secure_vmid(buffer->flags);
+	struct device *dev = heap->priv;
 
-	/*
-	 *  uncached pages come from the page pools, zero them before returning
-	 *  for security purposes (other allocations are zerod at
-	 *  alloc time
-	 */
-	if (!cached && !(buffer->private_flags & ION_PRIV_FLAG_SHRINKER_FREE))
-		ion_heap_buffer_zero(buffer);
+	if (!(buffer->private_flags & ION_PRIV_FLAG_SHRINKER_FREE) &&
+	    !(buffer->flags & ION_FLAG_POOL_FORCE_ALLOC)) {
+		if (vmid < 0)
+			msm_ion_heap_sg_table_zero(dev, table, buffer->size);
+	} else if (vmid > 0) {
+		if (ion_system_secure_heap_unassign_sg(table, vmid))
+			return;
+	}
 
 	for_each_sg(table->sgl, sg, table->nents, i)
-		free_buffer_page(sys_heap, buffer, sg_page(sg));
+		free_buffer_page(sys_heap, buffer, sg_page(sg),
+				get_order(sg->length));
 	sg_free_table(table);
 	kfree(table);
 }
 
-static struct sg_table *ion_system_heap_map_dma(struct ion_heap *heap,
+struct sg_table *ion_system_heap_map_dma(struct ion_heap *heap,
 						struct ion_buffer *buffer)
 {
 	return buffer->priv_virt;
 }
 
-static void ion_system_heap_unmap_dma(struct ion_heap *heap,
+void ion_system_heap_unmap_dma(struct ion_heap *heap,
 				      struct ion_buffer *buffer)
 {
 }
 
+static int ion_secure_page_pool_shrink(
+		struct ion_system_heap *sys_heap,
+		int vmid, int order_idx, int nr_to_scan)
+{
+	int ret, freed = 0;
+	int order = orders[order_idx];
+	struct page *page, *tmp;
+	struct sg_table sgt;
+	struct scatterlist *sg;
+	struct ion_page_pool *pool = sys_heap->secure_pools[vmid][order_idx];
+	LIST_HEAD(pages);
+
+	if (nr_to_scan == 0)
+		return ion_page_pool_total(pool, true);
+
+	while (freed < nr_to_scan) {
+		page = ion_page_pool_alloc_pool_only(pool);
+		if (!page)
+			break;
+		list_add(&page->lru, &pages);
+		freed += (1 << order);
+	}
+
+	if (!freed)
+		return freed;
+
+	ret = sg_alloc_table(&sgt, (freed >> order), GFP_KERNEL);
+	if (ret)
+		goto out1;
+	sg = sgt.sgl;
+	list_for_each_entry(page, &pages, lru) {
+		sg_set_page(sg, page, (1 << order) * PAGE_SIZE, 0);
+		sg_dma_address(sg) = page_to_phys(page);
+		sg = sg_next(sg);
+	}
+
+	if (ion_system_secure_heap_unassign_sg(&sgt, vmid))
+		goto out2;
+
+	list_for_each_entry_safe(page, tmp, &pages, lru) {
+		list_del(&page->lru);
+		ion_page_pool_free_immediate(pool, page);
+	}
+
+	sg_free_table(&sgt);
+	return freed;
+
+out1:
+	/* Restore pages to secure pool */
+	list_for_each_entry_safe(page, tmp, &pages, lru) {
+		list_del(&page->lru);
+		ion_page_pool_free(pool, page);
+	}
+	return 0;
+out2:
+	/*
+	 * The security state of the pages is unknown after a failure;
+	 * They can neither be added back to the secure pool nor buddy system.
+	 */
+	sg_free_table(&sgt);
+	return 0;
+}
+
 static int ion_system_heap_shrink(struct ion_heap *heap, gfp_t gfp_mask,
 					int nr_to_scan)
 {
 	struct ion_system_heap *sys_heap;
 	int nr_total = 0;
-	int i, nr_freed;
+	int i, j, nr_freed = 0;
 	int only_scan = 0;
+	struct ion_page_pool *pool;
 
 	sys_heap = container_of(heap, struct ion_system_heap, heap);
 
@@ -224,9 +616,19 @@
 		only_scan = 1;
 
 	for (i = 0; i < num_orders; i++) {
-		struct ion_page_pool *pool = sys_heap->pools[i];
+		nr_freed = 0;
+
+		for (j = 0; j < VMID_LAST; j++) {
+			if (is_secure_vmid_valid(j))
+				nr_freed += ion_secure_page_pool_shrink(
+						sys_heap, j, i, nr_to_scan);
+		}
+
+		pool = sys_heap->uncached_pools[i];
+		nr_freed += ion_page_pool_shrink(pool, gfp_mask, nr_to_scan);
 
-		nr_freed = ion_page_pool_shrink(pool, gfp_mask, nr_to_scan);
+		pool = sys_heap->cached_pools[i];
+		nr_freed += ion_page_pool_shrink(pool, gfp_mask, nr_to_scan);
 		nr_total += nr_freed;
 
 		if (!only_scan) {
@@ -258,53 +660,196 @@
 	struct ion_system_heap *sys_heap = container_of(heap,
 							struct ion_system_heap,
 							heap);
-	int i;
+	bool use_seq = s != NULL;
+	unsigned long uncached_total = 0;
+	unsigned long cached_total = 0;
+	unsigned long secure_total = 0;
+	struct ion_page_pool *pool;
+	int i, j;
 
 	for (i = 0; i < num_orders; i++) {
-		struct ion_page_pool *pool = sys_heap->pools[i];
+		pool = sys_heap->uncached_pools[i];
+		if (use_seq) {
+			seq_printf(s,
+				"%d order %u highmem pages in uncached pool = %lu total\n",
+				pool->high_count, pool->order,
+				(1 << pool->order) * PAGE_SIZE *
+					pool->high_count);
+			seq_printf(s,
+				"%d order %u lowmem pages in uncached pool = %lu total\n",
+				pool->low_count, pool->order,
+				(1 << pool->order) * PAGE_SIZE *
+					pool->low_count);
+		}
+
+		uncached_total += (1 << pool->order) * PAGE_SIZE *
+			pool->high_count;
+		uncached_total += (1 << pool->order) * PAGE_SIZE *
+			pool->low_count;
+	}
 
-		seq_printf(s, "%d order %u highmem pages in pool = %lu total\n",
+	for (i = 0; i < num_orders; i++) {
+		pool = sys_heap->cached_pools[i];
+		if (use_seq) {
+			seq_printf(s,
+				"%d order %u highmem pages in cached pool = %lu total\n",
 			   pool->high_count, pool->order,
-			   (PAGE_SIZE << pool->order) * pool->high_count);
-		seq_printf(s, "%d order %u lowmem pages in pool = %lu total\n",
+				(1 << pool->order) * PAGE_SIZE * pool->high_count);
+			seq_printf(s,
+				"%d order %u lowmem pages in cached pool = %lu total\n",
 			   pool->low_count, pool->order,
-			   (PAGE_SIZE << pool->order) * pool->low_count);
+				(1 << pool->order) * PAGE_SIZE *
+					pool->low_count);
+		}
+
+		cached_total += (1 << pool->order) * PAGE_SIZE *
+			pool->high_count;
+		cached_total += (1 << pool->order) * PAGE_SIZE *
+			pool->low_count;
+	}
+
+	for (i = 0; i < num_orders; i++) {
+		for (j = 0; j < VMID_LAST; j++) {
+			if (!is_secure_vmid_valid(j))
+				continue;
+			pool = sys_heap->secure_pools[j][i];
+
+			if (use_seq) {
+				seq_printf(s,
+					   "VMID %d: %d order %u highmem pages in secure pool = %lu total\n",
+					   j, pool->high_count, pool->order,
+					   (1 << pool->order) * PAGE_SIZE *
+						pool->high_count);
+				seq_printf(s,
+					   "VMID %d: %d order %u lowmem pages in secure pool = %lu total\n",
+					   j, pool->low_count, pool->order,
+					   (1 << pool->order) * PAGE_SIZE *
+						pool->low_count);
+			}
+
+			secure_total += (1 << pool->order) * PAGE_SIZE *
+					 pool->high_count;
+			secure_total += (1 << pool->order) * PAGE_SIZE *
+					 pool->low_count;
+		}
+	}
+
+	if (use_seq) {
+		seq_puts(s, "--------------------------------------------\n");
+		seq_printf(s, "uncached pool = %lu cached pool = %lu secure pool = %lu\n",
+			   uncached_total, cached_total, secure_total);
+		seq_printf(s, "pool total (uncached + cached + secure) = %lu\n",
+			   uncached_total + cached_total + secure_total);
+		seq_puts(s, "--------------------------------------------\n");
+	} else {
+		pr_info("-------------------------------------------------\n");
+		pr_info("uncached pool = %lu cached pool = %lu secure pool = %lu\n",
+			uncached_total, cached_total, secure_total);
+		pr_info("pool total (uncached + cached + secure) = %lu\n",
+			uncached_total + cached_total + secure_total);
+		pr_info("-------------------------------------------------\n");
+	}
+
+	return 0;
+}
+
+static void ion_system_heap_destroy_pools(struct ion_page_pool **pools)
+{
+	int i;
+	for (i = 0; i < num_orders; i++)
+		if (pools[i]) {
+			ion_page_pool_destroy(pools[i]);
+			pools[i] = NULL;
+		}
+}
+
+/**
+ * ion_system_heap_create_pools - Creates pools for all orders
+ *
+ * If this fails you don't need to destroy any pools. It's all or
+ * nothing. If it succeeds you'll eventually need to use
+ * ion_system_heap_destroy_pools to destroy the pools.
+ */
+static int ion_system_heap_create_pools(struct device *dev,
+					struct ion_page_pool **pools)
+{
+	int i;
+	for (i = 0; i < num_orders; i++) {
+		struct ion_page_pool *pool;
+		gfp_t gfp_flags = low_order_gfp_flags;
+
+		if (orders[i])
+			gfp_flags = high_order_gfp_flags;
+		pool = ion_page_pool_create(dev, gfp_flags, orders[i]);
+		if (!pool)
+			goto err_create_pool;
+		pools[i] = pool;
 	}
 	return 0;
+err_create_pool:
+	ion_system_heap_destroy_pools(pools);
+	return 1;
 }
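+
+/*
+ * Editor's illustrative sketch (not part of the patch): how the
+ * all-or-nothing constructor above pairs with its destructor. The
+ * caller is hypothetical; only the two helpers come from this file.
+ */
+#if 0
+static int example_setup(struct device *dev, struct ion_page_pool **pools)
+{
+	if (ion_system_heap_create_pools(dev, pools))
+		return -ENOMEM;	/* failed: nothing to clean up */
+	/* ... pools[0..num_orders - 1] are usable here ... */
+	ion_system_heap_destroy_pools(pools);	/* required once created */
+	return 0;
+}
+#endif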
 
-struct ion_heap *ion_system_heap_create(struct ion_platform_heap *unused)
+struct ion_heap *ion_system_heap_create(struct ion_platform_heap *data)
 {
 	struct ion_system_heap *heap;
 	int i;
+	int pools_size = sizeof(struct ion_page_pool *) * num_orders;
+	struct device *dev = data->priv;
 
-	heap = kzalloc(sizeof(struct ion_system_heap) +
-			sizeof(struct ion_page_pool *) * num_orders,
-			GFP_KERNEL);
+	heap = kzalloc(sizeof(struct ion_system_heap), GFP_KERNEL);
 	if (!heap)
 		return ERR_PTR(-ENOMEM);
 	heap->heap.ops = &system_heap_ops;
 	heap->heap.type = ION_HEAP_TYPE_SYSTEM;
 	heap->heap.flags = ION_HEAP_FLAG_DEFER_FREE;
 
-	for (i = 0; i < num_orders; i++) {
-		struct ion_page_pool *pool;
-		gfp_t gfp_flags = low_order_gfp_flags;
+	heap->uncached_pools = kzalloc(pools_size, GFP_KERNEL);
+	if (!heap->uncached_pools)
+		goto err_alloc_uncached_pools;
 
-		if (orders[i] > 4)
-			gfp_flags = high_order_gfp_flags;
-		pool = ion_page_pool_create(gfp_flags, orders[i]);
-		if (!pool)
-			goto destroy_pools;
-		heap->pools[i] = pool;
+	heap->cached_pools = kzalloc(pools_size, GFP_KERNEL);
+	if (!heap->cached_pools)
+		goto err_alloc_cached_pools;
+
+	for (i = 0; i < VMID_LAST; i++) {
+		if (is_secure_vmid_valid(i)) {
+			heap->secure_pools[i] = kzalloc(pools_size, GFP_KERNEL);
+			if (!heap->secure_pools[i])
+				goto err_create_secure_pools;
+			if (ion_system_heap_create_pools(
+					dev, heap->secure_pools[i]))
+				goto err_create_secure_pools;
 	}
+	}
+
+	if (ion_system_heap_create_pools(dev, heap->uncached_pools))
+		goto err_create_uncached_pools;
+
+	if (ion_system_heap_create_pools(dev, heap->cached_pools))
+		goto err_create_cached_pools;
+
+	mutex_init(&heap->split_page_mutex);
 
 	heap->heap.debug_show = ion_system_heap_debug_show;
 	return &heap->heap;
 
-destroy_pools:
-	while (i--)
-		ion_page_pool_destroy(heap->pools[i]);
+err_create_cached_pools:
+	ion_system_heap_destroy_pools(heap->uncached_pools);
+err_create_uncached_pools:
+	kfree(heap->cached_pools);
+err_create_secure_pools:
+	for (i = 0; i < VMID_LAST; i++) {
+		if (heap->secure_pools[i]) {
+			ion_system_heap_destroy_pools(heap->secure_pools[i]);
+			kfree(heap->secure_pools[i]);
+		}
+	}
+err_alloc_cached_pools:
+	kfree(heap->uncached_pools);
+err_alloc_uncached_pools:
 	kfree(heap);
 	return ERR_PTR(-ENOMEM);
 }
@@ -314,10 +859,20 @@
 	struct ion_system_heap *sys_heap = container_of(heap,
 							struct ion_system_heap,
 							heap);
-	int i;
+	int i, j;
 
-	for (i = 0; i < num_orders; i++)
-		ion_page_pool_destroy(sys_heap->pools[i]);
+	for (i = 0; i < VMID_LAST; i++) {
+		if (!is_secure_vmid_valid(i))
+			continue;
+		for (j = 0; j < num_orders; j++)
+			ion_secure_page_pool_shrink(sys_heap, i, j, UINT_MAX);
+
+		ion_system_heap_destroy_pools(sys_heap->secure_pools[i]);
+	}
+	ion_system_heap_destroy_pools(sys_heap->uncached_pools);
+	ion_system_heap_destroy_pools(sys_heap->cached_pools);
+	kfree(sys_heap->uncached_pools);
+	kfree(sys_heap->cached_pools);
 	kfree(sys_heap);
 }
 
@@ -332,11 +887,12 @@
 	struct sg_table *table;
 	unsigned long i;
 	int ret;
+	struct device *dev = heap->priv;
 
 	if (align > (PAGE_SIZE << order))
 		return -EINVAL;
 
-	page = alloc_pages(low_order_gfp_flags, order);
+	page = alloc_pages(low_order_gfp_flags | __GFP_ZERO, order);
 	if (!page)
 		return -ENOMEM;
 
@@ -346,34 +902,32 @@
 	for (i = len >> PAGE_SHIFT; i < (1 << order); i++)
 		__free_page(page + i);
 
-	table = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
+	table = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
 	if (!table) {
 		ret = -ENOMEM;
-		goto free_pages;
+		goto out;
 	}
 
 	ret = sg_alloc_table(table, 1, GFP_KERNEL);
 	if (ret)
-		goto free_table;
+		goto out;
 
 	sg_set_page(table->sgl, page, len, 0);
 
 	buffer->priv_virt = table;
 
-	ion_pages_sync_for_device(NULL, page, len, DMA_BIDIRECTIONAL);
+	ion_pages_sync_for_device(dev, page, len, DMA_BIDIRECTIONAL);
 
 	return 0;
 
-free_table:
-	kfree(table);
-free_pages:
+out:
 	for (i = 0; i < len >> PAGE_SHIFT; i++)
 		__free_page(page + i);
-
+	kfree(table);
 	return ret;
 }
 
-static void ion_system_contig_heap_free(struct ion_buffer *buffer)
+void ion_system_contig_heap_free(struct ion_buffer *buffer)
 {
 	struct sg_table *table = buffer->priv_virt;
 	struct page *page = sg_page(table->sgl);
@@ -397,13 +951,13 @@
 	return 0;
 }
 
-static struct sg_table *ion_system_contig_heap_map_dma(struct ion_heap *heap,
+struct sg_table *ion_system_contig_heap_map_dma(struct ion_heap *heap,
 						struct ion_buffer *buffer)
 {
 	return buffer->priv_virt;
 }
 
-static void ion_system_contig_heap_unmap_dma(struct ion_heap *heap,
+void ion_system_contig_heap_unmap_dma(struct ion_heap *heap,
 					     struct ion_buffer *buffer)
 {
 }
diff -ruw linux-4.4.115/drivers/staging/android/ion/Kconfig linux-4.4.115-fbx/drivers/staging/android/ion/Kconfig
--- linux-4.4.115/drivers/staging/android/ion/Kconfig	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/staging/android/ion/Kconfig	2019-01-22 16:16:26.711275457 +0100
@@ -33,3 +33,24 @@
 	help
 	  Choose this option if you wish to use ion on an nVidia Tegra.
 
+config ION_POOL_CACHE_POLICY
+	bool "Ion set page pool cache policy"
+	depends on ION && X86
+	default y if X86
+	help
+	  Choose this option if you need to explicitly set the cache policy
+	  of the pages in the page pool.
+
+config ION_MSM
+	tristate "Ion for MSM"
+	depends on ARCH_QCOM && ION
+	select MSM_SECURE_BUFFER
+	help
+	  Choose this option if you wish to use ion on an MSM target.
+
+config ALLOC_BUFFERS_IN_4K_CHUNKS
+	bool "Turn off allocation optimization and allocate only 4K pages"
+	depends on ARCH_QCOM && ION
+	help
+	  Choose this option if you want ION to allocate buffers in
+	  only 4KB chunks.
diff -ruw linux-4.4.115/drivers/staging/android/ion/Makefile linux-4.4.115-fbx/drivers/staging/android/ion/Makefile
--- linux-4.4.115/drivers/staging/android/ion/Makefile	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/staging/android/ion/Makefile	2019-01-22 16:16:26.711275457 +0100
@@ -1,10 +1,13 @@
 obj-$(CONFIG_ION) +=	ion.o ion_heap.o ion_page_pool.o ion_system_heap.o \
-			ion_carveout_heap.o ion_chunk_heap.o ion_cma_heap.o
+			ion_carveout_heap.o ion_chunk_heap.o
+ifdef CONFIG_ION_MSM
+obj-$(CONFIG_CMA) += ion_cma_heap.o ion_cma_secure_heap.o
+endif
 obj-$(CONFIG_ION_TEST) += ion_test.o
 ifdef CONFIG_COMPAT
 obj-$(CONFIG_ION) += compat_ion.o
 endif
-
 obj-$(CONFIG_ION_DUMMY) += ion_dummy_driver.o
 obj-$(CONFIG_ION_TEGRA) += tegra/
-
+obj-$(CONFIG_ION_MSM) += msm/
+obj-$(CONFIG_ION_MSM) += ion_system_secure_heap.o
diff -ruw linux-4.4.115/drivers/staging/android/Kconfig linux-4.4.115-fbx/drivers/staging/android/Kconfig
--- linux-4.4.115/drivers/staging/android/Kconfig	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/staging/android/Kconfig	2019-10-29 09:26:24.841214902 +0100
@@ -38,6 +38,15 @@
 	  scripts (/init.rc), and it defines priority values with minimum free memory size
 	  for each priority.
 
+config ANDROID_LOW_MEMORY_KILLER_AUTODETECT_OOM_ADJ_VALUES
+	bool "Android Low Memory Killer: detect oom_adj values"
+	depends on ANDROID_LOW_MEMORY_KILLER
+	default y
+	---help---
+	  Detect oom_adj values written to
+	  /sys/module/lowmemorykiller/parameters/adj and convert them
+	  to oom_score_adj values.
+
 config SYNC
 	bool "Synchronization framework"
 	default n
@@ -66,8 +75,25 @@
 	  *WARNING* improper use of this can result in deadlocking kernel
 	  drivers from userspace.
 
+config ONESHOT_SYNC
+	bool "One shot sync objects"
+	depends on SYNC
+	help
+	  This sync driver provides a way to create sync objects that may
+	  be signaled by userspace. Unlike other sync objects, the
+	  sync objects created by this driver may be signaled in any order
+	  without changing the state of other sync objects on the timeline.
+
+config ONESHOT_SYNC_USER
+	bool "Userspace API for ONESHOT_SYNC"
+	depends on SYNC
+	help
+	  Provide a userspace API for creating oneshot sync objects.
+
 source "drivers/staging/android/ion/Kconfig"
 
+source "drivers/staging/android/fiq_debugger/Kconfig"
+
 endif # if ANDROID
 
 endmenu
diff -ruw linux-4.4.115/drivers/staging/android/lowmemorykiller.c linux-4.4.115-fbx/drivers/staging/android/lowmemorykiller.c
--- linux-4.4.115/drivers/staging/android/lowmemorykiller.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/staging/android/lowmemorykiller.c	2019-10-29 09:26:24.849214980 +0100
@@ -42,6 +42,25 @@
 #include <linux/rcupdate.h>
 #include <linux/profile.h>
 #include <linux/notifier.h>
+#include <linux/mutex.h>
+#include <linux/delay.h>
+#include <linux/swap.h>
+#include <linux/fs.h>
+#include <linux/cpuset.h>
+#include <linux/vmpressure.h>
+#include <linux/zcache.h>
+
+#define CREATE_TRACE_POINTS
+#include <trace/events/almk.h>
+
+#ifdef CONFIG_HIGHMEM
+#define _ZONE ZONE_HIGHMEM
+#else
+#define _ZONE ZONE_NORMAL
+#endif
+
+#define CREATE_TRACE_POINTS
+#include "trace/lowmemorykiller.h"
 
 static uint32_t lowmem_debug_level = 1;
 static short lowmem_adj[6] = {
@@ -58,6 +77,7 @@
 	16 * 1024,	/* 64MB */
 };
 static int lowmem_minfree_size = 4;
+static int lmk_fast_run = 1;
 
 static unsigned long lowmem_deathpending_timeout;
 
@@ -76,6 +96,314 @@
 		global_page_state(NR_INACTIVE_FILE);
 }
 
+static atomic_t shift_adj = ATOMIC_INIT(0);
+static short adj_max_shift = 353;
+module_param_named(adj_max_shift, adj_max_shift, short,
+                   S_IRUGO | S_IWUSR);
+
+/* User knob to enable/disable adaptive lmk feature */
+static int enable_adaptive_lmk;
+module_param_named(enable_adaptive_lmk, enable_adaptive_lmk, int,
+		   S_IRUGO | S_IWUSR);
+
+/*
+ * This parameter controls the behaviour of LMK when vmpressure is in
+ * the range of 90-94: adaptive LMK triggers based on the number of
+ * file pages relative to vmpressure_file_min. Usually this is a
+ * pseudo minfree value, higher than the highest configured value in
+ * the minfree array.
+ */
+static int vmpressure_file_min;
+module_param_named(vmpressure_file_min, vmpressure_file_min, int,
+		   S_IRUGO | S_IWUSR);
+
+enum {
+	VMPRESSURE_NO_ADJUST = 0,
+	VMPRESSURE_ADJUST_ENCROACH,
+	VMPRESSURE_ADJUST_NORMAL,
+};
+
+int adjust_minadj(short *min_score_adj)
+{
+	int ret = VMPRESSURE_NO_ADJUST;
+
+	if (!enable_adaptive_lmk)
+		return 0;
+
+	if (atomic_read(&shift_adj) &&
+	    (*min_score_adj > adj_max_shift)) {
+		if (*min_score_adj == OOM_SCORE_ADJ_MAX + 1)
+			ret = VMPRESSURE_ADJUST_ENCROACH;
+		else
+			ret = VMPRESSURE_ADJUST_NORMAL;
+		*min_score_adj = adj_max_shift;
+	}
+	atomic_set(&shift_adj, 0);
+
+	return ret;
+}
+
+static int lmk_vmpressure_notifier(struct notifier_block *nb,
+				   unsigned long action, void *data)
+{
+	int other_free = 0, other_file = 0;
+	unsigned long pressure = action;
+	int array_size = ARRAY_SIZE(lowmem_adj);
+
+	if (!enable_adaptive_lmk)
+		return 0;
+
+	if (pressure >= 95) {
+		other_file = global_page_state(NR_FILE_PAGES) + zcache_pages() -
+			global_page_state(NR_SHMEM) -
+			total_swapcache_pages();
+		other_free = global_page_state(NR_FREE_PAGES);
+
+		atomic_set(&shift_adj, 1);
+		trace_almk_vmpressure(pressure, other_free, other_file);
+	} else if (pressure >= 90) {
+		if (lowmem_adj_size < array_size)
+			array_size = lowmem_adj_size;
+		if (lowmem_minfree_size < array_size)
+			array_size = lowmem_minfree_size;
+
+		other_file = global_page_state(NR_FILE_PAGES) + zcache_pages() -
+			global_page_state(NR_SHMEM) -
+			total_swapcache_pages();
+
+		other_free = global_page_state(NR_FREE_PAGES);
+
+		if ((other_free < lowmem_minfree[array_size - 1]) &&
+		    (other_file < vmpressure_file_min)) {
+			atomic_set(&shift_adj, 1);
+			trace_almk_vmpressure(pressure, other_free, other_file);
+		}
+	} else if (atomic_read(&shift_adj)) {
+		other_file = global_page_state(NR_FILE_PAGES) + zcache_pages() -
+			global_page_state(NR_SHMEM) -
+			total_swapcache_pages();
+		other_free = global_page_state(NR_FREE_PAGES);
+
+		/*
+		 * shift_adj would have been set by a previous invocation
+		 * of the notifier, which has not yet been followed by a
+		 * lowmem_shrink.
+		 * Since vmpressure has improved, reset shift_adj to avoid
+		 * false adaptive LMK trigger.
+		 */
+		trace_almk_vmpressure(pressure, other_free, other_file);
+		atomic_set(&shift_adj, 0);
+	}
+
+	return 0;
+}
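+
+/*
+ * Editor's worked example (not part of the patch), with assumed values:
+ * say the last minfree entry is 16384 pages and vmpressure_file_min is
+ * 81250 pages. At vmpressure >= 95 shift_adj is always armed; in the
+ * 90-94 range it is armed only when both other_free < 16384 and
+ * other_file < 81250; below 90 a previously armed shift_adj is cleared,
+ * so an isolated pressure spike does not trigger a later adaptive-LMK
+ * kill.
+ */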
+
+static struct notifier_block lmk_vmpr_nb = {
+	.notifier_call = lmk_vmpressure_notifier,
+};
+
+static int test_task_flag(struct task_struct *p, int flag)
+{
+	struct task_struct *t;
+
+	for_each_thread(p, t) {
+		task_lock(t);
+		if (test_tsk_thread_flag(t, flag)) {
+			task_unlock(t);
+			return 1;
+		}
+		task_unlock(t);
+	}
+
+	return 0;
+}
+
+static int test_task_state(struct task_struct *p, int state)
+{
+	struct task_struct *t;
+
+	for_each_thread(p, t) {
+		task_lock(t);
+		if (t->state & state) {
+			task_unlock(t);
+			return 1;
+		}
+		task_unlock(t);
+	}
+
+	return 0;
+}
+
+static DEFINE_MUTEX(scan_mutex);
+
+int can_use_cma_pages(gfp_t gfp_mask)
+{
+	int can_use = 0;
+	int mtype = gfpflags_to_migratetype(gfp_mask);
+	int i = 0;
+	int *mtype_fallbacks = get_migratetype_fallbacks(mtype);
+
+	if (is_migrate_cma(mtype)) {
+		can_use = 1;
+	} else {
+		for (i = 0;; i++) {
+			int fallbacktype = mtype_fallbacks[i];
+
+			if (is_migrate_cma(fallbacktype)) {
+				can_use = 1;
+				break;
+			}
+
+			if (fallbacktype == MIGRATE_TYPES)
+				break;
+		}
+	}
+	return can_use;
+}
+
+void tune_lmk_zone_param(struct zonelist *zonelist, int classzone_idx,
+					int *other_free, int *other_file,
+					int use_cma_pages)
+{
+	struct zone *zone;
+	struct zoneref *zoneref;
+	int zone_idx;
+
+	for_each_zone_zonelist(zone, zoneref, zonelist, MAX_NR_ZONES) {
+		zone_idx = zonelist_zone_idx(zoneref);
+		if (zone_idx == ZONE_MOVABLE) {
+			if (!use_cma_pages && other_free)
+				*other_free -=
+				    zone_page_state(zone, NR_FREE_CMA_PAGES);
+			continue;
+		}
+
+		if (zone_idx > classzone_idx) {
+			if (other_free != NULL)
+				*other_free -= zone_page_state(zone,
+							       NR_FREE_PAGES);
+			if (other_file != NULL)
+				*other_file -= zone_page_state(zone,
+							       NR_FILE_PAGES)
+					- zone_page_state(zone, NR_SHMEM)
+					- zone_page_state(zone, NR_SWAPCACHE);
+		} else if (zone_idx < classzone_idx) {
+			if (zone_watermark_ok(zone, 0, 0, classzone_idx, 0) &&
+			    other_free) {
+				if (!use_cma_pages) {
+					*other_free -= min(
+					  zone->lowmem_reserve[classzone_idx] +
+					  zone_page_state(
+					    zone, NR_FREE_CMA_PAGES),
+					  zone_page_state(
+					    zone, NR_FREE_PAGES));
+				} else {
+					*other_free -=
+					  zone->lowmem_reserve[classzone_idx];
+				}
+			} else {
+				if (other_free)
+					*other_free -=
+					  zone_page_state(zone, NR_FREE_PAGES);
+			}
+		}
+	}
+}
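+
+/*
+ * Editor's illustration (not part of the patch): for an allocation whose
+ * preferred zone is ZONE_NORMAL (classzone_idx == ZONE_NORMAL), free
+ * pages in ZONE_HIGHMEM (zone_idx > classzone_idx) cannot satisfy the
+ * request, so they are subtracted from *other_free; zones below the
+ * classzone contribute only what lies above their lowmem_reserve, and
+ * free CMA pages are discounted whenever the gfp_mask cannot use them.
+ */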
+
+#ifdef CONFIG_HIGHMEM
+void adjust_gfp_mask(gfp_t *gfp_mask)
+{
+	struct zone *preferred_zone;
+	struct zonelist *zonelist;
+	enum zone_type high_zoneidx;
+
+	if (current_is_kswapd()) {
+		zonelist = node_zonelist(0, *gfp_mask);
+		high_zoneidx = gfp_zone(*gfp_mask);
+		first_zones_zonelist(zonelist, high_zoneidx, NULL,
+				     &preferred_zone);
+
+		if (high_zoneidx == ZONE_NORMAL) {
+			if (zone_watermark_ok_safe(
+					preferred_zone, 0,
+					high_wmark_pages(preferred_zone), 0))
+				*gfp_mask |= __GFP_HIGHMEM;
+		} else if (high_zoneidx == ZONE_HIGHMEM) {
+			*gfp_mask |= __GFP_HIGHMEM;
+		}
+	}
+}
+#else
+void adjust_gfp_mask(gfp_t *unused)
+{
+}
+#endif
+
+void tune_lmk_param(int *other_free, int *other_file, struct shrink_control *sc)
+{
+	gfp_t gfp_mask;
+	struct zone *preferred_zone;
+	struct zonelist *zonelist;
+	enum zone_type high_zoneidx, classzone_idx;
+	unsigned long balance_gap;
+	int use_cma_pages;
+
+	gfp_mask = sc->gfp_mask;
+	adjust_gfp_mask(&gfp_mask);
+
+	zonelist = node_zonelist(0, gfp_mask);
+	high_zoneidx = gfp_zone(gfp_mask);
+	first_zones_zonelist(zonelist, high_zoneidx, NULL, &preferred_zone);
+	classzone_idx = zone_idx(preferred_zone);
+	use_cma_pages = can_use_cma_pages(gfp_mask);
+
+	balance_gap = min(low_wmark_pages(preferred_zone),
+			  (preferred_zone->present_pages +
+			   KSWAPD_ZONE_BALANCE_GAP_RATIO-1) /
+			   KSWAPD_ZONE_BALANCE_GAP_RATIO);
+
+	if (likely(current_is_kswapd() && zone_watermark_ok(preferred_zone, 0,
+			  high_wmark_pages(preferred_zone) + SWAP_CLUSTER_MAX +
+			  balance_gap, 0, 0))) {
+		if (lmk_fast_run)
+			tune_lmk_zone_param(zonelist, classzone_idx, other_free,
+				       other_file, use_cma_pages);
+		else
+			tune_lmk_zone_param(zonelist, classzone_idx, other_free,
+				       NULL, use_cma_pages);
+
+		if (zone_watermark_ok(preferred_zone, 0, 0, _ZONE, 0)) {
+			if (!use_cma_pages) {
+				*other_free -= min(
+				  preferred_zone->lowmem_reserve[_ZONE]
+				  + zone_page_state(
+				    preferred_zone, NR_FREE_CMA_PAGES),
+				  zone_page_state(
+				    preferred_zone, NR_FREE_PAGES));
+			} else {
+				*other_free -=
+				  preferred_zone->lowmem_reserve[_ZONE];
+			}
+		} else {
+			*other_free -= zone_page_state(preferred_zone,
+						      NR_FREE_PAGES);
+		}
+
+		lowmem_print(4, "lowmem_shrink of kswapd tuning for highmem "
+			     "ofree %d, %d\n", *other_free, *other_file);
+	} else {
+		tune_lmk_zone_param(zonelist, classzone_idx, other_free,
+			       other_file, use_cma_pages);
+
+		if (!use_cma_pages) {
+			*other_free -=
+			  zone_page_state(preferred_zone, NR_FREE_CMA_PAGES);
+		}
+
+		lowmem_print(4, "lowmem_shrink tuning for others ofree %d, "
+			     "%d\n", *other_free, *other_file);
+	}
+}
+
 static unsigned long lowmem_scan(struct shrinker *s, struct shrink_control *sc)
 {
 	struct task_struct *tsk;
@@ -83,34 +411,54 @@
 	unsigned long rem = 0;
 	int tasksize;
 	int i;
+	int ret = 0;
 	short min_score_adj = OOM_SCORE_ADJ_MAX + 1;
+	int minfree = 0;
 	int selected_tasksize = 0;
 	short selected_oom_score_adj;
 	int array_size = ARRAY_SIZE(lowmem_adj);
-	int other_free = global_page_state(NR_FREE_PAGES) - totalreserve_pages;
-	int other_file = global_page_state(NR_FILE_PAGES) -
+	int other_free;
+	int other_file;
+
+	if (!mutex_trylock(&scan_mutex))
+		return 0;
+
+	other_free = global_page_state(NR_FREE_PAGES);
+
+	if (global_page_state(NR_SHMEM) + total_swapcache_pages() <
+		global_page_state(NR_FILE_PAGES) + zcache_pages())
+		other_file = global_page_state(NR_FILE_PAGES) + zcache_pages() -
 						global_page_state(NR_SHMEM) -
+						global_page_state(NR_UNEVICTABLE) -
 						total_swapcache_pages();
+	else
+		other_file = 0;
+
+	tune_lmk_param(&other_free, &other_file, sc);
 
 	if (lowmem_adj_size < array_size)
 		array_size = lowmem_adj_size;
 	if (lowmem_minfree_size < array_size)
 		array_size = lowmem_minfree_size;
 	for (i = 0; i < array_size; i++) {
-		if (other_free < lowmem_minfree[i] &&
-		    other_file < lowmem_minfree[i]) {
+		minfree = lowmem_minfree[i];
+		if (other_free < minfree && other_file < minfree) {
 			min_score_adj = lowmem_adj[i];
 			break;
 		}
 	}
 
+	ret = adjust_minadj(&min_score_adj);
+
 	lowmem_print(3, "lowmem_scan %lu, %x, ofree %d %d, ma %hd\n",
 			sc->nr_to_scan, sc->gfp_mask, other_free,
 			other_file, min_score_adj);
 
 	if (min_score_adj == OOM_SCORE_ADJ_MAX + 1) {
+		trace_almk_shrink(0, ret, other_free, other_file, 0);
 		lowmem_print(5, "lowmem_scan %lu, %x, return 0\n",
 			     sc->nr_to_scan, sc->gfp_mask);
+		mutex_unlock(&scan_mutex);
 		return 0;
 	}
 
@@ -124,16 +472,22 @@
 		if (tsk->flags & PF_KTHREAD)
 			continue;
 
-		p = find_lock_task_mm(tsk);
-		if (!p)
+		/* if the task no longer has any memory, ignore it */
+		if (test_task_flag(tsk, TIF_MM_RELEASED))
 			continue;
 
-		if (test_tsk_thread_flag(p, TIF_MEMDIE) &&
-		    time_before_eq(jiffies, lowmem_deathpending_timeout)) {
-			task_unlock(p);
+		if (time_before_eq(jiffies, lowmem_deathpending_timeout)) {
+			if (test_task_flag(tsk, TIF_MEMDIE)) {
 			rcu_read_unlock();
+				mutex_unlock(&scan_mutex);
 			return 0;
 		}
+		}
+
+		p = find_lock_task_mm(tsk);
+		if (!p)
+			continue;
+
 		oom_score_adj = p->signal->oom_score_adj;
 		if (oom_score_adj < min_score_adj) {
 			task_unlock(p);
@@ -153,10 +507,22 @@
 		selected = p;
 		selected_tasksize = tasksize;
 		selected_oom_score_adj = oom_score_adj;
-		lowmem_print(2, "select %d (%s), adj %hd, size %d, to kill\n",
-			     p->pid, p->comm, oom_score_adj, tasksize);
+		lowmem_print(3, "select '%s' (%d), adj %hd, size %d, to kill\n",
+			     p->comm, p->pid, oom_score_adj, tasksize);
 	}
 	if (selected) {
+		long cache_size, cache_limit, free;
+
+		if (test_task_flag(selected, TIF_MEMDIE) &&
+		    (test_task_state(selected, TASK_UNINTERRUPTIBLE))) {
+			lowmem_print(2, "'%s' (%d) is already killed\n",
+				     selected->comm,
+				     selected->pid);
+			rcu_read_unlock();
+			mutex_unlock(&scan_mutex);
+			return 0;
+		}
+
 		task_lock(selected);
 		send_sig(SIGKILL, selected, 0);
 		/*
@@ -167,40 +533,169 @@
 		if (selected->mm)
 			mark_oom_victim(selected);
 		task_unlock(selected);
-		lowmem_print(1, "send sigkill to %d (%s), adj %hd, size %d\n",
-			     selected->pid, selected->comm,
-			     selected_oom_score_adj, selected_tasksize);
+		cache_size = other_file * (long)(PAGE_SIZE / 1024);
+		cache_limit = minfree * (long)(PAGE_SIZE / 1024);
+		free = other_free * (long)(PAGE_SIZE / 1024);
+		trace_lowmemory_kill(selected, cache_size, cache_limit, free);
+		lowmem_print(1, "Killing '%s' (%d) (tgid %d), adj %hd,\n" \
+			        "   to free %ldkB on behalf of '%s' (%d) because\n" \
+			        "   cache %ldkB is below limit %ldkB for oom_score_adj %hd\n" \
+				"   Free memory is %ldkB above reserved.\n" \
+				"   Free CMA is %ldkB\n" \
+				"   Total reserve is %ldkB\n" \
+				"   Total free pages is %ldkB\n" \
+				"   Total file cache is %ldkB\n" \
+				"   Total zcache is %ldkB\n" \
+				"   GFP mask is 0x%x\n",
+			     selected->comm, selected->pid, selected->tgid,
+			     selected_oom_score_adj,
+			     selected_tasksize * (long)(PAGE_SIZE / 1024),
+			     current->comm, current->pid,
+			     cache_size, cache_limit,
+			     min_score_adj,
+			     free,
+			     global_page_state(NR_FREE_CMA_PAGES) *
+				(long)(PAGE_SIZE / 1024),
+			     totalreserve_pages * (long)(PAGE_SIZE / 1024),
+			     global_page_state(NR_FREE_PAGES) *
+				(long)(PAGE_SIZE / 1024),
+			     global_page_state(NR_FILE_PAGES) *
+				(long)(PAGE_SIZE / 1024),
+			     (long)zcache_pages() * (long)(PAGE_SIZE / 1024),
+			     sc->gfp_mask);
+
+		if (lowmem_debug_level >= 2 && selected_oom_score_adj == 0) {
+			show_mem(SHOW_MEM_FILTER_NODES);
+			dump_tasks(NULL, NULL);
+		}
+
 		lowmem_deathpending_timeout = jiffies + HZ;
 		rem += selected_tasksize;
+		rcu_read_unlock();
+		/* give the system time to free up the memory */
+		msleep_interruptible(20);
+		trace_almk_shrink(selected_tasksize, ret,
+				  other_free, other_file,
+				  selected_oom_score_adj);
+	} else {
+		trace_almk_shrink(1, ret, other_free, other_file, 0);
+		rcu_read_unlock();
 	}
 
 	lowmem_print(4, "lowmem_scan %lu, %x, return %lu\n",
 		     sc->nr_to_scan, sc->gfp_mask, rem);
-	rcu_read_unlock();
+	mutex_unlock(&scan_mutex);
 	return rem;
 }
 
 static struct shrinker lowmem_shrinker = {
 	.scan_objects = lowmem_scan,
 	.count_objects = lowmem_count,
-	.seeks = DEFAULT_SEEKS * 16
+	.seeks = DEFAULT_SEEKS * 16,
+	.flags = SHRINKER_LMK
 };
 
 static int __init lowmem_init(void)
 {
 	register_shrinker(&lowmem_shrinker);
+	vmpressure_notifier_register(&lmk_vmpr_nb);
 	return 0;
 }
 device_initcall(lowmem_init);
 
+#ifdef CONFIG_ANDROID_LOW_MEMORY_KILLER_AUTODETECT_OOM_ADJ_VALUES
+static short lowmem_oom_adj_to_oom_score_adj(short oom_adj)
+{
+	if (oom_adj == OOM_ADJUST_MAX)
+		return OOM_SCORE_ADJ_MAX;
+	else
+		return (oom_adj * OOM_SCORE_ADJ_MAX) / -OOM_DISABLE;
+}
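+
+/*
+ * Editor's worked example (not part of the patch), using the kernel
+ * constants OOM_DISABLE = -17, OOM_ADJUST_MAX = 15 and
+ * OOM_SCORE_ADJ_MAX = 1000, i.e. oom_score_adj = oom_adj * 1000 / 17:
+ *   oom_adj  15 -> 1000 (special-cased above)
+ *   oom_adj   9 ->  529
+ *   oom_adj   0 ->    0
+ *   oom_adj -16 -> -941
+ */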
+
+static void lowmem_autodetect_oom_adj_values(void)
+{
+	int i;
+	short oom_adj;
+	short oom_score_adj;
+	int array_size = ARRAY_SIZE(lowmem_adj);
+
+	if (lowmem_adj_size < array_size)
+		array_size = lowmem_adj_size;
+
+	if (array_size <= 0)
+		return;
+
+	oom_adj = lowmem_adj[array_size - 1];
+	if (oom_adj > OOM_ADJUST_MAX)
+		return;
+
+	oom_score_adj = lowmem_oom_adj_to_oom_score_adj(oom_adj);
+	if (oom_score_adj <= OOM_ADJUST_MAX)
+		return;
+
+	lowmem_print(1, "lowmem_shrink: convert oom_adj to oom_score_adj:\n");
+	for (i = 0; i < array_size; i++) {
+		oom_adj = lowmem_adj[i];
+		oom_score_adj = lowmem_oom_adj_to_oom_score_adj(oom_adj);
+		lowmem_adj[i] = oom_score_adj;
+		lowmem_print(1, "oom_adj %d => oom_score_adj %d\n",
+			     oom_adj, oom_score_adj);
+	}
+}
+
+static int lowmem_adj_array_set(const char *val, const struct kernel_param *kp)
+{
+	int ret;
+
+	ret = param_array_ops.set(val, kp);
+
+	/* HACK: Autodetect oom_adj values in lowmem_adj array */
+	lowmem_autodetect_oom_adj_values();
+
+	return ret;
+}
+
+static int lowmem_adj_array_get(char *buffer, const struct kernel_param *kp)
+{
+	return param_array_ops.get(buffer, kp);
+}
+
+static void lowmem_adj_array_free(void *arg)
+{
+	param_array_ops.free(arg);
+}
+
+static struct kernel_param_ops lowmem_adj_array_ops = {
+	.set = lowmem_adj_array_set,
+	.get = lowmem_adj_array_get,
+	.free = lowmem_adj_array_free,
+};
+
+static const struct kparam_array __param_arr_adj = {
+	.max = ARRAY_SIZE(lowmem_adj),
+	.num = &lowmem_adj_size,
+	.ops = &param_ops_short,
+	.elemsize = sizeof(lowmem_adj[0]),
+	.elem = lowmem_adj,
+};
+#endif
+
 /*
  * not really modular, but the easiest way to keep compat with existing
  * bootargs behaviour is to continue using module_param here.
  */
 module_param_named(cost, lowmem_shrinker.seeks, int, S_IRUGO | S_IWUSR);
+#ifdef CONFIG_ANDROID_LOW_MEMORY_KILLER_AUTODETECT_OOM_ADJ_VALUES
+module_param_cb(adj, &lowmem_adj_array_ops,
+		.arr = &__param_arr_adj,
+		S_IRUGO | S_IWUSR);
+__MODULE_PARM_TYPE(adj, "array of short");
+#else
 module_param_array_named(adj, lowmem_adj, short, &lowmem_adj_size,
 			 S_IRUGO | S_IWUSR);
+#endif
 module_param_array_named(minfree, lowmem_minfree, uint, &lowmem_minfree_size,
 			 S_IRUGO | S_IWUSR);
 module_param_named(debug_level, lowmem_debug_level, uint, S_IRUGO | S_IWUSR);
+module_param_named(lmk_fast_run, lmk_fast_run, int, S_IRUGO | S_IWUSR);
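+
+/*
+ * Editor's usage note (not part of the patch): with the autodetect
+ * option enabled, legacy oom_adj values written from userspace, e.g.
+ *   echo "0,1,6,12" > /sys/module/lowmemorykiller/parameters/adj
+ * are converted in place to oom_score_adj values by
+ * lowmem_adj_array_set() -> lowmem_autodetect_oom_adj_values().
+ */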
 
diff -ruw linux-4.4.115/drivers/staging/android/Makefile linux-4.4.115-fbx/drivers/staging/android/Makefile
--- linux-4.4.115/drivers/staging/android/Makefile	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/staging/android/Makefile	2019-10-29 09:26:24.841214902 +0100
@@ -1,6 +1,7 @@
 ccflags-y += -I$(src)			# needed for trace events
 
 obj-y					+= ion/
+obj-$(CONFIG_FIQ_DEBUGGER)		+= fiq_debugger/
 
 obj-$(CONFIG_ASHMEM)			+= ashmem.o
 obj-$(CONFIG_ANDROID_TIMED_OUTPUT)	+= timed_output.o
@@ -8,3 +9,4 @@
 obj-$(CONFIG_ANDROID_LOW_MEMORY_KILLER)	+= lowmemorykiller.o
 obj-$(CONFIG_SYNC)			+= sync.o sync_debug.o
 obj-$(CONFIG_SW_SYNC)			+= sw_sync.o
+obj-$(CONFIG_ONESHOT_SYNC)		+= oneshot_sync.o
diff -ruw linux-4.4.115/drivers/staging/android/sync.c linux-4.4.115-fbx/drivers/staging/android/sync.c
--- linux-4.4.115/drivers/staging/android/sync.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/staging/android/sync.c	2019-10-29 09:26:24.849214980 +0100
@@ -29,6 +29,7 @@
 #include "sync.h"
 
 #define CREATE_TRACE_POINTS
+#define SYNC_DUMP_TIME_LIMIT 7000
 #include "trace/sync.h"
 
 static const struct fence_ops android_fence_ops;
@@ -390,8 +391,10 @@
 		return ret;
 	} else if (ret == 0) {
 		if (timeout) {
-			pr_info("fence timeout on [%p] after %dms\n", fence,
+			pr_info("fence timeout on [%pK] after %dms\n", fence,
 				jiffies_to_msecs(timeout));
+			if (jiffies_to_msecs(timeout) >=
+				SYNC_DUMP_TIME_LIMIT)
 			sync_dump();
 		}
 		return -ETIME;
@@ -399,7 +402,7 @@
 
 	ret = atomic_read(&fence->status);
 	if (ret) {
-		pr_info("fence error %ld on [%p]\n", ret, fence);
+		pr_info("fence error %ld on [%pK]\n", ret, fence);
 		sync_dump();
 	}
 	return ret;
@@ -465,6 +468,13 @@
 	return true;
 }
 
+static void android_fence_disable_signaling(struct fence *fence)
+{
+	struct sync_pt *pt = container_of(fence, struct sync_pt, base);
+
+	list_del_init(&pt->active_list);
+}
+
 static int android_fence_fill_driver_data(struct fence *fence,
 					  void *data, int size)
 {
@@ -508,6 +518,7 @@
 	.get_driver_name = android_fence_get_driver_name,
 	.get_timeline_name = android_fence_get_timeline_name,
 	.enable_signaling = android_fence_enable_signaling,
+	.disable_signaling = android_fence_disable_signaling,
 	.signaled = android_fence_signaled,
 	.wait = fence_default_wait,
 	.release = android_fence_release,
@@ -519,12 +530,10 @@
 static void sync_fence_free(struct kref *kref)
 {
 	struct sync_fence *fence = container_of(kref, struct sync_fence, kref);
-	int i, status = atomic_read(&fence->status);
+	int i;
 
 	for (i = 0; i < fence->num_fences; ++i) {
-		if (status)
-			fence_remove_callback(fence->cbs[i].sync_pt,
-					      &fence->cbs[i].cb);
+		fence_remove_callback(fence->cbs[i].sync_pt, &fence->cbs[i].cb);
 		fence_put(fence->cbs[i].sync_pt);
 	}
 
diff -ruw linux-4.4.115/drivers/staging/android/sync_debug.c linux-4.4.115-fbx/drivers/staging/android/sync_debug.c
--- linux-4.4.115/drivers/staging/android/sync_debug.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/staging/android/sync_debug.c	2019-01-22 16:16:26.719275529 +0100
@@ -87,7 +87,7 @@
 	int status = 1;
 	struct sync_timeline *parent = sync_pt_parent(pt);
 
-	if (fence_is_signaled_locked(&pt->base))
+	if (test_bit(FENCE_FLAG_SIGNALED_BIT, &pt->base.flags))
 		status = pt->base.status;
 
 	seq_printf(s, "  %s%spt %s",
@@ -149,7 +149,7 @@
 	unsigned long flags;
 	int i;
 
-	seq_printf(s, "[%p] %s: %s\n", fence, fence->name,
+	seq_printf(s, "[%pK] %s: %s\n", fence, fence->name,
 		   sync_status_str(atomic_read(&fence->status)));
 
 	for (i = 0; i < fence->num_fences; ++i) {
diff -ruw linux-4.4.115/drivers/staging/android/sync.h linux-4.4.115-fbx/drivers/staging/android/sync.h
--- linux-4.4.115/drivers/staging/android/sync.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/staging/android/sync.h	2019-01-22 16:16:26.719275529 +0100
@@ -92,7 +92,7 @@
 struct sync_timeline {
 	struct kref		kref;
 	const struct sync_timeline_ops	*ops;
-	char			name[32];
+	char			name[64];
 
 	/* protected by child_list_lock */
 	bool			destroyed;
@@ -154,7 +154,7 @@
 struct sync_fence {
 	struct file		*file;
 	struct kref		kref;
-	char			name[32];
+	char			name[64];
 #ifdef CONFIG_DEBUG_FS
 	struct list_head	sync_fence_list;
 #endif
diff -ruw linux-4.4.115/drivers/staging/android/uapi/ashmem.h linux-4.4.115-fbx/drivers/staging/android/uapi/ashmem.h
--- linux-4.4.115/drivers/staging/android/uapi/ashmem.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/staging/android/uapi/ashmem.h	2019-01-22 16:16:26.719275529 +0100
@@ -13,6 +13,7 @@
 #define _UAPI_LINUX_ASHMEM_H
 
 #include <linux/ioctl.h>
+#include <linux/types.h>
 
 #define ASHMEM_NAME_LEN		256
 
diff -ruw linux-4.4.115/drivers/staging/goldfish/Kconfig linux-4.4.115-fbx/drivers/staging/goldfish/Kconfig
--- linux-4.4.115/drivers/staging/goldfish/Kconfig	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/staging/goldfish/Kconfig	2019-10-29 09:26:24.861215098 +0100
@@ -4,6 +4,12 @@
 	---help---
 	  Emulated audio channel for the Goldfish Android Virtual Device
 
+config GOLDFISH_SYNC
+	tristate "Goldfish AVD Sync Driver"
+	depends on GOLDFISH
+	---help---
+	  Emulated sync fences for the Goldfish Android Virtual Device
+
 config MTD_GOLDFISH_NAND
 	tristate "Goldfish NAND device"
 	depends on GOLDFISH
diff -ruw linux-4.4.115/drivers/staging/goldfish/Makefile linux-4.4.115-fbx/drivers/staging/goldfish/Makefile
--- linux-4.4.115/drivers/staging/goldfish/Makefile	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/staging/goldfish/Makefile	2019-10-29 09:26:24.861215098 +0100
@@ -4,3 +4,8 @@
 
 obj-$(CONFIG_GOLDFISH_AUDIO) += goldfish_audio.o
 obj-$(CONFIG_MTD_GOLDFISH_NAND)	+= goldfish_nand.o
+
+# and sync
+
+ccflags-y := -Idrivers/staging/android
+obj-$(CONFIG_GOLDFISH_SYNC) += goldfish_sync.o
diff -ruw linux-4.4.115/drivers/staging/Makefile linux-4.4.115-fbx/drivers/staging/Makefile
--- linux-4.4.115/drivers/staging/Makefile	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/staging/Makefile	2019-01-22 16:16:26.711275457 +0100
@@ -26,7 +26,6 @@
 obj-$(CONFIG_FB_XGI)		+= xgifb/
 obj-$(CONFIG_USB_EMXX)		+= emxx_udc/
 obj-$(CONFIG_SPEAKUP)		+= speakup/
-obj-$(CONFIG_TOUCHSCREEN_SYNAPTICS_I2C_RMI4)	+= ste_rmi4/
 obj-$(CONFIG_MFD_NVEC)		+= nvec/
 obj-$(CONFIG_STAGING_RDMA)	+= rdma/
 obj-$(CONFIG_ANDROID)		+= android/
diff -ruw linux-4.4.115/drivers/thermal/cpu_cooling.c linux-4.4.115-fbx/drivers/thermal/cpu_cooling.c
--- linux-4.4.115/drivers/thermal/cpu_cooling.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/thermal/cpu_cooling.c	2019-01-22 16:16:27.151279441 +0100
@@ -103,6 +103,7 @@
 	int dyn_power_table_entries;
 	struct device *cpu_dev;
 	get_static_t plat_get_static_power;
+	struct cpu_cooling_ops *plat_ops;
 };
 static DEFINE_IDR(cpufreq_idr);
 static DEFINE_MUTEX(cooling_cpufreq_lock);
@@ -506,7 +507,12 @@
 				 unsigned long *state)
 {
 	struct cpufreq_cooling_device *cpufreq_device = cdev->devdata;
+	unsigned int cpu = cpumask_any(&cpufreq_device->allowed_cpus);
 
+	if (cpufreq_device->plat_ops
+			&& cpufreq_device->plat_ops->get_cur_state)
+		cpufreq_device->plat_ops->get_cur_state(cpu, state);
+	else
 	*state = cpufreq_device->cpufreq_state;
 
 	return 0;
@@ -541,7 +547,17 @@
 	cpufreq_device->cpufreq_state = state;
 	cpufreq_device->clipped_freq = clip_freq;
 
+	/*
+	 * Check if the device has a platform mitigation function that
+	 * can handle the CPU freq mitigation; if not, notify the cpufreq
+	 * framework.
+	 */
+	if (cpufreq_device->plat_ops) {
+		if (cpufreq_device->plat_ops->ceil_limit)
+			cpufreq_device->plat_ops->ceil_limit(cpu,
+						clip_freq);
+	} else {
 	cpufreq_update_policy(cpu);
+	}
 
 	return 0;
 }
@@ -775,6 +791,9 @@
  * @capacitance: dynamic power coefficient for these cpus
  * @plat_static_func: function to calculate the static power consumed by these
  *                    cpus (optional)
+ * @plat_mitig_func: function that does the mitigation by changing the
+ *                   frequencies (optional). By default, the cpufreq
+ *                   framework will be notified of the new limits.
  *
  * This interface function registers the cpufreq cooling device with the name
  * "thermal-cpufreq-%x". This api can support multiple instances of cpufreq
@@ -787,7 +806,8 @@
 static struct thermal_cooling_device *
 __cpufreq_cooling_register(struct device_node *np,
 			const struct cpumask *clip_cpus, u32 capacitance,
-			get_static_t plat_static_func)
+			get_static_t plat_static_func,
+			struct cpu_cooling_ops *plat_ops)
 {
 	struct thermal_cooling_device *cool_dev;
 	struct cpufreq_cooling_device *cpufreq_dev;
@@ -853,6 +873,8 @@
 		}
 	}
 
+	cpufreq_dev->plat_ops = plat_ops;
+
 	ret = get_idr(&cpufreq_idr, &cpufreq_dev->id);
 	if (ret) {
 		cool_dev = ERR_PTR(ret);
@@ -926,7 +948,7 @@
 struct thermal_cooling_device *
 cpufreq_cooling_register(const struct cpumask *clip_cpus)
 {
-	return __cpufreq_cooling_register(NULL, clip_cpus, 0, NULL);
+	return __cpufreq_cooling_register(NULL, clip_cpus, 0, NULL, NULL);
 }
 EXPORT_SYMBOL_GPL(cpufreq_cooling_register);
 
@@ -950,7 +972,7 @@
 	if (!np)
 		return ERR_PTR(-EINVAL);
 
-	return __cpufreq_cooling_register(np, clip_cpus, 0, NULL);
+	return __cpufreq_cooling_register(np, clip_cpus, 0, NULL, NULL);
 }
 EXPORT_SYMBOL_GPL(of_cpufreq_cooling_register);
 
@@ -980,11 +1002,31 @@
 			       get_static_t plat_static_func)
 {
 	return __cpufreq_cooling_register(NULL, clip_cpus, capacitance,
-				plat_static_func);
+				plat_static_func, NULL);
 }
 EXPORT_SYMBOL(cpufreq_power_cooling_register);
 
 /**
+ * cpufreq_platform_cooling_register() - create cpufreq cooling device with
+ * additional platform specific mitigation function.
+ *
+ * @clip_cpus: cpumask of cpus where the frequency constraints will happen
+ * @plat_ops: the platform mitigation functions that will be called instead
+ * of cpufreq, if provided.
+ *
+ * Return: a valid struct thermal_cooling_device pointer on success,
+ * on failure, it returns a corresponding ERR_PTR().
+ */
+struct thermal_cooling_device *
+cpufreq_platform_cooling_register(const struct cpumask *clip_cpus,
+				struct cpu_cooling_ops *plat_ops)
+{
+	return __cpufreq_cooling_register(NULL, clip_cpus, 0, NULL,
+						plat_ops);
+}
+EXPORT_SYMBOL(cpufreq_platform_cooling_register);
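+
+/*
+ * Editor's illustrative sketch (not part of the patch): a hypothetical
+ * platform driver supplying its own mitigation callbacks. The callback
+ * names are invented; the ops fields match the calls made above.
+ */
+#if 0
+static struct cpu_cooling_ops example_plat_ops = {
+	.get_cur_state = example_get_cur_state,	/* fills *state for a cpu */
+	.ceil_limit = example_ceil_limit,	/* clips a cpu to clip_freq */
+};
+
+static void example_register(void)
+{
+	struct thermal_cooling_device *cdev;
+
+	cdev = cpufreq_platform_cooling_register(cpu_online_mask,
+						 &example_plat_ops);
+	if (IS_ERR(cdev))
+		pr_err("cooling device registration failed\n");
+}
+#endif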
+
+/**
  * of_cpufreq_power_cooling_register() - create cpufreq cooling device with power extensions
  * @np:	a valid struct device_node to the cooling device device tree node
  * @clip_cpus:	cpumask of cpus where the frequency constraints will happen
@@ -1017,7 +1059,7 @@
 		return ERR_PTR(-EINVAL);
 
 	return __cpufreq_cooling_register(np, clip_cpus, capacitance,
-				plat_static_func);
+				plat_static_func, NULL);
 }
 EXPORT_SYMBOL(of_cpufreq_power_cooling_register);
 
diff -ruw linux-4.4.115/drivers/thermal/Kconfig linux-4.4.115-fbx/drivers/thermal/Kconfig
--- linux-4.4.115/drivers/thermal/Kconfig	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/thermal/Kconfig	2019-10-29 09:26:24.913215606 +0100
@@ -175,6 +175,36 @@
 	  because userland can easily disable the thermal policy by simply
 	  flooding this sysfs node with low temperature values.
 
+config LIMITS_MONITOR
+	bool "LMH monitor driver"
+	depends on THERMAL
+	help
+	  Enable this to manage the limits hardware for interrupts, throttling
+	  intensities, and LMH device profiles. This driver also registers the
+	  Limits hardware's monitoring entities as sensors with the thermal
+	  framework.
+
+config LIMITS_LITE_HW
+	bool "LMH Lite hardware driver"
+	depends on LIMITS_MONITOR
+	help
+	  Enable this option for interacting with LMH Lite hardware. This
+	  implements the APIs required for getting the details about sensors
+	  supported by LMH Lite, their throttling intensity and the operating
+	  profiles.
+
+config THERMAL_MONITOR
+	bool "Monitor thermal state and limit CPU Frequency"
+	depends on THERMAL_TSENS8974
+	depends on CPU_FREQ || CPU_FREQ_MSM
+	depends on PM_OPP
+	default n
+	help
+	  This enables thermal monitoring capability in the kernel in the
+	  absence of a system-wide thermal monitoring entity or until such an
+	  entity starts running in userspace. It monitors the TSENS temperature
+	  and limits the max frequency of the cores.
+
 config HISI_THERMAL
 	tristate "Hisilicon thermal driver"
 	depends on (ARCH_HISI && CPU_THERMAL && OF) || COMPILE_TEST
@@ -183,6 +213,16 @@
 	  thermal framework. cpufreq is used as the cooling device to throttle
 	  CPUs when the passive trip is crossed.
 
+config THERMAL_TSENS8974
+	tristate "Qualcomm 8974 TSENS Temperature driver"
+	depends on THERMAL
+	help
+	  This enables the thermal sysfs driver for the TSENS device. It shows
+	  up in Sysfs as a thermal zone with multiple trip points. It can also
+	  set threshold temperatures for both warm and cool and update the
+	  thermal userspace client when a threshold is reached. Warm/cool
+	  temperature thresholds can be set independently for each sensor.
+
 config IMX_THERMAL
 	tristate "Temperature sensor driver for Freescale i.MX SoCs"
 	depends on CPU_THERMAL
@@ -365,6 +405,42 @@
 	  Thermal reporting device will provide temperature reading,
 	  programmable trip points and other information.
 
+config THERMAL_QPNP
+	tristate "Qualcomm Technologies, Inc. QPNP PMIC Temperature Alarm"
+	depends on OF && SPMI
+	help
+	  This enables a thermal Sysfs driver for Qualcomm Technologies, Inc.
+	  QPNP PMIC devices. It shows up in Sysfs as a thermal zone with
+	  multiple trip points. The temperature reported by the thermal zone
+	  reflects the real time die temperature if an ADC is present or an
+	  estimate of the temperature based upon the over temperature stage
+	  value if no ADC is available. If allowed via compile time
+	  configuration; enabling the thermal zone device via the mode file
+	  results in shifting PMIC over temperature shutdown control from
+	  hardware to software.
+
+config THERMAL_QPNP_ADC_TM
+	tristate "Qualcomm 8974 Thermal Monitor ADC Driver"
+	depends on THERMAL
+	depends on  SPMI
+	help
+	  This enables the thermal Sysfs driver for the ADC thermal monitoring
+	  device. It shows up in Sysfs as a thermal zone with multiple trip points.
+	  Disabling the thermal zone device via the mode file results in disabling
+	  the sensor. Also able to set threshold temperature for both hot and cold
+	  and update when a threshold is reached.
+
+config QCOM_THERMAL_LIMITS_DCVS
+	bool "QTI LMH DCVS Driver"
+	depends on THERMAL && ARCH_QCOM
+	depends on OF
+	help
+	  This enables the driver for Limits Management Hardware - DCVS block
+	  for the application processors. The h/w block that is available for
+	  each cluster can be used to perform quick thermal mitigations by
+	  tracking temperatures of the CPUs and taking thermal action in the
+	  hardware without s/w intervention.
+
 menu "Texas Instruments thermal drivers"
 depends on ARCH_HAS_BANDGAP || COMPILE_TEST
 source "drivers/thermal/ti-soc-thermal/Kconfig"
diff -ruw linux-4.4.115/drivers/thermal/Makefile linux-4.4.115-fbx/drivers/thermal/Makefile
--- linux-4.4.115/drivers/thermal/Makefile	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/thermal/Makefile	2019-01-22 16:16:27.147279405 +0100
@@ -48,3 +48,10 @@
 obj-$(CONFIG_ST_THERMAL)	+= st/
 obj-$(CONFIG_TEGRA_SOCTHERM)	+= tegra_soctherm.o
 obj-$(CONFIG_HISI_THERMAL)     += hisi_thermal.o
+obj-$(CONFIG_THERMAL_QPNP)	+= qpnp-temp-alarm.o
+obj-$(CONFIG_THERMAL_QPNP_ADC_TM)	+= qpnp-adc-tm.o
+obj-$(CONFIG_THERMAL_TSENS8974)	+= msm-tsens.o
+obj-$(CONFIG_THERMAL_MONITOR)	+= msm_thermal.o msm_thermal-dev.o
+obj-$(CONFIG_LIMITS_MONITOR)	+= lmh_interface.o
+obj-$(CONFIG_LIMITS_LITE_HW)	+= lmh_lite.o
+obj-$(CONFIG_QCOM_THERMAL_LIMITS_DCVS)	+= msm_lmh_dcvs.o
diff -ruw linux-4.4.115/drivers/thermal/thermal_core.c linux-4.4.115-fbx/drivers/thermal/thermal_core.c
--- linux-4.4.115/drivers/thermal/thermal_core.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/thermal/thermal_core.c	2019-01-22 16:16:27.159279514 +0100
@@ -4,6 +4,7 @@
  *  Copyright (C) 2008 Intel Corp
  *  Copyright (C) 2008 Zhang Rui <rui.zhang@intel.com>
  *  Copyright (C) 2008 Sujith Thomas <sujith.thomas@intel.com>
+ *  Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
  *
  *  ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  *
@@ -35,9 +36,9 @@
 #include <linux/reboot.h>
 #include <linux/string.h>
 #include <linux/of.h>
+#include <linux/kthread.h>
 #include <net/netlink.h>
 #include <net/genetlink.h>
-#include <linux/suspend.h>
 
 #define CREATE_TRACE_POINTS
 #include <trace/events/thermal.h>
@@ -45,6 +46,8 @@
 #include "thermal_core.h"
 #include "thermal_hwmon.h"
 
+#define THERMAL_UEVENT_DATA "type"
+
 MODULE_AUTHOR("Zhang Rui");
 MODULE_DESCRIPTION("Generic thermal management sysfs support");
 MODULE_LICENSE("GPL v2");
@@ -60,8 +63,6 @@
 static DEFINE_MUTEX(thermal_list_lock);
 static DEFINE_MUTEX(thermal_governor_lock);
 
-static atomic_t in_suspend;
-
 static struct thermal_governor *def_governor;
 
 static struct thermal_governor *__find_governor(const char *name)
@@ -206,6 +207,407 @@
 	return;
 }
 
+static LIST_HEAD(sensor_info_list);
+static DEFINE_MUTEX(sensor_list_lock);
+
+static struct sensor_info *get_sensor(uint32_t sensor_id)
+{
+	struct sensor_info *pos = NULL, *matching_sensor = NULL;
+
+	rcu_read_lock();
+	list_for_each_entry_rcu(pos, &sensor_info_list, sensor_list) {
+		if (pos->sensor_id == sensor_id) {
+			matching_sensor = pos;
+			break;
+		}
+	}
+	rcu_read_unlock();
+
+	return matching_sensor;
+}
+
+int sensor_get_id(char *name)
+{
+	struct sensor_info *pos = NULL;
+	int matching_id = -ENODEV;
+
+	if (!name)
+		return matching_id;
+
+	rcu_read_lock();
+	list_for_each_entry_rcu(pos, &sensor_info_list, sensor_list) {
+		if (!strcmp(pos->tz->type, name)) {
+			matching_id = pos->sensor_id;
+			break;
+		}
+	}
+	rcu_read_unlock();
+
+	return matching_id;
+}
+EXPORT_SYMBOL(sensor_get_id);
+
+static void init_sensor_trip(struct sensor_info *sensor)
+{
+	int ret = 0, i = 0;
+	enum thermal_trip_type type;
+
+	for (i = 0; ((sensor->max_idx == -1) ||
+		(sensor->min_idx == -1)) &&
+		(sensor->tz->ops->get_trip_type) &&
+		(i < sensor->tz->trips); i++) {
+
+		sensor->tz->ops->get_trip_type(sensor->tz, i, &type);
+		if (type == THERMAL_TRIP_CONFIGURABLE_HI)
+			sensor->max_idx = i;
+		if (type == THERMAL_TRIP_CONFIGURABLE_LOW)
+			sensor->min_idx = i;
+		type = 0;
+	}
+
+	ret = sensor->tz->ops->get_trip_temp(sensor->tz,
+		sensor->min_idx, &sensor->threshold_min);
+	if (ret)
+		pr_err("Unable to get MIN trip temp. sensor:%d err:%d\n",
+				sensor->sensor_id, ret);
+
+	ret = sensor->tz->ops->get_trip_temp(sensor->tz,
+		sensor->max_idx, &sensor->threshold_max);
+	if (ret)
+		pr_err("Unable to get MAX trip temp. sensor:%d err:%d\n",
+				sensor->sensor_id, ret);
+}
+
+static int __update_sensor_thresholds(struct sensor_info *sensor)
+{
+	long max_of_low_thresh = LONG_MIN;
+	long min_of_high_thresh = LONG_MAX;
+	struct sensor_threshold *pos = NULL;
+	int ret = 0;
+
+	if (!sensor->tz->ops->set_trip_temp ||
+		!sensor->tz->ops->activate_trip_type ||
+		!sensor->tz->ops->get_trip_type ||
+		!sensor->tz->ops->get_trip_temp) {
+		ret = -ENODEV;
+		goto update_done;
+	}
+
+	if ((sensor->max_idx == -1) || (sensor->min_idx == -1))
+		init_sensor_trip(sensor);
+
+	list_for_each_entry(pos, &sensor->threshold_list, list) {
+		if (!pos->active)
+			continue;
+		if (pos->trip == THERMAL_TRIP_CONFIGURABLE_LOW) {
+			if (pos->temp > max_of_low_thresh)
+				max_of_low_thresh = pos->temp;
+		}
+		if (pos->trip == THERMAL_TRIP_CONFIGURABLE_HI) {
+			if (pos->temp < min_of_high_thresh)
+				min_of_high_thresh = pos->temp;
+		}
+	}
+
+	pr_debug("sensor %d: Thresholds: max of low: %ld min of high: %ld\n",
+			sensor->sensor_id, max_of_low_thresh,
+			min_of_high_thresh);
+
+	if (min_of_high_thresh != LONG_MAX) {
+		ret = sensor->tz->ops->set_trip_temp(sensor->tz,
+			sensor->max_idx, min_of_high_thresh);
+		if (ret) {
+			pr_err("sensor %d: Unable to set high threshold %d\n",
+					sensor->sensor_id, ret);
+			goto update_done;
+		}
+		sensor->threshold_max = min_of_high_thresh;
+	}
+	ret = sensor->tz->ops->activate_trip_type(sensor->tz,
+		sensor->max_idx,
+		(min_of_high_thresh == LONG_MAX) ?
+		THERMAL_TRIP_ACTIVATION_DISABLED :
+		THERMAL_TRIP_ACTIVATION_ENABLED);
+	if (ret) {
+		pr_err("sensor %d: Unable to activate high threshold %d\n",
+			sensor->sensor_id, ret);
+		goto update_done;
+	}
+
+	if (max_of_low_thresh != LONG_MIN) {
+		ret = sensor->tz->ops->set_trip_temp(sensor->tz,
+			sensor->min_idx, max_of_low_thresh);
+		if (ret) {
+			pr_err("sensor %d: Unable to set low threshold %d\n",
+				sensor->sensor_id, ret);
+			goto update_done;
+		}
+		sensor->threshold_min = max_of_low_thresh;
+	}
+	ret = sensor->tz->ops->activate_trip_type(sensor->tz,
+		sensor->min_idx,
+		(max_of_low_thresh == LONG_MIN) ?
+		THERMAL_TRIP_ACTIVATION_DISABLED :
+		THERMAL_TRIP_ACTIVATION_ENABLED);
+	if (ret) {
+		pr_err("sensor %d: Unable to activate low threshold %d\n",
+			sensor->sensor_id, ret);
+		goto update_done;
+	}
+
+	pr_debug("sensor %d: low: %d high: %d\n",
+		sensor->sensor_id,
+		sensor->threshold_min, sensor->threshold_max);
+
+update_done:
+	return ret;
+}
+
+static void sensor_update_work(struct work_struct *work)
+{
+	struct sensor_info *sensor = container_of(work, struct sensor_info,
+						work);
+	int ret = 0;
+	mutex_lock(&sensor->lock);
+	ret = __update_sensor_thresholds(sensor);
+	if (ret)
+		pr_err("sensor %d: Error %d setting threshold\n",
+			sensor->sensor_id, ret);
+	mutex_unlock(&sensor->lock);
+}
+
+static __ref int sensor_sysfs_notify(void *data)
+{
+	int ret = 0;
+	struct sensor_info *sensor = (struct sensor_info *)data;
+
+	while (!kthread_should_stop()) {
+		if (wait_for_completion_interruptible(
+			&sensor->sysfs_notify_complete) != 0)
+			continue;
+		if (sensor->deregister_active)
+			return ret;
+		reinit_completion(&sensor->sysfs_notify_complete);
+		sysfs_notify(&sensor->tz->device.kobj, NULL,
+					THERMAL_UEVENT_DATA);
+	}
+	return ret;
+}
+
+/*
+ * May be called in an interrupt context.
+ * Do NOT call sensor_set_trip from this function.
+ */
+int thermal_sensor_trip(struct thermal_zone_device *tz,
+		enum thermal_trip_type trip, long temp)
+{
+	struct sensor_threshold *pos = NULL;
+	int ret = -ENODEV;
+
+	if (trip != THERMAL_TRIP_CONFIGURABLE_HI &&
+			trip != THERMAL_TRIP_CONFIGURABLE_LOW)
+		return 0;
+
+	if (list_empty(&tz->sensor.threshold_list))
+		return 0;
+
+	rcu_read_lock();
+	list_for_each_entry_rcu(pos, &tz->sensor.threshold_list, list) {
+		if ((pos->trip != trip) || (!pos->active))
+			continue;
+		if (((trip == THERMAL_TRIP_CONFIGURABLE_LOW) &&
+			(pos->temp <= tz->sensor.threshold_min) &&
+			(pos->temp >= temp)) ||
+			((trip == THERMAL_TRIP_CONFIGURABLE_HI) &&
+				(pos->temp >= tz->sensor.threshold_max) &&
+				(pos->temp <= temp))) {
+			if ((pos == &tz->tz_threshold[0])
+				|| (pos == &tz->tz_threshold[1]))
+				complete(&tz->sensor.sysfs_notify_complete);
+			pos->active = 0;
+			pos->notify(trip, temp, pos->data);
+		}
+	}
+	rcu_read_unlock();
+
+	schedule_work(&tz->sensor.work);
+
+	return ret;
+}
+EXPORT_SYMBOL(thermal_sensor_trip);
+
+int sensor_get_temp(uint32_t sensor_id, int *temp)
+{
+	struct sensor_info *sensor = get_sensor(sensor_id);
+	int ret = 0;
+
+	if (!sensor)
+		return -ENODEV;
+
+	ret = sensor->tz->ops->get_temp(sensor->tz, temp);
+
+	return ret;
+}
+EXPORT_SYMBOL(sensor_get_temp);
+
+int sensor_activate_trip(uint32_t sensor_id,
+	struct sensor_threshold *threshold, bool enable)
+{
+	struct sensor_info *sensor = get_sensor(sensor_id);
+	int ret = 0;
+
+	if (!sensor || !threshold) {
+		pr_err("%s: uninitialized data\n",
+			KBUILD_MODNAME);
+		ret = -ENODEV;
+		goto activate_trip_exit;
+	}
+
+	mutex_lock(&sensor->lock);
+	threshold->active = (enable) ? 1 : 0;
+	ret = __update_sensor_thresholds(sensor);
+	mutex_unlock(&sensor->lock);
+
+activate_trip_exit:
+	return ret;
+}
+EXPORT_SYMBOL(sensor_activate_trip);
+
+int sensor_set_trip(uint32_t sensor_id, struct sensor_threshold *threshold)
+{
+	struct sensor_threshold *pos = NULL;
+	struct sensor_info *sensor = get_sensor(sensor_id);
+
+	if (!sensor)
+		return -ENODEV;
+
+	if (!threshold || !threshold->notify)
+		return -EFAULT;
+
+	mutex_lock(&sensor->lock);
+	list_for_each_entry(pos, &sensor->threshold_list, list) {
+		if (pos == threshold)
+			break;
+	}
+
+	if (pos != threshold) {
+		INIT_LIST_HEAD(&threshold->list);
+		list_add_rcu(&threshold->list, &sensor->threshold_list);
+	}
+	threshold->active = 0; /* Do not allow active threshold right away */
+
+	mutex_unlock(&sensor->lock);
+
+	return 0;
+}
+EXPORT_SYMBOL(sensor_set_trip);
+
+int sensor_cancel_trip(uint32_t sensor_id, struct sensor_threshold *threshold)
+{
+	struct sensor_threshold *pos = NULL, *var = NULL;
+	struct sensor_info *sensor = get_sensor(sensor_id);
+	int ret = 0;
+
+	if (!sensor)
+		return -ENODEV;
+
+	mutex_lock(&sensor->lock);
+	list_for_each_entry_safe(pos, var, &sensor->threshold_list, list) {
+		if (pos == threshold) {
+			pos->active = 0;
+			list_del_rcu(&pos->list);
+			break;
+		}
+	}
+
+	ret = __update_sensor_thresholds(sensor);
+	mutex_unlock(&sensor->lock);
+
+	return ret;
+}
+EXPORT_SYMBOL(sensor_cancel_trip);
+
+static int tz_notify_trip(enum thermal_trip_type type, int temp, void *data)
+{
+	struct thermal_zone_device *tz = (struct thermal_zone_device *)data;
+
+	pr_debug("sensor %d tripped: type %d temp %d\n",
+			tz->sensor.sensor_id, type, temp);
+
+	return 0;
+}
+
+static void get_trip_threshold(struct thermal_zone_device *tz, int trip,
+	struct sensor_threshold **threshold)
+{
+	enum thermal_trip_type type;
+
+	tz->ops->get_trip_type(tz, trip, &type);
+
+	if (type == THERMAL_TRIP_CONFIGURABLE_HI)
+		*threshold = &tz->tz_threshold[0];
+	else if (type == THERMAL_TRIP_CONFIGURABLE_LOW)
+		*threshold = &tz->tz_threshold[1];
+	else
+		*threshold = NULL;
+}
+
+int sensor_set_trip_temp(struct thermal_zone_device *tz,
+		int trip, long temp)
+{
+	int ret = 0;
+	struct sensor_threshold *threshold = NULL;
+
+	if (!tz->ops->get_trip_type)
+		return -EPERM;
+
+	get_trip_threshold(tz, trip, &threshold);
+	if (threshold) {
+		threshold->temp = temp;
+		ret = sensor_set_trip(tz->sensor.sensor_id, threshold);
+	} else {
+		ret = tz->ops->set_trip_temp(tz, trip, temp);
+	}
+
+	return ret;
+}
+
+int sensor_init(struct thermal_zone_device *tz)
+{
+	struct sensor_info *sensor = &tz->sensor;
+
+	sensor->sensor_id = tz->id;
+	sensor->tz = tz;
+	sensor->threshold_min = INT_MIN;
+	sensor->threshold_max = INT_MAX;
+	sensor->max_idx = -1;
+	sensor->min_idx = -1;
+	sensor->deregister_active = false;
+	mutex_init(&sensor->lock);
+	INIT_LIST_HEAD_RCU(&sensor->sensor_list);
+	INIT_LIST_HEAD_RCU(&sensor->threshold_list);
+	INIT_LIST_HEAD(&tz->tz_threshold[0].list);
+	INIT_LIST_HEAD(&tz->tz_threshold[1].list);
+	tz->tz_threshold[0].notify = tz_notify_trip;
+	tz->tz_threshold[0].data = tz;
+	tz->tz_threshold[0].trip = THERMAL_TRIP_CONFIGURABLE_HI;
+	tz->tz_threshold[1].notify = tz_notify_trip;
+	tz->tz_threshold[1].data = tz;
+	tz->tz_threshold[1].trip = THERMAL_TRIP_CONFIGURABLE_LOW;
+	list_add_rcu(&sensor->sensor_list, &sensor_info_list);
+	INIT_WORK(&sensor->work, sensor_update_work);
+	init_completion(&sensor->sysfs_notify_complete);
+	sensor->sysfs_notify_thread = kthread_run(sensor_sysfs_notify,
+						  &tz->sensor,
+						  "therm_core:notify%d",
+						  tz->id);
+	if (IS_ERR(sensor->sysfs_notify_thread))
+		pr_err("Failed to create notify thread %d\n", tz->id);
+
+	return 0;
+}
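+
+/*
+ * Editor's illustrative sketch (not part of the patch): a hypothetical
+ * client arming a high trip through the sensor API added above. The
+ * field names follow their use in this file; the temperature is an
+ * assumed example in millicelsius.
+ */
+#if 0
+static int example_notify(enum thermal_trip_type type, int temp, void *data)
+{
+	pr_info("trip %d crossed at %d\n", type, temp);
+	return 0;
+}
+
+static struct sensor_threshold example_thresh = {
+	.temp = 95000,
+	.trip = THERMAL_TRIP_CONFIGURABLE_HI,
+	.notify = example_notify,
+};
+
+static void example_arm(uint32_t sensor_id)
+{
+	/* sensor_set_trip() registers the threshold inactive; arm it next */
+	sensor_set_trip(sensor_id, &example_thresh);
+	sensor_activate_trip(sensor_id, &example_thresh, true);
+}
+#endif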
+
 static int get_idr(struct idr *idr, struct mutex *lock, int *id)
 {
 	int ret;
@@ -434,15 +836,19 @@
 	tz->ops->get_trip_temp(tz, trip, &trip_temp);
 
 	/* If we have not crossed the trip_temp, we do not care. */
-	if (trip_temp <= 0 || tz->temperature < trip_temp)
+	if (trip_type != THERMAL_TRIP_CRITICAL_LOW &&
+	    trip_type != THERMAL_TRIP_CONFIGURABLE_LOW) {
+		if (tz->temperature < trip_temp)
+			return;
+	} else
+		if (tz->temperature >= trip_temp)
 		return;
-
-	trace_thermal_zone_trip(tz, trip, trip_type);
 
 	if (tz->ops->notify)
 		tz->ops->notify(tz, trip, trip_type);
 
-	if (trip_type == THERMAL_TRIP_CRITICAL) {
+	if (trip_type == THERMAL_TRIP_CRITICAL ||
+	    trip_type == THERMAL_TRIP_CRITICAL_LOW) {
 		dev_emerg(&tz->device,
 			  "critical temperature reached(%d C),shutting down\n",
 			  tz->temperature / 1000);
@@ -460,7 +866,10 @@
 
 	tz->ops->get_trip_type(tz, trip, &type);
 
-	if (type == THERMAL_TRIP_CRITICAL || type == THERMAL_TRIP_HOT)
+	if (type == THERMAL_TRIP_CRITICAL || type == THERMAL_TRIP_HOT ||
+	    type == THERMAL_TRIP_CONFIGURABLE_HI ||
+	    type == THERMAL_TRIP_CONFIGURABLE_LOW ||
+	    type == THERMAL_TRIP_CRITICAL_LOW)
 		handle_critical_trips(tz, trip, type);
 	else
 		handle_non_critical_trips(tz, trip, type);
@@ -561,9 +970,6 @@
 {
 	int count;
 
-	if (atomic_read(&in_suspend))
-		return;
-
 	if (!tz->ops->get_temp)
 		return;
 
@@ -673,6 +1079,12 @@
 		return sprintf(buf, "critical\n");
 	case THERMAL_TRIP_HOT:
 		return sprintf(buf, "hot\n");
+	case THERMAL_TRIP_CONFIGURABLE_HI:
+		return sprintf(buf, "configurable_hi\n");
+	case THERMAL_TRIP_CONFIGURABLE_LOW:
+		return sprintf(buf, "configurable_low\n");
+	case THERMAL_TRIP_CRITICAL_LOW:
+		return sprintf(buf, "critical_low\n");
 	case THERMAL_TRIP_PASSIVE:
 		return sprintf(buf, "passive\n");
 	case THERMAL_TRIP_ACTIVE:
@@ -683,12 +1095,57 @@
 }
 
 static ssize_t
+trip_point_type_activate(struct device *dev, struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	struct thermal_zone_device *tz = to_thermal_zone(dev);
+	int trip, result = 0;
+	bool activate;
+	struct sensor_threshold *threshold = NULL;
+
+	if (!tz->ops->get_trip_type ||
+		!tz->ops->activate_trip_type) {
+		result = -EPERM;
+		goto trip_activate_exit;
+	}
+
+	if (!sscanf(attr->attr.name, "trip_point_%d_type", &trip)) {
+		result = -EINVAL;
+		goto trip_activate_exit;
+	}
+
+	if (!strcmp(buf, "enabled")) {
+		activate = true;
+	} else if (!strcmp(buf, "disabled")) {
+		activate = false;
+	} else {
+		result = -EINVAL;
+		goto trip_activate_exit;
+	}
+
+	get_trip_threshold(tz, trip, &threshold);
+	if (threshold)
+		result = sensor_activate_trip(tz->sensor.sensor_id,
+			threshold, activate);
+	else
+		result = tz->ops->activate_trip_type(tz, trip,
+			activate ? THERMAL_TRIP_ACTIVATION_ENABLED :
+			THERMAL_TRIP_ACTIVATION_DISABLED);
+
+trip_activate_exit:
+	if (result)
+		return result;
+
+	return count;
+}
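/*
 * Editor's note (usage sketch, not part of the patch): with the store
 * callback above wired up via S_IWUSR below, the trip type attribute
 * accepts the strings "enabled" and "disabled".  Because the code uses
 * strcmp() on the raw buffer, a trailing newline must be suppressed:
 *
 *	echo -n enabled > /sys/class/thermal/thermal_zone0/trip_point_1_type
 */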
+
+static ssize_t
 trip_point_temp_store(struct device *dev, struct device_attribute *attr,
 		     const char *buf, size_t count)
 {
 	struct thermal_zone_device *tz = to_thermal_zone(dev);
 	int trip, ret;
-	unsigned long temperature;
+	long temperature;
 
 	if (!tz->ops->set_trip_temp)
 		return -EPERM;
@@ -696,10 +1153,10 @@
 	if (!sscanf(attr->attr.name, "trip_point_%d_temp", &trip))
 		return -EINVAL;
 
-	if (kstrtoul(buf, 10, &temperature))
+	if (kstrtol(buf, 10, &temperature))
 		return -EINVAL;
 
-	ret = tz->ops->set_trip_temp(tz, trip, temperature);
+	ret = sensor_set_trip_temp(tz, trip, temperature);
 
 	return ret ? ret : count;
 }
@@ -719,7 +1176,6 @@
 		return -EINVAL;
 
 	ret = tz->ops->get_trip_temp(tz, trip, &temperature);
-
 	if (ret)
 		return ret;
 
@@ -1702,8 +2158,9 @@
 		sysfs_attr_init(&tz->trip_type_attrs[indx].attr.attr);
 		tz->trip_type_attrs[indx].attr.attr.name =
 						tz->trip_type_attrs[indx].name;
-		tz->trip_type_attrs[indx].attr.attr.mode = S_IRUGO;
+		tz->trip_type_attrs[indx].attr.attr.mode = S_IRUGO | S_IWUSR;
 		tz->trip_type_attrs[indx].attr.show = trip_point_type_show;
+		tz->trip_type_attrs[indx].attr.store = trip_point_type_activate;
 
 		device_create_file(&tz->device,
 				   &tz->trip_type_attrs[indx].attr);
@@ -1933,7 +2390,8 @@
 	}
 
 	mutex_lock(&thermal_list_lock);
-	list_add_tail(&tz->node, &thermal_tz_list);
+	list_add_tail_rcu(&tz->node, &thermal_tz_list);
+	sensor_init(tz);
 	mutex_unlock(&thermal_list_lock);
 
 	/* Bind cooling devices for this zone */
@@ -1980,7 +2438,7 @@
 		mutex_unlock(&thermal_list_lock);
 		return;
 	}
-	list_del(&tz->node);
+	list_del_rcu(&tz->node);
 
 	/* Unbind all cdevs associated with 'this' thermal zone */
 	list_for_each_entry(cdev, &thermal_cdev_list, node) {
@@ -2015,6 +2473,13 @@
 	thermal_set_governor(tz, NULL);
 
 	thermal_remove_hwmon_sysfs(tz);
+	flush_work(&tz->sensor.work);
+	tz->sensor.deregister_active = true;
+	complete(&tz->sensor.sysfs_notify_complete);
+	kthread_stop(tz->sensor.sysfs_notify_thread);
+	mutex_lock(&thermal_list_lock);
+	list_del_rcu(&tz->sensor.sensor_list);
+	mutex_unlock(&thermal_list_lock);
 	release_idr(&thermal_tz_idr, &thermal_idr_lock, tz->id);
 	idr_destroy(&tz->idr);
 	mutex_destroy(&tz->lock);
@@ -2041,13 +2506,13 @@
 	if (!name)
 		goto exit;
 
-	mutex_lock(&thermal_list_lock);
-	list_for_each_entry(pos, &thermal_tz_list, node)
+	rcu_read_lock();
+	list_for_each_entry_rcu(pos, &thermal_tz_list, node)
 		if (!strncasecmp(name, pos->type, THERMAL_NAME_LENGTH)) {
 			found++;
 			ref = pos;
 		}
-	mutex_unlock(&thermal_list_lock);
+	rcu_read_unlock();
 
 	/* nothing has been found, thus an error code for it */
 	if (found == 0)
@@ -2184,36 +2649,6 @@
 	thermal_gov_power_allocator_unregister();
 }
 
-static int thermal_pm_notify(struct notifier_block *nb,
-				unsigned long mode, void *_unused)
-{
-	struct thermal_zone_device *tz;
-
-	switch (mode) {
-	case PM_HIBERNATION_PREPARE:
-	case PM_RESTORE_PREPARE:
-	case PM_SUSPEND_PREPARE:
-		atomic_set(&in_suspend, 1);
-		break;
-	case PM_POST_HIBERNATION:
-	case PM_POST_RESTORE:
-	case PM_POST_SUSPEND:
-		atomic_set(&in_suspend, 0);
-		list_for_each_entry(tz, &thermal_tz_list, node) {
-			thermal_zone_device_reset(tz);
-			thermal_zone_device_update(tz);
-		}
-		break;
-	default:
-		break;
-	}
-	return 0;
-}
-
-static struct notifier_block thermal_pm_nb = {
-	.notifier_call = thermal_pm_notify,
-};
-
 static int __init thermal_init(void)
 {
 	int result;
@@ -2234,11 +2669,6 @@
 	if (result)
 		goto exit_netlink;
 
-	result = register_pm_notifier(&thermal_pm_nb);
-	if (result)
-		pr_warn("Thermal: Can not register suspend notifier, return %d\n",
-			result);
-
 	return 0;
 
 exit_netlink:
@@ -2258,7 +2688,6 @@
 
 static void __exit thermal_exit(void)
 {
-	unregister_pm_notifier(&thermal_pm_nb);
 	of_thermal_destroy_zones();
 	genetlink_exit();
 	class_unregister(&thermal_class);
diff -ruw linux-4.4.115/drivers/tty/serial/earlycon.c linux-4.4.115-fbx/drivers/tty/serial/earlycon.c
--- linux-4.4.115/drivers/tty/serial/earlycon.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/tty/serial/earlycon.c	2019-01-22 16:16:27.191279804 +0100
@@ -19,7 +19,6 @@
 #include <linux/io.h>
 #include <linux/serial_core.h>
 #include <linux/sizes.h>
-#include <linux/mod_devicetable.h>
 
 #ifdef CONFIG_FIX_EARLYCON_MEM
 #include <asm/fixmap.h>
@@ -37,13 +36,6 @@
 	.con = &early_con,
 };
 
-extern struct earlycon_id __earlycon_table[];
-static const struct earlycon_id __earlycon_table_sentinel
-	__used __section(__earlycon_table_end);
-
-static const struct of_device_id __earlycon_of_table_sentinel
-	__used __section(__earlycon_of_table_end);
-
 static void __iomem * __init earlycon_map(unsigned long paddr, size_t size)
 {
 	void __iomem *base;
@@ -159,7 +151,7 @@
 	if (early_con.flags & CON_ENABLED)
 		return -EALREADY;
 
-	for (match = __earlycon_table; match->name[0]; match++) {
+	for (match = __earlycon_table; match < __earlycon_table_end; match++) {
 		size_t len = strlen(match->name);
 
 		if (strncmp(buf, match->name, len))
diff -ruw linux-4.4.115/drivers/tty/serial/Kconfig linux-4.4.115-fbx/drivers/tty/serial/Kconfig
--- linux-4.4.115/drivers/tty/serial/Kconfig	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/tty/serial/Kconfig	2019-01-22 16:16:27.183279731 +0100
@@ -1054,6 +1054,18 @@
 	select SERIAL_CORE_CONSOLE
 	select SERIAL_EARLYCON
 
+config SERIAL_MSM_HS
+	tristate "MSM UART High Speed: Serial Driver"
+	depends on ARCH_QCOM
+	select SERIAL_CORE
+	help
+	  If you have a machine based on the MSM family of SoCs, you
+	  can enable its on-board high-speed serial port by enabling
+	  this option.
+
+	  Choose M here to compile it as a module. The module will be
+	  called msm_serial_hs.
+
 config SERIAL_VT8500
 	bool "VIA VT8500 on-chip serial port support"
 	depends on ARCH_VT8500
@@ -1408,6 +1420,14 @@
 	  (the system  console is the device which receives all kernel messages and
 	  warnings and which allows logins in single user mode).
 
+config SERIAL_MSM_SMD
+	bool "Enable tty device interface for some SMD ports"
+	default n
+	depends on MSM_SMD
+	help
+	  Enables userspace clients to read and write to some streaming SMD
+	  ports via a tty device interface on MSM chipsets.
+
 config SERIAL_MXS_AUART
 	depends on ARCH_MXS || COMPILE_TEST
 	tristate "MXS AUART support"
diff -ruw linux-4.4.115/drivers/tty/serial/Makefile linux-4.4.115-fbx/drivers/tty/serial/Makefile
--- linux-4.4.115/drivers/tty/serial/Makefile	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/tty/serial/Makefile	2019-01-22 16:16:27.183279731 +0100
@@ -62,6 +62,7 @@
 obj-$(CONFIG_SERIAL_ATMEL) += atmel_serial.o
 obj-$(CONFIG_SERIAL_UARTLITE) += uartlite.o
 obj-$(CONFIG_SERIAL_MSM) += msm_serial.o
+obj-$(CONFIG_SERIAL_MSM_HS) += msm_serial_hs.o
 obj-$(CONFIG_SERIAL_NETX) += netx-serial.o
 obj-$(CONFIG_SERIAL_OF_PLATFORM) += of_serial.o
 obj-$(CONFIG_SERIAL_OF_PLATFORM_NWPSERIAL) += nwpserial.o
@@ -79,6 +80,7 @@
 obj-$(CONFIG_SERIAL_VT8500) += vt8500_serial.o
 obj-$(CONFIG_SERIAL_IFX6X60)  	+= ifx6x60.o
 obj-$(CONFIG_SERIAL_PCH_UART)	+= pch_uart.o
+obj-$(CONFIG_SERIAL_MSM_SMD)	+= msm_smd_tty.o
 obj-$(CONFIG_SERIAL_MXS_AUART) += mxs-auart.o
 obj-$(CONFIG_SERIAL_LANTIQ)	+= lantiq.o
 obj-$(CONFIG_SERIAL_XILINX_PS_UART) += xilinx_uartps.o
diff -ruw linux-4.4.115/drivers/tty/serial/msm_serial.c linux-4.4.115-fbx/drivers/tty/serial/msm_serial.c
--- linux-4.4.115/drivers/tty/serial/msm_serial.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/tty/serial/msm_serial.c	2019-10-29 09:26:24.941215880 +0100
@@ -19,28 +19,142 @@
 # define SUPPORT_SYSRQ
 #endif
 
+#include <linux/kernel.h>
 #include <linux/atomic.h>
 #include <linux/dma-mapping.h>
 #include <linux/dmaengine.h>
-#include <linux/hrtimer.h>
 #include <linux/module.h>
 #include <linux/io.h>
 #include <linux/ioport.h>
-#include <linux/irq.h>
+#include <linux/interrupt.h>
 #include <linux/init.h>
 #include <linux/console.h>
 #include <linux/tty.h>
 #include <linux/tty_flip.h>
 #include <linux/serial_core.h>
-#include <linux/serial.h>
 #include <linux/slab.h>
 #include <linux/clk.h>
 #include <linux/platform_device.h>
 #include <linux/delay.h>
 #include <linux/of.h>
 #include <linux/of_device.h>
+#include <linux/wait.h>
 
-#include "msm_serial.h"
+#define UART_MR1			0x0000
+
+#define UART_MR1_AUTO_RFR_LEVEL0	0x3F
+#define UART_MR1_AUTO_RFR_LEVEL1	0x3FF00
+#define UART_DM_MR1_AUTO_RFR_LEVEL1	0xFFFFFF00
+#define UART_MR1_RX_RDY_CTL		BIT(7)
+#define UART_MR1_CTS_CTL		BIT(6)
+
+#define UART_MR2			0x0004
+#define UART_MR2_ERROR_MODE		BIT(6)
+#define UART_MR2_BITS_PER_CHAR		0x30
+#define UART_MR2_BITS_PER_CHAR_5	(0x0 << 4)
+#define UART_MR2_BITS_PER_CHAR_6	(0x1 << 4)
+#define UART_MR2_BITS_PER_CHAR_7	(0x2 << 4)
+#define UART_MR2_BITS_PER_CHAR_8	(0x3 << 4)
+#define UART_MR2_STOP_BIT_LEN_ONE	(0x1 << 2)
+#define UART_MR2_STOP_BIT_LEN_TWO	(0x3 << 2)
+#define UART_MR2_PARITY_MODE_NONE	0x0
+#define UART_MR2_PARITY_MODE_ODD	0x1
+#define UART_MR2_PARITY_MODE_EVEN	0x2
+#define UART_MR2_PARITY_MODE_SPACE	0x3
+#define UART_MR2_PARITY_MODE		0x3
+
+#define UART_CSR			0x0008
+
+#define UART_TF				0x000C
+#define UARTDM_TF			0x0070
+
+#define UART_CR				0x0010
+#define UART_CR_CMD_NULL		(0 << 4)
+#define UART_CR_CMD_RESET_RX		(1 << 4)
+#define UART_CR_CMD_RESET_TX		(2 << 4)
+#define UART_CR_CMD_RESET_ERR		(3 << 4)
+#define UART_CR_CMD_RESET_BREAK_INT	(4 << 4)
+#define UART_CR_CMD_START_BREAK		(5 << 4)
+#define UART_CR_CMD_STOP_BREAK		(6 << 4)
+#define UART_CR_CMD_RESET_CTS		(7 << 4)
+#define UART_CR_CMD_RESET_STALE_INT	(8 << 4)
+#define UART_CR_CMD_PACKET_MODE		(9 << 4)
+#define UART_CR_CMD_MODE_RESET		(12 << 4)
+#define UART_CR_CMD_SET_RFR		(13 << 4)
+#define UART_CR_CMD_RESET_RFR		(14 << 4)
+#define UART_CR_CMD_PROTECTION_EN	(16 << 4)
+#define UART_CR_CMD_STALE_EVENT_DISABLE	(6 << 8)
+#define UART_CR_CMD_STALE_EVENT_ENABLE	(80 << 4)
+#define UART_CR_CMD_FORCE_STALE		(4 << 8)
+#define UART_CR_CMD_RESET_TX_READY	(3 << 8)
+#define UART_CR_TX_DISABLE		BIT(3)
+#define UART_CR_TX_ENABLE		BIT(2)
+#define UART_CR_RX_DISABLE		BIT(1)
+#define UART_CR_RX_ENABLE		BIT(0)
+#define UART_CR_CMD_RESET_RXBREAK_START	((1 << 11) | (2 << 4))
+
+#define UART_IMR			0x0014
+#define UART_IMR_TXLEV			BIT(0)
+#define UART_IMR_RXSTALE		BIT(3)
+#define UART_IMR_RXLEV			BIT(4)
+#define UART_IMR_DELTA_CTS		BIT(5)
+#define UART_IMR_CURRENT_CTS		BIT(6)
+#define UART_IMR_RXBREAK_START		BIT(10)
+
+#define UART_IPR_RXSTALE_LAST		0x20
+#define UART_IPR_STALE_LSB		0x1F
+#define UART_IPR_STALE_TIMEOUT_MSB	0x3FF80
+#define UART_DM_IPR_STALE_TIMEOUT_MSB	0xFFFFFF80
+
+#define UART_IPR			0x0018
+#define UART_TFWR			0x001C
+#define UART_RFWR			0x0020
+#define UART_HCR			0x0024
+
+#define UART_MREG			0x0028
+#define UART_NREG			0x002C
+#define UART_DREG			0x0030
+#define UART_MNDREG			0x0034
+#define UART_IRDA			0x0038
+#define UART_MISR_MODE			0x0040
+#define UART_MISR_RESET			0x0044
+#define UART_MISR_EXPORT		0x0048
+#define UART_MISR_VAL			0x004C
+#define UART_TEST_CTRL			0x0050
+
+#define UART_SR				0x0008
+#define UART_SR_HUNT_CHAR		BIT(7)
+#define UART_SR_RX_BREAK		BIT(6)
+#define UART_SR_PAR_FRAME_ERR		BIT(5)
+#define UART_SR_OVERRUN			BIT(4)
+#define UART_SR_TX_EMPTY		BIT(3)
+#define UART_SR_TX_READY		BIT(2)
+#define UART_SR_RX_FULL			BIT(1)
+#define UART_SR_RX_READY		BIT(0)
+
+#define UART_RF				0x000C
+#define UARTDM_RF			0x0070
+#define UART_MISR			0x0010
+#define UART_ISR			0x0014
+#define UART_ISR_TX_READY		BIT(7)
+
+#define UARTDM_RXFS			0x50
+#define UARTDM_RXFS_BUF_SHIFT		0x7
+#define UARTDM_RXFS_BUF_MASK		0x7
+
+#define UARTDM_DMEN			0x3C
+#define UARTDM_DMEN_RX_SC_ENABLE	BIT(5)
+#define UARTDM_DMEN_TX_SC_ENABLE	BIT(4)
+
+#define UARTDM_DMEN_TX_BAM_ENABLE	BIT(2)	/* UARTDM_1P4 */
+#define UARTDM_DMEN_TX_DM_ENABLE	BIT(0)	/* < UARTDM_1P4 */
+
+#define UARTDM_DMEN_RX_BAM_ENABLE	BIT(3)	/* UARTDM_1P4 */
+#define UARTDM_DMEN_RX_DM_ENABLE	BIT(1)	/* < UARTDM_1P4 */
+
+#define UARTDM_DMRX			0x34
+#define UARTDM_NCF_TX			0x40
+#define UARTDM_RX_TOTAL_SNAP		0x38
 
 #define UARTDM_BURST_SIZE	16   /* in bytes */
 #define UARTDM_TX_AIGN(x)	((x) & ~0x3) /* valid for > 1p3 */
@@ -78,10 +192,65 @@
 	struct msm_dma		rx_dma;
 };
 
+#define UART_TO_MSM(uart_port)	container_of(uart_port, struct msm_port, uart)
+
+static
+void msm_write(struct uart_port *port, unsigned int val, unsigned int off)
+{
+	writel_relaxed_no_log(val, port->membase + off);
+}
+
+static
+unsigned int msm_read(struct uart_port *port, unsigned int off)
+{
+	return readl_relaxed_no_log(port->membase + off);
+}
+
+/*
+ * Setup the MND registers to use the TCXO clock.
+ */
+static void msm_serial_set_mnd_regs_tcxo(struct uart_port *port)
+{
+	msm_write(port, 0x06, UART_MREG);
+	msm_write(port, 0xF1, UART_NREG);
+	msm_write(port, 0x0F, UART_DREG);
+	msm_write(port, 0x1A, UART_MNDREG);
+	port->uartclk = 1843200;
+}
+
+/*
+ * Setup the MND registers to use the TCXO clock divided by 4.
+ */
+static void msm_serial_set_mnd_regs_tcxoby4(struct uart_port *port)
+{
+	msm_write(port, 0x18, UART_MREG);
+	msm_write(port, 0xF6, UART_NREG);
+	msm_write(port, 0x0F, UART_DREG);
+	msm_write(port, 0x0A, UART_MNDREG);
+	port->uartclk = 1843200;
+}
+
+static void msm_serial_set_mnd_regs(struct uart_port *port)
+{
+	struct msm_port *msm_port = UART_TO_MSM(port);
+
+	/*
+	 * These registers don't exist on UARTDM hardware, so we
+	 * change the clk input rate instead.
+	 */
+	if (msm_port->is_uartdm)
+		return;
+
+	if (port->uartclk == 19200000)
+		msm_serial_set_mnd_regs_tcxo(port);
+	else if (port->uartclk == 4800000)
+		msm_serial_set_mnd_regs_tcxoby4(port);
+}
+
 static void msm_handle_tx(struct uart_port *port);
 static void msm_start_rx_dma(struct msm_port *msm_port);
 
-void msm_stop_dma(struct uart_port *port, struct msm_dma *dma)
+static void msm_stop_dma(struct uart_port *port, struct msm_dma *dma)
 {
 	struct device *dev = port->dev;
 	unsigned int mapped;
@@ -134,15 +303,17 @@
 	struct device *dev = msm_port->uart.dev;
 	struct dma_slave_config conf;
 	struct msm_dma *dma;
+	struct dma_chan *dma_chan;
 	u32 crci = 0;
 	int ret;
 
 	dma = &msm_port->tx_dma;
 
 	/* allocate DMA resources, if available */
-	dma->chan = dma_request_slave_channel_reason(dev, "tx");
-	if (IS_ERR(dma->chan))
+	dma_chan = dma_request_slave_channel_reason(dev, "tx");
+	if (IS_ERR(dma_chan))
 		goto no_tx;
+	dma->chan = dma_chan;
 
 	of_property_read_u32(dev->of_node, "qcom,tx-crci", &crci);
 
@@ -177,15 +348,17 @@
 	struct device *dev = msm_port->uart.dev;
 	struct dma_slave_config conf;
 	struct msm_dma *dma;
+	struct dma_chan *dma_chan;
 	u32 crci = 0;
 	int ret;
 
 	dma = &msm_port->rx_dma;
 
 	/* allocate DMA resources, if available */
-	dma->chan = dma_request_slave_channel_reason(dev, "rx");
-	if (IS_ERR(dma->chan))
+	dma_chan = dma_request_slave_channel_reason(dev, "rx");
+	if (IS_ERR(dma_chan))
 		goto no_rx;
+	dma->chan = dma_chan;
 
 	of_property_read_u32(dev->of_node, "qcom,rx-crci", &crci);
 
@@ -388,10 +561,6 @@
 	val &= ~dma->enable_bit;
 	msm_write(port, val, UARTDM_DMEN);
 
-	/* Restore interrupts */
-	msm_port->imr |= UART_IMR_RXLEV | UART_IMR_RXSTALE;
-	msm_write(port, msm_port->imr, UART_IMR);
-
 	if (msm_read(port, UART_SR) & UART_SR_OVERRUN) {
 		port->icount.overrun++;
 		tty_insert_flip_char(tport, 0, TTY_OVERRUN);
@@ -861,37 +1030,72 @@
 };
 
 static const struct msm_baud_map *
-msm_find_best_baud(struct uart_port *port, unsigned int baud)
+msm_find_best_baud(struct uart_port *port, unsigned int baud,
+		   unsigned long *rate)
 {
-	unsigned int i, divisor;
-	const struct msm_baud_map *entry;
+	struct msm_port *msm_port = UART_TO_MSM(port);
+	unsigned int divisor, result;
+	unsigned long target, old, best_rate = 0, diff, best_diff = ULONG_MAX;
+	const struct msm_baud_map *entry, *end, *best;
 	static const struct msm_baud_map table[] = {
-		{ 1536, 0x00,  1 },
-		{  768, 0x11,  1 },
-		{  384, 0x22,  1 },
-		{  192, 0x33,  1 },
-		{   96, 0x44,  1 },
-		{   48, 0x55,  1 },
-		{   32, 0x66,  1 },
-		{   24, 0x77,  1 },
-		{   16, 0x88,  1 },
-		{   12, 0x99,  6 },
-		{    8, 0xaa,  6 },
-		{    6, 0xbb,  6 },
-		{    4, 0xcc,  6 },
-		{    3, 0xdd,  8 },
-		{    2, 0xee, 16 },
 		{    1, 0xff, 31 },
-		{    0, 0xff, 31 },
+		{    2, 0xee, 16 },
+		{    3, 0xdd,  8 },
+		{    4, 0xcc,  6 },
+		{    6, 0xbb,  6 },
+		{    8, 0xaa,  6 },
+		{   12, 0x99,  6 },
+		{   16, 0x88,  1 },
+		{   24, 0x77,  1 },
+		{   32, 0x66,  1 },
+		{   48, 0x55,  1 },
+		{   96, 0x44,  1 },
+		{  192, 0x33,  1 },
+		{  384, 0x22,  1 },
+		{  768, 0x11,  1 },
+		{ 1536, 0x00,  1 },
 	};
 
-	divisor = uart_get_divisor(port, baud);
+	best = table; /* Default to smallest divider */
+	target = clk_round_rate(msm_port->clk, 16 * baud);
+	divisor = DIV_ROUND_CLOSEST(target, 16 * baud);
+
+	end = table + ARRAY_SIZE(table);
+	entry = table;
+	while (entry < end) {
+		if (entry->divisor <= divisor) {
+			result = target / entry->divisor / 16;
+			diff = abs(result - baud);
+
+			/* Keep track of best entry */
+			if (diff < best_diff) {
+				best_diff = diff;
+				best = entry;
+				best_rate = target;
+			}
 
-	for (i = 0, entry = table; i < ARRAY_SIZE(table); i++, entry++)
-		if (entry->divisor <= divisor)
+			if (result == baud)
+				break;
+		} else if (entry->divisor > divisor) {
+			old = target;
+			target = clk_round_rate(msm_port->clk, old + 1);
+			/*
+			 * The rate didn't get any faster so we can't do
+			 * better at dividing it down
+			 */
+			if (target == old)
+				break;
 
-	return entry; /* Default to smallest divider */
+			/* Start the divisor search over at this new rate */
+			entry = table;
+			divisor = DIV_ROUND_CLOSEST(target, 16 * baud);
+			continue;
+		}
+		entry++;
+	}
+
+	*rate = best_rate;
+	return best;
 }
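/*
 * Editor's note (worked example, not part of the patch): for baud 115200
 * the code first asks the clk framework for a rate near 16 * 115200 =
 * 1843200 Hz.  If clk_round_rate() returns exactly 1843200, divisor is
 * DIV_ROUND_CLOSEST(1843200, 16 * 115200) = 1, the first entry
 * { 1, 0xff, 31 } yields result = 1843200 / 1 / 16 = 115200, an exact
 * match, and the search stops.  If the clock can only provide a faster
 * rate, the table is rescanned at each new rate and the entry with the
 * smallest |result - baud| wins, with *rate set to the matching clk rate.
 */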
 
 static int msm_set_baud_rate(struct uart_port *port, unsigned int baud,
@@ -900,22 +1104,20 @@
 	unsigned int rxstale, watermark, mask;
 	struct msm_port *msm_port = UART_TO_MSM(port);
 	const struct msm_baud_map *entry;
-	unsigned long flags;
-
-	entry = msm_find_best_baud(port, baud);
-
-	msm_write(port, entry->code, UART_CSR);
-
-	if (baud > 460800)
-		port->uartclk = baud * 16;
+	unsigned long flags, rate;
 
 	flags = *saved_flags;
 	spin_unlock_irqrestore(&port->lock, flags);
 
-	clk_set_rate(msm_port->clk, port->uartclk);
+	entry = msm_find_best_baud(port, baud, &rate);
+	clk_set_rate(msm_port->clk, rate);
+	baud = rate / 16 / entry->divisor;
 
 	spin_lock_irqsave(&port->lock, flags);
 	*saved_flags = flags;
+	port->uartclk = rate;
+
+	msm_write(port, entry->code, UART_CSR);
 
 	/* RX stale watermark */
 	rxstale = entry->rxstale;
@@ -959,15 +1161,6 @@
 	return baud;
 }
 
-static void msm_init_clock(struct uart_port *port)
-{
-	struct msm_port *msm_port = UART_TO_MSM(port);
-
-	clk_prepare_enable(msm_port->clk);
-	clk_prepare_enable(msm_port->pclk);
-	msm_serial_set_mnd_regs(port);
-}
-
 static int msm_startup(struct uart_port *port)
 {
 	struct msm_port *msm_port = UART_TO_MSM(port);
@@ -977,12 +1170,19 @@
 	snprintf(msm_port->name, sizeof(msm_port->name),
 		 "msm_serial%d", port->line);
 
-	ret = request_irq(port->irq, msm_uart_irq, IRQF_TRIGGER_HIGH,
-			  msm_port->name, port);
-	if (unlikely(ret))
+	/*
+	 * UART clk must be kept enabled to
+	 * avoid losing received characters
+	 */
+	ret = clk_prepare_enable(msm_port->clk);
+	if (ret)
 		return ret;
 
-	msm_init_clock(port);
+	ret = clk_prepare_enable(msm_port->pclk);
+	if (ret)
+		goto err_pclk;
+
+	msm_serial_set_mnd_regs(port);
 
 	if (likely(port->fifosize > 12))
 		rfr_level = port->fifosize - 12;
@@ -1008,7 +1208,23 @@
 		msm_request_rx_dma(msm_port, msm_port->uart.mapbase);
 	}
 
+	ret = request_irq(port->irq, msm_uart_irq, IRQF_TRIGGER_HIGH,
+			  msm_port->name, port);
+	if (unlikely(ret))
+		goto err_irq;
+
 	return 0;
+
+err_irq:
+	if (msm_port->is_uartdm)
+		msm_release_dma(msm_port);
+
+	clk_disable_unprepare(msm_port->pclk);
+
+err_pclk:
+	clk_disable_unprepare(msm_port->clk);
+
+	return ret;
 }
 
 static void msm_shutdown(struct uart_port *port)
@@ -1021,6 +1237,7 @@
 	if (msm_port->is_uartdm)
 		msm_release_dma(msm_port);
 
+	clk_disable_unprepare(msm_port->pclk);
 	clk_disable_unprepare(msm_port->clk);
 
 	free_irq(port->irq, port);
@@ -1187,8 +1404,16 @@
 
 	switch (state) {
 	case 0:
-		clk_prepare_enable(msm_port->clk);
-		clk_prepare_enable(msm_port->pclk);
+		/*
+		 * UART clk must be kept enabled to
+		 * avoid losing received characters
+		 */
+		if (clk_prepare_enable(msm_port->clk))
+			return;
+		if (clk_prepare_enable(msm_port->pclk)) {
+			clk_disable_unprepare(msm_port->clk);
+			return;
+		}
 		break;
 	case 3:
 		clk_disable_unprepare(msm_port->clk);
@@ -1391,6 +1616,7 @@
 		int j;
 		unsigned int num_chars;
 		char buf[4] = { 0 };
+		const u32 *buffer;
 
 		if (is_uartdm)
 			num_chars = min(count - i, (unsigned int)sizeof(buf));
@@ -1415,7 +1641,8 @@
 		while (!(msm_read(port, UART_SR) & UART_SR_TX_READY))
 			cpu_relax();
 
-		iowrite32_rep(tf, buf, 1);
+		buffer = (const u32 *)buf;
+		writel_relaxed_no_log(*buffer, tf);
 		i += num_chars;
 	}
 	spin_unlock(&port->lock);
@@ -1451,7 +1678,7 @@
 	if (unlikely(!port->membase))
 		return -ENXIO;
 
-	msm_init_clock(port);
+	msm_serial_set_mnd_regs(port);
 
 	if (options)
 		uart_parse_options(options, &baud, &parity, &bits, &flow);
@@ -1478,7 +1705,6 @@
 	device->con->write = msm_serial_early_write;
 	return 0;
 }
-EARLYCON_DECLARE(msm_serial, msm_serial_early_console_setup);
 OF_EARLYCON_DECLARE(msm_serial, "qcom,msm-uart",
 		    msm_serial_early_console_setup);
 
@@ -1500,7 +1726,6 @@
 	device->con->write = msm_serial_early_write_dm;
 	return 0;
 }
-EARLYCON_DECLARE(msm_serial_dm, msm_serial_early_console_setup_dm);
 OF_EARLYCON_DECLARE(msm_serial_dm, "qcom,msm-uartdm",
 		    msm_serial_early_console_setup_dm);
 
@@ -1579,8 +1804,6 @@
 		msm_port->pclk = devm_clk_get(&pdev->dev, "iface");
 		if (IS_ERR(msm_port->pclk))
 			return PTR_ERR(msm_port->pclk);
-
-		clk_set_rate(msm_port->clk, 1843200);
 	}
 
 	port->uartclk = clk_get_rate(msm_port->clk);
@@ -1617,12 +1840,37 @@
 };
 MODULE_DEVICE_TABLE(of, msm_match_table);
 
+#ifdef CONFIG_PM_SLEEP
+static int msm_serial_suspend(struct device *dev)
+{
+	struct uart_port *port = dev_get_drvdata(dev);
+
+	uart_suspend_port(&msm_uart_driver, port);
+
+	return 0;
+}
+
+static int msm_serial_resume(struct device *dev)
+{
+	struct uart_port *port = dev_get_drvdata(dev);
+
+	uart_resume_port(&msm_uart_driver, port);
+
+	return 0;
+}
+#endif
+
+static const struct dev_pm_ops msm_serial_pm_ops = {
+	SET_SYSTEM_SLEEP_PM_OPS(msm_serial_suspend, msm_serial_resume)
+};
+
 static struct platform_driver msm_platform_driver = {
 	.remove = msm_serial_remove,
 	.probe = msm_serial_probe,
 	.driver = {
 		.name = "msm_serial",
 		.of_match_table = msm_match_table,
+		.pm = &msm_serial_pm_ops,
 	},
 };
 
diff -ruw linux-4.4.115/drivers/tty/serial/serial_core.c linux-4.4.115-fbx/drivers/tty/serial/serial_core.c
--- linux-4.4.115/drivers/tty/serial/serial_core.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/tty/serial/serial_core.c	2019-01-22 16:16:27.207279948 +0100
@@ -95,6 +95,9 @@
 	struct uart_state *state = tty->driver_data;
 	struct uart_port *port = state->uart_port;
 
+	if (port->ops->wake_peer)
+		port->ops->wake_peer(port);
+
 	if (!uart_tx_stopped(port))
 		port->ops->start_tx(port);
 }
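/*
 * Editor's note (sketch, not part of the patch): wake_peer is an optional
 * uart_ops hook added by this patch; a driver whose UART is shared with a
 * companion chip (e.g. a Bluetooth controller) can use it to wake the
 * remote side before transmission starts.  my_wake_peer() and my_uart_ops
 * are hypothetical names; the other mandatory uart_ops callbacks are
 * omitted for brevity.
 */
static void my_wake_peer(struct uart_port *port)
{
	/* vote for clocks / assert a wakeup signal toward the peer here */
}

static const struct uart_ops my_uart_ops = {
	.wake_peer	= my_wake_peer,
	/* ... remaining uart_ops callbacks ... */
};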
diff -ruw linux-4.4.115/drivers/tty/sysrq.c linux-4.4.115-fbx/drivers/tty/sysrq.c
--- linux-4.4.115/drivers/tty/sysrq.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/tty/sysrq.c	2019-01-22 16:16:27.219280057 +0100
@@ -55,10 +55,11 @@
 static int __read_mostly sysrq_enabled = CONFIG_MAGIC_SYSRQ_DEFAULT_ENABLE;
 static bool __read_mostly sysrq_always_enabled;
 
-static bool sysrq_on(void)
+bool sysrq_on(void)
 {
 	return sysrq_enabled || sysrq_always_enabled;
 }
+EXPORT_SYMBOL(sysrq_on);
 
 /*
  * A value of 1 means 'all', other nonzero values are an op mask:
diff -ruw linux-4.4.115/drivers/uio/Kconfig linux-4.4.115-fbx/drivers/uio/Kconfig
--- linux-4.4.115/drivers/uio/Kconfig	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/uio/Kconfig	2019-01-22 16:16:27.223280093 +0100
@@ -155,4 +155,11 @@
 
 	  If you compile this as a module, it will be called uio_mf624.
 
+config UIO_MSM_SHAREDMEM
+	bool "MSM shared memory driver"
+	default n
+	help
+	  Provides clients with their respective allotted shared-memory
+	  addresses, which are used as transport buffers.
+
 endif
diff -ruw linux-4.4.115/drivers/uio/Makefile linux-4.4.115-fbx/drivers/uio/Makefile
--- linux-4.4.115/drivers/uio/Makefile	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/uio/Makefile	2019-01-22 16:16:27.223280093 +0100
@@ -9,3 +9,4 @@
 obj-$(CONFIG_UIO_PRUSS)         += uio_pruss.o
 obj-$(CONFIG_UIO_MF624)         += uio_mf624.o
 obj-$(CONFIG_UIO_FSL_ELBC_GPCM)	+= uio_fsl_elbc_gpcm.o
+obj-$(CONFIG_UIO_MSM_SHAREDMEM) += msm_sharedmem/
diff -ruw linux-4.4.115/drivers/usb/common/common.c linux-4.4.115-fbx/drivers/usb/common/common.c
--- linux-4.4.115/drivers/usb/common/common.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/usb/common/common.c	2019-01-22 16:16:27.235280202 +0100
@@ -34,6 +34,7 @@
 		[OTG_STATE_B_PERIPHERAL] = "b_peripheral",
 		[OTG_STATE_B_WAIT_ACON] = "b_wait_acon",
 		[OTG_STATE_B_HOST] = "b_host",
+		[OTG_STATE_B_SUSPEND] = "b_suspend",
 	};
 
 	if (state < 0 || state >= ARRAY_SIZE(names))
diff -ruw linux-4.4.115/drivers/usb/core/config.c linux-4.4.115-fbx/drivers/usb/core/config.c
--- linux-4.4.115/drivers/usb/core/config.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/usb/core/config.c	2019-10-29 09:26:24.965216115 +0100
@@ -732,18 +732,21 @@
 		return;
 
 	if (dev->rawdescriptors) {
-		for (i = 0; i < dev->descriptor.bNumConfigurations; i++)
+		for (i = 0; i < dev->descriptor.bNumConfigurations &&
+				i < USB_MAXCONFIG; i++)
 			kfree(dev->rawdescriptors[i]);
 
 		kfree(dev->rawdescriptors);
 		dev->rawdescriptors = NULL;
 	}
 
-	for (c = 0; c < dev->descriptor.bNumConfigurations; c++) {
+	for (c = 0; c < dev->descriptor.bNumConfigurations &&
+			c < USB_MAXCONFIG; c++) {
 		struct usb_host_config *cf = &dev->config[c];
 
 		kfree(cf->string);
-		for (i = 0; i < cf->desc.bNumInterfaces; i++) {
+		for (i = 0; i < cf->desc.bNumInterfaces &&
+				i < USB_MAXINTERFACES; i++) {
 			if (cf->intf_cache[i])
 				kref_put(&cf->intf_cache[i]->ref,
 					  usb_release_interface_cache);
@@ -984,6 +987,15 @@
 		case USB_PTM_CAP_TYPE:
 			dev->bos->ptm_cap =
 				(struct usb_ptm_cap_descriptor *)buffer;
+			break;
+		case USB_CAP_TYPE_CONFIG_SUMMARY:
+			/* one such desc per configuration */
+			if (!dev->bos->num_config_summary_desc)
+				dev->bos->config_summary =
+				(struct usb_config_summary_descriptor *)buffer;
+
+			dev->bos->num_config_summary_desc++;
+			break;
 		default:
 			break;
 		}
diff -ruw linux-4.4.115/drivers/usb/core/driver.c linux-4.4.115-fbx/drivers/usb/core/driver.c
--- linux-4.4.115/drivers/usb/core/driver.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/usb/core/driver.c	2019-10-29 09:26:24.965216115 +0100
@@ -1459,6 +1459,9 @@
 {
 	struct usb_device	*udev = to_usb_device(dev);
 
+	if (udev->bus->skip_resume && udev->state == USB_STATE_SUSPENDED)
+		return 0;
+
 	unbind_no_pm_drivers_interfaces(udev);
 
 	/* From now on we are sure all drivers support suspend/resume
@@ -1488,6 +1491,15 @@
 	struct usb_device	*udev = to_usb_device(dev);
 	int			status;
 
+	/*
+	 * Some buses would like to keep their devices in the suspended
+	 * state after system resume.  Their resume happens when a
+	 * remote wakeup is detected or an interface driver starts I/O.
+	 */
+	if (udev->bus->skip_resume)
+		return 0;
+
 	/* For all calls, take the device back to full power and
 	 * tell the PM core in case it was autosuspended previously.
 	 * Unbind the interfaces that will need rebinding later,
diff -ruw linux-4.4.115/drivers/usb/core/generic.c linux-4.4.115-fbx/drivers/usb/core/generic.c
--- linux-4.4.115/drivers/usb/core/generic.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/usb/core/generic.c	2019-10-29 09:26:24.969216154 +0100
@@ -19,6 +19,8 @@
 
 #include <linux/usb.h>
 #include <linux/usb/hcd.h>
+#include <linux/usb/audio.h>
+#include <linux/usb/audio-v3.h>
 #include "usb.h"
 
 static inline const char *plural(int n)
@@ -40,6 +42,36 @@
 		&& desc->bInterfaceProtocol == 1;
 }
 
+static int usb_audio_max_rev_config(struct usb_host_bos *bos)
+{
+	int desc_cnt, func_cnt, numfunc;
+	int num_cfg_desc;
+	struct usb_config_summary_descriptor *conf_summary;
+
+	if (!bos || !bos->config_summary)
+		goto done;
+
+	conf_summary = bos->config_summary;
+	num_cfg_desc = bos->num_config_summary_desc;
+
+	for (desc_cnt = 0; desc_cnt < num_cfg_desc; desc_cnt++) {
+		numfunc = conf_summary->bNumFunctions;
+		for (func_cnt = 0; func_cnt < numfunc; func_cnt++) {
+			/* look for BADD 3.0 */
+			if (conf_summary->cs_info[func_cnt].bClass ==
+				USB_CLASS_AUDIO &&
+				conf_summary->cs_info[func_cnt].bProtocol ==
+				UAC_VERSION_3 &&
+				conf_summary->cs_info[func_cnt].bSubClass !=
+				FULL_ADC_PROFILE)
+				return conf_summary->bConfigurationValue;
+		}
+	}
+
+done:
+	return -EINVAL;
+}
+
 int usb_choose_configuration(struct usb_device *udev)
 {
 	int i;
@@ -130,7 +162,6 @@
 			best = c;
 			break;
 		}
-
 		/* If all the remaining configs are vendor-specific,
 		 * choose the first one. */
 		else if (!best)
@@ -143,6 +174,9 @@
 			insufficient_power, plural(insufficient_power));
 
 	if (best) {
+		/* choose usb audio class preferred config if available */
+		i = usb_audio_max_rev_config(udev->bos);
+		if (i < 0)
 		i = best->desc.bConfigurationValue;
 		dev_dbg(&udev->dev,
 			"configuration #%d chosen from %d choice%s\n",
diff -ruw linux-4.4.115/drivers/usb/core/hcd.c linux-4.4.115-fbx/drivers/usb/core/hcd.c
--- linux-4.4.115/drivers/usb/core/hcd.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/usb/core/hcd.c	2019-10-29 09:26:24.969216154 +0100
@@ -2205,8 +2205,64 @@
 	return hcd->driver->get_frame_number (hcd);
 }
 
+int usb_hcd_sec_event_ring_setup(struct usb_device *udev,
+	unsigned intr_num)
+{
+	struct usb_hcd	*hcd = bus_to_hcd(udev->bus);
+
+	if (!HCD_RH_RUNNING(hcd))
+		return 0;
+
+	return hcd->driver->sec_event_ring_setup(hcd, intr_num);
+}
+
+int usb_hcd_sec_event_ring_cleanup(struct usb_device *udev,
+	unsigned intr_num)
+{
+	struct usb_hcd	*hcd = bus_to_hcd(udev->bus);
+
+	if (!HCD_RH_RUNNING(hcd))
+		return 0;
+
+	return hcd->driver->sec_event_ring_cleanup(hcd, intr_num);
+}
+
 /*-------------------------------------------------------------------------*/
 
+dma_addr_t
+usb_hcd_get_sec_event_ring_dma_addr(struct usb_device *udev,
+	unsigned intr_num)
+{
+	struct usb_hcd	*hcd = bus_to_hcd(udev->bus);
+
+	if (!HCD_RH_RUNNING(hcd))
+		return 0;
+
+	return hcd->driver->get_sec_event_ring_dma_addr(hcd, intr_num);
+}
+
+dma_addr_t
+usb_hcd_get_xfer_ring_dma_addr(struct usb_device *udev,
+		struct usb_host_endpoint *ep)
+{
+	struct usb_hcd	*hcd = bus_to_hcd(udev->bus);
+
+	if (!HCD_RH_RUNNING(hcd))
+		return 0;
+
+	return hcd->driver->get_xfer_ring_dma_addr(hcd, udev, ep);
+}
+
+int usb_hcd_get_controller_id(struct usb_device *udev)
+{
+	struct usb_hcd	*hcd = bus_to_hcd(udev->bus);
+
+	if (!HCD_RH_RUNNING(hcd))
+		return -EINVAL;
+
+	return hcd->driver->get_core_id(hcd);
+}
+
 #ifdef	CONFIG_PM
 
 int hcd_bus_suspend(struct usb_device *rhdev, pm_message_t msg)
@@ -2461,6 +2517,7 @@
 	}
 	spin_unlock_irqrestore (&hcd_root_hub_lock, flags);
 	/* Make sure that the other roothub is also deallocated. */
+	usb_atomic_notify_dead_bus(&hcd->self);
 }
 EXPORT_SYMBOL_GPL (usb_hc_died);
 
@@ -2946,6 +3003,9 @@
 	cancel_work_sync(&hcd->wakeup_work);
 #endif
 
+	/* handle any pending hub events before XHCI stops */
+	usb_flush_hub_wq();
+
 	mutex_lock(&usb_bus_list_lock);
 	usb_disconnect(&rhdev);		/* Sets rhdev to NULL */
 	mutex_unlock(&usb_bus_list_lock);
diff -ruw linux-4.4.115/drivers/usb/core/hub.c linux-4.4.115-fbx/drivers/usb/core/hub.c
--- linux-4.4.115/drivers/usb/core/hub.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/usb/core/hub.c	2019-10-29 09:26:24.969216154 +0100
@@ -48,6 +48,11 @@
 /* synchronize hub-port add/remove and peering operations */
 DEFINE_MUTEX(usb_port_peer_mutex);
 
+static bool skip_extended_resume_delay = true;
+module_param(skip_extended_resume_delay, bool, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(skip_extended_resume_delay,
+		"removes extra delay added to finish bus resume");
+
 /* cycle leds on hubs that aren't blinking for attention */
 static bool blinkenlights = 0;
 module_param(blinkenlights, bool, S_IRUGO);
@@ -620,6 +625,12 @@
 		kick_hub_wq(hub);
 }
 
+void usb_flush_hub_wq(void)
+{
+	flush_workqueue(hub_wq);
+}
+EXPORT_SYMBOL(usb_flush_hub_wq);
+
 /*
  * Let the USB core know that a USB 3.0 device has sent a Function Wake Device
  * Notification, which indicates it had initiated remote wakeup.
@@ -3376,7 +3387,9 @@
 		/* drive resume for USB_RESUME_TIMEOUT msec */
 		dev_dbg(&udev->dev, "usb %sresume\n",
 				(PMSG_IS_AUTO(msg) ? "auto-" : ""));
-		msleep(USB_RESUME_TIMEOUT);
+		if (!skip_extended_resume_delay)
+			usleep_range(USB_RESUME_TIMEOUT * 1000,
+					(USB_RESUME_TIMEOUT + 1) * 1000);
 
 		/* Virtual root hubs can trigger on GET_PORT_STATUS to
 		 * stop resume signaling.  Then finish the resume
@@ -3385,7 +3398,7 @@
 		status = hub_port_status(hub, port1, &portstatus, &portchange);
 
 		/* TRSMRCY = 10 msec */
-		msleep(10);
+		usleep_range(10000, 10500);
 	}
 
  SuspendCleared:
@@ -4281,6 +4294,8 @@
 	enum usb_device_speed	oldspeed = udev->speed;
 	const char		*speed;
 	int			devnum = udev->devnum;
+	char			*error_event[] = {
+				"USB_DEVICE_ERROR=Device_No_Response", NULL };
 
 	/* root hub ports have a slightly longer reset period
 	 * (from USB 2.0 spec, section 7.1.7.5)
@@ -4454,6 +4469,8 @@
 				if (r != -ENODEV)
 					dev_err(&udev->dev, "device descriptor read/64, error %d\n",
 							r);
+				kobject_uevent_env(&udev->parent->dev.kobj,
+						KOBJ_CHANGE, error_event);
 				retval = -EMSGSIZE;
 				continue;
 			}
@@ -4506,6 +4523,8 @@
 				dev_err(&udev->dev,
 					"device descriptor read/8, error %d\n",
 					retval);
+			kobject_uevent_env(&udev->parent->dev.kobj,
+						KOBJ_CHANGE, error_event);
 			if (retval >= 0)
 				retval = -EMSGSIZE;
 		} else {
diff -ruw linux-4.4.115/drivers/usb/core/message.c linux-4.4.115-fbx/drivers/usb/core/message.c
--- linux-4.4.115/drivers/usb/core/message.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/usb/core/message.c	2019-10-29 09:26:24.973216194 +0100
@@ -12,6 +12,7 @@
 #include <linux/nls.h>
 #include <linux/device.h>
 #include <linux/scatterlist.h>
+#include <linux/usb/cdc.h>
 #include <linux/usb/quirks.h>
 #include <linux/usb/hcd.h>	/* for usbcore internals */
 #include <asm/byteorder.h>
@@ -2027,3 +2028,159 @@
 	return 0;
 }
 EXPORT_SYMBOL_GPL(usb_driver_set_configuration);
+
+/**
+ * cdc_parse_cdc_header - parse the extra headers present in CDC devices
+ * @hdr: the place to put the results of the parsing
+ * @intf: the interface for which parsing is requested
+ * @buffer: pointer to the extra headers to be parsed
+ * @buflen: length of the extra headers
+ *
+ * This evaluates the extra headers present in CDC devices which
+ * bind the interfaces for data and control and provide details
+ * about the capabilities of the device.
+ *
+ * Return: number of descriptors parsed or -EINVAL
+ * if the header is contradictory beyond salvage
+ */
+
+int cdc_parse_cdc_header(struct usb_cdc_parsed_header *hdr,
+				struct usb_interface *intf,
+				u8 *buffer,
+				int buflen)
+{
+	/* duplicates are ignored */
+	struct usb_cdc_union_desc *union_header = NULL;
+
+	/* duplicates are not tolerated */
+	struct usb_cdc_header_desc *header = NULL;
+	struct usb_cdc_ether_desc *ether = NULL;
+	struct usb_cdc_mdlm_detail_desc *detail = NULL;
+	struct usb_cdc_mdlm_desc *desc = NULL;
+
+	unsigned int elength;
+	int cnt = 0;
+
+	memset(hdr, 0x00, sizeof(struct usb_cdc_parsed_header));
+	hdr->phonet_magic_present = false;
+	while (buflen > 0) {
+		elength = buffer[0];
+		if (!elength) {
+			dev_err(&intf->dev, "skipping garbage byte\n");
+			elength = 1;
+			goto next_desc;
+		}
+		if ((buflen < elength) || (elength < 3)) {
+			dev_err(&intf->dev, "invalid descriptor buffer length\n");
+			break;
+		}
+		if (buffer[1] != USB_DT_CS_INTERFACE) {
+			dev_err(&intf->dev, "skipping garbage\n");
+			goto next_desc;
+		}
+
+		switch (buffer[2]) {
+		case USB_CDC_UNION_TYPE: /* we've found it */
+			if (elength < sizeof(struct usb_cdc_union_desc))
+				goto next_desc;
+			if (union_header) {
+				dev_err(&intf->dev, "More than one union descriptor, skipping ...\n");
+				goto next_desc;
+			}
+			union_header = (struct usb_cdc_union_desc *)buffer;
+			break;
+		case USB_CDC_COUNTRY_TYPE:
+			if (elength < sizeof(struct usb_cdc_country_functional_desc))
+				goto next_desc;
+			hdr->usb_cdc_country_functional_desc =
+				(struct usb_cdc_country_functional_desc *)buffer;
+			break;
+		case USB_CDC_HEADER_TYPE:
+			if (elength != sizeof(struct usb_cdc_header_desc))
+				goto next_desc;
+			if (header)
+				return -EINVAL;
+			header = (struct usb_cdc_header_desc *)buffer;
+			break;
+		case USB_CDC_ACM_TYPE:
+			if (elength < sizeof(struct usb_cdc_acm_descriptor))
+				goto next_desc;
+			hdr->usb_cdc_acm_descriptor =
+				(struct usb_cdc_acm_descriptor *)buffer;
+			break;
+		case USB_CDC_ETHERNET_TYPE:
+			if (elength != sizeof(struct usb_cdc_ether_desc))
+				goto next_desc;
+			if (ether)
+				return -EINVAL;
+			ether = (struct usb_cdc_ether_desc *)buffer;
+			break;
+		case USB_CDC_CALL_MANAGEMENT_TYPE:
+			if (elength < sizeof(struct usb_cdc_call_mgmt_descriptor))
+				goto next_desc;
+			hdr->usb_cdc_call_mgmt_descriptor =
+				(struct usb_cdc_call_mgmt_descriptor *)buffer;
+			break;
+		case USB_CDC_DMM_TYPE:
+			if (elength < sizeof(struct usb_cdc_dmm_desc))
+				goto next_desc;
+			hdr->usb_cdc_dmm_desc =
+				(struct usb_cdc_dmm_desc *)buffer;
+			break;
+		case USB_CDC_MDLM_TYPE:
+			if (elength < sizeof(struct usb_cdc_mdlm_desc))
+				goto next_desc;
+			if (desc)
+				return -EINVAL;
+			desc = (struct usb_cdc_mdlm_desc *)buffer;
+			break;
+		case USB_CDC_MDLM_DETAIL_TYPE:
+			if (elength < sizeof(struct usb_cdc_mdlm_detail_desc))
+				goto next_desc;
+			if (detail)
+				return -EINVAL;
+			detail = (struct usb_cdc_mdlm_detail_desc *)buffer;
+			break;
+		case USB_CDC_NCM_TYPE:
+			if (elength < sizeof(struct usb_cdc_ncm_desc))
+				goto next_desc;
+			hdr->usb_cdc_ncm_desc = (struct usb_cdc_ncm_desc *)buffer;
+			break;
+		case USB_CDC_MBIM_TYPE:
+			if (elength < sizeof(struct usb_cdc_mbim_desc))
+				goto next_desc;
+
+			hdr->usb_cdc_mbim_desc = (struct usb_cdc_mbim_desc *)buffer;
+			break;
+		case USB_CDC_MBIM_EXTENDED_TYPE:
+			if (elength < sizeof(struct usb_cdc_mbim_extended_desc))
+				break;
+			hdr->usb_cdc_mbim_extended_desc =
+				(struct usb_cdc_mbim_extended_desc *)buffer;
+			break;
+		case CDC_PHONET_MAGIC_NUMBER:
+			hdr->phonet_magic_present = true;
+			break;
+		default:
+			/*
+			 * there are LOTS more CDC descriptors that
+			 * could legitimately be found here.
+			 */
+			dev_dbg(&intf->dev, "Ignoring descriptor: type %02x, length %ud\n",
+					buffer[2], elength);
+			goto next_desc;
+		}
+		cnt++;
+next_desc:
+		buflen -= elength;
+		buffer += elength;
+	}
+	hdr->usb_cdc_union_desc = union_header;
+	hdr->usb_cdc_header_desc = header;
+	hdr->usb_cdc_mdlm_detail_desc = detail;
+	hdr->usb_cdc_mdlm_desc = desc;
+	hdr->usb_cdc_ether_desc = ether;
+	return cnt;
+}
+EXPORT_SYMBOL(cdc_parse_cdc_header);
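/*
 * Editor's note (usage sketch, not part of the patch): a CDC function
 * driver would typically run the parser from probe() over the interface's
 * class-specific descriptors; my_probe() is a hypothetical caller.
 */
static int my_probe(struct usb_interface *intf)
{
	struct usb_cdc_parsed_header hdr;
	int ret;

	ret = cdc_parse_cdc_header(&hdr, intf, intf->altsetting->extra,
				   intf->altsetting->extralen);
	if (ret < 0)
		return ret;	/* headers contradictory beyond salvage */

	if (hdr.usb_cdc_union_desc)
		dev_dbg(&intf->dev, "data interface %u\n",
			hdr.usb_cdc_union_desc->bSlaveInterface0);

	return 0;
}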
diff -ruw linux-4.4.115/drivers/usb/core/notify.c linux-4.4.115-fbx/drivers/usb/core/notify.c
--- linux-4.4.115/drivers/usb/core/notify.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/usb/core/notify.c	2019-01-22 16:16:27.243280274 +0100
@@ -17,6 +17,7 @@
 #include "usb.h"
 
 static BLOCKING_NOTIFIER_HEAD(usb_notifier_list);
+static ATOMIC_NOTIFIER_HEAD(usb_atomic_notifier_list);
 
 /**
  * usb_register_notify - register a notifier callback whenever a usb change happens
@@ -67,3 +68,33 @@
 {
 	blocking_notifier_call_chain(&usb_notifier_list, USB_BUS_REMOVE, ubus);
 }
+
+/**
+ * usb_register_atomic_notify - register an atomic notifier callback invoked
+ * whenever a host controller (HC) dies
+ * @nb: pointer to the atomic notifier block for the callback events.
+ *
+ */
+void usb_register_atomic_notify(struct notifier_block *nb)
+{
+	atomic_notifier_chain_register(&usb_atomic_notifier_list, nb);
+}
+EXPORT_SYMBOL_GPL(usb_register_atomic_notify);
+
+/**
+ * usb_unregister_atomic_notify - unregister an atomic notifier callback
+ * @nb: pointer to the notifier block for the callback events.
+ *
+ */
+void usb_unregister_atomic_notify(struct notifier_block *nb)
+{
+	atomic_notifier_chain_unregister(&usb_atomic_notifier_list, nb);
+}
+EXPORT_SYMBOL_GPL(usb_unregister_atomic_notify);
+
+
+{
+	atomic_notifier_call_chain(&usb_atomic_notifier_list, USB_BUS_DIED,
+					 ubus);
+}
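/*
 * Editor's note (sketch, not part of the patch): a client interested in
 * dead-bus events registers an atomic notifier; the callback runs in
 * atomic context and therefore must not sleep.  my_bus_died() and my_nb
 * are hypothetical names.
 */
static int my_bus_died(struct notifier_block *nb, unsigned long action,
		       void *data)
{
	struct usb_bus *ubus = data;

	if (action == USB_BUS_DIED)
		pr_err("usb bus %d died\n", ubus->busnum);

	return NOTIFY_OK;
}

static struct notifier_block my_nb = {
	.notifier_call = my_bus_died,
};

/* at init time: usb_register_atomic_notify(&my_nb); */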
diff -ruw linux-4.4.115/drivers/usb/core/usb.c linux-4.4.115-fbx/drivers/usb/core/usb.c
--- linux-4.4.115/drivers/usb/core/usb.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/usb/core/usb.c	2019-10-29 09:26:24.973216194 +0100
@@ -669,6 +669,53 @@
 }
 EXPORT_SYMBOL_GPL(usb_get_current_frame_number);
 
+int usb_sec_event_ring_setup(struct usb_device *dev,
+	unsigned intr_num)
+{
+	if (dev->state == USB_STATE_NOTATTACHED)
+		return 0;
+
+	return usb_hcd_sec_event_ring_setup(dev, intr_num);
+}
+EXPORT_SYMBOL(usb_sec_event_ring_setup);
+
+int usb_sec_event_ring_cleanup(struct usb_device *dev,
+	unsigned intr_num)
+{
+	return usb_hcd_sec_event_ring_cleanup(dev, intr_num);
+}
+EXPORT_SYMBOL(usb_sec_event_ring_cleanup);
+
+dma_addr_t
+usb_get_sec_event_ring_dma_addr(struct usb_device *dev,
+	unsigned intr_num)
+{
+	if (dev->state == USB_STATE_NOTATTACHED)
+		return 0;
+
+	return usb_hcd_get_sec_event_ring_dma_addr(dev, intr_num);
+}
+EXPORT_SYMBOL(usb_get_sec_event_ring_dma_addr);
+
+dma_addr_t usb_get_xfer_ring_dma_addr(struct usb_device *dev,
+	struct usb_host_endpoint *ep)
+{
+	if (dev->state == USB_STATE_NOTATTACHED)
+		return 0;
+
+	return usb_hcd_get_xfer_ring_dma_addr(dev, ep);
+}
+EXPORT_SYMBOL(usb_get_xfer_ring_dma_addr);
+
+int usb_get_controller_id(struct usb_device *dev)
+{
+	if (dev->state == USB_STATE_NOTATTACHED)
+		return -EINVAL;
+
+	return usb_hcd_get_controller_id(dev);
+}
+EXPORT_SYMBOL(usb_get_controller_id);
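/*
 * Editor's note (sketch, not part of the patch): a client driver (e.g. an
 * audio offload path) would pair the secondary event ring helpers roughly
 * as below; intr_num identifies the secondary interrupter and is
 * platform-specific.  my_offload_start() is a hypothetical name.
 */
static int my_offload_start(struct usb_device *udev, unsigned intr_num)
{
	dma_addr_t evt_dma;
	int ret;

	ret = usb_sec_event_ring_setup(udev, intr_num);
	if (ret)
		return ret;

	evt_dma = usb_get_sec_event_ring_dma_addr(udev, intr_num);
	if (!evt_dma) {
		usb_sec_event_ring_cleanup(udev, intr_num);
		return -ENODEV;
	}

	/* hand evt_dma to the offload peripheral here */
	return 0;
}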
+
 /*-------------------------------------------------------------------*/
 /*
  * __usb_get_extra_descriptor() finds a descriptor of specific type in the
diff -ruw linux-4.4.115/drivers/usb/core/usb.h linux-4.4.115-fbx/drivers/usb/core/usb.h
--- linux-4.4.115/drivers/usb/core/usb.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/usb/core/usb.h	2019-10-29 09:26:24.973216194 +0100
@@ -175,6 +175,7 @@
 extern void usb_notify_remove_device(struct usb_device *udev);
 extern void usb_notify_add_bus(struct usb_bus *ubus);
 extern void usb_notify_remove_bus(struct usb_bus *ubus);
+extern void usb_atomic_notify_dead_bus(struct usb_bus *ubus);
 extern void usb_hub_adjust_deviceremovable(struct usb_device *hdev,
 		struct usb_hub_descriptor *desc);
 
diff -ruw linux-4.4.115/drivers/usb/dwc3/core.c linux-4.4.115-fbx/drivers/usb/dwc3/core.c
--- linux-4.4.115/drivers/usb/dwc3/core.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/usb/dwc3/core.c	2019-10-29 09:26:24.981216272 +0100
@@ -35,6 +35,7 @@
 #include <linux/of.h>
 #include <linux/acpi.h>
 #include <linux/pinctrl/consumer.h>
+#include <linux/irq.h>
 
 #include <linux/usb/ch9.h>
 #include <linux/usb/gadget.h>
@@ -50,6 +51,20 @@
 
 /* -------------------------------------------------------------------------- */
 
+void dwc3_usb3_phy_suspend(struct dwc3 *dwc, int suspend)
+{
+	u32			reg;
+
+	reg = dwc3_readl(dwc->regs, DWC3_GUSB3PIPECTL(0));
+
+	if (suspend)
+		reg |= DWC3_GUSB3PIPECTL_SUSPHY;
+	else
+		reg &= ~DWC3_GUSB3PIPECTL_SUSPHY;
+
+	dwc3_writel(dwc->regs, DWC3_GUSB3PIPECTL(0), reg);
+}
+
 void dwc3_set_mode(struct dwc3 *dwc, u32 mode)
 {
 	u32 reg;
@@ -57,35 +72,74 @@
 	reg = dwc3_readl(dwc->regs, DWC3_GCTL);
 	reg &= ~(DWC3_GCTL_PRTCAPDIR(DWC3_GCTL_PRTCAP_OTG));
 	reg |= DWC3_GCTL_PRTCAPDIR(mode);
+	/*
+	 * Set this bit so that the device attempts SS three more times,
+	 * even if it previously failed to operate in SS mode.
+	 */
+	reg |= DWC3_GCTL_U2RSTECN;
+	reg &= ~(DWC3_GCTL_SOFITPSYNC);
+	reg &= ~(DWC3_GCTL_PWRDNSCALEMASK);
+	reg |= DWC3_GCTL_PWRDNSCALE(2);
+	reg |= DWC3_GCTL_U2EXIT_LFPS;
 	dwc3_writel(dwc->regs, DWC3_GCTL, reg);
+
+	if (mode == DWC3_GCTL_PRTCAP_OTG || mode == DWC3_GCTL_PRTCAP_HOST) {
+		/*
+		 * Allow ITP to be generated from the ref clk based counter
+		 * instead of the UTMI/ULPI clk based counter while only
+		 * superspeed is active, so the UTMI/ULPI PHY can be suspended.
+		 *
+		 * Starting with revision 2.50A, GFLADJ_REFCLK_LPM_SEL is used
+		 * instead.
+		 */
+		if (dwc->revision < DWC3_REVISION_250A) {
+			reg = dwc3_readl(dwc->regs, DWC3_GCTL);
+			reg |= DWC3_GCTL_SOFITPSYNC;
+			dwc3_writel(dwc->regs, DWC3_GCTL, reg);
+		} else {
+			reg = dwc3_readl(dwc->regs, DWC3_GFLADJ);
+			reg |= DWC3_GFLADJ_REFCLK_LPM_SEL;
+			dwc3_writel(dwc->regs, DWC3_GFLADJ, reg);
+		}
+	}
 }
 
 /**
- * dwc3_core_soft_reset - Issues core soft reset and PHY reset
+ * dwc3_init_usb_phys - perform initialization of the HS and SS PHYs
+ * If used as part of a POR or init sequence, it is recommended to hard
+ * reset the PHYs before invoking this function.
  * @dwc: pointer to our context structure
  */
-static int dwc3_core_soft_reset(struct dwc3 *dwc)
+static int dwc3_init_usb_phys(struct dwc3 *dwc)
 {
-	u32		reg;
 	int		ret;
 
-	/* Before Resetting PHY, put Core in Reset */
-	reg = dwc3_readl(dwc->regs, DWC3_GCTL);
-	reg |= DWC3_GCTL_CORESOFTRESET;
-	dwc3_writel(dwc->regs, DWC3_GCTL, reg);
+	/* Bring up PHYs */
+	ret = usb_phy_init(dwc->usb2_phy);
+	if (ret) {
+		pr_err("%s: usb_phy_init(dwc->usb2_phy) returned %d\n",
+				__func__, ret);
+		return ret;
+	}
 
-	/* Assert USB3 PHY reset */
-	reg = dwc3_readl(dwc->regs, DWC3_GUSB3PIPECTL(0));
-	reg |= DWC3_GUSB3PIPECTL_PHYSOFTRST;
-	dwc3_writel(dwc->regs, DWC3_GUSB3PIPECTL(0), reg);
+	if (dwc->maximum_speed == USB_SPEED_HIGH)
+		goto generic_phy_init;
 
-	/* Assert USB2 PHY reset */
-	reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));
-	reg |= DWC3_GUSB2PHYCFG_PHYSOFTRST;
-	dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg);
+	ret = usb_phy_init(dwc->usb3_phy);
+	if (ret == -EBUSY) {
+		/*
+		 * Set the maximum speed to high when USB3 PHY initialization
+		 * fails and USB SuperSpeed can't be supported.
+		 */
+		dwc->maximum_speed = USB_SPEED_HIGH;
+	} else if (ret) {
+		pr_err("%s: usb_phy_init(dwc->usb3_phy) returned %d\n",
+				__func__, ret);
+		return ret;
+	}
 
-	usb_phy_init(dwc->usb2_phy);
-	usb_phy_init(dwc->usb3_phy);
+generic_phy_init:
 	ret = phy_init(dwc->usb2_generic_phy);
 	if (ret < 0)
 		return ret;
@@ -95,24 +149,45 @@
 		phy_exit(dwc->usb2_generic_phy);
 		return ret;
 	}
-	mdelay(100);
 
-	/* Clear USB3 PHY reset */
+	return 0;
+}
+
+/**
+ * dwc3_core_reset - Issues core soft reset and PHY reset
+ * @dwc: pointer to our context structure
+ */
+static int dwc3_core_reset(struct dwc3 *dwc)
+{
+	int		ret;
+	u32	reg;
+
+	/* Reset PHYs */
+	usb_phy_reset(dwc->usb2_phy);
+
+	if (dwc->maximum_speed == USB_SPEED_SUPER)
+		usb_phy_reset(dwc->usb3_phy);
+
+	/* Initialize PHYs */
+	ret = dwc3_init_usb_phys(dwc);
+	if (ret) {
+		pr_err("%s: dwc3_init_phys returned %d\n",
+				__func__, ret);
+		return ret;
+	}
+
 	reg = dwc3_readl(dwc->regs, DWC3_GUSB3PIPECTL(0));
-	reg &= ~DWC3_GUSB3PIPECTL_PHYSOFTRST;
-	dwc3_writel(dwc->regs, DWC3_GUSB3PIPECTL(0), reg);
+	reg &= ~DWC3_GUSB3PIPECTL_DELAYP1TRANS;
 
-	/* Clear USB2 PHY reset */
-	reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));
-	reg &= ~DWC3_GUSB2PHYCFG_PHYSOFTRST;
-	dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg);
+	/* core exits U1/U2/U3 only in PHY power state P1/P2/P3 respectively */
+	if (dwc->revision <= DWC3_REVISION_310A)
+		reg |= DWC3_GUSB3PIPECTL_UX_EXIT_IN_PX;
 
-	mdelay(100);
+	dwc3_writel(dwc->regs, DWC3_GUSB3PIPECTL(0), reg);
 
-	/* After PHYs are stable we can take Core out of reset state */
-	reg = dwc3_readl(dwc->regs, DWC3_GCTL);
-	reg &= ~DWC3_GCTL_CORESOFTRESET;
-	dwc3_writel(dwc->regs, DWC3_GCTL, reg);
+	dwc3_notify_event(dwc, DWC3_CONTROLLER_RESET_EVENT, 0);
+
+	dwc3_notify_event(dwc, DWC3_CONTROLLER_POST_RESET_EVENT, 0);
 
 	return 0;
 }
@@ -190,7 +265,7 @@
  * otherwise ERR_PTR(errno).
  */
 static struct dwc3_event_buffer *dwc3_alloc_one_event_buffer(struct dwc3 *dwc,
-		unsigned length)
+		unsigned length, enum event_buf_type type)
 {
 	struct dwc3_event_buffer	*evt;
 
@@ -200,6 +275,7 @@
 
 	evt->dwc	= dwc;
 	evt->length	= length;
+	evt->type	= type;
 	evt->buf	= dma_alloc_coherent(dwc->dev, length,
 			&evt->dma, GFP_KERNEL);
 	if (!evt->buf)
@@ -234,26 +310,40 @@
  */
 static int dwc3_alloc_event_buffers(struct dwc3 *dwc, unsigned length)
 {
-	int			num;
 	int			i;
+	int			j = 0;
 
-	num = DWC3_NUM_INT(dwc->hwparams.hwparams1);
-	dwc->num_event_buffers = num;
+	dwc->num_event_buffers = dwc->num_normal_event_buffers +
+		dwc->num_gsi_event_buffers;
 
-	dwc->ev_buffs = devm_kzalloc(dwc->dev, sizeof(*dwc->ev_buffs) * num,
+	dwc->ev_buffs = devm_kzalloc(dwc->dev,
+			sizeof(*dwc->ev_buffs) * dwc->num_event_buffers,
 			GFP_KERNEL);
 	if (!dwc->ev_buffs)
 		return -ENOMEM;
 
-	for (i = 0; i < num; i++) {
+	for (i = 0; i < dwc->num_normal_event_buffers; i++) {
 		struct dwc3_event_buffer	*evt;
 
-		evt = dwc3_alloc_one_event_buffer(dwc, length);
+		evt = dwc3_alloc_one_event_buffer(dwc, length,
+				EVT_BUF_TYPE_NORMAL);
 		if (IS_ERR(evt)) {
 			dev_err(dwc->dev, "can't allocate event buffer\n");
 			return PTR_ERR(evt);
 		}
-		dwc->ev_buffs[i] = evt;
+		dwc->ev_buffs[j++] = evt;
+	}
+
+	for (i = 0; i < dwc->num_gsi_event_buffers; i++) {
+		struct dwc3_event_buffer	*evt;
+
+		evt = dwc3_alloc_one_event_buffer(dwc, length,
+				EVT_BUF_TYPE_GSI);
+		if (IS_ERR(evt)) {
+			dev_err(dwc->dev, "can't allocate event buffer\n");
+			return PTR_ERR(evt);
+		}
+		dwc->ev_buffs[j++] = evt;
 	}
 
 	return 0;
@@ -265,25 +355,40 @@
  *
  * Returns 0 on success otherwise negative errno.
  */
-static int dwc3_event_buffers_setup(struct dwc3 *dwc)
+int dwc3_event_buffers_setup(struct dwc3 *dwc)
 {
 	struct dwc3_event_buffer	*evt;
 	int				n;
 
 	for (n = 0; n < dwc->num_event_buffers; n++) {
 		evt = dwc->ev_buffs[n];
-		dev_dbg(dwc->dev, "Event buf %p dma %08llx length %d\n",
+		dev_dbg(dwc->dev, "Event buf %pK dma %08llx length %d\n",
 				evt->buf, (unsigned long long) evt->dma,
 				evt->length);
 
+		memset(evt->buf, 0, evt->length);
+
 		evt->lpos = 0;
 
 		dwc3_writel(dwc->regs, DWC3_GEVNTADRLO(n),
 				lower_32_bits(evt->dma));
+
+		if (evt->type == EVT_BUF_TYPE_NORMAL) {
 		dwc3_writel(dwc->regs, DWC3_GEVNTADRHI(n),
 				upper_32_bits(evt->dma));
 		dwc3_writel(dwc->regs, DWC3_GEVNTSIZ(n),
 				DWC3_GEVNTSIZ_SIZE(evt->length));
+		} else {
+			dwc3_writel(dwc->regs, DWC3_GEVNTADRHI(n),
+				DWC3_GEVNTADRHI_EVNTADRHI_GSI_EN(
+					DWC3_GEVENT_TYPE_GSI) |
+				DWC3_GEVNTADRHI_EVNTADRHI_GSI_IDX(n));
+
+			dwc3_writel(dwc->regs, DWC3_GEVNTSIZ(n),
+				DWC3_GEVNTCOUNT_EVNTINTRPTMASK |
+				((evt->length) & 0xffff));
+		}
+
 		dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(n), 0);
 	}
 
@@ -529,7 +634,7 @@
  *
  * Returns 0 on success otherwise negative errno.
  */
-static int dwc3_core_init(struct dwc3 *dwc)
+int dwc3_core_init(struct dwc3 *dwc)
 {
 	u32			hwparams4 = dwc->hwparams.hwparams4;
 	u32			reg;
@@ -559,16 +664,28 @@
 	/* Handle USB2.0-only core configuration */
 	if (DWC3_GHWPARAMS3_SSPHY_IFC(dwc->hwparams.hwparams3) ==
 			DWC3_GHWPARAMS3_SSPHY_IFC_DIS) {
-		if (dwc->maximum_speed == USB_SPEED_SUPER)
-			dwc->maximum_speed = USB_SPEED_HIGH;
+		if (dwc->max_hw_supp_speed == USB_SPEED_SUPER) {
+			dwc->max_hw_supp_speed = USB_SPEED_HIGH;
+			dwc->maximum_speed = dwc->max_hw_supp_speed;
+		}
 	}
 
-	/* issue device SoftReset too */
-	ret = dwc3_soft_reset(dwc);
+	/*
+	 * Workaround for STAR 9000961433 which affects only version
+	 * 3.00a of the DWC_usb3 core. This prevents the controller
+	 * interrupt from being masked while handling events. IMOD
+	 * allows us to work around this issue. Enable it for the
+	 * affected version.
+	 */
+	if (!dwc->imod_interval && (dwc->revision == DWC3_REVISION_300A))
+		dwc->imod_interval = 1;
+
+	ret = dwc3_core_reset(dwc);
 	if (ret)
 		goto err0;
 
-	ret = dwc3_core_soft_reset(dwc);
+	/* issue device SoftReset too */
+	ret = dwc3_soft_reset(dwc);
 	if (ret)
 		goto err0;
 
@@ -639,6 +756,15 @@
 
 	dwc3_core_num_eps(dwc);
 
+	/*
+	 * Disable clock gating to work around a known HW bug that causes the
+	 * internal RAM clock to get stuck when entering low power modes.
+	 */
+	if (dwc->disable_clk_gating) {
+		dev_dbg(dwc->dev, "Disabling controller clock gating.\n");
+		reg |= DWC3_GCTL_DSBLCLKGTNG;
+	}
+
 	dwc3_writel(dwc->regs, DWC3_GCTL, reg);
 
 	ret = dwc3_alloc_scratch_buffers(dwc);
@@ -649,6 +775,17 @@
 	if (ret)
 		goto err2;
 
+	/*
+	 * Clear elastic buffer mode in the GUSB3PIPECTL(0) register;
+	 * otherwise it results in high link error rates and can cause
+	 * SS mode transfer failures.
+	 */
+	if (!dwc->nominal_elastic_buffer) {
+		reg = dwc3_readl(dwc->regs, DWC3_GUSB3PIPECTL(0));
+		reg &= ~DWC3_GUSB3PIPECTL_ELASTIC_BUF_MODE;
+		dwc3_writel(dwc->regs, DWC3_GUSB3PIPECTL(0), reg);
+	}
+
 	return 0;
 
 err2:
@@ -743,38 +880,16 @@
 static int dwc3_core_init_mode(struct dwc3 *dwc)
 {
 	struct device *dev = dwc->dev;
-	int ret;
 
 	switch (dwc->dr_mode) {
 	case USB_DR_MODE_PERIPHERAL:
 		dwc3_set_mode(dwc, DWC3_GCTL_PRTCAP_DEVICE);
-		ret = dwc3_gadget_init(dwc);
-		if (ret) {
-			dev_err(dev, "failed to initialize gadget\n");
-			return ret;
-		}
 		break;
 	case USB_DR_MODE_HOST:
 		dwc3_set_mode(dwc, DWC3_GCTL_PRTCAP_HOST);
-		ret = dwc3_host_init(dwc);
-		if (ret) {
-			dev_err(dev, "failed to initialize host\n");
-			return ret;
-		}
 		break;
 	case USB_DR_MODE_OTG:
 		dwc3_set_mode(dwc, DWC3_GCTL_PRTCAP_OTG);
-		ret = dwc3_host_init(dwc);
-		if (ret) {
-			dev_err(dev, "failed to initialize host\n");
-			return ret;
-		}
-
-		ret = dwc3_gadget_init(dwc);
-		if (ret) {
-			dev_err(dev, "failed to initialize gadget\n");
-			return ret;
-		}
 		break;
 	default:
 		dev_err(dev, "Unsupported mode of operation %d\n", dwc->dr_mode);
@@ -803,8 +918,310 @@
 	}
 }
 
+/* XHCI reset, resets other CORE registers as well, re-init those */
+void dwc3_post_host_reset_core_init(struct dwc3 *dwc)
+{
+	dwc3_core_init(dwc);
+	dwc3_gadget_restart(dwc);
+}
+
+static void (*notify_event)(struct dwc3 *, unsigned, unsigned);
+void dwc3_set_notifier(void (*notify)(struct dwc3 *, unsigned, unsigned))
+{
+	notify_event = notify;
+}
+EXPORT_SYMBOL(dwc3_set_notifier);
+
+int dwc3_notify_event(struct dwc3 *dwc, unsigned event, unsigned value)
+{
+	int ret = 0;
+
+	if (dwc->notify_event)
+		dwc->notify_event(dwc, event, value);
+	else
+		ret = -ENODEV;
+
+	return ret;
+}
+EXPORT_SYMBOL(dwc3_notify_event);
+
+int dwc3_core_pre_init(struct dwc3 *dwc)
+{
+	int ret;
+
+	dwc3_cache_hwparams(dwc);
+
+	ret = dwc3_phy_setup(dwc);
+	if (ret)
+		goto err0;
+
+	if (!dwc->ev_buffs) {
+		ret = dwc3_alloc_event_buffers(dwc, DWC3_EVENT_BUFFERS_SIZE);
+		if (ret) {
+			dev_err(dwc->dev, "failed to allocate event buffers\n");
+			ret = -ENOMEM;
+			goto err1;
+		}
+	}
+
+	ret = dwc3_core_init(dwc);
+	if (ret) {
+		dev_err(dwc->dev, "failed to initialize core\n");
+		goto err2;
+	}
+
+	ret = phy_power_on(dwc->usb2_generic_phy);
+	if (ret < 0)
+		goto err3;
+
+	ret = phy_power_on(dwc->usb3_generic_phy);
+	if (ret < 0)
+		goto err4;
+
+	ret = dwc3_event_buffers_setup(dwc);
+	if (ret) {
+		dev_err(dwc->dev, "failed to setup event buffers\n");
+		goto err5;
+	}
+
+	ret = dwc3_core_init_mode(dwc);
+	if (ret) {
+		dev_err(dwc->dev, "failed to set mode with dwc3 core\n");
+		goto err6;
+	}
+
+	return ret;
+
+err6:
+	dwc3_event_buffers_cleanup(dwc);
+err5:
+	phy_power_off(dwc->usb3_generic_phy);
+err4:
+	phy_power_off(dwc->usb2_generic_phy);
+err3:
+	dwc3_core_exit(dwc);
+err2:
+	dwc3_free_event_buffers(dwc);
+err1:
+	dwc3_ulpi_exit(dwc);
+err0:
+	return ret;
+}
+
+static void dwc3_process_event_entry(struct dwc3 *dwc,
+		const union dwc3_event *event)
+{
+	trace_dwc3_event(event->raw);
+
+	/* skip event processing in absence of vbus */
+	if (!dwc->vbus_active) {
+		dbg_print_reg("SKIP EVT", event->raw);
+		return;
+	}
+
+	/* If run/stop is cleared don't process any more events */
+	if (!dwc->pullups_connected) {
+		dbg_print_reg("SKIP_EVT_PULLUP", event->raw);
+		return;
+	}
+
+	/* Endpoint IRQ, handle it and return early */
+	if (event->type.is_devspec == 0) {
+		/* depevt */
+		return dwc3_endpoint_interrupt(dwc, &event->depevt);
+	}
+
+	switch (event->type.type) {
+	case DWC3_EVENT_TYPE_DEV:
+		dwc3_gadget_interrupt(dwc, &event->devt);
+		break;
+	/* REVISIT what to do with Carkit and I2C events ? */
+	default:
+		dev_err(dwc->dev, "UNKNOWN IRQ type %d\n", event->raw);
+	}
+}
+
+static irqreturn_t dwc3_process_event_buf(struct dwc3 *dwc, u32 buf)
+{
+	struct dwc3_event_buffer *evt;
+	irqreturn_t ret = IRQ_NONE;
+	int left;
+	u32 reg;
+
+	evt = dwc->ev_buffs[buf];
+	left = evt->count;
+
+	if (!(evt->flags & DWC3_EVENT_PENDING))
+		return IRQ_NONE;
+
+	while (left > 0) {
+		union dwc3_event event;
+
+		event.raw = *(u32 *) (evt->buf + evt->lpos);
+
+		dwc3_process_event_entry(dwc, &event);
+
+		if (dwc->err_evt_seen) {
+			/*
+			 * if erratic error, skip remaining events
+			 * while controller undergoes reset
+			 */
+			evt->lpos = (evt->lpos + left) %
+					DWC3_EVENT_BUFFERS_SIZE;
+			dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(buf), left);
+			if (dwc3_notify_event(dwc,
+						DWC3_CONTROLLER_ERROR_EVENT, 0))
+				dwc->err_evt_seen = 0;
+			break;
+		}
+
+		/*
+		 * FIXME we wrap around correctly to the next entry as
+		 * almost all entries are 4 bytes in size. There is one
+		 * entry which has 12 bytes which is a regular entry
+		 * followed by 8 bytes data. ATM I don't know how
+		 * things are organized if we get next to a
+		 * boundary so I worry about that once we try to handle
+		 * that.
+		 */
+		evt->lpos = (evt->lpos + 4) % DWC3_EVENT_BUFFERS_SIZE;
+		left -= 4;
+	}
+
+	dwc->bh_handled_evt_cnt[dwc->bh_dbg_index] += (evt->count / 4);
+
+	evt->count = 0;
+	evt->flags &= ~DWC3_EVENT_PENDING;
+	ret = IRQ_HANDLED;
+
+	/* Unmask interrupt */
+	reg = dwc3_readl(dwc->regs, DWC3_GEVNTSIZ(buf));
+	reg &= ~DWC3_GEVNTSIZ_INTMASK;
+	dwc3_writel(dwc->regs, DWC3_GEVNTSIZ(buf), reg);
+
+	if (dwc->imod_interval)
+		dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(buf),
+				DWC3_GEVNTCOUNT_EHB);
+
+	return ret;
+}
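
The lpos/count bookkeeping above is ordinary ring-buffer arithmetic: the
controller reports the pending event bytes in GEVNTCOUNT, the handler
consumes them four bytes at a time, and lpos wraps modulo the buffer size
(4096 bytes per DWC3_EVENT_BUFFERS_SIZE in this kernel's core.h). A
self-contained illustration of the wrap, compilable as plain userspace C:

	#include <stdio.h>

	#define DWC3_EVENT_BUFFERS_SIZE	4096	/* matches the driver default */

	int main(void)
	{
		unsigned int lpos = 4092;	/* last 4-byte slot in the ring */

		/* consuming one more event wraps the index back to 0 */
		lpos = (lpos + 4) % DWC3_EVENT_BUFFERS_SIZE;
		printf("lpos after wrap: %u\n", lpos);	/* prints 0 */
		return 0;
	}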
+
+static irqreturn_t dwc3_thread_interrupt(int irq, void *_dwc)
+{
+	struct dwc3 *dwc = _dwc;
+	unsigned long flags;
+	irqreturn_t ret = IRQ_NONE;
+	int i;
+	unsigned temp_time;
+	ktime_t start_time;
+
+	start_time = ktime_get();
+
+	spin_lock_irqsave(&dwc->lock, flags);
+	dwc->bh_handled_evt_cnt[dwc->bh_dbg_index] = 0;
+
+	for (i = 0; i < dwc->num_normal_event_buffers; i++)
+		ret |= dwc3_process_event_buf(dwc, i);
+
+	spin_unlock_irqrestore(&dwc->lock, flags);
+
+	temp_time = ktime_to_us(ktime_sub(ktime_get(), start_time));
+	dwc->bh_completion_time[dwc->bh_dbg_index] = temp_time;
+	dwc->bh_dbg_index = (dwc->bh_dbg_index + 1) % 10;
+
+	return ret;
+}
+
+void dwc3_bh_work(struct work_struct *w)
+{
+	struct dwc3 *dwc = container_of(w, struct dwc3, bh_work);
+
+	pm_runtime_get_sync(dwc->dev);
+	dwc3_thread_interrupt(dwc->irq, dwc);
+	pm_runtime_put(dwc->dev);
+}
+
+static irqreturn_t dwc3_check_event_buf(struct dwc3 *dwc, u32 buf)
+{
+	struct dwc3_event_buffer *evt;
+	u32 count;
+	u32 reg;
+
+	evt = dwc->ev_buffs[buf];
+
+	count = dwc3_readl(dwc->regs, DWC3_GEVNTCOUNT(buf));
+	count &= DWC3_GEVNTCOUNT_MASK;
+	if (!count)
+		return IRQ_NONE;
+
+	if (count > evt->length) {
+		dbg_event(0xFF, "HUGE_EVCNT", count);
+		evt->lpos = (evt->lpos + count) % DWC3_EVENT_BUFFERS_SIZE;
+		dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(buf), count);
+		return IRQ_HANDLED;
+	}
+
+	evt->count = count;
+	evt->flags |= DWC3_EVENT_PENDING;
+
+	/* Mask interrupt */
+	reg = dwc3_readl(dwc->regs, DWC3_GEVNTSIZ(buf));
+	reg |= DWC3_GEVNTSIZ_INTMASK;
+	dwc3_writel(dwc->regs, DWC3_GEVNTSIZ(buf), reg);
+
+	dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(buf), count);
+
+	return IRQ_WAKE_THREAD;
+}
+
+static irqreturn_t dwc3_interrupt(int irq, void *_dwc)
+{
+	struct dwc3			*dwc = _dwc;
+	int				i;
+	irqreturn_t			ret = IRQ_NONE;
+	unsigned			temp_cnt = 0;
+	ktime_t				start_time;
+
+	start_time = ktime_get();
+	dwc->irq_cnt++;
+
+	/* controller reset is still pending */
+	if (dwc->err_evt_seen)
+		return IRQ_HANDLED;
+
+	for (i = 0; i < dwc->num_normal_event_buffers; i++) {
+		irqreturn_t status;
+
+		status = dwc3_check_event_buf(dwc, i);
+		if (status == IRQ_WAKE_THREAD)
+			ret = status;
+
+		temp_cnt += dwc->ev_buffs[i]->count;
+	}
+
+	dwc->irq_start_time[dwc->irq_dbg_index] = start_time;
+	dwc->irq_completion_time[dwc->irq_dbg_index] =
+		ktime_us_delta(ktime_get(), start_time);
+	dwc->irq_event_count[dwc->irq_dbg_index] = temp_cnt / 4;
+	dwc->irq_dbg_index = (dwc->irq_dbg_index + 1) % MAX_INTR_STATS;
+
+	if (ret == IRQ_WAKE_THREAD)
+		queue_work(dwc->dwc_wq, &dwc->bh_work);
+
+	return IRQ_HANDLED;
+}
+
 #define DWC3_ALIGN_MASK		(16 - 1)
 
+/* check whether the core supports IMOD */
+bool dwc3_has_imod(struct dwc3 *dwc)
+{
+	return ((dwc3_is_usb3(dwc) &&
+		dwc->revision >= DWC3_REVISION_300A) ||
+		(dwc3_is_usb31(dwc) &&
+		dwc->revision >= DWC3_USB31_REVISION_120A));
+}
+
 static int dwc3_probe(struct platform_device *pdev)
 {
 	struct device		*dev = &pdev->dev;
@@ -815,7 +1232,8 @@
 	u8			tx_de_emphasis;
 	u8			hird_threshold;
 	u32			fladj = 0;
-
+	u32			num_evt_buffs;
+	int			irq;
 	int			ret;
 
 	void __iomem		*regs;
@@ -829,6 +1247,7 @@
 	dwc->mem = mem;
 	dwc->dev = dev;
 
+	dwc->notify_event = notify_event;
 	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
 	if (!res) {
 		dev_err(dev, "missing IRQ\n");
@@ -839,12 +1258,27 @@
 	dwc->xhci_resources[1].flags = res->flags;
 	dwc->xhci_resources[1].name = res->name;
 
+	irq = platform_get_irq(to_platform_device(dwc->dev), 0);
+
+	/* will be enabled in dwc3_msm_resume() */
+	irq_set_status_flags(irq, IRQ_NOAUTOEN);
+	ret = devm_request_irq(dev, irq, dwc3_interrupt, IRQF_SHARED, "dwc3",
+			dwc);
+	if (ret) {
+		dev_err(dwc->dev, "failed to request irq #%d --> %d\n",
+				irq, ret);
+		return -ENODEV;
+	}
+
+	dwc->irq = irq;
+
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	if (!res) {
 		dev_err(dev, "missing memory resource\n");
 		return -ENODEV;
 	}
 
+	dwc->reg_phys = res->start;
 	dwc->xhci_resources[0].start = res->start;
 	dwc->xhci_resources[0].end = dwc->xhci_resources[0].start +
 					DWC3_XHCI_REGS_END;
@@ -867,7 +1301,7 @@
 	dwc->regs_size	= resource_size(res);
 
 	/* default to highest possible threshold */
-	lpm_nyet_threshold = 0xff;
+	lpm_nyet_threshold = 0xf;
 
 	/* default to -3.5dB de-emphasis */
 	tx_de_emphasis = 1;
@@ -879,6 +1313,7 @@
 	hird_threshold = 12;
 
 	dwc->maximum_speed = usb_get_maximum_speed(dev);
+	dwc->max_hw_supp_speed = dwc->maximum_speed;
 	dwc->dr_mode = usb_get_dr_mode(dev);
 
 	dwc->has_lpm_erratum = device_property_read_bool(dev,
@@ -927,8 +1362,32 @@
 	device_property_read_u32(dev, "snps,quirk-frame-length-adjustment",
 				 &fladj);
 
+	dwc->nominal_elastic_buffer = device_property_read_bool(dev,
+				"snps,nominal-elastic-buffer");
+	dwc->usb3_u1u2_disable = device_property_read_bool(dev,
+				"snps,usb3-u1u2-disable");
+	dwc->disable_clk_gating = device_property_read_bool(dev,
+				"snps,disable-clk-gating");
+	dwc->enable_bus_suspend = device_property_read_bool(dev,
+				"snps,bus-suspend-enable");
+
+	dwc->num_normal_event_buffers = 1;
+	ret = device_property_read_u32(dev,
+		"snps,num-normal-evt-buffs", &num_evt_buffs);
+	if (!ret)
+		dwc->num_normal_event_buffers = num_evt_buffs;
+
+	ret = device_property_read_u32(dev,
+		"snps,num-gsi-evt-buffs", &dwc->num_gsi_event_buffers);
+
+	if (dwc->enable_bus_suspend) {
+		pm_runtime_set_autosuspend_delay(dev, 500);
+		pm_runtime_use_autosuspend(dev);
+	}
+
 	if (pdata) {
 		dwc->maximum_speed = pdata->maximum_speed;
+		dwc->max_hw_supp_speed = dwc->maximum_speed;
 		dwc->has_lpm_erratum = pdata->has_lpm_erratum;
 		if (pdata->lpm_nyet_threshold)
 			lpm_nyet_threshold = pdata->lpm_nyet_threshold;
@@ -962,7 +1421,7 @@
 
 	/* default to superspeed if no maximum_speed passed */
 	if (dwc->maximum_speed == USB_SPEED_UNKNOWN)
-		dwc->maximum_speed = USB_SPEED_SUPER;
+		dwc->max_hw_supp_speed = dwc->maximum_speed = USB_SPEED_SUPER;
 
 	dwc->lpm_nyet_threshold = lpm_nyet_threshold;
 	dwc->tx_de_emphasis = tx_de_emphasis;
@@ -970,104 +1429,84 @@
 	dwc->hird_threshold = hird_threshold
 		| (dwc->is_utmi_l1_suspend << 4);
 
+	init_waitqueue_head(&dwc->wait_linkstate);
 	platform_set_drvdata(pdev, dwc);
-	dwc3_cache_hwparams(dwc);
-
-	ret = dwc3_phy_setup(dwc);
-	if (ret)
-		goto err0;
-
 	ret = dwc3_core_get_phy(dwc);
 	if (ret)
 		goto err0;
 
 	spin_lock_init(&dwc->lock);
 
-	if (!dev->dma_mask) {
 		dev->dma_mask = dev->parent->dma_mask;
 		dev->dma_parms = dev->parent->dma_parms;
 		dma_set_coherent_mask(dev, dev->parent->coherent_dma_mask);
+
+	dwc->dwc_wq = alloc_ordered_workqueue("dwc_wq", WQ_HIGHPRI);
+	if (!dwc->dwc_wq) {
+		pr_err("%s: Unable to create workqueue dwc_wq\n", __func__);
+		return -ENOMEM;
 	}
 
+	INIT_WORK(&dwc->bh_work, dwc3_bh_work);
+
+	pm_runtime_no_callbacks(dev);
+	pm_runtime_set_active(dev);
 	pm_runtime_enable(dev);
-	pm_runtime_get_sync(dev);
 	pm_runtime_forbid(dev);
 
-	ret = dwc3_alloc_event_buffers(dwc, DWC3_EVENT_BUFFERS_SIZE);
-	if (ret) {
-		dev_err(dwc->dev, "failed to allocate event buffers\n");
-		ret = -ENOMEM;
-		goto err1;
-	}
-
 	if (IS_ENABLED(CONFIG_USB_DWC3_HOST))
 		dwc->dr_mode = USB_DR_MODE_HOST;
 	else if (IS_ENABLED(CONFIG_USB_DWC3_GADGET))
 		dwc->dr_mode = USB_DR_MODE_PERIPHERAL;
 
-	if (dwc->dr_mode == USB_DR_MODE_UNKNOWN)
+	if (dwc->dr_mode == USB_DR_MODE_UNKNOWN) {
 		dwc->dr_mode = USB_DR_MODE_OTG;
-
-	ret = dwc3_core_init(dwc);
-	if (ret) {
-		dev_err(dev, "failed to initialize core\n");
-		goto err1;
+		dwc->is_drd = true;
 	}
 
 	/* Adjust Frame Length */
 	dwc3_frame_length_adjustment(dwc, fladj);
 
-	usb_phy_set_suspend(dwc->usb2_phy, 0);
-	usb_phy_set_suspend(dwc->usb3_phy, 0);
-	ret = phy_power_on(dwc->usb2_generic_phy);
-	if (ret < 0)
-		goto err2;
-
-	ret = phy_power_on(dwc->usb3_generic_phy);
-	if (ret < 0)
-		goto err3;
+	/* Hardcode number of eps */
+	dwc->num_in_eps = 16;
+	dwc->num_out_eps = 16;
 
-	ret = dwc3_event_buffers_setup(dwc);
+	if (dwc->dr_mode == USB_DR_MODE_OTG ||
+		dwc->dr_mode == USB_DR_MODE_PERIPHERAL) {
+		ret = dwc3_gadget_init(dwc);
 	if (ret) {
-		dev_err(dwc->dev, "failed to setup event buffers\n");
-		goto err4;
+			dev_err(dev, "failed to initialize gadget\n");
+			goto err0;
+		}
 	}
 
-	ret = dwc3_core_init_mode(dwc);
-	if (ret)
-		goto err5;
+	if (dwc->dr_mode == USB_DR_MODE_OTG ||
+		dwc->dr_mode == USB_DR_MODE_HOST) {
+		ret = dwc3_host_init(dwc);
+		if (ret) {
+			dev_err(dev, "failed to initialize host\n");
+			goto err_gadget;
+		}
+	}
 
 	ret = dwc3_debugfs_init(dwc);
 	if (ret) {
 		dev_err(dev, "failed to initialize debugfs\n");
-		goto err6;
+		goto err_host;
 	}
 
 	pm_runtime_allow(dev);
 
 	return 0;
 
-err6:
-	dwc3_core_exit_mode(dwc);
-
-err5:
-	dwc3_event_buffers_cleanup(dwc);
-
-err4:
-	phy_power_off(dwc->usb3_generic_phy);
-
-err3:
-	phy_power_off(dwc->usb2_generic_phy);
-
-err2:
-	usb_phy_set_suspend(dwc->usb2_phy, 1);
-	usb_phy_set_suspend(dwc->usb3_phy, 1);
-	dwc3_core_exit(dwc);
-
-err1:
-	dwc3_free_event_buffers(dwc);
-	dwc3_ulpi_exit(dwc);
-
+err_host:
+	if (dwc->dr_mode == USB_DR_MODE_OTG ||
+		dwc->dr_mode == USB_DR_MODE_HOST)
+		dwc3_host_exit(dwc);
+err_gadget:
+	if (dwc->dr_mode == USB_DR_MODE_OTG ||
+		dwc->dr_mode == USB_DR_MODE_PERIPHERAL)
+		dwc3_gadget_exit(dwc);
 err0:
 	/*
 	 * restore res->start back to its original value so that, in case the
@@ -1075,6 +1514,7 @@
 	 * memory region the next time probe is called.
 	 */
 	res->start -= DWC3_GLOBALS_REGS_START;
+	destroy_workqueue(dwc->dwc_wq);
 
 	return ret;
 }
@@ -1096,14 +1536,14 @@
 	dwc3_event_buffers_cleanup(dwc);
 	dwc3_free_event_buffers(dwc);
 
-	usb_phy_set_suspend(dwc->usb2_phy, 1);
-	usb_phy_set_suspend(dwc->usb3_phy, 1);
 	phy_power_off(dwc->usb2_generic_phy);
 	phy_power_off(dwc->usb3_generic_phy);
 
 	dwc3_core_exit(dwc);
 	dwc3_ulpi_exit(dwc);
 
+	destroy_workqueue(dwc->dwc_wq);
+
 	pm_runtime_put_sync(&pdev->dev);
 	pm_runtime_disable(&pdev->dev);
 
@@ -1116,6 +1556,10 @@
 	struct dwc3	*dwc = dev_get_drvdata(dev);
 	unsigned long	flags;
 
+	/* Check if platform glue driver handling PM, if not then handle here */
+	if (!dwc3_notify_event(dwc, DWC3_CORE_PM_SUSPEND_EVENT, 0))
+		return 0;
+
 	spin_lock_irqsave(&dwc->lock, flags);
 
 	switch (dwc->dr_mode) {
@@ -1148,6 +1592,10 @@
 	unsigned long	flags;
 	int		ret;
 
+	/* Check if platform glue driver handling PM, if not then handle here */
+	if (!dwc3_notify_event(dwc, DWC3_CORE_PM_RESUME_EVENT, 0))
+		return 0;
+
 	pinctrl_pm_select_default_state(dev);
 
 	usb_phy_init(dwc->usb3_phy);
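
dwc3_set_notifier() and dwc3_notify_event() are the contract between this
core and a platform glue driver: the glue registers a single callback before
the core probes, and the core then forwards DWC3_CONTROLLER_* and
DWC3_CORE_PM_* events (constants added to core.h below) instead of handling
them itself; the suspend/resume hooks above, for example, return early
whenever dwc3_notify_event() finds a callback. A hedged sketch of the glue
side, with hypothetical function names standing in for what dwc3-msm.c (not
shown in this diff) actually does:

	#include "core.h"

	static void glue_notify_event(struct dwc3 *dwc, unsigned event,
				      unsigned value)
	{
		switch (event) {
		case DWC3_CONTROLLER_ERROR_EVENT:
			/* e.g. schedule a controller reset/restart */
			break;
		case DWC3_CORE_PM_SUSPEND_EVENT:
		case DWC3_CORE_PM_RESUME_EVENT:
			/* glue owns PM; the core's handlers bail out early */
			break;
		default:
			break;
		}
	}

	static int glue_probe_stub(void)
	{
		/* must run before dwc3_probe() copies the pointer into dwc */
		dwc3_set_notifier(glue_notify_event);
		return 0;
	}
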
diff -ruw linux-4.4.115/drivers/usb/dwc3/core.h linux-4.4.115-fbx/drivers/usb/dwc3/core.h
--- linux-4.4.115/drivers/usb/dwc3/core.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/usb/dwc3/core.h	2019-10-29 09:26:24.981216272 +0100
@@ -26,6 +26,8 @@
 #include <linux/dma-mapping.h>
 #include <linux/mm.h>
 #include <linux/debugfs.h>
+#include <linux/workqueue.h>
+#include <linux/wait.h>
 
 #include <linux/usb/ch9.h>
 #include <linux/usb/gadget.h>
@@ -37,6 +39,7 @@
 #define DWC3_MSG_MAX	500
 
 /* Global constants */
+#define DWC3_ZLP_BUF_SIZE	1024	/* size of a superspeed bulk */
 #define DWC3_EP0_BOUNCE_SIZE	512
 #define DWC3_ENDPOINTS_NUM	32
 #define DWC3_XHCI_RESOURCES_NUM	2
@@ -56,12 +59,15 @@
 #define DWC3_DEVICE_EVENT_WAKEUP		4
 #define DWC3_DEVICE_EVENT_HIBER_REQ		5
 #define DWC3_DEVICE_EVENT_EOPF			6
+/* For version 2.30a and above */
+#define DWC3_DEVICE_EVENT_SUSPEND		6
 #define DWC3_DEVICE_EVENT_SOF			7
 #define DWC3_DEVICE_EVENT_ERRATIC_ERROR		9
 #define DWC3_DEVICE_EVENT_CMD_CMPL		10
 #define DWC3_DEVICE_EVENT_OVERFLOW		11
 
 #define DWC3_GEVNTCOUNT_MASK	0xfffc
+#define DWC3_GEVNTCOUNT_EHB	(1 << 31)
 #define DWC3_GSNPSID_MASK	0xffff0000
 #define DWC3_GSNPSREV_MASK	0xffff
 
@@ -124,6 +130,11 @@
 #define DWC3_GEVNTSIZ(n)	(0xc408 + (n * 0x10))
 #define DWC3_GEVNTCOUNT(n)	(0xc40c + (n * 0x10))
 
+#define DWC3_GEVNTCOUNT_EVNTINTRPTMASK		(1 << 31)
+#define DWC3_GEVNTADRHI_EVNTADRHI_GSI_EN(n)	(n << 22)
+#define DWC3_GEVNTADRHI_EVNTADRHI_GSI_IDX(n)	(n << 16)
+#define DWC3_GEVENT_TYPE_GSI			0x3
+
 #define DWC3_GHWPARAMS8		0xc600
 #define DWC3_GFLADJ		0xc630
 
@@ -140,6 +151,8 @@
 #define DWC3_DEPCMDPAR0(n)	(0xc808 + (n * 0x10))
 #define DWC3_DEPCMD(n)		(0xc80c + (n * 0x10))
 
+#define DWC3_DEV_IMOD(n)	(0xca00 + (n * 0x4))
+
 /* OTG Registers */
 #define DWC3_OCFG		0xcc00
 #define DWC3_OCTL		0xcc04
@@ -149,9 +162,16 @@
 
 /* Bit fields */
 
+/* Global SoC Bus Configuration Register 1 */
+#define DWC3_GSBUSCFG1_PIPETRANSLIMIT_MASK	(0x0f << 8)
+#define DWC3_GSBUSCFG1_PIPETRANSLIMIT(n)	((n) << 8)
+
 /* Global Configuration Register */
 #define DWC3_GCTL_PWRDNSCALE(n)	((n) << 19)
+#define DWC3_GCTL_PWRDNSCALEMASK (0xFFF80000)
 #define DWC3_GCTL_U2RSTECN	(1 << 16)
+#define DWC3_GCTL_SOFITPSYNC	(1 << 10)
+#define DWC3_GCTL_U2EXIT_LFPS	(1 << 2)
 #define DWC3_GCTL_RAMCLKSEL(x)	(((x) & DWC3_GCTL_CLK_MASK) << 6)
 #define DWC3_GCTL_CLK_BUS	(0)
 #define DWC3_GCTL_CLK_PIPE	(1)
@@ -173,8 +193,15 @@
 #define DWC3_GCTL_GBLHIBERNATIONEN	(1 << 1)
 #define DWC3_GCTL_DSBLCLKGTNG		(1 << 0)
 
+/* Global User Control Register */
+#define DWC3_GUCTL_REFCLKPER		(0x3FF << 22)
+
+/* Global Debug LTSSM Register */
+#define DWC3_GDBGLTSSM_LINKSTATE_MASK	(0xF << 22)
+
 /* Global USB2 PHY Configuration Register */
 #define DWC3_GUSB2PHYCFG_PHYSOFTRST	(1 << 31)
 #define DWC3_GUSB2PHYCFG_SUSPHY		(1 << 6)
 #define DWC3_GUSB2PHYCFG_ULPI_UTMI	(1 << 4)
 #define DWC3_GUSB2PHYCFG_ENBLSLPM	(1 << 8)
@@ -190,6 +217,7 @@
 /* Global USB3 PIPE Control Register */
 #define DWC3_GUSB3PIPECTL_PHYSOFTRST	(1 << 31)
 #define DWC3_GUSB3PIPECTL_U2SSINP3OK	(1 << 29)
+#define DWC3_GUSB3PIPECTL_UX_EXIT_IN_PX	(1 << 27)
 #define DWC3_GUSB3PIPECTL_REQP1P2P3	(1 << 24)
 #define DWC3_GUSB3PIPECTL_DEP1P2P3(n)	((n) << 19)
 #define DWC3_GUSB3PIPECTL_DEP1P2P3_MASK	DWC3_GUSB3PIPECTL_DEP1P2P3(7)
@@ -200,6 +228,8 @@
 #define DWC3_GUSB3PIPECTL_RX_DETOPOLL	(1 << 8)
 #define DWC3_GUSB3PIPECTL_TX_DEEPH_MASK	DWC3_GUSB3PIPECTL_TX_DEEPH(3)
 #define DWC3_GUSB3PIPECTL_TX_DEEPH(n)	((n) << 1)
+#define DWC3_GUSB3PIPECTL_DELAYP1TRANS  (1 << 18)
+#define DWC3_GUSB3PIPECTL_ELASTIC_BUF_MODE	(1 << 0)
 
 /* Global TX Fifo Size Register */
 #define DWC3_GTXFIFOSIZ_TXFDEF(n)	((n) & 0xffff)
@@ -241,6 +271,12 @@
 #define DWC3_GFLADJ_30MHZ_SDBND_SEL		(1 << 7)
 #define DWC3_GFLADJ_30MHZ_MASK			0x3f
 
+/* Global Frame Length Adjustment Register */
+#define DWC3_GFLADJ_REFCLK_240MHZDECR_PLS1	(1 << 31)
+#define DWC3_GFLADJ_REFCLK_240MHZ_DECR		(0x7F << 24)
+#define DWC3_GFLADJ_REFCLK_LPM_SEL		(1 << 23)
+#define DWC3_GFLADJ_REFCLK_FLADJ		(0x3FFF << 8)
+
 /* Device Configuration Register */
 #define DWC3_DCFG_DEVADDR(addr)	((addr) << 3)
 #define DWC3_DCFG_DEVADDR_MASK	DWC3_DCFG_DEVADDR(0x7f)
@@ -306,6 +342,8 @@
 #define DWC3_DEVTEN_ERRTICERREN		(1 << 9)
 #define DWC3_DEVTEN_SOFEN		(1 << 7)
 #define DWC3_DEVTEN_EOPFEN		(1 << 6)
+/* For version 2.30a and above */
+#define DWC3_DEVTEN_SUSPEND		(1 << 6)
 #define DWC3_DEVTEN_HIBERNATIONREQEVTEN	(1 << 5)
 #define DWC3_DEVTEN_WKUPEVTEN		(1 << 4)
 #define DWC3_DEVTEN_ULSTCNGEN		(1 << 3)
@@ -346,6 +384,7 @@
 #define DWC3_DGCMD_SET_LMP		0x01
 #define DWC3_DGCMD_SET_PERIODIC_PAR	0x02
 #define DWC3_DGCMD_XMIT_FUNCTION	0x03
+#define DWC3_DGCMD_XMIT_DEV		0x07
 
 /* These apply for core versions 1.94a and later */
 #define DWC3_DGCMD_SET_SCRATCHPAD_ADDR_LO	0x04
@@ -398,10 +437,20 @@
 #define DWC3_DEPCMD_TYPE_BULK		2
 #define DWC3_DEPCMD_TYPE_INTR		3
 
+#define DWC3_DEV_IMOD_COUNT_SHIFT	16
+#define DWC3_DEV_IMOD_COUNT_MASK	(0xffff << 16)
+#define DWC3_DEV_IMOD_INTERVAL_SHIFT	0
+#define DWC3_DEV_IMOD_INTERVAL_MASK	(0xffff << 0)
+
 /* Structures */
 
 struct dwc3_trb;
 
+enum event_buf_type {
+	EVT_BUF_TYPE_NORMAL,
+	EVT_BUF_TYPE_GSI
+};
+
 /**
  * struct dwc3_event_buffer - Software event buffer representation
  * @buf: _THE_ buffer
@@ -415,6 +464,7 @@
 struct dwc3_event_buffer {
 	void			*buf;
 	unsigned		length;
+	enum event_buf_type	type;
 	unsigned int		lpos;
 	unsigned int		count;
 	unsigned int		flags;
@@ -426,6 +476,36 @@
 	struct dwc3		*dwc;
 };
 
+struct dwc3_gadget_events {
+	unsigned int	disconnect;
+	unsigned int	reset;
+	unsigned int	connect;
+	unsigned int	wakeup;
+	unsigned int	link_status_change;
+	unsigned int	eopf;
+	unsigned int	suspend;
+	unsigned int	sof;
+	unsigned int	erratic_error;
+	unsigned int	overflow;
+	unsigned int	vendor_dev_test_lmp;
+	unsigned int	cmdcmplt;
+	unsigned int	unknown_event;
+};
+
+struct dwc3_ep_events {
+	unsigned int	xfercomplete;
+	unsigned int	xfernotready;
+	unsigned int	control_data;
+	unsigned int	control_status;
+	unsigned int	xferinprogress;
+	unsigned int	rxtxfifoevent;
+	unsigned int	streamevent;
+	unsigned int	epcmdcomplete;
+	unsigned int	cmdcmplt;
+	unsigned int	unknown_event;
+	unsigned int	total;
+};
+
 #define DWC3_EP_FLAG_STALLED	(1 << 0)
 #define DWC3_EP_FLAG_WEDGED	(1 << 1)
 
@@ -440,8 +520,10 @@
  * @endpoint: usb endpoint
  * @request_list: list of requests for this endpoint
  * @req_queued: list of requests on this ep which have TRBs setup
+ * @trb_dma_pool: dma pool used to get aligned trb memory pool
  * @trb_pool: array of transaction buffers
  * @trb_pool_dma: dma address of @trb_pool
+ * @num_trbs: num of trbs in the trb dma pool
  * @free_slot: next slot which is going to be used
  * @busy_slot: first slot which is owned by HW
  * @desc: usb_endpoint_descriptor pointer
@@ -451,18 +533,24 @@
  * @number: endpoint number (1 - 15)
  * @type: set to bmAttributes & USB_ENDPOINT_XFERTYPE_MASK
  * @resource_index: Resource transfer index
+ * @current_uf: Current uf received through last event parameter
  * @interval: the interval on which the ISOC transfer is started
  * @name: a human readable name e.g. ep1out-bulk
  * @direction: true for TX, false for RX
  * @stream_capable: true when streams are enabled
+ * @dbg_ep_events: different events counter for endpoint
+ * @dbg_ep_events_diff: differential events counter for endpoint
+ * @dbg_ep_events_ts: timestamp for previous event counters
  */
 struct dwc3_ep {
 	struct usb_ep		endpoint;
 	struct list_head	request_list;
 	struct list_head	req_queued;
 
+	struct dma_pool		*trb_dma_pool;
 	struct dwc3_trb		*trb_pool;
 	dma_addr_t		trb_pool_dma;
+	u32			num_trbs;
 	u32			free_slot;
 	u32			busy_slot;
 	const struct usb_ss_ep_comp_descriptor *comp_desc;
@@ -483,12 +571,16 @@
 	u8			number;
 	u8			type;
 	u8			resource_index;
+	u16			current_uf;
 	u32			interval;
 
 	char			name[20];
 
 	unsigned		direction:1;
 	unsigned		stream_capable:1;
+	struct dwc3_ep_events	dbg_ep_events;
+	struct dwc3_ep_events	dbg_ep_events_diff;
+	struct timespec		dbg_ep_events_ts;
 };
 
 enum dwc3_phy {
@@ -640,11 +732,24 @@
 	__le64	dma_adr[DWC3_MAX_HIBER_SCRATCHBUFS];
 };
 
+#define DWC3_CONTROLLER_ERROR_EVENT			0
+#define DWC3_CONTROLLER_RESET_EVENT			1
+#define DWC3_CONTROLLER_POST_RESET_EVENT		2
+#define DWC3_CORE_PM_SUSPEND_EVENT			3
+#define DWC3_CORE_PM_RESUME_EVENT			4
+#define DWC3_CONTROLLER_CONNDONE_EVENT			5
+#define DWC3_CONTROLLER_NOTIFY_OTG_EVENT		6
+#define DWC3_CONTROLLER_SET_CURRENT_DRAW_EVENT		7
+#define DWC3_CONTROLLER_RESTART_USB_SESSION		8
+#define DWC3_CONTROLLER_NOTIFY_DISABLE_UPDXFER		9
+
+#define MAX_INTR_STATS					10
 /**
  * struct dwc3 - representation of our controller
  * @ctrl_req: usb control request which is used for ep0
  * @ep0_trb: trb which is used for the ctrl_req
  * @ep0_bounce: bounce buffer for ep0
+ * @zlp_buf: used when request->zero is set
 * @setup_buf: used while processing STD USB requests
  * @ctrl_req_addr: dma address of ctrl_req
  * @ep0_trb: dma address of ep0_trb
@@ -659,10 +764,12 @@
  * @gadget_driver: pointer to the gadget driver
  * @regs: base address for our registers
  * @regs_size: address space size
+ * @reg_phys: physical base address of dwc3 core register address space
  * @nr_scratch: number of scratch buffers
  * @num_event_buffers: calculated number of event buffers
  * @u1u2: only used on revisions <1.83a for workaround
- * @maximum_speed: maximum speed requested (mainly for testing purposes)
+ * @maximum_speed: maximum speed to operate as requested by sw
+ * @max_hw_supp_speed: maximum speed supported by hw design
  * @revision: revision register contents
  * @dr_mode: requested mode of operation
  * @usb2_phy: pointer to USB2 PHY
@@ -727,11 +834,29 @@
  * 	1	- -3.5dB de-emphasis
  * 	2	- No de-emphasis
  * 	3	- Reserved
+ * @is_drd: device supports dual-role or not
+ * @err_evt_seen: previous event in queue was erratic error
+ * @usb3_u1u2_disable: if true, disable U1U2 low power modes in Superspeed mode.
+ * @in_lpm: indicates if controller is in low power mode (no clocks)
+ * @tx_fifo_size: Available RAM size for TX fifo allocation
+ * @irq: irq number
+ * @bh_work: work item which handles the interrupt (bottom half)
+ * @irq_cnt: total irq count
+ * @last_irq_cnt: last irq count
+ * @bh_completion_time: time taken for bottom-half completion
+ * @bh_handled_evt_cnt: no. of events handled by the bottom half per interrupt
+ * @bh_dbg_index: index for capturing bh_completion_time and bh_handled_evt_cnt
+ * @wait_linkstate: waitqueue for waiting LINK to move into required state
+ * @vbus_draw: current to be drawn from USB
+ * @imod_interval: set the interrupt moderation interval in 250ns
+ *			increments or 0 to disable.
+ * @create_reg_debugfs: create debugfs entry to allow dwc3 register dump
  */
 struct dwc3 {
 	struct usb_ctrlrequest	*ctrl_req;
 	struct dwc3_trb		*ep0_trb;
 	void			*ep0_bounce;
+	void			*zlp_buf;
 	void			*scratchbuf;
 	u8			*setup_buf;
 	dma_addr_t		ctrl_req_addr;
@@ -764,6 +889,7 @@
 
 	void __iomem		*regs;
 	size_t			regs_size;
+	phys_addr_t		reg_phys;
 
 	enum usb_dr_mode	dr_mode;
 
@@ -773,8 +899,13 @@
 
 	u32			nr_scratch;
 	u32			num_event_buffers;
+	u32			num_normal_event_buffers;
+	u32			num_gsi_event_buffers;
+
+	u32			u1;
 	u32			u1u2;
 	u32			maximum_speed;
+	u32			max_hw_supp_speed;
 
 	/*
 	 * All 3.1 IP version constants are greater than the 3.0 IP
@@ -804,6 +935,8 @@
 #define DWC3_REVISION_260A	0x5533260a
 #define DWC3_REVISION_270A	0x5533270a
 #define DWC3_REVISION_280A	0x5533280a
+#define DWC3_REVISION_300A	0x5533300a
+#define DWC3_REVISION_310A	0x5533310a
 
 /*
  * NOTICE: we're using bit 31 as a "is usb 3.1" flag. This is really
@@ -811,6 +944,7 @@
  */
 #define DWC3_REVISION_IS_DWC31		0x80000000
 #define DWC3_USB31_REVISION_110A	(0x3131302a | DWC3_REVISION_IS_DWC31)
+#define DWC3_USB31_REVISION_120A	(0x3132302a | DWC3_REVISION_IS_DWC31)
 
 	enum dwc3_ep0_next	ep0_next_event;
 	enum dwc3_ep0_state	ep0state;
@@ -840,6 +974,9 @@
 
 	const char		*hsphy_interface;
 
+	void (*notify_event)(struct dwc3 *, unsigned, unsigned);
+	struct work_struct	wakeup_work;
+
 	unsigned		delayed_status:1;
 	unsigned		ep0_bounced:1;
 	unsigned		ep0_expect_in:1;
@@ -868,6 +1005,49 @@
 
 	unsigned		tx_de_emphasis_quirk:1;
 	unsigned		tx_de_emphasis:2;
+
+	unsigned		is_drd:1;
+	/* Indicate if the gadget was powered by the otg driver */
+	unsigned		vbus_active:1;
+	/* Indicate if software connect was issued by the usb_gadget_driver */
+	unsigned		softconnect:1;
+	unsigned		nominal_elastic_buffer:1;
+	unsigned		err_evt_seen:1;
+	unsigned		usb3_u1u2_disable:1;
+	/* Indicate if need to disable controller internal clkgating */
+	unsigned		disable_clk_gating:1;
+	unsigned		enable_bus_suspend:1;
+
+	struct dwc3_gadget_events	dbg_gadget_events;
+
+	atomic_t		in_lpm;
+	int			tx_fifo_size;
+	bool			b_suspend;
+	unsigned		vbus_draw;
+
+	u16			imod_interval;
+
+	struct workqueue_struct	*dwc_wq;
+	struct work_struct	bh_work;
+
+	/* IRQ timing statistics */
+	int			irq;
+	unsigned long		irq_cnt;
+	unsigned long		last_irq_cnt;
+	unsigned long		ep_cmd_timeout_cnt;
+	unsigned                bh_completion_time[MAX_INTR_STATS];
+	unsigned                bh_handled_evt_cnt[MAX_INTR_STATS];
+	unsigned                bh_dbg_index;
+	ktime_t			irq_start_time[MAX_INTR_STATS];
+	ktime_t			t_pwr_evt_irq;
+	unsigned                irq_completion_time[MAX_INTR_STATS];
+	unsigned                irq_event_count[MAX_INTR_STATS];
+	unsigned                irq_dbg_index;
+
+	unsigned long		l1_remote_wakeup_cnt;
+
+	wait_queue_head_t	wait_linkstate;
+	bool			create_reg_debugfs;
 };
 
 /* -------------------------------------------------------------------------- */
@@ -1019,6 +1199,20 @@
 void dwc3_set_mode(struct dwc3 *dwc, u32 mode);
 int dwc3_gadget_resize_tx_fifos(struct dwc3 *dwc);
 
+/* check whether we are on the DWC_usb3 core */
+static inline bool dwc3_is_usb3(struct dwc3 *dwc)
+{
+	return !(dwc->revision & DWC3_REVISION_IS_DWC31);
+}
+
+/* check whether we are on the DWC_usb31 core */
+static inline bool dwc3_is_usb31(struct dwc3 *dwc)
+{
+	return !!(dwc->revision & DWC3_REVISION_IS_DWC31);
+}
+
+bool dwc3_has_imod(struct dwc3 *dwc);
+
 #if IS_ENABLED(CONFIG_USB_DWC3_HOST) || IS_ENABLED(CONFIG_USB_DWC3_DUAL_ROLE)
 int dwc3_host_init(struct dwc3 *dwc);
 void dwc3_host_exit(struct dwc3 *dwc);
@@ -1032,17 +1226,22 @@
 #if IS_ENABLED(CONFIG_USB_DWC3_GADGET) || IS_ENABLED(CONFIG_USB_DWC3_DUAL_ROLE)
 int dwc3_gadget_init(struct dwc3 *dwc);
 void dwc3_gadget_exit(struct dwc3 *dwc);
+void dwc3_gadget_restart(struct dwc3 *dwc);
 int dwc3_gadget_set_test_mode(struct dwc3 *dwc, int mode);
 int dwc3_gadget_get_link_state(struct dwc3 *dwc);
 int dwc3_gadget_set_link_state(struct dwc3 *dwc, enum dwc3_link_state state);
 int dwc3_send_gadget_ep_cmd(struct dwc3 *dwc, unsigned ep,
 		unsigned cmd, struct dwc3_gadget_ep_cmd_params *params);
 int dwc3_send_gadget_generic_command(struct dwc3 *dwc, unsigned cmd, u32 param);
+void dwc3_gadget_enable_irq(struct dwc3 *dwc);
+void dwc3_gadget_disable_irq(struct dwc3 *dwc);
 #else
 static inline int dwc3_gadget_init(struct dwc3 *dwc)
 { return 0; }
 static inline void dwc3_gadget_exit(struct dwc3 *dwc)
 { }
+static inline void dwc3_gadget_restart(struct dwc3 *dwc)
+{ }
 static inline int dwc3_gadget_set_test_mode(struct dwc3 *dwc, int mode)
 { return 0; }
 static inline int dwc3_gadget_get_link_state(struct dwc3 *dwc)
@@ -1057,6 +1256,10 @@
 static inline int dwc3_send_gadget_generic_command(struct dwc3 *dwc,
 		int cmd, u32 param)
 { return 0; }
+static inline void dwc3_gadget_enable_irq(struct dwc3 *dwc)
+{ }
+static inline void dwc3_gadget_disable_irq(struct dwc3 *dwc)
+{ }
 #endif
 
 /* power management interface */
@@ -1075,6 +1278,7 @@
 }
 #endif /* !IS_ENABLED(CONFIG_USB_DWC3_HOST) */
 
 #if IS_ENABLED(CONFIG_USB_DWC3_ULPI)
 int dwc3_ulpi_init(struct dwc3 *dwc);
 void dwc3_ulpi_exit(struct dwc3 *dwc);
@@ -1085,4 +1289,15 @@
 { }
 #endif
 
+int dwc3_core_init(struct dwc3 *dwc);
+int dwc3_core_pre_init(struct dwc3 *dwc);
+void dwc3_post_host_reset_core_init(struct dwc3 *dwc);
+int dwc3_event_buffers_setup(struct dwc3 *dwc);
+void dwc3_usb3_phy_suspend(struct dwc3 *dwc, int suspend);
+
+extern void dwc3_set_notifier(
+	void (*notify)(struct dwc3 *dwc3, unsigned event, unsigned value));
+extern int dwc3_notify_event(struct dwc3 *dwc3, unsigned event, unsigned value);
+
 #endif /* __DRIVERS_USB_DWC3_CORE_H */
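
The revision constants above encode the GSNPSID ID register: a DWC_usb3 core
reads back as 0x5533xxxx ('U3' plus the release number), while for DWC_usb31
cores the driver sets the software-only bit 31 so that a single u32 keeps
both families ordered; dwc3_is_usb3()/dwc3_is_usb31() simply test that flag.
An illustrative sketch of the detection (the usb31 branch here is an
assumption; the real dwc3_core_init() code is not part of this hunk):

	u32 reg = dwc3_readl(dwc->regs, DWC3_GSNPSID);

	if ((reg & DWC3_GSNPSID_MASK) == 0x55330000) {
		/* DWC_usb3: 'U3' followed by the revision number */
		dwc->revision = reg;
	} else if ((reg & DWC3_GSNPSID_MASK) == 0x33310000) {
		/* DWC_usb31: tag it so revision comparisons stay ordered */
		dwc->revision = (reg & DWC3_GSNPSREV_MASK) |
					DWC3_REVISION_IS_DWC31;
	}
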
diff -ruw linux-4.4.115/drivers/usb/dwc3/debugfs.c linux-4.4.115-fbx/drivers/usb/dwc3/debugfs.c
--- linux-4.4.115/drivers/usb/dwc3/debugfs.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/usb/dwc3/debugfs.c	2019-01-22 16:16:27.255280383 +0100
@@ -16,6 +16,7 @@
  * GNU General Public License for more details.
  */
 
+#include <linux/module.h>
 #include <linux/kernel.h>
 #include <linux/slab.h>
 #include <linux/ptrace.h>
@@ -39,6 +40,9 @@
 	.offset	= DWC3_ ##nm - DWC3_GLOBALS_REGS_START,	\
 }
 
+#define ep_event_rate(ev, c, p, dt)	\
+	((dt) ? ((c.ev - p.ev) * (MSEC_PER_SEC)) / (dt) : 0)
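
ep_event_rate() turns two counter snapshots plus a millisecond delta into an
events-per-second figure: rate = (current - previous) * MSEC_PER_SEC / dt_ms,
with a guard for dt == 0. For example, a counter that moved from 100 to 350
over a 500 ms window reads out as (250 * 1000) / 500 = 500 Hz.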
+
 static const struct debugfs_reg32 dwc3_regs[] = {
 	dump_register(GSBUSCFG0),
 	dump_register(GSBUSCFG1),
@@ -210,6 +214,7 @@
 	dump_register(GEVNTCOUNT(0)),
 
 	dump_register(GHWPARAMS8),
+	dump_register(GFLADJ),
 	dump_register(DCFG),
 	dump_register(DCTL),
 	dump_register(DEVTEN),
@@ -363,6 +368,11 @@
 	unsigned long		flags;
 	u32			reg;
 
+	if (atomic_read(&dwc->in_lpm)) {
+		seq_puts(s, "USB device is powered off\n");
+		return 0;
+	}
+
 	spin_lock_irqsave(&dwc->lock, flags);
 	reg = dwc3_readl(dwc->regs, DWC3_GCTL);
 	spin_unlock_irqrestore(&dwc->lock, flags);
@@ -396,7 +406,12 @@
 	struct dwc3		*dwc = s->private;
 	unsigned long		flags;
 	u32			mode = 0;
-	char			buf[32];
+	char			buf[32] = {};
+
+	if (atomic_read(&dwc->in_lpm)) {
+		dev_err(dwc->dev, "USB device is powered off\n");
+		return count;
+	}
 
 	if (copy_from_user(&buf, ubuf, min_t(size_t, sizeof(buf) - 1, count)))
 		return -EFAULT;
@@ -432,6 +447,12 @@
 	unsigned long		flags;
 	u32			reg;
 
+	if (atomic_read(&dwc->in_lpm)) {
+		seq_puts(s, "USB device is powered off\n");
+		return 0;
+	}
+
 	spin_lock_irqsave(&dwc->lock, flags);
 	reg = dwc3_readl(dwc->regs, DWC3_DCTL);
 	reg &= DWC3_DCTL_TSTCTRL_MASK;
@@ -476,7 +497,12 @@
 	struct dwc3		*dwc = s->private;
 	unsigned long		flags;
 	u32			testmode = 0;
-	char			buf[32];
+	char			buf[32] = {};
+
+	if (atomic_read(&dwc->in_lpm)) {
+		seq_puts(s, "USB device is powered off\n");
+		return count;
+	}
 
 	if (copy_from_user(&buf, ubuf, min_t(size_t, sizeof(buf) - 1, count)))
 		return -EFAULT;
@@ -516,6 +542,11 @@
 	enum dwc3_link_state	state;
 	u32			reg;
 
+	if (atomic_read(&dwc->in_lpm)) {
+		seq_puts(s, "USB device is powered off\n");
+		return 0;
+	}
+
 	spin_lock_irqsave(&dwc->lock, flags);
 	reg = dwc3_readl(dwc->regs, DWC3_DSTS);
 	state = DWC3_DSTS_USBLNKST(reg);
@@ -583,7 +614,12 @@
 	struct dwc3		*dwc = s->private;
 	unsigned long		flags;
 	enum dwc3_link_state	state = 0;
-	char			buf[32];
+	char			buf[32] = {};
+
+	if (atomic_read(&dwc->in_lpm)) {
+		seq_puts(s, "USB device is powered off\n");
+		return count;
+	}
 
 	if (copy_from_user(&buf, ubuf, min_t(size_t, sizeof(buf) - 1, count)))
 		return -EFAULT;
@@ -618,6 +654,600 @@
 	.release		= single_release,
 };
 
+static int ep_num;
+static ssize_t dwc3_store_ep_num(struct file *file, const char __user *ubuf,
+				 size_t count, loff_t *ppos)
+{
+	struct seq_file		*s = file->private_data;
+	struct dwc3		*dwc = s->private;
+	char			kbuf[10] = {};
+	unsigned int		num, dir, temp;
+	unsigned long		flags;
+
+	if (copy_from_user(kbuf, ubuf, min_t(size_t, sizeof(kbuf) - 1, count)))
+		return -EFAULT;
+
+	if (sscanf(kbuf, "%u %u", &num, &dir) != 2)
+		return -EINVAL;
+
+	if (dir != 0 && dir != 1)
+		return -EINVAL;
+
+	temp = (num << 1) + dir;
+	if (temp >= (dwc->num_in_eps + dwc->num_out_eps) ||
+					temp >= DWC3_ENDPOINTS_NUM)
+		return -EINVAL;
+
+	spin_lock_irqsave(&dwc->lock, flags);
+	ep_num = temp;
+	spin_unlock_irqrestore(&dwc->lock, flags);
+
+	return count;
+}
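
The write side of these debugfs files takes "num dir" and folds it into the
physical endpoint index as (num << 1) + dir, matching the driver's layout of
even indices for OUT and odd indices for IN endpoints: writing "1 1", for
instance, selects dwc->eps[3] (ep1in), and subsequent reads of requests,
queued_reqs and trbs then dump that endpoint.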
+
+static int dwc3_ep_req_list_show(struct seq_file *s, void *unused)
+{
+	struct dwc3		*dwc = s->private;
+	struct dwc3_ep		*dep;
+	struct dwc3_request	*req = NULL;
+	struct list_head	*ptr = NULL;
+	unsigned long		flags;
+
+	spin_lock_irqsave(&dwc->lock, flags);
+	dep = dwc->eps[ep_num];
+
+	seq_printf(s, "%s request list: flags: 0x%x\n", dep->name, dep->flags);
+	list_for_each(ptr, &dep->request_list) {
+		req = list_entry(ptr, struct dwc3_request, list);
+
+		seq_printf(s,
+			"req:0x%pK len: %d sts: %d dma:0x%pa num_sgs: %d\n",
+			req, req->request.length, req->request.status,
+			&req->request.dma, req->request.num_sgs);
+	}
+	spin_unlock_irqrestore(&dwc->lock, flags);
+
+	return 0;
+}
+
+static int dwc3_ep_req_list_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, dwc3_ep_req_list_show, inode->i_private);
+}
+
+static const struct file_operations dwc3_ep_req_list_fops = {
+	.open			= dwc3_ep_req_list_open,
+	.write			= dwc3_store_ep_num,
+	.read			= seq_read,
+	.llseek			= seq_lseek,
+	.release		= single_release,
+};
+
+static int dwc3_ep_queued_req_show(struct seq_file *s, void *unused)
+{
+	struct dwc3		*dwc = s->private;
+	struct dwc3_ep		*dep;
+	struct dwc3_request	*req = NULL;
+	struct list_head	*ptr = NULL;
+	unsigned long		flags;
+
+	spin_lock_irqsave(&dwc->lock, flags);
+	dep = dwc->eps[ep_num];
+
+	seq_printf(s, "%s queued reqs to HW: flags:0x%x\n", dep->name,
+								dep->flags);
+	list_for_each(ptr, &dep->req_queued) {
+		req = list_entry(ptr, struct dwc3_request, list);
+
+		seq_printf(s,
+			"req:0x%pK len:%d sts:%d dma:%pa nsg:%d trb:0x%pK\n",
+			req, req->request.length, req->request.status,
+			&req->request.dma, req->request.num_sgs, req->trb);
+	}
+	spin_unlock_irqrestore(&dwc->lock, flags);
+
+	return 0;
+}
+
+static int dwc3_ep_queued_req_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, dwc3_ep_queued_req_show, inode->i_private);
+}
+
+const struct file_operations dwc3_ep_req_queued_fops = {
+	.open			= dwc3_ep_queued_req_open,
+	.write			= dwc3_store_ep_num,
+	.read			= seq_read,
+	.llseek			= seq_lseek,
+	.release		= single_release,
+};
+
+static int dwc3_ep_trbs_show(struct seq_file *s, void *unused)
+{
+	struct dwc3		*dwc = s->private;
+	struct dwc3_ep		*dep;
+	struct dwc3_trb		*trb;
+	unsigned long		flags;
+	int			j;
+
+	if (!ep_num)
+		return 0;
+
+	spin_lock_irqsave(&dwc->lock, flags);
+	dep = dwc->eps[ep_num];
+
+	seq_printf(s, "%s trb pool: flags:0x%x freeslot:%d busyslot:%d\n",
+		dep->name, dep->flags, dep->free_slot, dep->busy_slot);
+	for (j = 0; j < DWC3_TRB_NUM; j++) {
+		trb = &dep->trb_pool[j];
+		seq_printf(s, "trb:0x%pK bph:0x%x bpl:0x%x size:0x%x ctrl: %x\n",
+			trb, trb->bph, trb->bpl, trb->size, trb->ctrl);
+	}
+	spin_unlock_irqrestore(&dwc->lock, flags);
+
+	return 0;
+}
+
+static int dwc3_ep_trbs_list_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, dwc3_ep_trbs_show, inode->i_private);
+}
+
+const struct file_operations dwc3_ep_trb_list_fops = {
+	.open			= dwc3_ep_trbs_list_open,
+	.write			= dwc3_store_ep_num,
+	.read			= seq_read,
+	.llseek			= seq_lseek,
+	.release		= single_release,
+};
+
+static unsigned int ep_addr_rxdbg_mask = 1;
+module_param(ep_addr_rxdbg_mask, uint, S_IRUGO | S_IWUSR);
+static unsigned int ep_addr_txdbg_mask = 1;
+module_param(ep_addr_txdbg_mask, uint, S_IRUGO | S_IWUSR);
+
+/* Maximum debug message length */
+#define DBG_DATA_MSG   64UL
+
+/* Maximum number of messages */
+#define DBG_DATA_MAX   2048UL
+
+static struct {
+	char     (buf[DBG_DATA_MAX])[DBG_DATA_MSG];   /* buffer */
+	unsigned idx;   /* index */
+	unsigned tty;   /* print to console? */
+	rwlock_t lck;   /* lock */
+} dbg_dwc3_data = {
+	.idx = 0,
+	.tty = 0,
+	.lck = __RW_LOCK_UNLOCKED(lck)
+};
+
+/**
+ * dbg_dec: decrements debug event index
+ * @idx: buffer index
+ */
+static inline void __maybe_unused dbg_dec(unsigned *idx)
+{
+	*idx = (*idx - 1) % DBG_DATA_MAX;
+}
+
+/**
+ * dbg_inc: increments debug event index
+ * @idx: buffer index
+ */
+static inline void dbg_inc(unsigned *idx)
+{
+	*idx = (*idx + 1) % DBG_DATA_MAX;
+}
+
+#define TIME_BUF_LEN  20
+/* get_timestamp - returns time of day in us */
+static char *get_timestamp(char *tbuf)
+{
+	unsigned long long t;
+	unsigned long nanosec_rem;
+
+	t = cpu_clock(smp_processor_id());
+	nanosec_rem = do_div(t, 1000000000)/1000;
+	scnprintf(tbuf, TIME_BUF_LEN, "[%5lu.%06lu] ", (unsigned long)t,
+		nanosec_rem);
+	return tbuf;
+}
+
+static int allow_dbg_print(u8 ep_num)
+{
+	int dir, num;
+
+	/* allow bus wide events */
+	if (ep_num == 0xff)
+		return 1;
+
+	dir = ep_num & 0x1;
+	num = ep_num >> 1;
+	num = 1 << num;
+
+	if (dir && (num & ep_addr_txdbg_mask))
+		return 1;
+	if (!dir && (num & ep_addr_rxdbg_mask))
+		return 1;
+
+	return 0;
+}
+
+/**
+ * dbg_print:  prints the common part of the event
+ * @addr:   endpoint address
+ * @name:   event name
+ * @status: status
+ * @extra:  extra information
+ */
+void dbg_print(u8 ep_num, const char *name, int status, const char *extra)
+{
+	unsigned long flags;
+	char tbuf[TIME_BUF_LEN];
+
+	if (!allow_dbg_print(ep_num))
+		return;
+
+	write_lock_irqsave(&dbg_dwc3_data.lck, flags);
+
+	scnprintf(dbg_dwc3_data.buf[dbg_dwc3_data.idx], DBG_DATA_MSG,
+		  "%s\t? %02X %-12.12s %4i ?\t%s\n",
+		  get_timestamp(tbuf), ep_num, name, status, extra);
+
+	dbg_inc(&dbg_dwc3_data.idx);
+
+	write_unlock_irqrestore(&dbg_dwc3_data.lck, flags);
+
+	if (dbg_dwc3_data.tty != 0)
+		pr_notice("%s\t? %02X %-7.7s %4i ?\t%s\n",
+			  get_timestamp(tbuf), ep_num, name, status, extra);
+}
+
+/**
+ * dbg_done: prints a DONE event
+ * @addr:   endpoint address
+ * @td:     transfer descriptor
+ * @status: status
+ */
+void dbg_done(u8 ep_num, const u32 count, int status)
+{
+	char msg[DBG_DATA_MSG];
+
+	if (!allow_dbg_print(ep_num))
+		return;
+
+	scnprintf(msg, sizeof(msg), "%d", count);
+	dbg_print(ep_num, "DONE", status, msg);
+}
+
+/**
+ * dbg_event: prints a generic event
+ * @addr:   endpoint address
+ * @name:   event name
+ * @status: status
+ */
+void dbg_event(u8 ep_num, const char *name, int status)
+{
+	if (!allow_dbg_print(ep_num))
+		return;
+
+	if (name != NULL)
+		dbg_print(ep_num, name, status, "");
+}
+
+/*
+ * dbg_queue: prints a QUEUE event
+ * @addr:   endpoint address
+ * @req:    USB request
+ * @status: status
+ */
+void dbg_queue(u8 ep_num, const struct usb_request *req, int status)
+{
+	char msg[DBG_DATA_MSG];
+
+	if (!allow_dbg_print(ep_num))
+		return;
+
+	if (req != NULL) {
+		scnprintf(msg, sizeof(msg),
+			  "%d %d", !req->no_interrupt, req->length);
+		dbg_print(ep_num, "QUEUE", status, msg);
+	}
+}
+
+/**
+ * dbg_setup: prints a SETUP event
+ * @addr: endpoint address
+ * @req:  setup request
+ */
+void dbg_setup(u8 ep_num, const struct usb_ctrlrequest *req)
+{
+	char msg[DBG_DATA_MSG];
+
+	if (!allow_dbg_print(ep_num))
+		return;
+
+	if (req != NULL) {
+		scnprintf(msg, sizeof(msg),
+			  "%02X %02X %04X %04X %d", req->bRequestType,
+			  req->bRequest, le16_to_cpu(req->wValue),
+			  le16_to_cpu(req->wIndex), le16_to_cpu(req->wLength));
+		dbg_print(ep_num, "SETUP", 0, msg);
+	}
+}
+
+/**
+ * dbg_print_reg: prints a reg value
+ * @name:   reg name
+ * @reg: reg value to be printed
+ */
+void dbg_print_reg(const char *name, int reg)
+{
+	unsigned long flags;
+
+	write_lock_irqsave(&dbg_dwc3_data.lck, flags);
+
+	scnprintf(dbg_dwc3_data.buf[dbg_dwc3_data.idx], DBG_DATA_MSG,
+		  "%s = 0x%08x\n", name, reg);
+
+	dbg_inc(&dbg_dwc3_data.idx);
+
+	write_unlock_irqrestore(&dbg_dwc3_data.lck, flags);
+
+	if (dbg_dwc3_data.tty != 0)
+		pr_notice("%s = 0x%08x\n", name, reg);
+}
+
+/**
+ * store_events: configure if events are going to be also printed to console
+ *
+ */
+static ssize_t dwc3_store_events(struct file *file,
+			    const char __user *buf, size_t count, loff_t *ppos)
+{
+	int ret;
+	u8 tty;
+
+	if (buf == NULL) {
+		pr_err("[%s] EINVAL\n", __func__);
+		ret = -EINVAL;
+		return ret;
+	}
+
+	ret = kstrtou8_from_user(buf, count, 0, &tty);
+	if (ret < 0) {
+		pr_err("can't get entered value.\n");
+		return ret;
+	}
+
+	if (tty > 1) {
+		pr_err("<1|0>: enable|disable console log\n");
+		ret = -EINVAL;
+		return ret;
+	}
+
+	dbg_dwc3_data.tty = tty;
+	pr_info("tty = %u", dbg_dwc3_data.tty);
+
+	return count;
+}
+
+static int dwc3_gadget_data_events_show(struct seq_file *s, void *unused)
+{
+	unsigned long	flags;
+	unsigned	i;
+
+	read_lock_irqsave(&dbg_dwc3_data.lck, flags);
+
+	i = dbg_dwc3_data.idx;
+	if (strnlen(dbg_dwc3_data.buf[i], DBG_DATA_MSG))
+		seq_printf(s, "%s\n", dbg_dwc3_data.buf[i]);
+	for (dbg_inc(&i); i != dbg_dwc3_data.idx; dbg_inc(&i)) {
+		if (!strnlen(dbg_dwc3_data.buf[i], DBG_DATA_MSG))
+			continue;
+		seq_printf(s, "%s\n", dbg_dwc3_data.buf[i]);
+	}
+
+	read_unlock_irqrestore(&dbg_dwc3_data.lck, flags);
+
+	return 0;
+}
+
+static int dwc3_gadget_data_events_open(struct inode *inode, struct file *f)
+{
+	return single_open(f, dwc3_gadget_data_events_show, inode->i_private);
+}
+
+const struct file_operations dwc3_gadget_dbg_data_fops = {
+	.open			= dwc3_gadget_data_events_open,
+	.read			= seq_read,
+	.write			= dwc3_store_events,
+	.llseek			= seq_lseek,
+	.release		= single_release,
+};
+
+static ssize_t dwc3_store_int_events(struct file *file,
+			const char __user *ubuf, size_t count, loff_t *ppos)
+{
+	int i, ret;
+	unsigned long flags;
+	struct seq_file *s = file->private_data;
+	struct dwc3 *dwc = s->private;
+	struct dwc3_ep *dep;
+	struct timespec ts;
+	u8 clear_stats;
+
+	if (ubuf == NULL) {
+		pr_err("[%s] EINVAL\n", __func__);
+		ret = -EINVAL;
+		return ret;
+	}
+
+	ret = kstrtou8_from_user(ubuf, count, 0, &clear_stats);
+	if (ret < 0) {
+		pr_err("can't get entered value.\n");
+		return ret;
+	}
+
+	if (clear_stats != 0) {
+		pr_err("Wrong value. To clear stats, enter value as 0.\n");
+		ret = -EINVAL;
+		return ret;
+	}
+
+	spin_lock_irqsave(&dwc->lock, flags);
+
+	pr_debug("%s(): clearing debug interrupt buffers\n", __func__);
+	ts = current_kernel_time();
+	for (i = 0; i < DWC3_ENDPOINTS_NUM; i++) {
+		dep = dwc->eps[i];
+		memset(&dep->dbg_ep_events, 0, sizeof(dep->dbg_ep_events));
+		memset(&dep->dbg_ep_events_diff, 0,
+			sizeof(dep->dbg_ep_events_diff));
+		dep->dbg_ep_events_ts = ts;
+	}
+	memset(&dwc->dbg_gadget_events, 0, sizeof(dwc->dbg_gadget_events));
+
+	spin_unlock_irqrestore(&dwc->lock, flags);
+
+	return count;
+}
+
+static int dwc3_gadget_int_events_show(struct seq_file *s, void *unused)
+{
+	unsigned long   flags;
+	struct dwc3 *dwc = s->private;
+	struct dwc3_gadget_events *dbg_gadget_events;
+	struct dwc3_ep *dep;
+	int i;
+	struct timespec ts_delta;
+	struct timespec ts_current;
+	u32 ts_delta_ms;
+
+	spin_lock_irqsave(&dwc->lock, flags);
+	dbg_gadget_events = &dwc->dbg_gadget_events;
+
+	for (i = 0; i < DWC3_ENDPOINTS_NUM; i++) {
+		dep = dwc->eps[i];
+
+		if (dep == NULL || !(dep->flags & DWC3_EP_ENABLED))
+			continue;
+
+		ts_current = current_kernel_time();
+		ts_delta = timespec_sub(ts_current, dep->dbg_ep_events_ts);
+		ts_delta_ms = ts_delta.tv_nsec / NSEC_PER_MSEC +
+			ts_delta.tv_sec * MSEC_PER_SEC;
+
+		seq_printf(s, "\n\n===== dbg_ep_events for EP(%d) %s =====\n",
+			i, dep->name);
+		seq_printf(s, "xfercomplete:%u @ %luHz\n",
+			dep->dbg_ep_events.xfercomplete,
+			ep_event_rate(xfercomplete, dep->dbg_ep_events,
+				dep->dbg_ep_events_diff, ts_delta_ms));
+		seq_printf(s, "xfernotready:%u @ %luHz\n",
+			dep->dbg_ep_events.xfernotready,
+			ep_event_rate(xfernotready, dep->dbg_ep_events,
+				dep->dbg_ep_events_diff, ts_delta_ms));
+		seq_printf(s, "control_data:%u @ %luHz\n",
+			dep->dbg_ep_events.control_data,
+			ep_event_rate(control_data, dep->dbg_ep_events,
+				dep->dbg_ep_events_diff, ts_delta_ms));
+		seq_printf(s, "control_status:%u @ %luHz\n",
+			dep->dbg_ep_events.control_status,
+			ep_event_rate(control_status, dep->dbg_ep_events,
+				dep->dbg_ep_events_diff, ts_delta_ms));
+		seq_printf(s, "xferinprogress:%u @ %luHz\n",
+			dep->dbg_ep_events.xferinprogress,
+			ep_event_rate(xferinprogress, dep->dbg_ep_events,
+				dep->dbg_ep_events_diff, ts_delta_ms));
+		seq_printf(s, "rxtxfifoevent:%u @ %luHz\n",
+			dep->dbg_ep_events.rxtxfifoevent,
+			ep_event_rate(rxtxfifoevent, dep->dbg_ep_events,
+				dep->dbg_ep_events_diff, ts_delta_ms));
+		seq_printf(s, "streamevent:%u @ %luHz\n",
+			dep->dbg_ep_events.streamevent,
+			ep_event_rate(streamevent, dep->dbg_ep_events,
+				dep->dbg_ep_events_diff, ts_delta_ms));
+		seq_printf(s, "epcmdcomplt:%u @ %luHz\n",
+			dep->dbg_ep_events.epcmdcomplete,
+			ep_event_rate(epcmdcomplete, dep->dbg_ep_events,
+				dep->dbg_ep_events_diff, ts_delta_ms));
+		seq_printf(s, "cmdcmplt:%u @ %luHz\n",
+			dep->dbg_ep_events.cmdcmplt,
+			ep_event_rate(cmdcmplt, dep->dbg_ep_events,
+				dep->dbg_ep_events_diff, ts_delta_ms));
+		seq_printf(s, "unknown:%u @ %luHz\n",
+			dep->dbg_ep_events.unknown_event,
+			ep_event_rate(unknown_event, dep->dbg_ep_events,
+				dep->dbg_ep_events_diff, ts_delta_ms));
+		seq_printf(s, "total:%u @ %luHz\n",
+			dep->dbg_ep_events.total,
+			ep_event_rate(total, dep->dbg_ep_events,
+				dep->dbg_ep_events_diff, ts_delta_ms));
+
+		dep->dbg_ep_events_ts = ts_current;
+		dep->dbg_ep_events_diff = dep->dbg_ep_events;
+	}
+
+	seq_puts(s, "\n=== dbg_gadget events ==\n");
+	seq_printf(s, "disconnect:%u\n reset:%u\n",
+		dbg_gadget_events->disconnect, dbg_gadget_events->reset);
+	seq_printf(s, "connect:%u\n wakeup:%u\n",
+		dbg_gadget_events->connect, dbg_gadget_events->wakeup);
+	seq_printf(s, "link_status_change:%u\n eopf:%u\n",
+		dbg_gadget_events->link_status_change, dbg_gadget_events->eopf);
+	seq_printf(s, "sof:%u\n suspend:%u\n",
+		dbg_gadget_events->sof, dbg_gadget_events->suspend);
+	seq_printf(s, "erratic_error:%u\n overflow:%u\n",
+		dbg_gadget_events->erratic_error,
+		dbg_gadget_events->overflow);
+	seq_printf(s, "vendor_dev_test_lmp:%u\n cmdcmplt:%u\n",
+		dbg_gadget_events->vendor_dev_test_lmp,
+		dbg_gadget_events->cmdcmplt);
+	seq_printf(s, "unknown_event:%u\n", dbg_gadget_events->unknown_event);
+
+	seq_printf(s, "\n\t== Last %d interrupt stats ==\t\n", MAX_INTR_STATS);
+	seq_puts(s, "@ time (us):\t");
+	for (i = 0; i < MAX_INTR_STATS; i++)
+		seq_printf(s, "%lld\t", ktime_to_us(dwc->irq_start_time[i]));
+	seq_puts(s, "\nhard irq time (us):\t");
+	for (i = 0; i < MAX_INTR_STATS; i++)
+		seq_printf(s, "%d\t", dwc->irq_completion_time[i]);
+	seq_puts(s, "\nevents count:\t\t");
+	for (i = 0; i < MAX_INTR_STATS; i++)
+		seq_printf(s, "%d\t", dwc->irq_event_count[i]);
+	seq_puts(s, "\nbh handled count:\t");
+	for (i = 0; i < MAX_INTR_STATS; i++)
+		seq_printf(s, "%d\t", dwc->bh_handled_evt_cnt[i]);
+	seq_puts(s, "\nirq thread time (us):\t");
+	for (i = 0; i < MAX_INTR_STATS; i++)
+		seq_printf(s, "%d\t", dwc->bh_completion_time[i]);
+	seq_putc(s, '\n');
+
+	seq_printf(s, "t_pwr evt irq : %lld\n",
+			ktime_to_us(dwc->t_pwr_evt_irq));
+
+	seq_printf(s, "l1_remote_wakeup_cnt : %lu\n",
+		dwc->l1_remote_wakeup_cnt);
+
+	spin_unlock_irqrestore(&dwc->lock, flags);
+	return 0;
+}
+
+static int dwc3_gadget_events_open(struct inode *inode, struct file *f)
+{
+	return single_open(f, dwc3_gadget_int_events_show, inode->i_private);
+}
+
+const struct file_operations dwc3_gadget_dbg_events_fops = {
+	.open		= dwc3_gadget_events_open,
+	.read		= seq_read,
+	.write		= dwc3_store_int_events,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+
 int dwc3_debugfs_init(struct dwc3 *dwc)
 {
 	struct dentry		*root;
@@ -642,11 +1272,15 @@
 	dwc->regset->nregs = ARRAY_SIZE(dwc3_regs);
 	dwc->regset->base = dwc->regs;
 
-	file = debugfs_create_regset32("regdump", S_IRUGO, root, dwc->regset);
+	if (dwc->create_reg_debugfs) {
+		file = debugfs_create_regset32("regdump", 0444,
+						root, dwc->regset);
 	if (!file) {
+			dev_dbg(dwc->dev, "Can't create debugfs regdump\n");
 		ret = -ENOMEM;
 		goto err1;
 	}
+	}
 
 	if (IS_ENABLED(CONFIG_USB_DWC3_DUAL_ROLE)) {
 		file = debugfs_create_file("mode", S_IRUGO | S_IWUSR, root,
@@ -674,6 +1308,41 @@
 		}
 	}
 
+	file = debugfs_create_file("trbs", S_IRUGO | S_IWUSR, root,
+			dwc, &dwc3_ep_trb_list_fops);
+	if (!file) {
+		ret = -ENOMEM;
+		goto err1;
+	}
+
+	file = debugfs_create_file("requests", S_IRUGO | S_IWUSR, root,
+			dwc, &dwc3_ep_req_list_fops);
+	if (!file) {
+		ret = -ENOMEM;
+		goto err1;
+	}
+
+	file = debugfs_create_file("queued_reqs", S_IRUGO | S_IWUSR, root,
+			dwc, &dwc3_ep_req_queued_fops);
+	if (!file) {
+		ret = -ENOMEM;
+		goto err1;
+	}
+
+	file = debugfs_create_file("events", S_IRUGO | S_IWUSR, root,
+			dwc, &dwc3_gadget_dbg_data_fops);
+	if (!file) {
+		ret = -ENOMEM;
+		goto err1;
+	}
+
+	file = debugfs_create_file("int_events", S_IRUGO | S_IWUSR, root,
+			dwc, &dwc3_gadget_dbg_events_fops);
+	if (!file) {
+		ret = -ENOMEM;
+		goto err1;
+	}
+
 	return 0;
 
 err1:
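
Taken together, the dbg_* helpers give the gadget fast path an
rwlock-protected ring of 2048 fixed-size messages (DBG_DATA_MAX entries of
DBG_DATA_MSG bytes) that is dumped through the "events" debugfs file, so
event history survives without a serial console. A sketch of a call site,
assuming a compilation unit that includes debug.h (the real callers live in
ep0.c and gadget.c, outside this diff):

	/* per-endpoint event; dep is a struct dwc3_ep */
	dbg_event(dep->number, "XFER_COMPLETE", status);

	/* bus-wide events use the 0xFF wildcard that allow_dbg_print()
	 * always lets through, regardless of the ep debug masks */
	dbg_event(0xFF, "RESET", 0);
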
diff -ruw linux-4.4.115/drivers/usb/dwc3/debug.h linux-4.4.115-fbx/drivers/usb/dwc3/debug.h
--- linux-4.4.115/drivers/usb/dwc3/debug.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/usb/dwc3/debug.h	2019-01-22 16:16:27.255280383 +0100
@@ -217,9 +217,29 @@
 void dwc3_trace(void (*trace)(struct va_format *), const char *fmt, ...);
 
 #ifdef CONFIG_DEBUG_FS
+extern void dbg_event(u8, const char*, int);
+extern void dbg_print(u8, const char*, int, const char*);
+extern void dbg_done(u8, const u32, int);
+extern void dbg_queue(u8, const struct usb_request*, int);
+extern void dbg_setup(u8, const struct usb_ctrlrequest*);
 extern int dwc3_debugfs_init(struct dwc3 *);
 extern void dwc3_debugfs_exit(struct dwc3 *);
+extern void dbg_print_reg(const char *name, int reg);
 #else
+static inline void dbg_event(u8 ep_num, const char *name, int status)
+{  }
+static inline void dbg_print(u8 ep_num, const char *name, int status,
+			     const char *extra)
+{  }
+static inline void dbg_done(u8 ep_num, const u32 count, int status)
+{  }
+static inline void dbg_queue(u8 ep_num, const struct usb_request *req,
+			     int status)
+{  }
+static inline void dbg_setup(u8 ep_num, const struct usb_ctrlrequest *req)
+{  }
+static inline void dbg_print_reg(const char *name, int reg)
+{  }
 static inline int dwc3_debugfs_init(struct dwc3 *d)
 {  return 0;  }
 static inline void dwc3_debugfs_exit(struct dwc3 *d)
diff -ruw linux-4.4.115/drivers/usb/dwc3/gadget.h linux-4.4.115-fbx/drivers/usb/dwc3/gadget.h
--- linux-4.4.115/drivers/usb/dwc3/gadget.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/usb/dwc3/gadget.h	2019-01-22 16:16:27.263280456 +0100
@@ -68,6 +68,14 @@
 	return list_first_entry(list, struct dwc3_request, list);
 }
 
+static inline void dwc3_gadget_move_request_list_front(struct dwc3_request *req)
+{
+	struct dwc3_ep		*dep = req->dep;
+
+	req->queued = false;
+	list_move(&req->list, &dep->request_list);
+}
+
 static inline void dwc3_gadget_move_request_queued(struct dwc3_request *req)
 {
 	struct dwc3_ep		*dep = req->dep;
@@ -76,17 +84,49 @@
 	list_move_tail(&req->list, &dep->req_queued);
 }
 
+static inline enum dwc3_link_state dwc3_get_link_state(struct dwc3 *dwc)
+{
+	u32 reg;
+
+	reg = dwc3_readl(dwc->regs, DWC3_DSTS);
+	return DWC3_DSTS_USBLNKST(reg);
+}
+
 void dwc3_gadget_giveback(struct dwc3_ep *dep, struct dwc3_request *req,
 		int status);
 
 void dwc3_ep0_interrupt(struct dwc3 *dwc,
 		const struct dwc3_event_depevt *event);
 void dwc3_ep0_out_start(struct dwc3 *dwc);
+void dwc3_ep0_stall_and_restart(struct dwc3 *dwc);
 int __dwc3_gadget_ep0_set_halt(struct usb_ep *ep, int value);
 int dwc3_gadget_ep0_set_halt(struct usb_ep *ep, int value);
 int dwc3_gadget_ep0_queue(struct usb_ep *ep, struct usb_request *request,
 		gfp_t gfp_flags);
 int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value, int protocol);
+void dwc3_stop_active_transfer(struct dwc3 *dwc, u32 epnum, bool force);
+
+#if IS_ENABLED(CONFIG_USB_DWC3_GADGET) || IS_ENABLED(CONFIG_USB_DWC3_DUAL_ROLE)
+void dwc3_gadget_interrupt(struct dwc3 *dwc,
+		const struct dwc3_event_devt *event);
+void dwc3_endpoint_interrupt(struct dwc3 *dwc,
+		const struct dwc3_event_depevt *event);
+#else
+static inline void dwc3_gadget_interrupt(struct dwc3 *dwc,
+		const struct dwc3_event_devt *event)
+{ }
+static inline void dwc3_endpoint_interrupt(struct dwc3 *dwc,
+		const struct dwc3_event_depevt *event)
+{ }
+#endif
+
+static inline dma_addr_t dwc3_trb_dma_offset(struct dwc3_ep *dep,
+		struct dwc3_trb *trb)
+{
+	u32		offset = (char *) trb - (char *) dep->trb_pool;
+
+	return dep->trb_pool_dma + offset;
+}
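
dwc3_trb_dma_offset() relies on the TRB pool being one physically contiguous
DMA allocation: the DMA address of any TRB is the pool's base plus the same
byte offset the CPU pointer has, i.e. trb_pool_dma + i * sizeof(struct
dwc3_trb) for slot i, at 16 bytes per slot:

	/* e.g. slot 3 resolves to dep->trb_pool_dma + 48 */
	dma_addr_t dma = dwc3_trb_dma_offset(dep, &dep->trb_pool[3]);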
 
 /**
  * dwc3_gadget_ep_get_transfer_index - Gets transfer index from HW
diff -ruw linux-4.4.115/drivers/usb/dwc3/host.c linux-4.4.115-fbx/drivers/usb/dwc3/host.c
--- linux-4.4.115/drivers/usb/dwc3/host.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/usb/dwc3/host.c	2019-01-22 16:16:27.263280456 +0100
@@ -25,6 +25,7 @@
 	struct platform_device	*xhci;
 	struct usb_xhci_pdata	pdata;
 	int			ret;
+	struct device_node	*node = dwc->dev->of_node;
 
 	xhci = platform_device_alloc("xhci-hcd", PLATFORM_DEVID_AUTO);
 	if (!xhci) {
@@ -32,6 +33,7 @@
 		return -ENOMEM;
 	}
 
+	arch_setup_dma_ops(&xhci->dev, 0, 0, NULL, 0);
 	dma_set_coherent_mask(&xhci->dev, dwc->dev->coherent_dma_mask);
 
 	xhci->dev.parent	= dwc->dev;
@@ -51,6 +53,11 @@
 
 	pdata.usb3_lpm_capable = dwc->usb3_lpm_capable;
 
+	ret = of_property_read_u32(node, "xhci-imod-value",
+					   &pdata.imod_interval);
+	if (ret)
+		pdata.imod_interval = 0;	/* use default xhci.c value */
+
 	ret = platform_device_add_data(xhci, &pdata, sizeof(pdata));
 	if (ret) {
 		dev_err(dwc->dev, "couldn't add platform data to xHCI device\n");
@@ -62,18 +69,9 @@
 	phy_create_lookup(dwc->usb3_generic_phy, "usb3-phy",
 			  dev_name(&xhci->dev));
 
-	ret = platform_device_add(xhci);
-	if (ret) {
-		dev_err(dwc->dev, "failed to register xHCI device\n");
-		goto err2;
-	}
-
+	/* Platform device gets added as part of state machine */
 	return 0;
-err2:
-	phy_remove_lookup(dwc->usb2_generic_phy, "usb2-phy",
-			  dev_name(&xhci->dev));
-	phy_remove_lookup(dwc->usb3_generic_phy, "usb3-phy",
-			  dev_name(&xhci->dev));
+
 err1:
 	platform_device_put(xhci);
 	return ret;
@@ -85,5 +83,6 @@
 			  dev_name(&dwc->xhci->dev));
 	phy_remove_lookup(dwc->usb3_generic_phy, "usb3-phy",
 			  dev_name(&dwc->xhci->dev));
+	if (!dwc->is_drd)
 	platform_device_unregister(dwc->xhci);
 }
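[Note on the hunk above] The host.c change reads an optional "xhci-imod-value" device-tree property and, when it is absent, leaves imod_interval at 0 so xhci.c falls back to its own default. A hedged restatement of that optional-property pattern (kernel context assumed; only the property name comes from this patch):

/*
 * Sketch: read an optional DT property with a safe default, as in the
 * dwc3_host_init() hunk above. of_property_read_u32() returns non-zero
 * when the property is missing or malformed.
 */
#include <linux/of.h>

static u32 read_imod_or_default(struct device_node *node)
{
	u32 imod;

	if (of_property_read_u32(node, "xhci-imod-value", &imod))
		imod = 0;	/* 0 = keep the xhci.c default */

	return imod;
}
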
diff -ruw linux-4.4.115/drivers/usb/dwc3/io.h linux-4.4.115-fbx/drivers/usb/dwc3/io.h
--- linux-4.4.115/drivers/usb/dwc3/io.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/usb/dwc3/io.h	2019-01-22 16:16:27.263280456 +0100
@@ -41,7 +41,7 @@
 	 * documentation, so we revert it back to the proper addresses, the
 	 * same way they are described on SNPS documentation
 	 */
-	dwc3_trace(trace_dwc3_readl, "addr %p value %08x",
+	dwc3_trace(trace_dwc3_readl, "addr %pK value %08x",
 			base - DWC3_GLOBALS_REGS_START + offset, value);
 
 	return value;
@@ -63,7 +63,7 @@
 	 * documentation, so we revert it back to the proper addresses, the
 	 * same way they are described on SNPS documentation
 	 */
-	dwc3_trace(trace_dwc3_writel, "addr %p value %08x",
+	dwc3_trace(trace_dwc3_writel, "addr %pK value %08x",
 			base - DWC3_GLOBALS_REGS_START + offset, value);
 }
 
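[Note on the hunk above] This io.h change — like the trace.h, xhci.c, xhci-dbg.c and xhci-mem.c hunks below — switches pointer format specifiers from %p to %pK. On a 4.4 kernel %p prints the raw address, whereas %pK honors the kptr_restrict sysctl and shows zeros to unprivileged readers, so the sweep is an address-disclosure hardening. A one-line illustration in a hypothetical driver:

/*
 * Sketch: %pK respects /proc/sys/kernel/kptr_restrict and hides kernel
 * addresses from unprivileged readers; plain %p on this kernel leaks
 * the raw pointer. Function and arguments are illustrative.
 */
#include <linux/printk.h>

static void log_buffer(void *buf, unsigned int len)
{
	pr_debug("buf %pK len %u\n", buf, len);	/* was: "buf %p len %u" */
}
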
diff -ruw linux-4.4.115/drivers/usb/dwc3/Makefile linux-4.4.115-fbx/drivers/usb/dwc3/Makefile
--- linux-4.4.115/drivers/usb/dwc3/Makefile	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/usb/dwc3/Makefile	2019-01-22 16:16:27.255280383 +0100
@@ -1,5 +1,6 @@
 # define_trace.h needs to know how to find our header
 CFLAGS_trace.o				:= -I$(src)
+CFLAGS_dwc3-msm.o                      := -Idrivers/usb/host -Idrivers/base/power
 
 obj-$(CONFIG_USB_DWC3)			+= dwc3.o
 
@@ -37,5 +38,5 @@
 obj-$(CONFIG_USB_DWC3_EXYNOS)		+= dwc3-exynos.o
 obj-$(CONFIG_USB_DWC3_PCI)		+= dwc3-pci.o
 obj-$(CONFIG_USB_DWC3_KEYSTONE)		+= dwc3-keystone.o
-obj-$(CONFIG_USB_DWC3_QCOM)		+= dwc3-qcom.o
+obj-$(CONFIG_USB_DWC3_QCOM)		+= dwc3-qcom.o dwc3-msm.o dbm.o
 obj-$(CONFIG_USB_DWC3_ST)		+= dwc3-st.o
diff -ruw linux-4.4.115/drivers/usb/dwc3/trace.h linux-4.4.115-fbx/drivers/usb/dwc3/trace.h
--- linux-4.4.115/drivers/usb/dwc3/trace.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/usb/dwc3/trace.h	2019-01-22 16:16:27.263280456 +0100
@@ -125,7 +125,7 @@
 		__entry->length = req->request.length;
 		__entry->status = req->request.status;
 	),
-	TP_printk("%s: req %p length %u/%u ==> %d",
+	TP_printk("%s: req %pK length %u/%u ==> %d",
 		__get_str(name), __entry->req, __entry->actual, __entry->length,
 		__entry->status
 	)
@@ -228,7 +228,7 @@
 		__entry->size = trb->size;
 		__entry->ctrl = trb->ctrl;
 	),
-	TP_printk("%s: trb %p bph %08x bpl %08x size %08x ctrl %08x",
+	TP_printk("%s: trb %pK bph %08x bpl %08x size %08x ctrl %08x",
 		__get_str(name), __entry->trb, __entry->bph, __entry->bpl,
 		__entry->size, __entry->ctrl
 	)
diff -ruw linux-4.4.115/drivers/usb/gadget/function/Makefile linux-4.4.115-fbx/drivers/usb/gadget/function/Makefile
--- linux-4.4.115/drivers/usb/gadget/function/Makefile	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/usb/gadget/function/Makefile	2019-10-29 09:26:24.993216389 +0100
@@ -44,3 +44,25 @@
 obj-$(CONFIG_USB_F_HID)		+= usb_f_hid.o
 usb_f_printer-y			:= f_printer.o
 obj-$(CONFIG_USB_F_PRINTER)	+= usb_f_printer.o
+usb_f_mtp-y                     := f_mtp.o
+obj-$(CONFIG_USB_F_MTP)         += usb_f_mtp.o
+usb_f_ptp-y                     := f_ptp.o
+obj-$(CONFIG_USB_F_PTP)         += usb_f_ptp.o
+usb_f_audio_source-y            := f_audio_source.o
+obj-$(CONFIG_USB_F_AUDIO_SRC)   += usb_f_audio_source.o
+usb_f_accessory-y               := f_accessory.o
+obj-$(CONFIG_USB_F_ACC)         += usb_f_accessory.o
+usb_f_diag-y			:= f_diag.o
+obj-$(CONFIG_USB_F_DIAG)	+= usb_f_diag.o
+usb_f_gsi-y			:= f_gsi.o rndis.o
+obj-$(CONFIG_USB_F_GSI)         += usb_f_gsi.o
+usb_f_cdev-y			:= f_cdev.o
+obj-$(CONFIG_USB_F_CDEV)         += usb_f_cdev.o
+usb_f_qdss-y			:= f_qdss.o u_qdss.o
+obj-$(CONFIG_USB_F_QDSS)	+= usb_f_qdss.o
+usb_f_qcrndis-y			:= f_qc_rndis.o rndis.o u_data_ipa.o
+obj-$(CONFIG_USB_F_QCRNDIS)	+= usb_f_qcrndis.o
+usb_f_rmnet_bam-y		:= f_rmnet.o u_ctrl_qti.o
+obj-$(CONFIG_USB_F_RMNET_BAM)   += usb_f_rmnet_bam.o
+usb_f_ccid-y			:= f_ccid.o
+obj-$(CONFIG_USB_F_CCID)   	+= usb_f_ccid.o
diff -ruw linux-4.4.115/drivers/usb/gadget/Kconfig linux-4.4.115-fbx/drivers/usb/gadget/Kconfig
--- linux-4.4.115/drivers/usb/gadget/Kconfig	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/usb/gadget/Kconfig	2019-10-29 09:26:24.989216350 +0100
@@ -175,6 +175,12 @@
 config USB_F_RNDIS
 	tristate
 
+config USB_F_QCRNDIS
+	tristate
+
+config USB_F_RMNET_BAM
+	tristate
+
 config USB_F_MASS_STORAGE
 	tristate
 
@@ -199,6 +205,33 @@
 config USB_F_PRINTER
 	tristate
 
+config USB_F_MTP
+	tristate
+
+config USB_F_PTP
+        tristate
+
+config USB_F_AUDIO_SRC
+	tristate
+
+config USB_F_ACC
+	tristate
+
+config USB_F_DIAG
+	tristate
+
+config USB_F_GSI
+	tristate
+
+config USB_F_CDEV
+	tristate
+
+config USB_F_QDSS
+	tristate
+
+config USB_F_CCID
+	tristate
+
 choice
 	tristate "USB Gadget Drivers"
 	default USB_ETH
@@ -294,6 +327,14 @@
 	  On hardware that can't implement the full protocol,
 	  a simple CDC subset is used, placing fewer demands on USB.
 
+config USB_CONFIGFS_QCRNDIS
+	bool "QCRNDIS"
+	depends on USB_CONFIGFS
+	depends on RNDIS_IPA
+	depends on NET
+	select USB_U_ETHER
+	select USB_F_QCRNDIS
+
 config USB_CONFIGFS_RNDIS
 	bool "RNDIS"
 	depends on USB_CONFIGFS
@@ -310,6 +351,12 @@
 	   XP, you'll need to download drivers from Microsoft's website; a URL
 	   is given in comments found in that info file.
 
+config USB_CONFIGFS_RMNET_BAM
+	bool "RMNET"
+	depends on USB_CONFIGFS
+	depends on IPA
+	select USB_F_RMNET_BAM
+
 config USB_CONFIGFS_EEM
 	bool "Ethernet Emulation Model (EEM)"
 	depends on USB_CONFIGFS
@@ -371,6 +418,44 @@
 	  implemented in kernel space (for instance Ethernet, serial or
 	  mass storage) and other are implemented in user space.
 
+config USB_CONFIGFS_F_MTP
+	boolean "MTP gadget"
+	depends on USB_CONFIGFS
+	select USB_F_MTP
+	help
+	  USB gadget MTP support
+
+config USB_CONFIGFS_F_PTP
+	boolean "PTP gadget"
+	depends on USB_CONFIGFS && USB_CONFIGFS_F_MTP
+	select USB_F_PTP
+	help
+	  USB gadget PTP support
+
+config USB_CONFIGFS_F_ACC
+	boolean "Accessory gadget"
+	depends on USB_CONFIGFS
+	select USB_F_ACC
+	help
+	  USB gadget Accessory support
+
+config USB_CONFIGFS_F_AUDIO_SRC
+	boolean "Audio Source gadget"
+	depends on USB_CONFIGFS && USB_CONFIGFS_F_ACC
+	depends on SND
+	select SND_PCM
+	select USB_F_AUDIO_SRC
+	help
+	  USB gadget Audio Source support
+
+config USB_CONFIGFS_UEVENT
+	boolean "Uevent notification of Gadget state"
+	depends on USB_CONFIGFS
+	help
+	  Enable uevent notifications to userspace when the gadget
+	  state changes. The gadget can be in any of the following
+	  three states: "CONNECTED/DISCONNECTED/CONFIGURED"
+
 config USB_CONFIGFS_F_UAC1
 	bool "Audio Class 1.0"
 	depends on USB_CONFIGFS
@@ -451,6 +536,43 @@
 	  For more information, see Documentation/usb/gadget_printer.txt
 	  which includes sample code for accessing the device file.
 
+config USB_CONFIGFS_F_DIAG
+	bool "USB Diag function"
+	select USB_F_DIAG
+	depends on USB_CONFIGFS
+	help
+	  Diag function driver enables support for Qualcomm diagnostics
+	  port over USB.
+
+config USB_CONFIGFS_F_GSI
+	bool "USB GSI function"
+	select USB_F_GSI
+	depends on USB_CONFIGFS
+	help
+	  Generic function driver to support h/w acceleration to IPA over GSI.
+
+config USB_CONFIGFS_F_CDEV
+	bool "USB Serial Character function"
+	select USB_F_CDEV
+	depends on USB_CONFIGFS
+	help
+	  Generic USB serial character function driver to support DUN/NMEA.
+
+config USB_CONFIGFS_F_QDSS
+	bool "USB QDSS function"
+	select USB_F_QDSS
+	depends on USB_CONFIGFS
+	help
+	  USB QDSS function driver to get hwtracing related data over USB.
+
+config USB_CONFIGFS_F_CCID
+	bool "USB CCID function"
+	select USB_F_CCID
+	depends on USB_CONFIGFS
+	help
+	  USB CCID function driver creates a transport layer between the
+	  userspace CCID component and the Windows host.
+
 source "drivers/usb/gadget/legacy/Kconfig"
 
 endchoice
diff -ruw linux-4.4.115/drivers/usb/gadget/Makefile linux-4.4.115-fbx/drivers/usb/gadget/Makefile
--- linux-4.4.115/drivers/usb/gadget/Makefile	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/usb/gadget/Makefile	2019-01-22 16:16:27.263280456 +0100
@@ -10,3 +10,4 @@
 libcomposite-y			+= composite.o functions.o configfs.o u_f.o
 
 obj-$(CONFIG_USB_GADGET)	+= udc/ function/ legacy/
+obj-y				+= debug.o
diff -ruw linux-4.4.115/drivers/usb/host/xhci.c linux-4.4.115-fbx/drivers/usb/host/xhci.c
--- linux-4.4.115/drivers/usb/host/xhci.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/usb/host/xhci.c	2019-10-29 09:26:25.033216781 +0100
@@ -114,10 +114,20 @@
 			STS_HALT, STS_HALT, XHCI_MAX_HALT_USEC);
 	if (!ret) {
 		xhci->xhc_state |= XHCI_STATE_HALTED;
-		xhci->cmd_ring_state = CMD_RING_STATE_STOPPED;
-	} else
+	} else {
 		xhci_warn(xhci, "Host not halted after %u microseconds.\n",
 				XHCI_MAX_HALT_USEC);
+	}
+
+	xhci->cmd_ring_state = CMD_RING_STATE_STOPPED;
+
+	if (delayed_work_pending(&xhci->cmd_timer)) {
+		xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+				"Cleanup command queue");
+		cancel_delayed_work(&xhci->cmd_timer);
+		xhci_cleanup_command_queue(xhci);
+	}
+
 	return ret;
 }
 
@@ -128,7 +138,13 @@
 {
 	u32 temp;
 	int ret;
+	struct usb_hcd *hcd = xhci_to_hcd(xhci);
 
+	/*
+	 * disable irq to avoid xhci_irq flooding due to an unhandled port
+	 * change event in halt state, as soon as xhci_start clears the halt bit
+	 */
+	disable_irq(hcd->irq);
 	temp = readl(&xhci->op_regs->command);
 	temp |= (CMD_RUN);
 	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Turn on HC, cmd = 0x%x.",
@@ -149,6 +165,8 @@
 		/* clear state flags. Including dying, halted or removing */
 		xhci->xhc_state = 0;
 
+	enable_irq(hcd->irq);
+
 	return ret;
 }
 
@@ -647,7 +665,7 @@
 
 	temp = readl(&xhci->ir_set->irq_pending);
 	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
-			"// Enabling event ring interrupter %p by writing 0x%x to irq_pending",
+			"// Enabling event ring interrupter %pK by writing 0x%x to irq_pending",
 			xhci->ir_set, (unsigned int) ER_IRQ_ENABLE(temp));
 	writel(ER_IRQ_ENABLE(temp), &xhci->ir_set->irq_pending);
 	xhci_print_ir_set(xhci, 0);
@@ -745,6 +763,10 @@
 		usb_disable_xhci_ports(to_pci_dev(hcd->self.controller));
 
 	spin_lock_irq(&xhci->lock);
+	if (!HCD_HW_ACCESSIBLE(hcd)) {
+		spin_unlock_irq(&xhci->lock);
+		return;
+	}
 	xhci_halt(xhci);
 	/* Workaround for spurious wakeups at shutdown with HSW */
 	if (xhci->quirks & XHCI_SPURIOUS_WAKEUP)
@@ -900,7 +922,7 @@
 	struct usb_hcd		*hcd = xhci_to_hcd(xhci);
 	u32			command;
 
-	if (!hcd->state)
+	if (!hcd->state || xhci->suspended)
 		return 0;
 
 	if (hcd->state != HC_STATE_SUSPENDED ||
@@ -970,6 +992,7 @@
 	/* step 5: remove core well power */
 	/* synchronize irq when using MSI-X */
 	xhci_msix_sync_irqs(xhci);
+	xhci->suspended = true;
 
 	return rc;
 }
@@ -989,7 +1012,7 @@
 	int			retval = 0;
 	bool			comp_timer_running = false;
 
-	if (!hcd->state)
+	if (!hcd->state || !xhci->suspended)
 		return 0;
 
 	/* Wait a bit if either of the roothubs need to settle from the
@@ -1123,6 +1146,7 @@
 
 	/* Re-enable port polling. */
 	xhci_dbg(xhci, "%s: starting port polling.\n", __func__);
+	xhci->suspended = false;
 	set_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags);
 	usb_hcd_poll_rh_status(xhci->shared_hcd);
 	set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
@@ -1446,7 +1470,7 @@
 exit:
 	return ret;
 dying:
-	xhci_dbg(xhci, "Ep 0x%x: URB %p submitted for "
+	xhci_dbg(xhci, "Ep 0x%x: URB %pK submitted for "
 			"non-responsive xHCI host.\n",
 			urb->ep->desc.bEndpointAddress, urb);
 	ret = -ESHUTDOWN;
@@ -1582,7 +1606,7 @@
 	i = urb_priv->td_cnt;
 	if (i < urb_priv->length)
 		xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
-				"Cancel URB %p, dev %s, ep 0x%x, "
+				"Cancel URB %pK, dev %s, ep 0x%x, "
 				"starting at offset 0x%llx",
 				urb, urb->dev->devpath,
 				urb->ep->desc.bEndpointAddress,
@@ -1650,7 +1674,7 @@
 	if (xhci->xhc_state & XHCI_STATE_DYING)
 		return -ENODEV;
 
-	xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
+	xhci_dbg(xhci, "%s called for udev %pK\n", __func__, udev);
 	drop_flag = xhci_get_endpoint_flag(&ep->desc);
 	if (drop_flag == SLOT_FLAG || drop_flag == EP0_FLAG) {
 		xhci_dbg(xhci, "xHCI %s - can't drop slot or ep 0 %#x\n",
@@ -1678,7 +1702,7 @@
 	    xhci_get_endpoint_flag(&ep->desc)) {
 		/* Do not warn when called after a usb_device_reset */
 		if (xhci->devs[udev->slot_id]->eps[ep_index].ring != NULL)
-			xhci_warn(xhci, "xHCI %s called with disabled ep %p\n",
+			xhci_warn(xhci, "xHCI %s called with disabled ep %pK\n",
 				  __func__, ep);
 		return 0;
 	}
@@ -1770,7 +1794,7 @@
 	 * ignore this request.
 	 */
 	if (le32_to_cpu(ctrl_ctx->add_flags) & added_ctxs) {
-		xhci_warn(xhci, "xHCI %s called with enabled ep %p\n",
+		xhci_warn(xhci, "xHCI %s called with enabled ep %pK\n",
 				__func__, ep);
 		return 0;
 	}
@@ -2751,7 +2775,7 @@
 		(xhci->xhc_state & XHCI_STATE_REMOVING))
 		return -ENODEV;
 
-	xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
+	xhci_dbg(xhci, "%s called for udev %pK\n", __func__, udev);
 	virt_dev = xhci->devs[udev->slot_id];
 
 	command = xhci_alloc_command(xhci, false, true, GFP_KERNEL);
@@ -2848,7 +2872,7 @@
 		return;
 	xhci = hcd_to_xhci(hcd);
 
-	xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
+	xhci_dbg(xhci, "%s called for udev %pK\n", __func__, udev);
 	virt_dev = xhci->devs[udev->slot_id];
 	/* Free any rings allocated for added endpoints */
 	for (i = 0; i < 31; ++i) {
@@ -2901,7 +2925,7 @@
 	if (addr == 0) {
 		xhci_warn(xhci, "WARN Cannot submit config ep after "
 				"reset ep command\n");
-		xhci_warn(xhci, "WARN deq seg = %p, deq ptr = %p\n",
+		xhci_warn(xhci, "WARN deq seg = %pK, deq ptr = %pK\n",
 				deq_state->new_deq_seg,
 				deq_state->new_deq_ptr);
 		return;
@@ -3925,7 +3949,7 @@
 	xhci_dbg_trace(xhci, trace_xhci_dbg_address,
 			"Op regs DCBAA ptr = %#016llx", temp_64);
 	xhci_dbg_trace(xhci, trace_xhci_dbg_address,
-		"Slot ID %d dcbaa entry @%p = %#016llx",
+		"Slot ID %d dcbaa entry @%pK = %#016llx",
 		udev->slot_id,
 		&xhci->dcbaa->dev_context_ptrs[udev->slot_id],
 		(unsigned long long)
@@ -4956,6 +4980,57 @@
 }
 EXPORT_SYMBOL_GPL(xhci_gen_setup);
 
+dma_addr_t xhci_get_sec_event_ring_dma_addr(struct usb_hcd *hcd,
+	unsigned intr_num)
+{
+	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+
+	if (intr_num >= xhci->max_interrupters) {
+		xhci_err(xhci, "intr num %d >= max intrs %d\n", intr_num,
+			xhci->max_interrupters);
+		return 0;
+	}
+
+	if (!(xhci->xhc_state & XHCI_STATE_HALTED) &&
+		xhci->sec_event_ring && xhci->sec_event_ring[intr_num]
+		&& xhci->sec_event_ring[intr_num]->first_seg)
+		return xhci->sec_event_ring[intr_num]->first_seg->dma;
+
+	return 0;
+}
+
+dma_addr_t xhci_get_xfer_ring_dma_addr(struct usb_hcd *hcd,
+	struct usb_device *udev, struct usb_host_endpoint *ep)
+{
+	int ret;
+	unsigned int ep_index;
+	struct xhci_virt_device *virt_dev;
+
+	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+
+	ret = xhci_check_args(hcd, udev, ep, 1, true, __func__);
+	if (ret <= 0) {
+		xhci_err(xhci, "%s: invalid args\n", __func__);
+		return 0;
+	}
+
+	virt_dev = xhci->devs[udev->slot_id];
+	ep_index = xhci_get_endpoint_index(&ep->desc);
+
+	if (virt_dev->eps[ep_index].ring &&
+		virt_dev->eps[ep_index].ring->first_seg)
+		return virt_dev->eps[ep_index].ring->first_seg->dma;
+
+	return 0;
+}
+
+int xhci_get_core_id(struct usb_hcd *hcd)
+{
+	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+
+	return xhci->core_id;
+}
+
 static const struct hc_driver xhci_hc_driver = {
 	.description =		"xhci-hcd",
 	.product_desc =		"xHCI Host Controller",
@@ -5015,6 +5090,11 @@
 	.enable_usb3_lpm_timeout =	xhci_enable_usb3_lpm_timeout,
 	.disable_usb3_lpm_timeout =	xhci_disable_usb3_lpm_timeout,
 	.find_raw_port_number =	xhci_find_raw_port_number,
+	.sec_event_ring_setup =		xhci_sec_event_ring_setup,
+	.sec_event_ring_cleanup =	xhci_sec_event_ring_cleanup,
+	.get_sec_event_ring_dma_addr =	xhci_get_sec_event_ring_dma_addr,
+	.get_xfer_ring_dma_addr =	xhci_get_xfer_ring_dma_addr,
+	.get_core_id =			xhci_get_core_id,
 };
 
 void xhci_init_driver(struct hc_driver *drv,
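[Note on the hunk above] The xhci.c changes export helpers that hand out the DMA addresses of the secondary event rings and of per-endpoint transfer rings, and wire them into xhci_hc_driver together with the setup/cleanup ops. A hypothetical client — say, controller glue that lets a DSP consume USB events directly; no such caller appears in this hunk — could reach them through the hcd's driver ops. A hedged sketch, assuming only the op names added above:

/*
 * Sketch: how a client might use the new hc_driver ops added above.
 * The helper name and error handling are illustrative; only the op
 * names come from this patch.
 */
#include <linux/usb/hcd.h>

static int map_sec_ring_for_dsp(struct usb_hcd *hcd, unsigned int intr_num,
				dma_addr_t *ring_dma)
{
	int ret;

	ret = hcd->driver->sec_event_ring_setup(hcd, intr_num);
	if (ret)
		return ret;

	*ring_dma = hcd->driver->get_sec_event_ring_dma_addr(hcd, intr_num);
	if (!*ring_dma) {
		hcd->driver->sec_event_ring_cleanup(hcd, intr_num);
		return -ENOMEM;
	}

	return 0;
}
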
diff -ruw linux-4.4.115/drivers/usb/host/xhci-dbg.c linux-4.4.115-fbx/drivers/usb/host/xhci-dbg.c
--- linux-4.4.115/drivers/usb/host/xhci-dbg.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/usb/host/xhci-dbg.c	2019-01-22 16:16:27.343281180 +0100
@@ -30,10 +30,10 @@
 {
 	u32 temp;
 
-	xhci_dbg(xhci, "// xHCI capability registers at %p:\n",
+	xhci_dbg(xhci, "// xHCI capability registers at %pK:\n",
 			xhci->cap_regs);
 	temp = readl(&xhci->cap_regs->hc_capbase);
-	xhci_dbg(xhci, "// @%p = 0x%x (CAPLENGTH AND HCIVERSION)\n",
+	xhci_dbg(xhci, "// @%pK = 0x%x (CAPLENGTH AND HCIVERSION)\n",
 			&xhci->cap_regs->hc_capbase, temp);
 	xhci_dbg(xhci, "//   CAPLENGTH: 0x%x\n",
 			(unsigned int) HC_LENGTH(temp));
@@ -42,17 +42,17 @@
 			(unsigned int) HC_VERSION(temp));
 #endif
 
-	xhci_dbg(xhci, "// xHCI operational registers at %p:\n", xhci->op_regs);
+	xhci_dbg(xhci, "// xHCI operational registers at %pK:\n", xhci->op_regs);
 
 	temp = readl(&xhci->cap_regs->run_regs_off);
-	xhci_dbg(xhci, "// @%p = 0x%x RTSOFF\n",
+	xhci_dbg(xhci, "// @%pK = 0x%x RTSOFF\n",
 			&xhci->cap_regs->run_regs_off,
 			(unsigned int) temp & RTSOFF_MASK);
-	xhci_dbg(xhci, "// xHCI runtime registers at %p:\n", xhci->run_regs);
+	xhci_dbg(xhci, "// xHCI runtime registers at %pK:\n", xhci->run_regs);
 
 	temp = readl(&xhci->cap_regs->db_off);
-	xhci_dbg(xhci, "// @%p = 0x%x DBOFF\n", &xhci->cap_regs->db_off, temp);
-	xhci_dbg(xhci, "// Doorbell array at %p:\n", xhci->dba);
+	xhci_dbg(xhci, "// @%pK = 0x%x DBOFF\n", &xhci->cap_regs->db_off, temp);
+	xhci_dbg(xhci, "// Doorbell array at %pK:\n", xhci->dba);
 }
 
 static void xhci_print_cap_regs(struct xhci_hcd *xhci)
@@ -60,7 +60,7 @@
 	u32 temp;
 	u32 hci_version;
 
-	xhci_dbg(xhci, "xHCI capability registers at %p:\n", xhci->cap_regs);
+	xhci_dbg(xhci, "xHCI capability registers at %pK:\n", xhci->cap_regs);
 
 	temp = readl(&xhci->cap_regs->hc_capbase);
 	hci_version = HC_VERSION(temp);
@@ -157,7 +157,7 @@
 
 static void xhci_print_op_regs(struct xhci_hcd *xhci)
 {
-	xhci_dbg(xhci, "xHCI operational registers at %p:\n", xhci->op_regs);
+	xhci_dbg(xhci, "xHCI operational registers at %pK:\n", xhci->op_regs);
 	xhci_print_command_reg(xhci);
 	xhci_print_status(xhci);
 }
@@ -178,7 +178,7 @@
 	addr = &xhci->op_regs->port_status_base;
 	for (i = 0; i < ports; i++) {
 		for (j = 0; j < NUM_PORT_REGS; ++j) {
-			xhci_dbg(xhci, "%p port %s reg = 0x%x\n",
+			xhci_dbg(xhci, "%pK port %s reg = 0x%x\n",
 					addr, names[j],
 					(unsigned int) readl(addr));
 			addr++;
@@ -198,35 +198,35 @@
 	if (temp == XHCI_INIT_VALUE)
 		return;
 
-	xhci_dbg(xhci, "  %p: ir_set[%i]\n", ir_set, set_num);
+	xhci_dbg(xhci, "  %pK: ir_set[%i]\n", ir_set, set_num);
 
-	xhci_dbg(xhci, "  %p: ir_set.pending = 0x%x\n", addr,
+	xhci_dbg(xhci, "  %pK: ir_set.pending = 0x%x\n", addr,
 			(unsigned int)temp);
 
 	addr = &ir_set->irq_control;
 	temp = readl(addr);
-	xhci_dbg(xhci, "  %p: ir_set.control = 0x%x\n", addr,
+	xhci_dbg(xhci, "  %pK: ir_set.control = 0x%x\n", addr,
 			(unsigned int)temp);
 
 	addr = &ir_set->erst_size;
 	temp = readl(addr);
-	xhci_dbg(xhci, "  %p: ir_set.erst_size = 0x%x\n", addr,
+	xhci_dbg(xhci, "  %pK: ir_set.erst_size = 0x%x\n", addr,
 			(unsigned int)temp);
 
 	addr = &ir_set->rsvd;
 	temp = readl(addr);
 	if (temp != XHCI_INIT_VALUE)
-		xhci_dbg(xhci, "  WARN: %p: ir_set.rsvd = 0x%x\n",
+		xhci_dbg(xhci, "  WARN: %pK: ir_set.rsvd = 0x%x\n",
 				addr, (unsigned int)temp);
 
 	addr = &ir_set->erst_base;
 	temp_64 = xhci_read_64(xhci, addr);
-	xhci_dbg(xhci, "  %p: ir_set.erst_base = @%08llx\n",
+	xhci_dbg(xhci, "  %pK: ir_set.erst_base = @%08llx\n",
 			addr, temp_64);
 
 	addr = &ir_set->erst_dequeue;
 	temp_64 = xhci_read_64(xhci, addr);
-	xhci_dbg(xhci, "  %p: ir_set.erst_dequeue = @%08llx\n",
+	xhci_dbg(xhci, "  %pK: ir_set.erst_dequeue = @%08llx\n",
 			addr, temp_64);
 }
 
@@ -235,15 +235,15 @@
 	u32 temp;
 	int i;
 
-	xhci_dbg(xhci, "xHCI runtime registers at %p:\n", xhci->run_regs);
+	xhci_dbg(xhci, "xHCI runtime registers at %pK:\n", xhci->run_regs);
 	temp = readl(&xhci->run_regs->microframe_index);
-	xhci_dbg(xhci, "  %p: Microframe index = 0x%x\n",
+	xhci_dbg(xhci, "  %pK: Microframe index = 0x%x\n",
 			&xhci->run_regs->microframe_index,
 			(unsigned int) temp);
 	for (i = 0; i < 7; ++i) {
 		temp = readl(&xhci->run_regs->rsvd[i]);
 		if (temp != XHCI_INIT_VALUE)
-			xhci_dbg(xhci, "  WARN: %p: Rsvd[%i] = 0x%x\n",
+			xhci_dbg(xhci, "  WARN: %pK: Rsvd[%i] = 0x%x\n",
 					&xhci->run_regs->rsvd[i],
 					i, (unsigned int) temp);
 	}
@@ -345,13 +345,13 @@
 
 void xhci_dbg_ring_ptrs(struct xhci_hcd *xhci, struct xhci_ring *ring)
 {
-	xhci_dbg(xhci, "Ring deq = %p (virt), 0x%llx (dma)\n",
+	xhci_dbg(xhci, "Ring deq = %pK (virt), 0x%llx (dma)\n",
 			ring->dequeue,
 			(unsigned long long)xhci_trb_virt_to_dma(ring->deq_seg,
 							    ring->dequeue));
 	xhci_dbg(xhci, "Ring deq updated %u times\n",
 			ring->deq_updates);
-	xhci_dbg(xhci, "Ring enq = %p (virt), 0x%llx (dma)\n",
+	xhci_dbg(xhci, "Ring enq = %pK (virt), 0x%llx (dma)\n",
 			ring->enqueue,
 			(unsigned long long)xhci_trb_virt_to_dma(ring->enq_seg,
 							    ring->enqueue));
@@ -441,7 +441,7 @@
 {
 	int i;
 	for (i = 0; i < 4; ++i) {
-		xhci_dbg(xhci, "@%p (virt) @%08llx "
+		xhci_dbg(xhci, "@%pK (virt) @%08llx "
 			 "(dma) %#08llx - rsvd64[%d]\n",
 			 &ctx[4 + i], (unsigned long long)dma,
 			 ctx[4 + i], i);
@@ -480,24 +480,24 @@
 	int csz = HCC_64BYTE_CONTEXT(xhci->hcc_params);
 
 	xhci_dbg(xhci, "Slot Context:\n");
-	xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - dev_info\n",
+	xhci_dbg(xhci, "@%pK (virt) @%08llx (dma) %#08x - dev_info\n",
 			&slot_ctx->dev_info,
 			(unsigned long long)dma, slot_ctx->dev_info);
 	dma += field_size;
-	xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - dev_info2\n",
+	xhci_dbg(xhci, "@%pK (virt) @%08llx (dma) %#08x - dev_info2\n",
 			&slot_ctx->dev_info2,
 			(unsigned long long)dma, slot_ctx->dev_info2);
 	dma += field_size;
-	xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - tt_info\n",
+	xhci_dbg(xhci, "@%pK (virt) @%08llx (dma) %#08x - tt_info\n",
 			&slot_ctx->tt_info,
 			(unsigned long long)dma, slot_ctx->tt_info);
 	dma += field_size;
-	xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - dev_state\n",
+	xhci_dbg(xhci, "@%pK (virt) @%08llx (dma) %#08x - dev_state\n",
 			&slot_ctx->dev_state,
 			(unsigned long long)dma, slot_ctx->dev_state);
 	dma += field_size;
 	for (i = 0; i < 4; ++i) {
-		xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - rsvd[%d]\n",
+		xhci_dbg(xhci, "@%pK (virt) @%08llx (dma) %#08x - rsvd[%d]\n",
 				&slot_ctx->reserved[i], (unsigned long long)dma,
 				slot_ctx->reserved[i], i);
 		dma += field_size;
@@ -528,24 +528,24 @@
 		xhci_dbg(xhci, "%s Endpoint %02d Context (ep_index %02d):\n",
 				usb_endpoint_out(epaddr) ? "OUT" : "IN",
 				epaddr & USB_ENDPOINT_NUMBER_MASK, i);
-		xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - ep_info\n",
+		xhci_dbg(xhci, "@%pK (virt) @%08llx (dma) %#08x - ep_info\n",
 				&ep_ctx->ep_info,
 				(unsigned long long)dma, ep_ctx->ep_info);
 		dma += field_size;
-		xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - ep_info2\n",
+		xhci_dbg(xhci, "@%pK (virt) @%08llx (dma) %#08x - ep_info2\n",
 				&ep_ctx->ep_info2,
 				(unsigned long long)dma, ep_ctx->ep_info2);
 		dma += field_size;
-		xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08llx - deq\n",
+		xhci_dbg(xhci, "@%pK (virt) @%08llx (dma) %#08llx - deq\n",
 				&ep_ctx->deq,
 				(unsigned long long)dma, ep_ctx->deq);
 		dma += 2*field_size;
-		xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - tx_info\n",
+		xhci_dbg(xhci, "@%pK (virt) @%08llx (dma) %#08x - tx_info\n",
 				&ep_ctx->tx_info,
 				(unsigned long long)dma, ep_ctx->tx_info);
 		dma += field_size;
 		for (j = 0; j < 3; ++j) {
-			xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - rsvd[%d]\n",
+			xhci_dbg(xhci, "@%pK (virt) @%08llx (dma) %#08x - rsvd[%d]\n",
 					&ep_ctx->reserved[j],
 					(unsigned long long)dma,
 					ep_ctx->reserved[j], j);
@@ -575,16 +575,16 @@
 			return;
 		}
 
-		xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - drop flags\n",
+		xhci_dbg(xhci, "@%pK (virt) @%08llx (dma) %#08x - drop flags\n",
 			 &ctrl_ctx->drop_flags, (unsigned long long)dma,
 			 ctrl_ctx->drop_flags);
 		dma += field_size;
-		xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - add flags\n",
+		xhci_dbg(xhci, "@%pK (virt) @%08llx (dma) %#08x - add flags\n",
 			 &ctrl_ctx->add_flags, (unsigned long long)dma,
 			 ctrl_ctx->add_flags);
 		dma += field_size;
 		for (i = 0; i < 6; ++i) {
-			xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - rsvd2[%d]\n",
+			xhci_dbg(xhci, "@%pK (virt) @%08llx (dma) %#08x - rsvd2[%d]\n",
 				 &ctrl_ctx->rsvd2[i], (unsigned long long)dma,
 				 ctrl_ctx->rsvd2[i], i);
 			dma += field_size;
diff -ruw linux-4.4.115/drivers/usb/host/xhci.h linux-4.4.115-fbx/drivers/usb/host/xhci.h
--- linux-4.4.115/drivers/usb/host/xhci.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/usb/host/xhci.h	2019-10-29 09:26:25.033216781 +0100
@@ -1516,6 +1516,11 @@
 	/* Our HCD's current interrupter register set */
 	struct	xhci_intr_reg __iomem *ir_set;
 
+	/* secondary interrupter */
+	struct	xhci_intr_reg __iomem **sec_ir_set;
+
+	int		core_id;
+
 	/* Cached register copies of read-only HC data */
 	__u32		hcs_params1;
 	__u32		hcs_params2;
@@ -1557,6 +1562,11 @@
 	struct xhci_command	*current_cmd;
 	struct xhci_ring	*event_ring;
 	struct xhci_erst	erst;
+
+	/* secondary event ring and erst */
+	struct xhci_ring	**sec_event_ring;
+	struct xhci_erst	*sec_erst;
+
 	/* Scratchpad */
 	struct xhci_scratchpad  *scratchpad;
 	/* Store LPM test failed devices' information */
@@ -1659,6 +1669,7 @@
 	/* Compliance Mode Recovery Data */
 	struct timer_list	comp_mode_recovery_timer;
 	u32			port_status_u0;
+	bool			suspended;
 /* Compliance Mode Timer Triggered every 2 seconds */
 #define COMP_MODE_RCVRY_MSECS 2000
 };
@@ -1816,6 +1827,8 @@
 void xhci_urb_free_priv(struct urb_priv *urb_priv);
 void xhci_free_command(struct xhci_hcd *xhci,
 		struct xhci_command *command);
+int xhci_sec_event_ring_setup(struct usb_hcd *hcd, unsigned intr_num);
+int xhci_sec_event_ring_cleanup(struct usb_hcd *hcd, unsigned intr_num);
 
 /* xHCI host controller glue */
 typedef void (*xhci_get_quirks_t)(struct device *, struct xhci_hcd *);
@@ -1935,6 +1948,7 @@
 		char *buf, u16 wLength);
 int xhci_hub_status_data(struct usb_hcd *hcd, char *buf);
 int xhci_find_raw_port_number(struct usb_hcd *hcd, int port1);
+int xhci_get_core_id(struct usb_hcd *hcd);
 
 #ifdef CONFIG_PM
 int xhci_bus_suspend(struct usb_hcd *hcd);
@@ -1954,4 +1968,8 @@
 struct xhci_slot_ctx *xhci_get_slot_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx);
 struct xhci_ep_ctx *xhci_get_ep_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx, unsigned int ep_index);
 
+/* EHSET */
+int xhci_submit_single_step_set_feature(struct usb_hcd *hcd, struct urb *urb,
+					int is_setup);
+
 #endif /* __LINUX_XHCI_HCD_H */
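[Note on the hunk above] xhci.h also gains a 'suspended' flag, which the xhci.c hunks use to make xhci_suspend() and xhci_resume() safe to call from both the system-sleep path and the new runtime-PM path. A distilled sketch of that guard, with a simplified stand-in struct:

/*
 * Sketch: the idempotent suspend/resume guard built around the new
 * 'suspended' flag. 'struct hc_state' stands in for struct xhci_hcd.
 */
#include <linux/types.h>

struct hc_state { bool suspended; };

static int hc_suspend(struct hc_state *hc)
{
	if (hc->suspended)
		return 0;	/* already quiesced by another PM path */
	/* ... stop the controller ... */
	hc->suspended = true;
	return 0;
}

static int hc_resume(struct hc_state *hc)
{
	if (!hc->suspended)
		return 0;	/* never suspended; nothing to restore */
	/* ... restart the controller ... */
	hc->suspended = false;
	return 0;
}
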
diff -ruw linux-4.4.115/drivers/usb/host/xhci-hub.c linux-4.4.115-fbx/drivers/usb/host/xhci-hub.c
--- linux-4.4.115/drivers/usb/host/xhci-hub.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/usb/host/xhci-hub.c	2019-10-29 09:26:25.025216703 +0100
@@ -20,7 +20,7 @@
  * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  */
 
-
+#include <linux/gfp.h>
 #include <linux/slab.h>
 #include <asm/unaligned.h>
 
@@ -376,10 +376,6 @@
 	int i;
 
 	ret = 0;
-	virt_dev = xhci->devs[slot_id];
-	if (!virt_dev)
-		return -ENODEV;
-
 	cmd = xhci_alloc_command(xhci, false, true, GFP_NOIO);
 	if (!cmd) {
 		xhci_dbg(xhci, "Couldn't allocate command structure.\n");
@@ -387,6 +383,13 @@
 	}
 
 	spin_lock_irqsave(&xhci->lock, flags);
+	virt_dev = xhci->devs[slot_id];
+	if (!virt_dev) {
+		spin_unlock_irqrestore(&xhci->lock, flags);
+		xhci_free_command(xhci, cmd);
+		return -ENODEV;
+	}
+
 	for (i = LAST_EP_INDEX; i > 0; i--) {
 		if (virt_dev->eps[i].ring && virt_dev->eps[i].ring->dequeue) {
 			struct xhci_command *command;
@@ -878,6 +881,151 @@
 	return status;
 }
 
+static void xhci_single_step_completion(struct urb *urb)
+{
+	struct completion *done = urb->context;
+
+	complete(done);
+}
+
+/*
+ * Allocate a URB and initialize its various fields.
+ * This helper is used by the single_step_set_feature test of
+ * EHSET, where the IN packet of the GetDescriptor request is
+ * sent 15 seconds after the SETUP packet.
+ * Returns NULL on failure.
+ */
+static struct urb *xhci_request_single_step_set_feature_urb(
+		struct usb_device *udev,
+		void *dr,
+		void *buf,
+		struct completion *done)
+{
+	struct urb *urb;
+	struct usb_hcd *hcd = bus_to_hcd(udev->bus);
+	struct usb_host_endpoint *ep;
+
+	urb = usb_alloc_urb(0, GFP_KERNEL);
+	if (!urb)
+		return NULL;
+
+	urb->pipe = usb_rcvctrlpipe(udev, 0);
+	ep = udev->ep_in[usb_pipeendpoint(urb->pipe)];
+	if (!ep) {
+		usb_free_urb(urb);
+		return NULL;
+	}
+
+	/*
+	 * Initialize the various URB fields as these are used by the HCD
+	 * driver to queue it and as well as when completion happens.
+	 */
+	urb->ep = ep;
+	urb->dev = udev;
+	urb->setup_packet = dr;
+	urb->transfer_buffer = buf;
+	urb->transfer_buffer_length = USB_DT_DEVICE_SIZE;
+	urb->complete = xhci_single_step_completion;
+	urb->status = -EINPROGRESS;
+	urb->actual_length = 0;
+	urb->transfer_flags = URB_DIR_IN;
+	usb_get_urb(urb);
+	atomic_inc(&urb->use_count);
+	atomic_inc(&urb->dev->urbnum);
+	usb_hcd_map_urb_for_dma(hcd, urb, GFP_KERNEL);
+	urb->context = done;
+	return urb;
+}
+
+/*
+ * This function implements the USB_PORT_FEAT_TEST handling of the
+ * SINGLE_STEP_SET_FEATURE test mode as defined in the Embedded
+ * High-Speed Electrical Test (EHSET) specification. This simply
+ * issues a GetDescriptor control transfer, with an inserted 15-second
+ * delay after the end of the SETUP stage and before the IN token of
+ * the DATA stage is sent. The idea is that this gives the test operator
+ * enough time to configure the oscilloscope to perform a measurement
+ * of the response time between the DATA and ACK packets that follow.
+ */
+static int xhci_ehset_single_step_set_feature(struct usb_hcd *hcd, int port)
+{
+	int retval;
+	struct usb_ctrlrequest *dr;
+	struct urb *urb;
+	struct usb_device *udev;
+	struct xhci_hcd	*xhci = hcd_to_xhci(hcd);
+	struct usb_device_descriptor *buf;
+	unsigned long flags;
+	DECLARE_COMPLETION_ONSTACK(done);
+
+	/* Obtain udev of the rhub's child port */
+	udev = usb_hub_find_child(hcd->self.root_hub, port);
+	if (!udev) {
+		xhci_err(xhci, "No device attached to the RootHub\n");
+		return -ENODEV;
+	}
+	buf = kmalloc(USB_DT_DEVICE_SIZE, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	dr = kmalloc(sizeof(struct usb_ctrlrequest), GFP_KERNEL);
+	if (!dr) {
+		kfree(buf);
+		return -ENOMEM;
+	}
+
+	/* Fill Setup packet for GetDescriptor */
+	dr->bRequestType = USB_DIR_IN;
+	dr->bRequest = USB_REQ_GET_DESCRIPTOR;
+	dr->wValue = cpu_to_le16(USB_DT_DEVICE << 8);
+	dr->wIndex = 0;
+	dr->wLength = cpu_to_le16(USB_DT_DEVICE_SIZE);
+	urb = xhci_request_single_step_set_feature_urb(udev, dr, buf, &done);
+	if (!urb) {
+		retval = -ENOMEM;
+		goto cleanup;
+	}
+
+	/* Now complete just the SETUP stage */
+	spin_lock_irqsave(&xhci->lock, flags);
+	retval = xhci_submit_single_step_set_feature(hcd, urb, 1);
+	spin_unlock_irqrestore(&xhci->lock, flags);
+	if (retval)
+		goto out1;
+
+	if (!wait_for_completion_timeout(&done, msecs_to_jiffies(2000))) {
+		usb_kill_urb(urb);
+		retval = -ETIMEDOUT;
+		xhci_err(xhci, "%s SETUP stage timed out on ep0\n", __func__);
+		goto out1;
+	}
+
+	/* Sleep for 15 seconds; HC will send SOFs during this period */
+	msleep(15 * 1000);
+
+	/* Complete remaining DATA and status stages. Re-use same URB */
+	urb->status = -EINPROGRESS;
+	usb_get_urb(urb);
+	atomic_inc(&urb->use_count);
+	atomic_inc(&urb->dev->urbnum);
+
+	spin_lock_irqsave(&xhci->lock, flags);
+	retval = xhci_submit_single_step_set_feature(hcd, urb, 0);
+	spin_unlock_irqrestore(&xhci->lock, flags);
+	if (!retval && !wait_for_completion_timeout(&done,
+						msecs_to_jiffies(2000))) {
+		usb_kill_urb(urb);
+		retval = -ETIMEDOUT;
+		xhci_err(xhci, "%s IN stage timed out on ep0\n", __func__);
+	}
+out1:
+	usb_free_urb(urb);
+cleanup:
+	kfree(dr);
+	kfree(buf);
+	return retval;
+}
+
 int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
 		u16 wIndex, char *buf, u16 wLength)
 {
@@ -892,6 +1040,7 @@
 	u16 link_state = 0;
 	u16 wake_mask = 0;
 	u16 timeout = 0;
+	u16 test_mode = 0;
 
 	max_ports = xhci_get_ports(hcd, &port_array);
 	bus_state = &xhci->bus_state[hcd_index(hcd)];
@@ -965,8 +1114,8 @@
 			link_state = (wIndex & 0xff00) >> 3;
 		if (wValue == USB_PORT_FEAT_REMOTE_WAKE_MASK)
 			wake_mask = wIndex & 0xff00;
-		/* The MSB of wIndex is the U1/U2 timeout */
-		timeout = (wIndex & 0xff00) >> 8;
+		/* The MSB of wIndex is the U1/U2 timeout OR TEST mode */
+		test_mode = timeout = (wIndex & 0xff00) >> 8;
 		wIndex &= 0xff;
 		if (!wIndex || wIndex > max_ports)
 			goto error;
@@ -1140,6 +1289,32 @@
 			temp |= PORT_U2_TIMEOUT(timeout);
 			writel(temp, port_array[wIndex] + PORTPMSC);
 			break;
+		case USB_PORT_FEAT_TEST:
+			slot_id = xhci_find_slot_id_by_port(hcd, xhci,
+							wIndex + 1);
+			if (test_mode && test_mode <= 5) {
+				/* unlock to execute stop endpoint commands */
+				spin_unlock_irqrestore(&xhci->lock, flags);
+				xhci_stop_device(xhci, slot_id, 1);
+				spin_lock_irqsave(&xhci->lock, flags);
+				xhci_halt(xhci);
+
+				temp = readl_relaxed(port_array[wIndex] +
+								PORTPMSC);
+				temp |= test_mode << 28;
+				writel_relaxed(temp, port_array[wIndex] +
+								PORTPMSC);
+				/* to make sure above write goes through */
+				mb();
+			} else if (test_mode == 6) {
+				spin_unlock_irqrestore(&xhci->lock, flags);
+				retval = xhci_ehset_single_step_set_feature(hcd,
+									wIndex);
+				spin_lock_irqsave(&xhci->lock, flags);
+			} else {
+				goto error;
+			}
+			break;
 		default:
 			goto error;
 		}
@@ -1172,7 +1347,7 @@
 				xhci_set_link_state(xhci, port_array, wIndex,
 							XDEV_RESUME);
 				spin_unlock_irqrestore(&xhci->lock, flags);
-				msleep(USB_RESUME_TIMEOUT);
+				usleep_range(21000, 21500);
 				spin_lock_irqsave(&xhci->lock, flags);
 				xhci_set_link_state(xhci, port_array, wIndex,
 							XDEV_U0);
@@ -1453,7 +1628,7 @@
 
 	if (need_usb2_u3_exit) {
 		spin_unlock_irqrestore(&xhci->lock, flags);
-		msleep(USB_RESUME_TIMEOUT);
+		usleep_range(21000, 21500);
 		spin_lock_irqsave(&xhci->lock, flags);
 	}
 
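[Note on the hunks above] The xhci-hub.c changes implement USB_PORT_FEAT_TEST: modes 1-5 stop the attached device and halt the controller, then program the test mode into bits 31:28 of the port's PORTPMSC register; mode 6 runs the EHSET SINGLE_STEP_SET_FEATURE sequence with its 15-second pause. A condensed, hedged restatement of the register write (the helper name is illustrative; this sketch also clears any previously programmed mode, which the hunk leaves to a prior controller reset):

/*
 * Sketch: program an EHSET test mode (1..5) into PORTPMSC[31:28].
 * The controller must already be halted; 'port_pmsc' is the mapped
 * register address, as in the xhci_hub_control() hunk above.
 */
#include <linux/io.h>
#include <linux/types.h>

static void write_port_test_mode(void __iomem *port_pmsc, u16 test_mode)
{
	u32 temp = readl_relaxed(port_pmsc);

	temp &= ~(0xf << 28);		/* clear any previous test mode */
	temp |= test_mode << 28;
	writel_relaxed(temp, port_pmsc);
	mb();				/* make sure the write went through */
}
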
diff -ruw linux-4.4.115/drivers/usb/host/xhci-mem.c linux-4.4.115-fbx/drivers/usb/host/xhci-mem.c
--- linux-4.4.115/drivers/usb/host/xhci-mem.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/usb/host/xhci-mem.c	2019-10-29 09:26:25.029216742 +0100
@@ -1062,7 +1062,7 @@
 
 	/* Point to output device context in dcbaa. */
 	xhci->dcbaa->dev_context_ptrs[slot_id] = cpu_to_le64(dev->out_ctx->dma);
-	xhci_dbg(xhci, "Set slot id %d dcbaa entry %p to 0x%llx\n",
+	xhci_dbg(xhci, "Set slot id %d dcbaa entry %pK to 0x%llx\n",
 		 slot_id,
 		 &xhci->dcbaa->dev_context_ptrs[slot_id],
 		 le64_to_cpu(xhci->dcbaa->dev_context_ptrs[slot_id]));
@@ -1233,7 +1233,7 @@
 		if (udev->tt->multi)
 			slot_ctx->dev_info |= cpu_to_le32(DEV_MTT);
 	}
-	xhci_dbg(xhci, "udev->tt = %p\n", udev->tt);
+	xhci_dbg(xhci, "udev->tt = %pK\n", udev->tt);
 	xhci_dbg(xhci, "udev->ttport = 0x%x\n", udev->ttport);
 
 	/* Step 4 - ring already allocated */
@@ -1525,6 +1525,8 @@
 		}
 		break;
 	case USB_SPEED_FULL:
+		if (usb_endpoint_xfer_bulk(&ep->desc) && max_packet < 8)
+			max_packet = 8;
 	case USB_SPEED_LOW:
 		break;
 	default:
@@ -1838,25 +1840,151 @@
 	kfree(command);
 }
 
-void xhci_mem_cleanup(struct xhci_hcd *xhci)
+void xhci_handle_sec_intr_events(struct xhci_hcd *xhci, int intr_num)
+{
+	union xhci_trb *erdp_trb, *current_trb;
+	struct xhci_segment	*seg;
+	u64 erdp_reg;
+	u32 iman_reg;
+	dma_addr_t deq;
+	unsigned long segment_offset;
+
+	/* disable irq, ack pending interrupt and ack all pending events */
+
+	iman_reg =
+		readl_relaxed(&xhci->sec_ir_set[intr_num]->irq_pending);
+	iman_reg &= ~IMAN_IE;
+	writel_relaxed(iman_reg,
+			&xhci->sec_ir_set[intr_num]->irq_pending);
+	iman_reg =
+		readl_relaxed(&xhci->sec_ir_set[intr_num]->irq_pending);
+	if (iman_reg & IMAN_IP)
+		writel_relaxed(iman_reg,
+			&xhci->sec_ir_set[intr_num]->irq_pending);
+
+	/* last acked event trb is in erdp reg  */
+	erdp_reg =
+		xhci_read_64(xhci, &xhci->sec_ir_set[intr_num]->erst_dequeue);
+	deq = (dma_addr_t)(erdp_reg & ~ERST_PTR_MASK);
+	if (!deq) {
+		pr_debug("%s: event ring handling not required\n", __func__);
+		return;
+	}
+
+	seg = xhci->sec_event_ring[intr_num]->first_seg;
+	segment_offset = deq - seg->dma;
+
+	/* find out virtual address of the last acked event trb */
+	erdp_trb = current_trb = &seg->trbs[0] +
+				(segment_offset/sizeof(*current_trb));
+
+	/* read cycle state of the last acked trb to find out CCS */
+	xhci->sec_event_ring[intr_num]->cycle_state =
+				(current_trb->event_cmd.flags & TRB_CYCLE);
+
+	while (1) {
+		/* last trb of the event ring: toggle cycle state */
+		if (current_trb == &seg->trbs[TRBS_PER_SEGMENT - 1]) {
+			xhci->sec_event_ring[intr_num]->cycle_state ^= 1;
+			current_trb = &seg->trbs[0];
+		} else {
+			current_trb++;
+		}
+
+		/* cycle state transition */
+		if ((le32_to_cpu(current_trb->event_cmd.flags) & TRB_CYCLE) !=
+		    xhci->sec_event_ring[intr_num]->cycle_state)
+			break;
+	}
+
+	if (erdp_trb != current_trb) {
+		deq =
+		xhci_trb_virt_to_dma(xhci->sec_event_ring[intr_num]->deq_seg,
+					current_trb);
+		if (deq == 0)
+			xhci_warn(xhci,
+				"WARN ivalid SW event ring dequeue ptr.\n");
+		/* Update HC event ring dequeue pointer */
+		erdp_reg &= ERST_PTR_MASK;
+		erdp_reg |= ((u64) deq & (u64) ~ERST_PTR_MASK);
+	}
+
+	/* Clear the event handler busy flag (RW1C); event ring is empty. */
+	erdp_reg |= ERST_EHB;
+	xhci_write_64(xhci, erdp_reg,
+			&xhci->sec_ir_set[intr_num]->erst_dequeue);
+}
+
+int xhci_sec_event_ring_cleanup(struct usb_hcd *hcd, unsigned intr_num)
 {
+	int size;
+	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
 	struct device	*dev = xhci_to_hcd(xhci)->self.controller;
+
+	if (intr_num >= xhci->max_interrupters) {
+		xhci_err(xhci, "invalid secondary interrupter num %d\n",
+			intr_num);
+		return -EINVAL;
+	}
+
+	size =
+	sizeof(struct xhci_erst_entry)*(xhci->sec_erst[intr_num].num_entries);
+	if (xhci->sec_erst[intr_num].entries) {
+		xhci_handle_sec_intr_events(xhci, intr_num);
+		dma_free_coherent(dev, size, xhci->sec_erst[intr_num].entries,
+				xhci->sec_erst[intr_num].erst_dma_addr);
+		xhci->sec_erst[intr_num].entries = NULL;
+	}
+	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed SEC ERST#%d",
+		intr_num);
+	if (xhci->sec_event_ring[intr_num])
+		xhci_ring_free(xhci, xhci->sec_event_ring[intr_num]);
+
+	xhci->sec_event_ring[intr_num] = NULL;
+	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+		"Freed sec event ring");
+
+	return 0;
+}
+
+void xhci_event_ring_cleanup(struct xhci_hcd *xhci)
+{
 	int size;
-	int i, j, num_ports;
+	unsigned int i;
+	struct device	*dev = xhci_to_hcd(xhci)->self.controller;
 
-	cancel_delayed_work_sync(&xhci->cmd_timer);
+	/* sec event ring clean up */
+	for (i = 1; i < xhci->max_interrupters; i++)
+		xhci_sec_event_ring_cleanup(xhci_to_hcd(xhci), i);
+
+	kfree(xhci->sec_ir_set);
+	xhci->sec_ir_set = NULL;
+	kfree(xhci->sec_erst);
+	xhci->sec_erst = NULL;
+	kfree(xhci->sec_event_ring);
+	xhci->sec_event_ring = NULL;
 
-	/* Free the Event Ring Segment Table and the actual Event Ring */
+	/* primary event ring clean up */
 	size = sizeof(struct xhci_erst_entry)*(xhci->erst.num_entries);
 	if (xhci->erst.entries)
 		dma_free_coherent(dev, size,
 				xhci->erst.entries, xhci->erst.erst_dma_addr);
 	xhci->erst.entries = NULL;
-	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed ERST");
+	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed primary ERST");
 	if (xhci->event_ring)
 		xhci_ring_free(xhci, xhci->event_ring);
 	xhci->event_ring = NULL;
-	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed event ring");
+	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed primary event ring");
+}
+
+void xhci_mem_cleanup(struct xhci_hcd *xhci)
+{
+	struct device	*dev = xhci_to_hcd(xhci)->self.controller;
+	int i, j, num_ports;
+
+	cancel_delayed_work_sync(&xhci->cmd_timer);
+
+	xhci_event_ring_cleanup(xhci);
 
 	if (xhci->lpm_command)
 		xhci_free_command(xhci, xhci->lpm_command);
@@ -1958,15 +2086,15 @@
 	if (seg != result_seg) {
 		xhci_warn(xhci, "WARN: %s TRB math test %d failed!\n",
 				test_name, test_number);
-		xhci_warn(xhci, "Tested TRB math w/ seg %p and "
+		xhci_warn(xhci, "Tested TRB math w/ seg %pK and "
 				"input DMA 0x%llx\n",
 				input_seg,
 				(unsigned long long) input_dma);
-		xhci_warn(xhci, "starting TRB %p (0x%llx DMA), "
-				"ending TRB %p (0x%llx DMA)\n",
+		xhci_warn(xhci, "starting TRB %pK (0x%llx DMA), "
+				"ending TRB %pK (0x%llx DMA)\n",
 				start_trb, start_dma,
 				end_trb, end_dma);
-		xhci_warn(xhci, "Expected seg %p, got seg %p\n",
+		xhci_warn(xhci, "Expected seg %pK, got seg %pK\n",
 				result_seg, seg);
 		trb_in_td(xhci, input_seg, start_trb, end_trb, input_dma,
 			  true);
@@ -2097,30 +2225,6 @@
 	return 0;
 }
 
-static void xhci_set_hc_event_deq(struct xhci_hcd *xhci)
-{
-	u64 temp;
-	dma_addr_t deq;
-
-	deq = xhci_trb_virt_to_dma(xhci->event_ring->deq_seg,
-			xhci->event_ring->dequeue);
-	if (deq == 0 && !in_interrupt())
-		xhci_warn(xhci, "WARN something wrong with SW event ring "
-				"dequeue ptr.\n");
-	/* Update HC event ring dequeue pointer */
-	temp = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
-	temp &= ERST_PTR_MASK;
-	/* Don't clear the EHB bit (which is RW1C) because
-	 * there might be more events to service.
-	 */
-	temp &= ~ERST_EHB;
-	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
-			"// Write event ring dequeue pointer, "
-			"preserving EHB bit");
-	xhci_write_64(xhci, ((u64) deq & (u64) ~ERST_PTR_MASK) | temp,
-			&xhci->ir_set->erst_dequeue);
-}
-
 static void xhci_add_in_port(struct xhci_hcd *xhci, unsigned int num_ports,
 		__le32 __iomem *addr, u8 major_revision, int max_caps)
 {
@@ -2136,7 +2240,7 @@
 		rhub = &xhci->usb2_rhub;
 	} else {
 		xhci_warn(xhci, "Ignoring unknown port speed, "
-				"Ext Cap %p, revision = 0x%x\n",
+				"Ext Cap %pK, revision = 0x%x\n",
 				addr, major_revision);
 		/* Ignoring port protocol we can't understand. FIXME */
 		return;
@@ -2149,7 +2253,7 @@
 	port_offset = XHCI_EXT_PORT_OFF(temp);
 	port_count = XHCI_EXT_PORT_COUNT(temp);
 	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
-			"Ext Cap %p, port offset = %u, "
+			"Ext Cap %pK, port offset = %u, "
 			"count = %u, revision = 0x%x",
 			addr, port_offset, port_count, major_revision);
 	/* Port count includes the current port offset */
@@ -2211,7 +2315,7 @@
 	for (i = port_offset; i < (port_offset + port_count); i++) {
 		/* Duplicate entry.  Ignore the port if the revisions differ. */
 		if (xhci->port_array[i] != 0) {
-			xhci_warn(xhci, "Duplicate port entry, Ext Cap %p,"
+			xhci_warn(xhci, "Duplicate port entry, Ext Cap %pK,"
 					" port %u\n", addr, i);
 			xhci_warn(xhci, "Port was marked as USB %u, "
 					"duplicated as USB %u\n",
@@ -2367,7 +2471,7 @@
 				NUM_PORT_REGS*i;
 			xhci_dbg_trace(xhci, trace_xhci_dbg_init,
 					"USB 2.0 port at index %u, "
-					"addr = %p", i,
+					"addr = %pK", i,
 					xhci->usb2_ports[port_index]);
 			port_index++;
 			if (port_index == xhci->num_usb2_ports)
@@ -2388,7 +2492,7 @@
 					NUM_PORT_REGS*i;
 				xhci_dbg_trace(xhci, trace_xhci_dbg_init,
 						"USB 3.0 port at index %u, "
-						"addr = %p", i,
+						"addr = %pK", i,
 						xhci->usb3_ports[port_index]);
 				port_index++;
 				if (port_index == xhci->num_usb3_ports)
@@ -2398,13 +2502,184 @@
 	return 0;
 }
 
+int xhci_event_ring_setup(struct xhci_hcd *xhci, struct xhci_ring **er,
+	struct xhci_intr_reg __iomem *ir_set, struct xhci_erst *erst,
+	unsigned int intr_num, gfp_t flags)
+{
+	dma_addr_t dma, deq;
+	u64 val_64;
+	unsigned int val;
+	struct xhci_segment *seg;
+	struct device *dev = xhci_to_hcd(xhci)->self.controller;
+
+	*er = xhci_ring_alloc(xhci, ERST_NUM_SEGS, 1,
+				TYPE_EVENT, flags);
+	if (!*er)
+		return -ENOMEM;
+
+	erst->entries = dma_alloc_coherent(dev,
+			sizeof(struct xhci_erst_entry) * ERST_NUM_SEGS, &dma,
+			flags);
+	if (!erst->entries) {
+		xhci_ring_free(xhci, *er);
+		return -ENOMEM;
+	}
+
+	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+		"intr# %d: Allocated event ring segment table at 0x%llx",
+		intr_num, (unsigned long long)dma);
+
+	memset(erst->entries, 0, sizeof(struct xhci_erst_entry)*ERST_NUM_SEGS);
+	erst->num_entries = ERST_NUM_SEGS;
+	erst->erst_dma_addr = dma;
+	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+		"intr# %d: num segs = %i, virt addr = %pK, dma addr = 0x%llx",
+			intr_num,
+			erst->num_entries,
+			erst->entries,
+			(unsigned long long)erst->erst_dma_addr);
+
+	/* set ring base address and size for each segment table entry */
+	for (val = 0, seg = (*er)->first_seg; val < ERST_NUM_SEGS; val++) {
+		struct xhci_erst_entry *entry = &erst->entries[val];
+
+		entry->seg_addr = cpu_to_le64(seg->dma);
+		entry->seg_size = cpu_to_le32(TRBS_PER_SEGMENT);
+		entry->rsvd = 0;
+		seg = seg->next;
+	}
+
+	/* set ERST count with the number of entries in the segment table */
+	val = readl_relaxed(&ir_set->erst_size);
+	val &= ERST_SIZE_MASK;
+	val |= ERST_NUM_SEGS;
+	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+		"Write ERST size = %i to ir_set %d (some bits preserved)", val,
+		intr_num);
+	writel_relaxed(val, &ir_set->erst_size);
+
+	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+			"intr# %d: Set ERST entries to point to event ring.",
+			intr_num);
+	/* set the segment table base address */
+	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+			"Set ERST base address for ir_set %d = 0x%llx",
+			intr_num,
+			(unsigned long long)erst->erst_dma_addr);
+	val_64 = xhci_read_64(xhci, &ir_set->erst_base);
+	val_64 &= ERST_PTR_MASK;
+	val_64 |= (erst->erst_dma_addr & (u64) ~ERST_PTR_MASK);
+	xhci_write_64(xhci, val_64, &ir_set->erst_base);
+
+	/* Set the event ring dequeue address */
+	deq = xhci_trb_virt_to_dma((*er)->deq_seg, (*er)->dequeue);
+	if (deq == 0 && !in_interrupt())
+		xhci_warn(xhci,
+		"intr# %d:WARN something wrong with SW event ring deq ptr.\n",
+		intr_num);
+	/* Update HC event ring dequeue pointer */
+	val_64 = xhci_read_64(xhci, &ir_set->erst_dequeue);
+	val_64 &= ERST_PTR_MASK;
+	/* Don't clear the EHB bit (which is RW1C) because
+	 * there might be more events to service.
+	 */
+	val_64 &= ~ERST_EHB;
+	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+		"intr# %d:Write event ring dequeue pointer, preserving EHB bit",
+		intr_num);
+	xhci_write_64(xhci, ((u64) deq & (u64) ~ERST_PTR_MASK) | val_64,
+			&ir_set->erst_dequeue);
+	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+			"Wrote ERST address to ir_set %d.", intr_num);
+	xhci_print_ir_set(xhci, intr_num);
+
+	return 0;
+}
+
+int xhci_sec_event_ring_setup(struct usb_hcd *hcd, unsigned intr_num)
+{
+	int ret;
+	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+
+	if ((xhci->xhc_state & XHCI_STATE_HALTED) || !xhci->sec_ir_set
+		|| !xhci->sec_event_ring || !xhci->sec_erst ||
+		intr_num >= xhci->max_interrupters) {
+		xhci_err(xhci,
+		"%s:state %x ir_set %pK evt_ring %pK erst %pK intr# %d\n",
+		__func__, xhci->xhc_state, xhci->sec_ir_set,
+		xhci->sec_event_ring, xhci->sec_erst, intr_num);
+		return -EINVAL;
+	}
+
+	if (xhci->sec_event_ring && xhci->sec_event_ring[intr_num]
+		&& xhci->sec_event_ring[intr_num]->first_seg)
+		goto done;
+
+	xhci->sec_ir_set[intr_num] = &xhci->run_regs->ir_set[intr_num];
+	ret = xhci_event_ring_setup(xhci,
+				&xhci->sec_event_ring[intr_num],
+				xhci->sec_ir_set[intr_num],
+				&xhci->sec_erst[intr_num],
+				intr_num, GFP_KERNEL);
+	if (ret) {
+		xhci_err(xhci, "sec event ring setup failed inter#%d\n",
+			intr_num);
+		return ret;
+	}
+done:
+	return 0;
+}
+
+int xhci_event_ring_init(struct xhci_hcd *xhci, gfp_t flags)
+{
+	int ret = 0;
+
+	/* primary + secondary */
+	xhci->max_interrupters = HCS_MAX_INTRS(xhci->hcs_params1);
+
+	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+		"// Allocating primary event ring");
+
+	/* Set ir_set to interrupt register set 0 */
+	xhci->ir_set = &xhci->run_regs->ir_set[0];
+	ret = xhci_event_ring_setup(xhci, &xhci->event_ring, xhci->ir_set,
+		&xhci->erst, 0, flags);
+	if (ret) {
+		xhci_err(xhci, "failed to setup primary event ring\n");
+		goto fail;
+	}
+
+	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+		"// Allocating sec event ring related pointers");
+
+	xhci->sec_ir_set = kcalloc(xhci->max_interrupters,
+				sizeof(*xhci->sec_ir_set), flags);
+	if (!xhci->sec_ir_set) {
+		ret = -ENOMEM;
+		goto fail;
+	}
+
+	xhci->sec_event_ring = kcalloc(xhci->max_interrupters,
+				sizeof(*xhci->sec_event_ring), flags);
+	if (!xhci->sec_event_ring) {
+		ret = -ENOMEM;
+		goto fail;
+	}
+
+	xhci->sec_erst = kcalloc(xhci->max_interrupters,
+				sizeof(*xhci->sec_erst), flags);
+	if (!xhci->sec_erst)
+		ret = -ENOMEM;
+fail:
+	return ret;
+}
+
 int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
 {
 	dma_addr_t	dma;
 	struct device	*dev = xhci_to_hcd(xhci)->self.controller;
 	unsigned int	val, val2;
 	u64		val_64;
-	struct xhci_segment	*seg;
 	u32 page_size, temp;
 	int i;
 
@@ -2457,7 +2732,7 @@
 	memset(xhci->dcbaa, 0, sizeof *(xhci->dcbaa));
 	xhci->dcbaa->dma = dma;
 	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
-			"// Device context base array address = 0x%llx (DMA), %p (virt)",
+			"// Device context base array address = 0x%llx (DMA), %pK (virt)",
 			(unsigned long long)xhci->dcbaa->dma, xhci->dcbaa);
 	xhci_write_64(xhci, dma, &xhci->op_regs->dcbaa_ptr);
 
@@ -2498,7 +2773,7 @@
 	if (!xhci->cmd_ring)
 		goto fail;
 	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
-			"Allocated command ring at %p", xhci->cmd_ring);
+			"Allocated command ring at %pK", xhci->cmd_ring);
 	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "First segment DMA is 0x%llx",
 			(unsigned long long)xhci->cmd_ring->first_seg->dma);
 
@@ -2530,73 +2805,16 @@
 	xhci->dba = (void __iomem *) xhci->cap_regs + val;
 	xhci_dbg_regs(xhci);
 	xhci_print_run_regs(xhci);
-	/* Set ir_set to interrupt register set 0 */
-	xhci->ir_set = &xhci->run_regs->ir_set[0];
 
 	/*
 	 * Event ring setup: Allocate a normal ring, but also setup
 	 * the event ring segment table (ERST).  Section 4.9.3.
 	 */
-	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Allocating event ring");
-	xhci->event_ring = xhci_ring_alloc(xhci, ERST_NUM_SEGS, 1, TYPE_EVENT,
-						flags);
-	if (!xhci->event_ring)
-		goto fail;
-	if (xhci_check_trb_in_td_math(xhci) < 0)
+	if (xhci_event_ring_init(xhci, GFP_KERNEL))
 		goto fail;
 
-	xhci->erst.entries = dma_alloc_coherent(dev,
-			sizeof(struct xhci_erst_entry) * ERST_NUM_SEGS, &dma,
-			flags);
-	if (!xhci->erst.entries)
+	if (xhci_check_trb_in_td_math(xhci) < 0)
 		goto fail;
-	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
-			"// Allocated event ring segment table at 0x%llx",
-			(unsigned long long)dma);
-
-	memset(xhci->erst.entries, 0, sizeof(struct xhci_erst_entry)*ERST_NUM_SEGS);
-	xhci->erst.num_entries = ERST_NUM_SEGS;
-	xhci->erst.erst_dma_addr = dma;
-	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
-			"Set ERST to 0; private num segs = %i, virt addr = %p, dma addr = 0x%llx",
-			xhci->erst.num_entries,
-			xhci->erst.entries,
-			(unsigned long long)xhci->erst.erst_dma_addr);
-
-	/* set ring base address and size for each segment table entry */
-	for (val = 0, seg = xhci->event_ring->first_seg; val < ERST_NUM_SEGS; val++) {
-		struct xhci_erst_entry *entry = &xhci->erst.entries[val];
-		entry->seg_addr = cpu_to_le64(seg->dma);
-		entry->seg_size = cpu_to_le32(TRBS_PER_SEGMENT);
-		entry->rsvd = 0;
-		seg = seg->next;
-	}
-
-	/* set ERST count with the number of entries in the segment table */
-	val = readl(&xhci->ir_set->erst_size);
-	val &= ERST_SIZE_MASK;
-	val |= ERST_NUM_SEGS;
-	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
-			"// Write ERST size = %i to ir_set 0 (some bits preserved)",
-			val);
-	writel(val, &xhci->ir_set->erst_size);
-
-	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
-			"// Set ERST entries to point to event ring.");
-	/* set the segment table base address */
-	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
-			"// Set ERST base address for ir_set 0 = 0x%llx",
-			(unsigned long long)xhci->erst.erst_dma_addr);
-	val_64 = xhci_read_64(xhci, &xhci->ir_set->erst_base);
-	val_64 &= ERST_PTR_MASK;
-	val_64 |= (xhci->erst.erst_dma_addr & (u64) ~ERST_PTR_MASK);
-	xhci_write_64(xhci, val_64, &xhci->ir_set->erst_base);
-
-	/* Set the event ring dequeue address */
-	xhci_set_hc_event_deq(xhci);
-	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
-			"Wrote ERST address to ir_set 0.");
-	xhci_print_ir_set(xhci, 0);
 
 	/*
 	 * XXX: Might need to set the Interrupter Moderation Register to
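[Note on the hunks above] xhci_handle_sec_intr_events() drains a secondary event ring by walking TRBs from the last acknowledged one until the producer's cycle bit no longer matches the consumer cycle state, toggling that state at the segment boundary. The core of that scan as a hedged, single-segment sketch (types and macros come from xhci.h; the helper name is illustrative):

/*
 * Sketch: find the first not-yet-written TRB on a single-segment event
 * ring by following the producer cycle bit, as the cleanup path above
 * does. 'trbs' has TRBS_PER_SEGMENT entries; 'ccs' is the consumer
 * cycle state read from the last acknowledged TRB.
 */
#include "xhci.h"	/* union xhci_trb, TRBS_PER_SEGMENT, TRB_CYCLE */

static union xhci_trb *next_unwritten_trb(union xhci_trb *trbs,
					  union xhci_trb *cur, u32 *ccs)
{
	while (1) {
		if (cur == &trbs[TRBS_PER_SEGMENT - 1]) {
			*ccs ^= 1;	/* wrap: producer toggled the cycle bit */
			cur = &trbs[0];
		} else {
			cur++;
		}
		/* a mismatched cycle bit marks the first unconsumed slot */
		if ((le32_to_cpu(cur->event_cmd.flags) & TRB_CYCLE) != *ccs)
			return cur;
	}
}
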
diff -ruw linux-4.4.115/drivers/usb/host/xhci-plat.c linux-4.4.115-fbx/drivers/usb/host/xhci-plat.c
--- linux-4.4.115/drivers/usb/host/xhci-plat.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/usb/host/xhci-plat.c	2019-10-29 09:26:25.029216742 +0100
@@ -38,12 +38,19 @@
 
 static void xhci_plat_quirks(struct device *dev, struct xhci_hcd *xhci)
 {
+	struct device_node *node = dev->of_node;
+	struct usb_xhci_pdata *pdata = dev_get_platdata(dev);
+
 	/*
 	 * As of now platform drivers don't provide MSI support so we ensure
 	 * here that the generic code does not try to make a pci_dev from our
 	 * dev struct in order to setup MSI
 	 */
 	xhci->quirks |= XHCI_PLAT;
+
+	if ((node && of_property_read_bool(node, "usb3-lpm-capable")) ||
+			(pdata && pdata->usb3_lpm_capable))
+		xhci->quirks |= XHCI_LPM_SUPPORT;
 }
 
 /* called during probe() after chip reset completes */
@@ -73,9 +80,62 @@
 	return xhci_run(hcd);
 }
 
+static ssize_t config_imod_store(struct device *pdev,
+		struct device_attribute *attr, const char *buff, size_t size)
+{
+	struct usb_hcd *hcd = dev_get_drvdata(pdev);
+	struct xhci_hcd *xhci;
+	u32 temp;
+	u32 imod;
+	unsigned long flags;
+
+	if (kstrtouint(buff, 10, &imod))
+		return -EINVAL;
+
+	imod &= ER_IRQ_INTERVAL_MASK;
+	xhci = hcd_to_xhci(hcd);
+
+	if (xhci->shared_hcd->state == HC_STATE_SUSPENDED
+		&& hcd->state == HC_STATE_SUSPENDED)
+		return -EACCES;
+
+	spin_lock_irqsave(&xhci->lock, flags);
+	temp = readl_relaxed(&xhci->ir_set->irq_control);
+	temp &= ~ER_IRQ_INTERVAL_MASK;
+	temp |= imod;
+	writel_relaxed(temp, &xhci->ir_set->irq_control);
+	spin_unlock_irqrestore(&xhci->lock, flags);
+
+	return size;
+}
+
+static ssize_t config_imod_show(struct device *pdev,
+		struct device_attribute *attr, char *buff)
+{
+	struct usb_hcd *hcd = dev_get_drvdata(pdev);
+	struct xhci_hcd *xhci;
+	u32 temp;
+	unsigned long flags;
+
+	xhci = hcd_to_xhci(hcd);
+
+	if (xhci->shared_hcd->state == HC_STATE_SUSPENDED
+		&& hcd->state == HC_STATE_SUSPENDED)
+		return -EACCES;
+
+	spin_lock_irqsave(&xhci->lock, flags);
+	temp = readl_relaxed(&xhci->ir_set->irq_control) &
+			ER_IRQ_INTERVAL_MASK;
+	spin_unlock_irqrestore(&xhci->lock, flags);
+
+	return snprintf(buff, PAGE_SIZE, "%08u\n", temp);
+}
+
+static DEVICE_ATTR(config_imod, S_IRUGO | S_IWUSR,
+		config_imod_show, config_imod_store);
+
 static int xhci_plat_probe(struct platform_device *pdev)
 {
-	struct device_node	*node = pdev->dev.of_node;
 	struct usb_xhci_pdata	*pdata = dev_get_platdata(&pdev->dev);
 	const struct hc_driver	*driver;
 	struct xhci_hcd		*xhci;
@@ -84,6 +144,8 @@
 	struct clk              *clk;
 	int			ret;
 	int			irq;
+	u32			temp, imod;
+	unsigned long		flags;
 
 	if (usb_disabled())
 		return -ENODEV;
@@ -113,6 +175,8 @@
 	if (!hcd)
 		return -ENOMEM;
 
+	hcd_to_bus(hcd)->skip_resume = true;
+
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	hcd->regs = devm_ioremap_resource(&pdev->dev, res);
 	if (IS_ERR(hcd->regs)) {
@@ -137,6 +201,15 @@
 		goto put_hcd;
 	}
 
+	if (pdev->dev.parent)
+		pm_runtime_resume(pdev->dev.parent);
+
+	pm_runtime_use_autosuspend(&pdev->dev);
+	pm_runtime_set_autosuspend_delay(&pdev->dev, 1000);
+	pm_runtime_set_active(&pdev->dev);
+	pm_runtime_enable(&pdev->dev);
+	pm_runtime_get_sync(&pdev->dev);
+
 	if (of_device_is_compatible(pdev->dev.of_node,
 				    "marvell,armada-375-xhci") ||
 	    of_device_is_compatible(pdev->dev.of_node,
@@ -158,9 +231,11 @@
 		goto disable_clk;
 	}
 
-	if ((node && of_property_read_bool(node, "usb3-lpm-capable")) ||
-			(pdata && pdata->usb3_lpm_capable))
-		xhci->quirks |= XHCI_LPM_SUPPORT;
+	hcd_to_bus(xhci->shared_hcd)->skip_resume = true;
+
+	if (device_property_read_u32(pdev->dev.parent, "usb-core-id",
+								&xhci->core_id))
+		xhci->core_id = -EINVAL;
 
 	hcd->usb_phy = devm_usb_get_phy_by_phandle(&pdev->dev, "usb-phy", 0);
 	if (IS_ERR(hcd->usb_phy)) {
@@ -178,6 +253,8 @@
 	if (ret)
 		goto disable_usb_phy;
 
+	device_wakeup_enable(&hcd->self.root_hub->dev);
+
 	if (HCC_MAX_PSA(xhci->hcc_params) >= 4)
 		xhci->shared_hcd->can_do_streams = 1;
 
@@ -185,6 +262,28 @@
 	if (ret)
 		goto dealloc_usb2_hcd;
 
+	device_wakeup_enable(&xhci->shared_hcd->self.root_hub->dev);
+
+	/* override imod interval if specified */
+	if (pdata && pdata->imod_interval) {
+		imod = pdata->imod_interval & ER_IRQ_INTERVAL_MASK;
+		spin_lock_irqsave(&xhci->lock, flags);
+		temp = readl_relaxed(&xhci->ir_set->irq_control);
+		temp &= ~ER_IRQ_INTERVAL_MASK;
+		temp |= imod;
+		writel_relaxed(temp, &xhci->ir_set->irq_control);
+		spin_unlock_irqrestore(&xhci->lock, flags);
+		dev_dbg(&pdev->dev, "%s: imod set to %u\n", __func__, imod);
+	}
+
+	ret = device_create_file(&pdev->dev, &dev_attr_config_imod);
+	if (ret)
+		dev_err(&pdev->dev, "%s: unable to create imod sysfs entry\n",
+					__func__);
+
+	pm_runtime_mark_last_busy(&pdev->dev);
+	pm_runtime_put_autosuspend(&pdev->dev);
+
 	return 0;
 
 
@@ -213,8 +312,10 @@
 	struct xhci_hcd	*xhci = hcd_to_xhci(hcd);
 	struct clk *clk = xhci->clk;
 
-	xhci->xhc_state |= XHCI_STATE_REMOVING;
+	pm_runtime_disable(&dev->dev);
 
+	device_remove_file(&dev->dev, &dev_attr_config_imod);
+	xhci->xhc_state |= XHCI_STATE_REMOVING;
 	usb_remove_hcd(xhci->shared_hcd);
 	usb_phy_shutdown(hcd->usb_phy);
 
@@ -234,27 +335,79 @@
 	struct usb_hcd	*hcd = dev_get_drvdata(dev);
 	struct xhci_hcd	*xhci = hcd_to_xhci(hcd);
 
+	if (!xhci)
+		return 0;
+
+	dev_dbg(dev, "xhci-plat PM suspend\n");
+
+	return xhci_suspend(xhci, true);
+}
+
+static int xhci_plat_resume(struct device *dev)
+{
+	struct usb_hcd *hcd = dev_get_drvdata(dev);
+	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+
+	if (!xhci)
+		return 0;
+
+	dev_dbg(dev, "xhci-plat PM resume\n");
+
+	return xhci_resume(xhci, false);
+}
+#endif
+
+#ifdef CONFIG_PM
+static int xhci_plat_runtime_idle(struct device *dev)
+{
 	/*
-	 * xhci_suspend() needs `do_wakeup` to know whether host is allowed
-	 * to do wakeup during suspend. Since xhci_plat_suspend is currently
-	 * only designed for system suspend, device_may_wakeup() is enough
-	 * to dertermine whether host is allowed to do wakeup. Need to
-	 * reconsider this when xhci_plat_suspend enlarges its scope, e.g.,
-	 * also applies to runtime suspend.
+	 * When pm_runtime_put_autosuspend() is called on this device,
+	 * after this idle callback returns the PM core will schedule the
+	 * autosuspend if there is any remaining time until expiry. However,
+	 * when reaching this point because the child_count becomes 0, the
+	 * core does not honor autosuspend in that case and results in
+	 * idle/suspend happening immediately. In order to have a delay
+	 * before suspend we have to call pm_runtime_autosuspend() manually.
 	 */
-	return xhci_suspend(xhci, device_may_wakeup(dev));
+	pm_runtime_mark_last_busy(dev);
+	pm_runtime_autosuspend(dev);
+	return -EBUSY;
 }
 
-static int xhci_plat_resume(struct device *dev)
+static int xhci_plat_runtime_suspend(struct device *dev)
+{
+	struct usb_hcd *hcd = dev_get_drvdata(dev);
+	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+
+	if (!xhci)
+		return 0;
+
+	dev_dbg(dev, "xhci-plat runtime suspend\n");
+
+	return xhci_suspend(xhci, true);
+}
+
+static int xhci_plat_runtime_resume(struct device *dev)
 {
 	struct usb_hcd	*hcd = dev_get_drvdata(dev);
 	struct xhci_hcd	*xhci = hcd_to_xhci(hcd);
+	int ret;
 
-	return xhci_resume(xhci, 0);
+	if (!xhci)
+		return 0;
+
+	dev_dbg(dev, "xhci-plat runtime resume\n");
+
+	ret = xhci_resume(xhci, false);
+	pm_runtime_mark_last_busy(dev);
+
+	return ret;
 }
 
 static const struct dev_pm_ops xhci_plat_pm_ops = {
 	SET_SYSTEM_SLEEP_PM_OPS(xhci_plat_suspend, xhci_plat_resume)
+	SET_RUNTIME_PM_OPS(xhci_plat_runtime_suspend, xhci_plat_runtime_resume,
+			   xhci_plat_runtime_idle)
 };
 #define DEV_PM_OPS	(&xhci_plat_pm_ops)
 #else
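
For readers tracing the pm_ops hunk above: SET_SYSTEM_SLEEP_PM_OPS and
SET_RUNTIME_PM_OPS are plain field initializers, so with CONFIG_PM and
CONFIG_PM_SLEEP enabled the structure is roughly equivalent to the following
sketch (the freeze/thaw/poweroff/restore hooks, elided here, map to the same
suspend/resume pair):

    static const struct dev_pm_ops xhci_plat_pm_ops = {
        .suspend         = xhci_plat_suspend,
        .resume          = xhci_plat_resume,
        .runtime_suspend = xhci_plat_runtime_suspend,
        .runtime_resume  = xhci_plat_runtime_resume,
        .runtime_idle    = xhci_plat_runtime_idle,
    };
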
diff -ruw linux-4.4.115/drivers/usb/host/xhci-ring.c linux-4.4.115-fbx/drivers/usb/host/xhci-ring.c
--- linux-4.4.115/drivers/usb/host/xhci-ring.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/usb/host/xhci-ring.c	2019-10-29 09:26:25.029216742 +0100
@@ -310,7 +310,7 @@
 
 		i_cmd->status = COMP_CMD_STOP;
 
-		xhci_dbg(xhci, "Turn aborted command %p to no-op\n",
+		xhci_dbg(xhci, "Turn aborted command %pK to no-op\n",
 			 i_cmd->command_trb);
 		/* get cycle state from the original cmd trb */
 		cycle_state = le32_to_cpu(
@@ -592,7 +592,7 @@
 			"Cycle state = 0x%x", state->new_cycle_state);
 
 	xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
-			"New dequeue segment = %p (virtual)",
+			"New dequeue segment = %pK (virtual)",
 			state->new_deq_seg);
 	addr = xhci_trb_virt_to_dma(state->new_deq_seg, state->new_deq_ptr);
 	xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
@@ -627,8 +627,8 @@
 			xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
 					"Cancel (unchain) link TRB");
 			xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
-					"Address = %p (0x%llx dma); "
-					"in seg %p (0x%llx dma)",
+					"Address = %pK (0x%llx dma); "
+					"in seg %pK (0x%llx dma)",
 					cur_trb,
 					(unsigned long long)xhci_trb_virt_to_dma(cur_seg, cur_trb),
 					cur_seg,
@@ -764,7 +764,7 @@
 			 * short, don't muck with the stream ID after
 			 * submission.
 			 */
-			xhci_warn(xhci, "WARN Cancelled URB %p "
+			xhci_warn(xhci, "WARN Cancelled URB %pK "
 					"has invalid stream ID %u.\n",
 					cur_td->urb,
 					cur_td->urb->stream_id);
@@ -1103,7 +1103,7 @@
 				ep_ring, ep_index);
 		} else {
 			xhci_warn(xhci, "Mismatch between completed Set TR Deq Ptr command & xHCI internal state.\n");
-			xhci_warn(xhci, "ep deq seg = %p, deq ptr = %p\n",
+			xhci_warn(xhci, "ep deq seg = %pK, deq ptr = %pK\n",
 				  ep->queued_deq_seg, ep->queued_deq_ptr);
 		}
 	}
@@ -2624,7 +2624,7 @@
 						 URB_SHORT_NOT_OK)) ||
 					(status != 0 &&
 					 !usb_endpoint_xfer_isoc(&urb->ep->desc)))
-				xhci_dbg(xhci, "Giveback URB %p, len = %d, "
+				xhci_dbg(xhci, "Giveback URB %pK, len = %d, "
 						"expected = %d, status = %d\n",
 						urb, urb->actual_length,
 						urb->transfer_buffer_length,
@@ -3574,6 +3574,156 @@
 	return 0;
 }
 
+/*
+ * Variant of xhci_queue_ctrl_tx() used to implement EHSET
+ * SINGLE_STEP_SET_FEATURE test mode. It differs in that the control
+ * transfer is broken up so that the SETUP stage can happen and call
+ * the URB's completion handler before the DATA/STATUS stages are
+ * executed by the xHC hardware. This assumes the control transfer is a
+ * GetDescriptor, with a DATA stage in the IN direction, and an OUT
+ * STATUS stage.
+ *
+ * This function is called twice, usually with a 15-second delay in between.
+ * - with is_setup==true, the SETUP stage for the control request
+ *   (GetDescriptor) is queued in the TRB ring and sent to HW immediately
+ * - with is_setup==false, the DATA and STATUS TRBs are queued and executed
+ *
+ * Caller must have locked xhci->lock
+ */
+int xhci_submit_single_step_set_feature(struct usb_hcd *hcd, struct urb *urb,
+					int is_setup)
+{
+	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+	struct xhci_ring *ep_ring;
+	int num_trbs;
+	int ret;
+	unsigned int slot_id, ep_index;
+	struct usb_ctrlrequest *setup;
+	struct xhci_generic_trb *start_trb;
+	int start_cycle;
+	u32 field, length_field, remainder;
+	struct urb_priv *urb_priv;
+	struct xhci_td *td;
+
+	ep_ring = xhci_urb_to_transfer_ring(xhci, urb);
+	if (!ep_ring)
+		return -EINVAL;
+
+	/* Need buffer for data stage */
+	if (urb->transfer_buffer_length <= 0)
+		return -EINVAL;
+
+	/*
+	 * Need to copy setup packet into setup TRB, so we can't use the setup
+	 * DMA address.
+	 */
+	if (!urb->setup_packet)
+		return -EINVAL;
+	setup = (struct usb_ctrlrequest *) urb->setup_packet;
+
+	slot_id = urb->dev->slot_id;
+	ep_index = xhci_get_endpoint_index(&urb->ep->desc);
+
+	urb_priv = kzalloc(sizeof(struct urb_priv) +
+				  sizeof(struct xhci_td *), GFP_ATOMIC);
+	if (!urb_priv)
+		return -ENOMEM;
+
+	td = urb_priv->td[0] = kzalloc(sizeof(struct xhci_td), GFP_ATOMIC);
+	if (!td) {
+		kfree(urb_priv);
+		return -ENOMEM;
+	}
+
+	urb_priv->length = 1;
+	urb_priv->td_cnt = 0;
+	urb->hcpriv = urb_priv;
+
+	num_trbs = is_setup ? 1 : 2;
+
+	ret = prepare_transfer(xhci, xhci->devs[slot_id],
+			ep_index, urb->stream_id,
+			num_trbs, urb, 0, GFP_ATOMIC);
+	if (ret < 0) {
+		kfree(td);
+		kfree(urb_priv);
+		return ret;
+	}
+
+	/*
+	 * Don't give the first TRB to the hardware (by toggling the cycle bit)
+	 * until we've finished creating all the other TRBs.  The ring's cycle
+	 * state may change as we enqueue the other TRBs, so save it too.
+	 */
+	start_trb = &ep_ring->enqueue->generic;
+	start_cycle = ep_ring->cycle_state;
+
+	if (is_setup) {
+		/* Queue only the setup TRB */
+		field = TRB_IDT | TRB_IOC | TRB_TYPE(TRB_SETUP);
+		if (start_cycle == 0)
+			field |= 0x1;
+
+		/* xHCI 1.0 6.4.1.2.1: Transfer Type field */
+		if (xhci->hci_version == 0x100) {
+			if (setup->bRequestType & USB_DIR_IN)
+				field |= TRB_TX_TYPE(TRB_DATA_IN);
+			else
+				field |= TRB_TX_TYPE(TRB_DATA_OUT);
+		}
+
+		/* Save the DMA address of the last TRB in the TD */
+		td->last_trb = ep_ring->enqueue;
+
+		queue_trb(xhci, ep_ring, false,
+			  setup->bRequestType | setup->bRequest << 8 |
+				le16_to_cpu(setup->wValue) << 16,
+			  le16_to_cpu(setup->wIndex) |
+				le16_to_cpu(setup->wLength) << 16,
+			  TRB_LEN(8) | TRB_INTR_TARGET(0),
+			  field);
+	} else {
+		/* Queue data TRB */
+		field = TRB_ISP | TRB_TYPE(TRB_DATA);
+		if (start_cycle == 0)
+			field |= 0x1;
+		if (setup->bRequestType & USB_DIR_IN)
+			field |= TRB_DIR_IN;
+
+		remainder = xhci_td_remainder(xhci, 0,
+					   urb->transfer_buffer_length,
+					   urb->transfer_buffer_length,
+					   urb, 1);
+
+		length_field = TRB_LEN(urb->transfer_buffer_length) |
+			TRB_TD_SIZE(remainder) |
+			TRB_INTR_TARGET(0);
+
+		queue_trb(xhci, ep_ring, true,
+			  lower_32_bits(urb->transfer_dma),
+			  upper_32_bits(urb->transfer_dma),
+			  length_field,
+			  field);
+
+		/* Save the DMA address of the last TRB in the TD */
+		td->last_trb = ep_ring->enqueue;
+
+		/* Queue status TRB */
+		field = TRB_IOC | TRB_TYPE(TRB_STATUS);
+		if (!(setup->bRequestType & USB_DIR_IN))
+			field |= TRB_DIR_IN;
+
+		queue_trb(xhci, ep_ring, false,
+			  0,
+			  0,
+			  TRB_INTR_TARGET(0),
+			  field | ep_ring->cycle_state);
+	}
+
+	giveback_first_trb(xhci, slot_id, ep_index, 0, start_cycle, start_trb);
+	return 0;
+}
+
 static int count_isoc_trbs_needed(struct xhci_hcd *xhci,
 		struct urb *urb, int i)
 {
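
xhci_submit_single_step_set_feature() above is the xHCI half of the EHSET
SINGLE_STEP_SET_FEATURE compliance test; a test-fixture driver drives the two
phases. A hedged sketch of such a caller; the function name is illustrative,
and waiting on the SETUP-stage completion plus all error handling are elided:

    static int single_step_set_feature_sketch(struct usb_hcd *hcd,
                                              struct urb *urb)
    {
        struct xhci_hcd *xhci = hcd_to_xhci(hcd);
        unsigned long flags;
        int ret;

        /* phase 1: queue and ring only the SETUP stage */
        spin_lock_irqsave(&xhci->lock, flags);
        ret = xhci_submit_single_step_set_feature(hcd, urb, 1);
        spin_unlock_irqrestore(&xhci->lock, flags);
        if (ret)
            return ret;

        /* the compliance test calls for a 15 s pause here */
        msleep(15000);

        /* phase 2: queue the DATA and STATUS stages */
        spin_lock_irqsave(&xhci->lock, flags);
        ret = xhci_submit_single_step_set_feature(hcd, urb, 0);
        spin_unlock_irqrestore(&xhci->lock, flags);
        return ret;
    }
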
@@ -4178,7 +4328,7 @@
 	int ret;
 
 	xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
-		"Set TR Deq Ptr cmd, new deq seg = %p (0x%llx dma), new deq ptr = %p (0x%llx dma), new cycle = %u",
+		"Set TR Deq Ptr cmd, new deq seg = %pK (0x%llx dma), new deq ptr = %pK (0x%llx dma), new cycle = %u",
 		deq_state->new_deq_seg,
 		(unsigned long long)deq_state->new_deq_seg->dma,
 		deq_state->new_deq_ptr,
@@ -4190,7 +4340,7 @@
 				    deq_state->new_deq_ptr);
 	if (addr == 0) {
 		xhci_warn(xhci, "WARN Cannot submit Set TR Deq Ptr\n");
-		xhci_warn(xhci, "WARN deq seg = %p, deq pt = %p\n",
+		xhci_warn(xhci, "WARN deq seg = %pK, deq pt = %pK\n",
 			  deq_state->new_deq_seg, deq_state->new_deq_ptr);
 		return;
 	}
diff -ruw linux-4.4.115/drivers/usb/host/xhci-trace.h linux-4.4.115-fbx/drivers/usb/host/xhci-trace.h
--- linux-4.4.115/drivers/usb/host/xhci-trace.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/usb/host/xhci-trace.h	2019-01-22 16:16:27.347281216 +0100
@@ -103,7 +103,7 @@
 			((HCC_64BYTE_CONTEXT(xhci->hcc_params) + 1) * 32) *
 			((ctx->type == XHCI_CTX_TYPE_INPUT) + ep_num + 1));
 	),
-	TP_printk("\nctx_64=%d, ctx_type=%u, ctx_dma=@%llx, ctx_va=@%p",
+	TP_printk("\nctx_64=%d, ctx_type=%u, ctx_dma=@%llx, ctx_va=@%pK",
 			__entry->ctx_64, __entry->ctx_type,
 			(unsigned long long) __entry->ctx_dma, __entry->ctx_va
 	)
@@ -134,7 +134,7 @@
 		memcpy(__get_dynamic_array(trb), trb_va,
 			sizeof(struct xhci_generic_trb));
 	),
-	TP_printk("\ntrb_dma=@%llx, trb_va=@%p, status=%08x, flags=%08x",
+	TP_printk("\ntrb_dma=@%llx, trb_va=@%pK, status=%08x, flags=%08x",
 			(unsigned long long) __entry->dma, __entry->va,
 			__entry->status, __entry->flags
 	)
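
The %p to %pK conversions in the two files above are hardening, not a
functional change: %pK honors /proc/sys/kernel/kptr_restrict, so on this
kernel the printed address is zeroed for readers without CAP_SYSLOG
(kptr_restrict=1) or for everyone (kptr_restrict=2), instead of leaking raw
kernel pointers into dmesg and trace buffers. In miniature:

    #include <linux/printk.h>

    static void show_obj(void *obj)
    {
        pr_debug("obj at %pK\n", obj);  /* masked per kptr_restrict */
    }
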
diff -ruw linux-4.4.115/drivers/usb/Kconfig linux-4.4.115-fbx/drivers/usb/Kconfig
--- linux-4.4.115/drivers/usb/Kconfig	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/usb/Kconfig	2019-01-22 16:16:27.227280130 +0100
@@ -106,6 +106,8 @@
 
 source "drivers/usb/isp1760/Kconfig"
 
+source "drivers/usb/pd/Kconfig"
+
 comment "USB port drivers"
 
 if USB
diff -ruw linux-4.4.115/drivers/usb/Makefile linux-4.4.115-fbx/drivers/usb/Makefile
--- linux-4.4.115/drivers/usb/Makefile	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/usb/Makefile	2019-01-22 16:16:27.227280130 +0100
@@ -61,3 +61,5 @@
 obj-$(CONFIG_USB_COMMON)	+= common/
 
 obj-$(CONFIG_USBIP_CORE)	+= usbip/
+
+obj-$(CONFIG_USB_PD)		+= pd/
diff -ruw linux-4.4.115/drivers/usb/misc/Kconfig linux-4.4.115-fbx/drivers/usb/misc/Kconfig
--- linux-4.4.115/drivers/usb/misc/Kconfig	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/usb/misc/Kconfig	2019-10-29 09:26:25.037216820 +0100
@@ -268,3 +268,13 @@
 
 	  To compile this driver as a module, choose M here: the
 	  module will be called chaoskey.
+
+config USB_QTI_KS_BRIDGE
+	tristate "USB QTI kick start bridge"
+	depends on USB
+	help
+	  Say Y here if you have a QTI modem device connected via USB that
+	  will be bridged in kernel space. This driver works as a bridge to
+	  pass boot images, RAM dumps and EFS sync data.
+	  To compile this driver as a module, choose M here: the module
+	  will be called ks_bridge. If unsure, choose N.
diff -ruw linux-4.4.115/drivers/usb/misc/Makefile linux-4.4.115-fbx/drivers/usb/misc/Makefile
--- linux-4.4.115/drivers/usb/misc/Makefile	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/usb/misc/Makefile	2019-10-29 09:26:25.037216820 +0100
@@ -29,3 +29,5 @@
 
 obj-$(CONFIG_USB_SISUSBVGA)		+= sisusbvga/
 obj-$(CONFIG_USB_LINK_LAYER_TEST)	+= lvstest.o
+
+obj-$(CONFIG_USB_QTI_KS_BRIDGE)		+= ks_bridge.o
diff -ruw linux-4.4.115/drivers/usb/phy/Kconfig linux-4.4.115-fbx/drivers/usb/phy/Kconfig
--- linux-4.4.115/drivers/usb/phy/Kconfig	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/usb/phy/Kconfig	2019-10-29 09:26:25.045216898 +0100
@@ -6,6 +6,14 @@
 config USB_PHY
 	def_bool n
 
+config USB_OTG_WAKELOCK
+	bool "Hold a wakelock when USB connected"
+	depends on WAKELOCK
+	select USB_OTG_UTILS
+	help
+	  Select this to automatically hold a wakelock when USB is
+	  connected, preventing suspend.
+
 #
 # USB Transceiver Drivers
 #
@@ -165,6 +173,46 @@
 	  To compile this driver as a module, choose M here: the
 	  module will be called phy-qcom-8x16-usb.
 
+config USB_MSM_HSPHY
+	tristate "MSM HSUSB PHY Driver"
+	depends on ARCH_QCOM
+	select USB_PHY
+	help
+	  Enable this to support the High-speed USB transceiver on MSM chips.
+	  This driver supports the PHY which uses the QSCRATCH-based register
+	  set for its control sequences, normally paired with newer DWC3-based
+	  SuperSpeed controllers.
+
+config USB_MSM_SSPHY
+	tristate "MSM SSUSB PHY Driver"
+	depends on ARCH_QCOM
+	select USB_PHY
+	help
+	  Enable this to support the SuperSpeed USB transceiver on MSM chips.
+	  This driver supports the PHY which uses the QSCRATCH-based register
+	  set for its control sequences, normally paired with newer DWC3-based
+	  SuperSpeed controllers.
+
+config USB_MSM_SSPHY_QMP
+	tristate "MSM SSUSB QMP PHY Driver"
+	depends on ARCH_QCOM
+	select USB_PHY
+	help
+	  Enable this to support the SuperSpeed USB transceiver on MSM chips.
+	  This driver supports the PHY which uses the QSCRATCH-based register
+	  set for its control sequences, normally paired with newer DWC3-based
+	  SuperSpeed controllers.
+
+config MSM_QUSB_PHY
+	tristate "MSM QUSB2 PHY Driver"
+	depends on ARCH_QCOM
+	select USB_PHY
+	help
+	  Enable this to support the QUSB2 PHY on MSM chips. This driver supports
+	  the high-speed PHY which is usually paired with either the ChipIdea or
+	  Synopsys DWC3 USB IPs on MSM SoCs. This driver expects to configure the
+	  PHY with a dedicated register I/O memory region.
+
 config USB_MV_OTG
 	tristate "Marvell USB OTG support"
 	depends on USB_EHCI_MV && USB_MV_UDC && PM && USB_OTG
@@ -213,4 +261,13 @@
 	  Provides read/write operations to the ULPI phy register set for
 	  controllers with a viewport register (e.g. Chipidea/ARC controllers).
 
+config DUAL_ROLE_USB_INTF
+	bool "Generic DUAL ROLE sysfs interface"
+	depends on SYSFS && USB_PHY
+	help
+	  A generic sysfs interface to track and change the state of
+	  dual role usb phys. The usb phy drivers can register to
+	  this interface to expose their capabilities to userspace,
+	  thereby allowing userspace to change the port mode.
+
 endmenu
diff -ruw linux-4.4.115/drivers/usb/phy/Makefile linux-4.4.115-fbx/drivers/usb/phy/Makefile
--- linux-4.4.115/drivers/usb/phy/Makefile	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/usb/phy/Makefile	2019-01-22 16:16:27.371281434 +0100
@@ -3,6 +3,8 @@
 #
 obj-$(CONFIG_USB_PHY)			+= phy.o
 obj-$(CONFIG_OF)			+= of.o
+obj-$(CONFIG_USB_OTG_WAKELOCK)		+= otg-wakelock.o
+obj-$(CONFIG_DUAL_ROLE_USB_INTF)	+= class-dual-role.o
 
 # transceiver drivers, keep the list sorted
 
@@ -21,6 +23,10 @@
 obj-$(CONFIG_USB_ISP1301)		+= phy-isp1301.o
 obj-$(CONFIG_USB_MSM_OTG)		+= phy-msm-usb.o
 obj-$(CONFIG_USB_QCOM_8X16_PHY)	+= phy-qcom-8x16-usb.o
+obj-$(CONFIG_USB_MSM_HSPHY)     	+= phy-msm-hsusb.o
+obj-$(CONFIG_USB_MSM_SSPHY)     	+= phy-msm-ssusb.o
+obj-$(CONFIG_USB_MSM_SSPHY_QMP)     	+= phy-msm-ssusb-qmp.o
+obj-$(CONFIG_MSM_QUSB_PHY)              += phy-msm-qusb.o phy-msm-qusb-v2.o
 obj-$(CONFIG_USB_MV_OTG)		+= phy-mv-usb.o
 obj-$(CONFIG_USB_MXS_PHY)		+= phy-mxs-usb.o
 obj-$(CONFIG_USB_RCAR_PHY)		+= phy-rcar-usb.o
diff -ruw linux-4.4.115/drivers/video/fbdev/core/fbcmap.c linux-4.4.115-fbx/drivers/video/fbdev/core/fbcmap.c
--- linux-4.4.115/drivers/video/fbdev/core/fbcmap.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/video/fbdev/core/fbcmap.c	2019-10-29 09:26:25.085217290 +0100
@@ -203,6 +203,8 @@
 		return -EINVAL;
 	size *= sizeof(u16);
 
+
+
 	if (copy_to_user(to->red+tooff, from->red+fromoff, size))
 		return -EFAULT;
 	if (copy_to_user(to->green+tooff, from->green+fromoff, size))
diff -ruw linux-4.4.115/drivers/video/fbdev/core/fbmem.c linux-4.4.115-fbx/drivers/video/fbdev/core/fbmem.c
--- linux-4.4.115/drivers/video/fbdev/core/fbmem.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/video/fbdev/core/fbmem.c	2019-10-29 09:26:25.085217290 +0100
@@ -1085,7 +1085,7 @@
 EXPORT_SYMBOL(fb_blank);
 
 static long do_fb_ioctl(struct fb_info *info, unsigned int cmd,
-			unsigned long arg)
+			unsigned long arg, struct file *file)
 {
 	struct fb_ops *fb;
 	struct fb_var_screeninfo var;
@@ -1097,6 +1097,13 @@
 	void __user *argp = (void __user *)arg;
 	long ret = 0;
 
+	memset(&var, 0, sizeof(var));
+	memset(&fix, 0, sizeof(fix));
+	memset(&con2fb, 0, sizeof(con2fb));
+	memset(&cmap_from, 0, sizeof(cmap_from));
+	memset(&cmap, 0, sizeof(cmap));
+	memset(&event, 0, sizeof(event));
+
 	switch (cmd) {
 	case FBIOGET_VSCREENINFO:
 		if (!lock_fb_info(info))
@@ -1212,14 +1219,13 @@
 		console_unlock();
 		break;
 	default:
-		if (!lock_fb_info(info))
-			return -ENODEV;
 		fb = info->fbops;
-		if (fb->fb_ioctl)
+		if (fb->fb_ioctl_v2)
+			ret = fb->fb_ioctl_v2(info, cmd, arg, file);
+		else if (fb->fb_ioctl)
 			ret = fb->fb_ioctl(info, cmd, arg);
 		else
 			ret = -ENOTTY;
-		unlock_fb_info(info);
 	}
 	return ret;
 }
@@ -1230,7 +1236,7 @@
 
 	if (!info)
 		return -ENODEV;
-	return do_fb_ioctl(info, cmd, arg);
+	return do_fb_ioctl(info, cmd, arg, file);
 }
 
 #ifdef CONFIG_COMPAT
@@ -1261,7 +1267,7 @@
 };
 
 static int fb_getput_cmap(struct fb_info *info, unsigned int cmd,
-			  unsigned long arg)
+			  unsigned long arg, struct file *file)
 {
 	struct fb_cmap_user __user *cmap;
 	struct fb_cmap32 __user *cmap32;
@@ -1284,7 +1290,7 @@
 	    put_user(compat_ptr(data), &cmap->transp))
 		return -EFAULT;
 
-	err = do_fb_ioctl(info, cmd, (unsigned long) cmap);
+	err = do_fb_ioctl(info, cmd, (unsigned long) cmap, file);
 
 	if (!err) {
 		if (copy_in_user(&cmap32->start,
@@ -1329,7 +1335,7 @@
 }
 
 static int fb_get_fscreeninfo(struct fb_info *info, unsigned int cmd,
-			      unsigned long arg)
+			      unsigned long arg, struct file *file)
 {
 	mm_segment_t old_fs;
 	struct fb_fix_screeninfo fix;
@@ -1340,7 +1346,7 @@
 
 	old_fs = get_fs();
 	set_fs(KERNEL_DS);
-	err = do_fb_ioctl(info, cmd, (unsigned long) &fix);
+	err = do_fb_ioctl(info, cmd, (unsigned long) &fix, file);
 	set_fs(old_fs);
 
 	if (!err)
@@ -1367,20 +1373,22 @@
 	case FBIOPUT_CON2FBMAP:
 		arg = (unsigned long) compat_ptr(arg);
 	case FBIOBLANK:
-		ret = do_fb_ioctl(info, cmd, arg);
+		ret = do_fb_ioctl(info, cmd, arg, file);
 		break;
 
 	case FBIOGET_FSCREENINFO:
-		ret = fb_get_fscreeninfo(info, cmd, arg);
+		ret = fb_get_fscreeninfo(info, cmd, arg, file);
 		break;
 
 	case FBIOGETCMAP:
 	case FBIOPUTCMAP:
-		ret = fb_getput_cmap(info, cmd, arg);
+		ret = fb_getput_cmap(info, cmd, arg, file);
 		break;
 
 	default:
-		if (fb->fb_compat_ioctl)
+		if (fb->fb_compat_ioctl_v2)
+			ret = fb->fb_compat_ioctl_v2(info, cmd, arg, file);
+		else if (fb->fb_compat_ioctl)
 			ret = fb->fb_compat_ioctl(info, cmd, arg);
 		break;
 	}
@@ -1460,6 +1468,7 @@
 		goto out;
 	}
 	file->private_data = info;
+	info->file = file;
 	if (info->fbops->fb_open) {
 		res = info->fbops->fb_open(info,1);
 		if (res)
@@ -1484,6 +1493,7 @@
 	struct fb_info * const info = file->private_data;
 
 	mutex_lock(&info->lock);
+	info->file = file;
 	if (info->fbops->fb_release)
 		info->fbops->fb_release(info,1);
 	module_put(info->fbops->owner);
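
The fb_ioctl_v2/fb_compat_ioctl_v2 hooks threaded through do_fb_ioctl() above
extend struct fb_ops (the field additions presumably live in the fb.h part of
this patch, outside this excerpt) so a driver can see which struct file issued
the ioctl and keep per-client state. A minimal driver-side sketch with
hypothetical names:

    #define MYFB_IOC_EXAMPLE    _IO('F', 0xb0)  /* hypothetical ioctl */

    static int myfb_ioctl_v2(struct fb_info *info, unsigned int cmd,
                             unsigned long arg, struct file *file)
    {
        /*
         * 'file' identifies the client, so per-fd fences and buffer
         * ownership can be keyed off it; bare fb_ioctl() cannot do this.
         */
        switch (cmd) {
        case MYFB_IOC_EXAMPLE:
            return 0;
        default:
            return -ENOTTY;
        }
    }

    static struct fb_ops myfb_ops = {
        .owner       = THIS_MODULE,
        .fb_ioctl_v2 = myfb_ioctl_v2,
    };
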
diff -ruw linux-4.4.115/drivers/video/fbdev/Kconfig linux-4.4.115-fbx/drivers/video/fbdev/Kconfig
--- linux-4.4.115/drivers/video/fbdev/Kconfig	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/video/fbdev/Kconfig	2019-10-29 09:26:25.081217251 +0100
@@ -2328,6 +2328,19 @@
 	  Select this option if display contents should be inherited as set by
 	  the bootloader.
 
+config FB_MSM
+	tristate "MSM Framebuffer support"
+	depends on FB && ARCH_QCOM
+	select FB_CFB_FILLRECT
+	select FB_CFB_COPYAREA
+	select FB_CFB_IMAGEBLIT
+	select SYNC
+	select SW_SYNC
+	---help---
+	  The MSM driver implements a frame buffer interface that provides
+	  access to the display hardware and a way for users to display
+	  graphics on connected display panels.
+
 config FB_MX3
 	tristate "MX3 Framebuffer support"
 	depends on FB && MX3_IPU
@@ -2449,6 +2462,7 @@
 source "drivers/video/fbdev/omap2/Kconfig"
 source "drivers/video/fbdev/exynos/Kconfig"
 source "drivers/video/fbdev/mmp/Kconfig"
+source "drivers/video/fbdev/msm/Kconfig"
 
 config FB_SH_MOBILE_MERAM
 	tristate "SuperH Mobile MERAM read ahead support"
diff -ruw linux-4.4.115/drivers/video/fbdev/Makefile linux-4.4.115-fbx/drivers/video/fbdev/Makefile
--- linux-4.4.115/drivers/video/fbdev/Makefile	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/video/fbdev/Makefile	2019-01-22 16:16:27.443282086 +0100
@@ -126,6 +126,11 @@
 obj-$(CONFIG_XEN_FBDEV_FRONTEND)  += xen-fbfront.o
 obj-$(CONFIG_FB_CARMINE)          += carminefb.o
 obj-$(CONFIG_FB_MB862XX)	  += mb862xx/
+ifeq ($(CONFIG_FB_MSM),y)
+obj-y                             += msm/
+else
+obj-$(CONFIG_MSM_DBA)             += msm/msm_dba/
+endif
 obj-$(CONFIG_FB_NUC900)           += nuc900fb.o
 obj-$(CONFIG_FB_JZ4740)		  += jz4740_fb.o
 obj-$(CONFIG_FB_PUV3_UNIGFX)      += fb-puv3.o
diff -ruw linux-4.4.115/drivers/video/hdmi.c linux-4.4.115-fbx/drivers/video/hdmi.c
--- linux-4.4.115/drivers/video/hdmi.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/video/hdmi.c	2019-10-29 09:26:25.129217720 +0100
@@ -533,6 +533,10 @@
 		return "4:3";
 	case HDMI_PICTURE_ASPECT_16_9:
 		return "16:9";
+	case HDMI_PICTURE_ASPECT_64_27:
+		return "64:27";
+	case HDMI_PICTURE_ASPECT_256_135:
+		return "256:135";
 	case HDMI_PICTURE_ASPECT_RESERVED:
 		return "Reserved";
 	}
diff -ruw linux-4.4.115/drivers/video/Kconfig linux-4.4.115-fbx/drivers/video/Kconfig
--- linux-4.4.115/drivers/video/Kconfig	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/video/Kconfig	2019-01-22 16:16:27.431281977 +0100
@@ -21,14 +21,17 @@
 
 source "drivers/gpu/host1x/Kconfig"
 source "drivers/gpu/ipu-v3/Kconfig"
+source "drivers/gpu/msm/Kconfig"
 
 source "drivers/gpu/drm/Kconfig"
 
+source "drivers/video/msm/ba/Kconfig"
 menu "Frame buffer Devices"
 source "drivers/video/fbdev/Kconfig"
 endmenu
 
 source "drivers/video/backlight/Kconfig"
+source "drivers/video/adf/Kconfig"
 
 config VGASTATE
        tristate
diff -ruw linux-4.4.115/drivers/video/Makefile linux-4.4.115-fbx/drivers/video/Makefile
--- linux-4.4.115/drivers/video/Makefile	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/drivers/video/Makefile	2019-01-22 16:16:27.431281977 +0100
@@ -1,6 +1,8 @@
+obj-$(CONFIG_MSM_BA_V4L2)         += msm/ba/
 obj-$(CONFIG_VGASTATE)            += vgastate.o
 obj-$(CONFIG_HDMI)                += hdmi.o
 
+obj-$(CONFIG_ADF)		  += adf/
 obj-$(CONFIG_VT)		  += console/
 obj-$(CONFIG_LOGO)		  += logo/
 obj-y				  += backlight/
diff -ruw linux-4.4.115/fs/attr.c linux-4.4.115-fbx/fs/attr.c
--- linux-4.4.115/fs/attr.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/fs/attr.c	2019-01-22 16:16:27.695284368 +0100
@@ -187,7 +187,7 @@
  * the file open for write, as there can be no conflicting delegation in
  * that case.
  */
-int notify_change(struct dentry * dentry, struct iattr * attr, struct inode **delegated_inode)
+int notify_change2(struct vfsmount *mnt, struct dentry * dentry, struct iattr * attr, struct inode **delegated_inode)
 {
 	struct inode *inode = dentry->d_inode;
 	umode_t mode = inode->i_mode;
@@ -211,7 +211,7 @@
 			return -EPERM;
 
 		if (!inode_owner_or_capable(inode)) {
-			error = inode_permission(inode, MAY_WRITE);
+			error = inode_permission2(mnt, inode, MAY_WRITE);
 			if (error)
 				return error;
 		}
@@ -277,7 +277,9 @@
 	if (error)
 		return error;
 
-	if (inode->i_op->setattr)
+	if (mnt && inode->i_op->setattr2)
+		error = inode->i_op->setattr2(mnt, dentry, attr);
+	else if (inode->i_op->setattr)
 		error = inode->i_op->setattr(dentry, attr);
 	else
 		error = simple_setattr(dentry, attr);
@@ -290,4 +292,10 @@
 
 	return error;
 }
+EXPORT_SYMBOL(notify_change2);
+
+int notify_change(struct dentry * dentry, struct iattr * attr, struct inode **delegated_inode)
+{
+	return notify_change2(NULL, dentry, attr, delegated_inode);
+}
 EXPORT_SYMBOL(notify_change);
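
The shape above, a new *2 entry point that threads the vfsmount through while
the historical symbol is kept as a NULL-mount wrapper, recurs across this
patch (inode_permission2, do_truncate2, the setattr2 inode op). A sketch of a
caller that actually holds a path, loosely modeled on chmod_common();
delegation retry and error handling are elided:

    static int chmod_sketch(struct path *path, umode_t mode)
    {
        struct inode *inode = path->dentry->d_inode;
        struct iattr newattrs;
        int err;

        newattrs.ia_valid = ATTR_MODE | ATTR_CTIME;
        newattrs.ia_mode = (mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO);

        mutex_lock(&inode->i_mutex);
        /* mount-aware: permission checks can now see path->mnt */
        err = notify_change2(path->mnt, path->dentry, &newattrs, NULL);
        mutex_unlock(&inode->i_mutex);
        return err;
    }
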
diff -ruw linux-4.4.115/fs/binfmt_elf.c linux-4.4.115-fbx/fs/binfmt_elf.c
--- linux-4.4.115/fs/binfmt_elf.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/fs/binfmt_elf.c	2019-10-29 09:26:25.149217916 +0100
@@ -651,7 +651,7 @@
 
 	if ((current->flags & PF_RANDOMIZE) &&
 		!(current->personality & ADDR_NO_RANDOMIZE)) {
-		random_variable = (unsigned long) get_random_int();
+		random_variable = get_random_long();
 		random_variable &= STACK_RND_MASK;
 		random_variable <<= PAGE_SHIFT;
 	}
diff -ruw linux-4.4.115/fs/block_dev.c linux-4.4.115-fbx/fs/block_dev.c
--- linux-4.4.115/fs/block_dev.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/fs/block_dev.c	2019-01-22 16:16:27.703284440 +0100
@@ -532,6 +532,7 @@
 #ifdef CONFIG_SYSFS
 	INIT_LIST_HEAD(&bdev->bd_holder_disks);
 #endif
+	bdev->bd_bdi = &noop_backing_dev_info;
 	inode_init_once(&ei->vfs_inode);
 	/* Initialize mutex for freeze. */
 	mutex_init(&bdev->bd_fsfreeze_mutex);
@@ -557,6 +558,12 @@
 	}
 	list_del_init(&bdev->bd_list);
 	spin_unlock(&bdev_lock);
+	/* Detach inode from wb early as bdi_put() may free bdi->wb */
+	inode_detach_wb(inode);
+	if (bdev->bd_bdi != &noop_backing_dev_info) {
+		bdi_put(bdev->bd_bdi);
+		bdev->bd_bdi = &noop_backing_dev_info;
+	}
 }
 
 static const struct super_operations bdev_sops = {
@@ -623,6 +630,21 @@
 
 static LIST_HEAD(all_bdevs);
 
+/*
+ * If there is a bdev inode for this device, unhash it so that it gets evicted
+ * as soon as the last inode reference is dropped.
+ */
+void bdev_unhash_inode(dev_t dev)
+{
+	struct inode *inode;
+
+	inode = ilookup5(blockdev_superblock, hash(dev), bdev_test, &dev);
+	if (inode) {
+		remove_inode_hash(inode);
+		iput(inode);
+	}
+}
+
 struct block_device *bdget(dev_t dev)
 {
 	struct block_device *bdev;
@@ -1201,6 +1223,7 @@
 		bdev->bd_disk = disk;
 		bdev->bd_queue = disk->queue;
 		bdev->bd_contains = bdev;
+
 		bdev->bd_inode->i_flags = disk->fops->direct_access ? S_DAX : 0;
 		if (!partno) {
 			ret = -ENXIO;
@@ -1271,6 +1294,9 @@
 			    (bdev->bd_part->nr_sects % (PAGE_SIZE / 512)))
 				bdev->bd_inode->i_flags &= ~S_DAX;
 		}
+
+		if (bdev->bd_bdi == &noop_backing_dev_info)
+			bdev->bd_bdi = bdi_get(disk->queue->backing_dev_info);
 	} else {
 		if (bdev->bd_contains == bdev) {
 			ret = 0;
@@ -1523,12 +1549,6 @@
 		kill_bdev(bdev);
 
 		bdev_write_inode(bdev);
-		/*
-		 * Detaching bdev inode from its wb in __destroy_inode()
-		 * is too late: the queue which embeds its bdi (along with
-		 * root wb) can be gone as soon as we put_disk() below.
-		 */
-		inode_detach_wb(bdev->bd_inode);
 	}
 	if (bdev->bd_contains == bdev) {
 		if (disk->fops->release)
diff -ruw linux-4.4.115/fs/buffer.c linux-4.4.115-fbx/fs/buffer.c
--- linux-4.4.115/fs/buffer.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/fs/buffer.c	2019-10-29 09:26:25.193218347 +0100
@@ -621,6 +621,18 @@
 }
 EXPORT_SYMBOL(mark_buffer_dirty_inode);
 
+#ifdef CONFIG_BLK_DEV_IO_TRACE
+static inline void save_dirty_task(struct page *page)
+{
+	/* Save the task that is dirtying this page */
+	page->tsk_dirty = current;
+}
+#else
+static inline void save_dirty_task(struct page *page)
+{
+}
+#endif
+
 /*
  * Mark the page dirty, and set it dirty in the radix tree, and mark the inode
  * dirty.
@@ -641,6 +653,7 @@
 		account_page_dirtied(page, mapping, memcg);
 		radix_tree_tag_set(&mapping->page_tree,
 				page_index(page), PAGECACHE_TAG_DIRTY);
+		save_dirty_task(page);
 	}
 	spin_unlock_irqrestore(&mapping->tree_lock, flags);
 }
@@ -1466,12 +1479,48 @@
 	return 0;
 }
 
+static void __evict_bh_lru(void *arg)
+{
+	struct bh_lru *b = &get_cpu_var(bh_lrus);
+	struct buffer_head *bh = arg;
+	int i;
+
+	for (i = 0; i < BH_LRU_SIZE; i++) {
+		if (b->bhs[i] == bh) {
+			brelse(b->bhs[i]);
+			b->bhs[i] = NULL;
+			goto out;
+		}
+	}
+out:
+	put_cpu_var(bh_lrus);
+}
+
+static bool bh_exists_in_lru(int cpu, void *arg)
+{
+	struct bh_lru *b = per_cpu_ptr(&bh_lrus, cpu);
+	struct buffer_head *bh = arg;
+	int i;
+
+	for (i = 0; i < BH_LRU_SIZE; i++) {
+		if (b->bhs[i] == bh)
+			return 1;
+	}
+
+	return 0;
+
+}
 void invalidate_bh_lrus(void)
 {
 	on_each_cpu_cond(has_bh_in_lru, invalidate_bh_lru, NULL, 1, GFP_KERNEL);
 }
 EXPORT_SYMBOL_GPL(invalidate_bh_lrus);
 
+static void evict_bh_lrus(struct buffer_head *bh)
+{
+	on_each_cpu_cond(bh_exists_in_lru, __evict_bh_lru, bh, 1, GFP_ATOMIC);
+}
+
 void set_bh_page(struct buffer_head *bh,
 		struct page *page, unsigned long offset)
 {
@@ -3192,8 +3241,15 @@
 	do {
 		if (buffer_write_io_error(bh) && page->mapping)
 			set_bit(AS_EIO, &page->mapping->flags);
+		if (buffer_busy(bh)) {
+			/*
+			 * Check if the busy failure was due to an
+			 * outstanding LRU reference
+			 */
+			evict_bh_lrus(bh);
 		if (buffer_busy(bh))
 			goto failed;
+		}
 		bh = bh->b_this_page;
 	} while (bh != head);
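
evict_bh_lrus() above reuses the on_each_cpu_cond() idiom already used by
invalidate_bh_lrus(): the condition callback runs locklessly against every
online CPU's data, and the work function is then IPI'd only to the CPUs that
matched. The idiom in miniature, with illustrative names:

    #include <linux/smp.h>
    #include <linux/percpu.h>

    static DEFINE_PER_CPU(int, pending);

    static bool cpu_has_work(int cpu, void *arg)
    {
        return per_cpu(pending, cpu) != 0;  /* cheap, lockless probe */
    }

    static void do_work(void *arg)
    {
        this_cpu_write(pending, 0);     /* runs via IPI on matching CPUs */
    }

    static void flush_pending_sketch(void)
    {
        on_each_cpu_cond(cpu_has_work, do_work, NULL, 1, GFP_ATOMIC);
    }
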
 
diff -ruw linux-4.4.115/fs/compat_ioctl.c linux-4.4.115-fbx/fs/compat_ioctl.c
--- linux-4.4.115/fs/compat_ioctl.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/fs/compat_ioctl.c	2019-10-29 09:26:25.225218660 +0100
@@ -871,6 +871,9 @@
 COMPATIBLE_IOCTL(TIOCSPTLCK)
 COMPATIBLE_IOCTL(TIOCSERGETLSR)
 COMPATIBLE_IOCTL(TIOCSIG)
+COMPATIBLE_IOCTL(TIOCPMGET)
+COMPATIBLE_IOCTL(TIOCPMPUT)
+COMPATIBLE_IOCTL(TIOCPMACT)
 #ifdef TIOCSRS485
 COMPATIBLE_IOCTL(TIOCSRS485)
 #endif
diff -ruw linux-4.4.115/fs/coredump.c linux-4.4.115-fbx/fs/coredump.c
--- linux-4.4.115/fs/coredump.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/fs/coredump.c	2019-01-22 16:16:27.779285128 +0100
@@ -720,7 +720,7 @@
 			goto close_fail;
 		if (!(cprm.file->f_mode & FMODE_CAN_WRITE))
 			goto close_fail;
-		if (do_truncate(cprm.file->f_path.dentry, 0, 0, cprm.file))
+		if (do_truncate2(cprm.file->f_path.mnt, cprm.file->f_path.dentry, 0, 0, cprm.file))
 			goto close_fail;
 	}
 
diff -ruw linux-4.4.115/fs/dcache.c linux-4.4.115-fbx/fs/dcache.c
--- linux-4.4.115/fs/dcache.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/fs/dcache.c	2019-10-29 09:26:25.233218738 +0100
@@ -1392,7 +1392,7 @@
 		goto out;
 
 	if (dentry->d_flags & DCACHE_SHRINK_LIST) {
-		data->found++;
+		goto out;
 	} else {
 		if (dentry->d_flags & DCACHE_LRU_LIST)
 			d_lru_del(dentry);
@@ -3054,6 +3054,7 @@
 		return ERR_PTR(error);
 	return res;
 }
+EXPORT_SYMBOL(d_absolute_path);
 
 /*
  * same as __d_path but appends "(deleted)" for unlinked files.
diff -ruw linux-4.4.115/fs/direct-io.c linux-4.4.115-fbx/fs/direct-io.c
--- linux-4.4.115/fs/direct-io.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/fs/direct-io.c	2019-01-22 16:16:27.787285201 +0100
@@ -399,6 +399,7 @@
 	if (dio->is_async && dio->rw == READ && dio->should_dirty)
 		bio_set_pages_dirty(bio);
 
+	bio->bi_dio_inode = dio->inode;
 	dio->bio_bdev = bio->bi_bdev;
 
 	if (sdio->submit_io) {
@@ -413,6 +414,19 @@
 	sdio->logical_offset_in_bio = 0;
 }
 
+struct inode *dio_bio_get_inode(struct bio *bio)
+{
+	struct inode *inode = NULL;
+
+	if (bio == NULL)
+		return NULL;
+
+	inode = bio->bi_dio_inode;
+
+	return inode;
+}
+EXPORT_SYMBOL(dio_bio_get_inode);
+
 /*
  * Release any resources in case of a failure
  */
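
dio_bio_get_inode() gives lower layers a way back from an in-flight direct-I/O
bio to the inode it serves; the intended consumer appears to be the
per-file-key (PFK) machinery (note the linux/pfk.h include in the ext4 hunks
below), which must pick an encryption key at bio-submission time. A hedged
consumer sketch; the key-lookup helper is a stand-in, not a real function from
this patch:

    static int key_for_inode_sketch(struct inode *inode)
    {
        return 0;   /* stand-in for a per-inode key lookup */
    }

    static int key_for_bio(struct bio *bio)
    {
        struct inode *inode = dio_bio_get_inode(bio);

        if (!inode)
            return 0;   /* bio did not come from direct I/O */
        return key_for_inode_sketch(inode);
    }
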
diff -ruw linux-4.4.115/fs/drop_caches.c linux-4.4.115-fbx/fs/drop_caches.c
--- linux-4.4.115/fs/drop_caches.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/fs/drop_caches.c	2019-10-29 09:26:25.233218738 +0100
@@ -13,7 +13,7 @@
 /* A global variable is a bit ugly, but it keeps the code simple */
 int sysctl_drop_caches;
 
-static void drop_pagecache_sb(struct super_block *sb, void *unused)
+void drop_pagecache_sb(struct super_block *sb, void *unused)
 {
 	struct inode *inode, *toput_inode = NULL;
 
diff -ruw linux-4.4.115/fs/ecryptfs/Makefile linux-4.4.115-fbx/fs/ecryptfs/Makefile
--- linux-4.4.115/fs/ecryptfs/Makefile	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/fs/ecryptfs/Makefile	2019-01-22 16:16:27.791285237 +0100
@@ -4,7 +4,7 @@
 
 obj-$(CONFIG_ECRYPT_FS) += ecryptfs.o
 
-ecryptfs-y := dentry.o file.o inode.o main.o super.o mmap.o read_write.o \
+ecryptfs-y := dentry.o file.o inode.o main.o super.o mmap.o read_write.o events.o \
 	      crypto.o keystore.o kthread.o debug.o
 
 ecryptfs-$(CONFIG_ECRYPT_FS_MESSAGING) += messaging.o miscdev.o
diff -ruw linux-4.4.115/fs/eventpoll.c linux-4.4.115-fbx/fs/eventpoll.c
--- linux-4.4.115/fs/eventpoll.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/fs/eventpoll.c	2019-10-29 09:26:25.237218777 +0100
@@ -34,6 +34,7 @@
 #include <linux/mutex.h>
 #include <linux/anon_inodes.h>
 #include <linux/device.h>
+#include <linux/freezer.h>
 #include <asm/uaccess.h>
 #include <asm/io.h>
 #include <asm/mman.h>
@@ -1598,7 +1599,7 @@
 {
 	int res = 0, eavail, timed_out = 0;
 	unsigned long flags;
-	long slack = 0;
+	u64 slack = 0;
 	wait_queue_t wait;
 	ktime_t expires, *to = NULL;
 
@@ -1645,7 +1646,8 @@
 			}
 
 			spin_unlock_irqrestore(&ep->lock, flags);
-			if (!schedule_hrtimeout_range(to, slack, HRTIMER_MODE_ABS))
+			if (!freezable_schedule_hrtimeout_range(to, slack,
+								HRTIMER_MODE_ABS))
 				timed_out = 1;
 
 			spin_lock_irqsave(&ep->lock, flags);
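
Switching epoll to freezable_schedule_hrtimeout_range() lets the freezer treat
a task blocked in epoll_wait() as frozen, so suspend does not stall on idle
pollers. The wrapper's shape, sketched after the freezer.h macro it expands
to:

    #include <linux/freezer.h>
    #include <linux/hrtimer.h>

    static long freezable_sleep_sketch(ktime_t *to, u64 slack)
    {
        long ret;

        freezer_do_not_count();  /* freezer may skip us while we block */
        ret = schedule_hrtimeout_range(to, slack, HRTIMER_MODE_ABS);
        freezer_count();         /* freeze here if one arrived meanwhile */
        return ret;
    }
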
diff -ruw linux-4.4.115/fs/exec.c linux-4.4.115-fbx/fs/exec.c
--- linux-4.4.115/fs/exec.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/fs/exec.c	2019-10-29 09:26:25.237218777 +0100
@@ -1153,8 +1153,10 @@
 void would_dump(struct linux_binprm *bprm, struct file *file)
 {
 	struct inode *inode = file_inode(file);
-	if (inode_permission(inode, MAY_READ) < 0) {
+
+	if (inode_permission2(file->f_path.mnt, inode, MAY_READ) < 0) {
 		struct user_namespace *old, *user_ns;
+
 		bprm->interp_flags |= BINPRM_FLAGS_ENFORCE_NONDUMP;
 
 		/* Ensure mm->user_ns contains the executable */
@@ -1537,6 +1539,23 @@
 		return PTR_ERR(filename);
 
 	/*
+	 * handle current->exec_mode:
+	 * - if unlimited, then nothing to do.
+	 * - if once, then set it to denied and continue (next execve
+	 *   after this one will fail).
+	 * - if denied, then effectively fail the execve call with EPERM.
+	 */
+	switch (current->exec_mode) {
+	case EXEC_MODE_UNLIMITED:
+		break;
+	case EXEC_MODE_ONCE:
+		current->exec_mode = EXEC_MODE_DENIED;
+		break;
+	case EXEC_MODE_DENIED:
+		return -EPERM;
+	}
+
+	/*
 	 * We move the actual failure in case of RLIMIT_NPROC excess from
 	 * set*uid() to execve() because too many poorly written programs
 	 * don't check setuid() return code.  Here we additionally recheck
diff -ruw linux-4.4.115/fs/ext4/dir.c linux-4.4.115-fbx/fs/ext4/dir.c
--- linux-4.4.115/fs/ext4/dir.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/fs/ext4/dir.c	2019-10-29 09:26:25.253218934 +0100
@@ -163,8 +163,11 @@
 					index, 1);
 			file->f_ra.prev_pos = (loff_t)index << PAGE_CACHE_SHIFT;
 			bh = ext4_bread(NULL, inode, map.m_lblk, 0);
-			if (IS_ERR(bh))
-				return PTR_ERR(bh);
+			if (IS_ERR(bh)) {
+				err = PTR_ERR(bh);
+				bh = NULL;
+				goto errout;
+			}
 		}
 
 		if (!bh) {
diff -ruw linux-4.4.115/fs/ext4/ext4_crypto.h linux-4.4.115-fbx/fs/ext4/ext4_crypto.h
--- linux-4.4.115/fs/ext4/ext4_crypto.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/fs/ext4/ext4_crypto.h	2019-01-22 16:16:27.819285490 +0100
@@ -12,6 +12,7 @@
 #define _EXT4_CRYPTO_H
 
 #include <linux/fs.h>
+#include <linux/pfk.h>
 
 #define EXT4_KEY_DESCRIPTOR_SIZE 8
 
@@ -58,9 +59,12 @@
 #define EXT4_XTS_TWEAK_SIZE 16
 #define EXT4_AES_128_ECB_KEY_SIZE 16
 #define EXT4_AES_256_GCM_KEY_SIZE 32
+#define EXT4_AES_256_ECB_KEY_SIZE 32
 #define EXT4_AES_256_CBC_KEY_SIZE 32
 #define EXT4_AES_256_CTS_KEY_SIZE 32
+#define EXT4_AES_256_HEH_KEY_SIZE 32
 #define EXT4_AES_256_XTS_KEY_SIZE 64
+#define EXT4_PRIVATE_KEY_SIZE 64
 #define EXT4_MAX_KEY_SIZE 64
 
 #define EXT4_KEY_DESC_PREFIX "ext4:"
@@ -78,9 +82,13 @@
 	char		ci_filename_mode;
 	char		ci_flags;
 	struct crypto_ablkcipher *ci_ctfm;
+	struct key	*ci_keyring_key;
 	char		ci_master_key[EXT4_KEY_DESCRIPTOR_SIZE];
+	char		ci_raw_key[EXT4_MAX_KEY_SIZE];
 };
 
+
+
 #define EXT4_CTX_REQUIRES_FREE_ENCRYPT_FL             0x00000001
 #define EXT4_WRITE_PATH_FL			      0x00000002
 
@@ -113,6 +121,7 @@
 {
 	switch (mode) {
 	case EXT4_ENCRYPTION_MODE_AES_256_XTS:
+	case EXT4_ENCRYPTION_MODE_PRIVATE:
 		return EXT4_AES_256_XTS_KEY_SIZE;
 	case EXT4_ENCRYPTION_MODE_AES_256_GCM:
 		return EXT4_AES_256_GCM_KEY_SIZE;
@@ -120,6 +129,8 @@
 		return EXT4_AES_256_CBC_KEY_SIZE;
 	case EXT4_ENCRYPTION_MODE_AES_256_CTS:
 		return EXT4_AES_256_CTS_KEY_SIZE;
+	case EXT4_ENCRYPTION_MODE_AES_256_HEH:
+		return EXT4_AES_256_HEH_KEY_SIZE;
 	default:
 		BUG();
 	}
diff -ruw linux-4.4.115/fs/ext4/ext4.h linux-4.4.115-fbx/fs/ext4/ext4.h
--- linux-4.4.115/fs/ext4/ext4.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/fs/ext4/ext4.h	2019-10-29 09:26:25.253218934 +0100
@@ -589,6 +589,8 @@
 #define EXT4_ENCRYPTION_MODE_AES_256_GCM	2
 #define EXT4_ENCRYPTION_MODE_AES_256_CBC	3
 #define EXT4_ENCRYPTION_MODE_AES_256_CTS	4
+#define EXT4_ENCRYPTION_MODE_PRIVATE		127
+#define EXT4_ENCRYPTION_MODE_AES_256_HEH	126
 
 #include "ext4_crypto.h"
 
@@ -1441,7 +1443,7 @@
 	struct list_head s_es_list;	/* List of inodes with reclaimable extents */
 	long s_es_nr_inode;
 	struct ext4_es_stats s_es_stats;
-	struct mb_cache *s_mb_cache;
+	struct mb2_cache *s_mb_cache;
 	spinlock_t s_es_lock ____cacheline_aligned_in_smp;
 
 	/* Ratelimit ext4 messages. */
@@ -2269,7 +2271,8 @@
 			  struct page *plaintext_page,
 			  gfp_t gfp_flags);
 int ext4_decrypt(struct page *page);
-int ext4_encrypted_zeroout(struct inode *inode, struct ext4_extent *ex);
+int ext4_encrypted_zeroout(struct inode *inode, ext4_lblk_t lblk,
+			   ext4_fsblk_t pblk, ext4_lblk_t len);
 extern const struct dentry_operations ext4_encrypted_d_ops;
 
 #ifdef CONFIG_EXT4_FS_ENCRYPTION
@@ -2333,17 +2336,37 @@
 /* crypto_key.c */
 void ext4_free_crypt_info(struct ext4_crypt_info *ci);
 void ext4_free_encryption_info(struct inode *inode, struct ext4_crypt_info *ci);
+int _ext4_get_encryption_info(struct inode *inode);
 
 #ifdef CONFIG_EXT4_FS_ENCRYPTION
 int ext4_has_encryption_key(struct inode *inode);
 
-int ext4_get_encryption_info(struct inode *inode);
+static inline int ext4_get_encryption_info(struct inode *inode)
+{
+	struct ext4_crypt_info *ci = EXT4_I(inode)->i_crypt_info;
+
+	if (!ci ||
+	    (ci->ci_keyring_key &&
+	     (ci->ci_keyring_key->flags & ((1 << KEY_FLAG_INVALIDATED) |
+					   (1 << KEY_FLAG_REVOKED) |
+					   (1 << KEY_FLAG_DEAD)))))
+		return _ext4_get_encryption_info(inode);
+	return 0;
+}
 
 static inline struct ext4_crypt_info *ext4_encryption_info(struct inode *inode)
 {
 	return EXT4_I(inode)->i_crypt_info;
 }
 
+static inline int ext4_using_hardware_encryption(struct inode *inode)
+{
+	struct ext4_crypt_info *ci = ext4_encryption_info(inode);
+
+	return S_ISREG(inode->i_mode) && ci &&
+		ci->ci_data_mode == EXT4_ENCRYPTION_MODE_PRIVATE;
+}
+
 #else
 static inline int ext4_has_encryption_key(struct inode *inode)
 {
@@ -2357,6 +2380,10 @@
 {
 	return NULL;
 }
+static inline int ext4_using_hardware_encryption(struct inode *inode)
+{
+	return 0;
+}
 #endif
 
 
@@ -2456,7 +2483,8 @@
 		ext4_group_t i, struct ext4_group_desc *desc);
 extern int ext4_group_add_blocks(handle_t *handle, struct super_block *sb,
 				ext4_fsblk_t block, unsigned long count);
-extern int ext4_trim_fs(struct super_block *, struct fstrim_range *);
+extern int ext4_trim_fs(struct super_block *, struct fstrim_range *,
+				unsigned long blkdev_flags);
 
 /* inode.c */
 int ext4_inode_is_fast_symlink(struct inode *inode);
@@ -2512,6 +2540,8 @@
 extern qsize_t *ext4_get_reserved_space(struct inode *inode);
 extern void ext4_da_update_reserve_space(struct inode *inode,
 					int used, int quota_claim);
+extern int ext4_issue_zeroout(struct inode *inode, ext4_lblk_t lblk,
+			      ext4_fsblk_t pblk, ext4_lblk_t len);
 
 /* indirect.c */
 extern int ext4_ind_map_blocks(handle_t *handle, struct inode *inode,
@@ -3014,8 +3044,7 @@
 					 struct page *page);
 extern int ext4_try_add_inline_entry(handle_t *handle,
 				     struct ext4_filename *fname,
-				     struct dentry *dentry,
-				     struct inode *inode);
+				     struct inode *dir, struct inode *inode);
 extern int ext4_try_create_inline_dir(handle_t *handle,
 				      struct inode *parent,
 				      struct inode *inode);
diff -ruw linux-4.4.115/fs/ext4/extents.c linux-4.4.115-fbx/fs/ext4/extents.c
--- linux-4.4.115/fs/ext4/extents.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/fs/ext4/extents.c	2019-10-29 09:26:25.257218973 +0100
@@ -3127,19 +3127,11 @@
 {
 	ext4_fsblk_t ee_pblock;
 	unsigned int ee_len;
-	int ret;
 
 	ee_len    = ext4_ext_get_actual_len(ex);
 	ee_pblock = ext4_ext_pblock(ex);
-
-	if (ext4_encrypted_inode(inode))
-		return ext4_encrypted_zeroout(inode, ex);
-
-	ret = sb_issue_zeroout(inode->i_sb, ee_pblock, ee_len, GFP_NOFS);
-	if (ret > 0)
-		ret = 0;
-
-	return ret;
+	return ext4_issue_zeroout(inode, le32_to_cpu(ex->ee_block), ee_pblock,
+				  ee_len);
 }
 
 /*
diff -ruw linux-4.4.115/fs/ext4/inline.c linux-4.4.115-fbx/fs/ext4/inline.c
--- linux-4.4.115/fs/ext4/inline.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/fs/ext4/inline.c	2019-10-29 09:26:25.257218973 +0100
@@ -18,6 +18,7 @@
 #include "ext4.h"
 #include "xattr.h"
 #include "truncate.h"
+#include <trace/events/android_fs.h>
 
 #define EXT4_XATTR_SYSTEM_DATA	"data"
 #define EXT4_MIN_INLINE_DATA_SIZE	((sizeof(__le32) * EXT4_N_BLOCKS))
@@ -502,6 +503,17 @@
 		return -EAGAIN;
 	}
 
+	if (trace_android_fs_dataread_start_enabled()) {
+		char *path, pathbuf[MAX_TRACE_PATHBUF_LEN];
+
+		path = android_fstrace_get_pathname(pathbuf,
+						    MAX_TRACE_PATHBUF_LEN,
+						    inode);
+		trace_android_fs_dataread_start(inode, page_offset(page),
+						PAGE_SIZE, current->pid,
+						path, current->comm);
+	}
+
 	/*
 	 * Current inline data can only exist in the 1st page,
 	 * So for all the other pages, just set them uptodate.
@@ -513,6 +525,8 @@
 		SetPageUptodate(page);
 	}
 
+	trace_android_fs_dataread_end(inode, page_offset(page), PAGE_SIZE);
+
 	up_read(&EXT4_I(inode)->xattr_sem);
 
 	unlock_page(page);
@@ -1004,12 +1018,11 @@
  */
 static int ext4_add_dirent_to_inline(handle_t *handle,
 				     struct ext4_filename *fname,
-				     struct dentry *dentry,
+				     struct inode *dir,
 				     struct inode *inode,
 				     struct ext4_iloc *iloc,
 				     void *inline_start, int inline_size)
 {
-	struct inode	*dir = d_inode(dentry->d_parent);
 	int		err;
 	struct ext4_dir_entry_2 *de;
 
@@ -1253,12 +1266,11 @@
  * the new created block.
  */
 int ext4_try_add_inline_entry(handle_t *handle, struct ext4_filename *fname,
-			      struct dentry *dentry, struct inode *inode)
+			      struct inode *dir, struct inode *inode)
 {
 	int ret, inline_size;
 	void *inline_start;
 	struct ext4_iloc iloc;
-	struct inode *dir = d_inode(dentry->d_parent);
 
 	ret = ext4_get_inode_loc(dir, &iloc);
 	if (ret)
@@ -1272,7 +1284,7 @@
 						 EXT4_INLINE_DOTDOT_SIZE;
 	inline_size = EXT4_MIN_INLINE_DATA_SIZE - EXT4_INLINE_DOTDOT_SIZE;
 
-	ret = ext4_add_dirent_to_inline(handle, fname, dentry, inode, &iloc,
+	ret = ext4_add_dirent_to_inline(handle, fname, dir, inode, &iloc,
 					inline_start, inline_size);
 	if (ret != -ENOSPC)
 		goto out;
@@ -1293,7 +1305,7 @@
 	if (inline_size) {
 		inline_start = ext4_get_inline_xattr_pos(dir, &iloc);
 
-		ret = ext4_add_dirent_to_inline(handle, fname, dentry,
+		ret = ext4_add_dirent_to_inline(handle, fname, dir,
 						inode, &iloc, inline_start,
 						inline_size);
 
diff -ruw linux-4.4.115/fs/ext4/inode.c linux-4.4.115-fbx/fs/ext4/inode.c
--- linux-4.4.115/fs/ext4/inode.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/fs/ext4/inode.c	2019-10-29 09:26:25.261219012 +0100
@@ -42,8 +42,10 @@
 #include "xattr.h"
 #include "acl.h"
 #include "truncate.h"
+#include "ext4_ice.h"
 
 #include <trace/events/ext4.h>
+#include <trace/events/android_fs.h>
 
 #define MPAGE_DA_EXTENT_TAIL 0x01
 
@@ -388,6 +390,21 @@
 	return 0;
 }
 
+int ext4_issue_zeroout(struct inode *inode, ext4_lblk_t lblk, ext4_fsblk_t pblk,
+		       ext4_lblk_t len)
+{
+	int ret;
+
+	if (ext4_encrypted_inode(inode))
+		return ext4_encrypted_zeroout(inode, lblk, pblk, len);
+
+	ret = sb_issue_zeroout(inode->i_sb, pblk, len, GFP_NOFS);
+	if (ret > 0)
+		ret = 0;
+
+	return ret;
+}
+
 #define check_block_validity(inode, map)	\
 	__check_block_validity((inode), __func__, __LINE__, (map))
 
@@ -998,7 +1015,8 @@
 			ll_rw_block(READ, 1, &bh);
 			*wait_bh++ = bh;
 			decrypt = ext4_encrypted_inode(inode) &&
-				S_ISREG(inode->i_mode);
+				S_ISREG(inode->i_mode) &&
+				!ext4_is_ice_enabled();
 		}
 	}
 	/*
@@ -1029,6 +1047,16 @@
 	pgoff_t index;
 	unsigned from, to;
 
+	if (trace_android_fs_datawrite_start_enabled()) {
+		char *path, pathbuf[MAX_TRACE_PATHBUF_LEN];
+
+		path = android_fstrace_get_pathname(pathbuf,
+						    MAX_TRACE_PATHBUF_LEN,
+						    inode);
+		trace_android_fs_datawrite_start(inode, pos, len,
+						 current->pid, path,
+						 current->comm);
+	}
 	trace_ext4_write_begin(inode, pos, len, flags);
 	/*
 	 * Reserve one block more for addition to orphan list in case
@@ -1165,6 +1193,7 @@
 	int ret = 0, ret2;
 	int i_size_changed = 0;
 
+	trace_android_fs_datawrite_end(inode, pos, len);
 	trace_ext4_write_end(inode, pos, len, copied);
 	if (ext4_has_inline_data(inode)) {
 		ret = ext4_write_inline_data_end(inode, pos, len,
@@ -1269,6 +1298,7 @@
 	unsigned from, to;
 	int size_changed = 0;
 
+	trace_android_fs_datawrite_end(inode, pos, len);
 	trace_ext4_journalled_write_end(inode, pos, len, copied);
 	from = pos & (PAGE_CACHE_SIZE - 1);
 	to = from + len;
@@ -2758,6 +2788,16 @@
 					len, flags, pagep, fsdata);
 	}
 	*fsdata = (void *)0;
+	if (trace_android_fs_datawrite_start_enabled()) {
+		char *path, pathbuf[MAX_TRACE_PATHBUF_LEN];
+
+		path = android_fstrace_get_pathname(pathbuf,
+						    MAX_TRACE_PATHBUF_LEN,
+						    inode);
+		trace_android_fs_datawrite_start(inode, pos, len,
+						 current->pid,
+						 path, current->comm);
+	}
 	trace_ext4_da_write_begin(inode, pos, len, flags);
 
 	if (ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA)) {
@@ -2876,6 +2916,7 @@
 		return ext4_write_end(file, mapping, pos,
 				      len, copied, page, fsdata);
 
+	trace_android_fs_datawrite_end(inode, pos, len);
 	trace_ext4_da_write_end(inode, pos, len, copied);
 	start = pos & (PAGE_CACHE_SIZE - 1);
 	end = start + copied - 1;
@@ -3282,7 +3323,9 @@
 		get_block_func = ext4_get_block_write;
 		dio_flags = DIO_LOCKING;
 	}
-#ifdef CONFIG_EXT4_FS_ENCRYPTION
+#if defined(CONFIG_EXT4_FS_ENCRYPTION) && \
+!defined(CONFIG_EXT4_FS_ICE_ENCRYPTION)
+
 	BUG_ON(ext4_encrypted_inode(inode) && S_ISREG(inode->i_mode));
 #endif
 	if (IS_DAX(inode))
@@ -3349,7 +3392,9 @@
 	size_t count = iov_iter_count(iter);
 	ssize_t ret;
 
-#ifdef CONFIG_EXT4_FS_ENCRYPTION
+#if defined(CONFIG_EXT4_FS_ENCRYPTION) && \
+!defined(CONFIG_EXT4_FS_ICE_ENCRYPTION)
+
 	if (ext4_encrypted_inode(inode) && S_ISREG(inode->i_mode))
 		return 0;
 #endif
@@ -3364,12 +3409,42 @@
 	if (ext4_has_inline_data(inode))
 		return 0;
 
+	if (trace_android_fs_dataread_start_enabled() &&
+	    (iov_iter_rw(iter) == READ)) {
+		char *path, pathbuf[MAX_TRACE_PATHBUF_LEN];
+
+		path = android_fstrace_get_pathname(pathbuf,
+						    MAX_TRACE_PATHBUF_LEN,
+						    inode);
+		trace_android_fs_dataread_start(inode, offset, count,
+						current->pid, path,
+						current->comm);
+	}
+	if (trace_android_fs_datawrite_start_enabled() &&
+	    (iov_iter_rw(iter) == WRITE)) {
+		char *path, pathbuf[MAX_TRACE_PATHBUF_LEN];
+
+		path = android_fstrace_get_pathname(pathbuf,
+						    MAX_TRACE_PATHBUF_LEN,
+						    inode);
+		trace_android_fs_datawrite_start(inode, offset, count,
+						 current->pid, path,
+						 current->comm);
+	}
 	trace_ext4_direct_IO_enter(inode, offset, count, iov_iter_rw(iter));
 	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
 		ret = ext4_ext_direct_IO(iocb, iter, offset);
 	else
 		ret = ext4_ind_direct_IO(iocb, iter, offset);
 	trace_ext4_direct_IO_exit(inode, offset, count, iov_iter_rw(iter), ret);
+
+	if (trace_android_fs_dataread_start_enabled() &&
+	    (iov_iter_rw(iter) == READ))
+		trace_android_fs_dataread_end(inode, offset, count);
+	if (trace_android_fs_datawrite_start_enabled() &&
+	    (iov_iter_rw(iter) == WRITE))
+		trace_android_fs_datawrite_end(inode, offset, count);
+
 	return ret;
 }
 
@@ -3519,7 +3594,8 @@
 		if (!buffer_uptodate(bh))
 			goto unlock;
 		if (S_ISREG(inode->i_mode) &&
-		    ext4_encrypted_inode(inode)) {
+		    ext4_encrypted_inode(inode) &&
+		    !ext4_using_hardware_encryption(inode)) {
 			/* We expect the key to be set. */
 			BUG_ON(!ext4_has_encryption_key(inode));
 			BUG_ON(blocksize != PAGE_CACHE_SIZE);
@@ -3692,6 +3768,7 @@
 
 int ext4_punch_hole(struct inode *inode, loff_t offset, loff_t length)
 {
+#if 0
 	struct super_block *sb = inode->i_sb;
 	ext4_lblk_t first_block, stop_block;
 	struct address_space *mapping = inode->i_mapping;
@@ -3822,6 +3899,12 @@
 out_mutex:
 	mutex_unlock(&inode->i_mutex);
 	return ret;
+#else
+	/*
+	 * Disabled as per b/28760453
+	 */
+	return -EOPNOTSUPP;
+#endif
 }
 
 int ext4_inode_attach_jinode(struct inode *inode)
@@ -4140,8 +4223,11 @@
 		new_fl |= S_DIRSYNC;
 	if (test_opt(inode->i_sb, DAX))
 		new_fl |= S_DAX;
+	if (flags & EXT4_ENCRYPT_FL)
+		new_fl |= S_ENCRYPTED;
 	inode_set_flags(inode, new_fl,
-			S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC|S_DAX);
+			S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC|S_DAX|
+			S_ENCRYPTED);
 }
 
 /* Propagate flags from i_flags to EXT4_I(inode)->i_flags */
diff -ruw linux-4.4.115/fs/ext4/ioctl.c linux-4.4.115-fbx/fs/ext4/ioctl.c
--- linux-4.4.115/fs/ext4/ioctl.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/fs/ext4/ioctl.c	2019-10-29 09:26:25.261219012 +0100
@@ -587,11 +587,13 @@
 		return err;
 	}
 
+	case FIDTRIM:
 	case FITRIM:
 	{
 		struct request_queue *q = bdev_get_queue(sb->s_bdev);
 		struct fstrim_range range;
 		int ret = 0;
+		int flags  = cmd == FIDTRIM ? BLKDEV_DISCARD_SECURE : 0;
 
 		if (!capable(CAP_SYS_ADMIN))
 			return -EPERM;
@@ -599,13 +601,15 @@
 		if (!blk_queue_discard(q))
 			return -EOPNOTSUPP;
 
+		if ((flags & BLKDEV_DISCARD_SECURE) && !blk_queue_secdiscard(q))
+			return -EOPNOTSUPP;
 		if (copy_from_user(&range, (struct fstrim_range __user *)arg,
 		    sizeof(range)))
 			return -EFAULT;
 
 		range.minlen = max((unsigned int)range.minlen,
 				   q->limits.discard_granularity);
-		ret = ext4_trim_fs(sb, &range);
+		ret = ext4_trim_fs(sb, &range, flags);
 		if (ret < 0)
 			return ret;
 
@@ -622,9 +626,6 @@
 		struct ext4_encryption_policy policy;
 		int err = 0;
 
-		if (!ext4_has_feature_encrypt(sb))
-			return -EOPNOTSUPP;
-
 		if (copy_from_user(&policy,
 				   (struct ext4_encryption_policy __user *)arg,
 				   sizeof(policy))) {
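
FIDTRIM is the secure-discard twin of FITRIM: the same fstrim_range payload,
but discards are issued with BLKDEV_DISCARD_SECURE (hence the extra
blk_queue_secdiscard() check above). A userspace sketch; the ioctl number is
defined in the uapi headers elsewhere in this patch, outside this excerpt, so
treat the fallback value here as an assumption:

    #include <stdio.h>
    #include <string.h>
    #include <fcntl.h>
    #include <sys/ioctl.h>
    #include <linux/fs.h>       /* struct fstrim_range, FITRIM */

    #ifndef FIDTRIM
    #define FIDTRIM _IOWR('f', 128, struct fstrim_range)    /* assumed */
    #endif

    int main(int argc, char **argv)
    {
        struct fstrim_range range;
        int fd;

        if (argc < 2)
            return 1;
        fd = open(argv[1], O_RDONLY);   /* a mounted ext4 directory */
        if (fd < 0)
            return 1;
        memset(&range, 0, sizeof(range));
        range.len = (unsigned long long)-1; /* whole filesystem */
        if (ioctl(fd, FIDTRIM, &range) < 0) {
            perror("FIDTRIM");
            return 1;
        }
        printf("securely trimmed %llu bytes\n",
               (unsigned long long)range.len);
        return 0;
    }
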
diff -ruw linux-4.4.115/fs/ext4/Kconfig linux-4.4.115-fbx/fs/ext4/Kconfig
--- linux-4.4.115/fs/ext4/Kconfig	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/fs/ext4/Kconfig	2019-01-22 16:16:27.815285454 +0100
@@ -106,6 +106,7 @@
 	select CRYPTO_ECB
 	select CRYPTO_XTS
 	select CRYPTO_CTS
+	select CRYPTO_HEH
 	select CRYPTO_CTR
 	select CRYPTO_SHA256
 	select KEYS
@@ -117,10 +118,16 @@
 	  decrypted pages in the page cache.
 
 config EXT4_FS_ENCRYPTION
-	bool
-	default y
+	bool "Ext4 FS Encryption"
+	default n
 	depends on EXT4_ENCRYPTION
 
+config EXT4_FS_ICE_ENCRYPTION
+	bool "Ext4 Encryption with ICE support"
+	default n
+	depends on EXT4_FS_ENCRYPTION
+	depends on PFK
+
 config EXT4_DEBUG
 	bool "EXT4 debugging support"
 	depends on EXT4_FS
diff -ruw linux-4.4.115/fs/ext4/Makefile linux-4.4.115-fbx/fs/ext4/Makefile
--- linux-4.4.115/fs/ext4/Makefile	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/fs/ext4/Makefile	2019-01-22 16:16:27.815285454 +0100
@@ -14,3 +14,5 @@
 ext4-$(CONFIG_EXT4_FS_SECURITY)		+= xattr_security.o
 ext4-$(CONFIG_EXT4_FS_ENCRYPTION)	+= crypto_policy.o crypto.o \
 		crypto_key.o crypto_fname.o
+
+ext4-$(CONFIG_EXT4_FS_ICE_ENCRYPTION)	+= ext4_ice.o
diff -ruw linux-4.4.115/fs/ext4/mballoc.c linux-4.4.115-fbx/fs/ext4/mballoc.c
--- linux-4.4.115/fs/ext4/mballoc.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/fs/ext4/mballoc.c	2019-10-29 09:26:25.261219012 +0100
@@ -2772,7 +2772,8 @@
 }
 
 static inline int ext4_issue_discard(struct super_block *sb,
-		ext4_group_t block_group, ext4_grpblk_t cluster, int count)
+		ext4_group_t block_group, ext4_grpblk_t cluster, int count,
+		unsigned long flags)
 {
 	ext4_fsblk_t discard_block;
 
@@ -2781,7 +2782,7 @@
 	count = EXT4_C2B(EXT4_SB(sb), count);
 	trace_ext4_discard_blocks(sb,
 			(unsigned long long) discard_block, count);
-	return sb_issue_discard(sb, discard_block, count, GFP_NOFS, 0);
+	return sb_issue_discard(sb, discard_block, count, GFP_NOFS, flags);
 }
 
 /*
@@ -2803,7 +2804,7 @@
 	if (test_opt(sb, DISCARD)) {
 		err = ext4_issue_discard(sb, entry->efd_group,
 					 entry->efd_start_cluster,
-					 entry->efd_count);
+					 entry->efd_count, 0);
 		if (err && err != -EOPNOTSUPP)
 			ext4_msg(sb, KERN_WARNING, "discard request in"
 				 " group:%d block:%d count:%d failed"
@@ -4855,7 +4856,8 @@
 		 * them with group lock_held
 		 */
 		if (test_opt(sb, DISCARD)) {
-			err = ext4_issue_discard(sb, block_group, bit, count);
+			err = ext4_issue_discard(sb, block_group, bit, count,
+						 0);
 			if (err && err != -EOPNOTSUPP)
 				ext4_msg(sb, KERN_WARNING, "discard request in"
 					 " group:%d block:%d count:%lu failed"
@@ -5051,13 +5053,15 @@
  * @count:	number of blocks to TRIM
  * @group:	alloc. group we are working with
  * @e4b:	ext4 buddy for the group
+ * @blkdev_flags: flags for the block device
  *
  * Trim "count" blocks starting at "start" in the "group". To assure that no
  * one will allocate those blocks, mark it as used in buddy bitmap. This must
  * be called with under the group lock.
  */
 static int ext4_trim_extent(struct super_block *sb, int start, int count,
-			     ext4_group_t group, struct ext4_buddy *e4b)
+			    ext4_group_t group, struct ext4_buddy *e4b,
+			    unsigned long blkdev_flags)
 __releases(bitlock)
 __acquires(bitlock)
 {
@@ -5078,7 +5082,7 @@
 	 */
 	mb_mark_used(e4b, &ex);
 	ext4_unlock_group(sb, group);
-	ret = ext4_issue_discard(sb, group, start, count);
+	ret = ext4_issue_discard(sb, group, start, count, blkdev_flags);
 	ext4_lock_group(sb, group);
 	mb_free_blocks(NULL, e4b, start, ex.fe_len);
 	return ret;
@@ -5091,6 +5095,7 @@
  * @start:		first group block to examine
  * @max:		last group block to examine
  * @minblocks:		minimum extent block count
+ * @blkdev_flags:	flags for the block device
  *
  * ext4_trim_all_free walks through group's buddy bitmap searching for free
  * extents. When the free block is found, ext4_trim_extent is called to TRIM
@@ -5105,7 +5110,7 @@
 static ext4_grpblk_t
 ext4_trim_all_free(struct super_block *sb, ext4_group_t group,
 		   ext4_grpblk_t start, ext4_grpblk_t max,
-		   ext4_grpblk_t minblocks)
+		   ext4_grpblk_t minblocks, unsigned long blkdev_flags)
 {
 	void *bitmap;
 	ext4_grpblk_t next, count = 0, free_count = 0;
@@ -5138,7 +5143,8 @@
 
 		if ((next - start) >= minblocks) {
 			ret = ext4_trim_extent(sb, start,
-					       next - start, group, &e4b);
+					       next - start, group, &e4b,
+					       blkdev_flags);
 			if (ret && ret != -EOPNOTSUPP)
 				break;
 			ret = 0;
@@ -5180,6 +5186,7 @@
  * ext4_trim_fs() -- trim ioctl handle function
  * @sb:			superblock for filesystem
  * @range:		fstrim_range structure
+ * @blkdev_flags:	flags for the block device
  *
  * start:	First Byte to trim
  * len:		number of Bytes to trim from start
@@ -5188,7 +5195,8 @@
  * start to start+len. For each such a group ext4_trim_all_free function
  * is invoked to trim all free space.
  */
-int ext4_trim_fs(struct super_block *sb, struct fstrim_range *range)
+int ext4_trim_fs(struct super_block *sb, struct fstrim_range *range,
+			unsigned long blkdev_flags)
 {
 	struct ext4_group_info *grp;
 	ext4_group_t group, first_group, last_group;
@@ -5244,7 +5252,7 @@
 
 		if (grp->bb_free >= minlen) {
 			cnt = ext4_trim_all_free(sb, group, first_cluster,
-						end, minlen);
+						end, minlen, blkdev_flags);
 			if (cnt < 0) {
 				ret = cnt;
 				break;
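
Every discard call site in mballoc.c now forwards a flags word into sb_issue_discard(), whose final parameter in this kernel is the blkdev_issue_discard() flags mask. The only flag 4.4 defines for it is BLKDEV_DISCARD_SECURE, so the evident purpose is a secure-TRIM variant of the FITRIM path; the ioctl-side caller that actually sets the flag is outside this hunk, so treat the following as a sketch of intent rather than code from the patch:

/* Sketch: what a flagged call amounts to once the ioctl passes
 * BLKDEV_DISCARD_SECURE down through ext4_trim_fs() (assumed). */
static int ext4_issue_secure_discard(struct super_block *sb, sector_t block,
				     sector_t count)
{
	return sb_issue_discard(sb, block, count, GFP_NOFS,
				BLKDEV_DISCARD_SECURE);
}
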
diff -ruw linux-4.4.115/fs/ext4/namei.c linux-4.4.115-fbx/fs/ext4/namei.c
--- linux-4.4.115/fs/ext4/namei.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/fs/ext4/namei.c	2019-10-29 09:26:25.265219051 +0100
@@ -273,7 +273,7 @@
 		struct ext4_filename *fname,
 		struct ext4_dir_entry_2 **res_dir);
 static int ext4_dx_add_entry(handle_t *handle, struct ext4_filename *fname,
-			     struct dentry *dentry, struct inode *inode);
+			     struct inode *dir, struct inode *inode);
 
 /* checksumming functions */
 void initialize_dirent_tail(struct ext4_dir_entry_tail *t,
@@ -1949,10 +1949,9 @@
  * directory, and adds the dentry to the indexed directory.
  */
 static int make_indexed_dir(handle_t *handle, struct ext4_filename *fname,
-			    struct dentry *dentry,
+			    struct inode *dir,
 			    struct inode *inode, struct buffer_head *bh)
 {
-	struct inode	*dir = d_inode(dentry->d_parent);
 	struct buffer_head *bh2;
 	struct dx_root	*root;
 	struct dx_frame	frames[2], *frame;
@@ -2105,8 +2104,7 @@
 		return retval;
 
 	if (ext4_has_inline_data(dir)) {
-		retval = ext4_try_add_inline_entry(handle, &fname,
-						   dentry, inode);
+		retval = ext4_try_add_inline_entry(handle, &fname, dir, inode);
 		if (retval < 0)
 			goto out;
 		if (retval == 1) {
@@ -2116,7 +2114,7 @@
 	}
 
 	if (is_dx(dir)) {
-		retval = ext4_dx_add_entry(handle, &fname, dentry, inode);
+		retval = ext4_dx_add_entry(handle, &fname, dir, inode);
 		if (!retval || (retval != ERR_BAD_DX_DIR))
 			goto out;
 		ext4_clear_inode_flag(dir, EXT4_INODE_INDEX);
@@ -2138,7 +2136,7 @@
 
 		if (blocks == 1 && !dx_fallback &&
 		    ext4_has_feature_dir_index(sb)) {
-			retval = make_indexed_dir(handle, &fname, dentry,
+			retval = make_indexed_dir(handle, &fname, dir,
 						  inode, bh);
 			bh = NULL; /* make_indexed_dir releases bh */
 			goto out;
@@ -2173,12 +2171,11 @@
  * Returns 0 for success, or a negative error value
  */
 static int ext4_dx_add_entry(handle_t *handle, struct ext4_filename *fname,
-			     struct dentry *dentry, struct inode *inode)
+			     struct inode *dir, struct inode *inode)
 {
 	struct dx_frame frames[2], *frame;
 	struct dx_entry *entries, *at;
 	struct buffer_head *bh;
-	struct inode *dir = d_inode(dentry->d_parent);
 	struct super_block *sb = dir->i_sb;
 	struct ext4_dir_entry_2 *de;
 	int err;
diff -ruw linux-4.4.115/fs/ext4/page-io.c linux-4.4.115-fbx/fs/ext4/page-io.c
--- linux-4.4.115/fs/ext4/page-io.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/fs/ext4/page-io.c	2019-01-22 16:16:27.831285599 +0100
@@ -28,6 +28,7 @@
 #include "ext4_jbd2.h"
 #include "xattr.h"
 #include "acl.h"
+#include "ext4_ice.h"
 
 static struct kmem_cache *io_end_cachep;
 
@@ -489,7 +490,8 @@
 		gfp_t gfp_flags = GFP_NOFS;
 
 	retry_encrypt:
-		data_page = ext4_encrypt(inode, page, gfp_flags);
+		if (!ext4_using_hardware_encryption(inode))
+			data_page = ext4_encrypt(inode, page, gfp_flags);
 		if (IS_ERR(data_page)) {
 			ret = PTR_ERR(data_page);
 			if (ret == -ENOMEM && wbc->sync_mode == WB_SYNC_ALL) {
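
ext4_using_hardware_encryption() lives in the new fs/ext4/ext4_ice.c, which diff -ruw omits (new files need -N to appear). The gate means that for inodes handled by the SoC's Inline Crypto Engine (ICE, paired with the PFK option in the Kconfig hunk above) the page is submitted as plaintext and encrypted by the storage controller as the bio passes through the block layer. A plausible shape for the helper; the field and mode names are taken from contemporary CAF trees and are an assumption, not part of this diff:

/* Hypothetical sketch of the ext4_ice.c helper (not shown by this diff). */
int ext4_using_hardware_encryption(struct inode *inode)
{
	struct ext4_crypt_info *ci = EXT4_I(inode)->i_crypt_info;

	/* EXT4_ENCRYPTION_MODE_PRIVATE: the vendor "private"/ICE data mode */
	return ci && ci->ci_data_mode == EXT4_ENCRYPTION_MODE_PRIVATE;
}
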
diff -ruw linux-4.4.115/fs/ext4/readpage.c linux-4.4.115-fbx/fs/ext4/readpage.c
--- linux-4.4.115/fs/ext4/readpage.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/fs/ext4/readpage.c	2019-01-22 16:16:27.831285599 +0100
@@ -45,6 +45,8 @@
 #include <linux/cleancache.h>
 
 #include "ext4.h"
+#include "ext4_ice.h"
+#include <trace/events/android_fs.h>
 
 /*
  * Call ext4_decrypt on every single page, reusing the encryption
@@ -62,12 +64,17 @@
 	bio_for_each_segment_all(bv, bio, i) {
 		struct page *page = bv->bv_page;
 
-		int ret = ext4_decrypt(page);
-		if (ret) {
-			WARN_ON_ONCE(1);
-			SetPageError(page);
-		} else
-			SetPageUptodate(page);
+		if (ext4_is_ice_enabled()) {
+			SetPageUptodate(page);
+		} else {
+			int ret = ext4_decrypt(page);
+
+			if (ret) {
+				WARN_ON_ONCE(1);
+				SetPageError(page);
+			} else
+				SetPageUptodate(page);
+		}
 		unlock_page(page);
 	}
 	ext4_release_crypto_ctx(ctx);
@@ -86,6 +93,17 @@
 #endif
 }
 
+static void
+ext4_trace_read_completion(struct bio *bio)
+{
+	struct page *first_page = bio->bi_io_vec[0].bv_page;
+
+	if (first_page != NULL)
+		trace_android_fs_dataread_end(first_page->mapping->host,
+					      page_offset(first_page),
+					      bio->bi_iter.bi_size);
+}
+
 /*
  * I/O completion handler for multipage BIOs.
  *
@@ -103,6 +121,9 @@
 	struct bio_vec *bv;
 	int i;
 
+	if (trace_android_fs_dataread_start_enabled())
+		ext4_trace_read_completion(bio);
+
 	if (ext4_bio_encrypted(bio)) {
 		struct ext4_crypto_ctx *ctx = bio->bi_private;
 
@@ -130,6 +151,30 @@
 	bio_put(bio);
 }
 
+static void
+ext4_submit_bio_read(struct bio *bio)
+{
+	if (trace_android_fs_dataread_start_enabled()) {
+		struct page *first_page = bio->bi_io_vec[0].bv_page;
+
+		if (first_page != NULL) {
+			char *path, pathbuf[MAX_TRACE_PATHBUF_LEN];
+
+			path = android_fstrace_get_pathname(pathbuf,
+						    MAX_TRACE_PATHBUF_LEN,
+						    first_page->mapping->host);
+			trace_android_fs_dataread_start(
+				first_page->mapping->host,
+				page_offset(first_page),
+				bio->bi_iter.bi_size,
+				current->pid,
+				path,
+				current->comm);
+		}
+	}
+	submit_bio(READ, bio);
+}
+
 int ext4_mpage_readpages(struct address_space *mapping,
 			 struct list_head *pages, struct page *page,
 			 unsigned nr_pages)
@@ -271,7 +316,7 @@
 		 */
 		if (bio && (last_block_in_bio != blocks[0] - 1)) {
 		submit_and_realloc:
-			submit_bio(READ, bio);
+			ext4_submit_bio_read(bio);
 			bio = NULL;
 		}
 		if (bio == NULL) {
@@ -303,14 +348,14 @@
 		if (((map.m_flags & EXT4_MAP_BOUNDARY) &&
 		     (relative_block == map.m_len)) ||
 		    (first_hole != blocks_per_page)) {
-			submit_bio(READ, bio);
+			ext4_submit_bio_read(bio);
 			bio = NULL;
 		} else
 			last_block_in_bio = blocks[blocks_per_page - 1];
 		goto next_page;
 	confused:
 		if (bio) {
-			submit_bio(READ, bio);
+			ext4_submit_bio_read(bio);
 			bio = NULL;
 		}
 		if (!PageUptodate(page))
@@ -323,6 +368,6 @@
 	}
 	BUG_ON(pages && !list_empty(pages));
 	if (bio)
-		submit_bio(READ, bio);
+		ext4_submit_bio_read(bio);
 	return 0;
 }
diff -ruw linux-4.4.115/fs/ext4/super.c linux-4.4.115-fbx/fs/ext4/super.c
--- linux-4.4.115/fs/ext4/super.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/fs/ext4/super.c	2019-10-29 09:26:25.265219051 +0100
@@ -816,7 +816,6 @@
 	ext4_release_system_zone(sb);
 	ext4_mb_release(sb);
 	ext4_ext_release(sb);
-	ext4_xattr_put_super(sb);
 
 	if (!(sb->s_flags & MS_RDONLY) && !aborted) {
 		ext4_clear_feature_journal_needs_recovery(sb);
@@ -3854,7 +3853,7 @@
 
 no_journal:
 	if (ext4_mballoc_ready) {
-		sbi->s_mb_cache = ext4_xattr_create_cache(sb->s_id);
+		sbi->s_mb_cache = ext4_xattr_create_cache();
 		if (!sbi->s_mb_cache) {
 			ext4_msg(sb, KERN_ERR, "Failed to create an mb_cache");
 			goto failed_mount_wq;
@@ -4086,6 +4085,10 @@
 	if (EXT4_SB(sb)->rsv_conversion_wq)
 		destroy_workqueue(EXT4_SB(sb)->rsv_conversion_wq);
 failed_mount_wq:
+	if (sbi->s_mb_cache) {
+		ext4_xattr_destroy_cache(sbi->s_mb_cache);
+		sbi->s_mb_cache = NULL;
+	}
 	if (sbi->s_journal) {
 		jbd2_journal_destroy(sbi->s_journal);
 		sbi->s_journal = NULL;
diff -ruw linux-4.4.115/fs/ext4/xattr.c linux-4.4.115-fbx/fs/ext4/xattr.c
--- linux-4.4.115/fs/ext4/xattr.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/fs/ext4/xattr.c	2019-10-29 09:26:25.269219090 +0100
@@ -53,7 +53,7 @@
 #include <linux/init.h>
 #include <linux/fs.h>
 #include <linux/slab.h>
-#include <linux/mbcache.h>
+#include <linux/mbcache2.h>
 #include <linux/quotaops.h>
 #include "ext4_jbd2.h"
 #include "ext4.h"
@@ -80,10 +80,10 @@
 # define ea_bdebug(bh, fmt, ...)	no_printk(fmt, ##__VA_ARGS__)
 #endif
 
-static void ext4_xattr_cache_insert(struct mb_cache *, struct buffer_head *);
+static void ext4_xattr_cache_insert(struct mb2_cache *, struct buffer_head *);
 static struct buffer_head *ext4_xattr_cache_find(struct inode *,
 						 struct ext4_xattr_header *,
-						 struct mb_cache_entry **);
+						 struct mb2_cache_entry **);
 static void ext4_xattr_rehash(struct ext4_xattr_header *,
 			      struct ext4_xattr_entry *);
 static int ext4_xattr_list(struct dentry *dentry, char *buffer,
@@ -300,7 +300,7 @@
 	struct ext4_xattr_entry *entry;
 	size_t size;
 	int error;
-	struct mb_cache *ext4_mb_cache = EXT4_GET_MB_CACHE(inode);
+	struct mb2_cache *ext4_mb_cache = EXT4_GET_MB_CACHE(inode);
 
 	ea_idebug(inode, "name=%d.%s, buffer=%p, buffer_size=%ld",
 		  name_index, name, buffer, (long)buffer_size);
@@ -447,7 +447,7 @@
 	struct inode *inode = d_inode(dentry);
 	struct buffer_head *bh = NULL;
 	int error;
-	struct mb_cache *ext4_mb_cache = EXT4_GET_MB_CACHE(inode);
+	struct mb2_cache *ext4_mb_cache = EXT4_GET_MB_CACHE(inode);
 
 	ea_idebug(inode, "buffer=%p, buffer_size=%ld",
 		  buffer, (long)buffer_size);
@@ -564,11 +564,8 @@
 ext4_xattr_release_block(handle_t *handle, struct inode *inode,
 			 struct buffer_head *bh)
 {
-	struct mb_cache_entry *ce = NULL;
 	int error = 0;
-	struct mb_cache *ext4_mb_cache = EXT4_GET_MB_CACHE(inode);
 
-	ce = mb_cache_entry_get(ext4_mb_cache, bh->b_bdev, bh->b_blocknr);
 	BUFFER_TRACE(bh, "get_write_access");
 	error = ext4_journal_get_write_access(handle, bh);
 	if (error)
@@ -576,9 +573,15 @@
 
 	lock_buffer(bh);
 	if (BHDR(bh)->h_refcount == cpu_to_le32(1)) {
+		__u32 hash = le32_to_cpu(BHDR(bh)->h_hash);
+
 		ea_bdebug(bh, "refcount now=0; freeing");
-		if (ce)
-			mb_cache_entry_free(ce);
+		/*
+		 * This must happen under buffer lock for
+		 * ext4_xattr_block_set() to reliably detect freed block
+		 */
+		mb2_cache_entry_delete_block(EXT4_GET_MB_CACHE(inode), hash,
+					     bh->b_blocknr);
 		get_bh(bh);
 		unlock_buffer(bh);
 		ext4_free_blocks(handle, inode, bh, 0, 1,
@@ -586,8 +589,6 @@
 				 EXT4_FREE_BLOCKS_FORGET);
 	} else {
 		le32_add_cpu(&BHDR(bh)->h_refcount, -1);
-		if (ce)
-			mb_cache_entry_release(ce);
 		/*
 		 * Beware of this ugliness: Releasing of xattr block references
 		 * from different inodes can race and so we have to protect
@@ -800,17 +801,15 @@
 	struct super_block *sb = inode->i_sb;
 	struct buffer_head *new_bh = NULL;
 	struct ext4_xattr_search *s = &bs->s;
-	struct mb_cache_entry *ce = NULL;
+	struct mb2_cache_entry *ce = NULL;
 	int error = 0;
-	struct mb_cache *ext4_mb_cache = EXT4_GET_MB_CACHE(inode);
+	struct mb2_cache *ext4_mb_cache = EXT4_GET_MB_CACHE(inode);
 
 #define header(x) ((struct ext4_xattr_header *)(x))
 
 	if (i->value && i->value_len > sb->s_blocksize)
 		return -ENOSPC;
 	if (s->base) {
-		ce = mb_cache_entry_get(ext4_mb_cache, bs->bh->b_bdev,
-					bs->bh->b_blocknr);
 		BUFFER_TRACE(bs->bh, "get_write_access");
 		error = ext4_journal_get_write_access(handle, bs->bh);
 		if (error)
@@ -818,10 +817,15 @@
 		lock_buffer(bs->bh);
 
 		if (header(s->base)->h_refcount == cpu_to_le32(1)) {
-			if (ce) {
-				mb_cache_entry_free(ce);
-				ce = NULL;
-			}
+			__u32 hash = le32_to_cpu(BHDR(bs->bh)->h_hash);
+
+			/*
+			 * This must happen under buffer lock for
+			 * ext4_xattr_block_set() to reliably detect modified
+			 * block
+			 */
+			mb2_cache_entry_delete_block(ext4_mb_cache, hash,
+						     bs->bh->b_blocknr);
 			ea_bdebug(bs->bh, "modifying in-place");
 			error = ext4_xattr_set_entry(i, s);
 			if (!error) {
@@ -845,10 +849,6 @@
 			int offset = (char *)s->here - bs->bh->b_data;
 
 			unlock_buffer(bs->bh);
-			if (ce) {
-				mb_cache_entry_release(ce);
-				ce = NULL;
-			}
 			ea_bdebug(bs->bh, "cloning");
 			s->base = kmalloc(bs->bh->b_size, GFP_NOFS);
 			error = -ENOMEM;
@@ -903,6 +903,31 @@
 				if (error)
 					goto cleanup_dquot;
 				lock_buffer(new_bh);
+				/*
+				 * We have to be careful about races with
+				 * freeing or rehashing of xattr block. Once we
+				 * hold buffer lock xattr block's state is
+				 * stable so we can check whether the block got
+				 * freed / rehashed or not.  Since we unhash
+				 * mbcache entry under buffer lock when freeing
+				 * / rehashing xattr block, checking whether
+				 * entry is still hashed is reliable.
+				 */
+				if (hlist_bl_unhashed(&ce->e_hash_list)) {
+					/*
+					 * Undo everything and check mbcache
+					 * again.
+					 */
+					unlock_buffer(new_bh);
+					dquot_free_block(inode,
+							 EXT4_C2B(EXT4_SB(sb),
+								  1));
+					brelse(new_bh);
+					mb2_cache_entry_put(ext4_mb_cache, ce);
+					ce = NULL;
+					new_bh = NULL;
+					goto inserted;
+				}
 				le32_add_cpu(&BHDR(new_bh)->h_refcount, 1);
 				ea_bdebug(new_bh, "reusing; refcount now=%d",
 					le32_to_cpu(BHDR(new_bh)->h_refcount));
@@ -913,7 +938,8 @@
 				if (error)
 					goto cleanup_dquot;
 			}
-			mb_cache_entry_release(ce);
+			mb2_cache_entry_touch(ext4_mb_cache, ce);
+			mb2_cache_entry_put(ext4_mb_cache, ce);
 			ce = NULL;
 		} else if (bs->bh && s->base == bs->bh->b_data) {
 			/* We were modifying this block in-place. */
@@ -978,7 +1004,7 @@
 
 cleanup:
 	if (ce)
-		mb_cache_entry_release(ce);
+		mb2_cache_entry_put(ext4_mb_cache, ce);
 	brelse(new_bh);
 	if (!(bs->bh && s->base == bs->bh->b_data))
 		kfree(s->base);
@@ -1543,17 +1569,6 @@
 }
 
 /*
- * ext4_xattr_put_super()
- *
- * This is called when a file system is unmounted.
- */
-void
-ext4_xattr_put_super(struct super_block *sb)
-{
-	mb_cache_shrink(sb->s_bdev);
-}
-
-/*
  * ext4_xattr_cache_insert()
  *
  * Create a new entry in the extended attribute cache, and insert
@@ -1562,28 +1577,18 @@
  * Returns 0, or a negative error number on failure.
  */
 static void
-ext4_xattr_cache_insert(struct mb_cache *ext4_mb_cache, struct buffer_head *bh)
+ext4_xattr_cache_insert(struct mb2_cache *ext4_mb_cache, struct buffer_head *bh)
 {
 	__u32 hash = le32_to_cpu(BHDR(bh)->h_hash);
-	struct mb_cache_entry *ce;
 	int error;
 
-	ce = mb_cache_entry_alloc(ext4_mb_cache, GFP_NOFS);
-	if (!ce) {
-		ea_bdebug(bh, "out of memory");
-		return;
-	}
-	error = mb_cache_entry_insert(ce, bh->b_bdev, bh->b_blocknr, hash);
+	error = mb2_cache_entry_create(ext4_mb_cache, GFP_NOFS, hash,
+				       bh->b_blocknr);
 	if (error) {
-		mb_cache_entry_free(ce);
-		if (error == -EBUSY) {
+		if (error == -EBUSY)
 			ea_bdebug(bh, "already in cache");
-			error = 0;
-		}
-	} else {
+	} else
 		ea_bdebug(bh, "inserting [%x]", (int)hash);
-		mb_cache_entry_release(ce);
-	}
 }
 
 /*
@@ -1636,26 +1641,19 @@
  */
 static struct buffer_head *
 ext4_xattr_cache_find(struct inode *inode, struct ext4_xattr_header *header,
-		      struct mb_cache_entry **pce)
+		      struct mb2_cache_entry **pce)
 {
 	__u32 hash = le32_to_cpu(header->h_hash);
-	struct mb_cache_entry *ce;
-	struct mb_cache *ext4_mb_cache = EXT4_GET_MB_CACHE(inode);
+	struct mb2_cache_entry *ce;
+	struct mb2_cache *ext4_mb_cache = EXT4_GET_MB_CACHE(inode);
 
 	if (!header->h_hash)
 		return NULL;  /* never share */
 	ea_idebug(inode, "looking for cached blocks [%x]", (int)hash);
-again:
-	ce = mb_cache_entry_find_first(ext4_mb_cache, inode->i_sb->s_bdev,
-				       hash);
+	ce = mb2_cache_entry_find_first(ext4_mb_cache, hash);
 	while (ce) {
 		struct buffer_head *bh;
 
-		if (IS_ERR(ce)) {
-			if (PTR_ERR(ce) == -EAGAIN)
-				goto again;
-			break;
-		}
 		bh = sb_bread(inode->i_sb, ce->e_block);
 		if (!bh) {
 			EXT4_ERROR_INODE(inode, "block %lu read error",
@@ -1671,7 +1669,7 @@
 			return bh;
 		}
 		brelse(bh);
-		ce = mb_cache_entry_find_next(ce, inode->i_sb->s_bdev, hash);
+		ce = mb2_cache_entry_find_next(ext4_mb_cache, ce);
 	}
 	return NULL;
 }
@@ -1746,15 +1744,15 @@
 
 #define	HASH_BUCKET_BITS	10
 
-struct mb_cache *
-ext4_xattr_create_cache(char *name)
+struct mb2_cache *
+ext4_xattr_create_cache(void)
 {
-	return mb_cache_create(name, HASH_BUCKET_BITS);
+	return mb2_cache_create(HASH_BUCKET_BITS);
 }
 
-void ext4_xattr_destroy_cache(struct mb_cache *cache)
+void ext4_xattr_destroy_cache(struct mb2_cache *cache)
 {
 	if (cache)
-		mb_cache_destroy(cache);
+		mb2_cache_destroy(cache);
 }
 
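This converts ext4's xattr block sharing from the old mbcache to mbcache2 (fs/mbcache2.c is newly added, so it does not appear in this diff). The new API keys an entry by (hash, block number) alone, with no block_device; entries are unhashed under the buffer lock when a block is freed or modified, which is what makes the hlist_bl_unhashed() re-check above reliable; references are dropped with mb2_cache_entry_put(). The whole lifecycle, condensed from the call sites above:

/* Sketch of the mbcache2 entry lifecycle as used by the xattr code above. */
static void xattr_cache_example(struct mb2_cache *cache, sector_t block,
				u32 hash)
{
	struct mb2_cache_entry *ce;

	/* publish a candidate block; -EBUSY means it is already cached */
	if (mb2_cache_entry_create(cache, GFP_NOFS, hash, block) == -EBUSY)
		return;

	/* walk all entries with this hash (collisions are possible) */
	for (ce = mb2_cache_entry_find_first(cache, hash); ce;
	     ce = mb2_cache_entry_find_next(cache, ce)) {
		if (ce->e_block == block) {
			mb2_cache_entry_touch(cache, ce);	/* bump in LRU */
			mb2_cache_entry_put(cache, ce);		/* drop our ref */
			break;
		}
	}

	/* unhash once the block is freed/rewritten (under buffer lock above) */
	mb2_cache_entry_delete_block(cache, hash, block);
}
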
diff -ruw linux-4.4.115/fs/ext4/xattr.h linux-4.4.115-fbx/fs/ext4/xattr.h
--- linux-4.4.115/fs/ext4/xattr.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/fs/ext4/xattr.h	2019-10-29 09:26:25.269219090 +0100
@@ -108,7 +108,6 @@
 extern int ext4_xattr_set_handle(handle_t *, struct inode *, int, const char *, const void *, size_t, int);
 
 extern void ext4_xattr_delete_inode(handle_t *, struct inode *);
-extern void ext4_xattr_put_super(struct super_block *);
 
 extern int ext4_expand_extra_isize_ea(struct inode *inode, int new_extra_isize,
 			    struct ext4_inode *raw_inode, handle_t *handle);
@@ -124,8 +123,8 @@
 				       struct ext4_xattr_info *i,
 				       struct ext4_xattr_ibody_find *is);
 
-extern struct mb_cache *ext4_xattr_create_cache(char *name);
-extern void ext4_xattr_destroy_cache(struct mb_cache *);
+extern struct mb2_cache *ext4_xattr_create_cache(void);
+extern void ext4_xattr_destroy_cache(struct mb2_cache *);
 
 #ifdef CONFIG_EXT4_FS_SECURITY
 extern int ext4_init_security(handle_t *handle, struct inode *inode,
diff -ruw linux-4.4.115/fs/f2fs/Kconfig linux-4.4.115-fbx/fs/f2fs/Kconfig
--- linux-4.4.115/fs/f2fs/Kconfig	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/fs/f2fs/Kconfig	2019-01-22 16:16:27.835285635 +0100
@@ -1,6 +1,8 @@
 config F2FS_FS
 	tristate "F2FS filesystem support"
 	depends on BLOCK
+	select CRYPTO
+	select CRYPTO_CRC32
 	help
 	  F2FS is based on Log-structured File System (LFS), which supports
 	  versatile "flash-friendly" features. The design has been focused on
@@ -76,15 +78,7 @@
 	bool "F2FS Encryption"
 	depends on F2FS_FS
 	depends on F2FS_FS_XATTR
-	select CRYPTO_AES
-	select CRYPTO_CBC
-	select CRYPTO_ECB
-	select CRYPTO_XTS
-	select CRYPTO_CTS
-	select CRYPTO_CTR
-	select CRYPTO_SHA256
-	select KEYS
-	select ENCRYPTED_KEYS
+	select FS_ENCRYPTION
 	help
 	  Enable encryption of f2fs files and directories.  This
 	  feature is similar to ecryptfs, but it is more memory
@@ -100,3 +94,11 @@
 	  information and block IO patterns in the filesystem level.
 
 	  If unsure, say N.
+
+config F2FS_FAULT_INJECTION
+	bool "F2FS fault injection facility"
+	depends on F2FS_FS
+	help
+	  Test F2FS to inject faults such as ENOMEM, ENOSPC, and so on.
+
+	  If unsure, say N.
diff -ruw linux-4.4.115/fs/f2fs/Makefile linux-4.4.115-fbx/fs/f2fs/Makefile
--- linux-4.4.115/fs/f2fs/Makefile	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/fs/f2fs/Makefile	2019-01-22 16:16:27.835285635 +0100
@@ -2,10 +2,8 @@
 
 f2fs-y		:= dir.o file.o inode.o namei.o hash.o super.o inline.o
 f2fs-y		+= checkpoint.o gc.o data.o node.o segment.o recovery.o
-f2fs-y		+= shrinker.o extent_cache.o
+f2fs-y		+= shrinker.o extent_cache.o sysfs.o
 f2fs-$(CONFIG_F2FS_STAT_FS) += debug.o
 f2fs-$(CONFIG_F2FS_FS_XATTR) += xattr.o
 f2fs-$(CONFIG_F2FS_FS_POSIX_ACL) += acl.o
 f2fs-$(CONFIG_F2FS_IO_TRACE) += trace.o
-f2fs-$(CONFIG_F2FS_FS_ENCRYPTION) += crypto_policy.o crypto.o \
-		crypto_key.o crypto_fname.o
diff -ruw linux-4.4.115/fs/fat/fatent.c linux-4.4.115-fbx/fs/fat/fatent.c
--- linux-4.4.115/fs/fat/fatent.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/fs/fat/fatent.c	2019-10-29 09:26:25.285219247 +0100
@@ -92,7 +92,8 @@
 err_brelse:
 	brelse(bhs[0]);
 err:
-	fat_msg(sb, KERN_ERR, "FAT read failed (blocknr %llu)", (llu)blocknr);
+	fat_msg_ratelimit(sb, KERN_ERR,
+			"FAT read failed (blocknr %llu)", (llu)blocknr);
 	return -EIO;
 }
 
@@ -105,8 +106,8 @@
 	fatent->fat_inode = MSDOS_SB(sb)->fat_inode;
 	fatent->bhs[0] = sb_bread(sb, blocknr);
 	if (!fatent->bhs[0]) {
-		fat_msg(sb, KERN_ERR, "FAT read failed (blocknr %llu)",
-		       (llu)blocknr);
+		fat_msg_ratelimit(sb, KERN_ERR,
+			"FAT read failed (blocknr %llu)", (llu)blocknr);
 		return -EIO;
 	}
 	fatent->nr_bhs = 1;
diff -ruw linux-4.4.115/fs/fat/inode.c linux-4.4.115-fbx/fs/fat/inode.c
--- linux-4.4.115/fs/fat/inode.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/fs/fat/inode.c	2019-10-29 09:26:25.289219286 +0100
@@ -760,8 +760,9 @@
 	fat_get_blknr_offset(sbi, i_pos, &blocknr, &offset);
 	bh = sb_bread(sb, blocknr);
 	if (!bh) {
-		fat_msg(sb, KERN_ERR, "unable to read inode block "
-		       "for updating (i_pos %lld)", i_pos);
+		fat_msg_ratelimit(sb, KERN_ERR,
+			"unable to read inode block for updating (i_pos %lld)",
+			i_pos);
 		return -EIO;
 	}
 	spin_lock(&sbi->inode_hash_lock);
diff -ruw linux-4.4.115/fs/file_table.c linux-4.4.115-fbx/fs/file_table.c
--- linux-4.4.115/fs/file_table.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/fs/file_table.c	2019-01-22 16:16:27.851285780 +0100
@@ -41,6 +41,144 @@
 
 static struct percpu_counter nr_files __cacheline_aligned_in_smp;
 
+#ifdef CONFIG_FILE_TABLE_DEBUG
+#include <linux/hashtable.h>
+#include <mount.h>
+static DEFINE_MUTEX(global_files_lock);
+static DEFINE_HASHTABLE(global_files_hashtable, 10);
+
+struct global_filetable_lookup_key {
+	struct work_struct work;
+	uintptr_t value;
+};
+
+void global_filetable_print_warning_once(void)
+{
+	pr_err_once("\n**********************************************************\n");
+	pr_err_once("**   NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE   **\n");
+	pr_err_once("**                                                      **\n");
+	pr_err_once("**      VFS FILE TABLE DEBUG is enabled .               **\n");
+	pr_err_once("**  Allocating extra memory and slowing access to files **\n");
+	pr_err_once("**                                                      **\n");
+	pr_err_once("** This means that this is a DEBUG kernel and it is     **\n");
+	pr_err_once("** unsafe for production use.                           **\n");
+	pr_err_once("**                                                      **\n");
+	pr_err_once("** If you see this message and you are not debugging    **\n");
+	pr_err_once("** the kernel, report this immediately to your vendor!  **\n");
+	pr_err_once("**                                                      **\n");
+	pr_err_once("**   NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE   **\n");
+	pr_err_once("**********************************************************\n");
+}
+
+void global_filetable_add(struct file *filp)
+{
+	struct mount *mnt;
+
+	if (filp->f_path.dentry->d_iname == NULL ||
+	    strlen(filp->f_path.dentry->d_iname) == 0)
+		return;
+
+	mnt = real_mount(filp->f_path.mnt);
+
+	mutex_lock(&global_files_lock);
+	hash_add(global_files_hashtable, &filp->f_hash, (uintptr_t)mnt);
+	mutex_unlock(&global_files_lock);
+}
+
+void global_filetable_del(struct file *filp)
+{
+	mutex_lock(&global_files_lock);
+	hash_del(&filp->f_hash);
+	mutex_unlock(&global_files_lock);
+}
+
+static void global_print_file(struct file *filp, char *path_buffer, int *count)
+{
+	char *pathname;
+
+	pathname = d_path(&filp->f_path, path_buffer, PAGE_SIZE);
+	if (IS_ERR(pathname))
+		pr_err("VFS: File %d Address : %pa partial filename: %s ref_count=%ld\n",
+			++(*count), &filp, filp->f_path.dentry->d_iname,
+			atomic_long_read(&filp->f_count));
+	else
+		pr_err("VFS: File %d Address : %pa full filepath: %s ref_count=%ld\n",
+			++(*count), &filp, pathname,
+			atomic_long_read(&filp->f_count));
+}
+
+static void global_filetable_print(uintptr_t lookup_mnt)
+{
+	struct hlist_node *tmp;
+	struct file *filp;
+	struct mount *mnt;
+	int index;
+	int count = 0;
+	char *path_buffer = (char *)__get_free_page(GFP_TEMPORARY);
+
+	if (!path_buffer)
+		return;
+
+	mutex_lock(&global_files_lock);
+	pr_err("\n**********************************************************\n");
+	pr_err("**   NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE   **\n");
+
+	pr_err("\n");
+	pr_err("VFS: The following files hold a reference to the mount\n");
+	pr_err("\n");
+	hash_for_each_possible_safe(global_files_hashtable, filp, tmp, f_hash,
+				    lookup_mnt) {
+		mnt = real_mount(filp->f_path.mnt);
+		if ((uintptr_t)mnt == lookup_mnt)
+			global_print_file(filp, path_buffer, &count);
+	}
+	pr_err("\n");
+	pr_err("VFS: Found total of %d open files\n", count);
+	pr_err("\n");
+
+	count = 0;
+	pr_err("\n");
+	pr_err("VFS: The following files need to cleaned up\n");
+	pr_err("\n");
+	hash_for_each_safe(global_files_hashtable, index, tmp, filp, f_hash) {
+		if (atomic_long_read(&filp->f_count) == 0)
+			global_print_file(filp, path_buffer, &count);
+	}
+
+	pr_err("\n");
+	pr_err("VFS: Found total of %d files awaiting clean-up\n", count);
+	pr_err("\n");
+	pr_err("**   NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE   **\n");
+	pr_err("\n**********************************************************\n");
+
+	mutex_unlock(&global_files_lock);
+	free_page((unsigned long)path_buffer);
+}
+
+static void global_filetable_print_work_fn(struct work_struct *work)
+{
+	struct global_filetable_lookup_key *key;
+	uintptr_t lookup_mnt;
+
+	key = container_of(work, struct global_filetable_lookup_key, work);
+	lookup_mnt = key->value;
+	kfree(key);
+	global_filetable_print(lookup_mnt);
+}
+
+void global_filetable_delayed_print(struct mount *mnt)
+{
+	struct global_filetable_lookup_key *key;
+
+	key = kzalloc(sizeof(*key), GFP_KERNEL);
+	if (key == NULL)
+		return;
+	key->value = (uintptr_t)mnt;
+	INIT_WORK(&key->work, global_filetable_print_work_fn);
+	schedule_work(&key->work);
+}
+#endif /* CONFIG_FILE_TABLE_DEBUG */
+
 static void file_free_rcu(struct rcu_head *head)
 {
 	struct file *f = container_of(head, struct file, f_u.fu_rcuhead);
@@ -219,6 +357,7 @@
 		put_write_access(inode);
 		__mnt_drop_write(mnt);
 	}
+	global_filetable_del(file);
 	file->f_path.dentry = NULL;
 	file->f_path.mnt = NULL;
 	file->f_inode = NULL;
@@ -314,6 +453,7 @@
 	filp_cachep = kmem_cache_create("filp", sizeof(struct file), 0,
 			SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL);
 	percpu_counter_init(&nr_files, 0, GFP_KERNEL);
+	global_filetable_print_warning_once();
 }
 
 /*
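
The debug table hashes each opened file by its struct mount (global_filetable_add() is wired into the open path in fs/namei.c later in this patch), so a busy unmount can enumerate exactly which files still pin the mount. The dump runs from a workqueue, presumably because the unmount path holds locks that the mutex and d_path() calls here must not nest under. The real trigger lives in fs/namespace.c, outside this excerpt; roughly:

/* Hypothetical caller; mount_is_busy() is a stand-in for the real check. */
static int example_umount_busy(struct mount *mnt)
{
	if (mount_is_busy(mnt)) {
		/* async dump of every file referencing mnt; compiles away
		 * without CONFIG_FILE_TABLE_DEBUG */
		global_filetable_delayed_print(mnt);
		return -EBUSY;
	}
	return 0;
}
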
diff -ruw linux-4.4.115/fs/fs_struct.c linux-4.4.115-fbx/fs/fs_struct.c
--- linux-4.4.115/fs/fs_struct.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/fs/fs_struct.c	2019-01-22 16:16:27.855285816 +0100
@@ -44,6 +44,7 @@
 	if (old_pwd.dentry)
 		path_put(&old_pwd);
 }
+EXPORT_SYMBOL(set_fs_pwd);
 
 static inline int replace_path(struct path *p, const struct path *old, const struct path *new)
 {
@@ -89,6 +90,7 @@
 	path_put(&fs->pwd);
 	kmem_cache_free(fs_cachep, fs);
 }
+EXPORT_SYMBOL(free_fs_struct);
 
 void exit_fs(struct task_struct *tsk)
 {
@@ -127,6 +129,7 @@
 	}
 	return fs;
 }
+EXPORT_SYMBOL_GPL(copy_fs_struct);
 
 int unshare_fs_struct(void)
 {
diff -ruw linux-4.4.115/fs/fs-writeback.c linux-4.4.115-fbx/fs/fs-writeback.c
--- linux-4.4.115/fs/fs-writeback.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/fs/fs-writeback.c	2019-10-29 09:26:25.289219286 +0100
@@ -2069,7 +2069,7 @@
 	    (dirtytime && (inode->i_state & I_DIRTY_INODE)))
 		return;
 
-	if (unlikely(block_dump))
+	if (unlikely(block_dump > 1))
 		block_dump___mark_inode_dirty(inode);
 
 	spin_lock(&inode->i_lock);
diff -ruw linux-4.4.115/fs/fuse/dev.c linux-4.4.115-fbx/fs/fuse/dev.c
--- linux-4.4.115/fs/fuse/dev.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/fs/fuse/dev.c	2019-10-29 09:26:25.293219325 +0100
@@ -7,18 +7,21 @@
 */
 
 #include "fuse_i.h"
+#include "fuse_passthrough.h"
 
 #include <linux/init.h>
 #include <linux/module.h>
 #include <linux/poll.h>
 #include <linux/uio.h>
 #include <linux/miscdevice.h>
+#include <linux/namei.h>
 #include <linux/pagemap.h>
 #include <linux/file.h>
 #include <linux/slab.h>
 #include <linux/pipe_fs_i.h>
 #include <linux/swap.h>
 #include <linux/splice.h>
+#include <linux/freezer.h>
 
 MODULE_ALIAS_MISCDEV(FUSE_MINOR);
 MODULE_ALIAS("devname:fuse");
@@ -477,7 +480,9 @@
 	 * Either request is already in userspace, or it was forced.
 	 * Wait it out.
 	 */
-	wait_event(req->waitq, test_bit(FR_FINISHED, &req->flags));
+	while (!test_bit(FR_FINISHED, &req->flags))
+		wait_event_freezable(req->waitq,
+				test_bit(FR_FINISHED, &req->flags));
 }
 
 static void __fuse_request_send(struct fuse_conn *fc, struct fuse_req *req)
@@ -570,10 +575,15 @@
 	       args->out.numargs * sizeof(struct fuse_arg));
 	fuse_request_send(fc, req);
 	ret = req->out.h.error;
-	if (!ret && args->out.argvar) {
-		BUG_ON(args->out.numargs != 1);
-		ret = req->out.args[0].size;
-	}
+	if (!ret) {
+		if (args->out.argvar) {
+			BUG_ON(args->out.numargs != 1);
+			ret = req->out.args[0].size;
+		}
+
+		if (req->passthrough_filp != NULL)
+			args->out.passthrough_filp = req->passthrough_filp;
+	}
 	fuse_put_request(fc, req);
 
 	return ret;
@@ -1936,8 +1946,15 @@
 		cs->move_pages = 0;
 
 	err = copy_out_args(cs, &req->out, nbytes);
+	if (req->in.h.opcode == FUSE_CANONICAL_PATH) {
+		char *path = (char *)req->out.args[0].value;
+
+		path[req->out.args[0].size - 1] = 0;
+		req->out.h.error = kern_path(path, 0, req->canonical_path);
+	}
 	fuse_copy_finish(cs);
 
+	fuse_setup_passthrough(fc, req);
 	spin_lock(&fpq->lock);
 	clear_bit(FR_LOCKED, &req->flags);
 	if (!fpq->connected)
diff -ruw linux-4.4.115/fs/fuse/dir.c linux-4.4.115-fbx/fs/fuse/dir.c
--- linux-4.4.115/fs/fuse/dir.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/fs/fuse/dir.c	2019-10-29 09:26:25.293219325 +0100
@@ -267,6 +267,52 @@
 	goto out;
 }
 
+/*
+ * Get the canonical path. Since we must translate to a path, this must be done
+ * in the context of the userspace daemon; however, the userspace daemon cannot
+ * look up paths on its own. Instead, we handle the lookup as a special case
+ * inside the write request.
+ */
+static void fuse_dentry_canonical_path(const struct path *path,
+				       struct path *canonical_path)
+{
+	struct inode *inode = path->dentry->d_inode;
+	struct fuse_conn *fc = get_fuse_conn(inode);
+	struct fuse_req *req;
+	int err;
+	char *path_name;
+
+	req = fuse_get_req(fc, 1);
+	err = PTR_ERR(req);
+	if (IS_ERR(req))
+		goto default_path;
+
+	path_name = (char *)__get_free_page(GFP_KERNEL);
+	if (!path_name) {
+		fuse_put_request(fc, req);
+		goto default_path;
+	}
+
+	req->in.h.opcode = FUSE_CANONICAL_PATH;
+	req->in.h.nodeid = get_node_id(inode);
+	req->in.numargs = 0;
+	req->out.numargs = 1;
+	req->out.args[0].size = PATH_MAX;
+	req->out.args[0].value = path_name;
+	req->canonical_path = canonical_path;
+	req->out.argvar = 1;
+	fuse_request_send(fc, req);
+	err = req->out.h.error;
+	fuse_put_request(fc, req);
+	free_page((unsigned long)path_name);
+	if (!err)
+		return;
+default_path:
+	canonical_path->dentry = path->dentry;
+	canonical_path->mnt = path->mnt;
+	path_get(canonical_path);
+}
+
 static int invalid_nodeid(u64 nodeid)
 {
 	return !nodeid || nodeid == FUSE_ROOT_ID;
@@ -274,6 +320,7 @@
 
 const struct dentry_operations fuse_dentry_operations = {
 	.d_revalidate	= fuse_dentry_revalidate,
+	.d_canonical_path = fuse_dentry_canonical_path,
 };
 
 int fuse_valid_type(int m)
@@ -428,6 +475,7 @@
 	args.out.args[0].value = &outentry;
 	args.out.args[1].size = sizeof(outopen);
 	args.out.args[1].value = &outopen;
+	args.out.passthrough_filp = NULL;
 	err = fuse_simple_request(fc, &args);
 	if (err)
 		goto out_free_ff;
@@ -439,6 +485,8 @@
 	ff->fh = outopen.fh;
 	ff->nodeid = outentry.nodeid;
 	ff->open_flags = outopen.open_flags;
+	if (args.out.passthrough_filp != NULL)
+		ff->passthrough_filp = args.out.passthrough_filp;
 	inode = fuse_iget(dir->i_sb, outentry.nodeid, outentry.generation,
 			  &outentry.attr, entry_attr_timeout(&outentry), 0);
 	if (!inode) {
diff -ruw linux-4.4.115/fs/fuse/file.c linux-4.4.115-fbx/fs/fuse/file.c
--- linux-4.4.115/fs/fuse/file.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/fs/fuse/file.c	2019-10-29 09:26:25.293219325 +0100
@@ -7,6 +7,7 @@
 */
 
 #include "fuse_i.h"
+#include "fuse_passthrough.h"
 
 #include <linux/pagemap.h>
 #include <linux/slab.h>
@@ -21,8 +22,10 @@
 static const struct file_operations fuse_direct_io_file_operations;
 
 static int fuse_send_open(struct fuse_conn *fc, u64 nodeid, struct file *file,
-			  int opcode, struct fuse_open_out *outargp)
+			  int opcode, struct fuse_open_out *outargp,
+			  struct file **passthrough_filpp)
 {
+	int ret_val;
 	struct fuse_open_in inarg;
 	FUSE_ARGS(args);
 
@@ -38,8 +41,14 @@
 	args.out.numargs = 1;
 	args.out.args[0].size = sizeof(*outargp);
 	args.out.args[0].value = outargp;
+	args.out.passthrough_filp = NULL;
 
-	return fuse_simple_request(fc, &args);
+	ret_val = fuse_simple_request(fc, &args);
+
+	if (args.out.passthrough_filp != NULL)
+		*passthrough_filpp = args.out.passthrough_filp;
+
+	return ret_val;
 }
 
 struct fuse_file *fuse_file_alloc(struct fuse_conn *fc)
@@ -50,6 +59,10 @@
 	if (unlikely(!ff))
 		return NULL;
 
+	ff->passthrough_filp = NULL;
+	ff->passthrough_enabled = 0;
+	if (fc->passthrough)
+		ff->passthrough_enabled = 1;
 	ff->fc = fc;
 	ff->reserved_req = fuse_request_alloc(0);
 	if (unlikely(!ff->reserved_req)) {
@@ -118,6 +131,7 @@
 		 bool isdir)
 {
 	struct fuse_file *ff;
+	struct file *passthrough_filp = NULL;
 	int opcode = isdir ? FUSE_OPENDIR : FUSE_OPEN;
 
 	ff = fuse_file_alloc(fc);
@@ -130,10 +144,12 @@
 		struct fuse_open_out outarg;
 		int err;
 
-		err = fuse_send_open(fc, nodeid, file, opcode, &outarg);
+		err = fuse_send_open(fc, nodeid, file, opcode, &outarg,
+				     &(passthrough_filp));
 		if (!err) {
 			ff->fh = outarg.fh;
 			ff->open_flags = outarg.open_flags;
+			ff->passthrough_filp = passthrough_filp;
 
 		} else if (err != -ENOSYS || isdir) {
 			fuse_file_free(ff);
@@ -253,6 +269,8 @@
 	if (unlikely(!ff))
 		return;
 
+	fuse_passthrough_release(ff);
+
 	req = ff->reserved_req;
 	fuse_prepare_release(ff, file->f_flags, opcode);
 
@@ -883,6 +901,43 @@
 		return -EIO;
 	}
 
+#ifdef CONFIG_CMA
+	if (is_cma_pageblock(page)) {
+		struct page *oldpage = page, *newpage;
+		int err;
+
+		/* make sure that old page is not free in-between the calls */
+		page_cache_get(oldpage);
+
+		newpage = alloc_page(GFP_HIGHUSER);
+		if (!newpage) {
+			page_cache_release(oldpage);
+			return -ENOMEM;
+		}
+
+		err = replace_page_cache_page(oldpage, newpage, GFP_KERNEL);
+		if (err) {
+			__free_page(newpage);
+			page_cache_release(oldpage);
+			return err;
+		}
+
+		/*
+		 * Decrement the count on new page to make page cache the only
+		 * owner of it
+		 */
+		lock_page(newpage);
+		put_page(newpage);
+
+		lru_cache_add_file(newpage);
+
+		/* finally release the old page and swap pointers */
+		unlock_page(oldpage);
+		page_cache_release(oldpage);
+		page = newpage;
+	}
+#endif
+
 	page_cache_get(page);
 	req->pages[req->num_pages] = page;
 	req->page_descs[req->num_pages].length = PAGE_SIZE;
@@ -928,8 +983,10 @@
 
 static ssize_t fuse_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
 {
+	ssize_t ret_val;
 	struct inode *inode = iocb->ki_filp->f_mapping->host;
 	struct fuse_conn *fc = get_fuse_conn(inode);
+	struct fuse_file *ff = iocb->ki_filp->private_data;
 
 	/*
 	 * In auto invalidate mode, always update attributes on read.
@@ -944,7 +1001,12 @@
 			return err;
 	}
 
-	return generic_file_read_iter(iocb, to);
+	if (ff && ff->passthrough_enabled && ff->passthrough_filp)
+		ret_val = fuse_passthrough_read_iter(iocb, to);
+	else
+		ret_val = generic_file_read_iter(iocb, to);
+
+	return ret_val;
 }
 
 static void fuse_write_fill(struct fuse_req *req, struct fuse_file *ff,
@@ -1176,6 +1238,7 @@
 static ssize_t fuse_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
 {
 	struct file *file = iocb->ki_filp;
+	struct fuse_file *ff = file->private_data;
 	struct address_space *mapping = file->f_mapping;
 	ssize_t written = 0;
 	ssize_t written_buffered = 0;
@@ -1209,8 +1272,14 @@
 	if (err)
 		goto out;
 
+	if (ff && ff->passthrough_enabled && ff->passthrough_filp) {
+		written = fuse_passthrough_write_iter(iocb, from);
+		goto out;
+	}
+
 	if (iocb->ki_flags & IOCB_DIRECT) {
 		loff_t pos = iocb->ki_pos;
+
 		written = generic_file_direct_write(iocb, from, pos);
 		if (written < 0 || !iov_iter_count(from))
 			goto out;
@@ -2081,6 +2150,9 @@
 
 static int fuse_file_mmap(struct file *file, struct vm_area_struct *vma)
 {
+	struct fuse_file *ff = file->private_data;
+
+	ff->passthrough_enabled = 0;
 	if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_MAYWRITE))
 		fuse_link_write_file(file);
 
@@ -2091,6 +2163,9 @@
 
 static int fuse_direct_mmap(struct file *file, struct vm_area_struct *vma)
 {
+	struct fuse_file *ff = file->private_data;
+
+	ff->passthrough_enabled = 0;
 	/* Can't provide the coherency needed for MAP_SHARED */
 	if (vma->vm_flags & VM_MAYSHARE)
 		return -ENODEV;
diff -ruw linux-4.4.115/fs/fuse/fuse_i.h linux-4.4.115-fbx/fs/fuse/fuse_i.h
--- linux-4.4.115/fs/fuse/fuse_i.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/fs/fuse/fuse_i.h	2019-10-29 09:26:25.293219325 +0100
@@ -158,6 +158,10 @@
 
 	/** Has flock been performed on this file? */
 	bool flock:1;
+
+	/* the read write file */
+	struct file *passthrough_filp;
+	bool passthrough_enabled;
 };
 
 /** One input argument of a request */
@@ -237,6 +241,7 @@
 		unsigned argvar:1;
 		unsigned numargs;
 		struct fuse_arg args[2];
+		struct file *passthrough_filp;
 	} out;
 };
 
@@ -372,6 +377,9 @@
 	/** Inode used in the request or NULL */
 	struct inode *inode;
 
+	/** Path used for completing d_canonical_path */
+	struct path *canonical_path;
+
 	/** AIO control block */
 	struct fuse_io_priv *io;
 
@@ -383,6 +391,9 @@
 
 	/** Request is stolen from fuse_file->reserved_req */
 	struct file *stolen_file;
+
+	/** fuse passthrough file */
+	struct file *passthrough_filp;
 };
 
 struct fuse_iqueue {
@@ -540,6 +551,9 @@
 	/** write-back cache policy (default is write-through) */
 	unsigned writeback_cache:1;
 
+	/** passthrough IO. */
+	unsigned passthrough:1;
+
 	/*
 	 * The following bitfields are only for optimization purposes
 	 * and hence races in setting them will not cause malfunction
diff -ruw linux-4.4.115/fs/fuse/inode.c linux-4.4.115-fbx/fs/fuse/inode.c
--- linux-4.4.115/fs/fuse/inode.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/fs/fuse/inode.c	2019-10-29 09:26:25.293219325 +0100
@@ -860,6 +860,7 @@
 		fc->conn_error = 1;
 	else {
 		unsigned long ra_pages;
+		struct super_block *sb = fc->sb;
 
 		process_init_limits(fc, arg);
 
@@ -898,6 +899,13 @@
 				fc->async_dio = 1;
 			if (arg->flags & FUSE_WRITEBACK_CACHE)
 				fc->writeback_cache = 1;
+			if (arg->flags & FUSE_PASSTHROUGH) {
+				fc->passthrough = 1;
+				/* Prevent further stacking */
+				sb->s_stack_depth = FILESYSTEM_MAX_STACK_DEPTH;
+				pr_info("FUSE: Pass through is enabled [%s : %d]!\n",
+					current->comm, current->pid);
+			}
 			if (arg->time_gran && arg->time_gran <= 1000000000)
 				fc->sb->s_time_gran = arg->time_gran;
 		} else {
diff -ruw linux-4.4.115/fs/fuse/Makefile linux-4.4.115-fbx/fs/fuse/Makefile
--- linux-4.4.115/fs/fuse/Makefile	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/fs/fuse/Makefile	2019-01-22 16:16:27.855285816 +0100
@@ -5,4 +5,4 @@
 obj-$(CONFIG_FUSE_FS) += fuse.o
 obj-$(CONFIG_CUSE) += cuse.o
 
-fuse-objs := dev.o dir.o file.o inode.o control.o
+fuse-objs := dev.o dir.o file.o inode.o control.o passthrough.o
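
fs/fuse/passthrough.c is another added file that diff -ruw does not list. From the hooks in dev.c and file.c above, its job is clear: once the daemon returns a lower struct file at open time (ff->passthrough_filp), reads and writes bypass the FUSE protocol and go straight to that file. A minimal sketch of the read side, assuming it forwards through the stock VFS iterator helper:

/* Hypothetical sketch of the passthrough read path (file not shown). */
ssize_t fuse_passthrough_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct fuse_file *ff = iocb->ki_filp->private_data;
	struct file *lower = ff->passthrough_filp;

	if (!lower)
		return -EIO;

	/* forward to the lower filesystem, advancing the upper offset */
	return vfs_iter_read(lower, to, &iocb->ki_pos);
}
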
diff -ruw linux-4.4.115/fs/inode.c linux-4.4.115-fbx/fs/inode.c
--- linux-4.4.115/fs/inode.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/fs/inode.c	2019-10-29 09:26:25.305219443 +0100
@@ -154,6 +154,12 @@
 	inode->i_rdev = 0;
 	inode->dirtied_when = 0;
 
+#ifdef CONFIG_CGROUP_WRITEBACK
+	inode->i_wb_frn_winner = 0;
+	inode->i_wb_frn_avg_time = 0;
+	inode->i_wb_frn_history = 0;
+#endif
+
 	if (security_inode_alloc(inode))
 		goto out;
 	spin_lock_init(&inode->i_lock);
@@ -1715,7 +1721,7 @@
 }
 EXPORT_SYMBOL(dentry_needs_remove_privs);
 
-static int __remove_privs(struct dentry *dentry, int kill)
+static int __remove_privs(struct vfsmount *mnt, struct dentry *dentry, int kill)
 {
 	struct iattr newattrs;
 
@@ -1724,7 +1730,7 @@
 	 * Note we call this on write, so notify_change will not
 	 * encounter any conflicting delegations:
 	 */
-	return notify_change(dentry, &newattrs, NULL);
+	return notify_change2(mnt, dentry, &newattrs, NULL);
 }
 
 /*
@@ -1746,7 +1752,7 @@
 	if (kill < 0)
 		return kill;
 	if (kill)
-		error = __remove_privs(dentry, kill);
+		error = __remove_privs(file->f_path.mnt, dentry, kill);
 	if (!error)
 		inode_has_no_xattr(inode);
 
diff -ruw linux-4.4.115/fs/internal.h linux-4.4.115-fbx/fs/internal.h
--- linux-4.4.115/fs/internal.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/fs/internal.h	2019-10-29 09:26:25.305219443 +0100
@@ -84,9 +84,11 @@
  * super.c
  */
 extern int do_remount_sb(struct super_block *, int, void *, int);
+extern int do_remount_sb2(struct vfsmount *, struct super_block *, int,
+								void *, int);
 extern bool trylock_super(struct super_block *sb);
 extern struct dentry *mount_fs(struct file_system_type *,
-			       int, const char *, void *);
+			       int, const char *, struct vfsmount *, void *);
 extern struct super_block *user_get_super(dev_t);
 
 /*
@@ -151,3 +153,29 @@
  * fs/nsfs.c
  */
 extern struct dentry_operations ns_dentry_operations;
+
+#ifdef CONFIG_FILE_TABLE_DEBUG
+void global_filetable_print_warning_once(void);
+void global_filetable_add(struct file *filp);
+void global_filetable_del(struct file *filp);
+void global_filetable_delayed_print(struct mount *mnt);
+
+#else /* i.e NOT CONFIG_FILE_TABLE_DEBUG */
+
+static inline void global_filetable_print_warning_once(void)
+{
+}
+
+static inline void global_filetable_add(struct file *filp)
+{
+}
+
+static inline void global_filetable_del(struct file *filp)
+{
+}
+
+static inline void global_filetable_delayed_print(struct mount *mnt)
+{
+}
+
+#endif /* CONFIG_FILE_TABLE_DEBUG */
diff -ruw linux-4.4.115/fs/jbd2/journal.c linux-4.4.115-fbx/fs/jbd2/journal.c
--- linux-4.4.115/fs/jbd2/journal.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/fs/jbd2/journal.c	2019-10-29 09:26:25.309219482 +0100
@@ -275,11 +275,11 @@
 	goto loop;
 
 end_loop:
-	write_unlock(&journal->j_state_lock);
 	del_timer_sync(&journal->j_commit_timer);
 	journal->j_task = NULL;
 	wake_up(&journal->j_wait_done_commit);
 	jbd_debug(1, "Journal thread exiting.\n");
+	write_unlock(&journal->j_state_lock);
 	return 0;
 }
 
diff -ruw linux-4.4.115/fs/Kconfig linux-4.4.115-fbx/fs/Kconfig
--- linux-4.4.115/fs/Kconfig	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/fs/Kconfig	2019-01-25 20:32:47.247752405 +0100
@@ -73,6 +73,8 @@
           for filesystems like NFS and for the flock() system
           call. Disabling this option saves about 11k.
 
+source "fs/crypto/Kconfig"
+
 source "fs/notify/Kconfig"
 
 source "fs/quota/Kconfig"
@@ -102,6 +104,7 @@
 
 source "fs/fat/Kconfig"
 source "fs/ntfs/Kconfig"
+source "fs/exfat/Kconfig"
 
 endmenu
 endif # BLOCK
@@ -199,6 +202,7 @@
 source "fs/adfs/Kconfig"
 source "fs/affs/Kconfig"
 source "fs/ecryptfs/Kconfig"
+source "fs/sdcardfs/Kconfig"
 source "fs/hfs/Kconfig"
 source "fs/hfsplus/Kconfig"
 source "fs/befs/Kconfig"
@@ -281,4 +285,9 @@
 source "fs/nls/Kconfig"
 source "fs/dlm/Kconfig"
 
+config FILE_TABLE_DEBUG
+	bool "Enable FILE_TABLE_DEBUG"
+	help
+	  This option enables debugging of open files using a global file
+	  table.
+
 endmenu
diff -ruw linux-4.4.115/fs/Makefile linux-4.4.115-fbx/fs/Makefile
--- linux-4.4.115/fs/Makefile	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/fs/Makefile	2019-01-25 20:32:47.247752405 +0100
@@ -30,6 +30,7 @@
 obj-$(CONFIG_USERFAULTFD)	+= userfaultfd.o
 obj-$(CONFIG_AIO)               += aio.o
 obj-$(CONFIG_FS_DAX)		+= dax.o
+obj-$(CONFIG_FS_ENCRYPTION)	+= crypto/
 obj-$(CONFIG_FILE_LOCKING)      += locks.o
 obj-$(CONFIG_COMPAT)		+= compat.o compat_ioctl.o
 obj-$(CONFIG_BINFMT_AOUT)	+= binfmt_aout.o
@@ -41,7 +42,7 @@
 obj-$(CONFIG_BINFMT_ELF_FDPIC)	+= binfmt_elf_fdpic.o
 obj-$(CONFIG_BINFMT_FLAT)	+= binfmt_flat.o
 
-obj-$(CONFIG_FS_MBCACHE)	+= mbcache.o
+obj-$(CONFIG_FS_MBCACHE)	+= mbcache.o mbcache2.o
 obj-$(CONFIG_FS_POSIX_ACL)	+= posix_acl.o
 obj-$(CONFIG_NFS_COMMON)	+= nfs_common/
 obj-$(CONFIG_COREDUMP)		+= coredump.o
@@ -81,6 +82,7 @@
 obj-$(CONFIG_HFSPLUS_FS)	+= hfsplus/ # Before hfs to find wrapped HFS+
 obj-$(CONFIG_HFS_FS)		+= hfs/
 obj-$(CONFIG_ECRYPT_FS)		+= ecryptfs/
+obj-$(CONFIG_SDCARD_FS)		+= sdcardfs/
 obj-$(CONFIG_VXFS_FS)		+= freevxfs/
 obj-$(CONFIG_NFS_FS)		+= nfs/
 obj-$(CONFIG_EXPORTFS)		+= exportfs/
@@ -126,3 +128,4 @@
 obj-$(CONFIG_CEPH_FS)		+= ceph/
 obj-$(CONFIG_PSTORE)		+= pstore/
 obj-$(CONFIG_EFIVAR_FS)		+= efivarfs/
+obj-$(CONFIG_EXFAT_FS)		+= exfat/
diff -ruw linux-4.4.115/fs/mbcache.c linux-4.4.115-fbx/fs/mbcache.c
--- linux-4.4.115/fs/mbcache.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/fs/mbcache.c	2019-01-22 16:16:27.911286324 +0100
@@ -222,8 +222,19 @@
 		 * then reacquire the lock in the proper order.
 		 */
 		spin_lock(&mb_cache_spinlock);
+		/*
+		 * Evaluate the conditions under global lock mb_cache_spinlock,
+		 * to check if mb_cache_entry_get() is running now
+		 * and has already deleted the entry from mb_cache_lru_list
+		 * and incremented ce->e_refcnt to prevent further additions
+		 * to mb_cache_lru_list.
+		 */
+		if (!(ce->e_used || ce->e_queued ||
+		      atomic_read(&ce->e_refcnt))) {
-		if (list_empty(&ce->e_lru_list))
-			list_add_tail(&ce->e_lru_list, &mb_cache_lru_list);
+			if (list_empty(&ce->e_lru_list))
+				list_add_tail(&ce->e_lru_list,
+					      &mb_cache_lru_list);
+		}
 		spin_unlock(&mb_cache_spinlock);
 	}
 	__spin_unlock_mb_cache_entry(ce);
@@ -262,7 +273,6 @@
 		list_del_init(&ce->e_lru_list);
 		if (ce->e_used || ce->e_queued || atomic_read(&ce->e_refcnt))
 			continue;
-		spin_unlock(&mb_cache_spinlock);
 		/* Prevent any find or get operation on the entry */
 		hlist_bl_lock(ce->e_block_hash_p);
 		hlist_bl_lock(ce->e_index_hash_p);
@@ -271,10 +281,10 @@
 			!list_empty(&ce->e_lru_list)) {
 			hlist_bl_unlock(ce->e_index_hash_p);
 			hlist_bl_unlock(ce->e_block_hash_p);
-			spin_lock(&mb_cache_spinlock);
 			continue;
 		}
 		__mb_cache_entry_unhash_unlock(ce);
+		spin_unlock(&mb_cache_spinlock);
 		list_add_tail(&ce->e_lru_list, &free_list);
 		spin_lock(&mb_cache_spinlock);
 	}
@@ -516,7 +526,6 @@
 				if (ce->e_used || ce->e_queued ||
 					atomic_read(&ce->e_refcnt))
 					continue;
-				spin_unlock(&mb_cache_spinlock);
 				/*
 				 * Prevent any find or get operation on the
 				 * entry.
@@ -530,13 +539,13 @@
 					hlist_bl_unlock(ce->e_index_hash_p);
 					hlist_bl_unlock(ce->e_block_hash_p);
 					l = &mb_cache_lru_list;
-					spin_lock(&mb_cache_spinlock);
 					continue;
 				}
 				mb_assert(list_empty(&ce->e_lru_list));
 				mb_assert(!(ce->e_used || ce->e_queued ||
 					atomic_read(&ce->e_refcnt)));
 				__mb_cache_entry_unhash_unlock(ce);
+				spin_unlock(&mb_cache_spinlock);
 				goto found;
 			}
 		}
@@ -670,6 +679,7 @@
 			   cache->c_bucket_bits);
 	block_hash_p = &cache->c_block_hash[bucket];
 	/* First serialize access to the block corresponding hash chain. */
+	spin_lock(&mb_cache_spinlock);
 	hlist_bl_lock(block_hash_p);
 	hlist_bl_for_each_entry(ce, l, block_hash_p, e_block_list) {
 		mb_assert(ce->e_block_hash_p == block_hash_p);
@@ -678,9 +688,11 @@
 			 * Prevent a free from removing the entry.
 			 */
 			atomic_inc(&ce->e_refcnt);
+			if (!list_empty(&ce->e_lru_list))
+				list_del_init(&ce->e_lru_list);
 			hlist_bl_unlock(block_hash_p);
+			spin_unlock(&mb_cache_spinlock);
 			__spin_lock_mb_cache_entry(ce);
-			atomic_dec(&ce->e_refcnt);
 			if (ce->e_used > 0) {
 				DEFINE_WAIT(wait);
 				while (ce->e_used > 0) {
@@ -695,13 +707,9 @@
 				finish_wait(&mb_cache_queue, &wait);
 			}
 			ce->e_used += 1 + MB_CACHE_WRITER;
+			atomic_dec(&ce->e_refcnt);
 			__spin_unlock_mb_cache_entry(ce);
 
-			if (!list_empty(&ce->e_lru_list)) {
-				spin_lock(&mb_cache_spinlock);
-				list_del_init(&ce->e_lru_list);
-				spin_unlock(&mb_cache_spinlock);
-			}
 			if (!__mb_cache_entry_is_block_hashed(ce)) {
 				__mb_cache_entry_release(ce);
 				return NULL;
@@ -710,6 +718,7 @@
 		}
 	}
 	hlist_bl_unlock(block_hash_p);
+	spin_unlock(&mb_cache_spinlock);
 	return NULL;
 }
 
diff -ruw linux-4.4.115/fs/mpage.c linux-4.4.115-fbx/fs/mpage.c
--- linux-4.4.115/fs/mpage.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/fs/mpage.c	2019-01-22 16:16:27.911286324 +0100
@@ -30,6 +30,14 @@
 #include <linux/cleancache.h>
 #include "internal.h"
 
+#define CREATE_TRACE_POINTS
+#include <trace/events/android_fs.h>
+
+EXPORT_TRACEPOINT_SYMBOL(android_fs_datawrite_start);
+EXPORT_TRACEPOINT_SYMBOL(android_fs_datawrite_end);
+EXPORT_TRACEPOINT_SYMBOL(android_fs_dataread_start);
+EXPORT_TRACEPOINT_SYMBOL(android_fs_dataread_end);
+
 /*
  * I/O completion handler for multipage BIOs.
  *
@@ -47,6 +55,16 @@
 	struct bio_vec *bv;
 	int i;
 
+	if (trace_android_fs_dataread_end_enabled() &&
+	    (bio_data_dir(bio) == READ)) {
+		struct page *first_page = bio->bi_io_vec[0].bv_page;
+
+		if (first_page != NULL)
+			trace_android_fs_dataread_end(first_page->mapping->host,
+						      page_offset(first_page),
+						      bio->bi_iter.bi_size);
+	}
+
 	bio_for_each_segment_all(bv, bio, i) {
 		struct page *page = bv->bv_page;
 		page_endio(page, bio_data_dir(bio), bio->bi_error);
@@ -57,6 +75,24 @@
 
 static struct bio *mpage_bio_submit(int rw, struct bio *bio)
 {
+	if (trace_android_fs_dataread_start_enabled() && (rw == READ)) {
+		struct page *first_page = bio->bi_io_vec[0].bv_page;
+
+		if (first_page != NULL) {
+			char *path, pathbuf[MAX_TRACE_PATHBUF_LEN];
+
+			path = android_fstrace_get_pathname(pathbuf,
+						    MAX_TRACE_PATHBUF_LEN,
+						    first_page->mapping->host);
+			trace_android_fs_dataread_start(
+				first_page->mapping->host,
+				page_offset(first_page),
+				bio->bi_iter.bi_size,
+				current->pid,
+				path,
+				current->comm);
+		}
+	}
 	bio->bi_end_io = mpage_end_io;
 	guard_bio_eod(rw, bio);
 	submit_bio(rw, bio);
diff -ruw linux-4.4.115/fs/namei.c linux-4.4.115-fbx/fs/namei.c
--- linux-4.4.115/fs/namei.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/fs/namei.c	2019-10-29 09:26:25.317219560 +0100
@@ -373,9 +373,11 @@
  * flag in inode->i_opflags, that says "this has not special
  * permission function, use the fast case".
  */
-static inline int do_inode_permission(struct inode *inode, int mask)
+static inline int do_inode_permission(struct vfsmount *mnt, struct inode *inode, int mask)
 {
 	if (unlikely(!(inode->i_opflags & IOP_FASTPERM))) {
+		if (likely(mnt && inode->i_op->permission2))
+			return inode->i_op->permission2(mnt, inode, mask);
 		if (likely(inode->i_op->permission))
 			return inode->i_op->permission(inode, mask);
 
@@ -399,7 +401,7 @@
  * This does not check for a read-only file system.  You probably want
  * inode_permission().
  */
-int __inode_permission(struct inode *inode, int mask)
+int __inode_permission2(struct vfsmount *mnt, struct inode *inode, int mask)
 {
 	int retval;
 
@@ -411,7 +413,7 @@
 			return -EACCES;
 	}
 
-	retval = do_inode_permission(inode, mask);
+	retval = do_inode_permission(mnt, inode, mask);
 	if (retval)
 		return retval;
 
@@ -419,7 +421,14 @@
 	if (retval)
 		return retval;
 
-	return security_inode_permission(inode, mask);
+	retval = security_inode_permission(inode, mask);
+	return retval;
+}
+EXPORT_SYMBOL(__inode_permission2);
+
+int __inode_permission(struct inode *inode, int mask)
+{
+	return __inode_permission2(NULL, inode, mask);
 }
 EXPORT_SYMBOL(__inode_permission);
 
@@ -455,14 +464,20 @@
  *
  * When checking for MAY_APPEND, MAY_WRITE must also be set in @mask.
  */
-int inode_permission(struct inode *inode, int mask)
+int inode_permission2(struct vfsmount *mnt, struct inode *inode, int mask)
 {
 	int retval;
 
 	retval = sb_permission(inode->i_sb, inode, mask);
 	if (retval)
 		return retval;
-	return __inode_permission(inode, mask);
+	return __inode_permission2(mnt, inode, mask);
+}
+EXPORT_SYMBOL(inode_permission2);
+
+int inode_permission(struct inode *inode, int mask)
+{
+	return inode_permission2(NULL, inode, mask);
 }
 EXPORT_SYMBOL(inode_permission);
 
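From here on, namei.c threads the vfsmount through every permission check: ->permission2() is preferred when an inode provides it, and the NULL-mnt wrappers keep the old entry points working. The consumer in this tree is sdcardfs (added elsewhere in the patch), which needs the mount to apply per-mount views of the same inodes. A schematic implementation of the hook; the policy helper is invented for illustration:

/* Schematic ->permission2() for a stacking filesystem such as sdcardfs. */
static int example_permission2(struct vfsmount *mnt, struct inode *inode,
			       int mask)
{
	/* example_view_is_readonly(): made-up per-mount policy check */
	if (mnt && example_view_is_readonly(mnt) && (mask & MAY_WRITE))
		return -EROFS;

	return generic_permission(inode, mask);	/* usual mode-bit/ACL check */
}
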
@@ -1645,13 +1660,13 @@
 static inline int may_lookup(struct nameidata *nd)
 {
 	if (nd->flags & LOOKUP_RCU) {
-		int err = inode_permission(nd->inode, MAY_EXEC|MAY_NOT_BLOCK);
+		int err = inode_permission2(nd->path.mnt, nd->inode, MAY_EXEC|MAY_NOT_BLOCK);
 		if (err != -ECHILD)
 			return err;
 		if (unlazy_walk(nd, NULL, 0))
 			return -ECHILD;
 	}
-	return inode_permission(nd->inode, MAY_EXEC);
+	return inode_permission2(nd->path.mnt, nd->inode, MAY_EXEC);
 }
 
 static inline int handle_dots(struct nameidata *nd, int type)
@@ -2005,11 +2020,12 @@
 	nd->depth = 0;
 	if (flags & LOOKUP_ROOT) {
 		struct dentry *root = nd->root.dentry;
+		struct vfsmount *mnt = nd->root.mnt;
 		struct inode *inode = root->d_inode;
 		if (*s) {
 			if (!d_can_lookup(root))
 				return ERR_PTR(-ENOTDIR);
-			retval = inode_permission(inode, MAY_EXEC);
+			retval = inode_permission2(mnt, inode, MAY_EXEC);
 			if (retval)
 				return ERR_PTR(retval);
 		}
@@ -2280,13 +2296,14 @@
 /**
  * lookup_one_len - filesystem helper to lookup single pathname component
  * @name:	pathname component to lookup
+ * @mnt:	mount we are looking up on
  * @base:	base directory to lookup from
  * @len:	maximum length @len should be interpreted to
  *
  * Note that this routine is purely a helper for filesystem usage and should
  * not be called by generic code.
  */
-struct dentry *lookup_one_len(const char *name, struct dentry *base, int len)
+struct dentry *lookup_one_len2(const char *name, struct vfsmount *mnt, struct dentry *base, int len)
 {
 	struct qstr this;
 	unsigned int c;
@@ -2320,12 +2337,18 @@
 			return ERR_PTR(err);
 	}
 
-	err = inode_permission(base->d_inode, MAY_EXEC);
+	err = inode_permission2(mnt, base->d_inode, MAY_EXEC);
 	if (err)
 		return ERR_PTR(err);
 
 	return __lookup_hash(&this, base, 0);
 }
+EXPORT_SYMBOL(lookup_one_len2);
+
+struct dentry *lookup_one_len(const char *name, struct dentry *base, int len)
+{
+	return lookup_one_len2(name, NULL, base, len);
+}
 EXPORT_SYMBOL(lookup_one_len);
 
 int user_path_at_empty(int dfd, const char __user *name, unsigned flags,
@@ -2552,7 +2575,7 @@
  * 10. We don't allow removal of NFS sillyrenamed files; it's handled by
  *     nfs_async_unlink().
  */
-static int may_delete(struct inode *dir, struct dentry *victim, bool isdir)
+static int may_delete(struct vfsmount *mnt, struct inode *dir, struct dentry *victim, bool isdir)
 {
 	struct inode *inode = d_backing_inode(victim);
 	int error;
@@ -2564,7 +2587,7 @@
 	BUG_ON(victim->d_parent->d_inode != dir);
 	audit_inode_child(dir, victim, AUDIT_TYPE_CHILD_DELETE);
 
-	error = inode_permission(dir, MAY_WRITE | MAY_EXEC);
+	error = inode_permission2(mnt, dir, MAY_WRITE | MAY_EXEC);
 	if (error)
 		return error;
 	if (IS_APPEND(dir))
@@ -2595,14 +2618,14 @@
  *  3. We should have write and exec permissions on dir
  *  4. We can't do it if dir is immutable (done in permission())
  */
-static inline int may_create(struct inode *dir, struct dentry *child)
+static inline int may_create(struct vfsmount *mnt, struct inode *dir, struct dentry *child)
 {
 	audit_inode_child(dir, child, AUDIT_TYPE_CHILD_CREATE);
 	if (child->d_inode)
 		return -EEXIST;
 	if (IS_DEADDIR(dir))
 		return -ENOENT;
-	return inode_permission(dir, MAY_WRITE | MAY_EXEC);
+	return inode_permission2(mnt, dir, MAY_WRITE | MAY_EXEC);
 }
 
 /*
@@ -2649,10 +2672,10 @@
 }
 EXPORT_SYMBOL(unlock_rename);
 
-int vfs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
-		bool want_excl)
+int vfs_create2(struct vfsmount *mnt, struct inode *dir, struct dentry *dentry,
+		umode_t mode, bool want_excl)
 {
-	int error = may_create(dir, dentry);
+	int error = may_create(mnt, dir, dentry);
 	if (error)
 		return error;
 
@@ -2664,15 +2687,29 @@
 	if (error)
 		return error;
 	error = dir->i_op->create(dir, dentry, mode, want_excl);
+	if (error)
+		return error;
+	error = security_inode_post_create(dir, dentry, mode);
+	if (error)
+		return error;
 	if (!error)
 		fsnotify_create(dir, dentry);
+
 	return error;
 }
+EXPORT_SYMBOL(vfs_create2);
+
+int vfs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
+		bool want_excl)
+{
+	return vfs_create2(NULL, dir, dentry, mode, want_excl);
+}
 EXPORT_SYMBOL(vfs_create);
 
 static int may_open(struct path *path, int acc_mode, int flag)
 {
 	struct dentry *dentry = path->dentry;
+	struct vfsmount *mnt = path->mnt;
 	struct inode *inode = dentry->d_inode;
 	int error;
 
@@ -2701,7 +2738,7 @@
 		break;
 	}
 
-	error = inode_permission(inode, acc_mode);
+	error = inode_permission2(mnt, inode, acc_mode);
 	if (error)
 		return error;
 
@@ -2736,7 +2773,7 @@
 	if (!error)
 		error = security_path_truncate(path);
 	if (!error) {
-		error = do_truncate(path->dentry, 0,
+		error = do_truncate2(path->mnt, path->dentry, 0,
 				    ATTR_MTIME|ATTR_CTIME|ATTR_OPEN,
 				    filp);
 	}
@@ -2757,7 +2794,7 @@
 	if (error)
 		return error;
 
-	error = inode_permission(dir->dentry->d_inode, MAY_WRITE | MAY_EXEC);
+	error = inode_permission2(dir->mnt, dir->dentry->d_inode, MAY_WRITE | MAY_EXEC);
 	if (error)
 		return error;
 
@@ -2943,6 +2980,7 @@
 			bool got_write, int *opened)
 {
 	struct dentry *dir = nd->path.dentry;
+	struct vfsmount *mnt = nd->path.mnt;
 	struct inode *dir_inode = dir->d_inode;
 	struct dentry *dentry;
 	int error;
@@ -2990,7 +3028,7 @@
 		error = security_path_mknod(&nd->path, dentry, mode, 0);
 		if (error)
 			goto out_dput;
-		error = vfs_create(dir->d_inode, dentry, mode,
+		error = vfs_create2(mnt, dir->d_inode, dentry, mode,
 				   nd->flags & LOOKUP_EXCL);
 		if (error)
 			goto out_dput;
@@ -3252,7 +3290,7 @@
 		goto out;
 	dir = path.dentry->d_inode;
 	/* we want directory to be writable */
-	error = inode_permission(dir, MAY_WRITE | MAY_EXEC);
+	error = inode_permission2(path.mnt, dir, MAY_WRITE | MAY_EXEC);
 	if (error)
 		goto out2;
 	if (!dir->i_op->tmpfile) {
@@ -3341,6 +3379,8 @@
 				error = -ESTALE;
 		}
 		file = ERR_PTR(error);
+	} else {
+		global_filetable_add(file);
 	}
 	return file;
 }
@@ -3486,9 +3526,9 @@
 }
 EXPORT_SYMBOL(user_path_create);
 
-int vfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev)
+int vfs_mknod2(struct vfsmount *mnt, struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev)
 {
-	int error = may_create(dir, dentry);
+	int error = may_create(mnt, dir, dentry);
 
 	if (error)
 		return error;
@@ -3508,10 +3548,24 @@
 		return error;
 
 	error = dir->i_op->mknod(dir, dentry, mode, dev);
+	if (error)
+		return error;
+
+	error = security_inode_post_create(dir, dentry, mode);
+	if (error)
+		return error;
+
 	if (!error)
 		fsnotify_create(dir, dentry);
+
 	return error;
 }
+EXPORT_SYMBOL(vfs_mknod2);
+
+int vfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev)
+{
+	return vfs_mknod2(NULL, dir, dentry, mode, dev);
+}
 EXPORT_SYMBOL(vfs_mknod);
 
 static int may_mknod(umode_t mode)
@@ -3554,10 +3608,10 @@
 		goto out;
 	switch (mode & S_IFMT) {
 		case 0: case S_IFREG:
-			error = vfs_create(path.dentry->d_inode,dentry,mode,true);
+			error = vfs_create2(path.mnt, path.dentry->d_inode,dentry,mode,true);
 			break;
 		case S_IFCHR: case S_IFBLK:
-			error = vfs_mknod(path.dentry->d_inode,dentry,mode,
+			error = vfs_mknod2(path.mnt, path.dentry->d_inode,dentry,mode,
 					new_decode_dev(dev));
 			break;
 		case S_IFIFO: case S_IFSOCK:
@@ -3578,9 +3632,9 @@
 	return sys_mknodat(AT_FDCWD, filename, mode, dev);
 }
 
-int vfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
+int vfs_mkdir2(struct vfsmount *mnt, struct inode *dir, struct dentry *dentry, umode_t mode)
 {
-	int error = may_create(dir, dentry);
+	int error = may_create(mnt, dir, dentry);
 	unsigned max_links = dir->i_sb->s_max_links;
 
 	if (error)
@@ -3602,6 +3656,12 @@
 		fsnotify_mkdir(dir, dentry);
 	return error;
 }
+EXPORT_SYMBOL(vfs_mkdir2);
+
+int vfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
+{
+	return vfs_mkdir2(NULL, dir, dentry, mode);
+}
 EXPORT_SYMBOL(vfs_mkdir);
 
 SYSCALL_DEFINE3(mkdirat, int, dfd, const char __user *, pathname, umode_t, mode)
@@ -3620,7 +3680,7 @@
 		mode &= ~current_umask();
 	error = security_path_mkdir(&path, dentry, mode);
 	if (!error)
-		error = vfs_mkdir(path.dentry->d_inode, dentry, mode);
+		error = vfs_mkdir2(path.mnt, path.dentry->d_inode, dentry, mode);
 	done_path_create(&path, dentry);
 	if (retry_estale(error, lookup_flags)) {
 		lookup_flags |= LOOKUP_REVAL;
@@ -3659,9 +3719,9 @@
 }
 EXPORT_SYMBOL(dentry_unhash);
 
-int vfs_rmdir(struct inode *dir, struct dentry *dentry)
+int vfs_rmdir2(struct vfsmount *mnt, struct inode *dir, struct dentry *dentry)
 {
-	int error = may_delete(dir, dentry, 1);
+	int error = may_delete(mnt, dir, dentry, 1);
 
 	if (error)
 		return error;
@@ -3696,6 +3756,12 @@
 		d_delete(dentry);
 	return error;
 }
+EXPORT_SYMBOL(vfs_rmdir2);
+
+int vfs_rmdir(struct inode *dir, struct dentry *dentry)
+{
+	return vfs_rmdir2(NULL, dir, dentry);
+}
 EXPORT_SYMBOL(vfs_rmdir);
 
 static long do_rmdir(int dfd, const char __user *pathname)
@@ -3741,7 +3807,7 @@
 	error = security_path_rmdir(&path, dentry);
 	if (error)
 		goto exit3;
-	error = vfs_rmdir(path.dentry->d_inode, dentry);
+	error = vfs_rmdir2(path.mnt, path.dentry->d_inode, dentry);
 exit3:
 	dput(dentry);
 exit2:
@@ -3780,10 +3846,10 @@
  * be appropriate for callers that expect the underlying filesystem not
  * to be NFS exported.
  */
-int vfs_unlink(struct inode *dir, struct dentry *dentry, struct inode **delegated_inode)
+int vfs_unlink2(struct vfsmount *mnt, struct inode *dir, struct dentry *dentry, struct inode **delegated_inode)
 {
 	struct inode *target = dentry->d_inode;
-	int error = may_delete(dir, dentry, 0);
+	int error = may_delete(mnt, dir, dentry, 0);
 
 	if (error)
 		return error;
@@ -3818,6 +3884,12 @@
 
 	return error;
 }
+EXPORT_SYMBOL(vfs_unlink2);
+
+int vfs_unlink(struct inode *dir, struct dentry *dentry, struct inode **delegated_inode)
+{
+	return vfs_unlink2(NULL, dir, dentry, delegated_inode);
+}
 EXPORT_SYMBOL(vfs_unlink);
 
 /*
@@ -3865,7 +3937,7 @@
 		error = security_path_unlink(&path, dentry);
 		if (error)
 			goto exit2;
-		error = vfs_unlink(path.dentry->d_inode, dentry, &delegated_inode);
+		error = vfs_unlink2(path.mnt, path.dentry->d_inode, dentry, &delegated_inode);
 exit2:
 		dput(dentry);
 	}
@@ -3915,9 +3987,9 @@
 	return do_unlinkat(AT_FDCWD, pathname);
 }
 
-int vfs_symlink(struct inode *dir, struct dentry *dentry, const char *oldname)
+int vfs_symlink2(struct vfsmount *mnt, struct inode *dir, struct dentry *dentry, const char *oldname)
 {
-	int error = may_create(dir, dentry);
+	int error = may_create(mnt, dir, dentry);
 
 	if (error)
 		return error;
@@ -3934,6 +4006,12 @@
 		fsnotify_create(dir, dentry);
 	return error;
 }
+EXPORT_SYMBOL(vfs_symlink2);
+
+int vfs_symlink(struct inode *dir, struct dentry *dentry, const char *oldname)
+{
+	return vfs_symlink2(NULL, dir, dentry, oldname);
+}
 EXPORT_SYMBOL(vfs_symlink);
 
 SYSCALL_DEFINE3(symlinkat, const char __user *, oldname,
@@ -3956,7 +4034,7 @@
 
 	error = security_path_symlink(&path, dentry, from->name);
 	if (!error)
-		error = vfs_symlink(path.dentry->d_inode, dentry, from->name);
+		error = vfs_symlink2(path.mnt, path.dentry->d_inode, dentry, from->name);
 	done_path_create(&path, dentry);
 	if (retry_estale(error, lookup_flags)) {
 		lookup_flags |= LOOKUP_REVAL;
@@ -3991,7 +4069,7 @@
  * be appropriate for callers that expect the underlying filesystem not
  * to be NFS exported.
  */
-int vfs_link(struct dentry *old_dentry, struct inode *dir, struct dentry *new_dentry, struct inode **delegated_inode)
+int vfs_link2(struct vfsmount *mnt, struct dentry *old_dentry, struct inode *dir, struct dentry *new_dentry, struct inode **delegated_inode)
 {
 	struct inode *inode = old_dentry->d_inode;
 	unsigned max_links = dir->i_sb->s_max_links;
@@ -4000,7 +4078,7 @@
 	if (!inode)
 		return -ENOENT;
 
-	error = may_create(dir, new_dentry);
+	error = may_create(mnt, dir, new_dentry);
 	if (error)
 		return error;
 
@@ -4043,6 +4121,12 @@
 		fsnotify_link(dir, inode, new_dentry);
 	return error;
 }
+EXPORT_SYMBOL(vfs_link2);
+
+int vfs_link(struct dentry *old_dentry, struct inode *dir, struct dentry *new_dentry, struct inode **delegated_inode)
+{
+	return vfs_link2(NULL, old_dentry, dir, new_dentry, delegated_inode);
+}
 EXPORT_SYMBOL(vfs_link);
 
 /*
@@ -4098,7 +4182,7 @@
 	error = security_path_link(old_path.dentry, &new_path, new_dentry);
 	if (error)
 		goto out_dput;
-	error = vfs_link(old_path.dentry, new_path.dentry->d_inode, new_dentry, &delegated_inode);
+	error = vfs_link2(old_path.mnt, old_path.dentry, new_path.dentry->d_inode, new_dentry, &delegated_inode);
 out_dput:
 	done_path_create(&new_path, new_dentry);
 	if (delegated_inode) {
@@ -4173,7 +4257,8 @@
  *	   ->i_mutex on parents, which works but leads to some truly excessive
  *	   locking].
  */
-int vfs_rename(struct inode *old_dir, struct dentry *old_dentry,
+int vfs_rename2(struct vfsmount *mnt,
+	       struct inode *old_dir, struct dentry *old_dentry,
 	       struct inode *new_dir, struct dentry *new_dentry,
 	       struct inode **delegated_inode, unsigned int flags)
 {
@@ -4192,19 +4277,19 @@
 	if (vfs_select_inode(old_dentry, 0) == vfs_select_inode(new_dentry, 0))
 		return 0;
 
-	error = may_delete(old_dir, old_dentry, is_dir);
+	error = may_delete(mnt, old_dir, old_dentry, is_dir);
 	if (error)
 		return error;
 
 	if (!target) {
-		error = may_create(new_dir, new_dentry);
+		error = may_create(mnt, new_dir, new_dentry);
 	} else {
 		new_is_dir = d_is_dir(new_dentry);
 
 		if (!(flags & RENAME_EXCHANGE))
-			error = may_delete(new_dir, new_dentry, is_dir);
+			error = may_delete(mnt, new_dir, new_dentry, is_dir);
 		else
-			error = may_delete(new_dir, new_dentry, new_is_dir);
+			error = may_delete(mnt, new_dir, new_dentry, new_is_dir);
 	}
 	if (error)
 		return error;
@@ -4221,12 +4306,12 @@
 	 */
 	if (new_dir != old_dir) {
 		if (is_dir) {
-			error = inode_permission(source, MAY_WRITE);
+			error = inode_permission2(mnt, source, MAY_WRITE);
 			if (error)
 				return error;
 		}
 		if ((flags & RENAME_EXCHANGE) && new_is_dir) {
-			error = inode_permission(target, MAY_WRITE);
+			error = inode_permission2(mnt, target, MAY_WRITE);
 			if (error)
 				return error;
 		}
@@ -4309,6 +4394,14 @@
 
 	return error;
 }
+EXPORT_SYMBOL(vfs_rename2);
+
+int vfs_rename(struct inode *old_dir, struct dentry *old_dentry,
+	       struct inode *new_dir, struct dentry *new_dentry,
+	       struct inode **delegated_inode, unsigned int flags)
+{
+	return vfs_rename2(NULL, old_dir, old_dentry, new_dir, new_dentry, delegated_inode, flags);
+}
 EXPORT_SYMBOL(vfs_rename);
 
 SYSCALL_DEFINE5(renameat2, int, olddfd, const char __user *, oldname,
@@ -4422,7 +4515,7 @@
 				     &new_path, new_dentry, flags);
 	if (error)
 		goto exit5;
-	error = vfs_rename(old_path.dentry->d_inode, old_dentry,
+	error = vfs_rename2(old_path.mnt, old_path.dentry->d_inode, old_dentry,
 			   new_path.dentry->d_inode, new_dentry,
 			   &delegated_inode, flags);
 exit5:
@@ -4467,7 +4560,7 @@
 
 int vfs_whiteout(struct inode *dir, struct dentry *dentry)
 {
-	int error = may_create(dir, dentry);
+	int error = may_create(NULL, dir, dentry);
 	if (error)
 		return error;
 
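
The namei.c hunks above all follow one conversion pattern: each permission-taking helper gains a *2 variant that carries a struct vfsmount, and the original symbol becomes a thin wrapper passing NULL so unconverted callers keep their old behavior. A minimal sketch of the pattern, with invented names (vfs_frob/vfs_frob2 are illustrative, not part of the patch):

/*
 * Sketch of the mount-aware wrapper pattern used by the namei.c
 * changes above; vfs_frob()/vfs_frob2() are hypothetical names.
 * The patch applies this shape to vfs_create, vfs_mknod,
 * vfs_mkdir, vfs_rmdir, vfs_unlink, vfs_symlink, vfs_link and
 * vfs_rename.
 */
struct vfsmount;
struct inode;

int vfs_frob2(struct vfsmount *mnt, struct inode *dir, int arg)
{
	/*
	 * mnt may be NULL: a permission hook that consults per-mount
	 * state must tolerate that and fall back to the plain,
	 * mount-agnostic check.
	 */
	(void)mnt;
	(void)dir;
	(void)arg;
	return 0;	/* real work elided */
}

int vfs_frob(struct inode *dir, int arg)
{
	/* legacy entry point: no mount context available */
	return vfs_frob2(NULL, dir, arg);
}
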
diff -ruw linux-4.4.115/fs/namespace.c linux-4.4.115-fbx/fs/namespace.c
--- linux-4.4.115/fs/namespace.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/fs/namespace.c	2019-10-29 09:26:25.317219560 +0100
@@ -227,6 +227,7 @@
 		mnt->mnt_count = 1;
 		mnt->mnt_writers = 0;
 #endif
+		mnt->mnt.data = NULL;
 
 		INIT_HLIST_NODE(&mnt->mnt_hash);
 		INIT_LIST_HEAD(&mnt->mnt_child);
@@ -581,6 +582,7 @@
 
 static void free_vfsmnt(struct mount *mnt)
 {
+	kfree(mnt->mnt.data);
 	kfree_const(mnt->mnt_devname);
 #ifdef CONFIG_SMP
 	free_percpu(mnt->mnt_pcp);
@@ -975,10 +977,18 @@
 	if (!mnt)
 		return ERR_PTR(-ENOMEM);
 
+	if (type->alloc_mnt_data) {
+		mnt->mnt.data = type->alloc_mnt_data();
+		if (!mnt->mnt.data) {
+			mnt_free_id(mnt);
+			free_vfsmnt(mnt);
+			return ERR_PTR(-ENOMEM);
+		}
+	}
 	if (flags & MS_KERNMOUNT)
 		mnt->mnt.mnt_flags = MNT_INTERNAL;
 
-	root = mount_fs(type, flags, name, data);
+	root = mount_fs(type, flags, name, &mnt->mnt, data);
 	if (IS_ERR(root)) {
 		mnt_free_id(mnt);
 		free_vfsmnt(mnt);
@@ -1007,6 +1017,14 @@
 	if (!mnt)
 		return ERR_PTR(-ENOMEM);
 
+	if (sb->s_op->clone_mnt_data) {
+		mnt->mnt.data = sb->s_op->clone_mnt_data(old->mnt.data);
+		if (!mnt->mnt.data) {
+			err = -ENOMEM;
+			goto out_free;
+		}
+	}
+
 	if (flag & (CL_SLAVE | CL_PRIVATE | CL_SHARED_TO_SLAVE))
 		mnt->mnt_group_id = 0; /* not a peer of original */
 	else
@@ -1576,6 +1594,8 @@
 	}
 	unlock_mount_hash();
 	namespace_unlock();
+	if (retval == -EBUSY)
+		global_filetable_delayed_print(mnt);
 	return retval;
 }
 
@@ -2288,8 +2308,14 @@
 		err = change_mount_flags(path->mnt, flags);
 	else if (!capable(CAP_SYS_ADMIN))
 		err = -EPERM;
-	else
-		err = do_remount_sb(sb, flags, data, 0);
+	else {
+		err = do_remount_sb2(path->mnt, sb, flags, data, 0);
+		namespace_lock();
+		lock_mount_hash();
+		propagate_remount(mnt);
+		unlock_mount_hash();
+		namespace_unlock();
+	}
 	if (!err) {
 		lock_mount_hash();
 		mnt_flags |= mnt->mnt.mnt_flags & ~MNT_USER_SETTABLE_MASK;
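
Three new hooks carry per-mount state through namespace.c: file_system_type->alloc_mnt_data populates mnt->mnt.data at mount time (a NULL return fails vfs_kern_mount() with -ENOMEM), super_operations->clone_mnt_data duplicates it in clone_mnt() for bind mounts and namespace copies, and super_operations->copy_mnt_data lets propagate_remount() push remounted options out to slave mounts. A hedged sketch of a filesystem filling them in; the myfs_* names and the option struct are illustrative, and the hook signatures are inferred from the call sites above:

#include <linux/slab.h>
#include <linux/string.h>

/* Illustrative per-mount option block; not part of the patch. */
struct myfs_mount_opts {
	unsigned int uid;
	unsigned int gid;
};

/* file_system_type->alloc_mnt_data: runs in vfs_kern_mount()
 * before mount_fs(); returning NULL fails the mount. */
static void *myfs_alloc_mnt_data(void)
{
	return kzalloc(sizeof(struct myfs_mount_opts), GFP_KERNEL);
}

/* super_operations->clone_mnt_data: runs in clone_mnt(), so every
 * bind mount / namespace copy gets its own option block. */
static void *myfs_clone_mnt_data(void *data)
{
	return kmemdup(data, sizeof(struct myfs_mount_opts), GFP_KERNEL);
}

/* super_operations->copy_mnt_data: called under namespace_lock and
 * the mount hash lock from propagate_remount() to sync slaves with
 * the remounted master. */
static void myfs_copy_mnt_data(void *data, void *from)
{
	memcpy(data, from, sizeof(struct myfs_mount_opts));
}
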
diff -ruw linux-4.4.115/fs/notify/fanotify/fanotify_user.c linux-4.4.115-fbx/fs/notify/fanotify/fanotify_user.c
--- linux-4.4.115/fs/notify/fanotify/fanotify_user.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/fs/notify/fanotify/fanotify_user.c	2019-01-22 16:16:27.991287048 +0100
@@ -488,7 +488,7 @@
 	}
 
 	/* you can only watch an inode if you have read permissions on it */
-	ret = inode_permission(path->dentry->d_inode, MAY_READ);
+	ret = inode_permission2(path->mnt, path->dentry->d_inode, MAY_READ);
 	if (ret)
 		path_put(path);
 out:
diff -ruw linux-4.4.115/fs/notify/inotify/inotify_user.c linux-4.4.115-fbx/fs/notify/inotify/inotify_user.c
--- linux-4.4.115/fs/notify/inotify/inotify_user.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/fs/notify/inotify/inotify_user.c	2019-01-22 16:16:27.991287048 +0100
@@ -337,7 +337,7 @@
 	if (error)
 		return error;
 	/* you can only watch an inode if you have read permissions on it */
-	error = inode_permission(path->dentry->d_inode, MAY_READ);
+	error = inode_permission2(path->mnt, path->dentry->d_inode, MAY_READ);
 	if (error)
 		path_put(path);
 	return error;
@@ -702,6 +702,8 @@
 	struct fsnotify_group *group;
 	struct inode *inode;
 	struct path path;
+	struct path alteredpath;
+	struct path *canonical_path = &path;
 	struct fd f;
 	int ret;
 	unsigned flags = 0;
@@ -741,13 +743,22 @@
 	if (ret)
 		goto fput_and_out;
 
+	/* support stacked filesystems */
+	if (path.dentry && path.dentry->d_op) {
+		if (path.dentry->d_op->d_canonical_path) {
+			path.dentry->d_op->d_canonical_path(&path, &alteredpath);
+			canonical_path = &alteredpath;
+			path_put(&path);
+		}
+	}
+
 	/* inode held in place by reference to path; group by fget on fd */
-	inode = path.dentry->d_inode;
+	inode = canonical_path->dentry->d_inode;
 	group = f.file->private_data;
 
 	/* create/update an inode mark */
 	ret = inotify_update_watch(group, inode, mask);
-	path_put(&path);
+	path_put(canonical_path);
 fput_and_out:
 	fdput(f);
 	return ret;
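
The inotify change above lets a stacking filesystem redirect a watch from its own dentry to the underlying one, so events generated on the lower filesystem still reach watchers of the upper path. A sketch of the d_canonical_path dentry operation such a filesystem might provide; the signature is inferred from the call site, and myfs_lower_path() is an assumed helper, not from the patch:

#include <linux/dcache.h>
#include <linux/path.h>

/*
 * Illustrative d_canonical_path for a stacking filesystem: hand
 * back a referenced copy of the lower path so the inotify mark
 * lands on the real inode. myfs_lower_path() is hypothetical.
 */
static void myfs_canonical_path(const struct path *upper,
				struct path *lower)
{
	*lower = *myfs_lower_path(upper->dentry);
	path_get(lower);
}

static const struct dentry_operations myfs_dops = {
	.d_canonical_path = myfs_canonical_path,
};
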
diff -ruw linux-4.4.115/fs/nsfs.c linux-4.4.115-fbx/fs/nsfs.c
--- linux-4.4.115/fs/nsfs.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/fs/nsfs.c	2019-10-29 09:26:25.353219912 +0100
@@ -95,7 +95,8 @@
 		return ERR_PTR(-ENOMEM);
 	}
 	d_instantiate(dentry, inode);
-	dentry->d_fsdata = (void *)ns_ops;
+	dentry->d_flags |= DCACHE_RCUACCESS;
+	dentry->d_fsdata = (void *)ns->ops;
 	d = atomic_long_cmpxchg(&ns->stashed, 0, (unsigned long)dentry);
 	if (d) {
 		d_delete(dentry);	/* make sure ->d_prune() does nothing */
diff -ruw linux-4.4.115/fs/open.c linux-4.4.115-fbx/fs/open.c
--- linux-4.4.115/fs/open.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/fs/open.c	2019-10-29 09:26:25.365220030 +0100
@@ -34,8 +34,8 @@
 
 #include "internal.h"
 
-int do_truncate(struct dentry *dentry, loff_t length, unsigned int time_attrs,
-	struct file *filp)
+int do_truncate2(struct vfsmount *mnt, struct dentry *dentry, loff_t length,
+		unsigned int time_attrs, struct file *filp)
 {
 	int ret;
 	struct iattr newattrs;
@@ -60,17 +60,24 @@
 
 	mutex_lock(&dentry->d_inode->i_mutex);
 	/* Note any delegations or leases have already been broken: */
-	ret = notify_change(dentry, &newattrs, NULL);
+	ret = notify_change2(mnt, dentry, &newattrs, NULL);
 	mutex_unlock(&dentry->d_inode->i_mutex);
 	return ret;
 }
+int do_truncate(struct dentry *dentry, loff_t length, unsigned int time_attrs,
+	struct file *filp)
+{
+	return do_truncate2(NULL, dentry, length, time_attrs, filp);
+}
 
 long vfs_truncate(struct path *path, loff_t length)
 {
 	struct inode *inode;
+	struct vfsmount *mnt;
 	long error;
 
 	inode = path->dentry->d_inode;
+	mnt = path->mnt;
 
 	/* For directories it's -EISDIR, for other non-regulars - -EINVAL */
 	if (S_ISDIR(inode->i_mode))
@@ -82,7 +89,7 @@
 	if (error)
 		goto out;
 
-	error = inode_permission(inode, MAY_WRITE);
+	error = inode_permission2(mnt, inode, MAY_WRITE);
 	if (error)
 		goto mnt_drop_write_and_out;
 
@@ -106,7 +113,7 @@
 	if (!error)
 		error = security_path_truncate(path);
 	if (!error)
-		error = do_truncate(path->dentry, length, 0, NULL);
+		error = do_truncate2(mnt, path->dentry, length, 0, NULL);
 
 put_write_and_out:
 	put_write_access(inode);
@@ -155,6 +162,7 @@
 {
 	struct inode *inode;
 	struct dentry *dentry;
+	struct vfsmount *mnt;
 	struct fd f;
 	int error;
 
@@ -171,6 +179,7 @@
 		small = 0;
 
 	dentry = f.file->f_path.dentry;
+	mnt = f.file->f_path.mnt;
 	inode = dentry->d_inode;
 	error = -EINVAL;
 	if (!S_ISREG(inode->i_mode) || !(f.file->f_mode & FMODE_WRITE))
@@ -190,7 +199,7 @@
 	if (!error)
 		error = security_path_truncate(&f.file->f_path);
 	if (!error)
-		error = do_truncate(dentry, length, ATTR_MTIME|ATTR_CTIME, f.file);
+		error = do_truncate2(mnt, dentry, length, ATTR_MTIME|ATTR_CTIME, f.file);
 	sb_end_write(inode->i_sb);
 out_putf:
 	fdput(f);
@@ -340,6 +349,7 @@
 	struct cred *override_cred;
 	struct path path;
 	struct inode *inode;
+	struct vfsmount *mnt;
 	int res;
 	unsigned int lookup_flags = LOOKUP_FOLLOW;
 
@@ -370,6 +380,7 @@
 		goto out;
 
 	inode = d_backing_inode(path.dentry);
+	mnt = path.mnt;
 
 	if ((mode & MAY_EXEC) && S_ISREG(inode->i_mode)) {
 		/*
@@ -381,7 +392,7 @@
 			goto out_path_release;
 	}
 
-	res = inode_permission(inode, mode | MAY_ACCESS);
+	res = inode_permission2(mnt, inode, mode | MAY_ACCESS);
 	/* SuS v2 requires we report a read only fs too */
 	if (res || !(mode & S_IWOTH) || special_file(inode->i_mode))
 		goto out_path_release;
@@ -425,7 +436,7 @@
 	if (error)
 		goto out;
 
-	error = inode_permission(path.dentry->d_inode, MAY_EXEC | MAY_CHDIR);
+	error = inode_permission2(path.mnt, path.dentry->d_inode, MAY_EXEC | MAY_CHDIR);
 	if (error)
 		goto dput_and_out;
 
@@ -445,6 +456,7 @@
 {
 	struct fd f = fdget_raw(fd);
 	struct inode *inode;
+	struct vfsmount *mnt;
 	int error = -EBADF;
 
 	error = -EBADF;
@@ -452,12 +464,13 @@
 		goto out;
 
 	inode = file_inode(f.file);
+	mnt = f.file->f_path.mnt;
 
 	error = -ENOTDIR;
 	if (!S_ISDIR(inode->i_mode))
 		goto out_putf;
 
-	error = inode_permission(inode, MAY_EXEC | MAY_CHDIR);
+	error = inode_permission2(mnt, inode, MAY_EXEC | MAY_CHDIR);
 	if (!error)
 		set_fs_pwd(current->fs, &f.file->f_path);
 out_putf:
@@ -476,7 +489,7 @@
 	if (error)
 		goto out;
 
-	error = inode_permission(path.dentry->d_inode, MAY_EXEC | MAY_CHDIR);
+	error = inode_permission2(path.mnt, path.dentry->d_inode, MAY_EXEC | MAY_CHDIR);
 	if (error)
 		goto dput_and_out;
 
@@ -516,7 +529,7 @@
 		goto out_unlock;
 	newattrs.ia_mode = (mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO);
 	newattrs.ia_valid = ATTR_MODE | ATTR_CTIME;
-	error = notify_change(path->dentry, &newattrs, &delegated_inode);
+	error = notify_change2(path->mnt, path->dentry, &newattrs, &delegated_inode);
 out_unlock:
 	mutex_unlock(&inode->i_mutex);
 	if (delegated_inode) {
@@ -596,7 +609,7 @@
 	mutex_lock(&inode->i_mutex);
 	error = security_path_chown(path, uid, gid);
 	if (!error)
-		error = notify_change(path->dentry, &newattrs, &delegated_inode);
+		error = notify_change2(path->mnt, path->dentry, &newattrs, &delegated_inode);
 	mutex_unlock(&inode->i_mutex);
 	if (delegated_inode) {
 		error = break_deleg_wait(&delegated_inode);
diff -ruw linux-4.4.115/fs/pnode.c linux-4.4.115-fbx/fs/pnode.c
--- linux-4.4.115/fs/pnode.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/fs/pnode.c	2019-10-29 09:26:25.369220069 +0100
@@ -609,3 +609,37 @@
 
 	return 0;
 }
+
+/*
+ *  Iterates over all slaves, and slaves of slaves.
+ */
+static struct mount *next_descendent(struct mount *root, struct mount *cur)
+{
+	if (!IS_MNT_NEW(cur) && !list_empty(&cur->mnt_slave_list))
+		return first_slave(cur);
+	do {
+		struct mount *master = cur->mnt_master;
+
+		if (!master || cur->mnt_slave.next != &master->mnt_slave_list) {
+			struct mount *next = next_slave(cur);
+
+			return (next == root) ? NULL : next;
+		}
+		cur = master;
+	} while (cur != root);
+	return NULL;
+}
+
+void propagate_remount(struct mount *mnt)
+{
+	struct mount *m = mnt;
+	struct super_block *sb = mnt->mnt.mnt_sb;
+
+	if (sb->s_op->copy_mnt_data) {
+		m = next_descendent(mnt, m);
+		while (m) {
+			sb->s_op->copy_mnt_data(m->mnt.data, mnt->mnt.data);
+			m = next_descendent(mnt, m);
+		}
+	}
+}
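
next_descendent() is a depth-first walk of the slave-mount tree: descend into mnt_slave_list first, then advance to the next sibling, climbing back up through mnt_master when a sibling list runs out; propagate_remount() simply drives it from the remounted mount. The traversal order can be checked with a small userspace model (toy child/sibling pointers stand in for the kernel's circular list_heads, and the IS_MNT_NEW skip is omitted):

/*
 * Userspace model of next_descendent()'s visiting order over a toy
 * tree  A -> {B -> {C, D}, E}.  Expected output: B C D E
 */
#include <stdio.h>

struct node {
	const char *name;
	struct node *master;	/* mnt_master */
	struct node *slaves;	/* first child  (mnt_slave_list) */
	struct node *next;	/* next sibling (mnt_slave) */
};

static struct node *next_descendent(struct node *root, struct node *cur)
{
	if (cur->slaves)
		return cur->slaves;
	do {
		if (cur->next)
			return cur->next;
		cur = cur->master;
	} while (cur != root);
	return NULL;
}

int main(void)
{
	struct node C = { "C" }, D = { "D" }, E = { "E" };
	struct node B = { "B" }, A = { "A" };
	struct node *m;

	A.slaves = &B; B.master = &A; B.next = &E; E.master = &A;
	B.slaves = &C; C.master = &B; C.next = &D; D.master = &B;

	/* same driver loop shape as propagate_remount() above */
	for (m = next_descendent(&A, &A); m; m = next_descendent(&A, m))
		printf("%s ", m->name);
	printf("\n");
	return 0;
}
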
diff -ruw linux-4.4.115/fs/pnode.h linux-4.4.115-fbx/fs/pnode.h
--- linux-4.4.115/fs/pnode.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/fs/pnode.h	2019-01-22 16:16:28.031287410 +0100
@@ -44,6 +44,7 @@
 int propagate_umount(struct list_head *);
 int propagate_mount_busy(struct mount *, int);
 void propagate_mount_unlock(struct mount *);
+void propagate_remount(struct mount *);
 void mnt_release_group_id(struct mount *);
 int get_dominating_id(struct mount *mnt, const struct path *root);
 unsigned int mnt_get_count(struct mount *mnt);
diff -ruw linux-4.4.115/fs/proc/array.c linux-4.4.115-fbx/fs/proc/array.c
--- linux-4.4.115/fs/proc/array.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/fs/proc/array.c	2019-10-29 09:26:25.369220069 +0100
@@ -139,6 +139,21 @@
 	return task_state_array[fls(state)];
 }
 
+static const char *const task_exec_mode_array[] = {
+	"0 (Denied)",
+	"1 (Once)",
+	"2 (Unlimited)",
+};
+
+static inline const char *get_task_exec_mode(struct task_struct *tsk)
+{
+	unsigned int exec_mode = tsk->exec_mode;
+
+	if (exec_mode > EXEC_MODE_UNLIMITED)
+		return "? (Invalid)";
+	return task_exec_mode_array[exec_mode];
+}
+
 static inline void task_state(struct seq_file *m, struct pid_namespace *ns,
 				struct pid *pid, struct task_struct *p)
 {
@@ -171,15 +186,15 @@
 	seq_printf(m,
 		"State:\t%s\n"
 		"Tgid:\t%d\n"
-		"Ngid:\t%d\n"
 		"Pid:\t%d\n"
 		"PPid:\t%d\n"
 		"TracerPid:\t%d\n"
 		"Uid:\t%d\t%d\t%d\t%d\n"
 		"Gid:\t%d\t%d\t%d\t%d\n"
+		"Ngid:\t%d\n"
 		"FDSize:\t%d\nGroups:\t",
 		get_task_state(p),
-		tgid, ngid, pid_nr_ns(pid, ns), ppid, tpid,
+		tgid, pid_nr_ns(pid, ns), ppid, tpid,
 		from_kuid_munged(user_ns, cred->uid),
 		from_kuid_munged(user_ns, cred->euid),
 		from_kuid_munged(user_ns, cred->suid),
@@ -188,7 +203,7 @@
 		from_kgid_munged(user_ns, cred->egid),
 		from_kgid_munged(user_ns, cred->sgid),
 		from_kgid_munged(user_ns, cred->fsgid),
-		max_fds);
+		ngid, max_fds);
 
 	group_info = cred->group_info;
 	for (g = 0; g < group_info->ngroups; g++)
@@ -343,6 +358,12 @@
 			p->nivcsw);
 }
 
+static inline void task_exec_mode(struct seq_file *m,
+				  struct task_struct *p)
+{
+	seq_printf(m, "Exec mode: %s\n", get_task_exec_mode(p));
+}
+
 static void task_cpus_allowed(struct seq_file *m, struct task_struct *task)
 {
 	seq_printf(m, "Cpus_allowed:\t%*pb\n",
@@ -369,6 +390,7 @@
 	task_cpus_allowed(m, task);
 	cpuset_task_status_allowed(m, task);
 	task_context_switch_counts(m, task);
+	task_exec_mode(m, task);
 	return 0;
 }
 
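
Because the patched task_state() moves Ngid below the Gid line and proc_pid_status() now ends with the new field, consumers should match on the label rather than a fixed line position. A minimal userspace reader for the added field (assumes a kernel carrying this patch; note the value is printed as "Exec mode: <n> (<name>)" with a space after the colon, unlike the tab-separated fields around it):

#include <stdio.h>
#include <string.h>

int main(void)
{
	char line[256];
	FILE *f = fopen("/proc/self/status", "r");

	if (!f) {
		perror("fopen");
		return 1;
	}
	while (fgets(line, sizeof(line), f)) {
		/* e.g. "Exec mode: 2 (Unlimited)" */
		if (!strncmp(line, "Exec mode:", 10))
			fputs(line, stdout);
	}
	fclose(f);
	return 0;
}
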
diff -ruw linux-4.4.115/fs/proc/base.c linux-4.4.115-fbx/fs/proc/base.c
--- linux-4.4.115/fs/proc/base.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/fs/proc/base.c	2019-10-29 09:26:25.373220108 +0100
@@ -1018,15 +1018,20 @@
 	int oom_adj = OOM_ADJUST_MIN;
 	size_t len;
 	unsigned long flags;
+	int mult = 1;
 
 	if (!task)
 		return -ESRCH;
 	if (lock_task_sighand(task, &flags)) {
-		if (task->signal->oom_score_adj == OOM_SCORE_ADJ_MAX)
+		if (task->signal->oom_score_adj == OOM_SCORE_ADJ_MAX) {
 			oom_adj = OOM_ADJUST_MAX;
-		else
-			oom_adj = (task->signal->oom_score_adj * -OOM_DISABLE) /
-				  OOM_SCORE_ADJ_MAX;
+		} else {
+			if (task->signal->oom_score_adj < 0)
+				mult = -1;
+			oom_adj = roundup(mult * task->signal->oom_score_adj *
+				-OOM_DISABLE, OOM_SCORE_ADJ_MAX) /
+				OOM_SCORE_ADJ_MAX * mult;
+		}
 		unlock_task_sighand(task, &flags);
 	}
 	put_task_struct(task);
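
The rewritten read side maps oom_score_adj (-1000..1000) back onto the legacy oom_adj scale (-17..15) by rounding the scaled magnitude away from zero instead of truncating: the sign is factored out into mult, the magnitude is scaled by -OOM_DISABLE, rounded up to a multiple of OOM_SCORE_ADJ_MAX before dividing, then the sign is restored. A standalone rendering of the same arithmetic (constants as in the kernel's oom headers; roundup() reimplemented for userspace):

#include <stdio.h>

#define OOM_DISABLE		(-17)
#define OOM_ADJUST_MAX		15
#define OOM_SCORE_ADJ_MAX	1000

#define roundup(x, y) ((((x) + ((y) - 1)) / (y)) * (y))

static int oom_adj_from_score_adj(int oom_score_adj)
{
	int mult = 1;

	if (oom_score_adj == OOM_SCORE_ADJ_MAX)
		return OOM_ADJUST_MAX;
	if (oom_score_adj < 0)
		mult = -1;
	return roundup(mult * oom_score_adj * -OOM_DISABLE,
		       OOM_SCORE_ADJ_MAX) / OOM_SCORE_ADJ_MAX * mult;
}

int main(void)
{
	/* 529 * 17 = 8993: plain division gave 8, rounding up gives 9,
	 * and the sign is restored for negative values. */
	printf("%d -> %d\n",  529, oom_adj_from_score_adj(529));	/*  9 */
	printf("%d -> %d\n", -529, oom_adj_from_score_adj(-529));	/* -9 */
	printf("%d -> %d\n", 1000, oom_adj_from_score_adj(1000));	/* 15 */
	return 0;
}
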
@@ -1410,6 +1415,204 @@
 
 #endif
 
+/*
+ * Print out various scheduling related per-task fields:
+ */
+
+#ifdef CONFIG_SMP
+
+static int sched_wake_up_idle_show(struct seq_file *m, void *v)
+{
+	struct inode *inode = m->private;
+	struct task_struct *p;
+
+	p = get_proc_task(inode);
+	if (!p)
+		return -ESRCH;
+
+	seq_printf(m, "%d\n", sched_get_wake_up_idle(p));
+
+	put_task_struct(p);
+
+	return 0;
+}
+
+static ssize_t
+sched_wake_up_idle_write(struct file *file, const char __user *buf,
+	    size_t count, loff_t *offset)
+{
+	struct inode *inode = file_inode(file);
+	struct task_struct *p;
+	char buffer[PROC_NUMBUF];
+	int wake_up_idle, err;
+
+	memset(buffer, 0, sizeof(buffer));
+	if (count > sizeof(buffer) - 1)
+		count = sizeof(buffer) - 1;
+	if (copy_from_user(buffer, buf, count)) {
+		err = -EFAULT;
+		goto out;
+	}
+
+	err = kstrtoint(strstrip(buffer), 0, &wake_up_idle);
+	if (err)
+		goto out;
+
+	p = get_proc_task(inode);
+	if (!p)
+		return -ESRCH;
+
+	err = sched_set_wake_up_idle(p, wake_up_idle);
+
+	put_task_struct(p);
+
+out:
+	return err < 0 ? err : count;
+}
+
+static int sched_wake_up_idle_open(struct inode *inode, struct file *filp)
+{
+	return single_open(filp, sched_wake_up_idle_show, inode);
+}
+
+static const struct file_operations proc_pid_sched_wake_up_idle_operations = {
+	.open		= sched_wake_up_idle_open,
+	.read		= seq_read,
+	.write		= sched_wake_up_idle_write,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+
+#endif	/* CONFIG_SMP */
+
+#ifdef CONFIG_SCHED_HMP
+
+static int sched_init_task_load_show(struct seq_file *m, void *v)
+{
+	struct inode *inode = m->private;
+	struct task_struct *p;
+
+	p = get_proc_task(inode);
+	if (!p)
+		return -ESRCH;
+
+	seq_printf(m, "%d\n", sched_get_init_task_load(p));
+
+	put_task_struct(p);
+
+	return 0;
+}
+
+static ssize_t
+sched_init_task_load_write(struct file *file, const char __user *buf,
+	    size_t count, loff_t *offset)
+{
+	struct inode *inode = file_inode(file);
+	struct task_struct *p;
+	char buffer[PROC_NUMBUF];
+	int init_task_load, err;
+
+	memset(buffer, 0, sizeof(buffer));
+	if (count > sizeof(buffer) - 1)
+		count = sizeof(buffer) - 1;
+	if (copy_from_user(buffer, buf, count)) {
+		err = -EFAULT;
+		goto out;
+	}
+
+	err = kstrtoint(strstrip(buffer), 0, &init_task_load);
+	if (err)
+		goto out;
+
+	p = get_proc_task(inode);
+	if (!p)
+		return -ESRCH;
+
+	err = sched_set_init_task_load(p, init_task_load);
+
+	put_task_struct(p);
+
+out:
+	return err < 0 ? err : count;
+}
+
+static int sched_init_task_load_open(struct inode *inode, struct file *filp)
+{
+	return single_open(filp, sched_init_task_load_show, inode);
+}
+
+static const struct file_operations proc_pid_sched_init_task_load_operations = {
+	.open		= sched_init_task_load_open,
+	.read		= seq_read,
+	.write		= sched_init_task_load_write,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+
+static int sched_group_id_show(struct seq_file *m, void *v)
+{
+	struct inode *inode = m->private;
+	struct task_struct *p;
+
+	p = get_proc_task(inode);
+	if (!p)
+		return -ESRCH;
+
+	seq_printf(m, "%d\n", sched_get_group_id(p));
+
+	put_task_struct(p);
+
+	return 0;
+}
+
+static ssize_t
+sched_group_id_write(struct file *file, const char __user *buf,
+	    size_t count, loff_t *offset)
+{
+	struct inode *inode = file_inode(file);
+	struct task_struct *p;
+	char buffer[PROC_NUMBUF];
+	int group_id, err;
+
+	memset(buffer, 0, sizeof(buffer));
+	if (count > sizeof(buffer) - 1)
+		count = sizeof(buffer) - 1;
+	if (copy_from_user(buffer, buf, count)) {
+		err = -EFAULT;
+		goto out;
+	}
+
+	err = kstrtoint(strstrip(buffer), 0, &group_id);
+	if (err)
+		goto out;
+
+	p = get_proc_task(inode);
+	if (!p)
+		return -ESRCH;
+
+	err = sched_set_group_id(p, group_id);
+
+	put_task_struct(p);
+
+out:
+	return err < 0 ? err : count;
+}
+
+static int sched_group_id_open(struct inode *inode, struct file *filp)
+{
+	return single_open(filp, sched_group_id_show, inode);
+}
+
+static const struct file_operations proc_pid_sched_group_id_operations = {
+	.open		= sched_group_id_open,
+	.read		= seq_read,
+	.write		= sched_group_id_write,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+
+#endif	/* CONFIG_SCHED_HMP */
+
 #ifdef CONFIG_SCHED_AUTOGROUP
 /*
  * Print out autogroup related information:
@@ -2240,6 +2443,92 @@
 	.release	= seq_release_private,
 };
 
+static ssize_t timerslack_ns_write(struct file *file, const char __user *buf,
+					size_t count, loff_t *offset)
+{
+	struct inode *inode = file_inode(file);
+	struct task_struct *p;
+	u64 slack_ns;
+	int err;
+
+	err = kstrtoull_from_user(buf, count, 10, &slack_ns);
+	if (err < 0)
+		return err;
+
+	p = get_proc_task(inode);
+	if (!p)
+		return -ESRCH;
+
+	if (p != current) {
+		if (!capable(CAP_SYS_NICE)) {
+			count = -EPERM;
+			goto out;
+		}
+
+		err = security_task_setscheduler(p);
+		if (err) {
+			count = err;
+			goto out;
+		}
+	}
+
+	task_lock(p);
+	if (slack_ns == 0)
+		p->timer_slack_ns = p->default_timer_slack_ns;
+	else
+		p->timer_slack_ns = slack_ns;
+	task_unlock(p);
+
+out:
+	put_task_struct(p);
+
+	return count;
+}
+
+static int timerslack_ns_show(struct seq_file *m, void *v)
+{
+	struct inode *inode = m->private;
+	struct task_struct *p;
+	int err = 0;
+
+	p = get_proc_task(inode);
+	if (!p)
+		return -ESRCH;
+
+	if (p != current) {
+
+		if (!capable(CAP_SYS_NICE)) {
+			err = -EPERM;
+			goto out;
+		}
+		err = security_task_getscheduler(p);
+		if (err)
+			goto out;
+	}
+
+	task_lock(p);
+	seq_printf(m, "%llu\n", p->timer_slack_ns);
+	task_unlock(p);
+
+out:
+	put_task_struct(p);
+
+	return err;
+}
+
+static int timerslack_ns_open(struct inode *inode, struct file *filp)
+{
+	return single_open(filp, timerslack_ns_show, inode);
+}
+
+static const struct file_operations proc_pid_set_timerslack_ns_operations = {
+	.open		= timerslack_ns_open,
+	.read		= seq_read,
+	.write		= timerslack_ns_write,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+
 static int proc_pident_instantiate(struct inode *dir,
 	struct dentry *dentry, struct task_struct *task, const void *ptr)
 {
@@ -2739,6 +3028,13 @@
 	ONE("status",     S_IRUGO, proc_pid_status),
 	ONE("personality", S_IRUSR, proc_pid_personality),
 	ONE("limits",	  S_IRUGO, proc_pid_limits),
+#ifdef CONFIG_SMP
+	REG("sched_wake_up_idle",      S_IRUGO|S_IWUSR, proc_pid_sched_wake_up_idle_operations),
+#endif
+#ifdef CONFIG_SCHED_HMP
+	REG("sched_init_task_load",      S_IRUGO|S_IWUSR, proc_pid_sched_init_task_load_operations),
+	REG("sched_group_id",      S_IRUGO|S_IWUGO, proc_pid_sched_group_id_operations),
+#endif
 #ifdef CONFIG_SCHED_DEBUG
 	REG("sched",      S_IRUGO|S_IWUSR, proc_pid_sched_operations),
 #endif
@@ -2763,6 +3059,9 @@
 	REG("mounts",     S_IRUGO, proc_mounts_operations),
 	REG("mountinfo",  S_IRUGO, proc_mountinfo_operations),
 	REG("mountstats", S_IRUSR, proc_mountstats_operations),
+#ifdef CONFIG_PROCESS_RECLAIM
+	REG("reclaim", S_IWUSR, proc_reclaim_operations),
+#endif
 #ifdef CONFIG_PROC_PAGE_MONITOR
 	REG("clear_refs", S_IWUSR, proc_clear_refs_operations),
 	REG("smaps",      S_IRUGO, proc_pid_smaps_operations),
@@ -2790,8 +3089,8 @@
 	ONE("cgroup",  S_IRUGO, proc_cgroup_show),
 #endif
 	ONE("oom_score",  S_IRUGO, proc_oom_score),
-	REG("oom_adj",    S_IRUGO|S_IWUSR, proc_oom_adj_operations),
-	REG("oom_score_adj", S_IRUGO|S_IWUSR, proc_oom_score_adj_operations),
+	REG("oom_adj",    S_IRUSR, proc_oom_adj_operations),
+	REG("oom_score_adj", S_IRUSR, proc_oom_score_adj_operations),
 #ifdef CONFIG_AUDITSYSCALL
 	REG("loginuid",   S_IWUSR|S_IRUGO, proc_loginuid_operations),
 	REG("sessionid",  S_IRUGO, proc_sessionid_operations),
@@ -2817,6 +3116,7 @@
 #ifdef CONFIG_CHECKPOINT_RESTORE
 	REG("timers",	  S_IRUGO, proc_timers_operations),
 #endif
+	REG("timerslack_ns", S_IRUGO|S_IWUGO, proc_pid_set_timerslack_ns_operations),
 };
 
 static int proc_tgid_base_readdir(struct file *file, struct dir_context *ctx)
@@ -3076,6 +3376,44 @@
 }
 
 /*
+ * proc_tid_comm_permission is a special permission function exclusively
+ * used for the node /proc/<pid>/task/<tid>/comm.
+ * It bypasses generic permission checks in the case where a task of the same
+ * task group attempts to access the node.
+ * The rationale behind this is that glibc and bionic access this node for
+ * cross thread naming (pthread_set/getname_np(!self)). However, if
+ * PR_SET_DUMPABLE gets set to 0 this node among others becomes uid=0 gid=0,
+ * which locks out the cross thread naming implementation.
+ * This function makes sure that the node is always accessible to members of
+ * the same thread group.
+ */
+static int proc_tid_comm_permission(struct inode *inode, int mask)
+{
+	bool is_same_tgroup;
+	struct task_struct *task;
+
+	task = get_proc_task(inode);
+	if (!task)
+		return -ESRCH;
+	is_same_tgroup = same_thread_group(current, task);
+	put_task_struct(task);
+
+	if (likely(is_same_tgroup && !(mask & MAY_EXEC))) {
+		/* This file (/proc/<pid>/task/<tid>/comm) can always be
+		 * read or written by the members of the corresponding
+		 * thread group.
+		 */
+		return 0;
+	}
+
+	return generic_permission(inode, mask);
+}
+
+static const struct inode_operations proc_tid_comm_inode_operations = {
+		.permission = proc_tid_comm_permission,
+};
+
+/*
  * Tasks
  */
 static const struct pid_entry tid_base_stuff[] = {
@@ -3093,7 +3431,9 @@
 #ifdef CONFIG_SCHED_DEBUG
 	REG("sched",     S_IRUGO|S_IWUSR, proc_pid_sched_operations),
 #endif
-	REG("comm",      S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
+	NOD("comm",      S_IFREG|S_IRUGO|S_IWUSR,
+			 &proc_tid_comm_inode_operations,
+			 &proc_pid_set_comm_operations, {}),
 #ifdef CONFIG_HAVE_ARCH_TRACEHOOK
 	ONE("syscall",   S_IRUSR, proc_pid_syscall),
 #endif
@@ -3140,8 +3480,8 @@
 	ONE("cgroup",  S_IRUGO, proc_cgroup_show),
 #endif
 	ONE("oom_score", S_IRUGO, proc_oom_score),
-	REG("oom_adj",   S_IRUGO|S_IWUSR, proc_oom_adj_operations),
-	REG("oom_score_adj", S_IRUGO|S_IWUSR, proc_oom_score_adj_operations),
+	REG("oom_adj",   S_IRUSR, proc_oom_adj_operations),
+	REG("oom_score_adj", S_IRUSR, proc_oom_score_adj_operations),
 #ifdef CONFIG_AUDITSYSCALL
 	REG("loginuid",  S_IWUSR|S_IRUGO, proc_loginuid_operations),
 	REG("sessionid",  S_IRUGO, proc_sessionid_operations),
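
Among the files registered above, timerslack_ns is notable for being writable by group and others (S_IWUGO); per timerslack_ns_write(), writing 0 restores the task's default slack, any other value is taken as nanoseconds, and touching another task requires CAP_SYS_NICE plus a scheduler-policy LSM check. A minimal user of the interface (plain userspace C, assuming the patched kernel):

#include <stdio.h>

int main(void)
{
	unsigned long long slack;
	FILE *f = fopen("/proc/self/timerslack_ns", "r+");

	if (!f) {
		perror("timerslack_ns");
		return 1;
	}
	if (fscanf(f, "%llu", &slack) == 1)
		printf("current slack: %llu ns\n", slack);
	rewind(f);			/* required between read and write */
	fputs("50000\n", f);		/* request 50 us of slack; 0 = default */
	fclose(f);
	return 0;
}
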
diff -ruw linux-4.4.115/fs/proc/internal.h linux-4.4.115-fbx/fs/proc/internal.h
--- linux-4.4.115/fs/proc/internal.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/fs/proc/internal.h	2019-10-29 09:26:25.373220108 +0100
@@ -209,6 +209,7 @@
 extern const struct inode_operations proc_link_inode_operations;
 
 extern const struct inode_operations proc_pid_link_inode_operations;
+extern const struct file_operations proc_reclaim_operations;
 
 extern void proc_init_inodecache(void);
 extern struct inode *proc_get_inode(struct super_block *, struct proc_dir_entry *);
diff -ruw linux-4.4.115/fs/proc/meminfo.c linux-4.4.115-fbx/fs/proc/meminfo.c
--- linux-4.4.115/fs/proc/meminfo.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/fs/proc/meminfo.c	2019-10-29 09:26:25.373220108 +0100
@@ -79,6 +79,13 @@
 	available += global_page_state(NR_SLAB_RECLAIMABLE) -
 		     min(global_page_state(NR_SLAB_RECLAIMABLE) / 2, wmark_low);
 
+	/*
+	 * Part of the kernel memory, which can be released under memory
+	 * pressure.
+	 */
+	available += global_page_state(NR_INDIRECTLY_RECLAIMABLE_BYTES) >>
+		PAGE_SHIFT;
+
 	if (available < 0)
 		available = 0;
 
diff -ruw linux-4.4.115/fs/proc/task_mmu.c linux-4.4.115-fbx/fs/proc/task_mmu.c
--- linux-4.4.115/fs/proc/task_mmu.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/fs/proc/task_mmu.c	2019-10-29 09:26:25.373220108 +0100
@@ -14,6 +14,8 @@
 #include <linux/swapops.h>
 #include <linux/mmu_notifier.h>
 #include <linux/page_idle.h>
+#include <linux/mm_inline.h>
+#include <linux/ctype.h>
 
 #include <asm/elf.h>
 #include <asm/uaccess.h>
@@ -116,6 +118,56 @@
 }
 #endif
 
+static void seq_print_vma_name(struct seq_file *m, struct vm_area_struct *vma)
+{
+	const char __user *name = vma_get_anon_name(vma);
+	struct mm_struct *mm = vma->vm_mm;
+
+	unsigned long page_start_vaddr;
+	unsigned long page_offset;
+	unsigned long num_pages;
+	unsigned long max_len = NAME_MAX;
+	int i;
+
+	page_start_vaddr = (unsigned long)name & PAGE_MASK;
+	page_offset = (unsigned long)name - page_start_vaddr;
+	num_pages = DIV_ROUND_UP(page_offset + max_len, PAGE_SIZE);
+
+	seq_puts(m, "[anon:");
+
+	for (i = 0; i < num_pages; i++) {
+		int len;
+		int write_len;
+		const char *kaddr;
+		long pages_pinned;
+		struct page *page;
+
+		pages_pinned = get_user_pages(current, mm, page_start_vaddr,
+				1, 0, 0, &page, NULL);
+		if (pages_pinned < 1) {
+			seq_puts(m, "<fault>]");
+			return;
+		}
+
+		kaddr = (const char *)kmap(page);
+		len = min(max_len, PAGE_SIZE - page_offset);
+		write_len = strnlen(kaddr + page_offset, len);
+		seq_write(m, kaddr + page_offset, write_len);
+		kunmap(page);
+		put_page(page);
+
+		/* if strnlen hit a null terminator then we're done */
+		if (write_len != len)
+			break;
+
+		max_len -= len;
+		page_offset = 0;
+		page_start_vaddr += PAGE_SIZE;
+	}
+
+	seq_putc(m, ']');
+}
+
 static void vma_stop(struct proc_maps_private *priv)
 {
 	struct mm_struct *mm = priv->mm;
@@ -337,8 +389,14 @@
 			goto done;
 		}
 
-		if (is_stack(priv, vma, is_pid))
+		if (is_stack(priv, vma, is_pid)) {
 			name = "[stack]";
+			goto done;
+		}
+		if (vma_get_anon_name(vma)) {
+			seq_pad(m, ' ');
+			seq_print_vma_name(m, vma);
+		}
 	}
 
 done:
@@ -663,6 +721,12 @@
 
 	show_map_vma(m, vma, is_pid);
 
+	if (vma_get_anon_name(vma)) {
+		seq_puts(m, "Name:           ");
+		seq_print_vma_name(m, vma);
+		seq_putc(m, '\n');
+	}
+
 	seq_printf(m,
 		   "Size:           %8lu kB\n"
 		   "Rss:            %8lu kB\n"
@@ -1361,6 +1425,241 @@
 };
 #endif /* CONFIG_PROC_PAGE_MONITOR */
 
+#ifdef CONFIG_PROCESS_RECLAIM
+static int reclaim_pte_range(pmd_t *pmd, unsigned long addr,
+				unsigned long end, struct mm_walk *walk)
+{
+	struct reclaim_param *rp = walk->private;
+	struct vm_area_struct *vma = rp->vma;
+	pte_t *pte, ptent;
+	spinlock_t *ptl;
+	struct page *page;
+	LIST_HEAD(page_list);
+	int isolated;
+	int reclaimed;
+
+	split_huge_page_pmd(vma, addr, pmd);
+	if (pmd_trans_unstable(pmd) || !rp->nr_to_reclaim)
+		return 0;
+cont:
+	isolated = 0;
+	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
+	for (; addr != end; pte++, addr += PAGE_SIZE) {
+		ptent = *pte;
+		if (!pte_present(ptent))
+			continue;
+
+		page = vm_normal_page(vma, addr, ptent);
+		if (!page)
+			continue;
+
+		if (isolate_lru_page(page))
+			continue;
+
+		list_add(&page->lru, &page_list);
+		inc_zone_page_state(page, NR_ISOLATED_ANON +
+				page_is_file_cache(page));
+		isolated++;
+		rp->nr_scanned++;
+		if ((isolated >= SWAP_CLUSTER_MAX) || !rp->nr_to_reclaim)
+			break;
+	}
+	pte_unmap_unlock(pte - 1, ptl);
+	reclaimed = reclaim_pages_from_list(&page_list, vma);
+	rp->nr_reclaimed += reclaimed;
+	rp->nr_to_reclaim -= reclaimed;
+	if (rp->nr_to_reclaim < 0)
+		rp->nr_to_reclaim = 0;
+
+	if (rp->nr_to_reclaim && (addr != end))
+		goto cont;
+
+	cond_resched();
+	return 0;
+}
+
+enum reclaim_type {
+	RECLAIM_FILE,
+	RECLAIM_ANON,
+	RECLAIM_ALL,
+	RECLAIM_RANGE,
+};
+
+struct reclaim_param reclaim_task_anon(struct task_struct *task,
+		int nr_to_reclaim)
+{
+	struct mm_struct *mm;
+	struct vm_area_struct *vma;
+	struct mm_walk reclaim_walk = {};
+	struct reclaim_param rp;
+
+	rp.nr_reclaimed = 0;
+	rp.nr_scanned = 0;
+	get_task_struct(task);
+	mm = get_task_mm(task);
+	if (!mm)
+		goto out;
+
+	reclaim_walk.mm = mm;
+	reclaim_walk.pmd_entry = reclaim_pte_range;
+
+	rp.nr_to_reclaim = nr_to_reclaim;
+	reclaim_walk.private = &rp;
+
+	down_read(&mm->mmap_sem);
+	for (vma = mm->mmap; vma; vma = vma->vm_next) {
+		if (is_vm_hugetlb_page(vma))
+			continue;
+
+		if (vma->vm_file)
+			continue;
+
+		if (vma->vm_flags & VM_LOCKED)
+			continue;
+
+		if (!rp.nr_to_reclaim)
+			break;
+
+		rp.vma = vma;
+		walk_page_range(vma->vm_start, vma->vm_end,
+			&reclaim_walk);
+	}
+
+	flush_tlb_mm(mm);
+	up_read(&mm->mmap_sem);
+	mmput(mm);
+out:
+	put_task_struct(task);
+	return rp;
+}
+
+static ssize_t reclaim_write(struct file *file, const char __user *buf,
+				size_t count, loff_t *ppos)
+{
+	struct task_struct *task;
+	char buffer[200];
+	struct mm_struct *mm;
+	struct vm_area_struct *vma;
+	enum reclaim_type type;
+	char *type_buf;
+	struct mm_walk reclaim_walk = {};
+	unsigned long start = 0;
+	unsigned long end = 0;
+	struct reclaim_param rp;
+
+	memset(buffer, 0, sizeof(buffer));
+	if (count > sizeof(buffer) - 1)
+		count = sizeof(buffer) - 1;
+
+	if (copy_from_user(buffer, buf, count))
+		return -EFAULT;
+
+	type_buf = strstrip(buffer);
+	if (!strcmp(type_buf, "file"))
+		type = RECLAIM_FILE;
+	else if (!strcmp(type_buf, "anon"))
+		type = RECLAIM_ANON;
+	else if (!strcmp(type_buf, "all"))
+		type = RECLAIM_ALL;
+	else if (isdigit(*type_buf))
+		type = RECLAIM_RANGE;
+	else
+		goto out_err;
+
+	if (type == RECLAIM_RANGE) {
+		char *token;
+		unsigned long long len, len_in, tmp;
+		token = strsep(&type_buf, " ");
+		if (!token)
+			goto out_err;
+		tmp = memparse(token, &token);
+		if (tmp & ~PAGE_MASK || tmp > ULONG_MAX)
+			goto out_err;
+		start = tmp;
+
+		token = strsep(&type_buf, " ");
+		if (!token)
+			goto out_err;
+		len_in = memparse(token, &token);
+		len = (len_in + ~PAGE_MASK) & PAGE_MASK;
+		if (len > ULONG_MAX)
+			goto out_err;
+		/*
+		 * Check whether len was rounded up from a small negative
+		 * value to zero.
+		 */
+		if (len_in && !len)
+			goto out_err;
+
+		end = start + len;
+		if (end < start)
+			goto out_err;
+	}
+
+	task = get_proc_task(file->f_path.dentry->d_inode);
+	if (!task)
+		return -ESRCH;
+
+	mm = get_task_mm(task);
+	if (!mm)
+		goto out;
+
+	reclaim_walk.mm = mm;
+	reclaim_walk.pmd_entry = reclaim_pte_range;
+
+	rp.nr_to_reclaim = ~0;
+	rp.nr_reclaimed = 0;
+	reclaim_walk.private = &rp;
+
+	down_read(&mm->mmap_sem);
+	if (type == RECLAIM_RANGE) {
+		vma = find_vma(mm, start);
+		while (vma) {
+			if (vma->vm_start > end)
+				break;
+			if (is_vm_hugetlb_page(vma))
+				continue;
+
+			rp.vma = vma;
+			walk_page_range(max(vma->vm_start, start),
+					min(vma->vm_end, end),
+					&reclaim_walk);
+			vma = vma->vm_next;
+		}
+	} else {
+		for (vma = mm->mmap; vma; vma = vma->vm_next) {
+			if (is_vm_hugetlb_page(vma))
+				continue;
+
+			if (type == RECLAIM_ANON && vma->vm_file)
+				continue;
+
+			if (type == RECLAIM_FILE && !vma->vm_file)
+				continue;
+
+			rp.vma = vma;
+			walk_page_range(vma->vm_start, vma->vm_end,
+				&reclaim_walk);
+		}
+	}
+
+	flush_tlb_mm(mm);
+	up_read(&mm->mmap_sem);
+	mmput(mm);
+out:
+	put_task_struct(task);
+	return count;
+
+out_err:
+	return -EINVAL;
+}
+
+const struct file_operations proc_reclaim_operations = {
+	.write		= reclaim_write,
+	.llseek		= noop_llseek,
+};
+#endif
+
 #ifdef CONFIG_NUMA
 
 struct numa_maps {
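
reclaim_write() accepts exactly four command forms: "file", "anon", "all", or "<start> <length>" (memparse-style numbers, start page-aligned), and walks the selected VMAs through reclaim_pte_range(). A small userspace driver for the interface (the proc file is owner-writable only, S_IWUSR, per the tgid_base_stuff entry above):

#include <stdio.h>

int main(int argc, char **argv)
{
	char path[64];
	FILE *f;

	if (argc != 3) {
		fprintf(stderr, "usage: %s <pid> file|anon|all\n", argv[0]);
		return 1;
	}
	snprintf(path, sizeof(path), "/proc/%s/reclaim", argv[1]);
	f = fopen(path, "w");
	if (!f) {
		perror(path);
		return 1;
	}
	fprintf(f, "%s", argv[2]);	/* e.g. "anon": reclaim anonymous pages */
	fclose(f);
	return 0;
}
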
diff -ruw linux-4.4.115/fs/proc_namespace.c linux-4.4.115-fbx/fs/proc_namespace.c
--- linux-4.4.115/fs/proc_namespace.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/fs/proc_namespace.c	2019-01-22 16:16:28.039287483 +0100
@@ -118,7 +118,9 @@
 	if (err)
 		goto out;
 	show_mnt_opts(m, mnt);
-	if (sb->s_op->show_options)
+	if (sb->s_op->show_options2)
+			err = sb->s_op->show_options2(mnt, m, mnt_path.dentry);
+	else if (sb->s_op->show_options)
 		err = sb->s_op->show_options(m, mnt_path.dentry);
 	seq_puts(m, " 0 0\n");
 out:
@@ -178,7 +180,9 @@
 	err = show_sb_opts(m, sb);
 	if (err)
 		goto out;
-	if (sb->s_op->show_options)
+	if (sb->s_op->show_options2) {
+		err = sb->s_op->show_options2(mnt, m, mnt->mnt_root);
+	} else if (sb->s_op->show_options)
 		err = sb->s_op->show_options(m, mnt->mnt_root);
 	seq_putc(m, '\n');
 out:
diff -ruw linux-4.4.115/fs/pstore/platform.c linux-4.4.115-fbx/fs/pstore/platform.c
--- linux-4.4.115/fs/pstore/platform.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/fs/pstore/platform.c	2019-10-29 09:26:25.377220147 +0100
@@ -431,6 +431,40 @@
 			     size, psi);
 }
 
+static int pstore_write_buf_user_compat(enum pstore_type_id type,
+			       enum kmsg_dump_reason reason,
+			       u64 *id, unsigned int part,
+			       const char __user *buf,
+			       bool compressed, size_t size,
+			       struct pstore_info *psi)
+{
+	unsigned long flags = 0;
+	size_t i, bufsize = size;
+	long ret = 0;
+
+	if (unlikely(!access_ok(VERIFY_READ, buf, size)))
+		return -EFAULT;
+	if (bufsize > psinfo->bufsize)
+		bufsize = psinfo->bufsize;
+	spin_lock_irqsave(&psinfo->buf_lock, flags);
+	for (i = 0; i < size; ) {
+		size_t c = min(size - i, bufsize);
+
+		ret = __copy_from_user(psinfo->buf, buf + i, c);
+		if (unlikely(ret != 0)) {
+			ret = -EFAULT;
+			break;
+		}
+		ret = psi->write_buf(type, reason, id, part, psinfo->buf,
+				     compressed, c, psi);
+		if (unlikely(ret < 0))
+			break;
+		i += c;
+	}
+	spin_unlock_irqrestore(&psinfo->buf_lock, flags);
+	return unlikely(ret < 0) ? ret : size;
+}
+
 /*
  * platform specific persistent storage driver registers with
  * us here. If pstore is already mounted, call the platform
@@ -453,6 +487,8 @@
 
 	if (!psi->write)
 		psi->write = pstore_write_compat;
+	if (!psi->write_buf_user)
+		psi->write_buf_user = pstore_write_buf_user_compat;
 	psinfo = psi;
 	mutex_init(&psinfo->read_mutex);
 	spin_unlock(&pstore_lock);
diff -ruw linux-4.4.115/fs/pstore/ram.c linux-4.4.115-fbx/fs/pstore/ram.c
--- linux-4.4.115/fs/pstore/ram.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/fs/pstore/ram.c	2019-01-22 16:16:28.043287519 +0100
@@ -34,6 +34,8 @@
 #include <linux/slab.h>
 #include <linux/compiler.h>
 #include <linux/pstore_ram.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
 
 #define RAMOOPS_KERNMSG_HDR "===="
 #define MIN_MEM_SIZE 4096UL
@@ -329,6 +331,24 @@
 	return 0;
 }
 
+static int notrace ramoops_pstore_write_buf_user(enum pstore_type_id type,
+						 enum kmsg_dump_reason reason,
+						 u64 *id, unsigned int part,
+						 const char __user *buf,
+						 bool compressed, size_t size,
+						 struct pstore_info *psi)
+{
+	if (type == PSTORE_TYPE_PMSG) {
+		struct ramoops_context *cxt = psi->data;
+
+		if (!cxt->mprz)
+			return -ENOMEM;
+		return persistent_ram_write_user(cxt->mprz, buf, size);
+	}
+
+	return -EINVAL;
+}
+
 static int ramoops_pstore_erase(enum pstore_type_id type, u64 id, int count,
 				struct timespec time, struct pstore_info *psi)
 {
@@ -367,6 +387,7 @@
 		.open	= ramoops_pstore_open,
 		.read	= ramoops_pstore_read,
 		.write_buf	= ramoops_pstore_write_buf,
+		.write_buf_user	= ramoops_pstore_write_buf_user,
 		.erase	= ramoops_pstore_erase,
 	},
 };
@@ -467,6 +488,97 @@
 	return 0;
 }
 
+void notrace ramoops_console_write_buf(const char *buf, size_t size)
+{
+	struct ramoops_context *cxt = &oops_cxt;
+	persistent_ram_write(cxt->cprz, buf, size);
+}
+
+static int ramoops_parse_dt_size(struct platform_device *pdev,
+		const char *propname, unsigned long *val)
+{
+	u32 val64;
+	int ret;
+
+	ret = of_property_read_u32(pdev->dev.of_node, propname, &val64);
+	if (ret == -EINVAL) {
+		*val = 0;
+		return 0;
+	} else if (ret != 0) {
+		dev_err(&pdev->dev, "failed to parse property %s: %d\n",
+				propname, ret);
+		return ret;
+	}
+
+	if (val64 > ULONG_MAX) {
+		dev_err(&pdev->dev, "invalid %s %u\n", propname, val64);
+		return -EOVERFLOW;
+	}
+
+	*val = val64;
+	return 0;
+}
+
+static int ramoops_parse_dt(struct platform_device *pdev,
+		struct ramoops_platform_data *pdata)
+{
+	struct device_node *of_node = pdev->dev.of_node;
+	struct device_node *mem_region;
+	struct resource res;
+	u32 ecc_size;
+	int ret;
+
+	dev_dbg(&pdev->dev, "using Device Tree\n");
+
+	mem_region = of_parse_phandle(of_node, "memory-region", 0);
+	if (!mem_region) {
+		dev_err(&pdev->dev, "no memory-region phandle\n");
+		return -ENODEV;
+	}
+
+	ret = of_address_to_resource(mem_region, 0, &res);
+	of_node_put(mem_region);
+	if (ret) {
+		dev_err(&pdev->dev, "failed to translate memory-region to resource: %d\n",
+				ret);
+		return ret;
+	}
+
+	pdata->mem_size = resource_size(&res);
+	pdata->mem_address = res.start;
+	pdata->mem_type = of_property_read_bool(of_node, "unbuffered");
+	pdata->dump_oops = !of_property_read_bool(of_node, "no-dump-oops");
+
+	ret = ramoops_parse_dt_size(pdev, "record-size", &pdata->record_size);
+	if (ret < 0)
+		return ret;
+
+	ret = ramoops_parse_dt_size(pdev, "console-size", &pdata->console_size);
+	if (ret < 0)
+		return ret;
+
+	ret = ramoops_parse_dt_size(pdev, "ftrace-size", &pdata->ftrace_size);
+	if (ret < 0)
+		return ret;
+
+	ret = ramoops_parse_dt_size(pdev, "pmsg-size", &pdata->pmsg_size);
+	if (ret < 0)
+		return ret;
+
+	ret = of_property_read_u32(of_node, "ecc-size", &ecc_size);
+	if (ret == 0) {
+		if (ecc_size > INT_MAX) {
+			dev_err(&pdev->dev, "invalid ecc-size %u\n", ecc_size);
+			return -EOVERFLOW;
+		}
+		pdata->ecc_info.ecc_size = ecc_size;
+	} else if (ret != -EINVAL) {
+		return ret;
+	}
+
+	return 0;
+}
+
 static int ramoops_probe(struct platform_device *pdev)
 {
 	struct device *dev = &pdev->dev;
@@ -476,6 +588,18 @@
 	phys_addr_t paddr;
 	int err = -EINVAL;
 
+	if (dev->of_node && !pdata) {
+		pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
+		if (!pdata) {
+			err = -ENOMEM;
+			goto fail_out;
+		}
+
+		err = ramoops_parse_dt(pdev, pdata);
+		if (err < 0)
+			goto fail_out;
+	}
+
 	/* Only a single ramoops area allowed at a time, so fail extra
 	 * probes.
 	 */
@@ -604,11 +728,17 @@
 	return 0;
 }
 
+static const struct of_device_id dt_match[] = {
+	{ .compatible = "ramoops" },
+	{}
+};
+
 static struct platform_driver ramoops_driver = {
 	.probe		= ramoops_probe,
 	.remove		= ramoops_remove,
 	.driver		= {
 		.name	= "ramoops",
+		.of_match_table	= dt_match,
 	},
 };
 
diff -ruw linux-4.4.115/fs/pstore/ram_core.c linux-4.4.115-fbx/fs/pstore/ram_core.c
--- linux-4.4.115/fs/pstore/ram_core.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/fs/pstore/ram_core.c	2019-10-29 09:26:25.377220147 +0100
@@ -17,15 +17,16 @@
 #include <linux/device.h>
 #include <linux/err.h>
 #include <linux/errno.h>
-#include <linux/kernel.h>
 #include <linux/init.h>
 #include <linux/io.h>
+#include <linux/kernel.h>
 #include <linux/list.h>
 #include <linux/memblock.h>
+#include <linux/pstore_ram.h>
 #include <linux/rslib.h>
 #include <linux/slab.h>
+#include <linux/uaccess.h>
 #include <linux/vmalloc.h>
-#include <linux/pstore_ram.h>
 #include <asm/page.h>
 
 struct persistent_ram_buffer {
@@ -269,6 +270,16 @@
 	persistent_ram_update_ecc(prz, start, count);
 }
 
+static int notrace persistent_ram_update_user(struct persistent_ram_zone *prz,
+	const void __user *s, unsigned int start, unsigned int count)
+{
+	struct persistent_ram_buffer *buffer = prz->buffer;
+	int ret = unlikely(__copy_from_user(buffer->data + start, s, count)) ?
+		-EFAULT : 0;
+	persistent_ram_update_ecc(prz, start, count);
+	return ret;
+}
+
 void persistent_ram_save_old(struct persistent_ram_zone *prz)
 {
 	struct persistent_ram_buffer *buffer = prz->buffer;
@@ -322,6 +333,38 @@
 	return count;
 }
 
+int notrace persistent_ram_write_user(struct persistent_ram_zone *prz,
+	const void __user *s, unsigned int count)
+{
+	int rem, ret = 0, c = count;
+	size_t start;
+
+	if (unlikely(!access_ok(VERIFY_READ, s, count)))
+		return -EFAULT;
+	if (unlikely(c > prz->buffer_size)) {
+		s += c - prz->buffer_size;
+		c = prz->buffer_size;
+	}
+
+	buffer_size_add(prz, c);
+
+	start = buffer_start_add(prz, c);
+
+	rem = prz->buffer_size - start;
+	if (unlikely(rem < c)) {
+		ret = persistent_ram_update_user(prz, s, start, rem);
+		s += rem;
+		c -= rem;
+		start = 0;
+	}
+	if (likely(!ret))
+		ret = persistent_ram_update_user(prz, s, start, c);
+
+	persistent_ram_update_header_ecc(prz);
+
+	return unlikely(ret) ? ret : count;
+}
+
 size_t persistent_ram_old_size(struct persistent_ram_zone *prz)
 {
 	return prz->old_log_size;
diff -ruw linux-4.4.115/fs/select.c linux-4.4.115-fbx/fs/select.c
--- linux-4.4.115/fs/select.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/fs/select.c	2019-01-22 16:16:28.063287700 +0100
@@ -71,9 +71,9 @@
 	return slack;
 }
 
-long select_estimate_accuracy(struct timespec *tv)
+u64 select_estimate_accuracy(struct timespec *tv)
 {
-	unsigned long ret;
+	u64 ret;
 	struct timespec now;
 
 	/*
@@ -403,7 +403,7 @@
 	struct poll_wqueues table;
 	poll_table *wait;
 	int retval, i, timed_out = 0;
-	unsigned long slack = 0;
+	u64 slack = 0;
 	unsigned int busy_flag = net_busy_loop_on() ? POLL_BUSY_LOOP : 0;
 	unsigned long busy_end = 0;
 
@@ -792,7 +792,7 @@
 	poll_table* pt = &wait->pt;
 	ktime_t expire, *to = NULL;
 	int timed_out = 0, count = 0;
-	unsigned long slack = 0;
+	u64 slack = 0;
 	unsigned int busy_flag = net_busy_loop_on() ? POLL_BUSY_LOOP : 0;
 	unsigned long busy_end = 0;
 
diff -ruw linux-4.4.115/fs/squashfs/block.c linux-4.4.115-fbx/fs/squashfs/block.c
--- linux-4.4.115/fs/squashfs/block.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/fs/squashfs/block.c	2019-10-29 09:26:25.385220225 +0100
@@ -28,9 +28,12 @@
 
 #include <linux/fs.h>
 #include <linux/vfs.h>
+#include <linux/bio.h>
 #include <linux/slab.h>
 #include <linux/string.h>
+#include <linux/pagemap.h>
 #include <linux/buffer_head.h>
+#include <linux/workqueue.h>
 
 #include "squashfs_fs.h"
 #include "squashfs_fs_sb.h"
@@ -38,177 +41,435 @@
 #include "decompressor.h"
 #include "page_actor.h"
 
+static struct workqueue_struct *squashfs_read_wq;
+
+struct squashfs_read_request {
+	struct super_block *sb;
+	u64 index;
+	int length;
+	int compressed;
+	int offset;
+	u64 read_end;
+	struct squashfs_page_actor *output;
+	enum {
+		SQUASHFS_COPY,
+		SQUASHFS_DECOMPRESS,
+		SQUASHFS_METADATA,
+	} data_processing;
+	bool synchronous;
+
 /*
- * Read the metadata block length, this is stored in the first two
- * bytes of the metadata block.
+	 * If the read is synchronous, it is possible to retrieve information
+	 * about the request by setting these pointers.
  */
-static struct buffer_head *get_block_length(struct super_block *sb,
-			u64 *cur_index, int *offset, int *length)
+	int *res;
+	int *bytes_read;
+	int *bytes_uncompressed;
+
+	int nr_buffers;
+	struct buffer_head **bh;
+	struct work_struct offload;
+};
+
+struct squashfs_bio_request {
+	struct buffer_head **bh;
+	int nr_buffers;
+};
+
+static int squashfs_bio_submit(struct squashfs_read_request *req);
+
+int squashfs_init_read_wq(void)
 {
-	struct squashfs_sb_info *msblk = sb->s_fs_info;
-	struct buffer_head *bh;
+	squashfs_read_wq = create_workqueue("SquashFS read wq");
+	return !!squashfs_read_wq;
+}
 
-	bh = sb_bread(sb, *cur_index);
-	if (bh == NULL)
-		return NULL;
-
-	if (msblk->devblksize - *offset == 1) {
-		*length = (unsigned char) bh->b_data[*offset];
-		put_bh(bh);
-		bh = sb_bread(sb, ++(*cur_index));
-		if (bh == NULL)
-			return NULL;
-		*length |= (unsigned char) bh->b_data[0] << 8;
-		*offset = 1;
-	} else {
-		*length = (unsigned char) bh->b_data[*offset] |
-			(unsigned char) bh->b_data[*offset + 1] << 8;
-		*offset += 2;
+void squashfs_destroy_read_wq(void)
+{
+	flush_workqueue(squashfs_read_wq);
+	destroy_workqueue(squashfs_read_wq);
+}
 
-		if (*offset == msblk->devblksize) {
-			put_bh(bh);
-			bh = sb_bread(sb, ++(*cur_index));
-			if (bh == NULL)
-				return NULL;
-			*offset = 0;
+static void free_read_request(struct squashfs_read_request *req, int error)
+{
+	if (!req->synchronous)
+		squashfs_page_actor_free(req->output, error);
+	if (req->res)
+		*(req->res) = error;
+	kfree(req->bh);
+	kfree(req);
 		}
+
+static void squashfs_process_blocks(struct squashfs_read_request *req)
+{
+	int error = 0;
+	int bytes, i, length;
+	struct squashfs_sb_info *msblk = req->sb->s_fs_info;
+	struct squashfs_page_actor *actor = req->output;
+	struct buffer_head **bh = req->bh;
+	int nr_buffers = req->nr_buffers;
+
+	for (i = 0; i < nr_buffers; ++i) {
+		if (!bh[i])
+			continue;
+		wait_on_buffer(bh[i]);
+		if (!buffer_uptodate(bh[i]))
+			error = -EIO;
 	}
+	if (error)
+		goto cleanup;
 
-	return bh;
+	if (req->data_processing == SQUASHFS_METADATA) {
+		/* Extract the length of the metadata block */
+		if (req->offset != msblk->devblksize - 1) {
+			length = le16_to_cpup((__le16 *)
+					(bh[0]->b_data + req->offset));
+		} else {
+			length = (unsigned char)bh[0]->b_data[req->offset];
+			length |= (unsigned char)bh[1]->b_data[0] << 8;
+		}
+		req->compressed = SQUASHFS_COMPRESSED(length);
+		req->data_processing = req->compressed ? SQUASHFS_DECOMPRESS
+						       : SQUASHFS_COPY;
+		length = SQUASHFS_COMPRESSED_SIZE(length);
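+		/*
+		 * The 2-byte length read didn't cover the whole metadata
+		 * block: release the buffers and resubmit now that the
+		 * real length is known.
+		 */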
+		if (req->index + length + 2 > req->read_end) {
+			for (i = 0; i < nr_buffers; ++i)
+				put_bh(bh[i]);
+			kfree(bh);
+			req->length = length;
+			req->index += 2;
+			squashfs_bio_submit(req);
+			return;
+		}
+		req->length = length;
+		req->offset = (req->offset + 2) % PAGE_SIZE;
+		if (req->offset < 2) {
+			put_bh(bh[0]);
+			++bh;
+			--nr_buffers;
+		}
+	}
+	if (req->bytes_read)
+		*(req->bytes_read) = req->length;
+
+	if (req->data_processing == SQUASHFS_COPY) {
+		squashfs_bh_to_actor(bh, nr_buffers, req->output, req->offset,
+			req->length, msblk->devblksize);
+	} else if (req->data_processing == SQUASHFS_DECOMPRESS) {
+		req->length = squashfs_decompress(msblk, bh, nr_buffers,
+			req->offset, req->length, actor);
+		if (req->length < 0) {
+			error = -EIO;
+			goto cleanup;
+		}
 }
 
+	/* Last page may have trailing bytes not filled */
+	bytes = req->length % PAGE_SIZE;
+	if (bytes && actor->page[actor->pages - 1])
+		zero_user_segment(actor->page[actor->pages - 1], bytes,
+				  PAGE_SIZE);
+
+cleanup:
+	if (req->bytes_uncompressed)
+		*(req->bytes_uncompressed) = req->length;
+	if (error) {
+		for (i = 0; i < nr_buffers; ++i)
+			if (bh[i])
+				put_bh(bh[i]);
+	}
+	free_read_request(req, error);
+}
 
-/*
- * Read and decompress a metadata block or datablock.  Length is non-zero
- * if a datablock is being read (the size is stored elsewhere in the
- * filesystem), otherwise the length is obtained from the first two bytes of
- * the metadata block.  A bit in the length field indicates if the block
- * is stored uncompressed in the filesystem (usually because compression
- * generated a larger block - this does occasionally happen with compression
- * algorithms).
- */
-int squashfs_read_data(struct super_block *sb, u64 index, int length,
-		u64 *next_index, struct squashfs_page_actor *output)
+static void read_wq_handler(struct work_struct *work)
 {
-	struct squashfs_sb_info *msblk = sb->s_fs_info;
-	struct buffer_head **bh;
-	int offset = index & ((1 << msblk->devblksize_log2) - 1);
-	u64 cur_index = index >> msblk->devblksize_log2;
-	int bytes, compressed, b = 0, k = 0, avail, i;
-
-	bh = kcalloc(((output->length + msblk->devblksize - 1)
-		>> msblk->devblksize_log2) + 1, sizeof(*bh), GFP_KERNEL);
-	if (bh == NULL)
-		return -ENOMEM;
+	squashfs_process_blocks(container_of(work,
+		    struct squashfs_read_request, offload));
+}
 
-	if (length) {
-		/*
-		 * Datablock.
-		 */
-		bytes = -offset;
-		compressed = SQUASHFS_COMPRESSED_BLOCK(length);
-		length = SQUASHFS_COMPRESSED_SIZE_BLOCK(length);
-		if (next_index)
-			*next_index = index + length;
+static void squashfs_bio_end_io(struct bio *bio)
+{
+	int i;
+	int error = bio->bi_error;
+	struct squashfs_bio_request *bio_req = bio->bi_private;
 
-		TRACE("Block @ 0x%llx, %scompressed size %d, src size %d\n",
-			index, compressed ? "" : "un", length, output->length);
+	bio_put(bio);
 
-		if (length < 0 || length > output->length ||
-				(index + length) > msblk->bytes_used)
-			goto read_failure;
-
-		for (b = 0; bytes < length; b++, cur_index++) {
-			bh[b] = sb_getblk(sb, cur_index);
-			if (bh[b] == NULL)
-				goto block_release;
-			bytes += msblk->devblksize;
+	for (i = 0; i < bio_req->nr_buffers; ++i) {
+		if (!bio_req->bh[i])
+			continue;
+		if (!error)
+			set_buffer_uptodate(bio_req->bh[i]);
+		else
+			clear_buffer_uptodate(bio_req->bh[i]);
+		unlock_buffer(bio_req->bh[i]);
+	}
+	kfree(bio_req);
 		}
-		ll_rw_block(READ, b, bh);
-	} else {
-		/*
-		 * Metadata block.
-		 */
-		if ((index + 2) > msblk->bytes_used)
-			goto read_failure;
 
-		bh[0] = get_block_length(sb, &cur_index, &offset, &length);
-		if (bh[0] == NULL)
-			goto read_failure;
-		b = 1;
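+/*
+ * Returns whether the idx-th buffer_head of the request maps only to NULL
+ * pages of the actor, in which case reading it can be skipped entirely
+ * (only possible for uncompressed blocks).
+ */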
+static int bh_is_optional(struct squashfs_read_request *req, int idx)
+{
+	int start_idx, end_idx;
+	struct squashfs_sb_info *msblk = req->sb->s_fs_info;
 
-		bytes = msblk->devblksize - offset;
-		compressed = SQUASHFS_COMPRESSED(length);
-		length = SQUASHFS_COMPRESSED_SIZE(length);
-		if (next_index)
-			*next_index = index + length + 2;
+	start_idx = (idx * msblk->devblksize - req->offset) >> PAGE_SHIFT;
+	end_idx = ((idx + 1) * msblk->devblksize - req->offset + 1) >> PAGE_SHIFT;
+	if (start_idx >= req->output->pages)
+		return 1;
+	if (start_idx < 0)
+		start_idx = end_idx;
+	if (end_idx >= req->output->pages)
+		end_idx = start_idx;
+	return !req->output->page[start_idx] && !req->output->page[end_idx];
+}
 
-		TRACE("Block @ 0x%llx, %scompressed size %d\n", index,
-				compressed ? "" : "un", length);
+static int actor_getblks(struct squashfs_read_request *req, u64 block)
+{
+	int i;
 
-		if (length < 0 || length > output->length ||
-					(index + length) > msblk->bytes_used)
-			goto block_release;
+	req->bh = kmalloc_array(req->nr_buffers, sizeof(*(req->bh)), GFP_NOIO);
+	if (!req->bh)
+		return -ENOMEM;
 
-		for (; bytes < length; b++) {
-			bh[b] = sb_getblk(sb, ++cur_index);
-			if (bh[b] == NULL)
-				goto block_release;
-			bytes += msblk->devblksize;
+	for (i = 0; i < req->nr_buffers; ++i) {
+		/*
+		 * When dealing with an uncompressed block, the actor may
+		 * contain NULL pages. There's no need to read the buffers
+		 * associated with these pages.
+		 */
+		if (!req->compressed && bh_is_optional(req, i)) {
+			req->bh[i] = NULL;
+			continue;
+		}
+		req->bh[i] = sb_getblk(req->sb, block + i);
+		if (!req->bh[i]) {
+			while (--i >= 0) {
+				if (req->bh[i])
+					put_bh(req->bh[i]);
 		}
-		ll_rw_block(READ, b - 1, bh + 1);
+			return -ENOMEM;
+		}
+	}
+	return 0;
 	}
 
-	for (i = 0; i < b; i++) {
-		wait_on_buffer(bh[i]);
-		if (!buffer_uptodate(bh[i]))
-			goto block_release;
+static int squashfs_bio_submit(struct squashfs_read_request *req)
+{
+	struct bio *bio = NULL;
+	struct buffer_head *bh;
+	struct squashfs_bio_request *bio_req = NULL;
+	int b = 0, prev_block = 0;
+	struct squashfs_sb_info *msblk = req->sb->s_fs_info;
+
+	u64 read_start = round_down(req->index, msblk->devblksize);
+	u64 read_end = round_up(req->index + req->length, msblk->devblksize);
+	sector_t block = read_start >> msblk->devblksize_log2;
+	sector_t block_end = read_end >> msblk->devblksize_log2;
+	int offset = read_start - round_down(req->index, PAGE_SIZE);
+	int nr_buffers = block_end - block;
+	int blksz = msblk->devblksize;
+	int bio_max_pages = nr_buffers > BIO_MAX_PAGES ? BIO_MAX_PAGES
+						       : nr_buffers;
+
+	/* Setup the request */
+	req->read_end = read_end;
+	req->offset = req->index - read_start;
+	req->nr_buffers = nr_buffers;
+	if (actor_getblks(req, block) < 0)
+		goto getblk_failed;
+
+	/* Create and submit the BIOs */
+	for (b = 0; b < nr_buffers; ++b, offset += blksz) {
+		bh = req->bh[b];
+		if (!bh || !trylock_buffer(bh))
+			continue;
+		if (buffer_uptodate(bh)) {
+			unlock_buffer(bh);
+			continue;
+		}
+		offset %= PAGE_SIZE;
+
+		/* Append the buffer to the current BIO if it is contiguous */
+		if (bio && bio_req && prev_block + 1 == b) {
+			if (bio_add_page(bio, bh->b_page, blksz, offset)) {
+				bio_req->nr_buffers += 1;
+				prev_block = b;
+				continue;
+			}
+		}
+
+		/* Otherwise, submit the current BIO and create a new one */
+		if (bio)
+			submit_bio(READ, bio);
+		bio_req = kcalloc(1, sizeof(struct squashfs_bio_request),
+				  GFP_NOIO);
+		if (!bio_req)
+			goto req_alloc_failed;
+		bio_req->bh = &req->bh[b];
+		bio = bio_alloc(GFP_NOIO, bio_max_pages);
+		if (!bio)
+			goto bio_alloc_failed;
+		bio->bi_bdev = req->sb->s_bdev;
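+		/* Convert the device block number to 512-byte sectors */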
+		bio->bi_iter.bi_sector = (block + b)
+				       << (msblk->devblksize_log2 - 9);
+		bio->bi_private = bio_req;
+		bio->bi_end_io = squashfs_bio_end_io;
+
+		bio_add_page(bio, bh->b_page, blksz, offset);
+		bio_req->nr_buffers += 1;
+		prev_block = b;
+	}
+	if (bio)
+		submit_bio(READ, bio);
+
+	if (req->synchronous)
+		squashfs_process_blocks(req);
+	else {
+		INIT_WORK(&req->offload, read_wq_handler);
+		schedule_work(&req->offload);
+	}
+	return 0;
+
+bio_alloc_failed:
+	kfree(bio_req);
+req_alloc_failed:
+	unlock_buffer(bh);
+	while (--nr_buffers >= b)
+		if (req->bh[nr_buffers])
+			put_bh(req->bh[nr_buffers]);
+	while (--b >= 0)
+		if (req->bh[b])
+			wait_on_buffer(req->bh[b]);
+getblk_failed:
+	free_read_request(req, -ENOMEM);
+	return -ENOMEM;
 	}
 
-	if (compressed) {
-		length = squashfs_decompress(msblk, bh, b, offset, length,
-			output);
-		if (length < 0)
-			goto read_failure;
-	} else {
+static int read_metadata_block(struct squashfs_read_request *req,
+			       u64 *next_index)
+{
+	int ret, error, bytes_read = 0, bytes_uncompressed = 0;
+	struct squashfs_sb_info *msblk = req->sb->s_fs_info;
+
+	if (req->index + 2 > msblk->bytes_used) {
+		free_read_request(req, -EINVAL);
+		return -EINVAL;
+	}
+	req->length = 2;
+
+	/* Do not read beyond the end of the device */
+	if (req->index + req->length > msblk->bytes_used)
+		req->length = msblk->bytes_used - req->index;
+	req->data_processing = SQUASHFS_METADATA;
+
 		/*
-		 * Block is uncompressed.
+	 * Reading metadata is always synchronous because we don't know the
+	 * length in advance and the function is expected to update
+	 * 'next_index' and return the length.
 		 */
-		int in, pg_offset = 0;
-		void *data = squashfs_first_page(output);
-
-		for (bytes = length; k < b; k++) {
-			in = min(bytes, msblk->devblksize - offset);
-			bytes -= in;
-			while (in) {
-				if (pg_offset == PAGE_CACHE_SIZE) {
-					data = squashfs_next_page(output);
-					pg_offset = 0;
-				}
-				avail = min_t(int, in, PAGE_CACHE_SIZE -
-						pg_offset);
-				memcpy(data + pg_offset, bh[k]->b_data + offset,
-						avail);
-				in -= avail;
-				pg_offset += avail;
-				offset += avail;
+	req->synchronous = true;
+	req->res = &error;
+	req->bytes_read = &bytes_read;
+	req->bytes_uncompressed = &bytes_uncompressed;
+
+	TRACE("Metadata block @ 0x%llx, %scompressed size %d, src size %d\n",
+	      req->index, req->compressed ? "" : "un", bytes_read,
+	      req->output->length);
+
+	ret = squashfs_bio_submit(req);
+	if (ret)
+		return ret;
+	if (error)
+		return error;
+	if (next_index)
+		*next_index += 2 + bytes_read;
+	return bytes_uncompressed;
 			}
-			offset = 0;
-			put_bh(bh[k]);
+
+static int read_data_block(struct squashfs_read_request *req, int length,
+			   u64 *next_index, bool synchronous)
+{
+	int ret, error = 0, bytes_uncompressed = 0, bytes_read = 0;
+
+	req->compressed = SQUASHFS_COMPRESSED_BLOCK(length);
+	req->length = length = SQUASHFS_COMPRESSED_SIZE_BLOCK(length);
+	req->data_processing = req->compressed ? SQUASHFS_DECOMPRESS
+					       : SQUASHFS_COPY;
+
+	req->synchronous = synchronous;
+	if (synchronous) {
+		req->res = &error;
+		req->bytes_read = &bytes_read;
+		req->bytes_uncompressed = &bytes_uncompressed;
+	}
+
+	TRACE("Data block @ 0x%llx, %scompressed size %d, src size %d\n",
+	      req->index, req->compressed ? "" : "un", req->length,
+	      req->output->length);
+
+	ret = squashfs_bio_submit(req);
+	if (ret)
+		return ret;
+	if (synchronous)
+		ret = error ? error : bytes_uncompressed;
+	if (next_index)
+		*next_index += length;
+	return ret;
 		}
-		squashfs_finish_page(output);
+
+/*
+ * Read and decompress a metadata block or datablock.  Length is non-zero
+ * if a datablock is being read (the size is stored elsewhere in the
+ * filesystem), otherwise the length is obtained from the first two bytes of
+ * the metadata block.  A bit in the length field indicates if the block
+ * is stored uncompressed in the filesystem (usually because compression
+ * generated a larger block - this does occasionally happen with compression
+ * algorithms).
+ */
+static int __squashfs_read_data(struct super_block *sb, u64 index, int length,
+	u64 *next_index, struct squashfs_page_actor *output, bool sync)
+{
+	struct squashfs_read_request *req;
+
+	req = kcalloc(1, sizeof(struct squashfs_read_request), GFP_KERNEL);
+	if (!req) {
+		if (!sync)
+			squashfs_page_actor_free(output, -ENOMEM);
+		return -ENOMEM;
 	}
 
-	kfree(bh);
-	return length;
+	req->sb = sb;
+	req->index = index;
+	req->output = output;
 
-block_release:
-	for (; k < b; k++)
-		put_bh(bh[k]);
+	if (next_index)
+		*next_index = index;
+
+	if (length)
+		length = read_data_block(req, length, next_index, sync);
+	else
+		length = read_metadata_block(req, next_index);
 
-read_failure:
+	if (length < 0) {
 	ERROR("squashfs_read_data failed to read block 0x%llx\n",
 					(unsigned long long) index);
-	kfree(bh);
 	return -EIO;
 }
+
+	return length;
+}
+
+int squashfs_read_data(struct super_block *sb, u64 index, int length,
+	u64 *next_index, struct squashfs_page_actor *output)
+{
+	return __squashfs_read_data(sb, index, length, next_index, output,
+				    true);
+}
+
+int squashfs_read_data_async(struct super_block *sb, u64 index, int length,
+	u64 *next_index, struct squashfs_page_actor *output)
+{
+	return __squashfs_read_data(sb, index, length, next_index, output,
+				    false);
+}
diff -ruw linux-4.4.115/fs/squashfs/cache.c linux-4.4.115-fbx/fs/squashfs/cache.c
--- linux-4.4.115/fs/squashfs/cache.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/fs/squashfs/cache.c	2019-10-29 09:26:25.385220225 +0100
@@ -209,17 +209,14 @@
  */
 void squashfs_cache_delete(struct squashfs_cache *cache)
 {
-	int i, j;
+	int i;
 
 	if (cache == NULL)
 		return;
 
 	for (i = 0; i < cache->entries; i++) {
-		if (cache->entry[i].data) {
-			for (j = 0; j < cache->pages; j++)
-				kfree(cache->entry[i].data[j]);
-			kfree(cache->entry[i].data);
-		}
+		if (cache->entry[i].page)
+			free_page_array(cache->entry[i].page, cache->pages);
 		kfree(cache->entry[i].actor);
 	}
 
@@ -236,7 +233,7 @@
 struct squashfs_cache *squashfs_cache_init(char *name, int entries,
 	int block_size)
 {
-	int i, j;
+	int i;
 	struct squashfs_cache *cache = kzalloc(sizeof(*cache), GFP_KERNEL);
 
 	if (cache == NULL) {
@@ -268,22 +265,13 @@
 		init_waitqueue_head(&cache->entry[i].wait_queue);
 		entry->cache = cache;
 		entry->block = SQUASHFS_INVALID_BLK;
-		entry->data = kcalloc(cache->pages, sizeof(void *), GFP_KERNEL);
-		if (entry->data == NULL) {
+		entry->page = alloc_page_array(cache->pages, GFP_KERNEL);
+		if (!entry->page) {
 			ERROR("Failed to allocate %s cache entry\n", name);
 			goto cleanup;
 		}
-
-		for (j = 0; j < cache->pages; j++) {
-			entry->data[j] = kmalloc(PAGE_CACHE_SIZE, GFP_KERNEL);
-			if (entry->data[j] == NULL) {
-				ERROR("Failed to allocate %s buffer\n", name);
-				goto cleanup;
-			}
-		}
-
-		entry->actor = squashfs_page_actor_init(entry->data,
-						cache->pages, 0);
+		entry->actor = squashfs_page_actor_init(entry->page,
+			cache->pages, 0, NULL);
 		if (entry->actor == NULL) {
 			ERROR("Failed to allocate %s cache entry\n", name);
 			goto cleanup;
@@ -314,18 +302,20 @@
 		return min(length, entry->length - offset);
 
 	while (offset < entry->length) {
-		void *buff = entry->data[offset / PAGE_CACHE_SIZE]
+		void *buff = kmap_atomic(entry->page[offset / PAGE_CACHE_SIZE])
 				+ (offset % PAGE_CACHE_SIZE);
 		int bytes = min_t(int, entry->length - offset,
 				PAGE_CACHE_SIZE - (offset % PAGE_CACHE_SIZE));
 
 		if (bytes >= remaining) {
 			memcpy(buffer, buff, remaining);
+			kunmap_atomic(buff);
 			remaining = 0;
 			break;
 		}
 
 		memcpy(buffer, buff, bytes);
+		kunmap_atomic(buff);
 		buffer += bytes;
 		remaining -= bytes;
 		offset += bytes;
@@ -416,43 +406,38 @@
 void *squashfs_read_table(struct super_block *sb, u64 block, int length)
 {
 	int pages = (length + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
-	int i, res;
-	void *table, *buffer, **data;
+	struct page **page;
+	void *buff;
+	int res;
 	struct squashfs_page_actor *actor;
 
-	table = buffer = kmalloc(length, GFP_KERNEL);
-	if (table == NULL)
+	page = alloc_page_array(pages, GFP_KERNEL);
+	if (!page)
 		return ERR_PTR(-ENOMEM);
 
-	data = kcalloc(pages, sizeof(void *), GFP_KERNEL);
-	if (data == NULL) {
-		res = -ENOMEM;
-		goto failed;
-	}
-
-	actor = squashfs_page_actor_init(data, pages, length);
+	actor = squashfs_page_actor_init(page, pages, length, NULL);
 	if (actor == NULL) {
 		res = -ENOMEM;
-		goto failed2;
+		goto failed;
 	}
 
-	for (i = 0; i < pages; i++, buffer += PAGE_CACHE_SIZE)
-		data[i] = buffer;
-
 	res = squashfs_read_data(sb, block, length |
 		SQUASHFS_COMPRESSED_BIT_BLOCK, NULL, actor);
 
-	kfree(data);
-	kfree(actor);
-
 	if (res < 0)
-		goto failed;
+		goto failed2;
 
-	return table;
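+	/* Callers expect a flat buffer: copy the table out of the pages */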
+	buff = kmalloc(length, GFP_KERNEL);
+	if (!buff)
+		goto failed2;
+	squashfs_actor_to_buf(actor, buff, length);
+	squashfs_page_actor_free(actor, 0);
+	free_page_array(page, pages);
+	return buff;
 
 failed2:
-	kfree(data);
+	squashfs_page_actor_free(actor, 0);
 failed:
-	kfree(table);
+	free_page_array(page, pages);
 	return ERR_PTR(res);
 }
diff -ruw linux-4.4.115/fs/squashfs/decompressor.c linux-4.4.115-fbx/fs/squashfs/decompressor.c
--- linux-4.4.115/fs/squashfs/decompressor.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/fs/squashfs/decompressor.c	2019-10-29 09:26:25.385220225 +0100
@@ -24,7 +24,8 @@
 #include <linux/types.h>
 #include <linux/mutex.h>
 #include <linux/slab.h>
-#include <linux/buffer_head.h>
+#include <linux/highmem.h>
+#include <linux/fs.h>
 
 #include "squashfs_fs.h"
 #include "squashfs_fs_sb.h"
@@ -65,6 +66,12 @@
 };
 #endif
 
+#ifndef CONFIG_SQUASHFS_ZSTD
+static const struct squashfs_decompressor squashfs_zstd_comp_ops = {
+	NULL, NULL, NULL, NULL, ZSTD_COMPRESSION, "zstd", 0
+};
+#endif
+
 static const struct squashfs_decompressor squashfs_unknown_comp_ops = {
 	NULL, NULL, NULL, NULL, 0, "unknown", 0
 };
@@ -75,6 +82,7 @@
 	&squashfs_lzo_comp_ops,
 	&squashfs_xz_comp_ops,
 	&squashfs_lzma_unsupported_comp_ops,
+	&squashfs_zstd_comp_ops,
 	&squashfs_unknown_comp_ops
 };
 
@@ -94,24 +102,26 @@
 static void *get_comp_opts(struct super_block *sb, unsigned short flags)
 {
 	struct squashfs_sb_info *msblk = sb->s_fs_info;
-	void *buffer = NULL, *comp_opts;
+	void *comp_opts, *buffer = NULL;
+	struct page *page;
 	struct squashfs_page_actor *actor = NULL;
 	int length = 0;
 
+	if (!SQUASHFS_COMP_OPTS(flags))
+		return squashfs_comp_opts(msblk, buffer, length);
+
 	/*
 	 * Read decompressor specific options from file system if present
 	 */
-	if (SQUASHFS_COMP_OPTS(flags)) {
-		buffer = kmalloc(PAGE_CACHE_SIZE, GFP_KERNEL);
-		if (buffer == NULL) {
-			comp_opts = ERR_PTR(-ENOMEM);
-			goto out;
-		}
 
-		actor = squashfs_page_actor_init(&buffer, 1, 0);
+	page = alloc_page(GFP_KERNEL);
+	if (!page)
+		return ERR_PTR(-ENOMEM);
+
+	actor = squashfs_page_actor_init(&page, 1, 0, NULL);
 		if (actor == NULL) {
 			comp_opts = ERR_PTR(-ENOMEM);
-			goto out;
+		goto actor_error;
 		}
 
 		length = squashfs_read_data(sb,
@@ -119,15 +129,17 @@
 
 		if (length < 0) {
 			comp_opts = ERR_PTR(length);
-			goto out;
-		}
+		goto read_error;
 	}
 
+	buffer = kmap_atomic(page);
 	comp_opts = squashfs_comp_opts(msblk, buffer, length);
+	kunmap_atomic(buffer);
 
-out:
-	kfree(actor);
-	kfree(buffer);
+read_error:
+	squashfs_page_actor_free(actor, 0);
+actor_error:
+	__free_page(page);
 	return comp_opts;
 }
 
diff -ruw linux-4.4.115/fs/squashfs/decompressor.h linux-4.4.115-fbx/fs/squashfs/decompressor.h
--- linux-4.4.115/fs/squashfs/decompressor.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/fs/squashfs/decompressor.h	2019-10-29 09:26:25.385220225 +0100
@@ -58,4 +58,8 @@
 extern const struct squashfs_decompressor squashfs_zlib_comp_ops;
 #endif
 
+#ifdef CONFIG_SQUASHFS_ZSTD
+extern const struct squashfs_decompressor squashfs_zstd_comp_ops;
+#endif
+
 #endif
diff -ruw linux-4.4.115/fs/squashfs/file.c linux-4.4.115-fbx/fs/squashfs/file.c
--- linux-4.4.115/fs/squashfs/file.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/fs/squashfs/file.c	2019-10-29 09:26:25.385220225 +0100
@@ -47,12 +47,16 @@
 #include <linux/string.h>
 #include <linux/pagemap.h>
 #include <linux/mutex.h>
+#include <linux/mm_inline.h>
 
 #include "squashfs_fs.h"
 #include "squashfs_fs_sb.h"
 #include "squashfs_fs_i.h"
 #include "squashfs.h"
 
+// Backported from 4.5
+#define lru_to_page(head) (list_entry((head)->prev, struct page, lru))
+
 /*
  * Locate cache slot in range [offset, index] for specified inode.  If
  * there's more than one return the slot closest to index.
@@ -438,6 +442,21 @@
 	return res;
 }
 
+static int squashfs_readpages_fragment(struct page *page,
+	struct list_head *readahead_pages, struct address_space *mapping)
+{
+	if (!page) {
+		page = lru_to_page(readahead_pages);
+		list_del(&page->lru);
+		if (add_to_page_cache_lru(page, mapping, page->index,
+			mapping_gfp_constraint(mapping, GFP_KERNEL))) {
+			put_page(page);
+			return 0;
+		}
+	}
+	return squashfs_readpage_fragment(page);
+}
+
 static int squashfs_readpage_sparse(struct page *page, int index, int file_end)
 {
 	struct inode *inode = page->mapping->host;
@@ -450,54 +469,105 @@
 	return 0;
 }
 
-static int squashfs_readpage(struct file *file, struct page *page)
+static int squashfs_readpages_sparse(struct page *page,
+	struct list_head *readahead_pages, int index, int file_end,
+	struct address_space *mapping)
+{
+	if (!page) {
+		page = lru_to_page(readahead_pages);
+		list_del(&page->lru);
+		if (add_to_page_cache_lru(page, mapping, page->index,
+			mapping_gfp_constraint(mapping, GFP_KERNEL))) {
+			put_page(page);
+			return 0;
+		}
+	}
+	return squashfs_readpage_sparse(page, index, file_end);
+}
+
+static int __squashfs_readpages(struct file *file, struct page *page,
+	struct list_head *readahead_pages, unsigned int nr_pages,
+	struct address_space *mapping)
 {
-	struct inode *inode = page->mapping->host;
+	struct inode *inode = mapping->host;
 	struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
-	int index = page->index >> (msblk->block_log - PAGE_CACHE_SHIFT);
 	int file_end = i_size_read(inode) >> msblk->block_log;
 	int res;
-	void *pageaddr;
 
-	TRACE("Entered squashfs_readpage, page index %lx, start block %llx\n",
-				page->index, squashfs_i(inode)->start);
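+	/* Handle the target page first, then drain the readahead list */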
+	do {
+		struct page *cur_page = page ? page
+					     : lru_to_page(readahead_pages);
+		int page_index = cur_page->index;
+		int index = page_index >> (msblk->block_log - PAGE_CACHE_SHIFT);
 
-	if (page->index >= ((i_size_read(inode) + PAGE_CACHE_SIZE - 1) >>
+		if (page_index >= ((i_size_read(inode) + PAGE_CACHE_SIZE - 1) >>
 					PAGE_CACHE_SHIFT))
-		goto out;
+			return 1;
 
 	if (index < file_end || squashfs_i(inode)->fragment_block ==
 					SQUASHFS_INVALID_BLK) {
 		u64 block = 0;
 		int bsize = read_blocklist(inode, index, &block);
+
 		if (bsize < 0)
-			goto error_out;
+				return -1;
 
-		if (bsize == 0)
-			res = squashfs_readpage_sparse(page, index, file_end);
-		else
-			res = squashfs_readpage_block(page, block, bsize);
-	} else
-		res = squashfs_readpage_fragment(page);
+			if (bsize == 0) {
+				res = squashfs_readpages_sparse(page,
+					readahead_pages, index, file_end,
+					mapping);
+			} else {
+				res = squashfs_readpages_block(page,
+					readahead_pages, &nr_pages, mapping,
+					page_index, block, bsize);
+			}
+		} else {
+			res = squashfs_readpages_fragment(page,
+				readahead_pages, mapping);
+		}
+		if (res)
+			return 0;
+		page = NULL;
+	} while (readahead_pages && !list_empty(readahead_pages));
 
-	if (!res)
 		return 0;
+}
 
-error_out:
-	SetPageError(page);
-out:
-	pageaddr = kmap_atomic(page);
-	memset(pageaddr, 0, PAGE_CACHE_SIZE);
-	kunmap_atomic(pageaddr);
+static int squashfs_readpage(struct file *file, struct page *page)
+{
+	int ret;
+
+	TRACE("Entered squashfs_readpage, page index %lx, start block %llx\n",
+	      page->index, squashfs_i(page->mapping->host)->start);
+
+	get_page(page);
+
+	ret = __squashfs_readpages(file, page, NULL, 1, page->mapping);
+	if (ret) {
 	flush_dcache_page(page);
-	if (!PageError(page))
+		if (ret < 0)
+			SetPageError(page);
+		else
 		SetPageUptodate(page);
+		zero_user_segment(page, 0, PAGE_CACHE_SIZE);
 	unlock_page(page);
+		put_page(page);
+	}
 
 	return 0;
 }
 
+static int squashfs_readpages(struct file *file, struct address_space *mapping,
+			      struct list_head *pages, unsigned int nr_pages)
+{
+	TRACE("Entered squashfs_readpages, %u pages, first page index %lx\n",
+		nr_pages, lru_to_page(pages)->index);
+	__squashfs_readpages(file, NULL, pages, nr_pages, mapping);
+	return 0;
+}
+
 
 const struct address_space_operations squashfs_aops = {
-	.readpage = squashfs_readpage
+	.readpage = squashfs_readpage,
+	.readpages = squashfs_readpages,
 };
diff -ruw linux-4.4.115/fs/squashfs/file_direct.c linux-4.4.115-fbx/fs/squashfs/file_direct.c
--- linux-4.4.115/fs/squashfs/file_direct.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/fs/squashfs/file_direct.c	2019-10-29 09:26:25.385220225 +0100
@@ -13,6 +13,7 @@
 #include <linux/string.h>
 #include <linux/pagemap.h>
 #include <linux/mutex.h>
+#include <linux/mm_inline.h>
 
 #include "squashfs_fs.h"
 #include "squashfs_fs_sb.h"
@@ -20,157 +21,139 @@
 #include "squashfs.h"
 #include "page_actor.h"
 
-static int squashfs_read_cache(struct page *target_page, u64 block, int bsize,
-	int pages, struct page **page);
-
-/* Read separately compressed datablock directly into page cache */
-int squashfs_readpage_block(struct page *target_page, u64 block, int bsize)
+// Backported from 4.5
+#define lru_to_page(head) (list_entry((head)->prev, struct page, lru))
 
+static void release_actor_pages(struct page **page, int pages, int error)
 {
-	struct inode *inode = target_page->mapping->host;
-	struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
-
-	int file_end = (i_size_read(inode) - 1) >> PAGE_CACHE_SHIFT;
-	int mask = (1 << (msblk->block_log - PAGE_CACHE_SHIFT)) - 1;
-	int start_index = target_page->index & ~mask;
-	int end_index = start_index | mask;
-	int i, n, pages, missing_pages, bytes, res = -ENOMEM;
-	struct page **page;
-	struct squashfs_page_actor *actor;
-	void *pageaddr;
-
-	if (end_index > file_end)
-		end_index = file_end;
+	int i;
 
-	pages = end_index - start_index + 1;
-
-	page = kmalloc_array(pages, sizeof(void *), GFP_KERNEL);
-	if (page == NULL)
-		return res;
+	for (i = 0; i < pages; i++) {
+		if (!page[i])
+			continue;
+		flush_dcache_page(page[i]);
+		if (!error)
+			SetPageUptodate(page[i]);
+		else {
+			SetPageError(page[i]);
+			zero_user_segment(page[i], 0, PAGE_CACHE_SIZE);
+		}
+		unlock_page(page[i]);
+		put_page(page[i]);
+	}
+	kfree(page);
+}
 
 	/*
 	 * Create a "page actor" which will kmap and kunmap the
 	 * page cache pages appropriately within the decompressor
 	 */
-	actor = squashfs_page_actor_init_special(page, pages, 0);
-	if (actor == NULL)
-		goto out;
-
-	/* Try to grab all the pages covered by the Squashfs block */
-	for (missing_pages = 0, i = 0, n = start_index; i < pages; i++, n++) {
-		page[i] = (n == target_page->index) ? target_page :
-			grab_cache_page_nowait(target_page->mapping, n);
+static struct squashfs_page_actor *actor_from_page_cache(
+	unsigned int actor_pages, struct page *target_page,
+	struct list_head *rpages, unsigned int *nr_pages, int start_index,
+	struct address_space *mapping)
+{
+	struct page **page;
+	struct squashfs_page_actor *actor;
+	int i, n;
+	gfp_t gfp = mapping_gfp_constraint(mapping, GFP_KERNEL);
 
-		if (page[i] == NULL) {
-			missing_pages++;
+	page = kmalloc_array(actor_pages, sizeof(void *), GFP_KERNEL);
+	if (!page)
+		return NULL;
+
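+	/*
+	 * Fill the actor: take pages from the readahead list when they fall
+	 * inside this block, otherwise grab (or create) page cache pages.
+	 */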
+	for (i = 0, n = start_index; i < actor_pages; i++, n++) {
+		if (target_page == NULL && rpages && !list_empty(rpages)) {
+			struct page *cur_page = lru_to_page(rpages);
+
+			if (cur_page->index < start_index + actor_pages) {
+				list_del(&cur_page->lru);
+				--(*nr_pages);
+				if (add_to_page_cache_lru(cur_page, mapping,
+							  cur_page->index, gfp))
+					put_page(cur_page);
+				else
+					target_page = cur_page;
+			} else
+				rpages = NULL;
+		}
+
+		if (target_page && target_page->index == n) {
+			page[i] = target_page;
+			target_page = NULL;
+		} else {
+			page[i] = grab_cache_page_nowait(mapping, n);
+			if (page[i] == NULL)
 			continue;
 		}
 
 		if (PageUptodate(page[i])) {
 			unlock_page(page[i]);
-			page_cache_release(page[i]);
+			put_page(page[i]);
 			page[i] = NULL;
-			missing_pages++;
 		}
 	}
 
-	if (missing_pages) {
-		/*
-		 * Couldn't get one or more pages, this page has either
-		 * been VM reclaimed, but others are still in the page cache
-		 * and uptodate, or we're racing with another thread in
-		 * squashfs_readpage also trying to grab them.  Fall back to
-		 * using an intermediate buffer.
-		 */
-		res = squashfs_read_cache(target_page, block, bsize, pages,
-								page);
-		if (res < 0)
-			goto mark_errored;
-
-		goto out;
-	}
-
-	/* Decompress directly into the page cache buffers */
-	res = squashfs_read_data(inode->i_sb, block, bsize, NULL, actor);
-	if (res < 0)
-		goto mark_errored;
-
-	/* Last page may have trailing bytes not filled */
-	bytes = res % PAGE_CACHE_SIZE;
-	if (bytes) {
-		pageaddr = kmap_atomic(page[pages - 1]);
-		memset(pageaddr + bytes, 0, PAGE_CACHE_SIZE - bytes);
-		kunmap_atomic(pageaddr);
-	}
-
-	/* Mark pages as uptodate, unlock and release */
-	for (i = 0; i < pages; i++) {
-		flush_dcache_page(page[i]);
-		SetPageUptodate(page[i]);
-		unlock_page(page[i]);
-		if (page[i] != target_page)
-			page_cache_release(page[i]);
-	}
-
-	kfree(actor);
+	actor = squashfs_page_actor_init(page, actor_pages, 0,
+			release_actor_pages);
+	if (!actor) {
+		release_actor_pages(page, actor_pages, -ENOMEM);
 	kfree(page);
-
-	return 0;
-
-mark_errored:
-	/* Decompression failed, mark pages as errored.  Target_page is
-	 * dealt with by the caller
-	 */
-	for (i = 0; i < pages; i++) {
-		if (page[i] == NULL || page[i] == target_page)
-			continue;
-		flush_dcache_page(page[i]);
-		SetPageError(page[i]);
-		unlock_page(page[i]);
-		page_cache_release(page[i]);
+		return NULL;
 	}
-
-out:
-	kfree(actor);
-	kfree(page);
-	return res;
+	return actor;
 }
 
+int squashfs_readpages_block(struct page *target_page,
+			     struct list_head *readahead_pages,
+			     unsigned int *nr_pages,
+			     struct address_space *mapping,
+			     int page_index, u64 block, int bsize)
 
-static int squashfs_read_cache(struct page *target_page, u64 block, int bsize,
-	int pages, struct page **page)
 {
-	struct inode *i = target_page->mapping->host;
-	struct squashfs_cache_entry *buffer = squashfs_get_datablock(i->i_sb,
-						 block, bsize);
-	int bytes = buffer->length, res = buffer->error, n, offset = 0;
-	void *pageaddr;
-
-	if (res) {
-		ERROR("Unable to read page, block %llx, size %x\n", block,
-			bsize);
-		goto out;
-	}
-
-	for (n = 0; n < pages && bytes > 0; n++,
-			bytes -= PAGE_CACHE_SIZE, offset += PAGE_CACHE_SIZE) {
-		int avail = min_t(int, bytes, PAGE_CACHE_SIZE);
+	struct squashfs_page_actor *actor;
+	struct inode *inode = mapping->host;
+	struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
+	int start_index, end_index, file_end, actor_pages, res;
+	int mask = (1 << (msblk->block_log - PAGE_CACHE_SHIFT)) - 1;
 
-		if (page[n] == NULL)
-			continue;
+	/*
+	 * If readpage() is called on an uncompressed datablock, we can just
+	 * read the pages instead of fetching the whole block.
+	 * This greatly improves performance when a process keeps doing
+	 * random reads, because we only fetch the necessary data.
+	 * The readahead algorithm will take care of doing speculative reads
+	 * if necessary.
+	 * We can't read more than one block even if readahead provides us
+	 * with more pages, because we don't know yet whether the next block
+	 * is compressed or not.
+	 */
+	if (bsize && !SQUASHFS_COMPRESSED_BLOCK(bsize)) {
+		u64 block_end = block + msblk->block_size;
 
-		pageaddr = kmap_atomic(page[n]);
-		squashfs_copy_data(pageaddr, buffer, offset, avail);
-		memset(pageaddr + avail, 0, PAGE_CACHE_SIZE - avail);
-		kunmap_atomic(pageaddr);
-		flush_dcache_page(page[n]);
-		SetPageUptodate(page[n]);
-		unlock_page(page[n]);
-		if (page[n] != target_page)
-			page_cache_release(page[n]);
+		block += (page_index & mask) * PAGE_CACHE_SIZE;
+		actor_pages = (block_end - block) / PAGE_CACHE_SIZE;
+		if (*nr_pages < actor_pages)
+			actor_pages = *nr_pages;
+		start_index = page_index;
+		bsize = min_t(int, bsize, (PAGE_CACHE_SIZE * actor_pages)
+					  | SQUASHFS_COMPRESSED_BIT_BLOCK);
+	} else {
+		file_end = (i_size_read(inode) - 1) >> PAGE_CACHE_SHIFT;
+		start_index = page_index & ~mask;
+		end_index = start_index | mask;
+		if (end_index > file_end)
+			end_index = file_end;
+		actor_pages = end_index - start_index + 1;
 	}
 
-out:
-	squashfs_cache_put(buffer);
-	return res;
+	actor = actor_from_page_cache(actor_pages, target_page,
+				      readahead_pages, nr_pages, start_index,
+				      mapping);
+	if (!actor)
+		return -ENOMEM;
+
+	res = squashfs_read_data_async(inode->i_sb, block, bsize, NULL,
+				       actor);
+	return res < 0 ? res : 0;
 }
diff -ruw linux-4.4.115/fs/squashfs/Kconfig linux-4.4.115-fbx/fs/squashfs/Kconfig
--- linux-4.4.115/fs/squashfs/Kconfig	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/fs/squashfs/Kconfig	2019-10-29 09:26:25.385220225 +0100
@@ -26,34 +26,6 @@
 	  If unsure, say N.
 
 choice
-	prompt "File decompression options"
-	depends on SQUASHFS
-	help
-	  Squashfs now supports two options for decompressing file
-	  data.  Traditionally Squashfs has decompressed into an
-	  intermediate buffer and then memcopied it into the page cache.
-	  Squashfs now supports the ability to decompress directly into
-	  the page cache.
-
-	  If unsure, select "Decompress file data into an intermediate buffer"
-
-config SQUASHFS_FILE_CACHE
-	bool "Decompress file data into an intermediate buffer"
-	help
-	  Decompress file data into an intermediate buffer and then
-	  memcopy it into the page cache.
-
-config SQUASHFS_FILE_DIRECT
-	bool "Decompress files directly into the page cache"
-	help
-	  Directly decompress file data into the page cache.
-	  Doing so can significantly improve performance because
-	  it eliminates a memcpy and it also removes the lock contention
-	  on the single buffer.
-
-endchoice
-
-choice
 	prompt "Decompressor parallelisation options"
 	depends on SQUASHFS
 	help
@@ -164,6 +136,20 @@
 	  file systems will be readable without selecting this option.
 
 	  If unsure, say N.
+
+config SQUASHFS_ZSTD
+	bool "Include support for ZSTD compressed file systems"
+	depends on SQUASHFS
+	select ZSTD_DECOMPRESS
+	help
+	  Saying Y here includes support for reading Squashfs file systems
+	  compressed with ZSTD compression.  ZSTD gives better compression than
+	  the default ZLIB compression, while using less CPU.
+
+	  ZSTD is not the standard compression used in Squashfs and so most
+	  file systems will be readable without selecting this option.
+
+	  If unsure, say N.
 
 config SQUASHFS_4K_DEVBLK_SIZE
 	bool "Use 4K device block size?"
diff -ruw linux-4.4.115/fs/squashfs/Makefile linux-4.4.115-fbx/fs/squashfs/Makefile
--- linux-4.4.115/fs/squashfs/Makefile	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/fs/squashfs/Makefile	2019-10-29 09:26:25.385220225 +0100
@@ -5,8 +5,7 @@
 obj-$(CONFIG_SQUASHFS) += squashfs.o
 squashfs-y += block.o cache.o dir.o export.o file.o fragment.o id.o inode.o
 squashfs-y += namei.o super.o symlink.o decompressor.o
-squashfs-$(CONFIG_SQUASHFS_FILE_CACHE) += file_cache.o
-squashfs-$(CONFIG_SQUASHFS_FILE_DIRECT) += file_direct.o page_actor.o
+squashfs-y += file_direct.o page_actor.o
 squashfs-$(CONFIG_SQUASHFS_DECOMP_SINGLE) += decompressor_single.o
 squashfs-$(CONFIG_SQUASHFS_DECOMP_MULTI) += decompressor_multi.o
 squashfs-$(CONFIG_SQUASHFS_DECOMP_MULTI_PERCPU) += decompressor_multi_percpu.o
@@ -15,3 +14,4 @@
 squashfs-$(CONFIG_SQUASHFS_LZO) += lzo_wrapper.o
 squashfs-$(CONFIG_SQUASHFS_XZ) += xz_wrapper.o
 squashfs-$(CONFIG_SQUASHFS_ZLIB) += zlib_wrapper.o
+squashfs-$(CONFIG_SQUASHFS_ZSTD) += zstd_wrapper.o
diff -ruw linux-4.4.115/fs/squashfs/page_actor.c linux-4.4.115-fbx/fs/squashfs/page_actor.c
--- linux-4.4.115/fs/squashfs/page_actor.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/fs/squashfs/page_actor.c	2019-10-29 09:26:25.385220225 +0100
@@ -9,92 +9,145 @@
 #include <linux/kernel.h>
 #include <linux/slab.h>
 #include <linux/pagemap.h>
+#include <linux/buffer_head.h>
 #include "page_actor.h"
 
-/*
- * This file contains implementations of page_actor for decompressing into
- * an intermediate buffer, and for decompressing directly into the
- * page cache.
- *
- * Calling code should avoid sleeping between calls to squashfs_first_page()
- * and squashfs_finish_page().
- */
-
-/* Implementation of page_actor for decompressing into intermediate buffer */
-static void *cache_first_page(struct squashfs_page_actor *actor)
+struct squashfs_page_actor *squashfs_page_actor_init(struct page **page,
+	int pages, int length, void (*release_pages)(struct page **, int, int))
 {
-	actor->next_page = 1;
-	return actor->buffer[0];
-}
+	struct squashfs_page_actor *actor = kmalloc(sizeof(*actor), GFP_KERNEL);
 
-static void *cache_next_page(struct squashfs_page_actor *actor)
-{
-	if (actor->next_page == actor->pages)
+	if (actor == NULL)
 		return NULL;
 
-	return actor->buffer[actor->next_page++];
+	actor->length = length ? : pages * PAGE_CACHE_SIZE;
+	actor->page = page;
+	actor->pages = pages;
+	actor->next_page = 0;
+	actor->pageaddr = NULL;
+	actor->release_pages = release_pages;
+	return actor;
 }
 
-static void cache_finish_page(struct squashfs_page_actor *actor)
+void squashfs_page_actor_free(struct squashfs_page_actor *actor, int error)
 {
-	/* empty */
+	if (!actor)
+		return;
+
+	if (actor->release_pages)
+		actor->release_pages(actor->page, actor->pages, error);
+	kfree(actor);
 }
 
-struct squashfs_page_actor *squashfs_page_actor_init(void **buffer,
-	int pages, int length)
+void squashfs_actor_to_buf(struct squashfs_page_actor *actor, void *buf,
+	int length)
 {
-	struct squashfs_page_actor *actor = kmalloc(sizeof(*actor), GFP_KERNEL);
+	void *pageaddr;
+	int pos = 0, avail, i;
 
-	if (actor == NULL)
-		return NULL;
+	for (i = 0; i < actor->pages && pos < length; ++i) {
+		avail = min_t(int, length - pos, PAGE_CACHE_SIZE);
+		if (actor->page[i]) {
+			pageaddr = kmap_atomic(actor->page[i]);
+			memcpy(buf + pos, pageaddr, avail);
+			kunmap_atomic(pageaddr);
+		}
+		pos += avail;
+	}
+}
 
-	actor->length = length ? : pages * PAGE_CACHE_SIZE;
-	actor->buffer = buffer;
-	actor->pages = pages;
-	actor->next_page = 0;
-	actor->squashfs_first_page = cache_first_page;
-	actor->squashfs_next_page = cache_next_page;
-	actor->squashfs_finish_page = cache_finish_page;
-	return actor;
+void squashfs_buf_to_actor(void *buf, struct squashfs_page_actor *actor,
+	int length)
+{
+	void *pageaddr;
+	int pos = 0, avail, i;
+
+	for (i = 0; i < actor->pages && pos < length; ++i) {
+		avail = min_t(int, length - pos, PAGE_CACHE_SIZE);
+		if (actor->page[i]) {
+			pageaddr = kmap_atomic(actor->page[i]);
+			memcpy(pageaddr, buf + pos, avail);
+			kunmap_atomic(pageaddr);
+		}
+		pos += avail;
+	}
 }
 
-/* Implementation of page_actor for decompressing directly into page cache. */
-static void *direct_first_page(struct squashfs_page_actor *actor)
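+/*
+ * Copy 'length' bytes from a run of buffer_heads into the actor's pages,
+ * releasing each buffer_head once it has been consumed.  NULL pages in
+ * the actor are skipped.
+ */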
+void squashfs_bh_to_actor(struct buffer_head **bh, int nr_buffers,
+	struct squashfs_page_actor *actor, int offset, int length, int blksz)
 {
-	actor->next_page = 1;
-	return actor->pageaddr = kmap_atomic(actor->page[0]);
+	void *kaddr = NULL;
+	int bytes = 0, pgoff = 0, b = 0, p = 0, avail, i;
+
+	while (bytes < length) {
+		if (actor->page[p]) {
+			kaddr = kmap_atomic(actor->page[p]);
+			while (pgoff < PAGE_CACHE_SIZE && bytes < length) {
+				avail = min_t(int, blksz - offset,
+						PAGE_CACHE_SIZE - pgoff);
+				memcpy(kaddr + pgoff, bh[b]->b_data + offset,
+				       avail);
+				pgoff += avail;
+				bytes += avail;
+				offset = (offset + avail) % blksz;
+				if (!offset) {
+					put_bh(bh[b]);
+					++b;
+				}
+			}
+			kunmap_atomic(kaddr);
+			pgoff = 0;
+		} else {
+			for (i = 0; i < PAGE_CACHE_SIZE / blksz; ++i) {
+				if (bh[b])
+					put_bh(bh[b]);
+				++b;
+			}
+			bytes += PAGE_CACHE_SIZE;
+		}
+		++p;
+	}
 }
 
-static void *direct_next_page(struct squashfs_page_actor *actor)
+void squashfs_bh_to_buf(struct buffer_head **bh, int nr_buffers, void *buf,
+	int offset, int length, int blksz)
 {
-	if (actor->pageaddr)
-		kunmap_atomic(actor->pageaddr);
+	int i, avail, bytes = 0;
 
-	return actor->pageaddr = actor->next_page == actor->pages ? NULL :
-		kmap_atomic(actor->page[actor->next_page++]);
+	for (i = 0; i < nr_buffers && bytes < length; ++i) {
+		avail = min_t(int, length - bytes, blksz - offset);
+		if (bh[i]) {
+			memcpy(buf + bytes, bh[i]->b_data + offset, avail);
+			put_bh(bh[i]);
+		}
+		bytes += avail;
+		offset = 0;
+	}
 }
 
-static void direct_finish_page(struct squashfs_page_actor *actor)
+void free_page_array(struct page **page, int nr_pages)
 {
-	if (actor->pageaddr)
-		kunmap_atomic(actor->pageaddr);
+	int i;
+
+	for (i = 0; i < nr_pages; ++i)
+		__free_page(page[i]);
+	kfree(page);
 }
 
-struct squashfs_page_actor *squashfs_page_actor_init_special(struct page **page,
-	int pages, int length)
+struct page **alloc_page_array(int nr_pages, int gfp_mask)
 {
-	struct squashfs_page_actor *actor = kmalloc(sizeof(*actor), GFP_KERNEL);
+	int i;
+	struct page **page;
 
-	if (actor == NULL)
+	page = kcalloc(nr_pages, sizeof(struct page *), gfp_mask);
+	if (!page)
 		return NULL;
-
-	actor->length = length ? : pages * PAGE_CACHE_SIZE;
-	actor->page = page;
-	actor->pages = pages;
-	actor->next_page = 0;
-	actor->pageaddr = NULL;
-	actor->squashfs_first_page = direct_first_page;
-	actor->squashfs_next_page = direct_next_page;
-	actor->squashfs_finish_page = direct_finish_page;
-	return actor;
+	for (i = 0; i < nr_pages; ++i) {
+		page[i] = alloc_page(gfp_mask);
+		if (!page[i]) {
+			free_page_array(page, i);
+			return NULL;
+		}
+	}
+	return page;
 }
diff -ruw linux-4.4.115/fs/squashfs/page_actor.h linux-4.4.115-fbx/fs/squashfs/page_actor.h
--- linux-4.4.115/fs/squashfs/page_actor.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/fs/squashfs/page_actor.h	2019-10-29 09:26:25.385220225 +0100
@@ -5,77 +5,61 @@
  * Phillip Lougher <phillip@squashfs.org.uk>
  *
  * This work is licensed under the terms of the GNU GPL, version 2. See
- * the COPYING file in the top-level directory.
+ * the COPYING file in the top-level directory.
  */
 
-#ifndef CONFIG_SQUASHFS_FILE_DIRECT
 struct squashfs_page_actor {
-	void	**page;
+	struct page	**page;
+	void	*pageaddr;
 	int	pages;
 	int	length;
 	int	next_page;
+	void	(*release_pages)(struct page **, int, int);
 };
 
-static inline struct squashfs_page_actor *squashfs_page_actor_init(void **page,
-	int pages, int length)
-{
-	struct squashfs_page_actor *actor = kmalloc(sizeof(*actor), GFP_KERNEL);
-
-	if (actor == NULL)
-		return NULL;
-
-	actor->length = length ? : pages * PAGE_CACHE_SIZE;
-	actor->page = page;
-	actor->pages = pages;
-	actor->next_page = 0;
-	return actor;
-}
-
+extern struct squashfs_page_actor *squashfs_page_actor_init(struct page **,
+	int, int, void (*)(struct page **, int, int));
+extern void squashfs_page_actor_free(struct squashfs_page_actor *, int);
+
+extern void squashfs_actor_to_buf(struct squashfs_page_actor *, void *, int);
+extern void squashfs_buf_to_actor(void *, struct squashfs_page_actor *, int);
+extern void squashfs_bh_to_actor(struct buffer_head **, int,
+	struct squashfs_page_actor *, int, int, int);
+extern void squashfs_bh_to_buf(struct buffer_head **, int, void *, int, int,
+	int);
+
+/*
+ * Calling code should avoid sleeping between calls to squashfs_first_page()
+ * and squashfs_finish_page().
+ */
 static inline void *squashfs_first_page(struct squashfs_page_actor *actor)
 {
 	actor->next_page = 1;
-	return actor->page[0];
+	return actor->pageaddr = actor->page[0] ? kmap_atomic(actor->page[0])
+						: NULL;
 }
 
 static inline void *squashfs_next_page(struct squashfs_page_actor *actor)
 {
-	return actor->next_page == actor->pages ? NULL :
-		actor->page[actor->next_page++];
-}
+	if (!IS_ERR_OR_NULL(actor->pageaddr))
+		kunmap_atomic(actor->pageaddr);
 
-static inline void squashfs_finish_page(struct squashfs_page_actor *actor)
-{
-	/* empty */
-}
-#else
-struct squashfs_page_actor {
-	union {
-		void		**buffer;
-		struct page	**page;
-	};
-	void	*pageaddr;
-	void    *(*squashfs_first_page)(struct squashfs_page_actor *);
-	void    *(*squashfs_next_page)(struct squashfs_page_actor *);
-	void    (*squashfs_finish_page)(struct squashfs_page_actor *);
-	int	pages;
-	int	length;
-	int	next_page;
-};
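+	/*
+	 * End of actor: return ERR_PTR rather than NULL so callers can
+	 * tell "no more pages" apart from a NULL (absent) page.
+	 */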
+	if (actor->next_page == actor->pages)
+		return actor->pageaddr = ERR_PTR(-ENODATA);
 
-extern struct squashfs_page_actor *squashfs_page_actor_init(void **, int, int);
-extern struct squashfs_page_actor *squashfs_page_actor_init_special(struct page
-							 **, int, int);
-static inline void *squashfs_first_page(struct squashfs_page_actor *actor)
-{
-	return actor->squashfs_first_page(actor);
-}
-static inline void *squashfs_next_page(struct squashfs_page_actor *actor)
-{
-	return actor->squashfs_next_page(actor);
+	actor->pageaddr = actor->page[actor->next_page] ?
+	    kmap_atomic(actor->page[actor->next_page]) : NULL;
+	++actor->next_page;
+	return actor->pageaddr;
 }
+
 static inline void squashfs_finish_page(struct squashfs_page_actor *actor)
 {
-	actor->squashfs_finish_page(actor);
+	if (!IS_ERR_OR_NULL(actor->pageaddr))
+		kunmap_atomic(actor->pageaddr);
 }
-#endif
+
+extern struct page **alloc_page_array(int, int);
+extern void free_page_array(struct page **, int);
+
 #endif
diff -ruw linux-4.4.115/fs/squashfs/squashfs_fs.h linux-4.4.115-fbx/fs/squashfs/squashfs_fs.h
--- linux-4.4.115/fs/squashfs/squashfs_fs.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/fs/squashfs/squashfs_fs.h	2019-10-29 09:26:25.385220225 +0100
@@ -241,6 +241,7 @@
 #define LZO_COMPRESSION		3
 #define XZ_COMPRESSION		4
 #define LZ4_COMPRESSION		5
+#define ZSTD_COMPRESSION	6
 
 struct squashfs_super_block {
 	__le32			s_magic;
diff -ruw linux-4.4.115/fs/squashfs/squashfs_fs_sb.h linux-4.4.115-fbx/fs/squashfs/squashfs_fs_sb.h
--- linux-4.4.115/fs/squashfs/squashfs_fs_sb.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/fs/squashfs/squashfs_fs_sb.h	2019-10-29 09:26:25.389220265 +0100
@@ -49,7 +49,7 @@
 	int			num_waiters;
 	wait_queue_head_t	wait_queue;
 	struct squashfs_cache	*cache;
-	void			**data;
+	struct page		**page;
 	struct squashfs_page_actor	*actor;
 };
 
diff -ruw linux-4.4.115/fs/squashfs/squashfs.h linux-4.4.115-fbx/fs/squashfs/squashfs.h
--- linux-4.4.115/fs/squashfs/squashfs.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/fs/squashfs/squashfs.h	2019-10-29 09:26:25.385220225 +0100
@@ -28,8 +28,14 @@
 #define WARNING(s, args...)	pr_warn("SQUASHFS: "s, ## args)
 
 /* block.c */
+extern int squashfs_init_read_wq(void);
+extern void squashfs_destroy_read_wq(void);
 extern int squashfs_read_data(struct super_block *, u64, int, u64 *,
 				struct squashfs_page_actor *);
+extern int squashfs_read_data_async(struct super_block *, u64, int, u64 *,
+	struct squashfs_page_actor *);
 
 /* cache.c */
 extern struct squashfs_cache *squashfs_cache_init(char *, int, int);
@@ -70,8 +76,9 @@
 void squashfs_copy_cache(struct page *, struct squashfs_cache_entry *, int,
 				int);
 
-/* file_xxx.c */
-extern int squashfs_readpage_block(struct page *, u64, int);
+/* file_direct.c */
+extern int squashfs_readpages_block(struct page *, struct list_head *,
+	unsigned int *, struct address_space *, int, u64, int);
 
 /* id.c */
 extern int squashfs_get_id(struct super_block *, unsigned int, unsigned int *);
diff -ruw linux-4.4.115/fs/squashfs/super.c linux-4.4.115-fbx/fs/squashfs/super.c
--- linux-4.4.115/fs/squashfs/super.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/fs/squashfs/super.c	2019-10-29 09:26:25.389220265 +0100
@@ -444,9 +444,15 @@
 	if (err)
 		return err;
 
+	if (!squashfs_init_read_wq()) {
+		destroy_inodecache();
+		return -ENOMEM;
+	}
+
 	err = register_filesystem(&squashfs_fs_type);
 	if (err) {
 		destroy_inodecache();
+		squashfs_destroy_read_wq();
 		return err;
 	}
 
@@ -460,6 +466,7 @@
 {
 	unregister_filesystem(&squashfs_fs_type);
 	destroy_inodecache();
+	squashfs_destroy_read_wq();
 }
 
 
diff -ruw linux-4.4.115/fs/squashfs/xz_wrapper.c linux-4.4.115-fbx/fs/squashfs/xz_wrapper.c
--- linux-4.4.115/fs/squashfs/xz_wrapper.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/fs/squashfs/xz_wrapper.c	2019-10-29 09:26:25.389220265 +0100
@@ -55,7 +55,7 @@
 	struct comp_opts *opts;
 	int err = 0, n;
 
-	opts = kmalloc(sizeof(*opts), GFP_KERNEL);
+	opts = kmalloc(sizeof(*opts), GFP_ATOMIC);
 	if (opts == NULL) {
 		err = -ENOMEM;
 		goto out2;
@@ -136,6 +136,7 @@
 	enum xz_ret xz_err;
 	int avail, total = 0, k = 0;
 	struct squashfs_xz *stream = strm;
+	void *buf = NULL;
 
 	xz_dec_reset(stream->state);
 	stream->buf.in_pos = 0;
@@ -156,12 +157,20 @@
 
 		if (stream->buf.out_pos == stream->buf.out_size) {
 			stream->buf.out = squashfs_next_page(output);
-			if (stream->buf.out != NULL) {
+			if (!IS_ERR(stream->buf.out)) {
 				stream->buf.out_pos = 0;
 				total += PAGE_CACHE_SIZE;
 			}
 		}
 
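+		/*
+		 * The actor has no page at this position: decompress into
+		 * a scratch buffer so the stream can still make progress.
+		 */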
+		if (!stream->buf.out) {
+			if (!buf) {
+				buf = kmalloc(PAGE_CACHE_SIZE, GFP_ATOMIC);
+				if (!buf)
+					goto out;
+			}
+			stream->buf.out = buf;
+		}
 		xz_err = xz_dec_run(stream->state, &stream->buf);
 
 		if (stream->buf.in_pos == stream->buf.in_size && k < b)
@@ -173,11 +182,13 @@
 	if (xz_err != XZ_STREAM_END || k < b)
 		goto out;
 
+	kfree(buf);
 	return total + stream->buf.out_pos;
 
 out:
 	for (; k < b; k++)
 		put_bh(bh[k]);
+	kfree(buf);
 
 	return -EIO;
 }
diff -ruw linux-4.4.115/fs/super.c linux-4.4.115-fbx/fs/super.c
--- linux-4.4.115/fs/super.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/fs/super.c	2019-10-29 09:26:25.389220265 +0100
@@ -703,7 +703,8 @@
 }
 
 /**
- *	do_remount_sb - asks filesystem to change mount options.
+ *	do_remount_sb2 - asks filesystem to change mount options.
+ *	@mnt:   mount we are looking at
  *	@sb:	superblock in question
  *	@flags:	numeric part of options
  *	@data:	the rest of options
@@ -711,7 +712,7 @@
  *
  *	Alters the mount options of a mounted file system.
  */
-int do_remount_sb(struct super_block *sb, int flags, void *data, int force)
+int do_remount_sb2(struct vfsmount *mnt, struct super_block *sb, int flags, void *data, int force)
 {
 	int retval;
 	int remount_ro;
@@ -753,7 +754,16 @@
 		}
 	}
 
-	if (sb->s_op->remount_fs) {
+	if (mnt && sb->s_op->remount_fs2) {
+		retval = sb->s_op->remount_fs2(mnt, sb, &flags, data);
+		if (retval) {
+			if (!force)
+				goto cancel_readonly;
+			/* If forced remount, go ahead despite any errors */
+			WARN(1, "forced remount of a %s fs returned %i\n",
+			     sb->s_type->name, retval);
+		}
+	} else if (sb->s_op->remount_fs) {
 		retval = sb->s_op->remount_fs(sb, &flags, data);
 		if (retval) {
 			if (!force)
@@ -785,12 +795,17 @@
 	return retval;
 }
 
+int do_remount_sb(struct super_block *sb, int flags, void *data, int force)
+{
+	return do_remount_sb2(NULL, sb, flags, data, force);
+}
+
 static void do_emergency_remount(struct work_struct *work)
 {
 	struct super_block *sb, *p = NULL;
 
 	spin_lock(&sb_lock);
-	list_for_each_entry(sb, &super_blocks, s_list) {
+	list_for_each_entry_reverse(sb, &super_blocks, s_list) {
 		if (hlist_unhashed(&sb->s_instances))
 			continue;
 		sb->s_count++;
@@ -953,7 +968,7 @@
 	 * We set the bdi here to the queue backing, file systems can
 	 * overwrite this in ->fill_super()
 	 */
-	s->s_bdi = &bdev_get_queue(s->s_bdev)->backing_dev_info;
+	s->s_bdi = bdev_get_queue(s->s_bdev)->backing_dev_info;
 	return 0;
 }
 
@@ -1104,7 +1119,7 @@
 EXPORT_SYMBOL(mount_single);
 
 struct dentry *
-mount_fs(struct file_system_type *type, int flags, const char *name, void *data)
+mount_fs(struct file_system_type *type, int flags, const char *name, struct vfsmount *mnt, void *data)
 {
 	struct dentry *root;
 	struct super_block *sb;
@@ -1121,6 +1136,9 @@
 			goto out_free_secdata;
 	}
 
+	if (type->mount2)
+		root = type->mount2(mnt, type, flags, name, data);
+	else
 	root = type->mount(type, flags, name, data);
 	if (IS_ERR(root)) {
 		error = PTR_ERR(root);
diff -ruw linux-4.4.115/fs/sync.c linux-4.4.115-fbx/fs/sync.c
--- linux-4.4.115/fs/sync.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/fs/sync.c	2019-01-22 16:16:28.067287736 +0100
@@ -218,6 +218,7 @@
 	if (f.file) {
 		ret = vfs_fsync(f.file, datasync);
 		fdput(f);
+		inc_syscfs(current);
 	}
 	return ret;
 }
diff -ruw linux-4.4.115/fs/timerfd.c linux-4.4.115-fbx/fs/timerfd.c
--- linux-4.4.115/fs/timerfd.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/fs/timerfd.c	2019-10-29 09:26:25.389220265 +0100
@@ -50,7 +50,8 @@
 static inline bool isalarm(struct timerfd_ctx *ctx)
 {
 	return ctx->clockid == CLOCK_REALTIME_ALARM ||
-		ctx->clockid == CLOCK_BOOTTIME_ALARM;
+		ctx->clockid == CLOCK_BOOTTIME_ALARM ||
+		ctx->clockid == CLOCK_POWEROFF_ALARM;
 }
 
 /*
@@ -142,7 +143,8 @@
 {
 	spin_lock(&ctx->cancel_lock);
 	if ((ctx->clockid == CLOCK_REALTIME ||
-	     ctx->clockid == CLOCK_REALTIME_ALARM) &&
+	     ctx->clockid == CLOCK_REALTIME_ALARM ||
+	     ctx->clockid == CLOCK_POWEROFF_ALARM) &&
 	    (flags & TFD_TIMER_ABSTIME) && (flags & TFD_TIMER_CANCEL_ON_SET)) {
 		if (!ctx->might_cancel) {
 			ctx->might_cancel = true;
@@ -174,6 +176,7 @@
 	enum hrtimer_mode htmode;
 	ktime_t texp;
 	int clockid = ctx->clockid;
+	enum alarmtimer_type type;
 
 	htmode = (flags & TFD_TIMER_ABSTIME) ?
 		HRTIMER_MODE_ABS: HRTIMER_MODE_REL;
@@ -184,10 +187,8 @@
 	ctx->tintv = timespec_to_ktime(ktmr->it_interval);
 
 	if (isalarm(ctx)) {
-		alarm_init(&ctx->t.alarm,
-			   ctx->clockid == CLOCK_REALTIME_ALARM ?
-			   ALARM_REALTIME : ALARM_BOOTTIME,
-			   timerfd_alarmproc);
+		type = clock2alarm(ctx->clockid);
+		alarm_init(&ctx->t.alarm, type, timerfd_alarmproc);
 	} else {
 		hrtimer_init(&ctx->t.tmr, clockid, htmode);
 		hrtimer_set_expires(&ctx->t.tmr, texp);
@@ -387,6 +388,7 @@
 {
 	int ufd;
 	struct timerfd_ctx *ctx;
+	enum alarmtimer_type type;
 
 	/* Check the TFD_* constants for consistency.  */
 	BUILD_BUG_ON(TFD_CLOEXEC != O_CLOEXEC);
@@ -397,7 +399,8 @@
 	     clockid != CLOCK_REALTIME &&
 	     clockid != CLOCK_REALTIME_ALARM &&
 	     clockid != CLOCK_BOOTTIME &&
-	     clockid != CLOCK_BOOTTIME_ALARM))
+	     clockid != CLOCK_BOOTTIME_ALARM &&
+	     clockid != CLOCK_POWEROFF_ALARM))
 		return -EINVAL;
 
 	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
@@ -408,13 +411,12 @@
 	spin_lock_init(&ctx->cancel_lock);
 	ctx->clockid = clockid;
 
-	if (isalarm(ctx))
-		alarm_init(&ctx->t.alarm,
-			   ctx->clockid == CLOCK_REALTIME_ALARM ?
-			   ALARM_REALTIME : ALARM_BOOTTIME,
-			   timerfd_alarmproc);
-	else
+	if (isalarm(ctx)) {
+		type = clock2alarm(ctx->clockid);
+		alarm_init(&ctx->t.alarm, type, timerfd_alarmproc);
+	} else {
 		hrtimer_init(&ctx->t.tmr, clockid, HRTIMER_MODE_ABS);
+	}
 
 	ctx->moffs = ktime_mono_to_real((ktime_t){ .tv64 = 0 });
 
@@ -486,6 +488,10 @@
 	ret = timerfd_setup(ctx, flags, new);
 
 	spin_unlock_irq(&ctx->wqh.lock);
+
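+	/* Update the power-on alarm so it reflects the new expiry */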
+	if (ctx->clockid == CLOCK_POWEROFF_ALARM)
+		set_power_on_alarm();
+
 	fdput(f);
 	return ret;
 }
diff -ruw linux-4.4.115/fs/utimes.c linux-4.4.115-fbx/fs/utimes.c
--- linux-4.4.115/fs/utimes.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/fs/utimes.c	2019-01-22 16:16:28.091287954 +0100
@@ -91,7 +91,7 @@
 	}
 retry_deleg:
 	mutex_lock(&inode->i_mutex);
-	error = notify_change(path->dentry, &newattrs, &delegated_inode);
+	error = notify_change2(path->mnt, path->dentry, &newattrs, &delegated_inode);
 	mutex_unlock(&inode->i_mutex);
 	if (delegated_inode) {
 		error = break_deleg_wait(&delegated_inode);
diff -ruw linux-4.4.115/fs/xattr.c linux-4.4.115-fbx/fs/xattr.c
--- linux-4.4.115/fs/xattr.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/fs/xattr.c	2019-10-29 09:26:25.397220343 +0100
@@ -70,7 +70,7 @@
 			return -EPERM;
 	}
 
-	return inode_permission(inode, mask);
+	return inode_permission2(ERR_PTR(-EOPNOTSUPP), inode, mask);
 }
 
 /**
diff -ruw linux-4.4.115/include/asm-generic/dma-mapping-common.h linux-4.4.115-fbx/include/asm-generic/dma-mapping-common.h
--- linux-4.4.115/include/asm-generic/dma-mapping-common.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/asm-generic/dma-mapping-common.h	2019-01-22 16:16:28.147288461 +0100
@@ -13,7 +13,7 @@
 					      enum dma_data_direction dir,
 					      struct dma_attrs *attrs)
 {
-	struct dma_map_ops *ops = get_dma_ops(dev);
+	const struct dma_map_ops *ops = get_dma_ops(dev);
 	dma_addr_t addr;
 
 	kmemcheck_mark_initialized(ptr, size);
@@ -32,7 +32,7 @@
 					  enum dma_data_direction dir,
 					  struct dma_attrs *attrs)
 {
-	struct dma_map_ops *ops = get_dma_ops(dev);
+	const struct dma_map_ops *ops = get_dma_ops(dev);
 
 	BUG_ON(!valid_dma_direction(dir));
 	if (ops->unmap_page)
@@ -48,7 +48,7 @@
 				   int nents, enum dma_data_direction dir,
 				   struct dma_attrs *attrs)
 {
-	struct dma_map_ops *ops = get_dma_ops(dev);
+	const struct dma_map_ops *ops = get_dma_ops(dev);
 	int i, ents;
 	struct scatterlist *s;
 
@@ -66,7 +66,7 @@
 				      int nents, enum dma_data_direction dir,
 				      struct dma_attrs *attrs)
 {
-	struct dma_map_ops *ops = get_dma_ops(dev);
+	const struct dma_map_ops *ops = get_dma_ops(dev);
 
 	BUG_ON(!valid_dma_direction(dir));
 	debug_dma_unmap_sg(dev, sg, nents, dir);
@@ -78,7 +78,7 @@
 				      size_t offset, size_t size,
 				      enum dma_data_direction dir)
 {
-	struct dma_map_ops *ops = get_dma_ops(dev);
+	const struct dma_map_ops *ops = get_dma_ops(dev);
 	dma_addr_t addr;
 
 	kmemcheck_mark_initialized(page_address(page) + offset, size);
@@ -92,7 +92,7 @@
 static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
 				  size_t size, enum dma_data_direction dir)
 {
-	struct dma_map_ops *ops = get_dma_ops(dev);
+	const struct dma_map_ops *ops = get_dma_ops(dev);
 
 	BUG_ON(!valid_dma_direction(dir));
 	if (ops->unmap_page)
@@ -104,7 +104,7 @@
 					   size_t size,
 					   enum dma_data_direction dir)
 {
-	struct dma_map_ops *ops = get_dma_ops(dev);
+	const struct dma_map_ops *ops = get_dma_ops(dev);
 
 	BUG_ON(!valid_dma_direction(dir));
 	if (ops->sync_single_for_cpu)
@@ -116,7 +116,7 @@
 					      dma_addr_t addr, size_t size,
 					      enum dma_data_direction dir)
 {
-	struct dma_map_ops *ops = get_dma_ops(dev);
+	const struct dma_map_ops *ops = get_dma_ops(dev);
 
 	BUG_ON(!valid_dma_direction(dir));
 	if (ops->sync_single_for_device)
@@ -156,7 +156,7 @@
 dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
 		    int nelems, enum dma_data_direction dir)
 {
-	struct dma_map_ops *ops = get_dma_ops(dev);
+	const struct dma_map_ops *ops = get_dma_ops(dev);
 
 	BUG_ON(!valid_dma_direction(dir));
 	if (ops->sync_sg_for_cpu)
@@ -168,7 +168,7 @@
 dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
 		       int nelems, enum dma_data_direction dir)
 {
-	struct dma_map_ops *ops = get_dma_ops(dev);
+	const struct dma_map_ops *ops = get_dma_ops(dev);
 
 	BUG_ON(!valid_dma_direction(dir));
 	if (ops->sync_sg_for_device)
@@ -192,7 +192,8 @@
 void *dma_common_pages_remap(struct page **pages, size_t size,
 			unsigned long vm_flags, pgprot_t prot,
 			const void *caller);
-void dma_common_free_remap(void *cpu_addr, size_t size, unsigned long vm_flags);
+void dma_common_free_remap(void *cpu_addr, size_t size, unsigned long vm_flags,
+				bool no_warn);
 
 /**
  * dma_mmap_attrs - map a coherent DMA allocation into user space
@@ -211,7 +212,7 @@
 dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma, void *cpu_addr,
 	       dma_addr_t dma_addr, size_t size, struct dma_attrs *attrs)
 {
-	struct dma_map_ops *ops = get_dma_ops(dev);
+	const struct dma_map_ops *ops = get_dma_ops(dev);
 	BUG_ON(!ops);
 	if (ops->mmap)
 		return ops->mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
@@ -228,7 +229,7 @@
 dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt, void *cpu_addr,
 		      dma_addr_t dma_addr, size_t size, struct dma_attrs *attrs)
 {
-	struct dma_map_ops *ops = get_dma_ops(dev);
+	const struct dma_map_ops *ops = get_dma_ops(dev);
 	BUG_ON(!ops);
 	if (ops->get_sgtable)
 		return ops->get_sgtable(dev, sgt, cpu_addr, dma_addr, size,
@@ -246,7 +247,7 @@
 				       dma_addr_t *dma_handle, gfp_t flag,
 				       struct dma_attrs *attrs)
 {
-	struct dma_map_ops *ops = get_dma_ops(dev);
+	const struct dma_map_ops *ops = get_dma_ops(dev);
 	void *cpu_addr;
 
 	BUG_ON(!ops);
@@ -268,7 +269,7 @@
 				     void *cpu_addr, dma_addr_t dma_handle,
 				     struct dma_attrs *attrs)
 {
-	struct dma_map_ops *ops = get_dma_ops(dev);
+	const struct dma_map_ops *ops = get_dma_ops(dev);
 
 	BUG_ON(!ops);
 	WARN_ON(irqs_disabled());
@@ -327,10 +328,35 @@
 #endif
 }
 
+static inline void *dma_alloc_nonconsistent(struct device *dev, size_t size,
+					dma_addr_t *dma_handle, gfp_t flag)
+{
+	DEFINE_DMA_ATTRS(attrs);
+	dma_set_attr(DMA_ATTR_NON_CONSISTENT, &attrs);
+	return dma_alloc_attrs(dev, size, dma_handle, flag, &attrs);
+}
+
+static inline void dma_free_nonconsistent(struct device *dev, size_t size,
+					void *cpu_addr, dma_addr_t dma_handle)
+{
+	DEFINE_DMA_ATTRS(attrs);
+	dma_set_attr(DMA_ATTR_NON_CONSISTENT, &attrs);
+	return dma_free_attrs(dev, size, cpu_addr, dma_handle, &attrs);
+}
+
+static inline int dma_mmap_nonconsistent(struct device *dev,
+		struct vm_area_struct *vma, void *cpu_addr,
+		dma_addr_t dma_addr, size_t size)
+{
+	DEFINE_DMA_ATTRS(attrs);
+	dma_set_attr(DMA_ATTR_NON_CONSISTENT, &attrs);
+	return dma_mmap_attrs(dev, vma, cpu_addr, dma_addr, size, &attrs);
+}
+
 #ifndef HAVE_ARCH_DMA_SUPPORTED
 static inline int dma_supported(struct device *dev, u64 mask)
 {
-	struct dma_map_ops *ops = get_dma_ops(dev);
+	const struct dma_map_ops *ops = get_dma_ops(dev);
 
 	if (!ops)
 		return 0;
@@ -343,7 +369,7 @@
 #ifndef HAVE_ARCH_DMA_SET_MASK
 static inline int dma_set_mask(struct device *dev, u64 mask)
 {
-	struct dma_map_ops *ops = get_dma_ops(dev);
+	const struct dma_map_ops *ops = get_dma_ops(dev);
 
 	if (ops->set_dma_mask)
 		return ops->set_dma_mask(dev, mask);
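/*
 * Example (annotation, not part of the patch): the DMA_ATTR_NON_CONSISTENT
 * wrappers added above hand out cacheable memory, so the caller owns the
 * cache maintenance.  A minimal driver-side sketch (device, size and names
 * are illustrative):
 */
static int example_alloc_ring(struct device *dev)
{
	dma_addr_t ring_dma;
	void *ring;

	ring = dma_alloc_nonconsistent(dev, SZ_4K, &ring_dma, GFP_KERNEL);
	if (!ring)
		return -ENOMEM;

	/* CPU writes... then explicitly push them out to the device */
	dma_sync_single_for_device(dev, ring_dma, SZ_4K, DMA_TO_DEVICE);

	dma_free_nonconsistent(dev, SZ_4K, ring, ring_dma);
	return 0;
}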
diff -ruw linux-4.4.115/include/asm-generic/fixmap.h linux-4.4.115-fbx/include/asm-generic/fixmap.h
--- linux-4.4.115/include/asm-generic/fixmap.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/asm-generic/fixmap.h	2019-01-22 16:16:28.151288497 +0100
@@ -72,10 +72,10 @@
 /* Return a pointer with offset calculated */
 #define __set_fixmap_offset(idx, phys, flags)		      \
 ({							      \
-	unsigned long addr;				      \
+	unsigned long ________addr;					\
 	__set_fixmap(idx, phys, flags);			      \
-	addr = fix_to_virt(idx) + ((phys) & (PAGE_SIZE - 1)); \
-	addr;						      \
+	________addr = fix_to_virt(idx) + ((phys) & (PAGE_SIZE - 1));	\
+	________addr;							\
 })
 
 #define set_fixmap_offset(idx, phys) \
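/*
 * Annotation (not part of the patch): the ________addr rename is macro
 * hygiene.  With the old local named "addr", a caller whose argument
 * expression itself mentions a variable called "addr" would have that
 * argument evaluated against the macro's own uninitialized local:
 *
 *	unsigned long addr = some_phys_base;
 *	ptr = __set_fixmap_offset(FIX_FOO, addr + off, flags);
 *
 * The deliberately improbable name makes such a capture effectively
 * impossible without changing the macro's behavior.
 */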
diff -ruw linux-4.4.115/include/asm-generic/preempt.h linux-4.4.115-fbx/include/asm-generic/preempt.h
--- linux-4.4.115/include/asm-generic/preempt.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/asm-generic/preempt.h	2019-01-22 16:16:28.151288497 +0100
@@ -7,10 +7,10 @@
 
 static __always_inline int preempt_count(void)
 {
-	return current_thread_info()->preempt_count;
+	return READ_ONCE(current_thread_info()->preempt_count);
 }
 
-static __always_inline int *preempt_count_ptr(void)
+static __always_inline volatile int *preempt_count_ptr(void)
 {
 	return &current_thread_info()->preempt_count;
 }
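/*
 * Annotation (not part of the patch): the READ_ONCE()/volatile changes
 * force the compiler to reload preempt_count on every access rather than
 * caching it in a register across a window where an interrupt could have
 * changed it, e.g. a polling loop such as:
 *
 *	while (preempt_count())		// must re-read each iteration
 *		cpu_relax();
 */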
diff -ruw linux-4.4.115/include/asm-generic/sections.h linux-4.4.115-fbx/include/asm-generic/sections.h
--- linux-4.4.115/include/asm-generic/sections.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/asm-generic/sections.h	2019-01-22 16:16:28.155288533 +0100
@@ -24,6 +24,8 @@
  *	__kprobes_text_start, __kprobes_text_end
  *	__entry_text_start, __entry_text_end
  *	__ctors_start, __ctors_end
+ *	__irqentry_text_start, __irqentry_text_end
+ *	__softirqentry_text_start, __softirqentry_text_end
  */
 extern char _text[], _stext[], _etext[];
 extern char _data[], _sdata[], _edata[];
@@ -35,6 +37,8 @@
 extern char __kprobes_text_start[], __kprobes_text_end[];
 extern char __entry_text_start[], __entry_text_end[];
 extern char __start_rodata[], __end_rodata[];
+extern char __irqentry_text_start[], __irqentry_text_end[];
+extern char __softirqentry_text_start[], __softirqentry_text_end[];
 
 /* Start and end of .ctors section - used for constructor calls. */
 extern char __ctors_start[], __ctors_end[];
diff -ruw linux-4.4.115/include/asm-generic/vmlinux.lds.h linux-4.4.115-fbx/include/asm-generic/vmlinux.lds.h
--- linux-4.4.115/include/asm-generic/vmlinux.lds.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/asm-generic/vmlinux.lds.h	2019-01-22 16:16:28.155288533 +0100
@@ -157,7 +157,7 @@
 #define EARLYCON_TABLE() STRUCT_ALIGN();			\
 			 VMLINUX_SYMBOL(__earlycon_table) = .;	\
 			 *(__earlycon_table)			\
-			 *(__earlycon_table_end)
+			 VMLINUX_SYMBOL(__earlycon_table_end) = .;
 #else
 #define EARLYCON_TABLE()
 #endif
@@ -179,7 +179,6 @@
 #define RESERVEDMEM_OF_TABLES()	OF_TABLE(CONFIG_OF_RESERVED_MEM, reservedmem)
 #define CPU_METHOD_OF_TABLES()	OF_TABLE(CONFIG_SMP, cpu_method)
 #define CPUIDLE_METHOD_OF_TABLES() OF_TABLE(CONFIG_CPU_IDLE, cpuidle_method)
-#define EARLYCON_OF_TABLES()	OF_TABLE(CONFIG_SERIAL_EARLYCON, earlycon)
 
 #ifdef CONFIG_ACPI
 #define ACPI_PROBE_TABLE(name)						\
@@ -249,6 +248,14 @@
 	*(.data..init_task)
 
 /*
+ * Allow architectures to handle ro_after_init data on their
+ * own by defining an empty RO_AFTER_INIT_DATA.
+ */
+#ifndef RO_AFTER_INIT_DATA
+#define RO_AFTER_INIT_DATA *(.data..ro_after_init)
+#endif
+
+/*
  * Read only Data
  */
 #define RO_DATA_SECTION(align)						\
@@ -256,6 +263,7 @@
 	.rodata           : AT(ADDR(.rodata) - LOAD_OFFSET) {		\
 		VMLINUX_SYMBOL(__start_rodata) = .;			\
 		*(.rodata) *(.rodata.*)					\
+		RO_AFTER_INIT_DATA	/* Read only after init */	\
 		*(__vermagic)		/* Kernel version magic */	\
 		. = ALIGN(8);						\
 		VMLINUX_SYMBOL(__start___tracepoints_ptrs) = .;		\
@@ -456,15 +464,17 @@
 		*(.entry.text)						\
 		VMLINUX_SYMBOL(__entry_text_end) = .;
 
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
 #define IRQENTRY_TEXT							\
 		ALIGN_FUNCTION();					\
 		VMLINUX_SYMBOL(__irqentry_text_start) = .;		\
 		*(.irqentry.text)					\
 		VMLINUX_SYMBOL(__irqentry_text_end) = .;
-#else
-#define IRQENTRY_TEXT
-#endif
+
+#define SOFTIRQENTRY_TEXT						\
+		ALIGN_FUNCTION();					\
+		VMLINUX_SYMBOL(__softirqentry_text_start) = .;		\
+		*(.softirqentry.text)					\
+		VMLINUX_SYMBOL(__softirqentry_text_end) = .;
 
 /* Section used for early init (in .S files) */
 #define HEAD_TEXT  *(.head.text)
@@ -526,8 +536,7 @@
 	IRQCHIP_OF_MATCH_TABLE()					\
 	ACPI_PROBE_TABLE(irqchip)					\
 	ACPI_PROBE_TABLE(clksrc)					\
-	EARLYCON_TABLE()						\
-	EARLYCON_OF_TABLES()
+	EARLYCON_TABLE()
 
 #define INIT_TEXT							\
 	*(.init.text)							\
diff -ruw linux-4.4.115/include/clocksource/arm_arch_timer.h linux-4.4.115-fbx/include/clocksource/arm_arch_timer.h
--- linux-4.4.115/include/clocksource/arm_arch_timer.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/clocksource/arm_arch_timer.h	2019-01-22 16:16:28.155288533 +0100
@@ -23,6 +23,12 @@
 #define ARCH_TIMER_CTRL_IT_MASK		(1 << 1)
 #define ARCH_TIMER_CTRL_IT_STAT		(1 << 2)
 
+#define CNTHCTL_EL1PCTEN		(1 << 0)
+#define CNTHCTL_EL1PCEN			(1 << 1)
+#define CNTHCTL_EVNTEN			(1 << 2)
+#define CNTHCTL_EVNTDIR			(1 << 3)
+#define CNTHCTL_EVNTI			(0xF << 4)
+
 enum arch_timer_reg {
 	ARCH_TIMER_REG_CTRL,
 	ARCH_TIMER_REG_TVAL,
diff -ruw linux-4.4.115/include/crypto/algapi.h linux-4.4.115-fbx/include/crypto/algapi.h
--- linux-4.4.115/include/crypto/algapi.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/crypto/algapi.h	2019-01-22 16:16:28.155288533 +0100
@@ -202,6 +202,9 @@
 				   struct blkcipher_walk *walk,
 				   struct crypto_aead *tfm,
 				   unsigned int blocksize);
+int blkcipher_ablkcipher_walk_virt(struct blkcipher_desc *desc,
+				   struct blkcipher_walk *walk,
+				   struct crypto_ablkcipher *tfm);
 
 int ablkcipher_walk_done(struct ablkcipher_request *req,
 			 struct ablkcipher_walk *walk, int err);
diff -ruw linux-4.4.115/include/crypto/gf128mul.h linux-4.4.115-fbx/include/crypto/gf128mul.h
--- linux-4.4.115/include/crypto/gf128mul.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/crypto/gf128mul.h	2019-01-22 16:16:28.159288569 +0100
@@ -43,7 +43,7 @@
  ---------------------------------------------------------------------------
  Issue Date: 31/01/2006
 
- An implementation of field multiplication in Galois Field GF(128)
+ An implementation of field multiplication in Galois Field GF(2^128)
 */
 
 #ifndef _CRYPTO_GF128MUL_H
@@ -65,7 +65,7 @@
  * are left and the lsb's are right. char b[16] is an array and b[0] is
  * the first octet.
  *
- * 80000000 00000000 00000000 00000000 .... 00000000 00000000 00000000
+ * 10000000 00000000 00000000 00000000 .... 00000000 00000000 00000000
  *   b[0]     b[1]     b[2]     b[3]          b[13]    b[14]    b[15]
  *
  * Every bit is a coefficient of some power of X. We can store the bits
@@ -155,42 +155,45 @@
     64...71 72...79 80...87 88...95  96..103 104.111 112.119 120.127
 */
 
-/*	A slow generic version of gf_mul, implemented for lle and bbe
- * 	It multiplies a and b and puts the result in a */
+/*  A slow generic version of gf_mul, implemented for lle, bbe, and ble.
+ *  It multiplies a and b and puts the result in a.
+ */
 void gf128mul_lle(be128 *a, const be128 *b);
-
 void gf128mul_bbe(be128 *a, const be128 *b);
+void gf128mul_ble(be128 *a, const be128 *b);
 
-/* multiply by x in ble format, needed by XTS */
+/* multiply by x in ble format, needed by XTS and HEH */
 void gf128mul_x_ble(be128 *a, const be128 *b);
 
 /* 4k table optimization */
-
 struct gf128mul_4k {
 	be128 t[256];
 };
 
 struct gf128mul_4k *gf128mul_init_4k_lle(const be128 *g);
 struct gf128mul_4k *gf128mul_init_4k_bbe(const be128 *g);
+struct gf128mul_4k *gf128mul_init_4k_ble(const be128 *g);
 void gf128mul_4k_lle(be128 *a, struct gf128mul_4k *t);
 void gf128mul_4k_bbe(be128 *a, struct gf128mul_4k *t);
+void gf128mul_4k_ble(be128 *a, struct gf128mul_4k *t);
 
 static inline void gf128mul_free_4k(struct gf128mul_4k *t)
 {
-	kfree(t);
+	kzfree(t);
 }
 
 
-/* 64k table optimization, implemented for lle and bbe */
+/* 64k table optimization, implemented for lle, ble, and bbe */
 
 struct gf128mul_64k {
 	struct gf128mul_4k *t[16];
 };
 
-/* first initialize with the constant factor with which you
- * want to multiply and then call gf128_64k_lle with the other
- * factor in the first argument, the table in the second and a
- * scratch register in the third. Afterwards *a = *r. */
+/* First initialize with the constant factor with which you
+ * want to multiply and then call gf128mul_64k_bbe with the other
+ * factor in the first argument, and the table in the second.
+ * Afterwards, the result is stored in *a.
+ */
 struct gf128mul_64k *gf128mul_init_64k_lle(const be128 *g);
 struct gf128mul_64k *gf128mul_init_64k_bbe(const be128 *g);
 void gf128mul_free_64k(struct gf128mul_64k *t);
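/*
 * Example (annotation, not part of the patch): gf128mul_x_ble() is the
 * per-block tweak update used by XTS (and by HEH in this patch set): each
 * data unit's tweak is the previous one multiplied by x in GF(2^128).
 * A minimal sketch of the usual stepping loop (names are illustrative):
 */
#include <crypto/gf128mul.h>

static void example_step_tweaks(be128 *t, unsigned int nblocks)
{
	unsigned int i;

	for (i = 1; i < nblocks; i++)
		gf128mul_x_ble(&t[i], &t[i - 1]);	/* t[i] = t[i-1] * x */
}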
diff -ruw linux-4.4.115/include/crypto/internal/hash.h linux-4.4.115-fbx/include/crypto/internal/hash.h
--- linux-4.4.115/include/crypto/internal/hash.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/crypto/internal/hash.h	2019-10-29 09:26:25.409220460 +0100
@@ -110,6 +110,8 @@
 			    struct shash_instance *inst);
 void shash_free_instance(struct crypto_instance *inst);
 
+int crypto_grab_shash(struct crypto_shash_spawn *spawn,
+		      const char *name, u32 type, u32 mask);
 int crypto_init_shash_spawn(struct crypto_shash_spawn *spawn,
 			    struct shash_alg *alg,
 			    struct crypto_instance *inst);
@@ -119,6 +121,12 @@
 	crypto_drop_spawn(&spawn->base);
 }
 
+static inline struct shash_alg *crypto_spawn_shash_alg(
+	struct crypto_shash_spawn *spawn)
+{
+	return container_of(spawn->base.alg, struct shash_alg, base);
+}
+
 struct shash_alg *shash_attr_alg(struct rtattr *rta, u32 type, u32 mask);
 
 int shash_ahash_update(struct ahash_request *req, struct shash_desc *desc);
diff -ruw linux-4.4.115/include/drm/drm_crtc.h linux-4.4.115-fbx/include/drm/drm_crtc.h
--- linux-4.4.115/include/drm/drm_crtc.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/drm/drm_crtc.h	2019-01-22 16:16:28.163288606 +0100
@@ -61,7 +61,7 @@
 	struct drm_object_properties *properties;
 };
 
-#define DRM_OBJECT_MAX_PROPERTY 24
+#define DRM_OBJECT_MAX_PROPERTY 64
 struct drm_object_properties {
 	int count, atomic_count;
 	/* NOTE: if we ever start dynamically destroying properties (ie.
@@ -644,6 +644,23 @@
  * @audio_latency: audio latency info from ELD, if found
  * @null_edid_counter: track sinks that give us all zeros for the EDID
  * @bad_edid_counter: track sinks that give us an EDID with invalid checksum
+ * @max_tmds_char: indicates the maximum TMDS Character Rate supported
+ * @scdc_present: when set the sink supports SCDC functionality
+ * @rr_capable: when set the sink is capable of initiating an SCDC read request
+ * @supports_scramble: when set the sink supports less than 340Mcsc scrambling
+ * @flags_3d: 3D view(s) supported by the sink, see drm_edid.h (DRM_EDID_3D_*)
+ * @pt_scan_info: PT scan info obtained from the VCDB of EDID
+ * @it_scan_info: IT scan info obtained from the VCDB of EDID
+ * @ce_scan_info: CE scan info obtained from the VCDB of EDID
+ * @color_enc_fmt: Colorimetry encoding formats of sink
+ * @hdr_eotf: Electro optical transfer function obtained from HDR block
+ * @hdr_metadata_type_one: Metadata type one obtained from HDR block
+ * @hdr_max_luminance: desired max luminance obtained from HDR block
+ * @hdr_avg_luminance: desired avg luminance obtained from HDR block
+ * @hdr_min_luminance: desired min luminance obtained from HDR block
+ * @hdr_supported: does the sink support HDR content
+ * @rgb_qs: does the sink declare RGB selectable quantization range
+ * @yuv_qs: does the sink declare YCC selectable quantization range
  * @edid_corrupt: indicates whether the last read EDID was corrupt
  * @debugfs_entry: debugfs directory for this connector
  * @state: current atomic state for this connector
@@ -717,6 +734,24 @@
 	int null_edid_counter; /* needed to workaround some HW bugs where we get all 0s */
 	unsigned bad_edid_counter;
 
+	/* EDID bits HDMI 2.0 */
+	int max_tmds_char;	/* in Mcsc */
+	bool scdc_present;
+	bool rr_capable;
+	bool supports_scramble;
+	int flags_3d;
+	u8 pt_scan_info;
+	u8 it_scan_info;
+	u8 ce_scan_info;
+	u8 color_enc_fmt;
+	u32 hdr_eotf;
+	bool hdr_metadata_type_one;
+	u32 hdr_max_luminance;
+	u32 hdr_avg_luminance;
+	u32 hdr_min_luminance;
+	bool hdr_supported;
+	bool rgb_qs;
+	bool yuv_qs;
 	/* Flag for raw EDID header corruption - used in Displayport
 	 * compliance testing - * Displayport Link CTS Core 1.2 rev1.1 4.2.2.6
 	 */
diff -ruw linux-4.4.115/include/drm/drm_displayid.h linux-4.4.115-fbx/include/drm/drm_displayid.h
--- linux-4.4.115/include/drm/drm_displayid.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/drm/drm_displayid.h	2019-01-22 16:16:28.163288606 +0100
@@ -73,4 +73,21 @@
 	u8 topology_id[8];
 } __packed;
 
+struct displayid_detailed_timings_1 {
+	u8 pixel_clock[3];
+	u8 flags;
+	u8 hactive[2];
+	u8 hblank[2];
+	u8 hsync[2];
+	u8 hsw[2];
+	u8 vactive[2];
+	u8 vblank[2];
+	u8 vsync[2];
+	u8 vsw[2];
+} __packed;
+
+struct displayid_detailed_timing_block {
+	struct displayid_block base;
+	struct displayid_detailed_timings_1 timings[0];
+};
 #endif
diff -ruw linux-4.4.115/include/drm/drm_edid.h linux-4.4.115-fbx/include/drm/drm_edid.h
--- linux-4.4.115/include/drm/drm_edid.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/drm/drm_edid.h	2019-10-29 09:26:25.413220499 +0100
@@ -209,6 +209,20 @@
 #define DRM_EDID_HDMI_DC_30               (1 << 4)
 #define DRM_EDID_HDMI_DC_Y444             (1 << 3)
 
+/* YCBCR 420 deep color modes */
+#define DRM_EDID_YCBCR420_DC_48  (1 << 2)
+#define DRM_EDID_YCBCR420_DC_36  (1 << 1)
+#define DRM_EDID_YCBCR420_DC_30  (1 << 0)
+
+#define DRM_EDID_COLORIMETRY_xvYCC_601	(1 << 0)
+#define DRM_EDID_COLORIMETRY_xvYCC_709	(1 << 1)
+#define DRM_EDID_COLORIMETRY_sYCC_601	(1 << 2)
+#define DRM_EDID_COLORIMETRY_ADBYCC_601	(1 << 3)
+#define DRM_EDID_COLORIMETRY_ADB_RGB	(1 << 4)
+#define DRM_EDID_COLORIMETRY_BT2020_CYCC	(1 << 5)
+#define DRM_EDID_COLORIMETRY_BT2020_YCC	(1 << 6)
+#define DRM_EDID_COLORIMETRY_BT2020_RGB	(1 << 7)
+
 /* ELD Header Block */
 #define DRM_ELD_HEADER_BLOCK_SIZE	4
 
@@ -266,6 +280,11 @@
 
 #define DRM_ELD_CEA_SAD(mnl, sad)	(20 + (mnl) + 3 * (sad))
 
+/* HDMI 2.0 */
+#define DRM_EDID_3D_INDEPENDENT_VIEW	(1 << 2)
+#define DRM_EDID_3D_DUAL_VIEW		(1 << 1)
+#define DRM_EDID_3D_OSD_DISPARITY	(1 << 0)
+
 struct edid {
 	u8 header[8];
 	/* Vendor & product info */
diff -ruw linux-4.4.115/include/drm/drm_mm.h linux-4.4.115-fbx/include/drm/drm_mm.h
--- linux-4.4.115/include/drm/drm_mm.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/drm/drm_mm.h	2019-01-22 16:16:28.163288606 +0100
@@ -37,6 +37,7 @@
  * Generic range manager structs
  */
 #include <linux/bug.h>
+#include <linux/rbtree.h>
 #include <linux/kernel.h>
 #include <linux/list.h>
 #include <linux/spinlock.h>
@@ -48,6 +49,7 @@
 	DRM_MM_SEARCH_DEFAULT =		0,
 	DRM_MM_SEARCH_BEST =		1 << 0,
 	DRM_MM_SEARCH_BELOW =		1 << 1,
+	DRM_MM_SEARCH_BOTTOM_UP =	1 << 2,
 };
 
 enum drm_mm_allocator_flags {
@@ -61,6 +63,8 @@
 struct drm_mm_node {
 	struct list_head node_list;
 	struct list_head hole_stack;
+	struct rb_node rb;
+	struct rb_node hole_node;
 	unsigned hole_follows : 1;
 	unsigned scanned_block : 1;
 	unsigned scanned_prev_free : 1;
@@ -70,6 +74,7 @@
 	unsigned long color;
 	u64 start;
 	u64 size;
+	u64 __subtree_last;
 	struct drm_mm *mm;
 };
 
@@ -79,6 +84,10 @@
 	/* head_node.node_list is the list of all memory nodes, ordered
 	 * according to the (increasing) start address of the memory node. */
 	struct drm_mm_node head_node;
+	/* Keep an interval_tree for fast lookup of drm_mm_nodes by address. */
+	struct rb_root interval_tree;
+	struct rb_root holes_tree;
+
 	unsigned int scan_check_range : 1;
 	unsigned scan_alignment;
 	unsigned long scan_color;
@@ -301,6 +310,12 @@
 void drm_mm_takedown(struct drm_mm *mm);
 bool drm_mm_clean(struct drm_mm *mm);
 
+struct drm_mm_node *
+drm_mm_interval_first(struct drm_mm *mm, u64 start, u64 last);
+
+struct drm_mm_node *
+drm_mm_interval_next(struct drm_mm_node *node, u64 start, u64 last);
+
 void drm_mm_init_scan(struct drm_mm *mm,
 		      u64 size,
 		      unsigned alignment,
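/*
 * Example (annotation, not part of the patch): the interval tree added to
 * struct drm_mm gives O(log n) lookups by address.  Typical iteration over
 * every node overlapping [start, last] (function name is illustrative):
 */
static void example_dump_range(struct drm_mm *mm, u64 start, u64 last)
{
	struct drm_mm_node *node;

	for (node = drm_mm_interval_first(mm, start, last);
	     node;
	     node = drm_mm_interval_next(node, start, last))
		pr_info("node [%llx, %llx)\n",
			(unsigned long long)node->start,
			(unsigned long long)(node->start + node->size));
}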
diff -ruw linux-4.4.115/include/dt-bindings/input/linux-event-codes.h linux-4.4.115-fbx/include/dt-bindings/input/linux-event-codes.h
--- linux-4.4.115/include/dt-bindings/input/linux-event-codes.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/dt-bindings/input/linux-event-codes.h	2019-10-29 09:26:25.545221791 +0100
@@ -611,6 +611,37 @@
 #define KEY_KBDINPUTASSIST_ACCEPT		0x264
 #define KEY_KBDINPUTASSIST_CANCEL		0x265
 
+/* Diagonal movement keys */
+#define KEY_RIGHT_UP			0x266
+#define KEY_RIGHT_DOWN			0x267
+#define KEY_LEFT_UP			0x268
+#define KEY_LEFT_DOWN			0x269
+
+#define KEY_ROOT_MENU			0x26a /* Show Device's Root Menu */
+/* Show Top Menu of the Media (e.g. DVD) */
+#define KEY_MEDIA_TOP_MENU		0x26b
+#define KEY_NUMERIC_11			0x26c
+#define KEY_NUMERIC_12			0x26d
+/*
+ * Toggle Audio Description: refers to an audio service that helps blind and
+ * visually impaired consumers understand the action in a program. Note: in
+ * some countries this is referred to as "Video Description".
+ */
+#define KEY_AUDIO_DESC			0x26e
+#define KEY_3D_MODE			0x26f
+#define KEY_NEXT_FAVORITE		0x270
+#define KEY_STOP_RECORD			0x271
+#define KEY_PAUSE_RECORD		0x272
+#define KEY_VOD				0x273 /* Video on Demand */
+#define KEY_UNMUTE			0x274
+#define KEY_FASTREVERSE			0x275
+#define KEY_SLOWREVERSE			0x276
+/*
+ * Control a data application associated with the currently viewed channel,
+ * e.g. teletext or data broadcast application (MHEG, MHP, HbbTV, etc.)
+ */
+#define KEY_DATA			0x277
+
 #define BTN_TRIGGER_HAPPY		0x2c0
 #define BTN_TRIGGER_HAPPY1		0x2c0
 #define BTN_TRIGGER_HAPPY2		0x2c1
@@ -653,6 +684,18 @@
 #define BTN_TRIGGER_HAPPY39		0x2e6
 #define BTN_TRIGGER_HAPPY40		0x2e7
 
+#define KEY_APP_TV			0x2f1
+#define KEY_APP_REPLAY			0x2f2
+#define KEY_APP_VIDEOCLUB		0x2f3
+#define KEY_APP_WHATSON			0x2f4
+#define KEY_APP_RECORDS			0x2f5
+#define KEY_APP_MEDIA			0x2f6
+#define KEY_APP_YOUTUBE			0x2f7
+#define KEY_APP_RADIOS			0x2f8
+#define KEY_APP_CANALVOD		0x2f9
+#define KEY_APP_PIP			0x2fa
+#define KEY_APP_NETFLIX			0x2fb
+
 /* We avoid low common keys in module aliases so they don't get huge. */
 #define KEY_MIN_INTERESTING	KEY_MUTE
 #define KEY_MAX			0x2ff
@@ -749,7 +792,11 @@
 #define SW_ROTATE_LOCK		0x0c  /* set = rotate locked/disabled */
 #define SW_LINEIN_INSERT	0x0d  /* set = inserted */
 #define SW_MUTE_DEVICE		0x0e  /* set = device disabled */
-#define SW_MAX			0x0f
+#define SW_HPHL_OVERCURRENT	0x0f  /* set = over current on left hph */
+#define SW_HPHR_OVERCURRENT	0x10  /* set = over current on right hph */
+#define SW_MICROPHONE2_INSERT   0x11  /* set = inserted */
+#define SW_UNSUPPORT_INSERT	0x12  /* set = unsupported device inserted */
+#define SW_MAX			0x20
 #define SW_CNT			(SW_MAX+1)
 
 /*
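/*
 * Example (annotation, not part of the patch): the KEY_APP_* block is a
 * Freebox-specific range for remote-control application shortcuts.  A
 * driver keymap would use the codes like any other KEY_* value (the
 * scancodes below are invented for illustration):
 */
static const unsigned short example_fbx_keymap[] = {
	[0x01] = KEY_APP_TV,
	[0x02] = KEY_APP_REPLAY,
	[0x03] = KEY_APP_VIDEOCLUB,
	[0x0a] = KEY_APP_PIP,
	[0x0b] = KEY_APP_NETFLIX,
};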
diff -ruw linux-4.4.115/include/kvm/arm_vgic.h linux-4.4.115-fbx/include/kvm/arm_vgic.h
--- linux-4.4.115/include/kvm/arm_vgic.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/kvm/arm_vgic.h	2019-01-22 16:16:28.187288823 +0100
@@ -279,6 +279,12 @@
 	u32		vgic_lr[VGIC_V2_MAX_LRS];
 };
 
+/*
+ * LRs are stored in reverse order in memory. Make sure we index them
+ * correctly.
+ */
+#define VGIC_V3_LR_INDEX(lr)		(VGIC_V3_MAX_LRS - 1 - (lr))
+
 struct vgic_v3_cpu_if {
 #ifdef CONFIG_KVM_ARM_VGIC_V3
 	u32		vgic_hcr;
diff -ruw linux-4.4.115/include/linux/alarmtimer.h linux-4.4.115-fbx/include/linux/alarmtimer.h
--- linux-4.4.115/include/linux/alarmtimer.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/linux/alarmtimer.h	2019-10-29 09:26:25.413220499 +0100
@@ -5,10 +5,12 @@
 #include <linux/hrtimer.h>
 #include <linux/timerqueue.h>
 #include <linux/rtc.h>
+#include <linux/types.h>
 
 enum alarmtimer_type {
 	ALARM_REALTIME,
 	ALARM_BOOTTIME,
+	ALARM_POWEROFF_REALTIME,
 
 	ALARM_NUMTYPE,
 };
@@ -48,6 +50,9 @@
 void alarm_restart(struct alarm *alarm);
 int alarm_try_to_cancel(struct alarm *alarm);
 int alarm_cancel(struct alarm *alarm);
+void set_power_on_alarm(void);
+void power_on_alarm_init(void);
+enum alarmtimer_type clock2alarm(clockid_t clockid);
 
 u64 alarm_forward(struct alarm *alarm, ktime_t now, ktime_t interval);
 u64 alarm_forward_now(struct alarm *alarm, ktime_t interval);
@@ -55,5 +60,8 @@
 
 /* Provide way to access the rtc device being used by alarmtimers */
 struct rtc_device *alarmtimer_get_rtcdev(void);
+#ifdef CONFIG_RTC_DRV_QPNP
+extern bool poweron_alarm;
+#endif
 
 #endif
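/*
 * Annotation (not part of the patch): clock2alarm() centralizes the
 * clockid -> alarmtimer_type mapping that timerfd previously open-coded.
 * The body lives in kernel/time/alarmtimer.c (not shown in this diff); a
 * plausible sketch of what it does, given the enum above:
 */
static enum alarmtimer_type example_clock2alarm(clockid_t clockid)
{
	if (clockid == CLOCK_REALTIME_ALARM)
		return ALARM_REALTIME;
	if (clockid == CLOCK_BOOTTIME_ALARM)
		return ALARM_BOOTTIME;
	if (clockid == CLOCK_POWEROFF_ALARM)
		return ALARM_POWEROFF_REALTIME;
	return -1;	/* caller is expected to have validated clockid */
}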
diff -ruw linux-4.4.115/include/linux/amba/bus.h linux-4.4.115-fbx/include/linux/amba/bus.h
--- linux-4.4.115/include/linux/amba/bus.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/linux/amba/bus.h	2019-01-22 16:16:28.187288823 +0100
@@ -163,4 +163,13 @@
 #define module_amba_driver(__amba_drv) \
 	module_driver(__amba_drv, amba_driver_register, amba_driver_unregister)
 
+/*
+ * builtin_amba_driver() - Helper macro for drivers that don't do anything
+ * special in driver initcall.  This eliminates a lot of boilerplate.  Each
+ * driver may only use this macro once, and calling it replaces
+ * device_initcall().
+ */
+#define builtin_amba_driver(__amba_drv) \
+	builtin_driver(__amba_drv, amba_driver_register)
+
 #endif
diff -ruw linux-4.4.115/include/linux/backing-dev-defs.h linux-4.4.115-fbx/include/linux/backing-dev-defs.h
--- linux-4.4.115/include/linux/backing-dev-defs.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/linux/backing-dev-defs.h	2019-10-29 09:26:25.413220499 +0100
@@ -10,6 +10,7 @@
 #include <linux/flex_proportions.h>
 #include <linux/timer.h>
 #include <linux/workqueue.h>
+#include <linux/kref.h>
 
 struct page;
 struct device;
@@ -141,6 +142,7 @@
 	void *congested_data;	/* Pointer to aux data for congested func */
 
 	char *name;
+	struct kref refcnt;	/* Reference counter for the structure */
 
 	unsigned int min_ratio;
 	unsigned int max_ratio, max_prop_frac;
diff -ruw linux-4.4.115/include/linux/backing-dev.h linux-4.4.115-fbx/include/linux/backing-dev.h
--- linux-4.4.115/include/linux/backing-dev.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/linux/backing-dev.h	2019-10-29 09:26:25.413220499 +0100
@@ -18,7 +18,14 @@
 #include <linux/slab.h>
 
 int __must_check bdi_init(struct backing_dev_info *bdi);
-void bdi_exit(struct backing_dev_info *bdi);
+
+static inline struct backing_dev_info *bdi_get(struct backing_dev_info *bdi)
+{
+	kref_get(&bdi->refcnt);
+	return bdi;
+}
+
+void bdi_put(struct backing_dev_info *bdi);
 
 __printf(3, 4)
 int bdi_register(struct backing_dev_info *bdi, struct device *parent,
@@ -29,6 +36,7 @@
 
 int __must_check bdi_setup_and_register(struct backing_dev_info *, char *);
 void bdi_destroy(struct backing_dev_info *bdi);
+struct backing_dev_info *bdi_alloc_node(gfp_t gfp_mask, int node_id);
 
 void wb_start_writeback(struct bdi_writeback *wb, long nr_pages,
 			bool range_cyclic, enum wb_reason reason);
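/*
 * Annotation (not part of the patch): bdi_get()/bdi_put() backport the
 * refcounted BDI lifetime, letting a backing_dev_info outlive the queue
 * that created it.  Usage sketch (function name is illustrative):
 */
static void example_bdi_io(struct backing_dev_info *bdi)
{
	bdi_get(bdi);		/* hold a reference across the I/O */
	/* ... submit and wait for writeback ... */
	bdi_put(bdi);		/* freed on the last reference */
}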
diff -ruw linux-4.4.115/include/linux/balloon_compaction.h linux-4.4.115-fbx/include/linux/balloon_compaction.h
--- linux-4.4.115/include/linux/balloon_compaction.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/linux/balloon_compaction.h	2019-01-22 16:16:28.191288859 +0100
@@ -48,6 +48,7 @@
 #include <linux/migrate.h>
 #include <linux/gfp.h>
 #include <linux/err.h>
+#include <linux/fs.h>
 
 /*
  * Balloon device information descriptor.
@@ -62,6 +63,7 @@
 	struct list_head pages;		/* Pages enqueued & handled to Host */
 	int (*migratepage)(struct balloon_dev_info *, struct page *newpage,
 			struct page *page, enum migrate_mode mode);
+	struct inode *inode;
 };
 
 extern struct page *balloon_page_enqueue(struct balloon_dev_info *b_dev_info);
@@ -73,45 +75,19 @@
 	spin_lock_init(&balloon->pages_lock);
 	INIT_LIST_HEAD(&balloon->pages);
 	balloon->migratepage = NULL;
+	balloon->inode = NULL;
 }
 
 #ifdef CONFIG_BALLOON_COMPACTION
-extern bool balloon_page_isolate(struct page *page);
+extern const struct address_space_operations balloon_aops;
+extern bool balloon_page_isolate(struct page *page,
+				isolate_mode_t mode);
 extern void balloon_page_putback(struct page *page);
-extern int balloon_page_migrate(struct page *newpage,
+extern int balloon_page_migrate(struct address_space *mapping,
+				struct page *newpage,
 				struct page *page, enum migrate_mode mode);
 
 /*
- * __is_movable_balloon_page - helper to perform @page PageBalloon tests
- */
-static inline bool __is_movable_balloon_page(struct page *page)
-{
-	return PageBalloon(page);
-}
-
-/*
- * balloon_page_movable - test PageBalloon to identify balloon pages
- *			  and PagePrivate to check that the page is not
- *			  isolated and can be moved by compaction/migration.
- *
- * As we might return false positives in the case of a balloon page being just
- * released under us, this need to be re-tested later, under the page lock.
- */
-static inline bool balloon_page_movable(struct page *page)
-{
-	return PageBalloon(page) && PagePrivate(page);
-}
-
-/*
- * isolated_balloon_page - identify an isolated balloon page on private
- *			   compaction/migration page lists.
- */
-static inline bool isolated_balloon_page(struct page *page)
-{
-	return PageBalloon(page);
-}
-
-/*
  * balloon_page_insert - insert a page into the balloon's page list and make
  *			 the page->private assignment accordingly.
  * @balloon : pointer to balloon device
@@ -124,7 +100,7 @@
 				       struct page *page)
 {
 	__SetPageBalloon(page);
-	SetPagePrivate(page);
+	__SetPageMovable(page, balloon->inode->i_mapping);
 	set_page_private(page, (unsigned long)balloon);
 	list_add(&page->lru, &balloon->pages);
 }
@@ -140,12 +116,15 @@
 static inline void balloon_page_delete(struct page *page)
 {
 	__ClearPageBalloon(page);
+	__ClearPageMovable(page);
 	set_page_private(page, 0);
-	if (PagePrivate(page)) {
-		ClearPagePrivate(page);
+	/*
+	 * Do not touch the page.lru field once @page has been isolated,
+	 * because the VM is using that field.
+	 */
+	if (!PageIsolated(page))
 		list_del(&page->lru);
-	}
 }
 
 /*
  * balloon_page_device - get the b_dev_info descriptor for the balloon device
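/*
 * Annotation (not part of the patch): with the movable-page scheme above,
 * a balloon driver must publish an address_space via b_dev_info->inode
 * before inserting pages, so __SetPageMovable() has a mapping to attach.
 * A sketch loosely following what upstream virtio_balloon does (how the
 * inode is obtained is driver-specific and left out):
 */
static void example_balloon_setup(struct balloon_dev_info *b_dev_info,
				  struct inode *inode)
{
	balloon_devinfo_init(b_dev_info);
	inode->i_mapping->a_ops = &balloon_aops;
	b_dev_info->inode = inode;	/* backs __SetPageMovable() */
}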
diff -ruw linux-4.4.115/include/linux/blkdev.h linux-4.4.115-fbx/include/linux/blkdev.h
--- linux-4.4.115/include/linux/blkdev.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/linux/blkdev.h	2019-10-29 09:26:25.417220539 +0100
@@ -197,6 +197,9 @@
 
 	/* for bidi */
 	struct request *next_rq;
+
+	ktime_t			lat_hist_io_start;
+	int			lat_hist_enabled;
 };
 
 static inline unsigned short req_get_ioprio(struct request *req)
@@ -329,7 +332,7 @@
 	 */
 	struct delayed_work	delay_work;
 
-	struct backing_dev_info	backing_dev_info;
+	struct backing_dev_info	*backing_dev_info;
 
 	/*
 	 * The queue owner gets to use this for whatever they like.
@@ -489,6 +492,7 @@
 #define QUEUE_FLAG_INIT_DONE   20	/* queue is initialized */
 #define QUEUE_FLAG_NO_SG_MERGE 21	/* don't attempt to merge SG segments*/
 #define QUEUE_FLAG_POLL	       22	/* IO polling enabled if set */
+#define QUEUE_FLAG_FAST        23	/* fast block device (e.g. ram based) */
 
 #define QUEUE_FLAG_DEFAULT	((1 << QUEUE_FLAG_IO_STAT) |		\
 				 (1 << QUEUE_FLAG_STACKABLE)	|	\
@@ -577,6 +581,7 @@
 #define blk_queue_discard(q)	test_bit(QUEUE_FLAG_DISCARD, &(q)->queue_flags)
 #define blk_queue_secdiscard(q)	(blk_queue_discard(q) && \
 	test_bit(QUEUE_FLAG_SECDISCARD, &(q)->queue_flags))
+#define blk_queue_fast(q)	test_bit(QUEUE_FLAG_FAST, &(q)->queue_flags)
 
 #define blk_noretry_request(rq) \
 	((rq)->cmd_flags & (REQ_FAILFAST_DEV|REQ_FAILFAST_TRANSPORT| \
@@ -794,6 +799,7 @@
 extern int sg_scsi_ioctl(struct request_queue *, struct gendisk *, fmode_t,
 			 struct scsi_ioctl_command __user *);
 
+extern void blk_recalc_rq_segments(struct request *rq);
 extern int blk_queue_enter(struct request_queue *q, gfp_t gfp);
 extern void blk_queue_exit(struct request_queue *q);
 extern void blk_start_queue(struct request_queue *q);
@@ -1006,6 +1012,8 @@
 extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev);
 
 extern int blk_rq_map_sg(struct request_queue *, struct request *, struct scatterlist *);
+extern int blk_rq_map_sg_no_cluster
+	(struct request_queue *, struct request *, struct scatterlist *);
 extern void blk_dump_rq_flags(struct request *, char *);
 extern long nr_blockdev_pages(void);
 
@@ -1118,6 +1126,7 @@
 #define BLKDEV_DISCARD_SECURE  0x01    /* secure discard */
 
 extern int blkdev_issue_flush(struct block_device *, gfp_t, sector_t *);
+extern int blkdev_issue_barrier(struct block_device *, gfp_t, sector_t *);
 extern int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
 		sector_t nr_sects, gfp_t gfp_mask, unsigned long flags);
 extern int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
@@ -1656,6 +1665,62 @@
 						struct writeback_control *);
 extern long bdev_direct_access(struct block_device *, sector_t,
 		void __pmem **addr, unsigned long *pfn, long size);
+
+/*
+ * X-axis for IO latency histogram support.
+ */
+static const u_int64_t latency_x_axis_us[] = {
+	100,
+	200,
+	300,
+	400,
+	500,
+	600,
+	700,
+	800,
+	900,
+	1000,
+	1200,
+	1400,
+	1600,
+	1800,
+	2000,
+	2500,
+	3000,
+	4000,
+	5000,
+	6000,
+	7000,
+	9000,
+	10000
+};
+
+#define BLK_IO_LAT_HIST_DISABLE         0
+#define BLK_IO_LAT_HIST_ENABLE          1
+#define BLK_IO_LAT_HIST_ZERO            2
+
+struct io_latency_state {
+	u_int64_t	latency_y_axis[ARRAY_SIZE(latency_x_axis_us) + 1];
+	u_int64_t	latency_elems;
+	u_int64_t	latency_sum;
+};
+
+static inline void
+blk_update_latency_hist(struct io_latency_state *s, u_int64_t delta_us)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(latency_x_axis_us); i++)
+		if (delta_us < (u_int64_t)latency_x_axis_us[i])
+			break;
+	s->latency_y_axis[i]++;
+	s->latency_elems++;
+	s->latency_sum += delta_us;
+}
+
+ssize_t blk_latency_hist_show(char *name, struct io_latency_state *s,
+		char *buf, int buf_size);
+
 #else /* CONFIG_BLOCK */
 
 struct block_device;
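/*
 * Example (annotation, not part of the patch): a completion path feeds
 * blk_update_latency_hist() with per-request deltas, using the
 * lat_hist_io_start stamp added to struct request (the state instance and
 * function name are illustrative):
 */
static struct io_latency_state example_read_lat;

static void example_complete_read(struct request *rq)
{
	if (rq->lat_hist_enabled) {
		s64 delta_us = ktime_us_delta(ktime_get(),
					      rq->lat_hist_io_start);
		blk_update_latency_hist(&example_read_lat, delta_us);
	}
}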
diff -ruw linux-4.4.115/include/linux/blk_types.h linux-4.4.115-fbx/include/linux/blk_types.h
--- linux-4.4.115/include/linux/blk_types.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/linux/blk_types.h	2019-10-29 09:26:25.417220539 +0100
@@ -39,6 +39,15 @@
 						   current bvec */
 };
 
+#ifdef CONFIG_BLOCK_PERF_FRAMEWORK
+/* Mirror of union ktime from ktime.h, duplicated here to avoid breaking
+ * the include dependency chain; must be kept in sync with ktime.h.
+ */
+union blk_ktime {
+	s64	tv64;
+};
+#endif
+
 /*
  * main unit of I/O for the block layer and lower layers (ie drivers and
  * stacking drivers)
@@ -54,6 +63,10 @@
 
 	struct bvec_iter	bi_iter;
 
+#ifdef CONFIG_BLOCK_PERF_FRAMEWORK
+	union blk_ktime		submit_time;
+	unsigned int            blk_sector_count;
+#endif
 	/* Number of segments in this BIO after
 	 * physical address coalescing is performed.
 	 */
@@ -88,6 +101,13 @@
 	unsigned short		bi_vcnt;	/* how many bio_vec's */
 
 	/*
+	 * When using direct-io (O_DIRECT), we can't get the inode from a bio
+	 * by walking bio->bi_io_vec->bv_page->mapping->host
+	 * since the page is anon.
+	 */
+	struct inode		*bi_dio_inode;
+
+	/*
 	 * Everything starting with bi_max_vecs will be preserved by bio_reset()
 	 */
 
@@ -127,6 +147,13 @@
  */
 #define BIO_RESET_BITS	13
 #define BIO_OWNS_VEC	13	/* bio_free() should free bvec */
+/*
+ * Added for request-based dm, which needs to perform post processing.
+ * This flag ensures blk_update_request() does not free the bios or the
+ * request; that is done at the dm level.
+ */
+#define BIO_DONTFREE 14
+#define BIO_INLINECRYPT 15
 
 /*
  * top 4 bits of bio flags indicate the pool this bio came from
@@ -161,6 +188,8 @@
 	__REQ_INTEGRITY,	/* I/O includes block integrity payload */
 	__REQ_FUA,		/* forced unit access */
 	__REQ_FLUSH,		/* request for cache flush */
+	__REQ_POST_FLUSH_BARRIER, /* cache barrier after a data req */
+	__REQ_BARRIER,		/* marks flush req as barrier */
 
 	/* bio only flags */
 	__REQ_RAHEAD,		/* read ahead, can fail anytime */
@@ -168,7 +197,7 @@
 				 * throttling rules. Don't do it again. */
 
 	/* request only flags */
-	__REQ_SORTED,		/* elevator knows about this request */
+	__REQ_SORTED = __REQ_RAHEAD, /* elevator knows about this request */
 	__REQ_SOFTBARRIER,	/* may not be passed by ioscheduler */
 	__REQ_NOMERGE,		/* don't touch this for merging */
 	__REQ_STARTED,		/* drive already may have started this one */
@@ -189,6 +218,7 @@
 	__REQ_HASHED,		/* on IO scheduler merge hash */
 	__REQ_MQ_INFLIGHT,	/* track inflight for MQ */
 	__REQ_NO_TIMEOUT,	/* requests may never expire */
+	__REQ_URGENT,		/* urgent request */
 	__REQ_NR_BITS,		/* stops here */
 };
 
@@ -201,6 +231,7 @@
 #define REQ_PRIO		(1ULL << __REQ_PRIO)
 #define REQ_DISCARD		(1ULL << __REQ_DISCARD)
 #define REQ_WRITE_SAME		(1ULL << __REQ_WRITE_SAME)
+#define REQ_URGENT		(1ULL << __REQ_URGENT)
 #define REQ_NOIDLE		(1ULL << __REQ_NOIDLE)
 #define REQ_INTEGRITY		(1ULL << __REQ_INTEGRITY)
 
@@ -209,7 +240,7 @@
 #define REQ_COMMON_MASK \
 	(REQ_WRITE | REQ_FAILFAST_MASK | REQ_SYNC | REQ_META | REQ_PRIO | \
 	 REQ_DISCARD | REQ_WRITE_SAME | REQ_NOIDLE | REQ_FLUSH | REQ_FUA | \
-	 REQ_SECURE | REQ_INTEGRITY)
+	 REQ_SECURE | REQ_INTEGRITY | REQ_BARRIER)
 #define REQ_CLONE_MASK		REQ_COMMON_MASK
 
 #define BIO_NO_ADVANCE_ITER_MASK	(REQ_DISCARD|REQ_WRITE_SAME)
@@ -224,6 +255,7 @@
 #define REQ_SORTED		(1ULL << __REQ_SORTED)
 #define REQ_SOFTBARRIER		(1ULL << __REQ_SOFTBARRIER)
 #define REQ_FUA			(1ULL << __REQ_FUA)
+#define REQ_BARRIER		(1ULL << __REQ_BARRIER)
 #define REQ_NOMERGE		(1ULL << __REQ_NOMERGE)
 #define REQ_STARTED		(1ULL << __REQ_STARTED)
 #define REQ_DONTPREP		(1ULL << __REQ_DONTPREP)
@@ -235,6 +267,7 @@
 #define REQ_ALLOCED		(1ULL << __REQ_ALLOCED)
 #define REQ_COPY_USER		(1ULL << __REQ_COPY_USER)
 #define REQ_FLUSH		(1ULL << __REQ_FLUSH)
+#define REQ_POST_FLUSH_BARRIER	(1ULL << __REQ_POST_FLUSH_BARRIER)
 #define REQ_FLUSH_SEQ		(1ULL << __REQ_FLUSH_SEQ)
 #define REQ_IO_STAT		(1ULL << __REQ_IO_STAT)
 #define REQ_MIXED_MERGE		(1ULL << __REQ_MIXED_MERGE)
diff -ruw linux-4.4.115/include/linux/bug.h linux-4.4.115-fbx/include/linux/bug.h
--- linux-4.4.115/include/linux/bug.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/linux/bug.h	2019-10-29 09:26:25.417220539 +0100
@@ -109,4 +109,10 @@
 }
 
 #endif	/* CONFIG_GENERIC_BUG */
+
+#ifdef CONFIG_PANIC_ON_DATA_CORRUPTION
+#define PANIC_CORRUPTION 1
+#else
+#define PANIC_CORRUPTION 0
+#endif  /* CONFIG_PANIC_ON_DATA_CORRUPTION */
 #endif	/* _LINUX_BUG_H */
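/*
 * Annotation (not part of the patch): PANIC_CORRUPTION lets data-structure
 * sanity checks escalate from a warning to an immediate panic when
 * CONFIG_PANIC_ON_DATA_CORRUPTION is set.  Sketch of the intended consumer
 * pattern (the check itself is illustrative):
 */
static inline void example_check_list(struct list_head *entry)
{
	if (unlikely(entry->next->prev != entry)) {
		if (PANIC_CORRUPTION)
			panic("list corruption at %p\n", entry);
		WARN(1, "list corruption at %p\n", entry);
	}
}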
diff -ruw linux-4.4.115/include/linux/cache.h linux-4.4.115-fbx/include/linux/cache.h
--- linux-4.4.115/include/linux/cache.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/linux/cache.h	2019-01-22 16:16:28.199288932 +0100
@@ -12,10 +12,24 @@
 #define SMP_CACHE_BYTES L1_CACHE_BYTES
 #endif
 
+/*
+ * __read_mostly is used to keep rarely changing variables out of frequently
+ * updated cachelines. If an architecture doesn't support it, ignore the
+ * hint.
+ */
 #ifndef __read_mostly
 #define __read_mostly
 #endif
 
+/*
+ * __ro_after_init is used to mark things that are read-only after init (i.e.
+ * after mark_rodata_ro() has been called). These are effectively read-only,
+ * but may get written to during init, so can't live in .rodata (via "const").
+ */
+#ifndef __ro_after_init
+#define __ro_after_init __attribute__((__section__(".data..ro_after_init")))
+#endif
+
 #ifndef ____cacheline_aligned
 #define ____cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES)))
 #endif
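/*
 * Example (annotation, not part of the patch): __ro_after_init marks data
 * written only during boot; mark_rodata_ro() later write-protects the
 * .data..ro_after_init section.  Typical use:
 */
static unsigned long example_boot_param __ro_after_init;

static int __init example_param_setup(char *str)
{
	/* still writable here; the section is sealed after init */
	return kstrtoul(str, 0, &example_boot_param);
}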
diff -ruw linux-4.4.115/include/linux/cgroup-defs.h linux-4.4.115-fbx/include/linux/cgroup-defs.h
--- linux-4.4.115/include/linux/cgroup-defs.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/linux/cgroup-defs.h	2019-01-22 16:16:28.203288968 +0100
@@ -66,7 +66,6 @@
 
 /* cgroup_root->flags */
 enum {
-	CGRP_ROOT_SANE_BEHAVIOR	= (1 << 0), /* __DEVEL__sane_behavior specified */
 	CGRP_ROOT_NOPREFIX	= (1 << 1), /* mounted subsystems have no named prefix */
 	CGRP_ROOT_XATTR		= (1 << 2), /* supports extended attributes */
 };
diff -ruw linux-4.4.115/include/linux/cgroup_subsys.h linux-4.4.115-fbx/include/linux/cgroup_subsys.h
--- linux-4.4.115/include/linux/cgroup_subsys.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/linux/cgroup_subsys.h	2019-01-22 16:16:28.203288968 +0100
@@ -26,6 +26,10 @@
 SUBSYS(cpuacct)
 #endif
 
+#if IS_ENABLED(CONFIG_CGROUP_SCHEDTUNE)
+SUBSYS(schedtune)
+#endif
+
 #if IS_ENABLED(CONFIG_BLK_CGROUP)
 SUBSYS(io)
 #endif
diff -ruw linux-4.4.115/include/linux/clkdev.h linux-4.4.115-fbx/include/linux/clkdev.h
--- linux-4.4.115/include/linux/clkdev.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/linux/clkdev.h	2019-01-22 16:16:28.203288968 +0100
@@ -21,6 +21,7 @@
 	struct list_head	node;
 	const char		*dev_id;
 	const char		*con_id;
+	int                     of_idx;
 	struct clk		*clk;
 	struct clk_hw		*clk_hw;
 };
diff -ruw linux-4.4.115/include/linux/clk.h linux-4.4.115-fbx/include/linux/clk.h
--- linux-4.4.115/include/linux/clk.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/linux/clk.h	2019-01-22 16:16:28.203288968 +0100
@@ -408,6 +408,16 @@
  */
 struct clk *clk_get_sys(const char *dev_id, const char *con_id);
 
+/**
+ * clk_set_flags - set the custom specific flags for this clock
+ * @clk: clock source
+ * @flags: custom flags which would be hardware specific, defined for specific
+ *	   hardware.
+ *
+ * Returns success 0 or negative errno.
+ */
+int clk_set_flags(struct clk *clk, unsigned long flags);
+
 #else /* !CONFIG_HAVE_CLK */
 
 static inline struct clk *clk_get(struct device *dev, const char *id)
@@ -488,7 +498,7 @@
 struct device_node;
 struct of_phandle_args;
 
-#if defined(CONFIG_OF) && defined(CONFIG_COMMON_CLK)
+#if defined(CONFIG_OF)
 struct clk *of_clk_get(struct device_node *np, int index);
 struct clk *of_clk_get_by_name(struct device_node *np, const char *name);
 struct clk *of_clk_get_from_provider(struct of_phandle_args *clkspec);
diff -ruw linux-4.4.115/include/linux/clk-provider.h linux-4.4.115-fbx/include/linux/clk-provider.h
--- linux-4.4.115/include/linux/clk-provider.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/linux/clk-provider.h	2019-01-22 16:16:28.203288968 +0100
@@ -13,6 +13,7 @@
 
 #include <linux/io.h>
 #include <linux/of.h>
+#include <linux/mutex.h>
 
 #ifdef CONFIG_COMMON_CLK
 
@@ -31,6 +32,11 @@
 #define CLK_SET_RATE_NO_REPARENT BIT(7) /* don't re-parent on rate change */
 #define CLK_GET_ACCURACY_NOCACHE BIT(8) /* do not use the cached clk accuracy */
 #define CLK_RECALC_NEW_RATES	BIT(9) /* recalc rates after notifications */
+#define CLK_IS_CRITICAL		BIT(11) /* do not gate, ever */
+#define CLK_ENABLE_HAND_OFF	BIT(12) /* enable clock when registered.
+					   hand-off enable_count & prepare_count
+					   to first consumer that enables clk */
+#define CLK_IS_MEASURE          BIT(14) /* measure clock */
 
 struct clk;
 struct clk_hw;
@@ -173,6 +179,16 @@
  *		directory is provided as an argument.  Called with
  *		prepare_lock held.  Returns 0 on success, -EERROR otherwise.
  *
+ * @set_flags: Set custom flags which deal with hardware specifics. Returns 0
+ *	       on success, -EERROR otherwise.
+ *
+ * @list_registers: Queries the hardware to get the current register contents.
+ *		    This callback is optional; clocks that need it may
+ *		    provide one.
+ *
+ * @list_rate:	Return the nth supported frequency below rate_max for a
+ *		given clock, or -ENXIO if the clock has no frequency
+ *		table.
  *
  * The clk_enable/clk_disable and clk_prepare/clk_unprepare pairs allow
  * implementations to split any work between atomic (enable) and sleepable
@@ -213,6 +229,11 @@
 	int		(*set_phase)(struct clk_hw *hw, int degrees);
 	void		(*init)(struct clk_hw *hw);
 	int		(*debug_init)(struct clk_hw *hw, struct dentry *dentry);
+	int		(*set_flags)(struct clk_hw *hw, unsigned flags);
+	void		(*list_registers)(struct seq_file *f,
+							struct clk_hw *hw);
+	long		(*list_rate)(struct clk_hw *hw, unsigned n,
+							unsigned long rate_max);
 };
 
 /**
@@ -224,6 +245,9 @@
  * @parent_names: array of string names for all possible parents
  * @num_parents: number of possible parents
  * @flags: framework-level hints and quirks
+ * @vdd_class: voltage scaling requirement class
+ * @rate_max: maximum clock rate in Hz supported at each voltage level
+ * @num_rate_max: number of maximum voltage level supported
  */
 struct clk_init_data {
 	const char		*name;
@@ -231,7 +255,72 @@
 	const char		* const *parent_names;
 	u8			num_parents;
 	unsigned long		flags;
-};
+	struct clk_vdd_class	*vdd_class;
+	unsigned long		*rate_max;
+	int			num_rate_max;
+};
+
+struct regulator;
+
+/**
+ * struct clk_vdd_class - Voltage scaling class
+ * @class_name: name of the class
+ * @regulator: array of regulators
+ * @num_regulators: size of regulator array. Standard regulator APIs will be
+ *		     used if this field > 0
+ * @set_vdd: function to call when applying a new voltage setting
+ * @vdd_uv: sorted 2D array of legal voltage settings. Indexed by level, then
+ *	     regulator
+ * @level_votes: array of votes for each level
+ * @num_levels: specifies the size of level_votes array
+ * @skip_handoff: do not vote for the max possible voltage during init
+ * @use_max_uV: use INT_MAX for max_uV when calling regulator_set_voltage
+ * @cur_level: the currently set voltage level
+ * @lock: lock to protect this struct
+ */
+struct clk_vdd_class {
+	const char *class_name;
+	struct regulator **regulator;
+	int num_regulators;
+	int (*set_vdd)(struct clk_vdd_class *v_class, int level);
+	int *vdd_uv;
+	int *level_votes;
+	int num_levels;
+	bool skip_handoff;
+	bool use_max_uV;
+	unsigned long cur_level;
+	struct mutex lock;
+};
+
+#define DEFINE_VDD_CLASS(_name, _set_vdd, _num_levels) \
+	struct clk_vdd_class _name = { \
+		.class_name = #_name, \
+		.set_vdd = _set_vdd, \
+		.level_votes = (int [_num_levels]) {}, \
+		.num_levels = _num_levels, \
+		.cur_level = _num_levels, \
+		.lock = __MUTEX_INITIALIZER(_name.lock) \
+	}
+
+#define DEFINE_VDD_REGULATORS(_name, _num_levels, _num_regulators, _vdd_uv) \
+	struct clk_vdd_class _name = { \
+		.class_name = #_name, \
+		.vdd_uv = _vdd_uv, \
+		.regulator = (struct regulator * [_num_regulators]) {}, \
+		.num_regulators = _num_regulators, \
+		.level_votes = (int [_num_levels]) {}, \
+		.num_levels = _num_levels, \
+		.cur_level = _num_levels, \
+		.lock = __MUTEX_INITIALIZER(_name.lock) \
+	}
+
+#define DEFINE_VDD_REGS_INIT(_name, _num_regulators) \
+	struct clk_vdd_class _name = { \
+		.class_name = #_name, \
+		.regulator = (struct regulator * [_num_regulators]) {}, \
+		.num_regulators = _num_regulators, \
+		.lock = __MUTEX_INITIALIZER(_name.lock) \
+	}
 
 /**
  * struct clk_hw - handle for traversing from a struct clk to its corresponding
@@ -667,6 +756,9 @@
 void clk_hw_set_rate_range(struct clk_hw *hw, unsigned long min_rate,
 			   unsigned long max_rate);
 
+unsigned long clk_aggregate_rate(struct clk_hw *hw,
+					const struct clk_core *parent);
+
 static inline void __clk_hw_set_clk(struct clk_hw *dst, struct clk_hw *src)
 {
 	dst->clk = src->clk;
@@ -704,7 +796,8 @@
 int of_clk_parent_fill(struct device_node *np, const char **parents,
 		       unsigned int size);
 const char *of_clk_get_parent_name(struct device_node *np, int index);
-
+int of_clk_detect_critical(struct device_node *np, int index,
+			    unsigned long *flags);
 void of_clk_init(const struct of_device_id *matches);
 
 #else /* !CONFIG_OF */
@@ -742,6 +835,13 @@
 {
 	return NULL;
 }
+
+static inline int of_clk_detect_critical(struct device_node *np, int index,
+					  unsigned long *flags)
+{
+	return 0;
+}
+
 #define of_clk_init(matches) \
 	{ while (0); }
 #endif /* CONFIG_OF */
@@ -781,6 +881,13 @@
 struct dentry *clk_debugfs_add_file(struct clk_hw *hw, char *name, umode_t mode,
 				void *data, const struct file_operations *fops);
 #endif
+#else
+struct of_device_id;
+
+static inline void __init of_clk_init(const struct of_device_id *matches)
+{
+	return;
+}
 
 #endif /* CONFIG_COMMON_CLK */
 #endif /* CLK_PROVIDER_H */
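/*
 * Example (annotation, not part of the patch): the vdd_class machinery is
 * how MSM clock drivers tie clock rates to regulator corners.  A sketch of
 * a driver-side table (all voltages and rates are illustrative):
 */
static int example_vdd_uv[] = {
	0,		/* level 0: off */
	1225000,	/* level 1: nominal */
	1350000,	/* level 2: turbo */
};

static DEFINE_VDD_REGULATORS(example_vdd, 3, 1, example_vdd_uv);

static struct clk_init_data example_init = {
	.name		= "example_clk",
	/* .ops, parents, etc. omitted from this sketch */
	.vdd_class	= &example_vdd,
	.rate_max	= (unsigned long []){ 0, 200000000, 400000000 },
	.num_rate_max	= 3,
};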
diff -ruw linux-4.4.115/include/linux/clocksource.h linux-4.4.115-fbx/include/linux/clocksource.h
--- linux-4.4.115/include/linux/clocksource.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/linux/clocksource.h	2019-10-29 09:26:25.421220578 +0100
@@ -186,6 +186,7 @@
 extern void clocksource_resume(void);
 extern struct clocksource * __init clocksource_default_clock(void);
 extern void clocksource_mark_unstable(struct clocksource *cs);
+extern void clocksource_select_force(void);
 
 extern u64
 clocks_calc_max_nsecs(u32 mult, u32 shift, u32 maxadj, u64 mask, u64 *max_cycles);
diff -ruw linux-4.4.115/include/linux/compaction.h linux-4.4.115-fbx/include/linux/compaction.h
--- linux-4.4.115/include/linux/compaction.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/linux/compaction.h	2019-01-22 16:16:28.207289004 +0100
@@ -52,6 +52,10 @@
 				bool alloc_success);
 extern bool compaction_restarting(struct zone *zone, int order);
 
+extern int kcompactd_run(int nid);
+extern void kcompactd_stop(int nid);
+extern void wakeup_kcompactd(pg_data_t *pgdat, int order, int classzone_idx);
+
 #else
 static inline unsigned long try_to_compact_pages(gfp_t gfp_mask,
 			unsigned int order, int alloc_flags,
@@ -84,9 +88,22 @@
 	return true;
 }
 
+static inline int kcompactd_run(int nid)
+{
+	return 0;
+}
+static inline void kcompactd_stop(int nid)
+{
+}
+
+static inline void wakeup_kcompactd(pg_data_t *pgdat, int order, int classzone_idx)
+{
+}
+
 #endif /* CONFIG_COMPACTION */
 
 #if defined(CONFIG_COMPACTION) && defined(CONFIG_SYSFS) && defined(CONFIG_NUMA)
+struct node;
 extern int compaction_register_node(struct node *node);
 extern void compaction_unregister_node(struct node *node);
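/*
 * Annotation (not part of the patch): kcompactd is the per-node background
 * compaction thread backported from v4.6; kswapd is its typical caller
 * once reclaim alone cannot satisfy a high-order allocation:
 */
static void example_try_background_compact(pg_data_t *pgdat, int order,
					   int classzone_idx)
{
	wakeup_kcompactd(pgdat, order, classzone_idx);
}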
 
diff -ruw linux-4.4.115/include/linux/compiler-gcc.h linux-4.4.115-fbx/include/linux/compiler-gcc.h
--- linux-4.4.115/include/linux/compiler-gcc.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/linux/compiler-gcc.h	2019-10-29 09:26:25.421220578 +0100
@@ -66,18 +66,22 @@
 
 /*
  * Force always-inline if the user requests it so via the .config,
- * or if gcc is too old:
+ * or if gcc is too old.
+ * GCC does not warn about unused static inline functions for
+ * -Wunused-function.  This turns out to avoid the need for complex #ifdef
+ * directives.  Suppress the warning in clang as well by using "unused"
+ * function attribute, which is redundant but not harmful for gcc.
  */
 #if !defined(CONFIG_ARCH_SUPPORTS_OPTIMIZED_INLINING) ||		\
     !defined(CONFIG_OPTIMIZE_INLINING) || (__GNUC__ < 4)
-#define inline		inline		__attribute__((always_inline)) notrace
-#define __inline__	__inline__	__attribute__((always_inline)) notrace
-#define __inline	__inline	__attribute__((always_inline)) notrace
+#define inline inline		__attribute__((always_inline,unused)) notrace
+#define __inline__ __inline__	__attribute__((always_inline,unused)) notrace
+#define __inline __inline	__attribute__((always_inline,unused)) notrace
 #else
 /* A lot of inline functions can cause havoc with function tracing */
-#define inline		inline		notrace
-#define __inline__	__inline__	notrace
-#define __inline	__inline	notrace
+#define inline inline		__attribute__((unused)) notrace
+#define __inline__ __inline__	__attribute__((unused)) notrace
+#define __inline __inline	__attribute__((unused)) notrace
 #endif
 
 #define __always_inline	inline __attribute__((always_inline))
diff -ruw linux-4.4.115/include/linux/coresight.h linux-4.4.115-fbx/include/linux/coresight.h
--- linux-4.4.115/include/linux/coresight.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/linux/coresight.h	2019-01-22 16:16:28.207289004 +0100
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012, 2016 The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -40,6 +40,13 @@
 
 extern struct bus_type coresight_bustype;
 
+enum coresight_clk_rate {
+	CORESIGHT_CLK_RATE_OFF,
+	CORESIGHT_CLK_RATE_TRACE = 1000,
+	CORESIGHT_CLK_RATE_HSTRACE = 2000,
+	CORESIGHT_CLK_RATE_FIXED = 3000,
+};
+
 enum coresight_dev_type {
 	CORESIGHT_DEV_TYPE_NONE,
 	CORESIGHT_DEV_TYPE_SINK,
@@ -94,6 +101,7 @@
 		connected  to.
  * @nr_outport:	number of output ports for this component.
  * @clk:	The clock this component is associated to.
+ * @default_sink: Flag to set default sink
  */
 struct coresight_platform_data {
 	int cpu;
@@ -104,6 +112,7 @@
 	int *child_ports;
 	int nr_outport;
 	struct clk *clk;
+	bool default_sink;
 };
 
 /**
@@ -185,10 +194,12 @@
  * Operations available for sinks
  * @enable:	enables the sink.
  * @disable:	disables the sink.
+ * @abort:	captures sink trace on abort
  */
 struct coresight_ops_sink {
 	int (*enable)(struct coresight_device *csdev);
 	void (*disable)(struct coresight_device *csdev);
+	void (*abort)(struct coresight_device *csdev);
 };
 
 /**
@@ -230,6 +241,7 @@
 extern void coresight_disable(struct coresight_device *csdev);
 extern int coresight_timeout(void __iomem *addr, u32 offset,
 			     int position, int value);
+extern void coresight_abort(void);
 #else
 static inline struct coresight_device *
 coresight_register(struct coresight_desc *desc) { return NULL; }
@@ -239,14 +251,19 @@
 static inline void coresight_disable(struct coresight_device *csdev) {}
 static inline int coresight_timeout(void __iomem *addr, u32 offset,
 				     int position, int value) { return 1; }
+static inline void coresight_abort(void) {}
 #endif
 
-#ifdef CONFIG_OF
+#if defined(CONFIG_OF) && defined(CONFIG_CORESIGHT)
 extern struct coresight_platform_data *of_get_coresight_platform_data(
 				struct device *dev, struct device_node *node);
+extern struct coresight_cti_data *of_get_coresight_cti_data(
+				struct device *dev, struct device_node *node);
 #else
 static inline struct coresight_platform_data *of_get_coresight_platform_data(
 	struct device *dev, struct device_node *node) { return NULL; }
+static inline struct coresight_cti_data *of_get_coresight_cti_data(
+	struct device *dev, struct device_node *node) { return NULL; }
 #endif
 
 #ifdef CONFIG_PID_NS
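
A sketch of how a sink driver could populate the new abort hook; the demo_* names are hypothetical, and the assumption is that coresight_abort() ends up invoking ->abort on the active sink so a trace snapshot survives a crash:

#include <linux/coresight.h>

static int demo_sink_enable(struct coresight_device *csdev)
{
	return 0;	/* start capturing trace */
}

static void demo_sink_disable(struct coresight_device *csdev)
{
	/* stop capturing trace */
}

static void demo_sink_abort(struct coresight_device *csdev)
{
	/* flush and freeze the buffer for a post-mortem dump */
}

static const struct coresight_ops_sink demo_sink_ops = {
	.enable		= demo_sink_enable,
	.disable	= demo_sink_disable,
	.abort		= demo_sink_abort,
};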
diff -ruw linux-4.4.115/include/linux/cpu_cooling.h linux-4.4.115-fbx/include/linux/cpu_cooling.h
--- linux-4.4.115/include/linux/cpu_cooling.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/linux/cpu_cooling.h	2019-01-22 16:16:28.207289004 +0100
@@ -31,6 +31,11 @@
 typedef int (*get_static_t)(cpumask_t *cpumask, int interval,
 			    unsigned long voltage, u32 *power);
 
+struct cpu_cooling_ops {
+	int (*ceil_limit)(int, u32);
+	int (*get_cur_state)(int, unsigned long *);
+};
+
 #ifdef CONFIG_CPU_THERMAL
 /**
  * cpufreq_cooling_register - function to create cpufreq cooling device.
@@ -43,6 +48,10 @@
 cpufreq_power_cooling_register(const struct cpumask *clip_cpus,
 			       u32 capacitance, get_static_t plat_static_func);
 
+struct thermal_cooling_device *
+cpufreq_platform_cooling_register(const struct cpumask *clip_cpus,
+					struct cpu_cooling_ops *ops);
+
 /**
  * of_cpufreq_cooling_register - create cpufreq cooling device based on DT.
  * @np: a valid struct device_node to the cooling device device tree node.
@@ -111,6 +120,13 @@
 {
 	return NULL;
 }
+
+static inline struct thermal_cooling_device *
+cpufreq_platform_cooling_register(const struct cpumask *clip_cpus,
+					struct cpu_cooling_ops *ops)
+{
+	return NULL;
+}
 
 static inline
 void cpufreq_cooling_unregister(struct thermal_cooling_device *cdev)
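
A usage sketch for the platform variant; the assumption that the u32 passed to ->ceil_limit is a frequency cap in kHz is not stated by the header, and the demo_* names are illustrative:

#include <linux/cpu_cooling.h>
#include <linux/cpumask.h>

static int demo_ceil_limit(int cpu, u32 max_freq)
{
	/* clamp @cpu's frequency below @max_freq */
	return 0;
}

static int demo_get_cur_state(int cpu, unsigned long *state)
{
	*state = 0;	/* report the current throttle state */
	return 0;
}

static struct cpu_cooling_ops demo_ops = {
	.ceil_limit	= demo_ceil_limit,
	.get_cur_state	= demo_get_cur_state,
};

static struct thermal_cooling_device *demo_register(void)
{
	return cpufreq_platform_cooling_register(cpu_possible_mask,
						 &demo_ops);
}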
diff -ruw linux-4.4.115/include/linux/cpufreq.h linux-4.4.115-fbx/include/linux/cpufreq.h
--- linux-4.4.115/include/linux/cpufreq.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/linux/cpufreq.h	2019-10-29 09:26:25.425220617 +0100
@@ -107,6 +107,30 @@
 	 */
 	struct rw_semaphore	rwsem;
 
+
+	/*
+	 * Fast switch flags:
+	 * - fast_switch_possible should be set by the driver if it can
+	 *   guarantee that frequency can be changed on any CPU sharing the
+ *   policy and that the change will then affect all of the policy CPUs.
+	 * - fast_switch_enabled is to be set by governors that support fast
+ *   frequency switching with the help of cpufreq_enable_fast_switch().
+	 */
+	bool                    fast_switch_possible;
+	bool                    fast_switch_enabled;
+
+	/*
+	 * Preferred average time interval between consecutive invocations of
+	 * the driver to set the frequency for this policy.  To be set by the
+	 * scaling driver (0, which is the default, means no preference).
+	 */
+	unsigned int		up_transition_delay_us;
+	unsigned int		down_transition_delay_us;
+
+	 /* Cached frequency lookup from cpufreq_driver_resolve_freq. */
+	unsigned int cached_target_freq;
+	int cached_resolved_idx;
+
 	/* Synchronization for frequency transitions */
 	bool			transition_ongoing; /* Tracks transition status */
 	spinlock_t		transition_lock;
@@ -160,6 +184,7 @@
 int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu);
 int cpufreq_update_policy(unsigned int cpu);
 bool have_governor_per_policy(void);
+bool cpufreq_driver_is_slow(void);
 struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy);
 #else
 static inline unsigned int cpufreq_get(unsigned int cpu)
@@ -317,6 +342,14 @@
  */
 #define CPUFREQ_NEED_INITIAL_FREQ_CHECK	(1 << 5)
 
+/*
+ * Indicates that it is safe to call cpufreq_driver_target from
+ * non-interruptible context in scheduler hot paths.  Drivers must
+ * opt in to this flag, as the safe default is that they might sleep
+ * or be too slow for hot path use.
+ */
+#define CPUFREQ_DRIVER_FAST		(1 << 6)
+
 int cpufreq_register_driver(struct cpufreq_driver *driver_data);
 int cpufreq_unregister_driver(struct cpufreq_driver *driver_data);
 
@@ -361,6 +394,7 @@
 
 #define CPUFREQ_TRANSITION_NOTIFIER	(0)
 #define CPUFREQ_POLICY_NOTIFIER		(1)
+#define CPUFREQ_GOVINFO_NOTIFIER	(2)
 
 /* Transition notifiers */
 #define CPUFREQ_PRECHANGE		(0)
@@ -373,6 +407,9 @@
 #define CPUFREQ_CREATE_POLICY		(3)
 #define CPUFREQ_REMOVE_POLICY		(4)
 
+/* Govinfo Notifiers */
+#define CPUFREQ_LOAD_CHANGE		(0)
+
 #ifdef CONFIG_CPU_FREQ
 int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list);
 int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list);
@@ -381,6 +418,16 @@
 		struct cpufreq_freqs *freqs);
 void cpufreq_freq_transition_end(struct cpufreq_policy *policy,
 		struct cpufreq_freqs *freqs, int transition_failed);
+/*
+ * Governor specific info that can be passed to modules that subscribe
+ * to CPUFREQ_GOVINFO_NOTIFIER
+ */
+struct cpufreq_govinfo {
+	unsigned int cpu;
+	unsigned int load;
+	unsigned int sampling_rate_us;
+};
+extern struct atomic_notifier_head cpufreq_govinfo_notifier_list;
 
 #else /* CONFIG_CPU_FREQ */
 static inline int cpufreq_register_notifier(struct notifier_block *nb,
@@ -462,6 +509,8 @@
 int __cpufreq_driver_target(struct cpufreq_policy *policy,
 				   unsigned int target_freq,
 				   unsigned int relation);
+unsigned int cpufreq_driver_resolve_freq(struct cpufreq_policy *policy,
+					 unsigned int target_freq);
 int cpufreq_register_governor(struct cpufreq_governor *governor);
 void cpufreq_unregister_governor(struct cpufreq_governor *governor);
 
@@ -487,8 +536,48 @@
 #elif defined(CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE)
 extern struct cpufreq_governor cpufreq_gov_conservative;
 #define CPUFREQ_DEFAULT_GOVERNOR	(&cpufreq_gov_conservative)
+#elif defined(CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE)
+extern struct cpufreq_governor cpufreq_gov_interactive;
+#define CPUFREQ_DEFAULT_GOVERNOR	(&cpufreq_gov_interactive)
+#elif defined(CONFIG_CPU_FREQ_DEFAULT_GOV_SCHED)
+extern struct cpufreq_governor cpufreq_gov_sched;
+#define CPUFREQ_DEFAULT_GOVERNOR	(&cpufreq_gov_sched)
+#elif defined(CONFIG_CPU_FREQ_DEFAULT_GOV_SCHEDUTIL)
+extern struct cpufreq_governor cpufreq_gov_schedutil;
+#define CPUFREQ_DEFAULT_GOVERNOR	(&cpufreq_gov_schedutil)
 #endif
 
+static inline void cpufreq_policy_apply_limits(struct cpufreq_policy *policy)
+{
+	if (policy->max < policy->cur)
+		__cpufreq_driver_target(policy, policy->max, CPUFREQ_RELATION_H);
+	else if (policy->min > policy->cur)
+		__cpufreq_driver_target(policy, policy->min, CPUFREQ_RELATION_L);
+}
+
+/* Governor attribute set */
+struct gov_attr_set {
+	struct kobject kobj;
+	struct list_head policy_list;
+	struct mutex update_lock;
+	int usage_count;
+};
+
+/* sysfs ops for cpufreq governors */
+extern const struct sysfs_ops governor_sysfs_ops;
+
+void gov_attr_set_init(struct gov_attr_set *attr_set, struct list_head *list_node);
+void gov_attr_set_get(struct gov_attr_set *attr_set, struct list_head *list_node);
+unsigned int gov_attr_set_put(struct gov_attr_set *attr_set, struct list_head *list_node);
+
+/* Governor sysfs attribute */
+struct governor_attr {
+	struct attribute attr;
+	ssize_t (*show)(struct gov_attr_set *attr_set, char *buf);
+	ssize_t (*store)(struct gov_attr_set *attr_set, const char *buf,
+			 size_t count);
+};
+
 /*********************************************************************
  *                     FREQUENCY TABLE HELPERS                       *
  *********************************************************************/
@@ -616,4 +705,8 @@
 int cpufreq_generic_init(struct cpufreq_policy *policy,
 		struct cpufreq_frequency_table *table,
 		unsigned int transition_latency);
+
+struct sched_domain;
+unsigned long cpufreq_scale_freq_capacity(struct sched_domain *sd, int cpu);
+unsigned long cpufreq_scale_max_freq_capacity(int cpu);
 #endif /* _LINUX_CPUFREQ_H */
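
The gov_attr_set/governor_attr pair is the pattern schedutil-style governors use for per-policy sysfs tunables; a minimal sketch (demo names assumed, container layout is the usual container_of trick):

#include <linux/cpufreq.h>
#include <linux/kernel.h>
#include <linux/sysfs.h>

struct demo_tunables {
	struct gov_attr_set attr_set;
	unsigned int rate_limit_us;
};

static struct demo_tunables *to_demo_tunables(struct gov_attr_set *s)
{
	return container_of(s, struct demo_tunables, attr_set);
}

static ssize_t rate_limit_us_show(struct gov_attr_set *attr_set, char *buf)
{
	return sprintf(buf, "%u\n", to_demo_tunables(attr_set)->rate_limit_us);
}

static ssize_t rate_limit_us_store(struct gov_attr_set *attr_set,
				   const char *buf, size_t count)
{
	struct demo_tunables *t = to_demo_tunables(attr_set);

	if (kstrtouint(buf, 10, &t->rate_limit_us))
		return -EINVAL;
	return count;
}

static struct governor_attr rate_limit_us =
	__ATTR(rate_limit_us, 0644, rate_limit_us_show, rate_limit_us_store);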
diff -ruw linux-4.4.115/include/linux/cpu.h linux-4.4.115-fbx/include/linux/cpu.h
--- linux-4.4.115/include/linux/cpu.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/linux/cpu.h	2019-10-29 09:26:25.425220617 +0100
@@ -27,6 +27,19 @@
 	struct device dev;
 };
 
+struct cpu_pstate_pwr {
+	unsigned int freq;
+	uint32_t power;
+};
+
+struct cpu_pwr_stats {
+	int cpu;
+	long temp;
+	struct cpu_pstate_pwr *ptable;
+	bool throttling;
+	int len;
+};
+
 extern int register_cpu(struct cpu *cpu, int num);
 extern struct device *get_cpu_device(unsigned cpu);
 extern bool cpu_is_hotpluggable(unsigned cpu);
@@ -228,6 +241,7 @@
 extern void cpu_hotplug_begin(void);
 extern void cpu_hotplug_done(void);
 extern void get_online_cpus(void);
+extern void cpu_hotplug_mutex_held(void);
 extern void put_online_cpus(void);
 extern void cpu_hotplug_disable(void);
 extern void cpu_hotplug_enable(void);
@@ -250,6 +264,7 @@
 #define cpu_hotplug_enable()	do { } while (0)
 #define hotcpu_notifier(fn, pri)	do { (void)(fn); } while (0)
 #define __hotcpu_notifier(fn, pri)	do { (void)(fn); } while (0)
+#define cpu_hotplug_mutex_held()	do { } while (0)
 /* These aren't inline functions due to a GCC bug. */
 #define register_hotcpu_notifier(nb)	({ (void)(nb); 0; })
 #define __register_hotcpu_notifier(nb)	({ (void)(nb); 0; })
@@ -265,6 +280,9 @@
 static inline void enable_nonboot_cpus(void) {}
 #endif /* !CONFIG_PM_SLEEP_SMP */
 
+struct cpu_pwr_stats *get_cpu_pwr_stats(void);
+void trigger_cpu_pwr_stats_calc(void);
+
 enum cpuhp_state {
 	CPUHP_OFFLINE,
 	CPUHP_ONLINE,
@@ -290,4 +308,11 @@
 bool cpu_report_death(void);
 #endif /* #ifdef CONFIG_HOTPLUG_CPU */
 
+#define IDLE_START 1
+#define IDLE_END 2
+
+void idle_notifier_register(struct notifier_block *n);
+void idle_notifier_unregister(struct notifier_block *n);
+void idle_notifier_call_chain(unsigned long val);
+
 #endif /* _LINUX_CPU_H_ */
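
A sketch of a consumer of the idle notifier chain exported above (the demo names are hypothetical):

#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/printk.h>

static int demo_idle_cb(struct notifier_block *nb, unsigned long val,
			void *data)
{
	if (val == IDLE_START)
		pr_debug("entering idle\n");
	else if (val == IDLE_END)
		pr_debug("leaving idle\n");
	return NOTIFY_OK;
}

static struct notifier_block demo_idle_nb = {
	.notifier_call = demo_idle_cb,
};

static int __init demo_init(void)
{
	idle_notifier_register(&demo_idle_nb);
	return 0;
}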
diff -ruw linux-4.4.115/include/linux/cpuidle.h linux-4.4.115-fbx/include/linux/cpuidle.h
--- linux-4.4.115/include/linux/cpuidle.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/linux/cpuidle.h	2019-10-29 09:26:25.425220617 +0100
@@ -204,7 +204,7 @@
 #endif
 
 /* kernel/sched/idle.c */
-extern void sched_idle_set_state(struct cpuidle_state *idle_state);
+extern void sched_idle_set_state(struct cpuidle_state *idle_state, int index);
 extern void default_idle_call(void);
 
 #ifdef CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED
diff -ruw linux-4.4.115/include/linux/cpumask.h linux-4.4.115-fbx/include/linux/cpumask.h
--- linux-4.4.115/include/linux/cpumask.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/linux/cpumask.h	2019-10-29 09:26:25.425220617 +0100
@@ -53,6 +53,7 @@
  *     cpu_present_mask - has bit 'cpu' set iff cpu is populated
  *     cpu_online_mask  - has bit 'cpu' set iff cpu available to scheduler
  *     cpu_active_mask  - has bit 'cpu' set iff cpu available to migration
+ *     cpu_isolated_mask- has bit 'cpu' set iff cpu isolated
  *
  *  If !CONFIG_HOTPLUG_CPU, present == possible, and active == online.
  *
@@ -89,25 +90,38 @@
 extern const struct cpumask *const cpu_online_mask;
 extern const struct cpumask *const cpu_present_mask;
 extern const struct cpumask *const cpu_active_mask;
+extern const struct cpumask *const cpu_isolated_mask;
 
 #if NR_CPUS > 1
 #define num_online_cpus()	cpumask_weight(cpu_online_mask)
 #define num_possible_cpus()	cpumask_weight(cpu_possible_mask)
 #define num_present_cpus()	cpumask_weight(cpu_present_mask)
 #define num_active_cpus()	cpumask_weight(cpu_active_mask)
+#define num_isolated_cpus()	cpumask_weight(cpu_isolated_mask)
+#define num_online_uniso_cpus()						\
+({									\
+	cpumask_t mask;							\
+									\
+	cpumask_andnot(&mask, cpu_online_mask, cpu_isolated_mask);	\
+	cpumask_weight(&mask);						\
+})
 #define cpu_online(cpu)		cpumask_test_cpu((cpu), cpu_online_mask)
 #define cpu_possible(cpu)	cpumask_test_cpu((cpu), cpu_possible_mask)
 #define cpu_present(cpu)	cpumask_test_cpu((cpu), cpu_present_mask)
 #define cpu_active(cpu)		cpumask_test_cpu((cpu), cpu_active_mask)
+#define cpu_isolated(cpu)	cpumask_test_cpu((cpu), cpu_isolated_mask)
 #else
 #define num_online_cpus()	1U
 #define num_possible_cpus()	1U
 #define num_present_cpus()	1U
 #define num_active_cpus()	1U
+#define num_isolated_cpus()	0U
+#define num_online_uniso_cpus()	1U
 #define cpu_online(cpu)		((cpu) == 0)
 #define cpu_possible(cpu)	((cpu) == 0)
 #define cpu_present(cpu)	((cpu) == 0)
 #define cpu_active(cpu)		((cpu) == 0)
+#define cpu_isolated(cpu)	((cpu) != 0)
 #endif
 
 /* verify cpu argument to cpumask_* operators */
@@ -714,12 +728,14 @@
 #define for_each_possible_cpu(cpu) for_each_cpu((cpu), cpu_possible_mask)
 #define for_each_online_cpu(cpu)   for_each_cpu((cpu), cpu_online_mask)
 #define for_each_present_cpu(cpu)  for_each_cpu((cpu), cpu_present_mask)
+#define for_each_isolated_cpu(cpu) for_each_cpu((cpu), cpu_isolated_mask)
 
 /* Wrappers for arch boot code to manipulate normally-constant masks */
 void set_cpu_possible(unsigned int cpu, bool possible);
 void set_cpu_present(unsigned int cpu, bool present);
 void set_cpu_online(unsigned int cpu, bool online);
 void set_cpu_active(unsigned int cpu, bool active);
+void set_cpu_isolated(unsigned int cpu, bool isolated);
 void init_cpu_present(const struct cpumask *src);
 void init_cpu_possible(const struct cpumask *src);
 void init_cpu_online(const struct cpumask *src);
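
An open-coded equivalent of num_online_uniso_cpus(), just to spell out what "online but not isolated" means:

#include <linux/cpumask.h>

static unsigned int demo_usable_cpus(void)
{
	unsigned int cpu, n = 0;

	for_each_online_cpu(cpu) {
		if (cpu_isolated(cpu))
			continue;	/* isolated CPUs don't count */
		n++;
	}
	return n;	/* same value as num_online_uniso_cpus() */
}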
diff -ruw linux-4.4.115/include/linux/cpu_pm.h linux-4.4.115-fbx/include/linux/cpu_pm.h
--- linux-4.4.115/include/linux/cpu_pm.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/linux/cpu_pm.h	2019-01-22 16:16:28.207289004 +0100
@@ -71,8 +71,8 @@
 int cpu_pm_unregister_notifier(struct notifier_block *nb);
 int cpu_pm_enter(void);
 int cpu_pm_exit(void);
-int cpu_cluster_pm_enter(void);
-int cpu_cluster_pm_exit(void);
+int cpu_cluster_pm_enter(unsigned long aff_level);
+int cpu_cluster_pm_exit(unsigned long aff_level);
 
 #else
 
@@ -96,12 +96,12 @@
 	return 0;
 }
 
-static inline int cpu_cluster_pm_enter(void)
+static inline int cpu_cluster_pm_enter(unsigned long aff_level)
 {
 	return 0;
 }
 
-static inline int cpu_cluster_pm_exit(void)
+static inline int cpu_cluster_pm_exit(unsigned long aff_level)
 {
 	return 0;
 }
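
A sketch of a cpuidle back end using the widened cluster-PM hooks; reading aff_level as a PSCI-style affinity level is an assumption of this example:

#include <linux/cpu_pm.h>

static void demo_cluster_down(unsigned long aff_level)
{
	if (cpu_cluster_pm_enter(aff_level))
		return;		/* a notifier vetoed the transition */

	/* ... program the power controller for this level ... */

	cpu_cluster_pm_exit(aff_level);
}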
diff -ruw linux-4.4.115/include/linux/crypto.h linux-4.4.115-fbx/include/linux/crypto.h
--- linux-4.4.115/include/linux/crypto.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/linux/crypto.h	2019-10-29 09:26:25.425220617 +0100
@@ -24,6 +24,7 @@
 #include <linux/slab.h>
 #include <linux/string.h>
 #include <linux/uaccess.h>
+#include <linux/completion.h>
 
 /*
  * Autoloaded crypto modules should only use a prefixed name to avoid allowing
@@ -470,6 +471,45 @@
 } CRYPTO_MINALIGN_ATTR;
 
 /*
+ * A helper struct for waiting for completion of async crypto ops
+ */
+struct crypto_wait {
+	struct completion completion;
+	int err;
+};
+
+/*
+ * Macro for declaring a crypto op async wait object on stack
+ */
+#define DECLARE_CRYPTO_WAIT(_wait) \
+	struct crypto_wait _wait = { \
+		COMPLETION_INITIALIZER_ONSTACK((_wait).completion), 0 }
+
+/*
+ * Async ops completion helper functions
+ */
+void crypto_req_done(struct crypto_async_request *req, int err);
+
+static inline int crypto_wait_req(int err, struct crypto_wait *wait)
+{
+	switch (err) {
+	case -EINPROGRESS:
+	case -EBUSY:
+		wait_for_completion(&wait->completion);
+		reinit_completion(&wait->completion);
+		err = wait->err;
+		break;
+	}
+
+	return err;
+}
+
+static inline void crypto_init_wait(struct crypto_wait *wait)
+{
+	init_completion(&wait->completion);
+}
+
+/*
  * Algorithm registration interface.
  */
 int crypto_register_alg(struct crypto_alg *alg);
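
The wait helpers follow the pattern later documented upstream; a minimal sketch with an ahash request, assuming req has been fully set up elsewhere:

#include <linux/crypto.h>
#include <crypto/hash.h>

static int demo_digest_sync(struct ahash_request *req)
{
	DECLARE_CRYPTO_WAIT(wait);

	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   crypto_req_done, &wait);

	/*
	 * crypto_wait_req() sleeps on -EINPROGRESS/-EBUSY and returns the
	 * final status that crypto_req_done() recorded in wait.err.
	 */
	return crypto_wait_req(crypto_ahash_digest(req), &wait);
}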
diff -ruw linux-4.4.115/include/linux/dcache.h linux-4.4.115-fbx/include/linux/dcache.h
--- linux-4.4.115/include/linux/dcache.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/linux/dcache.h	2019-01-22 16:16:28.215289076 +0100
@@ -162,6 +162,7 @@
 	int (*d_manage)(struct dentry *, bool);
 	struct inode *(*d_select_inode)(struct dentry *, unsigned);
 	struct dentry *(*d_real)(struct dentry *, struct inode *);
+	void (*d_canonical_path)(const struct path *, struct path *);
 } ____cacheline_aligned;
 
 /*
@@ -228,6 +229,7 @@
 #define DCACHE_MAY_FREE			0x00800000
 #define DCACHE_FALLTHRU			0x01000000 /* Fall through to lower layer */
 #define DCACHE_OP_SELECT_INODE		0x02000000 /* Unioned entry: dcache op selects inode */
+#define DCACHE_ENCRYPTED_WITH_KEY	0x04000000 /* dir is encrypted with a valid key */
 #define DCACHE_OP_REAL			0x08000000
 
 extern seqlock_t rename_lock;
diff -ruw linux-4.4.115/include/linux/devfreq_cooling.h linux-4.4.115-fbx/include/linux/devfreq_cooling.h
--- linux-4.4.115/include/linux/devfreq_cooling.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/linux/devfreq_cooling.h	2019-01-22 16:16:28.215289076 +0100
@@ -4,6 +4,8 @@
  *
  * Copyright (C) 2014-2015 ARM Limited
  *
+ * Copyright (c) 2018 The Linux Foundation. All rights reserved.
+ *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
@@ -20,7 +22,6 @@
 #include <linux/devfreq.h>
 #include <linux/thermal.h>
 
-#ifdef CONFIG_DEVFREQ_THERMAL
 
 /**
  * struct devfreq_cooling_power - Devfreq cooling power ops
@@ -43,6 +44,8 @@
 	unsigned long dyn_power_coeff;
 };
 
+#ifdef CONFIG_DEVFREQ_THERMAL
+
 struct thermal_cooling_device *
 of_devfreq_cooling_register_power(struct device_node *np, struct devfreq *df,
 				  struct devfreq_cooling_power *dfc_power);
diff -ruw linux-4.4.115/include/linux/devfreq.h linux-4.4.115-fbx/include/linux/devfreq.h
--- linux-4.4.115/include/linux/devfreq.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/linux/devfreq.h	2019-01-22 16:16:28.215289076 +0100
@@ -5,6 +5,8 @@
  * Copyright (C) 2011 Samsung Electronics
  *	MyungJoo Ham <myungjoo.ham@samsung.com>
  *
+ * Copyright (c) 2018 The Linux Foundation. All rights reserved.
+ *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
@@ -52,6 +54,10 @@
  */
 #define DEVFREQ_FLAG_LEAST_UPPER_BOUND		0x1
 
+#define DEVFREQ_FLAG_WAKEUP_MAXFREQ		0x2
+#define DEVFREQ_FLAG_FAST_HINT			0x4
+#define DEVFREQ_FLAG_SLOW_HINT			0x8
+
 /**
  * struct devfreq_dev_profile - Devfreq's user device profile
  * @initial_freq:	The operating frequency when devfreq_add_device() is
@@ -114,7 +120,8 @@
 	struct list_head node;
 
 	const char name[DEVFREQ_NAME_LEN];
-	int (*get_target_freq)(struct devfreq *this, unsigned long *freq);
+	int (*get_target_freq)(struct devfreq *this, unsigned long *freq,
+				u32 *flag);
 	int (*event_handler)(struct devfreq *devfreq,
 				unsigned int event, void *data);
 };
@@ -196,6 +203,14 @@
 extern int devfreq_suspend_device(struct devfreq *devfreq);
 extern int devfreq_resume_device(struct devfreq *devfreq);
 
+/**
+ * update_devfreq() - Reevaluate the device and configure frequency
+ * @devfreq:	the devfreq device
+ *
+ * Note: devfreq->lock must be held
+ */
+extern int update_devfreq(struct devfreq *devfreq);
+
 /* Helper functions for devfreq user device driver with OPP. */
 extern struct dev_pm_opp *devfreq_recommended_opp(struct device *dev,
 					   unsigned long *freq, u32 flags);
@@ -231,6 +246,9 @@
  *			the governor may consider slowing the frequency down.
  *			Specify 0 to use the default. Valid value = 0 to 100.
  *			downdifferential < upthreshold must hold.
+ * @simple_scaling:	Setting this flag will scale the clocks up only if the
+ *			load is above @upthreshold and will scale the clocks
+ *			down only if the load is below @downdifferential.
  *
  * If the fed devfreq_simple_ondemand_data pointer is NULL to the governor,
  * the governor uses the default values.
@@ -238,6 +256,7 @@
 struct devfreq_simple_ondemand_data {
 	unsigned int upthreshold;
 	unsigned int downdifferential;
+	unsigned int simple_scaling;
 };
 #endif
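
How a driver might opt in to the stricter thresholds, passed as the governor data argument of devfreq_add_device() (the values are illustrative):

#include <linux/devfreq.h>

static struct devfreq_simple_ondemand_data demo_ondemand = {
	.upthreshold		= 90,	/* scale up only above 90% load */
	.downdifferential	= 10,	/* scale down only below 10% load */
	.simple_scaling		= 1,	/* enable the stricter behaviour */
};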
 
diff -ruw linux-4.4.115/include/linux/device.h linux-4.4.115-fbx/include/linux/device.h
--- linux-4.4.115/include/linux/device.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/linux/device.h	2019-10-29 09:26:25.429220656 +0100
@@ -683,6 +683,18 @@
 int devm_add_action(struct device *dev, void (*action)(void *), void *data);
 void devm_remove_action(struct device *dev, void (*action)(void *), void *data);
 
+static inline int devm_add_action_or_reset(struct device *dev,
+					   void (*action)(void *), void *data)
+{
+	int ret;
+
+	ret = devm_add_action(dev, action, data);
+	if (ret)
+		action(data);
+
+	return ret;
+}
+
 struct device_dma_parameters {
 	/*
 	 * a low level driver may set these to teach IOMMU code about
@@ -817,6 +829,7 @@
 	struct cma *cma_area;		/* contiguous memory area for dma
 					   allocations */
 #endif
+	struct removed_region *removed_mem;
 	/* arch specific additions */
 	struct dev_archdata	archdata;
 
@@ -1012,6 +1025,7 @@
 extern void lock_device_hotplug(void);
 extern void unlock_device_hotplug(void);
 extern int lock_device_hotplug_sysfs(void);
+extern void lock_device_hotplug_assert(void);
 extern int device_offline(struct device *dev);
 extern int device_online(struct device *dev);
 extern void set_primary_fwnode(struct device *dev, struct fwnode_handle *fwnode);
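
devm_add_action_or_reset() mirrors the upstream helper of the same name: if registering the cleanup fails, the action runs immediately so nothing leaks. A typical use, with a clock as the managed resource:

#include <linux/clk.h>
#include <linux/device.h>

static void demo_clk_off(void *data)
{
	clk_disable_unprepare(data);
}

static int demo_enable_clk(struct device *dev, struct clk *clk)
{
	int ret = clk_prepare_enable(clk);

	if (ret)
		return ret;

	/* on failure this disables the clock before returning the error */
	return devm_add_action_or_reset(dev, demo_clk_off, clk);
}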
diff -ruw linux-4.4.115/include/linux/device-mapper.h linux-4.4.115-fbx/include/linux/device-mapper.h
--- linux-4.4.115/include/linux/device-mapper.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/linux/device-mapper.h	2019-10-29 09:26:25.425220617 +0100
@@ -383,6 +383,12 @@
 void *dm_get_mdptr(struct mapped_device *md);
 
 /*
+ * Export the device via the ioctl interface (uses mdptr).
+ */
+int dm_ioctl_export(struct mapped_device *md, const char *name,
+		    const char *uuid);
+
+/*
  * A device can still be used while suspended, but I/O is deferred.
  */
 int dm_suspend(struct mapped_device *md, unsigned suspend_flags);
@@ -409,6 +415,13 @@
 
 struct queue_limits *dm_get_queue_limits(struct mapped_device *md);
 
+void dm_lock_md_type(struct mapped_device *md);
+void dm_unlock_md_type(struct mapped_device *md);
+void dm_set_md_type(struct mapped_device *md, unsigned type);
+unsigned dm_get_md_type(struct mapped_device *md);
+int dm_setup_md_queue(struct mapped_device *md);
+unsigned dm_table_get_type(struct dm_table *t);
+
 /*
  * Geometry functions.
  */
@@ -603,4 +616,11 @@
 	return (n << SECTOR_SHIFT);
 }
 
+/*-----------------------------------------------------------------
+ * Helper for block layer and dm core operations
+ *-----------------------------------------------------------------
+ */
+void dm_dispatch_request(struct request *rq);
+void dm_kill_unmapped_request(struct request *rq, int error);
+void dm_end_request(struct request *clone, int error);
 #endif	/* _LINUX_DEVICE_MAPPER_H */
diff -ruw linux-4.4.115/include/linux/dma-attrs.h linux-4.4.115-fbx/include/linux/dma-attrs.h
--- linux-4.4.115/include/linux/dma-attrs.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/linux/dma-attrs.h	2019-01-22 16:16:28.219289113 +0100
@@ -18,6 +18,12 @@
 	DMA_ATTR_NO_KERNEL_MAPPING,
 	DMA_ATTR_SKIP_CPU_SYNC,
 	DMA_ATTR_FORCE_CONTIGUOUS,
+	DMA_ATTR_STRONGLY_ORDERED,
+	DMA_ATTR_SKIP_ZEROING,
+	DMA_ATTR_NO_DELAYED_UNMAP,
+	DMA_ATTR_EXEC_MAPPING,
+	DMA_ATTR_FORCE_COHERENT,
+	DMA_ATTR_FORCE_NON_COHERENT,
 	DMA_ATTR_MAX,
 };
 
diff -ruw linux-4.4.115/include/linux/dma-mapping.h linux-4.4.115-fbx/include/linux/dma-mapping.h
--- linux-4.4.115/include/linux/dma-mapping.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/linux/dma-mapping.h	2019-01-22 16:16:28.219289113 +0100
@@ -61,6 +61,10 @@
 	int (*mapping_error)(struct device *dev, dma_addr_t dma_addr);
 	int (*dma_supported)(struct device *dev, u64 mask);
 	int (*set_dma_mask)(struct device *dev, u64 mask);
+	void *(*remap)(struct device *dev, void *cpu_addr, dma_addr_t handle,
+			size_t size, struct dma_attrs *attrs);
+	void (*unremap)(struct device *dev, void *remapped_address,
+			size_t size);
 #ifdef ARCH_HAS_DMA_GET_REQUIRED_MASK
 	u64 (*get_required_mask)(struct device *dev);
 #endif
@@ -89,6 +93,40 @@
 #include <asm-generic/dma-mapping-broken.h>
 #endif
 
+#ifndef CONFIG_NO_DMA
+static inline void *dma_remap(struct device *dev, void *cpu_addr,
+		dma_addr_t dma_handle, size_t size, struct dma_attrs *attrs)
+{
+	const struct dma_map_ops *ops = get_dma_ops(dev);
+	BUG_ON(!ops);
+
+	if (!ops->remap) {
+		WARN_ONCE(1, "Remap function not implemented for %pS\n",
+				ops->remap);
+		return NULL;
+	}
+
+	return ops->remap(dev, cpu_addr, dma_handle, size, attrs);
+}
+
+
+static inline void dma_unremap(struct device *dev, void *remapped_addr,
+				size_t size)
+{
+	const struct dma_map_ops *ops = get_dma_ops(dev);
+	BUG_ON(!ops);
+
+	if (!ops->unremap) {
+		WARN_ONCE(1, "unremap function not implemented for %pS\n",
+				ops->unremap);
+		return;
+	}
+
+	return ops->unremap(dev, remapped_addr, size);
+}
+#endif
+
+
 static inline u64 dma_get_mask(struct device *dev)
 {
 	if (dev && dev->dma_mask && *dev->dma_mask)
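
A sketch of a caller pairing the two hooks, e.g. to peek at a buffer allocated with DMA_ATTR_NO_KERNEL_MAPPING (error handling kept minimal):

#include <linux/dma-mapping.h>

static int demo_peek(struct device *dev, void *cpu_addr, dma_addr_t handle,
		     size_t size, struct dma_attrs *attrs)
{
	void *va = dma_remap(dev, cpu_addr, handle, size, attrs);

	if (!va)
		return -ENOMEM;	/* backend provides no ->remap */

	/* ... inspect the buffer through va ... */

	dma_unremap(dev, va, size);
	return 0;
}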
diff -ruw linux-4.4.115/include/linux/efi.h linux-4.4.115-fbx/include/linux/efi.h
--- linux-4.4.115/include/linux/efi.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/linux/efi.h	2019-10-29 09:26:25.429220656 +0100
@@ -299,7 +299,7 @@
 	void *open_protocol_information;
 	void *protocols_per_handle;
 	void *locate_handle_buffer;
-	void *locate_protocol;
+	efi_status_t (*locate_protocol)(efi_guid_t *, void *, void **);
 	void *install_multiple_protocol_interfaces;
 	void *uninstall_multiple_protocol_interfaces;
 	void *calculate_crc32;
@@ -599,6 +599,10 @@
 #define EFI_PROPERTIES_TABLE_GUID \
     EFI_GUID(  0x880aaca3, 0x4adc, 0x4a04, 0x90, 0x79, 0xb7, 0x47, 0x34, 0x08, 0x25, 0xe5 )
 
+#define EFI_RNG_PROTOCOL_GUID \
+	EFI_GUID(0x3152bca5, 0xeade, 0x433d, \
+		 0x86, 0x2e, 0xc0, 0x1c, 0xdc, 0x29, 0x1f, 0x44)
+
 typedef struct {
 	efi_guid_t guid;
 	u64 table;
@@ -1288,7 +1292,7 @@
 				  unsigned long *load_addr,
 				  unsigned long *load_size);
 
-efi_status_t efi_parse_options(char *cmdline);
+efi_status_t efi_parse_options(char const *cmdline);
 
 bool efi_runtime_disabled(void);
 #endif /* _LINUX_EFI_H */
diff -ruw linux-4.4.115/include/linux/extcon.h linux-4.4.115-fbx/include/linux/extcon.h
--- linux-4.4.115/include/linux/extcon.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/linux/extcon.h	2019-10-29 09:26:25.429220656 +0100
@@ -55,6 +55,12 @@
 #define EXTCON_JACK_SPDIF_IN	26	/* Sony Philips Digital InterFace */
 #define EXTCON_JACK_SPDIF_OUT	27
 
+/* connector orientation: 0 - CC1, 1 - CC2 */
+#define EXTCON_USB_CC		28
+
+/* connector speed: 0 - High Speed, 1 - Super Speed */
+#define EXTCON_USB_SPEED	29
+
 /* Display external connector */
 #define EXTCON_DISP_HDMI	40	/* High-Definition Multimedia Interface */
 #define EXTCON_DISP_MHL		41	/* Mobile High-Definition Link */
diff -ruw linux-4.4.115/include/linux/fb.h linux-4.4.115-fbx/include/linux/fb.h
--- linux-4.4.115/include/linux/fb.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/linux/fb.h	2019-01-22 16:16:28.227289185 +0100
@@ -288,10 +288,18 @@
 	int (*fb_ioctl)(struct fb_info *info, unsigned int cmd,
 			unsigned long arg);
 
+	/* perform fb specific ioctl v2 (optional) - provides file param */
+	int (*fb_ioctl_v2)(struct fb_info *info, unsigned int cmd,
+			unsigned long arg, struct file *file);
+
 	/* Handle 32bit compat ioctl (optional) */
 	int (*fb_compat_ioctl)(struct fb_info *info, unsigned cmd,
 			unsigned long arg);
 
+	/* Handle 32bit compat ioctl (optional) */
+	int (*fb_compat_ioctl_v2)(struct fb_info *info, unsigned cmd,
+			unsigned long arg, struct file *file);
+
 	/* perform fb specific mmap */
 	int (*fb_mmap)(struct fb_info *info, struct vm_area_struct *vma);
 
@@ -460,17 +468,8 @@
 	struct fb_cmap cmap;		/* Current cmap */
 	struct list_head modelist;      /* mode list */
 	struct fb_videomode *mode;	/* current mode */
+	struct file *file;		/* current file node */
 
-#ifdef CONFIG_FB_BACKLIGHT
-	/* assigned backlight device */
-	/* set before framebuffer registration, 
-	   remove after unregister */
-	struct backlight_device *bl_dev;
-
-	/* Backlight level curve */
-	struct mutex bl_curve_mutex;	
-	u8 bl_curve[FB_BACKLIGHT_LEVELS];
-#endif
 #ifdef CONFIG_FB_DEFERRED_IO
 	struct delayed_work deferred_work;
 	struct fb_deferred_io *fbdefio;
diff -ruw linux-4.4.115/include/linux/fence.h linux-4.4.115-fbx/include/linux/fence.h
--- linux-4.4.115/include/linux/fence.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/linux/fence.h	2019-01-22 16:16:28.227289185 +0100
@@ -107,6 +107,7 @@
  * @get_driver_name: returns the driver name.
  * @get_timeline_name: return the name of the context this fence belongs to.
  * @enable_signaling: enable software signaling of fence.
+ * @disable_signaling: disable software signaling of fence (optional).
  * @signaled: [optional] peek whether the fence is signaled, can be null.
  * @wait: custom wait implementation, or fence_default_wait.
  * @release: [optional] called on destruction of fence, can be null
@@ -166,6 +167,7 @@
 	const char * (*get_driver_name)(struct fence *fence);
 	const char * (*get_timeline_name)(struct fence *fence);
 	bool (*enable_signaling)(struct fence *fence);
+	void (*disable_signaling)(struct fence *fence);
 	bool (*signaled)(struct fence *fence);
 	signed long (*wait)(struct fence *fence, bool intr, signed long timeout);
 	void (*release)(struct fence *fence);
diff -ruw linux-4.4.115/include/linux/firmware.h linux-4.4.115-fbx/include/linux/firmware.h
--- linux-4.4.115/include/linux/firmware.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/linux/firmware.h	2019-01-22 16:16:28.227289185 +0100
@@ -48,6 +48,20 @@
 int request_firmware_direct(const struct firmware **fw, const char *name,
 			    struct device *device);
 
+int request_firmware_into_buf(const char *name, struct device *device,
+			    phys_addr_t dest_addr, size_t dest_size,
+			    void * (*map_fw_mem)(phys_addr_t phys,
+						 size_t size, void *data),
+			    void (*unmap_fw_mem)(void *virt, size_t size,
+						 void *data),
+			    void *data);
+int request_firmware_nowait_into_buf(
+	struct module *module, bool uevent,
+	const char *name, struct device *device, gfp_t gfp, void *context,
+	void (*cont)(const struct firmware *fw, void *context),
+	phys_addr_t dest_addr, size_t dest_size,
+	void * (*map_fw_mem)(phys_addr_t phys, size_t size, void *data),
+	void (*unmap_fw_mem)(void *virt, size_t size, void *data), void *data);
 void release_firmware(const struct firmware *fw);
 #else
 static inline int request_firmware(const struct firmware **fw,
@@ -56,6 +70,19 @@
 {
 	return -EINVAL;
 }
+static inline int request_firmware_into_buf(const char *name,
+					  struct device *device,
+					  phys_addr_t dest_addr,
+					  size_t dest_size,
+					  void * (*map_fw_mem)(phys_addr_t phys,
+						       size_t size, void *data),
+					  void (*unmap_fw_mem)(void *virt,
+							       size_t size,
+							       void *data),
+					  void *data)
+{
+	return -EINVAL;
+}
 static inline int request_firmware_nowait(
 	struct module *module, bool uevent,
 	const char *name, struct device *device, gfp_t gfp, void *context,
@@ -63,7 +90,16 @@
 {
 	return -EINVAL;
 }
-
+static inline int request_firmware_nowait_into_buf(
+	struct module *module, bool uevent,
+	const char *name, struct device *device, gfp_t gfp, void *context,
+	void (*cont)(const struct firmware *fw, void *context),
+	phys_addr_t dest_addr, size_t dest_size,
+	void * (*map_fw_mem)(phys_addr_t phys, size_t size, void *data),
+	void (*unmap_fw_mem)(void *virt, size_t size, void *data), void *data)
+{
+	return -EINVAL;
+}
 static inline void release_firmware(const struct firmware *fw)
 {
 }
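
A sketch of loading firmware straight into a physical carveout with the new into-buf variant; memremap() with MEMREMAP_WB is one plausible choice of mapping helper, not something this header mandates, and the firmware name is made up:

#include <linux/firmware.h>
#include <linux/io.h>

static void *demo_map(phys_addr_t phys, size_t size, void *data)
{
	return memremap(phys, size, MEMREMAP_WB);
}

static void demo_unmap(void *virt, size_t size, void *data)
{
	memunmap(virt);
}

static int demo_load(struct device *dev, phys_addr_t dest, size_t len)
{
	return request_firmware_into_buf("demo-fw.bin", dev, dest, len,
					 demo_map, demo_unmap, NULL);
}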
diff -ruw linux-4.4.115/include/linux/freezer.h linux-4.4.115-fbx/include/linux/freezer.h
--- linux-4.4.115/include/linux/freezer.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/linux/freezer.h	2019-01-22 16:16:28.227289185 +0100
@@ -231,7 +231,7 @@
  * call this with locks held.
  */
 static inline int freezable_schedule_hrtimeout_range(ktime_t *expires,
-		unsigned long delta, const enum hrtimer_mode mode)
+		u64 delta, const enum hrtimer_mode mode)
 {
 	int __retval;
 	freezer_do_not_count();
diff -ruw linux-4.4.115/include/linux/fs.h linux-4.4.115-fbx/include/linux/fs.h
--- linux-4.4.115/include/linux/fs.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/linux/fs.h	2019-10-29 09:26:25.433220695 +0100
@@ -52,6 +52,8 @@
 struct seq_file;
 struct workqueue_struct;
 struct iov_iter;
+struct fscrypt_info;
+struct fscrypt_operations;
 
 extern void __init inode_init(void);
 extern void __init inode_init_early(void);
@@ -202,8 +204,15 @@
 #define WRITE_SYNC		(WRITE | REQ_SYNC | REQ_NOIDLE)
 #define WRITE_ODIRECT		(WRITE | REQ_SYNC)
 #define WRITE_FLUSH		(WRITE | REQ_SYNC | REQ_NOIDLE | REQ_FLUSH)
+#define WRITE_FLUSH_BARRIER	(WRITE | REQ_SYNC | REQ_NOIDLE | REQ_FLUSH | \
+					REQ_BARRIER)
 #define WRITE_FUA		(WRITE | REQ_SYNC | REQ_NOIDLE | REQ_FUA)
 #define WRITE_FLUSH_FUA		(WRITE | REQ_SYNC | REQ_NOIDLE | REQ_FLUSH | REQ_FUA)
+#define WRITE_POST_FLUSH_BARRIER	(WRITE | REQ_SYNC | REQ_NOIDLE | \
+					 REQ_POST_FLUSH_BARRIER | REQ_BARRIER)
+#define WRITE_ORDERED_FLUSH_BARRIER	(WRITE | REQ_SYNC | REQ_NOIDLE | \
+					 REQ_FLUSH | REQ_POST_FLUSH_BARRIER | \
+					 REQ_BARRIER)
 
 /*
  * Attribute flags.  These should be or-ed together to figure out what
@@ -398,6 +407,8 @@
 	 */
 	int (*migratepage) (struct address_space *,
 			struct page *, struct page *, enum migrate_mode);
+	bool (*isolate_page)(struct page *, isolate_mode_t);
+	void (*putback_page)(struct page *);
 	int (*launder_page) (struct page *);
 	int (*is_partially_uptodate) (struct page *, unsigned long,
 					unsigned long);
@@ -470,6 +481,7 @@
 	int			bd_invalidated;
 	struct gendisk *	bd_disk;
 	struct request_queue *  bd_queue;
+	struct backing_dev_info *bd_bdi;
 	struct list_head	bd_list;
 	/*
 	 * Private data.  You must have bd_claim'ed the block_device
@@ -677,6 +689,9 @@
 	struct hlist_head	i_fsnotify_marks;
 #endif
 
+#if IS_ENABLED(CONFIG_FS_ENCRYPTION)
+	struct fscrypt_info	*i_crypt_info;
+#endif
 	void			*i_private; /* fs or device private pointer */
 };
 
@@ -903,6 +918,10 @@
 	struct list_head	f_tfile_llink;
 #endif /* #ifdef CONFIG_EPOLL */
 	struct address_space	*f_mapping;
+
+#ifdef CONFIG_FILE_TABLE_DEBUG
+	struct hlist_node f_hash;
+#endif /* #ifdef CONFIG_FILE_TABLE_DEBUG */
 } __attribute__((aligned(4)));	/* lest something weird decides that 2 is OK */
 
 struct file_handle {
@@ -1337,6 +1356,8 @@
 #endif
 	const struct xattr_handler **s_xattr;
 
+	const struct fscrypt_operations	*s_cop;
+
 	struct hlist_bl_head	s_anon;		/* anonymous dentries for (nfs) exporting */
 	struct list_head	s_mounts;	/* list of mounts; _not_ for fs use */
 	struct block_device	*s_bdev;
@@ -1544,13 +1565,21 @@
  * VFS helper functions..
  */
 extern int vfs_create(struct inode *, struct dentry *, umode_t, bool);
+extern int vfs_create2(struct vfsmount *, struct inode *, struct dentry *, umode_t, bool);
 extern int vfs_mkdir(struct inode *, struct dentry *, umode_t);
+extern int vfs_mkdir2(struct vfsmount *, struct inode *, struct dentry *, umode_t);
 extern int vfs_mknod(struct inode *, struct dentry *, umode_t, dev_t);
+extern int vfs_mknod2(struct vfsmount *, struct inode *, struct dentry *, umode_t, dev_t);
 extern int vfs_symlink(struct inode *, struct dentry *, const char *);
+extern int vfs_symlink2(struct vfsmount *, struct inode *, struct dentry *, const char *);
 extern int vfs_link(struct dentry *, struct inode *, struct dentry *, struct inode **);
+extern int vfs_link2(struct vfsmount *, struct dentry *, struct inode *, struct dentry *, struct inode **);
 extern int vfs_rmdir(struct inode *, struct dentry *);
+extern int vfs_rmdir2(struct vfsmount *, struct inode *, struct dentry *);
 extern int vfs_unlink(struct inode *, struct dentry *, struct inode **);
+extern int vfs_unlink2(struct vfsmount *, struct inode *, struct dentry *, struct inode **);
 extern int vfs_rename(struct inode *, struct dentry *, struct inode *, struct dentry *, struct inode **, unsigned int);
+extern int vfs_rename2(struct vfsmount *, struct inode *, struct dentry *, struct inode *, struct dentry *, struct inode **, unsigned int);
 extern int vfs_whiteout(struct inode *, struct dentry *);
 
 /*
@@ -1676,6 +1705,7 @@
 	struct dentry * (*lookup) (struct inode *,struct dentry *, unsigned int);
 	const char * (*follow_link) (struct dentry *, void **);
 	int (*permission) (struct inode *, int);
+	int (*permission2) (struct vfsmount *, struct inode *, int);
 	struct posix_acl * (*get_acl)(struct inode *, int);
 
 	int (*readlink) (struct dentry *, char __user *,int);
@@ -1693,6 +1723,7 @@
 	int (*rename2) (struct inode *, struct dentry *,
 			struct inode *, struct dentry *, unsigned int);
 	int (*setattr) (struct dentry *, struct iattr *);
+	int (*setattr2) (struct vfsmount *, struct dentry *, struct iattr *);
 	int (*getattr) (struct vfsmount *mnt, struct dentry *, struct kstat *);
 	int (*setxattr) (struct dentry *, const char *,const void *,size_t,int);
 	ssize_t (*getxattr) (struct dentry *, const char *, void *, size_t);
@@ -1738,9 +1769,13 @@
 	int (*unfreeze_fs) (struct super_block *);
 	int (*statfs) (struct dentry *, struct kstatfs *);
 	int (*remount_fs) (struct super_block *, int *, char *);
+	int (*remount_fs2) (struct vfsmount *, struct super_block *, int *, char *);
+	void *(*clone_mnt_data) (void *);
+	void (*copy_mnt_data) (void *, void *);
 	void (*umount_begin) (struct super_block *);
 
 	int (*show_options)(struct seq_file *, struct dentry *);
+	int (*show_options2)(struct vfsmount *,struct seq_file *, struct dentry *);
 	int (*show_devname)(struct seq_file *, struct dentry *);
 	int (*show_path)(struct seq_file *, struct dentry *);
 	int (*show_stats)(struct seq_file *, struct dentry *);
@@ -1777,6 +1812,7 @@
 #else
 #define S_DAX		0	/* Make all the DAX code disappear */
 #endif
+#define S_ENCRYPTED	16384	/* Encrypted file (using fs/crypto/) */
 
 /*
  * Note that nosuid etc flags are inode-specific: setting some file-system
@@ -1815,6 +1851,7 @@
 #define IS_AUTOMOUNT(inode)	((inode)->i_flags & S_AUTOMOUNT)
 #define IS_NOSEC(inode)		((inode)->i_flags & S_NOSEC)
 #define IS_DAX(inode)		((inode)->i_flags & S_DAX)
+#define IS_ENCRYPTED(inode)	((inode)->i_flags & S_ENCRYPTED)
 
 #define IS_WHITEOUT(inode)	(S_ISCHR(inode->i_mode) && \
 				 (inode)->i_rdev == WHITEOUT_DEV)
@@ -1972,6 +2009,9 @@
 #define FS_RENAME_DOES_D_MOVE	32768	/* FS will handle d_move() during rename() internally. */
 	struct dentry *(*mount) (struct file_system_type *, int,
 		       const char *, void *);
+	struct dentry *(*mount2) (struct vfsmount *, struct file_system_type *, int,
+			       const char *, void *);
+	void *(*alloc_mnt_data) (void);
 	void (*kill_sb) (struct super_block *);
 	struct module *owner;
 	struct file_system_type * next;
@@ -2251,6 +2291,8 @@
 extern long vfs_truncate(struct path *, loff_t);
 extern int do_truncate(struct dentry *, loff_t start, unsigned int time_attrs,
 		       struct file *filp);
+extern int do_truncate2(struct vfsmount *, struct dentry *, loff_t start,
+			unsigned int time_attrs, struct file *filp);
 extern int vfs_fallocate(struct file *file, int mode, loff_t offset,
 			loff_t len);
 extern long do_sys_open(int dfd, const char __user *filename, int flags,
@@ -2292,6 +2334,7 @@
 #ifdef CONFIG_BLOCK
 extern int register_blkdev(unsigned int, const char *);
 extern void unregister_blkdev(unsigned int, const char *);
+extern void bdev_unhash_inode(dev_t dev);
 extern struct block_device *bdget(dev_t);
 extern struct block_device *bdgrab(struct block_device *bdev);
 extern void bd_set_size(struct block_device *, loff_t size);
@@ -2475,8 +2518,11 @@
 extern sector_t bmap(struct inode *, sector_t);
 #endif
 extern int notify_change(struct dentry *, struct iattr *, struct inode **);
+extern int notify_change2(struct vfsmount *, struct dentry *, struct iattr *, struct inode **);
 extern int inode_permission(struct inode *, int);
+extern int inode_permission2(struct vfsmount *, struct inode *, int);
 extern int __inode_permission(struct inode *, int);
+extern int __inode_permission2(struct vfsmount *, struct inode *, int);
 extern int generic_permission(struct inode *, int);
 extern int __check_sticky(struct inode *dir, struct inode *inode);
 
@@ -2768,6 +2814,8 @@
 		wake_up_bit(&inode->i_state, __I_DIO_WAKEUP);
 }
 
+struct inode *dio_bio_get_inode(struct bio *bio);
+
 extern void inode_set_flags(struct inode *inode, unsigned int flags,
 			    unsigned int mask);
 
diff -ruw linux-4.4.115/include/linux/fsnotify.h linux-4.4.115-fbx/include/linux/fsnotify.h
--- linux-4.4.115/include/linux/fsnotify.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/linux/fsnotify.h	2019-01-22 16:16:28.231289221 +0100
@@ -230,12 +230,19 @@
 static inline void fsnotify_open(struct file *file)
 {
 	struct path *path = &file->f_path;
+	struct path lower_path;
 	struct inode *inode = file_inode(file);
 	__u32 mask = FS_OPEN;
 
 	if (S_ISDIR(inode->i_mode))
 		mask |= FS_ISDIR;
 
+	if (path->dentry->d_op && path->dentry->d_op->d_canonical_path) {
+		path->dentry->d_op->d_canonical_path(path, &lower_path);
+		fsnotify_parent(&lower_path, NULL, mask);
+		fsnotify(lower_path.dentry->d_inode, mask, &lower_path, FSNOTIFY_EVENT_PATH, NULL, 0);
+		path_put(&lower_path);
+	}
 	fsnotify_parent(path, NULL, mask);
 	fsnotify(inode, mask, path, FSNOTIFY_EVENT_PATH, NULL, 0);
 }
diff -ruw linux-4.4.115/include/linux/ftrace.h linux-4.4.115-fbx/include/linux/ftrace.h
--- linux-4.4.115/include/linux/ftrace.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/linux/ftrace.h	2019-01-22 16:16:28.231289221 +0100
@@ -702,7 +702,8 @@
   static inline void time_hardirqs_off(unsigned long a0, unsigned long a1) { }
 #endif
 
-#ifdef CONFIG_PREEMPT_TRACER
+#if defined(CONFIG_PREEMPT_TRACER) || \
+	(defined(CONFIG_DEBUG_PREEMPT) && defined(CONFIG_PREEMPTIRQ_EVENTS))
   extern void trace_preempt_on(unsigned long a0, unsigned long a1);
   extern void trace_preempt_off(unsigned long a0, unsigned long a1);
 #else
@@ -780,16 +781,6 @@
  */
 #define __notrace_funcgraph		notrace
 
-/*
- * We want to which function is an entrypoint of a hardirq.
- * That will help us to put a signal on output.
- */
-#define __irq_entry		 __attribute__((__section__(".irqentry.text")))
-
-/* Limits of hardirq entrypoints */
-extern char __irqentry_text_start[];
-extern char __irqentry_text_end[];
-
 #define FTRACE_NOTRACE_DEPTH 65536
 #define FTRACE_RETFUNC_DEPTH 50
 #define FTRACE_RETSTACK_ALLOC_SIZE 32
@@ -826,7 +817,6 @@
 #else /* !CONFIG_FUNCTION_GRAPH_TRACER */
 
 #define __notrace_funcgraph
-#define __irq_entry
 #define INIT_FTRACE_GRAPH
 
 static inline void ftrace_graph_init_task(struct task_struct *t) { }
diff -ruw linux-4.4.115/include/linux/gfp.h linux-4.4.115-fbx/include/linux/gfp.h
--- linux-4.4.115/include/linux/gfp.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/linux/gfp.h	2019-01-22 16:16:28.235289258 +0100
@@ -36,6 +36,7 @@
 #define ___GFP_OTHER_NODE	0x800000u
 #define ___GFP_WRITE		0x1000000u
 #define ___GFP_KSWAPD_RECLAIM	0x2000000u
+#define ___GFP_CMA		0x4000000u
 /* If the above are modified, __GFP_BITS_SHIFT may need updating */
 
 /*
@@ -50,8 +51,9 @@
 #define __GFP_DMA32	((__force gfp_t)___GFP_DMA32)
 #define __GFP_MOVABLE	((__force gfp_t)___GFP_MOVABLE)  /* Page is movable */
 #define __GFP_MOVABLE	((__force gfp_t)___GFP_MOVABLE)  /* ZONE_MOVABLE allowed */
-#define GFP_ZONEMASK	(__GFP_DMA|__GFP_HIGHMEM|__GFP_DMA32|__GFP_MOVABLE)
-
+#define __GFP_CMA	((__force gfp_t)___GFP_CMA)
+#define GFP_ZONEMASK	(__GFP_DMA|__GFP_HIGHMEM|__GFP_DMA32|__GFP_MOVABLE| \
+			__GFP_CMA)
 /*
  * Page mobility and placement hints
  *
@@ -183,7 +185,7 @@
 #define __GFP_OTHER_NODE ((__force gfp_t)___GFP_OTHER_NODE)
 
 /* Room for N __GFP_FOO bits */
-#define __GFP_BITS_SHIFT 26
+#define __GFP_BITS_SHIFT 27
 #define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1))
 
 /*
@@ -264,7 +266,12 @@
 		return MIGRATE_UNMOVABLE;
 
 	/* Group based on mobility */
+#ifndef CONFIG_CMA
 	return (gfp_flags & GFP_MOVABLE_MASK) >> GFP_MOVABLE_SHIFT;
+#else
+	return ((gfp_flags & GFP_MOVABLE_MASK) >> GFP_MOVABLE_SHIFT) |
+	       ((gfp_flags & __GFP_CMA) != 0);
+#endif
 }
 #undef GFP_MOVABLE_MASK
 #undef GFP_MOVABLE_SHIFT
diff -ruw linux-4.4.115/include/linux/hash.h linux-4.4.115-fbx/include/linux/hash.h
--- linux-4.4.115/include/linux/hash.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/linux/hash.h	2019-01-22 16:16:28.235289258 +0100
@@ -15,6 +15,7 @@
  */
 
 #include <asm/types.h>
+#include <asm/hash.h>
 #include <linux/compiler.h>
 
 /* 2^31 + 2^29 - 2^25 + 2^22 - 2^19 - 2^16 + 1 */
@@ -99,4 +100,38 @@
 	return (u32)val;
 }
 
+struct fast_hash_ops {
+	u32 (*hash)(const void *data, u32 len, u32 seed);
+	u32 (*hash2)(const u32 *data, u32 len, u32 seed);
+};
+
+/**
+ *	arch_fast_hash - Calculates a hash over a given buffer that can have
+ *			 arbitrary size. This function will eventually use an
+ *			 architecture-optimized hashing implementation if
+ *			 available, and trades off distribution for speed.
+ *
+ *	@data: buffer to hash
+ *	@len: length of buffer in bytes
+ *	@seed: start seed
+ *
+ *	Returns 32bit hash.
+ */
+extern u32 arch_fast_hash(const void *data, u32 len, u32 seed);
+
+/**
+ *	arch_fast_hash2 - Calculates a hash over a given buffer that has a
+ *			  size that is of a multiple of 32bit words. This
+ *			  function will eventually use an architecture-
+ *			  optimized hashing implementation if available,
+ *			  and trades off distribution for speed.
+ *
+ *	@data: buffer to hash (must be 32bit padded)
+ *	@len: number of 32bit words
+ *	@seed: start seed
+ *
+ *	Returns 32bit hash.
+ */
+extern u32 arch_fast_hash2(const u32 *data, u32 len, u32 seed);
+
 #endif /* _LINUX_HASH_H */
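
A sketch of hashing a fixed-size, word-aligned key with the word-based variant (the struct is illustrative):

#include <linux/hash.h>

struct demo_key {
	u32 saddr;
	u32 daddr;
	u32 ports;
};

static u32 demo_hash(const struct demo_key *k, u32 seed)
{
	/* three 32-bit words, so len is 3, not sizeof() */
	return arch_fast_hash2((const u32 *)k, 3, seed);
}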
diff -ruw linux-4.4.115/include/linux/hdmi.h linux-4.4.115-fbx/include/linux/hdmi.h
--- linux-4.4.115/include/linux/hdmi.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/linux/hdmi.h	2019-01-22 16:16:28.235289258 +0100
@@ -35,6 +35,7 @@
 };
 
 #define HDMI_IEEE_OUI 0x000c03
+#define HDMI_IEEE_OUI_HF	0xc45dd8
 #define HDMI_INFOFRAME_HEADER_SIZE  4
 #define HDMI_AVI_INFOFRAME_SIZE    13
 #define HDMI_SPD_INFOFRAME_SIZE    25
@@ -78,6 +79,8 @@
 	HDMI_PICTURE_ASPECT_NONE,
 	HDMI_PICTURE_ASPECT_4_3,
 	HDMI_PICTURE_ASPECT_16_9,
+	HDMI_PICTURE_ASPECT_64_27,
+	HDMI_PICTURE_ASPECT_256_135,
 	HDMI_PICTURE_ASPECT_RESERVED,
 };
 
diff -ruw linux-4.4.115/include/linux/highmem.h linux-4.4.115-fbx/include/linux/highmem.h
--- linux-4.4.115/include/linux/highmem.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/linux/highmem.h	2019-01-22 16:16:28.239289294 +0100
@@ -39,6 +39,12 @@
 
 void kmap_flush_unused(void);
 
+#ifdef CONFIG_ARCH_WANT_KMAP_ATOMIC_FLUSH
+void kmap_atomic_flush_unused(void);
+#else
+static inline void kmap_atomic_flush_unused(void) { }
+#endif
+
 struct page *kmap_to_page(void *addr);
 
 #else /* CONFIG_HIGHMEM */
@@ -80,6 +86,7 @@
 #define kmap_atomic_pfn(pfn)	kmap_atomic(pfn_to_page(pfn))
 
 #define kmap_flush_unused()	do {} while(0)
+#define kmap_atomic_flush_unused()	do {} while (0)
 #endif
 
 #endif /* CONFIG_HIGHMEM */
@@ -180,9 +187,24 @@
 alloc_zeroed_user_highpage_movable(struct vm_area_struct *vma,
 					unsigned long vaddr)
 {
+#ifndef CONFIG_CMA
 	return __alloc_zeroed_user_highpage(__GFP_MOVABLE, vma, vaddr);
+#else
+	return __alloc_zeroed_user_highpage(__GFP_MOVABLE|__GFP_CMA, vma,
+						vaddr);
+#endif
 }
 
+#ifdef CONFIG_CMA
+static inline struct page *
+alloc_zeroed_user_highpage_movable_cma(struct vm_area_struct *vma,
+						unsigned long vaddr)
+{
+	return __alloc_zeroed_user_highpage(__GFP_MOVABLE|__GFP_CMA, vma,
+						vaddr);
+}
+#endif
+
 static inline void clear_highpage(struct page *page)
 {
 	void *kaddr = kmap_atomic(page);
diff -ruw linux-4.4.115/include/linux/hrtimer.h linux-4.4.115-fbx/include/linux/hrtimer.h
--- linux-4.4.115/include/linux/hrtimer.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/linux/hrtimer.h	2019-01-22 16:16:28.239289294 +0100
@@ -53,6 +53,7 @@
  *
  * 0x00		inactive
  * 0x01		enqueued into rbtree
+ * 0x02		timer is pinned to a cpu
  *
  * The callback state is not part of the timer->state because clearing it would
  * mean touching the timer after the callback, this makes it impossible to free
@@ -72,6 +73,8 @@
  */
 #define HRTIMER_STATE_INACTIVE	0x00
 #define HRTIMER_STATE_ENQUEUED	0x01
+#define HRTIMER_PINNED_SHIFT	1
+#define HRTIMER_STATE_PINNED	(1 << HRTIMER_PINNED_SHIFT)
 
 /**
  * struct hrtimer - the basic hrtimer structure
@@ -88,12 +91,6 @@
  * @base:	pointer to the timer base (per cpu and per clock)
  * @state:	state information (See bit values above)
  * @is_rel:	Set if the timer was armed relative
- * @start_pid:  timer statistics field to store the pid of the task which
- *		started the timer
- * @start_site:	timer statistics field to store the site where the timer
- *		was started
- * @start_comm: timer statistics field to store the name of the process which
- *		started the timer
  *
  * The hrtimer structure must be initialized by hrtimer_init()
  */
@@ -104,11 +101,6 @@
 	struct hrtimer_clock_base	*base;
 	u8				state;
 	u8				is_rel;
-#ifdef CONFIG_TIMER_STATS
-	int				start_pid;
-	void				*start_site;
-	char				start_comm[16];
-#endif
 };
 
 /**
@@ -220,7 +212,7 @@
 	timer->node.expires = ktime_add_safe(time, delta);
 }
 
-static inline void hrtimer_set_expires_range_ns(struct hrtimer *timer, ktime_t time, unsigned long delta)
+static inline void hrtimer_set_expires_range_ns(struct hrtimer *timer, ktime_t time, u64 delta)
 {
 	timer->_softexpires = time;
 	timer->node.expires = ktime_add_safe(time, ns_to_ktime(delta));
@@ -357,6 +349,9 @@
 
 /* Exported timer functions: */
 
+/* To be used from cpusets only */
+extern void hrtimer_quiesce_cpu(void *cpup);
+
 /* Initialize timers: */
 extern void hrtimer_init(struct hrtimer *timer, clockid_t which_clock,
 			 enum hrtimer_mode mode);
@@ -378,7 +373,7 @@
 
 /* Basic timer operations: */
 extern void hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
-			unsigned long range_ns, const enum hrtimer_mode mode);
+				   u64 range_ns, const enum hrtimer_mode mode);
 
 /**
  * hrtimer_start - (re)start an hrtimer on the current CPU
@@ -399,7 +394,7 @@
 static inline void hrtimer_start_expires(struct hrtimer *timer,
 					 enum hrtimer_mode mode)
 {
-	unsigned long delta;
+	u64 delta;
 	ktime_t soft, hard;
 	soft = hrtimer_get_softexpires(timer);
 	hard = hrtimer_get_expires(timer);
@@ -477,10 +472,12 @@
 extern void hrtimer_init_sleeper(struct hrtimer_sleeper *sl,
 				 struct task_struct *tsk);
 
-extern int schedule_hrtimeout_range(ktime_t *expires, unsigned long delta,
+extern int schedule_hrtimeout_range(ktime_t *expires, u64 delta,
 						const enum hrtimer_mode mode);
 extern int schedule_hrtimeout_range_clock(ktime_t *expires,
-		unsigned long delta, const enum hrtimer_mode mode, int clock);
+					  u64 delta,
+					  const enum hrtimer_mode mode,
+					  int clock);
 extern int schedule_hrtimeout(ktime_t *expires, const enum hrtimer_mode mode);
 
 /* Soft interrupt function to run the hrtimer queues: */
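
With the delta parameter widened to u64, slack values are plain nanosecond counts; a sketch of a 10 ms sleep with 100 us of slack:

#include <linux/hrtimer.h>
#include <linux/ktime.h>
#include <linux/sched.h>

static int demo_sleep(void)
{
	ktime_t t = ktime_set(0, 10 * NSEC_PER_MSEC);

	set_current_state(TASK_UNINTERRUPTIBLE);
	return schedule_hrtimeout_range(&t, 100 * NSEC_PER_USEC,
					HRTIMER_MODE_REL);
}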
diff -ruw linux-4.4.115/include/linux/hugetlb.h linux-4.4.115-fbx/include/linux/hugetlb.h
--- linux-4.4.115/include/linux/hugetlb.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/linux/hugetlb.h	2019-10-29 09:26:25.437220734 +0100
@@ -96,9 +96,7 @@
 				struct address_space *mapping,
 				pgoff_t idx, unsigned long address);
 
-#ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE
 pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud);
-#endif
 
 extern int hugepages_treat_as_movable;
 extern int sysctl_hugetlb_shm_group;
diff -ruw linux-4.4.115/include/linux/ieee80211.h linux-4.4.115-fbx/include/linux/ieee80211.h
--- linux-4.4.115/include/linux/ieee80211.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/linux/ieee80211.h	2019-01-22 16:16:28.247289366 +0100
@@ -1535,6 +1535,9 @@
 #define WLAN_AUTH_SHARED_KEY 1
 #define WLAN_AUTH_FT 2
 #define WLAN_AUTH_SAE 3
+#define WLAN_AUTH_FILS_SK 4
+#define WLAN_AUTH_FILS_SK_PFS 5
+#define WLAN_AUTH_FILS_PK 6
 #define WLAN_AUTH_LEAP 128
 
 #define WLAN_AUTH_CHALLENGE_LEN 128
@@ -1674,6 +1677,9 @@
 	WLAN_STATUS_REJECT_DSE_BAND = 96,
 	WLAN_STATUS_DENIED_WITH_SUGGESTED_BAND_AND_CHANNEL = 99,
 	WLAN_STATUS_DENIED_DUE_TO_SPECTRUM_MANAGEMENT = 103,
+	/* 802.11ai */
+	WLAN_STATUS_FILS_AUTHENTICATION_FAILURE = 108,
+	WLAN_STATUS_UNKNOWN_AUTHENTICATION_SERVER = 109,
 };
 
 
@@ -2032,6 +2038,15 @@
 #define IEEE80211_GCMP_MIC_LEN		16
 #define IEEE80211_GCMP_PN_LEN		6
 
+#define FILS_NONCE_LEN			16
+#define FILS_MAX_KEK_LEN		64
+
+#define FILS_ERP_MAX_USERNAME_LEN	16
+#define FILS_ERP_MAX_REALM_LEN		253
+#define FILS_ERP_MAX_RRK_LEN		64
+
+#define PMK_MAX_LEN			48
+
 /* Public action codes */
 enum ieee80211_pub_actioncode {
 	WLAN_PUB_ACTION_EXT_CHANSW_ANN = 4,
@@ -2245,31 +2260,37 @@
 };
 
 
+#define SUITE(oui, id)	(((oui) << 8) | (id))
+
 /* cipher suite selectors */
-#define WLAN_CIPHER_SUITE_USE_GROUP	0x000FAC00
-#define WLAN_CIPHER_SUITE_WEP40		0x000FAC01
-#define WLAN_CIPHER_SUITE_TKIP		0x000FAC02
-/* reserved: 				0x000FAC03 */
-#define WLAN_CIPHER_SUITE_CCMP		0x000FAC04
-#define WLAN_CIPHER_SUITE_WEP104	0x000FAC05
-#define WLAN_CIPHER_SUITE_AES_CMAC	0x000FAC06
-#define WLAN_CIPHER_SUITE_GCMP		0x000FAC08
-#define WLAN_CIPHER_SUITE_GCMP_256	0x000FAC09
-#define WLAN_CIPHER_SUITE_CCMP_256	0x000FAC0A
-#define WLAN_CIPHER_SUITE_BIP_GMAC_128	0x000FAC0B
-#define WLAN_CIPHER_SUITE_BIP_GMAC_256	0x000FAC0C
-#define WLAN_CIPHER_SUITE_BIP_CMAC_256	0x000FAC0D
+#define WLAN_CIPHER_SUITE_USE_GROUP	SUITE(0x000FAC, 0)
+#define WLAN_CIPHER_SUITE_WEP40		SUITE(0x000FAC, 1)
+#define WLAN_CIPHER_SUITE_TKIP		SUITE(0x000FAC, 2)
+/* reserved:				SUITE(0x000FAC, 3) */
+#define WLAN_CIPHER_SUITE_CCMP		SUITE(0x000FAC, 4)
+#define WLAN_CIPHER_SUITE_WEP104	SUITE(0x000FAC, 5)
+#define WLAN_CIPHER_SUITE_AES_CMAC	SUITE(0x000FAC, 6)
+#define WLAN_CIPHER_SUITE_GCMP		SUITE(0x000FAC, 8)
+#define WLAN_CIPHER_SUITE_GCMP_256	SUITE(0x000FAC, 9)
+#define WLAN_CIPHER_SUITE_CCMP_256	SUITE(0x000FAC, 10)
+#define WLAN_CIPHER_SUITE_BIP_GMAC_128	SUITE(0x000FAC, 11)
+#define WLAN_CIPHER_SUITE_BIP_GMAC_256	SUITE(0x000FAC, 12)
+#define WLAN_CIPHER_SUITE_BIP_CMAC_256	SUITE(0x000FAC, 13)
 
-#define WLAN_CIPHER_SUITE_SMS4		0x00147201
+#define WLAN_CIPHER_SUITE_SMS4		SUITE(0x001472, 1)
 
 /* AKM suite selectors */
-#define WLAN_AKM_SUITE_8021X		0x000FAC01
-#define WLAN_AKM_SUITE_PSK		0x000FAC02
-#define WLAN_AKM_SUITE_8021X_SHA256	0x000FAC05
-#define WLAN_AKM_SUITE_PSK_SHA256	0x000FAC06
-#define WLAN_AKM_SUITE_TDLS		0x000FAC07
-#define WLAN_AKM_SUITE_SAE		0x000FAC08
-#define WLAN_AKM_SUITE_FT_OVER_SAE	0x000FAC09
+#define WLAN_AKM_SUITE_8021X		SUITE(0x000FAC, 1)
+#define WLAN_AKM_SUITE_PSK		SUITE(0x000FAC, 2)
+#define WLAN_AKM_SUITE_8021X_SHA256	SUITE(0x000FAC, 5)
+#define WLAN_AKM_SUITE_PSK_SHA256	SUITE(0x000FAC, 6)
+#define WLAN_AKM_SUITE_TDLS		SUITE(0x000FAC, 7)
+#define WLAN_AKM_SUITE_SAE		SUITE(0x000FAC, 8)
+#define WLAN_AKM_SUITE_FT_OVER_SAE	SUITE(0x000FAC, 9)
+#define WLAN_AKM_SUITE_FILS_SHA256	SUITE(0x000FAC, 14)
+#define WLAN_AKM_SUITE_FILS_SHA384	SUITE(0x000FAC, 15)
+#define WLAN_AKM_SUITE_FT_FILS_SHA256	SUITE(0x000FAC, 16)
+#define WLAN_AKM_SUITE_FT_FILS_SHA384	SUITE(0x000FAC, 17)
 
 #define WLAN_MAX_KEY_LEN		32
 
diff -ruw linux-4.4.115/include/linux/if_pppox.h linux-4.4.115-fbx/include/linux/if_pppox.h
--- linux-4.4.115/include/linux/if_pppox.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/linux/if_pppox.h	2019-10-29 09:26:25.441220774 +0100
@@ -43,6 +43,25 @@
 	u32 seq_sent, seq_recv;
 	int ppp_flags;
 };
+
+struct pppolac_opt {
+	__u32		local;
+	__u32		remote;
+	__u32		recv_sequence;
+	__u32		xmit_sequence;
+	atomic_t	sequencing;
+	int		(*backlog_rcv)(struct sock *sk_udp, struct sk_buff *skb);
+};
+
+struct pppopns_opt {
+	__u16		local;
+	__u16		remote;
+	__u32		recv_sequence;
+	__u32		xmit_sequence;
+	void		(*data_ready)(struct sock *sk_raw);
+	int		(*backlog_rcv)(struct sock *sk_raw, struct sk_buff *skb);
+};
+
 #include <net/sock.h>
 
 struct pppox_sock {
@@ -53,6 +72,8 @@
 	union {
 		struct pppoe_opt pppoe;
 		struct pptp_opt  pptp;
+		struct pppolac_opt lac;
+		struct pppopns_opt pns;
 	} proto;
 	__be16			num;
 };
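[Editorial note] A hedged sketch of how an L2TP-LAC receive path might use the new union member; pppolac_rcv_sketch() and the way the pppox_sock pointer is obtained are illustrative, only the struct fields come from the patch.

static int pppolac_rcv_sketch(struct pppox_sock *po, struct sock *sk_udp,
			      struct sk_buff *skb)
{
	struct pppolac_opt *opt = &po->proto.lac;

	if (atomic_read(&opt->sequencing))
		opt->recv_sequence++;	/* track the peer's data counter */

	/* defer to the saved handler when the socket is busy */
	return opt->backlog_rcv(sk_udp, skb);
}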
diff -ruw linux-4.4.115/include/linux/iio/consumer.h linux-4.4.115-fbx/include/linux/iio/consumer.h
--- linux-4.4.115/include/linux/iio/consumer.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/linux/iio/consumer.h	2019-01-22 16:16:28.251289402 +0100
@@ -161,6 +161,16 @@
 int iio_write_channel_raw(struct iio_channel *chan, int val);
 
 /**
+ * iio_write_channel_processed() - write to a given channel
+ * @chan:		The channel being written to.
+ * @val:		Value being written.
+ *
+ * Note that processed writes to IIO channels are converted to raw
+ * values before being written.
+ */
+int iio_write_channel_processed(struct iio_channel *chan, int val);
+
+/**
  * iio_get_channel_type() - get the type of a channel
  * @channel:		The channel being queried.
  * @type:		The type of the channel.
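[Editorial note] A hedged consumer-side sketch of the new helper; the channel name "vref" is hypothetical, while iio_channel_get()/iio_channel_release() are the existing consumer API from this header.

static int set_vref_millivolts(struct device *dev, int mv)
{
	struct iio_channel *chan = iio_channel_get(dev, "vref");
	int ret;

	if (IS_ERR(chan))
		return PTR_ERR(chan);

	/* the core scales 'mv' back to a raw value before writing */
	ret = iio_write_channel_processed(chan, mv);
	iio_channel_release(chan);
	return ret;
}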
diff -ruw linux-4.4.115/include/linux/inetdevice.h linux-4.4.115-fbx/include/linux/inetdevice.h
--- linux-4.4.115/include/linux/inetdevice.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/linux/inetdevice.h	2019-01-22 16:16:28.251289402 +0100
@@ -128,6 +128,8 @@
 #define IN_DEV_ARP_ANNOUNCE(in_dev)	IN_DEV_MAXCONF((in_dev), ARP_ANNOUNCE)
 #define IN_DEV_ARP_IGNORE(in_dev)	IN_DEV_MAXCONF((in_dev), ARP_IGNORE)
 #define IN_DEV_ARP_NOTIFY(in_dev)	IN_DEV_MAXCONF((in_dev), ARP_NOTIFY)
+#define IN_DEV_NF_IPV4_DEFRAG_SKIP(in_dev) \
+	IN_DEV_ORCONF((in_dev), NF_IPV4_DEFRAG_SKIP)
 
 struct in_ifaddr {
 	struct hlist_node	hash;
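[Editorial note] A hedged sketch of how a netfilter defrag path could honour the new per-device knob; skip_ipv4_defrag() is illustrative and must run under rcu_read_lock().

static bool skip_ipv4_defrag(const struct sk_buff *skb)
{
	struct in_device *in_dev = __in_dev_get_rcu(skb->dev);

	return in_dev && IN_DEV_NF_IPV4_DEFRAG_SKIP(in_dev);
}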
diff -ruw linux-4.4.115/include/linux/inet_diag.h linux-4.4.115-fbx/include/linux/inet_diag.h
--- linux-4.4.115/include/linux/inet_diag.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/linux/inet_diag.h	2019-01-22 16:16:28.251289402 +0100
@@ -3,6 +3,7 @@
 
 #include <uapi/linux/inet_diag.h>
 
+struct net;
 struct sock;
 struct inet_hashinfo;
 struct nlattr;
@@ -23,6 +24,10 @@
 	void		(*idiag_get_info)(struct sock *sk,
 					  struct inet_diag_msg *r,
 					  void *info);
+
+	int		(*destroy)(struct sk_buff *in_skb,
+				   const struct inet_diag_req_v2 *req);
+
 	__u16		idiag_type;
 	__u16		idiag_info_size;
 };
@@ -32,7 +37,7 @@
 		      struct sk_buff *skb, const struct inet_diag_req_v2 *req,
 		      struct user_namespace *user_ns,
 		      u32 pid, u32 seq, u16 nlmsg_flags,
-		      const struct nlmsghdr *unlh);
+		      const struct nlmsghdr *unlh, bool net_admin);
 void inet_diag_dump_icsk(struct inet_hashinfo *h, struct sk_buff *skb,
 			 struct netlink_callback *cb,
 			 const struct inet_diag_req_v2 *r,
@@ -41,6 +46,10 @@
 			    struct sk_buff *in_skb, const struct nlmsghdr *nlh,
 			    const struct inet_diag_req_v2 *req);
 
+struct sock *inet_diag_find_one_icsk(struct net *net,
+				     struct inet_hashinfo *hashinfo,
+				     const struct inet_diag_req_v2 *req);
+
 int inet_diag_bc_sk(const struct nlattr *_bc, struct sock *sk);
 
 extern int  inet_diag_register(const struct inet_diag_handler *handler);
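[Editorial note] A hedged sketch of a protocol handler's new destroy hook built on inet_diag_find_one_icsk(), loosely modelled on a TCP-style implementation; my_diag_destroy() is illustrative, and the helper is assumed to return a referenced socket or an ERR_PTR.

static int my_diag_destroy(struct sk_buff *in_skb,
			   const struct inet_diag_req_v2 *req)
{
	struct net *net = sock_net(in_skb->sk);
	struct sock *sk = inet_diag_find_one_icsk(net, &tcp_hashinfo, req);

	if (IS_ERR(sk))
		return PTR_ERR(sk);

	/* ... abort the connection here ... */

	sock_put(sk);	/* drop the reference taken by the lookup */
	return 0;
}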
diff -ruw linux-4.4.115/include/linux/inet_lro.h linux-4.4.115-fbx/include/linux/inet_lro.h
--- linux-4.4.115/include/linux/inet_lro.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/linux/inet_lro.h	2019-01-22 16:16:28.251289402 +0100
@@ -81,6 +81,7 @@
 #define LRO_F_EXTRACT_VLAN_ID 2  /* Set flag if VLAN IDs are extracted
 				    from received packets and eth protocol
 				    is still ETH_P_8021Q */
+#define LRO_F_NI              4  /* If not NAPI, pass packets to stack via NI */
 
 	/*
 	 * Set for generated SKBs that are not added to
@@ -122,6 +123,50 @@
 };
 
 /*
+ * Large Receive Offload (LRO) information provided by the driver
+ *
+ * Fields must be set by the driver when using lro_receive_skb_ext()
+ */
+struct net_lro_info {
+	/* bitmask indicating the supported fields */
+	unsigned long valid_fields;
+	/*
+	 * Driver has checked the LRO eligibility of the skb
+	 */
+	#define LRO_ELIGIBILITY_CHECKED (1 << 0)
+	/*
+	 * Driver has provided the TCP payload checksum
+	 */
+	#define LRO_TCP_DATA_CSUM (1 << 1)
+	/*
+	 * Driver has extracted the TCP window from the skb
+	 * The value is in network format
+	 */
+	#define LRO_TCP_WIN (1 << 2)
+	/*
+	 * Driver has extracted the TCP sequence number from skb
+	 * The value is in network format
+	 */
+	#define LRO_TCP_SEQ_NUM (1 << 3)
+	/*
+	 * Driver has extracted the TCP ack number from the skb
+	 * The value is in network format
+	 */
+	#define LRO_TCP_ACK_NUM (1 << 4)
+	/*
+	 * Driver has provided the LRO descriptor
+	 */
+	#define LRO_DESC (1 << 5)
+
+	bool lro_eligible;
+	__wsum tcp_data_csum;
+	__be16 tcp_win;
+	__be32 tcp_seq_num;
+	__be32 tcp_ack_num;
+	struct net_lro_desc *lro_desc;
+};
+
+/*
  * Processes a SKB
  *
  * @lro_mgr: LRO manager to use
@@ -133,10 +178,54 @@
 void lro_receive_skb(struct net_lro_mgr *lro_mgr,
 		     struct sk_buff *skb,
 		     void *priv);
+
+/*
+ * Processes an SKB
+ *
+ * This API provides a means to pass any LRO information that the
+ * driver has already extracted
+ *
+ * @lro_mgr: LRO manager to use
+ * @skb: SKB to aggregate
+ * @priv: Private data that may be used by driver functions
+ *       (for example get_tcp_ip_hdr)
+ * @lro_info: LRO information extracted by the driver
+ */
+
+void lro_receive_skb_ext(struct net_lro_mgr *lro_mgr,
+		struct sk_buff *skb,
+		void *priv,
+		struct net_lro_info *lro_info);
+
+/*
+ * Processes a fragment list
+ *
+ * This function aggregates fragments and generates SKBs to pass
+ * the packets to the stack.
+ *
+ * @lro_mgr: LRO manager to use
+ * @frags: Fragments to be processed. Must contain the entire header in
+ *         the first element.
+ * @len: Length of received data
+ * @true_size: Actual size of memory the fragment is consuming
+ * @priv: Private data that may be used by driver functions
+ *        (for example get_tcp_ip_hdr)
+ */
+
+void lro_receive_frags(struct net_lro_mgr *lro_mgr,
+		       struct skb_frag_struct *frags,
+		       int len, int true_size, void *priv, __wsum sum);
+
 /*
  * Forward all aggregated SKBs held by lro_mgr to network stack
  */
 
 void lro_flush_all(struct net_lro_mgr *lro_mgr);
 
+void lro_flush_pkt(struct net_lro_mgr *lro_mgr,
+		   struct iphdr *iph, struct tcphdr *tcph);
+
+void lro_flush_desc(struct net_lro_mgr *lro_mgr,
+		    struct net_lro_desc *lro_desc);
+
 #endif
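[Editorial note] A hedged driver-side sketch of lro_receive_skb_ext(): the descriptor type my_rx_desc and its fields are hypothetical, while the flag names come from struct net_lro_info above.

static void my_rx_one(struct net_lro_mgr *mgr, struct sk_buff *skb,
		      const struct my_rx_desc *desc, void *priv)
{
	struct net_lro_info info = {
		.valid_fields = LRO_ELIGIBILITY_CHECKED | LRO_TCP_SEQ_NUM,
		.lro_eligible = desc->hw_says_lro_ok,
		.tcp_seq_num  = desc->tcp_seq,	/* already network order */
	};

	lro_receive_skb_ext(mgr, skb, priv, &info);
}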
diff -ruw linux-4.4.115/include/linux/init.h linux-4.4.115-fbx/include/linux/init.h
--- linux-4.4.115/include/linux/init.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/linux/init.h	2019-10-29 09:26:25.441220774 +0100
@@ -142,6 +142,10 @@
 void __init load_default_modules(void);
 int __init init_rootfs(void);
 
+#ifdef CONFIG_DEBUG_RODATA
+void mark_rodata_ro(void);
+#endif
+
 extern void (*late_time_init)(void);
 
 extern bool initcall_debug;
diff -ruw linux-4.4.115/include/linux/init_task.h linux-4.4.115-fbx/include/linux/init_task.h
--- linux-4.4.115/include/linux/init_task.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/linux/init_task.h	2019-01-22 16:16:28.251289402 +0100
@@ -15,6 +15,8 @@
 #include <net/net_namespace.h>
 #include <linux/sched/rt.h>
 
+#include <asm/thread_info.h>
+
 #ifdef CONFIG_SMP
 # define INIT_PUSHABLE_TASKS(tsk)					\
 	.pushable_tasks = PLIST_NODE_INIT(tsk.pushable_tasks, MAX_PRIO),
@@ -183,16 +185,24 @@
 # define INIT_KASAN(tsk)
 #endif
 
+#ifdef CONFIG_THREAD_INFO_IN_TASK
+# define INIT_TASK_TI(tsk) .thread_info = INIT_THREAD_INFO(tsk),
+#else
+# define INIT_TASK_TI(tsk)
+#endif
+
 /*
  *  INIT_TASK is used to set up the first task table, touch at
  * your own risk!. Base=0, limit=0x1fffff (=2MB)
  */
 #define INIT_TASK(tsk)	\
 {									\
+	INIT_TASK_TI(tsk)						\
 	.state		= 0,						\
-	.stack		= &init_thread_info,				\
+	.stack		= init_stack,					\
 	.usage		= ATOMIC_INIT(2),				\
 	.flags		= PF_KTHREAD,					\
+	.exec_mode	= EXEC_MODE_UNLIMITED,				\
 	.prio		= MAX_PRIO-20,					\
 	.static_prio	= MAX_PRIO-20,					\
 	.normal_prio	= MAX_PRIO-20,					\
diff -ruw linux-4.4.115/include/linux/interrupt.h linux-4.4.115-fbx/include/linux/interrupt.h
--- linux-4.4.115/include/linux/interrupt.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/linux/interrupt.h	2019-01-22 16:16:28.255289439 +0100
@@ -18,6 +18,7 @@
 #include <linux/atomic.h>
 #include <asm/ptrace.h>
 #include <asm/irq.h>
+#include <asm/sections.h>
 
 /*
  * These correspond to the IORESOURCE_IRQ_* defines in
@@ -423,6 +424,12 @@
 };
 
 #define SOFTIRQ_STOP_IDLE_MASK (~(1 << RCU_SOFTIRQ))
+/* Softirqs whose handling might be long: */
+#define LONG_SOFTIRQ_MASK ((1 << NET_TX_SOFTIRQ)       | \
+			   (1 << NET_RX_SOFTIRQ)       | \
+			   (1 << BLOCK_SOFTIRQ)        | \
+			   (1 << BLOCK_IOPOLL_SOFTIRQ) | \
+			   (1 << TASKLET_SOFTIRQ))
 
 /* map softirq index to softirq name. update 'softirq_to_name' in
  * kernel/softirq.c when adding a new softirq.
@@ -458,6 +465,7 @@
 extern void raise_softirq(unsigned int nr);
 
 DECLARE_PER_CPU(struct task_struct *, ksoftirqd);
+DECLARE_PER_CPU(__u32, active_softirqs);
 
 static inline struct task_struct *this_cpu_ksoftirqd(void)
 {
@@ -672,4 +680,11 @@
 extern int arch_probe_nr_irqs(void);
 extern int arch_early_irq_init(void);
 
+/*
+ * We want to know which function is the entry point of a hardirq or a
+ * softirq.
+ */
+#define __irq_entry	__attribute__((__section__(".irqentry.text")))
+#define __softirq_entry	__attribute__((__section__(".softirqentry.text")))
+
 #endif
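[Editorial note] A hedged sketch of how a scheduling heuristic might combine the new per-CPU counter with LONG_SOFTIRQ_MASK; cpu_busy_with_long_softirq() is illustrative.

static bool cpu_busy_with_long_softirq(int cpu)
{
	return !!(per_cpu(active_softirqs, cpu) & LONG_SOFTIRQ_MASK);
}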
diff -ruw linux-4.4.115/include/linux/iommu.h linux-4.4.115-fbx/include/linux/iommu.h
--- linux-4.4.115/include/linux/iommu.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/linux/iommu.h	2019-01-22 16:16:28.255289439 +0100
@@ -23,13 +23,14 @@
 #include <linux/err.h>
 #include <linux/of.h>
 #include <linux/types.h>
-#include <linux/scatterlist.h>
 #include <trace/events/iommu.h>
 
 #define IOMMU_READ	(1 << 0)
 #define IOMMU_WRITE	(1 << 1)
 #define IOMMU_CACHE	(1 << 2) /* DMA cache coherency */
 #define IOMMU_NOEXEC	(1 << 3)
+#define IOMMU_PRIV	(1 << 4)
+#define IOMMU_DEVICE	(1 << 5) /* Indicates access to device memory */
 
 struct iommu_ops;
 struct iommu_group;
@@ -39,8 +40,12 @@
 struct notifier_block;
 
 /* iommu fault flags */
-#define IOMMU_FAULT_READ	0x0
-#define IOMMU_FAULT_WRITE	0x1
+#define IOMMU_FAULT_READ                (1 << 0)
+#define IOMMU_FAULT_WRITE               (1 << 1)
+#define IOMMU_FAULT_TRANSLATION         (1 << 2)
+#define IOMMU_FAULT_PERMISSION          (1 << 3)
+#define IOMMU_FAULT_EXTERNAL            (1 << 4)
+#define IOMMU_FAULT_TRANSACTION_STALLED (1 << 5)
 
 typedef int (*iommu_fault_handler_t)(struct iommu_domain *,
 			struct device *, unsigned long, int, void *);
@@ -51,6 +56,10 @@
 	bool force_aperture;       /* DMA only allowed in mappable range? */
 };
 
+struct iommu_pgtbl_info {
+	void *pmds;
+};
+
 /* Domain feature flags */
 #define __IOMMU_DOMAIN_PAGING	(1U << 0)  /* Support for iommu_map/unmap */
 #define __IOMMU_DOMAIN_DMA_API	(1U << 1)  /* Domain for use in DMA-API
@@ -112,9 +121,28 @@
 	DOMAIN_ATTR_FSL_PAMU_ENABLE,
 	DOMAIN_ATTR_FSL_PAMUV1,
 	DOMAIN_ATTR_NESTING,	/* two stages of translation */
+	DOMAIN_ATTR_PT_BASE_ADDR,
+	DOMAIN_ATTR_SECURE_VMID,
+	DOMAIN_ATTR_ATOMIC,
+	DOMAIN_ATTR_CONTEXT_BANK,
+	DOMAIN_ATTR_TTBR0,
+	DOMAIN_ATTR_CONTEXTIDR,
+	DOMAIN_ATTR_PROCID,
+	DOMAIN_ATTR_DYNAMIC,
+	DOMAIN_ATTR_NON_FATAL_FAULTS,
+	DOMAIN_ATTR_S1_BYPASS,
+	DOMAIN_ATTR_FAST,
+	DOMAIN_ATTR_PGTBL_INFO,
+	DOMAIN_ATTR_EARLY_MAP,
+	DOMAIN_ATTR_PAGE_TABLE_IS_COHERENT,
+	DOMAIN_ATTR_PAGE_TABLE_FORCE_COHERENT,
+	DOMAIN_ATTR_ENABLE_TTBR1,
+	DOMAIN_ATTR_CB_STALL_DISABLE,
 	DOMAIN_ATTR_MAX,
 };
 
+extern struct dentry *iommu_debugfs_top;
+
 /**
  * struct iommu_dm_region - descriptor for a direct mapped memory region
  * @list: Linked list pointers
@@ -142,12 +170,21 @@
  * @map_sg: map a scatter-gather list of physically contiguous memory chunks
  * to an iommu domain
  * @iova_to_phys: translate iova to physical address
+ * @iova_to_phys_hard: translate iova to physical address using IOMMU hardware
  * @add_device: add device to iommu grouping
  * @remove_device: remove device from iommu grouping
  * @domain_get_attr: Query domain attributes
  * @domain_set_attr: Change domain attributes
  * @of_xlate: add OF master IDs to iommu grouping
  * @pgsize_bitmap: bitmap of supported page sizes
+ * @get_pgsize_bitmap: gets a bitmap of supported page sizes for a domain.
+ *                     This takes precedence over @pgsize_bitmap.
+ * @trigger_fault: trigger a fault on the device attached to an iommu domain
+ * @reg_read: read an IOMMU register
+ * @reg_write: write an IOMMU register
+ * @tlbi_domain: Invalidate all TLBs covering an iommu domain
+ * @enable_config_clocks: Enable all config clocks for this domain's IOMMU
+ * @disable_config_clocks: Disable all config clocks for this domain's IOMMU
  * @priv: per-instance data private to the iommu driver
  */
 struct iommu_ops {
@@ -166,6 +203,8 @@
 	size_t (*map_sg)(struct iommu_domain *domain, unsigned long iova,
 			 struct scatterlist *sg, unsigned int nents, int prot);
 	phys_addr_t (*iova_to_phys)(struct iommu_domain *domain, dma_addr_t iova);
+	phys_addr_t (*iova_to_phys_hard)(struct iommu_domain *domain,
+					 dma_addr_t iova);
 	int (*add_device)(struct device *dev);
 	void (*remove_device)(struct device *dev);
 	struct iommu_group *(*device_group)(struct device *dev);
@@ -186,11 +225,25 @@
 	int (*domain_set_windows)(struct iommu_domain *domain, u32 w_count);
 	/* Get the numer of window per domain */
 	u32 (*domain_get_windows)(struct iommu_domain *domain);
+	int (*dma_supported)(struct iommu_domain *domain, struct device *dev,
+			     u64 mask);
+	void (*trigger_fault)(struct iommu_domain *domain, unsigned long flags);
+	unsigned long (*reg_read)(struct iommu_domain *domain,
+				  unsigned long offset);
+	void (*reg_write)(struct iommu_domain *domain, unsigned long val,
+			  unsigned long offset);
+	void (*tlbi_domain)(struct iommu_domain *domain);
+	int (*enable_config_clocks)(struct iommu_domain *domain);
+	void (*disable_config_clocks)(struct iommu_domain *domain);
+	uint64_t (*iova_to_pte)(struct iommu_domain *domain,
+			 dma_addr_t iova);
 
 #ifdef CONFIG_OF_IOMMU
 	int (*of_xlate)(struct device *dev, struct of_phandle_args *args);
 #endif
 
+	unsigned long (*get_pgsize_bitmap)(struct iommu_domain *domain);
+	bool (*is_iova_coherent)(struct iommu_domain *domain, dma_addr_t iova);
 	unsigned long pgsize_bitmap;
 	void *priv;
 };
@@ -212,17 +265,31 @@
 			       struct device *dev);
 extern void iommu_detach_device(struct iommu_domain *domain,
 				struct device *dev);
+extern size_t iommu_pgsize(unsigned long pgsize_bitmap,
+			   unsigned long addr_merge, size_t size);
 extern struct iommu_domain *iommu_get_domain_for_dev(struct device *dev);
 extern int iommu_map(struct iommu_domain *domain, unsigned long iova,
 		     phys_addr_t paddr, size_t size, int prot);
 extern size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova,
 		       size_t size);
+extern int iommu_unmap_range(struct iommu_domain *domain, unsigned int iova,
+		      unsigned int len);
 extern size_t default_iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
 				struct scatterlist *sg,unsigned int nents,
 				int prot);
 extern phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova);
+extern phys_addr_t iommu_iova_to_phys_hard(struct iommu_domain *domain,
+					   dma_addr_t iova);
+extern bool iommu_is_iova_coherent(struct iommu_domain *domain,
+				dma_addr_t iova);
 extern void iommu_set_fault_handler(struct iommu_domain *domain,
 			iommu_fault_handler_t handler, void *token);
+extern void iommu_trigger_fault(struct iommu_domain *domain,
+				unsigned long flags);
+extern unsigned long iommu_reg_read(struct iommu_domain *domain,
+				    unsigned long offset);
+extern void iommu_reg_write(struct iommu_domain *domain, unsigned long offset,
+			    unsigned long val);
 
 extern void iommu_get_dm_regions(struct device *dev, struct list_head *list);
 extern void iommu_put_dm_regions(struct device *dev, struct list_head *list);
@@ -269,6 +336,9 @@
 				      phys_addr_t offset, u64 size,
 				      int prot);
 extern void iommu_domain_window_disable(struct iommu_domain *domain, u32 wnd_nr);
+
+extern uint64_t iommu_iova_to_pte(struct iommu_domain *domain,
+	    dma_addr_t iova);
 /**
  * report_iommu_fault() - report about an IOMMU fault to the IOMMU framework
  * @domain: the iommu domain where the fault has happened
@@ -292,6 +362,11 @@
  * Specifically, -ENOSYS is returned if a fault handler isn't installed
  * (though fault handlers can also return -ENOSYS, in case they want to
  * elicit the default behavior of the IOMMU drivers).
+ *
+ * A client fault handler returns -EBUSY to signal to the IOMMU driver
+ * that the client will take responsibility for any further fault
+ * handling, including clearing fault status registers or retrying
+ * the faulting transaction.
  */
 static inline int report_iommu_fault(struct iommu_domain *domain,
 		struct device *dev, unsigned long iova, int flags)
@@ -314,14 +389,41 @@
 				  unsigned long iova, struct scatterlist *sg,
 				  unsigned int nents, int prot)
 {
-	return domain->ops->map_sg(domain, iova, sg, nents, prot);
+	size_t ret;
+
+	trace_map_sg_start(iova, nents);
+	ret = domain->ops->map_sg(domain, iova, sg, nents, prot);
+	trace_map_sg_end(iova, nents);
+	return ret;
 }
 
+extern int iommu_dma_supported(struct iommu_domain *domain, struct device *dev,
+			       u64 mask);
+
 /* PCI device grouping function */
 extern struct iommu_group *pci_device_group(struct device *dev);
 /* Generic device grouping function */
 extern struct iommu_group *generic_device_group(struct device *dev);
 
+static inline void iommu_tlbiall(struct iommu_domain *domain)
+{
+	if (domain->ops->tlbi_domain)
+		domain->ops->tlbi_domain(domain);
+}
+
+static inline int iommu_enable_config_clocks(struct iommu_domain *domain)
+{
+	if (domain->ops->enable_config_clocks)
+		return domain->ops->enable_config_clocks(domain);
+	return 0;
+}
+
+static inline void iommu_disable_config_clocks(struct iommu_domain *domain)
+{
+	if (domain->ops->disable_config_clocks)
+		domain->ops->disable_config_clocks(domain);
+}
+
 #else /* CONFIG_IOMMU_API */
 
 struct iommu_ops {};
@@ -379,6 +481,12 @@
 	return -ENODEV;
 }
 
+static inline int iommu_unmap_range(struct iommu_domain *domain,
+				unsigned int iova, unsigned int len)
+{
+	return -ENODEV;
+}
+
 static inline size_t iommu_map_sg(struct iommu_domain *domain,
 				  unsigned long iova, struct scatterlist *sg,
 				  unsigned int nents, int prot)
@@ -403,11 +511,39 @@
 	return 0;
 }
 
+static inline phys_addr_t iommu_iova_to_phys_hard(struct iommu_domain *domain,
+						  dma_addr_t iova)
+{
+	return 0;
+}
+
+static inline bool iommu_is_iova_coherent(struct iommu_domain *domain,
+					  dma_addr_t iova)
+{
+	return false;
+}
+
 static inline void iommu_set_fault_handler(struct iommu_domain *domain,
 				iommu_fault_handler_t handler, void *token)
 {
 }
 
+static inline void iommu_trigger_fault(struct iommu_domain *domain,
+				       unsigned long flags)
+{
+}
+
+static inline unsigned long iommu_reg_read(struct iommu_domain *domain,
+					   unsigned long offset)
+{
+	return 0;
+}
+
+static inline void iommu_reg_write(struct iommu_domain *domain,
+				   unsigned long val, unsigned long offset)
+{
+}
+
 static inline void iommu_get_dm_regions(struct device *dev,
 					struct list_head *list)
 {
@@ -532,6 +668,25 @@
 {
 }
 
+static inline int iommu_dma_supported(struct iommu_domain *domain,
+				      struct device *dev, u64 mask)
+{
+	return -EINVAL;
+}
+
+static inline void iommu_tlbiall(struct iommu_domain *domain)
+{
+}
+
+static inline int iommu_enable_config_clocks(struct iommu_domain *domain)
+{
+	return 0;
+}
+
+static inline void iommu_disable_config_clocks(struct iommu_domain *domain)
+{
+}
+
 #endif /* CONFIG_IOMMU_API */
 
 #endif /* __LINUX_IOMMU_H */
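[Editorial note] A hedged debugging sketch of the optional new ops; the register offset 0x0 is illustrative. The inline wrappers above make these calls safe even when a driver leaves the ops unimplemented.

static void dump_one_iommu_reg(struct iommu_domain *domain)
{
	if (iommu_enable_config_clocks(domain))
		return;

	pr_info("IOMMU reg 0x0 = 0x%lx\n", iommu_reg_read(domain, 0x0));
	iommu_disable_config_clocks(domain);
}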
diff -ruw linux-4.4.115/include/linux/ipv6.h linux-4.4.115-fbx/include/linux/ipv6.h
--- linux-4.4.115/include/linux/ipv6.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/linux/ipv6.h	2019-01-22 16:16:28.259289475 +0100
@@ -18,6 +18,7 @@
 	__s32		dad_transmits;
 	__s32		rtr_solicits;
 	__s32		rtr_solicit_interval;
+	__s32		rtr_solicit_max_interval;
 	__s32		rtr_solicit_delay;
 	__s32		force_mld_version;
 	__s32		mldv1_unsolicited_report_interval;
@@ -36,9 +37,11 @@
 	__s32		accept_ra_rtr_pref;
 	__s32		rtr_probe_interval;
 #ifdef CONFIG_IPV6_ROUTE_INFO
+	__s32		accept_ra_rt_info_min_plen;
 	__s32		accept_ra_rt_info_max_plen;
 #endif
 #endif
+	__s32		accept_ra_rt_table;
 	__s32		proxy_ndp;
 	__s32		accept_source_route;
 	__s32		accept_ra_from_local;
@@ -60,6 +63,7 @@
 		struct in6_addr secret;
 	} stable_secret;
 	__s32		use_oif_addrs_only;
+	__s32		accept_ra_prefix_route;
 	void		*sysctl;
 };
 
diff -ruw linux-4.4.115/include/linux/irqchip/arm-gic.h linux-4.4.115-fbx/include/linux/irqchip/arm-gic.h
--- linux-4.4.115/include/linux/irqchip/arm-gic.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/linux/irqchip/arm-gic.h	2019-01-22 16:16:28.259289475 +0100
@@ -100,6 +100,11 @@
 
 struct device_node;
 
+extern struct irq_chip gic_arch_extn;
+
+void gic_set_irqchip_flags(unsigned long flags);
+void gic_init_bases(unsigned int, int, void __iomem *, void __iomem *,
+		    u32 offset, struct device_node *);
 void gic_cascade_irq(unsigned int gic_nr, unsigned int irq);
 int gic_cpu_if_down(unsigned int gic_nr);
 
diff -ruw linux-4.4.115/include/linux/irq.h linux-4.4.115-fbx/include/linux/irq.h
--- linux-4.4.115/include/linux/irq.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/linux/irq.h	2019-01-22 16:16:28.259289475 +0100
@@ -73,6 +73,7 @@
  *				  it from the spurious interrupt detection
  *				  mechanism and from core side polling.
  * IRQ_DISABLE_UNLAZY		- Disable lazy irq disable
+ * IRQ_AFFINITY_MANAGED		- Affinity is auto-managed by the kernel
  */
 enum {
 	IRQ_TYPE_NONE		= 0x00000000,
@@ -99,13 +100,14 @@
 	IRQ_PER_CPU_DEVID	= (1 << 17),
 	IRQ_IS_POLLED		= (1 << 18),
 	IRQ_DISABLE_UNLAZY	= (1 << 19),
+	IRQ_AFFINITY_MANAGED	= (1 << 21),
 };
 
 #define IRQF_MODIFY_MASK	\
 	(IRQ_TYPE_SENSE_MASK | IRQ_NOPROBE | IRQ_NOREQUEST | \
 	 IRQ_NOAUTOEN | IRQ_MOVE_PCNTXT | IRQ_LEVEL | IRQ_NO_BALANCING | \
 	 IRQ_PER_CPU | IRQ_NESTED_THREAD | IRQ_NOTHREAD | IRQ_PER_CPU_DEVID | \
-	 IRQ_IS_POLLED | IRQ_DISABLE_UNLAZY)
+	 IRQ_IS_POLLED | IRQ_DISABLE_UNLAZY | IRQ_AFFINITY_MANAGED)
 
 #define IRQ_NO_BALANCING_MASK	(IRQ_PER_CPU | IRQ_NO_BALANCING)
 
@@ -191,6 +193,7 @@
  * IRQD_IRQ_INPROGRESS		- In progress state of the interrupt
  * IRQD_WAKEUP_ARMED		- Wakeup mode armed
  * IRQD_FORWARDED_TO_VCPU	- The interrupt is forwarded to a VCPU
+ * IRQD_AFFINITY_MANAGED	- Affinity is auto-managed by the kernel
  */
 enum {
 	IRQD_TRIGGER_MASK		= 0xf,
@@ -206,6 +209,7 @@
 	IRQD_IRQ_INPROGRESS		= (1 << 18),
 	IRQD_WAKEUP_ARMED		= (1 << 19),
 	IRQD_FORWARDED_TO_VCPU		= (1 << 20),
+	IRQD_AFFINITY_MANAGED		= (1 << 21),
 };
 
 #define __irqd_to_state(d)		((d)->common->state_use_accessors)
@@ -299,6 +303,11 @@
 	__irqd_to_state(d) &= ~IRQD_FORWARDED_TO_VCPU;
 }
 
+static inline bool irqd_affinity_is_managed(struct irq_data *d)
+{
+	return __irqd_to_state(d) & IRQD_AFFINITY_MANAGED;
+}
+
 static inline irq_hw_number_t irqd_to_hwirq(struct irq_data *d)
 {
 	return d->hwirq;
diff -ruw linux-4.4.115/include/linux/kasan.h linux-4.4.115-fbx/include/linux/kasan.h
--- linux-4.4.115/include/linux/kasan.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/linux/kasan.h	2019-01-22 16:16:28.263289511 +0100
@@ -1,6 +1,7 @@
 #ifndef _LINUX_KASAN_H
 #define _LINUX_KASAN_H
 
+#include <linux/sched.h>
 #include <linux/types.h>
 
 struct kmem_cache;
@@ -13,7 +14,6 @@
 
 #include <asm/kasan.h>
 #include <asm/pgtable.h>
-#include <linux/sched.h>
 
 extern unsigned char kasan_zero_page[PAGE_SIZE];
 extern pte_t kasan_zero_pte[PTRS_PER_PTE];
@@ -30,67 +30,102 @@
 }
 
 /* Enable reporting bugs after kasan_disable_current() */
-static inline void kasan_enable_current(void)
-{
-	current->kasan_depth++;
-}
+extern void kasan_enable_current(void);
 
 /* Disable reporting bugs for current task */
-static inline void kasan_disable_current(void)
-{
-	current->kasan_depth--;
-}
+extern void kasan_disable_current(void);
 
 void kasan_unpoison_shadow(const void *address, size_t size);
 
+void kasan_unpoison_task_stack(struct task_struct *task);
+void kasan_unpoison_stack_above_sp_to(const void *watermark);
+
 void kasan_alloc_pages(struct page *page, unsigned int order);
 void kasan_free_pages(struct page *page, unsigned int order);
 
+void kasan_cache_create(struct kmem_cache *cache, size_t *size,
+			unsigned long *flags);
+void kasan_cache_shrink(struct kmem_cache *cache);
+void kasan_cache_shutdown(struct kmem_cache *cache);
+
 void kasan_poison_slab(struct page *page);
 void kasan_unpoison_object_data(struct kmem_cache *cache, void *object);
 void kasan_poison_object_data(struct kmem_cache *cache, void *object);
+void kasan_init_slab_obj(struct kmem_cache *cache, const void *object);
 
-void kasan_kmalloc_large(const void *ptr, size_t size);
+void kasan_kmalloc_large(const void *ptr, size_t size, gfp_t flags);
 void kasan_kfree_large(const void *ptr);
-void kasan_kfree(void *ptr);
-void kasan_kmalloc(struct kmem_cache *s, const void *object, size_t size);
-void kasan_krealloc(const void *object, size_t new_size);
-
-void kasan_slab_alloc(struct kmem_cache *s, void *object);
-void kasan_slab_free(struct kmem_cache *s, void *object);
+void kasan_poison_kfree(void *ptr);
+void kasan_kmalloc(struct kmem_cache *s, const void *object, size_t size,
+		  gfp_t flags);
+void kasan_krealloc(const void *object, size_t new_size, gfp_t flags);
+
+void kasan_slab_alloc(struct kmem_cache *s, void *object, gfp_t flags);
+bool kasan_slab_free(struct kmem_cache *s, void *object);
+
+struct kasan_cache {
+	int alloc_meta_offset;
+	int free_meta_offset;
+};
 
 int kasan_module_alloc(void *addr, size_t size);
 void kasan_free_shadow(const struct vm_struct *vm);
 
+size_t ksize(const void *);
+static inline void kasan_unpoison_slab(const void *ptr) { ksize(ptr); }
+size_t kasan_metadata_size(struct kmem_cache *cache);
+
+bool kasan_save_enable_multi_shot(void);
+void kasan_restore_multi_shot(bool enabled);
+
 #else /* CONFIG_KASAN */
 
 static inline void kasan_unpoison_shadow(const void *address, size_t size) {}
 
+static inline void kasan_unpoison_task_stack(struct task_struct *task) {}
+static inline void kasan_unpoison_stack_above_sp_to(const void *watermark) {}
+
 static inline void kasan_enable_current(void) {}
 static inline void kasan_disable_current(void) {}
 
 static inline void kasan_alloc_pages(struct page *page, unsigned int order) {}
 static inline void kasan_free_pages(struct page *page, unsigned int order) {}
 
+static inline void kasan_cache_create(struct kmem_cache *cache,
+				      size_t *size,
+				      unsigned long *flags) {}
+static inline void kasan_cache_shrink(struct kmem_cache *cache) {}
+static inline void kasan_cache_shutdown(struct kmem_cache *cache) {}
+
 static inline void kasan_poison_slab(struct page *page) {}
 static inline void kasan_unpoison_object_data(struct kmem_cache *cache,
 					void *object) {}
 static inline void kasan_poison_object_data(struct kmem_cache *cache,
 					void *object) {}
+static inline void kasan_init_slab_obj(struct kmem_cache *cache,
+				const void *object) {}
 
-static inline void kasan_kmalloc_large(void *ptr, size_t size) {}
+static inline void kasan_kmalloc_large(void *ptr, size_t size, gfp_t flags) {}
 static inline void kasan_kfree_large(const void *ptr) {}
-static inline void kasan_kfree(void *ptr) {}
+static inline void kasan_poison_kfree(void *ptr) {}
 static inline void kasan_kmalloc(struct kmem_cache *s, const void *object,
-				size_t size) {}
-static inline void kasan_krealloc(const void *object, size_t new_size) {}
-
-static inline void kasan_slab_alloc(struct kmem_cache *s, void *object) {}
-static inline void kasan_slab_free(struct kmem_cache *s, void *object) {}
+				size_t size, gfp_t flags) {}
+static inline void kasan_krealloc(const void *object, size_t new_size,
+				 gfp_t flags) {}
+
+static inline void kasan_slab_alloc(struct kmem_cache *s, void *object,
+				   gfp_t flags) {}
+static inline bool kasan_slab_free(struct kmem_cache *s, void *object)
+{
+	return false;
+}
 
 static inline int kasan_module_alloc(void *addr, size_t size) { return 0; }
 static inline void kasan_free_shadow(const struct vm_struct *vm) {}
 
+static inline void kasan_unpoison_slab(const void *ptr) { }
+static inline size_t kasan_metadata_size(struct kmem_cache *cache) { return 0; }
+
 #endif /* CONFIG_KASAN */
 
 #endif /* LINUX_KASAN_H */
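[Editorial note] A hedged sketch of why the stack hooks exist: when a task's stack is about to be reused, poison left by a deeper call chain must be cleared. With CONFIG_KASAN=n the stubs above compile away, so callers need no #ifdefs.

static void prepare_stack_switch(struct task_struct *tsk)
{
	/* clear stale poison before the stack region is reused */
	kasan_unpoison_task_stack(tsk);
}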
diff -ruw linux-4.4.115/include/linux/kbuild.h linux-4.4.115-fbx/include/linux/kbuild.h
--- linux-4.4.115/include/linux/kbuild.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/linux/kbuild.h	2019-01-22 16:16:28.263289511 +0100
@@ -2,14 +2,14 @@
 #define __LINUX_KBUILD_H
 
 #define DEFINE(sym, val) \
-        asm volatile("\n->" #sym " %0 " #val : : "i" (val))
+	asm volatile("\n.ascii \"->" #sym " %0 " #val "\"" : : "i" (val))
 
-#define BLANK() asm volatile("\n->" : : )
+#define BLANK() asm volatile("\n.ascii \"->\"" : : )
 
 #define OFFSET(sym, str, mem) \
 	DEFINE(sym, offsetof(struct str, mem))
 
 #define COMMENT(x) \
-	asm volatile("\n->#" x)
+	asm volatile("\n.ascii \"->#" x "\"")
 
 #endif
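[Editorial note] An illustration of what the reworked DEFINE() emits, assuming an asm-offsets.c-style user; the symbol and value are illustrative and the immediate spelling ($8 here) is arch-dependent.

int main(void)
{
	DEFINE(TSK_STACK, offsetof(struct task_struct, stack));
	return 0;
}

/* The compiler's .s output now carries a greppable string constant:
 *	.ascii "->TSK_STACK $8 offsetof(struct task_struct, stack)"
 * instead of a bare "->TSK_STACK ..." line, which is not valid
 * assembly and trips up newer assemblers.
 */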
diff -ruw linux-4.4.115/include/linux/kdb.h linux-4.4.115-fbx/include/linux/kdb.h
--- linux-4.4.115/include/linux/kdb.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/linux/kdb.h	2019-01-22 16:16:28.267289547 +0100
@@ -177,7 +177,7 @@
 static inline
 int kdb_process_cpu(const struct task_struct *p)
 {
-	unsigned int cpu = task_thread_info(p)->cpu;
+	unsigned int cpu = task_cpu(p);
 	if (cpu > num_possible_cpus())
 		cpu = 0;
 	return cpu;
diff -ruw linux-4.4.115/include/linux/kernel.h linux-4.4.115-fbx/include/linux/kernel.h
--- linux-4.4.115/include/linux/kernel.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/linux/kernel.h	2019-01-22 16:16:28.267289547 +0100
@@ -53,6 +53,13 @@
 
 #define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]) + __must_be_array(arr))
 
+#define u64_to_user_ptr(x) (		\
+{					\
+	typecheck(u64, x);		\
+	(void __user *)(uintptr_t)x;	\
+}					\
+)
+
 /*
  * This looks more complex than it should be. But we need to
  * get the type for the ~ right in round_down (it needs to be
diff -ruw linux-4.4.115/include/linux/kobject.h linux-4.4.115-fbx/include/linux/kobject.h
--- linux-4.4.115/include/linux/kobject.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/linux/kobject.h	2019-10-29 09:26:25.445220813 +0100
@@ -29,7 +29,7 @@
 #include <linux/workqueue.h>
 
 #define UEVENT_HELPER_PATH_LEN		256
-#define UEVENT_NUM_ENVP			32	/* number of env pointers */
+#define UEVENT_NUM_ENVP			64	/* number of env pointers */
 #define UEVENT_BUFFER_SIZE		2048	/* buffer for the variables */
 
 #ifdef CONFIG_UEVENT_HELPER
diff -ruw linux-4.4.115/include/linux/kref.h linux-4.4.115-fbx/include/linux/kref.h
--- linux-4.4.115/include/linux/kref.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/linux/kref.h	2019-01-22 16:16:28.267289547 +0100
@@ -19,6 +19,7 @@
 #include <linux/atomic.h>
 #include <linux/kernel.h>
 #include <linux/mutex.h>
+#include <linux/spinlock.h>
 
 struct kref {
 	atomic_t refcount;
@@ -98,6 +99,38 @@
 	return kref_sub(kref, 1, release);
 }
 
+/**
+ * kref_put_spinlock_irqsave - decrement refcount for object.
+ * @kref: object.
+ * @release: pointer to the function that will clean up the object when the
+ *	     last reference to the object is released.
+ *	     This pointer is required, and it is not acceptable to pass kfree
+ *	     in as this function.
+ * @lock: lock to take in release case
+ *
+ * Behaves identical to kref_put with one exception.  If the reference count
+ * drops to zero, the lock will be taken atomically wrt dropping the reference
+ * count.  The release function has to call spin_unlock() without _irqrestore.
+ */
+static inline int kref_put_spinlock_irqsave(struct kref *kref,
+		void (*release)(struct kref *kref),
+		spinlock_t *lock)
+{
+	unsigned long flags;
+
+	WARN_ON(release == NULL);
+	if (atomic_add_unless(&kref->refcount, -1, 1))
+		return 0;
+	spin_lock_irqsave(lock, flags);
+	if (atomic_dec_and_test(&kref->refcount)) {
+		release(kref);
+		local_irq_restore(flags);
+		return 1;
+	}
+	spin_unlock_irqrestore(lock, flags);
+	return 0;
+}
+
 static inline int kref_put_mutex(struct kref *kref,
 				 void (*release)(struct kref *kref),
 				 struct mutex *lock)
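[Editorial note] A hedged usage sketch of kref_put_spinlock_irqsave(): the release callback runs with the lock held and interrupts disabled, and must drop the lock with plain spin_unlock() because the helper restores the IRQ flags itself. The object type is illustrative.

struct my_obj {
	struct kref		kref;
	spinlock_t		*list_lock;
	struct list_head	node;
};

static void my_obj_release(struct kref *kref)
{
	struct my_obj *o = container_of(kref, struct my_obj, kref);

	list_del(&o->node);
	spin_unlock(o->list_lock);	/* no _irqrestore here */
	kfree(o);
}

static void my_obj_put(struct my_obj *o)
{
	kref_put_spinlock_irqsave(&o->kref, my_obj_release, o->list_lock);
}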
diff -ruw linux-4.4.115/include/linux/ksm.h linux-4.4.115-fbx/include/linux/ksm.h
--- linux-4.4.115/include/linux/ksm.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/linux/ksm.h	2019-01-22 16:16:28.267289547 +0100
@@ -43,8 +43,7 @@
 static inline void set_page_stable_node(struct page *page,
 					struct stable_node *stable_node)
 {
-	page->mapping = (void *)stable_node +
-				(PAGE_MAPPING_ANON | PAGE_MAPPING_KSM);
+	page->mapping = (void *)((unsigned long)stable_node | PAGE_MAPPING_KSM);
 }
 
 /*
diff -ruw linux-4.4.115/include/linux/kthread.h linux-4.4.115-fbx/include/linux/kthread.h
--- linux-4.4.115/include/linux/kthread.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/linux/kthread.h	2019-01-22 16:16:28.267289547 +0100
@@ -75,6 +75,8 @@
 	struct list_head	node;
 	kthread_work_func_t	func;
 	struct kthread_worker	*worker;
+	/* Number of canceling calls that are running at the moment. */
+	int			canceling;
 };
 
 #define KTHREAD_WORKER_INIT(worker)	{				\
@@ -129,4 +131,6 @@
 void flush_kthread_work(struct kthread_work *work);
 void flush_kthread_worker(struct kthread_worker *worker);
 
+bool kthread_cancel_work_sync(struct kthread_work *work);
+
 #endif /* _LINUX_KTHREAD_H */
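[Editorial note] A hedged teardown sketch using the new cancel helper together with the existing flush API; stop_my_work() is illustrative.

static void stop_my_work(struct kthread_worker *worker,
			 struct kthread_work *work)
{
	kthread_cancel_work_sync(work);	/* waits if the work is running */
	flush_kthread_worker(worker);	/* drain anything already queued */
}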
diff -ruw linux-4.4.115/include/linux/leds.h linux-4.4.115-fbx/include/linux/leds.h
--- linux-4.4.115/include/linux/leds.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/linux/leds.h	2019-01-22 16:16:28.271289584 +0100
@@ -35,6 +35,7 @@
 	const char		*name;
 	enum led_brightness	 brightness;
 	enum led_brightness	 max_brightness;
+	enum led_brightness	 usr_brightness_req;
 	int			 flags;
 
 	/* Lower 16 bits reflect status */
@@ -48,6 +49,7 @@
 #define SET_BRIGHTNESS_ASYNC	(1 << 21)
 #define SET_BRIGHTNESS_SYNC	(1 << 22)
 #define LED_DEV_CAP_FLASH	(1 << 23)
+#define LED_KEEP_TRIGGER	(1 << 24)
 
 	/* Set LED brightness level */
 	/* Must not sleep, use a workqueue if needed */
diff -ruw linux-4.4.115/include/linux/libfdt_env.h linux-4.4.115-fbx/include/linux/libfdt_env.h
--- linux-4.4.115/include/linux/libfdt_env.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/linux/libfdt_env.h	2019-01-22 16:16:28.271289584 +0100
@@ -1,6 +1,7 @@
 #ifndef _LIBFDT_ENV_H
 #define _LIBFDT_ENV_H
 
+#include <linux/kernel.h>
 #include <linux/string.h>
 
 #include <asm/byteorder.h>
diff -ruw linux-4.4.115/include/linux/lsm_hooks.h linux-4.4.115-fbx/include/linux/lsm_hooks.h
--- linux-4.4.115/include/linux/lsm_hooks.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/linux/lsm_hooks.h	2019-01-22 16:16:28.275289620 +0100
@@ -1384,6 +1384,8 @@
 					size_t *len);
 	int (*inode_create)(struct inode *dir, struct dentry *dentry,
 				umode_t mode);
+	int (*inode_post_create)(struct inode *dir, struct dentry *dentry,
+				umode_t mode);
 	int (*inode_link)(struct dentry *old_dentry, struct inode *dir,
 				struct dentry *new_dentry);
 	int (*inode_unlink)(struct inode *dir, struct dentry *dentry);
@@ -1440,6 +1442,7 @@
 					struct fown_struct *fown, int sig);
 	int (*file_receive)(struct file *file);
 	int (*file_open)(struct file *file, const struct cred *cred);
+	int (*file_close)(struct file *file);
 
 	int (*task_create)(unsigned long clone_flags);
 	void (*task_free)(struct task_struct *task);
@@ -1666,6 +1669,7 @@
 	struct list_head inode_free_security;
 	struct list_head inode_init_security;
 	struct list_head inode_create;
+	struct list_head inode_post_create;
 	struct list_head inode_link;
 	struct list_head inode_unlink;
 	struct list_head inode_symlink;
@@ -1702,6 +1706,7 @@
 	struct list_head file_send_sigiotask;
 	struct list_head file_receive;
 	struct list_head file_open;
+	struct list_head file_close;
 	struct list_head task_create;
 	struct list_head task_free;
 	struct list_head cred_alloc_blank;
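[Editorial note] A hedged sketch of an LSM wiring up the two new hooks; the hook bodies and names are illustrative, while LSM_HOOK_INIT() and security_add_hooks() are the existing registration helpers from this header.

static int my_inode_post_create(struct inode *dir, struct dentry *dentry,
				umode_t mode)
{
	return 0;	/* e.g. label the freshly created inode */
}

static int my_file_close(struct file *file)
{
	return 0;	/* e.g. tear down per-file state */
}

static struct security_hook_list my_hooks[] = {
	LSM_HOOK_INIT(inode_post_create, my_inode_post_create),
	LSM_HOOK_INIT(file_close, my_file_close),
};

static int __init my_lsm_init(void)
{
	security_add_hooks(my_hooks, ARRAY_SIZE(my_hooks));
	return 0;
}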
diff -ruw linux-4.4.115/include/linux/memblock.h linux-4.4.115-fbx/include/linux/memblock.h
--- linux-4.4.115/include/linux/memblock.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/linux/memblock.h	2019-01-22 16:16:28.279289656 +0100
@@ -25,6 +25,7 @@
 	MEMBLOCK_NONE		= 0x0,	/* No special request */
 	MEMBLOCK_HOTPLUG	= 0x1,	/* hotpluggable region */
 	MEMBLOCK_MIRROR		= 0x2,	/* mirrored region */
+	MEMBLOCK_NOMAP		= 0x4,	/* don't add to kernel direct mapping */
 };
 
 struct memblock_region {
@@ -82,7 +83,11 @@
 int memblock_mark_hotplug(phys_addr_t base, phys_addr_t size);
 int memblock_clear_hotplug(phys_addr_t base, phys_addr_t size);
 int memblock_mark_mirror(phys_addr_t base, phys_addr_t size);
+int memblock_mark_nomap(phys_addr_t base, phys_addr_t size);
+int memblock_clear_nomap(phys_addr_t base, phys_addr_t size);
 ulong choose_memblock_flags(void);
+unsigned long memblock_region_resize_late_begin(void);
+void memblock_region_resize_late_end(unsigned long);
 
 /* Low level functions */
 int memblock_add_range(struct memblock_type *type,
@@ -184,6 +189,11 @@
 	return m->flags & MEMBLOCK_MIRROR;
 }
 
+static inline bool memblock_is_nomap(struct memblock_region *m)
+{
+	return m->flags & MEMBLOCK_NOMAP;
+}
+
 #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
 int memblock_search_pfn_nid(unsigned long pfn, unsigned long *start_pfn,
 			    unsigned long  *end_pfn);
@@ -319,7 +329,9 @@
 phys_addr_t memblock_end_of_DRAM(void);
 void memblock_enforce_memory_limit(phys_addr_t memory_limit);
 int memblock_is_memory(phys_addr_t addr);
+int memblock_is_map_memory(phys_addr_t addr);
 int memblock_is_region_memory(phys_addr_t base, phys_addr_t size);
+bool memblock_overlaps_memory(phys_addr_t base, phys_addr_t size);
 int memblock_is_reserved(phys_addr_t addr);
 bool memblock_is_region_reserved(phys_addr_t base, phys_addr_t size);
 
@@ -391,6 +403,11 @@
 	     region < (memblock.memblock_type.regions + memblock.memblock_type.cnt);	\
 	     region++)
 
+#define for_each_memblock_rev(memblock_type, region)	\
+	for (region = memblock.memblock_type.regions + \
+			memblock.memblock_type.cnt - 1;	\
+	     region >= memblock.memblock_type.regions;	\
+	     region--)
 
 #ifdef CONFIG_ARCH_DISCARD_MEMBLOCK
 #define __init_memblock __meminit
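[Editorial note] A hedged early-boot sketch of the new NOMAP flag: memory that memblock should track but that must stay out of the kernel's linear mapping (firmware or secure carveouts). The base/size values are caller-supplied and illustrative.

static void __init reserve_secure_carveout(phys_addr_t base, phys_addr_t size)
{
	memblock_add(base, size);	 /* known to memblock ... */
	memblock_mark_nomap(base, size); /* ... but never direct-mapped */

	WARN_ON(memblock_is_map_memory(base));
}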
diff -ruw linux-4.4.115/include/linux/memory_hotplug.h linux-4.4.115-fbx/include/linux/memory_hotplug.h
--- linux-4.4.115/include/linux/memory_hotplug.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/linux/memory_hotplug.h	2019-01-22 16:16:28.279289656 +0100
@@ -89,7 +89,7 @@
 	unsigned long *valid_start, unsigned long *valid_end);
 extern void __offline_isolated_pages(unsigned long, unsigned long);
 
-typedef void (*online_page_callback_t)(struct page *page);
+typedef int (*online_page_callback_t)(struct page *page);
 
 extern int set_online_page_callback(online_page_callback_t callback);
 extern int restore_online_page_callback(online_page_callback_t callback);
diff -ruw linux-4.4.115/include/linux/migrate.h linux-4.4.115-fbx/include/linux/migrate.h
--- linux-4.4.115/include/linux/migrate.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/linux/migrate.h	2019-10-29 09:26:25.453220891 +0100
@@ -23,9 +23,13 @@
 	MR_SYSCALL,		/* also applies to cpusets */
 	MR_MEMPOLICY_MBIND,
 	MR_NUMA_MISPLACED,
-	MR_CMA
+	MR_CMA,
+	MR_TYPES
 };
 
+/* In mm/debug.c; also keep in sync with include/trace/events/migrate.h */
+extern char *migrate_reason_names[MR_TYPES];
+
 #ifdef CONFIG_MIGRATION
 
 extern void putback_movable_pages(struct list_head *l);
@@ -33,6 +37,8 @@
 			struct page *, struct page *, enum migrate_mode);
 extern int migrate_pages(struct list_head *l, new_page_t new, free_page_t free,
 		unsigned long private, enum migrate_mode mode, int reason);
+extern bool isolate_movable_page(struct page *page, isolate_mode_t mode);
+extern void putback_movable_page(struct page *page);
 
 extern int migrate_prep(void);
 extern int migrate_prep_local(void);
@@ -65,6 +71,21 @@
 
 #endif /* CONFIG_MIGRATION */
 
+#ifdef CONFIG_COMPACTION
+extern int PageMovable(struct page *page);
+extern void __SetPageMovable(struct page *page, struct address_space *mapping);
+extern void __ClearPageMovable(struct page *page);
+#else
+static inline int PageMovable(struct page *page) { return 0; }
+static inline void __SetPageMovable(struct page *page,
+				struct address_space *mapping)
+{
+}
+static inline void __ClearPageMovable(struct page *page)
+{
+}
+#endif
+
 #ifdef CONFIG_NUMA_BALANCING
 extern bool pmd_trans_migrating(pmd_t pmd);
 extern int migrate_misplaced_page(struct page *page,
diff -ruw linux-4.4.115/include/linux/mmc/card.h linux-4.4.115-fbx/include/linux/mmc/card.h
--- linux-4.4.115/include/linux/mmc/card.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/linux/mmc/card.h	2019-01-22 16:16:28.307289910 +0100
@@ -12,7 +12,11 @@
 
 #include <linux/device.h>
 #include <linux/mmc/core.h>
+#include <linux/mmc/mmc.h>
 #include <linux/mod_devicetable.h>
+#include <linux/notifier.h>
+
+#define MMC_CARD_CMDQ_BLK_SIZE 512
 
 struct mmc_cid {
 	unsigned int		manfid;
@@ -52,6 +56,7 @@
 	u8			sec_feature_support;
 	u8			rel_sectors;
 	u8			rel_param;
+	bool			enhanced_rpmb_supported;
 	u8			part_config;
 	u8			cache_ctrl;
 	u8			rst_n_function;
@@ -83,11 +88,13 @@
 	bool			hpi;			/* HPI support bit */
 	unsigned int		hpi_cmd;		/* cmd used as HPI */
 	bool			bkops;		/* background support bit */
-	bool			man_bkops_en;	/* manual bkops enable bit */
+	u8			bkops_en;	/* bkops enable */
 	unsigned int            data_sector_size;       /* 512 bytes or 4KB */
 	unsigned int            data_tag_unit_size;     /* DATA TAG UNIT size */
 	unsigned int		boot_ro_lock;		/* ro lock support */
 	bool			boot_ro_lockable;
+	u8			raw_ext_csd_cmdq;	/* 15 */
+	u8			raw_ext_csd_cache_ctrl;	/* 33 */
 	bool			ffu_capable;	/* Firmware upgrade support */
 #define MMC_FIRMWARE_LEN 8
 	u8			fwrev[MMC_FIRMWARE_LEN];  /* FW version */
@@ -95,6 +102,10 @@
 	u8			raw_partition_support;	/* 160 */
 	u8			raw_rpmb_size_mult;	/* 168 */
 	u8			raw_erased_mem_count;	/* 181 */
+	u8			raw_ext_csd_bus_width;	/* 183 */
+	u8			strobe_support;		/* 184 */
+#define MMC_STROBE_SUPPORT	(1 << 0)
+	u8			raw_ext_csd_hs_timing;	/* 185 */
 	u8			raw_ext_csd_structure;	/* 194 */
 	u8			raw_card_type;		/* 196 */
 	u8			raw_driver_strength;	/* 197 */
@@ -116,9 +127,19 @@
 	u8			raw_pwr_cl_ddr_52_195;	/* 238 */
 	u8			raw_pwr_cl_ddr_52_360;	/* 239 */
 	u8			raw_pwr_cl_ddr_200_360;	/* 253 */
+	u8			cache_flush_policy;	/* 240 */
+#define MMC_BKOPS_URGENCY_MASK 0x3
 	u8			raw_bkops_status;	/* 246 */
 	u8			raw_sectors[4];		/* 212 - 4 bytes */
+	u8			pre_eol_info;		/* 267 */
+	u8			device_life_time_est_typ_a;	/* 268 */
+	u8			device_life_time_est_typ_b;	/* 269 */
+	u8			cmdq_depth;		/* 307 */
+	u8			cmdq_support;		/* 308 */
+	u8			barrier_support;	/* 486 */
+	u8			barrier_en;
 
+	u8			fw_version;		/* 254 */
 	unsigned int            feature_support;
 #define MMC_DISCARD_FEATURE	BIT(0)                  /* CMD38 feature */
 };
@@ -189,7 +210,8 @@
 				wide_bus:1,
 				high_power:1,
 				high_speed:1,
-				disable_cd:1;
+				disable_cd:1,
+				async_intr_sup:1;
 };
 
 struct sdio_cis {
@@ -218,6 +240,28 @@
 	MMC_BLK_NEW_REQUEST,
 };
 
+enum mmc_packed_stop_reasons {
+	EXCEEDS_SEGMENTS = 0,
+	EXCEEDS_SECTORS,
+	WRONG_DATA_DIR,
+	FLUSH_OR_DISCARD,
+	EMPTY_QUEUE,
+	REL_WRITE,
+	THRESHOLD,
+	LARGE_SEC_ALIGN,
+	RANDOM,
+	FUA,
+	MAX_REASONS,
+};
+
+struct mmc_wr_pack_stats {
+	u32 *packing_events;
+	u32 pack_stop_reason[MAX_REASONS];
+	spinlock_t lock;
+	bool enabled;
+	bool print_in_read;
+};
+
 /* The number of MMC physical partitions.  These consist of:
  * boot partitions (2), general purpose partitions (4) and
  * RPMB partition (1) in MMC v4.4.
@@ -242,6 +286,62 @@
 #define MMC_BLK_DATA_AREA_RPMB	(1<<3)
 };
 
+enum {
+	MMC_BKOPS_NO_OP,
+	MMC_BKOPS_NOT_CRITICAL,
+	MMC_BKOPS_PERF_IMPACT,
+	MMC_BKOPS_CRITICAL,
+	MMC_BKOPS_NUM_SEVERITY_LEVELS,
+};
+
+/**
+ * struct mmc_bkops_stats - BKOPS statistics
+ * @lock: spinlock used to synchronize debugfs and runtime accesses
+ *	to this structure; there is no need to use the spin_lock_irq API
+ * @manual_start: number of times START_BKOPS was sent to the device
+ * @hpi: number of times HPI was sent to the device
+ * @auto_start: number of times AUTO_EN was set to 1
+ * @auto_stop: number of times AUTO_EN was set to 0
+ * @level: number of times the device reported the need for each level of
+ *	bkops handling
+ * @enabled: control over whether statistics should be gathered
+ *
+ * This structure is used to collect statistics regarding the bkops
+ * configuration and use-patterns. It is collected during runtime and can be
+ * shown to the user via a debugfs entry.
+ */
+struct mmc_bkops_stats {
+	spinlock_t	lock;
+	unsigned int	manual_start;
+	unsigned int	hpi;
+	unsigned int	auto_start;
+	unsigned int	auto_stop;
+	unsigned int	level[MMC_BKOPS_NUM_SEVERITY_LEVELS];
+	bool		enabled;
+};
+
+/**
+ * struct mmc_bkops_info - BKOPS data
+ * @stats: statistic information regarding bkops
+ * @needs_check: indication of whether we need to check with the device
+ *	whether it requires handling of BKOPS (CMD8)
+ * @needs_bkops: indication of whether we have to send START_BKOPS
+ *	to the device
+ * @retry_counter: number of times BKOPS handling has been retried
+ */
+struct mmc_bkops_info {
+	struct mmc_bkops_stats stats;
+	bool needs_check;
+	bool needs_bkops;
+	u32  retry_counter;
+};
+
+enum mmc_pon_type {
+	MMC_LONG_PON = 1,
+	MMC_SHRT_PON,
+};
+
+#define MMC_QUIRK_CMDQ_DELAY_BEFORE_DCMD 6 /* microseconds */
+
 /*
  * MMC device
  */
@@ -249,6 +349,10 @@
 	struct mmc_host		*host;		/* the host this device belongs to */
 	struct device		dev;		/* the device */
 	u32			ocr;		/* the current OCR setting */
+	unsigned long		clk_scaling_lowest;	/* lowest scalable
+							 * frequency */
+	unsigned long		clk_scaling_highest;	/* highest scalable
+							 * frequency */
 	unsigned int		rca;		/* relative card address of device */
 	unsigned int		type;		/* card type */
 #define MMC_TYPE_MMC		0		/* MMC card */
@@ -261,14 +365,17 @@
 #define MMC_STATE_BLOCKADDR	(1<<2)		/* card uses block-addressing */
 #define MMC_CARD_SDXC		(1<<3)		/* card is SDXC */
 #define MMC_CARD_REMOVED	(1<<4)		/* card has been removed */
-#define MMC_STATE_DOING_BKOPS	(1<<5)		/* card is doing BKOPS */
+#define MMC_STATE_DOING_BKOPS	(1<<5)		/* card is doing manual BKOPS */
 #define MMC_STATE_SUSPENDED	(1<<6)		/* card is suspended */
+#define MMC_STATE_CMDQ		(1<<12)         /* card is in cmd queue mode */
+#define MMC_STATE_AUTO_BKOPS	(1<<13)		/* card is doing auto BKOPS */
 	unsigned int		quirks; 	/* card quirks */
 #define MMC_QUIRK_LENIENT_FN0	(1<<0)		/* allow SDIO FN0 writes outside of the VS CCCR range */
 #define MMC_QUIRK_BLKSZ_FOR_BYTE_MODE (1<<1)	/* use func->cur_blksize */
 						/* for byte mode */
 #define MMC_QUIRK_NONSTD_SDIO	(1<<2)		/* non-standard SDIO card attached */
 						/* (missing CIA registers) */
+#define MMC_QUIRK_BROKEN_CLK_GATING (1<<3)	/* clock gating the sdio bus will make card fail */
 #define MMC_QUIRK_NONSTD_FUNC_IF (1<<4)		/* SDIO card has nonstd function interfaces */
 #define MMC_QUIRK_DISABLE_CD	(1<<5)		/* disconnect CD/DAT[3] resistor */
 #define MMC_QUIRK_INAND_CMD38	(1<<6)		/* iNAND devices have broken CMD38 */
@@ -279,7 +386,17 @@
 #define MMC_QUIRK_SEC_ERASE_TRIM_BROKEN (1<<10)	/* Skip secure for erase/trim */
 #define MMC_QUIRK_BROKEN_IRQ_POLLING	(1<<11)	/* Polling SDIO_CCCR_INTx could create a fake interrupt */
 #define MMC_QUIRK_TRIM_BROKEN	(1<<12)		/* Skip trim */
+#define MMC_QUIRK_INAND_DATA_TIMEOUT  (1<<13)   /* For incorrect data timeout */
+#define MMC_QUIRK_BROKEN_HPI (1 << 14)		/* For devices which get */
+						/* broken by the HPI feature */
+#define MMC_QUIRK_CACHE_DISABLE (1 << 15)	/* prevent cache enable */
+#define MMC_QUIRK_QCA6574_SETTINGS (1 << 16)	/* QCA6574 card settings */
+#define MMC_QUIRK_QCA9377_SETTINGS (1 << 17)	/* QCA9377 card settings */
+
 
+/* Make sure CMDQ is empty before queuing DCMD */
+#define MMC_QUIRK_CMDQ_EMPTY_BEFORE_DCMD (1 << 18)
 
 	unsigned int		erase_size;	/* erase size in sectors */
  	unsigned int		erase_shift;	/* if erase unit is power 2 */
@@ -313,6 +430,15 @@
 	struct dentry		*debugfs_root;
 	struct mmc_part	part[MMC_NUM_PHY_PARTITION]; /* physical partitions */
 	unsigned int    nr_parts;
+	unsigned int	part_curr;
+
+	struct mmc_wr_pack_stats wr_pack_stats; /* packed commands stats*/
+	struct notifier_block        reboot_notify;
+	enum mmc_pon_type pon_type;
+	bool cmdq_init;
+	struct mmc_bkops_info bkops;
+	bool err_in_sdr104;
+	bool sdr104_blocked;
 };
 
 /*
@@ -353,19 +479,43 @@
 	/* SDIO-specific fields. You can use SDIO_ANY_ID here of course */
 	u16 cis_vendor, cis_device;
 
+	/* MMC-specific field; you can use EXT_CSD_REV_ANY here of course */
+	unsigned int ext_csd_rev;
+
 	void (*vendor_fixup)(struct mmc_card *card, int data);
 	int data;
 };
 
+#define CID_MANFID_SANDISK	0x2
+#define CID_MANFID_TOSHIBA	0x11
+#define CID_MANFID_MICRON	0x13
+#define CID_MANFID_SAMSUNG	0x15
+#define CID_MANFID_KINGSTON	0x70
+#define CID_MANFID_HYNIX	0x90
+
 #define CID_MANFID_ANY (-1u)
 #define CID_OEMID_ANY ((unsigned short) -1)
 #define CID_NAME_ANY (NULL)
+#define EXT_CSD_REV_ANY (-1u)
 
 #define END_FIXUP { NULL }
 
+/* extended CSD mapping to mmc version */
+enum mmc_version_ext_csd_rev {
+	MMC_V4_0,
+	MMC_V4_1,
+	MMC_V4_2,
+	MMC_V4_41 = 5,
+	MMC_V4_5,
+	MMC_V4_51 = MMC_V4_5,
+	MMC_V5_0,
+	MMC_V5_01 = MMC_V5_0,
+	MMC_V5_1
+};
+
 #define _FIXUP_EXT(_name, _manfid, _oemid, _rev_start, _rev_end,	\
 		   _cis_vendor, _cis_device,				\
-		   _fixup, _data)					\
+		   _fixup, _data, _ext_csd_rev)				\
 	{						   \
 		.name = (_name),			   \
 		.manfid = (_manfid),			   \
@@ -376,23 +526,30 @@
 		.cis_device = (_cis_device),		   \
 		.vendor_fixup = (_fixup),		   \
 		.data = (_data),			   \
+		.ext_csd_rev = (_ext_csd_rev),		   \
 	 }
 
 #define MMC_FIXUP_REV(_name, _manfid, _oemid, _rev_start, _rev_end,	\
-		      _fixup, _data)					\
+		      _fixup, _data, _ext_csd_rev)			\
 	_FIXUP_EXT(_name, _manfid,					\
 		   _oemid, _rev_start, _rev_end,			\
 		   SDIO_ANY_ID, SDIO_ANY_ID,				\
-		   _fixup, _data)					\
+		   _fixup, _data, _ext_csd_rev)				\
 
 #define MMC_FIXUP(_name, _manfid, _oemid, _fixup, _data) \
-	MMC_FIXUP_REV(_name, _manfid, _oemid, 0, -1ull, _fixup, _data)
+	MMC_FIXUP_REV(_name, _manfid, _oemid, 0, -1ull, _fixup, _data,	\
+		      EXT_CSD_REV_ANY)
+
+#define MMC_FIXUP_EXT_CSD_REV(_name, _manfid, _oemid, _fixup, _data,	\
+			      _ext_csd_rev)				\
+	MMC_FIXUP_REV(_name, _manfid, _oemid, 0, -1ull, _fixup, _data,	\
+		      _ext_csd_rev)
 
 #define SDIO_FIXUP(_vendor, _device, _fixup, _data)			\
 	_FIXUP_EXT(CID_NAME_ANY, CID_MANFID_ANY,			\
 		    CID_OEMID_ANY, 0, -1ull,				\
 		   _vendor, _device,					\
-		   _fixup, _data)					\
+		   _fixup, _data, EXT_CSD_REV_ANY)			\
 
 #define cid_rev(hwrev, fwrev, year, month)	\
 	(((u64) hwrev) << 40 |                  \
@@ -431,6 +588,8 @@
 #define mmc_card_removed(c)	((c) && ((c)->state & MMC_CARD_REMOVED))
 #define mmc_card_doing_bkops(c)	((c)->state & MMC_STATE_DOING_BKOPS)
 #define mmc_card_suspended(c)	((c)->state & MMC_STATE_SUSPENDED)
+#define mmc_card_cmdq(c)       ((c)->state & MMC_STATE_CMDQ)
+#define mmc_card_doing_auto_bkops(c)	((c)->state & MMC_STATE_AUTO_BKOPS)
 
 #define mmc_card_set_present(c)	((c)->state |= MMC_STATE_PRESENT)
 #define mmc_card_set_readonly(c) ((c)->state |= MMC_STATE_READONLY)
@@ -441,6 +600,12 @@
 #define mmc_card_clr_doing_bkops(c)	((c)->state &= ~MMC_STATE_DOING_BKOPS)
 #define mmc_card_set_suspended(c) ((c)->state |= MMC_STATE_SUSPENDED)
 #define mmc_card_clr_suspended(c) ((c)->state &= ~MMC_STATE_SUSPENDED)
+#define mmc_card_set_cmdq(c)           ((c)->state |= MMC_STATE_CMDQ)
+#define mmc_card_clr_cmdq(c)           ((c)->state &= ~MMC_STATE_CMDQ)
+#define mmc_card_set_auto_bkops(c)	((c)->state |= MMC_STATE_AUTO_BKOPS)
+#define mmc_card_clr_auto_bkops(c)	((c)->state &= ~MMC_STATE_AUTO_BKOPS)
+
+#define mmc_card_strobe(c) (((c)->ext_csd).strobe_support & MMC_STROBE_SUPPORT)
 
 /*
  * Quirk add/remove for MMC products.
@@ -511,10 +676,37 @@
 	return c->quirks & MMC_QUIRK_BROKEN_IRQ_POLLING;
 }
 
+static inline bool mmc_card_support_auto_bkops(const struct mmc_card *c)
+{
+	return c->ext_csd.rev >= MMC_V5_1;
+}
+
+static inline bool mmc_card_configured_manual_bkops(const struct mmc_card *c)
+{
+	return c->ext_csd.bkops_en & EXT_CSD_BKOPS_MANUAL_EN;
+}
+
+static inline bool mmc_card_configured_auto_bkops(const struct mmc_card *c)
+{
+	return c->ext_csd.bkops_en & EXT_CSD_BKOPS_AUTO_EN;
+}
+
+static inline bool mmc_enable_qca6574_settings(const struct mmc_card *c)
+{
+	return c->quirks & MMC_QUIRK_QCA6574_SETTINGS;
+}
+
+static inline bool mmc_enable_qca9377_settings(const struct mmc_card *c)
+{
+	return c->quirks & MMC_QUIRK_QCA9377_SETTINGS;
+}
+
 #define mmc_card_name(c)	((c)->cid.prod_name)
 #define mmc_card_id(c)		(dev_name(&(c)->dev))
 
 #define mmc_dev_to_card(d)	container_of(d, struct mmc_card, dev)
+#define mmc_get_drvdata(c)	dev_get_drvdata(&(c)->dev)
+#define mmc_set_drvdata(c, d)	dev_set_drvdata(&(c)->dev, d)
 
 /*
  * MMC device driver (e.g., Flash card, I/O card...)
@@ -531,5 +723,9 @@
 
 extern void mmc_fixup_device(struct mmc_card *card,
 			     const struct mmc_fixup *table);
-
+extern struct mmc_wr_pack_stats *mmc_blk_get_packed_statistics(
+			struct mmc_card *card);
+extern void mmc_blk_init_packed_statistics(struct mmc_card *card);
+extern int mmc_send_pon(struct mmc_card *card);
+extern void mmc_blk_cmdq_req_done(struct mmc_request *mrq);
 #endif /* LINUX_MMC_CARD_H */
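[Editorial note] A hedged sketch of a BKOPS policy decision composed from the new helpers and fields; handle_bkops_sketch() is illustrative, and mmc_start_manual_bkops() is declared in the core.h hunk further below.

static void handle_bkops_sketch(struct mmc_card *card)
{
	if (!card->ext_csd.bkops)
		return;				/* no BKOPS support at all */

	if (mmc_card_configured_auto_bkops(card))
		return;				/* the device manages itself */

	if (mmc_card_configured_manual_bkops(card) && card->bkops.needs_bkops)
		mmc_start_manual_bkops(card);
}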
diff -ruw linux-4.4.115/include/linux/mmc/core.h linux-4.4.115-fbx/include/linux/mmc/core.h
--- linux-4.4.115/include/linux/mmc/core.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/linux/mmc/core.h	2019-10-29 09:26:25.457220930 +0100
@@ -8,6 +8,7 @@
 #ifndef LINUX_MMC_CORE_H
 #define LINUX_MMC_CORE_H
 
+#include <uapi/linux/mmc/core.h>
 #include <linux/interrupt.h>
 #include <linux/completion.h>
 
@@ -23,38 +24,6 @@
 #define MMC_CMD23_ARG_TAG_REQ	(1 << 29)
 	u32			resp[4];
 	unsigned int		flags;		/* expected response type */
-#define MMC_RSP_PRESENT	(1 << 0)
-#define MMC_RSP_136	(1 << 1)		/* 136 bit response */
-#define MMC_RSP_CRC	(1 << 2)		/* expect valid crc */
-#define MMC_RSP_BUSY	(1 << 3)		/* card may send busy */
-#define MMC_RSP_OPCODE	(1 << 4)		/* response contains opcode */
-
-#define MMC_CMD_MASK	(3 << 5)		/* non-SPI command type */
-#define MMC_CMD_AC	(0 << 5)
-#define MMC_CMD_ADTC	(1 << 5)
-#define MMC_CMD_BC	(2 << 5)
-#define MMC_CMD_BCR	(3 << 5)
-
-#define MMC_RSP_SPI_S1	(1 << 7)		/* one status byte */
-#define MMC_RSP_SPI_S2	(1 << 8)		/* second byte */
-#define MMC_RSP_SPI_B4	(1 << 9)		/* four data bytes */
-#define MMC_RSP_SPI_BUSY (1 << 10)		/* card may send busy */
-
-/*
- * These are the native response types, and correspond to valid bit
- * patterns of the above flags.  One additional valid pattern
- * is all zeros, which means we don't expect a response.
- */
-#define MMC_RSP_NONE	(0)
-#define MMC_RSP_R1	(MMC_RSP_PRESENT|MMC_RSP_CRC|MMC_RSP_OPCODE)
-#define MMC_RSP_R1B	(MMC_RSP_PRESENT|MMC_RSP_CRC|MMC_RSP_OPCODE|MMC_RSP_BUSY)
-#define MMC_RSP_R2	(MMC_RSP_PRESENT|MMC_RSP_136|MMC_RSP_CRC)
-#define MMC_RSP_R3	(MMC_RSP_PRESENT)
-#define MMC_RSP_R4	(MMC_RSP_PRESENT)
-#define MMC_RSP_R5	(MMC_RSP_PRESENT|MMC_RSP_CRC|MMC_RSP_OPCODE)
-#define MMC_RSP_R6	(MMC_RSP_PRESENT|MMC_RSP_CRC|MMC_RSP_OPCODE)
-#define MMC_RSP_R7	(MMC_RSP_PRESENT|MMC_RSP_CRC|MMC_RSP_OPCODE)
-
 #define mmc_resp_type(cmd)	((cmd)->flags & (MMC_RSP_PRESENT|MMC_RSP_136|MMC_RSP_CRC|MMC_RSP_BUSY|MMC_RSP_OPCODE))
 
 /*
@@ -98,6 +67,8 @@
 	unsigned int		busy_timeout;	/* busy detect timeout in ms */
 	/* Set this flag only for blocking sanitize request */
 	bool			sanitize_busy;
+	/* Set this flag only for blocking bkops request */
+	bool			bkops_busy;
 
 	struct mmc_data		*data;		/* data segment associated with cmd */
 	struct mmc_request	*mrq;		/* associated request */
@@ -124,6 +95,7 @@
 	int			sg_count;	/* mapped sg entries */
 	struct scatterlist	*sg;		/* I/O scatter list */
 	s32			host_cookie;	/* host private data */
+	bool			fault_injected; /* fault injected */
 };
 
 struct mmc_host;
@@ -136,10 +108,47 @@
 	struct completion	completion;
 	void			(*done)(struct mmc_request *);/* completion function */
 	struct mmc_host		*host;
+	struct mmc_cmdq_req	*cmdq_req;
+	struct request		*req;
+	ktime_t			io_start;
+#ifdef CONFIG_BLOCK
+	int			lat_hist_enabled;
+#endif
+};
+
+struct mmc_bus_ops {
+	void (*remove)(struct mmc_host *);
+	void (*detect)(struct mmc_host *);
+	int (*pre_suspend)(struct mmc_host *);
+	int (*suspend)(struct mmc_host *);
+	int (*resume)(struct mmc_host *);
+	int (*runtime_suspend)(struct mmc_host *);
+	int (*runtime_resume)(struct mmc_host *);
+	int (*runtime_idle)(struct mmc_host *);
+	int (*power_save)(struct mmc_host *);
+	int (*power_restore)(struct mmc_host *);
+	int (*alive)(struct mmc_host *);
+	int (*shutdown)(struct mmc_host *);
+	int (*reset)(struct mmc_host *);
+	int (*change_bus_speed)(struct mmc_host *, unsigned long *);
 };
 
 struct mmc_card;
 struct mmc_async_req;
+struct mmc_cmdq_req;
+
+extern int mmc_cmdq_discard_queue(struct mmc_host *host, u32 tasks);
+extern int mmc_cmdq_halt(struct mmc_host *host, bool enable);
+extern int mmc_cmdq_halt_on_empty_queue(struct mmc_host *host);
+extern void mmc_cmdq_post_req(struct mmc_host *host, int tag, int err);
+extern int mmc_cmdq_start_req(struct mmc_host *host,
+			      struct mmc_cmdq_req *cmdq_req);
+extern int mmc_cmdq_prepare_flush(struct mmc_command *cmd);
+extern int mmc_cmdq_wait_for_dcmd(struct mmc_host *host,
+			struct mmc_cmdq_req *cmdq_req);
+extern int mmc_cmdq_erase(struct mmc_cmdq_req *cmdq_req,
+	      struct mmc_card *card, unsigned int from, unsigned int nr,
+	      unsigned int arg);
 
 extern int mmc_stop_bkops(struct mmc_card *);
 extern int mmc_read_bkops_status(struct mmc_card *);
@@ -151,10 +160,17 @@
 extern int mmc_app_cmd(struct mmc_host *, struct mmc_card *);
 extern int mmc_wait_for_app_cmd(struct mmc_host *, struct mmc_card *,
 	struct mmc_command *, int);
-extern void mmc_start_bkops(struct mmc_card *card, bool from_exception);
+extern void mmc_check_bkops(struct mmc_card *card);
+extern void mmc_start_manual_bkops(struct mmc_card *card);
 extern int mmc_switch(struct mmc_card *, u8, u8, u8, unsigned int);
+extern int __mmc_switch_cmdq_mode(struct mmc_command *cmd, u8 set, u8 index,
+				  u8 value, unsigned int timeout_ms,
+				  bool use_busy_signal, bool ignore_timeout);
 extern int mmc_send_tuning(struct mmc_host *host, u32 opcode, int *cmd_error);
 extern int mmc_get_ext_csd(struct mmc_card *card, u8 **new_ext_csd);
+extern int mmc_set_auto_bkops(struct mmc_card *card, bool enable);
+extern int mmc_suspend_clk_scaling(struct mmc_host *host);
+extern void mmc_flush_detect_work(struct mmc_host *);
 
 #define MMC_ERASE_ARG		0x00000000
 #define MMC_SECURE_ERASE_ARG	0x80000000
@@ -181,6 +197,7 @@
 extern int mmc_set_blockcount(struct mmc_card *card, unsigned int blockcount,
 			      bool is_rel_write);
 extern int mmc_hw_reset(struct mmc_host *host);
+extern int mmc_cmdq_hw_reset(struct mmc_host *host);
 extern int mmc_can_reset(struct mmc_card *card);
 
 extern void mmc_set_data_timeout(struct mmc_data *, const struct mmc_card *);
@@ -188,14 +205,27 @@
 
 extern int __mmc_claim_host(struct mmc_host *host, atomic_t *abort);
 extern void mmc_release_host(struct mmc_host *host);
+extern int mmc_try_claim_host(struct mmc_host *host, unsigned int delay);
 
 extern void mmc_get_card(struct mmc_card *card);
 extern void mmc_put_card(struct mmc_card *card);
+extern void __mmc_put_card(struct mmc_card *card);
 
+extern void mmc_set_ios(struct mmc_host *host);
 extern int mmc_flush_cache(struct mmc_card *);
+extern int mmc_cache_barrier(struct mmc_card *);
 
 extern int mmc_detect_card_removed(struct mmc_host *host);
 
+extern void mmc_blk_init_bkops_statistics(struct mmc_card *card);
+
+extern void mmc_deferred_scaling(struct mmc_host *host);
+extern void mmc_cmdq_clk_scaling_start_busy(struct mmc_host *host,
+	bool lock_needed);
+extern void mmc_cmdq_clk_scaling_stop_busy(struct mmc_host *host,
+	bool lock_needed, bool is_cmdq_dcmd);
+extern int mmc_recovery_fallback_lower_speed(struct mmc_host *host);
+
 /**
  *	mmc_claim_host - exclusively claim a host
  *	@host: mmc host to claim
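
/*
 * Illustrative sketch (not from the patch): the claim/release discipline
 * implied by the declarations above, used here to toggle the auto BKOPS
 * support added in this file.
 */
#include <linux/mmc/card.h>
#include <linux/mmc/core.h>

static int example_enable_auto_bkops(struct mmc_card *card)
{
	int err;

	mmc_get_card(card);	/* runtime-resume the card and claim the host */
	err = mmc_set_auto_bkops(card, true);
	mmc_put_card(card);	/* release the host and drop the PM reference */

	return err;
}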
diff -ruw linux-4.4.115/include/linux/mmc/host.h linux-4.4.115-fbx/include/linux/mmc/host.h
--- linux-4.4.115/include/linux/mmc/host.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/linux/mmc/host.h	2019-10-29 09:26:25.457220930 +0100
@@ -15,14 +15,21 @@
 #include <linux/timer.h>
 #include <linux/sched.h>
 #include <linux/device.h>
+#include <linux/devfreq.h>
 #include <linux/fault-inject.h>
+#include <linux/blkdev.h>
 
 #include <linux/mmc/core.h>
 #include <linux/mmc/card.h>
 #include <linux/mmc/pm.h>
+#include <linux/mmc/ring_buffer.h>
+
+#define MMC_AUTOSUSPEND_DELAY_MS	3000
 
 struct mmc_ios {
 	unsigned int	clock;			/* clock rate */
+	unsigned int	old_rate;	/* saved clock rate */
+	unsigned long	clk_ts;		/* time stamp of last clock update */
 	unsigned short	vdd;
 
 /* vdd stores the bit number of the selected voltage range from below. */
@@ -79,7 +86,31 @@
 #define MMC_SET_DRIVER_TYPE_D	3
 };
 
+/* states to represent load on the host */
+enum mmc_load {
+	MMC_LOAD_HIGH,
+	MMC_LOAD_LOW,
+};
+
+struct mmc_cmdq_host_ops {
+	int (*init)(struct mmc_host *host);
+	int (*enable)(struct mmc_host *host);
+	void (*disable)(struct mmc_host *host, bool soft);
+	int (*request)(struct mmc_host *host, struct mmc_request *mrq);
+	void (*post_req)(struct mmc_host *host, int tag, int err);
+	int (*halt)(struct mmc_host *host, bool halt);
+	void (*reset)(struct mmc_host *host, bool soft);
+	void (*dumpstate)(struct mmc_host *host);
+};
+
 struct mmc_host_ops {
+	int (*init)(struct mmc_host *host);
+	/*
+	 * 'enable' is called when the host is claimed and 'disable' is called
+	 * when the host is released. 'enable' and 'disable' are deprecated.
+	 */
+	int (*enable)(struct mmc_host *host);
+	int (*disable)(struct mmc_host *host);
 	/*
 	 * It is optional for the host to implement pre_req and post_req in
 	 * order to support double buffering of requests (prepare one
@@ -132,6 +163,7 @@
 
 	/* Prepare HS400 target operating frequency depending host driver */
 	int	(*prepare_hs400_tuning)(struct mmc_host *host, struct mmc_ios *ios);
+	int	(*enhanced_strobe)(struct mmc_host *host);
 	int	(*select_drive_strength)(struct mmc_card *card,
 					 unsigned int max_dtr, int host_drv,
 					 int card_drv, int *drv_type);
@@ -144,11 +176,43 @@
 	 */
 	int	(*multi_io_quirk)(struct mmc_card *card,
 				  unsigned int direction, int blk_size);
+
+	unsigned long (*get_max_frequency)(struct mmc_host *host);
+	unsigned long (*get_min_frequency)(struct mmc_host *host);
+
+	int	(*notify_load)(struct mmc_host *, enum mmc_load);
+	void	(*notify_halt)(struct mmc_host *mmc, bool halt);
+	void	(*force_err_irq)(struct mmc_host *host, u64 errmask);
 };
 
 struct mmc_card;
 struct device;
 
+struct mmc_cmdq_req {
+	unsigned int cmd_flags;
+	u32 blk_addr;
+	/* active mmc request */
+	struct mmc_request	mrq;
+	struct mmc_data		data;
+	struct mmc_command	cmd;
+#define DCMD		(1 << 0)
+#define QBR		(1 << 1)
+#define DIR		(1 << 2)
+#define PRIO		(1 << 3)
+#define REL_WR		(1 << 4)
+#define DAT_TAG	(1 << 5)
+#define FORCED_PRG	(1 << 6)
+	unsigned int		cmdq_req_flags;
+
+	unsigned int		resp_idx;
+	unsigned int		resp_arg;
+	unsigned int		dev_pend_tasks;
+	bool			resp_err;
+	bool			skip_err_handling;
+	int			tag; /* used for command queuing */
+	u8			ctx_id;
+};
+
 struct mmc_async_req {
 	/* active mmc request */
 	struct mmc_request	*mrq;
@@ -175,6 +239,33 @@
 	void *handler_priv;
 };
 
+
+/**
+ * mmc_cmdq_context_info - describes the context of the cmdq engine
+ * @active_reqs:	requests being processed
+ * @data_active_reqs:	data requests being processed
+ * @curr_state:		state of cmdq engine
+ * @queue_empty_wq:	wait queue used to wait for all
+ *			outstanding requests to complete
+ * @wait:		wait queue used to wait for all conditions described in
+ *			mmc_cmdq_ready_wait to be satisfied before
+ *			issuing the new request to the LLD
+ * @active_small_sector_read_reqs: in-flight small-sector read requests
+ */
+struct mmc_cmdq_context_info {
+	unsigned long	active_reqs; /* in-flight requests */
+	unsigned long	data_active_reqs; /* in-flight data requests */
+	unsigned long	curr_state;
+#define	CMDQ_STATE_ERR 0
+#define	CMDQ_STATE_DCMD_ACTIVE 1
+#define	CMDQ_STATE_HALT 2
+#define	CMDQ_STATE_CQ_DISABLE 3
+#define	CMDQ_STATE_REQ_TIMED_OUT 4
+	wait_queue_head_t	queue_empty_wq;
+	wait_queue_head_t	wait;
+	int			active_small_sector_read_reqs;
+};
+
 /**
  * mmc_context_info - synchronization details for mmc context
  * @is_done_rcv		wake up reason was done request
@@ -199,11 +290,70 @@
 	struct regulator *vqmmc;	/* Optional Vccq supply */
 };
 
+enum dev_state {
+	DEV_SUSPENDING = 1,
+	DEV_SUSPENDED,
+	DEV_RESUMED,
+};
+
+/**
+ * struct mmc_devfeq_clk_scaling - main context for MMC clock scaling logic
+ *
+ * @lock: spinlock to protect statistics
+ * @devfreq: struct that represents the mmc host as a devfreq client
+ * @devfreq_profile: MMC device profile, mostly polling interval and callbacks
+ * @ondemand_gov_data: struct supplied to the ondemand governor (thresholds)
+ * @state: load state, can be HIGH or LOW, used to notify mmc_host_ops callback
+ * @start_busy: timestamp armed once a data request is started
+ * @measure_interval_start: timestamp armed once a measure interval has started
+ * @devfreq_abort: flag to sync between different contexts relevant to devfreq
+ * @skip_clk_scale_freq_update: flag that enables/disables frequency updates
+ * @freq_table_sz: table size of frequencies supplied to devfreq
+ * @freq_table: frequencies table supplied to devfreq
+ * @curr_freq: current frequency
+ * @polling_delay_ms: polling interval for status collection used by devfreq
+ * @upthreshold: up-threshold supplied to ondemand governor
+ * @downthreshold: down-threshold supplied to ondemand governor
+ * @need_freq_change: flag indicating if a frequency change is required
+ * @clk_scaling_in_progress: flag indicating if a frequency change is in progress
+ * @is_busy_started: flag indicating if a request is handled by the HW
+ * @enable: flag indicating if the clock scaling logic is enabled for this host
+ */
+struct mmc_devfeq_clk_scaling {
+	spinlock_t	lock;
+	struct		devfreq *devfreq;
+	struct		devfreq_dev_profile devfreq_profile;
+	struct		devfreq_simple_ondemand_data ondemand_gov_data;
+	enum mmc_load	state;
+	ktime_t		start_busy;
+	ktime_t		measure_interval_start;
+	atomic_t	devfreq_abort;
+	bool		skip_clk_scale_freq_update;
+	int		freq_table_sz;
+	int		pltfm_freq_table_sz;
+	u32		*freq_table;
+	u32		*pltfm_freq_table;
+	unsigned long	total_busy_time_us;
+	unsigned long	target_freq;
+	unsigned long	curr_freq;
+	unsigned long	polling_delay_ms;
+	unsigned int	upthreshold;
+	unsigned int	downthreshold;
+	unsigned int	lower_bus_speed_mode;
+#define MMC_SCALING_LOWER_DDR52_MODE	1
+	bool		need_freq_change;
+	bool		clk_scaling_in_progress;
+	bool		is_busy_started;
+	bool		enable;
+};
+
 struct mmc_host {
 	struct device		*parent;
 	struct device		class_dev;
+	struct mmc_devfeq_clk_scaling	clk_scaling;
 	int			index;
 	const struct mmc_host_ops *ops;
+	const struct mmc_cmdq_host_ops *cmdq_ops;
 	struct mmc_pwrseq	*pwrseq;
 	unsigned int		f_min;
 	unsigned int		f_max;
@@ -289,9 +439,33 @@
 #define MMC_CAP2_HSX00_1_2V	(MMC_CAP2_HS200_1_2V_SDR | MMC_CAP2_HS400_1_2V)
 #define MMC_CAP2_SDIO_IRQ_NOTHREAD (1 << 17)
 #define MMC_CAP2_NO_WRITE_PROTECT (1 << 18)	/* No physical write protect pin, assume that card is always read-write */
+#define MMC_CAP2_PACKED_WR_CONTROL (1 << 19)	/* Allow write packing control */
+#define MMC_CAP2_CLK_SCALE	(1 << 20)	/* Allow dynamic clk scaling */
+/* Allows Asynchronous SDIO irq while card is in 4-bit mode */
+#define MMC_CAP2_ASYNC_SDIO_IRQ_4BIT_MODE (1 << 21)
+/* Some hosts need additional tuning */
+#define MMC_CAP2_HS400_POST_TUNING	(1 << 22)
+#define MMC_CAP2_NONHOTPLUG	(1 << 25)	/* Don't support hotplug */
+#define MMC_CAP2_CMD_QUEUE	(1 << 26)	/* support eMMC command queue */
+#define MMC_CAP2_SANITIZE	(1 << 27)	/* Support Sanitize */
+#define MMC_CAP2_SLEEP_AWAKE	(1 << 28)	/* Use Sleep/Awake (CMD5) */
+/* use max discard ignoring max_busy_timeout parameter */
+#define MMC_CAP2_MAX_DISCARD_SIZE	(1 << 29)
 
 	mmc_pm_flag_t		pm_caps;	/* supported pm features */
 
+#ifdef CONFIG_MMC_CLKGATE
+	int			clk_requests;	/* internal reference counter */
+	unsigned int		clk_delay;	/* number of MCI clk hold cycles */
+	bool			clk_gated;	/* clock gated */
+	struct delayed_work	clk_gate_work; /* delayed clock gate */
+	unsigned int		clk_old;	/* old clock value cache */
+	spinlock_t		clk_lock;	/* lock for clk fields */
+	struct mutex		clk_gate_mutex;	/* mutex for clock gating */
+	struct device_attribute clkgate_delay_attr;
+	unsigned long           clkgate_delay;
+#endif
+
 	/* host specific block data */
 	unsigned int		max_seg_size;	/* see blk_queue_max_segment_size */
 	unsigned short		max_segs;	/* see blk_queue_max_segments */
@@ -305,6 +479,7 @@
 	spinlock_t		lock;		/* lock for claim and bus ops */
 
 	struct mmc_ios		ios;		/* current io bus settings */
+	struct mmc_ios		cached_ios;
 
 	/* group bitfields together to minimize padding */
 	unsigned int		use_spi_crc:1;
@@ -331,6 +506,7 @@
 
 	wait_queue_head_t	wq;
 	struct task_struct	*claimer;	/* task that has host claimed */
+	struct task_struct	*suspend_task;
 	int			claim_cnt;	/* "claim" nesting count */
 
 	struct delayed_work	detect;
@@ -340,6 +516,11 @@
 	const struct mmc_bus_ops *bus_ops;	/* current bus driver */
 	unsigned int		bus_refs;	/* reference counter */
 
+	unsigned int		bus_resume_flags;
+#define MMC_BUSRESUME_MANUAL_RESUME	(1 << 0)
+#define MMC_BUSRESUME_NEEDS_RESUME	(1 << 1)
+	bool ignore_bus_resume_flags;
+
 	unsigned int		sdio_irqs;
 	struct task_struct	*sdio_irq_thread;
 	bool			sdio_irq_pending;
@@ -356,6 +537,8 @@
 
 	struct dentry		*debugfs_root;
 
+	bool			err_occurred;
+
 	struct mmc_async_req	*areq;		/* active async req */
 	struct mmc_context_info	context_info;	/* async synchronization info */
 
@@ -370,25 +553,100 @@
 	int			dsr_req;	/* DSR value is valid */
 	u32			dsr;	/* optional driver stage (DSR) value */
 
+#ifdef CONFIG_MMC_EMBEDDED_SDIO
+	struct {
+		struct sdio_cis			*cis;
+		struct sdio_cccr		*cccr;
+		struct sdio_embedded_func	*funcs;
+		int				num_funcs;
+	} embedded_sdio_data;
+#endif
+	/*
+	 * Set to 1 to just stop the SDCLK to the card without
+	 * actually disabling the clock from its source.
+	 */
+	bool			card_clock_off;
+
+#ifdef CONFIG_MMC_PERF_PROFILING
+	struct {
+		unsigned long rbytes_drv;  /* Rd bytes MMC Host  */
+		unsigned long wbytes_drv;  /* Wr bytes MMC Host  */
+		ktime_t rtime_drv;	   /* Rd time  MMC Host  */
+		ktime_t wtime_drv;	   /* Wr time  MMC Host  */
+		ktime_t start;
+	} perf;
+	bool perf_enable;
+#endif
+	struct mmc_trace_buffer trace_buf;
+	enum dev_state dev_status;
+	bool			wakeup_on_idle;
+	struct mmc_cmdq_context_info	cmdq_ctx;
+	int num_cq_slots;
+	int dcmd_cq_slot;
+	bool			cmdq_thist_enabled;
+	/*
+	 * several cmdq supporting host controllers are extensions
+	 * of legacy controllers. This variable can be used to store
+	 * a reference to the cmdq extension of the existing host
+	 * controller.
+	 */
+	void *cmdq_private;
+	struct mmc_request	*err_mrq;
+#ifdef CONFIG_BLOCK
+	int			latency_hist_enabled;
+	struct io_latency_state io_lat_read;
+	struct io_latency_state io_lat_write;
+#endif
+
+	bool sdr104_wa;
 	unsigned long		private[0] ____cacheline_aligned;
 };
 
 struct mmc_host *mmc_alloc_host(int extra, struct device *);
+extern bool mmc_host_may_gate_card(struct mmc_card *);
 int mmc_add_host(struct mmc_host *);
 void mmc_remove_host(struct mmc_host *);
 void mmc_free_host(struct mmc_host *);
 int mmc_of_parse(struct mmc_host *host);
 
+#ifdef CONFIG_MMC_EMBEDDED_SDIO
+extern void mmc_set_embedded_sdio_data(struct mmc_host *host,
+				       struct sdio_cis *cis,
+				       struct sdio_cccr *cccr,
+				       struct sdio_embedded_func *funcs,
+				       int num_funcs);
+#endif
+
 static inline void *mmc_priv(struct mmc_host *host)
 {
 	return (void *)host->private;
 }
 
+static inline void *mmc_cmdq_private(struct mmc_host *host)
+{
+	return host->cmdq_private;
+}
+
 #define mmc_host_is_spi(host)	((host)->caps & MMC_CAP_SPI)
 
 #define mmc_dev(x)	((x)->parent)
 #define mmc_classdev(x)	(&(x)->class_dev)
 #define mmc_hostname(x)	(dev_name(&(x)->class_dev))
+#define mmc_bus_needs_resume(host) ((host)->bus_resume_flags & \
+				    MMC_BUSRESUME_NEEDS_RESUME)
+#define mmc_bus_manual_resume(host) ((host)->bus_resume_flags & \
+				MMC_BUSRESUME_MANUAL_RESUME)
+
+static inline void mmc_set_bus_resume_policy(struct mmc_host *host, int manual)
+{
+	if (manual)
+		host->bus_resume_flags |= MMC_BUSRESUME_MANUAL_RESUME;
+	else
+		host->bus_resume_flags &= ~MMC_BUSRESUME_MANUAL_RESUME;
+}
+
+extern int mmc_resume_bus(struct mmc_host *host);
 
 int mmc_power_save_host(struct mmc_host *host);
 int mmc_power_restore_host(struct mmc_host *host);
@@ -461,6 +719,12 @@
 	return !(host->caps2 & MMC_CAP2_BOOTPART_NOACC);
 }
 
+static inline bool mmc_card_and_host_support_async_int(struct mmc_host *host)
+{
+	return ((host->caps2 & MMC_CAP2_ASYNC_SDIO_IRQ_4BIT_MODE) &&
+			(host->card->cccr.async_intr_sup));
+}
+
 static inline int mmc_host_uhs(struct mmc_host *host)
 {
 	return host->caps &
@@ -469,11 +733,71 @@
 		 MMC_CAP_UHS_DDR50);
 }
 
+static inline void mmc_host_clear_sdr104(struct mmc_host *host)
+{
+	host->caps &= ~MMC_CAP_UHS_SDR104;
+}
+
+static inline void mmc_host_set_sdr104(struct mmc_host *host)
+{
+	host->caps |= MMC_CAP_UHS_SDR104;
+}
+
 static inline int mmc_host_packed_wr(struct mmc_host *host)
 {
 	return host->caps2 & MMC_CAP2_PACKED_WR;
 }
 
+static inline void mmc_host_set_halt(struct mmc_host *host)
+{
+	set_bit(CMDQ_STATE_HALT, &host->cmdq_ctx.curr_state);
+}
+
+static inline void mmc_host_clr_halt(struct mmc_host *host)
+{
+	clear_bit(CMDQ_STATE_HALT, &host->cmdq_ctx.curr_state);
+}
+
+static inline int mmc_host_halt(struct mmc_host *host)
+{
+	return test_bit(CMDQ_STATE_HALT, &host->cmdq_ctx.curr_state);
+}
+
+static inline void mmc_host_set_cq_disable(struct mmc_host *host)
+{
+	set_bit(CMDQ_STATE_CQ_DISABLE, &host->cmdq_ctx.curr_state);
+}
+
+static inline void mmc_host_clr_cq_disable(struct mmc_host *host)
+{
+	clear_bit(CMDQ_STATE_CQ_DISABLE, &host->cmdq_ctx.curr_state);
+}
+
+static inline int mmc_host_cq_disable(struct mmc_host *host)
+{
+	return test_bit(CMDQ_STATE_CQ_DISABLE, &host->cmdq_ctx.curr_state);
+}
+
+#ifdef CONFIG_MMC_CLKGATE
+void mmc_host_clk_hold(struct mmc_host *host);
+void mmc_host_clk_release(struct mmc_host *host);
+unsigned int mmc_host_clk_rate(struct mmc_host *host);
+
+#else
+static inline void mmc_host_clk_hold(struct mmc_host *host)
+{
+}
+
+static inline void mmc_host_clk_release(struct mmc_host *host)
+{
+}
+
+static inline unsigned int mmc_host_clk_rate(struct mmc_host *host)
+{
+	return host->ios.clock;
+}
+#endif
+
 static inline int mmc_card_hs(struct mmc_card *card)
 {
 	return card->host->ios.timing == MMC_TIMING_SD_HS ||
@@ -501,6 +825,8 @@
 	return card->host->ios.timing == MMC_TIMING_MMC_HS400;
 }
 
+void mmc_retune_enable(struct mmc_host *host);
+void mmc_retune_disable(struct mmc_host *host);
 void mmc_retune_timer_stop(struct mmc_host *host);
 
 static inline void mmc_retune_needed(struct mmc_host *host)
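
/*
 * Illustrative sketch (function name invented): the halt helpers above are
 * plain bitops on host->cmdq_ctx.curr_state, so an error path might use
 * them roughly like this.
 */
#include <linux/mmc/host.h>

static void example_cmdq_error_path(struct mmc_host *host)
{
	/* Stop the issue path from feeding the command queue engine. */
	if (!mmc_host_halt(host))
		mmc_host_set_halt(host);

	/* ... reset/recover the controller here ... */

	/* Let traffic flow again. */
	mmc_host_clr_halt(host);
}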
diff -ruw linux-4.4.115/include/linux/mmc/mmc.h linux-4.4.115-fbx/include/linux/mmc/mmc.h
--- linux-4.4.115/include/linux/mmc/mmc.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/linux/mmc/mmc.h	2019-01-22 16:16:28.311289946 +0100
@@ -24,65 +24,12 @@
 #ifndef LINUX_MMC_MMC_H
 #define LINUX_MMC_MMC_H
 
-/* Standard MMC commands (4.1)           type  argument     response */
-   /* class 1 */
-#define MMC_GO_IDLE_STATE         0   /* bc                          */
-#define MMC_SEND_OP_COND          1   /* bcr  [31:0] OCR         R3  */
-#define MMC_ALL_SEND_CID          2   /* bcr                     R2  */
-#define MMC_SET_RELATIVE_ADDR     3   /* ac   [31:16] RCA        R1  */
-#define MMC_SET_DSR               4   /* bc   [31:16] RCA            */
-#define MMC_SLEEP_AWAKE		  5   /* ac   [31:16] RCA 15:flg R1b */
-#define MMC_SWITCH                6   /* ac   [31:0] See below   R1b */
-#define MMC_SELECT_CARD           7   /* ac   [31:16] RCA        R1  */
-#define MMC_SEND_EXT_CSD          8   /* adtc                    R1  */
-#define MMC_SEND_CSD              9   /* ac   [31:16] RCA        R2  */
-#define MMC_SEND_CID             10   /* ac   [31:16] RCA        R2  */
-#define MMC_READ_DAT_UNTIL_STOP  11   /* adtc [31:0] dadr        R1  */
-#define MMC_STOP_TRANSMISSION    12   /* ac                      R1b */
-#define MMC_SEND_STATUS          13   /* ac   [31:16] RCA        R1  */
-#define MMC_BUS_TEST_R           14   /* adtc                    R1  */
-#define MMC_GO_INACTIVE_STATE    15   /* ac   [31:16] RCA            */
-#define MMC_BUS_TEST_W           19   /* adtc                    R1  */
-#define MMC_SPI_READ_OCR         58   /* spi                  spi_R3 */
-#define MMC_SPI_CRC_ON_OFF       59   /* spi  [0:0] flag      spi_R1 */
-
-  /* class 2 */
-#define MMC_SET_BLOCKLEN         16   /* ac   [31:0] block len   R1  */
-#define MMC_READ_SINGLE_BLOCK    17   /* adtc [31:0] data addr   R1  */
-#define MMC_READ_MULTIPLE_BLOCK  18   /* adtc [31:0] data addr   R1  */
-#define MMC_SEND_TUNING_BLOCK    19   /* adtc                    R1  */
-#define MMC_SEND_TUNING_BLOCK_HS200	21	/* adtc R1  */
-
-  /* class 3 */
-#define MMC_WRITE_DAT_UNTIL_STOP 20   /* adtc [31:0] data addr   R1  */
-
-  /* class 4 */
-#define MMC_SET_BLOCK_COUNT      23   /* adtc [31:0] data addr   R1  */
-#define MMC_WRITE_BLOCK          24   /* adtc [31:0] data addr   R1  */
-#define MMC_WRITE_MULTIPLE_BLOCK 25   /* adtc                    R1  */
-#define MMC_PROGRAM_CID          26   /* adtc                    R1  */
-#define MMC_PROGRAM_CSD          27   /* adtc                    R1  */
-
-  /* class 6 */
-#define MMC_SET_WRITE_PROT       28   /* ac   [31:0] data addr   R1b */
-#define MMC_CLR_WRITE_PROT       29   /* ac   [31:0] data addr   R1b */
-#define MMC_SEND_WRITE_PROT      30   /* adtc [31:0] wpdata addr R1  */
-
-  /* class 5 */
-#define MMC_ERASE_GROUP_START    35   /* ac   [31:0] data addr   R1  */
-#define MMC_ERASE_GROUP_END      36   /* ac   [31:0] data addr   R1  */
-#define MMC_ERASE                38   /* ac                      R1b */
-
-  /* class 9 */
-#define MMC_FAST_IO              39   /* ac   <Complex>          R4  */
-#define MMC_GO_IRQ_STATE         40   /* bcr                     R5  */
-
-  /* class 7 */
-#define MMC_LOCK_UNLOCK          42   /* adtc                    R1b */
-
-  /* class 8 */
-#define MMC_APP_CMD              55   /* ac   [31:16] RCA        R1  */
-#define MMC_GEN_CMD              56   /* adtc [0] RD/WR          R1  */
+#include <uapi/linux/mmc/mmc.h>
+
+/* class 11 */
+#define MMC_CMDQ_TASK_MGMT       48  /* ac   [31:0] task ID     R1b */
+#define DISCARD_QUEUE		0x1
+#define DISCARD_TASK		0x2
 
 static inline bool mmc_op_multi(u32 opcode)
 {
@@ -223,6 +170,7 @@
  * OCR bits are mostly in host.h
  */
 #define MMC_CARD_BUSY	0x80000000	/* Card Power up status bit */
+#define MMC_CARD_SECTOR_ADDR 0x40000000 /* Card supports sectors */
 
 /*
  * Card Command Classes (CCC)
@@ -272,6 +220,8 @@
  * EXT_CSD fields
  */
 
+#define EXT_CSD_CMDQ			15	/* R/W */
+#define EXT_CSD_BARRIER_CTRL		31      /* R/W */
 #define EXT_CSD_FLUSH_CACHE		32      /* W */
 #define EXT_CSD_CACHE_CTRL		33      /* R/W */
 #define EXT_CSD_POWER_OFF_NOTIFICATION	34	/* R/W */
@@ -297,6 +247,7 @@
 #define EXT_CSD_PART_CONFIG		179	/* R/W */
 #define EXT_CSD_ERASED_MEM_CONT		181	/* RO */
 #define EXT_CSD_BUS_WIDTH		183	/* R/W */
+#define EXT_CSD_STROBE_SUPPORT		184	/* RO */
 #define EXT_CSD_HS_TIMING		185	/* R/W */
 #define EXT_CSD_POWER_CLASS		187	/* R/W */
 #define EXT_CSD_REV			192	/* RO */
@@ -324,12 +275,20 @@
 #define EXT_CSD_PWR_CL_200_360		237	/* RO */
 #define EXT_CSD_PWR_CL_DDR_52_195	238	/* RO */
 #define EXT_CSD_PWR_CL_DDR_52_360	239	/* RO */
+#define EXT_CSD_CACHE_FLUSH_POLICY	240	/* RO */
 #define EXT_CSD_BKOPS_STATUS		246	/* RO */
 #define EXT_CSD_POWER_OFF_LONG_TIME	247	/* RO */
 #define EXT_CSD_GENERIC_CMD6_TIME	248	/* RO */
 #define EXT_CSD_CACHE_SIZE		249	/* RO, 4 bytes */
 #define EXT_CSD_PWR_CL_DDR_200_360	253	/* RO */
 #define EXT_CSD_FIRMWARE_VERSION	254	/* RO, 8 bytes */
+#define EXT_CSD_PRE_EOL_INFO		267	/* RO */
+#define EXT_CSD_DEVICE_LIFE_TIME_EST_TYP_A	268	/* RO */
+#define EXT_CSD_DEVICE_LIFE_TIME_EST_TYP_B	269	/* RO */
+#define EXT_CSD_CMDQ_DEPTH		307	/* RO */
+#define EXT_CSD_CMDQ_SUPPORT		308	/* RO */
+#define EXT_CSD_BARRIER_SUPPORT		486	/* RO */
 #define EXT_CSD_SUPPORTED_MODE		493	/* RO */
 #define EXT_CSD_TAG_UNIT_SIZE		498	/* RO */
 #define EXT_CSD_DATA_TAG_SUPPORT	499	/* RO */
@@ -343,6 +302,7 @@
  */
 
 #define EXT_CSD_WR_REL_PARAM_EN		(1<<2)
+#define EXT_CSD_WR_REL_PARAM_EN_RPMB_REL_WR	(1<<4)
 
 #define EXT_CSD_BOOT_WP_B_PWR_WP_DIS	(0x40)
 #define EXT_CSD_BOOT_WP_B_PERM_WP_DIS	(0x10)
@@ -386,6 +346,7 @@
 #define EXT_CSD_BUS_WIDTH_8	2	/* Card is in 8 bit mode */
 #define EXT_CSD_DDR_BUS_WIDTH_4	5	/* Card is in 4 bit DDR mode */
 #define EXT_CSD_DDR_BUS_WIDTH_8	6	/* Card is in 8 bit DDR mode */
+#define EXT_CSD_BUS_WIDTH_STROBE	0x80	/* Enhanced strobe mode */
 
 #define EXT_CSD_TIMING_BC	0	/* Backwards compatibility */
 #define EXT_CSD_TIMING_HS	1	/* High speed */
@@ -413,6 +374,9 @@
 
 #define EXT_CSD_PACKED_EVENT_EN	BIT(3)
 
+#define EXT_CSD_BKOPS_MANUAL_EN		BIT(0)
+#define EXT_CSD_BKOPS_AUTO_EN		BIT(1)
+
 /*
  * EXCEPTION_EVENT_STATUS field
  */
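
/*
 * Illustrative sketch (assumed usage, not from the patch): the BKOPS_EN
 * bits above would be written with mmc_switch(), e.g. to turn on auto
 * BKOPS while preserving the manual-enable bit.
 */
#include <linux/mmc/card.h>
#include <linux/mmc/core.h>
#include <linux/mmc/mmc.h>

static int example_write_bkops_en(struct mmc_card *card, u8 cur_bkops_en)
{
	return mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BKOPS_EN,
			  cur_bkops_en | EXT_CSD_BKOPS_AUTO_EN,
			  card->ext_csd.generic_cmd6_time);
}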
diff -ruw linux-4.4.115/include/linux/mmc/pm.h linux-4.4.115-fbx/include/linux/mmc/pm.h
--- linux-4.4.115/include/linux/mmc/pm.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/linux/mmc/pm.h	2019-01-22 16:16:28.311289946 +0100
@@ -26,5 +26,6 @@
 
 #define MMC_PM_KEEP_POWER	(1 << 0)	/* preserve card power during suspend */
 #define MMC_PM_WAKE_SDIO_IRQ	(1 << 1)	/* wake up host system on SDIO IRQ assertion */
+#define MMC_PM_IGNORE_PM_NOTIFY	(1 << 2)	/* ignore mmc pm notify */
 
 #endif /* LINUX_MMC_PM_H */
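
/*
 * Illustrative sketch: pm flags are requested by SDIO function drivers from
 * their suspend path, and the call fails if the host does not advertise the
 * flag in pm_caps. MMC_PM_IGNORE_PM_NOTIFY follows the same contract.
 */
#include <linux/mmc/pm.h>
#include <linux/mmc/sdio_func.h>

static int example_sdio_suspend(struct sdio_func *func)
{
	/* Keep the card powered across suspend so firmware state survives. */
	return sdio_set_host_pm_flags(func, MMC_PM_KEEP_POWER);
}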
diff -ruw linux-4.4.115/include/linux/mmc/sdio_func.h linux-4.4.115-fbx/include/linux/mmc/sdio_func.h
--- linux-4.4.115/include/linux/mmc/sdio_func.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/linux/mmc/sdio_func.h	2019-01-22 16:16:28.311289946 +0100
@@ -23,6 +23,14 @@
 typedef void (sdio_irq_handler_t)(struct sdio_func *);
 
 /*
+ * Structure used to hold embedded SDIO device data from platform layer
+ */
+struct sdio_embedded_func {
+	uint8_t f_class;
+	uint32_t f_maxblksize;
+};
+
+/*
  * SDIO function CIS tuple (unknown to the core)
  */
 struct sdio_func_tuple {
@@ -128,6 +136,8 @@
 extern unsigned int sdio_align_size(struct sdio_func *func, unsigned int sz);
 
 extern u8 sdio_readb(struct sdio_func *func, unsigned int addr, int *err_ret);
+extern u8 sdio_readb_ext(struct sdio_func *func, unsigned int addr, int *err_ret,
+	unsigned in);
 extern u16 sdio_readw(struct sdio_func *func, unsigned int addr, int *err_ret);
 extern u32 sdio_readl(struct sdio_func *func, unsigned int addr, int *err_ret);
 
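/*
 * Illustrative sketch (values invented): platform code describing a
 * soldered-down SDIO device whose CIS cannot be read the normal way; such a
 * table would be handed to the core via mmc_set_embedded_sdio_data()
 * (declared in host.h above).
 */
#include <linux/mmc/sdio_func.h>
#include <linux/mmc/sdio_ids.h>

static struct sdio_embedded_func example_wifi_funcs[] = {
	{
		.f_class	= SDIO_CLASS_WLAN,
		.f_maxblksize	= 512,
	},
};
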
diff -ruw linux-4.4.115/include/linux/mmc/sdio.h linux-4.4.115-fbx/include/linux/mmc/sdio.h
--- linux-4.4.115/include/linux/mmc/sdio.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/linux/mmc/sdio.h	2019-01-22 16:16:28.311289946 +0100
@@ -102,6 +102,7 @@
 #define  SDIO_BUS_WIDTH_1BIT	0x00
 #define  SDIO_BUS_WIDTH_RESERVED 0x01
 #define  SDIO_BUS_WIDTH_4BIT	0x02
+#define  SDIO_BUS_WIDTH_8BIT	0x03
 #define  SDIO_BUS_ECSI		0x20	/* Enable continuous SPI interrupt */
 #define  SDIO_BUS_SCSI		0x40	/* Support continuous SPI interrupt */
 
@@ -163,6 +164,10 @@
 #define  SDIO_DTSx_SET_TYPE_A	(1 << SDIO_DRIVE_DTSx_SHIFT)
 #define  SDIO_DTSx_SET_TYPE_C	(2 << SDIO_DRIVE_DTSx_SHIFT)
 #define  SDIO_DTSx_SET_TYPE_D	(3 << SDIO_DRIVE_DTSx_SHIFT)
+
+#define SDIO_CCCR_INTERRUPT_EXTENSION	0x16
+#define	SDIO_SUPPORT_ASYNC_INTR		(1<<0)
+#define	SDIO_ENABLE_ASYNC_INTR		(1<<1)
 /*
  * Function Basic Registers (FBR)
  */
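
/*
 * Illustrative sketch of the new CCCR interrupt-extension bits. Note that
 * sdio_f0_writeb() normally rejects CCCR writes outside the vendor area, so
 * a real enable path would use the core's internal accessors; only the bit
 * logic is meant to be taken from this example.
 */
#include <linux/mmc/sdio.h>
#include <linux/mmc/sdio_func.h>

static int example_enable_async_irq(struct sdio_func *func)
{
	int ret;
	u8 reg;

	reg = sdio_f0_readb(func, SDIO_CCCR_INTERRUPT_EXTENSION, &ret);
	if (ret)
		return ret;
	if (!(reg & SDIO_SUPPORT_ASYNC_INTR))
		return -EOPNOTSUPP;

	reg |= SDIO_ENABLE_ASYNC_INTR;
	sdio_f0_writeb(func, reg, SDIO_CCCR_INTERRUPT_EXTENSION, &ret);
	return ret;
}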
diff -ruw linux-4.4.115/include/linux/mm.h linux-4.4.115-fbx/include/linux/mm.h
--- linux-4.4.115/include/linux/mm.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/linux/mm.h	2019-10-29 09:26:25.457220930 +0100
@@ -29,6 +29,7 @@
 struct user_struct;
 struct writeback_control;
 struct bdi_writeback;
+struct super_block;
 
 #ifndef CONFIG_NEED_MULTIPLE_NODES	/* Don't use mapnrs, do it properly */
 extern unsigned long max_mapnr;
@@ -51,6 +52,17 @@
 #define sysctl_legacy_va_layout 0
 #endif
 
+#ifdef CONFIG_HAVE_ARCH_MMAP_RND_BITS
+extern const int mmap_rnd_bits_min;
+extern const int mmap_rnd_bits_max;
+extern int mmap_rnd_bits __read_mostly;
+#endif
+#ifdef CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS
+extern const int mmap_rnd_compat_bits_min;
+extern const int mmap_rnd_compat_bits_max;
+extern int mmap_rnd_compat_bits __read_mostly;
+#endif
+
 #include <asm/page.h>
 #include <asm/pgtable.h>
 #include <asm/processor.h>
@@ -59,6 +71,10 @@
 #define __pa_symbol(x)  __pa(RELOC_HIDE((unsigned long)(x), 0))
 #endif
 
+#ifndef lm_alias
+#define lm_alias(x)	__va(__pa_symbol(x))
+#endif
+
 /*
  * To prevent common memory management code establishing
  * a zero page mapping on a read fault.
@@ -374,16 +390,16 @@
  * On nommu, vmalloc/vfree wrap through kmalloc/kfree directly, so there
  * is no special casing required.
  */
-static inline int is_vmalloc_addr(const void *x)
-{
-#ifdef CONFIG_MMU
-	unsigned long addr = (unsigned long)x;
 
-	return addr >= VMALLOC_START && addr < VMALLOC_END;
+#ifdef CONFIG_MMU
+extern int is_vmalloc_addr(const void *x);
 #else
+static inline int is_vmalloc_addr(const void *x)
+{
 	return 0;
-#endif
 }
+#endif
+
 #ifdef CONFIG_MMU
 extern int is_vmalloc_or_module_addr(const void *x);
 #else
@@ -517,7 +533,6 @@
 void put_pages_list(struct list_head *pages);
 
 void split_page(struct page *page, unsigned int order);
-int split_free_page(struct page *page);
 
 /*
  * Compound pages have a destructor function.  Provide a
@@ -983,6 +998,7 @@
 {
 	return atomic_read(&(page)->_mapcount) >= 0;
 }
+struct address_space *page_mapping(struct page *page);
 
 /*
  * Return true only if the page has been allocated with
@@ -1059,6 +1075,7 @@
 extern void show_free_areas(unsigned int flags);
 extern bool skip_free_areas_node(unsigned int flags, int nid);
 
+void shmem_set_file(struct vm_area_struct *vma, struct file *file);
 int shmem_zero_setup(struct vm_area_struct *);
 #ifdef CONFIG_SHMEM
 bool shmem_mapping(struct address_space *mapping);
@@ -1838,7 +1855,7 @@
 extern struct vm_area_struct *vma_merge(struct mm_struct *,
 	struct vm_area_struct *prev, unsigned long addr, unsigned long end,
 	unsigned long vm_flags, struct anon_vma *, struct file *, pgoff_t,
-	struct mempolicy *, struct vm_userfaultfd_ctx);
+	struct mempolicy *, struct vm_userfaultfd_ctx, const char __user *);
 extern struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *);
 extern int split_vma(struct mm_struct *,
 	struct vm_area_struct *, unsigned long addr, int new_below);
@@ -1951,8 +1968,11 @@
 
 /* truncate.c */
 extern void truncate_inode_pages(struct address_space *, loff_t);
+extern void truncate_inode_pages_fill_zero(struct address_space *, loff_t);
 extern void truncate_inode_pages_range(struct address_space *,
 				       loff_t lstart, loff_t lend);
+extern void truncate_inode_pages_range_fill_zero(struct address_space *,
+				       loff_t lstart, loff_t lend);
 extern void truncate_inode_pages_final(struct address_space *);
 
 /* generic vm_area_ops exported for stackable file systems */
@@ -1965,7 +1985,7 @@
 void task_dirty_inc(struct task_struct *tsk);
 
 /* readahead.c */
-#define VM_MAX_READAHEAD	128	/* kbytes */
+#define VM_MAX_READAHEAD	512	/* kbytes */
 #define VM_MIN_READAHEAD	16	/* kbytes (includes current page) */
 
 int force_page_cache_readahead(struct address_space *mapping, struct file *filp,
@@ -2126,6 +2146,17 @@
 }
 #endif /* CONFIG_PROC_FS */
 
+#ifdef CONFIG_PAGE_POISONING
+extern bool page_poisoning_enabled(void);
+extern void kernel_poison_pages(struct page *page, int numpages, int enable);
+extern bool page_is_poisoned(struct page *page);
+#else
+static inline bool page_poisoning_enabled(void) { return false; }
+static inline void kernel_poison_pages(struct page *page, int numpages,
+					int enable) { }
+static inline bool page_is_poisoned(struct page *page) { return false; }
+#endif
+
 #ifdef CONFIG_DEBUG_PAGEALLOC
 extern bool _debug_pagealloc_enabled;
 extern void __kernel_map_pages(struct page *page, int numpages, int enable);
@@ -2146,13 +2177,17 @@
 #ifdef CONFIG_HIBERNATION
 extern bool kernel_page_present(struct page *page);
 #endif /* CONFIG_HIBERNATION */
-#else
+#else	/* CONFIG_DEBUG_PAGEALLOC */
 static inline void
 kernel_map_pages(struct page *page, int numpages, int enable) {}
 #ifdef CONFIG_HIBERNATION
 static inline bool kernel_page_present(struct page *page) { return true; }
 #endif /* CONFIG_HIBERNATION */
-#endif
+static inline bool debug_pagealloc_enabled(void)
+{
+	return false;
+}
+#endif	/* CONFIG_DEBUG_PAGEALLOC */
 
 #ifdef __HAVE_ARCH_GATE_AREA
 extern struct vm_area_struct *get_gate_vma(struct mm_struct *mm);
@@ -2179,6 +2214,8 @@
 void drop_slab(void);
 void drop_slab_node(int nid);
 
+void drop_pagecache_sb(struct super_block *sb, void *unused);
+
 #ifndef CONFIG_MMU
 #define randomize_va_space 0
 #else
@@ -2273,7 +2310,6 @@
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */
 
 extern struct page_ext_operations debug_guardpage_ops;
-extern struct page_ext_operations page_poisoning_ops;
 
 #ifdef CONFIG_DEBUG_PAGEALLOC
 extern unsigned int _debug_guardpage_minorder;
@@ -2311,5 +2347,19 @@
 static inline void setup_nr_node_ids(void) {}
 #endif
 
+#ifdef CONFIG_PROCESS_RECLAIM
+struct reclaim_param {
+	struct vm_area_struct *vma;
+	/* Number of pages scanned */
+	int nr_scanned;
+	/* max pages to reclaim */
+	int nr_to_reclaim;
+	/* pages reclaimed */
+	int nr_reclaimed;
+};
+extern struct reclaim_param reclaim_task_anon(struct task_struct *task,
+		int nr_to_reclaim);
+#endif
+
 #endif /* __KERNEL__ */
 #endif /* _LINUX_MM_H */
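
/*
 * Illustrative sketch: reclaim_task_anon() returns its bookkeeping by
 * value, so a caller (e.g. a per-process reclaim trigger) can report what
 * was actually scanned and reclaimed.
 */
#include <linux/mm.h>
#include <linux/printk.h>
#include <linux/sched.h>

#ifdef CONFIG_PROCESS_RECLAIM
static void example_reclaim_some(struct task_struct *task)
{
	struct reclaim_param rp = reclaim_task_anon(task, 128);

	pr_info("scanned %d, reclaimed %d of %d requested\n",
		rp.nr_scanned, rp.nr_reclaimed, rp.nr_to_reclaim);
}
#endif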
diff -ruw linux-4.4.115/include/linux/mm_types.h linux-4.4.115-fbx/include/linux/mm_types.h
--- linux-4.4.115/include/linux/mm_types.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/linux/mm_types.h	2019-10-29 09:26:25.457220930 +0100
@@ -12,6 +12,7 @@
 #include <linux/cpumask.h>
 #include <linux/uprobes.h>
 #include <linux/page-flags-layout.h>
+#include <linux/workqueue.h>
 #include <asm/page.h>
 #include <asm/mmu.h>
 
@@ -207,6 +208,10 @@
 					   not kmapped, ie. highmem) */
 #endif /* WANT_PAGE_VIRTUAL */
 
+#ifdef CONFIG_BLK_DEV_IO_TRACE
+	struct task_struct *tsk_dirty;	/* task that sets this page dirty */
+#endif
+
 #ifdef CONFIG_KMEMCHECK
 	/*
 	 * kmemcheck wants to track the status of each byte in a page; this
@@ -323,11 +328,18 @@
 	/*
 	 * For areas with an address space and backing store,
 	 * linkage into the address_space->i_mmap interval tree.
+	 *
+	 * For private anonymous mappings, a pointer to a null terminated string
+	 * in the user process containing the name given to the vma, or NULL
+	 * if unnamed.
 	 */
+	union {
 	struct {
 		struct rb_node rb;
 		unsigned long rb_subtree_last;
 	} shared;
+		const char __user *anon_name;
+	};
 
 	/*
 	 * A file's MAP_PRIVATE vma can be in both i_mmap tree and anon_vma
@@ -516,6 +528,11 @@
 #ifdef CONFIG_HUGETLB_PAGE
 	atomic_long_t hugetlb_usage;
 #endif
+#ifdef CONFIG_MSM_APP_SETTINGS
+	int app_setting;
+#endif
+
+	struct work_struct async_put_work;
 };
 
 static inline void mm_init_cpumask(struct mm_struct *mm)
@@ -596,4 +613,13 @@
 	unsigned long val;
 } swp_entry_t;
 
+/* Return the name for an anonymous mapping or NULL for a file-backed mapping */
+static inline const char __user *vma_get_anon_name(struct vm_area_struct *vma)
+{
+	if (vma->vm_file)
+		return NULL;
+
+	return vma->anon_name;
+}
+
 #endif /* _LINUX_MM_TYPES_H */
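
/*
 * Illustrative userspace sketch: vma->anon_name is filled through an
 * Android-specific prctl() elsewhere in this patch, after which the name
 * appears in /proc/<pid>/maps. The constants are guarded in case the libc
 * headers lack them.
 */
#include <sys/mman.h>
#include <sys/prctl.h>

#ifndef PR_SET_VMA
#define PR_SET_VMA		0x53564d41
#define PR_SET_VMA_ANON_NAME	0
#endif

static void *named_anon_map(size_t len)
{
	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (p != MAP_FAILED)
		prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME,
		      (unsigned long)p, len, "example buffer");
	return p;
}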
diff -ruw linux-4.4.115/include/linux/mmzone.h linux-4.4.115-fbx/include/linux/mmzone.h
--- linux-4.4.115/include/linux/mmzone.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/linux/mmzone.h	2019-01-22 16:16:28.311289946 +0100
@@ -39,8 +39,6 @@
 	MIGRATE_UNMOVABLE,
 	MIGRATE_MOVABLE,
 	MIGRATE_RECLAIMABLE,
-	MIGRATE_PCPTYPES,	/* the number of types on the pcp lists */
-	MIGRATE_HIGHATOMIC = MIGRATE_PCPTYPES,
 #ifdef CONFIG_CMA
 	/*
 	 * MIGRATE_CMA migration type is designed to mimic the way
@@ -57,16 +55,35 @@
 	 */
 	MIGRATE_CMA,
 #endif
+	MIGRATE_PCPTYPES, /* the number of types on the pcp lists */
+	MIGRATE_HIGHATOMIC = MIGRATE_PCPTYPES,
 #ifdef CONFIG_MEMORY_ISOLATION
 	MIGRATE_ISOLATE,	/* can't allocate from here */
 #endif
 	MIGRATE_TYPES
 };
 
+/*
+ * Returns a list which contains the migrate types on to which
+ * an allocation falls back when the free list for the migrate
+ * type mtype is depleted.
+ * The end of the list is delimited by the type MIGRATE_TYPES.
+ */
+extern int *get_migratetype_fallbacks(int mtype);
+
+/* In mm/page_alloc.c; keep in sync also with show_migration_types() there */
+extern char * const migratetype_names[MIGRATE_TYPES];
+
 #ifdef CONFIG_CMA
+bool is_cma_pageblock(struct page *page);
 #  define is_migrate_cma(migratetype) unlikely((migratetype) == MIGRATE_CMA)
+#  define get_cma_migrate_type() MIGRATE_CMA
+#  define is_migrate_cma_page(_page) (get_pageblock_migratetype(_page) == MIGRATE_CMA)
 #else
+#  define is_cma_pageblock(page) false
 #  define is_migrate_cma(migratetype) false
+#  define get_cma_migrate_type() MIGRATE_MOVABLE
+#  define is_migrate_cma_page(_page) false
 #endif
 
 #define for_each_migratetype_order(order, type) \
@@ -158,6 +175,8 @@
 	WORKINGSET_NODERECLAIM,
 	NR_ANON_TRANSPARENT_HUGEPAGES,
 	NR_FREE_CMA_PAGES,
+	NR_SWAPCACHE,
+	NR_INDIRECTLY_RECLAIMABLE_BYTES, /* measured in bytes */
 	NR_VM_ZONE_STAT_ITEMS };
 
 /*
@@ -362,10 +381,13 @@
 	struct per_cpu_pageset __percpu *pageset;
 
 	/*
-	 * This is a per-zone reserve of pages that should not be
-	 * considered dirtyable memory.
+	 * This is a per-zone reserve of pages that are not available
+	 * to userspace allocations.
 	 */
-	unsigned long		dirty_balance_reserve;
+	unsigned long		totalreserve_pages;
+#ifdef CONFIG_CMA
+	bool			cma_alloc;
+#endif
 
 #ifndef CONFIG_SPARSEMEM
 	/*
@@ -672,6 +694,12 @@
 					   mem_hotplug_begin/end() */
 	int kswapd_max_order;
 	enum zone_type classzone_idx;
+#ifdef CONFIG_COMPACTION
+	int kcompactd_max_order;
+	enum zone_type kcompactd_classzone_idx;
+	wait_queue_head_t kcompactd_wait;
+	struct task_struct *kcompactd;
+#endif
 #ifdef CONFIG_NUMA_BALANCING
 	/* Lock serializing the migrate rate limiting window */
 	spinlock_t numabalancing_migrate_lock;
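
/*
 * Illustrative sketch of the fallback-list contract documented above: the
 * returned array is walked until the MIGRATE_TYPES terminator.
 */
#include <linux/mmzone.h>
#include <linux/printk.h>

static void example_dump_fallbacks(int mtype)
{
	int *fallbacks = get_migratetype_fallbacks(mtype);
	int i;

	for (i = 0; fallbacks[i] != MIGRATE_TYPES; i++)
		pr_info("%s -> %s\n", migratetype_names[mtype],
			migratetype_names[fallbacks[i]]);
}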
diff -ruw linux-4.4.115/include/linux/mod_devicetable.h linux-4.4.115-fbx/include/linux/mod_devicetable.h
--- linux-4.4.115/include/linux/mod_devicetable.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/linux/mod_devicetable.h	2019-10-29 09:26:25.461220969 +0100
@@ -291,7 +291,7 @@
 #define INPUT_DEVICE_ID_LED_MAX		0x0f
 #define INPUT_DEVICE_ID_SND_MAX		0x07
 #define INPUT_DEVICE_ID_FF_MAX		0x7f
-#define INPUT_DEVICE_ID_SW_MAX		0x0f
+#define INPUT_DEVICE_ID_SW_MAX		0x20
 
 #define INPUT_DEVICE_ID_MATCH_BUS	1
 #define INPUT_DEVICE_ID_MATCH_VENDOR	2
@@ -445,6 +445,16 @@
 	kernel_ulong_t driver_data;	/* Data private to the driver */
 };
 
+/* soundwire */
+
+#define SOUNDWIRE_NAME_SIZE	32
+#define SOUNDWIRE_MODULE_PREFIX "swr:"
+
+struct swr_device_id {
+	char name[SOUNDWIRE_NAME_SIZE];
+	kernel_ulong_t driver_data;	/* Data private to the driver */
+};
+
 /* dmi */
 enum dmi_field {
 	DMI_NONE,
@@ -481,6 +491,16 @@
 	struct dmi_strmatch matches[4];
 	void *driver_data;
 };
+
+#define SLIMBUS_NAME_SIZE	32
+#define SLIMBUS_MODULE_PREFIX "slim:"
+
+struct slim_device_id {
+	char name[SLIMBUS_NAME_SIZE];
+	kernel_ulong_t driver_data	/* Data private to the driver */
+			__attribute__((aligned(sizeof(kernel_ulong_t))));
+};
+
 /*
  * struct dmi_device_id appears during expansion of
  * "MODULE_DEVICE_TABLE(dmi, x)". Compiler doesn't look inside it
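
/*
 * Illustrative sketch (device name invented): a SLIMbus driver would
 * advertise its IDs the usual way; modpost support for the "slim:" alias
 * prefix is assumed to come with the rest of this patch.
 */
#include <linux/mod_devicetable.h>
#include <linux/module.h>

static const struct slim_device_id example_slim_ids[] = {
	{ "example-codec", 0 },
	{ }
};
MODULE_DEVICE_TABLE(slim, example_slim_ids);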
diff -ruw linux-4.4.115/include/linux/module.h linux-4.4.115-fbx/include/linux/module.h
--- linux-4.4.115/include/linux/module.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/linux/module.h	2019-10-29 09:26:25.461220969 +0100
@@ -125,13 +125,13 @@
 
 /* Each module must use one module_init(). */
 #define module_init(initfn)					\
-	static inline initcall_t __inittest(void)		\
+	static inline initcall_t __maybe_unused __inittest(void)		\
 	{ return initfn; }					\
 	int init_module(void) __attribute__((alias(#initfn)));
 
 /* This is only required if you want to be unloadable. */
 #define module_exit(exitfn)					\
-	static inline exitcall_t __exittest(void)		\
+	static inline exitcall_t __maybe_unused __exittest(void)		\
 	{ return exitfn; }					\
 	void cleanup_module(void) __attribute__((alias(#exitfn)));
 
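/*
 * What the __inittest()/__exittest() helpers are for: they only type-check
 * their argument, so if example_init() below did not match initcall_t the
 * module_init() expansion would fail to compile. __maybe_unused is
 * presumably there to silence unused-function warnings from some
 * compilers/build modes, since nothing ever calls the helpers.
 */
#include <linux/init.h>
#include <linux/module.h>

static int __init example_init(void)
{
	return 0;
}

static void __exit example_exit(void)
{
}

module_init(example_init);	/* also aliases init_module to example_init */
module_exit(example_exit);
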
diff -ruw linux-4.4.115/include/linux/mount.h linux-4.4.115-fbx/include/linux/mount.h
--- linux-4.4.115/include/linux/mount.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/linux/mount.h	2019-01-22 16:16:28.311289946 +0100
@@ -67,6 +67,7 @@
 	struct dentry *mnt_root;	/* root of the mounted tree */
 	struct super_block *mnt_sb;	/* pointer to superblock */
 	int mnt_flags;
+	void *data;
 };
 
 struct file; /* forward dec */
diff -ruw linux-4.4.115/include/linux/namei.h linux-4.4.115-fbx/include/linux/namei.h
--- linux-4.4.115/include/linux/namei.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/linux/namei.h	2019-01-22 16:16:28.319290018 +0100
@@ -75,8 +75,11 @@
 extern void done_path_create(struct path *, struct dentry *);
 extern struct dentry *kern_path_locked(const char *, struct path *);
 extern int kern_path_mountpoint(int, const char *, struct path *, unsigned int);
+extern int vfs_path_lookup(struct dentry *, struct vfsmount *,
+		const char *, unsigned int, struct path *);
 
 extern struct dentry *lookup_one_len(const char *, struct dentry *, int);
+extern struct dentry *lookup_one_len2(const char *, struct vfsmount *mnt, struct dentry *, int);
 
 extern int follow_down_one(struct path *);
 extern int follow_down(struct path *);
diff -ruw linux-4.4.115/include/linux/netdevice.h linux-4.4.115-fbx/include/linux/netdevice.h
--- linux-4.4.115/include/linux/netdevice.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/linux/netdevice.h	2019-10-29 09:26:25.465221008 +0100
@@ -2582,6 +2582,7 @@
  */
 struct softnet_data {
 	struct list_head	poll_list;
+	struct napi_struct	*current_napi;
 	struct sk_buff_head	process_queue;
 
 	/* stats */
@@ -2589,6 +2590,8 @@
 	unsigned int		time_squeeze;
 	unsigned int		cpu_collision;
 	unsigned int		received_rps;
+	unsigned int		gro_coalesced;
+
 #ifdef CONFIG_RPS
 	struct softnet_data	*rps_ipi_list;
 #endif
@@ -3076,6 +3079,7 @@
 gro_result_t napi_gro_frags(struct napi_struct *napi);
 struct packet_offload *gro_find_receive_by_type(__be16 type);
 struct packet_offload *gro_find_complete_by_type(__be16 type);
+extern struct napi_struct *get_current_napi_context(void);
 
 static inline void napi_free_frags(struct napi_struct *napi)
 {
diff -ruw linux-4.4.115/include/linux/nmi.h linux-4.4.115-fbx/include/linux/nmi.h
--- linux-4.4.115/include/linux/nmi.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/linux/nmi.h	2019-01-22 16:16:28.327290091 +0100
@@ -14,8 +14,11 @@
  * may be used to reset the timeout - for code which intentionally
  * disables interrupts for a long time. This call is stateless.
  */
-#if defined(CONFIG_HAVE_NMI_WATCHDOG) || defined(CONFIG_HARDLOCKUP_DETECTOR)
+#if defined(CONFIG_HAVE_NMI_WATCHDOG) || defined(CONFIG_HARDLOCKUP_DETECTOR_NMI)
 #include <asm/nmi.h>
+#endif
+
+#if defined(CONFIG_HAVE_NMI_WATCHDOG) || defined(CONFIG_HARDLOCKUP_DETECTOR)
 extern void touch_nmi_watchdog(void);
 #else
 static inline void touch_nmi_watchdog(void)
@@ -38,13 +41,22 @@
 #ifdef arch_trigger_all_cpu_backtrace
 static inline bool trigger_all_cpu_backtrace(void)
 {
+	#if defined(CONFIG_ARM64)
+	arch_trigger_all_cpu_backtrace();
+	#else
 	arch_trigger_all_cpu_backtrace(true);
+	#endif
 
 	return true;
 }
 static inline bool trigger_allbutself_cpu_backtrace(void)
 {
+	#if defined(CONFIG_ARM64)
+	arch_trigger_all_cpu_backtrace();
+	#else
 	arch_trigger_all_cpu_backtrace(false);
+	#endif
+
 	return true;
 }
 
diff -ruw linux-4.4.115/include/linux/of_address.h linux-4.4.115-fbx/include/linux/of_address.h
--- linux-4.4.115/include/linux/of_address.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/linux/of_address.h	2019-01-22 16:16:28.331290127 +0100
@@ -36,6 +36,8 @@
 					const struct of_device_id *matches,
 					u64 base_address);
 extern void __iomem *of_iomap(struct device_node *device, int index);
+extern void __iomem *of_iomap_by_name(struct device_node *device,
+					const char *name);
 
 /* Extract an address from a device, returns the region size and
  * the address space flags too. The PCI version uses a BAR number
@@ -43,6 +45,8 @@
  */
 extern const __be32 *of_get_address(struct device_node *dev, int index,
 			   u64 *size, unsigned int *flags);
+extern const __be32 *of_get_address_by_name(struct device_node *dev,
+			   const char *name, u64 *size, unsigned int *flags);
 
 extern int pci_register_io_range(phys_addr_t addr, resource_size_t size);
 extern unsigned long pci_address_to_pio(phys_addr_t addr);
diff -ruw linux-4.4.115/include/linux/of_fdt.h linux-4.4.115-fbx/include/linux/of_fdt.h
--- linux-4.4.115/include/linux/of_fdt.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/linux/of_fdt.h	2019-01-22 16:16:28.331290127 +0100
@@ -59,6 +59,27 @@
 extern unsigned long of_get_flat_dt_root(void);
 extern int of_get_flat_dt_size(void);
 
+/*
+ * early_init_dt_scan_chosen - scan the device tree for ramdisk and bootargs
+ *
+ * The boot arguments will be placed into the memory pointed to by @data.
+ * That memory should be COMMAND_LINE_SIZE big and initialized to be a valid
+ * (possibly empty) string.  Logic for what will be in @data after this
+ * function finishes:
+ *
+ * - CONFIG_CMDLINE_FORCE=true
+ *     CONFIG_CMDLINE
+ * - CONFIG_CMDLINE_EXTEND=true, @data is non-empty string
+ *     @data + dt bootargs (even if dt bootargs are empty)
+ * - CONFIG_CMDLINE_EXTEND=true, @data is empty string
+ *     CONFIG_CMDLINE + dt bootargs (even if dt bootargs are empty)
+ * - CONFIG_CMDLINE_FROM_BOOTLOADER=true, dt bootargs=non-empty:
+ *     dt bootargs
+ * - CONFIG_CMDLINE_FROM_BOOTLOADER=true, dt bootargs=empty, @data is non-empty string
+ *     @data is left unchanged
+ * - CONFIG_CMDLINE_FROM_BOOTLOADER=true, dt bootargs=empty, @data is empty string
+ *     CONFIG_CMDLINE (or "" if that's not defined)
+ */
 extern int early_init_dt_scan_chosen(unsigned long node, const char *uname,
 				     int depth, void *data);
 extern int early_init_dt_scan_memory(unsigned long node, const char *uname,
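
/*
 * Illustrative sketch: the scanner documented above is typically driven
 * from arch boot code via of_scan_flat_dt(), with the static command-line
 * buffer as @data.
 */
#include <linux/init.h>
#include <linux/of_fdt.h>

static void __init example_setup_bootargs(void)
{
	/* boot_command_line is COMMAND_LINE_SIZE bytes, pre-initialized */
	of_scan_flat_dt(early_init_dt_scan_chosen, boot_command_line);
}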
diff -ruw linux-4.4.115/include/linux/of_reserved_mem.h linux-4.4.115-fbx/include/linux/of_reserved_mem.h
--- linux-4.4.115/include/linux/of_reserved_mem.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/linux/of_reserved_mem.h	2019-01-22 16:16:28.331290127 +0100
@@ -13,6 +13,7 @@
 	phys_addr_t			base;
 	phys_addr_t			size;
 	void				*priv;
+	int				fixup;
 };
 
 struct reserved_mem_ops {
diff -ruw linux-4.4.115/include/linux/oom.h linux-4.4.115-fbx/include/linux/oom.h
--- linux-4.4.115/include/linux/oom.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/linux/oom.h	2019-01-22 16:16:28.331290127 +0100
@@ -113,6 +113,9 @@
 		!(task->signal->flags & SIGNAL_GROUP_COREDUMP);
 }
 
+extern void dump_tasks(struct mem_cgroup *memcg,
+		const nodemask_t *nodemask);
+
 /* sysctls */
 extern int sysctl_oom_dump_tasks;
 extern int sysctl_oom_kill_allocating_task;
diff -ruw linux-4.4.115/include/linux/page_ext.h linux-4.4.115-fbx/include/linux/page_ext.h
--- linux-4.4.115/include/linux/page_ext.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/linux/page_ext.h	2019-01-22 16:16:28.335290163 +0100
@@ -3,6 +3,7 @@
 
 #include <linux/types.h>
 #include <linux/stacktrace.h>
+#include <linux/stackdepot.h>
 
 struct pglist_data;
 struct page_ext_operations {
@@ -44,8 +45,8 @@
 #ifdef CONFIG_PAGE_OWNER
 	unsigned int order;
 	gfp_t gfp_mask;
-	unsigned int nr_entries;
-	unsigned long trace_entries[8];
+	int last_migrate_reason;
+	depot_stack_handle_t handle;
 #endif
 };
 
diff -ruw linux-4.4.115/include/linux/page-flags.h linux-4.4.115-fbx/include/linux/page-flags.h
--- linux-4.4.115/include/linux/page-flags.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/linux/page-flags.h	2019-10-29 09:26:25.469221048 +0100
@@ -108,6 +108,9 @@
 	PG_young,
 	PG_idle,
 #endif
+#ifdef CONFIG_ZCACHE
+	PG_was_active,
+#endif
 	__NR_PAGEFLAGS,
 
 	/* Filesystems */
@@ -129,6 +132,9 @@
 
 	/* SLOB */
 	PG_slob_free = PG_private,
+
+	/* non-lru isolated movable page */
+	PG_isolated = PG_reclaim,
 };
 
 #ifndef __GENERATING_BOUNDS_H
@@ -224,6 +230,11 @@
 	__SETPAGEFLAG(SwapBacked, swapbacked)
 
 __PAGEFLAG(SlobFree, slob_free)
+#ifdef CONFIG_ZCACHE
+PAGEFLAG(WasActive, was_active)
+#else
+PAGEFLAG_FALSE(WasActive)
+#endif
 
 /*
  * Private page markings that may be used by the filesystem that owns the page
@@ -301,25 +312,38 @@
  * with the PAGE_MAPPING_ANON bit set to distinguish it.  See rmap.h.
  *
  * On an anonymous page in a VM_MERGEABLE area, if CONFIG_KSM is enabled,
- * the PAGE_MAPPING_KSM bit may be set along with the PAGE_MAPPING_ANON bit;
- * and then page->mapping points, not to an anon_vma, but to a private
+ * the PAGE_MAPPING_MOVABLE bit may be set along with the PAGE_MAPPING_ANON
+ * bit; and then page->mapping points, not to an anon_vma, but to a private
  * structure which KSM associates with that merged page.  See ksm.h.
  *
- * PAGE_MAPPING_KSM without PAGE_MAPPING_ANON is currently never used.
+ * PAGE_MAPPING_KSM without PAGE_MAPPING_ANON is used for a non-lru movable
+ * page, and then page->mapping points to a struct address_space.
  *
  * Please note that, confusingly, "page_mapping" refers to the inode
  * address_space which maps the page from disk; whereas "page_mapped"
  * refers to user virtual address space into which the page is mapped.
  */
-#define PAGE_MAPPING_ANON	1
-#define PAGE_MAPPING_KSM	2
-#define PAGE_MAPPING_FLAGS	(PAGE_MAPPING_ANON | PAGE_MAPPING_KSM)
+#define PAGE_MAPPING_ANON	0x1
+#define PAGE_MAPPING_MOVABLE	0x2
+#define PAGE_MAPPING_KSM	(PAGE_MAPPING_ANON | PAGE_MAPPING_MOVABLE)
+#define PAGE_MAPPING_FLAGS	(PAGE_MAPPING_ANON | PAGE_MAPPING_MOVABLE)
+
+static __always_inline int PageMappingFlags(struct page *page)
+{
+	return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) != 0;
+}
 
 static inline int PageAnon(struct page *page)
 {
 	return ((unsigned long)page->mapping & PAGE_MAPPING_ANON) != 0;
 }
 
+static __always_inline int __PageMovable(struct page *page)
+{
+	return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) ==
+				PAGE_MAPPING_MOVABLE;
+}
+
 #ifdef CONFIG_KSM
 /*
  * A KSM page is one of those write-protected "shared pages" or "merged pages"
@@ -330,7 +354,7 @@
 static inline int PageKsm(struct page *page)
 {
 	return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) ==
-				(PAGE_MAPPING_ANON | PAGE_MAPPING_KSM);
+				PAGE_MAPPING_KSM;
 }
 #else
 TESTPAGEFLAG_FALSE(Ksm)
@@ -549,6 +573,8 @@
 	atomic_set(&page->_mapcount, -1);
 }
 
+__PAGEFLAG(Isolated, isolated);
+
 /*
  * If network-based swap is enabled, sl*b must keep track of whether pages
  * were allocated from pfmemalloc reserves.
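
/*
 * Illustrative sketch: the low two bits of page->mapping now encode three
 * distinct cases, and the test order below matters because KSM pages set
 * both bits.
 */
#include <linux/mm_types.h>
#include <linux/page-flags.h>
#include <linux/printk.h>

static void example_classify(struct page *page)
{
	if (PageKsm(page))		/* ANON | MOVABLE */
		pr_info("KSM page\n");
	else if (PageAnon(page))	/* ANON only */
		pr_info("anonymous page\n");
	else if (__PageMovable(page))	/* MOVABLE only */
		pr_info("non-lru movable page\n");
	else
		pr_info("file-backed or unmapped page\n");
}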
diff -ruw linux-4.4.115/include/linux/page_owner.h linux-4.4.115-fbx/include/linux/page_owner.h
--- linux-4.4.115/include/linux/page_owner.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/linux/page_owner.h	2019-01-22 16:16:28.335290163 +0100
@@ -1,38 +1,52 @@
 #ifndef __LINUX_PAGE_OWNER_H
 #define __LINUX_PAGE_OWNER_H
 
+#include <linux/jump_label.h>
+
 #ifdef CONFIG_PAGE_OWNER
-extern bool page_owner_inited;
+extern struct static_key_false page_owner_inited;
 extern struct page_ext_operations page_owner_ops;
 
 extern void __reset_page_owner(struct page *page, unsigned int order);
 extern void __set_page_owner(struct page *page,
 			unsigned int order, gfp_t gfp_mask);
-extern gfp_t __get_page_owner_gfp(struct page *page);
+extern void __split_page_owner(struct page *page, unsigned int order);
+extern void __copy_page_owner(struct page *oldpage, struct page *newpage);
+extern void __set_page_owner_migrate_reason(struct page *page, int reason);
+extern void __dump_page_owner(struct page *page);
 
 static inline void reset_page_owner(struct page *page, unsigned int order)
 {
-	if (likely(!page_owner_inited))
-		return;
-
+	if (static_branch_unlikely(&page_owner_inited))
 	__reset_page_owner(page, order);
 }
 
 static inline void set_page_owner(struct page *page,
 			unsigned int order, gfp_t gfp_mask)
 {
-	if (likely(!page_owner_inited))
-		return;
-
+	if (static_branch_unlikely(&page_owner_inited))
 	__set_page_owner(page, order, gfp_mask);
 }
 
-static inline gfp_t get_page_owner_gfp(struct page *page)
+static inline void split_page_owner(struct page *page, unsigned int order)
 {
-	if (likely(!page_owner_inited))
-		return 0;
-
-	return __get_page_owner_gfp(page);
+	if (static_branch_unlikely(&page_owner_inited))
+		__split_page_owner(page, order);
+}
+static inline void copy_page_owner(struct page *oldpage, struct page *newpage)
+{
+	if (static_branch_unlikely(&page_owner_inited))
+		__copy_page_owner(oldpage, newpage);
+}
+static inline void set_page_owner_migrate_reason(struct page *page, int reason)
+{
+	if (static_branch_unlikely(&page_owner_inited))
+		__set_page_owner_migrate_reason(page, reason);
+}
+static inline void dump_page_owner(struct page *page)
+{
+	if (static_branch_unlikely(&page_owner_inited))
+		__dump_page_owner(page);
 }
 #else
 static inline void reset_page_owner(struct page *page, unsigned int order)
@@ -42,10 +56,18 @@
 			unsigned int order, gfp_t gfp_mask)
 {
 }
-static inline gfp_t get_page_owner_gfp(struct page *page)
+static inline void split_page_owner(struct page *page,
+			unsigned int order)
+{
+}
+static inline void copy_page_owner(struct page *oldpage, struct page *newpage)
+{
+}
+static inline void set_page_owner_migrate_reason(struct page *page, int reason)
+{
+}
+static inline void dump_page_owner(struct page *page)
 {
-	return 0;
 }
-
 #endif /* CONFIG_PAGE_OWNER */
 #endif /* __LINUX_PAGE_OWNER_H */
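
/*
 * Illustrative sketch of the pattern adopted above: a default-off static
 * key keeps the hooks nearly free until the feature is enabled at boot.
 */
#include <linux/jump_label.h>
#include <linux/printk.h>

static DEFINE_STATIC_KEY_FALSE(example_feature);

static inline void example_hook(void)
{
	if (static_branch_unlikely(&example_feature))
		pr_info("slow path, patched in only when enabled\n");
}

static void example_enable(void)
{
	static_branch_enable(&example_feature);
}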
diff -ruw linux-4.4.115/include/linux/percpu-rwsem.h linux-4.4.115-fbx/include/linux/percpu-rwsem.h
--- linux-4.4.115/include/linux/percpu-rwsem.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/linux/percpu-rwsem.h	2019-01-22 16:16:28.339290199 +0100
@@ -10,30 +10,96 @@
 
 struct percpu_rw_semaphore {
 	struct rcu_sync		rss;
-	unsigned int __percpu	*fast_read_ctr;
+	unsigned int __percpu	*read_count;
 	struct rw_semaphore	rw_sem;
-	atomic_t		slow_read_ctr;
-	wait_queue_head_t	write_waitq;
+	wait_queue_head_t	writer;
+	int			readers_block;
 };
 
-extern void percpu_down_read(struct percpu_rw_semaphore *);
-extern int  percpu_down_read_trylock(struct percpu_rw_semaphore *);
-extern void percpu_up_read(struct percpu_rw_semaphore *);
+extern int __percpu_down_read(struct percpu_rw_semaphore *, int);
+extern void __percpu_up_read(struct percpu_rw_semaphore *);
+
+static inline void percpu_down_read(struct percpu_rw_semaphore *sem)
+{
+	might_sleep();
+
+	rwsem_acquire_read(&sem->rw_sem.dep_map, 0, 0, _RET_IP_);
+
+	preempt_disable();
+	/*
+	 * We are in an RCU-sched read-side critical section, so the writer
+	 * cannot both change sem->state from readers_fast and start checking
+	 * counters while we are here. So if we see !sem->state, we know that
+	 * the writer won't be checking until we're past the preempt_enable()
+	 * and that once the synchronize_sched() is done, the writer will see
+	 * anything we did within this RCU-sched read-side critical section.
+	 */
+	__this_cpu_inc(*sem->read_count);
+	if (unlikely(!rcu_sync_is_idle(&sem->rss)))
+		__percpu_down_read(sem, false); /* Unconditional memory barrier */
+	preempt_enable();
+	/*
+	 * The barrier() from preempt_enable() prevents the compiler from
+	 * bleeding the critical section out.
+	 */
+}
+
+static inline int percpu_down_read_trylock(struct percpu_rw_semaphore *sem)
+{
+	int ret = 1;
+
+	preempt_disable();
+	/*
+	 * Same as in percpu_down_read().
+	 */
+	__this_cpu_inc(*sem->read_count);
+	if (unlikely(!rcu_sync_is_idle(&sem->rss)))
+		ret = __percpu_down_read(sem, true); /* Unconditional memory barrier */
+	preempt_enable();
+	/*
+	 * The barrier() from preempt_enable() prevents the compiler from
+	 * bleeding the critical section out.
+	 */
+
+	if (ret)
+		rwsem_acquire_read(&sem->rw_sem.dep_map, 0, 1, _RET_IP_);
+
+	return ret;
+}
+
+static inline void percpu_up_read(struct percpu_rw_semaphore *sem)
+{
+	/*
+	 * The barrier() in preempt_disable() prevents the compiler from
+	 * bleeding the critical section out.
+	 */
+	preempt_disable();
+	/*
+	 * Same as in percpu_down_read().
+	 */
+	if (likely(rcu_sync_is_idle(&sem->rss)))
+		__this_cpu_dec(*sem->read_count);
+	else
+		__percpu_up_read(sem); /* Unconditional memory barrier */
+	preempt_enable();
+
+	rwsem_release(&sem->rw_sem.dep_map, 1, _RET_IP_);
+}
 
 extern void percpu_down_write(struct percpu_rw_semaphore *);
 extern void percpu_up_write(struct percpu_rw_semaphore *);
 
 extern int __percpu_init_rwsem(struct percpu_rw_semaphore *,
 				const char *, struct lock_class_key *);
+
 extern void percpu_free_rwsem(struct percpu_rw_semaphore *);
 
-#define percpu_init_rwsem(brw)	\
+#define percpu_init_rwsem(sem)					\
 ({								\
 	static struct lock_class_key rwsem_key;			\
-	__percpu_init_rwsem(brw, #brw, &rwsem_key);		\
+	__percpu_init_rwsem(sem, #sem, &rwsem_key);		\
 })
 
-
 #define percpu_rwsem_is_held(sem) lockdep_is_held(&(sem)->rw_sem)
 
 static inline void percpu_rwsem_release(struct percpu_rw_semaphore *sem,
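
For context, a sketch of a consumer of the reworked API: on the fast path the
now-inlined percpu_down_read()/percpu_up_read() only touch the per-cpu
read_count, while percpu_down_write() flips the rcu_sync state so readers fall
through to __percpu_down_read(). The names below (mode_sem, reader_path,
writer_path) are illustrative:

#include <linux/percpu-rwsem.h>

static struct percpu_rw_semaphore mode_sem;

static int __init mode_init(void)
{
	/* the macro supplies a static lockdep key and the "#sem" name */
	return percpu_init_rwsem(&mode_sem);
}

static void reader_path(void)
{
	percpu_down_read(&mode_sem);	/* per-cpu increment, no atomics */
	/* read-side critical section */
	percpu_up_read(&mode_sem);
}

static void writer_path(void)
{
	percpu_down_write(&mode_sem);	/* waits for all readers to drain */
	/* exclusive section */
	percpu_up_write(&mode_sem);
}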
diff -ruw linux-4.4.115/include/linux/perf/arm_pmu.h linux-4.4.115-fbx/include/linux/perf/arm_pmu.h
--- linux-4.4.115/include/linux/perf/arm_pmu.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/linux/perf/arm_pmu.h	2019-01-22 16:16:28.339290199 +0100
@@ -77,6 +77,12 @@
 	struct arm_pmu		*percpu_pmu;
 };
 
+enum armpmu_pmu_states {
+	ARM_PMU_STATE_OFF,
+	ARM_PMU_STATE_RUNNING,
+	ARM_PMU_STATE_GOING_DOWN,
+};
+
 struct arm_pmu {
 	struct pmu	pmu;
 	cpumask_t	active_irqs;
@@ -101,17 +107,24 @@
 	void		(*free_irq)(struct arm_pmu *);
 	int		(*map_event)(struct perf_event *event);
 	int		num_events;
+	int		pmu_state;
+	int		percpu_irq;
 	atomic_t	active_events;
 	struct mutex	reserve_mutex;
 	u64		max_period;
+	bool		secure_access; /* 32-bit ARM only */
 	struct platform_device	*plat_device;
 	struct pmu_hw_events	__percpu *hw_events;
 	struct notifier_block	hotplug_nb;
+	struct notifier_block	cpu_pm_nb;
 };
 
 #define to_arm_pmu(p) (container_of(p, struct arm_pmu, pmu))
 
-int armpmu_register(struct arm_pmu *armpmu, int type);
+extern const unsigned armv8_pmuv3_perf_map[PERF_COUNT_HW_MAX];
+extern const unsigned armv8_pmuv3_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
+						[PERF_COUNT_HW_CACHE_OP_MAX]
+						[PERF_COUNT_HW_CACHE_RESULT_MAX];
 
 u64 armpmu_event_update(struct perf_event *event);
 
@@ -149,6 +162,18 @@
 			 const struct of_device_id *of_table,
 			 const struct pmu_probe_info *probe_table);
 
+void armv8_pmu_init(struct arm_pmu *cpu_pmu);
+int armv8pmu_enable_intens(int idx);
+int armv8pmu_disable_intens(int idx);
+int armv8pmu_enable_counter(int idx);
+int armv8pmu_disable_counter(int idx);
+u32 armv8pmu_getreset_flags(void);
+void armv8pmu_pmcr_write(u32 val);
+void armv8pmu_write_evtype(int idx, u32 val);
+int armv8pmu_probe_num_events(struct arm_pmu *arm_pmu);
+
+int kryo_pmu_init(struct arm_pmu *cpu_pmu);
+
 #endif /* CONFIG_ARM_PMU */
 
 #endif /* __ARM_PMU_H__ */
diff -ruw linux-4.4.115/include/linux/perf_event.h linux-4.4.115-fbx/include/linux/perf_event.h
--- linux-4.4.115/include/linux/perf_event.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/linux/perf_event.h	2019-10-29 09:26:25.469221048 +0100
@@ -235,6 +235,8 @@
 	atomic_t			exclusive_cnt; /* < 0: cpu; > 0: tsk */
 	int				task_ctx_nr;
 	int				hrtimer_interval_ms;
+	u32                             events_across_hotplug:1,
+					reserved:31;
 
 	/*
 	 * Fully disable/enable this PMU, can be used to protect from the PMI
@@ -379,7 +381,7 @@
 	/*
 	 * Set up pmu-private data structures for an AUX area
 	 */
-	void *(*setup_aux)		(int cpu, void **pages,
+	void *(*setup_aux)		(struct perf_event *event, void **pages,
 					 int nr_pages, bool overwrite);
 					/* optional */
 
@@ -392,6 +394,14 @@
 	 * Filter events for PMU-specific reasons.
 	 */
 	int (*filter_match)		(struct perf_event *event); /* optional */
+
+	/*
+	 * Initial, PMU driver specific configuration.
+	 */
+	int (*get_drv_configs)		(struct perf_event *event,
+					 void __user *arg); /* optional */
+	void (*free_drv_configs)	(struct perf_event *event);
+					/* optional */
 };
 
 /**
@@ -467,6 +477,12 @@
 	int				nr_siblings;
 	int				group_flags;
 	struct perf_event		*group_leader;
+
+	/*
+	 * Protect the pmu, attributes and context of a group leader.
+	 * Note: does not protect the pointer to the group_leader.
+	 */
+	struct mutex			group_leader_mutex;
 	struct pmu			*pmu;
 
 	enum perf_event_active_state	state;
@@ -559,6 +575,7 @@
 	struct irq_work			pending;
 
 	atomic_t			event_limit;
+	struct list_head		drv_configs;
 
 	void (*destroy)(struct perf_event *);
 	struct rcu_head			rcu_head;
@@ -754,6 +771,7 @@
 extern u64 perf_event_read_value(struct perf_event *event,
 				 u64 *enabled, u64 *running);
 
+extern struct dentry *perf_create_debug_dir(void);
 
 struct perf_sample_data {
 	/*
@@ -990,6 +1008,11 @@
 		loff_t *ppos);
 
 
+static inline bool perf_paranoid_any(void)
+{
+	return sysctl_perf_event_paranoid > 2;
+}
+
 static inline bool perf_paranoid_tracepoint_raw(void)
 {
 	return sysctl_perf_event_paranoid > -1;
diff -ruw linux-4.4.115/include/linux/phy/phy-qcom-ufs.h linux-4.4.115-fbx/include/linux/phy/phy-qcom-ufs.h
--- linux-4.4.115/include/linux/phy/phy-qcom-ufs.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/linux/phy/phy-qcom-ufs.h	2019-01-22 16:16:28.339290199 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013-2015, Linux Foundation. All rights reserved.
+ * Copyright (c) 2013-2016, Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -51,9 +51,13 @@
 void ufs_qcom_phy_disable_iface_clk(struct phy *phy);
 int ufs_qcom_phy_start_serdes(struct phy *phy);
 int ufs_qcom_phy_set_tx_lane_enable(struct phy *phy, u32 tx_lanes);
+int ufs_qcom_phy_ctrl_rx_linecfg(struct phy *generic_phy, bool ctrl);
 int ufs_qcom_phy_calibrate_phy(struct phy *phy, bool is_rate_B);
 int ufs_qcom_phy_is_pcs_ready(struct phy *phy);
 void ufs_qcom_phy_save_controller_version(struct phy *phy,
 			u8 major, u16 minor, u16 step);
+const char *ufs_qcom_phy_name(struct phy *phy);
+int ufs_qcom_phy_configure_lpm(struct phy *generic_phy, bool enable);
+void ufs_qcom_phy_dbg_register_dump(struct phy *generic_phy);
 
 #endif /* PHY_QCOM_UFS_H_ */
diff -ruw linux-4.4.115/include/linux/platform_device.h linux-4.4.115-fbx/include/linux/platform_device.h
--- linux-4.4.115/include/linux/platform_device.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/linux/platform_device.h	2019-01-22 16:16:28.355290344 +0100
@@ -51,6 +51,7 @@
 extern struct resource *platform_get_resource(struct platform_device *,
 					      unsigned int, unsigned int);
 extern int platform_get_irq(struct platform_device *, unsigned int);
+extern int platform_irq_count(struct platform_device *);
 extern struct resource *platform_get_resource_byname(struct platform_device *,
 						     unsigned int,
 						     const char *);
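
platform_irq_count() pairs with platform_get_irq() when a driver wants all of
its interrupt lines up front; a sketch (my_probe is illustrative):

#include <linux/platform_device.h>

static int my_probe(struct platform_device *pdev)
{
	int i, nirq = platform_irq_count(pdev);

	if (nirq < 0)
		return nirq;	/* may be -EPROBE_DEFER from the core */

	for (i = 0; i < nirq; i++) {
		int irq = platform_get_irq(pdev, i);

		if (irq < 0)
			return irq;
		/* request_irq(irq, ...) as needed */
	}
	return 0;
}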
diff -ruw linux-4.4.115/include/linux/plist.h linux-4.4.115-fbx/include/linux/plist.h
--- linux-4.4.115/include/linux/plist.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/linux/plist.h	2019-01-22 16:16:28.355290344 +0100
@@ -266,6 +266,9 @@
 #define plist_next(pos) \
 	list_next_entry(pos, node_list)
 
+#define plist_next_entry(pos, type, member)   \
+	container_of(plist_next(pos), type, member)
+
 /**
  * plist_prev - get the prev entry in list
  * @pos:	the type * to cursor
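
plist_next_entry() is the container-level counterpart of plist_next(); a
sketch with a hypothetical struct my_item:

#include <linux/plist.h>

struct my_item {
	struct plist_node node;
	int payload;
};

/* step from one entry's container to the next entry's container */
static struct my_item *next_item(struct my_item *pos)
{
	return plist_next_entry(&pos->node, struct my_item, node);
}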
diff -ruw linux-4.4.115/include/linux/pm.h linux-4.4.115-fbx/include/linux/pm.h
--- linux-4.4.115/include/linux/pm.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/linux/pm.h	2019-01-22 16:16:28.355290344 +0100
@@ -573,6 +573,7 @@
 	struct wakeup_source	*wakeup;
 	bool			wakeup_path:1;
 	bool			syscore:1;
+	bool			no_pm_callbacks:1;	/* Owned by the PM core */
 #else
 	unsigned int		should_wakeup:1;
 #endif
diff -ruw linux-4.4.115/include/linux/pm_opp.h linux-4.4.115-fbx/include/linux/pm_opp.h
--- linux-4.4.115/include/linux/pm_opp.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/linux/pm_opp.h	2019-01-22 16:16:28.355290344 +0100
@@ -34,6 +34,8 @@
 
 int dev_pm_opp_get_opp_count(struct device *dev);
 unsigned long dev_pm_opp_get_max_clock_latency(struct device *dev);
+unsigned long dev_pm_opp_get_max_volt_latency(struct device *dev);
+unsigned long dev_pm_opp_get_max_transition_latency(struct device *dev);
 struct dev_pm_opp *dev_pm_opp_get_suspend_opp(struct device *dev);
 
 struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev,
@@ -55,6 +57,14 @@
 int dev_pm_opp_disable(struct device *dev, unsigned long freq);
 
 struct srcu_notifier_head *dev_pm_opp_get_notifier(struct device *dev);
+int dev_pm_opp_set_supported_hw(struct device *dev, const u32 *versions,
+				unsigned int count);
+void dev_pm_opp_put_supported_hw(struct device *dev);
+int dev_pm_opp_set_prop_name(struct device *dev, const char *name);
+void dev_pm_opp_put_prop_name(struct device *dev);
+int dev_pm_opp_set_regulator(struct device *dev, const char *name);
+void dev_pm_opp_put_regulator(struct device *dev);
+int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq);
 #else
 static inline unsigned long dev_pm_opp_get_voltage(struct dev_pm_opp *opp)
 {
@@ -81,6 +91,16 @@
 	return 0;
 }
 
+static inline unsigned long dev_pm_opp_get_max_volt_latency(struct device *dev)
+{
+	return 0;
+}
+
+static inline unsigned long dev_pm_opp_get_max_transition_latency(struct device *dev)
+{
+	return 0;
+}
+
 static inline struct dev_pm_opp *dev_pm_opp_get_suspend_opp(struct device *dev)
 {
 	return NULL;
@@ -129,6 +149,35 @@
 {
 	return ERR_PTR(-EINVAL);
 }
+
+static inline int dev_pm_opp_set_supported_hw(struct device *dev,
+					      const u32 *versions,
+					      unsigned int count)
+{
+	return -EINVAL;
+}
+
+static inline void dev_pm_opp_put_supported_hw(struct device *dev) {}
+
+static inline int dev_pm_opp_set_prop_name(struct device *dev, const char *name)
+{
+	return -EINVAL;
+}
+
+static inline void dev_pm_opp_put_prop_name(struct device *dev) {}
+
+static inline int dev_pm_opp_set_regulator(struct device *dev, const char *name)
+{
+	return -EINVAL;
+}
+
+static inline void dev_pm_opp_put_regulator(struct device *dev) {}
+
+static inline int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq)
+{
+	return -EINVAL;
+}
+
 #endif		/* CONFIG_PM_OPP */
 
 #if defined(CONFIG_PM_OPP) && defined(CONFIG_OF)
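
A sketch of a consumer driver using the new helpers; the version array and the
800 MHz target are made up for illustration:

#include <linux/pm_opp.h>

static int bring_up_device(struct device *dev)
{
	static const u32 hw_version[] = { 0x2 };
	int ret;

	/* keep only the OPP table entries matching this silicon revision */
	ret = dev_pm_opp_set_supported_hw(dev, hw_version,
					  ARRAY_SIZE(hw_version));
	if (ret)
		return ret;

	/* orders the regulator and clock transitions for the target OPP */
	return dev_pm_opp_set_rate(dev, 800000000);
}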
diff -ruw linux-4.4.115/include/linux/pm_qos.h linux-4.4.115-fbx/include/linux/pm_qos.h
--- linux-4.4.115/include/linux/pm_qos.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/linux/pm_qos.h	2019-01-22 16:16:28.355290344 +0100
@@ -9,6 +9,8 @@
 #include <linux/miscdevice.h>
 #include <linux/device.h>
 #include <linux/workqueue.h>
+#include <linux/cpumask.h>
+#include <linux/interrupt.h>
 
 enum {
 	PM_QOS_RESERVED = 0,
@@ -42,7 +44,22 @@
 #define PM_QOS_FLAG_NO_POWER_OFF	(1 << 0)
 #define PM_QOS_FLAG_REMOTE_WAKEUP	(1 << 1)
 
+enum pm_qos_req_type {
+	PM_QOS_REQ_ALL_CORES = 0,
+	PM_QOS_REQ_AFFINE_CORES,
+#ifdef CONFIG_SMP
+	PM_QOS_REQ_AFFINE_IRQ,
+#endif
+};
+
 struct pm_qos_request {
+	enum pm_qos_req_type type;
+	struct cpumask cpus_affine;
+#ifdef CONFIG_SMP
+	uint32_t irq;
+	/* Internal structure members */
+	struct irq_affinity_notify irq_notify;
+#endif
 	struct plist_node node;
 	int pm_qos_class;
 	struct delayed_work work; /* for pm_qos_update_request_timeout */
@@ -62,7 +79,7 @@
 struct dev_pm_qos_request {
 	enum dev_pm_qos_req_type type;
 	union {
-		struct plist_node pnode;
+		struct pm_qos_request lat;
 		struct pm_qos_flags_request flr;
 	} data;
 	struct device *dev;
@@ -83,6 +100,7 @@
 struct pm_qos_constraints {
 	struct plist_head list;
 	s32 target_value;	/* Do not change to 64 bit */
+	s32 target_per_cpu[NR_CPUS];
 	s32 default_value;
 	s32 no_constraint_value;
 	enum pm_qos_type type;
@@ -115,7 +133,8 @@
 	return req->dev != NULL;
 }
 
-int pm_qos_update_target(struct pm_qos_constraints *c, struct plist_node *node,
+int pm_qos_update_target(struct pm_qos_constraints *c,
+				struct pm_qos_request *req,
 			 enum pm_qos_req_action action, int value);
 bool pm_qos_update_flags(struct pm_qos_flags *pqf,
 			 struct pm_qos_flags_request *req,
@@ -129,6 +148,8 @@
 void pm_qos_remove_request(struct pm_qos_request *req);
 
 int pm_qos_request(int pm_qos_class);
+int pm_qos_request_for_cpu(int pm_qos_class, int cpu);
+int pm_qos_request_for_cpumask(int pm_qos_class, struct cpumask *mask);
 int pm_qos_add_notifier(int pm_qos_class, struct notifier_block *notifier);
 int pm_qos_remove_notifier(int pm_qos_class, struct notifier_block *notifier);
 int pm_qos_request_active(struct pm_qos_request *req);
@@ -166,7 +187,7 @@
 
 static inline s32 dev_pm_qos_requested_resume_latency(struct device *dev)
 {
-	return dev->power.qos->resume_latency_req->data.pnode.prio;
+	return dev->power.qos->resume_latency_req->data.lat.node.prio;
 }
 
 static inline s32 dev_pm_qos_requested_flags(struct device *dev)
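
With the extended request structure, a latency vote can be scoped to a subset
of CPUs instead of applying system-wide; a sketch (the cluster mask and the
value of 10 are illustrative):

#include <linux/pm_qos.h>

static struct pm_qos_request latency_req;

static void add_cluster_latency_vote(const struct cpumask *cluster)
{
	latency_req.type = PM_QOS_REQ_AFFINE_CORES;
	cpumask_copy(&latency_req.cpus_affine, cluster);
	pm_qos_add_request(&latency_req, PM_QOS_CPU_DMA_LATENCY, 10);
}

static int read_cpu0_vote(void)
{
	/* per-CPU readout added by this patch */
	return pm_qos_request_for_cpu(PM_QOS_CPU_DMA_LATENCY, 0);
}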
diff -ruw linux-4.4.115/include/linux/poison.h linux-4.4.115-fbx/include/linux/poison.h
--- linux-4.4.115/include/linux/poison.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/linux/poison.h	2019-01-22 16:16:28.355290344 +0100
@@ -30,7 +30,11 @@
 #define TIMER_ENTRY_STATIC	((void *) 0x74737461)
 
 /********** mm/debug-pagealloc.c **********/
+#ifdef CONFIG_PAGE_POISONING_ZERO
+#define PAGE_POISON 0x00
+#else
 #define PAGE_POISON 0xaa
+#endif
 
 /********** mm/slab.c **********/
 /*
diff -ruw linux-4.4.115/include/linux/poll.h linux-4.4.115-fbx/include/linux/poll.h
--- linux-4.4.115/include/linux/poll.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/linux/poll.h	2019-01-22 16:16:28.355290344 +0100
@@ -96,7 +96,7 @@
 extern void poll_freewait(struct poll_wqueues *pwq);
 extern int poll_schedule_timeout(struct poll_wqueues *pwq, int state,
 				 ktime_t *expires, unsigned long slack);
-extern long select_estimate_accuracy(struct timespec *tv);
+extern u64 select_estimate_accuracy(struct timespec *tv);
 
 
 static inline int poll_schedule(struct poll_wqueues *pwq, int state)
diff -ruw linux-4.4.115/include/linux/power_supply.h linux-4.4.115-fbx/include/linux/power_supply.h
--- linux-4.4.115/include/linux/power_supply.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/linux/power_supply.h	2019-10-29 09:26:25.473221087 +0100
@@ -18,6 +18,7 @@
 #include <linux/leds.h>
 #include <linux/spinlock.h>
 #include <linux/notifier.h>
+#include <linux/types.h>
 
 /*
  * All voltages, currents, charges, energies, time and temperatures in uV,
@@ -45,6 +46,7 @@
 	POWER_SUPPLY_CHARGE_TYPE_NONE,
 	POWER_SUPPLY_CHARGE_TYPE_TRICKLE,
 	POWER_SUPPLY_CHARGE_TYPE_FAST,
+	POWER_SUPPLY_CHARGE_TYPE_TAPER,
 };
 
 enum {
@@ -57,6 +59,9 @@
 	POWER_SUPPLY_HEALTH_COLD,
 	POWER_SUPPLY_HEALTH_WATCHDOG_TIMER_EXPIRE,
 	POWER_SUPPLY_HEALTH_SAFETY_TIMER_EXPIRE,
+	POWER_SUPPLY_HEALTH_WARM,
+	POWER_SUPPLY_HEALTH_COOL,
+	POWER_SUPPLY_HEALTH_HOT,
 };
 
 enum {
@@ -84,6 +89,32 @@
 	POWER_SUPPLY_SCOPE_DEVICE,
 };
 
+enum {
+	POWER_SUPPLY_DP_DM_UNKNOWN = 0,
+	POWER_SUPPLY_DP_DM_PREPARE = 1,
+	POWER_SUPPLY_DP_DM_UNPREPARE = 2,
+	POWER_SUPPLY_DP_DM_CONFIRMED_HVDCP3 = 3,
+	POWER_SUPPLY_DP_DM_DP_PULSE = 4,
+	POWER_SUPPLY_DP_DM_DM_PULSE = 5,
+	POWER_SUPPLY_DP_DM_DP0P6_DMF = 6,
+	POWER_SUPPLY_DP_DM_DP0P6_DM3P3 = 7,
+	POWER_SUPPLY_DP_DM_DPF_DMF = 8,
+	POWER_SUPPLY_DP_DM_DPR_DMR = 9,
+	POWER_SUPPLY_DP_DM_HVDCP3_SUPPORTED = 10,
+	POWER_SUPPLY_DP_DM_ICL_DOWN = 11,
+	POWER_SUPPLY_DP_DM_ICL_UP = 12,
+	POWER_SUPPLY_DP_DM_FORCE_5V = 13,
+	POWER_SUPPLY_DP_DM_FORCE_9V = 14,
+	POWER_SUPPLY_DP_DM_FORCE_12V = 15,
+};
+
+enum {
+	POWER_SUPPLY_PL_NONE,
+	POWER_SUPPLY_PL_USBIN_USBIN,
+	POWER_SUPPLY_PL_USBIN_USBIN_EXT,
+	POWER_SUPPLY_PL_USBMID_USBMID,
+};
+
 enum power_supply_property {
 	/* Properties of type `int' */
 	POWER_SUPPLY_PROP_STATUS = 0,
@@ -113,6 +144,8 @@
 	POWER_SUPPLY_PROP_CHARGE_FULL,
 	POWER_SUPPLY_PROP_CHARGE_EMPTY,
 	POWER_SUPPLY_PROP_CHARGE_NOW,
+	POWER_SUPPLY_PROP_CHARGE_NOW_RAW,
+	POWER_SUPPLY_PROP_CHARGE_NOW_ERROR,
 	POWER_SUPPLY_PROP_CHARGE_AVG,
 	POWER_SUPPLY_PROP_CHARGE_COUNTER,
 	POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT,
@@ -132,6 +165,7 @@
 	POWER_SUPPLY_PROP_CAPACITY_ALERT_MIN, /* in percents! */
 	POWER_SUPPLY_PROP_CAPACITY_ALERT_MAX, /* in percents! */
 	POWER_SUPPLY_PROP_CAPACITY_LEVEL,
+	POWER_SUPPLY_PROP_CAPACITY_RAW,
 	POWER_SUPPLY_PROP_TEMP,
 	POWER_SUPPLY_PROP_TEMP_MAX,
 	POWER_SUPPLY_PROP_TEMP_MIN,
@@ -148,10 +182,91 @@
 	POWER_SUPPLY_PROP_SCOPE,
 	POWER_SUPPLY_PROP_CHARGE_TERM_CURRENT,
 	POWER_SUPPLY_PROP_CALIBRATE,
+	/* Local extensions */
+	POWER_SUPPLY_PROP_USB_HC,
+	POWER_SUPPLY_PROP_USB_OTG,
+	POWER_SUPPLY_PROP_BATTERY_CHARGING_ENABLED,
+	POWER_SUPPLY_PROP_CHARGING_ENABLED,
+	POWER_SUPPLY_PROP_STEP_CHARGING_ENABLED,
+	POWER_SUPPLY_PROP_STEP_CHARGING_STEP,
+	POWER_SUPPLY_PROP_PIN_ENABLED,
+	POWER_SUPPLY_PROP_INPUT_SUSPEND,
+	POWER_SUPPLY_PROP_INPUT_VOLTAGE_REGULATION,
+	POWER_SUPPLY_PROP_INPUT_CURRENT_MAX,
+	POWER_SUPPLY_PROP_INPUT_CURRENT_TRIM,
+	POWER_SUPPLY_PROP_INPUT_CURRENT_SETTLED,
+	POWER_SUPPLY_PROP_INPUT_VOLTAGE_SETTLED,
+	POWER_SUPPLY_PROP_VCHG_LOOP_DBC_BYPASS,
+	POWER_SUPPLY_PROP_CHARGE_COUNTER_SHADOW,
+	POWER_SUPPLY_PROP_HI_POWER,
+	POWER_SUPPLY_PROP_LOW_POWER,
+	POWER_SUPPLY_PROP_COOL_TEMP,
+	POWER_SUPPLY_PROP_WARM_TEMP,
+	POWER_SUPPLY_PROP_COLD_TEMP,
+	POWER_SUPPLY_PROP_HOT_TEMP,
+	POWER_SUPPLY_PROP_SYSTEM_TEMP_LEVEL,
+	POWER_SUPPLY_PROP_RESISTANCE,
+	POWER_SUPPLY_PROP_RESISTANCE_CAPACITIVE,
+	POWER_SUPPLY_PROP_RESISTANCE_ID, /* in Ohms */
+	POWER_SUPPLY_PROP_RESISTANCE_NOW,
+	POWER_SUPPLY_PROP_FLASH_CURRENT_MAX,
+	POWER_SUPPLY_PROP_UPDATE_NOW,
+	POWER_SUPPLY_PROP_ESR_COUNT,
+	POWER_SUPPLY_PROP_BUCK_FREQ,
+	POWER_SUPPLY_PROP_BOOST_CURRENT,
+	POWER_SUPPLY_PROP_SAFETY_TIMER_ENABLE,
+	POWER_SUPPLY_PROP_CHARGE_DONE,
+	POWER_SUPPLY_PROP_FLASH_ACTIVE,
+	POWER_SUPPLY_PROP_FLASH_TRIGGER,
+	POWER_SUPPLY_PROP_FORCE_TLIM,
+	POWER_SUPPLY_PROP_DP_DM,
+	POWER_SUPPLY_PROP_INPUT_CURRENT_LIMITED,
+	POWER_SUPPLY_PROP_INPUT_CURRENT_NOW,
+	POWER_SUPPLY_PROP_CHARGE_QNOVO_ENABLE,
+	POWER_SUPPLY_PROP_CURRENT_QNOVO,
+	POWER_SUPPLY_PROP_VOLTAGE_QNOVO,
+	POWER_SUPPLY_PROP_RERUN_AICL,
+	POWER_SUPPLY_PROP_CYCLE_COUNT_ID,
+	POWER_SUPPLY_PROP_SAFETY_TIMER_EXPIRED,
+	POWER_SUPPLY_PROP_RESTRICTED_CHARGING,
+	POWER_SUPPLY_PROP_CURRENT_CAPABILITY,
+	POWER_SUPPLY_PROP_TYPEC_MODE,
+	POWER_SUPPLY_PROP_TYPEC_CC_ORIENTATION, /* 0: N/C, 1: CC1, 2: CC2 */
+	POWER_SUPPLY_PROP_TYPEC_POWER_ROLE,
+	POWER_SUPPLY_PROP_PD_ALLOWED,
+	POWER_SUPPLY_PROP_PD_ACTIVE,
+	POWER_SUPPLY_PROP_PD_IN_HARD_RESET,
+	POWER_SUPPLY_PROP_PD_CURRENT_MAX,
+	POWER_SUPPLY_PROP_PD_USB_SUSPEND_SUPPORTED,
+	POWER_SUPPLY_PROP_CHARGER_TEMP,
+	POWER_SUPPLY_PROP_CHARGER_TEMP_MAX,
+	POWER_SUPPLY_PROP_PARALLEL_DISABLE,
+	POWER_SUPPLY_PROP_PE_START,
+	POWER_SUPPLY_PROP_SET_SHIP_MODE,
+	POWER_SUPPLY_PROP_SOC_REPORTING_READY,
+	POWER_SUPPLY_PROP_DEBUG_BATTERY,
+	POWER_SUPPLY_PROP_FCC_DELTA,
+	POWER_SUPPLY_PROP_ICL_REDUCTION,
+	POWER_SUPPLY_PROP_PARALLEL_MODE,
+	POWER_SUPPLY_PROP_DIE_HEALTH,
+	POWER_SUPPLY_PROP_CONNECTOR_HEALTH,
+	POWER_SUPPLY_PROP_CTM_CURRENT_MAX,
+	POWER_SUPPLY_PROP_HW_CURRENT_MAX,
+	POWER_SUPPLY_PROP_REAL_TYPE,
+	POWER_SUPPLY_PROP_PR_SWAP,
+	POWER_SUPPLY_PROP_CC_STEP,
+	POWER_SUPPLY_PROP_CC_STEP_SEL,
+	POWER_SUPPLY_PROP_SW_JEITA_ENABLED,
+	POWER_SUPPLY_PROP_PD_VOLTAGE_MAX,
+	POWER_SUPPLY_PROP_PD_VOLTAGE_MIN,
+	POWER_SUPPLY_PROP_SDP_CURRENT_MAX,
+	/* Local extensions of type int64_t */
+	POWER_SUPPLY_PROP_CHARGE_COUNTER_EXT,
 	/* Properties of type `const char *' */
 	POWER_SUPPLY_PROP_MODEL_NAME,
 	POWER_SUPPLY_PROP_MANUFACTURER,
 	POWER_SUPPLY_PROP_SERIAL_NUMBER,
+	POWER_SUPPLY_PROP_BATTERY_TYPE,
 };
 
 enum power_supply_type {
@@ -163,15 +278,59 @@
 	POWER_SUPPLY_TYPE_USB_DCP,	/* Dedicated Charging Port */
 	POWER_SUPPLY_TYPE_USB_CDP,	/* Charging Downstream Port */
 	POWER_SUPPLY_TYPE_USB_ACA,	/* Accessory Charger Adapters */
+	POWER_SUPPLY_TYPE_USB_HVDCP,	/* High Voltage DCP */
+	POWER_SUPPLY_TYPE_USB_HVDCP_3,	/* Efficient High Voltage DCP */
+	POWER_SUPPLY_TYPE_USB_PD,	/* Power Delivery */
+	POWER_SUPPLY_TYPE_WIRELESS,	/* Wireless charger */
+	POWER_SUPPLY_TYPE_USB_FLOAT,	/* Floating charger */
+	POWER_SUPPLY_TYPE_BMS,		/* Battery Monitor System */
+	POWER_SUPPLY_TYPE_PARALLEL,	/* Parallel Path */
+	POWER_SUPPLY_TYPE_MAIN,		/* Main Path */
+	POWER_SUPPLY_TYPE_WIPOWER,	/* Wipower */
+	POWER_SUPPLY_TYPE_TYPEC,	/* Type-C */
+	POWER_SUPPLY_TYPE_UFP,		/* Type-C UFP */
+	POWER_SUPPLY_TYPE_DFP,		/* Type-C DFP */
+};
+
+/* Indicates USB Type-C CC connection status */
+enum power_supply_typec_mode {
+	POWER_SUPPLY_TYPEC_NONE,
+
+	/* Acting as source */
+	POWER_SUPPLY_TYPEC_SINK,			/* Rd only */
+	POWER_SUPPLY_TYPEC_SINK_POWERED_CABLE,		/* Rd/Ra */
+	POWER_SUPPLY_TYPEC_SINK_DEBUG_ACCESSORY,	/* Rd/Rd */
+	POWER_SUPPLY_TYPEC_SINK_AUDIO_ADAPTER,		/* Ra/Ra */
+	POWER_SUPPLY_TYPEC_POWERED_CABLE_ONLY,		/* Ra only */
+
+	/* Acting as sink */
+	POWER_SUPPLY_TYPEC_SOURCE_DEFAULT,		/* Rp default */
+	POWER_SUPPLY_TYPEC_SOURCE_MEDIUM,		/* Rp 1.5A */
+	POWER_SUPPLY_TYPEC_SOURCE_HIGH,			/* Rp 3A */
+	POWER_SUPPLY_TYPEC_NON_COMPLIANT,
+};
+
+enum power_supply_typec_power_role {
+	POWER_SUPPLY_TYPEC_PR_NONE,	/* CC lines in high-Z */
+	POWER_SUPPLY_TYPEC_PR_DUAL,
+	POWER_SUPPLY_TYPEC_PR_SINK,
+	POWER_SUPPLY_TYPEC_PR_SOURCE,
 };
 
 enum power_supply_notifier_events {
 	PSY_EVENT_PROP_CHANGED,
 };
 
+enum vmbms_power_usecase {
+	VMBMS_IGNORE_ALL_BIT = 1,
+	VMBMS_VOICE_CALL_BIT = (1 << 4),
+	VMBMS_STATIC_DISPLAY_BIT = (1 << 5),
+};
+
 union power_supply_propval {
 	int intval;
 	const char *strval;
+	int64_t int64val;
 };
 
 struct device_node;
@@ -361,6 +520,9 @@
 	case POWER_SUPPLY_PROP_CURRENT_NOW:
 	case POWER_SUPPLY_PROP_CURRENT_AVG:
 	case POWER_SUPPLY_PROP_CURRENT_BOOT:
+	case POWER_SUPPLY_PROP_CHARGE_COUNTER_SHADOW:
+	case POWER_SUPPLY_PROP_INPUT_CURRENT_MAX:
+	case POWER_SUPPLY_PROP_FLASH_CURRENT_MAX:
 		return 1;
 	default:
 		break;
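
The new int64val member backs the one property documented above as int64_t; a
sketch of reading it through the standard accessor:

#include <linux/power_supply.h>

static s64 read_charge_counter_ext(struct power_supply *psy)
{
	union power_supply_propval val = { };

	if (power_supply_get_property(psy,
			POWER_SUPPLY_PROP_CHARGE_COUNTER_EXT, &val))
		return 0;

	return val.int64val;
}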
diff -ruw linux-4.4.115/include/linux/psci.h linux-4.4.115-fbx/include/linux/psci.h
--- linux-4.4.115/include/linux/psci.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/linux/psci.h	2019-01-22 16:16:28.359290380 +0100
@@ -24,7 +24,11 @@
 bool psci_power_state_loses_context(u32 state);
 bool psci_power_state_is_valid(u32 state);
 
+int psci_cpu_init_idle(unsigned int cpu);
+int psci_cpu_suspend_enter(unsigned long index);
+
 struct psci_operations {
+	u32 (*get_version)(void);
 	int (*cpu_suspend)(u32 state, unsigned long entry_point);
 	int (*cpu_off)(u32 state);
 	int (*cpu_on)(unsigned long cpuid, unsigned long entry_point);
diff -ruw linux-4.4.115/include/linux/pstore.h linux-4.4.115-fbx/include/linux/pstore.h
--- linux-4.4.115/include/linux/pstore.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/linux/pstore.h	2019-01-22 16:16:28.359290380 +0100
@@ -22,12 +22,13 @@
 #ifndef _LINUX_PSTORE_H
 #define _LINUX_PSTORE_H
 
-#include <linux/time.h>
+#include <linux/compiler.h>
+#include <linux/errno.h>
 #include <linux/kmsg_dump.h>
 #include <linux/mutex.h>
-#include <linux/types.h>
 #include <linux/spinlock.h>
-#include <linux/errno.h>
+#include <linux/time.h>
+#include <linux/types.h>
 
 /* types */
 enum pstore_type_id {
@@ -67,6 +68,10 @@
 			enum kmsg_dump_reason reason, u64 *id,
 			unsigned int part, const char *buf, bool compressed,
 			size_t size, struct pstore_info *psi);
+	int		(*write_buf_user)(enum pstore_type_id type,
+			enum kmsg_dump_reason reason, u64 *id,
+			unsigned int part, const char __user *buf,
+			bool compressed, size_t size, struct pstore_info *psi);
 	int		(*erase)(enum pstore_type_id type, u64 id,
 			int count, struct timespec time,
 			struct pstore_info *psi);
diff -ruw linux-4.4.115/include/linux/pstore_ram.h linux-4.4.115-fbx/include/linux/pstore_ram.h
--- linux-4.4.115/include/linux/pstore_ram.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/linux/pstore_ram.h	2019-01-22 16:16:28.359290380 +0100
@@ -17,11 +17,12 @@
 #ifndef __LINUX_PSTORE_RAM_H__
 #define __LINUX_PSTORE_RAM_H__
 
+#include <linux/compiler.h>
 #include <linux/device.h>
+#include <linux/init.h>
 #include <linux/kernel.h>
 #include <linux/list.h>
 #include <linux/types.h>
-#include <linux/init.h>
 
 /*
  * Choose whether access to the RAM zone requires locking or not.  If a zone
@@ -69,6 +70,8 @@
 
 int persistent_ram_write(struct persistent_ram_zone *prz, const void *s,
 	unsigned int count);
+int persistent_ram_write_user(struct persistent_ram_zone *prz,
+			      const void __user *s, unsigned int count);
 
 void persistent_ram_save_old(struct persistent_ram_zone *prz);
 size_t persistent_ram_old_size(struct persistent_ram_zone *prz);
@@ -77,6 +80,8 @@
 ssize_t persistent_ram_ecc_string(struct persistent_ram_zone *prz,
 	char *str, size_t len);
 
+void ramoops_console_write_buf(const char *buf, size_t size);
+
 /*
  * Ramoops platform data
  * @mem_size	memory size for ramoops
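
persistent_ram_write_user() lets a pstore front-end feed userspace data into a
zone without staging it through a kernel buffer first; a sketch (pmsg_write is
illustrative):

static ssize_t pmsg_write(struct persistent_ram_zone *prz,
			  const char __user *buf, size_t count)
{
	/* returns bytes written (possibly truncated) or a negative errno */
	return persistent_ram_write_user(prz, buf, count);
}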
diff -ruw linux-4.4.115/include/linux/radix-tree.h linux-4.4.115-fbx/include/linux/radix-tree.h
--- linux-4.4.115/include/linux/radix-tree.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/linux/radix-tree.h	2019-10-29 09:26:25.473221087 +0100
@@ -274,6 +274,10 @@
 unsigned int
 radix_tree_gang_lookup(struct radix_tree_root *root, void **results,
 			unsigned long first_index, unsigned int max_items);
+unsigned int
+radix_tree_gang_lookup_index(struct radix_tree_root *root, void **results,
+			unsigned long *indices, unsigned long first_index,
+			unsigned int max_items);
 unsigned int radix_tree_gang_lookup_slot(struct radix_tree_root *root,
 			void ***results, unsigned long *indices,
 			unsigned long first_index, unsigned int max_items);
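
radix_tree_gang_lookup_index() behaves like radix_tree_gang_lookup() but also
reports the index of each returned slot; a sketch:

#include <linux/radix-tree.h>

static void scan_tree(struct radix_tree_root *root)
{
	void *results[16];
	unsigned long indices[16];
	unsigned int i, n;

	n = radix_tree_gang_lookup_index(root, results, indices, 0, 16);
	for (i = 0; i < n; i++)
		pr_info("index %lu -> %p\n", indices[i], results[i]);
}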
diff -ruw linux-4.4.115/include/linux/rcu_sync.h linux-4.4.115-fbx/include/linux/rcu_sync.h
--- linux-4.4.115/include/linux/rcu_sync.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/linux/rcu_sync.h	2019-01-22 16:16:28.367290453 +0100
@@ -59,6 +59,7 @@
 }
 
 extern void rcu_sync_init(struct rcu_sync *, enum rcu_sync_type);
+extern void rcu_sync_enter_start(struct rcu_sync *);
 extern void rcu_sync_enter(struct rcu_sync *);
 extern void rcu_sync_exit(struct rcu_sync *);
 extern void rcu_sync_dtor(struct rcu_sync *);
diff -ruw linux-4.4.115/include/linux/rcutree.h linux-4.4.115-fbx/include/linux/rcutree.h
--- linux-4.4.115/include/linux/rcutree.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/linux/rcutree.h	2019-01-22 16:16:28.367290453 +0100
@@ -37,7 +37,7 @@
 /*
  * Note a virtualization-based context switch.  This is simply a
  * wrapper around rcu_note_context_switch(), which allows TINY_RCU
- * to save a few bytes.
+ * to save a few bytes. The caller must have disabled interrupts.
  */
 static inline void rcu_virt_note_context_switch(int cpu)
 {
diff -ruw linux-4.4.115/include/linux/regmap.h linux-4.4.115-fbx/include/linux/regmap.h
--- linux-4.4.115/include/linux/regmap.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/linux/regmap.h	2019-01-22 16:16:28.371290489 +0100
@@ -29,6 +29,7 @@
 struct regmap_range_cfg;
 struct regmap_field;
 struct snd_ac97;
+struct swr_device;
 
 /* An enum of all the supported cache types */
 enum regcache_type {
@@ -387,7 +388,10 @@
 				  const struct regmap_config *config,
 				  struct lock_class_key *lock_key,
 				  const char *lock_name);
-
+struct regmap *__regmap_init_swr(struct swr_device *dev,
+				 const struct regmap_config *config,
+				 struct lock_class_key *lock_key,
+				 const char *lock_name);
 struct regmap *__devm_regmap_init(struct device *dev,
 				  const struct regmap_bus *bus,
 				  void *bus_context,
@@ -420,6 +424,10 @@
 				       const struct regmap_config *config,
 				       struct lock_class_key *lock_key,
 				       const char *lock_name);
+struct regmap *__devm_regmap_init_swr(struct swr_device *dev,
+				      const struct regmap_config *config,
+				      struct lock_class_key *lock_key,
+				      const char *lock_name);
 
 /*
  * Wrapper for regmap_init macros to include a unique lockdep key and name
@@ -554,6 +562,18 @@
 bool regmap_ac97_default_volatile(struct device *dev, unsigned int reg);
 
 /**
+ * regmap_init_swr(): Initialise register map
+ *
+ * @swr: Device that will be interacted with
+ * @config: Configuration for register map
+ *
+ * The return value will be an ERR_PTR() on error or a valid pointer to
+ * a struct regmap.
+ */
+#define regmap_init_swr(swr, config)					\
+	__regmap_lockdep_wrapper(__regmap_init_swr, #config,		\
+				swr, config)
+/**
  * devm_regmap_init(): Initialise managed register map
  *
  * @dev: Device that will be interacted with
@@ -668,6 +688,20 @@
 	__regmap_lockdep_wrapper(__devm_regmap_init_ac97, #config,	\
 				ac97, config)
 
+/**
+ * devm_regmap_init_swr(): Initialise managed register map
+ *
+ * @swr: Device that will be interacted with
+ * @config: Configuration for register map
+ *
+ * The return value will be an ERR_PTR() on error or a valid pointer
+ * to a struct regmap.  The regmap will be automatically freed by the
+ * device management code.
+ */
+#define devm_regmap_init_swr(swr, config)				\
+	__regmap_lockdep_wrapper(__devm_regmap_init_swr, #config,	\
+				swr, config)
+
 void regmap_exit(struct regmap *map);
 int regmap_reinit_cache(struct regmap *map,
 			const struct regmap_config *config);
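
A sketch of a Soundwire slave driver creating its managed map; the config
values and my_swr_probe are illustrative:

#include <linux/regmap.h>

static const struct regmap_config my_swr_config = {
	.reg_bits = 16,
	.val_bits = 8,
};

static int my_swr_probe(struct swr_device *swr)
{
	struct regmap *map = devm_regmap_init_swr(swr, &my_swr_config);

	if (IS_ERR(map))
		return PTR_ERR(map);
	/* the map is released automatically by devres on unbind */
	return 0;
}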
diff -ruw linux-4.4.115/include/linux/regulator/consumer.h linux-4.4.115-fbx/include/linux/regulator/consumer.h
--- linux-4.4.115/include/linux/regulator/consumer.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/linux/regulator/consumer.h	2019-01-22 16:16:28.371290489 +0100
@@ -103,6 +103,7 @@
  *                      Data passed is old voltage cast to (void *).
  * PRE_DISABLE    Regulator is about to be disabled
  * ABORT_DISABLE  Regulator disable failed for some reason
+ * ENABLE         Regulator was enabled.
  *
  * NOTE: These events can be OR'ed together when passed into handler.
  */
@@ -119,6 +120,7 @@
 #define REGULATOR_EVENT_ABORT_VOLTAGE_CHANGE	0x200
 #define REGULATOR_EVENT_PRE_DISABLE		0x400
 #define REGULATOR_EVENT_ABORT_DISABLE		0x800
+#define REGULATOR_EVENT_ENABLE			0x1000
 
 /**
  * struct pre_voltage_change_data - Data sent with PRE_VOLTAGE_CHANGE event
@@ -142,6 +144,10 @@
  *            using the bulk regulator APIs.
  * @consumer: The regulator consumer for the supply.  This will be managed
  *            by the bulk API.
+ * @min_uV:   The minimum requested voltage for the regulator (in microvolts),
+ *            or 0 to not set a voltage.
+ * @max_uV:   The maximum requested voltage for the regulator (in microvolts),
+ *            or 0 to use @min_uV.
  *
  * The regulator APIs provide a series of regulator_bulk_() API calls as
  * a convenience to consumers which require multiple supplies.  This
@@ -150,6 +156,8 @@
 struct regulator_bulk_data {
 	const char *supply;
 	struct regulator *consumer;
+	int min_uV;
+	int max_uV;
 
 	/* private: Internal use */
 	int ret;
@@ -214,6 +222,8 @@
 					 struct regulator_bulk_data *consumers);
 int __must_check regulator_bulk_enable(int num_consumers,
 				       struct regulator_bulk_data *consumers);
+int regulator_bulk_set_voltage(int num_consumers,
+			  struct regulator_bulk_data *consumers);
 int regulator_bulk_disable(int num_consumers,
 			   struct regulator_bulk_data *consumers);
 int regulator_bulk_force_disable(int num_consumers,
@@ -224,6 +234,7 @@
 int regulator_can_change_voltage(struct regulator *regulator);
 int regulator_count_voltages(struct regulator *regulator);
 int regulator_list_voltage(struct regulator *regulator, unsigned selector);
+int regulator_list_corner_voltage(struct regulator *regulator, int corner);
 int regulator_is_supported_voltage(struct regulator *regulator,
 				   int min_uV, int max_uV);
 unsigned int regulator_get_linear_step(struct regulator *regulator);
@@ -556,6 +567,11 @@
 	return -EINVAL;
 }
 
+static inline int regulator_list_corner_voltage(struct regulator *regulator,
+	int corner)
+{
+	return -EINVAL;
+}
 #endif
 
 static inline int regulator_set_voltage_triplet(struct regulator *regulator,
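
With the new min_uV/max_uV fields, one regulator_bulk_set_voltage() call
programs every supply in the array that requested a voltage; a sketch (the
supply names and voltages are illustrative):

#include <linux/regulator/consumer.h>

static struct regulator_bulk_data supplies[] = {
	{ .supply = "vdda", .min_uV = 1800000, .max_uV = 1800000 },
	{ .supply = "vddio" },	/* min_uV == 0: leave the voltage alone */
};

static int power_on(struct device *dev)
{
	int ret;

	ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(supplies), supplies);
	if (ret)
		return ret;

	ret = regulator_bulk_set_voltage(ARRAY_SIZE(supplies), supplies);
	if (ret)
		return ret;

	return regulator_bulk_enable(ARRAY_SIZE(supplies), supplies);
}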
diff -ruw linux-4.4.115/include/linux/regulator/driver.h linux-4.4.115-fbx/include/linux/regulator/driver.h
--- linux-4.4.115/include/linux/regulator/driver.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/linux/regulator/driver.h	2019-01-22 16:16:28.371290489 +0100
@@ -18,6 +18,7 @@
 #include <linux/device.h>
 #include <linux/notifier.h>
 #include <linux/regulator/consumer.h>
+#include <linux/regulator/proxy-consumer.h>
 
 struct regmap;
 struct regulator_dev;
@@ -87,6 +88,10 @@
  *	if the selector indicates a voltage that is unusable on this system;
  *	or negative errno.  Selectors range from zero to one less than
  *	regulator_desc.n_voltages.  Voltages may be reported in any order.
+ * @list_corner_voltage: Return the maximum voltage in microvolts that
+ *	can be physically configured for the regulator when operating at
+ *	the specified voltage corner or a negative errno if the corner value
+ *	can't be used on this system.
  *
  * @set_current_limit: Configure a limit for a current-limited regulator.
  *                     The driver should select the current closest to max_uA.
@@ -133,6 +138,7 @@
 
 	/* enumerate supported voltages */
 	int (*list_voltage) (struct regulator_dev *, unsigned selector);
+	int (*list_corner_voltage)(struct regulator_dev *, int corner);
 
 	/* get/set regulator voltage */
 	int (*set_voltage) (struct regulator_dev *, int min_uV, int max_uV,
@@ -370,6 +376,7 @@
 	int exclusive;
 	u32 use_count;
 	u32 open_count;
+	u32 open_offset;
 	u32 bypass_count;
 
 	/* lists we belong to */
@@ -399,6 +406,8 @@
 
 	/* time when this regulator was disabled last time */
 	unsigned long last_off_jiffy;
+	struct proxy_consumer *proxy_consumer;
+	struct regulator *debug_consumer;
 };
 
 struct regulator_dev *
diff -ruw linux-4.4.115/include/linux/rmap.h linux-4.4.115-fbx/include/linux/rmap.h
--- linux-4.4.115/include/linux/rmap.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/linux/rmap.h	2019-01-22 16:16:28.375290525 +0100
@@ -10,6 +10,11 @@
 #include <linux/rwsem.h>
 #include <linux/memcontrol.h>
 
+extern int isolate_lru_page(struct page *page);
+extern void putback_lru_page(struct page *page);
+extern unsigned long reclaim_pages_from_list(struct list_head *page_list,
+					     struct vm_area_struct *vma);
+
 /*
  * The anon_vma heads a list of private "related" vmas, to scan if
  * an anonymous page pointing to this anon_vma needs to be unmapped:
@@ -176,7 +181,8 @@
 
 #define TTU_ACTION(x) ((x) & TTU_ACTION_MASK)
 
-int try_to_unmap(struct page *, enum ttu_flags flags);
+int try_to_unmap(struct page *, enum ttu_flags flags,
+			struct vm_area_struct *vma);
 
 /*
  * Used by uprobes to replace a userspace page safely
@@ -232,6 +238,7 @@
  */
 struct rmap_walk_control {
 	void *arg;
+	struct vm_area_struct *target_vma;
 	int (*rmap_one)(struct page *page, struct vm_area_struct *vma,
 					unsigned long addr, void *arg);
 	int (*done)(struct page *page);
@@ -255,7 +262,7 @@
 	return 0;
 }
 
-#define try_to_unmap(page, refs) SWAP_FAIL
+#define try_to_unmap(page, refs, vma) SWAP_FAIL
 
 static inline int page_mkclean(struct page *page)
 {
diff -ruw linux-4.4.115/include/linux/sched/sysctl.h linux-4.4.115-fbx/include/linux/sched/sysctl.h
--- linux-4.4.115/include/linux/sched/sysctl.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/linux/sched/sysctl.h	2019-10-29 09:26:25.481221165 +0100
@@ -39,6 +39,51 @@
 extern unsigned int sysctl_sched_min_granularity;
 extern unsigned int sysctl_sched_wakeup_granularity;
 extern unsigned int sysctl_sched_child_runs_first;
+extern unsigned int sysctl_sched_sync_hint_enable;
+extern unsigned int sysctl_sched_cstate_aware;
+
+#ifdef CONFIG_SCHED_HMP
+
+enum freq_reporting_policy {
+	FREQ_REPORT_MAX_CPU_LOAD_TOP_TASK,
+	FREQ_REPORT_CPU_LOAD,
+	FREQ_REPORT_TOP_TASK,
+	FREQ_REPORT_INVALID_POLICY
+};
+
+extern int sysctl_sched_freq_inc_notify;
+extern int sysctl_sched_freq_dec_notify;
+extern unsigned int sysctl_sched_freq_reporting_policy;
+extern unsigned int sysctl_sched_window_stats_policy;
+extern unsigned int sysctl_sched_ravg_hist_size;
+extern unsigned int sysctl_sched_cpu_high_irqload;
+extern unsigned int sysctl_sched_init_task_load_pct;
+extern unsigned int sysctl_sched_spill_nr_run;
+extern unsigned int sysctl_sched_spill_load_pct;
+extern unsigned int sysctl_sched_upmigrate_pct;
+extern unsigned int sysctl_sched_downmigrate_pct;
+extern unsigned int sysctl_sched_group_upmigrate_pct;
+extern unsigned int sysctl_sched_group_downmigrate_pct;
+extern unsigned int sysctl_early_detection_duration;
+extern unsigned int sysctl_sched_boost;
+extern unsigned int sysctl_sched_small_wakee_task_load_pct;
+extern unsigned int sysctl_sched_big_waker_task_load_pct;
+extern unsigned int sysctl_sched_select_prev_cpu_us;
+extern unsigned int sysctl_sched_restrict_cluster_spill;
+extern unsigned int sysctl_sched_new_task_windows;
+extern unsigned int sysctl_sched_pred_alert_freq;
+extern unsigned int sysctl_sched_freq_aggregate;
+extern unsigned int sysctl_sched_enable_thread_grouping;
+extern unsigned int sysctl_sched_freq_aggregate_threshold_pct;
+extern unsigned int sysctl_sched_prefer_sync_wakee_to_waker;
+extern unsigned int sysctl_sched_short_burst;
+extern unsigned int sysctl_sched_short_sleep;
+
+#else /* CONFIG_SCHED_HMP */
+
+#define sysctl_sched_enable_hmp_task_placement 0
+
+#endif /* CONFIG_SCHED_HMP */
 
 enum sched_tunable_scaling {
 	SCHED_TUNABLESCALING_NONE,
@@ -64,6 +109,18 @@
 		loff_t *ppos);
 #endif
 
+extern int sched_migrate_notify_proc_handler(struct ctl_table *table,
+		int write, void __user *buffer, size_t *lenp, loff_t *ppos);
+
+extern int sched_hmp_proc_update_handler(struct ctl_table *table,
+		int write, void __user *buffer, size_t *lenp, loff_t *ppos);
+
+extern int sched_boost_handler(struct ctl_table *table, int write,
+			void __user *buffer, size_t *lenp, loff_t *ppos);
+
+extern int sched_window_update_handler(struct ctl_table *table,
+		 int write, void __user *buffer, size_t *lenp, loff_t *ppos);
+
 /*
  *  control realtime throttling:
  *
@@ -77,6 +134,22 @@
 extern unsigned int sysctl_sched_cfs_bandwidth_slice;
 #endif
 
+#ifdef CONFIG_SCHED_TUNE
+extern unsigned int sysctl_sched_cfs_boost;
+int sysctl_sched_cfs_boost_handler(struct ctl_table *table, int write,
+				   void __user *buffer, size_t *length,
+				   loff_t *ppos);
+static inline unsigned int get_sysctl_sched_cfs_boost(void)
+{
+	return sysctl_sched_cfs_boost;
+}
+#else
+static inline unsigned int get_sysctl_sched_cfs_boost(void)
+{
+	return 0;
+}
+#endif
+
 #ifdef CONFIG_SCHED_AUTOGROUP
 extern unsigned int sysctl_sched_autogroup_enabled;
 #endif
diff -ruw linux-4.4.115/include/linux/sched.h linux-4.4.115-fbx/include/linux/sched.h
--- linux-4.4.115/include/linux/sched.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/linux/sched.h	2019-10-29 09:26:25.481221165 +0100
@@ -51,6 +51,7 @@
 #include <linux/resource.h>
 #include <linux/timer.h>
 #include <linux/hrtimer.h>
+#include <linux/kcov.h>
 #include <linux/task_io_accounting.h>
 #include <linux/latencytop.h>
 #include <linux/cred.h>
@@ -173,6 +174,14 @@
 extern unsigned long nr_iowait(void);
 extern unsigned long nr_iowait_cpu(int cpu);
 extern void get_iowait_load(unsigned long *nr_waiters, unsigned long *load);
+#ifdef CONFIG_CPU_QUIET
+extern u64 nr_running_integral(unsigned int cpu);
+#endif
+
+extern void sched_update_nr_prod(int cpu, long delta, bool inc);
+extern void sched_get_nr_running_avg(int *avg, int *iowait_avg, int *big_avg,
+				     unsigned int *max_nr,
+				     unsigned int *big_max_nr);
 
 extern void calc_global_load(unsigned long ticks);
 
@@ -219,9 +228,10 @@
 #define TASK_WAKING		256
 #define TASK_PARKED		512
 #define TASK_NOLOAD		1024
-#define TASK_STATE_MAX		2048
+#define TASK_NEW		2048
+#define TASK_STATE_MAX		4096
 
-#define TASK_STATE_TO_CHAR_STR "RSDTtXZxKWPN"
+#define TASK_STATE_TO_CHAR_STR "RSDTtXZxKWPNn"
 
 extern char ___assert_task_state[1 - 2*!!(
 		sizeof(TASK_STATE_TO_CHAR_STR)-1 != ilog2(TASK_STATE_MAX)+1)];
@@ -314,6 +324,23 @@
 /* Task command name length */
 #define TASK_COMM_LEN 16
 
+extern const char *sched_window_reset_reasons[];
+
+enum task_event {
+	PUT_PREV_TASK   = 0,
+	PICK_NEXT_TASK  = 1,
+	TASK_WAKE       = 2,
+	TASK_MIGRATE    = 3,
+	TASK_UPDATE     = 4,
+	IRQ_UPDATE	= 5,
+};
+
+/* Note: this need to be in sync with migrate_type_names array */
+enum migrate_types {
+	GROUP_TO_RQ,
+	RQ_TO_GROUP,
+};
+
 #include <linux/spinlock.h>
 
 /*
@@ -334,13 +361,48 @@
 extern void sched_init(void);
 extern void sched_init_smp(void);
 extern asmlinkage void schedule_tail(struct task_struct *prev);
-extern void init_idle(struct task_struct *idle, int cpu);
+extern void init_idle(struct task_struct *idle, int cpu, bool hotplug);
 extern void init_idle_bootup_task(struct task_struct *idle);
 
 extern cpumask_var_t cpu_isolated_map;
 
 extern int runqueue_is_locked(int cpu);
 
+#ifdef CONFIG_HOTPLUG_CPU
+extern int sched_isolate_count(const cpumask_t *mask, bool include_offline);
+extern int sched_isolate_cpu(int cpu);
+extern int sched_unisolate_cpu(int cpu);
+extern int sched_unisolate_cpu_unlocked(int cpu);
+#else
+static inline int sched_isolate_count(const cpumask_t *mask,
+				      bool include_offline)
+{
+	cpumask_t count_mask;
+
+	if (include_offline)
+		cpumask_andnot(&count_mask, mask, cpu_online_mask);
+	else
+		return 0;
+
+	return cpumask_weight(&count_mask);
+}
+
+static inline int sched_isolate_cpu(int cpu)
+{
+	return 0;
+}
+
+static inline int sched_unisolate_cpu(int cpu)
+{
+	return 0;
+}
+
+static inline int sched_unisolate_cpu_unlocked(int cpu)
+{
+	return 0;
+}
+#endif
+
 #if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
 extern void nohz_balance_enter_idle(int cpu);
 extern void set_cpu_sd_state_idle(void);
@@ -377,6 +439,7 @@
 extern void sched_show_task(struct task_struct *p);
 
 #ifdef CONFIG_LOCKUP_DETECTOR
+extern void touch_softlockup_watchdog_sched(void);
 extern void touch_softlockup_watchdog(void);
 extern void touch_softlockup_watchdog_sync(void);
 extern void touch_all_softlockup_watchdogs(void);
@@ -386,7 +449,13 @@
 extern unsigned int  softlockup_panic;
 extern unsigned int  hardlockup_panic;
 void lockup_detector_init(void);
+extern void watchdog_enable(unsigned int cpu);
+extern void watchdog_disable(unsigned int cpu);
+extern bool watchdog_configured(unsigned int cpu);
 #else
+static inline void touch_softlockup_watchdog_sched(void)
+{
+}
 static inline void touch_softlockup_watchdog(void)
 {
 }
@@ -399,6 +468,20 @@
 static inline void lockup_detector_init(void)
 {
 }
+static inline void watchdog_enable(unsigned int cpu)
+{
+}
+static inline void watchdog_disable(unsigned int cpu)
+{
+}
+static inline bool watchdog_configured(unsigned int cpu)
+{
+	/*
+	 * Pretend the watchdog is always configured.
+	 * We will be waiting for the watchdog to be enabled in core isolation.
+	 */
+	return true;
+}
 #endif
 
 #ifdef CONFIG_DETECT_HUNG_TASK
@@ -939,6 +1022,14 @@
 #define SCHED_CAPACITY_SHIFT	10
 #define SCHED_CAPACITY_SCALE	(1L << SCHED_CAPACITY_SHIFT)
 
+struct sched_capacity_reqs {
+	unsigned long cfs;
+	unsigned long rt;
+	unsigned long dl;
+
+	unsigned long total;
+};
+
 /*
  * Wake-queues are lists of tasks with a pending wakeup, whose
  * callers have already marked the task as woken internally,
@@ -972,12 +1063,13 @@
 struct wake_q_head {
 	struct wake_q_node *first;
 	struct wake_q_node **lastp;
+	int count;
 };
 
 #define WAKE_Q_TAIL ((struct wake_q_node *) 0x01)
 
 #define WAKE_Q(name)					\
-	struct wake_q_head name = { WAKE_Q_TAIL, &name.first }
+	struct wake_q_head name = { WAKE_Q_TAIL, &name.first, 0 }
 
 extern void wake_q_add(struct wake_q_head *head,
 		       struct task_struct *task);
@@ -993,7 +1085,8 @@
 #define SD_BALANCE_FORK		0x0008	/* Balance on fork, clone */
 #define SD_BALANCE_WAKE		0x0010  /* Balance on wakeup */
 #define SD_WAKE_AFFINE		0x0020	/* Wake task to waking CPU */
-#define SD_SHARE_CPUCAPACITY	0x0080	/* Domain members share cpu power */
+#define SD_ASYM_CPUCAPACITY	0x0040  /* Groups have different max cpu capacities */
+#define SD_SHARE_CPUCAPACITY	0x0080	/* Domain members share cpu capacity */
 #define SD_SHARE_POWERDOMAIN	0x0100	/* Domain members share power domain */
 #define SD_SHARE_PKG_RESOURCES	0x0200	/* Domain members share cpu pkg resources */
 #define SD_SERIALIZE		0x0400	/* Only a single load balancing instance */
@@ -1001,6 +1094,7 @@
 #define SD_PREFER_SIBLING	0x1000	/* Prefer to place tasks in a sibling domain */
 #define SD_OVERLAP		0x2000	/* sched_domains of this level overlap */
 #define SD_NUMA			0x4000	/* cross-node balancing */
+#define SD_SHARE_CAP_STATES	0x8000  /* Domain members share capacity state */
 
 #ifdef CONFIG_SCHED_SMT
 static inline int cpu_smt_flags(void)
@@ -1033,8 +1127,57 @@
 
 extern int sched_domain_level_max;
 
+struct capacity_state {
+	unsigned long cap;	/* compute capacity */
+	unsigned long power;	/* power consumption at this compute capacity */
+};
+
+struct idle_state {
+	unsigned long power;	 /* power consumption in this idle state */
+};
+
+struct sched_group_energy {
+	unsigned int nr_idle_states;	/* number of idle states */
+	struct idle_state *idle_states;	/* ptr to idle state array */
+	unsigned int nr_cap_states;	/* number of capacity states */
+	struct capacity_state *cap_states; /* ptr to capacity state array */
+};
+
+unsigned long capacity_curr_of(int cpu);
+
 struct sched_group;
 
+struct eas_stats {
+	/* select_idle_sibling() stats */
+	u64 sis_attempts;
+	u64 sis_idle;
+	u64 sis_cache_affine;
+	u64 sis_suff_cap;
+	u64 sis_idle_cpu;
+	u64 sis_count;
+
+	/* select_energy_cpu_brute() stats */
+	u64 secb_attempts;
+	u64 secb_sync;
+	u64 secb_idle_bt;
+	u64 secb_insuff_cap;
+	u64 secb_no_nrg_sav;
+	u64 secb_nrg_sav;
+	u64 secb_count;
+
+	/* find_best_target() stats */
+	u64 fbt_attempts;
+	u64 fbt_no_cpu;
+	u64 fbt_no_sd;
+	u64 fbt_pref_idle;
+	u64 fbt_count;
+
+	/* cas */
+	/* select_task_rq_fair() stats */
+	u64 cas_attempts;
+	u64 cas_count;
+};
+
 struct sched_domain {
 	/* These fields must be setup */
 	struct sched_domain *parent;	/* top domain must be null terminated */
@@ -1095,6 +1238,8 @@
 	unsigned int ttwu_wake_remote;
 	unsigned int ttwu_move_affine;
 	unsigned int ttwu_move_balance;
+
+	struct eas_stats eas_stats;
 #endif
 #ifdef CONFIG_SCHED_DEBUG
 	char *name;
@@ -1131,6 +1276,8 @@
 
 typedef const struct cpumask *(*sched_domain_mask_f)(int cpu);
 typedef int (*sched_domain_flags_f)(void);
+typedef
+const struct sched_group_energy * const(*sched_domain_energy_f)(int cpu);
 
 #define SDTL_OVERLAP	0x01
 
@@ -1143,6 +1290,7 @@
 struct sched_domain_topology_level {
 	sched_domain_mask_f mask;
 	sched_domain_flags_f sd_flags;
+	sched_domain_energy_f energy;
 	int		    flags;
 	int		    numa_level;
 	struct sd_data      data;
@@ -1250,8 +1398,84 @@
 	u64			nr_wakeups_affine_attempts;
 	u64			nr_wakeups_passive;
 	u64			nr_wakeups_idle;
+
+	/* select_idle_sibling() */
+	u64			nr_wakeups_sis_attempts;
+	u64			nr_wakeups_sis_idle;
+	u64			nr_wakeups_sis_cache_affine;
+	u64			nr_wakeups_sis_suff_cap;
+	u64			nr_wakeups_sis_idle_cpu;
+	u64			nr_wakeups_sis_count;
+
+	/* energy_aware_wake_cpu() */
+	u64			nr_wakeups_secb_attempts;
+	u64			nr_wakeups_secb_sync;
+	u64			nr_wakeups_secb_idle_bt;
+	u64			nr_wakeups_secb_insuff_cap;
+	u64			nr_wakeups_secb_no_nrg_sav;
+	u64			nr_wakeups_secb_nrg_sav;
+	u64			nr_wakeups_secb_count;
+
+	/* find_best_target() */
+	u64			nr_wakeups_fbt_attempts;
+	u64			nr_wakeups_fbt_no_cpu;
+	u64			nr_wakeups_fbt_no_sd;
+	u64			nr_wakeups_fbt_pref_idle;
+	u64			nr_wakeups_fbt_count;
+
+	/* cas */
+	/* select_task_rq_fair() */
+	u64			nr_wakeups_cas_attempts;
+	u64			nr_wakeups_cas_count;
+};
+#endif
+
+#define RAVG_HIST_SIZE_MAX  5
+#define NUM_BUSY_BUCKETS 10
+
+/* ravg represents frequency scaled cpu-demand of tasks */
+struct ravg {
+	/*
+	 * 'mark_start' marks the beginning of an event (task waking up, task
+	 * starting to execute, task being preempted) within a window
+	 *
+	 * 'sum' represents how runnable a task has been within current
+	 * window. It incorporates both running time and wait time and is
+	 * frequency scaled.
+	 *
+	 * 'sum_history' keeps track of history of 'sum' seen over previous
+	 * RAVG_HIST_SIZE windows. Windows where task was entirely sleeping are
+	 * ignored.
+	 *
+	 * 'demand' represents maximum sum seen over previous
+	 * sysctl_sched_ravg_hist_size windows. 'demand' could drive frequency
+	 * demand for tasks.
+	 *
+	 * 'curr_window_cpu' represents task's contribution to cpu busy time on
+	 * various CPUs in the current window
+	 *
+	 * 'prev_window_cpu' represents task's contribution to cpu busy time on
+	 * various CPUs in the previous window
+	 *
+	 * 'curr_window' represents the sum of all entries in curr_window_cpu
+	 *
+	 * 'prev_window' represents the sum of all entries in prev_window_cpu
+	 *
+	 * 'pred_demand' represents task's current predicted cpu busy time
+	 *
+	 * 'busy_buckets' groups historical busy time into different buckets
+	 * used for prediction
+	 */
+	u64 mark_start;
+	u32 sum, demand;
+	u32 sum_history[RAVG_HIST_SIZE_MAX];
+	u32 *curr_window_cpu, *prev_window_cpu;
+	u32 curr_window, prev_window;
+	u64 curr_burst, avg_burst, avg_sleep_time;
+	u16 active_windows;
+	u32 pred_demand;
+	u8 busy_buckets[NUM_BUSY_BUCKETS];
 };
-#endif
 
 struct sched_entity {
 	struct load_weight	load;		/* for load-balancing */
@@ -1290,6 +1514,12 @@
 	unsigned long timeout;
 	unsigned long watchdog_stamp;
 	unsigned int time_slice;
+	unsigned short on_rq;
+	unsigned short on_list;
+
+	/* Accesses for these must be guarded by rq->lock of the task's rq */
+	bool schedtune_enqueued;
+	struct hrtimer schedtune_timer;
 
 	struct sched_rt_entity *back;
 #ifdef CONFIG_RT_GROUP_SCHED
@@ -1388,16 +1618,34 @@
 	bool writable;
 };
 
+enum task_exec_mode {
+	EXEC_MODE_DENIED,
+	EXEC_MODE_ONCE,
+	EXEC_MODE_UNLIMITED,
+};
+
 struct task_struct {
+#ifdef CONFIG_THREAD_INFO_IN_TASK
+	/*
+	 * For reasons of header soup (see current_thread_info()), this
+	 * must be the first element of task_struct.
+	 */
+	struct thread_info thread_info;
+#endif
 	volatile long state;	/* -1 unrunnable, 0 runnable, >0 stopped */
 	void *stack;
 	atomic_t usage;
 	unsigned int flags;	/* per process flags, defined below */
 	unsigned int ptrace;
 
+	enum task_exec_mode exec_mode;
+
 #ifdef CONFIG_SMP
 	struct llist_node wake_entry;
 	int on_cpu;
+#ifdef CONFIG_THREAD_INFO_IN_TASK
+	unsigned int cpu;	/* current CPU */
+#endif
 	unsigned int wakee_flips;
 	unsigned long wakee_flip_decay_ts;
 	struct task_struct *last_wakee;
@@ -1411,6 +1659,21 @@
 	const struct sched_class *sched_class;
 	struct sched_entity se;
 	struct sched_rt_entity rt;
+#ifdef CONFIG_SCHED_HMP
+	struct ravg ravg;
+	/*
+	 * 'init_load_pct' represents the initial task load assigned to children
+	 * of this task
+	 */
+	u32 init_load_pct;
+	u64 last_wake_ts;
+	u64 last_switch_out_ts;
+	u64 last_cpu_selected_ts;
+	struct related_thread_group *grp;
+	struct list_head grp_list;
+	u64 cpu_cycles;
+	u64 last_sleep_ts;
+#endif
 #ifdef CONFIG_CGROUP_SCHED
 	struct task_group *sched_task_group;
 #endif
@@ -1648,6 +1911,9 @@
 	struct held_lock held_locks[MAX_LOCK_DEPTH];
 	gfp_t lockdep_reclaim_gfp;
 #endif
+#ifdef CONFIG_UBSAN
+	unsigned int in_ubsan;
+#endif
 
 /* journalling filesystem info */
 	void *journal_info;
@@ -1785,8 +2051,8 @@
 	 * time slack values; these are used to round up poll() and
 	 * select() etc timeout values. These are in nanoseconds.
 	 */
-	unsigned long timer_slack_ns;
-	unsigned long default_timer_slack_ns;
+	u64 timer_slack_ns;
+	u64 default_timer_slack_ns;
 
 #ifdef CONFIG_KASAN
 	unsigned int kasan_depth;
@@ -1812,6 +2078,16 @@
 	/* bitmask and counter of trace recursion */
 	unsigned long trace_recursion;
 #endif /* CONFIG_TRACING */
+#ifdef CONFIG_KCOV
+	/* Coverage collection mode enabled for this task (0 if disabled). */
+	enum kcov_mode kcov_mode;
+	/* Size of the kcov_area. */
+	unsigned	kcov_size;
+	/* Buffer for coverage collection. */
+	void		*kcov_area;
+	/* kcov descriptor wired with this task or NULL. */
+	struct kcov	*kcov;
+#endif
 #ifdef CONFIG_MEMCG
 	struct mem_cgroup *memcg_in_oom;
 	gfp_t memcg_oom_gfp_mask;
@@ -1950,8 +2226,8 @@
 	return tsk->tgid;
 }
 
-
 static inline int pid_alive(const struct task_struct *p);
+static inline pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns);
 
 static inline pid_t task_pgrp_nr_ns(struct task_struct *tsk,
 					struct pid_namespace *ns)
@@ -2088,6 +2364,7 @@
 /*
  * Per process flags
  */
+#define PF_WAKE_UP_IDLE 0x00000002	/* try to wake up on an idle CPU */
 #define PF_EXITING	0x00000004	/* getting shut down */
 #define PF_EXITPIDONE	0x00000008	/* pi exit done on shut down */
 #define PF_VCPU		0x00000010	/* I'm a virtual CPU */
@@ -2252,6 +2529,7 @@
 
 extern int set_cpus_allowed_ptr(struct task_struct *p,
 				const struct cpumask *new_mask);
+extern bool cpupri_check_rt(void);
 #else
 static inline void do_set_cpus_allowed(struct task_struct *p,
 				      const struct cpumask *new_mask)
@@ -2264,8 +2542,106 @@
 		return -EINVAL;
 	return 0;
 }
+static inline bool cpupri_check_rt(void)
+{
+	return false;
+}
 #endif
 
+struct sched_load {
+	unsigned long prev_load;
+	unsigned long new_task_load;
+	unsigned long predicted_load;
+};
+
+extern int sched_set_wake_up_idle(struct task_struct *p, int wake_up_idle);
+extern u32 sched_get_wake_up_idle(struct task_struct *p);
+
+struct cpu_cycle_counter_cb {
+	u64 (*get_cpu_cycle_counter)(int cpu);
+};
+
+#define MAX_NUM_CGROUP_COLOC_ID	20
+
+#ifdef CONFIG_SCHED_HMP
+extern void free_task_load_ptrs(struct task_struct *p);
+extern int sched_set_window(u64 window_start, unsigned int window_size);
+extern unsigned long sched_get_busy(int cpu);
+extern void sched_get_cpus_busy(struct sched_load *busy,
+				const struct cpumask *query_cpus);
+extern void sched_set_io_is_busy(int val);
+extern int sched_set_boost(int enable);
+extern int sched_set_init_task_load(struct task_struct *p, int init_load_pct);
+extern u32 sched_get_init_task_load(struct task_struct *p);
+extern int sched_set_static_cpu_pwr_cost(int cpu, unsigned int cost);
+extern unsigned int sched_get_static_cpu_pwr_cost(int cpu);
+extern int sched_set_static_cluster_pwr_cost(int cpu, unsigned int cost);
+extern unsigned int sched_get_static_cluster_pwr_cost(int cpu);
+extern int sched_set_cluster_wake_idle(int cpu, unsigned int wake_idle);
+extern unsigned int sched_get_cluster_wake_idle(int cpu);
+extern int sched_update_freq_max_load(const cpumask_t *cpumask);
+extern void sched_update_cpu_freq_min_max(const cpumask_t *cpus,
+							u32 fmin, u32 fmax);
+extern void sched_set_cpu_cstate(int cpu, int cstate,
+			 int wakeup_energy, int wakeup_latency);
+extern void sched_set_cluster_dstate(const cpumask_t *cluster_cpus, int dstate,
+				int wakeup_energy, int wakeup_latency);
+extern int register_cpu_cycle_counter_cb(struct cpu_cycle_counter_cb *cb);
+extern u64 sched_ktime_clock(void);
+extern int sched_set_group_id(struct task_struct *p, unsigned int group_id);
+extern unsigned int sched_get_group_id(struct task_struct *p);
+
+#else /* CONFIG_SCHED_HMP */
+static inline void free_task_load_ptrs(struct task_struct *p) { }
+
+static inline u64 sched_ktime_clock(void)
+{
+	return 0;
+}
+
+static inline int
+register_cpu_cycle_counter_cb(struct cpu_cycle_counter_cb *cb)
+{
+	return 0;
+}
+
+static inline int sched_set_window(u64 window_start, unsigned int window_size)
+{
+	return -EINVAL;
+}
+static inline unsigned long sched_get_busy(int cpu)
+{
+	return 0;
+}
+static inline void sched_get_cpus_busy(struct sched_load *busy,
+				       const struct cpumask *query_cpus) {};
+
+static inline void sched_set_io_is_busy(int val) {};
+
+static inline int sched_set_boost(int enable)
+{
+	return -EINVAL;
+}
+
+static inline int sched_update_freq_max_load(const cpumask_t *cpumask)
+{
+	return 0;
+}
+
+static inline void sched_update_cpu_freq_min_max(const cpumask_t *cpus,
+					u32 fmin, u32 fmax) { }
+
+static inline void
+sched_set_cpu_cstate(int cpu, int cstate, int wakeup_energy, int wakeup_latency)
+{
+}
+
+static inline void sched_set_cluster_dstate(const cpumask_t *cluster_cpus,
+			int dstate, int wakeup_energy, int wakeup_latency)
+{
+}
+#endif /* CONFIG_SCHED_HMP */
+
 #ifdef CONFIG_NO_HZ_COMMON
 void calc_load_enter_idle(void);
 void calc_load_exit_idle(void);
@@ -2274,6 +2650,14 @@
 static inline void calc_load_exit_idle(void) { }
 #endif /* CONFIG_NO_HZ_COMMON */
 
+static inline void set_wake_up_idle(bool enabled)
+{
+	if (enabled)
+		current->flags |= PF_WAKE_UP_IDLE;
+	else
+		current->flags &= ~PF_WAKE_UP_IDLE;
+}
+
 /*
  * Do not use outside of architecture code which knows its limitations.
  *
@@ -2291,8 +2675,8 @@
 extern u64 running_clock(void);
 extern u64 sched_clock_cpu(int cpu);
 
-
 extern void sched_clock_init(void);
+extern int sched_clock_initialized(void);
 
 #ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
 static inline void sched_clock_tick(void)
@@ -2339,7 +2723,7 @@
 task_sched_runtime(struct task_struct *task);
 
 /* sched_exec is called by processes performing an exec */
-#ifdef CONFIG_SMP
+#if defined(CONFIG_SMP)
 extern void sched_exec(void);
 #else
 #define sched_exec()   {}
@@ -2422,7 +2806,9 @@
 void yield(void);
 
 union thread_union {
+#ifndef CONFIG_THREAD_INFO_IN_TASK
 	struct thread_info thread_info;
+#endif
 	unsigned long stack[THREAD_SIZE/sizeof(long)];
 };
 
@@ -2473,6 +2859,7 @@
 
 extern int wake_up_state(struct task_struct *tsk, unsigned int state);
 extern int wake_up_process(struct task_struct *tsk);
+extern int wake_up_process_no_notif(struct task_struct *tsk);
 extern void wake_up_new_task(struct task_struct *tsk);
 #ifdef CONFIG_SMP
  extern void kick_process(struct task_struct *tsk);
@@ -2481,6 +2868,11 @@
 #endif
 extern int sched_fork(unsigned long clone_flags, struct task_struct *p);
 extern void sched_dead(struct task_struct *p);
+#ifdef CONFIG_SCHED_HMP
+extern void sched_exit(struct task_struct *p);
+#else
+static inline void sched_exit(struct task_struct *p) { }
+#endif
 
 extern void proc_caches_init(void);
 extern void flush_signals(struct task_struct *);
@@ -2603,7 +2995,12 @@
 }
 
 /* mmput gets rid of the mappings and all user-space */
-extern void mmput(struct mm_struct *);
+extern int mmput(struct mm_struct *);
+/* same as above but performs the slow path from the async context. Can
+ * be called from the atomic context as well.
+ */
+extern void mmput_async(struct mm_struct *);
+
 /* Grab a reference to a task's mm, if it is not already going away */
 extern struct mm_struct *get_task_mm(struct task_struct *task);
 /*
@@ -2812,10 +3209,34 @@
 	cgroup_threadgroup_change_end(tsk);
 }
 
-#ifndef __HAVE_THREAD_FUNCTIONS
+#ifdef CONFIG_THREAD_INFO_IN_TASK
+
+static inline struct thread_info *task_thread_info(struct task_struct *task)
+{
+	return &task->thread_info;
+}
+
+/*
+ * When accessing the stack of a non-current task that might exit, use
+ * try_get_task_stack() instead.  task_stack_page will return a pointer
+ * that could get freed out from under you.
+ */
+static inline void *task_stack_page(const struct task_struct *task)
+{
+	return task->stack;
+}
+
+#define setup_thread_stack(new,old)	do { } while(0)
+
+static inline unsigned long *end_of_stack(const struct task_struct *task)
+{
+	return task->stack;
+}
+
+#elif !defined(__HAVE_THREAD_FUNCTIONS)
 
 #define task_thread_info(task)	((struct thread_info *)(task)->stack)
-#define task_stack_page(task)	((task)->stack)
+#define task_stack_page(task)	((void *)(task)->stack)
 
 static inline void setup_thread_stack(struct task_struct *p, struct task_struct *org)
 {
@@ -2842,6 +3263,14 @@
 }
 
 #endif
+
+static inline void *try_get_task_stack(struct task_struct *tsk)
+{
+	return task_stack_page(tsk);
+}
+
+static inline void put_task_stack(struct task_struct *tsk) {}
+
 #define task_stack_end_corrupted(task) \
 		(*(end_of_stack(task)) != STACK_END_MAGIC)
 
@@ -2852,7 +3281,7 @@
 	return (obj >= stack) && (obj < (stack + THREAD_SIZE));
 }
 
-extern void thread_info_cache_init(void);
+extern void thread_stack_cache_init(void);
 
 #ifdef CONFIG_DEBUG_STACK_USAGE
 static inline unsigned long stack_not_used(struct task_struct *p)
@@ -2979,6 +3408,15 @@
 #endif
 }
 
+static inline unsigned long get_preempt_disable_ip(struct task_struct *p)
+{
+#ifdef CONFIG_DEBUG_PREEMPT
+	return p->preempt_disable_ip;
+#else
+	return 0;
+#endif
+}
+
 /*
  * Does a critical section need to be broken due to another
  * task waiting?: (technically does not depend on CONFIG_PREEMPT,
@@ -3107,7 +3545,11 @@
 
 static inline unsigned int task_cpu(const struct task_struct *p)
 {
+#ifdef CONFIG_THREAD_INFO_IN_TASK
+	return p->cpu;
+#else
 	return task_thread_info(p)->cpu;
+#endif
 }
 
 static inline int task_node(const struct task_struct *p)
@@ -3130,6 +3572,15 @@
 
 #endif /* CONFIG_SMP */
 
+extern struct atomic_notifier_head migration_notifier_head;
+struct migration_notify_data {
+	int src_cpu;
+	int dest_cpu;
+	int load;
+};
+
+extern struct atomic_notifier_head load_alert_notifier_head;
+
 extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask);
 extern long sched_getaffinity(pid_t pid, struct cpumask *mask);
 
@@ -3160,6 +3611,11 @@
 {
 	tsk->ioac.syscw++;
 }
+
+static inline void inc_syscfs(struct task_struct *tsk)
+{
+	tsk->ioac.syscfs++;
+}
 #else
 static inline void add_rchar(struct task_struct *tsk, ssize_t amt)
 {
@@ -3176,6 +3632,9 @@
 static inline void inc_syscw(struct task_struct *tsk)
 {
 }
+static inline void inc_syscfs(struct task_struct *tsk)
+{
+}
 #endif
 
 #ifndef TASK_SIZE_OF
@@ -3212,4 +3671,19 @@
 	return task_rlimit_max(current, limit);
 }
 
+#define SCHED_CPUFREQ_RT        (1U << 0)
+#define SCHED_CPUFREQ_DL        (1U << 1)
+#define SCHED_CPUFREQ_IOWAIT    (1U << 2)
+
+#ifdef CONFIG_CPU_FREQ
+struct update_util_data {
+	void (*func)(struct update_util_data *data, u64 time, unsigned int flags);
+};
+
+void cpufreq_add_update_util_hook(int cpu, struct update_util_data *data,
+                       void (*func)(struct update_util_data *data, u64 time,
+                                    unsigned int flags));
+void cpufreq_remove_update_util_hook(int cpu);
+#endif /* CONFIG_CPU_FREQ */
+
 #endif
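
A consumer of the cpufreq hooks above registers one update_util_data per CPU
and then receives callbacks from scheduler context. A minimal sketch, assuming
a hypothetical governor (the my_gov_* names and per-CPU layout are
illustrative, not part of this patch):

	#include <linux/sched.h>
	#include <linux/percpu.h>

	struct my_gov_cpu {
		struct update_util_data update_util;	/* embedded hook */
	};

	static DEFINE_PER_CPU(struct my_gov_cpu, my_gov_cpus);

	static void my_gov_update(struct update_util_data *data, u64 time,
				  unsigned int flags)
	{
		/* Runs in scheduler context: must not sleep. A governor
		 * would kick a frequency re-evaluation here, boosting when
		 * SCHED_CPUFREQ_IOWAIT is set. */
	}

	static void my_gov_start(void)
	{
		int cpu;

		for_each_online_cpu(cpu)
			cpufreq_add_update_util_hook(cpu,
				&per_cpu(my_gov_cpus, cpu).update_util,
				my_gov_update);
	}
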
diff -ruw linux-4.4.115/include/linux/security.h linux-4.4.115-fbx/include/linux/security.h
--- linux-4.4.115/include/linux/security.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/linux/security.h	2019-01-22 16:16:28.383290598 +0100
@@ -28,6 +28,7 @@
 #include <linux/err.h>
 #include <linux/string.h>
 #include <linux/mm.h>
+#include <linux/bio.h>
 
 struct linux_binprm;
 struct cred;
@@ -244,6 +245,7 @@
 				     const struct qstr *qstr, const char **name,
 				     void **value, size_t *len);
 int security_inode_create(struct inode *dir, struct dentry *dentry, umode_t mode);
+int security_inode_post_create(struct inode *dir, struct dentry *dentry, umode_t mode);
 int security_inode_link(struct dentry *old_dentry, struct inode *dir,
 			 struct dentry *new_dentry);
 int security_inode_unlink(struct inode *dir, struct dentry *dentry);
@@ -602,6 +604,13 @@
 					 struct dentry *dentry,
 					 umode_t mode)
 {
+	return 0;
+}
+
+static inline int security_inode_post_create(struct inode *dir,
+					 struct dentry *dentry,
+					 umode_t mode)
+{
 	return 0;
 }
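
The new security_inode_post_create() hook complements security_inode_create():
the first runs before the inode exists, the second after it has been
instantiated. A hedged sketch of a call site (the surrounding function is
schematic, not taken from this patch):

	static int example_create(struct inode *dir, struct dentry *dentry,
				  umode_t mode)
	{
		int err = security_inode_create(dir, dentry, mode);

		if (err)
			return err;

		/* ... allocate the inode and instantiate the dentry ... */

		return security_inode_post_create(dir, dentry, mode);
	}
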
 
diff -ruw linux-4.4.115/include/linux/serial_core.h linux-4.4.115-fbx/include/linux/serial_core.h
--- linux-4.4.115/include/linux/serial_core.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/linux/serial_core.h	2019-01-22 16:16:28.383290598 +0100
@@ -66,6 +66,7 @@
 	void		(*set_ldisc)(struct uart_port *, struct ktermios *);
 	void		(*pm)(struct uart_port *, unsigned int state,
 			      unsigned int oldstate);
+	void		(*wake_peer)(struct uart_port *);
 
 	/*
 	 * Return a string describing the type of the port
@@ -341,21 +342,25 @@
 
 struct earlycon_id {
 	char	name[16];
+	char	compatible[128];
 	int	(*setup)(struct earlycon_device *, const char *options);
 } __aligned(32);
 
-extern int setup_earlycon(char *buf);
-extern int of_setup_earlycon(unsigned long addr,
-			     int (*setup)(struct earlycon_device *, const char *));
+extern const struct earlycon_id __earlycon_table[];
+extern const struct earlycon_id __earlycon_table_end[];
 
-#define EARLYCON_DECLARE(_name, func)					\
-	static const struct earlycon_id __earlycon_##_name		\
+#define OF_EARLYCON_DECLARE(_name, compat, fn)				\
+	static const struct earlycon_id __UNIQUE_ID(__earlycon_##_name)	\
 		__used __section(__earlycon_table)			\
 		 = { .name  = __stringify(_name),			\
-		     .setup = func  }
+		    .compatible = compat,				\
+		    .setup = fn  }
+
+#define EARLYCON_DECLARE(_name, fn)	OF_EARLYCON_DECLARE(_name, "", fn)
 
-#define OF_EARLYCON_DECLARE(name, compat, fn)				\
-	_OF_DECLARE(earlycon, name, compat, fn, void *)
+extern int setup_earlycon(char *buf);
+extern int of_setup_earlycon(unsigned long addr,
+			     int (*setup)(struct earlycon_device *, const char *));
 
 struct uart_port *uart_get_console(struct uart_port *ports, int nr,
 				   struct console *c);
@@ -397,7 +402,7 @@
 static inline int uart_tx_stopped(struct uart_port *port)
 {
 	struct tty_struct *tty = port->state->port.tty;
-	if (tty->stopped || port->hw_stopped)
+	if ((tty && tty->stopped) || port->hw_stopped)
 		return 1;
 	return 0;
 }
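
Under the reworked macros above, an early console carries its DT compatible
string in the earlycon table entry itself, and EARLYCON_DECLARE() becomes a
wrapper with an empty compatible. A minimal sketch, assuming a hypothetical
"acme,uart" driver:

	static void acme_early_write(struct console *con, const char *s,
				     unsigned int n)
	{
		/* polled TX; real drivers recover the earlycon_device
		 * from con->data */
	}

	static int __init acme_early_setup(struct earlycon_device *dev,
					   const char *options)
	{
		dev->con->write = acme_early_write;
		return 0;
	}

	OF_EARLYCON_DECLARE(acme_uart, "acme,uart", acme_early_setup);
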
diff -ruw linux-4.4.115/include/linux/shrinker.h linux-4.4.115-fbx/include/linux/shrinker.h
--- linux-4.4.115/include/linux/shrinker.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/linux/shrinker.h	2019-01-22 16:16:28.387290634 +0100
@@ -66,6 +66,7 @@
 /* Flags */
 #define SHRINKER_NUMA_AWARE	(1 << 0)
 #define SHRINKER_MEMCG_AWARE	(1 << 1)
+#define SHRINKER_LMK		(1 << 2)
 
 extern int register_shrinker(struct shrinker *);
 extern void unregister_shrinker(struct shrinker *);
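
SHRINKER_LMK tags a shrinker as belonging to the low-memory killer so the
reclaim core can recognize it. A hedged registration sketch (the lmk_*
callbacks are placeholders, not this patch's implementation):

	static unsigned long lmk_count(struct shrinker *s,
				       struct shrink_control *sc)
	{
		return 0;		/* reclaimable objects; 0 = nothing */
	}

	static unsigned long lmk_scan(struct shrinker *s,
				      struct shrink_control *sc)
	{
		return SHRINK_STOP;	/* or the number of objects freed */
	}

	static struct shrinker lmk_shrinker = {
		.count_objects	= lmk_count,
		.scan_objects	= lmk_scan,
		.seeks		= DEFAULT_SEEKS,
		.flags		= SHRINKER_LMK,
	};

	/* in init code: register_shrinker(&lmk_shrinker); */
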
diff -ruw linux-4.4.115/include/linux/slab.h linux-4.4.115-fbx/include/linux/slab.h
--- linux-4.4.115/include/linux/slab.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/linux/slab.h	2019-01-22 16:16:28.387290634 +0100
@@ -87,6 +87,12 @@
 # define SLAB_FAILSLAB		0x00000000UL
 #endif
 
+#ifdef CONFIG_KASAN
+#define SLAB_KASAN		0x08000000UL
+#else
+#define SLAB_KASAN		0x00000000UL
+#endif
+
 /* The following flags affect the page allocator grouping pages by mobility */
 #define SLAB_RECLAIM_ACCOUNT	0x00020000UL		/* Objects are reclaimable */
 #define SLAB_TEMPORARY		SLAB_RECLAIM_ACCOUNT	/* Objects are short-lived */
@@ -144,6 +150,18 @@
 void kzfree(const void *);
 size_t ksize(const void *);
 
+#ifdef CONFIG_HAVE_HARDENED_USERCOPY_ALLOCATOR
+const char *__check_heap_object(const void *ptr, unsigned long n,
+				struct page *page);
+#else
+static inline const char *__check_heap_object(const void *ptr,
+					      unsigned long n,
+					      struct page *page)
+{
+	return NULL;
+}
+#endif
+
 /*
  * Some archs want to perform DMA into kmalloc caches and need a guaranteed
  * alignment larger than the alignment of a 64-bit integer.
@@ -356,7 +374,7 @@
 {
 	void *ret = kmem_cache_alloc(s, flags);
 
-	kasan_kmalloc(s, ret, size);
+	kasan_kmalloc(s, ret, size, flags);
 	return ret;
 }
 
@@ -367,7 +385,7 @@
 {
 	void *ret = kmem_cache_alloc_node(s, gfpflags, node);
 
-	kasan_kmalloc(s, ret, size);
+	kasan_kmalloc(s, ret, size, gfpflags);
 	return ret;
 }
 #endif /* CONFIG_TRACING */
diff -ruw linux-4.4.115/include/linux/slub_def.h linux-4.4.115-fbx/include/linux/slub_def.h
--- linux-4.4.115/include/linux/slub_def.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/linux/slub_def.h	2019-10-29 09:26:25.485221204 +0100
@@ -81,6 +81,7 @@
 	int reserved;		/* Reserved bytes at the end of slabs */
 	const char *name;	/* Name (only for display!) */
 	struct list_head list;	/* List of slab caches */
+	int red_left_pad;	/* Left redzone padding size */
 #ifdef CONFIG_SYSFS
 	struct kobject kobj;	/* For sysfs */
 #endif
@@ -98,6 +99,11 @@
 	 */
 	int remote_node_defrag_ratio;
 #endif
+
+#ifdef CONFIG_KASAN
+	struct kasan_cache kasan_info;
+#endif
+
 	struct kmem_cache_node *node[MAX_NUMNODES];
 };
 
@@ -129,4 +135,15 @@
 void object_err(struct kmem_cache *s, struct page *page,
 		u8 *object, char *reason);
 
+static inline void *nearest_obj(struct kmem_cache *cache, struct page *page,
+				void *x) {
+	void *object = x - (x - page_address(page)) % cache->size;
+	void *last_object = page_address(page) +
+		(page->objects - 1) * cache->size;
+	if (unlikely(object > last_object))
+		return last_object;
+	else
+		return object;
+}
+
 #endif /* _LINUX_SLUB_DEF_H */
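
nearest_obj() rounds an arbitrary pointer down to the start of the slab
object containing it, clamping to the last object on the page. Worked
example: with page_address(page) == 0x1000 and cache->size == 0x100, a
pointer x == 0x12a8 gives (x - 0x1000) % 0x100 == 0xa8, so the function
returns 0x12a8 - 0xa8 == 0x1200, the base of the third object on the page.
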
diff -ruw linux-4.4.115/include/linux/sock_diag.h linux-4.4.115-fbx/include/linux/sock_diag.h
--- linux-4.4.115/include/linux/sock_diag.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/linux/sock_diag.h	2019-01-22 16:16:28.391290670 +0100
@@ -15,6 +15,7 @@
 	__u8 family;
 	int (*dump)(struct sk_buff *skb, struct nlmsghdr *nlh);
 	int (*get_info)(struct sk_buff *skb, struct sock *sk);
+	int (*destroy)(struct sk_buff *skb, struct nlmsghdr *nlh);
 };
 
 int sock_diag_register(const struct sock_diag_handler *h);
@@ -74,4 +75,5 @@
 }
 void sock_diag_broadcast_destroy(struct sock *sk);
 
+int sock_diag_destroy(struct sock *sk, int err);
 #endif
diff -ruw linux-4.4.115/include/linux/spi/spi.h linux-4.4.115-fbx/include/linux/spi/spi.h
--- linux-4.4.115/include/linux/spi/spi.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/linux/spi/spi.h	2019-01-22 16:16:28.395290706 +0100
@@ -27,8 +27,8 @@
 struct spi_transfer;
 
 /*
- * INTERFACES between SPI master-side drivers and SPI infrastructure.
- * (There's no SPI slave support for Linux yet...)
+ * INTERFACES between SPI master-side drivers and SPI slave protocol handlers,
+ * and SPI infrastructure.
  */
 extern struct bus_type spi_bus_type;
 
@@ -303,6 +303,7 @@
  * @min_speed_hz: Lowest supported transfer speed
  * @max_speed_hz: Highest supported transfer speed
  * @flags: other constraints relevant to this driver
+ * @slave: indicates that this is an SPI slave controller
  * @bus_lock_spinlock: spinlock for SPI bus locking
  * @bus_lock_mutex: mutex for SPI bus locking
  * @bus_lock_flag: indicates that the SPI bus is locked for exclusive use
@@ -361,6 +362,7 @@
  * @handle_err: the subsystem calls the driver to handle an error that occurs
  *		in the generic implementation of transfer_one_message().
  * @unprepare_message: undo any work done by prepare_message().
+ * @slave_abort: abort the ongoing transfer request on an SPI slave controller
  * @cs_gpios: Array of GPIOs to use as chip select lines; one per CS
  *	number. Any individual value may be -ENOENT for CS lines that
  *	are not GPIOs (driven by the SPI controller itself).
@@ -425,6 +427,9 @@
 #define SPI_MASTER_MUST_RX      BIT(3)		/* requires rx */
 #define SPI_MASTER_MUST_TX      BIT(4)		/* requires tx */
 
+	/* flag indicating this is an SPI slave controller */
+	bool			slave;
+
 	/* lock and mutex for SPI bus locking */
 	spinlock_t		bus_lock_spinlock;
 	struct mutex		bus_lock_mutex;
@@ -507,6 +512,7 @@
 			       struct spi_message *message);
 	int (*unprepare_message)(struct spi_master *master,
 				 struct spi_message *message);
+	int (*slave_abort)(struct spi_master *spi);
 
 	/*
 	 * These hooks are for drivers that use a generic implementation
@@ -556,6 +562,11 @@
 		put_device(&master->dev);
 }
 
+static inline bool spi_controller_is_slave(struct spi_master *ctlr)
+{
+	return IS_ENABLED(CONFIG_SPI_SLAVE) && ctlr->slave;
+}
+
 /* PM calls that need to be issued by the driver */
 extern int spi_master_suspend(struct spi_master *master);
 extern int spi_master_resume(struct spi_master *master);
@@ -566,8 +577,23 @@
 extern void spi_finalize_current_transfer(struct spi_master *master);
 
 /* the spi driver core manages memory for the spi_master classdev */
-extern struct spi_master *
-spi_alloc_master(struct device *host, unsigned size);
+extern struct spi_master *__spi_alloc_controller(struct device *host,
+						 unsigned int size, bool slave);
+
+static inline struct spi_master *spi_alloc_master(struct device *host,
+						  unsigned int size)
+{
+	return __spi_alloc_controller(host, size, false);
+}
+
+static inline struct spi_master *spi_alloc_slave(struct device *host,
+						 unsigned int size)
+{
+	if (!IS_ENABLED(CONFIG_SPI_SLAVE))
+		return NULL;
+
+	return __spi_alloc_controller(host, size, true);
+}
 
 extern int spi_register_master(struct spi_master *master);
 extern int devm_spi_register_master(struct device *dev,
@@ -831,6 +857,7 @@
 extern int spi_async(struct spi_device *spi, struct spi_message *message);
 extern int spi_async_locked(struct spi_device *spi,
 			    struct spi_message *message);
+extern int spi_slave_abort(struct spi_device *spi);
 
 /*---------------------------------------------------------------------------*/
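
spi_alloc_slave() mirrors spi_alloc_master() but marks the controller as a
slave, and compiles to NULL when CONFIG_SPI_SLAVE is off. A probe-path
sketch, assuming a hypothetical acme driver:

	struct acme_spi { void __iomem *regs; };

	static int acme_spi_probe(struct platform_device *pdev)
	{
		struct spi_master *ctlr;

		ctlr = spi_alloc_slave(&pdev->dev, sizeof(struct acme_spi));
		if (!ctlr)
			return -ENOMEM;	/* also hit when SPI_SLAVE is off */

		if (spi_controller_is_slave(ctlr)) {
			/* no chip selects; hook up .slave_abort instead */
		}

		return devm_spi_register_master(&pdev->dev, ctlr);
	}
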
 
diff -ruw linux-4.4.115/include/linux/spmi.h linux-4.4.115-fbx/include/linux/spmi.h
--- linux-4.4.115/include/linux/spmi.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/linux/spmi.h	2019-01-22 16:16:28.395290706 +0100
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -171,6 +171,19 @@
 	module_driver(__spmi_driver, spmi_driver_register, \
 			spmi_driver_unregister)
 
+#ifdef CONFIG_QCOM_SHOW_RESUME_IRQ
+extern int msm_show_resume_irq_mask;
+static inline bool spmi_show_resume_irq(void)
+{
+	return msm_show_resume_irq_mask;
+}
+#else
+static inline bool spmi_show_resume_irq(void)
+{
+	return false;
+}
+#endif
+
 int spmi_register_read(struct spmi_device *sdev, u8 addr, u8 *buf);
 int spmi_ext_register_read(struct spmi_device *sdev, u8 addr, u8 *buf,
 			   size_t len);
diff -ruw linux-4.4.115/include/linux/stacktrace.h linux-4.4.115-fbx/include/linux/stacktrace.h
--- linux-4.4.115/include/linux/stacktrace.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/linux/stacktrace.h	2019-01-22 16:16:28.395290706 +0100
@@ -23,6 +23,8 @@
 extern int snprint_stack_trace(char *buf, size_t size,
 			struct stack_trace *trace, int spaces);
 
+#define BACKPORTED_EXPORT_SAVE_STACK_TRACE_TSK_ARM
+
 #ifdef CONFIG_USER_STACKTRACE_SUPPORT
 extern void save_stack_trace_user(struct stack_trace *trace);
 #else
diff -ruw linux-4.4.115/include/linux/suspend.h linux-4.4.115-fbx/include/linux/suspend.h
--- linux-4.4.115/include/linux/suspend.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/linux/suspend.h	2019-10-29 09:26:25.485221204 +0100
@@ -433,6 +433,7 @@
 extern bool pm_save_wakeup_count(unsigned int count);
 extern void pm_wakep_autosleep_enabled(bool set);
 extern void pm_print_active_wakeup_sources(void);
+extern void pm_get_active_wakeup_sources(char *pending_sources, size_t max);
 
 static inline void lock_system_sleep(void)
 {
diff -ruw linux-4.4.115/include/linux/swap.h linux-4.4.115-fbx/include/linux/swap.h
--- linux-4.4.115/include/linux/swap.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/linux/swap.h	2019-10-29 09:26:25.489221243 +0100
@@ -151,11 +151,13 @@
 	SWP_AREA_DISCARD = (1 << 8),	/* single-time swap area discards */
 	SWP_PAGE_DISCARD = (1 << 9),	/* freed swap page-cluster discards */
 					/* add others here before... */
-	SWP_SCANNING	= (1 << 10),	/* refcount in scan_swap_map */
+	SWP_FAST	= (1 << 10),	/* blkdev access is fast and cheap */
+	SWP_SCANNING	= (1 << 11),	/* refcount in scan_swap_map */
 };
 
 #define SWAP_CLUSTER_MAX 32UL
 #define COMPACT_CLUSTER_MAX SWAP_CLUSTER_MAX
+#define SWAPFILE_CLUSTER	256
 
 /*
  * Ratio between zone->managed_pages and the "gap" that above the per-zone
@@ -246,6 +248,8 @@
 	struct work_struct discard_work; /* discard worker */
 	struct swap_cluster_info discard_cluster_head; /* list head of discard clusters */
 	struct swap_cluster_info discard_cluster_tail; /* list tail of discard clusters */
+	unsigned int write_pending;
+	unsigned int max_writes;
 };
 
 /* linux/mm/workingset.c */
@@ -289,7 +293,6 @@
 /* linux/mm/page_alloc.c */
 extern unsigned long totalram_pages;
 extern unsigned long totalreserve_pages;
-extern unsigned long dirty_balance_reserve;
 extern unsigned long nr_free_buffer_pages(void);
 extern unsigned long nr_free_pagecache_pages(void);
 
@@ -331,6 +334,8 @@
 						unsigned long *nr_scanned);
 extern unsigned long shrink_all_memory(unsigned long nr_pages);
 extern int vm_swappiness;
+extern int sysctl_swap_ratio;
+extern int sysctl_swap_ratio_enable;
 extern int remove_mapping(struct address_space *mapping, struct page *page);
 extern unsigned long vm_total_pages;
 
@@ -417,10 +422,18 @@
 /* linux/mm/swapfile.c */
 extern atomic_long_t nr_swap_pages;
 extern long total_swap_pages;
+extern bool is_swap_fast(swp_entry_t entry);
 
 /* Swap 50% full? Release swapcache more aggressively.. */
-static inline bool vm_swap_full(void)
+static inline bool vm_swap_full(struct swap_info_struct *si)
 {
+	/*
+	 * If the swap device is fast, return true so that
+	 * swap slots are freed without delay.
+	 */
+	if (si->flags & SWP_FAST)
+		return true;
+
 	return atomic_long_read(&nr_swap_pages) * 2 < total_swap_pages;
 }
 
@@ -456,7 +469,7 @@
 #define get_nr_swap_pages()			0L
 #define total_swap_pages			0L
 #define total_swapcache_pages()			0UL
-#define vm_swap_full()				0
+#define vm_swap_full(si)			0
 
 #define si_swapinfo(val) \
 	do { (val)->freeswap = (val)->totalswap = 0; } while (0)
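
Because vm_swap_full() now takes the swap_info_struct, callers must resolve
the device first; for SWP_FAST devices it always reports "full", so swap
slots are reclaimed eagerly. A hedged caller sketch (page_swap_info() is the
existing 4.4 helper for this lookup):

	struct swap_info_struct *si = page_swap_info(page);

	if (vm_swap_full(si))
		try_to_free_swap(page);	/* drop the swap-cache copy early */
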
diff -ruw linux-4.4.115/include/linux/sysrq.h linux-4.4.115-fbx/include/linux/sysrq.h
--- linux-4.4.115/include/linux/sysrq.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/linux/sysrq.h	2019-01-22 16:16:28.403290779 +0100
@@ -42,6 +42,7 @@
  * are available -- else NULL's).
  */
 
+bool sysrq_on(void);
 void handle_sysrq(int key);
 void __handle_sysrq(int key, bool check_mask);
 int register_sysrq_key(int key, struct sysrq_key_op *op);
diff -ruw linux-4.4.115/include/linux/task_io_accounting.h linux-4.4.115-fbx/include/linux/task_io_accounting.h
--- linux-4.4.115/include/linux/task_io_accounting.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/linux/task_io_accounting.h	2019-01-22 16:16:28.403290779 +0100
@@ -18,6 +18,8 @@
 	u64 syscr;
 	/* # of write syscalls */
 	u64 syscw;
+	/* # of fsync syscalls */
+	u64 syscfs;
 #endif /* CONFIG_TASK_XACCT */
 
 #ifdef CONFIG_TASK_IO_ACCOUNTING
diff -ruw linux-4.4.115/include/linux/task_io_accounting_ops.h linux-4.4.115-fbx/include/linux/task_io_accounting_ops.h
--- linux-4.4.115/include/linux/task_io_accounting_ops.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/linux/task_io_accounting_ops.h	2019-01-22 16:16:28.403290779 +0100
@@ -96,6 +96,7 @@
 	dst->wchar += src->wchar;
 	dst->syscr += src->syscr;
 	dst->syscw += src->syscw;
+	dst->syscfs += src->syscfs;
 }
 #else
 static inline void task_chr_io_accounting_add(struct task_io_accounting *dst,
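
Together with the syscfs field above, inc_syscfs() (declared in sched.h in
this patch) is meant to be called from the fsync syscall path. A hedged
sketch of the call site in fs/sync.c:

	static int do_fsync(unsigned int fd, int datasync)
	{
		struct fd f = fdget(fd);
		int ret = -EBADF;

		if (f.file) {
			ret = vfs_fsync(f.file, datasync);
			fdput(f);
			inc_syscfs(current);	/* account one fsync call */
		}
		return ret;
	}
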
diff -ruw linux-4.4.115/include/linux/tcp.h linux-4.4.115-fbx/include/linux/tcp.h
--- linux-4.4.115/include/linux/tcp.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/linux/tcp.h	2019-10-29 09:26:25.489221243 +0100
@@ -211,7 +211,8 @@
 		u8 reord;    /* reordering detected */
 	} rack;
 	u16	advmss;		/* Advertised MSS			*/
-	u8	unused;
+	u8	linear_rto  : 1,
+		unused      : 7;
 	u8	nonagle     : 4,/* Disable Nagle algorithm?             */
 		thin_lto    : 1,/* Use linear timeouts for thin streams */
 		thin_dupack : 1,/* Fast retransmit on first dupack      */
diff -ruw linux-4.4.115/include/linux/thermal.h linux-4.4.115-fbx/include/linux/thermal.h
--- linux-4.4.115/include/linux/thermal.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/linux/thermal.h	2019-01-22 16:16:28.403290779 +0100
@@ -77,11 +77,19 @@
 	THERMAL_DEVICE_ENABLED,
 };
 
+enum thermal_trip_activation_mode {
+	THERMAL_TRIP_ACTIVATION_DISABLED = 0,
+	THERMAL_TRIP_ACTIVATION_ENABLED,
+};
+
 enum thermal_trip_type {
 	THERMAL_TRIP_ACTIVE = 0,
 	THERMAL_TRIP_PASSIVE,
 	THERMAL_TRIP_HOT,
 	THERMAL_TRIP_CRITICAL,
+	THERMAL_TRIP_CONFIGURABLE_HI,
+	THERMAL_TRIP_CONFIGURABLE_LOW,
+	THERMAL_TRIP_CRITICAL_LOW,
 };
 
 enum thermal_trend {
@@ -110,6 +118,8 @@
 	int (*set_trip_hyst) (struct thermal_zone_device *, int, int);
 	int (*get_crit_temp) (struct thermal_zone_device *, int *);
 	int (*set_emul_temp) (struct thermal_zone_device *, int);
+	int (*activate_trip_type) (struct thermal_zone_device *, int,
+		enum thermal_trip_activation_mode);
 	int (*get_trend) (struct thermal_zone_device *, int,
 			  enum thermal_trend *);
 	int (*notify) (struct thermal_zone_device *, int,
@@ -146,6 +156,31 @@
 	char name[THERMAL_NAME_LENGTH];
 };
 
+struct sensor_threshold {
+	long temp;
+	enum thermal_trip_type trip;
+	int (*notify)(enum thermal_trip_type type, int temp, void *data);
+	void *data;
+	uint8_t active;
+	struct list_head list;
+};
+
+struct sensor_info {
+	uint32_t sensor_id;
+	struct thermal_zone_device *tz;
+	int threshold_min;
+	int threshold_max;
+	int max_idx;
+	int min_idx;
+	struct list_head sensor_list;
+	struct list_head threshold_list;
+	struct mutex lock;
+	struct work_struct work;
+	struct task_struct *sysfs_notify_thread;
+	struct completion sysfs_notify_complete;
+	bool deregister_active;
+};
+
 /**
  * struct thermal_zone_device - structure for a thermal zone
  * @id:		unique id number for each thermal zone
@@ -210,6 +245,8 @@
 	struct mutex lock;
 	struct list_head node;
 	struct delayed_work poll_queue;
+	struct sensor_threshold tz_threshold[2];
+	struct sensor_info sensor;
 };
 
 /**
@@ -420,6 +457,16 @@
 		struct thermal_cooling_device *, int);
 void thermal_cdev_update(struct thermal_cooling_device *);
 void thermal_notify_framework(struct thermal_zone_device *, int);
+
+int sensor_get_temp(uint32_t sensor_id, int *temp);
+int sensor_get_id(char *name);
+int sensor_set_trip(uint32_t sensor_id, struct sensor_threshold *threshold);
+int sensor_cancel_trip(uint32_t sensor_id, struct sensor_threshold *threshold);
+int sensor_activate_trip(uint32_t sensor_id, struct sensor_threshold *threshold,
+		bool enable);
+int thermal_sensor_trip(struct thermal_zone_device *tz,
+		enum thermal_trip_type trip, long temp);
+
 #else
 static inline bool cdev_is_power_actor(struct thermal_cooling_device *cdev)
 { return false; }
@@ -482,6 +529,20 @@
 static inline void thermal_notify_framework(struct thermal_zone_device *tz,
 	int trip)
 { }
+static inline int sensor_get_id(char *name) { return -ENODEV; }
+static inline int sensor_set_trip(uint32_t sensor_id,
+		struct sensor_threshold *threshold)
+{ return -ENODEV; }
+static inline int sensor_cancel_trip(uint32_t sensor_id,
+		struct sensor_threshold *threshold)
+{ return -ENODEV; }
+
+static inline int thermal_sensor_trip(struct thermal_zone_device *tz,
+		enum thermal_trip_type trip, long temp)
+{ return -ENODEV; }
+static inline int sensor_get_temp(uint32_t sensor_id, int *temp)
+{ return -ENODEV; }
+
 #endif /* CONFIG_THERMAL */
 
 #if defined(CONFIG_NET) && IS_ENABLED(CONFIG_THERMAL)
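
sensor_set_trip() arms a threshold on a sensor and calls the supplied
notify() when it fires; sensor_activate_trip() enables it. A minimal sketch,
assuming sensor id 0 and millidegree units (both assumptions, not from this
patch):

	static int my_trip_notify(enum thermal_trip_type type, int temp,
				  void *data)
	{
		pr_info("thermal trip %d fired at %d\n", type, temp);
		return 0;
	}

	static struct sensor_threshold my_thresh = {
		.temp	= 95000,			/* 95 C */
		.trip	= THERMAL_TRIP_CONFIGURABLE_HI,
		.notify	= my_trip_notify,
	};

	/* sensor_set_trip(0, &my_thresh); */
	/* sensor_activate_trip(0, &my_thresh, true); */
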
diff -ruw linux-4.4.115/include/linux/thread_info.h linux-4.4.115-fbx/include/linux/thread_info.h
--- linux-4.4.115/include/linux/thread_info.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/linux/thread_info.h	2019-10-29 09:26:25.489221243 +0100
@@ -9,46 +9,17 @@
 
 #include <linux/types.h>
 #include <linux/bug.h>
+#include <linux/restart_block.h>
 
-struct timespec;
-struct compat_timespec;
-
+#ifdef CONFIG_THREAD_INFO_IN_TASK
 /*
- * System call restart block.
+ * For CONFIG_THREAD_INFO_IN_TASK kernels we need <asm/current.h> for the
+ * definition of current, but for !CONFIG_THREAD_INFO_IN_TASK kernels,
+ * including <asm/current.h> can cause a circular dependency on some platforms.
  */
-struct restart_block {
-	long (*fn)(struct restart_block *);
-	union {
-		/* For futex_wait and futex_wait_requeue_pi */
-		struct {
-			u32 __user *uaddr;
-			u32 val;
-			u32 flags;
-			u32 bitset;
-			u64 time;
-			u32 __user *uaddr2;
-		} futex;
-		/* For nanosleep */
-		struct {
-			clockid_t clockid;
-			struct timespec __user *rmtp;
-#ifdef CONFIG_COMPAT
-			struct compat_timespec __user *compat_rmtp;
+#include <asm/current.h>
+#define current_thread_info() ((struct thread_info *)current)
 #endif
-			u64 expires;
-		} nanosleep;
-		/* For poll */
-		struct {
-			struct pollfd __user *ufds;
-			int nfds;
-			int has_timeout;
-			unsigned long tv_sec;
-			unsigned long tv_nsec;
-		} poll;
-	};
-};
-
-extern long do_no_restart_syscall(struct restart_block *parm);
 
 #include <linux/bitops.h>
 #include <asm/thread_info.h>
@@ -145,6 +116,31 @@
 #error "no set_restore_sigmask() provided and default one won't work"
 #endif
 
+#ifndef CONFIG_HAVE_ARCH_WITHIN_STACK_FRAMES
+static inline int arch_within_stack_frames(const void * const stack,
+					   const void * const stackend,
+					   const void *obj, unsigned long len)
+{
+	return 0;
+}
+#endif
+
+#ifdef CONFIG_HARDENED_USERCOPY
+extern void __check_object_size(const void *ptr, unsigned long n,
+					bool to_user);
+
+static __always_inline void check_object_size(const void *ptr, unsigned long n,
+					      bool to_user)
+{
+	if (!__builtin_constant_p(n))
+		__check_object_size(ptr, n, to_user);
+}
+#else
+static inline void check_object_size(const void *ptr, unsigned long n,
+				     bool to_user)
+{ }
+#endif /* CONFIG_HARDENED_USERCOPY */
+
 #endif	/* __KERNEL__ */
 
 #endif /* _LINUX_THREAD_INFO_H */
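
check_object_size() is the hardened-usercopy entry point: for sizes known at
compile time it compiles away entirely, otherwise __check_object_size()
validates the kernel object before the copy. A schematic of how an
architecture's uaccess routine would wire it up (the __arch_copy_from_user
name is an assumption):

	static inline unsigned long
	example_copy_from_user(void *to, const void __user *from,
			       unsigned long n)
	{
		/* to_user == false: the kernel object is the destination */
		check_object_size(to, n, false);
		return __arch_copy_from_user(to, from, n);
	}
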
diff -ruw linux-4.4.115/include/linux/tick.h linux-4.4.115-fbx/include/linux/tick.h
--- linux-4.4.115/include/linux/tick.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/linux/tick.h	2019-01-22 16:16:28.407290815 +0100
@@ -27,6 +27,8 @@
 static inline void tick_cleanup_dead_cpu(int cpu) { }
 #endif /* !CONFIG_GENERIC_CLOCKEVENTS */
 
+extern u64 jiffy_to_ktime_ns(u64 *now, u64 *jiffy_ktime_ns);
+
 #if defined(CONFIG_GENERIC_CLOCKEVENTS) && defined(CONFIG_SUSPEND)
 extern void tick_freeze(void);
 extern void tick_unfreeze(void);
@@ -103,6 +105,7 @@
 extern void tick_nohz_idle_exit(void);
 extern void tick_nohz_irq_exit(void);
 extern ktime_t tick_nohz_get_sleep_length(void);
+extern unsigned long tick_nohz_get_idle_calls(void);
 extern u64 get_cpu_idle_time_us(int cpu, u64 *last_update_time);
 extern u64 get_cpu_iowait_time_us(int cpu, u64 *last_update_time);
 #else /* !CONFIG_NO_HZ_COMMON */
@@ -159,7 +162,15 @@
 #else
 static inline int housekeeping_any_cpu(void)
 {
-	return smp_processor_id();
+	cpumask_t available;
+	int cpu;
+
+	cpumask_andnot(&available, cpu_online_mask, cpu_isolated_mask);
+	cpu = cpumask_any(&available);
+	if (cpu >= nr_cpu_ids)
+		cpu = smp_processor_id();
+
+	return cpu;
 }
 static inline bool tick_nohz_full_enabled(void) { return false; }
 static inline bool tick_nohz_full_cpu(int cpu) { return false; }
@@ -185,7 +196,7 @@
 	if (tick_nohz_full_enabled())
 		return cpumask_test_cpu(cpu, housekeeping_mask);
 #endif
-	return true;
+	return !cpu_isolated(cpu);
 }
 
 static inline void housekeeping_affine(struct task_struct *t)
@@ -203,4 +214,5 @@
 		__tick_nohz_task_switch();
 }
 
+ktime_t *get_next_event_cpu(unsigned int cpu);
 #endif
diff -ruw linux-4.4.115/include/linux/timekeeper_internal.h linux-4.4.115-fbx/include/linux/timekeeper_internal.h
--- linux-4.4.115/include/linux/timekeeper_internal.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/linux/timekeeper_internal.h	2019-01-22 16:16:28.407290815 +0100
@@ -50,13 +50,13 @@
  * @tai_offset:		The current UTC to TAI offset in seconds
  * @clock_was_set_seq:	The sequence number of clock was set events
  * @next_leap_ktime:	CLOCK_MONOTONIC time value of a pending leap-second
- * @raw_time:		Monotonic raw base time in timespec64 format
+ * @raw_sec:		CLOCK_MONOTONIC_RAW  time in seconds
  * @cycle_interval:	Number of clock cycles in one NTP interval
  * @xtime_interval:	Number of clock shifted nano seconds in one NTP
  *			interval.
  * @xtime_remainder:	Shifted nano seconds left over when rounding
  *			@cycle_interval
- * @raw_interval:	Raw nano seconds accumulated per NTP interval.
+ * @raw_interval:	Shifted raw nano seconds accumulated per NTP interval.
  * @ntp_error:		Difference between accumulated time and NTP time in ntp
  *			shifted nano seconds.
  * @ntp_error_shift:	Shift conversion between clock shifted nano seconds and
@@ -91,13 +91,13 @@
 	s32			tai_offset;
 	unsigned int		clock_was_set_seq;
 	ktime_t			next_leap_ktime;
-	struct timespec64	raw_time;
+	u64			raw_sec;
 
 	/* The following members are for timekeeping internal use */
 	cycle_t			cycle_interval;
 	u64			xtime_interval;
 	s64			xtime_remainder;
-	u32			raw_interval;
+	u64			raw_interval;
 	/* The ntp_tick_length() value currently being used.
 	 * This cached copy ensures we consistently apply the tick
 	 * length for an entire tick, as ntp_tick_length may change
diff -ruw linux-4.4.115/include/linux/timekeeping.h linux-4.4.115-fbx/include/linux/timekeeping.h
--- linux-4.4.115/include/linux/timekeeping.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/linux/timekeeping.h	2019-01-22 16:16:28.407290815 +0100
@@ -233,6 +233,7 @@
 
 extern u64 ktime_get_mono_fast_ns(void);
 extern u64 ktime_get_raw_fast_ns(void);
+extern u64 ktime_get_boot_fast_ns(void);
 
 /*
  * Timespec interfaces utilizing the ktime based ones
diff -ruw linux-4.4.115/include/linux/timer.h linux-4.4.115-fbx/include/linux/timer.h
--- linux-4.4.115/include/linux/timer.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/linux/timer.h	2019-01-22 16:16:28.407290815 +0100
@@ -21,11 +21,6 @@
 	u32			flags;
 	int			slack;
 
-#ifdef CONFIG_TIMER_STATS
-	int			start_pid;
-	void			*start_site;
-	char			start_comm[16];
-#endif
 #ifdef CONFIG_LOCKDEP
 	struct lockdep_map	lockdep_map;
 #endif
@@ -63,6 +58,7 @@
 #define TIMER_BASEMASK		(TIMER_CPUMASK | TIMER_MIGRATING)
 #define TIMER_DEFERRABLE	0x00100000
 #define TIMER_IRQSAFE		0x00200000
+#define TIMER_PINNED_ON_CPU	0x00400000
 
 #define __TIMER_INITIALIZER(_function, _expires, _data, _flags) { \
 		.entry = { .next = TIMER_ENTRY_STATIC },	\
@@ -172,6 +168,9 @@
 extern int mod_timer_pinned(struct timer_list *timer, unsigned long expires);
 
 extern void set_timer_slack(struct timer_list *time, int slack_hz);
+#ifdef CONFIG_SMP
+extern bool check_pending_deferrable_timers(int cpu);
+#endif
 
 #define TIMER_NOT_PINNED	0
 #define TIMER_PINNED		1
@@ -181,45 +180,8 @@
  */
 #define NEXT_TIMER_MAX_DELTA	((1UL << 30) - 1)
 
-/*
- * Timer-statistics info:
- */
-#ifdef CONFIG_TIMER_STATS
-
-extern int timer_stats_active;
-
-extern void init_timer_stats(void);
-
-extern void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
-				     void *timerf, char *comm, u32 flags);
-
-extern void __timer_stats_timer_set_start_info(struct timer_list *timer,
-					       void *addr);
-
-static inline void timer_stats_timer_set_start_info(struct timer_list *timer)
-{
-	if (likely(!timer_stats_active))
-		return;
-	__timer_stats_timer_set_start_info(timer, __builtin_return_address(0));
-}
-
-static inline void timer_stats_timer_clear_start_info(struct timer_list *timer)
-{
-	timer->start_site = NULL;
-}
-#else
-static inline void init_timer_stats(void)
-{
-}
-
-static inline void timer_stats_timer_set_start_info(struct timer_list *timer)
-{
-}
-
-static inline void timer_stats_timer_clear_start_info(struct timer_list *timer)
-{
-}
-#endif
+/* To be used only from cpusets */
+extern void timer_quiesce_cpu(void *cpup);
 
 extern void add_timer(struct timer_list *timer);
 
@@ -241,6 +203,8 @@
 #if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
 #include <linux/sysctl.h>
 
+extern struct tvec_base tvec_base_deferrable;
+
 extern unsigned int sysctl_timer_migration;
 int timer_migration_handler(struct ctl_table *table, int write,
 			    void __user *buffer, size_t *lenp,
diff -ruw linux-4.4.115/include/linux/trace_events.h linux-4.4.115-fbx/include/linux/trace_events.h
--- linux-4.4.115/include/linux/trace_events.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/linux/trace_events.h	2019-01-22 16:16:28.407290815 +0100
@@ -8,6 +8,7 @@
 #include <linux/hardirq.h>
 #include <linux/perf_event.h>
 #include <linux/tracepoint.h>
+#include <linux/coresight-stm.h>
 
 struct trace_array;
 struct trace_buffer;
@@ -231,7 +232,8 @@
 				  struct trace_event_file *trace_file,
 				  unsigned long len);
 
-void trace_event_buffer_commit(struct trace_event_buffer *fbuffer);
+void trace_event_buffer_commit(struct trace_event_buffer *fbuffer,
+			       unsigned long len);
 
 enum {
 	TRACE_EVENT_FL_FILTERED_BIT,
@@ -501,6 +503,7 @@
  * @entry: The event itself
  * @irq_flags: The state of the interrupts at the start of the event
  * @pc: The state of the preempt count at the start of the event.
+ * @len: The length of the payload data required for stm logging.
  *
  * This is a helper function to handle triggers that require data
  * from the event itself. It also tests the event against filters and
@@ -510,12 +513,16 @@
 event_trigger_unlock_commit(struct trace_event_file *file,
 			    struct ring_buffer *buffer,
 			    struct ring_buffer_event *event,
-			    void *entry, unsigned long irq_flags, int pc)
+			    void *entry, unsigned long irq_flags, int pc,
+			    unsigned long len)
 {
 	enum event_trigger_type tt = ETT_NONE;
 
-	if (!__event_trigger_test_discard(file, buffer, event, entry, &tt))
+	if (!__event_trigger_test_discard(file, buffer, event, entry, &tt)) {
+		if (len)
+			stm_log(OST_ENTITY_FTRACE_EVENTS, entry, len);
 		trace_buffer_unlock_commit(file->tr, buffer, event, irq_flags, pc);
+	}
 
 	if (tt)
 		event_triggers_post_call(file, tt);
diff -ruw linux-4.4.115/include/linux/types.h linux-4.4.115-fbx/include/linux/types.h
--- linux-4.4.115/include/linux/types.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/linux/types.h	2019-01-22 16:16:28.407290815 +0100
@@ -9,6 +9,9 @@
 #define DECLARE_BITMAP(name,bits) \
 	unsigned long name[BITS_TO_LONGS(bits)]
 
+#define DECLARE_BITMAP_ARRAY(name,nr,bits) \
+	unsigned long name[nr][BITS_TO_LONGS(bits)]
+
 typedef __u32 __kernel_dev_t;
 
 typedef __kernel_fd_set		fd_set;
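
DECLARE_BITMAP_ARRAY() declares nr independent bitmaps of bits bits each;
every row can be used with the normal bitmap API. For example:

	/* expands to: unsigned long masks[4][BITS_TO_LONGS(256)]; */
	DECLARE_BITMAP_ARRAY(masks, 4, 256);

	bitmap_zero(masks[2], 256);
	set_bit(7, masks[2]);
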
diff -ruw linux-4.4.115/include/linux/uaccess.h linux-4.4.115-fbx/include/linux/uaccess.h
--- linux-4.4.115/include/linux/uaccess.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/linux/uaccess.h	2019-10-29 09:26:25.493221282 +0100
@@ -111,4 +111,11 @@
 #define probe_kernel_address(addr, retval)		\
 	probe_kernel_read(&retval, addr, sizeof(retval))
 
+#ifndef user_access_begin
+#define user_access_begin() do { } while (0)
+#define user_access_end() do { } while (0)
+#define unsafe_get_user(x, ptr, err) do { if (unlikely(__get_user(x, ptr))) goto err; } while (0)
+#define unsafe_put_user(x, ptr, err) do { if (unlikely(__put_user(x, ptr))) goto err; } while (0)
+#endif
+
 #endif		/* __LINUX_UACCESS_H__ */
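
The unsafe_*_user() fallbacks above are used between user_access_begin() and
user_access_end() with a caller-local error label; access_ok() must already
have been performed. The usage pattern:

	long example_read_pair(u32 __user *uaddr, u32 *a, u32 *b)
	{
		user_access_begin();
		unsafe_get_user(*a, uaddr, efault);
		unsafe_get_user(*b, uaddr + 1, efault);
		user_access_end();
		return 0;
	efault:
		user_access_end();
		return -EFAULT;
	}
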
diff -ruw linux-4.4.115/include/linux/usb/gadget.h linux-4.4.115-fbx/include/linux/usb/gadget.h
--- linux-4.4.115/include/linux/usb/gadget.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/linux/usb/gadget.h	2019-10-29 09:26:25.493221282 +0100
@@ -24,9 +24,82 @@
 #include <linux/types.h>
 #include <linux/workqueue.h>
 #include <linux/usb/ch9.h>
+#include <linux/pm_runtime.h>
 
 struct usb_ep;
 
+enum ep_type {
+	EP_TYPE_NORMAL = 0,
+	EP_TYPE_GSI,
+};
+
+/* Operations codes for GSI enabled EPs */
+enum gsi_ep_op {
+	GSI_EP_OP_CONFIG = 0,
+	GSI_EP_OP_STARTXFER,
+	GSI_EP_OP_STORE_DBL_INFO,
+	GSI_EP_OP_ENABLE_GSI,
+	GSI_EP_OP_UPDATEXFER,
+	GSI_EP_OP_RING_IN_DB,
+	GSI_EP_OP_ENDXFER,
+	GSI_EP_OP_GET_CH_INFO,
+	GSI_EP_OP_GET_XFER_IDX,
+	GSI_EP_OP_PREPARE_TRBS,
+	GSI_EP_OP_FREE_TRBS,
+	GSI_EP_OP_SET_CLR_BLOCK_DBL,
+	GSI_EP_OP_CHECK_FOR_SUSPEND,
+	GSI_EP_OP_DISABLE,
+};
+
+/*
+ * @buf_base_addr: Base pointer to buffer allocated for each GSI enabled EP.
+ *	TRBs point to buffers that are split from this pool. The size of the
+ *	buffer is num_bufs times buf_len. num_bufs and buf_len are determined
+ *	based on desired performance and aggregation size.
+ * @dma: DMA address corresponding to buf_base_addr.
+ * @num_bufs: Number of buffers associated with the GSI enabled EP. This
+ *	corresponds to the number of non-zlp TRBs allocated for the EP.
+ *	The value is determined based on desired performance for the EP.
+ * @buf_len: Size of each individual buffer is determined based on aggregation
+ *	negotiated as per the protocol. In case of no aggregation supported by
+ *	the protocol, we use default values.
+ */
+struct usb_gsi_request {
+	void *buf_base_addr;
+	dma_addr_t dma;
+	size_t num_bufs;
+	size_t buf_len;
+};
+
+/*
+ * @last_trb_addr: Address (LSB - based on alignment restrictions) of
+ *	last TRB in queue. Used to identify rollover case.
+ * @const_buffer_size: TRB buffer size in KB (similar to IPA aggregation
+ *	configuration). Must be aligned to Max USB Packet Size.
+ *	Should be 1 <= const_buffer_size <= 31.
+ * @depcmd_low_addr: Used by GSI hardware to write "Update Transfer" cmd
+ * @depcmd_hi_addr: Used to write "Update Transfer" command.
+ * @gevntcount_low_addr: GEVNCOUNT low address for GSI hardware to read and
+ *	clear processed events.
+ * @gevntcount_hi_addr:	GEVNCOUNT high address.
+ * @xfer_ring_len: length of transfer ring in bytes (must be integral
+ *	multiple of TRB size - 16B for xDCI).
+ * @xfer_ring_base_addr: physical base address of transfer ring. Address must
+ *	be aligned to xfer_ring_len rounded to power of two.
+ * @ch_req: Used to pass request specific info for certain operations on GSI EP
+ */
+struct gsi_channel_info {
+	u16 last_trb_addr;
+	u8 const_buffer_size;
+	u32 depcmd_low_addr;
+	u8 depcmd_hi_addr;
+	u32 gevntcount_low_addr;
+	u8 gevntcount_hi_addr;
+	u16 xfer_ring_len;
+	u64 xfer_ring_base_addr;
+	struct usb_gsi_request *ch_req;
+};
+
 /**
  * struct usb_request - describes one i/o request
  * @buf: Buffer used for data.  Always provide this; some controllers
@@ -46,6 +119,11 @@
  *     by adding a zero length packet as needed;
  * @short_not_ok: When reading data, makes short packets be
  *     treated as errors (queue stops advancing till cleanup).
+ * @dma_pre_mapped: Tells the USB core driver whether this request should be
+ *	DMA-mapped before it is queued to the USB HW. When set to true, it means
+ *	that the request has already been mapped in advance and therefore the
+ *	USB core driver does NOT need to do DMA-mapping when the request is
+ *	queued to the USB HW.
  * @complete: Function called when request completes, so this request and
  *	its buffer may be re-used.  The function will always be called with
  *	interrupts disabled, and it must not sleep.
@@ -69,6 +147,7 @@
  *	Note that for writes (IN transfers) some data bytes may still
  *	reside in a device-side FIFO when the request is reported as
  *	complete.
+ * @udc_priv: Vendor private data in usage by the UDC.
  *
  * These are allocated/freed through the endpoint they're used with.  The
  * hardware's driver can add extra per-request data to the memory it returns,
@@ -101,6 +180,7 @@
 	unsigned		no_interrupt:1;
 	unsigned		zero:1;
 	unsigned		short_not_ok:1;
+	unsigned		dma_pre_mapped:1;
 
 	void			(*complete)(struct usb_ep *ep,
 					struct usb_request *req);
@@ -109,6 +189,7 @@
 
 	int			status;
 	unsigned		actual;
+	unsigned		udc_priv;
 };
 
 /*-------------------------------------------------------------------------*/
@@ -138,6 +219,8 @@
 
 	int (*fifo_status) (struct usb_ep *ep);
 	void (*fifo_flush) (struct usb_ep *ep);
+	int (*gsi_ep_op)(struct usb_ep *ep, void *op_data,
+		enum gsi_ep_op op);
 };
 
 /**
@@ -201,6 +284,10 @@
  *	enabled and remains valid until the endpoint is disabled.
  * @comp_desc: In case of SuperSpeed support, this is the endpoint companion
  *	descriptor that is used to configure the endpoint
+ * @ep_type: Used to specify the type of EP, e.g. normal vs. h/w accelerated.
+ * @ep_intr_num: Interrupter number for the EP.
+ * @endless: When an endless transfer is being initiated, this is set to
+ *	disable the USB event interrupt for a few events.
  *
  * the bus controller driver lists all the general purpose endpoints in
  * gadget->ep_list.  the control endpoint (gadget->ep0) is not in that list,
@@ -224,6 +311,10 @@
 	u8			address;
 	const struct usb_endpoint_descriptor	*desc;
 	const struct usb_ss_ep_comp_descriptor	*comp_desc;
+	enum ep_type		ep_type;
+	u8			ep_num;
+	u8			ep_intr_num;
+	bool			endless;
 };
 
 /*-------------------------------------------------------------------------*/
@@ -526,7 +617,20 @@
 		ep->ops->fifo_flush(ep);
 }
 
+/**
+ * usb_gsi_ep_op - performs operation on GSI accelerated EP based on EP op code
+ *
+ * Operations such as EP configuration, TRB allocation, StartXfer etc.
+ * See gsi_ep_op for more details.
+ */
+static inline int usb_gsi_ep_op(struct usb_ep *ep,
+		struct usb_gsi_request *req, enum gsi_ep_op op)
+{
+	if (ep->ops->gsi_ep_op)
+		return ep->ops->gsi_ep_op(ep, req, op);
 
+	return -EOPNOTSUPP;
+}
 /*-------------------------------------------------------------------------*/
 
 struct usb_dcd_config_params {
@@ -547,10 +651,12 @@
 struct usb_gadget_ops {
 	int	(*get_frame)(struct usb_gadget *);
 	int	(*wakeup)(struct usb_gadget *);
+	int	(*func_wakeup)(struct usb_gadget *, int interface_id);
 	int	(*set_selfpowered) (struct usb_gadget *, int is_selfpowered);
 	int	(*vbus_session) (struct usb_gadget *, int is_active);
 	int	(*vbus_draw) (struct usb_gadget *, unsigned mA);
 	int	(*pullup) (struct usb_gadget *, int is_on);
+	int	(*restart)(struct usb_gadget *);
 	int	(*ioctl)(struct usb_gadget *,
 				unsigned code, unsigned long param);
 	void	(*get_config_params)(struct usb_dcd_config_params *);
@@ -646,6 +752,7 @@
 	unsigned			is_selfpowered:1;
 	unsigned			deactivated:1;
 	unsigned			connected:1;
+	bool                            remote_wakeup;
 };
 #define work_to_gadget(w)	(container_of((w), struct usb_gadget, work))
 
@@ -774,6 +881,26 @@
 }
 
 /**
+ * usb_gadget_func_wakeup - send a function remote wakeup notification
+ * to the host connected to this gadget
+ * @gadget: controller used to wake up the host
+ * @interface_id: the interface which triggered the remote wakeup event
+ *
+ * Returns zero on success. Otherwise, negative error code is returned.
+ */
+static inline int usb_gadget_func_wakeup(struct usb_gadget *gadget,
+	int interface_id)
+{
+	if (gadget->speed != USB_SPEED_SUPER)
+		return -EOPNOTSUPP;
+
+	if (!gadget->ops->func_wakeup)
+		return -EOPNOTSUPP;
+
+	return gadget->ops->func_wakeup(gadget, interface_id);
+}
+
+/**
  * usb_gadget_set_selfpowered - sets the device selfpowered feature.
  * @gadget:the device being declared as self-powered
  *
@@ -929,6 +1056,20 @@
 }
 
 /**
+ * usb_gadget_restart - software-controlled reset of USB peripheral connection
+ * @gadget:the peripheral being reset
+ *
+ * Informs the controller driver of a Vbus LOW followed by a Vbus HIGH
+ * notification. This performs a full hardware reset and re-initialization.
+ */
+static inline int usb_gadget_restart(struct usb_gadget *gadget)
+{
+	if (!gadget->ops->restart)
+		return -EOPNOTSUPP;
+	return gadget->ops->restart(gadget);
+}
+
+/**
  * usb_gadget_deactivate - deactivate function which is not ready to work
  * @gadget: the peripheral being deactivated
  *
@@ -987,6 +1128,129 @@
 	return 0;
 }
 
+/**
+ * usb_gadget_autopm_get - increment PM-usage counter of usb gadget's parent
+ * device.
+ * @gadget: usb gadget whose parent device counter is incremented
+ *
+ * This routine should be called by a function driver when it wants to use the
+ * gadget's parent device and needs to guarantee that it is not suspended. In
+ * addition, the routine prevents subsequent autosuspends of the gadget's
+ * parent device. However, if the autoresume fails, the counter is
+ * re-decremented.
+ *
+ * This routine can run only in process context.
+ */
+static inline int usb_gadget_autopm_get(struct usb_gadget *gadget)
+{
+	int status = -ENODEV;
+
+	if (!gadget || !gadget->dev.parent)
+		return status;
+
+	status = pm_runtime_get_sync(gadget->dev.parent);
+	if (status < 0)
+		pm_runtime_put_sync(gadget->dev.parent);
+
+	if (status > 0)
+		status = 0;
+	return status;
+}
+
+/**
+ * usb_gadget_autopm_get_async - increment PM-usage counter of usb gadget's
+ * parent device.
+ * @gadget: usb gadget whose parent device counter is incremented
+ *
+ * This routine increments the @gadget parent device's PM-usage counter and
+ * queues an autoresume request if the device is suspended. It does not
+ * autoresume the device directly (it only queues a request). After a
+ * successful call, the device may not yet be resumed.
+ *
+ * This routine can run in atomic context.
+ */
+static inline int usb_gadget_autopm_get_async(struct usb_gadget *gadget)
+{
+	int status = -ENODEV;
+
+	if (!gadget || !gadget->dev.parent)
+		return status;
+
+	status = pm_runtime_get(gadget->dev.parent);
+	if (status < 0 && status != -EINPROGRESS)
+		pm_runtime_put_noidle(gadget->dev.parent);
+
+	if (status > 0 || status == -EINPROGRESS)
+		status = 0;
+	return status;
+}
+
+/**
+ * usb_gadget_autopm_get_noresume - increment PM-usage counter of usb gadget's
+ * parent device.
+ * @gadget: usb gadget whose parent device counter is incremented
+ *
+ * This routine increments PM-usage count of @gadget parent device but does not
+ * carry out an autoresume.
+ *
+ * This routine can run in atomic context.
+ */
+static inline void usb_gadget_autopm_get_noresume(struct usb_gadget *gadget)
+{
+	if (gadget && gadget->dev.parent)
+		pm_runtime_get_noresume(gadget->dev.parent);
+}
+
+/**
+ * usb_gadget_autopm_put - decrement PM-usage counter of usb gadget's parent
+ * device.
+ * @gadget: usb gadget whose parent device counter is decremented.
+ *
+ * This routine should be called by a function driver when it is finished using
+ * the @gadget parent device and wants to allow it to autosuspend. It
+ * decrements the PM-usage counter of the @gadget parent device; when the
+ * counter reaches 0, a delayed autosuspend request is attempted.
+ *
+ * This routine can run only in process context.
+ */
+static inline void usb_gadget_autopm_put(struct usb_gadget *gadget)
+{
+	if (gadget && gadget->dev.parent)
+		pm_runtime_put_sync(gadget->dev.parent);
+}
+
+/**
+ * usb_gadget_autopm_put_async - decrement PM-usage counter of usb gadget's
+ * parent device.
+ * @gadget: usb gadget whose parent device counter is decremented.
+ *
+ * This routine decrements PM-usage counter of @gadget parent device and
+ * schedules a delayed autosuspend request if the counter is <= 0.
+ *
+ * This routine can run in atomic context.
+ */
+static inline void usb_gadget_autopm_put_async(struct usb_gadget *gadget)
+{
+	if (gadget && gadget->dev.parent)
+		pm_runtime_put(gadget->dev.parent);
+}
+
+/**
+ * usb_gadget_autopm_put_no_suspend - decrement PM-usage counter of usb
+ * gadget's parent device.
+ * @gadget: usb gadget whose parent device counter is decremented.
+ *
+ * This routine decrements PM-usage counter of @gadget parent device but does
+ * not carry out an autosuspend.
+ *
+ * This routine can run in atomic context.
+ */
+static inline void usb_gadget_autopm_put_no_suspend(struct usb_gadget *gadget)
+{
+	if (gadget && gadget->dev.parent)
+		pm_runtime_put_noidle(gadget->dev.parent);
+}
+
 /*-------------------------------------------------------------------------*/
 
 /**
@@ -1196,6 +1460,24 @@
 		struct usb_descriptor_header *otg_desc);
 /*-------------------------------------------------------------------------*/
 
+/**
+ * usb_func_ep_queue - queues (submits) an I/O request to a function endpoint.
+ * This function is similar to the usb_ep_queue function, but in addition it
+ * also checks whether the function is in Super Speed USB Function Suspend
+ * state, and if so a Function Wake notification is sent to the host
+ * (USB 3.0 spec, section 9.2.5.2).
+ * @func: the function which issues the USB I/O request.
+ * @ep: the endpoint associated with the request
+ * @req: the request being submitted
+ * @gfp_flags: GFP_* flags to use in case the lower level driver couldn't
+ *	pre-allocate all necessary memory with the request.
+ *
+ */
+int usb_func_ep_queue(struct usb_function *func, struct usb_ep *ep,
+				struct usb_request *req, gfp_t gfp_flags);
+
+/*-------------------------------------------------------------------------*/
+
 /* utility to simplify map/unmap of usb_requests to/from DMA */
 
 extern int usb_gadget_map_request(struct usb_gadget *gadget,
@@ -1259,5 +1541,8 @@
 extern void usb_ep_autoconfig_release(struct usb_ep *);
 
 extern void usb_ep_autoconfig_reset(struct usb_gadget *);
+extern struct usb_ep *usb_ep_autoconfig_by_name(struct usb_gadget *,
+			struct usb_endpoint_descriptor *,
+			const char *ep_name);
 
 #endif /* __LINUX_USB_GADGET_H */
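
The usb_gadget_autopm_*() helpers mirror the host-side usb_autopm_*() API
and bracket sections where a function driver needs the UDC's parent device
resumed. A hedged sketch (reaching the gadget through f->config->cdev is the
usual composite-framework path, assumed here):

	static int example_do_io(struct usb_function *f)
	{
		struct usb_gadget *gadget = f->config->cdev->gadget;
		int ret;

		ret = usb_gadget_autopm_get(gadget);	/* process context */
		if (ret)
			return ret;

		/* ... queue requests, e.g. with usb_func_ep_queue() ... */

		usb_gadget_autopm_put(gadget);
		return 0;
	}
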
diff -ruw linux-4.4.115/include/linux/usb/hcd.h linux-4.4.115-fbx/include/linux/usb/hcd.h
--- linux-4.4.115/include/linux/usb/hcd.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/linux/usb/hcd.h	2019-10-29 09:26:25.493221282 +0100
@@ -397,6 +397,13 @@
 	/* Call for power on/off the port if necessary */
 	int	(*port_power)(struct usb_hcd *hcd, int portnum, bool enable);
 
+	int (*sec_event_ring_setup)(struct usb_hcd *hcd, unsigned intr_num);
+	int (*sec_event_ring_cleanup)(struct usb_hcd *hcd, unsigned intr_num);
+	dma_addr_t (*get_sec_event_ring_dma_addr)(struct usb_hcd *hcd,
+			unsigned intr_num);
+	dma_addr_t (*get_xfer_ring_dma_addr)(struct usb_hcd *hcd,
+			struct usb_device *udev, struct usb_host_endpoint *ep);
+	int (*get_core_id)(struct usb_hcd *hcd);
 };
 
 static inline int hcd_giveback_urb_in_bh(struct usb_hcd *hcd)
@@ -435,6 +442,17 @@
 		struct usb_host_interface *old_alt,
 		struct usb_host_interface *new_alt);
 extern int usb_hcd_get_frame_number(struct usb_device *udev);
+extern int usb_hcd_sec_event_ring_setup(struct usb_device *udev,
+	unsigned intr_num);
+extern int usb_hcd_sec_event_ring_cleanup(struct usb_device *udev,
+	unsigned intr_num);
+extern dma_addr_t
+usb_hcd_get_sec_event_ring_dma_addr(struct usb_device *udev,
+		unsigned intr_num);
+extern dma_addr_t
+usb_hcd_get_xfer_ring_dma_addr(struct usb_device *udev,
+	struct usb_host_endpoint *ep);
+extern int usb_hcd_get_controller_id(struct usb_device *udev);
 
 extern struct usb_hcd *usb_create_hcd(const struct hc_driver *driver,
 		struct device *dev, const char *bus_name);
@@ -484,7 +502,7 @@
 extern void usb_hcd_poll_rh_status(struct usb_hcd *hcd);
 extern void usb_wakeup_notification(struct usb_device *hdev,
 		unsigned int portnum);
-
+extern void usb_flush_hub_wq(void);
 extern void usb_hcd_start_port_resume(struct usb_bus *bus, int portnum);
 extern void usb_hcd_end_port_resume(struct usb_bus *bus, int portnum);
 
diff -ruw linux-4.4.115/include/linux/usb/msm_hsusb.h linux-4.4.115-fbx/include/linux/usb/msm_hsusb.h
--- linux-4.4.115/include/linux/usb/msm_hsusb.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/linux/usb/msm_hsusb.h	2019-01-22 16:16:28.415290888 +0100
@@ -1,8 +1,8 @@
-/* linux/include/asm-arm/arch-msm/hsusb.h
+/* include/linux/usb/msm_hsusb.h
  *
  * Copyright (C) 2008 Google, Inc.
  * Author: Brian Swetland <swetland@google.com>
- * Copyright (c) 2009-2011, Code Aurora Forum. All rights reserved.
+ * Copyright (c) 2009-2016, The Linux Foundation. All rights reserved.
  *
  * This software is licensed under the terms of the GNU General Public
  * License version 2, as published by the Free Software Foundation, and
@@ -20,8 +20,24 @@
 
 #include <linux/extcon.h>
 #include <linux/types.h>
+#include <linux/usb/ch9.h>
+#include <linux/usb/gadget.h>
 #include <linux/usb/otg.h>
-#include <linux/clk.h>
+/*
+ * The following are bit fields describing the usb_request.udc_priv word.
+ * These bit fields are set by function drivers that wish to queue
+ * usb_requests with sps/bam parameters.
+ */
+#define MSM_PIPE_ID_MASK		(0x1F)
+#define MSM_TX_PIPE_ID_OFS		(16)
+#define MSM_SPS_MODE			BIT(5)
+#define MSM_IS_FINITE_TRANSFER		BIT(6)
+#define MSM_PRODUCER			BIT(7)
+#define MSM_DISABLE_WB			BIT(8)
+#define MSM_ETD_IOC			BIT(9)
+#define MSM_INTERNAL_MEM		BIT(10)
+#define MSM_VENDOR_ID			BIT(16)
+
 
 /**
  * OTG control
@@ -99,6 +115,25 @@
 };
 
 /**
+ * Supported USB controllers
+ */
+enum usb_ctrl {
+	DWC3_CTRL = 0,	/* DWC3 controller */
+	CI_CTRL,	/* ChipIdea controller */
+	HSIC_CTRL,	/* HSIC controller */
+	NUM_CTRL,
+};
+
+
+/**
+ * USB ID state
+ */
+enum usb_id_state {
+	USB_ID_GROUND = 0,
+	USB_ID_FLOAT,
+};
+
+/**
  * struct msm_otg_platform_data - platform device data
  *              for msm_otg driver.
  * @phy_init_seq: PHY configuration sequence values. Value of -1 is reserved as
@@ -131,6 +166,21 @@
 	struct extcon_dev		*extcon;
 };
 
+
+/* phy related flags */
+#define ENABLE_DP_MANUAL_PULLUP		BIT(0)
+#define ENABLE_SECONDARY_PHY		BIT(1)
+#define PHY_HOST_MODE			BIT(2)
+#define PHY_CHARGER_CONNECTED		BIT(3)
+#define PHY_VBUS_VALID_OVERRIDE		BIT(4)
+#define DEVICE_IN_SS_MODE		BIT(5)
+#define PHY_LANE_A			BIT(6)
+#define PHY_LANE_B			BIT(7)
+#define PHY_HSFS_MODE			BIT(8)
+#define PHY_LS_MODE			BIT(9)
+
+#define USB_NUM_BUS_CLOCKS      3
+
 /**
  * struct msm_otg: OTG driver data. Shared by HCD and DCD.
  * @otg: USB OTG Transceiver structure.
@@ -197,4 +247,94 @@
 	struct notifier_block reboot;
 };
 
+#ifdef CONFIG_USB_BAM
+void msm_bam_set_usb_host_dev(struct device *dev);
+void msm_bam_set_hsic_host_dev(struct device *dev);
+void msm_bam_wait_for_usb_host_prod_granted(void);
+void msm_bam_wait_for_hsic_host_prod_granted(void);
+bool msm_bam_hsic_lpm_ok(void);
+void msm_bam_usb_host_notify_on_resume(void);
+void msm_bam_hsic_host_notify_on_resume(void);
+bool msm_bam_hsic_host_pipe_empty(void);
+bool msm_usb_bam_enable(enum usb_ctrl ctrl, bool bam_enable);
+#else
+static inline void msm_bam_set_usb_host_dev(struct device *dev) {}
+static inline void msm_bam_set_hsic_host_dev(struct device *dev) {}
+static inline void msm_bam_wait_for_usb_host_prod_granted(void) {}
+static inline void msm_bam_wait_for_hsic_host_prod_granted(void) {}
+static inline bool msm_bam_hsic_lpm_ok(void) { return true; }
+static inline void msm_bam_hsic_host_notify_on_resume(void) {}
+static inline void msm_bam_usb_host_notify_on_resume(void) {}
+static inline bool msm_bam_hsic_host_pipe_empty(void) { return true; }
+static inline bool msm_usb_bam_enable(enum usb_ctrl ctrl, bool bam_enable)
+{
+	return true;
+}
+#endif
+
+/* CONFIG_PM */
+#ifdef CONFIG_PM
+static inline int get_pm_runtime_counter(struct device *dev)
+{
+	return atomic_read(&dev->power.usage_count);
+}
+#else /* !CONFIG_PM */
+static inline int get_pm_runtime_counter(struct device *dev) { return -ENOSYS; }
+#endif
+
+#ifdef CONFIG_USB_CI13XXX_MSM
+void msm_hw_bam_disable(bool bam_disable);
+void msm_usb_irq_disable(bool disable);
+#else
+static inline void msm_hw_bam_disable(bool bam_disable)
+{
+}
+
+static inline void msm_usb_irq_disable(bool disable)
+{
+}
+#endif
+
+#ifdef CONFIG_USB_DWC3_QCOM
+int msm_ep_config(struct usb_ep *ep, struct usb_request *request);
+int msm_ep_unconfig(struct usb_ep *ep);
+void dwc3_tx_fifo_resize_request(struct usb_ep *ep, bool qdss_enable);
+int msm_data_fifo_config(struct usb_ep *ep, phys_addr_t addr, u32 size,
+	u8 dst_pipe_idx);
+bool msm_dwc3_reset_ep_after_lpm(struct usb_gadget *gadget);
+int msm_dwc3_reset_dbm_ep(struct usb_ep *ep);
+
+#else
+static inline int msm_data_fifo_config(struct usb_ep *ep, phys_addr_t addr,
+	u32 size, u8 dst_pipe_idx)
+{
+	return -ENODEV;
+}
+
+static inline int msm_ep_config(struct usb_ep *ep, struct usb_request *request)
+{
+	return -ENODEV;
+}
+
+static inline int msm_ep_unconfig(struct usb_ep *ep)
+{
+	return -ENODEV;
+}
+
+static inline void dwc3_tx_fifo_resize_request(
+					struct usb_ep *ep, bool qdss_enable)
+{
+}
+
+static inline bool msm_dwc3_reset_ep_after_lpm(struct usb_gadget *gadget)
+{
+	return false;
+}
+
+static inline int msm_dwc3_reset_dbm_ep(struct usb_ep *ep)
+{
+	return -ENODEV;
+}
+
+#endif
 #endif
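
For illustration, a function driver that wants its transfers routed through an SPS/BAM producer pipe would compose the usb_request.udc_priv word from the bit fields defined above; the helper name and pipe id are hypothetical:

	/* Sketch: mark a request as a BAM producer transfer on pipe_id. */
	static void my_mark_bam_producer(struct usb_request *req, u8 pipe_id)
	{
		req->udc_priv = MSM_SPS_MODE | MSM_PRODUCER |
				(pipe_id & MSM_PIPE_ID_MASK);
	}
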
diff -ruw linux-4.4.115/include/linux/usb/phy.h linux-4.4.115-fbx/include/linux/usb/phy.h
--- linux-4.4.115/include/linux/usb/phy.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/linux/usb/phy.h	2019-10-29 09:26:25.493221282 +0100
@@ -44,6 +44,7 @@
 	OTG_STATE_B_IDLE,
 	OTG_STATE_B_SRP_INIT,
 	OTG_STATE_B_PERIPHERAL,
+	OTG_STATE_B_SUSPEND,
 
 	/* extra dual-role default-b states */
 	OTG_STATE_B_WAIT_ACON,
@@ -122,6 +123,9 @@
 			enum usb_device_speed speed);
 	int	(*notify_disconnect)(struct usb_phy *x,
 			enum usb_device_speed speed);
+
+	/* reset the PHY clocks */
+	int	(*reset)(struct usb_phy *x);
 };
 
 /**
@@ -196,6 +200,15 @@
 	return x->set_vbus(x, false);
 }
 
+static inline int
+usb_phy_reset(struct usb_phy *x)
+{
+	if (x && x->reset)
+		return x->reset(x);
+
+	return 0;
+}
+
 /* for usb host and peripheral controller drivers */
 #if IS_ENABLED(CONFIG_USB_PHY)
 extern struct usb_phy *usb_get_phy(enum usb_phy_type type);
diff -ruw linux-4.4.115/include/linux/usb/xhci_pdriver.h linux-4.4.115-fbx/include/linux/usb/xhci_pdriver.h
--- linux-4.4.115/include/linux/usb/xhci_pdriver.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/linux/usb/xhci_pdriver.h	2019-01-22 16:16:28.419290924 +0100
@@ -19,9 +19,13 @@
  * @usb3_lpm_capable:	determines if this xhci platform supports USB3
  *			LPM capability
  *
+ * @imod_interval:	minimum inter-interrupt interval. Specified in
+ *			250nsec increments.
+ *
  */
 struct usb_xhci_pdata {
 	unsigned	usb3_lpm_capable:1;
+	unsigned	imod_interval;
 };
 
 #endif /* __USB_CORE_XHCI_PDRIVER_H */
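
Because @imod_interval is expressed in 250 ns units, a platform that wants, say, 1 ms of interrupt moderation programs 4000; a sketch with example field values:

	/* 1 ms = 1000000 ns; 1000000 / 250 = 4000 units of 250 ns. */
	static const struct usb_xhci_pdata my_pdata = {
		.usb3_lpm_capable = 1,
		.imod_interval	  = 4000,	/* 1 ms moderation */
	};
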
diff -ruw linux-4.4.115/include/linux/usb.h linux-4.4.115-fbx/include/linux/usb.h
--- linux-4.4.115/include/linux/usb.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/linux/usb.h	2019-10-29 09:26:25.493221282 +0100
@@ -331,6 +331,8 @@
 	struct usb_ssp_cap_descriptor	*ssp_cap;
 	struct usb_ss_container_id_descriptor	*ss_id;
 	struct usb_ptm_cap_descriptor	*ptm_cap;
+	struct usb_config_summary_descriptor	*config_summary;
+	unsigned int	num_config_summary_desc;
 };
 
 int __usb_get_extra_descriptor(char *buffer, unsigned size,
@@ -395,6 +397,15 @@
 	struct mon_bus *mon_bus;	/* non-null when associated */
 	int monitored;			/* non-zero when monitored */
 #endif
+	unsigned skip_resume:1;		/* All USB devices are brought into full
+					 * power state after system resume. It
+					 * is desirable for some buses to keep
+					 * their devices in suspend state even
+					 * after system resume. The devices
+					 * are resumed later when a remote
+					 * wakeup is detected or an interface
+					 * driver starts I/O.
+					 */
 };
 
 struct usb_dev_state;
@@ -734,6 +745,17 @@
 
 /* for drivers using iso endpoints */
 extern int usb_get_current_frame_number(struct usb_device *usb_dev);
+extern int usb_sec_event_ring_setup(struct usb_device *dev,
+	unsigned intr_num);
+extern int usb_sec_event_ring_cleanup(struct usb_device *dev,
+	unsigned intr_num);
+
+extern dma_addr_t
+usb_get_sec_event_ring_dma_addr(struct usb_device *dev,
+		unsigned intr_num);
+extern dma_addr_t usb_get_xfer_ring_dma_addr(struct usb_device *dev,
+	struct usb_host_endpoint *ep);
+extern int usb_get_controller_id(struct usb_device *dev);
 
 /* Sets up a group of bulk endpoints to support multiple stream IDs. */
 extern int usb_alloc_streams(struct usb_interface *interface,
@@ -1885,8 +1907,11 @@
 #define USB_DEVICE_REMOVE	0x0002
 #define USB_BUS_ADD		0x0003
 #define USB_BUS_REMOVE		0x0004
+#define USB_BUS_DIED		0x0005
 extern void usb_register_notify(struct notifier_block *nb);
 extern void usb_unregister_notify(struct notifier_block *nb);
+extern void usb_register_atomic_notify(struct notifier_block *nb);
+extern void usb_unregister_atomic_notify(struct notifier_block *nb);
 
 /* debugfs stuff */
 extern struct dentry *usb_debug_root;
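
A minimal sketch of a consumer of the new atomic notifier chain; only USB_BUS_DIED and the register/unregister declarations come from this patch, the callback body is illustrative:

	static int my_usb_event(struct notifier_block *nb,
				unsigned long action, void *data)
	{
		if (action == USB_BUS_DIED)
			pr_warn("USB bus died\n"); /* may run in atomic context */
		return NOTIFY_OK;
	}

	static struct notifier_block my_usb_nb = {
		.notifier_call = my_usb_event,
	};

	/* usb_register_atomic_notify(&my_usb_nb); */
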
diff -ruw linux-4.4.115/include/linux/vmalloc.h linux-4.4.115-fbx/include/linux/vmalloc.h
--- linux-4.4.115/include/linux/vmalloc.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/linux/vmalloc.h	2019-01-22 16:16:28.423290960 +0100
@@ -4,6 +4,7 @@
 #include <linux/spinlock.h>
 #include <linux/init.h>
 #include <linux/list.h>
+#include <linux/llist.h>
 #include <asm/page.h>		/* pgprot_t */
 #include <linux/rbtree.h>
 
@@ -18,6 +19,8 @@
 #define VM_UNINITIALIZED	0x00000020	/* vm_struct is not fully initialized */
 #define VM_NO_GUARD		0x00000040      /* don't add guard page */
 #define VM_KASAN		0x00000080      /* has allocated kasan shadow memory */
+#define VM_LOWMEM		0x00000100	/* Tracking of direct mapped lowmem */
+
 /* bits [20..32] reserved for arch specific ioremap internals */
 
 /*
@@ -45,7 +48,7 @@
 	unsigned long flags;
 	struct rb_node rb_node;         /* address sorted rbtree */
 	struct list_head list;          /* address sorted list */
-	struct list_head purge_list;    /* "lazy purge" list */
+	struct llist_node purge_list;    /* "lazy purge" list */
 	struct vm_struct *vm;
 	struct rcu_head rcu_head;
 };
@@ -81,6 +84,7 @@
 			const void *caller);
 
 extern void vfree(const void *addr);
+extern void vfree_atomic(const void *addr);
 
 extern void *vmap(struct page **pages, unsigned int count,
 			unsigned long flags, pgprot_t prot);
@@ -158,6 +162,13 @@
 extern struct list_head vmap_area_list;
 extern __init void vm_area_add_early(struct vm_struct *vm);
 extern __init void vm_area_register_early(struct vm_struct *vm, size_t align);
+extern __init int vm_area_check_early(struct vm_struct *vm);
+#ifdef CONFIG_ENABLE_VMALLOC_SAVING
+extern void mark_vmalloc_reserved_area(void *addr, unsigned long size);
+#else
+static inline void mark_vmalloc_reserved_area(void *addr, unsigned long size)
+{ }
+#endif
 
 #ifdef CONFIG_SMP
 # ifdef CONFIG_MMU
@@ -183,7 +194,12 @@
 #endif
 
 #ifdef CONFIG_MMU
+#ifdef CONFIG_ENABLE_VMALLOC_SAVING
+extern unsigned long total_vmalloc_size;
+#define VMALLOC_TOTAL total_vmalloc_size
+#else
 #define VMALLOC_TOTAL (VMALLOC_END - VMALLOC_START)
+#endif
 #else
 #define VMALLOC_TOTAL 0UL
 #endif
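
vfree() may sleep, so a caller that drops the last reference to a vmalloc'ed buffer from non-sleepable context would use the new vfree_atomic(); a sketch using a 4.4-style timer callback (names are illustrative):

	static void my_timer_fn(unsigned long data)
	{
		void *buf = (void *)data;

		/* Safe in atomic context: the real unmap work is deferred. */
		vfree_atomic(buf);
	}
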
diff -ruw linux-4.4.115/include/linux/vm_event_item.h linux-4.4.115-fbx/include/linux/vm_event_item.h
--- linux-4.4.115/include/linux/vm_event_item.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/linux/vm_event_item.h	2019-10-29 09:26:25.497221322 +0100
@@ -21,7 +21,7 @@
 
 #define FOR_ALL_ZONES(xx) DMA_ZONE(xx) DMA32_ZONE(xx) xx##_NORMAL, HIGHMEM_ZONE(xx) xx##_MOVABLE
 
-enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
+enum vm_event_item { PGPGIN, PGPGOUT, PGPGOUTCLEAN, PSWPIN, PSWPOUT,
 		FOR_ALL_ZONES(PGALLOC),
 		PGFREE, PGACTIVATE, PGDEACTIVATE,
 		PGFAULT, PGMAJFAULT,
@@ -52,6 +52,7 @@
 		COMPACTMIGRATE_SCANNED, COMPACTFREE_SCANNED,
 		COMPACTISOLATED,
 		COMPACTSTALL, COMPACTFAIL, COMPACTSUCCESS,
+		KCOMPACTD_WAKE,
 #endif
 #ifdef CONFIG_HUGETLB_PAGE
 		HTLB_BUDDY_PGALLOC, HTLB_BUDDY_PGALLOC_FAIL,
diff -ruw linux-4.4.115/include/linux/vmpressure.h linux-4.4.115-fbx/include/linux/vmpressure.h
--- linux-4.4.115/include/linux/vmpressure.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/linux/vmpressure.h	2019-01-22 16:16:28.423290960 +0100
@@ -12,6 +12,7 @@
 struct vmpressure {
 	unsigned long scanned;
 	unsigned long reclaimed;
+	unsigned long stall;
 	/* The lock is used to keep the scanned/reclaimed above in sync. */
 	struct spinlock sr_lock;
 
@@ -25,11 +26,13 @@
 
 struct mem_cgroup;
 
-#ifdef CONFIG_MEMCG
+extern int vmpressure_notifier_register(struct notifier_block *nb);
+extern int vmpressure_notifier_unregister(struct notifier_block *nb);
 extern void vmpressure(gfp_t gfp, struct mem_cgroup *memcg,
 		       unsigned long scanned, unsigned long reclaimed);
 extern void vmpressure_prio(gfp_t gfp, struct mem_cgroup *memcg, int prio);
 
+#ifdef CONFIG_MEMCG
 extern void vmpressure_init(struct vmpressure *vmpr);
 extern void vmpressure_cleanup(struct vmpressure *vmpr);
 extern struct vmpressure *memcg_to_vmpressure(struct mem_cgroup *memcg);
@@ -40,9 +43,9 @@
 extern void vmpressure_unregister_event(struct mem_cgroup *memcg,
 					struct eventfd_ctx *eventfd);
 #else
-static inline void vmpressure(gfp_t gfp, struct mem_cgroup *memcg,
-			      unsigned long scanned, unsigned long reclaimed) {}
-static inline void vmpressure_prio(gfp_t gfp, struct mem_cgroup *memcg,
-				   int prio) {}
+static inline struct vmpressure *memcg_to_vmpressure(struct mem_cgroup *memcg)
+{
+	return NULL;
+}
 #endif /* CONFIG_MEMCG */
 #endif /* __LINUX_VMPRESSURE_H */
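
With the notifier interface now declared unconditionally, a subsystem can subscribe to vmpressure events; a sketch that assumes the chain passes the pressure level as the notifier action, which this header does not itself guarantee:

	static int my_vmpressure_event(struct notifier_block *nb,
				       unsigned long level, void *data)
	{
		pr_info("vmpressure level: %lu\n", level);
		return NOTIFY_OK;
	}

	static struct notifier_block my_vmpressure_nb = {
		.notifier_call = my_vmpressure_event,
	};

	/* vmpressure_notifier_register(&my_vmpressure_nb); */
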
diff -ruw linux-4.4.115/include/linux/vmstat.h linux-4.4.115-fbx/include/linux/vmstat.h
--- linux-4.4.115/include/linux/vmstat.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/linux/vmstat.h	2019-01-22 16:16:28.423290960 +0100
@@ -160,6 +160,26 @@
 	return x;
 }
 
+static inline unsigned long global_page_state_snapshot(enum zone_stat_item item)
+{
+	long x = atomic_long_read(&vm_stat[item]);
+
+#ifdef CONFIG_SMP
+	struct zone *zone;
+	int cpu;
+
+	for_each_online_cpu(cpu) {
+		for_each_populated_zone(zone)
+			x += per_cpu_ptr(zone->pageset,
+				cpu)->vm_stat_diff[item];
+	}
+
+	if (x < 0)
+		x = 0;
+#endif
+	return x;
+}
+
 #ifdef CONFIG_NUMA
 
 extern unsigned long node_page_state(int node, enum zone_stat_item item);
@@ -189,6 +209,7 @@
 extern void dec_zone_state(struct zone *, enum zone_stat_item);
 extern void __dec_zone_state(struct zone *, enum zone_stat_item);
 
+void quiet_vmstat(void);
 void cpu_vm_stats_fold(int cpu);
 void refresh_zone_stat_thresholds(void);
 
@@ -249,6 +270,7 @@
 
 static inline void refresh_zone_stat_thresholds(void) { }
 static inline void cpu_vm_stats_fold(int cpu) { }
+static inline void quiet_vmstat(void) { }
 
 static inline void drain_zonestat(struct zone *zone,
 			struct per_cpu_pageset *pset) { }
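
The snapshot variant folds the per-CPU vm_stat_diff deltas into the global counter, trading cost for accuracy; a sketch of its use (MY_LOW_WATERMARK is an illustrative threshold):

	static bool my_memory_low(void)
	{
		/* Folded reading: includes pending per-cpu deltas. */
		unsigned long free = global_page_state_snapshot(NR_FREE_PAGES);

		return free < MY_LOW_WATERMARK;
	}
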
diff -ruw linux-4.4.115/include/linux/workqueue.h linux-4.4.115-fbx/include/linux/workqueue.h
--- linux-4.4.115/include/linux/workqueue.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/linux/workqueue.h	2019-10-29 09:26:25.497221322 +0100
@@ -620,4 +620,10 @@
 { return 0; }
 #endif	/* CONFIG_SYSFS */
 
+#ifdef CONFIG_WQ_WATCHDOG
+void wq_watchdog_touch(int cpu);
+#else	/* CONFIG_WQ_WATCHDOG */
+static inline void wq_watchdog_touch(int cpu) { }
+#endif	/* CONFIG_WQ_WATCHDOG */
+
 #endif
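
A work item that legitimately runs for a long time can pet the new workqueue watchdog so it is not reported as a stall; a sketch (the chunked-work helpers are illustrative):

	static void my_long_running_work(struct work_struct *work)
	{
		while (my_more_chunks()) {
			my_process_one_chunk();
			/* Tell the watchdog this worker is alive, not hung. */
			wq_watchdog_touch(raw_smp_processor_id());
		}
	}
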
diff -ruw linux-4.4.115/include/linux/writeback.h linux-4.4.115-fbx/include/linux/writeback.h
--- linux-4.4.115/include/linux/writeback.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/linux/writeback.h	2019-01-22 16:16:28.427290996 +0100
@@ -224,6 +224,7 @@
 static inline void inode_detach_wb(struct inode *inode)
 {
 	if (inode->i_wb) {
+		WARN_ON_ONCE(!(inode->i_state & I_CLEAR));
 		wb_put(inode->i_wb);
 		inode->i_wb = NULL;
 	}
diff -ruw linux-4.4.115/include/linux/zsmalloc.h linux-4.4.115-fbx/include/linux/zsmalloc.h
--- linux-4.4.115/include/linux/zsmalloc.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/linux/zsmalloc.h	2019-10-29 09:26:25.497221322 +0100
@@ -41,10 +41,10 @@
 
 struct zs_pool;
 
-struct zs_pool *zs_create_pool(const char *name, gfp_t flags);
+struct zs_pool *zs_create_pool(const char *name);
 void zs_destroy_pool(struct zs_pool *pool);
 
-unsigned long zs_malloc(struct zs_pool *pool, size_t size);
+unsigned long zs_malloc(struct zs_pool *pool, size_t size, gfp_t flags);
 void zs_free(struct zs_pool *pool, unsigned long obj);
 
 void *zs_map_object(struct zs_pool *pool, unsigned long handle,
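
The gfp flags move from pool creation to each allocation, so callers now pick the allocation context per call; the updated pattern, sketched:

	static int my_zs_example(size_t size)
	{
		struct zs_pool *pool = zs_create_pool("my_pool"); /* no gfp */
		unsigned long handle;

		if (!pool)
			return -ENOMEM;

		handle = zs_malloc(pool, size, GFP_KERNEL); /* gfp per call */
		if (handle)
			zs_free(pool, handle);

		zs_destroy_pool(pool);
		return 0;
	}
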
diff -ruw linux-4.4.115/include/media/rc-map.h linux-4.4.115-fbx/include/media/rc-map.h
--- linux-4.4.115/include/media/rc-map.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/media/rc-map.h	2019-10-29 09:26:25.501221361 +0100
@@ -31,6 +31,7 @@
 	RC_TYPE_RC6_MCE		= 16,	/* MCE (Philips RC6-6A-32 subtype) protocol */
 	RC_TYPE_SHARP		= 17,	/* Sharp protocol */
 	RC_TYPE_XMP		= 18,	/* XMP protocol */
+	RC_TYPE_CEC		= 19,	/* CEC protocol */
 };
 
 #define RC_BIT_NONE		0
@@ -53,6 +54,7 @@
 #define RC_BIT_RC6_MCE		(1 << RC_TYPE_RC6_MCE)
 #define RC_BIT_SHARP		(1 << RC_TYPE_SHARP)
 #define RC_BIT_XMP		(1 << RC_TYPE_XMP)
+#define RC_BIT_CEC		(1 << RC_TYPE_CEC)
 
 #define RC_BIT_ALL	(RC_BIT_UNKNOWN | RC_BIT_OTHER | \
 			 RC_BIT_RC5 | RC_BIT_RC5X | RC_BIT_RC5_SZ | \
@@ -61,7 +63,7 @@
 			 RC_BIT_NEC | RC_BIT_SANYO | RC_BIT_MCE_KBD | \
 			 RC_BIT_RC6_0 | RC_BIT_RC6_6A_20 | RC_BIT_RC6_6A_24 | \
 			 RC_BIT_RC6_6A_32 | RC_BIT_RC6_MCE | RC_BIT_SHARP | \
-			 RC_BIT_XMP)
+			 RC_BIT_XMP | RC_BIT_CEC)
 
 
 #define RC_SCANCODE_UNKNOWN(x)			(x)
@@ -123,6 +125,7 @@
 #define RC_MAP_BEHOLD_COLUMBUS           "rc-behold-columbus"
 #define RC_MAP_BEHOLD                    "rc-behold"
 #define RC_MAP_BUDGET_CI_OLD             "rc-budget-ci-old"
+#define RC_MAP_CEC                       "rc-cec"
 #define RC_MAP_CINERGY_1400              "rc-cinergy-1400"
 #define RC_MAP_CINERGY                   "rc-cinergy"
 #define RC_MAP_DELOCK_61959              "rc-delock-61959"
diff -ruw linux-4.4.115/include/media/videobuf2-core.h linux-4.4.115-fbx/include/media/videobuf2-core.h
--- linux-4.4.115/include/media/videobuf2-core.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/media/videobuf2-core.h	2019-01-22 16:16:28.443291141 +0100
@@ -17,7 +17,7 @@
 #include <linux/poll.h>
 #include <linux/dma-buf.h>
 
-#define VB2_MAX_FRAME	(32)
+#define VB2_MAX_FRAME	(64)
 #define VB2_MAX_PLANES	(8)
 
 enum vb2_memory {
diff -ruw linux-4.4.115/include/net/addrconf.h linux-4.4.115-fbx/include/net/addrconf.h
--- linux-4.4.115/include/net/addrconf.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/net/addrconf.h	2019-10-29 09:26:25.501221361 +0100
@@ -1,8 +1,9 @@
 #ifndef _ADDRCONF_H
 #define _ADDRCONF_H
 
-#define MAX_RTR_SOLICITATIONS		3
+#define MAX_RTR_SOLICITATIONS		-1		/* unlimited */
 #define RTR_SOLICITATION_INTERVAL	(4*HZ)
+#define RTR_SOLICITATION_MAX_INTERVAL	(3600*HZ)	/* 1 hour */
 
 #define MIN_VALID_LIFETIME		(2*3600)	/* 2 hours */
 
@@ -229,6 +230,8 @@
 void addrconf_prefix_rcv(struct net_device *dev,
 			 u8 *opt, int len, bool sllao);
 
+u32 addrconf_rt_table(const struct net_device *dev, u32 default_table);
+
 /*
  *	anycast prototypes (anycast.c)
  */
diff -ruw linux-4.4.115/include/net/cfg80211.h linux-4.4.115-fbx/include/net/cfg80211.h
--- linux-4.4.115/include/net/cfg80211.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/net/cfg80211.h	2019-10-29 09:26:25.505221400 +0100
@@ -63,6 +63,25 @@
 
 struct wiphy;
 
+#define CFG80211_SCAN_BSSID 1
+#define CFG80211_CONNECT_PREV_BSSID 1
+#define CFG80211_CONNECT_BSS 1
+#define CFG80211_ABORT_SCAN 1
+#define CFG80211_UPDATE_CONNECT_PARAMS 1
+#define CFG80211_BEACON_TX_RATE_CUSTOM_BACKPORT 1
+#define CFG80211_RAND_TA_FOR_PUBLIC_ACTION_FRAME 1
+#define CFG80211_REPORT_BETTER_BSS_IN_SCHED_SCAN 1
+#define CFG80211_CONNECT_TIMEOUT 1
+#define CFG80211_CONNECT_TIMEOUT_REASON_CODE 1
+
+/* Indicate backport support for the new connect done api */
+#define CFG80211_CONNECT_DONE 1
+/* Indicate backport support for FILS SK offload in cfg80211 */
+#define CFG80211_FILS_SK_OFFLOAD_SUPPORT 1
+
+/* Indicate support for including KEK length in rekey data */
+#define CFG80211_REKEY_DATA_KEK_LEN 1
+
 /*
  * wireless hardware capability structures
  */
@@ -690,6 +709,18 @@
 	struct mac_address mac_addrs[];
 };
 
+/*
+ * cfg80211_bitrate_mask - masks for bitrate control
+ */
+struct cfg80211_bitrate_mask {
+	struct {
+		u32 legacy;
+		u8 ht_mcs[IEEE80211_HT_MCS_MASK_LEN];
+		u16 vht_mcs[NL80211_VHT_NSS_MAX];
+		enum nl80211_txrate_gi gi;
+	} control[IEEE80211_NUM_BANDS];
+};
+
 /**
  * struct cfg80211_ap_settings - AP configuration
  *
@@ -712,6 +743,9 @@
  * @p2p_opp_ps: P2P opportunistic PS
  * @acl: ACL configuration used by the drivers which has support for
  *	MAC address based access control
+ * @pbss: If set, start as a PCP instead of AP. Relevant for DMG
+ *	networks.
+ * @beacon_rate: bitrate to be used for beacons
  */
 struct cfg80211_ap_settings {
 	struct cfg80211_chan_def chandef;
@@ -730,6 +764,8 @@
 	u8 p2p_ctwindow;
 	bool p2p_opp_ps;
 	const struct cfg80211_acl_data *acl;
+	bool pbss;
+	struct cfg80211_bitrate_mask beacon_rate;
 };
 
 /**
@@ -762,6 +798,30 @@
 };
 
 /**
+ * struct iface_combination_params - input parameters for interface combinations
+ *
+ * Used to pass interface combination parameters
+ *
+ * @num_different_channels: the number of different channels we want
+ *	to use for verification
+ * @radar_detect: a bitmap where each bit corresponds to a channel
+ *	width where radar detection is needed, as in the definition of
+ *	&struct ieee80211_iface_combination.@radar_detect_widths
+ * @iftype_num: array with the number of interfaces of each interface
+ *	type.  The index is the interface type as specified in &enum
+ *	nl80211_iftype.
+ * @new_beacon_int: set this to the beacon interval of a new interface
+ *	that's not operating yet, if such is to be checked as part of
+ *	the verification
+ */
+struct iface_combination_params {
+	int num_different_channels;
+	u8 radar_detect;
+	int iftype_num[NUM_NL80211_IFTYPES];
+	u32 new_beacon_int;
+};
+
+/**
  * enum station_parameters_apply_mask - station parameter values to apply
  * @STATION_PARAM_APPLY_UAPSD: apply new uAPSD parameters (uapsd_queues, max_sp)
  * @STATION_PARAM_APPLY_CAPABILITY: apply new capability
@@ -1347,6 +1407,7 @@
  * @beacon_interval: beacon interval to use
  * @mcast_rate: multicast rate for Mesh Node [6Mbps is the default for 802.11a]
  * @basic_rates: basic rates to use when creating the mesh
+ * @beacon_rate: bitrate to be used for beacons
  *
  * These parameters are fixed when the mesh is created.
  */
@@ -1367,6 +1428,7 @@
 	u16 beacon_interval;
 	int mcast_rate[IEEE80211_NUM_BANDS];
 	u32 basic_rates;
+	struct cfg80211_bitrate_mask beacon_rate;
 };
 
 /**
@@ -1452,6 +1514,7 @@
  * @mac_addr_mask: MAC address mask used with randomisation, bits that
  *	are 0 in the mask should be randomised, bits that are 1 should
  *	be taken from the @mac_addr
+ * @bssid: BSSID to scan for (most commonly, the wildcard BSSID)
  */
 struct cfg80211_scan_request {
 	struct cfg80211_ssid *ssids;
@@ -1468,6 +1531,7 @@
 
 	u8 mac_addr[ETH_ALEN] __aligned(2);
 	u8 mac_addr_mask[ETH_ALEN] __aligned(2);
+	u8 bssid[ETH_ALEN] __aligned(2);
 
 	/* internal */
 	struct wiphy *wiphy;
@@ -1516,6 +1580,17 @@
 };
 
 /**
+ * struct cfg80211_bss_select_adjust - BSS selection with RSSI adjustment.
+ *
+ * @band: band of BSS which should match for RSSI level adjustment.
+ * @delta: value of RSSI level adjustment.
+ */
+struct cfg80211_bss_select_adjust {
+	enum nl80211_band band;
+	s8 delta;
+};
+
+/**
  * struct cfg80211_sched_scan_request - scheduled scan request description
  *
  * @ssids: SSIDs to scan for (passed in the probe_reqs in active scans)
@@ -1550,6 +1625,16 @@
  *	cycle.  The driver may ignore this parameter and start
  *	immediately (or at any other time), if this feature is not
  *	supported.
+ * @relative_rssi_set: Indicates whether @relative_rssi is set or not.
+ * @relative_rssi: Relative RSSI threshold in dB to restrict scan result
+ *	reporting in connected state to cases where a matching BSS is determined
+ *	to have better or slightly worse RSSI than the current connected BSS.
+ *	The relative RSSI threshold values are ignored in disconnected state.
+ * @rssi_adjust: delta dB of RSSI preference to be given to the BSSs that belong
+ *	to the specified band while deciding whether a better BSS is reported
+ *	using @relative_rssi. If delta is a negative number, the BSSs that
+ *	belong to the specified band will be penalized by delta dB in relative
+ *	comparisons.
  */
 struct cfg80211_sched_scan_request {
 	struct cfg80211_ssid *ssids;
@@ -1569,6 +1654,10 @@
 	u8 mac_addr[ETH_ALEN] __aligned(2);
 	u8 mac_addr_mask[ETH_ALEN] __aligned(2);
 
+	bool relative_rssi_set;
+	s8 relative_rssi;
+	struct cfg80211_bss_select_adjust rssi_adjust;
+
 	/* internal */
 	struct wiphy *wiphy;
 	struct net_device *dev;
@@ -1702,9 +1791,11 @@
  * @key_len: length of WEP key for shared key authentication
  * @key_idx: index of WEP key for shared key authentication
  * @key: WEP key for shared key authentication
- * @sae_data: Non-IE data to use with SAE or %NULL. This starts with
- *	Authentication transaction sequence number field.
- * @sae_data_len: Length of sae_data buffer in octets
+ * @auth_data: Fields and elements in Authentication frames. This contains
+ *	the authentication frame body (non-IE and IE data), excluding the
+ *	Authentication algorithm number, i.e., starting at the Authentication
+ *	transaction sequence number field.
+ * @auth_data_len: Length of auth_data buffer in octets
  */
 struct cfg80211_auth_request {
 	struct cfg80211_bss *bss;
@@ -1713,8 +1804,8 @@
 	enum nl80211_auth_type auth_type;
 	const u8 *key;
 	u8 key_len, key_idx;
-	const u8 *sae_data;
-	size_t sae_data_len;
+	const u8 *auth_data;
+	size_t auth_data_len;
 };
 
 /**
@@ -1750,6 +1841,12 @@
  * @ht_capa_mask:  The bits of ht_capa which are to be used.
  * @vht_capa: VHT capability override
  * @vht_capa_mask: VHT capability mask indicating which fields to use
+ * @fils_kek: FILS KEK for protecting (Re)Association Request/Response frame or
+ *	%NULL if FILS is not used.
+ * @fils_kek_len: Length of fils_kek in octets
+ * @fils_nonces: FILS nonces (part of AAD) for protecting (Re)Association
+ *	Request/Response frame or %NULL if FILS is not used. This field starts
+ *	with 16 octets of STA Nonce followed by 16 octets of AP Nonce.
  */
 struct cfg80211_assoc_request {
 	struct cfg80211_bss *bss;
@@ -1761,6 +1858,9 @@
 	struct ieee80211_ht_cap ht_capa;
 	struct ieee80211_ht_cap ht_capa_mask;
 	struct ieee80211_vht_cap vht_capa, vht_capa_mask;
+	const u8 *fils_kek;
+	size_t fils_kek_len;
+	const u8 *fils_nonces;
 };
 
 /**
@@ -1854,6 +1954,22 @@
 };
 
 /**
+ * struct cfg80211_bss_selection - connection parameters for BSS selection.
+ *
+ * @behaviour: requested BSS selection behaviour.
+ * @param: parameters for the requested behaviour.
+ * @band_pref: preferred band for %NL80211_BSS_SELECT_ATTR_BAND_PREF.
+ * @adjust: parameters for %NL80211_BSS_SELECT_ATTR_RSSI_ADJUST.
+ */
+struct cfg80211_bss_selection {
+	enum nl80211_bss_select_attr behaviour;
+	union {
+		enum ieee80211_band band_pref;
+		struct cfg80211_bss_select_adjust adjust;
+	} param;
+};
+
+/**
  * struct cfg80211_connect_params - Connection parameters
  *
  * This structure provides information needed to complete IEEE 802.11
@@ -1888,6 +2004,23 @@
  * @ht_capa_mask:  The bits of ht_capa which are to be used.
  * @vht_capa:  VHT Capability overrides
  * @vht_capa_mask: The bits of vht_capa which are to be used.
+ * @pbss: if set, connect to a PCP instead of AP. Valid for DMG
+ *	networks.
+ * @bss_select: criteria to be used for BSS selection.
+ * @prev_bssid: previous BSSID, if not %NULL use reassociate frame
+ * @fils_erp_username: EAP re-authentication protocol (ERP) username part of the
+ *	NAI or %NULL if not specified. This is used to construct FILS wrapped
+ *	data IE.
+ * @fils_erp_username_len: Length of @fils_erp_username in octets.
+ * @fils_erp_realm: EAP re-authentication protocol (ERP) realm part of NAI or
+ *	%NULL if not specified. This specifies the domain name of ER server and
+ *	is used to construct FILS wrapped data IE.
+ * @fils_erp_realm_len: Length of @fils_erp_realm in octets.
+ * @fils_erp_next_seq_num: The next sequence number to use in the FILS ERP
+ *	messages. This is also used to construct FILS wrapped data IE.
+ * @fils_erp_rrk: ERP re-authentication Root Key (rRK) used to derive additional
+ *	keys in FILS or %NULL if not specified.
+ * @fils_erp_rrk_len: Length of @fils_erp_rrk in octets.
  */
 struct cfg80211_connect_params {
 	struct ieee80211_channel *channel;
@@ -1910,6 +2043,33 @@
 	struct ieee80211_ht_cap ht_capa_mask;
 	struct ieee80211_vht_cap vht_capa;
 	struct ieee80211_vht_cap vht_capa_mask;
+	bool pbss;
+	struct cfg80211_bss_selection bss_select;
+	const u8 *prev_bssid;
+	const u8 *fils_erp_username;
+	size_t fils_erp_username_len;
+	const u8 *fils_erp_realm;
+	size_t fils_erp_realm_len;
+	u16 fils_erp_next_seq_num;
+	const u8 *fils_erp_rrk;
+	size_t fils_erp_rrk_len;
+};
+
+/**
+ * enum cfg80211_connect_params_changed - Connection parameters being updated
+ *
+ * This enum provides information of all connect parameters that
+ * have to be updated as part of update_connect_params() call.
+ *
+ * @UPDATE_ASSOC_IES: Indicates whether association request IEs are updated
+ * @UPDATE_FILS_ERP_INFO: Indicates that FILS connection parameters (realm,
+ *	username, erp sequence number and rrk) are updated
+ * @UPDATE_AUTH_TYPE: Indicates that Authentication type is updated
+ */
+enum cfg80211_connect_params_changed {
+	UPDATE_ASSOC_IES		= BIT(0),
+	UPDATE_FILS_ERP_INFO		= BIT(1),
+	UPDATE_AUTH_TYPE		= BIT(2),
 };
 
 /**
@@ -1930,29 +2090,33 @@
 	WIPHY_PARAM_DYN_ACK		= 1 << 5,
 };
 
-/*
- * cfg80211_bitrate_mask - masks for bitrate control
- */
-struct cfg80211_bitrate_mask {
-	struct {
-		u32 legacy;
-		u8 ht_mcs[IEEE80211_HT_MCS_MASK_LEN];
-		u16 vht_mcs[NL80211_VHT_NSS_MAX];
-		enum nl80211_txrate_gi gi;
-	} control[IEEE80211_NUM_BANDS];
-};
 /**
  * struct cfg80211_pmksa - PMK Security Association
  *
  * This structure is passed to the set/del_pmksa() method for PMKSA
  * caching.
  *
- * @bssid: The AP's BSSID.
- * @pmkid: The PMK material itself.
+ * @bssid: The AP's BSSID (may be %NULL).
+ * @pmkid: The identifier to refer a PMKSA.
+ * @pmk: The PMK for the PMKSA identified by @pmkid. This is used for key
+ *	derivation by a FILS STA. Otherwise, %NULL.
+ * @pmk_len: Length of the @pmk. The length of @pmk can differ depending on
+ *	the hash algorithm used to generate this.
+ * @ssid: SSID to specify the ESS within which a PMKSA is valid when using FILS
+ *	cache identifier (may be %NULL).
+ * @ssid_len: Length of the @ssid in octets.
+ * @cache_id: 2-octet cache identifier advertised by a FILS AP identifying the
+ *	scope of PMKSA. This is valid only if @ssid_len is non-zero (may be
+ *	%NULL).
  */
 struct cfg80211_pmksa {
 	const u8 *bssid;
 	const u8 *pmkid;
+	const u8 *pmk;
+	size_t pmk_len;
+	const u8 *ssid;
+	size_t ssid_len;
+	const u8 *cache_id;
 };
 
 /**
@@ -2127,12 +2291,14 @@
 
 /**
  * struct cfg80211_gtk_rekey_data - rekey data
- * @kek: key encryption key (NL80211_KEK_LEN bytes)
+ * @kek: key encryption key
  * @kck: key confirmation key (NL80211_KCK_LEN bytes)
  * @replay_ctr: replay counter (NL80211_REPLAY_CTR_LEN bytes)
+ * @kek_len: Length of @kek in octets
  */
 struct cfg80211_gtk_rekey_data {
 	const u8 *kek, *kck, *replay_ctr;
+	size_t kek_len;
 };
 
 /**
@@ -2321,6 +2487,8 @@
  *	the driver, and will be valid until passed to cfg80211_scan_done().
  *	For scan results, call cfg80211_inform_bss(); you can call this outside
  *	the scan/scan_done bracket too.
+ * @abort_scan: Tell the driver to abort an ongoing scan. The driver shall
+ *	indicate the status of the scan through cfg80211_scan_done().
  *
  * @auth: Request to authenticate with the specified peer
  *	(invoked with the wireless_dev mutex held)
@@ -2332,9 +2500,31 @@
  *	(invoked with the wireless_dev mutex held)
  *
  * @connect: Connect to the ESS with the specified parameters. When connected,
- *	call cfg80211_connect_result() with status code %WLAN_STATUS_SUCCESS.
- *	If the connection fails for some reason, call cfg80211_connect_result()
- *	with the status from the AP.
+ *	call cfg80211_connect_result()/cfg80211_connect_bss() with status code
+ *	%WLAN_STATUS_SUCCESS. If the connection fails for some reason, call
+ *	cfg80211_connect_result()/cfg80211_connect_bss() with the status code
+ *	from the AP or cfg80211_connect_timeout() if no frame with status code
+ *	was received.
+ *	The driver is allowed to roam to other BSSes within the ESS when the
+ *	other BSS matches the connect parameters. When such roaming is initiated
+ *	by the driver, the driver is expected to verify that the target matches
+ *	the configured security parameters and to use Reassociation Request
+ *	frame instead of Association Request frame.
+ *	The connect function can also be used to request the driver to perform a
+ *	specific roam when connected to an ESS. In that case, the prev_bssid
+ *	parameter is set to the BSSID of the currently associated BSS as an
+ *	indication of requesting reassociation.
+ *	In both the driver-initiated and new connect() call initiated roaming
+ *	cases, the result of roaming is indicated with a call to
+ *	cfg80211_roamed() or cfg80211_roamed_bss().
+ *	(invoked with the wireless_dev mutex held)
+ * @update_connect_params: Update the connect parameters while connected to a
+ *	BSS. The updated parameters can be used by driver/firmware for
+ *	subsequent BSS selection (roaming) decisions and to form the
+ *	Authentication/(Re)Association Request frames. This call does not
+ *	request an immediate disassociation or reassociation with the current
+ *	BSS, i.e., this impacts only subsequent (re)associations. The bits in
+ *	changed are defined in &enum cfg80211_connect_params_changed.
  *	(invoked with the wireless_dev mutex held)
  * @disconnect: Disconnect from the BSS/ESS.
  *	(invoked with the wireless_dev mutex held)
@@ -2593,6 +2783,7 @@
 
 	int	(*scan)(struct wiphy *wiphy,
 			struct cfg80211_scan_request *request);
+	void	(*abort_scan)(struct wiphy *wiphy, struct wireless_dev *wdev);
 
 	int	(*auth)(struct wiphy *wiphy, struct net_device *dev,
 			struct cfg80211_auth_request *req);
@@ -2605,6 +2796,10 @@
 
 	int	(*connect)(struct wiphy *wiphy, struct net_device *dev,
 			   struct cfg80211_connect_params *sme);
+	int	(*update_connect_params)(struct wiphy *wiphy,
+					 struct net_device *dev,
+					 struct cfg80211_connect_params *sme,
+					 u32 changed);
 	int	(*disconnect)(struct wiphy *wiphy, struct net_device *dev,
 			      u16 reason_code);
 
@@ -2799,6 +2994,7 @@
  *	responds to probe-requests in hardware.
  * @WIPHY_FLAG_OFFCHAN_TX: Device supports direct off-channel TX.
  * @WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL: Device supports remain-on-channel call.
+ * @WIPHY_FLAG_DFS_OFFLOAD: The driver handles all the DFS related operations.
  * @WIPHY_FLAG_SUPPORTS_5_10_MHZ: Device supports 5 MHz and 10 MHz channels.
  * @WIPHY_FLAG_HAS_CHANNEL_SWITCH: Device supports channel switch in
  *	beaconing mode (AP, IBSS, Mesh, ...).
@@ -2827,6 +3023,7 @@
 	WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL	= BIT(21),
 	WIPHY_FLAG_SUPPORTS_5_10_MHZ		= BIT(22),
 	WIPHY_FLAG_HAS_CHANNEL_SWITCH		= BIT(23),
+	WIPHY_FLAG_DFS_OFFLOAD			= BIT(24),
 };
 
 /**
@@ -2851,6 +3048,12 @@
  *	only in special cases.
  * @radar_detect_widths: bitmap of channel widths supported for radar detection
  * @radar_detect_regions: bitmap of regions supported for radar detection
+ * @beacon_int_min_gcd: This interface combination supports different
+ *	beacon intervals.
+ *	= 0 - all beacon intervals for different interfaces must be the same.
+ *	> 0 - any beacon interval for the interface part of this combination AND
+ *	      *GCD* of all beacon intervals from beaconing interfaces of this
+ *	      combination must be greater than or equal to this value.
  *
  * With this structure the driver can describe which interface
  * combinations it supports concurrently.
@@ -2909,6 +3112,7 @@
 	bool beacon_int_infra_match;
 	u8 radar_detect_widths;
 	u8 radar_detect_regions;
+	u32 beacon_int_min_gcd;
 };
 
 struct ieee80211_txrx_stypes {
@@ -3034,6 +3238,24 @@
 };
 
 /**
+ * struct wiphy_iftype_ext_capab - extended capabilities per interface type
+ * @iftype: interface type
+ * @extended_capabilities: extended capabilities supported by the driver,
+ *	additional capabilities might be supported by userspace; these are the
+ *	802.11 extended capabilities ("Extended Capabilities element") and are
+ *	in the same format as in the information element. See IEEE Std
+ *	802.11-2012 8.4.2.29 for the defined fields.
+ * @extended_capabilities_mask: mask of the valid values
+ * @extended_capabilities_len: length of the extended capabilities
+ */
+struct wiphy_iftype_ext_capab {
+	enum nl80211_iftype iftype;
+	const u8 *extended_capabilities;
+	const u8 *extended_capabilities_mask;
+	u8 extended_capabilities_len;
+};
+
+/**
  * struct wiphy - wireless hardware description
  * @reg_notifier: the driver's regulatory notification callback,
  *	note that if your driver uses wiphy_apply_custom_regulatory()
@@ -3143,6 +3365,9 @@
  * @vht_capa_mod_mask:  Specify what VHT capabilities can be over-ridden.
  *	If null, then none can be over-ridden.
  *
+ * @wdev_list: the list of associated (virtual) interfaces; this list must
+ *	not be modified by the driver, but can be read with RTNL/RCU protection.
+ *
  * @max_acl_mac_addrs: Maximum number of MAC addresses that the device
  *	supports for ACL.
  *
@@ -3150,9 +3375,14 @@
  *	additional capabilities might be supported by userspace; these are
  *	the 802.11 extended capabilities ("Extended Capabilities element")
  *	and are in the same format as in the information element. See
- *	802.11-2012 8.4.2.29 for the defined fields.
+ *	802.11-2012 8.4.2.29 for the defined fields. These are the default
+ *	extended capabilities to be used if the capabilities are not specified
+ *	for a specific interface type in iftype_ext_capab.
  * @extended_capabilities_mask: mask of the valid values
  * @extended_capabilities_len: length of the extended capabilities
+ * @iftype_ext_capab: array of extended capabilities per interface type
+ * @num_iftype_ext_capab: number of interface types for which extended
+ *	capabilities are specified separately.
  * @coalesce: packet coalescing support information
  *
  * @vendor_commands: array of vendor commands supported by the hardware
@@ -3175,6 +3405,9 @@
 *	low rssi when a frame is heard on a different channel, then it should set
  *	this variable to the maximal offset for which it can compensate.
  *	This value should be set in MHz.
+ * @bss_select_support: bitmask indicating the BSS selection criteria supported
+ *	by the driver in the .connect() callback. The bit position maps to the
+ *	attribute indices defined in &enum nl80211_bss_select_attr.
  */
 struct wiphy {
 	/* assign these fields before you register the wiphy */
@@ -3249,6 +3482,9 @@
 	const u8 *extended_capabilities, *extended_capabilities_mask;
 	u8 extended_capabilities_len;
 
+	const struct wiphy_iftype_ext_capab *iftype_ext_capab;
+	unsigned int num_iftype_ext_capab;
+
 	/* If multiple wiphys are registered and you're handed e.g.
 	 * a regular netdev with assigned ieee80211_ptr, you won't
 	 * know whether it points to a wiphy your driver has registered
@@ -3279,6 +3515,8 @@
 	const struct ieee80211_ht_cap *ht_capa_mod_mask;
 	const struct ieee80211_vht_cap *vht_capa_mod_mask;
 
+	struct list_head wdev_list;
+
 	/* the network namespace this phy lives in currently */
 	possible_net_t _net;
 
@@ -3297,6 +3535,8 @@
 	u8 max_num_csa_counters;
 	u8 max_adj_channel_rssi_comp;
 
+	u32 bss_select_support;
+
 	char priv[0] __aligned(NETDEV_ALIGN);
 };
 
@@ -3486,6 +3726,7 @@
  *	registered for unexpected class 3 frames (AP mode)
  * @conn: (private) cfg80211 software SME connection state machine data
  * @connect_keys: (private) keys to set after connection is established
+ * @conn_bss_type: connecting/connected BSS type
  * @ibss_fixed: (private) IBSS is using fixed BSSID
  * @ibss_dfs_possible: (private) IBSS may change to a DFS channel
  * @event_list: (private) list for internal event processing
@@ -3516,6 +3757,7 @@
 	u8 ssid_len, mesh_id_len, mesh_id_up_len;
 	struct cfg80211_conn *conn;
 	struct cfg80211_cached_keys *connect_keys;
+	enum ieee80211_bss_type conn_bss_type;
 
 	struct list_head event_list;
 	spinlock_t event_lock;
@@ -3889,6 +4131,32 @@
 int regulatory_hint(struct wiphy *wiphy, const char *alpha2);
 
 /**
+ * regulatory_hint_user - hint to the wireless core a regulatory domain
+ * which the driver has received from an application
+ * @alpha2: the ISO/IEC 3166 alpha2 the driver claims its regulatory domain
+ *	should be in. If @rd is set this should be NULL. Note that if you
+ *	set this to NULL you should still set rd->alpha2 to some accepted
+ *	alpha2.
+ * @user_reg_hint_type: the type of user regulatory hint.
+ *
+ * Wireless drivers can use this function to hint to the wireless core
+ * the current regulatory domain as specified by trusted applications;
+ * it is the driver's responsibility to establish which applications it
+ * trusts.
+ *
+ * The wiphy should be registered to cfg80211 prior to this call.
+ * For cfg80211 drivers this means you must first use wiphy_register(),
+ * for mac80211 drivers you must first use ieee80211_register_hw().
+ *
+ * Drivers should check the return value; it is possible to get
+ * an -ENOMEM or an -EINVAL.
+ *
+ * Return: 0 on success. -ENOMEM, -EINVAL.
+ */
+int regulatory_hint_user(const char *alpha2,
+			 enum nl80211_user_reg_hint_type user_reg_hint_type);
+
+/**
  * regulatory_set_wiphy_regd - set regdom info for self managed drivers
  * @wiphy: the wireless device we want to process the regulatory domain on
  * @rd: the regulatory domain information to use for this wiphy
@@ -4610,6 +4878,134 @@
 #endif
 
 /**
+ * struct cfg80211_connect_resp_params - Connection response params
+ * @status: Status code, %WLAN_STATUS_SUCCESS for successful connection, use
+ *	%WLAN_STATUS_UNSPECIFIED_FAILURE if your device cannot give you
+ *	the real status code for failures. If this call is used to report a
+ *	failure due to a timeout (e.g., not receiving an Authentication frame
+ *	from the AP) instead of an explicit rejection by the AP, -1 is used to
+ *	indicate that this is a failure, but without a status code.
+ *	@timeout_reason is used to report the reason for the timeout in that
+ *	case.
+ * @bssid: The BSSID of the AP (may be %NULL)
+ * @bss: Entry of bss to which STA got connected to, can be obtained through
+ *	cfg80211_get_bss() (may be %NULL). Only one parameter among @bssid and
+ *	@bss needs to be specified.
+ * @req_ie: Association request IEs (may be %NULL)
+ * @req_ie_len: Association request IEs length
+ * @resp_ie: Association response IEs (may be %NULL)
+ * @resp_ie_len: Association response IEs length
+ * @fils_kek: KEK derived from a successful FILS connection (may be %NULL)
+ * @fils_kek_len: Length of @fils_kek in octets
+ * @update_erp_next_seq_num: Boolean value to specify whether the value in
+ *	@fils_erp_next_seq_num is valid.
+ * @fils_erp_next_seq_num: The next sequence number to use in ERP message in
+ *	FILS Authentication. This value should be specified irrespective of the
+ *	status for a FILS connection.
+ * @pmk: A new PMK if derived from a successful FILS connection (may be %NULL).
+ * @pmk_len: Length of @pmk in octets
+ * @pmkid: A new PMKID if derived from a successful FILS connection or the PMKID
+ *	used for this FILS connection (may be %NULL).
+ * @timeout_reason: Reason for connection timeout. This is used when the
+ *	connection fails due to a timeout instead of an explicit rejection from
+ *	the AP. %NL80211_TIMEOUT_UNSPECIFIED is used when the timeout reason is
+ *	not known. This value is used only if @status < 0 to indicate that the
+ *	failure is due to a timeout and not due to explicit rejection by the AP.
+ *	This value is ignored in other cases (@status >= 0).
+ */
+struct cfg80211_connect_resp_params {
+	int status;
+	const u8 *bssid;
+	struct cfg80211_bss *bss;
+	const u8 *req_ie;
+	size_t req_ie_len;
+	const u8 *resp_ie;
+	size_t resp_ie_len;
+	const u8 *fils_kek;
+	size_t fils_kek_len;
+	bool update_erp_next_seq_num;
+	u16 fils_erp_next_seq_num;
+	const u8 *pmk;
+	size_t pmk_len;
+	const u8 *pmkid;
+	enum nl80211_timeout_reason timeout_reason;
+};
+
+/**
+ * cfg80211_connect_done - notify cfg80211 of connection result
+ *
+ * @dev: network device
+ * @params: connection response parameters
+ * @gfp: allocation flags
+ *
+ * It should be called by the underlying driver once execution of the connection
+ * request from connect() has been completed. This is similar to
+ * cfg80211_connect_bss(), but takes a structure pointer for connection response
+ * parameters. Only one of the functions among cfg80211_connect_bss(),
+ * cfg80211_connect_result(), cfg80211_connect_timeout(),
+ * and cfg80211_connect_done() should be called.
+ */
+void cfg80211_connect_done(struct net_device *dev,
+			   struct cfg80211_connect_resp_params *params,
+			   gfp_t gfp);
+
+/**
+ * cfg80211_connect_bss - notify cfg80211 of connection result
+ *
+ * @dev: network device
+ * @bssid: the BSSID of the AP
+ * @bss: entry of bss to which STA got connected to, can be obtained
+ *	through cfg80211_get_bss (may be %NULL)
+ * @req_ie: association request IEs (may be %NULL)
+ * @req_ie_len: association request IEs length
+ * @resp_ie: association response IEs (may be %NULL)
+ * @resp_ie_len: assoc response IEs length
+ * @status: status code, %WLAN_STATUS_SUCCESS for successful connection, use
+ *	%WLAN_STATUS_UNSPECIFIED_FAILURE if your device cannot give you
+ *	the real status code for failures. If this call is used to report a
+ *	failure due to a timeout (e.g., not receiving an Authentication frame
+ *	from the AP) instead of an explicit rejection by the AP, -1 is used to
+ *	indicate that this is a failure, but without a status code.
+ *	@timeout_reason is used to report the reason for the timeout in that
+ *	case.
+ * @gfp: allocation flags
+ * @timeout_reason: reason for connection timeout. This is used when the
+ *	connection fails due to a timeout instead of an explicit rejection from
+ *	the AP. %NL80211_TIMEOUT_UNSPECIFIED is used when the timeout reason is
+ *	not known. This value is used only if @status < 0 to indicate that the
+ *	failure is due to a timeout and not due to explicit rejection by the AP.
+ *	This value is ignored in other cases (@status >= 0).
+ *
+ * It should be called by the underlying driver once execution of the connection
+ * request from connect() has been completed. This is similar to
+ * cfg80211_connect_result(), but with the option of identifying the exact bss
+ * entry for the connection. Only one of the functions among
+ * cfg80211_connect_bss(), cfg80211_connect_result(),
+ * cfg80211_connect_timeout(), and cfg80211_connect_done() should be called.
+ */
+static inline void
+cfg80211_connect_bss(struct net_device *dev, const u8 *bssid,
+		     struct cfg80211_bss *bss, const u8 *req_ie,
+		     size_t req_ie_len, const u8 *resp_ie,
+		     size_t resp_ie_len, int status, gfp_t gfp,
+		     enum nl80211_timeout_reason timeout_reason)
+{
+	struct cfg80211_connect_resp_params params;
+
+	memset(&params, 0, sizeof(params));
+	params.status = status;
+	params.bssid = bssid;
+	params.bss = bss;
+	params.req_ie = req_ie;
+	params.req_ie_len = req_ie_len;
+	params.resp_ie = resp_ie;
+	params.resp_ie_len = resp_ie_len;
+	params.timeout_reason = timeout_reason;
+
+	cfg80211_connect_done(dev, &params, gfp);
+}
+
+/**
  * cfg80211_connect_result - notify cfg80211 of connection result
  *
  * @dev: network device
@@ -4618,18 +5014,54 @@
  * @req_ie_len: association request IEs length
  * @resp_ie: association response IEs (may be %NULL)
  * @resp_ie_len: assoc response IEs length
- * @status: status code, 0 for successful connection, use
+ * @status: status code, %WLAN_STATUS_SUCCESS for successful connection, use
  *	%WLAN_STATUS_UNSPECIFIED_FAILURE if your device cannot give you
  *	the real status code for failures.
  * @gfp: allocation flags
  *
- * It should be called by the underlying driver whenever connect() has
- * succeeded.
+ * It should be called by the underlying driver once execution of the connection
+ * request from connect() has been completed. This is similar to
+ * cfg80211_connect_bss() which allows the exact bss entry to be specified. Only
+ * one of the functions among cfg80211_connect_bss(), cfg80211_connect_result(),
+ * cfg80211_connect_timeout(), and cfg80211_connect_done() should be called.
  */
-void cfg80211_connect_result(struct net_device *dev, const u8 *bssid,
+static inline void
+cfg80211_connect_result(struct net_device *dev, const u8 *bssid,
 			     const u8 *req_ie, size_t req_ie_len,
 			     const u8 *resp_ie, size_t resp_ie_len,
-			     u16 status, gfp_t gfp);
+			u16 status, gfp_t gfp)
+{
+	cfg80211_connect_bss(dev, bssid, NULL, req_ie, req_ie_len, resp_ie,
+			     resp_ie_len, status, gfp,
+			     NL80211_TIMEOUT_UNSPECIFIED);
+}
+
+/**
+ * cfg80211_connect_timeout - notify cfg80211 of connection timeout
+ *
+ * @dev: network device
+ * @bssid: the BSSID of the AP
+ * @req_ie: association request IEs (may be %NULL)
+ * @req_ie_len: association request IEs length
+ * @gfp: allocation flags
+ * @timeout_reason: reason for connection timeout.
+ *
+ * It should be called by the underlying driver whenever connect() has failed
+ * in a sequence where no explicit authentication/association rejection was
+ * received from the AP. This could happen, e.g., due to not being able to send
+ * out the Authentication or Association Request frame or timing out while
+ * waiting for the response. Only one of the functions among
+ * cfg80211_connect_bss(), cfg80211_connect_result(),
+ * cfg80211_connect_timeout(), and cfg80211_connect_done() should be called.
+ */
+static inline void
+cfg80211_connect_timeout(struct net_device *dev, const u8 *bssid,
+			 const u8 *req_ie, size_t req_ie_len, gfp_t gfp,
+			 enum nl80211_timeout_reason timeout_reason)
+{
+	cfg80211_connect_bss(dev, bssid, NULL, req_ie, req_ie_len, NULL, 0, -1,
+			     gfp, timeout_reason);
+}
 
 /**
  * cfg80211_roamed - notify cfg80211 of roaming
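
Taken together, a driver picks exactly one of these helpers per connect() attempt; a sketch of a plausible completion path (the surrounding driver state is illustrative):

	static void my_connect_complete(struct net_device *dev, const u8 *bssid,
					u16 status, bool timed_out)
	{
		if (timed_out)
			cfg80211_connect_timeout(dev, bssid, NULL, 0,
						 GFP_KERNEL,
						 NL80211_TIMEOUT_UNSPECIFIED);
		else
			cfg80211_connect_result(dev, bssid, NULL, 0, NULL, 0,
						status, GFP_KERNEL);
	}
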
@@ -5215,6 +5647,16 @@
 void cfg80211_crit_proto_stopped(struct wireless_dev *wdev, gfp_t gfp);
 
 /**
+ * cfg80211_is_gratuitous_arp_unsolicited_na - packet is grat. ARP/unsol. NA
+ * @skb: the input packet, must be an ethernet frame already
+ *
+ * Return: %true if the packet is a gratuitous ARP or unsolicited NA packet.
+ * This is used to drop packets that shouldn't occur because the AP implements
+ * a proxy service.
+ */
+bool cfg80211_is_gratuitous_arp_unsolicited_na(struct sk_buff *skb);
+
+/**
  * ieee80211_get_num_supported_channels - get number of channels device has
  * @wiphy: the wiphy
  *
@@ -5226,36 +5668,20 @@
  * cfg80211_check_combinations - check interface combinations
  *
  * @wiphy: the wiphy
- * @num_different_channels: the number of different channels we want
- *	to use for verification
- * @radar_detect: a bitmap where each bit corresponds to a channel
- *	width where radar detection is needed, as in the definition of
- *	&struct ieee80211_iface_combination.@radar_detect_widths
- * @iftype_num: array with the numbers of interfaces of each interface
- *	type.  The index is the interface type as specified in &enum
- *	nl80211_iftype.
+ * @params: the interface combinations parameter
  *
  * This function can be called by the driver to check whether a
  * combination of interfaces and their types are allowed according to
  * the interface combinations.
  */
 int cfg80211_check_combinations(struct wiphy *wiphy,
-				const int num_different_channels,
-				const u8 radar_detect,
-				const int iftype_num[NUM_NL80211_IFTYPES]);
+				struct iface_combination_params *params);
 
 /**
  * cfg80211_iter_combinations - iterate over matching combinations
  *
  * @wiphy: the wiphy
- * @num_different_channels: the number of different channels we want
- *	to use for verification
- * @radar_detect: a bitmap where each bit corresponds to a channel
- *	width where radar detection is needed, as in the definition of
- *	&struct ieee80211_iface_combination.@radar_detect_widths
- * @iftype_num: array with the numbers of interfaces of each interface
- *	type.  The index is the interface type as specified in &enum
- *	nl80211_iftype.
+ * @params: the interface combinations parameter
  * @iter: function to call for each matching combination
  * @data: pointer to pass to iter function
  *
@@ -5264,9 +5690,7 @@
  * purposes.
  */
 int cfg80211_iter_combinations(struct wiphy *wiphy,
-			       const int num_different_channels,
-			       const u8 radar_detect,
-			       const int iftype_num[NUM_NL80211_IFTYPES],
+			       struct iface_combination_params *params,
 			       void (*iter)(const struct ieee80211_iface_combination *c,
 					    void *data),
 			       void *data);
@@ -5340,6 +5764,13 @@
 /* ethtool helper */
 void cfg80211_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info);
 
+/**
+ * cfg80211_ap_stopped - notify userspace that AP mode stopped
+ * @netdev: network device
+ * @gfp: context flags
+ */
+void cfg80211_ap_stopped(struct net_device *netdev, gfp_t gfp);
+
 /* Logging, debugging and troubleshooting/diagnostic helpers. */
 
 /* wiphy_printk helpers, similar to dev_printk */
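
With the arguments to cfg80211_check_combinations()/cfg80211_iter_combinations() now carried in struct iface_combination_params, callers zero-initialize the struct and fill only the relevant fields; the updated calling convention, sketched for an AP plus station check:

	static int my_check_ap_sta(struct wiphy *wiphy)
	{
		struct iface_combination_params params = {
			.num_different_channels = 1,
		};

		params.iftype_num[NL80211_IFTYPE_AP] = 1;
		params.iftype_num[NL80211_IFTYPE_STATION] = 1;

		return cfg80211_check_combinations(wiphy, &params);
	}
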
diff -ruw linux-4.4.115/include/net/fib_rules.h linux-4.4.115-fbx/include/net/fib_rules.h
--- linux-4.4.115/include/net/fib_rules.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/net/fib_rules.h	2019-01-22 16:16:28.451291214 +0100
@@ -8,6 +8,11 @@
 #include <net/flow.h>
 #include <net/rtnetlink.h>
 
+struct fib_kuid_range {
+	kuid_t start;
+	kuid_t end;
+};
+
 struct fib_rule {
 	struct list_head	list;
 	int			iifindex;
@@ -29,6 +34,7 @@
 	int			suppress_prefixlen;
 	char			iifname[IFNAMSIZ];
 	char			oifname[IFNAMSIZ];
+	struct fib_kuid_range	uid_range;
 	struct rcu_head		rcu;
 };
 
@@ -89,7 +95,8 @@
 	[FRA_TABLE]     = { .type = NLA_U32 }, \
 	[FRA_SUPPRESS_PREFIXLEN] = { .type = NLA_U32 }, \
 	[FRA_SUPPRESS_IFGROUP] = { .type = NLA_U32 }, \
-	[FRA_GOTO]	= { .type = NLA_U32 }
+	[FRA_GOTO]	= { .type = NLA_U32 }, \
+	[FRA_UID_RANGE]	= { .len = sizeof(struct fib_rule_uid_range) }
 
 static inline void fib_rule_get(struct fib_rule *rule)
 {
diff -ruw linux-4.4.115/include/net/flow.h linux-4.4.115-fbx/include/net/flow.h
--- linux-4.4.115/include/net/flow.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/net/flow.h	2019-01-22 16:16:28.451291214 +0100
@@ -11,6 +11,7 @@
 #include <linux/in6.h>
 #include <linux/atomic.h>
 #include <net/flow_dissector.h>
+#include <linux/uidgid.h>
 
 /*
  * ifindex generation is per-net namespace, and loopback is
@@ -38,6 +39,7 @@
 #define FLOWI_FLAG_SKIP_NH_OIF		0x08
 	__u32	flowic_secid;
 	struct flowi_tunnel flowic_tun_key;
+	kuid_t  flowic_uid;
 };
 
 union flowi_uli {
@@ -75,6 +77,7 @@
 #define flowi4_flags		__fl_common.flowic_flags
 #define flowi4_secid		__fl_common.flowic_secid
 #define flowi4_tun_key		__fl_common.flowic_tun_key
+#define flowi4_uid		__fl_common.flowic_uid
 
 	/* (saddr,daddr) must be grouped, same order as in IP header */
 	__be32			saddr;
@@ -94,7 +97,8 @@
 				      __u32 mark, __u8 tos, __u8 scope,
 				      __u8 proto, __u8 flags,
 				      __be32 daddr, __be32 saddr,
-				      __be16 dport, __be16 sport)
+				      __be16 dport, __be16 sport,
+				      kuid_t uid)
 {
 	fl4->flowi4_oif = oif;
 	fl4->flowi4_iif = LOOPBACK_IFINDEX;
@@ -105,6 +109,7 @@
 	fl4->flowi4_flags = flags;
 	fl4->flowi4_secid = 0;
 	fl4->flowi4_tun_key.tun_id = 0;
+	fl4->flowi4_uid = uid;
 	fl4->daddr = daddr;
 	fl4->saddr = saddr;
 	fl4->fl4_dport = dport;
@@ -133,6 +138,7 @@
 #define flowi6_flags		__fl_common.flowic_flags
 #define flowi6_secid		__fl_common.flowic_secid
 #define flowi6_tun_key		__fl_common.flowic_tun_key
+#define flowi6_uid		__fl_common.flowic_uid
 	struct in6_addr		daddr;
 	struct in6_addr		saddr;
 	__be32			flowlabel;
@@ -177,6 +183,7 @@
 #define flowi_flags	u.__fl_common.flowic_flags
 #define flowi_secid	u.__fl_common.flowic_secid
 #define flowi_tun_key	u.__fl_common.flowic_tun_key
+#define flowi_uid	u.__fl_common.flowic_uid
 } __attribute__((__aligned__(BITS_PER_LONG/8)));
 
 static inline struct flowi *flowi4_to_flowi(struct flowi4 *fl4)
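
Existing callers of flowi4_init_output() gain a trailing uid argument; a sketch of an updated call site taking the uid from the socket via sock_i_uid() (existing sock.h API), where oif, tos, daddr, saddr, dport, and sport are placeholders:

	struct flowi4 fl4;

	flowi4_init_output(&fl4, oif, sk->sk_mark, tos, RT_SCOPE_UNIVERSE,
			   IPPROTO_TCP, 0, daddr, saddr, dport, sport,
			   sock_i_uid(sk));	/* new trailing uid argument */
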
diff -ruw linux-4.4.115/include/net/if_inet6.h linux-4.4.115-fbx/include/net/if_inet6.h
--- linux-4.4.115/include/net/if_inet6.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/net/if_inet6.h	2019-01-22 16:16:28.455291250 +0100
@@ -201,6 +201,7 @@
 	struct ipv6_devstat	stats;
 
 	struct timer_list	rs_timer;
+	__s32			rs_interval;	/* in jiffies */
 	__u8			rs_probes;
 
 	__u8			addr_gen_mode;
diff -ruw linux-4.4.115/include/net/ip6_route.h linux-4.4.115-fbx/include/net/ip6_route.h
--- linux-4.4.115/include/net/ip6_route.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/net/ip6_route.h	2019-01-22 16:16:28.455291250 +0100
@@ -118,9 +118,10 @@
 		  const struct in6_addr *gwaddr);
 
 void ip6_update_pmtu(struct sk_buff *skb, struct net *net, __be32 mtu, int oif,
-		     u32 mark);
+		     u32 mark, kuid_t uid);
 void ip6_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, __be32 mtu);
-void ip6_redirect(struct sk_buff *skb, struct net *net, int oif, u32 mark);
+void ip6_redirect(struct sk_buff *skb, struct net *net, int oif, u32 mark,
+		  kuid_t uid);
 void ip6_redirect_no_header(struct sk_buff *skb, struct net *net, int oif,
 			    u32 mark);
 void ip6_sk_redirect(struct sk_buff *skb, struct sock *sk);
diff -ruw linux-4.4.115/include/net/ip.h linux-4.4.115-fbx/include/net/ip.h
--- linux-4.4.115/include/net/ip.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/net/ip.h	2019-10-29 09:26:25.509221439 +0100
@@ -172,6 +172,7 @@
 				/* -1 if not needed */ 
 	int	    bound_dev_if;
 	u8  	    tos;
+	kuid_t	    uid;
 }; 
 
 #define IP_REPLY_ARG_NOSRCCHECK 1
@@ -242,6 +243,8 @@
 }
 #endif
 
+extern int sysctl_reserved_port_bind;
+
 /* From inetpeer.c */
 extern int inet_peer_threshold;
 extern int inet_peer_minttl;
diff -ruw linux-4.4.115/include/net/l3mdev.h linux-4.4.115-fbx/include/net/l3mdev.h
--- linux-4.4.115/include/net/l3mdev.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/net/l3mdev.h	2019-01-22 16:16:28.459291286 +0100
@@ -51,6 +51,24 @@
 	return ifindex;
 }
 
+static inline int l3mdev_master_ifindex_by_index(struct net *net, int ifindex)
+{
+	struct net_device *dev;
+	int rc = 0;
+
+	if (likely(ifindex)) {
+		rcu_read_lock();
+
+		dev = dev_get_by_index_rcu(net, ifindex);
+		if (dev)
+			rc = l3mdev_master_ifindex_rcu(dev);
+
+		rcu_read_unlock();
+	}
+
+	return rc;
+}
+
 /* get index of an interface to use for FIB lookups. For devices
  * enslaved to an L3 master device FIB lookups are based on the
  * master index
@@ -169,6 +187,11 @@
 {
 	return 0;
 }
+
+static inline int l3mdev_master_ifindex_by_index(struct net *net, int ifindex)
+{
+	return 0;
+}
 
 static inline int l3mdev_fib_oif_rcu(struct net_device *dev)
 {
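
l3mdev_master_ifindex_by_index() resolves an interface index to its L3
master (e.g. a VRF) without the caller holding a device reference, and the
!CONFIG_NET_L3_MASTER_DEV stub keeps callers unconditional. A hypothetical
use when validating a lookup oif:

	static int example_lookup_oif(struct net *net, int oif)
	{
		int master = l3mdev_master_ifindex_by_index(net, oif);

		/* FIB lookups use the master's index when enslaved */
		return master ? : oif;
	}
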
diff -ruw linux-4.4.115/include/net/mac80211.h linux-4.4.115-fbx/include/net/mac80211.h
--- linux-4.4.115/include/net/mac80211.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/net/mac80211.h	2019-10-29 09:26:25.513221478 +0100
@@ -1014,6 +1014,14 @@
  * @RX_FLAG_AMPDU_DELIM_CRC_KNOWN: The delimiter CRC field is known (the CRC
  *	is stored in the @ampdu_delimiter_crc field)
  * @RX_FLAG_LDPC: LDPC was used
+ * @RX_FLAG_ONLY_MONITOR: Report frame only to monitor interfaces without
+ *	processing it in any regular way.
+ *	This is useful if drivers offload some frames but still want to report
+ *	them for sniffing purposes.
+ * @RX_FLAG_SKIP_MONITOR: Process and report frame to all interfaces except
+ *	monitor interfaces.
+ *	This is useful if drivers offload some frames but still want to report
+ *	them for sniffing purposes.
  * @RX_FLAG_STBC_MASK: STBC 2 bit bitmask. 1 - Nss=1, 2 - Nss=2, 3 - Nss=3
  * @RX_FLAG_10MHZ: 10 MHz (half channel) was used
  * @RX_FLAG_5MHZ: 5 MHz (quarter channel) was used
@@ -1054,6 +1062,8 @@
 	RX_FLAG_MACTIME_END		= BIT(21),
 	RX_FLAG_VHT			= BIT(22),
 	RX_FLAG_LDPC			= BIT(23),
+	RX_FLAG_ONLY_MONITOR		= BIT(24),
+	RX_FLAG_SKIP_MONITOR		= BIT(25),
 	RX_FLAG_STBC_MASK		= BIT(26) | BIT(27),
 	RX_FLAG_10MHZ			= BIT(28),
 	RX_FLAG_5MHZ			= BIT(29),
@@ -1072,6 +1082,7 @@
  * @RX_VHT_FLAG_160MHZ: 160 MHz was used
  * @RX_VHT_FLAG_BF: packet was beamformed
  */
+
 enum mac80211_rx_vht_flags {
 	RX_VHT_FLAG_80MHZ		= BIT(0),
 	RX_VHT_FLAG_160MHZ		= BIT(1),
@@ -3773,11 +3784,12 @@
  * This function must be called with BHs disabled.
  *
  * @hw: the hardware this frame came in on
+ * @sta: the station the frame was received from, or %NULL
  * @skb: the buffer to receive, owned by mac80211 after this call
  * @napi: the NAPI context
  */
-void ieee80211_rx_napi(struct ieee80211_hw *hw, struct sk_buff *skb,
-		       struct napi_struct *napi);
+void ieee80211_rx_napi(struct ieee80211_hw *hw, struct ieee80211_sta *sta,
+		       struct sk_buff *skb, struct napi_struct *napi);
 
 /**
  * ieee80211_rx - receive frame
@@ -3801,7 +3813,7 @@
  */
 static inline void ieee80211_rx(struct ieee80211_hw *hw, struct sk_buff *skb)
 {
-	ieee80211_rx_napi(hw, skb, NULL);
+	ieee80211_rx_napi(hw, NULL, skb, NULL);
 }
 
 /**
@@ -5489,4 +5501,19 @@
  */
 struct sk_buff *ieee80211_tx_dequeue(struct ieee80211_hw *hw,
 				     struct ieee80211_txq *txq);
+
+/**
+ * ieee80211_txq_get_depth - get pending frame/byte count of given txq
+ *
+ * The values are not guaranteed to be coherent with regard to each other, i.e.
+ * txq state can change half-way of this function and the caller may end up
+ * with "new" frame_cnt and "old" byte_cnt or vice-versa.
+ *
+ * @txq: pointer obtained from station or virtual interface
+ * @frame_cnt: pointer to store frame count
+ * @byte_cnt: pointer to store byte count
+ */
+void ieee80211_txq_get_depth(struct ieee80211_txq *txq,
+			     unsigned long *frame_cnt,
+			     unsigned long *byte_cnt);
 #endif /* MAC80211_H */
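
ieee80211_rx_napi() now takes the originating station, and
ieee80211_txq_get_depth() exposes a snapshot of a txq's backlog that, as the
kernel-doc above notes, need not be internally coherent. A sketch of how a
driver might consult the depth before pulling frames (thresholds arbitrary):

	static bool example_txq_has_room(struct ieee80211_txq *txq)
	{
		unsigned long frames, bytes;

		ieee80211_txq_get_depth(txq, &frames, &bytes);
		/* snapshot only: frames and bytes may disagree slightly */
		return frames < 64 && bytes < 65536;
	}
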
diff -ruw linux-4.4.115/include/net/pkt_sched.h linux-4.4.115-fbx/include/net/pkt_sched.h
--- linux-4.4.115/include/net/pkt_sched.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/net/pkt_sched.h	2019-01-22 16:16:28.471291395 +0100
@@ -124,6 +124,8 @@
 	return skb->protocol;
 }
 
+extern int tc_qdisc_flow_control(struct net_device *dev, u32 tcm_handle,
+				  int flow_enable);
 /* Calculate maximal size of packet seen by hard_start_xmit
    routine of this device.
  */
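
tc_qdisc_flow_control() is exported without kernel-doc; from the prototype
it toggles flow on the qdisc identified by tcm_handle. A guess at the call
shape (the meaning of flow_enable is assumed, not documented in this patch):

	static void example_pause_queue(struct net_device *dev, u32 handle)
	{
		tc_qdisc_flow_control(dev, handle, 0);	/* assumed: 0 pauses */
	}
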
diff -ruw linux-4.4.115/include/net/route.h linux-4.4.115-fbx/include/net/route.h
--- linux-4.4.115/include/net/route.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/net/route.h	2019-10-29 09:26:25.517221517 +0100
@@ -154,7 +154,7 @@
 	flowi4_init_output(fl4, oif, sk ? sk->sk_mark : 0, tos,
 			   RT_SCOPE_UNIVERSE, proto,
 			   sk ? inet_sk_flowi_flags(sk) : 0,
-			   daddr, saddr, dport, sport);
+			   daddr, saddr, dport, sport, sock_net_uid(net, sk));
 	if (sk)
 		security_sk_classify_flow(sk, flowi4_to_flowi(fl4));
 	return ip_route_output_flow(net, fl4, sk);
@@ -267,7 +267,8 @@
 		flow_flags |= FLOWI_FLAG_ANYSRC;
 
 	flowi4_init_output(fl4, oif, sk->sk_mark, tos, RT_SCOPE_UNIVERSE,
-			   protocol, flow_flags, dst, src, dport, sport);
+			   protocol, flow_flags, dst, src, dport, sport,
+			   sk->sk_uid);
 }
 
 static inline struct rtable *ip_route_connect(struct flowi4 *fl4,
diff -ruw linux-4.4.115/include/net/sock.h linux-4.4.115-fbx/include/net/sock.h
--- linux-4.4.115/include/net/sock.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/net/sock.h	2019-10-29 09:26:25.521221556 +0100
@@ -446,6 +446,7 @@
 	void			*sk_security;
 #endif
 	__u32			sk_mark;
+	kuid_t			sk_uid;
 #ifdef CONFIG_CGROUP_NET_CLASSID
 	u32			sk_classid;
 #endif
@@ -457,6 +458,7 @@
 	int			(*sk_backlog_rcv)(struct sock *sk,
 						  struct sk_buff *skb);
 	void                    (*sk_destruct)(struct sock *sk);
+	struct rcu_head		sk_rcu;
 };
 
 #define __sk_user_data(sk) ((*((void __rcu **)&(sk)->sk_user_data)))
@@ -739,6 +741,7 @@
 		     */
 	SOCK_FILTER_LOCKED, /* Filter cannot be changed anymore */
 	SOCK_SELECT_ERR_QUEUE, /* Wake select on error queue */
+	SOCK_RCU_FREE, /* wait rcu grace period in sk_destruct() */
 };
 
 #define SK_FLAGS_TIMESTAMP ((1UL << SOCK_TIMESTAMP) | (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE))
@@ -1067,6 +1070,7 @@
 	void			(*destroy_cgroup)(struct mem_cgroup *memcg);
 	struct cg_proto		*(*proto_cgroup)(struct mem_cgroup *memcg);
 #endif
+	int			(*diag_destroy)(struct sock *sk, int err);
 };
 
 int proto_register(struct proto *prot, int alloc_slab);
@@ -1691,6 +1695,7 @@
 	sk->sk_wq = parent->wq;
 	parent->sk = sk;
 	sk_set_socket(sk, parent);
+	sk->sk_uid = SOCK_INODE(parent)->i_uid;
 	security_sock_graft(sk, parent);
 	write_unlock_bh(&sk->sk_callback_lock);
 }
@@ -1698,6 +1703,11 @@
 kuid_t sock_i_uid(struct sock *sk);
 unsigned long sock_i_ino(struct sock *sk);
 
+static inline kuid_t sock_net_uid(const struct net *net, const struct sock *sk)
+{
+	return sk ? sk->sk_uid : make_kuid(net->user_ns, 0);
+}
+
 static inline u32 net_tx_rndhash(void)
 {
 	u32 v = prandom_u32();
@@ -2302,4 +2312,15 @@
 extern __u32 sysctl_wmem_default;
 extern __u32 sysctl_rmem_default;
 
+/* SOCKEV Notifier Events */
+#define SOCKEV_SOCKET   0x00
+#define SOCKEV_BIND     0x01
+#define SOCKEV_LISTEN   0x02
+#define SOCKEV_ACCEPT   0x03
+#define SOCKEV_CONNECT  0x04
+#define SOCKEV_SHUTDOWN 0x05
+
+int sockev_register_notify(struct notifier_block *nb);
+int sockev_unregister_notify(struct notifier_block *nb);
+
 #endif	/* _SOCK_H */
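
The SOCKEV constants and the register/unregister pair suggest a notifier
chain fired on socket lifecycle events. A hypothetical listener, assuming
the chain passes the struct socket as the notifier payload (the payload type
is not visible in this header):

	static int example_sockev_cb(struct notifier_block *nb,
				     unsigned long event, void *data)
	{
		struct socket *sock = data;	/* assumed payload type */

		if (event == SOCKEV_BIND && sock->sk)
			pr_debug("bind by uid %u\n",
				 from_kuid(&init_user_ns, sock->sk->sk_uid));
		return NOTIFY_OK;
	}

	static struct notifier_block example_sockev_nb = {
		.notifier_call = example_sockev_cb,
	};

	/* module init: sockev_register_notify(&example_sockev_nb); */
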
diff -ruw linux-4.4.115/include/net/tcp.h linux-4.4.115-fbx/include/net/tcp.h
--- linux-4.4.115/include/net/tcp.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/net/tcp.h	2019-10-29 09:26:25.521221556 +0100
@@ -141,6 +141,9 @@
 						 * most likely due to retrans in 3WHS.
 						 */
 
+/* Number of full MSS to receive before Acking RFC2581 */
+#define TCP_DELACK_SEG          1
+
 #define TCP_RESOURCE_PROBE_INTERVAL ((unsigned)(HZ/2U)) /* Maximal interval between probes
 					                 * for local resources.
 					                 */
@@ -284,8 +287,14 @@
 extern int sysctl_tcp_invalid_ratelimit;
 extern int sysctl_tcp_pacing_ss_ratio;
 extern int sysctl_tcp_pacing_ca_ratio;
+extern int sysctl_tcp_default_init_rwnd;
 
 extern atomic_long_t tcp_memory_allocated;
+
+/* sysctl variables for controlling various tcp parameters */
+extern int sysctl_tcp_delack_seg;
+extern int sysctl_tcp_use_userconfig;
+
 extern struct percpu_counter tcp_sockets_allocated;
 extern int tcp_memory_pressure;
 
@@ -376,6 +385,12 @@
 			struct pipe_inode_info *pipe, size_t len,
 			unsigned int flags);
 
+/* sysctl master controller */
+extern int tcp_use_userconfig_sysctl_handler(struct ctl_table *, int,
+				void __user *, size_t *, loff_t *);
+extern int tcp_proc_delayed_ack_control(struct ctl_table *, int,
+				void __user *, size_t *, loff_t *);
+
 static inline void tcp_dec_quickack_mode(struct sock *sk,
 					 const unsigned int pkts)
 {
@@ -1171,6 +1186,8 @@
 
 void tcp_done(struct sock *sk);
 
+int tcp_abort(struct sock *sk, int err);
+
 static inline void tcp_sack_reset(struct tcp_options_received *rx_opt)
 {
 	rx_opt->dsack = 0;
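
tcp_abort() is the natural implementation of the new proto ->diag_destroy
hook added to include/net/sock.h above (this is how upstream wires
SOCK_DESTROY); a sketch of the pairing, with the field list heavily elided:

	static struct proto example_tcp_prot = {
		.name		= "TCP",
		.diag_destroy	= tcp_abort,
		/* ... remaining proto ops elided ... */
	};
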
diff -ruw linux-4.4.115/include/net/udp.h linux-4.4.115-fbx/include/net/udp.h
--- linux-4.4.115/include/net/udp.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/net/udp.h	2019-01-22 16:16:28.479291467 +0100
@@ -238,6 +238,7 @@
 		 int (*saddr_cmp)(const struct sock *,
 				  const struct sock *));
 void udp_err(struct sk_buff *, u32);
+int udp_abort(struct sock *sk, int err);
 int udp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len);
 int udp_push_pending_frames(struct sock *sk);
 void udp_flush_pending_frames(struct sock *sk);
diff -ruw linux-4.4.115/include/net/xfrm.h linux-4.4.115-fbx/include/net/xfrm.h
--- linux-4.4.115/include/net/xfrm.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/net/xfrm.h	2019-10-29 09:26:25.525221596 +0100
@@ -159,6 +159,7 @@
 		int		header_len;
 		int		trailer_len;
 		u32		extra_flags;
+		u32		output_mark;
 	} props;
 
 	struct xfrm_lifetime_cfg lft;
@@ -288,10 +289,12 @@
 	struct dst_entry	*(*dst_lookup)(struct net *net,
 					       int tos, int oif,
 					       const xfrm_address_t *saddr,
-					       const xfrm_address_t *daddr);
+					       const xfrm_address_t *daddr,
+					       u32 mark);
 	int			(*get_saddr)(struct net *net, int oif,
 					     xfrm_address_t *saddr,
-					     xfrm_address_t *daddr);
+					     xfrm_address_t *daddr,
+					     u32 mark);
 	void			(*decode_session)(struct sk_buff *skb,
 						  struct flowi *fl,
 						  int reverse);
diff -ruw linux-4.4.115/include/scsi/scsi_device.h linux-4.4.115-fbx/include/scsi/scsi_device.h
--- linux-4.4.115/include/scsi/scsi_device.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/scsi/scsi_device.h	2019-10-29 09:26:25.525221596 +0100
@@ -175,6 +175,10 @@
 	unsigned no_dif:1;	/* T10 PI (DIF) should be disabled */
 	unsigned broken_fua:1;		/* Don't set FUA bit */
 	unsigned lun_in_cdb:1;		/* Store LUN bits in CDB[1] */
+	unsigned use_rpm_auto:1; /* Enable runtime PM auto suspend */
+
+#define SCSI_DEFAULT_AUTOSUSPEND_DELAY  -1
+	int autosuspend_delay;
 
 	atomic_t disk_events_disable_depth; /* disable depth for disk events */
 
diff -ruw linux-4.4.115/include/scsi/scsi.h linux-4.4.115-fbx/include/scsi/scsi.h
--- linux-4.4.115/include/scsi/scsi.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/scsi/scsi.h	2019-01-22 16:16:28.491291576 +0100
@@ -8,6 +8,7 @@
 #include <linux/types.h>
 #include <linux/scatterlist.h>
 #include <linux/kernel.h>
+#include <linux/device.h>
 #include <scsi/scsi_common.h>
 #include <scsi/scsi_proto.h>
 
@@ -306,4 +307,9 @@
 	return (ptr[0]<<24) + (ptr[1]<<16) + (ptr[2]<<8) + ptr[3];
 }
 
+struct scsi_disk *scsi_disk_get_from_dev(struct device *dev);
+
+struct gendisk *scsi_gendisk_get_from_dev(struct device *dev);
+void scsi_gendisk_put(struct device *dev);
+
 #endif /* _SCSI_SCSI_H */
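
scsi_gendisk_get_from_dev()/scsi_gendisk_put() read as a get/put pair around
the gendisk behind a SCSI device; the pairing is inferred from the names,
not documented here. A hypothetical caller:

	static void example_report_disk(struct device *dev)
	{
		struct gendisk *gd = scsi_gendisk_get_from_dev(dev);

		if (gd)
			pr_info("backing disk: %s\n", gd->disk_name);
		scsi_gendisk_put(dev);	/* assumed to balance the get */
	}
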
diff -ruw linux-4.4.115/include/scsi/scsi_host.h linux-4.4.115-fbx/include/scsi/scsi_host.h
--- linux-4.4.115/include/scsi/scsi_host.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/scsi/scsi_host.h	2019-01-22 16:16:28.491291576 +0100
@@ -672,6 +672,12 @@
 	unsigned short_inquiry:1;
 
 	/*
+	 * Set "DBD" field in mode_sense caching mode page in case it is
+	 * mandatory by LLD standard.
+	 */
+	unsigned set_dbd_for_caching:1;
+
+	/*
 	 * Optional work queue to be utilized by the transport
 	 */
 	char work_q_name[20];
diff -ruw linux-4.4.115/include/sound/compress_driver.h linux-4.4.115-fbx/include/sound/compress_driver.h
--- linux-4.4.115/include/sound/compress_driver.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/sound/compress_driver.h	2019-10-29 09:26:25.533221674 +0100
@@ -82,6 +82,7 @@
 	bool metadata_set;
 	bool next_track;
 	void *private_data;
+	struct snd_soc_pcm_runtime *be;
 };
 
 /**
@@ -96,6 +97,8 @@
  * @get_params: retrieve the codec parameters, mandatory
  * @set_metadata: Set the metadata values for a stream
  * @get_metadata: retrieves the requested metadata values from stream
+ * @set_next_track_param: send codec specific data of subsequent track
+ * in gapless
  * @trigger: Trigger operations like start, pause, resume, drain, stop.
  * This callback is mandatory
  * @pointer: Retrieve current h/w pointer information. Mandatory
@@ -118,6 +121,8 @@
 			struct snd_compr_metadata *metadata);
 	int (*get_metadata)(struct snd_compr_stream *stream,
 			struct snd_compr_metadata *metadata);
+	int (*set_next_track_param)(struct snd_compr_stream *stream,
+			union snd_codec_options *codec_options);
 	int (*trigger)(struct snd_compr_stream *stream, int cmd);
 	int (*pointer)(struct snd_compr_stream *stream,
 			struct snd_compr_tstamp *tstamp);
@@ -159,6 +164,7 @@
 int snd_compress_deregister(struct snd_compr *device);
 int snd_compress_new(struct snd_card *card, int device,
 			int type, struct snd_compr *compr);
+void snd_compress_free(struct snd_card *card, struct snd_compr *compr);
 
 /* dsp driver callback apis
  * For playback: driver should call snd_compress_fragment_elapsed() to let the
diff -ruw linux-4.4.115/include/sound/core.h linux-4.4.115-fbx/include/sound/core.h
--- linux-4.4.115/include/sound/core.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/sound/core.h	2019-01-22 16:16:28.503291684 +0100
@@ -134,6 +134,9 @@
 	struct device card_dev;		/* cardX object for sysfs */
 	const struct attribute_group *dev_groups[4]; /* assigned sysfs attr */
 	bool registered;		/* card_dev is registered? */
+	int offline;			/* if this sound card is offline */
+	unsigned long offline_change;
+	wait_queue_head_t offline_poll_wait;
 
 #ifdef CONFIG_PM
 	unsigned int power_state;	/* power state */
@@ -265,6 +268,8 @@
 int snd_card_file_add(struct snd_card *card, struct file *file);
 int snd_card_file_remove(struct snd_card *card, struct file *file);
 #define snd_card_unref(card)	put_device(&(card)->card_dev)
+void snd_card_change_online_state(struct snd_card *card, int online);
+bool snd_card_is_online_state(struct snd_card *card);
 
 #define snd_card_set_dev(card, devptr) ((card)->dev = (devptr))
 
diff -ruw linux-4.4.115/include/sound/info.h linux-4.4.115-fbx/include/sound/info.h
--- linux-4.4.115/include/sound/info.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/sound/info.h	2019-01-22 16:16:28.507291721 +0100
@@ -161,7 +161,9 @@
 }
 
 int snd_info_check_reserved_words(const char *str);
-
+struct snd_info_entry *snd_register_module_info(struct module *module,
+						const char *name,
+						struct snd_info_entry *parent);
 #else
 
 #define snd_seq_root NULL
@@ -190,7 +192,9 @@
 					 void *private_data,
 					 void (*read)(struct snd_info_entry *, struct snd_info_buffer *)) {}
 static inline int snd_info_check_reserved_words(const char *str) { return 1; }
-
+static inline struct snd_info_entry *snd_register_module_info(
+				struct module *module, const char *name,
+				struct snd_info_entry *parent) { return NULL; }
 #endif
 
 /*
diff -ruw linux-4.4.115/include/sound/jack.h linux-4.4.115-fbx/include/sound/jack.h
--- linux-4.4.115/include/sound/jack.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/sound/jack.h	2019-01-22 16:16:28.507291721 +0100
@@ -58,14 +58,20 @@
 	SND_JACK_VIDEOOUT	= 0x0010,
 	SND_JACK_AVOUT		= SND_JACK_LINEOUT | SND_JACK_VIDEOOUT,
 	SND_JACK_LINEIN		= 0x0020,
+	SND_JACK_OC_HPHL        = 0x0040,
+	SND_JACK_OC_HPHR        = 0x0080,
+	SND_JACK_UNSUPPORTED    = 0x0100,
+	SND_JACK_MICROPHONE2    = 0x0200,
+	SND_JACK_ANC_HEADPHONE = SND_JACK_HEADPHONE | SND_JACK_MICROPHONE |
+				 SND_JACK_MICROPHONE2,
 
 	/* Kept separate from switches to facilitate implementation */
-	SND_JACK_BTN_0		= 0x4000,
-	SND_JACK_BTN_1		= 0x2000,
-	SND_JACK_BTN_2		= 0x1000,
-	SND_JACK_BTN_3		= 0x0800,
-	SND_JACK_BTN_4		= 0x0400,
-	SND_JACK_BTN_5		= 0x0200,
+	SND_JACK_BTN_0		= 0x8000,
+	SND_JACK_BTN_1		= 0x4000,
+	SND_JACK_BTN_2		= 0x2000,
+	SND_JACK_BTN_3		= 0x1000,
+	SND_JACK_BTN_4		= 0x0800,
+	SND_JACK_BTN_5		= 0x0400,
 };
 
 /* Keep in sync with definitions above */
diff -ruw linux-4.4.115/include/sound/pcm.h linux-4.4.115-fbx/include/sound/pcm.h
--- linux-4.4.115/include/sound/pcm.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/sound/pcm.h	2019-01-22 16:16:28.507291721 +0100
@@ -68,6 +68,8 @@
 	int (*close)(struct snd_pcm_substream *substream);
 	int (*ioctl)(struct snd_pcm_substream * substream,
 		     unsigned int cmd, void *arg);
+	int (*compat_ioctl)(struct snd_pcm_substream *substream,
+		     unsigned int cmd, void *arg);
 	int (*hw_params)(struct snd_pcm_substream *substream,
 			 struct snd_pcm_hw_params *params);
 	int (*hw_free)(struct snd_pcm_substream *substream);
@@ -78,6 +80,9 @@
 			struct timespec *system_ts, struct timespec *audio_ts,
 			struct snd_pcm_audio_tstamp_config *audio_tstamp_config,
 			struct snd_pcm_audio_tstamp_report *audio_tstamp_report);
+	int (*delay_blk)(struct snd_pcm_substream *substream);
+	int (*wall_clock)(struct snd_pcm_substream *substream,
+			  struct timespec *audio_ts);
 	int (*copy)(struct snd_pcm_substream *substream, int channel,
 		    snd_pcm_uframes_t pos,
 		    void __user *buf, snd_pcm_uframes_t count);
@@ -87,6 +92,7 @@
 			     unsigned long offset);
 	int (*mmap)(struct snd_pcm_substream *substream, struct vm_area_struct *vma);
 	int (*ack)(struct snd_pcm_substream *substream);
+	int (*restart)(struct snd_pcm_substream *substream);
 };
 
 /*
@@ -100,7 +106,7 @@
 #endif
 
 #define SNDRV_PCM_IOCTL1_RESET		0
-#define SNDRV_PCM_IOCTL1_INFO		1
+/* 1 is absent slot. */
 #define SNDRV_PCM_IOCTL1_CHANNEL_INFO	2
 #define SNDRV_PCM_IOCTL1_GSTATE		3
 #define SNDRV_PCM_IOCTL1_FIFO_SIZE	4
@@ -115,6 +121,12 @@
 
 #define SNDRV_PCM_POS_XRUN		((snd_pcm_uframes_t)-1)
 
+#define SNDRV_DMA_MODE          (0)
+#define SNDRV_NON_DMA_MODE      (1 << 0)
+#define SNDRV_RENDER_STOPPED    (1 << 1)
+#define SNDRV_RENDER_RUNNING    (1 << 2)
+
+
 /* If you change this don't forget to change rates[] table in pcm_native.c */
 #define SNDRV_PCM_RATE_5512		(1<<0)		/* 5512Hz */
 #define SNDRV_PCM_RATE_8000		(1<<1)		/* 8000Hz */
@@ -129,6 +141,8 @@
 #define SNDRV_PCM_RATE_96000		(1<<10)		/* 96000Hz */
 #define SNDRV_PCM_RATE_176400		(1<<11)		/* 176400Hz */
 #define SNDRV_PCM_RATE_192000		(1<<12)		/* 192000Hz */
+#define SNDRV_PCM_RATE_352800		(1<<13)		/* 352800Hz */
+#define SNDRV_PCM_RATE_384000		(1<<14)		/* 384000Hz */
 
 #define SNDRV_PCM_RATE_CONTINUOUS	(1<<30)		/* continuous range */
 #define SNDRV_PCM_RATE_KNOT		(1<<31)		/* supports more non-continuos rates */
@@ -141,6 +155,9 @@
 					 SNDRV_PCM_RATE_88200|SNDRV_PCM_RATE_96000)
 #define SNDRV_PCM_RATE_8000_192000	(SNDRV_PCM_RATE_8000_96000|SNDRV_PCM_RATE_176400|\
 					 SNDRV_PCM_RATE_192000)
+#define SNDRV_PCM_RATE_8000_384000	(SNDRV_PCM_RATE_8000_192000|\
+					 SNDRV_PCM_RATE_352800|\
+					 SNDRV_PCM_RATE_384000)
 #define _SNDRV_PCM_FMTBIT(fmt)		(1ULL << (__force int)SNDRV_PCM_FORMAT_##fmt)
 #define SNDRV_PCM_FMTBIT_S8		_SNDRV_PCM_FMTBIT(S8)
 #define SNDRV_PCM_FMTBIT_U8		_SNDRV_PCM_FMTBIT(U8)
@@ -368,6 +385,7 @@
 	unsigned int rate_num;
 	unsigned int rate_den;
 	unsigned int no_period_wakeup: 1;
+	unsigned int render_flag;
 
 	/* -- SW params -- */
 	int tstamp_mode;		/* mmap timestamp is updated */
@@ -448,6 +466,7 @@
 	const struct snd_pcm_ops *ops;
 	/* -- runtime information -- */
 	struct snd_pcm_runtime *runtime;
+	spinlock_t runtime_lock;
         /* -- timer section -- */
 	struct snd_timer *timer;		/* timer */
 	unsigned timer_running: 1;	/* time is running */
@@ -482,6 +501,7 @@
 #endif /* CONFIG_SND_VERBOSE_PROCFS */
 	/* misc flags */
 	unsigned int hw_opened: 1;
+	unsigned int hw_no_buffer: 1; /* substream may not have a buffer */
 };
 
 #define SUBSTREAM_BUSY(substream) ((substream)->ref_count > 0)
@@ -507,6 +527,8 @@
 #endif
 #endif
 	struct snd_kcontrol *chmap_kctl; /* channel-mapping controls */
+	struct snd_kcontrol *vol_kctl; /* volume controls */
+	struct snd_kcontrol *usr_kctl; /* user controls */
 	struct device dev;
 };
 
@@ -1399,6 +1421,54 @@
 	return 1ULL << (__force int) pcm_format;
 }
 
+/*
+ * PCM Volume control API
+ */
+/* array element of volume */
+struct snd_pcm_volume_elem {
+	int volume;
+};
+
+/* pp information; retrieved via snd_kcontrol_chip() */
+struct snd_pcm_volume {
+	struct snd_pcm *pcm;	/* assigned PCM instance */
+	int stream;		/* PLAYBACK or CAPTURE */
+	struct snd_kcontrol *kctl;
+	const struct snd_pcm_volume_elem *volume;
+	int max_length;
+	void *private_data;	/* optional: private data pointer */
+};
+
+int snd_pcm_add_volume_ctls(struct snd_pcm *pcm, int stream,
+			   const struct snd_pcm_volume_elem *volume,
+			   int max_length,
+			   unsigned long private_value,
+			   struct snd_pcm_volume **info_ret);
+
+/*
+ * PCM User control API
+ */
+/* array element of usr elem */
+struct snd_pcm_usr_elem {
+	int val[128];
+};
+
+/* pp information; retrieved via snd_kcontrol_chip() */
+struct snd_pcm_usr {
+	struct snd_pcm *pcm;	/* assigned PCM instance */
+	int stream;		/* PLAYBACK or CAPTURE */
+	struct snd_kcontrol *kctl;
+	const struct snd_pcm_usr_elem *usr;
+	int max_length;
+	void *private_data;	/* optional: private data pointer */
+};
+
+int snd_pcm_add_usr_ctls(struct snd_pcm *pcm, int stream,
+			 const struct snd_pcm_usr_elem *usr,
+			 int max_length, int max_control_str_len,
+			 unsigned long private_value,
+			 struct snd_pcm_usr **info_ret);
+
 /* printk helpers */
 #define pcm_err(pcm, fmt, args...) \
 	dev_err((pcm)->card->dev, fmt, ##args)
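
The new PCM volume/user control APIs hang an array of elements off a
kcontrol. A minimal sketch registering one playback volume element (values
illustrative):

	static int example_add_volume(struct snd_pcm *pcm)
	{
		static const struct snd_pcm_volume_elem vol[] = {
			{ .volume = 0x2000 },
		};
		struct snd_pcm_volume *info;

		return snd_pcm_add_volume_ctls(pcm, SNDRV_PCM_STREAM_PLAYBACK,
					       vol, ARRAY_SIZE(vol),
					       0 /* private_value */, &info);
	}
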
diff -ruw linux-4.4.115/include/sound/pcm_params.h linux-4.4.115-fbx/include/sound/pcm_params.h
--- linux-4.4.115/include/sound/pcm_params.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/sound/pcm_params.h	2019-10-29 09:26:25.533221674 +0100
@@ -110,10 +110,14 @@
 
 static inline void snd_mask_leave(struct snd_mask *mask, unsigned int val)
 {
-	unsigned int v;
-	v = mask->bits[MASK_OFS(val)] & MASK_BIT(val);
+	unsigned int v, bits_index;
+
+	bits_index = MASK_OFS(val);
+	if (bits_index < ((SNDRV_MASK_MAX+31)/32)) {
+		v = mask->bits[bits_index] & MASK_BIT(val);
 	snd_mask_none(mask);
-	mask->bits[MASK_OFS(val)] = v;
+		mask->bits[bits_index] = v;
+	}
 }
 
 static inline void snd_mask_intersect(struct snd_mask *mask,
diff -ruw linux-4.4.115/include/sound/rawmidi.h linux-4.4.115-fbx/include/sound/rawmidi.h
--- linux-4.4.115/include/sound/rawmidi.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/sound/rawmidi.h	2019-01-22 16:16:28.511291757 +0100
@@ -78,6 +78,7 @@
 	size_t xruns;		/* over/underruns counter */
 	/* misc */
 	spinlock_t lock;
+	struct mutex realloc_mutex;
 	wait_queue_head_t sleep;
 	/* event handler (new bytes, input only) */
 	void (*event)(struct snd_rawmidi_substream *substream);
diff -ruw linux-4.4.115/include/sound/soc-dai.h linux-4.4.115-fbx/include/sound/soc-dai.h
--- linux-4.4.115/include/sound/soc-dai.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/sound/soc-dai.h	2019-01-22 16:16:28.511291757 +0100
@@ -138,6 +138,10 @@
 int snd_soc_dai_digital_mute(struct snd_soc_dai *dai, int mute,
 			     int direction);
 
+int snd_soc_dai_get_channel_map(struct snd_soc_dai *dai,
+	unsigned int *tx_num, unsigned int *tx_slot,
+	unsigned int *rx_num, unsigned int *rx_slot);
+
 int snd_soc_dai_is_dummy(struct snd_soc_dai *dai);
 
 struct snd_soc_dai_ops {
@@ -166,6 +170,9 @@
 		unsigned int tx_num, unsigned int *tx_slot,
 		unsigned int rx_num, unsigned int *rx_slot);
 	int (*set_tristate)(struct snd_soc_dai *dai, int tristate);
+	int (*get_channel_map)(struct snd_soc_dai *dai,
+		unsigned int *tx_num, unsigned int *tx_slot,
+		unsigned int *rx_num, unsigned int *rx_slot);
 
 	/*
 	 * DAI digital mute - optional.
@@ -262,8 +269,8 @@
 	struct snd_soc_dai_driver *driver;
 
 	/* DAI runtime info */
-	unsigned int capture_active:1;		/* stream is in use */
-	unsigned int playback_active:1;		/* stream is in use */
+	unsigned int capture_active;		/* stream is in use */
+	unsigned int playback_active;		/* stream is in use */
 	unsigned int symmetric_rates:1;
 	unsigned int symmetric_channels:1;
 	unsigned int symmetric_samplebits:1;
diff -ruw linux-4.4.115/include/sound/soc-dapm.h linux-4.4.115-fbx/include/sound/soc-dapm.h
--- linux-4.4.115/include/sound/soc-dapm.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/sound/soc-dapm.h	2019-01-22 16:16:28.511291757 +0100
@@ -314,6 +314,11 @@
 	.get = snd_soc_dapm_get_pin_switch, \
 	.put = snd_soc_dapm_put_pin_switch, \
 	.private_value = (unsigned long)xname }
+#define SND_SOC_DAPM_MICBIAS_E(wname, wreg, wshift, winvert, wevent, wflags) \
+{	.id = snd_soc_dapm_micbias, .name = wname, \
+	SND_SOC_DAPM_INIT_REG_VAL(wreg, wshift, winvert), \
+	.kcontrol_news = NULL, .num_kcontrols = 0, \
+	.event = wevent, .event_flags = wflags}
 
 /* dapm stream operations */
 #define SND_SOC_DAPM_STREAM_NOP			0x0
@@ -451,6 +456,8 @@
 
 struct snd_soc_dapm_context *snd_soc_dapm_kcontrol_dapm(
 	struct snd_kcontrol *kcontrol);
+struct snd_soc_dapm_widget_list *dapm_kcontrol_get_wlist(
+	const struct snd_kcontrol *kcontrol);
 
 struct snd_soc_dapm_widget *snd_soc_dapm_kcontrol_widget(
 		struct snd_kcontrol *kcontrol);
diff -ruw linux-4.4.115/include/sound/soc-dpcm.h linux-4.4.115-fbx/include/sound/soc-dpcm.h
--- linux-4.4.115/include/sound/soc-dpcm.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/sound/soc-dpcm.h	2019-01-22 16:16:28.511291757 +0100
@@ -17,6 +17,7 @@
 
 struct snd_soc_pcm_runtime;
 
+#define DPCM_MAX_BE_USERS   8
 /*
  * Types of runtime_update to perform. e.g. originated from FE PCM ops
  * or audio route changes triggered by muxes/mixers.
@@ -86,6 +87,7 @@
 #ifdef CONFIG_DEBUG_FS
 	struct dentry *debugfs_state;
 #endif
+	int stream;
 };
 
 /*
@@ -148,8 +150,13 @@
 void dpcm_clear_pending_state(struct snd_soc_pcm_runtime *fe, int stream);
 int dpcm_be_dai_hw_free(struct snd_soc_pcm_runtime *fe, int stream);
 int dpcm_be_dai_hw_params(struct snd_soc_pcm_runtime *fe, int tream);
+int dpcm_fe_dai_hw_params_be(struct snd_soc_pcm_runtime *fe,
+	struct snd_soc_pcm_runtime *be, struct snd_pcm_hw_params *hw_params,
+							    int stream);
 int dpcm_be_dai_trigger(struct snd_soc_pcm_runtime *fe, int stream, int cmd);
 int dpcm_be_dai_prepare(struct snd_soc_pcm_runtime *fe, int stream);
+int dpcm_fe_dai_prepare_be(struct snd_soc_pcm_runtime *fe,
+		struct snd_soc_pcm_runtime *be, int stream);
 int dpcm_dapm_stream_event(struct snd_soc_pcm_runtime *fe, int dir,
 	int event);
 
diff -ruw linux-4.4.115/include/sound/soc.h linux-4.4.115-fbx/include/sound/soc.h
--- linux-4.4.115/include/sound/soc.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/sound/soc.h	2019-01-22 16:16:28.511291757 +0100
@@ -22,6 +22,7 @@
 #include <linux/kernel.h>
 #include <linux/regmap.h>
 #include <linux/log2.h>
+#include <linux/async.h>
 #include <sound/core.h>
 #include <sound/pcm.h>
 #include <sound/compress_driver.h>
@@ -224,6 +225,14 @@
 	.get = xhandler_get, .put = xhandler_put, \
 	.private_value = SOC_DOUBLE_R_VALUE(reg_left, reg_right, xshift, \
 					    xmax, xinvert) }
+#define SOC_SINGLE_MULTI_EXT(xname, xreg, xshift, xmax, xinvert, xcount,\
+	xhandler_get, xhandler_put) \
+{	.iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, \
+	.info = snd_soc_info_multi_ext, \
+	.get = xhandler_get, .put = xhandler_put, \
+	.private_value = (unsigned long)&(struct soc_multi_mixer_control) \
+		{.reg = xreg, .shift = xshift, .rshift = xshift, .max = xmax, \
+		.count = xcount, .platform_max = xmax, .invert = xinvert} }
 #define SOC_SINGLE_EXT_TLV(xname, xreg, xshift, xmax, xinvert,\
 	 xhandler_get, xhandler_put, tlv_array) \
 {	.iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, \
@@ -354,6 +363,10 @@
 #define SND_SOC_COMP_ORDER_LATE		1
 #define SND_SOC_COMP_ORDER_LAST		2
 
+/* DAI Link Host Mode Support */
+#define SND_SOC_DAI_LINK_NO_HOST		0x1
+#define SND_SOC_DAI_LINK_OPT_HOST		0x2
+
 /*
  * Bias levels
  *
@@ -545,12 +558,13 @@
 int snd_soc_test_bits(struct snd_soc_codec *codec, unsigned int reg,
 				unsigned int mask, unsigned int value);
 
+void snd_soc_card_change_online_state(struct snd_soc_card *soc_card,
+				      int online);
 #ifdef CONFIG_SND_SOC_AC97_BUS
 struct snd_ac97 *snd_soc_alloc_ac97_codec(struct snd_soc_codec *codec);
 struct snd_ac97 *snd_soc_new_ac97_codec(struct snd_soc_codec *codec,
 	unsigned int id, unsigned int id_mask);
 void snd_soc_free_ac97_codec(struct snd_ac97 *ac97);
-
 int snd_soc_set_ac97_ops(struct snd_ac97_bus_ops *ops);
 int snd_soc_set_ac97_ops_of_reset(struct snd_ac97_bus_ops *ops,
 		struct platform_device *pdev);
@@ -636,6 +650,8 @@
 	struct snd_ctl_elem_value *ucontrol);
 int snd_soc_put_strobe(struct snd_kcontrol *kcontrol,
 	struct snd_ctl_elem_value *ucontrol);
+int snd_soc_info_multi_ext(struct snd_kcontrol *kcontrol,
+	struct snd_ctl_elem_info *uinfo);
 
 /**
  * struct snd_soc_jack_pin - Describes a pin to update based on jack detection
@@ -727,6 +743,7 @@
 	unsigned int channels_min;	/* min channels */
 	unsigned int channels_max;	/* max channels */
 	unsigned int sig_bits;		/* number of bits of content */
+	const char *aif_name;		/* DAPM AIF widget name */
 };
 
 /* SoC audio ops */
@@ -925,6 +942,16 @@
 	snd_pcm_sframes_t (*delay)(struct snd_pcm_substream *,
 		struct snd_soc_dai *);
 
+	/*
+	 * For platform-caused delay reporting, where the thread blocks waiting
+	 * for the delay amount to be determined.  Defining this will cause the
+	 * ASoC core to skip calling the delay callbacks for all components in
+	 * the runtime.
+	 * Optional.
+	 */
+	snd_pcm_sframes_t (*delay_blk)(struct snd_pcm_substream *,
+		struct snd_soc_dai *);
+
 	/* platform stream pcm ops */
 	const struct snd_pcm_ops *ops;
 
@@ -949,6 +976,14 @@
 	struct snd_soc_component component;
 };
 
+enum snd_soc_async_ops {
+	ASYNC_DPCM_SND_SOC_OPEN = 1 << 0,
+	ASYNC_DPCM_SND_SOC_CLOSE = 1 << 1,
+	ASYNC_DPCM_SND_SOC_PREPARE = 1 << 2,
+	ASYNC_DPCM_SND_SOC_HW_PARAMS = 1 << 3,
+	ASYNC_DPCM_SND_SOC_FREE = 1 << 4,
+};
+
 struct snd_soc_dai_link {
 	/* config - must be set by machine driver */
 	const char *name;			/* Codec name */
@@ -1028,6 +1063,9 @@
 	/* This DAI link can route to other DAI links at runtime (Frontend)*/
 	unsigned int dynamic:1;
 
+	/* This DAI can support no host IO (no pcm data is copied to from host) */
+	unsigned int no_host_mode:2;
+
 	/* DPCM capture and Playback support */
 	unsigned int dpcm_capture:1;
 	unsigned int dpcm_playback:1;
@@ -1037,6 +1075,9 @@
 
 	/* pmdown_time is ignored at stop */
 	unsigned int ignore_pmdown_time:1;
+
+	/* this value determines what all ops can be started asynchronously */
+	enum snd_soc_async_ops async_ops;
 };
 
 struct snd_soc_codec_conf {
@@ -1079,6 +1120,7 @@
 
 	struct mutex mutex;
 	struct mutex dapm_mutex;
+	struct mutex dapm_power_mutex;
 
 	bool instantiated;
 
@@ -1184,6 +1226,8 @@
 	long pmdown_time;
 	unsigned char pop_wait:1;
 
+	/* err in case of ops failed */
+	int err_ops;
 	/* runtime devices */
 	struct snd_pcm *pcm;
 	struct snd_compr *compr;
@@ -1225,8 +1269,10 @@
 	struct snd_soc_dobj dobj;
 
 	/* used for TLV byte control */
-	int (*get)(unsigned int __user *bytes, unsigned int size);
-	int (*put)(const unsigned int __user *bytes, unsigned int size);
+	int (*get)(struct snd_kcontrol *kcontrol, unsigned int __user *bytes,
+			unsigned int size);
+	int (*put)(struct snd_kcontrol *kcontrol, const unsigned int __user *bytes,
+			unsigned int size);
 };
 
 /* multi register control */
@@ -1235,6 +1281,11 @@
 	unsigned int regbase, regcount, nbits, invert;
 };
 
+struct soc_multi_mixer_control {
+	int min, max, platform_max, count;
+	unsigned int reg, rreg, shift, rshift, invert;
+};
+
 /* enumerated kcontrol */
 struct soc_enum {
 	int reg;
@@ -1420,6 +1471,8 @@
 void snd_soc_component_async_complete(struct snd_soc_component *component);
 int snd_soc_component_test_bits(struct snd_soc_component *component,
 	unsigned int reg, unsigned int mask, unsigned int value);
+struct snd_soc_component *soc_find_component(
+	const struct device_node *of_node, const char *name);
 
 #ifdef CONFIG_REGMAP
 
diff -ruw linux-4.4.115/include/sound/soc-topology.h linux-4.4.115-fbx/include/sound/soc-topology.h
--- linux-4.4.115/include/sound/soc-topology.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/sound/soc-topology.h	2019-01-22 16:16:28.511291757 +0100
@@ -92,8 +92,10 @@
 /* Bytes ext operations, for TLV byte controls */
 struct snd_soc_tplg_bytes_ext_ops {
 	u32 id;
-	int (*get)(unsigned int __user *bytes, unsigned int size);
-	int (*put)(const unsigned int __user *bytes, unsigned int size);
+	int (*get)(struct snd_kcontrol *kcontrol, unsigned int __user *bytes,
+							unsigned int size);
+	int (*put)(struct snd_kcontrol *kcontrol,
+			const unsigned int __user *bytes, unsigned int size);
 };
 
 /*
diff -ruw linux-4.4.115/include/trace/events/cma.h linux-4.4.115-fbx/include/trace/events/cma.h
--- linux-4.4.115/include/trace/events/cma.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/trace/events/cma.h	2019-01-22 16:16:28.519291829 +0100
@@ -7,7 +7,7 @@
 #include <linux/types.h>
 #include <linux/tracepoint.h>
 
-TRACE_EVENT(cma_alloc,
+DECLARE_EVENT_CLASS(cma_alloc_class,
 
 	TP_PROTO(unsigned long pfn, const struct page *page,
 		 unsigned int count, unsigned int align),
@@ -60,6 +60,44 @@
 		  __entry->count)
 );
 
+TRACE_EVENT(cma_alloc_start,
+
+	TP_PROTO(unsigned int count, unsigned int align),
+
+	TP_ARGS(count, align),
+
+	TP_STRUCT__entry(
+		__field(unsigned int, count)
+		__field(unsigned int, align)
+	),
+
+	TP_fast_assign(
+		__entry->count = count;
+		__entry->align = align;
+	),
+
+	TP_printk("count=%u align=%u",
+		  __entry->count,
+		  __entry->align)
+);
+
+DEFINE_EVENT(cma_alloc_class, cma_alloc,
+
+	TP_PROTO(unsigned long pfn, const struct page *page,
+		 unsigned int count, unsigned int align),
+
+	TP_ARGS(pfn, page, count, align)
+);
+
+DEFINE_EVENT(cma_alloc_class, cma_alloc_busy_retry,
+
+	TP_PROTO(unsigned long pfn, const struct page *page,
+		 unsigned int count, unsigned int align),
+
+	TP_ARGS(pfn, page, count, align)
+);
+
+
 #endif /* _TRACE_CMA_H */
 
 /* This part must be outside protection */
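
Converting cma_alloc into an event class lets cma_alloc_busy_retry reuse its
template, so instrumenting the retry path costs one extra trace call.
Hypothetical call sites in the allocator (argument names follow the
TP_PROTOs above):

	static void example_trace_alloc(unsigned long pfn, struct page *page,
					unsigned int count, unsigned int align)
	{
		trace_cma_alloc_start(count, align);
		/* ... and if the chosen range turned out busy: */
		trace_cma_alloc_busy_retry(pfn, page, count, align);
	}
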
diff -ruw linux-4.4.115/include/trace/events/compaction.h linux-4.4.115-fbx/include/trace/events/compaction.h
--- linux-4.4.115/include/trace/events/compaction.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/trace/events/compaction.h	2019-01-22 16:16:28.519291829 +0100
@@ -350,6 +350,61 @@
 );
 #endif
 
+TRACE_EVENT(mm_compaction_kcompactd_sleep,
+
+	TP_PROTO(int nid),
+
+	TP_ARGS(nid),
+
+	TP_STRUCT__entry(
+		__field(int, nid)
+	),
+
+	TP_fast_assign(
+		__entry->nid = nid;
+	),
+
+	TP_printk("nid=%d", __entry->nid)
+);
+
+DECLARE_EVENT_CLASS(kcompactd_wake_template,
+
+	TP_PROTO(int nid, int order, enum zone_type classzone_idx),
+
+	TP_ARGS(nid, order, classzone_idx),
+
+	TP_STRUCT__entry(
+		__field(int, nid)
+		__field(int, order)
+		__field(enum zone_type, classzone_idx)
+	),
+
+	TP_fast_assign(
+		__entry->nid = nid;
+		__entry->order = order;
+		__entry->classzone_idx = classzone_idx;
+	),
+
+	TP_printk("nid=%d order=%d classzone_idx=%-8s",
+		__entry->nid,
+		__entry->order,
+		__print_symbolic(__entry->classzone_idx, ZONE_TYPE))
+);
+
+DEFINE_EVENT(kcompactd_wake_template, mm_compaction_wakeup_kcompactd,
+
+	TP_PROTO(int nid, int order, enum zone_type classzone_idx),
+
+	TP_ARGS(nid, order, classzone_idx)
+);
+
+DEFINE_EVENT(kcompactd_wake_template, mm_compaction_kcompactd_wake,
+
+	TP_PROTO(int nid, int order, enum zone_type classzone_idx),
+
+	TP_ARGS(nid, order, classzone_idx)
+);
+
 #endif /* _TRACE_COMPACTION_H */
 
 /* This part must be outside protection */
diff -ruw linux-4.4.115/include/trace/events/iommu.h linux-4.4.115-fbx/include/trace/events/iommu.h
--- linux-4.4.115/include/trace/events/iommu.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/trace/events/iommu.h	2019-01-22 16:16:28.519291829 +0100
@@ -83,6 +83,29 @@
 	TP_ARGS(dev)
 );
 
+DECLARE_EVENT_CLASS(iommu_map_unmap,
+
+	TP_PROTO(unsigned long iova, phys_addr_t paddr, size_t size),
+
+	TP_ARGS(iova, paddr, size),
+
+	TP_STRUCT__entry(
+		__field(u64, iova)
+		__field(u64, paddr)
+		__field(size_t, size)
+	),
+
+	TP_fast_assign(
+		__entry->iova = iova;
+		__entry->paddr = paddr;
+		__entry->size = size;
+	),
+
+	TP_printk("IOMMU: iova=0x%016llx paddr=0x%016llx size=0x%zx",
+			__entry->iova, __entry->paddr, __entry->size
+	)
+);
+
 TRACE_EVENT(map,
 
 	TP_PROTO(unsigned long iova, phys_addr_t paddr, size_t size),
@@ -129,6 +152,77 @@
 	)
 );
 
+DEFINE_EVENT(iommu_map_unmap, map_start,
+
+	TP_PROTO(unsigned long iova, phys_addr_t paddr, size_t size),
+
+	TP_ARGS(iova, paddr, size)
+);
+
+DEFINE_EVENT(iommu_map_unmap, map_end,
+
+	TP_PROTO(unsigned long iova, phys_addr_t paddr, size_t size),
+
+	TP_ARGS(iova, paddr, size)
+);
+
+DEFINE_EVENT_PRINT(iommu_map_unmap, unmap_start,
+
+	TP_PROTO(unsigned long iova, phys_addr_t paddr, size_t size),
+
+	TP_ARGS(iova, paddr, size),
+
+	TP_printk("IOMMU: iova=0x%016llx size=0x%x",
+			__entry->iova, __entry->size
+	)
+);
+
+DEFINE_EVENT_PRINT(iommu_map_unmap, unmap_end,
+
+	TP_PROTO(unsigned long iova, phys_addr_t paddr, size_t size),
+
+	TP_ARGS(iova, paddr, size),
+
+	TP_printk("IOMMU: iova=0x%016llx size=0x%x",
+			__entry->iova, __entry->size
+	)
+);
+
+DECLARE_EVENT_CLASS(iommu_map_sg,
+
+	TP_PROTO(unsigned long iova, unsigned int nents),
+
+	TP_ARGS(iova, nents),
+
+	TP_STRUCT__entry(
+		__field(u64, iova)
+		__field(int, nents)
+	),
+
+	TP_fast_assign(
+		__entry->iova = iova;
+		__entry->nents = nents;
+	),
+
+	TP_printk("IOMMU: iova=0x%016llx nents=%u",
+			__entry->iova, __entry->nents
+	)
+);
+
+DEFINE_EVENT(iommu_map_sg, map_sg_start,
+
+	TP_PROTO(unsigned long iova, unsigned int nents),
+
+	TP_ARGS(iova, nents)
+);
+
+DEFINE_EVENT(iommu_map_sg, map_sg_end,
+
+	TP_PROTO(unsigned long iova, unsigned int nents),
+
+	TP_ARGS(iova, nents)
+);
+
 DECLARE_EVENT_CLASS(iommu_error,
 
 	TP_PROTO(struct device *dev, unsigned long iova, int flags),
diff -ruw linux-4.4.115/include/trace/events/kmem.h linux-4.4.115-fbx/include/trace/events/kmem.h
--- linux-4.4.115/include/trace/events/kmem.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/trace/events/kmem.h	2019-01-22 16:16:28.523291866 +0100
@@ -351,6 +351,550 @@
 		__entry->change_ownership)
 );
 
+
+DECLARE_EVENT_CLASS(ion_alloc,
+
+	TP_PROTO(const char *client_name,
+		 const char *heap_name,
+		 size_t len,
+		 unsigned int mask,
+		 unsigned int flags),
+
+	TP_ARGS(client_name, heap_name, len, mask, flags),
+
+	TP_STRUCT__entry(
+		__array(char,		client_name, 64)
+		__field(const char *,	heap_name)
+		__field(size_t,		len)
+		__field(unsigned int,	mask)
+		__field(unsigned int,	flags)
+	),
+
+	TP_fast_assign(
+		strlcpy(__entry->client_name, client_name, 64);
+		__entry->heap_name	= heap_name;
+		__entry->len		= len;
+		__entry->mask		= mask;
+		__entry->flags		= flags;
+	),
+
+	TP_printk("client_name=%s heap_name=%s len=%zu mask=0x%x flags=0x%x",
+		__entry->client_name,
+		__entry->heap_name,
+		__entry->len,
+		__entry->mask,
+		__entry->flags)
+);
+
+DEFINE_EVENT(ion_alloc, ion_alloc_buffer_start,
+
+	TP_PROTO(const char *client_name,
+		 const char *heap_name,
+		 size_t len,
+		 unsigned int mask,
+		 unsigned int flags),
+
+	TP_ARGS(client_name, heap_name, len, mask, flags)
+);
+
+DEFINE_EVENT(ion_alloc, ion_alloc_buffer_end,
+
+	TP_PROTO(const char *client_name,
+		 const char *heap_name,
+		 size_t len,
+		 unsigned int mask,
+		 unsigned int flags),
+
+	TP_ARGS(client_name, heap_name, len, mask, flags)
+);
+
+DECLARE_EVENT_CLASS(ion_alloc_error,
+
+	TP_PROTO(const char *client_name,
+		 const char *heap_name,
+		 size_t len,
+		 unsigned int mask,
+		 unsigned int flags,
+		 long error),
+
+	TP_ARGS(client_name, heap_name, len, mask, flags, error),
+
+	TP_STRUCT__entry(
+		__field(const char *,	client_name)
+		__field(const char *,	heap_name)
+		__field(size_t,		len)
+		__field(unsigned int,	mask)
+		__field(unsigned int,	flags)
+		__field(long,		error)
+	),
+
+	TP_fast_assign(
+		__entry->client_name	= client_name;
+		__entry->heap_name	= heap_name;
+		__entry->len		= len;
+		__entry->mask		= mask;
+		__entry->flags		= flags;
+		__entry->error		= error;
+	),
+
+	TP_printk(
+	"client_name=%s heap_name=%s len=%zu mask=0x%x flags=0x%x error=%ld",
+		__entry->client_name,
+		__entry->heap_name,
+		__entry->len,
+		__entry->mask,
+		__entry->flags,
+		__entry->error)
+);
+
+
+DEFINE_EVENT(ion_alloc_error, ion_alloc_buffer_fallback,
+
+	TP_PROTO(const char *client_name,
+		 const char *heap_name,
+		 size_t len,
+		 unsigned int mask,
+		 unsigned int flags,
+		 long error),
+
+	TP_ARGS(client_name, heap_name, len, mask, flags, error)
+);
+
+DEFINE_EVENT(ion_alloc_error, ion_alloc_buffer_fail,
+
+	TP_PROTO(const char *client_name,
+		 const char *heap_name,
+		 size_t len,
+		 unsigned int mask,
+		 unsigned int flags,
+		 long error),
+
+	TP_ARGS(client_name, heap_name, len, mask, flags, error)
+);
+
+
+DECLARE_EVENT_CLASS(alloc_retry,
+
+	TP_PROTO(int tries),
+
+	TP_ARGS(tries),
+
+	TP_STRUCT__entry(
+		__field(int, tries)
+	),
+
+	TP_fast_assign(
+		__entry->tries = tries;
+	),
+
+	TP_printk("tries=%d",
+		__entry->tries)
+);
+
+DEFINE_EVENT(alloc_retry, ion_cp_alloc_retry,
+
+	TP_PROTO(int tries),
+
+	TP_ARGS(tries)
+);
+
+DEFINE_EVENT(alloc_retry, migrate_retry,
+
+	TP_PROTO(int tries),
+
+	TP_ARGS(tries)
+);
+
+DEFINE_EVENT(alloc_retry, dma_alloc_contiguous_retry,
+
+	TP_PROTO(int tries),
+
+	TP_ARGS(tries)
+);
+
+DECLARE_EVENT_CLASS(migrate_pages,
+
+	TP_PROTO(int mode),
+
+	TP_ARGS(mode),
+
+	TP_STRUCT__entry(
+		__field(int, mode)
+	),
+
+	TP_fast_assign(
+		__entry->mode = mode;
+	),
+
+	TP_printk("mode=%d",
+		__entry->mode)
+);
+
+DEFINE_EVENT(migrate_pages, migrate_pages_start,
+
+	TP_PROTO(int mode),
+
+	TP_ARGS(mode)
+);
+
+DEFINE_EVENT(migrate_pages, migrate_pages_end,
+
+	TP_PROTO(int mode),
+
+	TP_ARGS(mode)
+);
+
+DECLARE_EVENT_CLASS(ion_alloc_pages,
+
+	TP_PROTO(gfp_t gfp_flags,
+		unsigned int order),
+
+	TP_ARGS(gfp_flags, order),
+
+	TP_STRUCT__entry(
+		__field(gfp_t, gfp_flags)
+		__field(unsigned int, order)
+		),
+
+	TP_fast_assign(
+		__entry->gfp_flags = gfp_flags;
+		__entry->order = order;
+		),
+
+	TP_printk("gfp_flags=%s order=%d",
+		show_gfp_flags(__entry->gfp_flags),
+		__entry->order)
+	);
+
+DEFINE_EVENT(ion_alloc_pages, alloc_pages_iommu_start,
+	TP_PROTO(gfp_t gfp_flags,
+		unsigned int order),
+
+	TP_ARGS(gfp_flags, order)
+	);
+
+DEFINE_EVENT(ion_alloc_pages, alloc_pages_iommu_end,
+	TP_PROTO(gfp_t gfp_flags,
+		unsigned int order),
+
+	TP_ARGS(gfp_flags, order)
+	);
+
+DEFINE_EVENT(ion_alloc_pages, alloc_pages_iommu_fail,
+	TP_PROTO(gfp_t gfp_flags,
+		unsigned int order),
+
+	TP_ARGS(gfp_flags, order)
+	);
+
+DEFINE_EVENT(ion_alloc_pages, alloc_pages_sys_start,
+	TP_PROTO(gfp_t gfp_flags,
+		unsigned int order),
+
+	TP_ARGS(gfp_flags, order)
+	);
+
+DEFINE_EVENT(ion_alloc_pages, alloc_pages_sys_end,
+	TP_PROTO(gfp_t gfp_flags,
+		unsigned int order),
+
+	TP_ARGS(gfp_flags, order)
+	);
+
+DEFINE_EVENT(ion_alloc_pages, alloc_pages_sys_fail,
+	TP_PROTO(gfp_t gfp_flags,
+		unsigned int order),
+
+	TP_ARGS(gfp_flags, order)
+
+	);
+
+DECLARE_EVENT_CLASS(smmu_map,
+
+	TP_PROTO(unsigned long va,
+		phys_addr_t pa,
+		unsigned long chunk_size,
+		size_t len),
+
+	TP_ARGS(va, pa, chunk_size, len),
+
+	TP_STRUCT__entry(
+		__field(unsigned long, va)
+		__field(phys_addr_t, pa)
+		__field(unsigned long, chunk_size)
+		__field(size_t, len)
+		),
+
+	TP_fast_assign(
+		__entry->va = va;
+		__entry->pa = pa;
+		__entry->chunk_size = chunk_size;
+		__entry->len = len;
+		),
+
+	TP_printk("v_addr=%p p_addr=%pa chunk_size=0x%lu len=%zu",
+		(void *)__entry->va,
+		&__entry->pa,
+		__entry->chunk_size,
+		__entry->len)
+	);
+
+DEFINE_EVENT(smmu_map, iommu_map_range,
+	TP_PROTO(unsigned long va,
+		phys_addr_t pa,
+		unsigned long chunk_size,
+		size_t len),
+
+	TP_ARGS(va, pa, chunk_size, len)
+	);
+
+DECLARE_EVENT_CLASS(ion_secure_cma_add_to_pool,
+
+	TP_PROTO(unsigned long len,
+		 int pool_total,
+		 bool is_prefetch),
+
+	TP_ARGS(len, pool_total, is_prefetch),
+
+	TP_STRUCT__entry(
+		__field(unsigned long, len)
+		__field(int, pool_total)
+		__field(bool, is_prefetch)
+		),
+
+	TP_fast_assign(
+		__entry->len = len;
+		__entry->pool_total = pool_total;
+		__entry->is_prefetch = is_prefetch;
+		),
+
+	TP_printk("len %lx, pool total %x is_prefetch %d",
+		__entry->len,
+		__entry->pool_total,
+		__entry->is_prefetch)
+	);
+
+DEFINE_EVENT(ion_secure_cma_add_to_pool, ion_secure_cma_add_to_pool_start,
+	TP_PROTO(unsigned long len,
+		int pool_total,
+		bool is_prefetch),
+
+	TP_ARGS(len, pool_total, is_prefetch)
+	);
+
+DEFINE_EVENT(ion_secure_cma_add_to_pool, ion_secure_cma_add_to_pool_end,
+	TP_PROTO(unsigned long len,
+		int pool_total,
+		bool is_prefetch),
+
+	TP_ARGS(len, pool_total, is_prefetch)
+	);
+
+DECLARE_EVENT_CLASS(ion_secure_cma_shrink_pool,
+
+	TP_PROTO(unsigned long drained_size,
+		 unsigned long skipped_size),
+
+	TP_ARGS(drained_size, skipped_size),
+
+	TP_STRUCT__entry(
+		__field(unsigned long, drained_size)
+		__field(unsigned long, skipped_size)
+		),
+
+	TP_fast_assign(
+		__entry->drained_size = drained_size;
+		__entry->skipped_size = skipped_size;
+		),
+
+	TP_printk("drained size %lx, skipped size %lx",
+		__entry->drained_size,
+		__entry->skipped_size)
+	);
+
+DEFINE_EVENT(ion_secure_cma_shrink_pool, ion_secure_cma_shrink_pool_start,
+	TP_PROTO(unsigned long drained_size,
+		 unsigned long skipped_size),
+
+	TP_ARGS(drained_size, skipped_size)
+	);
+
+DEFINE_EVENT(ion_secure_cma_shrink_pool, ion_secure_cma_shrink_pool_end,
+	TP_PROTO(unsigned long drained_size,
+		 unsigned long skipped_size),
+
+	TP_ARGS(drained_size, skipped_size)
+	);
+
+TRACE_EVENT(ion_prefetching,
+
+	TP_PROTO(unsigned long len),
+
+	TP_ARGS(len),
+
+	TP_STRUCT__entry(
+		__field(unsigned long, len)
+		),
+
+	TP_fast_assign(
+		__entry->len = len;
+		),
+
+	TP_printk("prefetch size %lx",
+		__entry->len)
+	);
+
+DECLARE_EVENT_CLASS(ion_secure_cma_allocate,
+
+	TP_PROTO(const char *heap_name,
+		unsigned long len,
+		unsigned long align,
+		unsigned long flags),
+
+	TP_ARGS(heap_name, len, align, flags),
+
+	TP_STRUCT__entry(
+		__field(const char *, heap_name)
+		__field(unsigned long, len)
+		__field(unsigned long, align)
+		__field(unsigned long, flags)
+		),
+
+	TP_fast_assign(
+		__entry->heap_name = heap_name;
+		__entry->len = len;
+		__entry->align = align;
+		__entry->flags = flags;
+		),
+
+	TP_printk("heap_name=%s len=%lx align=%lx flags=%lx",
+		__entry->heap_name,
+		__entry->len,
+		__entry->align,
+		__entry->flags)
+	);
+
+DEFINE_EVENT(ion_secure_cma_allocate, ion_secure_cma_allocate_start,
+	TP_PROTO(const char *heap_name,
+		unsigned long len,
+		unsigned long align,
+		unsigned long flags),
+
+	TP_ARGS(heap_name, len, align, flags)
+	);
+
+DEFINE_EVENT(ion_secure_cma_allocate, ion_secure_cma_allocate_end,
+	TP_PROTO(const char *heap_name,
+		unsigned long len,
+		unsigned long align,
+		unsigned long flags),
+
+	TP_ARGS(heap_name, len, align, flags)
+	);
+
+DECLARE_EVENT_CLASS(ion_cp_secure_buffer,
+
+	TP_PROTO(const char *heap_name,
+		unsigned long len,
+		unsigned long align,
+		unsigned long flags),
+
+	TP_ARGS(heap_name, len, align, flags),
+
+	TP_STRUCT__entry(
+		__field(const char *, heap_name)
+		__field(unsigned long, len)
+		__field(unsigned long, align)
+		__field(unsigned long, flags)
+		),
+
+	TP_fast_assign(
+		__entry->heap_name = heap_name;
+		__entry->len = len;
+		__entry->align = align;
+		__entry->flags = flags;
+		),
+
+	TP_printk("heap_name=%s len=%lx align=%lx flags=%lx",
+		__entry->heap_name,
+		__entry->len,
+		__entry->align,
+		__entry->flags)
+	);
+
+DEFINE_EVENT(ion_cp_secure_buffer, ion_cp_secure_buffer_start,
+	TP_PROTO(const char *heap_name,
+		unsigned long len,
+		unsigned long align,
+		unsigned long flags),
+
+	TP_ARGS(heap_name, len, align, flags)
+	);
+
+DEFINE_EVENT(ion_cp_secure_buffer, ion_cp_secure_buffer_end,
+	TP_PROTO(const char *heap_name,
+		unsigned long len,
+		unsigned long align,
+		unsigned long flags),
+
+	TP_ARGS(heap_name, len, align, flags)
+	);
+
+DECLARE_EVENT_CLASS(iommu_sec_ptbl_map_range,
+
+	TP_PROTO(int sec_id,
+		int num,
+		unsigned long va,
+		unsigned int pa,
+		size_t len),
+
+	TP_ARGS(sec_id, num, va, pa, len),
+
+	TP_STRUCT__entry(
+		__field(int, sec_id)
+		__field(int, num)
+		__field(unsigned long, va)
+		__field(unsigned int, pa)
+		__field(size_t, len)
+	),
+
+	TP_fast_assign(
+		__entry->sec_id = sec_id;
+		__entry->num = num;
+		__entry->va = va;
+		__entry->pa = pa;
+		__entry->len = len;
+	),
+
+	TP_printk("sec_id=%d num=%d va=%lx pa=%u len=%zu",
+		__entry->sec_id,
+		__entry->num,
+		__entry->va,
+		__entry->pa,
+		__entry->len)
+	);
+
+DEFINE_EVENT(iommu_sec_ptbl_map_range, iommu_sec_ptbl_map_range_start,
+
+	TP_PROTO(int sec_id,
+		int num,
+		unsigned long va,
+		unsigned int pa,
+		size_t len),
+
+	TP_ARGS(sec_id, num, va, pa, len)
+	);
+
+DEFINE_EVENT(iommu_sec_ptbl_map_range, iommu_sec_ptbl_map_range_end,
+
+	TP_PROTO(int sec_id,
+		int num,
+		unsigned long va,
+		unsigned int pa,
+		size_t len),
+
+	TP_ARGS(sec_id, num, va, pa, len)
+	);
 #endif /* _TRACE_KMEM_H */
 
 /* This part must be outside protection */
diff -ruw linux-4.4.115/include/trace/events/migrate.h linux-4.4.115-fbx/include/trace/events/migrate.h
--- linux-4.4.115/include/trace/events/migrate.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/trace/events/migrate.h	2019-01-22 16:16:28.523291866 +0100
@@ -96,6 +96,27 @@
 		__entry->dst_nid,
 		__entry->nr_pages)
 );
+
+TRACE_EVENT(mm_migrate_pages_start,
+
+	TP_PROTO(enum migrate_mode mode, int reason),
+
+	TP_ARGS(mode, reason),
+
+	TP_STRUCT__entry(
+		__field(enum migrate_mode, mode)
+		__field(int, reason)
+	),
+
+	TP_fast_assign(
+		__entry->mode	= mode;
+		__entry->reason	= reason;
+	),
+
+	TP_printk("mode=%s reason=%s",
+		__print_symbolic(__entry->mode, MIGRATE_MODE),
+		__print_symbolic(__entry->reason, MIGRATE_REASON))
+);
 #endif /* _TRACE_MIGRATE_H */
 
 /* This part must be outside protection */
diff -ruw linux-4.4.115/include/trace/events/net.h linux-4.4.115-fbx/include/trace/events/net.h
--- linux-4.4.115/include/trace/events/net.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/trace/events/net.h	2019-01-22 16:16:28.523291866 +0100
@@ -57,7 +57,7 @@
 		__entry->gso_type = skb_shinfo(skb)->gso_type;
 	),
 
-	TP_printk("dev=%s queue_mapping=%u skbaddr=%p vlan_tagged=%d vlan_proto=0x%04x vlan_tci=0x%04x protocol=0x%04x ip_summed=%d len=%u data_len=%u network_offset=%d transport_offset_valid=%d transport_offset=%d tx_flags=%d gso_size=%d gso_segs=%d gso_type=%#x",
+	TP_printk("dev=%s queue_mapping=%u skbaddr=%pK vlan_tagged=%d vlan_proto=0x%04x vlan_tci=0x%04x protocol=0x%04x ip_summed=%d len=%u data_len=%u network_offset=%d transport_offset_valid=%d transport_offset=%d tx_flags=%d gso_size=%d gso_segs=%d gso_type=%#x",
 		  __get_str(name), __entry->queue_mapping, __entry->skbaddr,
 		  __entry->vlan_tagged, __entry->vlan_proto, __entry->vlan_tci,
 		  __entry->protocol, __entry->ip_summed, __entry->len,
@@ -90,7 +90,7 @@
 		__assign_str(name, dev->name);
 	),
 
-	TP_printk("dev=%s skbaddr=%p len=%u rc=%d",
+	TP_printk("dev=%s skbaddr=%pK len=%u rc=%d",
 		__get_str(name), __entry->skbaddr, __entry->len, __entry->rc)
 );
 
@@ -112,7 +112,7 @@
 		__assign_str(name, skb->dev->name);
 	),
 
-	TP_printk("dev=%s skbaddr=%p len=%u",
+	TP_printk("dev=%s skbaddr=%pK len=%u",
 		__get_str(name), __entry->skbaddr, __entry->len)
 )
 
@@ -191,7 +191,7 @@
 		__entry->gso_type = skb_shinfo(skb)->gso_type;
 	),
 
-	TP_printk("dev=%s napi_id=%#x queue_mapping=%u skbaddr=%p vlan_tagged=%d vlan_proto=0x%04x vlan_tci=0x%04x protocol=0x%04x ip_summed=%d hash=0x%08x l4_hash=%d len=%u data_len=%u truesize=%u mac_header_valid=%d mac_header=%d nr_frags=%d gso_size=%d gso_type=%#x",
+	TP_printk("dev=%s napi_id=%#x queue_mapping=%u skbaddr=%pK vlan_tagged=%d vlan_proto=0x%04x vlan_tci=0x%04x protocol=0x%04x ip_summed=%d hash=0x%08x l4_hash=%d len=%u data_len=%u truesize=%u mac_header_valid=%d mac_header=%d nr_frags=%d gso_size=%d gso_type=%#x",
 		  __get_str(name), __entry->napi_id, __entry->queue_mapping,
 		  __entry->skbaddr, __entry->vlan_tagged, __entry->vlan_proto,
 		  __entry->vlan_tci, __entry->protocol, __entry->ip_summed,
diff -ruw linux-4.4.115/include/trace/events/power.h linux-4.4.115-fbx/include/trace/events/power.h
--- linux-4.4.115/include/trace/events/power.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/trace/events/power.h	2019-01-22 16:16:28.523291866 +0100
@@ -120,6 +120,80 @@
 	TP_ARGS(frequency, cpu_id)
 );
 
+TRACE_EVENT(cpu_frequency_limits,
+
+	TP_PROTO(unsigned int max_freq, unsigned int min_freq,
+		unsigned int cpu_id),
+
+	TP_ARGS(max_freq, min_freq, cpu_id),
+
+	TP_STRUCT__entry(
+		__field(	u32,		min_freq	)
+		__field(	u32,		max_freq	)
+		__field(	u32,		cpu_id		)
+	),
+
+	TP_fast_assign(
+		__entry->min_freq = min_freq;
+		__entry->max_freq = max_freq;
+		__entry->cpu_id = cpu_id;
+	),
+
+	TP_printk("min=%lu max=%lu cpu_id=%lu",
+		  (unsigned long)__entry->min_freq,
+		  (unsigned long)__entry->max_freq,
+		  (unsigned long)__entry->cpu_id)
+);
+
+TRACE_EVENT(cpu_frequency_switch_start,
+
+	TP_PROTO(unsigned int start_freq, unsigned int end_freq,
+		 unsigned int cpu_id),
+
+	TP_ARGS(start_freq, end_freq, cpu_id),
+
+	TP_STRUCT__entry(
+		__field(	u32,		start_freq	)
+		__field(	u32,		end_freq	)
+		__field(	u32,		cpu_id		)
+	),
+
+	TP_fast_assign(
+		__entry->start_freq = start_freq;
+		__entry->end_freq = end_freq;
+		__entry->cpu_id = cpu_id;
+	),
+
+	TP_printk("start=%lu end=%lu cpu_id=%lu",
+		  (unsigned long)__entry->start_freq,
+		  (unsigned long)__entry->end_freq,
+		  (unsigned long)__entry->cpu_id)
+);
+
+TRACE_EVENT(cpu_frequency_switch_end,
+
+	TP_PROTO(unsigned int cpu_id),
+
+	TP_ARGS(cpu_id),
+
+	TP_STRUCT__entry(
+		__field(	u32,		cpu_id		)
+	),
+
+	TP_fast_assign(
+		__entry->cpu_id = cpu_id;
+	),
+
+	TP_printk("cpu_id=%lu", (unsigned long)__entry->cpu_id)
+);
+	
+DEFINE_EVENT(cpu, cpu_capacity,
+
+	TP_PROTO(unsigned int capacity, unsigned int cpu_id),
+
+	TP_ARGS(capacity, cpu_id)
+);
+
 TRACE_EVENT(device_pm_callback_start,
 
 	TP_PROTO(struct device *dev, const char *pm_ops, int event),
@@ -230,6 +304,7 @@
  * The clock events are used for clock enable/disable and for
  *  clock rate change
  */
+#if defined(CONFIG_COMMON_CLK_MSM)
 DECLARE_EVENT_CLASS(clock,
 
 	TP_PROTO(const char *name, unsigned int state, unsigned int cpu_id),
@@ -273,6 +348,62 @@
 	TP_ARGS(name, state, cpu_id)
 );
 
+DEFINE_EVENT(clock, clock_set_rate_complete,
+
+	TP_PROTO(const char *name, unsigned int state, unsigned int cpu_id),
+
+	TP_ARGS(name, state, cpu_id)
+);
+
+TRACE_EVENT(clock_set_parent,
+
+	TP_PROTO(const char *name, const char *parent_name),
+
+	TP_ARGS(name, parent_name),
+
+	TP_STRUCT__entry(
+		__string(       name,           name            )
+		__string(       parent_name,    parent_name     )
+	),
+
+	TP_fast_assign(
+		__assign_str(name, name);
+		__assign_str(parent_name, parent_name);
+	),
+
+	TP_printk("%s parent=%s", __get_str(name), __get_str(parent_name))
+);
+
+TRACE_EVENT(clock_state,
+
+	TP_PROTO(const char *name, unsigned long prepare_count,
+		unsigned long count, unsigned long rate,
+		unsigned int vdd_level),
+
+	TP_ARGS(name, prepare_count, count, rate, vdd_level),
+
+	TP_STRUCT__entry(
+		__string(name,			name)
+		__field(unsigned long,		prepare_count)
+		__field(unsigned long,		count)
+		__field(unsigned long,		rate)
+		__field(unsigned int,		vdd_level)
+	),
+
+	TP_fast_assign(
+		__assign_str(name, name);
+		__entry->prepare_count = prepare_count;
+		__entry->count = count;
+		__entry->rate = rate;
+		__entry->vdd_level = vdd_level;
+	),
+
+	TP_printk("%s\tprepare:enable cnt [%lu:%lu]\trate: vdd level [%lu:%u]",
+			__get_str(name), __entry->prepare_count,
+			__entry->count, __entry->rate, __entry->vdd_level)
+);
+#endif /* CONFIG_COMMON_CLK_MSM */
+
 /*
  * The power domain events are used for power domains transitions
  */
@@ -476,6 +607,494 @@
 
 	TP_ARGS(name, type, new_value)
 );
+
+TRACE_EVENT(bw_hwmon_meas,
+
+	TP_PROTO(const char *name, unsigned long mbps,
+		 unsigned long us, int wake),
+
+	TP_ARGS(name, mbps, us, wake),
+
+	TP_STRUCT__entry(
+		__string(	name,			name	)
+		__field(	unsigned long,		mbps	)
+		__field(	unsigned long,		us	)
+		__field(	int,			wake	)
+	),
+
+	TP_fast_assign(
+		__assign_str(name, name);
+		__entry->mbps = mbps;
+		__entry->us = us;
+		__entry->wake = wake;
+	),
+
+	TP_printk("dev: %s, mbps = %lu, us = %lu, wake = %d",
+		__get_str(name),
+		__entry->mbps,
+		__entry->us,
+		__entry->wake)
+);
+
+TRACE_EVENT(bw_hwmon_update,
+
+	TP_PROTO(const char *name, unsigned long mbps, unsigned long freq,
+		 unsigned long up_thres, unsigned long down_thres),
+
+	TP_ARGS(name, mbps, freq, up_thres, down_thres),
+
+	TP_STRUCT__entry(
+		__string(	name,			name		)
+		__field(	unsigned long,		mbps		)
+		__field(	unsigned long,		freq		)
+		__field(	unsigned long,		up_thres	)
+		__field(	unsigned long,		down_thres	)
+	),
+
+	TP_fast_assign(
+		__assign_str(name, name);
+		__entry->mbps = mbps;
+		__entry->freq = freq;
+		__entry->up_thres = up_thres;
+		__entry->down_thres = down_thres;
+	),
+
+	TP_printk("dev: %s, mbps = %lu, freq = %lu, up = %lu, down = %lu",
+		__get_str(name),
+		__entry->mbps,
+		__entry->freq,
+		__entry->up_thres,
+		__entry->down_thres)
+);
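+
+/*
+ * Usage sketch for the two bw_hwmon events (the helper and variable
+ * names are assumptions; only the trace_*() calls come from the
+ * definitions above): a bandwidth-monitor governor first logs what it
+ * measured, then logs its frequency decision together with the
+ * thresholds that re-arm the hardware counters:
+ *
+ *	trace_bw_hwmon_meas(dev_name(dev), meas_mbps, sample_us, wake);
+ *	freq = mbps_to_freq(meas_mbps);
+ *	trace_bw_hwmon_update(dev_name(dev), meas_mbps, freq,
+ *			      up_thres, down_thres);
+ */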
+
+TRACE_EVENT(cache_hwmon_meas,
+	TP_PROTO(const char *name, unsigned long high_mrps,
+		 unsigned long med_mrps, unsigned long low_mrps,
+		 unsigned int busy_percent, unsigned int us),
+	TP_ARGS(name, high_mrps, med_mrps, low_mrps, busy_percent, us),
+	TP_STRUCT__entry(
+		__string(name, name)
+		__field(unsigned long, high_mrps)
+		__field(unsigned long, med_mrps)
+		__field(unsigned long, low_mrps)
+		__field(unsigned long, total_mrps)
+		__field(unsigned int, busy_percent)
+		__field(unsigned int, us)
+	),
+	TP_fast_assign(
+		__assign_str(name, name);
+		__entry->high_mrps = high_mrps;
+		__entry->med_mrps = med_mrps;
+		__entry->low_mrps = low_mrps;
+		__entry->total_mrps = high_mrps + med_mrps + low_mrps;
+		__entry->busy_percent = busy_percent;
+		__entry->us = us;
+	),
+	TP_printk("dev=%s H=%lu M=%lu L=%lu T=%lu busy_pct=%u period=%u",
+		  __get_str(name), __entry->high_mrps, __entry->med_mrps,
+		  __entry->low_mrps, __entry->total_mrps,
+		  __entry->busy_percent, __entry->us)
+);
+
+TRACE_EVENT(cache_hwmon_update,
+	TP_PROTO(const char *name, unsigned long freq_mhz),
+	TP_ARGS(name, freq_mhz),
+	TP_STRUCT__entry(
+		__string(name, name)
+		__field(unsigned long, freq)
+	),
+	TP_fast_assign(
+		__assign_str(name, name);
+		__entry->freq = freq_mhz;
+	),
+	TP_printk("dev=%s freq=%lu", __get_str(name), __entry->freq)
+);
+
+TRACE_EVENT(memlat_dev_meas,
+
+	TP_PROTO(const char *name, unsigned int dev_id, unsigned long inst,
+		 unsigned long mem, unsigned long freq, unsigned int ratio),
+
+	TP_ARGS(name, dev_id, inst, mem, freq, ratio),
+
+	TP_STRUCT__entry(
+		__string(name, name)
+		__field(unsigned int, dev_id)
+		__field(unsigned long, inst)
+		__field(unsigned long, mem)
+		__field(unsigned long, freq)
+		__field(unsigned int, ratio)
+	),
+
+	TP_fast_assign(
+		__assign_str(name, name);
+		__entry->dev_id = dev_id;
+		__entry->inst = inst;
+		__entry->mem = mem;
+		__entry->freq = freq;
+		__entry->ratio = ratio;
+	),
+
+	TP_printk("dev: %s, id=%u, inst=%lu, mem=%lu, freq=%lu, ratio=%u",
+		__get_str(name),
+		__entry->dev_id,
+		__entry->inst,
+		__entry->mem,
+		__entry->freq,
+		__entry->ratio)
+);
+
+TRACE_EVENT(memlat_dev_update,
+
+	TP_PROTO(const char *name, unsigned int dev_id, unsigned long inst,
+		 unsigned long mem, unsigned long freq, unsigned long vote),
+
+	TP_ARGS(name, dev_id, inst, mem, freq, vote),
+
+	TP_STRUCT__entry(
+		__string(name, name)
+		__field(unsigned int, dev_id)
+		__field(unsigned long, inst)
+		__field(unsigned long, mem)
+		__field(unsigned long, freq)
+		__field(unsigned long, vote)
+	),
+
+	TP_fast_assign(
+		__assign_str(name, name);
+		__entry->dev_id = dev_id;
+		__entry->inst = inst;
+		__entry->mem = mem;
+		__entry->freq = freq;
+		__entry->vote = vote;
+	),
+
+	TP_printk("dev: %s, id=%u, inst=%lu, mem=%lu, freq=%lu, vote=%lu",
+		__get_str(name),
+		__entry->dev_id,
+		__entry->inst,
+		__entry->mem,
+		__entry->freq,
+		__entry->vote)
+);
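+
+/*
+ * Reading the two memlat events above: "inst" and "mem" are instruction
+ * and memory-access counts sampled over the same interval, and "ratio",
+ * as the field names suggest, is the inst/mem quotient. A low ratio
+ * indicates a memory-latency-bound workload, so the governor raises its
+ * frequency "vote". Worked example: inst=4000000 and mem=100000 give
+ * ratio=40; against an illustrative threshold of 50, this sample would
+ * justify a higher vote.
+ */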
+
+DECLARE_EVENT_CLASS(kpm_module,
+
+	TP_PROTO(unsigned int managed_cpus, unsigned int max_cpus),
+
+	TP_ARGS(managed_cpus, max_cpus),
+
+	TP_STRUCT__entry(
+		__field(u32, managed_cpus)
+		__field(u32, max_cpus)
+	),
+
+	TP_fast_assign(
+		__entry->managed_cpus = managed_cpus;
+		__entry->max_cpus = max_cpus;
+	),
+
+	TP_printk("managed:%x max_cpus=%u", (unsigned int)__entry->managed_cpus,
+					(unsigned int)__entry->max_cpus)
+);
+
+DEFINE_EVENT(kpm_module, set_max_cpus,
+	TP_PROTO(unsigned int managed_cpus, unsigned int max_cpus),
+	TP_ARGS(managed_cpus, max_cpus)
+);
+
+DEFINE_EVENT(kpm_module, reevaluate_hotplug,
+	TP_PROTO(unsigned int managed_cpus, unsigned int max_cpus),
+	TP_ARGS(managed_cpus, max_cpus)
+);
+
+DECLARE_EVENT_CLASS(kpm_module2,
+
+	TP_PROTO(unsigned int cpu, unsigned int enter_cycle_cnt,
+		unsigned int exit_cycle_cnt,
+		unsigned int io_busy, u64 iowait),
+
+	TP_ARGS(cpu, enter_cycle_cnt, exit_cycle_cnt, io_busy, iowait),
+
+	TP_STRUCT__entry(
+		__field(u32, cpu)
+		__field(u32, enter_cycle_cnt)
+		__field(u32, exit_cycle_cnt)
+		__field(u32, io_busy)
+		__field(u64, iowait)
+	),
+
+	TP_fast_assign(
+		__entry->cpu = cpu;
+		__entry->enter_cycle_cnt = enter_cycle_cnt;
+		__entry->exit_cycle_cnt = exit_cycle_cnt;
+		__entry->io_busy = io_busy;
+		__entry->iowait = iowait;
+	),
+
+	TP_printk("CPU:%u enter_cycles=%u exit_cycles=%u io_busy=%u iowait=%lu",
+		(unsigned int)__entry->cpu,
+		(unsigned int)__entry->enter_cycle_cnt,
+		(unsigned int)__entry->exit_cycle_cnt,
+		(unsigned int)__entry->io_busy,
+		(unsigned long)__entry->iowait)
+);
+
+DEFINE_EVENT(kpm_module2, track_iowait,
+	TP_PROTO(unsigned int cpu, unsigned int enter_cycle_cnt,
+		unsigned int exit_cycle_cnt, unsigned int io_busy, u64 iowait),
+	TP_ARGS(cpu, enter_cycle_cnt, exit_cycle_cnt, io_busy, iowait)
+);
+
+DECLARE_EVENT_CLASS(cpu_modes,
+
+	TP_PROTO(unsigned int cpu, unsigned int max_load,
+		unsigned int single_enter_cycle_cnt,
+		unsigned int single_exit_cycle_cnt,
+		unsigned int total_load, unsigned int multi_enter_cycle_cnt,
+		unsigned int multi_exit_cycle_cnt,
+		unsigned int perf_cl_peak_enter_cycle_cnt,
+		unsigned int perf_cl_peak_exit_cycle_cnt,
+		unsigned int mode,
+		unsigned int cpu_cnt),
+
+	TP_ARGS(cpu, max_load, single_enter_cycle_cnt, single_exit_cycle_cnt,
+		total_load, multi_enter_cycle_cnt, multi_exit_cycle_cnt,
+		perf_cl_peak_enter_cycle_cnt, perf_cl_peak_exit_cycle_cnt, mode,
+		cpu_cnt),
+
+	TP_STRUCT__entry(
+		__field(u32, cpu)
+		__field(u32, max_load)
+		__field(u32, single_enter_cycle_cnt)
+		__field(u32, single_exit_cycle_cnt)
+		__field(u32, total_load)
+		__field(u32, multi_enter_cycle_cnt)
+		__field(u32, multi_exit_cycle_cnt)
+		__field(u32, perf_cl_peak_enter_cycle_cnt)
+		__field(u32, perf_cl_peak_exit_cycle_cnt)
+		__field(u32, mode)
+		__field(u32, cpu_cnt)
+	),
+
+	TP_fast_assign(
+		__entry->cpu = cpu;
+		__entry->max_load = max_load;
+		__entry->single_enter_cycle_cnt = single_enter_cycle_cnt;
+		__entry->single_exit_cycle_cnt = single_exit_cycle_cnt;
+		__entry->total_load = total_load;
+		__entry->multi_enter_cycle_cnt = multi_enter_cycle_cnt;
+		__entry->multi_exit_cycle_cnt = multi_exit_cycle_cnt;
+		__entry->perf_cl_peak_enter_cycle_cnt =
+				perf_cl_peak_enter_cycle_cnt;
+		__entry->perf_cl_peak_exit_cycle_cnt =
+				perf_cl_peak_exit_cycle_cnt;
+		__entry->mode = mode;
+		__entry->cpu_cnt = cpu_cnt;
+	),
+
+	TP_printk("%u:%4u:%4u:%4u:%4u:%4u:%4u:%4u:%4u:%4u:%u",
+		(unsigned int)__entry->cpu, (unsigned int)__entry->max_load,
+		(unsigned int)__entry->single_enter_cycle_cnt,
+		(unsigned int)__entry->single_exit_cycle_cnt,
+		(unsigned int)__entry->total_load,
+		(unsigned int)__entry->multi_enter_cycle_cnt,
+		(unsigned int)__entry->multi_exit_cycle_cnt,
+		(unsigned int)__entry->perf_cl_peak_enter_cycle_cnt,
+		(unsigned int)__entry->perf_cl_peak_exit_cycle_cnt,
+		(unsigned int)__entry->mode,
+		(unsigned int)__entry->cpu_cnt)
+);
+
+DEFINE_EVENT(cpu_modes, cpu_mode_detect,
+	TP_PROTO(unsigned int cpu, unsigned int max_load,
+		unsigned int single_enter_cycle_cnt,
+		unsigned int single_exit_cycle_cnt,
+		unsigned int total_load, unsigned int multi_enter_cycle_cnt,
+		unsigned int multi_exit_cycle_cnt,
+		unsigned int perf_cl_peak_enter_cycle_cnt,
+		unsigned int perf_cl_peak_exit_cycle_cnt,
+		unsigned int mode,
+		unsigned int cpu_cnt),
+	TP_ARGS(cpu, max_load, single_enter_cycle_cnt, single_exit_cycle_cnt,
+		total_load, multi_enter_cycle_cnt, multi_exit_cycle_cnt,
+		perf_cl_peak_enter_cycle_cnt, perf_cl_peak_exit_cycle_cnt,
+		mode, cpu_cnt)
+);
+
+DECLARE_EVENT_CLASS(timer_status,
+	TP_PROTO(unsigned int cpu, unsigned int single_enter_cycles,
+		unsigned int single_enter_cycle_cnt,
+		unsigned int single_exit_cycles,
+		unsigned int single_exit_cycle_cnt,
+		unsigned int multi_enter_cycles,
+		unsigned int multi_enter_cycle_cnt,
+		unsigned int multi_exit_cycles,
+		unsigned int multi_exit_cycle_cnt, unsigned int timer_rate,
+		unsigned int mode),
+	TP_ARGS(cpu, single_enter_cycles, single_enter_cycle_cnt,
+		single_exit_cycles, single_exit_cycle_cnt, multi_enter_cycles,
+		multi_enter_cycle_cnt, multi_exit_cycles,
+		multi_exit_cycle_cnt, timer_rate, mode),
+
+	TP_STRUCT__entry(
+		__field(unsigned int, cpu)
+		__field(unsigned int, single_enter_cycles)
+		__field(unsigned int, single_enter_cycle_cnt)
+		__field(unsigned int, single_exit_cycles)
+		__field(unsigned int, single_exit_cycle_cnt)
+		__field(unsigned int, multi_enter_cycles)
+		__field(unsigned int, multi_enter_cycle_cnt)
+		__field(unsigned int, multi_exit_cycles)
+		__field(unsigned int, multi_exit_cycle_cnt)
+		__field(unsigned int, timer_rate)
+		__field(unsigned int, mode)
+	),
+
+	TP_fast_assign(
+		__entry->cpu = cpu;
+		__entry->single_enter_cycles = single_enter_cycles;
+		__entry->single_enter_cycle_cnt = single_enter_cycle_cnt;
+		__entry->single_exit_cycles = single_exit_cycles;
+		__entry->single_exit_cycle_cnt = single_exit_cycle_cnt;
+		__entry->multi_enter_cycles = multi_enter_cycles;
+		__entry->multi_enter_cycle_cnt = multi_enter_cycle_cnt;
+		__entry->multi_exit_cycles = multi_exit_cycles;
+		__entry->multi_exit_cycle_cnt = multi_exit_cycle_cnt;
+		__entry->timer_rate = timer_rate;
+		__entry->mode = mode;
+	),
+
+	TP_printk("%u:%4u:%4u:%4u:%4u:%4u:%4u:%4u:%4u:%4u:%4u",
+		(unsigned int) __entry->cpu,
+		(unsigned int) __entry->single_enter_cycles,
+		(unsigned int) __entry->single_enter_cycle_cnt,
+		(unsigned int) __entry->single_exit_cycles,
+		(unsigned int) __entry->single_exit_cycle_cnt,
+		(unsigned int) __entry->multi_enter_cycles,
+		(unsigned int) __entry->multi_enter_cycle_cnt,
+		(unsigned int) __entry->multi_exit_cycles,
+		(unsigned int) __entry->multi_exit_cycle_cnt,
+		(unsigned int) __entry->timer_rate,
+		(unsigned int) __entry->mode)
+);
+
+DEFINE_EVENT(timer_status, single_mode_timeout,
+	TP_PROTO(unsigned int cpu, unsigned int single_enter_cycles,
+		unsigned int single_enter_cycle_cnt,
+		unsigned int single_exit_cycles,
+		unsigned int single_exit_cycle_cnt,
+		unsigned int multi_enter_cycles,
+		unsigned int multi_enter_cycle_cnt,
+		unsigned int multi_exit_cycles,
+		unsigned int multi_exit_cycle_cnt, unsigned int timer_rate,
+		unsigned int mode),
+	TP_ARGS(cpu, single_enter_cycles, single_enter_cycle_cnt,
+		single_exit_cycles, single_exit_cycle_cnt, multi_enter_cycles,
+		multi_enter_cycle_cnt, multi_exit_cycles, multi_exit_cycle_cnt,
+		timer_rate, mode)
+);
+
+DEFINE_EVENT(timer_status, single_cycle_exit_timer_start,
+	TP_PROTO(unsigned int cpu, unsigned int single_enter_cycles,
+		unsigned int single_enter_cycle_cnt,
+		unsigned int single_exit_cycles,
+		unsigned int single_exit_cycle_cnt,
+		unsigned int multi_enter_cycles,
+		unsigned int multi_enter_cycle_cnt,
+		unsigned int multi_exit_cycles,
+		unsigned int multi_exit_cycle_cnt, unsigned int timer_rate,
+		unsigned int mode),
+	TP_ARGS(cpu, single_enter_cycles, single_enter_cycle_cnt,
+		single_exit_cycles, single_exit_cycle_cnt, multi_enter_cycles,
+		multi_enter_cycle_cnt, multi_exit_cycles, multi_exit_cycle_cnt,
+		timer_rate, mode)
+);
+
+DEFINE_EVENT(timer_status, single_cycle_exit_timer_stop,
+	TP_PROTO(unsigned int cpu, unsigned int single_enter_cycles,
+		unsigned int single_enter_cycle_cnt,
+		unsigned int single_exit_cycles,
+		unsigned int single_exit_cycle_cnt,
+		unsigned int multi_enter_cycles,
+		unsigned int multi_enter_cycle_cnt,
+		unsigned int multi_exit_cycles,
+		unsigned int multi_exit_cycle_cnt, unsigned int timer_rate,
+		unsigned int mode),
+	TP_ARGS(cpu, single_enter_cycles, single_enter_cycle_cnt,
+		single_exit_cycles, single_exit_cycle_cnt, multi_enter_cycles,
+		multi_enter_cycle_cnt, multi_exit_cycles, multi_exit_cycle_cnt,
+		timer_rate, mode)
+);
+
+DECLARE_EVENT_CLASS(perf_cl_peak_timer_status,
+	TP_PROTO(unsigned int cpu, unsigned int perf_cl_peak_enter_cycles,
+		unsigned int perf_cl_peak_enter_cycle_cnt,
+		unsigned int perf_cl_peak_exit_cycles,
+		unsigned int perf_cl_peak_exit_cycle_cnt,
+		unsigned int timer_rate,
+		unsigned int mode),
+	TP_ARGS(cpu, perf_cl_peak_enter_cycles, perf_cl_peak_enter_cycle_cnt,
+		perf_cl_peak_exit_cycles, perf_cl_peak_exit_cycle_cnt,
+		timer_rate, mode),
+
+	TP_STRUCT__entry(
+		__field(unsigned int, cpu)
+		__field(unsigned int, perf_cl_peak_enter_cycles)
+		__field(unsigned int, perf_cl_peak_enter_cycle_cnt)
+		__field(unsigned int, perf_cl_peak_exit_cycles)
+		__field(unsigned int, perf_cl_peak_exit_cycle_cnt)
+		__field(unsigned int, timer_rate)
+		__field(unsigned int, mode)
+	),
+
+	TP_fast_assign(
+		__entry->cpu = cpu;
+		__entry->perf_cl_peak_enter_cycles = perf_cl_peak_enter_cycles;
+		__entry->perf_cl_peak_enter_cycle_cnt =
+				perf_cl_peak_enter_cycle_cnt;
+		__entry->perf_cl_peak_exit_cycles = perf_cl_peak_exit_cycles;
+		__entry->perf_cl_peak_exit_cycle_cnt =
+				perf_cl_peak_exit_cycle_cnt;
+		__entry->timer_rate = timer_rate;
+		__entry->mode = mode;
+	),
+
+	TP_printk("%u:%4u:%4u:%4u:%4u:%4u:%4u",
+		(unsigned int) __entry->cpu,
+		(unsigned int) __entry->perf_cl_peak_enter_cycles,
+		(unsigned int) __entry->perf_cl_peak_enter_cycle_cnt,
+		(unsigned int) __entry->perf_cl_peak_exit_cycles,
+		(unsigned int) __entry->perf_cl_peak_exit_cycle_cnt,
+		(unsigned int) __entry->timer_rate,
+		(unsigned int) __entry->mode)
+);
+
+DEFINE_EVENT(perf_cl_peak_timer_status, perf_cl_peak_exit_timer_start,
+	TP_PROTO(unsigned int cpu, unsigned int perf_cl_peak_enter_cycles,
+		unsigned int perf_cl_peak_enter_cycle_cnt,
+		unsigned int perf_cl_peak_exit_cycles,
+		unsigned int perf_cl_peak_exit_cycle_cnt,
+		unsigned int timer_rate,
+		unsigned int mode),
+	TP_ARGS(cpu, perf_cl_peak_enter_cycles, perf_cl_peak_enter_cycle_cnt,
+		perf_cl_peak_exit_cycles, perf_cl_peak_exit_cycle_cnt,
+		timer_rate, mode)
+);
+
+DEFINE_EVENT(perf_cl_peak_timer_status, perf_cl_peak_exit_timer_stop,
+	TP_PROTO(unsigned int cpu, unsigned int perf_cl_peak_enter_cycles,
+		unsigned int perf_cl_peak_enter_cycle_cnt,
+		unsigned int perf_cl_peak_exit_cycles,
+		unsigned int perf_cl_peak_exit_cycle_cnt,
+		unsigned int timer_rate,
+		unsigned int mode),
+	TP_ARGS(cpu, perf_cl_peak_enter_cycles, perf_cl_peak_enter_cycle_cnt,
+		perf_cl_peak_exit_cycles, perf_cl_peak_exit_cycle_cnt,
+		timer_rate, mode)
+);
+
 #endif /* _TRACE_POWER_H */
 
 /* This part must be outside protection */
diff -ruw linux-4.4.115/include/trace/events/sched.h linux-4.4.115-fbx/include/trace/events/sched.h
--- linux-4.4.115/include/trace/events/sched.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/trace/events/sched.h	2019-10-29 09:26:25.537221713 +0100
@@ -8,6 +8,8 @@
 #include <linux/tracepoint.h>
 #include <linux/binfmts.h>
 
+struct rq;
+
 /*
  * Tracepoint for calling kthread_stop, performed to end a kthread:
  */
@@ -51,6 +53,653 @@
 );
 
 /*
+ * Tracepoint for task enqueue/dequeue:
+ */
+TRACE_EVENT(sched_enq_deq_task,
+
+	TP_PROTO(struct task_struct *p, bool enqueue, unsigned int cpus_allowed),
+
+	TP_ARGS(p, enqueue, cpus_allowed),
+
+	TP_STRUCT__entry(
+		__array(	char,	comm,	TASK_COMM_LEN	)
+		__field(	pid_t,	pid			)
+		__field(	int,	prio			)
+		__field(	int,	cpu			)
+		__field(	bool,	enqueue			)
+		__field(unsigned int,	nr_running		)
+		__field(unsigned long,	cpu_load		)
+		__field(unsigned int,	rt_nr_running		)
+		__field(unsigned int,	cpus_allowed		)
+#ifdef CONFIG_SCHED_HMP
+		__field(unsigned int,	demand			)
+		__field(unsigned int,	pred_demand		)
+#endif
+	),
+
+	TP_fast_assign(
+		memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
+		__entry->pid		= p->pid;
+		__entry->prio		= p->prio;
+		__entry->cpu		= task_cpu(p);
+		__entry->enqueue	= enqueue;
+		__entry->nr_running	= task_rq(p)->nr_running;
+		__entry->cpu_load	= task_rq(p)->cpu_load[0];
+		__entry->rt_nr_running	= task_rq(p)->rt.rt_nr_running;
+		__entry->cpus_allowed	= cpus_allowed;
+#ifdef CONFIG_SCHED_HMP
+		__entry->demand		= p->ravg.demand;
+		__entry->pred_demand	= p->ravg.pred_demand;
+#endif
+	),
+
+	TP_printk("cpu=%d %s comm=%s pid=%d prio=%d nr_running=%u cpu_load=%lu rt_nr_running=%u affine=%x"
+#ifdef CONFIG_SCHED_HMP
+		 " demand=%u pred_demand=%u"
+#endif
+			, __entry->cpu,
+			__entry->enqueue ? "enqueue" : "dequeue",
+			__entry->comm, __entry->pid,
+			__entry->prio, __entry->nr_running,
+			__entry->cpu_load, __entry->rt_nr_running, __entry->cpus_allowed
+#ifdef CONFIG_SCHED_HMP
+			, __entry->demand, __entry->pred_demand
+#endif
+			)
+);
+
+#ifdef CONFIG_SCHED_HMP
+
+struct group_cpu_time;
+struct migration_sum_data;
+extern const char *task_event_names[];
+extern const char *migrate_type_names[];
+
+TRACE_EVENT(sched_task_load,
+
+	TP_PROTO(struct task_struct *p, bool boost, int reason,
+		 bool sync, bool need_idle, u32 flags, int best_cpu),
+
+	TP_ARGS(p, boost, reason, sync, need_idle, flags, best_cpu),
+
+	TP_STRUCT__entry(
+		__array(	char,	comm,	TASK_COMM_LEN	)
+		__field(	pid_t,	pid			)
+		__field(unsigned int,	demand			)
+		__field(	bool,	boost			)
+		__field(	int,	reason			)
+		__field(	bool,	sync			)
+		__field(	bool,	need_idle		)
+		__field(	u32,	flags			)
+		__field(	int,	best_cpu		)
+		__field(	u64,	latency			)
+		__field(	int,	grp_id			)
+		__field(	u64,	avg_burst		)
+		__field(	u64,	avg_sleep		)
+	),
+
+	TP_fast_assign(
+		memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
+		__entry->pid		= p->pid;
+		__entry->demand		= p->ravg.demand;
+		__entry->boost		= boost;
+		__entry->reason		= reason;
+		__entry->sync		= sync;
+		__entry->need_idle	= need_idle;
+		__entry->flags		= flags;
+		__entry->best_cpu	= best_cpu;
+		__entry->latency	= p->state == TASK_WAKING ?
+						      sched_ktime_clock() -
+						      p->ravg.mark_start : 0;
+		__entry->grp_id		= p->grp ? p->grp->id : 0;
+		__entry->avg_burst	= p->ravg.avg_burst;
+		__entry->avg_sleep	= p->ravg.avg_sleep_time;
+	),
+
+	TP_printk("%d (%s): demand=%u boost=%d reason=%d sync=%d need_idle=%d flags=%x grp=%d best_cpu=%d latency=%llu avg_burst=%llu avg_sleep=%llu",
+		__entry->pid, __entry->comm, __entry->demand,
+		__entry->boost, __entry->reason, __entry->sync,
+		__entry->need_idle, __entry->flags, __entry->grp_id,
+		__entry->best_cpu, __entry->latency, __entry->avg_burst,
+		__entry->avg_sleep)
+);
+
+TRACE_EVENT(sched_set_preferred_cluster,
+
+	TP_PROTO(struct related_thread_group *grp, u64 total_demand),
+
+	TP_ARGS(grp, total_demand),
+
+	TP_STRUCT__entry(
+		__field(	int,	id			)
+		__field(	u64,	demand			)
+		__field(	int,	cluster_first_cpu	)
+	),
+
+	TP_fast_assign(
+		__entry->id			= grp->id;
+		__entry->demand			= total_demand;
+		__entry->cluster_first_cpu	= grp->preferred_cluster ?
+							cluster_first_cpu(grp->preferred_cluster)
+							: -1;
+	),
+
+	TP_printk("group_id %d total_demand %llu preferred_cluster_first_cpu %d",
+			__entry->id, __entry->demand,
+			__entry->cluster_first_cpu)
+);
+
+DECLARE_EVENT_CLASS(sched_cpu_load,
+
+	TP_PROTO(struct rq *rq, int idle, u64 irqload, unsigned int power_cost, int temp),
+
+	TP_ARGS(rq, idle, irqload, power_cost, temp),
+
+	TP_STRUCT__entry(
+		__field(unsigned int, cpu			)
+		__field(unsigned int, idle			)
+		__field(unsigned int, nr_running		)
+		__field(unsigned int, nr_big_tasks		)
+		__field(unsigned int, load_scale_factor		)
+		__field(unsigned int, capacity			)
+		__field(	 u64, cumulative_runnable_avg	)
+		__field(	 u64, irqload			)
+		__field(unsigned int, max_freq			)
+		__field(unsigned int, power_cost		)
+		__field(	 int, cstate			)
+		__field(	 int, dstate			)
+		__field(	 int, temp			)
+	),
+
+	TP_fast_assign(
+		__entry->cpu			= rq->cpu;
+		__entry->idle			= idle;
+		__entry->nr_running		= rq->nr_running;
+		__entry->nr_big_tasks		= rq->hmp_stats.nr_big_tasks;
+		__entry->load_scale_factor	= cpu_load_scale_factor(rq->cpu);
+		__entry->capacity		= cpu_capacity(rq->cpu);
+		__entry->cumulative_runnable_avg = rq->hmp_stats.cumulative_runnable_avg;
+		__entry->irqload		= irqload;
+		__entry->max_freq		= cpu_max_freq(rq->cpu);
+		__entry->power_cost		= power_cost;
+		__entry->cstate			= rq->cstate;
+		__entry->dstate			= rq->cluster->dstate;
+		__entry->temp			= temp;
+	),
+
+	TP_printk("cpu %u idle %d nr_run %u nr_big %u lsf %u capacity %u cr_avg %llu irqload %llu fmax %u power_cost %u cstate %d dstate %d temp %d",
+	__entry->cpu, __entry->idle, __entry->nr_running, __entry->nr_big_tasks,
+	__entry->load_scale_factor, __entry->capacity,
+	__entry->cumulative_runnable_avg, __entry->irqload,
+	__entry->max_freq, __entry->power_cost, __entry->cstate,
+	__entry->dstate, __entry->temp)
+);
+
+DEFINE_EVENT(sched_cpu_load, sched_cpu_load_wakeup,
+	TP_PROTO(struct rq *rq, int idle, u64 irqload, unsigned int power_cost, int temp),
+	TP_ARGS(rq, idle, irqload, power_cost, temp)
+);
+
+DEFINE_EVENT(sched_cpu_load, sched_cpu_load_lb,
+	TP_PROTO(struct rq *rq, int idle, u64 irqload, unsigned int power_cost, int temp),
+	TP_ARGS(rq, idle, irqload, power_cost, temp)
+);
+
+DEFINE_EVENT(sched_cpu_load, sched_cpu_load_cgroup,
+	TP_PROTO(struct rq *rq, int idle, u64 irqload, unsigned int power_cost, int temp),
+	TP_ARGS(rq, idle, irqload, power_cost, temp)
+);
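+
+/*
+ * Note on the pattern above: DECLARE_EVENT_CLASS() emits the record
+ * layout, assignment and print logic only once, and each DEFINE_EVENT()
+ * merely adds a named trace_*() entry point that reuses it, which is
+ * cheaper than three full TRACE_EVENT() expansions. Minimal sketch of
+ * the same pattern (names illustrative):
+ *
+ *	DECLARE_EVENT_CLASS(foo_class,
+ *		TP_PROTO(int x), TP_ARGS(x),
+ *		TP_STRUCT__entry(__field(int, x)),
+ *		TP_fast_assign(__entry->x = x;),
+ *		TP_printk("x=%d", __entry->x));
+ *
+ *	DEFINE_EVENT(foo_class, foo_one, TP_PROTO(int x), TP_ARGS(x));
+ */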
+
+TRACE_EVENT(sched_set_boost,
+
+	TP_PROTO(int type),
+
+	TP_ARGS(type),
+
+	TP_STRUCT__entry(
+		__field(int, type			)
+	),
+
+	TP_fast_assign(
+		__entry->type = type;
+	),
+
+	TP_printk("type %d", __entry->type)
+);
+
+#if defined(CREATE_TRACE_POINTS) && defined(CONFIG_SCHED_HMP)
+static inline void __window_data(u32 *dst, u32 *src)
+{
+	if (src)
+		memcpy(dst, src, nr_cpu_ids * sizeof(u32));
+	else
+		memset(dst, 0, nr_cpu_ids * sizeof(u32));
+}
+
+struct trace_seq;
+const char *__window_print(struct trace_seq *p, const u32 *buf, int buf_len)
+{
+	int i;
+	const char *ret = p->buffer + seq_buf_used(&p->seq);
+
+	for (i = 0; i < buf_len; i++)
+		trace_seq_printf(p, "%u ", buf[i]);
+
+	trace_seq_putc(p, 0);
+
+	return ret;
+}
+
+static inline s64 __rq_update_sum(struct rq *rq, bool curr, bool new)
+{
+	if (curr)
+		if (new)
+			return rq->nt_curr_runnable_sum;
+		else
+			return rq->curr_runnable_sum;
+	else
+		if (new)
+			return rq->nt_prev_runnable_sum;
+		else
+			return rq->prev_runnable_sum;
+}
+
+static inline s64 __grp_update_sum(struct rq *rq, bool curr, bool new)
+{
+	if (curr)
+		if (new)
+			return rq->grp_time.nt_curr_runnable_sum;
+		else
+			return rq->grp_time.curr_runnable_sum;
+	else
+		if (new)
+			return rq->grp_time.nt_prev_runnable_sum;
+		else
+			return rq->grp_time.prev_runnable_sum;
+}
+
+static inline s64
+__get_update_sum(struct rq *rq, enum migrate_types migrate_type,
+		 bool src, bool new, bool curr)
+{
+	switch (migrate_type) {
+	case RQ_TO_GROUP:
+		if (src)
+			return __rq_update_sum(rq, curr, new);
+		else
+			return __grp_update_sum(rq, curr, new);
+	case GROUP_TO_RQ:
+		if (src)
+			return __grp_update_sum(rq, curr, new);
+		else
+			return __rq_update_sum(rq, curr, new);
+	default:
+		WARN_ON_ONCE(1);
+		return -1;
+	}
+}
+#endif
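+
+/*
+ * The helpers above are guarded by CREATE_TRACE_POINTS so they are
+ * compiled exactly once: this header is included from many translation
+ * units, but only the single .c file that instantiates the events
+ * defines CREATE_TRACE_POINTS before including it, e.g. (sketch):
+ *
+ *	#define CREATE_TRACE_POINTS
+ *	#include <trace/events/sched.h>
+ */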
+
+TRACE_EVENT(sched_update_task_ravg,
+
+	TP_PROTO(struct task_struct *p, struct rq *rq, enum task_event evt,
+		 u64 wallclock, u64 irqtime, u64 cycles, u64 exec_time,
+		 struct group_cpu_time *cpu_time),
+
+	TP_ARGS(p, rq, evt, wallclock, irqtime, cycles, exec_time, cpu_time),
+
+	TP_STRUCT__entry(
+		__array(	char,	comm,   TASK_COMM_LEN	)
+		__field(	pid_t,	pid			)
+		__field(	pid_t,	cur_pid			)
+		__field(unsigned int,	cur_freq		)
+		__field(	u64,	wallclock		)
+		__field(	u64,	mark_start		)
+		__field(	u64,	delta_m			)
+		__field(	u64,	win_start		)
+		__field(	u64,	delta			)
+		__field(	u64,	irqtime			)
+		__field(enum task_event,	evt		)
+		__field(unsigned int,	demand			)
+		__field(unsigned int,	sum			)
+		__field(	 int,	cpu			)
+		__field(unsigned int,	pred_demand		)
+		__field(	u64,	rq_cs			)
+		__field(	u64,	rq_ps			)
+		__field(	u64,	grp_cs			)
+		__field(	u64,	grp_ps			)
+		__field(	u64,	grp_nt_cs		)
+		__field(	u64,	grp_nt_ps		)
+		__field(	u32,	curr_window		)
+		__field(	u32,	prev_window		)
+		__dynamic_array(u32,	curr_sum, nr_cpu_ids	)
+		__dynamic_array(u32,	prev_sum, nr_cpu_ids	)
+		__field(	u64,	nt_cs			)
+		__field(	u64,	nt_ps			)
+		__field(	u32,	active_windows		)
+		__field(	u8,	curr_top		)
+		__field(	u8,	prev_top		)
+	),
+
+	TP_fast_assign(
+		__entry->wallclock      = wallclock;
+		__entry->win_start      = rq->window_start;
+		__entry->delta          = (wallclock - rq->window_start);
+		__entry->evt            = evt;
+		__entry->cpu            = rq->cpu;
+		__entry->cur_pid        = rq->curr->pid;
+		__entry->cur_freq       = cpu_cycles_to_freq(cycles, exec_time);
+		memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
+		__entry->pid            = p->pid;
+		__entry->mark_start     = p->ravg.mark_start;
+		__entry->delta_m        = (wallclock - p->ravg.mark_start);
+		__entry->demand         = p->ravg.demand;
+		__entry->sum            = p->ravg.sum;
+		__entry->irqtime        = irqtime;
+		__entry->pred_demand     = p->ravg.pred_demand;
+		__entry->rq_cs          = rq->curr_runnable_sum;
+		__entry->rq_ps          = rq->prev_runnable_sum;
+		__entry->grp_cs = cpu_time ? cpu_time->curr_runnable_sum : 0;
+		__entry->grp_ps = cpu_time ? cpu_time->prev_runnable_sum : 0;
+		__entry->grp_nt_cs = cpu_time ? cpu_time->nt_curr_runnable_sum : 0;
+		__entry->grp_nt_ps = cpu_time ? cpu_time->nt_prev_runnable_sum : 0;
+		__entry->curr_window	= p->ravg.curr_window;
+		__entry->prev_window	= p->ravg.prev_window;
+		__window_data(__get_dynamic_array(curr_sum), p->ravg.curr_window_cpu);
+		__window_data(__get_dynamic_array(prev_sum), p->ravg.prev_window_cpu);
+		__entry->nt_cs		= rq->nt_curr_runnable_sum;
+		__entry->nt_ps		= rq->nt_prev_runnable_sum;
+		__entry->active_windows	= p->ravg.active_windows;
+		__entry->curr_top	= rq->curr_top;
+		__entry->prev_top	= rq->prev_top;
+	),
+
+	TP_printk("wc %llu ws %llu delta %llu event %s cpu %d cur_freq %u cur_pid %d task %d (%s) ms %llu delta %llu demand %u sum %u irqtime %llu pred_demand %u rq_cs %llu rq_ps %llu cur_window %u (%s) prev_window %u (%s) nt_cs %llu nt_ps %llu active_wins %u grp_cs %llu grp_ps %llu grp_nt_cs %llu grp_nt_ps %llu curr_top %u prev_top %u",
+		__entry->wallclock, __entry->win_start, __entry->delta,
+		task_event_names[__entry->evt], __entry->cpu,
+		__entry->cur_freq, __entry->cur_pid,
+		__entry->pid, __entry->comm, __entry->mark_start,
+		__entry->delta_m, __entry->demand,
+		__entry->sum, __entry->irqtime, __entry->pred_demand,
+		__entry->rq_cs, __entry->rq_ps, __entry->curr_window,
+		__window_print(p, __get_dynamic_array(curr_sum), nr_cpu_ids),
+		__entry->prev_window,
+		__window_print(p, __get_dynamic_array(prev_sum), nr_cpu_ids),
+		__entry->nt_cs, __entry->nt_ps,
+		__entry->active_windows, __entry->grp_cs,
+		__entry->grp_ps, __entry->grp_nt_cs, __entry->grp_nt_ps,
+		__entry->curr_top, __entry->prev_top)
+);
+
+TRACE_EVENT(sched_get_task_cpu_cycles,
+
+	TP_PROTO(int cpu, int event, u64 cycles, u64 exec_time, struct task_struct *p),
+
+	TP_ARGS(cpu, event, cycles, exec_time, p),
+
+	TP_STRUCT__entry(
+		__field(int,		cpu		)
+		__field(int,		event		)
+		__field(u64,		cycles		)
+		__field(u64,		exec_time	)
+		__field(u32,		freq		)
+		__field(u32,		legacy_freq	)
+		__field(u32,		max_freq)
+		__field(pid_t,		pid		)
+		__array(char,	comm,   TASK_COMM_LEN	)
+	),
+
+	TP_fast_assign(
+		__entry->cpu 		= cpu;
+		__entry->event 		= event;
+		__entry->cycles 	= cycles;
+		__entry->exec_time 	= exec_time;
+		__entry->freq		= cpu_cycles_to_freq(cycles, exec_time);
+		__entry->legacy_freq 	= cpu_cur_freq(cpu);
+		__entry->max_freq	= cpu_max_freq(cpu);
+		__entry->pid            = p->pid;
+		memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
+	),
+
+	TP_printk("cpu=%d event=%d cycles=%llu exec_time=%llu freq=%u legacy_freq=%u max_freq=%u task=%d (%s)",
+		__entry->cpu, __entry->event, __entry->cycles,
+		__entry->exec_time, __entry->freq, __entry->legacy_freq,
+		__entry->max_freq, __entry->pid, __entry->comm)
+);
+
+TRACE_EVENT(sched_update_history,
+
+	TP_PROTO(struct rq *rq, struct task_struct *p, u32 runtime, int samples,
+			enum task_event evt),
+
+	TP_ARGS(rq, p, runtime, samples, evt),
+
+	TP_STRUCT__entry(
+		__array(	char,	comm,   TASK_COMM_LEN	)
+		__field(	pid_t,	pid			)
+		__field(unsigned int,	runtime			)
+		__field(	 int,	samples			)
+		__field(enum task_event,	evt		)
+		__field(unsigned int,	demand			)
+		__field(unsigned int,	pred_demand		)
+		__array(	 u32,	hist, RAVG_HIST_SIZE_MAX)
+		__field(unsigned int,	nr_big_tasks		)
+		__field(	 int,	cpu			)
+	),
+
+	TP_fast_assign(
+		memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
+		__entry->pid            = p->pid;
+		__entry->runtime        = runtime;
+		__entry->samples        = samples;
+		__entry->evt            = evt;
+		__entry->demand         = p->ravg.demand;
+		__entry->pred_demand     = p->ravg.pred_demand;
+		memcpy(__entry->hist, p->ravg.sum_history,
+					RAVG_HIST_SIZE_MAX * sizeof(u32));
+		__entry->nr_big_tasks   = rq->hmp_stats.nr_big_tasks;
+		__entry->cpu            = rq->cpu;
+	),
+
+	TP_printk("%d (%s): runtime %u samples %d event %s demand %u pred_demand %u"
+		" (hist: %u %u %u %u %u) cpu %d nr_big %u",
+		__entry->pid, __entry->comm,
+		__entry->runtime, __entry->samples,
+		task_event_names[__entry->evt],
+		__entry->demand, __entry->pred_demand,
+		__entry->hist[0], __entry->hist[1],
+		__entry->hist[2], __entry->hist[3],
+		__entry->hist[4], __entry->cpu, __entry->nr_big_tasks)
+);
+
+TRACE_EVENT(sched_reset_all_window_stats,
+
+	TP_PROTO(u64 window_start, u64 window_size, u64 time_taken,
+		int reason, unsigned int old_val, unsigned int new_val),
+
+	TP_ARGS(window_start, window_size, time_taken,
+		reason, old_val, new_val),
+
+	TP_STRUCT__entry(
+		__field(	u64,	window_start		)
+		__field(	u64,	window_size		)
+		__field(	u64,	time_taken		)
+		__field(	int,	reason			)
+		__field(unsigned int,	old_val			)
+		__field(unsigned int,	new_val			)
+	),
+
+	TP_fast_assign(
+		__entry->window_start = window_start;
+		__entry->window_size = window_size;
+		__entry->time_taken = time_taken;
+		__entry->reason	= reason;
+		__entry->old_val = old_val;
+		__entry->new_val = new_val;
+	),
+
+	TP_printk("time_taken %llu window_start %llu window_size %llu reason %s old_val %u new_val %u",
+		  __entry->time_taken, __entry->window_start,
+		  __entry->window_size,
+		  sched_window_reset_reasons[__entry->reason],
+		  __entry->old_val, __entry->new_val)
+);
+
+TRACE_EVENT(sched_update_pred_demand,
+
+	TP_PROTO(struct rq *rq, struct task_struct *p, u32 runtime, int pct,
+		 unsigned int pred_demand),
+
+	TP_ARGS(rq, p, runtime, pct, pred_demand),
+
+	TP_STRUCT__entry(
+		__array(	char,	comm,   TASK_COMM_LEN	)
+		__field(       pid_t,	pid			)
+		__field(unsigned int,	runtime			)
+		__field(	 int,	pct			)
+		__field(unsigned int,	pred_demand		)
+		__array(	  u8,	bucket, NUM_BUSY_BUCKETS)
+		__field(	 int,	cpu			)
+	),
+
+	TP_fast_assign(
+		memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
+		__entry->pid            = p->pid;
+		__entry->runtime        = runtime;
+		__entry->pct            = pct;
+		__entry->pred_demand     = pred_demand;
+		memcpy(__entry->bucket, p->ravg.busy_buckets,
+					NUM_BUSY_BUCKETS * sizeof(u8));
+		__entry->cpu            = rq->cpu;
+	),
+
+	TP_printk("%d (%s): runtime %u pct %d cpu %d pred_demand %u (buckets: %u %u %u %u %u %u %u %u %u %u)",
+		__entry->pid, __entry->comm,
+		__entry->runtime, __entry->pct, __entry->cpu,
+		__entry->pred_demand, __entry->bucket[0], __entry->bucket[1],
+		__entry->bucket[2], __entry->bucket[3], __entry->bucket[4],
+		__entry->bucket[5], __entry->bucket[6], __entry->bucket[7],
+		__entry->bucket[8], __entry->bucket[9])
+);
+
+TRACE_EVENT(sched_migration_update_sum,
+
+	TP_PROTO(struct task_struct *p, enum migrate_types migrate_type, struct rq *rq),
+
+	TP_ARGS(p, migrate_type, rq),
+
+	TP_STRUCT__entry(
+		__field(int,		tcpu			)
+		__field(int,		pid			)
+		__field(enum migrate_types,	migrate_type	)
+		__field(	s64,	src_cs			)
+		__field(	s64,	src_ps			)
+		__field(	s64,	dst_cs			)
+		__field(	s64,	dst_ps			)
+		__field(	s64,	src_nt_cs		)
+		__field(	s64,	src_nt_ps		)
+		__field(	s64,	dst_nt_cs		)
+		__field(	s64,	dst_nt_ps		)
+	),
+
+	TP_fast_assign(
+		__entry->tcpu		= task_cpu(p);
+		__entry->pid		= p->pid;
+		__entry->migrate_type	= migrate_type;
+		__entry->src_cs		= __get_update_sum(rq, migrate_type,
+							   true, false, true);
+		__entry->src_ps		= __get_update_sum(rq, migrate_type,
+							   true, false, false);
+		__entry->dst_cs		= __get_update_sum(rq, migrate_type,
+							   false, false, true);
+		__entry->dst_ps		= __get_update_sum(rq, migrate_type,
+							   false, false, false);
+		__entry->src_nt_cs	= __get_update_sum(rq, migrate_type,
+							   true, true, true);
+		__entry->src_nt_ps	= __get_update_sum(rq, migrate_type,
+							   true, true, false);
+		__entry->dst_nt_cs	= __get_update_sum(rq, migrate_type,
+							   false, true, true);
+		__entry->dst_nt_ps	= __get_update_sum(rq, migrate_type,
+							   false, true, false);
+	),
+
+	TP_printk("pid %d task_cpu %d migrate_type %s src_cs %lld src_ps %lld dst_cs %lld dst_ps %lld src_nt_cs %lld src_nt_ps %lld dst_nt_cs %lld dst_nt_ps %lld",
+		__entry->pid, __entry->tcpu, migrate_type_names[__entry->migrate_type],
+		__entry->src_cs, __entry->src_ps, __entry->dst_cs, __entry->dst_ps,
+		__entry->src_nt_cs, __entry->src_nt_ps, __entry->dst_nt_cs, __entry->dst_nt_ps)
+);
+
+TRACE_EVENT(sched_get_busy,
+
+	TP_PROTO(int cpu, u64 load, u64 nload, u64 pload, int early, bool aggregated),
+
+	TP_ARGS(cpu, load, nload, pload, early, aggregated),
+
+	TP_STRUCT__entry(
+		__field(	int,	cpu			)
+		__field(	u64,	load			)
+		__field(	u64,	nload			)
+		__field(	u64,	pload			)
+		__field(	int,	early			)
+		__field(	bool,	aggregated		)
+	),
+
+	TP_fast_assign(
+		__entry->cpu		= cpu;
+		__entry->load		= load;
+		__entry->nload		= nload;
+		__entry->pload		= pload;
+		__entry->early		= early;
+		__entry->aggregated	= aggregated;
+	),
+
+	TP_printk("cpu %d load %llu new_task_load %llu predicted_load %llu early %d aggregated %d",
+		__entry->cpu, __entry->load, __entry->nload,
+		__entry->pload, __entry->early, __entry->aggregated)
+);
+
+TRACE_EVENT(sched_freq_alert,
+
+	TP_PROTO(int cpu, int pd_notif, int check_groups, struct rq *rq,
+		u64 new_load),
+
+	TP_ARGS(cpu, pd_notif, check_groups, rq, new_load),
+
+	TP_STRUCT__entry(
+		__field(	int,	cpu			)
+		__field(	int,	pd_notif		)
+		__field(	int,	check_groups		)
+		__field(	u64,	old_busy_time		)
+		__field(	u64,	ps			)
+		__field(	u64,	new_load		)
+		__field(	u64,	old_pred		)
+		__field(	u64,	new_pred		)
+	),
+
+	TP_fast_assign(
+		__entry->cpu		= cpu;
+		__entry->pd_notif	= pd_notif;
+		__entry->check_groups	= check_groups;
+		__entry->old_busy_time	= rq->old_busy_time;
+		__entry->ps		= rq->prev_runnable_sum;
+		__entry->new_load	= new_load;
+		__entry->old_pred	= rq->old_estimated_time;
+		__entry->new_pred	= rq->hmp_stats.pred_demands_sum;
+	),
+
+	TP_printk("cpu %d pd_notif=%d check_groups %d old_busy_time=%llu prev_sum=%llu new_load=%llu old_pred=%llu new_pred=%llu",
+		__entry->cpu, __entry->pd_notif, __entry->check_groups,
+		__entry->old_busy_time, __entry->ps, __entry->new_load,
+		__entry->old_pred, __entry->new_pred)
+);
+
+#endif	/* CONFIG_SCHED_HMP */
+
+/*
  * Tracepoint for waking up a task:
  */
 DECLARE_EVENT_CLASS(sched_wakeup_template,
@@ -166,14 +815,16 @@
  */
 TRACE_EVENT(sched_migrate_task,
 
-	TP_PROTO(struct task_struct *p, int dest_cpu),
+	TP_PROTO(struct task_struct *p, int dest_cpu,
+		 unsigned int load),
 
-	TP_ARGS(p, dest_cpu),
+	TP_ARGS(p, dest_cpu, load),
 
 	TP_STRUCT__entry(
 		__array(	char,	comm,	TASK_COMM_LEN	)
 		__field(	pid_t,	pid			)
 		__field(	int,	prio			)
+		__field(unsigned int,	load			)
 		__field(	int,	orig_cpu		)
 		__field(	int,	dest_cpu		)
 	),
@@ -182,15 +833,91 @@
 		memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
 		__entry->pid		= p->pid;
 		__entry->prio		= p->prio;
+		__entry->load		= load;
 		__entry->orig_cpu	= task_cpu(p);
 		__entry->dest_cpu	= dest_cpu;
 	),
 
-	TP_printk("comm=%s pid=%d prio=%d orig_cpu=%d dest_cpu=%d",
-		  __entry->comm, __entry->pid, __entry->prio,
+	TP_printk("comm=%s pid=%d prio=%d load=%u orig_cpu=%d dest_cpu=%d",
+		  __entry->comm, __entry->pid, __entry->prio, __entry->load,
 		  __entry->orig_cpu, __entry->dest_cpu)
 );
 
+/*
+ * Tracepoint for a CPU going offline/online:
+ */
+TRACE_EVENT(sched_cpu_hotplug,
+
+	TP_PROTO(int affected_cpu, int error, int status),
+
+	TP_ARGS(affected_cpu, error, status),
+
+	TP_STRUCT__entry(
+		__field(	int,	affected_cpu		)
+		__field(	int,	error			)
+		__field(	int,	status			)
+	),
+
+	TP_fast_assign(
+		__entry->affected_cpu	= affected_cpu;
+		__entry->error		= error;
+		__entry->status		= status;
+	),
+
+	TP_printk("cpu %d %s error=%d", __entry->affected_cpu,
+		__entry->status ? "online" : "offline", __entry->error)
+);
+
+/*
+ * Tracepoint for load balancing:
+ */
+#if NR_CPUS > 32
+#error "Unsupported NR_CPUS for lb tracepoint."
+#endif
+TRACE_EVENT(sched_load_balance,
+
+	TP_PROTO(int cpu, enum cpu_idle_type idle, int balance,
+		 unsigned long group_mask, int busiest_nr_running,
+		 unsigned long imbalance, unsigned int env_flags, int ld_moved,
+		 unsigned int balance_interval),
+
+	TP_ARGS(cpu, idle, balance, group_mask, busiest_nr_running,
+		imbalance, env_flags, ld_moved, balance_interval),
+
+	TP_STRUCT__entry(
+		__field(	int,			cpu)
+		__field(	enum cpu_idle_type,	idle)
+		__field(	int,			balance)
+		__field(	unsigned long,		group_mask)
+		__field(	int,			busiest_nr_running)
+		__field(	unsigned long,		imbalance)
+		__field(	unsigned int,		env_flags)
+		__field(	int,			ld_moved)
+		__field(	unsigned int,		balance_interval)
+	),
+
+	TP_fast_assign(
+		__entry->cpu			= cpu;
+		__entry->idle			= idle;
+		__entry->balance		= balance;
+		__entry->group_mask		= group_mask;
+		__entry->busiest_nr_running	= busiest_nr_running;
+		__entry->imbalance		= imbalance;
+		__entry->env_flags		= env_flags;
+		__entry->ld_moved		= ld_moved;
+		__entry->balance_interval	= balance_interval;
+	),
+
+	TP_printk("cpu=%d state=%s balance=%d group=%#lx busy_nr=%d imbalance=%lu flags=%#x ld_moved=%d bal_int=%u",
+		  __entry->cpu,
+		  __entry->idle == CPU_IDLE ? "idle" :
+		  (__entry->idle == CPU_NEWLY_IDLE ? "newly_idle" : "busy"),
+		  __entry->balance,
+		  __entry->group_mask, __entry->busiest_nr_running,
+		  __entry->imbalance, __entry->env_flags, __entry->ld_moved,
+		  __entry->balance_interval)
+);
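+
+/*
+ * The NR_CPUS > 32 guard above exists because group_mask flattens a
+ * cpumask into a single unsigned long, which is only safe while every
+ * possible CPU fits in the low 32 bits on all supported targets. A
+ * caller might pack the mask as follows (a sketch; "sg" and the other
+ * local names are assumptions):
+ *
+ *	unsigned long mask = sg ? cpumask_bits(sched_group_cpus(sg))[0] : 0;
+ *
+ *	trace_sched_load_balance(this_cpu, idle, *continue_balancing,
+ *				 mask, busiest ? busiest->nr_running : 0,
+ *				 env.imbalance, env.flags, ld_moved,
+ *				 sd->balance_interval);
+ */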
+
 DECLARE_EVENT_CLASS(sched_process_template,
 
 	TP_PROTO(struct task_struct *p),
@@ -374,6 +1101,30 @@
 	     TP_ARGS(tsk, delay));
 
 /*
+ * Tracepoint for recording the cause of uninterruptible sleep.
+ */
+TRACE_EVENT(sched_blocked_reason,
+
+	TP_PROTO(struct task_struct *tsk),
+
+	TP_ARGS(tsk),
+
+	TP_STRUCT__entry(
+		__field( pid_t,	pid	)
+		__field( void*, caller	)
+		__field( bool, io_wait	)
+	),
+
+	TP_fast_assign(
+		__entry->pid	= tsk->pid;
+		__entry->caller = (void *)get_wchan(tsk);
+		__entry->io_wait = tsk->in_iowait;
+	),
+
+	TP_printk("pid=%d iowait=%d caller=%pS", __entry->pid, __entry->io_wait, __entry->caller)
+);
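+
+/*
+ * get_wchan() recovers the kernel text address where the task blocked,
+ * and the %pS specifier symbolizes it in the trace output. One plausible
+ * call site (the exact placement is an assumption) is the wakeup path,
+ * emitting the event as a task leaves uninterruptible sleep:
+ *
+ *	trace_sched_blocked_reason(p);
+ */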
+
+/*
  * Tracepoint for accounting runtime (time the task is executing
  * on a CPU).
  */
@@ -562,6 +1313,698 @@
 
 	TP_printk("cpu=%d", __entry->cpu)
 );
+
+TRACE_EVENT(sched_get_nr_running_avg,
+
+	TP_PROTO(int avg, int big_avg, int iowait_avg,
+		 unsigned int max_nr, unsigned int big_max_nr),
+
+	TP_ARGS(avg, big_avg, iowait_avg, max_nr, big_max_nr),
+
+	TP_STRUCT__entry(
+		__field( int,	avg			)
+		__field( int,	big_avg			)
+		__field( int,	iowait_avg		)
+		__field( unsigned int,	max_nr		)
+		__field( unsigned int,	big_max_nr	)
+	),
+
+	TP_fast_assign(
+		__entry->avg		= avg;
+		__entry->big_avg	= big_avg;
+		__entry->iowait_avg	= iowait_avg;
+		__entry->max_nr		= max_nr;
+		__entry->big_max_nr	= big_max_nr;
+	),
+
+	TP_printk("avg=%d big_avg=%d iowait_avg=%d max_nr=%u big_max_nr=%u",
+		__entry->avg, __entry->big_avg, __entry->iowait_avg,
+		__entry->max_nr, __entry->big_max_nr)
+);
+
+TRACE_EVENT(core_ctl_eval_need,
+
+	TP_PROTO(unsigned int cpu, unsigned int old_need,
+		 unsigned int new_need, unsigned int updated),
+	TP_ARGS(cpu, old_need, new_need, updated),
+	TP_STRUCT__entry(
+		__field(u32, cpu)
+		__field(u32, old_need)
+		__field(u32, new_need)
+		__field(u32, updated)
+	),
+	TP_fast_assign(
+		__entry->cpu = cpu;
+		__entry->old_need = old_need;
+		__entry->new_need = new_need;
+		__entry->updated = updated;
+	),
+	TP_printk("cpu=%u, old_need=%u, new_need=%u, updated=%u", __entry->cpu,
+		  __entry->old_need, __entry->new_need, __entry->updated)
+);
+
+TRACE_EVENT(core_ctl_set_busy,
+
+	TP_PROTO(unsigned int cpu, unsigned int busy,
+		 unsigned int old_is_busy, unsigned int is_busy),
+	TP_ARGS(cpu, busy, old_is_busy, is_busy),
+	TP_STRUCT__entry(
+		__field(u32, cpu)
+		__field(u32, busy)
+		__field(u32, old_is_busy)
+		__field(u32, is_busy)
+	),
+	TP_fast_assign(
+		__entry->cpu = cpu;
+		__entry->busy = busy;
+		__entry->old_is_busy = old_is_busy;
+		__entry->is_busy = is_busy;
+	),
+	TP_printk("cpu=%u, busy=%u, old_is_busy=%u, new_is_busy=%u",
+		  __entry->cpu, __entry->busy, __entry->old_is_busy,
+		  __entry->is_busy)
+);
+
+TRACE_EVENT(core_ctl_set_boost,
+
+	TP_PROTO(u32 refcount, s32 ret),
+	TP_ARGS(refcount, ret),
+	TP_STRUCT__entry(
+		__field(u32, refcount)
+		__field(s32, ret)
+	),
+	TP_fast_assign(
+		__entry->refcount = refcount;
+		__entry->ret = ret;
+	),
+	TP_printk("refcount=%u, ret=%d", __entry->refcount, __entry->ret)
+);
+
+/**
+ * sched_isolate - called when cores are isolated/unisolated
+ *
+ * @requested_cpu: the CPU requested to be isolated/unisolated
+ * @isolated_cpus: mask of cores currently isolated
+ * @start_time: sched_clock() timestamp taken when the operation began;
+ *	the event records the elapsed time in us
+ * @isolate: 1 if isolating, 0 if unisolating
+ */
+TRACE_EVENT(sched_isolate,
+
+	TP_PROTO(unsigned int requested_cpu, unsigned int isolated_cpus,
+		 u64 start_time, unsigned char isolate),
+
+	TP_ARGS(requested_cpu, isolated_cpus, start_time, isolate),
+
+	TP_STRUCT__entry(
+		__field(u32, requested_cpu)
+		__field(u32, isolated_cpus)
+		__field(u32, time)
+		__field(unsigned char, isolate)
+	),
+
+	TP_fast_assign(
+		__entry->requested_cpu = requested_cpu;
+		__entry->isolated_cpus = isolated_cpus;
+		__entry->time = div64_u64(sched_clock() - start_time, 1000);
+		__entry->isolate = isolate;
+	),
+
+	TP_printk("iso cpu=%u cpus=0x%x time=%u us isolated=%d",
+		  __entry->requested_cpu, __entry->isolated_cpus,
+		  __entry->time, __entry->isolate)
+);
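+
+/*
+ * Worked example for the time field above: sched_clock() returns
+ * nanoseconds, so div64_u64(sched_clock() - start_time, 1000) yields
+ * microseconds. An isolation that took 2500000 ns is reported as
+ * time=2500 us.
+ */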
+
+TRACE_EVENT(sched_contrib_scale_f,
+
+	TP_PROTO(int cpu, unsigned long freq_scale_factor,
+		 unsigned long cpu_scale_factor),
+
+	TP_ARGS(cpu, freq_scale_factor, cpu_scale_factor),
+
+	TP_STRUCT__entry(
+		__field(int, cpu)
+		__field(unsigned long, freq_scale_factor)
+		__field(unsigned long, cpu_scale_factor)
+	),
+
+	TP_fast_assign(
+		__entry->cpu = cpu;
+		__entry->freq_scale_factor = freq_scale_factor;
+		__entry->cpu_scale_factor = cpu_scale_factor;
+	),
+
+	TP_printk("cpu=%d freq_scale_factor=%lu cpu_scale_factor=%lu",
+		  __entry->cpu, __entry->freq_scale_factor,
+		  __entry->cpu_scale_factor)
+);
+
+#ifdef CONFIG_SMP
+
+#ifdef CONFIG_SCHED_WALT
+extern unsigned int sysctl_sched_use_walt_cpu_util;
+extern unsigned int sysctl_sched_use_walt_task_util;
+extern unsigned int walt_ravg_window;
+extern bool walt_disabled;
+#endif
+
+/*
+ * Tracepoint for accounting sched averages for tasks.
+ */
+TRACE_EVENT(sched_load_avg_task,
+
+	TP_PROTO(struct task_struct *tsk, struct sched_avg *avg, void *_ravg),
+
+	TP_ARGS(tsk, avg, _ravg),
+
+	TP_STRUCT__entry(
+		__array( char,	comm,	TASK_COMM_LEN		)
+		__field( pid_t,	pid				)
+		__field( int,	cpu				)
+		__field( unsigned long,	load_avg		)
+		__field( unsigned long,	util_avg		)
+		__field( unsigned long,	util_avg_pelt	)
+		__field( unsigned long,	util_avg_walt	)
+		__field( u64,		load_sum		)
+		__field( u32,		util_sum		)
+		__field( u32,		period_contrib		)
+	),
+
+	TP_fast_assign(
+		memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
+		__entry->pid			= tsk->pid;
+		__entry->cpu			= task_cpu(tsk);
+		__entry->load_avg		= avg->load_avg;
+		__entry->util_avg		= avg->util_avg;
+		__entry->load_sum		= avg->load_sum;
+		__entry->util_sum		= avg->util_sum;
+		__entry->period_contrib		= avg->period_contrib;
+		__entry->util_avg_pelt  = avg->util_avg;
+		__entry->util_avg_walt  = 0;
+#ifdef CONFIG_SCHED_WALT
+		__entry->util_avg_walt = (((unsigned long)((struct ravg *)_ravg)->demand) << SCHED_LOAD_SHIFT);
+		do_div(__entry->util_avg_walt, walt_ravg_window);
+		if (!walt_disabled && sysctl_sched_use_walt_task_util)
+			__entry->util_avg = __entry->util_avg_walt;
+#endif
+	),
+	TP_printk("comm=%s pid=%d cpu=%d load_avg=%lu util_avg=%lu "
+		  "util_avg_pelt=%lu util_avg_walt=%lu load_sum=%llu "
+		  "util_sum=%u period_contrib=%u",
+		  __entry->comm,
+		  __entry->pid,
+		  __entry->cpu,
+		  __entry->load_avg,
+		  __entry->util_avg,
+		  __entry->util_avg_pelt,
+		  __entry->util_avg_walt,
+		  (u64)__entry->load_sum,
+		  (u32)__entry->util_sum,
+		  (u32)__entry->period_contrib)
+);
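+
+/*
+ * Worked example for the WALT scaling above: p->ravg.demand is busy time
+ * in ns within one walt_ravg_window, and the SCHED_LOAD_SHIFT shift
+ * (10 with the default load resolution) rescales it to the 0..1024
+ * utilization range. With demand = 10 ms and walt_ravg_window = 20 ms:
+ * (10000000 << 10) / 20000000 = 512, i.e. half of full capacity.
+ */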
+
+/*
+ * Tracepoint for accounting sched averages for cpus.
+ */
+TRACE_EVENT(sched_load_avg_cpu,
+
+	TP_PROTO(int cpu, struct cfs_rq *cfs_rq),
+
+	TP_ARGS(cpu, cfs_rq),
+
+	TP_STRUCT__entry(
+		__field( int,	cpu				)
+		__field( unsigned long,	load_avg		)
+		__field( unsigned long,	util_avg		)
+		__field( unsigned long,	util_avg_pelt	)
+		__field( unsigned long,	util_avg_walt	)
+	),
+
+	TP_fast_assign(
+		__entry->cpu			= cpu;
+		__entry->load_avg		= cfs_rq->avg.load_avg;
+		__entry->util_avg		= cfs_rq->avg.util_avg;
+		__entry->util_avg_pelt	= cfs_rq->avg.util_avg;
+		__entry->util_avg_walt	= 0;
+#ifdef CONFIG_SCHED_WALT
+		__entry->util_avg_walt =
+				div64_u64(cpu_rq(cpu)->cumulative_runnable_avg,
+						  walt_ravg_window >> SCHED_LOAD_SHIFT);
+		if (!walt_disabled && sysctl_sched_use_walt_cpu_util)
+			__entry->util_avg		= __entry->util_avg_walt;
+#endif
+	),
+
+	TP_printk("cpu=%d load_avg=%lu util_avg=%lu "
+		  "util_avg_pelt=%lu util_avg_walt=%lu",
+		  __entry->cpu, __entry->load_avg, __entry->util_avg,
+		  __entry->util_avg_pelt, __entry->util_avg_walt)
+);
+
+/*
+ * Tracepoint for sched_tune_config settings
+ */
+TRACE_EVENT(sched_tune_config,
+
+	TP_PROTO(int boost),
+
+	TP_ARGS(boost),
+
+	TP_STRUCT__entry(
+		__field( int,	boost		)
+	),
+
+	TP_fast_assign(
+		__entry->boost 	= boost;
+	),
+
+	TP_printk("boost=%d ", __entry->boost)
+);
+
+/*
+ * Tracepoint for accounting CPU boosted utilization
+ */
+TRACE_EVENT(sched_boost_cpu,
+
+	TP_PROTO(int cpu, unsigned long util, long margin),
+
+	TP_ARGS(cpu, util, margin),
+
+	TP_STRUCT__entry(
+		__field( int,		cpu			)
+		__field( unsigned long,	util			)
+		__field(long,		margin			)
+	),
+
+	TP_fast_assign(
+		__entry->cpu	= cpu;
+		__entry->util	= util;
+		__entry->margin	= margin;
+	),
+
+	TP_printk("cpu=%d util=%lu margin=%ld",
+		  __entry->cpu,
+		  __entry->util,
+		  __entry->margin)
+);
+
+/*
+ * Tracepoint for schedtune_tasks_update
+ */
+TRACE_EVENT(sched_tune_tasks_update,
+
+	TP_PROTO(struct task_struct *tsk, int cpu, int tasks, int idx,
+		int boost, int max_boost),
+
+	TP_ARGS(tsk, cpu, tasks, idx, boost, max_boost),
+
+	TP_STRUCT__entry(
+		__array( char,	comm,	TASK_COMM_LEN	)
+		__field( pid_t,		pid		)
+		__field( int,		cpu		)
+		__field( int,		tasks		)
+		__field( int,		idx		)
+		__field( int,		boost		)
+		__field( int,		max_boost	)
+	),
+
+	TP_fast_assign(
+		memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
+		__entry->pid		= tsk->pid;
+		__entry->cpu 		= cpu;
+		__entry->tasks		= tasks;
+		__entry->idx 		= idx;
+		__entry->boost		= boost;
+		__entry->max_boost	= max_boost;
+	),
+
+	TP_printk("pid=%d comm=%s "
+			"cpu=%d tasks=%d idx=%d boost=%d max_boost=%d",
+		__entry->pid, __entry->comm,
+		__entry->cpu, __entry->tasks, __entry->idx,
+		__entry->boost, __entry->max_boost)
+);
+
+/*
+ * Tracepoint for schedtune_boostgroup_update
+ */
+TRACE_EVENT(sched_tune_boostgroup_update,
+
+	TP_PROTO(int cpu, int variation, int max_boost),
+
+	TP_ARGS(cpu, variation, max_boost),
+
+	TP_STRUCT__entry(
+		__field( int,	cpu		)
+		__field( int,	variation	)
+		__field( int,	max_boost	)
+	),
+
+	TP_fast_assign(
+		__entry->cpu		= cpu;
+		__entry->variation	= variation;
+		__entry->max_boost	= max_boost;
+	),
+
+	TP_printk("cpu=%d variation=%d max_boost=%d",
+		__entry->cpu, __entry->variation, __entry->max_boost)
+);
+
+/*
+ * Tracepoint for accounting task boosted utilization
+ */
+TRACE_EVENT(sched_boost_task,
+
+	TP_PROTO(struct task_struct *tsk, unsigned long util, long margin),
+
+	TP_ARGS(tsk, util, margin),
+
+	TP_STRUCT__entry(
+		__array( char,	comm,	TASK_COMM_LEN		)
+		__field( pid_t,		pid			)
+		__field( unsigned long,	util			)
+		__field( long,		margin			)
+
+	),
+
+	TP_fast_assign(
+		memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
+		__entry->pid	= tsk->pid;
+		__entry->util	= util;
+		__entry->margin	= margin;
+	),
+
+	TP_printk("comm=%s pid=%d util=%lu margin=%ld",
+		  __entry->comm, __entry->pid,
+		  __entry->util,
+		  __entry->margin)
+);
+
+/*
+ * Tracepoint for find_best_target
+ */
+TRACE_EVENT(sched_find_best_target,
+
+	TP_PROTO(struct task_struct *tsk, bool prefer_idle,
+		unsigned long min_util, int start_cpu,
+		int best_idle, int best_active, int target),
+
+	TP_ARGS(tsk, prefer_idle, min_util, start_cpu,
+		best_idle, best_active, target),
+
+	TP_STRUCT__entry(
+		__array( char,	comm,	TASK_COMM_LEN	)
+		__field( pid_t,	pid			)
+		__field( unsigned long,	min_util	)
+		__field( bool,	prefer_idle		)
+		__field( int,	start_cpu		)
+		__field( int,	best_idle		)
+		__field( int,	best_active		)
+		__field( int,	target			)
+	),
+
+	TP_fast_assign(
+		memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
+		__entry->pid		= tsk->pid;
+		__entry->min_util	= min_util;
+		__entry->prefer_idle	= prefer_idle;
+		__entry->start_cpu 	= start_cpu;
+		__entry->best_idle	= best_idle;
+		__entry->best_active	= best_active;
+		__entry->target		= target;
+	),
+
+	TP_printk("pid=%d comm=%s prefer_idle=%d start_cpu=%d "
+		  "best_idle=%d best_active=%d target=%d",
+		__entry->pid, __entry->comm,
+		__entry->prefer_idle, __entry->start_cpu,
+		__entry->best_idle, __entry->best_active,
+		__entry->target)
+);
+
+/*
+ * Tracepoint for accounting sched group energy
+ */
+TRACE_EVENT(sched_energy_diff,
+
+	TP_PROTO(struct task_struct *tsk, int scpu, int dcpu, int udelta,
+		int nrgb, int nrga, int nrgd, int capb, int capa, int capd,
+		int nrgn, int nrgp),
+
+	TP_ARGS(tsk, scpu, dcpu, udelta,
+		nrgb, nrga, nrgd, capb, capa, capd,
+		nrgn, nrgp),
+
+	TP_STRUCT__entry(
+		__array( char,	comm,	TASK_COMM_LEN	)
+		__field( pid_t,	pid	)
+		__field( int,	scpu	)
+		__field( int,	dcpu	)
+		__field( int,	udelta	)
+		__field( int,	nrgb	)
+		__field( int,	nrga	)
+		__field( int,	nrgd	)
+		__field( int,	capb	)
+		__field( int,	capa	)
+		__field( int,	capd	)
+		__field( int,	nrgn	)
+		__field( int,	nrgp	)
+	),
+
+	TP_fast_assign(
+		memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
+		__entry->pid		= tsk->pid;
+		__entry->scpu 		= scpu;
+		__entry->dcpu 		= dcpu;
+		__entry->udelta 	= udelta;
+		__entry->nrgb 		= nrgb;
+		__entry->nrga 		= nrga;
+		__entry->nrgd 		= nrgd;
+		__entry->capb 		= capb;
+		__entry->capa 		= capa;
+		__entry->capd 		= capd;
+		__entry->nrgn 		= nrgn;
+		__entry->nrgp 		= nrgp;
+	),
+
+	TP_printk("pid=%d comm=%s "
+			"src_cpu=%d dst_cpu=%d usage_delta=%d "
+			"nrg_before=%d nrg_after=%d nrg_diff=%d "
+			"cap_before=%d cap_after=%d cap_delta=%d "
+			"nrg_delta=%d nrg_payoff=%d",
+		__entry->pid, __entry->comm,
+		__entry->scpu, __entry->dcpu, __entry->udelta,
+		__entry->nrgb, __entry->nrga, __entry->nrgd,
+		__entry->capb, __entry->capa, __entry->capd,
+		__entry->nrgn, __entry->nrgp)
+);
+
+/*
+ * Tracepoint for schedtune_tasks_update
+ */
+TRACE_EVENT(sched_tune_filter,
+
+	TP_PROTO(int nrg_delta, int cap_delta,
+		 int nrg_gain,  int cap_gain,
+		 int payoff, int region),
+
+	TP_ARGS(nrg_delta, cap_delta, nrg_gain, cap_gain, payoff, region),
+
+	TP_STRUCT__entry(
+		__field( int,	nrg_delta	)
+		__field( int,	cap_delta	)
+		__field( int,	nrg_gain	)
+		__field( int,	cap_gain	)
+		__field( int,	payoff		)
+		__field( int,	region		)
+	),
+
+	TP_fast_assign(
+		__entry->nrg_delta	= nrg_delta;
+		__entry->cap_delta	= cap_delta;
+		__entry->nrg_gain	= nrg_gain;
+		__entry->cap_gain	= cap_gain;
+		__entry->payoff		= payoff;
+		__entry->region		= region;
+	),
+
+	TP_printk("nrg_delta=%d cap_delta=%d nrg_gain=%d cap_gain=%d payoff=%d region=%d",
+		__entry->nrg_delta, __entry->cap_delta,
+		__entry->nrg_gain, __entry->cap_gain,
+		__entry->payoff, __entry->region)
+);
+
+/*
+ * Tracepoint for system overutilized flag
+ */
+TRACE_EVENT(sched_overutilized,
+
+	TP_PROTO(bool overutilized),
+
+	TP_ARGS(overutilized),
+
+	TP_STRUCT__entry(
+		__field( bool,	overutilized	)
+	),
+
+	TP_fast_assign(
+		__entry->overutilized	= overutilized;
+	),
+
+	TP_printk("overutilized=%d",
+		__entry->overutilized ? 1 : 0)
+);
+#ifdef CONFIG_SCHED_WALT
+struct rq;
+
+TRACE_EVENT(walt_update_task_ravg,
+
+	TP_PROTO(struct task_struct *p, struct rq *rq, int evt,
+						u64 wallclock, u64 irqtime),
+
+	TP_ARGS(p, rq, evt, wallclock, irqtime),
+
+	TP_STRUCT__entry(
+		__array(	char,	comm,   TASK_COMM_LEN	)
+		__field(	pid_t,	pid			)
+		__field(	pid_t,	cur_pid			)
+		__field(	u64,	wallclock		)
+		__field(	u64,	mark_start		)
+		__field(	u64,	delta_m			)
+		__field(	u64,	win_start		)
+		__field(	u64,	delta			)
+		__field(	u64,	irqtime			)
+		__field(        int,    evt			)
+		__field(unsigned int,	demand			)
+		__field(unsigned int,	sum			)
+		__field(	 int,	cpu			)
+		__field(	u64,	cs			)
+		__field(	u64,	ps			)
+		__field(unsigned long,	util			)
+		__field(	u32,	curr_window		)
+		__field(	u32,	prev_window		)
+		__field(	u64,	nt_cs			)
+		__field(	u64,	nt_ps			)
+		__field(	u32,	active_windows		)
+	),
+
+	TP_fast_assign(
+		__entry->wallclock      = wallclock;
+		__entry->win_start      = rq->window_start;
+		__entry->delta          = (wallclock - rq->window_start);
+		__entry->evt            = evt;
+		__entry->cpu            = rq->cpu;
+		__entry->cur_pid        = rq->curr->pid;
+		memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
+		__entry->pid            = p->pid;
+		__entry->mark_start     = p->ravg.mark_start;
+		__entry->delta_m        = (wallclock - p->ravg.mark_start);
+		__entry->demand         = p->ravg.demand;
+		__entry->sum            = p->ravg.sum;
+		__entry->irqtime        = irqtime;
+		__entry->cs             = rq->curr_runnable_sum;
+		__entry->ps             = rq->prev_runnable_sum;
+		__entry->util           = rq->prev_runnable_sum << SCHED_LOAD_SHIFT;
+		do_div(__entry->util, walt_ravg_window);
+		__entry->curr_window	= p->ravg.curr_window;
+		__entry->prev_window	= p->ravg.prev_window;
+		__entry->nt_cs		= rq->nt_curr_runnable_sum;
+		__entry->nt_ps		= rq->nt_prev_runnable_sum;
+		__entry->active_windows	= p->ravg.active_windows;
+	),
+
+	TP_printk("wc %llu ws %llu delta %llu event %d cpu %d cur_pid %d task %d (%s) ms %llu delta %llu demand %u sum %u irqtime %llu "
+		"cs %llu ps %llu util %lu cur_window %u prev_window %u active_wins %u",
+		__entry->wallclock, __entry->win_start, __entry->delta,
+		__entry->evt, __entry->cpu, __entry->cur_pid,
+		__entry->pid, __entry->comm, __entry->mark_start,
+		__entry->delta_m, __entry->demand,
+		__entry->sum, __entry->irqtime,
+		__entry->cs, __entry->ps, __entry->util,
+		__entry->curr_window, __entry->prev_window,
+		__entry->active_windows)
+);
+
+TRACE_EVENT(walt_update_history,
+
+	TP_PROTO(struct rq *rq, struct task_struct *p, u32 runtime, int samples,
+			int evt),
+
+	TP_ARGS(rq, p, runtime, samples, evt),
+
+	TP_STRUCT__entry(
+		__array(	char,	comm,   TASK_COMM_LEN	)
+		__field(	pid_t,	pid			)
+		__field(unsigned int,	runtime			)
+		__field(	 int,	samples			)
+		__field(	 int,	evt			)
+		__field(	 u64,	demand			)
+		__field(	 u64,	walt_avg		)
+		__field(unsigned int,	pelt_avg		)
+		__array(	 u32,	hist, RAVG_HIST_SIZE_MAX)
+		__field(	 int,	cpu			)
+	),
+
+	TP_fast_assign(
+		memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
+		__entry->pid            = p->pid;
+		__entry->runtime        = runtime;
+		__entry->samples        = samples;
+		__entry->evt            = evt;
+		__entry->demand         = p->ravg.demand;
+		__entry->walt_avg = (__entry->demand << 10) / walt_ravg_window;
+		__entry->pelt_avg	= p->se.avg.util_avg;
+		memcpy(__entry->hist, p->ravg.sum_history,
+					RAVG_HIST_SIZE_MAX * sizeof(u32));
+		__entry->cpu            = rq->cpu;
+	),
+
+	TP_printk("%d (%s): runtime %u samples %d event %d demand %llu"
+		" walt %llu pelt %u (hist: %u %u %u %u %u) cpu %d",
+		__entry->pid, __entry->comm,
+		__entry->runtime, __entry->samples, __entry->evt,
+		__entry->demand,
+		__entry->walt_avg,
+		__entry->pelt_avg,
+		__entry->hist[0], __entry->hist[1],
+		__entry->hist[2], __entry->hist[3],
+		__entry->hist[4], __entry->cpu)
+);
+
+TRACE_EVENT(walt_migration_update_sum,
+
+	TP_PROTO(struct rq *rq, struct task_struct *p),
+
+	TP_ARGS(rq, p),
+
+	TP_STRUCT__entry(
+		__field(int,		cpu			)
+		__field(int,		pid			)
+		__field(	u64,	cs			)
+		__field(	u64,	ps			)
+		__field(	s64,	nt_cs			)
+		__field(	s64,	nt_ps			)
+	),
+
+	TP_fast_assign(
+		__entry->cpu		= cpu_of(rq);
+		__entry->cs		= rq->curr_runnable_sum;
+		__entry->ps		= rq->prev_runnable_sum;
+		__entry->nt_cs		= (s64)rq->nt_curr_runnable_sum;
+		__entry->nt_ps		= (s64)rq->nt_prev_runnable_sum;
+		__entry->pid		= p->pid;
+	),
+
+	TP_printk("cpu %d: cs %llu ps %llu nt_cs %lld nt_ps %lld pid %d",
+		  __entry->cpu, __entry->cs, __entry->ps,
+		  __entry->nt_cs, __entry->nt_ps, __entry->pid)
+);
+#endif /* CONFIG_SCHED_WALT */
+
+#endif /* CONFIG_SMP */
+
 #endif /* _TRACE_SCHED_H */
 
 /* This part must be outside protection */
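These added scheduler tracepoints are consumed like any other ftrace event. A minimal userspace sketch for enabling one of them, assuming tracefs is mounted at the usual debugfs path (it may also be /sys/kernel/tracing); records then appear in trace_pipe in the TP_printk formats defined above:

```c
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

/* Enable the sched_overutilized tracepoint; the same pattern works for the
 * WALT events (walt_update_task_ravg, walt_update_history, ...). */
static int enable_sched_event(const char *event)
{
	char path[256];
	int fd, n;

	n = snprintf(path, sizeof(path),
		     "/sys/kernel/debug/tracing/events/sched/%s/enable", event);
	if (n < 0 || n >= (int)sizeof(path))
		return -1;
	fd = open(path, O_WRONLY);
	if (fd < 0)
		return -1;
	write(fd, "1", 1);
	close(fd);
	return 0;
}
```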
diff -ruw linux-4.4.115/include/trace/events/skb.h linux-4.4.115-fbx/include/trace/events/skb.h
--- linux-4.4.115/include/trace/events/skb.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/trace/events/skb.h	2019-01-22 16:16:28.523291866 +0100
@@ -50,6 +50,33 @@
 	TP_printk("skbaddr=%p", __entry->skbaddr)
 );
 
+TRACE_EVENT(print_skb_gso,
+
+	TP_PROTO(struct sk_buff *skb, __be16 src, __be16 dest),
+
+	TP_ARGS(skb, src, dest),
+
+	TP_STRUCT__entry(
+		__field(void *,	skbaddr)
+		__field(int   ,	len)
+		__field(int   ,	data_len)
+		__field(__be16, src)
+		__field(__be16, dest)
+	),
+
+	TP_fast_assign(
+		__entry->skbaddr = skb;
+		__entry->len = skb->len;
+		__entry->data_len = skb->data_len;
+		__entry->src = src;
+		__entry->dest = dest;
+	),
+
+	TP_printk("GSO: skbaddr=%pK, len=%d, data_len=%d, src=%u, dest=%u",
+		__entry->skbaddr, __entry->len, __entry->data_len,
+		be16_to_cpu(__entry->src), be16_to_cpu(__entry->dest))
+);
+
 TRACE_EVENT(skb_copy_datagram_iovec,
 
 	TP_PROTO(const struct sk_buff *skb, int len),
diff -ruw linux-4.4.115/include/trace/trace_events.h linux-4.4.115-fbx/include/trace/trace_events.h
--- linux-4.4.115/include/trace/trace_events.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/trace/trace_events.h	2019-01-22 16:16:28.527291902 +0100
@@ -682,7 +682,8 @@
 									\
 	{ assign; }							\
 									\
-	trace_event_buffer_commit(&fbuffer);				\
+	trace_event_buffer_commit(&fbuffer,				\
+				  sizeof(*entry) + __data_size);	\
 }
 /*
  * The ftrace_test_probe is compiled out, it is only here as a build time check
diff -ruw linux-4.4.115/include/uapi/asm-generic/ioctls.h linux-4.4.115-fbx/include/uapi/asm-generic/ioctls.h
--- linux-4.4.115/include/uapi/asm-generic/ioctls.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/uapi/asm-generic/ioctls.h	2019-01-22 16:16:28.527291902 +0100
@@ -77,6 +77,9 @@
 #define TIOCGPKT	_IOR('T', 0x38, int) /* Get packet mode state */
 #define TIOCGPTLCK	_IOR('T', 0x39, int) /* Get Pty lock state */
 #define TIOCGEXCL	_IOR('T', 0x40, int) /* Get exclusive mode state */
+#define TIOCPMGET	0x5441	/* PM get */
+#define TIOCPMPUT	0x5442	/* PM put */
+#define TIOCPMACT	0x5443	/* PM is active */
 
 #define FIONCLEX	0x5450
 #define FIOCLEX		0x5451
diff -ruw linux-4.4.115/include/uapi/drm/drm_fourcc.h linux-4.4.115-fbx/include/uapi/drm/drm_fourcc.h
--- linux-4.4.115/include/uapi/drm/drm_fourcc.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/uapi/drm/drm_fourcc.h	2019-01-22 16:16:28.531291938 +0100
@@ -26,6 +26,10 @@
 
 #include <linux/types.h>
 
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
 #define fourcc_code(a, b, c, d) ((__u32)(a) | ((__u32)(b) << 8) | \
 				 ((__u32)(c) << 16) | ((__u32)(d) << 24))
 
@@ -230,4 +234,39 @@
  */
 #define DRM_FORMAT_MOD_SAMSUNG_64_32_TILE	fourcc_mod_code(SAMSUNG, 1)
 
+/*
+ * Qualcomm Compressed Format
+ *
+ * Refers to a compressed variant of the base format.
+ * Implementation may be platform and base-format specific.
+ */
+#define DRM_FORMAT_MOD_QCOM_COMPRESSED	fourcc_mod_code(QCOM, 1)
+
+/*
+ * QTI DX Format
+ *
+ * Refers to a DX variant of the base format.
+ * Implementation may be platform and base-format specific.
+ */
+#define DRM_FORMAT_MOD_QCOM_DX	fourcc_mod_code(QCOM, 0x2)
+
+/*
+ * QTI Tight Format
+ *
+ * Refers to a tightly packed variant of the base format.
+ * Implementation may be platform and base-format specific.
+ */
+#define DRM_FORMAT_MOD_QCOM_TIGHT	fourcc_mod_code(QCOM, 0x4)
+
+/*
+ * QTI Tile Format
+ *
+ * Refers to a tile variant of the base format.
+ * Implementation may be platform and base-format specific.
+ */
+#define DRM_FORMAT_MOD_QCOM_TILE	fourcc_mod_code(QCOM, 0x8)
+
+#if defined(__cplusplus)
+}
+#endif
 #endif /* DRM_FOURCC_H */
diff -ruw linux-4.4.115/include/uapi/drm/drm.h linux-4.4.115-fbx/include/uapi/drm/drm.h
--- linux-4.4.115/include/uapi/drm/drm.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/uapi/drm/drm.h	2019-01-22 16:16:28.531291938 +0100
@@ -58,6 +58,10 @@
 
 #endif
 
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
 #define DRM_NAME	"drm"	  /**< Name in kernel, /dev, and /proc */
 #define DRM_MIN_ORDER	5	  /**< At least 2^5 bytes = 32 bytes */
 #define DRM_MAX_ORDER	22	  /**< Up to 2^22 bytes = 4MB */
@@ -372,7 +376,11 @@
  */
 struct drm_buf_map {
 	int count;		/**< Length of the buffer list */
+#ifdef __cplusplus
+	void __user *virt;
+#else
 	void __user *virtual;		/**< Mmap'd area in user-virtual */
+#endif
 	struct drm_buf_pub __user *list;	/**< Buffer information */
 };
 
@@ -631,6 +639,7 @@
 #define DRM_CAP_CURSOR_WIDTH		0x8
 #define DRM_CAP_CURSOR_HEIGHT		0x9
 #define DRM_CAP_ADDFB2_MODIFIERS	0x10
+#define DRM_CAP_CRTC_IN_VBLANK_EVENT	0x12
 
 /** DRM_IOCTL_GET_CAP ioctl argument type */
 struct drm_get_cap {
@@ -679,7 +688,15 @@
 	__s32 fd;
 };
 
-#include <drm/drm_mode.h>
+#if defined(__cplusplus)
+}
+#endif
+
+#include "drm_mode.h"
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
 
 #define DRM_IOCTL_BASE			'd'
 #define DRM_IO(nr)			_IO(DRM_IOCTL_BASE,nr)
@@ -826,7 +843,7 @@
 	__u32 tv_sec;
 	__u32 tv_usec;
 	__u32 sequence;
-	__u32 reserved;
+	__u32 crtc_id; /* 0 on older kernels that do not support this */
 };
 
 /* typedef area */
@@ -874,4 +891,8 @@
 typedef struct drm_set_version drm_set_version_t;
 #endif
 
+#if defined(__cplusplus)
+}
+#endif
+
 #endif
diff -ruw linux-4.4.115/include/uapi/drm/drm_mode.h linux-4.4.115-fbx/include/uapi/drm/drm_mode.h
--- linux-4.4.115/include/uapi/drm/drm_mode.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/uapi/drm/drm_mode.h	2019-10-29 09:26:25.537221713 +0100
@@ -29,6 +29,10 @@
 
 #include <linux/types.h>
 
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
 #define DRM_DISPLAY_INFO_LEN	32
 #define DRM_CONNECTOR_NAME_LEN	32
 #define DRM_DISPLAY_MODE_LEN	32
@@ -72,7 +76,9 @@
 #define  DRM_MODE_FLAG_3D_L_DEPTH_GFX_GFX_DEPTH	(6<<14)
 #define  DRM_MODE_FLAG_3D_TOP_AND_BOTTOM	(7<<14)
 #define  DRM_MODE_FLAG_3D_SIDE_BY_SIDE_HALF	(8<<14)
-
+#define  DRM_MODE_FLAG_SEAMLESS			(1<<19)
+#define  DRM_MODE_FLAG_SUPPORTS_RGB		(1<<20)
+#define  DRM_MODE_FLAG_SUPPORTS_YUV		(1<<21)
 
 /* DPMS flags */
 /* bit compatible with the xorg definitions. */
@@ -354,6 +360,7 @@
 
 #define DRM_MODE_FB_INTERLACED	(1<<0) /* for interlaced framebuffers */
 #define DRM_MODE_FB_MODIFIERS	(1<<1) /* enables ->modifer[] */
+#define DRM_MODE_FB_SECURE	(1<<2) /* for secure framebuffers */
 
 struct drm_mode_fb_cmd2 {
 	__u32 fb_id;
@@ -596,4 +603,8 @@
 	__u32 blob_id;
 };
 
+#if defined(__cplusplus)
+}
+#endif
+
 #endif
diff -ruw linux-4.4.115/include/uapi/drm/drm_sarea.h linux-4.4.115-fbx/include/uapi/drm/drm_sarea.h
--- linux-4.4.115/include/uapi/drm/drm_sarea.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/uapi/drm/drm_sarea.h	2019-01-22 16:16:28.531291938 +0100
@@ -34,6 +34,10 @@
 
 #include <drm/drm.h>
 
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
 /* SAREA area needs to be at least a page */
 #if defined(__alpha__)
 #define SAREA_MAX                       0x2000U
@@ -83,4 +87,8 @@
 typedef struct drm_sarea drm_sarea_t;
 #endif
 
+#if defined(__cplusplus)
+}
+#endif
+
 #endif				/* _DRM_SAREA_H_ */
diff -ruw linux-4.4.115/include/uapi/drm/Kbuild linux-4.4.115-fbx/include/uapi/drm/Kbuild
--- linux-4.4.115/include/uapi/drm/Kbuild	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/uapi/drm/Kbuild	2019-01-22 16:16:28.527291902 +0100
@@ -18,3 +18,5 @@
 header-y += vmwgfx_drm.h
 header-y += msm_drm.h
 header-y += virtgpu_drm.h
+header-y += sde_drm.h
+header-y += msm_drm_pp.h
diff -ruw linux-4.4.115/include/uapi/drm/msm_drm.h linux-4.4.115-fbx/include/uapi/drm/msm_drm.h
--- linux-4.4.115/include/uapi/drm/msm_drm.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/uapi/drm/msm_drm.h	2019-01-22 16:16:28.531291938 +0100
@@ -20,6 +20,11 @@
 
 #include <stddef.h>
 #include <drm/drm.h>
+#include <drm/sde_drm.h>
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
 
 /* Please note that modifications to all structs defined here are
  * subject to backwards-compatibility constraints:
@@ -39,6 +44,15 @@
 #define MSM_PIPE_2D1         0x02
 #define MSM_PIPE_3D0         0x10
 
+/* The pipe-id just uses the lower bits, so can be OR'd with flags in
+ * the upper 16 bits (which could be extended further, if needed, maybe
+ * we extend/overload the pipe-id some day to deal with multiple rings,
+ * but even then I don't think we need the full lower 16 bits).
+ */
+#define MSM_PIPE_ID_MASK     0xffff
+#define MSM_PIPE_ID(x)       ((x) & MSM_PIPE_ID_MASK)
+#define MSM_PIPE_FLAGS(x)    ((x) & ~MSM_PIPE_ID_MASK)
+
 /* timeouts are specified in clock-monotonic absolute times (to simplify
  * restarting interrupted ioctls).  The following struct is logically the
  * same as 'struct timespec' but 32/64b ABI safe.
@@ -48,9 +62,73 @@
 	__s64 tv_nsec;         /* nanoseconds */
 };
 
+/* From CEA.861.3 */
+#define HDR_EOTF_SMTPE_ST2084	0x2
+#define HDR_EOTF_HLG		0x3
+
+/* hdr hdmi state takes the possible values 0, 1 and 2 */
+#define DRM_MSM_HDR_DISABLE  0
+#define DRM_MSM_HDR_ENABLE   1
+#define DRM_MSM_HDR_RESET    2
+
+/*
+ * HDR Metadata
+ * These are defined as per EDID spec and shall be used by the sink
+ * to set the HDR metadata for playback from userspace.
+ */
+
+#define HDR_PRIMARIES_COUNT   3
+
+struct drm_msm_ext_panel_hdr_metadata {
+	__u32 eotf;             /* electro optical transfer function */
+	__u32 hdr_supported;    /* HDR supported */
+	__u32 display_primaries_x[HDR_PRIMARIES_COUNT]; /* Primaries x */
+	__u32 display_primaries_y[HDR_PRIMARIES_COUNT]; /* Primaries y */
+	__u32 white_point_x;    /* white_point_x */
+	__u32 white_point_y;    /* white_point_y */
+	__u32 max_luminance;    /* Max luminance */
+	__u32 min_luminance;    /* Min Luminance */
+	__u32 max_content_light_level; /* max content light level */
+	__u32 max_average_light_level; /* max average light level */
+};
+
+/**
+ * HDR Control
+ * This encapsulates the HDR metadata as well as a state control
+ * for the HDR metadata as required by the HDMI spec to send the
+ * relevant metadata depending on the state of the HDR playback.
+ * hdr_state: Controls HDR state, takes values DISABLE(0)/ENABLE(1)/RESET(2)
+ * hdr_meta: Metadata sent by the userspace for the HDR clip
+ */
+
+#define DRM_MSM_EXT_PANEL_HDR_CTRL
+struct drm_msm_ext_panel_hdr_ctrl {
+	__u8 hdr_state;                                 /* HDR state */
+	struct drm_msm_ext_panel_hdr_metadata hdr_meta; /* HDR metadata */
+};
+
+/**
+ * HDR sink properties
+ * These are defined as per EDID spec and shall be used by the userspace
+ * to determine the HDR properties to be set to the sink.
+ */
+struct drm_msm_ext_panel_hdr_properties {
+	__u8 hdr_metadata_type_one;   /* static metadata type one */
+	__u32 hdr_supported;          /* HDR supported */
+	__u32 hdr_eotf;               /* electro optical transfer function */
+	__u32 hdr_max_luminance;      /* Max luminance */
+	__u32 hdr_avg_luminance;      /* Avg luminance */
+	__u32 hdr_min_luminance;      /* Min Luminance */
+};
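A hedged illustration of how userspace might populate the control struct; the numeric values are made up, and the mechanism that delivers the struct to the driver (typically a property blob) is outside this header's scope:

```c
struct drm_msm_ext_panel_hdr_ctrl ctrl = {
	.hdr_state = DRM_MSM_HDR_ENABLE,
	.hdr_meta = {
		.hdr_supported = 1,
		.eotf = HDR_EOTF_SMTPE_ST2084,    /* spelled as defined above */
		.max_luminance = 1000,            /* illustrative values only */
		.min_luminance = 50,
		.max_content_light_level = 1000,
		.max_average_light_level = 400,
	},
};
```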
+
 #define MSM_PARAM_GPU_ID     0x01
 #define MSM_PARAM_GMEM_SIZE  0x02
 #define MSM_PARAM_CHIP_ID    0x03
+#define MSM_PARAM_MAX_FREQ           0x04
+#define MSM_PARAM_TIMESTAMP          0x05
+#define MSM_PARAM_GMEM_BASE          0x06
+#define MSM_PARAM_NR_RINGS           0x07
+#define MSM_PARAM_GPU_HANG_TIMEOUT   0xa0 /* timeout in ms */
 
 struct drm_msm_param {
 	__u32 pipe;           /* in, MSM_PIPE_x */
@@ -64,6 +142,8 @@
 
 #define MSM_BO_SCANOUT       0x00000001     /* scanout capable */
 #define MSM_BO_GPU_READONLY  0x00000002
+#define MSM_BO_PRIVILEGED    0x00000004
+#define MSM_BO_SECURE        0x00000008	    /* Allocate and map as secure */
 #define MSM_BO_CACHE_MASK    0x000f0000
 /* cache modes */
 #define MSM_BO_CACHED        0x00010000
@@ -72,6 +152,7 @@
 
 #define MSM_BO_FLAGS         (MSM_BO_SCANOUT | \
                               MSM_BO_GPU_READONLY | \
+                              MSM_BO_SECURE | \
                               MSM_BO_CACHED | \
                               MSM_BO_WC | \
                               MSM_BO_UNCACHED)
@@ -82,10 +163,21 @@
 	__u32 handle;         /* out */
 };
 
+struct drm_msm_gem_svm_new {
+	__u64 hostptr;        /* in, must be page-aligned */
+	__u64 size;           /* in, must be page-aligned */
+	__u32 flags;          /* in, mask of MSM_BO_x */
+	__u32 handle;         /* out */
+};
+
+#define MSM_INFO_IOVA	0x01
+
+#define MSM_INFO_FLAGS (MSM_INFO_IOVA)
+
 struct drm_msm_gem_info {
 	__u32 handle;         /* in */
-	__u32 pad;
-	__u64 offset;         /* out, offset to pass to mmap() */
+	__u32 flags;	      /* in - combination of MSM_INFO_* flags */
+	__u64 offset;         /* out, mmap() offset or iova */
 };
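With the repurposed flags field, the same ioctl now returns either an mmap() offset or a GPU iova. A sketch, assuming the DRM_IOCTL_MSM_GEM_INFO wrapper defined elsewhere in this header, with drm_fd and handle as placeholders:

```c
struct drm_msm_gem_info info = {
	.handle = handle,
	.flags  = MSM_INFO_IOVA,
};

if (ioctl(drm_fd, DRM_IOCTL_MSM_GEM_INFO, &info) == 0) {
	__u64 iova = info.offset;   /* GPU address, not an mmap() offset */
}
```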
 
 #define MSM_PREP_READ        0x01
@@ -121,7 +213,11 @@
  */
 struct drm_msm_gem_submit_reloc {
 	__u32 submit_offset;  /* in, offset from submit_bo */
+#ifdef __cplusplus
+	__u32 or_val;
+#else
 	__u32 or;             /* in, value OR'd with result */
+#endif
 	__s32  shift;          /* in, amount of left shift (can be negative) */
 	__u32 reloc_idx;      /* in, index of reloc_bo buffer */
 	__u64 reloc_offset;   /* in, offset from start of reloc_bo */
@@ -134,10 +230,13 @@
  *      this buffer in the first-level ringbuffer
  *   CTX_RESTORE_BUF - only executed if there has been a GPU context
  *      switch since the last SUBMIT ioctl
+ *   PROFILE_BUF - A profiling buffer written to by both GPU and CPU.
  */
 #define MSM_SUBMIT_CMD_BUF             0x0001
 #define MSM_SUBMIT_CMD_IB_TARGET_BUF   0x0002
 #define MSM_SUBMIT_CMD_CTX_RESTORE_BUF 0x0003
+#define MSM_SUBMIT_CMD_PROFILE_BUF     0x0004
+
 struct drm_msm_gem_submit_cmd {
 	__u32 type;           /* in, one of MSM_SUBMIT_CMD_x */
 	__u32 submit_idx;     /* in, index of submit_bo cmdstream buffer */
@@ -145,7 +244,7 @@
 	__u32 size;           /* in, cmdstream size */
 	__u32 pad;
 	__u32 nr_relocs;      /* in, number of submit_reloc's */
-	__u64 __user relocs;  /* in, ptr to array of submit_reloc's */
+	__u64 relocs;         /* in, ptr to array of submit_reloc's */
 };
 
 /* Each buffer referenced elsewhere in the cmdstream submit (ie. the
@@ -170,17 +269,39 @@
 	__u64 presumed;       /* in/out, presumed buffer address */
 };
 
+/* Valid submit ioctl flags: */
+#define MSM_SUBMIT_RING_MASK 0x000F0000
+#define MSM_SUBMIT_RING_SHIFT 16
+
+#define MSM_SUBMIT_FLAGS (MSM_SUBMIT_RING_MASK)
+
 /* Each cmdstream submit consists of a table of buffers involved, and
  * one or more cmdstream buffers.  This allows for conditional execution
  * (context-restore), and IB buffers needed for per tile/bin draw cmds.
  */
 struct drm_msm_gem_submit {
-	__u32 pipe;           /* in, MSM_PIPE_x */
+	__u32 flags;          /* MSM_PIPE_x | MSM_SUBMIT_x */
 	__u32 fence;          /* out */
 	__u32 nr_bos;         /* in, number of submit_bo's */
 	__u32 nr_cmds;        /* in, number of submit_cmd's */
-	__u64 __user bos;     /* in, ptr to array of submit_bo's */
-	__u64 __user cmds;    /* in, ptr to array of submit_cmd's */
+	__u64 bos;     /* in, ptr to array of submit_bo's */
+	__u64 cmds;    /* in, ptr to array of submit_cmd's */
+	__s32 fence_fd;       /* gap for the fence_fd which is upstream */
+	__u32 queueid;         /* in, submitqueue id */
+};
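To tie the pieces together, a hedged sketch of a submission that targets ring 1 of the 3D pipe and a previously created submit queue; drm_fd, bos, cmds and queue_id are placeholders:

```c
struct drm_msm_gem_submit req = {
	/* pipe id in the low bits, ring selection in the flag bits */
	.flags   = MSM_PIPE_3D0 | (1 << MSM_SUBMIT_RING_SHIFT),
	.nr_bos  = nr_bos,
	.nr_cmds = nr_cmds,
	.bos     = (__u64)(uintptr_t)bos,    /* array of drm_msm_gem_submit_bo */
	.cmds    = (__u64)(uintptr_t)cmds,   /* array of drm_msm_gem_submit_cmd */
	.queueid = queue_id,                 /* from DRM_MSM_SUBMITQUEUE_NEW */
};
int ret = ioctl(drm_fd, DRM_IOCTL_MSM_GEM_SUBMIT, &req);
```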
+
+/*
+ * Define a preprocessor variable to let userspace know that
+ * drm_msm_gem_submit_profile_buffer switched to supporting only a kernel
+ * timestamp for submit time.
+ */
+#define MSM_PROFILE_BUFFER_SUBMIT_TIME 1
+
+struct drm_msm_gem_submit_profile_buffer {
+	struct drm_msm_timespec time;   /* out, submission time */
+	__u64 ticks_queued;    /* out, GPU ticks at ringbuffer submission */
+	__u64 ticks_submitted; /* out, GPU ticks before cmdstream execution*/
+	__u64 ticks_retired;   /* out, GPU ticks after cmdstream execution */
 };
 
 /* The normal way to synchronize with the GPU is just to CPU_PREP on
@@ -196,6 +317,141 @@
 	struct drm_msm_timespec timeout;   /* in */
 };
 
+/**
+ * struct drm_msm_event_req - Payload to event enable/disable ioctls.
+ * @object_id: DRM object id. Ex: for crtc pass crtc id.
+ * @object_type: DRM object type. Ex: for crtc set it to DRM_MODE_OBJECT_CRTC.
+ * @event: Event for which notification is being enabled/disabled.
+ *         Ex: for Histogram set - DRM_EVENT_HISTOGRAM.
+ * @client_context: Opaque pointer that will be returned during event response
+ *                  notification.
+ * @index: Object index (e.g. crtc index), optional for user-space to set.
+ *         Driver will override value based on object_id and object_type.
+ */
+struct drm_msm_event_req {
+	__u32 object_id;
+	__u32 object_type;
+	__u32 event;
+	__u64 client_context;
+	__u32 index;
+};
+
+/**
+ * struct drm_msm_event_resp - payload returned when read is called for
+ *                            custom notifications.
+ * @base: Event type and length of complete notification payload.
+ * @info: Contains information about the DRM object that raised this event.
+ * @data: Custom payload that driver returns for event type.
+ *        size of data = base.length - (sizeof(base) + sizeof(info))
+ */
+struct drm_msm_event_resp {
+	struct drm_event base;
+	struct drm_msm_event_req info;
+	__u8 data[];
+};
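A sketch of the intended flow, using the register ioctl and event codes defined further down in this header; crtc_id and drm_fd are placeholders:

```c
struct drm_msm_event_req req = {
	.object_id   = crtc_id,
	.object_type = DRM_MODE_OBJECT_CRTC,
	.event       = DRM_EVENT_HISTOGRAM,
};
ioctl(drm_fd, DRM_IOCTL_MSM_REGISTER_EVENT, &req);

/* Responses are read() from the DRM fd like standard DRM events and
 * distinguished by drm_event.type. */
char buf[1024];
ssize_t n = read(drm_fd, buf, sizeof(buf));
if (n >= (ssize_t)sizeof(struct drm_msm_event_resp)) {
	struct drm_msm_event_resp *resp = (struct drm_msm_event_resp *)buf;
	/* resp->data holds base.length - (sizeof(base) + sizeof(info)) bytes */
}
```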
+
+#define MSM_COUNTER_GROUP_CP 0
+#define MSM_COUNTER_GROUP_RBBM 1
+#define MSM_COUNTER_GROUP_PC 2
+#define MSM_COUNTER_GROUP_VFD 3
+#define MSM_COUNTER_GROUP_HLSQ 4
+#define MSM_COUNTER_GROUP_VPC 5
+#define MSM_COUNTER_GROUP_TSE 6
+#define MSM_COUNTER_GROUP_RAS 7
+#define MSM_COUNTER_GROUP_UCHE 8
+#define MSM_COUNTER_GROUP_TP 9
+#define MSM_COUNTER_GROUP_SP 10
+#define MSM_COUNTER_GROUP_RB 11
+#define MSM_COUNTER_GROUP_VBIF 12
+#define MSM_COUNTER_GROUP_VBIF_PWR 13
+#define MSM_COUNTER_GROUP_VSC 23
+#define MSM_COUNTER_GROUP_CCU 24
+#define MSM_COUNTER_GROUP_LRZ 25
+#define MSM_COUNTER_GROUP_CMP 26
+#define MSM_COUNTER_GROUP_ALWAYSON 27
+#define MSM_COUNTER_GROUP_SP_PWR 28
+#define MSM_COUNTER_GROUP_TP_PWR 29
+#define MSM_COUNTER_GROUP_RB_PWR 30
+#define MSM_COUNTER_GROUP_CCU_PWR 31
+#define MSM_COUNTER_GROUP_UCHE_PWR 32
+#define MSM_COUNTER_GROUP_CP_PWR 33
+#define MSM_COUNTER_GROUP_GPMU_PWR 34
+#define MSM_COUNTER_GROUP_ALWAYSON_PWR 35
+
+/**
+ * struct drm_msm_counter - allocate or release a GPU performance counter
+ * @groupid: The group ID of the counter to get/put
+ * @counterid: For GET returns the counterid that was assigned. For PUT
+ *	       release the counter identified by groupid/counterid
+ * @countable: For GET the countable for the counter
+ */
+struct drm_msm_counter {
+	__u32 groupid;
+	int counterid;
+	__u32 countable;
+	__u32 counter_lo;
+	__u32 counter_hi;
+};
+
+struct drm_msm_counter_read_op {
+	__u64 value;
+	__u32 groupid;
+	int counterid;
+};
+
+/**
+ * struct drm_msm_counter_read - Read a number of GPU performance counters
+ * ops: Pointer to the list of struct drm_msm_counter_read_op operations
+ * nr_ops: Number of operations in the list
+ */
+struct drm_msm_counter_read {
+	__u64 __user ops;
+	__u32 nr_ops;
+};
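Putting the counter ioctls together, a hedged sketch that allocates a CP counter and samples it once; 'countable' is a hardware-specific id and a placeholder here:

```c
struct drm_msm_counter counter = {
	.groupid   = MSM_COUNTER_GROUP_CP,
	.countable = countable,
};
ioctl(drm_fd, DRM_IOCTL_MSM_COUNTER_GET, &counter);

struct drm_msm_counter_read_op op = {
	.groupid   = counter.groupid,
	.counterid = counter.counterid,
};
struct drm_msm_counter_read readreq = {
	.ops    = (__u64)(uintptr_t)&op,
	.nr_ops = 1,
};
ioctl(drm_fd, DRM_IOCTL_MSM_COUNTER_READ, &readreq);
/* op.value now holds the 64-bit sample; release with COUNTER_PUT when done */
```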
+
+#define MSM_GEM_SYNC_TO_DEV 0
+#define MSM_GEM_SYNC_TO_CPU 1
+
+struct drm_msm_gem_syncop {
+	__u32 handle;
+	__u32 op;
+};
+
+struct drm_msm_gem_sync {
+	__u32 nr_ops;
+	__u64 __user ops;
+};
+
+/*
+ * Draw queues allow the user to set specific submission parameters. Command
+ * submissions will specify a specific submit queue id to use. id '0' is
+ * reserved as a "default" drawqueue with medium priority. The user can safely
+ * use and query 0 but cannot destroy it.
+ */
+
+/*
+ * Allows a process to bypass the 2 second quality of service timeout.
+ * Only CAP_SYS_ADMIN capable processes can set this flag.
+ */
+#define MSM_SUBMITQUEUE_BYPASS_QOS_TIMEOUT 0x00000001
+
+#define MSM_SUBMITQUEUE_FLAGS (MSM_SUBMITQUEUE_BYPASS_QOS_TIMEOUT)
+
+struct drm_msm_submitqueue {
+	__u32 flags;   /* in, MSM_SUBMITQUEUE_x */
+	__u32 prio;    /* in, Priority level */
+	__u32 id;      /* out, identifier */
+};
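A sketch of queue creation; the priority value is illustrative and its valid range is driver-specific (the ioctl wrappers are defined at the end of this header):

```c
struct drm_msm_submitqueue q = {
	.flags = 0,
	.prio  = 2,    /* illustrative */
};
if (ioctl(drm_fd, DRM_IOCTL_MSM_SUBMITQUEUE_NEW, &q) == 0) {
	/* q.id is passed as drm_msm_gem_submit.queueid; destroy with
	 * DRM_IOCTL_MSM_SUBMITQUEUE_CLOSE when no longer needed */
}
```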
+
+#define MSM_SUBMITQUEUE_PARAM_FAULTS 0
+
+struct drm_msm_submitqueue_query {
+	__u64 data;
+	__u32 id;
+	__u32 param;
+	__u32 len;
+};
+
 #define DRM_MSM_GET_PARAM              0x00
 /* placeholder:
 #define DRM_MSM_SET_PARAM              0x01
@@ -206,7 +462,27 @@
 #define DRM_MSM_GEM_CPU_FINI           0x05
 #define DRM_MSM_GEM_SUBMIT             0x06
 #define DRM_MSM_WAIT_FENCE             0x07
-#define DRM_MSM_NUM_IOCTLS             0x08
+/* Gap for upstream DRM_MSM_GEM_MADVISE */
+#define DRM_MSM_GEM_SVM_NEW            0x09
+#define DRM_MSM_SUBMITQUEUE_NEW        0x0A
+#define DRM_MSM_SUBMITQUEUE_CLOSE      0x0B
+#define DRM_MSM_SUBMITQUEUE_QUERY      0x0C
+
+#define DRM_SDE_WB_CONFIG              0x40
+#define DRM_MSM_REGISTER_EVENT         0x41
+#define DRM_MSM_DEREGISTER_EVENT       0x42
+#define DRM_MSM_COUNTER_GET            0x43
+#define DRM_MSM_COUNTER_PUT            0x44
+#define DRM_MSM_COUNTER_READ           0x45
+#define DRM_MSM_GEM_SYNC               0x46
+
+/**
+ * Currently the DRM framework supports only the VSYNC event.
+ * Custom events start at 0xff to leave room for the DRM
+ * framework to add new events.
+ */
+#define DRM_EVENT_HISTOGRAM 0xff
+#define DRM_EVENT_AD 0x100
 
 #define DRM_IOCTL_MSM_GET_PARAM        DRM_IOWR(DRM_COMMAND_BASE + DRM_MSM_GET_PARAM, struct drm_msm_param)
 #define DRM_IOCTL_MSM_GEM_NEW          DRM_IOWR(DRM_COMMAND_BASE + DRM_MSM_GEM_NEW, struct drm_msm_gem_new)
@@ -215,5 +491,36 @@
 #define DRM_IOCTL_MSM_GEM_CPU_FINI     DRM_IOW (DRM_COMMAND_BASE + DRM_MSM_GEM_CPU_FINI, struct drm_msm_gem_cpu_fini)
 #define DRM_IOCTL_MSM_GEM_SUBMIT       DRM_IOWR(DRM_COMMAND_BASE + DRM_MSM_GEM_SUBMIT, struct drm_msm_gem_submit)
 #define DRM_IOCTL_MSM_WAIT_FENCE       DRM_IOW (DRM_COMMAND_BASE + DRM_MSM_WAIT_FENCE, struct drm_msm_wait_fence)
+#define DRM_IOCTL_SDE_WB_CONFIG \
+	DRM_IOW((DRM_COMMAND_BASE + DRM_SDE_WB_CONFIG), struct sde_drm_wb_cfg)
+#define DRM_IOCTL_MSM_REGISTER_EVENT   DRM_IOW((DRM_COMMAND_BASE + \
+			DRM_MSM_REGISTER_EVENT), struct drm_msm_event_req)
+#define DRM_IOCTL_MSM_DEREGISTER_EVENT DRM_IOW((DRM_COMMAND_BASE + \
+			DRM_MSM_DEREGISTER_EVENT), struct drm_msm_event_req)
+#define DRM_IOCTL_MSM_COUNTER_GET \
+	DRM_IOWR(DRM_COMMAND_BASE + DRM_MSM_COUNTER_GET, struct drm_msm_counter)
+#define DRM_IOCTL_MSM_COUNTER_PUT \
+	DRM_IOW(DRM_COMMAND_BASE + DRM_MSM_COUNTER_PUT, struct drm_msm_counter)
+#define DRM_IOCTL_MSM_COUNTER_READ \
+	DRM_IOWR(DRM_COMMAND_BASE + DRM_MSM_COUNTER_READ, \
+		struct drm_msm_counter_read)
+#define DRM_IOCTL_MSM_GEM_SYNC DRM_IOW(DRM_COMMAND_BASE + DRM_MSM_GEM_SYNC,\
+		struct drm_msm_gem_sync)
+#define DRM_IOCTL_MSM_GEM_SVM_NEW \
+	DRM_IOWR(DRM_COMMAND_BASE + DRM_MSM_GEM_SVM_NEW, \
+		struct drm_msm_gem_svm_new)
+#define DRM_IOCTL_MSM_SUBMITQUEUE_NEW \
+	DRM_IOWR(DRM_COMMAND_BASE + DRM_MSM_SUBMITQUEUE_NEW, \
+		struct drm_msm_submitqueue)
+#define DRM_IOCTL_MSM_SUBMITQUEUE_CLOSE \
+	DRM_IOW(DRM_COMMAND_BASE + DRM_MSM_SUBMITQUEUE_CLOSE, \
+		struct drm_msm_submitqueue)
+#define DRM_IOCTL_MSM_SUBMITQUEUE_QUERY \
+	DRM_IOWR(DRM_COMMAND_BASE + DRM_MSM_SUBMITQUEUE_QUERY, \
+		struct drm_msm_submitqueue_query)
+
+#if defined(__cplusplus)
+}
+#endif
 
 #endif /* __MSM_DRM_H__ */
diff -ruw linux-4.4.115/include/uapi/Kbuild linux-4.4.115-fbx/include/uapi/Kbuild
--- linux-4.4.115/include/uapi/Kbuild	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/uapi/Kbuild	2019-01-22 16:16:28.527291902 +0100
@@ -13,3 +13,4 @@
 header-y += xen/
 header-y += scsi/
 header-y += misc/
+header-y += media/
diff -ruw linux-4.4.115/include/uapi/linux/android/binder.h linux-4.4.115-fbx/include/uapi/linux/android/binder.h
--- linux-4.4.115/include/uapi/linux/android/binder.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/uapi/linux/android/binder.h	2019-10-29 09:26:25.541221752 +0100
@@ -33,11 +33,60 @@
 	BINDER_TYPE_HANDLE	= B_PACK_CHARS('s', 'h', '*', B_TYPE_LARGE),
 	BINDER_TYPE_WEAK_HANDLE	= B_PACK_CHARS('w', 'h', '*', B_TYPE_LARGE),
 	BINDER_TYPE_FD		= B_PACK_CHARS('f', 'd', '*', B_TYPE_LARGE),
+	BINDER_TYPE_FDA		= B_PACK_CHARS('f', 'd', 'a', B_TYPE_LARGE),
+	BINDER_TYPE_PTR		= B_PACK_CHARS('p', 't', '*', B_TYPE_LARGE),
 };
 
-enum {
+/**
+ * enum flat_binder_object_shifts: shift values for flat_binder_object_flags
+ * @FLAT_BINDER_FLAG_SCHED_POLICY_SHIFT: shift for getting scheduler policy.
+ *
+ */
+enum flat_binder_object_shifts {
+	FLAT_BINDER_FLAG_SCHED_POLICY_SHIFT = 9,
+};
+
+/**
+ * enum flat_binder_object_flags - flags for use in flat_binder_object.flags
+ */
+enum flat_binder_object_flags {
+	/**
+	 * @FLAT_BINDER_FLAG_PRIORITY_MASK: bit-mask for min scheduler priority
+	 *
+	 * These bits can be used to set the minimum scheduler priority
+	 * at which transactions into this node should run. Valid values
+	 * in these bits depend on the scheduler policy encoded in
+	 * @FLAT_BINDER_FLAG_SCHED_POLICY_MASK.
+	 *
+	 * For SCHED_NORMAL/SCHED_BATCH, the valid range is between [-20..19]
+	 * For SCHED_FIFO/SCHED_RR, the value can run between [1..99]
+	 */
 	FLAT_BINDER_FLAG_PRIORITY_MASK = 0xff,
+	/**
+	 * @FLAT_BINDER_FLAG_ACCEPTS_FDS: whether the node accepts fds.
+	 */
 	FLAT_BINDER_FLAG_ACCEPTS_FDS = 0x100,
+	/**
+	 * @FLAT_BINDER_FLAG_SCHED_POLICY_MASK: bit-mask for scheduling policy
+	 *
+	 * These two bits can be used to set the min scheduling policy at which
+	 * transactions on this node should run. These match the UAPI
+	 * scheduler policy values, eg:
+	 * 00b: SCHED_NORMAL
+	 * 01b: SCHED_FIFO
+	 * 10b: SCHED_RR
+	 * 11b: SCHED_BATCH
+	 */
+	FLAT_BINDER_FLAG_SCHED_POLICY_MASK =
+		3U << FLAT_BINDER_FLAG_SCHED_POLICY_SHIFT,
+
+	/**
+	 * @FLAT_BINDER_FLAG_INHERIT_RT: whether the node inherits RT policy
+	 *
+	 * Only when set, calls into this node will inherit a real-time
+	 * scheduling policy from the caller (for synchronous transactions).
+	 */
+	FLAT_BINDER_FLAG_INHERIT_RT = 0x800,
 };
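As a concrete reading of these bits, a sketch composing node flags that request at least SCHED_FIFO priority 50 for incoming transactions (SCHED_FIFO is 01b in the UAPI encoding listed above):

```c
__u32 flags = FLAT_BINDER_FLAG_ACCEPTS_FDS
	    | (1U << FLAT_BINDER_FLAG_SCHED_POLICY_SHIFT)  /* 01b = SCHED_FIFO */
	    | (50 & FLAT_BINDER_FLAG_PRIORITY_MASK)        /* min priority 50 */
	    | FLAT_BINDER_FLAG_INHERIT_RT;
```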
 
 #ifdef BINDER_IPC_32BIT
@@ -48,6 +97,14 @@
 typedef __u64 binder_uintptr_t;
 #endif
 
+/**
+ * struct binder_object_header - header shared by all binder metadata objects.
+ * @type:	type of the object
+ */
+struct binder_object_header {
+	__u32        type;
+};
+
 /*
  * This is the flattened representation of a Binder object for transfer
  * between processes.  The 'offsets' supplied as part of a binder transaction
@@ -56,8 +113,7 @@
  * between processes.
  */
 struct flat_binder_object {
-	/* 8 bytes for large_flat_header. */
-	__u32		type;
+	struct binder_object_header	hdr;
 	__u32		flags;
 
 	/* 8 bytes of data. */
@@ -70,6 +126,86 @@
 	binder_uintptr_t	cookie;
 };
 
+/**
+ * struct binder_fd_object - describes a filedescriptor to be fixed up.
+ * @hdr:	common header structure
+ * @pad_flags:	padding to remain compatible with old userspace code
+ * @pad_binder:	padding to remain compatible with old userspace code
+ * @fd:		file descriptor
+ * @cookie:	opaque data, used by user-space
+ */
+struct binder_fd_object {
+	struct binder_object_header	hdr;
+	__u32				pad_flags;
+	union {
+		binder_uintptr_t	pad_binder;
+		__u32			fd;
+	};
+
+	binder_uintptr_t		cookie;
+};
+
+/* struct binder_buffer_object - object describing a userspace buffer
+ * @hdr:		common header structure
+ * @flags:		one or more BINDER_BUFFER_* flags
+ * @buffer:		address of the buffer
+ * @length:		length of the buffer
+ * @parent:		index in offset array pointing to parent buffer
+ * @parent_offset:	offset in @parent pointing to this buffer
+ *
+ * A binder_buffer object represents an object that the
+ * binder kernel driver can copy verbatim to the target
+ * address space. A buffer itself may be pointed to from
+ * within another buffer, meaning that the pointer inside
+ * that other buffer needs to be fixed up as well. This
+ * can be done by setting the BINDER_BUFFER_FLAG_HAS_PARENT
+ * flag in @flags, by setting @parent buffer to the index
+ * in the offset array pointing to the parent binder_buffer_object,
+ * and by setting @parent_offset to the offset in the parent buffer
+ * at which the pointer to this buffer is located.
+ */
+struct binder_buffer_object {
+	struct binder_object_header	hdr;
+	__u32				flags;
+	binder_uintptr_t		buffer;
+	binder_size_t			length;
+	binder_size_t			parent;
+	binder_size_t			parent_offset;
+};
+
+enum {
+	BINDER_BUFFER_FLAG_HAS_PARENT = 0x01,
+};
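A sketch of how @parent and @parent_offset fit together, with a hypothetical struct outer_t and field inner_ptr; the transaction's offsets array would list 'parent' at index 0 and 'child' after it:

```c
#include <stddef.h>   /* offsetof */

struct outer_t {
	__u32 count;
	binder_uintptr_t inner_ptr;   /* points at a second buffer */
};

struct outer_t outer;
char inner[64];

struct binder_buffer_object parent = {
	.hdr.type = BINDER_TYPE_PTR,
	.buffer   = (binder_uintptr_t)&outer,
	.length   = sizeof(outer),
};

struct binder_buffer_object child = {
	.hdr.type      = BINDER_TYPE_PTR,
	.flags         = BINDER_BUFFER_FLAG_HAS_PARENT,
	.buffer        = (binder_uintptr_t)inner,
	.length        = sizeof(inner),
	.parent        = 0,  /* index of 'parent' in the offsets array */
	.parent_offset = offsetof(struct outer_t, inner_ptr),
};
```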
+
+/* struct binder_fd_array_object - object describing an array of fds in a buffer
+ * @hdr:		common header structure
+ * @pad:		padding to ensure correct alignment
+ * @num_fds:		number of file descriptors in the buffer
+ * @parent:		index in offset array to buffer holding the fd array
+ * @parent_offset:	start offset of fd array in the buffer
+ *
+ * A binder_fd_array object represents an array of file
+ * descriptors embedded in a binder_buffer_object. It is
+ * different from a regular binder_buffer_object because it
+ * describes a list of file descriptors to fix up, not an opaque
+ * blob of memory, and hence the kernel needs to treat it differently.
+ *
+ * An example of how this would be used is with Android's
+ * native_handle_t object, which is a struct with a list of integers
+ * and a list of file descriptors. The native_handle_t struct itself
+ * will be represented by a struct binder_buffer_object, whereas the
+ * embedded list of file descriptors is represented by a
+ * struct binder_fd_array_object with that binder_buffer_object as
+ * a parent.
+ */
+struct binder_fd_array_object {
+	struct binder_object_header	hdr;
+	__u32				pad;
+	binder_size_t			num_fds;
+	binder_size_t			parent;
+	binder_size_t			parent_offset;
+};
+
 /*
  * On 64-bit platforms where user code may run in 32-bits the driver must
  * translate the buffer (and local binder) addresses appropriately.
@@ -97,6 +233,19 @@
 #define BINDER_CURRENT_PROTOCOL_VERSION 8
 #endif
 
+/*
+ * Use with BINDER_GET_NODE_DEBUG_INFO, driver reads ptr, writes to all fields.
+ * Set ptr to NULL for the first call to get the info for the first node, and
+ * then repeat the call passing the previously returned value to get the next
+ * nodes.  ptr will be 0 when there are no more nodes.
+ */
+struct binder_node_debug_info {
+	binder_uintptr_t ptr;
+	binder_uintptr_t cookie;
+	__u32            has_strong_ref;
+	__u32            has_weak_ref;
+};
+
 #define BINDER_WRITE_READ		_IOWR('b', 1, struct binder_write_read)
 #define BINDER_SET_IDLE_TIMEOUT		_IOW('b', 3, __s64)
 #define BINDER_SET_MAX_THREADS		_IOW('b', 5, __u32)
@@ -104,6 +253,7 @@
 #define BINDER_SET_CONTEXT_MGR		_IOW('b', 7, __s32)
 #define BINDER_THREAD_EXIT		_IOW('b', 8, __s32)
 #define BINDER_VERSION			_IOWR('b', 9, struct binder_version)
+#define BINDER_GET_NODE_DEBUG_INFO	_IOWR('b', 11, struct binder_node_debug_info)
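Following the usage described above, a sketch of the iteration loop; binder_fd is a placeholder for an open binder device fd:

```c
struct binder_node_debug_info info = { .ptr = 0 };

for (;;) {
	if (ioctl(binder_fd, BINDER_GET_NODE_DEBUG_INFO, &info) < 0)
		break;
	if (info.ptr == 0)
		break;   /* no more nodes */
	/* inspect info.cookie, info.has_strong_ref, info.has_weak_ref;
	 * info.ptr is fed back in on the next iteration */
}
```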
 
 /*
  * NOTE: Two special error codes you should check for when calling
@@ -162,6 +312,11 @@
 	} data;
 };
 
+struct binder_transaction_data_sg {
+	struct binder_transaction_data transaction_data;
+	binder_size_t buffers_size;
+};
+
 struct binder_ptr_cookie {
 	binder_uintptr_t ptr;
 	binder_uintptr_t cookie;
@@ -346,6 +501,12 @@
 	/*
 	 * void *: cookie
 	 */
+
+	BC_TRANSACTION_SG = _IOW('c', 17, struct binder_transaction_data_sg),
+	BC_REPLY_SG = _IOW('c', 18, struct binder_transaction_data_sg),
+	/*
+	 * binder_transaction_data_sg: the sent command.
+	 */
 };
 
 #endif /* _UAPI_LINUX_BINDER_H */
diff -ruw linux-4.4.115/include/uapi/linux/eventpoll.h linux-4.4.115-fbx/include/uapi/linux/eventpoll.h
--- linux-4.4.115/include/uapi/linux/eventpoll.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/uapi/linux/eventpoll.h	2019-01-22 16:16:28.547292083 +0100
@@ -39,6 +39,9 @@
 #define EPOLLMSG	0x00000400
 #define EPOLLRDHUP	0x00002000
 
+/* Set exclusive wakeup mode for the target file descriptor */
+#define EPOLLEXCLUSIVE (1U << 28)
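A sketch of the intended use, attaching a shared listening socket so that only one of several waiters blocked on separate epoll fds is woken per event; note that older glibc <sys/epoll.h> headers may not define EPOLLEXCLUSIVE yet, hence the guard:

```c
#include <sys/epoll.h>

#ifndef EPOLLEXCLUSIVE
#define EPOLLEXCLUSIVE (1U << 28)
#endif

struct epoll_event ev;
ev.events  = EPOLLIN | EPOLLEXCLUSIVE;   /* only valid with EPOLL_CTL_ADD */
ev.data.fd = listen_fd;                  /* placeholder */
epoll_ctl(epfd, EPOLL_CTL_ADD, listen_fd, &ev);
```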
+
 /*
  * Request the handling of system wakeup events so as to prevent system suspends
  * from happening while those events are being processed.
@@ -49,13 +52,13 @@
  *
  * Requires CAP_BLOCK_SUSPEND
  */
-#define EPOLLWAKEUP (1 << 29)
+#define EPOLLWAKEUP (1U << 29)
 
 /* Set the One Shot behaviour for the target file descriptor */
-#define EPOLLONESHOT (1 << 30)
+#define EPOLLONESHOT (1U << 30)
 
 /* Set the Edge Triggered behaviour for the target file descriptor */
-#define EPOLLET (1 << 31)
+#define EPOLLET (1U << 31)
 
 /* 
  * On x86-64 make the 64bit structure have the same alignment as the
@@ -69,6 +72,7 @@
 #define EPOLL_PACKED
 #endif
 
+#ifdef __KERNEL__
 struct epoll_event {
 	__u32 events;
 	__u64 data;
@@ -86,4 +90,5 @@
 	epev->events &= ~EPOLLWAKEUP;
 }
 #endif
+#endif /* __KERNEL__ */
 #endif /* _UAPI_LINUX_EVENTPOLL_H */
diff -ruw linux-4.4.115/include/uapi/linux/fib_rules.h linux-4.4.115-fbx/include/uapi/linux/fib_rules.h
--- linux-4.4.115/include/uapi/linux/fib_rules.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/uapi/linux/fib_rules.h	2019-01-22 16:16:28.547292083 +0100
@@ -29,6 +29,11 @@
 	__u32		flags;
 };
 
+struct fib_rule_uid_range {
+	__u32		start;
+	__u32		end;
+};
+
 enum {
 	FRA_UNSPEC,
 	FRA_DST,	/* destination address */
@@ -49,6 +54,9 @@
 	FRA_TABLE,	/* Extended table id */
 	FRA_FWMASK,	/* mask for netfilter mark */
 	FRA_OIFNAME,
+	FRA_PAD,
+	FRA_L3MDEV,	/* iif or oif is l3mdev goto its table */
+	FRA_UID_RANGE,	/* UID range */
 	__FRA_MAX
 };
 
diff -ruw linux-4.4.115/include/uapi/linux/fs.h linux-4.4.115-fbx/include/uapi/linux/fs.h
--- linux-4.4.115/include/uapi/linux/fs.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/uapi/linux/fs.h	2019-10-29 09:26:25.545221791 +0100
@@ -160,6 +160,8 @@
 #define FITHAW		_IOWR('X', 120, int)	/* Thaw */
 #define FITRIM		_IOWR('X', 121, struct fstrim_range)	/* Trim */
 
+#define FIDTRIM	_IOWR('f', 128, struct fstrim_range)	/* Deep discard trim */
+
 #define	FS_IOC_GETFLAGS			_IOR('f', 1, long)
 #define	FS_IOC_SETFLAGS			_IOW('f', 2, long)
 #define	FS_IOC_GETVERSION		_IOR('v', 1, long)
@@ -171,6 +173,54 @@
 #define FS_IOC32_SETVERSION		_IOW('v', 2, int)
 
 /*
+ * File system encryption support
+ */
+/* Policy provided via an ioctl on the topmost directory */
+#define FS_KEY_DESCRIPTOR_SIZE	8
+
+#define FS_POLICY_FLAGS_PAD_4		0x00
+#define FS_POLICY_FLAGS_PAD_8		0x01
+#define FS_POLICY_FLAGS_PAD_16		0x02
+#define FS_POLICY_FLAGS_PAD_32		0x03
+#define FS_POLICY_FLAGS_PAD_MASK	0x03
+#define FS_POLICY_FLAGS_VALID		0x03
+
+/* Encryption algorithms */
+#define FS_ENCRYPTION_MODE_INVALID		0
+#define FS_ENCRYPTION_MODE_AES_256_XTS		1
+#define FS_ENCRYPTION_MODE_AES_256_GCM		2
+#define FS_ENCRYPTION_MODE_AES_256_CBC		3
+#define FS_ENCRYPTION_MODE_AES_256_CTS		4
+#define FS_ENCRYPTION_MODE_AES_128_CBC		5
+#define FS_ENCRYPTION_MODE_AES_128_CTS		6
+
+
+struct fscrypt_policy {
+	__u8 version;
+	__u8 contents_encryption_mode;
+	__u8 filenames_encryption_mode;
+	__u8 flags;
+	__u8 master_key_descriptor[FS_KEY_DESCRIPTOR_SIZE];
+} __packed;
+
+#define FS_IOC_SET_ENCRYPTION_POLICY	_IOR('f', 19, struct fscrypt_policy)
+#define FS_IOC_GET_ENCRYPTION_PWSALT	_IOW('f', 20, __u8[16])
+#define FS_IOC_GET_ENCRYPTION_POLICY	_IOW('f', 21, struct fscrypt_policy)
+
+/* Parameters for passing an encryption key into the kernel keyring */
+#define FS_KEY_DESC_PREFIX		"fscrypt:"
+#define FS_KEY_DESC_PREFIX_SIZE		8
+
+/* Structure that userspace passes to the kernel keyring */
+#define FS_MAX_KEY_SIZE			64
+
+struct fscrypt_key {
+	__u32 mode;
+	__u8 raw[FS_MAX_KEY_SIZE];
+	__u32 size;
+};
+
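A sketch of applying a policy to an empty top-level directory; key_desc is a placeholder and must match a key added to the kernel keyring under the "fscrypt:" prefix described below:

```c
#include <string.h>

struct fscrypt_policy policy = {
	.version                   = 0,
	.contents_encryption_mode  = FS_ENCRYPTION_MODE_AES_256_XTS,
	.filenames_encryption_mode = FS_ENCRYPTION_MODE_AES_256_CTS,
	.flags                     = FS_POLICY_FLAGS_PAD_4,
};
memcpy(policy.master_key_descriptor, key_desc, FS_KEY_DESCRIPTOR_SIZE);
ioctl(dirfd, FS_IOC_SET_ENCRYPTION_POLICY, &policy);
```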
+/*
  * Inode flags (FS_IOC_GETFLAGS / FS_IOC_SETFLAGS)
  */
 #define	FS_SECRM_FL			0x00000001 /* Secure deletion */
diff -ruw linux-4.4.115/include/uapi/linux/fuse.h linux-4.4.115-fbx/include/uapi/linux/fuse.h
--- linux-4.4.115/include/uapi/linux/fuse.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/uapi/linux/fuse.h	2019-10-29 09:26:25.545221791 +0100
@@ -250,6 +250,7 @@
 #define FUSE_ASYNC_DIO		(1 << 15)
 #define FUSE_WRITEBACK_CACHE	(1 << 16)
 #define FUSE_NO_OPEN_SUPPORT	(1 << 17)
+#define FUSE_PASSTHROUGH	(1 << 18)
 
 /**
  * CUSE INIT request/reply flags
@@ -358,6 +359,7 @@
 	FUSE_FALLOCATE     = 43,
 	FUSE_READDIRPLUS   = 44,
 	FUSE_RENAME2       = 45,
+	FUSE_CANONICAL_PATH = 2016,
 
 	/* CUSE specific operations */
 	CUSE_INIT          = 4096,
@@ -480,7 +482,7 @@
 struct fuse_open_out {
 	uint64_t	fh;
 	uint32_t	open_flags;
-	uint32_t	padding;
+	int32_t         passthrough_fd;
 };
 
 struct fuse_release_in {
diff -ruw linux-4.4.115/include/uapi/linux/hw_breakpoint.h linux-4.4.115-fbx/include/uapi/linux/hw_breakpoint.h
--- linux-4.4.115/include/uapi/linux/hw_breakpoint.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/uapi/linux/hw_breakpoint.h	2019-01-22 16:16:28.551292119 +0100
@@ -4,7 +4,11 @@
 enum {
 	HW_BREAKPOINT_LEN_1 = 1,
 	HW_BREAKPOINT_LEN_2 = 2,
+	HW_BREAKPOINT_LEN_3 = 3,
 	HW_BREAKPOINT_LEN_4 = 4,
+	HW_BREAKPOINT_LEN_5 = 5,
+	HW_BREAKPOINT_LEN_6 = 6,
+	HW_BREAKPOINT_LEN_7 = 7,
 	HW_BREAKPOINT_LEN_8 = 8,
 };
 
diff -ruw linux-4.4.115/include/uapi/linux/if_arp.h linux-4.4.115-fbx/include/uapi/linux/if_arp.h
--- linux-4.4.115/include/uapi/linux/if_arp.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/uapi/linux/if_arp.h	2019-01-22 16:16:28.551292119 +0100
@@ -59,6 +59,7 @@
 #define ARPHRD_LAPB	516		/* LAPB				*/
 #define ARPHRD_DDCMP    517		/* Digital's DDCMP protocol     */
 #define ARPHRD_RAWHDLC	518		/* Raw HDLC			*/
+#define ARPHRD_RAWIP    519		/* Raw IP                       */
 
 #define ARPHRD_TUNNEL	768		/* IPIP tunnel			*/
 #define ARPHRD_TUNNEL6	769		/* IP6IP6 tunnel       		*/
diff -ruw linux-4.4.115/include/uapi/linux/if_ether.h linux-4.4.115-fbx/include/uapi/linux/if_ether.h
--- linux-4.4.115/include/uapi/linux/if_ether.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/uapi/linux/if_ether.h	2019-10-29 09:26:25.545221791 +0100
@@ -130,6 +130,9 @@
 #define ETH_P_IEEE802154 0x00F6		/* IEEE802.15.4 frame		*/
 #define ETH_P_CAIF	0x00F7		/* ST-Ericsson CAIF protocol	*/
 #define ETH_P_XDSA	0x00F8		/* Multiplexed DSA protocol	*/
+#define ETH_P_MAP	0x00F9		/* Qualcomm multiplexing and
+					 * aggregation protocol
+					 */
 
 /*
  *	This is an Ethernet frame header.
diff -ruw linux-4.4.115/include/uapi/linux/if.h linux-4.4.115-fbx/include/uapi/linux/if.h
--- linux-4.4.115/include/uapi/linux/if.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/uapi/linux/if.h	2019-04-24 19:28:47.292498169 +0200
@@ -24,6 +24,10 @@
 #include <linux/socket.h>		/* for "struct sockaddr" et al	*/
 #include <linux/compiler.h>		/* for "__user" et al           */
 
+#ifndef __KERNEL__
+#include <sys/socket.h>			/* for struct sockaddr.		*/
+#endif
+
 #if __UAPI_DEF_IF_IFNAMSIZ
 #define	IFNAMSIZ	16
 #endif /* __UAPI_DEF_IF_IFNAMSIZ */
diff -ruw linux-4.4.115/include/uapi/linux/if_pppox.h linux-4.4.115-fbx/include/uapi/linux/if_pppox.h
--- linux-4.4.115/include/uapi/linux/if_pppox.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/uapi/linux/if_pppox.h	2019-01-22 16:16:28.551292119 +0100
@@ -23,6 +23,8 @@
 #include <linux/socket.h>
 #include <linux/if_ether.h>
 #include <linux/if_pppol2tp.h>
+#include <linux/if_pppolac.h>
+#include <linux/if_pppopns.h>
 
 /* For user-space programs to pick up these definitions
  * which they wouldn't get otherwise without defining __KERNEL__
@@ -56,7 +58,9 @@
 #define PX_PROTO_OE    0 /* Currently just PPPoE */
 #define PX_PROTO_OL2TP 1 /* Now L2TP also */
 #define PX_PROTO_PPTP  2
-#define PX_MAX_PROTO   3
+#define PX_PROTO_OLAC  3
+#define PX_PROTO_OPNS  4
+#define PX_MAX_PROTO   5
 
 struct sockaddr_pppox {
 	__kernel_sa_family_t sa_family;       /* address family, AF_PPPOX */
diff -ruw linux-4.4.115/include/uapi/linux/if_tun.h linux-4.4.115-fbx/include/uapi/linux/if_tun.h
--- linux-4.4.115/include/uapi/linux/if_tun.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/uapi/linux/if_tun.h	2019-01-22 16:16:28.555292155 +0100
@@ -57,6 +57,32 @@
 #define TUNSETVNETBE _IOW('T', 222, int)
 #define TUNGETVNETBE _IOR('T', 223, int)
 
+
+struct smalltun_rule {
+	__u8	proto;
+	__be16	src_port_start;
+	__be16	src_port_end;
+	__be16	dst_port_start;
+	__be16	dst_port_end;
+};
+
+struct smalltun_fp {
+	__be32	inner_src;
+	__be32	inner_dst;
+
+	__u32	af;
+	__u8	outer_src[16];
+	__u8	outer_dst[16];
+	__be16	outer_src_port;
+	__be16	outer_dst_port;
+
+	struct smalltun_rule rules[8];
+	__u32	rule_count;
+};
+
+#define TUNSMALLTUNSETFP _IOW('T', 224, struct smalltun_fp)
+#define TUNSMALLTUNDELFP _IOW('T', 225, struct smalltun_fp)
+
 /* TUNSETIFF ifr flags */
 #define IFF_TUN		0x0001
 #define IFF_TAP		0x0002
diff -ruw linux-4.4.115/include/uapi/linux/inet_diag.h linux-4.4.115-fbx/include/uapi/linux/inet_diag.h
--- linux-4.4.115/include/uapi/linux/inet_diag.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/uapi/linux/inet_diag.h	2019-01-22 16:16:28.555292155 +0100
@@ -72,6 +72,8 @@
 	INET_DIAG_BC_AUTO,
 	INET_DIAG_BC_S_COND,
 	INET_DIAG_BC_D_COND,
+	INET_DIAG_BC_DEV_COND,   /* u32 ifindex */
+	INET_DIAG_BC_MARK_COND,
 };
 
 struct inet_diag_hostcond {
@@ -81,6 +83,11 @@
 	__be32	addr[0];
 };
 
+struct inet_diag_markcond {
+	__u32 mark;
+	__u32 mask;
+};
+
 /* Base info structure. It contains socket identity (addrs/ports/cookie)
  * and, alas, the information shown by netstat. */
 struct inet_diag_msg {
@@ -113,9 +120,13 @@
 	INET_DIAG_DCTCPINFO,
 	INET_DIAG_PROTOCOL,  /* response attribute only */
 	INET_DIAG_SKV6ONLY,
+	INET_DIAG_LOCALS,
+	INET_DIAG_PEERS,
+	INET_DIAG_PAD,
+	INET_DIAG_MARK,
 };
 
-#define INET_DIAG_MAX INET_DIAG_SKV6ONLY
+#define INET_DIAG_MAX INET_DIAG_MARK
 
 /* INET_DIAG_MEM */
 
diff -ruw linux-4.4.115/include/uapi/linux/input-event-codes.h linux-4.4.115-fbx/include/uapi/linux/input-event-codes.h
--- linux-4.4.115/include/uapi/linux/input-event-codes.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/uapi/linux/input-event-codes.h	2019-10-29 09:26:25.545221791 +0100
@@ -611,6 +611,37 @@
 #define KEY_KBDINPUTASSIST_ACCEPT		0x264
 #define KEY_KBDINPUTASSIST_CANCEL		0x265
 
+/* Diagonal movement keys */
+#define KEY_RIGHT_UP			0x266
+#define KEY_RIGHT_DOWN			0x267
+#define KEY_LEFT_UP			0x268
+#define KEY_LEFT_DOWN			0x269
+
+#define KEY_ROOT_MENU			0x26a /* Show Device's Root Menu */
+/* Show Top Menu of the Media (e.g. DVD) */
+#define KEY_MEDIA_TOP_MENU		0x26b
+#define KEY_NUMERIC_11			0x26c
+#define KEY_NUMERIC_12			0x26d
+/*
+ * Toggle Audio Description: refers to an audio service that helps blind and
+ * visually impaired consumers understand the action in a program. Note: in
+ * some countries this is referred to as "Video Description".
+ */
+#define KEY_AUDIO_DESC			0x26e
+#define KEY_3D_MODE			0x26f
+#define KEY_NEXT_FAVORITE		0x270
+#define KEY_STOP_RECORD			0x271
+#define KEY_PAUSE_RECORD		0x272
+#define KEY_VOD				0x273 /* Video on Demand */
+#define KEY_UNMUTE			0x274
+#define KEY_FASTREVERSE			0x275
+#define KEY_SLOWREVERSE			0x276
+/*
+ * Control a data application associated with the currently viewed channel,
+ * e.g. teletext or data broadcast application (MHEG, MHP, HbbTV, etc.)
+ */
+#define KEY_DATA			0x277
+
 #define BTN_TRIGGER_HAPPY		0x2c0
 #define BTN_TRIGGER_HAPPY1		0x2c0
 #define BTN_TRIGGER_HAPPY2		0x2c1
@@ -653,6 +684,18 @@
 #define BTN_TRIGGER_HAPPY39		0x2e6
 #define BTN_TRIGGER_HAPPY40		0x2e7
 
+#define KEY_APP_TV			0x2f1
+#define KEY_APP_REPLAY			0x2f2
+#define KEY_APP_VIDEOCLUB		0x2f3
+#define KEY_APP_WHATSON			0x2f4
+#define KEY_APP_RECORDS			0x2f5
+#define KEY_APP_MEDIA			0x2f6
+#define KEY_APP_YOUTUBE			0x2f7
+#define KEY_APP_RADIOS			0x2f8
+#define KEY_APP_CANALVOD		0x2f9
+#define KEY_APP_PIP			0x2fa
+#define KEY_APP_NETFLIX			0x2fb
+
 /* We avoid low common keys in module aliases so they don't get huge. */
 #define KEY_MIN_INTERESTING	KEY_MUTE
 #define KEY_MAX			0x2ff
@@ -749,7 +792,11 @@
 #define SW_ROTATE_LOCK		0x0c  /* set = rotate locked/disabled */
 #define SW_LINEIN_INSERT	0x0d  /* set = inserted */
 #define SW_MUTE_DEVICE		0x0e  /* set = device disabled */
-#define SW_MAX			0x0f
+#define SW_HPHL_OVERCURRENT	0x0f  /* set = over current on left hph */
+#define SW_HPHR_OVERCURRENT	0x10  /* set = over current on right hph */
+#define SW_MICROPHONE2_INSERT   0x11  /* set = inserted */
+#define SW_UNSUPPORT_INSERT	0x12  /* set = unsupported device inserted */
+#define SW_MAX			0x20
 #define SW_CNT			(SW_MAX+1)
 
 /*
diff -ruw linux-4.4.115/include/uapi/linux/input.h linux-4.4.115-fbx/include/uapi/linux/input.h
--- linux-4.4.115/include/uapi/linux/input.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/uapi/linux/input.h	2019-01-22 16:16:28.555292155 +0100
@@ -161,6 +161,50 @@
 #define EVIOCGRAB		_IOW('E', 0x90, int)			/* Grab/Release device */
 #define EVIOCREVOKE		_IOW('E', 0x91, int)			/* Revoke device access */
 
+#define EVIOCSCLOCKID		_IOW('E', 0xa0, int)			/* Set clockid to be used for timestamps */
+
+/*
+ * Device properties and quirks
+ */
+
+/* HACK: disable conflicting EVIOCREVOKE until Android userspace stops using EVIOCSSUSPENDBLOCK */
+/*#define EVIOCREVOKE		_IOW('E', 0x91, int)*/			/* Revoke device access */
+
+#define INPUT_PROP_POINTER		0x00	/* needs a pointer */
+#define INPUT_PROP_DIRECT		0x01	/* direct input devices */
+#define INPUT_PROP_BUTTONPAD		0x02	/* has button(s) under pad */
+#define INPUT_PROP_SEMI_MT		0x03	/* touch rectangle only */
+#define INPUT_PROP_TOPBUTTONPAD		0x04	/* softbuttons at top of pad */
+#define INPUT_PROP_POINTING_STICK	0x05	/* is a pointing stick */
+#define INPUT_PROP_NO_DUMMY_RELEASE	0x06	/* no dummy event */
+
+#define INPUT_PROP_MAX			0x1f
+#define INPUT_PROP_CNT			(INPUT_PROP_MAX + 1)
+
+/*
+ * Event types
+ */
+
+#define EV_SYN			0x00
+#define EV_KEY			0x01
+#define EV_REL			0x02
+#define EV_ABS			0x03
+#define EV_MSC			0x04
+#define EV_SW			0x05
+#define EV_LED			0x11
+#define EV_SND			0x12
+#define EV_REP			0x14
+#define EV_FF			0x15
+#define EV_PWR			0x16
+#define EV_FF_STATUS		0x17
+#define EV_MAX			0x1f
+#define EV_CNT			(EV_MAX+1)
+
+/*
+ * Synchronization events.
+ */
+
+
 /**
  * EVIOCGMASK - Retrieve current event mask
  *
@@ -246,6 +290,7 @@
 #define BUS_GSC			0x1A
 #define BUS_ATARI		0x1B
 #define BUS_SPI			0x1C
+#define BUS_CEC			0x1E
 
 /*
  * MT_TOOL types
diff -ruw linux-4.4.115/include/uapi/linux/ip.h linux-4.4.115-fbx/include/uapi/linux/ip.h
--- linux-4.4.115/include/uapi/linux/ip.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/uapi/linux/ip.h	2019-01-22 16:16:28.555292155 +0100
@@ -165,6 +165,7 @@
 	IPV4_DEVCONF_IGMPV2_UNSOLICITED_REPORT_INTERVAL,
 	IPV4_DEVCONF_IGMPV3_UNSOLICITED_REPORT_INTERVAL,
 	IPV4_DEVCONF_IGNORE_ROUTES_WITH_LINKDOWN,
+	IPV4_DEVCONF_NF_IPV4_DEFRAG_SKIP,
 	__IPV4_DEVCONF_MAX
 };
 
diff -ruw linux-4.4.115/include/uapi/linux/ipv6.h linux-4.4.115-fbx/include/uapi/linux/ipv6.h
--- linux-4.4.115/include/uapi/linux/ipv6.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/uapi/linux/ipv6.h	2019-01-22 16:16:28.555292155 +0100
@@ -164,6 +164,7 @@
 	DEVCONF_ACCEPT_DAD,
 	DEVCONF_FORCE_TLLAO,
 	DEVCONF_NDISC_NOTIFY,
+	DEVCONF_ACCEPT_RA_RT_TABLE,
 	DEVCONF_MLDV1_UNSOLICITED_REPORT_INTERVAL,
 	DEVCONF_MLDV2_UNSOLICITED_REPORT_INTERVAL,
 	DEVCONF_SUPPRESS_FRAG_NDISC,
@@ -174,6 +175,17 @@
 	DEVCONF_USE_OIF_ADDRS_ONLY,
 	DEVCONF_ACCEPT_RA_MIN_HOP_LIMIT,
 	DEVCONF_IGNORE_ROUTES_WITH_LINKDOWN,
+	DEVCONF_ACCEPT_RA_PREFIX_ROUTE,
+	DEVCONF_DROP_UNICAST_IN_L2_MULTICAST,
+	DEVCONF_DROP_UNSOLICITED_NA,
+	DEVCONF_KEEP_ADDR_ON_DOWN,
+	DEVCONF_RTR_SOLICIT_MAX_INTERVAL,
+	DEVCONF_SEG6_ENABLED,
+	DEVCONF_SEG6_REQUIRE_HMAC,
+	DEVCONF_ENHANCED_DAD,
+	DEVCONF_ADDR_GEN_MODE,
+	DEVCONF_DISABLE_POLICY,
+	DEVCONF_ACCEPT_RA_RT_INFO_MIN_PLEN,
 	DEVCONF_MAX
 };
 
diff -ruw linux-4.4.115/include/uapi/linux/Kbuild linux-4.4.115-fbx/include/uapi/linux/Kbuild
--- linux-4.4.115/include/uapi/linux/Kbuild	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/uapi/linux/Kbuild	2019-10-29 09:26:25.541221752 +0100
@@ -22,6 +22,8 @@
 header-y += netfilter_ipv6/
 header-y += usb/
 header-y += wimax/
+header-y += mfd/
+header-y += nfc/
 
 genhdr-y += version.h
 
@@ -30,6 +32,7 @@
 header-y += a.out.h
 endif
 
+header-y += ashmem.h
 header-y += acct.h
 header-y += adb.h
 header-y += adfs_fs.h
@@ -62,8 +65,10 @@
 header-y += auto_fs4.h
 header-y += auto_fs.h
 header-y += auxvec.h
+header-y += avtimer.h
 header-y += ax25.h
 header-y += b1lli.h
+header-y += batterydata-interface.h
 header-y += baycom.h
 header-y += bcm933xx_hcs.h
 header-y += bfs_fs.h
@@ -81,6 +86,8 @@
 header-y += cciss_defs.h
 header-y += cciss_ioctl.h
 header-y += cdrom.h
+header-y += cec.h
+header-y += cec-funcs.h
 header-y += cgroupstats.h
 header-y += chio.h
 header-y += cm4000_cs.h
@@ -111,10 +118,13 @@
 header-y += elf-em.h
 header-y += elf-fdpic.h
 header-y += elf.h
+header-y += epm_adc.h
 header-y += errno.h
 header-y += errqueue.h
+header-y += esoc_ctrl.h
 header-y += ethtool.h
 header-y += eventpoll.h
+header-y += exfat_user.h
 header-y += fadvise.h
 header-y += falloc.h
 header-y += fanotify.h
@@ -125,6 +135,7 @@
 header-y += fib_rules.h
 header-y += fiemap.h
 header-y += filter.h
+header-y += fips_status.h
 header-y += firewire-cdev.h
 header-y += firewire-constants.h
 header-y += flat.h
@@ -139,6 +150,8 @@
 header-y += gfs2_ondisk.h
 header-y += gigaset_dev.h
 header-y += gsmmux.h
+header-y += hbtp_input.h
+header-y += hbtp_vm.h
 header-y += hdlcdrv.h
 header-y += hdlc.h
 header-y += hdreg.h
@@ -195,6 +208,8 @@
 header-y += input-event-codes.h
 header-y += in_route.h
 header-y += ioctl.h
+header-y += ion.h
+header-y += ion_test.h
 header-y += ip6_tunnel.h
 header-y += ipc.h
 header-y += ip.h
@@ -250,7 +265,9 @@
 header-y += major.h
 header-y += map_to_7segment.h
 header-y += matroxfb.h
+header-y += maxim_sti.h
 header-y += mdio.h
+header-y += mdss_rotator.h
 header-y += media.h
 header-y += media-bus-format.h
 header-y += mei.h
@@ -258,6 +275,7 @@
 header-y += memfd.h
 header-y += mempolicy.h
 header-y += meye.h
+header-y += mhi.h
 header-y += mic_common.h
 header-y += mic_ioctl.h
 header-y += mii.h
@@ -271,6 +289,34 @@
 header-y += mroute.h
 header-y += msdos_fs.h
 header-y += msg.h
+header-y += msm_adsp.h
+header-y += msm_audio.h
+header-y += msm_audio_aac.h
+header-y += msm_audio_ac3.h
+header-y += msm_audio_amrnb.h
+header-y += msm_audio_amrwb.h
+header-y += msm_audio_amrwbplus.h
+header-y += msm_audio_calibration.h
+header-y += msm_audio_mvs.h
+header-y += msm_audio_qcp.h
+header-y += msm_audio_sbc.h
+header-y += msm_audio_voicememo.h
+header-y += msm_audio_wma.h
+header-y += msm_audio_wmapro.h
+header-y += msm_audio_alac.h
+header-y += msm_audio_ape.h
+header-y += msm_audio_g711.h
+header-y += msm_audio_g711_dec.h
+header-y += msm_ion.h
+header-y += msm_ipc.h
+header-y += msm_kgsl.h
+header-y += msm_pft.h
+header-y += msm_mdp.h
+header-y += msm_mdp_ext.h
+header-y += msm_rmnet.h
+header-y += msm_rotator.h
+header-y += msm_vidc_dec.h
+header-y += msm_vidc_enc.h
 header-y += mtio.h
 header-y += nbd.h
 header-y += ncp_fs.h
@@ -282,6 +328,7 @@
 header-y += netconf.h
 header-y += netdevice.h
 header-y += net_dropmon.h
+header-y += net_map.h
 header-y += netfilter_arp.h
 header-y += netfilter_bridge.h
 header-y += netfilter_decnet.h
@@ -311,6 +358,7 @@
 header-y += nvram.h
 header-y += omap3isp.h
 header-y += omapfb.h
+header-y += oneshot_sync.h
 header-y += oom.h
 header-y += openvswitch.h
 header-y += packet_diag.h
@@ -337,11 +385,17 @@
 header-y += ppp-ioctl.h
 header-y += pps.h
 header-y += prctl.h
+header-y += prctl-private.h
 header-y += psci.h
 header-y += ptp_clock.h
 header-y += ptrace.h
+header-y += qbt1000.h
+header-y += qcedev.h
+header-y += qcota.h
 header-y += qnx4_fs.h
 header-y += qnxtypes.h
+header-y += qseecom.h
+header-y += qrng.h
 header-y += quota.h
 header-y += radeonfb.h
 header-y += random.h
@@ -352,6 +406,8 @@
 header-y += reiserfs_xattr.h
 header-y += resource.h
 header-y += rfkill.h
+header-y += rmnet_data.h
+header-y += rmnet.h
 header-y += romfs_fs.h
 header-y += rose.h
 header-y += route.h
@@ -365,6 +421,8 @@
 header-y += sdla.h
 header-y += seccomp.h
 header-y += securebits.h
+header-y += seemp_api.h
+header-y += seemp_param_id.h
 header-y += selinux_netlink.h
 header-y += sem.h
 header-y += serial_core.h
@@ -378,16 +436,20 @@
 header-y += snmp.h
 header-y += sock_diag.h
 header-y += socket.h
+header-y += sockev.h
 header-y += sockios.h
 header-y += sonet.h
 header-y += sonypi.h
 header-y += soundcard.h
 header-y += sound.h
+header-y += spcom.h
 header-y += stat.h
 header-y += stddef.h
 header-y += string.h
 header-y += suspend_ioctls.h
+header-y += sw_sync.h
 header-y += swab.h
+header-y += sync.h
 header-y += synclink.h
 header-y += sysctl.h
 header-y += sysinfo.h
@@ -461,3 +523,16 @@
 header-y += zorro.h
 header-y += zorro_ids.h
 header-y += userfaultfd.h
+header-y += msm_dsps.h
+header-y += msm-core-interface.h
+header-y += msm_thermal_ioctl.h
+header-y += android_pmem.h
+header-y += ipa_qmi_service_v01.h
+header-y += rmnet_ipa_fd_ioctl.h
+header-y += msm_ipa.h
+header-y += smcinvoke.h
+header-y += habmm.h
+
+header-y += fbxatm.h
+header-y += rtl8367c_ioctl.h
+header-y += hdmi-cec/
diff -ruw linux-4.4.115/include/uapi/linux/magic.h linux-4.4.115-fbx/include/uapi/linux/magic.h
--- linux-4.4.115/include/uapi/linux/magic.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/uapi/linux/magic.h	2019-01-22 16:16:28.559292192 +0100
@@ -52,8 +52,11 @@
 #define REISER2FS_SUPER_MAGIC_STRING	"ReIsEr2Fs"
 #define REISER2FS_JR_SUPER_MAGIC_STRING	"ReIsEr3Fs"
 
+#define SDCARDFS_SUPER_MAGIC	0x5dca2df5
+
 #define SMB_SUPER_MAGIC		0x517B
 #define CGROUP_SUPER_MAGIC	0x27e0eb
+#define CGROUP2_SUPER_MAGIC	0x63677270
 
 
 #define STACK_END_MAGIC		0x57AC6E9D
@@ -76,5 +79,7 @@
 #define BTRFS_TEST_MAGIC	0x73727279
 #define NSFS_MAGIC		0x6e736673
 #define BPF_FS_MAGIC		0xcafe4a11
+#define BALLOON_KVM_MAGIC	0x13661366
+#define ZSMALLOC_MAGIC		0x58295829
 
 #endif /* __LINUX_MAGIC_H__ */
diff -ruw linux-4.4.115/include/uapi/linux/mmc/ioctl.h linux-4.4.115-fbx/include/uapi/linux/mmc/ioctl.h
--- linux-4.4.115/include/uapi/linux/mmc/ioctl.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/uapi/linux/mmc/ioctl.h	2019-01-22 16:16:28.563292228 +0100
@@ -63,6 +63,61 @@
  *	commands in array in sequence to card.
  */
 #define MMC_IOC_MULTI_CMD _IOWR(MMC_BLOCK_MAJOR, 1, struct mmc_ioc_multi_cmd)
+
+/**
+ * There are four request types that are applicable for rpmb accesses - two
+ * under the read category and two under the write category. They are
+ *
+ *  Reads
+ *  -------
+ *  1. Read Write Counter
+ *  2. Authenticated data read
+ *
+ *
+ *  Writes
+ *  -------
+ *  1. Provision RPMB key (though it might be done in a secure environment)
+ *  2. Authenticated data write
+ *
+ *  While it's given that the rpmb data frames are going to have that
+ *  information encoded in them and that the frames should be generated by a
+ *  secure piece of code, the request types can be classified as above.
+ *
+ *  So here are the set of commands that should be executed atomically in the
+ *  ioctl for rpmb read operation
+ *  1. Switch partition
+ *  2. Set block count
+ *  3. Write data frame - CMD25 to write the rpmb data frame
+ *  4. Set block count
+ *  5. Read the data - CMD18 to do the actual read
+ *
+ *  Similarly for rpmb write operation, these are the commands that should be
+ *  executed atomically in the ioctl for rpmb write operation
+ *  1. Switch partition
+ *  2. Set block count
+ *  3. Write data frame - CMD25 to write the rpmb data frame with data
+ *  4. Set block count
+ *  5. Request the result - CMD25 to write the rpmb data frame indicating
+ *     that the rpmb result register is about to be read
+ *  6. Set block count
+ *  7. Read rpmb result - CMD18 to read the rpmb result register
+ *
+ * Each of the above commands should be sent individually via struct mmc_ioc_cmd
+ * and fields like is_acmd that are not needed for rpmb operations will be
+ * ignored.
+ */
+#define MMC_IOC_MAX_RPMB_CMD	3
+struct mmc_ioc_rpmb {
+	struct mmc_ioc_cmd cmds[MMC_IOC_MAX_RPMB_CMD];
+};
+
+/*
+ * This ioctl is meant for use with rpmb partitions. This is needed since the
+ * access procedure for this particular partition is different from regular
+ * or normal partitions.
+ */
+#define MMC_IOC_RPMB_CMD _IOWR(MMC_BLOCK_MAJOR, 0, struct mmc_ioc_rpmb)
+
 /*
  * Since this ioctl is only meant to enhance (and not replace) normal access
  * to the mmc bus device, an upper data transfer limit of MMC_IOC_MAX_BYTES
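
To make the sequencing above concrete, here is a hedged userspace sketch of a read-style exchange through MMC_IOC_RPMB_CMD: one CMD25 to push a request frame, one CMD18 to pull the response, with the third cmds[] slot unused. The device node name and the 512-byte frame contents are assumptions; as the comment says, real frames must be produced by a secure environment.

#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/mmc/ioctl.h>

int rpmb_read_exchange(unsigned char req_frame[512],
		       unsigned char resp_frame[512])
{
	struct mmc_ioc_rpmb rpmb;
	int fd, ret;

	/* /dev/block/mmcblk0rpmb is a typical RPMB node name (assumption) */
	fd = open("/dev/block/mmcblk0rpmb", O_RDWR);
	if (fd < 0)
		return -1;

	memset(&rpmb, 0, sizeof(rpmb));

	/* cmd 0: CMD25 - write the request data frame */
	rpmb.cmds[0].opcode = 25;		/* WRITE_MULTIPLE_BLOCK */
	rpmb.cmds[0].write_flag = 1;
	rpmb.cmds[0].blksz = 512;
	rpmb.cmds[0].blocks = 1;
	mmc_ioc_cmd_set_data(rpmb.cmds[0], req_frame);

	/* cmd 1: CMD18 - read the response frame back */
	rpmb.cmds[1].opcode = 18;		/* READ_MULTIPLE_BLOCK */
	rpmb.cmds[1].write_flag = 0;
	rpmb.cmds[1].blksz = 512;
	rpmb.cmds[1].blocks = 1;
	mmc_ioc_cmd_set_data(rpmb.cmds[1], resp_frame);

	ret = ioctl(fd, MMC_IOC_RPMB_CMD, &rpmb);
	close(fd);
	return ret;
}
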
diff -ruw linux-4.4.115/include/uapi/linux/mmc/Kbuild linux-4.4.115-fbx/include/uapi/linux/mmc/Kbuild
--- linux-4.4.115/include/uapi/linux/mmc/Kbuild	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/uapi/linux/mmc/Kbuild	2019-01-22 16:16:28.563292228 +0100
@@ -1,2 +1,4 @@
 # UAPI Header export list
+header-y += core.h
 header-y += ioctl.h
+header-y += mmc.h
diff -ruw linux-4.4.115/include/uapi/linux/netfilter/xt_IDLETIMER.h linux-4.4.115-fbx/include/uapi/linux/netfilter/xt_IDLETIMER.h
--- linux-4.4.115/include/uapi/linux/netfilter/xt_IDLETIMER.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/uapi/linux/netfilter/xt_IDLETIMER.h	2019-01-22 16:16:28.571292300 +0100
@@ -4,6 +4,7 @@
  * Header file for Xtables timer target module.
  *
  * Copyright (C) 2004, 2010 Nokia Corporation
+ *
  * Written by Timo Teras <ext-timo.teras@nokia.com>
  *
  * Converted to x_tables and forward-ported to 2.6.34
@@ -32,12 +33,19 @@
 #include <linux/types.h>
 
 #define MAX_IDLETIMER_LABEL_SIZE 28
+#define NLMSG_MAX_SIZE 64
+
+#define NL_EVENT_TYPE_INACTIVE 0
+#define NL_EVENT_TYPE_ACTIVE 1
 
 struct idletimer_tg_info {
 	__u32 timeout;
 
 	char label[MAX_IDLETIMER_LABEL_SIZE];
 
+	/* Use netlink messages for notification in addition to sysfs */
+	__u8 send_nl_msg;
+
 	/* for kernel module internal use only */
 	struct idletimer_tg *timer __attribute__((aligned(8)));
 };
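
With send_nl_msg set, expiry notifications go out over netlink in addition to the existing sysfs notification. A small sketch of how a ruleset tool might populate the extended target info; the label value is arbitrary:

#include <string.h>
#include <linux/netfilter/xt_IDLETIMER.h>

/* Prepare target info for a 10-minute idle timer that also reports
 * NL_EVENT_TYPE_ACTIVE/INACTIVE transitions over netlink (sketch). */
static void fill_idletimer_info(struct idletimer_tg_info *info)
{
	memset(info, 0, sizeof(*info));
	info->timeout = 600;			/* seconds */
	strncpy(info->label, "wlan0_timer",
		MAX_IDLETIMER_LABEL_SIZE - 1);	/* sysfs file name */
	info->send_nl_msg = 1;			/* enable netlink events */
}
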
diff -ruw linux-4.4.115/include/uapi/linux/netfilter/xt_socket.h linux-4.4.115-fbx/include/uapi/linux/netfilter/xt_socket.h
--- linux-4.4.115/include/uapi/linux/netfilter/xt_socket.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/uapi/linux/netfilter/xt_socket.h	2019-01-22 16:16:28.575292336 +0100
@@ -26,4 +26,11 @@
 			   | XT_SOCKET_NOWILDCARD \
 			   | XT_SOCKET_RESTORESKMARK)
 
+struct sock *xt_socket_lookup_slow_v4(struct net *net,
+				      const struct sk_buff *skb,
+				      const struct net_device *indev);
+struct sock *xt_socket_lookup_slow_v6(struct net *net,
+				      const struct sk_buff *skb,
+				      const struct net_device *indev);
+
 #endif /* _XT_SOCKET_H */
diff -ruw linux-4.4.115/include/uapi/linux/netlink.h linux-4.4.115-fbx/include/uapi/linux/netlink.h
--- linux-4.4.115/include/uapi/linux/netlink.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/uapi/linux/netlink.h	2019-01-22 16:16:28.579292373 +0100
@@ -27,7 +27,7 @@
 #define NETLINK_ECRYPTFS	19
 #define NETLINK_RDMA		20
 #define NETLINK_CRYPTO		21	/* Crypto layer */
-
+#define NETLINK_SOCKEV		22	/* Socket Administrative Events */
 #define NETLINK_INET_DIAG	NETLINK_SOCK_DIAG
 
 #define MAX_LINKS 32		
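
The patch only reserves protocol number 22 here; the sockev message format and multicast groups are defined elsewhere in the tree. A hedged sketch of just opening and subscribing to such a socket (the group value 1 is an assumption):

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/netlink.h>

#ifndef NETLINK_SOCKEV
#define NETLINK_SOCKEV 22	/* Socket Administrative Events (this patch) */
#endif

int main(void)
{
	struct sockaddr_nl addr;
	int fd;

	fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_SOCKEV);
	if (fd < 0) {
		perror("socket");
		return 1;
	}

	memset(&addr, 0, sizeof(addr));
	addr.nl_family = AF_NETLINK;
	addr.nl_groups = 1;	/* assumed event multicast group */

	if (bind(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0) {
		perror("bind");
		return 1;
	}
	/* a recvmsg() loop for sockev notifications would follow here */
	close(fd);
	return 0;
}
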
diff -ruw linux-4.4.115/include/uapi/linux/nl80211.h linux-4.4.115-fbx/include/uapi/linux/nl80211.h
--- linux-4.4.115/include/uapi/linux/nl80211.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/uapi/linux/nl80211.h	2019-10-29 09:26:25.549221830 +0100
@@ -172,6 +172,42 @@
  */
 
 /**
+ * DOC: FILS shared key authentication offload
+ *
+ * FILS shared key authentication offload can be advertized by drivers by
+ * setting @NL80211_EXT_FEATURE_FILS_SK_OFFLOAD flag. The drivers that support
+ * FILS shared key authentication offload should be able to construct the
+ * authentication and association frames for FILS shared key authentication and
+ * eventually do a key derivation as per IEEE 802.11ai. The below additional
+ * parameters should be given to driver in %NL80211_CMD_CONNECT.
+ *	%NL80211_ATTR_FILS_ERP_USERNAME - used to construct keyname_nai
+ *	%NL80211_ATTR_FILS_ERP_REALM - used to construct keyname_nai
+ *	%NL80211_ATTR_FILS_ERP_NEXT_SEQ_NUM - used to construct erp message
+ *	%NL80211_ATTR_FILS_ERP_RRK - used to generate the rIK and rMSK
+ * rIK should be used to generate an authentication tag on the ERP message and
+ * rMSK should be used to derive a PMKSA.
+ * rIK, rMSK should be generated and keyname_nai, sequence number should be used
+ * as specified in IETF RFC 6696.
+ *
+ * When FILS shared key authentication is completed, driver needs to provide the
+ * below additional parameters to userspace.
+ *	%NL80211_ATTR_FILS_KEK - used for key renewal
+ *	%NL80211_ATTR_FILS_ERP_NEXT_SEQ_NUM - used in further EAP-RP exchanges
+ *	%NL80211_ATTR_PMKID - used to identify the PMKSA used/generated
+ *	%NL80211_ATTR_PMK - used to update PMKSA cache in userspace
+ * The PMKSA can be maintained in userspace persistently so that it can be
+ * used later, even across reboots or Wi-Fi off/on cycles.
+ *
+ * %NL80211_ATTR_FILS_CACHE_ID is the cache identifier advertized by a FILS
+ * capable AP supporting PMK caching. It specifies the scope within which the
+ * PMKSAs are cached in an ESS. %NL80211_CMD_SET_PMKSA and
+ * %NL80211_CMD_DEL_PMKSA are enhanced to allow support for PMKSA caching based
+ * on FILS cache identifier. Additionally %NL80211_ATTR_PMK is used with
+ * %NL80211_CMD_SET_PMKSA to specify the PMK corresponding to a PMKSA for driver to
+ * use in a FILS shared key connection with PMKSA caching.
+ */
+
+/**
  * enum nl80211_commands - supported nl80211 commands
  *
  * @NL80211_CMD_UNSPEC: unspecified command to catch errors
@@ -322,7 +358,9 @@
  * @NL80211_CMD_GET_SCAN: get scan results
  * @NL80211_CMD_TRIGGER_SCAN: trigger a new scan with the given parameters
  *	%NL80211_ATTR_TX_NO_CCK_RATE is used to decide whether to send the
- *	probe requests at CCK rate or not.
+ *	probe requests at CCK rate or not. %NL80211_ATTR_BSSID can be used to
+ *	specify a BSSID to scan for; if not included, the wildcard BSSID will
+ *	be used.
  * @NL80211_CMD_NEW_SCAN_RESULTS: scan notification (as a reply to
  *	NL80211_CMD_GET_SCAN and on the "scan" multicast group)
  * @NL80211_CMD_SCAN_ABORTED: scan was aborted, for unspecified reasons,
@@ -367,10 +405,18 @@
  * @NL80211_CMD_NEW_SURVEY_RESULTS: survey data notification (as a reply to
  *	NL80211_CMD_GET_SURVEY and on the "scan" multicast group)
  *
- * @NL80211_CMD_SET_PMKSA: Add a PMKSA cache entry, using %NL80211_ATTR_MAC
- *	(for the BSSID) and %NL80211_ATTR_PMKID.
+ * @NL80211_CMD_SET_PMKSA: Add a PMKSA cache entry using %NL80211_ATTR_MAC
+ *	(for the BSSID), %NL80211_ATTR_PMKID, and optionally %NL80211_ATTR_PMK
+ *	(PMK is used for PTKSA derivation in case of FILS shared key offload) or
+ *	using %NL80211_ATTR_SSID, %NL80211_ATTR_FILS_CACHE_ID,
+ *	%NL80211_ATTR_PMKID, and %NL80211_ATTR_PMK in case of FILS
+ *	authentication where %NL80211_ATTR_FILS_CACHE_ID is the identifier
+ *	advertized by a FILS capable AP identifying the scope of PMKSA in an
+ *	ESS.
  * @NL80211_CMD_DEL_PMKSA: Delete a PMKSA cache entry, using %NL80211_ATTR_MAC
- *	(for the BSSID) and %NL80211_ATTR_PMKID.
+ *	(for the BSSID) and %NL80211_ATTR_PMKID or using %NL80211_ATTR_SSID,
+ *	%NL80211_ATTR_FILS_CACHE_ID, and %NL80211_ATTR_PMKID in case of FILS
+ *	authentication.
  * @NL80211_CMD_FLUSH_PMKSA: Flush all PMKSA cache entries.
  *
  * @NL80211_CMD_REG_CHANGE: indicates to userspace the regulatory domain
@@ -484,7 +530,12 @@
  *	This attribute is ignored if driver does not support roam scan.
  *	It is also sent as an event, with the BSSID and response IEs when the
  *	connection is established or failed to be established. This can be
- *	determined by the STATUS_CODE attribute.
+ *	determined by the %NL80211_ATTR_STATUS_CODE attribute (0 = success,
+ *	non-zero = failure). If %NL80211_ATTR_TIMED_OUT is included in the
+ *	event, the connection attempt failed due to not being able to initiate
+ *	authentication/association or not receiving a response from the AP.
+ *	Non-zero %NL80211_ATTR_STATUS_CODE value is indicated in that case as
+ *	well to remain backwards compatible.
  * @NL80211_CMD_ROAM: request that the card roam (currently not implemented),
  *	sent as an event when the card/driver roamed by itself.
  * @NL80211_CMD_DISCONNECT: drop a given connection; also used to notify
@@ -820,6 +871,51 @@
  *	as an event to indicate changes for devices with wiphy-specific regdom
  *	management.
  *
+ * @NL80211_CMD_ABORT_SCAN: Stop an ongoing scan. Returns -ENOENT if a scan is
+ *	not running. The driver indicates the status of the scan through
+ *	cfg80211_scan_done().
+ *
+ * @NL80211_CMD_START_NAN: Start NAN operation, identified by its
+ *	%NL80211_ATTR_WDEV interface. This interface must have been previously
+ *	created with %NL80211_CMD_NEW_INTERFACE. After it has been started, the
+ *	NAN interface will create or join a cluster. This command must have a
+ *	valid %NL80211_ATTR_NAN_MASTER_PREF attribute and optional
+ *	%NL80211_ATTR_NAN_DUAL attributes.
+ *	After this command NAN functions can be added.
+ * @NL80211_CMD_STOP_NAN: Stop the NAN operation, identified by
+ *	its %NL80211_ATTR_WDEV interface.
+ * @NL80211_CMD_ADD_NAN_FUNCTION: Add a NAN function. The function is defined
+ *	with %NL80211_ATTR_NAN_FUNC nested attribute. When called, this
+ *	operation returns the strictly positive and unique instance id
+ *	(%NL80211_ATTR_NAN_FUNC_INST_ID) and a cookie (%NL80211_ATTR_COOKIE)
+ *	of the function upon success.
+ *	Since instance IDs can be re-used, this cookie is the right
+ *	way to identify the function. This will avoid races when a termination
+ *	event is handled by the user space after it has already added a new
+ *	function that got the same instance id from the kernel as the one
+ *	which just terminated.
+ *	This cookie may be used in NAN events even before the command
+ *	returns, so userspace shouldn't process NAN events until it processes
+ *	the response to this command.
+ *	Look at %NL80211_ATTR_SOCKET_OWNER as well.
+ * @NL80211_CMD_DEL_NAN_FUNCTION: Delete a NAN function by cookie.
+ *	This command is also used as a notification sent when a NAN function is
+ *	terminated. This will contain a %NL80211_ATTR_NAN_FUNC_INST_ID
+ *	and %NL80211_ATTR_COOKIE attributes.
+ * @NL80211_CMD_CHANGE_NAN_CONFIG: Change current NAN configuration. NAN
+ *	must be operational (%NL80211_CMD_START_NAN was executed).
+ *	It must contain at least one of the following attributes:
+ *	%NL80211_ATTR_NAN_MASTER_PREF, %NL80211_ATTR_NAN_DUAL.
+ * @NL80211_CMD_NAN_FUNC_MATCH: Notification sent when a match is reported.
+ *	This will contain a %NL80211_ATTR_NAN_MATCH nested attribute and
+ *	%NL80211_ATTR_COOKIE.
+ *
+ * @NL80211_CMD_UPDATE_CONNECT_PARAMS: Update one or more connect parameters
+ *	for subsequent roaming cases if the driver or firmware uses internal
+ *	BSS selection. This command can be issued only while connected and it
+ *	does not result in a change for the current association. Currently,
+ *	only the %NL80211_ATTR_IE data is used and updated with this command.
+ *
  * @NL80211_CMD_MAX: highest used command number
  * @__NL80211_CMD_AFTER_LAST: internal use
  */
@@ -1006,6 +1102,19 @@
 
 	NL80211_CMD_WIPHY_REG_CHANGE,
 
+	NL80211_CMD_ABORT_SCAN,
+
+	NL80211_CMD_START_NAN,
+	NL80211_CMD_STOP_NAN,
+	NL80211_CMD_ADD_NAN_FUNCTION,
+	NL80211_CMD_DEL_NAN_FUNCTION,
+	NL80211_CMD_CHANGE_NAN_CONFIG,
+	NL80211_CMD_NAN_MATCH,
+
+	NL80211_CMD_SET_MULTICAST_TO_UNICAST,
+
+	NL80211_CMD_UPDATE_CONNECT_PARAMS,
+
 	/* add new commands above here */
 
 	/* used to define NL80211_CMD_MAX below */
@@ -1320,7 +1429,13 @@
  *	enum nl80211_band value is used as the index (nla_type() of the nested
  *	data. If a band is not included, it will be configured to allow all
  *	rates based on negotiated supported rates information. This attribute
- *	is used with %NL80211_CMD_SET_TX_BITRATE_MASK.
+ *	is used with %NL80211_CMD_SET_TX_BITRATE_MASK and with starting AP,
+ *	and joining mesh networks (not IBSS yet). In the later case, it must
+ *	and joining mesh networks (not IBSS yet). In the latter case, it must
+ *	The driver must also specify support for this with the extended
+ *	features NL80211_EXT_FEATURE_BEACON_RATE_LEGACY,
+ *	NL80211_EXT_FEATURE_BEACON_RATE_HT and
+ *	NL80211_EXT_FEATURE_BEACON_RATE_VHT.
  *
  * @NL80211_ATTR_FRAME_MATCH: A binary attribute which typically must contain
  *	at least one byte, currently used with @NL80211_CMD_REGISTER_FRAME.
@@ -1566,8 +1681,16 @@
  *	the connection request from a station. nl80211_connect_failed_reason
  *	enum has different reasons of connection failure.
  *
- * @NL80211_ATTR_SAE_DATA: SAE elements in Authentication frames. This starts
- *	with the Authentication transaction sequence number field.
+ * @NL80211_ATTR_AUTH_DATA: Fields and elements in Authentication frames.
+ *	This contains the authentication frame body (non-IE and IE data),
+ *	excluding the Authentication algorithm number, i.e., starting at the
+ *	Authentication transaction sequence number field. It is used with
+ *	authentication algorithms that need special fields to be added into
+ *	the frames (SAE and FILS). Currently, only the SAE cases use the
+ *	initial two fields (Authentication transaction sequence number and
+ *	Status code). However, those fields are included in the attribute data
+ *	for all authentication algorithms to keep the attribute definition
+ *	consistent.
  *
  * @NL80211_ATTR_VHT_CAPABILITY: VHT Capability information element (from
  *	association request when used with NL80211_CMD_NEW_STATION)
@@ -1764,8 +1887,9 @@
  *	over all channels.
  *
  * @NL80211_ATTR_SCHED_SCAN_DELAY: delay before the first cycle of a
- *	scheduled scan (or a WoWLAN net-detect scan) is started, u32
- *	in seconds.
+ *	scheduled scan is started.  Or the delay before a WoWLAN
+ *	net-detect scan is started, counting from the moment the
+ *	system is suspended.  This value is a u32, in seconds.
 
  * @NL80211_ATTR_REG_INDOOR: flag attribute, if set indicates that the device
  *      is operating in an indoor environment.
@@ -1782,6 +1906,133 @@
  *	thus it must not specify the number of iterations, only the interval
  *	between scans. The scan plans are executed sequentially.
  *	Each scan plan is a nested attribute of &enum nl80211_sched_scan_plan.
+ * @NL80211_ATTR_PBSS: flag attribute. If set it means operate
+ *	in a PBSS. Specified in %NL80211_CMD_CONNECT to request
+ *	connecting to a PCP, and in %NL80211_CMD_START_AP to start
+ *	a PCP instead of AP. Relevant for DMG networks only.
+ * @NL80211_ATTR_BSS_SELECT: nested attribute for driver supporting the
+ *	BSS selection feature. When used with %NL80211_CMD_GET_WIPHY it contains
+ *	attributes according &enum nl80211_bss_select_attr to indicate what
+ *	BSS selection behaviours are supported. When used with %NL80211_CMD_CONNECT
+ *	it contains the behaviour-specific attribute containing the parameters for
+ *	BSS selection to be done by driver and/or firmware.
+ *
+ * @NL80211_ATTR_STA_SUPPORT_P2P_PS: whether the P2P PS mechanism is supported
+ *	or not. u8, one of the values of &enum nl80211_sta_p2p_ps_status
+ *
+ * @NL80211_ATTR_PAD: attribute used for padding for 64-bit alignment
+ *
+ * @NL80211_ATTR_IFTYPE_EXT_CAPA: Nested attribute of the following attributes:
+ *	%NL80211_ATTR_IFTYPE, %NL80211_ATTR_EXT_CAPA,
+ *	%NL80211_ATTR_EXT_CAPA_MASK, to specify the extended capabilities per
+ *	interface type.
+ *
+ * @NL80211_ATTR_MU_MIMO_GROUP_DATA: array of 24 bytes that defines a MU-MIMO
+ *	groupID for monitor mode.
+ *	The first 8 bytes are a mask that defines the membership in each
+ *	group (there are 64 groups, group 0 and 63 are reserved),
+ *	each bit represents a group and set to 1 for being a member in
+ *	that group and 0 for not being a member.
+ *	The remaining 16 bytes define the position in each group: 2 bits for
+ *	each group.
+ *	(smaller group numbers represented on most significant bits and bigger
+ *	group numbers on least significant bits.)
+ *	This attribute is used only if all interfaces are in monitor mode.
+ *	Set this attribute in order to monitor packets using the given MU-MIMO
+ *	groupID data.
+ *	To turn off that feature, set all the bits of the groupID to zero.
+ * @NL80211_ATTR_MU_MIMO_FOLLOW_MAC_ADDR: mac address for the sniffer to follow
+ *	when using MU-MIMO air sniffer.
+ *	To turn that feature off, set an invalid MAC address
+ *	(e.g. FF:FF:FF:FF:FF:FF)
+ *
+ * @NL80211_ATTR_SCAN_START_TIME_TSF: The time at which the scan was actually
+ *	started (u64). The time is the TSF of the BSS the interface that
+ *	requested the scan is connected to (if available, otherwise this
+ *	attribute must not be included).
+ * @NL80211_ATTR_SCAN_START_TIME_TSF_BSSID: The BSS according to which
+ *	%NL80211_ATTR_SCAN_START_TIME_TSF is set.
+ * @NL80211_ATTR_MEASUREMENT_DURATION: measurement duration in TUs (u16). If
+ *	%NL80211_ATTR_MEASUREMENT_DURATION_MANDATORY is not set, this is the
+ *	maximum measurement duration allowed. This attribute is used with
+ *	measurement requests. It can also be used with %NL80211_CMD_TRIGGER_SCAN
+ *	if the scan is used for beacon report radio measurement.
+ * @NL80211_ATTR_MEASUREMENT_DURATION_MANDATORY: flag attribute that indicates
+ *	that the duration specified with %NL80211_ATTR_MEASUREMENT_DURATION is
+ *	mandatory. If this flag is not set, the duration is the maximum duration
+ *	and the actual measurement duration may be shorter.
+ *
+ * @NL80211_ATTR_MESH_PEER_AID: Association ID for the mesh peer (u16). This is
+ *	used to pull the stored data for mesh peer in power save state.
+ *
+ * @NL80211_ATTR_NAN_MASTER_PREF: the master preference to be used by
+ *	%NL80211_CMD_START_NAN and optionally with
+ *	%NL80211_CMD_CHANGE_NAN_CONFIG. Its type is u8 and it can't be 0.
+ *	Also, values 1 and 255 are reserved for certification purposes and
+ *	should not be used during a normal device operation.
+ * @NL80211_ATTR_NAN_DUAL: NAN dual band operation config (see
+ *	&enum nl80211_nan_dual_band_conf). This attribute is used with
+ *	%NL80211_CMD_START_NAN and optionally with
+ *	%NL80211_CMD_CHANGE_NAN_CONFIG.
+ * @NL80211_ATTR_NAN_FUNC: a function that can be added to NAN. See
+ *	&enum nl80211_nan_func_attributes for description of this nested
+ *	attribute.
+ * @NL80211_ATTR_NAN_MATCH: used to report a match. This is a nested attribute.
+ *	See &enum nl80211_nan_match_attributes.
+ * @NL80211_ATTR_FILS_KEK: KEK for FILS (Re)Association Request/Response frame
+ *	protection.
+ * @NL80211_ATTR_FILS_NONCES: Nonces (part of AAD) for FILS (Re)Association
+ *	Request/Response frame protection. This attribute contains the 16 octet
+ *	STA Nonce followed by 16 octets of AP Nonce.
+ *
+ * @NL80211_ATTR_MULTICAST_TO_UNICAST_ENABLED: Indicates whether or not multicast
+ *	packets should be sent out as unicast to all stations (flag attribute).
+ *
+ * @NL80211_ATTR_BSSID: The BSSID of the AP. Note that %NL80211_ATTR_MAC is also
+ *	used in various commands/events for specifying the BSSID.
+ *
+ * @NL80211_ATTR_SCHED_SCAN_RELATIVE_RSSI: Relative RSSI threshold by which
+ *	other BSSs have to be better or slightly worse than the currently
+ *	connected BSS so that they get reported to user space.
+ *	This will give an opportunity to userspace to consider connecting to
+ *	other matching BSSs which have better or slightly worse RSSI than
+ *	the current connected BSS by using an offloaded operation to avoid
+ *	unnecessary wakeups.
+ *
+ * @NL80211_ATTR_SCHED_SCAN_RSSI_ADJUST: When present the RSSI level for BSSs in
+ *	the specified band is to be adjusted before doing
+ *	%NL80211_ATTR_SCHED_SCAN_RELATIVE_RSSI based comparison to figure out
+ *	better BSSs. The attribute value is a packed structure
+ *	value as specified by &struct nl80211_bss_select_rssi_adjust.
+ *
+ * @NL80211_ATTR_TIMEOUT_REASON: The reason for which an operation timed out.
+ *	u32 attribute with an &enum nl80211_timeout_reason value. This is used,
+ *	e.g., with %NL80211_CMD_CONNECT event.
+ *
+ * @NL80211_ATTR_FILS_ERP_USERNAME: EAP Re-authentication Protocol (ERP)
+ *	username part of NAI used to refer keys rRK and rIK. This is used with
+ *	%NL80211_CMD_CONNECT.
+ *
+ * @NL80211_ATTR_FILS_ERP_REALM: EAP Re-authentication Protocol (ERP) realm part
+ *	of NAI specifying the domain name of the ER server. This is used with
+ *	%NL80211_CMD_CONNECT.
+ *
+ * @NL80211_ATTR_FILS_ERP_NEXT_SEQ_NUM: Unsigned 16-bit ERP next sequence number
+ *	to use in ERP messages. This is used in generating the FILS wrapped data
+ *	for FILS authentication and is used with %NL80211_CMD_CONNECT.
+ *
+ * @NL80211_ATTR_FILS_ERP_RRK: ERP re-authentication Root Key (rRK) for the
+ *	NAI specified by %NL80211_ATTR_FILS_ERP_USERNAME and
+ *	%NL80211_ATTR_FILS_ERP_REALM. This is used for generating rIK and rMSK
+ *	from successful FILS authentication and is used with
+ *	%NL80211_CMD_CONNECT.
+ *
+ * @NL80211_ATTR_FILS_CACHE_ID: A 2-octet identifier advertized by a FILS AP
+ *	identifying the scope of PMKSAs. This is used with
+ *	@NL80211_CMD_SET_PMKSA and @NL80211_CMD_DEL_PMKSA.
+ *
+ * @NL80211_ATTR_PMK: PMK for the PMKSA identified by %NL80211_ATTR_PMKID.
+ *	This is used with @NL80211_CMD_SET_PMKSA.
  *
  * @NUM_NL80211_ATTR: total number of nl80211_attrs available
  * @NL80211_ATTR_MAX: highest attribute number currently defined
@@ -2041,7 +2292,7 @@
 
 	NL80211_ATTR_CONN_FAILED_REASON,
 
-	NL80211_ATTR_SAE_DATA,
+	NL80211_ATTR_AUTH_DATA,
 
 	NL80211_ATTR_VHT_CAPABILITY,
 
@@ -2157,6 +2408,51 @@
 	NL80211_ATTR_MAX_SCAN_PLAN_ITERATIONS,
 	NL80211_ATTR_SCHED_SCAN_PLANS,
 
+	NL80211_ATTR_PBSS,
+
+	NL80211_ATTR_BSS_SELECT,
+
+	NL80211_ATTR_STA_SUPPORT_P2P_PS,
+
+	NL80211_ATTR_PAD,
+
+	NL80211_ATTR_IFTYPE_EXT_CAPA,
+
+	NL80211_ATTR_MU_MIMO_GROUP_DATA,
+	NL80211_ATTR_MU_MIMO_FOLLOW_MAC_ADDR,
+
+	NL80211_ATTR_SCAN_START_TIME_TSF,
+	NL80211_ATTR_SCAN_START_TIME_TSF_BSSID,
+	NL80211_ATTR_MEASUREMENT_DURATION,
+	NL80211_ATTR_MEASUREMENT_DURATION_MANDATORY,
+
+	NL80211_ATTR_MESH_PEER_AID,
+
+	NL80211_ATTR_NAN_MASTER_PREF,
+	NL80211_ATTR_NAN_DUAL,
+	NL80211_ATTR_NAN_FUNC,
+	NL80211_ATTR_NAN_MATCH,
+
+	NL80211_ATTR_FILS_KEK,
+	NL80211_ATTR_FILS_NONCES,
+
+	NL80211_ATTR_MULTICAST_TO_UNICAST_ENABLED,
+
+	NL80211_ATTR_BSSID,
+
+	NL80211_ATTR_SCHED_SCAN_RELATIVE_RSSI,
+	NL80211_ATTR_SCHED_SCAN_RSSI_ADJUST,
+
+	NL80211_ATTR_TIMEOUT_REASON,
+
+	NL80211_ATTR_FILS_ERP_USERNAME,
+	NL80211_ATTR_FILS_ERP_REALM,
+	NL80211_ATTR_FILS_ERP_NEXT_SEQ_NUM,
+	NL80211_ATTR_FILS_ERP_RRK,
+	NL80211_ATTR_FILS_CACHE_ID,
+
+	NL80211_ATTR_PMK,
+
 	/* add attributes here, update the policy in nl80211.c */
 
 	__NL80211_ATTR_AFTER_LAST,
@@ -2168,6 +2464,7 @@
 #define NL80211_ATTR_SCAN_GENERATION NL80211_ATTR_GENERATION
 #define	NL80211_ATTR_MESH_PARAMS NL80211_ATTR_MESH_CONFIG
 #define NL80211_ATTR_IFACE_SOCKET_OWNER NL80211_ATTR_SOCKET_OWNER
+#define NL80211_ATTR_SAE_DATA NL80211_ATTR_AUTH_DATA
 
 /*
  * Allow user space programs to use #ifdef on new attributes by defining them
@@ -2825,6 +3122,13 @@
  *	how this API was implemented in the past. Also, due to the same problem,
  *	the only way to create a matchset with only an RSSI filter (with this
  *	attribute) is if there's only a single matchset with the RSSI attribute.
+ * @NL80211_SCHED_SCAN_MATCH_ATTR_RELATIVE_RSSI: Flag indicating whether
+ *	%NL80211_SCHED_SCAN_MATCH_ATTR_RSSI is to be used as absolute RSSI or
+ *	relative to current bss's RSSI.
+ * @NL80211_SCHED_SCAN_MATCH_ATTR_RSSI_ADJUST: When present the RSSI level for
+ *	BSS-es in the specified band is to be adjusted before doing
+ *	RSSI-based BSS selection. The attribute value is a packed structure
+ *	value as specified by &struct nl80211_bss_select_rssi_adjust.
  * @NL80211_SCHED_SCAN_MATCH_ATTR_MAX: highest scheduled scan filter
  *	attribute number currently defined
  * @__NL80211_SCHED_SCAN_MATCH_ATTR_AFTER_LAST: internal use
@@ -2834,6 +3138,8 @@
 
 	NL80211_SCHED_SCAN_MATCH_ATTR_SSID,
 	NL80211_SCHED_SCAN_MATCH_ATTR_RSSI,
+	NL80211_SCHED_SCAN_MATCH_ATTR_RELATIVE_RSSI,
+	NL80211_SCHED_SCAN_MATCH_ATTR_RSSI_ADJUST,
 
 	/* keep last */
 	__NL80211_SCHED_SCAN_MATCH_ATTR_AFTER_LAST,
@@ -3446,6 +3752,9 @@
  * @NL80211_AUTHTYPE_FT: Fast BSS Transition (IEEE 802.11r)
  * @NL80211_AUTHTYPE_NETWORK_EAP: Network EAP (some Cisco APs and mainly LEAP)
  * @NL80211_AUTHTYPE_SAE: Simultaneous authentication of equals
+ * @NL80211_AUTHTYPE_FILS_SK: Fast Initial Link Setup shared key
+ * @NL80211_AUTHTYPE_FILS_SK_PFS: Fast Initial Link Setup shared key with PFS
+ * @NL80211_AUTHTYPE_FILS_PK: Fast Initial Link Setup public key
  * @__NL80211_AUTHTYPE_NUM: internal
  * @NL80211_AUTHTYPE_MAX: maximum valid auth algorithm
  * @NL80211_AUTHTYPE_AUTOMATIC: determine automatically (if necessary by
@@ -3458,6 +3767,9 @@
 	NL80211_AUTHTYPE_FT,
 	NL80211_AUTHTYPE_NETWORK_EAP,
 	NL80211_AUTHTYPE_SAE,
+	NL80211_AUTHTYPE_FILS_SK,
+	NL80211_AUTHTYPE_FILS_SK_PFS,
+	NL80211_AUTHTYPE_FILS_PK,
 
 	/* keep last */
 	__NL80211_AUTHTYPE_NUM,
@@ -3621,7 +3933,10 @@
  * @__NL80211_ATTR_CQM_INVALID: invalid
  * @NL80211_ATTR_CQM_RSSI_THOLD: RSSI threshold in dBm. This value specifies
  *	the threshold for the RSSI level at which an event will be sent. Zero
- *	to disable.
+ *	to disable.  Alternatively, if %NL80211_EXT_FEATURE_CQM_RSSI_LIST is
+ *	set, multiple values can be supplied as a low-to-high sorted array of
+ *	threshold values in dBm.  Events will be sent when the RSSI value
+ *	crosses any of the thresholds.
  * @NL80211_ATTR_CQM_RSSI_HYST: RSSI hysteresis in dBm. This value specifies
  *	the minimum amount the RSSI level must change after an event before a
  *	new event may be issued (to reduce effects of RSSI oscillation).
@@ -4062,6 +4377,9 @@
  *	of supported channel widths for radar detection.
  * @NL80211_IFACE_COMB_RADAR_DETECT_REGIONS: u32 attribute containing the bitmap
  *	of supported regulatory regions for radar detection.
+ * @NL80211_IFACE_COMB_BI_MIN_GCD: u32 attribute specifying the minimum GCD of
+ *	different beacon intervals supported by all the interface combinations
+ *	in this group (if not present, all beacon intervals must be identical).
  * @NUM_NL80211_IFACE_COMB: number of attributes
  * @MAX_NL80211_IFACE_COMB: highest attribute number
  *
@@ -4069,8 +4387,8 @@
  *	limits = [ #{STA} <= 1, #{AP} <= 1 ], matching BI, channels = 1, max = 2
  *	=> allows an AP and a STA that must match BIs
  *
- *	numbers = [ #{AP, P2P-GO} <= 8 ], channels = 1, max = 8
- *	=> allows 8 of AP/GO
+ *	numbers = [ #{AP, P2P-GO} <= 8 ], BI min gcd, channels = 1, max = 8,
+ *	=> allows 8 of AP/GO that can have BI gcd >= min gcd
  *
  *	numbers = [ #{STA} <= 2 ], channels = 2, max = 2
  *	=> allows two STAs on different channels
@@ -4096,6 +4414,7 @@
 	NL80211_IFACE_COMB_NUM_CHANNELS,
 	NL80211_IFACE_COMB_RADAR_DETECT_WIDTHS,
 	NL80211_IFACE_COMB_RADAR_DETECT_REGIONS,
+	NL80211_IFACE_COMB_BI_MIN_GCD,
 
 	/* keep last */
 	NUM_NL80211_IFACE_COMB,
@@ -4389,12 +4708,67 @@
 /**
  * enum nl80211_ext_feature_index - bit index of extended features.
  * @NL80211_EXT_FEATURE_VHT_IBSS: This driver supports IBSS with VHT datarates.
+ * @NL80211_EXT_FEATURE_RRM: This driver supports RRM. When featured, user
+ *	can request to use RRM (see %NL80211_ATTR_USE_RRM) with
+ *	%NL80211_CMD_ASSOCIATE and %NL80211_CMD_CONNECT requests, which will set
+ *	the ASSOC_REQ_USE_RRM flag in the association request even if
+ *	NL80211_FEATURE_QUIET is not advertized.
+ * @NL80211_EXT_FEATURE_MU_MIMO_AIR_SNIFFER: This device supports MU-MIMO air
+ *	sniffer which means that it can be configured to hear packets from
+ *	certain groups which can be configured by the
+ *	%NL80211_ATTR_MU_MIMO_GROUP_DATA attribute,
+ *	or can be configured to follow a station by configuring the
+ *	%NL80211_ATTR_MU_MIMO_FOLLOW_MAC_ADDR attribute.
+ * @NL80211_EXT_FEATURE_SCAN_START_TIME: This driver includes the actual
+ *	time the scan started in scan results event. The time is the TSF of
+ *	the BSS that the interface that requested the scan is connected to
+ *	(if available).
+ * @NL80211_EXT_FEATURE_BSS_PARENT_TSF: Per BSS, this driver reports the
+ *	time the last beacon/probe was received. The time is the TSF of the
+ *	BSS that the interface that requested the scan is connected to
+ *	(if available).
+ * @NL80211_EXT_FEATURE_SET_SCAN_DWELL: This driver supports configuration of
+ *	channel dwell time.
+ * @NL80211_EXT_FEATURE_BEACON_RATE_LEGACY: Driver supports beacon rate
+ *	configuration (AP/mesh), supporting a legacy (non HT/VHT) rate.
+ * @NL80211_EXT_FEATURE_BEACON_RATE_HT: Driver supports beacon rate
+ *	configuration (AP/mesh) with HT rates.
+ * @NL80211_EXT_FEATURE_BEACON_RATE_VHT: Driver supports beacon rate
+ *	configuration (AP/mesh) with VHT rates.
+ * @NL80211_EXT_FEATURE_FILS_STA: This driver supports Fast Initial Link Setup
+ *	with user space SME (NL80211_CMD_AUTHENTICATE) in station mode.
+ * @NL80211_EXT_FEATURE_MGMT_TX_RANDOM_TA: This driver supports randomized TA
+ *	in @NL80211_CMD_FRAME while not associated.
+ * @NL80211_EXT_FEATURE_MGMT_TX_RANDOM_TA_CONNECTED: This driver supports
+ *	randomized TA in @NL80211_CMD_FRAME while associated.
+ * @NL80211_EXT_FEATURE_SCHED_SCAN_RELATIVE_RSSI: The driver supports sched_scan
+ *	for reporting BSSs with better RSSI than the current connected BSS
+ *	(%NL80211_ATTR_SCHED_SCAN_RELATIVE_RSSI).
+ * @NL80211_EXT_FEATURE_CQM_RSSI_LIST: With this driver the
+ *	%NL80211_ATTR_CQM_RSSI_THOLD attribute accepts a list of zero or more
+ *	RSSI threshold values to monitor rather than exactly one threshold.
+ * @NL80211_EXT_FEATURE_FILS_SK_OFFLOAD: Driver SME supports FILS shared key
+ *	authentication with %NL80211_CMD_CONNECT.
  *
  * @NUM_NL80211_EXT_FEATURES: number of extended features.
  * @MAX_NL80211_EXT_FEATURES: highest extended feature index.
  */
 enum nl80211_ext_feature_index {
 	NL80211_EXT_FEATURE_VHT_IBSS,
+	NL80211_EXT_FEATURE_RRM,
+	NL80211_EXT_FEATURE_MU_MIMO_AIR_SNIFFER,
+	NL80211_EXT_FEATURE_SCAN_START_TIME,
+	NL80211_EXT_FEATURE_BSS_PARENT_TSF,
+	NL80211_EXT_FEATURE_SET_SCAN_DWELL,
+	NL80211_EXT_FEATURE_BEACON_RATE_LEGACY,
+	NL80211_EXT_FEATURE_BEACON_RATE_HT,
+	NL80211_EXT_FEATURE_BEACON_RATE_VHT,
+	NL80211_EXT_FEATURE_FILS_STA,
+	NL80211_EXT_FEATURE_MGMT_TX_RANDOM_TA,
+	NL80211_EXT_FEATURE_MGMT_TX_RANDOM_TA_CONNECTED,
+	NL80211_EXT_FEATURE_SCHED_SCAN_RELATIVE_RSSI,
+	NL80211_EXT_FEATURE_CQM_RSSI_LIST,
+	NL80211_EXT_FEATURE_FILS_SK_OFFLOAD,
 
 	/* add new features before the definition below */
 	NUM_NL80211_EXT_FEATURES,
@@ -4434,6 +4808,21 @@
 };
 
 /**
+ * enum nl80211_timeout_reason - timeout reasons
+ *
+ * @NL80211_TIMEOUT_UNSPECIFIED: Timeout reason unspecified.
+ * @NL80211_TIMEOUT_SCAN: Scan (AP discovery) timed out.
+ * @NL80211_TIMEOUT_AUTH: Authentication timed out.
+ * @NL80211_TIMEOUT_ASSOC: Association timed out.
+ */
+enum nl80211_timeout_reason {
+	NL80211_TIMEOUT_UNSPECIFIED,
+	NL80211_TIMEOUT_SCAN,
+	NL80211_TIMEOUT_AUTH,
+	NL80211_TIMEOUT_ASSOC,
+};
+
+/**
  * enum nl80211_scan_flags -  scan request control flags
  *
  * Scan request control flags are used to control the handling
@@ -4515,12 +4904,17 @@
  *	change to the channel status.
  * @NL80211_RADAR_NOP_FINISHED: The Non-Occupancy Period for this channel is
  *	over, channel becomes usable.
+ * @NL80211_RADAR_PRE_CAC_EXPIRED: Channel Availability Check done on this
+ *	non-operating channel has expired and is no longer valid. A new CAC
+ *	must be done on this channel before starting the operation. This is
+ *	not applicable for the ETSI dfs domain where pre-CAC is valid forever.
  */
 enum nl80211_radar_event {
 	NL80211_RADAR_DETECTED,
 	NL80211_RADAR_CAC_FINISHED,
 	NL80211_RADAR_CAC_ABORTED,
 	NL80211_RADAR_NOP_FINISHED,
+	NL80211_RADAR_PRE_CAC_EXPIRED,
 };
 
 /**
@@ -4644,4 +5038,48 @@
 		__NL80211_SCHED_SCAN_PLAN_AFTER_LAST - 1
 };
 
+/**
+ * struct nl80211_bss_select_rssi_adjust - RSSI adjustment parameters.
+ *
+ * @band: band of BSS that must match for RSSI value adjustment.
+ * @delta: value used to adjust the RSSI value of matching BSS.
+ */
+struct nl80211_bss_select_rssi_adjust {
+	__u8 band;
+	__s8 delta;
+} __attribute__((packed));
+
+/**
+ * enum nl80211_bss_select_attr - attributes for bss selection.
+ *
+ * @__NL80211_BSS_SELECT_ATTR_INVALID: reserved.
+ * @NL80211_BSS_SELECT_ATTR_RSSI: Flag indicating only RSSI-based BSS selection
+ *	is requested.
+ * @NL80211_BSS_SELECT_ATTR_BAND_PREF: attribute indicating BSS
+ *	selection should be done such that the specified band is preferred.
+ *	When there are multiple BSS-es in the preferred band, the driver
+ *	shall use RSSI-based BSS selection as a second step. The value of
+ *	this attribute is according to &enum nl80211_band (u32).
+ * @NL80211_BSS_SELECT_ATTR_RSSI_ADJUST: When present the RSSI level for
+ *	BSS-es in the specified band is to be adjusted before doing
+ *	RSSI-based BSS selection. The attribute value is a packed structure
+ *	value as specified by &struct nl80211_bss_select_rssi_adjust.
+ * @NL80211_BSS_SELECT_ATTR_MAX: highest bss select attribute number.
+ * @__NL80211_BSS_SELECT_ATTR_AFTER_LAST: internal use.
+ *
+ * One and only one of these attributes is found within %NL80211_ATTR_BSS_SELECT
+ * for %NL80211_CMD_CONNECT. It specifies the required BSS selection behaviour
+ * which the driver shall use.
+ */
+enum nl80211_bss_select_attr {
+	__NL80211_BSS_SELECT_ATTR_INVALID,
+	NL80211_BSS_SELECT_ATTR_RSSI,
+	NL80211_BSS_SELECT_ATTR_BAND_PREF,
+	NL80211_BSS_SELECT_ATTR_RSSI_ADJUST,
+
+	/* keep last */
+	__NL80211_BSS_SELECT_ATTR_AFTER_LAST,
+	NL80211_BSS_SELECT_ATTR_MAX = __NL80211_BSS_SELECT_ATTR_AFTER_LAST - 1
+};
+
 #endif /* __LINUX_NL80211_H */
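
Tying the FILS pieces above together, the sketch below attaches the ERP attributes to an NL80211_CMD_CONNECT request using libnl-3, as the DOC block at the top of this file describes. All attribute values are placeholders and error handling is elided; this shows the attribute plumbing only, not a complete connect flow.

#include <netlink/netlink.h>
#include <netlink/genl/genl.h>
#include <netlink/genl/ctrl.h>
#include <linux/nl80211.h>

/* Attach FILS ERP material to a CONNECT request (sketch).
 * sk must already have been set up with genl_connect(). */
static int fils_connect(struct nl_sock *sk, int ifindex)
{
	int family = genl_ctrl_resolve(sk, "nl80211");
	struct nl_msg *msg = nlmsg_alloc();
	int ret;

	genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, family, 0, 0,
		    NL80211_CMD_CONNECT, 0);
	nla_put_u32(msg, NL80211_ATTR_IFINDEX, ifindex);
	nla_put(msg, NL80211_ATTR_SSID, 7, "example");

	/* keyname_nai = "user@realm.example", per RFC 6696 */
	nla_put(msg, NL80211_ATTR_FILS_ERP_USERNAME, 4, "user");
	nla_put(msg, NL80211_ATTR_FILS_ERP_REALM, 13, "realm.example");
	nla_put_u16(msg, NL80211_ATTR_FILS_ERP_NEXT_SEQ_NUM, 1);
	/* a real 32-byte rRK would come from a prior full EAP run */
	nla_put(msg, NL80211_ATTR_FILS_ERP_RRK, 32, (char [32]){0});

	ret = nl_send_auto(sk, msg);
	nlmsg_free(msg);
	return ret;
}
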
diff -ruw linux-4.4.115/include/uapi/linux/perf_event.h linux-4.4.115-fbx/include/uapi/linux/perf_event.h
--- linux-4.4.115/include/uapi/linux/perf_event.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/uapi/linux/perf_event.h	2019-01-22 16:16:28.583292409 +0100
@@ -334,7 +334,9 @@
 				comm_exec      :  1, /* flag comm events that are due to an exec */
 				use_clockid    :  1, /* use @clockid for time fields */
 				context_switch :  1, /* context switch data */
-				__reserved_1   : 37;
+				constraint_duplicate : 1,
+
+				__reserved_1   : 36;
 
 	union {
 		__u32		wakeup_events;	  /* wakeup every n events */
@@ -395,6 +397,7 @@
 #define PERF_EVENT_IOC_SET_FILTER	_IOW('$', 6, char *)
 #define PERF_EVENT_IOC_ID		_IOR('$', 7, __u64 *)
 #define PERF_EVENT_IOC_SET_BPF		_IOW('$', 8, __u32)
+#define PERF_EVENT_IOC_SET_DRV_CONFIGS	_IOW('$', 10, char *)
 
 enum perf_event_ioc_flags {
 	PERF_IOC_FLAG_GROUP		= 1U << 0,
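
PERF_EVENT_IOC_SET_DRV_CONFIGS carries a driver-defined string; this header only reserves ioctl number 10. A hedged sketch, assuming a raw perf_event_open syscall wrapper and a placeholder config string whose meaning is entirely up to the PMU driver:

#define _GNU_SOURCE
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

static int perf_event_open(struct perf_event_attr *attr, pid_t pid,
			   int cpu, int group_fd, unsigned long flags)
{
	/* no glibc wrapper exists for this syscall */
	return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}

int set_drv_config(int cpu)
{
	struct perf_event_attr attr;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_SOFTWARE;		/* placeholder PMU type */
	attr.config = PERF_COUNT_SW_CPU_CLOCK;

	fd = perf_event_open(&attr, -1, cpu, -1, 0);
	if (fd < 0)
		return -1;

	/* the string format is defined by the PMU driver (assumption) */
	return ioctl(fd, PERF_EVENT_IOC_SET_DRV_CONFIGS, "sink=etr");
}
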
diff -ruw linux-4.4.115/include/uapi/linux/pkt_sched.h linux-4.4.115-fbx/include/uapi/linux/pkt_sched.h
--- linux-4.4.115/include/uapi/linux/pkt_sched.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/uapi/linux/pkt_sched.h	2019-01-22 16:16:28.583292409 +0100
@@ -126,6 +126,7 @@
 struct tc_prio_qopt {
 	int	bands;			/* Number of bands */
 	__u8	priomap[TC_PRIO_MAX+1];	/* Map: logical priority -> PRIO band */
+	__u8	enable_flow;		/* Enable dequeue */
 };
 
 /* MULTIQ section */
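
The new enable_flow byte travels inside the same struct tc_prio_qopt blob that PRIO has always taken via TCA_OPTIONS; clearing it stops dequeue per the comment. A minimal sketch of building the extended config (the priomap shown is the kernel's historical default, stated here as an assumption):

#include <string.h>
#include <linux/pkt_sched.h>

/* Build a PRIO qdisc config with dequeue enabled (sketch). */
static void fill_prio_qopt(struct tc_prio_qopt *qopt)
{
	static const __u8 def_priomap[TC_PRIO_MAX + 1] =
		{ 1, 2, 2, 2, 1, 2, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1 };

	memset(qopt, 0, sizeof(*qopt));
	qopt->bands = 3;
	memcpy(qopt->priomap, def_priomap, sizeof(def_priomap));
	qopt->enable_flow = 1;		/* allow dequeue (this patch) */
}
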
diff -ruw linux-4.4.115/include/uapi/linux/prctl.h linux-4.4.115-fbx/include/uapi/linux/prctl.h
--- linux-4.4.115/include/uapi/linux/prctl.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/uapi/linux/prctl.h	2019-10-29 09:26:25.553221870 +0100
@@ -197,4 +197,13 @@
 # define PR_CAP_AMBIENT_LOWER		3
 # define PR_CAP_AMBIENT_CLEAR_ALL	4
 
+/* Sets the timerslack for arbitrary threads
+ * arg2 slack value, 0 means "use default"
+ * arg3 pid of the thread whose timer slack needs to be set
+ */
+#define PR_SET_TIMERSLACK_PID	127
+
+#define PR_SET_VMA		0x53564d41
+# define PR_SET_VMA_ANON_NAME		0
+
 #endif /* _LINUX_PRCTL_H */
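
Both prctls are Android-originated. PR_SET_VMA with PR_SET_VMA_ANON_NAME attaches a name to an anonymous mapping (visible in /proc/<pid>/maps), and PR_SET_TIMERSLACK_PID follows the arg2/arg3 convention documented above. A short sketch; the region name and slack value are arbitrary:

#include <sys/types.h>
#include <sys/mman.h>
#include <sys/prctl.h>

#define PR_SET_TIMERSLACK_PID	127
#define PR_SET_VMA		0x53564d41
#define PR_SET_VMA_ANON_NAME	0

int tag_region_and_set_slack(pid_t pid)
{
	size_t len = 4096;
	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED)
		return -1;

	/* name shows up in /proc/self/maps as [anon:my-heap] */
	prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME,
	      (unsigned long)p, len, (unsigned long)"my-heap");

	/* arg2 = slack in ns (0 means default), arg3 = target pid */
	return prctl(PR_SET_TIMERSLACK_PID, 50000, pid);
}
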
diff -ruw linux-4.4.115/include/uapi/linux/rtnetlink.h linux-4.4.115-fbx/include/uapi/linux/rtnetlink.h
--- linux-4.4.115/include/uapi/linux/rtnetlink.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/uapi/linux/rtnetlink.h	2019-01-22 16:16:28.587292445 +0100
@@ -311,6 +311,9 @@
 	RTA_PREF,
 	RTA_ENCAP_TYPE,
 	RTA_ENCAP,
+	RTA_EXPIRES,
+	RTA_PAD,
+	RTA_UID,
 	__RTA_MAX
 };
 
diff -ruw linux-4.4.115/include/uapi/linux/sock_diag.h linux-4.4.115-fbx/include/uapi/linux/sock_diag.h
--- linux-4.4.115/include/uapi/linux/sock_diag.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/uapi/linux/sock_diag.h	2019-01-22 16:16:28.587292445 +0100
@@ -4,6 +4,7 @@
 #include <linux/types.h>
 
 #define SOCK_DIAG_BY_FAMILY 20
+#define SOCK_DESTROY_BACKPORT 21
 
 struct sock_diag_req {
 	__u8	sdiag_family;
diff -ruw linux-4.4.115/include/uapi/linux/sockios.h linux-4.4.115-fbx/include/uapi/linux/sockios.h
--- linux-4.4.115/include/uapi/linux/sockios.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/uapi/linux/sockios.h	2019-01-22 16:16:28.587292445 +0100
@@ -65,6 +65,7 @@
 #define SIOCDIFADDR	0x8936		/* delete PA address		*/
 #define	SIOCSIFHWBROADCAST	0x8937	/* set hardware broadcast addr	*/
 #define SIOCGIFCOUNT	0x8938		/* get number of devices */
+#define SIOCKILLADDR	0x8939		/* kill sockets with this local addr */
 
 #define SIOCGIFBR	0x8940		/* Bridging support		*/
 #define SIOCSIFBR	0x8941		/* Set bridging options 	*/
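
SIOCKILLADDR is what Android's netd uses to reset sockets bound to an address that has gone away. The header defines only the number; the argument layout below (a struct ifreq carrying the doomed local address) is an assumption based on common usage, not something this hunk specifies:

#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <netinet/in.h>

#define SIOCKILLADDR 0x8939	/* from linux/sockios.h above */

/* Kill sockets using the given local IPv4 address (sketch). */
int kill_sockets_on_addr(const char *ifname, struct in_addr addr)
{
	struct ifreq ifr;
	struct sockaddr_in *sin = (struct sockaddr_in *)&ifr.ifr_addr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);
	int ret;

	if (fd < 0)
		return -1;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	sin->sin_family = AF_INET;
	sin->sin_addr = addr;

	ret = ioctl(fd, SIOCKILLADDR, &ifr);
	close(fd);
	return ret;
}
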
diff -ruw linux-4.4.115/include/uapi/linux/sysctl.h linux-4.4.115-fbx/include/uapi/linux/sysctl.h
--- linux-4.4.115/include/uapi/linux/sysctl.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/uapi/linux/sysctl.h	2019-01-22 16:16:28.591292481 +0100
@@ -154,6 +154,7 @@
 	KERN_NMI_WATCHDOG=75, /* int: enable/disable nmi watchdog */
 	KERN_PANIC_ON_NMI=76, /* int: whether we will panic on an unrecovered */
 	KERN_PANIC_ON_WARN=77, /* int: call panic() in WARN() functions */
+	KERN_COLD_BOOT = 78, /* int: identify if system cold booted */
 };
 
 
@@ -482,6 +483,7 @@
 	NET_IPV4_CONF_PROMOTE_SECONDARIES=20,
 	NET_IPV4_CONF_ARP_ACCEPT=21,
 	NET_IPV4_CONF_ARP_NOTIFY=22,
+	NET_IPV4_CONF_NF_IPV4_DEFRAG_SKIP = 23,
 };
 
 /* /proc/sys/net/ipv4/netfilter */
@@ -570,6 +572,7 @@
 	NET_IPV6_PROXY_NDP=23,
 	NET_IPV6_ACCEPT_SOURCE_ROUTE=25,
 	NET_IPV6_ACCEPT_RA_FROM_LOCAL=26,
+	NET_IPV6_ACCEPT_RA_RT_INFO_MIN_PLEN=27,
 	__NET_IPV6_MAX
 };
 
diff -ruw linux-4.4.115/include/uapi/linux/tcp.h linux-4.4.115-fbx/include/uapi/linux/tcp.h
--- linux-4.4.115/include/uapi/linux/tcp.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/uapi/linux/tcp.h	2019-01-22 16:16:28.591292481 +0100
@@ -116,6 +116,8 @@
 #define TCP_SAVE_SYN		27	/* Record SYN headers for new connections */
 #define TCP_SAVED_SYN		28	/* Get SYN headers recorded for connection */
 
+#define TCP_LINEAR_RTO		128	/* force use of linear timeouts */
+
 struct tcp_repair_opt {
 	__u32	opt_code;
 	__u32	opt_val;
@@ -157,6 +159,7 @@
 	__u8	tcpi_backoff;
 	__u8	tcpi_options;
 	__u8	tcpi_snd_wscale : 4, tcpi_rcv_wscale : 4;
+	__u8	tcpi_count;
 
 	__u32	tcpi_rto;
 	__u32	tcpi_ato;
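
TCP_LINEAR_RTO is deliberately parked at 128, well clear of upstream option numbers. The comment is the whole contract; the sketch below assumes the kernel side treats it as a boolean int:

#include <netinet/in.h>
#include <netinet/tcp.h>
#include <sys/socket.h>

#ifndef TCP_LINEAR_RTO
#define TCP_LINEAR_RTO 128	/* force use of linear timeouts */
#endif

int enable_linear_rto(int sock)
{
	int one = 1;

	/* assumed boolean semantics; the header only defines the number */
	return setsockopt(sock, IPPROTO_TCP, TCP_LINEAR_RTO,
			  &one, sizeof(one));
}
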
diff -ruw linux-4.4.115/include/uapi/linux/time.h linux-4.4.115-fbx/include/uapi/linux/time.h
--- linux-4.4.115/include/uapi/linux/time.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/uapi/linux/time.h	2019-10-29 09:26:25.553221870 +0100
@@ -56,6 +56,7 @@
 #define CLOCK_BOOTTIME_ALARM		9
 #define CLOCK_SGI_CYCLE			10	/* Hardware specific */
 #define CLOCK_TAI			11
+#define CLOCK_POWEROFF_ALARM		12
 
 #define MAX_CLOCKS			16
 #define CLOCKS_MASK			(CLOCK_REALTIME | CLOCK_MONOTONIC)
diff -ruw linux-4.4.115/include/uapi/linux/usb/audio.h linux-4.4.115-fbx/include/uapi/linux/usb/audio.h
--- linux-4.4.115/include/uapi/linux/usb/audio.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/uapi/linux/usb/audio.h	2019-10-29 09:26:25.553221870 +0100
@@ -26,6 +26,7 @@
 /* bInterfaceProtocol values to denote the version of the standard used */
 #define UAC_VERSION_1			0x00
 #define UAC_VERSION_2			0x20
+#define UAC_VERSION_3			0x30
 
 /* A.2 Audio Interface Subclass Codes */
 #define USB_SUBCLASS_AUDIOCONTROL	0x01
diff -ruw linux-4.4.115/include/uapi/linux/usb/cdc.h linux-4.4.115-fbx/include/uapi/linux/usb/cdc.h
--- linux-4.4.115/include/uapi/linux/usb/cdc.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/uapi/linux/usb/cdc.h	2019-01-22 16:16:28.595292518 +0100
@@ -231,6 +231,7 @@
 
 #define USB_CDC_SEND_ENCAPSULATED_COMMAND	0x00
 #define USB_CDC_GET_ENCAPSULATED_RESPONSE	0x01
+#define USB_CDC_RESET_FUNCTION			0x05
 #define USB_CDC_REQ_SET_LINE_CODING		0x20
 #define USB_CDC_REQ_GET_LINE_CODING		0x21
 #define USB_CDC_REQ_SET_CONTROL_LINE_STATE	0x22
diff -ruw linux-4.4.115/include/uapi/linux/usb/ch9.h linux-4.4.115-fbx/include/uapi/linux/usb/ch9.h
--- linux-4.4.115/include/uapi/linux/usb/ch9.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/uapi/linux/usb/ch9.h	2019-01-22 16:16:28.595292518 +0100
@@ -909,6 +909,30 @@
 } __attribute__((packed));
 
 /*
+ * Configuration Summary descriptors: Defines a list of functions in the
+ * configuration. This descriptor may be used by Host software to decide
+ * which Configuration to use to obtain the desired functionality.
+ */
+#define	USB_CAP_TYPE_CONFIG_SUMMARY	0x10
+
+struct function_class_info {
+	__u8 bClass;
+	__u8 bSubClass;
+	__u8 bProtocol;
+};
+
+struct usb_config_summary_descriptor {
+	__u8 bLength;
+	__u8 bDescriptorType;
+	__u8 bDevCapabilityType;
+	__u16 bcdVersion;
+	__u8 bConfigurationValue;
+	__u8 bMaxPower;
+	__u8 bNumFunctions;
+	struct function_class_info cs_info[];
+} __attribute__((packed));
+
+/*
  * The size of the descriptor for the Sublink Speed Attribute Count
  * (SSAC) specified in bmAttributes[4:0].
  */
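
Because struct usb_config_summary_descriptor ends in a flexible array, a consumer must size its walk from bNumFunctions and bLength rather than sizeof alone. A host-side parsing sketch over a raw descriptor buffer (how the buffer is obtained is out of scope; the patched ch9.h is assumed to be installed):

#include <stdio.h>
#include <linux/usb/ch9.h>

/* Walk the function list of a Configuration Summary descriptor
 * (sketch; buf/len acquisition not shown). */
static void dump_config_summary(const void *buf, unsigned int len)
{
	const struct usb_config_summary_descriptor *d = buf;
	unsigned int i;

	if (len < sizeof(*d) || d->bLength > len ||
	    d->bDevCapabilityType != USB_CAP_TYPE_CONFIG_SUMMARY ||
	    d->bLength < sizeof(*d) + d->bNumFunctions * sizeof(d->cs_info[0]))
		return;

	for (i = 0; i < d->bNumFunctions; i++)
		printf("config %u: class %02x/%02x/%02x\n",
		       d->bConfigurationValue,
		       d->cs_info[i].bClass,
		       d->cs_info[i].bSubClass,
		       d->cs_info[i].bProtocol);
}
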
diff -ruw linux-4.4.115/include/uapi/linux/usb/Kbuild linux-4.4.115-fbx/include/uapi/linux/usb/Kbuild
--- linux-4.4.115/include/uapi/linux/usb/Kbuild	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/uapi/linux/usb/Kbuild	2019-01-22 16:16:28.595292518 +0100
@@ -9,4 +9,5 @@
 header-y += gadgetfs.h
 header-y += midi.h
 header-y += tmc.h
+header-y += usb_ctrl_qti.h
 header-y += video.h
diff -ruw linux-4.4.115/include/uapi/linux/v4l2-controls.h linux-4.4.115-fbx/include/uapi/linux/v4l2-controls.h
--- linux-4.4.115/include/uapi/linux/v4l2-controls.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/uapi/linux/v4l2-controls.h	2019-01-22 16:16:28.595292518 +0100
@@ -369,6 +369,7 @@
 enum v4l2_mpeg_video_header_mode {
 	V4L2_MPEG_VIDEO_HEADER_MODE_SEPARATE			= 0,
 	V4L2_MPEG_VIDEO_HEADER_MODE_JOINED_WITH_1ST_FRAME	= 1,
+	V4L2_MPEG_VIDEO_HEADER_MODE_JOINED_WITH_I_FRAME		= 2,
 
 };
 #define V4L2_CID_MPEG_VIDEO_MAX_REF_PIC			(V4L2_CID_MPEG_BASE+217)
@@ -380,6 +381,7 @@
 	V4L2_MPEG_VIDEO_MULTI_SLICE_MODE_SINGLE		= 0,
 	V4L2_MPEG_VIDEO_MULTI_SICE_MODE_MAX_MB		= 1,
 	V4L2_MPEG_VIDEO_MULTI_SICE_MODE_MAX_BYTES	= 2,
+	V4L2_MPEG_VIDEO_MULTI_SLICE_GOB			= 3,
 };
 #define V4L2_CID_MPEG_VIDEO_VBV_SIZE			(V4L2_CID_MPEG_BASE+222)
 #define V4L2_CID_MPEG_VIDEO_DEC_PTS			(V4L2_CID_MPEG_BASE+223)
@@ -425,6 +427,7 @@
 	V4L2_MPEG_VIDEO_H264_LEVEL_4_2	= 13,
 	V4L2_MPEG_VIDEO_H264_LEVEL_5_0	= 14,
 	V4L2_MPEG_VIDEO_H264_LEVEL_5_1	= 15,
+	V4L2_MPEG_VIDEO_H264_LEVEL_5_2	= 16,
 };
 #define V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_ALPHA	(V4L2_CID_MPEG_BASE+360)
 #define V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_BETA	(V4L2_CID_MPEG_BASE+361)
@@ -453,6 +456,7 @@
 	V4L2_MPEG_VIDEO_H264_PROFILE_SCALABLE_HIGH_INTRA	= 14,
 	V4L2_MPEG_VIDEO_H264_PROFILE_STEREO_HIGH		= 15,
 	V4L2_MPEG_VIDEO_H264_PROFILE_MULTIVIEW_HIGH		= 16,
+	V4L2_MPEG_VIDEO_H264_PROFILE_CONSTRAINED_HIGH		= 17,
 };
 #define V4L2_CID_MPEG_VIDEO_H264_VUI_EXT_SAR_HEIGHT	(V4L2_CID_MPEG_BASE+364)
 #define V4L2_CID_MPEG_VIDEO_H264_VUI_EXT_SAR_WIDTH	(V4L2_CID_MPEG_BASE+365)
@@ -644,6 +648,590 @@
 #define V4L2_CID_MPEG_MFC51_VIDEO_H264_ADAPTIVE_RC_STATIC		(V4L2_CID_MPEG_MFC51_BASE+53)
 #define V4L2_CID_MPEG_MFC51_VIDEO_H264_NUM_REF_PIC_FOR_P		(V4L2_CID_MPEG_MFC51_BASE+54)
 
+/*  MPEG-class control IDs specific to the msm_vidc driver */
+#define V4L2_CID_MPEG_MSM_VIDC_BASE		(V4L2_CTRL_CLASS_MPEG | 0x2000)
+
+#define V4L2_CID_MPEG_VIDC_VIDEO_PICTYPE_DEC_MODE \
+			(V4L2_CID_MPEG_MSM_VIDC_BASE+0)
+enum v4l2_mpeg_vidc_video_pictype_dec_mode {
+	V4L2_MPEG_VIDC_VIDEO_PICTYPE_DECODE_OFF = 0,
+	V4L2_MPEG_VIDC_VIDEO_PICTYPE_DECODE_ON = 1,
+};
+
+#define V4L2_CID_MPEG_VIDC_VIDEO_KEEP_ASPECT_RATIO \
+			(V4L2_CID_MPEG_MSM_VIDC_BASE+1)
+#define V4L2_CID_MPEG_VIDC_VIDEO_POST_LOOP_DEBLOCKER_MODE \
+			(V4L2_CID_MPEG_MSM_VIDC_BASE+2)
+#define V4L2_CID_MPEG_VIDC_VIDEO_DIVX_FORMAT \
+			(V4L2_CID_MPEG_MSM_VIDC_BASE+3)
+enum v4l2_mpeg_vidc_video_divx_format_type {
+	V4L2_MPEG_VIDC_VIDEO_DIVX_FORMAT_4		= 0,
+	V4L2_MPEG_VIDC_VIDEO_DIVX_FORMAT_5		= 1,
+	V4L2_MPEG_VIDC_VIDEO_DIVX_FORMAT_6		= 2,
+};
+#define V4L2_CID_MPEG_VIDC_VIDEO_MB_ERROR_MAP_REPORTING	\
+			(V4L2_CID_MPEG_MSM_VIDC_BASE+4)
+#define V4L2_CID_MPEG_VIDC_VIDEO_CONTINUE_DATA_TRANSFER \
+			(V4L2_CID_MPEG_MSM_VIDC_BASE+5)
+
+#define V4L2_CID_MPEG_VIDC_VIDEO_STREAM_FORMAT   (V4L2_CID_MPEG_MSM_VIDC_BASE+6)
+enum v4l2_mpeg_vidc_video_stream_format {
+	V4L2_MPEG_VIDC_VIDEO_NAL_FORMAT_STARTCODES         = 0,
+	V4L2_MPEG_VIDC_VIDEO_NAL_FORMAT_ONE_NAL_PER_BUFFER = 1,
+	V4L2_MPEG_VIDC_VIDEO_NAL_FORMAT_ONE_BYTE_LENGTH    = 2,
+	V4L2_MPEG_VIDC_VIDEO_NAL_FORMAT_TWO_BYTE_LENGTH    = 3,
+	V4L2_MPEG_VIDC_VIDEO_NAL_FORMAT_FOUR_BYTE_LENGTH   = 4,
+};
+
+#define V4L2_CID_MPEG_VIDC_VIDEO_OUTPUT_ORDER   (V4L2_CID_MPEG_MSM_VIDC_BASE+7)
+enum v4l2_mpeg_vidc_video_output_order {
+	V4L2_MPEG_VIDC_VIDEO_OUTPUT_ORDER_DISPLAY         = 0,
+	V4L2_MPEG_VIDC_VIDEO_OUTPUT_ORDER_DECODE          = 1,
+};
+
+#define V4L2_CID_MPEG_VIDC_VIDEO_FRAME_RATE   (V4L2_CID_MPEG_MSM_VIDC_BASE+8)
+#define V4L2_CID_MPEG_VIDC_VIDEO_IDR_PERIOD   (V4L2_CID_MPEG_MSM_VIDC_BASE+9)
+#define V4L2_CID_MPEG_VIDC_VIDEO_NUM_P_FRAMES (V4L2_CID_MPEG_MSM_VIDC_BASE+10)
+#define V4L2_CID_MPEG_VIDC_VIDEO_NUM_B_FRAMES (V4L2_CID_MPEG_MSM_VIDC_BASE+11)
+#define V4L2_CID_MPEG_VIDC_VIDEO_REQUEST_IFRAME (V4L2_CID_MPEG_MSM_VIDC_BASE+12)
+
+#define V4L2_CID_MPEG_VIDC_VIDEO_RATE_CONTROL (V4L2_CID_MPEG_MSM_VIDC_BASE+13)
+enum v4l2_mpeg_vidc_video_rate_control {
+	V4L2_CID_MPEG_VIDC_VIDEO_RATE_CONTROL_OFF = 0,
+	V4L2_CID_MPEG_VIDC_VIDEO_RATE_CONTROL_VBR_VFR = 1,
+	V4L2_CID_MPEG_VIDC_VIDEO_RATE_CONTROL_VBR_CFR = 2,
+	V4L2_CID_MPEG_VIDC_VIDEO_RATE_CONTROL_CBR_VFR = 3,
+	V4L2_CID_MPEG_VIDC_VIDEO_RATE_CONTROL_CBR_CFR = 4,
+	V4L2_CID_MPEG_VIDC_VIDEO_RATE_CONTROL_MBR_CFR = 5,
+	V4L2_CID_MPEG_VIDC_VIDEO_RATE_CONTROL_MBR_VFR = 6,
+};
+#define V4L2_CID_MPEG_VIDC_VIDEO_RATE_CONTROL_MBR_CFR	\
+			V4L2_CID_MPEG_VIDC_VIDEO_RATE_CONTROL_MBR_CFR
+#define V4L2_CID_MPEG_VIDC_VIDEO_RATE_CONTROL_MBR_VFR	\
+			V4L2_CID_MPEG_VIDC_VIDEO_RATE_CONTROL_MBR_VFR
+
+#define V4L2_CID_MPEG_VIDC_VIDEO_ROTATION (V4L2_CID_MPEG_MSM_VIDC_BASE+14)
+enum v4l2_mpeg_vidc_video_rotation {
+	V4L2_CID_MPEG_VIDC_VIDEO_ROTATION_NONE = 0,
+	V4L2_CID_MPEG_VIDC_VIDEO_ROTATION_90 = 1,
+	V4L2_CID_MPEG_VIDC_VIDEO_ROTATION_180 = 2,
+	V4L2_CID_MPEG_VIDC_VIDEO_ROTATION_270 = 3,
+};
+#define MSM_VIDC_BASE V4L2_CID_MPEG_MSM_VIDC_BASE
+#define V4L2_CID_MPEG_VIDC_VIDEO_H264_CABAC_MODEL (MSM_VIDC_BASE+15)
+enum v4l2_mpeg_vidc_h264_cabac_model {
+	V4L2_CID_MPEG_VIDC_VIDEO_H264_CABAC_MODEL_0 = 0,
+	V4L2_CID_MPEG_VIDC_VIDEO_H264_CABAC_MODEL_1 = 1,
+	V4L2_CID_MPEG_VIDC_VIDEO_H264_CABAC_MODEL_2 = 2,
+};
+
+#define V4L2_CID_MPEG_VIDC_VIDEO_INTRA_REFRESH_MODE (MSM_VIDC_BASE+16)
+enum v4l2_mpeg_vidc_video_intra_refresh_mode {
+	V4L2_CID_MPEG_VIDC_VIDEO_INTRA_REFRESH_NONE = 0,
+	V4L2_CID_MPEG_VIDC_VIDEO_INTRA_REFRESH_CYCLIC = 1,
+	V4L2_CID_MPEG_VIDC_VIDEO_INTRA_REFRESH_ADAPTIVE = 2,
+	V4L2_CID_MPEG_VIDC_VIDEO_INTRA_REFRESH_CYCLIC_ADAPTIVE = 3,
+	V4L2_CID_MPEG_VIDC_VIDEO_INTRA_REFRESH_RANDOM = 4,
+};
+#define V4L2_CID_MPEG_VIDC_VIDEO_AIR_MBS (V4L2_CID_MPEG_MSM_VIDC_BASE+17)
+#define V4L2_CID_MPEG_VIDC_VIDEO_AIR_REF (V4L2_CID_MPEG_MSM_VIDC_BASE+18)
+#define V4L2_CID_MPEG_VIDC_VIDEO_CIR_MBS (V4L2_CID_MPEG_MSM_VIDC_BASE+19)
+
+#define V4L2_CID_MPEG_VIDC_VIDEO_H263_PROFILE (V4L2_CID_MPEG_MSM_VIDC_BASE+20)
+enum v4l2_mpeg_vidc_video_h263_profile {
+	V4L2_MPEG_VIDC_VIDEO_H263_PROFILE_BASELINE = 0,
+	V4L2_MPEG_VIDC_VIDEO_H263_PROFILE_H320CODING	= 1,
+	V4L2_MPEG_VIDC_VIDEO_H263_PROFILE_BACKWARDCOMPATIBLE = 2,
+	V4L2_MPEG_VIDC_VIDEO_H263_PROFILE_ISWV2 = 3,
+	V4L2_MPEG_VIDC_VIDEO_H263_PROFILE_ISWV3 = 4,
+	V4L2_MPEG_VIDC_VIDEO_H263_PROFILE_HIGHCOMPRESSION = 5,
+	V4L2_MPEG_VIDC_VIDEO_H263_PROFILE_INTERNET = 6,
+	V4L2_MPEG_VIDC_VIDEO_H263_PROFILE_INTERLACE = 7,
+	V4L2_MPEG_VIDC_VIDEO_H263_PROFILE_HIGHLATENCY = 8,
+};
+
+#define V4L2_CID_MPEG_VIDC_VIDEO_H263_LEVEL (V4L2_CID_MPEG_MSM_VIDC_BASE+21)
+enum v4l2_mpeg_vidc_video_h263_level {
+	V4L2_MPEG_VIDC_VIDEO_H263_LEVEL_1_0 = 0,
+	V4L2_MPEG_VIDC_VIDEO_H263_LEVEL_2_0 = 1,
+	V4L2_MPEG_VIDC_VIDEO_H263_LEVEL_3_0 = 2,
+	V4L2_MPEG_VIDC_VIDEO_H263_LEVEL_4_0 = 3,
+	V4L2_MPEG_VIDC_VIDEO_H263_LEVEL_4_5 = 4,
+	V4L2_MPEG_VIDC_VIDEO_H263_LEVEL_5_0 = 5,
+	V4L2_MPEG_VIDC_VIDEO_H263_LEVEL_6_0 = 6,
+	V4L2_MPEG_VIDC_VIDEO_H263_LEVEL_7_0 = 7,
+};
+
+#define V4L2_CID_MPEG_VIDC_VIDEO_H264_AU_DELIMITER \
+		(V4L2_CID_MPEG_MSM_VIDC_BASE + 22)
+enum v4l2_mpeg_vidc_video_h264_au_delimiter {
+	V4L2_MPEG_VIDC_VIDEO_H264_AU_DELIMITER_DISABLED = 0,
+	V4L2_MPEG_VIDC_VIDEO_H264_AU_DELIMITER_ENABLED = 1
+};
+#define V4L2_CID_MPEG_VIDC_VIDEO_SYNC_FRAME_DECODE \
+		(V4L2_CID_MPEG_MSM_VIDC_BASE + 23)
+enum v4l2_mpeg_vidc_video_sync_frame_decode {
+	V4L2_MPEG_VIDC_VIDEO_SYNC_FRAME_DECODE_DISABLE = 0,
+	V4L2_MPEG_VIDC_VIDEO_SYNC_FRAME_DECODE_ENABLE = 1
+};
+#define V4L2_CID_MPEG_VIDC_VIDEO_SECURE (V4L2_CID_MPEG_MSM_VIDC_BASE+24)
+#define V4L2_CID_MPEG_VIDC_VIDEO_EXTRADATA \
+		(V4L2_CID_MPEG_MSM_VIDC_BASE + 25)
+enum v4l2_mpeg_vidc_extradata {
+	V4L2_MPEG_VIDC_EXTRADATA_NONE = 0,
+	V4L2_MPEG_VIDC_EXTRADATA_MB_QUANTIZATION = 1,
+	V4L2_MPEG_VIDC_EXTRADATA_INTERLACE_VIDEO = 2,
+	V4L2_MPEG_VIDC_EXTRADATA_VC1_FRAMEDISP = 3,
+	V4L2_MPEG_VIDC_EXTRADATA_VC1_SEQDISP = 4,
+	V4L2_MPEG_VIDC_EXTRADATA_TIMESTAMP = 5,
+	V4L2_MPEG_VIDC_EXTRADATA_S3D_FRAME_PACKING = 6,
+	V4L2_MPEG_VIDC_EXTRADATA_FRAME_RATE = 7,
+	V4L2_MPEG_VIDC_EXTRADATA_PANSCAN_WINDOW = 8,
+	V4L2_MPEG_VIDC_EXTRADATA_RECOVERY_POINT_SEI = 9,
+	V4L2_MPEG_VIDC_EXTRADATA_MULTISLICE_INFO = 10,
+	V4L2_MPEG_VIDC_EXTRADATA_NUM_CONCEALED_MB = 11,
+	V4L2_MPEG_VIDC_EXTRADATA_METADATA_FILLER = 12,
+	V4L2_MPEG_VIDC_EXTRADATA_INPUT_CROP = 13,
+	V4L2_MPEG_VIDC_EXTRADATA_DIGITAL_ZOOM = 14,
+	V4L2_MPEG_VIDC_EXTRADATA_ASPECT_RATIO = 15,
+	V4L2_MPEG_VIDC_EXTRADATA_MPEG2_SEQDISP = 16,
+	V4L2_MPEG_VIDC_EXTRADATA_STREAM_USERDATA = 17,
+	V4L2_MPEG_VIDC_EXTRADATA_FRAME_QP = 18,
+	V4L2_MPEG_VIDC_EXTRADATA_FRAME_BITS_INFO = 19,
+	V4L2_MPEG_VIDC_EXTRADATA_LTR = 20,
+	V4L2_MPEG_VIDC_EXTRADATA_METADATA_MBI = 21,
+	V4L2_MPEG_VIDC_EXTRADATA_VQZIP_SEI = 22,
+	V4L2_MPEG_VIDC_EXTRADATA_YUV_STATS = 23,
+	V4L2_MPEG_VIDC_EXTRADATA_ROI_QP = 24,
+#define V4L2_MPEG_VIDC_EXTRADATA_OUTPUT_CROP \
+	V4L2_MPEG_VIDC_EXTRADATA_OUTPUT_CROP
+	V4L2_MPEG_VIDC_EXTRADATA_OUTPUT_CROP = 25,
+#define V4L2_MPEG_VIDC_EXTRADATA_DISPLAY_COLOUR_SEI \
+	V4L2_MPEG_VIDC_EXTRADATA_DISPLAY_COLOUR_SEI
+	V4L2_MPEG_VIDC_EXTRADATA_DISPLAY_COLOUR_SEI = 26,
+#define V4L2_MPEG_VIDC_EXTRADATA_CONTENT_LIGHT_LEVEL_SEI \
+	V4L2_MPEG_VIDC_EXTRADATA_CONTENT_LIGHT_LEVEL_SEI
+	V4L2_MPEG_VIDC_EXTRADATA_CONTENT_LIGHT_LEVEL_SEI = 27,
+#define V4L2_MPEG_VIDC_EXTRADATA_PQ_INFO \
+	V4L2_MPEG_VIDC_EXTRADATA_PQ_INFO
+	V4L2_MPEG_VIDC_EXTRADATA_PQ_INFO = 28,
+#define V4L2_MPEG_VIDC_EXTRADATA_VUI_DISPLAY \
+	V4L2_MPEG_VIDC_EXTRADATA_VUI_DISPLAY
+	V4L2_MPEG_VIDC_EXTRADATA_VUI_DISPLAY = 29,
+#define V4L2_MPEG_VIDC_EXTRADATA_VPX_COLORSPACE \
+	V4L2_MPEG_VIDC_EXTRADATA_VPX_COLORSPACE
+	V4L2_MPEG_VIDC_EXTRADATA_VPX_COLORSPACE = 30,
+};
+
+#define V4L2_CID_MPEG_VIDC_SET_PERF_LEVEL (V4L2_CID_MPEG_MSM_VIDC_BASE + 26)
+enum v4l2_mpeg_vidc_perf_level {
+	V4L2_CID_MPEG_VIDC_PERF_LEVEL_NOMINAL			= 0,
+	V4L2_CID_MPEG_VIDC_PERF_LEVEL_PERFORMANCE		= 1,
+	V4L2_CID_MPEG_VIDC_PERF_LEVEL_TURBO			= 2,
+};
+#define V4L2_CID_MPEG_VIDEO_MULTI_SLICE_GOB		\
+		(V4L2_CID_MPEG_MSM_VIDC_BASE + 27)
+
+#define V4L2_CID_MPEG_VIDEO_MULTI_SLICE_DELIVERY_MODE	\
+	(V4L2_CID_MPEG_MSM_VIDC_BASE + 28)
+
+#define V4L2_CID_MPEG_VIDC_VIDEO_H264_VUI_TIMING_INFO \
+		(V4L2_CID_MPEG_MSM_VIDC_BASE + 29)
+enum v4l2_mpeg_vidc_video_h264_vui_timing_info {
+	V4L2_MPEG_VIDC_VIDEO_H264_VUI_TIMING_INFO_DISABLED = 0,
+	V4L2_MPEG_VIDC_VIDEO_H264_VUI_TIMING_INFO_ENABLED = 1
+};
+
+#define V4L2_CID_MPEG_VIDC_VIDEO_ALLOC_MODE_INPUT	\
+		(V4L2_CID_MPEG_MSM_VIDC_BASE + 30)
+#define V4L2_CID_MPEG_VIDC_VIDEO_ALLOC_MODE_OUTPUT       \
+		(V4L2_CID_MPEG_MSM_VIDC_BASE + 31)
+enum v4l2_mpeg_vidc_video_alloc_mode_type {
+	V4L2_MPEG_VIDC_VIDEO_STATIC	= 0,
+	V4L2_MPEG_VIDC_VIDEO_RING	= 1,
+	V4L2_MPEG_VIDC_VIDEO_DYNAMIC	= 2,
+};
+
+#define V4L2_CID_MPEG_VIDC_VIDEO_FRAME_ASSEMBLY	\
+		(V4L2_CID_MPEG_MSM_VIDC_BASE + 32)
+enum v4l2_mpeg_vidc_video_assembly {
+	V4L2_MPEG_VIDC_FRAME_ASSEMBLY_DISABLE	= 0,
+	V4L2_MPEG_VIDC_FRAME_ASSEMBLY_ENABLE	= 1,
+};
+
+#define V4L2_CID_MPEG_VIDC_VIDEO_VP8_PROFILE_LEVEL \
+		(V4L2_CID_MPEG_MSM_VIDC_BASE + 33)
+enum v4l2_mpeg_vidc_video_vp8_profile_level {
+	V4L2_MPEG_VIDC_VIDEO_VP8_UNUSED,
+	V4L2_MPEG_VIDC_VIDEO_VP8_VERSION_0,
+	V4L2_MPEG_VIDC_VIDEO_VP8_VERSION_1,
+	V4L2_MPEG_VIDC_VIDEO_VP8_VERSION_2,
+	V4L2_MPEG_VIDC_VIDEO_VP8_VERSION_3,
+};
+
+#define V4L2_CID_MPEG_VIDC_VIDEO_H264_VUI_BITSTREAM_RESTRICT \
+		(V4L2_CID_MPEG_MSM_VIDC_BASE + 34)
+enum v4l2_mpeg_vidc_video_h264_vui_bitstream_restrict {
+	V4L2_MPEG_VIDC_VIDEO_H264_VUI_BITSTREAM_RESTRICT_DISABLED = 0,
+	V4L2_MPEG_VIDC_VIDEO_H264_VUI_BITSTREAM_RESTRICT_ENABLED = 1
+};
+
+#define V4L2_CID_MPEG_VIDC_VIDEO_PRESERVE_TEXT_QUALITY \
+		(V4L2_CID_MPEG_MSM_VIDC_BASE + 35)
+enum v4l2_mpeg_vidc_video_preserve_text_quality {
+	V4L2_MPEG_VIDC_VIDEO_PRESERVE_TEXT_QUALITY_DISABLED = 0,
+	V4L2_MPEG_VIDC_VIDEO_PRESERVE_TEXT_QUALITY_ENABLED = 1
+};
+
+#define V4L2_CID_MPEG_VIDC_VIDEO_DEINTERLACE \
+		(V4L2_CID_MPEG_MSM_VIDC_BASE + 36)
+enum v4l2_mpeg_vidc_video_deinterlace {
+	V4L2_CID_MPEG_VIDC_VIDEO_DEINTERLACE_DISABLED = 0,
+	V4L2_CID_MPEG_VIDC_VIDEO_DEINTERLACE_ENABLED = 1
+};
+
+#define V4L2_CID_MPEG_VIDC_VIDEO_MPEG4_TIME_RESOLUTION \
+		(V4L2_CID_MPEG_MSM_VIDC_BASE + 37)
+
+#define V4L2_CID_MPEG_VIDC_VIDEO_STREAM_OUTPUT_MODE \
+		(V4L2_CID_MPEG_MSM_VIDC_BASE + 38)
+enum v4l2_mpeg_vidc_video_decoder_multi_stream {
+	V4L2_CID_MPEG_VIDC_VIDEO_STREAM_OUTPUT_PRIMARY = 0,
+	V4L2_CID_MPEG_VIDC_VIDEO_STREAM_OUTPUT_SECONDARY = 1,
+};
+#define V4L2_CID_MPEG_VIDC_VIDEO_SCS_THRESHOLD \
+		(V4L2_CID_MPEG_MSM_VIDC_BASE + 39)
+
+#define V4L2_CID_MPEG_VIDC_VIDEO_MPEG2_LEVEL	(V4L2_CID_MPEG_MSM_VIDC_BASE+40)
+enum v4l2_mpeg_vidc_video_mpeg2_level {
+	V4L2_MPEG_VIDC_VIDEO_MPEG2_LEVEL_0	= 0,
+	V4L2_MPEG_VIDC_VIDEO_MPEG2_LEVEL_1	= 1,
+	V4L2_MPEG_VIDC_VIDEO_MPEG2_LEVEL_2	= 2,
+	V4L2_MPEG_VIDC_VIDEO_MPEG2_LEVEL_3	= 3,
+};
+#define V4L2_CID_MPEG_VIDC_VIDEO_MPEG2_PROFILE	(V4L2_CID_MPEG_MSM_VIDC_BASE+41)
+enum v4l2_mpeg_vidc_video_mpeg2_profile {
+	V4L2_MPEG_VIDC_VIDEO_MPEG2_PROFILE_SIMPLE		= 0,
+	V4L2_MPEG_VIDC_VIDEO_MPEG2_PROFILE_MAIN			= 1,
+	V4L2_MPEG_VIDC_VIDEO_MPEG2_PROFILE_422			= 2,
+	V4L2_MPEG_VIDC_VIDEO_MPEG2_PROFILE_SNR_SCALABLE		= 3,
+	V4L2_MPEG_VIDC_VIDEO_MPEG2_PROFILE_SPATIAL_SCALABLE	= 4,
+	V4L2_MPEG_VIDC_VIDEO_MPEG2_PROFILE_HIGH			= 5,
+};
+#define V4L2_CID_MPEG_VIDC_VIDEO_REQUEST_SEQ_HEADER \
+		(V4L2_CID_MPEG_MSM_VIDC_BASE + 42)
+
+#define V4L2_CID_MPEG_VIDC_VIDEO_MVC_BUFFER_LAYOUT \
+		(V4L2_CID_MPEG_MSM_VIDC_BASE + 43)
+enum v4l2_mpeg_vidc_video_mvc_layout {
+	V4L2_MPEG_VIDC_VIDEO_MVC_SEQUENTIAL = 0,
+	V4L2_MPEG_VIDC_VIDEO_MVC_TOP_BOTTOM = 1
+};
+#define V4L2_CID_MPEG_VIDC_VIDEO_VP8_MIN_QP (V4L2_CID_MPEG_MSM_VIDC_BASE + 44)
+#define V4L2_CID_MPEG_VIDC_VIDEO_VP8_MAX_QP (V4L2_CID_MPEG_MSM_VIDC_BASE + 45)
+
+#define V4L2_CID_MPEG_VIDC_VIDEO_CONCEAL_COLOR \
+		(V4L2_CID_MPEG_MSM_VIDC_BASE + 46)
+
+#define V4L2_CID_MPEG_VIDC_VIDEO_LTRMODE \
+		(V4L2_CID_MPEG_MSM_VIDC_BASE + 47)
+
+enum v4l2_mpeg_vidc_video_ltrmode {
+	V4L2_MPEG_VIDC_VIDEO_LTR_MODE_DISABLE = 0,
+	V4L2_MPEG_VIDC_VIDEO_LTR_MODE_MANUAL = 1,
+	V4L2_MPEG_VIDC_VIDEO_LTR_MODE_PERIODIC = 2
+};
+
+#define V4L2_CID_MPEG_VIDC_VIDEO_LTRCOUNT \
+		(V4L2_CID_MPEG_MSM_VIDC_BASE + 48)
+
+#define V4L2_CID_MPEG_VIDC_VIDEO_USELTRFRAME \
+		(V4L2_CID_MPEG_MSM_VIDC_BASE + 49)
+
+#define V4L2_CID_MPEG_VIDC_VIDEO_MARKLTRFRAME \
+		(V4L2_CID_MPEG_MSM_VIDC_BASE + 50)
+
+#define V4L2_CID_MPEG_VIDC_VIDEO_HIER_P_NUM_LAYERS \
+		(V4L2_CID_MPEG_MSM_VIDC_BASE + 51)
+
+#define V4L2_CID_MPEG_VIDC_VIDEO_RATE_CONTROL_TIMESTAMP_MODE \
+	(V4L2_CID_MPEG_MSM_VIDC_BASE + 52)
+enum v4l2_mpeg_vidc_video_rate_control_timestamp_mode {
+	V4L2_MPEG_VIDC_VIDEO_RATE_CONTROL_TIMESTAMP_MODE_HONOR = 0,
+	V4L2_MPEG_VIDC_VIDEO_RATE_CONTROL_TIMESTAMP_MODE_IGNORE = 1,
+};
+
+#define V4L2_CID_MPEG_VIDC_VIDEO_ENABLE_INITIAL_QP \
+		(V4L2_CID_MPEG_MSM_VIDC_BASE + 53)
+enum v4l2_mpeg_vidc_video_enable_initial_qp {
+	V4L2_CID_MPEG_VIDC_VIDEO_ENABLE_INITIAL_QP_IFRAME = 0x1,
+	V4L2_CID_MPEG_VIDC_VIDEO_ENABLE_INITIAL_QP_PFRAME = 0x2,
+	V4L2_CID_MPEG_VIDC_VIDEO_ENABLE_INITIAL_QP_BFRAME = 0x4,
+};
+
+#define V4L2_CID_MPEG_VIDC_VIDEO_INITIAL_I_FRAME_QP \
+		(V4L2_CID_MPEG_MSM_VIDC_BASE + 54)
+
+#define V4L2_CID_MPEG_VIDC_VIDEO_INITIAL_P_FRAME_QP \
+		(V4L2_CID_MPEG_MSM_VIDC_BASE + 55)
+
+#define V4L2_CID_MPEG_VIDC_VIDEO_INITIAL_B_FRAME_QP \
+		(V4L2_CID_MPEG_MSM_VIDC_BASE + 56)
+
+#define V4L2_CID_MPEG_VIDC_VIDEO_IFRAME_X_RANGE \
+		(V4L2_CID_MPEG_MSM_VIDC_BASE + 57)
+
+#define V4L2_CID_MPEG_VIDC_VIDEO_PFRAME_X_RANGE \
+		(V4L2_CID_MPEG_MSM_VIDC_BASE + 58)
+
+#define V4L2_CID_MPEG_VIDC_VIDEO_BFRAME_X_RANGE \
+		(V4L2_CID_MPEG_MSM_VIDC_BASE + 59)
+
+#define V4L2_CID_MPEG_VIDC_VIDEO_IFRAME_Y_RANGE \
+		(V4L2_CID_MPEG_MSM_VIDC_BASE + 60)
+
+#define V4L2_CID_MPEG_VIDC_VIDEO_PFRAME_Y_RANGE \
+		(V4L2_CID_MPEG_MSM_VIDC_BASE + 61)
+
+#define V4L2_CID_MPEG_VIDC_VIDEO_BFRAME_Y_RANGE \
+		(V4L2_CID_MPEG_MSM_VIDC_BASE + 62)
+
+#define V4L2_CID_MPEG_VIDC_VIDEO_VPX_ERROR_RESILIENCE \
+	(V4L2_CID_MPEG_MSM_VIDC_BASE + 63)
+
+#define V4L2_CID_MPEG_VIDC_VIDEO_BUFFER_SIZE_LIMIT \
+		(V4L2_CID_MPEG_MSM_VIDC_BASE + 64)
+
+enum v4l2_mpeg_vidc_video_vpx_error_resilience {
+	V4L2_MPEG_VIDC_VIDEO_VPX_ERROR_RESILIENCE_DISABLED = 0,
+	V4L2_MPEG_VIDC_VIDEO_VPX_ERROR_RESILIENCE_ENABLED = 1,
+};
+
+#define V4L2_CID_MPEG_VIDC_VIDEO_HEVC_PROFILE \
+	(V4L2_CID_MPEG_MSM_VIDC_BASE + 65)
+enum v4l2_mpeg_video_hevc_profile {
+	V4L2_MPEG_VIDC_VIDEO_HEVC_PROFILE_MAIN			= 0,
+	V4L2_MPEG_VIDC_VIDEO_HEVC_PROFILE_MAIN10		= 1,
+	V4L2_MPEG_VIDC_VIDEO_HEVC_PROFILE_MAIN_STILL_PIC	= 2,
+};
+
+#define V4L2_CID_MPEG_VIDC_VIDEO_HEVC_TIER_LEVEL \
+	(V4L2_CID_MPEG_MSM_VIDC_BASE + 66)
+enum v4l2_mpeg_video_hevc_level {
+	V4L2_MPEG_VIDC_VIDEO_HEVC_LEVEL_MAIN_TIER_LEVEL_1	= 0,
+	V4L2_MPEG_VIDC_VIDEO_HEVC_LEVEL_HIGH_TIER_LEVEL_1	= 1,
+	V4L2_MPEG_VIDC_VIDEO_HEVC_LEVEL_MAIN_TIER_LEVEL_2	= 2,
+	V4L2_MPEG_VIDC_VIDEO_HEVC_LEVEL_HIGH_TIER_LEVEL_2	= 3,
+	V4L2_MPEG_VIDC_VIDEO_HEVC_LEVEL_MAIN_TIER_LEVEL_2_1	= 4,
+	V4L2_MPEG_VIDC_VIDEO_HEVC_LEVEL_HIGH_TIER_LEVEL_2_1	= 5,
+	V4L2_MPEG_VIDC_VIDEO_HEVC_LEVEL_MAIN_TIER_LEVEL_3	= 6,
+	V4L2_MPEG_VIDC_VIDEO_HEVC_LEVEL_HIGH_TIER_LEVEL_3	= 7,
+	V4L2_MPEG_VIDC_VIDEO_HEVC_LEVEL_MAIN_TIER_LEVEL_3_1	= 8,
+	V4L2_MPEG_VIDC_VIDEO_HEVC_LEVEL_HIGH_TIER_LEVEL_3_1	= 9,
+	V4L2_MPEG_VIDC_VIDEO_HEVC_LEVEL_MAIN_TIER_LEVEL_4	= 10,
+	V4L2_MPEG_VIDC_VIDEO_HEVC_LEVEL_HIGH_TIER_LEVEL_4	= 11,
+	V4L2_MPEG_VIDC_VIDEO_HEVC_LEVEL_MAIN_TIER_LEVEL_4_1	= 12,
+	V4L2_MPEG_VIDC_VIDEO_HEVC_LEVEL_HIGH_TIER_LEVEL_4_1	= 13,
+	V4L2_MPEG_VIDC_VIDEO_HEVC_LEVEL_MAIN_TIER_LEVEL_5	= 14,
+	V4L2_MPEG_VIDC_VIDEO_HEVC_LEVEL_HIGH_TIER_LEVEL_5	= 15,
+	V4L2_MPEG_VIDC_VIDEO_HEVC_LEVEL_MAIN_TIER_LEVEL_5_1	= 16,
+	V4L2_MPEG_VIDC_VIDEO_HEVC_LEVEL_HIGH_TIER_LEVEL_5_1	= 17,
+	V4L2_MPEG_VIDC_VIDEO_HEVC_LEVEL_MAIN_TIER_LEVEL_5_2	= 18,
+	V4L2_MPEG_VIDC_VIDEO_HEVC_LEVEL_HIGH_TIER_LEVEL_5_2	= 19,
+	V4L2_MPEG_VIDC_VIDEO_HEVC_LEVEL_MAIN_TIER_LEVEL_6	= 20,
+	V4L2_MPEG_VIDC_VIDEO_HEVC_LEVEL_HIGH_TIER_LEVEL_6	= 21,
+	V4L2_MPEG_VIDC_VIDEO_HEVC_LEVEL_MAIN_TIER_LEVEL_6_1	= 22,
+	V4L2_MPEG_VIDC_VIDEO_HEVC_LEVEL_HIGH_TIER_LEVEL_6_1	= 23,
+	V4L2_MPEG_VIDC_VIDEO_HEVC_LEVEL_MAIN_TIER_LEVEL_6_2	= 24,
+	V4L2_MPEG_VIDC_VIDEO_HEVC_LEVEL_HIGH_TIER_LEVEL_6_2	= 25,
+};
+
+#define V4L2_CID_MPEG_VIDC_VIDEO_H264_NAL_SVC \
+	(V4L2_CID_MPEG_MSM_VIDC_BASE + 67)
+
+enum v4l2_mpeg_vidc_video_h264_svc_nal {
+	V4L2_CID_MPEG_VIDC_VIDEO_H264_NAL_SVC_DISABLED = 0,
+	V4L2_CID_MPEG_VIDC_VIDEO_H264_NAL_SVC_ENABLED = 1,
+};
+
+#define V4L2_CID_MPEG_VIDC_VIDEO_PERF_MODE	 \
+		(V4L2_CID_MPEG_MSM_VIDC_BASE + 68)
+
+enum v4l2_mpeg_vidc_video_perf_mode {
+#define V4L2_MPEG_VIDC_VIDEO_PERF_UNINIT \
+	V4L2_MPEG_VIDC_VIDEO_PERF_UNINIT
+	V4L2_MPEG_VIDC_VIDEO_PERF_UNINIT = 0,
+	V4L2_MPEG_VIDC_VIDEO_PERF_MAX_QUALITY = 1,
+	V4L2_MPEG_VIDC_VIDEO_PERF_POWER_SAVE = 2
+};
+
+#define V4L2_CID_MPEG_VIDC_VIDEO_HIER_B_NUM_LAYERS \
+		(V4L2_CID_MPEG_MSM_VIDC_BASE + 69)
+
+#define V4L2_CID_MPEG_VIDC_VIDEO_SECURE_SCALING_THRESHOLD \
+		(V4L2_CID_MPEG_MSM_VIDC_BASE + 70)
+
+#define V4L2_CID_MPEG_VIDC_VIDEO_NON_SECURE_OUTPUT2 \
+		(V4L2_CID_MPEG_MSM_VIDC_BASE + 71)
+
+#define V4L2_CID_MPEG_VIDC_VIDEO_HYBRID_HIERP_MODE \
+		(V4L2_CID_MPEG_MSM_VIDC_BASE + 72)
+
+#define V4L2_CID_MPEG_VIDC_VIDEO_DPB_COLOR_FORMAT \
+		(V4L2_CID_MPEG_MSM_VIDC_BASE + 73)
+
+enum v4l2_mpeg_vidc_video_dpb_color_format {
+	V4L2_MPEG_VIDC_VIDEO_DPB_COLOR_FMT_NONE = 0,
+	V4L2_MPEG_VIDC_VIDEO_DPB_COLOR_FMT_UBWC = 1,
+	V4L2_MPEG_VIDC_VIDEO_DPB_COLOR_FMT_TP10_UBWC = 2
+};
+
+#define V4L2_CID_MPEG_VIDC_VIDEO_MBI_STATISTICS_MODE	\
+		(V4L2_CID_MPEG_MSM_VIDC_BASE + 74)
+
+enum v4l2_mpeg_vidc_video_mbi_statistics_mode {
+	V4L2_CID_MPEG_VIDC_VIDEO_MBI_MODE_DEFAULT	= 0,
+	V4L2_CID_MPEG_VIDC_VIDEO_MBI_MODE_1		= 1,
+	V4L2_CID_MPEG_VIDC_VIDEO_MBI_MODE_2		= 2,
+	V4L2_CID_MPEG_VIDC_VIDEO_MBI_MODE_3		= 3,
+};
+
+#define V4L2_CID_VIDC_QBUF_MODE \
+		(V4L2_CID_MPEG_MSM_VIDC_BASE + 75)
+enum v4l2_vidc_qbuf_mode {
+	V4L2_VIDC_QBUF_STANDARD = 0,
+	V4L2_VIDC_QBUF_BATCHED = 1,
+};
+
+#define V4L2_CID_MPEG_VIDC_VIDEO_MAX_HIERP_LAYERS \
+		(V4L2_CID_MPEG_MSM_VIDC_BASE + 76)
+
+#define V4L2_CID_MPEG_VIDC_VIDEO_BASELAYER_ID \
+		(V4L2_CID_MPEG_MSM_VIDC_BASE + 77)
+
+#define V4L2_CID_MPEG_VIDC_VIDEO_CONFIG_QP \
+		(V4L2_CID_MPEG_MSM_VIDC_BASE + 78)
+
+#define V4L2_CID_MPEG_VIDC_VENC_PARAM_SAR_WIDTH \
+		(V4L2_CID_MPEG_MSM_VIDC_BASE + 79)
+
+#define V4L2_CID_MPEG_VIDC_VENC_PARAM_SAR_HEIGHT \
+		(V4L2_CID_MPEG_MSM_VIDC_BASE + 80)
+
+#define V4L2_CID_MPEG_VIDC_VIDEO_VQZIP_SEI	\
+		(V4L2_CID_MPEG_MSM_VIDC_BASE + 81)
+
+enum v4l2_mpeg_vidc_video_vqzip_sei_enable {
+	V4L2_CID_MPEG_VIDC_VIDEO_VQZIP_SEI_DISABLE	= 0,
+	V4L2_CID_MPEG_VIDC_VIDEO_VQZIP_SEI_ENABLE	= 1
+};
+
+#define V4L2_CID_MPEG_VIDC_VENC_PARAM_LAYER_BITRATE \
+		(V4L2_CID_MPEG_MSM_VIDC_BASE + 82)
+
+#define V4L2_CID_MPEG_VIDC_VIDEO_PRIORITY \
+		(V4L2_CID_MPEG_MSM_VIDC_BASE + 83)
+
+enum v4l2_mpeg_vidc_video_priority {
+	V4L2_MPEG_VIDC_VIDEO_PRIORITY_REALTIME_ENABLE = 0,
+	V4L2_MPEG_VIDC_VIDEO_PRIORITY_REALTIME_DISABLE = 1,
+};
+
+#define V4L2_CID_MPEG_VIDC_VIDEO_OPERATING_RATE \
+		(V4L2_CID_MPEG_MSM_VIDC_BASE + 84)
+
+#define V4L2_CID_MPEG_VIDC_VIDEO_VENC_BITRATE_TYPE \
+		(V4L2_CID_MPEG_MSM_VIDC_BASE + 85)
+
+enum v4l2_mpeg_vidc_video_venc_bitrate_type_enable {
+	V4L2_CID_MPEG_VIDC_VIDEO_VENC_BITRATE_DISABLE	= 0,
+	V4L2_CID_MPEG_VIDC_VIDEO_VENC_BITRATE_ENABLE	= 1
+};
+
+#define V4L2_CID_MPEG_VIDC_VIDEO_H264_PIC_ORDER_CNT \
+	(V4L2_CID_MPEG_MSM_VIDC_BASE + 86)
+
+#define V4L2_CID_MPEG_VIDC_VIDEO_VPE_CSC \
+	(V4L2_CID_MPEG_MSM_VIDC_BASE + 87)
+
+enum v4l2_cid_mpeg_vidc_video_vpe_csc_type_enable {
+	V4L2_CID_MPEG_VIDC_VIDEO_VPE_CSC_DISABLE  = 0,
+	V4L2_CID_MPEG_VIDC_VIDEO_VPE_CSC_ENABLE   = 1
+};
+
+#define V4L2_CID_MPEG_VIDC_VIDEO_LOWLATENCY_MODE \
+		(V4L2_CID_MPEG_MSM_VIDC_BASE + 88)
+
+enum v4l2_mpeg_vidc_video_lowlatency_mode {
+	V4L2_CID_MPEG_VIDC_VIDEO_LOWLATENCY_DISABLE     = 0,
+	V4L2_CID_MPEG_VIDC_VIDEO_LOWLATENCY_ENABLE      = 1,
+};
+
+#define V4L2_CID_MPEG_VIDC_VIDEO_BLUR_WIDTH \
+	(V4L2_CID_MPEG_MSM_VIDC_BASE + 89)
+
+#define V4L2_CID_MPEG_VIDC_VIDEO_BLUR_HEIGHT \
+	(V4L2_CID_MPEG_MSM_VIDC_BASE + 90)
+
+#define V4L2_CID_MPEG_VIDEO_MIN_QP_PACKED \
+		(V4L2_CID_MPEG_MSM_VIDC_BASE + 91)
+#define V4L2_CID_MPEG_VIDEO_MAX_QP_PACKED \
+		(V4L2_CID_MPEG_MSM_VIDC_BASE + 92)
+
+#define V4L2_CID_MPEG_VIDC_VIDEO_H264_TRANSFORM_8x8 \
+		(V4L2_CID_MPEG_MSM_VIDC_BASE + 93)
+enum v4l2_mpeg_vidc_video_h264_transform_8x8 {
+	V4L2_MPEG_VIDC_VIDEO_H264_TRANSFORM_8x8_DISABLE = 0,
+	V4L2_MPEG_VIDC_VIDEO_H264_TRANSFORM_8x8_ENABLE = 1,
+};
+
+#define V4L2_CID_MPEG_VIDC_VIDEO_COLOR_SPACE \
+		(V4L2_CID_MPEG_MSM_VIDC_BASE + 94)
+
+#define V4L2_CID_MPEG_VIDC_VIDEO_FULL_RANGE \
+		(V4L2_CID_MPEG_MSM_VIDC_BASE + 95)
+
+enum v4l2_cid_mpeg_vidc_video_full_range {
+	V4L2_CID_MPEG_VIDC_VIDEO_FULL_RANGE_DISABLE = 0,
+	V4L2_CID_MPEG_VIDC_VIDEO_FULL_RANGE_ENABLE = 1,
+};
+
+#define V4L2_CID_MPEG_VIDC_VIDEO_TRANSFER_CHARS \
+		(V4L2_CID_MPEG_MSM_VIDC_BASE + 96)
+
+#define V4L2_CID_MPEG_VIDC_VIDEO_MATRIX_COEFFS \
+		(V4L2_CID_MPEG_MSM_VIDC_BASE + 97)
+
+#define V4L2_CID_MPEG_VIDC_VIDEO_IFRAME_SIZE_TYPE \
+		(V4L2_CID_MPEG_MSM_VIDC_BASE + 98)
+enum v4l2_mpeg_vidc_video_venc_iframesize_type {
+	V4L2_CID_MPEG_VIDC_VIDEO_IFRAME_SIZE_DEFAULT,
+	V4L2_CID_MPEG_VIDC_VIDEO_IFRAME_SIZE_MEDIUM,
+	V4L2_CID_MPEG_VIDC_VIDEO_IFRAME_SIZE_HUGE,
+	V4L2_CID_MPEG_VIDC_VIDEO_IFRAME_SIZE_UNLIMITED,
+};
+
+#define V4L2_CID_MPEG_VIDC_VIDEO_I_FRAME_QP \
+		(V4L2_CID_MPEG_MSM_VIDC_BASE + 99)
+#define V4L2_CID_MPEG_VIDC_VIDEO_P_FRAME_QP \
+		(V4L2_CID_MPEG_MSM_VIDC_BASE + 100)
+#define V4L2_CID_MPEG_VIDC_VIDEO_B_FRAME_QP \
+		(V4L2_CID_MPEG_MSM_VIDC_BASE + 101)
+
+#define V4L2_CID_MPEG_VIDC_VIDEO_AU_DELIMITER \
+		(V4L2_CID_MPEG_MSM_VIDC_BASE + 102)
+enum v4l2_mpeg_vidc_video_au_delimiter {
+	V4L2_MPEG_VIDC_VIDEO_AU_DELIMITER_DISABLED = 0,
+	V4L2_MPEG_VIDC_VIDEO_AU_DELIMITER_ENABLED = 1
+};
+
+#define V4L2_CID_MPEG_VIDC_VIDEO_SEND_SKIPPED_FRAME \
+		(V4L2_CID_MPEG_MSM_VIDC_BASE + 103)
+enum v4l2_mpeg_vidc_video_venc_send_skipped_frame {
+	V4L2_MPEG_VIDC_VIDEO_SEND_SKIPPED_FRAME_DISABLE = 0,
+	V4L2_MPEG_VIDC_VIDEO_SEND_SKIPPED_FRAME_ENABLE = 1
+};
+
 
 /*  Camera class control IDs */
 
@@ -757,6 +1345,12 @@
 #define V4L2_CID_PAN_SPEED			(V4L2_CID_CAMERA_CLASS_BASE+32)
 #define V4L2_CID_TILT_SPEED			(V4L2_CID_CAMERA_CLASS_BASE+33)
 
+/* User-class control IDs specific to the msm_ba driver */
+
+#define MSM_BA_PRIV_BASE_START			(V4L2_CID_USER_BASE | 0x7000)
+#define MSM_BA_PRIV_SD_NODE_ADDR		(MSM_BA_PRIV_BASE_START + 1)
+#define MSM_BA_PRIV_FPS			(MSM_BA_PRIV_BASE_START + 2)
+
 /* FM Modulator class control IDs */
 
 #define V4L2_CID_FM_TX_CLASS_BASE		(V4L2_CTRL_CLASS_FM_TX | 0x900)
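All of the V4L2_CID_MPEG_VIDC_* identifiers above are ordinary V4L2 controls, so userspace drives them through the standard control ioctls rather than any vendor-specific entry point. A minimal sketch, assuming fd is an open descriptor on the msm_vidc decoder node (error handling trimmed):

	#include <sys/ioctl.h>
	#include <linux/videodev2.h>

	/* Route decoded frames to the secondary (post-processed) output. */
	struct v4l2_control ctrl = {
		.id    = V4L2_CID_MPEG_VIDC_VIDEO_STREAM_OUTPUT_MODE,
		.value = V4L2_CID_MPEG_VIDC_VIDEO_STREAM_OUTPUT_SECONDARY,
	};
	if (ioctl(fd, VIDIOC_S_CTRL, &ctrl) < 0)
		perror("VIDIOC_S_CTRL");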
diff -ruw linux-4.4.115/include/uapi/linux/videodev2.h linux-4.4.115-fbx/include/uapi/linux/videodev2.h
--- linux-4.4.115/include/uapi/linux/videodev2.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/uapi/linux/videodev2.h	2019-01-22 16:16:28.595292518 +0100
@@ -2,6 +2,7 @@
  *  Video for Linux Two header file
  *
  *  Copyright (C) 1999-2012 the contributors
+ *  Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
  *
  *  This program is free software; you can redistribute it and/or modify
  *  it under the terms of the GNU General Public License as published by
@@ -69,7 +70,7 @@
  * Common stuff for both V4L1 and V4L2
  * Moved from videodev.h
  */
-#define VIDEO_MAX_FRAME               32
+#define VIDEO_MAX_FRAME               64
 #define VIDEO_MAX_PLANES               8
 
 /*
@@ -488,6 +489,9 @@
 #define V4L2_PIX_FMT_ARGB32  v4l2_fourcc('B', 'A', '2', '4') /* 32  ARGB-8-8-8-8  */
 #define V4L2_PIX_FMT_XRGB32  v4l2_fourcc('B', 'X', '2', '4') /* 32  XRGB-8-8-8-8  */
 
+/* UBWC 32-bit RGBA8888 */
+#define V4L2_PIX_FMT_RGBA8888_UBWC   v4l2_fourcc('Q', 'R', 'G', 'B')
+
 /* Grey formats */
 #define V4L2_PIX_FMT_GREY    v4l2_fourcc('G', 'R', 'E', 'Y') /*  8  Greyscale     */
 #define V4L2_PIX_FMT_Y4      v4l2_fourcc('Y', '0', '4', ' ') /*  4  Greyscale     */
@@ -535,6 +539,11 @@
 #define V4L2_PIX_FMT_NV24    v4l2_fourcc('N', 'V', '2', '4') /* 24  Y/CbCr 4:4:4  */
 #define V4L2_PIX_FMT_NV42    v4l2_fourcc('N', 'V', '4', '2') /* 24  Y/CrCb 4:4:4  */
 
+/* UBWC 8-bit Y/CbCr 4:2:0  */
+#define V4L2_PIX_FMT_NV12_UBWC        v4l2_fourcc('Q', '1', '2', '8')
+/* UBWC 10-bit Y/CbCr 4:2:0 */
+#define V4L2_PIX_FMT_NV12_TP10_UBWC   v4l2_fourcc('Q', '1', '2', 'A')
+
 /* two non contiguous planes - one Y, one Cr + Cb interleaved  */
 #define V4L2_PIX_FMT_NV12M   v4l2_fourcc('N', 'M', '1', '2') /* 12  Y/CbCr 4:2:0  */
 #define V4L2_PIX_FMT_NV21M   v4l2_fourcc('N', 'M', '2', '1') /* 21  Y/CrCb 4:2:0  */
@@ -576,6 +585,16 @@
 #define V4L2_PIX_FMT_SGRBG12 v4l2_fourcc('B', 'A', '1', '2') /* 12  GRGR.. BGBG.. */
 #define V4L2_PIX_FMT_SRGGB12 v4l2_fourcc('R', 'G', '1', '2') /* 12  RGRG.. GBGB.. */
 #define V4L2_PIX_FMT_SBGGR16 v4l2_fourcc('B', 'Y', 'R', '2') /* 16  BGBG.. GRGR.. */
+	/* 10bit raw bayer DPCM compressed to 6 bits */
+#define V4L2_PIX_FMT_SBGGR10DPCM6 v4l2_fourcc('b', 'B', 'A', '6')
+#define V4L2_PIX_FMT_SGBRG10DPCM6 v4l2_fourcc('b', 'G', 'A', '6')
+#define V4L2_PIX_FMT_SGRBG10DPCM6 v4l2_fourcc('B', 'D', '1', '6')
+#define V4L2_PIX_FMT_SRGGB10DPCM6 v4l2_fourcc('b', 'R', 'A', '6')
+	/* 10bit raw bayer, plain16 packed */
+#define V4L2_PIX_FMT_SBGGRPLAIN16 v4l2_fourcc('B', 'G', '1', '6')
+#define V4L2_PIX_FMT_SGBRGPLAIN16 v4l2_fourcc('G', 'B', '1', '6')
+#define V4L2_PIX_FMT_SGRBGPLAIN16 v4l2_fourcc('G', 'R', '1', '6')
+#define V4L2_PIX_FMT_SRGGBPLAIN16 v4l2_fourcc('R', 'G', '1', '6')
 
 /* compressed formats */
 #define V4L2_PIX_FMT_MJPEG    v4l2_fourcc('M', 'J', 'P', 'G') /* Motion-JPEG   */
@@ -593,6 +612,11 @@
 #define V4L2_PIX_FMT_VC1_ANNEX_G v4l2_fourcc('V', 'C', '1', 'G') /* SMPTE 421M Annex G compliant stream */
 #define V4L2_PIX_FMT_VC1_ANNEX_L v4l2_fourcc('V', 'C', '1', 'L') /* SMPTE 421M Annex L compliant stream */
 #define V4L2_PIX_FMT_VP8      v4l2_fourcc('V', 'P', '8', '0') /* VP8 */
+#define V4L2_PIX_FMT_VP9      v4l2_fourcc('V', 'P', '9', '0') /* VP9 */
+#define V4L2_PIX_FMT_DIVX_311  v4l2_fourcc('D', 'I', 'V', '3') /* DIVX311     */
+#define V4L2_PIX_FMT_DIVX      v4l2_fourcc('D', 'I', 'V', 'X') /* DIVX        */
+#define V4L2_PIX_FMT_HEVC v4l2_fourcc('H', 'E', 'V', 'C') /* for HEVC stream */
+#define V4L2_PIX_FMT_HEVC_HYBRID v4l2_fourcc('H', 'V', 'C', 'H')
 
 /*  Vendor-specific formats   */
 #define V4L2_PIX_FMT_CPIA1    v4l2_fourcc('C', 'P', 'I', 'A') /* cpia1 YUV */
@@ -625,6 +649,79 @@
 #define V4L2_PIX_FMT_Y12I     v4l2_fourcc('Y', '1', '2', 'I') /* Greyscale 12-bit L/R interleaved */
 #define V4L2_PIX_FMT_Z16      v4l2_fourcc('Z', '1', '6', ' ') /* Depth data 16-bit */
 
+#define V4L2_PIX_FMT_SDE_ABGR_8888 \
+	v4l2_fourcc('R', 'A', '2', '4') /* 32-bit ABGR 8:8:8:8 */
+#define V4L2_PIX_FMT_SDE_RGBA_8888 \
+	v4l2_fourcc('A', 'B', '2', '4') /* 32-bit RGBA 8:8:8:8 */
+#define V4L2_PIX_FMT_SDE_RGBX_8888 \
+	v4l2_fourcc('X', 'B', '2', '4') /* 32-bit RGBX 8:8:8:8 */
+#define V4L2_PIX_FMT_SDE_XBGR_8888 \
+	v4l2_fourcc('R', 'X', '2', '4') /* 32-bit XBGR 8:8:8:8 */
+#define V4L2_PIX_FMT_SDE_RGBA_5551 \
+	v4l2_fourcc('R', 'A', '1', '5') /* 16-bit RGBA 5:5:5:1 */
+#define V4L2_PIX_FMT_SDE_ABGR_1555 \
+	v4l2_fourcc('A', 'B', '1', '5') /* 16-bit ABGR 1:5:5:5 */
+#define V4L2_PIX_FMT_SDE_BGRA_5551 \
+	v4l2_fourcc('B', 'A', '1', '5') /* 16-bit BGRA 5:5:5:1 */
+#define V4L2_PIX_FMT_SDE_BGRX_5551 \
+	v4l2_fourcc('B', 'X', '1', '5') /* 16-bit BGRX 5:5:5:1 */
+#define V4L2_PIX_FMT_SDE_RGBX_5551 \
+	v4l2_fourcc('R', 'X', '1', '5') /* 16-bit RGBX 5:5:5:1 */
+#define V4L2_PIX_FMT_SDE_XBGR_1555 \
+	v4l2_fourcc('X', 'B', '1', '5') /* 16-bit XBGR 1:5:5:5 */
+#define V4L2_PIX_FMT_SDE_RGBA_4444 \
+	v4l2_fourcc('R', 'A', '1', '2') /* 16-bit RGBA 4:4:4:4 */
+#define V4L2_PIX_FMT_SDE_BGRA_4444 \
+	v4l2_fourcc('b', 'A', '1', '2') /* 16-bit BGRA 4:4:4:4 */
+#define V4L2_PIX_FMT_SDE_ABGR_4444 \
+	v4l2_fourcc('A', 'B', '1', '2') /* 16-bit ABGR 4:4:4:4 */
+#define V4L2_PIX_FMT_SDE_RGBX_4444 \
+	v4l2_fourcc('R', 'X', '1', '2') /* 16-bit RGBX 4:4:4:4 */
+#define V4L2_PIX_FMT_SDE_BGRX_4444 \
+	v4l2_fourcc('B', 'X', '1', '2') /* 16-bit BGRX 4:4:4:4 */
+#define V4L2_PIX_FMT_SDE_XBGR_4444 \
+	v4l2_fourcc('X', 'B', '1', '2') /* 16-bit XBGR 4:4:4:4 */
+#define V4L2_PIX_FMT_SDE_BGR_565 \
+	v4l2_fourcc('B', 'G', '1', '6') /* 16-bit BGR 5:6:5 */
+#define V4L2_PIX_FMT_SDE_Y_CR_CB_GH2V2 \
+	v4l2_fourcc('Y', 'U', '4', '2') /* Planar YVU 4:2:0 A16 */
+#define V4L2_PIX_FMT_SDE_Y_CBCR_H1V2 \
+	v4l2_fourcc('N', 'H', '1', '6') /* Y/CbCr 4:2:2 */
+#define V4L2_PIX_FMT_SDE_Y_CRCB_H1V2 \
+	v4l2_fourcc('N', 'H', '6', '1') /* Y/CrCb 4:2:2 */
+#define V4L2_PIX_FMT_SDE_Y_CBCR_H2V2_VENUS \
+	v4l2_fourcc('Q', 'N', 'V', '2') /* Y/CbCr 4:2:0 Venus */
+#define V4L2_PIX_FMT_SDE_Y_CRCB_H2V2_VENUS \
+	v4l2_fourcc('Q', 'N', 'V', '1') /* Y/CrCb 4:2:0 Venus */
+#define V4L2_PIX_FMT_SDE_RGBX_8888_UBWC \
+	v4l2_fourcc('Q', 'X', 'B', '4') /* RGBX 8:8:8:8 UBWC */
+#define V4L2_PIX_FMT_SDE_RGB_565_UBWC \
+	v4l2_fourcc('Q', 'R', 'G', '6') /* RGB 5:6:5 UBWC */
+#define V4L2_PIX_FMT_SDE_RGBA_1010102 \
+	v4l2_fourcc('A', 'B', '3', '0') /* RGBA 10:10:10:2 */
+#define V4L2_PIX_FMT_SDE_RGBX_1010102 \
+	v4l2_fourcc('X', 'B', '3', '0') /* RGBX 10:10:10:2 */
+#define V4L2_PIX_FMT_SDE_ARGB_2101010 \
+	v4l2_fourcc('A', 'R', '3', '0') /* ARGB 2:10:10:10 */
+#define V4L2_PIX_FMT_SDE_XRGB_2101010 \
+	v4l2_fourcc('X', 'R', '3', '0') /* XRGB 2:10:10:10 */
+#define V4L2_PIX_FMT_SDE_BGRA_1010102 \
+	v4l2_fourcc('B', 'A', '3', '0') /* BGRA 10:10:10:2 */
+#define V4L2_PIX_FMT_SDE_BGRX_1010102 \
+	v4l2_fourcc('B', 'X', '3', '0') /* BGRX 10:10:10:2 */
+#define V4L2_PIX_FMT_SDE_ABGR_2101010 \
+	v4l2_fourcc('R', 'A', '3', '0') /* ABGR 2:10:10:10 */
+#define V4L2_PIX_FMT_SDE_XBGR_2101010 \
+	v4l2_fourcc('R', 'X', '3', '0') /* XBGR 2:10:10:10 */
+#define V4L2_PIX_FMT_SDE_RGBA_1010102_UBWC \
+	v4l2_fourcc('Q', 'R', 'B', 'A') /* RGBA 10:10:10:2 UBWC */
+#define V4L2_PIX_FMT_SDE_RGBX_1010102_UBWC \
+	v4l2_fourcc('Q', 'X', 'B', 'A') /* RGBX 10:10:10:2 UBWC */
+#define V4L2_PIX_FMT_SDE_Y_CBCR_H2V2_TP10 \
+	v4l2_fourcc('T', 'P', '1', '0') /* Y/CbCr 4:2:0 TP10 */
+#define V4L2_PIX_FMT_SDE_Y_CBCR_H2V2_P010 \
+	v4l2_fourcc('P', '0', '1', '0') /* Y/CbCr 4:2:0 P10 */
+
 /* SDR formats - used only for Software Defined Radio devices */
 #define V4L2_SDR_FMT_CU8          v4l2_fourcc('C', 'U', '0', '8') /* IQ u8 */
 #define V4L2_SDR_FMT_CU16LE       v4l2_fourcc('C', 'U', '1', '6') /* IQ u16le */
@@ -907,6 +1004,21 @@
 #define V4L2_BUF_FLAG_TSTAMP_SRC_SOE		0x00010000
 /* mem2mem encoder/decoder */
 #define V4L2_BUF_FLAG_LAST			0x00100000
+/* Vendor extensions */
+#define V4L2_QCOM_BUF_FLAG_CODECCONFIG		0x00020000
+#define V4L2_QCOM_BUF_FLAG_EOSEQ		0x00040000
+#define V4L2_QCOM_BUF_TIMESTAMP_INVALID		0x00080000
+#define V4L2_QCOM_BUF_FLAG_DECODEONLY		0x00200000
+#define V4L2_QCOM_BUF_DATA_CORRUPT		0x00400000
+#define V4L2_QCOM_BUF_DROP_FRAME		0x00800000
+#define V4L2_QCOM_BUF_INPUT_UNSUPPORTED		0x01000000
+#define V4L2_QCOM_BUF_FLAG_EOS			0x02000000
+#define V4L2_QCOM_BUF_FLAG_READONLY		0x04000000
+#define V4L2_MSM_VIDC_BUF_START_CODE_NOT_FOUND	0x08000000
+#define V4L2_MSM_BUF_FLAG_YUV_601_709_CLAMP	0x10000000
+#define V4L2_MSM_BUF_FLAG_MBAFF			0x20000000
+#define V4L2_MSM_BUF_FLAG_DEFER			0x40000000
+#define V4L2_QCOM_BUF_FLAG_IDRFRAME		0x80000000
 
 /**
  * struct v4l2_exportbuffer - export of video buffer as DMABUF file descriptor
@@ -1003,6 +1115,12 @@
 /*  Flags for 'capability' and 'capturemode' fields */
 #define V4L2_MODE_HIGHQUALITY	0x0001	/*  High quality imaging mode */
 #define V4L2_CAP_TIMEPERFRAME	0x1000	/*  timeperframe field is supported */
+#define V4L2_CAP_QCOM_FRAMESKIP	0x2000	/*  frame skipping is supported */
+
+struct v4l2_qcom_frameskip {
+	__u64		   maxframeinterval;
+	__u8		   fpsvariance;
+};
 
 struct v4l2_outputparm {
 	__u32		   capability;	 /*  Supported modes */
@@ -1561,6 +1679,7 @@
 #define V4L2_CTRL_FLAG_VOLATILE		0x0080
 #define V4L2_CTRL_FLAG_HAS_PAYLOAD	0x0100
 #define V4L2_CTRL_FLAG_EXECUTE_ON_WRITE	0x0200
+#define V4L2_CTRL_FLAG_MODIFY_LAYOUT	0x0400
 
 /*  Query flags, to be ORed with the control ID */
 #define V4L2_CTRL_FLAG_NEXT_CTRL	0x80000000
@@ -1744,6 +1863,7 @@
 #define V4L2_ENC_CMD_STOP       (1)
 #define V4L2_ENC_CMD_PAUSE      (2)
 #define V4L2_ENC_CMD_RESUME     (3)
+#define V4L2_ENC_QCOM_CMD_FLUSH  (4)
 
 /* Flags for V4L2_ENC_CMD_STOP */
 #define V4L2_ENC_CMD_STOP_AT_GOP_END    (1 << 0)
@@ -1763,6 +1883,8 @@
 #define V4L2_DEC_CMD_STOP        (1)
 #define V4L2_DEC_CMD_PAUSE       (2)
 #define V4L2_DEC_CMD_RESUME      (3)
+#define V4L2_DEC_QCOM_CMD_FLUSH  (4)
+#define V4L2_DEC_QCOM_CMD_RECONFIG_HINT  (5)
 
 /* Flags for V4L2_DEC_CMD_START */
 #define V4L2_DEC_CMD_START_MUTE_AUDIO	(1 << 0)
@@ -1774,6 +1896,13 @@
 #define V4L2_DEC_CMD_STOP_TO_BLACK	(1 << 0)
 #define V4L2_DEC_CMD_STOP_IMMEDIATELY	(1 << 1)
 
+/* Flags for V4L2_DEC_QCOM_CMD_FLUSH */
+#define V4L2_DEC_QCOM_CMD_FLUSH_OUTPUT  (1 << 0)
+#define V4L2_DEC_QCOM_CMD_FLUSH_CAPTURE (1 << 1)
+
+#define V4L2_QCOM_CMD_FLUSH_OUTPUT  (1 << 0)
+#define V4L2_QCOM_CMD_FLUSH_CAPTURE (1 << 1)
+
 /* Play format requirements (returned by the driver): */
 
 /* The decoder has no special format requirements */
@@ -2039,6 +2168,56 @@
 #define V4L2_EVENT_MOTION_DET			6
 #define V4L2_EVENT_PRIVATE_START		0x08000000
 
+#define V4L2_EVENT_BITDEPTH_FLAG	0x1
+#define V4L2_EVENT_PICSTRUCT_FLAG	0x2
+#define V4L2_EVENT_COLOUR_SPACE_FLAG    0x4
+
+#define V4L2_EVENT_MSM_VIDC_START	(V4L2_EVENT_PRIVATE_START + 0x00001000)
+#define V4L2_EVENT_MSM_VIDC_FLUSH_DONE	(V4L2_EVENT_MSM_VIDC_START + 1)
+#define V4L2_EVENT_MSM_VIDC_PORT_SETTINGS_CHANGED_SUFFICIENT	\
+		(V4L2_EVENT_MSM_VIDC_START + 2)
+#define V4L2_EVENT_MSM_VIDC_PORT_SETTINGS_CHANGED_INSUFFICIENT	\
+		(V4L2_EVENT_MSM_VIDC_START + 3)
+/*
+ * The bitdepth-changed-insufficient event is deprecated; it is kept
+ * only so the values of the macros that follow it do not change.
+ */
+#define V4L2_EVENT_MSM_VIDC_PORT_SETTINGS_BITDEPTH_CHANGED_INSUFFICIENT \
+		(V4L2_EVENT_MSM_VIDC_START + 4)
+#define V4L2_EVENT_MSM_VIDC_SYS_ERROR	(V4L2_EVENT_MSM_VIDC_START + 5)
+#define V4L2_EVENT_MSM_VIDC_RELEASE_BUFFER_REFERENCE \
+		(V4L2_EVENT_MSM_VIDC_START + 6)
+#define V4L2_EVENT_MSM_VIDC_RELEASE_UNQUEUED_BUFFER \
+		(V4L2_EVENT_MSM_VIDC_START + 7)
+#define V4L2_EVENT_MSM_VIDC_HW_OVERLOAD (V4L2_EVENT_MSM_VIDC_START + 8)
+#define V4L2_EVENT_MSM_VIDC_MAX_CLIENTS (V4L2_EVENT_MSM_VIDC_START + 9)
+#define V4L2_EVENT_MSM_VIDC_HW_UNSUPPORTED (V4L2_EVENT_MSM_VIDC_START + 10)
+
+#define V4L2_EVENT_MSM_BA_PRIVATE_EVENT_BASE	\
+		(V4L2_EVENT_PRIVATE_START + 0x00005000)
+#define V4L2_EVENT_MSM_BA_START	V4L2_EVENT_MSM_BA_PRIVATE_EVENT_BASE
+#define V4L2_EVENT_MSM_BA_DEVICE_AVAILABLE	(V4L2_EVENT_MSM_BA_START + 1)
+#define V4L2_EVENT_MSM_BA_DEVICE_UNAVAILABLE	\
+		(V4L2_EVENT_MSM_BA_START + 2)
+#define V4L2_EVENT_MSM_BA_PORT_SETTINGS_CHANGED	\
+		(V4L2_EVENT_MSM_BA_START + 3)
+#define V4L2_EVENT_MSM_BA_SIGNAL_IN_LOCK	\
+		(V4L2_EVENT_MSM_BA_START + 4)
+#define V4L2_EVENT_MSM_BA_SIGNAL_LOST_LOCK	\
+		(V4L2_EVENT_MSM_BA_START + 5)
+#define V4L2_EVENT_MSM_BA_SOURCE_CHANGE	\
+		(V4L2_EVENT_MSM_BA_START + 6)
+#define V4L2_EVENT_MSM_BA_HDMI_HPD	\
+		(V4L2_EVENT_MSM_BA_START + 7)
+#define V4L2_EVENT_MSM_BA_HDMI_CEC_MESSAGE	\
+		(V4L2_EVENT_MSM_BA_START + 8)
+#define V4L2_EVENT_MSM_BA_CP	\
+		(V4L2_EVENT_MSM_BA_START + 9)
+#define V4L2_EVENT_MSM_BA_CABLE_DETECT	\
+		(V4L2_EVENT_MSM_BA_START + 10)
+#define V4L2_EVENT_MSM_BA_ERROR	\
+		(V4L2_EVENT_MSM_BA_START + 11)
+
 /* Payload for V4L2_EVENT_VSYNC */
 struct v4l2_event_vsync {
 	/* Can be V4L2_FIELD_ANY, _NONE, _TOP or _BOTTOM */
@@ -2294,4 +2473,11 @@
 
 #define BASE_VIDIOC_PRIVATE	192		/* 192-255 are private */
 
+/* HDMI rx provide ioctls */
+#define VIDIOC_HDMI_RX_CEC_S_LOGICAL _IOW('V', BASE_VIDIOC_PRIVATE + 0, int)
+#define VIDIOC_HDMI_RX_CEC_CLEAR_LOGICAL _IO('V', BASE_VIDIOC_PRIVATE + 1)
+#define VIDIOC_HDMI_RX_CEC_G_PHYSICAL _IOR('V', BASE_VIDIOC_PRIVATE + 2, int)
+#define VIDIOC_HDMI_RX_CEC_G_CONNECTED _IOR('V', BASE_VIDIOC_PRIVATE + 3, int)
+#define VIDIOC_HDMI_RX_CEC_S_ENABLE _IOR('V', BASE_VIDIOC_PRIVATE + 4, int)
+
 #endif /* _UAPI__LINUX_VIDEODEV2_H */
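Two observations on the videodev2.h hunks above. First, V4L2_PIX_FMT_SDE_BGR_565 and V4L2_PIX_FMT_SBGGRPLAIN16 both expand to v4l2_fourcc('B', 'G', '1', '6'), so the two formats are indistinguishable by fourcc alone; callers must rely on context (SDE vs. camera paths) to tell them apart. Second, the V4L2_EVENT_MSM_VIDC_* codes plug into the stock V4L2 event API: subscribe, poll for POLLPRI, then dequeue. A sketch, reusing the fd from the previous example (return values unchecked):

	struct v4l2_event_subscription sub = {
		.type = V4L2_EVENT_MSM_VIDC_PORT_SETTINGS_CHANGED_INSUFFICIENT,
	};
	ioctl(fd, VIDIOC_SUBSCRIBE_EVENT, &sub);

	struct v4l2_event ev;
	if (ioctl(fd, VIDIOC_DQEVENT, &ev) == 0 &&
	    ev.type == V4L2_EVENT_MSM_VIDC_PORT_SETTINGS_CHANGED_INSUFFICIENT) {
		/* Resolution grew: drain and reallocate the CAPTURE queue. */
	}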
diff -ruw linux-4.4.115/include/uapi/linux/xfrm.h linux-4.4.115-fbx/include/uapi/linux/xfrm.h
--- linux-4.4.115/include/uapi/linux/xfrm.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/uapi/linux/xfrm.h	2019-01-22 16:16:28.599292554 +0100
@@ -302,6 +302,9 @@
 	XFRMA_SA_EXTRA_FLAGS,	/* __u32 */
 	XFRMA_PROTO,		/* __u8 */
 	XFRMA_ADDRESS_FILTER,	/* struct xfrm_address_filter */
+	XFRMA_PAD,
+	XFRMA_OFFLOAD_DEV,	/* struct xfrm_state_offload */
+	XFRMA_OUTPUT_MARK,	/* __u32 */
 	__XFRMA_MAX
 
 #define XFRMA_MAX (__XFRMA_MAX - 1)
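Since the XFRMA_* values are netlink ABI, new attributes can only be appended immediately before __XFRMA_MAX, which is what this hunk does (XFRMA_PAD matches the upstream slot reserved for 64-bit alignment of attribute payloads). Emitting the new mark from a kernel-side dump path is a one-liner; a sketch, assuming the state carries the field as props.output_mark as in the upstream backport:

	if (x->props.output_mark &&
	    nla_put_u32(skb, XFRMA_OUTPUT_MARK, x->props.output_mark))
		goto nla_put_failure;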
diff -ruw linux-4.4.115/include/uapi/scsi/Kbuild linux-4.4.115-fbx/include/uapi/scsi/Kbuild
--- linux-4.4.115/include/uapi/scsi/Kbuild	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/uapi/scsi/Kbuild	2019-01-22 16:16:28.603292590 +0100
@@ -1,5 +1,6 @@
 # UAPI Header export list
 header-y += fc/
+header-y += ufs/
 header-y += scsi_bsg_fc.h
 header-y += scsi_netlink.h
 header-y += scsi_netlink_fc.h
diff -ruw linux-4.4.115/include/uapi/sound/asound.h linux-4.4.115-fbx/include/uapi/sound/asound.h
--- linux-4.4.115/include/uapi/sound/asound.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/uapi/sound/asound.h	2019-01-22 16:16:28.607292626 +0100
@@ -102,9 +102,11 @@
 	SNDRV_HWDEP_IFACE_FW_OXFW,	/* Oxford OXFW970/971 based device */
 	SNDRV_HWDEP_IFACE_FW_DIGI00X,	/* Digidesign Digi 002/003 family */
 	SNDRV_HWDEP_IFACE_FW_TASCAM,	/* TASCAM FireWire series */
+	SNDRV_HWDEP_IFACE_AUDIO_BE,	/* Backend Audio Control */
+	SNDRV_HWDEP_IFACE_AUDIO_CODEC,  /* codec Audio Control */
 
 	/* Don't forget to change the following: */
-	SNDRV_HWDEP_IFACE_LAST = SNDRV_HWDEP_IFACE_FW_TASCAM
+	SNDRV_HWDEP_IFACE_LAST = SNDRV_HWDEP_IFACE_AUDIO_CODEC
 };
 
 struct snd_hwdep_info {
diff -ruw linux-4.4.115/include/uapi/sound/compress_offload.h linux-4.4.115-fbx/include/uapi/sound/compress_offload.h
--- linux-4.4.115/include/uapi/sound/compress_offload.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/uapi/sound/compress_offload.h	2019-01-22 16:16:28.607292626 +0100
@@ -66,10 +66,11 @@
  */
 struct snd_compr_tstamp {
 	__u32 byte_offset;
-	__u32 copied_total;
+	__u64 copied_total;
 	__u32 pcm_frames;
 	__u32 pcm_io_frames;
 	__u32 sampling_rate;
+	__u64 timestamp;
 } __attribute__((packed, aligned(4)));
 
 /**
@@ -122,17 +123,65 @@
 } __attribute__((packed, aligned(4)));
 
 /**
+ * struct snd_compr_audio_info: compressed input audio information
+ * @frame_size: legth of the encoded frame with valid data
+ * @reserved: reserved for furture use
+ */
+struct snd_compr_audio_info {
+	__u32 frame_size;
+	__u32 reserved[15];
+} __attribute__((packed, aligned(4)));
+
+#define SNDRV_COMPRESS_RENDER_MODE_AUDIO_MASTER 0
+#define SNDRV_COMPRESS_RENDER_MODE_STC_MASTER 1
+
+#define SNDRV_COMPRESS_CLK_REC_MODE_NONE 0
+#define SNDRV_COMPRESS_CLK_REC_MODE_AUTO 1
+
+enum sndrv_compress_latency_mode {
+	SNDRV_COMPRESS_LEGACY_LATENCY_MODE = 0,
+	SNDRV_COMPRESS_LOW_LATENCY_MODE = 1,
+};
+
+/**
  * enum sndrv_compress_encoder
  * @SNDRV_COMPRESS_ENCODER_PADDING: no of samples appended by the encoder at the
  * end of the track
  * @SNDRV_COMPRESS_ENCODER_DELAY: no of samples inserted by the encoder at the
  * beginning of the track
+ * @SNDRV_COMPRESS_PATH_DELAY: dsp path delay in microseconds
+ * @SNDRV_COMPRESS_RENDER_MODE: dsp render mode (audio master or stc)
+ * @SNDRV_COMPRESS_CLK_REC_MODE: clock recovery mode (none or auto)
+ * @SNDRV_COMPRESS_RENDER_WINDOW: render window
+ * @SNDRV_COMPRESS_START_DELAY: start delay
+ * @SNDRV_COMPRESS_ENABLE_ADJUST_SESSION_CLOCK: enable dsp drift correction
+ * @SNDRV_COMPRESS_ADJUST_SESSION_CLOCK: set drift correction value
  */
 enum sndrv_compress_encoder {
 	SNDRV_COMPRESS_ENCODER_PADDING = 1,
 	SNDRV_COMPRESS_ENCODER_DELAY = 2,
+	SNDRV_COMPRESS_MIN_BLK_SIZE = 3,
+	SNDRV_COMPRESS_MAX_BLK_SIZE = 4,
+	SNDRV_COMPRESS_PATH_DELAY = 5,
+	SNDRV_COMPRESS_RENDER_MODE = 6,
+	SNDRV_COMPRESS_CLK_REC_MODE = 7,
+	SNDRV_COMPRESS_RENDER_WINDOW = 8,
+	SNDRV_COMPRESS_START_DELAY = 9,
+	SNDRV_COMPRESS_ENABLE_ADJUST_SESSION_CLOCK = 10,
+	SNDRV_COMPRESS_ADJUST_SESSION_CLOCK = 11,
+	SNDRV_COMPRESS_LATENCY_MODE = 12,
 };
 
+#define SNDRV_COMPRESS_PATH_DELAY SNDRV_COMPRESS_PATH_DELAY
+#define SNDRV_COMPRESS_RENDER_MODE SNDRV_COMPRESS_RENDER_MODE
+#define SNDRV_COMPRESS_CLK_REC_MODE SNDRV_COMPRESS_CLK_REC_MODE
+#define SNDRV_COMPRESS_RENDER_WINDOW SNDRV_COMPRESS_RENDER_WINDOW
+#define SNDRV_COMPRESS_START_DELAY SNDRV_COMPRESS_START_DELAY
+#define SNDRV_COMPRESS_ENABLE_ADJUST_SESSION_CLOCK \
+			SNDRV_COMPRESS_ENABLE_ADJUST_SESSION_CLOCK
+#define SNDRV_COMPRESS_ADJUST_SESSION_CLOCK SNDRV_COMPRESS_ADJUST_SESSION_CLOCK
+#define SNDRV_COMPRESS_LATENCY_MODE SNDRV_COMPRESS_LATENCY_MODE
+
 /**
  * struct snd_compr_metadata - compressed stream metadata
  * @key: key id
@@ -159,6 +208,8 @@
  * SNDRV_COMPRESS_STOP: stop a running stream, discarding ring buffer content
  * and the buffers currently with DSP
  * SNDRV_COMPRESS_DRAIN: Play till end of buffers and stop after that
+ * SNDRV_COMPRESS_SET_NEXT_TRACK_PARAM: send codec specific data for the next
+ * track in gapless
  * SNDRV_COMPRESS_IOCTL_VERSION: Query the API version
  */
 #define SNDRV_COMPRESS_IOCTL_VERSION	_IOR('C', 0x00, int)
@@ -180,6 +231,8 @@
 #define SNDRV_COMPRESS_DRAIN		_IO('C', 0x34)
 #define SNDRV_COMPRESS_NEXT_TRACK	_IO('C', 0x35)
 #define SNDRV_COMPRESS_PARTIAL_DRAIN	_IO('C', 0x36)
+#define SNDRV_COMPRESS_SET_NEXT_TRACK_PARAM\
+					_IOW('C', 0x80, union snd_codec_options)
 /*
  * TODO
  * 1. add mmap support
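SNDRV_COMPRESS_SET_NEXT_TRACK_PARAM carries a whole union snd_codec_options, which is how gapless playback hands the DSP the codec-specific parameters of the next track before SNDRV_COMPRESS_NEXT_TRACK is issued. A sketch, assuming cfd is an open compress-offload device descriptor and the next track is ALAC:

	union snd_codec_options opts = {0};
	opts.alac.frame_length = 4096;
	opts.alac.bit_depth    = 16;
	opts.alac.num_channels = 2;
	if (ioctl(cfd, SNDRV_COMPRESS_SET_NEXT_TRACK_PARAM, &opts) < 0)
		perror("SNDRV_COMPRESS_SET_NEXT_TRACK_PARAM");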
diff -ruw linux-4.4.115/include/uapi/sound/compress_params.h linux-4.4.115-fbx/include/uapi/sound/compress_params.h
--- linux-4.4.115/include/uapi/sound/compress_params.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/uapi/sound/compress_params.h	2019-01-22 16:16:28.607292626 +0100
@@ -53,12 +53,28 @@
 
 #include <linux/types.h>
 
+#define SND_DEC_DDP_MAX_PARAMS 18
+
 /* AUDIO CODECS SUPPORTED */
 #define MAX_NUM_CODECS 32
 #define MAX_NUM_CODEC_DESCRIPTORS 32
 #define MAX_NUM_BITRATES 32
 #define MAX_NUM_SAMPLE_RATES 32
 
+/* compressed TX */
+#define MAX_NUM_FRAMES_PER_BUFFER 1
+#define COMPRESSED_META_DATA_MODE 0x10
+#define META_DATA_LEN_BYTES 36
+#define Q6_AC3_DECODER	0x00010BF6
+#define Q6_EAC3_DECODER 0x00010C3C
+#define Q6_DTS		0x00010D88
+#define Q6_DTS_LBR	0x00010DBB
+
+/* Timestamp flag */
+/* Bit 0 = 1: enable timestamp mode */
+/* Bit 0 = 0: disable timestamp mode */
+#define COMPRESSED_TIMESTAMP_FLAG 0x0001
+
 /* Codecs are listed linearly to allow for extensibility */
 #define SND_AUDIOCODEC_PCM                   ((__u32) 0x00000001)
 #define SND_AUDIOCODEC_MP3                   ((__u32) 0x00000002)
@@ -73,8 +89,23 @@
 #define SND_AUDIOCODEC_IEC61937              ((__u32) 0x0000000B)
 #define SND_AUDIOCODEC_G723_1                ((__u32) 0x0000000C)
 #define SND_AUDIOCODEC_G729                  ((__u32) 0x0000000D)
-#define SND_AUDIOCODEC_MAX                   SND_AUDIOCODEC_G729
-
+#define SND_AUDIOCODEC_DTS_PASS_THROUGH      ((__u32) 0x0000000E)
+#define SND_AUDIOCODEC_DTS_LBR               ((__u32) 0x0000000F)
+#define SND_AUDIOCODEC_DTS_TRANSCODE_LOOPBACK ((__u32) 0x00000010)
+#define SND_AUDIOCODEC_PASS_THROUGH          ((__u32) 0x00000011)
+#define SND_AUDIOCODEC_MP2                   ((__u32) 0x00000012)
+#define SND_AUDIOCODEC_DTS_LBR_PASS_THROUGH  ((__u32) 0x00000013)
+#define SND_AUDIOCODEC_AC3                   ((__u32) 0x00000014)
+#define SND_AUDIOCODEC_AC3_PASS_THROUGH      ((__u32) 0x00000015)
+#define SND_AUDIOCODEC_WMA_PRO               ((__u32) 0x00000016)
+#define SND_AUDIOCODEC_DTS                   ((__u32) 0x00000017)
+#define SND_AUDIOCODEC_EAC3                  ((__u32) 0x00000018)
+#define SND_AUDIOCODEC_ALAC                  ((__u32) 0x00000019)
+#define SND_AUDIOCODEC_APE                   ((__u32) 0x00000020)
+#define SND_AUDIOCODEC_DSD                   ((__u32) 0x00000021)
+#define SND_AUDIOCODEC_APTX                  ((__u32) 0x00000022)
+#define SND_AUDIOCODEC_TRUEHD                ((__u32) 0x00000023)
+#define SND_AUDIOCODEC_MAX                   SND_AUDIOCODEC_TRUEHD
 /*
  * Profile and modes are listed with bit masks. This allows for a
  * more compact representation of fields that will not evolve
@@ -239,6 +270,12 @@
 
 struct snd_enc_wma {
 	__u32 super_block_align; /* WMA Type-specific data */
+	__u32 bits_per_sample;
+	__u32 channelmask;
+	__u32 encodeopt;
+	__u32 encodeopt1;
+	__u32 encodeopt2;
+	__u32 avg_bit_rate;
 };
 
 
@@ -315,13 +352,71 @@
 	__s32 reserved[15];
 } __attribute__((packed, aligned(4)));
 
+struct snd_dec_ddp {
+	__u32 params_length;
+	__u32 params_id[SND_DEC_DDP_MAX_PARAMS];
+	__u32 params_value[SND_DEC_DDP_MAX_PARAMS];
+} __attribute__((packed, aligned(4)));
+
+struct snd_dec_flac {
+	__u16 sample_size;
+	__u16 min_blk_size;
+	__u16 max_blk_size;
+	__u16 min_frame_size;
+	__u16 max_frame_size;
+} __attribute__((packed, aligned(4)));
+
+struct snd_dec_vorbis {
+	__u32 bit_stream_fmt;
+};
+
+struct snd_dec_alac {
+	__u32 frame_length;
+	__u8 compatible_version;
+	__u8 bit_depth;
+	__u8 pb;
+	__u8 mb;
+	__u8 kb;
+	__u8 num_channels;
+	__u16 max_run;
+	__u32 max_frame_bytes;
+	__u32 avg_bit_rate;
+	__u32 sample_rate;
+	__u32 channel_layout_tag;
+};
+
+struct snd_dec_ape {
+	__u16 compatible_version;
+	__u16 compression_level;
+	__u32 format_flags;
+	__u32 blocks_per_frame;
+	__u32 final_frame_blocks;
+	__u32 total_frames;
+	__u16 bits_per_sample;
+	__u16 num_channels;
+	__u32 sample_rate;
+	__u32 seek_table_present;
+};
+
+struct snd_dec_aptx {
+	__u32 lap;
+	__u32 uap;
+	__u32 nap;
+};
+
 union snd_codec_options {
 	struct snd_enc_wma wma;
 	struct snd_enc_vorbis vorbis;
 	struct snd_enc_real real;
 	struct snd_enc_flac flac;
 	struct snd_enc_generic generic;
-} __attribute__((packed, aligned(4)));
+	struct snd_dec_ddp ddp;
+	struct snd_dec_flac flac_dec;
+	struct snd_dec_vorbis vorbis_dec;
+	struct snd_dec_alac alac;
+	struct snd_dec_ape ape;
+	struct snd_dec_aptx aptx_dec;
+};
 
 /** struct snd_codec_desc - description of codec capabilities
  * @max_ch: Maximum number of audio channels
@@ -397,8 +492,26 @@
 	__u32 ch_mode;
 	__u32 format;
 	__u32 align;
+	__u32 compr_passthr;
 	union snd_codec_options options;
-	__u32 reserved[3];
+	__u32 flags;
+	__u32 reserved[2];
 } __attribute__((packed, aligned(4)));
 
+
+/** struct snd_codec_metadata
+ * @length: Length of the encoded buffer.
+ * @offset: Offset from the buffer address to the first byte of the first
+ *		encoded frame. All encoded frames are consecutive starting
+ *		from this offset.
+ * @timestamp: Session time in microseconds of the first sample in the buffer.
+ * @reserved: Reserved for future use.
+ */
+struct snd_codec_metadata {
+	__u32 length;
+	__u32 offset;
+	__u64 timestamp;
+	__u32 reserved[4];
+};
+
 #endif
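Note the layout consequence of the struct snd_codec hunk: flags takes over one of the old reserved words, but compr_passthr is new storage inserted ahead of options, so the structure grows by four bytes relative to mainline and the two ABIs are not interchangeable. Filling the extended structure for a timestamped compressed stream might look like this (an illustrative sketch only; field units are as documented in the surrounding header):

	struct snd_codec codec = {0};
	codec.id          = SND_AUDIOCODEC_AC3;
	codec.ch_in       = 2;
	codec.ch_out      = 2;
	codec.sample_rate = 48000;
	codec.flags       = COMPRESSED_TIMESTAMP_FLAG; /* per-buffer timestamps */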
diff -ruw linux-4.4.115/include/uapi/sound/Kbuild linux-4.4.115-fbx/include/uapi/sound/Kbuild
--- linux-4.4.115/include/uapi/sound/Kbuild	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/uapi/sound/Kbuild	2019-01-22 16:16:28.607292626 +0100
@@ -10,3 +10,12 @@
 header-y += hdspm.h
 header-y += sb16_csp.h
 header-y += sfnt_info.h
+header-y += tlv.h
+header-y += lsm_params.h
+header-y += audio_slimslave.h
+header-y += voice_params.h
+header-y += audio_effects.h
+header-y += voice_svc.h
+header-y += devdep_params.h
+header-y += msmcal-hwdep.h
+header-y += wcd-dsp-glink.h
diff -ruw linux-4.4.115/include/uapi/video/Kbuild linux-4.4.115-fbx/include/uapi/video/Kbuild
--- linux-4.4.115/include/uapi/video/Kbuild	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/include/uapi/video/Kbuild	2019-01-22 16:16:28.607292626 +0100
@@ -2,3 +2,5 @@
 header-y += edid.h
 header-y += sisfb.h
 header-y += uvesafb.h
+header-y += msm_hdmi_modes.h
+header-y += msm_hdmi_hdcp_mgr.h
diff -ruw linux-4.4.115/init/do_mounts.c linux-4.4.115-fbx/init/do_mounts.c
--- linux-4.4.115/init/do_mounts.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/init/do_mounts.c	2019-01-22 16:16:28.623292771 +0100
@@ -566,6 +566,7 @@
 	wait_for_device_probe();
 
 	md_run_setup();
+	dm_run_setup();
 
 	if (saved_root_name[0]) {
 		root_device_name = saved_root_name;
diff -ruw linux-4.4.115/init/do_mounts.h linux-4.4.115-fbx/init/do_mounts.h
--- linux-4.4.115/init/do_mounts.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/init/do_mounts.h	2019-01-22 16:16:28.623292771 +0100
@@ -74,3 +74,13 @@
 static inline void md_run_setup(void) {}
 
 #endif
+
+#ifdef CONFIG_BLK_DEV_DM
+
+void dm_run_setup(void);
+
+#else
+
+static inline void dm_run_setup(void) {}
+
+#endif
diff -ruw linux-4.4.115/init/initramfs.c linux-4.4.115-fbx/init/initramfs.c
--- linux-4.4.115/init/initramfs.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/init/initramfs.c	2019-01-22 16:16:28.623292771 +0100
@@ -18,6 +18,7 @@
 #include <linux/dirent.h>
 #include <linux/syscalls.h>
 #include <linux/utime.h>
+#include <linux/initramfs.h>
 
 static ssize_t __init xwrite(int fd, const char *p, size_t count)
 {
@@ -605,9 +606,28 @@
 }
 #endif
 
+static int __initdata do_skip_initramfs;
+
+static int __init skip_initramfs_param(char *str)
+{
+	if (*str)
+		return 0;
+	do_skip_initramfs = 1;
+	return 1;
+}
+__setup("skip_initramfs", skip_initramfs_param);
+
 static int __init populate_rootfs(void)
 {
-	char *err = unpack_to_rootfs(__initramfs_start, __initramfs_size);
+	char *err;
+
+	if (do_skip_initramfs) {
+		if (initrd_start)
+			free_initrd();
+		return default_rootfs();
+	}
+
+	err = unpack_to_rootfs(__initramfs_start, __initramfs_size);
 	if (err)
 		panic("%s", err); /* Failed to decompress INTERNAL initramfs */
 	if (initrd_start) {
@@ -628,7 +648,18 @@
 		fd = sys_open("/initrd.image",
 			      O_WRONLY|O_CREAT, 0700);
 		if (fd >= 0) {
-			ssize_t written = xwrite(fd, (char *)initrd_start,
+			ssize_t written;
+#ifdef CONFIG_FBX_DECRYPT_INITRD
+			int err;
+			extern int fbx_decrypt_initrd(char *start,
+						      u32 size);
+
+			err = fbx_decrypt_initrd((char*)initrd_start,
+						 initrd_end - initrd_start);
+			if (err)
+				printk(KERN_ERR "Decrypt failed: %i\n", err);
+#endif
+			written = xwrite(fd, (char *)initrd_start,
 						initrd_end - initrd_start);
 
 			if (written != initrd_end - initrd_start)
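The __setup handler above accepts only the bare token: skip_initramfs_param() returns 0 (parameter not consumed) as soon as any argument text follows, so "skip_initramfs=1" is rejected while "skip_initramfs" alone sets the flag. The switch is normally flipped from the bootloader; an illustrative command line (not taken from the patch):

	console=ttyMSM0,115200 root=/dev/mmcblk0p23 rootwait skip_initramfs

When the flag is set, populate_rootfs() frees any initrd it was handed and falls back to default_rootfs(), the same minimal /dev and /root population used by kernels built without initramfs support.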
diff -ruw linux-4.4.115/init/init_task.c linux-4.4.115-fbx/init/init_task.c
--- linux-4.4.115/init/init_task.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/init/init_task.c	2019-01-22 16:16:28.623292771 +0100
@@ -22,5 +22,8 @@
  * Initial thread structure. Alignment of this is handled by a special
  * linker map entry.
  */
-union thread_union init_thread_union __init_task_data =
-	{ INIT_THREAD_INFO(init_task) };
+union thread_union init_thread_union __init_task_data = {
+#ifndef CONFIG_THREAD_INFO_IN_TASK
+	INIT_THREAD_INFO(init_task)
+#endif
+};
diff -ruw linux-4.4.115/init/Kconfig linux-4.4.115-fbx/init/Kconfig
--- linux-4.4.115/init/Kconfig	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/init/Kconfig	2019-10-29 09:26:25.557221909 +0100
@@ -26,6 +26,16 @@
 config BUILDTIME_EXTABLE_SORT
 	bool
 
+config THREAD_INFO_IN_TASK
+	bool
+	help
+	  Select this to move thread_info off the stack into task_struct.  To
+	  make this work, an arch will need to remove all thread_info fields
+	  except flags and fix any runtime bugs.
+
+	  One subtle change that will be needed is to use try_get_task_stack()
+	  and put_task_stack() in save_thread_stack_tsk() and get_wchan().
+
 menu "General setup"
 
 config BROKEN
@@ -392,6 +402,16 @@
 
 endchoice
 
+config SCHED_WALT
+	bool "Support window based load tracking"
+	depends on SMP
+	depends on FAIR_GROUP_SCHED
+	help
+	  This feature will allow the scheduler to maintain a tunable window
+	  based set of metrics for tasks and runqueues. These metrics can be
+	  used to guide task placement as well as task frequency requirements
+	  for cpufreq governors.
+
 config BSD_PROCESS_ACCT
 	bool "BSD Process Accounting"
 	depends on MULTIUSER
@@ -866,6 +886,15 @@
 		     13 =>   8 KB for each CPU
 		     12 =>   4 KB for each CPU
 
+config FBX_DECRYPT_INITRD
+	bool "Decrypt initrd at boot"
+	depends on BLK_DEV_RAM
+	default n
+
+config FBX_DECRYPT_INITRD_KEY
+	string "Decryption key"
+	depends on FBX_DECRYPT_INITRD
+
 #
 # Architectures with an unreliable sched_clock() should select this:
 #
@@ -999,9 +1028,26 @@
 	  Provides a simple Resource Controller for monitoring the
 	  total CPU consumed by the tasks in a cgroup.
 
+config CGROUP_SCHEDTUNE
+	bool "CFS tasks boosting cgroup subsystem (EXPERIMENTAL)"
+	depends on SCHED_TUNE
+	help
+	  This option provides the "schedtune" controller which improves the
+	  flexibility of the task boosting mechanism by introducing the support
+	  to define "per task" boost values.
+
+	  This new controller:
+	  1. allows only a two-layer hierarchy, where the root defines the
+	     system-wide boost value and each of its direct children defines
+	     a different "class of tasks" to be boosted with a different value
+	  2. supports up to 16 different task classes, each of which can be
+	     configured with a different boost value
+
+	  Say N if unsure.
+
 config PAGE_COUNTER
        bool
 
 config MEMCG
 	bool "Memory Resource Controller for Control Groups"
 	select PAGE_COUNTER
@@ -1153,6 +1216,33 @@
 
 endif # CGROUPS
 
+config SCHED_HMP
+	bool "Scheduler support for heterogeneous multi-processor systems"
+	depends on SMP && FAIR_GROUP_SCHED
+	help
+	  This feature will let the scheduler optimize task placement on
+	  systems made of heterogeneous cpus, i.e. cpus that differ either
+	  in their instructions-per-cycle capability or in the maximum
+	  frequency they can attain.
+
+config SCHED_HMP_CSTATE_AWARE
+	bool "CPU C-state aware scheduler"
+	depends on SCHED_HMP
+	help
+	  This feature will let the HMP scheduler optimize task placement
+	  using CPU C-states. If this is enabled, the scheduler places tasks
+	  on the shallowest C-state CPU among the most power-efficient CPUs.
+
+config SCHED_CORE_CTL
+	bool "QTI Core Control"
+	depends on SMP
+	help
+	  This option enables the core control functionality in
+	  the scheduler. Core control automatically offlines and
+	  onlines cores based on cpu load and utilization.
+
+	  If unsure, say N here.
+
 config CHECKPOINT_RESTORE
 	bool "Checkpoint/restore support" if EXPERT
 	select PROC_CHILDREN
@@ -1237,6 +1327,43 @@
 	  desktop applications.  Task group autogeneration is currently based
 	  upon task session.
 
+config SCHED_TUNE
+	bool "Boosting for CFS tasks (EXPERIMENTAL)"
+	depends on SMP
+	help
+	  This option enables the system-wide support for task boosting.
+	  When this support is enabled a new sysctl interface is exposed to
+	  userspace via:
+	     /proc/sys/kernel/sched_cfs_boost
+	  which allows setting a system-wide boost value in the range [0..100].
+
+	  The current boosting strategy is implemented in such a way that:
+	  - a 0% boost value requires the scheduler to operate in "standard"
+	    mode, scheduling all tasks at the minimum capacity required by
+	    their workload demand
+	  - a 100% boost value requires the scheduler to push task performance
+	    to the maximum, "regardless" of the incurred energy consumption
+
+	  A boost value in between these two boundaries is used to bias the
+	  power/performance trade-off: the higher the boost value, the more
+	  the scheduler is biased toward performance boosting instead of
+	  energy efficiency.
+
+	  Since this support exposes a single system-wide knob, the specified
+	  boost value is applied to all (CFS) tasks in the system.
+
+	  If unsure, say N.
+
+config DEFAULT_USE_ENERGY_AWARE
+	bool "Default to enabling the Energy Aware Scheduler feature"
+	default n
+	help
+	  This option defaults the ENERGY_AWARE scheduling feature to true,
+	  as without SCHED_DEBUG set this feature can't be enabled or disabled
+	  via sysctl.
+
+	  Say N if unsure.
+
 config SYSFS_DEPRECATED
 	bool "Enable deprecated sysfs features to support old userspace tools"
 	depends on SYSFS
@@ -1355,7 +1482,6 @@
 menuconfig EXPERT
 	bool "Configure standard kernel features (expert users)"
 	# Unhide debug options, to make the on-by-default options visible
-	select DEBUG_KERNEL
 	help
 	  This option allows certain base kernel options and settings
           to be disabled or tweaked. This is for specialized
@@ -1726,6 +1852,7 @@
 
 config SLAB
 	bool "SLAB"
+	select HAVE_HARDENED_USERCOPY_ALLOCATOR
 	help
 	  The regular slab allocator that is established and known to work
 	  well in all environments. It organizes cache hot objects in
@@ -1733,6 +1860,7 @@
 
 config SLUB
 	bool "SLUB (Unqueued Allocator)"
+	select HAVE_HARDENED_USERCOPY_ALLOCATOR
 	help
 	   SLUB is a slab allocator that minimizes cache line usage
 	   instead of managing queues of cached objects (SLAB approach).
diff -ruw linux-4.4.115/init/main.c linux-4.4.115-fbx/init/main.c
--- linux-4.4.115/init/main.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/init/main.c	2019-10-29 09:26:25.561221948 +0100
@@ -94,9 +94,6 @@
 extern void init_IRQ(void);
 extern void fork_init(void);
 extern void radix_tree_init(void);
-#ifndef CONFIG_DEBUG_RODATA
-static inline void mark_rodata_ro(void) { }
-#endif
 
 /*
  * Debug helper: via this flag we know that we are in 'early bootup code'
@@ -472,7 +469,7 @@
 }
 
 # if THREAD_SIZE >= PAGE_SIZE
-void __init __weak thread_info_cache_init(void)
+void __init __weak thread_stack_cache_init(void)
 {
 }
 #endif
@@ -510,11 +507,6 @@
 	smp_setup_processor_id();
 	debug_objects_early_init();
 
-	/*
-	 * Set up the the initial canary ASAP:
-	 */
-	boot_init_stack_canary();
-
 	cgroup_init_early();
 
 	local_irq_disable();
@@ -528,6 +520,10 @@
 	page_address_init();
 	pr_notice("%s", linux_banner);
 	setup_arch(&command_line);
+	/*
+	 * Set up the the initial canary ASAP:
+	 */
+	boot_init_stack_canary();
 	mm_init_cpumask(&init_mm);
 	setup_command_line(command_line);
 	setup_nr_cpu_ids();
@@ -650,7 +646,7 @@
 	/* Should be run before the first non-init thread is created */
 	init_espfix_bsp();
 #endif
-	thread_info_cache_init();
+	thread_stack_cache_init();
 	cred_init();
 	fork_init();
 	proc_caches_init();
@@ -931,6 +927,28 @@
 
 static noinline void __init kernel_init_freeable(void);
 
+#ifdef CONFIG_DEBUG_RODATA
+static bool rodata_enabled = true;
+static int __init set_debug_rodata(char *str)
+{
+	return strtobool(str, &rodata_enabled);
+}
+__setup("rodata=", set_debug_rodata);
+
+static void mark_readonly(void)
+{
+	if (rodata_enabled)
+		mark_rodata_ro();
+	else
+		pr_info("Kernel memory protection disabled.\n");
+}
+#else
+static inline void mark_readonly(void)
+{
+	pr_warn("This architecture does not have kernel memory protection.\n");
+}
+#endif
+
 static int __ref kernel_init(void *unused)
 {
 	int ret;
@@ -939,7 +957,7 @@
 	/* need to finish all async __init code before freeing the memory */
 	async_synchronize_full();
 	free_initmem();
-	mark_rodata_ro();
+	mark_readonly();
 	system_state = SYSTEM_RUNNING;
 	numa_default_policy();
 
diff -ruw linux-4.4.115/init/Makefile linux-4.4.115-fbx/init/Makefile
--- linux-4.4.115/init/Makefile	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/init/Makefile	2019-01-22 16:16:28.619292735 +0100
@@ -3,21 +3,20 @@
 #
 
 obj-y                          := main.o version.o mounts.o
-ifneq ($(CONFIG_BLK_DEV_INITRD),y)
 obj-y                          += noinitramfs.o
-else
 obj-$(CONFIG_BLK_DEV_INITRD)   += initramfs.o
-endif
 obj-$(CONFIG_GENERIC_CALIBRATE_DELAY) += calibrate.o
 
 ifneq ($(CONFIG_ARCH_INIT_TASK),y)
 obj-y                          += init_task.o
 endif
+obj-$(CONFIG_FBX_DECRYPT_INITRD)+= fbx_decrypt_initrd.o rc4.o
 
 mounts-y			:= do_mounts.o
 mounts-$(CONFIG_BLK_DEV_RAM)	+= do_mounts_rd.o
 mounts-$(CONFIG_BLK_DEV_INITRD)	+= do_mounts_initrd.o
 mounts-$(CONFIG_BLK_DEV_MD)	+= do_mounts_md.o
+mounts-$(CONFIG_BLK_DEV_DM)	+= do_mounts_dm.o
 
 # dependencies on generated files need to be listed explicitly
 $(obj)/version.o: include/generated/compile.h
diff -ruw linux-4.4.115/init/noinitramfs.c linux-4.4.115-fbx/init/noinitramfs.c
--- linux-4.4.115/init/noinitramfs.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/init/noinitramfs.c	2019-01-22 16:16:28.623292771 +0100
@@ -21,11 +21,16 @@
 #include <linux/stat.h>
 #include <linux/kdev_t.h>
 #include <linux/syscalls.h>
+#include <linux/kconfig.h>
+#include <linux/initramfs.h>
 
 /*
  * Create a simple rootfs that is similar to the default initramfs
  */
-static int __init default_rootfs(void)
+#if !IS_BUILTIN(CONFIG_BLK_DEV_INITRD)
+static
+#endif
+int __init default_rootfs(void)
 {
 	int err;
 
@@ -49,4 +54,6 @@
 	printk(KERN_WARNING "Failed to create a rootfs\n");
 	return err;
 }
+#if !IS_BUILTIN(CONFIG_BLK_DEV_INITRD)
 rootfs_initcall(default_rootfs);
+#endif
diff -ruw linux-4.4.115/ipc/mqueue.c linux-4.4.115-fbx/ipc/mqueue.c
--- linux-4.4.115/ipc/mqueue.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/ipc/mqueue.c	2019-10-29 09:26:25.561221948 +0100
@@ -747,7 +747,7 @@
 	}
 
 	mode &= ~current_umask();
-	ret = vfs_create(dir, path->dentry, mode, true);
+	ret = vfs_create2(path->mnt, dir, path->dentry, mode, true);
 	path->dentry->d_fsdata = NULL;
 	if (ret)
 		return ERR_PTR(ret);
@@ -763,7 +763,7 @@
 	if ((oflag & O_ACCMODE) == (O_RDWR | O_WRONLY))
 		return ERR_PTR(-EINVAL);
 	acc = oflag2acc[oflag & O_ACCMODE];
-	if (inode_permission(d_inode(path->dentry), acc))
+	if (inode_permission2(path->mnt, d_inode(path->dentry), acc))
 		return ERR_PTR(-EACCES);
 	return dentry_open(path, oflag, current_cred());
 }
@@ -796,7 +796,7 @@
 	ro = mnt_want_write(mnt);	/* we'll drop it in any case */
 	error = 0;
 	mutex_lock(&d_inode(root)->i_mutex);
-	path.dentry = lookup_one_len(name->name, root, strlen(name->name));
+	path.dentry = lookup_one_len2(name->name, mnt, root, strlen(name->name));
 	if (IS_ERR(path.dentry)) {
 		error = PTR_ERR(path.dentry);
 		goto out_putfd;
@@ -867,7 +867,7 @@
 	if (err)
 		goto out_name;
 	mutex_lock_nested(&d_inode(mnt->mnt_root)->i_mutex, I_MUTEX_PARENT);
-	dentry = lookup_one_len(name->name, mnt->mnt_root,
+	dentry = lookup_one_len2(name->name, mnt, mnt->mnt_root,
 				strlen(name->name));
 	if (IS_ERR(dentry)) {
 		err = PTR_ERR(dentry);
@@ -879,7 +879,7 @@
 		err = -ENOENT;
 	} else {
 		ihold(inode);
-		err = vfs_unlink(d_inode(dentry->d_parent), dentry, NULL);
+		err = vfs_unlink2(mnt, d_inode(dentry->d_parent), dentry, NULL);
 	}
 	dput(dentry);
 
diff -ruw linux-4.4.115/ipc/shm.c linux-4.4.115-fbx/ipc/shm.c
--- linux-4.4.115/ipc/shm.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/ipc/shm.c	2019-10-29 09:26:25.561221948 +0100
@@ -1269,7 +1269,7 @@
 	int retval = -EINVAL;
 #ifdef CONFIG_MMU
 	loff_t size = 0;
-	struct file *file;
+	struct file *file = NULL;
 	struct vm_area_struct *next;
 #endif
 
diff -ruw linux-4.4.115/Kbuild linux-4.4.115-fbx/Kbuild
--- linux-4.4.115/Kbuild	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/Kbuild	2019-01-22 16:16:21.027223985 +0100
@@ -6,31 +6,6 @@
 # 3) Generate asm-offsets.h (may need bounds.h and timeconst.h)
 # 4) Check for missing system calls
 
-# Default sed regexp - multiline due to syntax constraints
-define sed-y
-	"/^->/{s:->#\(.*\):/* \1 */:; \
-	s:^->\([^ ]*\) [\$$#]*\([-0-9]*\) \(.*\):#define \1 \2 /* \3 */:; \
-	s:^->\([^ ]*\) [\$$#]*\([^ ]*\) \(.*\):#define \1 \2 /* \3 */:; \
-	s:->::; p;}"
-endef
-
-# Use filechk to avoid rebuilds when a header changes, but the resulting file
-# does not
-define filechk_offsets
-	(set -e; \
-	 echo "#ifndef $2"; \
-	 echo "#define $2"; \
-	 echo "/*"; \
-	 echo " * DO NOT MODIFY."; \
-	 echo " *"; \
-	 echo " * This file was generated by Kbuild"; \
-	 echo " */"; \
-	 echo ""; \
-	 sed -ne $(sed-y); \
-	 echo ""; \
-	 echo "#endif" )
-endef
-
 #####
 # 1) Generate bounds.h
 
diff -ruw linux-4.4.115/kernel/audit.c linux-4.4.115-fbx/kernel/audit.c
--- linux-4.4.115/kernel/audit.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/kernel/audit.c	2019-10-29 09:26:25.565221987 +0100
@@ -870,6 +870,12 @@
 				return err;
 		}
 		if (s.mask & AUDIT_STATUS_PID) {
+			/* NOTE: we are using task_tgid_vnr() below because
+			 *       the s.pid value is relative to the namespace
+			 *       of the caller; at present this doesn't matter
+			 *       much since you can really only run auditd
+			 *       from the initial pid namespace, but something
+			 *       to keep in mind if this changes */
 			int new_pid = s.pid;
 
 			if ((!new_pid) && (task_tgid_vnr(current) != audit_pid))
@@ -1896,7 +1902,7 @@
 			 " euid=%u suid=%u fsuid=%u"
 			 " egid=%u sgid=%u fsgid=%u tty=%s ses=%u",
 			 task_ppid_nr(tsk),
-			 task_pid_nr(tsk),
+			 task_tgid_nr(tsk),
 			 from_kuid(&init_user_ns, audit_get_loginuid(tsk)),
 			 from_kuid(&init_user_ns, cred->uid),
 			 from_kgid(&init_user_ns, cred->gid),
diff -ruw linux-4.4.115/kernel/auditsc.c linux-4.4.115-fbx/kernel/auditsc.c
--- linux-4.4.115/kernel/auditsc.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/kernel/auditsc.c	2019-10-29 09:26:25.569222026 +0100
@@ -458,7 +458,7 @@
 
 		switch (f->type) {
 		case AUDIT_PID:
-			pid = task_pid_nr(tsk);
+			pid = task_tgid_nr(tsk);
 			result = audit_comparator(pid, f->op, f->val);
 			break;
 		case AUDIT_PPID:
@@ -1987,7 +1987,7 @@
 	ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_LOGIN);
 	if (!ab)
 		return;
-	audit_log_format(ab, "pid=%d uid=%u", task_pid_nr(current), uid);
+	audit_log_format(ab, "pid=%d uid=%u", task_tgid_nr(current), uid);
 	audit_log_task_context(ab);
 	audit_log_format(ab, " old-auid=%u auid=%u old-ses=%u ses=%u res=%d",
 			 oldloginuid, loginuid, oldsessionid, sessionid, !rc);
@@ -2212,7 +2212,7 @@
 {
 	struct audit_context *context = current->audit_context;
 
-	context->target_pid = task_pid_nr(t);
+	context->target_pid = task_tgid_nr(t);
 	context->target_auid = audit_get_loginuid(t);
 	context->target_uid = task_uid(t);
 	context->target_sessionid = audit_get_sessionid(t);
@@ -2237,7 +2237,7 @@
 
 	if (audit_pid && t->tgid == audit_pid) {
 		if (sig == SIGTERM || sig == SIGHUP || sig == SIGUSR1 || sig == SIGUSR2) {
-			audit_sig_pid = task_pid_nr(tsk);
+			audit_sig_pid = task_tgid_nr(tsk);
 			if (uid_valid(tsk->loginuid))
 				audit_sig_uid = tsk->loginuid;
 			else
@@ -2337,7 +2337,7 @@
 void __audit_log_capset(const struct cred *new, const struct cred *old)
 {
 	struct audit_context *context = current->audit_context;
-	context->capset.pid = task_pid_nr(current);
+	context->capset.pid = task_tgid_nr(current);
 	context->capset.cap.effective   = new->cap_effective;
 	context->capset.cap.inheritable = new->cap_effective;
 	context->capset.cap.permitted   = new->cap_permitted;
@@ -2369,7 +2369,7 @@
 			 from_kgid(&init_user_ns, gid),
 			 sessionid);
 	audit_log_task_context(ab);
-	audit_log_format(ab, " pid=%d comm=", task_pid_nr(current));
+	audit_log_format(ab, " pid=%d comm=", task_tgid_nr(current));
 	audit_log_untrustedstring(ab, get_task_comm(comm, current));
 	audit_log_d_path_exe(ab, current->mm);
 }
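The repeated task_pid_nr() to task_tgid_nr() substitution in the audit paths only matters for multi-threaded processes: task_pid_nr() yields the per-thread id (what gettid() returns), while task_tgid_nr() yields the thread-group id userspace knows as the process id, so audit records now attribute a syscall made from a worker thread to its owning process. The userspace view of the distinction (illustrative):

	#include <unistd.h>
	#include <sys/syscall.h>

	pid_t tid = syscall(SYS_gettid); /* per-thread: task_pid_nr()    */
	pid_t pid = getpid();            /* thread group: task_tgid_nr() */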
diff -ruw linux-4.4.115/kernel/cgroup.c linux-4.4.115-fbx/kernel/cgroup.c
--- linux-4.4.115/kernel/cgroup.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/kernel/cgroup.c	2019-01-22 16:16:28.639292916 +0100
@@ -211,6 +211,7 @@
 /* Ditto for the can_fork callback. */
 static unsigned long have_canfork_callback __read_mostly;
 
+static struct file_system_type cgroup2_fs_type;
 static struct cftype cgroup_dfl_base_files[];
 static struct cftype cgroup_legacy_base_files[];
 
@@ -716,10 +717,10 @@
 
 	if (to_cset) {
 		/*
-		 * We are synchronized through cgroup_threadgroup_rwsem
-		 * against PF_EXITING setting such that we can't race
-		 * against cgroup_exit() changing the css_set to
-		 * init_css_set and dropping the old one.
+		 * We are synchronized through css_set_lock against
+		 * PF_EXITING setting such that we can't race against
+		 * cgroup_exit() disassociating the task from the
+		 * css_set.
 		 */
 		WARN_ON_ONCE(task->flags & PF_EXITING);
 
@@ -784,6 +785,8 @@
 
 static void put_css_set(struct css_set *cset)
 {
+	unsigned long flags;
+
 	/*
 	 * Ensure that the refcount doesn't hit zero while any readers
 	 * can see it. Similar to atomic_dec_and_lock(), but for an
@@ -792,9 +795,9 @@
 	if (atomic_add_unless(&cset->refcount, -1, 1))
 		return;
 
-	spin_lock_bh(&css_set_lock);
+	spin_lock_irqsave(&css_set_lock, flags);
 	put_css_set_locked(cset);
-	spin_unlock_bh(&css_set_lock);
+	spin_unlock_irqrestore(&css_set_lock, flags);
 }
 
 /*
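
The css_set_lock conversions that run through the rest of this file replace the _bh lock variants with _irq/_irqsave: the lock is now taken from paths that may already run with interrupts disabled, where the _bh primitives are not permitted (local_bh_enable() must not be called with IRQs off). put_css_set() above uses the saving form because its callers' IRQ state varies; paths known to run with interrupts enabled use the cheaper unconditional form. The two shapes, as a generic sketch (illustrative, not from the patch):

	static DEFINE_SPINLOCK(demo_lock);	/* stand-in for css_set_lock */

	static void caller_irq_state_unknown(void)
	{
		unsigned long flags;

		spin_lock_irqsave(&demo_lock, flags);	/* save current state */
		/* ... critical section ... */
		spin_unlock_irqrestore(&demo_lock, flags);
	}

	static void caller_irqs_known_on(void)
	{
		spin_lock_irq(&demo_lock);	/* unconditionally disable */
		/* ... critical section ... */
		spin_unlock_irq(&demo_lock);	/* unconditionally re-enable */
	}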
@@ -1017,11 +1020,11 @@
 
 	/* First see if we already have a cgroup group that matches
 	 * the desired set */
-	spin_lock_bh(&css_set_lock);
+	spin_lock_irq(&css_set_lock);
 	cset = find_existing_css_set(old_cset, cgrp, template);
 	if (cset)
 		get_css_set(cset);
-	spin_unlock_bh(&css_set_lock);
+	spin_unlock_irq(&css_set_lock);
 
 	if (cset)
 		return cset;
@@ -1049,7 +1052,7 @@
 	 * find_existing_css_set() */
 	memcpy(cset->subsys, template, sizeof(cset->subsys));
 
-	spin_lock_bh(&css_set_lock);
+	spin_lock_irq(&css_set_lock);
 	/* Add reference counts and links from the new css_set. */
 	list_for_each_entry(link, &old_cset->cgrp_links, cgrp_link) {
 		struct cgroup *c = link->cgrp;
@@ -1075,7 +1078,7 @@
 		css_get(css);
 	}
 
-	spin_unlock_bh(&css_set_lock);
+	spin_unlock_irq(&css_set_lock);
 
 	return cset;
 }
@@ -1139,7 +1142,7 @@
 	 * Release all the links from cset_links to this hierarchy's
 	 * root cgroup
 	 */
-	spin_lock_bh(&css_set_lock);
+	spin_lock_irq(&css_set_lock);
 
 	list_for_each_entry_safe(link, tmp_link, &cgrp->cset_links, cset_link) {
 		list_del(&link->cset_link);
@@ -1147,7 +1150,7 @@
 		kfree(link);
 	}
 
-	spin_unlock_bh(&css_set_lock);
+	spin_unlock_irq(&css_set_lock);
 
 	if (!list_empty(&root->root_list)) {
 		list_del(&root->root_list);
@@ -1551,11 +1554,11 @@
 		ss->root = dst_root;
 		css->cgroup = dcgrp;
 
-		spin_lock_bh(&css_set_lock);
+		spin_lock_irq(&css_set_lock);
 		hash_for_each(css_set_table, i, cset, hlist)
 			list_move_tail(&cset->e_cset_node[ss->id],
 				       &dcgrp->e_csets[ss->id]);
-		spin_unlock_bh(&css_set_lock);
+		spin_unlock_irq(&css_set_lock);
 
 		src_root->subsys_mask &= ~(1 << ssid);
 		scgrp->subtree_control &= ~(1 << ssid);
@@ -1650,10 +1653,6 @@
 			all_ss = true;
 			continue;
 		}
-		if (!strcmp(token, "__DEVEL__sane_behavior")) {
-			opts->flags |= CGRP_ROOT_SANE_BEHAVIOR;
-			continue;
-		}
 		if (!strcmp(token, "noprefix")) {
 			opts->flags |= CGRP_ROOT_NOPREFIX;
 			continue;
@@ -1720,15 +1719,6 @@
 			return -ENOENT;
 	}
 
-	if (opts->flags & CGRP_ROOT_SANE_BEHAVIOR) {
-		pr_warn("sane_behavior: this is still under development and its behaviors will change, proceed at your own risk\n");
-		if (nr_opts != 1) {
-			pr_err("sane_behavior: no other mount options allowed\n");
-			return -EINVAL;
-		}
-		return 0;
-	}
-
 	/*
 	 * If the 'all' option was specified select all the subsystems,
 	 * otherwise if 'none', 'name=' and a subsystem name options were
@@ -1832,7 +1822,7 @@
 {
 	struct task_struct *p, *g;
 
-	spin_lock_bh(&css_set_lock);
+	spin_lock_irq(&css_set_lock);
 
 	if (use_task_css_set_links)
 		goto out_unlock;
@@ -1857,8 +1847,12 @@
 		 * entry won't be deleted though the process has exited.
 		 * Do it while holding siglock so that we don't end up
 		 * racing against cgroup_exit().
+		 *
+		 * Interrupts were already disabled while acquiring
+		 * the css_set_lock, so we do not need to disable them
+		 * again when acquiring the sighand->siglock here.
 		 */
-		spin_lock_irq(&p->sighand->siglock);
+		spin_lock(&p->sighand->siglock);
 		if (!(p->flags & PF_EXITING)) {
 			struct css_set *cset = task_css_set(p);
 
@@ -1867,11 +1861,11 @@
 			list_add_tail(&p->cg_list, &cset->tasks);
 			get_css_set(cset);
 		}
-		spin_unlock_irq(&p->sighand->siglock);
+		spin_unlock(&p->sighand->siglock);
 	} while_each_thread(g, p);
 	read_unlock(&tasklist_lock);
 out_unlock:
-	spin_unlock_bh(&css_set_lock);
+	spin_unlock_irq(&css_set_lock);
 }
 
 static void init_cgroup_housekeeping(struct cgroup *cgrp)
@@ -1976,13 +1970,13 @@
 	 * Link the root cgroup in this hierarchy into all the css_set
 	 * objects.
 	 */
-	spin_lock_bh(&css_set_lock);
+	spin_lock_irq(&css_set_lock);
 	hash_for_each(css_set_table, i, cset, hlist) {
 		link_css_set(&tmp_links, cset, root_cgrp);
 		if (css_set_populated(cset))
 			cgroup_update_populated(root_cgrp, true);
 	}
-	spin_unlock_bh(&css_set_lock);
+	spin_unlock_irq(&css_set_lock);
 
 	BUG_ON(!list_empty(&root_cgrp->self.children));
 	BUG_ON(atomic_read(&root->nr_cgrps) != 1);
@@ -2007,9 +2001,10 @@
 			 int flags, const char *unused_dev_name,
 			 void *data)
 {
+	bool is_v2 = fs_type == &cgroup2_fs_type;
 	struct super_block *pinned_sb = NULL;
 	struct cgroup_subsys *ss;
-	struct cgroup_root *root;
+	struct cgroup_root *root = NULL;
 	struct cgroup_sb_opts opts;
 	struct dentry *dentry;
 	int ret;
@@ -2023,6 +2018,17 @@
 	if (!use_task_css_set_links)
 		cgroup_enable_task_cg_lists();
 
+	if (is_v2) {
+		if (data) {
+			pr_err("cgroup2: unknown option \"%s\"\n", (char *)data);
+			return ERR_PTR(-EINVAL);
+		}
+		cgrp_dfl_root_visible = true;
+		root = &cgrp_dfl_root;
+		cgroup_get(&root->cgrp);
+		goto out_mount;
+	}
+
 	mutex_lock(&cgroup_mutex);
 
 	/* First find the desired set of subsystems */
@@ -2030,15 +2036,6 @@
 	if (ret)
 		goto out_unlock;
 
-	/* look for a matching existing root */
-	if (opts.flags & CGRP_ROOT_SANE_BEHAVIOR) {
-		cgrp_dfl_root_visible = true;
-		root = &cgrp_dfl_root;
-		cgroup_get(&root->cgrp);
-		ret = 0;
-		goto out_unlock;
-	}
-
 	/*
 	 * Destruction of cgroup root is asynchronous, so subsystems may
 	 * still be dying after the previous unmount.  Let's drain the
@@ -2149,9 +2146,10 @@
 
 	if (ret)
 		return ERR_PTR(ret);
-
+out_mount:
 	dentry = kernfs_mount(fs_type, flags, root->kf_root,
-				CGROUP_SUPER_MAGIC, &new_sb);
+			      is_v2 ? CGROUP2_SUPER_MAGIC : CGROUP_SUPER_MAGIC,
+			      &new_sb);
 	if (IS_ERR(dentry) || !new_sb)
 		cgroup_put(&root->cgrp);
 
@@ -2194,6 +2192,12 @@
 	.kill_sb = cgroup_kill_sb,
 };
 
+static struct file_system_type cgroup2_fs_type = {
+	.name = "cgroup2",
+	.mount = cgroup_mount,
+	.kill_sb = cgroup_kill_sb,
+};
+
 /**
  * task_cgroup_path - cgroup path of a task in the first cgroup hierarchy
  * @task: target task
@@ -2215,7 +2219,7 @@
 	char *path = NULL;
 
 	mutex_lock(&cgroup_mutex);
-	spin_lock_bh(&css_set_lock);
+	spin_lock_irq(&css_set_lock);
 
 	root = idr_get_next(&cgroup_hierarchy_idr, &hierarchy_id);
 
@@ -2228,7 +2232,7 @@
 			path = buf;
 	}
 
-	spin_unlock_bh(&css_set_lock);
+	spin_unlock_irq(&css_set_lock);
 	mutex_unlock(&cgroup_mutex);
 	return path;
 }
@@ -2403,7 +2407,7 @@
 	 * the new cgroup.  There are no failure cases after here, so this
 	 * is the commit point.
 	 */
-	spin_lock_bh(&css_set_lock);
+	spin_lock_irq(&css_set_lock);
 	list_for_each_entry(cset, &tset->src_csets, mg_node) {
 		list_for_each_entry_safe(task, tmp_task, &cset->mg_tasks, cg_list) {
 			struct css_set *from_cset = task_css_set(task);
@@ -2414,7 +2418,7 @@
 			put_css_set_locked(from_cset);
 		}
 	}
-	spin_unlock_bh(&css_set_lock);
+	spin_unlock_irq(&css_set_lock);
 
 	/*
 	 * Migration is committed, all target tasks are now on dst_csets.
@@ -2443,13 +2447,13 @@
 		}
 	}
 out_release_tset:
-	spin_lock_bh(&css_set_lock);
+	spin_lock_irq(&css_set_lock);
 	list_splice_init(&tset->dst_csets, &tset->src_csets);
 	list_for_each_entry_safe(cset, tmp_cset, &tset->src_csets, mg_node) {
 		list_splice_tail_init(&cset->mg_tasks, &cset->tasks);
 		list_del_init(&cset->mg_node);
 	}
-	spin_unlock_bh(&css_set_lock);
+	spin_unlock_irq(&css_set_lock);
 	return ret;
 }
 
@@ -2466,14 +2470,14 @@
 
 	lockdep_assert_held(&cgroup_mutex);
 
-	spin_lock_bh(&css_set_lock);
+	spin_lock_irq(&css_set_lock);
 	list_for_each_entry_safe(cset, tmp_cset, preloaded_csets, mg_preload_node) {
 		cset->mg_src_cgrp = NULL;
 		cset->mg_dst_cset = NULL;
 		list_del_init(&cset->mg_preload_node);
 		put_css_set_locked(cset);
 	}
-	spin_unlock_bh(&css_set_lock);
+	spin_unlock_irq(&css_set_lock);
 }
 
 /**
@@ -2623,7 +2627,7 @@
 	 * already PF_EXITING could be freed from underneath us unless we
 	 * take an rcu_read_lock.
 	 */
-	spin_lock_bh(&css_set_lock);
+	spin_lock_irq(&css_set_lock);
 	rcu_read_lock();
 	task = leader;
 	do {
@@ -2632,7 +2636,7 @@
 			break;
 	} while_each_thread(leader, task);
 	rcu_read_unlock();
-	spin_unlock_bh(&css_set_lock);
+	spin_unlock_irq(&css_set_lock);
 
 	return cgroup_taskset_migrate(&tset, cgrp);
 }
@@ -2653,7 +2657,7 @@
 	int ret;
 
 	/* look up all src csets */
-	spin_lock_bh(&css_set_lock);
+	spin_lock_irq(&css_set_lock);
 	rcu_read_lock();
 	task = leader;
 	do {
@@ -2663,7 +2667,7 @@
 			break;
 	} while_each_thread(leader, task);
 	rcu_read_unlock();
-	spin_unlock_bh(&css_set_lock);
+	spin_unlock_irq(&css_set_lock);
 
 	/* prepare dst csets and commit */
 	ret = cgroup_migrate_prepare_dst(dst_cgrp, &preloaded_csets);
@@ -2688,7 +2692,8 @@
 	 */
 	if (!uid_eq(cred->euid, GLOBAL_ROOT_UID) &&
 	    !uid_eq(cred->euid, tcred->uid) &&
-	    !uid_eq(cred->euid, tcred->suid))
+	    !uid_eq(cred->euid, tcred->suid) &&
+	    !ns_capable(tcred->user_ns, CAP_SYS_NICE))
 		ret = -EACCES;
 
 	if (!ret && cgroup_on_dfl(dst_cgrp)) {
@@ -2696,9 +2701,9 @@
 		struct cgroup *cgrp;
 		struct inode *inode;
 
-		spin_lock_bh(&css_set_lock);
+		spin_lock_irq(&css_set_lock);
 		cgrp = task_cgroup_from_root(task, &cgrp_dfl_root);
-		spin_unlock_bh(&css_set_lock);
+		spin_unlock_irq(&css_set_lock);
 
 		while (!cgroup_is_descendant(dst_cgrp, cgrp))
 			cgrp = cgroup_parent(cgrp);
@@ -2794,20 +2799,22 @@
 	int retval = 0;
 
 	mutex_lock(&cgroup_mutex);
+	percpu_down_write(&cgroup_threadgroup_rwsem);
 	for_each_root(root) {
 		struct cgroup *from_cgrp;
 
 		if (root == &cgrp_dfl_root)
 			continue;
 
-		spin_lock_bh(&css_set_lock);
+		spin_lock_irq(&css_set_lock);
 		from_cgrp = task_cgroup_from_root(from, root);
-		spin_unlock_bh(&css_set_lock);
+		spin_unlock_irq(&css_set_lock);
 
 		retval = cgroup_attach_task(from_cgrp, tsk, false);
 		if (retval)
 			break;
 	}
+	percpu_up_write(&cgroup_threadgroup_rwsem);
 	mutex_unlock(&cgroup_mutex);
 
 	return retval;
@@ -2927,7 +2934,7 @@
 	percpu_down_write(&cgroup_threadgroup_rwsem);
 
 	/* look up all csses currently attached to @cgrp's subtree */
-	spin_lock_bh(&css_set_lock);
+	spin_lock_irq(&css_set_lock);
 	css_for_each_descendant_pre(css, cgroup_css(cgrp, NULL)) {
 		struct cgrp_cset_link *link;
 
@@ -2939,14 +2946,14 @@
 			cgroup_migrate_add_src(link->cset, cgrp,
 					       &preloaded_csets);
 	}
-	spin_unlock_bh(&css_set_lock);
+	spin_unlock_irq(&css_set_lock);
 
 	/* NULL dst indicates self on default hierarchy */
 	ret = cgroup_migrate_prepare_dst(NULL, &preloaded_csets);
 	if (ret)
 		goto out_finish;
 
-	spin_lock_bh(&css_set_lock);
+	spin_lock_irq(&css_set_lock);
 	list_for_each_entry(src_cset, &preloaded_csets, mg_preload_node) {
 		struct task_struct *task, *ntask;
 
@@ -2958,7 +2965,7 @@
 		list_for_each_entry_safe(task, ntask, &src_cset->tasks, cg_list)
 			cgroup_taskset_add(task, &tset);
 	}
-	spin_unlock_bh(&css_set_lock);
+	spin_unlock_irq(&css_set_lock);
 
 	ret = cgroup_taskset_migrate(&tset, cgrp);
 out_finish:
@@ -3641,10 +3648,10 @@
 	int count = 0;
 	struct cgrp_cset_link *link;
 
-	spin_lock_bh(&css_set_lock);
+	spin_lock_irq(&css_set_lock);
 	list_for_each_entry(link, &cgrp->cset_links, cset_link)
 		count += atomic_read(&link->cset->refcount);
-	spin_unlock_bh(&css_set_lock);
+	spin_unlock_irq(&css_set_lock);
 	return count;
 }
 
@@ -3982,7 +3989,7 @@
 
 	memset(it, 0, sizeof(*it));
 
-	spin_lock_bh(&css_set_lock);
+	spin_lock_irq(&css_set_lock);
 
 	it->ss = css->ss;
 
@@ -3995,7 +4002,7 @@
 
 	css_task_iter_advance_css_set(it);
 
-	spin_unlock_bh(&css_set_lock);
+	spin_unlock_irq(&css_set_lock);
 }
 
 /**
@@ -4013,7 +4020,7 @@
 		it->cur_task = NULL;
 	}
 
-	spin_lock_bh(&css_set_lock);
+	spin_lock_irq(&css_set_lock);
 
 	if (it->task_pos) {
 		it->cur_task = list_entry(it->task_pos, struct task_struct,
@@ -4022,7 +4029,7 @@
 		css_task_iter_advance(it);
 	}
 
-	spin_unlock_bh(&css_set_lock);
+	spin_unlock_irq(&css_set_lock);
 
 	return it->cur_task;
 }
@@ -4036,10 +4043,10 @@
 void css_task_iter_end(struct css_task_iter *it)
 {
 	if (it->cur_cset) {
-		spin_lock_bh(&css_set_lock);
+		spin_lock_irq(&css_set_lock);
 		list_del(&it->iters_node);
 		put_css_set_locked(it->cur_cset);
-		spin_unlock_bh(&css_set_lock);
+		spin_unlock_irq(&css_set_lock);
 	}
 
 	if (it->cur_task)
@@ -4067,11 +4074,13 @@
 
 	mutex_lock(&cgroup_mutex);
 
+	percpu_down_write(&cgroup_threadgroup_rwsem);
+
 	/* all tasks in @from are being moved, all csets are source */
-	spin_lock_bh(&css_set_lock);
+	spin_lock_irq(&css_set_lock);
 	list_for_each_entry(link, &from->cset_links, cset_link)
 		cgroup_migrate_add_src(link->cset, to, &preloaded_csets);
-	spin_unlock_bh(&css_set_lock);
+	spin_unlock_irq(&css_set_lock);
 
 	ret = cgroup_migrate_prepare_dst(to, &preloaded_csets);
 	if (ret)
@@ -4083,7 +4092,11 @@
 	 */
 	do {
 		css_task_iter_start(&from->self, &it);
+
+		do {
 		task = css_task_iter_next(&it);
+		} while (task && (task->flags & PF_EXITING));
+
 		if (task)
 			get_task_struct(task);
 		css_task_iter_end(&it);
@@ -4095,6 +4108,7 @@
 	} while (task && !ret);
 out_err:
 	cgroup_migrate_finish(&preloaded_csets);
+	percpu_up_write(&cgroup_threadgroup_rwsem);
 	mutex_unlock(&cgroup_mutex);
 	return ret;
 }
@@ -5176,10 +5190,10 @@
 	 */
 	cgrp->self.flags &= ~CSS_ONLINE;
 
-	spin_lock_bh(&css_set_lock);
+	spin_lock_irq(&css_set_lock);
 	list_for_each_entry(link, &cgrp->cset_links, cset_link)
 		link->cset->dead = true;
-	spin_unlock_bh(&css_set_lock);
+	spin_unlock_irq(&css_set_lock);
 
 	/* initiate massacre of all css's */
 	for_each_css(css, ssid, cgrp)
@@ -5329,6 +5343,12 @@
 	BUG_ON(cgroup_init_cftypes(NULL, cgroup_dfl_base_files));
 	BUG_ON(cgroup_init_cftypes(NULL, cgroup_legacy_base_files));
 
+	/*
+	 * The latency of synchronize_sched() is too high for cgroups, so
+	 * avoid it at the cost of forcing all readers into the slow path.
+	 */
+	rcu_sync_enter_start(&cgroup_threadgroup_rwsem.rss);
+
 	mutex_lock(&cgroup_mutex);
 
 	/* Add init_css_set to the hash table */
@@ -5384,6 +5404,7 @@
 
 	WARN_ON(sysfs_create_mount_point(fs_kobj, "cgroup"));
 	WARN_ON(register_filesystem(&cgroup_fs_type));
+	WARN_ON(register_filesystem(&cgroup2_fs_type));
 	WARN_ON(!proc_create("cgroups", 0, NULL, &proc_cgroupstats_operations));
 
 	return 0;
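
The cgroup hunks above backport the dedicated cgroup2 filesystem type as the way to mount the default (v2) hierarchy, replacing the removed __DEVEL__sane_behavior mount option, and register it during init. The v2 hierarchy mounts with no options; the mount callback rejects any data string. A hedged userspace sketch (the mount-point path is an assumption, not taken from the patch):

	#include <stdio.h>
	#include <sys/mount.h>

	int main(void)
	{
		/* assumes the target directory already exists */
		if (mount("none", "/sys/fs/cgroup/unified", "cgroup2", 0, NULL)) {
			perror("mount cgroup2");
			return 1;
		}
		return 0;
	}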
@@ -5432,7 +5453,7 @@
 		goto out;
 
 	mutex_lock(&cgroup_mutex);
-	spin_lock_bh(&css_set_lock);
+	spin_lock_irq(&css_set_lock);
 
 	for_each_root(root) {
 		struct cgroup_subsys *ss;
@@ -5484,7 +5505,7 @@
 
 	retval = 0;
 out_unlock:
-	spin_unlock_bh(&css_set_lock);
+	spin_unlock_irq(&css_set_lock);
 	mutex_unlock(&cgroup_mutex);
 	kfree(buf);
 out:
@@ -5645,13 +5666,13 @@
 	if (use_task_css_set_links) {
 		struct css_set *cset;
 
-		spin_lock_bh(&css_set_lock);
+		spin_lock_irq(&css_set_lock);
 		cset = task_css_set(current);
 		if (list_empty(&child->cg_list)) {
 			get_css_set(cset);
 			css_set_move_task(child, NULL, cset, false);
 		}
-		spin_unlock_bh(&css_set_lock);
+		spin_unlock_irq(&css_set_lock);
 	}
 
 	/*
@@ -5689,19 +5710,22 @@
 	int i;
 
 	/*
-	 * Unlink from @tsk from its css_set.  As migration path can't race
-	 * with us, we can check css_set and cg_list without synchronization.
+	 * Avoid potential race with the migrate path.
+	 */
+	spin_lock_irq(&css_set_lock);
+	/*
+	 * Unlink from @tsk from its css_set.
 	 */
 	cset = task_css_set(tsk);
 
 	if (!list_empty(&tsk->cg_list)) {
-		spin_lock_bh(&css_set_lock);
 		css_set_move_task(tsk, cset, NULL, false);
-		spin_unlock_bh(&css_set_lock);
 	} else {
 		get_css_set(cset);
 	}
 
+	spin_unlock_irq(&css_set_lock);
+
 	/* see cgroup_post_fork() for details */
 	for_each_subsys_which(ss, i, &have_exit_callback)
 		ss->exit(tsk);
@@ -5763,7 +5787,9 @@
 	if (!pathbuf || !agentbuf)
 		goto out;
 
+	spin_lock_irq(&css_set_lock);
 	path = cgroup_path(cgrp, pathbuf, PATH_MAX);
+	spin_unlock_irq(&css_set_lock);
 	if (!path)
 		goto out;
 
@@ -5910,7 +5936,7 @@
 	if (!name_buf)
 		return -ENOMEM;
 
-	spin_lock_bh(&css_set_lock);
+	spin_lock_irq(&css_set_lock);
 	rcu_read_lock();
 	cset = rcu_dereference(current->cgroups);
 	list_for_each_entry(link, &cset->cgrp_links, cgrp_link) {
@@ -5921,7 +5947,7 @@
 			   c->root->hierarchy_id, name_buf);
 	}
 	rcu_read_unlock();
-	spin_unlock_bh(&css_set_lock);
+	spin_unlock_irq(&css_set_lock);
 	kfree(name_buf);
 	return 0;
 }
@@ -5932,13 +5958,13 @@
 	struct cgroup_subsys_state *css = seq_css(seq);
 	struct cgrp_cset_link *link;
 
-	spin_lock_bh(&css_set_lock);
+	spin_lock_irq(&css_set_lock);
 	list_for_each_entry(link, &css->cgroup->cset_links, cset_link) {
 		struct css_set *cset = link->cset;
 		struct task_struct *task;
 		int count = 0;
 
-		seq_printf(seq, "css_set %p\n", cset);
+		seq_printf(seq, "css_set %pK\n", cset);
 
 		list_for_each_entry(task, &cset->tasks, cg_list) {
 			if (count++ > MAX_TASKS_SHOWN_PER_CSS)
@@ -5955,7 +5981,7 @@
 	overflow:
 		seq_puts(seq, "  ...\n");
 	}
-	spin_unlock_bh(&css_set_lock);
+	spin_unlock_irq(&css_set_lock);
 	return 0;
 }
 
diff -ruw linux-4.4.115/kernel/cpu.c linux-4.4.115-fbx/kernel/cpu.c
--- linux-4.4.115/kernel/cpu.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/kernel/cpu.c	2019-10-29 09:26:25.573222065 +0100
@@ -24,6 +24,8 @@
 #include <linux/irq.h>
 #include <trace/events/power.h>
 
+#include <trace/events/sched.h>
+
 #include "smpboot.h"
 
 #ifdef CONFIG_SMP
@@ -89,6 +91,11 @@
 #define cpuhp_lock_acquire()      lock_map_acquire(&cpu_hotplug.dep_map)
 #define cpuhp_lock_release()      lock_map_release(&cpu_hotplug.dep_map)
 
+void cpu_hotplug_mutex_held(void)
+{
+	lockdep_assert_held(&cpu_hotplug.lock);
+}
+EXPORT_SYMBOL(cpu_hotplug_mutex_held);
 
 void get_online_cpus(void)
 {
@@ -183,10 +190,17 @@
 }
 EXPORT_SYMBOL_GPL(cpu_hotplug_disable);
 
+static void __cpu_hotplug_enable(void)
+{
+	if (WARN_ONCE(!cpu_hotplug_disabled, "Unbalanced cpu hotplug enable\n"))
+		return;
+	cpu_hotplug_disabled--;
+}
+
 void cpu_hotplug_enable(void)
 {
 	cpu_maps_update_begin();
-	WARN_ON(--cpu_hotplug_disabled < 0);
+	__cpu_hotplug_enable();
 	cpu_maps_update_done();
 }
 EXPORT_SYMBOL_GPL(cpu_hotplug_enable);
@@ -352,6 +366,9 @@
 	if (!cpu_online(cpu))
 		return -EINVAL;
 
+	if (!tasks_frozen && !cpu_isolated(cpu) && num_online_uniso_cpus() == 1)
+		return -EBUSY;
+
 	cpu_hotplug_begin();
 
 	err = __cpu_notify(CPU_DOWN_PREPARE | mod, hcpu, -1, &nr_calls);
@@ -425,6 +442,7 @@
 
 out_release:
 	cpu_hotplug_done();
+	trace_sched_cpu_hotplug(cpu, err, 0);
 	if (!err)
 		cpu_notify_nofail(CPU_POST_DEAD | mod, hcpu);
 	return err;
@@ -510,7 +528,7 @@
 	ret = __cpu_notify(CPU_UP_PREPARE | mod, hcpu, -1, &nr_calls);
 	if (ret) {
 		nr_calls--;
-		pr_warn("%s: attempt to bring up CPU %u failed\n",
+		pr_warn_ratelimited("%s: attempt to bring up CPU %u failed\n",
 			__func__, cpu);
 		goto out_notify;
 	}
@@ -530,13 +548,46 @@
 		__cpu_notify(CPU_UP_CANCELED | mod, hcpu, nr_calls, NULL);
 out:
 	cpu_hotplug_done();
+	trace_sched_cpu_hotplug(cpu, ret, 1);
 
 	return ret;
 }
 
+static int switch_to_rt_policy(void)
+{
+	struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
+	unsigned int policy = current->policy;
+	int err;
+
+	/* Nobody should be attempting hotplug from these policy contexts. */
+	if (policy == SCHED_BATCH || policy == SCHED_IDLE ||
+					policy == SCHED_DEADLINE)
+		return -EPERM;
+
+	if (policy == SCHED_FIFO || policy == SCHED_RR)
+		return 1;
+
+	/* Only SCHED_NORMAL left. */
+	err = sched_setscheduler_nocheck(current, SCHED_FIFO, &param);
+	return err;
+}
+
+static int switch_to_fair_policy(void)
+{
+	struct sched_param param = { .sched_priority = 0 };
+
+	return sched_setscheduler_nocheck(current, SCHED_NORMAL, &param);
+}
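
switch_to_rt_policy() is tri-state: a negative value means the caller's policy is one from which hotplug is refused outright, 0 means the task was SCHED_NORMAL and has just been boosted to SCHED_FIFO (the caller must undo this), and 1 means the task was already realtime so there is nothing to undo. cpu_up() below consumes it exactly that way; a condensed sketch of the contract, with do_actual_online() as a hypothetical placeholder:

	int online_cpu_sketch(unsigned int cpu)
	{
		int switch_err = switch_to_rt_policy();
		int err;

		if (switch_err < 0)	/* BATCH/IDLE/DEADLINE: refuse */
			return switch_err;

		err = do_actual_online(cpu);	/* hypothetical placeholder */

		if (!switch_err)	/* we boosted the policy, drop back */
			switch_to_fair_policy();
		return err;
	}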
+
 int cpu_up(unsigned int cpu)
 {
 	int err = 0;
+	int switch_err = 0;
+
+	switch_err = switch_to_rt_policy();
+	if (switch_err < 0)
+		return switch_err;
 
 	if (!cpu_possible(cpu)) {
 		pr_err("can't online cpu %d because it is not configured as may-hotadd at boot time\n",
@@ -562,6 +613,14 @@
 
 out:
 	cpu_maps_update_done();
+
+	if (!switch_err) {
+		switch_err = switch_to_fair_policy();
+		if (switch_err)
+			pr_err("Hotplug policy switch err=%d Task %s pid=%d\n",
+				switch_err, current->comm, current->pid);
+	}
+
 	return err;
 }
 EXPORT_SYMBOL_GPL(cpu_up);
@@ -623,10 +682,11 @@
 void enable_nonboot_cpus(void)
 {
 	int cpu, error;
+	struct device *cpu_device;
 
 	/* Allow everyone to use the CPU hotplug again */
 	cpu_maps_update_begin();
-	WARN_ON(--cpu_hotplug_disabled < 0);
+	__cpu_hotplug_enable();
 	if (cpumask_empty(frozen_cpus))
 		goto out;
 
@@ -640,6 +700,12 @@
 		trace_suspend_resume(TPS("CPU_ON"), cpu, false);
 		if (!error) {
 			pr_info("CPU%d is up\n", cpu);
+			cpu_device = get_cpu_device(cpu);
+			if (!cpu_device)
+				pr_err("%s: failed to get cpu%d device\n",
+				       __func__, cpu);
+			else
+				kobject_uevent(&cpu_device->kobj, KOBJ_ONLINE);
 			continue;
 		}
 		pr_warn("Error taking CPU%d up: %d\n", cpu, error);
@@ -779,6 +845,10 @@
 const struct cpumask *const cpu_active_mask = to_cpumask(cpu_active_bits);
 EXPORT_SYMBOL(cpu_active_mask);
 
+static DECLARE_BITMAP(cpu_isolated_bits, CONFIG_NR_CPUS) __read_mostly;
+const struct cpumask *const cpu_isolated_mask = to_cpumask(cpu_isolated_bits);
+EXPORT_SYMBOL(cpu_isolated_mask);
+
 void set_cpu_possible(unsigned int cpu, bool possible)
 {
 	if (possible)
@@ -813,6 +883,14 @@
 		cpumask_clear_cpu(cpu, to_cpumask(cpu_active_bits));
 }
 
+void set_cpu_isolated(unsigned int cpu, bool isolated)
+{
+	if (isolated)
+		cpumask_set_cpu(cpu, to_cpumask(cpu_isolated_bits));
+	else
+		cpumask_clear_cpu(cpu, to_cpumask(cpu_isolated_bits));
+}
+
 void init_cpu_present(const struct cpumask *src)
 {
 	cpumask_copy(to_cpumask(cpu_present_bits), src);
@@ -827,3 +905,28 @@
 {
 	cpumask_copy(to_cpumask(cpu_online_bits), src);
 }
+
+void init_cpu_isolated(const struct cpumask *src)
+{
+	cpumask_copy(to_cpumask(cpu_isolated_bits), src);
+}
+
+static ATOMIC_NOTIFIER_HEAD(idle_notifier);
+
+void idle_notifier_register(struct notifier_block *n)
+{
+	atomic_notifier_chain_register(&idle_notifier, n);
+}
+EXPORT_SYMBOL_GPL(idle_notifier_register);
+
+void idle_notifier_unregister(struct notifier_block *n)
+{
+	atomic_notifier_chain_unregister(&idle_notifier, n);
+}
+EXPORT_SYMBOL_GPL(idle_notifier_unregister);
+
+void idle_notifier_call_chain(unsigned long val)
+{
+	atomic_notifier_call_chain(&idle_notifier, val, NULL);
+}
+EXPORT_SYMBOL_GPL(idle_notifier_call_chain);
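
The idle notifier chain added here is consumed later in this patch by kernel/events/core.c, which registers perf_event_idle_nb to track per-CPU idleness. A minimal additional consumer would look like this (illustrative sketch; IDLE_START/IDLE_END are the action values used by the perf hunk below, and callbacks run from the idle path, so they must be cheap and atomic-safe):

	static int demo_idle_cb(struct notifier_block *nb,
				unsigned long action, void *data)
	{
		if (action == IDLE_START)
			pr_debug("cpu%d entering idle\n", smp_processor_id());
		else if (action == IDLE_END)
			pr_debug("cpu%d leaving idle\n", smp_processor_id());
		return NOTIFY_OK;
	}

	static struct notifier_block demo_idle_nb = {
		.notifier_call = demo_idle_cb,
	};

	/* in some init path: */
	idle_notifier_register(&demo_idle_nb);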
diff -ruw linux-4.4.115/kernel/cpu_pm.c linux-4.4.115-fbx/kernel/cpu_pm.c
--- linux-4.4.115/kernel/cpu_pm.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/kernel/cpu_pm.c	2019-01-22 16:16:28.639292916 +0100
@@ -22,14 +22,17 @@
 #include <linux/spinlock.h>
 #include <linux/syscore_ops.h>
 
+bool from_suspend = false;
+
 static DEFINE_RWLOCK(cpu_pm_notifier_lock);
 static RAW_NOTIFIER_HEAD(cpu_pm_notifier_chain);
 
-static int cpu_pm_notify(enum cpu_pm_event event, int nr_to_call, int *nr_calls)
+static int cpu_pm_notify(enum cpu_pm_event event, int nr_to_call, int *nr_calls,
+		void *data)
 {
 	int ret;
 
-	ret = __raw_notifier_call_chain(&cpu_pm_notifier_chain, event, NULL,
+	ret = __raw_notifier_call_chain(&cpu_pm_notifier_chain, event, data,
 		nr_to_call, nr_calls);
 
 	return notifier_to_errno(ret);
@@ -101,13 +104,13 @@
 	int ret = 0;
 
 	read_lock(&cpu_pm_notifier_lock);
-	ret = cpu_pm_notify(CPU_PM_ENTER, -1, &nr_calls);
+	ret = cpu_pm_notify(CPU_PM_ENTER, -1, &nr_calls, NULL);
 	if (ret)
 		/*
 		 * Inform listeners (nr_calls - 1) about the failure of CPU
 		 * PM entry; they were notified earlier to prepare for it.
 		 */
-		cpu_pm_notify(CPU_PM_ENTER_FAILED, nr_calls - 1, NULL);
+		cpu_pm_notify(CPU_PM_ENTER_FAILED, nr_calls - 1, NULL, NULL);
 	read_unlock(&cpu_pm_notifier_lock);
 
 	return ret;
@@ -131,7 +134,7 @@
 	int ret;
 
 	read_lock(&cpu_pm_notifier_lock);
-	ret = cpu_pm_notify(CPU_PM_EXIT, -1, NULL);
+	ret = cpu_pm_notify(CPU_PM_EXIT, -1, NULL, NULL);
 	read_unlock(&cpu_pm_notifier_lock);
 
 	return ret;
@@ -154,19 +157,21 @@
  *
  * Return conditions are same as __raw_notifier_call_chain.
  */
-int cpu_cluster_pm_enter(void)
+int cpu_cluster_pm_enter(unsigned long aff_level)
 {
 	int nr_calls;
 	int ret = 0;
 
 	read_lock(&cpu_pm_notifier_lock);
-	ret = cpu_pm_notify(CPU_CLUSTER_PM_ENTER, -1, &nr_calls);
+	ret = cpu_pm_notify(CPU_CLUSTER_PM_ENTER, -1, &nr_calls,
+			(void *) aff_level);
 	if (ret)
 		/*
 		 * Inform listeners (nr_calls - 1) about failure of CPU cluster
 		 * PM entry; they were notified earlier to prepare for it.
 		 */
-		cpu_pm_notify(CPU_CLUSTER_PM_ENTER_FAILED, nr_calls - 1, NULL);
+		cpu_pm_notify(CPU_CLUSTER_PM_ENTER_FAILED, nr_calls - 1, NULL,
+				(void *) aff_level);
 	read_unlock(&cpu_pm_notifier_lock);
 
 	return ret;
@@ -188,12 +193,12 @@
  *
  * Return conditions are same as __raw_notifier_call_chain.
  */
-int cpu_cluster_pm_exit(void)
+int cpu_cluster_pm_exit(unsigned long aff_level)
 {
 	int ret;
 
 	read_lock(&cpu_pm_notifier_lock);
-	ret = cpu_pm_notify(CPU_CLUSTER_PM_EXIT, -1, NULL);
+	ret = cpu_pm_notify(CPU_CLUSTER_PM_EXIT, -1, NULL, (void *) aff_level);
 	read_unlock(&cpu_pm_notifier_lock);
 
 	return ret;
@@ -205,17 +210,19 @@
 {
 	int ret;
 
+	from_suspend = true;
 	ret = cpu_pm_enter();
 	if (ret)
 		return ret;
 
-	ret = cpu_cluster_pm_enter();
+	ret = cpu_cluster_pm_enter(0);
 	return ret;
 }
 
 static void cpu_pm_resume(void)
 {
-	cpu_cluster_pm_exit();
+	from_suspend = false;
+	cpu_cluster_pm_exit(0);
 	cpu_pm_exit();
 }
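
With this change, cluster PM listeners receive the affinity level through the notifier data argument (cast from unsigned long); plain CPU_PM_ENTER/CPU_PM_EXIT notifications still pass NULL. A sketch of a listener using it, illustrative and not from the patch (registered via the existing cpu_pm_register_notifier()):

	static int demo_cpu_pm_cb(struct notifier_block *nb,
				  unsigned long event, void *data)
	{
		unsigned long aff_level = (unsigned long)data;

		switch (event) {
		case CPU_CLUSTER_PM_ENTER:
			pr_debug("cluster enter, aff_level=%lu\n", aff_level);
			break;
		case CPU_CLUSTER_PM_EXIT:
			pr_debug("cluster exit, aff_level=%lu\n", aff_level);
			break;
		}
		return NOTIFY_OK;
	}

	static struct notifier_block demo_cpu_pm_nb = {
		.notifier_call = demo_cpu_pm_cb,
	};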
 
diff -ruw linux-4.4.115/kernel/cpuset.c linux-4.4.115-fbx/kernel/cpuset.c
--- linux-4.4.115/kernel/cpuset.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/kernel/cpuset.c	2019-10-29 09:26:25.577222104 +0100
@@ -99,6 +99,7 @@
 
 	/* user-configured CPUs and Memory Nodes allowed to tasks */
 	cpumask_var_t cpus_allowed;
+	cpumask_var_t cpus_requested;   /* CPUs requested; some may be offline because of hotplug */
 	nodemask_t mems_allowed;
 
 	/* effective CPUs and Memory Nodes allowed to tasks */
@@ -398,7 +399,7 @@
 
 static int is_cpuset_subset(const struct cpuset *p, const struct cpuset *q)
 {
-	return	cpumask_subset(p->cpus_allowed, q->cpus_allowed) &&
+	return	cpumask_subset(p->cpus_requested, q->cpus_requested) &&
 		nodes_subset(p->mems_allowed, q->mems_allowed) &&
 		is_cpu_exclusive(p) <= is_cpu_exclusive(q) &&
 		is_mem_exclusive(p) <= is_mem_exclusive(q);
@@ -498,7 +499,7 @@
 	cpuset_for_each_child(c, css, par) {
 		if ((is_cpu_exclusive(trial) || is_cpu_exclusive(c)) &&
 		    c != cur &&
-		    cpumask_intersects(trial->cpus_allowed, c->cpus_allowed))
+		    cpumask_intersects(trial->cpus_requested, c->cpus_requested))
 			goto out;
 		if ((is_mem_exclusive(trial) || is_mem_exclusive(c)) &&
 		    c != cur &&
@@ -806,16 +807,15 @@
  * 'cpus' is removed, then call this routine to rebuild the
  * scheduler's dynamic sched domains.
  *
- * Call with cpuset_mutex held.  Takes get_online_cpus().
  */
-static void rebuild_sched_domains_locked(void)
+static void rebuild_sched_domains_unlocked(void)
 {
 	struct sched_domain_attr *attr;
 	cpumask_var_t *doms;
 	int ndoms;
 
+	cpu_hotplug_mutex_held();
 	lockdep_assert_held(&cpuset_mutex);
-	get_online_cpus();
 
 	/*
 	 * We have raced with CPU hotplug. Don't do anything to avoid
@@ -823,27 +823,27 @@
 	 * Anyways, hotplug work item will rebuild sched domains.
 	 */
 	if (!cpumask_equal(top_cpuset.effective_cpus, cpu_active_mask))
-		goto out;
+		return;
 
 	/* Generate domain masks and attrs */
 	ndoms = generate_sched_domains(&doms, &attr);
 
 	/* Have scheduler rebuild the domains */
 	partition_sched_domains(ndoms, doms, attr);
-out:
-	put_online_cpus();
 }
 #else /* !CONFIG_SMP */
-static void rebuild_sched_domains_locked(void)
+static void rebuild_sched_domains_unlocked(void)
 {
 }
 #endif /* CONFIG_SMP */
 
 void rebuild_sched_domains(void)
 {
+	get_online_cpus();
 	mutex_lock(&cpuset_mutex);
-	rebuild_sched_domains_locked();
+	rebuild_sched_domains_unlocked();
 	mutex_unlock(&cpuset_mutex);
+	put_online_cpus();
 }
 
 /**
@@ -875,7 +875,6 @@
  *
  * On legacy hierarchy, effective_cpus will be the same as cpus_allowed.
  *
- * Called with cpuset_mutex held
  */
 static void update_cpumasks_hier(struct cpuset *cs, struct cpumask *new_cpus)
 {
@@ -930,7 +929,7 @@
 	rcu_read_unlock();
 
 	if (need_rebuild_sched_domains)
-		rebuild_sched_domains_locked();
+		rebuild_sched_domains_unlocked();
 }
 
 /**
@@ -957,17 +956,18 @@
 	if (!*buf) {
 		cpumask_clear(trialcs->cpus_allowed);
 	} else {
-		retval = cpulist_parse(buf, trialcs->cpus_allowed);
+		retval = cpulist_parse(buf, trialcs->cpus_requested);
 		if (retval < 0)
 			return retval;
 
-		if (!cpumask_subset(trialcs->cpus_allowed,
-				    top_cpuset.cpus_allowed))
+		if (!cpumask_subset(trialcs->cpus_requested, cpu_present_mask))
 			return -EINVAL;
+
+		cpumask_and(trialcs->cpus_allowed, trialcs->cpus_requested, cpu_active_mask);
 	}
 
 	/* Nothing to do if the cpus didn't change */
-	if (cpumask_equal(cs->cpus_allowed, trialcs->cpus_allowed))
+	if (cpumask_equal(cs->cpus_requested, trialcs->cpus_requested))
 		return 0;
 
 	retval = validate_change(cs, trialcs);
@@ -976,6 +976,7 @@
 
 	spin_lock_irq(&callback_lock);
 	cpumask_copy(cs->cpus_allowed, trialcs->cpus_allowed);
+	cpumask_copy(cs->cpus_requested, trialcs->cpus_requested);
 	spin_unlock_irq(&callback_lock);
 
 	/* use trialcs->cpus_allowed as a temp variable */
@@ -1287,7 +1288,7 @@
 		cs->relax_domain_level = val;
 		if (!cpumask_empty(cs->cpus_allowed) &&
 		    is_sched_load_balance(cs))
-			rebuild_sched_domains_locked();
+			rebuild_sched_domains_unlocked();
 	}
 
 	return 0;
@@ -1318,7 +1319,6 @@
  * cs:		the cpuset to update
  * turning_on: 	whether the flag is being set or cleared
  *
- * Call with cpuset_mutex held.
  */
 
 static int update_flag(cpuset_flagbits_t bit, struct cpuset *cs,
@@ -1353,7 +1353,7 @@
 	spin_unlock_irq(&callback_lock);
 
 	if (!cpumask_empty(trialcs->cpus_allowed) && balance_flag_changed)
-		rebuild_sched_domains_locked();
+		rebuild_sched_domains_unlocked();
 
 	if (spread_flag_changed)
 		update_tasks_flags(cs);
@@ -1618,6 +1618,7 @@
 	cpuset_filetype_t type = cft->private;
 	int retval = 0;
 
+	get_online_cpus();
 	mutex_lock(&cpuset_mutex);
 	if (!is_cpuset_online(cs)) {
 		retval = -ENODEV;
@@ -1655,6 +1656,7 @@
 	}
 out_unlock:
 	mutex_unlock(&cpuset_mutex);
+	put_online_cpus();
 	return retval;
 }
 
@@ -1665,6 +1667,7 @@
 	cpuset_filetype_t type = cft->private;
 	int retval = -ENODEV;
 
+	get_online_cpus();
 	mutex_lock(&cpuset_mutex);
 	if (!is_cpuset_online(cs))
 		goto out_unlock;
@@ -1679,6 +1682,7 @@
 	}
 out_unlock:
 	mutex_unlock(&cpuset_mutex);
+	put_online_cpus();
 	return retval;
 }
 
@@ -1717,6 +1721,7 @@
 	kernfs_break_active_protection(of->kn);
 	flush_work(&cpuset_hotplug_work);
 
+	get_online_cpus();
 	mutex_lock(&cpuset_mutex);
 	if (!is_cpuset_online(cs))
 		goto out_unlock;
@@ -1742,6 +1747,7 @@
 	free_trial_cpuset(trialcs);
 out_unlock:
 	mutex_unlock(&cpuset_mutex);
+	put_online_cpus();
 	kernfs_unbreak_active_protection(of->kn);
 	css_put(&cs->css);
 	flush_workqueue(cpuset_migrate_mm_wq);
@@ -1766,7 +1772,7 @@
 
 	switch (type) {
 	case FILE_CPULIST:
-		seq_printf(sf, "%*pbl\n", cpumask_pr_args(cs->cpus_allowed));
+		seq_printf(sf, "%*pbl\n", cpumask_pr_args(cs->cpus_requested));
 		break;
 	case FILE_MEMLIST:
 		seq_printf(sf, "%*pbl\n", nodemask_pr_args(&cs->mems_allowed));
@@ -1955,12 +1961,15 @@
 	if (!cs)
 		return ERR_PTR(-ENOMEM);
 	if (!alloc_cpumask_var(&cs->cpus_allowed, GFP_KERNEL))
-		goto free_cs;
+		goto error_allowed;
 	if (!alloc_cpumask_var(&cs->effective_cpus, GFP_KERNEL))
-		goto free_cpus;
+		goto error_effective;
+	if (!alloc_cpumask_var(&cs->cpus_requested, GFP_KERNEL))
+		goto error_requested;
 
 	set_bit(CS_SCHED_LOAD_BALANCE, &cs->flags);
 	cpumask_clear(cs->cpus_allowed);
+	cpumask_clear(cs->cpus_requested);
 	nodes_clear(cs->mems_allowed);
 	cpumask_clear(cs->effective_cpus);
 	nodes_clear(cs->effective_mems);
@@ -1969,9 +1978,11 @@
 
 	return &cs->css;
 
-free_cpus:
+error_requested:
+	free_cpumask_var(cs->effective_cpus);
+error_effective:
 	free_cpumask_var(cs->cpus_allowed);
-free_cs:
+error_allowed:
 	kfree(cs);
 	return ERR_PTR(-ENOMEM);
 }
@@ -2032,6 +2043,7 @@
 	cs->mems_allowed = parent->mems_allowed;
 	cs->effective_mems = parent->mems_allowed;
 	cpumask_copy(cs->cpus_allowed, parent->cpus_allowed);
+	cpumask_copy(cs->cpus_requested, parent->cpus_requested);
 	cpumask_copy(cs->effective_cpus, parent->cpus_allowed);
 	spin_unlock_irq(&callback_lock);
 out_unlock:
@@ -2042,13 +2054,14 @@
 /*
  * If the cpuset being removed has its flag 'sched_load_balance'
  * enabled, then simulate turning sched_load_balance off, which
- * will call rebuild_sched_domains_locked().
+ * will call rebuild_sched_domains_unlocked().
  */
 
 static void cpuset_css_offline(struct cgroup_subsys_state *css)
 {
 	struct cpuset *cs = css_cs(css);
 
+	get_online_cpus();
 	mutex_lock(&cpuset_mutex);
 
 	if (is_sched_load_balance(cs))
@@ -2058,6 +2071,7 @@
 	clear_bit(CS_ONLINE, &cs->flags);
 
 	mutex_unlock(&cpuset_mutex);
+	put_online_cpus();
 }
 
 static void cpuset_css_free(struct cgroup_subsys_state *css)
@@ -2066,6 +2080,7 @@
 
 	free_cpumask_var(cs->effective_cpus);
 	free_cpumask_var(cs->cpus_allowed);
+	free_cpumask_var(cs->cpus_requested);
 	kfree(cs);
 }
 
@@ -2130,8 +2145,11 @@
 		BUG();
 	if (!alloc_cpumask_var(&top_cpuset.effective_cpus, GFP_KERNEL))
 		BUG();
+	if (!alloc_cpumask_var(&top_cpuset.cpus_requested, GFP_KERNEL))
+		BUG();
 
 	cpumask_setall(top_cpuset.cpus_allowed);
+	cpumask_setall(top_cpuset.cpus_requested);
 	nodes_setall(top_cpuset.mems_allowed);
 	cpumask_setall(top_cpuset.effective_cpus);
 	nodes_setall(top_cpuset.effective_mems);
@@ -2265,7 +2283,8 @@
 		goto retry;
 	}
 
-	cpumask_and(&new_cpus, cs->cpus_allowed, parent_cs(cs)->effective_cpus);
+	cpumask_and(&new_cpus, cs->cpus_requested,
+						parent_cs(cs)->effective_cpus);
 	nodes_and(new_mems, cs->mems_allowed, parent_cs(cs)->effective_mems);
 
 	cpus_updated = !cpumask_equal(&new_cpus, cs->effective_cpus);
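
The new cpus_requested mask decouples what the user wrote into cpuset.cpus from what is currently usable: cpus_allowed is kept as the intersection of the request with the active CPUs, and reads of cpuset.cpus report the request, so a mask is no longer silently and permanently shrunk by a hotplug cycle. The resulting behavior, roughly (illustrative):

	/*
	 * write "0-3" to cpuset.cpus  ->  cpus_requested = 0-3
	 * cpu3 goes offline           ->  cpus_allowed   = 0-2
	 * cpu3 comes back online      ->  cpus_allowed   = 0-3 again
	 * read cpuset.cpus            ->  still "0-3"
	 */
	cpumask_and(cs->cpus_allowed, cs->cpus_requested, cpu_active_mask);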
diff -ruw linux-4.4.115/kernel/events/core.c linux-4.4.115-fbx/kernel/events/core.c
--- linux-4.4.115/kernel/events/core.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/kernel/events/core.c	2019-10-29 09:26:25.585222183 +0100
@@ -158,6 +158,7 @@
 struct static_key_deferred perf_sched_events __read_mostly;
 static DEFINE_PER_CPU(atomic_t, perf_cgroup_events);
 static DEFINE_PER_CPU(int, perf_sched_cb_usages);
+static DEFINE_PER_CPU(bool, is_idle);
 
 static atomic_t nr_mmap_events __read_mostly;
 static atomic_t nr_comm_events __read_mostly;
@@ -175,8 +176,15 @@
  *   0 - disallow raw tracepoint access for unpriv
  *   1 - disallow cpu events for unpriv
  *   2 - disallow kernel profiling for unpriv
+ *   3 - disallow all unpriv perf event use
  */
+#ifdef CONFIG_PERF_EVENTS_USERMODE
+int sysctl_perf_event_paranoid __read_mostly = -1;
+#elif defined CONFIG_SECURITY_PERF_EVENTS_RESTRICT
+int sysctl_perf_event_paranoid __read_mostly = 3;
+#else
 int sysctl_perf_event_paranoid __read_mostly = 1;
+#endif
 
 /* Minimum for 512 kiB + 1 user control page */
 int sysctl_perf_event_mlock __read_mostly = 512 + (PAGE_SIZE / 1024); /* 'free' kiB per user */
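
Paranoid level 3 is an extension on top of the three upstream levels: at that setting perf_event_open() rejects every unprivileged caller. The gate shows up further below as a perf_paranoid_any() check in the perf_event_open path; the helper itself is not in this excerpt, but by analogy with the other perf_paranoid_*() accessors it presumably reduces to a threshold test, something like (assumed, not shown in the patch):

	static inline bool perf_paranoid_any(void)
	{
		return sysctl_perf_event_paranoid > 2;	/* assumed body */
	}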
@@ -1481,10 +1489,17 @@
 	 * If this was a group event with sibling events then
 	 * upgrade the siblings to singleton events by adding them
 	 * to whatever list we are on.
+	 * If this isn't on a list, make sure we still remove the sibling's
+	 * group_entry from this sibling_list; otherwise, when that sibling
+	 * is later deallocated, it will try to remove itself from this
+	 * sibling_list, which may well have been deallocated already,
+	 * resulting in a use-after-free.
 	 */
 	list_for_each_entry_safe(sibling, tmp, &event->sibling_list, group_entry) {
 		if (list)
 			list_move_tail(&sibling->group_entry, list);
+		else
+			list_del_init(&sibling->group_entry);
 		sibling->group_leader = sibling;
 
 		/* Inherit group flags from the previous leader */
@@ -1679,6 +1694,31 @@
 }
 
 
+#ifdef CONFIG_SMP
+static void perf_retry_remove(struct perf_event *event,
+			      struct remove_event *rep)
+{
+	int up_ret;
+	/*
+	 * CPU was offline. Bring it online so we can
+	 * gracefully exit a perf context.
+	 */
+	up_ret = cpu_up(event->cpu);
+	if (!up_ret)
+		/* Try the remove call once again. */
+		cpu_function_call(event->cpu, __perf_remove_from_context,
+				  rep);
+	else
+		pr_err("Failed to bring up CPU: %d, ret: %d\n",
+		       event->cpu, up_ret);
+}
+#else
+static void perf_retry_remove(struct perf_event *event,
+			      struct remove_event *rep)
+{
+}
+#endif
+
 /*
  * Remove the event from a task's (or a CPU's) list of events.
  *
@@ -1692,7 +1732,8 @@
  * When called from perf_event_exit_task, it's OK because the
  * context has been detached from its task.
  */
-static void perf_remove_from_context(struct perf_event *event, bool detach_group)
+static void __ref perf_remove_from_context(struct perf_event *event,
+					   bool detach_group)
 {
 	struct perf_event_context *ctx = event->ctx;
 	struct task_struct *task = ctx->task;
@@ -1700,6 +1741,7 @@
 		.event = event,
 		.detach_group = detach_group,
 	};
+	int ret;
 
 	lockdep_assert_held(&ctx->mutex);
 
@@ -1710,7 +1752,11 @@
 		 * already called __perf_remove_from_context from
 		 * perf_event_exit_cpu.
 		 */
-		cpu_function_call(event->cpu, __perf_remove_from_context, &re);
+		ret = cpu_function_call(event->cpu, __perf_remove_from_context,
+					&re);
+		if (ret == -ENXIO)
+			perf_retry_remove(event, &re);
+
 		return;
 	}
 
@@ -1906,8 +1952,13 @@
 	if (event->state <= PERF_EVENT_STATE_OFF)
 		return 0;
 
-	event->state = PERF_EVENT_STATE_ACTIVE;
-	event->oncpu = smp_processor_id();
+	WRITE_ONCE(event->oncpu, smp_processor_id());
+	/*
+	 * Order event::oncpu write to happen before the ACTIVE state
+	 * is visible.
+	 */
+	smp_wmb();
+	WRITE_ONCE(event->state, PERF_EVENT_STATE_ACTIVE);
 
 	/*
 	 * Unthrottle events, since we scheduled we might have missed several
@@ -2388,6 +2439,29 @@
 }
 EXPORT_SYMBOL_GPL(perf_event_enable);
 
+static int __perf_event_stop(void *info)
+{
+	struct perf_event *event = info;
+
+	/* for AUX events, our job is done if the event is already inactive */
+	if (READ_ONCE(event->state) != PERF_EVENT_STATE_ACTIVE)
+		return 0;
+
+	/* matches smp_wmb() in event_sched_in() */
+	smp_rmb();
+
+	/*
+	 * There is a window with interrupts enabled before we get here,
+	 * so we need to check again lest we try to stop another CPU's event.
+	 */
+	if (READ_ONCE(event->oncpu) != smp_processor_id())
+		return -EAGAIN;
+
+	event->pmu->stop(event, PERF_EF_UPDATE);
+
+	return 0;
+}
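
__perf_event_stop() pairs with the event_sched_in() change above: the writer publishes oncpu before state behind smp_wmb(), and the reader checks state before oncpu across smp_rmb(), so a reader that observes ACTIVE is guaranteed to also observe the matching oncpu value. Schematically (illustrative):

	/*
	 *   event_sched_in() (writer)         __perf_event_stop() (reader)
	 *   -------------------------         ----------------------------
	 *   WRITE_ONCE(event->oncpu, cpu);    if (READ_ONCE(event->state)
	 *   smp_wmb();                                != ACTIVE) return 0;
	 *   WRITE_ONCE(event->state,          smp_rmb();
	 *              ACTIVE);               cpu = READ_ONCE(event->oncpu);
	 */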
+
 static int _perf_event_refresh(struct perf_event *event, int refresh)
 {
 	/*
@@ -3363,21 +3437,30 @@
 
 static int perf_event_read(struct perf_event *event, bool group)
 {
-	int ret = 0;
+	int event_cpu, ret = 0;
 
 	/*
 	 * If event is enabled and currently active on a CPU, update the
 	 * value in the event structure:
 	 */
-	if (event->state == PERF_EVENT_STATE_ACTIVE) {
+	event_cpu = READ_ONCE(event->oncpu);
+
+	if (event->state == PERF_EVENT_STATE_ACTIVE &&
+						!cpu_isolated(event_cpu)) {
 		struct perf_read_data data = {
 			.event = event,
 			.group = group,
 			.ret = 0,
 		};
-		smp_call_function_single(event->oncpu,
+
+		if ((unsigned int)event_cpu >= nr_cpu_ids)
+			return 0;
+		if (!event->attr.exclude_idle ||
+					!per_cpu(is_idle, event_cpu)) {
+			smp_call_function_single(event_cpu,
 					 __perf_event_read, &data, 1);
 		ret = data.ret;
+		}
 	} else if (event->state == PERF_EVENT_STATE_INACTIVE) {
 		struct perf_event_context *ctx = event->ctx;
 		unsigned long flags;
@@ -3472,7 +3555,8 @@
 
 	if (!task) {
 		/* Must be root to operate on a CPU event: */
-		if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
+		if (event->owner != EVENT_OWNER_KERNEL && perf_paranoid_cpu() &&
+			!capable(CAP_SYS_ADMIN))
 			return ERR_PTR(-EACCES);
 
 		/*
@@ -3713,6 +3797,9 @@
 	if (event->destroy)
 		event->destroy(event);
 
+	if (event->pmu->free_drv_configs)
+		event->pmu->free_drv_configs(event);
+
 	if (event->ctx)
 		put_ctx(event->ctx);
 
@@ -3856,6 +3943,15 @@
  */
 static int perf_release(struct inode *inode, struct file *file)
 {
+	struct perf_event *event = file->private_data;
+
+	/*
+	 * Event can be in state OFF because of a constraint check.
+	 * Change to ACTIVE so that it gets cleaned up correctly.
+	 */
+	if ((event->state == PERF_EVENT_STATE_OFF) &&
+	    event->attr.constraint_duplicate)
+		event->state = PERF_EVENT_STATE_ACTIVE;
 	put_event(file->private_data);
 	return 0;
 }
@@ -4265,6 +4361,8 @@
 				 struct perf_event *output_event);
 static int perf_event_set_filter(struct perf_event *event, void __user *arg);
 static int perf_event_set_bpf_prog(struct perf_event *event, u32 prog_fd);
+static int perf_event_drv_configs(struct perf_event *event,
+				  void __user *arg);
 
 static long _perf_ioctl(struct perf_event *event, unsigned int cmd, unsigned long arg)
 {
@@ -4321,6 +4419,9 @@
 	case PERF_EVENT_IOC_SET_BPF:
 		return perf_event_set_bpf_prog(event, arg);
 
+	case PERF_EVENT_IOC_SET_DRV_CONFIGS:
+		return perf_event_drv_configs(event, (void __user *)arg);
+
 	default:
 		return -ENOTTY;
 	}
@@ -4353,6 +4454,7 @@
 	switch (_IOC_NR(cmd)) {
 	case _IOC_NR(PERF_EVENT_IOC_SET_FILTER):
 	case _IOC_NR(PERF_EVENT_IOC_ID):
+	case _IOC_NR(PERF_EVENT_IOC_SET_DRV_CONFIGS):
 		/* Fix up pointer size (usually 4 -> 8 in 32-on-64-bit case */
 		if (_IOC_SIZE(cmd) == sizeof(compat_uptr_t)) {
 			cmd &= ~IOCSIZE_MASK;
@@ -4637,6 +4739,8 @@
 		event->pmu->event_mapped(event);
 }
 
+static void perf_pmu_output_stop(struct perf_event *event);
+
 /*
  * A buffer can be mmap()ed multiple times; either directly through the same
  * event, or through other events by use of perf_event_set_output().
@@ -4664,10 +4768,22 @@
 	 */
 	if (rb_has_aux(rb) && vma->vm_pgoff == rb->aux_pgoff &&
 	    atomic_dec_and_mutex_lock(&rb->aux_mmap_count, &event->mmap_mutex)) {
+		/*
+		 * Stop all AUX events that are writing to this buffer,
+		 * so that we can free its AUX pages and corresponding PMU
+		 * data. Note that after rb::aux_mmap_count dropped to zero,
+		 * they won't start any more (see perf_aux_output_begin()).
+		 */
+		perf_pmu_output_stop(event);
+
+		/* now it's safe to free the pages */
 		atomic_long_sub(rb->aux_nr_pages, &mmap_user->locked_vm);
 		vma->vm_mm->pinned_vm -= rb->aux_mmap_locked;
 
+		/* this has to be the last one */
 		rb_free_aux(rb);
+		WARN_ON_ONCE(atomic_read(&rb->aux_refcount));
+
 		mutex_unlock(&event->mmap_mutex);
 	}
 
@@ -5738,6 +5854,80 @@
 	rcu_read_unlock();
 }
 
+struct remote_output {
+	struct ring_buffer	*rb;
+	int			err;
+};
+
+static void __perf_event_output_stop(struct perf_event *event, void *data)
+{
+	struct perf_event *parent = event->parent;
+	struct remote_output *ro = data;
+	struct ring_buffer *rb = ro->rb;
+
+	if (!has_aux(event))
+		return;
+
+	if (!parent)
+		parent = event;
+
+	/*
+	 * In case of inheritance, it will be the parent that links to the
+	 * ring-buffer, but it will be the child that's actually using it:
+	 */
+	if (rcu_dereference(parent->rb) == rb)
+		ro->err = __perf_event_stop(event);
+}
+
+static int __perf_pmu_output_stop(void *info)
+{
+	struct perf_event *event = info;
+	struct pmu *pmu = event->pmu;
+	struct perf_cpu_context *cpuctx = get_cpu_ptr(pmu->pmu_cpu_context);
+	struct remote_output ro = {
+		.rb	= event->rb,
+	};
+
+	rcu_read_lock();
+	perf_event_aux_ctx(&cpuctx->ctx, __perf_event_output_stop, &ro);
+	if (cpuctx->task_ctx)
+		perf_event_aux_ctx(cpuctx->task_ctx, __perf_event_output_stop,
+				   &ro);
+	rcu_read_unlock();
+
+	return ro.err;
+}
+
+static void perf_pmu_output_stop(struct perf_event *event)
+{
+	struct perf_event *iter;
+	int err, cpu;
+
+restart:
+	rcu_read_lock();
+	list_for_each_entry_rcu(iter, &event->rb->event_list, rb_entry) {
+		/*
+		 * For per-CPU events, we need to make sure that neither they
+		 * nor their children are running; for cpu==-1 events it's
+		 * sufficient to stop the event itself if it's active, since
+		 * it can't have children.
+		 */
+		cpu = iter->cpu;
+		if (cpu == -1)
+			cpu = READ_ONCE(iter->oncpu);
+
+		if (cpu == -1)
+			continue;
+
+		err = cpu_function_call(cpu, __perf_pmu_output_stop, event);
+		if (err == -EAGAIN) {
+			rcu_read_unlock();
+			goto restart;
+		}
+	}
+	rcu_read_unlock();
+}
+
 /*
  * task tracking -- fork/exit
  *
@@ -6932,6 +7122,8 @@
 	.start		= perf_swevent_start,
 	.stop		= perf_swevent_stop,
 	.read		= perf_swevent_read,
+
+	.events_across_hotplug = 1,
 };
 
 #ifdef CONFIG_EVENT_TRACING
@@ -7053,6 +7245,8 @@
 	.start		= perf_swevent_start,
 	.stop		= perf_swevent_stop,
 	.read		= perf_swevent_read,
+
+	.events_across_hotplug = 1,
 };
 
 static inline void perf_tp_register(void)
@@ -7165,6 +7359,15 @@
 }
 #endif
 
+static int perf_event_drv_configs(struct perf_event *event,
+				  void __user *arg)
+{
+	if (!event->pmu->get_drv_configs)
+		return -EINVAL;
+
+	return event->pmu->get_drv_configs(event, arg);
+}
+
 /*
  * hrtimer based swevent callback
  */
@@ -7332,6 +7535,8 @@
 	.start		= cpu_clock_event_start,
 	.stop		= cpu_clock_event_stop,
 	.read		= cpu_clock_event_read,
+
+	.events_across_hotplug = 1,
 };
 
 /*
@@ -7413,6 +7618,8 @@
 	.start		= task_clock_event_start,
 	.stop		= task_clock_event_stop,
 	.read		= task_clock_event_read,
+
+	.events_across_hotplug = 1,
 };
 
 static void perf_pmu_nop_void(struct pmu *pmu)
@@ -7893,6 +8100,7 @@
 	if (!group_leader)
 		group_leader = event;
 
+	mutex_init(&event->group_leader_mutex);
 	mutex_init(&event->child_mutex);
 	INIT_LIST_HEAD(&event->child_list);
 
@@ -7901,6 +8109,7 @@
 	INIT_LIST_HEAD(&event->sibling_list);
 	INIT_LIST_HEAD(&event->rb_entry);
 	INIT_LIST_HEAD(&event->active_entry);
+	INIT_LIST_HEAD(&event->drv_configs);
 	INIT_HLIST_NODE(&event->hlist_entry);
 
 
@@ -8312,10 +8521,16 @@
 	if (flags & ~PERF_FLAG_ALL)
 		return -EINVAL;
 
+	if (perf_paranoid_any() && !capable(CAP_SYS_ADMIN))
+		return -EACCES;
+
 	err = perf_copy_attr(attr_uptr, &attr);
 	if (err)
 		return err;
 
+	if (attr.constraint_duplicate || attr.__reserved_1)
+		return -EINVAL;
+
 	if (!attr.exclude_kernel) {
 		if (perf_paranoid_kernel() && !capable(CAP_SYS_ADMIN))
 			return -EACCES;
@@ -8356,6 +8571,16 @@
 			group_leader = NULL;
 	}
 
+	/*
+	 * Take the group_leader's group_leader_mutex before observing
+	 * anything in the group leader that leads to changes in ctx,
+	 * many of which may be changing on another thread.
+	 * In particular, we want to take this lock before deciding
+	 * whether we need to move_group.
+	 */
+	if (group_leader)
+		mutex_lock(&group_leader->group_leader_mutex);
+
 	if (pid != -1 && !(flags & PERF_FLAG_PID_CGROUP)) {
 		task = find_lively_task_by_vpid(pid);
 		if (IS_ERR(task)) {
@@ -8513,6 +8738,7 @@
 					f_flags);
 	if (IS_ERR(event_file)) {
 		err = PTR_ERR(event_file);
+		event_file = NULL;
 		goto err_context;
 	}
 
@@ -8633,6 +8859,8 @@
 	if (move_group)
 		perf_event_ctx_unlock(group_leader, gctx);
 	mutex_unlock(&ctx->mutex);
+	if (group_leader)
+		mutex_unlock(&group_leader->group_leader_mutex);
 
 	if (task) {
 		mutex_unlock(&task->signal->cred_guard_mutex);
@@ -8682,6 +8910,8 @@
 	if (task)
 		put_task_struct(task);
 err_group_fd:
+	if (group_leader)
+		mutex_unlock(&group_leader->group_leader_mutex);
 	fdput(group);
 err_fd:
 	put_unused_fd(event_fd);
@@ -9393,6 +9623,18 @@
 	rcu_read_unlock();
 }
 
+static void __perf_event_stop_swclock(void *__info)
+{
+	struct perf_event_context *ctx = __info;
+	struct perf_event *event, *tmp;
+
+	list_for_each_entry_safe(event, tmp, &ctx->event_list, event_entry) {
+		if (event->attr.config == PERF_COUNT_SW_CPU_CLOCK &&
+		    event->attr.type == PERF_TYPE_SOFTWARE)
+			cpu_clock_event_stop(event, 0);
+	}
+}
+
 static void perf_event_exit_cpu_context(int cpu)
 {
 	struct perf_event_context *ctx;
@@ -9402,20 +9644,56 @@
 	idx = srcu_read_lock(&pmus_srcu);
 	list_for_each_entry_rcu(pmu, &pmus, entry) {
 		ctx = &per_cpu_ptr(pmu->pmu_cpu_context, cpu)->ctx;
-
 		mutex_lock(&ctx->mutex);
-		smp_call_function_single(cpu, __perf_event_exit_context, ctx, 1);
+		/*
+		 * If keeping events across hotplugging is supported, do not
+		 * remove the event list, but keep it alive across CPU hotplug.
+		 * The context is exited via an fd close path when userspace
+		 * is done and the target CPU is online. If software clock
+		 * event is active, then stop hrtimer associated with it.
+		 * Start the timer when the CPU comes back online.
+		 */
+		if (!pmu->events_across_hotplug)
+			smp_call_function_single(cpu, __perf_event_exit_context,
+						 ctx, 1);
+		else
+			smp_call_function_single(cpu, __perf_event_stop_swclock,
+						 ctx, 1);
 		mutex_unlock(&ctx->mutex);
 	}
 	srcu_read_unlock(&pmus_srcu, idx);
 }
 
+static void perf_event_start_swclock(int cpu)
+{
+	struct perf_event_context *ctx;
+	struct pmu *pmu;
+	int idx;
+	struct perf_event *event, *tmp;
+
+	idx = srcu_read_lock(&pmus_srcu);
+	list_for_each_entry_rcu(pmu, &pmus, entry) {
+		if (pmu->events_across_hotplug) {
+			ctx = &per_cpu_ptr(pmu->pmu_cpu_context, cpu)->ctx;
+			list_for_each_entry_safe(event, tmp, &ctx->event_list,
+						 event_entry) {
+				if (event->attr.config ==
+				    PERF_COUNT_SW_CPU_CLOCK &&
+				    event->attr.type == PERF_TYPE_SOFTWARE)
+					cpu_clock_event_start(event, 0);
+			}
+		}
+	}
+	srcu_read_unlock(&pmus_srcu, idx);
+}
+
 static void perf_event_exit_cpu(int cpu)
 {
 	perf_event_exit_cpu_context(cpu);
 }
 #else
 static inline void perf_event_exit_cpu(int cpu) { }
+static inline void perf_event_start_swclock(int cpu) { }
 #endif
 
 static int
@@ -9454,6 +9732,11 @@
 	case CPU_DOWN_PREPARE:
 		perf_event_exit_cpu(cpu);
 		break;
+
+	case CPU_STARTING:
+		perf_event_start_swclock(cpu);
+		break;
+
 	default:
 		break;
 	}
@@ -9461,6 +9744,25 @@
 	return NOTIFY_OK;
 }
 
+static int event_idle_notif(struct notifier_block *nb, unsigned long action,
+							void *data)
+{
+	switch (action) {
+	case IDLE_START:
+		__this_cpu_write(is_idle, true);
+		break;
+	case IDLE_END:
+		__this_cpu_write(is_idle, false);
+		break;
+	}
+
+	return NOTIFY_OK;
+}
+
+static struct notifier_block perf_event_idle_nb = {
+	.notifier_call = event_idle_notif,
+};
+
 void __init perf_event_init(void)
 {
 	int ret;
@@ -9474,6 +9776,7 @@
 	perf_pmu_register(&perf_task_clock, NULL, -1);
 	perf_tp_register();
 	perf_cpu_notifier(perf_cpu_notify);
+	idle_notifier_register(&perf_event_idle_nb);
 	register_reboot_notifier(&perf_reboot_notifier);
 
 	ret = init_hw_breakpoint();
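
A PMU opts in to the hotplug-survival behavior above simply by setting the new flag in its struct pmu, as the software, tracepoint, cpu-clock and task-clock PMUs in this patch do; for such PMUs, offlining a CPU only stops the software cpu-clock hrtimers, and CPU_STARTING restarts them. Sketch of an opted-in PMU (illustrative; the callbacks are hypothetical placeholders):

	static struct pmu demo_pmu = {
		.task_ctx_nr		= perf_sw_context,
		.event_init		= demo_event_init,	/* hypothetical */
		.add			= demo_add,		/* hypothetical */
		.del			= demo_del,		/* hypothetical */
		.start			= demo_start,		/* hypothetical */
		.stop			= demo_stop,		/* hypothetical */
		.read			= demo_read,		/* hypothetical */

		.events_across_hotplug	= 1,
	};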
diff -ruw linux-4.4.115/kernel/events/internal.h linux-4.4.115-fbx/kernel/events/internal.h
--- linux-4.4.115/kernel/events/internal.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/kernel/events/internal.h	2019-01-22 16:16:28.651293025 +0100
@@ -11,7 +11,6 @@
 struct ring_buffer {
 	atomic_t			refcount;
 	struct rcu_head			rcu_head;
-	struct irq_work			irq_work;
 #ifdef CONFIG_PERF_USE_VMALLOC
 	struct work_struct		work;
 	int				page_order;	/* allocation order  */
diff -ruw linux-4.4.115/kernel/events/ring_buffer.c linux-4.4.115-fbx/kernel/events/ring_buffer.c
--- linux-4.4.115/kernel/events/ring_buffer.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/kernel/events/ring_buffer.c	2019-10-29 09:26:25.585222183 +0100
@@ -221,8 +221,6 @@
 	rcu_read_unlock();
 }
 
-static void rb_irq_work(struct irq_work *work);
-
 static void
 ring_buffer_init(struct ring_buffer *rb, long watermark, int flags)
 {
@@ -243,16 +241,6 @@
 
 	INIT_LIST_HEAD(&rb->event_list);
 	spin_lock_init(&rb->event_lock);
-	init_irq_work(&rb->irq_work, rb_irq_work);
-}
-
-static void ring_buffer_put_async(struct ring_buffer *rb)
-{
-	if (!atomic_dec_and_test(&rb->refcount))
-		return;
-
-	rb->rcu_head.next = (void *)rb;
-	irq_work_queue(&rb->irq_work);
 }
 
 /*
@@ -264,6 +252,10 @@
  * The ordering is similar to that of perf_output_{begin,end}, with
  * the exception of (B), which should be taken care of by the pmu
  * driver, since ordering rules will differ depending on hardware.
+ *
+ * Call this from pmu::start(); see the comment in perf_aux_output_end()
+ * about its use in pmu callbacks. Both can also be called from the PMI
+ * handler if needed.
  */
 void *perf_aux_output_begin(struct perf_output_handle *handle,
 			    struct perf_event *event)
@@ -288,6 +280,13 @@
 		goto err;
 
 	/*
+	 * If rb::aux_mmap_count is zero (and rb_has_aux() above went through),
+	 * the aux buffer is in perf_mmap_close(), about to get freed.
+	 */
+	if (!atomic_read(&rb->aux_mmap_count))
+		goto err_put;
+
+	/*
 	 * Nesting is not supported for AUX area, make sure nested
 	 * writers are caught early
 	 */
@@ -328,10 +327,11 @@
 	return handle->rb->aux_priv;
 
 err_put:
+	/* can't be last */
 	rb_free_aux(rb);
 
 err:
-	ring_buffer_put_async(rb);
+	ring_buffer_put(rb);
 	handle->event = NULL;
 
 	return NULL;
@@ -342,6 +342,10 @@
  * aux_head and posting a PERF_RECORD_AUX into the perf buffer. It is the
  * pmu driver's responsibility to observe ordering rules of the hardware,
  * so that all the data is externally visible before this is called.
+ *
+ * Note: this has to be called from pmu::stop() callback, as the assumption
+ * of the AUX buffer management code is that after pmu::stop(), the AUX
+ * transaction must be stopped and therefore drop the AUX reference count.
  */
 void perf_aux_output_end(struct perf_output_handle *handle, unsigned long size,
 			 bool truncated)
@@ -389,8 +393,9 @@
 	handle->event = NULL;
 
 	local_set(&rb->aux_nest, 0);
+	/* can't be last */
 	rb_free_aux(rb);
-	ring_buffer_put_async(rb);
+	ring_buffer_put(rb);
 }
 
 /*
@@ -467,6 +472,33 @@
 	__free_page(page);
 }
 
+static void __rb_free_aux(struct ring_buffer *rb)
+{
+	int pg;
+
+	/*
+	 * Should never happen, the last reference should be dropped from
+	 * perf_mmap_close() path, which first stops aux transactions (which
+	 * in turn are the atomic holders of aux_refcount) and then does the
+	 * last rb_free_aux().
+	 */
+	WARN_ON_ONCE(in_atomic());
+
+	if (rb->aux_priv) {
+		rb->free_aux(rb->aux_priv);
+		rb->free_aux = NULL;
+		rb->aux_priv = NULL;
+	}
+
+	if (rb->aux_nr_pages) {
+		for (pg = 0; pg < rb->aux_nr_pages; pg++)
+			rb_free_aux_page(rb, pg);
+
+		kfree(rb->aux_pages);
+		rb->aux_nr_pages = 0;
+	}
+}
+
 int rb_alloc_aux(struct ring_buffer *rb, struct perf_event *event,
 		 pgoff_t pgoff, int nr_pages, long watermark, int flags)
 {
@@ -530,7 +562,7 @@
 			goto out;
 	}
 
-	rb->aux_priv = event->pmu->setup_aux(event->cpu, rb->aux_pages, nr_pages,
+	rb->aux_priv = event->pmu->setup_aux(event, rb->aux_pages, nr_pages,
 					     overwrite);
 	if (!rb->aux_priv)
 		goto out;
@@ -555,45 +587,15 @@
 	if (!ret)
 		rb->aux_pgoff = pgoff;
 	else
-		rb_free_aux(rb);
+		__rb_free_aux(rb);
 
 	return ret;
 }
 
-static void __rb_free_aux(struct ring_buffer *rb)
-{
-	int pg;
-
-	if (rb->aux_priv) {
-		rb->free_aux(rb->aux_priv);
-		rb->free_aux = NULL;
-		rb->aux_priv = NULL;
-	}
-
-	if (rb->aux_nr_pages) {
-		for (pg = 0; pg < rb->aux_nr_pages; pg++)
-			rb_free_aux_page(rb, pg);
-
-		kfree(rb->aux_pages);
-		rb->aux_nr_pages = 0;
-	}
-}
-
 void rb_free_aux(struct ring_buffer *rb)
 {
 	if (atomic_dec_and_test(&rb->aux_refcount))
-		irq_work_queue(&rb->irq_work);
-}
-
-static void rb_irq_work(struct irq_work *work)
-{
-	struct ring_buffer *rb = container_of(work, struct ring_buffer, irq_work);
-
-	if (!atomic_read(&rb->aux_refcount))
 		__rb_free_aux(rb);
-
-	if (rb->rcu_head.next == (void *)rb)
-		call_rcu(&rb->rcu_head, rb_free_rcu);
 }
 
 #ifndef CONFIG_PERF_USE_VMALLOC
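
The net effect in this file is to drop the irq_work deferral and free the AUX buffer synchronously from the final reference drop. That is safe because of the ordering perf_mmap_close() now enforces; the reference-count invariant, roughly (illustrative):

	/*
	 * aux_refcount == 1 (the mmap) + number of in-flight AUX transactions
	 *
	 * pmu::stop() ends a transaction and drops its reference while the
	 * mmap reference is still held, so that drop "can't be last".  Only
	 * perf_mmap_close(), after stopping all writers, drops the final
	 * reference, from a context where __rb_free_aux() may sleep; hence
	 * the WARN_ON_ONCE(in_atomic()) added above.
	 */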
diff -ruw linux-4.4.115/kernel/exit.c linux-4.4.115-fbx/kernel/exit.c
--- linux-4.4.115/kernel/exit.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/kernel/exit.c	2019-10-29 09:26:25.589222222 +0100
@@ -53,6 +53,9 @@
 #include <linux/oom.h>
 #include <linux/writeback.h>
 #include <linux/shm.h>
+#include <linux/kcov.h>
+
+#include "sched/tune.h"
 
 #include <asm/uaccess.h>
 #include <asm/unistd.h>
@@ -388,6 +391,7 @@
 {
 	struct mm_struct *mm = tsk->mm;
 	struct core_state *core_state;
+	int mm_released;
 
 	mm_release(tsk, mm);
 	if (!mm)
@@ -434,9 +438,12 @@
 	enter_lazy_tlb(mm, current);
 	task_unlock(tsk);
 	mm_update_next_owner(mm);
-	mmput(mm);
+
+	mm_released = mmput(mm);
 	if (test_thread_flag(TIF_MEMDIE))
 		exit_oom_victim();
+	if (mm_released)
+		set_tsk_thread_flag(tsk, TIF_MM_RELEASED);
 }
 
 static struct task_struct *find_alive_thread(struct task_struct *p)
@@ -632,6 +639,7 @@
 	static DEFINE_SPINLOCK(low_water_lock);
 	static int lowest_to_date = THREAD_SIZE;
 	unsigned long free;
+	int islower = false;
 
 	free = stack_not_used(current);
 
@@ -640,11 +648,16 @@
 
 	spin_lock(&low_water_lock);
 	if (free < lowest_to_date) {
-		pr_warn("%s (%d) used greatest stack depth: %lu bytes left\n",
-			current->comm, task_pid_nr(current), free);
 		lowest_to_date = free;
+		islower = true;
 	}
 	spin_unlock(&low_water_lock);
+
+	if (islower) {
+		printk(KERN_WARNING "%s (%d) used greatest stack depth: "
+				"%lu bytes left\n",
+				current->comm, task_pid_nr(current), free);
+	}
 }
 #else
 static inline void check_stack_usage(void) {}
@@ -657,6 +670,7 @@
 	TASKS_RCU(int tasks_rcu_i);
 
 	profile_task_exit(tsk);
+	kcov_task_exit(tsk);
 
 	WARN_ON(blk_needs_flush_plug(tsk));
 
@@ -699,6 +713,10 @@
 	}
 
 	exit_signals(tsk);  /* sets PF_EXITING */
+
+	sched_exit(tsk);
+	schedtune_exit_task(tsk);
+
 	/*
 	 * tsk->flags are checked in the futex code to protect against
 	 * an exiting task cleaning up the robust pi futexes.
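
The check_stack_usage() hunk above shrinks the critical section: the new-low decision is recorded under low_water_lock and the slow printk() happens only after the unlock. A hedged userspace sketch of the same move-the-logging-out pattern, with a pthread mutex standing in for the spinlock (names illustrative):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t low_water_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned long lowest_to_date = 16384;	/* THREAD_SIZE stand-in */

static void report_stack_low_water(unsigned long free_bytes)
{
	bool islower = false;

	pthread_mutex_lock(&low_water_lock);
	if (free_bytes < lowest_to_date) {
		lowest_to_date = free_bytes;
		islower = true;		/* remember the decision only */
	}
	pthread_mutex_unlock(&low_water_lock);

	/* Console output (printk in the kernel) is slow and may take other
	 * locks, so it runs strictly after the critical section. */
	if (islower)
		printf("new greatest stack depth: %lu bytes left\n",
		       free_bytes);
}
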
diff -ruw linux-4.4.115/kernel/fork.c linux-4.4.115-fbx/kernel/fork.c
--- linux-4.4.115/kernel/fork.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/kernel/fork.c	2019-10-29 09:26:25.589222222 +0100
@@ -23,6 +23,7 @@
 #include <linux/file.h>
 #include <linux/fdtable.h>
 #include <linux/iocontext.h>
+#include <linux/kasan.h>
 #include <linux/key.h>
 #include <linux/binfmts.h>
 #include <linux/mman.h>
@@ -76,6 +77,7 @@
 #include <linux/aio.h>
 #include <linux/compiler.h>
 #include <linux/sysctl.h>
+#include <linux/kcov.h>
 
 #include <asm/pgtable.h>
 #include <asm/pgalloc.h>
@@ -148,18 +150,18 @@
 }
 #endif
 
-void __weak arch_release_thread_info(struct thread_info *ti)
+void __weak arch_release_thread_stack(unsigned long *stack)
 {
 }
 
-#ifndef CONFIG_ARCH_THREAD_INFO_ALLOCATOR
+#ifndef CONFIG_ARCH_THREAD_STACK_ALLOCATOR
 
 /*
  * Allocate pages if THREAD_SIZE is >= PAGE_SIZE, otherwise use a
  * kmemcache based allocator.
  */
 # if THREAD_SIZE >= PAGE_SIZE
-static struct thread_info *alloc_thread_info_node(struct task_struct *tsk,
+static unsigned long *alloc_thread_stack_node(struct task_struct *tsk,
 						  int node)
 {
 	struct page *page = alloc_kmem_pages_node(node, THREADINFO_GFP,
@@ -168,30 +170,33 @@
 	return page ? page_address(page) : NULL;
 }
 
-static inline void free_thread_info(struct thread_info *ti)
+static inline void free_thread_stack(unsigned long *stack)
 {
-	kaiser_unmap_thread_stack(ti);
-	free_kmem_pages((unsigned long)ti, THREAD_SIZE_ORDER);
+	struct page *page = virt_to_page(stack);
+
+	kasan_alloc_pages(page, THREAD_SIZE_ORDER);
+	kaiser_unmap_thread_stack(stack);
+	__free_kmem_pages(page, THREAD_SIZE_ORDER);
 }
 # else
-static struct kmem_cache *thread_info_cache;
+static struct kmem_cache *thread_stack_cache;
 
-static struct thread_info *alloc_thread_info_node(struct task_struct *tsk,
+static unsigned long *alloc_thread_stack_node(struct task_struct *tsk,
 						  int node)
 {
-	return kmem_cache_alloc_node(thread_info_cache, THREADINFO_GFP, node);
+	return kmem_cache_alloc_node(thread_stack_cache, THREADINFO_GFP, node);
 }
 
-static void free_thread_info(struct thread_info *ti)
+static void free_thread_stack(unsigned long *stack)
 {
-	kmem_cache_free(thread_info_cache, ti);
+	kmem_cache_free(thread_stack_cache, stack);
 }
 
-void thread_info_cache_init(void)
+void thread_stack_cache_init(void)
 {
-	thread_info_cache = kmem_cache_create("thread_info", THREAD_SIZE,
+	thread_stack_cache = kmem_cache_create("thread_stack", THREAD_SIZE,
 					      THREAD_SIZE, 0, NULL);
-	BUG_ON(thread_info_cache == NULL);
+	BUG_ON(thread_stack_cache == NULL);
 }
 # endif
 #endif
@@ -214,9 +219,9 @@
 /* SLAB cache for mm_struct structures (tsk->mm) */
 static struct kmem_cache *mm_cachep;
 
-static void account_kernel_stack(struct thread_info *ti, int account)
+static void account_kernel_stack(unsigned long *stack, int account)
 {
-	struct zone *zone = page_zone(virt_to_page(ti));
+	struct zone *zone = page_zone(virt_to_page(stack));
 
 	mod_zone_page_state(zone, NR_KERNEL_STACK, account);
 }
@@ -224,8 +229,8 @@
 void free_task(struct task_struct *tsk)
 {
 	account_kernel_stack(tsk->stack, -1);
-	arch_release_thread_info(tsk->stack);
-	free_thread_info(tsk->stack);
+	arch_release_thread_stack(tsk->stack);
+	free_thread_stack(tsk->stack);
 	rt_mutex_debug_task_free(tsk);
 	ftrace_graph_exit_task(tsk);
 	put_seccomp_filter(tsk);
@@ -336,7 +341,7 @@
 static struct task_struct *dup_task_struct(struct task_struct *orig, int node)
 {
 	struct task_struct *tsk;
-	struct thread_info *ti;
+	unsigned long *stack;
 	int err;
 
 	if (node == NUMA_NO_NODE)
@@ -345,19 +350,19 @@
 	if (!tsk)
 		return NULL;
 
-	ti = alloc_thread_info_node(tsk, node);
-	if (!ti)
+	stack = alloc_thread_stack_node(tsk, node);
+	if (!stack)
 		goto free_tsk;
 
 	err = arch_dup_task_struct(tsk, orig);
 	if (err)
-		goto free_ti;
+		goto free_stack;
 
-	tsk->stack = ti;
+	tsk->stack = stack;
 
 	err = kaiser_map_thread_stack(tsk->stack);
 	if (err)
-		goto free_ti;
+		goto free_stack;
 #ifdef CONFIG_SECCOMP
 	/*
 	 * We must handle setting up seccomp filters once we're under
@@ -389,12 +394,19 @@
 	tsk->task_frag.page = NULL;
 	tsk->wake_q.next = NULL;
 
-	account_kernel_stack(ti, 1);
+	/*
+	 * inherit parent exec_mode.
+	 */
+	tsk->exec_mode = orig->exec_mode;
+
+	account_kernel_stack(stack, 1);
+
+	kcov_task_init(tsk);
 
 	return tsk;
 
-free_ti:
-	free_thread_info(ti);
+free_stack:
+	free_thread_stack(stack);
 free_tsk:
 	free_task_struct(tsk);
 	return NULL;
@@ -699,14 +711,10 @@
 }
 EXPORT_SYMBOL_GPL(__mmdrop);
 
-/*
- * Decrement the use count and release all resources for an mm.
- */
-void mmput(struct mm_struct *mm)
+static inline void __mmput(struct mm_struct *mm)
 {
-	might_sleep();
+	VM_BUG_ON(atomic_read(&mm->mm_users));
 
-	if (atomic_dec_and_test(&mm->mm_users)) {
 		uprobe_clear_state(mm);
 		exit_aio(mm);
 		ksm_exit(mm);
@@ -722,9 +730,37 @@
 			module_put(mm->binfmt->module);
 		mmdrop(mm);
 	}
+
+/*
+ * Decrement the use count and release all resources for an mm.
+ */
+int mmput(struct mm_struct *mm)
+{
+	int mm_freed = 0;
+	might_sleep();
+
+	if (atomic_dec_and_test(&mm->mm_users)) {
+		__mmput(mm);
+		mm_freed = 1;
+	}
+	return mm_freed;
 }
 EXPORT_SYMBOL_GPL(mmput);
 
+static void mmput_async_fn(struct work_struct *work)
+{
+	struct mm_struct *mm = container_of(work, struct mm_struct, async_put_work);
+	__mmput(mm);
+}
+
+void mmput_async(struct mm_struct *mm)
+{
+	if (atomic_dec_and_test(&mm->mm_users)) {
+		INIT_WORK(&mm->async_put_work, mmput_async_fn);
+		schedule_work(&mm->async_put_work);
+	}
+}
+
 /**
  * set_mm_exe_file - change a reference to the mm's executable file
  *
@@ -1684,6 +1720,7 @@
 bad_fork_cleanup_perf:
 	perf_event_free_task(p);
 bad_fork_cleanup_policy:
+	free_task_load_ptrs(p);
 #ifdef CONFIG_NUMA
 	mpol_put(p->mempolicy);
 bad_fork_cleanup_threadgroup_lock:
@@ -1715,7 +1752,7 @@
 			    cpu_to_node(cpu));
 	if (!IS_ERR(task)) {
 		init_idle_pids(task->pids);
-		init_idle(task, cpu);
+		init_idle(task, cpu, false);
 	}
 
 	return task;
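
fork.c above makes mmput() report whether it actually freed the mm and adds mmput_async(), which pushes the potentially sleeping __mmput() teardown to a workqueue when the final reference is dropped. A rough userspace analogue of both entry points, with a detached pthread standing in for schedule_work() (all names illustrative):

#include <pthread.h>
#include <stdatomic.h>
#include <stdlib.h>

struct mm {
	atomic_int users;
	/* ... address-space state ... */
};

static void mm_teardown(struct mm *mm)		/* __mmput() stand-in */
{
	free(mm);				/* may sleep in the kernel */
}

static void *teardown_worker(void *arg)
{
	mm_teardown(arg);
	return NULL;
}

static int mm_put(struct mm *mm)		/* returns 1 if freed */
{
	if (atomic_fetch_sub(&mm->users, 1) == 1) {
		mm_teardown(mm);
		return 1;
	}
	return 0;
}

static void mm_put_async(struct mm *mm)
{
	if (atomic_fetch_sub(&mm->users, 1) == 1) {
		pthread_t t;			/* schedule_work() stand-in */

		pthread_create(&t, NULL, teardown_worker, mm);
		pthread_detach(t);
	}
}
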
diff -ruw linux-4.4.115/kernel/futex.c linux-4.4.115-fbx/kernel/futex.c
--- linux-4.4.115/kernel/futex.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/kernel/futex.c	2019-10-29 09:26:25.593222261 +0100
@@ -470,6 +470,7 @@
 	unsigned long address = (unsigned long)uaddr;
 	struct mm_struct *mm = current->mm;
 	struct page *page, *page_head;
+	struct address_space *mapping;
 	int err, ro = 0;
 
 	/*
@@ -555,7 +556,19 @@
 	}
 #endif
 
-	lock_page(page_head);
+	/*
+	 * The treatment of mapping from this point on is critical. The page
+	 * lock protects many things but in this context the page lock
+	 * stabilizes mapping, prevents inode freeing in the shared
+	 * file-backed region case and guards against movement to swap cache.
+	 *
+	 * Strictly speaking the page lock is not needed in all cases being
+	 * considered here and the page lock unnecessarily forces serialization.
+	 * From this point on, mapping will be re-verified if necessary and
+	 * the page lock will be acquired only if it is unavoidable.
+	 */
+
+	mapping = READ_ONCE(page_head->mapping);
 
 	/*
 	 * If page_head->mapping is NULL, then it cannot be a PageAnon
@@ -572,18 +585,31 @@
 	 * shmem_writepage move it from filecache to swapcache beneath us:
 	 * an unlikely race, but we do need to retry for page_head->mapping.
 	 */
-	if (!page_head->mapping) {
-		int shmem_swizzled = PageSwapCache(page_head);
+	if (unlikely(!mapping)) {
+		int shmem_swizzled;
+
+		/*
+		 * Page lock is required to identify which special case above
+		 * applies. If this is really a shmem page then the page lock
+		 * will prevent unexpected transitions.
+		 */
+		lock_page(page);
+		shmem_swizzled = PageSwapCache(page) || page->mapping;
 		unlock_page(page_head);
 		put_page(page_head);
+
 		if (shmem_swizzled)
 			goto again;
+
 		return -EFAULT;
 	}
 
 	/*
 	 * Private mappings are handled in a simple way.
 	 *
+	 * If the futex key is stored on an anonymous page, then the associated
+	 * object is the mm which is implicitly pinned by the calling process.
+	 *
 	 * NOTE: When userspace waits on a MAP_SHARED mapping, even if
 	 * it's a read-only handle, it's expected that futexes attach to
 	 * the object not the particular process.
@@ -601,16 +627,74 @@
 		key->both.offset |= FUT_OFF_MMSHARED; /* ref taken on mm */
 		key->private.mm = mm;
 		key->private.address = address;
+
+		get_futex_key_refs(key); /* implies smp_mb(); (B) */
+
 	} else {
+		struct inode *inode;
+
+		/*
+		 * The associated futex object in this case is the inode and
+		 * the page->mapping must be traversed. Ordinarily this should
+		 * be stabilised under page lock but it's not strictly
+		 * necessary in this case as we just want to pin the inode, not
+		 * update the radix tree or anything like that.
+		 *
+		 * The RCU read lock is taken as the inode is finally freed
+		 * under RCU. If the mapping still matches expectations then the
+		 * mapping->host can be safely accessed as being a valid inode.
+		 */
+		rcu_read_lock();
+
+		if (READ_ONCE(page_head->mapping) != mapping) {
+			rcu_read_unlock();
+			put_page(page_head);
+
+			goto again;
+		}
+
+		inode = READ_ONCE(mapping->host);
+		if (!inode) {
+			rcu_read_unlock();
+			put_page(page_head);
+
+			goto again;
+		}
+
+		/*
+		 * Take a reference unless it is about to be freed. Previously
+		 * this reference was taken by ihold under the page lock
+		 * pinning the inode in place so i_lock was unnecessary. The
+		 * only way for this check to fail is if the inode was
+		 * truncated in parallel so warn for now if this happens.
+		 *
+		 * We are not calling into get_futex_key_refs() in file-backed
+		 * cases, therefore a successful atomic_inc return below will
+		 * guarantee that get_futex_key() will still imply smp_mb(); (B).
+		 */
+		if (WARN_ON_ONCE(!atomic_inc_not_zero(&inode->i_count))) {
+			rcu_read_unlock();
+			put_page(page_head);
+
+			goto again;
+		}
+
+		/* Should be impossible but let's be paranoid for now */
+		if (WARN_ON_ONCE(inode->i_mapping != mapping)) {
+			err = -EFAULT;
+			rcu_read_unlock();
+			iput(inode);
+
+			goto out;
+		}
+
 		key->both.offset |= FUT_OFF_INODE; /* inode-based key */
-		key->shared.inode = page_head->mapping->host;
+		key->shared.inode = inode;
 		key->shared.pgoff = basepage_index(page);
+		rcu_read_unlock();
 	}
 
-	get_futex_key_refs(key); /* implies MB (B) */
-
 out:
-	unlock_page(page_head);
 	put_page(page_head);
 	return err;
 }
@@ -681,7 +765,7 @@
 	int ret;
 
 	pagefault_disable();
-	ret = __copy_from_user_inatomic(dest, from, sizeof(u32));
+	ret = __get_user(*dest, from);
 	pagefault_enable();
 
 	return ret ? -EFAULT : 0;
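
The get_futex_key() rework above pins the inode without taking the page lock: it re-reads page->mapping under RCU and takes a reference only if i_count is still non-zero, retrying otherwise. A sketch of that atomic_inc_not_zero() step in C11 atomics (illustrative struct, not the kernel's inode):

#include <stdatomic.h>
#include <stdbool.h>

struct obj {
	atomic_int i_count;
};

/* atomic_inc_not_zero() analogue: take a reference only while the count
 * is still non-zero, so a dying object is never resurrected. */
static bool get_ref_unless_zero(struct obj *obj)
{
	int old = atomic_load(&obj->i_count);

	while (old != 0) {
		if (atomic_compare_exchange_weak(&obj->i_count, &old, old + 1))
			return true;	/* reference taken */
	}
	return false;			/* being freed; caller must retry */
}
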
diff -ruw linux-4.4.115/kernel/irq/chip.c linux-4.4.115-fbx/kernel/irq/chip.c
--- linux-4.4.115/kernel/irq/chip.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/kernel/irq/chip.c	2019-10-29 09:26:25.593222261 +0100
@@ -836,7 +836,8 @@
 	irq_settings_clr_and_set(desc, clr, set);
 
 	irqd_clear(&desc->irq_data, IRQD_NO_BALANCING | IRQD_PER_CPU |
-		   IRQD_TRIGGER_MASK | IRQD_LEVEL | IRQD_MOVE_PCNTXT);
+		   IRQD_TRIGGER_MASK | IRQD_LEVEL | IRQD_MOVE_PCNTXT |
+		   IRQD_AFFINITY_MANAGED);
 	if (irq_settings_has_no_balance_set(desc))
 		irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
 	if (irq_settings_is_per_cpu(desc))
@@ -845,6 +846,8 @@
 		irqd_set(&desc->irq_data, IRQD_MOVE_PCNTXT);
 	if (irq_settings_is_level(desc))
 		irqd_set(&desc->irq_data, IRQD_LEVEL);
+	if (irq_settings_has_affinity_managed_set(desc))
+		irqd_set(&desc->irq_data, IRQD_AFFINITY_MANAGED);
 
 	irqd_set(&desc->irq_data, irq_settings_get_trigger_mask(desc));
 
diff -ruw linux-4.4.115/kernel/irq/cpuhotplug.c linux-4.4.115-fbx/kernel/irq/cpuhotplug.c
--- linux-4.4.115/kernel/irq/cpuhotplug.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/kernel/irq/cpuhotplug.c	2019-01-22 16:16:28.659293097 +0100
@@ -11,6 +11,7 @@
 #include <linux/interrupt.h>
 #include <linux/ratelimit.h>
 #include <linux/irq.h>
+#include <linux/cpumask.h>
 
 #include "internals.h"
 
@@ -20,6 +21,7 @@
 	const struct cpumask *affinity = d->common->affinity;
 	struct irq_chip *c;
 	bool ret = false;
+	struct cpumask available_cpus;
 
 	/*
 	 * If this is a per-CPU interrupt, or the affinity does not
@@ -29,8 +31,37 @@
 	    !cpumask_test_cpu(smp_processor_id(), affinity))
 		return false;
 
+	cpumask_copy(&available_cpus, affinity);
+	cpumask_andnot(&available_cpus, &available_cpus, cpu_isolated_mask);
+	affinity = &available_cpus;
+
 	if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
+		/*
+		 * The order of preference for selecting a fallback CPU is
+		 *
+		 * (1) online and un-isolated CPU from default affinity
+		 * (2) online and un-isolated CPU
+		 * (3) online CPU
+		 */
+		cpumask_andnot(&available_cpus, cpu_online_mask,
+							cpu_isolated_mask);
+		if (cpumask_intersects(&available_cpus, irq_default_affinity))
+			cpumask_and(&available_cpus, &available_cpus,
+							irq_default_affinity);
+		else if (cpumask_empty(&available_cpus))
 		affinity = cpu_online_mask;
+
+		/*
+		 * We are overriding the affinity with all online and
+		 * un-isolated CPUs. The irq_set_affinity_locked() call
+		 * below notifies this mask to the PM QOS affinity listener.
+		 * That results in applying the CPU_DMA_LATENCY QOS
+		 * to all the CPUs specified in the mask. But the low
+		 * level irqchip driver sets the affinity of an irq
+		 * to only one CPU. So pick only one CPU from the
+		 * prepared mask while overriding the user affinity.
+		 */
+		affinity = cpumask_of(cpumask_any(affinity));
 		ret = true;
 	}
 
@@ -38,7 +69,7 @@
 	if (!c->irq_set_affinity) {
 		pr_debug("IRQ%u: unable to set affinity\n", d->irq);
 	} else {
-		int r = irq_do_set_affinity(d, affinity, false);
+		int r = irq_set_affinity_locked(d, affinity, false);
 		if (r)
 			pr_warn_ratelimited("IRQ%u: set affinity failed(%d).\n",
 					    d->irq, r);
@@ -69,6 +100,9 @@
 		bool affinity_broken;
 
 		desc = irq_to_desc(irq);
+		if (!desc)
+			continue;
+
 		raw_spin_lock(&desc->lock);
 		affinity_broken = migrate_one_irq(desc);
 		raw_spin_unlock(&desc->lock);
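
migrate_one_irq() above encodes a three-level preference when the old affinity has no online CPU left: (1) online and un-isolated CPUs from the default affinity, (2) any online and un-isolated CPU, (3) any online CPU, then narrows the result to a single CPU. The whole selection reduces to a few mask operations; a minimal sketch with 64-bit bitmasks standing in for cpumasks (assumes at most 64 CPUs, function name illustrative):

#include <stdint.h>

static uint64_t pick_fallback_mask(uint64_t online, uint64_t isolated,
				   uint64_t default_affinity)
{
	uint64_t avail = online & ~isolated;	/* cpumask_andnot() */

	if (avail & default_affinity)		/* cpumask_intersects() */
		avail &= default_affinity;	/* preference (1) */
	else if (!avail)
		avail = online;			/* last resort (3) */
	/* otherwise: any online, un-isolated CPU -- preference (2) */

	/* Mirror cpumask_of(cpumask_any(affinity)): pick a single CPU. */
	return avail & -avail;			/* lowest set bit */
}
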
diff -ruw linux-4.4.115/kernel/irq/internals.h linux-4.4.115-fbx/kernel/irq/internals.h
--- linux-4.4.115/kernel/irq/internals.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/kernel/irq/internals.h	2019-01-22 16:16:28.659293097 +0100
@@ -105,6 +105,8 @@
 					   struct irqaction *action) { }
 #endif
 
+extern bool irq_can_set_affinity_usr(unsigned int irq);
+
 extern int irq_select_affinity_usr(unsigned int irq, struct cpumask *mask);
 
 extern void irq_set_thread_affinity(struct irq_desc *desc);
diff -ruw linux-4.4.115/kernel/irq/manage.c linux-4.4.115-fbx/kernel/irq/manage.c
--- linux-4.4.115/kernel/irq/manage.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/kernel/irq/manage.c	2019-10-29 09:26:25.593222261 +0100
@@ -115,12 +115,12 @@
 #ifdef CONFIG_SMP
 cpumask_var_t irq_default_affinity;
 
-static int __irq_can_set_affinity(struct irq_desc *desc)
+static bool __irq_can_set_affinity(struct irq_desc *desc)
 {
 	if (!desc || !irqd_can_balance(&desc->irq_data) ||
 	    !desc->irq_data.chip || !desc->irq_data.chip->irq_set_affinity)
-		return 0;
-	return 1;
+		return false;
+	return true;
 }
 
 /**
@@ -134,6 +134,21 @@
 }
 
 /**
+ * irq_can_set_affinity_usr - Check if affinity of an irq can be set from user space
+ * @irq:	Interrupt to check
+ *
+ * Like irq_can_set_affinity() above, but additionally checks for the
+ * AFFINITY_MANAGED flag.
+ */
+bool irq_can_set_affinity_usr(unsigned int irq)
+{
+	struct irq_desc *desc = irq_to_desc(irq);
+
+	return __irq_can_set_affinity(desc) &&
+		!irqd_affinity_is_managed(&desc->irq_data);
+}
+
+/**
  *	irq_set_thread_affinity - Notify irq threads to adjust affinity
 *	@desc:		irq descriptor which has affinity changed
  *
@@ -319,6 +334,9 @@
 	desc->affinity_notify = notify;
 	raw_spin_unlock_irqrestore(&desc->lock, flags);
 
+	if (!notify && old_notify)
+		cancel_work_sync(&old_notify->work);
+
 	if (old_notify)
 		kref_put(&old_notify->kref, old_notify->release);
 
diff -ruw linux-4.4.115/kernel/irq/msi.c linux-4.4.115-fbx/kernel/irq/msi.c
--- linux-4.4.115/kernel/irq/msi.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/kernel/irq/msi.c	2019-01-22 16:16:28.659293097 +0100
@@ -268,7 +268,7 @@
 	struct msi_domain_ops *ops = info->ops;
 	msi_alloc_info_t arg;
 	struct msi_desc *desc;
-	int i, ret, virq;
+	int i, ret, virq = 0;
 
 	ret = ops->msi_check(domain, info, dev);
 	if (ret == 0)
diff -ruw linux-4.4.115/kernel/irq/proc.c linux-4.4.115-fbx/kernel/irq/proc.c
--- linux-4.4.115/kernel/irq/proc.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/kernel/irq/proc.c	2019-01-22 16:16:28.663293133 +0100
@@ -96,7 +96,7 @@
 	cpumask_var_t new_value;
 	int err;
 
-	if (!irq_can_set_affinity(irq) || no_irq_affinity)
+	if (!irq_can_set_affinity_usr(irq) || no_irq_affinity)
 		return -EIO;
 
 	if (!alloc_cpumask_var(&new_value, GFP_KERNEL))
@@ -113,6 +113,11 @@
 		err = -EINVAL;
 		goto free_cpumask;
 	}
+
+	if (cpumask_subset(new_value, cpu_isolated_mask)) {
+		err = -EINVAL;
+		goto free_cpumask;
+	}
 
 	/*
 	 * Do not allow disabling IRQs completely - it's a too easy
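
The write_irq_affinity() hunk above adds one more validity check: a user-supplied mask is rejected with -EINVAL when it is a subset of the isolated CPUs, i.e. when no schedulable CPU would remain. The test itself is a single mask operation; a small sketch (bitmask stand-ins, illustrative name):

#include <errno.h>
#include <stdint.h>

/* cpumask_subset(new_value, cpu_isolated_mask) analogue: true when every
 * bit of new_value is also set in isolated. */
static int validate_user_affinity(uint64_t new_value, uint64_t isolated)
{
	if ((new_value & ~isolated) == 0)
		return -EINVAL;		/* nothing usable would remain */
	return 0;
}
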
diff -ruw linux-4.4.115/kernel/irq/settings.h linux-4.4.115-fbx/kernel/irq/settings.h
--- linux-4.4.115/kernel/irq/settings.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/kernel/irq/settings.h	2019-01-22 16:16:28.663293133 +0100
@@ -17,6 +17,7 @@
 	_IRQ_IS_POLLED		= IRQ_IS_POLLED,
 	_IRQ_DISABLE_UNLAZY	= IRQ_DISABLE_UNLAZY,
 	_IRQF_MODIFY_MASK	= IRQF_MODIFY_MASK,
+	_IRQ_AFFINITY_MANAGED	= IRQ_AFFINITY_MANAGED,
 };
 
 #define IRQ_PER_CPU		GOT_YOU_MORON
@@ -32,6 +33,7 @@
 #define IRQ_DISABLE_UNLAZY	GOT_YOU_MORON
 #undef IRQF_MODIFY_MASK
 #define IRQF_MODIFY_MASK	GOT_YOU_MORON
+#define IRQ_AFFINITY_MANAGED	GOT_YOU_MORON
 
 static inline void
 irq_settings_clr_and_set(struct irq_desc *desc, u32 clr, u32 set)
@@ -65,6 +67,16 @@
 	return desc->status_use_accessors & _IRQ_NO_BALANCING;
 }
 
+static inline void irq_settings_set_affinity_managed(struct irq_desc *desc)
+{
+	desc->status_use_accessors |= _IRQ_AFFINITY_MANAGED;
+}
+
+static inline bool irq_settings_has_affinity_managed_set(struct irq_desc *desc)
+{
+	return desc->status_use_accessors & _IRQ_AFFINITY_MANAGED;
+}
+
 static inline u32 irq_settings_get_trigger_mask(struct irq_desc *desc)
 {
 	return desc->status_use_accessors & IRQ_TYPE_SENSE_MASK;
diff -ruw linux-4.4.115/kernel/kthread.c linux-4.4.115-fbx/kernel/kthread.c
--- linux-4.4.115/kernel/kthread.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/kernel/kthread.c	2019-10-29 09:26:25.597222300 +0100
@@ -65,7 +65,7 @@
 static struct kthread *to_live_kthread(struct task_struct *k)
 {
 	struct completion *vfork = ACCESS_ONCE(k->vfork_done);
-	if (likely(vfork))
+	if (likely(vfork) && try_get_task_stack(k))
 		return __to_kthread(vfork);
 	return NULL;
 }
@@ -427,8 +427,10 @@
 {
 	struct kthread *kthread = to_live_kthread(k);
 
-	if (kthread)
+	if (kthread) {
 		__kthread_unpark(k, kthread);
+		put_task_stack(k);
+	}
 }
 EXPORT_SYMBOL_GPL(kthread_unpark);
 
@@ -457,6 +459,7 @@
 				wait_for_completion(&kthread->parked);
 			}
 		}
+		put_task_stack(k);
 		ret = 0;
 	}
 	return ret;
@@ -492,6 +495,7 @@
 		__kthread_unpark(k, kthread);
 		wake_up_process(k);
 		wait_for_completion(&kthread->exited);
+		put_task_stack(k);
 	}
 	ret = k->exit_code;
 	put_task_struct(k);
@@ -604,6 +608,19 @@
 }
 EXPORT_SYMBOL_GPL(kthread_worker_fn);
 
+/*
+ * Returns true when the work could not be queued at the moment.
+ * It happens when it is already pending in a worker list
+ * or when it is being cancelled.
+ */
+static inline bool queuing_blocked(struct kthread_worker *worker,
+				   struct kthread_work *work)
+{
+	lockdep_assert_held(&worker->lock);
+
+	return !list_empty(&work->node) || work->canceling;
+}
+
 /* insert @work before @pos in @worker */
 static void insert_kthread_work(struct kthread_worker *worker,
 			       struct kthread_work *work,
@@ -633,7 +650,7 @@
 	unsigned long flags;
 
 	spin_lock_irqsave(&worker->lock, flags);
-	if (list_empty(&work->node)) {
+	if (!queuing_blocked(worker, work)) {
 		insert_kthread_work(worker, work, &worker->work_list);
 		ret = true;
 	}
@@ -694,6 +711,87 @@
 }
 EXPORT_SYMBOL_GPL(flush_kthread_work);
 
+/*
+ * This function removes the work from the worker queue. Also it makes sure
+ * that it won't get queued later via the delayed work's timer.
+ *
+ * The work might still be in use when this function finishes. See the
+ * current_work processed by the worker.
+ *
+ * Return: %true if @work was pending and successfully canceled,
+ *	%false if @work was not pending
+ */
+static bool __kthread_cancel_work(struct kthread_work *work,
+				  unsigned long *flags)
+{
+	/*
+	 * Try to remove the work from a worker list. It might either
+	 * be from worker->work_list or from worker->delayed_work_list.
+	 */
+	if (!list_empty(&work->node)) {
+		list_del_init(&work->node);
+		return true;
+	}
+
+	return false;
+}
+
+static bool __kthread_cancel_work_sync(struct kthread_work *work)
+{
+	struct kthread_worker *worker = work->worker;
+	unsigned long flags;
+	int ret = false;
+
+	if (!worker)
+		goto out;
+
+	spin_lock_irqsave(&worker->lock, flags);
+	/* Work must not be used with >1 worker, see kthread_queue_work(). */
+	WARN_ON_ONCE(work->worker != worker);
+
+	ret = __kthread_cancel_work(work, &flags);
+
+	if (worker->current_work != work)
+		goto out_fast;
+
+	/*
+	 * The work is in progress and we need to wait with the lock released.
+	 * In the meantime, block any queuing by setting the canceling counter.
+	 */
+	work->canceling++;
+	spin_unlock_irqrestore(&worker->lock, flags);
+	flush_kthread_work(work);
+	spin_lock_irqsave(&worker->lock, flags);
+	work->canceling--;
+
+out_fast:
+	spin_unlock_irqrestore(&worker->lock, flags);
+out:
+	return ret;
+}
+
+/**
+ * kthread_cancel_work_sync - cancel a kthread work and wait for it to finish
+ * @work: the kthread work to cancel
+ *
+ * Cancel @work and wait for its execution to finish.  This function
+ * can be used even if the work re-queues itself. On return from this
+ * function, @work is guaranteed to be not pending or executing on any CPU.
+ *
+ * kthread_cancel_work_sync(&delayed_work->work) must not be used for
+ * delayed_works. Use kthread_cancel_delayed_work_sync() instead.
+ *
+ * The caller must ensure that the worker on which @work was last
+ * queued can't be destroyed before this function returns.
+ *
+ * Return: %true if @work was pending, %false otherwise.
+ */
+bool kthread_cancel_work_sync(struct kthread_work *work)
+{
+	return __kthread_cancel_work_sync(work);
+}
+EXPORT_SYMBOL_GPL(kthread_cancel_work_sync);
+
 /**
  * flush_kthread_worker - flush all current works on a kthread_worker
  * @worker: worker to flush
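
kthread_cancel_work_sync() above must flush a still-running work with the worker lock dropped; the work->canceling counter keeps queuing blocked during that window so the work cannot re-queue itself meanwhile. A hedged userspace sketch of that handshake, with a pthread mutex in place of the worker spinlock (names illustrative):

#include <pthread.h>
#include <stdbool.h>

struct work {
	bool pending;
	int canceling;
};

static pthread_mutex_t worker_lock = PTHREAD_MUTEX_INITIALIZER;

static bool queue_work(struct work *w)
{
	bool queued = false;

	pthread_mutex_lock(&worker_lock);
	if (!w->pending && !w->canceling) {	/* queuing_blocked() */
		w->pending = true;
		queued = true;
	}
	pthread_mutex_unlock(&worker_lock);
	return queued;
}

static void cancel_work_sync(struct work *w, void (*flush)(struct work *))
{
	pthread_mutex_lock(&worker_lock);
	w->pending = false;	/* __kthread_cancel_work() stand-in */
	w->canceling++;		/* block re-queue while we flush unlocked */
	pthread_mutex_unlock(&worker_lock);

	flush(w);		/* may sleep; the lock must not be held */

	pthread_mutex_lock(&worker_lock);
	w->canceling--;
	pthread_mutex_unlock(&worker_lock);
}
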
diff -ruw linux-4.4.115/kernel/locking/Makefile linux-4.4.115-fbx/kernel/locking/Makefile
--- linux-4.4.115/kernel/locking/Makefile	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/kernel/locking/Makefile	2019-01-22 16:16:28.667293170 +0100
@@ -1,3 +1,6 @@
+# Any varying coverage in these files is non-deterministic
+# and is generally not a function of system call inputs.
+KCOV_INSTRUMENT		:= n
 
 obj-y += mutex.o semaphore.o rwsem.o percpu-rwsem.o
 
diff -ruw linux-4.4.115/kernel/locking/mutex.c linux-4.4.115-fbx/kernel/locking/mutex.c
--- linux-4.4.115/kernel/locking/mutex.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/kernel/locking/mutex.c	2019-01-22 16:16:28.671293206 +0100
@@ -26,6 +26,7 @@
 #include <linux/interrupt.h>
 #include <linux/debug_locks.h>
 #include <linux/osq_lock.h>
+#include <linux/delay.h>
 
 /*
  * In the DEBUG case we are using the "NULL fastpath" for mutexes,
@@ -378,6 +379,17 @@
 		 * values at the cost of a few extra spins.
 		 */
 		cpu_relax_lowlatency();
+
+		/*
+		 * On ARM systems, we must slow down the waiter's repeated
+		 * acquisition of spin_mlock and atomics on the lock count, or
+		 * we risk starving out a thread attempting to release the
+		 * mutex. The mutex slowpath release must take spin lock
+		 * wait_lock. This spin lock can share a monitor with the
+		 * other waiter atomics in the mutex data structure, so must
+		 * take care to rate limit the waiters.
+		 */
+		udelay(1);
 	}
 
 	osq_unlock(&lock->osq);
@@ -537,7 +549,7 @@
 		goto skip_wait;
 
 	debug_mutex_lock_common(lock, &waiter);
-	debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));
+	debug_mutex_add_waiter(lock, &waiter, task);
 
 	/* add waiting tasks to the end of the waitqueue (FIFO): */
 	list_add_tail(&waiter.list, &lock->wait_list);
@@ -584,7 +596,7 @@
 	}
 	__set_task_state(task, TASK_RUNNING);
 
-	mutex_remove_waiter(lock, &waiter, current_thread_info());
+	mutex_remove_waiter(lock, &waiter, task);
 	/* set it to 0 if there are no waiters left: */
 	if (likely(list_empty(&lock->wait_list)))
 		atomic_set(&lock->count, 0);
@@ -605,7 +617,7 @@
 	return 0;
 
 err:
-	mutex_remove_waiter(lock, &waiter, task_thread_info(task));
+	mutex_remove_waiter(lock, &waiter, task);
 	spin_unlock_mutex(&lock->wait_lock, flags);
 	debug_mutex_free_waiter(&waiter);
 	mutex_release(&lock->dep_map, 1, ip);
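
The udelay(1) added to the mutex optimistic-spin loop above is a plain backoff: by rate-limiting each waiter's atomic traffic on the shared monitor, the releasing CPU's stores can complete instead of being starved. A userspace sketch of spin-with-backoff, with nanosleep() standing in for udelay() (illustrative, not the kernel loop):

#include <stdatomic.h>
#include <time.h>

static void spin_acquire_with_backoff(atomic_flag *lock)
{
	const struct timespec one_us = { .tv_sec = 0, .tv_nsec = 1000 };

	while (atomic_flag_test_and_set_explicit(lock, memory_order_acquire)) {
		/* udelay(1) analogue: back off so the releasing thread's
		 * store to the lock word is not starved by our atomics. */
		nanosleep(&one_us, NULL);
	}
}
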
diff -ruw linux-4.4.115/kernel/locking/mutex.h linux-4.4.115-fbx/kernel/locking/mutex.h
--- linux-4.4.115/kernel/locking/mutex.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/kernel/locking/mutex.h	2019-01-22 16:16:28.671293206 +0100
@@ -13,7 +13,7 @@
 		do { spin_lock(lock); (void)(flags); } while (0)
 #define spin_unlock_mutex(lock, flags) \
 		do { spin_unlock(lock); (void)(flags); } while (0)
-#define mutex_remove_waiter(lock, waiter, ti) \
+#define mutex_remove_waiter(lock, waiter, task) \
 		__list_del((waiter)->list.prev, (waiter)->list.next)
 
 #ifdef CONFIG_MUTEX_SPIN_ON_OWNER
diff -ruw linux-4.4.115/kernel/locking/osq_lock.c linux-4.4.115-fbx/kernel/locking/osq_lock.c
--- linux-4.4.115/kernel/locking/osq_lock.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/kernel/locking/osq_lock.c	2019-01-22 16:16:28.671293206 +0100
@@ -1,6 +1,7 @@
 #include <linux/percpu.h>
 #include <linux/sched.h>
 #include <linux/osq_lock.h>
+#include <linux/sched/rt.h>
 
 /*
  * An MCS like lock especially tailored for optimistic spinning for sleeping
@@ -85,6 +86,7 @@
 {
 	struct optimistic_spin_node *node = this_cpu_ptr(&osq_node);
 	struct optimistic_spin_node *prev, *next;
+	struct task_struct *task = current;
 	int curr = encode_cpu(smp_processor_id());
 	int old;
 
@@ -104,6 +106,19 @@
 
 	prev = decode_cpu(old);
 	node->prev = prev;
+
+	/*
+	 * osq_lock()			unqueue
+	 *
+	 * node->prev = prev		osq_wait_next()
+	 * WMB				MB
+	 * prev->next = node		next->prev = prev // unqueue-C
+	 *
+	 * Here 'node->prev' and 'next->prev' are the same variable and we need
+	 * to ensure these stores happen in-order to avoid corrupting the list.
+	 */
+	smp_wmb();
+
 	WRITE_ONCE(prev->next, node);
 
 	/*
@@ -118,8 +133,13 @@
 	while (!READ_ONCE(node->locked)) {
 		/*
 		 * If we need to reschedule bail... so we can block.
+		 * If a task spins on the owner on one CPU after acquiring
+		 * osq_lock while an RT task spins on another CPU to
+		 * acquire osq_lock, it will starve the owner from
+		 * completing if the owner is to be scheduled on the same CPU.
+		 * That would be a livelock.
 		 */
-		if (need_resched())
+		if (need_resched() || rt_task(task))
 			goto unqueue;
 
 		cpu_relax_lowlatency();
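
The smp_wmb() inserted in osq_lock() above orders the plain store of node->prev against the WRITE_ONCE() that publishes the node through prev->next; without it, a concurrent unqueue could read a stale prev and corrupt the list. A C11 sketch of the same store-store ordering, with a release fence standing in for smp_wmb() (types illustrative):

#include <stdatomic.h>

struct node {
	struct node *prev;
	_Atomic(struct node *) next;
};

static void publish(struct node *node, struct node *prev)
{
	node->prev = prev;			   /* plain store */
	atomic_thread_fence(memory_order_release); /* smp_wmb() analogue */
	atomic_store_explicit(&prev->next, node,   /* WRITE_ONCE() analogue */
			      memory_order_relaxed);
}
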
diff -ruw linux-4.4.115/kernel/locking/percpu-rwsem.c linux-4.4.115-fbx/kernel/locking/percpu-rwsem.c
--- linux-4.4.115/kernel/locking/percpu-rwsem.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/kernel/locking/percpu-rwsem.c	2019-01-22 16:16:28.671293206 +0100
@@ -8,151 +8,186 @@
 #include <linux/sched.h>
 #include <linux/errno.h>
 
-int __percpu_init_rwsem(struct percpu_rw_semaphore *brw,
+int __percpu_init_rwsem(struct percpu_rw_semaphore *sem,
 			const char *name, struct lock_class_key *rwsem_key)
 {
-	brw->fast_read_ctr = alloc_percpu(int);
-	if (unlikely(!brw->fast_read_ctr))
+	sem->read_count = alloc_percpu(int);
+	if (unlikely(!sem->read_count))
 		return -ENOMEM;
 
 	/* ->rw_sem represents the whole percpu_rw_semaphore for lockdep */
-	__init_rwsem(&brw->rw_sem, name, rwsem_key);
-	rcu_sync_init(&brw->rss, RCU_SCHED_SYNC);
-	atomic_set(&brw->slow_read_ctr, 0);
-	init_waitqueue_head(&brw->write_waitq);
+	rcu_sync_init(&sem->rss, RCU_SCHED_SYNC);
+	__init_rwsem(&sem->rw_sem, name, rwsem_key);
+	init_waitqueue_head(&sem->writer);
+	sem->readers_block = 0;
 	return 0;
 }
 EXPORT_SYMBOL_GPL(__percpu_init_rwsem);
 
-void percpu_free_rwsem(struct percpu_rw_semaphore *brw)
+void percpu_free_rwsem(struct percpu_rw_semaphore *sem)
 {
 	/*
 	 * XXX: temporary kludge. The error path in alloc_super()
 	 * assumes that percpu_free_rwsem() is safe after kzalloc().
 	 */
-	if (!brw->fast_read_ctr)
+	if (!sem->read_count)
 		return;
 
-	rcu_sync_dtor(&brw->rss);
-	free_percpu(brw->fast_read_ctr);
-	brw->fast_read_ctr = NULL; /* catch use after free bugs */
+	rcu_sync_dtor(&sem->rss);
+	free_percpu(sem->read_count);
+	sem->read_count = NULL; /* catch use after free bugs */
 }
+EXPORT_SYMBOL_GPL(percpu_free_rwsem);
 
+int __percpu_down_read(struct percpu_rw_semaphore *sem, int try)
+{
 /*
- * This is the fast-path for down_read/up_read. If it succeeds we rely
- * on the barriers provided by rcu_sync_enter/exit; see the comments in
- * percpu_down_write() and percpu_up_write().
+	 * Due to having preemption disabled the decrement happens on
+	 * the same CPU as the increment, avoiding the
+	 * increment-on-one-CPU-and-decrement-on-another problem.
+	 *
+	 * If the reader misses the writer's assignment of readers_block, then
+	 * the writer is guaranteed to see the reader's increment.
  *
- * If this helper fails the callers rely on the normal rw_semaphore and
- * atomic_dec_and_test(), so in this case we have the necessary barriers.
+	 * Conversely, any readers that increment their sem->read_count after
+	 * the writer looks are guaranteed to see the readers_block value,
+	 * which in turn means that they are guaranteed to immediately
+	 * decrement their sem->read_count, so that it doesn't matter that the
+	 * writer missed them.
  */
-static bool update_fast_ctr(struct percpu_rw_semaphore *brw, unsigned int val)
-{
-	bool success;
 
-	preempt_disable();
-	success = rcu_sync_is_idle(&brw->rss);
-	if (likely(success))
-		__this_cpu_add(*brw->fast_read_ctr, val);
-	preempt_enable();
+	smp_mb(); /* A matches D */
 
-	return success;
-}
+	/*
+	 * If !readers_block the critical section starts here, matched by the
+	 * release in percpu_up_write().
+	 */
+	if (likely(!smp_load_acquire(&sem->readers_block)))
+		return 1;
 
 /*
- * Like the normal down_read() this is not recursive, the writer can
- * come after the first percpu_down_read() and create the deadlock.
- *
- * Note: returns with lock_is_held(brw->rw_sem) == T for lockdep,
- * percpu_up_read() does rwsem_release(). This pairs with the usage
- * of ->rw_sem in percpu_down/up_write().
+	 * Per the above comment, we still have preemption disabled and
+	 * will thus decrement on the same CPU as we incremented.
  */
-void percpu_down_read(struct percpu_rw_semaphore *brw)
-{
-	might_sleep();
-	rwsem_acquire_read(&brw->rw_sem.dep_map, 0, 0, _RET_IP_);
+	__percpu_up_read(sem);
 
-	if (likely(update_fast_ctr(brw, +1)))
-		return;
+	if (try)
+		return 0;
 
-	/* Avoid rwsem_acquire_read() and rwsem_release() */
-	__down_read(&brw->rw_sem);
-	atomic_inc(&brw->slow_read_ctr);
-	__up_read(&brw->rw_sem);
-}
-EXPORT_SYMBOL_GPL(percpu_down_read);
+	/*
+	 * We either call schedule() in the wait, or we'll fall through
+	 * and reschedule on the preempt_enable() in percpu_down_read().
+	 */
+	preempt_enable_no_resched();
 
-int percpu_down_read_trylock(struct percpu_rw_semaphore *brw)
-{
-	if (unlikely(!update_fast_ctr(brw, +1))) {
-		if (!__down_read_trylock(&brw->rw_sem))
-			return 0;
-		atomic_inc(&brw->slow_read_ctr);
-		__up_read(&brw->rw_sem);
-	}
+	/*
+	 * Avoid lockdep for the down/up_read(); we already have them.
+	 */
+	__down_read(&sem->rw_sem);
+	this_cpu_inc(*sem->read_count);
+	__up_read(&sem->rw_sem);
 
-	rwsem_acquire_read(&brw->rw_sem.dep_map, 0, 1, _RET_IP_);
+	preempt_disable();
 	return 1;
 }
+EXPORT_SYMBOL_GPL(__percpu_down_read);
 
-void percpu_up_read(struct percpu_rw_semaphore *brw)
+void __percpu_up_read(struct percpu_rw_semaphore *sem)
 {
-	rwsem_release(&brw->rw_sem.dep_map, 1, _RET_IP_);
-
-	if (likely(update_fast_ctr(brw, -1)))
-		return;
+	smp_mb(); /* B matches C */
+	/*
+	 * In other words, if they see our decrement (presumably to aggregate
+	 * zero, as that is the only time it matters) they will also see our
+	 * critical section.
+	 */
+	__this_cpu_dec(*sem->read_count);
 
-	/* false-positive is possible but harmless */
-	if (atomic_dec_and_test(&brw->slow_read_ctr))
-		wake_up_all(&brw->write_waitq);
+	/* Prod writer to recheck readers_active */
+	wake_up(&sem->writer);
 }
-EXPORT_SYMBOL_GPL(percpu_up_read);
+EXPORT_SYMBOL_GPL(__percpu_up_read);
+
+#define per_cpu_sum(var)						\
+({									\
+	typeof(var) __sum = 0;						\
+	int cpu;							\
+	compiletime_assert_atomic_type(__sum);				\
+	for_each_possible_cpu(cpu)					\
+		__sum += per_cpu(var, cpu);				\
+	__sum;								\
+})
 
-static int clear_fast_ctr(struct percpu_rw_semaphore *brw)
+/*
+ * Return true if the modular sum of the sem->read_count per-CPU variable is
+ * zero.  If this sum is zero, then it is stable due to the fact that if any
+ * newly arriving readers increment a given counter, they will immediately
+ * decrement that same counter.
+ */
+static bool readers_active_check(struct percpu_rw_semaphore *sem)
 {
-	unsigned int sum = 0;
-	int cpu;
+	if (per_cpu_sum(*sem->read_count) != 0)
+		return false;
 
-	for_each_possible_cpu(cpu) {
-		sum += per_cpu(*brw->fast_read_ctr, cpu);
-		per_cpu(*brw->fast_read_ctr, cpu) = 0;
-	}
+	/*
+	 * If we observed the decrement; ensure we see the entire critical
+	 * section.
+	 */
+
+	smp_mb(); /* C matches B */
 
-	return sum;
+	return true;
 }
 
-void percpu_down_write(struct percpu_rw_semaphore *brw)
+void percpu_down_write(struct percpu_rw_semaphore *sem)
 {
+	/* Notify readers to take the slow path. */
+	rcu_sync_enter(&sem->rss);
+
+	down_write(&sem->rw_sem);
+
 	/*
-	 * Make rcu_sync_is_idle() == F and thus disable the fast-path in
-	 * percpu_down_read() and percpu_up_read(), and wait for gp pass.
-	 *
-	 * The latter synchronises us with the preceding readers which used
-	 * the fast-past, so we can not miss the result of __this_cpu_add()
-	 * or anything else inside their criticial sections.
+	 * Notify new readers to block; up until now, and thus throughout the
+	 * longish rcu_sync_enter() above, new readers could still come in.
 	 */
-	rcu_sync_enter(&brw->rss);
+	WRITE_ONCE(sem->readers_block, 1);
 
-	/* exclude other writers, and block the new readers completely */
-	down_write(&brw->rw_sem);
+	smp_mb(); /* D matches A */
 
-	/* nobody can use fast_read_ctr, move its sum into slow_read_ctr */
-	atomic_add(clear_fast_ctr(brw), &brw->slow_read_ctr);
+	/*
+	 * If they don't see our write of readers_block, then we are
+	 * guaranteed to see their sem->read_count increment, and therefore
+	 * will wait for them.
+	 */
 
-	/* wait for all readers to complete their percpu_up_read() */
-	wait_event(brw->write_waitq, !atomic_read(&brw->slow_read_ctr));
+	/* Wait for all now active readers to complete. */
+	wait_event(sem->writer, readers_active_check(sem));
 }
 EXPORT_SYMBOL_GPL(percpu_down_write);
 
-void percpu_up_write(struct percpu_rw_semaphore *brw)
+void percpu_up_write(struct percpu_rw_semaphore *sem)
 {
-	/* release the lock, but the readers can't use the fast-path */
-	up_write(&brw->rw_sem);
 	/*
-	 * Enable the fast-path in percpu_down_read() and percpu_up_read()
-	 * but only after another gp pass; this adds the necessary barrier
-	 * to ensure the reader can't miss the changes done by us.
+	 * Signal the writer is done, no fast path yet.
+	 *
+	 * One reason that we cannot just immediately flip to readers_fast is
+	 * that new readers might fail to see the results of this writer's
+	 * critical section.
+	 *
+	 * Therefore we force it through the slow path which guarantees an
+	 * acquire and thereby guarantees the critical section's consistency.
+	 */
+	smp_store_release(&sem->readers_block, 0);
+
+	/*
+	 * Release the write lock, this will allow readers back in the game.
+	 */
+	up_write(&sem->rw_sem);
+
+	/*
+	 * Once this completes (at least one RCU-sched grace period hence) the
+	 * reader fast path will be available again. Safe to use outside the
+	 * exclusive write lock because it's counting.
 	 */
-	rcu_sync_exit(&brw->rss);
+	rcu_sync_exit(&sem->rss);
 }
 EXPORT_SYMBOL_GPL(percpu_up_write);
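
The rewritten percpu-rwsem above lets the writer proceed once the per-CPU read counts sum to zero: individual counters can go negative when a reader locks on one CPU and unlocks on another, but the modular sum is zero exactly when no reader holds the lock, and zero is stable because late readers increment and then immediately decrement. A minimal sketch of per_cpu_sum()/readers_active_check() over a plain array (sizes and names illustrative):

#include <stdbool.h>

#define NR_CPUS 8

static int read_count[NR_CPUS];	/* sem->read_count stand-in */

static bool readers_active_check(void)
{
	int sum = 0;
	int cpu;

	/* per_cpu_sum(): individual entries may be negative if a reader
	 * migrated between lock and unlock; only the sum matters. */
	for (cpu = 0; cpu < NR_CPUS; cpu++)
		sum += read_count[cpu];

	return sum == 0;
}
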
diff -ruw linux-4.4.115/kernel/locking/rwsem-xadd.c linux-4.4.115-fbx/kernel/locking/rwsem-xadd.c
--- linux-4.4.115/kernel/locking/rwsem-xadd.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/kernel/locking/rwsem-xadd.c	2019-10-29 09:26:25.601222339 +0100
@@ -511,6 +511,41 @@
 	unsigned long flags;
 
 	/*
+	 * If a spinner is present, there is a chance that the load of
+	 * rwsem_has_spinner() in rwsem_wake() can be reordered with
+	 * respect to decrement of rwsem count in __up_write() leading
+	 * to wakeup being missed.
+	 *
+	 * spinning writer                  up_write caller
+	 * ---------------                  -----------------------
+	 * [S] osq_unlock()                 [L] osq
+	 *  spin_lock(wait_lock)
+	 *  sem->count=0xFFFFFFFF00000001
+	 *            +0xFFFFFFFF00000000
+	 *  count=sem->count
+	 *  MB
+	 *                                   sem->count=0xFFFFFFFE00000001
+	 *                                             -0xFFFFFFFF00000001
+	 *                                   RMB
+	 *                                   spin_trylock(wait_lock)
+	 *                                   return
+	 * rwsem_try_write_lock(count)
+	 * spin_unlock(wait_lock)
+	 * schedule()
+	 *
+	 * Reordering of atomic_long_sub_return_release() in __up_write()
+	 * and rwsem_has_spinner() in rwsem_wake() can cause a missed
+	 * wakeup in the up_write() context. In the spinning writer, sem->count
+	 * and the local variable count are both 0xFFFFFFFE00000001, which
+	 * makes rwsem_try_write_lock() fail to acquire the rwsem and the
+	 * spinning writer go to sleep in rwsem_down_write_failed().
+	 *
+	 * The smp_rmb() here is to make sure that the spinner state is
+	 * consulted after sem->count is updated in up_write context.
+	 */
+	smp_rmb();
+
+	/*
 	 * If a spinner is present, it is not necessary to do the wakeup.
 	 * Try to do wakeup only if the trylock succeeds to minimize
 	 * spinlock contention which may introduce too much delay in the
diff -ruw linux-4.4.115/kernel/Makefile linux-4.4.115-fbx/kernel/Makefile
--- linux-4.4.115/kernel/Makefile	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/kernel/Makefile	2019-01-22 16:16:28.627292807 +0100
@@ -19,6 +19,17 @@
 CFLAGS_REMOVE_irq_work.o = $(CC_FLAGS_FTRACE)
 endif
 
+# Prevents flicker of uninteresting __do_softirq()/__local_bh_disable_ip()
+# in coverage traces.
+KCOV_INSTRUMENT_softirq.o := n
+# These are called from save_stack_trace() on slub debug path,
+# and produce insane amounts of uninteresting coverage.
+KCOV_INSTRUMENT_module.o := n
+KCOV_INSTRUMENT_extable.o := n
+# Don't self-instrument.
+KCOV_INSTRUMENT_kcov.o := n
+KASAN_SANITIZE_kcov.o := n
+
 # cond_syscall is currently not LTO compatible
 CFLAGS_sys_ni.o = $(DISABLE_LTO)
 
@@ -69,6 +80,7 @@
 obj-$(CONFIG_AUDIT_WATCH) += audit_watch.o audit_fsnotify.o
 obj-$(CONFIG_AUDIT_TREE) += audit_tree.o
 obj-$(CONFIG_GCOV_KERNEL) += gcov/
+obj-$(CONFIG_KCOV) += kcov.o
 obj-$(CONFIG_KPROBES) += kprobes.o
 obj-$(CONFIG_KGDB) += debug/
 obj-$(CONFIG_DETECT_HUNG_TASK) += hung_task.o
diff -ruw linux-4.4.115/kernel/module.c linux-4.4.115-fbx/kernel/module.c
--- linux-4.4.115/kernel/module.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/kernel/module.c	2019-10-29 09:26:25.601222339 +0100
@@ -2585,7 +2585,13 @@
 	return vmalloc_exec(size);
 }
 
-#ifdef CONFIG_DEBUG_KMEMLEAK
+#if defined(CONFIG_DEBUG_KMEMLEAK) && defined(CONFIG_DEBUG_MODULE_SCAN_OFF)
+static void kmemleak_load_module(const struct module *mod,
+				 const struct load_info *info)
+{
+	kmemleak_no_scan(mod->module_core);
+}
+#elif defined(CONFIG_DEBUG_KMEMLEAK)
 static void kmemleak_load_module(const struct module *mod,
 				 const struct load_info *info)
 {
diff -ruw linux-4.4.115/kernel/panic.c linux-4.4.115-fbx/kernel/panic.c
--- linux-4.4.115/kernel/panic.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/kernel/panic.c	2019-01-22 16:16:28.675293242 +0100
@@ -24,6 +24,10 @@
 #include <linux/init.h>
 #include <linux/nmi.h>
 #include <linux/console.h>
+#include <soc/qcom/minidump.h>
+
+#define CREATE_TRACE_POINTS
+#include <trace/events/exception.h>
 
 #define PANIC_TIMER_STEP 100
 #define PANIC_BLINK_SPD 18
@@ -77,6 +81,8 @@
 	long i, i_next = 0;
 	int state = 0;
 
+	trace_kernel_panic(0);
+
 	/*
 	 * Disable local interrupts. This will prevent panic_smp_self_stop
 	 * from deadlocking the first cpu that invokes the panic, since
@@ -103,6 +109,7 @@
 	va_start(args, fmt);
 	vsnprintf(buf, sizeof(buf), fmt, args);
 	va_end(args);
+	dump_stack_minidump(0);
 	pr_emerg("Kernel panic - not syncing: %s\n", buf);
 #ifdef CONFIG_DEBUG_BUGVERBOSE
 	/*
@@ -178,6 +185,9 @@
 			mdelay(PANIC_TIMER_STEP);
 		}
 	}
+
+	trace_kernel_panic_late(0);
+
 	if (panic_timeout != 0) {
 		/*
 		 * This will not be a clean reboot, with everything
diff -ruw linux-4.4.115/kernel/power/Kconfig linux-4.4.115-fbx/kernel/power/Kconfig
--- linux-4.4.115/kernel/power/Kconfig	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/kernel/power/Kconfig	2019-10-29 09:26:25.605222378 +0100
@@ -1,6 +1,7 @@
 config SUSPEND
 	bool "Suspend to RAM and standby"
 	depends on ARCH_SUSPEND_POSSIBLE
+	select RTC_LIB
 	default y
 	---help---
 	  Allow the system to enter sleep states in which main memory is
@@ -28,6 +29,15 @@
 	  of suspend, or they are content with invoking sync() from
 	  user-space before invoking suspend.  Say Y if that's your case.
 
+config WAKELOCK
+	bool "Android's method of preventing suspend"
+	default y
+	---help---
+	  This allows applications to prevent the CPU from suspending while
+	  they need it.
+
+	  Say Y if you are running an Android userspace.
+
 config HIBERNATE_CALLBACKS
 	bool
 
diff -ruw linux-4.4.115/kernel/power/main.c linux-4.4.115-fbx/kernel/power/main.c
--- linux-4.4.115/kernel/power/main.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/kernel/power/main.c	2019-01-22 16:16:28.679293278 +0100
@@ -38,12 +38,19 @@
 }
 EXPORT_SYMBOL_GPL(unregister_pm_notifier);
 
-int pm_notifier_call_chain(unsigned long val)
+int __pm_notifier_call_chain(unsigned long val, int nr_to_call, int *nr_calls)
 {
-	int ret = blocking_notifier_call_chain(&pm_chain_head, val, NULL);
+	int ret;
+
+	ret = __blocking_notifier_call_chain(&pm_chain_head, val, NULL,
+						nr_to_call, nr_calls);
 
 	return notifier_to_errno(ret);
 }
+int pm_notifier_call_chain(unsigned long val)
+{
+	return __pm_notifier_call_chain(val, -1, NULL);
+}
 
 /* If set, devices may be suspended and resumed asynchronously. */
 int pm_async_enabled = 1;
@@ -280,13 +287,7 @@
 	return pm_wakeup_irq ? sprintf(buf, "%u\n", pm_wakeup_irq) : -ENODATA;
 }
 
-static ssize_t pm_wakeup_irq_store(struct kobject *kobj,
-					struct kobj_attribute *attr,
-					const char *buf, size_t n)
-{
-	return -EINVAL;
-}
-power_attr(pm_wakeup_irq);
+power_attr_ro(pm_wakeup_irq);
 
 #else /* !CONFIG_PM_SLEEP_DEBUG */
 static inline void pm_print_times_init(void) {}
@@ -564,14 +565,7 @@
 	return show_trace_dev_match(buf, PAGE_SIZE);
 }
 
-static ssize_t
-pm_trace_dev_match_store(struct kobject *kobj, struct kobj_attribute *attr,
-			 const char *buf, size_t n)
-{
-	return -EINVAL;
-}
-
-power_attr(pm_trace_dev_match);
+power_attr_ro(pm_trace_dev_match);
 
 #endif /* CONFIG_PM_TRACE */
 
diff -ruw linux-4.4.115/kernel/power/Makefile linux-4.4.115-fbx/kernel/power/Makefile
--- linux-4.4.115/kernel/power/Makefile	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/kernel/power/Makefile	2019-01-22 16:16:28.675293242 +0100
@@ -12,3 +12,5 @@
 obj-$(CONFIG_PM_WAKELOCKS)	+= wakelock.o
 
 obj-$(CONFIG_MAGIC_SYSRQ)	+= poweroff.o
+
+obj-$(CONFIG_SUSPEND)	+= wakeup_reason.o
diff -ruw linux-4.4.115/kernel/power/power.h linux-4.4.115-fbx/kernel/power/power.h
--- linux-4.4.115/kernel/power/power.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/kernel/power/power.h	2019-10-29 09:26:25.605222378 +0100
@@ -77,6 +77,15 @@
 	.store	= _name##_store,		\
 }
 
+#define power_attr_ro(_name) \
+static struct kobj_attribute _name##_attr = {	\
+	.attr	= {				\
+		.name = __stringify(_name),	\
+		.mode = S_IRUGO,		\
+	},					\
+	.show	= _name##_show,			\
+}
+
 /* Preferred image size in bytes (default 500 MB) */
 extern unsigned long image_size;
 /* Size of memory reserved for drivers (default SPARE_PAGES x PAGE_SIZE) */
@@ -191,6 +200,8 @@
 
 #ifdef CONFIG_PM_SLEEP
 /* kernel/power/main.c */
+extern int __pm_notifier_call_chain(unsigned long val, int nr_to_call,
+				    int *nr_calls);
 extern int pm_notifier_call_chain(unsigned long val);
 #endif
 
diff -ruw linux-4.4.115/kernel/power/process.c linux-4.4.115-fbx/kernel/power/process.c
--- linux-4.4.115/kernel/power/process.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/kernel/power/process.c	2019-01-22 16:16:28.679293278 +0100
@@ -18,6 +18,7 @@
 #include <linux/workqueue.h>
 #include <linux/kmod.h>
 #include <trace/events/power.h>
+#include <linux/wakeup_reason.h>
 #include <linux/cpuset.h>
 
 /*
@@ -36,6 +37,9 @@
 	unsigned int elapsed_msecs;
 	bool wakeup = false;
 	int sleep_usecs = USEC_PER_MSEC;
+#ifdef CONFIG_PM_SLEEP
+	char suspend_abort[MAX_SUSPEND_ABORT_LEN];
+#endif
 
 	do_gettimeofday(&start);
 
@@ -65,6 +69,11 @@
 			break;
 
 		if (pm_wakeup_pending()) {
+#ifdef CONFIG_PM_SLEEP
+			pm_get_active_wakeup_sources(suspend_abort,
+				MAX_SUSPEND_ABORT_LEN);
+			log_suspend_abort_reason(suspend_abort);
+#endif
 			wakeup = true;
 			break;
 		}
@@ -84,15 +93,17 @@
 	do_div(elapsed_msecs64, NSEC_PER_MSEC);
 	elapsed_msecs = elapsed_msecs64;
 
-	if (todo) {
+	if (wakeup) {
 		pr_cont("\n");
-		pr_err("Freezing of tasks %s after %d.%03d seconds "
+		pr_err("Freezing of tasks aborted after %d.%03d seconds",
+		       elapsed_msecs / 1000, elapsed_msecs % 1000);
+	} else if (todo) {
+		pr_cont("\n");
+		pr_err("Freezing of tasks failed after %d.%03d seconds "
 		       "(%d tasks refusing to freeze, wq_busy=%d):\n",
-		       wakeup ? "aborted" : "failed",
 		       elapsed_msecs / 1000, elapsed_msecs % 1000,
 		       todo - wq_busy, wq_busy);
 
-		if (!wakeup) {
 			read_lock(&tasklist_lock);
 			for_each_process_thread(g, p) {
 				if (p != current && !freezer_should_skip(p)
@@ -100,7 +111,6 @@
 					sched_show_task(p);
 			}
 			read_unlock(&tasklist_lock);
-		}
 	} else {
 		pr_cont("(elapsed %d.%03d seconds) ", elapsed_msecs / 1000,
 			elapsed_msecs % 1000);
diff -ruw linux-4.4.115/kernel/power/qos.c linux-4.4.115-fbx/kernel/power/qos.c
--- linux-4.4.115/kernel/power/qos.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/kernel/power/qos.c	2019-01-22 16:16:28.679293278 +0100
@@ -43,6 +43,9 @@
 #include <linux/kernel.h>
 #include <linux/debugfs.h>
 #include <linux/seq_file.h>
+#include <linux/irq.h>
+#include <linux/irqdesc.h>
+#include <linux/cpumask.h>
 
 #include <linux/uaccess.h>
 #include <linux/export.h>
@@ -67,6 +70,8 @@
 static struct pm_qos_constraints cpu_dma_constraints = {
 	.list = PLIST_HEAD_INIT(cpu_dma_constraints.list),
 	.target_value = PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE,
+	.target_per_cpu = { [0 ... (NR_CPUS - 1)] =
+				PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE },
 	.default_value = PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE,
 	.no_constraint_value = PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE,
 	.type = PM_QOS_MIN,
@@ -81,6 +86,8 @@
 static struct pm_qos_constraints network_lat_constraints = {
 	.list = PLIST_HEAD_INIT(network_lat_constraints.list),
 	.target_value = PM_QOS_NETWORK_LAT_DEFAULT_VALUE,
+	.target_per_cpu = { [0 ... (NR_CPUS - 1)] =
+				PM_QOS_NETWORK_LAT_DEFAULT_VALUE },
 	.default_value = PM_QOS_NETWORK_LAT_DEFAULT_VALUE,
 	.no_constraint_value = PM_QOS_NETWORK_LAT_DEFAULT_VALUE,
 	.type = PM_QOS_MIN,
@@ -91,11 +98,12 @@
 	.name = "network_latency",
 };
 
-
 static BLOCKING_NOTIFIER_HEAD(network_throughput_notifier);
 static struct pm_qos_constraints network_tput_constraints = {
 	.list = PLIST_HEAD_INIT(network_tput_constraints.list),
 	.target_value = PM_QOS_NETWORK_THROUGHPUT_DEFAULT_VALUE,
+	.target_per_cpu = { [0 ... (NR_CPUS - 1)] =
+				PM_QOS_NETWORK_THROUGHPUT_DEFAULT_VALUE },
 	.default_value = PM_QOS_NETWORK_THROUGHPUT_DEFAULT_VALUE,
 	.no_constraint_value = PM_QOS_NETWORK_THROUGHPUT_DEFAULT_VALUE,
 	.type = PM_QOS_MAX,
@@ -259,22 +267,60 @@
 	.release        = single_release,
 };
 
+static inline void pm_qos_set_value_for_cpus(struct pm_qos_constraints *c,
+		struct cpumask *cpus)
+{
+	struct pm_qos_request *req = NULL;
+	int cpu;
+	s32 qos_val[NR_CPUS] = { [0 ... (NR_CPUS - 1)] = c->default_value };
+
+	plist_for_each_entry(req, &c->list, node) {
+		for_each_cpu(cpu, &req->cpus_affine) {
+			switch (c->type) {
+			case PM_QOS_MIN:
+				if (qos_val[cpu] > req->node.prio)
+					qos_val[cpu] = req->node.prio;
+				break;
+			case PM_QOS_MAX:
+				if (req->node.prio > qos_val[cpu])
+					qos_val[cpu] = req->node.prio;
+				break;
+			case PM_QOS_SUM:
+				qos_val[cpu] += req->node.prio;
+				break;
+			default:
+				BUG();
+				break;
+			}
+		}
+	}
+
+	for_each_possible_cpu(cpu) {
+		if (c->target_per_cpu[cpu] != qos_val[cpu])
+			cpumask_set_cpu(cpu, cpus);
+		c->target_per_cpu[cpu] = qos_val[cpu];
+	}
+}
+
 /**
  * pm_qos_update_target - manages the constraints list and calls the notifiers
  *  if needed
  * @c: constraints data struct
- * @node: request to add to the list, to update or to remove
+ * @req: request to add to the list, to update or to remove
  * @action: action to take on the constraints list
  * @value: value of the request to add or update
  *
  * This function returns 1 if the aggregated constraint value has changed, 0
  *  otherwise.
  */
-int pm_qos_update_target(struct pm_qos_constraints *c, struct plist_node *node,
+int pm_qos_update_target(struct pm_qos_constraints *c,
+				struct pm_qos_request *req,
 			 enum pm_qos_req_action action, int value)
 {
 	unsigned long flags;
 	int prev_value, curr_value, new_value;
+	struct plist_node *node = &req->node;
+	struct cpumask cpus;
 	int ret;
 
 	spin_lock_irqsave(&pm_qos_lock, flags);
@@ -305,17 +351,23 @@
 	}
 
 	curr_value = pm_qos_get_value(c);
+	cpumask_clear(&cpus);
 	pm_qos_set_value(c, curr_value);
+	pm_qos_set_value_for_cpus(c, &cpus);
 
 	spin_unlock_irqrestore(&pm_qos_lock, flags);
 
 	trace_pm_qos_update_target(action, prev_value, curr_value);
-	if (prev_value != curr_value) {
+	/*
+	 * If any CPU mask bits are set, call the notifier call chain
+	 * to update the new QoS restriction for those cores.
+	 */
+	if (!cpumask_empty(&cpus)) {
 		ret = 1;
 		if (c->notifiers)
 			blocking_notifier_call_chain(c->notifiers,
 						     (unsigned long)curr_value,
-						     NULL);
+						     &cpus);
 	} else {
 		ret = 0;
 	}
@@ -398,12 +450,56 @@
 }
 EXPORT_SYMBOL_GPL(pm_qos_request);
 
+int pm_qos_request_for_cpu(int pm_qos_class, int cpu)
+{
+	if (cpu_isolated(cpu))
+		return INT_MAX;
+
+	return pm_qos_array[pm_qos_class]->constraints->target_per_cpu[cpu];
+}
+EXPORT_SYMBOL(pm_qos_request_for_cpu);
+
 int pm_qos_request_active(struct pm_qos_request *req)
 {
 	return req->pm_qos_class != 0;
 }
 EXPORT_SYMBOL_GPL(pm_qos_request_active);
 
+int pm_qos_request_for_cpumask(int pm_qos_class, struct cpumask *mask)
+{
+	unsigned long irqflags;
+	int cpu;
+	struct pm_qos_constraints *c = NULL;
+	int val;
+
+	spin_lock_irqsave(&pm_qos_lock, irqflags);
+	c = pm_qos_array[pm_qos_class]->constraints;
+	val = c->default_value;
+
+	for_each_cpu(cpu, mask) {
+		if (cpu_isolated(cpu))
+			continue;
+
+		switch (c->type) {
+		case PM_QOS_MIN:
+			if (c->target_per_cpu[cpu] < val)
+				val = c->target_per_cpu[cpu];
+			break;
+		case PM_QOS_MAX:
+			if (c->target_per_cpu[cpu] > val)
+				val = c->target_per_cpu[cpu];
+			break;
+		default:
+			BUG();
+			break;
+		}
+	}
+	spin_unlock_irqrestore(&pm_qos_lock, irqflags);
+
+	return val;
+}
+EXPORT_SYMBOL(pm_qos_request_for_cpumask);
+
 static void __pm_qos_update_request(struct pm_qos_request *req,
 			   s32 new_value)
 {
@@ -412,7 +508,7 @@
 	if (new_value != req->node.prio)
 		pm_qos_update_target(
 			pm_qos_array[req->pm_qos_class]->constraints,
-			&req->node, PM_QOS_UPDATE_REQ, new_value);
+			req, PM_QOS_UPDATE_REQ, new_value);
 }
 
 /**
@@ -430,6 +526,41 @@
 	__pm_qos_update_request(req, PM_QOS_DEFAULT_VALUE);
 }
 
+#ifdef CONFIG_SMP
+static void pm_qos_irq_release(struct kref *ref)
+{
+	unsigned long flags;
+	struct irq_affinity_notify *notify = container_of(ref,
+					struct irq_affinity_notify, kref);
+	struct pm_qos_request *req = container_of(notify,
+					struct pm_qos_request, irq_notify);
+	struct pm_qos_constraints *c =
+				pm_qos_array[req->pm_qos_class]->constraints;
+
+	spin_lock_irqsave(&pm_qos_lock, flags);
+	cpumask_setall(&req->cpus_affine);
+	spin_unlock_irqrestore(&pm_qos_lock, flags);
+
+	pm_qos_update_target(c, req, PM_QOS_UPDATE_REQ, c->default_value);
+}
+
+static void pm_qos_irq_notify(struct irq_affinity_notify *notify,
+		const cpumask_t *mask)
+{
+	unsigned long flags;
+	struct pm_qos_request *req = container_of(notify,
+					struct pm_qos_request, irq_notify);
+	struct pm_qos_constraints *c =
+				pm_qos_array[req->pm_qos_class]->constraints;
+
+	spin_lock_irqsave(&pm_qos_lock, flags);
+	cpumask_copy(&req->cpus_affine, mask);
+	spin_unlock_irqrestore(&pm_qos_lock, flags);
+
+	pm_qos_update_target(c, req, PM_QOS_UPDATE_REQ, req->node.prio);
+}
+#endif
+
 /**
  * pm_qos_add_request - inserts new qos request into the list
  * @req: pointer to a preallocated handle
@@ -453,11 +584,70 @@
 		WARN(1, KERN_ERR "pm_qos_add_request() called for already added request\n");
 		return;
 	}
+
+	switch (req->type) {
+	case PM_QOS_REQ_AFFINE_CORES:
+		if (cpumask_empty(&req->cpus_affine)) {
+			req->type = PM_QOS_REQ_ALL_CORES;
+			cpumask_setall(&req->cpus_affine);
+			WARN(1, KERN_ERR "Affine cores not set for request with affinity flag\n");
+		}
+		break;
+#ifdef CONFIG_SMP
+	case PM_QOS_REQ_AFFINE_IRQ:
+		if (irq_can_set_affinity(req->irq)) {
+			struct irq_desc *desc = irq_to_desc(req->irq);
+			struct cpumask *mask;
+
+			if (!desc)
+				return;
+			mask = desc->irq_data.common->affinity;
+
+			/* Get the current affinity */
+			cpumask_copy(&req->cpus_affine, mask);
+			req->irq_notify.irq = req->irq;
+			req->irq_notify.notify = pm_qos_irq_notify;
+			req->irq_notify.release = pm_qos_irq_release;
+
+		} else {
+			req->type = PM_QOS_REQ_ALL_CORES;
+			cpumask_setall(&req->cpus_affine);
+			WARN(1, KERN_ERR "IRQ-%d not set for request with affinity flag\n",
+					req->irq);
+		}
+		break;
+#endif
+	default:
+		WARN(1, KERN_ERR "Unknown request type %d\n", req->type);
+		/* fall through */
+	case PM_QOS_REQ_ALL_CORES:
+		cpumask_setall(&req->cpus_affine);
+		break;
+	}
+
 	req->pm_qos_class = pm_qos_class;
 	INIT_DELAYED_WORK(&req->work, pm_qos_work_fn);
 	trace_pm_qos_add_request(pm_qos_class, value);
 	pm_qos_update_target(pm_qos_array[pm_qos_class]->constraints,
-			     &req->node, PM_QOS_ADD_REQ, value);
+			     req, PM_QOS_ADD_REQ, value);
+
+#ifdef CONFIG_SMP
+	if (req->type == PM_QOS_REQ_AFFINE_IRQ &&
+			irq_can_set_affinity(req->irq)) {
+		int ret = 0;
+
+		ret = irq_set_affinity_notifier(req->irq,
+					&req->irq_notify);
+		if (ret) {
+			WARN(1, "IRQ affinity notify set failed\n");
+			req->type = PM_QOS_REQ_ALL_CORES;
+			cpumask_setall(&req->cpus_affine);
+			pm_qos_update_target(
+				pm_qos_array[pm_qos_class]->constraints,
+				req, PM_QOS_UPDATE_REQ, value);
+		}
+	}
+#endif
 }
 EXPORT_SYMBOL_GPL(pm_qos_add_request);
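A sketch of the driver-side call pattern for the IRQ-affine request type handled above (driver name, IRQ number and the 100 usec value are hypothetical). Once added, the request constrains only the CPUs that currently service the IRQ, and follows affinity changes through the notifier wired up in pm_qos_add_request():

#include <linux/pm_qos.h>
#include <linux/string.h>

static struct pm_qos_request mydev_qos;	/* hypothetical driver state */

static void mydev_qos_init(int irq)
{
	memset(&mydev_qos, 0, sizeof(mydev_qos));
	mydev_qos.type = PM_QOS_REQ_AFFINE_IRQ;
	mydev_qos.irq = irq;
	pm_qos_add_request(&mydev_qos, PM_QOS_CPU_DMA_LATENCY, 100);
}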
 
@@ -511,7 +701,7 @@
 	if (new_value != req->node.prio)
 		pm_qos_update_target(
 			pm_qos_array[req->pm_qos_class]->constraints,
-			&req->node, PM_QOS_UPDATE_REQ, new_value);
+			req, PM_QOS_UPDATE_REQ, new_value);
 
 	schedule_delayed_work(&req->work, usecs_to_jiffies(timeout_us));
 }
@@ -531,15 +721,25 @@
 		/* silent return to keep pcm code cleaner */
 
 	if (!pm_qos_request_active(req)) {
-		WARN(1, KERN_ERR "pm_qos_remove_request() called for unknown object\n");
+		WARN(1, "pm_qos_remove_request() called for unknown object\n");
 		return;
 	}
 
 	cancel_delayed_work_sync(&req->work);
 
+#ifdef CONFIG_SMP
+	if (req->type == PM_QOS_REQ_AFFINE_IRQ) {
+		int ret = 0;
+		/* Get the current affinity */
+		ret = irq_set_affinity_notifier(req->irq, NULL);
+		if (ret)
+			WARN(1, "IRQ affinity notify set failed\n");
+	}
+#endif
+
 	trace_pm_qos_remove_request(req->pm_qos_class, PM_QOS_DEFAULT_VALUE);
 	pm_qos_update_target(pm_qos_array[req->pm_qos_class]->constraints,
-			     &req->node, PM_QOS_REMOVE_REQ,
+			     req, PM_QOS_REMOVE_REQ,
 			     PM_QOS_DEFAULT_VALUE);
 	memset(req, 0, sizeof(*req));
 }
diff -ruw linux-4.4.115/kernel/power/suspend.c linux-4.4.115-fbx/kernel/power/suspend.c
--- linux-4.4.115/kernel/power/suspend.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/kernel/power/suspend.c	2019-01-22 16:16:28.679293278 +0100
@@ -26,9 +26,11 @@
 #include <linux/suspend.h>
 #include <linux/syscore_ops.h>
 #include <linux/ftrace.h>
+#include <linux/rtc.h>
 #include <trace/events/power.h>
 #include <linux/compiler.h>
 #include <linux/moduleparam.h>
+#include <linux/wakeup_reason.h>
 
 #include "power.h"
 
@@ -266,16 +268,18 @@
  */
 static int suspend_prepare(suspend_state_t state)
 {
-	int error;
+	int error, nr_calls = 0;
 
 	if (!sleep_state_supported(state))
 		return -EPERM;
 
 	pm_prepare_console();
 
-	error = pm_notifier_call_chain(PM_SUSPEND_PREPARE);
-	if (error)
+	error = __pm_notifier_call_chain(PM_SUSPEND_PREPARE, -1, &nr_calls);
+	if (error) {
+		nr_calls--;
 		goto Finish;
+	}
 
 	trace_suspend_resume(TPS("freeze_processes"), 0, true);
 	error = suspend_freeze_processes();
@@ -286,7 +290,7 @@
 	suspend_stats.failed_freeze++;
 	dpm_save_failed_step(SUSPEND_FREEZE);
  Finish:
-	pm_notifier_call_chain(PM_POST_SUSPEND);
+	__pm_notifier_call_chain(PM_POST_SUSPEND, nr_calls, NULL);
 	pm_restore_console();
 	return error;
 }
@@ -312,7 +316,8 @@
  */
 static int suspend_enter(suspend_state_t state, bool *wakeup)
 {
-	int error;
+	char suspend_abort[MAX_SUSPEND_ABORT_LEN];
+	int error, last_dev;
 
 	error = platform_suspend_prepare(state);
 	if (error)
@@ -320,7 +325,11 @@
 
 	error = dpm_suspend_late(PMSG_SUSPEND);
 	if (error) {
+		last_dev = suspend_stats.last_failed_dev + REC_FAILED_NUM - 1;
+		last_dev %= REC_FAILED_NUM;
 		printk(KERN_ERR "PM: late suspend of devices failed\n");
+		log_suspend_abort_reason("%s device failed to power down",
+			suspend_stats.failed_devs[last_dev]);
 		goto Platform_finish;
 	}
 	error = platform_suspend_prepare_late(state);
@@ -329,7 +338,11 @@
 
 	error = dpm_suspend_noirq(PMSG_SUSPEND);
 	if (error) {
+		last_dev = suspend_stats.last_failed_dev + REC_FAILED_NUM - 1;
+		last_dev %= REC_FAILED_NUM;
 		printk(KERN_ERR "PM: noirq suspend of devices failed\n");
+		log_suspend_abort_reason("noirq suspend of %s device failed",
+			suspend_stats.failed_devs[last_dev]);
 		goto Platform_early_resume;
 	}
 	error = platform_suspend_prepare_noirq(state);
@@ -353,8 +366,10 @@
 	}
 
 	error = disable_nonboot_cpus();
-	if (error || suspend_test(TEST_CPUS))
+	if (error || suspend_test(TEST_CPUS)) {
+		log_suspend_abort_reason("Disabling non-boot cpus failed");
 		goto Enable_cpus;
+	}
 
 	arch_suspend_disable_irqs();
 	BUG_ON(!irqs_disabled());
@@ -370,6 +385,9 @@
 				state, false);
 			events_check_enabled = false;
 		} else if (*wakeup) {
+			pm_get_active_wakeup_sources(suspend_abort,
+				MAX_SUSPEND_ABORT_LEN);
+			log_suspend_abort_reason(suspend_abort);
 			error = -EBUSY;
 		}
 		syscore_resume();
@@ -417,6 +435,7 @@
 	error = dpm_suspend_start(PMSG_SUSPEND);
 	if (error) {
 		pr_err("PM: Some devices failed to suspend, or early wake event detected\n");
+		log_suspend_abort_reason("Some devices failed to suspend, or early wake event detected");
 		goto Recover_platform;
 	}
 	suspend_test_finish("suspend devices");
@@ -518,6 +537,18 @@
 	return error;
 }
 
+static void pm_suspend_marker(char *annotation)
+{
+	struct timespec ts;
+	struct rtc_time tm;
+
+	getnstimeofday(&ts);
+	rtc_time_to_tm(ts.tv_sec, &tm);
+	pr_info("PM: suspend %s %d-%02d-%02d %02d:%02d:%02d.%09lu UTC\n",
+		annotation, tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday,
+		tm.tm_hour, tm.tm_min, tm.tm_sec, ts.tv_nsec);
+}
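The log lines produced here are fixed by the format string above; together with the entry/exit calls added below they bracket every suspend attempt in dmesg, e.g. (timestamps illustrative):

/*
 *   PM: suspend entry 2019-10-29 09:26:25.545221791 UTC
 *   PM: suspend exit 2019-10-29 09:26:27.012345678 UTC
 */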
+
 /**
  * pm_suspend - Externally visible function for suspending the system.
  * @state: System sleep state to enter.
@@ -532,6 +563,7 @@
 	if (state <= PM_SUSPEND_ON || state >= PM_SUSPEND_MAX)
 		return -EINVAL;
 
+	pm_suspend_marker("entry");
 	error = enter_state(state);
 	if (error) {
 		suspend_stats.fail++;
@@ -539,6 +571,7 @@
 	} else {
 		suspend_stats.success++;
 	}
+	pm_suspend_marker("exit");
 	return error;
 }
 EXPORT_SYMBOL(pm_suspend);
diff -ruw linux-4.4.115/kernel/printk/printk.c linux-4.4.115-fbx/kernel/printk/printk.c
--- linux-4.4.115/kernel/printk/printk.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/kernel/printk/printk.c	2019-10-29 09:26:25.609222418 +0100
@@ -55,6 +55,10 @@
 #include "console_cmdline.h"
 #include "braille.h"
 
+#ifdef CONFIG_EARLY_PRINTK_DIRECT
+extern void printascii(char *);
+#endif
+
 int console_printk[4] = {
 	CONSOLE_LOGLEVEL_DEFAULT,	/* console_loglevel */
 	MESSAGE_LOGLEVEL_DEFAULT,	/* default_message_loglevel */
@@ -232,7 +236,11 @@
 	u8 facility;		/* syslog facility */
 	u8 flags:5;		/* internal record flags */
 	u8 level:3;		/* syslog level */
-};
+}
+#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+__packed __aligned(4)
+#endif
+;
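For reference, a toy reproduction of the construct used here: attributes placed between the closing brace and the semicolon apply to the struct type itself, so when unaligned accesses are cheap the record is packed but only 4-byte aligned, which is what the LOG_ALIGN change below relies on:

#include <linux/types.h>

/*
 * Illustrative only: sizeof(struct demo_rec) == 12 and
 * __alignof__(struct demo_rec) == 4; without the attributes the u64
 * member would force 8-byte alignment and sizeof == 16.
 */
struct demo_rec {
	u64	ts_nsec;
	u32	len;
} __packed __aligned(4);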
 
 /*
  * The logbuf_lock protects kmsg buffer, indices, counters.  This can be taken
@@ -273,11 +281,7 @@
 #define LOG_FACILITY(v)		((v) >> 3 & 0xff)
 
 /* record buffer */
-#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
-#define LOG_ALIGN 4
-#else
 #define LOG_ALIGN __alignof__(struct printk_log)
-#endif
 #define __LOG_BUF_LEN (1 << CONFIG_LOG_BUF_SHIFT)
 static char __log_buf[__LOG_BUF_LEN] __aligned(LOG_ALIGN);
 static char *log_buf = __log_buf;
@@ -1754,6 +1758,10 @@
 		}
 	}
 
+#ifdef CONFIG_EARLY_PRINTK_DIRECT
+	printascii(text);
+#endif
+
 	if (level == LOGLEVEL_DEFAULT)
 		level = default_message_loglevel;
 
@@ -2130,8 +2138,12 @@
 	case CPU_DEAD:
 	case CPU_DOWN_FAILED:
 	case CPU_UP_CANCELED:
+	case CPU_DYING:
+#ifdef CONFIG_CONSOLE_FLUSH_ON_HOTPLUG
 		console_lock();
 		console_unlock();
+#endif
+		break;
 	}
 	return NOTIFY_OK;
 }
@@ -3168,9 +3180,8 @@
 {
 	dump_stack_print_info(log_lvl);
 
-	printk("%stask: %p ti: %p task.ti: %p\n",
-	       log_lvl, current, current_thread_info(),
-	       task_thread_info(current));
+	printk("%stask: %p task.stack: %p\n",
+	       log_lvl, current, task_stack_page(current));
 }
 
 #endif
diff -ruw linux-4.4.115/kernel/rcu/Makefile linux-4.4.115-fbx/kernel/rcu/Makefile
--- linux-4.4.115/kernel/rcu/Makefile	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/kernel/rcu/Makefile	2019-01-22 16:16:28.683293314 +0100
@@ -1,3 +1,7 @@
+# Any varying coverage in these files is non-deterministic
+# and is generally not a function of system call inputs.
+KCOV_INSTRUMENT := n
+
 obj-y += update.o sync.o
 obj-$(CONFIG_SRCU) += srcu.o
 obj-$(CONFIG_RCU_TORTURE_TEST) += rcutorture.o
diff -ruw linux-4.4.115/kernel/rcu/sync.c linux-4.4.115-fbx/kernel/rcu/sync.c
--- linux-4.4.115/kernel/rcu/sync.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/kernel/rcu/sync.c	2019-01-22 16:16:28.683293314 +0100
@@ -68,6 +68,7 @@
 	RCU_LOCKDEP_WARN(!gp_ops[rsp->gp_type].held(),
 			 "suspicious rcu_sync_is_idle() usage");
 }
+EXPORT_SYMBOL_GPL(rcu_sync_lockdep_assert);
 #endif
 
 /**
@@ -83,6 +84,18 @@
 }
 
 /**
+ * Must be called after rcu_sync_init() and before first use.
+ *
+ * Ensures rcu_sync_is_idle() returns false and rcu_sync_{enter,exit}()
+ * pairs turn into NO-OPs.
+ */
+void rcu_sync_enter_start(struct rcu_sync *rsp)
+{
+	rsp->gp_count++;
+	rsp->gp_state = GP_PASSED;
+}
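A sketch of the intended call pattern (subsystem name hypothetical): mark the rcu_sync structure as entered at init time, while no readers can exist yet, so the first writer does not have to pay for a grace period:

#include <linux/rcu_sync.h>

static struct rcu_sync my_rss;	/* hypothetical */

static void __init my_subsys_init(void)
{
	rcu_sync_init(&my_rss, RCU_SCHED_SYNC);
	/* Readers start on the slow path; no synchronize_sched() needed. */
	rcu_sync_enter_start(&my_rss);
}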
+
+/**
  * rcu_sync_enter() - Force readers onto slowpath
  * @rsp: Pointer to rcu_sync structure to use for synchronization
  *
diff -ruw linux-4.4.115/kernel/rcu/tree.c linux-4.4.115-fbx/kernel/rcu/tree.c
--- linux-4.4.115/kernel/rcu/tree.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/kernel/rcu/tree.c	2019-10-29 09:26:25.613222457 +0100
@@ -57,6 +57,8 @@
 #include <linux/trace_events.h>
 #include <linux/suspend.h>
 
+#include <soc/qcom/watchdog.h>
+
 #include "tree.h"
 #include "rcu.h"
 
@@ -246,24 +248,17 @@
  */
 void rcu_sched_qs(void)
 {
-	unsigned long flags;
-
-	if (__this_cpu_read(rcu_sched_data.cpu_no_qs.s)) {
+	if (!__this_cpu_read(rcu_sched_data.cpu_no_qs.s))
+		return;
 		trace_rcu_grace_period(TPS("rcu_sched"),
 				       __this_cpu_read(rcu_sched_data.gpnum),
 				       TPS("cpuqs"));
 		__this_cpu_write(rcu_sched_data.cpu_no_qs.b.norm, false);
 		if (!__this_cpu_read(rcu_sched_data.cpu_no_qs.b.exp))
 			return;
-		local_irq_save(flags);
-		if (__this_cpu_read(rcu_sched_data.cpu_no_qs.b.exp)) {
 			__this_cpu_write(rcu_sched_data.cpu_no_qs.b.exp, false);
 			rcu_report_exp_rdp(&rcu_sched_state,
-					   this_cpu_ptr(&rcu_sched_data),
-					   true);
-		}
-		local_irq_restore(flags);
-	}
+			   this_cpu_ptr(&rcu_sched_data), true);
 }
 
 void rcu_bh_qs(void)
@@ -300,17 +295,16 @@
  * We inform the RCU core by emulating a zero-duration dyntick-idle
  * period, which we in turn do by incrementing the ->dynticks counter
  * by two.
+ *
+ * The caller must have disabled interrupts.
  */
 static void rcu_momentary_dyntick_idle(void)
 {
-	unsigned long flags;
 	struct rcu_data *rdp;
 	struct rcu_dynticks *rdtp;
 	int resched_mask;
 	struct rcu_state *rsp;
 
-	local_irq_save(flags);
-
 	/*
 	 * Yes, we can lose flag-setting operations.  This is OK, because
 	 * the flag will be set again after some delay.
@@ -340,13 +334,12 @@
 		smp_mb__after_atomic(); /* Later stuff after QS. */
 		break;
 	}
-	local_irq_restore(flags);
 }
 
 /*
  * Note a context switch.  This is a quiescent state for RCU-sched,
  * and requires special handling for preemptible RCU.
- * The caller must have disabled preemption.
+ * The caller must have disabled interrupts.
  */
 void rcu_note_context_switch(void)
 {
@@ -376,9 +369,14 @@
  */
 void rcu_all_qs(void)
 {
+	unsigned long flags;
+
 	barrier(); /* Avoid RCU read-side critical sections leaking down. */
-	if (unlikely(raw_cpu_read(rcu_sched_qs_mask)))
+	if (unlikely(raw_cpu_read(rcu_sched_qs_mask))) {
+		local_irq_save(flags);
 		rcu_momentary_dyntick_idle();
+		local_irq_restore(flags);
+	}
 	this_cpu_inc(rcu_qs_ctr);
 	barrier(); /* Avoid RCU read-side critical sections leaking up. */
 }
@@ -1310,6 +1308,11 @@
 
 	rcu_check_gp_kthread_starvation(rsp);
 
+#ifdef CONFIG_RCU_STALL_WATCHDOG_BITE
+	/* Induce watchdog bite */
+	msm_trigger_wdog_bite();
+#endif
+
 	force_quiescent_state(rsp);  /* Kick them all. */
 }
 
@@ -1345,6 +1348,11 @@
 			   jiffies + 3 * rcu_jiffies_till_stall_check() + 3);
 	raw_spin_unlock_irqrestore(&rnp->lock, flags);
 
+#ifdef CONFIG_RCU_STALL_WATCHDOG_BITE
+	/* Induce non secure watchdog bite to collect context */
+	msm_trigger_wdog_bite();
+#endif
+
 	/*
 	 * Attempt to revive the RCU machinery by forcing a context switch.
 	 *
diff -ruw linux-4.4.115/kernel/rcu/tree_plugin.h linux-4.4.115-fbx/kernel/rcu/tree_plugin.h
--- linux-4.4.115/kernel/rcu/tree_plugin.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/kernel/rcu/tree_plugin.h	2019-01-22 16:16:28.687293351 +0100
@@ -147,8 +147,8 @@
  * the corresponding expedited grace period will also be the end of the
  * normal grace period.
  */
-static void rcu_preempt_ctxt_queue(struct rcu_node *rnp, struct rcu_data *rdp,
-				   unsigned long flags) __releases(rnp->lock)
+static void rcu_preempt_ctxt_queue(struct rcu_node *rnp, struct rcu_data *rdp)
+	__releases(rnp->lock) /* But leaves rrupts disabled. */
 {
 	int blkd_state = (rnp->gp_tasks ? RCU_GP_TASKS : 0) +
 			 (rnp->exp_tasks ? RCU_EXP_TASKS : 0) +
@@ -236,7 +236,7 @@
 		rnp->gp_tasks = &t->rcu_node_entry;
 	if (!rnp->exp_tasks && (blkd_state & RCU_EXP_BLKD))
 		rnp->exp_tasks = &t->rcu_node_entry;
-	raw_spin_unlock(&rnp->lock);
+	raw_spin_unlock(&rnp->lock); /* rrupts remain disabled. */
 
 	/*
 	 * Report the quiescent state for the expedited GP.  This expedited
@@ -251,7 +251,6 @@
 	} else {
 		WARN_ON_ONCE(t->rcu_read_unlock_special.b.exp_need_qs);
 	}
-	local_irq_restore(flags);
 }
 
 /*
@@ -286,12 +285,11 @@
  * predating the current grace period drain, in other words, until
  * rnp->gp_tasks becomes NULL.
  *
- * Caller must disable preemption.
+ * Caller must disable interrupts.
  */
 static void rcu_preempt_note_context_switch(void)
 {
 	struct task_struct *t = current;
-	unsigned long flags;
 	struct rcu_data *rdp;
 	struct rcu_node *rnp;
 
@@ -301,7 +299,7 @@
 		/* Possibly blocking in an RCU read-side critical section. */
 		rdp = this_cpu_ptr(rcu_state_p->rda);
 		rnp = rdp->mynode;
-		raw_spin_lock_irqsave(&rnp->lock, flags);
+		raw_spin_lock(&rnp->lock);
 		smp_mb__after_unlock_lock();
 		t->rcu_read_unlock_special.b.blocked = true;
 		t->rcu_blocked_node = rnp;
@@ -318,7 +316,7 @@
 				       (rnp->qsmask & rdp->grpmask)
 				       ? rnp->gpnum
 				       : rnp->gpnum + 1);
-		rcu_preempt_ctxt_queue(rnp, rdp, flags);
+		rcu_preempt_ctxt_queue(rnp, rdp);
 	} else if (t->rcu_read_lock_nesting < 0 &&
 		   t->rcu_read_unlock_special.s) {
 
diff -ruw linux-4.4.115/kernel/resource.c linux-4.4.115-fbx/kernel/resource.c
--- linux-4.4.115/kernel/resource.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/kernel/resource.c	2019-10-29 09:26:25.613222457 +0100
@@ -172,7 +172,7 @@
 static int __init ioresources_init(void)
 {
 	proc_create("ioports", 0, NULL, &proc_ioports_operations);
-	proc_create("iomem", 0, NULL, &proc_iomem_operations);
+	proc_create("iomem", S_IRUSR, NULL, &proc_iomem_operations);
 	return 0;
 }
 __initcall(ioresources_init);
@@ -367,15 +367,14 @@
 	read_lock(&resource_lock);
 
 	for (p = iomem_resource.child; p; p = next_resource(p, sibling_only)) {
-		if (p->flags != res->flags)
-			continue;
-		if (name && strcmp(p->name, name))
-			continue;
 		if (p->start > end) {
 			p = NULL;
 			break;
 		}
-		if ((p->end >= start) && (p->start < end))
+		if (p->flags != res->flags)
+			continue;
+		if ((p->end >= start) && (p->start < end) &&
+		    (name == NULL || !strcmp(p->name, name)))
 			break;
 	}
 
diff -ruw linux-4.4.115/kernel/sched/clock.c linux-4.4.115-fbx/kernel/sched/clock.c
--- linux-4.4.115/kernel/sched/clock.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/kernel/sched/clock.c	2019-01-22 16:16:28.691293387 +0100
@@ -354,7 +354,7 @@
 		return;
 
 	sched_clock_tick();
-	touch_softlockup_watchdog();
+	touch_softlockup_watchdog_sched();
 }
 EXPORT_SYMBOL_GPL(sched_clock_idle_wakeup_event);
 
diff -ruw linux-4.4.115/kernel/sched/core.c linux-4.4.115-fbx/kernel/sched/core.c
--- linux-4.4.115/kernel/sched/core.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/kernel/sched/core.c	2019-10-29 09:26:25.617222496 +0100
@@ -26,6 +26,7 @@
  *              Thomas Gleixner, Mike Kravetz
  */
 
+#include <linux/kasan.h>
 #include <linux/mm.h>
 #include <linux/module.h>
 #include <linux/nmi.h>
@@ -74,6 +75,8 @@
 #include <linux/binfmts.h>
 #include <linux/context_tracking.h>
 #include <linux/compiler.h>
+#include <linux/irq.h>
+#include <linux/sched/core_ctl.h>
 
 #include <asm/switch_to.h>
 #include <asm/tlb.h>
@@ -82,14 +85,20 @@
 #ifdef CONFIG_PARAVIRT
 #include <asm/paravirt.h>
 #endif
+#ifdef CONFIG_MSM_APP_SETTINGS
+#include <asm/app_api.h>
+#endif
 
 #include "sched.h"
 #include "../workqueue_internal.h"
 #include "../smpboot.h"
+#include "../time/tick-internal.h"
 
 #define CREATE_TRACE_POINTS
 #include <trace/events/sched.h>
 
+ATOMIC_NOTIFIER_HEAD(load_alert_notifier_head);
+
 DEFINE_MUTEX(sched_domains_mutex);
 DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
 
@@ -287,6 +296,18 @@
 /* cpus with isolated domains */
 cpumask_var_t cpu_isolated_map;
 
+struct rq *
+lock_rq_of(struct task_struct *p, unsigned long *flags)
+{
+	return task_rq_lock(p, flags);
+}
+
+void
+unlock_rq_of(struct rq *rq, struct task_struct *p, unsigned long *flags)
+{
+	task_rq_unlock(rq, p, flags);
+}
+
 /*
  * this_rq_lock - lock this runqueue and disable interrupts.
  */
@@ -533,6 +554,8 @@
 	if (cmpxchg(&node->next, NULL, WAKE_Q_TAIL))
 		return;
 
+	head->count++;
+
 	get_task_struct(task);
 
 	/*
@@ -542,6 +565,10 @@
 	head->lastp = &node->next;
 }
 
+static int
+try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags,
+	       int sibling_count_hint);
+
 void wake_up_q(struct wake_q_head *head)
 {
 	struct wake_q_node *node = head->first;
@@ -556,10 +583,10 @@
 		task->wake_q.next = NULL;
 
 		/*
-		 * wake_up_process() implies a wmb() to pair with the queueing
+		 * try_to_wake_up() implies a wmb() to pair with the queueing
 		 * in wake_q_add() so as not to miss wakeups.
 		 */
-		wake_up_process(task);
+		try_to_wake_up(task, TASK_NORMAL, 0, head->count);
 		put_task_struct(task);
 	}
 }
@@ -835,6 +862,7 @@
 	if (!(flags & ENQUEUE_RESTORE))
 		sched_info_queued(rq, p);
 	p->sched_class->enqueue_task(rq, p, flags);
+	trace_sched_enq_deq_task(p, 1, cpumask_bits(&p->cpus_allowed)[0]);
 }
 
 static inline void dequeue_task(struct rq *rq, struct task_struct *p, int flags)
@@ -843,6 +871,7 @@
 	if (!(flags & DEQUEUE_SAVE))
 		sched_info_dequeued(rq, p);
 	p->sched_class->dequeue_task(rq, p, flags);
+	trace_sched_enq_deq_task(p, 0, cpumask_bits(&p->cpus_allowed)[0]);
 }
 
 void activate_task(struct rq *rq, struct task_struct *p, int flags)
@@ -858,6 +887,9 @@
 	if (task_contributes_to_load(p))
 		rq->nr_uninterruptible++;
 
+	if (flags & DEQUEUE_SLEEP)
+		clear_ed_task(p, rq);
+
 	dequeue_task(rq, p, flags);
 }
 
@@ -1073,17 +1105,19 @@
 {
 	lockdep_assert_held(&rq->lock);
 
-	dequeue_task(rq, p, 0);
 	p->on_rq = TASK_ON_RQ_MIGRATING;
+	dequeue_task(rq, p, 0);
+	double_lock_balance(rq, cpu_rq(new_cpu));
 	set_task_cpu(p, new_cpu);
+	double_unlock_balance(rq, cpu_rq(new_cpu));
 	raw_spin_unlock(&rq->lock);
 
 	rq = cpu_rq(new_cpu);
 
 	raw_spin_lock(&rq->lock);
 	BUG_ON(task_cpu(p) != new_cpu);
-	p->on_rq = TASK_ON_RQ_QUEUED;
 	enqueue_task(rq, p, 0);
+	p->on_rq = TASK_ON_RQ_QUEUED;
 	check_preempt_curr(rq, p, 0);
 
 	return rq;
@@ -1105,6 +1139,8 @@
  */
 static struct rq *__migrate_task(struct rq *rq, struct task_struct *p, int dest_cpu)
 {
+	int src_cpu;
+
 	if (unlikely(!cpu_active(dest_cpu)))
 		return rq;
 
@@ -1112,6 +1148,7 @@
 	if (!cpumask_test_cpu(dest_cpu, tsk_cpus_allowed(p)))
 		return rq;
 
+	src_cpu = cpu_of(rq);
 	rq = move_queued_task(rq, p, dest_cpu);
 
 	return rq;
@@ -1127,6 +1164,8 @@
 	struct migration_arg *arg = data;
 	struct task_struct *p = arg->task;
 	struct rq *rq = this_rq();
+	int src_cpu = cpu_of(rq);
+	bool moved = false;
 
 	/*
 	 * The original target cpu might have gone down and we might
@@ -1147,12 +1186,18 @@
 	 * holding rq->lock, if p->on_rq == 0 it cannot get enqueued because
 	 * we're holding p->pi_lock.
 	 */
-	if (task_rq(p) == rq && task_on_rq_queued(p))
+	if (task_rq(p) == rq && task_on_rq_queued(p)) {
 		rq = __migrate_task(rq, p, arg->dest_cpu);
+		moved = true;
+	}
 	raw_spin_unlock(&rq->lock);
 	raw_spin_unlock(&p->pi_lock);
 
 	local_irq_enable();
+
+	if (moved)
+		notify_migration(src_cpu, arg->dest_cpu, false, p);
+
 	return 0;
 }
 
@@ -1211,6 +1256,7 @@
 	struct rq *rq;
 	unsigned int dest_cpu;
 	int ret = 0;
+	cpumask_t allowed_mask;
 
 	rq = task_rq_lock(p, &flags);
 
@@ -1226,18 +1272,25 @@
 	if (cpumask_equal(&p->cpus_allowed, new_mask))
 		goto out;
 
-	if (!cpumask_intersects(new_mask, cpu_active_mask)) {
+	cpumask_andnot(&allowed_mask, new_mask, cpu_isolated_mask);
+	cpumask_and(&allowed_mask, &allowed_mask, cpu_active_mask);
+
+	dest_cpu = cpumask_any(&allowed_mask);
+	if (dest_cpu >= nr_cpu_ids) {
+		cpumask_and(&allowed_mask, cpu_active_mask, new_mask);
+		dest_cpu = cpumask_any(&allowed_mask);
+		if (dest_cpu >= nr_cpu_ids) {
 		ret = -EINVAL;
 		goto out;
 	}
+	}
 
 	do_set_cpus_allowed(p, new_mask);
 
 	/* Can the task run on the task's current CPU? If so, we're done */
-	if (cpumask_test_cpu(task_cpu(p), new_mask))
+	if (cpumask_test_cpu(task_cpu(p), &allowed_mask))
 		goto out;
 
-	dest_cpu = cpumask_any_and(cpu_active_mask, new_mask);
 	if (task_running(rq, p) || p->state == TASK_WAKING) {
 		struct migration_arg arg = { p, dest_cpu };
 		/* Need help from migration thread: drop lock and wait. */
@@ -1276,6 +1329,15 @@
 	WARN_ON_ONCE(p->state != TASK_RUNNING && p->state != TASK_WAKING &&
 			!p->on_rq);
 
+	/*
+	 * A migrating fair-class task must have p->on_rq set to
+	 * TASK_ON_RQ_MIGRATING, because schedstat_wait_{start,end} rebase the
+	 * migrating task's wait_start time relying on p->on_rq.
+	 */
+	WARN_ON_ONCE(p->state == TASK_RUNNING &&
+		     p->sched_class == &fair_sched_class &&
+		     (p->on_rq && !task_on_rq_migrating(p)));
+
 #ifdef CONFIG_LOCKDEP
 	/*
 	 * The caller should hold either p->pi_lock or rq->lock, when changing
@@ -1292,13 +1354,15 @@
 #endif
 #endif
 
-	trace_sched_migrate_task(p, new_cpu);
+	trace_sched_migrate_task(p, new_cpu, pct_task_load(p));
 
 	if (task_cpu(p) != new_cpu) {
 		if (p->sched_class->migrate_task_rq)
 			p->sched_class->migrate_task_rq(p);
 		p->se.nr_migrations++;
 		perf_event_task_migrate(p);
+
+		fixup_busy_time(p, new_cpu);
 	}
 
 	__set_task_cpu(p, new_cpu);
@@ -1312,9 +1376,13 @@
 		src_rq = task_rq(p);
 		dst_rq = cpu_rq(cpu);
 
+		p->on_rq = TASK_ON_RQ_MIGRATING;
 		deactivate_task(src_rq, p, 0);
+		p->on_rq = TASK_ON_RQ_MIGRATING;
 		set_task_cpu(p, cpu);
+		p->on_rq = TASK_ON_RQ_QUEUED;
 		activate_task(dst_rq, p, 0);
+		p->on_rq = TASK_ON_RQ_QUEUED;
 		check_preempt_curr(dst_rq, p, 0);
 	} else {
 		/*
@@ -1500,7 +1568,7 @@
 		 * yield - it could be a while.
 		 */
 		if (unlikely(queued)) {
-			ktime_t to = ktime_set(0, NSEC_PER_SEC/HZ);
+			ktime_t to = ktime_set(0, NSEC_PER_MSEC);
 
 			set_current_state(TASK_UNINTERRUPTIBLE);
 			schedule_hrtimeout(&to, HRTIMER_MODE_REL);
@@ -1546,12 +1614,13 @@
 /*
  * ->cpus_allowed is protected by both rq->lock and p->pi_lock
  */
-static int select_fallback_rq(int cpu, struct task_struct *p)
+static int select_fallback_rq(int cpu, struct task_struct *p, bool allow_iso)
 {
 	int nid = cpu_to_node(cpu);
 	const struct cpumask *nodemask = NULL;
-	enum { cpuset, possible, fail } state = cpuset;
+	enum { cpuset, possible, fail, bug } state = cpuset;
 	int dest_cpu;
+	int isolated_candidate = -1;
 
 	/*
 	 * If the node that the cpu is on has been offlined, cpu_to_node()
@@ -1567,6 +1636,8 @@
 				continue;
 			if (!cpu_active(dest_cpu))
 				continue;
+			if (cpu_isolated(dest_cpu))
+				continue;
 			if (cpumask_test_cpu(dest_cpu, tsk_cpus_allowed(p)))
 				return dest_cpu;
 		}
@@ -1579,6 +1650,16 @@
 				continue;
 			if (!cpu_active(dest_cpu))
 				continue;
+			if (cpu_isolated(dest_cpu)) {
+				if (allow_iso)
+					isolated_candidate = dest_cpu;
+				continue;
+			}
+			goto out;
+		}
+
+		if (isolated_candidate != -1) {
+			dest_cpu = isolated_candidate;
 			goto out;
 		}
 
@@ -1597,6 +1678,11 @@
 			break;
 
 		case fail:
+			allow_iso = true;
+			state = bug;
+			break;
+
+		case bug:
 			BUG();
 			break;
 		}
@@ -1622,12 +1708,16 @@
  * The caller (fork, wakeup) owns p->pi_lock, ->cpus_allowed is stable.
  */
 static inline
-int select_task_rq(struct task_struct *p, int cpu, int sd_flags, int wake_flags)
+int select_task_rq(struct task_struct *p, int cpu, int sd_flags, int wake_flags,
+		   int sibling_count_hint)
 {
+	bool allow_isolated = (p->flags & PF_KTHREAD);
+
 	lockdep_assert_held(&p->pi_lock);
 
 	if (p->nr_cpus_allowed > 1)
-		cpu = p->sched_class->select_task_rq(p, cpu, sd_flags, wake_flags);
+		cpu = p->sched_class->select_task_rq(p, cpu, sd_flags, wake_flags,
+						     sibling_count_hint);
 
 	/*
 	 * In order not to call set_task_cpu() on a blocking task we need
@@ -1640,13 +1730,14 @@
 	 *   not worry about this generic constraint ]
 	 */
 	if (unlikely(!cpumask_test_cpu(cpu, tsk_cpus_allowed(p)) ||
-		     !cpu_online(cpu)))
-		cpu = select_fallback_rq(task_cpu(p), p);
+		     !cpu_online(cpu)) ||
+		     (cpu_isolated(cpu) && !allow_isolated))
+		cpu = select_fallback_rq(task_cpu(p), p, allow_isolated);
 
 	return cpu;
 }
 
-static void update_avg(u64 *avg, u64 sample)
+void update_avg(u64 *avg, u64 sample)
 {
 	s64 diff = sample - *avg;
 	*avg += diff >> 3;
@@ -1719,6 +1810,7 @@
 ttwu_do_wakeup(struct rq *rq, struct task_struct *p, int wake_flags)
 {
 	check_preempt_curr(rq, p, wake_flags);
+
 	p->state = TASK_RUNNING;
 	trace_sched_wakeup(p);
 
@@ -1810,6 +1902,8 @@
 
 void scheduler_ipi(void)
 {
+	int cpu = smp_processor_id();
+
 	/*
 	 * Fold TIF_NEED_RESCHED into the preempt_count; anybody setting
 	 * TIF_NEED_RESCHED remotely (for the first time) will also send
@@ -1817,9 +1911,18 @@
 	 */
 	preempt_fold_need_resched();
 
-	if (llist_empty(&this_rq()->wake_list) && !got_nohz_idle_kick())
+	if (llist_empty(&this_rq()->wake_list) && !got_nohz_idle_kick() &&
+							!got_boost_kick())
 		return;
 
+	if (got_boost_kick()) {
+		struct rq *rq = cpu_rq(cpu);
+
+		if (rq->curr->sched_class == &fair_sched_class)
+			check_for_migration(rq, rq->curr);
+		clear_boost_kick(cpu);
+	}
+
 	/*
 	 * Not all reschedule IPI handlers call irq_enter/irq_exit, since
 	 * traditionally all their work was done from the interrupt return
@@ -1839,7 +1942,7 @@
 	/*
 	 * Check if someone kicked us for doing the nohz idle load balance.
 	 */
-	if (unlikely(got_nohz_idle_kick())) {
+	if (unlikely(got_nohz_idle_kick()) && !cpu_isolated(cpu)) {
 		this_rq()->idle_balance = 1;
 		raise_softirq_irqoff(SCHED_SOFTIRQ);
 	}
@@ -1912,6 +2015,8 @@
  * @p: the thread to be awakened
  * @state: the mask of task states that can be woken
  * @wake_flags: wake modifier flags (WF_*)
+ * @sibling_count_hint: A hint at the number of threads that are being woken up
+ *                      in this event.
  *
  * Put it on the run-queue if it's not already there. The "current"
  * thread is always on the run-queue (except when the actual
@@ -1923,10 +2028,21 @@
  * or @state didn't match @p's state.
  */
 static int
-try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
+try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags,
+	       int sibling_count_hint)
 {
 	unsigned long flags;
-	int cpu, success = 0;
+	int cpu, src_cpu, success = 0;
+#ifdef CONFIG_SMP
+	unsigned int old_load;
+	struct rq *rq;
+	u64 wallclock;
+	struct related_thread_group *grp = NULL;
+#endif
+	bool freq_notif_allowed = !(wake_flags & WF_NO_NOTIFIER);
+	bool check_group = false;
+
+	wake_flags &= ~WF_NO_NOTIFIER;
 
 	/*
 	 * If we are going to wake up a thread waiting for CONDITION we
@@ -1936,13 +2052,14 @@
 	 */
 	smp_mb__before_spinlock();
 	raw_spin_lock_irqsave(&p->pi_lock, flags);
+	src_cpu = cpu = task_cpu(p);
+
 	if (!(p->state & state))
 		goto out;
 
 	trace_sched_waking(p);
 
 	success = 1; /* we're going to change ->state */
-	cpu = task_cpu(p);
 
 	/*
 	 * Ensure we load p->on_rq _after_ p->state, otherwise it would
@@ -2006,25 +2123,57 @@
 	 */
 	smp_rmb();
 
+	rq = cpu_rq(task_cpu(p));
+
+	raw_spin_lock(&rq->lock);
+	old_load = task_load(p);
+	wallclock = sched_ktime_clock();
+	update_task_ravg(rq->curr, rq, TASK_UPDATE, wallclock, 0);
+	update_task_ravg(p, rq, TASK_WAKE, wallclock, 0);
+	raw_spin_unlock(&rq->lock);
+
+	rcu_read_lock();
+	grp = task_related_thread_group(p);
+	if (update_preferred_cluster(grp, p, old_load))
+		set_preferred_cluster(grp);
+	rcu_read_unlock();
+	check_group = grp != NULL;
+
 	p->sched_contributes_to_load = !!task_contributes_to_load(p);
 	p->state = TASK_WAKING;
 
 	if (p->sched_class->task_waking)
 		p->sched_class->task_waking(p);
 
-	cpu = select_task_rq(p, p->wake_cpu, SD_BALANCE_WAKE, wake_flags);
-	if (task_cpu(p) != cpu) {
+	cpu = select_task_rq(p, p->wake_cpu, SD_BALANCE_WAKE, wake_flags,
+			     sibling_count_hint);
+
+	/* Refresh src_cpu as it could have changed since we last read it */
+	src_cpu = task_cpu(p);
+	if (src_cpu != cpu) {
 		wake_flags |= WF_MIGRATED;
 		set_task_cpu(p, cpu);
 	}
-#endif /* CONFIG_SMP */
 
+	note_task_waking(p, wallclock);
+#endif /* CONFIG_SMP */
 	ttwu_queue(p, cpu);
 stat:
 	ttwu_stat(p, cpu, wake_flags);
 out:
 	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
 
+	if (freq_notif_allowed) {
+		if (!same_freq_domain(src_cpu, cpu)) {
+			check_for_freq_change(cpu_rq(cpu),
+						false, check_group);
+			check_for_freq_change(cpu_rq(src_cpu),
+						false, check_group);
+		} else if (success) {
+			check_for_freq_change(cpu_rq(cpu), true, false);
+		}
+	}
+
 	return success;
 }
 
@@ -2040,9 +2189,13 @@
 {
 	struct rq *rq = task_rq(p);
 
-	if (WARN_ON_ONCE(rq != this_rq()) ||
-	    WARN_ON_ONCE(p == current))
+	if (rq != this_rq() || p == current) {
+		printk_deferred("%s: Failed to wakeup task %d (%s), rq = %p,"
+				" this_rq = %p, p = %p, current = %p\n",
+			__func__, task_pid_nr(p), p->comm, rq,
+			this_rq(), p, current);
 		return;
+	}
 
 	lockdep_assert_held(&rq->lock);
 
@@ -2065,13 +2218,20 @@
 
 	trace_sched_waking(p);
 
-	if (!task_on_rq_queued(p))
+	if (!task_on_rq_queued(p)) {
+		u64 wallclock = sched_ktime_clock();
+
+		update_task_ravg(rq->curr, rq, TASK_UPDATE, wallclock, 0);
+		update_task_ravg(p, rq, TASK_WAKE, wallclock, 0);
 		ttwu_activate(rq, p, ENQUEUE_WAKEUP);
+		note_task_waking(p, wallclock);
+	}
 
 	ttwu_do_wakeup(rq, p, 0);
 	ttwu_stat(p, smp_processor_id(), 0);
 out:
 	raw_spin_unlock(&p->pi_lock);
+	/* TODO: send cpufreq notifier */
 }
 
 /**
@@ -2088,13 +2248,33 @@
  */
 int wake_up_process(struct task_struct *p)
 {
-	return try_to_wake_up(p, TASK_NORMAL, 0);
+	return try_to_wake_up(p, TASK_NORMAL, 0, 1);
 }
 EXPORT_SYMBOL(wake_up_process);
 
+/**
+ * wake_up_process_no_notif - Wake up a specific process without notifying
+ * governor
+ * @p: The process to be woken up.
+ *
+ * Attempt to wake up the nominated process and move it to the set of runnable
+ * processes.
+ *
+ * Return: 1 if the process was woken up, 0 if it was already running.
+ *
+ * It may be assumed that this function implies a write memory barrier before
+ * changing the task state if and only if any tasks are woken up.
+ */
+int wake_up_process_no_notif(struct task_struct *p)
+{
+	WARN_ON(task_is_stopped_or_traced(p));
+	return try_to_wake_up(p, TASK_NORMAL, WF_NO_NOTIFIER, 1);
+}
+EXPORT_SYMBOL(wake_up_process_no_notif);
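A sketch of the intended use (caller hypothetical): wake a worker from a path where triggering a cpufreq re-evaluation would be counterproductive, such as a governor waking its own decision thread:

/* Illustrative only: avoid notifying the governor about its own wakeup. */
static void governor_kick(struct task_struct *gov_thread)
{
	wake_up_process_no_notif(gov_thread);
}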
+
 int wake_up_state(struct task_struct *p, unsigned int state)
 {
-	return try_to_wake_up(p, state, 0);
+	return try_to_wake_up(p, state, 0, 1);
 }
 
 /*
@@ -2116,6 +2296,44 @@
 	dl_se->dl_yielded = 0;
 }
 
+#ifdef CONFIG_SCHED_HMP
+/*
+ * sched_exit() - Set EXITING_TASK_MARKER in task's ravg.demand field
+ *
+ * Stop accounting (exiting) task's future cpu usage
+ *
+ * We need this so that reset_all_windows_stats() can function correctly.
+ * reset_all_window_stats() depends on do_each_thread/for_each_thread task
+ * iterators to reset *all* task's statistics. Exiting tasks however become
+ * invisible to those iterators. sched_exit() is called on a exiting task prior
+ * to being removed from task_list, which will let reset_all_window_stats()
+ * function correctly.
+ */
+void sched_exit(struct task_struct *p)
+{
+	unsigned long flags;
+	struct rq *rq;
+	u64 wallclock;
+
+	sched_set_group_id(p, 0);
+
+	rq = task_rq_lock(p, &flags);
+
+	/* rq->curr == p */
+	wallclock = sched_ktime_clock();
+	update_task_ravg(rq->curr, rq, TASK_UPDATE, wallclock, 0);
+	dequeue_task(rq, p, 0);
+	reset_task_stats(p);
+	p->ravg.mark_start = wallclock;
+	p->ravg.sum_history[0] = EXITING_TASK_MARKER;
+
+	enqueue_task(rq, p, 0);
+	clear_ed_task(p, rq);
+	task_rq_unlock(rq, p, &flags);
+	free_task_load_ptrs(p);
+}
+#endif /* CONFIG_SCHED_HMP */
+
 /*
  * Perform scheduler related setup for a newly forked process p.
  * p is forked by current.
@@ -2132,8 +2350,16 @@
 	p->se.prev_sum_exec_runtime	= 0;
 	p->se.nr_migrations		= 0;
 	p->se.vruntime			= 0;
+#ifdef CONFIG_SCHED_WALT
+	p->last_sleep_ts		= 0;
+#endif
+
 	INIT_LIST_HEAD(&p->se.group_node);
 
+#ifdef CONFIG_FAIR_GROUP_SCHED
+	p->se.cfs_rq			= NULL;
+#endif
+
 #ifdef CONFIG_SCHEDSTATS
 	memset(&p->se.statistics, 0, sizeof(p->se.statistics));
 #endif
@@ -2142,7 +2368,12 @@
 	init_dl_task_timer(&p->dl);
 	__dl_clear_params(p);
 
+	init_rt_schedtune_timer(&p->rt);
 	INIT_LIST_HEAD(&p->rt.run_list);
+	p->rt.timeout		= 0;
+	p->rt.time_slice	= sched_rr_timeslice;
+	p->rt.on_rq		= 0;
+	p->rt.on_list		= 0;
 
 #ifdef CONFIG_PREEMPT_NOTIFIERS
 	INIT_HLIST_HEAD(&p->preempt_notifiers);
@@ -2212,15 +2443,18 @@
 int sched_fork(unsigned long clone_flags, struct task_struct *p)
 {
 	unsigned long flags;
-	int cpu = get_cpu();
+	int cpu;
+
+	init_new_task_load(p, false);
+	cpu = get_cpu();
 
 	__sched_fork(clone_flags, p);
 	/*
-	 * We mark the process as running here. This guarantees that
+	 * We mark the process as NEW here. This guarantees that
 	 * nobody will actually run it, and a signal or other external
 	 * event cannot wake it up and insert it on the runqueue either.
 	 */
-	p->state = TASK_RUNNING;
+	p->state = TASK_NEW;
 
 	/*
 	 * Make sure we do not leak PI boosting priority to the child.
@@ -2257,8 +2491,7 @@
 		p->sched_class = &fair_sched_class;
 	}
 
-	if (p->sched_class->task_fork)
-		p->sched_class->task_fork(p);
+	init_entity_runnable_average(&p->se);
 
 	/*
 	 * The child is not yet in the pid-hash so no cgroup attach races,
@@ -2268,7 +2501,13 @@
 	 * Silence PROVE_RCU.
 	 */
 	raw_spin_lock_irqsave(&p->pi_lock, flags);
-	set_task_cpu(p, cpu);
+	/*
+	 * We're setting the cpu for the first time, we don't migrate,
+	 * so use __set_task_cpu().
+	 */
+	__set_task_cpu(p, cpu);
+	if (p->sched_class->task_fork)
+		p->sched_class->task_fork(p);
 	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
 
 #ifdef CONFIG_SCHED_INFO
@@ -2399,7 +2638,10 @@
 	unsigned long flags;
 	struct rq *rq;
 
+	add_new_task_to_grp(p);
 	raw_spin_lock_irqsave(&p->pi_lock, flags);
+	p->state = TASK_RUNNING;
+
 	/* Initialize new task's runnable average */
 	init_entity_runnable_average(&p->se);
 #ifdef CONFIG_SMP
@@ -2407,12 +2649,17 @@
 	 * Fork balancing, do it here and not earlier because:
 	 *  - cpus_allowed can change in the fork path
 	 *  - any previously selected cpu might disappear through hotplug
+	 *
+	 * Use __set_task_cpu() to avoid calling sched_class::migrate_task_rq,
+	 * as we're not fully set-up yet.
 	 */
-	set_task_cpu(p, select_task_rq(p, task_cpu(p), SD_BALANCE_FORK, 0));
+	__set_task_cpu(p, select_task_rq(p, task_cpu(p), SD_BALANCE_FORK, 0, 1));
 #endif
-
 	rq = __task_rq_lock(p);
-	activate_task(rq, p, 0);
+	mark_task_starting(p);
+	update_rq_clock(rq);
+	post_init_entity_util_avg(&p->se);
+	activate_task(rq, p, ENQUEUE_WAKEUP_NEW);
 	p->on_rq = TASK_ON_RQ_QUEUED;
 	trace_sched_wakeup_new(p);
 	check_preempt_curr(rq, p, WF_FORK);
@@ -2539,6 +2786,14 @@
 	fire_sched_out_preempt_notifiers(prev, next);
 	prepare_lock_switch(rq, next);
 	prepare_arch_switch(next);
+
+#ifdef CONFIG_MSM_APP_SETTINGS
+	if (use_app_setting)
+		switch_app_setting_bit(prev, next);
+
+	if (use_32bit_app_setting || use_32bit_app_setting_pro)
+		switch_32bit_app_setting_bit(prev, next);
+#endif
 }
 
 /**
@@ -2793,6 +3048,36 @@
 	return atomic_read(&this->nr_iowait);
 }
 
+#ifdef CONFIG_CPU_QUIET
+u64 nr_running_integral(unsigned int cpu)
+{
+	unsigned int seqcnt;
+	u64 integral;
+	struct rq *q;
+
+	if (cpu >= nr_cpu_ids)
+		return 0;
+
+	q = cpu_rq(cpu);
+
+	/*
+	 * Update the average to avoid reading a stale value if there were
+	 * no run-queue changes for a long time. On the other hand, if
+	 * changes are happening right now, just read the current value
+	 * directly.
+	 */
+
+	seqcnt = read_seqcount_begin(&q->ave_seqcnt);
+	integral = do_nr_running_integral(q);
+	if (read_seqcount_retry(&q->ave_seqcnt, seqcnt)) {
+		read_seqcount_begin(&q->ave_seqcnt);
+		integral = q->nr_running_integral;
+	}
+
+	return integral;
+}
+#endif
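Assuming the integral advances by nr_running multiplied by wall-clock time (do_nr_running_integral() itself is not shown in this hunk), a consumer can recover a time-weighted average over a sampling window; a hypothetical sketch, ignoring any fixed-point scaling in the real implementation:

#include <linux/math64.h>

/*
 * Hypothetical consumer: time-weighted mean of nr_running on 'cpu'
 * between two samples of the integral taken 'delta_ns' apart.
 */
static u64 mean_nr_running(unsigned int cpu, u64 prev_integral, u64 delta_ns)
{
	u64 cur = nr_running_integral(cpu);

	return div64_u64(cur - prev_integral, delta_ns);
}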
+
 void get_iowait_load(unsigned long *nr_waiters, unsigned long *load)
 {
 	struct rq *rq = this_rq();
@@ -2800,7 +3085,7 @@
 	*load = rq->load.weight;
 }
 
-#ifdef CONFIG_SMP
+#if defined(CONFIG_SMP)
 
 /*
  * sched_exec - execve() is a valuable balancing opportunity, because at
@@ -2810,18 +3095,23 @@
 {
 	struct task_struct *p = current;
 	unsigned long flags;
-	int dest_cpu;
+	int dest_cpu, curr_cpu;
+
+#ifdef CONFIG_SCHED_HMP
+	return;
+#endif
 
 	raw_spin_lock_irqsave(&p->pi_lock, flags);
-	dest_cpu = p->sched_class->select_task_rq(p, task_cpu(p), SD_BALANCE_EXEC, 0);
+	curr_cpu = task_cpu(p);
+	dest_cpu = p->sched_class->select_task_rq(p, task_cpu(p), SD_BALANCE_EXEC, 0, 1);
 	if (dest_cpu == smp_processor_id())
 		goto unlock;
 
-	if (likely(cpu_active(dest_cpu))) {
+	if (likely(cpu_active(dest_cpu) && likely(!cpu_isolated(dest_cpu)))) {
 		struct migration_arg arg = { p, dest_cpu };
 
 		raw_spin_unlock_irqrestore(&p->pi_lock, flags);
-		stop_one_cpu(task_cpu(p), migration_cpu_stop, &arg);
+		stop_one_cpu(curr_cpu, migration_cpu_stop, &arg);
 		return;
 	}
 unlock:
@@ -2888,16 +3178,29 @@
 	int cpu = smp_processor_id();
 	struct rq *rq = cpu_rq(cpu);
 	struct task_struct *curr = rq->curr;
+	u64 wallclock;
+	bool early_notif;
+	u32 old_load;
+	struct related_thread_group *grp;
 
 	sched_clock_tick();
 
 	raw_spin_lock(&rq->lock);
+	old_load = task_load(curr);
+	set_window_start(rq);
 	update_rq_clock(rq);
 	curr->sched_class->task_tick(rq, curr, 0);
 	update_cpu_load_active(rq);
 	calc_global_load_tick(rq);
+	wallclock = sched_ktime_clock();
+	update_task_ravg(rq->curr, rq, TASK_UPDATE, wallclock, 0);
+	early_notif = early_detection_notify(rq, wallclock);
 	raw_spin_unlock(&rq->lock);
 
+	if (early_notif)
+		atomic_notifier_call_chain(&load_alert_notifier_head,
+					0, (void *)(long)cpu);
+
 	perf_event_task_tick();
 
 #ifdef CONFIG_SMP
@@ -2905,6 +3208,18 @@
 	trigger_load_balance(rq);
 #endif
 	rq_last_tick_reset(rq);
+
+	rcu_read_lock();
+	grp = task_related_thread_group(curr);
+	if (update_preferred_cluster(grp, curr, old_load))
+		set_preferred_cluster(grp);
+	rcu_read_unlock();
+
+	if (curr->sched_class == &fair_sched_class)
+		check_for_migration(rq, curr);
+
+	if (cpu == tick_do_timer_cpu)
+		core_ctl_check(wallclock);
 }
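The chain used here is the ATOMIC_NOTIFIER_HEAD(load_alert_notifier_head) declared near the top of this file; a sketch of a subscriber (names hypothetical), matching the (void *)(long)cpu argument convention above and assuming the chain is visible to the caller:

#include <linux/notifier.h>
#include <linux/printk.h>

static int load_alert_cb(struct notifier_block *nb, unsigned long action,
			 void *data)
{
	int cpu = (long)data;

	pr_debug("early detection fired on cpu%d\n", cpu);
	return NOTIFY_OK;
}

static struct notifier_block load_alert_nb = {
	.notifier_call = load_alert_cb,
};

static int __init load_alert_init(void)
{
	return atomic_notifier_chain_register(&load_alert_notifier_head,
					      &load_alert_nb);
}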
 
 #ifdef CONFIG_NO_HZ_FULL
@@ -3006,6 +3321,9 @@
  */
 static noinline void __schedule_bug(struct task_struct *prev)
 {
+	/* Save this before calling printk(), since that will clobber it */
+	unsigned long preempt_disable_ip = get_preempt_disable_ip(current);
+
 	if (oops_in_progress)
 		return;
 
@@ -3016,12 +3334,14 @@
 	print_modules();
 	if (irqs_disabled())
 		print_irqtrace_events(prev);
-#ifdef CONFIG_DEBUG_PREEMPT
-	if (in_atomic_preempt_off()) {
+	if (IS_ENABLED(CONFIG_DEBUG_PREEMPT)
+	    && in_atomic_preempt_off()) {
 		pr_err("Preemption disabled at:");
-		print_ip_sym(current->preempt_disable_ip);
+		print_ip_sym(preempt_disable_ip);
 		pr_cont("\n");
 	}
+#ifdef CONFIG_PANIC_ON_SCHED_BUG
+	BUG();
 #endif
 	dump_stack();
 	add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
@@ -3132,10 +3452,10 @@
 	unsigned long *switch_count;
 	struct rq *rq;
 	int cpu;
+	u64 wallclock;
 
 	cpu = smp_processor_id();
 	rq = cpu_rq(cpu);
-	rcu_note_context_switch();
 	prev = rq->curr;
 
 	/*
@@ -3154,13 +3474,16 @@
 	if (sched_feat(HRTICK))
 		hrtick_clear(rq);
 
+	local_irq_disable();
+	rcu_note_context_switch();
+
 	/*
 	 * Make sure that signal_pending_state()->signal_pending() below
 	 * can't be reordered with __set_current_state(TASK_INTERRUPTIBLE)
 	 * done by the caller to avoid the race with signal_wake_up().
 	 */
 	smp_mb__before_spinlock();
-	raw_spin_lock_irq(&rq->lock);
+	raw_spin_lock(&rq->lock);
 	lockdep_pin_lock(&rq->lock);
 
 	rq->clock_skip_update <<= 1; /* promote REQ to ACT */
@@ -3197,15 +3520,30 @@
 	clear_preempt_need_resched();
 	rq->clock_skip_update = 0;
 
+	BUG_ON(task_cpu(next) != cpu_of(rq));
+
+	wallclock = sched_ktime_clock();
 	if (likely(prev != next)) {
+		update_task_ravg(prev, rq, PUT_PREV_TASK, wallclock, 0);
+		update_task_ravg(next, rq, PICK_NEXT_TASK, wallclock, 0);
+		if (!is_idle_task(prev) && !prev->on_rq)
+			update_avg_burst(prev);
+
+#ifdef CONFIG_SCHED_WALT
+		if (!prev->on_rq)
+			prev->last_sleep_ts = wallclock;
+#endif
 		rq->nr_switches++;
 		rq->curr = next;
 		++*switch_count;
 
+		set_task_last_switch_out(prev, wallclock);
+
 		trace_sched_switch(preempt, prev, next);
 		rq = context_switch(rq, prev, next); /* unlocks the rq */
 		cpu = cpu_of(rq);
 	} else {
+		update_task_ravg(prev, rq, TASK_UPDATE, wallclock, 0);
 		lockdep_unpin_lock(&rq->lock);
 		raw_spin_unlock_irq(&rq->lock);
 	}
@@ -3371,7 +3709,7 @@
 int default_wake_function(wait_queue_t *curr, unsigned mode, int wake_flags,
 			  void *key)
 {
-	return try_to_wake_up(curr->private, mode, wake_flags);
+	return try_to_wake_up(curr->private, mode, wake_flags, 1);
 }
 EXPORT_SYMBOL(default_wake_function);
 
@@ -3390,13 +3728,14 @@
  */
 void rt_mutex_setprio(struct task_struct *p, int prio)
 {
-	int oldprio, queued, running, enqueue_flag = ENQUEUE_RESTORE;
+	int oldprio, queued, running, queue_flag = DEQUEUE_SAVE | DEQUEUE_MOVE;
 	struct rq *rq;
 	const struct sched_class *prev_class;
 
 	BUG_ON(prio > MAX_PRIO);
 
 	rq = __task_rq_lock(p);
+	update_rq_clock(rq);
 
 	/*
 	 * Idle task boosting is a nono in general. There is one
@@ -3418,11 +3757,15 @@
 
 	trace_sched_pi_setprio(p, prio);
 	oldprio = p->prio;
+
+	if (oldprio == prio)
+		queue_flag &= ~DEQUEUE_MOVE;
+
 	prev_class = p->sched_class;
 	queued = task_on_rq_queued(p);
 	running = task_current(rq, p);
 	if (queued)
-		dequeue_task(rq, p, DEQUEUE_SAVE);
+		dequeue_task(rq, p, queue_flag);
 	if (running)
 		put_prev_task(rq, p);
 
@@ -3440,7 +3783,7 @@
 		if (!dl_prio(p->normal_prio) ||
 		    (pi_task && dl_entity_preempt(&pi_task->dl, &p->dl))) {
 			p->dl.dl_boosted = 1;
-			enqueue_flag |= ENQUEUE_REPLENISH;
+			queue_flag |= ENQUEUE_REPLENISH;
 		} else
 			p->dl.dl_boosted = 0;
 		p->sched_class = &dl_sched_class;
@@ -3448,7 +3791,7 @@
 		if (dl_prio(oldprio))
 			p->dl.dl_boosted = 0;
 		if (oldprio < prio)
-			enqueue_flag |= ENQUEUE_HEAD;
+			queue_flag |= ENQUEUE_HEAD;
 		p->sched_class = &rt_sched_class;
 	} else {
 		if (dl_prio(oldprio))
@@ -3463,7 +3806,7 @@
 	if (running)
 		p->sched_class->set_curr_task(rq);
 	if (queued)
-		enqueue_task(rq, p, enqueue_flag);
+		enqueue_task(rq, p, queue_flag);
 
 	check_class_changed(rq, p, prev_class, oldprio);
 out_unlock:
@@ -3488,6 +3831,8 @@
 	 * the task might be in the middle of scheduling on another CPU.
 	 */
 	rq = task_rq_lock(p, &flags);
+	update_rq_clock(rq);
+
 	/*
 	 * The RT priorities are set via sched_setscheduler(), but we still
 	 * allow the 'normal' nice value to be set - but as expected
@@ -3820,6 +4165,7 @@
 	const struct sched_class *prev_class;
 	struct rq *rq;
 	int reset_on_fork;
+	int queue_flags = DEQUEUE_SAVE | DEQUEUE_MOVE;
 
 	/* may grab non-irq protected spin_locks */
 	BUG_ON(in_interrupt());
@@ -3915,6 +4261,7 @@
 	 * runqueue lock must be held.
 	 */
 	rq = task_rq_lock(p, &flags);
+	update_rq_clock(rq);
 
 	/*
 	 * Changing the policy of the stop threads its a very bad idea
@@ -4002,17 +4349,14 @@
 		 * itself.
 		 */
 		new_effective_prio = rt_mutex_get_effective_prio(p, newprio);
-		if (new_effective_prio == oldprio) {
-			__setscheduler_params(p, attr);
-			task_rq_unlock(rq, p, &flags);
-			return 0;
-		}
+		if (new_effective_prio == oldprio)
+			queue_flags &= ~DEQUEUE_MOVE;
 	}
 
 	queued = task_on_rq_queued(p);
 	running = task_current(rq, p);
 	if (queued)
-		dequeue_task(rq, p, DEQUEUE_SAVE);
+		dequeue_task(rq, p, queue_flags);
 	if (running)
 		put_prev_task(rq, p);
 
@@ -4022,15 +4366,14 @@
 	if (running)
 		p->sched_class->set_curr_task(rq);
 	if (queued) {
-		int enqueue_flags = ENQUEUE_RESTORE;
 		/*
 		 * We enqueue to tail when the priority of a task is
 		 * increased (user space view).
 		 */
-		if (oldprio <= p->prio)
-			enqueue_flags |= ENQUEUE_HEAD;
+		if (oldprio < p->prio)
+			queue_flags |= ENQUEUE_HEAD;
 
-		enqueue_task(rq, p, enqueue_flags);
+		enqueue_task(rq, p, queue_flags);
 	}
 
 	check_class_changed(rq, p, prev_class, oldprio);
@@ -4108,7 +4451,7 @@
 {
 	return _sched_setscheduler(p, policy, param, false);
 }
-EXPORT_SYMBOL_GPL(sched_setscheduler_nocheck);
+EXPORT_SYMBOL(sched_setscheduler_nocheck);
 
 static int
 do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param)
@@ -4428,6 +4771,8 @@
 	cpumask_var_t cpus_allowed, new_mask;
 	struct task_struct *p;
 	int retval;
+	int dest_cpu;
+	cpumask_t allowed_mask;
 
 	rcu_read_lock();
 
@@ -4489,8 +4834,10 @@
 	}
 #endif
 again:
+	cpumask_andnot(&allowed_mask, new_mask, cpu_isolated_mask);
+	dest_cpu = cpumask_any_and(cpu_active_mask, &allowed_mask);
+	if (dest_cpu < nr_cpu_ids) {
 	retval = __set_cpus_allowed_ptr(p, new_mask, true);
-
 	if (!retval) {
 		cpuset_cpus_allowed(p, cpus_allowed);
 		if (!cpumask_subset(new_mask, cpus_allowed)) {
@@ -4503,6 +4850,10 @@
 			goto again;
 		}
 	}
+	} else {
+		retval = -EINVAL;
+	}
+
 out_free_new_mask:
 	free_cpumask_var(new_mask);
 out_free_cpus_allowed:
@@ -4566,6 +4917,15 @@
 
 	raw_spin_lock_irqsave(&p->pi_lock, flags);
 	cpumask_and(mask, &p->cpus_allowed, cpu_active_mask);
+
+	/*
+	 * The userspace tasks are forbidden to run on
+	 * isolated CPUs. So exclude isolated CPUs from
+	 * the getaffinity.
+	 */
+	if (!(p->flags & PF_KTHREAD))
+		cpumask_andnot(mask, mask, cpu_isolated_mask);
+
 	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
 
 out_unlock:
@@ -4987,6 +5347,8 @@
 			sched_show_task(p);
 	}
 
+	touch_all_softlockup_watchdogs();
+
 #ifdef CONFIG_SCHED_DEBUG
 	sysrq_sched_debug_show();
 #endif
@@ -5007,22 +5369,29 @@
  * init_idle - set up an idle thread for a given CPU
  * @idle: task in question
  * @cpu: cpu the idle task belongs to
+ * @cpu_up: true when the CPU is being brought up via hotplug, false at boot
  *
  * NOTE: this function does not set the idle thread's NEED_RESCHED
  * flag, to make booting more robust.
  */
-void init_idle(struct task_struct *idle, int cpu)
+void init_idle(struct task_struct *idle, int cpu, bool cpu_up)
 {
 	struct rq *rq = cpu_rq(cpu);
 	unsigned long flags;
 
+	if (!cpu_up)
+		init_new_task_load(idle, true);
+
 	raw_spin_lock_irqsave(&idle->pi_lock, flags);
 	raw_spin_lock(&rq->lock);
 
 	__sched_fork(0, idle);
+
 	idle->state = TASK_RUNNING;
 	idle->se.exec_start = sched_clock();
 
+	kasan_unpoison_task_stack(idle);
+
 #ifdef CONFIG_SMP
 	/*
 	 * Its possible that init_idle() gets called multiple times on a task,
@@ -5245,18 +5614,54 @@
 };
 
 /*
- * Migrate all tasks from the rq, sleeping tasks will be migrated by
- * try_to_wake_up()->select_task_rq().
+ * Remove a task from the runqueue and pretend that it's migrating. This
+ * should prevent migrations for the detached task and disallow further
+ * changes to tsk_cpus_allowed.
+ */
+static void
+detach_one_task(struct task_struct *p, struct rq *rq, struct list_head *tasks)
+{
+	lockdep_assert_held(&rq->lock);
+
+	p->on_rq = TASK_ON_RQ_MIGRATING;
+	deactivate_task(rq, p, 0);
+	list_add(&p->se.group_node, tasks);
+}
+
+static void attach_tasks(struct list_head *tasks, struct rq *rq)
+{
+	struct task_struct *p;
+
+	lockdep_assert_held(&rq->lock);
+
+	while (!list_empty(tasks)) {
+		p = list_first_entry(tasks, struct task_struct, se.group_node);
+		list_del_init(&p->se.group_node);
+
+		BUG_ON(task_rq(p) != rq);
+		activate_task(rq, p, 0);
+		p->on_rq = TASK_ON_RQ_QUEUED;
+	}
+}
+
+/*
+ * Migrate all tasks from the rq (pinned kthreads are skipped unless
+ * migrate_pinned_tasks says so); sleeping tasks will be migrated by
+ * try_to_wake_up()->select_task_rq().
  *
  * Called with rq->lock held even though we'er in stop_machine() and
  * there's no concurrency possible, we hold the required locks anyway
  * because of lock validation efforts.
  */
-static void migrate_tasks(struct rq *dead_rq)
+static void migrate_tasks(struct rq *dead_rq, bool migrate_pinned_tasks)
 {
 	struct rq *rq = dead_rq;
 	struct task_struct *next, *stop = rq->stop;
 	int dest_cpu;
+	unsigned int num_pinned_kthreads = 1; /* this thread */
+	LIST_HEAD(tasks);
+	cpumask_t avail_cpus;
+
+	cpumask_andnot(&avail_cpus, cpu_online_mask, cpu_isolated_mask);
 
 	/*
 	 * Fudge the rq selection such that the below task selection loop
@@ -5292,6 +5697,14 @@
 		BUG_ON(!next);
 		next->sched_class->put_prev_task(rq, next);
 
+		if (!migrate_pinned_tasks && next->flags & PF_KTHREAD &&
+			!cpumask_intersects(&avail_cpus, &next->cpus_allowed)) {
+			detach_one_task(next, rq, &tasks);
+			num_pinned_kthreads += 1;
+			lockdep_unpin_lock(&rq->lock);
+			continue;
+		}
+
 		/*
 		 * Rules for changing task_struct::cpus_allowed are holding
 		 * both pi_lock and rq->lock, such that holding either
@@ -5310,26 +5723,271 @@
 		 * Since we're inside stop-machine, _nothing_ should have
 		 * changed the task, WARN if weird stuff happened, because in
 		 * that case the above rq->lock drop is a fail too.
+		 * However, during cpu isolation the load balancer might have
+		 * interfered, since we don't stop all CPUs. Ignore the
+		 * warning in this case.
 		 */
-		if (WARN_ON(task_rq(next) != rq || !task_on_rq_queued(next))) {
+		if (task_rq(next) != rq || !task_on_rq_queued(next)) {
+			WARN_ON(migrate_pinned_tasks);
 			raw_spin_unlock(&next->pi_lock);
 			continue;
 		}
 
 		/* Find suitable destination for @next, with force if needed. */
-		dest_cpu = select_fallback_rq(dead_rq->cpu, next);
+		dest_cpu = select_fallback_rq(dead_rq->cpu, next, false);
 
 		rq = __migrate_task(rq, next, dest_cpu);
 		if (rq != dead_rq) {
+			raw_spin_unlock(&next->pi_lock);
 			raw_spin_unlock(&rq->lock);
+			notify_migration(dead_rq->cpu, dest_cpu, true, next);
 			rq = dead_rq;
+			raw_spin_lock(&next->pi_lock);
 			raw_spin_lock(&rq->lock);
 		}
 		raw_spin_unlock(&next->pi_lock);
 	}
 
 	rq->stop = stop;
+
+	if (num_pinned_kthreads > 1)
+		attach_tasks(&tasks, rq);
 }
+
+static void set_rq_online(struct rq *rq);
+static void set_rq_offline(struct rq *rq);
+
+int do_isolation_work_cpu_stop(void *data)
+{
+	unsigned int cpu = smp_processor_id();
+	struct rq *rq = cpu_rq(cpu);
+
+	watchdog_disable(cpu);
+
+	irq_migrate_all_off_this_cpu();
+
+	local_irq_disable();
+
+	sched_ttwu_pending();
+
+	raw_spin_lock(&rq->lock);
+
+	/*
+	 * Temporarily mark the rq as offline. This will allow us to
+	 * move tasks off the CPU.
+	 */
+	if (rq->rd) {
+		BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
+		set_rq_offline(rq);
+	}
+
+	migrate_tasks(rq, false);
+
+	if (rq->rd)
+		set_rq_online(rq);
+	raw_spin_unlock(&rq->lock);
+
+	/*
+	 * We might have been in tickless state. Clear NOHZ flags to avoid
+	 * being kicked to help out with load balancing.
+	 */
+	nohz_balance_clear_nohz_mask(cpu);
+
+	clear_hmp_request(cpu);
+	local_irq_enable();
+	return 0;
+}
+
+int do_unisolation_work_cpu_stop(void *data)
+{
+	watchdog_enable(smp_processor_id());
+	return 0;
+}
+
+static void init_sched_groups_capacity(int cpu, struct sched_domain *sd);
+
+static void sched_update_group_capacities(int cpu)
+{
+	struct sched_domain *sd;
+
+	mutex_lock(&sched_domains_mutex);
+	rcu_read_lock();
+
+	for_each_domain(cpu, sd) {
+		int balance_cpu = group_balance_cpu(sd->groups);
+
+		init_sched_groups_capacity(cpu, sd);
+		/*
+		 * Need to ensure this is also called for the
+		 * balance CPU.
+		 */
+		if (cpu != balance_cpu)
+			init_sched_groups_capacity(balance_cpu, sd);
+	}
+
+	rcu_read_unlock();
+	mutex_unlock(&sched_domains_mutex);
+}
+
+static unsigned int cpu_isolation_vote[NR_CPUS];
+
+int sched_isolate_count(const cpumask_t *mask, bool include_offline)
+{
+	cpumask_t count_mask = CPU_MASK_NONE;
+
+	if (include_offline) {
+		cpumask_complement(&count_mask, cpu_online_mask);
+		cpumask_or(&count_mask, &count_mask, cpu_isolated_mask);
+		cpumask_and(&count_mask, &count_mask, mask);
+	} else {
+		cpumask_and(&count_mask, mask, cpu_isolated_mask);
+	}
+
+	return cpumask_weight(&count_mask);
+}
+
+/*
+ * 1) CPU is isolated and cpu is offlined:
+ *	Unisolate the core.
+ * 2) CPU is not isolated and CPU is offlined:
+ *	No action taken.
+ * 3) CPU is offline and request to isolate
+ *	Request ignored.
+ * 4) CPU is offline and isolated:
+ *	Not a possible state.
+ * 5) CPU is online and request to isolate
+ *	Normal case: Isolate the CPU
+ * 6) CPU is not isolated and comes back online
+ *	Nothing to do
+ *
+ * Note: The client calling sched_isolate_cpu() is responsible for ONLY
+ * calling sched_unisolate_cpu() on a CPU that the client previously isolated.
+ * Client is also responsible for unisolating when a core goes offline
+ * (after CPU is marked offline).
+ */
+int sched_isolate_cpu(int cpu)
+{
+	struct rq *rq = cpu_rq(cpu);
+	cpumask_t avail_cpus;
+	int ret_code = 0;
+	u64 start_time = 0;
+
+	if (trace_sched_isolate_enabled())
+		start_time = sched_clock();
+
+	cpu_maps_update_begin();
+
+	cpumask_andnot(&avail_cpus, cpu_online_mask, cpu_isolated_mask);
+
+	/* We cannot isolate ALL cpus in the system */
+	if (cpumask_weight(&avail_cpus) == 1) {
+		ret_code = -EINVAL;
+		goto out;
+	}
+
+	if (!cpu_online(cpu)) {
+		ret_code = -EINVAL;
+		goto out;
+	}
+
+	if (++cpu_isolation_vote[cpu] > 1)
+		goto out;
+
+	/*
+	 * There is a race between watchdog being enabled by hotplug and
+	 * core isolation disabling the watchdog. When a CPU is hotplugged in
+	 * and the hotplug lock has been released the watchdog thread might
+	 * not have run yet to enable the watchdog.
+	 * We have to wait for the watchdog to be enabled before proceeding.
+	 */
+	if (!watchdog_configured(cpu)) {
+		msleep(20);
+		if (!watchdog_configured(cpu)) {
+			--cpu_isolation_vote[cpu];
+			ret_code = -EBUSY;
+			goto out;
+		}
+	}
+
+	set_cpu_isolated(cpu, true);
+	cpumask_clear_cpu(cpu, &avail_cpus);
+
+	/* Migrate timers */
+	smp_call_function_any(&avail_cpus, hrtimer_quiesce_cpu, &cpu, 1);
+	smp_call_function_any(&avail_cpus, timer_quiesce_cpu, &cpu, 1);
+
+	stop_cpus(cpumask_of(cpu), do_isolation_work_cpu_stop, 0);
+
+	calc_load_migrate(rq);
+	update_max_interval();
+	sched_update_group_capacities(cpu);
+
+out:
+	cpu_maps_update_done();
+	trace_sched_isolate(cpu, cpumask_bits(cpu_isolated_mask)[0],
+			    start_time, 1);
+	return ret_code;
+}
+
+/*
+ * Note: The client calling sched_isolate_cpu() is responsible for ONLY
+ * calling sched_unisolate_cpu() on a CPU that the client previously isolated.
+ * Client is also responsible for unisolating when a core goes offline
+ * (after CPU is marked offline).
+ */
+int sched_unisolate_cpu_unlocked(int cpu)
+{
+	int ret_code = 0;
+	struct rq *rq = cpu_rq(cpu);
+	u64 start_time = 0;
+
+	if (trace_sched_isolate_enabled())
+		start_time = sched_clock();
+
+	if (!cpu_isolation_vote[cpu]) {
+		ret_code = -EINVAL;
+		goto out;
+	}
+
+	if (--cpu_isolation_vote[cpu])
+		goto out;
+
+	if (cpu_online(cpu)) {
+		unsigned long flags;
+
+		raw_spin_lock_irqsave(&rq->lock, flags);
+		rq->age_stamp = sched_clock_cpu(cpu);
+		raw_spin_unlock_irqrestore(&rq->lock, flags);
+	}
+
+	set_cpu_isolated(cpu, false);
+	update_max_interval();
+	sched_update_group_capacities(cpu);
+
+	if (cpu_online(cpu)) {
+		stop_cpus(cpumask_of(cpu), do_unisolation_work_cpu_stop, 0);
+
+		/* Kick CPU to immediately do load balancing */
+		if (!test_and_set_bit(NOHZ_BALANCE_KICK, nohz_flags(cpu)))
+			smp_send_reschedule(cpu);
+	}
+
+out:
+	trace_sched_isolate(cpu, cpumask_bits(cpu_isolated_mask)[0],
+			    start_time, 0);
+	return ret_code;
+}
+
+int sched_unisolate_cpu(int cpu)
+{
+	int ret_code;
+
+	cpu_maps_update_begin();
+	ret_code = sched_unisolate_cpu_unlocked(cpu);
+	cpu_maps_update_done();
+	return ret_code;
+}
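+
+/*
+ * Illustrative sketch (not part of this patch): the intended client usage of
+ * the voting API above. Each sched_isolate_cpu() call takes one isolation
+ * vote; the CPU is only unisolated once every vote has been dropped. The
+ * function name below is hypothetical.
+ */
+#if 0
+static int example_quiesce_cpu(int cpu)
+{
+	int ret = sched_isolate_cpu(cpu);	/* take one isolation vote */
+
+	if (ret)
+		return ret;	/* -EINVAL (offline/last CPU) or -EBUSY */
+
+	/* ... work with the CPU quiesced ... */
+
+	/* Drop only our vote; other clients may still hold the CPU. */
+	return sched_unisolate_cpu(cpu);
+}
+#endif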
+
 #endif /* CONFIG_HOTPLUG_CPU */
 
 #if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_SYSCTL)
@@ -5402,9 +6060,60 @@
 }
 
 static struct ctl_table *
+sd_alloc_ctl_energy_table(struct sched_group_energy *sge)
+{
+	struct ctl_table *table = sd_alloc_ctl_entry(5);
+
+	if (table == NULL)
+		return NULL;
+
+	set_table_entry(&table[0], "nr_idle_states", &sge->nr_idle_states,
+			sizeof(int), 0644, proc_dointvec_minmax, false);
+	set_table_entry(&table[1], "idle_states", &sge->idle_states[0].power,
+			sge->nr_idle_states*sizeof(struct idle_state), 0644,
+			proc_doulongvec_minmax, false);
+	set_table_entry(&table[2], "nr_cap_states", &sge->nr_cap_states,
+			sizeof(int), 0644, proc_dointvec_minmax, false);
+	set_table_entry(&table[3], "cap_states", &sge->cap_states[0].cap,
+			sge->nr_cap_states*sizeof(struct capacity_state), 0644,
+			proc_doulongvec_minmax, false);
+
+	return table;
+}
+
+static struct ctl_table *
+sd_alloc_ctl_group_table(struct sched_group *sg)
+{
+	struct ctl_table *table = sd_alloc_ctl_entry(2);
+
+	if (table == NULL)
+		return NULL;
+
+	table->procname = kstrdup("energy", GFP_KERNEL);
+	table->mode = 0555;
+	table->child = sd_alloc_ctl_energy_table((struct sched_group_energy *)sg->sge);
+
+	return table;
+}
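+
+/*
+ * Note (illustrative, not part of this patch): these tables attach the
+ * per-group energy data under the existing sched_domain sysctl tree, so it
+ * would typically surface as e.g.
+ *
+ *   /proc/sys/kernel/sched_domain/cpu0/domain0/group0/energy/nr_cap_states
+ *   /proc/sys/kernel/sched_domain/cpu0/domain0/group0/energy/cap_states
+ */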
+
+static struct ctl_table *
 sd_alloc_ctl_domain_table(struct sched_domain *sd)
 {
-	struct ctl_table *table = sd_alloc_ctl_entry(14);
+	struct ctl_table *table;
+	unsigned int nr_entries = 14;
+
+	int i = 0;
+	struct sched_group *sg = sd->groups;
+
+	if (sg->sge) {
+		int nr_sgs = 0;
+
+		/* Count the groups on this domain's circular list. */
+		do {} while (nr_sgs++, sg = sg->next, sg != sd->groups);
+
+		nr_entries += nr_sgs;
+	}
+
+	table = sd_alloc_ctl_entry(nr_entries);
 
 	if (table == NULL)
 		return NULL;
@@ -5437,7 +6146,19 @@
 		sizeof(long), 0644, proc_doulongvec_minmax, false);
 	set_table_entry(&table[12], "name", sd->name,
 		CORENAME_MAX_SIZE, 0444, proc_dostring, false);
-	/* &table[13] is terminator */
+	sg = sd->groups;
+	if (sg->sge) {
+		char buf[32];
+		struct ctl_table *entry = &table[13];
+
+		do {
+			snprintf(buf, 32, "group%d", i);
+			entry->procname = kstrdup(buf, GFP_KERNEL);
+			entry->mode = 0555;
+			entry->child = sd_alloc_ctl_group_table(sg);
+		} while (entry++, i++, sg = sg->next, sg != sd->groups);
+	}
+	/* &table[nr_entries-1] is terminator */
 
 	return table;
 }
@@ -5553,6 +6274,9 @@
 	switch (action & ~CPU_TASKS_FROZEN) {
 
 	case CPU_UP_PREPARE:
+		raw_spin_lock_irqsave(&rq->lock, flags);
+		set_window_start(rq);
+		raw_spin_unlock_irqrestore(&rq->lock, flags);
 		rq->calc_load_update = calc_load_update;
 		break;
 
@@ -5572,16 +6296,18 @@
 		sched_ttwu_pending();
 		/* Update our root-domain */
 		raw_spin_lock_irqsave(&rq->lock, flags);
+
 		if (rq->rd) {
 			BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
 			set_rq_offline(rq);
 		}
-		migrate_tasks(rq);
+		migrate_tasks(rq, true);
 		BUG_ON(rq->nr_running != 1); /* the migration thread */
 		raw_spin_unlock_irqrestore(&rq->lock, flags);
 		break;
 
 	case CPU_DEAD:
+		clear_hmp_request(cpu);
 		calc_load_migrate(rq);
 		break;
 #endif
@@ -5699,9 +6425,6 @@
 
 	if (!(sd->flags & SD_LOAD_BALANCE)) {
 		printk("does not load-balance\n");
-		if (sd->parent)
-			printk(KERN_ERR "ERROR: !SD_LOAD_BALANCE domain"
-					" has parent");
 		return -1;
 	}
 
@@ -5743,7 +6466,7 @@
 		printk(KERN_CONT " %*pbl",
 		       cpumask_pr_args(sched_group_cpus(group)));
 		if (group->sgc->capacity != SCHED_CAPACITY_SCALE) {
-			printk(KERN_CONT " (cpu_capacity = %d)",
+			printk(KERN_CONT " (cpu_capacity = %lu)",
 				group->sgc->capacity);
 		}
 
@@ -5794,8 +6517,12 @@
 
 static int sd_degenerate(struct sched_domain *sd)
 {
-	if (cpumask_weight(sched_domain_span(sd)) == 1)
+	if (cpumask_weight(sched_domain_span(sd)) == 1) {
+		if (sd->groups->sge)
+			sd->flags &= ~SD_LOAD_BALANCE;
+		else
 		return 1;
+	}
 
 	/* Following flags need at least 2 groups */
 	if (sd->flags & (SD_LOAD_BALANCE |
@@ -5803,8 +6530,10 @@
 			 SD_BALANCE_FORK |
 			 SD_BALANCE_EXEC |
 			 SD_SHARE_CPUCAPACITY |
+			 SD_ASYM_CPUCAPACITY |
 			 SD_SHARE_PKG_RESOURCES |
-			 SD_SHARE_POWERDOMAIN)) {
+			 SD_SHARE_POWERDOMAIN |
+			 SD_SHARE_CAP_STATES)) {
 		if (sd->groups != sd->groups->next)
 			return 0;
 	}
@@ -5833,10 +6562,16 @@
 				SD_BALANCE_NEWIDLE |
 				SD_BALANCE_FORK |
 				SD_BALANCE_EXEC |
+				SD_ASYM_CPUCAPACITY |
 				SD_SHARE_CPUCAPACITY |
 				SD_SHARE_PKG_RESOURCES |
 				SD_PREFER_SIBLING |
-				SD_SHARE_POWERDOMAIN);
+				SD_SHARE_POWERDOMAIN |
+				SD_SHARE_CAP_STATES);
+		if (parent->groups->sge) {
+			parent->flags &= ~SD_LOAD_BALANCE;
+			return 0;
+		}
 		if (nr_node_ids == 1)
 			pflags &= ~SD_SERIALIZE;
 	}
@@ -5921,6 +6656,11 @@
 
 	if (cpupri_init(&rd->cpupri) != 0)
 		goto free_rto_mask;
+
+	init_max_cpu_capacity(&rd->max_cpu_capacity);
+
+	rd->max_cap_orig_cpu = rd->min_cap_orig_cpu = -1;
+
 	return 0;
 
 free_rto_mask:
@@ -6026,11 +6766,13 @@
 DEFINE_PER_CPU(struct sched_domain *, sd_numa);
 DEFINE_PER_CPU(struct sched_domain *, sd_busy);
 DEFINE_PER_CPU(struct sched_domain *, sd_asym);
+DEFINE_PER_CPU(struct sched_domain *, sd_ea);
+DEFINE_PER_CPU(struct sched_domain *, sd_scs);
 
 static void update_top_cache_domain(int cpu)
 {
 	struct sched_domain *sd;
-	struct sched_domain *busy_sd = NULL;
+	struct sched_domain *busy_sd = NULL, *ea_sd = NULL;
 	int id = cpu;
 	int size = 1;
 
@@ -6051,6 +6793,17 @@
 
 	sd = highest_flag_domain(cpu, SD_ASYM_PACKING);
 	rcu_assign_pointer(per_cpu(sd_asym, cpu), sd);
+
+	for_each_domain(cpu, sd) {
+		if (sd->groups->sge)
+			ea_sd = sd;
+		else
+			break;
+	}
+	rcu_assign_pointer(per_cpu(sd_ea, cpu), ea_sd);
+
+	sd = highest_flag_domain(cpu, SD_SHARE_CAP_STATES);
+	rcu_assign_pointer(per_cpu(sd_scs, cpu), sd);
 }
 
 /*
@@ -6062,6 +6815,7 @@
 {
 	struct rq *rq = cpu_rq(cpu);
 	struct sched_domain *tmp;
+	unsigned long next_balance = rq->next_balance;
 
 	/* Remove the sched domains which do not contribute to scheduling. */
 	for (tmp = sd; tmp; ) {
@@ -6093,6 +6847,17 @@
 			sd->child = NULL;
 	}
 
+	for (tmp = sd; tmp; ) {
+		unsigned long interval;
+
+		interval = msecs_to_jiffies(tmp->balance_interval);
+		if (time_after(next_balance, tmp->last_balance + interval))
+			next_balance = tmp->last_balance + interval;
+
+		tmp = tmp->parent;
+	}
+	rq->next_balance = next_balance;
+
 	sched_domain_debug(sd, cpu);
 
 	rq_attach_root(rq, rd);
@@ -6227,6 +6992,8 @@
 		 * die on a /0 trap.
 		 */
 		sg->sgc->capacity = SCHED_CAPACITY_SCALE * cpumask_weight(sg_span);
+		sg->sgc->max_capacity = SCHED_CAPACITY_SCALE;
+		sg->sgc->min_capacity = SCHED_CAPACITY_SCALE;
 
 		/*
 		 * Make sure the first group of this domain contains the
@@ -6340,11 +7107,14 @@
 static void init_sched_groups_capacity(int cpu, struct sched_domain *sd)
 {
 	struct sched_group *sg = sd->groups;
+	cpumask_t avail_mask;
 
 	WARN_ON(!sg);
 
 	do {
-		sg->group_weight = cpumask_weight(sched_group_cpus(sg));
+		cpumask_andnot(&avail_mask, sched_group_cpus(sg),
+							cpu_isolated_mask);
+		sg->group_weight = cpumask_weight(&avail_mask);
 		sg = sg->next;
 	} while (sg != sd->groups);
 
@@ -6356,6 +7126,66 @@
 }
 
 /*
+ * Check that the per-cpu provided sd energy data is consistent for all cpus
+ * within the mask.
+ */
+static inline void check_sched_energy_data(int cpu, sched_domain_energy_f fn,
+					   const struct cpumask *cpumask)
+{
+	const struct sched_group_energy * const sge = fn(cpu);
+	struct cpumask mask;
+	int i;
+
+	if (cpumask_weight(cpumask) <= 1)
+		return;
+
+	cpumask_xor(&mask, cpumask, get_cpu_mask(cpu));
+
+	for_each_cpu(i, &mask) {
+		const struct sched_group_energy * const e = fn(i);
+		int y;
+
+		BUG_ON(e->nr_idle_states != sge->nr_idle_states);
+
+		for (y = 0; y < (e->nr_idle_states); y++) {
+			BUG_ON(e->idle_states[y].power !=
+					sge->idle_states[y].power);
+		}
+
+		BUG_ON(e->nr_cap_states != sge->nr_cap_states);
+
+		for (y = 0; y < (e->nr_cap_states); y++) {
+			BUG_ON(e->cap_states[y].cap != sge->cap_states[y].cap);
+			BUG_ON(e->cap_states[y].power !=
+					sge->cap_states[y].power);
+		}
+	}
+}
+
+static void init_sched_energy(int cpu, struct sched_domain *sd,
+			      sched_domain_energy_f fn)
+{
+	if (!(fn && fn(cpu)))
+		return;
+
+	if (cpu != group_balance_cpu(sd->groups))
+		return;
+
+	if (sd->child && !sd->child->groups->sge) {
+		pr_err("BUG: EAS setup broken for CPU%d\n", cpu);
+#ifdef CONFIG_SCHED_DEBUG
+		pr_err("     energy data on %s but not on %s domain\n",
+			sd->name, sd->child->name);
+#endif
+		return;
+	}
+
+	check_sched_energy_data(cpu, fn, sched_group_cpus(sd->groups));
+
+	sd->groups->sge = fn(cpu);
+}
+
+/*
  * Initializers for schedule domains
  * Non-inlined to reduce accumulated stack pressure in build_sched_domains()
  */
@@ -6459,10 +7289,19 @@
 /*
  * SD_flags allowed in topology descriptions.
  *
+ * These flags are purely descriptive of the topology and do not prescribe
+ * behaviour. Behaviour is artificial and mapped in the below sd_init()
+ * function:
+ *
  * SD_SHARE_CPUCAPACITY      - describes SMT topologies
  * SD_SHARE_PKG_RESOURCES - describes shared caches
  * SD_NUMA                - describes NUMA topologies
  * SD_SHARE_POWERDOMAIN   - describes shared power domain
+ * SD_ASYM_CPUCAPACITY    - describes mixed capacity topologies
+ * SD_SHARE_CAP_STATES    - describes shared capacity states
+ *
+ * Odd one out, which besides describing the topology also prescribes
+ * the desired behaviour that goes along with it:
+ *
- * Odd one out:
  * SD_ASYM_PACKING        - describes SMT quirks
@@ -6472,10 +7311,13 @@
 	 SD_SHARE_PKG_RESOURCES |	\
 	 SD_NUMA |			\
 	 SD_ASYM_PACKING |		\
-	 SD_SHARE_POWERDOMAIN)
+	 SD_ASYM_CPUCAPACITY |		\
+	 SD_SHARE_POWERDOMAIN |		\
+	 SD_SHARE_CAP_STATES)
 
 static struct sched_domain *
-sd_init(struct sched_domain_topology_level *tl, int cpu)
+sd_init(struct sched_domain_topology_level *tl,
+	struct sched_domain *child, int cpu)
 {
 	struct sched_domain *sd = *per_cpu_ptr(tl->data.sd, cpu);
 	int sd_weight, sd_flags = 0;
@@ -6527,6 +7369,7 @@
 		.smt_gain		= 0,
 		.max_newidle_lb_cost	= 0,
 		.next_decay_max_lb_cost	= jiffies,
+		.child			= child,
 #ifdef CONFIG_SCHED_DEBUG
 		.name			= tl->name,
 #endif
@@ -6536,6 +7379,13 @@
 	 * Convert topological properties into behaviour.
 	 */
 
+	if (sd->flags & SD_ASYM_CPUCAPACITY) {
+		struct sched_domain *t = sd;
+
+		for_each_lower_domain(t)
+			t->flags |= SD_BALANCE_WAKE;
+	}
+
 	if (sd->flags & SD_SHARE_CPUCAPACITY) {
 		sd->flags |= SD_PREFER_SIBLING;
 		sd->imbalance_pct = 110;
@@ -6982,16 +7832,13 @@
 		const struct cpumask *cpu_map, struct sched_domain_attr *attr,
 		struct sched_domain *child, int cpu)
 {
-	struct sched_domain *sd = sd_init(tl, cpu);
-	if (!sd)
-		return child;
+	struct sched_domain *sd = sd_init(tl, child, cpu);
 
 	cpumask_and(sched_domain_span(sd), cpu_map, tl->mask(cpu));
 	if (child) {
 		sd->level = child->level + 1;
 		sched_domain_level_max = max(sched_domain_level_max, sd->level);
 		child->parent = sd;
-		sd->child = child;
 
 		if (!cpumask_subset(sched_domain_span(child),
 				    sched_domain_span(sd))) {
@@ -7000,6 +7847,9 @@
 			pr_err("     the %s domain not a subset of the %s domain\n",
 					child->name, sd->name);
 #endif
+#ifdef CONFIG_PANIC_ON_SCHED_BUG
+			BUG();
+#endif
 			/* Fixup, ensure @sd has at least @child cpus. */
 			cpumask_or(sched_domain_span(sd),
 				   sched_domain_span(sd),
@@ -7039,8 +7889,6 @@
 				*per_cpu_ptr(d.sd, i) = sd;
 			if (tl->flags & SDTL_OVERLAP || sched_feat(FORCE_SD_OVERLAP))
 				sd->flags |= SD_OVERLAP;
-			if (cpumask_equal(cpu_map, sched_domain_span(sd)))
-				break;
 		}
 	}
 
@@ -7060,10 +7908,14 @@
 
 	/* Calculate CPU capacity for physical packages and nodes */
 	for (i = nr_cpumask_bits-1; i >= 0; i--) {
+		struct sched_domain_topology_level *tl = sched_domain_topology;
+
 		if (!cpumask_test_cpu(i, cpu_map))
 			continue;
 
-		for (sd = *per_cpu_ptr(d.sd, i); sd; sd = sd->parent) {
+		for (sd = *per_cpu_ptr(d.sd, i); sd; sd = sd->parent, tl++) {
+			if (energy_aware())
+				init_sched_energy(i, sd, tl->energy);
 			claim_allocations(i, sd);
 			init_sched_groups_capacity(i, sd);
 		}
@@ -7072,7 +7924,19 @@
 	/* Attach the domains */
 	rcu_read_lock();
 	for_each_cpu(i, cpu_map) {
+		int max_cpu = READ_ONCE(d.rd->max_cap_orig_cpu);
+		int min_cpu = READ_ONCE(d.rd->min_cap_orig_cpu);
+
+		if ((max_cpu < 0) || (cpu_rq(i)->cpu_capacity_orig >
+		    cpu_rq(max_cpu)->cpu_capacity_orig))
+			WRITE_ONCE(d.rd->max_cap_orig_cpu, i);
+
+		if ((min_cpu < 0) || (cpu_rq(i)->cpu_capacity_orig <
+		    cpu_rq(min_cpu)->cpu_capacity_orig))
+			WRITE_ONCE(d.rd->min_cap_orig_cpu, i);
+
 		sd = *per_cpu_ptr(d.sd, i);
+
 		cpu_attach_domain(sd, d.rd, i);
 	}
 	rcu_read_unlock();
@@ -7373,6 +8237,8 @@
 	hotcpu_notifier(cpuset_cpu_active, CPU_PRI_CPUSET_ACTIVE);
 	hotcpu_notifier(cpuset_cpu_inactive, CPU_PRI_CPUSET_INACTIVE);
 
+	update_cluster_topology();
+
 	init_hrtick();
 
 	/* Move init over to a non-isolated CPU */
@@ -7391,6 +8257,7 @@
 }
 #endif /* CONFIG_SMP */
 
+
 int in_sched_functions(unsigned long addr)
 {
 	return in_lock_functions(addr) ||
@@ -7414,6 +8281,15 @@
 	int i, j;
 	unsigned long alloc_size = 0, ptr;
 
+#ifdef CONFIG_SCHED_HMP
+	pr_info("HMP scheduling enabled.\n");
+#endif
+
+	BUG_ON(num_possible_cpus() > BITS_PER_LONG);
+
+	sched_boost_parse_dt();
+	init_clusters();
+
 #ifdef CONFIG_FAIR_GROUP_SCHED
 	alloc_size += 2 * nr_cpu_ids * sizeof(void **);
 #endif
@@ -7483,6 +8359,7 @@
 #ifdef CONFIG_FAIR_GROUP_SCHED
 		root_task_group.shares = ROOT_TASK_GROUP_LOAD;
 		INIT_LIST_HEAD(&rq->leaf_cfs_rq_list);
+		rq->tmp_alone_branch = &rq->leaf_cfs_rq_list;
 		/*
 		 * How much cpu bandwidth does root_task_group get?
 		 *
@@ -7524,10 +8401,57 @@
 		rq->active_balance = 0;
 		rq->next_balance = jiffies;
 		rq->push_cpu = 0;
+		rq->push_task = NULL;
 		rq->cpu = i;
 		rq->online = 0;
 		rq->idle_stamp = 0;
 		rq->avg_idle = 2*sysctl_sched_migration_cost;
+#ifdef CONFIG_SCHED_HMP
+		cpumask_set_cpu(i, &rq->freq_domain_cpumask);
+		rq->hmp_stats.cumulative_runnable_avg = 0;
+		rq->window_start = 0;
+		rq->hmp_stats.nr_big_tasks = 0;
+		rq->hmp_flags = 0;
+		rq->cur_irqload = 0;
+		rq->avg_irqload = 0;
+		rq->irqload_ts = 0;
+		rq->static_cpu_pwr_cost = 0;
+		rq->cc.cycles = 1;
+		rq->cc.time = 1;
+		rq->cstate = 0;
+		rq->wakeup_latency = 0;
+		rq->wakeup_energy = 0;
+
+		/*
+		 * All CPUs are part of the same cluster by default. This
+		 * avoids the need to check for rq->cluster being non-NULL in
+		 * hot paths like select_best_cpu().
+		 */
+		rq->cluster = &init_cluster;
+		rq->curr_runnable_sum = rq->prev_runnable_sum = 0;
+		rq->nt_curr_runnable_sum = rq->nt_prev_runnable_sum = 0;
+		memset(&rq->grp_time, 0, sizeof(struct group_cpu_time));
+		rq->old_busy_time = 0;
+		rq->old_estimated_time = 0;
+		rq->old_busy_time_group = 0;
+		rq->hmp_stats.pred_demands_sum = 0;
+		rq->curr_table = 0;
+		rq->prev_top = 0;
+		rq->curr_top = 0;
+
+		for (j = 0; j < NUM_TRACKED_WINDOWS; j++) {
+			memset(&rq->load_subs[j], 0,
+					sizeof(struct load_subtractions));
+
+			rq->top_tasks[j] = kcalloc(NUM_LOAD_INDICES,
+						sizeof(u8), GFP_NOWAIT);
+
+			/* No other choice */
+			BUG_ON(!rq->top_tasks[j]);
+
+			clear_top_tasks_bitmap(rq->top_tasks_bitmap[j]);
+		}
+#endif
 		rq->max_idle_balance_cost = sysctl_sched_migration_cost;
 
 		INIT_LIST_HEAD(&rq->cfs_tasks);
@@ -7544,6 +8468,11 @@
 		atomic_set(&rq->nr_iowait, 0);
 	}
 
+	i = alloc_related_thread_groups();
+	BUG_ON(i);
+
+	set_hmp_defaults();
+
 	set_load_weight(&init_task);
 
 #ifdef CONFIG_PREEMPT_NOTIFIERS
@@ -7567,7 +8496,7 @@
 	 * but because we are the idle thread, we just pick up running again
 	 * when this runqueue becomes "idle".
 	 */
-	init_idle(current, smp_processor_id());
+	init_idle(current, smp_processor_id(), false);
 
 	calc_load_update = jiffies + LOAD_FREQ;
 
@@ -7592,6 +8521,14 @@
 	return (nested == preempt_offset);
 }
 
+static int __might_sleep_init_called;
+int __init __might_sleep_init(void)
+{
+	__might_sleep_init_called = 1;
+	return 0;
+}
+early_initcall(__might_sleep_init);
+
 void __might_sleep(const char *file, int line, int preempt_offset)
 {
 	/*
@@ -7613,16 +8550,22 @@
 void ___might_sleep(const char *file, int line, int preempt_offset)
 {
 	static unsigned long prev_jiffy;	/* ratelimiting */
+	unsigned long preempt_disable_ip;
 
 	rcu_sleep_check(); /* WARN_ON_ONCE() by default, no rate limit reqd. */
 	if ((preempt_count_equals(preempt_offset) && !irqs_disabled() &&
-	     !is_idle_task(current)) ||
-	    system_state != SYSTEM_RUNNING || oops_in_progress)
+	     !is_idle_task(current)) || oops_in_progress)
+		return;
+	if (system_state != SYSTEM_RUNNING &&
+	    (!__might_sleep_init_called || system_state != SYSTEM_BOOTING))
 		return;
 	if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
 		return;
 	prev_jiffy = jiffies;
 
+	/* Save this before calling printk(), since that will clobber it */
+	preempt_disable_ip = get_preempt_disable_ip(current);
+
 	printk(KERN_ERR
 		"BUG: sleeping function called from invalid context at %s:%d\n",
 			file, line);
@@ -7637,12 +8580,14 @@
 	debug_show_held_locks(current);
 	if (irqs_disabled())
 		print_irqtrace_events(current);
-#ifdef CONFIG_DEBUG_PREEMPT
-	if (!preempt_count_equals(preempt_offset)) {
+	if (IS_ENABLED(CONFIG_DEBUG_PREEMPT)
+	    && !preempt_count_equals(preempt_offset)) {
 		pr_err("Preemption disabled at:");
-		print_ip_sym(current->preempt_disable_ip);
+		print_ip_sym(preempt_disable_ip);
 		pr_cont("\n");
 	}
+#ifdef CONFIG_PANIC_ON_SCHED_BUG
+	BUG();
 #endif
 	dump_stack();
 }
@@ -7803,11 +8748,9 @@
 void sched_offline_group(struct task_group *tg)
 {
 	unsigned long flags;
-	int i;
 
 	/* end participation in shares distribution */
-	for_each_possible_cpu(i)
-		unregister_fair_sched_group(tg, i);
+	unregister_fair_sched_group(tg);
 
 	spin_lock_irqsave(&task_group_lock, flags);
 	list_del_rcu(&tg->list);
@@ -7815,27 +8758,9 @@
 	spin_unlock_irqrestore(&task_group_lock, flags);
 }
 
-/* change task's runqueue when it moves between groups.
- *	The caller of this function should have put the task in its new group
- *	by now. This function just updates tsk->se.cfs_rq and tsk->se.parent to
- *	reflect its new group.
- */
-void sched_move_task(struct task_struct *tsk)
+static void sched_change_group(struct task_struct *tsk, int type)
 {
 	struct task_group *tg;
-	int queued, running;
-	unsigned long flags;
-	struct rq *rq;
-
-	rq = task_rq_lock(tsk, &flags);
-
-	running = task_current(rq, tsk);
-	queued = task_on_rq_queued(tsk);
-
-	if (queued)
-		dequeue_task(rq, tsk, DEQUEUE_SAVE);
-	if (unlikely(running))
-		put_prev_task(rq, tsk);
 
 	/*
 	 * All callers are synchronized by task_rq_lock(); we do not use RCU
@@ -7848,16 +8773,42 @@
 	tsk->sched_task_group = tg;
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
-	if (tsk->sched_class->task_move_group)
-		tsk->sched_class->task_move_group(tsk);
+	if (tsk->sched_class->task_change_group)
+		tsk->sched_class->task_change_group(tsk, type);
 	else
 #endif
 		set_task_rq(tsk, task_cpu(tsk));
+}
+
+/*
+ * Change task's runqueue when it moves between groups.
+ *
+ * The caller of this function should have put the task in its new group by
+ * now. This function just updates tsk->se.cfs_rq and tsk->se.parent to reflect
+ * its new group.
+ */
+void sched_move_task(struct task_struct *tsk)
+{
+	int queued, running;
+	unsigned long flags;
+	struct rq *rq;
+
+	rq = task_rq_lock(tsk, &flags);
+
+	running = task_current(rq, tsk);
+	queued = task_on_rq_queued(tsk);
+
+	if (queued)
+		dequeue_task(rq, tsk, DEQUEUE_SAVE | DEQUEUE_MOVE);
+	if (unlikely(running))
+		put_prev_task(rq, tsk);
+
+	sched_change_group(tsk, TASK_MOVE_GROUP);
 
 	if (unlikely(running))
 		tsk->sched_class->set_curr_task(rq);
 	if (queued)
-		enqueue_task(rq, tsk, ENQUEUE_RESTORE);
+		enqueue_task(rq, tsk, ENQUEUE_RESTORE | ENQUEUE_MOVE);
 
 	task_rq_unlock(rq, tsk, &flags);
 }
@@ -8238,7 +9189,7 @@
 
 #ifdef CONFIG_CGROUP_SCHED
 
-static inline struct task_group *css_tg(struct cgroup_subsys_state *css)
+inline struct task_group *css_tg(struct cgroup_subsys_state *css)
 {
 	return css ? container_of(css, struct task_group, css) : NULL;
 }
@@ -8289,15 +9240,28 @@
 	sched_free_group(tg);
 }
 
+/*
+ * This is called before wake_up_new_task(), therefore we really only
+ * have to set its group bits, all the other stuff does not apply.
+ */
 static void cpu_cgroup_fork(struct task_struct *task, void *private)
 {
-	sched_move_task(task);
+	unsigned long flags;
+	struct rq *rq;
+
+	rq = task_rq_lock(task, &flags);
+
+	update_rq_clock(rq);
+	sched_change_group(task, TASK_SET_GROUP);
+
+	task_rq_unlock(rq, task, &flags);
 }
 
 static int cpu_cgroup_can_attach(struct cgroup_taskset *tset)
 {
 	struct task_struct *task;
 	struct cgroup_subsys_state *css;
+	int ret = 0;
 
 	cgroup_taskset_for_each(task, css, tset) {
 #ifdef CONFIG_RT_GROUP_SCHED
@@ -8308,8 +9272,24 @@
 		if (task->sched_class != &fair_sched_class)
 			return -EINVAL;
 #endif
+		/*
+		 * Serialize against wake_up_new_task() such that if its
+		 * running, we're sure to observe its full state.
+		 */
+		raw_spin_lock_irq(&task->pi_lock);
+		/*
+		 * Avoid calling sched_move_task() before wake_up_new_task()
+		 * has happened. This would lead to problems with PELT, due to
+		 * move wanting to detach+attach while we're not attached yet.
+		 */
+		if (task->state == TASK_NEW)
+			ret = -EINVAL;
+		raw_spin_unlock_irq(&task->pi_lock);
+
+		if (ret)
+			break;
 	}
-	return 0;
+	return ret;
 }
 
 static void cpu_cgroup_attach(struct cgroup_taskset *tset)
@@ -8606,6 +9586,13 @@
 #endif /* CONFIG_RT_GROUP_SCHED */
 
 static struct cftype cpu_files[] = {
+#ifdef CONFIG_SCHED_HMP
+	{
+		.name = "upmigrate_discourage",
+		.read_u64 = cpu_upmigrate_discourage_read_u64,
+		.write_u64 = cpu_upmigrate_discourage_write_u64,
+	},
+#endif
 #ifdef CONFIG_FAIR_GROUP_SCHED
 	{
 		.name = "shares",
diff -ruw linux-4.4.115/kernel/sched/cpupri.c linux-4.4.115-fbx/kernel/sched/cpupri.c
--- linux-4.4.115/kernel/sched/cpupri.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/kernel/sched/cpupri.c	2019-01-22 16:16:28.695293423 +0100
@@ -27,6 +27,8 @@
  *  of the License.
  */
 
+#include "sched.h"
+
 #include <linux/gfp.h>
 #include <linux/sched.h>
 #include <linux/sched/rt.h>
@@ -51,6 +53,27 @@
 }
 
 /**
+ * drop_nopreempt_cpus - remove a cpu from the mask if it is likely
+ *			 non-preemptible
+ * @lowest_mask: mask with selected CPUs (non-NULL)
+ */
+static void
+drop_nopreempt_cpus(struct cpumask *lowest_mask)
+{
+	unsigned int cpu = cpumask_first(lowest_mask);
+
+	while (cpu < nr_cpu_ids) {
+		/* unlocked access */
+		struct task_struct *task = READ_ONCE(cpu_rq(cpu)->curr);
+
+		if (task_may_not_preempt(task, cpu))
+			cpumask_clear_cpu(cpu, lowest_mask);
+
+		cpu = cpumask_next(cpu, lowest_mask);
+	}
+}
+
+/**
  * cpupri_find - find the best (lowest-pri) CPU in the system
  * @cp: The cpupri context
  * @p: The task
@@ -70,9 +93,11 @@
 {
 	int idx = 0;
 	int task_pri = convert_prio(p->prio);
+	bool drop_nopreempts = task_pri <= MAX_RT_PRIO;
 
 	BUG_ON(task_pri >= CPUPRI_NR_PRIORITIES);
 
+retry:
 	for (idx = 0; idx < task_pri; idx++) {
 		struct cpupri_vec *vec  = &cp->pri_to_cpu[idx];
 		int skip = 0;
@@ -108,7 +133,8 @@
 
 		if (lowest_mask) {
 			cpumask_and(lowest_mask, &p->cpus_allowed, vec->mask);
-
+			if (drop_nopreempts)
+				drop_nopreempt_cpus(lowest_mask);
 			/*
 			 * We have to ensure that we have at least one bit
 			 * still set in the array, since the map could have
@@ -123,7 +149,14 @@
 
 		return 1;
 	}
-
+	/*
+	 * If no eligible CPU remains after dropping the likely
+	 * non-preemptible CPUs, retry with them included so we can still
+	 * find the lowest-priority target and avoid priority inversion.
+	 */
+	if (drop_nopreempts) {
+		drop_nopreempts = false;
+		goto retry;
+	}
 	return 0;
 }
 
@@ -246,3 +279,14 @@
 	for (i = 0; i < CPUPRI_NR_PRIORITIES; i++)
 		free_cpumask_var(cp->pri_to_cpu[i].mask);
 }
+
+/*
+ * cpupri_check_rt - check if the current CPU has an RT task.
+ * Should be called from an rcu-sched read-side critical section.
+ */
+bool cpupri_check_rt(void)
+{
+	int cpu = raw_smp_processor_id();
+
+	return cpu_rq(cpu)->rd->cpupri.cpu_to_pri[cpu] > CPUPRI_NORMAL;
+}
diff -ruw linux-4.4.115/kernel/sched/cputime.c linux-4.4.115-fbx/kernel/sched/cputime.c
--- linux-4.4.115/kernel/sched/cputime.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/kernel/sched/cputime.c	2019-10-29 09:26:25.617222496 +0100
@@ -49,6 +49,8 @@
 	unsigned long flags;
 	s64 delta;
 	int cpu;
+	u64 wallclock;
+	bool account = true;
 
 	if (!sched_clock_irqtime)
 		return;
@@ -56,7 +58,8 @@
 	local_irq_save(flags);
 
 	cpu = smp_processor_id();
-	delta = sched_clock_cpu(cpu) - __this_cpu_read(irq_start_time);
+	wallclock = sched_clock_cpu(cpu);
+	delta = wallclock - __this_cpu_read(irq_start_time);
 	__this_cpu_add(irq_start_time, delta);
 
 	irq_time_write_begin();
@@ -70,8 +73,16 @@
 		__this_cpu_add(cpu_hardirq_time, delta);
 	else if (in_serving_softirq() && curr != this_cpu_ksoftirqd())
 		__this_cpu_add(cpu_softirq_time, delta);
+	else
+		account = false;
 
 	irq_time_write_end();
+
+	if (account)
+		sched_account_irqtime(cpu, curr, delta, wallclock);
+	else if (curr != this_cpu_ksoftirqd())
+		sched_account_irqstart(cpu, curr, wallclock);
+
 	local_irq_restore(flags);
 }
 EXPORT_SYMBOL_GPL(irqtime_account_irq);
diff -ruw linux-4.4.115/kernel/sched/deadline.c linux-4.4.115-fbx/kernel/sched/deadline.c
--- linux-4.4.115/kernel/sched/deadline.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/kernel/sched/deadline.c	2019-01-22 16:16:28.695293423 +0100
@@ -18,6 +18,8 @@
 
 #include <linux/slab.h>
 
+#include "walt.h"
+
 struct dl_bandwidth def_dl_bandwidth;
 
 static inline struct task_struct *dl_task_of(struct sched_dl_entity *dl_se)
@@ -43,6 +45,24 @@
 	return !RB_EMPTY_NODE(&dl_se->rb_node);
 }
 
+static void add_average_bw(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
+{
+	u64 se_bw = dl_se->dl_bw;
+
+	dl_rq->avg_bw += se_bw;
+}
+
+static void clear_average_bw(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
+{
+	u64 se_bw = dl_se->dl_bw;
+
+	dl_rq->avg_bw -= se_bw;
+	if (dl_rq->avg_bw < 0) {
+		WARN_ON(1);
+		dl_rq->avg_bw = 0;
+	}
+}
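+
+/*
+ * Note (illustrative, not part of this patch): avg_bw tracks the summed
+ * dl_bw of the deadline entities accounted to this dl_rq. The WARN_ON
+ * above catches an unbalanced clear_average_bw() without a matching
+ * add_average_bw() and clamps the accounting to 0 instead of letting it
+ * go negative.
+ */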
+
 static inline int is_leftmost(struct task_struct *p, struct dl_rq *dl_rq)
 {
 	struct sched_dl_entity *dl_se = &p->dl;
@@ -271,9 +291,11 @@
 	/*
 	 * By now the task is replenished and enqueued; migrate it.
 	 */
+	p->on_rq = TASK_ON_RQ_MIGRATING;
 	deactivate_task(rq, p, 0);
 	set_task_cpu(p, later_rq->cpu);
 	activate_task(later_rq, p, 0);
+	p->on_rq = TASK_ON_RQ_QUEUED;
 
 	if (!fallback)
 		resched_curr(later_rq);
@@ -565,6 +587,9 @@
 	struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
 	struct rq *rq = rq_of_dl_rq(dl_rq);
 
+	if (dl_se->dl_new)
+		add_average_bw(dl_se, dl_rq);
+
 	/*
 	 * The arrival of a new instance needs special treatment, i.e.,
 	 * the actual scheduling parameters have to be "renewed".
@@ -849,6 +874,9 @@
 	if (unlikely((s64)delta_exec <= 0))
 		return;
 
+	/* kick cpufreq (see the comment in kernel/sched/sched.h). */
+	cpufreq_update_this_cpu(rq, SCHED_CPUFREQ_DL);
+
 	schedstat_set(curr->se.statistics.exec_max,
 		      max(curr->se.statistics.exec_max, delta_exec));
 
@@ -858,8 +886,6 @@
 	curr->se.exec_start = rq_clock_task(rq);
 	cpuacct_charge(curr, delta_exec);
 
-	sched_rt_avg_update(rq, delta_exec);
-
 	dl_se->runtime -= dl_se->dl_yielded ? 0 : delta_exec;
 	if (dl_runtime_exceeded(dl_se)) {
 		dl_se->dl_throttled = 1;
@@ -968,6 +994,41 @@
 
 #endif /* CONFIG_SMP */
 
+#ifdef CONFIG_SCHED_HMP
+
+static void
+inc_hmp_sched_stats_dl(struct rq *rq, struct task_struct *p)
+{
+	inc_cumulative_runnable_avg(&rq->hmp_stats, p);
+}
+
+static void
+dec_hmp_sched_stats_dl(struct rq *rq, struct task_struct *p)
+{
+	dec_cumulative_runnable_avg(&rq->hmp_stats, p);
+}
+
+static void
+fixup_hmp_sched_stats_dl(struct rq *rq, struct task_struct *p,
+			 u32 new_task_load, u32 new_pred_demand)
+{
+	s64 task_load_delta = (s64)new_task_load - task_load(p);
+	s64 pred_demand_delta = PRED_DEMAND_DELTA;
+
+	fixup_cumulative_runnable_avg(&rq->hmp_stats, p, task_load_delta,
+				      pred_demand_delta);
+}
+
+#else	/* CONFIG_SCHED_HMP */
+
+static inline void
+inc_hmp_sched_stats_dl(struct rq *rq, struct task_struct *p) { }
+
+static inline void
+dec_hmp_sched_stats_dl(struct rq *rq, struct task_struct *p) { }
+
+#endif	/* CONFIG_SCHED_HMP */
+
 static inline
 void inc_dl_tasks(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
 {
@@ -977,6 +1038,7 @@
 	WARN_ON(!dl_prio(prio));
 	dl_rq->dl_nr_running++;
 	add_nr_running(rq_of_dl_rq(dl_rq), 1);
+	inc_hmp_sched_stats_dl(rq_of_dl_rq(dl_rq), dl_task_of(dl_se));
 
 	inc_dl_deadline(dl_rq, deadline);
 	inc_dl_migration(dl_se, dl_rq);
@@ -991,6 +1053,7 @@
 	WARN_ON(!dl_rq->dl_nr_running);
 	dl_rq->dl_nr_running--;
 	sub_nr_running(rq_of_dl_rq(dl_rq), 1);
+	dec_hmp_sched_stats_dl(rq_of_dl_rq(dl_rq), dl_task_of(dl_se));
 
 	dec_dl_deadline(dl_rq, dl_se->deadline);
 	dec_dl_migration(dl_se, dl_rq);
@@ -1170,7 +1233,8 @@
 static int find_later_rq(struct task_struct *task);
 
 static int
-select_task_rq_dl(struct task_struct *p, int cpu, int sd_flag, int flags)
+select_task_rq_dl(struct task_struct *p, int cpu, int sd_flag, int flags,
+		  int sibling_count_hint)
 {
 	struct task_struct *curr;
 	struct rq *rq;
@@ -1367,6 +1431,8 @@
 static void task_dead_dl(struct task_struct *p)
 {
 	struct dl_bw *dl_b = dl_bw_of(task_cpu(p));
+	struct dl_rq *dl_rq = dl_rq_of_se(&p->dl);
+	struct rq *rq = rq_of_dl_rq(dl_rq);
 
 	/*
 	 * Since we are TASK_DEAD we won't slip out of the domain!
@@ -1375,6 +1441,8 @@
 	/* XXX we should retain the bw until 0-lag */
 	dl_b->total_bw -= p->dl.dl_bw;
 	raw_spin_unlock_irq(&dl_b->lock);
+
+	clear_average_bw(&p->dl, &rq->dl);
 }
 
 static void set_curr_task_dl(struct rq *rq)
@@ -1681,9 +1749,15 @@
 		goto retry;
 	}
 
+	next_task->on_rq = TASK_ON_RQ_MIGRATING;
 	deactivate_task(rq, next_task, 0);
+	clear_average_bw(&next_task->dl, &rq->dl);
 	set_task_cpu(next_task, later_rq->cpu);
+	add_average_bw(&next_task->dl, &later_rq->dl);
 	activate_task(later_rq, next_task, 0);
+	next_task->on_rq = TASK_ON_RQ_QUEUED;
 	ret = 1;
 
 	resched_curr(later_rq);
@@ -1769,9 +1843,15 @@
 
 			resched = true;
 
+			p->on_rq = TASK_ON_RQ_MIGRATING;
 			deactivate_task(src_rq, p, 0);
+			clear_average_bw(&p->dl, &src_rq->dl);
 			set_task_cpu(p, this_cpu);
+			add_average_bw(&p->dl, &this_rq->dl);
 			activate_task(this_rq, p, 0);
+			p->on_rq = TASK_ON_RQ_QUEUED;
 			dmin = p->dl.deadline;
 
 			/* Is there any other task even earlier? */
@@ -1876,6 +1956,8 @@
 	if (!start_dl_timer(p))
 		__dl_clear_params(p);
 
+	clear_average_bw(&p->dl, &rq->dl);
+
 	/*
 	 * Since this might be the only -deadline task on the rq,
 	 * this is the right place to try to pull some other one
@@ -1971,6 +2053,11 @@
 	.switched_to		= switched_to_dl,
 
 	.update_curr		= update_curr_dl,
+#ifdef CONFIG_SCHED_HMP
+	.inc_hmp_sched_stats	= inc_hmp_sched_stats_dl,
+	.dec_hmp_sched_stats	= dec_hmp_sched_stats_dl,
+	.fixup_hmp_sched_stats	= fixup_hmp_sched_stats_dl,
+#endif
 };
 
 #ifdef CONFIG_SCHED_DEBUG
diff -ruw linux-4.4.115/kernel/sched/debug.c linux-4.4.115-fbx/kernel/sched/debug.c
--- linux-4.4.115/kernel/sched/debug.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/kernel/sched/debug.c	2019-01-22 16:16:28.695293423 +0100
@@ -227,6 +227,14 @@
 			cfs_rq->throttled);
 	SEQ_printf(m, "  .%-30s: %d\n", "throttle_count",
 			cfs_rq->throttle_count);
+	SEQ_printf(m, "  .%-30s: %d\n", "runtime_enabled",
+			cfs_rq->runtime_enabled);
+#ifdef CONFIG_SCHED_HMP
+	SEQ_printf(m, "  .%-30s: %d\n", "nr_big_tasks",
+			cfs_rq->hmp_stats.nr_big_tasks);
+	SEQ_printf(m, "  .%-30s: %llu\n", "cumulative_runnable_avg",
+			cfs_rq->hmp_stats.cumulative_runnable_avg);
+#endif
 #endif
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
@@ -306,6 +314,23 @@
 	P(cpu_load[2]);
 	P(cpu_load[3]);
 	P(cpu_load[4]);
+#ifdef CONFIG_SMP
+	P(cpu_capacity);
+#endif
+#ifdef CONFIG_SCHED_HMP
+	P(static_cpu_pwr_cost);
+	P(cluster->static_cluster_pwr_cost);
+	P(cluster->load_scale_factor);
+	P(cluster->capacity);
+	P(cluster->max_possible_capacity);
+	P(cluster->efficiency);
+	P(cluster->cur_freq);
+	P(cluster->max_freq);
+	P(cluster->exec_scale_factor);
+	P(hmp_stats.nr_big_tasks);
+	SEQ_printf(m, "  .%-30s: %llu\n", "hmp_stats.cumulative_runnable_avg",
+			rq->hmp_stats.cumulative_runnable_avg);
+#endif
 #undef P
 #undef PN
 
@@ -386,6 +411,15 @@
 	PN(sysctl_sched_wakeup_granularity);
 	P(sysctl_sched_child_runs_first);
 	P(sysctl_sched_features);
+#ifdef CONFIG_SCHED_HMP
+	P(sched_upmigrate);
+	P(sched_downmigrate);
+	P(sched_init_task_load_windows);
+	P(min_capacity);
+	P(max_capacity);
+	P(sched_ravg_window);
+	P(sched_load_granule);
+#endif
 #undef PN
 #undef P
 
@@ -408,6 +442,7 @@
 	return 0;
 }
 
+#ifdef CONFIG_SYSRQ_SCHED_DEBUG
 void sysrq_sched_debug_show(void)
 {
 	int cpu;
@@ -417,6 +452,7 @@
 		print_cpu(NULL, cpu);
 
 }
+#endif
 
 /*
  * This itererator needs some explanation.
@@ -547,6 +583,9 @@
 void proc_sched_show_task(struct task_struct *p, struct seq_file *m)
 {
 	unsigned long nr_switches;
+	unsigned int load_avg;
+
+	load_avg = pct_task_load(p);
 
 	SEQ_printf(m, "%s (%d, #threads: %d)\n", p->comm, task_pid_nr(p),
 						get_nr_threads(p));
@@ -597,6 +636,39 @@
 	P(se.statistics.nr_wakeups_affine_attempts);
 	P(se.statistics.nr_wakeups_passive);
 	P(se.statistics.nr_wakeups_idle);
+	/* eas */
+	/* select_idle_sibling() */
+	P(se.statistics.nr_wakeups_sis_attempts);
+	P(se.statistics.nr_wakeups_sis_idle);
+	P(se.statistics.nr_wakeups_sis_cache_affine);
+	P(se.statistics.nr_wakeups_sis_suff_cap);
+	P(se.statistics.nr_wakeups_sis_idle_cpu);
+	P(se.statistics.nr_wakeups_sis_count);
+	/* select_energy_cpu_brute() */
+	P(se.statistics.nr_wakeups_secb_attempts);
+	P(se.statistics.nr_wakeups_secb_sync);
+	P(se.statistics.nr_wakeups_secb_idle_bt);
+	P(se.statistics.nr_wakeups_secb_insuff_cap);
+	P(se.statistics.nr_wakeups_secb_no_nrg_sav);
+	P(se.statistics.nr_wakeups_secb_nrg_sav);
+	P(se.statistics.nr_wakeups_secb_count);
+	/* find_best_target() */
+	P(se.statistics.nr_wakeups_fbt_attempts);
+	P(se.statistics.nr_wakeups_fbt_no_cpu);
+	P(se.statistics.nr_wakeups_fbt_no_sd);
+	P(se.statistics.nr_wakeups_fbt_pref_idle);
+	P(se.statistics.nr_wakeups_fbt_count);
+	/* cas */
+	/* select_task_rq_fair() */
+	P(se.statistics.nr_wakeups_cas_attempts);
+	P(se.statistics.nr_wakeups_cas_count);
+
+#if defined(CONFIG_SMP) && defined(CONFIG_FAIR_GROUP_SCHED)
+	__P(load_avg);
+#ifdef CONFIG_SCHED_HMP
+	P(ravg.demand);
+#endif
+#endif
 
 	{
 		u64 avg_atom, avg_per_cpu;
diff -ruw linux-4.4.115/kernel/sched/fair.c linux-4.4.115-fbx/kernel/sched/fair.c
--- linux-4.4.115/kernel/sched/fair.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/kernel/sched/fair.c	2019-10-29 09:26:25.625222574 +0100
@@ -30,10 +30,12 @@
 #include <linux/mempolicy.h>
 #include <linux/migrate.h>
 #include <linux/task_work.h>
-
-#include <trace/events/sched.h>
+#include <linux/module.h>
 
 #include "sched.h"
+#include <trace/events/sched.h>
+#include "tune.h"
+#include "walt.h"
 
 /*
  * Targeted preemption latency for CPU-bound tasks:
@@ -50,6 +52,9 @@
 unsigned int sysctl_sched_latency = 6000000ULL;
 unsigned int normalized_sysctl_sched_latency = 6000000ULL;
 
+unsigned int sysctl_sched_sync_hint_enable = 1;
+unsigned int sysctl_sched_cstate_aware = 1;
+
 /*
  * The initial- and re-scaling of tunables is configurable
  * (default SCHED_TUNABLESCALING_LOG = *(1+ilog(ncpus))
@@ -114,6 +119,12 @@
 unsigned int sysctl_sched_cfs_bandwidth_slice = 5000UL;
 #endif
 
+/*
+ * The margin used when comparing utilization with CPU capacity:
+ * util * margin < capacity * 1024
+ */
+unsigned int capacity_margin = 1280; /* ~20% */
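+
+/*
+ * Illustrative sketch (not part of this patch): the comparison described in
+ * the comment above. With capacity_margin = 1280 a utilization only "fits"
+ * while it stays below ~80% of the capacity: util 800 fits capacity 1024
+ * (800 * 1280 < 1024 * 1024), while util 850 does not (850 * 1280 > 1024 * 1024).
+ */
+#if 0
+static inline bool example_task_fits_capacity(unsigned long util,
+					      unsigned long capacity)
+{
+	/* 1024 == SCHED_CAPACITY_SCALE */
+	return util * capacity_margin < capacity * 1024;
+}
+#endif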
+
 static inline void update_load_add(struct load_weight *lw, unsigned long inc)
 {
 	lw->weight += inc;
@@ -236,6 +247,9 @@
 	return mul_u64_u32_shr(delta_exec, fact, shift);
 }
 
+#ifdef CONFIG_SMP
+static int active_load_balance_cpu_stop(void *data);
+#endif
 
 const struct sched_class fair_sched_class;
 
@@ -286,19 +300,59 @@
 static inline void list_add_leaf_cfs_rq(struct cfs_rq *cfs_rq)
 {
 	if (!cfs_rq->on_list) {
+		struct rq *rq = rq_of(cfs_rq);
+		int cpu = cpu_of(rq);
 		/*
 		 * Ensure we either appear before our parent (if already
 		 * enqueued) or force our parent to appear after us when it is
 		 * enqueued.  The fact that we always enqueue bottom-up
-		 * reduces this to two cases.
+		 * reduces this to two cases and a special case for the root
+		 * cfs_rq. Furthermore, it also means that we will always reset
+		 * tmp_alone_branch either when the branch is connected
+		 * to a tree or when we reach the beginning of the tree.
 		 */
 		if (cfs_rq->tg->parent &&
-		    cfs_rq->tg->parent->cfs_rq[cpu_of(rq_of(cfs_rq))]->on_list) {
-			list_add_rcu(&cfs_rq->leaf_cfs_rq_list,
-				&rq_of(cfs_rq)->leaf_cfs_rq_list);
-		} else {
+		    cfs_rq->tg->parent->cfs_rq[cpu]->on_list) {
+			/*
+			 * If parent is already on the list, we add the child
+			 * just before. Thanks to circular linked property of
+			 * the list, this means to put the child at the tail
+			 * of the list that starts by parent.
+			 */
 			list_add_tail_rcu(&cfs_rq->leaf_cfs_rq_list,
-				&rq_of(cfs_rq)->leaf_cfs_rq_list);
+				&(cfs_rq->tg->parent->cfs_rq[cpu]->leaf_cfs_rq_list));
+			/*
+			 * The branch is now connected to its tree so we can
+			 * reset tmp_alone_branch to the beginning of the
+			 * list.
+			 */
+			rq->tmp_alone_branch = &rq->leaf_cfs_rq_list;
+		} else if (!cfs_rq->tg->parent) {
+			/*
+			 * cfs rq without parent should be put
+			 * at the tail of the list.
+			 */
+			list_add_tail_rcu(&cfs_rq->leaf_cfs_rq_list,
+				&rq->leaf_cfs_rq_list);
+			/*
+			 * We have reached the beginning of a tree so we can
+			 * reset tmp_alone_branch to the beginning of the list.
+			 */
+			rq->tmp_alone_branch = &rq->leaf_cfs_rq_list;
+		} else {
+			/*
+			 * The parent has not already been added so we want to
+			 * make sure that it will be put after us.
+			 * tmp_alone_branch points to the beginning of the
+			 * branch where we will add the parent.
+			 */
+			list_add_rcu(&cfs_rq->leaf_cfs_rq_list,
+				rq->tmp_alone_branch);
+			/*
+			 * Update tmp_alone_branch to point to the new
+			 * beginning of the branch.
+			 */
+			rq->tmp_alone_branch = &cfs_rq->leaf_cfs_rq_list;
 		}
 
 		cfs_rq->on_list = 1;
@@ -656,7 +710,7 @@
 }
 
 #ifdef CONFIG_SMP
-static int select_idle_sibling(struct task_struct *p, int cpu);
+static int select_idle_sibling(struct task_struct *p, int prev_cpu, int cpu);
 static unsigned long task_h_load(struct task_struct *p);
 
 /*
@@ -680,18 +734,115 @@
 	 * will definitely be update (after enqueue).
 	 */
 	sa->period_contrib = 1023;
+	/*
+	 * Tasks are initialized with full load to be seen as heavy tasks until
+	 * they get a chance to stabilize to their real load level.
+	 * Group entities are initialized with zero load to reflect the fact that
+	 * nothing has been attached to the task group yet.
+	 */
+	if (entity_is_task(se))
 	sa->load_avg = scale_load_down(se->load.weight);
 	sa->load_sum = sa->load_avg * LOAD_AVG_MAX;
-	sa->util_avg = scale_load_down(SCHED_LOAD_SCALE);
-	sa->util_sum = sa->util_avg * LOAD_AVG_MAX;
+	/*
+	 * In previous Android versions, we used to have:
+	 * 	sa->util_avg = scale_load_down(SCHED_LOAD_SCALE);
+	 * 	sa->util_sum = sa->util_avg * LOAD_AVG_MAX;
+	 * However, that functionality has been moved to enqueue.
+	 * It is unclear if we should restore this in enqueue.
+	 */
+	/*
+	 * At this point, util_avg won't be used in select_task_rq_fair anyway
+	 */
+	sa->util_avg = 0;
+	sa->util_sum = 0;
 	/* when this task enqueue'ed, it will contribute to its cfs_rq's load_avg */
 }
 
-#else
+static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq);
+static int update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq, bool update_freq);
+static void attach_entity_cfs_rq(struct sched_entity *se);
+static void attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se);
+
+/*
+ * With new tasks being created, their initial util_avgs are extrapolated
+ * based on the cfs_rq's current util_avg:
+ *
+ *   util_avg = cfs_rq->util_avg / (cfs_rq->load_avg + 1) * se.load.weight
+ *
+ * However, in many cases, the above util_avg does not give a desired
+ * value. Moreover, the sum of the util_avgs may be divergent, such
+ * as when the series is a harmonic series.
+ *
+ * To solve this problem, we also cap the util_avg of successive tasks to
+ * only 1/2 of the left utilization budget:
+ *
+ *   util_avg_cap = (1024 - cfs_rq->avg.util_avg) / 2^n
+ *
+ * where n denotes the nth task.
+ *
+ * For example, a simplest series from the beginning would be like:
+ *
+ *  task  util_avg: 512, 256, 128,  64,  32,   16,    8, ...
+ * cfs_rq util_avg: 512, 768, 896, 960, 992, 1008, 1016, ...
+ *
+ * Finally, that extrapolated util_avg is clamped to the cap (util_avg_cap)
+ * if util_avg > util_avg_cap.
+ */
+void post_init_entity_util_avg(struct sched_entity *se)
+{
+	struct cfs_rq *cfs_rq = cfs_rq_of(se);
+	struct sched_avg *sa = &se->avg;
+	long cap = (long)(SCHED_CAPACITY_SCALE - cfs_rq->avg.util_avg) / 2;
+
+	if (cap > 0) {
+		if (cfs_rq->avg.util_avg != 0) {
+			sa->util_avg  = cfs_rq->avg.util_avg * se->load.weight;
+			sa->util_avg /= (cfs_rq->avg.load_avg + 1);
+
+			if (sa->util_avg > cap)
+				sa->util_avg = cap;
+		} else {
+			sa->util_avg = cap;
+		}
+		/*
+		 * If we wish to restore tuning via setting initial util,
+		 * this is where we should do it.
+		 */
+		sa->util_sum = sa->util_avg * LOAD_AVG_MAX;
+	}
+
+	if (entity_is_task(se)) {
+		struct task_struct *p = task_of(se);
+		if (p->sched_class != &fair_sched_class) {
+			/*
+			 * For !fair tasks do:
+			 *
+			update_cfs_rq_load_avg(now, cfs_rq, false);
+			attach_entity_load_avg(cfs_rq, se);
+			switched_from_fair(rq, p);
+			 *
+			 * such that the next switched_to_fair() has the
+			 * expected state.
+			 */
+			se->avg.last_update_time = cfs_rq_clock_task(cfs_rq);
+			return;
+		}
+	}
+
+	attach_entity_cfs_rq(se);
+}
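+
+/*
+ * Worked example for the clamping above (illustrative, not part of this
+ * patch): with cfs_rq->avg.util_avg = 512 the remaining budget gives
+ * cap = (1024 - 512) / 2 = 256. A new task with se->load.weight = 1024 on a
+ * cfs_rq with avg.load_avg = 1023 extrapolates to
+ * util_avg = 512 * 1024 / (1023 + 1) = 512, which exceeds cap and is
+ * therefore clamped to 256.
+ */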
+
+#else /* !CONFIG_SMP */
 void init_entity_runnable_average(struct sched_entity *se)
 {
 }
-#endif
+void post_init_entity_util_avg(struct sched_entity *se)
+{
+}
+static void update_tg_load_avg(struct cfs_rq *cfs_rq, int force)
+{
+}
+#endif /* CONFIG_SMP */
 
 /*
  * Update the current task's runtime statistics.
@@ -736,12 +887,56 @@
 	update_curr(cfs_rq_of(&rq->curr->se));
 }
 
+#ifdef CONFIG_SCHEDSTATS
+static inline void
+update_stats_wait_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
+{
+	u64 wait_start = rq_clock(rq_of(cfs_rq));
+
+	if (entity_is_task(se) && task_on_rq_migrating(task_of(se)) &&
+	    likely(wait_start > se->statistics.wait_start))
+		wait_start -= se->statistics.wait_start;
+
+	se->statistics.wait_start = wait_start;
+}
+
+static void
+update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se)
+{
+	struct task_struct *p;
+	u64 delta = rq_clock(rq_of(cfs_rq)) - se->statistics.wait_start;
+
+	if (entity_is_task(se)) {
+		p = task_of(se);
+		if (task_on_rq_migrating(p)) {
+			/*
+			 * Preserve migrating task's wait time so wait_start
+			 * time stamp can be adjusted to accumulate wait time
+			 * prior to migration.
+			 */
+			se->statistics.wait_start = delta;
+			return;
+		}
+		trace_sched_stat_wait(p, delta);
+	}
+
+	se->statistics.wait_max = max(se->statistics.wait_max, delta);
+	se->statistics.wait_count++;
+	se->statistics.wait_sum += delta;
+	se->statistics.wait_start = 0;
+}
+#else
 static inline void
 update_stats_wait_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
-	schedstat_set(se->statistics.wait_start, rq_clock(rq_of(cfs_rq)));
 }
 
+static inline void
+update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se)
+{
+}
+#endif
+
 /*
  * Task is being enqueued - update stats:
  */
@@ -755,23 +950,6 @@
 		update_stats_wait_start(cfs_rq, se);
 }
 
-static void
-update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se)
-{
-	schedstat_set(se->statistics.wait_max, max(se->statistics.wait_max,
-			rq_clock(rq_of(cfs_rq)) - se->statistics.wait_start));
-	schedstat_set(se->statistics.wait_count, se->statistics.wait_count + 1);
-	schedstat_set(se->statistics.wait_sum, se->statistics.wait_sum +
-			rq_clock(rq_of(cfs_rq)) - se->statistics.wait_start);
-#ifdef CONFIG_SCHEDSTATS
-	if (entity_is_task(se)) {
-		trace_sched_stat_wait(task_of(se),
-			rq_clock(rq_of(cfs_rq)) - se->statistics.wait_start);
-	}
-#endif
-	schedstat_set(se->statistics.wait_start, 0);
-}
-
 static inline void
 update_stats_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
@@ -1388,7 +1566,8 @@
 	 * Call select_idle_sibling to maybe find a better one.
 	 */
 	if (!cur)
-		env->dst_cpu = select_idle_sibling(env->p, env->dst_cpu);
+		env->dst_cpu = select_idle_sibling(env->p, env->src_cpu,
+						   env->dst_cpu);
 
 assign:
 	assigned = true;
@@ -2373,28 +2552,22 @@
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
 # ifdef CONFIG_SMP
-static inline long calc_tg_weight(struct task_group *tg, struct cfs_rq *cfs_rq)
+static long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg)
 {
-	long tg_weight;
+	long tg_weight, load, shares;
 
 	/*
-	 * Use this CPU's real-time load instead of the last load contribution
-	 * as the updating of the contribution is delayed, and we will use the
-	 * the real-time load to calc the share. See update_tg_load_avg().
+	 * This really should be: cfs_rq->avg.load_avg, but instead we use
+	 * cfs_rq->load.weight, which is its upper bound. This helps ramp up
+	 * the shares for small weight interactive tasks.
 	 */
-	tg_weight = atomic_long_read(&tg->load_avg);
-	tg_weight -= cfs_rq->tg_load_avg_contrib;
-	tg_weight += cfs_rq->load.weight;
-
-	return tg_weight;
-}
+	load = scale_load_down(cfs_rq->load.weight);
 
-static long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg)
-{
-	long tg_weight, load, shares;
+	tg_weight = atomic_long_read(&tg->load_avg);
 
-	tg_weight = calc_tg_weight(tg, cfs_rq);
-	load = cfs_rq->load.weight;
+	/* Ensure tg_weight >= load */
+	tg_weight -= cfs_rq->tg_load_avg_contrib;
+	tg_weight += load;
 
 	shares = (tg->shares * load);
 	if (tg_weight)
@@ -2413,6 +2586,7 @@
 	return tg->shares;
 }
 # endif /* CONFIG_SMP */
+
 static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se,
 			    unsigned long weight)
 {
@@ -2431,16 +2605,20 @@
 
 static inline int throttled_hierarchy(struct cfs_rq *cfs_rq);
 
-static void update_cfs_shares(struct cfs_rq *cfs_rq)
+static void update_cfs_shares(struct sched_entity *se)
 {
+	struct cfs_rq *cfs_rq = group_cfs_rq(se);
 	struct task_group *tg;
-	struct sched_entity *se;
 	long shares;
 
-	tg = cfs_rq->tg;
-	se = tg->se[cpu_of(rq_of(cfs_rq))];
-	if (!se || throttled_hierarchy(cfs_rq))
+	if (!cfs_rq)
 		return;
+
+	if (throttled_hierarchy(cfs_rq))
+		return;
+
+	tg = cfs_rq->tg;
+
 #ifndef CONFIG_SMP
 	if (likely(se->load.weight == tg->shares))
 		return;
@@ -2449,14 +2627,33 @@
 
 	reweight_entity(cfs_rq_of(se), se, shares);
 }
+
 #else /* CONFIG_FAIR_GROUP_SCHED */
-static inline void update_cfs_shares(struct cfs_rq *cfs_rq)
+static inline void update_cfs_shares(struct sched_entity *se)
 {
 }
 #endif /* CONFIG_FAIR_GROUP_SCHED */
 
 #ifdef CONFIG_SMP
-/* Precomputed fixed inverse multiplies for multiplication by y^n */
+u32 sched_get_wake_up_idle(struct task_struct *p)
+{
+	u32 enabled = p->flags & PF_WAKE_UP_IDLE;
+
+	return !!enabled;
+}
+
+int sched_set_wake_up_idle(struct task_struct *p, int wake_up_idle)
+{
+	int enable = !!wake_up_idle;
+
+	if (enable)
+		p->flags |= PF_WAKE_UP_IDLE;
+	else
+		p->flags &= ~PF_WAKE_UP_IDLE;
+
+	return 0;
+}
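+
+/*
+ * Illustrative usage (not part of this patch): a client that wants a task
+ * woken to idle CPUs for latency reasons can toggle the hint around the
+ * latency-sensitive phase:
+ *
+ *	sched_set_wake_up_idle(task, 1);
+ *	... latency-sensitive activity ...
+ *	sched_set_wake_up_idle(task, 0);
+ */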
+
 static const u32 runnable_avg_yN_inv[] = {
 	0xffffffff, 0xfa83b2da, 0xf5257d14, 0xefe4b99a, 0xeac0c6e6, 0xe5b906e6,
 	0xe0ccdeeb, 0xdbfbb796, 0xd744fcc9, 0xd2a81d91, 0xce248c14, 0xc9b9bd85,
@@ -2536,6 +2733,1064 @@
 	return contrib + runnable_avg_yN_sum[n];
 }
 
+#ifdef CONFIG_SCHED_HMP
+
+/* CPU selection flag */
+#define SBC_FLAG_PREV_CPU				0x1
+#define SBC_FLAG_BEST_CAP_CPU				0x2
+#define SBC_FLAG_CPU_COST				0x4
+#define SBC_FLAG_MIN_COST				0x8
+#define SBC_FLAG_IDLE_LEAST_LOADED			0x10
+#define SBC_FLAG_IDLE_CSTATE				0x20
+#define SBC_FLAG_COST_CSTATE_TIE_BREAKER		0x40
+#define SBC_FLAG_COST_CSTATE_PREV_CPU_TIE_BREAKER	0x80
+#define SBC_FLAG_CSTATE_LOAD				0x100
+#define SBC_FLAG_BEST_SIBLING				0x200
+#define SBC_FLAG_WAKER_CPU				0x400
+#define SBC_FLAG_PACK_TASK				0x800
+
+/* Cluster selection flag */
+#define SBC_FLAG_COLOC_CLUSTER				0x10000
+#define SBC_FLAG_WAKER_CLUSTER				0x20000
+#define SBC_FLAG_BACKUP_CLUSTER				0x40000
+#define SBC_FLAG_BOOST_CLUSTER				0x80000
+
+struct cpu_select_env {
+	struct task_struct *p;
+	struct related_thread_group *rtg;
+	u8 reason;
+	u8 need_idle:1;
+	u8 need_waker_cluster:1;
+	u8 sync:1;
+	enum sched_boost_policy boost_policy;
+	u8 pack_task:1;
+	int prev_cpu;
+	DECLARE_BITMAP(candidate_list, NR_CPUS);
+	DECLARE_BITMAP(backup_list, NR_CPUS);
+	u64 task_load;
+	u64 cpu_load;
+	u32 sbc_best_flag;
+	u32 sbc_best_cluster_flag;
+	struct cpumask search_cpus;
+};
+
+struct cluster_cpu_stats {
+	int best_idle_cpu, least_loaded_cpu;
+	int best_capacity_cpu, best_cpu, best_sibling_cpu;
+	int min_cost, best_sibling_cpu_cost;
+	int best_cpu_wakeup_latency;
+	u64 min_load, best_load, best_sibling_cpu_load;
+	s64 highest_spare_capacity;
+};
+
+/*
+ * Should the task be woken to any available idle CPU?
+ *
+ * Waking tasks to an idle CPU has mixed implications for both performance
+ * and power. In many cases the scheduler cannot correctly estimate the
+ * impact of using idle CPUs on either performance or power. PF_WAKE_UP_IDLE
+ * allows an external kernel module to pass a strong hint to the scheduler
+ * that the task in question should be woken to an idle CPU, generally to
+ * improve performance.
+ */
+static inline int wake_to_idle(struct task_struct *p)
+{
+	return (current->flags & PF_WAKE_UP_IDLE) ||
+		 (p->flags & PF_WAKE_UP_IDLE);
+}
+
+static int spill_threshold_crossed(struct cpu_select_env *env, struct rq *rq)
+{
+	u64 total_load;
+
+	total_load = env->task_load + env->cpu_load;
+
+	if (total_load > sched_spill_load ||
+	    (rq->nr_running + 1) > sysctl_sched_spill_nr_run)
+		return 1;
+
+	return 0;
+}
+
+static int skip_cpu(int cpu, struct cpu_select_env *env)
+{
+	int tcpu = task_cpu(env->p);
+	int skip = 0;
+
+	if (!env->reason)
+		return 0;
+
+	if (is_reserved(cpu))
+		return 1;
+
+	switch (env->reason) {
+	case UP_MIGRATION:
+		skip = !idle_cpu(cpu);
+		break;
+	case IRQLOAD_MIGRATION:
+		/* Purposely fall through */
+	default:
+		skip = (cpu == tcpu);
+		break;
+	}
+
+	return skip;
+}
+
+static inline int
+acceptable_capacity(struct sched_cluster *cluster, struct cpu_select_env *env)
+{
+	int tcpu;
+
+	if (!env->reason)
+		return 1;
+
+	tcpu = task_cpu(env->p);
+	switch (env->reason) {
+	case UP_MIGRATION:
+		return cluster->capacity > cpu_capacity(tcpu);
+
+	case DOWN_MIGRATION:
+		return cluster->capacity < cpu_capacity(tcpu);
+
+	default:
+		break;
+	}
+
+	return 1;
+}
+
+static int
+skip_cluster(struct sched_cluster *cluster, struct cpu_select_env *env)
+{
+	if (!test_bit(cluster->id, env->candidate_list))
+		return 1;
+
+	if (!acceptable_capacity(cluster, env)) {
+		__clear_bit(cluster->id, env->candidate_list);
+		return 1;
+	}
+
+	return 0;
+}
+
+static struct sched_cluster *
+select_least_power_cluster(struct cpu_select_env *env)
+{
+	struct sched_cluster *cluster;
+
+	if (env->rtg) {
+		int cpu = cluster_first_cpu(env->rtg->preferred_cluster);
+
+		env->task_load = scale_load_to_cpu(task_load(env->p), cpu);
+
+		if (task_load_will_fit(env->p, env->task_load,
+					cpu, env->boost_policy)) {
+			env->sbc_best_cluster_flag |= SBC_FLAG_COLOC_CLUSTER;
+
+			if (env->boost_policy == SCHED_BOOST_NONE)
+				return env->rtg->preferred_cluster;
+
+			for_each_sched_cluster(cluster) {
+				if (cluster != env->rtg->preferred_cluster) {
+					__set_bit(cluster->id,
+						env->backup_list);
+					__clear_bit(cluster->id,
+						env->candidate_list);
+				}
+			}
+
+			return env->rtg->preferred_cluster;
+		}
+
+		/*
+		 * Since the task load does not fit on the preferred
+		 * cluster anymore, pretend that the task does not
+		 * have any preferred cluster. This allows the waking
+		 * task to get the appropriate CPU it needs as per the
+		 * non co-location placement policy without having to
+		 * wait until the preferred cluster is updated.
+		 */
+		env->rtg = NULL;
+	}
+
+	for_each_sched_cluster(cluster) {
+		if (!skip_cluster(cluster, env)) {
+			int cpu = cluster_first_cpu(cluster);
+
+			env->task_load = scale_load_to_cpu(task_load(env->p),
+									 cpu);
+			if (task_load_will_fit(env->p, env->task_load, cpu,
+					       env->boost_policy))
+				return cluster;
+
+			__set_bit(cluster->id, env->backup_list);
+			__clear_bit(cluster->id, env->candidate_list);
+		}
+	}
+
+	return NULL;
+}
+
+static struct sched_cluster *
+next_candidate(const unsigned long *list, int start, int end)
+{
+	int cluster_id;
+
+	cluster_id = find_next_bit(list, end, start);
+	if (cluster_id >= end)
+		return NULL;
+
+	return sched_cluster[cluster_id];
+}
+
+static void
+update_spare_capacity(struct cluster_cpu_stats *stats,
+		      struct cpu_select_env *env, int cpu, int capacity,
+		      u64 cpu_load)
+{
+	s64 spare_capacity = sched_ravg_window - cpu_load;
+
+	if (spare_capacity > 0 &&
+	    (spare_capacity > stats->highest_spare_capacity ||
+	     (spare_capacity == stats->highest_spare_capacity &&
+	      ((!env->need_waker_cluster &&
+		capacity > cpu_capacity(stats->best_capacity_cpu)) ||
+	       (env->need_waker_cluster &&
+		cpu_rq(cpu)->nr_running <
+		cpu_rq(stats->best_capacity_cpu)->nr_running))))) {
+		/*
+		 * If sync waker is the only runnable of CPU, cr_avg of the
+		 * CPU is 0 so we have high chance to place the wakee on the
+		 * waker's CPU which likely causes preemtion of the waker.
+		 * This can lead migration of preempted waker.  Place the
+		 * wakee on the real idle CPU when it's possible by checking
+		 * nr_running to avoid such preemption.
+		 */
+		stats->highest_spare_capacity = spare_capacity;
+		stats->best_capacity_cpu = cpu;
+	}
+}
+
+static inline void find_backup_cluster(
+struct cpu_select_env *env, struct cluster_cpu_stats *stats)
+{
+	struct sched_cluster *next = NULL;
+	int i;
+	struct cpumask search_cpus;
+
+	while (!bitmap_empty(env->backup_list, num_clusters)) {
+		next = next_candidate(env->backup_list, 0, num_clusters);
+		__clear_bit(next->id, env->backup_list);
+
+		cpumask_and(&search_cpus, &env->search_cpus, &next->cpus);
+		for_each_cpu(i, &search_cpus) {
+			trace_sched_cpu_load_wakeup(cpu_rq(i), idle_cpu(i),
+			sched_irqload(i), power_cost(i, task_load(env->p) +
+					cpu_cravg_sync(i, env->sync)), 0);
+
+			update_spare_capacity(stats, env, i, next->capacity,
+					  cpu_load_sync(i, env->sync));
+		}
+		env->sbc_best_cluster_flag = SBC_FLAG_BACKUP_CLUSTER;
+	}
+}
+
+struct sched_cluster *
+next_best_cluster(struct sched_cluster *cluster, struct cpu_select_env *env,
+					struct cluster_cpu_stats *stats)
+{
+	struct sched_cluster *next = NULL;
+
+	__clear_bit(cluster->id, env->candidate_list);
+
+	if (env->rtg && preferred_cluster(cluster, env->p))
+		return NULL;
+
+	do {
+		if (bitmap_empty(env->candidate_list, num_clusters))
+			return NULL;
+
+		next = next_candidate(env->candidate_list, 0, num_clusters);
+		if (next) {
+			if (next->min_power_cost > stats->min_cost) {
+				clear_bit(next->id, env->candidate_list);
+				next = NULL;
+				continue;
+			}
+
+			if (skip_cluster(next, env))
+				next = NULL;
+		}
+	} while (!next);
+
+	env->task_load = scale_load_to_cpu(task_load(env->p),
+					cluster_first_cpu(next));
+	return next;
+}
+
+#ifdef CONFIG_SCHED_HMP_CSTATE_AWARE
+static void __update_cluster_stats(int cpu, struct cluster_cpu_stats *stats,
+				   struct cpu_select_env *env, int cpu_cost)
+{
+	int wakeup_latency;
+	int prev_cpu = env->prev_cpu;
+
+	wakeup_latency = cpu_rq(cpu)->wakeup_latency;
+
+	if (env->need_idle) {
+		stats->min_cost = cpu_cost;
+		if (idle_cpu(cpu)) {
+			if (wakeup_latency < stats->best_cpu_wakeup_latency ||
+			    (wakeup_latency == stats->best_cpu_wakeup_latency &&
+			     cpu == prev_cpu)) {
+				stats->best_idle_cpu = cpu;
+				stats->best_cpu_wakeup_latency = wakeup_latency;
+			}
+		} else {
+			if (env->cpu_load < stats->min_load ||
+				(env->cpu_load == stats->min_load &&
+							cpu == prev_cpu)) {
+				stats->least_loaded_cpu = cpu;
+				stats->min_load = env->cpu_load;
+			}
+		}
+
+		return;
+	}
+
+	if (cpu_cost < stats->min_cost)  {
+		stats->min_cost = cpu_cost;
+		stats->best_cpu_wakeup_latency = wakeup_latency;
+		stats->best_load = env->cpu_load;
+		stats->best_cpu = cpu;
+		env->sbc_best_flag = SBC_FLAG_CPU_COST;
+		return;
+	}
+
+	/* CPU cost is the same. Start breaking the tie by C-state */
+
+	if (wakeup_latency > stats->best_cpu_wakeup_latency)
+		return;
+
+	if (wakeup_latency < stats->best_cpu_wakeup_latency) {
+		stats->best_cpu_wakeup_latency = wakeup_latency;
+		stats->best_load = env->cpu_load;
+		stats->best_cpu = cpu;
+		env->sbc_best_flag = SBC_FLAG_COST_CSTATE_TIE_BREAKER;
+		return;
+	}
+
+	/* C-state is the same. Use prev CPU to break the tie */
+	if (cpu == prev_cpu) {
+		stats->best_cpu = cpu;
+		env->sbc_best_flag = SBC_FLAG_COST_CSTATE_PREV_CPU_TIE_BREAKER;
+		return;
+	}
+
+	if (stats->best_cpu != prev_cpu &&
+	    ((wakeup_latency == 0 && env->cpu_load < stats->best_load) ||
+	    (wakeup_latency > 0 && env->cpu_load > stats->best_load))) {
+		stats->best_load = env->cpu_load;
+		stats->best_cpu = cpu;
+		env->sbc_best_flag = SBC_FLAG_CSTATE_LOAD;
+	}
+}
+#else /* CONFIG_SCHED_HMP_CSTATE_AWARE */
+static void __update_cluster_stats(int cpu, struct cluster_cpu_stats *stats,
+				   struct cpu_select_env *env, int cpu_cost)
+{
+	int prev_cpu = env->prev_cpu;
+
+	if (cpu != prev_cpu && cpus_share_cache(prev_cpu, cpu)) {
+		if (stats->best_sibling_cpu_cost > cpu_cost ||
+		    (stats->best_sibling_cpu_cost == cpu_cost &&
+		     stats->best_sibling_cpu_load > env->cpu_load)) {
+			stats->best_sibling_cpu_cost = cpu_cost;
+			stats->best_sibling_cpu_load = env->cpu_load;
+			stats->best_sibling_cpu = cpu;
+		}
+	}
+
+	if ((cpu_cost < stats->min_cost) ||
+	    ((stats->best_cpu != prev_cpu &&
+	      stats->min_load > env->cpu_load) || cpu == prev_cpu)) {
+		if (env->need_idle) {
+			if (idle_cpu(cpu)) {
+				stats->min_cost = cpu_cost;
+				stats->best_idle_cpu = cpu;
+			}
+		} else {
+			stats->min_cost = cpu_cost;
+			stats->min_load = env->cpu_load;
+			stats->best_cpu = cpu;
+			env->sbc_best_flag = SBC_FLAG_MIN_COST;
+		}
+	}
+}
+#endif /* CONFIG_SCHED_HMP_CSTATE_AWARE */
+
+static void update_cluster_stats(int cpu, struct cluster_cpu_stats *stats,
+					 struct cpu_select_env *env)
+{
+	int cpu_cost;
+
+	/*
+	 * We try to find the least loaded *busy* CPU irrespective
+	 * of the power cost.
+	 */
+	if (env->pack_task)
+		cpu_cost = cpu_min_power_cost(cpu);
+	else
+		cpu_cost = power_cost(cpu, task_load(env->p) +
+				cpu_cravg_sync(cpu, env->sync));
+
+	if (cpu_cost <= stats->min_cost)
+		__update_cluster_stats(cpu, stats, env, cpu_cost);
+}
+
+static void find_best_cpu_in_cluster(struct sched_cluster *c,
+	 struct cpu_select_env *env, struct cluster_cpu_stats *stats)
+{
+	int i;
+	struct cpumask search_cpus;
+
+	cpumask_and(&search_cpus, &env->search_cpus, &c->cpus);
+
+	env->need_idle = wake_to_idle(env->p) || c->wake_up_idle;
+
+	for_each_cpu(i, &search_cpus) {
+		env->cpu_load = cpu_load_sync(i, env->sync);
+
+		trace_sched_cpu_load_wakeup(cpu_rq(i), idle_cpu(i),
+			sched_irqload(i),
+			power_cost(i, task_load(env->p) +
+					cpu_cravg_sync(i, env->sync)), 0);
+
+		if (skip_cpu(i, env))
+			continue;
+
+		update_spare_capacity(stats, env, i, c->capacity,
+				      env->cpu_load);
+
+		/*
+		 * need_idle takes precedence over sched boost, but when both
+		 * are set, the idlest CPU within all the clusters is selected
+		 * when boost_policy = BOOST_ON_ALL, whereas the idlest CPU in
+		 * the big cluster is selected when boost_policy = BOOST_ON_BIG.
+		 */
+		if ((!env->need_idle &&
+		    env->boost_policy != SCHED_BOOST_NONE) ||
+		    env->need_waker_cluster ||
+		    sched_cpu_high_irqload(i) ||
+		    spill_threshold_crossed(env, cpu_rq(i)))
+			continue;
+
+		update_cluster_stats(i, stats, env);
+	}
+}
+
+static inline void init_cluster_cpu_stats(struct cluster_cpu_stats *stats)
+{
+	stats->best_cpu = stats->best_idle_cpu = -1;
+	stats->best_capacity_cpu = stats->best_sibling_cpu = -1;
+	stats->min_cost = stats->best_sibling_cpu_cost = INT_MAX;
+	stats->min_load = stats->best_sibling_cpu_load = ULLONG_MAX;
+	stats->highest_spare_capacity = 0;
+	stats->least_loaded_cpu = -1;
+	stats->best_cpu_wakeup_latency = INT_MAX;
+	/* No need to initialize stats->best_load */
+}
+
+static inline bool env_has_special_flags(struct cpu_select_env *env)
+{
+	if (env->need_idle || env->boost_policy != SCHED_BOOST_NONE ||
+	    env->reason)
+		return true;
+
+	return false;
+}
+
+static inline bool
+bias_to_prev_cpu(struct cpu_select_env *env, struct cluster_cpu_stats *stats)
+{
+	int prev_cpu;
+	struct task_struct *task = env->p;
+	struct sched_cluster *cluster;
+
+	if (!task->ravg.mark_start || !sched_short_sleep_task_threshold)
+		return false;
+
+	prev_cpu = env->prev_cpu;
+	if (!cpumask_test_cpu(prev_cpu, &env->search_cpus))
+		return false;
+
+	if (task->ravg.mark_start - task->last_cpu_selected_ts >=
+				sched_long_cpu_selection_threshold)
+		return false;
+
+	/*
+	 * This function should be used by the task wakeup path only, as it
+	 * treats p->last_switch_out_ts as the last sleep time.
+	 * p->last_switch_out_ts can denote the last preemption time as well
+	 * as the last sleep time.
+	 */
+	if (task->ravg.mark_start - task->last_switch_out_ts >=
+					sched_short_sleep_task_threshold)
+		return false;
+
+	env->task_load = scale_load_to_cpu(task_load(task), prev_cpu);
+	cluster = cpu_rq(prev_cpu)->cluster;
+
+	if (!task_load_will_fit(task, env->task_load, prev_cpu,
+				sched_boost_policy())) {
+		__set_bit(cluster->id, env->backup_list);
+		__clear_bit(cluster->id, env->candidate_list);
+		return false;
+	}
+
+	env->cpu_load = cpu_load_sync(prev_cpu, env->sync);
+	if (sched_cpu_high_irqload(prev_cpu) ||
+			spill_threshold_crossed(env, cpu_rq(prev_cpu))) {
+		update_spare_capacity(stats, env, prev_cpu,
+				cluster->capacity, env->cpu_load);
+		cpumask_clear_cpu(prev_cpu, &env->search_cpus);
+		return false;
+	}
+
+	return true;
+}
+
+static inline bool
+wake_to_waker_cluster(struct cpu_select_env *env)
+{
+	return env->sync &&
+	       task_load(current) > sched_big_waker_task_load &&
+	       task_load(env->p) < sched_small_wakee_task_load;
+}
+
+static inline bool
+bias_to_waker_cpu(struct cpu_select_env *env, int cpu)
+{
+	return sysctl_sched_prefer_sync_wakee_to_waker &&
+	       cpu_rq(cpu)->nr_running == 1 &&
+	       cpumask_test_cpu(cpu, &env->search_cpus);
+}
+
+static inline int
+cluster_allowed(struct cpu_select_env *env, struct sched_cluster *cluster)
+{
+	return cpumask_intersects(&env->search_cpus, &cluster->cpus);
+}
+
+/* Return the cheapest cpu that can fit this task */
+static int select_best_cpu(struct task_struct *p, int target, int reason,
+			   int sync)
+{
+	struct sched_cluster *cluster, *pref_cluster = NULL;
+	struct cluster_cpu_stats stats;
+	struct related_thread_group *grp;
+	unsigned int sbc_flag = 0;
+	int cpu = raw_smp_processor_id();
+	bool special;
+
+	struct cpu_select_env env = {
+		.p			= p,
+		.reason			= reason,
+		.need_idle		= wake_to_idle(p),
+		.need_waker_cluster	= 0,
+		.sync			= sync,
+		.prev_cpu		= target,
+		.rtg			= NULL,
+		.sbc_best_flag		= 0,
+		.sbc_best_cluster_flag	= 0,
+		.pack_task		= false,
+	};
+
+	env.boost_policy = task_sched_boost(p) ?
+			sched_boost_policy() : SCHED_BOOST_NONE;
+
+	bitmap_copy(env.candidate_list, all_cluster_ids, NR_CPUS);
+	bitmap_zero(env.backup_list, NR_CPUS);
+
+	cpumask_and(&env.search_cpus, tsk_cpus_allowed(p), cpu_active_mask);
+	cpumask_andnot(&env.search_cpus, &env.search_cpus, cpu_isolated_mask);
+
+	init_cluster_cpu_stats(&stats);
+	special = env_has_special_flags(&env);
+
+	rcu_read_lock();
+
+	grp = task_related_thread_group(p);
+
+	if (grp && grp->preferred_cluster) {
+		pref_cluster = grp->preferred_cluster;
+		if (!cluster_allowed(&env, pref_cluster))
+			clear_bit(pref_cluster->id, env.candidate_list);
+		else
+			env.rtg = grp;
+	} else if (!special) {
+		cluster = cpu_rq(cpu)->cluster;
+		if (wake_to_waker_cluster(&env)) {
+			if (bias_to_waker_cpu(&env, cpu)) {
+				target = cpu;
+				sbc_flag = SBC_FLAG_WAKER_CLUSTER |
+					   SBC_FLAG_WAKER_CPU;
+				goto out;
+			} else if (cluster_allowed(&env, cluster)) {
+				env.need_waker_cluster = 1;
+				bitmap_zero(env.candidate_list, NR_CPUS);
+				__set_bit(cluster->id, env.candidate_list);
+				env.sbc_best_cluster_flag =
+							SBC_FLAG_WAKER_CLUSTER;
+			}
+		} else if (bias_to_prev_cpu(&env, &stats)) {
+			sbc_flag = SBC_FLAG_PREV_CPU;
+			goto out;
+		}
+	}
+
+	if (!special && is_short_burst_task(p)) {
+		env.pack_task = true;
+		sbc_flag = SBC_FLAG_PACK_TASK;
+	}
+retry:
+	cluster = select_least_power_cluster(&env);
+
+	if (!cluster)
+		goto out;
+
+	/*
+	 * 'cluster' now points to the minimum power cluster which can satisfy
+	 * the task's perf goals. Walk down the cluster list starting with
+	 * that cluster. For non-small tasks, skip clusters that don't have
+	 * mostly_idle/idle cpus.
+	 */
+
+	do {
+		find_best_cpu_in_cluster(cluster, &env, &stats);
+
+	} while ((cluster = next_best_cluster(cluster, &env, &stats)));
+
+	if (env.need_idle) {
+		if (stats.best_idle_cpu >= 0) {
+			target = stats.best_idle_cpu;
+			sbc_flag |= SBC_FLAG_IDLE_CSTATE;
+		} else if (stats.least_loaded_cpu >= 0) {
+			target = stats.least_loaded_cpu;
+			sbc_flag |= SBC_FLAG_IDLE_LEAST_LOADED;
+		}
+	} else if (stats.best_cpu >= 0) {
+		if (stats.best_sibling_cpu >= 0 &&
+				stats.best_cpu != task_cpu(p) &&
+				stats.min_cost == stats.best_sibling_cpu_cost) {
+			stats.best_cpu = stats.best_sibling_cpu;
+			sbc_flag |= SBC_FLAG_BEST_SIBLING;
+		}
+		sbc_flag |= env.sbc_best_flag;
+		target = stats.best_cpu;
+	} else {
+		if (env.rtg && env.boost_policy == SCHED_BOOST_NONE) {
+			env.rtg = NULL;
+			goto retry;
+		}
+
+		/*
+		 * With boost_policy == SCHED_BOOST_ON_BIG, we reach here with
+		 * backup_list = little cluster, candidate_list = none and
+		 * stats->best_capacity_cpu points the best spare capacity
+		 * CPU among the CPUs in the big cluster.
+		 */
+		if (env.boost_policy == SCHED_BOOST_ON_BIG &&
+		    stats.best_capacity_cpu >= 0)
+			sbc_flag |= SBC_FLAG_BOOST_CLUSTER;
+		else
+			find_backup_cluster(&env, &stats);
+
+		if (stats.best_capacity_cpu >= 0) {
+			target = stats.best_capacity_cpu;
+			sbc_flag |= SBC_FLAG_BEST_CAP_CPU;
+		}
+	}
+	p->last_cpu_selected_ts = sched_ktime_clock();
+out:
+	sbc_flag |= env.sbc_best_cluster_flag;
+	rcu_read_unlock();
+	trace_sched_task_load(p, sched_boost_policy() && task_sched_boost(p),
+		env.reason, env.sync, env.need_idle, sbc_flag, target);
+	return target;
+}
+
+#ifdef CONFIG_CFS_BANDWIDTH
+
+static inline struct task_group *next_task_group(struct task_group *tg)
+{
+	tg = list_entry_rcu(tg->list.next, typeof(struct task_group), list);
+
+	return (&tg->list == &task_groups) ? NULL : tg;
+}
+
+/* Iterate over all cfs_rq in a cpu */
+#define for_each_cfs_rq(cfs_rq, tg, cpu)	\
+	for (tg = container_of(&task_groups, struct task_group, list);	\
+		((tg = next_task_group(tg)) && (cfs_rq = tg->cfs_rq[cpu]));)
+
+void reset_cfs_rq_hmp_stats(int cpu, int reset_cra)
+{
+	struct task_group *tg;
+	struct cfs_rq *cfs_rq;
+
+	rcu_read_lock();
+
+	for_each_cfs_rq(cfs_rq, tg, cpu)
+		reset_hmp_stats(&cfs_rq->hmp_stats, reset_cra);
+
+	rcu_read_unlock();
+}
+
+static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq);
+
+static void inc_cfs_rq_hmp_stats(struct cfs_rq *cfs_rq,
+	 struct task_struct *p, int change_cra);
+static void dec_cfs_rq_hmp_stats(struct cfs_rq *cfs_rq,
+	 struct task_struct *p, int change_cra);
+
+/* Add task's contribution to a cpu's HMP statistics */
+void _inc_hmp_sched_stats_fair(struct rq *rq,
+			struct task_struct *p, int change_cra)
+{
+	struct cfs_rq *cfs_rq;
+	struct sched_entity *se = &p->se;
+
+	/*
+	 * Although the check below is not strictly required (as
+	 * inc/dec_nr_big_task and inc/dec_cumulative_runnable_avg, called
+	 * from inc_cfs_rq_hmp_stats(), have similar checks), we gain a bit
+	 * of efficiency by short-circuiting the for_each_sched_entity()
+	 * loop when sched_disable_window_stats is set.
+	 */
+	if (sched_disable_window_stats)
+		return;
+
+	for_each_sched_entity(se) {
+		cfs_rq = cfs_rq_of(se);
+		inc_cfs_rq_hmp_stats(cfs_rq, p, change_cra);
+		if (cfs_rq_throttled(cfs_rq))
+			break;
+	}
+
+	/* Update rq->hmp_stats only if we didn't find any throttled cfs_rq */
+	if (!se)
+		inc_rq_hmp_stats(rq, p, change_cra);
+}
+
+/* Remove task's contribution from a cpu's HMP statistics */
+static void
+_dec_hmp_sched_stats_fair(struct rq *rq, struct task_struct *p, int change_cra)
+{
+	struct cfs_rq *cfs_rq;
+	struct sched_entity *se = &p->se;
+
+	/* See comment on efficiency in _inc_hmp_sched_stats_fair */
+	if (sched_disable_window_stats)
+		return;
+
+	for_each_sched_entity(se) {
+		cfs_rq = cfs_rq_of(se);
+		dec_cfs_rq_hmp_stats(cfs_rq, p, change_cra);
+		if (cfs_rq_throttled(cfs_rq))
+			break;
+	}
+
+	/* Update rq->hmp_stats only if we didn't find any throttled cfs_rq */
+	if (!se)
+		dec_rq_hmp_stats(rq, p, change_cra);
+}
+
+static void inc_hmp_sched_stats_fair(struct rq *rq, struct task_struct *p)
+{
+	_inc_hmp_sched_stats_fair(rq, p, 1);
+}
+
+static void dec_hmp_sched_stats_fair(struct rq *rq, struct task_struct *p)
+{
+	_dec_hmp_sched_stats_fair(rq, p, 1);
+}
+
+static void fixup_hmp_sched_stats_fair(struct rq *rq, struct task_struct *p,
+				       u32 new_task_load, u32 new_pred_demand)
+{
+	struct cfs_rq *cfs_rq;
+	struct sched_entity *se = &p->se;
+	s64 task_load_delta = (s64)new_task_load - task_load(p);
+	s64 pred_demand_delta = PRED_DEMAND_DELTA;
+
+	for_each_sched_entity(se) {
+		cfs_rq = cfs_rq_of(se);
+
+		fixup_cumulative_runnable_avg(&cfs_rq->hmp_stats, p,
+					      task_load_delta,
+					      pred_demand_delta);
+		fixup_nr_big_tasks(&cfs_rq->hmp_stats, p, task_load_delta);
+		if (cfs_rq_throttled(cfs_rq))
+			break;
+	}
+
+	/* Fix up rq->hmp_stats only if we didn't find any throttled cfs_rq */
+	if (!se) {
+		fixup_cumulative_runnable_avg(&rq->hmp_stats, p,
+					      task_load_delta,
+					      pred_demand_delta);
+		fixup_nr_big_tasks(&rq->hmp_stats, p, task_load_delta);
+	}
+}
+
+static int task_will_be_throttled(struct task_struct *p);
+
+#else	/* CONFIG_CFS_BANDWIDTH */
+
+inline void reset_cfs_rq_hmp_stats(int cpu, int reset_cra) { }
+
+static void
+inc_hmp_sched_stats_fair(struct rq *rq, struct task_struct *p)
+{
+	inc_nr_big_task(&rq->hmp_stats, p);
+	inc_cumulative_runnable_avg(&rq->hmp_stats, p);
+}
+
+static void
+dec_hmp_sched_stats_fair(struct rq *rq, struct task_struct *p)
+{
+	dec_nr_big_task(&rq->hmp_stats, p);
+	dec_cumulative_runnable_avg(&rq->hmp_stats, p);
+}
+
+static void
+fixup_hmp_sched_stats_fair(struct rq *rq, struct task_struct *p,
+			   u32 new_task_load, u32 new_pred_demand)
+{
+	s64 task_load_delta = (s64)new_task_load - task_load(p);
+	s64 pred_demand_delta = PRED_DEMAND_DELTA;
+
+	fixup_cumulative_runnable_avg(&rq->hmp_stats, p, task_load_delta,
+				      pred_demand_delta);
+	fixup_nr_big_tasks(&rq->hmp_stats, p, task_load_delta);
+}
+
+static inline int task_will_be_throttled(struct task_struct *p)
+{
+	return 0;
+}
+
+void _inc_hmp_sched_stats_fair(struct rq *rq,
+			struct task_struct *p, int change_cra)
+{
+	inc_nr_big_task(&rq->hmp_stats, p);
+}
+
+#endif	/* CONFIG_CFS_BANDWIDTH */
+
+/*
+ * Reset balance_interval at all sched_domain levels of given cpu, so that it
+ * honors kick.
+ */
+static inline void reset_balance_interval(int cpu)
+{
+	struct sched_domain *sd;
+
+	if (cpu >= nr_cpu_ids)
+		return;
+
+	rcu_read_lock();
+	for_each_domain(cpu, sd)
+		sd->balance_interval = 0;
+	rcu_read_unlock();
+}
+
+/*
+ * Check if a task is on the "wrong" cpu (i.e. its current cpu is not the
+ * ideal cpu as per its demand or priority).
+ *
+ * Returns the reason why the task needs to be migrated.
+ */
+static inline int migration_needed(struct task_struct *p, int cpu)
+{
+	int nice;
+	struct related_thread_group *grp;
+
+	if (p->state != TASK_RUNNING || p->nr_cpus_allowed == 1)
+		return 0;
+
+	/* No need to migrate task that is about to be throttled */
+	if (task_will_be_throttled(p))
+		return 0;
+
+	if (sched_boost_policy() == SCHED_BOOST_ON_BIG &&
+		 cpu_capacity(cpu) != max_capacity && task_sched_boost(p))
+		return UP_MIGRATION;
+
+	if (sched_cpu_high_irqload(cpu))
+		return IRQLOAD_MIGRATION;
+
+	nice = task_nice(p);
+	rcu_read_lock();
+	grp = task_related_thread_group(p);
+	/*
+	 * Don't assume higher capacity means higher power. If the task
+	 * is running on the power efficient CPU, avoid migrating it
+	 * to a lower capacity cluster.
+	 */
+	if (!grp && (nice > SCHED_UPMIGRATE_MIN_NICE ||
+			upmigrate_discouraged(p)) &&
+			cpu_capacity(cpu) > min_capacity &&
+			cpu_max_power_cost(cpu) == max_power_cost) {
+		rcu_read_unlock();
+		return DOWN_MIGRATION;
+	}
+
+	if (!task_will_fit(p, cpu)) {
+		rcu_read_unlock();
+		return UP_MIGRATION;
+	}
+	rcu_read_unlock();
+
+	return 0;
+}
+
+static inline int
+kick_active_balance(struct rq *rq, struct task_struct *p, int new_cpu)
+{
+	unsigned long flags;
+	int rc = 0;
+
+	/* Invoke active balance to force migrate currently running task */
+	raw_spin_lock_irqsave(&rq->lock, flags);
+	if (!rq->active_balance) {
+		rq->active_balance = 1;
+		rq->push_cpu = new_cpu;
+		get_task_struct(p);
+		rq->push_task = p;
+		rc = 1;
+	}
+	raw_spin_unlock_irqrestore(&rq->lock, flags);
+
+	return rc;
+}
+
+static DEFINE_RAW_SPINLOCK(migration_lock);
+
+static bool do_migration(int reason, int new_cpu, int cpu)
+{
+	if ((reason == UP_MIGRATION || reason == DOWN_MIGRATION)
+				&& same_cluster(new_cpu, cpu))
+		return false;
+
+	/* Inter-cluster high-irqload migrations are OK */
+	return new_cpu != cpu;
+}
+
+/*
+ * Check if currently running task should be migrated to a better cpu.
+ *
+ * Todo: Effect this via changes to nohz_balancer_kick() and load balance?
+ */
+void check_for_migration(struct rq *rq, struct task_struct *p)
+{
+	int cpu = cpu_of(rq), new_cpu;
+	int active_balance = 0, reason;
+
+	reason = migration_needed(p, cpu);
+	if (!reason)
+		return;
+
+	raw_spin_lock(&migration_lock);
+	new_cpu = select_best_cpu(p, cpu, reason, 0);
+
+	if (do_migration(reason, new_cpu, cpu)) {
+		active_balance = kick_active_balance(rq, p, new_cpu);
+		if (active_balance)
+			mark_reserved(new_cpu);
+	}
+
+	raw_spin_unlock(&migration_lock);
+
+	if (active_balance)
+		stop_one_cpu_nowait(cpu, active_load_balance_cpu_stop, rq,
+					&rq->active_balance_work);
+}
+
+#ifdef CONFIG_CFS_BANDWIDTH
+
+static void init_cfs_rq_hmp_stats(struct cfs_rq *cfs_rq)
+{
+	cfs_rq->hmp_stats.nr_big_tasks = 0;
+	cfs_rq->hmp_stats.cumulative_runnable_avg = 0;
+	cfs_rq->hmp_stats.pred_demands_sum = 0;
+}
+
+static void inc_cfs_rq_hmp_stats(struct cfs_rq *cfs_rq,
+		 struct task_struct *p, int change_cra)
+{
+	inc_nr_big_task(&cfs_rq->hmp_stats, p);
+	if (change_cra)
+		inc_cumulative_runnable_avg(&cfs_rq->hmp_stats, p);
+}
+
+static void dec_cfs_rq_hmp_stats(struct cfs_rq *cfs_rq,
+		 struct task_struct *p, int change_cra)
+{
+	dec_nr_big_task(&cfs_rq->hmp_stats, p);
+	if (change_cra)
+		dec_cumulative_runnable_avg(&cfs_rq->hmp_stats, p);
+}
+
+static void inc_throttled_cfs_rq_hmp_stats(struct hmp_sched_stats *stats,
+			 struct cfs_rq *cfs_rq)
+{
+	stats->nr_big_tasks += cfs_rq->hmp_stats.nr_big_tasks;
+	stats->cumulative_runnable_avg +=
+				cfs_rq->hmp_stats.cumulative_runnable_avg;
+	stats->pred_demands_sum += cfs_rq->hmp_stats.pred_demands_sum;
+}
+
+static void dec_throttled_cfs_rq_hmp_stats(struct hmp_sched_stats *stats,
+				 struct cfs_rq *cfs_rq)
+{
+	stats->nr_big_tasks -= cfs_rq->hmp_stats.nr_big_tasks;
+	stats->cumulative_runnable_avg -=
+				cfs_rq->hmp_stats.cumulative_runnable_avg;
+	stats->pred_demands_sum -= cfs_rq->hmp_stats.pred_demands_sum;
+
+	BUG_ON(stats->nr_big_tasks < 0 ||
+		(s64)stats->cumulative_runnable_avg < 0);
+	BUG_ON((s64)stats->pred_demands_sum < 0);
+}
+
+#else	/* CONFIG_CFS_BANDWIDTH */
+
+static inline void inc_cfs_rq_hmp_stats(struct cfs_rq *cfs_rq,
+	 struct task_struct *p, int change_cra) { }
+
+static inline void dec_cfs_rq_hmp_stats(struct cfs_rq *cfs_rq,
+	 struct task_struct *p, int change_cra) { }
+
+#endif	/* CONFIG_CFS_BANDWIDTH */
+
+#else	/* CONFIG_SCHED_HMP */
+
+static inline void init_cfs_rq_hmp_stats(struct cfs_rq *cfs_rq) { }
+
+static inline void inc_cfs_rq_hmp_stats(struct cfs_rq *cfs_rq,
+	 struct task_struct *p, int change_cra) { }
+
+static inline void dec_cfs_rq_hmp_stats(struct cfs_rq *cfs_rq,
+	 struct task_struct *p, int change_cra) { }
+
+#define dec_throttled_cfs_rq_hmp_stats(...)
+#define inc_throttled_cfs_rq_hmp_stats(...)
+
+#endif	/* CONFIG_SCHED_HMP */
+
 #if (SCHED_LOAD_SHIFT - SCHED_LOAD_RESOLUTION) != 10 || SCHED_CAPACITY_SHIFT != 10
 #error "load tracking assumes 2^10 as unit"
 #endif
@@ -2600,6 +3855,7 @@
 
 	scale_freq = arch_scale_freq_capacity(NULL, cpu);
 	scale_cpu = arch_scale_cpu_capacity(NULL, cpu);
+	trace_sched_contrib_scale_f(cpu, scale_freq, scale_cpu);
 
 	/* delta_w is the amount already accumulated against our next period */
 	delta_w = sa->period_contrib;
@@ -2658,6 +3914,7 @@
 		if (cfs_rq)
 			cfs_rq->runnable_load_sum += weight * scaled_delta;
 	}
+
 	if (running)
 		sa->util_sum += scaled_delta * scale_cpu;
 
@@ -2675,25 +3932,262 @@
 	return decayed;
 }
 
-#ifdef CONFIG_FAIR_GROUP_SCHED
 /*
- * Updating tg's load_avg is necessary before update_cfs_share (which is done)
- * and effective_load (which is not done because it is too costly).
+ * Signed add and clamp on underflow.
+ *
+ * Explicitly do a load-store to ensure the intermediate value never hits
+ * memory. This allows lockless observations without ever seeing the negative
+ * values.
+ */
+#define add_positive(_ptr, _val) do {                           \
+	typeof(_ptr) ptr = (_ptr);                              \
+	typeof(_val) val = (_val);                              \
+	typeof(*ptr) res, var = READ_ONCE(*ptr);                \
+								\
+	res = var + val;                                        \
+								\
+	if (val < 0 && res > var)                               \
+		res = 0;                                        \
+								\
+	WRITE_ONCE(*ptr, res);                                  \
+} while (0)
+
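Since the clamp-on-underflow behaviour of add_positive() is easy to get wrong with unsigned counters, here is a hedged userspace rendition of the same macro. READ_ONCE()/WRITE_ONCE() are replaced by plain accesses, so the sketch keeps only the clamp semantics, not the kernel's lockless-observation guarantee.

#include <stdio.h>

/* Userspace model of add_positive(): same clamp, no memory ordering. */
#define add_positive(_ptr, _val) do {			\
	typeof(_ptr) ptr = (_ptr);			\
	typeof(_val) val = (_val);			\
	typeof(*ptr) res, var = *ptr;			\
							\
	res = var + val;				\
							\
	/* adding a negative value wrapped below 0 */	\
	if (val < 0 && res > var)			\
		res = 0;				\
							\
	*ptr = res;					\
} while (0)

int main(void)
{
	unsigned long avg = 100;

	add_positive(&avg, -300L);	/* would underflow: clamped to 0 */
	printf("%lu\n", avg);		/* 0, not a huge wrapped value */

	add_positive(&avg, 42L);
	printf("%lu\n", avg);		/* 42 */
	return 0;
}
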
+#ifdef CONFIG_FAIR_GROUP_SCHED
+/**
+ * update_tg_load_avg - update the tg's load avg
+ * @cfs_rq: the cfs_rq whose avg changed
+ * @force: update regardless of how small the difference
+ *
+ * This function 'ensures': tg->load_avg := \Sum tg->cfs_rq[]->avg.load.
+ * However, because tg->load_avg is a global value there are performance
+ * considerations.
+ *
+ * In order to avoid having to look at the other cfs_rq's, we use a
+ * differential update where we store the last value we propagated. This in
+ * turn allows skipping updates if the differential is 'small'.
+ *
+ * Updating tg's load_avg is necessary before update_cfs_share() (which is
+ * done) and effective_load() (which is not done because it is too costly).
  */
 static inline void update_tg_load_avg(struct cfs_rq *cfs_rq, int force)
 {
 	long delta = cfs_rq->avg.load_avg - cfs_rq->tg_load_avg_contrib;
 
+	/*
+	 * No need to update load_avg for root_task_group as it is not used.
+	 */
+	if (cfs_rq->tg == &root_task_group)
+		return;
+
 	if (force || abs(delta) > cfs_rq->tg_load_avg_contrib / 64) {
 		atomic_long_add(delta, &cfs_rq->tg->load_avg);
 		cfs_rq->tg_load_avg_contrib = cfs_rq->avg.load_avg;
 	}
 }
 
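The 1/64 filter above means the shared tg->load_avg atomic is only written once the local contribution has drifted by more than roughly 1.6%. A small self-contained model of that differential update, using made-up load values and a plain long in place of the kernel's atomic:

#include <stdio.h>
#include <stdlib.h>

/* Illustrative model of update_tg_load_avg(): propagate to the shared
 * tg->load_avg only when the local contribution drifted by > 1/64. */
struct tg_model {
	long tg_load_avg;	/* atomic_long_t in the kernel */
	long contrib;		/* last value we propagated */
};

static void maybe_propagate(struct tg_model *m, long cfs_load_avg)
{
	long delta = cfs_load_avg - m->contrib;

	if (labs(delta) > m->contrib / 64) {
		m->tg_load_avg += delta;
		m->contrib = cfs_load_avg;
	}
}

int main(void)
{
	struct tg_model m = { .tg_load_avg = 6400, .contrib = 6400 };

	maybe_propagate(&m, 6450);	/* |50| <= 100: skipped */
	maybe_propagate(&m, 6550);	/* |150| > 100: propagated */
	printf("tg->load_avg = %ld, contrib = %ld\n",
	       m.tg_load_avg, m.contrib);	/* 6550, 6550 */
	return 0;
}
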
+/*
+ * Called within set_task_rq() right before setting a task's cpu. The
+ * caller only guarantees p->pi_lock is held; no other assumptions,
+ * including the state of rq->lock, should be made.
+ */
+void set_task_rq_fair(struct sched_entity *se,
+		      struct cfs_rq *prev, struct cfs_rq *next)
+{
+	if (!sched_feat(ATTACH_AGE_LOAD))
+		return;
+
+	/*
+	 * We are supposed to update the task to "current" time, so that it is
+	 * up to date and ready to go to the new CPU/cfs_rq. But we have
+	 * difficulty determining what the current time is, so simply throw
+	 * away the out-of-date time. This results in the wakee task being
+	 * less decayed, but giving the wakee more load is not a bad trade-off.
+	 */
+	if (se->avg.last_update_time && prev) {
+		u64 p_last_update_time;
+		u64 n_last_update_time;
+
+#ifndef CONFIG_64BIT
+		u64 p_last_update_time_copy;
+		u64 n_last_update_time_copy;
+
+		do {
+			p_last_update_time_copy = prev->load_last_update_time_copy;
+			n_last_update_time_copy = next->load_last_update_time_copy;
+
+			smp_rmb();
+
+			p_last_update_time = prev->avg.last_update_time;
+			n_last_update_time = next->avg.last_update_time;
+
+		} while (p_last_update_time != p_last_update_time_copy ||
+			 n_last_update_time != n_last_update_time_copy);
+#else
+		p_last_update_time = prev->avg.last_update_time;
+		n_last_update_time = next->avg.last_update_time;
+#endif
+		__update_load_avg(p_last_update_time, cpu_of(rq_of(prev)),
+				  &se->avg, 0, 0, NULL);
+		se->avg.last_update_time = n_last_update_time;
+	}
+}
+
+/* Take into account change of utilization of a child task group */
+static inline void
+update_tg_cfs_util(struct cfs_rq *cfs_rq, struct sched_entity *se)
+{
+	struct cfs_rq *gcfs_rq = group_cfs_rq(se);
+	long delta = gcfs_rq->avg.util_avg - se->avg.util_avg;
+
+	/* Nothing to update */
+	if (!delta)
+		return;
+
+	/* Set new sched_entity's utilization */
+	se->avg.util_avg = gcfs_rq->avg.util_avg;
+	se->avg.util_sum = se->avg.util_avg * LOAD_AVG_MAX;
+
+	/* Update parent cfs_rq utilization */
+	add_positive(&cfs_rq->avg.util_avg, delta);
+	cfs_rq->avg.util_sum = cfs_rq->avg.util_avg * LOAD_AVG_MAX;
+}
+
+/* Take into account change of load of a child task group */
+static inline void
+update_tg_cfs_load(struct cfs_rq *cfs_rq, struct sched_entity *se)
+{
+	struct cfs_rq *gcfs_rq = group_cfs_rq(se);
+	long delta, load = gcfs_rq->avg.load_avg;
+
+	/*
+	 * If the load of the group cfs_rq is zero, the load of the
+	 * sched_entity will also be zero, so we can skip the formula.
+	 */
+	if (load) {
+		long tg_load;
+
+		/* Get tg's load and ensure tg_load > 0 */
+		tg_load = atomic_long_read(&gcfs_rq->tg->load_avg) + 1;
+
+		/* Ensure tg_load >= load and is updated with the current load */
+		tg_load -= gcfs_rq->tg_load_avg_contrib;
+		tg_load += load;
+
+		/*
+		 * We need to compute a correction term in the case that the
+		 * task group is consuming more CPU than a task of equal
+		 * weight. A task with a weight equal to tg->shares will have
+		 * a load less than or equal to scale_load_down(tg->shares).
+		 * Similarly, the sched_entities that represent the task group
+		 * at parent level can't have a load higher than
+		 * scale_load_down(tg->shares). And the sum of the
+		 * sched_entities' loads must be <= scale_load_down(tg->shares).
+		 */
+		if (tg_load > scale_load_down(gcfs_rq->tg->shares)) {
+			/* scale gcfs_rq's load into tg's shares */
+			load *= scale_load_down(gcfs_rq->tg->shares);
+			load /= tg_load;
+		}
+	}
+
+	delta = load - se->avg.load_avg;
+
+	/* Nothing to update */
+	if (!delta)
+		return;
+
+	/* Set new sched_entity's load */
+	se->avg.load_avg = load;
+	se->avg.load_sum = se->avg.load_avg * LOAD_AVG_MAX;
+
+	/* Update parent cfs_rq load */
+	add_positive(&cfs_rq->avg.load_avg, delta);
+	cfs_rq->avg.load_sum = cfs_rq->avg.load_avg * LOAD_AVG_MAX;
+
+	/*
+	 * If the sched_entity is already enqueued, we also have to update the
+	 * runnable load avg.
+	 */
+	if (se->on_rq) {
+		/* Update parent cfs_rq runnable_load_avg */
+		add_positive(&cfs_rq->runnable_load_avg, delta);
+		cfs_rq->runnable_load_sum = cfs_rq->runnable_load_avg * LOAD_AVG_MAX;
+	}
+}
+
+static inline void set_tg_cfs_propagate(struct cfs_rq *cfs_rq)
+{
+	cfs_rq->propagate_avg = 1;
+}
+
+static inline int test_and_clear_tg_cfs_propagate(struct sched_entity *se)
+{
+	struct cfs_rq *cfs_rq = group_cfs_rq(se);
+
+	if (!cfs_rq->propagate_avg)
+		return 0;
+
+	cfs_rq->propagate_avg = 0;
+	return 1;
+}
+
+/* Update task and its cfs_rq load average */
+static inline int propagate_entity_load_avg(struct sched_entity *se)
+{
+	struct cfs_rq *cfs_rq;
+
+	if (entity_is_task(se))
+		return 0;
+
+	if (!test_and_clear_tg_cfs_propagate(se))
+		return 0;
+
+	cfs_rq = cfs_rq_of(se);
+
+	set_tg_cfs_propagate(cfs_rq);
+
+	update_tg_cfs_util(cfs_rq, se);
+	update_tg_cfs_load(cfs_rq, se);
+
+	return 1;
+}
+
 #else /* CONFIG_FAIR_GROUP_SCHED */
+
 static inline void update_tg_load_avg(struct cfs_rq *cfs_rq, int force) {}
+
+static inline int propagate_entity_load_avg(struct sched_entity *se)
+{
+	return 0;
+}
+
+static inline void set_tg_cfs_propagate(struct cfs_rq *cfs_rq) {}
+
 #endif /* CONFIG_FAIR_GROUP_SCHED */
 
+static inline void cfs_rq_util_change(struct cfs_rq *cfs_rq)
+{
+	if (&this_rq()->cfs == cfs_rq) {
+		/*
+		 * There are a few boundary cases this might miss but it should
+		 * get called often enough that that should (hopefully) not be
+		 * a real problem -- added to that it only calls on the local
+		 * CPU, so if we enqueue remotely we'll miss an update, but
+		 * the next tick/schedule should update.
+		 *
+		 * It will not get called when we go idle, because the idle
+		 * thread is a different class (!fair), nor will the utilization
+		 * number include things like RT tasks.
+		 *
+		 * As is, the util number is not freq-invariant (we'd have to
+		 * implement arch_scale_freq_capacity() for that).
+		 *
+		 * See cpu_util().
+		 */
+		cpufreq_update_util(rq_of(cfs_rq), 0);
+	}
+}
+
 static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq);
 
 /*
@@ -2713,23 +4207,43 @@
 	WRITE_ONCE(*ptr, res);					\
 } while (0)
 
-/* Group cfs_rq's load_avg is used for task_h_load and update_cfs_share */
-static inline int update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq)
+/**
+ * update_cfs_rq_load_avg - update the cfs_rq's load/util averages
+ * @now: current time, as per cfs_rq_clock_task()
+ * @cfs_rq: cfs_rq to update
+ * @update_freq: should we call cfs_rq_util_change() or will the call do so
+ *
+ * The cfs_rq avg is the direct sum of all its entities (blocked and runnable)
+ * avg. The immediate corollary is that all (fair) tasks must be attached, see
+ * post_init_entity_util_avg().
+ *
+ * cfs_rq->avg is used for task_h_load() and update_cfs_share() for example.
+ *
+ * Returns true if the load decayed or we removed load.
+ *
+ * Since both these conditions indicate a changed cfs_rq->avg.load we should
+ * call update_tg_load_avg() when this function returns true.
+ */
+static inline int
+update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq, bool update_freq)
 {
 	struct sched_avg *sa = &cfs_rq->avg;
-	int decayed, removed = 0;
+	int decayed, removed = 0, removed_util = 0;
 
 	if (atomic_long_read(&cfs_rq->removed_load_avg)) {
 		s64 r = atomic_long_xchg(&cfs_rq->removed_load_avg, 0);
 		sub_positive(&sa->load_avg, r);
 		sub_positive(&sa->load_sum, r * LOAD_AVG_MAX);
 		removed = 1;
+		set_tg_cfs_propagate(cfs_rq);
 	}
 
 	if (atomic_long_read(&cfs_rq->removed_util_avg)) {
 		long r = atomic_long_xchg(&cfs_rq->removed_util_avg, 0);
 		sub_positive(&sa->util_avg, r);
 		sub_positive(&sa->util_sum, r * LOAD_AVG_MAX);
+		removed_util = 1;
+		set_tg_cfs_propagate(cfs_rq);
 	}
 
 	decayed = __update_load_avg(now, cpu_of(rq_of(cfs_rq)), sa,
@@ -2740,65 +4254,93 @@
 	cfs_rq->load_last_update_time_copy = sa->last_update_time;
 #endif
 
+	/* Trace CPU load, unless cfs_rq belongs to a non-root task_group */
+	if (cfs_rq == &rq_of(cfs_rq)->cfs)
+		trace_sched_load_avg_cpu(cpu_of(rq_of(cfs_rq)), cfs_rq);
+
+	if (update_freq && (decayed || removed_util))
+		cfs_rq_util_change(cfs_rq);
+
 	return decayed || removed;
 }
 
+/*
+ * Optional action to be done while updating the load average
+ */
+#define UPDATE_TG	0x1
+#define SKIP_AGE_LOAD	0x2
+
 /* Update task and its cfs_rq load average */
-static inline void update_load_avg(struct sched_entity *se, int update_tg)
+static inline void update_load_avg(struct sched_entity *se, int flags)
 {
 	struct cfs_rq *cfs_rq = cfs_rq_of(se);
 	u64 now = cfs_rq_clock_task(cfs_rq);
 	int cpu = cpu_of(rq_of(cfs_rq));
+	int decayed;
+	void *ptr = NULL;
 
 	/*
 	 * Track task load average for carrying it to new CPU after migrated, and
 	 * track group sched_entity load average for task_h_load calc in migration
 	 */
+	if (se->avg.last_update_time && !(flags & SKIP_AGE_LOAD)) {
 	__update_load_avg(now, cpu, &se->avg,
 			  se->on_rq * scale_load_down(se->load.weight),
 			  cfs_rq->curr == se, NULL);
-
-	if (update_cfs_rq_load_avg(now, cfs_rq) && update_tg)
-		update_tg_load_avg(cfs_rq, 0);
 }
 
-static void attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
-{
-	if (!sched_feat(ATTACH_AGE_LOAD))
-		goto skip_aging;
+	decayed  = update_cfs_rq_load_avg(now, cfs_rq, true);
+	decayed |= propagate_entity_load_avg(se);
 
-	/*
-	 * If we got migrated (either between CPUs or between cgroups) we'll
-	 * have aged the average right before clearing @last_update_time.
-	 */
-	if (se->avg.last_update_time) {
-		__update_load_avg(cfs_rq->avg.last_update_time, cpu_of(rq_of(cfs_rq)),
-				  &se->avg, 0, 0, NULL);
+	if (decayed && (flags & UPDATE_TG))
+		update_tg_load_avg(cfs_rq, 0);
 
-		/*
-		 * XXX: we could have just aged the entire load away if we've been
-		 * absent from the fair class for too long.
-		 */
+	if (entity_is_task(se)) {
+#ifdef CONFIG_SCHED_WALT
+		ptr = (void *)&(task_of(se)->ravg);
+#endif
+		trace_sched_load_avg_task(task_of(se), &se->avg, ptr);
+	}
 	}
 
-skip_aging:
+/**
+ * attach_entity_load_avg - attach this entity to its cfs_rq load avg
+ * @cfs_rq: cfs_rq to attach to
+ * @se: sched_entity to attach
+ *
+ * Must call update_cfs_rq_load_avg() before this, since we rely on
+ * cfs_rq->avg.last_update_time being current.
+ */
+static void attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
+{
 	se->avg.last_update_time = cfs_rq->avg.last_update_time;
 	cfs_rq->avg.load_avg += se->avg.load_avg;
 	cfs_rq->avg.load_sum += se->avg.load_sum;
 	cfs_rq->avg.util_avg += se->avg.util_avg;
 	cfs_rq->avg.util_sum += se->avg.util_sum;
+	set_tg_cfs_propagate(cfs_rq);
+
+	cfs_rq_util_change(cfs_rq);
 }
 
+/**
+ * detach_entity_load_avg - detach this entity from its cfs_rq load avg
+ * @cfs_rq: cfs_rq to detach from
+ * @se: sched_entity to detach
+ *
+ * Must call update_cfs_rq_load_avg() before this, since we rely on
+ * cfs_rq->avg.last_update_time being current.
+ */
 static void detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
-	__update_load_avg(cfs_rq->avg.last_update_time, cpu_of(rq_of(cfs_rq)),
-			  &se->avg, se->on_rq * scale_load_down(se->load.weight),
-			  cfs_rq->curr == se, NULL);
 
 	sub_positive(&cfs_rq->avg.load_avg, se->avg.load_avg);
 	sub_positive(&cfs_rq->avg.load_sum, se->avg.load_sum);
 	sub_positive(&cfs_rq->avg.util_avg, se->avg.util_avg);
 	sub_positive(&cfs_rq->avg.util_sum, se->avg.util_sum);
+	set_tg_cfs_propagate(cfs_rq);
+
+	cfs_rq_util_change(cfs_rq);
 }
 
 /* Add the load generated by se into cfs_rq's load average */
@@ -2806,62 +4348,79 @@
 enqueue_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
 	struct sched_avg *sa = &se->avg;
-	u64 now = cfs_rq_clock_task(cfs_rq);
-	int migrated, decayed;
-
-	migrated = !sa->last_update_time;
-	if (!migrated) {
-		__update_load_avg(now, cpu_of(rq_of(cfs_rq)), sa,
-			se->on_rq * scale_load_down(se->load.weight),
-			cfs_rq->curr == se, NULL);
-	}
-
-	decayed = update_cfs_rq_load_avg(now, cfs_rq);
 
 	cfs_rq->runnable_load_avg += sa->load_avg;
 	cfs_rq->runnable_load_sum += sa->load_sum;
 
-	if (migrated)
+	if (!sa->last_update_time) {
 		attach_entity_load_avg(cfs_rq, se);
-
-	if (decayed || migrated)
 		update_tg_load_avg(cfs_rq, 0);
 }
+}
 
 /* Remove the runnable load generated by se from cfs_rq's runnable load average */
 static inline void
 dequeue_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
-	update_load_avg(se, 1);
-
 	cfs_rq->runnable_load_avg =
 		max_t(long, cfs_rq->runnable_load_avg - se->avg.load_avg, 0);
 	cfs_rq->runnable_load_sum =
 		max_t(s64,  cfs_rq->runnable_load_sum - se->avg.load_sum, 0);
 }
 
-/*
- * Task first catches up with cfs_rq, and then subtract
- * itself from the cfs_rq (task must be off the queue now).
- */
-void remove_entity_load_avg(struct sched_entity *se)
-{
-	struct cfs_rq *cfs_rq = cfs_rq_of(se);
-	u64 last_update_time;
-
 #ifndef CONFIG_64BIT
+static inline u64 cfs_rq_last_update_time(struct cfs_rq *cfs_rq)
+{
 	u64 last_update_time_copy;
+	u64 last_update_time;
 
 	do {
 		last_update_time_copy = cfs_rq->load_last_update_time_copy;
 		smp_rmb();
 		last_update_time = cfs_rq->avg.last_update_time;
 	} while (last_update_time != last_update_time_copy);
+
+	return last_update_time;
+}
 #else
-	last_update_time = cfs_rq->avg.last_update_time;
+static inline u64 cfs_rq_last_update_time(struct cfs_rq *cfs_rq)
+{
+	return cfs_rq->avg.last_update_time;
+}
 #endif
 
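The #ifndef CONFIG_64BIT reader above is a seqcount-like protocol: the writer publishes load_last_update_time_copy only after the value itself, and the reader retries until both reads agree, so a torn 64-bit load is never returned on a 32-bit machine. A hedged userspace sketch of the pairing, with the kernel's smp_wmb()/smp_rmb() approximated by GCC atomic fences:

#include <stdint.h>
#include <stdio.h>

/* Illustrative model of the last_update_time/copy protocol. */
struct clock64 {
	uint64_t value;
	uint64_t copy;
};

static void writer_update(struct clock64 *c, uint64_t now)
{
	c->value = now;
	__atomic_thread_fence(__ATOMIC_RELEASE);	/* smp_wmb() stand-in */
	c->copy = now;
}

static uint64_t reader_read(const struct clock64 *c)
{
	uint64_t v, cp;

	do {
		cp = c->copy;
		__atomic_thread_fence(__ATOMIC_ACQUIRE);	/* smp_rmb() stand-in */
		v = c->value;
	} while (v != cp);	/* torn read observed: try again */

	return v;
}

int main(void)
{
	struct clock64 c = { 0, 0 };

	writer_update(&c, 123456789ULL);
	printf("%llu\n", (unsigned long long)reader_read(&c));
	return 0;
}
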
+/*
+ * Synchronize entity load avg of dequeued entity without locking
+ * the previous rq.
+ */
+void sync_entity_load_avg(struct sched_entity *se)
+{
+	struct cfs_rq *cfs_rq = cfs_rq_of(se);
+	u64 last_update_time;
+
+	last_update_time = cfs_rq_last_update_time(cfs_rq);
 	__update_load_avg(last_update_time, cpu_of(rq_of(cfs_rq)), &se->avg, 0, 0, NULL);
+}
+
+/*
+ * Task first catches up with cfs_rq, and then subtract
+ * itself from the cfs_rq (task must be off the queue now).
+ */
+void remove_entity_load_avg(struct sched_entity *se)
+{
+	struct cfs_rq *cfs_rq = cfs_rq_of(se);
+
+	/*
+	 * tasks cannot exit without having gone through wake_up_new_task() ->
+	 * post_init_entity_util_avg() which will have added things to the
+	 * cfs_rq, so we can remove unconditionally.
+	 *
+	 * Similarly for groups, they will have passed through
+	 * post_init_entity_util_avg() before unregister_sched_fair_group()
+	 * calls this.
+	 */
+
+	sync_entity_load_avg(se);
 	atomic_long_add(se->avg.load_avg, &cfs_rq->removed_load_avg);
 	atomic_long_add(se->avg.util_avg, &cfs_rq->removed_util_avg);
 }
@@ -2898,7 +4457,16 @@
 
 #else /* CONFIG_SMP */
 
-static inline void update_load_avg(struct sched_entity *se, int update_tg) {}
+static inline int
+update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq, bool update_freq)
+{
+	return 0;
+}
+
+#define UPDATE_TG	0x0
+#define SKIP_AGE_LOAD	0x0
+
+static inline void update_load_avg(struct sched_entity *se, int not_used1) {}
 static inline void
 enqueue_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) {}
 static inline void
@@ -2915,6 +4483,12 @@
 	return 0;
 }
 
+static inline void inc_cfs_rq_hmp_stats(struct cfs_rq *cfs_rq,
+	 struct task_struct *p, int change_cra) { }
+
+static inline void dec_cfs_rq_hmp_stats(struct cfs_rq *cfs_rq,
+	 struct task_struct *p, int change_cra) { }
+
 #endif /* CONFIG_SMP */
 
 static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
@@ -2962,6 +4536,7 @@
 			}
 
 			trace_sched_stat_blocked(tsk, delta);
+			trace_sched_blocked_reason(tsk);
 
 			/*
 			 * Blocking time is in units of nanosecs, so shift by
@@ -3040,9 +4615,10 @@
 	 * Update run-time statistics of the 'current'.
 	 */
 	update_curr(cfs_rq);
+	update_load_avg(se, UPDATE_TG);
 	enqueue_entity_load_avg(cfs_rq, se);
+	update_cfs_shares(se);
 	account_entity_enqueue(cfs_rq, se);
-	update_cfs_shares(cfs_rq);
 
 	if (flags & ENQUEUE_WAKEUP) {
 		place_entity(cfs_rq, se, 0);
@@ -3115,6 +4691,16 @@
 	 * Update run-time statistics of the 'current'.
 	 */
 	update_curr(cfs_rq);
+
+	/*
+	 * When dequeuing a sched_entity, we must:
+	 *   - Update loads to have both entity and cfs_rq synced with now.
+	 *   - Subtract its load from the cfs_rq->runnable_avg.
+	 *   - Subtract its previous weight from cfs_rq->load.weight.
+	 *   - For group entity, update its weight to reflect the new share
+	 *     of its group cfs_rq.
+	 */
+	update_load_avg(se, UPDATE_TG);
 	dequeue_entity_load_avg(cfs_rq, se);
 
 	update_stats_dequeue(cfs_rq, se);
@@ -3150,7 +4736,7 @@
 	return_cfs_rq_runtime(cfs_rq);
 
 	update_min_vruntime(cfs_rq);
-	update_cfs_shares(cfs_rq);
+	update_cfs_shares(se);
 }
 
 /*
@@ -3205,7 +4791,7 @@
 		 */
 		update_stats_wait_end(cfs_rq, se);
 		__dequeue_entity(cfs_rq, se);
-		update_load_avg(se, 1);
+		update_load_avg(se, UPDATE_TG);
 	}
 
 	update_stats_curr_start(cfs_rq, se);
@@ -3321,8 +4907,8 @@
 	/*
 	 * Ensure that runnable average is periodically updated.
 	 */
-	update_load_avg(curr, 1);
-	update_cfs_shares(cfs_rq);
+	update_load_avg(curr, UPDATE_TG);
+	update_cfs_shares(curr);
 
 #ifdef CONFIG_SCHED_HRTICK
 	/*
@@ -3529,6 +5115,35 @@
 	return cfs_bandwidth_used() && cfs_rq->throttled;
 }
 
+#ifdef CONFIG_SCHED_HMP
+/*
+ * Check if task is part of a hierarchy where some cfs_rq does not have any
+ * runtime left.
+ *
+ * We can't rely on throttled_hierarchy() to do this test, as
+ * cfs_rq->throttle_count will not be updated yet when this function is called
+ * from scheduler_tick()
+ */
+static int task_will_be_throttled(struct task_struct *p)
+{
+	struct sched_entity *se = &p->se;
+	struct cfs_rq *cfs_rq;
+
+	if (!cfs_bandwidth_used())
+		return 0;
+
+	for_each_sched_entity(se) {
+		cfs_rq = cfs_rq_of(se);
+		if (!cfs_rq->runtime_enabled)
+			continue;
+		if (cfs_rq->runtime_remaining <= 0)
+			return 1;
+	}
+
+	return 0;
+}
+#endif
+
 /* check whether cfs_rq, or any parent, is throttled */
 static inline int throttled_hierarchy(struct cfs_rq *cfs_rq)
 {
@@ -3608,13 +5223,16 @@
 		if (dequeue)
 			dequeue_entity(qcfs_rq, se, DEQUEUE_SLEEP);
 		qcfs_rq->h_nr_running -= task_delta;
+		dec_throttled_cfs_rq_hmp_stats(&qcfs_rq->hmp_stats, cfs_rq);
 
 		if (qcfs_rq->load.weight)
 			dequeue = 0;
 	}
 
-	if (!se)
+	if (!se) {
 		sub_nr_running(rq, task_delta);
+		dec_throttled_cfs_rq_hmp_stats(&rq->hmp_stats, cfs_rq);
+	}
 
 	cfs_rq->throttled = 1;
 	cfs_rq->throttled_clock = rq_clock(rq);
@@ -3635,6 +5253,12 @@
 		start_cfs_bandwidth(cfs_b);
 
 	raw_spin_unlock(&cfs_b->lock);
+
+	/* Log effect on hmp stats after throttling */
+	trace_sched_cpu_load_cgroup(rq, idle_cpu(cpu_of(rq)),
+			     sched_irqload(cpu_of(rq)),
+			     power_cost(cpu_of(rq), 0),
+			     cpu_temp(cpu_of(rq)));
 }
 
 void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
@@ -3644,6 +5268,7 @@
 	struct sched_entity *se;
 	int enqueue = 1;
 	long task_delta;
+	struct cfs_rq *tcfs_rq __maybe_unused = cfs_rq;
 
 	se = cfs_rq->tg->se[cpu_of(rq)];
 
@@ -3671,17 +5296,26 @@
 		if (enqueue)
 			enqueue_entity(cfs_rq, se, ENQUEUE_WAKEUP);
 		cfs_rq->h_nr_running += task_delta;
+		inc_throttled_cfs_rq_hmp_stats(&cfs_rq->hmp_stats, tcfs_rq);
 
 		if (cfs_rq_throttled(cfs_rq))
 			break;
 	}
 
-	if (!se)
+	if (!se) {
 		add_nr_running(rq, task_delta);
+		inc_throttled_cfs_rq_hmp_stats(&rq->hmp_stats, tcfs_rq);
+	}
 
 	/* determine whether we need to wake up potentially idle cpu */
 	if (rq->curr == rq->idle && rq->cfs.nr_running)
 		resched_curr(rq);
+
+	/* Log effect on hmp stats after un-throttling */
+	trace_sched_cpu_load_cgroup(rq, idle_cpu(cpu_of(rq)),
+			     sched_irqload(cpu_of(rq)),
+			     power_cost(cpu_of(rq), 0),
+			     cpu_temp(cpu_of(rq)));
 }
 
 static u64 distribute_cfs_runtime(struct cfs_bandwidth *cfs_b,
@@ -4022,6 +5656,7 @@
 {
 	cfs_rq->runtime_enabled = 0;
 	INIT_LIST_HEAD(&cfs_rq->throttled_list);
+	init_cfs_rq_hmp_stats(cfs_rq);
 }
 
 void start_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
@@ -4137,7 +5772,7 @@
 
 	WARN_ON(task_rq(p) != rq);
 
-	if (cfs_rq->nr_running > 1) {
+	if (rq->cfs.h_nr_running > 1) {
 		u64 slice = sched_slice(cfs_rq, se);
 		u64 ran = se->sum_exec_runtime - se->prev_sum_exec_runtime;
 		s64 delta = slice - ran;
@@ -4153,8 +5788,7 @@
 
 /*
  * called from enqueue/dequeue and updates the hrtick when the
- * current task is from our class and nr_running is low enough
- * to matter.
+ * current task is from our class.
  */
 static void hrtick_update(struct rq *rq)
 {
@@ -4163,7 +5797,6 @@
 	if (!hrtick_enabled(rq) || curr->sched_class != &fair_sched_class)
 		return;
 
-	if (cfs_rq_of(&curr->se)->nr_running < sched_nr_latency)
 		hrtick_start_fair(rq, curr);
 }
 #else /* !CONFIG_SCHED_HRTICK */
@@ -4177,6 +5810,14 @@
 }
 #endif
 
+#ifdef CONFIG_SMP
+static bool __cpu_overutilized(int cpu, int delta);
+static bool cpu_overutilized(int cpu);
+unsigned long boosted_cpu_util(int cpu);
+#else
+#define boosted_cpu_util(cpu) cpu_util_freq(cpu)
+#endif
+
 /*
  * The enqueue_task method is called before nr_running is
  * increased. Here we update the fair scheduling stats and
@@ -4187,6 +5828,17 @@
 {
 	struct cfs_rq *cfs_rq;
 	struct sched_entity *se = &p->se;
+#ifdef CONFIG_SMP
+	int task_new = flags & ENQUEUE_WAKEUP_NEW;
+#endif
+
+	/*
+	 * If in_iowait is set, the code below may not trigger any cpufreq
+	 * utilization updates, so do it here explicitly with the IOWAIT flag
+	 * passed.
+	 */
+	if (p->in_iowait)
+		cpufreq_update_this_cpu(rq, SCHED_CPUFREQ_IOWAIT);
 
 	for_each_sched_entity(se) {
 		if (se->on_rq)
@@ -4203,6 +5855,7 @@
 		if (cfs_rq_throttled(cfs_rq))
 			break;
 		cfs_rq->h_nr_running++;
+		inc_cfs_rq_hmp_stats(cfs_rq, p, 1);
 
 		flags = ENQUEUE_WAKEUP;
 	}
@@ -4210,17 +5863,50 @@
 	for_each_sched_entity(se) {
 		cfs_rq = cfs_rq_of(se);
 		cfs_rq->h_nr_running++;
+		inc_cfs_rq_hmp_stats(cfs_rq, p, 1);
 
 		if (cfs_rq_throttled(cfs_rq))
 			break;
 
-		update_load_avg(se, 1);
-		update_cfs_shares(cfs_rq);
+		update_load_avg(se, UPDATE_TG);
+		update_cfs_shares(se);
 	}
 
-	if (!se)
+	if (!se) {
 		add_nr_running(rq, 1);
+		inc_rq_hmp_stats(rq, p, 1);
+	}
+
+#ifdef CONFIG_SMP
 
+	/*
+	 * Update SchedTune accounting.
+	 *
+	 * We do it before updating the CPU capacity to ensure the
+	 * boost value of the current task is accounted for in the
+	 * selection of the OPP.
+	 *
+	 * We do it also in the case where we enqueue a throttled task;
+	 * we could argue that a throttled task should not boost a CPU,
+	 * however:
+	 * a) properly implementing CPU boosting while considering throttled
+	 *    tasks would greatly increase the complexity of the solution
+	 * b) it's not easy to quantify the benefits introduced by
+	 *    such a more complex solution.
+	 * Thus, for the time being we go for the simple solution and boost
+	 * also for throttled RQs.
+	 */
+	schedtune_enqueue_task(p, cpu_of(rq));
+
+	if (energy_aware() && !se) {
+		if (!task_new && !rq->rd->overutilized &&
+		    cpu_overutilized(rq->cpu)) {
+			rq->rd->overutilized = true;
+			trace_sched_overutilized(true);
+		}
+	}
+
+#endif /* CONFIG_SMP */
 	hrtick_update(rq);
 }
 
@@ -4250,6 +5936,7 @@
 		if (cfs_rq_throttled(cfs_rq))
 			break;
 		cfs_rq->h_nr_running--;
+		dec_cfs_rq_hmp_stats(cfs_rq, p, 1);
 
 		/* Don't dequeue parent if it has other entities besides us */
 		if (cfs_rq->load.weight) {
@@ -4269,16 +5956,32 @@
 	for_each_sched_entity(se) {
 		cfs_rq = cfs_rq_of(se);
 		cfs_rq->h_nr_running--;
+		dec_cfs_rq_hmp_stats(cfs_rq, p, 1);
 
 		if (cfs_rq_throttled(cfs_rq))
 			break;
 
-		update_load_avg(se, 1);
-		update_cfs_shares(cfs_rq);
+		update_load_avg(se, UPDATE_TG);
+		update_cfs_shares(se);
 	}
 
-	if (!se)
+	if (!se) {
 		sub_nr_running(rq, 1);
+		dec_rq_hmp_stats(rq, p, 1);
+	}
+
+#ifdef CONFIG_SMP
+
+	/*
+	 * Update SchedTune accounting.
+	 *
+	 * We do it before updating the CPU capacity to ensure the
+	 * boost value of the current task is accounted for in the
+	 * selection of the OPP.
+	 */
+	schedtune_dequeue_task(p, cpu_of(rq));
+
+#endif /* CONFIG_SMP */
 
 	hrtick_update(rq);
 }
@@ -4506,15 +6209,6 @@
 	return max(rq->cpu_load[type-1], total);
 }
 
-static unsigned long capacity_of(int cpu)
-{
-	return cpu_rq(cpu)->cpu_capacity;
-}
-
-static unsigned long capacity_orig_of(int cpu)
-{
-	return cpu_rq(cpu)->cpu_capacity_orig;
-}
 
 static unsigned long cpu_avg_load_per_task(int cpu)
 {
@@ -4689,6 +6383,487 @@
 #endif
 
 /*
+ * Returns the current capacity of cpu after applying both
+ * cpu and freq scaling.
+ */
+unsigned long capacity_curr_of(int cpu)
+{
+	return cpu_rq(cpu)->cpu_capacity_orig *
+	       arch_scale_freq_capacity(NULL, cpu)
+	       >> SCHED_CAPACITY_SHIFT;
+}
+
+struct energy_env {
+	struct sched_group	*sg_top;
+	struct sched_group	*sg_cap;
+	int			cap_idx;
+	int			util_delta;
+	int			src_cpu;
+	int			dst_cpu;
+	int			trg_cpu;
+	int			energy;
+	int			payoff;
+	struct task_struct	*task;
+	struct {
+		int before;
+		int after;
+		int delta;
+		int diff;
+	} nrg;
+	struct {
+		int before;
+		int after;
+		int delta;
+	} cap;
+};
+
+static int cpu_util_wake(int cpu, struct task_struct *p);
+
+/*
+ * __cpu_norm_util() returns the cpu util relative to a specific capacity,
+ * i.e. its busy ratio, in the range [0..SCHED_LOAD_SCALE], which is useful
+ * for energy calculations.
+ *
+ * Since util is a scale-invariant utilization defined as:
+ *
+ *   util ~ (curr_freq/max_freq)*1024 * capacity_orig/1024 * running_time/time
+ *
+ * the normalized util can be found using the specific capacity.
+ *
+ *   capacity = capacity_orig * curr_freq/max_freq
+ *
+ *   norm_util = running_time/time ~ util/capacity
+ */
+static unsigned long __cpu_norm_util(unsigned long util, unsigned long capacity)
+{
+	if (util >= capacity)
+		return SCHED_CAPACITY_SCALE;
+
+	return (util << SCHED_CAPACITY_SHIFT)/capacity;
+}
+
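For instance, util 300 against a capacity of 600 normalizes to (300 << 10) / 600 = 512, i.e. half of SCHED_CAPACITY_SCALE, and anything at or above the capacity saturates at 1024. A tiny standalone check of that arithmetic:

#include <stdio.h>

#define SCHED_CAPACITY_SHIFT	10
#define SCHED_CAPACITY_SCALE	(1UL << SCHED_CAPACITY_SHIFT)

/* Userspace copy of __cpu_norm_util(): busy ratio in [0..1024]. */
static unsigned long cpu_norm_util(unsigned long util, unsigned long capacity)
{
	if (util >= capacity)
		return SCHED_CAPACITY_SCALE;

	return (util << SCHED_CAPACITY_SHIFT) / capacity;
}

int main(void)
{
	printf("%lu\n", cpu_norm_util(300, 600));	/* 512: half busy */
	printf("%lu\n", cpu_norm_util(700, 600));	/* 1024: saturated */
	return 0;
}
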
+static unsigned long group_max_util(struct energy_env *eenv)
+{
+	unsigned long max_util = 0;
+	unsigned long util;
+	int cpu;
+
+	for_each_cpu(cpu, sched_group_cpus(eenv->sg_cap)) {
+		util = cpu_util_wake(cpu, eenv->task);
+
+		/*
+		 * If we are looking at the target CPU specified by the eenv,
+		 * then we should add the (estimated) utilization of the task
+		 * assuming we will wake it up on that CPU.
+		 */
+		if (unlikely(cpu == eenv->trg_cpu))
+			util += eenv->util_delta;
+
+		max_util = max(max_util, util);
+	}
+
+	return max_util;
+}
+
+/*
+ * group_norm_util() returns the approximated group util relative to its
+ * current capacity (busy ratio), in the range [0..SCHED_LOAD_SCALE], for use
+ * in energy calculations.
+ *
+ * Since task executions may or may not overlap in time in the group the true
+ * normalized util is between MAX(cpu_norm_util(i)) and SUM(cpu_norm_util(i))
+ * when iterating over all CPUs in the group.
+ * The latter estimate is used as it leads to a more pessimistic energy
+ * estimate (more busy).
+ */
+static unsigned long
+group_norm_util(struct energy_env *eenv, struct sched_group *sg)
+{
+	unsigned long capacity = sg->sge->cap_states[eenv->cap_idx].cap;
+	unsigned long util, util_sum = 0;
+	int cpu;
+
+	for_each_cpu(cpu, sched_group_cpus(sg)) {
+		util = cpu_util_wake(cpu, eenv->task);
+
+		/*
+		 * If we are looking at the target CPU specified by the eenv,
+		 * then we should add the (estimated) utilization of the task
+		 * assuming we will wake it up on that CPU.
+		 */
+		if (unlikely(cpu == eenv->trg_cpu))
+			util += eenv->util_delta;
+
+		util_sum += __cpu_norm_util(util, capacity);
+	}
+
+	return min_t(unsigned long, util_sum, SCHED_CAPACITY_SCALE);
+}
+
+static int find_new_capacity(struct energy_env *eenv,
+	const struct sched_group_energy * const sge)
+{
+	int idx, max_idx = sge->nr_cap_states - 1;
+	unsigned long util = group_max_util(eenv);
+
+	/* default is max_cap if we don't find a match */
+	eenv->cap_idx = max_idx;
+
+	for (idx = 0; idx < sge->nr_cap_states; idx++) {
+		if (sge->cap_states[idx].cap >= util) {
+			eenv->cap_idx = idx;
+			break;
+		}
+	}
+
+	return eenv->cap_idx;
+}
+
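In other words, the lowest-indexed capacity state that still covers the group's maximum expected utilization wins, with the top state as the fallback. A sketch against a hypothetical three-state table (the capacity values are made up):

#include <stdio.h>

/* Hypothetical capacity states, smallest first (made-up values). */
static const unsigned long cap_states[] = { 256, 512, 1024 };
#define NR_CAP_STATES	3

/* Lowest-index state whose capacity covers util; highest otherwise. */
static int find_capacity_idx(unsigned long util)
{
	for (int idx = 0; idx < NR_CAP_STATES; idx++)
		if (cap_states[idx] >= util)
			return idx;

	return NR_CAP_STATES - 1;	/* default: max capacity */
}

int main(void)
{
	printf("%d\n", find_capacity_idx(200));		/* 0 -> cap 256 */
	printf("%d\n", find_capacity_idx(600));		/* 2 -> cap 1024 */
	printf("%d\n", find_capacity_idx(2000));	/* 2: nothing fits */
	return 0;
}
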
+static int group_idle_state(struct energy_env *eenv, struct sched_group *sg)
+{
+	int i, state = INT_MAX;
+	int src_in_grp, dst_in_grp;
+	long grp_util = 0;
+
+	/* Find the shallowest idle state in the sched group. */
+	for_each_cpu(i, sched_group_cpus(sg))
+		state = min(state, idle_get_state_idx(cpu_rq(i)));
+
+	/* Take non-cpuidle idling into account (active idle/arch_cpu_idle()) */
+	state++;
+
+	src_in_grp = cpumask_test_cpu(eenv->src_cpu, sched_group_cpus(sg));
+	dst_in_grp = cpumask_test_cpu(eenv->dst_cpu, sched_group_cpus(sg));
+	if (src_in_grp == dst_in_grp) {
+		/*
+		 * Both CPUs under consideration are in the same group or in
+		 * neither group; the migration should leave the idle state
+		 * the same.
+		 */
+		goto end;
+	}
+
+	/*
+	 * Try to estimate if a deeper idle state is
+	 * achievable when we move the task.
+	 */
+	for_each_cpu(i, sched_group_cpus(sg)) {
+		grp_util += cpu_util_wake(i, eenv->task);
+		if (unlikely(i == eenv->trg_cpu))
+			grp_util += eenv->util_delta;
+	}
+
+	if (grp_util <=
+		((long)sg->sgc->max_capacity * (int)sg->group_weight)) {
+		/* after moving, this group is at most partly
+		 * occupied, so it should have some idle time.
+		 */
+		int max_idle_state_idx = sg->sge->nr_idle_states - 2;
+		int new_state = grp_util * max_idle_state_idx;
+
+		if (grp_util <= 0) {
+			/* group will have no util, use lowest state */
+			new_state = max_idle_state_idx + 1;
+		} else {
+			/* for partially idle, linearly map util to idle
+			 * states, excluding the lowest one. This does not
+			 * correspond to the state we expect to enter in
+			 * reality, but an indication of what might happen.
+			 */
+			new_state = min(max_idle_state_idx, (int)
+					(new_state / sg->sgc->max_capacity));
+			new_state = max_idle_state_idx - new_state;
+		}
+		state = new_state;
+	} else {
+		/* After moving, the group will be fully occupied
+		 * so assume it will not be idle at all.
+		 */
+		state = 0;
+	}
+end:
+	return state;
+}
+
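The partial-idle branch above maps the group's remaining utilization linearly onto the idle-state indices between the deepest and the shallowest state. A standalone model of just that mapping, assuming three idle states (so max_idle_state_idx = nr_idle_states - 2 = 1) and a group max_capacity of 1024:

#include <stdio.h>

#define MAX_CAPACITY		1024	/* assumed sg->sgc->max_capacity */
#define MAX_IDLE_STATE_IDX	1	/* assumed nr_idle_states - 2 */

/* More utilization maps to a shallower (lower-index) idle state. */
static int map_util_to_idle_state(long grp_util)
{
	long new_state = grp_util * MAX_IDLE_STATE_IDX;

	if (grp_util <= 0)
		return MAX_IDLE_STATE_IDX + 1;	/* no util: deepest state */

	new_state /= MAX_CAPACITY;
	if (new_state > MAX_IDLE_STATE_IDX)
		new_state = MAX_IDLE_STATE_IDX;

	return MAX_IDLE_STATE_IDX - (int)new_state;
}

int main(void)
{
	printf("%d\n", map_util_to_idle_state(0));	/* 2: deepest */
	printf("%d\n", map_util_to_idle_state(512));	/* 1 */
	printf("%d\n", map_util_to_idle_state(1500));	/* 0: shallowest */
	return 0;
}
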
+/*
+ * sched_group_energy(): Computes the absolute energy consumption of cpus
+ * belonging to the sched_group including shared resources shared only by
+ * members of the group. Iterates over all cpus in the hierarchy below the
+ * sched_group starting from the bottom working its way up before going to
+ * the next cpu until all cpus are covered at all levels. The current
+ * implementation is likely to gather the same util statistics multiple times.
+ * This can probably be done in a faster but more complex way.
+ * Note: sched_group_energy() may fail when racing with sched_domain updates.
+ */
+static int sched_group_energy(struct energy_env *eenv)
+{
+	struct cpumask visit_cpus;
+	u64 total_energy = 0;
+
+	WARN_ON(!eenv->sg_top->sge);
+
+	cpumask_copy(&visit_cpus, sched_group_cpus(eenv->sg_top));
+
+	while (!cpumask_empty(&visit_cpus)) {
+		struct sched_group *sg_shared_cap = NULL;
+		int cpu = cpumask_first(&visit_cpus);
+		struct sched_domain *sd;
+
+		/*
+		 * Is the group utilization affected by cpus outside this
+		 * sched_group?
+		 */
+		sd = rcu_dereference(per_cpu(sd_scs, cpu));
+
+		if (sd && sd->parent)
+			sg_shared_cap = sd->parent->groups;
+
+		for_each_domain(cpu, sd) {
+			struct sched_group *sg = sd->groups;
+
+			/* Has this sched_domain already been visited? */
+			if (sd->child && group_first_cpu(sg) != cpu)
+				break;
+
+			do {
+				unsigned long group_util;
+				int sg_busy_energy, sg_idle_energy;
+				int cap_idx, idle_idx;
+
+				if (sg_shared_cap && sg_shared_cap->group_weight >= sg->group_weight)
+					eenv->sg_cap = sg_shared_cap;
+				else
+					eenv->sg_cap = sg;
+
+				cap_idx = find_new_capacity(eenv, sg->sge);
+
+				if (sg->group_weight == 1) {
+					/* Remove capacity of src CPU (before task move) */
+					if (eenv->trg_cpu == eenv->src_cpu &&
+					    cpumask_test_cpu(eenv->src_cpu, sched_group_cpus(sg))) {
+						eenv->cap.before = sg->sge->cap_states[cap_idx].cap;
+						eenv->cap.delta -= eenv->cap.before;
+					}
+					/* Add capacity of dst CPU  (after task move) */
+					if (eenv->trg_cpu == eenv->dst_cpu &&
+					    cpumask_test_cpu(eenv->dst_cpu, sched_group_cpus(sg))) {
+						eenv->cap.after = sg->sge->cap_states[cap_idx].cap;
+						eenv->cap.delta += eenv->cap.after;
+					}
+				}
+
+				idle_idx = group_idle_state(eenv, sg);
+				group_util = group_norm_util(eenv, sg);
+
+				sg_busy_energy = (group_util * sg->sge->cap_states[cap_idx].power);
+				sg_idle_energy = ((SCHED_LOAD_SCALE-group_util)
+								* sg->sge->idle_states[idle_idx].power);
+
+				total_energy += sg_busy_energy + sg_idle_energy;
+
+				if (!sd->child)
+					cpumask_xor(&visit_cpus, &visit_cpus, sched_group_cpus(sg));
+
+				if (cpumask_equal(sched_group_cpus(sg), sched_group_cpus(eenv->sg_top)))
+					goto next_cpu;
+
+			} while (sg = sg->next, sg != sd->groups);
+		}
+
+		/*
+		 * If we raced with hotplug and got an sd NULL pointer,
+		 * returning a wrong energy estimate is better than
+		 * entering an infinite loop.
+		 */
+		if (cpumask_test_cpu(cpu, &visit_cpus))
+			return -EINVAL;
+next_cpu:
+		cpumask_clear_cpu(cpu, &visit_cpus);
+		continue;
+	}
+
+	eenv->energy = total_energy >> SCHED_CAPACITY_SHIFT;
+	return 0;
+}
+
+static inline bool cpu_in_sg(struct sched_group *sg, int cpu)
+{
+	return cpu != -1 && cpumask_test_cpu(cpu, sched_group_cpus(sg));
+}
+
+static inline unsigned long task_util(struct task_struct *p);
+
+/*
+ * energy_diff(): Estimate the energy impact of changing the utilization
+ * distribution. eenv specifies the change: utilization amount, source, and
+ * destination cpu. Source or destination cpu may be -1 in which case the
+ * utilization is removed from or added to the system (e.g. task wake-up). If
+ * both are specified, the utilization is migrated.
+ */
+static inline int __energy_diff(struct energy_env *eenv)
+{
+	struct sched_domain *sd;
+	struct sched_group *sg;
+	int sd_cpu = -1, energy_before = 0, energy_after = 0;
+	int diff, margin;
+
+	struct energy_env eenv_before = {
+		.util_delta	= task_util(eenv->task),
+		.src_cpu	= eenv->src_cpu,
+		.dst_cpu	= eenv->dst_cpu,
+		.trg_cpu	= eenv->src_cpu,
+		.nrg		= { 0, 0, 0, 0},
+		.cap		= { 0, 0, 0 },
+		.task		= eenv->task,
+	};
+
+	if (eenv->src_cpu == eenv->dst_cpu)
+		return 0;
+
+	sd_cpu = (eenv->src_cpu != -1) ? eenv->src_cpu : eenv->dst_cpu;
+	sd = rcu_dereference(per_cpu(sd_ea, sd_cpu));
+
+	if (!sd)
+		return 0; /* Error */
+
+	sg = sd->groups;
+
+	do {
+		if (cpu_in_sg(sg, eenv->src_cpu) || cpu_in_sg(sg, eenv->dst_cpu)) {
+			eenv_before.sg_top = eenv->sg_top = sg;
+
+			if (sched_group_energy(&eenv_before))
+				return 0; /* Invalid result, abort */
+			energy_before += eenv_before.energy;
+
+			/* Keep track of SRC cpu (before) capacity */
+			eenv->cap.before = eenv_before.cap.before;
+			eenv->cap.delta = eenv_before.cap.delta;
+
+			if (sched_group_energy(eenv))
+				return 0; /* Invalid result, abort */
+			energy_after += eenv->energy;
+		}
+	} while (sg = sg->next, sg != sd->groups);
+
+	eenv->nrg.before = energy_before;
+	eenv->nrg.after = energy_after;
+	eenv->nrg.diff = eenv->nrg.after - eenv->nrg.before;
+	eenv->payoff = 0;
+#ifndef CONFIG_SCHED_TUNE
+	trace_sched_energy_diff(eenv->task,
+			eenv->src_cpu, eenv->dst_cpu, eenv->util_delta,
+			eenv->nrg.before, eenv->nrg.after, eenv->nrg.diff,
+			eenv->cap.before, eenv->cap.after, eenv->cap.delta,
+			eenv->nrg.delta, eenv->payoff);
+#endif
+	/*
+	 * Dead-zone margin preventing too many migrations.
+	 */
+
+	margin = eenv->nrg.before >> 6; /* ~1.56% */
+
+	diff = eenv->nrg.after - eenv->nrg.before;
+
+	eenv->nrg.diff = (abs(diff) < margin) ? 0 : eenv->nrg.diff;
+
+	return eenv->nrg.diff;
+}
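
The dead zone above filters out migrations whose estimated delta is below before/64 (~1.56%), so marginal estimates never trigger a move. A standalone sketch of just that filter, with illustrative numbers:

#include <stdio.h>
#include <stdlib.h>

/* Dead-zone filter as in __energy_diff(): drop deltas below ~1.56%. */
static int filter_nrg_diff(int before, int after)
{
	int margin = before >> 6;	/* before/64, i.e. ~1.56% */
	int diff = after - before;

	return (abs(diff) < margin) ? 0 : diff;
}

int main(void)
{
	/* 1000 -> 990 saves only 1%: inside the dead zone, reported as 0. */
	printf("%d\n", filter_nrg_diff(1000, 990));
	/* 1000 -> 960 saves 4%: the full delta is reported. */
	printf("%d\n", filter_nrg_diff(1000, 960));
	return 0;
}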
+
+#ifdef CONFIG_SCHED_TUNE
+
+struct target_nrg schedtune_target_nrg;
+
+#ifdef CONFIG_CGROUP_SCHEDTUNE
+extern bool schedtune_initialized;
+#endif /* CONFIG_CGROUP_SCHEDTUNE */
+
+/*
+ * System energy normalization
+ * Returns the normalized value, with magnitude in the range
+ * [0..SCHED_CAPACITY_SCALE], corresponding to the specified energy variation.
+ */
+static inline int
+normalize_energy(int energy_diff)
+{
+	u32 normalized_nrg;
+
+#ifdef CONFIG_CGROUP_SCHEDTUNE
+	/* during early setup, we don't know the extents */
+	if (unlikely(!schedtune_initialized))
+		return energy_diff < 0 ? -1 : 1;
+#endif /* CONFIG_CGROUP_SCHEDTUNE */
+
+#ifdef CONFIG_SCHED_DEBUG
+	{
+	int max_delta;
+
+	/* Check for boundaries */
+	max_delta  = schedtune_target_nrg.max_power;
+	max_delta -= schedtune_target_nrg.min_power;
+	WARN_ON(abs(energy_diff) >= max_delta);
+	}
+#endif
+
+	/* Do scaling using positive numbers to increase the range */
+	normalized_nrg = (energy_diff < 0) ? -energy_diff : energy_diff;
+
+	/* Scale by energy magnitude */
+	normalized_nrg <<= SCHED_CAPACITY_SHIFT;
+
+	/* Normalize on max energy for target platform */
+	normalized_nrg = reciprocal_divide(
+			normalized_nrg, schedtune_target_nrg.rdiv);
+
+	return (energy_diff < 0) ? -normalized_nrg : normalized_nrg;
+}
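
normalize_energy() rescales an absolute energy delta into SCHED_CAPACITY_SCALE units of the platform's total energy range (max_power - min_power), using a precomputed reciprocal so the hot path avoids a division. A host-side sketch with plain division standing in for reciprocal_divide() and an assumed energy range:

#include <stdio.h>

#define SCHED_CAPACITY_SHIFT	10

/*
 * Plain-division stand-in for reciprocal_divide(): the kernel precomputes
 * schedtune_target_nrg.rdiv from (max_power - min_power) once, so the
 * wakeup path never divides at runtime.
 */
static int normalize_energy_sketch(int energy_diff, unsigned int energy_range)
{
	unsigned int nrg = (energy_diff < 0) ? -energy_diff : energy_diff;

	nrg <<= SCHED_CAPACITY_SHIFT;
	nrg /= energy_range;		/* reciprocal_divide(nrg, rdiv) */

	return (energy_diff < 0) ? -(int)nrg : (int)nrg;
}

int main(void)
{
	/* Assumed platform range: max_power - min_power = 4096. */
	printf("%d\n", normalize_energy_sketch(-512, 4096));	/* -128 */
	return 0;
}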
+
+static inline int
+energy_diff(struct energy_env *eenv)
+{
+	int boost = schedtune_task_boost(eenv->task);
+	int nrg_delta;
+
+	/* Compute "absolute" energy diff */
+	__energy_diff(eenv);
+
+	/* Return energy diff when boost margin is 0 */
+	if (boost == 0) {
+		trace_sched_energy_diff(eenv->task,
+				eenv->src_cpu, eenv->dst_cpu, eenv->util_delta,
+				eenv->nrg.before, eenv->nrg.after, eenv->nrg.diff,
+				eenv->cap.before, eenv->cap.after, eenv->cap.delta,
+				0, -eenv->nrg.diff);
+		return eenv->nrg.diff;
+	}
+
+	/* Compute normalized energy diff */
+	nrg_delta = normalize_energy(eenv->nrg.diff);
+	eenv->nrg.delta = nrg_delta;
+
+	eenv->payoff = schedtune_accept_deltas(
+			eenv->nrg.delta,
+			eenv->cap.delta,
+			eenv->task);
+
+	trace_sched_energy_diff(eenv->task,
+			eenv->src_cpu, eenv->dst_cpu, eenv->util_delta,
+			eenv->nrg.before, eenv->nrg.after, eenv->nrg.diff,
+			eenv->cap.before, eenv->cap.after, eenv->cap.delta,
+			eenv->nrg.delta, eenv->payoff);
+
+	/*
+	 * When SchedTune is enabled, the energy_diff() function will return
+	 * the computed energy payoff value. Since callers expect the
+	 * energy_diff() return value to be negative, this function returns a
+	 * negative value whenever the evaluation yields a positive payoff,
+	 * which is the condition for accepting a scheduling decision.
+	 */
+	return -eenv->payoff;
+}
+#else /* CONFIG_SCHED_TUNE */
+#define energy_diff(eenv) __energy_diff(eenv)
+#endif
+
+/*
  * Detect M:N waker/wakee relationships via a switching-frequency heuristic.
  * A waker of many should wake a different task than the one last awakened
  * at a frequency roughly N times higher than one of its wakees.  In order
@@ -4700,31 +6875,34 @@
  * being client/server, worker/dispatcher, interrupt source or whatever is
  * irrelevant, spread criteria is apparent partner count exceeds socket size.
  */
-static int wake_wide(struct task_struct *p)
+static int wake_wide(struct task_struct *p, int sibling_count_hint)
 {
 	unsigned int master = current->wakee_flips;
 	unsigned int slave = p->wakee_flips;
-	int factor = this_cpu_read(sd_llc_size);
+	int llc_size = this_cpu_read(sd_llc_size);
+
+	if (sibling_count_hint >= llc_size)
+		return 1;
 
 	if (master < slave)
 		swap(master, slave);
-	if (slave < factor || master < slave * factor)
+	if (slave < llc_size || master < slave * llc_size)
 		return 0;
 	return 1;
 }
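
With the new hint, a wakeup is declared "wide" either when the caller already knows the wakee group covers the LLC, or when the waker/wakee flip frequencies look like an M:N relationship larger than the LLC. A standalone sketch of the decision with an assumed LLC size of 4:

#include <stdio.h>

/* Sketch of the wake_wide() decision; llc_size of 4 is assumed. */
static int wake_wide_sketch(unsigned int master, unsigned int slave,
			    int llc_size, int sibling_count_hint)
{
	if (sibling_count_hint >= llc_size)
		return 1;

	if (master < slave) {
		unsigned int tmp = master;
		master = slave;
		slave = tmp;
	}
	if (slave < llc_size || master < slave * llc_size)
		return 0;
	return 1;
}

int main(void)
{
	/* Waker flipped 20 times, wakee 4: 20 >= 4*4, so spread wide. */
	printf("%d\n", wake_wide_sketch(20, 4, 4, 1));	/* 1 */
	/* Low flip counts: stay affine. */
	printf("%d\n", wake_wide_sketch(3, 2, 4, 1));	/* 0 */
	return 0;
}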
 
-static int wake_affine(struct sched_domain *sd, struct task_struct *p, int sync)
+static int wake_affine(struct sched_domain *sd, struct task_struct *p,
+		       int prev_cpu, int sync)
 {
 	s64 this_load, load;
 	s64 this_eff_load, prev_eff_load;
-	int idx, this_cpu, prev_cpu;
+	int idx, this_cpu;
 	struct task_group *tg;
 	unsigned long weight;
 	int balanced;
 
 	idx	  = sd->wake_idx;
 	this_cpu  = smp_processor_id();
-	prev_cpu  = task_cpu(p);
 	load	  = source_load(prev_cpu, idx);
 	this_load = target_load(this_cpu, idx);
 
@@ -4779,16 +6957,160 @@
 	return 1;
 }
 
+static inline unsigned long task_util(struct task_struct *p)
+{
+	return p->se.avg.util_avg;
+}
+
+static inline unsigned long boosted_task_util(struct task_struct *task);
+
+static inline bool __task_fits(struct task_struct *p, int cpu, int util)
+{
+	unsigned long capacity = capacity_of(cpu);
+
+	util += boosted_task_util(p);
+
+	return (capacity * 1024) > (util * capacity_margin);
+}
+
+static inline bool task_fits_max(struct task_struct *p, int cpu)
+{
+	unsigned long capacity = capacity_of(cpu);
+	unsigned long max_capacity = cpu_rq(cpu)->rd->max_cpu_capacity.val;
+
+	if (capacity == max_capacity)
+		return true;
+
+	if (capacity * capacity_margin > max_capacity * 1024)
+		return true;
+
+	return __task_fits(p, cpu, 0);
+}
+
+static bool __cpu_overutilized(int cpu, int delta)
+{
+	return (capacity_of(cpu) * 1024) < ((cpu_util(cpu) + delta) * capacity_margin);
+}
+
+static bool cpu_overutilized(int cpu)
+{
+	return __cpu_overutilized(cpu, 0);
+}
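
__task_fits() and __cpu_overutilized() compare capacity and utilization cross-multiplied against capacity_margin, which this patch defines elsewhere; EAS kernels commonly use 1280, which makes a CPU "fit" while utilization stays under roughly 80% of its capacity. A sketch of the comparison with that assumed value:

#include <stdio.h>
#include <stdbool.h>

/* Assumed: EAS kernels typically define capacity_margin as 1280. */
static const unsigned long capacity_margin = 1280;

static bool fits(unsigned long capacity, unsigned long util)
{
	return (capacity * 1024) > (util * capacity_margin);
}

int main(void)
{
	/* A 1024-capacity CPU fits 800 units of util (~78%)... */
	printf("%d\n", fits(1024, 800));	/* 1 */
	/* ...but not 900 units (~88%). */
	printf("%d\n", fits(1024, 900));	/* 0 */
	return 0;
}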
+
+#ifdef CONFIG_SCHED_TUNE
+
+struct reciprocal_value schedtune_spc_rdiv;
+
+static long
+schedtune_margin(unsigned long signal, long boost)
+{
+	long long margin = 0;
+
+	/*
+	 * Signal proportional compensation (SPC)
+	 *
+	 * The Boost (B) value is used to compute a Margin (M) which is
+	 * proportional to the complement of the original Signal (S):
+	 *   M = B * (SCHED_CAPACITY_SCALE - S)
+	 * The obtained M could be used by the caller to "boost" S.
+	 */
+	if (boost >= 0) {
+		margin  = SCHED_CAPACITY_SCALE - signal;
+		margin *= boost;
+	} else
+		margin = -signal * boost;
+
+	margin  = reciprocal_divide(margin, schedtune_spc_rdiv);
+
+	if (boost < 0)
+		margin *= -1;
+	return margin;
+}
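
In other words, a positive boost adds back a boost-proportional fraction of the signal's headroom, while a negative boost removes the same fraction of the signal itself; schedtune_spc_rdiv is the precomputed reciprocal of the percentage divisor. A sketch of the arithmetic under the assumption that boost is a percentage in [-100..100]:

#include <stdio.h>

#define SCHED_CAPACITY_SCALE	1024

/*
 * SPC margin sketch. Plain division stands in for
 * reciprocal_divide(margin, schedtune_spc_rdiv); the divisor of 100
 * assumes boost is expressed as a percentage.
 */
static long spc_margin(unsigned long signal, long boost)
{
	long long margin;

	if (boost >= 0)
		margin = (SCHED_CAPACITY_SCALE - signal) * boost;
	else
		margin = -(long long)signal * boost;

	margin /= 100;

	return (boost < 0) ? -margin : margin;
}

int main(void)
{
	/* util 256 with +50% boost: 50 * (1024 - 256) / 100 = 384. */
	printf("%ld\n", spc_margin(256, 50));
	/* util 256 with -50% boost: -(256 * 50 / 100) = -128. */
	printf("%ld\n", spc_margin(256, -50));
	return 0;
}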
+
+static inline int
+schedtune_cpu_margin(unsigned long util, int cpu)
+{
+	int boost = schedtune_cpu_boost(cpu);
+
+	if (boost == 0)
+		return 0;
+
+	return schedtune_margin(util, boost);
+}
+
+static inline long
+schedtune_task_margin(struct task_struct *task)
+{
+	int boost = schedtune_task_boost(task);
+	unsigned long util;
+	long margin;
+
+	if (boost == 0)
+		return 0;
+
+	util = task_util(task);
+	margin = schedtune_margin(util, boost);
+
+	return margin;
+}
+
+#else /* CONFIG_SCHED_TUNE */
+
+static inline int
+schedtune_cpu_margin(unsigned long util, int cpu)
+{
+	return 0;
+}
+
+static inline int
+schedtune_task_margin(struct task_struct *task)
+{
+	return 0;
+}
+
+#endif /* CONFIG_SCHED_TUNE */
+
+unsigned long
+boosted_cpu_util(int cpu)
+{
+	unsigned long util = cpu_util_freq(cpu);
+	long margin = schedtune_cpu_margin(util, cpu);
+
+	trace_sched_boost_cpu(cpu, util, margin);
+
+	return util + margin;
+}
+
+static inline unsigned long
+boosted_task_util(struct task_struct *task)
+{
+	unsigned long util = task_util(task);
+	long margin = schedtune_task_margin(task);
+
+	trace_sched_boost_task(task, util, margin);
+
+	return util + margin;
+}
+
+static unsigned long capacity_spare_wake(int cpu, struct task_struct *p)
+{
+	return capacity_orig_of(cpu) - cpu_util_wake(cpu, p);
+}
+
 /*
  * find_idlest_group finds and returns the least busy CPU group within the
  * domain.
+ *
+ * Assumes p is allowed on at least one CPU in sd.
  */
 static struct sched_group *
 find_idlest_group(struct sched_domain *sd, struct task_struct *p,
 		  int this_cpu, int sd_flag)
 {
 	struct sched_group *idlest = NULL, *group = sd->groups;
-	unsigned long min_load = ULONG_MAX, this_load = 0;
+	struct sched_group *most_spare_sg = NULL;
+	unsigned long min_load = ULONG_MAX, this_load = ULONG_MAX;
+	unsigned long most_spare = 0, this_spare = 0;
 	int load_idx = sd->forkexec_idx;
 	int imbalance = 100 + (sd->imbalance_pct-100)/2;
 
@@ -4796,7 +7118,7 @@
 		load_idx = sd->wake_idx;
 
 	do {
-		unsigned long load, avg_load;
+		unsigned long load, avg_load, spare_cap, max_spare_cap;
 		int local_group;
 		int i;
 
@@ -4808,8 +7130,12 @@
 		local_group = cpumask_test_cpu(this_cpu,
 					       sched_group_cpus(group));
 
-		/* Tally up the load of all CPUs in the group */
+		/*
+		 * Tally up the load of all CPUs in the group and find
+		 * the group containing the CPU with the most spare capacity.
+		 */
 		avg_load = 0;
+		max_spare_cap = 0;
 
 		for_each_cpu(i, sched_group_cpus(group)) {
 			/* Bias balancing toward cpus of our domain */
@@ -4819,6 +7145,11 @@
 				load = target_load(i, load_idx);
 
 			avg_load += load;
+
+			spare_cap = capacity_spare_wake(i, p);
+
+			if (spare_cap > max_spare_cap)
+				max_spare_cap = spare_cap;
 		}
 
 		/* Adjust by relative CPU capacity of the group */
@@ -4826,22 +7157,51 @@
 
 		if (local_group) {
 			this_load = avg_load;
-		} else if (avg_load < min_load) {
+			this_spare = max_spare_cap;
+		} else {
+			if (avg_load < min_load) {
 			min_load = avg_load;
 			idlest = group;
 		}
+
+			if (most_spare < max_spare_cap) {
+				most_spare = max_spare_cap;
+				most_spare_sg = group;
+			}
+		}
 	} while (group = group->next, group != sd->groups);
 
+	/*
+	 * The cross-over point between using spare capacity and least load
+	 * is too conservative for high utilization tasks on partially
+	 * utilized systems if we require spare_capacity > task_util(p),
+	 * so we allow for some task stuffing by using
+	 * spare_capacity > task_util(p)/2.
+	 *
+	 * Spare capacity can't be used for fork because the utilization has
+	 * not been set yet; we must first select a rq to compute the initial
+	 * utilization.
+	 */
+	if (sd_flag & SD_BALANCE_FORK)
+		goto skip_spare;
+
+	if (this_spare > task_util(p) / 2 &&
+	    imbalance*this_spare > 100*most_spare)
+		return NULL;
+	else if (most_spare > task_util(p) / 2)
+		return most_spare_sg;
+
+skip_spare:
 	if (!idlest || 100*this_load < imbalance*min_load)
 		return NULL;
 	return idlest;
 }
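
So the group choice now has two regimes: for wakeups, prefer the group whose best CPU has spare capacity above half the task's utilization (weighted by the imbalance percentage against the remote group), and fall back to the classic least-load comparison otherwise. A sketch of that crossover, assuming the common imbalance_pct of 125 (giving imbalance = 112):

#include <stdio.h>

/*
 * Spare-capacity crossover from find_idlest_group(); imbalance follows
 * the kernel convention imbalance = 100 + (imbalance_pct - 100) / 2.
 */
static const char *pick(unsigned long task_util, unsigned long this_spare,
			unsigned long most_spare, int imbalance)
{
	if (this_spare > task_util / 2 &&
	    imbalance * this_spare > 100 * most_spare)
		return "local group";
	if (most_spare > task_util / 2)
		return "most-spare group";
	return "least-loaded group";
}

int main(void)
{
	/* Local spare wins: 112*300 > 100*250 and 300 > 200/2. */
	printf("%s\n", pick(200, 300, 250, 112));
	/* Local spare too small, remote spare sufficient. */
	printf("%s\n", pick(200, 50, 250, 112));
	return 0;
}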
 
 /*
- * find_idlest_cpu - find the idlest cpu among the cpus in group.
+ * find_idlest_group_cpu - find the idlest cpu among the cpus in group.
  */
 static int
-find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
+find_idlest_group_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
 {
 	unsigned long load, min_load = ULONG_MAX;
 	unsigned int min_exit_latency = UINT_MAX;
@@ -4850,6 +7210,10 @@
 	int shallowest_idle_cpu = -1;
 	int i;
 
+	/* Check if we have any choice: */
+	if (group->group_weight == 1)
+		return cpumask_first(sched_group_cpus(group));
+
 	/* Traverse only the allowed CPUs */
 	for_each_cpu_and(i, sched_group_cpus(group), tsk_cpus_allowed(p)) {
 		if (idle_cpu(i)) {
@@ -4886,23 +7250,102 @@
 	return shallowest_idle_cpu != -1 ? shallowest_idle_cpu : least_loaded_cpu;
 }
 
+static inline int find_idlest_cpu(struct sched_domain *sd, struct task_struct *p,
+				  int cpu, int prev_cpu, int sd_flag)
+{
+	int new_cpu = cpu;
+	int wu = sd_flag & SD_BALANCE_WAKE;
+	int cas_cpu = -1;
+
+	if (wu) {
+		schedstat_inc(p, se.statistics.nr_wakeups_cas_attempts);
+		schedstat_inc(this_rq(), eas_stats.cas_attempts);
+	}
+
+	if (!cpumask_intersects(sched_domain_span(sd), &p->cpus_allowed))
+		return prev_cpu;
+
+	while (sd) {
+		struct sched_group *group;
+		struct sched_domain *tmp;
+		int weight;
+
+		if (wu)
+			schedstat_inc(sd, eas_stats.cas_attempts);
+
+		if (!(sd->flags & sd_flag)) {
+			sd = sd->child;
+			continue;
+		}
+
+		group = find_idlest_group(sd, p, cpu, sd_flag);
+		if (!group) {
+			sd = sd->child;
+			continue;
+		}
+
+		new_cpu = find_idlest_group_cpu(group, p, cpu);
+		if (new_cpu == cpu) {
+			/* Now try balancing at a lower domain level of cpu */
+			sd = sd->child;
+			continue;
+		}
+
+		/* Now try balancing at a lower domain level of new_cpu */
+		cpu = cas_cpu = new_cpu;
+		weight = sd->span_weight;
+		sd = NULL;
+		for_each_domain(cpu, tmp) {
+			if (weight <= tmp->span_weight)
+				break;
+			if (tmp->flags & sd_flag)
+				sd = tmp;
+		}
+		/* while loop will break here if sd == NULL */
+	}
+
+	if (wu && (cas_cpu >= 0)) {
+		schedstat_inc(p, se.statistics.nr_wakeups_cas_count);
+		schedstat_inc(this_rq(), eas_stats.cas_count);
+	}
+
+	return new_cpu;
+}
+
 /*
  * Try and locate an idle CPU in the sched_domain.
  */
-static int select_idle_sibling(struct task_struct *p, int target)
+static int select_idle_sibling(struct task_struct *p, int prev, int target)
 {
 	struct sched_domain *sd;
 	struct sched_group *sg;
-	int i = task_cpu(p);
-
-	if (idle_cpu(target))
+	int best_idle_cpu = -1;
+	int best_idle_cstate = INT_MAX;
+	unsigned long best_idle_capacity = ULONG_MAX;
+
+	schedstat_inc(p, se.statistics.nr_wakeups_sis_attempts);
+	schedstat_inc(this_rq(), eas_stats.sis_attempts);
+
+	if (!sysctl_sched_cstate_aware) {
+		if (idle_cpu(target)) {
+			schedstat_inc(p, se.statistics.nr_wakeups_sis_idle);
+			schedstat_inc(this_rq(), eas_stats.sis_idle);
 		return target;
+		}
 
 	/*
 	 * If the previous cpu is cache affine and idle, don't be stupid.
 	 */
-	if (i != target && cpus_share_cache(i, target) && idle_cpu(i))
-		return i;
+		if (prev != target && cpus_share_cache(prev, target) && idle_cpu(prev)) {
+			schedstat_inc(p, se.statistics.nr_wakeups_sis_cache_affine);
+			schedstat_inc(this_rq(), eas_stats.sis_cache_affine);
+			return prev;
+		}
+	}
+
+	if (!(current->flags & PF_WAKE_UP_IDLE) &&
+			!(p->flags & PF_WAKE_UP_IDLE))
+		return target;
 
 	/*
 	 * Otherwise, iterate the domains and find an eligible idle cpu.
@@ -4911,10 +7354,35 @@
 	for_each_lower_domain(sd) {
 		sg = sd->groups;
 		do {
+			int i;
 			if (!cpumask_intersects(sched_group_cpus(sg),
 						tsk_cpus_allowed(p)))
 				goto next;
 
+			if (sysctl_sched_cstate_aware) {
+				for_each_cpu_and(i, tsk_cpus_allowed(p), sched_group_cpus(sg)) {
+					int idle_idx = idle_get_state_idx(cpu_rq(i));
+					unsigned long new_usage = boosted_task_util(p);
+					unsigned long capacity_orig = capacity_orig_of(i);
+
+					if (new_usage > capacity_orig || !idle_cpu(i))
+						goto next;
+
+					if (i == target && new_usage <= capacity_curr_of(target)) {
+						schedstat_inc(p, se.statistics.nr_wakeups_sis_suff_cap);
+						schedstat_inc(this_rq(), eas_stats.sis_suff_cap);
+						schedstat_inc(sd, eas_stats.sis_suff_cap);
+						return target;
+					}
+
+					if (idle_idx < best_idle_cstate &&
+					    capacity_orig <= best_idle_capacity) {
+						best_idle_cpu = i;
+						best_idle_cstate = idle_idx;
+						best_idle_capacity = capacity_orig;
+					}
+				}
+			} else {
 			for_each_cpu(i, sched_group_cpus(sg)) {
 				if (i == target || !idle_cpu(i))
 					goto next;
@@ -4922,49 +7390,465 @@
 
 			target = cpumask_first_and(sched_group_cpus(sg),
 					tsk_cpus_allowed(p));
+				schedstat_inc(p, se.statistics.nr_wakeups_sis_idle_cpu);
+				schedstat_inc(this_rq(), eas_stats.sis_idle_cpu);
+				schedstat_inc(sd, eas_stats.sis_idle_cpu);
 			goto done;
+			}
 next:
 			sg = sg->next;
 		} while (sg != sd->groups);
 	}
+
+	if (best_idle_cpu >= 0)
+		target = best_idle_cpu;
+
 done:
+	schedstat_inc(p, se.statistics.nr_wakeups_sis_count);
+	schedstat_inc(this_rq(), eas_stats.sis_count);
+
 	return target;
 }
 
 /*
- * cpu_util returns the amount of capacity of a CPU that is used by CFS
- * tasks. The unit of the return value must be the one of capacity so we can
- * compare the utilization with the capacity of the CPU that is available for
- * CFS task (ie cpu_capacity).
- *
- * cfs_rq.avg.util_avg is the sum of running time of runnable tasks plus the
- * recent utilization of currently non-runnable tasks on a CPU. It represents
- * the amount of utilization of a CPU in the range [0..capacity_orig] where
- * capacity_orig is the cpu_capacity available at the highest frequency
- * (arch_scale_freq_capacity()).
- * The utilization of a CPU converges towards a sum equal to or less than the
- * current capacity (capacity_curr <= capacity_orig) of the CPU because it is
- * the running time on this CPU scaled by capacity_curr.
- *
- * Nevertheless, cfs_rq.avg.util_avg can be higher than capacity_curr or even
- * higher than capacity_orig because of unfortunate rounding in
- * cfs.avg.util_avg or just after migrating tasks and new task wakeups until
- * the average stabilizes with the new running time. We need to check that the
- * utilization stays within the range of [0..capacity_orig] and cap it if
- * necessary. Without utilization capping, a group could be seen as overloaded
- * (CPU0 utilization at 121% + CPU1 utilization at 80%) whereas CPU1 has 20% of
- * available capacity. We allow utilization to overshoot capacity_curr (but not
- * capacity_orig) as it useful for predicting the capacity required after task
- * migrations (scheduler-driven DVFS).
+ * cpu_util_wake: Compute cpu utilization with any contributions from
+ * the waking task p removed. check_for_migration() looks for a better CPU
+ * for rq->curr; for that case we should return the cpu util with the
+ * contribution of the currently running task p removed.
  */
-static int cpu_util(int cpu)
+static int cpu_util_wake(int cpu, struct task_struct *p)
 {
-	unsigned long util = cpu_rq(cpu)->cfs.avg.util_avg;
-	unsigned long capacity = capacity_orig_of(cpu);
+	unsigned long util, capacity;
+
+#ifdef CONFIG_SCHED_WALT
+	/*
+	 * WALT does not decay idle tasks in the same manner
+	 * as PELT, so it makes little sense to subtract task
+	 * utilization from cpu utilization. Instead just use
+	 * cpu_util for this case.
+	 */
+	if (!walt_disabled && sysctl_sched_use_walt_cpu_util &&
+	    p->state == TASK_WAKING)
+		return cpu_util(cpu);
+#endif
+	/* Task has no contribution or is new */
+	if (cpu != task_cpu(p) || !p->se.avg.last_update_time)
+		return cpu_util(cpu);
+
+	capacity = capacity_orig_of(cpu);
+	util = max_t(long, cpu_util(cpu) - task_util(p), 0);
 
 	return (util >= capacity) ? capacity : util;
 }
 
+static int start_cpu(bool boosted)
+{
+	struct root_domain *rd = cpu_rq(smp_processor_id())->rd;
+
+	return boosted ? rd->max_cap_orig_cpu : rd->min_cap_orig_cpu;
+}
+
+static inline int find_best_target(struct task_struct *p, int *backup_cpu,
+				   bool boosted, bool prefer_idle)
+{
+	unsigned long best_idle_min_cap_orig = ULONG_MAX;
+	unsigned long min_util = boosted_task_util(p);
+	unsigned long target_capacity = ULONG_MAX;
+	unsigned long min_wake_util = ULONG_MAX;
+	unsigned long target_max_spare_cap = 0;
+	unsigned long best_active_util = ULONG_MAX;
+	int best_idle_cstate = INT_MAX;
+	struct sched_domain *sd;
+	struct sched_group *sg;
+	int best_active_cpu = -1;
+	int best_idle_cpu = -1;
+	int target_cpu = -1;
+	int cpu, i;
+
+	*backup_cpu = -1;
+
+	schedstat_inc(p, se.statistics.nr_wakeups_fbt_attempts);
+	schedstat_inc(this_rq(), eas_stats.fbt_attempts);
+
+	/* Find start CPU based on boost value */
+	cpu = start_cpu(boosted);
+	if (cpu < 0) {
+		schedstat_inc(p, se.statistics.nr_wakeups_fbt_no_cpu);
+		schedstat_inc(this_rq(), eas_stats.fbt_no_cpu);
+		return -1;
+	}
+
+	/* Find SD for the start CPU */
+	sd = rcu_dereference(per_cpu(sd_ea, cpu));
+	if (!sd) {
+		schedstat_inc(p, se.statistics.nr_wakeups_fbt_no_sd);
+		schedstat_inc(this_rq(), eas_stats.fbt_no_sd);
+		return -1;
+	}
+
+	/* Scan CPUs in all SDs */
+	sg = sd->groups;
+	do {
+		for_each_cpu_and(i, tsk_cpus_allowed(p), sched_group_cpus(sg)) {
+			unsigned long capacity_curr = capacity_curr_of(i);
+			unsigned long capacity_orig = capacity_orig_of(i);
+			unsigned long wake_util, new_util;
+
+			if (!cpu_online(i))
+				continue;
+
+			if (walt_cpu_high_irqload(i))
+				continue;
+
+			/*
+			 * p's blocked utilization is still accounted for on prev_cpu
+			 * so prev_cpu will receive a negative bias due to the double
+			 * accounting. However, the blocked utilization may be zero.
+			 */
+			wake_util = cpu_util_wake(i, p);
+			new_util = wake_util + task_util(p);
+
+			/*
+			 * Ensure minimum capacity to grant the required boost.
+			 * The target CPU can be already at a capacity level higher
+			 * than the one required to boost the task.
+			 */
+			new_util = max(min_util, new_util);
+			if (new_util > capacity_orig)
+				continue;
+
+			/*
+			 * Case A) Latency sensitive tasks
+			 *
+			 * Unconditionally favor tasks that prefer an idle CPU,
+			 * to improve latency.
+			 *
+			 * Looking for:
+			 * - an idle CPU, whatever its idle_state is, since
+			 *   the first CPUs we explore are more likely to be
+			 *   reserved for latency sensitive tasks.
+			 * - a non idle CPU where the task fits in its current
+			 *   capacity and has the maximum spare capacity.
+			 * - a non idle CPU with lower contention from other
+			 *   tasks and running at the lowest possible OPP.
+			 *
+			 * The last two goals try to favor a non idle CPU
+			 * where the task can run as if it were "almost alone".
+			 * A maximum spare capacity CPU is favoured since
+			 * the task already fits into that CPU's capacity
+			 * without waiting for an OPP change.
+			 *
+			 * The following code path is the only one in the CPUs
+			 * exploration loop which is always used by
+			 * prefer_idle tasks. It exits the loop with either a
+			 * best_active_cpu or a target_cpu which should
+			 * represent an optimal choice for latency sensitive
+			 * tasks.
+			 */
+			if (prefer_idle) {
+
+				/*
+				 * Case A.1: IDLE CPU
+				 * Return the first IDLE CPU we find.
+				 */
+				if (idle_cpu(i)) {
+					schedstat_inc(p, se.statistics.nr_wakeups_fbt_pref_idle);
+					schedstat_inc(this_rq(), eas_stats.fbt_pref_idle);
+
+					trace_sched_find_best_target(p,
+							prefer_idle, min_util,
+							cpu, best_idle_cpu,
+							best_active_cpu, i);
+
+					return i;
+				}
+
+				/*
+				 * Case A.2: Target ACTIVE CPU
+				 * Favor CPUs with max spare capacity.
+				 */
+				if ((capacity_curr > new_util) &&
+					(capacity_orig - new_util > target_max_spare_cap)) {
+					target_max_spare_cap = capacity_orig - new_util;
+					target_cpu = i;
+					continue;
+				}
+				if (target_cpu != -1)
+					continue;
+
+				/*
+				 * Case A.3: Backup ACTIVE CPU
+				 * Favor CPUs with:
+				 * - lower utilization due to other tasks
+				 * - lower utilization with the task in
+				 */
+				if (wake_util > min_wake_util)
+					continue;
+				if (new_util > best_active_util)
+					continue;
+				min_wake_util = wake_util;
+				best_active_util = new_util;
+				best_active_cpu = i;
+				continue;
+			}
+
+			/*
+			 * Enforce EAS mode
+			 *
+			 * For non latency sensitive tasks, skip CPUs that
+			 * will be overutilized by moving the task there.
+			 *
+			 * The goal here is to remain in EAS mode as long as
+			 * possible at least for !prefer_idle tasks.
+			 */
+			if ((new_util * capacity_margin) >
+			    (capacity_orig * SCHED_CAPACITY_SCALE))
+				continue;
+
+			/*
+			 * Case B) Non latency sensitive tasks on IDLE CPUs.
+			 *
+			 * Find an optimal backup IDLE CPU for non latency
+			 * sensitive tasks.
+			 *
+			 * Looking for:
+			 * - minimizing the capacity_orig,
+			 *   i.e. preferring LITTLE CPUs
+			 * - favoring shallowest idle states
+			 *   i.e. avoid to wakeup deep-idle CPUs
+			 *
+			 * The following code path is used by non latency
+			 * sensitive tasks if IDLE CPUs are available. If at
+			 * least one such CPU is available, it sets the
+			 * best_idle_cpu to the most suitable idle CPU to be
+			 * selected.
+			 *
+			 * If idle CPUs are available, favour these CPUs to
+			 * improve performance by spreading tasks.
+			 * Indeed, the energy_diff() computed by the caller
+			 * will take care of minimizing energy consumption
+			 * without affecting performance.
+			 */
+			if (idle_cpu(i)) {
+				int idle_idx = idle_get_state_idx(cpu_rq(i));
+
+				/* Select idle CPU with lower cap_orig */
+				if (capacity_orig > best_idle_min_cap_orig)
+					continue;
+
+				/*
+				 * Skip CPUs in deeper idle state, but only
+				 * if they are also less energy efficient.
+				 * IOW, prefer a deep IDLE LITTLE CPU vs a
+				 * shallow idle big CPU.
+				 */
+				if (sysctl_sched_cstate_aware &&
+				    best_idle_cstate <= idle_idx)
+					continue;
+
+				/* Keep track of best idle CPU */
+				best_idle_min_cap_orig = capacity_orig;
+				best_idle_cstate = idle_idx;
+				best_idle_cpu = i;
+				continue;
+			}
+
+			/*
+			 * Case C) Non latency sensitive tasks on ACTIVE CPUs.
+			 *
+			 * Pack tasks in the most energy efficient capacities.
+			 *
+			 * This task packing strategy prefers more energy
+			 * efficient CPUs (i.e. pack on smaller maximum
+			 * capacity CPUs) while also trying to spread tasks to
+			 * run them all at the lower OPP.
+			 *
+			 * This assumes for example that it's more energy
+			 * efficient to run two tasks on two CPUs at a lower
+			 * OPP than packing both on a single CPU but running
+			 * that CPU at a higher OPP.
+			 *
+			 * Thus, this case keeps track of the CPU with the
+			 * smallest maximum capacity and highest spare maximum
+			 * capacity.
+			 */
+
+			/* Favor CPUs with smaller capacity */
+			if (capacity_orig > target_capacity)
+				continue;
+
+			/* Favor CPUs with maximum spare capacity */
+			if ((capacity_orig - new_util) < target_max_spare_cap)
+				continue;
+
+			target_max_spare_cap = capacity_orig - new_util;
+			target_capacity = capacity_orig;
+			target_cpu = i;
+		}
+
+	} while (sg = sg->next, sg != sd->groups);
+
+	/*
+	 * For non latency sensitive tasks, cases B and C in the previous loop,
+	 * we pick the best IDLE CPU only if we were not able to find a target
+	 * ACTIVE CPU.
+	 *
+	 * Policies priorities:
+	 *
+	 * - prefer_idle tasks:
+	 *
+	 *   a) IDLE CPU available, we return immediately
+	 *   b) ACTIVE CPU where the task fits and which has the biggest
+	 *      maximum spare capacity (i.e. target_cpu)
+	 *   c) ACTIVE CPU with less contention due to other tasks
+	 *      (i.e. best_active_cpu)
+	 *
+	 * - NON prefer_idle tasks:
+	 *
+	 *   a) ACTIVE CPU: target_cpu
+	 *   b) IDLE CPU: best_idle_cpu
+	 */
+	if (target_cpu == -1)
+		target_cpu = prefer_idle
+			? best_active_cpu
+			: best_idle_cpu;
+	else
+		*backup_cpu = prefer_idle
+		? best_active_cpu
+		: best_idle_cpu;
+
+	trace_sched_find_best_target(p, prefer_idle, min_util, cpu,
+				     best_idle_cpu, best_active_cpu,
+				     target_cpu);
+
+	schedstat_inc(p, se.statistics.nr_wakeups_fbt_count);
+	schedstat_inc(this_rq(), eas_stats.fbt_count);
+
+	return target_cpu;
+}
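
The final selection reduces to a small amount of branching on what the scan found; a sketch of just that tail logic:

#include <stdio.h>

/* Final choice logic at the end of find_best_target(). */
static int choose(int target_cpu, int best_active_cpu, int best_idle_cpu,
		  int prefer_idle, int *backup_cpu)
{
	*backup_cpu = -1;
	if (target_cpu == -1)
		return prefer_idle ? best_active_cpu : best_idle_cpu;
	*backup_cpu = prefer_idle ? best_active_cpu : best_idle_cpu;
	return target_cpu;
}

int main(void)
{
	int backup, cpu;

	/* No ACTIVE target for a !prefer_idle task: fall back to idle. */
	cpu = choose(-1, 2, 5, 0, &backup);
	printf("cpu=%d backup=%d\n", cpu, backup);	/* cpu=5 backup=-1 */

	/* ACTIVE target found: the idle CPU becomes the backup. */
	cpu = choose(3, 2, 5, 0, &backup);
	printf("cpu=%d backup=%d\n", cpu, backup);	/* cpu=3 backup=5 */
	return 0;
}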
+
+/*
+ * Disable WAKE_AFFINE in the case where task @p doesn't fit in the
+ * capacity of either the waking CPU @cpu or the previous CPU @prev_cpu.
+ *
+ * In that case WAKE_AFFINE doesn't make sense and we'll let
+ * BALANCE_WAKE sort things out.
+ */
+static int wake_cap(struct task_struct *p, int cpu, int prev_cpu)
+{
+	long min_cap, max_cap;
+
+	min_cap = min(capacity_orig_of(prev_cpu), capacity_orig_of(cpu));
+	max_cap = cpu_rq(cpu)->rd->max_cpu_capacity.val;
+
+	/* Minimum capacity is close to max, no need to abort wake_affine */
+	if (max_cap - min_cap < max_cap >> 3)
+		return 0;
+
+	/* Bring task utilization in sync with prev_cpu */
+	sync_entity_load_avg(&p->se);
+
+	return min_cap * 1024 < task_util(p) * capacity_margin;
+}
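
So wake_affine stays enabled on symmetric parts (capacities within max_cap/8 of each other) and is otherwise disabled whenever the task would not fit the smaller CPU under the capacity margin. A sketch with assumed big.LITTLE capacities and the usual capacity_margin of 1280:

#include <stdio.h>

/* Assumed value, as elsewhere in this patch. */
static const long capacity_margin = 1280;

static int wake_cap_sketch(long min_cap, long max_cap, long task_util)
{
	/* Capacities within 12.5% of each other: keep wake_affine. */
	if (max_cap - min_cap < max_cap >> 3)
		return 0;

	/* Otherwise disable it when the task doesn't fit in min_cap. */
	return min_cap * 1024 < task_util * capacity_margin;
}

int main(void)
{
	/* 430 vs 1024: a task of util 400 doesn't fit the LITTLE CPU. */
	printf("%d\n", wake_cap_sketch(430, 1024, 400));	/* 1 */
	/* A util-100 task fits: 430*1024 >= 100*1280. */
	printf("%d\n", wake_cap_sketch(430, 1024, 100));	/* 0 */
	return 0;
}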
+
+static int select_energy_cpu_brute(struct task_struct *p, int prev_cpu, int sync)
+{
+	struct sched_domain *sd;
+	int target_cpu = prev_cpu, tmp_target, tmp_backup;
+	bool boosted, prefer_idle;
+
+	schedstat_inc(p, se.statistics.nr_wakeups_secb_attempts);
+	schedstat_inc(this_rq(), eas_stats.secb_attempts);
+
+	if (sysctl_sched_sync_hint_enable && sync) {
+		int cpu = smp_processor_id();
+
+		if (cpumask_test_cpu(cpu, tsk_cpus_allowed(p))) {
+			schedstat_inc(p, se.statistics.nr_wakeups_secb_sync);
+			schedstat_inc(this_rq(), eas_stats.secb_sync);
+			return cpu;
+		}
+	}
+
+	rcu_read_lock();
+#ifdef CONFIG_CGROUP_SCHEDTUNE
+	boosted = schedtune_task_boost(p) > 0;
+	prefer_idle = schedtune_prefer_idle(p) > 0;
+#else
+	boosted = get_sysctl_sched_cfs_boost() > 0;
+	prefer_idle = 0;
+#endif
+
+	sync_entity_load_avg(&p->se);
+
+	sd = rcu_dereference(per_cpu(sd_ea, prev_cpu));
+	/* Find a cpu with sufficient capacity */
+	tmp_target = find_best_target(p, &tmp_backup, boosted, prefer_idle);
+
+	if (!sd)
+		goto unlock;
+	if (tmp_target >= 0) {
+		target_cpu = tmp_target;
+		if ((boosted || prefer_idle) && idle_cpu(target_cpu)) {
+			schedstat_inc(p, se.statistics.nr_wakeups_secb_idle_bt);
+			schedstat_inc(this_rq(), eas_stats.secb_idle_bt);
+			goto unlock;
+		}
+	}
+
+	if (target_cpu != prev_cpu) {
+		int delta = 0;
+		struct energy_env eenv = {
+			.util_delta     = task_util(p),
+			.src_cpu        = prev_cpu,
+			.dst_cpu        = target_cpu,
+			.task           = p,
+			.trg_cpu	= target_cpu,
+		};
+
+#ifdef CONFIG_SCHED_WALT
+		if (!walt_disabled && sysctl_sched_use_walt_cpu_util &&
+			p->state == TASK_WAKING)
+			delta = task_util(p);
+#endif
+		/* Not enough spare capacity on previous cpu */
+		if (__cpu_overutilized(prev_cpu, delta)) {
+			schedstat_inc(p, se.statistics.nr_wakeups_secb_insuff_cap);
+			schedstat_inc(this_rq(), eas_stats.secb_insuff_cap);
+			goto unlock;
+		}
+
+		if (energy_diff(&eenv) >= 0) {
+			/* No energy saving for target_cpu, try backup */
+			target_cpu = tmp_backup;
+			eenv.dst_cpu = target_cpu;
+			eenv.trg_cpu = target_cpu;
+			if (tmp_backup < 0 ||
+			    tmp_backup == prev_cpu ||
+			    energy_diff(&eenv) >= 0) {
+				schedstat_inc(p, se.statistics.nr_wakeups_secb_no_nrg_sav);
+				schedstat_inc(this_rq(), eas_stats.secb_no_nrg_sav);
+				target_cpu = prev_cpu;
+				goto unlock;
+			}
+		}
+
+		schedstat_inc(p, se.statistics.nr_wakeups_secb_nrg_sav);
+		schedstat_inc(this_rq(), eas_stats.secb_nrg_sav);
+		goto unlock;
+	}
+
+	schedstat_inc(p, se.statistics.nr_wakeups_secb_count);
+	schedstat_inc(this_rq(), eas_stats.secb_count);
+
+unlock:
+	rcu_read_unlock();
+
+	return target_cpu;
+}
+
 /*
  * select_task_rq_fair: Select target runqueue for the waking task in domains
  * that have the 'sd_flag' flag set. In practice, this is SD_BALANCE_WAKE,
@@ -4978,7 +7862,8 @@
  * preempt must be disabled.
  */
 static int
-select_task_rq_fair(struct task_struct *p, int prev_cpu, int sd_flag, int wake_flags)
+select_task_rq_fair(struct task_struct *p, int prev_cpu, int sd_flag, int wake_flags,
+		    int sibling_count_hint)
 {
 	struct sched_domain *tmp, *affine_sd = NULL, *sd = NULL;
 	int cpu = smp_processor_id();
@@ -4986,8 +7871,19 @@
 	int want_affine = 0;
 	int sync = wake_flags & WF_SYNC;
 
-	if (sd_flag & SD_BALANCE_WAKE)
-		want_affine = !wake_wide(p) && cpumask_test_cpu(cpu, tsk_cpus_allowed(p));
+#ifdef CONFIG_SCHED_HMP
+	return select_best_cpu(p, prev_cpu, 0, sync);
+#endif
+
+	if (sd_flag & SD_BALANCE_WAKE) {
+		record_wakee(p);
+		want_affine = !wake_wide(p, sibling_count_hint) &&
+			      !wake_cap(p, cpu, prev_cpu) &&
+			      cpumask_test_cpu(cpu, &p->cpus_allowed);
+	}
+
+	if (energy_aware() && !(cpu_rq(prev_cpu)->rd->overutilized))
+		return select_energy_cpu_brute(p, prev_cpu, sync);
 
 	rcu_read_lock();
 	for_each_domain(cpu, tmp) {
@@ -5012,47 +7908,25 @@
 
 	if (affine_sd) {
 		sd = NULL; /* Prefer wake_affine over balance flags */
-		if (cpu != prev_cpu && wake_affine(affine_sd, p, sync))
+		if (cpu != prev_cpu && wake_affine(affine_sd, p, prev_cpu, sync))
 			new_cpu = cpu;
 	}
 
-	if (!sd) {
-		if (sd_flag & SD_BALANCE_WAKE) /* XXX always ? */
-			new_cpu = select_idle_sibling(p, new_cpu);
-
-	} else while (sd) {
-		struct sched_group *group;
-		int weight;
-
-		if (!(sd->flags & sd_flag)) {
-			sd = sd->child;
-			continue;
-		}
-
-		group = find_idlest_group(sd, p, cpu, sd_flag);
-		if (!group) {
-			sd = sd->child;
-			continue;
+	if (sd && !(sd_flag & SD_BALANCE_FORK)) {
+		/*
+		 * We're going to need the task's util for capacity_spare_wake
+		 * in find_idlest_group. Sync it up to prev_cpu's
+		 * last_update_time.
+		 */
+		sync_entity_load_avg(&p->se);
 		}
 
-		new_cpu = find_idlest_cpu(group, p, cpu);
-		if (new_cpu == -1 || new_cpu == cpu) {
-			/* Now try balancing at a lower domain level of cpu */
-			sd = sd->child;
-			continue;
-		}
+	if (!sd) {
+		if (sd_flag & SD_BALANCE_WAKE) /* XXX always ? */
+			new_cpu = select_idle_sibling(p, prev_cpu, new_cpu);
 
-		/* Now try balancing at a lower domain level of new_cpu */
-		cpu = new_cpu;
-		weight = sd->span_weight;
-		sd = NULL;
-		for_each_domain(cpu, tmp) {
-			if (weight <= tmp->span_weight)
-				break;
-			if (tmp->flags & sd_flag)
-				sd = tmp;
-		}
-		/* while loop will break here if sd == NULL */
+	} else {
+		new_cpu = find_idlest_cpu(sd, p, cpu, prev_cpu, sd_flag);
 	}
 	rcu_read_unlock();
 
@@ -5087,6 +7961,8 @@
 {
 	remove_entity_load_avg(&p->se);
 }
+#else
+#define task_fits_max(p, cpu) true
 #endif /* CONFIG_SMP */
 
 static unsigned long
@@ -5333,6 +8209,8 @@
 	if (hrtick_enabled(rq))
 		hrtick_start_fair(rq, p);
 
+	rq->misfit_task = !task_fits_max(p, rq->cpu);
+
 	return p;
 simple:
 	cfs_rq = &rq->cfs;
@@ -5354,9 +8232,12 @@
 	if (hrtick_enabled(rq))
 		hrtick_start_fair(rq, p);
 
+	rq->misfit_task = !task_fits_max(p, rq->cpu);
+
 	return p;
 
 idle:
+	rq->misfit_task = 0;
 	/*
 	 * This is OK, because current is on_cpu, which avoids it being picked
 	 * for load-balance and preemption/IRQs are still disabled avoiding
@@ -5569,10 +8450,21 @@
 
 enum fbq_type { regular, remote, all };
 
+enum group_type {
+	group_other = 0,
+	group_misfit_task,
+	group_imbalanced,
+	group_overloaded,
+};
+
 #define LBF_ALL_PINNED	0x01
 #define LBF_NEED_BREAK	0x02
 #define LBF_DST_PINNED  0x04
 #define LBF_SOME_PINNED	0x08
+#define LBF_BIG_TASK_ACTIVE_BALANCE 0x80
+#define LBF_IGNORE_BIG_TASKS 0x100
+#define LBF_IGNORE_PREFERRED_CLUSTER_TASKS 0x200
+#define LBF_MOVED_RELATED_THREAD_GROUP_TASK 0x400
 
 struct lb_env {
 	struct sched_domain	*sd;
@@ -5587,8 +8479,11 @@
 	int			new_dst_cpu;
 	enum cpu_idle_type	idle;
 	long			imbalance;
+	unsigned int		src_grp_nr_running;
 	/* The set of CPUs under consideration for load-balancing */
 	struct cpumask		*cpus;
+	unsigned int		busiest_grp_capacity;
+	unsigned int		busiest_nr_running;
 
 	unsigned int		flags;
 
@@ -5597,7 +8492,9 @@
 	unsigned int		loop_max;
 
 	enum fbq_type		fbq_type;
+	enum group_type		busiest_group_type;
 	struct list_head	tasks;
+	enum sched_boost_policy	boost_policy;
 };
 
 /*
@@ -5695,6 +8592,7 @@
 int can_migrate_task(struct task_struct *p, struct lb_env *env)
 {
 	int tsk_cache_hot;
+	int twf, group_cpus;
 
 	lockdep_assert_held(&env->src_rq->lock);
 
@@ -5741,6 +8639,39 @@
 	/* Record that we found at least one task that could run on dst_cpu */
 	env->flags &= ~LBF_ALL_PINNED;
 
+	if (cpu_capacity(env->dst_cpu) > cpu_capacity(env->src_cpu)) {
+		if (nr_big_tasks(env->src_rq) && !is_big_task(p))
+			return 0;
+
+		if (env->boost_policy == SCHED_BOOST_ON_BIG &&
+					!task_sched_boost(p))
+			return 0;
+	}
+
+	twf = task_will_fit(p, env->dst_cpu);
+
+	/*
+	 * Attempt to not pull tasks that don't fit. We may get lucky and find
+	 * one that actually fits.
+	 */
+	if (env->flags & LBF_IGNORE_BIG_TASKS && !twf)
+		return 0;
+
+	if (env->flags & LBF_IGNORE_PREFERRED_CLUSTER_TASKS &&
+	    !preferred_cluster(rq_cluster(cpu_rq(env->dst_cpu)), p))
+		return 0;
+
+	/*
+	 * Group imbalance can sometimes cause work to be pulled across groups
+	 * even though the group could have managed the imbalance on its own.
+	 * Prevent inter-cluster migrations for big tasks when the number of
+	 * tasks is lower than the capacity of the group.
+	 */
+	group_cpus = DIV_ROUND_UP(env->busiest_grp_capacity,
+						 SCHED_CAPACITY_SCALE);
+	if (!twf && env->busiest_nr_running <= group_cpus)
+		return 0;
+
 	if (task_running(env->src_rq, p)) {
 		schedstat_inc(p, se.statistics.nr_failed_migrations_running);
 		return 0;
@@ -5748,15 +8679,16 @@
 
 	/*
 	 * Aggressive migration if:
-	 * 1) destination numa is preferred
-	 * 2) task is cache cold, or
-	 * 3) too many balance attempts have failed.
+	 * 1) IDLE or NEWLY_IDLE balance.
+	 * 2) destination numa is preferred
+	 * 3) task is cache cold, or
+	 * 4) too many balance attempts have failed.
 	 */
 	tsk_cache_hot = migrate_degrades_locality(p, env);
 	if (tsk_cache_hot == -1)
 		tsk_cache_hot = task_hot(p, env);
 
-	if (tsk_cache_hot <= 0 ||
+	if (env->idle != CPU_NOT_IDLE || tsk_cache_hot <= 0 ||
 	    env->sd->nr_balance_failed > env->sd->cache_nice_tries) {
 		if (tsk_cache_hot == 1) {
 			schedstat_inc(env->sd, lb_hot_gained[env->idle]);
@@ -5776,9 +8708,13 @@
 {
 	lockdep_assert_held(&env->src_rq->lock);
 
-	deactivate_task(env->src_rq, p, 0);
 	p->on_rq = TASK_ON_RQ_MIGRATING;
+	deactivate_task(env->src_rq, p, 0);
+	double_lock_balance(env->src_rq, env->dst_rq);
 	set_task_cpu(p, env->dst_cpu);
+	if (task_in_related_thread_group(p))
+		env->flags |= LBF_MOVED_RELATED_THREAD_GROUP_TASK;
+	double_unlock_balance(env->src_rq, env->dst_rq);
 }
 
 /*
@@ -5806,6 +8742,7 @@
 		 * inside detach_tasks().
 		 */
 		schedstat_inc(env->sd, lb_gained[env->idle]);
+
 		return p;
 	}
 	return NULL;
@@ -5825,12 +8762,20 @@
 	struct task_struct *p;
 	unsigned long load;
 	int detached = 0;
+	int orig_loop = env->loop;
 
 	lockdep_assert_held(&env->src_rq->lock);
 
 	if (env->imbalance <= 0)
 		return 0;
 
+	if (!same_cluster(env->dst_cpu, env->src_cpu))
+		env->flags |= LBF_IGNORE_PREFERRED_CLUSTER_TASKS;
+
+	if (cpu_capacity(env->dst_cpu) < cpu_capacity(env->src_cpu))
+		env->flags |= LBF_IGNORE_BIG_TASKS;
+
+redo:
 	while (!list_empty(tasks)) {
 		/*
 		 * We don't want to steal all, otherwise we may be treated likewise,
@@ -5892,6 +8837,15 @@
 		list_move_tail(&p->se.group_node, tasks);
 	}
 
+	if (env->flags & (LBF_IGNORE_BIG_TASKS |
+			LBF_IGNORE_PREFERRED_CLUSTER_TASKS) && !detached) {
+		tasks = &env->src_rq->cfs_tasks;
+		env->flags &= ~(LBF_IGNORE_BIG_TASKS |
+				LBF_IGNORE_PREFERRED_CLUSTER_TASKS);
+		env->loop = orig_loop;
+		goto redo;
+	}
+
 	/*
 	 * Right now, this is one of only two places we collect this stat
 	 * so we can safely collect detach_one_task() stats here rather
@@ -5910,8 +8864,8 @@
 	lockdep_assert_held(&rq->lock);
 
 	BUG_ON(task_rq(p) != rq);
-	p->on_rq = TASK_ON_RQ_QUEUED;
 	activate_task(rq, p, 0);
+	p->on_rq = TASK_ON_RQ_QUEUED;
 	check_preempt_curr(rq, p, 0);
 }
 
@@ -5966,8 +8920,13 @@
 		if (throttled_hierarchy(cfs_rq))
 			continue;
 
-		if (update_cfs_rq_load_avg(cfs_rq_clock_task(cfs_rq), cfs_rq))
+		if (update_cfs_rq_load_avg(cfs_rq_clock_task(cfs_rq), cfs_rq,
+					   true))
 			update_tg_load_avg(cfs_rq, 0);
+
+		/* Propagate pending load changes to the parent */
+		if (cfs_rq->tg->se[cpu])
+			update_load_avg(cfs_rq->tg->se[cpu], 0);
 	}
 	raw_spin_unlock_irqrestore(&rq->lock, flags);
 }
@@ -6027,7 +8986,7 @@
 
 	raw_spin_lock_irqsave(&rq->lock, flags);
 	update_rq_clock(rq);
-	update_cfs_rq_load_avg(cfs_rq_clock_task(cfs_rq), cfs_rq);
+	update_cfs_rq_load_avg(cfs_rq_clock_task(cfs_rq), cfs_rq, true);
 	raw_spin_unlock_irqrestore(&rq->lock, flags);
 }
 
@@ -6039,12 +8998,6 @@
 
 /********** Helpers for find_busiest_group ************************/
 
-enum group_type {
-	group_other = 0,
-	group_imbalanced,
-	group_overloaded,
-};
-
 /*
  * sg_lb_stats - stats of a sched_group required for load_balancing
  */
@@ -6056,10 +9009,15 @@
 	unsigned long group_capacity;
 	unsigned long group_util; /* Total utilization of the group */
 	unsigned int sum_nr_running; /* Nr tasks running in the group */
+#ifdef CONFIG_SCHED_HMP
+	unsigned long sum_nr_big_tasks;
+	u64 group_cpu_load; /* Scaled load of all CPUs of the group */
+#endif
 	unsigned int idle_cpus;
 	unsigned int group_weight;
 	enum group_type group_type;
 	int group_no_capacity;
+	int group_misfit_task; /* A cpu has a task too big for its capacity */
 #ifdef CONFIG_NUMA_BALANCING
 	unsigned int nr_numa_running;
 	unsigned int nr_preferred_running;
@@ -6098,10 +9056,64 @@
 			.avg_load = 0UL,
 			.sum_nr_running = 0,
 			.group_type = group_other,
+#ifdef CONFIG_SCHED_HMP
+			.sum_nr_big_tasks = 0UL,
+			.group_cpu_load = 0ULL,
+#endif
 		},
 	};
 }
 
+#ifdef CONFIG_SCHED_HMP
+
+static int
+bail_inter_cluster_balance(struct lb_env *env, struct sd_lb_stats *sds)
+{
+	int local_cpu, busiest_cpu;
+	int local_capacity, busiest_capacity;
+	int local_pwr_cost, busiest_pwr_cost;
+	int nr_cpus;
+	int boost = sched_boost();
+
+	if (!sysctl_sched_restrict_cluster_spill ||
+		boost == FULL_THROTTLE_BOOST || boost == CONSERVATIVE_BOOST)
+		return 0;
+
+	local_cpu = group_first_cpu(sds->local);
+	busiest_cpu = group_first_cpu(sds->busiest);
+
+	local_capacity = cpu_max_possible_capacity(local_cpu);
+	busiest_capacity = cpu_max_possible_capacity(busiest_cpu);
+
+	local_pwr_cost = cpu_max_power_cost(local_cpu);
+	busiest_pwr_cost = cpu_max_power_cost(busiest_cpu);
+
+	if (local_pwr_cost <= busiest_pwr_cost)
+		return 0;
+
+	if (local_capacity > busiest_capacity &&
+			sds->busiest_stat.sum_nr_big_tasks)
+		return 0;
+
+	nr_cpus = cpumask_weight(sched_group_cpus(sds->busiest));
+	if ((sds->busiest_stat.group_cpu_load < nr_cpus * sched_spill_load) &&
+		(sds->busiest_stat.sum_nr_running <
+			nr_cpus * sysctl_sched_spill_nr_run))
+		return 1;
+
+	return 0;
+}
+
+#else	/* CONFIG_SCHED_HMP */
+
+static inline int
+bail_inter_cluster_balance(struct lb_env *env, struct sd_lb_stats *sds)
+{
+	return 0;
+}
+
+#endif	/* CONFIG_SCHED_HMP */
+
 /**
  * get_sd_load_idx - Obtain the load index for a given sched domain.
  * @sd: The sched_domain whose load_idx is to be obtained.
@@ -6151,19 +9163,58 @@
 
 	used = div_u64(avg, total);
 
+	/*
+	 * deadline bandwidth is defined at system level so we must
+	 * weight this bandwidth with the max capacity of the system.
+	 * As a reminder, avg_bw is 20 bits wide and
+	 * scale_cpu_capacity is 10 bits wide.
+	 */
+	used += div_u64(rq->dl.avg_bw, arch_scale_cpu_capacity(NULL, cpu));
+
 	if (likely(used < SCHED_CAPACITY_SCALE))
 		return SCHED_CAPACITY_SCALE - used;
 
 	return 1;
 }
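
Concretely: dividing the 20-bit avg_bw by the 10-bit CPU capacity yields the deadline bandwidth in the same 10-bit units as the rt usage, so the two can be summed before computing the remaining CFS capacity. A quick numeric sketch (all values assumed):

#include <stdio.h>
#include <stdint.h>

#define SCHED_CAPACITY_SCALE	1024

int main(void)
{
	/* Assumed: avg_bw uses a 20-bit scale (1 << 20 == one full CPU). */
	uint64_t avg_bw = 1 << 18;		/* 25% deadline bandwidth */
	uint64_t cpu_cap = SCHED_CAPACITY_SCALE;/* 10-bit capacity scale */
	uint64_t used = 300;			/* rt/irq usage, assumed */

	/* (1 << 18) / 1024 = 256, i.e. 25% in 10-bit capacity units. */
	used += avg_bw / cpu_cap;

	printf("remaining = %llu\n",
	       (unsigned long long)(SCHED_CAPACITY_SCALE - used));
	return 0;
}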
 
+void init_max_cpu_capacity(struct max_cpu_capacity *mcc)
+{
+	raw_spin_lock_init(&mcc->lock);
+	mcc->val = 0;
+	mcc->cpu = -1;
+}
+
 static void update_cpu_capacity(struct sched_domain *sd, int cpu)
 {
 	unsigned long capacity = arch_scale_cpu_capacity(sd, cpu);
 	struct sched_group *sdg = sd->groups;
+	struct max_cpu_capacity *mcc;
+	unsigned long max_capacity;
+	int max_cap_cpu;
+	unsigned long flags;
 
 	cpu_rq(cpu)->cpu_capacity_orig = capacity;
 
+	mcc = &cpu_rq(cpu)->rd->max_cpu_capacity;
+
+	raw_spin_lock_irqsave(&mcc->lock, flags);
+	max_capacity = mcc->val;
+	max_cap_cpu = mcc->cpu;
+
+	if ((max_capacity > capacity && max_cap_cpu == cpu) ||
+	    (max_capacity < capacity)) {
+		mcc->val = capacity;
+		mcc->cpu = cpu;
+#ifdef CONFIG_SCHED_DEBUG
+		raw_spin_unlock_irqrestore(&mcc->lock, flags);
+		printk_deferred(KERN_INFO "CPU%d: update max cpu_capacity %lu\n",
+				cpu, capacity);
+		goto skip_unlock;
+#endif
+	}
+	raw_spin_unlock_irqrestore(&mcc->lock, flags);
+
+skip_unlock: __attribute__ ((unused));
 	capacity *= scale_rt_capacity(cpu);
 	capacity >>= SCHED_CAPACITY_SHIFT;
 
@@ -6172,13 +9223,15 @@
 
 	cpu_rq(cpu)->cpu_capacity = capacity;
 	sdg->sgc->capacity = capacity;
+	sdg->sgc->max_capacity = capacity;
+	sdg->sgc->min_capacity = capacity;
 }
 
 void update_group_capacity(struct sched_domain *sd, int cpu)
 {
 	struct sched_domain *child = sd->child;
 	struct sched_group *group, *sdg = sd->groups;
-	unsigned long capacity;
+	unsigned long capacity, max_capacity, min_capacity;
 	unsigned long interval;
 
 	interval = msecs_to_jiffies(sd->balance_interval);
@@ -6191,6 +9244,8 @@
 	}
 
 	capacity = 0;
+	max_capacity = 0;
+	min_capacity = ULONG_MAX;
 
 	if (child->flags & SD_OVERLAP) {
 		/*
@@ -6202,6 +9257,8 @@
 			struct sched_group_capacity *sgc;
 			struct rq *rq = cpu_rq(cpu);
 
+			if (cpumask_test_cpu(cpu, cpu_isolated_mask))
+				continue;
 			/*
 			 * build_sched_domains() -> init_sched_groups_capacity()
 			 * gets here before we've attached the domains to the
@@ -6215,12 +9272,14 @@
 			 */
 			if (unlikely(!rq->sd)) {
 				capacity += capacity_of(cpu);
-				continue;
-			}
-
+			} else {
 			sgc = rq->sd->groups->sgc;
 			capacity += sgc->capacity;
 		}
+
+			max_capacity = max(capacity, max_capacity);
+			min_capacity = min(capacity, min_capacity);
+		}
 	} else  {
 		/*
 		 * !SD_OVERLAP domains can assume that child groups
@@ -6229,12 +9288,23 @@
 
 		group = child->groups;
 		do {
-			capacity += group->sgc->capacity;
+			struct sched_group_capacity *sgc = group->sgc;
+
+			cpumask_t *cpus = sched_group_cpus(group);
+
+			/* Revisit this later. This won't work for MT domain */
+			if (!cpu_isolated(cpumask_first(cpus))) {
+				capacity += sgc->capacity;
+				max_capacity = max(sgc->max_capacity, max_capacity);
+				min_capacity = min(sgc->min_capacity, min_capacity);
+			}
 			group = group->next;
 		} while (group != child->groups);
 	}
 
 	sdg->sgc->capacity = capacity;
+	sdg->sgc->max_capacity = max_capacity;
+	sdg->sgc->min_capacity = min_capacity;
 }
 
 /*
@@ -6329,9 +9399,21 @@
 	return false;
 }
 
+
+/*
+ * group_smaller_cpu_capacity: Returns true if sched_group sg has smaller
+ * per-cpu capacity than sched_group ref.
+ */
+static inline bool
+group_smaller_cpu_capacity(struct sched_group *sg, struct sched_group *ref)
+{
+	return sg->sgc->max_capacity + capacity_margin - SCHED_LOAD_SCALE <
+							ref->sgc->max_capacity;
+}
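
Because capacity_margin - SCHED_LOAD_SCALE is the margin itself (256 for the common capacity_margin of 1280), a group only counts as "smaller" when its max per-cpu capacity trails the reference by more than ~25% of SCHED_LOAD_SCALE. A sketch with assumed big.LITTLE capacities:

#include <stdio.h>
#include <stdbool.h>

#define SCHED_LOAD_SCALE	1024
/* Assumed value, as elsewhere in this patch. */
static const unsigned long capacity_margin = 1280;

/* sg is "smaller" only if it trails ref by more than the 256 margin. */
static bool smaller_cpu_capacity(unsigned long sg_max, unsigned long ref_max)
{
	return sg_max + capacity_margin - SCHED_LOAD_SCALE < ref_max;
}

int main(void)
{
	printf("%d\n", smaller_cpu_capacity(430, 1024));	/* 1 */
	printf("%d\n", smaller_cpu_capacity(900, 1024));	/* 0 */
	return 0;
}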
+
 static inline enum
 group_type group_classify(struct sched_group *group,
-			  struct sg_lb_stats *sgs)
+			  struct sg_lb_stats *sgs, struct lb_env *env)
 {
 	if (sgs->group_no_capacity)
 		return group_overloaded;
@@ -6339,9 +9421,44 @@
 	if (sg_imbalanced(group))
 		return group_imbalanced;
 
+	if (sgs->group_misfit_task)
+		return group_misfit_task;
+
 	return group_other;
 }
 
+#ifdef CONFIG_NO_HZ_COMMON
+/*
+ * idle load balancing data
+ *  - used by the nohz balance, but we want it available here
+ *    so that we can see which CPUs have no tick.
+ */
+static struct {
+	cpumask_var_t idle_cpus_mask;
+	atomic_t nr_cpus;
+	unsigned long next_balance;     /* in jiffy units */
+} nohz ____cacheline_aligned;
+
+static inline void update_cpu_stats_if_tickless(struct rq *rq)
+{
+	/* only called from update_sg_lb_stats when irqs are disabled */
+	if (cpumask_test_cpu(rq->cpu, nohz.idle_cpus_mask)) {
+		/* rate limit updates to at most once per jiffy */
+		if (READ_ONCE(jiffies) <= rq->last_load_update_tick)
+			return;
+
+		raw_spin_lock(&rq->lock);
+		update_rq_clock(rq);
+		update_idle_cpu_load(rq);
+		update_cfs_rq_load_avg(rq->clock_task, &rq->cfs, false);
+		raw_spin_unlock(&rq->lock);
+	}
+}
+
+#else
+static inline void update_cpu_stats_if_tickless(struct rq *rq) { }
+#endif
+
 /**
  * update_sg_lb_stats - Update sched_group's statistics for load balancing.
  * @env: The load balancing environment.
@@ -6350,20 +9467,35 @@
  * @local_group: Does group contain this_cpu.
  * @sgs: variable to hold the statistics for this group.
  * @overload: Indicate more than one runnable task for any CPU.
+ * @overutilized: Indicate overutilization for any CPU.
  */
 static inline void update_sg_lb_stats(struct lb_env *env,
 			struct sched_group *group, int load_idx,
 			int local_group, struct sg_lb_stats *sgs,
-			bool *overload)
+			bool *overload, bool *overutilized)
 {
 	unsigned long load;
-	int i;
+	int i, nr_running;
 
 	memset(sgs, 0, sizeof(*sgs));
 
 	for_each_cpu_and(i, sched_group_cpus(group), env->cpus) {
 		struct rq *rq = cpu_rq(i);
 
+		trace_sched_cpu_load_lb(cpu_rq(i), idle_cpu(i),
+				     sched_irqload(i),
+				     power_cost(i, 0),
+				     cpu_temp(i));
+
+		if (cpu_isolated(i))
+			continue;
+
+		/*
+		 * If we are entering idle and there are CPUs with their
+		 * tick stopped, do an update for them.
+		 */
+		if (env->idle == CPU_NEWLY_IDLE)
+			update_cpu_stats_if_tickless(rq);
+
 		/* Bias balancing toward cpus of our domain */
 		if (local_group)
 			load = target_load(i, load_idx);
@@ -6374,30 +9506,82 @@
 		sgs->group_util += cpu_util(i);
 		sgs->sum_nr_running += rq->cfs.h_nr_running;
 
-		if (rq->nr_running > 1)
+		nr_running = rq->nr_running;
+		if (nr_running > 1)
 			*overload = true;
 
+#ifdef CONFIG_SCHED_HMP
+		sgs->sum_nr_big_tasks += rq->hmp_stats.nr_big_tasks;
+		sgs->group_cpu_load += cpu_load(i);
+#endif
+
 #ifdef CONFIG_NUMA_BALANCING
 		sgs->nr_numa_running += rq->nr_numa_running;
 		sgs->nr_preferred_running += rq->nr_preferred_running;
 #endif
 		sgs->sum_weighted_load += weighted_cpuload(i);
-		if (idle_cpu(i))
+		/*
+		 * No need to call idle_cpu() if nr_running is not 0
+		 */
+		if (!nr_running && idle_cpu(i))
 			sgs->idle_cpus++;
+
+		if (energy_aware() && cpu_overutilized(i)) {
+			*overutilized = true;
+			if (!sgs->group_misfit_task && rq->misfit_task)
+				sgs->group_misfit_task = capacity_of(i);
+		}
 	}
 
+	/* Isolated CPU has no weight */
+	if (!group->group_weight) {
+		sgs->group_capacity = 0;
+		sgs->avg_load = 0;
+		sgs->group_no_capacity = 1;
+		sgs->group_type = group_other;
+		sgs->group_weight = group->group_weight;
+	} else {
 	/* Adjust by relative CPU capacity of the group */
 	sgs->group_capacity = group->sgc->capacity;
-	sgs->avg_load = (sgs->group_load*SCHED_CAPACITY_SCALE) / sgs->group_capacity;
+		sgs->avg_load = (sgs->group_load*SCHED_CAPACITY_SCALE) /
+							sgs->group_capacity;
+
+		sgs->group_weight = group->group_weight;
+
+		sgs->group_no_capacity = group_is_overloaded(env, sgs);
+		sgs->group_type = group_classify(group, sgs, env);
+	}
 
 	if (sgs->sum_nr_running)
 		sgs->load_per_task = sgs->sum_weighted_load / sgs->sum_nr_running;
+}
 
-	sgs->group_weight = group->group_weight;
+#ifdef CONFIG_SCHED_HMP
+static bool update_sd_pick_busiest_active_balance(struct lb_env *env,
+						  struct sd_lb_stats *sds,
+						  struct sched_group *sg,
+						  struct sg_lb_stats *sgs)
+{
+	if (env->idle != CPU_NOT_IDLE &&
+	    cpu_capacity(env->dst_cpu) > group_rq_capacity(sg)) {
+		if (sgs->sum_nr_big_tasks >
+				sds->busiest_stat.sum_nr_big_tasks) {
+			env->flags |= LBF_BIG_TASK_ACTIVE_BALANCE;
+			return true;
+		}
+	}
 
-	sgs->group_no_capacity = group_is_overloaded(env, sgs);
-	sgs->group_type = group_classify(group, sgs);
+	return false;
 }
+#else
+static bool update_sd_pick_busiest_active_balance(struct lb_env *env,
+						  struct sd_lb_stats *sds,
+						  struct sched_group *sg,
+						  struct sg_lb_stats *sgs)
+{
+	return false;
+}
+#endif
 
 /**
  * update_sd_pick_busiest - return 1 on busiest group
@@ -6419,15 +9603,42 @@
 {
 	struct sg_lb_stats *busiest = &sds->busiest_stat;
 
+	if (update_sd_pick_busiest_active_balance(env, sds, sg, sgs))
+		return true;
+
 	if (sgs->group_type > busiest->group_type)
 		return true;
 
 	if (sgs->group_type < busiest->group_type)
 		return false;
 
+	if (energy_aware()) {
+		/*
+		 * Candidate sg doesn't face any serious load-balance problems
+		 * so don't pick it if the local sg is already filled up.
+		 */
+		if (sgs->group_type == group_other &&
+		    !group_has_capacity(env, &sds->local_stat))
+			return false;
+
 	if (sgs->avg_load <= busiest->avg_load)
 		return false;
 
+		if (!(env->sd->flags & SD_ASYM_CPUCAPACITY))
+			goto asym_packing;
+
+		/*
+		 * Candidate sg has no more than one task per CPU and
+		 * has higher per-CPU capacity. Migrating tasks to less
+		 * capable CPUs may harm throughput. Maximize throughput,
+		 * power/energy consequences are not considered.
+		 */
+		if (sgs->sum_nr_running <= sgs->group_weight &&
+		    group_smaller_cpu_capacity(sds->local, sg))
+			return false;
+	}
+
+asym_packing:
 	/* This is the busiest node in its class. */
 	if (!(env->sd->flags & SD_ASYM_PACKING))
 		return true;
@@ -6478,6 +9689,9 @@
 }
 #endif /* CONFIG_NUMA_BALANCING */
 
+#define lb_sd_parent(sd) \
+	(sd->parent && sd->parent->groups != sd->parent->groups->next)
+
 /**
  * update_sd_lb_stats - Update sched_domain's statistics for load balancing.
  * @env: The load balancing environment.
@@ -6489,7 +9703,7 @@
 	struct sched_group *sg = env->sd->groups;
 	struct sg_lb_stats tmp_sgs;
 	int load_idx, prefer_sibling = 0;
-	bool overload = false;
+	bool overload = false, overutilized = false;
 
 	if (child && child->flags & SD_PREFER_SIBLING)
 		prefer_sibling = 1;
@@ -6511,7 +9725,7 @@
 		}
 
 		update_sg_lb_stats(env, sg, load_idx, local_group, sgs,
-						&overload);
+						&overload, &overutilized);
 
 		if (local_group)
 			goto next_group;
@@ -6530,12 +9744,24 @@
 		    group_has_capacity(env, &sds->local_stat) &&
 		    (sgs->sum_nr_running > 1)) {
 			sgs->group_no_capacity = 1;
-			sgs->group_type = group_classify(sg, sgs);
+			sgs->group_type = group_classify(sg, sgs, env);
 		}
 
+		/*
+		 * Ignore task groups with misfit tasks if local group has no
+		 * capacity or if per-cpu capacity isn't higher.
+		 */
+		if (energy_aware() &&
+		    sgs->group_type == group_misfit_task &&
+		    (!group_has_capacity(env, &sds->local_stat) ||
+		     !group_smaller_cpu_capacity(sg, sds->local)))
+			sgs->group_type = group_other;
+
 		if (update_sd_pick_busiest(env, sds, sg, sgs)) {
 			sds->busiest = sg;
 			sds->busiest_stat = *sgs;
+			env->busiest_nr_running = sgs->sum_nr_running;
+			env->busiest_grp_capacity = sgs->group_capacity;
 		}
 
 next_group:
@@ -6549,10 +9775,23 @@
 	if (env->sd->flags & SD_NUMA)
 		env->fbq_type = fbq_classify_group(&sds->busiest_stat);
 
-	if (!env->sd->parent) {
+	env->src_grp_nr_running = sds->busiest_stat.sum_nr_running;
+
+	if (!lb_sd_parent(env->sd)) {
 		/* update overload indicator if we are at root domain */
 		if (env->dst_rq->rd->overload != overload)
 			env->dst_rq->rd->overload = overload;
+
+		/* Update over-utilization (tipping point, U >= 0) indicator */
+		if (energy_aware() && env->dst_rq->rd->overutilized != overutilized) {
+			env->dst_rq->rd->overutilized = overutilized;
+			trace_sched_overutilized(overutilized);
+		}
+	} else {
+		if (energy_aware() && !env->dst_rq->rd->overutilized && overutilized) {
+			env->dst_rq->rd->overutilized = true;
+			trace_sched_overutilized(true);
+		}
 	}
 
 }
@@ -6701,6 +9940,24 @@
 	 */
 	if (busiest->avg_load <= sds->avg_load ||
 	    local->avg_load >= sds->avg_load) {
+		if (energy_aware()) {
+			/* Misfitting tasks should be migrated in any case */
+			if (busiest->group_type == group_misfit_task) {
+				env->imbalance = busiest->group_misfit_task;
+				return;
+			}
+
+			/*
+			 * Busiest group is overloaded, local is not, use the spare
+			 * cycles to maximize throughput
+			 */
+			if (busiest->group_type == group_overloaded &&
+			    local->group_type <= group_misfit_task) {
+				env->imbalance = busiest->load_per_task;
+				return;
+			}
+		}
+
 		env->imbalance = 0;
 		return fix_small_imbalance(env, sds);
 	}
@@ -6734,6 +9991,11 @@
 		(sds->avg_load - local->avg_load) * local->group_capacity
 	) / SCHED_CAPACITY_SCALE;
 
+	/* Boost imbalance to allow misfit task to be balanced. */
+	if (energy_aware() && busiest->group_type == group_misfit_task)
+		env->imbalance = max_t(long, env->imbalance,
+				     busiest->group_misfit_task);
+
 	/*
 	 * if *imbalance is less than the average load per runnable task
 	 * there is no guarantee that any tasks will be moved so we'll have
@@ -6775,6 +10037,10 @@
 	 * this level.
 	 */
 	update_sd_lb_stats(env, &sds);
+
+	if (energy_aware() && !env->dst_rq->rd->overutilized)
+		goto out_balanced;
+
 	local = &sds.local_stat;
 	busiest = &sds.busiest_stat;
 
@@ -6787,6 +10053,12 @@
 	if (!sds.busiest || busiest->sum_nr_running == 0)
 		goto out_balanced;
 
+	if (env->flags & LBF_BIG_TASK_ACTIVE_BALANCE)
+		goto force_balance;
+
+	if (bail_inter_cluster_balance(env, &sds))
+		goto out_balanced;
+
 	sds.avg_load = (SCHED_CAPACITY_SCALE * sds.total_load)
 						/ sds.total_capacity;
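
The domain-wide average load above is normalized to one unit of capacity: avg_load = SCHED_CAPACITY_SCALE * total_load / total_capacity. A worked example with illustrative big.LITTLE numbers:

#include <stdio.h>

#define SCHED_CAPACITY_SCALE 1024UL

int main(void)
{
	/* e.g. a 4+4 big.LITTLE domain: 4 cpus of 1024 plus 4 of 512 */
	unsigned long total_capacity = 4 * 1024 + 4 * 512;	/* 6144 */
	unsigned long total_load = 3072;

	/* average load per unit of capacity across the whole domain */
	unsigned long avg_load = SCHED_CAPACITY_SCALE * total_load
						/ total_capacity;

	printf("avg_load = %lu\n", avg_load);	/* 1024*3072/6144 = 512 */
	return 0;
}
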
 
@@ -6798,11 +10070,19 @@
 	if (busiest->group_type == group_imbalanced)
 		goto force_balance;
 
-	/* SD_BALANCE_NEWIDLE trumps SMP nice when underutilized */
-	if (env->idle == CPU_NEWLY_IDLE && group_has_capacity(env, local) &&
+	/*
+	 * When dst_cpu is idle, prevent SMP nice and/or asymmetric group
+	 * capacities from resulting in underutilization due to avg_load.
+	 */
+	if (env->idle != CPU_NOT_IDLE && group_has_capacity(env, local) &&
 	    busiest->group_no_capacity)
 		goto force_balance;
 
+	/* Misfitting tasks should be dealt with regardless of the avg load */
+	if (energy_aware() && busiest->group_type == group_misfit_task)
+		goto force_balance;
+
 	/*
 	 * If the local group is busier than the selected busiest group
 	 * don't try and pull any tasks.
@@ -6826,7 +10106,8 @@
 		 * might end up to just move the imbalance on another group
 		 */
 		if ((busiest->group_type != group_overloaded) &&
-				(local->idle_cpus <= (busiest->idle_cpus + 1)))
+		    (local->idle_cpus <= (busiest->idle_cpus + 1)) &&
+		    !group_smaller_cpu_capacity(sds.busiest, sds.local))
 			goto out_balanced;
 	} else {
 		/*
@@ -6839,6 +10120,7 @@
 	}
 
 force_balance:
+	env->busiest_group_type = busiest->group_type;
 	/* Looks like there is an imbalance. Compute it */
 	calculate_imbalance(env, &sds);
 	return sds.busiest;
@@ -6848,6 +10130,60 @@
 	return NULL;
 }
 
+#ifdef CONFIG_SCHED_HMP
+static struct rq *find_busiest_queue_hmp(struct lb_env *env,
+				     struct sched_group *group)
+{
+	struct rq *busiest = NULL, *busiest_big = NULL;
+	u64 max_runnable_avg = 0, max_runnable_avg_big = 0;
+	int max_nr_big = 0, nr_big;
+	bool find_big = !!(env->flags & LBF_BIG_TASK_ACTIVE_BALANCE);
+	int i;
+	cpumask_t cpus;
+
+	cpumask_andnot(&cpus, sched_group_cpus(group), cpu_isolated_mask);
+
+	for_each_cpu(i, &cpus) {
+		struct rq *rq = cpu_rq(i);
+		u64 cumulative_runnable_avg =
+				rq->hmp_stats.cumulative_runnable_avg;
+
+		if (!cpumask_test_cpu(i, env->cpus))
+			continue;
+
+		if (find_big) {
+			nr_big = nr_big_tasks(rq);
+			if (nr_big > max_nr_big ||
+			    (nr_big > 0 && nr_big == max_nr_big &&
+			     cumulative_runnable_avg > max_runnable_avg_big)) {
+				max_runnable_avg_big = cumulative_runnable_avg;
+				busiest_big = rq;
+				max_nr_big = nr_big;
+				continue;
+			}
+		}
+
+		if (cumulative_runnable_avg > max_runnable_avg) {
+			max_runnable_avg = cumulative_runnable_avg;
+			busiest = rq;
+		}
+	}
+
+	if (busiest_big)
+		return busiest_big;
+
+	env->flags &= ~LBF_BIG_TASK_ACTIVE_BALANCE;
+	return busiest;
+}
+#else
+static inline struct rq *find_busiest_queue_hmp(struct lb_env *env,
+                                    struct sched_group *group)
+{
+	return NULL;
+}
+#endif
+
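
find_busiest_queue_hmp() masks out isolated CPUs first, then, when a big-task active balance was requested, prefers the runqueue with the most big tasks, breaking ties by cumulative runnable average; otherwise it simply takes the highest cumulative runnable average. A compact sketch of the big-task comparator (mock rq type, illustrative numbers):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative mock of one runqueue's HMP stats. */
struct mock_rq {
	int nr_big;		/* nr_big_tasks(rq) */
	uint64_t cra;		/* hmp_stats.cumulative_runnable_avg */
};

/* Does @a beat the current best (@best_nr, @best_cra) as a "big" target? */
static bool better_big_target(const struct mock_rq *a,
			      int best_nr, uint64_t best_cra)
{
	if (a->nr_big > best_nr)
		return true;
	return a->nr_big > 0 && a->nr_big == best_nr && a->cra > best_cra;
}

int main(void)
{
	struct mock_rq rqs[] = { {0, 900}, {2, 300}, {2, 500} };
	int best = -1, best_nr = 0;
	uint64_t best_cra = 0;

	for (int i = 0; i < 3; i++) {
		if (better_big_target(&rqs[i], best_nr, best_cra)) {
			best = i;
			best_nr = rqs[i].nr_big;
			best_cra = rqs[i].cra;
		}
	}
	printf("busiest_big = rq %d\n", best);	/* rq 2: same nr_big, higher avg */
	return 0;
}
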
 /*
  * find_busiest_queue - find the busiest runqueue among the cpus in group.
  */
@@ -6858,6 +10194,10 @@
 	unsigned long busiest_load = 0, busiest_capacity = 1;
 	int i;
 
+#ifdef CONFIG_SCHED_HMP
+	return find_busiest_queue_hmp(env, group);
+#endif
+
 	for_each_cpu_and(i, sched_group_cpus(group), env->cpus) {
 		unsigned long capacity, wl;
 		enum fbq_type rt;
@@ -6897,7 +10237,8 @@
 		 */
 
 		if (rq->nr_running == 1 && wl > env->imbalance &&
-		    !check_cpu_capacity(rq, env->sd))
+		    !check_cpu_capacity(rq, env->sd) &&
+		    env->busiest_group_type != group_misfit_task)
 			continue;
 
 		/*
@@ -6925,15 +10266,20 @@
  * Max backoff if we encounter pinned tasks. Pretty arbitrary value, but
  * so long as it is large enough.
  */
-#define MAX_PINNED_INTERVAL	512
+#define MAX_PINNED_INTERVAL	16
 
 /* Working cpumask for load_balance and load_balance_newidle. */
 DEFINE_PER_CPU(cpumask_var_t, load_balance_mask);
 
+#define NEED_ACTIVE_BALANCE_THRESHOLD 10
+
 static int need_active_balance(struct lb_env *env)
 {
 	struct sched_domain *sd = env->sd;
 
+	if (env->flags & LBF_BIG_TASK_ACTIVE_BALANCE)
+		return 1;
+
 	if (env->idle == CPU_NEWLY_IDLE) {
 
 		/*
@@ -6958,10 +10304,27 @@
 			return 1;
 	}
 
-	return unlikely(sd->nr_balance_failed > sd->cache_nice_tries+2);
+	if (energy_aware() &&
+	    capacity_of(env->src_cpu) < capacity_of(env->dst_cpu) &&
+	    capacity_orig_of(env->src_cpu) < capacity_orig_of(env->dst_cpu) &&
+	    env->src_rq->cfs.h_nr_running == 1 &&
+	    cpu_overutilized(env->src_cpu) &&
+	    !cpu_overutilized(env->dst_cpu))
+		return 1;
+
+	return unlikely(sd->nr_balance_failed >
+			sd->cache_nice_tries + NEED_ACTIVE_BALANCE_THRESHOLD);
 }
 
-static int active_load_balance_cpu_stop(void *data);
+static int group_balance_cpu_not_isolated(struct sched_group *sg)
+{
+	cpumask_t cpus;
+
+	cpumask_and(&cpus, sched_group_cpus(sg), sched_group_mask(sg));
+	cpumask_andnot(&cpus, &cpus, cpu_isolated_mask);
+	return cpumask_first(&cpus);
+}
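
group_balance_cpu_not_isolated() narrows group_balance_cpu() so the designated balancer can never be an isolated CPU: intersect the group span with its balance mask, subtract cpu_isolated_mask, take the first remaining CPU. A bitmask sketch (8-bit masks, one bit per CPU; values illustrative):

#include <stdio.h>

int main(void)
{
	unsigned int sg_cpus  = 0x0f;	/* sched_group_cpus(sg): cpus 0-3 */
	unsigned int sg_mask  = 0x0f;	/* sched_group_mask(sg) */
	unsigned int isolated = 0x03;	/* cpu_isolated_mask: cpus 0 and 1 */

	unsigned int cand = (sg_cpus & sg_mask) & ~isolated;

	/* cpumask_first() == lowest set bit; assumes cand != 0 here */
	printf("balance cpu = %d\n", __builtin_ctz(cand));	/* -> 2 */
	return 0;
}
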
 
 static int should_we_balance(struct lb_env *env)
 {
@@ -6980,7 +10343,8 @@
 	sg_mask = sched_group_mask(sg);
 	/* Try to find first idle cpu */
 	for_each_cpu_and(cpu, sg_cpus, env->cpus) {
-		if (!cpumask_test_cpu(cpu, sg_mask) || !idle_cpu(cpu))
+		if (!cpumask_test_cpu(cpu, sg_mask) || !idle_cpu(cpu) ||
+		    cpu_isolated(cpu))
 			continue;
 
 		balance_cpu = cpu;
@@ -6988,7 +10352,7 @@
 	}
 
 	if (balance_cpu == -1)
-		balance_cpu = group_balance_cpu(sg);
+		balance_cpu = group_balance_cpu_not_isolated(sg);
 
 	/*
 	 * First idle cpu or the first cpu(busiest) in this sched group
@@ -7005,10 +10369,10 @@
 			struct sched_domain *sd, enum cpu_idle_type idle,
 			int *continue_balancing)
 {
-	int ld_moved, cur_ld_moved, active_balance = 0;
-	struct sched_domain *sd_parent = sd->parent;
-	struct sched_group *group;
-	struct rq *busiest;
+	int ld_moved = 0, cur_ld_moved, active_balance = 0;
+	struct sched_domain *sd_parent = lb_sd_parent(sd) ? sd->parent : NULL;
+	struct sched_group *group = NULL;
+	struct rq *busiest = NULL;
 	unsigned long flags;
 	struct cpumask *cpus = this_cpu_cpumask_var_ptr(load_balance_mask);
 
@@ -7022,6 +10386,12 @@
 		.cpus		= cpus,
 		.fbq_type	= all,
 		.tasks		= LIST_HEAD_INIT(env.tasks),
+		.imbalance		= 0,
+		.flags			= 0,
+		.loop			= 0,
+		.busiest_nr_running     = 0,
+		.busiest_grp_capacity   = 0,
+		.boost_policy		= sched_boost_policy(),
 	};
 
 	/*
@@ -7073,6 +10443,14 @@
 
 more_balance:
 		raw_spin_lock_irqsave(&busiest->lock, flags);
+		update_rq_clock(busiest);
+
+		/* The world might have changed. Validate assumptions */
+		if (busiest->nr_running <= 1) {
+			raw_spin_unlock_irqrestore(&busiest->lock, flags);
+			env.flags &= ~LBF_ALL_PINNED;
+			goto no_move;
+		}
 
 		/*
 		 * cur_ld_moved - load moved in current iteration
@@ -7161,16 +10539,22 @@
 		}
 	}
 
+no_move:
 	if (!ld_moved) {
-		schedstat_inc(sd, lb_failed[idle]);
+		if (!(env.flags & LBF_BIG_TASK_ACTIVE_BALANCE))
+			schedstat_inc(sd, lb_failed[idle]);
+
 		/*
 		 * Increment the failure counter only on periodic balance.
 		 * We do not want newidle balance, which can be very
 		 * frequent, pollute the failure counter causing
 		 * excessive cache_hot migrations and active balances.
 		 */
-		if (idle != CPU_NEWLY_IDLE)
-			sd->nr_balance_failed++;
+		if (idle != CPU_NEWLY_IDLE &&
+		    !(env.flags & LBF_BIG_TASK_ACTIVE_BALANCE)) {
+			if (env.src_grp_nr_running > 1)
+				sd->nr_balance_failed++;
+		}
 
 		if (need_active_balance(&env)) {
 			raw_spin_lock_irqsave(&busiest->lock, flags);
@@ -7192,7 +10576,8 @@
 			 * ->active_balance_work.  Once set, it's cleared
 			 * only after active load balance is finished.
 			 */
-			if (!busiest->active_balance) {
+			if (!busiest->active_balance &&
+			    !cpu_isolated(cpu_of(busiest))) {
 				busiest->active_balance = 1;
 				busiest->push_cpu = this_cpu;
 				active_balance = 1;
@@ -7203,17 +10588,31 @@
 				stop_one_cpu_nowait(cpu_of(busiest),
 					active_load_balance_cpu_stop, busiest,
 					&busiest->active_balance_work);
+				*continue_balancing = 0;
 			}
 
 			/*
 			 * We've kicked active balancing, reset the failure
 			 * counter.
 			 */
-			sd->nr_balance_failed = sd->cache_nice_tries+1;
+			sd->nr_balance_failed =
+			    sd->cache_nice_tries +
+			    NEED_ACTIVE_BALANCE_THRESHOLD - 1;
 		}
-	} else
+	} else {
 		sd->nr_balance_failed = 0;
 
+		/* Assumes one 'busiest' cpu that we pulled tasks from */
+		if (!same_freq_domain(this_cpu, cpu_of(busiest))) {
+			int check_groups = !!(env.flags &
+					 LBF_MOVED_RELATED_THREAD_GROUP_TASK);
+
+			check_for_freq_change(this_rq, false, check_groups);
+			check_for_freq_change(busiest, false, check_groups);
+		} else {
+			check_for_freq_change(this_rq, true, false);
+		}
+	}
 	if (likely(!active_balance)) {
 		/* We were unbalanced, so reset the balancing interval */
 		sd->balance_interval = sd->min_interval;
@@ -7261,6 +10660,11 @@
 
 	ld_moved = 0;
 out:
+	trace_sched_load_balance(this_cpu, idle, *continue_balancing,
+				 group ? group->cpumask[0] : 0,
+				 busiest ? busiest->nr_running : 0,
+				 env.imbalance, env.flags, ld_moved,
+				 sd->balance_interval);
 	return ld_moved;
 }
 
@@ -7303,6 +10707,9 @@
 	int pulled_task = 0;
 	u64 curr_cost = 0;
 
+	if (cpu_isolated(this_cpu))
+		return 0;
+
 	idle_enter_fair(this_rq);
 
 	/*
@@ -7311,8 +10718,9 @@
 	 */
 	this_rq->idle_stamp = rq_clock(this_rq);
 
-	if (this_rq->avg_idle < sysctl_sched_migration_cost ||
-	    !this_rq->rd->overload) {
+	if (!energy_aware() &&
+	    (this_rq->avg_idle < sysctl_sched_migration_cost ||
+	     !this_rq->rd->overload)) {
 		rcu_read_lock();
 		sd = rcu_dereference_check_sched_domain(this_rq->sd);
 		if (sd)
@@ -7356,9 +10764,12 @@
 
 		/*
 		 * Stop searching for tasks to pull if there are
-		 * now runnable tasks on this rq.
+		 * now runnable tasks on the balance rq or if
+		 * continue_balancing has been unset (only possible
+		 * due to active migration).
 		 */
-		if (pulled_task || this_rq->nr_running > 0)
+		if (pulled_task || this_rq->nr_running > 0 ||
+						!continue_balancing)
 			break;
 	}
 	rcu_read_unlock();
@@ -7405,8 +10816,24 @@
 	int busiest_cpu = cpu_of(busiest_rq);
 	int target_cpu = busiest_rq->push_cpu;
 	struct rq *target_rq = cpu_rq(target_cpu);
-	struct sched_domain *sd;
+	struct sched_domain *sd = NULL;
 	struct task_struct *p = NULL;
+	struct task_struct *push_task = NULL;
+	int push_task_detached = 0;
+	struct lb_env env = {
+		.sd			= sd,
+		.dst_cpu		= target_cpu,
+		.dst_rq			= target_rq,
+		.src_cpu		= busiest_rq->cpu,
+		.src_rq			= busiest_rq,
+		.idle			= CPU_IDLE,
+		.busiest_nr_running 	= 0,
+		.busiest_grp_capacity 	= 0,
+		.flags			= 0,
+		.loop			= 0,
+		.boost_policy		= sched_boost_policy(),
+	};
+	bool moved = false;
 
 	raw_spin_lock_irq(&busiest_rq->lock);
 
@@ -7426,6 +10853,20 @@
 	 */
 	BUG_ON(busiest_rq == target_rq);
 
+	push_task = busiest_rq->push_task;
+	target_cpu = busiest_rq->push_cpu;
+	if (push_task) {
+		if (task_on_rq_queued(push_task) &&
+			push_task->state == TASK_RUNNING &&
+			task_cpu(push_task) == busiest_cpu &&
+					cpu_online(target_cpu)) {
+			detach_task(push_task, &env);
+			push_task_detached = 1;
+			moved = true;
+		}
+		goto out_unlock;
+	}
+
 	/* Search for an sd spanning us and the target CPU. */
 	rcu_read_lock();
 	for_each_domain(target_cpu, sd) {
@@ -7435,33 +10876,50 @@
 	}
 
 	if (likely(sd)) {
-		struct lb_env env = {
-			.sd		= sd,
-			.dst_cpu	= target_cpu,
-			.dst_rq		= target_rq,
-			.src_cpu	= busiest_rq->cpu,
-			.src_rq		= busiest_rq,
-			.idle		= CPU_IDLE,
-		};
-
+		env.sd = sd;
 		schedstat_inc(sd, alb_count);
+		update_rq_clock(busiest_rq);
 
 		p = detach_one_task(&env);
-		if (p)
+		if (p) {
 			schedstat_inc(sd, alb_pushed);
-		else
+			moved = true;
+		} else {
 			schedstat_inc(sd, alb_failed);
-	}
+		}
+	}
 	rcu_read_unlock();
 out_unlock:
 	busiest_rq->active_balance = 0;
+	push_task = busiest_rq->push_task;
+	target_cpu = busiest_rq->push_cpu;
+
+	if (push_task)
+		busiest_rq->push_task = NULL;
+
 	raw_spin_unlock(&busiest_rq->lock);
 
+	if (push_task) {
+		if (push_task_detached)
+			attach_one_task(target_rq, push_task);
+		put_task_struct(push_task);
+		clear_reserved(target_cpu);
+	}
+
 	if (p)
 		attach_one_task(target_rq, p);
 
 	local_irq_enable();
 
+	if (moved && !same_freq_domain(busiest_cpu, target_cpu)) {
+		int check_groups = !!(env.flags &
+					 LBF_MOVED_RELATED_THREAD_GROUP_TASK);
+		check_for_freq_change(busiest_rq, false, check_groups);
+		check_for_freq_change(target_rq, false, check_groups);
+	} else if (moved) {
+		check_for_freq_change(target_rq, true, false);
+	}
+
 	return 0;
 }
 
@@ -7477,15 +10935,49 @@
  *   needed, they will kick the idle load balancer, which then does idle
  *   load balancing for all the idle CPUs.
  */
-static struct {
-	cpumask_var_t idle_cpus_mask;
-	atomic_t nr_cpus;
-	unsigned long next_balance;     /* in jiffy units */
-} nohz ____cacheline_aligned;
 
-static inline int find_new_ilb(void)
+#ifdef CONFIG_SCHED_HMP
+static inline int find_new_hmp_ilb(int type)
+{
+	int call_cpu = raw_smp_processor_id();
+	struct sched_domain *sd;
+	int ilb;
+
+	rcu_read_lock();
+
+	/* Pick an idle cpu "closest" to call_cpu */
+	for_each_domain(call_cpu, sd) {
+		for_each_cpu_and(ilb, nohz.idle_cpus_mask,
+						sched_domain_span(sd)) {
+			if (idle_cpu(ilb) && (type != NOHZ_KICK_RESTRICT ||
+					cpu_max_power_cost(ilb) <=
+					cpu_max_power_cost(call_cpu))) {
+				rcu_read_unlock();
+				reset_balance_interval(ilb);
+				return ilb;
+			}
+		}
+	}
+
+	rcu_read_unlock();
+	return nr_cpu_ids;
+}
+#else	/* CONFIG_SCHED_HMP */
+static inline int find_new_hmp_ilb(int type)
+{
+	return 0;
+}
+#endif	/* CONFIG_SCHED_HMP */
+
+static inline int find_new_ilb(int type)
 {
-	int ilb = cpumask_first(nohz.idle_cpus_mask);
+	int ilb;
+
+#ifdef CONFIG_SCHED_HMP
+	return find_new_hmp_ilb(type);
+#endif
+
+	ilb = cpumask_first(nohz.idle_cpus_mask);
 
 	if (ilb < nr_cpu_ids && idle_cpu(ilb))
 		return ilb;
@@ -7498,13 +10990,13 @@
  * nohz_load_balancer CPU (if there is one) otherwise fallback to any idle
  * CPU (if there is one).
  */
-static void nohz_balancer_kick(void)
+static void nohz_balancer_kick(int type)
 {
 	int ilb_cpu;
 
 	nohz.next_balance++;
 
-	ilb_cpu = find_new_ilb();
+	ilb_cpu = find_new_ilb(type);
 
 	if (ilb_cpu >= nr_cpu_ids)
 		return;
@@ -7521,16 +11013,21 @@
 	return;
 }
 
+void nohz_balance_clear_nohz_mask(int cpu)
+{
+	if (likely(cpumask_test_cpu(cpu, nohz.idle_cpus_mask))) {
+		cpumask_clear_cpu(cpu, nohz.idle_cpus_mask);
+		atomic_dec(&nohz.nr_cpus);
+	}
+}
+
 static inline void nohz_balance_exit_idle(int cpu)
 {
 	if (unlikely(test_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu)))) {
 		/*
 		 * Completely isolated CPUs don't ever set, so we must test.
 		 */
-		if (likely(cpumask_test_cpu(cpu, nohz.idle_cpus_mask))) {
-			cpumask_clear_cpu(cpu, nohz.idle_cpus_mask);
-			atomic_dec(&nohz.nr_cpus);
-		}
+		nohz_balance_clear_nohz_mask(cpu);
 		clear_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu));
 	}
 }
@@ -7587,7 +11084,7 @@
 	/*
 	 * If we're a completely isolated CPU, we don't play.
 	 */
-	if (on_null_domain(cpu_rq(cpu)))
+	if (on_null_domain(cpu_rq(cpu)) || cpu_isolated(cpu))
 		return;
 
 	cpumask_set_cpu(cpu, nohz.idle_cpus_mask);
@@ -7616,7 +11113,13 @@
  */
 void update_max_interval(void)
 {
-	max_load_balance_interval = HZ*num_online_cpus()/10;
+	cpumask_t avail_mask;
+	unsigned int available_cpus;
+
+	cpumask_andnot(&avail_mask, cpu_online_mask, cpu_isolated_mask);
+	available_cpus = cpumask_weight(&avail_mask);
+
+	max_load_balance_interval = HZ*available_cpus/10;
 }
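
With CPU isolation in the picture, the interval clamp is derived from CPUs that can actually participate in balancing. A worked example (HZ value assumed):

#include <stdio.h>

#define HZ 250	/* assumed CONFIG_HZ */

int main(void)
{
	unsigned int online = 8, isolated = 2;
	unsigned int available = online - isolated;

	/* in jiffies: isolated cpus no longer inflate the global clamp */
	printf("max_load_balance_interval = %u\n", HZ * available / 10); /* 150 */
	printf("previous behaviour        = %u\n", HZ * online / 10);    /* 200 */
	return 0;
}
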
 
 /*
@@ -7741,12 +11244,15 @@
 	/* Earliest time when we have to do rebalance again */
 	unsigned long next_balance = jiffies + 60*HZ;
 	int update_next_balance = 0;
+	cpumask_t cpus;
 
 	if (idle != CPU_IDLE ||
 	    !test_bit(NOHZ_BALANCE_KICK, nohz_flags(this_cpu)))
 		goto end;
 
-	for_each_cpu(balance_cpu, nohz.idle_cpus_mask) {
+	cpumask_andnot(&cpus, nohz.idle_cpus_mask, cpu_isolated_mask);
+
+	for_each_cpu(balance_cpu, &cpus) {
 		if (balance_cpu == this_cpu || !idle_cpu(balance_cpu))
 			continue;
 
@@ -7789,6 +11295,79 @@
 	clear_bit(NOHZ_BALANCE_KICK, nohz_flags(this_cpu));
 }
 
+#ifdef CONFIG_SCHED_HMP
+static inline int _nohz_kick_needed_hmp(struct rq *rq, int cpu, int *type)
+{
+	struct sched_domain *sd;
+	int i;
+
+	if (rq->nr_running < 2)
+		return 0;
+
+	if (!sysctl_sched_restrict_cluster_spill ||
+			sched_boost_policy() == SCHED_BOOST_ON_ALL)
+		return 1;
+
+	if (cpu_max_power_cost(cpu) == max_power_cost)
+		return 1;
+
+	rcu_read_lock();
+	sd = rcu_dereference_check_sched_domain(rq->sd);
+	if (!sd) {
+		rcu_read_unlock();
+		return 0;
+	}
+
+	for_each_cpu(i, sched_domain_span(sd)) {
+		if (cpu_load(i) < sched_spill_load &&
+				cpu_rq(i)->nr_running <
+				sysctl_sched_spill_nr_run) {
+			/*
+			 * Change the kick type to restrict the balance
+			 * to CPUs of equal or lower capacity.
+			 */
+			*type = NOHZ_KICK_RESTRICT;
+			break;
+		}
+	}
+	rcu_read_unlock();
+	return 1;
+}
+#else
+static inline int _nohz_kick_needed_hmp(struct rq *rq, int cpu, int *type)
+{
+	return 0;
+}
+#endif
+
+static inline int _nohz_kick_needed(struct rq *rq, int cpu, int *type)
+{
+	unsigned long now = jiffies;
+
+	/*
+	 * None are in tickless mode and hence no need for NOHZ idle load
+	 * balancing.
+	 */
+	if (likely(!atomic_read(&nohz.nr_cpus)))
+		return 0;
+
+#ifdef CONFIG_SCHED_HMP
+	return _nohz_kick_needed_hmp(rq, cpu, type);
+#endif
+
+	if (time_before(now, nohz.next_balance))
+		return 0;
+
+	if (rq->nr_running >= 2 &&
+	    (!energy_aware() || cpu_overutilized(cpu)))
+		return true;
+
+	/* Do idle load balance if there is a misfit task */
+	if (energy_aware())
+		return rq->misfit_task;
+
+	return (rq->nr_running >= 2);
+}
+
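
_nohz_kick_needed() short-circuits when no CPU is tickless, delegates wholesale to the HMP variant under CONFIG_SCHED_HMP, and otherwise kicks when at least two tasks are runnable (EAS additionally requires the CPU to be overutilized) or, under EAS, when a misfit task is present. A condensed sketch of that decision order, with every kernel input mocked as a parameter:

#include <stdbool.h>
#include <stdio.h>

/* Illustrative mock: which conditions make a nohz idle-balance kick needed */
static bool nohz_kick(int nohz_cpus, bool balance_due, int nr_running,
		      bool eas, bool overutilized, bool misfit)
{
	if (nohz_cpus == 0)
		return false;		/* nobody is tickless-idle */
	if (!balance_due)
		return false;		/* nohz.next_balance not reached */
	if (nr_running >= 2 && (!eas || overutilized))
		return true;
	if (eas)
		return misfit;		/* EAS: kick for a misfit task too */
	return nr_running >= 2;
}

int main(void)
{
	printf("%d\n", nohz_kick(2, true, 1, true, false, true));  /* 1 */
	printf("%d\n", nohz_kick(2, true, 3, true, false, false)); /* 0 */
	return 0;
}
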
 /*
  * Current heuristic for kicking the idle load balancer in the presence
  * of an idle cpu in the system.
@@ -7800,12 +11379,14 @@
  *   - For SD_ASYM_PACKING, if the lower numbered cpu's in the scheduler
  *     domain span are idle.
  */
-static inline bool nohz_kick_needed(struct rq *rq)
+static inline bool nohz_kick_needed(struct rq *rq, int *type)
 {
-	unsigned long now = jiffies;
+#ifndef CONFIG_SCHED_HMP
 	struct sched_domain *sd;
 	struct sched_group_capacity *sgc;
-	int nr_busy, cpu = rq->cpu;
+	int nr_busy;
+#endif
+	int cpu = rq->cpu;
 	bool kick = false;
 
 	if (unlikely(rq->idle_balance))
@@ -7818,19 +11399,10 @@
 	set_cpu_sd_state_busy();
 	nohz_balance_exit_idle(cpu);
 
-	/*
-	 * None are in tickless mode and hence no need for NOHZ idle load
-	 * balancing.
-	 */
-	if (likely(!atomic_read(&nohz.nr_cpus)))
-		return false;
-
-	if (time_before(now, nohz.next_balance))
-		return false;
-
-	if (rq->nr_running >= 2)
+	if (_nohz_kick_needed(rq, cpu, type))
 		return true;
 
+#ifndef CONFIG_SCHED_HMP
 	rcu_read_lock();
 	sd = rcu_dereference(per_cpu(sd_busy, cpu));
 	if (sd) {
@@ -7862,6 +11434,7 @@
 
 unlock:
 	rcu_read_unlock();
+#endif
 	return kick;
 }
 #else
@@ -7895,15 +11468,19 @@
  */
 void trigger_load_balance(struct rq *rq)
 {
-	/* Don't need to rebalance while attached to NULL domain */
-	if (unlikely(on_null_domain(rq)))
+	int type = NOHZ_KICK_ANY;
+
+	/* Don't need to rebalance while attached to NULL domain or
+	 * cpu is isolated.
+	 */
+	if (unlikely(on_null_domain(rq)) || cpu_isolated(cpu_of(rq)))
 		return;
 
 	if (time_after_eq(jiffies, rq->next_balance))
 		raise_softirq(SCHED_SOFTIRQ);
 #ifdef CONFIG_NO_HZ_COMMON
-	if (nohz_kick_needed(rq))
-		nohz_balancer_kick();
+	if (nohz_kick_needed(rq, &type))
+		nohz_balancer_kick(type);
 #endif
 }
 
@@ -7939,6 +11516,17 @@
 
 	if (static_branch_unlikely(&sched_numa_balancing))
 		task_tick_numa(rq, curr);
+
+#ifdef CONFIG_SMP
+	if (energy_aware() &&
+	    !rq->rd->overutilized && cpu_overutilized(task_cpu(curr))) {
+		rq->rd->overutilized = true;
+		trace_sched_overutilized(true);
+	}
+
+	rq->misfit_task = !task_fits_max(curr, rq->cpu);
+#endif
+
 }
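
The tick hook above latches rd->overutilized as soon as one CPU crosses the margin and refreshes the per-rq misfit flag. cpu_overutilized() and task_fits_max() are defined elsewhere in this patch; EAS kernels of this era typically compare utilization against capacity with roughly 25% headroom. A sketch under that assumption (the margin value is illustrative, not taken from this diff):

#include <stdbool.h>
#include <stdio.h>

#define SCHED_CAPACITY_SCALE 1024UL

/* Assumed margin of 1280/1024, i.e. 25% headroom. */
static unsigned long capacity_margin = 1280;

static bool cpu_overutilized(unsigned long util, unsigned long capacity)
{
	return capacity * SCHED_CAPACITY_SCALE < util * capacity_margin;
}

int main(void)
{
	/* a LITTLE cpu of capacity 512 running util 430 */
	printf("%d\n", cpu_overutilized(430, 512));	/* 1: 524288 < 550400 */
	printf("%d\n", cpu_overutilized(380, 512));	/* 0 */
	return 0;
}
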
 
 /*
@@ -7950,31 +11538,17 @@
 {
 	struct cfs_rq *cfs_rq;
 	struct sched_entity *se = &p->se, *curr;
-	int this_cpu = smp_processor_id();
 	struct rq *rq = this_rq();
-	unsigned long flags;
-
-	raw_spin_lock_irqsave(&rq->lock, flags);
 
+	raw_spin_lock(&rq->lock);
 	update_rq_clock(rq);
 
 	cfs_rq = task_cfs_rq(current);
 	curr = cfs_rq->curr;
-
-	/*
-	 * Not only the cpu but also the task_group of the parent might have
-	 * been changed after parent->se.parent,cfs_rq were copied to
-	 * child->se.parent,cfs_rq. So call __set_task_cpu() to make those
-	 * of child point to valid ones.
-	 */
-	rcu_read_lock();
-	__set_task_cpu(p, this_cpu);
-	rcu_read_unlock();
-
-	update_curr(cfs_rq);
-
-	if (curr)
+	if (curr) {
+		update_curr(cfs_rq);
 		se->vruntime = curr->vruntime;
+	}
 	place_entity(cfs_rq, se, 1);
 
 	if (sysctl_sched_child_runs_first && curr && entity_before(curr, se)) {
@@ -7987,8 +11561,7 @@
 	}
 
 	se->vruntime -= cfs_rq->min_vruntime;
-
-	raw_spin_unlock_irqrestore(&rq->lock, flags);
+	raw_spin_unlock(&rq->lock);
 }
 
 /*
@@ -8040,6 +11613,61 @@
 	return false;
 }
 
+#ifdef CONFIG_FAIR_GROUP_SCHED
+/*
+ * Propagate the changes of the sched_entity across the tg tree to make it
+ * visible to the root
+ */
+static void propagate_entity_cfs_rq(struct sched_entity *se)
+{
+	struct cfs_rq *cfs_rq;
+
+	/* Start to propagate at parent */
+	se = se->parent;
+
+	for_each_sched_entity(se) {
+		cfs_rq = cfs_rq_of(se);
+
+		if (cfs_rq_throttled(cfs_rq))
+			break;
+
+		update_load_avg(se, UPDATE_TG);
+	}
+}
+#else
+static void propagate_entity_cfs_rq(struct sched_entity *se) { }
+#endif
+
+static void detach_entity_cfs_rq(struct sched_entity *se)
+{
+	struct cfs_rq *cfs_rq = cfs_rq_of(se);
+
+	/* Catch up with the cfs_rq and remove our load when we leave */
+	update_load_avg(se, 0);
+	detach_entity_load_avg(cfs_rq, se);
+	update_tg_load_avg(cfs_rq, false);
+	propagate_entity_cfs_rq(se);
+}
+
+static void attach_entity_cfs_rq(struct sched_entity *se)
+{
+	struct cfs_rq *cfs_rq = cfs_rq_of(se);
+
+#ifdef CONFIG_FAIR_GROUP_SCHED
+	/*
+	 * Since the real-depth could have been changed (only FAIR
+	 * class maintain depth value), reset depth properly.
+	 */
+	se->depth = se->parent ? se->parent->depth + 1 : 0;
+#endif
+
+	/* Synchronize entity with its cfs_rq */
+	update_load_avg(se, sched_feat(ATTACH_AGE_LOAD) ? 0 : SKIP_AGE_LOAD);
+	attach_entity_load_avg(cfs_rq, se);
+	update_tg_load_avg(cfs_rq, false);
+	propagate_entity_cfs_rq(se);
+}
+
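
detach_entity_cfs_rq() and attach_entity_cfs_rq() bracket every move of an entity between cfs_rqs: sync the PELT average, remove or add its contribution, then propagate up the task-group hierarchy. A toy model of the pairing used by migration and group moves (plain counters stand in for the load sums; not the kernel's accounting):

#include <stdio.h>

/* Illustrative stand-ins for the per-cfs_rq load sums. */
static long src_load = 700, dst_load = 200, task_load = 150;

static void detach(long *cfs_load) { *cfs_load -= task_load; } /* + propagate */
static void attach(long *cfs_load) { *cfs_load += task_load; } /* + propagate */

int main(void)
{
	/* the pairing used when a task changes cfs_rq */
	detach(&src_load);	/* detach_entity_cfs_rq(se) on the old cfs_rq */
	/* ... set_task_rq(p, cpu): switch se to the new cfs_rq ... */
	attach(&dst_load);	/* attach_entity_cfs_rq(se) on the new cfs_rq */

	printf("src=%ld dst=%ld\n", src_load, dst_load);	/* 550 / 350 */
	return 0;
}
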
 static void detach_task_cfs_rq(struct task_struct *p)
 {
 	struct sched_entity *se = &p->se;
@@ -8054,8 +11682,7 @@
 		se->vruntime -= cfs_rq->min_vruntime;
 	}
 
-	/* Catch up with the cfs_rq and remove our load when we leave */
-	detach_entity_load_avg(cfs_rq, se);
+	detach_entity_cfs_rq(se);
 }
 
 static void attach_task_cfs_rq(struct task_struct *p)
@@ -8063,16 +11690,7 @@
 	struct sched_entity *se = &p->se;
 	struct cfs_rq *cfs_rq = cfs_rq_of(se);
 
-#ifdef CONFIG_FAIR_GROUP_SCHED
-	/*
-	 * Since the real-depth could have been changed (only FAIR
-	 * class maintain depth value), reset depth properly.
-	 */
-	se->depth = se->parent ? se->parent->depth + 1 : 0;
-#endif
-
-	/* Synchronize task with its cfs_rq */
-	attach_entity_load_avg(cfs_rq, se);
+	attach_entity_cfs_rq(se);
 
 	if (!vruntime_normalized(p))
 		se->vruntime += cfs_rq->min_vruntime;
@@ -8126,12 +11744,23 @@
 	cfs_rq->min_vruntime_copy = cfs_rq->min_vruntime;
 #endif
 #ifdef CONFIG_SMP
+#ifdef CONFIG_FAIR_GROUP_SCHED
+	cfs_rq->propagate_avg = 0;
+#endif
 	atomic_long_set(&cfs_rq->removed_load_avg, 0);
 	atomic_long_set(&cfs_rq->removed_util_avg, 0);
 #endif
 }
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
+static void task_set_group_fair(struct task_struct *p)
+{
+	struct sched_entity *se = &p->se;
+
+	set_task_rq(p, task_cpu(p));
+	se->depth = se->parent ? se->parent->depth + 1 : 0;
+}
+
 static void task_move_group_fair(struct task_struct *p)
 {
 	detach_task_cfs_rq(p);
@@ -8144,6 +11773,19 @@
 	attach_task_cfs_rq(p);
 }
 
+static void task_change_group_fair(struct task_struct *p, int type)
+{
+	switch (type) {
+	case TASK_SET_GROUP:
+		task_set_group_fair(p);
+		break;
+
+	case TASK_MOVE_GROUP:
+		task_move_group_fair(p);
+		break;
+	}
+}
+
 void free_fair_sched_group(struct task_group *tg)
 {
 	int i;
@@ -8153,12 +11795,9 @@
 	for_each_possible_cpu(i) {
 		if (tg->cfs_rq)
 			kfree(tg->cfs_rq[i]);
-		if (tg->se) {
-			if (tg->se[i])
-				remove_entity_load_avg(tg->se[i]);
+		if (tg->se)
 			kfree(tg->se[i]);
-		}
 	}
 
 	kfree(tg->cfs_rq);
 	kfree(tg->se);
@@ -8166,8 +11805,9 @@
 
 int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
 {
-	struct cfs_rq *cfs_rq;
 	struct sched_entity *se;
+	struct cfs_rq *cfs_rq;
+	struct rq *rq;
 	int i;
 
 	tg->cfs_rq = kzalloc(sizeof(cfs_rq) * nr_cpu_ids, GFP_KERNEL);
@@ -8182,6 +11822,8 @@
 	init_cfs_bandwidth(tg_cfs_bandwidth(tg));
 
 	for_each_possible_cpu(i) {
+		rq = cpu_rq(i);
+
 		cfs_rq = kzalloc_node(sizeof(struct cfs_rq),
 				      GFP_KERNEL, cpu_to_node(i));
 		if (!cfs_rq)
@@ -8195,6 +11837,10 @@
 		init_cfs_rq(cfs_rq);
 		init_tg_cfs_entry(tg, cfs_rq, se, i, parent->se[i]);
 		init_entity_runnable_average(se);
+
+		raw_spin_lock_irq(&rq->lock);
+		post_init_entity_util_avg(se);
+		raw_spin_unlock_irq(&rq->lock);
 	}
 
 	return 1;
@@ -8205,22 +11851,30 @@
 	return 0;
 }
 
-void unregister_fair_sched_group(struct task_group *tg, int cpu)
+void unregister_fair_sched_group(struct task_group *tg)
 {
-	struct rq *rq = cpu_rq(cpu);
 	unsigned long flags;
+	struct rq *rq;
+	int cpu;
+
+	for_each_possible_cpu(cpu) {
+		if (tg->se[cpu])
+			remove_entity_load_avg(tg->se[cpu]);
 
-	/*
-	 * Only empty task groups can be destroyed; so we can speculatively
-	 * check on_list without danger of it being re-added.
-	 */
-	if (!tg->cfs_rq[cpu]->on_list)
-		return;
+		/*
+		 * Only empty task groups can be destroyed; so we can
+		 * speculatively check on_list without danger of it being
+		 * re-added.
+		 */
+		if (!tg->cfs_rq[cpu]->on_list)
+			continue;
+
+		rq = cpu_rq(cpu);
 
-	raw_spin_lock_irqsave(&rq->lock, flags);
-	list_del_leaf_cfs_rq(tg->cfs_rq[cpu]);
-	raw_spin_unlock_irqrestore(&rq->lock, flags);
-}
+		raw_spin_lock_irqsave(&rq->lock, flags);
+		list_del_leaf_cfs_rq(tg->cfs_rq[cpu]);
+		raw_spin_unlock_irqrestore(&rq->lock, flags);
+	}
+}
 
 void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
 			struct sched_entity *se, int cpu,
@@ -8283,8 +11937,10 @@
 
 		/* Possible calls to update_curr() need rq clock */
 		update_rq_clock(rq);
-		for_each_sched_entity(se)
-			update_cfs_shares(group_cfs_rq(se));
+		for_each_sched_entity(se) {
+			update_load_avg(se, UPDATE_TG);
+			update_cfs_shares(se);
+		}
 		raw_spin_unlock_irqrestore(&rq->lock, flags);
 	}
 
@@ -8301,7 +11957,7 @@
 	return 1;
 }
 
-void unregister_fair_sched_group(struct task_group *tg, int cpu) { }
+void unregister_fair_sched_group(struct task_group *tg) { }
 
 #endif /* CONFIG_FAIR_GROUP_SCHED */
 
@@ -8361,7 +12017,12 @@
 	.update_curr		= update_curr_fair,
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
-	.task_move_group	= task_move_group_fair,
+	.task_change_group	= task_change_group_fair,
+#endif
+#ifdef CONFIG_SCHED_HMP
+	.inc_hmp_sched_stats	= inc_hmp_sched_stats_fair,
+	.dec_hmp_sched_stats	= dec_hmp_sched_stats_fair,
+	.fixup_hmp_sched_stats	= fixup_hmp_sched_stats_fair,
 #endif
 };
 
diff -ruw linux-4.4.115/kernel/sched/features.h linux-4.4.115-fbx/kernel/sched/features.h
--- linux-4.4.115/kernel/sched/features.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/kernel/sched/features.h	2019-01-22 16:16:28.699293459 +0100
@@ -49,7 +49,7 @@
  * Queue remote wakeups on the target CPU and process them
  * using the scheduler IPI. Reduces rq->lock contention/bounces.
  */
-SCHED_FEAT(TTWU_QUEUE, true)
+SCHED_FEAT(TTWU_QUEUE, false)
 
 #ifdef HAVE_RT_PUSH_IPI
 /*
@@ -69,3 +69,12 @@
 SCHED_FEAT(LB_MIN, false)
 SCHED_FEAT(ATTACH_AGE_LOAD, true)
 
+/*
+ * Energy aware scheduling. Use platform energy model to guide scheduling
+ * decisions optimizing for energy efficiency.
+ */
+#ifdef CONFIG_DEFAULT_USE_ENERGY_AWARE
+SCHED_FEAT(ENERGY_AWARE, true)
+#else
+SCHED_FEAT(ENERGY_AWARE, false)
+#endif
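
SCHED_FEAT() bits are runtime-toggleable via /sys/kernel/debug/sched_features when CONFIG_SCHED_DEBUG is enabled; the energy_aware() predicate used throughout fair.c in this diff is, in EAS trees of this vintage, a thin reader of this bit (defined in sched.h, not shown here). A rough, self-contained sketch of that wiring, with an illustrative bit position:

#include <stdio.h>

#define FEAT_ENERGY_AWARE (1u << 0)	/* bit position illustrative */

static unsigned int sched_features =
#ifdef CONFIG_DEFAULT_USE_ENERGY_AWARE
	FEAT_ENERGY_AWARE;
#else
	0;
#endif

static int energy_aware(void)
{
	return !!(sched_features & FEAT_ENERGY_AWARE);
}

int main(void)
{
	printf("energy_aware() = %d\n", energy_aware());
	return 0;
}
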
diff -ruw linux-4.4.115/kernel/sched/idle.c linux-4.4.115-fbx/kernel/sched/idle.c
--- linux-4.4.115/kernel/sched/idle.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/kernel/sched/idle.c	2019-10-29 09:26:25.625222574 +0100
@@ -19,9 +19,10 @@
  * sched_idle_set_state - Record idle state for the current CPU.
  * @idle_state: State to record.
  */
-void sched_idle_set_state(struct cpuidle_state *idle_state)
+void sched_idle_set_state(struct cpuidle_state *idle_state, int index)
 {
 	idle_set_state(this_rq(), idle_state);
+	idle_set_state_idx(this_rq(), index);
 }
 
 static int __read_mostly cpu_idle_force_poll;
@@ -219,6 +220,7 @@
 		 */
 
 		__current_set_polling();
+		quiet_vmstat();
 		tick_nohz_idle_enter();
 
 		while (!need_resched()) {
diff -ruw linux-4.4.115/kernel/sched/idle_task.c linux-4.4.115-fbx/kernel/sched/idle_task.c
--- linux-4.4.115/kernel/sched/idle_task.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/kernel/sched/idle_task.c	2019-01-22 16:16:28.703293496 +0100
@@ -9,7 +9,8 @@
 
 #ifdef CONFIG_SMP
 static int
-select_task_rq_idle(struct task_struct *p, int cpu, int sd_flag, int flags)
+select_task_rq_idle(struct task_struct *p, int cpu, int sd_flag, int flags,
+		    int sibling_count_hint)
 {
 	return task_cpu(p); /* IDLE tasks are never migrated */
 }
@@ -79,6 +80,26 @@
 {
 }
 
+#ifdef CONFIG_SCHED_HMP
+
+static void
+inc_hmp_sched_stats_idle(struct rq *rq, struct task_struct *p)
+{
+}
+
+static void
+dec_hmp_sched_stats_idle(struct rq *rq, struct task_struct *p)
+{
+}
+
+static void
+fixup_hmp_sched_stats_idle(struct rq *rq, struct task_struct *p,
+			   u32 new_task_load, u32 new_pred_demand)
+{
+}
+
+#endif
+
 /*
  * Simple, special scheduling class for the per-CPU idle tasks:
  */
@@ -107,4 +128,9 @@
 	.prio_changed		= prio_changed_idle,
 	.switched_to		= switched_to_idle,
 	.update_curr		= update_curr_idle,
+#ifdef CONFIG_SCHED_HMP
+	.inc_hmp_sched_stats	= inc_hmp_sched_stats_idle,
+	.dec_hmp_sched_stats	= dec_hmp_sched_stats_idle,
+	.fixup_hmp_sched_stats	= fixup_hmp_sched_stats_idle,
+#endif
 };
diff -ruw linux-4.4.115/kernel/sched/Makefile linux-4.4.115-fbx/kernel/sched/Makefile
--- linux-4.4.115/kernel/sched/Makefile	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/kernel/sched/Makefile	2019-01-22 16:16:28.691293387 +0100
@@ -2,6 +2,10 @@
 CFLAGS_REMOVE_clock.o = $(CC_FLAGS_FTRACE)
 endif
 
+# These files are disabled because they produce non-interesting flaky coverage
+# that is not a function of syscall inputs. E.g. involuntary context switches.
+KCOV_INSTRUMENT := n
+
 ifneq ($(CONFIG_SCHED_OMIT_FRAME_POINTER),y)
 # According to Alan Modra <alan@linuxcare.com.au>, the -fno-omit-frame-pointer is
 # needed for x86 only.  Why this used to be enabled for all architectures is beyond
@@ -13,9 +17,14 @@
 
 obj-y += core.o loadavg.o clock.o cputime.o
 obj-y += idle_task.o fair.o rt.o deadline.o stop_task.o
-obj-y += wait.o completion.o idle.o
-obj-$(CONFIG_SMP) += cpupri.o cpudeadline.o
+obj-y += wait.o completion.o idle.o sched_avg.o
+obj-$(CONFIG_SMP) += cpupri.o cpudeadline.o energy.o
+obj-$(CONFIG_SCHED_HMP) += hmp.o boost.o
 obj-$(CONFIG_SCHED_AUTOGROUP) += auto_group.o
 obj-$(CONFIG_SCHEDSTATS) += stats.o
 obj-$(CONFIG_SCHED_DEBUG) += debug.o
+obj-$(CONFIG_SCHED_TUNE) += tune.o
 obj-$(CONFIG_CGROUP_CPUACCT) += cpuacct.o
+obj-$(CONFIG_SCHED_CORE_CTL) += core_ctl.o
+obj-$(CONFIG_CPU_FREQ) += cpufreq.o
+obj-$(CONFIG_CPU_FREQ_GOV_SCHEDUTIL) += cpufreq_schedutil.o
diff -ruw linux-4.4.115/kernel/sched/rt.c linux-4.4.115-fbx/kernel/sched/rt.c
--- linux-4.4.115/kernel/sched/rt.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/kernel/sched/rt.c	2019-10-29 09:26:25.625222574 +0100
@@ -5,8 +5,13 @@
 
 #include "sched.h"
 
+#include <linux/interrupt.h>
 #include <linux/slab.h>
 #include <linux/irq_work.h>
+#include <trace/events/sched.h>
+#include <linux/hrtimer.h>
+
+#include "tune.h"
 
 int sched_rr_timeslice = RR_TIMESLICE;
 
@@ -253,8 +258,12 @@
 
 static inline bool need_pull_rt_task(struct rq *rq, struct task_struct *prev)
 {
-	/* Try to pull RT tasks here if we lower this rq's prio */
-	return rq->rt.highest_prio.curr > prev->prio;
+	/*
+	 * Try to pull RT tasks here if we lower this rq's prio and cpu is not
+	 * isolated
+	 */
+	return rq->rt.highest_prio.curr > prev->prio &&
+	       !cpu_isolated(cpu_of(rq));
 }
 
 static inline int rt_overloaded(struct rq *rq)
@@ -425,7 +434,7 @@
 
 static inline int on_rt_rq(struct sched_rt_entity *rt_se)
 {
-	return !list_empty(&rt_se->run_list);
+	return rt_se->on_rq;
 }
 
 #ifdef CONFIG_RT_GROUP_SCHED
@@ -471,8 +480,8 @@
 	return rt_se->my_q;
 }
 
-static void enqueue_rt_entity(struct sched_rt_entity *rt_se, bool head);
-static void dequeue_rt_entity(struct sched_rt_entity *rt_se);
+static void enqueue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags);
+static void dequeue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags);
 
 static void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
 {
@@ -488,7 +497,7 @@
 		if (!rt_se)
 			enqueue_top_rt_rq(rt_rq);
 		else if (!on_rt_rq(rt_se))
-			enqueue_rt_entity(rt_se, false);
+			enqueue_rt_entity(rt_se, 0);
 
 		if (rt_rq->highest_prio.curr < curr->prio)
 			resched_curr(rq);
@@ -505,7 +514,7 @@
 	if (!rt_se)
 		dequeue_top_rt_rq(rt_rq);
 	else if (on_rt_rq(rt_se))
-		dequeue_rt_entity(rt_se);
+		dequeue_rt_entity(rt_se, 0);
 }
 
 static inline int rt_rq_throttled(struct rt_rq *rt_rq)
@@ -878,6 +887,51 @@
 	return rt_task_of(rt_se)->prio;
 }
 
+static void dump_throttled_rt_tasks(struct rt_rq *rt_rq)
+{
+	struct rt_prio_array *array = &rt_rq->active;
+	struct sched_rt_entity *rt_se;
+	char buf[500];
+	char *pos = buf;
+	char *end = buf + sizeof(buf);
+	int idx;
+
+	pos += snprintf(pos, sizeof(buf),
+		"sched: RT throttling activated for rt_rq %p (cpu %d)\n",
+		rt_rq, cpu_of(rq_of_rt_rq(rt_rq)));
+
+	if (bitmap_empty(array->bitmap, MAX_RT_PRIO))
+		goto out;
+
+	pos += snprintf(pos, end - pos, "potential CPU hogs:\n");
+	idx = sched_find_first_bit(array->bitmap);
+	while (idx < MAX_RT_PRIO) {
+		list_for_each_entry(rt_se, array->queue + idx, run_list) {
+			struct task_struct *p;
+
+			if (!rt_entity_is_task(rt_se))
+				continue;
+
+			p = rt_task_of(rt_se);
+			if (pos < end)
+				pos += snprintf(pos, end - pos, "\t%s (%d)\n",
+					p->comm, p->pid);
+		}
+		idx = find_next_bit(array->bitmap, MAX_RT_PRIO, idx + 1);
+	}
+out:
+#ifdef CONFIG_PANIC_ON_RT_THROTTLING
+	/*
+	 * Use pr_err() in the BUG() case since printk_sched() will
+	 * not get flushed and deadlock is not a concern.
+	 */
+	pr_err("%s", buf);
+	BUG();
+#else
+	printk_deferred("%s", buf);
+#endif
+}
+
 static int sched_rt_runtime_exceeded(struct rt_rq *rt_rq)
 {
 	u64 runtime = sched_rt_runtime(rt_rq);
@@ -901,8 +955,14 @@
 		 * but accrue some time due to boosting.
 		 */
 		if (likely(rt_b->rt_runtime)) {
+			static bool once = false;
+
 			rt_rq->rt_throttled = 1;
-			printk_deferred_once("sched: RT throttling activated\n");
+
+			if (!once) {
+				once = true;
+				dump_throttled_rt_tasks(rt_rq);
+			}
 		} else {
 			/*
 			 * In case we did anyway, make it go away,
@@ -921,6 +981,70 @@
 	return 0;
 }
 
+#define RT_SCHEDTUNE_INTERVAL 50000000ULL
+
+static enum hrtimer_restart rt_schedtune_timer(struct hrtimer *timer)
+{
+	struct sched_rt_entity *rt_se = container_of(timer,
+			struct sched_rt_entity,
+			schedtune_timer);
+	struct task_struct *p = rt_task_of(rt_se);
+	struct rq *rq = task_rq(p);
+
+	raw_spin_lock(&rq->lock);
+
+	/*
+	 * Nothing to do if:
+	 * - task has switched runqueues
+	 * - task isn't RT anymore
+	 */
+	if (rq != task_rq(p) || (p->sched_class != &rt_sched_class))
+		goto out;
+
+	/*
+	 * If the task got enqueued back during callback time, we raced with
+	 * the enqueue on another cpu. That is OK; just do nothing, as the
+	 * enqueue path would have tried to cancel us and we should not run.
+	 * Also check the schedtune_enqueued flag, as a class switch on a
+	 * sleeping task may already have canceled the timer and done the
+	 * dequeue.
+	 */
+	if (p->on_rq || !rt_se->schedtune_enqueued)
+		goto out;
+
+	/*
+	 * RT task is no longer active, cancel boost
+	 */
+	rt_se->schedtune_enqueued = false;
+	schedtune_dequeue_task(p, cpu_of(rq));
+	cpufreq_update_this_cpu(rq, SCHED_CPUFREQ_RT);
+out:
+	raw_spin_unlock(&rq->lock);
+
+	/*
+	 * This can free the task_struct if no more references.
+	 */
+	put_task_struct(p);
+
+	return HRTIMER_NORESTART;
+}
+
+void init_rt_schedtune_timer(struct sched_rt_entity *rt_se)
+{
+	struct hrtimer *timer = &rt_se->schedtune_timer;
+
+	hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+	timer->function = rt_schedtune_timer;
+	rt_se->schedtune_enqueued = false;
+}
+
+static void start_schedtune_timer(struct sched_rt_entity *rt_se)
+{
+	struct hrtimer *timer = &rt_se->schedtune_timer;
+
+	hrtimer_start(timer, ns_to_ktime(RT_SCHEDTUNE_INTERVAL),
+			HRTIMER_MODE_REL_PINNED);
+}
+
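
Instead of dropping the schedtune boost on every sleep, the dequeue path keeps it and arms this pinned 50 ms timer (holding a task reference); a wakeup inside the window cancels the timer and keeps the boost, and only if the task is still asleep when the timer fires does the callback deboost. A toy state machine for that lifecycle (locking and refcounting omitted):

#include <stdbool.h>
#include <stdio.h>

/* Illustrative states for the 50ms RT schedtune deboost window. */
enum ev { ENQUEUE, DEQUEUE_SLEEP, TIMER_FIRES };

static bool boosted, timer_armed;

static void handle(enum ev e)
{
	switch (e) {
	case ENQUEUE:
		timer_armed = false;	/* hrtimer_try_to_cancel() */
		boosted = true;		/* schedtune_enqueue_task() */
		break;
	case DEQUEUE_SLEEP:
		if (boosted)
			timer_armed = true;	/* keep boost, arm 50ms timer */
		break;
	case TIMER_FIRES:
		if (timer_armed) {	/* task still asleep: drop the boost */
			boosted = false;
			timer_armed = false;
		}
		break;
	}
}

int main(void)
{
	handle(ENQUEUE);
	handle(DEQUEUE_SLEEP);
	handle(ENQUEUE);		/* woke within 50ms: boost kept */
	printf("boosted=%d timer=%d\n", boosted, timer_armed);	/* 1 0 */
	return 0;
}
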
 /*
  * Update the current task's runtime statistics. Skip current tasks that
  * are not in our scheduling class.
@@ -938,6 +1062,9 @@
 	if (unlikely((s64)delta_exec <= 0))
 		return;
 
+	/* Kick cpufreq (see the comment in kernel/sched/sched.h). */
+	cpufreq_update_this_cpu(rq, SCHED_CPUFREQ_RT);
+
 	schedstat_set(curr->se.statistics.exec_max,
 		      max(curr->se.statistics.exec_max, delta_exec));
 
@@ -1119,6 +1246,41 @@
 
 #endif /* CONFIG_RT_GROUP_SCHED */
 
+#ifdef CONFIG_SCHED_HMP
+
+static void
+inc_hmp_sched_stats_rt(struct rq *rq, struct task_struct *p)
+{
+	inc_cumulative_runnable_avg(&rq->hmp_stats, p);
+}
+
+static void
+dec_hmp_sched_stats_rt(struct rq *rq, struct task_struct *p)
+{
+	dec_cumulative_runnable_avg(&rq->hmp_stats, p);
+}
+
+static void
+fixup_hmp_sched_stats_rt(struct rq *rq, struct task_struct *p,
+			 u32 new_task_load, u32 new_pred_demand)
+{
+	s64 task_load_delta = (s64)new_task_load - task_load(p);
+	s64 pred_demand_delta = PRED_DEMAND_DELTA;
+
+	fixup_cumulative_runnable_avg(&rq->hmp_stats, p, task_load_delta,
+				      pred_demand_delta);
+}
+
+#else	/* CONFIG_SCHED_HMP */
+
+static inline void
+inc_hmp_sched_stats_rt(struct rq *rq, struct task_struct *p) { }
+
+static inline void
+dec_hmp_sched_stats_rt(struct rq *rq, struct task_struct *p) { }
+
+#endif	/* CONFIG_SCHED_HMP */
+
 static inline
 unsigned int rt_se_nr_running(struct sched_rt_entity *rt_se)
 {
@@ -1155,7 +1317,30 @@
 	dec_rt_group(rt_se, rt_rq);
 }
 
-static void __enqueue_rt_entity(struct sched_rt_entity *rt_se, bool head)
+/*
+ * Change rt_se->run_list location unless SAVE && !MOVE
+ *
+ * assumes ENQUEUE/DEQUEUE flags match
+ */
+static inline bool move_entity(unsigned int flags)
+{
+	if ((flags & (DEQUEUE_SAVE | DEQUEUE_MOVE)) == DEQUEUE_SAVE)
+		return false;
+
+	return true;
+}
+
+static void __delist_rt_entity(struct sched_rt_entity *rt_se, struct rt_prio_array *array)
+{
+	list_del_init(&rt_se->run_list);
+
+	if (list_empty(array->queue + rt_se_prio(rt_se)))
+		__clear_bit(rt_se_prio(rt_se), array->bitmap);
+
+	rt_se->on_list = 0;
+}
+
+static void __enqueue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags)
 {
 	struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
 	struct rt_prio_array *array = &rt_rq->active;
@@ -1168,26 +1353,37 @@
 	 * get throttled and the current group doesn't have any other
 	 * active members.
 	 */
-	if (group_rq && (rt_rq_throttled(group_rq) || !group_rq->rt_nr_running))
+	if (group_rq && (rt_rq_throttled(group_rq) || !group_rq->rt_nr_running)) {
+		if (rt_se->on_list)
+			__delist_rt_entity(rt_se, array);
 		return;
+	}
 
-	if (head)
-		list_add(&rt_se->run_list, queue);
-	else
-		list_add_tail(&rt_se->run_list, queue);
-	__set_bit(rt_se_prio(rt_se), array->bitmap);
+	if (move_entity(flags)) {
+		WARN_ON_ONCE(rt_se->on_list);
+		if (flags & ENQUEUE_HEAD)
+			list_add(&rt_se->run_list, queue);
+		else
+			list_add_tail(&rt_se->run_list, queue);
+
+		__set_bit(rt_se_prio(rt_se), array->bitmap);
+		rt_se->on_list = 1;
+	}
+	rt_se->on_rq = 1;
 
 	inc_rt_tasks(rt_se, rt_rq);
 }
 
-static void __dequeue_rt_entity(struct sched_rt_entity *rt_se)
+static void __dequeue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags)
 {
 	struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
 	struct rt_prio_array *array = &rt_rq->active;
 
-	list_del_init(&rt_se->run_list);
-	if (list_empty(array->queue + rt_se_prio(rt_se)))
-		__clear_bit(rt_se_prio(rt_se), array->bitmap);
+	if (move_entity(flags)) {
+		WARN_ON_ONCE(!rt_se->on_list);
+		__delist_rt_entity(rt_se, array);
+	}
+	rt_se->on_rq = 0;
 
 	dec_rt_tasks(rt_se, rt_rq);
 }
@@ -1196,7 +1392,7 @@
  * Because the prio of an upper entry depends on the lower
  * entries, we must remove entries top - down.
  */
-static void dequeue_rt_stack(struct sched_rt_entity *rt_se)
+static void dequeue_rt_stack(struct sched_rt_entity *rt_se, unsigned int flags)
 {
 	struct sched_rt_entity *back = NULL;
 
@@ -1209,31 +1405,31 @@
 
 	for (rt_se = back; rt_se; rt_se = rt_se->back) {
 		if (on_rt_rq(rt_se))
-			__dequeue_rt_entity(rt_se);
+			__dequeue_rt_entity(rt_se, flags);
 	}
 }
 
-static void enqueue_rt_entity(struct sched_rt_entity *rt_se, bool head)
+static void enqueue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags)
 {
 	struct rq *rq = rq_of_rt_se(rt_se);
 
-	dequeue_rt_stack(rt_se);
+	dequeue_rt_stack(rt_se, flags);
 	for_each_sched_rt_entity(rt_se)
-		__enqueue_rt_entity(rt_se, head);
+		__enqueue_rt_entity(rt_se, flags);
 	enqueue_top_rt_rq(&rq->rt);
 }
 
-static void dequeue_rt_entity(struct sched_rt_entity *rt_se)
+static void dequeue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags)
 {
 	struct rq *rq = rq_of_rt_se(rt_se);
 
-	dequeue_rt_stack(rt_se);
+	dequeue_rt_stack(rt_se, flags);
 
 	for_each_sched_rt_entity(rt_se) {
 		struct rt_rq *rt_rq = group_rt_rq(rt_se);
 
 		if (rt_rq && rt_rq->rt_nr_running)
-			__enqueue_rt_entity(rt_se, false);
+			__enqueue_rt_entity(rt_se, flags);
 	}
 	enqueue_top_rt_rq(&rq->rt);
 }
@@ -1249,10 +1445,38 @@
 	if (flags & ENQUEUE_WAKEUP)
 		rt_se->timeout = 0;
 
-	enqueue_rt_entity(rt_se, flags & ENQUEUE_HEAD);
+	enqueue_rt_entity(rt_se, flags);
+	inc_hmp_sched_stats_rt(rq, p);
 
 	if (!task_current(rq, p) && p->nr_cpus_allowed > 1)
 		enqueue_pushable_task(rq, p);
+
+	if (!schedtune_task_boost(p))
+		return;
+
+	/*
+	 * If schedtune timer is active, that means a boost was already
+	 * done, just cancel the timer so that deboost doesn't happen.
+	 * Otherwise, increase the boost. If an enqueued timer was
+	 * cancelled, put the task reference.
+	 */
+	if (hrtimer_try_to_cancel(&rt_se->schedtune_timer) == 1)
+		put_task_struct(p);
+
+	/*
+	 * schedtune_enqueued can be true in the following situation:
+	 * enqueue_task_rt grabs the rq lock before the timer fires, or
+	 * before its callback acquires the rq lock.
+	 * schedtune_enqueued can be false if the timer callback is running
+	 * and the timer just released the rq lock, or if the timer finished
+	 * running and canceled the boost.
+	 */
+	if (rt_se->schedtune_enqueued)
+		return;
+
+	rt_se->schedtune_enqueued = true;
+	schedtune_enqueue_task(p, cpu_of(rq));
+	cpufreq_update_this_cpu(rq, SCHED_CPUFREQ_RT);
 }
 
 static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int flags)
@@ -1260,9 +1484,23 @@
 	struct sched_rt_entity *rt_se = &p->rt;
 
 	update_curr_rt(rq);
-	dequeue_rt_entity(rt_se);
+	dequeue_rt_entity(rt_se, flags);
+	dec_hmp_sched_stats_rt(rq, p);
 
 	dequeue_pushable_task(rq, p);
+
+	if (!rt_se->schedtune_enqueued)
+		return;
+
+	if (flags == DEQUEUE_SLEEP) {
+		get_task_struct(p);
+		start_schedtune_timer(rt_se);
+		return;
+	}
+
+	rt_se->schedtune_enqueued = false;
+	schedtune_dequeue_task(p, cpu_of(rq));
+	cpufreq_update_this_cpu(rq, SCHED_CPUFREQ_RT);
 }
 
 /*
@@ -1302,11 +1540,77 @@
 #ifdef CONFIG_SMP
 static int find_lowest_rq(struct task_struct *task);
 
+#ifdef CONFIG_SCHED_HMP
+static int
+select_task_rq_rt_hmp(struct task_struct *p, int cpu, int sd_flag, int flags)
+{
+	int target;
+
+	rcu_read_lock();
+	target = find_lowest_rq(p);
+	if (target != -1)
+		cpu = target;
+	rcu_read_unlock();
+
+	return cpu;
+}
+#endif
+
+/*
+ * Return whether the task on the given cpu is currently non-preemptible
+ * while handling a potentially long softint, or if the task is likely
+ * to block preemptions soon because it is a ksoftirq thread that is
+ * handling slow softints.
+ */
+bool
+task_may_not_preempt(struct task_struct *task, int cpu)
+{
+	__u32 softirqs = per_cpu(active_softirqs, cpu) |
+			 __IRQ_STAT(cpu, __softirq_pending);
+	struct task_struct *cpu_ksoftirqd = per_cpu(ksoftirqd, cpu);
+
+	return ((softirqs & LONG_SOFTIRQ_MASK) &&
+		(task == cpu_ksoftirqd ||
+		 task_thread_info(task)->preempt_count & SOFTIRQ_MASK));
+}
+
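
The check ORs the softirqs currently being run on this CPU with those still pending, and reports "may not preempt" when a long-running softirq class is involved and the current task is either ksoftirqd or already in softirq context. A standalone sketch; the bit positions and the contents of LONG_SOFTIRQ_MASK here are assumptions (the real mask is defined elsewhere in this tree):

#include <stdbool.h>
#include <stdio.h>

/* Illustrative softirq bits; LONG_SOFTIRQ_MASK contents assumed. */
#define NET_RX_BIT		(1u << 3)
#define BLOCK_BIT		(1u << 4)
#define LONG_SOFTIRQ_MASK	(NET_RX_BIT | BLOCK_BIT)

static bool may_not_preempt(unsigned int active, unsigned int pending,
			    bool is_ksoftirqd, bool in_softirq)
{
	unsigned int softirqs = active | pending;

	return (softirqs & LONG_SOFTIRQ_MASK) &&
	       (is_ksoftirqd || in_softirq);
}

int main(void)
{
	printf("%d\n", may_not_preempt(NET_RX_BIT, 0, true, false));	/* 1 */
	printf("%d\n", may_not_preempt(0, 0, true, false));		/* 0 */
	return 0;
}
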
+/*
+ * Perform a schedtune dequeue and cancelation of boost timers if needed.
+ * Should be called only with the rq->lock held.
+ */
+static void schedtune_dequeue_rt(struct rq *rq, struct task_struct *p)
+{
+	struct sched_rt_entity *rt_se = &p->rt;
+
+	BUG_ON(!raw_spin_is_locked(&rq->lock));
+
+	if (!rt_se->schedtune_enqueued)
+		return;
+
+	/*
+	 * In case of a class change, cancel any active timers. If an
+	 * enqueued timer was cancelled, put the task reference.
+	 */
+	if (hrtimer_try_to_cancel(&rt_se->schedtune_timer) == 1)
+		put_task_struct(p);
+
+	/* schedtune_enqueued is true, deboost it */
+	rt_se->schedtune_enqueued = false;
+	schedtune_dequeue_task(p, task_cpu(p));
+	cpufreq_update_this_cpu(rq, SCHED_CPUFREQ_RT);
+}
+
 static int
-select_task_rq_rt(struct task_struct *p, int cpu, int sd_flag, int flags)
+select_task_rq_rt(struct task_struct *p, int cpu, int sd_flag, int flags,
+		  int sibling_count_hint)
 {
 	struct task_struct *curr;
 	struct rq *rq;
+	bool may_not_preempt;
+
+#ifdef CONFIG_SCHED_HMP
+	return select_task_rq_rt_hmp(p, cpu, sd_flag, flags);
+#endif
 
 	/* For anything but wake ups, just return the task_cpu */
 	if (sd_flag != SD_BALANCE_WAKE && sd_flag != SD_BALANCE_FORK)
@@ -1318,7 +1622,17 @@
 	curr = READ_ONCE(rq->curr); /* unlocked access */
 
 	/*
-	 * If the current task on @p's runqueue is an RT task, then
+	 * If the current task on @p's runqueue is a softirq task,
+	 * it may run without preemption for a time that is
+	 * ill-suited for a waiting RT task. Therefore, try to
+	 * wake this RT task on another runqueue.
+	 *
+	 * Also, if the current task on @p's runqueue is an RT task, then
 	 * try to see if we can wake this RT task up on another
 	 * runqueue. Otherwise simply start this RT task
 	 * on its current runqueue.
@@ -1339,22 +1653,40 @@
 	 * This test is optimistic, if we get it wrong the load-balancer
 	 * will have to sort it out.
 	 */
-	if (curr && unlikely(rt_task(curr)) &&
+	may_not_preempt = task_may_not_preempt(curr, cpu);
+	if (may_not_preempt ||
+	    (unlikely(rt_task(curr)) &&
 	    (curr->nr_cpus_allowed < 2 ||
-	     curr->prio <= p->prio)) {
+	     curr->prio <= p->prio))) {
 		int target = find_lowest_rq(p);
 
 		/*
-		 * Don't bother moving it if the destination CPU is
-		 * not running a lower priority task.
+		 * If the cpu is non-preemptible, prefer a remote cpu
+		 * even if it is running a higher-prio task.
+		 * Otherwise, don't bother moving it if the
+		 * destination CPU is not running a lower priority task.
 		 */
 		if (target != -1 &&
-		    p->prio < cpu_rq(target)->rt.highest_prio.curr)
+		   (may_not_preempt ||
+		    p->prio < cpu_rq(target)->rt.highest_prio.curr))
 			cpu = target;
 	}
 	rcu_read_unlock();
 
 out:
+	/*
+	 * If previous CPU was different, make sure to cancel any active
+	 * schedtune timers and deboost.
+	 */
+	if (task_cpu(p) != cpu) {
+		unsigned long fl;
+		struct rq *prq = task_rq(p);
+
+		raw_spin_lock_irqsave(&prq->lock, fl);
+		schedtune_dequeue_rt(prq, p);
+		raw_spin_unlock_irqrestore(&prq->lock, fl);
+	}
+
 	return cpu;
 }
 
@@ -1545,6 +1877,109 @@
 
 static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask);
 
+#ifdef CONFIG_SCHED_HMP
+
+static int find_lowest_rq_hmp(struct task_struct *task)
+{
+	struct cpumask *lowest_mask = *this_cpu_ptr(&local_cpu_mask);
+	struct cpumask candidate_mask = CPU_MASK_NONE;
+	struct sched_cluster *cluster;
+	int best_cpu = -1;
+	int prev_cpu = task_cpu(task);
+	u64 cpu_load, min_load = ULLONG_MAX;
+	int i;
+	int restrict_cluster;
+	int boost_on_big;
+	int pack_task, wakeup_latency, least_wakeup_latency = INT_MAX;
+
+	boost_on_big = sched_boost() == FULL_THROTTLE_BOOST &&
+			sched_boost_policy() == SCHED_BOOST_ON_BIG;
+
+	restrict_cluster = sysctl_sched_restrict_cluster_spill;
+
+	/* Make sure the mask is initialized first */
+	if (unlikely(!lowest_mask))
+		return best_cpu;
+
+	if (task->nr_cpus_allowed == 1)
+		return best_cpu; /* No other targets possible */
+
+	if (!cpupri_find(&task_rq(task)->rd->cpupri, task, lowest_mask))
+		return best_cpu; /* No targets found */
+
+	pack_task = is_short_burst_task(task);
+
+	/*
+	 * At this point we have built a mask of cpus representing the
+	 * lowest priority tasks in the system.  Now we want to elect
+	 * the best one based on our affinity and topology.
+	 */
+
+retry:
+	for_each_sched_cluster(cluster) {
+		if (boost_on_big && cluster->capacity != max_possible_capacity)
+			continue;
+
+		cpumask_and(&candidate_mask, &cluster->cpus, lowest_mask);
+		cpumask_andnot(&candidate_mask, &candidate_mask,
+			       cpu_isolated_mask);
+		/*
+		 * When placement boost is active, if there is no eligible CPU
+		 * in the highest capacity cluster, we fallback to the other
+		 * clusters. So clear the CPUs of the traversed cluster from
+		 * the lowest_mask.
+		 */
+		if (unlikely(boost_on_big))
+			cpumask_andnot(lowest_mask, lowest_mask,
+				       &cluster->cpus);
+
+		if (cpumask_empty(&candidate_mask))
+			continue;
+
+		for_each_cpu(i, &candidate_mask) {
+			if (sched_cpu_high_irqload(i))
+				continue;
+
+			cpu_load = cpu_rq(i)->hmp_stats.cumulative_runnable_avg;
+			if (!restrict_cluster)
+				cpu_load = scale_load_to_cpu(cpu_load, i);
+
+			if (pack_task) {
+				wakeup_latency = cpu_rq(i)->wakeup_latency;
+
+				if (wakeup_latency > least_wakeup_latency)
+					continue;
+
+				if (wakeup_latency < least_wakeup_latency) {
+					least_wakeup_latency = wakeup_latency;
+					min_load = cpu_load;
+					best_cpu = i;
+					continue;
+				}
+			}
+
+			if (cpu_load < min_load ||
+				(cpu_load == min_load &&
+				(i == prev_cpu || (best_cpu != prev_cpu &&
+				cpus_share_cache(prev_cpu, i))))) {
+				min_load = cpu_load;
+				best_cpu = i;
+			}
+		}
+
+		if (restrict_cluster && best_cpu != -1)
+			break;
+	}
+
+	if (unlikely(boost_on_big && best_cpu == -1)) {
+		boost_on_big = 0;
+		goto retry;
+	}
+
+	return best_cpu;
+}
+#endif	/* CONFIG_SCHED_HMP */
+
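
Within each cluster, the scan above skips high-irqload CPUs, packs short-burst tasks onto the CPU with the lowest wakeup latency, and otherwise picks the least loaded CPU, preferring on a load tie the task's previous CPU or, failing that, a CPU sharing its cache. A sketch of just that tie-break predicate (inputs mocked as parameters):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative tie-break from the per-cluster scan. */
static bool prefer(uint64_t load, uint64_t min_load, int cpu, int prev_cpu,
		   int best_cpu, bool shares_cache_with_prev)
{
	if (load < min_load)
		return true;
	/* on equal load, lean toward prev_cpu, else toward its cache peers */
	return load == min_load &&
	       (cpu == prev_cpu ||
		(best_cpu != prev_cpu && shares_cache_with_prev));
}

int main(void)
{
	/* cpus 1 and 2 equally loaded; cpu 2 was the task's previous cpu */
	uint64_t min_load = 300;

	printf("%d\n", prefer(300, min_load, 2, 2, 1, true));	/* 1 */
	printf("%d\n", prefer(400, min_load, 3, 2, 1, false));	/* 0 */
	return 0;
}
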
 static int find_lowest_rq(struct task_struct *task)
 {
 	struct sched_domain *sd;
@@ -1552,6 +1987,10 @@
 	int this_cpu = smp_processor_id();
 	int cpu      = task_cpu(task);
 
+#ifdef CONFIG_SCHED_HMP
+	return find_lowest_rq_hmp(task);
+#endif
+
 	/* Make sure the mask is initialized first */
 	if (unlikely(!lowest_mask))
 		return -1;
@@ -1768,9 +2207,13 @@
 		goto retry;
 	}
 
+	next_task->on_rq = TASK_ON_RQ_MIGRATING;
 	deactivate_task(rq, next_task, 0);
 	set_task_cpu(next_task, lowest_rq->cpu);
+	next_task->on_rq = TASK_ON_RQ_QUEUED;
 	activate_task(lowest_rq, next_task, 0);
 	ret = 1;
 
 	resched_curr(lowest_rq);
@@ -2034,9 +2477,13 @@
 
 			resched = true;
 
+			p->on_rq = TASK_ON_RQ_MIGRATING;
 			deactivate_task(src_rq, p, 0);
 			set_task_cpu(p, this_cpu);
+			p->on_rq = TASK_ON_RQ_QUEUED;
 			activate_task(this_rq, p, 0);
 			/*
 			 * We continue with the search, just in
 			 * case there's an even higher prio task
@@ -2096,13 +2543,21 @@
 static void switched_from_rt(struct rq *rq, struct task_struct *p)
 {
 	/*
+	 * On class switch from rt, always cancel active schedtune timers,
+	 * this handles the cases where we switch class for a task that is
+	 * already rt-dequeued but has a running timer.
+	 */
+	schedtune_dequeue_rt(rq, p);
+
+	/*
 	 * If there are other RT tasks then we will reschedule
 	 * and the scheduling of the other RT tasks will handle
 	 * the balancing. But if we are the last RT task
 	 * we may need to handle the pulling of RT tasks
 	 * now.
 	 */
-	if (!task_on_rq_queued(p) || rq->rt.rt_nr_running)
+	if (!task_on_rq_queued(p) || rq->rt.rt_nr_running ||
+		cpu_isolated(cpu_of(rq)))
 		return;
 
 	queue_pull_task(rq);
@@ -2117,6 +2572,7 @@
 					GFP_KERNEL, cpu_to_node(i));
 	}
 }
+
 #endif /* CONFIG_SMP */
 
 /*
@@ -2290,6 +2746,11 @@
 	.switched_to		= switched_to_rt,
 
 	.update_curr		= update_curr_rt,
+#ifdef CONFIG_SCHED_HMP
+	.inc_hmp_sched_stats	= inc_hmp_sched_stats_rt,
+	.dec_hmp_sched_stats	= dec_hmp_sched_stats_rt,
+	.fixup_hmp_sched_stats	= fixup_hmp_sched_stats_rt,
+#endif
 };
 
 #ifdef CONFIG_SCHED_DEBUG
diff -ruw linux-4.4.115/kernel/sched/sched.h linux-4.4.115-fbx/kernel/sched/sched.h
--- linux-4.4.115/kernel/sched/sched.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/kernel/sched/sched.h	2019-10-29 09:26:25.625222574 +0100
@@ -27,6 +27,7 @@
 extern atomic_long_t calc_load_tasks;
 
 extern void calc_global_load_tick(struct rq *this_rq);
+
 extern long calc_load_fold_active(struct rq *this_rq);
 
 #ifdef CONFIG_SMP
@@ -240,6 +241,10 @@
 struct task_group {
 	struct cgroup_subsys_state css;
 
+#ifdef CONFIG_SCHED_HMP
+	bool upmigrate_discouraged;
+#endif
+
 #ifdef CONFIG_FAIR_GROUP_SCHED
 	/* schedulable entities of this group on each cpu */
 	struct sched_entity **se;
@@ -308,7 +313,7 @@
 
 extern void free_fair_sched_group(struct task_group *tg);
 extern int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent);
-extern void unregister_fair_sched_group(struct task_group *tg, int cpu);
+extern void unregister_fair_sched_group(struct task_group *tg);
 extern void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
 			struct sched_entity *se, int cpu,
 			struct sched_entity *parent);
@@ -335,14 +340,106 @@
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
 extern int sched_group_set_shares(struct task_group *tg, unsigned long shares);
-#endif
 
+#ifdef CONFIG_SMP
+extern void set_task_rq_fair(struct sched_entity *se,
+			     struct cfs_rq *prev, struct cfs_rq *next);
+#else /* !CONFIG_SMP */
+static inline void set_task_rq_fair(struct sched_entity *se,
+			     struct cfs_rq *prev, struct cfs_rq *next) { }
+#endif /* CONFIG_SMP */
+#endif /* CONFIG_FAIR_GROUP_SCHED */
+
+extern struct task_group *css_tg(struct cgroup_subsys_state *css);
 #else /* CONFIG_CGROUP_SCHED */
 
 struct cfs_bandwidth { };
 
 #endif	/* CONFIG_CGROUP_SCHED */
 
+#ifdef CONFIG_SCHED_HMP
+
+#define NUM_TRACKED_WINDOWS 2
+#define NUM_LOAD_INDICES 1000
+
+struct hmp_sched_stats {
+	int nr_big_tasks;
+	u64 cumulative_runnable_avg;
+	u64 pred_demands_sum;
+};
+
+struct load_subtractions {
+	u64 window_start;
+	u64 subs;
+	u64 new_subs;
+};
+
+struct group_cpu_time {
+	u64 curr_runnable_sum;
+	u64 prev_runnable_sum;
+	u64 nt_curr_runnable_sum;
+	u64 nt_prev_runnable_sum;
+};
+
+struct sched_cluster {
+	raw_spinlock_t load_lock;
+	struct list_head list;
+	struct cpumask cpus;
+	int id;
+	int max_power_cost;
+	int min_power_cost;
+	int max_possible_capacity;
+	int capacity;
+	int efficiency; /* Differentiate cpus with different IPC capability */
+	int load_scale_factor;
+	unsigned int exec_scale_factor;
+	/*
+	 * max_freq = user maximum
+	 * max_mitigated_freq = thermal defined maximum
+	 * max_possible_freq = maximum supported by hardware
+	 */
+	unsigned int cur_freq, max_freq, max_mitigated_freq, min_freq;
+	unsigned int max_possible_freq;
+	bool freq_init_done;
+	int dstate, dstate_wakeup_latency, dstate_wakeup_energy;
+	unsigned int static_cluster_pwr_cost;
+	int notifier_sent;
+	bool wake_up_idle;
+	atomic64_t last_cc_update;
+	atomic64_t cycles;
+};
+
+extern unsigned long all_cluster_ids[];
+
+static inline int cluster_first_cpu(struct sched_cluster *cluster)
+{
+	return cpumask_first(&cluster->cpus);
+}
+
+struct related_thread_group {
+	int id;
+	raw_spinlock_t lock;
+	struct list_head tasks;
+	struct list_head list;
+	struct sched_cluster *preferred_cluster;
+	struct rcu_head rcu;
+	u64 last_update;
+};
+
+extern struct list_head cluster_head;
+extern int num_clusters;
+extern struct sched_cluster *sched_cluster[NR_CPUS];
+
+struct cpu_cycle {
+	u64 cycles;
+	u64 time;
+};
+
+#define for_each_sched_cluster(cluster) \
+	list_for_each_entry_rcu(cluster, &cluster_head, list)
+
+#endif /* CONFIG_SCHED_HMP */
+
 /* CFS-related fields in a runqueue */
 struct cfs_rq {
 	struct load_weight load;
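
For illustration only (a sketch, not part of the patch): a consumer of the HMP cluster topology above might walk the cluster list like this; the function name is hypothetical.

	/*
	 * Illustrative sketch: find the cluster with the lowest
	 * max_power_cost. for_each_sched_cluster() is
	 * list_for_each_entry_rcu() underneath, so the caller must hold
	 * rcu_read_lock() or an equivalent protection.
	 */
	static struct sched_cluster *cheapest_cluster(void)
	{
		struct sched_cluster *cluster, *best = NULL;

		for_each_sched_cluster(cluster) {
			if (!best || cluster->max_power_cost < best->max_power_cost)
				best = cluster;
		}

		return best;
	}
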
@@ -376,6 +473,7 @@
 	unsigned long runnable_load_avg;
 #ifdef CONFIG_FAIR_GROUP_SCHED
 	unsigned long tg_load_avg_contrib;
+	unsigned long propagate_avg;
 #endif
 	atomic_long_t removed_load_avg, removed_util_avg;
 #ifndef CONFIG_64BIT
@@ -411,6 +509,11 @@
 	struct task_group *tg;	/* group that "owns" this runqueue */
 
 #ifdef CONFIG_CFS_BANDWIDTH
+
+#ifdef CONFIG_SCHED_HMP
+	struct hmp_sched_stats hmp_stats;
+#endif
+
 	int runtime_enabled;
 	u64 runtime_expires;
 	s64 runtime_remaining;
@@ -500,10 +603,18 @@
 #else
 	struct dl_bw dl_bw;
 #endif
+	/* This is the "average utilization" for this runqueue */
+	s64 avg_bw;
 };
 
 #ifdef CONFIG_SMP
 
+struct max_cpu_capacity {
+	raw_spinlock_t lock;
+	unsigned long val;
+	int cpu;
+};
+
 /*
  * We add the notion of a root-domain which will be used to define per-domain
  * variables. Each exclusive cpuset essentially defines an island domain by
@@ -522,6 +633,9 @@
 	/* Indicate more than one runnable task for any CPU */
 	bool overload;
 
+	/* Indicate one or more cpus over-utilized (tipping point) */
+	bool overutilized;
+
 	/*
 	 * The bit corresponding to a CPU gets set here if such CPU has more
 	 * than one runnable -deadline task (as it is below for RT tasks).
@@ -550,6 +664,12 @@
 	 */
 	cpumask_var_t rto_mask;
 	struct cpupri cpupri;
+
+	/* Maximum cpu capacity in the system. */
+	struct max_cpu_capacity max_cpu_capacity;
+
+	/* First cpu with maximum and minimum original capacity */
+	int max_cap_orig_cpu, min_cap_orig_cpu;
 };
 
 extern struct root_domain def_root_domain;
@@ -582,6 +702,7 @@
 	#define CPU_LOAD_IDX_MAX 5
 	unsigned long cpu_load[CPU_LOAD_IDX_MAX];
 	unsigned long last_load_update_tick;
+	unsigned int misfit_task;
 #ifdef CONFIG_NO_HZ_COMMON
 	u64 nohz_stamp;
 	unsigned long nohz_flags;
@@ -589,6 +710,14 @@
 #ifdef CONFIG_NO_HZ_FULL
 	unsigned long last_sched_tick;
 #endif
+
+#ifdef CONFIG_CPU_QUIET
+	/* time-based average load */
+	u64 nr_last_stamp;
+	u64 nr_running_integral;
+	seqcount_t ave_seqcnt;
+#endif
+
 	/* capture load from *all* tasks on this cpu: */
 	struct load_weight load;
 	unsigned long nr_load_updates;
@@ -601,6 +730,7 @@
 #ifdef CONFIG_FAIR_GROUP_SCHED
 	/* list of leaf cfs_rq on this cpu: */
 	struct list_head leaf_cfs_rq_list;
+	struct list_head *tmp_alone_branch;
 #endif /* CONFIG_FAIR_GROUP_SCHED */
 
 	/*
@@ -634,6 +764,7 @@
 	/* For active balancing */
 	int active_balance;
 	int push_cpu;
+	struct task_struct *push_task;
 	struct cpu_stop_work active_balance_work;
 	/* cpu of this runqueue: */
 	int cpu;
@@ -650,6 +781,50 @@
 	u64 max_idle_balance_cost;
 #endif
 
+#ifdef CONFIG_SCHED_HMP
+	struct sched_cluster *cluster;
+	struct cpumask freq_domain_cpumask;
+	struct hmp_sched_stats hmp_stats;
+
+	int cstate, wakeup_latency, wakeup_energy;
+	u64 window_start;
+	unsigned long hmp_flags;
+
+	u64 cur_irqload;
+	u64 avg_irqload;
+	u64 irqload_ts;
+	unsigned int static_cpu_pwr_cost;
+	struct task_struct *ed_task;
+	struct cpu_cycle cc;
+	u64 old_busy_time, old_busy_time_group;
+	u64 old_estimated_time;
+	u64 curr_runnable_sum;
+	u64 prev_runnable_sum;
+	u64 nt_curr_runnable_sum;
+	u64 nt_prev_runnable_sum;
+	struct group_cpu_time grp_time;
+	struct load_subtractions load_subs[NUM_TRACKED_WINDOWS];
+	DECLARE_BITMAP_ARRAY(top_tasks_bitmap,
+			NUM_TRACKED_WINDOWS, NUM_LOAD_INDICES);
+	u8 *top_tasks[NUM_TRACKED_WINDOWS];
+	u8 curr_table;
+	int prev_top;
+	int curr_top;
+#endif
+
+#ifdef CONFIG_SCHED_WALT
+	u64 cumulative_runnable_avg;
+	u64 window_start;
+	u64 curr_runnable_sum;
+	u64 prev_runnable_sum;
+	u64 nt_curr_runnable_sum;
+	u64 nt_prev_runnable_sum;
+	u64 cur_irqload;
+	u64 avg_irqload;
+	u64 irqload_ts;
+	u64 cum_window_demand;
+#endif /* CONFIG_SCHED_WALT */
+
 #ifdef CONFIG_IRQ_TIME_ACCOUNTING
 	u64 prev_irq_time;
 #endif
@@ -688,6 +863,9 @@
 	/* try_to_wake_up() stats */
 	unsigned int ttwu_count;
 	unsigned int ttwu_local;
+#ifdef CONFIG_SMP
+	struct eas_stats eas_stats;
+#endif
 #endif
 
 #ifdef CONFIG_SMP
@@ -697,6 +875,7 @@
 #ifdef CONFIG_CPU_IDLE
 	/* Must be inspected within a rcu lock section */
 	struct cpuidle_state *idle_state;
+	int idle_state_idx;
 #endif
 };
 
@@ -846,6 +1025,8 @@
 DECLARE_PER_CPU(struct sched_domain *, sd_numa);
 DECLARE_PER_CPU(struct sched_domain *, sd_busy);
 DECLARE_PER_CPU(struct sched_domain *, sd_asym);
+DECLARE_PER_CPU(struct sched_domain *, sd_ea);
+DECLARE_PER_CPU(struct sched_domain *, sd_scs);
 
 struct sched_group_capacity {
 	atomic_t ref;
@@ -853,7 +1034,9 @@
 	 * CPU capacity of this group, SCHED_LOAD_SCALE being max capacity
 	 * for a single CPU.
 	 */
-	unsigned int capacity;
+	unsigned long capacity;
+	unsigned long max_capacity; /* Max per-cpu capacity in group */
+	unsigned long min_capacity; /* Min per-CPU capacity in group */
 	unsigned long next_update;
 	int imbalance; /* XXX unrelated to capacity but shared group state */
 	/*
@@ -870,6 +1053,7 @@
 
 	unsigned int group_weight;
 	struct sched_group_capacity *sgc;
+	const struct sched_group_energy *sge;
 
 	/*
 	 * The CPUs this group covers.
@@ -915,6 +1099,648 @@
 #include "stats.h"
 #include "auto_group.h"
 
+enum sched_boost_policy {
+	SCHED_BOOST_NONE,
+	SCHED_BOOST_ON_BIG,
+	SCHED_BOOST_ON_ALL,
+};
+
+#ifdef CONFIG_SCHED_HMP
+
+#define WINDOW_STATS_RECENT		0
+#define WINDOW_STATS_MAX		1
+#define WINDOW_STATS_MAX_RECENT_AVG	2
+#define WINDOW_STATS_AVG		3
+#define WINDOW_STATS_INVALID_POLICY	4
+
+#define SCHED_UPMIGRATE_MIN_NICE 15
+#define EXITING_TASK_MARKER	0xdeaddead
+
+#define UP_MIGRATION		1
+#define DOWN_MIGRATION		2
+#define IRQLOAD_MIGRATION	3
+
+extern struct mutex policy_mutex;
+extern unsigned int sched_ravg_window;
+extern unsigned int sched_disable_window_stats;
+extern unsigned int max_possible_freq;
+extern unsigned int min_max_freq;
+extern unsigned int pct_task_load(struct task_struct *p);
+extern unsigned int max_possible_efficiency;
+extern unsigned int min_possible_efficiency;
+extern unsigned int max_capacity;
+extern unsigned int min_capacity;
+extern unsigned int max_load_scale_factor;
+extern unsigned int max_possible_capacity;
+extern unsigned int min_max_possible_capacity;
+extern unsigned int max_power_cost;
+extern unsigned int sched_init_task_load_windows;
+extern unsigned int up_down_migrate_scale_factor;
+extern unsigned int sysctl_sched_restrict_cluster_spill;
+extern unsigned int sched_pred_alert_load;
+extern struct sched_cluster init_cluster;
+extern unsigned int  __read_mostly sched_short_sleep_task_threshold;
+extern unsigned int  __read_mostly sched_long_cpu_selection_threshold;
+extern unsigned int  __read_mostly sched_big_waker_task_load;
+extern unsigned int  __read_mostly sched_small_wakee_task_load;
+extern unsigned int  __read_mostly sched_spill_load;
+extern unsigned int  __read_mostly sched_upmigrate;
+extern unsigned int  __read_mostly sched_downmigrate;
+extern unsigned int  __read_mostly sysctl_sched_spill_nr_run;
+extern unsigned int  __read_mostly sched_load_granule;
+
+extern void init_new_task_load(struct task_struct *p, bool idle_task);
+extern u64 sched_ktime_clock(void);
+extern int got_boost_kick(void);
+extern int register_cpu_cycle_counter_cb(struct cpu_cycle_counter_cb *cb);
+extern void update_task_ravg(struct task_struct *p, struct rq *rq, int event,
+						u64 wallclock, u64 irqtime);
+extern bool early_detection_notify(struct rq *rq, u64 wallclock);
+extern void clear_ed_task(struct task_struct *p, struct rq *rq);
+extern void fixup_busy_time(struct task_struct *p, int new_cpu);
+extern void clear_boost_kick(int cpu);
+extern void clear_hmp_request(int cpu);
+extern void mark_task_starting(struct task_struct *p);
+extern void set_window_start(struct rq *rq);
+extern void update_cluster_topology(void);
+extern void note_task_waking(struct task_struct *p, u64 wallclock);
+extern void set_task_last_switch_out(struct task_struct *p, u64 wallclock);
+extern void init_clusters(void);
+extern void reset_cpu_hmp_stats(int cpu, int reset_cra);
+extern unsigned int max_task_load(void);
+extern void sched_account_irqtime(int cpu, struct task_struct *curr,
+				 u64 delta, u64 wallclock);
+extern void sched_account_irqstart(int cpu, struct task_struct *curr,
+				   u64 wallclock);
+extern unsigned int cpu_temp(int cpu);
+extern unsigned int nr_eligible_big_tasks(int cpu);
+extern int update_preferred_cluster(struct related_thread_group *grp,
+			struct task_struct *p, u32 old_load);
+extern void set_preferred_cluster(struct related_thread_group *grp);
+extern void add_new_task_to_grp(struct task_struct *new);
+extern unsigned int update_freq_aggregate_threshold(unsigned int threshold);
+extern void update_avg_burst(struct task_struct *p);
+extern void update_avg(u64 *avg, u64 sample);
+
+#define NO_BOOST 0
+#define FULL_THROTTLE_BOOST 1
+#define CONSERVATIVE_BOOST 2
+#define RESTRAINED_BOOST 3
+
+static inline struct sched_cluster *cpu_cluster(int cpu)
+{
+	return cpu_rq(cpu)->cluster;
+}
+
+static inline int cpu_capacity(int cpu)
+{
+	return cpu_rq(cpu)->cluster->capacity;
+}
+
+static inline int cpu_max_possible_capacity(int cpu)
+{
+	return cpu_rq(cpu)->cluster->max_possible_capacity;
+}
+
+static inline int cpu_load_scale_factor(int cpu)
+{
+	return cpu_rq(cpu)->cluster->load_scale_factor;
+}
+
+static inline int cpu_efficiency(int cpu)
+{
+	return cpu_rq(cpu)->cluster->efficiency;
+}
+
+static inline unsigned int cpu_cur_freq(int cpu)
+{
+	return cpu_rq(cpu)->cluster->cur_freq;
+}
+
+static inline unsigned int cpu_min_freq(int cpu)
+{
+	return cpu_rq(cpu)->cluster->min_freq;
+}
+
+static inline unsigned int cluster_max_freq(struct sched_cluster *cluster)
+{
+	/*
+	 * The governor and the thermal driver don't know each other's
+	 * mitigation vote, so struct cluster saves both and returns their
+	 * min() as the current cluster fmax.
+	 */
+	return min(cluster->max_mitigated_freq, cluster->max_freq);
+}
+
+static inline unsigned int cpu_max_freq(int cpu)
+{
+	return cluster_max_freq(cpu_rq(cpu)->cluster);
+}
+
+static inline unsigned int cpu_max_possible_freq(int cpu)
+{
+	return cpu_rq(cpu)->cluster->max_possible_freq;
+}
+
+static inline int same_cluster(int src_cpu, int dst_cpu)
+{
+	return cpu_rq(src_cpu)->cluster == cpu_rq(dst_cpu)->cluster;
+}
+
+static inline int cpu_max_power_cost(int cpu)
+{
+	return cpu_rq(cpu)->cluster->max_power_cost;
+}
+
+static inline int cpu_min_power_cost(int cpu)
+{
+	return cpu_rq(cpu)->cluster->min_power_cost;
+}
+
+static inline u32 cpu_cycles_to_freq(u64 cycles, u64 period)
+{
+	return div64_u64(cycles, period);
+}
+
+static inline bool hmp_capable(void)
+{
+	return max_possible_capacity != min_max_possible_capacity;
+}
+
+static inline bool is_max_capacity_cpu(int cpu)
+{
+	return cpu_max_possible_capacity(cpu) == max_possible_capacity;
+}
+
+/*
+ * 'load' is measured relative to the "best cpu" running at its best
+ * frequency. Scale it for the given cpu, accounting for how much worse
+ * that cpu is than the best one.
+ */
+static inline u64 scale_load_to_cpu(u64 task_load, int cpu)
+{
+	u64 lsf = cpu_load_scale_factor(cpu);
+
+	if (lsf != 1024) {
+		task_load *= lsf;
+		task_load /= 1024;
+	}
+
+	return task_load;
+}
+
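
/*
 * Worked example (illustrative numbers, not from the patch): with
 * load_scale_factor = 1434 on a little CPU, a task load of 300 scales to
 * 300 * 1434 / 1024 = 420 -- the same work costs ~40% more of the little
 * CPU's capacity than it does on the best CPU. On the best CPU itself
 * lsf == 1024 and the load is returned unchanged.
 */
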
+static inline unsigned int task_load(struct task_struct *p)
+{
+	return p->ravg.demand;
+}
+
+static inline void
+inc_cumulative_runnable_avg(struct hmp_sched_stats *stats,
+				 struct task_struct *p)
+{
+	u32 task_load;
+
+	if (sched_disable_window_stats)
+		return;
+
+	task_load = sched_disable_window_stats ? 0 : p->ravg.demand;
+
+	stats->cumulative_runnable_avg += task_load;
+	stats->pred_demands_sum += p->ravg.pred_demand;
+}
+
+static inline void
+dec_cumulative_runnable_avg(struct hmp_sched_stats *stats,
+				struct task_struct *p)
+{
+	u32 task_load;
+
+	if (sched_disable_window_stats)
+		return;
+
+	task_load = sched_disable_window_stats ? 0 : p->ravg.demand;
+
+	stats->cumulative_runnable_avg -= task_load;
+
+	BUG_ON((s64)stats->cumulative_runnable_avg < 0);
+
+	stats->pred_demands_sum -= p->ravg.pred_demand;
+	BUG_ON((s64)stats->pred_demands_sum < 0);
+}
+
+static inline void
+fixup_cumulative_runnable_avg(struct hmp_sched_stats *stats,
+			      struct task_struct *p, s64 task_load_delta,
+			      s64 pred_demand_delta)
+{
+	if (sched_disable_window_stats)
+		return;
+
+	stats->cumulative_runnable_avg += task_load_delta;
+	BUG_ON((s64)stats->cumulative_runnable_avg < 0);
+
+	stats->pred_demands_sum += pred_demand_delta;
+	BUG_ON((s64)stats->pred_demands_sum < 0);
+}
+
+#define pct_to_real(tunable)	\
+		(div64_u64((u64)tunable * (u64)max_task_load(), 100))
+
+#define real_to_pct(tunable)	\
+		(div64_u64((u64)tunable * (u64)100, (u64)max_task_load()))
+
+#define SCHED_HIGH_IRQ_TIMEOUT 3
+static inline u64 sched_irqload(int cpu)
+{
+	struct rq *rq = cpu_rq(cpu);
+	s64 delta;
+
+	delta = get_jiffies_64() - rq->irqload_ts;
+	/*
+	 * Current context can be preempted by irq and rq->irqload_ts can be
+	 * updated by irq context so that delta can be negative.
+	 * But this is okay and we can safely return, as it means there
+	 * was a recent irq occurrence.
+	 */
+
+	if (delta < SCHED_HIGH_IRQ_TIMEOUT)
+		return rq->avg_irqload;
+	else
+		return 0;
+}
+
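/*
 * Worked example (illustrative, not from the patch): with HZ = 100 the
 * SCHED_HIGH_IRQ_TIMEOUT of 3 jiffies means avg_irqload keeps being
 * reported for roughly 30ms after the last irqload_ts update; after
 * that the cpu is treated as having no irq load.
 */
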
+static inline int sched_cpu_high_irqload(int cpu)
+{
+	return sched_irqload(cpu) >= sysctl_sched_cpu_high_irqload;
+}
+
+static inline bool task_in_related_thread_group(struct task_struct *p)
+{
+	return rcu_access_pointer(p->grp) != NULL;
+}
+
+static inline
+struct related_thread_group *task_related_thread_group(struct task_struct *p)
+{
+	return rcu_dereference(p->grp);
+}
+
+#define PRED_DEMAND_DELTA ((s64)new_pred_demand - p->ravg.pred_demand)
+
+extern void
+check_for_freq_change(struct rq *rq, bool check_pred, bool check_groups);
+
+extern void notify_migration(int src_cpu, int dest_cpu,
+			bool src_cpu_dead, struct task_struct *p);
+
+/* Is frequency of two cpus synchronized with each other? */
+static inline int same_freq_domain(int src_cpu, int dst_cpu)
+{
+	struct rq *rq = cpu_rq(src_cpu);
+
+	if (src_cpu == dst_cpu)
+		return 1;
+
+	return cpumask_test_cpu(dst_cpu, &rq->freq_domain_cpumask);
+}
+
+#define	BOOST_KICK	0
+#define	CPU_RESERVED	1
+
+static inline int is_reserved(int cpu)
+{
+	struct rq *rq = cpu_rq(cpu);
+
+	return test_bit(CPU_RESERVED, &rq->hmp_flags);
+}
+
+static inline int mark_reserved(int cpu)
+{
+	struct rq *rq = cpu_rq(cpu);
+
+	/* TODO: reconcile the boost_flags vs. hmp_flags naming */
+	return test_and_set_bit(CPU_RESERVED, &rq->hmp_flags);
+}
+
+static inline void clear_reserved(int cpu)
+{
+	struct rq *rq = cpu_rq(cpu);
+
+	clear_bit(CPU_RESERVED, &rq->hmp_flags);
+}
+
+static inline u64 cpu_cravg_sync(int cpu, int sync)
+{
+	struct rq *rq = cpu_rq(cpu);
+	u64 load;
+
+	load = rq->hmp_stats.cumulative_runnable_avg;
+
+	/*
+	 * If load is being checked in a sync wakeup environment,
+	 * we may want to discount the load of the currently running
+	 * task.
+	 */
+	if (sync && cpu == smp_processor_id()) {
+		if (load > rq->curr->ravg.demand)
+			load -= rq->curr->ravg.demand;
+		else
+			load = 0;
+	}
+
+	return load;
+}
+
+static inline bool is_short_burst_task(struct task_struct *p)
+{
+	return p->ravg.avg_burst < sysctl_sched_short_burst &&
+	       p->ravg.avg_sleep_time > sysctl_sched_short_sleep;
+}
+
+extern void check_for_migration(struct rq *rq, struct task_struct *p);
+extern void pre_big_task_count_change(const struct cpumask *cpus);
+extern void post_big_task_count_change(const struct cpumask *cpus);
+extern void set_hmp_defaults(void);
+extern int power_delta_exceeded(unsigned int cpu_cost, unsigned int base_cost);
+extern unsigned int power_cost(int cpu, u64 demand);
+extern void reset_all_window_stats(u64 window_start, unsigned int window_size);
+extern int sched_boost(void);
+extern int task_load_will_fit(struct task_struct *p, u64 task_load, int cpu,
+					enum sched_boost_policy boost_policy);
+extern enum sched_boost_policy sched_boost_policy(void);
+extern int task_will_fit(struct task_struct *p, int cpu);
+extern u64 cpu_load(int cpu);
+extern u64 cpu_load_sync(int cpu, int sync);
+extern int preferred_cluster(struct sched_cluster *cluster,
+						struct task_struct *p);
+extern void inc_nr_big_task(struct hmp_sched_stats *stats,
+					struct task_struct *p);
+extern void dec_nr_big_task(struct hmp_sched_stats *stats,
+					struct task_struct *p);
+extern void inc_rq_hmp_stats(struct rq *rq,
+				struct task_struct *p, int change_cra);
+extern void dec_rq_hmp_stats(struct rq *rq,
+				struct task_struct *p, int change_cra);
+extern void reset_hmp_stats(struct hmp_sched_stats *stats, int reset_cra);
+extern int is_big_task(struct task_struct *p);
+extern int upmigrate_discouraged(struct task_struct *p);
+extern struct sched_cluster *rq_cluster(struct rq *rq);
+extern int nr_big_tasks(struct rq *rq);
+extern void fixup_nr_big_tasks(struct hmp_sched_stats *stats,
+					struct task_struct *p, s64 delta);
+extern void reset_task_stats(struct task_struct *p);
+extern void reset_cfs_rq_hmp_stats(int cpu, int reset_cra);
+extern void _inc_hmp_sched_stats_fair(struct rq *rq,
+			struct task_struct *p, int change_cra);
+extern u64 cpu_upmigrate_discourage_read_u64(struct cgroup_subsys_state *css,
+					struct cftype *cft);
+extern int cpu_upmigrate_discourage_write_u64(struct cgroup_subsys_state *css,
+				struct cftype *cft, u64 upmigrate_discourage);
+extern void sched_boost_parse_dt(void);
+extern void clear_top_tasks_bitmap(unsigned long *bitmap);
+
+#if defined(CONFIG_SCHED_TUNE) && defined(CONFIG_CGROUP_SCHEDTUNE)
+extern bool task_sched_boost(struct task_struct *p);
+extern int sync_cgroup_colocation(struct task_struct *p, bool insert);
+extern bool same_schedtune(struct task_struct *tsk1, struct task_struct *tsk2);
+extern void update_cgroup_boost_settings(void);
+extern void restore_cgroup_boost_settings(void);
+
+#else
+static inline bool
+same_schedtune(struct task_struct *tsk1, struct task_struct *tsk2)
+{
+	return true;
+}
+
+static inline bool task_sched_boost(struct task_struct *p)
+{
+	return true;
+}
+
+static inline void update_cgroup_boost_settings(void) { }
+static inline void restore_cgroup_boost_settings(void) { }
+#endif
+
+extern int alloc_related_thread_groups(void);
+
+#else	/* CONFIG_SCHED_HMP */
+
+struct hmp_sched_stats;
+struct related_thread_group;
+struct sched_cluster;
+
+static inline enum sched_boost_policy sched_boost_policy(void)
+{
+	return SCHED_BOOST_NONE;
+}
+
+static inline bool task_sched_boost(struct task_struct *p)
+{
+	return true;
+}
+
+static inline int got_boost_kick(void)
+{
+	return 0;
+}
+
+static inline void update_task_ravg(struct task_struct *p, struct rq *rq,
+				int event, u64 wallclock, u64 irqtime) { }
+
+static inline bool early_detection_notify(struct rq *rq, u64 wallclock)
+{
+	return false;
+}
+
+static inline void clear_ed_task(struct task_struct *p, struct rq *rq) { }
+static inline void fixup_busy_time(struct task_struct *p, int new_cpu) { }
+static inline void clear_boost_kick(int cpu) { }
+static inline void clear_hmp_request(int cpu) { }
+static inline void mark_task_starting(struct task_struct *p) { }
+static inline void set_window_start(struct rq *rq) { }
+static inline void init_clusters(void) {}
+static inline void update_cluster_topology(void) { }
+static inline void note_task_waking(struct task_struct *p, u64 wallclock) { }
+static inline void set_task_last_switch_out(struct task_struct *p,
+					    u64 wallclock) { }
+
+static inline int task_will_fit(struct task_struct *p, int cpu)
+{
+	return 1;
+}
+
+static inline int select_best_cpu(struct task_struct *p, int target,
+				  int reason, int sync)
+{
+	return 0;
+}
+
+static inline unsigned int power_cost(int cpu, u64 demand)
+{
+	return SCHED_CAPACITY_SCALE;
+}
+
+static inline int sched_boost(void)
+{
+	return 0;
+}
+
+static inline int is_big_task(struct task_struct *p)
+{
+	return 0;
+}
+
+static inline int nr_big_tasks(struct rq *rq)
+{
+	return 0;
+}
+
+static inline int is_cpu_throttling_imminent(int cpu)
+{
+	return 0;
+}
+
+static inline int is_task_migration_throttled(struct task_struct *p)
+{
+	return 0;
+}
+
+static inline unsigned int cpu_temp(int cpu)
+{
+	return 0;
+}
+
+static inline void
+inc_rq_hmp_stats(struct rq *rq, struct task_struct *p, int change_cra) { }
+
+static inline void
+dec_rq_hmp_stats(struct rq *rq, struct task_struct *p, int change_cra) { }
+
+static inline void
+inc_hmp_sched_stats_fair(struct rq *rq, struct task_struct *p) { }
+
+static inline void
+dec_hmp_sched_stats_fair(struct rq *rq, struct task_struct *p) { }
+
+static inline int
+preferred_cluster(struct sched_cluster *cluster, struct task_struct *p)
+{
+	return 1;
+}
+
+static inline struct sched_cluster *rq_cluster(struct rq *rq)
+{
+	return NULL;
+}
+
+static inline void init_new_task_load(struct task_struct *p, bool idle_task)
+{
+}
+
+static inline u64 scale_load_to_cpu(u64 load, int cpu)
+{
+	return load;
+}
+
+static inline unsigned int nr_eligible_big_tasks(int cpu)
+{
+	return 0;
+}
+
+static inline bool is_max_capacity_cpu(int cpu) { return true; }
+
+static inline int pct_task_load(struct task_struct *p) { return 0; }
+
+static inline int cpu_capacity(int cpu)
+{
+	return SCHED_LOAD_SCALE;
+}
+
+static inline int same_cluster(int src_cpu, int dst_cpu) { return 1; }
+
+static inline void inc_cumulative_runnable_avg(struct hmp_sched_stats *stats,
+		 struct task_struct *p)
+{
+}
+
+static inline void dec_cumulative_runnable_avg(struct hmp_sched_stats *stats,
+		 struct task_struct *p)
+{
+}
+
+static inline void sched_account_irqtime(int cpu, struct task_struct *curr,
+				 u64 delta, u64 wallclock)
+{
+}
+
+static inline void sched_account_irqstart(int cpu, struct task_struct *curr,
+					  u64 wallclock)
+{
+}
+
+static inline int sched_cpu_high_irqload(int cpu) { return 0; }
+
+static inline void set_preferred_cluster(struct related_thread_group *grp) { }
+
+static inline bool task_in_related_thread_group(struct task_struct *p)
+{
+	return false;
+}
+
+static inline
+struct related_thread_group *task_related_thread_group(struct task_struct *p)
+{
+	return NULL;
+}
+
+static inline u32 task_load(struct task_struct *p) { return 0; }
+
+static inline int update_preferred_cluster(struct related_thread_group *grp,
+			 struct task_struct *p, u32 old_load)
+{
+	return 0;
+}
+
+static inline void add_new_task_to_grp(struct task_struct *new) {}
+
+#define PRED_DEMAND_DELTA (0)
+
+static inline void
+check_for_freq_change(struct rq *rq, bool check_pred, bool check_groups) { }
+
+static inline void notify_migration(int src_cpu, int dest_cpu,
+			bool src_cpu_dead, struct task_struct *p) { }
+
+static inline int same_freq_domain(int src_cpu, int dst_cpu)
+{
+	return 1;
+}
+
+static inline void check_for_migration(struct rq *rq, struct task_struct *p) { }
+static inline void pre_big_task_count_change(void) { }
+static inline void post_big_task_count_change(void) { }
+static inline void set_hmp_defaults(void) { }
+
+static inline void clear_reserved(int cpu) { }
+static inline void sched_boost_parse_dt(void) {}
+static inline int alloc_related_thread_groups(void) { return 0; }
+
+#define trace_sched_cpu_load(...)
+#define trace_sched_cpu_load_lb(...)
+#define trace_sched_cpu_load_cgroup(...)
+#define trace_sched_cpu_load_wakeup(...)
+
+static inline void update_avg_burst(struct task_struct *p) {}
+
+#endif	/* CONFIG_SCHED_HMP */
+
+/*
+ * Returns the capacity of the group's first cpu, taken to stand for
+ * every rq in the group. This does not play well with groups whose
+ * rq capacities can change independently.
+ */
+#define group_rq_capacity(group) cpu_capacity(group_first_cpu(group))
+
 #ifdef CONFIG_CGROUP_SCHED
 
 /*
@@ -943,6 +1769,7 @@
 #endif
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
+	set_task_rq_fair(&p->se, p->se.cfs_rq, tg->cfs_rq[cpu]);
 	p->se.cfs_rq = tg->cfs_rq[cpu];
 	p->se.parent = tg->se[cpu];
 #endif
@@ -960,7 +1787,6 @@
 {
 	return NULL;
 }
-
 #endif /* CONFIG_CGROUP_SCHED */
 
 static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu)
@@ -973,7 +1799,11 @@
 	 * per-task data have been completed by this moment.
 	 */
 	smp_wmb();
+#ifdef CONFIG_THREAD_INFO_IN_TASK
+	p->cpu = cpu;
+#else
 	task_thread_info(p)->cpu = cpu;
+#endif
 	p->wake_cpu = cpu;
 #endif
 }
@@ -1110,6 +1940,7 @@
 #define WF_SYNC		0x01		/* waker goes to sleep after wakeup */
 #define WF_FORK		0x02		/* child wakeup after fork */
 #define WF_MIGRATED	0x4		/* internal use, task got migrated */
+#define WF_NO_NOTIFIER	0x08		/* do not notify governor */
 
 /*
  * To aid in avoiding the subversion of "niceness" due to uneven distribution
@@ -1164,18 +1995,41 @@
  /*  15 */ 119304647, 148102320, 186737708, 238609294, 286331153,
 };
 
+/*
+ * {de,en}queue flags:
+ *
+ * DEQUEUE_SLEEP  - task is no longer runnable
+ * ENQUEUE_WAKEUP - task just became runnable
+ *
+ * SAVE/RESTORE - an otherwise spurious dequeue/enqueue, done to ensure tasks
+ *                are in a known state which allows modification. Such pairs
+ *                should preserve as much state as possible.
+ *
+ * MOVE - paired with SAVE/RESTORE, explicitly does not preserve the location
+ *        in the runqueue.
+ *
+ * ENQUEUE_HEAD      - place at front of runqueue (tail if not specified)
+ * ENQUEUE_REPLENISH - CBS (replenish runtime and postpone deadline)
+ * ENQUEUE_WAKING    - sched_class::task_waking was called
+ *
+ */
+
+#define DEQUEUE_SLEEP		0x01
+#define DEQUEUE_SAVE		0x02 /* matches ENQUEUE_RESTORE */
+#define DEQUEUE_MOVE		0x04 /* matches ENQUEUE_MOVE */
+
 #define ENQUEUE_WAKEUP		0x01
-#define ENQUEUE_HEAD		0x02
+#define ENQUEUE_RESTORE		0x02
+#define ENQUEUE_MOVE		0x04
+
+#define ENQUEUE_HEAD		0x08
+#define ENQUEUE_REPLENISH	0x10
 #ifdef CONFIG_SMP
-#define ENQUEUE_WAKING		0x04	/* sched_class::task_waking was called */
+#define ENQUEUE_WAKING		0x20
 #else
 #define ENQUEUE_WAKING		0x00
 #endif
-#define ENQUEUE_REPLENISH	0x08
-#define ENQUEUE_RESTORE	0x10
-
-#define DEQUEUE_SLEEP		0x01
-#define DEQUEUE_SAVE		0x02
+#define ENQUEUE_WAKEUP_NEW	0x40
 
 #define RETRY_TASK		((void *)-1UL)
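
Illustrative sketch (not from the patch) of the dequeue/enqueue pairing the SAVE/RESTORE/MOVE flags are designed for; dequeue_task()/enqueue_task() stand for core.c's helpers.

	/*
	 * Canonical pattern: take a queued task off the runqueue, change an
	 * attribute, and put it back while preserving as much state as
	 * possible. Assumes the rq lock is held and the task is queued.
	 */
	static void change_task_attr(struct rq *rq, struct task_struct *p)
	{
		dequeue_task(rq, p, DEQUEUE_SAVE | DEQUEUE_MOVE);

		/* ...modify priority, policy or group here... */

		enqueue_task(rq, p, ENQUEUE_RESTORE | ENQUEUE_MOVE);
	}
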
 
@@ -1202,7 +2056,8 @@
 	void (*put_prev_task) (struct rq *rq, struct task_struct *p);
 
 #ifdef CONFIG_SMP
-	int  (*select_task_rq)(struct task_struct *p, int task_cpu, int sd_flag, int flags);
+	int  (*select_task_rq)(struct task_struct *p, int task_cpu, int sd_flag, int flags,
+			       int sibling_count_hint);
 	void (*migrate_task_rq)(struct task_struct *p);
 
 	void (*task_waking) (struct task_struct *task);
@@ -1235,8 +2090,17 @@
 
 	void (*update_curr) (struct rq *rq);
 
+#define TASK_SET_GROUP  0
+#define TASK_MOVE_GROUP	1
+
 #ifdef CONFIG_FAIR_GROUP_SCHED
-	void (*task_move_group) (struct task_struct *p);
+	void (*task_change_group)(struct task_struct *p, int type);
+#endif
+#ifdef CONFIG_SCHED_HMP
+	void (*inc_hmp_sched_stats)(struct rq *rq, struct task_struct *p);
+	void (*dec_hmp_sched_stats)(struct rq *rq, struct task_struct *p);
+	void (*fixup_hmp_sched_stats)(struct rq *rq, struct task_struct *p,
+				      u32 new_task_load, u32 new_pred_demand);
 #endif
 };
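
For illustration (a sketch, not from the patch): with the single task_change_group() hook, a caller distinguishes the two cases by the type argument, roughly as mainline's core.c does.

	/*
	 * Assumes CONFIG_FAIR_GROUP_SCHED, since the hook only exists in
	 * that configuration. type is TASK_SET_GROUP at fork time and
	 * TASK_MOVE_GROUP on cgroup moves.
	 */
	static void sched_change_group(struct task_struct *p, int type)
	{
		if (p->sched_class->task_change_group)
			p->sched_class->task_change_group(p, type);
	}
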
 
@@ -1258,9 +2122,11 @@
 
 #ifdef CONFIG_SMP
 
+extern void init_max_cpu_capacity(struct max_cpu_capacity *mcc);
 extern void update_group_capacity(struct sched_domain *sd, int cpu);
 
 extern void trigger_load_balance(struct rq *rq);
+extern void nohz_balance_clear_nohz_mask(int cpu);
 
 extern void idle_enter_fair(struct rq *this_rq);
 extern void idle_exit_fair(struct rq *this_rq);
@@ -1286,6 +2152,17 @@
 	WARN_ON(!rcu_read_lock_held());
 	return rq->idle_state;
 }
+
+static inline void idle_set_state_idx(struct rq *rq, int idle_state_idx)
+{
+	rq->idle_state_idx = idle_state_idx;
+}
+
+static inline int idle_get_state_idx(struct rq *rq)
+{
+	WARN_ON(!rcu_read_lock_held());
+	return rq->idle_state_idx;
+}
 #else
 static inline void idle_set_state(struct rq *rq,
 				  struct cpuidle_state *idle_state)
@@ -1296,9 +2173,20 @@
 {
 	return NULL;
 }
+
+static inline void idle_set_state_idx(struct rq *rq, int idle_state_idx)
+{
+}
+
+static inline int idle_get_state_idx(struct rq *rq)
+{
+	return -1;
+}
 #endif
 
+#ifdef CONFIG_SYSRQ_SCHED_DEBUG
 extern void sysrq_sched_debug_show(void);
+#endif
 extern void sched_init_granularity(void);
 extern void update_max_interval(void);
 
@@ -1311,6 +2199,7 @@
 
 extern struct rt_bandwidth def_rt_bandwidth;
 extern void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime);
+extern void init_rt_schedtune_timer(struct sched_rt_entity *rt_se);
 
 extern struct dl_bandwidth def_dl_bandwidth;
 extern void init_dl_bandwidth(struct dl_bandwidth *dl_b, u64 period, u64 runtime);
@@ -1319,11 +2208,13 @@
 unsigned long to_ratio(u64 period, u64 runtime);
 
 extern void init_entity_runnable_average(struct sched_entity *se);
+extern void post_init_entity_util_avg(struct sched_entity *se);
 
-static inline void add_nr_running(struct rq *rq, unsigned count)
+static inline void __add_nr_running(struct rq *rq, unsigned count)
 {
 	unsigned prev_nr = rq->nr_running;
 
+	sched_update_nr_prod(cpu_of(rq), count, true);
 	rq->nr_running = prev_nr + count;
 
 	if (prev_nr < 2 && rq->nr_running >= 2) {
@@ -1348,11 +2239,49 @@
 	}
 }
 
-static inline void sub_nr_running(struct rq *rq, unsigned count)
+static inline void __sub_nr_running(struct rq *rq, unsigned count)
 {
+	sched_update_nr_prod(cpu_of(rq), count, false);
 	rq->nr_running -= count;
 }
 
+#ifdef CONFIG_CPU_QUIET
+#define NR_AVE_SCALE(x)		((x) << FSHIFT)
+static inline u64 do_nr_running_integral(struct rq *rq)
+{
+	s64 nr, deltax;
+	u64 nr_running_integral = rq->nr_running_integral;
+
+	deltax = rq->clock_task - rq->nr_last_stamp;
+	nr = NR_AVE_SCALE(rq->nr_running);
+
+	nr_running_integral += nr * deltax;
+
+	return nr_running_integral;
+}
+
+static inline void add_nr_running(struct rq *rq, unsigned count)
+{
+	write_seqcount_begin(&rq->ave_seqcnt);
+	rq->nr_running_integral = do_nr_running_integral(rq);
+	rq->nr_last_stamp = rq->clock_task;
+	__add_nr_running(rq, count);
+	write_seqcount_end(&rq->ave_seqcnt);
+}
+
+static inline void sub_nr_running(struct rq *rq, unsigned count)
+{
+	write_seqcount_begin(&rq->ave_seqcnt);
+	rq->nr_running_integral = do_nr_running_integral(rq);
+	rq->nr_last_stamp = rq->clock_task;
+	__sub_nr_running(rq, count);
+	write_seqcount_end(&rq->ave_seqcnt);
+}
+#else
+#define add_nr_running __add_nr_running
+#define sub_nr_running __sub_nr_running
+#endif
+
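/*
 * Worked example (illustrative, not from the patch): the integral is a
 * fixed-point (FSHIFT) sum of nr_running weighted by time. If nr_running
 * was 2 for 3ms and then 0 for 1ms, the integral grows by
 * NR_AVE_SCALE(2) * 3e6 ns; dividing that growth by the 4e6 ns elapsed
 * gives 1.5 << FSHIFT, i.e. a time-weighted average of 1.5 runnable
 * tasks, which a CPU_QUIET governor can sample under ave_seqcnt.
 */
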
 static inline void rq_last_tick_reset(struct rq *rq)
 {
 #ifdef CONFIG_NO_HZ_FULL
@@ -1425,10 +2354,102 @@
 }
 #endif
 
+#ifdef CONFIG_SMP
+static inline unsigned long capacity_of(int cpu)
+{
+	return cpu_rq(cpu)->cpu_capacity;
+}
+
+static inline unsigned long capacity_orig_of(int cpu)
+{
+	return cpu_rq(cpu)->cpu_capacity_orig;
+}
+
+extern unsigned int sysctl_sched_use_walt_cpu_util;
+extern unsigned int walt_ravg_window;
+extern bool walt_disabled;
+
+/*
+ * cpu_util returns the amount of capacity of a CPU that is used by CFS
+ * tasks. The unit of the return value must be the one of capacity so we can
+ * compare the utilization with the capacity of the CPU that is available for
+ * CFS task (ie cpu_capacity).
+ *
+ * cfs_rq.avg.util_avg is the sum of running time of runnable tasks plus the
+ * recent utilization of currently non-runnable tasks on a CPU. It represents
+ * the amount of utilization of a CPU in the range [0..capacity_orig] where
+ * capacity_orig is the cpu_capacity available at the highest frequency
+ * (arch_scale_freq_capacity()).
+ * The utilization of a CPU converges towards a sum equal to or less than the
+ * current capacity (capacity_curr <= capacity_orig) of the CPU because it is
+ * the running time on this CPU scaled by capacity_curr.
+ *
+ * Nevertheless, cfs_rq.avg.util_avg can be higher than capacity_curr or even
+ * higher than capacity_orig because of unfortunate rounding in
+ * cfs.avg.util_avg or just after migrating tasks and new task wakeups until
+ * the average stabilizes with the new running time. We need to check that the
+ * utilization stays within the range of [0..capacity_orig] and cap it if
+ * necessary. Without utilization capping, a group could be seen as overloaded
+ * (CPU0 utilization at 121% + CPU1 utilization at 80%) whereas CPU1 has 20% of
+ * available capacity. We allow utilization to overshoot capacity_curr (but not
+ * capacity_orig) as it useful for predicting the capacity required after task
+ * migrations (scheduler-driven DVFS).
+ */
+static inline unsigned long __cpu_util(int cpu, int delta)
+{
+	unsigned long util = cpu_rq(cpu)->cfs.avg.util_avg;
+	unsigned long capacity = capacity_orig_of(cpu);
+
+#ifdef CONFIG_SCHED_WALT
+	if (!walt_disabled && sysctl_sched_use_walt_cpu_util)
+		util = div64_u64(cpu_rq(cpu)->cumulative_runnable_avg,
+				 walt_ravg_window >> SCHED_LOAD_SHIFT);
+#endif
+
+	delta += util;
+	if (delta < 0)
+		return 0;
+
+	return (delta >= capacity) ? capacity : delta;
+}
+
+static inline unsigned long cpu_util(int cpu)
+{
+	return __cpu_util(cpu, 0);
+}
+
+static inline unsigned long cpu_util_freq(int cpu)
+{
+	unsigned long util = cpu_rq(cpu)->cfs.avg.util_avg;
+	unsigned long capacity = capacity_orig_of(cpu);
+
+#ifdef CONFIG_SCHED_WALT
+	if (!walt_disabled && sysctl_sched_use_walt_cpu_util)
+		util = div64_u64(cpu_rq(cpu)->prev_runnable_sum,
+				 walt_ravg_window >> SCHED_LOAD_SHIFT);
+#endif
+	return (util >= capacity) ? capacity : util;
+}
+
+#endif
+
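/*
 * Worked example (illustrative numbers, not from the patch): with
 * capacity_orig = 1024 and cfs.avg.util_avg = 1100 (overshoot after a
 * migration), cpu_util() caps the result at 1024; __cpu_util(cpu, -200)
 * would report 900, e.g. to ask "what if this task were moved away?".
 * Under WALT with walt_ravg_window = 20ms, a cumulative_runnable_avg
 * worth 10ms of work maps to roughly half capacity:
 * 10e6 / (20e6 >> SCHED_LOAD_SHIFT) ~= 512.
 */
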
+#ifdef CONFIG_SCHED_HMP
+/*
+ * HMP and EAS are orthogonal. Hopefully the compiler just elides out all code
+ * with the energy_aware() check, so that we don't even pay the comparison
+ * penalty at runtime.
+ */
+#define energy_aware() false
+#else
+static inline bool energy_aware(void)
+{
+	return sched_feat(ENERGY_AWARE);
+}
+#endif
+
 static inline void sched_rt_avg_update(struct rq *rq, u64 rt_delta)
 {
 	rq->rt_avg += rt_delta * arch_scale_freq_capacity(NULL, cpu_of(rq));
-	sched_avg_update(rq);
 }
 #else
 static inline void sched_rt_avg_update(struct rq *rq, u64 rt_delta) { }
@@ -1517,6 +2538,9 @@
 	raw_spin_unlock_irqrestore(&p->pi_lock, *flags);
 }
 
+extern struct rq *lock_rq_of(struct task_struct *p, unsigned long *flags);
+extern void unlock_rq_of(struct rq *rq, struct task_struct *p, unsigned long *flags);
+
 #ifdef CONFIG_SMP
 #ifdef CONFIG_PREEMPT
 
@@ -1589,6 +2613,7 @@
 static inline void double_unlock_balance(struct rq *this_rq, struct rq *busiest)
 	__releases(busiest->lock)
 {
-	raw_spin_unlock(&busiest->lock);
+	if (this_rq != busiest)
+		raw_spin_unlock(&busiest->lock);
 	lock_set_subclass(&this_rq->lock.dep_map, 0, _RET_IP_);
 }
@@ -1662,6 +2687,11 @@
 		__release(rq2->lock);
 }
 
+/*
+ * task_may_not_preempt - check whether a task may not be preemptible soon
+ */
+extern bool task_may_not_preempt(struct task_struct *task, int cpu);
+
 #else /* CONFIG_SMP */
 
 /*
@@ -1729,6 +2759,9 @@
 	NOHZ_BALANCE_KICK,
 };
 
+#define NOHZ_KICK_ANY 0
+#define NOHZ_KICK_RESTRICT 1
+
 #define nohz_flags(cpu)	(&cpu_rq(cpu)->nohz_flags)
 #endif
 
@@ -1780,3 +2813,66 @@
 }
 #endif /* CONFIG_64BIT */
 #endif /* CONFIG_IRQ_TIME_ACCOUNTING */
+
+#ifdef CONFIG_CPU_FREQ
+DECLARE_PER_CPU(struct update_util_data *, cpufreq_update_util_data);
+
+/**
+ * cpufreq_update_util - Take a note about CPU utilization changes.
+ * @rq: Runqueue to carry out the update for.
+ * @flags: Update reason flags.
+ *
+ * This function is called by the scheduler on the CPU whose utilization is
+ * being updated.
+ *
+ * It can only be called from RCU-sched read-side critical sections.
+ *
+ * The way cpufreq is currently arranged requires it to evaluate the CPU
+ * performance state (frequency/voltage) on a regular basis to prevent it from
+ * being stuck in a completely inadequate performance level for too long.
+ * That is not guaranteed to happen if the updates are only triggered from CFS,
+ * though, because they may not be coming in if RT or deadline tasks are active
+ * all the time (or there are RT and DL tasks only).
+ *
+ * As a workaround for that issue, this function is called by the RT and DL
+ * sched classes to trigger extra cpufreq updates to prevent it from stalling,
+ * but that really is a band-aid.  Going forward it should be replaced with
+ * solutions targeted more specifically at RT and DL tasks.
+ */
+static inline void cpufreq_update_util(struct rq *rq, unsigned int flags)
+{
+        struct update_util_data *data;
+
+        data = rcu_dereference_sched(*this_cpu_ptr(&cpufreq_update_util_data));
+        if (data)
+                data->func(data, rq_clock(rq), flags);
+}
+
+static inline void cpufreq_update_this_cpu(struct rq *rq, unsigned int flags)
+{
+        if (cpu_of(rq) == smp_processor_id())
+                cpufreq_update_util(rq, flags);
+}
+#else
+static inline void cpufreq_update_util(struct rq *rq, unsigned int flags) {}
+static inline void cpufreq_update_this_cpu(struct rq *rq, unsigned int flags) {}
+#endif /* CONFIG_CPU_FREQ */
+
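/*
 * Illustrative sketch, not from the patch: a governor publishes its
 * callback by pointing the per-cpu slot at its update_util_data, after
 * which every scheduler utilization update on that CPU invokes func.
 * This mirrors mainline's cpufreq_add_update_util_hook(); the helper
 * below and its signature are assumptions.
 */
static void my_gov_attach(int cpu, struct update_util_data *data,
			  void (*func)(struct update_util_data *data,
				       u64 time, unsigned int flags))
{
	data->func = func;
	rcu_assign_pointer(per_cpu(cpufreq_update_util_data, cpu), data);
}
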
+#ifdef CONFIG_SCHED_WALT
+
+static inline bool
+walt_task_in_cum_window_demand(struct rq *rq, struct task_struct *p)
+{
+	return cpu_of(rq) == task_cpu(p) &&
+	       (p->on_rq || p->last_sleep_ts >= rq->window_start);
+}
+
+#endif /* CONFIG_SCHED_WALT */
+
+#ifdef arch_scale_freq_capacity
+#ifndef arch_scale_freq_invariant
+#define arch_scale_freq_invariant()     (true)
+#endif
+#else /* arch_scale_freq_capacity */
+#define arch_scale_freq_invariant()     (false)
+#endif
diff -ruw linux-4.4.115/kernel/sched/stats.c linux-4.4.115-fbx/kernel/sched/stats.c
--- linux-4.4.115/kernel/sched/stats.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/kernel/sched/stats.c	2019-01-22 16:16:28.703293496 +0100
@@ -12,6 +12,28 @@
  */
 #define SCHEDSTAT_VERSION 15
 
+#ifdef CONFIG_SMP
+static inline void show_easstat(struct seq_file *seq, struct eas_stats *stats)
+{
+	/* eas-specific runqueue stats */
+	seq_printf(seq, "eas %llu %llu %llu %llu %llu %llu ",
+	    stats->sis_attempts, stats->sis_idle, stats->sis_cache_affine,
+	    stats->sis_suff_cap, stats->sis_idle_cpu, stats->sis_count);
+
+	seq_printf(seq, "%llu %llu %llu %llu %llu %llu %llu ",
+	    stats->secb_attempts, stats->secb_sync, stats->secb_idle_bt,
+	    stats->secb_insuff_cap, stats->secb_no_nrg_sav,
+	    stats->secb_nrg_sav, stats->secb_count);
+
+	seq_printf(seq, "%llu %llu %llu %llu %llu ",
+	    stats->fbt_attempts, stats->fbt_no_cpu, stats->fbt_no_sd,
+	    stats->fbt_pref_idle, stats->fbt_count);
+
+	seq_printf(seq, "%llu %llu\n",
+	    stats->cas_attempts, stats->cas_count);
+}
+#endif
+
 static int show_schedstat(struct seq_file *seq, void *v)
 {
 	int cpu;
@@ -40,6 +62,8 @@
 		seq_printf(seq, "\n");
 
 #ifdef CONFIG_SMP
+		show_easstat(seq, &rq->eas_stats);
+
 		/* domain-specific stats */
 		rcu_read_lock();
 		for_each_domain(cpu, sd) {
@@ -66,6 +90,8 @@
 			    sd->sbf_count, sd->sbf_balanced, sd->sbf_pushed,
 			    sd->ttwu_wake_remote, sd->ttwu_move_affine,
 			    sd->ttwu_move_balance);
+
+			show_easstat(seq, &sd->eas_stats);
 		}
 		rcu_read_unlock();
 #endif
diff -ruw linux-4.4.115/kernel/sched/stop_task.c linux-4.4.115-fbx/kernel/sched/stop_task.c
--- linux-4.4.115/kernel/sched/stop_task.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/kernel/sched/stop_task.c	2019-01-22 16:16:28.707293532 +0100
@@ -11,12 +11,48 @@
 
 #ifdef CONFIG_SMP
 static int
-select_task_rq_stop(struct task_struct *p, int cpu, int sd_flag, int flags)
+select_task_rq_stop(struct task_struct *p, int cpu, int sd_flag, int flags,
+		    int sibling_count_hint)
 {
 	return task_cpu(p); /* stop tasks as never migrate */
 }
 #endif /* CONFIG_SMP */
 
+#ifdef CONFIG_SCHED_HMP
+
+static void
+inc_hmp_sched_stats_stop(struct rq *rq, struct task_struct *p)
+{
+	inc_cumulative_runnable_avg(&rq->hmp_stats, p);
+}
+
+static void
+dec_hmp_sched_stats_stop(struct rq *rq, struct task_struct *p)
+{
+	dec_cumulative_runnable_avg(&rq->hmp_stats, p);
+}
+
+static void
+fixup_hmp_sched_stats_stop(struct rq *rq, struct task_struct *p,
+			   u32 new_task_load, u32 new_pred_demand)
+{
+	s64 task_load_delta = (s64)new_task_load - task_load(p);
+	s64 pred_demand_delta = PRED_DEMAND_DELTA;
+
+	fixup_cumulative_runnable_avg(&rq->hmp_stats, p, task_load_delta,
+				      pred_demand_delta);
+}
+
+#else	/* CONFIG_SCHED_HMP */
+
+static inline void
+inc_hmp_sched_stats_stop(struct rq *rq, struct task_struct *p) { }
+
+static inline void
+dec_hmp_sched_stats_stop(struct rq *rq, struct task_struct *p) { }
+
+#endif	/* CONFIG_SCHED_HMP */
+
 static void
 check_preempt_curr_stop(struct rq *rq, struct task_struct *p, int flags)
 {
@@ -42,12 +78,14 @@
 enqueue_task_stop(struct rq *rq, struct task_struct *p, int flags)
 {
 	add_nr_running(rq, 1);
+	inc_hmp_sched_stats_stop(rq, p);
 }
 
 static void
 dequeue_task_stop(struct rq *rq, struct task_struct *p, int flags)
 {
 	sub_nr_running(rq, 1);
+	dec_hmp_sched_stats_stop(rq, p);
 }
 
 static void yield_task_stop(struct rq *rq)
@@ -134,4 +172,9 @@
 	.prio_changed		= prio_changed_stop,
 	.switched_to		= switched_to_stop,
 	.update_curr		= update_curr_stop,
+#ifdef CONFIG_SCHED_HMP
+	.inc_hmp_sched_stats	= inc_hmp_sched_stats_stop,
+	.dec_hmp_sched_stats	= dec_hmp_sched_stats_stop,
+	.fixup_hmp_sched_stats	= fixup_hmp_sched_stats_stop,
+#endif
 };
diff -ruw linux-4.4.115/kernel/smpboot.c linux-4.4.115-fbx/kernel/smpboot.c
--- linux-4.4.115/kernel/smpboot.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/kernel/smpboot.c	2019-10-29 09:26:25.633222652 +0100
@@ -13,6 +13,7 @@
 #include <linux/percpu.h>
 #include <linux/kthread.h>
 #include <linux/smpboot.h>
+#include <linux/kmemleak.h>
 
 #include "smpboot.h"
 
@@ -31,7 +32,7 @@
 
 	if (!tsk)
 		return ERR_PTR(-ENOMEM);
-	init_idle(tsk, cpu);
+	init_idle(tsk, cpu, true);
 	return tsk;
 }
 
@@ -177,6 +178,8 @@
 	td = kzalloc_node(sizeof(*td), GFP_KERNEL, cpu_to_node(cpu));
 	if (!td)
 		return -ENOMEM;
+
+	kmemleak_not_leak(td);
 	td->cpu = cpu;
 	td->ht = ht;
 
diff -ruw linux-4.4.115/kernel/smp.c linux-4.4.115-fbx/kernel/smp.c
--- linux-4.4.115/kernel/smp.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/kernel/smp.c	2019-01-22 16:16:28.711293568 +0100
@@ -32,6 +32,9 @@
 static DEFINE_PER_CPU_SHARED_ALIGNED(struct llist_head, call_single_queue);
 
 static void flush_smp_call_function_queue(bool warn_cpu_offline);
+/* CPU mask indicating which CPUs to bring online during smp_init() */
+static bool have_boot_cpu_mask;
+static cpumask_var_t boot_cpu_mask;
 
 static int
 hotplug_cfd(struct notifier_block *nfb, unsigned long action, void *hcpu)
@@ -548,6 +551,19 @@
 
 early_param("maxcpus", maxcpus);
 
+static int __init boot_cpus(char *str)
+{
+	alloc_bootmem_cpumask_var(&boot_cpu_mask);
+	if (cpulist_parse(str, boot_cpu_mask) < 0) {
+		pr_warn("SMP: Incorrect boot_cpus cpumask\n");
+		return -EINVAL;
+	}
+	have_boot_cpu_mask = true;
+	return 0;
+}
+
+early_param("boot_cpus", boot_cpus);
+
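/*
 * Usage example (illustrative, not from the patch): booting with
 * "boot_cpus=0,2-3" on the kernel command line brings up only CPUs 0, 2
 * and 3 during smp_init(); other present CPUs stay offline until
 * hotplugged. cpulist_parse() accepts the usual list/range syntax.
 */
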
 /* Setup number of possible processor ids */
 int nr_cpu_ids __read_mostly = NR_CPUS;
 EXPORT_SYMBOL(nr_cpu_ids);
@@ -563,6 +579,21 @@
 	printk(KERN_INFO "Brought up %d CPUs\n", num_online_cpus());
 }
 
+/* Should the given CPU be booted during smp_init() ? */
+static inline bool boot_cpu(int cpu)
+{
+	if (!have_boot_cpu_mask)
+		return true;
+
+	return cpumask_test_cpu(cpu, boot_cpu_mask);
+}
+
+static inline void free_boot_cpu_mask(void)
+{
+	if (have_boot_cpu_mask)	/* Allocated from boot_cpus() */
+		free_bootmem_cpumask_var(boot_cpu_mask);
+}
+
 /* Called by boot processor to activate the rest. */
 void __init smp_init(void)
 {
@@ -574,10 +605,12 @@
 	for_each_present_cpu(cpu) {
 		if (num_online_cpus() >= setup_max_cpus)
 			break;
-		if (!cpu_online(cpu))
+		if (!cpu_online(cpu) && boot_cpu(cpu))
 			cpu_up(cpu);
 	}
 
+	free_boot_cpu_mask();
+
 	/* Any cleanup work */
 	smp_announce();
 	smp_cpus_done(setup_max_cpus);
@@ -733,7 +766,7 @@
 	for_each_online_cpu(cpu) {
 		if (cpu == smp_processor_id())
 			continue;
-
-		wake_up_if_idle(cpu);
+		if (!cpu_isolated(cpu))
+			wake_up_if_idle(cpu);
 	}
 	preempt_enable();
diff -ruw linux-4.4.115/kernel/softirq.c linux-4.4.115-fbx/kernel/softirq.c
--- linux-4.4.115/kernel/softirq.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/kernel/softirq.c	2019-10-29 09:26:25.633222652 +0100
@@ -57,6 +57,13 @@
 
 DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
 
+/*
+ * active_softirqs -- per cpu, a mask of softirqs that are currently being
+ * handled. Approximate answers are acceptable here, so no synchronization
+ * is used.
+ */
+DEFINE_PER_CPU(__u32, active_softirqs);
+
 const char * const softirq_to_name[NR_SOFTIRQS] = {
 	"HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL",
 	"TASKLET", "SCHED", "HRTIMER", "RCU"
@@ -227,7 +234,9 @@
 static inline void lockdep_softirq_end(bool in_hardirq) { }
 #endif
 
-asmlinkage __visible void __do_softirq(void)
+#define long_softirq_pending()	(local_softirq_pending() & LONG_SOFTIRQ_MASK)
+#define defer_for_rt()		(long_softirq_pending() && cpupri_check_rt())
+asmlinkage __visible void __softirq_entry __do_softirq(void)
 {
 	unsigned long end = jiffies + MAX_SOFTIRQ_TIME;
 	unsigned long old_flags = current->flags;
@@ -253,6 +262,7 @@
 restart:
 	/* Reset the pending bitmask before enabling irqs */
 	set_softirq_pending(0);
+	__this_cpu_write(active_softirqs, pending);
 
 	local_irq_enable();
 
@@ -282,12 +292,14 @@
 		pending >>= softirq_bit;
 	}
 
+	__this_cpu_write(active_softirqs, 0);
 	rcu_bh_qs();
 	local_irq_disable();
 
 	pending = local_softirq_pending();
 	if (pending) {
 		if (time_before(jiffies, end) && !need_resched() &&
+		    !defer_for_rt() &&
 		    --max_restart)
 			goto restart;
 
@@ -340,7 +352,7 @@
 
 static inline void invoke_softirq(void)
 {
-	if (!force_irqthreads) {
+	if (!force_irqthreads && !defer_for_rt()) {
 #ifdef CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK
 		/*
 		 * We can safely execute softirq on the current stack if
diff -ruw linux-4.4.115/kernel/sys.c linux-4.4.115-fbx/kernel/sys.c
--- linux-4.4.115/kernel/sys.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/kernel/sys.c	2019-10-29 09:26:25.633222652 +0100
@@ -10,6 +10,7 @@
 #include <linux/mman.h>
 #include <linux/reboot.h>
 #include <linux/prctl.h>
+#include <linux/prctl-private.h>
 #include <linux/highuid.h>
 #include <linux/fs.h>
 #include <linux/kmod.h>
@@ -41,6 +42,8 @@
 #include <linux/syscore_ops.h>
 #include <linux/version.h>
 #include <linux/ctype.h>
+#include <linux/mm.h>
+#include <linux/mempolicy.h>
 
 #include <linux/compat.h>
 #include <linux/syscalls.h>
@@ -2072,10 +2075,158 @@
 }
 #endif
 
+#ifdef CONFIG_MMU
+static int prctl_update_vma_anon_name(struct vm_area_struct *vma,
+		struct vm_area_struct **prev,
+		unsigned long start, unsigned long end,
+		const char __user *name_addr)
+{
+	struct mm_struct *mm = vma->vm_mm;
+	int error = 0;
+	pgoff_t pgoff;
+
+	if (name_addr == vma_get_anon_name(vma)) {
+		*prev = vma;
+		goto out;
+	}
+
+	pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
+	*prev = vma_merge(mm, *prev, start, end, vma->vm_flags, vma->anon_vma,
+				vma->vm_file, pgoff, vma_policy(vma),
+				vma->vm_userfaultfd_ctx, name_addr);
+	if (*prev) {
+		vma = *prev;
+		goto success;
+	}
+
+	*prev = vma;
+
+	if (start != vma->vm_start) {
+		error = split_vma(mm, vma, start, 1);
+		if (error)
+			goto out;
+	}
+
+	if (end != vma->vm_end) {
+		error = split_vma(mm, vma, end, 0);
+		if (error)
+			goto out;
+	}
+
+success:
+	if (!vma->vm_file)
+		vma->anon_name = name_addr;
+
+out:
+	if (error == -ENOMEM)
+		error = -EAGAIN;
+	return error;
+}
+
+static int prctl_set_vma_anon_name(unsigned long start, unsigned long end,
+			unsigned long arg)
+{
+	unsigned long tmp;
+	struct vm_area_struct *vma, *prev;
+	int unmapped_error = 0;
+	int error = -EINVAL;
+
+	/*
+	 * If the interval [start,end) covers some unmapped address
+	 * ranges, just ignore them, but return -ENOMEM at the end.
+	 * - this matches the handling in madvise.
+	 */
+	vma = find_vma_prev(current->mm, start, &prev);
+	if (vma && start > vma->vm_start)
+		prev = vma;
+
+	for (;;) {
+		/* Still start < end. */
+		error = -ENOMEM;
+		if (!vma)
+			return error;
+
+		/* Here start < (end|vma->vm_end). */
+		if (start < vma->vm_start) {
+			unmapped_error = -ENOMEM;
+			start = vma->vm_start;
+			if (start >= end)
+				return error;
+		}
+
+		/* Here vma->vm_start <= start < (end|vma->vm_end) */
+		tmp = vma->vm_end;
+		if (end < tmp)
+			tmp = end;
+
+		/* Here vma->vm_start <= start < tmp <= (end|vma->vm_end). */
+		error = prctl_update_vma_anon_name(vma, &prev, start, tmp,
+				(const char __user *)arg);
+		if (error)
+			return error;
+		start = tmp;
+		if (prev && start < prev->vm_end)
+			start = prev->vm_end;
+		error = unmapped_error;
+		if (start >= end)
+			return error;
+		if (prev)
+			vma = prev->vm_next;
+		else	/* madvise_remove dropped mmap_sem */
+			vma = find_vma(current->mm, start);
+	}
+}
+
+static int prctl_set_vma(unsigned long opt, unsigned long start,
+		unsigned long len_in, unsigned long arg)
+{
+	struct mm_struct *mm = current->mm;
+	int error;
+	unsigned long len;
+	unsigned long end;
+
+	if (start & ~PAGE_MASK)
+		return -EINVAL;
+	len = (len_in + ~PAGE_MASK) & PAGE_MASK;
+
+	/* Check to see whether len was rounded up from small -ve to zero */
+	if (len_in && !len)
+		return -EINVAL;
+
+	end = start + len;
+	if (end < start)
+		return -EINVAL;
+
+	if (end == start)
+		return 0;
+
+	down_write(&mm->mmap_sem);
+
+	switch (opt) {
+	case PR_SET_VMA_ANON_NAME:
+		error = prctl_set_vma_anon_name(start, end, arg);
+		break;
+	default:
+		error = -EINVAL;
+	}
+
+	up_write(&mm->mmap_sem);
+
+	return error;
+}
+#else /* CONFIG_MMU */
+static int prctl_set_vma(unsigned long opt, unsigned long start,
+		unsigned long len_in, unsigned long arg)
+{
+	return -EINVAL;
+}
+#endif
+
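Userspace view (an illustrative sketch following the Android bionic convention, not part of the patch; the constants below are assumptions for when the patched <linux/prctl.h> is not in the include path).

	#include <sys/prctl.h>

	#ifndef PR_SET_VMA
	#define PR_SET_VMA		0x53564d41
	#define PR_SET_VMA_ANON_NAME	0
	#endif

	/*
	 * Name an anonymous mapping so it shows up in /proc/<pid>/maps.
	 * The kernel stores the user pointer itself, so 'name' must remain
	 * valid (and unchanged) for the lifetime of the mapping.
	 */
	static int name_anon_region(void *addr, size_t len, const char *name)
	{
		return prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME,
			     (unsigned long)addr, len, (unsigned long)name);
	}
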
 SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
 		unsigned long, arg4, unsigned long, arg5)
 {
 	struct task_struct *me = current;
+	struct task_struct *tsk;
 	unsigned char comm[sizeof(me->comm)];
 	long error;
 
@@ -2169,6 +2320,9 @@
 		error = perf_event_task_enable();
 		break;
 	case PR_GET_TIMERSLACK:
-		error = current->timer_slack_ns;
+		if (current->timer_slack_ns > ULONG_MAX)
+			error = ULONG_MAX;
+		else
+			error = current->timer_slack_ns;
 		break;
 	case PR_SET_TIMERSLACK:
@@ -2218,6 +2372,26 @@
 	case PR_GET_TID_ADDRESS:
 		error = prctl_get_tid_address(me, (int __user **)arg2);
 		break;
+	case PR_SET_TIMERSLACK_PID:
+		if (task_pid_vnr(current) != (pid_t)arg3 &&
+				!capable(CAP_SYS_NICE))
+			return -EPERM;
+		rcu_read_lock();
+		tsk = find_task_by_vpid((pid_t)arg3);
+		if (tsk == NULL) {
+			rcu_read_unlock();
+			return -EINVAL;
+		}
+		get_task_struct(tsk);
+		rcu_read_unlock();
+		if (arg2 <= 0)
+			tsk->timer_slack_ns =
+				tsk->default_timer_slack_ns;
+		else
+			tsk->timer_slack_ns = arg2;
+		put_task_struct(tsk);
+		error = 0;
+		break;
 	case PR_SET_CHILD_SUBREAPER:
 		me->signal->is_child_subreaper = !!arg2;
 		break;
@@ -2266,6 +2440,21 @@
 	case PR_GET_FP_MODE:
 		error = GET_FP_MODE(me);
 		break;
+	case PR_SET_VMA:
+		error = prctl_set_vma(arg2, arg3, arg4, arg5);
+		break;
+	case PR_SET_EXEC_MODE:
+		if (arg2 != EXEC_MODE_UNLIMITED &&
+		    arg2 != EXEC_MODE_ONCE &&
+		    arg2 != EXEC_MODE_DENIED)
+			return -EINVAL;
+
+		if (arg2 > current->exec_mode)
+			return -EPERM;
+		current->exec_mode = arg2;
+		return 0;
+	case PR_GET_EXEC_MODE:
+		return current->exec_mode;
 	default:
 		error = -EINVAL;
 		break;
diff -ruw linux-4.4.115/kernel/sysctl_binary.c linux-4.4.115-fbx/kernel/sysctl_binary.c
--- linux-4.4.115/kernel/sysctl_binary.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/kernel/sysctl_binary.c	2019-01-22 16:16:28.715293604 +0100
@@ -138,6 +138,8 @@
 	{ CTL_INT,	KERN_MAX_LOCK_DEPTH,		"max_lock_depth" },
 	{ CTL_INT,	KERN_PANIC_ON_NMI,		"panic_on_unrecovered_nmi" },
 	{ CTL_INT,	KERN_PANIC_ON_WARN,		"panic_on_warn" },
+	{ CTL_INT,	KERN_BOOT_REASON,		"boot_reason" },
+	{ CTL_INT,	KERN_COLD_BOOT,			"cold_boot" },
 	{}
 };
 
@@ -253,6 +255,7 @@
 	{ CTL_INT,	NET_IPV4_CONF_NOPOLICY,			"disable_policy" },
 	{ CTL_INT,	NET_IPV4_CONF_FORCE_IGMP_VERSION,	"force_igmp_version" },
 	{ CTL_INT,	NET_IPV4_CONF_PROMOTE_SECONDARIES,	"promote_secondaries" },
+	{ CTL_INT,	NET_IPV4_CONF_NF_IPV4_DEFRAG_SKIP,	"nf_ipv4_defrag_skip" },
 	{}
 };
 
@@ -523,6 +526,7 @@
 	{ CTL_INT,	NET_IPV6_PROXY_NDP,			"proxy_ndp" },
 	{ CTL_INT,	NET_IPV6_ACCEPT_SOURCE_ROUTE,		"accept_source_route" },
 	{ CTL_INT,	NET_IPV6_ACCEPT_RA_FROM_LOCAL,		"accept_ra_from_local" },
+	{ CTL_INT,	NET_IPV6_ACCEPT_RA_PREFIX_ROUTE,	"accept_ra_prefix_route" },
 	{}
 };
 
diff -ruw linux-4.4.115/kernel/sysctl.c linux-4.4.115-fbx/kernel/sysctl.c
--- linux-4.4.115/kernel/sysctl.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/kernel/sysctl.c	2019-10-29 09:26:25.633222652 +0100
@@ -105,6 +105,7 @@
 extern unsigned int core_pipe_limit;
 #endif
 extern int pid_max;
+extern int extra_free_kbytes;
 extern int pid_max_min, pid_max_max;
 extern int percpu_pagelist_fraction;
 extern int compat_log;
@@ -124,12 +125,17 @@
 static int zero;
 static int __maybe_unused one = 1;
 static int __maybe_unused two = 2;
+static int __maybe_unused three = 3;
 static int __maybe_unused four = 4;
 static unsigned long one_ul = 1;
 static int one_hundred = 100;
 #ifdef CONFIG_PRINTK
 static int ten_thousand = 10000;
 #endif
+#ifdef CONFIG_SCHED_HMP
+static int one_thousand = 1000;
+static int max_freq_reporting_policy = FREQ_REPORT_INVALID_POLICY - 1;
+#endif
 
 /* this is needed for the proc_doulongvec_minmax of vm_dirty_bytes */
 static unsigned long dirty_bytes_min = 2 * PAGE_SIZE;
@@ -285,6 +291,217 @@
 		.mode		= 0644,
 		.proc_handler	= proc_dointvec,
 	},
+#ifdef CONFIG_SCHED_HMP
+	{
+		.procname	= "sched_freq_reporting_policy",
+		.data		= &sysctl_sched_freq_reporting_policy,
+		.maxlen		= sizeof(unsigned int),
+		.mode		= 0644,
+		.proc_handler	= proc_dointvec_minmax,
+		.extra1		= &zero,
+		.extra2		= &max_freq_reporting_policy,
+	},
+	{
+		.procname	= "sched_freq_inc_notify",
+		.data		= &sysctl_sched_freq_inc_notify,
+		.maxlen		= sizeof(unsigned int),
+		.mode		= 0644,
+		.proc_handler	= proc_dointvec_minmax,
+		.extra1		= &zero,
+	},
+	{
+		.procname	= "sched_freq_dec_notify",
+		.data		= &sysctl_sched_freq_dec_notify,
+		.maxlen		= sizeof(unsigned int),
+		.mode		= 0644,
+		.proc_handler	= proc_dointvec_minmax,
+		.extra1		= &zero,
+	},
+	{
+		.procname       = "sched_cpu_high_irqload",
+		.data           = &sysctl_sched_cpu_high_irqload,
+		.maxlen         = sizeof(unsigned int),
+		.mode           = 0644,
+		.proc_handler   = proc_dointvec,
+	},
+	{
+		.procname       = "sched_ravg_hist_size",
+		.data           = &sysctl_sched_ravg_hist_size,
+		.maxlen         = sizeof(unsigned int),
+		.mode           = 0644,
+		.proc_handler   = sched_window_update_handler,
+	},
+	{
+		.procname       = "sched_window_stats_policy",
+		.data           = &sysctl_sched_window_stats_policy,
+		.maxlen         = sizeof(unsigned int),
+		.mode           = 0644,
+		.proc_handler   = sched_window_update_handler,
+	},
+	{
+		.procname	= "sched_spill_load",
+		.data		= &sysctl_sched_spill_load_pct,
+		.maxlen		= sizeof(unsigned int),
+		.mode		= 0644,
+		.proc_handler	= sched_hmp_proc_update_handler,
+		.extra1		= &zero,
+		.extra2		= &one_hundred,
+	},
+	{
+		.procname	= "sched_spill_nr_run",
+		.data		= &sysctl_sched_spill_nr_run,
+		.maxlen		= sizeof(unsigned int),
+		.mode		= 0644,
+		.proc_handler	= proc_dointvec_minmax,
+		.extra1		= &zero,
+	},
+	{
+		.procname	= "sched_upmigrate",
+		.data		= &sysctl_sched_upmigrate_pct,
+		.maxlen		= sizeof(unsigned int),
+		.mode		= 0644,
+		.proc_handler	= sched_hmp_proc_update_handler,
+		.extra1		= &zero,
+		.extra2		= &one_hundred,
+	},
+	{
+		.procname	= "sched_downmigrate",
+		.data		= &sysctl_sched_downmigrate_pct,
+		.maxlen		= sizeof(unsigned int),
+		.mode		= 0644,
+		.proc_handler	= sched_hmp_proc_update_handler,
+		.extra1		= &zero,
+		.extra2		= &one_hundred,
+	},
+	{
+		.procname	= "sched_group_upmigrate",
+		.data		= &sysctl_sched_group_upmigrate_pct,
+		.maxlen		= sizeof(unsigned int),
+		.mode		= 0644,
+		.proc_handler	= sched_hmp_proc_update_handler,
+		.extra1		= &zero,
+	},
+	{
+		.procname	= "sched_group_downmigrate",
+		.data		= &sysctl_sched_group_downmigrate_pct,
+		.maxlen		= sizeof(unsigned int),
+		.mode		= 0644,
+		.proc_handler	= sched_hmp_proc_update_handler,
+		.extra1		= &zero,
+	},
+	{
+		.procname	= "sched_init_task_load",
+		.data		= &sysctl_sched_init_task_load_pct,
+		.maxlen		= sizeof(unsigned int),
+		.mode		= 0644,
+		.proc_handler	= sched_hmp_proc_update_handler,
+		.extra1		= &zero,
+		.extra2		= &one_hundred,
+	},
+	{
+		.procname	= "sched_select_prev_cpu_us",
+		.data		= &sysctl_sched_select_prev_cpu_us,
+		.maxlen		= sizeof(unsigned int),
+		.mode		= 0644,
+		.proc_handler   = sched_hmp_proc_update_handler,
+		.extra1		= &zero,
+	},
+	{
+		.procname	= "sched_restrict_cluster_spill",
+		.data		= &sysctl_sched_restrict_cluster_spill,
+		.maxlen		= sizeof(unsigned int),
+		.mode		= 0644,
+		.proc_handler	= proc_dointvec_minmax,
+		.extra1		= &zero,
+		.extra2		= &one,
+	},
+	{
+		.procname	= "sched_small_wakee_task_load",
+		.data		= &sysctl_sched_small_wakee_task_load_pct,
+		.maxlen		= sizeof(unsigned int),
+		.mode		= 0644,
+		.proc_handler   = sched_hmp_proc_update_handler,
+		.extra1		= &zero,
+		.extra2		= &one_hundred,
+	},
+	{
+		.procname	= "sched_big_waker_task_load",
+		.data		= &sysctl_sched_big_waker_task_load_pct,
+		.maxlen		= sizeof(unsigned int),
+		.mode		= 0644,
+		.proc_handler   = sched_hmp_proc_update_handler,
+		.extra1		= &zero,
+		.extra2		= &one_hundred,
+	},
+	{
+		.procname	= "sched_prefer_sync_wakee_to_waker",
+		.data		= &sysctl_sched_prefer_sync_wakee_to_waker,
+		.maxlen		= sizeof(unsigned int),
+		.mode		= 0644,
+		.proc_handler	= proc_dointvec_minmax,
+		.extra1		= &zero,
+		.extra2		= &one,
+	},
+	{
+		.procname       = "sched_enable_thread_grouping",
+		.data           = &sysctl_sched_enable_thread_grouping,
+		.maxlen         = sizeof(unsigned int),
+		.mode           = 0644,
+		.proc_handler   = proc_dointvec,
+	},
+	{
+		.procname	= "sched_pred_alert_freq",
+		.data		= &sysctl_sched_pred_alert_freq,
+		.maxlen		= sizeof(unsigned int),
+		.mode		= 0644,
+		.proc_handler	= proc_dointvec_minmax,
+		.extra1		= &zero,
+	},
+	{
+		.procname       = "sched_freq_aggregate",
+		.data           = &sysctl_sched_freq_aggregate,
+		.maxlen         = sizeof(unsigned int),
+		.mode           = 0644,
+		.proc_handler   = sched_window_update_handler,
+	},
+	{
+		.procname	= "sched_freq_aggregate_threshold",
+		.data		= &sysctl_sched_freq_aggregate_threshold_pct,
+		.maxlen		= sizeof(unsigned int),
+		.mode		= 0644,
+		.proc_handler	= sched_hmp_proc_update_handler,
+		.extra1		= &zero,
+		/*
+		 * Special handling for sched_freq_aggregate_threshold_pct
+		 * which can be greater than 100. Use 1000 as an upper bound
+		 * value which works for all practical use cases.
+		 */
+		.extra2		= &one_thousand,
+	},
+	{
+		.procname	= "sched_boost",
+		.data		= &sysctl_sched_boost,
+		.maxlen		= sizeof(unsigned int),
+		.mode		= 0644,
+		.proc_handler	= sched_boost_handler,
+		.extra1         = &zero,
+		.extra2		= &three,
+	},
+	{
+		.procname	= "sched_short_burst_ns",
+		.data		= &sysctl_sched_short_burst,
+		.maxlen		= sizeof(unsigned int),
+		.mode		= 0644,
+		.proc_handler	= proc_dointvec,
+	},
+	{
+		.procname       = "sched_short_sleep_ns",
+		.data           = &sysctl_sched_short_sleep,
+		.maxlen         = sizeof(unsigned int),
+		.mode           = 0644,
+		.proc_handler   = proc_dointvec,
+	},
+#endif	/* CONFIG_SCHED_HMP */
 #ifdef CONFIG_SCHED_DEBUG
 	{
 		.procname	= "sched_min_granularity_ns",
@@ -305,6 +522,20 @@
 		.extra2		= &max_sched_granularity_ns,
 	},
 	{
+		.procname	= "sched_sync_hint_enable",
+		.data		= &sysctl_sched_sync_hint_enable,
+		.maxlen		= sizeof(unsigned int),
+		.mode		= 0644,
+		.proc_handler	= proc_dointvec,
+	},
+	{
+		.procname	= "sched_cstate_aware",
+		.data		= &sysctl_sched_cstate_aware,
+		.maxlen		= sizeof(unsigned int),
+		.mode		= 0644,
+		.proc_handler	= proc_dointvec,
+	},
+	{
 		.procname	= "sched_wakeup_granularity_ns",
 		.data		= &sysctl_sched_wakeup_granularity,
 		.maxlen		= sizeof(unsigned int),
@@ -342,7 +573,8 @@
 		.data		= &sysctl_sched_time_avg,
 		.maxlen		= sizeof(unsigned int),
 		.mode		= 0644,
-		.proc_handler	= proc_dointvec,
+		.proc_handler	= proc_dointvec_minmax,
+		.extra1		= &one,
 	},
 	{
 		.procname	= "sched_shares_window_ns",
@@ -435,6 +667,21 @@
 		.extra1		= &one,
 	},
 #endif
+#ifdef CONFIG_SCHED_TUNE
+	{
+		.procname	= "sched_cfs_boost",
+		.data		= &sysctl_sched_cfs_boost,
+		.maxlen		= sizeof(sysctl_sched_cfs_boost),
+#ifdef CONFIG_CGROUP_SCHEDTUNE
+		.mode		= 0444,
+#else
+		.mode		= 0644,
+#endif
+		.proc_handler	= &sysctl_sched_cfs_boost_handler,
+		.extra1		= &zero,
+		.extra2		= &one_hundred,
+	},
+#endif
 #ifdef CONFIG_PROVE_LOCKING
 	{
 		.procname	= "prove_locking",
@@ -1175,6 +1422,27 @@
 		.extra2		= &one,
 	},
 #endif
+#if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
+	{
+		.procname	= "boot_reason",
+		.data		= &boot_reason,
+		.maxlen		= sizeof(int),
+		.mode		= 0444,
+		.proc_handler	= proc_dointvec,
+	},
+
+	{
+		.procname	= "cold_boot",
+		.data		= &cold_boot,
+		.maxlen		= sizeof(int),
+		.mode		= 0444,
+		.proc_handler	= proc_dointvec,
+	},
+#endif
+/*
+ * NOTE: do not add new entries to this table unless you have read
+ * Documentation/sysctl/ctl_unnumbered.txt
+ */
 	{ }
 };
 
@@ -1396,6 +1664,14 @@
 		.extra1		= &zero,
 	},
 	{
+		.procname	= "extra_free_kbytes",
+		.data		= &extra_free_kbytes,
+		.maxlen		= sizeof(extra_free_kbytes),
+		.mode		= 0644,
+		.proc_handler	= min_free_kbytes_sysctl_handler,
+		.extra1		= &zero,
+	},
+	{
 		.procname	= "percpu_pagelist_fraction",
 		.data		= &percpu_pagelist_fraction,
 		.maxlen		= sizeof(percpu_pagelist_fraction),
@@ -1571,6 +1847,44 @@
 		.mode		= 0644,
 		.proc_handler	= proc_doulongvec_minmax,
 	},
+#ifdef CONFIG_HAVE_ARCH_MMAP_RND_BITS
+	{
+		.procname	= "mmap_rnd_bits",
+		.data		= &mmap_rnd_bits,
+		.maxlen		= sizeof(mmap_rnd_bits),
+		.mode		= 0600,
+		.proc_handler	= proc_dointvec_minmax,
+		.extra1		= (void *)&mmap_rnd_bits_min,
+		.extra2		= (void *)&mmap_rnd_bits_max,
+	},
+#endif
+#ifdef CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS
+	{
+		.procname	= "mmap_rnd_compat_bits",
+		.data		= &mmap_rnd_compat_bits,
+		.maxlen		= sizeof(mmap_rnd_compat_bits),
+		.mode		= 0600,
+		.proc_handler	= proc_dointvec_minmax,
+		.extra1		= (void *)&mmap_rnd_compat_bits_min,
+		.extra2		= (void *)&mmap_rnd_compat_bits_max,
+	},
+#endif
+#ifdef CONFIG_SWAP
+	{
+		.procname	= "swap_ratio",
+		.data		= &sysctl_swap_ratio,
+		.maxlen		= sizeof(sysctl_swap_ratio),
+		.mode		= 0644,
+		.proc_handler	= proc_dointvec_minmax,
+	},
+	{
+		.procname	= "swap_ratio_enable",
+		.data		= &sysctl_swap_ratio_enable,
+		.maxlen		= sizeof(sysctl_swap_ratio_enable),
+		.mode		= 0644,
+		.proc_handler	= proc_dointvec_minmax,
+	},
+#endif
 	{ }
 };
 
@@ -2040,15 +2354,7 @@
 				 int write, void *data)
 {
 	if (write) {
-		if (*negp) {
-			if (*lvalp > (unsigned long) INT_MAX + 1)
-				return -EINVAL;
-			*valp = -*lvalp;
-		} else {
-			if (*lvalp > (unsigned long) INT_MAX)
-				return -EINVAL;
-			*valp = *lvalp;
-		}
+		*valp = *negp ? -*lvalp : *lvalp;
 	} else {
 		int val = *valp;
 		if (val < 0) {
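The CONFIG_SCHED_HMP block above only wires the knobs into kern_table; each entry surfaces as a file under /proc/sys/kernel/ and writes are clamped by the handler through extra1/extra2. A minimal userspace sketch, assuming a kernel built with CONFIG_SCHED_HMP and the sched_boost entry registered exactly as above:

#include <stdio.h>

int main(void)
{
	const char *path = "/proc/sys/kernel/sched_boost";
	char buf[16];
	FILE *f;

	f = fopen(path, "w");
	if (!f) {
		perror(path);
		return 1;
	}
	fputs("1\n", f);		/* sched_boost_handler clamps to [0, 3] */
	fclose(f);

	f = fopen(path, "r");
	if (!f) {
		perror(path);
		return 1;
	}
	if (fgets(buf, sizeof(buf), f))
		printf("sched_boost = %s", buf);
	fclose(f);
	return 0;
}

The read-back confirms whether the write survived the range check; out-of-range values are typically rejected with EINVAL by minmax-style handlers.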
diff -ruw linux-4.4.115/kernel/taskstats.c linux-4.4.115-fbx/kernel/taskstats.c
--- linux-4.4.115/kernel/taskstats.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/kernel/taskstats.c	2019-01-22 16:16:28.715293604 +0100
@@ -54,7 +54,11 @@
 	[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK] = { .type = NLA_STRING },
 	[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK] = { .type = NLA_STRING },};
 
-static const struct nla_policy cgroupstats_cmd_get_policy[CGROUPSTATS_CMD_ATTR_MAX+1] = {
+/*
+ * We have to use TASKSTATS_CMD_ATTR_MAX here because it is the maxattr of
+ * the family. Make sure the two always stay aligned.
+ */
+static const struct nla_policy cgroupstats_cmd_get_policy[TASKSTATS_CMD_ATTR_MAX+1] = {
 	[CGROUPSTATS_CMD_ATTR_FD] = { .type = NLA_U32 },
 };
 
diff -ruw linux-4.4.115/kernel/time/alarmtimer.c linux-4.4.115-fbx/kernel/time/alarmtimer.c
--- linux-4.4.115/kernel/time/alarmtimer.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/kernel/time/alarmtimer.c	2019-10-29 09:26:25.637222692 +0100
@@ -26,6 +26,11 @@
 #include <linux/workqueue.h>
 #include <linux/freezer.h>
 
+#ifdef CONFIG_MSM_PM
+#include "lpm-levels.h"
+#endif
+
 /**
  * struct alarm_base - Alarm timer bases
  * @lock:		Lock for syncrhonized access to the base
@@ -46,14 +51,130 @@
 static DEFINE_SPINLOCK(freezer_delta_lock);
 
 static struct wakeup_source *ws;
+static struct delayed_work work;
+static struct workqueue_struct *power_off_alarm_workqueue;
 
 #ifdef CONFIG_RTC_CLASS
 /* rtc timer and device for setting alarm wakeups at suspend */
 static struct rtc_timer		rtctimer;
 static struct rtc_device	*rtcdev;
 static DEFINE_SPINLOCK(rtcdev_lock);
+static struct mutex power_on_alarm_lock;
+static struct alarm init_alarm;
+
+/**
+ * power_on_alarm_init - Initialize the power-on alarm value
+ *
+ * Read the RTC alarm value after the device boots up and add this alarm
+ * to the alarm queue.
+ */
+void power_on_alarm_init(void)
+{
+	struct rtc_wkalrm rtc_alarm;
+	struct rtc_time rt;
+	unsigned long alarm_time;
+	struct rtc_device *rtc;
+	ktime_t alarm_ktime;
+
+	rtc = alarmtimer_get_rtcdev();
+
+	if (!rtc)
+		return;
+
+	rtc_read_alarm(rtc, &rtc_alarm);
+	rt = rtc_alarm.time;
+
+	rtc_tm_to_time(&rt, &alarm_time);
+
+	if (alarm_time) {
+		alarm_ktime = ktime_set(alarm_time, 0);
+		alarm_init(&init_alarm, ALARM_POWEROFF_REALTIME, NULL);
+		alarm_start(&init_alarm, alarm_ktime);
+	}
+}
 
 /**
+ * set_power_on_alarm - program the power-on alarm value into the RTC
+ *
+ * Get the soonest power-off alarm timer and write its expiry into the
+ * RTC alarm register.
+ */
+void set_power_on_alarm(void)
+{
+	int rc;
+	struct timespec wall_time, alarm_ts;
+	long alarm_secs = 0;
+	long rtc_secs, alarm_time, alarm_delta;
+	struct rtc_time rtc_time;
+	struct rtc_wkalrm alarm;
+	struct rtc_device *rtc;
+	struct timerqueue_node *next;
+	unsigned long flags;
+	struct alarm_base *base = &alarm_bases[ALARM_POWEROFF_REALTIME];
+
+	rc = mutex_lock_interruptible(&power_on_alarm_lock);
+	if (rc != 0)
+		return;
+
+	spin_lock_irqsave(&base->lock, flags);
+	next = timerqueue_getnext(&base->timerqueue);
+	spin_unlock_irqrestore(&base->lock, flags);
+
+	if (next) {
+		alarm_ts = ktime_to_timespec(next->expires);
+		alarm_secs = alarm_ts.tv_sec;
+	}
+
+	if (!alarm_secs)
+		goto disable_alarm;
+
+	getnstimeofday(&wall_time);
+
+	/*
+	 * alarm_secs has to be bigger than "wall_time + 1" to make sure
+	 * that the alarm time is always strictly after the wall time.
+	 */
+	if (alarm_secs <= wall_time.tv_sec + 1)
+		goto disable_alarm;
+
+	rtc = alarmtimer_get_rtcdev();
+	if (!rtc)
+		goto exit;
+
+	rtc_read_time(rtc, &rtc_time);
+	rtc_tm_to_time(&rtc_time, &rtc_secs);
+	alarm_delta = wall_time.tv_sec - rtc_secs;
+	alarm_time = alarm_secs - alarm_delta;
+
+	rtc_time_to_tm(alarm_time, &alarm.time);
+	alarm.enabled = 1;
+	rc = rtc_set_alarm(rtcdev, &alarm);
+	if (rc)
+		goto disable_alarm;
+
+	mutex_unlock(&power_on_alarm_lock);
+	return;
+
+disable_alarm:
+	rtc_alarm_irq_enable(rtcdev, 0);
+exit:
+	mutex_unlock(&power_on_alarm_lock);
+}
+
+static void alarmtimer_triggered_func(void *p)
+{
+	struct rtc_device *rtc = rtcdev;
+
+	if (!(rtc->irq_data & RTC_AF))
+		return;
+	__pm_wakeup_event(ws, 2 * MSEC_PER_SEC);
+}
+
+static struct rtc_task alarmtimer_rtc_task = {
+	.func = alarmtimer_triggered_func
+};
+
+/**
  * alarmtimer_get_rtcdev - Return selected rtcdevice
  *
  * This function returns the rtc device to use for wakealarms.
@@ -63,7 +184,7 @@
 struct rtc_device *alarmtimer_get_rtcdev(void)
 {
 	unsigned long flags;
-	struct rtc_device *ret;
+	struct rtc_device *ret = NULL;
 
 	spin_lock_irqsave(&rtcdev_lock, flags);
 	ret = rtcdev;
@@ -77,33 +198,48 @@
 				struct class_interface *class_intf)
 {
 	unsigned long flags;
+	int err = 0;
 	struct rtc_device *rtc = to_rtc_device(dev);
-
 	if (rtcdev)
 		return -EBUSY;
-
 	if (!rtc->ops->set_alarm)
 		return -1;
-	if (!device_may_wakeup(rtc->dev.parent))
-		return -1;
 
 	spin_lock_irqsave(&rtcdev_lock, flags);
 	if (!rtcdev) {
+		err = rtc_irq_register(rtc, &alarmtimer_rtc_task);
+		if (err)
+			goto rtc_irq_reg_err;
 		rtcdev = rtc;
 		/* hold a reference so it doesn't go away */
 		get_device(dev);
 	}
+
+rtc_irq_reg_err:
 	spin_unlock_irqrestore(&rtcdev_lock, flags);
-	return 0;
+	return err;
+}
+
+static void alarmtimer_rtc_remove_device(struct device *dev,
+				struct class_interface *class_intf)
+{
+	if (rtcdev && dev == &rtcdev->dev) {
+		rtc_irq_unregister(rtcdev, &alarmtimer_rtc_task);
+		rtcdev = NULL;
+	}
 }
 
 static inline void alarmtimer_rtc_timer_init(void)
 {
+	mutex_init(&power_on_alarm_lock);
+
 	rtc_timer_init(&rtctimer, NULL, NULL);
 }
 
 static struct class_interface alarmtimer_rtc_interface = {
 	.add_dev = &alarmtimer_rtc_add_device,
+	.remove_dev = &alarmtimer_rtc_remove_device,
 };
 
 static int alarmtimer_rtc_interface_setup(void)
@@ -124,8 +260,14 @@
 static inline int alarmtimer_rtc_interface_setup(void) { return 0; }
 static inline void alarmtimer_rtc_interface_remove(void) { }
 static inline void alarmtimer_rtc_timer_init(void) { }
+void set_power_on_alarm(void) { }
 #endif
 
+static void alarm_work_func(struct work_struct *unused)
+{
+	set_power_on_alarm();
+}
+
 /**
  * alarmtimer_enqueue - Adds an alarm timer to an alarm_base timerqueue
  * @base: pointer to the base where the timer is being run
@@ -195,6 +337,10 @@
 	}
 	spin_unlock_irqrestore(&base->lock, flags);
 
+	/* set next power off alarm */
+	if (alarm->type == ALARM_POWEROFF_REALTIME)
+		queue_delayed_work(power_off_alarm_workqueue, &work, 0);
+
 	return ret;
 
 }
@@ -217,6 +363,68 @@
  * set an rtc timer to fire that far into the future, which
  * will wake us from suspend.
  */
+#if defined(CONFIG_RTC_DRV_QPNP) && defined(CONFIG_MSM_PM)
+static int alarmtimer_suspend(struct device *dev)
+{
+	struct rtc_time tm;
+	ktime_t min, now;
+	unsigned long flags;
+	struct rtc_device *rtc;
+	int i;
+	int ret = 0;
+
+	spin_lock_irqsave(&freezer_delta_lock, flags);
+	min = freezer_delta;
+	freezer_delta = ktime_set(0, 0);
+	spin_unlock_irqrestore(&freezer_delta_lock, flags);
+
+	rtc = alarmtimer_get_rtcdev();
+	/* If we have no rtcdev, just return */
+	if (!rtc)
+		return 0;
+
+	/* Find the soonest timer to expire */
+	for (i = 0; i < ALARM_NUMTYPE; i++) {
+		struct alarm_base *base = &alarm_bases[i];
+		struct timerqueue_node *next;
+		ktime_t delta;
+
+		spin_lock_irqsave(&base->lock, flags);
+		next = timerqueue_getnext(&base->timerqueue);
+		spin_unlock_irqrestore(&base->lock, flags);
+		if (!next)
+			continue;
+		delta = ktime_sub(next->expires, base->gettime());
+		if (!min.tv64 || (delta.tv64 < min.tv64))
+			min = delta;
+	}
+	if (min.tv64 == 0)
+		return 0;
+
+	if (ktime_to_ns(min) < 2 * NSEC_PER_SEC) {
+		__pm_wakeup_event(ws, 2 * MSEC_PER_SEC);
+		return -EBUSY;
+	}
+
+	/* Setup a timer to fire that far in the future */
+	rtc_timer_cancel(rtc, &rtctimer);
+	rtc_read_time(rtc, &tm);
+	now = rtc_tm_to_ktime(tm);
+	now = ktime_add(now, min);
+	if (poweron_alarm) {
+		uint64_t msec = 0;
+
+		msec = ktime_to_ms(min);
+		lpm_suspend_wake_time(msec);
+	} else {
+		/* Set alarm, if in the past reject suspend briefly to handle */
+		ret = rtc_timer_start(rtc, &rtctimer, now, ktime_set(0, 0));
+		if (ret < 0)
+			__pm_wakeup_event(ws, MSEC_PER_SEC);
+	}
+	return ret;
+}
+#else
 static int alarmtimer_suspend(struct device *dev)
 {
 	struct rtc_time tm;
@@ -226,6 +434,8 @@
 	int i;
 	int ret;
 
+	cancel_delayed_work_sync(&work);
+
 	spin_lock_irqsave(&freezer_delta_lock, flags);
 	min = freezer_delta;
 	freezer_delta = ktime_set(0, 0);
@@ -271,11 +481,31 @@
 		__pm_wakeup_event(ws, MSEC_PER_SEC);
 	return ret;
 }
+#endif
+static int alarmtimer_resume(struct device *dev)
+{
+	struct rtc_device *rtc;
+
+	rtc = alarmtimer_get_rtcdev();
+	/* If we have no rtcdev, just return */
+	if (!rtc)
+		return 0;
+	rtc_timer_cancel(rtc, &rtctimer);
+
+	queue_delayed_work(power_off_alarm_workqueue, &work, 0);
+	return 0;
+}
+
 #else
 static int alarmtimer_suspend(struct device *dev)
 {
 	return 0;
 }
+
+static int alarmtimer_resume(struct device *dev)
+{
+	return 0;
+}
 #endif
 
 static void alarmtimer_freezerset(ktime_t absexp, enum alarmtimer_type type)
@@ -443,12 +673,14 @@
  * clock2alarm - helper that converts from clockid to alarmtypes
  * @clockid: clockid.
  */
-static enum alarmtimer_type clock2alarm(clockid_t clockid)
+enum alarmtimer_type clock2alarm(clockid_t clockid)
 {
 	if (clockid == CLOCK_REALTIME_ALARM)
 		return ALARM_REALTIME;
 	if (clockid == CLOCK_BOOTTIME_ALARM)
 		return ALARM_BOOTTIME;
+	if (clockid == CLOCK_POWEROFF_ALARM)
+		return ALARM_POWEROFF_REALTIME;
 	return -1;
 }
 
@@ -809,6 +1041,7 @@
 /* Suspend hook structures */
 static const struct dev_pm_ops alarmtimer_pm_ops = {
 	.suspend = alarmtimer_suspend,
+	.resume = alarmtimer_resume,
 };
 
 static struct platform_driver alarmtimer_driver = {
@@ -843,10 +1076,13 @@
 
 	posix_timers_register_clock(CLOCK_REALTIME_ALARM, &alarm_clock);
 	posix_timers_register_clock(CLOCK_BOOTTIME_ALARM, &alarm_clock);
+	posix_timers_register_clock(CLOCK_POWEROFF_ALARM, &alarm_clock);
 
 	/* Initialize alarm bases */
 	alarm_bases[ALARM_REALTIME].base_clockid = CLOCK_REALTIME;
 	alarm_bases[ALARM_REALTIME].gettime = &ktime_get_real;
+	alarm_bases[ALARM_POWEROFF_REALTIME].base_clockid = CLOCK_REALTIME;
+	alarm_bases[ALARM_POWEROFF_REALTIME].gettime = &ktime_get_real;
 	alarm_bases[ALARM_BOOTTIME].base_clockid = CLOCK_BOOTTIME;
 	alarm_bases[ALARM_BOOTTIME].gettime = &ktime_get_boottime;
 	for (i = 0; i < ALARM_NUMTYPE; i++) {
@@ -868,8 +1104,24 @@
 		goto out_drv;
 	}
 	ws = wakeup_source_register("alarmtimer");
-	return 0;
+	if (!ws) {
+		error = -ENOMEM;
+		goto out_ws;
+	}
+
+	INIT_DELAYED_WORK(&work, alarm_work_func);
+	power_off_alarm_workqueue =
+		create_singlethread_workqueue("power_off_alarm");
+	if (!power_off_alarm_workqueue) {
+		error = -ENOMEM;
+		goto out_wq;
+	}
 
+	return 0;
+out_wq:
+	wakeup_source_unregister(ws);
+out_ws:
+	platform_device_unregister(pdev);
 out_drv:
 	platform_driver_unregister(&alarmtimer_driver);
 out_if:
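Since CLOCK_POWEROFF_ALARM is registered with posix_timers_register_clock(), userspace can arm a power-off alarm through the ordinary POSIX timer interface. A hypothetical sketch, assuming the clockid value is exported by the patched uapi headers (its definition is not shown in this hunk) and that the caller holds CAP_WAKE_ALARM; link with -lrt on older glibc:

#include <signal.h>
#include <stdio.h>
#include <time.h>

#ifndef CLOCK_POWEROFF_ALARM
#define CLOCK_POWEROFF_ALARM 12	/* assumed value from the patched headers */
#endif

int main(void)
{
	timer_t timerid;
	struct sigevent sev = { .sigev_notify = SIGEV_NONE };
	struct itimerspec its = { { 0, 0 }, { 0, 0 } };

	if (timer_create(CLOCK_POWEROFF_ALARM, &sev, &timerid)) {
		perror("timer_create");
		return 1;
	}
	clock_gettime(CLOCK_REALTIME, &its.it_value);
	its.it_value.tv_sec += 3600;	/* fire one hour from now */
	if (timer_settime(timerid, TIMER_ABSTIME, &its, NULL)) {
		perror("timer_settime");
		return 1;
	}
	puts("power-off alarm armed");
	return 0;
}

When such a timer is enqueued, alarm_start() on an ALARM_POWEROFF_REALTIME alarm also kicks the power_off_alarm workqueue, which programs the soonest expiry into the RTC via set_power_on_alarm().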
diff -ruw linux-4.4.115/kernel/time/clocksource.c linux-4.4.115-fbx/kernel/time/clocksource.c
--- linux-4.4.115/kernel/time/clocksource.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/kernel/time/clocksource.c	2019-01-22 16:16:28.715293604 +0100
@@ -108,7 +108,7 @@
 
 #ifdef CONFIG_CLOCKSOURCE_WATCHDOG
 static void clocksource_watchdog_work(struct work_struct *work);
-static void clocksource_select(void);
+static void clocksource_select(bool force);
 
 static LIST_HEAD(watchdog_list);
 static struct clocksource *watchdog;
@@ -415,7 +415,7 @@
 {
 	mutex_lock(&clocksource_mutex);
 	if (__clocksource_watchdog_kthread())
-		clocksource_select();
+		clocksource_select(false);
 	mutex_unlock(&clocksource_mutex);
 	return 0;
 }
@@ -555,11 +555,12 @@
 
 #ifndef CONFIG_ARCH_USES_GETTIMEOFFSET
 
-static struct clocksource *clocksource_find_best(bool oneshot, bool skipcur)
+static struct clocksource *clocksource_find_best(bool oneshot, bool skipcur,
+						bool force)
 {
 	struct clocksource *cs;
 
-	if (!finished_booting || list_empty(&clocksource_list))
+	if ((!finished_booting && !force) || list_empty(&clocksource_list))
 		return NULL;
 
 	/*
@@ -577,13 +578,13 @@
 	return NULL;
 }
 
-static void __clocksource_select(bool skipcur)
+static void __clocksource_select(bool skipcur, bool force)
 {
 	bool oneshot = tick_oneshot_mode_active();
 	struct clocksource *best, *cs;
 
 	/* Find the best suitable clocksource */
-	best = clocksource_find_best(oneshot, skipcur);
+	best = clocksource_find_best(oneshot, skipcur, force);
 	if (!best)
 		return;
 
@@ -623,22 +624,40 @@
  * Select the clocksource with the best rating, or the clocksource,
  * which is selected by userspace override.
  */
-static void clocksource_select(void)
+static void clocksource_select(bool force)
 {
-	__clocksource_select(false);
+	__clocksource_select(false, force);
 }
 
 static void clocksource_select_fallback(void)
 {
-	__clocksource_select(true);
+	__clocksource_select(true, false);
 }
 
 #else /* !CONFIG_ARCH_USES_GETTIMEOFFSET */
-static inline void clocksource_select(void) { }
+
+static inline void clocksource_select(bool force) { }
 static inline void clocksource_select_fallback(void) { }
 
 #endif
 
+/**
+ * clocksource_select_force - Force re-selection of the best clocksource
+ *				among registered clocksources
+ *
+ * clocksource_select() can't select the best clocksource before
+ * clocksource_done_booting() has run, and it must be called with
+ * clocksource_mutex held. Provide a new API that can be called from
+ * other files to select the best clocksource irrespective of the
+ * finished_booting flag.
+ */
+void clocksource_select_force(void)
+{
+	mutex_lock(&clocksource_mutex);
+	clocksource_select(true);
+	mutex_unlock(&clocksource_mutex);
+}
+
 /*
  * clocksource_done_booting - Called near the end of core bootup
  *
@@ -655,7 +674,7 @@
 	 * Run the watchdog first to eliminate unstable clock sources
 	 */
 	__clocksource_watchdog_kthread();
-	clocksource_select();
+	clocksource_select(false);
 	mutex_unlock(&clocksource_mutex);
 	return 0;
 }
@@ -744,6 +763,7 @@
 }
 EXPORT_SYMBOL_GPL(__clocksource_update_freq_scale);
 
+
 /**
  * __clocksource_register_scale - Used to install new clocksources
  * @cs:		clocksource to be registered
@@ -765,7 +785,7 @@
 	mutex_lock(&clocksource_mutex);
 	clocksource_enqueue(cs);
 	clocksource_enqueue_watchdog(cs);
-	clocksource_select();
+	clocksource_select(false);
 	clocksource_select_watchdog(false);
 	mutex_unlock(&clocksource_mutex);
 	return 0;
@@ -788,7 +808,7 @@
 {
 	mutex_lock(&clocksource_mutex);
 	__clocksource_change_rating(cs, rating);
-	clocksource_select();
+	clocksource_select(false);
 	clocksource_select_watchdog(false);
 	mutex_unlock(&clocksource_mutex);
 }
@@ -892,7 +912,7 @@
 
 	ret = sysfs_get_uname(buf, override_name, count);
 	if (ret >= 0)
-		clocksource_select();
+		clocksource_select(false);
 
 	mutex_unlock(&clocksource_mutex);
 
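clocksource_select_force() exists so that a driver registering a high-rating clocksource early in boot can trigger re-selection without waiting for clocksource_done_booting(). A sketch under assumed names (my_cs, my_read, the 19.2 MHz rate):

#include <linux/clocksource.h>
#include <linux/init.h>

static cycle_t my_read(struct clocksource *cs)
{
	return 0;	/* would read the hardware counter here */
}

static struct clocksource my_cs = {
	.name	= "my_timer",
	.rating	= 400,
	.read	= my_read,
	.mask	= CLOCKSOURCE_MASK(56),
	.flags	= CLOCK_SOURCE_IS_CONTINUOUS,
};

static int __init my_timer_init(void)
{
	int ret = clocksource_register_hz(&my_cs, 19200000);

	if (ret)
		return ret;
	/* re-run selection even though finished_booting is still 0 */
	clocksource_select_force();
	return 0;
}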
diff -ruw linux-4.4.115/kernel/time/hrtimer.c linux-4.4.115-fbx/kernel/time/hrtimer.c
--- linux-4.4.115/kernel/time/hrtimer.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/kernel/time/hrtimer.c	2019-10-29 09:26:25.637222692 +0100
@@ -783,34 +783,6 @@
 	clock_was_set_delayed();
 }
 
-static inline void timer_stats_hrtimer_set_start_info(struct hrtimer *timer)
-{
-#ifdef CONFIG_TIMER_STATS
-	if (timer->start_site)
-		return;
-	timer->start_site = __builtin_return_address(0);
-	memcpy(timer->start_comm, current->comm, TASK_COMM_LEN);
-	timer->start_pid = current->pid;
-#endif
-}
-
-static inline void timer_stats_hrtimer_clear_start_info(struct hrtimer *timer)
-{
-#ifdef CONFIG_TIMER_STATS
-	timer->start_site = NULL;
-#endif
-}
-
-static inline void timer_stats_account_hrtimer(struct hrtimer *timer)
-{
-#ifdef CONFIG_TIMER_STATS
-	if (likely(!timer_stats_active))
-		return;
-	timer_stats_update_stats(timer, timer->start_pid, timer->start_site,
-				 timer->function, timer->start_comm, 0);
-#endif
-}
-
 /*
  * Counterpart to lock_hrtimer_base above:
  */
@@ -887,7 +859,7 @@
 
 	base->cpu_base->active_bases |= 1 << base->index;
 
-	timer->state = HRTIMER_STATE_ENQUEUED;
+	timer->state |= HRTIMER_STATE_ENQUEUED;
 
 	return timerqueue_add(&base->active, &timer->node);
 }
@@ -907,11 +879,9 @@
 			     u8 newstate, int reprogram)
 {
 	struct hrtimer_cpu_base *cpu_base = base->cpu_base;
-	u8 state = timer->state;
 
-	timer->state = newstate;
-	if (!(state & HRTIMER_STATE_ENQUEUED))
-		return;
+	if (!(timer->state & HRTIMER_STATE_ENQUEUED))
+		goto out;
 
 	if (!timerqueue_del(&base->active, &timer->node))
 		cpu_base->active_bases &= ~(1 << base->index);
@@ -928,6 +898,13 @@
 	if (reprogram && timer == cpu_base->next_timer)
 		hrtimer_force_reprogram(cpu_base, 1);
 #endif
+
+out:
+	/*
+	 * We need to preserve the PINNED state here, otherwise we may end
+	 * up migrating pinned hrtimers as well.
+	 */
+	timer->state = newstate | (timer->state & HRTIMER_STATE_PINNED);
 }
 
 /*
@@ -949,13 +926,13 @@
 		 * rare case and less expensive than a smp call.
 		 */
 		debug_deactivate(timer);
-		timer_stats_hrtimer_clear_start_info(timer);
 		reprogram = base->cpu_base == this_cpu_ptr(&hrtimer_bases);
 
 		if (!restart)
 			state = HRTIMER_STATE_INACTIVE;
 
 		__remove_hrtimer(timer, base, state, reprogram);
+		timer->state &= ~HRTIMER_STATE_PINNED;
 		return 1;
 	}
 	return 0;
@@ -986,7 +963,7 @@
  *		relative (HRTIMER_MODE_REL)
  */
 void hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
-			    unsigned long delta_ns, const enum hrtimer_mode mode)
+			    u64 delta_ns, const enum hrtimer_mode mode)
 {
 	struct hrtimer_clock_base *base, *new_base;
 	unsigned long flags;
@@ -1007,7 +984,9 @@
 	/* Switch the timer base, if necessary: */
 	new_base = switch_hrtimer_base(timer, base, mode & HRTIMER_MODE_PINNED);
 
-	timer_stats_hrtimer_set_start_info(timer);
+	/* Update pinned state */
+	timer->state &= ~HRTIMER_STATE_PINNED;
+	timer->state |= (!!(mode & HRTIMER_MODE_PINNED)) << HRTIMER_PINNED_SHIFT;
 
 	leftmost = enqueue_hrtimer(timer, new_base);
 	if (!leftmost)
@@ -1145,12 +1124,6 @@
 	base = hrtimer_clockid_to_base(clock_id);
 	timer->base = &cpu_base->clock_base[base];
 	timerqueue_init(&timer->node);
-
-#ifdef CONFIG_TIMER_STATS
-	timer->start_site = NULL;
-	timer->start_pid = -1;
-	memset(timer->start_comm, 0, TASK_COMM_LEN);
-#endif
 }
 
 /**
@@ -1183,8 +1156,8 @@
 		cpu_base = READ_ONCE(timer->base->cpu_base);
 		seq = raw_read_seqcount_begin(&cpu_base->seq);
 
-		if (timer->state != HRTIMER_STATE_INACTIVE ||
-		    cpu_base->running == timer)
+		if (((timer->state & ~HRTIMER_STATE_PINNED) !=
+		      HRTIMER_STATE_INACTIVE) || cpu_base->running == timer)
 			return true;
 
 	} while (read_seqcount_retry(&cpu_base->seq, seq) ||
@@ -1234,7 +1207,6 @@
 	raw_write_seqcount_barrier(&cpu_base->seq);
 
 	__remove_hrtimer(timer, base, HRTIMER_STATE_INACTIVE, 0);
-	timer_stats_account_hrtimer(timer);
 	fn = timer->function;
 
 	/*
@@ -1555,7 +1527,7 @@
 	struct restart_block *restart;
 	struct hrtimer_sleeper t;
 	int ret = 0;
-	unsigned long slack;
+	u64 slack;
 
 	slack = current->timer_slack_ns;
 	if (dl_task(current) || rt_task(current))
@@ -1622,16 +1594,22 @@
 	hrtimer_init_hres(cpu_base);
 }
 
-#ifdef CONFIG_HOTPLUG_CPU
-
+#if defined(CONFIG_HOTPLUG_CPU)
 static void migrate_hrtimer_list(struct hrtimer_clock_base *old_base,
-				struct hrtimer_clock_base *new_base)
+				 struct hrtimer_clock_base *new_base,
+				 bool remove_pinned)
 {
 	struct hrtimer *timer;
 	struct timerqueue_node *node;
+	struct timerqueue_head pinned;
+	int is_pinned;
+	bool is_hotplug = !cpu_online(old_base->cpu_base->cpu);
+
+	timerqueue_init_head(&pinned);
 
 	while ((node = timerqueue_getnext(&old_base->active))) {
 		timer = container_of(node, struct hrtimer, node);
+		if (is_hotplug)
-		BUG_ON(hrtimer_callback_running(timer));
+			BUG_ON(hrtimer_callback_running(timer));
 		debug_deactivate(timer);
 
@@ -1641,6 +1619,13 @@
 		 * under us on another CPU
 		 */
 		__remove_hrtimer(timer, old_base, HRTIMER_STATE_ENQUEUED, 0);
+
+		is_pinned = timer->state & HRTIMER_STATE_PINNED;
+		if (!remove_pinned && is_pinned) {
+			timerqueue_add(&pinned, &timer->node);
+			continue;
+		}
+
 		timer->base = new_base;
 		/*
 		 * Enqueue the timers on the new cpu. This does not
@@ -1652,17 +1637,23 @@
 		 */
 		enqueue_hrtimer(timer, new_base);
 	}
+
+	/* Re-queue pinned timers for the non-hotplug use case */
+	while ((node = timerqueue_getnext(&pinned))) {
+		timer = container_of(node, struct hrtimer, node);
+
+		timerqueue_del(&pinned, &timer->node);
+		enqueue_hrtimer(timer, old_base);
+	}
 }
 
-static void migrate_hrtimers(int scpu)
+static void __migrate_hrtimers(int scpu, bool remove_pinned)
 {
 	struct hrtimer_cpu_base *old_base, *new_base;
+	unsigned long flags;
 	int i;
 
-	BUG_ON(cpu_online(scpu));
-	tick_cancel_sched_timer(scpu);
-
-	local_irq_disable();
+	local_irq_save(flags);
 	old_base = &per_cpu(hrtimer_bases, scpu);
 	new_base = this_cpu_ptr(&hrtimer_bases);
 	/*
@@ -1674,7 +1665,7 @@
 
 	for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
 		migrate_hrtimer_list(&old_base->clock_base[i],
-				     &new_base->clock_base[i]);
+				     &new_base->clock_base[i], remove_pinned);
 	}
 
 	raw_spin_unlock(&old_base->lock);
@@ -1682,7 +1673,20 @@
 
 	/* Check, if we got expired work to do */
 	__hrtimer_peek_ahead_timers();
-	local_irq_enable();
+	local_irq_restore(flags);
+}
+
+static void migrate_hrtimers(int scpu)
+{
+	BUG_ON(cpu_online(scpu));
+	tick_cancel_sched_timer(scpu);
+
+	__migrate_hrtimers(scpu, true);
+}
+
+void hrtimer_quiesce_cpu(void *cpup)
+{
+	__migrate_hrtimers(*(int *)cpup, false);
 }
 
 #endif /* CONFIG_HOTPLUG_CPU */
@@ -1732,7 +1736,7 @@
  * @clock:	timer clock, CLOCK_MONOTONIC or CLOCK_REALTIME
  */
 int __sched
-schedule_hrtimeout_range_clock(ktime_t *expires, unsigned long delta,
+schedule_hrtimeout_range_clock(ktime_t *expires, u64 delta,
 			       const enum hrtimer_mode mode, int clock)
 {
 	struct hrtimer_sleeper t;
@@ -1790,17 +1794,21 @@
  * You can set the task state as follows -
  *
  * %TASK_UNINTERRUPTIBLE - at least @timeout time is guaranteed to
- * pass before the routine returns.
+ * pass before the routine returns unless the current task is explicitly
+ * woken up (e.g. by wake_up_process()).
  *
  * %TASK_INTERRUPTIBLE - the routine may return early if a signal is
- * delivered to the current task.
+ * delivered to the current task or the current task is explicitly woken
+ * up.
  *
  * The current task state is guaranteed to be TASK_RUNNING when this
  * routine returns.
  *
- * Returns 0 when the timer has expired otherwise -EINTR
+ * Returns 0 when the timer has expired. If the task was woken before the
+ * timer expired by a signal (only possible in state TASK_INTERRUPTIBLE) or
+ * by an explicit wakeup, it returns -EINTR.
  */
-int __sched schedule_hrtimeout_range(ktime_t *expires, unsigned long delta,
+int __sched schedule_hrtimeout_range(ktime_t *expires, u64 delta,
 				     const enum hrtimer_mode mode)
 {
 	return schedule_hrtimeout_range_clock(expires, delta, mode,
@@ -1820,15 +1828,19 @@
  * You can set the task state as follows -
  *
  * %TASK_UNINTERRUPTIBLE - at least @timeout time is guaranteed to
- * pass before the routine returns.
+ * pass before the routine returns unless the current task is explicitly
+ * woken up (e.g. by wake_up_process()).
  *
  * %TASK_INTERRUPTIBLE - the routine may return early if a signal is
- * delivered to the current task.
+ * delivered to the current task or the current task is explicitly woken
+ * up.
  *
  * The current task state is guaranteed to be TASK_RUNNING when this
  * routine returns.
  *
- * Returns 0 when the timer has expired otherwise -EINTR
+ * Returns 0 when the timer has expired. If the task was woken before the
+ * timer expired by a signal (only possible in state TASK_INTERRUPTIBLE) or
+ * by an explicit wakeup, it returns -EINTR.
  */
 int __sched schedule_hrtimeout(ktime_t *expires,
 			       const enum hrtimer_mode mode)
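With the pinned bit now carried in timer->state, hrtimer_quiesce_cpu() (the isolation path, remove_pinned == false) re-queues pinned timers on their original base instead of migrating them. A sketch of a timer that would stay put, using illustrative names:

#include <linux/hrtimer.h>
#include <linux/ktime.h>

static struct hrtimer my_pinned_timer;

static enum hrtimer_restart my_timer_fn(struct hrtimer *t)
{
	hrtimer_forward_now(t, ms_to_ktime(100));
	return HRTIMER_RESTART;
}

static void start_pinned_timer_on_this_cpu(void)
{
	hrtimer_init(&my_pinned_timer, CLOCK_MONOTONIC,
		     HRTIMER_MODE_REL_PINNED);
	my_pinned_timer.function = my_timer_fn;
	/* HRTIMER_MODE_PINNED sets HRTIMER_STATE_PINNED in timer->state */
	hrtimer_start(&my_pinned_timer, ms_to_ktime(100),
		      HRTIMER_MODE_REL_PINNED);
}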
diff -ruw linux-4.4.115/kernel/time/Makefile linux-4.4.115-fbx/kernel/time/Makefile
--- linux-4.4.115/kernel/time/Makefile	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/kernel/time/Makefile	2019-01-22 16:16:28.715293604 +0100
@@ -9,6 +9,7 @@
 endif
 obj-$(CONFIG_GENERIC_SCHED_CLOCK)		+= sched_clock.o
 obj-$(CONFIG_TICK_ONESHOT)			+= tick-oneshot.o tick-sched.o
-obj-$(CONFIG_TIMER_STATS)			+= timer_stats.o
 obj-$(CONFIG_DEBUG_FS)				+= timekeeping_debug.o
 obj-$(CONFIG_TEST_UDELAY)			+= test_udelay.o
+
+ccflags-y += -Idrivers/cpuidle
diff -ruw linux-4.4.115/kernel/time/posix-cpu-timers.c linux-4.4.115-fbx/kernel/time/posix-cpu-timers.c
--- linux-4.4.115/kernel/time/posix-cpu-timers.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/kernel/time/posix-cpu-timers.c	2019-10-29 09:26:25.641222731 +0100
@@ -1250,7 +1250,7 @@
 void set_process_cpu_timer(struct task_struct *tsk, unsigned int clock_idx,
 			   cputime_t *newval, cputime_t *oldval)
 {
-	unsigned long long now;
+	unsigned long long now = 0;
 
 	WARN_ON_ONCE(clock_idx == CPUCLOCK_SCHED);
 	cpu_timer_sample_group(clock_idx, tsk, &now);
diff -ruw linux-4.4.115/kernel/time/sched_clock.c linux-4.4.115-fbx/kernel/time/sched_clock.c
--- linux-4.4.115/kernel/time/sched_clock.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/kernel/time/sched_clock.c	2019-10-29 09:26:25.641222731 +0100
@@ -70,6 +70,11 @@
 
 static struct hrtimer sched_clock_timer;
 static int irqtime = -1;
+static int initialized;
+static u64 suspend_ns;
+static u64 suspend_cycles;
+static u64 resume_cycles;
+
 
 core_param(irqtime, irqtime, int, 0400);
 
@@ -231,6 +236,11 @@
 	pr_debug("Registered %pF as sched_clock source\n", read);
 }
 
+int sched_clock_initialized(void)
+{
+	return initialized;
+}
+
 void __init sched_clock_postinit(void)
 {
 	/*
@@ -249,6 +259,8 @@
 	hrtimer_init(&sched_clock_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 	sched_clock_timer.function = sched_clock_poll;
 	hrtimer_start(&sched_clock_timer, cd.wrap_kt, HRTIMER_MODE_REL);
+
+	initialized = 1;
 }
 
 /*
@@ -274,6 +286,11 @@
 	struct clock_read_data *rd = &cd.read_data[0];
 
 	update_sched_clock();
+
+	suspend_ns = rd->epoch_ns;
+	suspend_cycles = rd->epoch_cyc;
+	pr_info("suspend ns:%17llu	suspend cycles:%17llu\n",
+				rd->epoch_ns, rd->epoch_cyc);
 	hrtimer_cancel(&sched_clock_timer);
 	rd->read_sched_clock = suspended_sched_clock_read;
 
@@ -285,6 +302,8 @@
 	struct clock_read_data *rd = &cd.read_data[0];
 
 	rd->epoch_cyc = cd.actual_read_sched_clock();
+	resume_cycles = rd->epoch_cyc;
+	pr_info("resume cycles:%17llu\n", rd->epoch_cyc);
 	hrtimer_start(&sched_clock_timer, cd.wrap_kt, HRTIMER_MODE_REL);
 	rd->read_sched_clock = cd.actual_read_sched_clock;
 }
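sched_clock_initialized() gives early consumers a way to tell whether sched_clock_postinit() has run and armed the wrap-guard hrtimer. A sketch of an assumed consumer:

#include <linux/sched.h>

extern int sched_clock_initialized(void);

static unsigned long long safe_sample_ns(void)
{
	if (!sched_clock_initialized())
		return 0;	/* too early: the epoch may still be moving */
	return sched_clock();
}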
diff -ruw linux-4.4.115/kernel/time/tick-sched.c linux-4.4.115-fbx/kernel/time/tick-sched.c
--- linux-4.4.115/kernel/time/tick-sched.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/kernel/time/tick-sched.c	2019-10-29 09:26:25.641222731 +0100
@@ -19,11 +19,13 @@
 #include <linux/percpu.h>
 #include <linux/profile.h>
 #include <linux/sched.h>
+#include <linux/timer.h>
 #include <linux/module.h>
 #include <linux/irq_work.h>
 #include <linux/posix-timers.h>
 #include <linux/perf_event.h>
 #include <linux/context_tracking.h>
+#include <linux/rq_stats.h>
 
 #include <asm/irq_regs.h>
 
@@ -31,6 +33,10 @@
 
 #include <trace/events/timer.h>
 
+struct rq_data rq_info;
+struct workqueue_struct *rq_wq;
+spinlock_t rq_lock;
+
 /*
  * Per cpu nohz control structure
  */
@@ -41,6 +47,21 @@
  */
 static ktime_t last_jiffies_update;
 
+u64 jiffy_to_ktime_ns(u64 *now, u64 *jiffy_ktime_ns)
+{
+	u64 cur_jiffies;
+	unsigned long seq;
+
+	do {
+		seq = read_seqbegin(&jiffies_lock);
+		*now = ktime_get_ns();
+		*jiffy_ktime_ns = ktime_to_ns(last_jiffies_update);
+		cur_jiffies = get_jiffies_64();
+	} while (read_seqretry(&jiffies_lock, seq));
+
+	return cur_jiffies;
+}
+
 struct tick_sched *tick_get_tick_sched(int cpu)
 {
 	return &per_cpu(tick_cpu_sched, cpu);
@@ -143,7 +164,7 @@
 	 * when we go busy again does not account too much ticks.
 	 */
 	if (ts->tick_stopped) {
-		touch_softlockup_watchdog();
+		touch_softlockup_watchdog_sched();
 		if (is_idle_task(current))
 			ts->idle_jiffies++;
 	}
@@ -430,7 +451,7 @@
 	tick_do_update_jiffies64(now);
 	local_irq_restore(flags);
 
-	touch_softlockup_watchdog();
+	touch_softlockup_watchdog_sched();
 }
 
 /*
@@ -716,7 +737,7 @@
 	update_cpu_load_nohz();
 
 	calc_load_exit_idle();
-	touch_softlockup_watchdog();
+	touch_softlockup_watchdog_sched();
 	/*
 	 * Cancel the scheduled timer and restore the tick
 	 */
@@ -804,6 +825,11 @@
 
 	now = tick_nohz_start_idle(ts);
 
+#ifdef CONFIG_SMP
+	if (check_pending_deferrable_timers(cpu))
+		raise_softirq_irqoff(TIMER_SOFTIRQ);
+#endif
+
 	if (can_stop_idle_tick(cpu, ts)) {
 		int was_stopped = ts->tick_stopped;
 
@@ -885,6 +911,18 @@
 	return ts->sleep_length;
 }
 
+/**
+ * tick_nohz_get_idle_calls - return the current idle calls counter value
+ *
+ * Called from the schedutil frequency scaling governor in scheduler context.
+ */
+unsigned long tick_nohz_get_idle_calls(void)
+{
+	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
+
+	return ts->idle_calls;
+}
+
 static void tick_nohz_account_idle_ticks(struct tick_sched *ts)
 {
 #ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
@@ -1064,6 +1102,51 @@
  * High resolution timer specific code
  */
 #ifdef CONFIG_HIGH_RES_TIMERS
+static void update_rq_stats(void)
+{
+	unsigned long jiffy_gap = 0;
+	unsigned int rq_avg = 0;
+	unsigned long flags = 0;
+
+	jiffy_gap = jiffies - rq_info.rq_poll_last_jiffy;
+
+	if (jiffy_gap >= rq_info.rq_poll_jiffies) {
+
+		spin_lock_irqsave(&rq_lock, flags);
+
+		if (!rq_info.rq_avg)
+			rq_info.rq_poll_total_jiffies = 0;
+
+		rq_avg = nr_running() * 10;
+
+		if (rq_info.rq_poll_total_jiffies) {
+			rq_avg = (rq_avg * jiffy_gap) +
+				(rq_info.rq_avg *
+				 rq_info.rq_poll_total_jiffies);
+			do_div(rq_avg,
+			       rq_info.rq_poll_total_jiffies + jiffy_gap);
+		}
+
+		rq_info.rq_avg = rq_avg;
+		rq_info.rq_poll_total_jiffies += jiffy_gap;
+		rq_info.rq_poll_last_jiffy = jiffies;
+
+		spin_unlock_irqrestore(&rq_lock, flags);
+	}
+}
+
+static void wakeup_user(void)
+{
+	unsigned long jiffy_gap;
+
+	jiffy_gap = jiffies - rq_info.def_timer_last_jiffy;
+
+	if (jiffy_gap >= rq_info.def_timer_jiffies) {
+		rq_info.def_timer_last_jiffy = jiffies;
+		queue_work(rq_wq, &rq_info.def_timer_work);
+	}
+}
+
 /*
  * We rearm the timer until we get disabled by the idle code.
  * Called with interrupts disabled.
@@ -1081,9 +1164,23 @@
 	 * Do not call, when we are not in irq context and have
 	 * no valid regs pointer
 	 */
-	if (regs)
+	if (regs) {
 		tick_sched_handle(ts, regs);
 
+		if (rq_info.init == 1 &&
+				tick_do_timer_cpu == smp_processor_id()) {
+			/*
+			 * update run queue statistics
+			 */
+			update_rq_stats();
+
+			/*
+			 * wakeup user if needed
+			 */
+			wakeup_user();
+		}
+	}
+
 	/* No need to reprogram if we are in idle or full dynticks mode */
 	if (unlikely(ts->tick_stopped))
 		return HRTIMER_NORESTART;
@@ -1196,3 +1293,8 @@
 	tick_nohz_switch_to_nohz();
 	return 0;
 }
+
+ktime_t *get_next_event_cpu(unsigned int cpu)
+{
+	return &(per_cpu(tick_cpu_device, cpu).evtdev->next_event);
+}
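update_rq_stats() keeps a jiffy-weighted running average: the fresh nr_running() * 10 sample is weighted by the jiffies elapsed since the last poll, the previous average by the jiffies it already covers. The arithmetic, modelled stand-alone with example numbers:

#include <stdio.h>

int main(void)
{
	unsigned long total = 40;	/* rq_info.rq_poll_total_jiffies */
	unsigned long gap = 10;		/* jiffies since the last poll */
	unsigned int old_avg = 20;	/* previous rq_avg (runqueue depth x10) */
	unsigned int sample = 30;	/* nr_running() * 10 right now */

	unsigned long long avg = (unsigned long long)sample * gap +
				 (unsigned long long)old_avg * total;
	avg /= total + gap;

	printf("new rq_avg = %llu\n", avg);	/* (30*10 + 20*40) / 50 = 22 */
	return 0;
}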
diff -ruw linux-4.4.115/kernel/time/timekeeping.c linux-4.4.115-fbx/kernel/time/timekeeping.c
--- linux-4.4.115/kernel/time/timekeeping.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/kernel/time/timekeeping.c	2019-10-29 09:26:25.645222770 +0100
@@ -70,6 +70,10 @@
 		tk->tkr_mono.xtime_nsec -= (u64)NSEC_PER_SEC << tk->tkr_mono.shift;
 		tk->xtime_sec++;
 	}
+	while (tk->tkr_raw.xtime_nsec >= ((u64)NSEC_PER_SEC << tk->tkr_raw.shift)) {
+		tk->tkr_raw.xtime_nsec -= (u64)NSEC_PER_SEC << tk->tkr_raw.shift;
+		tk->raw_sec++;
+	}
 }
 
 static inline struct timespec64 tk_xtime(struct timekeeper *tk)
@@ -277,18 +281,19 @@
 	/* Go back from cycles -> shifted ns */
 	tk->xtime_interval = (u64) interval * clock->mult;
 	tk->xtime_remainder = ntpinterval - tk->xtime_interval;
-	tk->raw_interval =
-		((u64) interval * clock->mult) >> clock->shift;
+	tk->raw_interval = interval * clock->mult;
 
 	 /* if changing clocks, convert xtime_nsec shift units */
 	if (old_clock) {
 		int shift_change = clock->shift - old_clock->shift;
-		if (shift_change < 0)
+		if (shift_change < 0) {
 			tk->tkr_mono.xtime_nsec >>= -shift_change;
-		else
+			tk->tkr_raw.xtime_nsec >>= -shift_change;
+		} else {
 			tk->tkr_mono.xtime_nsec <<= shift_change;
+			tk->tkr_raw.xtime_nsec <<= shift_change;
+		}
 	}
-	tk->tkr_raw.xtime_nsec = 0;
 
 	tk->tkr_mono.shift = clock->shift;
 	tk->tkr_raw.shift = clock->shift;
@@ -442,6 +447,35 @@
 }
 EXPORT_SYMBOL_GPL(ktime_get_raw_fast_ns);
 
+/**
+ * ktime_get_boot_fast_ns - NMI safe and fast access to boot clock.
+ *
+ * To keep it NMI safe since we're accessing from tracing, we're not using a
+ * separate timekeeper with updates to monotonic clock and boot offset
+ * protected with seqlocks. This has the following minor side effects:
+ *
+ * (1) It is possible that a timestamp is taken after the boot offset is
+ * updated but before the timekeeper is updated. If this happens, the new
+ * boot offset is added to the old timekeeping, making the clock appear to
+ * update slightly earlier:
+ *    CPU 0                                        CPU 1
+ *    timekeeping_inject_sleeptime64()
+ *    __timekeeping_inject_sleeptime(tk, delta);
+ *                                                 timestamp();
+ *    timekeeping_update(tk, TK_CLEAR_NTP...);
+ *
+ * (2) On 32-bit systems, the 64-bit boot offset (tk->offs_boot) may be
+ * partially updated. Since the tk->offs_boot update is a rare event, this
+ * should be a rare occurrence that postprocessing should be able to handle.
+ */
+u64 notrace ktime_get_boot_fast_ns(void)
+{
+	struct timekeeper *tk = &tk_core.timekeeper;
+
+	return (ktime_get_mono_fast_ns() + ktime_to_ns(tk->offs_boot));
+}
+EXPORT_SYMBOL_GPL(ktime_get_boot_fast_ns);
+
 /* Suspend-time cycles value for halted fast timekeeper. */
 static cycle_t cycles_at_suspend;
 
@@ -588,9 +622,6 @@
 	nsec = (u32) tk->wall_to_monotonic.tv_nsec;
 	tk->tkr_mono.base = ns_to_ktime(seconds * NSEC_PER_SEC + nsec);
 
-	/* Update the monotonic raw base */
-	tk->tkr_raw.base = timespec64_to_ktime(tk->raw_time);
-
 	/*
 	 * The sum of the nanoseconds portions of xtime and
 	 * wall_to_monotonic can be greater/equal one second. Take
@@ -600,6 +631,9 @@
 	if (nsec >= NSEC_PER_SEC)
 		seconds++;
 	tk->ktime_sec = seconds;
+
+	/* Update the monotonic raw base */
+	tk->tkr_raw.base = ns_to_ktime(tk->raw_sec * NSEC_PER_SEC);
 }
 
 /* must hold timekeeper_lock */
@@ -641,7 +675,6 @@
 static void timekeeping_forward_now(struct timekeeper *tk)
 {
 	cycle_t cycle_now, delta;
-	s64 nsec;
 
 	cycle_now = tk_clock_read(&tk->tkr_mono);
 	delta = clocksource_delta(cycle_now, tk->tkr_mono.cycle_last, tk->tkr_mono.mask);
@@ -653,10 +686,13 @@
 	/* If arch requires, add in get_arch_timeoffset() */
 	tk->tkr_mono.xtime_nsec += (u64)arch_gettimeoffset() << tk->tkr_mono.shift;
 
-	tk_normalize_xtime(tk);
 
-	nsec = clocksource_cyc2ns(delta, tk->tkr_raw.mult, tk->tkr_raw.shift);
-	timespec64_add_ns(&tk->raw_time, nsec);
+	tk->tkr_raw.xtime_nsec += delta * tk->tkr_raw.mult;
+
+	/* If arch requires, add in get_arch_timeoffset() */
+	tk->tkr_raw.xtime_nsec += (u64)arch_gettimeoffset() << tk->tkr_raw.shift;
+
+	tk_normalize_xtime(tk);
 }
 
 /**
@@ -1150,19 +1186,18 @@
 void getrawmonotonic64(struct timespec64 *ts)
 {
 	struct timekeeper *tk = &tk_core.timekeeper;
-	struct timespec64 ts64;
 	unsigned long seq;
 	s64 nsecs;
 
 	do {
 		seq = read_seqcount_begin(&tk_core.seq);
+		ts->tv_sec = tk->raw_sec;
 		nsecs = timekeeping_get_ns(&tk->tkr_raw);
-		ts64 = tk->raw_time;
 
 	} while (read_seqcount_retry(&tk_core.seq, seq));
 
-	timespec64_add_ns(&ts64, nsecs);
-	*ts = ts64;
+	ts->tv_nsec = 0;
+	timespec64_add_ns(ts, nsecs);
 }
 EXPORT_SYMBOL(getrawmonotonic64);
 
@@ -1286,8 +1321,7 @@
 	tk_setup_internals(tk, clock);
 
 	tk_set_xtime(tk, &now);
-	tk->raw_time.tv_sec = 0;
-	tk->raw_time.tv_nsec = 0;
+	tk->raw_sec = 0;
 	if (boot.tv_sec == 0 && boot.tv_nsec == 0)
 		boot = tk_xtime(tk);
 
@@ -1767,7 +1801,7 @@
 						unsigned int *clock_set)
 {
 	cycle_t interval = tk->cycle_interval << shift;
-	u64 raw_nsecs;
+	u64 snsec_per_sec;
 
 	/* If the offset is smaller than a shifted interval, do nothing */
 	if (offset < interval)
@@ -1782,14 +1816,12 @@
 	*clock_set |= accumulate_nsecs_to_secs(tk);
 
 	/* Accumulate raw time */
-	raw_nsecs = (u64)tk->raw_interval << shift;
-	raw_nsecs += tk->raw_time.tv_nsec;
-	if (raw_nsecs >= NSEC_PER_SEC) {
-		u64 raw_secs = raw_nsecs;
-		raw_nsecs = do_div(raw_secs, NSEC_PER_SEC);
-		tk->raw_time.tv_sec += raw_secs;
+	tk->tkr_raw.xtime_nsec += tk->raw_interval << shift;
+	snsec_per_sec = (u64)NSEC_PER_SEC << tk->tkr_raw.shift;
+	while (tk->tkr_raw.xtime_nsec >= snsec_per_sec) {
+		tk->tkr_raw.xtime_nsec -= snsec_per_sec;
+		tk->raw_sec++;
 	}
-	tk->raw_time.tv_nsec = raw_nsecs;
 
 	/* Accumulate error between NTP and clock interval */
 	tk->ntp_error += tk->ntp_tick << shift;
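After this change the raw clock accumulates in shifted nanoseconds exactly like the monotonic clock: raw_interval stays in (ns << shift) units and whole seconds are carried into tk->raw_sec. The carry step, modelled stand-alone with example mult/shift values:

#include <stdio.h>

#define NSEC_PER_SEC 1000000000ULL

int main(void)
{
	unsigned int shift = 8;			/* example clocksource shift */
	unsigned long long mult = 300;		/* example clocksource mult */
	unsigned long long xtime_nsec = 0;	/* tk->tkr_raw.xtime_nsec */
	unsigned long long raw_sec = 0;		/* tk->raw_sec */
	unsigned long long delta = 1000000000;	/* cycles consumed */
	unsigned long long snsec_per_sec = NSEC_PER_SEC << shift;

	xtime_nsec += delta * mult;		/* the accumulation step */
	while (xtime_nsec >= snsec_per_sec) {	/* the carry step */
		xtime_nsec -= snsec_per_sec;
		raw_sec++;
	}
	printf("raw_sec=%llu remainder(shifted ns)=%llu\n",
	       raw_sec, xtime_nsec);
	return 0;
}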
diff -ruw linux-4.4.115/kernel/time/timer.c linux-4.4.115-fbx/kernel/time/timer.c
--- linux-4.4.115/kernel/time/timer.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/kernel/time/timer.c	2019-01-22 16:16:28.723293677 +0100
@@ -94,12 +94,16 @@
 	struct tvec tv5;
 } ____cacheline_aligned;
 
+static inline void __run_timers(struct tvec_base *base);
 
 static DEFINE_PER_CPU(struct tvec_base, tvec_bases);
 
 #if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
 unsigned int sysctl_timer_migration = 1;
 
+struct tvec_base tvec_base_deferrable;
+static atomic_t deferrable_pending;
+
 void timers_update_migration(bool update_nohz)
 {
 	bool on = sysctl_timer_migration && tick_nohz_active;
@@ -135,18 +139,66 @@
 }
 
 static inline struct tvec_base *get_target_base(struct tvec_base *base,
-						int pinned)
+						int pinned, u32 timer_flags)
 {
+	if (!pinned && !(timer_flags & TIMER_PINNED_ON_CPU) &&
+	    (timer_flags & TIMER_DEFERRABLE))
+		return &tvec_base_deferrable;
 	if (pinned || !base->migration_enabled)
 		return this_cpu_ptr(&tvec_bases);
 	return per_cpu_ptr(&tvec_bases, get_nohz_timer_target());
 }
+
+static inline void __run_deferrable_timers(void)
+{
+	if ((atomic_cmpxchg(&deferrable_pending, 1, 0) &&
+		tick_do_timer_cpu == TICK_DO_TIMER_NONE) ||
+		tick_do_timer_cpu == smp_processor_id()) {
+		if (time_after_eq(jiffies,
+			tvec_base_deferrable.timer_jiffies))
+			__run_timers(&tvec_base_deferrable);
+	}
+}
+
+static inline void init_timer_deferrable_global(void)
+{
+	tvec_base_deferrable.cpu = nr_cpu_ids;
+	spin_lock_init(&tvec_base_deferrable.lock);
+	tvec_base_deferrable.timer_jiffies = jiffies;
+	tvec_base_deferrable.next_timer = tvec_base_deferrable.timer_jiffies;
+}
+
+static inline struct tvec_base *get_timer_base(u32 timer_flags)
+{
+	if (!(timer_flags & TIMER_PINNED_ON_CPU) &&
+	    timer_flags & TIMER_DEFERRABLE)
+		return &tvec_base_deferrable;
+	else
+		return per_cpu_ptr(&tvec_bases, timer_flags & TIMER_CPUMASK);
+}
 #else
 static inline struct tvec_base *get_target_base(struct tvec_base *base,
-						int pinned)
+						int pinned, u32 timer_flags)
 {
 	return this_cpu_ptr(&tvec_bases);
 }
+
+static inline void __run_deferrable_timers(void)
+{
+}
+
+static inline void init_timer_deferrable_global(void)
+{
+	/*
+	 * Initialize the CPU-unbound deferrable timer base only on SMP
+	 * kernels; a UP kernel handles these timers on the CPU 0 timer base.
+	 */
+}
+
+static inline struct tvec_base *get_timer_base(u32 timer_flags)
+{
+	return per_cpu_ptr(&tvec_bases, timer_flags & TIMER_CPUMASK);
+}
 #endif
 
 static unsigned long round_jiffies_common(unsigned long j, int cpu,
@@ -448,38 +500,6 @@
 	}
 }
 
-#ifdef CONFIG_TIMER_STATS
-void __timer_stats_timer_set_start_info(struct timer_list *timer, void *addr)
-{
-	if (timer->start_site)
-		return;
-
-	timer->start_site = addr;
-	memcpy(timer->start_comm, current->comm, TASK_COMM_LEN);
-	timer->start_pid = current->pid;
-}
-
-static void timer_stats_account_timer(struct timer_list *timer)
-{
-	void *site;
-
-	/*
-	 * start_site can be concurrently reset by
-	 * timer_stats_timer_clear_start_info()
-	 */
-	site = READ_ONCE(timer->start_site);
-	if (likely(!site))
-		return;
-
-	timer_stats_update_stats(timer, timer->start_pid, site,
-				 timer->function, timer->start_comm,
-				 timer->flags);
-}
-
-#else
-static void timer_stats_account_timer(struct timer_list *timer) {}
-#endif
-
 #ifdef CONFIG_DEBUG_OBJECTS_TIMERS
 
 static struct debug_obj_descr timer_debug_descr;
@@ -682,11 +702,6 @@
 	timer->entry.pprev = NULL;
 	timer->flags = flags | raw_smp_processor_id();
 	timer->slack = -1;
-#ifdef CONFIG_TIMER_STATS
-	timer->start_site = NULL;
-	timer->start_pid = -1;
-	memset(timer->start_comm, 0, TASK_COMM_LEN);
-#endif
 	lockdep_init_map(&timer->lockdep_map, name, key, 0);
 }
 
@@ -775,7 +790,7 @@
 		tf = READ_ONCE(timer->flags);
 
 		if (!(tf & TIMER_MIGRATING)) {
-			base = per_cpu_ptr(&tvec_bases, tf & TIMER_CPUMASK);
+			base = get_timer_base(tf);
 			spin_lock_irqsave(&base->lock, *flags);
 			if (timer->flags == tf)
 				return base;
@@ -793,7 +808,6 @@
 	unsigned long flags;
 	int ret = 0;
 
-	timer_stats_timer_set_start_info(timer);
 	BUG_ON(!timer->function);
 
 	base = lock_timer_base(timer, &flags);
@@ -804,7 +818,7 @@
 
 	debug_activate(timer, expires);
 
-	new_base = get_target_base(base, pinned);
+	new_base = get_target_base(base, pinned, timer->flags);
 
 	if (base != new_base) {
 		/*
@@ -826,6 +840,10 @@
 		}
 	}
 
+	if (pinned == TIMER_PINNED)
+		timer->flags |= TIMER_PINNED_ON_CPU;
+	else
+		timer->flags &= ~TIMER_PINNED_ON_CPU;
 	timer->expires = expires;
 	internal_add_timer(base, timer);
 
@@ -988,7 +1006,6 @@
 	struct tvec_base *base;
 	unsigned long flags;
 
-	timer_stats_timer_set_start_info(timer);
 	BUG_ON(timer_pending(timer) || !timer->function);
 
 	/*
@@ -1007,6 +1024,7 @@
 			   (timer->flags & ~TIMER_BASEMASK) | cpu);
 	}
 
+	timer->flags |= TIMER_PINNED_ON_CPU;
 	debug_activate(timer, timer->expires);
 	internal_add_timer(base, timer);
 	spin_unlock_irqrestore(&base->lock, flags);
@@ -1032,7 +1050,6 @@
 
 	debug_assert_init(timer);
 
-	timer_stats_timer_clear_start_info(timer);
 	if (timer_pending(timer)) {
 		base = lock_timer_base(timer, &flags);
 		ret = detach_if_pending(timer, base, true);
@@ -1060,10 +1077,9 @@
 
 	base = lock_timer_base(timer, &flags);
 
-	if (base->running_timer != timer) {
-		timer_stats_timer_clear_start_info(timer);
+	if (base->running_timer != timer)
 		ret = detach_if_pending(timer, base, true);
-	}
+
 	spin_unlock_irqrestore(&base->lock, flags);
 
 	return ret;
@@ -1247,8 +1263,6 @@
 			data = timer->data;
 			irqsafe = timer->flags & TIMER_IRQSAFE;
 
-			timer_stats_account_timer(timer);
-
 			base->running_timer = timer;
 			detach_expired_timer(timer, base);
 
@@ -1376,6 +1390,30 @@
 	return DIV_ROUND_UP_ULL(nextevt, TICK_NSEC) * TICK_NSEC;
 }
 
+#ifdef CONFIG_SMP
+/*
+ * check_pending_deferrable_timers - Check for unbound deferrable timer expiry.
+ * @cpu: Current CPU
+ *
+ * The function checks whether any global deferrable pending timers have
+ * expired. It does not check the expiry of CPU-bound deferrable pending
+ * timers.
+ *
+ * Returns true when a CPU-unbound deferrable timer has expired.
+ */
+bool check_pending_deferrable_timers(int cpu)
+{
+	if (cpu == tick_do_timer_cpu ||
+		tick_do_timer_cpu == TICK_DO_TIMER_NONE) {
+		if (time_after_eq(jiffies, tvec_base_deferrable.timer_jiffies)
+			&& !atomic_cmpxchg(&deferrable_pending, 0, 1)) {
+			return true;
+		}
+	}
+	return false;
+}
+#endif
+
 /**
  * get_next_timer_interrupt - return the time (clock mono) of the next timer
  * @basej:	base time jiffies
@@ -1440,6 +1478,8 @@
 {
 	struct tvec_base *base = this_cpu_ptr(&tvec_bases);
 
+	__run_deferrable_timers();
+
 	if (time_after_eq(jiffies, base->timer_jiffies))
 		__run_timers(base);
 }
@@ -1482,11 +1522,12 @@
  * You can set the task state as follows -
  *
  * %TASK_UNINTERRUPTIBLE - at least @timeout jiffies are guaranteed to
- * pass before the routine returns. The routine will return 0
+ * pass before the routine returns unless the current task is explicitly
+ * woken up, (e.g. by wake_up_process())".
  *
  * %TASK_INTERRUPTIBLE - the routine may return early if a signal is
- * delivered to the current task. In this case the remaining time
- * in jiffies will be returned, or 0 if the timer expired in time
+ * delivered to the current task or the current task is explicitly woken
+ * up.
  *
  * The current task state is guaranteed to be TASK_RUNNING when this
  * routine returns.
@@ -1495,7 +1536,9 @@
  * the CPU away without a bound on the timeout. In this case the return
  * value will be %MAX_SCHEDULE_TIMEOUT.
  *
- * In all cases the return value is guaranteed to be non-negative.
+ * Returns 0 when the timer has expired, otherwise the remaining time in
+ * jiffies will be returned. In all cases the return value is guaranteed
+ * to be non-negative.
  */
 signed long __sched schedule_timeout(signed long timeout)
 {
@@ -1573,56 +1616,82 @@
 }
 EXPORT_SYMBOL(schedule_timeout_uninterruptible);
 
-#ifdef CONFIG_HOTPLUG_CPU
-static void migrate_timer_list(struct tvec_base *new_base, struct hlist_head *head)
+#if defined(CONFIG_HOTPLUG_CPU)
+static void migrate_timer_list(struct tvec_base *new_base,
+			       struct hlist_head *head, bool remove_pinned)
 {
 	struct timer_list *timer;
 	int cpu = new_base->cpu;
+	struct hlist_node *n;
+	int is_pinned;
 
-	while (!hlist_empty(head)) {
-		timer = hlist_entry(head->first, struct timer_list, entry);
-		/* We ignore the accounting on the dying cpu */
-		detach_timer(timer, false);
+	hlist_for_each_entry_safe(timer, n, head, entry) {
+		is_pinned = timer->flags & TIMER_PINNED_ON_CPU;
+		if (!remove_pinned && is_pinned)
+			continue;
+
+		detach_if_pending(timer, get_timer_base(timer->flags), false);
 		timer->flags = (timer->flags & ~TIMER_BASEMASK) | cpu;
 		internal_add_timer(new_base, timer);
 	}
 }
 
-static void migrate_timers(int cpu)
+static void __migrate_timers(int cpu, bool remove_pinned)
 {
 	struct tvec_base *old_base;
 	struct tvec_base *new_base;
+	unsigned long flags;
 	int i;
 
-	BUG_ON(cpu_online(cpu));
 	old_base = per_cpu_ptr(&tvec_bases, cpu);
 	new_base = get_cpu_ptr(&tvec_bases);
 	/*
 	 * The caller is globally serialized and nobody else
 	 * takes two locks at once, deadlock is not possible.
 	 */
-	spin_lock_irq(&new_base->lock);
+	spin_lock_irqsave(&new_base->lock, flags);
 	spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);
 
+	/*
+	 * If we're in the hotplug path, kill the system if there's a running
+	 * timer. It's ok to have a running timer in the isolation case - the
+	 * currently running or just expired timers are off of the timer wheel
+	 * and so everything else can be migrated off.
+	 */
+	if (!cpu_online(cpu))
-	BUG_ON(old_base->running_timer);
+		BUG_ON(old_base->running_timer);
 
 	for (i = 0; i < TVR_SIZE; i++)
-		migrate_timer_list(new_base, old_base->tv1.vec + i);
+		migrate_timer_list(new_base, old_base->tv1.vec + i,
+				   remove_pinned);
 	for (i = 0; i < TVN_SIZE; i++) {
-		migrate_timer_list(new_base, old_base->tv2.vec + i);
-		migrate_timer_list(new_base, old_base->tv3.vec + i);
-		migrate_timer_list(new_base, old_base->tv4.vec + i);
-		migrate_timer_list(new_base, old_base->tv5.vec + i);
+		migrate_timer_list(new_base, old_base->tv2.vec + i,
+				remove_pinned);
+		migrate_timer_list(new_base, old_base->tv3.vec + i,
+				remove_pinned);
+		migrate_timer_list(new_base, old_base->tv4.vec + i,
+				remove_pinned);
+		migrate_timer_list(new_base, old_base->tv5.vec + i,
+				remove_pinned);
 	}
 
-	old_base->active_timers = 0;
-	old_base->all_timers = 0;
-
 	spin_unlock(&old_base->lock);
-	spin_unlock_irq(&new_base->lock);
+	spin_unlock_irqrestore(&new_base->lock, flags);
 	put_cpu_ptr(&tvec_bases);
 }
 
+/* Migrate timers from 'cpu' to this_cpu */
+static void migrate_timers(int cpu)
+{
+	BUG_ON(cpu_online(cpu));
+	__migrate_timers(cpu, true);
+}
+
+void timer_quiesce_cpu(void *cpup)
+{
+	__migrate_timers(*(int *)cpup, false);
+}
+
 static int timer_cpu_notify(struct notifier_block *self,
 				unsigned long action, void *hcpu)
 {
@@ -1663,12 +1732,13 @@
 
 	for_each_possible_cpu(cpu)
 		init_timer_cpu(cpu);
+
+	init_timer_deferrable_global();
 }
 
 void __init init_timers(void)
 {
 	init_timer_cpus();
-	init_timer_stats();
 	timer_register_cpu_notifier();
 	open_softirq(TIMER_SOFTIRQ, run_timer_softirq);
 }
@@ -1702,16 +1772,6 @@
 
 EXPORT_SYMBOL(msleep_interruptible);
 
-static void __sched do_usleep_range(unsigned long min, unsigned long max)
-{
-	ktime_t kmin;
-	unsigned long delta;
-
-	kmin = ktime_set(0, min * NSEC_PER_USEC);
-	delta = (max - min) * NSEC_PER_USEC;
-	schedule_hrtimeout_range(&kmin, delta, HRTIMER_MODE_REL);
-}
-
 /**
  * usleep_range - Drop in replacement for udelay where wakeup is flexible
  * @min: Minimum time in usecs to sleep
@@ -1719,7 +1779,14 @@
  */
 void __sched usleep_range(unsigned long min, unsigned long max)
 {
+	ktime_t exp = ktime_add_us(ktime_get(), min);
+	u64 delta = (u64)(max - min) * NSEC_PER_USEC;
+
+	for (;;) {
-	__set_current_state(TASK_UNINTERRUPTIBLE);
+		__set_current_state(TASK_UNINTERRUPTIBLE);
-	do_usleep_range(min, max);
+		/* Do not return before the requested sleep time has elapsed */
+		if (!schedule_hrtimeout_range(&exp, delta, HRTIMER_MODE_ABS))
+			break;
+	}
 }
 EXPORT_SYMBOL(usleep_range);
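With this change a deferrable timer that is not pinned to a CPU lands on the global tvec_base_deferrable and only runs when the tick-handling CPU (or a CPU flagged by check_pending_deferrable_timers()) processes it. A sketch with illustrative names:

#include <linux/jiffies.h>
#include <linux/timer.h>

static struct timer_list my_housekeeping_timer;

static void my_housekeeping_fn(unsigned long data)
{
	/* low-priority work that can tolerate deferral */
	mod_timer(&my_housekeeping_timer, jiffies + 10 * HZ);
}

static void start_housekeeping(void)
{
	/* deferrable + not pinned => routed to tvec_base_deferrable */
	init_timer_deferrable(&my_housekeeping_timer);
	my_housekeeping_timer.function = my_housekeeping_fn;
	my_housekeeping_timer.data = 0;
	mod_timer(&my_housekeeping_timer, jiffies + 10 * HZ);
}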
diff -ruw linux-4.4.115/kernel/time/timer_list.c linux-4.4.115-fbx/kernel/time/timer_list.c
--- linux-4.4.115/kernel/time/timer_list.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/kernel/time/timer_list.c	2019-10-29 09:26:25.645222770 +0100
@@ -62,21 +62,11 @@
 print_timer(struct seq_file *m, struct hrtimer *taddr, struct hrtimer *timer,
 	    int idx, u64 now)
 {
-#ifdef CONFIG_TIMER_STATS
-	char tmp[TASK_COMM_LEN + 1];
-#endif
 	SEQ_printf(m, " #%d: ", idx);
 	print_name_offset(m, taddr);
 	SEQ_printf(m, ", ");
 	print_name_offset(m, timer->function);
 	SEQ_printf(m, ", S:%02x", timer->state);
-#ifdef CONFIG_TIMER_STATS
-	SEQ_printf(m, ", ");
-	print_name_offset(m, timer->start_site);
-	memcpy(tmp, timer->start_comm, TASK_COMM_LEN);
-	tmp[TASK_COMM_LEN] = 0;
-	SEQ_printf(m, ", %s/%d", tmp, timer->start_pid);
-#endif
 	SEQ_printf(m, "\n");
 	SEQ_printf(m, " # expires at %Lu-%Lu nsecs [in %Ld to %Ld nsecs]\n",
 		(unsigned long long)ktime_to_ns(hrtimer_get_softexpires(timer)),
diff -ruw linux-4.4.115/kernel/trace/blktrace.c linux-4.4.115-fbx/kernel/trace/blktrace.c
--- linux-4.4.115/kernel/trace/blktrace.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/kernel/trace/blktrace.c	2019-10-29 09:26:25.645222770 +0100
@@ -199,9 +199,9 @@
  * blk_io_trace structure and places it in a per-cpu subbuffer.
  */
 static void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes,
-		     int rw, u32 what, int error, int pdu_len, void *pdu_data)
+				int rw, u32 what, int error, int pdu_len,
+				void *pdu_data, struct task_struct *tsk)
 {
-	struct task_struct *tsk = current;
 	struct ring_buffer_event *event = NULL;
 	struct ring_buffer *buffer = NULL;
 	struct blk_io_trace *t;
@@ -708,18 +708,33 @@
 			     unsigned int nr_bytes, u32 what)
 {
 	struct blk_trace *bt = q->blk_trace;
+	struct task_struct *tsk = current;
 
 	if (likely(!bt))
 		return;
 
+	/*
+	 * Use the bio context for all events except ISSUE and
+	 * COMPLETE events.
+	 *
+	 * Not all the pages in the bio are dirtied by the same task but
+	 * most likely it will be, since the sectors accessed on the device
+	 * must be adjacent.
+	 */
+	if (!((what == BLK_TA_ISSUE) || (what == BLK_TA_COMPLETE)) &&
+	    bio_has_data(rq->bio) && rq->bio->bi_io_vec &&
+	    rq->bio->bi_io_vec->bv_page &&
+	    rq->bio->bi_io_vec->bv_page->tsk_dirty)
+		tsk = rq->bio->bi_io_vec->bv_page->tsk_dirty;
+
 	if (rq->cmd_type == REQ_TYPE_BLOCK_PC) {
 		what |= BLK_TC_ACT(BLK_TC_PC);
 		__blk_add_trace(bt, 0, nr_bytes, rq->cmd_flags,
-				what, rq->errors, rq->cmd_len, rq->cmd);
+				what, rq->errors, rq->cmd_len, rq->cmd, tsk);
 	} else  {
 		what |= BLK_TC_ACT(BLK_TC_FS);
 		__blk_add_trace(bt, blk_rq_pos(rq), nr_bytes,
-				rq->cmd_flags, what, rq->errors, 0, NULL);
+				rq->cmd_flags, what, rq->errors, 0, NULL, tsk);
 	}
 }
 
@@ -771,12 +786,22 @@
 			      u32 what, int error)
 {
 	struct blk_trace *bt = q->blk_trace;
+	struct task_struct *tsk = current;
 
 	if (likely(!bt))
 		return;
 
+	/*
+	 * Not all the pages in the bio are dirtied by the same task but
+	 * most likely it will be, since the sectors accessed on the device
+	 * must be adjacent.
+	 */
+	if (bio_has_data(bio) && bio->bi_io_vec && bio->bi_io_vec->bv_page &&
+	    bio->bi_io_vec->bv_page->tsk_dirty)
+		tsk = bio->bi_io_vec->bv_page->tsk_dirty;
+
 	__blk_add_trace(bt, bio->bi_iter.bi_sector, bio->bi_iter.bi_size,
-			bio->bi_rw, what, error, 0, NULL);
+			bio->bi_rw, what, error, 0, NULL, tsk);
 }
 
 static void blk_add_trace_bio_bounce(void *ignore,
@@ -824,7 +849,8 @@
 		struct blk_trace *bt = q->blk_trace;
 
 		if (bt)
-			__blk_add_trace(bt, 0, 0, rw, BLK_TA_GETRQ, 0, 0, NULL);
+			__blk_add_trace(bt, 0, 0, rw, BLK_TA_GETRQ, 0, 0,
+					NULL, current);
 	}
 }
 
@@ -840,7 +866,7 @@
 
 		if (bt)
 			__blk_add_trace(bt, 0, 0, rw, BLK_TA_SLEEPRQ,
-					0, 0, NULL);
+					0, 0, NULL, current);
 	}
 }
 
@@ -849,7 +875,8 @@
 	struct blk_trace *bt = q->blk_trace;
 
 	if (bt)
-		__blk_add_trace(bt, 0, 0, 0, BLK_TA_PLUG, 0, 0, NULL);
+		__blk_add_trace(bt, 0, 0, 0, BLK_TA_PLUG, 0, 0, NULL,
+				current);
 }
 
 static void blk_add_trace_unplug(void *ignore, struct request_queue *q,
@@ -866,7 +893,8 @@
 		else
 			what = BLK_TA_UNPLUG_TIMER;
 
-		__blk_add_trace(bt, 0, 0, 0, what, 0, sizeof(rpdu), &rpdu);
+		__blk_add_trace(bt, 0, 0, 0, what, 0, sizeof(rpdu), &rpdu,
+				current);
 	}
 }
 
@@ -875,13 +903,19 @@
 				unsigned int pdu)
 {
 	struct blk_trace *bt = q->blk_trace;
+	struct task_struct *tsk = current;
 
 	if (bt) {
 		__be64 rpdu = cpu_to_be64(pdu);
 
+		if (bio_has_data(bio) && bio->bi_io_vec &&
+		    bio->bi_io_vec->bv_page &&
+		    bio->bi_io_vec->bv_page->tsk_dirty)
+			tsk = bio->bi_io_vec->bv_page->tsk_dirty;
+
 		__blk_add_trace(bt, bio->bi_iter.bi_sector,
 				bio->bi_iter.bi_size, bio->bi_rw, BLK_TA_SPLIT,
-				bio->bi_error, sizeof(rpdu), &rpdu);
+				bio->bi_error, sizeof(rpdu), &rpdu, tsk);
 	}
 }
 
@@ -904,6 +938,7 @@
 {
 	struct blk_trace *bt = q->blk_trace;
 	struct blk_io_trace_remap r;
+	struct task_struct *tsk = current;
 
 	if (likely(!bt))
 		return;
@@ -912,9 +947,14 @@
 	r.device_to   = cpu_to_be32(bio->bi_bdev->bd_dev);
 	r.sector_from = cpu_to_be64(from);
 
+	if (bio_has_data(bio) && bio->bi_io_vec &&
+	    bio->bi_io_vec->bv_page &&
+	    bio->bi_io_vec->bv_page->tsk_dirty)
+		tsk = bio->bi_io_vec->bv_page->tsk_dirty;
+
 	__blk_add_trace(bt, bio->bi_iter.bi_sector, bio->bi_iter.bi_size,
 			bio->bi_rw, BLK_TA_REMAP, bio->bi_error,
-			sizeof(r), &r);
+			sizeof(r), &r, tsk);
 }
 
 /**
@@ -937,6 +977,7 @@
 {
 	struct blk_trace *bt = q->blk_trace;
 	struct blk_io_trace_remap r;
+	struct task_struct *tsk = current;
 
 	if (likely(!bt))
 		return;
@@ -945,9 +986,14 @@
 	r.device_to   = cpu_to_be32(disk_devt(rq->rq_disk));
 	r.sector_from = cpu_to_be64(from);
 
+	if (bio_has_data(rq->bio) && rq->bio->bi_io_vec &&
+	    rq->bio->bi_io_vec->bv_page &&
+	    rq->bio->bi_io_vec->bv_page->tsk_dirty)
+		tsk = rq->bio->bi_io_vec->bv_page->tsk_dirty;
+
 	__blk_add_trace(bt, blk_rq_pos(rq), blk_rq_bytes(rq),
 			rq_data_dir(rq), BLK_TA_REMAP, !!rq->errors,
-			sizeof(r), &r);
+			sizeof(r), &r, tsk);
 }
 
 /**
@@ -966,16 +1012,22 @@
 			 void *data, size_t len)
 {
 	struct blk_trace *bt = q->blk_trace;
+	struct task_struct *tsk = current;
 
 	if (likely(!bt))
 		return;
 
+	if (bio_has_data(rq->bio) && rq->bio->bi_io_vec &&
+	    rq->bio->bi_io_vec->bv_page &&
+	    rq->bio->bi_io_vec->bv_page->tsk_dirty)
+		tsk = rq->bio->bi_io_vec->bv_page->tsk_dirty;
+
 	if (rq->cmd_type == REQ_TYPE_BLOCK_PC)
 		__blk_add_trace(bt, 0, blk_rq_bytes(rq), 0,
-				BLK_TA_DRV_DATA, rq->errors, len, data);
+				BLK_TA_DRV_DATA, rq->errors, len, data, tsk);
 	else
 		__blk_add_trace(bt, blk_rq_pos(rq), blk_rq_bytes(rq), 0,
-				BLK_TA_DRV_DATA, rq->errors, len, data);
+				BLK_TA_DRV_DATA, rq->errors, len, data, tsk);
 }
 EXPORT_SYMBOL_GPL(blk_add_driver_data);
 
diff -ruw linux-4.4.115/kernel/trace/Kconfig linux-4.4.115-fbx/kernel/trace/Kconfig
--- linux-4.4.115/kernel/trace/Kconfig	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/kernel/trace/Kconfig	2019-01-22 16:16:28.727293713 +0100
@@ -77,6 +77,9 @@
 	select CONTEXT_SWITCH_TRACER
 	bool
 
+config GPU_TRACEPOINTS
+	bool
+
 config CONTEXT_SWITCH_TRACER
 	bool
 
@@ -86,6 +89,31 @@
 	 Allow the use of ring_buffer_swap_cpu.
 	 Adds a very slight overhead to tracing when enabled.
 
+config IPC_LOGGING
+	bool "Debug Logging for IPC Drivers"
+	select GENERIC_TRACER
+	help
+	  This option enables debug logging for IPC drivers.
+
+	  If in doubt, say no.
+
+config QCOM_RTB
+	bool "Register tracing"
+	help
+	  Add support for logging different events to a small uncached
+	  region. This is designed to aid in debugging reset cases where the
+	  caches may not be flushed before the target resets.
+
+config QCOM_RTB_SEPARATE_CPUS
+	bool "Separate entries for each cpu"
+	depends on QCOM_RTB
+	depends on SMP
+	help
+	  Under some circumstances, it may be beneficial to give dedicated space
+	  for each cpu to log accesses. Selecting this option will log each cpu
+	  separately. This will guarantee that the last accesses for each cpu
+	  will be logged, but there will be fewer entries per cpu.
+
 # All tracer options should select GENERIC_TRACER. For those options that are
 # enabled by all tracers (context switch and event tracer) they select TRACING.
 # This allows those options to appear when no other tracer is selected. But the
@@ -162,6 +190,17 @@
 	  address on the current task structure into a stack of calls.
 
 
+config PREEMPTIRQ_EVENTS
+	bool "Enable trace events for preempt and irq disable/enable"
+	select TRACE_IRQFLAGS
+	depends on DEBUG_PREEMPT || !PROVE_LOCKING
+	default n
+	help
+	  Enable tracing of disable and enable events for preemption and irqs.
+	  For tracing preempt disable/enable events, DEBUG_PREEMPT must be
+	  enabled. For tracing irq disable/enable events, PROVE_LOCKING must
+	  be disabled.
+
 config IRQSOFF_TRACER
 	bool "Interrupts-off Latency Tracer"
 	default n
@@ -485,6 +524,19 @@
 
 	  If in doubt, say N.
 
+config CPU_FREQ_SWITCH_PROFILER
+	bool "CPU frequency switch time profiler"
+	select GENERIC_TRACER
+	help
+	  This option enables the CPU frequency switch profiler. A file is
+	  created in debugfs called "cpu_freq_switch_profile_enabled", which
+	  defaults to zero. When 1 is echoed into this file, profiling begins;
+	  when 0 is echoed, profiling stops. A "cpu_freq_switch" file is
+	  also created in the trace_stats directory; this file shows the
+	  switches that have occurred and duration statistics.
+
+	  If in doubt, say N.
+
 config FTRACE_MCOUNT_RECORD
 	def_bool y
 	depends on DYNAMIC_FTRACE
diff -ruw linux-4.4.115/kernel/trace/Makefile linux-4.4.115-fbx/kernel/trace/Makefile
--- linux-4.4.115/kernel/trace/Makefile	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/kernel/trace/Makefile	2019-01-22 16:16:28.727293713 +0100
@@ -37,9 +37,11 @@
 obj-$(CONFIG_TRACING) += trace_printk.o
 obj-$(CONFIG_CONTEXT_SWITCH_TRACER) += trace_sched_switch.o
 obj-$(CONFIG_FUNCTION_TRACER) += trace_functions.o
+obj-$(CONFIG_PREEMPTIRQ_EVENTS) += trace_irqsoff.o
 obj-$(CONFIG_IRQSOFF_TRACER) += trace_irqsoff.o
 obj-$(CONFIG_PREEMPT_TRACER) += trace_irqsoff.o
 obj-$(CONFIG_SCHED_TRACER) += trace_sched_wakeup.o
+obj-$(CONFIG_CPU_FREQ_SWITCH_PROFILER) += trace_cpu_freq_switch.o
 obj-$(CONFIG_NOP_TRACER) += trace_nop.o
 obj-$(CONFIG_STACK_TRACER) += trace_stack.o
 obj-$(CONFIG_MMIOTRACE) += trace_mmiotrace.o
@@ -68,7 +70,13 @@
 endif
 obj-$(CONFIG_PROBE_EVENTS) += trace_probe.o
 obj-$(CONFIG_UPROBE_EVENT) += trace_uprobe.o
+obj-$(CONFIG_GPU_TRACEPOINTS) += gpu-traces.o
+obj-$(CONFIG_QCOM_RTB) += msm_rtb.o
 
 obj-$(CONFIG_TRACEPOINT_BENCHMARK) += trace_benchmark.o
+obj-$(CONFIG_IPC_LOGGING) += ipc_logging.o
+ifdef CONFIG_DEBUG_FS
+obj-$(CONFIG_IPC_LOGGING) += ipc_logging_debug.o
+endif
 
 libftrace-y := ftrace.o
diff -ruw linux-4.4.115/kernel/trace/power-traces.c linux-4.4.115-fbx/kernel/trace/power-traces.c
--- linux-4.4.115/kernel/trace/power-traces.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/kernel/trace/power-traces.c	2019-01-22 16:16:28.731293749 +0100
@@ -15,4 +15,3 @@
 
 EXPORT_TRACEPOINT_SYMBOL_GPL(suspend_resume);
 EXPORT_TRACEPOINT_SYMBOL_GPL(cpu_idle);
-
diff -ruw linux-4.4.115/kernel/trace/trace.c linux-4.4.115-fbx/kernel/trace/trace.c
--- linux-4.4.115/kernel/trace/trace.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/kernel/trace/trace.c	2019-10-29 09:26:25.653222848 +0100
@@ -41,6 +41,7 @@
 #include <linux/nmi.h>
 #include <linux/fs.h>
 #include <linux/sched/rt.h>
+#include <linux/coresight-stm.h>
 
 #include "trace.h"
 #include "trace_output.h"
@@ -573,8 +574,11 @@
 	if (entry->buf[size - 1] != '\n') {
 		entry->buf[size] = '\n';
 		entry->buf[size + 1] = '\0';
-	} else
+		stm_log(OST_ENTITY_TRACE_PRINTK, entry->buf, size + 2);
+	} else {
 		entry->buf[size] = '\0';
+		stm_log(OST_ENTITY_TRACE_PRINTK, entry->buf, size + 1);
+	}
 
 	__buffer_unlock_commit(buffer, event);
 	ftrace_trace_stack(&global_trace, buffer, irq_flags, 4, pc, NULL);
@@ -615,6 +619,7 @@
 	entry = ring_buffer_event_data(event);
 	entry->ip			= ip;
 	entry->str			= str;
+	stm_log(OST_ENTITY_TRACE_PRINTK, entry->str, strlen(entry->str)+1);
 
 	__buffer_unlock_commit(buffer, event);
 	ftrace_trace_stack(&global_trace, buffer, irq_flags, 4, pc, NULL);
@@ -890,6 +895,7 @@
 	{ trace_clock,			"perf",		1 },
 	{ ktime_get_mono_fast_ns,	"mono",		1 },
 	{ ktime_get_raw_fast_ns,	"mono_raw",	1 },
+	{ ktime_get_boot_fast_ns,	"boot",		1 },
 	ARCH_TRACE_CLOCKS
 };
 
@@ -1356,6 +1362,7 @@
 struct saved_cmdlines_buffer {
 	unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
 	unsigned *map_cmdline_to_pid;
+	unsigned *map_cmdline_to_tgid;
 	unsigned cmdline_num;
 	int cmdline_idx;
 	char *saved_cmdlines;
@@ -1389,12 +1396,23 @@
 		return -ENOMEM;
 	}
 
+	s->map_cmdline_to_tgid = kmalloc_array(val,
+					       sizeof(*s->map_cmdline_to_tgid),
+					       GFP_KERNEL);
+	if (!s->map_cmdline_to_tgid) {
+		kfree(s->map_cmdline_to_pid);
+		kfree(s->saved_cmdlines);
+		return -ENOMEM;
+	}
+
 	s->cmdline_idx = 0;
 	s->cmdline_num = val;
 	memset(&s->map_pid_to_cmdline, NO_CMDLINE_MAP,
 	       sizeof(s->map_pid_to_cmdline));
 	memset(s->map_cmdline_to_pid, NO_CMDLINE_MAP,
 	       val * sizeof(*s->map_cmdline_to_pid));
+	memset(s->map_cmdline_to_tgid, NO_CMDLINE_MAP,
+	       val * sizeof(*s->map_cmdline_to_tgid));
 
 	return 0;
 }
@@ -1560,14 +1578,17 @@
 	if (!tsk->pid || unlikely(tsk->pid > PID_MAX_DEFAULT))
 		return 0;
 
+	preempt_disable();
 	/*
 	 * It's not the end of the world if we don't get
 	 * the lock, but we also don't want to spin
 	 * nor do we want to disable interrupts,
 	 * so if we miss here, then better luck next time.
 	 */
-	if (!arch_spin_trylock(&trace_cmdline_lock))
+	if (!arch_spin_trylock(&trace_cmdline_lock)) {
+		preempt_enable();
 		return 0;
+	}
 
 	idx = savedcmd->map_pid_to_cmdline[tsk->pid];
 	if (idx == NO_CMDLINE_MAP) {
@@ -1590,8 +1611,9 @@
 	}
 
 	set_cmdline(idx, tsk->comm);
-
+	savedcmd->map_cmdline_to_tgid[idx] = tsk->tgid;
 	arch_spin_unlock(&trace_cmdline_lock);
+	preempt_enable();
 
 	return 1;
 }
@@ -1617,7 +1639,7 @@
 
 	map = savedcmd->map_pid_to_cmdline[pid];
 	if (map != NO_CMDLINE_MAP)
-		strcpy(comm, get_saved_cmdlines(map));
+		strlcpy(comm, get_saved_cmdlines(map), TASK_COMM_LEN - 1);
 	else
 		strcpy(comm, "<...>");
 }
@@ -1633,6 +1655,35 @@
 	preempt_enable();
 }
 
+static int __find_tgid_locked(int pid)
+{
+	unsigned map;
+	int tgid;
+
+	map = savedcmd->map_pid_to_cmdline[pid];
+	if (map != NO_CMDLINE_MAP)
+		tgid = savedcmd->map_cmdline_to_tgid[map];
+	else
+		tgid = -1;
+
+	return tgid;
+}
+
+int trace_find_tgid(int pid)
+{
+	int tgid;
+
+	preempt_disable();
+	arch_spin_lock(&trace_cmdline_lock);
+
+	tgid = __find_tgid_locked(pid);
+
+	arch_spin_unlock(&trace_cmdline_lock);
+	preempt_enable();
+
+	return tgid;
+}
+
 void tracing_record_cmdline(struct task_struct *tsk)
 {
 	if (atomic_read(&trace_record_cmdline_disabled) || !tracing_is_on())
@@ -2220,6 +2271,7 @@
 
 	memcpy(&entry->buf, tbuffer, len + 1);
 	if (!call_filter_check_discard(call, entry, buffer, event)) {
+		stm_log(OST_ENTITY_TRACE_PRINTK, entry->buf, len + 1);
 		__buffer_unlock_commit(buffer, event);
 		ftrace_trace_stack(&global_trace, buffer, flags, 6, pc, NULL);
 	}
@@ -2583,6 +2635,13 @@
 		    "#              | |       |          |         |\n");
 }
 
+static void print_func_help_header_tgid(struct trace_buffer *buf, struct seq_file *m)
+{
+	print_event_info(buf, m);
+	seq_puts(m, "#           TASK-PID    TGID   CPU#      TIMESTAMP  FUNCTION\n");
+	seq_puts(m, "#              | |        |      |          |         |\n");
+}
+
 static void print_func_help_header_irq(struct trace_buffer *buf, struct seq_file *m)
 {
 	print_event_info(buf, m);
@@ -2595,6 +2654,18 @@
 		    "#              | |       |   ||||       |         |\n");
 }
 
+static void print_func_help_header_irq_tgid(struct trace_buffer *buf, struct seq_file *m)
+{
+	print_event_info(buf, m);
+	seq_puts(m, "#                                      _-----=> irqs-off\n");
+	seq_puts(m, "#                                     / _----=> need-resched\n");
+	seq_puts(m, "#                                    | / _---=> hardirq/softirq\n");
+	seq_puts(m, "#                                    || / _--=> preempt-depth\n");
+	seq_puts(m, "#                                    ||| /     delay\n");
+	seq_puts(m, "#           TASK-PID    TGID   CPU#  ||||    TIMESTAMP  FUNCTION\n");
+	seq_puts(m, "#              | |        |      |   ||||       |         |\n");
+}
+
 void
 print_trace_header(struct seq_file *m, struct trace_iterator *iter)
 {
@@ -2907,8 +2978,14 @@
 	} else {
 		if (!(trace_flags & TRACE_ITER_VERBOSE)) {
 			if (trace_flags & TRACE_ITER_IRQ_INFO)
+				if (trace_flags & TRACE_ITER_TGID)
+					print_func_help_header_irq_tgid(iter->trace_buffer, m);
+				else
 				print_func_help_header_irq(iter->trace_buffer, m);
 			else
+				if (trace_flags & TRACE_ITER_TGID)
+					print_func_help_header_tgid(iter->trace_buffer, m);
+				else
 				print_func_help_header(iter->trace_buffer, m);
 		}
 	}
@@ -3912,10 +3989,15 @@
 {
 	char buf[64];
 	int r;
+	unsigned int n;
 
+	preempt_disable();
 	arch_spin_lock(&trace_cmdline_lock);
-	r = scnprintf(buf, sizeof(buf), "%u\n", savedcmd->cmdline_num);
+	n = savedcmd->cmdline_num;
 	arch_spin_unlock(&trace_cmdline_lock);
+	preempt_enable();
+
+	r = scnprintf(buf, sizeof(buf), "%u\n", n);
 
 	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
 }
@@ -3924,6 +4006,7 @@
 {
 	kfree(s->saved_cmdlines);
 	kfree(s->map_cmdline_to_pid);
+	kfree(s->map_cmdline_to_tgid);
 	kfree(s);
 }
 
@@ -3940,10 +4023,12 @@
 		return -ENOMEM;
 	}
 
+	preempt_disable();
 	arch_spin_lock(&trace_cmdline_lock);
 	savedcmd_temp = savedcmd;
 	savedcmd = s;
 	arch_spin_unlock(&trace_cmdline_lock);
+	preempt_enable();
 	free_saved_cmdlines_buffer(savedcmd_temp);
 
 	return 0;
@@ -4156,6 +4241,78 @@
 }
 
 static ssize_t
+tracing_saved_tgids_read(struct file *file, char __user *ubuf,
+				size_t cnt, loff_t *ppos)
+{
+	char *file_buf;
+	char *buf;
+	int len = 0;
+	int i;
+	int *pids;
+	int n = 0;
+
+	preempt_disable();
+	arch_spin_lock(&trace_cmdline_lock);
+
+	/* GFP_ATOMIC: trace_cmdline_lock is held with preemption disabled */
+	pids = kmalloc_array(savedcmd->cmdline_num, 2*sizeof(int), GFP_ATOMIC);
+	if (!pids) {
+		arch_spin_unlock(&trace_cmdline_lock);
+		preempt_enable();
+		return -ENOMEM;
+	}
+
+	for (i = 0; i < savedcmd->cmdline_num; i++) {
+		int pid;
+
+		pid = savedcmd->map_cmdline_to_pid[i];
+		if (pid == -1 || pid == NO_CMDLINE_MAP)
+			continue;
+
+		pids[n] = pid;
+		pids[n+1] = __find_tgid_locked(pid);
+		n += 2;
+	}
+	arch_spin_unlock(&trace_cmdline_lock);
+	preempt_enable();
+
+	if (n == 0) {
+		kfree(pids);
+		return 0;
+	}
+
+	/* enough to hold max pair of pids + space, newline and NUL */
+	len = n * 12;
+	file_buf = kmalloc(len, GFP_KERNEL);
+	if (!file_buf) {
+		kfree(pids);
+		return -ENOMEM;
+	}
+
+	buf = file_buf;
+	for (i = 0; i < n && len > 0; i += 2) {
+		int r;
+
+		r = snprintf(buf, len, "%d %d\n", pids[i], pids[i+1]);
+		buf += r;
+		len -= r;
+	}
+
+	len = simple_read_from_buffer(ubuf, cnt, ppos,
+				      file_buf, buf - file_buf);
+
+	kfree(file_buf);
+	kfree(pids);
+
+	return len;
+}
+
+static const struct file_operations tracing_saved_tgids_fops = {
+	.open	= tracing_open_generic,
+	.read	= tracing_saved_tgids_read,
+	.llseek	= generic_file_llseek,
+};
+
+static ssize_t
 tracing_set_trace_read(struct file *filp, char __user *ubuf,
 		       size_t cnt, loff_t *ppos)
 {
@@ -5175,8 +5332,11 @@
 	if (entry->buf[cnt - 1] != '\n') {
 		entry->buf[cnt] = '\n';
 		entry->buf[cnt + 1] = '\0';
-	} else
+		stm_log(OST_ENTITY_TRACE_MARKER, entry->buf, cnt + 2);
+	} else {
 		entry->buf[cnt] = '\0';
+		stm_log(OST_ENTITY_TRACE_MARKER, entry->buf, cnt + 1);
+	}
 
 	__buffer_unlock_commit(buffer, event);
 
@@ -6783,6 +6943,9 @@
 	trace_create_file("trace_marker", 0220, d_tracer,
 			  tr, &tracing_mark_fops);
 
+	trace_create_file("saved_tgids", 0444, d_tracer,
+			  tr, &tracing_saved_tgids_fops);
+
 	trace_create_file("trace_clock", 0644, d_tracer, tr,
 			  &trace_clock_fops);
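The new saved_tgids file registered above emits one "<pid> <tgid>" pair
per line, as built by tracing_saved_tgids_read(). A hedged userspace
sketch of a reader; the path assumes the usual debugfs-mounted tracefs
location and should be adjusted if tracefs is mounted elsewhere:

	#include <stdio.h>

	int main(void)
	{
		FILE *f = fopen("/sys/kernel/debug/tracing/saved_tgids", "r");
		int pid, tgid;

		if (!f) {
			perror("saved_tgids");
			return 1;
		}
		/* each line is "<pid> <tgid>", as emitted by the kernel side */
		while (fscanf(f, "%d %d", &pid, &tgid) == 2)
			printf("pid %d -> tgid %d\n", pid, tgid);
		fclose(f);
		return 0;
	}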
 
diff -ruw linux-4.4.115/kernel/trace/trace_event_perf.c linux-4.4.115-fbx/kernel/trace/trace_event_perf.c
--- linux-4.4.115/kernel/trace/trace_event_perf.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/kernel/trace/trace_event_perf.c	2019-01-22 16:16:28.735293785 +0100
@@ -256,6 +256,7 @@
 void perf_trace_del(struct perf_event *p_event, int flags)
 {
 	struct trace_event_call *tp_event = p_event->tp_event;
+	if (!hlist_unhashed(&p_event->hlist_entry))
 	hlist_del_rcu(&p_event->hlist_entry);
 	tp_event->class->reg(tp_event, TRACE_REG_PERF_DEL, p_event);
 }
diff -ruw linux-4.4.115/kernel/trace/trace_events.c linux-4.4.115-fbx/kernel/trace/trace_events.c
--- linux-4.4.115/kernel/trace/trace_events.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/kernel/trace/trace_events.c	2019-10-29 09:26:25.657222887 +0100
@@ -287,14 +287,15 @@
 	spin_unlock_irqrestore(&tracepoint_iter_lock, flags);
 }
 
-void trace_event_buffer_commit(struct trace_event_buffer *fbuffer)
+void trace_event_buffer_commit(struct trace_event_buffer *fbuffer,
+			       unsigned long len)
 {
 	if (tracepoint_printk)
 		output_printk(fbuffer);
 
 	event_trigger_unlock_commit(fbuffer->trace_file, fbuffer->buffer,
 				    fbuffer->event, fbuffer->entry,
-				    fbuffer->flags, fbuffer->pc);
+				    fbuffer->flags, fbuffer->pc, len);
 }
 EXPORT_SYMBOL_GPL(trace_event_buffer_commit);
 
diff -ruw linux-4.4.115/kernel/trace/trace.h linux-4.4.115-fbx/kernel/trace/trace.h
--- linux-4.4.115/kernel/trace/trace.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/kernel/trace/trace.h	2019-10-29 09:26:25.653222848 +0100
@@ -656,6 +656,7 @@
 extern cycle_t ftrace_now(int cpu);
 
 extern void trace_find_cmdline(int pid, char comm[]);
+extern int trace_find_tgid(int pid);
 
 #ifdef CONFIG_DYNAMIC_FTRACE
 extern unsigned long ftrace_update_tot_cnt;
@@ -970,7 +971,8 @@
 		FUNCTION_FLAGS					\
 		FGRAPH_FLAGS					\
 		STACK_FLAGS					\
-		BRANCH_FLAGS
+		BRANCH_FLAGS					\
+		C(TGID,			"print-tgid"),
 
 /*
  * By defining C, we can make TRACE_FLAGS a list of bit names
diff -ruw linux-4.4.115/kernel/trace/trace_output.c linux-4.4.115-fbx/kernel/trace/trace_output.c
--- linux-4.4.115/kernel/trace/trace_output.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/kernel/trace/trace_output.c	2019-01-22 16:16:28.739293822 +0100
@@ -526,11 +526,21 @@
 	unsigned long long t;
 	unsigned long secs, usec_rem;
 	char comm[TASK_COMM_LEN];
+	int tgid;
 
 	trace_find_cmdline(entry->pid, comm);
 
-	trace_seq_printf(s, "%16s-%-5d [%03d] ",
-			       comm, entry->pid, iter->cpu);
+	trace_seq_printf(s, "%16s-%-5d ", comm, entry->pid);
+
+	if (tr->trace_flags & TRACE_ITER_TGID) {
+		tgid = trace_find_tgid(entry->pid);
+		if (tgid < 0)
+			trace_seq_puts(s, "(-----) ");
+		else
+			trace_seq_printf(s, "(%5d) ", tgid);
+	}
+
+	trace_seq_printf(s, "[%03d] ", iter->cpu);
 
 	if (tr->trace_flags & TRACE_ITER_IRQ_INFO)
 		trace_print_lat_fmt(s, entry);
@@ -845,6 +855,174 @@
 	.funcs		= &trace_fn_funcs,
 };
 
+/* TRACE_GRAPH_ENT */
+static enum print_line_t trace_graph_ent_trace(struct trace_iterator *iter, int flags,
+					struct trace_event *event)
+{
+	struct trace_seq *s = &iter->seq;
+	struct ftrace_graph_ent_entry *field;
+
+	trace_assign_type(field, iter->ent);
+
+	trace_seq_puts(s, "graph_ent: func=");
+	if (trace_seq_has_overflowed(s))
+		return TRACE_TYPE_PARTIAL_LINE;
+
+	if (!seq_print_ip_sym(s, field->graph_ent.func, flags))
+		return TRACE_TYPE_PARTIAL_LINE;
+
+	trace_seq_puts(s, "\n");
+	if (trace_seq_has_overflowed(s))
+		return TRACE_TYPE_PARTIAL_LINE;
+
+	return TRACE_TYPE_HANDLED;
+}
+
+static enum print_line_t trace_graph_ent_raw(struct trace_iterator *iter, int flags,
+				      struct trace_event *event)
+{
+	struct ftrace_graph_ent_entry *field;
+
+	trace_assign_type(field, iter->ent);
+
+	trace_seq_printf(&iter->seq, "%lx %d\n",
+			      field->graph_ent.func,
+			      field->graph_ent.depth);
+	if (trace_seq_has_overflowed(&iter->seq))
+		return TRACE_TYPE_PARTIAL_LINE;
+
+	return TRACE_TYPE_HANDLED;
+}
+
+static enum print_line_t trace_graph_ent_hex(struct trace_iterator *iter, int flags,
+				      struct trace_event *event)
+{
+	struct ftrace_graph_ent_entry *field;
+	struct trace_seq *s = &iter->seq;
+
+	trace_assign_type(field, iter->ent);
+
+	SEQ_PUT_HEX_FIELD(s, field->graph_ent.func);
+	SEQ_PUT_HEX_FIELD(s, field->graph_ent.depth);
+
+	return TRACE_TYPE_HANDLED;
+}
+
+static enum print_line_t trace_graph_ent_bin(struct trace_iterator *iter, int flags,
+				      struct trace_event *event)
+{
+	struct ftrace_graph_ent_entry *field;
+	struct trace_seq *s = &iter->seq;
+
+	trace_assign_type(field, iter->ent);
+
+	SEQ_PUT_FIELD(s, field->graph_ent.func);
+	SEQ_PUT_FIELD(s, field->graph_ent.depth);
+
+	return TRACE_TYPE_HANDLED;
+}
+
+static struct trace_event_functions trace_graph_ent_funcs = {
+	.trace		= trace_graph_ent_trace,
+	.raw		= trace_graph_ent_raw,
+	.hex		= trace_graph_ent_hex,
+	.binary		= trace_graph_ent_bin,
+};
+
+static struct trace_event trace_graph_ent_event = {
+	.type		= TRACE_GRAPH_ENT,
+	.funcs		= &trace_graph_ent_funcs,
+};
+
+/* TRACE_GRAPH_RET */
+static enum print_line_t trace_graph_ret_trace(struct trace_iterator *iter, int flags,
+					struct trace_event *event)
+{
+	struct trace_seq *s = &iter->seq;
+	struct trace_entry *entry = iter->ent;
+	struct ftrace_graph_ret_entry *field;
+
+	trace_assign_type(field, entry);
+
+	trace_seq_puts(s, "graph_ret: func=");
+	if (trace_seq_has_overflowed(s))
+		return TRACE_TYPE_PARTIAL_LINE;
+
+	if (!seq_print_ip_sym(s, field->ret.func, flags))
+		return TRACE_TYPE_PARTIAL_LINE;
+
+	trace_seq_puts(s, "\n");
+	if (trace_seq_has_overflowed(s))
+		return TRACE_TYPE_PARTIAL_LINE;
+
+	return TRACE_TYPE_HANDLED;
+}
+
+static enum print_line_t trace_graph_ret_raw(struct trace_iterator *iter, int flags,
+				      struct trace_event *event)
+{
+	struct ftrace_graph_ret_entry *field;
+
+	trace_assign_type(field, iter->ent);
+
+	trace_seq_printf(&iter->seq, "%lx %lld %lld %ld %d\n",
+			      field->ret.func,
+			      field->ret.calltime,
+			      field->ret.rettime,
+			      field->ret.overrun,
+			      field->ret.depth);
+	if (trace_seq_has_overflowed(&iter->seq))
+		return TRACE_TYPE_PARTIAL_LINE;
+
+	return TRACE_TYPE_HANDLED;
+}
+
+static enum print_line_t trace_graph_ret_hex(struct trace_iterator *iter, int flags,
+				      struct trace_event *event)
+{
+	struct ftrace_graph_ret_entry *field;
+	struct trace_seq *s = &iter->seq;
+
+	trace_assign_type(field, iter->ent);
+
+	SEQ_PUT_HEX_FIELD(s, field->ret.func);
+	SEQ_PUT_HEX_FIELD(s, field->ret.calltime);
+	SEQ_PUT_HEX_FIELD(s, field->ret.rettime);
+	SEQ_PUT_HEX_FIELD(s, field->ret.overrun);
+	SEQ_PUT_HEX_FIELD(s, field->ret.depth);
+
+	return TRACE_TYPE_HANDLED;
+}
+
+static enum print_line_t trace_graph_ret_bin(struct trace_iterator *iter, int flags,
+				      struct trace_event *event)
+{
+	struct ftrace_graph_ret_entry *field;
+	struct trace_seq *s = &iter->seq;
+
+	trace_assign_type(field, iter->ent);
+
+	SEQ_PUT_FIELD(s, field->ret.func);
+	SEQ_PUT_FIELD(s, field->ret.calltime);
+	SEQ_PUT_FIELD(s, field->ret.rettime);
+	SEQ_PUT_FIELD(s, field->ret.overrun);
+	SEQ_PUT_FIELD(s, field->ret.depth);
+
+	return TRACE_TYPE_HANDLED;
+}
+
+static struct trace_event_functions trace_graph_ret_funcs = {
+	.trace		= trace_graph_ret_trace,
+	.raw		= trace_graph_ret_raw,
+	.hex		= trace_graph_ret_hex,
+	.binary		= trace_graph_ret_bin,
+};
+
+static struct trace_event trace_graph_ret_event = {
+	.type		= TRACE_GRAPH_RET,
+	.funcs		= &trace_graph_ret_funcs,
+};
+
 /* TRACE_CTX an TRACE_WAKE */
 static enum print_line_t trace_ctxwake_print(struct trace_iterator *iter,
 					     char *delim)
@@ -1222,6 +1400,8 @@
 
 static struct trace_event *events[] __initdata = {
 	&trace_fn_event,
+	&trace_graph_ent_event,
+	&trace_graph_ret_event,
 	&trace_ctx_event,
 	&trace_wake_event,
 	&trace_stack_event,
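With the print-tgid flag set, the row printing above inserts a TGID
column between the comm-pid pair and the CPU, using "(-----)" for pids
whose tgid was never recorded. A small standalone sketch of the format
strings used (the sample values are invented for illustration):

	#include <stdio.h>

	int main(void)
	{
		/* "%16s-%-5d " then "(%5d) " or "(-----) " then "[%03d] " */
		printf("%16s-%-5d (%5d) [%03d] ...\n", "adbd", 1234, 1200, 1);
		printf("%16s-%-5d (-----) [%03d] ...\n", "kworker/0:1", 57, 0);
		return 0;
	}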
diff -ruw linux-4.4.115/kernel/trace/trace_printk.c linux-4.4.115-fbx/kernel/trace/trace_printk.c
--- linux-4.4.115/kernel/trace/trace_printk.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/kernel/trace/trace_printk.c	2019-01-22 16:16:28.739293822 +0100
@@ -304,7 +304,7 @@
 	if (!*fmt)
 		return 0;
 
-	seq_printf(m, "0x%lx : \"", *(unsigned long *)fmt);
+	seq_printf(m, "0x%lx : \"", 0L);
 
 	/*
 	 * Tabs and new lines need to be converted.
diff -ruw linux-4.4.115/kernel/watchdog.c linux-4.4.115-fbx/kernel/watchdog.c
--- linux-4.4.115/kernel/watchdog.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/kernel/watchdog.c	2019-01-22 16:16:28.747293894 +0100
@@ -13,6 +13,7 @@
 
 #include <linux/mm.h>
 #include <linux/cpu.h>
+#include <linux/device.h>
 #include <linux/nmi.h>
 #include <linux/init.h>
 #include <linux/module.h>
@@ -20,6 +21,7 @@
 #include <linux/smpboot.h>
 #include <linux/sched/rt.h>
 #include <linux/tick.h>
+#include <linux/workqueue.h>
 
 #include <asm/irq_regs.h>
 #include <linux/kvm_para.h>
@@ -94,6 +96,7 @@
 static DEFINE_PER_CPU(unsigned long, watchdog_touch_ts);
 static DEFINE_PER_CPU(struct task_struct *, softlockup_watchdog);
 static DEFINE_PER_CPU(struct hrtimer, watchdog_hrtimer);
+static DEFINE_PER_CPU(unsigned int, watchdog_en);
 static DEFINE_PER_CPU(bool, softlockup_touch_sync);
 static DEFINE_PER_CPU(bool, soft_watchdog_warn);
 static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts);
@@ -103,6 +106,11 @@
 static DEFINE_PER_CPU(bool, hard_watchdog_warn);
 static DEFINE_PER_CPU(bool, watchdog_nmi_touch);
 static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts_saved);
+#endif
+#ifdef CONFIG_HARDLOCKUP_DETECTOR_OTHER_CPU
+static cpumask_t __read_mostly watchdog_cpus;
+#endif
+#ifdef CONFIG_HARDLOCKUP_DETECTOR_NMI
 static DEFINE_PER_CPU(struct perf_event *, watchdog_ev);
 #endif
 static unsigned long soft_lockup_nmi_warn;
@@ -114,7 +122,7 @@
 #ifdef CONFIG_HARDLOCKUP_DETECTOR
 unsigned int __read_mostly hardlockup_panic =
 			CONFIG_BOOTPARAM_HARDLOCKUP_PANIC_VALUE;
-static unsigned long hardlockup_allcpu_dumped;
+static unsigned long __maybe_unused hardlockup_allcpu_dumped;
 /*
  * We may not want to enable hard lockup detection by default in all cases,
  * for example when running the kernel as a guest on a hypervisor. In these
@@ -225,7 +233,15 @@
 	__this_cpu_write(watchdog_touch_ts, get_timestamp());
 }
 
-void touch_softlockup_watchdog(void)
+/**
+ * touch_softlockup_watchdog_sched - touch watchdog on scheduler stalls
+ *
+ * Call when the scheduler may have stalled for legitimate reasons
+ * preventing the watchdog task from executing - e.g. the scheduler
+ * entering idle state.  This should only be used for scheduler events.
+ * Use touch_softlockup_watchdog() for everything else.
+ */
+void touch_softlockup_watchdog_sched(void)
 {
 	/*
 	 * Preemption can be enabled.  It doesn't matter which CPU's timestamp
@@ -233,6 +249,12 @@
 	 */
 	raw_cpu_write(watchdog_touch_ts, 0);
 }
+
+void touch_softlockup_watchdog(void)
+{
+	touch_softlockup_watchdog_sched();
+	wq_watchdog_touch(raw_smp_processor_id());
+}
 EXPORT_SYMBOL(touch_softlockup_watchdog);
 
 void touch_all_softlockup_watchdogs(void)
@@ -246,6 +268,7 @@
 	 */
 	for_each_watchdog_cpu(cpu)
 		per_cpu(watchdog_touch_ts, cpu) = 0;
+	wq_watchdog_touch(-1);
 }
 
 #ifdef CONFIG_HARDLOCKUP_DETECTOR
@@ -271,7 +294,7 @@
 	__this_cpu_write(watchdog_touch_ts, 0);
 }
 
-#ifdef CONFIG_HARDLOCKUP_DETECTOR
+#ifdef CONFIG_HARDLOCKUP_DETECTOR_NMI
 /* watchdog detector functions */
 static bool is_hardlockup(void)
 {
@@ -285,6 +308,76 @@
 }
 #endif
 
+#ifdef CONFIG_HARDLOCKUP_DETECTOR_OTHER_CPU
+static unsigned int watchdog_next_cpu(unsigned int cpu)
+{
+	cpumask_t cpus = watchdog_cpus;
+	unsigned int next_cpu;
+
+	next_cpu = cpumask_next(cpu, &cpus);
+	if (next_cpu >= nr_cpu_ids)
+		next_cpu = cpumask_first(&cpus);
+
+	if (next_cpu == cpu)
+		return nr_cpu_ids;
+
+	return next_cpu;
+}
+
+static int is_hardlockup_other_cpu(unsigned int cpu)
+{
+	unsigned long hrint = per_cpu(hrtimer_interrupts, cpu);
+
+	if (per_cpu(hrtimer_interrupts_saved, cpu) == hrint)
+		return 1;
+
+	per_cpu(hrtimer_interrupts_saved, cpu) = hrint;
+	return 0;
+}
+
+static void watchdog_check_hardlockup_other_cpu(void)
+{
+	unsigned int next_cpu;
+
+	/*
+	 * Test for hardlockups every 3 samples.  The sample period is
+	 *  watchdog_thresh * 2 / 5, so 3 samples gets us back to slightly over
+	 *  watchdog_thresh (over by 20%).
+	 */
+	if (__this_cpu_read(hrtimer_interrupts) % 3 != 0)
+		return;
+
+	/* check for a hardlockup on the next cpu */
+	next_cpu = watchdog_next_cpu(smp_processor_id());
+	if (next_cpu >= nr_cpu_ids)
+		return;
+
+	smp_rmb();
+
+	if (per_cpu(watchdog_nmi_touch, next_cpu) == true) {
+		per_cpu(watchdog_nmi_touch, next_cpu) = false;
+		return;
+	}
+
+	if (is_hardlockup_other_cpu(next_cpu)) {
+		/* only warn once */
+		if (per_cpu(hard_watchdog_warn, next_cpu) == true)
+			return;
+
+		if (hardlockup_panic)
+			panic("Watchdog detected hard LOCKUP on cpu %u", next_cpu);
+		else
+			WARN(1, "Watchdog detected hard LOCKUP on cpu %u", next_cpu);
+
+		per_cpu(hard_watchdog_warn, next_cpu) = true;
+	} else {
+		per_cpu(hard_watchdog_warn, next_cpu) = false;
+	}
+}
+#else
+static inline void watchdog_check_hardlockup_other_cpu(void) { return; }
+#endif
+
 static int is_softlockup(unsigned long touch_ts)
 {
 	unsigned long now = get_timestamp();
@@ -297,7 +390,7 @@
 	return 0;
 }
 
-#ifdef CONFIG_HARDLOCKUP_DETECTOR
+#ifdef CONFIG_HARDLOCKUP_DETECTOR_NMI
 
 static struct perf_event_attr wd_hw_attr = {
 	.type		= PERF_TYPE_HARDWARE,
@@ -359,7 +452,7 @@
 	__this_cpu_write(hard_watchdog_warn, false);
 	return;
 }
-#endif /* CONFIG_HARDLOCKUP_DETECTOR */
+#endif /* CONFIG_HARDLOCKUP_DETECTOR_NMI */
 
 static void watchdog_interrupt_count(void)
 {
@@ -383,6 +476,9 @@
 	/* kick the hardlockup detector */
 	watchdog_interrupt_count();
 
+	/* test for hardlockups on the next cpu */
+	watchdog_check_hardlockup_other_cpu();
+
 	/* kick the softlockup detector */
 	wake_up_process(__this_cpu_read(softlockup_watchdog));
 
@@ -489,9 +585,13 @@
 	sched_setscheduler(current, policy, &param);
 }
 
-static void watchdog_enable(unsigned int cpu)
+void watchdog_enable(unsigned int cpu)
 {
 	struct hrtimer *hrtimer = raw_cpu_ptr(&watchdog_hrtimer);
+	unsigned int *enabled = raw_cpu_ptr(&watchdog_en);
+
+	if (*enabled)
+		return;
 
 	/* kick off the timer for the hardlockup detector */
 	hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
@@ -507,16 +607,40 @@
 	/* initialize timestamp */
 	watchdog_set_prio(SCHED_FIFO, MAX_RT_PRIO - 1);
 	__touch_watchdog();
+
+	/*
+	 * Need to ensure above operations are observed by other CPUs before
+	 * indicating that timer is enabled. This is to synchronize core
+	 * isolation and hotplug. Core isolation will wait for this flag to be
+	 * set.
+	 */
+	mb();
+	*enabled = 1;
 }
 
-static void watchdog_disable(unsigned int cpu)
+void watchdog_disable(unsigned int cpu)
 {
 	struct hrtimer *hrtimer = raw_cpu_ptr(&watchdog_hrtimer);
+	unsigned int *enabled = raw_cpu_ptr(&watchdog_en);
+
+	if (!*enabled)
+		return;
 
 	watchdog_set_prio(SCHED_NORMAL, 0);
 	hrtimer_cancel(hrtimer);
 	/* disable the perf event */
 	watchdog_nmi_disable(cpu);
+
+	/*
+	 * No need for barrier here since disabling the watchdog is
+	 * synchronized with hotplug lock
+	 */
+	*enabled = 0;
+}
+
+bool watchdog_configured(unsigned int cpu)
+{
+	return *per_cpu_ptr(&watchdog_en, cpu);
 }
 
 static void watchdog_cleanup(unsigned int cpu, bool online)
@@ -560,7 +684,7 @@
 		watchdog_nmi_disable(cpu);
 }
 
-#ifdef CONFIG_HARDLOCKUP_DETECTOR
+#ifdef CONFIG_HARDLOCKUP_DETECTOR_NMI
 /*
  * People like the simple clean cpu node info on boot.
  * Reduce the watchdog noise by only printing messages
@@ -659,9 +783,44 @@
 }
 
 #else
+#ifdef CONFIG_HARDLOCKUP_DETECTOR_OTHER_CPU
+static int watchdog_nmi_enable(unsigned int cpu)
+{
+	/*
+	 * The new cpu will be marked online before the first hrtimer interrupt
+	 * runs on it.  If another cpu tests for a hardlockup on the new cpu
+	 * before it has run its first hrtimer, it will get a false positive.
+	 * Touch the watchdog on the new cpu to delay the first check for at
+	 * least 3 sampling periods to guarantee one hrtimer has run on the new
+	 * cpu.
+	 */
+	per_cpu(watchdog_nmi_touch, cpu) = true;
+	smp_wmb();
+	cpumask_set_cpu(cpu, &watchdog_cpus);
+	return 0;
+}
+
+static void watchdog_nmi_disable(unsigned int cpu)
+{
+	unsigned int next_cpu = watchdog_next_cpu(cpu);
+
+	/*
+	 * Offlining this cpu will cause the cpu before this one to start
+	 * checking the one after this one.  If this cpu just finished checking
+	 * the next cpu and updating hrtimer_interrupts_saved, and then the
+	 * previous cpu checks it within one sample period, it will trigger a
+	 * false positive.  Touch the watchdog on the next cpu to prevent it.
+	 */
+	if (next_cpu < nr_cpu_ids)
+		per_cpu(watchdog_nmi_touch, next_cpu) = true;
+	smp_wmb();
+	cpumask_clear_cpu(cpu, &watchdog_cpus);
+}
+#else
 static int watchdog_nmi_enable(unsigned int cpu) { return 0; }
 static void watchdog_nmi_disable(unsigned int cpu) { return; }
-#endif /* CONFIG_HARDLOCKUP_DETECTOR */
+#endif /* CONFIG_HARDLOCKUP_DETECTOR_OTHER_CPU */
+#endif /* CONFIG_HARDLOCKUP_DETECTOR_NMI */
 
 static struct smp_hotplug_thread watchdog_threads = {
 	.store			= &softlockup_watchdog,
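The OTHER_CPU detector above arranges the watched CPUs in a ring: each
one samples hrtimer_interrupts of the next online CPU in cpumask order,
wrapping around, with nr_cpu_ids meaning "no other cpu to check". A
standalone model of the watchdog_next_cpu() selection; the mask, CPU
count, and sample values are illustrative assumptions:

	#include <stdio.h>

	#define NR_CPUS 4

	static int next_cpu(unsigned mask, int cpu)
	{
		int i;

		for (i = cpu + 1; i < NR_CPUS; i++)
			if (mask & (1u << i))
				return i;		/* next watched cpu */
		for (i = 0; i < NR_CPUS; i++)
			if (mask & (1u << i))
				break;			/* wrap to the first */
		return i == cpu ? NR_CPUS : i;		/* alone -> nobody */
	}

	int main(void)
	{
		unsigned mask = 0xb;	/* cpus 0,1,3 watched; cpu 2 offline */
		int cpu;

		for (cpu = 0; cpu < NR_CPUS; cpu++)
			if (mask & (1u << cpu))
				printf("cpu%d checks cpu%d\n", cpu,
				       next_cpu(mask, cpu));
		return 0;
	}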
diff -ruw linux-4.4.115/kernel/workqueue.c linux-4.4.115-fbx/kernel/workqueue.c
--- linux-4.4.115/kernel/workqueue.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/kernel/workqueue.c	2019-10-29 09:26:25.665222966 +0100
@@ -48,6 +48,8 @@
 #include <linux/nodemask.h>
 #include <linux/moduleparam.h>
 #include <linux/uaccess.h>
+#include <linux/bug.h>
+#include <linux/delay.h>
 
 #include "workqueue_internal.h"
 
@@ -149,6 +151,8 @@
 	int			id;		/* I: pool ID */
 	unsigned int		flags;		/* X: flags */
 
+	unsigned long		watchdog_ts;	/* L: watchdog timestamp */
+
 	struct list_head	worklist;	/* L: list of pending works */
 	int			nr_workers;	/* L: total number of workers */
 
@@ -1123,6 +1127,8 @@
 	struct pool_workqueue *pwq = get_work_pwq(work);
 
 	trace_workqueue_activate_work(work);
+	if (list_empty(&pwq->pool->worklist))
+		pwq->pool->watchdog_ts = jiffies;
 	move_linked_works(work, &pwq->pool->worklist, NULL);
 	__clear_bit(WORK_STRUCT_DELAYED_BIT, work_data_bits(work));
 	pwq->nr_active++;
@@ -1281,6 +1287,12 @@
 	if (work_is_canceling(work))
 		return -ENOENT;
 	cpu_relax();
+	/*
+	 * The queueing is in progress in another context. If we keep
+	 * taking the pool->lock in a busy loop, the other context may
+	 * never get the lock. Give 1 usec delay to avoid this contention.
+	 */
+	udelay(1);
 	return -EAGAIN;
 }
 
@@ -1425,6 +1437,8 @@
 		trace_workqueue_activate_work(work);
 		pwq->nr_active++;
 		worklist = &pwq->pool->worklist;
+		if (list_empty(worklist))
+			pwq->pool->watchdog_ts = jiffies;
 	} else {
 		work_flags |= WORK_STRUCT_DELAYED;
 		worklist = &pwq->delayed_works;
@@ -1496,8 +1510,6 @@
 		return;
 	}
 
-	timer_stats_timer_set_start_info(&dwork->timer);
-
 	dwork->wq = wq;
 	dwork->cpu = cpu;
 	timer->expires = jiffies + delay;
@@ -2076,6 +2088,7 @@
 		       current->comm, preempt_count(), task_pid_nr(current),
 		       worker->current_func);
 		debug_show_held_locks(current);
+		BUG_ON(PANIC_CORRUPTION);
 		dump_stack();
 	}
 
@@ -2191,6 +2204,8 @@
 			list_first_entry(&pool->worklist,
 					 struct work_struct, entry);
 
+		pool->watchdog_ts = jiffies;
+
 		if (likely(!(*work_data_bits(work) & WORK_STRUCT_LINKED))) {
 			/* optimization path, not strictly necessary */
 			process_one_work(worker, work);
@@ -2274,6 +2289,7 @@
 					struct pool_workqueue, mayday_node);
 		struct worker_pool *pool = pwq->pool;
 		struct work_struct *work, *n;
+		bool first = true;
 
 		__set_current_state(TASK_RUNNING);
 		list_del_init(&pwq->mayday_node);
@@ -2290,9 +2306,14 @@
 		 * process'em.
 		 */
 		WARN_ON_ONCE(!list_empty(scheduled));
-		list_for_each_entry_safe(work, n, &pool->worklist, entry)
-			if (get_work_pwq(work) == pwq)
+		list_for_each_entry_safe(work, n, &pool->worklist, entry) {
+			if (get_work_pwq(work) == pwq) {
+				if (first)
+					pool->watchdog_ts = jiffies;
 				move_linked_works(work, scheduled, &n);
+			}
+			first = false;
+		}
 
 		if (!list_empty(scheduled)) {
 			process_scheduled_works(rescuer);
@@ -3103,6 +3124,7 @@
 	pool->cpu = -1;
 	pool->node = NUMA_NO_NODE;
 	pool->flags |= POOL_DISASSOCIATED;
+	pool->watchdog_ts = jiffies;
 	INIT_LIST_HEAD(&pool->worklist);
 	INIT_LIST_HEAD(&pool->idle_list);
 	hash_init(pool->busy_hash);
@@ -4356,7 +4378,9 @@
 
 		pr_info("pool %d:", pool->id);
 		pr_cont_pool_info(pool);
-		pr_cont(" workers=%d", pool->nr_workers);
+		pr_cont(" hung=%us workers=%d",
+			jiffies_to_msecs(jiffies - pool->watchdog_ts) / 1000,
+			pool->nr_workers);
 		if (pool->manager)
 			pr_cont(" manager: %d",
 				task_pid_nr(pool->manager->task));
@@ -5226,6 +5250,154 @@
 static void workqueue_sysfs_unregister(struct workqueue_struct *wq)	{ }
 #endif	/* CONFIG_SYSFS */
 
+/*
+ * Workqueue watchdog.
+ *
+ * Stall may be caused by various bugs - missing WQ_MEM_RECLAIM, illegal
+ * flush dependency, a concurrency managed work item which stays RUNNING
+ * indefinitely.  Workqueue stalls can be very difficult to debug as the
+ * usual warning mechanisms don't trigger and internal workqueue state is
+ * largely opaque.
+ *
+ * Workqueue watchdog monitors all worker pools periodically and dumps
+ * state if some pools failed to make forward progress for a while where
+ * forward progress is defined as the first item on ->worklist changing.
+ *
+ * This mechanism is controlled through the kernel parameter
+ * "workqueue.watchdog_thresh" which can be updated at runtime through the
+ * corresponding sysfs parameter file.
+ */
+#ifdef CONFIG_WQ_WATCHDOG
+
+static void wq_watchdog_timer_fn(unsigned long data);
+
+static unsigned long wq_watchdog_thresh = 30;
+static struct timer_list wq_watchdog_timer =
+	TIMER_DEFERRED_INITIALIZER(wq_watchdog_timer_fn, 0, 0);
+
+static unsigned long wq_watchdog_touched = INITIAL_JIFFIES;
+static DEFINE_PER_CPU(unsigned long, wq_watchdog_touched_cpu) = INITIAL_JIFFIES;
+
+static void wq_watchdog_reset_touched(void)
+{
+	int cpu;
+
+	wq_watchdog_touched = jiffies;
+	for_each_possible_cpu(cpu)
+		per_cpu(wq_watchdog_touched_cpu, cpu) = jiffies;
+}
+
+static void wq_watchdog_timer_fn(unsigned long data)
+{
+	unsigned long thresh = READ_ONCE(wq_watchdog_thresh) * HZ;
+	bool lockup_detected = false;
+	struct worker_pool *pool;
+	int pi;
+
+	if (!thresh)
+		return;
+
+	rcu_read_lock();
+
+	for_each_pool(pool, pi) {
+		unsigned long pool_ts, touched, ts;
+
+		if (list_empty(&pool->worklist))
+			continue;
+
+		/* get the latest of pool and touched timestamps */
+		pool_ts = READ_ONCE(pool->watchdog_ts);
+		touched = READ_ONCE(wq_watchdog_touched);
+
+		if (time_after(pool_ts, touched))
+			ts = pool_ts;
+		else
+			ts = touched;
+
+		if (pool->cpu >= 0) {
+			unsigned long cpu_touched =
+				READ_ONCE(per_cpu(wq_watchdog_touched_cpu,
+						  pool->cpu));
+			if (time_after(cpu_touched, ts))
+				ts = cpu_touched;
+		}
+
+		/* did we stall? */
+		if (time_after(jiffies, ts + thresh)) {
+			lockup_detected = true;
+			pr_emerg("BUG: workqueue lockup - pool");
+			pr_cont_pool_info(pool);
+			pr_cont(" stuck for %us!\n",
+				jiffies_to_msecs(jiffies - pool_ts) / 1000);
+		}
+	}
+
+	rcu_read_unlock();
+
+	if (lockup_detected)
+		show_workqueue_state();
+
+	wq_watchdog_reset_touched();
+	mod_timer(&wq_watchdog_timer, jiffies + thresh);
+}
+
+void wq_watchdog_touch(int cpu)
+{
+	if (cpu >= 0)
+		per_cpu(wq_watchdog_touched_cpu, cpu) = jiffies;
+	else
+		wq_watchdog_touched = jiffies;
+}
+
+static void wq_watchdog_set_thresh(unsigned long thresh)
+{
+	wq_watchdog_thresh = 0;
+	del_timer_sync(&wq_watchdog_timer);
+
+	if (thresh) {
+		wq_watchdog_thresh = thresh;
+		wq_watchdog_reset_touched();
+		mod_timer(&wq_watchdog_timer, jiffies + thresh * HZ);
+	}
+}
+
+static int wq_watchdog_param_set_thresh(const char *val,
+					const struct kernel_param *kp)
+{
+	unsigned long thresh;
+	int ret;
+
+	ret = kstrtoul(val, 0, &thresh);
+	if (ret)
+		return ret;
+
+	if (system_wq)
+		wq_watchdog_set_thresh(thresh);
+	else
+		wq_watchdog_thresh = thresh;
+
+	return 0;
+}
+
+static const struct kernel_param_ops wq_watchdog_thresh_ops = {
+	.set	= wq_watchdog_param_set_thresh,
+	.get	= param_get_ulong,
+};
+
+module_param_cb(watchdog_thresh, &wq_watchdog_thresh_ops, &wq_watchdog_thresh,
+		0644);
+
+static void wq_watchdog_init(void)
+{
+	wq_watchdog_set_thresh(wq_watchdog_thresh);
+}
+
+#else	/* CONFIG_WQ_WATCHDOG */
+
+static inline void wq_watchdog_init(void) { }
+
+#endif	/* CONFIG_WQ_WATCHDOG */
+
 static void __init wq_numa_init(void)
 {
 	cpumask_var_t *tbl;
@@ -5349,6 +5521,9 @@
 	       !system_unbound_wq || !system_freezable_wq ||
 	       !system_power_efficient_wq ||
 	       !system_freezable_power_efficient_wq);
+
+	wq_watchdog_init();
+
 	return 0;
 }
 early_initcall(init_workqueues);
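A hedged reproducer sketch for the workqueue watchdog added above (for
illustration only, not in the patch): a work item that never sleeps
keeps the per-cpu pool's nr_running elevated, so further pending work
starves on the worklist and, after workqueue.watchdog_thresh seconds,
wq_watchdog_timer_fn() prints "BUG: workqueue lockup" and dumps state.

	#include <linux/module.h>
	#include <linux/workqueue.h>
	#include <linux/delay.h>

	static void stall_fn(struct work_struct *work)
	{
		for (;;)
			mdelay(1000);	/* busy worker: no forward progress */
	}
	static DECLARE_WORK(stall_work, stall_fn);
	static DECLARE_WORK(victim_work, stall_fn);

	static int __init stall_init(void)
	{
		/* first work hogs the pool; second stays pending forever */
		schedule_work_on(0, &stall_work);
		schedule_work_on(0, &victim_work);
		return 0;
	}
	module_init(stall_init);
	MODULE_LICENSE("GPL");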
diff -ruw linux-4.4.115/lib/extable.c linux-4.4.115-fbx/lib/extable.c
--- linux-4.4.115/lib/extable.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/lib/extable.c	2019-01-22 16:16:28.759294003 +0100
@@ -14,7 +14,37 @@
 #include <linux/sort.h>
 #include <asm/uaccess.h>
 
+#ifndef ARCH_HAS_RELATIVE_EXTABLE
+#define ex_to_insn(x)	((x)->insn)
+#else
+static inline unsigned long ex_to_insn(const struct exception_table_entry *x)
+{
+	return (unsigned long)&x->insn + x->insn;
+}
+#endif
+
 #ifndef ARCH_HAS_SORT_EXTABLE
+#ifndef ARCH_HAS_RELATIVE_EXTABLE
+#define swap_ex		NULL
+#else
+static void swap_ex(void *a, void *b, int size)
+{
+	struct exception_table_entry *x = a, *y = b, tmp;
+	int delta = b - a;
+
+	tmp = *x;
+	x->insn = y->insn + delta;
+	y->insn = tmp.insn - delta;
+
+#ifdef swap_ex_entry_fixup
+	swap_ex_entry_fixup(x, y, tmp, delta);
+#else
+	x->fixup = y->fixup + delta;
+	y->fixup = tmp.fixup - delta;
+#endif
+}
+#endif /* ARCH_HAS_RELATIVE_EXTABLE */
+
 /*
  * The exception table needs to be sorted so that the binary
  * search that we use to find entries in it works properly.
@@ -26,9 +56,9 @@
 	const struct exception_table_entry *x = a, *y = b;
 
 	/* avoid overflow */
-	if (x->insn > y->insn)
+	if (ex_to_insn(x) > ex_to_insn(y))
 		return 1;
-	if (x->insn < y->insn)
+	if (ex_to_insn(x) < ex_to_insn(y))
 		return -1;
 	return 0;
 }
@@ -37,7 +67,7 @@
 		  struct exception_table_entry *finish)
 {
 	sort(start, finish - start, sizeof(struct exception_table_entry),
-	     cmp_ex, NULL);
+	     cmp_ex, swap_ex);
 }
 
 #ifdef CONFIG_MODULES
@@ -48,13 +78,15 @@
 void trim_init_extable(struct module *m)
 {
 	/*trim the beginning*/
-	while (m->num_exentries && within_module_init(m->extable[0].insn, m)) {
+	while (m->num_exentries &&
+	       within_module_init(ex_to_insn(&m->extable[0]), m)) {
 		m->extable++;
 		m->num_exentries--;
 	}
 	/*trim the end*/
 	while (m->num_exentries &&
-		within_module_init(m->extable[m->num_exentries-1].insn, m))
+	       within_module_init(ex_to_insn(&m->extable[m->num_exentries - 1]),
+				  m))
 		m->num_exentries--;
 }
 #endif /* CONFIG_MODULES */
@@ -81,9 +113,9 @@
 		 * careful, the distance between value and insn
 		 * can be larger than MAX_LONG:
 		 */
-		if (mid->insn < value)
+		if (ex_to_insn(mid) < value)
 			first = mid + 1;
-		else if (mid->insn > value)
+		else if (ex_to_insn(mid) > value)
 			last = mid - 1;
 		else
 			return mid;
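ex_to_insn() above abstracts the two extable layouts: absolute entries
store an address, while relative entries store a 32-bit offset from the
field's own location, which keeps the table position independent and
half the size on 64-bit. A minimal sketch of the relative encoding,
with hypothetical names:

	/* each field is an offset relative to its own address */
	struct rel_entry { int insn; };

	static unsigned long rel_to_addr(const struct rel_entry *e)
	{
		return (unsigned long)&e->insn + e->insn;
	}

Moving an entry changes &e->insn, so the stored offsets must be adjusted
by the distance moved; that is exactly what swap_ex() does with delta.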
diff -ruw linux-4.4.115/lib/hweight.c linux-4.4.115-fbx/lib/hweight.c
--- linux-4.4.115/lib/hweight.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/lib/hweight.c	2019-01-22 16:16:28.763294039 +0100
@@ -9,6 +9,7 @@
  * The Hamming Weight of a number is the total number of bits set in it.
  */
 
+#ifndef __HAVE_ARCH_SW_HWEIGHT
 unsigned int __sw_hweight32(unsigned int w)
 {
 #ifdef CONFIG_ARCH_HAS_FAST_MULTIPLIER
@@ -25,6 +26,7 @@
 #endif
 }
 EXPORT_SYMBOL(__sw_hweight32);
+#endif
 
 unsigned int __sw_hweight16(unsigned int w)
 {
@@ -43,6 +45,7 @@
 }
 EXPORT_SYMBOL(__sw_hweight8);
 
+#ifndef __HAVE_ARCH_SW_HWEIGHT
 unsigned long __sw_hweight64(__u64 w)
 {
 #if BITS_PER_LONG == 32
@@ -65,3 +68,4 @@
 #endif
 }
 EXPORT_SYMBOL(__sw_hweight64);
+#endif
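The __HAVE_ARCH_SW_HWEIGHT guards above let an architecture supply its
own __sw_hweight32/64 while keeping the generic C fallback for the
16/8-bit variants. A standalone self-check of the fast-multiplier
variant against the compiler builtin (userspace sketch, not kernel
code):

	#include <assert.h>

	static unsigned sw_hweight32(unsigned w)
	{
		w -= (w >> 1) & 0x55555555;
		w  = (w & 0x33333333) + ((w >> 2) & 0x33333333);
		w  = (w + (w >> 4)) & 0x0f0f0f0f;
		return (w * 0x01010101) >> 24;	/* sum the byte counts */
	}

	int main(void)
	{
		unsigned v;

		for (v = 0; v < 1000000; v++)
			assert(sw_hweight32(v) == (unsigned)__builtin_popcount(v));
		return 0;
	}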
diff -ruw linux-4.4.115/lib/Kconfig linux-4.4.115-fbx/lib/Kconfig
--- linux-4.4.115/lib/Kconfig	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/lib/Kconfig	2019-01-22 16:16:28.751293930 +0100
@@ -185,6 +185,9 @@
 	  when they need to do cyclic redundancy check according CRC8
 	  algorithm. Module will be called crc8.
 
+config XXHASH
+	tristate
+
 config AUDIT_GENERIC
 	bool
 	depends on AUDIT && !AUDIT_ARCH
@@ -239,6 +242,14 @@
 config LZ4_DECOMPRESS
 	tristate
 
+config ZSTD_COMPRESS
+	select XXHASH
+	tristate
+
+config ZSTD_DECOMPRESS
+	select XXHASH
+	tristate
+
 source "lib/xz/Kconfig"
 
 #
@@ -531,4 +542,31 @@
 config ARCH_HAS_MMIO_FLUSH
 	bool
 
+config QMI_ENCDEC
+	bool "QMI Encode/Decode Library"
+	help
+	  Library to encode & decode QMI messages from within
+	  the kernel. The kernel drivers encode the C structure into
+	  QMI message wire format and then send it over a transport.
+	  The kernel drivers receive the QMI message over a transport
+	  and then decode it into a C structure.
+
+config QMI_ENCDEC_DEBUG
+	bool "QMI Encode/Decode Library Debug"
+	help
+	  Kernel config option to enable debugging QMI Encode/Decode
+	  library. This will log the information regarding the element
+	  and message being encoded & decoded.
+
+config STACKDEPOT
+	bool
+	select STACKTRACE
+
+config ARCH_HAS_FBXSERIAL
+	bool
+
+config FBXSERIAL
+	bool "fbxserial"
+	select CRC32
+
 endmenu
diff -ruw linux-4.4.115/lib/Kconfig.debug linux-4.4.115-fbx/lib/Kconfig.debug
--- linux-4.4.115/lib/Kconfig.debug	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/lib/Kconfig.debug	2019-10-29 09:26:25.669223005 +0100
@@ -244,6 +244,7 @@
 	depends on DEBUG_KERNEL && STACKTRACE_SUPPORT
 	select DEBUG_FS
 	select STACKTRACE
+	select STACKDEPOT
 	select PAGE_EXTENSION
 	help
 	  This keeps track of what call chain is the owner of a page, may
@@ -255,6 +256,13 @@
 
 	  If unsure, say N.
 
+config PAGE_OWNER_ENABLE_DEFAULT
+	bool "Enable Track page owner by default"
+	depends on PAGE_OWNER
+	---help---
+	  Enable track page owner by default? This value
+	  Enable page owner tracking by default? This value
+
 config DEBUG_FS
 	bool "Debug Filesystem"
 	help
@@ -674,8 +682,42 @@
 
 source "lib/Kconfig.kasan"
 
+config DEBUG_REFCOUNT
+	bool "Verbose refcount checks"
+	help
+	  Say Y here if you want reference counters (refcount_t and kref) to
+	  generate WARNs on dubious usage. Without this, refcount_t will still
+	  be a saturating counter and avoid Use-After-Free by turning it into
+	  a resource leak Denial-Of-Service.
+
+	  Use of this option will increase kernel text size but will alert the
+	  admin of potential abuse.
+
+	  If in doubt, say "N".
+
 endmenu # "Memory Debugging"
 
+config ARCH_HAS_KCOV
+	bool
+	help
+	  KCOV does not have any arch-specific code, but currently it is enabled
+	  only for x86_64. KCOV requires testing on other archs, and most likely
+	  disabling of instrumentation for some early boot code.
+
+config KCOV
+	bool "Code coverage for fuzzing"
+	depends on ARCH_HAS_KCOV
+	select DEBUG_FS
+	help
+	  KCOV exposes kernel code coverage information in a form suitable
+	  for coverage-guided fuzzing (randomized testing).
+
+	  If RANDOMIZE_BASE is enabled, PC values will not be stable across
+	  different machines and across reboots. If you need stable PC values,
+	  disable RANDOMIZE_BASE.
+
+	  For more details, see Documentation/kcov.txt.
+
 config DEBUG_SHIRQ
 	bool "Debug shared IRQ handlers"
 	depends on DEBUG_KERNEL
@@ -707,15 +749,27 @@
 	  The overhead should be minimal.  A periodic hrtimer runs to
 	  generate interrupts and kick the watchdog task every 4 seconds.
 	  An NMI is generated every 10 seconds or so to check for hardlockups.
+	  If NMIs are not available on the platform, every 12 seconds the
+	  hrtimer interrupt on one cpu will be used to check for hardlockups
+	  on the next cpu.
 
 	  The frequency of hrtimer and NMI events and the soft and hard lockup
 	  thresholds can be controlled through the sysctl watchdog_thresh.
 
-config HARDLOCKUP_DETECTOR
+config HARDLOCKUP_DETECTOR_NMI
 	def_bool y
 	depends on LOCKUP_DETECTOR && !HAVE_NMI_WATCHDOG
 	depends on PERF_EVENTS && HAVE_PERF_EVENTS_NMI
 
+config HARDLOCKUP_DETECTOR_OTHER_CPU
+	def_bool y
+	depends on LOCKUP_DETECTOR && SMP
+	depends on !HARDLOCKUP_DETECTOR_NMI && !HAVE_NMI_WATCHDOG
+
+config HARDLOCKUP_DETECTOR
+	def_bool y
+	depends on HARDLOCKUP_DETECTOR_NMI || HARDLOCKUP_DETECTOR_OTHER_CPU
+
 config BOOTPARAM_HARDLOCKUP_PANIC
 	bool "Panic (Reboot) On Hard Lockups"
 	depends on HARDLOCKUP_DETECTOR
@@ -812,6 +866,17 @@
 	default 0 if !BOOTPARAM_HUNG_TASK_PANIC
 	default 1 if BOOTPARAM_HUNG_TASK_PANIC
 
+config WQ_WATCHDOG
+	bool "Detect Workqueue Stalls"
+	depends on DEBUG_KERNEL
+	help
+	  Say Y here to enable stall detection on workqueues.  If a
+	  worker pool doesn't make forward progress on a pending work
+	  item for over a given amount of time, 30s by default, a
+	  warning message is printed along with a dump of workqueue
+	  state.  This can be configured through the kernel parameter
+	  "workqueue.watchdog_thresh" and its sysfs counterpart.
+
 endmenu # "Debug lockups and hangs"
 
 config PANIC_ON_OOPS
@@ -855,6 +920,34 @@
 	bool
 	default n
 
+config PANIC_ON_SCHED_BUG
+	bool "Panic on all bugs encountered by the scheduler"
+	help
+	  Say Y here to panic on all 'BUG:' conditions encountered by the
+	  scheduler, even potentially-recoverable ones such as scheduling
+	  while atomic, sleeping from invalid context, and detection of
+	  broken arch topologies.
+
+	  Say N if unsure.
+
+config PANIC_ON_RT_THROTTLING
+	bool "Panic on RT throttling"
+	help
+	  Say Y here to enable the kernel to panic when a realtime
+	  runqueue is throttled. This may be useful for detecting
+	  and debugging RT throttling issues.
+
+	  Say N if unsure.
+
+config SYSRQ_SCHED_DEBUG
+	bool "Print scheduling debugging info from sysrq-trigger"
+	depends on SCHED_DEBUG
+	default y
+	help
+	  If you say Y here, the "show-task-states(T)" and
+	  "show-blocked-tasks(W)" sysrq-triggers will print additional
+	  scheduling statistics.
+
 config SCHEDSTATS
 	bool "Collect scheduler statistics"
 	depends on DEBUG_KERNEL && PROC_FS
@@ -893,19 +986,23 @@
 
 	  If unsure, say N.
 
-config TIMER_STATS
-	bool "Collect kernel timers statistics"
-	depends on DEBUG_KERNEL && PROC_FS
+config DEBUG_TASK_STACK_SCAN_OFF
+	bool "Disable kmemleak task stack scan by default"
+	depends on DEBUG_KMEMLEAK
 	help
-	  If you say Y here, additional code will be inserted into the
-	  timer routines to collect statistics about kernel timers being
-	  reprogrammed. The statistics can be read from /proc/timer_stats.
-	  The statistics collection is started by writing 1 to /proc/timer_stats,
-	  writing 0 stops it. This feature is useful to collect information
-	  about timer usage patterns in kernel and userspace. This feature
-	  is lightweight if enabled in the kernel config but not activated
-	  (it defaults to deactivated on bootup and will only be activated
-	  if some application like powertop activates it explicitly).
+	  Say Y here to disable kmemleak task stack scan by default
+	  at compile time. It can be enabled later if required by
+	  writing to the debugfs entry :
+	  echo "stack=on" > /sys/kernel/debug/kmemleak.
+
+config DEBUG_MODULE_SCAN_OFF
+	bool "Disable module memory scan for leaks by default"
+	depends on DEBUG_KMEMLEAK
+	help
+	  Say Y here to disable scanning of the kernel modules area
+	  list for memory leaks by default. A module scan can
+	  potentially run with irqs/preemption disabled for a
+	  considerable amount of time.
 
 config DEBUG_PREEMPT
 	bool "Debug preemptible kernel"
@@ -936,6 +1033,28 @@
 	  best used in conjunction with the NMI watchdog so that spinlock
 	  deadlocks are also debuggable.
 
+choice
+	prompt "Perform Action on spinlock bug"
+	depends on DEBUG_SPINLOCK
+
+	default DEBUG_SPINLOCK_BITE_ON_BUG
+
+	config DEBUG_SPINLOCK_BITE_ON_BUG
+		bool "Cause a Watchdog Bite on Spinlock bug"
+		depends on QCOM_WATCHDOG_V2
+		help
+		  On a spinlock bug, cause a watchdog bite so that we can get the precise
+		  state of the system captured at the time of spin dump. This is mutually
+		  exclusive with the below DEBUG_SPINLOCK_PANIC_ON_BUG config.
+
+	config DEBUG_SPINLOCK_PANIC_ON_BUG
+		bool "Cause a Kernel Panic on Spinlock bug"
+		help
+		  On a spinlock bug, cause a kernel panic so that we can get the complete
+		  information about the system at the time of spin dump in the dmesg.
+		  This is mutually exclusive with the above DEBUG_SPINLOCK_BITE_ON_BUG.
+endchoice
+
 config DEBUG_MUTEXES
 	bool "Mutex debugging: basic checks"
 	depends on DEBUG_KERNEL
@@ -1357,6 +1476,17 @@
 	  RCU grace period persists, additional CPU stall warnings are
 	  printed at more widely spaced intervals.
 
+config RCU_STALL_WATCHDOG_BITE
+	bool "RCU stall induce watchdog bite"
+	depends on RCU_STALL_COMMON && QCOM_WATCHDOG_V2
+	help
+	  Induce a watchdog bite if an RCU grace period extends beyond
+	  the specified number of seconds, instead of just printing
+	  warning messages.
+	  This helps to collect ram dumps and cpu context for
+	  postmortem analysis. Generally if a given RCU grace period
+	  extends more than the specified number of seconds,
+	  a CPU stall warning is printed.
+
 config RCU_TRACE
 	bool "Enable tracing for RCU"
 	depends on DEBUG_KERNEL
@@ -1532,6 +1662,20 @@
 	  and to test how the mmc host driver handles retries from
 	  the block device.
 
+config UFS_FAULT_INJECTION
+	bool "Fault-injection capability for UFS IO"
+	select DEBUG_FS
+	depends on FAULT_INJECTION && SCSI_UFSHCD
+	help
+	 Provide fault-injection capability for UFS IO.
+	 This makes the UFS host controller driver randomly abort
+	 ongoing commands in the host controller, update the OCS
+	 field according to the injected fatal error, and optionally
+	 hang a command indefinitely until the upper-layer timeout
+	 occurs. This is useful to test error handling in the UFS
+	 controller driver and how the driver handles retries from
+	 the block/SCSI mid layer.
+
 config FAIL_FUTEX
 	bool "Fault-injection capability for futexes"
 	select DEBUG_FS
@@ -1840,6 +1984,19 @@
 	        memtest=17, mean do 17 test patterns.
 	  If you are unsure how to answer this question, answer N.
 
+config MEMTEST_ENABLE_DEFAULT
+	int "Enable Memtest pattern test by default? (0-17)"
+	range 0 17
+	default "0"
+	depends on MEMTEST
+	help
+	  This option selects the number of Memtest patterns enabled
+	  through the kernel defconfig. Alternatively, Memtest can be
+	  enabled using the memtest=<patterns> kernel command line.
+
+	  The default value is "0", which leaves Memtest disabled.
+	  To enable it, enter any value in the 1-17 range.
+
 config TEST_STATIC_KEYS
 	tristate "Test static keys"
 	default n
@@ -1849,7 +2006,16 @@
 
 	  If unsure, say N.
 
+config PANIC_ON_DATA_CORRUPTION
+	bool "Cause a Kernel Panic When Data Corruption is detected"
+	help
+	 Select this option to upgrade warnings for potentially
+	 recoverable data corruption scenarios to system-halting panics,
+	 for easier detection and debugging.
+
 source "samples/Kconfig"
 
 source "lib/Kconfig.kgdb"
 
+source "lib/Kconfig.ubsan"
+
diff -ruw linux-4.4.115/lib/Kconfig.kasan linux-4.4.115-fbx/lib/Kconfig.kasan
--- linux-4.4.115/lib/Kconfig.kasan	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/lib/Kconfig.kasan	2019-01-22 16:16:28.751293930 +0100
@@ -5,8 +5,9 @@
 
 config KASAN
 	bool "KASan: runtime memory debugger"
-	depends on SLUB_DEBUG
+	depends on SLUB || (SLAB && !DEBUG_SLAB)
 	select CONSTRUCTORS
+	select STACKDEPOT
 	help
 	  Enables kernel address sanitizer - runtime memory debugger,
 	  designed to find out-of-bounds accesses and use-after-free bugs.
@@ -16,6 +17,10 @@
 	  This feature consumes about 1/8 of available memory and brings about
 	  ~x3 performance slowdown.
 	  For better error detection enable CONFIG_STACKTRACE.
+	  See KASAN_SANITIZE_ALL for selectively compiling files and directories
+	  with this compiler feature enabled.
+	  Currently CONFIG_KASAN doesn't work with CONFIG_DEBUG_SLAB
+	  (the resulting kernel does not boot).
 
 choice
 	prompt "Instrumentation type"
@@ -42,6 +47,22 @@
 
 endchoice
 
+config KASAN_SANITIZE_ALL
+	bool "KASan: Enable Instrumentation for entire kernel"
+	depends on KASAN
+	default y
+	help
+	  Enable compilation with $(CFLAGS_KASAN) by default.
+	  Setting KASAN_SANITIZE_ALL to 'n' allows enabling KASan in
+	  only certain files or directories:
+	  KASAN_SANITIZE := y - include all files in a directory
+	  KASAN_SANITIZE := n - exclude all files in a directory
+	  KASAN_SANITIZE_file_name.o := y - include a single file
+	  KASAN_SANITIZE_file_name.o := n - exclude a single file
+
+	  KASAN_SANITIZE does not affect subdirectories.
+	  KASAN_SANITIZE_file_name.o has priority over KASAN_SANITIZE.
+
 config TEST_KASAN
 	tristate "Module for testing kasan for bug detection"
 	depends on m && KASAN
diff -ruw linux-4.4.115/lib/libcrc32c.c linux-4.4.115-fbx/lib/libcrc32c.c
--- linux-4.4.115/lib/libcrc32c.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/lib/libcrc32c.c	2019-01-22 16:16:28.767294075 +0100
@@ -42,7 +42,7 @@
 u32 crc32c(u32 crc, const void *address, unsigned int length)
 {
 	SHASH_DESC_ON_STACK(shash, tfm);
-	u32 *ctx = (u32 *)shash_desc_ctx(shash);
+	u32 ret, *ctx = (u32 *)shash_desc_ctx(shash);
 	int err;
 
 	shash->tfm = tfm;
@@ -52,7 +52,9 @@
 	err = crypto_shash_update(shash, address, length);
 	BUG_ON(err);
 
-	return *ctx;
+	ret = *ctx;
+	barrier_data(ctx);
+	return ret;
 }
 
 EXPORT_SYMBOL(crc32c);
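
A note on the hunk above: barrier_data(ctx) keeps the on-stack shash context alive until the CRC has been copied into ret, preventing the compiler from sinking the load of *ctx past the point where the stack frame is reclaimed (the pattern upstream adopted to work around a gcc bug on sparc that referenced a deallocated stack frame). For orientation only, the primitive is a plain optimization barrier that additionally marks the pointed-to data as live, roughly as defined in the kernel's gcc compiler header:

	/* Optimization barrier: clobbers memory and tells the compiler
	 * that the data reachable through @ptr is still in use, so
	 * earlier stores to it cannot be treated as dead. */
	#define barrier_data(ptr) __asm__ __volatile__("": :"r"(ptr) :"memory")
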
diff -ruw linux-4.4.115/lib/Makefile linux-4.4.115-fbx/lib/Makefile
--- linux-4.4.115/lib/Makefile	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/lib/Makefile	2019-10-29 09:26:25.669223005 +0100
@@ -7,6 +7,18 @@
 KBUILD_CFLAGS = $(subst $(CC_FLAGS_FTRACE),,$(ORIG_CFLAGS))
 endif
 
+# These files are disabled because they produce lots of non-interesting and/or
+# flaky coverage that is not a function of syscall inputs. For example,
+# rbtree can be global and individual rotations don't correlate with inputs.
+KCOV_INSTRUMENT_string.o := n
+KCOV_INSTRUMENT_rbtree.o := n
+KCOV_INSTRUMENT_list_debug.o := n
+KCOV_INSTRUMENT_debugobjects.o := n
+KCOV_INSTRUMENT_dynamic_debug.o := n
+# Kernel does not boot if we instrument this file as it uses custom calling
+# convention (see CONFIG_ARCH_HWEIGHT_CFLAGS).
+KCOV_INSTRUMENT_hweight.o := n
+
 lib-y := ctype.o string.o vsprintf.o cmdline.o \
 	 rbtree.o radix-tree.o dump_stack.o timerqueue.o\
 	 idr.o int_sqrt.o extable.o \
@@ -22,12 +34,14 @@
 lib-y	+= kobject.o klist.o
 obj-y	+= lockref.o
 
+KASAN_SANITIZE_find_bit.o := n
+
 obj-y += bcd.o div64.o sort.o parser.o halfmd4.o debug_locks.o random32.o \
 	 bust_spinlocks.o kasprintf.o bitmap.o scatterlist.o \
 	 gcd.o lcm.o list_sort.o uuid.o flex_array.o iov_iter.o clz_ctz.o \
 	 bsearch.o find_bit.o llist.o memweight.o kfifo.o \
 	 percpu-refcount.o percpu_ida.o rhashtable.o reciprocal_div.o \
-	 once.o
+	 once.o hash.o
 obj-y += string_helpers.o
 obj-$(CONFIG_TEST_STRING_HELPERS) += test-string_helpers.o
 obj-y += hexdump.o
@@ -58,8 +72,6 @@
 obj-$(CONFIG_CHECK_SIGNATURE) += check_signature.o
 obj-$(CONFIG_DEBUG_LOCKING_API_SELFTESTS) += locking-selftest.o
 
-GCOV_PROFILE_hweight.o := n
-CFLAGS_hweight.o = $(subst $(quote),,$(CONFIG_ARCH_HWEIGHT_CFLAGS))
 obj-$(CONFIG_GENERIC_HWEIGHT) += hweight.o
 
 obj-$(CONFIG_BTREE) += btree.o
@@ -83,6 +95,7 @@
 obj-$(CONFIG_CRC7)	+= crc7.o
 obj-$(CONFIG_LIBCRC32C)	+= libcrc32c.o
 obj-$(CONFIG_CRC8)	+= crc8.o
+obj-$(CONFIG_XXHASH)	+= xxhash.o
 obj-$(CONFIG_GENERIC_ALLOCATOR) += genalloc.o
 
 obj-$(CONFIG_842_COMPRESS) += 842/
@@ -96,6 +109,8 @@
 obj-$(CONFIG_LZ4_COMPRESS) += lz4/
 obj-$(CONFIG_LZ4HC_COMPRESS) += lz4/
 obj-$(CONFIG_LZ4_DECOMPRESS) += lz4/
+obj-$(CONFIG_ZSTD_COMPRESS) += zstd/
+obj-$(CONFIG_ZSTD_DECOMPRESS) += zstd/
 obj-$(CONFIG_XZ_DEC) += xz/
 obj-$(CONFIG_RAID6_PQ) += raid6/
 
@@ -165,6 +180,9 @@
 obj-$(CONFIG_SG_SPLIT) += sg_split.o
 obj-$(CONFIG_STMP_DEVICE) += stmp_device.o
 
+obj-$(CONFIG_STACKDEPOT) += stackdepot.o
+KASAN_SANITIZE_stackdepot.o := n
+
 libfdt_files = fdt.o fdt_ro.o fdt_wip.o fdt_rw.o fdt_sw.o fdt_strerror.o \
 	       fdt_empty_tree.o
 $(foreach file, $(libfdt_files), \
@@ -208,3 +226,11 @@
 clean-files	+= oid_registry_data.c
 
 obj-$(CONFIG_UCS2_STRING) += ucs2_string.o
+
+obj-$(CONFIG_QMI_ENCDEC)	+= qmi_encdec.o
+
+obj-$(CONFIG_UBSAN) += ubsan.o
+
+UBSAN_SANITIZE_ubsan.o := n
+
+obj-$(CONFIG_FBXSERIAL) += fbxserial.o
diff -ruw linux-4.4.115/lib/radix-tree.c linux-4.4.115-fbx/lib/radix-tree.c
--- linux-4.4.115/lib/radix-tree.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/lib/radix-tree.c	2019-01-22 16:16:28.771294111 +0100
@@ -1035,6 +1035,49 @@
 EXPORT_SYMBOL(radix_tree_gang_lookup);
 
 /**
+ *	radix_tree_gang_lookup_index - perform multiple lookup on a radix tree
+ *	@root:		radix tree root
+ *	@results:	where the results of the lookup are placed
+ *	@indices:	where their indices should be placed
+ *	@first_index:	start the lookup from this key
+ *	@max_items:	place up to this many items at *results
+ *
+ *	Performs an index-ascending scan of the tree for present items.  Places
+ *	them at *@results and returns the number of items which were placed at
+ *	*@results. The indices are placed in @indices.
+ *
+ *	The implementation is naive.
+ *
+ *	The only difference from radix_tree_gang_lookup() is that the indices
+ *	are also collected along with the results of the lookup.
+ */
+unsigned int
+radix_tree_gang_lookup_index(struct radix_tree_root *root, void **results,
+			unsigned long *indices, unsigned long first_index,
+			unsigned int max_items)
+{
+	struct radix_tree_iter iter;
+	void **slot;
+	unsigned int ret = 0;
+
+	if (unlikely(!max_items))
+		return 0;
+
+	radix_tree_for_each_slot(slot, root, &iter, first_index) {
+		results[ret] = indirect_to_ptr(rcu_dereference_raw(*slot));
+		if (!results[ret])
+			continue;
+		if (indices)
+			indices[ret] = iter.index;
+		if (++ret == max_items)
+			break;
+	}
+
+	return ret;
+}
+EXPORT_SYMBOL(radix_tree_gang_lookup_index);
+
+/**
  *	radix_tree_gang_lookup_slot - perform multiple slot lookup on radix tree
  *	@root:		radix tree root
  *	@results:	where the results of the lookup are placed
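
radix_tree_gang_lookup_index() is a new export with no caller in this hunk, so a hedged usage sketch may help (hypothetical caller, demo_* names invented): like radix_tree_gang_lookup(), it must run under rcu_read_lock() or with the tree suitably locked, and a longer scan can resume from the last returned index plus one.

	/* Sketch, not part of the patch: walk an entire tree in batches. */
	static void demo_dump_slots(struct radix_tree_root *root)
	{
		void *results[16];
		unsigned long indices[16];
		unsigned long first = 0;
		unsigned int i, n;

		rcu_read_lock();
		do {
			n = radix_tree_gang_lookup_index(root, results, indices,
							 first, ARRAY_SIZE(results));
			for (i = 0; i < n; i++)
				pr_info("index %lu -> %p\n", indices[i], results[i]);
			if (n)
				first = indices[n - 1] + 1;
		} while (n == ARRAY_SIZE(results));
		rcu_read_unlock();
	}
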
diff -ruw linux-4.4.115/lib/strncpy_from_user.c linux-4.4.115-fbx/lib/strncpy_from_user.c
--- linux-4.4.115/lib/strncpy_from_user.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/lib/strncpy_from_user.c	2019-10-29 09:26:25.677223083 +0100
@@ -1,5 +1,6 @@
 #include <linux/compiler.h>
 #include <linux/export.h>
+#include <linux/thread_info.h>
 #include <linux/uaccess.h>
 #include <linux/kernel.h>
 #include <linux/errno.h>
@@ -14,6 +15,8 @@
 	(((long) dst | (long) src) & (sizeof(long) - 1))
 #endif
 
+#define CHECK_ALIGN(v, a) ((((unsigned long)(v)) & ((a) - 1)) == 0)
+
 /*
  * Do a strncpy, return length of string without final '\0'.
  * 'count' is the user-supplied count (return 'count' if we
@@ -35,12 +38,27 @@
 	if (IS_UNALIGNED(src, dst))
 		goto byte_at_a_time;
 
+	/* Copy a byte at a time until we align to 8 bytes */
+	while (max && (!CHECK_ALIGN(src + res, 8))) {
+		char c;
+		int ret;
+
+		ret = __get_user(c, src + res);
+		if (ret)
+			return -EFAULT;
+		dst[res] = c;
+		if (!c)
+			return res;
+		res++;
+		max--;
+	}
+
 	while (max >= sizeof(unsigned long)) {
 		unsigned long c, data;
 
 		/* Fall back to byte-at-a-time if we get a page fault */
-		if (unlikely(__get_user(c,(unsigned long __user *)(src+res))))
-			break;
+		unsafe_get_user(c, (unsigned long __user *)(src+res), byte_at_a_time);
+
 		*(unsigned long *)(dst+res) = c;
 		if (has_zero(c, &data, &constants)) {
 			data = prep_zero_mask(c, data, &constants);
@@ -55,8 +73,7 @@
 	while (max) {
 		char c;
 
-		if (unlikely(__get_user(c,src+res)))
-			return -EFAULT;
+		unsafe_get_user(c,src+res, efault);
 		dst[res] = c;
 		if (!c)
 			return res;
@@ -75,6 +92,7 @@
 	 * Nope: we hit the address space limit, and we still had more
 	 * characters the caller would have wanted. That's an EFAULT.
 	 */
+efault:
 	return -EFAULT;
 }
 
@@ -107,7 +125,13 @@
 	src_addr = (unsigned long)src;
 	if (likely(src_addr < max_addr)) {
 		unsigned long max = max_addr - src_addr;
-		return do_strncpy_from_user(dst, src, count, max);
+		long retval;
+
+		check_object_size(dst, count, false);
+		user_access_begin();
+		retval = do_strncpy_from_user(dst, src, count, max);
+		user_access_end();
+		return retval;
 	}
 	return -EFAULT;
 }
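
The conversion above batches user accesses: user_access_begin()/user_access_end() open and close the user-access window once around the whole copy (STAC/CLAC on x86 with SMAP), and unsafe_get_user() jumps to a local error label on fault instead of testing a return value on every read. A minimal sketch of the calling pattern, assuming the three-argument unsafe_get_user() form this patch uses:

	/* Sketch, not part of the patch: read two words in one window. */
	static long demo_read_two(const unsigned long __user *src,
				  unsigned long *dst)
	{
		user_access_begin();
		unsafe_get_user(dst[0], src, efault);
		unsafe_get_user(dst[1], src + 1, efault);
		user_access_end();
		return 0;
	efault:
		user_access_end();
		return -EFAULT;
	}
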
diff -ruw linux-4.4.115/lib/strnlen_user.c linux-4.4.115-fbx/lib/strnlen_user.c
--- linux-4.4.115/lib/strnlen_user.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/lib/strnlen_user.c	2019-10-29 09:26:25.677223083 +0100
@@ -45,8 +45,7 @@
 	src -= align;
 	max += align;
 
-	if (unlikely(__get_user(c,(unsigned long __user *)src)))
-		return 0;
+	unsafe_get_user(c, (unsigned long __user *)src, efault);
 	c |= aligned_byte_mask(align);
 
 	for (;;) {
@@ -61,8 +60,7 @@
 		if (unlikely(max <= sizeof(unsigned long)))
 			break;
 		max -= sizeof(unsigned long);
-		if (unlikely(__get_user(c,(unsigned long __user *)(src+res))))
-			return 0;
+		unsafe_get_user(c, (unsigned long __user *)(src+res), efault);
 	}
 	res -= align;
 
@@ -77,6 +75,7 @@
 	 * Nope: we hit the address space limit, and we still had more
 	 * characters the caller would have wanted. That's 0.
 	 */
+efault:
 	return 0;
 }
 
@@ -112,7 +111,12 @@
 	src_addr = (unsigned long)str;
 	if (likely(src_addr < max_addr)) {
 		unsigned long max = max_addr - src_addr;
-		return do_strnlen_user(str, count, max);
+		long retval;
+
+		user_access_begin();
+		retval = do_strnlen_user(str, count, max);
+		user_access_end();
+		return retval;
 	}
 	return 0;
 }
@@ -141,7 +145,12 @@
 	src_addr = (unsigned long)str;
 	if (likely(src_addr < max_addr)) {
 		unsigned long max = max_addr - src_addr;
-		return do_strnlen_user(str, ~0ul, max);
+		long retval;
+
+		user_access_begin();
+		retval = do_strnlen_user(str, ~0ul, max);
+		user_access_end();
+		return retval;
 	}
 	return 0;
 }
diff -ruw linux-4.4.115/Makefile linux-4.4.115-fbx/Makefile
--- linux-4.4.115/Makefile	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/Makefile	2019-10-29 09:32:37.124855595 +0100
@@ -146,7 +146,7 @@
 $(filter-out _all sub-make $(CURDIR)/Makefile, $(MAKECMDGOALS)) _all: sub-make
 	@:
 
-sub-make: FORCE
+sub-make:
 	$(Q)$(MAKE) -C $(KBUILD_OUTPUT) KBUILD_SRC=$(CURDIR) \
 	-f $(CURDIR)/Makefile $(filter-out _all sub-make,$(MAKECMDGOALS))
 
@@ -301,7 +301,7 @@
 
 HOSTCC       = gcc
 HOSTCXX      = g++
-HOSTCFLAGS   = -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer -std=gnu89
+HOSTCFLAGS   := -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer -std=gnu89
 HOSTCXXFLAGS = -O2
 
 ifeq ($(shell $(HOSTCC) -v 2>&1 | grep -c "clang version"), 1)
@@ -369,6 +369,7 @@
 CFLAGS_KERNEL	=
 AFLAGS_KERNEL	=
 CFLAGS_GCOV	= -fprofile-arcs -ftest-coverage -fno-tree-loop-im
+CFLAGS_KCOV	= -fsanitize-coverage=trace-pc
 
 
 # Use USERINCLUDE when you must reference the UAPI directories only.
@@ -416,7 +417,7 @@
 export HOSTCXX HOSTCXXFLAGS LDFLAGS_MODULE CHECK CHECKFLAGS
 
 export KBUILD_CPPFLAGS NOSTDINC_FLAGS LINUXINCLUDE OBJCOPYFLAGS LDFLAGS
-export KBUILD_CFLAGS CFLAGS_KERNEL CFLAGS_MODULE CFLAGS_GCOV CFLAGS_KASAN
+export KBUILD_CFLAGS CFLAGS_KERNEL CFLAGS_MODULE CFLAGS_GCOV CFLAGS_KCOV CFLAGS_KASAN CFLAGS_UBSAN
 export KBUILD_AFLAGS AFLAGS_KERNEL AFLAGS_MODULE
 export KBUILD_AFLAGS_MODULE KBUILD_CFLAGS_MODULE KBUILD_LDFLAGS_MODULE
 export KBUILD_AFLAGS_KERNEL KBUILD_CFLAGS_KERNEL
@@ -624,7 +625,7 @@
 KBUILD_CFLAGS	+= $(call cc-disable-warning, int-in-bool-context)
 
 ifdef CONFIG_CC_OPTIMIZE_FOR_SIZE
-KBUILD_CFLAGS	+= -Os
+KBUILD_CFLAGS	+= $(call cc-option,-Oz,-Os)
 else
 ifdef CONFIG_PROFILE_ALL_BRANCHES
 KBUILD_CFLAGS	+= -O2
@@ -693,12 +694,31 @@
 endif
 KBUILD_CFLAGS += $(stackp-flag)
 
+ifdef CONFIG_KCOV
+  ifeq ($(call cc-option, $(CFLAGS_KCOV)),)
+    $(warning Cannot use CONFIG_KCOV: \
+             -fsanitize-coverage=trace-pc is not supported by compiler)
+    CFLAGS_KCOV =
+  endif
+endif
+
 ifeq ($(cc-name),clang)
+ifneq ($(CROSS_COMPILE),)
+CLANG_TRIPLE    ?= $(CROSS_COMPILE)
+CLANG_TARGET	:= --target=$(notdir $(CLANG_TRIPLE:%-=%))
+GCC_TOOLCHAIN	:= $(realpath $(dir $(shell which $(LD)))/..)
+endif
+ifneq ($(GCC_TOOLCHAIN),)
+CLANG_GCC_TC	:= --gcc-toolchain=$(GCC_TOOLCHAIN)
+endif
+KBUILD_CFLAGS += $(CLANG_TARGET) $(CLANG_GCC_TC)
+KBUILD_AFLAGS += $(CLANG_TARGET) $(CLANG_GCC_TC)
 KBUILD_CPPFLAGS += $(call cc-option,-Qunused-arguments,)
-KBUILD_CPPFLAGS += $(call cc-option,-Wno-unknown-warning-option,)
 KBUILD_CFLAGS += $(call cc-disable-warning, unused-variable)
 KBUILD_CFLAGS += $(call cc-disable-warning, format-invalid-specifier)
 KBUILD_CFLAGS += $(call cc-disable-warning, gnu)
+KBUILD_CFLAGS += $(call cc-disable-warning, address-of-packed-member)
+KBUILD_CFLAGS += $(call cc-disable-warning, duplicate-decl-specifier)
 # Quiet clang warning: comparison of unsigned expression < 0 is always false
 KBUILD_CFLAGS += $(call cc-disable-warning, tautological-compare)
 # CLANG uses a _MergedGlobals as optimization, but this breaks modpost, as the
@@ -706,6 +726,8 @@
 # See modpost pattern 2
 KBUILD_CFLAGS += $(call cc-option, -mno-global-merge,)
 KBUILD_CFLAGS += $(call cc-option, -fcatch-undefined-behavior)
+KBUILD_CFLAGS += $(call cc-option, -no-integrated-as)
+KBUILD_AFLAGS += $(call cc-option, -no-integrated-as)
 else
 
 # These warnings generated too much noise in a regular build.
@@ -802,6 +824,7 @@
 
 include scripts/Makefile.kasan
 include scripts/Makefile.extrawarn
+include scripts/Makefile.ubsan
 
 # Add any arch overrides and user supplied CPPFLAGS, AFLAGS and CFLAGS as the
 # last assignments
@@ -1006,7 +1029,7 @@
 
 archprepare: archheaders archscripts prepare1 scripts_basic
 
-prepare0: archprepare FORCE
+prepare0: archprepare
 	$(Q)$(MAKE) $(build)=.
 
 # All the preparing..
@@ -1051,7 +1074,7 @@
 export INSTALL_FW_PATH
 
 PHONY += firmware_install
-firmware_install: FORCE
+firmware_install:
 	@mkdir -p $(objtree)/firmware
 	$(Q)$(MAKE) -f $(srctree)/scripts/Makefile.fwinst obj=firmware __fw_install
 
@@ -1071,7 +1094,7 @@
 archscripts:
 
 PHONY += __headers
-__headers: $(version_h) scripts_basic asm-generic archheaders archscripts FORCE
+__headers: $(version_h) scripts_basic asm-generic archheaders archscripts
 	$(Q)$(MAKE) $(build)=scripts build_unifdef
 
 PHONY += headers_install_all
@@ -1284,6 +1307,8 @@
 	@echo  '                    (default: $$(INSTALL_MOD_PATH)/lib/firmware)'
 	@echo  '  dir/            - Build all files in dir and below'
 	@echo  '  dir/file.[ois]  - Build specified target only'
+	@echo  '  dir/file.ll     - Build the LLVM assembly file'
+	@echo  '                    (requires compiler support for LLVM assembly generation)'
 	@echo  '  dir/file.lst    - Build specified mixed source/assembly target only'
 	@echo  '                    (requires a recent binutils and recent build (System.map))'
 	@echo  '  dir/file.ko     - Build module including final link'
@@ -1459,6 +1484,7 @@
 		-o -name '.*.d' -o -name '.*.tmp' -o -name '*.mod.c' \
 		-o -name '*.symtypes' -o -name 'modules.order' \
 		-o -name modules.builtin -o -name '.tmp_*.o.*' \
+		-o -name '*.ll' \
 		-o -name '*.gcno' \) -type f -print | xargs rm -f
 
 # Generate tags for editors
@@ -1562,6 +1588,8 @@
 	$(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
 %.symtypes: %.c prepare scripts FORCE
 	$(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
+%.ll: %.c prepare scripts FORCE
+	$(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
 
 # Modules
 /: prepare scripts FORCE
diff -ruw linux-4.4.115/mm/backing-dev.c linux-4.4.115-fbx/mm/backing-dev.c
--- linux-4.4.115/mm/backing-dev.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/mm/backing-dev.c	2019-10-29 09:26:25.681223122 +0100
@@ -237,6 +237,7 @@
 
 	bdi_class->dev_groups = bdi_dev_groups;
 	bdi_debug_init();
+
 	return 0;
 }
 postcore_initcall(bdi_class_init);
@@ -780,6 +781,7 @@
 
 	bdi->dev = NULL;
 
+	kref_init(&bdi->refcnt);
 	bdi->min_ratio = 0;
 	bdi->max_ratio = 100;
 	bdi->max_prop_frac = FPROP_FRAC_BASE;
@@ -795,6 +797,22 @@
 }
 EXPORT_SYMBOL(bdi_init);
 
+struct backing_dev_info *bdi_alloc_node(gfp_t gfp_mask, int node_id)
+{
+	struct backing_dev_info *bdi;
+
+	bdi = kmalloc_node(sizeof(struct backing_dev_info),
+			   gfp_mask | __GFP_ZERO, node_id);
+	if (!bdi)
+		return NULL;
+
+	if (bdi_init(bdi)) {
+		kfree(bdi);
+		return NULL;
+	}
+	return bdi;
+}
+
 int bdi_register(struct backing_dev_info *bdi, struct device *parent,
 		const char *fmt, ...)
 {
@@ -875,12 +893,26 @@
 	}
 }
 
-void bdi_exit(struct backing_dev_info *bdi)
+static void bdi_exit(struct backing_dev_info *bdi)
 {
 	WARN_ON_ONCE(bdi->dev);
 	wb_exit(&bdi->wb);
 }
 
+static void release_bdi(struct kref *ref)
+{
+	struct backing_dev_info *bdi =
+			container_of(ref, struct backing_dev_info, refcnt);
+
+	bdi_exit(bdi);
+	kfree(bdi);
+}
+
+void bdi_put(struct backing_dev_info *bdi)
+{
+	kref_put(&bdi->refcnt, release_bdi);
+}
+
 void bdi_destroy(struct backing_dev_info *bdi)
 {
 	bdi_unregister(bdi);
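
These hunks move struct backing_dev_info to reference counting: bdi_init() now takes the initial reference via kref_init(), bdi_alloc_node() returns an object holding that reference, and bdi_put() frees it through release_bdi() once the count reaches zero. A hedged sketch of the intended lifetime (hypothetical caller):

	/* Sketch, not part of the patch. */
	static int demo_bdi_lifetime(void)
	{
		struct backing_dev_info *bdi;

		bdi = bdi_alloc_node(GFP_KERNEL, NUMA_NO_NODE);
		if (!bdi)
			return -ENOMEM;

		/* ... bdi_register(), I/O, bdi_unregister() ... */

		bdi_put(bdi);	/* drops the kref_init() ref; release_bdi() kfrees */
		return 0;
	}
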
diff -ruw linux-4.4.115/mm/cma.c linux-4.4.115-fbx/mm/cma.c
--- linux-4.4.115/mm/cma.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/mm/cma.c	2019-10-29 09:26:25.685223161 +0100
@@ -35,6 +35,7 @@
 #include <linux/cma.h>
 #include <linux/highmem.h>
 #include <linux/io.h>
+#include <linux/delay.h>
 #include <trace/events/cma.h>
 
 #include "cma.h"
@@ -131,6 +132,10 @@
 	spin_lock_init(&cma->mem_head_lock);
 #endif
 
+	if (!PageHighMem(pfn_to_page(cma->base_pfn)))
+		kmemleak_free_part(__va(cma->base_pfn << PAGE_SHIFT),
+				cma->count << PAGE_SHIFT);
+
 	return 0;
 
 err:
@@ -367,6 +372,7 @@
 	unsigned long bitmap_maxno, bitmap_no, bitmap_count;
 	struct page *page = NULL;
 	int ret;
+	int retry_after_sleep = 0;
 
 	if (!cma || !cma->count)
 		return NULL;
@@ -377,20 +383,41 @@
 	if (!count)
 		return NULL;
 
+	trace_cma_alloc_start(count, align);
+
 	mask = cma_bitmap_aligned_mask(cma, align);
 	offset = cma_bitmap_aligned_offset(cma, align);
 	bitmap_maxno = cma_bitmap_maxno(cma);
 	bitmap_count = cma_bitmap_pages_to_bits(cma, count);
 
+	if (bitmap_count > bitmap_maxno)
+		return NULL;
+
 	for (;;) {
 		mutex_lock(&cma->lock);
 		bitmap_no = bitmap_find_next_zero_area_off(cma->bitmap,
 				bitmap_maxno, start, bitmap_count, mask,
 				offset);
 		if (bitmap_no >= bitmap_maxno) {
+			if (retry_after_sleep < 2) {
+				start = 0;
+				/*
+				 * Page may be momentarily pinned by some other
+				 * process which has been scheduled out, e.g.
+				 * in the exit path, during an unmap call, or
+				 * during process fork, and so cannot be freed
+				 * there. Sleep for 100ms and retry twice to
+				 * see if it has been freed later.
+				 */
+				mutex_unlock(&cma->lock);
+				msleep(100);
+				retry_after_sleep++;
+				continue;
+			} else {
 			mutex_unlock(&cma->lock);
 			break;
 		}
+		}
 		bitmap_set(cma->bitmap, bitmap_no, bitmap_count);
 		/*
 		 * It's safe to drop the lock here. We've marked this region for
@@ -414,6 +441,8 @@
 
 		pr_debug("%s(): memory range at %p is busy, retrying\n",
 			 __func__, pfn_to_page(pfn));
+
+		trace_cma_alloc_busy_retry(pfn, pfn_to_page(pfn), count, align);
 		/* try again with a bit different memory target */
 		start = bitmap_no + mask + 1;
 	}
diff -ruw linux-4.4.115/mm/compaction.c linux-4.4.115-fbx/mm/compaction.c
--- linux-4.4.115/mm/compaction.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/mm/compaction.c	2019-10-29 09:26:25.685223161 +0100
@@ -7,6 +7,7 @@
  *
  * Copyright IBM Corp. 2007-2010 Mel Gorman <mel@csn.ul.ie>
  */
+#include <linux/cpu.h>
 #include <linux/swap.h>
 #include <linux/migrate.h>
 #include <linux/compaction.h>
@@ -14,9 +15,11 @@
 #include <linux/backing-dev.h>
 #include <linux/sysctl.h>
 #include <linux/sysfs.h>
-#include <linux/balloon_compaction.h>
 #include <linux/page-isolation.h>
 #include <linux/kasan.h>
+#include <linux/kthread.h>
+#include <linux/freezer.h>
+#include <linux/page_owner.h>
 #include "internal.h"
 
 #ifdef CONFIG_COMPACTION
@@ -57,13 +60,27 @@
 
 static void map_pages(struct list_head *list)
 {
-	struct page *page;
+	unsigned int i, order, nr_pages;
+	struct page *page, *next;
+	LIST_HEAD(tmp_list);
+
+	list_for_each_entry_safe(page, next, list, lru) {
+		list_del(&page->lru);
+
+		order = page_private(page);
+		nr_pages = 1 << order;
+
+		post_alloc_hook(page, order, __GFP_MOVABLE);
+		if (order)
+			split_page(page, order);
 
-	list_for_each_entry(page, list, lru) {
-		arch_alloc_page(page, 0);
-		kernel_map_pages(page, 1, 1);
-		kasan_alloc_pages(page, 0);
+		for (i = 0; i < nr_pages; i++) {
+			list_add(&page->lru, &tmp_list);
+			page++;
+		}
 	}
+
+	list_splice(&tmp_list, list);
 }
 
 static inline bool migrate_async_suitable(int migratetype)
@@ -116,6 +133,44 @@
 
 #ifdef CONFIG_COMPACTION
 
+int PageMovable(struct page *page)
+{
+	struct address_space *mapping;
+
+	VM_BUG_ON_PAGE(!PageLocked(page), page);
+	if (!__PageMovable(page))
+		return 0;
+
+	mapping = page_mapping(page);
+	if (mapping && mapping->a_ops && mapping->a_ops->isolate_page)
+		return 1;
+
+	return 0;
+}
+EXPORT_SYMBOL(PageMovable);
+
+void __SetPageMovable(struct page *page, struct address_space *mapping)
+{
+	VM_BUG_ON_PAGE(!PageLocked(page), page);
+	VM_BUG_ON_PAGE((unsigned long)mapping & PAGE_MAPPING_MOVABLE, page);
+	page->mapping = (void *)((unsigned long)mapping | PAGE_MAPPING_MOVABLE);
+}
+EXPORT_SYMBOL(__SetPageMovable);
+
+void __ClearPageMovable(struct page *page)
+{
+	VM_BUG_ON_PAGE(!PageLocked(page), page);
+	VM_BUG_ON_PAGE(!PageMovable(page), page);
+	/*
+	 * Clear registered address_space val with keeping PAGE_MAPPING_MOVABLE
+	 * flag so that VM can catch up released page by driver after isolation.
+	 * With it, VM migration doesn't try to put it back.
+	 */
+	page->mapping = (void *)((unsigned long)page->mapping &
+				PAGE_MAPPING_MOVABLE);
+}
+EXPORT_SYMBOL(__ClearPageMovable);
+
 /* Do not skip compaction more than 64 times */
 #define COMPACT_MAX_DEFER_SHIFT 6
 
@@ -403,12 +458,13 @@
 	unsigned long flags = 0;
 	bool locked = false;
 	unsigned long blockpfn = *start_pfn;
+	unsigned int order;
 
 	cursor = pfn_to_page(blockpfn);
 
 	/* Isolate free pages. */
 	for (; blockpfn < end_pfn; blockpfn++, cursor++) {
-		int isolated, i;
+		int isolated;
 		struct page *page = cursor;
 
 		/*
@@ -474,17 +530,17 @@
 				goto isolate_fail;
 		}
 
-		/* Found a free page, break it into order-0 pages */
-		isolated = split_free_page(page);
+		/* Found a free page, will break it into order-0 pages */
+		order = page_order(page);
+		isolated = __isolate_free_page(page, order);
 		if (!isolated)
 			break;
+		set_page_private(page, order);
 
 		total_isolated += isolated;
 		cc->nr_freepages += isolated;
-		for (i = 0; i < isolated; i++) {
-			list_add(&page->lru, freelist);
-			page++;
-		}
+		list_add_tail(&page->lru, freelist);
+
 		if (!strict && cc->nr_migratepages <= cc->nr_freepages) {
 			blockpfn += isolated;
 			break;
@@ -603,7 +659,7 @@
 		 */
 	}
 
-	/* split_free_page does not map the pages */
+	/* __isolate_free_page() does not map the pages */
 	map_pages(&freelist);
 
 	if (pfn < end_pfn) {
@@ -632,21 +688,46 @@
 	mod_zone_page_state(zone, NR_ISOLATED_FILE, count[1]);
 }
 
-/* Similar to reclaim, but different enough that they don't share logic */
-static bool too_many_isolated(struct zone *zone)
+static bool __too_many_isolated(struct zone *zone, int safe)
 {
 	unsigned long active, inactive, isolated;
 
+	if (safe) {
+		inactive = zone_page_state_snapshot(zone, NR_INACTIVE_FILE) +
+			zone_page_state_snapshot(zone, NR_INACTIVE_ANON);
+		active = zone_page_state_snapshot(zone, NR_ACTIVE_FILE) +
+			zone_page_state_snapshot(zone, NR_ACTIVE_ANON);
+		isolated = zone_page_state_snapshot(zone, NR_ISOLATED_FILE) +
+			zone_page_state_snapshot(zone, NR_ISOLATED_ANON);
+	} else {
 	inactive = zone_page_state(zone, NR_INACTIVE_FILE) +
 					zone_page_state(zone, NR_INACTIVE_ANON);
 	active = zone_page_state(zone, NR_ACTIVE_FILE) +
 					zone_page_state(zone, NR_ACTIVE_ANON);
 	isolated = zone_page_state(zone, NR_ISOLATED_FILE) +
 					zone_page_state(zone, NR_ISOLATED_ANON);
+	}
 
 	return isolated > (inactive + active) / 2;
 }
 
+/* Similar to reclaim, but different enough that they don't share logic */
+static bool too_many_isolated(struct compact_control *cc)
+{
+	/*
+	 * __too_many_isolated(safe=0) is fast but inaccurate, because it
+	 * doesn't account for the vm_stat_diff[] counters.  So if it looks
+	 * like too_many_isolated() is about to return true, fall back to the
+	 * slower, more accurate zone_page_state_snapshot().
+	 */
+	if (unlikely(__too_many_isolated(cc->zone, 0))) {
+		if (cc->mode != MIGRATE_ASYNC)
+			return __too_many_isolated(cc->zone, 1);
+	}
+
+	return false;
+}
+
 /**
  * isolate_migratepages_block() - isolate all migrate-able pages within
  *				  a single pageblock
@@ -683,7 +764,7 @@
 	 * list by either parallel reclaimers or compaction. If there are,
 	 * delay for some time until fewer pages are isolated
 	 */
-	while (unlikely(too_many_isolated(zone))) {
+	while (unlikely(too_many_isolated(cc))) {
 		/* async migration should just abort */
 		if (cc->mode == MIGRATE_ASYNC)
 			return 0;
@@ -699,7 +780,6 @@
 
 	/* Time to isolate some pages for migration */
 	for (; low_pfn < end_pfn; low_pfn++) {
-		bool is_lru;
 
 		/*
 		 * Periodically drop the lock (if held) regardless of its
@@ -740,21 +820,6 @@
 		}
 
 		/*
-		 * Check may be lockless but that's ok as we recheck later.
-		 * It's possible to migrate LRU pages and balloon pages
-		 * Skip any other type of page
-		 */
-		is_lru = PageLRU(page);
-		if (!is_lru) {
-			if (unlikely(balloon_page_movable(page))) {
-				if (balloon_page_isolate(page)) {
-					/* Successfully isolated */
-					goto isolate_success;
-				}
-			}
-		}
-
-		/*
 		 * Regardless of being on LRU, compound pages such as THP and
 		 * hugetlbfs are not to be compacted. We can potentially save
 		 * a lot of iterations if we skip them at once. The check is
@@ -770,8 +835,30 @@
 			continue;
 		}
 
-		if (!is_lru)
+		/*
+		 * Check may be lockless but that's ok as we recheck later.
+		 * It's possible to migrate LRU and non-lru movable pages.
+		 * Skip any other type of page
+		 */
+		if (!PageLRU(page)) {
+			/*
+			 * __PageMovable can return false positive so we need
+			 * to verify it under page_lock.
+			 */
+			if (unlikely(__PageMovable(page)) &&
+					!PageIsolated(page)) {
+				if (locked) {
+					spin_unlock_irqrestore(&zone->lru_lock,
+									flags);
+					locked = false;
+				}
+
+				if (isolate_movable_page(page, isolate_mode))
+					goto isolate_success;
+			}
+
 			continue;
+		}
 
 		/*
 		 * Migration will fail if an anonymous page is pinned in memory,
@@ -1026,7 +1113,7 @@
 		}
 	}
 
-	/* split_free_page does not map the pages */
+	/* __isolate_free_page() does not map the pages */
 	map_pages(freelist);
 
 	/*
@@ -1218,11 +1305,11 @@
 
 		/*
 		 * Mark that the PG_migrate_skip information should be cleared
-		 * by kswapd when it goes to sleep. kswapd does not set the
+		 * by kswapd when it goes to sleep. kcompactd does not set the
 		 * flag itself as the decision to be clear should be directly
 		 * based on an allocation request.
 		 */
-		if (!current_is_kswapd())
+		if (cc->direct_compaction)
 			zone->compact_blockskip_flush = true;
 
 		return COMPACT_COMPLETE;
@@ -1365,10 +1452,9 @@
 
 	/*
 	 * Clear pageblock skip if there were failures recently and compaction
-	 * is about to be retried after being deferred. kswapd does not do
-	 * this reset as it'll reset the cached information when going to sleep.
+	 * is about to be retried after being deferred.
 	 */
-	if (compaction_restarting(zone, cc->order) && !current_is_kswapd())
+	if (compaction_restarting(zone, cc->order))
 		__reset_isolation_suitable(zone);
 
 	/*
@@ -1504,6 +1590,7 @@
 		.mode = mode,
 		.alloc_flags = alloc_flags,
 		.classzone_idx = classzone_idx,
+		.direct_compaction = true,
 	};
 	INIT_LIST_HEAD(&cc.freepages);
 	INIT_LIST_HEAD(&cc.migratepages);
@@ -1762,4 +1849,225 @@
 }
 #endif /* CONFIG_SYSFS && CONFIG_NUMA */
 
+static inline bool kcompactd_work_requested(pg_data_t *pgdat)
+{
+	return pgdat->kcompactd_max_order > 0 || kthread_should_stop();
+}
+
+static bool kcompactd_node_suitable(pg_data_t *pgdat)
+{
+	int zoneid;
+	struct zone *zone;
+	enum zone_type classzone_idx = pgdat->kcompactd_classzone_idx;
+
+	for (zoneid = 0; zoneid <= classzone_idx; zoneid++) {
+		zone = &pgdat->node_zones[zoneid];
+
+		if (!populated_zone(zone))
+			continue;
+
+		if (compaction_suitable(zone, pgdat->kcompactd_max_order, 0,
+					classzone_idx) == COMPACT_CONTINUE)
+			return true;
+	}
+
+	return false;
+}
+
+static void kcompactd_do_work(pg_data_t *pgdat)
+{
+	/*
+	 * With no special task, compact all zones so that a page of requested
+	 * order is allocatable.
+	 */
+	int zoneid;
+	struct zone *zone;
+	struct compact_control cc = {
+		.order = pgdat->kcompactd_max_order,
+		.classzone_idx = pgdat->kcompactd_classzone_idx,
+		.mode = MIGRATE_SYNC_LIGHT,
+		.ignore_skip_hint = true,
+	};
+	bool success = false;
+
+	trace_mm_compaction_kcompactd_wake(pgdat->node_id, cc.order,
+							cc.classzone_idx);
+	count_vm_event(KCOMPACTD_WAKE);
+
+	for (zoneid = 0; zoneid <= cc.classzone_idx; zoneid++) {
+		int status;
+
+		zone = &pgdat->node_zones[zoneid];
+		if (!populated_zone(zone))
+			continue;
+
+		if (compaction_deferred(zone, cc.order))
+			continue;
+
+		if (compaction_suitable(zone, cc.order, 0, zoneid) !=
+							COMPACT_CONTINUE)
+			continue;
+
+		cc.nr_freepages = 0;
+		cc.nr_migratepages = 0;
+		cc.zone = zone;
+		INIT_LIST_HEAD(&cc.freepages);
+		INIT_LIST_HEAD(&cc.migratepages);
+
+		if (kthread_should_stop())
+			return;
+		status = compact_zone(zone, &cc);
+
+		if (zone_watermark_ok(zone, cc.order, low_wmark_pages(zone),
+						cc.classzone_idx, 0)) {
+			success = true;
+			compaction_defer_reset(zone, cc.order, false);
+		} else if (status == COMPACT_COMPLETE) {
+			/*
+			 * We use sync migration mode here, so we defer like
+			 * sync direct compaction does.
+			 */
+			defer_compaction(zone, cc.order);
+		}
+
+		VM_BUG_ON(!list_empty(&cc.freepages));
+		VM_BUG_ON(!list_empty(&cc.migratepages));
+	}
+
+	/*
+	 * Regardless of success, we are done until woken up next. But remember
+	 * the requested order/classzone_idx in case it was higher/tighter than
+	 * our current ones
+	 */
+	if (pgdat->kcompactd_max_order <= cc.order)
+		pgdat->kcompactd_max_order = 0;
+	if (pgdat->kcompactd_classzone_idx >= cc.classzone_idx)
+		pgdat->kcompactd_classzone_idx = pgdat->nr_zones - 1;
+}
+
+void wakeup_kcompactd(pg_data_t *pgdat, int order, int classzone_idx)
+{
+	if (!order)
+		return;
+
+	if (pgdat->kcompactd_max_order < order)
+		pgdat->kcompactd_max_order = order;
+
+	if (pgdat->kcompactd_classzone_idx > classzone_idx)
+		pgdat->kcompactd_classzone_idx = classzone_idx;
+
+	if (!waitqueue_active(&pgdat->kcompactd_wait))
+		return;
+
+	if (!kcompactd_node_suitable(pgdat))
+		return;
+
+	trace_mm_compaction_wakeup_kcompactd(pgdat->node_id, order,
+							classzone_idx);
+	wake_up_interruptible(&pgdat->kcompactd_wait);
+}
+
+/*
+ * The background compaction daemon, started as a kernel thread
+ * from the init process.
+ */
+static int kcompactd(void *p)
+{
+	pg_data_t *pgdat = (pg_data_t*)p;
+	struct task_struct *tsk = current;
+
+	const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id);
+
+	if (!cpumask_empty(cpumask))
+		set_cpus_allowed_ptr(tsk, cpumask);
+
+	set_freezable();
+
+	pgdat->kcompactd_max_order = 0;
+	pgdat->kcompactd_classzone_idx = pgdat->nr_zones - 1;
+
+	while (!kthread_should_stop()) {
+		trace_mm_compaction_kcompactd_sleep(pgdat->node_id);
+		wait_event_freezable(pgdat->kcompactd_wait,
+				kcompactd_work_requested(pgdat));
+
+		kcompactd_do_work(pgdat);
+	}
+
+	return 0;
+}
+
+/*
+ * This kcompactd start function will be called by init and node-hot-add.
+ * On node-hot-add, kcompactd will moved to proper cpus if cpus are hot-added.
+ */
+int kcompactd_run(int nid)
+{
+	pg_data_t *pgdat = NODE_DATA(nid);
+	int ret = 0;
+
+	if (pgdat->kcompactd)
+		return 0;
+
+	pgdat->kcompactd = kthread_run(kcompactd, pgdat, "kcompactd%d", nid);
+	if (IS_ERR(pgdat->kcompactd)) {
+		pr_err("Failed to start kcompactd on node %d\n", nid);
+		ret = PTR_ERR(pgdat->kcompactd);
+		pgdat->kcompactd = NULL;
+	}
+	return ret;
+}
+
+/*
+ * Called by memory hotplug when all memory in a node is offlined. Caller must
+ * hold mem_hotplug_begin/end().
+ */
+void kcompactd_stop(int nid)
+{
+	struct task_struct *kcompactd = NODE_DATA(nid)->kcompactd;
+
+	if (kcompactd) {
+		kthread_stop(kcompactd);
+		NODE_DATA(nid)->kcompactd = NULL;
+	}
+}
+
+/*
+ * It's optimal to keep kcompactd on the same CPUs as their memory, but
+ * not required for correctness. So if the last cpu in a node goes
+ * away, we get changed to run anywhere: as the first one comes back,
+ * restore their cpu bindings.
+ */
+static int cpu_callback(struct notifier_block *nfb, unsigned long action,
+			void *hcpu)
+{
+	int nid;
+
+	if (action == CPU_ONLINE || action == CPU_ONLINE_FROZEN) {
+		for_each_node_state(nid, N_MEMORY) {
+			pg_data_t *pgdat = NODE_DATA(nid);
+			const struct cpumask *mask;
+
+			mask = cpumask_of_node(pgdat->node_id);
+
+			if (cpumask_any_and(cpu_online_mask, mask) < nr_cpu_ids)
+				/* One of our CPUs online: restore mask */
+				set_cpus_allowed_ptr(pgdat->kcompactd, mask);
+		}
+	}
+	return NOTIFY_OK;
+}
+
+static int __init kcompactd_init(void)
+{
+	int nid;
+
+	for_each_node_state(nid, N_MEMORY)
+		kcompactd_run(nid);
+	hotcpu_notifier(cpu_callback, 0);
+	return 0;
+}
+subsys_initcall(kcompactd_init)
+
 #endif /* CONFIG_COMPACTION */
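
The PageMovable()/__SetPageMovable()/__ClearPageMovable() additions let drivers make non-LRU pages eligible for compaction: isolate_migratepages_block() above now isolates any locked page whose mapping supplies an isolate_page() hook. A hedged driver-side sketch (all demo_* names hypothetical, callback signatures assumed from this backport):

	static bool demo_isolate_page(struct page *page, isolate_mode_t mode);
	static int demo_migratepage(struct address_space *mapping,
				    struct page *newpage, struct page *page,
				    enum migrate_mode mode);
	static void demo_putback_page(struct page *page);

	static const struct address_space_operations demo_aops = {
		.isolate_page	= demo_isolate_page,
		.migratepage	= demo_migratepage,
		.putback_page	= demo_putback_page,
	};

	static void demo_mark_movable(struct page *page,
				      struct address_space *mapping)
	{
		lock_page(page);	/* __SetPageMovable() asserts PageLocked */
		__SetPageMovable(page, mapping); /* mapping->a_ops == &demo_aops */
		unlock_page(page);
	}
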
diff -ruw linux-4.4.115/mm/debug.c linux-4.4.115-fbx/mm/debug.c
--- linux-4.4.115/mm/debug.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/mm/debug.c	2019-10-29 09:26:25.685223161 +0100
@@ -9,6 +9,18 @@
 #include <linux/mm.h>
 #include <linux/trace_events.h>
 #include <linux/memcontrol.h>
+#include <linux/migrate.h>
+#include <linux/page_owner.h>
+
+char *migrate_reason_names[MR_TYPES] = {
+	"compaction",
+	"memory_failure",
+	"memory_hotplug",
+	"syscall_or_cpuset",
+	"mempolicy_mbind",
+	"numa_misplaced",
+	"cma",
+};
 
 static const struct trace_print_flags pageflag_names[] = {
 	{1UL << PG_locked,		"locked"	},
@@ -47,6 +59,9 @@
 	{1UL << PG_young,		"young"		},
 	{1UL << PG_idle,		"idle"		},
 #endif
+#ifdef CONFIG_ZCACHE
+	{1UL << PG_was_active,		"was_active"	},
+#endif
 };
 
 static void dump_flags(unsigned long flags,
@@ -103,6 +118,7 @@
 void dump_page(struct page *page, const char *reason)
 {
 	dump_page_badflags(page, reason, 0);
+	dump_page_owner(page);
 }
 EXPORT_SYMBOL(dump_page);
 
diff -ruw linux-4.4.115/mm/dmapool.c linux-4.4.115-fbx/mm/dmapool.c
--- linux-4.4.115/mm/dmapool.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/mm/dmapool.c	2019-01-22 16:16:28.791294292 +0100
@@ -452,13 +452,11 @@
 			}
 			spin_unlock_irqrestore(&pool->lock, flags);
 			if (pool->dev)
-				dev_err(pool->dev, "dma_pool_free %s, dma %Lx "
-					"already free\n", pool->name,
-					(unsigned long long)dma);
+				dev_err(pool->dev, "dma_pool_free %s, dma %Lx already free\n",
+					pool->name, (unsigned long long)dma);
 			else
-				printk(KERN_ERR "dma_pool_free %s, dma %Lx "
-					"already free\n", pool->name,
-					(unsigned long long)dma);
+				printk(KERN_ERR "dma_pool_free %s, dma %Lx already free\n",
+					pool->name, (unsigned long long)dma);
 			return;
 		}
 	}
diff -ruw linux-4.4.115/mm/filemap.c linux-4.4.115-fbx/mm/filemap.c
--- linux-4.4.115/mm/filemap.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/mm/filemap.c	2019-10-29 09:26:25.689223200 +0100
@@ -239,10 +239,12 @@
 	 * invalidate any existing cleancache entries.  We can't leave
 	 * stale data around in the cleancache once our page is gone
 	 */
-	if (PageUptodate(page) && PageMappedToDisk(page))
+	if (PageUptodate(page) && PageMappedToDisk(page)) {
+		count_vm_event(PGPGOUTCLEAN);
 		cleancache_put_page(page);
-	else
+	} else {
 		cleancache_invalidate_page(mapping, page);
+	}
 
 	page_cache_tree_delete(mapping, page, shadow);
 
diff -ruw linux-4.4.115/mm/internal.h linux-4.4.115-fbx/mm/internal.h
--- linux-4.4.115/mm/internal.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/mm/internal.h	2019-01-22 16:16:28.795294329 +0100
@@ -182,6 +182,8 @@
 #ifdef CONFIG_MEMORY_FAILURE
 extern bool is_free_buddy_page(struct page *page);
 #endif
+extern void post_alloc_hook(struct page *page, unsigned int order,
+					gfp_t gfp_flags);
 extern int user_min_free_kbytes;
 
 #if defined CONFIG_COMPACTION || defined CONFIG_CMA
@@ -206,6 +208,7 @@
 	unsigned long last_migrated_pfn;/* Not yet flushed page being freed */
 	enum migrate_mode mode;		/* Async or sync migration mode */
 	bool ignore_skip_hint;		/* Scan blocks even if marked skip */
+	bool direct_compaction;		/* False from kcompactd or /proc/... */
 	int order;			/* order a direct compactor needs */
 	const gfp_t gfp_mask;		/* gfp mask of a direct compactor */
 	const int alloc_flags;		/* alloc flags of a direct compactor */
@@ -310,10 +313,8 @@
 
 extern pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma);
 
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
 extern unsigned long vma_address(struct page *page,
 				 struct vm_area_struct *vma);
-#endif
 #else /* !CONFIG_MMU */
 static inline void clear_page_mlock(struct page *page) { }
 static inline void mlock_vma_page(struct page *page) { }
diff -ruw linux-4.4.115/mm/kasan/Makefile linux-4.4.115-fbx/mm/kasan/Makefile
--- linux-4.4.115/mm/kasan/Makefile	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/mm/kasan/Makefile	2019-01-22 16:16:28.795294329 +0100
@@ -1,8 +1,10 @@
 KASAN_SANITIZE := n
+UBSAN_SANITIZE_kasan.o := n
+KCOV_INSTRUMENT := n
 
 CFLAGS_REMOVE_kasan.o = -pg
 # Function splitter causes unnecessary splits in __asan_load1/__asan_store1
 # see: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=63533
 CFLAGS_kasan.o := $(call cc-option, -fno-conserve-stack -fno-stack-protector)
 
-obj-y := kasan.o report.o kasan_init.o
+obj-y := kasan.o report.o kasan_init.o quarantine.o
diff -ruw linux-4.4.115/mm/Kconfig linux-4.4.115-fbx/mm/Kconfig
--- linux-4.4.115/mm/Kconfig	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/mm/Kconfig	2019-10-29 09:26:25.681223122 +0100
@@ -187,7 +187,7 @@
 	bool "Allow for memory hot-add"
 	depends on SPARSEMEM || X86_64_ACPI_NUMA
 	depends on ARCH_ENABLE_MEMORY_HOTPLUG
-	depends on (IA64 || X86 || PPC_BOOK3S_64 || SUPERH || S390)
+	depends on (IA64 || X86 || PPC_BOOK3S_64 || SUPERH || S390 || ARM64)
 
 config MEMORY_HOTPLUG_SPARSE
 	def_bool y
@@ -619,6 +619,44 @@
 
 	  A sane initial value is 80 MB.
 
+config ZCACHE
+       bool "Compressed cache for file pages (EXPERIMENTAL)"
+       depends on CRYPTO && CLEANCACHE
+       select CRYPTO_LZO
+       select ZBUD
+       default n
+       help
+         A compressed cache for file pages.
+         It takes active file pages that are in the process of being reclaimed
+         and attempts to compress them into a dynamically allocated RAM-based
+         memory pool.
+
+         If this process is successful, then when those file pages are
+         needed again, the read I/O is avoided. This results in significant
+         performance gains under memory pressure for systems full of file pages.
+
+config BALANCE_ANON_FILE_RECLAIM
+	bool "During reclaim treat anon and file backed pages equally"
+	depends on SWAP
+	help
+	  When performing memory reclaim, treat anonymous and file-backed
+	  pages equally.
+	  Swapping anonymous pages out to memory can be efficient enough
+	  to justify treating anonymous and file-backed pages equally.
+
+config KSWAPD_CPU_AFFINITY_MASK
+	string "kswapd cpu affinity mask"
+	depends on SMP
+	help
+	  Set the cpu affinity for the kswapd task.
+	  There can be power benefits on certain targets when limiting kswapd
+	  to run only on certain cores.
+	  The cpu affinity bitmask is represented by a hex string where commas
+	  group hex digits into chunks.  Each chunk defines exactly 32 bits of
+	  the resultant bitmask.
+	  For example to limit kswapd to the first 4 cores use the following:
+	  CONFIG_KSWAPD_CPU_AFFINITY_MASK="f"
+
 # For architectures that support deferred memory initialisation
 config ARCH_SUPPORTS_DEFERRED_STRUCT_PAGE_INIT
 	bool
@@ -668,3 +706,29 @@
 
 config FRAME_VECTOR
 	bool
+
+config FORCE_ALLOC_FROM_DMA_ZONE
+	bool "Force certain memory allocators to always return ZONE_DMA memory"
+	depends on ZONE_DMA
+	help
+	  Ensure certain memory allocators always return memory from ZONE_DMA.
+	  This option helps ensure that clients who require ZONE_DMA memory are
+	  always using ZONE_DMA memory.
+
+	  If unsure, say "n".
+
+config PROCESS_RECLAIM
+	bool "Enable process reclaim"
+	depends on PROC_FS
+	default n
+	help
+	 It allows reclaiming pages of a process via /proc/pid/reclaim.
+
+	 (echo file > /proc/PID/reclaim) reclaims file-backed pages only.
+	 (echo anon > /proc/PID/reclaim) reclaims anonymous pages only.
+	 (echo all > /proc/PID/reclaim) reclaims all pages.
+
+	 (echo addr size-byte > /proc/PID/reclaim) reclaims pages in
+	 (addr, addr + size-bytes) of the process.
+
+	 Any other value is ignored.
diff -ruw linux-4.4.115/mm/Kconfig.debug linux-4.4.115-fbx/mm/Kconfig.debug
--- linux-4.4.115/mm/Kconfig.debug	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/mm/Kconfig.debug	2019-01-22 16:16:28.787294256 +0100
@@ -16,8 +16,8 @@
 	select PAGE_POISONING if !ARCH_SUPPORTS_DEBUG_PAGEALLOC
 	---help---
 	  Unmap pages from the kernel linear mapping after free_pages().
-	  This results in a large slowdown, but helps to find certain types
-	  of memory corruption.
+	  Depending on runtime enablement, this results in a small or large
+	  slowdown, but helps to find certain types of memory corruption.
 
 	  For architectures which don't enable ARCH_SUPPORTS_DEBUG_PAGEALLOC,
 	  fill the pages with poison patterns after free_pages() and verify
@@ -26,5 +26,76 @@
 	  that would result in incorrect warnings of memory corruption after
 	  a resume because free pages are not saved to the suspend image.
 
+	  By default this option will have a small overhead, e.g. by not
+	  allowing the kernel mapping to be backed by large pages on some
+	  architectures. Even bigger overhead comes when the debugging is
+	  enabled by DEBUG_PAGEALLOC_ENABLE_DEFAULT or the debug_pagealloc
+	  command line parameter.
+
+config DEBUG_PAGEALLOC_ENABLE_DEFAULT
+	bool "Enable debug page memory allocations by default?"
+	default n
+	depends on DEBUG_PAGEALLOC
+	---help---
+	  Enable debug page memory allocations by default? This value
+	  can be overridden by debug_pagealloc=off|on.
+
+config SLUB_DEBUG_PANIC_ON
+	bool "Enable to Panic on SLUB corruption detection"
+	depends on SLUB_DEBUG
+	help
+	  SLUB has a resiliency feature enabled which restores bytes in
+	  order for production environments to continue to operate. In
+	  debug builds this may not be desirable, as it prevents
+	  investigating the root cause, which may lie within the cache
+	  or memory.
+
 config PAGE_POISONING
-	bool
+	bool "Poison pages after freeing"
+	select PAGE_EXTENSION
+	select PAGE_POISONING_NO_SANITY if HIBERNATION
+	---help---
+	  Fill the pages with poison patterns after free_pages() and verify
+	  the patterns before alloc_pages. The filling of the memory helps
+	  reduce the risk of information leaks from freed data. This does
+	  have a potential performance impact.
+
+	  Note that "poison" here is not the same thing as the "HWPoison"
+	  for CONFIG_MEMORY_FAILURE. This is software poisoning only.
+
+	  If unsure, say N
+
+config PAGE_POISONING_ENABLE_DEFAULT
+	bool "Enable page poisoning by default?"
+	default n
+	depends on PAGE_POISONING
+	---help---
+	  Enable page poisoning of free pages by default? This value
+	  can be overridden by page_poison=off|on. This can be used
+	  to avoid passing the kernel parameter and let page poisoning
+	  feature enabled by default.
+
+config PAGE_POISONING_NO_SANITY
+	depends on PAGE_POISONING
+	bool "Only poison, don't sanity check"
+	---help---
+	   Skip the sanity checking on alloc, only fill the pages with
+	   poison on free. This reduces some of the overhead of the
+	   poisoning feature.
+
+	   If you are only interested in sanitization, say Y. Otherwise
+	   say N.
+
+config PAGE_POISONING_ZERO
+	bool "Use zero for poisoning instead of random data"
+	depends on PAGE_POISONING
+	---help---
+	   Instead of using the existing poison value, fill the pages with
+	   zeros. This makes it harder to detect when errors are occurring
+	   due to sanitization but the zeroing at free means that it is
+	   no longer necessary to write zeros when GFP_ZERO is used on
+	   allocation.
+
+	   Enabling page poisoning with this option will disable hibernation.
+
+	   If unsure, say N
diff -ruw linux-4.4.115/mm/maccess.c linux-4.4.115-fbx/mm/maccess.c
--- linux-4.4.115/mm/maccess.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/mm/maccess.c	2019-01-22 16:16:28.799294365 +0100
@@ -96,8 +96,7 @@
 	pagefault_disable();
 
 	do {
-		ret = __copy_from_user_inatomic(dst++,
-						(const void __user __force *)src++, 1);
+		ret = __get_user(*dst++, (const char __user __force *)src++);
 	} while (dst[-1] && ret == 0 && src - unsafe_addr < count);
 
 	dst[-1] = '\0';
diff -ruw linux-4.4.115/mm/madvise.c linux-4.4.115-fbx/mm/madvise.c
--- linux-4.4.115/mm/madvise.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/mm/madvise.c	2019-10-29 09:26:25.697223279 +0100
@@ -104,7 +104,7 @@
 	pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
 	*prev = vma_merge(mm, *prev, start, end, new_flags, vma->anon_vma,
 			  vma->vm_file, pgoff, vma_policy(vma),
-			  vma->vm_userfaultfd_ctx);
+			  vma->vm_userfaultfd_ctx, vma_get_anon_name(vma));
 	if (*prev) {
 		vma = *prev;
 		goto success;
diff -ruw linux-4.4.115/mm/Makefile linux-4.4.115-fbx/mm/Makefile
--- linux-4.4.115/mm/Makefile	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/mm/Makefile	2019-01-22 16:16:28.787294256 +0100
@@ -3,8 +3,27 @@
 #
 
 KASAN_SANITIZE_slab_common.o := n
+KASAN_SANITIZE_slab.o := n
 KASAN_SANITIZE_slub.o := n
 
+# Since __builtin_frame_address does work as used, disable the warning.
+CFLAGS_usercopy.o += $(call cc-disable-warning, frame-address)
+
+# These files are disabled because they produce non-interesting and/or
+# flaky coverage that is not a function of syscall inputs. E.g. slab is out of
+# free pages, or a task is migrated between nodes.
+KCOV_INSTRUMENT_slab_common.o := n
+KCOV_INSTRUMENT_slob.o := n
+KCOV_INSTRUMENT_slab.o := n
+KCOV_INSTRUMENT_slub.o := n
+KCOV_INSTRUMENT_page_alloc.o := n
+KCOV_INSTRUMENT_debug-pagealloc.o := n
+KCOV_INSTRUMENT_kmemleak.o := n
+KCOV_INSTRUMENT_kmemcheck.o := n
+KCOV_INSTRUMENT_memcontrol.o := n
+KCOV_INSTRUMENT_mmzone.o := n
+KCOV_INSTRUMENT_vmstat.o := n
+
 mmu-y			:= nommu.o
 mmu-$(CONFIG_MMU)	:= gup.o highmem.o memory.o mincore.o \
 			   mlock.o mmap.o mprotect.o mremap.o msync.o rmap.o \
@@ -21,7 +40,7 @@
 			   mm_init.o mmu_context.o percpu.o slab_common.o \
 			   compaction.o vmacache.o \
 			   interval_tree.o list_lru.o workingset.o \
-			   debug.o $(mmu-y)
+			   debug.o $(mmu-y) showmem.o vmpressure.o
 
 obj-y += init-mm.o
 
@@ -37,9 +56,10 @@
 endif
 obj-$(CONFIG_HAVE_MEMBLOCK) += memblock.o
 
-obj-$(CONFIG_SWAP)	+= page_io.o swap_state.o swapfile.o
+obj-$(CONFIG_SWAP)	+= page_io.o swap_state.o swapfile.o swap_ratio.o
 obj-$(CONFIG_FRONTSWAP)	+= frontswap.o
 obj-$(CONFIG_ZSWAP)	+= zswap.o
+obj-$(CONFIG_ZCACHE)	+= zcache.o
 obj-$(CONFIG_HAS_DMA)	+= dmapool.o
 obj-$(CONFIG_HUGETLBFS)	+= hugetlb.o
 obj-$(CONFIG_NUMA) 	+= mempolicy.o
@@ -48,7 +68,7 @@
 obj-$(CONFIG_SLOB) += slob.o
 obj-$(CONFIG_MMU_NOTIFIER) += mmu_notifier.o
 obj-$(CONFIG_KSM) += ksm.o
-obj-$(CONFIG_PAGE_POISONING) += debug-pagealloc.o
+obj-$(CONFIG_PAGE_POISONING) += page_poison.o
 obj-$(CONFIG_SLAB) += slab.o
 obj-$(CONFIG_SLUB) += slub.o
 obj-$(CONFIG_KMEMCHECK) += kmemcheck.o
@@ -60,7 +80,7 @@
 obj-$(CONFIG_QUICKLIST) += quicklist.o
 obj-$(CONFIG_TRANSPARENT_HUGEPAGE) += huge_memory.o
 obj-$(CONFIG_PAGE_COUNTER) += page_counter.o
-obj-$(CONFIG_MEMCG) += memcontrol.o vmpressure.o
+obj-$(CONFIG_MEMCG) += memcontrol.o
 obj-$(CONFIG_MEMCG_SWAP) += swap_cgroup.o
 obj-$(CONFIG_CGROUP_HUGETLB) += hugetlb_cgroup.o
 obj-$(CONFIG_MEMORY_FAILURE) += memory-failure.o
@@ -81,3 +101,5 @@
 obj-$(CONFIG_USERFAULTFD) += userfaultfd.o
 obj-$(CONFIG_IDLE_PAGE_TRACKING) += page_idle.o
 obj-$(CONFIG_FRAME_VECTOR) += frame_vector.o
+obj-$(CONFIG_PROCESS_RECLAIM)	+= process_reclaim.o
+obj-$(CONFIG_HARDENED_USERCOPY) += usercopy.o
diff -ruw linux-4.4.115/mm/memblock.c linux-4.4.115-fbx/mm/memblock.c
--- linux-4.4.115/mm/memblock.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/mm/memblock.c	2019-01-22 16:16:28.799294365 +0100
@@ -19,6 +19,9 @@
 #include <linux/debugfs.h>
 #include <linux/seq_file.h>
 #include <linux/memblock.h>
+#include <linux/preempt.h>
+#include <linux/seqlock.h>
+#include <linux/irqflags.h>
 
 #include <asm-generic/sections.h>
 #include <linux/io.h>
@@ -31,6 +34,7 @@
 static struct memblock_region memblock_physmem_init_regions[INIT_PHYSMEM_REGIONS] __initdata_memblock;
 #endif
 
+static seqcount_t memblock_seq;
 struct memblock memblock __initdata_memblock = {
 	.memory.regions		= memblock_memory_init_regions,
 	.memory.cnt		= 1,	/* empty dummy entry */
@@ -241,8 +245,7 @@
 		 * so we use WARN_ONCE() here to see the stack trace if
 		 * fail happens.
 		 */
-		WARN_ONCE(1, "memblock: bottom-up allocation failed, "
-			     "memory hotunplug may be affected\n");
+		WARN_ONCE(1, "memblock: bottom-up allocation failed, memory hotunplug may be affected\n");
 	}
 
 	return __memblock_find_range_top_down(start, end, size, align, nid,
@@ -734,6 +737,7 @@
 		     (unsigned long long)base + size - 1,
 		     (void *)_RET_IP_);
 
+	if (base < memblock.current_limit)
 	kmemleak_free_part(__va(base), size);
 	return memblock_remove_range(&memblock.reserved, base, size);
 }
@@ -822,6 +826,27 @@
 	return memblock_setclr_flag(base, size, 1, MEMBLOCK_MIRROR);
 }
 
+/**
+ * memblock_mark_nomap - Mark a memory region with flag MEMBLOCK_NOMAP.
+ * @base: the base phys addr of the region
+ * @size: the size of the region
+ *
+ * Return 0 on success, -errno on failure.
+ */
+int __init_memblock memblock_mark_nomap(phys_addr_t base, phys_addr_t size)
+{
+	return memblock_setclr_flag(base, size, 1, MEMBLOCK_NOMAP);
+}
+
+/**
+ * memblock_clear_nomap - Clear a flag of MEMBLOCK_NOMAP memory region
+ * @base: the base phys addr of the region
+ * @size: the size of the region
+ */
+int __init_memblock memblock_clear_nomap(phys_addr_t base, phys_addr_t size)
+{
+	return memblock_setclr_flag(base, size, 0, MEMBLOCK_NOMAP);
+}
 
 /**
  * __next_reserved_mem_region - next function for for_each_reserved_region()
@@ -913,6 +938,10 @@
 		if ((flags & MEMBLOCK_MIRROR) && !memblock_is_mirror(m))
 			continue;
 
+		/* skip nomap memory unless we were asked for it explicitly */
+		if (!(flags & MEMBLOCK_NOMAP) && memblock_is_nomap(m))
+			continue;
+
 		if (!type_b) {
 			if (out_start)
 				*out_start = m_start;
@@ -1022,6 +1051,10 @@
 		if ((flags & MEMBLOCK_MIRROR) && !memblock_is_mirror(m))
 			continue;
 
+		/* skip nomap memory unless we were asked for it explicitly */
+		if (!(flags & MEMBLOCK_NOMAP) && memblock_is_nomap(m))
+			continue;
+
 		if (!type_b) {
 			if (out_start)
 				*out_start = m_start;
@@ -1151,6 +1184,7 @@
 		 * The min_count is set to 0 so that memblock allocations are
 		 * never reported as leaks.
 		 */
+		if (found < memblock.current_limit)
 		kmemleak_alloc(__va(found), size, 0, 0);
 		return found;
 	}
@@ -1491,7 +1525,7 @@
 			      (phys_addr_t)ULLONG_MAX);
 }
 
-static int __init_memblock memblock_search(struct memblock_type *type, phys_addr_t addr)
+static int __init_memblock __memblock_search(struct memblock_type *type, phys_addr_t addr)
 {
 	unsigned int left = 0, right = type->cnt;
 
@@ -1509,6 +1543,19 @@
 	return -1;
 }
 
+static int __init_memblock memblock_search(struct memblock_type *type, phys_addr_t addr)
+{
+	int ret;
+	unsigned long seq;
+
+	do {
+		seq = raw_read_seqcount_begin(&memblock_seq);
+		ret = __memblock_search(type, addr);
+	} while (unlikely(read_seqcount_retry(&memblock_seq, seq)));
+
+	return ret;
+}
+
 int __init memblock_is_reserved(phys_addr_t addr)
 {
 	return memblock_search(&memblock.reserved, addr) != -1;
@@ -1519,6 +1566,15 @@
 	return memblock_search(&memblock.memory, addr) != -1;
 }
 
+int __init_memblock memblock_is_map_memory(phys_addr_t addr)
+{
+	int i = memblock_search(&memblock.memory, addr);
+
+	if (i == -1)
+		return false;
+	return !memblock_is_nomap(&memblock.memory.regions[i]);
+}
+
 #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
 int __init_memblock memblock_search_pfn_nid(unsigned long pfn,
 			 unsigned long *start_pfn, unsigned long *end_pfn)
@@ -1558,6 +1614,14 @@
 		 memblock.memory.regions[idx].size) >= end;
 }
 
+bool __init_memblock memblock_overlaps_memory(phys_addr_t base,
+					      phys_addr_t size)
+{
+	memblock_cap_size(base, &size);
+
+	return memblock_overlaps_region(&memblock.memory, base, size);
+}
+
 /**
  * memblock_is_region_reserved - check if a region intersects reserved memory
  * @base: base of region to check
@@ -1674,6 +1738,37 @@
 	memblock_can_resize = 1;
 }
 
+static unsigned long __init_memblock
+memblock_resize_late(int begin, unsigned long flags)
+{
+	static int memblock_can_resize_old;
+
+	if (begin) {
+		preempt_disable();
+		local_irq_save(flags);
+		memblock_can_resize_old = memblock_can_resize;
+		memblock_can_resize = 0;
+		raw_write_seqcount_begin(&memblock_seq);
+	} else {
+		raw_write_seqcount_end(&memblock_seq);
+		memblock_can_resize = memblock_can_resize_old;
+		local_irq_restore(flags);
+		preempt_enable();
+	}
+
+	return flags;
+}
+
+unsigned long __init_memblock memblock_region_resize_late_begin(void)
+{
+	return memblock_resize_late(1, 0);
+}
+
+void __init_memblock memblock_region_resize_late_end(unsigned long flags)
+{
+	memblock_resize_late(0, flags);
+}
+
 static int __init early_memblock(char *p)
 {
 	if (p && strstr(p, "debug"))
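The memblock_search()/memblock_resize_late() pair added above is a sequence-count protocol: the writer makes memblock_seq odd while the region array is being resized, and lockless readers retry whenever they observed an odd count or the count changed under them. A minimal stand-alone C11 model of that retry discipline (every name below is invented for illustration; the kernel's seqcount_t carries stronger barriers and lockdep instrumentation):

#include <stdatomic.h>
#include <stdio.h>

static atomic_uint seq;			/* even: idle, odd: writer active */
static int regions[16];

static unsigned int read_begin(void)
{
	unsigned int s;

	/* Wait for an even count, i.e. no writer in its critical section. */
	while ((s = atomic_load_explicit(&seq, memory_order_acquire)) & 1)
		;
	return s;
}

static int read_retry(unsigned int s)
{
	atomic_thread_fence(memory_order_acquire);
	return atomic_load_explicit(&seq, memory_order_relaxed) != s;
}

static int search(int key)
{
	unsigned int s;
	int ret;

	do {				/* mirrors memblock_search()'s loop */
		s = read_begin();
		ret = regions[key & 15];
	} while (read_retry(s));
	return ret;
}

static void resize(int key, int val)
{
	atomic_fetch_add_explicit(&seq, 1, memory_order_release);	/* odd */
	regions[key & 15] = val;
	atomic_fetch_add_explicit(&seq, 1, memory_order_release);	/* even */
}

int main(void)
{
	resize(3, 42);
	printf("%d\n", search(3));	/* prints 42 */
	return 0;
}

memblock_region_resize_late_begin()/end() additionally disable preemption and interrupts, which is what makes the plain (non-atomic) array update safe on the writer side.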
diff -ruw linux-4.4.115/mm/memory.c linux-4.4.115-fbx/mm/memory.c
--- linux-4.4.115/mm/memory.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/mm/memory.c	2019-10-29 09:26:25.705223357 +0100
@@ -2619,7 +2619,8 @@
 	}
 
 	swap_free(entry);
-	if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
+	if ((PageSwapCache(page) && vm_swap_full(page_swap_info(page))) ||
+		(vma->vm_flags & VM_LOCKED) || PageMlocked(page))
 		try_to_free_swap(page);
 	unlock_page(page);
 	if (page != swapcache) {
@@ -2833,7 +2834,7 @@
 }
 
 static unsigned long fault_around_bytes __read_mostly =
-	rounddown_pow_of_two(65536);
+	rounddown_pow_of_two(4096);
 
 #ifdef CONFIG_DEBUG_FS
 static int fault_around_bytes_get(void *data, u64 *val)
diff -ruw linux-4.4.115/mm/mempool.c linux-4.4.115-fbx/mm/mempool.c
--- linux-4.4.115/mm/mempool.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/mm/mempool.c	2019-01-22 16:16:28.811294474 +0100
@@ -104,20 +104,16 @@
 
 static void kasan_poison_element(mempool_t *pool, void *element)
 {
-	if (pool->alloc == mempool_alloc_slab)
-		kasan_slab_free(pool->pool_data, element);
-	if (pool->alloc == mempool_kmalloc)
-		kasan_kfree(element);
+	if (pool->alloc == mempool_alloc_slab || pool->alloc == mempool_kmalloc)
+		kasan_poison_kfree(element);
 	if (pool->alloc == mempool_alloc_pages)
 		kasan_free_pages(element, (unsigned long)pool->pool_data);
 }
 
-static void kasan_unpoison_element(mempool_t *pool, void *element)
+static void kasan_unpoison_element(mempool_t *pool, void *element, gfp_t flags)
 {
-	if (pool->alloc == mempool_alloc_slab)
-		kasan_slab_alloc(pool->pool_data, element);
-	if (pool->alloc == mempool_kmalloc)
-		kasan_krealloc(element, (size_t)pool->pool_data);
+	if (pool->alloc == mempool_alloc_slab || pool->alloc == mempool_kmalloc)
+		kasan_unpoison_slab(element);
 	if (pool->alloc == mempool_alloc_pages)
 		kasan_alloc_pages(element, (unsigned long)pool->pool_data);
 }
@@ -130,12 +126,12 @@
 	pool->elements[pool->curr_nr++] = element;
 }
 
-static void *remove_element(mempool_t *pool)
+static void *remove_element(mempool_t *pool, gfp_t flags)
 {
 	void *element = pool->elements[--pool->curr_nr];
 
 	BUG_ON(pool->curr_nr < 0);
-	kasan_unpoison_element(pool, element);
+	kasan_unpoison_element(pool, element, flags);
 	check_element(pool, element);
 	return element;
 }
@@ -154,7 +150,7 @@
 		return;
 
 	while (pool->curr_nr) {
-		void *element = remove_element(pool);
+		void *element = remove_element(pool, GFP_KERNEL);
 		pool->free(element, pool->pool_data);
 	}
 	kfree(pool->elements);
@@ -250,7 +246,7 @@
 	spin_lock_irqsave(&pool->lock, flags);
 	if (new_min_nr <= pool->min_nr) {
 		while (new_min_nr < pool->curr_nr) {
-			element = remove_element(pool);
+			element = remove_element(pool, GFP_KERNEL);
 			spin_unlock_irqrestore(&pool->lock, flags);
 			pool->free(element, pool->pool_data);
 			spin_lock_irqsave(&pool->lock, flags);
@@ -336,7 +332,7 @@
 
 	spin_lock_irqsave(&pool->lock, flags);
 	if (likely(pool->curr_nr)) {
-		element = remove_element(pool);
+		element = remove_element(pool, gfp_temp);
 		spin_unlock_irqrestore(&pool->lock, flags);
 		/* paired with rmb in mempool_free(), read comment there */
 		smp_wmb();
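The KASAN switch in the mempool hunks above implements one idea: an element parked in the pool is poisoned, so any use-after-free access trips a check, and it is unpoisoned on the way back out. A self-contained sketch of the same idea using a plain byte pattern (POISON, ELEM_SZ and both helpers are made up here; real KASAN tracks state in shadow memory rather than overwriting the buffer):

#include <assert.h>
#include <stdlib.h>
#include <string.h>

#define POISON	0xaa		/* illustrative pattern, not KASAN's */
#define ELEM_SZ	64

static void poison_element(void *element)
{
	memset(element, POISON, ELEM_SZ);	/* parked: must not be touched */
}

static void unpoison_element(void *element)
{
	unsigned char *p = element;

	for (size_t i = 0; i < ELEM_SZ; i++)	/* catch writes made while parked */
		assert(p[i] == POISON);
	memset(element, 0, ELEM_SZ);
}

int main(void)
{
	void *element = malloc(ELEM_SZ);

	poison_element(element);	/* add_element() path */
	unpoison_element(element);	/* remove_element() path */
	free(element);
	return 0;
}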
diff -ruw linux-4.4.115/mm/migrate.c linux-4.4.115-fbx/mm/migrate.c
--- linux-4.4.115/mm/migrate.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/mm/migrate.c	2019-10-29 09:26:25.709223396 +0100
@@ -31,6 +31,7 @@
 #include <linux/vmalloc.h>
 #include <linux/security.h>
 #include <linux/backing-dev.h>
+#include <linux/compaction.h>
 #include <linux/syscalls.h>
 #include <linux/hugetlb.h>
 #include <linux/hugetlb_cgroup.h>
@@ -38,6 +39,7 @@
 #include <linux/balloon_compaction.h>
 #include <linux/mmu_notifier.h>
 #include <linux/page_idle.h>
+#include <linux/page_owner.h>
 #include <linux/ptrace.h>
 
 #include <asm/tlbflush.h>
@@ -73,6 +75,81 @@
 	return 0;
 }
 
+bool isolate_movable_page(struct page *page, isolate_mode_t mode)
+{
+	struct address_space *mapping;
+
+	/*
+	 * Avoid burning cycles with pages that are yet under __free_pages(),
+	 * or just got freed under us.
+	 *
+	 * In case we 'win' a race for a movable page being freed under us
+	 * and raise its refcount, preventing __free_pages() from doing its
+	 * job, the put_page() at the end of this block will take care of
+	 * releasing this page, thus avoiding a nasty leak.
+	 */
+	if (unlikely(!get_page_unless_zero(page)))
+		goto out;
+
+	/*
+	 * Check PageMovable before taking the PG_lock, because the page's
+	 * owner assumes nobody touches the PG_lock of a newly allocated
+	 * page, so unconditionally grabbing the lock would break the
+	 * owner's assumptions.
+	 */
+	if (unlikely(!__PageMovable(page)))
+		goto out_putpage;
+	/*
+	 * As movable pages are not isolated from LRU lists, concurrent
+	 * compaction threads can race against page migration functions
+	 * as well as against a page being released.
+	 *
+	 * In order to avoid an already isolated movable page being
+	 * (wrongly) re-isolated while it is under migration, and to avoid
+	 * attempting to isolate pages being released, let's make sure we
+	 * hold the page lock before proceeding with the movable page
+	 * isolation steps.
+	 */
+	if (unlikely(!trylock_page(page)))
+		goto out_putpage;
+
+	if (!PageMovable(page) || PageIsolated(page))
+		goto out_no_isolated;
+
+	mapping = page_mapping(page);
+	VM_BUG_ON_PAGE(!mapping, page);
+
+	if (!mapping->a_ops->isolate_page(page, mode))
+		goto out_no_isolated;
+
+	/* Driver shouldn't use PG_isolated bit of page->flags */
+	WARN_ON_ONCE(PageIsolated(page));
+	__SetPageIsolated(page);
+	unlock_page(page);
+
+	return true;
+
+out_no_isolated:
+	unlock_page(page);
+out_putpage:
+	put_page(page);
+out:
+	return false;
+}
+
+/* It should be called on a page which is PG_movable */
+void putback_movable_page(struct page *page)
+{
+	struct address_space *mapping;
+
+	VM_BUG_ON_PAGE(!PageLocked(page), page);
+	VM_BUG_ON_PAGE(!PageMovable(page), page);
+	VM_BUG_ON_PAGE(!PageIsolated(page), page);
+
+	mapping = page_mapping(page);
+	mapping->a_ops->putback_page(page);
+	__ClearPageIsolated(page);
+}
+
 /*
  * Put previously isolated pages back onto the appropriate lists
  * from where they were once taken off for compaction/migration.
@@ -94,12 +171,25 @@
 		list_del(&page->lru);
 		dec_zone_page_state(page, NR_ISOLATED_ANON +
 				page_is_file_cache(page));
-		if (unlikely(isolated_balloon_page(page)))
-			balloon_page_putback(page);
+		/*
+		 * We isolated a non-LRU movable page, so here we can use
+		 * __PageMovable: an LRU page's mapping cannot have
+		 * PAGE_MAPPING_MOVABLE set.
+		 */
+		if (unlikely(__PageMovable(page))) {
+			VM_BUG_ON_PAGE(!PageIsolated(page), page);
+			lock_page(page);
+			if (PageMovable(page))
+				putback_movable_page(page);
 		else
+				__ClearPageIsolated(page);
+			unlock_page(page);
+			put_page(page);
+		} else {
 			putback_lru_page(page);
 	}
 }
+}
 
 /*
  * Restore a potential migration pte to a working pte entry
@@ -580,6 +670,8 @@
 	 */
 	if (PageWriteback(newpage))
 		end_page_writeback(newpage);
+
+	copy_page_owner(page, newpage);
 }
 EXPORT_SYMBOL(migrate_page_copy);
 
@@ -588,7 +680,7 @@
  ***********************************************************/
 
 /*
- * Common logic to directly migrate a single page suitable for
+ * Common logic to directly migrate a single LRU page suitable for
  * pages that do not use PagePrivate/PagePrivate2.
  *
  * Pages are locked upon entry and exit.
@@ -751,24 +843,47 @@
 				enum migrate_mode mode)
 {
 	struct address_space *mapping;
-	int rc;
+	int rc = -EAGAIN;
+	bool is_lru = !__PageMovable(page);
 
 	VM_BUG_ON_PAGE(!PageLocked(page), page);
 	VM_BUG_ON_PAGE(!PageLocked(newpage), newpage);
 
 	mapping = page_mapping(page);
+
+	if (likely(is_lru)) {
 	if (!mapping)
 		rc = migrate_page(mapping, newpage, page, mode);
 	else if (mapping->a_ops->migratepage)
 		/*
-		 * Most pages have a mapping and most filesystems provide a
-		 * migratepage callback. Anonymous pages are part of swap
-		 * space which also has its own migratepage callback. This
-		 * is the most common path for page migration.
+			 * Most pages have a mapping and most filesystems
+			 * provide a migratepage callback. Anonymous pages
+			 * are part of swap space which also has its own
+			 * migratepage callback. This is the most common path
+			 * for page migration.
 		 */
-		rc = mapping->a_ops->migratepage(mapping, newpage, page, mode);
+			rc = mapping->a_ops->migratepage(mapping, newpage,
+							page, mode);
 	else
-		rc = fallback_migrate_page(mapping, newpage, page, mode);
+			rc = fallback_migrate_page(mapping, newpage,
+							page, mode);
+	} else {
+		/*
+		 * A non-LRU page could have been released after the
+		 * isolation step; in that case, we shouldn't try to
+		 * migrate it.
+		 */
+		VM_BUG_ON_PAGE(!PageIsolated(page), page);
+		if (!PageMovable(page)) {
+			rc = MIGRATEPAGE_SUCCESS;
+			__ClearPageIsolated(page);
+			goto out;
+		}
+
+		rc = mapping->a_ops->migratepage(mapping, newpage,
+						page, mode);
+		WARN_ON_ONCE(rc == MIGRATEPAGE_SUCCESS &&
+			!PageIsolated(page));
+	}
 
 	/*
 	 * When successful, old pagecache page->mapping must be cleared before
@@ -776,9 +891,25 @@
 	 */
 	if (rc == MIGRATEPAGE_SUCCESS) {
 		set_page_memcg(page, NULL);
-		if (!PageAnon(page))
+		if (__PageMovable(page)) {
+			VM_BUG_ON_PAGE(!PageIsolated(page), page);
+
+			/*
+			 * We clear PG_movable under the page lock so that no
+			 * compactor can try to migrate this page.
+			 */
+			__ClearPageIsolated(page);
+		}
+
+		/*
+		 * Anonymous and movable page->mapping will be cleared by
+		 * free_pages_prepare, so don't reset it here; keeping it
+		 * lets type checks such as PageAnon still work.
+		 */
+		if (!PageMappingFlags(page))
 			page->mapping = NULL;
 	}
+out:
 	return rc;
 }
 
@@ -788,6 +919,7 @@
 	int rc = -EAGAIN;
 	int page_was_mapped = 0;
 	struct anon_vma *anon_vma = NULL;
+	bool is_lru = !__PageMovable(page);
 
 	if (!trylock_page(page)) {
 		if (!force || mode == MIGRATE_ASYNC)
@@ -856,15 +988,8 @@
 	if (unlikely(!trylock_page(newpage)))
 		goto out_unlock;
 
-	if (unlikely(isolated_balloon_page(page))) {
-		/*
-		 * A ballooned page does not need any special attention from
-		 * physical to virtual reverse mapping procedures.
-		 * Skip any attempt to unmap PTEs or to remap swap cache,
-		 * in order to avoid burning cycles at rmap level, and perform
-		 * the page migration right away (proteced by page lock).
-		 */
-		rc = balloon_page_migrate(newpage, page, mode);
+	if (unlikely(!is_lru)) {
+		rc = move_to_new_page(newpage, page, mode);
 		goto out_unlock_both;
 	}
 
@@ -891,7 +1016,7 @@
 		VM_BUG_ON_PAGE(PageAnon(page) && !PageKsm(page) && !anon_vma,
 				page);
 		try_to_unmap(page,
-			TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS);
+			TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS, NULL);
 		page_was_mapped = 1;
 	}
 
@@ -910,6 +1035,19 @@
 		put_anon_vma(anon_vma);
 	unlock_page(page);
 out:
+	/*
+	 * If migration was successful, drop our reference to the newpage;
+	 * this will not free the page, because the new page owner took a
+	 * reference of its own. If it is an LRU page, also put it back on
+	 * the LRU list here.
+	 */
+	if (rc == MIGRATEPAGE_SUCCESS) {
+		if (unlikely(__PageMovable(newpage)))
+			put_page(newpage);
+		else
+			putback_lru_page(newpage);
+	}
+
 	return rc;
 }
 
@@ -943,6 +1081,18 @@
 
 	if (page_count(page) == 1) {
 		/* page was freed from under us. So we are done. */
+		ClearPageActive(page);
+		ClearPageUnevictable(page);
+		if (unlikely(__PageMovable(page))) {
+			lock_page(page);
+			if (!PageMovable(page))
+				__ClearPageIsolated(page);
+			unlock_page(page);
+		}
+		if (put_new_page)
+			put_new_page(newpage, private);
+		else
+			put_page(newpage);
 		goto out;
 	}
 
@@ -951,8 +1101,9 @@
 			goto out;
 
 	rc = __unmap_and_move(page, newpage, force, mode);
-	if (rc == MIGRATEPAGE_SUCCESS)
-		put_new_page = NULL;
+	if (rc == MIGRATEPAGE_SUCCESS) {
+		set_page_owner_migrate_reason(newpage, reason);
+	}
 
 out:
 	if (rc != -EAGAIN) {
@@ -965,33 +1116,45 @@
 		list_del(&page->lru);
 		dec_zone_page_state(page, NR_ISOLATED_ANON +
 				page_is_file_cache(page));
-		/* Soft-offlined page shouldn't go through lru cache list */
-		if (reason == MR_MEMORY_FAILURE && rc == MIGRATEPAGE_SUCCESS) {
+	}
+
 			/*
-			 * With this release, we free successfully migrated
-			 * page and set PG_HWPoison on just freed page
-			 * intentionally. Although it's rather weird, it's how
-			 * HWPoison flag works at the moment.
+	 * If migration was successful, release the reference grabbed
+	 * during isolation. Otherwise, restore the page to the right
+	 * list unless we want to retry.
 			 */
+	if (rc == MIGRATEPAGE_SUCCESS) {
 			put_page(page);
+		if (reason == MR_MEMORY_FAILURE) {
+			/*
+			 * Set PG_HWPoison on just freed page
+			 * intentionally. Although it's rather weird,
+			 * it's how HWPoison flag works at the moment.
+			 */
 			if (!test_set_page_hwpoison(page))
 				num_poisoned_pages_inc();
-		} else
+		}
+	} else {
+		if (rc != -EAGAIN) {
+			if (likely(!__PageMovable(page))) {
 			putback_lru_page(page);
+				goto put_new;
 	}
 
-	/*
-	 * If migration was not successful and there's a freeing callback, use
-	 * it.  Otherwise, putback_lru_page() will drop the reference grabbed
-	 * during isolation.
-	 */
+			lock_page(page);
+			if (PageMovable(page))
+				putback_movable_page(page);
+			else
+				__ClearPageIsolated(page);
+			unlock_page(page);
+			put_page(page);
+		}
+put_new:
 	if (put_new_page)
 		put_new_page(newpage, private);
-	else if (unlikely(__is_movable_balloon_page(newpage))) {
-		/* drop our reference, page already in the balloon */
+		else
 		put_page(newpage);
-	} else
-		putback_lru_page(newpage);
+	}
 
 	if (result) {
 		if (rc)
@@ -1023,7 +1186,7 @@
 static int unmap_and_move_huge_page(new_page_t get_new_page,
 				free_page_t put_new_page, unsigned long private,
 				struct page *hpage, int force,
-				enum migrate_mode mode)
+				enum migrate_mode mode, int reason)
 {
 	int rc = -EAGAIN;
 	int *result = NULL;
@@ -1061,7 +1224,7 @@
 
 	if (page_mapped(hpage)) {
 		try_to_unmap(hpage,
-			TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS);
+			TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS, NULL);
 		page_was_mapped = 1;
 	}
 
@@ -1081,6 +1244,7 @@
 	if (rc == MIGRATEPAGE_SUCCESS) {
 		hugetlb_cgroup_migrate(hpage, new_hpage);
 		put_new_page = NULL;
+		set_page_owner_migrate_reason(new_hpage, reason);
 	}
 
 	unlock_page(hpage);
@@ -1141,6 +1305,8 @@
 	int swapwrite = current->flags & PF_SWAPWRITE;
 	int rc;
 
+	trace_mm_migrate_pages_start(mode, reason);
+
 	if (!swapwrite)
 		current->flags |= PF_SWAPWRITE;
 
@@ -1153,7 +1319,7 @@
 			if (PageHuge(page))
 				rc = unmap_and_move_huge_page(get_new_page,
 						put_new_page, private, page,
-						pass > 2, mode);
+						pass > 2, mode, reason);
 			else
 				rc = unmap_and_move(get_new_page, put_new_page,
 						private, page, pass > 2, mode,
@@ -1837,6 +2003,7 @@
 	set_page_memcg(new_page, page_memcg(page));
 	set_page_memcg(page, NULL);
 	page_remove_rmap(page);
+	set_page_owner_migrate_reason(new_page, MR_NUMA_MISPLACED);
 
 	spin_unlock(ptl);
 	mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
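The non-LRU movable-page support above boils down to a three-hook driver contract: isolate_page() claims a page under the page lock, migratepage() copies the contents and drops the isolated state, and putback_page() undoes an isolation that never led to a migration. A compact userspace model of the call order (the movable_ops struct and the drv_* functions are hypothetical; only the sequencing mirrors the patch):

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

struct page { char data[64]; bool isolated; };

struct movable_ops {			/* models the a_ops hooks used above */
	bool (*isolate_page)(struct page *page);
	int  (*migratepage)(struct page *newpage, struct page *page);
	void (*putback_page)(struct page *page);
};

static bool drv_isolate(struct page *page)
{
	if (page->isolated)		/* already claimed, as PageIsolated() */
		return false;
	page->isolated = true;
	return true;
}

static int drv_migrate(struct page *newpage, struct page *page)
{
	memcpy(newpage->data, page->data, sizeof(newpage->data));
	page->isolated = false;		/* mirrors __ClearPageIsolated() */
	return 0;
}

static void drv_putback(struct page *page)
{
	page->isolated = false;		/* isolation abandoned, no migration */
}

static const struct movable_ops ops = { drv_isolate, drv_migrate, drv_putback };

int main(void)
{
	struct page page = { "payload", false }, newpage = { "", false };

	if (ops.isolate_page(&page) && ops.migratepage(&newpage, &page) == 0)
		printf("migrated: %s\n", newpage.data);
	else
		ops.putback_page(&page);
	return 0;
}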
diff -ruw linux-4.4.115/mm/mlock.c linux-4.4.115-fbx/mm/mlock.c
--- linux-4.4.115/mm/mlock.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/mm/mlock.c	2019-10-29 09:26:25.713223435 +0100
@@ -513,7 +513,7 @@
 	pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
 	*prev = vma_merge(mm, *prev, start, end, newflags, vma->anon_vma,
 			  vma->vm_file, pgoff, vma_policy(vma),
-			  vma->vm_userfaultfd_ctx);
+			  vma->vm_userfaultfd_ctx, vma_get_anon_name(vma));
 	if (*prev) {
 		vma = *prev;
 		goto success;
diff -ruw linux-4.4.115/mm/mmap.c linux-4.4.115-fbx/mm/mmap.c
--- linux-4.4.115/mm/mmap.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/mm/mmap.c	2019-10-29 09:26:25.713223435 +0100
@@ -48,6 +48,10 @@
 #include <asm/tlb.h>
 #include <asm/mmu_context.h>
 
+#ifdef CONFIG_MSM_APP_SETTINGS
+#include <asm/app_api.h>
+#endif
+
 #include "internal.h"
 
 #ifndef arch_mmap_check
@@ -58,6 +62,18 @@
 #define arch_rebalance_pgtables(addr, len)		(addr)
 #endif
 
+#ifdef CONFIG_HAVE_ARCH_MMAP_RND_BITS
+const int mmap_rnd_bits_min = CONFIG_ARCH_MMAP_RND_BITS_MIN;
+const int mmap_rnd_bits_max = CONFIG_ARCH_MMAP_RND_BITS_MAX;
+int mmap_rnd_bits __read_mostly = CONFIG_ARCH_MMAP_RND_BITS;
+#endif
+#ifdef CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS
+const int mmap_rnd_compat_bits_min = CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MIN;
+const int mmap_rnd_compat_bits_max = CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MAX;
+int mmap_rnd_compat_bits __read_mostly = CONFIG_ARCH_MMAP_RND_COMPAT_BITS;
+#endif
+
 static void unmap_region(struct mm_struct *mm,
 		struct vm_area_struct *vma, struct vm_area_struct *prev,
 		unsigned long start, unsigned long end);
@@ -190,6 +206,13 @@
 		free += global_page_state(NR_SLAB_RECLAIMABLE);
 
 		/*
+		 * Part of the kernel memory, which can be released
+		 * under memory pressure.
+		 */
+		free += global_page_state(
+			NR_INDIRECTLY_RECLAIMABLE_BYTES) >> PAGE_SHIFT;
+
+		/*
 		 * Leave reserved pages. The pages are not for anonymous pages.
 		 */
 		if (free <= totalreserve_pages)
@@ -939,7 +962,8 @@
  */
 static inline int is_mergeable_vma(struct vm_area_struct *vma,
 				struct file *file, unsigned long vm_flags,
-				struct vm_userfaultfd_ctx vm_userfaultfd_ctx)
+				struct vm_userfaultfd_ctx vm_userfaultfd_ctx,
+				const char __user *anon_name)
 {
 	/*
 	 * VM_SOFTDIRTY should not prevent from VMA merging, if we
@@ -957,6 +981,8 @@
 		return 0;
 	if (!is_mergeable_vm_userfaultfd_ctx(vma, vm_userfaultfd_ctx))
 		return 0;
+	if (vma_get_anon_name(vma) != anon_name)
+		return 0;
 	return 1;
 }
 
@@ -989,9 +1015,10 @@
 can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
 		     struct anon_vma *anon_vma, struct file *file,
 		     pgoff_t vm_pgoff,
-		     struct vm_userfaultfd_ctx vm_userfaultfd_ctx)
+		     struct vm_userfaultfd_ctx vm_userfaultfd_ctx,
+		     const char __user *anon_name)
 {
-	if (is_mergeable_vma(vma, file, vm_flags, vm_userfaultfd_ctx) &&
+	if (is_mergeable_vma(vma, file, vm_flags, vm_userfaultfd_ctx, anon_name) &&
 	    is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
 		if (vma->vm_pgoff == vm_pgoff)
 			return 1;
@@ -1010,9 +1037,10 @@
 can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
 		    struct anon_vma *anon_vma, struct file *file,
 		    pgoff_t vm_pgoff,
-		    struct vm_userfaultfd_ctx vm_userfaultfd_ctx)
+		    struct vm_userfaultfd_ctx vm_userfaultfd_ctx,
+		    const char __user *anon_name)
 {
-	if (is_mergeable_vma(vma, file, vm_flags, vm_userfaultfd_ctx) &&
+	if (is_mergeable_vma(vma, file, vm_flags, vm_userfaultfd_ctx, anon_name) &&
 	    is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
 		pgoff_t vm_pglen;
 		vm_pglen = vma_pages(vma);
@@ -1023,9 +1051,9 @@
 }
 
 /*
- * Given a mapping request (addr,end,vm_flags,file,pgoff), figure out
- * whether that can be merged with its predecessor or its successor.
- * Or both (it neatly fills a hole).
+ * Given a mapping request (addr,end,vm_flags,file,pgoff,anon_name),
+ * figure out whether that can be merged with its predecessor or its
+ * successor.  Or both (it neatly fills a hole).
  *
  * In most cases - when called for mmap, brk or mremap - [addr,end) is
  * certain not to be mapped by the time vma_merge is called; but when
@@ -1056,7 +1084,8 @@
 			unsigned long end, unsigned long vm_flags,
 			struct anon_vma *anon_vma, struct file *file,
 			pgoff_t pgoff, struct mempolicy *policy,
-			struct vm_userfaultfd_ctx vm_userfaultfd_ctx)
+			struct vm_userfaultfd_ctx vm_userfaultfd_ctx,
+			const char __user *anon_name)
 {
 	pgoff_t pglen = (end - addr) >> PAGE_SHIFT;
 	struct vm_area_struct *area, *next;
@@ -1084,7 +1113,8 @@
 			mpol_equal(vma_policy(prev), policy) &&
 			can_vma_merge_after(prev, vm_flags,
 					    anon_vma, file, pgoff,
-					    vm_userfaultfd_ctx)) {
+					    vm_userfaultfd_ctx,
+					    anon_name)) {
 		/*
 		 * OK, it can.  Can we now merge in the successor as well?
 		 */
@@ -1093,7 +1123,8 @@
 				can_vma_merge_before(next, vm_flags,
 						     anon_vma, file,
 						     pgoff+pglen,
-						     vm_userfaultfd_ctx) &&
+						     vm_userfaultfd_ctx,
+						     anon_name) &&
 				is_mergeable_anon_vma(prev->anon_vma,
 						      next->anon_vma, NULL)) {
 							/* cases 1, 6 */
@@ -1115,7 +1146,8 @@
 			mpol_equal(policy, vma_policy(next)) &&
 			can_vma_merge_before(next, vm_flags,
 					     anon_vma, file, pgoff+pglen,
-					     vm_userfaultfd_ctx)) {
+					     vm_userfaultfd_ctx,
+					     anon_name)) {
 		if (prev && addr < prev->vm_end)	/* case 4 */
 			err = vma_adjust(prev, prev->vm_start,
 				addr, prev->vm_pgoff, NULL);
@@ -1290,6 +1322,11 @@
 	if (!len)
 		return -EINVAL;
 
+#ifdef CONFIG_MSM_APP_SETTINGS
+	if (use_app_setting)
+		apply_app_setting_bit(file);
+#endif
+
 	/*
 	 * Does the application expect PROT_READ to imply PROT_EXEC?
 	 *
@@ -1599,7 +1636,7 @@
 	 * Can we just expand an old mapping?
 	 */
 	vma = vma_merge(mm, prev, addr, addr + len, vm_flags,
-			NULL, file, pgoff, NULL, NULL_VM_UFFD_CTX);
+			NULL, file, pgoff, NULL, NULL_VM_UFFD_CTX, NULL);
 	if (vma)
 		goto out;
 
@@ -2649,6 +2686,7 @@
 
 	return 0;
 }
+EXPORT_SYMBOL(do_munmap);
 
 int vm_munmap(unsigned long start, size_t len)
 {
@@ -2682,8 +2720,7 @@
 	unsigned long ret = -EINVAL;
 	struct file *file;
 
-	pr_warn_once("%s (%d) uses deprecated remap_file_pages() syscall. "
-			"See Documentation/vm/remap_file_pages.txt.\n",
+	pr_warn_once("%s (%d) uses deprecated remap_file_pages() syscall. See Documentation/vm/remap_file_pages.txt.\n",
 			current->comm, current->pid);
 
 	if (prot)
@@ -2826,7 +2863,7 @@
 
 	/* Can we just expand an old private anonymous mapping? */
 	vma = vma_merge(mm, prev, addr, addr + len, flags,
-			NULL, NULL, pgoff, NULL, NULL_VM_UFFD_CTX);
+			NULL, NULL, pgoff, NULL, NULL_VM_UFFD_CTX, NULL);
 	if (vma)
 		goto out;
 
@@ -2984,7 +3021,7 @@
 		return NULL;	/* should never get here */
 	new_vma = vma_merge(mm, prev, addr, addr + len, vma->vm_flags,
 			    vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma),
-			    vma->vm_userfaultfd_ctx);
+			    vma->vm_userfaultfd_ctx, vma_get_anon_name(vma));
 	if (new_vma) {
 		/*
 		 * Source vma may have been merged into new_vma
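All of the anon_name plumbing threaded through is_mergeable_vma() and vma_merge() above reduces to one extra predicate: two adjacent VMAs may merge only if their anon-name pointers are identical, compared by address rather than by string contents. A small model of that check (the struct layout is invented for illustration):

#include <stdbool.h>
#include <stdio.h>

struct vma {
	unsigned long flags;
	const void *file;
	const char *anon_name;	/* compared by pointer, as in the patch */
};

static bool is_mergeable(const struct vma *a, const struct vma *b)
{
	return a->flags == b->flags &&
	       a->file == b->file &&
	       a->anon_name == b->anon_name;
}

int main(void)
{
	static const char name[] = "dalvik-heap";
	struct vma a = { 0x73, NULL, name };
	struct vma b = { 0x73, NULL, name };
	struct vma c = { 0x73, NULL, NULL };

	printf("%d %d\n", is_mergeable(&a, &b), is_mergeable(&a, &c)); /* 1 0 */
	return 0;
}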
diff -ruw linux-4.4.115/mm/mprotect.c linux-4.4.115-fbx/mm/mprotect.c
--- linux-4.4.115/mm/mprotect.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/mm/mprotect.c	2019-10-29 09:26:25.713223435 +0100
@@ -294,7 +294,7 @@
 	pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
 	*pprev = vma_merge(mm, *pprev, start, end, newflags,
 			   vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma),
-			   vma->vm_userfaultfd_ctx);
+			   vma->vm_userfaultfd_ctx, vma_get_anon_name(vma));
 	if (*pprev) {
 		vma = *pprev;
 		goto success;
diff -ruw linux-4.4.115/mm/nobootmem.c linux-4.4.115-fbx/mm/nobootmem.c
--- linux-4.4.115/mm/nobootmem.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/mm/nobootmem.c	2019-01-22 16:16:28.815294510 +0100
@@ -76,7 +76,7 @@
  * down, but we are still initializing the system.  Pages are given directly
  * to the page allocator, no bootmem metadata is updated because it is gone.
  */
-void __init free_bootmem_late(unsigned long addr, unsigned long size)
+void free_bootmem_late(unsigned long addr, unsigned long size)
 {
 	unsigned long cursor, end;
 
diff -ruw linux-4.4.115/mm/oom_kill.c linux-4.4.115-fbx/mm/oom_kill.c
--- linux-4.4.115/mm/oom_kill.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/mm/oom_kill.c	2019-10-29 09:26:25.717223474 +0100
@@ -350,7 +350,7 @@
  * State information includes task's pid, uid, tgid, vm size, rss, nr_ptes,
  * swapents, oom_score_adj value, and name.
  */
-static void dump_tasks(struct mem_cgroup *memcg, const nodemask_t *nodemask)
+void dump_tasks(struct mem_cgroup *memcg, const nodemask_t *nodemask)
 {
 	struct task_struct *p;
 	struct task_struct *task;
@@ -386,8 +386,7 @@
 static void dump_header(struct oom_control *oc, struct task_struct *p,
 			struct mem_cgroup *memcg)
 {
-	pr_warning("%s invoked oom-killer: gfp_mask=0x%x, order=%d, "
-		"oom_score_adj=%hd\n",
+	pr_warning("%s invoked oom-killer: gfp_mask=0x%x, order=%d, oom_score_adj=%hd\n",
 		current->comm, oc->gfp_mask, oc->order,
 		current->signal->oom_score_adj);
 	cpuset_print_current_mems_allowed();
diff -ruw linux-4.4.115/mm/page_alloc.c linux-4.4.115-fbx/mm/page_alloc.c
--- linux-4.4.115/mm/page_alloc.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/mm/page_alloc.c	2019-10-29 09:26:25.725223553 +0100
@@ -114,13 +114,6 @@
 unsigned long totalram_pages __read_mostly;
 unsigned long totalreserve_pages __read_mostly;
 unsigned long totalcma_pages __read_mostly;
-/*
- * When calculating the number of globally allowed dirty pages, there
- * is a certain number of per-zone reserves that should not be
- * considered dirtyable memory.  This is the sum of those reserves
- * over all existing zones that contribute dirtyable memory.
- */
-unsigned long dirty_balance_reserve __read_mostly;
 
 int percpu_pagelist_fraction;
 gfp_t gfp_allowed_mask __read_mostly = GFP_BOOT_MASK;
@@ -230,6 +223,20 @@
 };
 
 static void free_compound_page(struct page *page);
+
+char * const migratetype_names[MIGRATE_TYPES] = {
+	"Unmovable",
+	"Movable",
+	"Reclaimable",
+#ifdef CONFIG_CMA
+	"CMA",
+#endif
+	"HighAtomic",
+#ifdef CONFIG_MEMORY_ISOLATION
+	"Isolate",
+#endif
+};
+
 compound_page_dtor * const compound_page_dtors[] = {
 	NULL,
 	free_compound_page,
@@ -238,9 +245,21 @@
 #endif
 };
 
+/*
+ * Try to keep at least this much lowmem free.  Do not allow normal
+ * allocations below this point, only high priority ones. Automatically
+ * tuned according to the amount of memory in the system.
+ */
 int min_free_kbytes = 1024;
 int user_min_free_kbytes = -1;
 
+/*
+ * Extra memory for the system to try freeing. Used to temporarily
+ * free memory, to make space for new workloads. Anyone can allocate
+ * down to the min watermarks controlled by min_free_kbytes above.
+ */
+int extra_free_kbytes = 0;
+
 static unsigned long __meminitdata nr_kernel_pages;
 static unsigned long __meminitdata nr_all_pages;
 static unsigned long __meminitdata dma_reserve;
@@ -463,6 +482,7 @@
 	printk(KERN_ALERT "BUG: Bad page state in process %s  pfn:%05lx\n",
 		current->comm, page_to_pfn(page));
 	dump_page_badflags(page, reason, bad_flags);
+	dump_page_owner(page);
 
 	print_modules();
 	dump_stack();
@@ -509,7 +529,8 @@
 
 #ifdef CONFIG_DEBUG_PAGEALLOC
 unsigned int _debug_guardpage_minorder;
-bool _debug_pagealloc_enabled __read_mostly;
+bool _debug_pagealloc_enabled __read_mostly
+			= IS_ENABLED(CONFIG_DEBUG_PAGEALLOC_ENABLE_DEFAULT);
 bool _debug_guardpage_enabled __read_mostly;
 
 static int __init early_debug_pagealloc(char *buf)
@@ -520,6 +541,9 @@
 	if (strcmp(buf, "on") == 0)
 		_debug_pagealloc_enabled = true;
 
+	if (strcmp(buf, "off") == 0)
+		_debug_pagealloc_enabled = false;
+
 	return 0;
 }
 early_param("debug_pagealloc", early_debug_pagealloc);
@@ -1016,9 +1040,8 @@
 
 	trace_mm_page_free(page, order);
 	kmemcheck_free_shadow(page, order);
-	kasan_free_pages(page, order);
 
-	if (PageAnon(page))
+	if (PageMappingFlags(page))
 		page->mapping = NULL;
 	bad += free_pages_check(page);
 	for (i = 1; i < (1 << order); i++) {
@@ -1038,7 +1061,9 @@
 					   PAGE_SIZE << order);
 	}
 	arch_free_page(page, order);
+	kernel_poison_pages(page, 1 << order, 0);
 	kernel_map_pages(page, 1 << order, 0);
+	kasan_free_pages(page, order);
 
 	return true;
 }
@@ -1059,8 +1084,7 @@
 	local_irq_restore(flags);
 }
 
-static void __init __free_pages_boot_core(struct page *page,
-					unsigned long pfn, unsigned int order)
+static void __init __free_pages_boot_core(struct page *page, unsigned long pfn, unsigned int order)
 {
 	unsigned int nr_pages = 1 << order;
 	struct page *p = page;
@@ -1132,7 +1156,7 @@
 #endif
 
 
-void __init __free_pages_bootmem(struct page *page, unsigned long pfn,
+void __free_pages_bootmem(struct page *page, unsigned long pfn,
 							unsigned int order)
 {
 	if (early_page_uninitialised(pfn))
@@ -1310,6 +1334,11 @@
 #endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */
 
 #ifdef CONFIG_CMA
+bool is_cma_pageblock(struct page *page)
+{
+	return get_pageblock_migratetype(page) == MIGRATE_CMA;
+}
+
 /* Free whole pageblock and set its migration type to MIGRATE_CMA. */
 void __init init_cma_reserved_pageblock(struct page *page)
 {
@@ -1417,6 +1446,25 @@
 	return 0;
 }
 
+static inline bool free_pages_prezeroed(void)
+{
+	return IS_ENABLED(CONFIG_PAGE_POISONING_ZERO) &&
+		page_poisoning_enabled();
+}
+
+inline void post_alloc_hook(struct page *page, unsigned int order,
+				gfp_t gfp_flags)
+{
+	set_page_private(page, 0);
+	set_page_refcounted(page);
+
+	kasan_alloc_pages(page, order);
+	arch_alloc_page(page, order);
+	kernel_map_pages(page, 1 << order, 1);
+	kernel_poison_pages(page, 1 << order, 1);
+	set_page_owner(page, order, gfp_flags);
+}
+
 static int prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags,
 								int alloc_flags)
 {
@@ -1428,22 +1476,15 @@
 			return 1;
 	}
 
-	set_page_private(page, 0);
-	set_page_refcounted(page);
+	post_alloc_hook(page, order, gfp_flags);
 
-	arch_alloc_page(page, order);
-	kernel_map_pages(page, 1 << order, 1);
-	kasan_alloc_pages(page, order);
-
-	if (gfp_flags & __GFP_ZERO)
+	if (!free_pages_prezeroed() && (gfp_flags & __GFP_ZERO))
 		for (i = 0; i < (1 << order); i++)
 			clear_highpage(page + i);
 
 	if (order && (gfp_flags & __GFP_COMP))
 		prep_compound_page(page, order);
 
-	set_page_owner(page, order, gfp_flags);
-
 	/*
 	 * page is set pfmemalloc when ALLOC_NO_WATERMARKS was necessary to
 	 * allocate the page. The expectation is that the caller is taking
@@ -1506,6 +1547,11 @@
 #endif
 };
 
+int *get_migratetype_fallbacks(int mtype)
+{
+	return fallbacks[mtype];
+}
+
 #ifdef CONFIG_CMA
 static struct page *__rmqueue_cma_fallback(struct zone *zone,
 					unsigned int order)
@@ -1823,7 +1869,8 @@
 
 		page = list_entry(area->free_list[fallback_mt].next,
 						struct page, lru);
-		if (can_steal)
+		if (can_steal &&
+			get_pageblock_migratetype(page) != MIGRATE_HIGHATOMIC)
 			steal_suitable_fallback(zone, page, start_migratetype);
 
 		/* Remove the page from the freelists */
@@ -1862,10 +1909,6 @@
 
 	page = __rmqueue_smallest(zone, order, migratetype);
 	if (unlikely(!page)) {
-		if (migratetype == MIGRATE_MOVABLE)
-			page = __rmqueue_cma_fallback(zone, order);
-
-		if (!page)
 			page = __rmqueue_fallback(zone, order, migratetype);
 	}
 
@@ -1873,6 +1916,23 @@
 	return page;
 }
 
+#ifdef CONFIG_CMA
+static struct page *__rmqueue_cma(struct zone *zone, unsigned int order)
+{
+	struct page *page = NULL;
+
+	if (IS_ENABLED(CONFIG_CMA))
+		if (!zone->cma_alloc)
+			page = __rmqueue_cma_fallback(zone, order);
+	trace_mm_page_alloc_zone_locked(page, order, MIGRATE_CMA);
+	return page;
+}
+#else
+static inline struct page *__rmqueue_cma(struct zone *zone, unsigned int order)
+{
+	return NULL;
+}
+#endif
+
 /*
  * Obtain a specified number of elements from the buddy allocator, all under
  * a single hold of the lock, for efficiency.  Add them to the supplied list.
@@ -1886,7 +1946,17 @@
 
 	spin_lock(&zone->lock);
 	for (i = 0; i < count; ++i) {
-		struct page *page = __rmqueue(zone, order, migratetype, 0);
+		struct page *page;
+
+		/*
+		 * If migratetype CMA is being requested, only try to
+		 * satisfy the request with CMA pages, to increase
+		 * CMA utilization.
+		 */
+		if (is_migrate_cma(migratetype))
+			page = __rmqueue_cma(zone, order);
+		else
+			page = __rmqueue(zone, order, migratetype, 0);
 		if (unlikely(page == NULL))
 			break;
 
@@ -1913,6 +1983,28 @@
 	return i;
 }
 
+/*
+ * Return the pcp list that corresponds to the migrate type if that list isn't
+ * empty.
+ * If the list is empty return NULL.
+ */
+static struct list_head *get_populated_pcp_list(struct zone *zone,
+			unsigned int order, struct per_cpu_pages *pcp,
+			int migratetype, int cold)
+{
+	struct list_head *list = &pcp->lists[migratetype];
+
+	if (list_empty(list)) {
+		pcp->count += rmqueue_bulk(zone, order,
+				pcp->batch, list,
+				migratetype, cold);
+
+		if (list_empty(list))
+			list = NULL;
+	}
+	return list;
+}
+
 #ifdef CONFIG_NUMA
 /*
  * Called from the vmstat counter updater to drain pagesets of this
@@ -2160,7 +2252,6 @@
 void split_page(struct page *page, unsigned int order)
 {
 	int i;
-	gfp_t gfp_mask;
 
 	VM_BUG_ON_PAGE(PageCompound(page), page);
 	VM_BUG_ON_PAGE(!page_count(page), page);
@@ -2174,12 +2265,9 @@
 		split_page(virt_to_page(page[0].shadow), order);
 #endif
 
-	gfp_mask = get_page_owner_gfp(page);
-	set_page_owner(page, 0, gfp_mask);
-	for (i = 1; i < (1 << order); i++) {
+	for (i = 1; i < (1 << order); i++)
 		set_page_refcounted(page + i);
-		set_page_owner(page + i, 0, gfp_mask);
-	}
+	split_page_owner(page, order);
 }
 EXPORT_SYMBOL_GPL(split_page);
 
@@ -2197,7 +2285,8 @@
 	if (!is_migrate_isolate(mt)) {
 		/* Obey watermarks as if the page was being allocated */
 		watermark = low_wmark_pages(zone) + (1 << order);
-		if (!zone_watermark_ok(zone, 0, watermark, 0, 0))
+		if (!is_migrate_cma(mt) &&
+		    !zone_watermark_ok(zone, 0, watermark, 0, 0))
 			return 0;
 
 		__mod_zone_freepage_state(zone, -(1UL << order), mt);
@@ -2208,14 +2297,13 @@
 	zone->free_area[order].nr_free--;
 	rmv_page_order(page);
 
-	set_page_owner(page, order, __GFP_MOVABLE);
-
 	/* Set the pageblock if the isolated page is at least a pageblock */
 	if (order >= pageblock_order - 1) {
 		struct page *endpage = page + (1 << order) - 1;
 		for (; page < endpage; page += pageblock_nr_pages) {
 			int mt = get_pageblock_migratetype(page);
-			if (!is_migrate_isolate(mt) && !is_migrate_cma(mt))
+			if (!is_migrate_isolate(mt) && !is_migrate_cma(mt)
+				&& mt != MIGRATE_HIGHATOMIC)
 				set_pageblock_migratetype(page,
 							  MIGRATE_MOVABLE);
 		}
@@ -2226,33 +2314,6 @@
 }
 
 /*
- * Similar to split_page except the page is already free. As this is only
- * being used for migration, the migratetype of the block also changes.
- * As this is called with interrupts disabled, the caller is responsible
- * for calling arch_alloc_page() and kernel_map_page() after interrupts
- * are enabled.
- *
- * Note: this is probably too low level an operation for use in drivers.
- * Please consult with lkml before using this in your driver.
- */
-int split_free_page(struct page *page)
-{
-	unsigned int order;
-	int nr_pages;
-
-	order = page_order(page);
-
-	nr_pages = __isolate_free_page(page, order);
-	if (!nr_pages)
-		return 0;
-
-	/* Split into individual pages */
-	set_page_refcounted(page);
-	split_page(page, order);
-	return nr_pages;
-}
-
-/*
  * Allocate a page from the given zone. Use pcplists for order-0 allocations.
  */
 static inline
@@ -2261,21 +2322,32 @@
 			gfp_t gfp_flags, int alloc_flags, int migratetype)
 {
 	unsigned long flags;
-	struct page *page;
+	struct page *page = NULL;
 	bool cold = ((gfp_flags & __GFP_COLD) != 0);
 
 	if (likely(order == 0)) {
 		struct per_cpu_pages *pcp;
-		struct list_head *list;
+		struct list_head *list = NULL;
 
 		local_irq_save(flags);
 		pcp = &this_cpu_ptr(zone->pageset)->pcp;
-		list = &pcp->lists[migratetype];
-		if (list_empty(list)) {
-			pcp->count += rmqueue_bulk(zone, 0,
-					pcp->batch, list,
+
+		/* First try to get CMA pages */
+		if (migratetype == MIGRATE_MOVABLE &&
+			gfp_flags & __GFP_CMA) {
+			list = get_populated_pcp_list(zone, 0, pcp,
+					get_cma_migrate_type(), cold);
+		}
+
+		if (list == NULL) {
+			/*
+			 * Either CMA is not suitable or there are no free CMA
+			 * pages.
+			 */
+			list = get_populated_pcp_list(zone, 0, pcp,
 					migratetype, cold);
-			if (unlikely(list_empty(list)))
+			if (unlikely(list == NULL) ||
+				unlikely(list_empty(list)))
 				goto failed;
 		}
 
@@ -2308,8 +2380,13 @@
 			if (page)
 				trace_mm_page_alloc_zone_locked(page, order, migratetype);
 		}
+		if (!page && migratetype == MIGRATE_MOVABLE &&
+				gfp_flags & __GFP_CMA)
+			page = __rmqueue_cma(zone, order);
+
 		if (!page)
 			page = __rmqueue(zone, order, migratetype, gfp_flags);
+
 		spin_unlock(&zone->lock);
 		if (!page)
 			goto failed;
@@ -2469,6 +2546,14 @@
 			continue;
 
 		for (mt = 0; mt < MIGRATE_PCPTYPES; mt++) {
+#ifdef CONFIG_CMA
+			/*
+			 * Note that this check is needed only
+			 * when MIGRATE_CMA < MIGRATE_PCPTYPES.
+			 */
+			if (mt == MIGRATE_CMA)
+				continue;
+#endif
 			if (!list_empty(&area->free_list[mt]))
 				return true;
 		}
@@ -3985,8 +4070,7 @@
 		user_zonelist_order = ZONELIST_ORDER_ZONE;
 	} else {
 		printk(KERN_WARNING
-			"Ignoring invalid numa_zonelist_order value:  "
-			"%s\n", s);
+		       "Ignoring invalid numa_zonelist_order value:  %s\n", s);
 		return -EINVAL;
 	}
 	return 0;
@@ -4451,8 +4535,7 @@
 	else
 		page_group_by_mobility_disabled = 0;
 
-	pr_info("Built %i zonelists in %s order, mobility grouping %s.  "
-		"Total pages: %ld\n",
+	pr_info("Built %i zonelists in %s order, mobility grouping %s.  Total pages: %ld\n",
 			nr_online_nodes,
 			zonelist_order_name[current_zonelist_order],
 			page_group_by_mobility_disabled ? "off" : "on",
@@ -5256,6 +5339,9 @@
 #endif
 	init_waitqueue_head(&pgdat->kswapd_wait);
 	init_waitqueue_head(&pgdat->pfmemalloc_wait);
+#ifdef CONFIG_COMPACTION
+	init_waitqueue_head(&pgdat->kcompactd_wait);
+#endif
 	pgdat_page_ext_init(pgdat);
 
 	for (j = 0; j < MAX_NR_ZONES; j++) {
@@ -5927,14 +6013,13 @@
 
 #undef	adj_init_size
 
-	pr_info("Memory: %luK/%luK available "
-	       "(%luK kernel code, %luK rwdata, %luK rodata, "
-	       "%luK init, %luK bss, %luK reserved, %luK cma-reserved"
+	pr_info("Memory: %luK/%luK available (%luK kernel code, %luK rwdata, %luK rodata, %luK init, %luK bss, %luK reserved, %luK cma-reserved"
 #ifdef	CONFIG_HIGHMEM
 	       ", %luK highmem"
 #endif
 	       "%s%s)\n",
-	       nr_free_pages() << (PAGE_SHIFT-10), physpages << (PAGE_SHIFT-10),
+		nr_free_pages() << (PAGE_SHIFT - 10),
+		physpages << (PAGE_SHIFT - 10),
 	       codesize >> 10, datasize >> 10, rosize >> 10,
 	       (init_data_size + init_code_size) >> 10, bss_size >> 10,
 	       (physpages - totalram_pages - totalcma_pages) << (PAGE_SHIFT-10),
@@ -6027,20 +6112,12 @@
 
 			if (max > zone->managed_pages)
 				max = zone->managed_pages;
+
+			zone->totalreserve_pages = max;
+
 			reserve_pages += max;
-			/*
-			 * Lowmem reserves are not available to
-			 * GFP_HIGHUSER page cache allocations and
-			 * kswapd tries to balance zones to their high
-			 * watermark.  As a result, neither should be
-			 * regarded as dirtyable memory, to prevent a
-			 * situation where reclaim has to clean pages
-			 * in order to balance the zones.
-			 */
-			zone->dirty_balance_reserve = max;
 		}
 	}
-	dirty_balance_reserve = reserve_pages;
 	totalreserve_pages = reserve_pages;
 }
 
@@ -6086,6 +6163,7 @@
 static void __setup_per_zone_wmarks(void)
 {
 	unsigned long pages_min = min_free_kbytes >> (PAGE_SHIFT - 10);
+	unsigned long pages_low = extra_free_kbytes >> (PAGE_SHIFT - 10);
 	unsigned long lowmem_pages = 0;
 	struct zone *zone;
 	unsigned long flags;
@@ -6097,11 +6175,14 @@
 	}
 
 	for_each_zone(zone) {
-		u64 tmp;
+		u64 min, low;
 
 		spin_lock_irqsave(&zone->lock, flags);
-		tmp = (u64)pages_min * zone->managed_pages;
-		do_div(tmp, lowmem_pages);
+		min = (u64)pages_min * zone->managed_pages;
+		do_div(min, lowmem_pages);
+		low = (u64)pages_low * zone->managed_pages;
+		do_div(low, vm_total_pages);
+
 		if (is_highmem(zone)) {
 			/*
 			 * __GFP_HIGH and PF_MEMALLOC allocations usually don't
@@ -6122,11 +6203,13 @@
 			 * If it's a lowmem zone, reserve a number of pages
 			 * proportionate to the zone's size.
 			 */
-			zone->watermark[WMARK_MIN] = tmp;
+			zone->watermark[WMARK_MIN] = min;
 		}
 
-		zone->watermark[WMARK_LOW]  = min_wmark_pages(zone) + (tmp >> 2);
-		zone->watermark[WMARK_HIGH] = min_wmark_pages(zone) + (tmp >> 1);
+		zone->watermark[WMARK_LOW]  = min_wmark_pages(zone) +
+					low + (min >> 2);
+		zone->watermark[WMARK_HIGH] = min_wmark_pages(zone) +
+					low + (min >> 1);
 
 		__mod_zone_page_state(zone, NR_ALLOC_BATCH,
 			high_wmark_pages(zone) - low_wmark_pages(zone) -
@@ -6249,7 +6332,7 @@
 /*
  * min_free_kbytes_sysctl_handler - just a wrapper around proc_dointvec() so
  *	that we can call two helper functions whenever min_free_kbytes
- *	changes.
+ *	or extra_free_kbytes changes.
  */
 int min_free_kbytes_sysctl_handler(struct ctl_table *table, int write,
 	void __user *buffer, size_t *length, loff_t *ppos)
@@ -6795,6 +6878,8 @@
 	if (ret)
 		return ret;
 
+	cc.zone->cma_alloc = 1;
+
 	ret = __alloc_contig_migrate_range(&cc, start, end);
 	if (ret)
 		goto done;
@@ -6853,6 +6938,7 @@
 done:
 	undo_isolate_page_range(pfn_max_align_down(start),
 				pfn_max_align_up(end), migratetype);
+	cc.zone->cma_alloc = 0;
 	return ret;
 }
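The reworked watermark math in __setup_per_zone_wmarks() above can be checked by hand: min scales min_free_kbytes by the zone's share of lowmem, low scales extra_free_kbytes by the zone's share of all pages, and the low/high marks end up at min + low + min/4 and min + low + min/2. A worked sketch with invented zone sizes:

#include <stdio.h>

int main(void)
{
	/* Invented example numbers, all in 4 KiB pages. */
	unsigned long pages_min = 1024 >> 2;	/* min_free_kbytes = 1024 */
	unsigned long pages_low = 8192 >> 2;	/* extra_free_kbytes = 8192 */
	unsigned long zone_managed = 100000;
	unsigned long lowmem_pages = 200000, vm_total_pages = 250000;

	unsigned long min = pages_min * zone_managed / lowmem_pages;
	unsigned long low = pages_low * zone_managed / vm_total_pages;

	printf("WMARK_MIN  = %lu\n", min);			/* 128 */
	printf("WMARK_LOW  = %lu\n", min + low + (min >> 2));	/* 979 */
	printf("WMARK_HIGH = %lu\n", min + low + (min >> 1));	/* 1011 */
	return 0;
}

With extra_free_kbytes at its default of 0, low collapses to 0 and the watermarks reduce to the stock min + min/4 and min + min/2 spacing.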
 
diff -ruw linux-4.4.115/mm/page_ext.c linux-4.4.115-fbx/mm/page_ext.c
--- linux-4.4.115/mm/page_ext.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/mm/page_ext.c	2019-10-29 09:26:25.725223553 +0100
@@ -54,9 +54,6 @@
 
 static struct page_ext_operations *page_ext_ops[] = {
 	&debug_guardpage_ops,
-#ifdef CONFIG_PAGE_POISONING
-	&page_poisoning_ops,
-#endif
 #ifdef CONFIG_PAGE_OWNER
 	&page_owner_ops,
 #endif
@@ -111,6 +108,9 @@
 	 * page can reach here before the page_ext arrays are
 	 * allocated when feeding a range of pages to the allocator
 	 * for the first time during bootup or memory hotplug.
+	 *
+	 * This check is also necessary for ensuring page poisoning
+	 * works as expected when enabled.
 	 */
 	if (unlikely(!base))
 		return NULL;
@@ -183,6 +183,9 @@
 	 * page can reach here before the page_ext arrays are
 	 * allocated when feeding a range of pages to the allocator
 	 * for the first time during bootup or memory hotplug.
+	 *
+	 * This check is also necessary for ensuring page poisoning
+	 * works as expected when enabled.
 	 */
 	if (!section->page_ext)
 		return NULL;
diff -ruw linux-4.4.115/mm/page_isolation.c linux-4.4.115-fbx/mm/page_isolation.c
--- linux-4.4.115/mm/page_isolation.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/mm/page_isolation.c	2019-01-22 16:16:28.819294546 +0100
@@ -7,6 +7,8 @@
 #include <linux/pageblock-flags.h>
 #include <linux/memory.h>
 #include <linux/hugetlb.h>
+#include <linux/kasan.h>
+#include <linux/page_owner.h>
 #include "internal.h"
 
 static int set_migratetype_isolate(struct page *page,
@@ -105,8 +107,6 @@
 			if (pfn_valid_within(page_to_pfn(buddy)) &&
 			    !is_migrate_isolate_page(buddy)) {
 				__isolate_free_page(page, order);
-				kernel_map_pages(page, (1 << order), 1);
-				set_page_refcounted(page);
 				isolated_page = page;
 			}
 		}
@@ -125,9 +125,11 @@
 	zone->nr_isolate_pageblock--;
 out:
 	spin_unlock_irqrestore(&zone->lock, flags);
-	if (isolated_page)
+	if (isolated_page) {
+		post_alloc_hook(page, order, __GFP_MOVABLE);
 		__free_pages(isolated_page, order);
 }
+}
 
 static inline struct page *
 __first_valid_page(unsigned long pfn, unsigned long nr_pages)
diff -ruw linux-4.4.115/mm/page_owner.c linux-4.4.115-fbx/mm/page_owner.c
--- linux-4.4.115/mm/page_owner.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/mm/page_owner.c	2019-01-22 16:16:28.819294546 +0100
@@ -5,10 +5,24 @@
 #include <linux/bootmem.h>
 #include <linux/stacktrace.h>
 #include <linux/page_owner.h>
+#include <linux/jump_label.h>
+#include <linux/migrate.h>
+#include <linux/stackdepot.h>
+
 #include "internal.h"
 
-static bool page_owner_disabled = true;
-bool page_owner_inited __read_mostly;
+/*
+ * TODO: teach PAGE_OWNER_STACK_DEPTH (__dump_page_owner and save_stack)
+ * to use off-stack temporary storage
+ */
+#define PAGE_OWNER_STACK_DEPTH (16)
+
+static bool page_owner_disabled =
+	!IS_ENABLED(CONFIG_PAGE_OWNER_ENABLE_DEFAULT);
+DEFINE_STATIC_KEY_FALSE(page_owner_inited);
+
+static depot_stack_handle_t dummy_handle;
+static depot_stack_handle_t failure_handle;
 
 static void init_early_allocated_pages(void);
 
@@ -20,6 +34,9 @@
 	if (strcmp(buf, "on") == 0)
 		page_owner_disabled = false;
 
+	if (strcmp(buf, "off") == 0)
+		page_owner_disabled = true;
+
 	return 0;
 }
 early_param("page_owner", early_page_owner_param);
@@ -32,12 +49,42 @@
 	return true;
 }
 
+static noinline void register_dummy_stack(void)
+{
+	unsigned long entries[4];
+	struct stack_trace dummy;
+
+	dummy.nr_entries = 0;
+	dummy.max_entries = ARRAY_SIZE(entries);
+	dummy.entries = &entries[0];
+	dummy.skip = 0;
+
+	save_stack_trace(&dummy);
+	dummy_handle = depot_save_stack(&dummy, GFP_KERNEL);
+}
+
+static noinline void register_failure_stack(void)
+{
+	unsigned long entries[4];
+	struct stack_trace failure;
+
+	failure.nr_entries = 0;
+	failure.max_entries = ARRAY_SIZE(entries);
+	failure.entries = &entries[0];
+	failure.skip = 0;
+
+	save_stack_trace(&failure);
+	failure_handle = depot_save_stack(&failure, GFP_KERNEL);
+}
+
 static void init_page_owner(void)
 {
 	if (page_owner_disabled)
 		return;
 
-	page_owner_inited = true;
+	register_dummy_stack();
+	register_failure_stack();
+	static_branch_enable(&page_owner_inited);
 	init_early_allocated_pages();
 }
 
@@ -59,52 +106,135 @@
 	}
 }
 
-void __set_page_owner(struct page *page, unsigned int order, gfp_t gfp_mask)
+static inline bool check_recursive_alloc(struct stack_trace *trace,
+					unsigned long ip)
 {
-	struct page_ext *page_ext = lookup_page_ext(page);
+	int i, count;
+
+	if (!trace->nr_entries)
+		return false;
+
+	for (i = 0, count = 0; i < trace->nr_entries; i++) {
+		if (trace->entries[i] == ip && ++count == 2)
+			return true;
+	}
 
+	return false;
+}
+
+static noinline depot_stack_handle_t save_stack(gfp_t flags)
+{
+	unsigned long entries[PAGE_OWNER_STACK_DEPTH];
 	struct stack_trace trace = {
 		.nr_entries = 0,
-		.max_entries = ARRAY_SIZE(page_ext->trace_entries),
-		.entries = &page_ext->trace_entries[0],
-		.skip = 3,
+		.entries = entries,
+		.max_entries = PAGE_OWNER_STACK_DEPTH,
+		.skip = 2
 	};
+	depot_stack_handle_t handle;
+
+	save_stack_trace(&trace);
+	if (trace.nr_entries != 0 &&
+	    trace.entries[trace.nr_entries-1] == ULONG_MAX)
+		trace.nr_entries--;
+
+	/*
+	 * We need to check for recursion here because our request to
+	 * stackdepot could trigger a memory allocation to save the new
+	 * entry. That allocation would reach here and call
+	 * depot_save_stack() again if we don't catch it; since stackdepot
+	 * would still be short of memory, it would try to allocate again
+	 * and loop forever.
+	 */
+	if (check_recursive_alloc(&trace, _RET_IP_))
+		return dummy_handle;
+
+	handle = depot_save_stack(&trace, flags);
+	if (!handle)
+		handle = failure_handle;
+
+	return handle;
+}
+
+noinline void __set_page_owner(struct page *page, unsigned int order,
+					gfp_t gfp_mask)
+{
+	struct page_ext *page_ext = lookup_page_ext(page);
 
 	if (unlikely(!page_ext))
 		return;
 
-	save_stack_trace(&trace);
-
+	page_ext->handle = save_stack(gfp_mask);
 	page_ext->order = order;
 	page_ext->gfp_mask = gfp_mask;
-	page_ext->nr_entries = trace.nr_entries;
+	page_ext->last_migrate_reason = -1;
 
 	__set_bit(PAGE_EXT_OWNER, &page_ext->flags);
 }
 
-gfp_t __get_page_owner_gfp(struct page *page)
+void __set_page_owner_migrate_reason(struct page *page, int reason)
 {
 	struct page_ext *page_ext = lookup_page_ext(page);
 	if (unlikely(!page_ext))
+		return;
+
+	page_ext->last_migrate_reason = reason;
+}
+
+void __split_page_owner(struct page *page, unsigned int order)
+{
+	int i;
+	struct page_ext *page_ext = lookup_page_ext(page);
+	if (unlikely(!page_ext))
 		/*
-		 * The caller just returns 0 if no valid gfp
-		 * So return 0 here too.
+		 * The caller just returns if there is no valid page_ext,
+		 * so return here too.
 		 */
-		return 0;
+		return;
+
+	page_ext->order = 0;
+	for (i = 1; i < (1 << order); i++)
+		__copy_page_owner(page, page + i);
+}
+
+void __copy_page_owner(struct page *oldpage, struct page *newpage)
+{
+	struct page_ext *old_ext = lookup_page_ext(oldpage);
+	struct page_ext *new_ext = lookup_page_ext(newpage);
+
+	if (unlikely(!old_ext || !new_ext))
+		return;
 
-	return page_ext->gfp_mask;
+	new_ext->order = old_ext->order;
+	new_ext->gfp_mask = old_ext->gfp_mask;
+	new_ext->last_migrate_reason = old_ext->last_migrate_reason;
+	new_ext->handle = old_ext->handle;
+
+	/*
+	 * We don't clear the bit on the oldpage as it's going to be freed
+	 * after migration. Until then, the info can be useful in case of
+	 * a bug, and the overall stats will be off a bit only temporarily.
+	 * Also, migrate_misplaced_transhuge_page() can still fail the
+	 * migration and then we want the oldpage to retain the info. But
+	 * in that case we also don't need to explicitly clear the info from
+	 * the new page, which will be freed.
+	 */
+	__set_bit(PAGE_EXT_OWNER, &new_ext->flags);
 }
 
 static ssize_t
 print_page_owner(char __user *buf, size_t count, unsigned long pfn,
-		struct page *page, struct page_ext *page_ext)
+		struct page *page, struct page_ext *page_ext,
+		depot_stack_handle_t handle)
 {
 	int ret;
 	int pageblock_mt, page_mt;
 	char *kbuf;
+	unsigned long entries[PAGE_OWNER_STACK_DEPTH];
 	struct stack_trace trace = {
-		.nr_entries = page_ext->nr_entries,
-		.entries = &page_ext->trace_entries[0],
+		.nr_entries = 0,
+		.entries = entries,
+		.max_entries = PAGE_OWNER_STACK_DEPTH,
+		.skip = 0
 	};
 
 	kbuf = kmalloc(count, GFP_KERNEL);
@@ -112,8 +242,9 @@
 		return -ENOMEM;
 
 	ret = snprintf(kbuf, count,
-			"Page allocated via order %u, mask 0x%x\n",
-			page_ext->order, page_ext->gfp_mask);
+			"Page allocated via order %u, mask %#x(%pGg)\n",
+			page_ext->order, page_ext->gfp_mask,
+			&page_ext->gfp_mask);
 
 	if (ret >= count)
 		goto err;
@@ -122,31 +253,29 @@
 	pageblock_mt = get_pfnblock_migratetype(page, pfn);
 	page_mt  = gfpflags_to_migratetype(page_ext->gfp_mask);
 	ret += snprintf(kbuf + ret, count - ret,
-			"PFN %lu Block %lu type %d %s Flags %s%s%s%s%s%s%s%s%s%s%s%s\n",
+			"PFN %lu type %s Block %lu type %s Flags %#lx(%pGp)\n",
 			pfn,
+			migratetype_names[page_mt],
 			pfn >> pageblock_order,
-			pageblock_mt,
-			pageblock_mt != page_mt ? "Fallback" : "        ",
-			PageLocked(page)	? "K" : " ",
-			PageError(page)		? "E" : " ",
-			PageReferenced(page)	? "R" : " ",
-			PageUptodate(page)	? "U" : " ",
-			PageDirty(page)		? "D" : " ",
-			PageLRU(page)		? "L" : " ",
-			PageActive(page)	? "A" : " ",
-			PageSlab(page)		? "S" : " ",
-			PageWriteback(page)	? "W" : " ",
-			PageCompound(page)	? "C" : " ",
-			PageSwapCache(page)	? "B" : " ",
-			PageMappedToDisk(page)	? "M" : " ");
+			migratetype_names[pageblock_mt],
+			page->flags, &page->flags);
 
 	if (ret >= count)
 		goto err;
 
+	depot_fetch_stack(handle, &trace);
 	ret += snprint_stack_trace(kbuf + ret, count - ret, &trace, 0);
 	if (ret >= count)
 		goto err;
 
+	if (page_ext->last_migrate_reason != -1) {
+		ret += snprintf(kbuf + ret, count - ret,
+			"Page has been migrated, last migrate reason: %s\n",
+			migrate_reason_names[page_ext->last_migrate_reason]);
+		if (ret >= count)
+			goto err;
+	}
+
 	ret += snprintf(kbuf + ret, count - ret, "\n");
 	if (ret >= count)
 		goto err;
@@ -162,14 +291,58 @@
 	return -ENOMEM;
 }
 
+void __dump_page_owner(struct page *page)
+{
+	struct page_ext *page_ext = lookup_page_ext(page);
+	unsigned long entries[PAGE_OWNER_STACK_DEPTH];
+	struct stack_trace trace = {
+		.nr_entries = 0,
+		.entries = entries,
+		.max_entries = PAGE_OWNER_STACK_DEPTH,
+		.skip = 0
+	};
+	depot_stack_handle_t handle;
+	gfp_t gfp_mask;
+	int mt;
+
+	if (unlikely(!page_ext)) {
+		pr_alert("There is not page extension available.\n");
+		return;
+	}
+	gfp_mask = page_ext->gfp_mask;
+	mt = gfpflags_to_migratetype(gfp_mask);
+
+	if (!test_bit(PAGE_EXT_OWNER, &page_ext->flags)) {
+		pr_alert("page_owner info is not active (free page?)\n");
+		return;
+	}
+
+	handle = READ_ONCE(page_ext->handle);
+	if (!handle) {
+		pr_alert("page_owner info is not active (free page?)\n");
+		return;
+	}
+
+	depot_fetch_stack(handle, &trace);
+	pr_alert("page allocated via order %u, migratetype %s, "
+			"gfp_mask %#x(%pGg)\n", page_ext->order,
+			migratetype_names[mt], gfp_mask, &gfp_mask);
+	print_stack_trace(&trace, 0);
+
+	if (page_ext->last_migrate_reason != -1)
+		pr_alert("page has been migrated, last migrate reason: %s\n",
+			migrate_reason_names[page_ext->last_migrate_reason]);
+}
+
 static ssize_t
 read_page_owner(struct file *file, char __user *buf, size_t count, loff_t *ppos)
 {
 	unsigned long pfn;
 	struct page *page;
 	struct page_ext *page_ext;
+	depot_stack_handle_t handle;
 
-	if (!page_owner_inited)
+	if (!static_branch_unlikely(&page_owner_inited))
 		return -EINVAL;
 
 	page = NULL;
@@ -216,10 +389,19 @@
 		if (!test_bit(PAGE_EXT_OWNER, &page_ext->flags))
 			continue;
 
+		/*
+		 * Access to page_ext->handle isn't synchronized, so be
+		 * careful when reading it.
+		 */
+		handle = READ_ONCE(page_ext->handle);
+		if (!handle)
+			continue;
+
 		/* Record the next PFN to read in the file offset */
 		*ppos = (pfn - min_low_pfn) + 1;
 
-		return print_page_owner(buf, count, pfn, page, page_ext);
+		return print_page_owner(buf, count, pfn, page,
+				page_ext, handle);
 	}
 
 	return 0;
@@ -258,6 +440,9 @@
 
 			page = pfn_to_page(pfn);
 
+			if (page_zone(page) != zone)
+				continue;
+
 			/*
 			 * We are safe to check buddy flag and order, because
 			 * this is init stage and only single thread runs.
@@ -321,7 +506,7 @@
 {
 	struct dentry *dentry;
 
-	if (!page_owner_inited) {
+	if (!static_branch_unlikely(&page_owner_inited)) {
 		pr_info("page_owner is disabled\n");
 		return 0;
 	}
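The recursion guard in save_stack() above rests on one observation: the saving function legitimately shows up once in its own trace, so seeing the same return address a second time means depot_save_stack()'s internal allocation re-entered us, and the dummy handle must be returned instead. The same counting logic, testable in isolation:

#include <stdbool.h>
#include <stdio.h>

static bool check_recursive_alloc(const unsigned long *entries, int nr,
				  unsigned long ip)
{
	int count = 0;

	for (int i = 0; i < nr; i++)
		if (entries[i] == ip && ++count == 2)
			return true;	/* second hit: we recursed */
	return false;
}

int main(void)
{
	unsigned long once[]  = { 0x100, 0x200, 0x300 };
	unsigned long twice[] = { 0x100, 0x200, 0x100 };

	printf("%d\n", check_recursive_alloc(once, 3, 0x100));	/* 0 */
	printf("%d\n", check_recursive_alloc(twice, 3, 0x100));	/* 1 */
	return 0;
}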
diff -ruw linux-4.4.115/mm/page-writeback.c linux-4.4.115-fbx/mm/page-writeback.c
--- linux-4.4.115/mm/page-writeback.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/mm/page-writeback.c	2019-10-29 09:26:25.717223474 +0100
@@ -278,7 +278,12 @@
 	unsigned long nr_pages;
 
 	nr_pages = zone_page_state(zone, NR_FREE_PAGES);
-	nr_pages -= min(nr_pages, zone->dirty_balance_reserve);
+	/*
+	 * Pages reserved for the kernel should not be considered
+	 * dirtyable, to prevent a situation where reclaim has to
+	 * clean pages in order to balance the zones.
+	 */
+	nr_pages -= min(nr_pages, zone->totalreserve_pages);
 
 	nr_pages += zone_page_state(zone, NR_INACTIVE_FILE);
 	nr_pages += zone_page_state(zone, NR_ACTIVE_FILE);
@@ -332,7 +337,12 @@
 	unsigned long x;
 
 	x = global_page_state(NR_FREE_PAGES);
-	x -= min(x, dirty_balance_reserve);
+	/*
+	 * Pages reserved for the kernel should not be considered
+	 * dirtyable, to prevent a situation where reclaim has to
+	 * clean pages in order to balance the zones.
+	 */
+	x -= min(x, totalreserve_pages);
 
 	x += global_page_state(NR_INACTIVE_FILE);
 	x += global_page_state(NR_ACTIVE_FILE);
@@ -1944,6 +1954,12 @@
                 if (global_page_state(NR_UNSTABLE_NFS) +
 			global_page_state(NR_WRITEBACK) <= dirty_thresh)
                         	break;
+		/* Try safe version */
+		else if (unlikely(global_page_state_snapshot(NR_UNSTABLE_NFS) +
+			global_page_state_snapshot(NR_WRITEBACK) <=
+				dirty_thresh))
+				break;
+
                 congestion_wait(BLK_RW_ASYNC, HZ/10);
 
 		/*
@@ -1978,11 +1994,11 @@
 	 * We want to write everything out, not just down to the dirty
 	 * threshold
 	 */
-	if (!bdi_has_dirty_io(&q->backing_dev_info))
+	if (!bdi_has_dirty_io(q->backing_dev_info))
 		return;
 
 	rcu_read_lock();
-	list_for_each_entry_rcu(wb, &q->backing_dev_info.wb_list, bdi_node)
+	list_for_each_entry_rcu(wb, &q->backing_dev_info->wb_list, bdi_node)
 		if (wb_has_dirty_io(wb))
 			wb_start_writeback(wb, nr_pages, true,
 					   WB_REASON_LAPTOP_TIMER);
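After the two hunks above, both the per-zone and the global dirtyable-memory estimates subtract the full reserve (zone->totalreserve_pages or totalreserve_pages) instead of the removed dirty_balance_reserve; the min() keeps the subtraction from underflowing when free pages dip below the reserve. In plain arithmetic (page counts invented):

#include <stdio.h>

static unsigned long dirtyable(unsigned long free, unsigned long reserve,
			       unsigned long inactive_file,
			       unsigned long active_file)
{
	free -= (free < reserve) ? free : reserve;	/* min() guards underflow */
	return free + inactive_file + active_file;
}

int main(void)
{
	printf("%lu\n", dirtyable(50000, 12000, 30000, 20000));	/* 88000 */
	printf("%lu\n", dirtyable(8000, 12000, 30000, 20000));	/* 50000 */
	return 0;
}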
diff -ruw linux-4.4.115/mm/percpu.c linux-4.4.115-fbx/mm/percpu.c
--- linux-4.4.115/mm/percpu.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/mm/percpu.c	2019-10-29 09:26:25.725223553 +0100
@@ -889,8 +889,8 @@
 	size = ALIGN(size, 2);
 
 	if (unlikely(!size || size > PCPU_MIN_UNIT_SIZE || align > PAGE_SIZE)) {
-		WARN(true, "illegal size (%zu) or align (%zu) for "
-		     "percpu allocation\n", size, align);
+		WARN(true, "illegal size (%zu) or align (%zu) for percpu allocation\n",
+		     size, align);
 		return NULL;
 	}
 
diff -ruw linux-4.4.115/mm/readahead.c linux-4.4.115-fbx/mm/readahead.c
--- linux-4.4.115/mm/readahead.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/mm/readahead.c	2019-10-29 09:26:25.725223553 +0100
@@ -234,6 +234,8 @@
 
 /*
  * Set the initial window size, round to next power of 2 and square
+ * (small size no longer depends on the max value - only a one-page read
+ * is regarded as small)
  * for small size, x 4 for medium, and x 2 for large
  * for 128k (32 page) max ra
  * 1-8 page = 32k initial, > 8 page = 128k initial
@@ -242,7 +244,7 @@
 {
 	unsigned long newsize = roundup_pow_of_two(size);
 
-	if (newsize <= max / 32)
+	if (newsize <= 1)
 		newsize = newsize * 4;
 	else if (newsize <= max / 4)
 		newsize = newsize * 2;
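
The readahead hunk changes only the first condition of the sizing heuristic. The sketch below models the whole function under the assumption that the rest of get_init_ra_size() matches mainline (doubling mid-sized requests and clamping the rest to max); sizes are in pages.

#include <stdio.h>

static unsigned long roundup_pow_of_two(unsigned long n)
{
	unsigned long r = 1;

	while (r < n)
		r <<= 1;
	return r;
}

static unsigned long get_init_ra_size(unsigned long size, unsigned long max)
{
	unsigned long newsize = roundup_pow_of_two(size);

	if (newsize <= 1)		/* was: newsize <= max / 32 */
		newsize = newsize * 4;
	else if (newsize <= max / 4)
		newsize = newsize * 2;
	else
		newsize = max;

	return newsize;
}

int main(void)
{
	unsigned long sz;

	/* 128k max readahead == 32 pages, as in the comment above */
	for (sz = 1; sz <= 32; sz <<= 1)
		printf("request %2lu pages -> initial window %2lu pages\n",
		       sz, get_init_ra_size(sz, 32));
	return 0;
}
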
diff -ruw linux-4.4.115/mm/rmap.c linux-4.4.115-fbx/mm/rmap.c
--- linux-4.4.115/mm/rmap.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/mm/rmap.c	2019-10-29 09:26:25.729223592 +0100
@@ -1481,9 +1481,12 @@
  * try_to_unmap - try to remove all page table mappings to a page
  * @page: the page to get unmapped
  * @flags: action and flags
+ * @vma: target vma for reclaim
  *
  * Tries to remove all the page table entries which are mapping this
  * page, used in the pageout path.  Caller must hold the page lock.
+ * If @vma is not NULL, this function tries to remove @page only from @vma,
+ * without walking all the vmas that map @page.
  * Return values are:
  *
  * SWAP_SUCCESS	- we succeeded in removing all mappings
@@ -1491,7 +1494,8 @@
  * SWAP_FAIL	- the page is unswappable
  * SWAP_MLOCK	- page is mlocked.
  */
-int try_to_unmap(struct page *page, enum ttu_flags flags)
+int try_to_unmap(struct page *page, enum ttu_flags flags,
+				struct vm_area_struct *vma)
 {
 	int ret;
 	struct rmap_walk_control rwc = {
@@ -1499,6 +1503,7 @@
 		.arg = (void *)flags,
 		.done = page_not_mapped,
 		.anon_lock = page_lock_anon_vma_read,
+		.target_vma = vma,
 	};
 
 	VM_BUG_ON_PAGE(!PageHuge(page) && PageTransHuge(page), page);
@@ -1544,6 +1549,7 @@
 		.arg = (void *)TTU_MUNLOCK,
 		.done = page_not_mapped,
 		.anon_lock = page_lock_anon_vma_read,
+		.target_vma = NULL,
 
 	};
 
@@ -1605,6 +1611,11 @@
 	struct anon_vma_chain *avc;
 	int ret = SWAP_AGAIN;
 
+	if (rwc->target_vma) {
+		unsigned long address = vma_address(page, rwc->target_vma);
+		return rwc->rmap_one(page, rwc->target_vma, address, rwc->arg);
+	}
+
 	anon_vma = rmap_walk_anon_lock(page, rwc);
 	if (!anon_vma)
 		return ret;
@@ -1647,6 +1658,7 @@
 	struct address_space *mapping = page->mapping;
 	pgoff_t pgoff;
 	struct vm_area_struct *vma;
+	unsigned long address;
 	int ret = SWAP_AGAIN;
 
 	/*
@@ -1662,6 +1674,12 @@
 
 	pgoff = page_to_pgoff(page);
 	i_mmap_lock_read(mapping);
+	if (rwc->target_vma) {
+		address = vma_address(page, rwc->target_vma);
+		ret = rwc->rmap_one(page, rwc->target_vma, address, rwc->arg);
+		goto done;
+	}
+
 	vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
 		unsigned long address = vma_address(page, vma);
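
A toy userspace model of the target_vma short-circuit added to both rmap walkers above: with a target vma the walk visits exactly one mapping instead of every vma that maps the page. The structures here are stand-ins, not the kernel's rmap types.

#include <stdio.h>

struct vma { const char *name; };

struct walk_control {
	struct vma *target_vma;			/* NULL = walk everything */
	int (*rmap_one)(struct vma *vma);
};

static int rmap_walk(struct walk_control *rwc, struct vma **all, int n)
{
	int i, ret = 0;

	if (rwc->target_vma)			/* the new fast path */
		return rwc->rmap_one(rwc->target_vma);

	for (i = 0; i < n; i++)			/* the old full walk */
		ret = rwc->rmap_one(all[i]);
	return ret;
}

static int unmap_one(struct vma *vma)
{
	printf("unmapping in %s\n", vma->name);
	return 0;
}

int main(void)
{
	struct vma a = { "vma-a" }, b = { "vma-b" };
	struct vma *all[] = { &a, &b };
	struct walk_control rwc = { .target_vma = &b, .rmap_one = unmap_one };

	rmap_walk(&rwc, all, 2);		/* visits only vma-b */
	return 0;
}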
 
diff -ruw linux-4.4.115/mm/shmem.c linux-4.4.115-fbx/mm/shmem.c
--- linux-4.4.115/mm/shmem.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/mm/shmem.c	2019-10-29 09:26:25.729223592 +0100
@@ -3405,6 +3405,14 @@
 }
 EXPORT_SYMBOL_GPL(shmem_file_setup);
 
+void shmem_set_file(struct vm_area_struct *vma, struct file *file)
+{
+	if (vma->vm_file)
+		fput(vma->vm_file);
+	vma->vm_file = file;
+	vma->vm_ops = &shmem_vm_ops;
+}
+
 /**
  * shmem_zero_setup - setup a shared anonymous mapping
  * @vma: the vma to be mmapped is prepared by do_mmap_pgoff
@@ -3424,10 +3432,7 @@
 	if (IS_ERR(file))
 		return PTR_ERR(file);
 
-	if (vma->vm_file)
-		fput(vma->vm_file);
-	vma->vm_file = file;
-	vma->vm_ops = &shmem_vm_ops;
+	shmem_set_file(vma, file);
 	return 0;
 }
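
shmem_zero_setup() above is the in-tree caller converted to the new helper; a hypothetical driver mmap handler would use it the same way. example_mmap and its backing-file name are invented for illustration, and the sketch assumes shmem_set_file() is declared in linux/shmem_fs.h as in the Android trees this patch resembles.

#include <linux/err.h>
#include <linux/mm.h>
#include <linux/shmem_fs.h>

static int example_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct file *backing;

	backing = shmem_file_setup("example", vma->vm_end - vma->vm_start,
				   vma->vm_flags);
	if (IS_ERR(backing))
		return PTR_ERR(backing);

	/* Drops any existing vma->vm_file reference, installs ours and
	 * points the vma at the shmem vm_ops. */
	shmem_set_file(vma, backing);
	return 0;
}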
 
diff -ruw linux-4.4.115/mm/slab_common.c linux-4.4.115-fbx/mm/slab_common.c
--- linux-4.4.115/mm/slab_common.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/mm/slab_common.c	2019-01-22 16:16:28.827294618 +0100
@@ -35,7 +35,7 @@
  */
 #define SLAB_NEVER_MERGE (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
 		SLAB_TRACE | SLAB_DESTROY_BY_RCU | SLAB_NOLEAKTRACE | \
-		SLAB_FAILSLAB)
+		SLAB_FAILSLAB | SLAB_KASAN)
 
 #define SLAB_MERGE_SAME (SLAB_RECLAIM_ACCOUNT | SLAB_CACHE_DMA | SLAB_NOTRACK)
 
@@ -453,6 +453,9 @@
 static int shutdown_cache(struct kmem_cache *s,
 		struct list_head *release, bool *need_rcu_barrier)
 {
+	/* free asan quarantined objects */
+	kasan_cache_shutdown(s);
+
 	if (__kmem_cache_shutdown(s) != 0)
 		return -EBUSY;
 
@@ -723,8 +726,8 @@
 		err = shutdown_cache(s, &release, &need_rcu_barrier);
 
 	if (err) {
-		pr_err("kmem_cache_destroy %s: "
-		       "Slab cache still has objects\n", s->name);
+		pr_err("kmem_cache_destroy %s: Slab cache still has objects\n",
+		       s->name);
 		dump_stack();
 	}
 out_unlock:
@@ -750,6 +753,7 @@
 
 	get_online_cpus();
 	get_online_mems();
+	kasan_cache_shrink(cachep);
 	ret = __kmem_cache_shrink(cachep, false);
 	put_online_mems();
 	put_online_cpus();
@@ -1010,7 +1014,7 @@
 	page = alloc_kmem_pages(flags, order);
 	ret = page ? page_address(page) : NULL;
 	kmemleak_alloc(ret, size, 1, flags);
-	kasan_kmalloc_large(ret, size);
+	kasan_kmalloc_large(ret, size, flags);
 	return ret;
 }
 EXPORT_SYMBOL(kmalloc_order);
@@ -1044,13 +1048,11 @@
 #else
 	seq_puts(m, "slabinfo - version: 2.1\n");
 #endif
-	seq_puts(m, "# name            <active_objs> <num_objs> <objsize> "
-		 "<objperslab> <pagesperslab>");
+	seq_puts(m, "# name            <active_objs> <num_objs> <objsize> <objperslab> <pagesperslab>");
 	seq_puts(m, " : tunables <limit> <batchcount> <sharedfactor>");
 	seq_puts(m, " : slabdata <active_slabs> <num_slabs> <sharedavail>");
 #ifdef CONFIG_DEBUG_SLAB
-	seq_puts(m, " : globalstat <listallocs> <maxobjs> <grown> <reaped> "
-		 "<error> <maxfreeable> <nodeallocs> <remotefrees> <alienoverflow>");
+	seq_puts(m, " : globalstat <listallocs> <maxobjs> <grown> <reaped> <error> <maxfreeable> <nodeallocs> <remotefrees> <alienoverflow>");
 	seq_puts(m, " : cpustat <allochit> <allocmiss> <freehit> <freemiss>");
 #endif
 	seq_putc(m, '\n');
@@ -1191,7 +1193,7 @@
 		ks = ksize(p);
 
 	if (ks >= new_size) {
-		kasan_krealloc((void *)p, new_size);
+		kasan_krealloc((void *)p, new_size, flags);
 		return (void *)p;
 	}
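
The kasan_cache_shutdown()/kasan_cache_shrink() hooks added above drain KASAN's quarantine. A minimal userspace model of the idea, assuming nothing about the real KASAN internals: frees are deferred through a small FIFO so use-after-free accesses keep hitting not-yet-reused memory, and cache shutdown must flush the queue first.

#include <stdlib.h>

#define QUARANTINE_SLOTS 4

static void *quarantine[QUARANTINE_SLOTS];
static unsigned int q_head;

static void quarantine_put(void *obj)
{
	void **slot = &quarantine[q_head++ % QUARANTINE_SLOTS];

	free(*slot);		/* evict the oldest entry, if any */
	*slot = obj;		/* defer this object's real free */
}

static void quarantine_drain(void)	/* kasan_cache_shutdown() analogue */
{
	unsigned int i;

	for (i = 0; i < QUARANTINE_SLOTS; i++) {
		free(quarantine[i]);
		quarantine[i] = NULL;
	}
}

int main(void)
{
	int i;

	for (i = 0; i < 6; i++)
		quarantine_put(malloc(32));
	quarantine_drain();
	return 0;
}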
 
diff -ruw linux-4.4.115/mm/slab.h linux-4.4.115-fbx/mm/slab.h
--- linux-4.4.115/mm/slab.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/mm/slab.h	2019-01-22 16:16:28.827294618 +0100
@@ -371,4 +371,6 @@
 void slab_stop(struct seq_file *m, void *p);
 int memcg_slab_show(struct seq_file *m, void *p);
 
+void ___cache_free(struct kmem_cache *cache, void *x, unsigned long addr);
+
 #endif /* MM_SLAB_H */
diff -ruw linux-4.4.115/mm/slub.c linux-4.4.115-fbx/mm/slub.c
--- linux-4.4.115/mm/slub.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/mm/slub.c	2019-10-29 09:26:25.733223631 +0100
@@ -124,6 +124,14 @@
 #endif
 }
 
+static inline void *fixup_red_left(struct kmem_cache *s, void *p)
+{
+	if (kmem_cache_debug(s) && s->flags & SLAB_RED_ZONE)
+		p += s->red_left_pad;
+
+	return p;
+}
+
 static inline bool kmem_cache_has_cpu_partial(struct kmem_cache *s)
 {
 #ifdef CONFIG_SLUB_CPU_PARTIAL
@@ -224,24 +232,6 @@
  * 			Core slab cache functions
  *******************************************************************/
 
-/* Verify that a pointer has an address that is valid within a slab page */
-static inline int check_valid_pointer(struct kmem_cache *s,
-				struct page *page, const void *object)
-{
-	void *base;
-
-	if (!object)
-		return 1;
-
-	base = page_address(page);
-	if (object < base || object >= base + page->objects * s->size ||
-		(object - base) % s->size) {
-		return 0;
-	}
-
-	return 1;
-}
-
 static inline void *get_freepointer(struct kmem_cache *s, void *object)
 {
 	return *(void **)(object + s->offset);
@@ -271,11 +261,13 @@
 
 /* Loop over all objects in a slab */
 #define for_each_object(__p, __s, __addr, __objects) \
-	for (__p = (__addr); __p < (__addr) + (__objects) * (__s)->size;\
+	for (__p = fixup_red_left(__s, __addr); \
+		__p < (__addr) + (__objects) * (__s)->size; \
 			__p += (__s)->size)
 
 #define for_each_object_idx(__p, __idx, __s, __addr, __objects) \
-	for (__p = (__addr), __idx = 1; __idx <= __objects;\
+	for (__p = fixup_red_left(__s, __addr), __idx = 1; \
+		__idx <= __objects; \
 			__p += (__s)->size, __idx++)
 
 /* Determine object index from a given position */
@@ -295,6 +287,9 @@
 		return s->object_size;
 
 #endif
+	if (s->flags & SLAB_KASAN)
+		return s->object_size;
+
 	/*
 	 * If we have the need to store the freelist pointer
 	 * back there or track user information then we can
@@ -456,13 +451,27 @@
 		set_bit(slab_index(p, s, addr), map);
 }
 
+static inline int size_from_object(struct kmem_cache *s)
+{
+	if (s->flags & SLAB_RED_ZONE)
+		return s->size - s->red_left_pad;
+
+	return s->size;
+}
+
+static inline void *restore_red_left(struct kmem_cache *s, void *p)
+{
+	if (s->flags & SLAB_RED_ZONE)
+		p -= s->red_left_pad;
+
+	return p;
+}
+
 /*
  * Debug settings:
  */
 #if defined(CONFIG_SLUB_DEBUG_ON)
 static int slub_debug = DEBUG_DEFAULT_FLAGS;
-#elif defined(CONFIG_KASAN)
-static int slub_debug = SLAB_STORE_USER;
 #else
 static int slub_debug;
 #endif
@@ -489,6 +498,26 @@
 /*
  * Object debugging
  */
+
+/* Verify that a pointer has an address that is valid within a slab page */
+static inline int check_valid_pointer(struct kmem_cache *s,
+				struct page *page, void *object)
+{
+	void *base;
+
+	if (!object)
+		return 1;
+
+	base = page_address(page);
+	object = restore_red_left(s, object);
+	if (object < base || object >= base + page->objects * s->size ||
+		(object - base) % s->size) {
+		return 0;
+	}
+
+	return 1;
+}
+
 static void print_section(char *text, u8 *addr, unsigned int length)
 {
 	metadata_access_enable();
@@ -628,7 +657,9 @@
 	pr_err("INFO: Object 0x%p @offset=%tu fp=0x%p\n\n",
 	       p, p - addr, get_freepointer(s, p));
 
-	if (p > addr + 16)
+	if (s->flags & SLAB_RED_ZONE)
+		print_section("Redzone ", p - s->red_left_pad, s->red_left_pad);
+	else if (p > addr + 16)
 		print_section("Bytes b4 ", p - 16, 16);
 
 	print_section("Object ", p, min_t(unsigned long, s->object_size,
@@ -645,18 +676,30 @@
 	if (s->flags & SLAB_STORE_USER)
 		off += 2 * sizeof(struct track);
 
-	if (off != s->size)
+	off += kasan_metadata_size(s);
+
+	if (off != size_from_object(s))
 		/* Beginning of the filler is the free pointer */
-		print_section("Padding ", p + off, s->size - off);
+		print_section("Padding ", p + off, size_from_object(s) - off);
 
 	dump_stack();
 }
 
+#ifdef CONFIG_SLUB_DEBUG_PANIC_ON
+static void slab_panic(const char *cause)
+{
+	panic("%s\n", cause);
+}
+#else
+static inline void slab_panic(const char *cause) {}
+#endif
+
 void object_err(struct kmem_cache *s, struct page *page,
 			u8 *object, char *reason)
 {
 	slab_bug(s, "%s", reason);
 	print_trailer(s, page, object);
+	slab_panic(reason);
 }
 
 static void slab_err(struct kmem_cache *s, struct page *page,
@@ -671,12 +714,16 @@
 	slab_bug(s, "%s", buf);
 	print_page_info(page);
 	dump_stack();
+	slab_panic("slab error");
 }
 
 static void init_object(struct kmem_cache *s, void *object, u8 val)
 {
 	u8 *p = object;
 
+	if (s->flags & SLAB_RED_ZONE)
+		memset(p - s->red_left_pad, val, s->red_left_pad);
+
 	if (s->flags & __OBJECT_POISON) {
 		memset(p, POISON_FREE, s->object_size - 1);
 		p[s->object_size - 1] = POISON_END;
@@ -689,6 +736,7 @@
 static void restore_bytes(struct kmem_cache *s, char *message, u8 data,
 						void *from, void *to)
 {
+	slab_panic("object poison overwritten");
 	slab_fix(s, "Restoring 0x%p-0x%p=0x%x\n", from, to - 1, data);
 	memset(from, data, to - from);
 }
@@ -769,11 +817,13 @@
 		/* We also have user information there */
 		off += 2 * sizeof(struct track);
 
-	if (s->size == off)
+	off += kasan_metadata_size(s);
+
+	if (size_from_object(s) == off)
 		return 1;
 
 	return check_bytes_and_report(s, page, p, "Object padding",
-				p + off, POISON_INUSE, s->size - off);
+			p + off, POISON_INUSE, size_from_object(s) - off);
 }
 
 /* Check the pad bytes at the end of a slab page */
@@ -818,6 +868,10 @@
 
 	if (s->flags & SLAB_RED_ZONE) {
 		if (!check_bytes_and_report(s, page, object, "Redzone",
+			object - s->red_left_pad, val, s->red_left_pad))
+			return 0;
+
+		if (!check_bytes_and_report(s, page, object, "Redzone",
 			endobject, val, s->inuse - s->object_size))
 			return 0;
 	} else {
@@ -928,14 +982,14 @@
 		max_objects = MAX_OBJS_PER_PAGE;
 
 	if (page->objects != max_objects) {
-		slab_err(s, page, "Wrong number of objects. Found %d but "
-			"should be %d", page->objects, max_objects);
+		slab_err(s, page, "Wrong number of objects. Found %d but should be %d",
+			 page->objects, max_objects);
 		page->objects = max_objects;
 		slab_fix(s, "Number of objects adjusted.");
 	}
 	if (page->inuse != page->objects - nr) {
-		slab_err(s, page, "Wrong object count. Counter is %d but "
-			"counted were %d", page->inuse, page->objects - nr);
+		slab_err(s, page, "Wrong object count. Counter is %d but counted were %d",
+			 page->inuse, page->objects - nr);
 		page->inuse = page->objects - nr;
 		slab_fix(s, "Object count adjusted.");
 	}
@@ -1099,8 +1153,8 @@
 
 	if (unlikely(s != page->slab_cache)) {
 		if (!PageSlab(page)) {
-			slab_err(s, page, "Attempt to free object(0x%p) "
-				"outside of slab", object);
+			slab_err(s, page, "Attempt to free object(0x%p) outside of slab",
+				 object);
 		} else if (!page->slab_cache) {
 			pr_err("SLUB <none>: no slab for object 0x%p.\n",
 			       object);
@@ -1270,7 +1324,7 @@
 static inline void kmalloc_large_node_hook(void *ptr, size_t size, gfp_t flags)
 {
 	kmemleak_alloc(ptr, size, 1, flags);
-	kasan_kmalloc_large(ptr, size);
+	kasan_kmalloc_large(ptr, size, flags);
 }
 
 static inline void kfree_hook(const void *x)
@@ -1304,13 +1358,15 @@
 		kmemcheck_slab_alloc(s, flags, object, slab_ksize(s));
 		kmemleak_alloc_recursive(object, s->object_size, 1,
 					 s->flags, flags);
-		kasan_slab_alloc(s, object);
+		kasan_slab_alloc(s, object, flags);
 	}
 	memcg_kmem_put_cache(s);
 }
 
-static inline void slab_free_hook(struct kmem_cache *s, void *x)
+static inline void *slab_free_hook(struct kmem_cache *s, void *x)
 {
+	void *freeptr;
+
 	kmemleak_free_recursive(x, s->flags);
 
 	/*
@@ -1331,7 +1387,13 @@
 	if (!(s->flags & SLAB_DEBUG_OBJECTS))
 		debug_check_no_obj_freed(x, s->object_size);
 
+	freeptr = get_freepointer(s, x);
+	/*
+	 * kasan_slab_free() may put x into memory quarantine, delaying its
+	 * reuse. In this case the object's freelist pointer is changed.
+	 */
 	kasan_slab_free(s, x);
+	return freeptr;
 }
 
 static inline void slab_free_freelist_hook(struct kmem_cache *s,
@@ -1349,11 +1411,11 @@
 
 	void *object = head;
 	void *tail_obj = tail ? : head;
+	void *freeptr;
 
 	do {
-		slab_free_hook(s, object);
-	} while ((object != tail_obj) &&
-		 (object = get_freepointer(s, object)));
+		freeptr = slab_free_hook(s, object);
+	} while ((object != tail_obj) && (object = freeptr));
 #endif
 }
 
@@ -1361,6 +1423,7 @@
 				void *object)
 {
 	setup_object_debug(s, page, object);
+	kasan_init_slab_obj(s, object);
 	if (unlikely(s->ctor)) {
 		kasan_unpoison_object_data(s, object);
 		s->ctor(object);
@@ -1468,7 +1531,7 @@
 			set_freepointer(s, p, NULL);
 	}
 
-	page->freelist = start;
+	page->freelist = fixup_red_left(s, start);
 	page->inuse = page->objects;
 	page->frozen = 1;
 
@@ -1526,6 +1589,7 @@
 	page_mapcount_reset(page);
 	if (current->reclaim_state)
 		current->reclaim_state->reclaimed_slab += pages;
+	kasan_alloc_pages(page, order);
 	__free_kmem_pages(page, order);
 }
 
@@ -2588,7 +2652,7 @@
 {
 	void *ret = slab_alloc(s, gfpflags, _RET_IP_);
 	trace_kmalloc(_RET_IP_, ret, size, s->size, gfpflags);
-	kasan_kmalloc(s, ret, size);
+	kasan_kmalloc(s, ret, size, gfpflags);
 	return ret;
 }
 EXPORT_SYMBOL(kmem_cache_alloc_trace);
@@ -2616,7 +2680,7 @@
 	trace_kmalloc_node(_RET_IP_, ret,
 			   size, s->size, gfpflags, node);
 
-	kasan_kmalloc(s, ret, size);
+	kasan_kmalloc(s, ret, size, gfpflags);
 	return ret;
 }
 EXPORT_SYMBOL(kmem_cache_alloc_node_trace);
@@ -2761,16 +2825,13 @@
  * same page) possible by specifying head and tail ptr, plus objects
  * count (cnt). Bulk free indicated by tail pointer being set.
  */
-static __always_inline void slab_free(struct kmem_cache *s, struct page *page,
-				      void *head, void *tail, int cnt,
-				      unsigned long addr)
+static __always_inline void do_slab_free(struct kmem_cache *s,
+				struct page *page, void *head, void *tail,
+				int cnt, unsigned long addr)
 {
 	void *tail_obj = tail ? : head;
 	struct kmem_cache_cpu *c;
 	unsigned long tid;
-
-	slab_free_freelist_hook(s, head, tail);
-
 redo:
 	/*
 	 * Determine the currently cpus per cpu slab.
@@ -2804,6 +2865,27 @@
 
 }
 
+static __always_inline void slab_free(struct kmem_cache *s, struct page *page,
+				      void *head, void *tail, int cnt,
+				      unsigned long addr)
+{
+	slab_free_freelist_hook(s, head, tail);
+	/*
+	 * slab_free_freelist_hook() could have put the items into quarantine.
+	 * If so, no need to free them.
+	 */
+	if (s->flags & SLAB_KASAN && !(s->flags & SLAB_DESTROY_BY_RCU))
+		return;
+	do_slab_free(s, page, head, tail, cnt, addr);
+}
+
+#ifdef CONFIG_KASAN
+void ___cache_free(struct kmem_cache *cache, void *x, unsigned long addr)
+{
+	do_slab_free(cache, virt_to_head_page(x), x, NULL, 1, addr);
+}
+#endif
+
 void kmem_cache_free(struct kmem_cache *s, void *x)
 {
 	s = cache_from_obj(s, x);
@@ -3160,7 +3242,8 @@
 	init_object(kmem_cache_node, n, SLUB_RED_ACTIVE);
 	init_tracking(kmem_cache_node, n);
 #endif
-	kasan_kmalloc(kmem_cache_node, n, sizeof(struct kmem_cache_node));
+	kasan_kmalloc(kmem_cache_node, n, sizeof(struct kmem_cache_node),
+		      GFP_KERNEL);
 	init_kmem_cache_node(n);
 	inc_slabs_node(kmem_cache_node, node, page->objects);
 
@@ -3223,7 +3306,7 @@
 static int calculate_sizes(struct kmem_cache *s, int forced_order)
 {
 	unsigned long flags = s->flags;
-	unsigned long size = s->object_size;
+	size_t size = s->object_size;
 	int order;
 
 	/*
@@ -3282,8 +3365,11 @@
 		 * the object.
 		 */
 		size += 2 * sizeof(struct track);
+#endif
 
-	if (flags & SLAB_RED_ZONE)
+	kasan_cache_create(s, &size, &s->flags);
+#ifdef CONFIG_SLUB_DEBUG
+	if (flags & SLAB_RED_ZONE) {
 		/*
 		 * Add some empty padding so that we can catch
 		 * overwrites from earlier objects rather than let
@@ -3292,6 +3378,11 @@
 		 * of the object.
 		 */
 		size += sizeof(void *);
+
+		s->red_left_pad = sizeof(void *);
+		s->red_left_pad = ALIGN(s->red_left_pad, s->align);
+		size += s->red_left_pad;
+	}
 #endif
 
 	/*
@@ -3406,8 +3497,7 @@
 	free_kmem_cache_nodes(s);
 error:
 	if (flags & SLAB_PANIC)
-		panic("Cannot create slab %s size=%lu realsize=%u "
-			"order=%u offset=%u flags=%lx\n",
+		panic("Cannot create slab %s size=%lu realsize=%u order=%u offset=%u flags=%lx\n",
 			s->name, (unsigned long)s->size, s->size,
 			oo_order(s->oo), s->offset, flags);
 	return -EINVAL;
@@ -3533,7 +3623,7 @@
 
 	trace_kmalloc(_RET_IP_, ret, size, s->size, flags);
 
-	kasan_kmalloc(s, ret, size);
+	kasan_kmalloc(s, ret, size, flags);
 
 	return ret;
 }
@@ -3578,13 +3668,53 @@
 
 	trace_kmalloc_node(_RET_IP_, ret, size, s->size, flags, node);
 
-	kasan_kmalloc(s, ret, size);
+	kasan_kmalloc(s, ret, size, flags);
 
 	return ret;
 }
 EXPORT_SYMBOL(__kmalloc_node);
 #endif
 
+#ifdef CONFIG_HARDENED_USERCOPY
+/*
+ * Rejects objects that are incorrectly sized.
+ *
+ * Returns NULL if check passes, otherwise const char * to name of cache
+ * to indicate an error.
+ */
+const char *__check_heap_object(const void *ptr, unsigned long n,
+				struct page *page)
+{
+	struct kmem_cache *s;
+	unsigned long offset;
+	size_t object_size;
+
+	/* Find object and usable object size. */
+	s = page->slab_cache;
+	object_size = slab_ksize(s);
+
+	/* Reject impossible pointers. */
+	if (ptr < page_address(page))
+		return s->name;
+
+	/* Find offset within object. */
+	offset = (ptr - page_address(page)) % s->size;
+
+	/* Adjust for redzone and reject if within the redzone. */
+	if (kmem_cache_debug(s) && s->flags & SLAB_RED_ZONE) {
+		if (offset < s->red_left_pad)
+			return s->name;
+		offset -= s->red_left_pad;
+	}
+
+	/* Allow address range falling entirely within object size. */
+	if (offset <= object_size && n <= object_size - offset)
+		return NULL;
+
+	return s->name;
+}
+#endif /* CONFIG_HARDENED_USERCOPY */
+
 static size_t __ksize(const void *object)
 {
 	struct page *page;
@@ -3607,7 +3737,7 @@
 	size_t size = __ksize(object);
 	/* We assume that ksize callers could use whole allocated area,
 	   so we need unpoison this area. */
-	kasan_krealloc(object, size);
+	kasan_krealloc(object, size, GFP_NOWAIT);
 	return size;
 }
 EXPORT_SYMBOL(ksize);
@@ -3626,6 +3756,7 @@
 	if (unlikely(!PageSlab(page))) {
 		BUG_ON(!PageCompound(page));
 		kfree_hook(x);
+		kasan_alloc_pages(page, compound_order(page));
 		__free_kmem_pages(page, compound_order(page));
 		return;
 	}
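
A userspace sketch of the left red zone bookkeeping the SLUB hunks above introduce: fixup_red_left() moves the freelist pointer handed out past the pad, and check_valid_pointer() subtracts the pad again before validating. The sizes are illustrative, not real cache geometry.

#include <stdint.h>
#include <stdio.h>

struct cache_model {
	size_t size;		/* full slot size, red zones included */
	size_t red_left_pad;	/* bytes of red zone before the object */
};

static void *fixup_red_left(const struct cache_model *s, void *p)
{
	return (char *)p + s->red_left_pad;
}

static void *restore_red_left(const struct cache_model *s, void *p)
{
	return (char *)p - s->red_left_pad;
}

static int check_valid_pointer(const struct cache_model *s, void *base,
			       size_t nr_objects, void *object)
{
	char *obj = restore_red_left(s, object);

	if (obj < (char *)base ||
	    obj >= (char *)base + nr_objects * s->size ||
	    (size_t)(obj - (char *)base) % s->size)
		return 0;
	return 1;
}

int main(void)
{
	struct cache_model s = { .size = 128, .red_left_pad = 16 };
	static char slab[4 * 128];
	void *first = fixup_red_left(&s, slab);

	printf("object #0 at offset %td, valid=%d\n",
	       (char *)first - slab, check_valid_pointer(&s, slab, 4, first));
	printf("raw slot start, valid=%d\n",
	       check_valid_pointer(&s, slab, 4, slab));
	return 0;
}
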
diff -ruw linux-4.4.115/mm/sparse.c linux-4.4.115-fbx/mm/sparse.c
--- linux-4.4.115/mm/sparse.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/mm/sparse.c	2019-01-22 16:16:28.831294655 +0100
@@ -428,8 +428,8 @@
 		if (map_map[pnum])
 			continue;
 		ms = __nr_to_section(pnum);
-		printk(KERN_ERR "%s: sparsemem memory map backing failed "
-			"some memory will not be available.\n", __func__);
+		printk(KERN_ERR "%s: sparsemem memory map backing failed some memory will not be available.\n",
+		       __func__);
 		ms->section_mem_map = 0;
 	}
 }
@@ -456,8 +456,8 @@
 	if (map)
 		return map;
 
-	printk(KERN_ERR "%s: sparsemem memory map backing failed "
-			"some memory will not be available.\n", __func__);
+	printk(KERN_ERR "%s: sparsemem memory map backing failed some memory will not be available.\n",
+	       __func__);
 	ms->section_mem_map = 0;
 	return NULL;
 }
diff -ruw linux-4.4.115/mm/sparse-vmemmap.c linux-4.4.115-fbx/mm/sparse-vmemmap.c
--- linux-4.4.115/mm/sparse-vmemmap.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/mm/sparse-vmemmap.c	2019-01-22 16:16:28.831294655 +0100
@@ -94,8 +94,8 @@
 	int actual_node = early_pfn_to_nid(pfn);
 
 	if (node_distance(actual_node, node) > LOCAL_DISTANCE)
-		printk(KERN_WARNING "[%lx-%lx] potential offnode "
-			"page_structs\n", start, end - 1);
+		printk(KERN_WARNING "[%lx-%lx] potential offnode page_structs\n",
+		       start, end - 1);
 }
 
 pte_t * __meminit vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node)
@@ -220,8 +220,8 @@
 		if (map_map[pnum])
 			continue;
 		ms = __nr_to_section(pnum);
-		printk(KERN_ERR "%s: sparsemem memory map backing failed "
-			"some memory will not be available.\n", __func__);
+		printk(KERN_ERR "%s: sparsemem memory map backing failed some memory will not be available.\n",
+		       __func__);
 		ms->section_mem_map = 0;
 	}
 
diff -ruw linux-4.4.115/mm/truncate.c linux-4.4.115-fbx/mm/truncate.c
--- linux-4.4.115/mm/truncate.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/mm/truncate.c	2019-10-29 09:26:25.737223670 +0100
@@ -63,6 +63,171 @@
 	spin_unlock_irq(&mapping->tree_lock);
 }
 
+static void do_truncate_inode_pages_range(struct address_space *mapping,
+				loff_t lstart, loff_t lend, bool fill_zero)
+{
+	pgoff_t		start;		/* inclusive */
+	pgoff_t		end;		/* exclusive */
+	unsigned int	partial_start;	/* inclusive */
+	unsigned int	partial_end;	/* exclusive */
+	struct pagevec	pvec;
+	pgoff_t		indices[PAGEVEC_SIZE];
+	pgoff_t		index;
+	int		i;
+
+	cleancache_invalidate_inode(mapping);
+	if (mapping->nrpages == 0 && mapping->nrshadows == 0)
+		return;
+
+	/* Offsets within partial pages */
+	partial_start = lstart & (PAGE_CACHE_SIZE - 1);
+	partial_end = (lend + 1) & (PAGE_CACHE_SIZE - 1);
+
+	/*
+	 * 'start' and 'end' always covers the range of pages to be fully
+	 * truncated. Partial pages are covered with 'partial_start' at the
+	 * start of the range and 'partial_end' at the end of the range.
+	 * Note that 'end' is exclusive while 'lend' is inclusive.
+	 */
+	start = (lstart + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
+	if (lend == -1)
+		/*
+		 * lend == -1 indicates end-of-file so we have to set 'end'
+		 * to the highest possible pgoff_t and since the type is
+		 * unsigned we're using -1.
+		 */
+		end = -1;
+	else
+		end = (lend + 1) >> PAGE_CACHE_SHIFT;
+
+	pagevec_init(&pvec, 0);
+	index = start;
+
+	while (index < end && pagevec_lookup_entries(&pvec, mapping, index,
+			min(end - index, (pgoff_t)PAGEVEC_SIZE),
+			indices)) {
+		for (i = 0; i < pagevec_count(&pvec); i++) {
+			struct page *page = pvec.pages[i];
+
+			/* We rely upon deletion not changing page->index */
+			index = indices[i];
+			if (index >= end)
+				break;
+
+			if (radix_tree_exceptional_entry(page)) {
+				clear_exceptional_entry(mapping, index, page);
+				continue;
+			}
+
+			if (!trylock_page(page))
+				continue;
+			WARN_ON(page->index != index);
+			if (PageWriteback(page)) {
+				unlock_page(page);
+				continue;
+			}
+			truncate_inode_page(mapping, page);
+			if (fill_zero)
+				zero_user(page, 0, PAGE_CACHE_SIZE);
+			unlock_page(page);
+		}
+		pagevec_remove_exceptionals(&pvec);
+		pagevec_release(&pvec);
+		cond_resched();
+		index++;
+	}
+
+	if (partial_start) {
+		struct page *page = find_lock_page(mapping, start - 1);
+
+		if (page) {
+			unsigned int top = PAGE_CACHE_SIZE;
+
+			if (start > end) {
+				/* Truncation within a single page */
+				top = partial_end;
+				partial_end = 0;
+			}
+			wait_on_page_writeback(page);
+			zero_user_segment(page, partial_start, top);
+			cleancache_invalidate_page(mapping, page);
+			if (page_has_private(page))
+				do_invalidatepage(page, partial_start,
+						  top - partial_start);
+			unlock_page(page);
+			page_cache_release(page);
+		}
+	}
+	if (partial_end) {
+		struct page *page = find_lock_page(mapping, end);
+
+		if (page) {
+			wait_on_page_writeback(page);
+			zero_user_segment(page, 0, partial_end);
+			cleancache_invalidate_page(mapping, page);
+			if (page_has_private(page))
+				do_invalidatepage(page, 0,
+						  partial_end);
+			unlock_page(page);
+			page_cache_release(page);
+		}
+	}
+	/*
+	 * If the truncation happened within a single page no pages
+	 * will be released, just zeroed, so we can bail out now.
+	 */
+	if (start >= end)
+		return;
+
+	index = start;
+	for ( ; ; ) {
+		cond_resched();
+		if (!pagevec_lookup_entries(&pvec, mapping, index,
+			min(end - index, (pgoff_t)PAGEVEC_SIZE), indices)) {
+			/* If all gone from start onwards, we're done */
+			if (index == start)
+				break;
+			/* Otherwise restart to make sure all gone */
+			index = start;
+			continue;
+		}
+		if (index == start && indices[0] >= end) {
+			/* All gone out of hole to be punched, we're done */
+			pagevec_remove_exceptionals(&pvec);
+			pagevec_release(&pvec);
+			break;
+		}
+		for (i = 0; i < pagevec_count(&pvec); i++) {
+			struct page *page = pvec.pages[i];
+
+			/* We rely upon deletion not changing page->index */
+			index = indices[i];
+			if (index >= end) {
+				/* Restart punch to make sure all gone */
+				index = start - 1;
+				break;
+			}
+
+			if (radix_tree_exceptional_entry(page)) {
+				clear_exceptional_entry(mapping, index, page);
+				continue;
+			}
+
+			lock_page(page);
+			WARN_ON(page->index != index);
+			wait_on_page_writeback(page);
+			truncate_inode_page(mapping, page);
+			if (fill_zero)
+				zero_user(page, 0, PAGE_CACHE_SIZE);
+			unlock_page(page);
+		}
+		pagevec_remove_exceptionals(&pvec);
+		pagevec_release(&pvec);
+		index++;
+	}
+	cleancache_invalidate_inode(mapping);
+}
+
 /**
  * do_invalidatepage - invalidate part or all of a page
  * @page: the page which is affected
@@ -218,160 +383,41 @@
 void truncate_inode_pages_range(struct address_space *mapping,
 				loff_t lstart, loff_t lend)
 {
-	pgoff_t		start;		/* inclusive */
-	pgoff_t		end;		/* exclusive */
-	unsigned int	partial_start;	/* inclusive */
-	unsigned int	partial_end;	/* exclusive */
-	struct pagevec	pvec;
-	pgoff_t		indices[PAGEVEC_SIZE];
-	pgoff_t		index;
-	int		i;
-
-	cleancache_invalidate_inode(mapping);
-	if (mapping->nrpages == 0 && mapping->nrshadows == 0)
-		return;
-
-	/* Offsets within partial pages */
-	partial_start = lstart & (PAGE_CACHE_SIZE - 1);
-	partial_end = (lend + 1) & (PAGE_CACHE_SIZE - 1);
-
-	/*
-	 * 'start' and 'end' always covers the range of pages to be fully
-	 * truncated. Partial pages are covered with 'partial_start' at the
-	 * start of the range and 'partial_end' at the end of the range.
-	 * Note that 'end' is exclusive while 'lend' is inclusive.
-	 */
-	start = (lstart + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
-	if (lend == -1)
-		/*
-		 * lend == -1 indicates end-of-file so we have to set 'end'
-		 * to the highest possible pgoff_t and since the type is
-		 * unsigned we're using -1.
-		 */
-		end = -1;
-	else
-		end = (lend + 1) >> PAGE_CACHE_SHIFT;
-
-	pagevec_init(&pvec, 0);
-	index = start;
-	while (index < end && pagevec_lookup_entries(&pvec, mapping, index,
-			min(end - index, (pgoff_t)PAGEVEC_SIZE),
-			indices)) {
-		for (i = 0; i < pagevec_count(&pvec); i++) {
-			struct page *page = pvec.pages[i];
-
-			/* We rely upon deletion not changing page->index */
-			index = indices[i];
-			if (index >= end)
-				break;
-
-			if (radix_tree_exceptional_entry(page)) {
-				clear_exceptional_entry(mapping, index, page);
-				continue;
-			}
-
-			if (!trylock_page(page))
-				continue;
-			WARN_ON(page->index != index);
-			if (PageWriteback(page)) {
-				unlock_page(page);
-				continue;
-			}
-			truncate_inode_page(mapping, page);
-			unlock_page(page);
-		}
-		pagevec_remove_exceptionals(&pvec);
-		pagevec_release(&pvec);
-		cond_resched();
-		index++;
+	do_truncate_inode_pages_range(mapping, lstart, lend, false);
 }
+EXPORT_SYMBOL(truncate_inode_pages_range);
 
-	if (partial_start) {
-		struct page *page = find_lock_page(mapping, start - 1);
-		if (page) {
-			unsigned int top = PAGE_CACHE_SIZE;
-			if (start > end) {
-				/* Truncation within a single page */
-				top = partial_end;
-				partial_end = 0;
-			}
-			wait_on_page_writeback(page);
-			zero_user_segment(page, partial_start, top);
-			cleancache_invalidate_page(mapping, page);
-			if (page_has_private(page))
-				do_invalidatepage(page, partial_start,
-						  top - partial_start);
-			unlock_page(page);
-			page_cache_release(page);
-		}
-	}
-	if (partial_end) {
-		struct page *page = find_lock_page(mapping, end);
-		if (page) {
-			wait_on_page_writeback(page);
-			zero_user_segment(page, 0, partial_end);
-			cleancache_invalidate_page(mapping, page);
-			if (page_has_private(page))
-				do_invalidatepage(page, 0,
-						  partial_end);
-			unlock_page(page);
-			page_cache_release(page);
-		}
-	}
-	/*
-	 * If the truncation happened within a single page no pages
-	 * will be released, just zeroed, so we can bail out now.
+/**
+ * truncate_inode_pages_range_fill_zero - truncate the range of pages
+ * specified by start and end byte offsets and zero them out
+ * @mapping: mapping to truncate
+ * @lstart: offset from which to truncate
+ * @lend: offset to which to truncate (inclusive)
+ *
+ * Truncate the page cache, removing the pages that are between the
+ * specified offsets (and zeroing out partial pages if lstart or
+ * lend + 1 is not page aligned).
+ *
+ * Truncate takes two passes - the first pass is nonblocking.  It will not
+ * block on page locks and it will not block on writeback.  The second pass
+ * will wait.  This is to prevent as much IO as possible in the affected region.
+ * The first pass will remove most pages, so the search cost of the second pass
+ * is low.
+ *
+ * We pass down the cache-hot hint to the page freeing code.  Even if the
+ * mapping is large, it is probably the case that the final pages are the most
+ * recently touched, and freeing happens in ascending file offset order.
+ *
+ * Note that since ->invalidatepage() accepts a range to invalidate,
+ * truncate_inode_pages_range is able to properly handle cases where
+ * lend + 1 is not page aligned.
 */
-	if (start >= end)
-		return;
-
-	index = start;
-	for ( ; ; ) {
-		cond_resched();
-		if (!pagevec_lookup_entries(&pvec, mapping, index,
-			min(end - index, (pgoff_t)PAGEVEC_SIZE), indices)) {
-			/* If all gone from start onwards, we're done */
-			if (index == start)
-				break;
-			/* Otherwise restart to make sure all gone */
-			index = start;
-			continue;
-		}
-		if (index == start && indices[0] >= end) {
-			/* All gone out of hole to be punched, we're done */
-			pagevec_remove_exceptionals(&pvec);
-			pagevec_release(&pvec);
-			break;
-		}
-		for (i = 0; i < pagevec_count(&pvec); i++) {
-			struct page *page = pvec.pages[i];
-
-			/* We rely upon deletion not changing page->index */
-			index = indices[i];
-			if (index >= end) {
-				/* Restart punch to make sure all gone */
-				index = start - 1;
-				break;
-			}
-
-			if (radix_tree_exceptional_entry(page)) {
-				clear_exceptional_entry(mapping, index, page);
-				continue;
-			}
-
-			lock_page(page);
-			WARN_ON(page->index != index);
-			wait_on_page_writeback(page);
-			truncate_inode_page(mapping, page);
-			unlock_page(page);
-		}
-		pagevec_remove_exceptionals(&pvec);
-		pagevec_release(&pvec);
-		index++;
-	}
-	cleancache_invalidate_inode(mapping);
+void truncate_inode_pages_range_fill_zero(struct address_space *mapping,
+				loff_t lstart, loff_t lend)
+{
+	do_truncate_inode_pages_range(mapping, lstart, lend, true);
 }
-EXPORT_SYMBOL(truncate_inode_pages_range);
+EXPORT_SYMBOL(truncate_inode_pages_range_fill_zero);
 
 /**
  * truncate_inode_pages - truncate *all* the pages from an offset
@@ -392,6 +438,27 @@
 EXPORT_SYMBOL(truncate_inode_pages);
 
 /**
+ * truncate_inode_pages_fill_zero - truncate *all* the pages from an offset
+ * and zero them out
+ * @mapping: mapping to truncate
+ * @lstart: offset from which to truncate
+ *
+ * Called under (and serialised by) inode->i_mutex.
+ *
+ * Note: When this function returns, there can be a page in the process of
+ * deletion (inside __delete_from_page_cache()) in the specified range.  Thus
+ * mapping->nrpages can be non-zero when this function returns even after
+ * truncation of the whole mapping.
+ */
+void truncate_inode_pages_fill_zero(struct address_space *mapping,
+	loff_t lstart)
+{
+	truncate_inode_pages_range_fill_zero(mapping, lstart, (loff_t)-1);
+}
+EXPORT_SYMBOL(truncate_inode_pages_fill_zero);
+
+
+/**
  * truncate_inode_pages_final - truncate *all* pages before inode dies
  * @mapping: mapping to truncate
  *
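
A hypothetical caller of the new zero-filling variant; example_secure_evict is invented for illustration (the truncate.c hunks add the exports but no user), and the declaration is assumed to live in linux/mm.h.

#include <linux/fs.h>
#include <linux/mm.h>

static void example_secure_evict(struct inode *inode)
{
	/* Same semantics as truncate_inode_pages(), except every page is
	 * zeroed before being released, scrubbing cached contents. */
	truncate_inode_pages_fill_zero(inode->i_mapping, 0);
}
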
diff -ruw linux-4.4.115/mm/util.c linux-4.4.115-fbx/mm/util.c
--- linux-4.4.115/mm/util.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/mm/util.c	2019-10-29 09:26:25.741223709 +0100
@@ -344,10 +344,12 @@
 	}
 
 	mapping = (unsigned long)page->mapping;
-	if (mapping & PAGE_MAPPING_FLAGS)
+	if ((unsigned long)mapping & PAGE_MAPPING_ANON)
 		return NULL;
-	return page->mapping;
+
+	return (void *)((unsigned long)mapping & ~PAGE_MAPPING_FLAGS);
 }
+EXPORT_SYMBOL(page_mapping);
 
 int overcommit_ratio_handler(struct ctl_table *table, int write,
 			     void __user *buffer, size_t *lenp,
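
A userspace model of the page_mapping() fix above: page->mapping is a tagged pointer, and the function now strips the low flag bits before returning it instead of handing back the tagged value. The bit definitions mirror the 4.4-era PAGE_MAPPING_* constants.

#include <stdint.h>
#include <stdio.h>

#define PAGE_MAPPING_ANON	0x1UL
#define PAGE_MAPPING_KSM	0x2UL
#define PAGE_MAPPING_FLAGS	(PAGE_MAPPING_ANON | PAGE_MAPPING_KSM)

static void *page_mapping(void *raw)
{
	uintptr_t m = (uintptr_t)raw;

	if (m & PAGE_MAPPING_ANON)	/* anon pages have no mapping */
		return NULL;

	return (void *)(m & ~PAGE_MAPPING_FLAGS);	/* strip tag bits */
}

int main(void)
{
	static long backing;		/* stands in for an address_space */
	void *anon = (void *)((uintptr_t)&backing | PAGE_MAPPING_ANON);

	printf("file-backed: %p -> %p\n", (void *)&backing,
	       page_mapping(&backing));
	printf("anonymous:   %p -> %p\n", anon, page_mapping(anon));
	return 0;
}
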
diff -ruw linux-4.4.115/mm/vmalloc.c linux-4.4.115-fbx/mm/vmalloc.c
--- linux-4.4.115/mm/vmalloc.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/mm/vmalloc.c	2019-10-29 09:26:25.741223709 +0100
@@ -274,13 +274,12 @@
 
 /*** Global kva allocator ***/
 
-#define VM_LAZY_FREE	0x01
-#define VM_LAZY_FREEING	0x02
 #define VM_VM_AREA	0x04
 
 static DEFINE_SPINLOCK(vmap_area_lock);
 /* Export for kexec only */
 LIST_HEAD(vmap_area_list);
+static LLIST_HEAD(vmap_purge_list);
 static struct rb_root vmap_area_root = RB_ROOT;
 
 /* The vmap cache globals are protected by vmap_area_lock */
@@ -291,6 +290,57 @@
 
 static unsigned long vmap_area_pcpu_hole;
 
+#ifdef CONFIG_ENABLE_VMALLOC_SAVING
+#define POSSIBLE_VMALLOC_START	PAGE_OFFSET
+
+#define VMALLOC_BITMAP_SIZE	((VMALLOC_END - PAGE_OFFSET) >> \
+					PAGE_SHIFT)
+#define VMALLOC_TO_BIT(addr)	(((addr) - PAGE_OFFSET) >> PAGE_SHIFT)
+#define BIT_TO_VMALLOC(i)	(PAGE_OFFSET + (i) * PAGE_SIZE)
+
+unsigned long total_vmalloc_size;
+unsigned long vmalloc_reserved;
+
+DECLARE_BITMAP(possible_areas, VMALLOC_BITMAP_SIZE);
+
+void mark_vmalloc_reserved_area(void *x, unsigned long size)
+{
+	unsigned long addr = (unsigned long)x;
+
+	bitmap_set(possible_areas, VMALLOC_TO_BIT(addr), size >> PAGE_SHIFT);
+	vmalloc_reserved += size;
+}
+
+int is_vmalloc_addr(const void *x)
+{
+	unsigned long addr = (unsigned long)x;
+
+	if (addr < POSSIBLE_VMALLOC_START || addr >= VMALLOC_END)
+		return 0;
+
+	if (test_bit(VMALLOC_TO_BIT(addr), possible_areas))
+		return 0;
+
+	return 1;
+}
+
+static void calc_total_vmalloc_size(void)
+{
+	total_vmalloc_size = VMALLOC_END - POSSIBLE_VMALLOC_START -
+		vmalloc_reserved;
+}
+#else
+int is_vmalloc_addr(const void *x)
+{
+	unsigned long addr = (unsigned long)x;
+
+	return addr >= VMALLOC_START && addr < VMALLOC_END;
+}
+
+static void calc_total_vmalloc_size(void) { }
+#endif
+EXPORT_SYMBOL(is_vmalloc_addr);
+
 static struct vmap_area *__find_vmap_area(unsigned long addr)
 {
 	struct rb_node *n = vmap_area_root.rb_node;
@@ -363,6 +413,8 @@
 	BUG_ON(offset_in_page(size));
 	BUG_ON(!is_power_of_2(align));
 
+	might_sleep();
+
 	va = kmalloc_node(sizeof(struct vmap_area),
 			gfp_mask & GFP_RECLAIM_MASK, node);
 	if (unlikely(!va))
@@ -470,8 +522,8 @@
 		goto retry;
 	}
 	if (printk_ratelimit())
-		pr_warn("vmap allocation for size %lu failed: "
-			"use vmalloc=<size> to increase size.\n", size);
+		pr_warn("vmap allocation for size %lu failed: use vmalloc=<size> to increase size\n",
+			size);
 	kfree(va);
 	return ERR_PTR(-EBUSY);
 }
@@ -577,6 +629,13 @@
 
 static atomic_t vmap_lazy_nr = ATOMIC_INIT(0);
 
+/*
+ * Serialize vmap purging.  There is no actual critical section protected
+ * by this lock, but we want to avoid concurrent calls for performance
+ * reasons and to make the pcpu_get_vm_areas more deterministic.
+ */
+static DEFINE_MUTEX(vmap_purge_lock);
+
 /* for per-CPU blocks */
 static void purge_fragmented_blocks_allcpus(void);
 
@@ -591,65 +650,40 @@
 
 /*
  * Purges all lazily-freed vmap areas.
- *
- * If sync is 0 then don't purge if there is already a purge in progress.
- * If force_flush is 1, then flush kernel TLBs between *start and *end even
- * if we found no lazy vmap areas to unmap (callers can use this to optimise
- * their own TLB flushing).
- * Returns with *start = min(*start, lowest purged address)
- *              *end = max(*end, highest purged address)
  */
-static void __purge_vmap_area_lazy(unsigned long *start, unsigned long *end,
-					int sync, int force_flush)
+static bool __purge_vmap_area_lazy(unsigned long start, unsigned long end)
 {
-	static DEFINE_SPINLOCK(purge_lock);
-	LIST_HEAD(valist);
+	struct llist_node *valist;
 	struct vmap_area *va;
 	struct vmap_area *n_va;
-	int nr = 0;
+	bool do_free = false;
 
-	/*
-	 * If sync is 0 but force_flush is 1, we'll go sync anyway but callers
-	 * should not expect such behaviour. This just simplifies locking for
-	 * the case that isn't actually used at the moment anyway.
-	 */
-	if (!sync && !force_flush) {
-		if (!spin_trylock(&purge_lock))
-			return;
-	} else
-		spin_lock(&purge_lock);
+	lockdep_assert_held(&vmap_purge_lock);
 
-	if (sync)
-		purge_fragmented_blocks_allcpus();
-
-	rcu_read_lock();
-	list_for_each_entry_rcu(va, &vmap_area_list, list) {
-		if (va->flags & VM_LAZY_FREE) {
-			if (va->va_start < *start)
-				*start = va->va_start;
-			if (va->va_end > *end)
-				*end = va->va_end;
-			nr += (va->va_end - va->va_start) >> PAGE_SHIFT;
-			list_add_tail(&va->purge_list, &valist);
-			va->flags |= VM_LAZY_FREEING;
-			va->flags &= ~VM_LAZY_FREE;
-		}
+	valist = llist_del_all(&vmap_purge_list);
+	llist_for_each_entry(va, valist, purge_list) {
+		if (va->va_start < start)
+			start = va->va_start;
+		if (va->va_end > end)
+			end = va->va_end;
+		do_free = true;
 	}
-	rcu_read_unlock();
 
-	if (nr)
-		atomic_sub(nr, &vmap_lazy_nr);
+	if (!do_free)
+		return false;
 
-	if (nr || force_flush)
-		flush_tlb_kernel_range(*start, *end);
+	flush_tlb_kernel_range(start, end);
 
-	if (nr) {
 	spin_lock(&vmap_area_lock);
-		list_for_each_entry_safe(va, n_va, &valist, purge_list)
+	llist_for_each_entry_safe(va, n_va, valist, purge_list) {
+		int nr = (va->va_end - va->va_start) >> PAGE_SHIFT;
+
 		__free_vmap_area(va);
-		spin_unlock(&vmap_area_lock);
+		atomic_sub(nr, &vmap_lazy_nr);
+		cond_resched_lock(&vmap_area_lock);
 	}
-	spin_unlock(&purge_lock);
+	spin_unlock(&vmap_area_lock);
+	return true;
 }
 
 /*
@@ -658,9 +692,10 @@
  */
 static void try_purge_vmap_area_lazy(void)
 {
-	unsigned long start = ULONG_MAX, end = 0;
-
-	__purge_vmap_area_lazy(&start, &end, 0, 0);
+	if (mutex_trylock(&vmap_purge_lock)) {
+		__purge_vmap_area_lazy(ULONG_MAX, 0);
+		mutex_unlock(&vmap_purge_lock);
+	}
 }
 
 /*
@@ -668,9 +703,10 @@
  */
 static void purge_vmap_area_lazy(void)
 {
-	unsigned long start = ULONG_MAX, end = 0;
-
-	__purge_vmap_area_lazy(&start, &end, 1, 0);
+	mutex_lock(&vmap_purge_lock);
+	purge_fragmented_blocks_allcpus();
+	__purge_vmap_area_lazy(ULONG_MAX, 0);
+	mutex_unlock(&vmap_purge_lock);
 }
 
 /*
@@ -680,20 +716,16 @@
  */
 static void free_vmap_area_noflush(struct vmap_area *va)
 {
-	va->flags |= VM_LAZY_FREE;
-	atomic_add((va->va_end - va->va_start) >> PAGE_SHIFT, &vmap_lazy_nr);
-	if (unlikely(atomic_read(&vmap_lazy_nr) > lazy_max_pages()))
-		try_purge_vmap_area_lazy();
-}
+	int nr_lazy;
 
-/*
- * Free and unmap a vmap area, caller ensuring flush_cache_vunmap had been
- * called for the correct range previously.
- */
-static void free_unmap_vmap_area_noflush(struct vmap_area *va)
-{
-	unmap_vmap_area(va);
-	free_vmap_area_noflush(va);
+	nr_lazy = atomic_add_return((va->va_end - va->va_start) >> PAGE_SHIFT,
+				    &vmap_lazy_nr);
+
+	/* After this point, we may free va at any time */
+	llist_add(&va->purge_list, &vmap_purge_list);
+
+	if (unlikely(nr_lazy > lazy_max_pages()))
+		try_purge_vmap_area_lazy();
 }
 
 /*
@@ -702,7 +734,8 @@
 static void free_unmap_vmap_area(struct vmap_area *va)
 {
 	flush_cache_vunmap(va->va_start, va->va_end);
-	free_unmap_vmap_area_noflush(va);
+	unmap_vmap_area(va);
+	free_vmap_area_noflush(va);
 }
 
 static struct vmap_area *find_vmap_area(unsigned long addr)
@@ -716,16 +749,6 @@
 	return va;
 }
 
-static void free_unmap_vmap_area_addr(unsigned long addr)
-{
-	struct vmap_area *va;
-
-	va = find_vmap_area(addr);
-	BUG_ON(!va);
-	free_unmap_vmap_area(va);
-}
-
-
 /*** Per cpu kva allocator ***/
 
 /*
@@ -1046,6 +1069,8 @@
 	if (unlikely(!vmap_initialized))
 		return;
 
+	might_sleep();
+
 	for_each_possible_cpu(cpu) {
 		struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu);
 		struct vmap_block *vb;
@@ -1070,7 +1095,11 @@
 		rcu_read_unlock();
 	}
 
-	__purge_vmap_area_lazy(&start, &end, 1, flush);
+	mutex_lock(&vmap_purge_lock);
+	purge_fragmented_blocks_allcpus();
+	if (!__purge_vmap_area_lazy(start, end) && flush)
+		flush_tlb_kernel_range(start, end);
+	mutex_unlock(&vmap_purge_lock);
 }
 EXPORT_SYMBOL_GPL(vm_unmap_aliases);
 
@@ -1083,7 +1112,9 @@
 {
 	unsigned long size = count << PAGE_SHIFT;
 	unsigned long addr = (unsigned long)mem;
+	struct vmap_area *va;
 
+	might_sleep();
 	BUG_ON(!addr);
 	BUG_ON(addr < VMALLOC_START);
 	BUG_ON(addr > VMALLOC_END);
@@ -1092,10 +1123,14 @@
 	debug_check_no_locks_freed(mem, size);
 	vmap_debug_free_range(addr, addr+size);
 
-	if (likely(count <= VMAP_MAX_ALLOC))
+	if (likely(count <= VMAP_MAX_ALLOC)) {
 		vb_free(mem, size);
-	else
-		free_unmap_vmap_area_addr(addr);
+		return;
+	}
+
+	va = find_vmap_area(addr);
+	BUG_ON(!va);
+	free_unmap_vmap_area(va);
 }
 EXPORT_SYMBOL(vm_unmap_ram);
 
@@ -1144,6 +1179,33 @@
 EXPORT_SYMBOL(vm_map_ram);
 
 static struct vm_struct *vmlist __initdata;
+
+/**
+ * vm_area_check_early - check if vmap area is already mapped
+ * @vm: vm_struct to be checked
+ *
+ * This function is used to check if the vmap area has been
+ * mapped already. @vm->addr, @vm->size and @vm->flags should
+ * contain proper values.
+ *
+ */
+int __init vm_area_check_early(struct vm_struct *vm)
+{
+	struct vm_struct *tmp, **p;
+
+	BUG_ON(vmap_initialized);
+	for (p = &vmlist; (tmp = *p) != NULL; p = &tmp->next) {
+		if (tmp->addr >= vm->addr) {
+			if (tmp->addr < vm->addr + vm->size)
+				return 1;
+		} else {
+			if (tmp->addr + tmp->size > vm->addr)
+				return 1;
+		}
+	}
+	return 0;
+}
+
 /**
  * vm_area_add_early - add vmap area early during boot
  * @vm: vm_struct to add
@@ -1224,7 +1286,7 @@
 	}
 
 	vmap_area_pcpu_hole = VMALLOC_END;
-
+	calc_total_vmalloc_size();
 	vmap_initialized = true;
 }
 
@@ -1388,16 +1450,27 @@
  */
 struct vm_struct *get_vm_area(unsigned long size, unsigned long flags)
 {
+#ifdef CONFIG_ENABLE_VMALLOC_SAVING
+	return __get_vm_area_node(size, 1, flags, PAGE_OFFSET, VMALLOC_END,
+				  NUMA_NO_NODE, GFP_KERNEL,
+				  __builtin_return_address(0));
+#else
 	return __get_vm_area_node(size, 1, flags, VMALLOC_START, VMALLOC_END,
 				  NUMA_NO_NODE, GFP_KERNEL,
 				  __builtin_return_address(0));
+#endif
 }
 
 struct vm_struct *get_vm_area_caller(unsigned long size, unsigned long flags,
 				const void *caller)
 {
+#ifdef CONFIG_ENABLE_VMALLOC_SAVING
+	return __get_vm_area_node(size, 1, flags, PAGE_OFFSET, VMALLOC_END,
+				  NUMA_NO_NODE, GFP_KERNEL, caller);
+#else
 	return __get_vm_area_node(size, 1, flags, VMALLOC_START, VMALLOC_END,
 				  NUMA_NO_NODE, GFP_KERNEL, caller);
+#endif
 }
 
 /**
@@ -1431,6 +1504,8 @@
 {
 	struct vmap_area *va;
 
+	might_sleep();
+
 	va = find_vmap_area((unsigned long)addr);
 	if (va && va->flags & VM_VM_AREA) {
 		struct vm_struct *vm = va->vm;
@@ -1490,6 +1565,38 @@
 	return;
 }
  
+static inline void __vfree_deferred(const void *addr)
+{
+	/*
+	 * Use raw_cpu_ptr() because this can be called from preemptible
+	 * context. Preemption is absolutely fine here, because the llist_add()
+	 * implementation is lockless, so it works even if we are adding to
+	 * another cpu's list.  schedule_work() should be fine with this too.
+	 */
+	struct vfree_deferred *p = raw_cpu_ptr(&vfree_deferred);
+
+	if (llist_add((struct llist_node *)addr, &p->list))
+		schedule_work(&p->wq);
+}
+
+/**
+ *	vfree_atomic  -  release memory allocated by vmalloc()
+ *	@addr:		memory base address
+ *
+ *	This one is just like vfree() but can be called in any atomic context
+ *	except NMIs.
+ */
+void vfree_atomic(const void *addr)
+{
+	BUG_ON(in_nmi());
+
+	kmemleak_free(addr);
+
+	if (!addr)
+		return;
+	__vfree_deferred(addr);
+}
+
 /**
  *	vfree  -  release memory allocated by vmalloc()
  *	@addr:		memory base address
@@ -1512,11 +1619,9 @@
 
 	if (!addr)
 		return;
-	if (unlikely(in_interrupt())) {
-		struct vfree_deferred *p = this_cpu_ptr(&vfree_deferred);
-		if (llist_add((struct llist_node *)addr, &p->list))
-			schedule_work(&p->wq);
-	} else
+	if (unlikely(in_interrupt()))
+		__vfree_deferred(addr);
+	else
 		__vunmap(addr, 1);
 }
 EXPORT_SYMBOL(vfree);
@@ -2654,6 +2759,9 @@
 	if (v->flags & VM_VPAGES)
 		seq_puts(m, " vpages");
 
+	if (v->flags & VM_LOWMEM)
+		seq_puts(m, " lowmem");
+
 	show_numa_info(m, v);
 	seq_putc(m, '\n');
 	return 0;
diff -ruw linux-4.4.115/mm/vmpressure.c linux-4.4.115-fbx/mm/vmpressure.c
--- linux-4.4.115/mm/vmpressure.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/mm/vmpressure.c	2019-01-22 16:16:28.835294691 +0100
@@ -22,6 +22,9 @@
 #include <linux/slab.h>
 #include <linux/swap.h>
 #include <linux/printk.h>
+#include <linux/notifier.h>
+#include <linux/init.h>
+#include <linux/module.h>
 #include <linux/vmpressure.h>
 
 /*
@@ -38,7 +41,7 @@
  * TODO: Make the window size depend on machine size, as we do for vmstat
  * thresholds. Currently we set it to 512 pages (2MB for 4KB pages).
  */
-static const unsigned long vmpressure_win = SWAP_CLUSTER_MAX * 16;
+static unsigned long vmpressure_win = SWAP_CLUSTER_MAX * 16;
 
 /*
  * These thresholds are used when we account memory pressure through
@@ -49,6 +52,33 @@
 static const unsigned int vmpressure_level_med = 60;
 static const unsigned int vmpressure_level_critical = 95;
 
+static unsigned long vmpressure_scale_max = 100;
+module_param_named(vmpressure_scale_max, vmpressure_scale_max,
+			ulong, S_IRUGO | S_IWUSR);
+
+/* vmpressure values >= this will be scaled based on allocstalls */
+static unsigned long allocstall_threshold = 70;
+module_param_named(allocstall_threshold, allocstall_threshold,
+			ulong, S_IRUGO | S_IWUSR);
+
+static struct vmpressure global_vmpressure;
+BLOCKING_NOTIFIER_HEAD(vmpressure_notifier);
+
+int vmpressure_notifier_register(struct notifier_block *nb)
+{
+	return blocking_notifier_chain_register(&vmpressure_notifier, nb);
+}
+
+int vmpressure_notifier_unregister(struct notifier_block *nb)
+{
+	return blocking_notifier_chain_unregister(&vmpressure_notifier, nb);
+}
+
+void vmpressure_notify(unsigned long pressure)
+{
+	blocking_notifier_call_chain(&vmpressure_notifier, pressure, NULL);
+}
+
 /*
  * When there are too little pages left to scan, vmpressure() may miss the
  * critical pressure as number of pages will be less than "window size".
@@ -75,6 +105,7 @@
 	return container_of(work, struct vmpressure, work);
 }
 
+#ifdef CONFIG_MEMCG
 static struct vmpressure *vmpressure_parent(struct vmpressure *vmpr)
 {
 	struct cgroup_subsys_state *css = vmpressure_to_css(vmpr);
@@ -85,6 +116,12 @@
 		return NULL;
 	return memcg_to_vmpressure(memcg);
 }
+#else
+static struct vmpressure *vmpressure_parent(struct vmpressure *vmpr)
+{
+	return NULL;
+}
+#endif
 
 enum vmpressure_levels {
 	VMPRESSURE_LOW = 0,
@@ -108,7 +145,7 @@
 	return VMPRESSURE_LOW;
 }
 
-static enum vmpressure_levels vmpressure_calc_level(unsigned long scanned,
+static unsigned long vmpressure_calc_pressure(unsigned long scanned,
 						    unsigned long reclaimed)
 {
 	unsigned long scale = scanned + reclaimed;
@@ -135,7 +172,20 @@
 	pr_debug("%s: %3lu  (s: %lu  r: %lu)\n", __func__, pressure,
 		 scanned, reclaimed);
 
-	return vmpressure_level(pressure);
+	return pressure;
+}
+
+static unsigned long vmpressure_account_stall(unsigned long pressure,
+				unsigned long stall, unsigned long scanned)
+{
+	unsigned long scale;
+
+	if (pressure < allocstall_threshold)
+		return pressure;
+
+	scale = ((vmpressure_scale_max - pressure) * stall) / scanned;
+
+	return pressure + scale;
 }
 
 struct vmpressure_event {
@@ -149,9 +199,11 @@
 {
 	struct vmpressure_event *ev;
 	enum vmpressure_levels level;
+	unsigned long pressure;
 	bool signalled = false;
 
-	level = vmpressure_calc_level(scanned, reclaimed);
+	pressure = vmpressure_calc_pressure(scanned, reclaimed);
+	level = vmpressure_level(pressure);
 
 	mutex_lock(&vmpr->events_lock);
 
@@ -203,24 +255,13 @@
 	} while ((vmpr = vmpressure_parent(vmpr)));
 }
 
-/**
- * vmpressure() - Account memory pressure through scanned/reclaimed ratio
- * @gfp:	reclaimer's gfp mask
- * @memcg:	cgroup memory controller handle
- * @scanned:	number of pages scanned
- * @reclaimed:	number of pages reclaimed
- *
- * This function should be called from the vmscan reclaim path to account
- * "instantaneous" memory pressure (scanned/reclaimed ratio). The raw
- * pressure index is then further refined and averaged over time.
- *
- * This function does not return any value.
- */
-void vmpressure(gfp_t gfp, struct mem_cgroup *memcg,
+void vmpressure_memcg(gfp_t gfp, struct mem_cgroup *memcg,
 		unsigned long scanned, unsigned long reclaimed)
 {
 	struct vmpressure *vmpr = memcg_to_vmpressure(memcg);
 
+	BUG_ON(!vmpr);
+
 	/*
 	 * Here we only want to account pressure that userland is able to
 	 * help us with. For example, suppose that DMA zone is under
@@ -257,6 +298,94 @@
 	schedule_work(&vmpr->work);
 }
 
+void calculate_vmpressure_win(void)
+{
+	long x;
+
+	x = global_page_state(NR_FILE_PAGES) -
+			global_page_state(NR_SHMEM) -
+			total_swapcache_pages() +
+			global_page_state(NR_FREE_PAGES);
+	if (x < 1)
+		x = 1;
+	/*
+	 * For low (free + cached), vmpressure window should be
+	 * small, and high for higher values of (free + cached).
+	 * But it should not be linear as well. This ensures
+	 * timely vmpressure notifications when system is under
+	 * memory pressure, and optimal number of events when
+	 * cached is high. The square root function is empirically
+	 * found to serve the purpose.
+	 */
+	x = int_sqrt(x);
+	vmpressure_win = x;
+}
+
+void vmpressure_global(gfp_t gfp, unsigned long scanned,
+		unsigned long reclaimed)
+{
+	struct vmpressure *vmpr = &global_vmpressure;
+	unsigned long pressure;
+	unsigned long stall;
+
+	if (!(gfp & (__GFP_HIGHMEM | __GFP_MOVABLE | __GFP_IO | __GFP_FS)))
+		return;
+
+	if (!scanned)
+		return;
+
+	spin_lock(&vmpr->sr_lock);
+	if (!vmpr->scanned)
+		calculate_vmpressure_win();
+
+	vmpr->scanned += scanned;
+	vmpr->reclaimed += reclaimed;
+
+	if (!current_is_kswapd())
+		vmpr->stall += scanned;
+
+	stall = vmpr->stall;
+	scanned = vmpr->scanned;
+	reclaimed = vmpr->reclaimed;
+	spin_unlock(&vmpr->sr_lock);
+
+	if (scanned < vmpressure_win)
+		return;
+
+	spin_lock(&vmpr->sr_lock);
+	vmpr->scanned = 0;
+	vmpr->reclaimed = 0;
+	vmpr->stall = 0;
+	spin_unlock(&vmpr->sr_lock);
+
+	pressure = vmpressure_calc_pressure(scanned, reclaimed);
+	pressure = vmpressure_account_stall(pressure, stall, scanned);
+	vmpressure_notify(pressure);
+}
+
+/**
+ * vmpressure() - Account memory pressure through scanned/reclaimed ratio
+ * @gfp:	reclaimer's gfp mask
+ * @memcg:	cgroup memory controller handle
+ * @scanned:	number of pages scanned
+ * @reclaimed:	number of pages reclaimed
+ *
+ * This function should be called from the vmscan reclaim path to account
+ * "instantaneous" memory pressure (scanned/reclaimed ratio). The raw
+ * pressure index is then further refined and averaged over time.
+ *
+ * This function does not return any value.
+ */
+void vmpressure(gfp_t gfp, struct mem_cgroup *memcg,
+		unsigned long scanned, unsigned long reclaimed)
+{
+	if (!memcg)
+		vmpressure_global(gfp, scanned, reclaimed);
+
+	if (IS_ENABLED(CONFIG_MEMCG))
+		vmpressure_memcg(gfp, memcg, scanned, reclaimed);
+}
+
 /**
  * vmpressure_prio() - Account memory pressure through reclaimer priority level
  * @gfp:	reclaimer's gfp mask
@@ -308,6 +437,8 @@
 	struct vmpressure_event *ev;
 	int level;
 
+	BUG_ON(!vmpr);
+
 	for (level = 0; level < VMPRESSURE_NUM_LEVELS; level++) {
 		if (!strcmp(vmpressure_str_levels[level], args))
 			break;
@@ -347,6 +478,8 @@
 	struct vmpressure *vmpr = memcg_to_vmpressure(memcg);
 	struct vmpressure_event *ev;
 
+	BUG_ON(!vmpr);
+
 	mutex_lock(&vmpr->events_lock);
 	list_for_each_entry(ev, &vmpr->events, node) {
 		if (ev->efd != eventfd)
@@ -388,3 +521,10 @@
 	 */
 	flush_work(&vmpr->work);
 }
+
+int vmpressure_global_init(void)
+{
+	vmpressure_init(&global_vmpressure);
+	return 0;
+}
+late_initcall(vmpressure_global_init);
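
A userspace sketch of the two additions above: the adaptive window (square root of free + cached pages) and the allocstall scaling applied to pressure values past the threshold. Compile with -lm; the constants mirror the module parameters and the sample numbers are made up.

#include <math.h>
#include <stdio.h>

static const unsigned long vmpressure_scale_max = 100;
static const unsigned long allocstall_threshold = 70;

/* vmpressure_win: sqrt(free + cached), small under pressure, large when
 * plenty of reclaimable cache is around */
static unsigned long calc_window(unsigned long free_plus_cached)
{
	if (free_plus_cached < 1)
		free_plus_cached = 1;
	return (unsigned long)sqrt((double)free_plus_cached);
}

/* pressures past the threshold are scaled up by the allocstall share */
static unsigned long account_stall(unsigned long pressure,
				   unsigned long stall, unsigned long scanned)
{
	if (pressure < allocstall_threshold)
		return pressure;
	return pressure +
	       ((vmpressure_scale_max - pressure) * stall) / scanned;
}

int main(void)
{
	/* 1 GiB of free+cached 4 KiB pages -> window of ~512 pages */
	printf("window: %lu pages\n", calc_window(262144));
	/* pressure 80 with half the scanned pages stalling -> 90 */
	printf("scaled pressure: %lu\n", account_stall(80, 256, 512));
	return 0;
}
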
diff -ruw linux-4.4.115/mm/vmscan.c linux-4.4.115-fbx/mm/vmscan.c
--- linux-4.4.115/mm/vmscan.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/mm/vmscan.c	2019-10-29 09:26:25.745223748 +0100
@@ -104,6 +104,13 @@
 
 	/* Number of pages freed so far during a call to shrink_zones() */
 	unsigned long nr_reclaimed;
+
+	/*
+	 * Reclaim pages from a vma. If a page is shared by other tasks
+	 * it is zapped from this vma without reclaim, so it ends up
+	 * remaining in memory until the last task zaps it.
+	 */
+	struct vm_area_struct *target_vma;
 };
 
 #define lru_to_page(_head) (list_entry((_head)->prev, struct page, lru))
@@ -146,6 +153,12 @@
  */
 unsigned long vm_total_pages;
 
+#ifdef CONFIG_KSWAPD_CPU_AFFINITY_MASK
+char *kswapd_cpu_mask = CONFIG_KSWAPD_CPU_AFFINITY_MASK;
+#else
+char *kswapd_cpu_mask = NULL;
+#endif
+
 static LIST_HEAD(shrinker_list);
 static DECLARE_RWSEM(shrinker_rwsem);
 
@@ -278,6 +291,10 @@
 	long batch_size = shrinker->batch ? shrinker->batch
 					  : SHRINK_BATCH;
 	long scanned = 0, next_deferred;
+	long min_cache_size = batch_size;
+
+	if (current_is_kswapd())
+		min_cache_size = 0;
 
 	freeable = shrinker->count_objects(shrinker, shrinkctl);
 	if (freeable == 0)
@@ -345,7 +362,7 @@
 	 * scanning at high prio and therefore should try to reclaim as much as
 	 * possible.
 	 */
-	while (total_scan >= batch_size ||
+	while (total_scan > min_cache_size ||
 	       total_scan >= freeable) {
 		unsigned long ret;
 		unsigned long nr_to_scan = min(batch_size, total_scan);
@@ -382,6 +399,35 @@
 	return freed;
 }
 
+static void shrink_slab_lmk(gfp_t gfp_mask, int nid,
+				 struct mem_cgroup *memcg,
+				 unsigned long nr_scanned,
+				 unsigned long nr_eligible)
+{
+	struct shrinker *shrinker;
+
+	if (nr_scanned == 0)
+		nr_scanned = SWAP_CLUSTER_MAX;
+
+	if (!down_read_trylock(&shrinker_rwsem))
+		goto out;
+
+	list_for_each_entry(shrinker, &shrinker_list, list) {
+		struct shrink_control sc = {
+			.gfp_mask = gfp_mask,
+		};
+
+		if (!(shrinker->flags & SHRINKER_LMK))
+			continue;
+
+		do_shrink_slab(&sc, shrinker, nr_scanned, nr_eligible);
+	}
+
+	up_read(&shrinker_rwsem);
+out:
+	cond_resched();
+}
+
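The SHRINKER_LMK flag is vendor-specific (introduced by this patch series;
note the matching "continue" added to shrink_slab() below, which keeps these
shrinkers out of regular reclaim). A sketch of how a low-memory-killer driver
might register one under the 4.4 shrinker API; the two helpers are
hypothetical:

	static unsigned long lmk_count_objects(struct shrinker *s,
					       struct shrink_control *sc)
	{
		/* how much could be freed; returning 0 skips the scan */
		return lmk_freeable_pages();		/* hypothetical */
	}

	static unsigned long lmk_scan_objects(struct shrinker *s,
					      struct shrink_control *sc)
	{
		/* e.g. pick and kill a victim task, return pages freed */
		return lmk_kill_one_task();		/* hypothetical */
	}

	static struct shrinker lmk_shrinker = {
		.count_objects	= lmk_count_objects,
		.scan_objects	= lmk_scan_objects,
		.seeks		= DEFAULT_SEEKS,
		.flags		= SHRINKER_LMK,	/* shrink_slab_lmk() only */
	};

	/* in the driver's init path */
	register_shrinker(&lmk_shrinker);
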
 /**
  * shrink_slab - shrink slab caches
  * @gfp_mask: allocation context
@@ -443,6 +489,9 @@
 			.memcg = memcg,
 		};
 
+		if (shrinker->flags & SHRINKER_LMK)
+			continue;
+
 		if (memcg && !(shrinker->flags & SHRINKER_MEMCG_AWARE))
 			continue;
 
@@ -912,7 +961,7 @@
 		struct address_space *mapping;
 		struct page *page;
 		int may_enter_fs;
-		enum page_references references = PAGEREF_RECLAIM_CLEAN;
+		enum page_references references = PAGEREF_RECLAIM;
 		bool dirty, writeback;
 
 		cond_resched();
@@ -924,6 +973,7 @@
 			goto keep;
 
 		VM_BUG_ON_PAGE(PageActive(page), page);
+		if (zone)
 		VM_BUG_ON_PAGE(page_zone(page) != zone, page);
 
 		sc->nr_scanned++;
@@ -1003,7 +1053,7 @@
 			/* Case 1 above */
 			if (current_is_kswapd() &&
 			    PageReclaim(page) &&
-			    test_bit(ZONE_WRITEBACK, &zone->flags)) {
+			    (zone && test_bit(ZONE_WRITEBACK, &zone->flags))) {
 				nr_immediate++;
 				goto keep_locked;
 
@@ -1069,7 +1119,8 @@
 		 */
 		if (page_mapped(page) && mapping) {
 			switch (try_to_unmap(page,
-					ttu_flags|TTU_BATCH_FLUSH)) {
+					ttu_flags|TTU_BATCH_FLUSH,
+					sc->target_vma)) {
 			case SWAP_FAIL:
 				goto activate_locked;
 			case SWAP_AGAIN:
@@ -1089,7 +1140,8 @@
 			 */
 			if (page_is_file_cache(page) &&
 					(!current_is_kswapd() ||
-					 !test_bit(ZONE_DIRTY, &zone->flags))) {
+					(zone &&
+					!test_bit(ZONE_DIRTY, &zone->flags)))) {
 				/*
 				 * Immediately reclaim when written back.
 				 * Similar in principal to deactivate_page()
@@ -1201,6 +1253,13 @@
 		 * appear not as the counts should be low
 		 */
 		list_add(&page->lru, &free_pages);
+		/*
+		 * If the page list spans multiple zones, we must decrement
+		 * NR_ISOLATED_ANON + page_is_file_cache() here for each
+		 * freed page.
+		 */
+		if (!zone)
+			dec_zone_page_state(page, NR_ISOLATED_ANON +
+					page_is_file_cache(page));
 		continue;
 
 cull_mlocked:
@@ -1212,7 +1271,7 @@
 
 activate_locked:
 		/* Not a candidate for swapping, so reclaim swap space. */
-		if (PageSwapCache(page) && vm_swap_full())
+		if (PageSwapCache(page) && vm_swap_full(page_swap_info(page)))
 			try_to_free_swap(page);
 		VM_BUG_ON_PAGE(PageActive(page), page);
 		SetPageActive(page);
@@ -1246,6 +1305,8 @@
 		.gfp_mask = GFP_KERNEL,
 		.priority = DEF_PRIORITY,
 		.may_unmap = 1,
+		/* Don't allow dirty pages to be written out */
+		.may_writepage = 0,
 	};
 	unsigned long ret, dummy1, dummy2, dummy3, dummy4, dummy5;
 	struct page *page, *next;
@@ -1253,7 +1314,7 @@
 
 	list_for_each_entry_safe(page, next, page_list, lru) {
 		if (page_is_file_cache(page) && !PageDirty(page) &&
-		    !isolated_balloon_page(page)) {
+		    !__PageMovable(page)) {
 			ClearPageActive(page);
 			list_move(&page->lru, &clean_pages);
 		}
@@ -1267,6 +1328,42 @@
 	return ret;
 }
 
+#ifdef CONFIG_PROCESS_RECLAIM
+unsigned long reclaim_pages_from_list(struct list_head *page_list,
+					struct vm_area_struct *vma)
+{
+	struct scan_control sc = {
+		.gfp_mask = GFP_KERNEL,
+		.priority = DEF_PRIORITY,
+		.may_writepage = 1,
+		.may_unmap = 1,
+		.may_swap = 1,
+		.target_vma = vma,
+	};
+
+	unsigned long nr_reclaimed;
+	struct page *page;
+	unsigned long dummy1, dummy2, dummy3, dummy4, dummy5;
+
+	list_for_each_entry(page, page_list, lru)
+		ClearPageActive(page);
+
+	nr_reclaimed = shrink_page_list(page_list, NULL, &sc,
+			TTU_UNMAP|TTU_IGNORE_ACCESS,
+			&dummy1, &dummy2, &dummy3, &dummy4, &dummy5, true);
+
+	while (!list_empty(page_list)) {
+		page = lru_to_page(page_list);
+		list_del(&page->lru);
+		dec_zone_page_state(page, NR_ISOLATED_ANON +
+				page_is_file_cache(page));
+		putback_lru_page(page);
+	}
+
+	return nr_reclaimed;
+}
+#endif
+
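reclaim_pages_from_list() expects pages that were already isolated from their
LRU lists and accounted under NR_ISOLATED_*, and it drops that accounting
itself when putting unreclaimed pages back. A hedged sketch of a caller, as in
the /proc/<pid>/reclaim-style interfaces of vendor kernels (the walk and the
helper name are hypothetical):

	/*
	 * Called for each mapped page found while walking one VMA's page
	 * tables, with mmap_sem held for read.
	 */
	static void collect_for_reclaim(struct page *page,
					struct list_head *page_list)
	{
		if (isolate_lru_page(page))	/* returns 0 on success */
			return;
		list_add(&page->lru, page_list);
		inc_zone_page_state(page, NR_ISOLATED_ANON +
				    page_is_file_cache(page));
	}

	/* ...after the walk: */
	nr_reclaimed = reclaim_pages_from_list(&page_list, vma);
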
 /*
  * Attempt to remove the specified page from its LRU.  Only take this page
  * if it is of the appropriate PageActive status.  Pages which are being
@@ -1453,31 +1550,32 @@
 	return ret;
 }
 
-/*
- * A direct reclaimer may isolate SWAP_CLUSTER_MAX pages from the LRU list and
- * then get resheduled. When there are massive number of tasks doing page
- * allocation, such sleeping direct reclaimers may keep piling up on each CPU,
- * the LRU list will go small and be scanned faster than necessary, leading to
- * unnecessary swapping, thrashing and OOM.
- */
-static int too_many_isolated(struct zone *zone, int file,
-		struct scan_control *sc)
+static int __too_many_isolated(struct zone *zone, int file,
+	struct scan_control *sc, int safe)
 {
 	unsigned long inactive, isolated;
 
-	if (current_is_kswapd())
-		return 0;
-
-	if (!sane_reclaim(sc))
-		return 0;
-
 	if (file) {
+		if (safe) {
+			inactive = zone_page_state_snapshot(zone,
+					NR_INACTIVE_FILE);
+			isolated = zone_page_state_snapshot(zone,
+					NR_ISOLATED_FILE);
+		} else {
 		inactive = zone_page_state(zone, NR_INACTIVE_FILE);
 		isolated = zone_page_state(zone, NR_ISOLATED_FILE);
+		}
+	} else {
+		if (safe) {
+			inactive = zone_page_state_snapshot(zone,
+					NR_INACTIVE_ANON);
+			isolated = zone_page_state_snapshot(zone,
+					NR_ISOLATED_ANON);
 	} else {
 		inactive = zone_page_state(zone, NR_INACTIVE_ANON);
 		isolated = zone_page_state(zone, NR_ISOLATED_ANON);
 	}
+	}
 
 	/*
 	 * GFP_NOIO/GFP_NOFS callers are allowed to isolate more pages, so they
@@ -1490,6 +1588,32 @@
 	return isolated > inactive;
 }
 
+/*
+ * A direct reclaimer may isolate SWAP_CLUSTER_MAX pages from the LRU list and
+ * then get rescheduled. When there is a massive number of tasks doing page
+ * allocation, such sleeping direct reclaimers may keep piling up on each CPU,
+ * the LRU list will go small and be scanned faster than necessary, leading to
+ * unnecessary swapping, thrashing and OOM.
+ */
+static int too_many_isolated(struct zone *zone, int file,
+		struct scan_control *sc, int safe)
+{
+	if (current_is_kswapd())
+		return 0;
+
+	if (!sane_reclaim(sc))
+		return 0;
+
+	if (unlikely(__too_many_isolated(zone, file, sc, 0))) {
+		if (safe)
+			return __too_many_isolated(zone, file, sc, safe);
+		else
+			return 1;
+	}
+
+	return 0;
+}
+
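The two passes trade precision for cost: zone_page_state() reads only the
global atomic counter, which can lag by whatever still sits in the per-cpu
deltas, so the first (cheap) check may report a false positive. The safe pass
folds the deltas in; for reference, zone_page_state_snapshot() in
include/linux/vmstat.h does roughly:

	static inline unsigned long zone_page_state_snapshot(struct zone *zone,
						enum zone_stat_item item)
	{
		long x = atomic_long_read(&zone->vm_stat[item]);
	#ifdef CONFIG_SMP
		int cpu;

		/* fold in the not-yet-flushed per-cpu deltas */
		for_each_online_cpu(cpu)
			x += per_cpu_ptr(zone->pageset, cpu)->vm_stat_diff[item];

		if (x < 0)
			x = 0;
	#endif
		return x;
	}
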
 static noinline_for_stack void
 putback_inactive_pages(struct lruvec *lruvec, struct list_head *page_list)
 {
@@ -1503,6 +1627,7 @@
 	while (!list_empty(page_list)) {
 		struct page *page = lru_to_page(page_list);
 		int lru;
+		int file;
 
 		VM_BUG_ON_PAGE(PageLRU(page), page);
 		list_del(&page->lru);
@@ -1519,8 +1644,11 @@
 		lru = page_lru(page);
 		add_page_to_lru_list(page, lruvec, lru);
 
+		file = is_file_lru(lru);
+		if (IS_ENABLED(CONFIG_ZCACHE))
+			if (file)
+				SetPageWasActive(page);
 		if (is_active_lru(lru)) {
-			int file = is_file_lru(lru);
 			int numpages = hpage_nr_pages(page);
 			reclaim_stat->recent_rotated[file] += numpages;
 		}
@@ -1577,15 +1705,18 @@
 	unsigned long nr_immediate = 0;
 	isolate_mode_t isolate_mode = 0;
 	int file = is_file_lru(lru);
+	int safe = 0;
 	struct zone *zone = lruvec_zone(lruvec);
 	struct zone_reclaim_stat *reclaim_stat = &lruvec->reclaim_stat;
 
-	while (unlikely(too_many_isolated(zone, file, sc))) {
+	while (unlikely(too_many_isolated(zone, file, sc, safe))) {
 		congestion_wait(BLK_RW_ASYNC, HZ/10);
 
 		/* We are about to die and free our memory. Return now. */
 		if (fatal_signal_pending(current))
 			return SWAP_CLUSTER_MAX;
+
+		safe = 1;
 	}
 
 	lru_add_drain();
@@ -1842,6 +1973,12 @@
 		}
 
 		ClearPageActive(page);	/* we are de-activating */
+		if (IS_ENABLED(CONFIG_ZCACHE))
+			/*
+			 * For zcache to know whether the page is from active
+			 * file list
+			 */
+			SetPageWasActive(page);
 		list_add(&page->lru, &l_inactive);
 	}
 
@@ -2057,7 +2194,8 @@
 	 * There is enough inactive page cache, do not reclaim
 	 * anything from the anonymous working set right now.
 	 */
-	if (!inactive_file_is_low(lruvec)) {
+	if (!IS_ENABLED(CONFIG_BALANCE_ANON_FILE_RECLAIM) &&
+			!inactive_file_is_low(lruvec)) {
 		scan_balance = SCAN_FILE;
 		goto out;
 	}
@@ -2430,15 +2568,23 @@
 				    sc->nr_scanned - nr_scanned,
 				    zone_lru_pages);
 
+		/*
+		 * Record the subtree's reclaim efficiency. Pages reclaimed
+		 * from slab are excluded here because the corresponding
+		 * scanned pages are not accounted. Moreover, freeing a page
+		 * by slab shrinking depends on each slab's object population,
+		 * making the cost model (i.e. scan:free) different from that
+		 * of LRU.
+		 */
+		vmpressure(sc->gfp_mask, sc->target_mem_cgroup,
+			   sc->nr_scanned - nr_scanned,
+			   sc->nr_reclaimed - nr_reclaimed);
+
 		if (reclaim_state) {
 			sc->nr_reclaimed += reclaim_state->reclaimed_slab;
 			reclaim_state->reclaimed_slab = 0;
 		}
 
-		vmpressure(sc->gfp_mask, sc->target_mem_cgroup,
-			   sc->nr_scanned - nr_scanned,
-			   sc->nr_reclaimed - nr_reclaimed);
-
 		if (sc->nr_reclaimed - nr_reclaimed)
 			reclaimable = true;
 
@@ -2512,6 +2658,7 @@
 	gfp_t orig_mask;
 	enum zone_type requested_highidx = gfp_zone(sc->gfp_mask);
 	bool reclaimable = false;
+	unsigned long lru_pages = 0;
 
 	/*
 	 * If the number of buffer_heads in the machine exceeds the maximum
@@ -2539,6 +2686,7 @@
 		 * to global LRU.
 		 */
 		if (global_reclaim(sc)) {
+			lru_pages += zone_reclaimable_pages(zone);
 			if (!cpuset_zone_allowed(zone,
 						 GFP_KERNEL | __GFP_HARDWALL))
 				continue;
@@ -2589,6 +2737,9 @@
 			reclaimable = true;
 	}
 
+	if (global_reclaim(sc))
+		shrink_slab_lmk(sc->gfp_mask, 0, NULL,
+				sc->nr_scanned, lru_pages);
 	/*
 	 * Restore to original mask to avoid the impact on the caller if we
 	 * promoted it to __GFP_HIGHMEM.
@@ -2947,18 +3098,23 @@
 	} while (memcg);
 }
 
-static bool zone_balanced(struct zone *zone, int order,
+static bool zone_balanced(struct zone *zone, int order, bool highorder,
 			  unsigned long balance_gap, int classzone_idx)
 {
-	if (!zone_watermark_ok_safe(zone, order, high_wmark_pages(zone) +
-				    balance_gap, classzone_idx))
-		return false;
+	unsigned long mark = high_wmark_pages(zone) + balance_gap;
 
-	if (IS_ENABLED(CONFIG_COMPACTION) && order && compaction_suitable(zone,
-				order, 0, classzone_idx) == COMPACT_SKIPPED)
-		return false;
+	/*
+	 * When checking from pgdat_balanced(), kswapd should stop and sleep
+	 * when it reaches the high order-0 watermark and let kcompactd take
+	 * over. Other callers such as wakeup_kswapd() want to determine the
+	 * true high-order watermark.
+	 */
+	if (IS_ENABLED(CONFIG_COMPACTION) && !highorder) {
+		mark += (1UL << order);
+		order = 0;
+	}
 
-	return true;
+	return zone_watermark_ok_safe(zone, order, mark, classzone_idx);
 }
 
 /*
@@ -3008,7 +3164,7 @@
 			continue;
 		}
 
-		if (zone_balanced(zone, order, 0, i))
+		if (zone_balanced(zone, order, false, 0, i))
 			balanced_pages += zone->managed_pages;
 		else if (!order)
 			return false;
@@ -3063,9 +3219,8 @@
 static bool kswapd_shrink_zone(struct zone *zone,
 			       int classzone_idx,
 			       struct scan_control *sc,
-			       unsigned long *nr_attempted)
+				unsigned long lru_pages)
 {
-	int testorder = sc->order;
 	unsigned long balance_gap;
 	bool lowmem_pressure;
 
@@ -3073,17 +3228,6 @@
 	sc->nr_to_reclaim = max(SWAP_CLUSTER_MAX, high_wmark_pages(zone));
 
 	/*
-	 * Kswapd reclaims only single pages with compaction enabled. Trying
-	 * too hard to reclaim until contiguous free pages have become
-	 * available can hurt performance by evicting too much useful data
-	 * from memory. Do not reclaim more than needed for compaction.
-	 */
-	if (IS_ENABLED(CONFIG_COMPACTION) && sc->order &&
-			compaction_suitable(zone, sc->order, 0, classzone_idx)
-							!= COMPACT_SKIPPED)
-		testorder = 0;
-
-	/*
 	 * We put equal pressure on every zone, unless one zone has way too
 	 * many pages free already. The "too many pages" is defined as the
 	 * high wmark plus a "gap" where the gap is either the low
@@ -3097,14 +3241,13 @@
 	 * reclaim is necessary
 	 */
 	lowmem_pressure = (buffer_heads_over_limit && is_highmem(zone));
-	if (!lowmem_pressure && zone_balanced(zone, testorder,
+	if (!lowmem_pressure && zone_balanced(zone, sc->order, false,
 						balance_gap, classzone_idx))
 		return true;
 
 	shrink_zone(zone, sc, zone_idx(zone) == classzone_idx);
-
-	/* Account for the number of pages attempted to reclaim */
-	*nr_attempted += sc->nr_to_reclaim;
+	shrink_slab_lmk(sc->gfp_mask, zone_to_nid(zone), NULL,
+			sc->nr_scanned, lru_pages);
 
 	clear_bit(ZONE_WRITEBACK, &zone->flags);
 
@@ -3115,7 +3258,7 @@
 	 * waits.
 	 */
 	if (zone_reclaimable(zone) &&
-	    zone_balanced(zone, testorder, 0, classzone_idx)) {
+	    zone_balanced(zone, sc->order, false, 0, classzone_idx)) {
 		clear_bit(ZONE_CONGESTED, &zone->flags);
 		clear_bit(ZONE_DIRTY, &zone->flags);
 	}
@@ -3127,7 +3270,7 @@
  * For kswapd, balance_pgdat() will work across all this node's zones until
  * they are all at high_wmark_pages(zone).
  *
- * Returns the final order kswapd was reclaiming at
+ * Returns the highest zone idx kswapd was reclaiming at
  *
  * There is special handling here for zones which are full of pinned pages.
  * This can happen if the pages are all mlocked, or if they are all used by
@@ -3144,8 +3287,7 @@
  * interoperates with the page allocator fallback scheme to ensure that aging
  * of pages is balanced across the zones.
  */
-static unsigned long balance_pgdat(pg_data_t *pgdat, int order,
-							int *classzone_idx)
+static int balance_pgdat(pg_data_t *pgdat, int order, int classzone_idx)
 {
 	int i;
 	int end_zone = 0;	/* Inclusive.  0 = ZONE_DMA */
@@ -3162,9 +3304,8 @@
 	count_vm_event(PAGEOUTRUN);
 
 	do {
-		unsigned long nr_attempted = 0;
 		bool raise_priority = true;
-		bool pgdat_needs_compaction = (order > 0);
+		unsigned long lru_pages = 0;
 
 		sc.nr_reclaimed = 0;
 
@@ -3199,7 +3340,7 @@
 				break;
 			}
 
-			if (!zone_balanced(zone, order, 0, 0)) {
+			if (!zone_balanced(zone, order, false, 0, 0)) {
 				end_zone = i;
 				break;
 			} else {
@@ -3215,32 +3356,23 @@
 		if (i < 0)
 			goto out;
 
+		/*
+		 * If we're getting trouble reclaiming, start doing writepage
+		 * even in laptop mode.
+		 */
+		if (sc.priority < DEF_PRIORITY - 2)
+			sc.may_writepage = 1;
+
 		for (i = 0; i <= end_zone; i++) {
 			struct zone *zone = pgdat->node_zones + i;
 
 			if (!populated_zone(zone))
 				continue;
 
-			/*
-			 * If any zone is currently balanced then kswapd will
-			 * not call compaction as it is expected that the
-			 * necessary pages are already available.
-			 */
-			if (pgdat_needs_compaction &&
-					zone_watermark_ok(zone, order,
-						low_wmark_pages(zone),
-						*classzone_idx, 0))
-				pgdat_needs_compaction = false;
+			lru_pages += zone_reclaimable_pages(zone);
 		}
 
 		/*
-		 * If we're getting trouble reclaiming, start doing writepage
-		 * even in laptop mode.
-		 */
-		if (sc.priority < DEF_PRIORITY - 2)
-			sc.may_writepage = 1;
-
-		/*
 		 * Now scan the zone in the dma->highmem direction, stopping
 		 * at the last zone which needs scanning.
 		 *
@@ -3276,8 +3408,7 @@
 			 * that that high watermark would be met at 100%
 			 * efficiency.
 			 */
-			if (kswapd_shrink_zone(zone, end_zone,
-					       &sc, &nr_attempted))
+			if (kswapd_shrink_zone(zone, end_zone, &sc, lru_pages))
 				raise_priority = false;
 		}
 
@@ -3290,49 +3421,29 @@
 				pfmemalloc_watermark_ok(pgdat))
 			wake_up_all(&pgdat->pfmemalloc_wait);
 
-		/*
-		 * Fragmentation may mean that the system cannot be rebalanced
-		 * for high-order allocations in all zones. If twice the
-		 * allocation size has been reclaimed and the zones are still
-		 * not balanced then recheck the watermarks at order-0 to
-		 * prevent kswapd reclaiming excessively. Assume that a
-		 * process requested a high-order can direct reclaim/compact.
-		 */
-		if (order && sc.nr_reclaimed >= 2UL << order)
-			order = sc.order = 0;
-
 		/* Check if kswapd should be suspending */
 		if (try_to_freeze() || kthread_should_stop())
 			break;
 
 		/*
-		 * Compact if necessary and kswapd is reclaiming at least the
-		 * high watermark number of pages as requsted
-		 */
-		if (pgdat_needs_compaction && sc.nr_reclaimed > nr_attempted)
-			compact_pgdat(pgdat, order);
-
-		/*
 		 * Raise priority if scanning rate is too low or there was no
 		 * progress in reclaiming pages
 		 */
 		if (raise_priority || !sc.nr_reclaimed)
 			sc.priority--;
 	} while (sc.priority >= 1 &&
-		 !pgdat_balanced(pgdat, order, *classzone_idx));
+			!pgdat_balanced(pgdat, order, classzone_idx));
 
 out:
 	/*
-	 * Return the order we were reclaiming at so prepare_kswapd_sleep()
-	 * makes a decision on the order we were last reclaiming at. However,
-	 * if another caller entered the allocator slow path while kswapd
-	 * was awake, order will remain at the higher level
+	 * Return the highest zone idx we were reclaiming at so
+	 * prepare_kswapd_sleep() makes the same decisions as here.
 	 */
-	*classzone_idx = end_zone;
-	return order;
+	return end_zone;
 }
 
-static void kswapd_try_to_sleep(pg_data_t *pgdat, int order, int classzone_idx)
+static void kswapd_try_to_sleep(pg_data_t *pgdat, int order,
+				int classzone_idx, int balanced_classzone_idx)
 {
 	long remaining = 0;
 	DEFINE_WAIT(wait);
@@ -3343,7 +3454,22 @@
 	prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE);
 
 	/* Try to sleep for a short interval */
-	if (prepare_kswapd_sleep(pgdat, order, remaining, classzone_idx)) {
+	if (prepare_kswapd_sleep(pgdat, order, remaining,
+						balanced_classzone_idx)) {
+		/*
+		 * Compaction records what page blocks it recently failed to
+		 * isolate pages from and skips them in the future scanning.
+		 * When kswapd is going to sleep, it is reasonable to assume
+		 * that enough memory has been freed for compaction to
+		 * succeed, so reset the cache.
+		 */
+		reset_isolation_suitable(pgdat);
+
+		/*
+		 * We have freed the memory, now we should compact it to make
+		 * allocation of the requested order possible.
+		 */
+		wakeup_kcompactd(pgdat, order, classzone_idx);
+
 		remaining = schedule_timeout(HZ/10);
 		finish_wait(&pgdat->kswapd_wait, &wait);
 		prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE);
@@ -3353,7 +3479,8 @@
 	 * After a short sleep, check if it was a premature sleep. If not, then
 	 * go fully to sleep until explicitly woken up.
 	 */
-	if (prepare_kswapd_sleep(pgdat, order, remaining, classzone_idx)) {
+	if (prepare_kswapd_sleep(pgdat, order, remaining,
+						balanced_classzone_idx)) {
 		trace_mm_vmscan_kswapd_sleep(pgdat->node_id);
 
 		/*
@@ -3366,14 +3493,6 @@
 		 */
 		set_pgdat_percpu_threshold(pgdat, calculate_normal_threshold);
 
-		/*
-		 * Compaction records what page blocks it recently failed to
-		 * isolate pages from and skips them in the future scanning.
-		 * When kswapd is going to sleep, it is reasonable to assume
-		 * that pages and compaction may succeed so reset the cache.
-		 */
-		reset_isolation_suitable(pgdat);
-
 		if (!kthread_should_stop())
 			schedule();
 
@@ -3403,7 +3522,6 @@
 static int kswapd(void *p)
 {
 	unsigned long order, new_order;
-	unsigned balanced_order;
 	int classzone_idx, new_classzone_idx;
 	int balanced_classzone_idx;
 	pg_data_t *pgdat = (pg_data_t*)p;
@@ -3416,7 +3534,7 @@
 
 	lockdep_set_current_reclaim_state(GFP_KERNEL);
 
-	if (!cpumask_empty(cpumask))
+	if (kswapd_cpu_mask == NULL && !cpumask_empty(cpumask))
 		set_cpus_allowed_ptr(tsk, cpumask);
 	current->reclaim_state = &reclaim_state;
 
@@ -3436,24 +3554,19 @@
 	set_freezable();
 
 	order = new_order = 0;
-	balanced_order = 0;
 	classzone_idx = new_classzone_idx = pgdat->nr_zones - 1;
 	balanced_classzone_idx = classzone_idx;
 	for ( ; ; ) {
 		bool ret;
 
 		/*
-		 * If the last balance_pgdat was unsuccessful it's unlikely a
-		 * new request of a similar or harder type will succeed soon
-		 * so consider going to sleep on the basis we reclaimed at
+		 * While we were reclaiming, there might have been another
+		 * wakeup, so check the values.
 		 */
-		if (balanced_classzone_idx >= new_classzone_idx &&
-					balanced_order == new_order) {
 			new_order = pgdat->kswapd_max_order;
 			new_classzone_idx = pgdat->classzone_idx;
 			pgdat->kswapd_max_order =  0;
 			pgdat->classzone_idx = pgdat->nr_zones - 1;
-		}
 
 		if (order < new_order || classzone_idx > new_classzone_idx) {
 			/*
@@ -3463,7 +3576,7 @@
 			order = new_order;
 			classzone_idx = new_classzone_idx;
 		} else {
-			kswapd_try_to_sleep(pgdat, balanced_order,
+			kswapd_try_to_sleep(pgdat, order, classzone_idx,
 						balanced_classzone_idx);
 			order = pgdat->kswapd_max_order;
 			classzone_idx = pgdat->classzone_idx;
@@ -3483,9 +3596,8 @@
 		 */
 		if (!ret) {
 			trace_mm_vmscan_kswapd_wake(pgdat->node_id, order);
-			balanced_classzone_idx = classzone_idx;
-			balanced_order = balance_pgdat(pgdat, order,
-						&balanced_classzone_idx);
+			balanced_classzone_idx = balance_pgdat(pgdat, order,
+								classzone_idx);
 		}
 	}
 
@@ -3515,7 +3627,7 @@
 	}
 	if (!waitqueue_active(&pgdat->kswapd_wait))
 		return;
-	if (zone_balanced(zone, order, 0, 0))
+	if (zone_balanced(zone, order, true, 0, 0))
 		return;
 
 	trace_mm_vmscan_wakeup_kswapd(pgdat->node_id, zone_idx(zone), order);
@@ -3586,6 +3698,22 @@
 	return NOTIFY_OK;
 }
 
+static int set_kswapd_cpu_mask(pg_data_t *pgdat)
+{
+	int ret = 0;
+	cpumask_t tmask;
+
+	if (!kswapd_cpu_mask)
+		return 0;
+
+	cpumask_clear(&tmask);
+	ret = cpumask_parse(kswapd_cpu_mask, &tmask);
+	if (ret)
+		return ret;
+
+	return set_cpus_allowed_ptr(pgdat->kswapd, &tmask);
+}
+
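Note that cpumask_parse() expects a hexadecimal mask string, not a CPU list,
so CONFIG_KSWAPD_CPU_AFFINITY_MASK must look like "f" (CPUs 0-3) rather than
"0-3". A quick illustration:

	cpumask_t mask;

	/* CONFIG_KSWAPD_CPU_AFFINITY_MASK="f" -> CPUs 0-3 */
	if (!cpumask_parse("f", &mask))		/* returns 0 on success */
		set_cpus_allowed_ptr(pgdat->kswapd, &mask);
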
 /*
  * This kswapd start function will be called by init and node-hot-add.
  * On node-hot-add, kswapd will moved to proper cpus if cpus are hot-added.
@@ -3605,6 +3733,9 @@
 		pr_err("Failed to start kswapd on node %d\n", nid);
 		ret = PTR_ERR(pgdat->kswapd);
 		pgdat->kswapd = NULL;
+	} else if (kswapd_cpu_mask) {
+		if (set_kswapd_cpu_mask(pgdat))
+			pr_warn("error setting kswapd cpu affinity mask\n");
 	}
 	return ret;
 }
@@ -3630,6 +3761,7 @@
 	swap_setup();
 	for_each_node_state(nid, N_MEMORY)
  		kswapd_run(nid);
+	if (kswapd_cpu_mask == NULL)
 	hotcpu_notifier(cpu_callback, 0);
 	return 0;
 }
diff -ruw linux-4.4.115/mm/vmstat.c linux-4.4.115-fbx/mm/vmstat.c
--- linux-4.4.115/mm/vmstat.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/mm/vmstat.c	2019-10-29 09:26:25.745223748 +0100
@@ -460,7 +460,7 @@
  *
  * The function returns the number of global counters updated.
  */
-static int refresh_cpu_vm_stats(void)
+static int refresh_cpu_vm_stats(bool do_pagesets)
 {
 	struct zone *zone;
 	int i;
@@ -484,8 +484,9 @@
 #endif
 			}
 		}
-		cond_resched();
 #ifdef CONFIG_NUMA
+		if (do_pagesets) {
+			cond_resched();
 		/*
 		 * Deal with draining the remote pageset of this
 		 * processor
@@ -512,6 +513,7 @@
 			drain_zone_pages(zone, this_cpu_ptr(&p->pcp));
 			changes++;
 		}
+		}
 #endif
 	}
 	changes += fold_diff(global_diff);
@@ -762,6 +764,8 @@
 	"workingset_nodereclaim",
 	"nr_anon_transparent_hugepages",
 	"nr_free_cma",
+	"nr_swapcache",
+	"nr_indirectly_reclaimable",
 
 	/* enum writeback_stat_item counters */
 	"nr_dirty_threshold",
@@ -771,6 +775,7 @@
 	/* enum vm_event_item counters */
 	"pgpgin",
 	"pgpgout",
+	"pgpgoutclean",
 	"pswpin",
 	"pswpout",
 
@@ -824,6 +829,7 @@
 	"compact_stall",
 	"compact_fail",
 	"compact_success",
+	"compact_daemon_wake",
 #endif
 
 #ifdef CONFIG_HUGETLB_PAGE
@@ -902,6 +908,7 @@
 
 /* Walk all the zones in a node and print using a callback */
 static void walk_zones_in_node(struct seq_file *m, pg_data_t *pgdat,
+		bool nolock,
 		void (*print)(struct seq_file *m, pg_data_t *, struct zone *))
 {
 	struct zone *zone;
@@ -912,27 +919,16 @@
 		if (!populated_zone(zone))
 			continue;
 
+		if (!nolock)
 		spin_lock_irqsave(&zone->lock, flags);
 		print(m, pgdat, zone);
+		if (!nolock)
 		spin_unlock_irqrestore(&zone->lock, flags);
 	}
 }
 #endif
 
 #ifdef CONFIG_PROC_FS
-static char * const migratetype_names[MIGRATE_TYPES] = {
-	"Unmovable",
-	"Movable",
-	"Reclaimable",
-	"HighAtomic",
-#ifdef CONFIG_CMA
-	"CMA",
-#endif
-#ifdef CONFIG_MEMORY_ISOLATION
-	"Isolate",
-#endif
-};
-
 static void frag_show_print(struct seq_file *m, pg_data_t *pgdat,
 						struct zone *zone)
 {
@@ -950,7 +946,7 @@
 static int frag_show(struct seq_file *m, void *arg)
 {
 	pg_data_t *pgdat = (pg_data_t *)arg;
-	walk_zones_in_node(m, pgdat, frag_show_print);
+	walk_zones_in_node(m, pgdat, false, frag_show_print);
 	return 0;
 }
 
@@ -991,7 +987,7 @@
 		seq_printf(m, "%6d ", order);
 	seq_putc(m, '\n');
 
-	walk_zones_in_node(m, pgdat, pagetypeinfo_showfree_print);
+	walk_zones_in_node(m, pgdat, false, pagetypeinfo_showfree_print);
 
 	return 0;
 }
@@ -1040,7 +1036,7 @@
 	for (mtype = 0; mtype < MIGRATE_TYPES; mtype++)
 		seq_printf(m, "%12s ", migratetype_names[mtype]);
 	seq_putc(m, '\n');
-	walk_zones_in_node(m, pgdat, pagetypeinfo_showblockcount_print);
+	walk_zones_in_node(m, pgdat, false, pagetypeinfo_showblockcount_print);
 
 	return 0;
 }
@@ -1084,7 +1080,11 @@
 
 			page = pfn_to_page(pfn);
 			if (PageBuddy(page)) {
-				pfn += (1UL << page_order(page)) - 1;
+				unsigned long freepage_order;
+
+				freepage_order = page_order_unsafe(page);
+				if (freepage_order < MAX_ORDER)
+					pfn += (1UL << freepage_order) - 1;
 				continue;
 			}
 
@@ -1131,7 +1131,7 @@
 #ifdef CONFIG_PAGE_OWNER
 	int mtype;
 
-	if (!page_owner_inited)
+	if (!static_branch_unlikely(&page_owner_inited))
 		return;
 
 	drain_all_pages(NULL);
@@ -1141,7 +1141,7 @@
 		seq_printf(m, "%12s ", migratetype_names[mtype]);
 	seq_putc(m, '\n');
 
-	walk_zones_in_node(m, pgdat, pagetypeinfo_showmixedcount_print);
+	walk_zones_in_node(m, pgdat, true, pagetypeinfo_showmixedcount_print);
 #endif /* CONFIG_PAGE_OWNER */
 }
 
@@ -1274,7 +1274,7 @@
 static int zoneinfo_show(struct seq_file *m, void *arg)
 {
 	pg_data_t *pgdat = (pg_data_t *)arg;
-	walk_zones_in_node(m, pgdat, zoneinfo_show_print);
+	walk_zones_in_node(m, pgdat, false, zoneinfo_show_print);
 	return 0;
 }
 
@@ -1391,7 +1391,7 @@
 
 static void vmstat_update(struct work_struct *w)
 {
-	if (refresh_cpu_vm_stats()) {
+	if (refresh_cpu_vm_stats(true) && !cpu_isolated(smp_processor_id())) {
 		/*
 		 * Counters were updated so we expect more updates
 		 * to occur in the future. Keep on running the
@@ -1403,26 +1403,34 @@
 	} else {
 		/*
 		 * We did not update any counters so the app may be in
-		 * a mode where it does not cause counter updates.
+		 * a mode where it does not cause counter updates or the cpu
+		 * was isolated.
 		 * We may be uselessly running vmstat_update.
 		 * Defer the checking for differentials to the
 		 * shepherd thread on a different processor.
 		 */
-		int r;
-		/*
-		 * Shepherd work thread does not race since it never
-		 * changes the bit if its zero but the cpu
-		 * online / off line code may race if
-		 * worker threads are still allowed during
-		 * shutdown / startup.
-		 */
-		r = cpumask_test_and_set_cpu(smp_processor_id(),
-			cpu_stat_off);
-		VM_BUG_ON(r);
+		cpumask_set_cpu(smp_processor_id(), cpu_stat_off);
 	}
 }
 
 /*
+ * Switch off vmstat processing and then fold all the remaining differentials
+ * until the diffs stay at zero. The function is used by NOHZ and can only be
+ * invoked when tick processing is not active.
+ */
+void quiet_vmstat(void)
+{
+	if (system_state != SYSTEM_RUNNING)
+		return;
+
+	do {
+		if (!cpumask_test_and_set_cpu(smp_processor_id(), cpu_stat_off))
+			cancel_delayed_work(this_cpu_ptr(&vmstat_work));
+
+	} while (refresh_cpu_vm_stats(false));
+}
+
+/*
  * Check if the diffs for a certain cpu indicate that
  * an update is needed.
  */
@@ -1454,7 +1462,7 @@
  */
 static void vmstat_shepherd(struct work_struct *w);
 
-static DECLARE_DELAYED_WORK(shepherd, vmstat_shepherd);
+static DECLARE_DEFERRABLE_WORK(shepherd, vmstat_shepherd);
 
 static void vmstat_shepherd(struct work_struct *w)
 {
@@ -1463,7 +1471,7 @@
 	get_online_cpus();
 	/* Check processors whose vmstat worker threads have been disabled */
 	for_each_cpu(cpu, cpu_stat_off)
-		if (need_update(cpu) &&
+		if (!cpu_isolated(cpu) && need_update(cpu) &&
 			cpumask_test_and_clear_cpu(cpu, cpu_stat_off))
 
 			queue_delayed_work_on(cpu, vmstat_wq,
@@ -1627,7 +1635,7 @@
 	if (!node_state(pgdat->node_id, N_MEMORY))
 		return 0;
 
-	walk_zones_in_node(m, pgdat, unusable_show_print);
+	walk_zones_in_node(m, pgdat, false, unusable_show_print);
 
 	return 0;
 }
@@ -1679,7 +1687,7 @@
 {
 	pg_data_t *pgdat = (pg_data_t *)arg;
 
-	walk_zones_in_node(m, pgdat, extfrag_show_print);
+	walk_zones_in_node(m, pgdat, false, extfrag_show_print);
 
 	return 0;
 }
diff -ruw linux-4.4.115/mm/zsmalloc.c linux-4.4.115-fbx/mm/zsmalloc.c
--- linux-4.4.115/mm/zsmalloc.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/mm/zsmalloc.c	2019-10-29 09:26:25.745223748 +0100
@@ -16,32 +16,15 @@
  * struct page(s) to form a zspage.
  *
  * Usage of struct page fields:
- *	page->private: points to the first component (0-order) page
- *	page->index (union with page->freelist): offset of the first object
- *		starting in this page. For the first page, this is
- *		always 0, so we use this field (aka freelist) to point
- *		to the first free object in zspage.
- *	page->lru: links together all component pages (except the first page)
- *		of a zspage
- *
- *	For _first_ page only:
- *
- *	page->private: refers to the component page after the first page
- *		If the page is first_page for huge object, it stores handle.
- *		Look at size_class->huge.
- *	page->freelist: points to the first free object in zspage.
- *		Free objects are linked together using in-place
- *		metadata.
- *	page->objects: maximum number of objects we can store in this
- *		zspage (class->zspage_order * PAGE_SIZE / class->size)
- *	page->lru: links together first pages of various zspages.
- *		Basically forming list of zspages in a fullness group.
- *	page->mapping: class index and fullness group of the zspage
- *	page->inuse: the number of objects that are used in this zspage
+ *	page->private: points to zspage
+ *	page->freelist(index): links together all component pages of a zspage.
+ *		For the huge page, this is always 0, so we use this field
+ *		to store the handle.
  *
  * Usage of struct page flags:
  *	PG_private: identifies the first component page
  *	PG_private2: identifies the last component page
+ *	PG_owner_priv_1: identifies the huge component page
  *
  */
 
@@ -64,6 +47,11 @@
 #include <linux/debugfs.h>
 #include <linux/zsmalloc.h>
 #include <linux/zpool.h>
+#include <linux/mount.h>
+#include <linux/migrate.h>
+#include <linux/pagemap.h>
+
+#define ZSPAGE_MAGIC	0x58
 
 /*
 * This must be a power of 2 and greater than or equal to sizeof(link_free).
@@ -86,9 +74,7 @@
  * Object location (<PFN>, <obj_idx>) is encoded as
  * as single (unsigned long) handle value.
  *
- * Note that object index <obj_idx> is relative to system
- * page <PFN> it is stored in, so for each sub-page belonging
- * to a zspage, obj_idx starts with 0.
+ * Note that object index <obj_idx> starts from 0.
  *
  * This is made more complicated by various memory models and PAE.
  */
@@ -147,33 +133,29 @@
  *  ZS_MIN_ALLOC_SIZE and ZS_SIZE_CLASS_DELTA must be multiple of ZS_ALIGN
  *  (reason above)
  */
-#define ZS_SIZE_CLASS_DELTA	(PAGE_SIZE >> 8)
+#define ZS_SIZE_CLASS_DELTA	(PAGE_SIZE >> CLASS_BITS)
 
 /*
  * We do not maintain any list for completely empty or full pages
  */
 enum fullness_group {
-	ZS_ALMOST_FULL,
-	ZS_ALMOST_EMPTY,
-	_ZS_NR_FULLNESS_GROUPS,
-
 	ZS_EMPTY,
-	ZS_FULL
+	ZS_ALMOST_EMPTY,
+	ZS_ALMOST_FULL,
+	ZS_FULL,
+	NR_ZS_FULLNESS,
 };
 
 enum zs_stat_type {
+	CLASS_EMPTY,
+	CLASS_ALMOST_EMPTY,
+	CLASS_ALMOST_FULL,
+	CLASS_FULL,
 	OBJ_ALLOCATED,
 	OBJ_USED,
-	CLASS_ALMOST_FULL,
-	CLASS_ALMOST_EMPTY,
+	NR_ZS_STAT_TYPE,
 };
 
-#ifdef CONFIG_ZSMALLOC_STAT
-#define NR_ZS_STAT_TYPE	(CLASS_ALMOST_EMPTY + 1)
-#else
-#define NR_ZS_STAT_TYPE	(OBJ_USED + 1)
-#endif
-
 struct zs_size_stat {
 	unsigned long objs[NR_ZS_STAT_TYPE];
 };
@@ -182,6 +164,10 @@
 static struct dentry *zs_stat_root;
 #endif
 
+#ifdef CONFIG_COMPACTION
+static struct vfsmount *zsmalloc_mnt;
+#endif
+
 /*
  * number of size_classes
  */
@@ -205,35 +191,49 @@
 
 struct size_class {
 	spinlock_t lock;
-	struct page *fullness_list[_ZS_NR_FULLNESS_GROUPS];
+	struct list_head fullness_list[NR_ZS_FULLNESS];
 	/*
 	 * Size of objects stored in this class. Must be multiple
 	 * of ZS_ALIGN.
 	 */
 	int size;
-	unsigned int index;
-
+	int objs_per_zspage;
 	/* Number of PAGE_SIZE sized pages to combine to form a 'zspage' */
 	int pages_per_zspage;
+
+	unsigned int index;
 	struct zs_size_stat stats;
+};
 
 	/* huge object: pages_per_zspage == 1 && maxobj_per_zspage == 1 */
-	bool huge;
-};
+static void SetPageHugeObject(struct page *page)
+{
+	SetPageOwnerPriv1(page);
+}
+
+static void ClearPageHugeObject(struct page *page)
+{
+	ClearPageOwnerPriv1(page);
+}
+
+static int PageHugeObject(struct page *page)
+{
+	return PageOwnerPriv1(page);
+}
 
 /*
  * Placed within free objects to form a singly linked list.
- * For every zspage, first_page->freelist gives head of this list.
+ * For every zspage, zspage->freeobj gives head of this list.
  *
  * This must be power of 2 and less than or equal to ZS_ALIGN
  */
 struct link_free {
 	union {
 		/*
-		 * Position of next free chunk (encodes <PFN, obj_idx>)
+		 * Free object index;
 		 * It's valid for non-allocated object
 		 */
-		void *next;
+		unsigned long next;
 		/*
 		 * Handle of allocated object.
 		 */
@@ -246,8 +246,8 @@
 
 	struct size_class **size_class;
 	struct kmem_cache *handle_cachep;
+	struct kmem_cache *zspage_cachep;
 
-	gfp_t flags;	/* allocation flags used when growing pool */
 	atomic_long_t pages_allocated;
 
 	struct zs_pool_stats stats;
@@ -262,16 +262,36 @@
 #ifdef CONFIG_ZSMALLOC_STAT
 	struct dentry *stat_dentry;
 #endif
+#ifdef CONFIG_COMPACTION
+	struct inode *inode;
+	struct work_struct free_work;
+#endif
 };
 
 /*
  * A zspage's class index and fullness group
  * are encoded in its (first)page->mapping
  */
-#define CLASS_IDX_BITS	28
-#define FULLNESS_BITS	4
-#define CLASS_IDX_MASK	((1 << CLASS_IDX_BITS) - 1)
-#define FULLNESS_MASK	((1 << FULLNESS_BITS) - 1)
+#define FULLNESS_BITS	2
+#define CLASS_BITS	8
+#define ISOLATED_BITS	3
+#define MAGIC_VAL_BITS	8
+
+struct zspage {
+	struct {
+		unsigned int fullness:FULLNESS_BITS;
+		unsigned int class:CLASS_BITS;
+		unsigned int isolated:ISOLATED_BITS;
+		unsigned int magic:MAGIC_VAL_BITS;
+	};
+	unsigned int inuse;
+	unsigned int freeobj;
+	struct page *first_page;
+	struct list_head list; /* fullness list */
+#ifdef CONFIG_COMPACTION
+	rwlock_t lock;
+#endif
+};
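
The four bitfields (2 + 8 + 3 + 8 = 21 bits) share a single unsigned int,
which also bounds the encodable values: NR_ZS_FULLNESS (4) must fit in
FULLNESS_BITS and a class index must stay below 1 << CLASS_BITS. A
compile-time guard one could drop into an init function, as a sketch:

	/* sanity: the bitfields must fit their unsigned int storage unit */
	BUILD_BUG_ON(FULLNESS_BITS + CLASS_BITS + ISOLATED_BITS +
		     MAGIC_VAL_BITS > 8 * sizeof(unsigned int));
	BUILD_BUG_ON(NR_ZS_FULLNESS > (1 << FULLNESS_BITS));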
 
 struct mapping_area {
 #ifdef CONFIG_PGTABLE_MAPPING
@@ -281,32 +301,76 @@
 #endif
 	char *vm_addr; /* address of kmap_atomic()'ed pages */
 	enum zs_mapmode vm_mm; /* mapping mode */
-	bool huge;
 };
 
-static int create_handle_cache(struct zs_pool *pool)
+#ifdef CONFIG_COMPACTION
+static int zs_register_migration(struct zs_pool *pool);
+static void zs_unregister_migration(struct zs_pool *pool);
+static void migrate_lock_init(struct zspage *zspage);
+static void migrate_read_lock(struct zspage *zspage);
+static void migrate_read_unlock(struct zspage *zspage);
+static void kick_deferred_free(struct zs_pool *pool);
+static void init_deferred_free(struct zs_pool *pool);
+static void SetZsPageMovable(struct zs_pool *pool, struct zspage *zspage);
+#else
+static int zsmalloc_mount(void) { return 0; }
+static void zsmalloc_unmount(void) {}
+static int zs_register_migration(struct zs_pool *pool) { return 0; }
+static void zs_unregister_migration(struct zs_pool *pool) {}
+static void migrate_lock_init(struct zspage *zspage) {}
+static void migrate_read_lock(struct zspage *zspage) {}
+static void migrate_read_unlock(struct zspage *zspage) {}
+static void kick_deferred_free(struct zs_pool *pool) {}
+static void init_deferred_free(struct zs_pool *pool) {}
+static void SetZsPageMovable(struct zs_pool *pool, struct zspage *zspage) {}
+#endif
+
+static int create_cache(struct zs_pool *pool)
 {
 	pool->handle_cachep = kmem_cache_create("zs_handle", ZS_HANDLE_SIZE,
 					0, 0, NULL);
-	return pool->handle_cachep ? 0 : 1;
+	if (!pool->handle_cachep)
+		return 1;
+
+	pool->zspage_cachep = kmem_cache_create("zspage", sizeof(struct zspage),
+					0, 0, NULL);
+	if (!pool->zspage_cachep) {
+		kmem_cache_destroy(pool->handle_cachep);
+		pool->handle_cachep = NULL;
+		return 1;
 }
 
-static void destroy_handle_cache(struct zs_pool *pool)
+	return 0;
+}
+
+static void destroy_cache(struct zs_pool *pool)
 {
 	kmem_cache_destroy(pool->handle_cachep);
+	kmem_cache_destroy(pool->zspage_cachep);
 }
 
-static unsigned long alloc_handle(struct zs_pool *pool)
+static unsigned long cache_alloc_handle(struct zs_pool *pool, gfp_t gfp)
 {
 	return (unsigned long)kmem_cache_alloc(pool->handle_cachep,
-		pool->flags & ~__GFP_HIGHMEM);
+			gfp & ~(__GFP_HIGHMEM|__GFP_MOVABLE));
 }
 
-static void free_handle(struct zs_pool *pool, unsigned long handle)
+static void cache_free_handle(struct zs_pool *pool, unsigned long handle)
 {
 	kmem_cache_free(pool->handle_cachep, (void *)handle);
 }
 
+static struct zspage *cache_alloc_zspage(struct zs_pool *pool, gfp_t flags)
+{
+	return kmem_cache_alloc(pool->zspage_cachep,
+			flags & ~(__GFP_HIGHMEM|__GFP_MOVABLE));
+};
+
+static void cache_free_zspage(struct zs_pool *pool, struct zspage *zspage)
+{
+	kmem_cache_free(pool->zspage_cachep, zspage);
+}
+
 static void record_obj(unsigned long handle, unsigned long obj)
 {
 	/*
@@ -325,7 +389,12 @@
 			     const struct zpool_ops *zpool_ops,
 			     struct zpool *zpool)
 {
-	return zs_create_pool(name, gfp);
+	/*
+	 * Ignore global gfp flags: zs_malloc() may be invoked from
+	 * different contexts and its caller must provide a valid
+	 * gfp mask.
+	 */
+	return zs_create_pool(name);
 }
 
 static void zs_zpool_destroy(void *pool)
@@ -336,7 +405,7 @@
 static int zs_zpool_malloc(void *pool, size_t size, gfp_t gfp,
 			unsigned long *handle)
 {
-	*handle = zs_malloc(pool, size);
+	*handle = zs_malloc(pool, size, gfp);
 	return *handle ? 0 : -1;
 }
 static void zs_zpool_free(void *pool, unsigned long handle)
@@ -404,36 +473,76 @@
 /* per-cpu VM mapping areas for zspage accesses that cross page boundaries */
 static DEFINE_PER_CPU(struct mapping_area, zs_map_area);
 
+static bool is_zspage_isolated(struct zspage *zspage)
+{
+	return zspage->isolated;
+}
+
 static int is_first_page(struct page *page)
 {
 	return PagePrivate(page);
 }
 
-static int is_last_page(struct page *page)
+/* Protected by class->lock */
+static inline int get_zspage_inuse(struct zspage *zspage)
+{
+	return zspage->inuse;
+}
+
+static inline void set_zspage_inuse(struct zspage *zspage, int val)
+{
+	zspage->inuse = val;
+}
+
+static inline void mod_zspage_inuse(struct zspage *zspage, int val)
+{
+	zspage->inuse += val;
+}
+
+static inline struct page *get_first_page(struct zspage *zspage)
+{
+	struct page *first_page = zspage->first_page;
+
+	VM_BUG_ON_PAGE(!is_first_page(first_page), first_page);
+	return first_page;
+}
+
+static inline int get_first_obj_offset(struct page *page)
+{
+	return page->units;
+}
+
+static inline void set_first_obj_offset(struct page *page, int offset)
+{
+	page->units = offset;
+}
+
+static inline unsigned int get_freeobj(struct zspage *zspage)
+{
+	return zspage->freeobj;
+}
+
+static inline void set_freeobj(struct zspage *zspage, unsigned int obj)
 {
-	return PagePrivate2(page);
+	zspage->freeobj = obj;
 }
 
-static void get_zspage_mapping(struct page *page, unsigned int *class_idx,
+static void get_zspage_mapping(struct zspage *zspage,
+				unsigned int *class_idx,
 				enum fullness_group *fullness)
 {
-	unsigned long m;
-	BUG_ON(!is_first_page(page));
+	BUG_ON(zspage->magic != ZSPAGE_MAGIC);
 
-	m = (unsigned long)page->mapping;
-	*fullness = m & FULLNESS_MASK;
-	*class_idx = (m >> FULLNESS_BITS) & CLASS_IDX_MASK;
+	*fullness = zspage->fullness;
+	*class_idx = zspage->class;
 }
 
-static void set_zspage_mapping(struct page *page, unsigned int class_idx,
+static void set_zspage_mapping(struct zspage *zspage,
+				unsigned int class_idx,
 				enum fullness_group fullness)
 {
-	unsigned long m;
-	BUG_ON(!is_first_page(page));
-
-	m = ((class_idx & CLASS_IDX_MASK) << FULLNESS_BITS) |
-			(fullness & FULLNESS_MASK);
-	page->mapping = (struct address_space *)m;
+	zspage->class = class_idx;
+	zspage->fullness = fullness;
 }
 
 /*
@@ -457,23 +566,19 @@
 static inline void zs_stat_inc(struct size_class *class,
 				enum zs_stat_type type, unsigned long cnt)
 {
-	if (type < NR_ZS_STAT_TYPE)
 		class->stats.objs[type] += cnt;
 }
 
 static inline void zs_stat_dec(struct size_class *class,
 				enum zs_stat_type type, unsigned long cnt)
 {
-	if (type < NR_ZS_STAT_TYPE)
 		class->stats.objs[type] -= cnt;
 }
 
 static inline unsigned long zs_stat_get(struct size_class *class,
 				enum zs_stat_type type)
 {
-	if (type < NR_ZS_STAT_TYPE)
 		return class->stats.objs[type];
-	return 0;
 }
 
 #ifdef CONFIG_ZSMALLOC_STAT
@@ -495,6 +600,8 @@
 	debugfs_remove_recursive(zs_stat_root);
 }
 
+static unsigned long zs_can_compact(struct size_class *class);
+
 static int zs_stats_size_show(struct seq_file *s, void *v)
 {
 	int i;
@@ -502,14 +609,15 @@
 	struct size_class *class;
 	int objs_per_zspage;
 	unsigned long class_almost_full, class_almost_empty;
-	unsigned long obj_allocated, obj_used, pages_used;
+	unsigned long obj_allocated, obj_used, pages_used, freeable;
 	unsigned long total_class_almost_full = 0, total_class_almost_empty = 0;
 	unsigned long total_objs = 0, total_used_objs = 0, total_pages = 0;
+	unsigned long total_freeable = 0;
 
-	seq_printf(s, " %5s %5s %11s %12s %13s %10s %10s %16s\n",
+	seq_printf(s, " %5s %5s %11s %12s %13s %10s %10s %16s %8s\n",
 			"class", "size", "almost_full", "almost_empty",
 			"obj_allocated", "obj_used", "pages_used",
-			"pages_per_zspage");
+			"pages_per_zspage", "freeable");
 
 	for (i = 0; i < zs_size_classes; i++) {
 		class = pool->size_class[i];
@@ -522,6 +630,7 @@
 		class_almost_empty = zs_stat_get(class, CLASS_ALMOST_EMPTY);
 		obj_allocated = zs_stat_get(class, OBJ_ALLOCATED);
 		obj_used = zs_stat_get(class, OBJ_USED);
+		freeable = zs_can_compact(class);
 		spin_unlock(&class->lock);
 
 		objs_per_zspage = get_maxobj_per_zspage(class->size,
@@ -529,23 +638,25 @@
 		pages_used = obj_allocated / objs_per_zspage *
 				class->pages_per_zspage;
 
-		seq_printf(s, " %5u %5u %11lu %12lu %13lu %10lu %10lu %16d\n",
+		seq_printf(s, " %5u %5u %11lu %12lu %13lu"
+				" %10lu %10lu %16d %8lu\n",
 			i, class->size, class_almost_full, class_almost_empty,
 			obj_allocated, obj_used, pages_used,
-			class->pages_per_zspage);
+			class->pages_per_zspage, freeable);
 
 		total_class_almost_full += class_almost_full;
 		total_class_almost_empty += class_almost_empty;
 		total_objs += obj_allocated;
 		total_used_objs += obj_used;
 		total_pages += pages_used;
+		total_freeable += freeable;
 	}
 
 	seq_puts(s, "\n");
-	seq_printf(s, " %5s %5s %11lu %12lu %13lu %10lu %10lu\n",
+	seq_printf(s, " %5s %5s %11lu %12lu %13lu %10lu %10lu %16s %8lu\n",
 			"Total", "", total_class_almost_full,
 			total_class_almost_empty, total_objs,
-			total_used_objs, total_pages);
+			total_used_objs, total_pages, "", total_freeable);
 
 	return 0;
 }
@@ -562,7 +673,7 @@
 	.release        = single_release,
 };
 
-static int zs_pool_stat_create(const char *name, struct zs_pool *pool)
+static int zs_pool_stat_create(struct zs_pool *pool, const char *name)
 {
 	struct dentry *entry;
 
@@ -602,7 +713,7 @@
 {
 }
 
-static inline int zs_pool_stat_create(const char *name, struct zs_pool *pool)
+static inline int zs_pool_stat_create(struct zs_pool *pool, const char *name)
 {
 	return 0;
 }
@@ -620,20 +731,20 @@
  * the pool (not yet implemented). This function returns fullness
  * status of the given page.
  */
-static enum fullness_group get_fullness_group(struct page *page)
+static enum fullness_group get_fullness_group(struct size_class *class,
+						struct zspage *zspage)
 {
-	int inuse, max_objects;
+	int inuse, objs_per_zspage;
 	enum fullness_group fg;
-	BUG_ON(!is_first_page(page));
 
-	inuse = page->inuse;
-	max_objects = page->objects;
+	inuse = get_zspage_inuse(zspage);
+	objs_per_zspage = class->objs_per_zspage;
 
 	if (inuse == 0)
 		fg = ZS_EMPTY;
-	else if (inuse == max_objects)
+	else if (inuse == objs_per_zspage)
 		fg = ZS_FULL;
-	else if (inuse <= 3 * max_objects / fullness_threshold_frac)
+	else if (inuse <= 3 * objs_per_zspage / fullness_threshold_frac)
 		fg = ZS_ALMOST_EMPTY;
 	else
 		fg = ZS_ALMOST_FULL;
@@ -647,59 +758,41 @@
  * have. This functions inserts the given zspage into the freelist
  * identified by <class, fullness_group>.
  */
-static void insert_zspage(struct page *page, struct size_class *class,
+static void insert_zspage(struct size_class *class,
+				struct zspage *zspage,
 				enum fullness_group fullness)
 {
-	struct page **head;
-
-	BUG_ON(!is_first_page(page));
-
-	if (fullness >= _ZS_NR_FULLNESS_GROUPS)
-		return;
+	struct zspage *head;
 
-	zs_stat_inc(class, fullness == ZS_ALMOST_EMPTY ?
-			CLASS_ALMOST_EMPTY : CLASS_ALMOST_FULL, 1);
-
-	head = &class->fullness_list[fullness];
-	if (!*head) {
-		*head = page;
+	zs_stat_inc(class, fullness, 1);
+	head = list_first_entry_or_null(&class->fullness_list[fullness],
+					struct zspage, list);
+	/*
+	 * We want to see more ZS_FULL pages and fewer almost empty/full.
+	 * Put pages with higher ->inuse first.
+	 */
+	if (head) {
+		if (get_zspage_inuse(zspage) < get_zspage_inuse(head)) {
+			list_add(&zspage->list, &head->list);
 		return;
 	}
-
-	/*
-	 * We want to see more ZS_FULL pages and less almost
-	 * empty/full. Put pages with higher ->inuse first.
-	 */
-	list_add_tail(&page->lru, &(*head)->lru);
-	if (page->inuse >= (*head)->inuse)
-		*head = page;
+	}
+	list_add(&zspage->list, &class->fullness_list[fullness]);
 }
 
 /*
  * This function removes the given zspage from the freelist identified
  * by <class, fullness_group>.
  */
-static void remove_zspage(struct page *page, struct size_class *class,
+static void remove_zspage(struct size_class *class,
+				struct zspage *zspage,
 				enum fullness_group fullness)
 {
-	struct page **head;
-
-	BUG_ON(!is_first_page(page));
+	VM_BUG_ON(list_empty(&class->fullness_list[fullness]));
+	VM_BUG_ON(is_zspage_isolated(zspage));
 
-	if (fullness >= _ZS_NR_FULLNESS_GROUPS)
-		return;
-
-	head = &class->fullness_list[fullness];
-	BUG_ON(!*head);
-	if (list_empty(&(*head)->lru))
-		*head = NULL;
-	else if (*head == page)
-		*head = (struct page *)list_entry((*head)->lru.next,
-					struct page, lru);
-
-	list_del_init(&page->lru);
-	zs_stat_dec(class, fullness == ZS_ALMOST_EMPTY ?
-			CLASS_ALMOST_EMPTY : CLASS_ALMOST_FULL, 1);
+	list_del_init(&zspage->list);
+	zs_stat_dec(class, fullness, 1);
 }
 
 /*
@@ -712,21 +805,22 @@
  * fullness group.
  */
 static enum fullness_group fix_fullness_group(struct size_class *class,
-						struct page *page)
+						struct zspage *zspage)
 {
 	int class_idx;
 	enum fullness_group currfg, newfg;
 
-	BUG_ON(!is_first_page(page));
-
-	get_zspage_mapping(page, &class_idx, &currfg);
-	newfg = get_fullness_group(page);
+	get_zspage_mapping(zspage, &class_idx, &currfg);
+	newfg = get_fullness_group(class, zspage);
 	if (newfg == currfg)
 		goto out;
 
-	remove_zspage(page, class, currfg);
-	insert_zspage(page, class, newfg);
-	set_zspage_mapping(page, class_idx, newfg);
+	if (!is_zspage_isolated(zspage)) {
+		remove_zspage(class, zspage, currfg);
+		insert_zspage(class, zspage, newfg);
+	}
+
+	set_zspage_mapping(zspage, class_idx, newfg);
 
 out:
 	return newfg;
@@ -768,64 +862,49 @@
 	return max_usedpc_order;
 }
 
-/*
- * A single 'zspage' is composed of many system pages which are
- * linked together using fields in struct page. This function finds
- * the first/head page, given any component page of a zspage.
- */
-static struct page *get_first_page(struct page *page)
+static struct zspage *get_zspage(struct page *page)
 {
-	if (is_first_page(page))
-		return page;
-	else
-		return (struct page *)page_private(page);
+	struct zspage *zspage = (struct zspage *)page->private;
+
+	BUG_ON(zspage->magic != ZSPAGE_MAGIC);
+	return zspage;
 }
 
 static struct page *get_next_page(struct page *page)
 {
-	struct page *next;
+	if (unlikely(PageHugeObject(page)))
+		return NULL;
 
-	if (is_last_page(page))
-		next = NULL;
-	else if (is_first_page(page))
-		next = (struct page *)page_private(page);
-	else
-		next = list_entry(page->lru.next, struct page, lru);
+	return page->freelist;
+}
 
-	return next;
+/**
+ * obj_to_location - get (<page>, <obj_idx>) from encoded object value
+ * @obj: encoded object value
+ * @page: returns the page the object resides in
+ * @obj_idx: returns the object index within that page
+ */
+static void obj_to_location(unsigned long obj, struct page **page,
+				unsigned int *obj_idx)
+{
+	obj >>= OBJ_TAG_BITS;
+	*page = pfn_to_page(obj >> OBJ_INDEX_BITS);
+	*obj_idx = (obj & OBJ_INDEX_MASK);
 }
 
-/*
- * Encode <page, obj_idx> as a single handle value.
- * We use the least bit of handle for tagging.
+/**
+ * location_to_obj - get obj value encoded from (<page>, <obj_idx>)
+ * @page: page the object resides in
+ * @obj_idx: object index
  */
-static void *location_to_obj(struct page *page, unsigned long obj_idx)
+static unsigned long location_to_obj(struct page *page, unsigned int obj_idx)
 {
 	unsigned long obj;
 
-	if (!page) {
-		BUG_ON(obj_idx);
-		return NULL;
-	}
-
 	obj = page_to_pfn(page) << OBJ_INDEX_BITS;
-	obj |= ((obj_idx) & OBJ_INDEX_MASK);
+	obj |= obj_idx & OBJ_INDEX_MASK;
 	obj <<= OBJ_TAG_BITS;
 
-	return (void *)obj;
-}
-
-/*
- * Decode <page, obj_idx> pair from the given object handle. We adjust the
- * decoded obj_idx back to its original value since it was adjusted in
- * location_to_obj().
- */
-static void obj_to_location(unsigned long obj, struct page **page,
-				unsigned long *obj_idx)
-{
-	obj >>= OBJ_TAG_BITS;
-	*page = pfn_to_page(obj >> OBJ_INDEX_BITS);
-	*obj_idx = (obj & OBJ_INDEX_MASK);
+	return obj;
 }
 
 static unsigned long handle_to_obj(unsigned long handle)
@@ -833,108 +912,146 @@
 	return *(unsigned long *)handle;
 }
 
-static unsigned long obj_to_head(struct size_class *class, struct page *page,
-			void *obj)
+static unsigned long obj_to_head(struct page *page, void *obj)
 {
-	if (class->huge) {
-		VM_BUG_ON(!is_first_page(page));
-		return page_private(page);
+	if (unlikely(PageHugeObject(page))) {
+		VM_BUG_ON_PAGE(!is_first_page(page), page);
+		return page->index;
 	} else
 		return *(unsigned long *)obj;
 }
 
-static unsigned long obj_idx_to_offset(struct page *page,
-				unsigned long obj_idx, int class_size)
+static inline int testpin_tag(unsigned long handle)
 {
-	unsigned long off = 0;
-
-	if (!is_first_page(page))
-		off = page->index;
-
-	return off + obj_idx * class_size;
+	return bit_spin_is_locked(HANDLE_PIN_BIT, (unsigned long *)handle);
 }
 
 static inline int trypin_tag(unsigned long handle)
 {
-	unsigned long *ptr = (unsigned long *)handle;
-
-	return !test_and_set_bit_lock(HANDLE_PIN_BIT, ptr);
+	return bit_spin_trylock(HANDLE_PIN_BIT, (unsigned long *)handle);
 }
 
 static void pin_tag(unsigned long handle)
 {
-	while (!trypin_tag(handle));
+	bit_spin_lock(HANDLE_PIN_BIT, (unsigned long *)handle);
 }
 
 static void unpin_tag(unsigned long handle)
 {
-	unsigned long *ptr = (unsigned long *)handle;
-
-	clear_bit_unlock(HANDLE_PIN_BIT, ptr);
+	bit_spin_unlock(HANDLE_PIN_BIT, (unsigned long *)handle);
 }
 
 static void reset_page(struct page *page)
 {
+	__ClearPageMovable(page);
 	clear_bit(PG_private, &page->flags);
 	clear_bit(PG_private_2, &page->flags);
 	set_page_private(page, 0);
-	page->mapping = NULL;
-	page->freelist = NULL;
 	page_mapcount_reset(page);
+	ClearPageHugeObject(page);
+	page->freelist = NULL;
+}
+
+/*
+ * To prevent a zspage from being destroyed during migration, freeing a
+ * zspage must hold the locks of all pages in the zspage.
+ */
+void lock_zspage(struct zspage *zspage)
+{
+	struct page *page = get_first_page(zspage);
+
+	do {
+		lock_page(page);
+	} while ((page = get_next_page(page)) != NULL);
 }
 
-static void free_zspage(struct page *first_page)
+int trylock_zspage(struct zspage *zspage)
 {
-	struct page *nextp, *tmp, *head_extra;
+	struct page *cursor, *fail;
 
-	BUG_ON(!is_first_page(first_page));
-	BUG_ON(first_page->inuse);
+	for (cursor = get_first_page(zspage); cursor != NULL; cursor =
+					get_next_page(cursor)) {
+		if (!trylock_page(cursor)) {
+			fail = cursor;
+			goto unlock;
+		}
+	}
 
-	head_extra = (struct page *)page_private(first_page);
+	return 1;
+unlock:
+	for (cursor = get_first_page(zspage); cursor != fail; cursor =
+					get_next_page(cursor))
+		unlock_page(cursor);
 
-	reset_page(first_page);
-	__free_page(first_page);
+	return 0;
+}
 
-	/* zspage with only 1 system page */
-	if (!head_extra)
-		return;
+static void __free_zspage(struct zs_pool *pool, struct size_class *class,
+				struct zspage *zspage)
+{
+	struct page *page, *next;
+	enum fullness_group fg;
+	unsigned int class_idx;
+
+	get_zspage_mapping(zspage, &class_idx, &fg);
+
+	assert_spin_locked(&class->lock);
+
+	VM_BUG_ON(get_zspage_inuse(zspage));
+	VM_BUG_ON(fg != ZS_EMPTY);
+
+	next = page = get_first_page(zspage);
+	do {
+		VM_BUG_ON_PAGE(!PageLocked(page), page);
+		next = get_next_page(page);
+		reset_page(page);
+		unlock_page(page);
+		put_page(page);
+		page = next;
+	} while (page != NULL);
+
+	cache_free_zspage(pool, zspage);
+
+	zs_stat_dec(class, OBJ_ALLOCATED, get_maxobj_per_zspage(
+			class->size, class->pages_per_zspage));
+	atomic_long_sub(class->pages_per_zspage,
+					&pool->pages_allocated);
+}
+
+static void free_zspage(struct zs_pool *pool, struct size_class *class,
+				struct zspage *zspage)
+{
+	VM_BUG_ON(get_zspage_inuse(zspage));
+	VM_BUG_ON(list_empty(&zspage->list));
 
-	list_for_each_entry_safe(nextp, tmp, &head_extra->lru, lru) {
-		list_del(&nextp->lru);
-		reset_page(nextp);
-		__free_page(nextp);
+	if (!trylock_zspage(zspage)) {
+		kick_deferred_free(pool);
+		return;
 	}
-	reset_page(head_extra);
-	__free_page(head_extra);
+
+	remove_zspage(class, zspage, ZS_EMPTY);
+	__free_zspage(pool, class, zspage);
 }
 
 /* Initialize a newly allocated zspage */
-static void init_zspage(struct page *first_page, struct size_class *class)
+static void init_zspage(struct size_class *class, struct zspage *zspage)
 {
+	unsigned int freeobj = 1;
 	unsigned long off = 0;
-	struct page *page = first_page;
+	struct page *page = get_first_page(zspage);
 
-	BUG_ON(!is_first_page(first_page));
 	while (page) {
 		struct page *next_page;
 		struct link_free *link;
-		unsigned int i = 1;
 		void *vaddr;
 
-		/*
-		 * page->index stores offset of first object starting
-		 * in the page. For the first page, this is always 0,
-		 * so we use first_page->index (aka ->freelist) to store
-		 * head of corresponding zspage's freelist.
-		 */
-		if (page != first_page)
-			page->index = off;
+		set_first_obj_offset(page, off);
 
 		vaddr = kmap_atomic(page);
 		link = (struct link_free *)vaddr + off / sizeof(*link);
 
 		while ((off += class->size) < PAGE_SIZE) {
-			link->next = location_to_obj(page, i++);
+			link->next = freeobj++ << OBJ_ALLOCATED_TAG;
 			link += class->size / sizeof(*link);
 		}
 
@@ -944,87 +1061,108 @@
 		 * page (if present)
 		 */
 		next_page = get_next_page(page);
-		link->next = location_to_obj(next_page, 0);
+		if (next_page) {
+			link->next = freeobj++ << OBJ_ALLOCATED_TAG;
+		} else {
+			/*
+			 * Clear the OBJ_ALLOCATED_TAG bit in the last link
+			 * so it can be told apart from an allocated object.
+			 */
+			link->next = -1 << OBJ_ALLOCATED_TAG;
+		}
 		kunmap_atomic(vaddr);
 		page = next_page;
 		off %= PAGE_SIZE;
 	}
+
+	set_freeobj(zspage, 0);
 }
 
-/*
- * Allocate a zspage for the given size class
- */
-static struct page *alloc_zspage(struct size_class *class, gfp_t flags)
+static void create_page_chain(struct size_class *class, struct zspage *zspage,
+				struct page *pages[])
 {
-	int i, error;
-	struct page *first_page = NULL, *uninitialized_var(prev_page);
+	int i;
+	struct page *page;
+	struct page *prev_page = NULL;
+	int nr_pages = class->pages_per_zspage;
 
 	/*
 	 * Allocate individual pages and link them together as:
-	 * 1. first page->private = first sub-page
-	 * 2. all sub-pages are linked together using page->lru
-	 * 3. each sub-page is linked to the first page using page->private
+	 * 1. all pages are linked together using page->freelist
+	 * 2. each sub-page points to the zspage using page->private
 	 *
-	 * For each size class, First/Head pages are linked together using
-	 * page->lru. Also, we set PG_private to identify the first page
-	 * (i.e. no other sub-page has this flag set) and PG_private_2 to
-	 * identify the last page.
+	 * we set PG_private to identify the first page (i.e. no other sub-page
+	 * has this flag set) and PG_private_2 to identify the last page.
 	 */
-	error = -ENOMEM;
-	for (i = 0; i < class->pages_per_zspage; i++) {
-		struct page *page;
-
-		page = alloc_page(flags);
-		if (!page)
-			goto cleanup;
-
-		INIT_LIST_HEAD(&page->lru);
-		if (i == 0) {	/* first page */
+	for (i = 0; i < nr_pages; i++) {
+		page = pages[i];
+		set_page_private(page, (unsigned long)zspage);
+		page->freelist = NULL;
+		if (i == 0) {
+			zspage->first_page = page;
 			SetPagePrivate(page);
-			set_page_private(page, 0);
-			first_page = page;
-			first_page->inuse = 0;
+			if (unlikely(class->objs_per_zspage == 1 &&
+					class->pages_per_zspage == 1))
+				SetPageHugeObject(page);
+		} else {
+			prev_page->freelist = page;
 		}
-		if (i == 1)
-			set_page_private(first_page, (unsigned long)page);
-		if (i >= 1)
-			set_page_private(page, (unsigned long)first_page);
-		if (i >= 2)
-			list_add(&page->lru, &prev_page->lru);
-		if (i == class->pages_per_zspage - 1)	/* last page */
+		if (i == nr_pages - 1)
 			SetPagePrivate2(page);
 		prev_page = page;
 	}
+}
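
create_page_chain() threads a zspage's subpages through page->freelist
and points every subpage back at its zspage via page->private; PG_private
marks the head page and PG_private_2 the tail. A sketch of the traversal
this layout enables, using the in-file helpers (get_first_page() and
get_next_page() follow exactly this chain):

	/* Sketch: visit every subpage of a zspage, head to tail. */
	static void for_each_subpage(struct zspage *zspage,
				     void (*fn)(struct page *page))
	{
		struct page *page = get_first_page(zspage);

		do {
			fn(page);
		} while ((page = get_next_page(page)) != NULL);
	}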
+
+/*
+ * Allocate a zspage for the given size class
+ */
+static struct zspage *alloc_zspage(struct zs_pool *pool,
+					struct size_class *class,
+					gfp_t gfp)
+{
+	int i;
+	struct page *pages[ZS_MAX_PAGES_PER_ZSPAGE];
+	struct zspage *zspage = cache_alloc_zspage(pool, gfp);
+
+	if (!zspage)
+		return NULL;
 
-	init_zspage(first_page, class);
+	memset(zspage, 0, sizeof(struct zspage));
+	zspage->magic = ZSPAGE_MAGIC;
+	migrate_lock_init(zspage);
 
-	first_page->freelist = location_to_obj(first_page, 0);
-	/* Maximum number of objects we can store in this zspage */
-	first_page->objects = class->pages_per_zspage * PAGE_SIZE / class->size;
-
-	error = 0; /* Success */
-
-cleanup:
-	if (unlikely(error) && first_page) {
-		free_zspage(first_page);
-		first_page = NULL;
+	for (i = 0; i < class->pages_per_zspage; i++) {
+		struct page *page;
+
+		page = alloc_page(gfp);
+		if (!page) {
+			while (--i >= 0)
+				__free_page(pages[i]);
+			cache_free_zspage(pool, zspage);
+			return NULL;
+		}
+		pages[i] = page;
 	}
 
-	return first_page;
+	create_page_chain(class, zspage, pages);
+	init_zspage(class, zspage);
+
+	return zspage;
 }
 
-static struct page *find_get_zspage(struct size_class *class)
+static struct zspage *find_get_zspage(struct size_class *class)
 {
 	int i;
-	struct page *page;
+	struct zspage *zspage;
 
-	for (i = 0; i < _ZS_NR_FULLNESS_GROUPS; i++) {
-		page = class->fullness_list[i];
-		if (page)
+	for (i = ZS_ALMOST_FULL; i >= ZS_EMPTY; i--) {
+		zspage = list_first_entry_or_null(&class->fullness_list[i],
+				struct zspage, list);
+		if (zspage)
 			break;
 	}
 
-	return page;
+	return zspage;
 }
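
Note the search order: find_get_zspage() walks the fullness lists from
ZS_ALMOST_FULL down to ZS_EMPTY, so new objects land in the fullest
partially-used zspage first, densifying existing zspages before emptier
ones are touched. Given the enum ordering this patch relies on
(ZS_EMPTY < ZS_ALMOST_EMPTY < ZS_ALMOST_FULL), the loop is equivalent to
this explicit preference table (illustrative only, not in the patch):

	/* Allocation preference, fullest group first. */
	static const enum fullness_group alloc_order[] = {
		ZS_ALMOST_FULL, ZS_ALMOST_EMPTY, ZS_EMPTY,
	};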
 
 #ifdef CONFIG_PGTABLE_MAPPING
@@ -1127,11 +1265,9 @@
 		goto out;
 
 	buf = area->vm_buf;
-	if (!area->huge) {
 		buf = buf + ZS_HANDLE_SIZE;
 		size -= ZS_HANDLE_SIZE;
 		off += ZS_HANDLE_SIZE;
-	}
 
 	sizes[0] = PAGE_SIZE - off;
 	sizes[1] = size - sizes[0];
@@ -1231,11 +1367,9 @@
 	return true;
 }
 
-static bool zspage_full(struct page *page)
+static bool zspage_full(struct size_class *class, struct zspage *zspage)
 {
-	BUG_ON(!is_first_page(page));
-
-	return page->inuse == page->objects;
+	return get_zspage_inuse(zspage) == class->objs_per_zspage;
 }
 
 unsigned long zs_get_total_pages(struct zs_pool *pool)
@@ -1261,8 +1395,10 @@
 void *zs_map_object(struct zs_pool *pool, unsigned long handle,
 			enum zs_mapmode mm)
 {
+	struct zspage *zspage;
 	struct page *page;
-	unsigned long obj, obj_idx, off;
+	unsigned long obj, off;
+	unsigned int obj_idx;
 
 	unsigned int class_idx;
 	enum fullness_group fg;
@@ -1271,23 +1407,26 @@
 	struct page *pages[2];
 	void *ret;
 
-	BUG_ON(!handle);
-
 	/*
 	 * Because we use per-cpu mapping areas shared among the
 	 * pools/users, we can't allow mapping in interrupt context
 	 * because it can corrupt another users mappings.
 	 */
-	BUG_ON(in_interrupt());
+	WARN_ON_ONCE(in_interrupt());
 
 	/* From now on, migration cannot move the object */
 	pin_tag(handle);
 
 	obj = handle_to_obj(handle);
 	obj_to_location(obj, &page, &obj_idx);
-	get_zspage_mapping(get_first_page(page), &class_idx, &fg);
+	zspage = get_zspage(page);
+
+	/* migration cannot move any subpage in this zspage */
+	migrate_read_lock(zspage);
+
+	get_zspage_mapping(zspage, &class_idx, &fg);
 	class = pool->size_class[class_idx];
-	off = obj_idx_to_offset(page, obj_idx, class->size);
+	off = (class->size * obj_idx) & ~PAGE_MASK;
 
 	area = &get_cpu_var(zs_map_area);
 	area->vm_mm = mm;
@@ -1305,7 +1444,7 @@
 
 	ret = __zs_map_object(area, pages, off, class->size);
 out:
-	if (!class->huge)
+	if (likely(!PageHugeObject(page)))
 		ret += ZS_HANDLE_SIZE;
 
 	return ret;
@@ -1314,21 +1453,22 @@
 
 void zs_unmap_object(struct zs_pool *pool, unsigned long handle)
 {
+	struct zspage *zspage;
 	struct page *page;
-	unsigned long obj, obj_idx, off;
+	unsigned long obj, off;
+	unsigned int obj_idx;
 
 	unsigned int class_idx;
 	enum fullness_group fg;
 	struct size_class *class;
 	struct mapping_area *area;
 
-	BUG_ON(!handle);
-
 	obj = handle_to_obj(handle);
 	obj_to_location(obj, &page, &obj_idx);
-	get_zspage_mapping(get_first_page(page), &class_idx, &fg);
+	zspage = get_zspage(page);
+	get_zspage_mapping(zspage, &class_idx, &fg);
 	class = pool->size_class[class_idx];
-	off = obj_idx_to_offset(page, obj_idx, class->size);
+	off = (class->size * obj_idx) & ~PAGE_MASK;
 
 	area = this_cpu_ptr(&zs_map_area);
 	if (off + class->size <= PAGE_SIZE)
@@ -1343,38 +1483,50 @@
 		__zs_unmap_object(area, pages, off, class->size);
 	}
 	put_cpu_var(zs_map_area);
+
+	migrate_read_unlock(zspage);
 	unpin_tag(handle);
 }
 EXPORT_SYMBOL_GPL(zs_unmap_object);
 
-static unsigned long obj_malloc(struct page *first_page,
-		struct size_class *class, unsigned long handle)
+static unsigned long obj_malloc(struct size_class *class,
+				struct zspage *zspage, unsigned long handle)
 {
+	int i, nr_page, offset;
 	unsigned long obj;
 	struct link_free *link;
 
 	struct page *m_page;
-	unsigned long m_objidx, m_offset;
+	unsigned long m_offset;
 	void *vaddr;
 
 	handle |= OBJ_ALLOCATED_TAG;
-	obj = (unsigned long)first_page->freelist;
-	obj_to_location(obj, &m_page, &m_objidx);
-	m_offset = obj_idx_to_offset(m_page, m_objidx, class->size);
+	obj = get_freeobj(zspage);
+
+	offset = obj * class->size;
+	nr_page = offset >> PAGE_SHIFT;
+	m_offset = offset & ~PAGE_MASK;
+	m_page = get_first_page(zspage);
+
+	for (i = 0; i < nr_page; i++)
+		m_page = get_next_page(m_page);
 
 	vaddr = kmap_atomic(m_page);
 	link = (struct link_free *)vaddr + m_offset / sizeof(*link);
-	first_page->freelist = link->next;
-	if (!class->huge)
+	set_freeobj(zspage, link->next >> OBJ_ALLOCATED_TAG);
+	if (likely(!PageHugeObject(m_page)))
 		/* record handle in the header of allocated chunk */
 		link->handle = handle;
 	else
-		/* record handle in first_page->private */
-		set_page_private(first_page, handle);
+		/* record handle to page->index */
+		zspage->first_page->index = handle;
+
 	kunmap_atomic(vaddr);
-	first_page->inuse++;
+	mod_zspage_inuse(zspage, 1);
 	zs_stat_inc(class, OBJ_USED, 1);
 
+	obj = location_to_obj(m_page, obj);
+
 	return obj;
 }
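
obj_malloc() locates an object with plain arithmetic: the byte offset
into the zspage is index * class->size, the subpage is offset >>
PAGE_SHIFT, and the in-page offset is offset & ~PAGE_MASK. A runnable
check of that math with example values (4 KiB pages and a hypothetical
3264-byte size class):

	#include <assert.h>

	#define PAGE_SHIFT	12
	#define PAGE_SIZE	(1UL << PAGE_SHIFT)
	#define PAGE_MASK	(~(PAGE_SIZE - 1))

	int main(void)
	{
		unsigned long class_size = 3264;	/* example size class */
		unsigned long obj_idx = 5;
		unsigned long offset = obj_idx * class_size;	/* 16320 */

		assert((offset >> PAGE_SHIFT) == 3);	/* 4th subpage */
		assert((offset & ~PAGE_MASK) == 4032);	/* offset inside it */
		return 0;
	}

Note that 4032 + 3264 exceeds PAGE_SIZE, i.e. this object straddles into
the next subpage; that is why zs_map_object() copies through two pages
whenever off + class->size > PAGE_SIZE.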
 
@@ -1388,16 +1540,17 @@
  * otherwise 0.
  * Allocation requests with size > ZS_MAX_ALLOC_SIZE will fail.
  */
-unsigned long zs_malloc(struct zs_pool *pool, size_t size)
+unsigned long zs_malloc(struct zs_pool *pool, size_t size, gfp_t gfp)
 {
 	unsigned long handle, obj;
 	struct size_class *class;
-	struct page *first_page;
+	enum fullness_group newfg;
+	struct zspage *zspage;
 
 	if (unlikely(!size || size > ZS_MAX_ALLOC_SIZE))
 		return 0;
 
-	handle = alloc_handle(pool);
+	handle = cache_alloc_handle(pool, gfp);
 	if (!handle)
 		return 0;
 
@@ -1406,71 +1559,79 @@
 	class = pool->size_class[get_size_class_index(size)];
 
 	spin_lock(&class->lock);
-	first_page = find_get_zspage(class);
+	zspage = find_get_zspage(class);
+	if (likely(zspage)) {
+		obj = obj_malloc(class, zspage, handle);
+		/* Now move the zspage to another fullness group, if required */
+		fix_fullness_group(class, zspage);
+		record_obj(handle, obj);
+		spin_unlock(&class->lock);
+
+		return handle;
+	}
 
-	if (!first_page) {
 		spin_unlock(&class->lock);
-		first_page = alloc_zspage(class, pool->flags);
-		if (unlikely(!first_page)) {
-			free_handle(pool, handle);
+
+	zspage = alloc_zspage(pool, class, gfp);
+	if (!zspage) {
+		cache_free_handle(pool, handle);
 			return 0;
 		}
 
-		set_zspage_mapping(first_page, class->index, ZS_EMPTY);
+	spin_lock(&class->lock);
+	obj = obj_malloc(class, zspage, handle);
+	newfg = get_fullness_group(class, zspage);
+	insert_zspage(class, zspage, newfg);
+	set_zspage_mapping(zspage, class->index, newfg);
+	record_obj(handle, obj);
 		atomic_long_add(class->pages_per_zspage,
 					&pool->pages_allocated);
-
-		spin_lock(&class->lock);
 		zs_stat_inc(class, OBJ_ALLOCATED, get_maxobj_per_zspage(
 				class->size, class->pages_per_zspage));
-	}
 
-	obj = obj_malloc(first_page, class, handle);
-	/* Now move the zspage to another fullness group, if required */
-	fix_fullness_group(class, first_page);
-	record_obj(handle, obj);
+	/* The zspage is fully set up, so mark its pages as movable */
+	SetZsPageMovable(pool, zspage);
 	spin_unlock(&class->lock);
 
 	return handle;
 }
 EXPORT_SYMBOL_GPL(zs_malloc);
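
With this signature change the allocation context moves from pool
creation to each call site. A hedged usage sketch of the new API (the
caller and its name are illustrative, not part of this patch):

	/* Store a buffer in a zs_pool; len must be <= ZS_MAX_ALLOC_SIZE. */
	static int store_buffer(struct zs_pool *pool, const void *src,
				size_t len)
	{
		unsigned long handle;
		void *dst;

		handle = zs_malloc(pool, len, GFP_KERNEL);
		if (!handle)
			return -ENOMEM;

		dst = zs_map_object(pool, handle, ZS_MM_WO);
		memcpy(dst, src, len);
		zs_unmap_object(pool, handle);

		/* keep handle; release later with zs_free(pool, handle) */
		return 0;
	}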
 
-static void obj_free(struct zs_pool *pool, struct size_class *class,
-			unsigned long obj)
+static void obj_free(struct size_class *class, unsigned long obj)
 {
 	struct link_free *link;
-	struct page *first_page, *f_page;
-	unsigned long f_objidx, f_offset;
+	struct zspage *zspage;
+	struct page *f_page;
+	unsigned long f_offset;
+	unsigned int f_objidx;
 	void *vaddr;
 
-	BUG_ON(!obj);
-
 	obj &= ~OBJ_ALLOCATED_TAG;
 	obj_to_location(obj, &f_page, &f_objidx);
-	first_page = get_first_page(f_page);
-
-	f_offset = obj_idx_to_offset(f_page, f_objidx, class->size);
+	f_offset = (class->size * f_objidx) & ~PAGE_MASK;
+	zspage = get_zspage(f_page);
 
 	vaddr = kmap_atomic(f_page);
 
 	/* Insert this object in containing zspage's freelist */
 	link = (struct link_free *)(vaddr + f_offset);
-	link->next = first_page->freelist;
-	if (class->huge)
-		set_page_private(first_page, 0);
+	link->next = get_freeobj(zspage) << OBJ_ALLOCATED_TAG;
 	kunmap_atomic(vaddr);
-	first_page->freelist = (void *)obj;
-	first_page->inuse--;
+	set_freeobj(zspage, f_objidx);
+	mod_zspage_inuse(zspage, -1);
 	zs_stat_dec(class, OBJ_USED, 1);
 }
 
 void zs_free(struct zs_pool *pool, unsigned long handle)
 {
-	struct page *first_page, *f_page;
-	unsigned long obj, f_objidx;
+	struct zspage *zspage;
+	struct page *f_page;
+	unsigned long obj;
+	unsigned int f_objidx;
 	int class_idx;
 	struct size_class *class;
 	enum fullness_group fullness;
+	bool isolated;
 
 	if (unlikely(!handle))
 		return;
@@ -1478,33 +1639,39 @@
 	pin_tag(handle);
 	obj = handle_to_obj(handle);
 	obj_to_location(obj, &f_page, &f_objidx);
-	first_page = get_first_page(f_page);
+	zspage = get_zspage(f_page);
 
-	get_zspage_mapping(first_page, &class_idx, &fullness);
+	migrate_read_lock(zspage);
+
+	get_zspage_mapping(zspage, &class_idx, &fullness);
 	class = pool->size_class[class_idx];
 
 	spin_lock(&class->lock);
-	obj_free(pool, class, obj);
-	fullness = fix_fullness_group(class, first_page);
-	if (fullness == ZS_EMPTY) {
-		zs_stat_dec(class, OBJ_ALLOCATED, get_maxobj_per_zspage(
-				class->size, class->pages_per_zspage));
-		atomic_long_sub(class->pages_per_zspage,
-				&pool->pages_allocated);
-		free_zspage(first_page);
+	obj_free(class, obj);
+	fullness = fix_fullness_group(class, zspage);
+	if (fullness != ZS_EMPTY) {
+		migrate_read_unlock(zspage);
+		goto out;
 	}
+
+	isolated = is_zspage_isolated(zspage);
+	migrate_read_unlock(zspage);
+	/* If zspage is isolated, zs_page_putback will free the zspage */
+	if (likely(!isolated))
+		free_zspage(pool, class, zspage);
+out:
+
 	spin_unlock(&class->lock);
 	unpin_tag(handle);
-
-	free_handle(pool, handle);
+	cache_free_handle(pool, handle);
 }
 EXPORT_SYMBOL_GPL(zs_free);
 
-static void zs_object_copy(unsigned long dst, unsigned long src,
-				struct size_class *class)
+static void zs_object_copy(struct size_class *class, unsigned long dst,
+				unsigned long src)
 {
 	struct page *s_page, *d_page;
-	unsigned long s_objidx, d_objidx;
+	unsigned int s_objidx, d_objidx;
 	unsigned long s_off, d_off;
 	void *s_addr, *d_addr;
 	int s_size, d_size, size;
@@ -1515,8 +1682,8 @@
 	obj_to_location(src, &s_page, &s_objidx);
 	obj_to_location(dst, &d_page, &d_objidx);
 
-	s_off = obj_idx_to_offset(s_page, s_objidx, class->size);
-	d_off = obj_idx_to_offset(d_page, d_objidx, class->size);
+	s_off = (class->size * s_objidx) & ~PAGE_MASK;
+	d_off = (class->size * d_objidx) & ~PAGE_MASK;
 
 	if (s_off + class->size > PAGE_SIZE)
 		s_size = PAGE_SIZE - s_off;
@@ -1544,7 +1711,6 @@
 			kunmap_atomic(d_addr);
 			kunmap_atomic(s_addr);
 			s_page = get_next_page(s_page);
-			BUG_ON(!s_page);
 			s_addr = kmap_atomic(s_page);
 			d_addr = kmap_atomic(d_page);
 			s_size = class->size - written;
@@ -1554,7 +1720,6 @@
 		if (d_off >= PAGE_SIZE) {
 			kunmap_atomic(d_addr);
 			d_page = get_next_page(d_page);
-			BUG_ON(!d_page);
 			d_addr = kmap_atomic(d_page);
 			d_size = class->size - written;
 			d_off = 0;
@@ -1569,20 +1734,19 @@
  * Find alloced object in zspage from index object and
  * return handle.
  */
-static unsigned long find_alloced_obj(struct page *page, int index,
-					struct size_class *class)
+static unsigned long find_alloced_obj(struct size_class *class,
+					struct page *page, int index)
 {
 	unsigned long head;
 	int offset = 0;
 	unsigned long handle = 0;
 	void *addr = kmap_atomic(page);
 
-	if (!is_first_page(page))
-		offset = page->index;
+	offset = get_first_obj_offset(page);
 	offset += class->size * index;
 
 	while (offset < PAGE_SIZE) {
-		head = obj_to_head(class, page, addr + offset);
+		head = obj_to_head(page, addr + offset);
 		if (head & OBJ_ALLOCATED_TAG) {
 			handle = head & ~OBJ_ALLOCATED_TAG;
 			if (trypin_tag(handle))
@@ -1599,7 +1763,7 @@
 }
 
 struct zs_compact_control {
-	/* Source page for migration which could be a subpage of zspage. */
+	/* Source page for migration, which could be a subpage of a zspage */
 	struct page *s_page;
 	/* Destination page for migration which should be a first page
 	 * of zspage. */
@@ -1620,7 +1784,7 @@
 	int ret = 0;
 
 	while (1) {
-		handle = find_alloced_obj(s_page, index, class);
+		handle = find_alloced_obj(class, s_page, index);
 		if (!handle) {
 			s_page = get_next_page(s_page);
 			if (!s_page)
@@ -1630,15 +1794,15 @@
 		}
 
 		/* Stop if there is no more space */
-		if (zspage_full(d_page)) {
+		if (zspage_full(class, get_zspage(d_page))) {
 			unpin_tag(handle);
 			ret = -ENOMEM;
 			break;
 		}
 
 		used_obj = handle_to_obj(handle);
-		free_obj = obj_malloc(d_page, class, handle);
-		zs_object_copy(free_obj, used_obj, class);
+		free_obj = obj_malloc(class, get_zspage(d_page), handle);
+		zs_object_copy(class, free_obj, used_obj);
 		index++;
 		/*
 		 * record_obj updates handle's value to free_obj and it will
@@ -1649,7 +1813,7 @@
 		free_obj |= BIT(HANDLE_PIN_BIT);
 		record_obj(handle, free_obj);
 		unpin_tag(handle);
-		obj_free(pool, class, used_obj);
+		obj_free(class, used_obj);
 	}
 
 	/* Remember last position in this iteration */
@@ -1659,70 +1823,422 @@
 	return ret;
 }
 
-static struct page *isolate_target_page(struct size_class *class)
+static struct zspage *isolate_zspage(struct size_class *class, bool source)
 {
 	int i;
-	struct page *page;
+	struct zspage *zspage;
+	enum fullness_group fg[2] = {ZS_ALMOST_EMPTY, ZS_ALMOST_FULL};
 
-	for (i = 0; i < _ZS_NR_FULLNESS_GROUPS; i++) {
-		page = class->fullness_list[i];
-		if (page) {
-			remove_zspage(page, class, i);
-			break;
+	if (!source) {
+		fg[0] = ZS_ALMOST_FULL;
+		fg[1] = ZS_ALMOST_EMPTY;
+	}
+
+	for (i = 0; i < 2; i++) {
+		zspage = list_first_entry_or_null(&class->fullness_list[fg[i]],
+							struct zspage, list);
+		if (zspage) {
+			VM_BUG_ON(is_zspage_isolated(zspage));
+			remove_zspage(class, zspage, fg[i]);
+			return zspage;
 		}
 	}
 
-	return page;
+	return zspage;
 }
 
 /*
- * putback_zspage - add @first_page into right class's fullness list
- * @pool: target pool
+ * putback_zspage - add @zspage into right class's fullness list
  * @class: destination class
- * @first_page: target page
+ * @zspage: target zspage
  *
- * Return @fist_page's fullness_group
+ * Return @zspage's fullness_group
  */
-static enum fullness_group putback_zspage(struct zs_pool *pool,
-			struct size_class *class,
-			struct page *first_page)
+static enum fullness_group putback_zspage(struct size_class *class,
+			struct zspage *zspage)
 {
 	enum fullness_group fullness;
 
-	BUG_ON(!is_first_page(first_page));
+	VM_BUG_ON(is_zspage_isolated(zspage));
 
-	fullness = get_fullness_group(first_page);
-	insert_zspage(first_page, class, fullness);
-	set_zspage_mapping(first_page, class->index, fullness);
+	fullness = get_fullness_group(class, zspage);
+	insert_zspage(class, zspage, fullness);
+	set_zspage_mapping(zspage, class->index, fullness);
 
-	if (fullness == ZS_EMPTY) {
-		zs_stat_dec(class, OBJ_ALLOCATED, get_maxobj_per_zspage(
-			class->size, class->pages_per_zspage));
-		atomic_long_sub(class->pages_per_zspage,
-				&pool->pages_allocated);
+	return fullness;
+}
+
+#ifdef CONFIG_COMPACTION
+static struct dentry *zs_mount(struct file_system_type *fs_type,
+				int flags, const char *dev_name, void *data)
+{
+	static const struct dentry_operations ops = {
+		.d_dname = simple_dname,
+	};
 
-		free_zspage(first_page);
+	return mount_pseudo(fs_type, "zsmalloc:", NULL, &ops, ZSMALLOC_MAGIC);
 	}
 
-	return fullness;
+static struct file_system_type zsmalloc_fs = {
+	.name		= "zsmalloc",
+	.mount		= zs_mount,
+	.kill_sb	= kill_anon_super,
+};
+
+static int zsmalloc_mount(void)
+{
+	int ret = 0;
+
+	zsmalloc_mnt = kern_mount(&zsmalloc_fs);
+	if (IS_ERR(zsmalloc_mnt))
+		ret = PTR_ERR(zsmalloc_mnt);
+
+	return ret;
+}
+
+static void zsmalloc_unmount(void)
+{
+	kern_unmount(zsmalloc_mnt);
+}
+
+static void migrate_lock_init(struct zspage *zspage)
+{
+	rwlock_init(&zspage->lock);
+}
+
+static void migrate_read_lock(struct zspage *zspage)
+{
+	read_lock(&zspage->lock);
+}
+
+static void migrate_read_unlock(struct zspage *zspage)
+{
+	read_unlock(&zspage->lock);
+}
+
+static void migrate_write_lock(struct zspage *zspage)
+{
+	write_lock(&zspage->lock);
 }
 
-static struct page *isolate_source_page(struct size_class *class)
+static void migrate_write_unlock(struct zspage *zspage)
+{
+	write_unlock(&zspage->lock);
+}
+
+/* Number of isolated subpages for *page migration* in this zspage */
+static void inc_zspage_isolation(struct zspage *zspage)
+{
+	zspage->isolated++;
+}
+
+static void dec_zspage_isolation(struct zspage *zspage)
+{
+	zspage->isolated--;
+}
+
+static void replace_sub_page(struct size_class *class, struct zspage *zspage,
+				struct page *newpage, struct page *oldpage)
+{
+	struct page *page;
+	struct page *pages[ZS_MAX_PAGES_PER_ZSPAGE] = {NULL, };
+	int idx = 0;
+
+	page = get_first_page(zspage);
+	do {
+		if (page == oldpage)
+			pages[idx] = newpage;
+		else
+			pages[idx] = page;
+		idx++;
+	} while ((page = get_next_page(page)) != NULL);
+
+	create_page_chain(class, zspage, pages);
+	set_first_obj_offset(newpage, get_first_obj_offset(oldpage));
+	if (unlikely(PageHugeObject(oldpage)))
+		newpage->index = oldpage->index;
+	__SetPageMovable(newpage, page_mapping(oldpage));
+}
+
+bool zs_page_isolate(struct page *page, isolate_mode_t mode)
+{
+	struct zs_pool *pool;
+	struct size_class *class;
+	int class_idx;
+	enum fullness_group fullness;
+	struct zspage *zspage;
+	struct address_space *mapping;
+
+	/*
+	 * The page is locked, so the zspage cannot be destroyed. For details,
+	 * see lock_zspage in free_zspage.
+	 */
+	VM_BUG_ON_PAGE(!PageMovable(page), page);
+	VM_BUG_ON_PAGE(PageIsolated(page), page);
+
+	zspage = get_zspage(page);
+
+	/*
+	 * Without the class lock, fullness can be stale (class_idx is fine,
+	 * since it is constant unless the page is freed), so fullness must be
+	 * re-read under the class lock.
+	 */
+	get_zspage_mapping(zspage, &class_idx, &fullness);
+	mapping = page_mapping(page);
+	pool = mapping->private_data;
+	class = pool->size_class[class_idx];
+
+	spin_lock(&class->lock);
+	if (get_zspage_inuse(zspage) == 0) {
+		spin_unlock(&class->lock);
+		return false;
+	}
+
+	/* zspage is isolated for object migration */
+	if (list_empty(&zspage->list) && !is_zspage_isolated(zspage)) {
+		spin_unlock(&class->lock);
+		return false;
+	}
+
+	/*
+	 * If this is first time isolation for the zspage, isolate zspage from
+	 * size_class to prevent further object allocation from the zspage.
+	 */
+	if (!list_empty(&zspage->list) && !is_zspage_isolated(zspage)) {
+		get_zspage_mapping(zspage, &class_idx, &fullness);
+		remove_zspage(class, zspage, fullness);
+	}
+
+	inc_zspage_isolation(zspage);
+	spin_unlock(&class->lock);
+
+	return true;
+}
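
Only the first isolated subpage detaches the zspage from its size class,
which stops obj_malloc() from handing out objects while any subpage is
off being migrated; later isolations of sibling subpages only bump the
counter. The core of the logic above, distilled as a sketch (not a
separate helper in this patch):

	static void isolate_one(struct size_class *class,
				struct zspage *zspage,
				enum fullness_group fullness)
	{
		if (!list_empty(&zspage->list) && !is_zspage_isolated(zspage))
			remove_zspage(class, zspage, fullness);
		inc_zspage_isolation(zspage);
	}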
+
+int zs_page_migrate(struct address_space *mapping, struct page *newpage,
+		struct page *page, enum migrate_mode mode)
+{
+	struct zs_pool *pool;
+	struct size_class *class;
+	int class_idx;
+	enum fullness_group fullness;
+	struct zspage *zspage;
+	struct page *dummy;
+	void *s_addr, *d_addr, *addr;
+	int offset, pos;
+	unsigned long handle, head;
+	unsigned long old_obj, new_obj;
+	unsigned int obj_idx;
+	int ret = -EAGAIN;
+
+	VM_BUG_ON_PAGE(!PageMovable(page), page);
+	VM_BUG_ON_PAGE(!PageIsolated(page), page);
+
+	zspage = get_zspage(page);
+
+	/* Concurrent compactor cannot migrate any subpage in zspage */
+	migrate_write_lock(zspage);
+	get_zspage_mapping(zspage, &class_idx, &fullness);
+	pool = mapping->private_data;
+	class = pool->size_class[class_idx];
+	offset = get_first_obj_offset(page);
+
+	spin_lock(&class->lock);
+	if (!get_zspage_inuse(zspage)) {
+		ret = -EBUSY;
+		goto unlock_class;
+	}
+
+	pos = offset;
+	s_addr = kmap_atomic(page);
+	while (pos < PAGE_SIZE) {
+		head = obj_to_head(page, s_addr + pos);
+		if (head & OBJ_ALLOCATED_TAG) {
+			handle = head & ~OBJ_ALLOCATED_TAG;
+			if (!trypin_tag(handle))
+				goto unpin_objects;
+		}
+		pos += class->size;
+	}
+
+	/*
+	 * At this point no user can access any object in the zspage, so move it.
+	 */
+	d_addr = kmap_atomic(newpage);
+	memcpy(d_addr, s_addr, PAGE_SIZE);
+	kunmap_atomic(d_addr);
+
+	for (addr = s_addr + offset; addr < s_addr + pos;
+					addr += class->size) {
+		head = obj_to_head(page, addr);
+		if (head & OBJ_ALLOCATED_TAG) {
+			handle = head & ~OBJ_ALLOCATED_TAG;
+			if (!testpin_tag(handle))
+				BUG();
+
+			old_obj = handle_to_obj(handle);
+			obj_to_location(old_obj, &dummy, &obj_idx);
+			new_obj = (unsigned long)location_to_obj(newpage,
+								obj_idx);
+			new_obj |= BIT(HANDLE_PIN_BIT);
+			record_obj(handle, new_obj);
+		}
+	}
+
+	replace_sub_page(class, zspage, newpage, page);
+	get_page(newpage);
+
+	dec_zspage_isolation(zspage);
+
+	/*
+	 * Page migration is done, so put the isolated zspage back on its
+	 * list if @page was the final isolated subpage in the zspage.
+	 */
+	if (!is_zspage_isolated(zspage))
+		putback_zspage(class, zspage);
+
+	reset_page(page);
+	put_page(page);
+	page = newpage;
+
+	ret = MIGRATEPAGE_SUCCESS;
+unpin_objects:
+	for (addr = s_addr + offset; addr < s_addr + pos;
+						addr += class->size) {
+		head = obj_to_head(page, addr);
+		if (head & OBJ_ALLOCATED_TAG) {
+			handle = head & ~OBJ_ALLOCATED_TAG;
+			if (!testpin_tag(handle))
+				BUG();
+			unpin_tag(handle);
+		}
+	}
+	kunmap_atomic(s_addr);
+unlock_class:
+	spin_unlock(&class->lock);
+	migrate_write_unlock(zspage);
+
+	return ret;
+}
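
The migration path is pin-everything-then-copy: every allocated object
on the old page is pinned, the whole page is memcpy'd, and each pinned
handle is rewritten to point at the new page before the subpage is
swapped in. The rewrite step, extracted here as a sketch (it is inlined
in the loop above, not a real helper):

	static void rewrite_handle(struct page *newpage, unsigned long handle)
	{
		struct page *dummy;
		unsigned int obj_idx;
		unsigned long new_obj;

		obj_to_location(handle_to_obj(handle), &dummy, &obj_idx);
		new_obj = (unsigned long)location_to_obj(newpage, obj_idx);
		new_obj |= BIT(HANDLE_PIN_BIT);	/* keep it pinned */
		record_obj(handle, new_obj);
	}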
+
+void zs_page_putback(struct page *page)
+{
+	struct zs_pool *pool;
+	struct size_class *class;
+	int class_idx;
+	enum fullness_group fg;
+	struct address_space *mapping;
+	struct zspage *zspage;
+
+	VM_BUG_ON_PAGE(!PageMovable(page), page);
+	VM_BUG_ON_PAGE(!PageIsolated(page), page);
+
+	zspage = get_zspage(page);
+	get_zspage_mapping(zspage, &class_idx, &fg);
+	mapping = page_mapping(page);
+	pool = mapping->private_data;
+	class = pool->size_class[class_idx];
+
+	spin_lock(&class->lock);
+	dec_zspage_isolation(zspage);
+	if (!is_zspage_isolated(zspage)) {
+		fg = putback_zspage(class, zspage);
+		/*
+		 * Due to the page lock, we cannot free the zspage
+		 * immediately, so defer it.
+		 */
+		if (fg == ZS_EMPTY)
+			schedule_work(&pool->free_work);
+	}
+	spin_unlock(&class->lock);
+}
+
+const struct address_space_operations zsmalloc_aops = {
+	.isolate_page = zs_page_isolate,
+	.migratepage = zs_page_migrate,
+	.putback_page = zs_page_putback,
+};
+
+static int zs_register_migration(struct zs_pool *pool)
+{
+	pool->inode = alloc_anon_inode(zsmalloc_mnt->mnt_sb);
+	if (IS_ERR(pool->inode)) {
+		pool->inode = NULL;
+		return 1;
+	}
+
+	pool->inode->i_mapping->private_data = pool;
+	pool->inode->i_mapping->a_ops = &zsmalloc_aops;
+	return 0;
+}
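
The pseudo-filesystem mounted above exists only to supply this inode:
giving every zspage page an address_space lets the page-migration core
find zsmalloc's callbacks through the page itself. An illustrative
sketch of that dispatch (simplified; the real check lives in the
migration core, not in this patch):

	static bool try_isolate(struct page *page, isolate_mode_t mode)
	{
		struct address_space *mapping = page_mapping(page);

		return mapping && mapping->a_ops->isolate_page &&
		       mapping->a_ops->isolate_page(page, mode);
	}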
+
+static void zs_unregister_migration(struct zs_pool *pool)
+{
+	flush_work(&pool->free_work);
+	if (pool->inode)
+		iput(pool->inode);
+}
+
+/*
+ * Caller should hold page_lock of all pages in the zspage
+ * In here, we cannot use zspage meta data.
+ */
+static void async_free_zspage(struct work_struct *work)
 {
 	int i;
-	struct page *page = NULL;
+	struct size_class *class;
+	unsigned int class_idx;
+	enum fullness_group fullness;
+	struct zspage *zspage, *tmp;
+	LIST_HEAD(free_pages);
+	struct zs_pool *pool = container_of(work, struct zs_pool,
+					free_work);
 
-	for (i = ZS_ALMOST_EMPTY; i >= ZS_ALMOST_FULL; i--) {
-		page = class->fullness_list[i];
-		if (!page)
+	for (i = 0; i < zs_size_classes; i++) {
+		class = pool->size_class[i];
+		if (class->index != i)
 			continue;
 
-		remove_zspage(page, class, i);
-		break;
+		spin_lock(&class->lock);
+		list_splice_init(&class->fullness_list[ZS_EMPTY], &free_pages);
+		spin_unlock(&class->lock);
 	}
 
-	return page;
+
+	list_for_each_entry_safe(zspage, tmp, &free_pages, list) {
+		list_del(&zspage->list);
+		lock_zspage(zspage);
+
+		get_zspage_mapping(zspage, &class_idx, &fullness);
+		VM_BUG_ON(fullness != ZS_EMPTY);
+		class = pool->size_class[class_idx];
+		spin_lock(&class->lock);
+		__free_zspage(pool, pool->size_class[class_idx], zspage);
+		spin_unlock(&class->lock);
+	}
+}
+
+static void kick_deferred_free(struct zs_pool *pool)
+{
+	schedule_work(&pool->free_work);
+}
+
+static void init_deferred_free(struct zs_pool *pool)
+{
+	INIT_WORK(&pool->free_work, async_free_zspage);
+}
+
+static void SetZsPageMovable(struct zs_pool *pool, struct zspage *zspage)
+{
+	struct page *page = get_first_page(zspage);
+
+	do {
+		WARN_ON(!trylock_page(page));
+		__SetPageMovable(page, pool->inode->i_mapping);
+		unlock_page(page);
+	} while ((page = get_next_page(page)) != NULL);
 }
+#endif
 
 /*
  *
@@ -1748,22 +2264,20 @@
 static void __zs_compact(struct zs_pool *pool, struct size_class *class)
 {
 	struct zs_compact_control cc;
-	struct page *src_page;
-	struct page *dst_page = NULL;
+	struct zspage *src_zspage;
+	struct zspage *dst_zspage = NULL;
 
 	spin_lock(&class->lock);
-	while ((src_page = isolate_source_page(class))) {
-
-		BUG_ON(!is_first_page(src_page));
+	while ((src_zspage = isolate_zspage(class, true))) {
 
 		if (!zs_can_compact(class))
 			break;
 
 		cc.index = 0;
-		cc.s_page = src_page;
+		cc.s_page = get_first_page(src_zspage);
 
-		while ((dst_page = isolate_target_page(class))) {
-			cc.d_page = dst_page;
+		while ((dst_zspage = isolate_zspage(class, false))) {
+			cc.d_page = get_first_page(dst_zspage);
 			/*
 			 * If there is no more space in dst_page, resched
 			 * and see if anyone had allocated another zspage.
@@ -1771,23 +2285,25 @@
 			if (!migrate_zspage(pool, class, &cc))
 				break;
 
-			putback_zspage(pool, class, dst_page);
+			putback_zspage(class, dst_zspage);
 		}
 
 		/* Stop if we couldn't find slot */
-		if (dst_page == NULL)
+		if (dst_zspage == NULL)
 			break;
 
-		putback_zspage(pool, class, dst_page);
-		if (putback_zspage(pool, class, src_page) == ZS_EMPTY)
+		putback_zspage(class, dst_zspage);
+		if (putback_zspage(class, src_zspage) == ZS_EMPTY) {
+			free_zspage(pool, class, src_zspage);
 			pool->stats.pages_compacted += class->pages_per_zspage;
+		}
 		spin_unlock(&class->lock);
 		cond_resched();
 		spin_lock(&class->lock);
 	}
 
-	if (src_page)
-		putback_zspage(pool, class, src_page);
+	if (src_zspage)
+		putback_zspage(class, src_zspage);
 
 	spin_unlock(&class->lock);
 }
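
Each round of __zs_compact() drains the emptiest zspage (the source)
into the fullest one with room (the destination); a source that ends up
ZS_EMPTY is freed on the spot and its pages counted as compacted. A
condensed sketch of one round, with the NULL checks, the
zs_can_compact() test and the retry loop omitted:

	static void compact_once(struct zs_pool *pool,
				 struct size_class *class,
				 struct zs_compact_control *cc)
	{
		struct zspage *src = isolate_zspage(class, true);  /* emptiest */
		struct zspage *dst = isolate_zspage(class, false); /* fullest */

		cc->index = 0;
		cc->s_page = get_first_page(src);
		cc->d_page = get_first_page(dst);
		migrate_zspage(pool, class, cc);

		putback_zspage(class, dst);
		if (putback_zspage(class, src) == ZS_EMPTY) {
			free_zspage(pool, class, src);
			pool->stats.pages_compacted += class->pages_per_zspage;
		}
	}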
@@ -1884,7 +2400,7 @@
  * On success, a pointer to the newly created pool is returned,
  * otherwise NULL.
  */
-struct zs_pool *zs_create_pool(const char *name, gfp_t flags)
+struct zs_pool *zs_create_pool(const char *name)
 {
 	int i;
 	struct zs_pool *pool;
@@ -1894,6 +2410,7 @@
 	if (!pool)
 		return NULL;
 
+	init_deferred_free(pool);
 	pool->size_class = kcalloc(zs_size_classes, sizeof(struct size_class *),
 			GFP_KERNEL);
 	if (!pool->size_class) {
@@ -1905,7 +2422,7 @@
 	if (!pool->name)
 		goto err;
 
-	if (create_handle_cache(pool))
+	if (create_cache(pool))
 		goto err;
 
 	/*
@@ -1916,6 +2433,7 @@
 		int size;
 		int pages_per_zspage;
 		struct size_class *class;
+		int fullness = 0;
 
 		size = ZS_MIN_ALLOC_SIZE + i * ZS_SIZE_CLASS_DELTA;
 		if (size > ZS_MAX_ALLOC_SIZE)
@@ -1945,18 +2463,21 @@
 		class->size = size;
 		class->index = i;
 		class->pages_per_zspage = pages_per_zspage;
-		if (pages_per_zspage == 1 &&
-			get_maxobj_per_zspage(size, pages_per_zspage) == 1)
-			class->huge = true;
+		class->objs_per_zspage = class->pages_per_zspage *
+						PAGE_SIZE / class->size;
 		spin_lock_init(&class->lock);
 		pool->size_class[i] = class;
+		for (fullness = ZS_EMPTY; fullness < NR_ZS_FULLNESS;
+							fullness++)
+			INIT_LIST_HEAD(&class->fullness_list[fullness]);
 
 		prev_class = class;
 	}
 
-	pool->flags = flags;
+	if (zs_pool_stat_create(pool, name))
+		goto err;
 
-	if (zs_pool_stat_create(name, pool))
+	if (zs_register_migration(pool))
 		goto err;
 
 	/*
@@ -1978,6 +2499,7 @@
 	int i;
 
 	zs_unregister_shrinker(pool);
+	zs_unregister_migration(pool);
 	zs_pool_stat_destroy(pool);
 
 	for (i = 0; i < zs_size_classes; i++) {
@@ -1990,8 +2512,8 @@
 		if (class->index != i)
 			continue;
 
-		for (fg = 0; fg < _ZS_NR_FULLNESS_GROUPS; fg++) {
-			if (class->fullness_list[fg]) {
+		for (fg = ZS_EMPTY; fg < NR_ZS_FULLNESS; fg++) {
+			if (!list_empty(&class->fullness_list[fg])) {
 				pr_info("Freeing non-empty class with size %db, fullness group %d\n",
 					class->size, fg);
 			}
@@ -1999,7 +2521,7 @@
 		kfree(class);
 	}
 
-	destroy_handle_cache(pool);
+	destroy_cache(pool);
 	kfree(pool->size_class);
 	kfree(pool->name);
 	kfree(pool);
@@ -2008,7 +2530,13 @@
 
 static int __init zs_init(void)
 {
-	int ret = zs_register_cpu_notifier();
+	int ret;
+
+	ret = zsmalloc_mount();
+	if (ret)
+		goto out;
+
+	ret = zs_register_cpu_notifier();
 
 	if (ret)
 		goto notifier_fail;
@@ -2032,7 +2560,8 @@
 #endif
 notifier_fail:
 	zs_unregister_cpu_notifier();
-
+	zsmalloc_unmount();
+out:
 	return ret;
 }
 
@@ -2041,6 +2570,7 @@
 #ifdef CONFIG_ZPOOL
 	zpool_unregister_driver(&zs_zpool_driver);
 #endif
+	zsmalloc_unmount();
 	zs_unregister_cpu_notifier();
 
 	zs_stat_exit();
diff -ruw linux-4.4.115/net/8021q/vlan.c linux-4.4.115-fbx/net/8021q/vlan.c
--- linux-4.4.115/net/8021q/vlan.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/net/8021q/vlan.c	2019-01-22 16:16:28.843294763 +0100
@@ -203,7 +203,7 @@
 /*  Attach a VLAN device to a MAC address (i.e. an Ethernet card).
  *  Returns 0 if the device was created or a negative error code otherwise.
  */
-static int register_vlan_device(struct net_device *real_dev, u16 vlan_id)
+int register_vlan_device(struct net_device *real_dev, u16 vlan_id)
 {
 	struct net_device *new_dev;
 	struct vlan_dev_priv *vlan;
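
Dropping static exposes register_vlan_device() to the rest of the
kernel. Any out-of-file caller needs a prototype (presumably in a shared
header elsewhere in this tree) and must hold RTNL, since the function is
otherwise only reached from the VLAN ioctl path under rtnl_lock(). A
hypothetical caller, for illustration only:

	int register_vlan_device(struct net_device *real_dev, u16 vlan_id);

	static int add_vlan_100(struct net_device *real_dev)
	{
		int err;

		rtnl_lock();
		err = register_vlan_device(real_dev, 100);
		rtnl_unlock();
		return err;
	}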
diff -ruw linux-4.4.115/net/bluetooth/af_bluetooth.c linux-4.4.115-fbx/net/bluetooth/af_bluetooth.c
--- linux-4.4.115/net/bluetooth/af_bluetooth.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/net/bluetooth/af_bluetooth.c	2019-01-22 16:16:28.867294981 +0100
@@ -106,11 +106,40 @@
 }
 EXPORT_SYMBOL(bt_sock_unregister);
 
+#ifdef CONFIG_PARANOID_NETWORK
+static inline int current_has_bt_admin(void)
+{
+	return !current_euid();
+}
+
+static inline int current_has_bt(void)
+{
+	return current_has_bt_admin();
+}
+#else
+static inline int current_has_bt_admin(void)
+{
+	return 1;
+}
+
+static inline int current_has_bt(void)
+{
+	return 1;
+}
+#endif
+
 static int bt_sock_create(struct net *net, struct socket *sock, int proto,
 			  int kern)
 {
 	int err;
 
+	if (proto == BTPROTO_RFCOMM || proto == BTPROTO_SCO ||
+			proto == BTPROTO_L2CAP) {
+		if (!current_has_bt())
+			return -EPERM;
+	} else if (!current_has_bt_admin())
+		return -EPERM;
+
 	if (net != &init_net)
 		return -EAFNOSUPPORT;
 
@@ -154,7 +183,7 @@
 
 void bt_accept_enqueue(struct sock *parent, struct sock *sk)
 {
-	BT_DBG("parent %p, sk %p", parent, sk);
+	BT_DBG("parent %pK, sk %pK", parent, sk);
 
 	sock_hold(sk);
 	list_add_tail(&bt_sk(sk)->accept_q, &bt_sk(parent)->accept_q);
@@ -165,7 +194,7 @@
 
 void bt_accept_unlink(struct sock *sk)
 {
-	BT_DBG("sk %p state %d", sk, sk->sk_state);
+	BT_DBG("sk %pK state %d", sk, sk->sk_state);
 
 	list_del_init(&bt_sk(sk)->accept_q);
 	bt_sk(sk)->parent->sk_ack_backlog--;
@@ -179,7 +208,7 @@
 	struct list_head *p, *n;
 	struct sock *sk;
 
-	BT_DBG("parent %p", parent);
+	BT_DBG("parent %pK", parent);
 
 	list_for_each_safe(p, n, &bt_sk(parent)->accept_q) {
 		sk = (struct sock *) list_entry(p, struct bt_sock, accept_q);
@@ -219,7 +248,7 @@
 	size_t copied;
 	int err;
 
-	BT_DBG("sock %p sk %p len %zu", sock, sk, len);
+	BT_DBG("sock %pK sk %pK len %zu", sock, sk, len);
 
 	if (flags & MSG_OOB)
 		return -EOPNOTSUPP;
@@ -294,7 +323,7 @@
 	if (flags & MSG_OOB)
 		return -EOPNOTSUPP;
 
-	BT_DBG("sk %p size %zu", sk, size);
+	BT_DBG("sk %pK size %zu", sk, size);
 
 	lock_sock(sk);
 
@@ -410,7 +439,7 @@
 	struct sock *sk = sock->sk;
 	unsigned int mask = 0;
 
-	BT_DBG("sock %p, sk %p", sock, sk);
+	BT_DBG("sock %pK, sk %pK", sock, sk);
 
 	poll_wait(file, sk_sleep(sk), wait);
 
@@ -454,7 +483,7 @@
 	long amount;
 	int err;
 
-	BT_DBG("sk %p cmd %x arg %lx", sk, cmd, arg);
+	BT_DBG("sk %pK cmd %x arg %lx", sk, cmd, arg);
 
 	switch (cmd) {
 	case TIOCOUTQ:
@@ -501,7 +530,7 @@
 	DECLARE_WAITQUEUE(wait, current);
 	int err = 0;
 
-	BT_DBG("sk %p", sk);
+	BT_DBG("sk %pK", sk);
 
 	add_wait_queue(sk_sleep(sk), &wait);
 	set_current_state(TASK_INTERRUPTIBLE);
@@ -538,7 +567,7 @@
 	unsigned long timeo;
 	int err = 0;
 
-	BT_DBG("sk %p", sk);
+	BT_DBG("sk %pK", sk);
 
 	timeo = sock_sndtimeo(sk, flags & O_NONBLOCK);
 
diff -ruw linux-4.4.115/net/bluetooth/hci_conn.c linux-4.4.115-fbx/net/bluetooth/hci_conn.c
--- linux-4.4.115/net/bluetooth/hci_conn.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/net/bluetooth/hci_conn.c	2019-10-29 09:26:25.765223944 +0100
@@ -141,7 +141,7 @@
 	struct hci_dev *hdev = conn->hdev;
 	struct hci_conn *c = NULL;
 
-	BT_DBG("%s hcon %p", hdev->name, conn);
+	BT_DBG("%s hcon %pK", hdev->name, conn);
 
 	hci_dev_lock(hdev);
 
@@ -165,7 +165,7 @@
 
 static void hci_connect_le_scan_remove(struct hci_conn *conn)
 {
-	BT_DBG("%s hcon %p", conn->hdev->name, conn);
+	BT_DBG("%s hcon %pK", conn->hdev->name, conn);
 
 	/* We can't call hci_conn_del/hci_conn_cleanup here since that
 	 * could deadlock with another hci_conn_del() call that's holding
@@ -187,7 +187,7 @@
 	struct inquiry_entry *ie;
 	struct hci_cp_create_conn cp;
 
-	BT_DBG("hcon %p", conn);
+	BT_DBG("hcon %pK", conn);
 
 	conn->state = BT_CONNECT;
 	conn->out = true;
@@ -226,7 +226,7 @@
 
 int hci_disconnect(struct hci_conn *conn, __u8 reason)
 {
-	BT_DBG("hcon %p", conn);
+	BT_DBG("hcon %pK", conn);
 
 	/* When we are master of an established connection and it enters
 	 * the disconnect timeout, then go ahead and try to read the
@@ -251,7 +251,7 @@
 	struct hci_dev *hdev = conn->hdev;
 	struct hci_cp_add_sco cp;
 
-	BT_DBG("hcon %p", conn);
+	BT_DBG("hcon %pK", conn);
 
 	conn->state = BT_CONNECT;
 	conn->out = true;
@@ -270,7 +270,7 @@
 	struct hci_cp_setup_sync_conn cp;
 	const struct sco_param *param;
 
-	BT_DBG("hcon %p", conn);
+	BT_DBG("hcon %pK", conn);
 
 	conn->state = BT_CONNECT;
 	conn->out = true;
@@ -356,7 +356,7 @@
 	struct hci_dev *hdev = conn->hdev;
 	struct hci_cp_le_start_enc cp;
 
-	BT_DBG("hcon %p", conn);
+	BT_DBG("hcon %pK", conn);
 
 	memset(&cp, 0, sizeof(cp));
 
@@ -376,7 +376,7 @@
 	if (!sco)
 		return;
 
-	BT_DBG("hcon %p", conn);
+	BT_DBG("hcon %pK", conn);
 
 	if (!status) {
 		if (lmp_esco_capable(conn->hdev))
@@ -395,7 +395,7 @@
 					     disc_work.work);
 	int refcnt = atomic_read(&conn->refcnt);
 
-	BT_DBG("hcon %p state %s", conn, state_to_string(conn->state));
+	BT_DBG("hcon %pK state %s", conn, state_to_string(conn->state));
 
 	WARN_ON(refcnt < 0);
 
@@ -426,7 +426,7 @@
 					     idle_work.work);
 	struct hci_dev *hdev = conn->hdev;
 
-	BT_DBG("hcon %p mode %d", conn, conn->mode);
+	BT_DBG("hcon %pK mode %d", conn, conn->mode);
 
 	if (!lmp_sniff_capable(hdev) || !lmp_sniff_capable(conn))
 		return;
@@ -566,7 +566,7 @@
 {
 	struct hci_dev *hdev = conn->hdev;
 
-	BT_DBG("%s hcon %p handle %d", hdev->name, conn, conn->handle);
+	BT_DBG("%s hcon %pK handle %d", hdev->name, conn, conn->handle);
 
 	cancel_delayed_work_sync(&conn->disc_work);
 	cancel_delayed_work_sync(&conn->auto_accept_work);
@@ -691,6 +691,9 @@
 	conn = hci_lookup_le_connect(hdev);
 
 	if (!status) {
+		if (WARN_ON(!conn))
+			goto done;
+
 		hci_connect_le_scan_cleanup(conn);
 		goto done;
 	}
@@ -1147,7 +1150,7 @@
 /* Check link security requirement */
 int hci_conn_check_link_mode(struct hci_conn *conn)
 {
-	BT_DBG("hcon %p", conn);
+	BT_DBG("hcon %pK", conn);
 
 	/* In Secure Connections Only mode, it is required that Secure
 	 * Connections is used and the link is encrypted with AES-CCM
@@ -1170,7 +1173,7 @@
 /* Authenticate remote device */
 static int hci_conn_auth(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
 {
-	BT_DBG("hcon %p", conn);
+	BT_DBG("hcon %pK", conn);
 
 	if (conn->pending_sec_level > sec_level)
 		sec_level = conn->pending_sec_level;
@@ -1207,7 +1210,7 @@
 /* Encrypt the link */
 static void hci_conn_encrypt(struct hci_conn *conn)
 {
-	BT_DBG("hcon %p", conn);
+	BT_DBG("hcon %pK", conn);
 
 	if (!test_and_set_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
 		struct hci_cp_set_conn_encrypt cp;
@@ -1222,7 +1225,7 @@
 int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type,
 		      bool initiator)
 {
-	BT_DBG("hcon %p", conn);
+	BT_DBG("hcon %pK", conn);
 
 	if (conn->type == LE_LINK)
 		return smp_conn_security(conn, sec_level);
@@ -1291,7 +1294,7 @@
 /* Check secure link requirement */
 int hci_conn_check_secure(struct hci_conn *conn, __u8 sec_level)
 {
-	BT_DBG("hcon %p", conn);
+	BT_DBG("hcon %pK", conn);
 
 	/* Accept if non-secure or higher security level is required */
 	if (sec_level != BT_SECURITY_HIGH && sec_level != BT_SECURITY_FIPS)
@@ -1310,7 +1313,7 @@
 /* Switch role */
 int hci_conn_switch_role(struct hci_conn *conn, __u8 role)
 {
-	BT_DBG("hcon %p", conn);
+	BT_DBG("hcon %pK", conn);
 
 	if (role == conn->role)
 		return 1;
@@ -1331,7 +1334,7 @@
 {
 	struct hci_dev *hdev = conn->hdev;
 
-	BT_DBG("hcon %p mode %d", conn, conn->mode);
+	BT_DBG("hcon %pK mode %d", conn, conn->mode);
 
 	if (conn->mode != HCI_CM_SNIFF)
 		goto timer;
@@ -1511,7 +1514,7 @@
 	struct hci_dev *hdev = conn->hdev;
 	struct hci_chan *chan;
 
-	BT_DBG("%s hcon %p", hdev->name, conn);
+	BT_DBG("%s hcon %pK", hdev->name, conn);
 
 	if (test_bit(HCI_CONN_DROP, &conn->flags)) {
 		BT_DBG("Refusing to create new hci_chan");
@@ -1536,7 +1539,7 @@
 	struct hci_conn *conn = chan->conn;
 	struct hci_dev *hdev = conn->hdev;
 
-	BT_DBG("%s hcon %p chan %p", hdev->name, conn, chan);
+	BT_DBG("%s hcon %pK chan %pK", hdev->name, conn, chan);
 
 	list_del_rcu(&chan->list);
 
@@ -1555,7 +1558,7 @@
 {
 	struct hci_chan *chan, *n;
 
-	BT_DBG("hcon %p", conn);
+	BT_DBG("hcon %pK", conn);
 
 	list_for_each_entry_safe(chan, n, &conn->chan_list, list)
 		hci_chan_del(chan);
diff -ruw linux-4.4.115/net/bluetooth/hci_core.c linux-4.4.115-fbx/net/bluetooth/hci_core.c
--- linux-4.4.115/net/bluetooth/hci_core.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/net/bluetooth/hci_core.c	2019-10-29 09:26:25.765223944 +0100
@@ -1131,7 +1131,7 @@
 	struct discovery_state *cache = &hdev->discovery;
 	struct inquiry_entry *e;
 
-	BT_DBG("cache %p, %pMR", cache, bdaddr);
+	BT_DBG("cache %pK, %pMR", cache, bdaddr);
 
 	list_for_each_entry(e, &cache->all, all) {
 		if (!bacmp(&e->data.bdaddr, bdaddr))
@@ -1147,7 +1147,7 @@
 	struct discovery_state *cache = &hdev->discovery;
 	struct inquiry_entry *e;
 
-	BT_DBG("cache %p, %pMR", cache, bdaddr);
+	BT_DBG("cache %pK, %pMR", cache, bdaddr);
 
 	list_for_each_entry(e, &cache->unknown, list) {
 		if (!bacmp(&e->data.bdaddr, bdaddr))
@@ -1164,7 +1164,7 @@
 	struct discovery_state *cache = &hdev->discovery;
 	struct inquiry_entry *e;
 
-	BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
+	BT_DBG("cache %pK bdaddr %pMR state %d", cache, bdaddr, state);
 
 	list_for_each_entry(e, &cache->resolve, list) {
 		if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
@@ -1202,7 +1202,7 @@
 	struct inquiry_entry *ie;
 	u32 flags = 0;
 
-	BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
+	BT_DBG("cache %pK, %pMR", cache, &data->bdaddr);
 
 	hci_remove_remote_oob_data(hdev, &data->bdaddr, BDADDR_BREDR);
 
@@ -1281,7 +1281,7 @@
 		copied++;
 	}
 
-	BT_DBG("cache %p, copied %d", cache, copied);
+	BT_DBG("cache %pK, copied %d", cache, copied);
 	return copied;
 }
 
@@ -1402,7 +1402,7 @@
 {
 	int ret = 0;
 
-	BT_DBG("%s %p", hdev->name, hdev);
+	BT_DBG("%s %pK", hdev->name, hdev);
 
 	hci_req_lock(hdev);
 
@@ -1639,7 +1639,7 @@
 {
 	bool auto_off;
 
-	BT_DBG("%s %p", hdev->name, hdev);
+	BT_DBG("%s %pK", hdev->name, hdev);
 
 	if (!hci_dev_test_flag(hdev, HCI_UNREGISTER) &&
 	    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
@@ -1788,7 +1788,7 @@
 {
 	int ret;
 
-	BT_DBG("%s %p", hdev->name, hdev);
+	BT_DBG("%s %pK", hdev->name, hdev);
 
 	hci_req_lock(hdev);
 
@@ -2122,7 +2122,7 @@
 {
 	struct hci_dev *hdev = data;
 
-	BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
+	BT_DBG("%pK name %s blocked %d", hdev, hdev->name, blocked);
 
 	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
 		return -EBUSY;
@@ -3017,6 +3017,7 @@
 		}
 
 		list_del(&params->list);
+		list_del(&params->action);
 		kfree(params);
 	}
 
@@ -3353,7 +3354,7 @@
 	sprintf(hdev->name, "hci%d", id);
 	hdev->id = id;
 
-	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
+	BT_DBG("%pK name %s bus %d", hdev, hdev->name, hdev->bus);
 
 	hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
 					  WQ_MEM_RECLAIM, 1, hdev->name);
@@ -3434,7 +3435,7 @@
 {
 	int id;
 
-	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
+	BT_DBG("%pK name %s bus %d", hdev, hdev->name, hdev->bus);
 
 	hci_dev_set_flag(hdev, HCI_UNREGISTER);
 
@@ -3577,7 +3578,7 @@
 
 int hci_register_cb(struct hci_cb *cb)
 {
-	BT_DBG("%p name %s", cb, cb->name);
+	BT_DBG("%pK name %s", cb, cb->name);
 
 	mutex_lock(&hci_cb_list_lock);
 	list_add_tail(&cb->list, &hci_cb_list);
@@ -3589,7 +3590,7 @@
 
 int hci_unregister_cb(struct hci_cb *cb)
 {
-	BT_DBG("%p name %s", cb, cb->name);
+	BT_DBG("%pK name %s", cb, cb->name);
 
 	mutex_lock(&hci_cb_list_lock);
 	list_del(&cb->list);
@@ -3733,12 +3734,12 @@
 	list = skb_shinfo(skb)->frag_list;
 	if (!list) {
 		/* Non fragmented */
-		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
+		BT_DBG("%s nonfrag skb %pK len %d", hdev->name, skb, skb->len);
 
 		skb_queue_tail(queue, skb);
 	} else {
 		/* Fragmented */
-		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
+		BT_DBG("%s frag %pK len %d", hdev->name, skb, skb->len);
 
 		skb_shinfo(skb)->frag_list = NULL;
 
@@ -3759,7 +3760,7 @@
 			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
 			hci_add_acl_hdr(skb, conn->handle, flags);
 
-			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
+			BT_DBG("%s frag %pK len %d", hdev->name, skb, skb->len);
 
 			__skb_queue_tail(queue, skb);
 		} while (list);
@@ -3772,7 +3773,7 @@
 {
 	struct hci_dev *hdev = chan->conn->hdev;
 
-	BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
+	BT_DBG("%s chan %pK flags 0x%4.4x", hdev->name, chan, flags);
 
 	hci_queue_acl(chan, &chan->data_q, skb, flags);
 
@@ -3859,7 +3860,7 @@
 	} else
 		*quote = 0;
 
-	BT_DBG("conn %p quote %d", conn, *quote);
+	BT_DBG("conn %pK quote %d", conn, *quote);
 	return conn;
 }
 
@@ -3962,7 +3963,7 @@
 
 	q = cnt / num;
 	*quote = q ? q : 1;
-	BT_DBG("chan %p quote %d", chan, *quote);
+	BT_DBG("chan %pK quote %d", chan, *quote);
 	return chan;
 }
 
@@ -4004,7 +4005,7 @@
 
 			skb->priority = HCI_PRIO_MAX - 1;
 
-			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
+			BT_DBG("chan %pK skb %pK promoted to %d", chan, skb,
 			       skb->priority);
 		}
 
@@ -4046,7 +4047,7 @@
 	       (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
 		u32 priority = (skb_peek(&chan->data_q))->priority;
 		while (quote-- && (skb = skb_peek(&chan->data_q))) {
-			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
+			BT_DBG("chan %pK skb %pK len %d priority %u", chan, skb,
 			       skb->len, skb->priority);
 
 			/* Stop if priority has changed */
@@ -4094,7 +4095,7 @@
 		while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
 			int blocks;
 
-			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
+			BT_DBG("chan %pK skb %pK len %d priority %u", chan, skb,
 			       skb->len, skb->priority);
 
 			/* Stop if priority has changed */
@@ -4162,7 +4163,7 @@
 
 	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
 		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
-			BT_DBG("skb %p len %d", skb, skb->len);
+			BT_DBG("skb %pK len %d", skb, skb->len);
 			hci_send_frame(hdev, skb);
 
 			conn->sent++;
@@ -4186,7 +4187,7 @@
 	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
 						     &quote))) {
 		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
-			BT_DBG("skb %p len %d", skb, skb->len);
+			BT_DBG("skb %pK len %d", skb, skb->len);
 			hci_send_frame(hdev, skb);
 
 			conn->sent++;
@@ -4220,7 +4221,7 @@
 	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
 		u32 priority = (skb_peek(&chan->data_q))->priority;
 		while (quote-- && (skb = skb_peek(&chan->data_q))) {
-			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
+			BT_DBG("chan %pK skb %pK len %d priority %u", chan, skb,
 			       skb->len, skb->priority);
 
 			/* Stop if priority has changed */
@@ -4299,6 +4300,11 @@
 		l2cap_recv_acldata(conn, skb, flags);
 		return;
 	} else {
+		if (handle == 3804) {
+			/* Qualcomm HCI seems to emit a debug log on
+			 * this connection handle */
+			return;
+		}
 		BT_ERR("%s ACL packet for unknown connection handle %d",
 		       hdev->name, handle);
 	}
diff -ruw linux-4.4.115/net/bluetooth/hci_event.c linux-4.4.115-fbx/net/bluetooth/hci_event.c
--- linux-4.4.115/net/bluetooth/hci_event.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/net/bluetooth/hci_event.c	2019-10-29 09:26:25.769223983 +0100
@@ -1469,7 +1469,7 @@
 
 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
 
-	BT_DBG("%s bdaddr %pMR hcon %p", hdev->name, &cp->bdaddr, conn);
+	BT_DBG("%s bdaddr %pMR hcon %pK", hdev->name, &cp->bdaddr, conn);
 
 	if (status) {
 		if (conn && conn->state == BT_CONNECT) {
@@ -3235,7 +3235,7 @@
 			break;
 
 		default:
-			BT_ERR("Unknown type %d conn %p", conn->type, conn);
+			BT_ERR("Unknown type %d conn %pK", conn->type, conn);
 			break;
 		}
 	}
@@ -3306,7 +3306,7 @@
 			break;
 
 		default:
-			BT_ERR("Unknown type %d conn %p", conn->type, conn);
+			BT_ERR("Unknown type %d conn %pK", conn->type, conn);
 			break;
 		}
 	}
@@ -4381,7 +4381,7 @@
 
 	hchan->handle = le16_to_cpu(ev->handle);
 
-	BT_DBG("hcon %p mgr %p hchan %p", hcon, hcon->amp_mgr, hchan);
+	BT_DBG("hcon %pK mgr %pK hchan %pK", hcon, hcon->amp_mgr, hchan);
 
 	mgr = hcon->amp_mgr;
 	if (mgr && mgr->bredr_chan) {
@@ -4742,8 +4742,8 @@
 
 	/* Adjust for actual length */
 	if (len != real_len) {
-		BT_ERR_RATELIMITED("%s advertising data length corrected",
-				   hdev->name);
+		/* BT_ERR_RATELIMITED("%s advertising data length corrected", */
+		/* 		   hdev->name); */
 		len = real_len;
 	}
 
diff -ruw linux-4.4.115/net/bluetooth/hci_sock.c linux-4.4.115-fbx/net/bluetooth/hci_sock.c
--- linux-4.4.115/net/bluetooth/hci_sock.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/net/bluetooth/hci_sock.c	2019-10-29 09:26:25.769223983 +0100
@@ -155,7 +155,7 @@
 	struct sock *sk;
 	struct sk_buff *skb_copy = NULL;
 
-	BT_DBG("hdev %p len %d", hdev, skb->len);
+	BT_DBG("hdev %pK len %d", hdev, skb->len);
 
 	read_lock(&hci_sk_list.lock);
 
@@ -260,7 +260,7 @@
 	if (!atomic_read(&monitor_promisc))
 		return;
 
-	BT_DBG("hdev %p len %d", hdev, skb->len);
+	BT_DBG("hdev %pK len %d", hdev, skb->len);
 
 	switch (bt_cb(skb)->pkt_type) {
 	case HCI_COMMAND_PKT:
@@ -553,7 +553,7 @@
 	struct sock *sk = sock->sk;
 	struct hci_dev *hdev;
 
-	BT_DBG("sock %p sk %p", sock, sk);
+	BT_DBG("sock %pK sk %pK", sock, sk);
 
 	if (!sk)
 		return 0;
@@ -753,7 +753,7 @@
 	struct hci_dev *hdev = NULL;
 	int len, err = 0;
 
-	BT_DBG("sock %p sk %p", sock, sk);
+	BT_DBG("sock %pK sk %pK", sock, sk);
 
 	if (!addr)
 		return -EINVAL;
@@ -931,7 +931,7 @@
 	struct hci_dev *hdev;
 	int err = 0;
 
-	BT_DBG("sock %p sk %p", sock, sk);
+	BT_DBG("sock %pK sk %pK", sock, sk);
 
 	if (peer)
 		return -EOPNOTSUPP;
@@ -999,7 +999,7 @@
 	struct sk_buff *skb;
 	int copied, err;
 
-	BT_DBG("sock %p, sk %p", sock, sk);
+	BT_DBG("sock %pK, sk %pK", sock, sk);
 
 	if (flags & MSG_OOB)
 		return -EOPNOTSUPP;
@@ -1159,7 +1159,7 @@
 	struct sk_buff *skb;
 	int err;
 
-	BT_DBG("sock %p sk %p", sock, sk);
+	BT_DBG("sock %pK sk %pK", sock, sk);
 
 	if (msg->msg_flags & MSG_OOB)
 		return -EOPNOTSUPP;
@@ -1289,7 +1289,7 @@
 	struct sock *sk = sock->sk;
 	int err = 0, opt = 0;
 
-	BT_DBG("sk %p, opt %d", sk, optname);
+	BT_DBG("sk %pK, opt %d", sk, optname);
 
 	lock_sock(sk);
 
@@ -1372,7 +1372,7 @@
 	struct sock *sk = sock->sk;
 	int len, opt, err = 0;
 
-	BT_DBG("sk %p, opt %d", sk, optname);
+	BT_DBG("sk %pK, opt %d", sk, optname);
 
 	if (get_user(len, optlen))
 		return -EFAULT;
@@ -1462,7 +1462,7 @@
 {
 	struct sock *sk;
 
-	BT_DBG("sock %p", sock);
+	BT_DBG("sock %pK", sock);
 
 	if (sock->type != SOCK_RAW)
 		return -ESOCKTNOSUPPORT;
diff -ruw linux-4.4.115/net/bluetooth/hci_sysfs.c linux-4.4.115-fbx/net/bluetooth/hci_sysfs.c
--- linux-4.4.115/net/bluetooth/hci_sysfs.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/net/bluetooth/hci_sysfs.c	2019-01-22 16:16:28.875295053 +0100
@@ -77,7 +77,7 @@
 {
 	struct hci_dev *hdev = conn->hdev;
 
-	BT_DBG("conn %p", conn);
+	BT_DBG("conn %pK", conn);
 
 	conn->dev.type = &bt_link;
 	conn->dev.class = bt_class;
@@ -90,7 +90,7 @@
 {
 	struct hci_dev *hdev = conn->hdev;
 
-	BT_DBG("conn %p", conn);
+	BT_DBG("conn %pK", conn);
 
 	dev_set_name(&conn->dev, "%s:%d", hdev->name, conn->handle);
 
diff -ruw linux-4.4.115/net/bluetooth/l2cap_core.c linux-4.4.115-fbx/net/bluetooth/l2cap_core.c
--- linux-4.4.115/net/bluetooth/l2cap_core.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/net/bluetooth/l2cap_core.c	2019-10-29 09:26:25.773224022 +0100
@@ -249,7 +249,7 @@
 
 static void l2cap_state_change(struct l2cap_chan *chan, int state)
 {
-	BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
+	BT_DBG("chan %pK %s -> %s", chan, state_to_string(chan->state),
 	       state_to_string(state));
 
 	chan->state = state;
@@ -400,7 +400,7 @@
 	struct l2cap_conn *conn = chan->conn;
 	int reason;
 
-	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
+	BT_DBG("chan %pK state %s", chan, state_to_string(chan->state));
 
 	mutex_lock(&conn->chan_lock);
 	l2cap_chan_lock(chan);
@@ -449,7 +449,7 @@
 	/* This flag is cleared in l2cap_chan_ready() */
 	set_bit(CONF_NOT_COMPLETE, &chan->conf_state);
 
-	BT_DBG("chan %p", chan);
+	BT_DBG("chan %pK", chan);
 
 	return chan;
 }
@@ -459,7 +459,7 @@
 {
 	struct l2cap_chan *chan = container_of(kref, struct l2cap_chan, kref);
 
-	BT_DBG("chan %p", chan);
+	BT_DBG("chan %pK", chan);
 
 	write_lock(&chan_list_lock);
 	list_del(&chan->global_l);
@@ -470,14 +470,14 @@
 
 void l2cap_chan_hold(struct l2cap_chan *c)
 {
-	BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->kref.refcount));
+	BT_DBG("chan %pK orig refcnt %d", c, atomic_read(&c->kref.refcount));
 
 	kref_get(&c->kref);
 }
 
 void l2cap_chan_put(struct l2cap_chan *c)
 {
-	BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->kref.refcount));
+	BT_DBG("chan %pK orig refcnt %d", c, atomic_read(&c->kref.refcount));
 
 	kref_put(&c->kref, l2cap_chan_destroy);
 }
@@ -516,7 +516,7 @@
 
 void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
 {
-	BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
+	BT_DBG("conn %pK, psm 0x%2.2x, dcid 0x%4.4x", conn,
 	       __le16_to_cpu(chan->psm), chan->dcid);
 
 	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
@@ -579,7 +579,7 @@
 
 	__clear_chan_timer(chan);
 
-	BT_DBG("chan %p, conn %p, err %d, state %s", chan, conn, err,
+	BT_DBG("chan %pK, conn %pK, err %d, state %s", chan, conn, err,
 	       state_to_string(chan->state));
 
 	chan->ops->teardown(chan, err);
@@ -608,7 +608,7 @@
 	if (chan->hs_hchan) {
 		struct hci_chan *hs_hchan = chan->hs_hchan;
 
-		BT_DBG("chan %p disconnect hs_hchan %p", chan, hs_hchan);
+		BT_DBG("chan %pK disconnect hs_hchan %pK", chan, hs_hchan);
 		amp_disconnect_logical_link(hs_hchan);
 	}
 
@@ -711,7 +711,7 @@
 {
 	struct l2cap_conn *conn = chan->conn;
 
-	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
+	BT_DBG("chan %pK state %s", chan, state_to_string(chan->state));
 
 	switch (chan->state) {
 	case BT_LISTEN:
@@ -874,7 +874,7 @@
 	struct hci_conn *hcon = chan->conn->hcon;
 	u16 flags;
 
-	BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
+	BT_DBG("chan %pK, skb %pK len %d priority %u", chan, skb, skb->len,
 	       skb->priority);
 
 	if (chan->hs_hcon && !__chan_is_moving(chan)) {
@@ -1061,7 +1061,7 @@
 	struct sk_buff *skb;
 	u32 control_field;
 
-	BT_DBG("chan %p, control %p", chan, control);
+	BT_DBG("chan %pK, control %pK", chan, control);
 
 	if (!control->sframe)
 		return;
@@ -1100,7 +1100,7 @@
 {
 	struct l2cap_ctrl control;
 
-	BT_DBG("chan %p, poll %d", chan, poll);
+	BT_DBG("chan %pK, poll %d", chan, poll);
 
 	memset(&control, 0, sizeof(control));
 	control.sframe = 1;
@@ -1189,7 +1189,7 @@
 {
 	struct sk_buff *skb;
 
-	BT_DBG("chan %p", chan);
+	BT_DBG("chan %pK", chan);
 
 	if (chan->mode != L2CAP_MODE_ERTM)
 		return;
@@ -1223,7 +1223,7 @@
 static void l2cap_move_done(struct l2cap_chan *chan)
 {
 	u8 move_role = chan->move_role;
-	BT_DBG("chan %p", chan);
+	BT_DBG("chan %pK", chan);
 
 	chan->move_state = L2CAP_MOVE_STABLE;
 	chan->move_role = L2CAP_MOVE_ROLE_NONE;
@@ -1302,7 +1302,7 @@
 static void l2cap_start_connection(struct l2cap_chan *chan)
 {
 	if (__amp_capable(chan)) {
-		BT_DBG("chan %p AMP capable: discover AMPs", chan);
+		BT_DBG("chan %pK AMP capable: discover AMPs", chan);
 		a2mp_discover_amp(chan);
 	} else if (chan->conn->hcon->type == LE_LINK) {
 		l2cap_le_start(chan);
@@ -1399,7 +1399,7 @@
 {
 	struct l2cap_chan *chan, *tmp;
 
-	BT_DBG("conn %p", conn);
+	BT_DBG("conn %pK", conn);
 
 	mutex_lock(&conn->chan_lock);
 
@@ -1477,7 +1477,7 @@
 	struct hci_conn *hcon = conn->hcon;
 	struct hci_dev *hdev = hcon->hdev;
 
-	BT_DBG("%s conn %p", hdev->name, conn);
+	BT_DBG("%s conn %pK", hdev->name, conn);
 
 	/* For outgoing pairing which doesn't necessarily have an
 	 * associated socket (e.g. mgmt_pair_device).
@@ -1510,7 +1510,7 @@
 	struct l2cap_chan *chan;
 	struct hci_conn *hcon = conn->hcon;
 
-	BT_DBG("conn %p", conn);
+	BT_DBG("conn %pK", conn);
 
 	if (hcon->type == ACL_LINK)
 		l2cap_request_info(conn);
@@ -1551,7 +1551,7 @@
 {
 	struct l2cap_chan *chan;
 
-	BT_DBG("conn %p", conn);
+	BT_DBG("conn %pK", conn);
 
 	mutex_lock(&conn->chan_lock);
 
@@ -1661,7 +1661,7 @@
 	if (!conn)
 		return;
 
-	BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
+	BT_DBG("hcon %pK conn %pK, err %d", hcon, conn, err);
 
 	kfree_skb(conn->rx_skb);
 
@@ -1789,7 +1789,7 @@
 	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
 					       monitor_timer.work);
 
-	BT_DBG("chan %p", chan);
+	BT_DBG("chan %pK", chan);
 
 	l2cap_chan_lock(chan);
 
@@ -1810,7 +1810,7 @@
 	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
 					       retrans_timer.work);
 
-	BT_DBG("chan %p", chan);
+	BT_DBG("chan %pK", chan);
 
 	l2cap_chan_lock(chan);
 
@@ -1831,7 +1831,7 @@
 	struct sk_buff *skb;
 	struct l2cap_ctrl *control;
 
-	BT_DBG("chan %p, skbs %p", chan, skbs);
+	BT_DBG("chan %pK, skbs %pK", chan, skbs);
 
 	if (__chan_is_moving(chan))
 		return;
@@ -1870,7 +1870,7 @@
 	struct l2cap_ctrl *control;
 	int sent = 0;
 
-	BT_DBG("chan %p", chan);
+	BT_DBG("chan %pK", chan);
 
 	if (chan->state != BT_CONNECTED)
 		return -ENOTCONN;
@@ -1941,7 +1941,7 @@
 	struct sk_buff *tx_skb;
 	u16 seq;
 
-	BT_DBG("chan %p", chan);
+	BT_DBG("chan %pK", chan);
 
 	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
 		return;
@@ -2018,7 +2018,7 @@
 static void l2cap_retransmit(struct l2cap_chan *chan,
 			     struct l2cap_ctrl *control)
 {
-	BT_DBG("chan %p, control %p", chan, control);
+	BT_DBG("chan %pK, control %pK", chan, control);
 
 	l2cap_seq_list_append(&chan->retrans_list, control->reqseq);
 	l2cap_ertm_resend(chan);
@@ -2029,7 +2029,7 @@
 {
 	struct sk_buff *skb;
 
-	BT_DBG("chan %p, control %p", chan, control);
+	BT_DBG("chan %pK, control %pK", chan, control);
 
 	if (control->poll)
 		set_bit(CONN_SEND_FBIT, &chan->conn_state);
@@ -2065,7 +2065,7 @@
 					 chan->last_acked_seq);
 	int threshold;
 
-	BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
+	BT_DBG("chan %pK last_acked_seq %d buffer_seq %d",
 	       chan, chan->last_acked_seq, chan->buffer_seq);
 
 	memset(&control, 0, sizeof(control));
@@ -2160,7 +2160,7 @@
 	int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
 	struct l2cap_hdr *lh;
 
-	BT_DBG("chan %p psm 0x%2.2x len %zu", chan,
+	BT_DBG("chan %pK psm 0x%2.2x len %zu", chan,
 	       __le16_to_cpu(chan->psm), len);
 
 	count = min_t(unsigned int, (conn->mtu - hlen), len);
@@ -2192,7 +2192,7 @@
 	int err, count;
 	struct l2cap_hdr *lh;
 
-	BT_DBG("chan %p len %zu", chan, len);
+	BT_DBG("chan %pK len %zu", chan, len);
 
 	count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);
 
@@ -2223,7 +2223,7 @@
 	int err, count, hlen;
 	struct l2cap_hdr *lh;
 
-	BT_DBG("chan %p len %zu", chan, len);
+	BT_DBG("chan %pK len %zu", chan, len);
 
 	if (!conn)
 		return ERR_PTR(-ENOTCONN);
@@ -2277,7 +2277,7 @@
 	size_t pdu_len;
 	u8 sar;
 
-	BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);
+	BT_DBG("chan %pK, msg %pK, len %zu", chan, msg, len);
 
 	/* It is critical that ERTM PDUs fit in a single HCI fragment,
 	 * so fragmented skbs are not used.  The HCI layer's handling
@@ -2344,7 +2344,7 @@
 	int err, count, hlen;
 	struct l2cap_hdr *lh;
 
-	BT_DBG("chan %p len %zu", chan, len);
+	BT_DBG("chan %pK len %zu", chan, len);
 
 	if (!conn)
 		return ERR_PTR(-ENOTCONN);
@@ -2386,7 +2386,7 @@
 	size_t pdu_len;
 	u16 sdu_len;
 
-	BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);
+	BT_DBG("chan %pK, msg %pK, len %zu", chan, msg, len);
 
 	sdu_len = len;
 	pdu_len = chan->remote_mps - L2CAP_SDULEN_SIZE;
@@ -2552,7 +2552,7 @@
 	struct l2cap_ctrl control;
 	u16 seq;
 
-	BT_DBG("chan %p, txseq %u", chan, txseq);
+	BT_DBG("chan %pK, txseq %u", chan, txseq);
 
 	memset(&control, 0, sizeof(control));
 	control.sframe = 1;
@@ -2574,7 +2574,7 @@
 {
 	struct l2cap_ctrl control;
 
-	BT_DBG("chan %p", chan);
+	BT_DBG("chan %pK", chan);
 
 	if (chan->srej_list.tail == L2CAP_SEQ_LIST_CLEAR)
 		return;
@@ -2592,7 +2592,7 @@
 	u16 initial_head;
 	u16 seq;
 
-	BT_DBG("chan %p, txseq %u", chan, txseq);
+	BT_DBG("chan %pK, txseq %u", chan, txseq);
 
 	memset(&control, 0, sizeof(control));
 	control.sframe = 1;
@@ -2617,7 +2617,7 @@
 	struct sk_buff *acked_skb;
 	u16 ackseq;
 
-	BT_DBG("chan %p, reqseq %u", chan, reqseq);
+	BT_DBG("chan %pK, reqseq %u", chan, reqseq);
 
 	if (chan->unacked_frames == 0 || reqseq == chan->expected_ack_seq)
 		return;
@@ -2646,7 +2646,7 @@
 
 static void l2cap_abort_rx_srej_sent(struct l2cap_chan *chan)
 {
-	BT_DBG("chan %p", chan);
+	BT_DBG("chan %pK", chan);
 
 	chan->expected_tx_seq = chan->buffer_seq;
 	l2cap_seq_list_clear(&chan->srej_list);
@@ -2658,7 +2658,7 @@
 				struct l2cap_ctrl *control,
 				struct sk_buff_head *skbs, u8 event)
 {
-	BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
+	BT_DBG("chan %pK, control %pK, skbs %pK, event %d", chan, control, skbs,
 	       event);
 
 	switch (event) {
@@ -2730,7 +2730,7 @@
 				  struct l2cap_ctrl *control,
 				  struct sk_buff_head *skbs, u8 event)
 {
-	BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
+	BT_DBG("chan %pK, control %pK, skbs %pK, event %d", chan, control, skbs,
 	       event);
 
 	switch (event) {
@@ -2807,7 +2807,7 @@
 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
 		     struct sk_buff_head *skbs, u8 event)
 {
-	BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
+	BT_DBG("chan %pK, control %pK, skbs %pK, event %d, state %d",
 	       chan, control, skbs, event, chan->tx_state);
 
 	switch (chan->tx_state) {
@@ -2826,14 +2826,14 @@
 static void l2cap_pass_to_tx(struct l2cap_chan *chan,
 			     struct l2cap_ctrl *control)
 {
-	BT_DBG("chan %p, control %p", chan, control);
+	BT_DBG("chan %pK, control %pK", chan, control);
 	l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_REQSEQ_AND_FBIT);
 }
 
 static void l2cap_pass_to_tx_fbit(struct l2cap_chan *chan,
 				  struct l2cap_ctrl *control)
 {
-	BT_DBG("chan %p, control %p", chan, control);
+	BT_DBG("chan %pK, control %pK", chan, control);
 	l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_FBIT);
 }
 
@@ -2843,7 +2843,7 @@
 	struct sk_buff *nskb;
 	struct l2cap_chan *chan;
 
-	BT_DBG("conn %p", conn);
+	BT_DBG("conn %pK", conn);
 
 	mutex_lock(&conn->chan_lock);
 
@@ -2874,7 +2874,7 @@
 	struct l2cap_hdr *lh;
 	int len, count;
 
-	BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %u",
+	BT_DBG("conn %pK, code 0x%2.2x, ident 0x%2.2x, len %u",
 	       conn, code, ident, dlen);
 
 	if (conn->mtu < L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE)
@@ -3036,7 +3036,7 @@
 					       ack_timer.work);
 	u16 frames_to_ack;
 
-	BT_DBG("chan %p", chan);
+	BT_DBG("chan %pK", chan);
 
 	l2cap_chan_lock(chan);
 
@@ -3181,7 +3181,7 @@
 	void *endptr = data + data_size;
 	u16 size;
 
-	BT_DBG("chan %p", chan);
+	BT_DBG("chan %pK", chan);
 
 	if (chan->num_conf_req || chan->num_conf_rsp)
 		goto done;
@@ -3311,7 +3311,7 @@
 	u16 result = L2CAP_CONF_SUCCESS;
 	u16 size;
 
-	BT_DBG("chan %p", chan);
+	BT_DBG("chan %pK", chan);
 
 	while (len >= L2CAP_CONF_OPT_SIZE) {
 		len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
@@ -3522,7 +3522,7 @@
 	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
 	struct l2cap_conf_efs efs;
 
-	BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
+	BT_DBG("chan %pK, rsp %pK, len %d, req %pK", chan, rsp, len, data);
 
 	while (len >= L2CAP_CONF_OPT_SIZE) {
 		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
@@ -3628,7 +3628,7 @@
 	struct l2cap_conf_rsp *rsp = data;
 	void *ptr = rsp->data;
 
-	BT_DBG("chan %p", chan);
+	BT_DBG("chan %pK", chan);
 
 	rsp->scid   = cpu_to_le16(chan->dcid);
 	rsp->result = cpu_to_le16(result);
@@ -3642,7 +3642,7 @@
 	struct l2cap_le_conn_rsp rsp;
 	struct l2cap_conn *conn = chan->conn;
 
-	BT_DBG("chan %p", chan);
+	BT_DBG("chan %pK", chan);
 
 	rsp.dcid    = cpu_to_le16(chan->scid);
 	rsp.mtu     = cpu_to_le16(chan->imtu);
@@ -3671,7 +3671,7 @@
 	else
 		rsp_code = L2CAP_CONN_RSP;
 
-	BT_DBG("chan %p rsp_code %u", chan, rsp_code);
+	BT_DBG("chan %pK rsp_code %u", chan, rsp_code);
 
 	l2cap_send_cmd(conn, chan->ident, rsp_code, sizeof(rsp), &rsp);
 
@@ -3699,7 +3699,7 @@
 		.txwin_size = min_t(u16, chan->ack_win, L2CAP_DEFAULT_TX_WINDOW),
 	};
 
-	BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
+	BT_DBG("chan %pK, rsp %pK, len %d", chan, rsp, len);
 
 	if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
 		return;
@@ -4002,7 +4002,7 @@
 {
 	struct l2cap_conn *conn = chan->conn;
 
-	BT_DBG("conn %p chan %p ident %d flags 0x%4.4x", conn, chan, ident,
+	BT_DBG("conn %pK chan %pK ident %d flags 0x%4.4x", conn, chan, ident,
 	       flags);
 
 	clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
@@ -4499,7 +4499,8 @@
 			return 0;
 		}
 
-		BT_DBG("mgr %p bredr_chan %p hs_hcon %p", mgr, chan, hs_hcon);
+		BT_DBG("mgr %pK bredr_chan %pK hs_hcon %pK",
+		       mgr, chan, hs_hcon);
 
 		mgr->bredr_chan = chan;
 		chan->hs_hcon = hs_hcon;
@@ -4528,7 +4529,7 @@
 	struct l2cap_move_chan_req req;
 	u8 ident;
 
-	BT_DBG("chan %p, dest_amp_id %d", chan, dest_amp_id);
+	BT_DBG("chan %pK, dest_amp_id %d", chan, dest_amp_id);
 
 	ident = l2cap_get_ident(chan->conn);
 	chan->ident = ident;
@@ -4546,7 +4547,7 @@
 {
 	struct l2cap_move_chan_rsp rsp;
 
-	BT_DBG("chan %p, result 0x%4.4x", chan, result);
+	BT_DBG("chan %pK, result 0x%4.4x", chan, result);
 
 	rsp.icid = cpu_to_le16(chan->dcid);
 	rsp.result = cpu_to_le16(result);
@@ -4559,7 +4560,7 @@
 {
 	struct l2cap_move_chan_cfm cfm;
 
-	BT_DBG("chan %p, result 0x%4.4x", chan, result);
+	BT_DBG("chan %pK, result 0x%4.4x", chan, result);
 
 	chan->ident = l2cap_get_ident(chan->conn);
 
@@ -4576,7 +4577,7 @@
 {
 	struct l2cap_move_chan_cfm cfm;
 
-	BT_DBG("conn %p, icid 0x%4.4x", conn, icid);
+	BT_DBG("conn %pK, icid 0x%4.4x", conn, icid);
 
 	cfm.icid = cpu_to_le16(icid);
 	cfm.result = cpu_to_le16(L2CAP_MC_UNCONFIRMED);
@@ -4696,7 +4697,7 @@
 void l2cap_logical_cfm(struct l2cap_chan *chan, struct hci_chan *hchan,
 		       u8 status)
 {
-	BT_DBG("chan %p, hchan %p, status %d", chan, hchan, status);
+	BT_DBG("chan %pK, hchan %pK, status %d", chan, hchan, status);
 
 	if (status) {
 		l2cap_logical_fail(chan);
@@ -4715,7 +4716,7 @@
 
 void l2cap_move_start(struct l2cap_chan *chan)
 {
-	BT_DBG("chan %p", chan);
+	BT_DBG("chan %pK", chan);
 
 	if (chan->local_amp_id == AMP_ID_BREDR) {
 		if (chan->chan_policy != BT_CHANNEL_POLICY_AMP_PREFERRED)
@@ -4735,7 +4736,7 @@
 static void l2cap_do_create(struct l2cap_chan *chan, int result,
 			    u8 local_amp_id, u8 remote_amp_id)
 {
-	BT_DBG("chan %p state %s %u -> %u", chan, state_to_string(chan->state),
+	BT_DBG("chan %pK state %s %u -> %u", chan, state_to_string(chan->state),
 	       local_amp_id, remote_amp_id);
 
 	chan->fcs = L2CAP_FCS_NONE;
@@ -4844,7 +4845,7 @@
 	u8 local_amp_id = chan->local_amp_id;
 	u8 remote_amp_id = chan->remote_amp_id;
 
-	BT_DBG("chan %p, result %d, local_amp_id %d, remote_amp_id %d",
+	BT_DBG("chan %pK, result %d, local_amp_id %d, remote_amp_id %d",
 	       chan, result, local_amp_id, remote_amp_id);
 
 	if (chan->state == BT_DISCONN || chan->state == BT_CLOSED) {
@@ -5216,7 +5217,8 @@
 
 	memset(&rsp, 0, sizeof(rsp));
 
-	err = hci_check_conn_params(min, max, latency, to_multiplier);
+//	err = hci_check_conn_params(min, max, latency, to_multiplier);
+	err = 1;
 	if (err)
 		rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
 	else
@@ -5767,7 +5769,7 @@
 {
 	struct l2cap_ctrl control;
 
-	BT_DBG("chan %p", chan);
+	BT_DBG("chan %pK", chan);
 
 	memset(&control, 0, sizeof(control));
 	control.sframe = 1;
@@ -5922,7 +5924,7 @@
 	 * until a gap is encountered.
 	 */
 
-	BT_DBG("chan %p", chan);
+	BT_DBG("chan %pK", chan);
 
 	while (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
 		struct sk_buff *skb;
@@ -5954,7 +5956,7 @@
 {
 	struct sk_buff *skb;
 
-	BT_DBG("chan %p, control %p", chan, control);
+	BT_DBG("chan %pK, control %pK", chan, control);
 
 	if (control->reqseq == chan->next_tx_seq) {
 		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
@@ -6012,7 +6014,7 @@
 {
 	struct sk_buff *skb;
 
-	BT_DBG("chan %p, control %p", chan, control);
+	BT_DBG("chan %pK, control %pK", chan, control);
 
 	if (control->reqseq == chan->next_tx_seq) {
 		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
@@ -6046,7 +6048,7 @@
 
 static u8 l2cap_classify_txseq(struct l2cap_chan *chan, u16 txseq)
 {
-	BT_DBG("chan %p, txseq %d", chan, txseq);
+	BT_DBG("chan %pK, txseq %d", chan, txseq);
 
 	BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan->last_acked_seq,
 	       chan->expected_tx_seq);
@@ -6137,7 +6139,7 @@
 	int err = 0;
 	bool skb_in_use = false;
 
-	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
+	BT_DBG("chan %pK, control %pK, skb %pK, event %d", chan, control, skb,
 	       event);
 
 	switch (event) {
@@ -6193,7 +6195,7 @@
 			 */
 			skb_queue_tail(&chan->srej_q, skb);
 			skb_in_use = true;
-			BT_DBG("Queued %p (queue len %d)", skb,
+			BT_DBG("Queued %pK (queue len %d)", skb,
 			       skb_queue_len(&chan->srej_q));
 
 			clear_bit(CONN_SREJ_ACT, &chan->conn_state);
@@ -6257,7 +6259,7 @@
 	}
 
 	if (skb && !skb_in_use) {
-		BT_DBG("Freeing %p", skb);
+		BT_DBG("Freeing %pK", skb);
 		kfree_skb(skb);
 	}
 
@@ -6272,7 +6274,7 @@
 	u16 txseq = control->txseq;
 	bool skb_in_use = false;
 
-	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
+	BT_DBG("chan %pK, control %pK, skb %pK, event %d", chan, control, skb,
 	       event);
 
 	switch (event) {
@@ -6283,7 +6285,7 @@
 			l2cap_pass_to_tx(chan, control);
 			skb_queue_tail(&chan->srej_q, skb);
 			skb_in_use = true;
-			BT_DBG("Queued %p (queue len %d)", skb,
+			BT_DBG("Queued %pK (queue len %d)", skb,
 			       skb_queue_len(&chan->srej_q));
 
 			chan->expected_tx_seq = __next_seq(chan, txseq);
@@ -6294,7 +6296,7 @@
 			l2cap_pass_to_tx(chan, control);
 			skb_queue_tail(&chan->srej_q, skb);
 			skb_in_use = true;
-			BT_DBG("Queued %p (queue len %d)", skb,
+			BT_DBG("Queued %pK (queue len %d)", skb,
 			       skb_queue_len(&chan->srej_q));
 
 			err = l2cap_rx_queued_iframes(chan);
@@ -6309,7 +6311,7 @@
 			 */
 			skb_queue_tail(&chan->srej_q, skb);
 			skb_in_use = true;
-			BT_DBG("Queued %p (queue len %d)", skb,
+			BT_DBG("Queued %pK (queue len %d)", skb,
 			       skb_queue_len(&chan->srej_q));
 
 			l2cap_pass_to_tx(chan, control);
@@ -6323,7 +6325,7 @@
 			 */
 			skb_queue_tail(&chan->srej_q, skb);
 			skb_in_use = true;
-			BT_DBG("Queued %p (queue len %d)", skb,
+			BT_DBG("Queued %pK (queue len %d)", skb,
 			       skb_queue_len(&chan->srej_q));
 
 			l2cap_pass_to_tx(chan, control);
@@ -6400,7 +6402,7 @@
 	}
 
 	if (skb && !skb_in_use) {
-		BT_DBG("Freeing %p", skb);
+		BT_DBG("Freeing %pK", skb);
 		kfree_skb(skb);
 	}
 
@@ -6409,7 +6411,7 @@
 
 static int l2cap_finish_move(struct l2cap_chan *chan)
 {
-	BT_DBG("chan %p", chan);
+	BT_DBG("chan %pK", chan);
 
 	chan->rx_state = L2CAP_RX_STATE_RECV;
 
@@ -6427,7 +6429,7 @@
 {
 	int err;
 
-	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
+	BT_DBG("chan %pK, control %pK, skb %pK, event %d", chan, control, skb,
 	       event);
 
 	if (!control->poll)
@@ -6511,7 +6513,7 @@
 {
 	int err = 0;
 
-	BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan,
+	BT_DBG("chan %pK, control %pK, skb %pK, event %d, state %d", chan,
 	       control, skb, event, chan->rx_state);
 
 	if (__valid_reqseq(chan, control->reqseq)) {
@@ -6548,7 +6550,7 @@
 {
 	int err = 0;
 
-	BT_DBG("chan %p, control %p, skb %p, state %d", chan, control, skb,
+	BT_DBG("chan %pK, control %pK, skb %pK, state %d", chan, control, skb,
 	       chan->rx_state);
 
 	if (l2cap_classify_txseq(chan, control->txseq) ==
@@ -6570,7 +6572,7 @@
 		chan->sdu_len = 0;
 
 		if (skb) {
-			BT_DBG("Freeing %p", skb);
+			BT_DBG("Freeing %pK", skb);
 			kfree_skb(skb);
 		}
 	}
@@ -6683,7 +6685,7 @@
 
 	return_credits = le_max_credits - chan->rx_credits;
 
-	BT_DBG("chan %p returning %u credits to sender", chan, return_credits);
+	BT_DBG("chan %pK returning %u credits to sender", chan, return_credits);
 
 	chan->rx_credits += return_credits;
 
@@ -6808,7 +6810,7 @@
 		}
 	}
 
-	BT_DBG("chan %p, len %d", chan, skb->len);
+	BT_DBG("chan %pK, len %d", chan, skb->len);
 
 	/* If we receive data on a fixed channel before the info req/rsp
 	 * procedure is done, simply assume that the channel is supported
@@ -6848,7 +6850,7 @@
 		goto done;
 
 	default:
-		BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
+		BT_DBG("chan %pK: bad mode 0x%2.2x", chan, chan->mode);
 		break;
 	}
 
@@ -6873,7 +6875,7 @@
 	if (!chan)
 		goto free_skb;
 
-	BT_DBG("chan %p, len %d", chan, skb->len);
+	BT_DBG("chan %pK, len %d", chan, skb->len);
 
 	if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
 		goto drop;
@@ -6986,7 +6988,7 @@
 	conn->hcon = hci_conn_get(hcon);
 	conn->hchan = hchan;
 
-	BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);
+	BT_DBG("hcon %pK conn %pK hchan %pK", hcon, conn, hchan);
 
 	switch (hcon->type) {
 	case LE_LINK:
@@ -7282,7 +7284,7 @@
 	if (hcon->type != ACL_LINK && hcon->type != LE_LINK)
 		return;
 
-	BT_DBG("hcon %p bdaddr %pMR status %d", hcon, &hcon->dst, status);
+	BT_DBG("hcon %pK bdaddr %pMR status %d", hcon, &hcon->dst, status);
 
 	if (status) {
 		l2cap_conn_del(hcon, bt_to_errno(status));
@@ -7337,7 +7339,7 @@
 {
 	struct l2cap_conn *conn = hcon->l2cap_data;
 
-	BT_DBG("hcon %p", hcon);
+	BT_DBG("hcon %pK", hcon);
 
 	if (!conn)
 		return HCI_ERROR_REMOTE_USER_TERM;
@@ -7349,7 +7351,7 @@
 	if (hcon->type != ACL_LINK && hcon->type != LE_LINK)
 		return;
 
-	BT_DBG("hcon %p reason %d", hcon, reason);
+	BT_DBG("hcon %pK reason %d", hcon, reason);
 
 	l2cap_conn_del(hcon, bt_to_errno(reason));
 }
@@ -7379,14 +7381,14 @@
 	if (!conn)
 		return;
 
-	BT_DBG("conn %p status 0x%2.2x encrypt %u", conn, status, encrypt);
+	BT_DBG("conn %pK status 0x%2.2x encrypt %u", conn, status, encrypt);
 
 	mutex_lock(&conn->chan_lock);
 
 	list_for_each_entry(chan, &conn->chan_l, list) {
 		l2cap_chan_lock(chan);
 
-		BT_DBG("chan %p scid 0x%4.4x state %s", chan, chan->scid,
+		BT_DBG("chan %pK scid 0x%4.4x state %s", chan, chan->scid,
 		       state_to_string(chan->state));
 
 		if (chan->scid == L2CAP_CID_A2MP) {
@@ -7478,7 +7480,7 @@
 	if (!conn)
 		goto drop;
 
-	BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
+	BT_DBG("conn %pK len %d flags 0x%x", conn, skb->len, flags);
 
 	switch (flags) {
 	case ACL_START:
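
The Bluetooth hunks above are one mechanical change repeated throughout l2cap_core.c: BT_DBG() pointer arguments move from %p to %pK, so printed values honour the kernel.kptr_restrict sysctl instead of leaking raw kernel addresses into the log. A minimal sketch of the difference (function name invented; behaviour as documented for printk in this kernel era):

#include <linux/printk.h>

/* Sketch only. With kptr_restrict=1, %pK prints zeroes unless the
 * reader has CAP_SYSLOG; with kptr_restrict=2 it always prints
 * zeroes. Plain %p emits the raw address regardless.
 */
static void sketch_log_ptr(const void *obj)
{
	pr_debug("obj %p\n", obj);	/* raw kernel address */
	pr_debug("obj %pK\n", obj);	/* restricted by kptr_restrict */
}
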
diff -ruw linux-4.4.115/net/bluetooth/l2cap_sock.c linux-4.4.115-fbx/net/bluetooth/l2cap_sock.c
--- linux-4.4.115/net/bluetooth/l2cap_sock.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/net/bluetooth/l2cap_sock.c	2019-01-22 16:16:28.879295089 +0100
@@ -84,7 +84,7 @@
 	struct sockaddr_l2 la;
 	int len, err = 0;
 
-	BT_DBG("sk %p", sk);
+	BT_DBG("sk %pK", sk);
 
 	if (!addr || addr->sa_family != AF_BLUETOOTH)
 		return -EINVAL;
@@ -178,7 +178,7 @@
 	struct sockaddr_l2 la;
 	int len, err = 0;
 
-	BT_DBG("sk %p", sk);
+	BT_DBG("sk %pK", sk);
 
 	if (!addr || alen < sizeof(addr->sa_family) ||
 	    addr->sa_family != AF_BLUETOOTH)
@@ -254,7 +254,7 @@
 	struct l2cap_chan *chan = l2cap_pi(sk)->chan;
 	int err = 0;
 
-	BT_DBG("sk %p backlog %d", sk, backlog);
+	BT_DBG("sk %pK backlog %d", sk, backlog);
 
 	lock_sock(sk);
 
@@ -311,7 +311,7 @@
 
 	timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
 
-	BT_DBG("sk %p timeo %ld", sk, timeo);
+	BT_DBG("sk %pK timeo %ld", sk, timeo);
 
 	/* Wait for an incoming connection. (wake-one). */
 	add_wait_queue_exclusive(sk_sleep(sk), &wait);
@@ -348,7 +348,7 @@
 
 	newsock->state = SS_CONNECTED;
 
-	BT_DBG("new socket %p", nsk);
+	BT_DBG("new socket %pK", nsk);
 
 done:
 	release_sock(sk);
@@ -362,7 +362,7 @@
 	struct sock *sk = sock->sk;
 	struct l2cap_chan *chan = l2cap_pi(sk)->chan;
 
-	BT_DBG("sock %p, sk %p", sock, sk);
+	BT_DBG("sock %pK, sk %pK", sock, sk);
 
 	if (peer && sk->sk_state != BT_CONNECTED &&
 	    sk->sk_state != BT_CONNECT && sk->sk_state != BT_CONNECT2 &&
@@ -398,7 +398,7 @@
 	int len, err = 0;
 	u32 opt;
 
-	BT_DBG("sk %p", sk);
+	BT_DBG("sk %pK", sk);
 
 	if (get_user(len, optlen))
 		return -EFAULT;
@@ -500,7 +500,7 @@
 	struct bt_power pwr;
 	int len, err = 0;
 
-	BT_DBG("sk %p", sk);
+	BT_DBG("sk %pK", sk);
 
 	if (level == SOL_L2CAP)
 		return l2cap_sock_getsockopt_old(sock, optname, optval, optlen);
@@ -636,7 +636,7 @@
 	int len, err = 0;
 	u32 opt;
 
-	BT_DBG("sk %p", sk);
+	BT_DBG("sk %pK", sk);
 
 	lock_sock(sk);
 
@@ -750,7 +750,7 @@
 	int len, err = 0;
 	u32 opt;
 
-	BT_DBG("sk %p", sk);
+	BT_DBG("sk %pK", sk);
 
 	if (level == SOL_L2CAP)
 		return l2cap_sock_setsockopt_old(sock, optname, optval, optlen);
@@ -951,7 +951,7 @@
 	struct l2cap_chan *chan = l2cap_pi(sk)->chan;
 	int err;
 
-	BT_DBG("sock %p, sk %p", sock, sk);
+	BT_DBG("sock %pK, sk %pK", sock, sk);
 
 	err = sock_error(sk);
 	if (err)
@@ -1045,7 +1045,7 @@
 	if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
 		return;
 
-	BT_DBG("sk %p state %s", sk, state_to_string(sk->sk_state));
+	BT_DBG("sk %pK state %s", sk, state_to_string(sk->sk_state));
 
 	/* Kill poor orphan */
 
@@ -1106,7 +1106,7 @@
 	struct l2cap_conn *conn;
 	int err = 0;
 
-	BT_DBG("sock %p, sk %p", sock, sk);
+	BT_DBG("sock %pK, sk %pK", sock, sk);
 
 	if (!sk)
 		return 0;
@@ -1125,7 +1125,7 @@
 	/* prevent chan structure from being freed whilst unlocked */
 	l2cap_chan_hold(chan);
 
-	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
+	BT_DBG("chan %pK state %s", chan, state_to_string(chan->state));
 
 	if (chan->mode == L2CAP_MODE_ERTM &&
 	    chan->unacked_frames > 0 &&
@@ -1190,7 +1190,7 @@
 	struct sock *sk = sock->sk;
 	int err;
 
-	BT_DBG("sock %p, sk %p", sock, sk);
+	BT_DBG("sock %pK, sk %pK", sock, sk);
 
 	if (!sk)
 		return 0;
@@ -1208,14 +1208,14 @@
 {
 	struct sock *sk;
 
-	BT_DBG("parent %p state %s", parent,
+	BT_DBG("parent %pK state %s", parent,
 	       state_to_string(parent->sk_state));
 
 	/* Close not yet accepted channels */
 	while ((sk = bt_accept_dequeue(parent, NULL))) {
 		struct l2cap_chan *chan = l2cap_pi(sk)->chan;
 
-		BT_DBG("child chan %p state %s", chan,
+		BT_DBG("child chan %pK state %s", chan,
 		       state_to_string(chan->state));
 
 		l2cap_chan_lock(chan);
@@ -1305,7 +1305,7 @@
 	struct sock *sk = chan->data;
 	struct sock *parent;
 
-	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
+	BT_DBG("chan %pK state %s", chan, state_to_string(chan->state));
 
 	/* This callback can be called both for server (BT_LISTEN)
 	 * sockets and "normal" ones. To avoid lockdep warnings
@@ -1392,7 +1392,7 @@
 
 	parent = bt_sk(sk)->parent;
 
-	BT_DBG("sk %p, parent %p", sk, parent);
+	BT_DBG("sk %pK, parent %pK", sk, parent);
 
 	sk->sk_state = BT_CONNECTED;
 	sk->sk_state_change(sk);
@@ -1471,7 +1471,7 @@
 
 static void l2cap_sock_destruct(struct sock *sk)
 {
-	BT_DBG("sk %p", sk);
+	BT_DBG("sk %pK", sk);
 
 	if (l2cap_pi(sk)->chan)
 		l2cap_chan_put(l2cap_pi(sk)->chan);
@@ -1502,7 +1502,7 @@
 {
 	struct l2cap_chan *chan = l2cap_pi(sk)->chan;
 
-	BT_DBG("sk %p", sk);
+	BT_DBG("sk %pK", sk);
 
 	if (parent) {
 		struct l2cap_chan *pchan = l2cap_pi(parent)->chan;
@@ -1609,7 +1609,7 @@
 {
 	struct sock *sk;
 
-	BT_DBG("sock %p", sock);
+	BT_DBG("sock %pK", sock);
 
 	sock->state = SS_UNCONNECTED;
 
diff -ruw linux-4.4.115/net/bluetooth/mgmt.c linux-4.4.115-fbx/net/bluetooth/mgmt.c
--- linux-4.4.115/net/bluetooth/mgmt.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/net/bluetooth/mgmt.c	2019-10-29 09:26:25.773224022 +0100
@@ -281,7 +281,7 @@
 {
 	struct mgmt_rp_read_version rp;
 
-	BT_DBG("sock %p", sk);
+	BT_DBG("sock %pK", sk);
 
 	rp.version = MGMT_VERSION;
 	rp.revision = cpu_to_le16(MGMT_REVISION);
@@ -298,7 +298,7 @@
 	size_t rp_size;
 	int i, err;
 
-	BT_DBG("sock %p", sk);
+	BT_DBG("sock %pK", sk);
 
 	if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
 		num_commands = ARRAY_SIZE(mgmt_commands);
@@ -351,7 +351,7 @@
 	u16 count;
 	int err;
 
-	BT_DBG("sock %p", sk);
+	BT_DBG("sock %pK", sk);
 
 	read_lock(&hci_dev_list_lock);
 
@@ -411,7 +411,7 @@
 	u16 count;
 	int err;
 
-	BT_DBG("sock %p", sk);
+	BT_DBG("sock %pK", sk);
 
 	read_lock(&hci_dev_list_lock);
 
@@ -471,7 +471,7 @@
 	u16 count;
 	int err;
 
-	BT_DBG("sock %p", sk);
+	BT_DBG("sock %pK", sk);
 
 	read_lock(&hci_dev_list_lock);
 
@@ -588,7 +588,7 @@
 	struct mgmt_rp_read_config_info rp;
 	u32 options = 0;
 
-	BT_DBG("sock %p %s", sk, hdev->name);
+	BT_DBG("sock %pK %s", sk, hdev->name);
 
 	hci_dev_lock(hdev);
 
@@ -1373,7 +1373,7 @@
 {
 	struct mgmt_rp_read_info rp;
 
-	BT_DBG("sock %p %s", sk, hdev->name);
+	BT_DBG("sock %pK %s", sk, hdev->name);
 
 	hci_dev_lock(hdev);
 
diff -ruw linux-4.4.115/net/bluetooth/sco.c linux-4.4.115-fbx/net/bluetooth/sco.c
--- linux-4.4.115/net/bluetooth/sco.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/net/bluetooth/sco.c	2019-10-29 09:26:25.777224062 +0100
@@ -76,7 +76,7 @@
 {
 	struct sock *sk = (struct sock *)arg;
 
-	BT_DBG("sock %p state %d", sk, sk->sk_state);
+	BT_DBG("sock %pK state %d", sk, sk->sk_state);
 
 	bh_lock_sock(sk);
 	sk->sk_err = ETIMEDOUT;
@@ -89,13 +89,13 @@
 
 static void sco_sock_set_timer(struct sock *sk, long timeout)
 {
-	BT_DBG("sock %p state %d timeout %ld", sk, sk->sk_state, timeout);
+	BT_DBG("sock %pK state %d timeout %ld", sk, sk->sk_state, timeout);
 	sk_reset_timer(sk, &sk->sk_timer, jiffies + timeout);
 }
 
 static void sco_sock_clear_timer(struct sock *sk)
 {
-	BT_DBG("sock %p state %d", sk, sk->sk_state);
+	BT_DBG("sock %pK state %d", sk, sk->sk_state);
 	sk_stop_timer(sk, &sk->sk_timer);
 }
 
@@ -122,7 +122,7 @@
 	else
 		conn->mtu = 60;
 
-	BT_DBG("hcon %p conn %p", hcon, conn);
+	BT_DBG("hcon %pK conn %pK", hcon, conn);
 
 	return conn;
 }
@@ -135,7 +135,7 @@
 
 	conn = sco_pi(sk)->conn;
 
-	BT_DBG("sk %p, conn %p, err %d", sk, conn, err);
+	BT_DBG("sk %pK, conn %pK, err %d", sk, conn, err);
 
 	if (conn) {
 		sco_conn_lock(conn);
@@ -162,7 +162,7 @@
 	if (!conn)
 		return;
 
-	BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
+	BT_DBG("hcon %pK conn %pK, err %d", hcon, conn, err);
 
 	/* Kill socket */
 	sco_conn_lock(conn);
@@ -186,7 +186,7 @@
 static void __sco_chan_add(struct sco_conn *conn, struct sock *sk,
 			   struct sock *parent)
 {
-	BT_DBG("conn %p", conn);
+	BT_DBG("conn %pK", conn);
 
 	sco_pi(sk)->conn = conn;
 	conn->sk = sk;
@@ -281,7 +281,7 @@
 	if (len > conn->mtu)
 		return -EINVAL;
 
-	BT_DBG("sk %p len %d", sk, len);
+	BT_DBG("sk %pK len %d", sk, len);
 
 	skb = bt_skb_send_alloc(sk, len, msg->msg_flags & MSG_DONTWAIT, &err);
 	if (!skb)
@@ -308,7 +308,7 @@
 	if (!sk)
 		goto drop;
 
-	BT_DBG("sk %p len %d", sk, skb->len);
+	BT_DBG("sk %pK len %d", sk, skb->len);
 
 	if (sk->sk_state != BT_CONNECTED)
 		goto drop;
@@ -365,7 +365,7 @@
 
 static void sco_sock_destruct(struct sock *sk)
 {
-	BT_DBG("sk %p", sk);
+	BT_DBG("sk %pK", sk);
 
 	skb_queue_purge(&sk->sk_receive_queue);
 	skb_queue_purge(&sk->sk_write_queue);
@@ -375,7 +375,7 @@
 {
 	struct sock *sk;
 
-	BT_DBG("parent %p", parent);
+	BT_DBG("parent %pK", parent);
 
 	/* Close not yet accepted channels */
 	while ((sk = bt_accept_dequeue(parent, NULL))) {
@@ -395,7 +395,7 @@
 	if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
 		return;
 
-	BT_DBG("sk %p state %d", sk, sk->sk_state);
+	BT_DBG("sk %pK state %d", sk, sk->sk_state);
 
 	/* Kill poor orphan */
 	bt_sock_unlink(&sco_sk_list, sk);
@@ -405,7 +405,7 @@
 
 static void __sco_sock_close(struct sock *sk)
 {
-	BT_DBG("sk %p state %d socket %p", sk, sk->sk_state, sk->sk_socket);
+	BT_DBG("sk %pK state %d socket %pK", sk, sk->sk_state, sk->sk_socket);
 
 	switch (sk->sk_state) {
 	case BT_LISTEN:
@@ -449,7 +449,7 @@
 
 static void sco_sock_init(struct sock *sk, struct sock *parent)
 {
-	BT_DBG("sk %p", sk);
+	BT_DBG("sk %pK", sk);
 
 	if (parent) {
 		sk->sk_type = parent->sk_type;
@@ -497,7 +497,7 @@
 {
 	struct sock *sk;
 
-	BT_DBG("sock %p", sock);
+	BT_DBG("sock %pK", sock);
 
 	sock->state = SS_UNCONNECTED;
 
@@ -521,7 +521,7 @@
 	struct sock *sk = sock->sk;
 	int err = 0;
 
-	BT_DBG("sk %p %pMR", sk, &sa->sco_bdaddr);
+	BT_DBG("sk %pK %pMR", sk, &sa->sco_bdaddr);
 
 	if (!addr || addr->sa_family != AF_BLUETOOTH)
 		return -EINVAL;
@@ -556,7 +556,7 @@
 	struct sock *sk = sock->sk;
 	int err;
 
-	BT_DBG("sk %p", sk);
+	BT_DBG("sk %pK", sk);
 
 	if (alen < sizeof(struct sockaddr_sco) ||
 	    addr->sa_family != AF_BLUETOOTH)
@@ -591,7 +591,7 @@
 	bdaddr_t *src = &sco_pi(sk)->src;
 	int err = 0;
 
-	BT_DBG("sk %p backlog %d", sk, backlog);
+	BT_DBG("sk %pK backlog %d", sk, backlog);
 
 	lock_sock(sk);
 
@@ -637,7 +637,7 @@
 
 	timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
 
-	BT_DBG("sk %p timeo %ld", sk, timeo);
+	BT_DBG("sk %pK timeo %ld", sk, timeo);
 
 	/* Wait for an incoming connection. (wake-one). */
 	add_wait_queue_exclusive(sk_sleep(sk), &wait);
@@ -673,7 +673,7 @@
 
 	newsock->state = SS_CONNECTED;
 
-	BT_DBG("new socket %p", ch);
+	BT_DBG("new socket %pK", ch);
 
 done:
 	release_sock(sk);
@@ -686,7 +686,7 @@
 	struct sockaddr_sco *sa = (struct sockaddr_sco *) addr;
 	struct sock *sk = sock->sk;
 
-	BT_DBG("sock %p, sk %p", sock, sk);
+	BT_DBG("sock %pK, sk %pK", sock, sk);
 
 	addr->sa_family = AF_BLUETOOTH;
 	*len = sizeof(struct sockaddr_sco);
@@ -705,7 +705,7 @@
 	struct sock *sk = sock->sk;
 	int err;
 
-	BT_DBG("sock %p, sk %p", sock, sk);
+	BT_DBG("sock %pK, sk %pK", sock, sk);
 
 	err = sock_error(sk);
 	if (err)
@@ -729,7 +729,7 @@
 {
 	struct hci_dev *hdev = conn->hdev;
 
-	BT_DBG("conn %p", conn);
+	BT_DBG("conn %pK", conn);
 
 	conn->state = BT_CONFIG;
 
@@ -799,7 +799,7 @@
 	struct bt_voice voice;
 	u32 opt;
 
-	BT_DBG("sk %p", sk);
+	BT_DBG("sk %pK", sk);
 
 	lock_sock(sk);
 
@@ -864,7 +864,7 @@
 	struct sco_conninfo cinfo;
 	int len, err = 0;
 
-	BT_DBG("sk %p", sk);
+	BT_DBG("sk %pK", sk);
 
 	if (get_user(len, optlen))
 		return -EFAULT;
@@ -924,7 +924,7 @@
 	int len, err = 0;
 	struct bt_voice voice;
 
-	BT_DBG("sk %p", sk);
+	BT_DBG("sk %pK", sk);
 
 	if (level == SOL_SCO)
 		return sco_sock_getsockopt_old(sock, optname, optval, optlen);
@@ -971,7 +971,7 @@
 	struct sock *sk = sock->sk;
 	int err = 0;
 
-	BT_DBG("sock %p, sk %p", sock, sk);
+	BT_DBG("sock %pK, sk %pK", sock, sk);
 
 	if (!sk)
 		return 0;
@@ -1001,7 +1001,7 @@
 	struct sock *sk = sock->sk;
 	int err = 0;
 
-	BT_DBG("sock %p, sk %p", sock, sk);
+	BT_DBG("sock %pK, sk %pK", sock, sk);
 
 	if (!sk)
 		return 0;
@@ -1025,7 +1025,7 @@
 	struct sock *parent;
 	struct sock *sk = conn->sk;
 
-	BT_DBG("conn %p", conn);
+	BT_DBG("conn %pK", conn);
 
 	if (sk) {
 		sco_sock_clear_timer(sk);
@@ -1112,7 +1112,7 @@
 	if (hcon->type != SCO_LINK && hcon->type != ESCO_LINK)
 		return;
 
-	BT_DBG("hcon %p bdaddr %pMR status %d", hcon, &hcon->dst, status);
+	BT_DBG("hcon %pK bdaddr %pMR status %d", hcon, &hcon->dst, status);
 
 	if (!status) {
 		struct sco_conn *conn;
@@ -1129,7 +1129,7 @@
 	if (hcon->type != SCO_LINK && hcon->type != ESCO_LINK)
 		return;
 
-	BT_DBG("hcon %p reason %d", hcon, reason);
+	BT_DBG("hcon %pK reason %d", hcon, reason);
 
 	sco_conn_del(hcon, bt_to_errno(reason));
 }
@@ -1141,7 +1141,7 @@
 	if (!conn)
 		goto drop;
 
-	BT_DBG("conn %p len %d", conn, skb->len);
+	BT_DBG("conn %pK len %d", conn, skb->len);
 
 	if (skb->len) {
 		sco_recv_frame(conn, skb);
diff -ruw linux-4.4.115/net/bluetooth/smp.c linux-4.4.115-fbx/net/bluetooth/smp.c
--- linux-4.4.115/net/bluetooth/smp.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/net/bluetooth/smp.c	2019-10-29 09:26:25.777224062 +0100
@@ -178,7 +178,7 @@
 		return -EFBIG;
 
 	if (!tfm) {
-		BT_ERR("tfm %p", tfm);
+		BT_ERR("tfm %pK", tfm);
 		return -EINVAL;
 	}
 
@@ -381,7 +381,7 @@
 	SMP_DBG("k %16phN r %16phN", k, r);
 
 	if (!tfm) {
-		BT_ERR("tfm %p", tfm);
+		BT_ERR("tfm %pK", tfm);
 		return -EINVAL;
 	}
 
@@ -953,7 +953,7 @@
 	struct smp_cmd_pairing_confirm cp;
 	int ret;
 
-	BT_DBG("conn %p", conn);
+	BT_DBG("conn %pK", conn);
 
 	ret = smp_c1(smp->tfm_aes, smp->tk, smp->prnd, smp->preq, smp->prsp,
 		     conn->hcon->init_addr_type, &conn->hcon->init_addr,
@@ -984,7 +984,7 @@
 	if (IS_ERR_OR_NULL(smp->tfm_aes))
 		return SMP_UNSPECIFIED;
 
-	BT_DBG("conn %p %s", conn, conn->hcon->out ? "master" : "slave");
+	BT_DBG("conn %pK %s", conn, conn->hcon->out ? "master" : "slave");
 
 	ret = smp_c1(smp->tfm_aes, smp->tk, smp->rrnd, smp->preq, smp->prsp,
 		     hcon->init_addr_type, &hcon->init_addr,
@@ -1223,7 +1223,7 @@
 	struct hci_dev *hdev = hcon->hdev;
 	__u8 *keydist;
 
-	BT_DBG("conn %p", conn);
+	BT_DBG("conn %pK", conn);
 
 	rsp = (void *) &smp->prsp[1];
 
@@ -1353,7 +1353,7 @@
 					    security_timer.work);
 	struct l2cap_conn *conn = smp->conn;
 
-	BT_DBG("conn %p", conn);
+	BT_DBG("conn %pK", conn);
 
 	hci_disconnect(conn->hcon, HCI_ERROR_REMOTE_USER_TERM);
 }
@@ -1715,7 +1715,7 @@
 	u8 key_size, auth, sec_level;
 	int ret;
 
-	BT_DBG("conn %p", conn);
+	BT_DBG("conn %pK", conn);
 
 	if (skb->len < sizeof(*req))
 		return SMP_INVALID_PARAMS;
@@ -1900,7 +1900,7 @@
 	u8 key_size, auth;
 	int ret;
 
-	BT_DBG("conn %p", conn);
+	BT_DBG("conn %pK", conn);
 
 	if (skb->len < sizeof(*rsp))
 		return SMP_INVALID_PARAMS;
@@ -2052,7 +2052,7 @@
 	struct l2cap_chan *chan = conn->smp;
 	struct smp_chan *smp = chan->data;
 
-	BT_DBG("conn %p %s", conn, conn->hcon->out ? "master" : "slave");
+	BT_DBG("conn %pK %s", conn, conn->hcon->out ? "master" : "slave");
 
 	if (skb->len < sizeof(smp->pcnf))
 		return SMP_INVALID_PARAMS;
@@ -2098,7 +2098,7 @@
 	u32 passkey;
 	int err;
 
-	BT_DBG("conn %p", conn);
+	BT_DBG("conn %pK", conn);
 
 	if (skb->len < sizeof(smp->rrnd))
 		return SMP_INVALID_PARAMS;
@@ -2233,7 +2233,7 @@
 	struct smp_chan *smp;
 	u8 sec_level, auth;
 
-	BT_DBG("conn %p", conn);
+	BT_DBG("conn %pK", conn);
 
 	if (skb->len < sizeof(*rp))
 		return SMP_INVALID_PARAMS;
@@ -2290,7 +2290,7 @@
 	__u8 authreq;
 	int ret;
 
-	BT_DBG("conn %p hcon %p level 0x%2.2x", conn, hcon, sec_level);
+	BT_DBG("conn %pK hcon %pK level 0x%2.2x", conn, hcon, sec_level);
 
 	/* This may be NULL if there's an unexpected disconnection */
 	if (!conn)
@@ -2397,7 +2397,7 @@
 	struct l2cap_chan *chan = conn->smp;
 	struct smp_chan *smp = chan->data;
 
-	BT_DBG("conn %p", conn);
+	BT_DBG("conn %pK", conn);
 
 	if (skb->len < sizeof(*rp))
 		return SMP_INVALID_PARAMS;
@@ -2421,7 +2421,7 @@
 	struct smp_ltk *ltk;
 	u8 authenticated;
 
-	BT_DBG("conn %p", conn);
+	BT_DBG("conn %pK", conn);
 
 	if (skb->len < sizeof(*rp))
 		return SMP_INVALID_PARAMS;
@@ -2530,7 +2530,7 @@
 	struct smp_chan *smp = chan->data;
 	struct smp_csrk *csrk;
 
-	BT_DBG("conn %p", conn);
+	BT_DBG("conn %pK", conn);
 
 	if (skb->len < sizeof(*rp))
 		return SMP_INVALID_PARAMS;
@@ -2609,7 +2609,7 @@
 	struct smp_cmd_pairing_confirm cfm;
 	int err;
 
-	BT_DBG("conn %p", conn);
+	BT_DBG("conn %pK", conn);
 
 	if (skb->len < sizeof(*key))
 		return SMP_INVALID_PARAMS;
@@ -2722,7 +2722,7 @@
 	u8 io_cap[3], r[16], e[16];
 	int err;
 
-	BT_DBG("conn %p", conn);
+	BT_DBG("conn %pK", conn);
 
 	if (skb->len < sizeof(*check))
 		return SMP_INVALID_PARAMS;
@@ -2904,7 +2904,7 @@
 {
 	struct l2cap_conn *conn = chan->conn;
 
-	BT_DBG("chan %p", chan);
+	BT_DBG("chan %pK", chan);
 
 	if (chan->data)
 		smp_chan_destroy(conn);
@@ -2921,7 +2921,7 @@
 	struct smp_cmd_pairing req;
 	struct smp_chan *smp;
 
-	BT_DBG("chan %p", chan);
+	BT_DBG("chan %pK", chan);
 
 	/* Only new pairings are interesting */
 	if (!test_bit(HCI_CONN_NEW_LINK_KEY, &hcon->flags))
@@ -2987,7 +2987,7 @@
 	struct l2cap_conn *conn = chan->conn;
 	struct hci_conn *hcon = conn->hcon;
 
-	BT_DBG("chan %p", chan);
+	BT_DBG("chan %pK", chan);
 
 	if (hcon->type == ACL_LINK) {
 		bredr_pairing(chan);
@@ -3010,7 +3010,7 @@
 	struct l2cap_conn *conn = chan->conn;
 	struct hci_conn *hcon = conn->hcon;
 
-	BT_DBG("chan %p", chan);
+	BT_DBG("chan %pK", chan);
 
 	/* No need to call l2cap_chan_hold() here since we already own
 	 * the reference taken in smp_new_conn_cb(). This is just the
@@ -3028,7 +3028,7 @@
 {
 	int err;
 
-	BT_DBG("chan %p", chan);
+	BT_DBG("chan %pK", chan);
 
 	err = smp_sig_channel(chan, skb);
 	if (err) {
@@ -3080,7 +3080,7 @@
 {
 	struct l2cap_chan *chan;
 
-	BT_DBG("pchan %p", pchan);
+	BT_DBG("pchan %pK", pchan);
 
 	chan = l2cap_chan_create();
 	if (!chan)
@@ -3101,7 +3101,7 @@
 	 */
 	atomic_set(&chan->nesting, L2CAP_NESTING_SMP);
 
-	BT_DBG("created chan %p", chan);
+	BT_DBG("created chan %pK", chan);
 
 	return chan;
 }
@@ -3206,7 +3206,7 @@
 {
 	struct smp_dev *smp;
 
-	BT_DBG("chan %p", chan);
+	BT_DBG("chan %pK", chan);
 
 	smp = chan->data;
 	if (smp) {
diff -ruw linux-4.4.115/net/bridge/br_device.c linux-4.4.115-fbx/net/bridge/br_device.c
--- linux-4.4.115/net/bridge/br_device.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/net/bridge/br_device.c	2019-01-22 16:16:28.887295162 +0100
@@ -48,16 +48,17 @@
 		return NETDEV_TX_OK;
 	}
 
-	u64_stats_update_begin(&brstats->syncp);
-	brstats->tx_packets++;
-	brstats->tx_bytes += skb->len;
-	u64_stats_update_end(&brstats->syncp);
-
 	BR_INPUT_SKB_CB(skb)->brdev = dev;
 
 	skb_reset_mac_header(skb);
 	skb_pull(skb, ETH_HLEN);
 
+	u64_stats_update_begin(&brstats->syncp);
+	brstats->tx_packets++;
+	/* Exclude ETH_HLEN from byte stats for consistency with Rx chain */
+	brstats->tx_bytes += skb->len;
+	u64_stats_update_end(&brstats->syncp);
+
 	if (!br_allowed_ingress(br, br_vlan_group_rcu(br), skb, &vid))
 		goto out;
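
The hunk above moves the bridge Tx accounting after skb_pull(ETH_HLEN), so tx_bytes counts the same bytes as the Rx path. The counters stay under the u64_stats_sync seqcount; for reference, a consistent reader of such per-CPU stats looks roughly like this (struct shape assumed, mirroring the bridge's per-CPU counters):

#include <linux/u64_stats_sync.h>

struct pcpu_tx_stats_sketch {
	u64 tx_packets;
	u64 tx_bytes;
	struct u64_stats_sync syncp;
};

/* Retry the snapshot while a writer sits between
 * u64_stats_update_begin() and u64_stats_update_end().
 */
static void tx_stats_read(struct pcpu_tx_stats_sketch *s,
			  u64 *packets, u64 *bytes)
{
	unsigned int start;

	do {
		start = u64_stats_fetch_begin(&s->syncp);
		*packets = s->tx_packets;
		*bytes = s->tx_bytes;
	} while (u64_stats_fetch_retry(&s->syncp, start));
}
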
 
diff -ruw linux-4.4.115/net/core/dev.c linux-4.4.115-fbx/net/core/dev.c
--- linux-4.4.115/net/core/dev.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/net/core/dev.c	2019-10-29 09:26:25.793224218 +0100
@@ -137,6 +137,8 @@
 #include <linux/errqueue.h>
 #include <linux/hrtimer.h>
 #include <linux/netfilter_ingress.h>
+#include <linux/tcp.h>
+#include <net/tcp.h>
 
 #include "net-sysfs.h"
 
@@ -1150,8 +1152,6 @@
 	BUG_ON(!dev_net(dev));
 
 	net = dev_net(dev);
-	if (dev->flags & IFF_UP)
-		return -EBUSY;
 
 	write_seqcount_begin(&devnet_rename_seq);
 
@@ -2807,6 +2807,10 @@
 	if (netif_needs_gso(skb, features)) {
 		struct sk_buff *segs;
 
+		__be16 src_port = tcp_hdr(skb)->source;
+		__be16 dest_port = tcp_hdr(skb)->dest;
+
+		trace_print_skb_gso(skb, src_port, dest_port);
 		segs = skb_gso_segment(skb, features);
 		if (IS_ERR(segs)) {
 			goto out_kfree_skb;
@@ -2846,7 +2850,7 @@
 
 struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev)
 {
-	struct sk_buff *next, *head = NULL, *tail;
+	struct sk_buff *next, *head = NULL, *tail = NULL;
 
 	for (; skb != NULL; skb = next) {
 		next = skb->next;
@@ -4159,6 +4163,7 @@
 	}
 
 out:
+	__this_cpu_add(softnet_data.gro_coalesced, NAPI_GRO_CB(skb)->count > 1);
 	return netif_receive_skb_internal(skb);
 }
 
@@ -4201,6 +4206,7 @@
 		unsigned long diffs;
 
 		NAPI_GRO_CB(p)->flush = 0;
+		NAPI_GRO_CB(p)->flush_id = 0;
 
 		if (hash != skb_get_hash_raw(p)) {
 			NAPI_GRO_CB(p)->same_flow = 0;
@@ -4579,6 +4585,24 @@
 }
 EXPORT_SYMBOL(__skb_gro_checksum_complete);
 
+static void net_rps_send_ipi(struct softnet_data *remsd)
+{
+#ifdef CONFIG_RPS
+	while (remsd) {
+		struct softnet_data *next = remsd->rps_ipi_next;
+
+		if (cpu_online(remsd->cpu)) {
+			smp_call_function_single_async(remsd->cpu, &remsd->csd);
+		} else {
+			rps_lock(remsd);
+			remsd->backlog.state = 0;
+			rps_unlock(remsd);
+		}
+		remsd = next;
+	}
+#endif
+}
+
 /*
  * net_rps_action_and_irq_enable sends any pending IPI's for rps.
  * Note: called with local irq disabled, but exits with local irq enabled.
@@ -4594,14 +4618,7 @@
 		local_irq_enable();
 
 		/* Send pending IPI's to kick RPS processing on remote cpus. */
-		while (remsd) {
-			struct softnet_data *next = remsd->rps_ipi_next;
-
-			if (cpu_online(remsd->cpu))
-				smp_call_function_single_async(remsd->cpu,
-							   &remsd->csd);
-			remsd = next;
-		}
+		net_rps_send_ipi(remsd);
 	} else
 #endif
 		local_irq_enable();
@@ -4642,8 +4659,7 @@
 			local_irq_disable();
 			input_queue_head_incr(sd);
 			if (++work >= quota) {
-				local_irq_enable();
-				return work;
+				goto state_changed;
 			}
 		}
 
@@ -4660,14 +4676,17 @@
 			napi->state = 0;
 			rps_unlock(sd);
 
-			break;
+			goto state_changed;
 		}
 
 		skb_queue_splice_tail_init(&sd->input_pkt_queue,
 					   &sd->process_queue);
 		rps_unlock(sd);
 	}
+state_changed:
 	local_irq_enable();
+	napi_gro_flush(napi, false);
+	sd->current_napi = NULL;
 
 	return work;
 }
@@ -4703,10 +4722,13 @@
 
 void __napi_complete(struct napi_struct *n)
 {
+	struct softnet_data *sd = this_cpu_ptr(&softnet_data);
+
 	BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
 
 	list_del_init(&n->poll_list);
 	smp_mb__before_atomic();
+	sd->current_napi = NULL;
 	clear_bit(NAPI_STATE_SCHED, &n->state);
 }
 EXPORT_SYMBOL(__napi_complete);
@@ -4856,6 +4878,15 @@
 }
 EXPORT_SYMBOL(netif_napi_del);
 
+
+struct napi_struct *get_current_napi_context(void)
+{
+	struct softnet_data *sd = this_cpu_ptr(&softnet_data);
+
+	return sd->current_napi;
+}
+EXPORT_SYMBOL(get_current_napi_context);
+
 static int napi_poll(struct napi_struct *n, struct list_head *repoll)
 {
 	void *have;
@@ -4875,6 +4906,9 @@
 	 */
 	work = 0;
 	if (test_bit(NAPI_STATE_SCHED, &n->state)) {
+		struct softnet_data *sd = this_cpu_ptr(&softnet_data);
+
+		sd->current_napi = n;
 		work = n->poll(n, weight);
 		trace_napi_poll(n);
 	}
@@ -6947,7 +6981,7 @@
 			rebroadcast_time = jiffies;
 		}
 
-		msleep(250);
+		msleep(1);
 
 		refcnt = netdev_refcnt_read(dev);
 
@@ -7496,7 +7530,7 @@
 	struct sk_buff **list_skb;
 	struct sk_buff *skb;
 	unsigned int cpu, oldcpu = (unsigned long)ocpu;
-	struct softnet_data *sd, *oldsd;
+	struct softnet_data *sd, *oldsd, *remsd;
 
 	if (action != CPU_DEAD && action != CPU_DEAD_FROZEN)
 		return NOTIFY_OK;
@@ -7540,6 +7574,13 @@
 	raise_softirq_irqoff(NET_TX_SOFTIRQ);
 	local_irq_enable();
 
+#ifdef CONFIG_RPS
+	remsd = oldsd->rps_ipi_list;
+	oldsd->rps_ipi_list = NULL;
+#endif
+	/* send out pending IPI's on offline CPU */
+	net_rps_send_ipi(remsd);
+
 	/* Process offline CPU's input_pkt_queue */
 	while ((skb = __skb_dequeue(&oldsd->process_queue))) {
 		netif_rx_ni(skb);
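
The dev.c changes above also track which NAPI instance is polling on each CPU and export get_current_napi_context() for drivers. A hypothetical caller (helper name and fallback policy invented; assumes the matching declaration this tree adds to netdevice.h):

#include <linux/netdevice.h>

/* Hand the skb to the NAPI context polling on this CPU when there is
 * one; outside a poll, current_napi is NULL after this patch and we
 * fall back to netif_rx().
 */
static void sketch_rx(struct sk_buff *skb)
{
	struct napi_struct *napi = get_current_napi_context();

	if (napi)
		napi_gro_receive(napi, skb);
	else
		netif_rx(skb);
}
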
diff -ruw linux-4.4.115/net/core/flow_dissector.c linux-4.4.115-fbx/net/core/flow_dissector.c
--- linux-4.4.115/net/core/flow_dissector.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/net/core/flow_dissector.c	2019-10-29 09:26:25.797224257 +0100
@@ -18,6 +18,7 @@
 #include <linux/mpls.h>
 #include <net/flow_dissector.h>
 #include <scsi/fc/fc_fcoe.h>
+#include <linux/net_map.h>
 
 static bool dissector_uses_key(const struct flow_dissector *flow_dissector,
 			       enum flow_dissector_key_id key_id)
@@ -338,6 +339,40 @@
 		goto out_good;
 	}
 
+	case __constant_htons(ETH_P_MAP): {
+		struct {
+			struct rmnet_map_header_s map;
+			uint8_t proto;
+		} *map, _map;
+		unsigned int maplen;
+
+		map = skb_header_pointer(skb, nhoff, sizeof(_map), &_map);
+		if (!map)
+			return false;
+
+		/* Is MAP command? */
+		if (map->map.cd_bit)
+			return false;
+
+		/* Is aggregated frame? */
+		maplen = ntohs(map->map.pkt_len);
+		maplen += map->map.pad_len;
+		maplen += sizeof(struct rmnet_map_header_s);
+		if (maplen < skb->len)
+			return false;
+
+		nhoff += sizeof(struct rmnet_map_header_s);
+		switch (map->proto & RMNET_IP_VER_MASK) {
+		case RMNET_IPV4:
+			proto = htons(ETH_P_IP);
+			goto ip;
+		case RMNET_IPV6:
+			proto = htons(ETH_P_IPV6);
+			goto ipv6;
+		default:
+			return false;
+		}
+	}
 	case htons(ETH_P_FCOE):
 		key_control->thoff = (u16)(nhoff + FCOE_HEADER_LEN);
 		/* fall through */
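
The new ETH_P_MAP case bails out on MAP command frames and on aggregated frames, then re-dissects the payload as IPv4 or IPv6 based on the version nibble. It touches three MAP header fields; a sketch of the layout those accesses imply (not the authoritative definition, which this tree keeps in linux/net_map.h):

#include <linux/types.h>

/* Assumed little-endian bitfield layout of rmnet_map_header_s,
 * inferred from the cd_bit/pad_len/pkt_len uses above.
 */
struct rmnet_map_header_sketch {
	u8	pad_len:6;	/* trailing padding bytes */
	u8	reserved:1;
	u8	cd_bit:1;	/* 1 = MAP command, 0 = data */
	u8	mux_id;		/* logical channel */
	__be16	pkt_len;	/* payload length incl. padding */
} __packed;
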
diff -ruw linux-4.4.115/net/core/Makefile linux-4.4.115-fbx/net/core/Makefile
--- linux-4.4.115/net/core/Makefile	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/net/core/Makefile	2019-10-29 09:26:25.789224179 +0100
@@ -24,3 +24,4 @@
 obj-$(CONFIG_CGROUP_NET_PRIO) += netprio_cgroup.o
 obj-$(CONFIG_CGROUP_NET_CLASSID) += netclassid_cgroup.o
 obj-$(CONFIG_LWTUNNEL) += lwtunnel.o
+obj-$(CONFIG_SOCKEV_NLMCAST) += sockev_nlmcast.o
diff -ruw linux-4.4.115/net/core/neighbour.c linux-4.4.115-fbx/net/core/neighbour.c
--- linux-4.4.115/net/core/neighbour.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/net/core/neighbour.c	2019-10-29 09:26:25.801224296 +0100
@@ -687,7 +687,7 @@
 	NEIGH_CACHE_STAT_INC(neigh->tbl, destroys);
 
 	if (!neigh->dead) {
-		pr_warn("Destroying alive neighbour %p\n", neigh);
+		pr_warn("Destroying alive neighbour %pK\n", neigh);
 		dump_stack();
 		return;
 	}
diff -ruw linux-4.4.115/net/core/net-procfs.c linux-4.4.115-fbx/net/core/net-procfs.c
--- linux-4.4.115/net/core/net-procfs.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/net/core/net-procfs.c	2019-01-22 16:16:28.915295415 +0100
@@ -159,10 +159,11 @@
 #endif
 
 	seq_printf(seq,
-		   "%08x %08x %08x %08x %08x %08x %08x %08x %08x %08x %08x\n",
+		   "%08x %08x %08x %08x %08x %08x %08x %08x %08x %08x %08x %08x\n",
 		   sd->processed, sd->dropped, sd->time_squeeze, 0,
 		   0, 0, 0, 0, /* was fastroute */
-		   sd->cpu_collision, sd->received_rps, flow_limit_count);
+		   sd->cpu_collision, sd->received_rps, flow_limit_count,
+		   sd->gro_coalesced);
 	return 0;
 }
 
diff -ruw linux-4.4.115/net/core/net-sysfs.c linux-4.4.115-fbx/net/core/net-sysfs.c
--- linux-4.4.115/net/core/net-sysfs.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/net/core/net-sysfs.c	2019-10-29 09:26:25.801224296 +0100
@@ -18,6 +18,7 @@
 #include <linux/nsproxy.h>
 #include <net/sock.h>
 #include <net/net_namespace.h>
+#include <net/cfg80211.h>
 #include <linux/rtnetlink.h>
 #include <linux/vmalloc.h>
 #include <linux/export.h>
@@ -608,7 +609,24 @@
 };
 
 #if IS_ENABLED(CONFIG_WIRELESS_EXT) || IS_ENABLED(CONFIG_CFG80211)
+static ssize_t show_nl80211_iftype(struct device *dev,
+				   struct device_attribute *attr, char *buf)
+{
+	const struct net_device *netdev = to_net_dev(dev);
+	ssize_t ret = 0;
+
+	if (!rtnl_trylock())
+		return restart_syscall();
+	if (netdev->ieee80211_ptr)
+		ret = sprintf(buf, "%d\n", netdev->ieee80211_ptr->iftype);
+	rtnl_unlock();
+
+	return ret;
+}
+static DEVICE_ATTR(nl80211_iftype, S_IRUGO, show_nl80211_iftype, NULL);
+
 static struct attribute *wireless_attrs[] = {
+	&dev_attr_nl80211_iftype.attr,
 	NULL
 };
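
The nl80211_iftype attribute follows the usual net-sysfs rule: never block on the RTNL from a sysfs read, since an RTNL holder may itself be waiting on sysfs teardown. The trylock-or-restart pattern reduced to a skeleton (helper shape invented):

#include <linux/rtnetlink.h>
#include <linux/sched.h>

static ssize_t sketch_show_under_rtnl(char *buf,
				      ssize_t (*fill)(char *))
{
	ssize_t ret;

	if (!rtnl_trylock())
		return restart_syscall();	/* retry the whole syscall */
	ret = fill(buf);
	rtnl_unlock();
	return ret;
}
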
 
diff -ruw linux-4.4.115/net/core/skbuff.c linux-4.4.115-fbx/net/core/skbuff.c
--- linux-4.4.115/net/core/skbuff.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/net/core/skbuff.c	2019-10-29 09:26:25.809224375 +0100
@@ -208,6 +208,9 @@
 	u8 *data;
 	bool pfmemalloc;
 
+	if (IS_ENABLED(CONFIG_FORCE_ALLOC_FROM_DMA_ZONE))
+		gfp_mask |= GFP_DMA;
+
 	cache = (flags & SKB_ALLOC_FCLONE)
 		? skbuff_fclone_cache : skbuff_head_cache;
 
@@ -358,6 +361,9 @@
 	unsigned long flags;
 	void *data;
 
+	if (IS_ENABLED(CONFIG_FORCE_ALLOC_FROM_DMA_ZONE))
+		gfp_mask |= GFP_DMA;
+
 	local_irq_save(flags);
 	nc = this_cpu_ptr(&netdev_alloc_cache);
 	data = __alloc_page_frag(nc, fragsz, gfp_mask);
@@ -404,6 +410,7 @@
  *
  *	%NULL is returned if there is no free memory.
  */
+#ifndef CONFIG_DISABLE_NET_SKB_FRAG_CACHE
 struct sk_buff *__netdev_alloc_skb(struct net_device *dev, unsigned int len,
 				   gfp_t gfp_mask)
 {
@@ -415,6 +422,9 @@
 
 	len += NET_SKB_PAD;
 
+	if (IS_ENABLED(CONFIG_FORCE_ALLOC_FROM_DMA_ZONE))
+		gfp_mask |= GFP_DMA;
+
 	if ((len > SKB_WITH_OVERHEAD(PAGE_SIZE)) ||
 	    (gfp_mask & (__GFP_DIRECT_RECLAIM | GFP_DMA))) {
 		skb = __alloc_skb(len, gfp_mask, SKB_ALLOC_RX, NUMA_NO_NODE);
@@ -458,6 +468,22 @@
 skb_fail:
 	return skb;
 }
+#else
+struct sk_buff *__netdev_alloc_skb(struct net_device *dev,
+				   unsigned int length, gfp_t gfp_mask)
+{
+	struct sk_buff *skb = NULL;
+
+	skb = __alloc_skb(length + NET_SKB_PAD, gfp_mask,
+			  SKB_ALLOC_RX, NUMA_NO_NODE);
+	if (likely(skb)) {
+		skb_reserve(skb, NET_SKB_PAD);
+		skb->dev = dev;
+	}
+	return skb;
+}
+#endif
+
 EXPORT_SYMBOL(__netdev_alloc_skb);
 
 /**
diff -ruw linux-4.4.115/net/core/sock.c linux-4.4.115-fbx/net/core/sock.c
--- linux-4.4.115/net/core/sock.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/net/core/sock.c	2019-10-29 09:26:25.809224375 +0100
@@ -1439,8 +1439,12 @@
 }
 EXPORT_SYMBOL(sk_alloc);
 
-void sk_destruct(struct sock *sk)
+/* Sockets having SOCK_RCU_FREE will call this function after one RCU
+ * grace period. This is the case for UDP sockets and TCP listeners.
+ */
+static void __sk_destruct(struct rcu_head *head)
 {
+	struct sock *sk = container_of(head, struct sock, sk_rcu);
 	struct sk_filter *filter;
 
 	if (sk->sk_destruct)
@@ -1472,6 +1476,14 @@
 	sk_prot_free(sk->sk_prot_creator, sk);
 }
 
+void sk_destruct(struct sock *sk)
+{
+	if (sock_flag(sk, SOCK_RCU_FREE))
+		call_rcu(&sk->sk_rcu, __sk_destruct);
+	else
+		__sk_destruct(&sk->sk_rcu);
+}
+
 static void __sk_free(struct sock *sk)
 {
 	if (unlikely(sock_diag_has_destroy_listeners(sk) && sk->sk_net_refcnt))
@@ -2397,8 +2409,11 @@
 		sk->sk_type	=	sock->type;
 		sk->sk_wq	=	sock->wq;
 		sock->sk	=	sk;
-	} else
+		sk->sk_uid	=	SOCK_INODE(sock)->i_uid;
+	} else {
 		sk->sk_wq	=	NULL;
+		sk->sk_uid	=	make_kuid(sock_net(sk)->user_ns, 0);
+	}
 
 	rwlock_init(&sk->sk_callback_lock);
 	lockdep_set_class_and_name(&sk->sk_callback_lock,
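
sk_destruct() now defers the actual free through call_rcu() whenever SOCK_RCU_FREE is set, so lockless readers can dereference a socket found under rcu_read_lock() without taking a refcount. A protocol opts in with one flag at hash-insert time (a sketch; the surrounding lookup machinery is assumed):

#include <net/sock.h>

static void sketch_hash_sock(struct sock *sk)
{
	/* Ensure the sock body survives until after an RCU grace
	 * period once it is visible to lockless lookups.
	 */
	sock_set_flag(sk, SOCK_RCU_FREE);
	/* ... insert sk into an RCU-protected hash table ... */
}
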
diff -ruw linux-4.4.115/net/core/sock_diag.c linux-4.4.115-fbx/net/core/sock_diag.c
--- linux-4.4.115/net/core/sock_diag.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/net/core/sock_diag.c	2019-01-22 16:16:28.927295524 +0100
@@ -214,7 +214,7 @@
 }
 EXPORT_SYMBOL_GPL(sock_diag_unregister);
 
-static int __sock_diag_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
+static int __sock_diag_cmd(struct sk_buff *skb, struct nlmsghdr *nlh)
 {
 	int err;
 	struct sock_diag_req *req = nlmsg_data(nlh);
@@ -234,8 +234,12 @@
 	hndl = sock_diag_handlers[req->sdiag_family];
 	if (hndl == NULL)
 		err = -ENOENT;
-	else
+	else if (nlh->nlmsg_type == SOCK_DIAG_BY_FAMILY)
 		err = hndl->dump(skb, nlh);
+	else if (nlh->nlmsg_type == SOCK_DESTROY_BACKPORT && hndl->destroy)
+		err = hndl->destroy(skb, nlh);
+	else
+		err = -EOPNOTSUPP;
 	mutex_unlock(&sock_diag_table_mutex);
 
 	return err;
@@ -261,7 +265,8 @@
 
 		return ret;
 	case SOCK_DIAG_BY_FAMILY:
-		return __sock_diag_rcv_msg(skb, nlh);
+	case SOCK_DESTROY_BACKPORT:
+		return __sock_diag_cmd(skb, nlh);
 	default:
 		return -EINVAL;
 	}
@@ -295,6 +300,18 @@
 	return 0;
 }
 
+int sock_diag_destroy(struct sock *sk, int err)
+{
+	if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
+		return -EPERM;
+
+	if (!sk->sk_prot->diag_destroy)
+		return -EOPNOTSUPP;
+
+	return sk->sk_prot->diag_destroy(sk, err);
+}
+EXPORT_SYMBOL_GPL(sock_diag_destroy);
+
 static int __net_init diag_net_init(struct net *net)
 {
 	struct netlink_kernel_cfg cfg = {
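
The sock_diag.c hunks backport SOCK_DESTROY: __sock_diag_cmd() routes both message types through the family handler, and sock_diag_destroy() gates the operation on CAP_NET_ADMIN plus a per-protocol diag_destroy hook. A family wires in roughly like this (mirroring the upstream inet_diag arrangement; names assumed):

#include <linux/sock_diag.h>

static int sketch_diag_cmd(struct sk_buff *skb, struct nlmsghdr *nlh);

static const struct sock_diag_handler sketch_diag_handler = {
	.family  = AF_INET,
	.dump    = sketch_diag_cmd,	/* SOCK_DIAG_BY_FAMILY */
	.destroy = sketch_diag_cmd,	/* SOCK_DESTROY_BACKPORT */
};
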
diff -ruw linux-4.4.115/net/ipv4/af_inet.c linux-4.4.115-fbx/net/ipv4/af_inet.c
--- linux-4.4.115/net/ipv4/af_inet.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/net/ipv4/af_inet.c	2019-10-29 09:26:25.821224492 +0100
@@ -89,6 +89,7 @@
 #include <linux/netfilter_ipv4.h>
 #include <linux/random.h>
 #include <linux/slab.h>
+#include <linux/netfilter/xt_qtaguid.h>
 
 #include <asm/uaccess.h>
 
@@ -121,6 +122,21 @@
 #endif
 #include <net/l3mdev.h>
 
+#ifdef CONFIG_ANDROID_PARANOID_NETWORK
+#include <linux/android_aid.h>
+
+static inline int current_has_network(void)
+{
+	return in_egroup_p(AID_INET) || capable(CAP_NET_RAW);
+}
+#else
+static inline int current_has_network(void)
+{
+	return 1;
+}
+#endif
+
+int sysctl_reserved_port_bind __read_mostly = 1;
 
 /* The inetsw table contains everything that inet_create needs to
  * build a new socket.
@@ -260,6 +276,9 @@
 	if (protocol < 0 || protocol >= IPPROTO_MAX)
 		return -EINVAL;
 
+	if (!current_has_network())
+		return -EACCES;
+
 	sock->state = SS_UNCONNECTED;
 
 	/* Look for the requested type/protocol pair. */
@@ -308,8 +327,7 @@
 	}
 
 	err = -EPERM;
-	if (sock->type == SOCK_RAW && !kern &&
-	    !ns_capable(net->user_ns, CAP_NET_RAW))
+	if (sock->type == SOCK_RAW && !kern && !capable(CAP_NET_RAW))
 		goto out_rcu_unlock;
 
 	sock->ops = answer->ops;
@@ -398,6 +416,9 @@
 	if (sk) {
 		long timeout;
 
+#ifdef CONFIG_NETFILTER_XT_MATCH_QTAGUID
+		qtaguid_untag(sock, true);
+#endif
 		/* Applications forget to leave groups before exiting */
 		ip_mc_drop_socket(sk);
 
@@ -1327,6 +1348,7 @@
 
 	for (p = *head; p; p = p->next) {
 		struct iphdr *iph2;
+		u16 flush_id;
 
 		if (!NAPI_GRO_CB(p)->same_flow)
 			continue;
@@ -1350,14 +1372,24 @@
 			(iph->tos ^ iph2->tos) |
 			((iph->frag_off ^ iph2->frag_off) & htons(IP_DF));
 
-		/* Save the IP ID check to be included later when we get to
-		 * the transport layer so only the inner most IP ID is checked.
-		 * This is because some GSO/TSO implementations do not
-		 * correctly increment the IP ID for the outer hdrs.
-		 */
-		NAPI_GRO_CB(p)->flush_id =
-			    ((u16)(ntohs(iph2->id) + NAPI_GRO_CB(p)->count) ^ id);
 		NAPI_GRO_CB(p)->flush |= flush;
+
+		/* We must save the offset as it is possible to have multiple
+		 * flows using the same protocol and address pairs so we
+		 * need to wait until we can validate this is part of the
+		 * same flow with a 5-tuple or better to avoid unnecessary
+		 * collisions between flows.  We can support one of two
+		 * possible scenarios, either a fixed value with DF bit set
+		 * or an incrementing value with DF either set or unset.
+		 * In the case of a fixed value we will end up losing the
+		 * data that the IP ID was a fixed value, however per RFC
+		 * 6864 in such a case the actual value of the IP ID is
+		 * meant to be ignored anyway.
+		 */
+		flush_id = (u16)(id - ntohs(iph2->id));
+		if (flush_id || !(iph2->frag_off & htons(IP_DF)))
+			NAPI_GRO_CB(p)->flush_id |= flush_id ^
+						    NAPI_GRO_CB(p)->count;
 	}
 
 	NAPI_GRO_CB(skb)->flush |= flush;
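
The rewritten flush_id accumulation accepts the two IP ID patterns RFC 6864 allows: an ID that increments per segment, or a fixed ID with DF set. A standalone model of the residue OR-ed into NAPI_GRO_CB(p)->flush_id (plain C; names invented):

#include <stdbool.h>
#include <stdint.h>

static uint16_t flush_id_residue(uint16_t new_id, uint16_t first_id,
				 uint16_t merged_count, bool df_set)
{
	uint16_t delta = (uint16_t)(new_id - first_id);

	/* In-order increment: delta == merged_count, residue 0. */
	if (delta || !df_set)
		return delta ^ merged_count;

	return 0;	/* fixed ID with DF set: ignored per RFC 6864 */
}

A fixed ID without DF leaves merged_count itself in the residue, which the transport-layer GRO check later treats as a reason not to coalesce.
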
diff -ruw linux-4.4.115/net/ipv4/devinet.c linux-4.4.115-fbx/net/ipv4/devinet.c
--- linux-4.4.115/net/ipv4/devinet.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/net/ipv4/devinet.c	2019-10-29 09:26:25.825224531 +0100
@@ -2196,6 +2196,8 @@
 					      "promote_secondaries"),
 		DEVINET_SYSCTL_FLUSHING_ENTRY(ROUTE_LOCALNET,
 					      "route_localnet"),
+		DEVINET_SYSCTL_RW_ENTRY(NF_IPV4_DEFRAG_SKIP,
+					"nf_ipv4_defrag_skip"),
 	},
 };
 
diff -ruw linux-4.4.115/net/ipv4/fib_frontend.c linux-4.4.115-fbx/net/ipv4/fib_frontend.c
--- linux-4.4.115/net/ipv4/fib_frontend.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/net/ipv4/fib_frontend.c	2019-10-29 09:26:25.825224531 +0100
@@ -627,6 +627,7 @@
 	[RTA_FLOW]		= { .type = NLA_U32 },
 	[RTA_ENCAP_TYPE]	= { .type = NLA_U16 },
 	[RTA_ENCAP]		= { .type = NLA_NESTED },
+	[RTA_UID]		= { .type = NLA_U32 },
 };
 
 static int rtm_to_fib_config(struct net *net, struct sk_buff *skb,
diff -ruw linux-4.4.115/net/ipv4/fib_trie.c linux-4.4.115-fbx/net/ipv4/fib_trie.c
--- linux-4.4.115/net/ipv4/fib_trie.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/net/ipv4/fib_trie.c	2019-10-29 09:26:25.829224570 +0100
@@ -1694,7 +1694,7 @@
 	lt = (struct trie *)local_tb->tb_data;
 
 	while ((l = leaf_walk_rcu(&tp, key)) != NULL) {
-		struct key_vector *local_l = NULL, *local_tp;
+		struct key_vector *local_l = NULL, *local_tp = NULL;
 
 		hlist_for_each_entry_rcu(fa, &l->leaf, fa_list) {
 			struct fib_alias *new_fa;
diff -ruw linux-4.4.115/net/ipv4/icmp.c linux-4.4.115-fbx/net/ipv4/icmp.c
--- linux-4.4.115/net/ipv4/icmp.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/net/ipv4/icmp.c	2019-10-29 09:26:25.829224570 +0100
@@ -425,6 +425,7 @@
 	fl4.daddr = daddr;
 	fl4.saddr = saddr;
 	fl4.flowi4_mark = mark;
+	fl4.flowi4_uid = sock_net_uid(net, NULL);
 	fl4.flowi4_tos = RT_TOS(ip_hdr(skb)->tos);
 	fl4.flowi4_proto = IPPROTO_ICMP;
 	fl4.flowi4_oif = l3mdev_master_ifindex(skb->dev);
@@ -473,6 +474,7 @@
 		      param->replyopts.opt.opt.faddr : iph->saddr);
 	fl4->saddr = saddr;
 	fl4->flowi4_mark = mark;
+	fl4->flowi4_uid = sock_net_uid(net, NULL);
 	fl4->flowi4_tos = RT_TOS(tos);
 	fl4->flowi4_proto = IPPROTO_ICMP;
 	fl4->fl4_icmp_type = type;
diff -ruw linux-4.4.115/net/ipv4/inet_connection_sock.c linux-4.4.115-fbx/net/ipv4/inet_connection_sock.c
--- linux-4.4.115/net/ipv4/inet_connection_sock.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/net/ipv4/inet_connection_sock.c	2019-10-29 09:26:25.829224570 +0100
@@ -179,6 +179,13 @@
 		head = &hashinfo->bhash[inet_bhashfn(net, snum,
 				hashinfo->bhash_size)];
 		spin_lock(&head->lock);
+
+		if (inet_is_local_reserved_port(net, snum) &&
+		    !sysctl_reserved_port_bind) {
+			ret = 1;
+			goto fail_unlock;
+		}
+
 		inet_bind_bucket_for_each(tb, &head->chain)
 			if (net_eq(ib_net(tb), net) && tb->port == snum)
 				goto tb_found;
@@ -422,7 +429,7 @@
 			   sk->sk_protocol, inet_sk_flowi_flags(sk),
 			   (opt && opt->opt.srr) ? opt->opt.faddr : ireq->ir_rmt_addr,
 			   ireq->ir_loc_addr, ireq->ir_rmt_port,
-			   htons(ireq->ir_num));
+			   htons(ireq->ir_num), sk->sk_uid);
 	security_req_classify_flow(req, flowi4_to_flowi(fl4));
 	rt = ip_route_output_flow(net, fl4, sk);
 	if (IS_ERR(rt))
@@ -458,7 +465,7 @@
 			   sk->sk_protocol, inet_sk_flowi_flags(sk),
 			   (opt && opt->opt.srr) ? opt->opt.faddr : ireq->ir_rmt_addr,
 			   ireq->ir_loc_addr, ireq->ir_rmt_port,
-			   htons(ireq->ir_num));
+			   htons(ireq->ir_num), sk->sk_uid);
 	security_req_classify_flow(req, flowi4_to_flowi(fl4));
 	rt = ip_route_output_flow(net, fl4, sk);
 	if (IS_ERR(rt))
diff -ruw linux-4.4.115/net/ipv4/inet_lro.c linux-4.4.115-fbx/net/ipv4/inet_lro.c
--- linux-4.4.115/net/ipv4/inet_lro.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/net/ipv4/inet_lro.c	2019-01-22 16:16:28.955295778 +0100
@@ -145,19 +145,36 @@
 }
 
 static void lro_init_desc(struct net_lro_desc *lro_desc, struct sk_buff *skb,
-			  struct iphdr *iph, struct tcphdr *tcph)
+			  struct iphdr *iph, struct tcphdr *tcph,
+			  struct net_lro_info *lro_info)
 {
 	int nr_frags;
 	__be32 *ptr;
 	u32 tcp_data_len = TCP_PAYLOAD_LENGTH(iph, tcph);
+	u64 hw_marked = 0;
+
+	if (lro_info)
+		hw_marked = lro_info->valid_fields;
 
 	nr_frags = skb_shinfo(skb)->nr_frags;
 	lro_desc->parent = skb;
 	lro_desc->next_frag = &(skb_shinfo(skb)->frags[nr_frags]);
 	lro_desc->iph = iph;
 	lro_desc->tcph = tcph;
+
+	if (hw_marked & LRO_TCP_SEQ_NUM)
+		lro_desc->tcp_next_seq = lro_info->tcp_seq_num + tcp_data_len;
+	else
 	lro_desc->tcp_next_seq = ntohl(tcph->seq) + tcp_data_len;
+
+	if (hw_marked & LRO_TCP_ACK_NUM)
+		lro_desc->tcp_ack = htonl(lro_info->tcp_ack_num);
+	else
 	lro_desc->tcp_ack = tcph->ack_seq;
+
+	if (hw_marked & LRO_TCP_WIN)
+		lro_desc->tcp_window = htons(lro_info->tcp_win);
+	else
 	lro_desc->tcp_window = tcph->window;
 
 	lro_desc->pkt_aggr_cnt = 1;
@@ -173,6 +190,9 @@
 	lro_desc->mss = tcp_data_len;
 	lro_desc->active = 1;
 
+	if (hw_marked & LRO_TCP_DATA_CSUM)
+		lro_desc->data_csum = lro_info->tcp_data_csum;
+	else
 	lro_desc->data_csum = lro_tcp_data_csum(iph, tcph,
 						tcp_data_len);
 }
@@ -183,15 +203,28 @@
 }
 
 static void lro_add_common(struct net_lro_desc *lro_desc, struct iphdr *iph,
-			   struct tcphdr *tcph, int tcp_data_len)
+			   struct tcphdr *tcph, int tcp_data_len,
+			   struct net_lro_info *lro_info)
 {
 	struct sk_buff *parent = lro_desc->parent;
 	__be32 *topt;
+	u64 hw_marked = 0;
+
+	if (lro_info)
+		hw_marked = lro_info->valid_fields;
 
 	lro_desc->pkt_aggr_cnt++;
 	lro_desc->ip_tot_len += tcp_data_len;
 	lro_desc->tcp_next_seq += tcp_data_len;
+
+	if (hw_marked & LRO_TCP_WIN)
+		lro_desc->tcp_window = htons(lro_info->tcp_win);
+	else
 	lro_desc->tcp_window = tcph->window;
+
+	if (hw_marked & LRO_TCP_ACK_NUM)
+		lro_desc->tcp_ack = htonl(lro_info->tcp_ack_num);
+	else
 	lro_desc->tcp_ack = tcph->ack_seq;
 
 	/* don't update tcp_rcv_tsval, would not work with PAWS */
@@ -200,8 +233,15 @@
 		lro_desc->tcp_rcv_tsecr = *(topt + 2);
 	}
 
+	if (hw_marked & LRO_TCP_DATA_CSUM)
 	lro_desc->data_csum = csum_block_add(lro_desc->data_csum,
-					     lro_tcp_data_csum(iph, tcph,
+						     lro_info->tcp_data_csum,
+						     parent->len);
+	else
+		lro_desc->data_csum =
+			csum_block_add(lro_desc->data_csum,
+				       lro_tcp_data_csum(iph,
+							 tcph,
 							       tcp_data_len),
 					     parent->len);
 
@@ -212,12 +252,13 @@
 }
 
 static void lro_add_packet(struct net_lro_desc *lro_desc, struct sk_buff *skb,
-			   struct iphdr *iph, struct tcphdr *tcph)
+			   struct iphdr *iph, struct tcphdr *tcph,
+			   struct net_lro_info *lro_info)
 {
 	struct sk_buff *parent = lro_desc->parent;
 	int tcp_data_len = TCP_PAYLOAD_LENGTH(iph, tcph);
 
-	lro_add_common(lro_desc, iph, tcph, tcp_data_len);
+	lro_add_common(lro_desc, iph, tcph, tcp_data_len, lro_info);
 
 	skb_pull(skb, (skb->len - tcp_data_len));
 	parent->truesize += skb->truesize;
@@ -230,6 +271,29 @@
 	lro_desc->last_skb = skb;
 }
 
+static void lro_add_frags(struct net_lro_desc *lro_desc,
+			  int len, int hlen, int truesize,
+			  struct skb_frag_struct *skb_frags,
+			  struct iphdr *iph, struct tcphdr *tcph)
+{
+	struct sk_buff *skb = lro_desc->parent;
+	int tcp_data_len = TCP_PAYLOAD_LENGTH(iph, tcph);
+
+	lro_add_common(lro_desc, iph, tcph, tcp_data_len, NULL);
+
+	skb->truesize += truesize;
+
+	skb_frags[0].page_offset += hlen;
+	skb_frag_size_sub(&skb_frags[0], hlen);
+
+	while (tcp_data_len > 0) {
+		*lro_desc->next_frag = *skb_frags;
+		tcp_data_len -= skb_frag_size(skb_frags);
+		lro_desc->next_frag++;
+		skb_frags++;
+		skb_shinfo(skb)->nr_frags++;
+	}
+}
 
 static int lro_check_tcp_conn(struct net_lro_desc *lro_desc,
 			      struct iphdr *iph,
@@ -284,6 +348,8 @@
 
 	if (lro_mgr->features & LRO_F_NAPI)
 		netif_receive_skb(lro_desc->parent);
+	else if (lro_mgr->features & LRO_F_NI)
+		netif_rx_ni(lro_desc->parent);
 	else
 		netif_rx(lro_desc->parent);
 
@@ -292,12 +358,13 @@
 }
 
 static int __lro_proc_skb(struct net_lro_mgr *lro_mgr, struct sk_buff *skb,
-			  void *priv)
+			  void *priv, struct net_lro_info *lro_info)
 {
 	struct net_lro_desc *lro_desc;
 	struct iphdr *iph;
 	struct tcphdr *tcph;
 	u64 flags;
+	u64 hw_marked = 0;
 	int vlan_hdr_len = 0;
 
 	if (!lro_mgr->get_skb_header ||
@@ -308,7 +375,14 @@
 	if (!(flags & LRO_IPV4) || !(flags & LRO_TCP))
 		goto out;
 
+	if (lro_info)
+		hw_marked = lro_info->valid_fields;
+
+	if (hw_marked & LRO_DESC)
+		lro_desc = lro_info->lro_desc;
+	else
 	lro_desc = lro_get_desc(lro_mgr, lro_mgr->lro_arr, iph, tcph);
+
 	if (!lro_desc)
 		goto out;
 
@@ -317,22 +391,38 @@
 		vlan_hdr_len = VLAN_HLEN;
 
 	if (!lro_desc->active) { /* start new lro session */
-		if (lro_tcp_ip_check(iph, tcph, skb->len - vlan_hdr_len, NULL))
+		if (hw_marked & LRO_ELIGIBILITY_CHECKED) {
+			if (!lro_info->lro_eligible)
 			goto out;
+		} else {
+			if (lro_tcp_ip_check(iph, tcph,
+					     skb->len - vlan_hdr_len, NULL))
+				goto out;
+		}
 
 		skb->ip_summed = lro_mgr->ip_summed_aggr;
-		lro_init_desc(lro_desc, skb, iph, tcph);
+		lro_init_desc(lro_desc, skb, iph, tcph, lro_info);
 		LRO_INC_STATS(lro_mgr, aggregated);
 		return 0;
 	}
 
+	if (hw_marked & LRO_TCP_SEQ_NUM) {
+		if (lro_desc->tcp_next_seq != lro_info->tcp_seq_num)
+			goto out2;
+	} else {
 	if (lro_desc->tcp_next_seq != ntohl(tcph->seq))
 		goto out2;
+	}
 
+	if (hw_marked & LRO_ELIGIBILITY_CHECKED) {
+		if (!lro_info->lro_eligible)
+			goto out2;
+	} else {
 	if (lro_tcp_ip_check(iph, tcph, skb->len, lro_desc))
 		goto out2;
+	}
 
-	lro_add_packet(lro_desc, skb, iph, tcph);
+	lro_add_packet(lro_desc, skb, iph, tcph, lro_info);
 	LRO_INC_STATS(lro_mgr, aggregated);
 
 	if ((lro_desc->pkt_aggr_cnt >= lro_mgr->max_aggr) ||
@@ -348,19 +438,161 @@
 	return 1;
 }
 
+static struct sk_buff *lro_gen_skb(struct net_lro_mgr *lro_mgr,
+				   struct skb_frag_struct *frags,
+				   int len, int true_size,
+				   void *mac_hdr,
+				   int hlen, __wsum sum,
+				   u32 ip_summed)
+{
+	struct sk_buff *skb;
+	struct skb_frag_struct *skb_frags;
+	int data_len = len;
+	int hdr_len = min(len, hlen);
+
+	skb = netdev_alloc_skb(lro_mgr->dev, hlen + lro_mgr->frag_align_pad);
+	if (!skb)
+		return NULL;
+
+	skb_reserve(skb, lro_mgr->frag_align_pad);
+	skb->len = len;
+	skb->data_len = len - hdr_len;
+	skb->truesize += true_size;
+	skb->tail += hdr_len;
+
+	memcpy(skb->data, mac_hdr, hdr_len);
+
+	skb_frags = skb_shinfo(skb)->frags;
+	while (data_len > 0) {
+		*skb_frags = *frags;
+		data_len -= skb_frag_size(frags);
+		skb_frags++;
+		frags++;
+		skb_shinfo(skb)->nr_frags++;
+	}
+
+	skb_shinfo(skb)->frags[0].page_offset += hdr_len;
+	skb_frag_size_sub(&skb_shinfo(skb)->frags[0], hdr_len);
+
+	skb->ip_summed = ip_summed;
+	skb->csum = sum;
+	skb->protocol = eth_type_trans(skb, lro_mgr->dev);
+	return skb;
+}
+
+static struct sk_buff *__lro_proc_segment(struct net_lro_mgr *lro_mgr,
+					  struct skb_frag_struct *frags,
+					  int len, int true_size,
+					  void *priv, __wsum sum)
+{
+	struct net_lro_desc *lro_desc;
+	struct iphdr *iph;
+	struct tcphdr *tcph;
+	struct sk_buff *skb;
+	u64 flags;
+	void *mac_hdr;
+	int mac_hdr_len;
+	int hdr_len = LRO_MAX_PG_HLEN;
+	int vlan_hdr_len = 0;
+
+	if (!lro_mgr->get_frag_header ||
+	    lro_mgr->get_frag_header(frags, (void *)&mac_hdr, (void *)&iph,
+				     (void *)&tcph, &flags, priv)) {
+		mac_hdr = skb_frag_address(frags);
+		goto out1;
+	}
+
+	if (!(flags & LRO_IPV4) || !(flags & LRO_TCP))
+		goto out1;
+
+	hdr_len = (int)((void *)(tcph) + TCP_HDR_LEN(tcph) - mac_hdr);
+	mac_hdr_len = (int)((void *)(iph) - mac_hdr);
+
+	lro_desc = lro_get_desc(lro_mgr, lro_mgr->lro_arr, iph, tcph);
+	if (!lro_desc)
+		goto out1;
+
+	if (!lro_desc->active) { /* start new lro session */
+		if (lro_tcp_ip_check(iph, tcph, len - mac_hdr_len, NULL))
+			goto out1;
+
+		skb = lro_gen_skb(lro_mgr, frags, len, true_size, mac_hdr,
+				  hdr_len, 0, lro_mgr->ip_summed_aggr);
+		if (!skb)
+			goto out;
+
+		if ((skb->protocol == htons(ETH_P_8021Q)) &&
+		    !(lro_mgr->features & LRO_F_EXTRACT_VLAN_ID))
+			vlan_hdr_len = VLAN_HLEN;
+
+		iph = (void *)(skb->data + vlan_hdr_len);
+		tcph = (void *)((u8 *)skb->data + vlan_hdr_len
+				+ IP_HDR_LEN(iph));
+
+		lro_init_desc(lro_desc, skb, iph, tcph, NULL);
+		LRO_INC_STATS(lro_mgr, aggregated);
+		return NULL;
+	}
+
+	if (lro_desc->tcp_next_seq != ntohl(tcph->seq))
+		goto out2;
+
+	if (lro_tcp_ip_check(iph, tcph, len - mac_hdr_len, lro_desc))
+		goto out2;
+
+	lro_add_frags(lro_desc, len, hdr_len, true_size, frags, iph, tcph);
+	LRO_INC_STATS(lro_mgr, aggregated);
+
+	if ((skb_shinfo(lro_desc->parent)->nr_frags >= lro_mgr->max_aggr) ||
+	    lro_desc->parent->len > (0xFFFF - lro_mgr->dev->mtu))
+		lro_flush(lro_mgr, lro_desc);
+
+	return NULL;
+
+out2: /* send aggregated packets to the stack */
+	lro_flush(lro_mgr, lro_desc);
+
+out1:  /* Original packet has to be posted to the stack */
+	skb = lro_gen_skb(lro_mgr, frags, len, true_size, mac_hdr,
+			  hdr_len, sum, lro_mgr->ip_summed);
+out:
+	return skb;
+}
+
 void lro_receive_skb(struct net_lro_mgr *lro_mgr,
 		     struct sk_buff *skb,
 		     void *priv)
 {
-	if (__lro_proc_skb(lro_mgr, skb, priv)) {
+	if (__lro_proc_skb(lro_mgr, skb, priv, NULL)) {
 		if (lro_mgr->features & LRO_F_NAPI)
 			netif_receive_skb(skb);
+		else if (lro_mgr->features & LRO_F_NI)
+			netif_rx_ni(skb);
 		else
 			netif_rx(skb);
 	}
 }
 EXPORT_SYMBOL(lro_receive_skb);
 
+void lro_receive_frags(struct net_lro_mgr *lro_mgr,
+		       struct skb_frag_struct *frags,
+		       int len, int true_size, void *priv, __wsum sum)
+{
+	struct sk_buff *skb;
+
+	skb = __lro_proc_segment(lro_mgr, frags, len, true_size, priv, sum);
+	if (!skb)
+		return;
+
+	if (lro_mgr->features & LRO_F_NAPI)
+		netif_receive_skb(skb);
+	else if (lro_mgr->features & LRO_F_NI)
+		netif_rx_ni(skb);
+	else
+		netif_rx(skb);
+}
+EXPORT_SYMBOL(lro_receive_frags);
+
 void lro_flush_all(struct net_lro_mgr *lro_mgr)
 {
 	int i;
@@ -372,3 +604,35 @@
 	}
 }
 EXPORT_SYMBOL(lro_flush_all);
+
+void lro_flush_pkt(struct net_lro_mgr *lro_mgr, struct iphdr *iph,
+		   struct tcphdr *tcph)
+{
+	struct net_lro_desc *lro_desc;
+
+	lro_desc = lro_get_desc(lro_mgr, lro_mgr->lro_arr, iph, tcph);
+	if (lro_desc && lro_desc->active)
+		lro_flush(lro_mgr, lro_desc);
+}
+EXPORT_SYMBOL(lro_flush_pkt);
+
+void lro_flush_desc(struct net_lro_mgr *lro_mgr, struct net_lro_desc *lro_desc)
+{
+	if (lro_desc->active)
+		lro_flush(lro_mgr, lro_desc);
+}
+EXPORT_SYMBOL(lro_flush_desc);
+
+void lro_receive_skb_ext(struct net_lro_mgr *lro_mgr, struct sk_buff *skb,
+			 void *priv, struct net_lro_info *lro_info)
+{
+	if (__lro_proc_skb(lro_mgr, skb, priv, lro_info)) {
+		if (lro_mgr->features & LRO_F_NAPI)
+			netif_receive_skb(skb);
+		else if (lro_mgr->features & LRO_F_NI)
+			netif_rx_ni(skb);
+		else
+			netif_rx(skb);
+	}
+}
+EXPORT_SYMBOL(lro_receive_skb_ext);
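
The inet_lro changes above let a NIC that pre-parses TCP hand its results to the stack instead of having lro_init_desc()/lro_add_common() re-derive them from the headers, and add an LRO_F_NI delivery mode plus per-packet flush helpers. A minimal driver-side sketch, assuming an invented descriptor layout (my_priv and my_rx_desc are hypothetical; the net_lro_info field and flag names follow the hunk):

	/* Hypothetical RX path: forward hardware-parsed TCP fields via the
	 * new lro_receive_skb_ext() entry point. Any field not flagged in
	 * valid_fields is re-parsed from the packet headers as before.
	 */
	static void my_rx_one(struct my_priv *priv, struct sk_buff *skb,
			      const struct my_rx_desc *rxd)
	{
		struct net_lro_info info = {
			.valid_fields = LRO_TCP_SEQ_NUM | LRO_TCP_ACK_NUM |
					LRO_TCP_WIN,
			.tcp_seq_num  = rxd->tcp_seq,
			.tcp_ack_num  = rxd->tcp_ack,
			.tcp_win      = rxd->tcp_win,
		};

		lro_receive_skb_ext(&priv->lro_mgr, skb, priv, &info);
	}
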
diff -ruw linux-4.4.115/net/ipv4/ipconfig.c linux-4.4.115-fbx/net/ipv4/ipconfig.c
--- linux-4.4.115/net/ipv4/ipconfig.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/net/ipv4/ipconfig.c	2019-10-29 09:26:25.837224649 +0100
@@ -193,15 +193,61 @@
 static struct ic_device *ic_first_dev __initdata;	/* List of open device */
 static struct net_device *ic_dev __initdata;		/* Selected device */
 
-static bool __init ic_is_init_dev(struct net_device *dev)
+static bool __init ic_is_init_dev(struct net_device *dev, bool partial)
 {
+	char *p = NULL;
+	bool ret;
+
 	if (dev->flags & IFF_LOOPBACK)
 		return false;
-	return user_dev_name[0] ? !strcmp(dev->name, user_dev_name) :
+
+	if (partial) {
+		p = strchr(user_dev_name, '.');
+		if (p)
+			*p = 0;
+	}
+
+	ret = false;
+	if (user_dev_name[0] ? !strcmp(dev->name, user_dev_name) :
 	    (!(dev->flags & IFF_LOOPBACK) &&
 	     (dev->flags & (IFF_POINTOPOINT|IFF_BROADCAST)) &&
-	     strncmp(dev->name, "dummy", 5));
+	     strncmp(dev->name, "dummy", 5)))
+		ret = true;
+	if (p)
+		*p = '.';
+	return ret;
+}
+
+#ifdef CONFIG_VLAN_8021Q
+int register_vlan_device(struct net_device *real_dev, u16 vlan_id);
+
+static void __init prepare_vlan(void)
+{
+	unsigned short oflags;
+	struct net_device *dev;
+	char *p;
+	u16 vid;
+
+	if (!strchr(user_dev_name, '.'))
+		return;
+
+	p = strchr(user_dev_name, '.');
+	*p = 0;
+	vid = simple_strtoul(p + 1, NULL, 10);
+	dev = __dev_get_by_name(&init_net, user_dev_name);
+	if (!dev)
+		goto fail;
+
+	oflags = dev->flags;
+	if (dev_change_flags(dev, oflags | IFF_UP) < 0)
+		goto fail;
+
+	register_vlan_device(dev, vid);
+
+fail:
+	*p = '.';
 }
+#endif
 
 static int __init ic_open_devs(void)
 {
@@ -221,8 +267,13 @@
 			pr_err("IP-Config: Failed to open %s\n", dev->name);
 	}
 
+#ifdef CONFIG_VLAN_8021Q
+	/* register vlan device if needed */
+	prepare_vlan();
+#endif
+
 	for_each_netdev(&init_net, dev) {
-		if (ic_is_init_dev(dev)) {
+		if (ic_is_init_dev(dev, false)) {
 			int able = 0;
 			if (dev->mtu >= 364)
 				able |= IC_BOOTP;
@@ -271,10 +322,12 @@
 		int wait, elapsed;
 
 		for_each_netdev(&init_net, dev)
-			if (ic_is_init_dev(dev) && netif_carrier_ok(dev))
+			if (ic_is_init_dev(dev, false) && netif_carrier_ok(dev))
 				goto have_carrier;
 
+		rtnl_unlock();
 		msleep(1);
+		rtnl_lock();
 
 		if (time_before(jiffies, next_msg))
 			continue;
@@ -1393,7 +1446,7 @@
 
 		rtnl_lock();
 		for_each_netdev(&init_net, dev) {
-			if (ic_is_init_dev(dev)) {
+			if (ic_is_init_dev(dev, true)) {
 				found = 1;
 				break;
 			}
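
With the ipconfig changes above, the ip= boot device may name a VLAN: when user_dev_name contains a dot, prepare_vlan() brings the parent interface up and registers the VLAN before autoconfiguration runs, and the partial-match mode of ic_is_init_dev() lets the device-wait loop match the parent (eth0) while eth0.100 is still being created. A hedged example, assuming the standard ip= syntax with illustrative addresses:

	ip=192.0.2.10::192.0.2.1:255.255.255.0:host:eth0.100:off

which creates VLAN 100 on eth0 and then performs static configuration on eth0.100.
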
diff -ruw linux-4.4.115/net/ipv4/ip_output.c linux-4.4.115-fbx/net/ipv4/ip_output.c
--- linux-4.4.115/net/ipv4/ip_output.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/net/ipv4/ip_output.c	2019-10-29 09:26:25.833224610 +0100
@@ -926,7 +926,7 @@
 	    (((length + (skb ? skb->len : fragheaderlen)) > mtu) &&
 	    (skb_queue_len(queue) <= 1) &&
 	    (sk->sk_protocol == IPPROTO_UDP) &&
-	    (rt->dst.dev->features & NETIF_F_UFO) && !rt->dst.header_len &&
+	    (rt->dst.dev->features & NETIF_F_UFO) && !dst_xfrm(&rt->dst) &&
 	    (sk->sk_type == SOCK_DGRAM) && !sk->sk_no_check_tx)) {
 		err = ip_ufo_append_data(sk, queue, getfrag, from, length,
 					 hh_len, fragheaderlen, transhdrlen,
@@ -1583,7 +1583,8 @@
 			   RT_SCOPE_UNIVERSE, ip_hdr(skb)->protocol,
 			   ip_reply_arg_flowi_flags(arg),
 			   daddr, saddr,
-			   tcp_hdr(skb)->source, tcp_hdr(skb)->dest);
+			   tcp_hdr(skb)->source, tcp_hdr(skb)->dest,
+			   arg->uid);
 	security_skb_classify_flow(skb, flowi4_to_flowi(&fl4));
 	rt = ip_route_output_key(net, &fl4);
 	if (IS_ERR(rt))
diff -ruw linux-4.4.115/net/ipv4/Kconfig linux-4.4.115-fbx/net/ipv4/Kconfig
--- linux-4.4.115/net/ipv4/Kconfig	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/net/ipv4/Kconfig	2019-10-29 09:26:25.821224492 +0100
@@ -436,6 +436,19 @@
 	  Support for UDP socket monitoring interface used by the ss tool.
 	  If unsure, say Y.
 
+config INET_DIAG_DESTROY
+	bool "INET: allow privileged process to administratively close sockets"
+	depends on INET_DIAG
+	default n
+	---help---
+	  Provides a SOCK_DESTROY operation that allows privileged processes
+	  (e.g., a connection manager or a network administration tool such as
+	  ss) to close sockets opened by other processes. Closing a socket in
+	  this way interrupts any blocking read/write/connect operations on
+	  the socket and causes future socket calls to behave as if the socket
+	  had been disconnected.
+	  If unsure, say N.
+
 menuconfig TCP_CONG_ADVANCED
 	bool "TCP: advanced congestion control"
 	---help---
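
As the help text notes, SOCK_DESTROY lets a privileged process close other processes' sockets over the inet_diag netlink interface; the actual teardown is wired up later in this patch through the .diag_destroy hooks (tcp_abort(), udp_abort()). As a hedged usage note, iproute2's ss exposes the operation as its -K/--kill filter, e.g.:

	ss -K dst 192.0.2.1 dport = 443
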
diff -ruw linux-4.4.115/net/ipv4/Makefile linux-4.4.115-fbx/net/ipv4/Makefile
--- linux-4.4.115/net/ipv4/Makefile	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/net/ipv4/Makefile	2019-01-22 16:16:28.947295705 +0100
@@ -16,6 +16,7 @@
 
 obj-$(CONFIG_NET_IP_TUNNEL) += ip_tunnel.o
 obj-$(CONFIG_SYSCTL) += sysctl_net_ipv4.o
+obj-$(CONFIG_SYSFS) += sysfs_net_ipv4.o
 obj-$(CONFIG_PROC_FS) += proc.o
 obj-$(CONFIG_IP_MULTIPLE_TABLES) += fib_rules.o
 obj-$(CONFIG_IP_MROUTE) += ipmr.o
diff -ruw linux-4.4.115/net/ipv4/netfilter/ip_tables.c linux-4.4.115-fbx/net/ipv4/netfilter/ip_tables.c
--- linux-4.4.115/net/ipv4/netfilter/ip_tables.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/net/ipv4/netfilter/ip_tables.c	2019-10-29 09:26:25.841224688 +0100
@@ -408,9 +408,11 @@
 			}
 			if (table_base + v != ipt_next_entry(e) &&
 			    !(e->ip.flags & IPT_F_GOTO)) {
+				if (unlikely(stackidx >= private->stacksize)) {
+					verdict = NF_DROP;
+					break;
+				}
 				jumpstack[stackidx++] = e;
-				pr_debug("Pushed %p into pos %u\n",
-					 e, stackidx - 1);
 			}
 
 			e = get_entry(table_base, v);
diff -ruw linux-4.4.115/net/ipv4/netfilter/nf_defrag_ipv4.c linux-4.4.115-fbx/net/ipv4/netfilter/nf_defrag_ipv4.c
--- linux-4.4.115/net/ipv4/netfilter/nf_defrag_ipv4.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/net/ipv4/netfilter/nf_defrag_ipv4.c	2019-01-22 16:16:28.975295959 +0100
@@ -11,6 +11,7 @@
 #include <linux/netfilter.h>
 #include <linux/module.h>
 #include <linux/skbuff.h>
+#include <linux/inetdevice.h>
 #include <net/route.h>
 #include <net/ip.h>
 
@@ -80,8 +81,13 @@
 #endif
 	/* Gather fragments. */
 	if (ip_is_fragment(ip_hdr(skb))) {
-		enum ip_defrag_users user =
-			nf_ct_defrag_user(state->hook, skb);
+		enum ip_defrag_users user;
+
+		if (skb->dev &&
+		    IN_DEV_NF_IPV4_DEFRAG_SKIP(__in_dev_get_rcu(skb->dev)))
+			return NF_ACCEPT;
+
+		user = nf_ct_defrag_user(state->hook, skb);
 
 		if (nf_ct_ipv4_gather_frags(state->net, skb, user))
 			return NF_STOLEN;
diff -ruw linux-4.4.115/net/ipv4/ping.c linux-4.4.115-fbx/net/ipv4/ping.c
--- linux-4.4.115/net/ipv4/ping.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/net/ipv4/ping.c	2019-10-29 09:26:25.841224688 +0100
@@ -660,7 +660,7 @@
 			void *user_icmph, size_t icmph_len) {
 	u8 type, code;
 
-	if (len > 0xFFFF)
+	if (len > 0xFFFF || len < icmph_len)
 		return -EMSGSIZE;
 
 	/* Must have at least a full ICMP header. */
@@ -798,7 +798,8 @@
 
 	flowi4_init_output(&fl4, ipc.oif, sk->sk_mark, tos,
 			   RT_SCOPE_UNIVERSE, sk->sk_protocol,
-			   inet_sk_flowi_flags(sk), faddr, saddr, 0, 0);
+			   inet_sk_flowi_flags(sk), faddr, saddr, 0, 0,
+			   sk->sk_uid);
 
 	security_sk_classify_flow(sk, flowi4_to_flowi(&fl4));
 	rt = ip_route_output_flow(net, &fl4, sk);
diff -ruw linux-4.4.115/net/ipv4/raw.c linux-4.4.115-fbx/net/ipv4/raw.c
--- linux-4.4.115/net/ipv4/raw.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/net/ipv4/raw.c	2019-10-29 09:26:25.845224727 +0100
@@ -609,7 +609,7 @@
 			   hdrincl ? IPPROTO_RAW : sk->sk_protocol,
 			   inet_sk_flowi_flags(sk) |
 			    (hdrincl ? FLOWI_FLAG_KNOWN_NH : 0),
-			   daddr, saddr, 0, 0);
+			   daddr, saddr, 0, 0, sk->sk_uid);
 
 	if (!saddr && ipc.oif) {
 		err = l3mdev_get_saddr(net, ipc.oif, &fl4);
diff -ruw linux-4.4.115/net/ipv4/route.c linux-4.4.115-fbx/net/ipv4/route.c
--- linux-4.4.115/net/ipv4/route.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/net/ipv4/route.c	2019-10-29 09:26:25.845224727 +0100
@@ -501,7 +501,8 @@
 }
 EXPORT_SYMBOL(__ip_select_ident);
 
-static void __build_flow_key(struct flowi4 *fl4, const struct sock *sk,
+static void __build_flow_key(const struct net *net, struct flowi4 *fl4,
+			     const struct sock *sk,
 			     const struct iphdr *iph,
 			     int oif, u8 tos,
 			     u8 prot, u32 mark, int flow_flags)
@@ -517,19 +518,21 @@
 	flowi4_init_output(fl4, oif, mark, tos,
 			   RT_SCOPE_UNIVERSE, prot,
 			   flow_flags,
-			   iph->daddr, iph->saddr, 0, 0);
+			   iph->daddr, iph->saddr, 0, 0,
+			   sock_net_uid(net, sk));
 }
 
 static void build_skb_flow_key(struct flowi4 *fl4, const struct sk_buff *skb,
 			       const struct sock *sk)
 {
+	const struct net *net = dev_net(skb->dev);
 	const struct iphdr *iph = ip_hdr(skb);
 	int oif = skb->dev->ifindex;
 	u8 tos = RT_TOS(iph->tos);
 	u8 prot = iph->protocol;
 	u32 mark = skb->mark;
 
-	__build_flow_key(fl4, sk, iph, oif, tos, prot, mark, 0);
+	__build_flow_key(net, fl4, sk, iph, oif, tos, prot, mark, 0);
 }
 
 static void build_sk_flow_key(struct flowi4 *fl4, const struct sock *sk)
@@ -546,7 +549,7 @@
 			   RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE,
 			   inet->hdrincl ? IPPROTO_RAW : sk->sk_protocol,
 			   inet_sk_flowi_flags(sk),
-			   daddr, inet->inet_saddr, 0, 0);
+			   daddr, inet->inet_saddr, 0, 0, sk->sk_uid);
 	rcu_read_unlock();
 }
 
@@ -793,6 +796,7 @@
 	struct rtable *rt;
 	struct flowi4 fl4;
 	const struct iphdr *iph = (const struct iphdr *) skb->data;
+	struct net *net = dev_net(skb->dev);
 	int oif = skb->dev->ifindex;
 	u8 tos = RT_TOS(iph->tos);
 	u8 prot = iph->protocol;
@@ -800,7 +804,7 @@
 
 	rt = (struct rtable *) dst;
 
-	__build_flow_key(&fl4, sk, iph, oif, tos, prot, mark, 0);
+	__build_flow_key(net, &fl4, sk, iph, oif, tos, prot, mark, 0);
 	__ip_do_redirect(rt, skb, &fl4, true);
 }
 
@@ -1018,7 +1022,7 @@
 	if (!mark)
 		mark = IP4_REPLY_MARK(net, skb->mark);
 
-	__build_flow_key(&fl4, NULL, iph, oif,
+	__build_flow_key(net, &fl4, NULL, iph, oif,
 			 RT_TOS(iph->tos), protocol, mark, flow_flags);
 	rt = __ip_route_output_key(net, &fl4);
 	if (!IS_ERR(rt)) {
@@ -1034,7 +1038,7 @@
 	struct flowi4 fl4;
 	struct rtable *rt;
 
-	__build_flow_key(&fl4, sk, iph, 0, 0, 0, 0, 0);
+	__build_flow_key(sock_net(sk), &fl4, sk, iph, 0, 0, 0, 0, 0);
 
 	if (!fl4.flowi4_mark)
 		fl4.flowi4_mark = IP4_REPLY_MARK(sock_net(sk), skb->mark);
@@ -1053,6 +1057,7 @@
 	struct rtable *rt;
 	struct dst_entry *odst = NULL;
 	bool new = false;
+	struct net *net = sock_net(sk);
 
 	bh_lock_sock(sk);
 
@@ -1066,7 +1071,7 @@
 		goto out;
 	}
 
-	__build_flow_key(&fl4, sk, iph, 0, 0, 0, 0, 0);
+	__build_flow_key(net, &fl4, sk, iph, 0, 0, 0, 0, 0);
 
 	rt = (struct rtable *)odst;
 	if (odst->obsolete && !odst->ops->check(odst, 0)) {
@@ -1106,7 +1111,7 @@
 	struct flowi4 fl4;
 	struct rtable *rt;
 
-	__build_flow_key(&fl4, NULL, iph, oif,
+	__build_flow_key(net, &fl4, NULL, iph, oif,
 			 RT_TOS(iph->tos), protocol, mark, flow_flags);
 	rt = __ip_route_output_key(net, &fl4);
 	if (!IS_ERR(rt)) {
@@ -1121,9 +1126,10 @@
 	const struct iphdr *iph = (const struct iphdr *) skb->data;
 	struct flowi4 fl4;
 	struct rtable *rt;
+	struct net *net = sock_net(sk);
 
-	__build_flow_key(&fl4, sk, iph, 0, 0, 0, 0, 0);
-	rt = __ip_route_output_key(sock_net(sk), &fl4);
+	__build_flow_key(net, &fl4, sk, iph, 0, 0, 0, 0, 0);
+	rt = __ip_route_output_key(net, &fl4);
 	if (!IS_ERR(rt)) {
 		__ip_do_redirect(rt, skb, &fl4, false);
 		ip_rt_put(rt);
@@ -2499,6 +2505,11 @@
 	    nla_put_u32(skb, RTA_MARK, fl4->flowi4_mark))
 		goto nla_put_failure;
 
+	if (!uid_eq(fl4->flowi4_uid, INVALID_UID) &&
+	    nla_put_u32(skb, RTA_UID,
+			from_kuid_munged(current_user_ns(), fl4->flowi4_uid)))
+		goto nla_put_failure;
+
 	error = rt->dst.error;
 
 	if (rt_is_input_route(rt)) {
@@ -2551,6 +2562,7 @@
 	int mark;
 	struct sk_buff *skb;
 	u32 table_id = RT_TABLE_MAIN;
+	kuid_t uid;
 
 	err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX, rtm_ipv4_policy);
 	if (err < 0)
@@ -2578,6 +2590,10 @@
 	dst = tb[RTA_DST] ? nla_get_in_addr(tb[RTA_DST]) : 0;
 	iif = tb[RTA_IIF] ? nla_get_u32(tb[RTA_IIF]) : 0;
 	mark = tb[RTA_MARK] ? nla_get_u32(tb[RTA_MARK]) : 0;
+	if (tb[RTA_UID])
+		uid = make_kuid(current_user_ns(), nla_get_u32(tb[RTA_UID]));
+	else
+		uid = (iif ? INVALID_UID : current_uid());
 
 	memset(&fl4, 0, sizeof(fl4));
 	fl4.daddr = dst;
@@ -2585,6 +2601,7 @@
 	fl4.flowi4_tos = rtm->rtm_tos;
 	fl4.flowi4_oif = tb[RTA_OIF] ? nla_get_u32(tb[RTA_OIF]) : 0;
 	fl4.flowi4_mark = mark;
+	fl4.flowi4_uid = uid;
 
 	if (netif_index_is_l3_master(net, fl4.flowi4_oif))
 		fl4.flowi4_flags = FLOWI_FLAG_L3MDEV_SRC | FLOWI_FLAG_SKIP_NH_OIF;
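
RTA_UID rounds out the UID-aware routing plumbing above: RTM_GETROUTE requests may carry a UID for the lookup, and replies echo a valid UID back. A minimal userspace sketch of appending the attribute to a request buffer, using the standard rtnetlink macros (socket setup, request layout, and error handling are omitted; RTA_UID's value comes from this tree's uapi headers, not mainline):

	#include <linux/rtnetlink.h>
	#include <string.h>

	/* Append a u32 attribute such as RTA_UID to a netlink request;
	 * sizing the buffer to hold it is the caller's responsibility. */
	static void add_u32_attr(struct nlmsghdr *nlh, unsigned short type,
				 __u32 val)
	{
		struct rtattr *rta = (struct rtattr *)
			((char *)nlh + NLMSG_ALIGN(nlh->nlmsg_len));

		rta->rta_type = type;
		rta->rta_len = RTA_LENGTH(sizeof(val));
		memcpy(RTA_DATA(rta), &val, sizeof(val));
		nlh->nlmsg_len = NLMSG_ALIGN(nlh->nlmsg_len) +
				 RTA_LENGTH(sizeof(val));
	}

	/* e.g. add_u32_attr(&req.nlh, RTA_UID, 10010); */
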
diff -ruw linux-4.4.115/net/ipv4/sysctl_net_ipv4.c linux-4.4.115-fbx/net/ipv4/sysctl_net_ipv4.c
--- linux-4.4.115/net/ipv4/sysctl_net_ipv4.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/net/ipv4/sysctl_net_ipv4.c	2019-10-29 09:26:25.849224766 +0100
@@ -42,6 +42,10 @@
 static int tcp_syn_retries_max = MAX_TCP_SYNCNT;
 static int ip_ping_group_range_min[] = { 0, 0 };
 static int ip_ping_group_range_max[] = { GID_T_MAX, GID_T_MAX };
+static int tcp_delack_seg_min = TCP_DELACK_MIN;
+static int tcp_delack_seg_max = 60;
+static int tcp_use_userconfig_min;
+static int tcp_use_userconfig_max = 1;
 
 /* Update system visible IP port range */
 static void set_local_port_range(struct net *net, int range[2])
@@ -152,6 +156,21 @@
 	return ret;
 }
 
+/* Validate changes from /proc interface. */
+static int proc_tcp_default_init_rwnd(struct ctl_table *ctl, int write,
+				      void __user *buffer,
+				      size_t *lenp, loff_t *ppos)
+{
+	int old_value = *(int *)ctl->data;
+	int ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
+	int new_value = *(int *)ctl->data;
+
+	if (write && ret == 0 && (new_value < 3 || new_value > 100))
+		*(int *)ctl->data = old_value;
+
+	return ret;
+}
+
 static int proc_tcp_congestion_control(struct ctl_table *ctl, int write,
 				       void __user *buffer, size_t *lenp, loff_t *ppos)
 {
@@ -761,6 +780,13 @@
 		.proc_handler	= proc_dointvec_ms_jiffies,
 	},
 	{
+		.procname       = "tcp_default_init_rwnd",
+		.data           = &sysctl_tcp_default_init_rwnd,
+		.maxlen         = sizeof(int),
+		.mode           = 0644,
+		.proc_handler   = proc_tcp_default_init_rwnd
+	},
+	{
 		.procname	= "icmp_msgs_per_sec",
 		.data		= &sysctl_icmp_msgs_per_sec,
 		.maxlen		= sizeof(int),
@@ -799,6 +825,25 @@
 		.proc_handler	= proc_dointvec_minmax,
 		.extra1		= &one
 	},
+	{
+		.procname	= "tcp_delack_seg",
+		.data		= &sysctl_tcp_delack_seg,
+		.maxlen		= sizeof(sysctl_tcp_delack_seg),
+		.mode		= 0644,
+		.proc_handler	= tcp_proc_delayed_ack_control,
+		.extra1		= &tcp_delack_seg_min,
+		.extra2		= &tcp_delack_seg_max,
+	},
+	{
+		.procname       = "tcp_use_userconfig",
+		.data           = &sysctl_tcp_use_userconfig,
+		.maxlen         = sizeof(sysctl_tcp_use_userconfig),
+		.mode           = 0644,
+		.proc_handler   = tcp_use_userconfig_sysctl_handler,
+		.extra1		= &tcp_use_userconfig_min,
+		.extra2		= &tcp_use_userconfig_max,
+	},
+
 	{ }
 };
 
@@ -881,6 +926,13 @@
 		.proc_handler	= proc_do_large_bitmap,
 	},
 	{
+		.procname	= "reserved_port_bind",
+		.data		= &sysctl_reserved_port_bind,
+		.maxlen		= sizeof(int),
+		.mode		= 0644,
+		.proc_handler	= proc_dointvec
+	},
+	{
 		.procname	= "ip_no_pmtu_disc",
 		.data		= &init_net.ipv4.sysctl_ip_no_pmtu_disc,
 		.maxlen		= sizeof(int),
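
The new knobs land under /proc/sys/net/ipv4. A hedged sketch of driving them from C (paths follow the table entries above; note that proc_tcp_default_init_rwnd silently restores the old value for writes outside 3..100 while still returning success):

	#include <stdio.h>

	static int write_sysctl(const char *path, const char *val)
	{
		FILE *f = fopen(path, "w");

		if (!f)
			return -1;
		fputs(val, f);
		return fclose(f);
	}

	/* write_sysctl("/proc/sys/net/ipv4/tcp_use_userconfig", "1");
	 * write_sysctl("/proc/sys/net/ipv4/tcp_delack_seg", "20");
	 * write_sysctl("/proc/sys/net/ipv4/reserved_port_bind", "0");
	 */
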
diff -ruw linux-4.4.115/net/ipv4/tcp.c linux-4.4.115-fbx/net/ipv4/tcp.c
--- linux-4.4.115/net/ipv4/tcp.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/net/ipv4/tcp.c	2019-10-29 09:26:25.849224766 +0100
@@ -302,6 +302,12 @@
 atomic_long_t tcp_memory_allocated;	/* Current allocated memory. */
 EXPORT_SYMBOL(tcp_memory_allocated);
 
+int sysctl_tcp_delack_seg __read_mostly = TCP_DELACK_SEG;
+EXPORT_SYMBOL(sysctl_tcp_delack_seg);
+
+int sysctl_tcp_use_userconfig __read_mostly;
+EXPORT_SYMBOL(sysctl_tcp_use_userconfig);
+
 /*
  * Current number of TCP sockets.
  */
@@ -1407,8 +1413,11 @@
 		   /* Delayed ACKs frequently hit locked sockets during bulk
 		    * receive. */
 		if (icsk->icsk_ack.blocked ||
-		    /* Once-per-two-segments ACK was not sent by tcp_input.c */
-		    tp->rcv_nxt - tp->rcv_wup > icsk->icsk_ack.rcv_mss ||
+		    /* Once-per-sysctl_tcp_delack_seg segments ACK was not
+		     * sent by tcp_input.c
+		     */
+		    tp->rcv_nxt - tp->rcv_wup > (icsk->icsk_ack.rcv_mss) *
+						sysctl_tcp_delack_seg ||
 		    /*
 		     * If this read emptied read buffer, we send ACK, if
 		     * connection is not bidirectional, user drained
@@ -2425,6 +2434,13 @@
 		}
 		break;
 
+	case TCP_LINEAR_RTO:
+		if (val < 0 || val > 1)
+			err = -EINVAL;
+		else
+			tp->linear_rto = val;
+		break;
+
 	case TCP_REPAIR:
 		if (!tcp_can_repair_sock(sk))
 			err = -EPERM;
@@ -2730,6 +2746,14 @@
 	rate64 = rate != ~0U ? rate : ~0ULL;
 	put_unaligned(rate64, &info->tcpi_max_pacing_rate);
 
+	/* Expose reference count for socket */
+	if (sk->sk_socket) {
+		struct file *filep = sk->sk_socket->file;
+
+		if (filep)
+			info->tcpi_count = file_count(filep);
+	}
+
 	do {
 		start = u64_stats_fetch_begin_irq(&tp->syncp);
 		put_unaligned(tp->bytes_acked, &info->tcpi_bytes_acked);
@@ -2848,6 +2872,9 @@
 	case TCP_THIN_DUPACK:
 		val = tp->thin_dupack;
 		break;
+	case TCP_LINEAR_RTO:
+		val = tp->linear_rto;
+		break;
 
 	case TCP_REPAIR:
 		val = tp->repair;
@@ -3103,6 +3130,52 @@
 }
 EXPORT_SYMBOL_GPL(tcp_done);
 
+int tcp_abort(struct sock *sk, int err)
+{
+	if (!sk_fullsock(sk)) {
+		if (sk->sk_state == TCP_NEW_SYN_RECV) {
+			struct request_sock *req = inet_reqsk(sk);
+
+			local_bh_disable();
+			inet_csk_reqsk_queue_drop_and_put(req->rsk_listener,
+							  req);
+			local_bh_enable();
+			return 0;
+		}
+		sock_gen_put(sk);
+		return -EOPNOTSUPP;
+	}
+
+	/* Don't race with userspace socket closes such as tcp_close. */
+	lock_sock(sk);
+
+	if (sk->sk_state == TCP_LISTEN) {
+		tcp_set_state(sk, TCP_CLOSE);
+		inet_csk_listen_stop(sk);
+	}
+
+	/* Don't race with BH socket closes such as inet_csk_listen_stop. */
+	local_bh_disable();
+	bh_lock_sock(sk);
+
+	if (!sock_flag(sk, SOCK_DEAD)) {
+		sk->sk_err = err;
+		/* This barrier is coupled with smp_rmb() in tcp_poll() */
+		smp_wmb();
+		sk->sk_error_report(sk);
+		if (tcp_need_reset(sk->sk_state))
+			tcp_send_active_reset(sk, GFP_ATOMIC);
+		tcp_done(sk);
+	}
+
+	bh_unlock_sock(sk);
+	local_bh_enable();
+	release_sock(sk);
+	sock_put(sk);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(tcp_abort);
+
 extern struct tcp_congestion_ops tcp_reno;
 
 static __initdata unsigned long thash_entries;
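
TCP_LINEAR_RTO above is a boolean per-socket option; when set, established sockets retry with a constant rather than exponentially backed-off RTO, as the tcp_timer.c hunk further down shows. A hedged userspace sketch (TCP_LINEAR_RTO's numeric value comes from this tree's uapi headers, not mainline):

	#include <netinet/in.h>
	#include <netinet/tcp.h>
	#include <sys/socket.h>

	static int enable_linear_rto(int fd)
	{
		int one = 1;

		/* Assumes TCP_LINEAR_RTO is visible through this tree's
		 * <netinet/tcp.h>; it is not a mainline socket option. */
		return setsockopt(fd, IPPROTO_TCP, TCP_LINEAR_RTO,
				  &one, sizeof(one));
	}
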
diff -ruw linux-4.4.115/net/ipv4/tcp_input.c linux-4.4.115-fbx/net/ipv4/tcp_input.c
--- linux-4.4.115/net/ipv4/tcp_input.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/net/ipv4/tcp_input.c	2019-10-29 09:26:25.857224845 +0100
@@ -102,6 +102,7 @@
 int sysctl_tcp_moderate_rcvbuf __read_mostly = 1;
 int sysctl_tcp_early_retrans __read_mostly = 3;
 int sysctl_tcp_invalid_ratelimit __read_mostly = HZ/2;
+int sysctl_tcp_default_init_rwnd __read_mostly = TCP_INIT_CWND * 2;
 
 #define FLAG_DATA		0x01 /* Incoming frame contained data.		*/
 #define FLAG_WIN_UPDATE		0x02 /* Incoming ACK was a window update.	*/
@@ -4963,7 +4964,8 @@
 	struct tcp_sock *tp = tcp_sk(sk);
 
 	    /* More than one full frame received... */
-	if (((tp->rcv_nxt - tp->rcv_wup) > inet_csk(sk)->icsk_ack.rcv_mss &&
+	if (((tp->rcv_nxt - tp->rcv_wup) > (inet_csk(sk)->icsk_ack.rcv_mss) *
+					sysctl_tcp_delack_seg &&
 	     /* ... and right edge of window advances far enough.
 	      * (tcp_recvmsg() will send ACK otherwise). Or...
 	      */
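
Worked numbers for the relaxed delayed-ACK trigger above (hedged: TCP_DELACK_SEG is assumed to be 1, which preserves the stock more-than-one-full-frame behaviour):

	/* rcv_mss = 1448, tcp_delack_seg = 1  -> ACK once more than
	 *   1448 bytes of received data are unacknowledged (original);
	 * rcv_mss = 1448, tcp_delack_seg = 20 -> ACK once more than
	 *   28960 bytes are unacknowledged.
	 */
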
diff -ruw linux-4.4.115/net/ipv4/tcp_ipv4.c linux-4.4.115-fbx/net/ipv4/tcp_ipv4.c
--- linux-4.4.115/net/ipv4/tcp_ipv4.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/net/ipv4/tcp_ipv4.c	2019-10-29 09:26:25.861224884 +0100
@@ -691,6 +691,7 @@
 		arg.bound_dev_if = sk->sk_bound_dev_if;
 
 	arg.tos = ip_hdr(skb)->tos;
+	arg.uid = sock_net_uid(net, sk && sk_fullsock(sk) ? sk : NULL);
 	ip_send_unicast_reply(*this_cpu_ptr(net->ipv4.tcp_sk),
 			      skb, &TCP_SKB_CB(skb)->header.h4.opt,
 			      ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
@@ -712,7 +713,7 @@
    outside socket context is ugly, certainly. What can I do?
  */
 
-static void tcp_v4_send_ack(struct net *net,
+static void tcp_v4_send_ack(const struct sock *sk,
 			    struct sk_buff *skb, u32 seq, u32 ack,
 			    u32 win, u32 tsval, u32 tsecr, int oif,
 			    struct tcp_md5sig_key *key,
@@ -727,6 +728,7 @@
 #endif
 			];
 	} rep;
+	struct net *net = sock_net(sk);
 	struct ip_reply_arg arg;
 
 	memset(&rep.th, 0, sizeof(struct tcphdr));
@@ -776,6 +778,7 @@
 	if (oif)
 		arg.bound_dev_if = oif;
 	arg.tos = tos;
+	arg.uid = sock_net_uid(net, sk_fullsock(sk) ? sk : NULL);
 	ip_send_unicast_reply(*this_cpu_ptr(net->ipv4.tcp_sk),
 			      skb, &TCP_SKB_CB(skb)->header.h4.opt,
 			      ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
@@ -789,7 +792,7 @@
 	struct inet_timewait_sock *tw = inet_twsk(sk);
 	struct tcp_timewait_sock *tcptw = tcp_twsk(sk);
 
-	tcp_v4_send_ack(sock_net(sk), skb,
+	tcp_v4_send_ack(sk, skb,
 			tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
 			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
 			tcp_time_stamp + tcptw->tw_ts_offset,
@@ -817,7 +820,7 @@
 	 * exception of <SYN> segments, MUST be right-shifted by
 	 * Rcv.Wind.Shift bits:
 	 */
-	tcp_v4_send_ack(sock_net(sk), skb, seq,
+	tcp_v4_send_ack(sk, skb, seq,
 			tcp_rsk(req)->rcv_nxt,
 			req->rsk_rcv_wnd >> inet_rsk(req)->rcv_wscale,
 			tcp_time_stamp,
@@ -846,7 +849,8 @@
 	struct sk_buff *skb;
 
 	/* First, grab a route. */
-	if (!dst && (dst = inet_csk_route_req(sk, &fl4, req)) == NULL)
+	if (!dst && (dst = inet_csk_route_req(
+					(struct sock *)sk, &fl4, req)) == NULL)
 		return -1;
 
 	skb = tcp_make_synack(sk, dst, req, foc, attach_req);
@@ -1205,7 +1209,8 @@
 					  const struct request_sock *req,
 					  bool *strict)
 {
-	struct dst_entry *dst = inet_csk_route_req(sk, &fl->u.ip4, req);
+	struct dst_entry *dst = inet_csk_route_req(
+					(struct sock *)sk, &fl->u.ip4, req);
 
 	if (strict) {
 		if (fl->u.ip4.daddr == inet_rsk(req)->ir_rmt_addr)
@@ -1583,6 +1588,12 @@
 	if (!pskb_may_pull(skb, th->doff * 4))
 		goto discard_it;
 
+	/* Assuming a trustworthy entity did the checksum and found the csum
+	 * invalid, drop the packet.
+	 */
+	if (skb->ip_summed == CHECKSUM_COMPLETE && skb->csum_valid == 0)
+		goto csum_error;
+
 	/* An explanation is required here, I think.
 	 * Packet length and doff are validated by header prediction,
 	 * provided case of th->doff==0 is eliminated.
@@ -2195,6 +2206,7 @@
 	__be32 src = inet->inet_rcv_saddr;
 	__u16 destp = ntohs(inet->inet_dport);
 	__u16 srcp = ntohs(inet->inet_sport);
+	__u8 seq_state = sk->sk_state;
 	int rx_queue;
 	int state;
 
@@ -2214,6 +2226,9 @@
 		timer_expires = jiffies;
 	}
 
+	if (inet->transparent)
+		seq_state |= 0x80;
+
 	state = sk_state_load(sk);
 	if (state == TCP_LISTEN)
 		rx_queue = sk->sk_ack_backlog;
@@ -2225,7 +2240,7 @@
 
 	seq_printf(f, "%4d: %08X:%04X %08X:%04X %02X %08X:%08X %02X:%08lX "
 			"%08X %5u %8d %lu %d %pK %lu %lu %u %u %d",
-		i, src, srcp, dest, destp, state,
+		i, src, srcp, dest, destp, seq_state,
 		tp->write_seq - tp->snd_una,
 		rx_queue,
 		timer_active,
@@ -2379,6 +2394,7 @@
 	.destroy_cgroup		= tcp_destroy_cgroup,
 	.proto_cgroup		= tcp_proto_cgroup,
 #endif
+	.diag_destroy		= tcp_abort,
 };
 EXPORT_SYMBOL(tcp_prot);
 
diff -ruw linux-4.4.115/net/ipv4/tcp_output.c linux-4.4.115-fbx/net/ipv4/tcp_output.c
--- linux-4.4.115/net/ipv4/tcp_output.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/net/ipv4/tcp_output.c	2019-10-29 09:26:25.861224884 +0100
@@ -191,7 +191,7 @@
 	 * (RFC 3517, Section 4, NextSeg() rule (2)). Further place a
 	 * limit when mss is larger than 1460.
 	 */
-	u32 init_rwnd = TCP_INIT_CWND * 2;
+	u32 init_rwnd = sysctl_tcp_default_init_rwnd;
 
 	if (mss > 1460)
 		init_rwnd = max((1460 * init_rwnd) / mss, 2U);
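
The initial receive window now defaults from sysctl_tcp_default_init_rwnd instead of the hard-coded TCP_INIT_CWND * 2, with the existing large-MSS scaling retained. A worked example (values illustrative):

	/* sysctl_tcp_default_init_rwnd = 20:
	 *   mss = 1460 -> init_rwnd = 20 segments
	 *   mss = 9000 -> init_rwnd = max((1460 * 20) / 9000, 2U)
	 *              = max(3, 2) = 3 segments
	 */
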
diff -ruw linux-4.4.115/net/ipv4/tcp_timer.c linux-4.4.115-fbx/net/ipv4/tcp_timer.c
--- linux-4.4.115/net/ipv4/tcp_timer.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/net/ipv4/tcp_timer.c	2019-10-29 09:26:25.865224923 +0100
@@ -32,6 +32,40 @@
 int sysctl_tcp_orphan_retries __read_mostly;
 int sysctl_tcp_thin_linear_timeouts __read_mostly;
 
+/* Reset the tcp_ack related sysctls to defaults when the master control is cleared */
+void set_tcp_default(void)
+{
+	sysctl_tcp_delack_seg	= TCP_DELACK_SEG;
+}
+
+/* sysctl handler for the tcp_delack_seg control */
+int tcp_proc_delayed_ack_control(struct ctl_table *table, int write,
+				 void __user *buffer, size_t *length,
+				 loff_t *ppos)
+{
+	int ret = proc_dointvec_minmax(table, write, buffer, length, ppos);
+
+	/* ret is 0 if input validation succeeded and the value was
+	 * written to the sysctl table; otherwise the stack keeps using
+	 * the currently configured value.
+	 */
+	return ret;
+}
+
+/* sysctl handler for the tcp_use_userconfig master control */
+int tcp_use_userconfig_sysctl_handler(struct ctl_table *table, int write,
+				      void __user *buffer, size_t *length,
+				      loff_t *ppos)
+{
+	int ret = proc_dointvec_minmax(table, write, buffer, length, ppos);
+
+	if (write && ret == 0) {
+		if (!sysctl_tcp_use_userconfig)
+			set_tcp_default();
+	}
+	return ret;
+}
+
 static void tcp_write_err(struct sock *sk)
 {
 	sk->sk_err = sk->sk_err_soft ? : ETIMEDOUT;
@@ -504,6 +538,10 @@
 	    icsk->icsk_retransmits <= TCP_THIN_LINEAR_RETRIES) {
 		icsk->icsk_backoff = 0;
 		icsk->icsk_rto = min(__tcp_set_rto(tp), TCP_RTO_MAX);
+
+	} else if (sk->sk_state == TCP_ESTABLISHED && tp->linear_rto) {
+		icsk->icsk_backoff = 0;
+		icsk->icsk_rto = min(__tcp_set_rto(tp), TCP_RTO_MAX);
 	} else {
 		/* Use normal (exponential) backoff */
 		icsk->icsk_rto = min(icsk->icsk_rto << 1, TCP_RTO_MAX);
diff -ruw linux-4.4.115/net/ipv4/udp.c linux-4.4.115-fbx/net/ipv4/udp.c
--- linux-4.4.115/net/ipv4/udp.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/net/ipv4/udp.c	2019-10-29 09:26:25.865224923 +0100
@@ -257,6 +257,11 @@
 	} else {
 		hslot = udp_hashslot(udptable, net, snum);
 		spin_lock_bh(&hslot->lock);
+
+		if (inet_is_local_reserved_port(net, snum) &&
+		    !sysctl_reserved_port_bind)
+			goto fail_unlock;
+
 		if (hslot->count > 10) {
 			int exist;
 			unsigned int slot2 = udp_sk(sk)->udp_portaddr_hash ^ snum;
@@ -1025,7 +1030,8 @@
 		flowi4_init_output(fl4, ipc.oif, sk->sk_mark, tos,
 				   RT_SCOPE_UNIVERSE, sk->sk_protocol,
 				   flow_flags,
-				   faddr, saddr, dport, inet->inet_sport);
+				   faddr, saddr, dport, inet->inet_sport,
+				   sk->sk_uid);
 
 		if (!saddr && ipc.oif) {
 			err = l3mdev_get_saddr(net, ipc.oif, fl4);
@@ -2264,6 +2270,20 @@
 }
 EXPORT_SYMBOL(udp_poll);
 
+int udp_abort(struct sock *sk, int err)
+{
+	lock_sock(sk);
+
+	sk->sk_err = err;
+	sk->sk_error_report(sk);
+	udp_disconnect(sk, 0);
+
+	release_sock(sk);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(udp_abort);
+
 struct proto udp_prot = {
 	.name		   = "UDP",
 	.owner		   = THIS_MODULE,
@@ -2295,6 +2315,7 @@
 	.compat_getsockopt = compat_udp_getsockopt,
 #endif
 	.clear_sk	   = sk_prot_clear_portaddr_nulls,
+	.diag_destroy	   = udp_abort,
 };
 EXPORT_SYMBOL(udp_prot);
 
@@ -2432,14 +2453,20 @@
 		int bucket)
 {
 	struct inet_sock *inet = inet_sk(sp);
+	struct udp_sock *up = udp_sk(sp);
 	__be32 dest = inet->inet_daddr;
 	__be32 src  = inet->inet_rcv_saddr;
 	__u16 destp	  = ntohs(inet->inet_dport);
 	__u16 srcp	  = ntohs(inet->inet_sport);
+	__u8 state = sp->sk_state;
+	if (up->encap_rcv)
+		state |= 0xF0;
+	else if (inet->transparent)
+		state |= 0x80;
 
 	seq_printf(f, "%5d: %08X:%04X %08X:%04X"
 		" %02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %d",
-		bucket, src, srcp, dest, destp, sp->sk_state,
+		bucket, src, srcp, dest, destp, state,
 		sk_wmem_alloc_get(sp),
 		sk_rmem_alloc_get(sp),
 		0, 0L, 0,
diff -ruw linux-4.4.115/net/ipv4/xfrm4_policy.c linux-4.4.115-fbx/net/ipv4/xfrm4_policy.c
--- linux-4.4.115/net/ipv4/xfrm4_policy.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/net/ipv4/xfrm4_policy.c	2019-10-29 09:26:25.869224962 +0100
@@ -22,14 +22,16 @@
 static struct dst_entry *__xfrm4_dst_lookup(struct net *net, struct flowi4 *fl4,
 					    int tos, int oif,
 					    const xfrm_address_t *saddr,
-					    const xfrm_address_t *daddr)
+					    const xfrm_address_t *daddr,
+					    u32 mark)
 {
 	struct rtable *rt;
 
 	memset(fl4, 0, sizeof(*fl4));
 	fl4->daddr = daddr->a4;
 	fl4->flowi4_tos = tos;
-	fl4->flowi4_oif = oif;
+	fl4->flowi4_oif = l3mdev_master_ifindex_by_index(net, oif);
+	fl4->flowi4_mark = mark;
 	if (saddr)
 		fl4->saddr = saddr->a4;
 
@@ -44,20 +46,22 @@
 
 static struct dst_entry *xfrm4_dst_lookup(struct net *net, int tos, int oif,
 					  const xfrm_address_t *saddr,
-					  const xfrm_address_t *daddr)
+					  const xfrm_address_t *daddr,
+					  u32 mark)
 {
 	struct flowi4 fl4;
 
-	return __xfrm4_dst_lookup(net, &fl4, tos, oif, saddr, daddr);
+	return __xfrm4_dst_lookup(net, &fl4, tos, oif, saddr, daddr, mark);
 }
 
 static int xfrm4_get_saddr(struct net *net, int oif,
-			   xfrm_address_t *saddr, xfrm_address_t *daddr)
+			   xfrm_address_t *saddr, xfrm_address_t *daddr,
+			   u32 mark)
 {
 	struct dst_entry *dst;
 	struct flowi4 fl4;
 
-	dst = __xfrm4_dst_lookup(net, &fl4, 0, oif, NULL, daddr);
+	dst = __xfrm4_dst_lookup(net, &fl4, 0, oif, NULL, daddr, mark);
 	if (IS_ERR(dst))
 		return -EHOSTUNREACH;
 
diff -ruw linux-4.4.115/net/ipv6/addrconf.c linux-4.4.115-fbx/net/ipv6/addrconf.c
--- linux-4.4.115/net/ipv6/addrconf.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/net/ipv6/addrconf.c	2019-10-29 09:26:25.869224962 +0100
@@ -112,6 +112,27 @@
 	return (cstamp - INITIAL_JIFFIES) * 100UL / HZ;
 }
 
+static inline s32 rfc3315_s14_backoff_init(s32 irt)
+{
+	/* multiply 'initial retransmission time' by 0.9 .. 1.1 */
+	u64 tmp = (900000 + prandom_u32() % 200001) * (u64)irt;
+	do_div(tmp, 1000000);
+	return (s32)tmp;
+}
+
+static inline s32 rfc3315_s14_backoff_update(s32 rt, s32 mrt)
+{
+	/* multiply 'retransmission timeout' by 1.9 .. 2.1 */
+	u64 tmp = (1900000 + prandom_u32() % 200001) * (u64)rt;
+	do_div(tmp, 1000000);
+	if ((s32)tmp > mrt) {
+		/* multiply 'maximum retransmission time' by 0.9 .. 1.1 */
+		tmp = (900000 + prandom_u32() % 200001) * (u64)mrt;
+		do_div(tmp, 1000000);
+	}
+	return (s32)tmp;
+}
+
 #ifdef CONFIG_SYSCTL
 static int addrconf_sysctl_register(struct inet6_dev *idev);
 static void addrconf_sysctl_unregister(struct inet6_dev *idev);
@@ -187,6 +208,7 @@
 	.dad_transmits		= 1,
 	.rtr_solicits		= MAX_RTR_SOLICITATIONS,
 	.rtr_solicit_interval	= RTR_SOLICITATION_INTERVAL,
+	.rtr_solicit_max_interval = RTR_SOLICITATION_MAX_INTERVAL,
 	.rtr_solicit_delay	= MAX_RTR_SOLICITATION_DELAY,
 	.use_tempaddr		= 0,
 	.temp_valid_lft		= TEMP_VALID_LIFETIME,
@@ -202,9 +224,11 @@
 	.accept_ra_rtr_pref	= 1,
 	.rtr_probe_interval	= 60 * HZ,
 #ifdef CONFIG_IPV6_ROUTE_INFO
+	.accept_ra_rt_info_min_plen = 0,
 	.accept_ra_rt_info_max_plen = 0,
 #endif
 #endif
+	.accept_ra_rt_table	= 0,
 	.proxy_ndp		= 0,
 	.accept_source_route	= 0,	/* we do not accept RH0 by default. */
 	.disable_ipv6		= 0,
@@ -216,6 +240,7 @@
 	},
 	.use_oif_addrs_only	= 0,
 	.ignore_routes_with_linkdown = 0,
+	.accept_ra_prefix_route = 1,
 };
 
 static struct ipv6_devconf ipv6_devconf_dflt __read_mostly = {
@@ -231,6 +256,7 @@
 	.dad_transmits		= 1,
 	.rtr_solicits		= MAX_RTR_SOLICITATIONS,
 	.rtr_solicit_interval	= RTR_SOLICITATION_INTERVAL,
+	.rtr_solicit_max_interval = RTR_SOLICITATION_MAX_INTERVAL,
 	.rtr_solicit_delay	= MAX_RTR_SOLICITATION_DELAY,
 	.use_tempaddr		= 0,
 	.temp_valid_lft		= TEMP_VALID_LIFETIME,
@@ -246,9 +272,11 @@
 	.accept_ra_rtr_pref	= 1,
 	.rtr_probe_interval	= 60 * HZ,
 #ifdef CONFIG_IPV6_ROUTE_INFO
+	.accept_ra_rt_info_min_plen = 0,
 	.accept_ra_rt_info_max_plen = 0,
 #endif
 #endif
+	.accept_ra_rt_table	= 0,
 	.proxy_ndp		= 0,
 	.accept_source_route	= 0,	/* we do not accept RH0 by default. */
 	.disable_ipv6		= 0,
@@ -260,6 +288,7 @@
 	},
 	.use_oif_addrs_only	= 0,
 	.ignore_routes_with_linkdown = 0,
+	.accept_ra_prefix_route = 1,
 };
 
 /* Check if a valid qdisc is available */
@@ -2054,6 +2083,16 @@
 		return addrconf_ifid_ieee1394(eui, dev);
 	case ARPHRD_TUNNEL6:
 		return addrconf_ifid_ip6tnl(eui, dev);
+	case ARPHRD_RAWIP: {
+		struct in6_addr lladdr;
+
+		if (ipv6_get_lladdr(dev, &lladdr, IFA_F_TENTATIVE))
+			get_random_bytes(eui, 8);
+		else
+			memcpy(eui, lladdr.s6_addr + 8, 8);
+
+		return 0;
+	}
 	}
 	return -1;
 }
@@ -2146,6 +2185,31 @@
 		__ipv6_regen_rndid(idev);
 }
 
+u32 addrconf_rt_table(const struct net_device *dev, u32 default_table) {
+	/* Determines into what table to put autoconf PIO/RIO/default routes
+	 * learned on this device.
+	 *
+	 * - If 0, use the same table for every device. This puts routes into
+	 *   one of RT_TABLE_{PREFIX,INFO,DFLT} depending on the type of route
+	 *   (but note that these three are currently all equal to
+	 *   RT6_TABLE_MAIN).
+	 * - If > 0, use the specified table.
+	 * - If < 0, put routes into table dev->ifindex + (-rt_table).
+	 */
+	struct inet6_dev *idev = in6_dev_get(dev);
+	u32 table;
+	int sysctl = idev->cnf.accept_ra_rt_table;
+	if (sysctl == 0) {
+		table = default_table;
+	} else if (sysctl > 0) {
+		table = (u32) sysctl;
+	} else {
+		table = (unsigned) dev->ifindex + (-sysctl);
+	}
+	in6_dev_put(idev);
+	return table;
+}
+
 /*
  *	Add prefix route.
  */
@@ -2155,7 +2219,7 @@
 		      unsigned long expires, u32 flags)
 {
 	struct fib6_config cfg = {
-		.fc_table = l3mdev_fib_table(dev) ? : RT6_TABLE_PREFIX,
+		.fc_table = l3mdev_fib_table(dev) ? : addrconf_rt_table(dev, RT6_TABLE_PREFIX),
 		.fc_metric = IP6_RT_PRIO_ADDRCONF,
 		.fc_ifindex = dev->ifindex,
 		.fc_expires = expires,
@@ -2188,7 +2252,7 @@
 	struct fib6_node *fn;
 	struct rt6_info *rt = NULL;
 	struct fib6_table *table;
-	u32 tb_id = l3mdev_fib_table(dev) ? : RT6_TABLE_PREFIX;
+	u32 tb_id = l3mdev_fib_table(dev) ? : addrconf_rt_table(dev, RT6_TABLE_PREFIX);
 
 	table = fib6_get_table(dev_net(dev), tb_id);
 	if (!table)
@@ -2411,9 +2475,12 @@
 				flags |= RTF_EXPIRES;
 				expires = jiffies_to_clock_t(rt_expires);
 			}
-			addrconf_prefix_route(&pinfo->prefix, pinfo->prefix_len,
+			if (dev->ip6_ptr->cnf.accept_ra_prefix_route) {
+				addrconf_prefix_route(&pinfo->prefix,
+						      pinfo->prefix_len,
 					      dev, expires, flags);
 		}
+		}
 		ip6_rt_put(rt);
 	}
 
@@ -3076,7 +3143,9 @@
 	    (dev->type != ARPHRD_IEEE802154) &&
 	    (dev->type != ARPHRD_IEEE1394) &&
 	    (dev->type != ARPHRD_TUNNEL6) &&
-	    (dev->type != ARPHRD_6LOWPAN)) {
+	    (dev->type != ARPHRD_6LOWPAN) &&
+	    (dev->type != ARPHRD_RAWIP) &&
+	    (dev->type != ARPHRD_INFINIBAND)) {
 		/* Alas, we support only Ethernet autoconfiguration. */
 		return;
 	}
@@ -3466,7 +3535,7 @@
 	if (idev->if_flags & IF_RA_RCVD)
 		goto out;
 
-	if (idev->rs_probes++ < idev->cnf.rtr_solicits) {
+	if (idev->rs_probes++ < idev->cnf.rtr_solicits || idev->cnf.rtr_solicits < 0) {
 		write_unlock(&idev->lock);
 		if (!ipv6_get_lladdr(dev, &lladdr, IFA_F_TENTATIVE))
 			ndisc_send_rs(dev, &lladdr,
@@ -3475,11 +3544,13 @@
 			goto put;
 
 		write_lock(&idev->lock);
+		idev->rs_interval = rfc3315_s14_backoff_update(
+			idev->rs_interval, idev->cnf.rtr_solicit_max_interval);
 		/* The wait after the last probe can be shorter */
 		addrconf_mod_rs_timer(idev, (idev->rs_probes ==
 					     idev->cnf.rtr_solicits) ?
 				      idev->cnf.rtr_solicit_delay :
-				      idev->cnf.rtr_solicit_interval);
+				      idev->rs_interval);
 	} else {
 		/*
 		 * Note: we do not support deprecated "all on-link"
@@ -3707,7 +3778,7 @@
 	send_mld = ifp->scope == IFA_LINK && ipv6_lonely_lladdr(ifp);
 	send_rs = send_mld &&
 		  ipv6_accept_ra(ifp->idev) &&
-		  ifp->idev->cnf.rtr_solicits > 0 &&
+		  ifp->idev->cnf.rtr_solicits != 0 &&
 		  (dev->flags&IFF_LOOPBACK) == 0;
 	read_unlock_bh(&ifp->idev->lock);
 
@@ -3729,10 +3800,11 @@
 
 		write_lock_bh(&ifp->idev->lock);
 		spin_lock(&ifp->lock);
+		ifp->idev->rs_interval = rfc3315_s14_backoff_init(
+			ifp->idev->cnf.rtr_solicit_interval);
 		ifp->idev->rs_probes = 1;
 		ifp->idev->if_flags |= IF_RS_SENT;
-		addrconf_mod_rs_timer(ifp->idev,
-				      ifp->idev->cnf.rtr_solicit_interval);
+		addrconf_mod_rs_timer(ifp->idev, ifp->idev->rs_interval);
 		spin_unlock(&ifp->lock);
 		write_unlock_bh(&ifp->idev->lock);
 	}
@@ -4649,6 +4721,8 @@
 	array[DEVCONF_RTR_SOLICITS] = cnf->rtr_solicits;
 	array[DEVCONF_RTR_SOLICIT_INTERVAL] =
 		jiffies_to_msecs(cnf->rtr_solicit_interval);
+	array[DEVCONF_RTR_SOLICIT_MAX_INTERVAL] =
+		jiffies_to_msecs(cnf->rtr_solicit_max_interval);
 	array[DEVCONF_RTR_SOLICIT_DELAY] =
 		jiffies_to_msecs(cnf->rtr_solicit_delay);
 	array[DEVCONF_FORCE_MLD_VERSION] = cnf->force_mld_version;
@@ -4670,9 +4744,11 @@
 	array[DEVCONF_RTR_PROBE_INTERVAL] =
 		jiffies_to_msecs(cnf->rtr_probe_interval);
 #ifdef CONFIG_IPV6_ROUTE_INFO
+	array[DEVCONF_ACCEPT_RA_RT_INFO_MIN_PLEN] = cnf->accept_ra_rt_info_min_plen;
 	array[DEVCONF_ACCEPT_RA_RT_INFO_MAX_PLEN] = cnf->accept_ra_rt_info_max_plen;
 #endif
 #endif
+	array[DEVCONF_ACCEPT_RA_RT_TABLE] = cnf->accept_ra_rt_table;
 	array[DEVCONF_PROXY_NDP] = cnf->proxy_ndp;
 	array[DEVCONF_ACCEPT_SOURCE_ROUTE] = cnf->accept_source_route;
 #ifdef CONFIG_IPV6_OPTIMISTIC_DAD
@@ -4856,7 +4932,7 @@
 		return -EINVAL;
 	if (!ipv6_accept_ra(idev))
 		return -EINVAL;
-	if (idev->cnf.rtr_solicits <= 0)
+	if (idev->cnf.rtr_solicits == 0)
 		return -EINVAL;
 
 	write_lock_bh(&idev->lock);
@@ -4881,8 +4957,10 @@
 
 	if (update_rs) {
 		idev->if_flags |= IF_RS_SENT;
+		idev->rs_interval = rfc3315_s14_backoff_init(
+			idev->cnf.rtr_solicit_interval);
 		idev->rs_probes = 1;
-		addrconf_mod_rs_timer(idev, idev->cnf.rtr_solicit_interval);
+		addrconf_mod_rs_timer(idev, idev->rs_interval);
 	}
 
 	/* Well, that's kinda nasty ... */
@@ -5520,6 +5598,13 @@
 			.proc_handler	= proc_dointvec_jiffies,
 		},
 		{
+			.procname	= "router_solicitation_max_interval",
+			.data		= &ipv6_devconf.rtr_solicit_max_interval,
+			.maxlen		= sizeof(int),
+			.mode		= 0644,
+			.proc_handler	= proc_dointvec_jiffies,
+		},
+		{
 			.procname	= "router_solicitation_delay",
 			.data		= &ipv6_devconf.rtr_solicit_delay,
 			.maxlen		= sizeof(int),
@@ -5629,6 +5714,13 @@
 		},
 #ifdef CONFIG_IPV6_ROUTE_INFO
 		{
+			.procname	= "accept_ra_rt_info_min_plen",
+			.data		= &ipv6_devconf.accept_ra_rt_info_min_plen,
+			.maxlen		= sizeof(int),
+			.mode		= 0644,
+			.proc_handler	= proc_dointvec,
+		},
+		{
 			.procname	= "accept_ra_rt_info_max_plen",
 			.data		= &ipv6_devconf.accept_ra_rt_info_max_plen,
 			.maxlen		= sizeof(int),
@@ -5638,6 +5730,13 @@
 #endif
 #endif
 		{
+			.procname	= "accept_ra_rt_table",
+			.data		= &ipv6_devconf.accept_ra_rt_table,
+			.maxlen		= sizeof(int),
+			.mode		= 0644,
+			.proc_handler	= proc_dointvec,
+		},
+		{
 			.procname	= "proxy_ndp",
 			.data		= &ipv6_devconf.proxy_ndp,
 			.maxlen		= sizeof(int),
@@ -5726,6 +5825,13 @@
 			.maxlen		= sizeof(int),
 			.mode		= 0644,
 			.proc_handler	= proc_dointvec,
+		},
+		{
+			.procname	= "accept_ra_prefix_route",
+			.data		= &ipv6_devconf.accept_ra_prefix_route,
+			.maxlen		= sizeof(int),
+			.mode		= 0644,
+			.proc_handler	= proc_dointvec,
 		},
 		{
 			.procname	= "stable_secret",
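
Two of the addrconf additions above are easier to read with worked numbers: the RFC 3315 §14 backoff jitters router-solicitation intervals, and accept_ra_rt_table picks the routing table for RA-learned routes. Illustrative values:

	/* rfc3315_s14_backoff_init(4 * HZ)        -> 3.6s .. 4.4s (0.9x..1.1x)
	 * rfc3315_s14_backoff_update(4s, mrt)     -> 7.6s .. 8.4s (1.9x..2.1x);
	 *   once the doubled value would exceed mrt, it is re-drawn as
	 *   0.9x..1.1x of mrt instead.
	 *
	 * accept_ra_rt_table = -1000, ifindex = 3 -> table 1003
	 * accept_ra_rt_table = 99                 -> table 99
	 * accept_ra_rt_table = 0                  -> RT6_TABLE_{PREFIX,INFO,DFLT}
	 */
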
diff -ruw linux-4.4.115/net/ipv6/af_inet6.c linux-4.4.115-fbx/net/ipv6/af_inet6.c
--- linux-4.4.115/net/ipv6/af_inet6.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/net/ipv6/af_inet6.c	2019-10-29 09:26:25.873225001 +0100
@@ -64,6 +64,20 @@
 #include <asm/uaccess.h>
 #include <linux/mroute6.h>
 
+#ifdef CONFIG_ANDROID_PARANOID_NETWORK
+#include <linux/android_aid.h>
+
+static inline int current_has_network(void)
+{
+	return in_egroup_p(AID_INET) || capable(CAP_NET_RAW);
+}
+#else
+static inline int current_has_network(void)
+{
+	return 1;
+}
+#endif
+
 MODULE_AUTHOR("Cast of dozens");
 MODULE_DESCRIPTION("IPv6 protocol stack for Linux");
 MODULE_LICENSE("GPL");
@@ -112,6 +126,9 @@
 	if (protocol < 0 || protocol >= IPPROTO_MAX)
 		return -EINVAL;
 
+	if (!current_has_network())
+		return -EACCES;
+
 	/* Look for the requested type/protocol pair. */
 lookup_protocol:
 	err = -ESOCKTNOSUPPORT;
@@ -158,8 +175,7 @@
 	}
 
 	err = -EPERM;
-	if (sock->type == SOCK_RAW && !kern &&
-	    !ns_capable(net->user_ns, CAP_NET_RAW))
+	if (sock->type == SOCK_RAW && !kern && !capable(CAP_NET_RAW))
 		goto out_rcu_unlock;
 
 	sock->ops = answer->ops;
@@ -661,6 +677,7 @@
 		fl6.flowi6_mark = sk->sk_mark;
 		fl6.fl6_dport = inet->inet_dport;
 		fl6.fl6_sport = inet->inet_sport;
+		fl6.flowi6_uid = sk->sk_uid;
 		security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
 
 		rcu_read_lock();
diff -ruw linux-4.4.115/net/ipv6/datagram.c linux-4.4.115-fbx/net/ipv6/datagram.c
--- linux-4.4.115/net/ipv6/datagram.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/net/ipv6/datagram.c	2019-10-29 09:26:25.873225001 +0100
@@ -165,6 +165,7 @@
 	fl6.flowi6_mark = sk->sk_mark;
 	fl6.fl6_dport = inet->inet_dport;
 	fl6.fl6_sport = inet->inet_sport;
+	fl6.flowi6_uid = sk->sk_uid;
 
 	if (!fl6.flowi6_oif)
 		fl6.flowi6_oif = np->sticky_pktinfo.ipi6_ifindex;
@@ -183,6 +184,11 @@
 	err = 0;
 	if (IS_ERR(dst)) {
 		err = PTR_ERR(dst);
+		/* Reset daddr and dport so that udp_v6_early_demux()
+		 * fails to find this socket
+		 */
+		memset(&sk->sk_v6_daddr, 0, sizeof(sk->sk_v6_daddr));
+		inet->inet_dport = 0;
 		goto out;
 	}
 
@@ -967,9 +973,14 @@
 			     __u16 srcp, __u16 destp, int bucket)
 {
 	const struct in6_addr *dest, *src;
+	__u8 state = sp->sk_state;
 
 	dest  = &sp->sk_v6_daddr;
 	src   = &sp->sk_v6_rcv_saddr;
+
+	if (inet_sk(sp) && inet_sk(sp)->transparent)
+		state |= 0x80;
+
 	seq_printf(seq,
 		   "%5d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
 		   "%02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %d\n",
@@ -978,7 +989,7 @@
 		   src->s6_addr32[2], src->s6_addr32[3], srcp,
 		   dest->s6_addr32[0], dest->s6_addr32[1],
 		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
-		   sp->sk_state,
+		   state,
 		   sk_wmem_alloc_get(sp),
 		   sk_rmem_alloc_get(sp),
 		   0, 0L, 0,
diff -ruw linux-4.4.115/net/ipv6/exthdrs_core.c linux-4.4.115-fbx/net/ipv6/exthdrs_core.c
--- linux-4.4.115/net/ipv6/exthdrs_core.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/net/ipv6/exthdrs_core.c	2019-01-22 16:16:29.007296248 +0100
@@ -166,15 +166,15 @@
  * to explore inner IPv6 header, eg. ICMPv6 error messages.
  *
  * If target header is found, its offset is set in *offset and return protocol
- * number. Otherwise, return -1.
+ * number. Otherwise, return -ENOENT or -EBADMSG.
  *
  * If the first fragment doesn't contain the final protocol header or
  * NEXTHDR_NONE it is considered invalid.
  *
  * Note that non-1st fragment is special case that "the protocol number
  * of last header" is "next header" field in Fragment header. In this case,
- * *offset is meaningless and fragment offset is stored in *fragoff if fragoff
- * isn't NULL.
+ * *offset is meaningless. If fragoff is not NULL, the fragment offset is
+ * stored in *fragoff; if it is NULL, return -EINVAL.
  *
  * if flags is not NULL and it's a fragment, then the frag flag
  * IP6_FH_F_FRAG will be set. If it's an AH header, the
@@ -253,9 +253,12 @@
 				if (target < 0 &&
 				    ((!ipv6_ext_hdr(hp->nexthdr)) ||
 				     hp->nexthdr == NEXTHDR_NONE)) {
-					if (fragoff)
+					if (fragoff) {
 						*fragoff = _frag_off;
 					return hp->nexthdr;
+					} else {
+						return -EINVAL;
+					}
 				}
 				if (!found)
 					return -ENOENT;
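
The clarified contract above matters to callers of ipv6_find_hdr(): for a non-first fragment the header offset is meaningless, so a caller that passes a NULL fragoff now gets -EINVAL instead of a next-header value it cannot safely use. Callers that can handle fragments pass a fragoff pointer, as in this hedged caller sketch (kernel context, 4.4-era signature assumed):

	unsigned int thoff = 0;
	unsigned short frag_off = 0;
	int proto;

	proto = ipv6_find_hdr(skb, &thoff, IPPROTO_TCP, &frag_off, NULL);
	if (proto < 0)		/* -ENOENT, -EBADMSG, or now -EINVAL */
		return proto;
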
diff -ruw linux-4.4.115/net/ipv6/icmp.c linux-4.4.115-fbx/net/ipv6/icmp.c
--- linux-4.4.115/net/ipv6/icmp.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/net/ipv6/icmp.c	2019-01-22 16:16:29.007296248 +0100
@@ -92,13 +92,14 @@
 	struct net *net = dev_net(skb->dev);
 
 	if (type == ICMPV6_PKT_TOOBIG)
-		ip6_update_pmtu(skb, net, info, 0, 0);
+		ip6_update_pmtu(skb, net, info, 0, 0, sock_net_uid(net, NULL));
 	else if (type == NDISC_REDIRECT)
-		ip6_redirect(skb, net, skb->dev->ifindex, 0);
+		ip6_redirect(skb, net, skb->dev->ifindex, 0,
+			     sock_net_uid(net, NULL));
 
 	if (!(type & ICMPV6_INFOMSG_MASK))
 		if (icmp6->icmp6_type == ICMPV6_ECHO_REQUEST)
-			ping_err(skb, offset, info);
+			ping_err(skb, offset, ntohl(info));
 }
 
 static int icmpv6_rcv(struct sk_buff *skb);
@@ -478,6 +479,7 @@
 	fl6.flowi6_oif = iif;
 	fl6.fl6_icmp_type = type;
 	fl6.fl6_icmp_code = code;
+	fl6.flowi6_uid = sock_net_uid(net, NULL);
 	security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));
 
 	sk = icmpv6_xmit_lock(net);
@@ -585,6 +587,7 @@
 	fl6.flowi6_oif = l3mdev_fib_oif(skb->dev);
 	fl6.fl6_icmp_type = ICMPV6_ECHO_REPLY;
 	fl6.flowi6_mark = mark;
+	fl6.flowi6_uid = sock_net_uid(net, NULL);
 	security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));
 
 	sk = icmpv6_xmit_lock(net);
diff -ruw linux-4.4.115/net/ipv6/inet6_connection_sock.c linux-4.4.115-fbx/net/ipv6/inet6_connection_sock.c
--- linux-4.4.115/net/ipv6/inet6_connection_sock.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/net/ipv6/inet6_connection_sock.c	2019-01-22 16:16:29.007296248 +0100
@@ -86,6 +86,7 @@
 	fl6->flowi6_mark = ireq->ir_mark;
 	fl6->fl6_dport = ireq->ir_rmt_port;
 	fl6->fl6_sport = htons(ireq->ir_num);
+	fl6->flowi6_uid = sk->sk_uid;
 	security_req_classify_flow(req, flowi6_to_flowi(fl6));
 
 	dst = ip6_dst_lookup_flow(sk, fl6, final_p);
@@ -134,6 +135,7 @@
 	fl6->flowi6_mark = sk->sk_mark;
 	fl6->fl6_sport = inet->inet_sport;
 	fl6->fl6_dport = inet->inet_dport;
+	fl6->flowi6_uid = sk->sk_uid;
 	security_sk_classify_flow(sk, flowi6_to_flowi(fl6));
 
 	rcu_read_lock();
diff -ruw linux-4.4.115/net/ipv6/ip6_fib.c linux-4.4.115-fbx/net/ipv6/ip6_fib.c
--- linux-4.4.115/net/ipv6/ip6_fib.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/net/ipv6/ip6_fib.c	2019-01-22 16:16:29.011296285 +0100
@@ -909,6 +909,7 @@
 			fn->fn_flags |= RTN_RTINFO;
 		}
 		nsiblings = iter->rt6i_nsiblings;
+		iter->rt6i_node = NULL;
 		fib6_purge_rt(iter, fn, info->nl_net);
 		if (fn->rr_ptr == iter)
 			fn->rr_ptr = NULL;
@@ -923,6 +924,7 @@
 					break;
 				if (rt6_qualify_for_ecmp(iter)) {
 					*ins = iter->dst.rt6_next;
+					iter->rt6i_node = NULL;
 					fib6_purge_rt(iter, fn, info->nl_net);
 					if (fn->rr_ptr == iter)
 						fn->rr_ptr = NULL;
diff -ruw linux-4.4.115/net/ipv6/ip6_offload.c linux-4.4.115-fbx/net/ipv6/ip6_offload.c
--- linux-4.4.115/net/ipv6/ip6_offload.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/net/ipv6/ip6_offload.c	2019-10-29 09:26:25.877225040 +0100
@@ -242,9 +242,6 @@
 		/* flush if Traffic Class fields are different */
 		NAPI_GRO_CB(p)->flush |= !!(first_word & htonl(0x0FF00000));
 		NAPI_GRO_CB(p)->flush |= flush;
-
-		/* Clear flush_id, there's really no concept of ID in IPv6. */
-		NAPI_GRO_CB(p)->flush_id = 0;
 	}
 
 	NAPI_GRO_CB(skb)->flush |= flush;
diff -ruw linux-4.4.115/net/ipv6/ip6_output.c linux-4.4.115-fbx/net/ipv6/ip6_output.c
--- linux-4.4.115/net/ipv6/ip6_output.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/net/ipv6/ip6_output.c	2019-10-29 09:26:25.877225040 +0100
@@ -1371,7 +1371,7 @@
 	    (((length + (skb ? skb->len : headersize)) > mtu) &&
 	    (skb_queue_len(queue) <= 1) &&
 	    (sk->sk_protocol == IPPROTO_UDP) &&
-	    (rt->dst.dev->features & NETIF_F_UFO) &&
+	    (rt->dst.dev->features & NETIF_F_UFO) && !dst_xfrm(&rt->dst) &&
 	    (sk->sk_type == SOCK_DGRAM) && !udp_get_no_check6_tx(sk))) {
 		err = ip6_ufo_append_data(sk, queue, getfrag, from, length,
 					  hh_len, fragheaderlen, exthdrlen,
diff -ruw linux-4.4.115/net/ipv6/ndisc.c linux-4.4.115-fbx/net/ipv6/ndisc.c
--- linux-4.4.115/net/ipv6/ndisc.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/net/ipv6/ndisc.c	2019-10-29 09:26:25.881225079 +0100
@@ -1358,6 +1358,8 @@
 			if (ri->prefix_len == 0 &&
 			    !in6_dev->cnf.accept_ra_defrtr)
 				continue;
+			if (ri->prefix_len < in6_dev->cnf.accept_ra_rt_info_min_plen)
+				continue;
 			if (ri->prefix_len > in6_dev->cnf.accept_ra_rt_info_max_plen)
 				continue;
 			rt6_route_rcv(skb->dev, (u8 *)p, (p->nd_opt_len) << 3,
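
The new knob gives a lower bound to pair with accept_ra_rt_info_max_plen: a route information option is accepted only when min_plen <= prefix_len <= max_plen. As a sketch, assuming the accept_ra_rt_info_min_plen devconf field this hunk relies on:

	static bool example_rio_plen_ok(int plen, const struct ipv6_devconf *cnf)
	{
		/* e.g. min_plen = 48, max_plen = 64 accepts only /48../64 RIOs */
		return plen >= cnf->accept_ra_rt_info_min_plen &&
		       plen <= cnf->accept_ra_rt_info_max_plen;
	}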
diff -ruw linux-4.4.115/net/ipv6/netfilter/ip6_tables.c linux-4.4.115-fbx/net/ipv6/netfilter/ip6_tables.c
--- linux-4.4.115/net/ipv6/netfilter/ip6_tables.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/net/ipv6/netfilter/ip6_tables.c	2019-10-29 09:26:25.885225119 +0100
@@ -94,23 +94,27 @@
 {
 	unsigned long ret;
 	const struct ipv6hdr *ipv6 = ipv6_hdr(skb);
+#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES_128)
+	const __uint128_t *ulm1 = (const __uint128_t *)&ip6info->smsk;
+	const __uint128_t *ulm2 = (const __uint128_t *)&ip6info->dmsk;
+#endif
 
 #define FWINV(bool, invflg) ((bool) ^ !!(ip6info->invflags & (invflg)))
 
-	if (FWINV(ipv6_masked_addr_cmp(&ipv6->saddr, &ip6info->smsk,
-				       &ip6info->src), IP6T_INV_SRCIP) ||
+#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES_128)
+	if (*ulm1 || *ulm2)
+#endif
+	{
+		if (FWINV(ipv6_masked_addr_cmp
+			  (&ipv6->saddr, &ip6info->smsk, &ip6info->src),
+			   IP6T_INV_SRCIP) ||
 	    FWINV(ipv6_masked_addr_cmp(&ipv6->daddr, &ip6info->dmsk,
-				       &ip6info->dst), IP6T_INV_DSTIP)) {
+					       &ip6info->dst),
+			  IP6T_INV_DSTIP)) {
 		dprintf("Source or dest mismatch.\n");
-/*
-		dprintf("SRC: %u. Mask: %u. Target: %u.%s\n", ip->saddr,
-			ipinfo->smsk.s_addr, ipinfo->src.s_addr,
-			ipinfo->invflags & IP6T_INV_SRCIP ? " (INV)" : "");
-		dprintf("DST: %u. Mask: %u. Target: %u.%s\n", ip->daddr,
-			ipinfo->dmsk.s_addr, ipinfo->dst.s_addr,
-			ipinfo->invflags & IP6T_INV_DSTIP ? " (INV)" : "");*/
 		return false;
 	}
+	}
 
 	ret = ifname_compare_aligned(indev, ip6info->iniface, ip6info->iniface_mask);
 
@@ -425,6 +429,10 @@
 			}
 			if (table_base + v != ip6t_next_entry(e) &&
 			    !(e->ipv6.flags & IP6T_F_GOTO)) {
+				if (unlikely(stackidx >= private->stacksize)) {
+					verdict = NF_DROP;
+					break;
+				}
 				jumpstack[stackidx++] = e;
 			}
 
diff -ruw linux-4.4.115/net/ipv6/netfilter/Kconfig linux-4.4.115-fbx/net/ipv6/netfilter/Kconfig
--- linux-4.4.115/net/ipv6/netfilter/Kconfig	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/net/ipv6/netfilter/Kconfig	2019-01-22 16:16:29.019296357 +0100
@@ -135,6 +135,18 @@
 
 if IP6_NF_IPTABLES
 
+config IP6_NF_IPTABLES_128
+	tristate "128 bit arithmetic for iptables matching"
+	depends on IP6_NF_IPTABLES
+	help
+	  This enables 128-bit arithmetic in ip6tables address matching to
+	  optimize the common case where no address match is required.
+	  ip6tables always applies a mask when an address is specified in a
+	  rule; checking first whether the mask is all-zero avoids the
+	  masked comparison entirely and improves performance.
+
+	  Note that this depends on architecture support. If unsure, say N.
+
 # The simple matches.
 config IP6_NF_MATCH_AH
 	tristate '"ah" match support'
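
The fast path this option enables can be sketched in isolation: both 16-byte masks are read as single 128-bit words, and the common all-zero case (no address given in the rule) skips the masked comparison. The availability of __uint128_t is what makes the option architecture-dependent:

	#include <linux/in6.h>

	static bool example_masks_all_zero(const struct in6_addr *smsk,
					   const struct in6_addr *dmsk)
	{
		const __uint128_t *m1 = (const __uint128_t *)smsk;
		const __uint128_t *m2 = (const __uint128_t *)dmsk;

		/* no mask bits set: the address comparison cannot fail */
		return !(*m1 | *m2);
	}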
diff -ruw linux-4.4.115/net/ipv6/netfilter.c linux-4.4.115-fbx/net/ipv6/netfilter.c
--- linux-4.4.115/net/ipv6/netfilter.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/net/ipv6/netfilter.c	2019-01-22 16:16:29.019296357 +0100
@@ -26,6 +26,7 @@
 	struct flowi6 fl6 = {
 		.flowi6_oif = skb->sk ? skb->sk->sk_bound_dev_if : 0,
 		.flowi6_mark = skb->mark,
+		.flowi6_uid = sock_net_uid(net, skb->sk),
 		.daddr = iph->daddr,
 		.saddr = iph->saddr,
 	};
diff -ruw linux-4.4.115/net/ipv6/ping.c linux-4.4.115-fbx/net/ipv6/ping.c
--- linux-4.4.115/net/ipv6/ping.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/net/ipv6/ping.c	2019-10-29 09:26:25.885225119 +0100
@@ -84,7 +84,7 @@
 	struct icmp6hdr user_icmph;
 	int addr_type;
 	struct in6_addr *daddr;
-	int iif = 0;
+	int oif = 0;
 	struct flowi6 fl6;
 	int err;
 	int hlimit;
@@ -106,25 +106,30 @@
 		if (u->sin6_family != AF_INET6) {
 			return -EAFNOSUPPORT;
 		}
-		if (sk->sk_bound_dev_if &&
-		    sk->sk_bound_dev_if != u->sin6_scope_id) {
-			return -EINVAL;
-		}
 		daddr = &(u->sin6_addr);
-		iif = u->sin6_scope_id;
+		if (__ipv6_addr_needs_scope_id(ipv6_addr_type(daddr)))
+			oif = u->sin6_scope_id;
 	} else {
 		if (sk->sk_state != TCP_ESTABLISHED)
 			return -EDESTADDRREQ;
 		daddr = &sk->sk_v6_daddr;
 	}
 
-	if (!iif)
-		iif = sk->sk_bound_dev_if;
+	if (!oif)
+		oif = sk->sk_bound_dev_if;
+
+	if (!oif)
+		oif = np->sticky_pktinfo.ipi6_ifindex;
+
+	if (!oif && ipv6_addr_is_multicast(daddr))
+		oif = np->mcast_oif;
+	else if (!oif)
+		oif = np->ucast_oif;
 
 	addr_type = ipv6_addr_type(daddr);
-	if (__ipv6_addr_needs_scope_id(addr_type) && !iif)
-		return -EINVAL;
-	if (addr_type & IPV6_ADDR_MAPPED)
+	if ((__ipv6_addr_needs_scope_id(addr_type) && !oif) ||
+	    (addr_type & IPV6_ADDR_MAPPED) ||
+	    (oif && sk->sk_bound_dev_if && oif != sk->sk_bound_dev_if))
 		return -EINVAL;
 
 	/* TODO: use ip6_datagram_send_ctl to get options from cmsg */
@@ -134,16 +139,13 @@
 	fl6.flowi6_proto = IPPROTO_ICMPV6;
 	fl6.saddr = np->saddr;
 	fl6.daddr = *daddr;
+	fl6.flowi6_oif = oif;
 	fl6.flowi6_mark = sk->sk_mark;
+	fl6.flowi6_uid = sk->sk_uid;
 	fl6.fl6_icmp_type = user_icmph.icmp6_type;
 	fl6.fl6_icmp_code = user_icmph.icmp6_code;
 	security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
 
-	if (!fl6.flowi6_oif && ipv6_addr_is_multicast(&fl6.daddr))
-		fl6.flowi6_oif = np->mcast_oif;
-	else if (!fl6.flowi6_oif)
-		fl6.flowi6_oif = np->ucast_oif;
-
 	dst = ip6_sk_dst_lookup_flow(sk, &fl6,  daddr);
 	if (IS_ERR(dst))
 		return PTR_ERR(dst);
@@ -155,11 +157,6 @@
 		goto dst_err_out;
 	}
 
-	if (!fl6.flowi6_oif && ipv6_addr_is_multicast(&fl6.daddr))
-		fl6.flowi6_oif = np->mcast_oif;
-	else if (!fl6.flowi6_oif)
-		fl6.flowi6_oif = np->ucast_oif;
-
 	pfh.icmph.type = user_icmph.icmp6_type;
 	pfh.icmph.code = user_icmph.icmp6_code;
 	pfh.icmph.checksum = 0;
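
Restated as a standalone sketch, the outgoing-interface selection the rewritten ping code performs, in decreasing order of precedence:

	static int example_pick_oif(const struct sock *sk,
				    const struct ipv6_pinfo *np,
				    const struct in6_addr *daddr, int scope_id)
	{
		int oif = 0;

		if (__ipv6_addr_needs_scope_id(ipv6_addr_type(daddr)))
			oif = scope_id;				/* sin6_scope_id */
		if (!oif)
			oif = sk->sk_bound_dev_if;		/* SO_BINDTODEVICE */
		if (!oif)
			oif = np->sticky_pktinfo.ipi6_ifindex;	/* sticky IPV6_PKTINFO */
		if (!oif && ipv6_addr_is_multicast(daddr))
			oif = np->mcast_oif;			/* IPV6_MULTICAST_IF */
		else if (!oif)
			oif = np->ucast_oif;			/* IPV6_UNICAST_IF */
		return oif;
	}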
diff -ruw linux-4.4.115/net/ipv6/raw.c linux-4.4.115-fbx/net/ipv6/raw.c
--- linux-4.4.115/net/ipv6/raw.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/net/ipv6/raw.c	2019-10-29 09:26:25.885225119 +0100
@@ -774,6 +774,7 @@
 	memset(&fl6, 0, sizeof(fl6));
 
 	fl6.flowi6_mark = sk->sk_mark;
+	fl6.flowi6_uid = sk->sk_uid;
 
 	if (sin6) {
 		if (addr_len < SIN6_LEN_RFC2133)
diff -ruw linux-4.4.115/net/ipv6/route.c linux-4.4.115-fbx/net/ipv6/route.c
--- linux-4.4.115/net/ipv6/route.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/net/ipv6/route.c	2019-10-29 09:26:25.889225158 +0100
@@ -99,13 +99,12 @@
 static int rt6_score_route(struct rt6_info *rt, int oif, int strict);
 
 #ifdef CONFIG_IPV6_ROUTE_INFO
-static struct rt6_info *rt6_add_route_info(struct net *net,
+static struct rt6_info *rt6_add_route_info(struct net_device *dev,
 					   const struct in6_addr *prefix, int prefixlen,
-					   const struct in6_addr *gwaddr, int ifindex,
-					   unsigned int pref);
-static struct rt6_info *rt6_get_route_info(struct net *net,
+					   const struct in6_addr *gwaddr, unsigned int pref);
+static struct rt6_info *rt6_get_route_info(struct net_device *dev,
 					   const struct in6_addr *prefix, int prefixlen,
-					   const struct in6_addr *gwaddr, int ifindex);
+					   const struct in6_addr *gwaddr);
 #endif
 
 struct uncached_list {
@@ -392,17 +391,14 @@
 	struct net_device *loopback_dev =
 		dev_net(dev)->loopback_dev;
 
-	if (dev != loopback_dev) {
-		if (idev && idev->dev == dev) {
-			struct inet6_dev *loopback_idev =
-				in6_dev_get(loopback_dev);
+	if (idev && idev->dev != loopback_dev) {
+		struct inet6_dev *loopback_idev = in6_dev_get(loopback_dev);
 			if (loopback_idev) {
 				rt->rt6i_idev = loopback_idev;
 				in6_dev_put(idev);
 			}
 		}
 	}
-}
 
 static bool __rt6_check_expired(const struct rt6_info *rt)
 {
@@ -755,7 +751,6 @@
 int rt6_route_rcv(struct net_device *dev, u8 *opt, int len,
 		  const struct in6_addr *gwaddr)
 {
-	struct net *net = dev_net(dev);
 	struct route_info *rinfo = (struct route_info *) opt;
 	struct in6_addr prefix_buf, *prefix;
 	unsigned int pref;
@@ -800,8 +795,7 @@
 	if (rinfo->prefix_len == 0)
 		rt = rt6_get_dflt_router(gwaddr, dev);
 	else
-		rt = rt6_get_route_info(net, prefix, rinfo->prefix_len,
-					gwaddr, dev->ifindex);
+		rt = rt6_get_route_info(dev, prefix, rinfo->prefix_len,	gwaddr);
 
 	if (rt && !lifetime) {
 		ip6_del_rt(rt);
@@ -809,8 +803,7 @@
 	}
 
 	if (!rt && lifetime)
-		rt = rt6_add_route_info(net, prefix, rinfo->prefix_len, gwaddr, dev->ifindex,
-					pref);
+		rt = rt6_add_route_info(dev, prefix, rinfo->prefix_len, gwaddr, pref);
 	else if (rt)
 		rt->rt6i_flags = RTF_ROUTEINFO |
 				 (rt->rt6i_flags & ~RTF_PREF_MASK) | RTF_PREF(pref);
@@ -1395,7 +1388,7 @@
 }
 
 void ip6_update_pmtu(struct sk_buff *skb, struct net *net, __be32 mtu,
-		     int oif, u32 mark)
+		     int oif, u32 mark, kuid_t uid)
 {
 	const struct ipv6hdr *iph = (struct ipv6hdr *) skb->data;
 	struct dst_entry *dst;
@@ -1407,6 +1400,7 @@
 	fl6.daddr = iph->daddr;
 	fl6.saddr = iph->saddr;
 	fl6.flowlabel = ip6_flowinfo(iph);
+	fl6.flowi6_uid = uid;
 
 	dst = ip6_route_output(net, NULL, &fl6);
 	if (!dst->error)
@@ -1418,7 +1412,7 @@
 void ip6_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, __be32 mtu)
 {
 	ip6_update_pmtu(skb, sock_net(sk), mtu,
-			sk->sk_bound_dev_if, sk->sk_mark);
+			sk->sk_bound_dev_if, sk->sk_mark, sk->sk_uid);
 }
 EXPORT_SYMBOL_GPL(ip6_sk_update_pmtu);
 
@@ -1499,7 +1493,8 @@
 				flags, __ip6_route_redirect);
 }
 
-void ip6_redirect(struct sk_buff *skb, struct net *net, int oif, u32 mark)
+void ip6_redirect(struct sk_buff *skb, struct net *net, int oif, u32 mark,
+		  kuid_t uid)
 {
 	const struct ipv6hdr *iph = (struct ipv6hdr *) skb->data;
 	struct dst_entry *dst;
@@ -1512,6 +1507,7 @@
 	fl6.daddr = iph->daddr;
 	fl6.saddr = iph->saddr;
 	fl6.flowlabel = ip6_flowinfo(iph);
+	fl6.flowi6_uid = uid;
 
 	dst = ip6_route_redirect(net, &fl6, &ipv6_hdr(skb)->saddr);
 	rt6_do_redirect(dst, NULL, skb);
@@ -1533,6 +1529,7 @@
 	fl6.flowi6_mark = mark;
 	fl6.daddr = msg->dest;
 	fl6.saddr = iph->daddr;
+	fl6.flowi6_uid = sock_net_uid(net, NULL);
 
 	dst = ip6_route_redirect(net, &fl6, &iph->saddr);
 	rt6_do_redirect(dst, NULL, skb);
@@ -1541,7 +1538,8 @@
 
 void ip6_sk_redirect(struct sk_buff *skb, struct sock *sk)
 {
-	ip6_redirect(skb, sock_net(sk), sk->sk_bound_dev_if, sk->sk_mark);
+	ip6_redirect(skb, sock_net(sk), sk->sk_bound_dev_if, sk->sk_mark,
+		     sk->sk_uid);
 }
 EXPORT_SYMBOL_GPL(ip6_sk_redirect);
 
@@ -2263,15 +2261,16 @@
 }
 
 #ifdef CONFIG_IPV6_ROUTE_INFO
-static struct rt6_info *rt6_get_route_info(struct net *net,
+static struct rt6_info *rt6_get_route_info(struct net_device *dev,
 					   const struct in6_addr *prefix, int prefixlen,
-					   const struct in6_addr *gwaddr, int ifindex)
+					   const struct in6_addr *gwaddr)
 {
 	struct fib6_node *fn;
 	struct rt6_info *rt = NULL;
 	struct fib6_table *table;
 
-	table = fib6_get_table(net, RT6_TABLE_INFO);
+	table = fib6_get_table(dev_net(dev),
+			       addrconf_rt_table(dev, RT6_TABLE_INFO));
 	if (!table)
 		return NULL;
 
@@ -2281,7 +2280,7 @@
 		goto out;
 
 	for (rt = fn->leaf; rt; rt = rt->dst.rt6_next) {
-		if (rt->dst.dev->ifindex != ifindex)
+		if (rt->dst.dev->ifindex != dev->ifindex)
 			continue;
 		if ((rt->rt6i_flags & (RTF_ROUTEINFO|RTF_GATEWAY)) != (RTF_ROUTEINFO|RTF_GATEWAY))
 			continue;
@@ -2295,23 +2294,22 @@
 	return rt;
 }
 
-static struct rt6_info *rt6_add_route_info(struct net *net,
+static struct rt6_info *rt6_add_route_info(struct net_device *dev,
 					   const struct in6_addr *prefix, int prefixlen,
-					   const struct in6_addr *gwaddr, int ifindex,
-					   unsigned int pref)
+					   const struct in6_addr *gwaddr, unsigned int pref)
 {
 	struct fib6_config cfg = {
 		.fc_metric	= IP6_RT_PRIO_USER,
-		.fc_ifindex	= ifindex,
+		.fc_ifindex	= dev->ifindex,
 		.fc_dst_len	= prefixlen,
 		.fc_flags	= RTF_GATEWAY | RTF_ADDRCONF | RTF_ROUTEINFO |
 				  RTF_UP | RTF_PREF(pref),
 		.fc_nlinfo.portid = 0,
 		.fc_nlinfo.nlh = NULL,
-		.fc_nlinfo.nl_net = net,
+		.fc_nlinfo.nl_net = dev_net(dev),
 	};
 
-	cfg.fc_table = l3mdev_fib_table_by_index(net, ifindex) ? : RT6_TABLE_INFO;
+	cfg.fc_table = l3mdev_fib_table_by_index(dev_net(dev), dev->ifindex) ? : addrconf_rt_table(dev, RT6_TABLE_INFO);
 	cfg.fc_dst = *prefix;
 	cfg.fc_gateway = *gwaddr;
 
@@ -2321,7 +2319,7 @@
 
 	ip6_route_add(&cfg);
 
-	return rt6_get_route_info(net, prefix, prefixlen, gwaddr, ifindex);
+	return rt6_get_route_info(dev, prefix, prefixlen, gwaddr);
 }
 #endif
 
@@ -2330,7 +2328,8 @@
 	struct rt6_info *rt;
 	struct fib6_table *table;
 
-	table = fib6_get_table(dev_net(dev), RT6_TABLE_DFLT);
+	table = fib6_get_table(dev_net(dev),
+			       addrconf_rt_table(dev, RT6_TABLE_MAIN));
 	if (!table)
 		return NULL;
 
@@ -2352,7 +2351,7 @@
 				     unsigned int pref)
 {
 	struct fib6_config cfg = {
-		.fc_table	= l3mdev_fib_table(dev) ? : RT6_TABLE_DFLT,
+		.fc_table	= l3mdev_fib_table(dev) ? : addrconf_rt_table(dev, RT6_TABLE_DFLT),
 		.fc_metric	= IP6_RT_PRIO_USER,
 		.fc_ifindex	= dev->ifindex,
 		.fc_flags	= RTF_GATEWAY | RTF_ADDRCONF | RTF_DEFAULT |
@@ -2369,28 +2368,17 @@
 	return rt6_get_dflt_router(gwaddr, dev);
 }
 
-void rt6_purge_dflt_routers(struct net *net)
-{
-	struct rt6_info *rt;
-	struct fib6_table *table;
-
-	/* NOTE: Keep consistent with rt6_get_dflt_router */
-	table = fib6_get_table(net, RT6_TABLE_DFLT);
-	if (!table)
-		return;
 
-restart:
-	read_lock_bh(&table->tb6_lock);
-	for (rt = table->tb6_root.leaf; rt; rt = rt->dst.rt6_next) {
+int rt6_addrconf_purge(struct rt6_info *rt, void *arg) {
 		if (rt->rt6i_flags & (RTF_DEFAULT | RTF_ADDRCONF) &&
-		    (!rt->rt6i_idev || rt->rt6i_idev->cnf.accept_ra != 2)) {
-			dst_hold(&rt->dst);
-			read_unlock_bh(&table->tb6_lock);
-			ip6_del_rt(rt);
-			goto restart;
-		}
+	    (!rt->rt6i_idev || rt->rt6i_idev->cnf.accept_ra != 2))
+		return -1;
+	return 0;
 	}
-	read_unlock_bh(&table->tb6_lock);
+
+void rt6_purge_dflt_routers(struct net *net)
+{
+	fib6_clean_all(net, rt6_addrconf_purge, NULL);
 }
 
 static void rtmsg_to_fib6_config(struct net *net,
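
The rewrite works because fib6_clean_all() walks every FIB table and deletes each route for which the callback returns nonzero, so the old lock/restart loop over RT6_TABLE_DFLT is no longer needed. A sketch of the same contract with a hypothetical cleaner:

	/* fib6_clean_all(net, fn, arg) calls fn for every rt6_info under the
	 * table lock; a nonzero return asks the walker to delete the route.
	 */
	static int example_drop_expired(struct rt6_info *rt, void *arg)
	{
		if ((rt->rt6i_flags & RTF_EXPIRES) &&
		    time_after(jiffies, rt->dst.expires))
			return -1;	/* delete */
		return 0;		/* keep */
	}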
@@ -2715,6 +2703,7 @@
 	[RTA_PREF]              = { .type = NLA_U8 },
 	[RTA_ENCAP_TYPE]	= { .type = NLA_U16 },
 	[RTA_ENCAP]		= { .type = NLA_NESTED },
+	[RTA_UID]		= { .type = NLA_U32 },
 };
 
 static int rtm_to_fib6_config(struct sk_buff *skb, struct nlmsghdr *nlh,
@@ -3220,6 +3209,10 @@
 {
 	struct rt6_rtnl_dump_arg *arg = (struct rt6_rtnl_dump_arg *) p_arg;
 	int prefix;
+	struct net *net = arg->net;
+
+	if (rt == net->ipv6.ip6_null_entry)
+		return 0;
 
 	if (nlmsg_len(arg->cb->nlh) >= sizeof(struct rtmsg)) {
 		struct rtmsg *rtm = nlmsg_data(arg->cb->nlh);
@@ -3227,7 +3220,7 @@
 	} else
 		prefix = 0;
 
-	return rt6_fill_node(arg->net,
+	return rt6_fill_node(net,
 		     arg->skb, rt, NULL, NULL, 0, RTM_NEWROUTE,
 		     NETLINK_CB(arg->cb->skb).portid, arg->cb->nlh->nlmsg_seq,
 		     prefix, 0, NLM_F_MULTI);
@@ -3273,6 +3266,12 @@
 	if (tb[RTA_MARK])
 		fl6.flowi6_mark = nla_get_u32(tb[RTA_MARK]);
 
+	if (tb[RTA_UID])
+		fl6.flowi6_uid = make_kuid(current_user_ns(),
+					   nla_get_u32(tb[RTA_UID]));
+	else
+		fl6.flowi6_uid = iif ? INVALID_UID : current_uid();
+
 	if (iif) {
 		struct net_device *dev;
 		int flags = 0;
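
The RTA_UID handling distils to a small rule: an explicit attribute wins, output-route queries default to the querying task's UID, and input-interface (iif) queries carry no UID. As a sketch:

	static kuid_t example_lookup_uid(const struct nlattr *uid_attr, int iif)
	{
		if (uid_attr)
			return make_kuid(current_user_ns(), nla_get_u32(uid_attr));
		return iif ? INVALID_UID : current_uid();
	}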
diff -ruw linux-4.4.115/net/ipv6/tcp_ipv6.c linux-4.4.115-fbx/net/ipv6/tcp_ipv6.c
--- linux-4.4.115/net/ipv6/tcp_ipv6.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/net/ipv6/tcp_ipv6.c	2019-10-29 09:26:25.893225197 +0100
@@ -239,6 +239,7 @@
 	fl6.flowi6_mark = sk->sk_mark;
 	fl6.fl6_dport = usin->sin6_port;
 	fl6.fl6_sport = inet->inet_sport;
+	fl6.flowi6_uid = sk->sk_uid;
 
 	opt = rcu_dereference_protected(np->opt, sock_owned_by_user(sk));
 	final_p = fl6_update_dst(&fl6, opt, &final);
@@ -820,6 +821,7 @@
 	fl6.flowi6_mark = IP6_REPLY_MARK(net, skb->mark);
 	fl6.fl6_dport = t1->dest;
 	fl6.fl6_sport = t1->source;
+	fl6.flowi6_uid = sock_net_uid(net, sk && sk_fullsock(sk) ? sk : NULL);
 	security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));
 
 	/* Pass a socket to ip6_dst_lookup either it is for RST
@@ -1721,6 +1723,7 @@
 	const struct fastopen_queue *fastopenq = &icsk->icsk_accept_queue.fastopenq;
 	int rx_queue;
 	int state;
+	__u8 state_seq = sp->sk_state;
 
 	dest  = &sp->sk_v6_daddr;
 	src   = &sp->sk_v6_rcv_saddr;
@@ -1752,6 +1755,9 @@
 		 */
 		rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0);
 
+	if (inet->transparent)
+		state_seq |= 0x80;
+
 	seq_printf(seq,
 		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
 		   "%02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %lu %lu %u %u %d\n",
@@ -1760,7 +1766,7 @@
 		   src->s6_addr32[2], src->s6_addr32[3], srcp,
 		   dest->s6_addr32[0], dest->s6_addr32[1],
 		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
-		   state,
+		   state_seq,
 		   tp->write_seq - tp->snd_una,
 		   rx_queue,
 		   timer_active,
@@ -1915,6 +1921,7 @@
 	.proto_cgroup		= tcp_proto_cgroup,
 #endif
 	.clear_sk		= tcp_v6_clear_sk,
+	.diag_destroy		= tcp_abort,
 };
 
 static const struct inet6_protocol tcpv6_protocol = {
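
With this change the state column of /proc/net/tcp6 doubles as a flag field: bit 7 marks IP_TRANSPARENT sockets, and the low bits keep the TCP state. A hypothetical consumer would decode it as:

	static void example_decode_st(unsigned int st,
				      unsigned int *tcp_state, bool *transparent)
	{
		*transparent = !!(st & 0x80);
		*tcp_state = st & 0x7f;
	}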
diff -ruw linux-4.4.115/net/ipv6/udp.c linux-4.4.115-fbx/net/ipv6/udp.c
--- linux-4.4.115/net/ipv6/udp.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/net/ipv6/udp.c	2019-10-29 09:26:25.893225197 +0100
@@ -45,6 +45,7 @@
 #include <net/tcp_states.h>
 #include <net/ip6_checksum.h>
 #include <net/xfrm.h>
+#include <net/inet_hashtables.h>
 #include <net/inet6_hashtables.h>
 #include <net/busy_poll.h>
 
@@ -958,6 +959,71 @@
 	return 0;
 }
 
+static struct sock *__udp6_lib_demux_lookup(struct net *net,
+			__be16 loc_port, const struct in6_addr *loc_addr,
+			__be16 rmt_port, const struct in6_addr *rmt_addr,
+			int dif)
+{
+	struct sock *sk;
+	struct hlist_nulls_node *hnode;
+	unsigned short hnum = ntohs(loc_port);
+	unsigned int hash2 = udp6_portaddr_hash(net, loc_addr, hnum);
+	unsigned int slot2 = hash2 & udp_table.mask;
+	struct udp_hslot *hslot2 = &udp_table.hash2[slot2];
+
+	const __portpair ports = INET_COMBINED_PORTS(rmt_port, hnum);
+
+	udp_portaddr_for_each_entry_rcu(sk, hnode, &hslot2->head) {
+		if (sk->sk_state == TCP_ESTABLISHED &&
+		    INET6_MATCH(sk, net, rmt_addr, loc_addr, ports, dif))
+			return sk;
+		/* Only check first socket in chain */
+		break;
+	}
+	return NULL;
+}
+
+static void udp_v6_early_demux(struct sk_buff *skb)
+{
+	struct net *net = dev_net(skb->dev);
+	const struct udphdr *uh;
+	struct sock *sk;
+	struct dst_entry *dst;
+	int dif = skb->dev->ifindex;
+
+	if (!pskb_may_pull(skb, skb_transport_offset(skb) +
+	    sizeof(struct udphdr)))
+		return;
+
+	uh = udp_hdr(skb);
+
+	if (skb->pkt_type == PACKET_HOST)
+		sk = __udp6_lib_demux_lookup(net, uh->dest,
+					     &ipv6_hdr(skb)->daddr,
+					     uh->source, &ipv6_hdr(skb)->saddr,
+					     dif);
+	else
+		return;
+
+	if (!sk || !atomic_inc_not_zero_hint(&sk->sk_refcnt, 2))
+		return;
+
+	skb->sk = sk;
+	skb->destructor = sock_efree;
+	dst = READ_ONCE(sk->sk_rx_dst);
+
+	if (dst)
+		dst = dst_check(dst, inet6_sk(sk)->rx_dst_cookie);
+	if (dst) {
+		if (dst->flags & DST_NOCACHE) {
+			if (likely(atomic_inc_not_zero(&dst->__refcnt)))
+				skb_dst_set(skb, dst);
+		} else {
+			skb_dst_set_noref(skb, dst);
+		}
+	}
+}
+
 static __inline__ int udpv6_rcv(struct sk_buff *skb)
 {
 	return __udp6_lib_rcv(skb, &udp_table, IPPROTO_UDP);
@@ -1249,6 +1315,7 @@
 		fl6.flowi6_oif = np->sticky_pktinfo.ipi6_ifindex;
 
 	fl6.flowi6_mark = sk->sk_mark;
+	fl6.flowi6_uid = sk->sk_uid;
 
 	if (msg->msg_controllen) {
 		opt = &opt_space;
@@ -1465,6 +1532,7 @@
 #endif
 
 static const struct inet6_protocol udpv6_protocol = {
+	.early_demux	=	udp_v6_early_demux,
 	.handler	=	udpv6_rcv,
 	.err_handler	=	udpv6_err,
 	.flags		=	INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
@@ -1557,6 +1625,7 @@
 	.compat_getsockopt = compat_udpv6_getsockopt,
 #endif
 	.clear_sk	   = udp_v6_clear_sk,
+	.diag_destroy      = udp_abort,
 };
 
 static struct inet_protosw udpv6_protosw = {
diff -ruw linux-4.4.115/net/ipv6/xfrm6_policy.c linux-4.4.115-fbx/net/ipv6/xfrm6_policy.c
--- linux-4.4.115/net/ipv6/xfrm6_policy.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/net/ipv6/xfrm6_policy.c	2019-10-29 09:26:25.893225197 +0100
@@ -29,15 +29,17 @@
 
 static struct dst_entry *xfrm6_dst_lookup(struct net *net, int tos, int oif,
 					  const xfrm_address_t *saddr,
-					  const xfrm_address_t *daddr)
+					  const xfrm_address_t *daddr,
+					  u32 mark)
 {
 	struct flowi6 fl6;
 	struct dst_entry *dst;
 	int err;
 
 	memset(&fl6, 0, sizeof(fl6));
-	fl6.flowi6_oif = oif;
+	fl6.flowi6_oif = l3mdev_master_ifindex_by_index(net, oif);
 	fl6.flowi6_flags = FLOWI_FLAG_SKIP_NH_OIF;
+	fl6.flowi6_mark = mark;
 	memcpy(&fl6.daddr, daddr, sizeof(fl6.daddr));
 	if (saddr)
 		memcpy(&fl6.saddr, saddr, sizeof(fl6.saddr));
@@ -54,12 +56,13 @@
 }
 
 static int xfrm6_get_saddr(struct net *net, int oif,
-			   xfrm_address_t *saddr, xfrm_address_t *daddr)
+			   xfrm_address_t *saddr, xfrm_address_t *daddr,
+			   u32 mark)
 {
 	struct dst_entry *dst;
 	struct net_device *dev;
 
-	dst = xfrm6_dst_lookup(net, 0, oif, NULL, daddr);
+	dst = xfrm6_dst_lookup(net, 0, oif, NULL, daddr, mark);
 	if (IS_ERR(dst))
 		return -EHOSTUNREACH;
 
diff -ruw linux-4.4.115/net/Kconfig linux-4.4.115-fbx/net/Kconfig
--- linux-4.4.115/net/Kconfig	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/net/Kconfig	2019-10-29 09:26:25.753223827 +0100
@@ -48,6 +48,16 @@
 config NET_INGRESS
 	bool
 
+config DISABLE_NET_SKB_FRAG_CACHE
+	bool "Disable skb fragment cache"
+	help
+	  Enabling this option ensures that when allocating skbs the network
+	  skb fragment cache is not used.
+	  Disabling use of the fragment cache can be useful on some low-end
+	  targets because it reduces memory pressure.
+
+	  If you are unsure how to answer this question, answer N.
+
 menu "Networking options"
 
 source "net/packet/Kconfig"
@@ -86,6 +96,12 @@
 
 endif # if INET
 
+config ANDROID_PARANOID_NETWORK
+	bool "Only allow certain groups to create sockets"
+	default y
+	help
+		none
+
 config NETWORK_SECMARK
 	bool "Security Marking"
 	help
@@ -233,6 +249,7 @@
 source "net/hsr/Kconfig"
 source "net/switchdev/Kconfig"
 source "net/l3mdev/Kconfig"
+source "net/rmnet_data/Kconfig"
 
 config RPS
 	bool
@@ -297,6 +314,15 @@
 	  with many clients some protection against DoS by a single (spoofed)
 	  flow that greatly exceeds average workload.
 
+config SOCKEV_NLMCAST
+	bool "Enable SOCKEV Netlink Multicast"
+	default n
+	---help---
+	  Default client for SOCKEV notifier events. Sends multicast netlink
+	  messages whenever the socket event notifier is invoked. Enable if
+	  user space entities need to be notified of socket events without
+	  having to poll /proc.
+
 menu "Network testing"
 
 config NET_PKTGEN
@@ -383,6 +409,8 @@
 	  weight tunnel endpoint. Tunnel encapsulation parameters are stored
 	  with light weight tunnel state associated with fib routes.
 
+source "net/ipc_router/Kconfig"
+
 endif   # if NET
 
 # Used by archs to tell that they support BPF_JIT
diff -ruw linux-4.4.115/net/key/af_key.c linux-4.4.115-fbx/net/key/af_key.c
--- linux-4.4.115/net/key/af_key.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/net/key/af_key.c	2019-10-29 09:26:25.897225236 +0100
@@ -196,30 +196,22 @@
 	return 0;
 }
 
-static int pfkey_broadcast_one(struct sk_buff *skb, struct sk_buff **skb2,
-			       gfp_t allocation, struct sock *sk)
+static int pfkey_broadcast_one(struct sk_buff *skb, gfp_t allocation,
+			       struct sock *sk)
 {
 	int err = -ENOBUFS;
 
-	sock_hold(sk);
-	if (*skb2 == NULL) {
-		if (atomic_read(&skb->users) != 1) {
-			*skb2 = skb_clone(skb, allocation);
-		} else {
-			*skb2 = skb;
-			atomic_inc(&skb->users);
-		}
-	}
-	if (*skb2 != NULL) {
-		if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf) {
-			skb_set_owner_r(*skb2, sk);
-			skb_queue_tail(&sk->sk_receive_queue, *skb2);
+	if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf)
+		return err;
+
+	skb = skb_clone(skb, allocation);
+
+	if (skb) {
+		skb_set_owner_r(skb, sk);
+		skb_queue_tail(&sk->sk_receive_queue, skb);
 			sk->sk_data_ready(sk);
-			*skb2 = NULL;
 			err = 0;
 		}
-	}
-	sock_put(sk);
 	return err;
 }
 
@@ -234,7 +226,6 @@
 {
 	struct netns_pfkey *net_pfkey = net_generic(net, pfkey_net_id);
 	struct sock *sk;
-	struct sk_buff *skb2 = NULL;
 	int err = -ESRCH;
 
 	/* XXX Do we need something like netlink_overrun?  I think
@@ -253,7 +244,7 @@
 		 * socket.
 		 */
 		if (pfk->promisc)
-			pfkey_broadcast_one(skb, &skb2, GFP_ATOMIC, sk);
+			pfkey_broadcast_one(skb, GFP_ATOMIC, sk);
 
 		/* the exact target will be processed later */
 		if (sk == one_sk)
@@ -268,7 +259,7 @@
 				continue;
 		}
 
-		err2 = pfkey_broadcast_one(skb, &skb2, GFP_ATOMIC, sk);
+		err2 = pfkey_broadcast_one(skb, GFP_ATOMIC, sk);
 
 		/* Error is cleared after successful sending to at least one
 		 * registered KM */
@@ -278,9 +269,8 @@
 	rcu_read_unlock();
 
 	if (one_sk != NULL)
-		err = pfkey_broadcast_one(skb, &skb2, allocation, one_sk);
+		err = pfkey_broadcast_one(skb, allocation, one_sk);
 
-	kfree_skb(skb2);
 	kfree_skb(skb);
 	return err;
 }
diff -ruw linux-4.4.115/net/mac80211/ieee80211_i.h linux-4.4.115-fbx/net/mac80211/ieee80211_i.h
--- linux-4.4.115/net/mac80211/ieee80211_i.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/net/mac80211/ieee80211_i.h	2019-01-22 16:16:29.087296973 +0100
@@ -807,6 +807,7 @@
 struct txq_info {
 	struct sk_buff_head queue;
 	unsigned long flags;
+	unsigned long byte_cnt;
 
 	/* keep last! */
 	struct ieee80211_txq txq;
diff -ruw linux-4.4.115/net/mac80211/iface.c linux-4.4.115-fbx/net/mac80211/iface.c
--- linux-4.4.115/net/mac80211/iface.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/net/mac80211/iface.c	2019-10-29 09:26:25.909225353 +0100
@@ -979,6 +979,7 @@
 
 		spin_lock_bh(&txqi->queue.lock);
 		ieee80211_purge_tx_queue(&local->hw, &txqi->queue);
+		txqi->byte_cnt = 0;
 		spin_unlock_bh(&txqi->queue.lock);
 
 		atomic_set(&sdata->txqs_len[txqi->txq.ac], 0);
diff -ruw linux-4.4.115/net/mac80211/mesh_hwmp.c linux-4.4.115-fbx/net/mac80211/mesh_hwmp.c
--- linux-4.4.115/net/mac80211/mesh_hwmp.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/net/mac80211/mesh_hwmp.c	2019-10-29 09:26:25.909225353 +0100
@@ -530,7 +530,7 @@
 	const u8 *target_addr, *orig_addr;
 	const u8 *da;
 	u8 target_flags, ttl, flags;
-	u32 orig_sn, target_sn, lifetime, target_metric;
+	u32 orig_sn, target_sn, lifetime, target_metric = 0;
 	bool reply = false;
 	bool forward = true;
 	bool root_is_gate;
diff -ruw linux-4.4.115/net/mac80211/mlme.c linux-4.4.115-fbx/net/mac80211/mlme.c
--- linux-4.4.115/net/mac80211/mlme.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/net/mac80211/mlme.c	2019-10-29 09:26:25.913225393 +0100
@@ -4490,20 +4490,20 @@
 		return -EOPNOTSUPP;
 	}
 
-	auth_data = kzalloc(sizeof(*auth_data) + req->sae_data_len +
+	auth_data = kzalloc(sizeof(*auth_data) + req->auth_data_len +
 			    req->ie_len, GFP_KERNEL);
 	if (!auth_data)
 		return -ENOMEM;
 
 	auth_data->bss = req->bss;
 
-	if (req->sae_data_len >= 4) {
-		__le16 *pos = (__le16 *) req->sae_data;
+	if (req->auth_data_len >= 4) {
+		__le16 *pos = (__le16 *) req->auth_data;
 		auth_data->sae_trans = le16_to_cpu(pos[0]);
 		auth_data->sae_status = le16_to_cpu(pos[1]);
-		memcpy(auth_data->data, req->sae_data + 4,
-		       req->sae_data_len - 4);
-		auth_data->data_len += req->sae_data_len - 4;
+		memcpy(auth_data->data, req->auth_data + 4,
+		       req->auth_data_len - 4);
+		auth_data->data_len += req->auth_data_len - 4;
 	}
 
 	if (req->ie && req->ie_len) {
diff -ruw linux-4.4.115/net/mac80211/rx.c linux-4.4.115-fbx/net/mac80211/rx.c
--- linux-4.4.115/net/mac80211/rx.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/net/mac80211/rx.c	2019-10-29 09:26:25.913225393 +0100
@@ -122,7 +122,8 @@
 	hdr = (void *)(skb->data + rtap_vendor_space);
 
 	if (status->flag & (RX_FLAG_FAILED_FCS_CRC |
-			    RX_FLAG_FAILED_PLCP_CRC))
+			    RX_FLAG_FAILED_PLCP_CRC |
+			    RX_FLAG_ONLY_MONITOR))
 		return true;
 
 	if (unlikely(skb->len < 16 + present_fcs_len + rtap_vendor_space))
@@ -507,7 +508,7 @@
 		return NULL;
 	}
 
-	if (!local->monitors) {
+	if (!local->monitors || (status->flag & RX_FLAG_SKIP_MONITOR)) {
 		if (should_drop_frame(origskb, present_fcs_len,
 				      rtap_vendor_space)) {
 			dev_kfree_skb(origskb);
@@ -3478,6 +3479,7 @@
  * be called with rcu_read_lock protection.
  */
 static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw,
+					 struct ieee80211_sta *pubsta,
 					 struct sk_buff *skb,
 					 struct napi_struct *napi)
 {
@@ -3487,7 +3489,6 @@
 	__le16 fc;
 	struct ieee80211_rx_data rx;
 	struct ieee80211_sub_if_data *prev;
-	struct sta_info *sta, *prev_sta;
 	struct rhash_head *tmp;
 	int err = 0;
 
@@ -3523,7 +3524,14 @@
 		     ieee80211_is_beacon(hdr->frame_control)))
 		ieee80211_scan_rx(local, skb);
 
-	if (ieee80211_is_data(fc)) {
+	if (pubsta) {
+		rx.sta = container_of(pubsta, struct sta_info, sta);
+		rx.sdata = rx.sta->sdata;
+		if (ieee80211_prepare_and_rx_handle(&rx, skb, true))
+			return;
+		goto out;
+	} else if (ieee80211_is_data(fc)) {
+		struct sta_info *sta, *prev_sta;
 		const struct bucket_table *tbl;
 
 		prev_sta = NULL;
@@ -3597,8 +3605,8 @@
  * This is the receive path handler. It is called by a low level driver when an
  * 802.11 MPDU is received from the hardware.
  */
-void ieee80211_rx_napi(struct ieee80211_hw *hw, struct sk_buff *skb,
-		       struct napi_struct *napi)
+void ieee80211_rx_napi(struct ieee80211_hw *hw, struct ieee80211_sta *pubsta,
+		       struct sk_buff *skb, struct napi_struct *napi)
 {
 	struct ieee80211_local *local = hw_to_local(hw);
 	struct ieee80211_rate *rate = NULL;
@@ -3697,7 +3705,8 @@
 	ieee80211_tpt_led_trig_rx(local,
 			((struct ieee80211_hdr *)skb->data)->frame_control,
 			skb->len);
-	__ieee80211_rx_handle_packet(hw, skb, napi);
+
+	__ieee80211_rx_handle_packet(hw, pubsta, skb, napi);
 
 	rcu_read_unlock();
 
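
Drivers that have already resolved the station can now hand it straight to mac80211; passing a NULL pubsta keeps the old per-frame lookup. A hypothetical call site:

	static void example_driver_rx(struct ieee80211_hw *hw,
				      struct ieee80211_sta *sta,
				      struct sk_buff *skb,
				      struct napi_struct *napi)
	{
		/* sta may be NULL: mac80211 then falls back to its own lookup */
		ieee80211_rx_napi(hw, sta, skb, napi);
	}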
diff -ruw linux-4.4.115/net/mac80211/sta_info.c linux-4.4.115-fbx/net/mac80211/sta_info.c
--- linux-4.4.115/net/mac80211/sta_info.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/net/mac80211/sta_info.c	2019-01-22 16:16:29.099297082 +0100
@@ -115,6 +115,7 @@
 
 			ieee80211_purge_tx_queue(&local->hw, &txqi->queue);
 			atomic_sub(n, &sdata->txqs_len[txqi->txq.ac]);
+			txqi->byte_cnt = 0;
 		}
 	}
 
diff -ruw linux-4.4.115/net/mac80211/tx.c linux-4.4.115-fbx/net/mac80211/tx.c
--- linux-4.4.115/net/mac80211/tx.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/net/mac80211/tx.c	2019-10-29 09:26:25.917225432 +0100
@@ -1266,7 +1266,11 @@
 	if (atomic_read(&sdata->txqs_len[ac]) >= local->hw.txq_ac_max_pending)
 		netif_stop_subqueue(sdata->dev, ac);
 
-	skb_queue_tail(&txqi->queue, skb);
+	spin_lock_bh(&txqi->queue.lock);
+	txqi->byte_cnt += skb->len;
+	__skb_queue_tail(&txqi->queue, skb);
+	spin_unlock_bh(&txqi->queue.lock);
+
 	drv_wake_tx_queue(local, txqi);
 
 	return;
@@ -1294,6 +1298,8 @@
 	if (!skb)
 		goto out;
 
+	txqi->byte_cnt -= skb->len;
+
 	atomic_dec(&sdata->txqs_len[ac]);
 	if (__netif_subqueue_stopped(sdata->dev, ac))
 		ieee80211_propagate_queue_wake(local, sdata->vif.hw_queue[ac]);
diff -ruw linux-4.4.115/net/mac80211/util.c linux-4.4.115-fbx/net/mac80211/util.c
--- linux-4.4.115/net/mac80211/util.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/net/mac80211/util.c	2019-10-29 09:26:25.917225432 +0100
@@ -3198,10 +3198,11 @@
 	struct ieee80211_local *local = sdata->local;
 	struct ieee80211_sub_if_data *sdata_iter;
 	enum nl80211_iftype iftype = sdata->wdev.iftype;
-	int num[NUM_NL80211_IFTYPES];
 	struct ieee80211_chanctx *ctx;
-	int num_different_channels = 0;
 	int total = 1;
+	struct iface_combination_params params = {
+		.radar_detect = radar_detect,
+	};
 
 	lockdep_assert_held(&local->chanctx_mtx);
 
@@ -3212,9 +3213,6 @@
 		    !chandef->chan))
 		return -EINVAL;
 
-	if (chandef)
-		num_different_channels = 1;
-
 	if (WARN_ON(iftype >= NUM_NL80211_IFTYPES))
 		return -EINVAL;
 
@@ -3225,24 +3223,26 @@
 		return 0;
 	}
 
-	memset(num, 0, sizeof(num));
+	if (chandef)
+		params.num_different_channels = 1;
 
 	if (iftype != NL80211_IFTYPE_UNSPECIFIED)
-		num[iftype] = 1;
+		params.iftype_num[iftype] = 1;
 
 	list_for_each_entry(ctx, &local->chanctx_list, list) {
 		if (ctx->replace_state == IEEE80211_CHANCTX_WILL_BE_REPLACED)
 			continue;
-		radar_detect |= ieee80211_chanctx_radar_detect(local, ctx);
+		params.radar_detect |=
+			ieee80211_chanctx_radar_detect(local, ctx);
 		if (ctx->mode == IEEE80211_CHANCTX_EXCLUSIVE) {
-			num_different_channels++;
+			params.num_different_channels++;
 			continue;
 		}
 		if (chandef && chanmode == IEEE80211_CHANCTX_SHARED &&
 		    cfg80211_chandef_compatible(chandef,
 						&ctx->conf.def))
 			continue;
-		num_different_channels++;
+		params.num_different_channels++;
 	}
 
 	list_for_each_entry_rcu(sdata_iter, &local->interfaces, list) {
@@ -3255,16 +3255,14 @@
 		    local->hw.wiphy->software_iftypes & BIT(wdev_iter->iftype))
 			continue;
 
-		num[wdev_iter->iftype]++;
+		params.iftype_num[wdev_iter->iftype]++;
 		total++;
 	}
 
-	if (total == 1 && !radar_detect)
+	if (total == 1 && !params.radar_detect)
 		return 0;
 
-	return cfg80211_check_combinations(local->hw.wiphy,
-					   num_different_channels,
-					   radar_detect, num);
+	return cfg80211_check_combinations(local->hw.wiphy, &params);
 }
 
 static void
@@ -3280,12 +3278,10 @@
 int ieee80211_max_num_channels(struct ieee80211_local *local)
 {
 	struct ieee80211_sub_if_data *sdata;
-	int num[NUM_NL80211_IFTYPES] = {};
 	struct ieee80211_chanctx *ctx;
-	int num_different_channels = 0;
-	u8 radar_detect = 0;
 	u32 max_num_different_channels = 1;
 	int err;
+	struct iface_combination_params params = {0};
 
 	lockdep_assert_held(&local->chanctx_mtx);
 
@@ -3293,17 +3289,17 @@
 		if (ctx->replace_state == IEEE80211_CHANCTX_WILL_BE_REPLACED)
 			continue;
 
-		num_different_channels++;
+		params.num_different_channels++;
 
-		radar_detect |= ieee80211_chanctx_radar_detect(local, ctx);
+		params.radar_detect |=
+			ieee80211_chanctx_radar_detect(local, ctx);
 	}
 
 	list_for_each_entry_rcu(sdata, &local->interfaces, list)
-		num[sdata->wdev.iftype]++;
+		params.iftype_num[sdata->wdev.iftype]++;
 
-	err = cfg80211_iter_combinations(local->hw.wiphy,
-					 num_different_channels, radar_detect,
-					 num, ieee80211_iter_max_chans,
+	err = cfg80211_iter_combinations(local->hw.wiphy, &params,
+					 ieee80211_iter_max_chans,
 					 &max_num_different_channels);
 	if (err < 0)
 		return err;
@@ -3344,3 +3340,17 @@
 		txqi->txq.ac = IEEE80211_AC_BE;
 	}
 }
+
+void ieee80211_txq_get_depth(struct ieee80211_txq *txq,
+			     unsigned long *frame_cnt,
+			     unsigned long *byte_cnt)
+{
+	struct txq_info *txqi = to_txq_info(txq);
+
+	if (frame_cnt)
+		*frame_cnt = txqi->queue.qlen;
+
+	if (byte_cnt)
+		*byte_cnt = txqi->byte_cnt;
+}
+EXPORT_SYMBOL(ieee80211_txq_get_depth);
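
A hypothetical driver-side use of the new export, throttling a txq by pending bytes rather than frame count:

	#include <net/mac80211.h>

	static bool example_txq_over_budget(struct ieee80211_txq *txq,
					    unsigned long byte_budget)
	{
		unsigned long frames, bytes;

		ieee80211_txq_get_depth(txq, &frames, &bytes);
		return bytes > byte_budget;
	}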
diff -ruw linux-4.4.115/net/Makefile linux-4.4.115-fbx/net/Makefile
--- linux-4.4.115/net/Makefile	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/net/Makefile	2019-01-22 16:16:28.847294800 +0100
@@ -77,3 +77,5 @@
 ifneq ($(CONFIG_NET_L3_MASTER_DEV),)
 obj-y				+= l3mdev/
 endif
+obj-$(CONFIG_IPC_ROUTER)	+= ipc_router/
+obj-$(CONFIG_RMNET_DATA) += rmnet_data/
diff -ruw linux-4.4.115/net/netfilter/core.c linux-4.4.115-fbx/net/netfilter/core.c
--- linux-4.4.115/net/netfilter/core.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/net/netfilter/core.c	2019-01-22 16:16:29.111297190 +0100
@@ -123,7 +123,7 @@
 void nf_unregister_net_hook(struct net *net, const struct nf_hook_ops *reg)
 {
 	struct list_head *hook_list;
-	struct nf_hook_entry *entry;
+	struct nf_hook_entry *entry = NULL;
 	struct nf_hook_ops *elem;
 
 	hook_list = nf_find_hook_list(net, reg);
diff -ruw linux-4.4.115/net/netfilter/Kconfig linux-4.4.115-fbx/net/netfilter/Kconfig
--- linux-4.4.115/net/netfilter/Kconfig	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/net/netfilter/Kconfig	2019-01-22 16:16:29.111297190 +0100
@@ -749,6 +749,20 @@
 
 	  To compile it as a module, choose M here.  If unsure, say N.
 
+config NETFILTER_XT_TARGET_HARDIDLETIMER
+	tristate  "HARDIDLETIMER target support"
+	depends on NETFILTER_ADVANCED
+	help
+
+	  This option adds the `HARDIDLETIMER' target.  Each matching packet
+	  resets the timer associated with the label specified when the rule
+	  is added.  When the timer expires, it triggers a sysfs notification.
+	  The remaining time until expiration can be read via sysfs.
+	  Compared to IDLETIMER, HARDIDLETIMER also sends the notification
+	  when the CPU is suspended.
+
+	  To compile it as a module, choose M here.  If unsure, say N.
+
 config NETFILTER_XT_TARGET_LED
 	tristate '"LED" target support'
 	depends on LEDS_CLASS && LEDS_TRIGGERS
@@ -1278,6 +1292,8 @@
 	based on who created the socket: the user or group. It is also
 	possible to check whether a socket actually exists.
 
+	Conflicts with '"quota, tag, uid" match'
+
 config NETFILTER_XT_MATCH_POLICY
 	tristate 'IPsec "policy" match support'
 	depends on XFRM
@@ -1311,6 +1327,22 @@
 
 	  To compile it as a module, choose M here.  If unsure, say N.
 
+config NETFILTER_XT_MATCH_QTAGUID
+	bool '"quota, tag, owner" match and stats support'
+	depends on NETFILTER_XT_MATCH_SOCKET
+	depends on NETFILTER_XT_MATCH_OWNER=n
+	help
+	  This option replaces the `owner' match. In addition to matching
+	  on uid, it keeps stats based on a tag assigned to a socket.
+	  The full tag is comprised of a UID and an accounting tag.
+	  The tags are assignable to sockets from user space (e.g. a download
+	  manager can assign the socket to another UID for accounting).
+	  Stats and control are done via /proc/net/xt_qtaguid/.
+	  It replaces `owner' as it takes the same arguments, but it should
+	  really be recognized by the iptables tool as well.
+
+	  If unsure, say `N'.
+
 config NETFILTER_XT_MATCH_QUOTA
 	tristate '"quota" match support'
 	depends on NETFILTER_ADVANCED
@@ -1321,6 +1353,29 @@
 	  If you want to compile it as a module, say M here and read
 	  <file:Documentation/kbuild/modules.txt>.  If unsure, say `N'.
 
+config NETFILTER_XT_MATCH_QUOTA2
+	tristate '"quota2" match support'
+	depends on NETFILTER_ADVANCED
+	help
+	  This option adds a `quota2' match, which allows matching on a byte
+	  counter that is kept accurately across CPUs rather than per CPU.
+	  It allows naming the quotas.
+	  This is based on http://xtables-addons.git.sourceforge.net
+
+	  If you want to compile it as a module, say M here and read
+	  <file:Documentation/kbuild/modules.txt>.  If unsure, say `N'.
+
+config NETFILTER_XT_MATCH_QUOTA2_LOG
+	bool '"quota2" Netfilter LOG support'
+	depends on NETFILTER_XT_MATCH_QUOTA2
+	default n
+	help
+	  This option allows `quota2' to log ONCE when a quota limit
+	  is passed. It logs via NETLINK using the NETLINK_NFLOG family.
+	  It logs similarly to how ipt_ULOG would without data.
+
+	  If unsure, say `N'.
+
 config NETFILTER_XT_MATCH_RATEEST
 	tristate '"rateest" match support'
 	depends on NETFILTER_ADVANCED
diff -ruw linux-4.4.115/net/netfilter/Makefile linux-4.4.115-fbx/net/netfilter/Makefile
--- linux-4.4.115/net/netfilter/Makefile	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/net/netfilter/Makefile	2019-01-22 16:16:29.111297190 +0100
@@ -122,6 +122,7 @@
 obj-$(CONFIG_NETFILTER_XT_TARGET_TEE) += xt_TEE.o
 obj-$(CONFIG_NETFILTER_XT_TARGET_TRACE) += xt_TRACE.o
 obj-$(CONFIG_NETFILTER_XT_TARGET_IDLETIMER) += xt_IDLETIMER.o
+obj-$(CONFIG_NETFILTER_XT_TARGET_HARDIDLETIMER) += xt_HARDIDLETIMER.o
 
 # matches
 obj-$(CONFIG_NETFILTER_XT_MATCH_ADDRTYPE) += xt_addrtype.o
@@ -156,7 +157,9 @@
 obj-$(CONFIG_NETFILTER_XT_MATCH_PHYSDEV) += xt_physdev.o
 obj-$(CONFIG_NETFILTER_XT_MATCH_PKTTYPE) += xt_pkttype.o
 obj-$(CONFIG_NETFILTER_XT_MATCH_POLICY) += xt_policy.o
+obj-$(CONFIG_NETFILTER_XT_MATCH_QTAGUID) += xt_qtaguid_print.o xt_qtaguid.o
 obj-$(CONFIG_NETFILTER_XT_MATCH_QUOTA) += xt_quota.o
+obj-$(CONFIG_NETFILTER_XT_MATCH_QUOTA2) += xt_quota2.o
 obj-$(CONFIG_NETFILTER_XT_MATCH_RATEEST) += xt_rateest.o
 obj-$(CONFIG_NETFILTER_XT_MATCH_REALM) += xt_realm.o
 obj-$(CONFIG_NETFILTER_XT_MATCH_RECENT) += xt_recent.o
diff -ruw linux-4.4.115/net/netfilter/nf_conntrack_core.c linux-4.4.115-fbx/net/netfilter/nf_conntrack_core.c
--- linux-4.4.115/net/netfilter/nf_conntrack_core.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/net/netfilter/nf_conntrack_core.c	2019-10-29 09:26:25.929225549 +0100
@@ -237,7 +237,7 @@
 static void
 clean_from_lists(struct nf_conn *ct)
 {
-	pr_debug("clean_from_lists(%p)\n", ct);
+	pr_debug("clean_from_lists(%pK)\n", ct);
 	hlist_nulls_del_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode);
 	hlist_nulls_del_rcu(&ct->tuplehash[IP_CT_DIR_REPLY].hnnode);
 
@@ -330,7 +330,7 @@
 	struct net *net = nf_ct_net(ct);
 	struct nf_conntrack_l4proto *l4proto;
 
-	pr_debug("destroy_conntrack(%p)\n", ct);
+	pr_debug("destroy_conntrack(%pK)\n", ct);
 	NF_CT_ASSERT(atomic_read(&nfct->use) == 0);
 	NF_CT_ASSERT(!timer_pending(&ct->timeout));
 
@@ -361,7 +361,7 @@
 	if (ct->master)
 		nf_ct_put(ct->master);
 
-	pr_debug("destroy_conntrack: returning ct=%p to slab\n", ct);
+	pr_debug("destroy_conntrack: returning ct=%pK to slab\n", ct);
 	nf_conntrack_free(ct);
 }
 
@@ -629,7 +629,7 @@
 	 * confirmed us.
 	 */
 	NF_CT_ASSERT(!nf_ct_is_confirmed(ct));
-	pr_debug("Confirming conntrack %p\n", ct);
+	pr_debug("Confirming conntrack %pK\n", ct);
 	/* We have to check the DYING flag after unlink to prevent
 	 * a race against nf_ct_get_next_corpse() possibly called from
 	 * user context, else we insert an already 'dead' hash, blocking
@@ -979,7 +979,7 @@
 		spin_lock(&nf_conntrack_expect_lock);
 		exp = nf_ct_find_expectation(net, zone, tuple);
 		if (exp) {
-			pr_debug("conntrack: expectation arrives ct=%p exp=%p\n",
+			pr_debug("conntrack: expectation arrives ct=%pK exp=%pK\n",
 				 ct, exp);
 			/* Welcome, Mr. Bond.  We've been expecting you... */
 			__set_bit(IPS_EXPECTED_BIT, &ct->status);
@@ -1070,14 +1070,14 @@
 	} else {
 		/* Once we've had two way comms, always ESTABLISHED. */
 		if (test_bit(IPS_SEEN_REPLY_BIT, &ct->status)) {
-			pr_debug("nf_conntrack_in: normal packet for %p\n", ct);
+			pr_debug("nf_conntrack_in: normal packet for %pK\n", ct);
 			*ctinfo = IP_CT_ESTABLISHED;
 		} else if (test_bit(IPS_EXPECTED_BIT, &ct->status)) {
-			pr_debug("nf_conntrack_in: related packet for %p\n",
+			pr_debug("nf_conntrack_in: related packet for %pK\n",
 				 ct);
 			*ctinfo = IP_CT_RELATED;
 		} else {
-			pr_debug("nf_conntrack_in: new packet for %p\n", ct);
+			pr_debug("nf_conntrack_in: new packet for %pK\n", ct);
 			*ctinfo = IP_CT_NEW;
 		}
 		*set_reply = 0;
@@ -1219,7 +1219,7 @@
 	/* Should be unconfirmed, so not in hash table yet */
 	NF_CT_ASSERT(!nf_ct_is_confirmed(ct));
 
-	pr_debug("Altering reply tuple of %p to ", ct);
+	pr_debug("Altering reply tuple of %pK to ", ct);
 	nf_ct_dump_tuple(newreply);
 
 	ct->tuplehash[IP_CT_DIR_REPLY].tuple = *newreply;
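
The blanket %p to %pK conversion matters because %pK honours the kernel.kptr_restrict sysctl: restricted readers see zeroed values instead of real kernel addresses. In miniature:

	static void example_log_ct(const struct nf_conn *ct)
	{
		/* with kptr_restrict >= 1, unprivileged readers see zeros */
		pr_debug("conntrack %pK\n", ct);
	}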
diff -ruw linux-4.4.115/net/netfilter/xt_owner.c linux-4.4.115-fbx/net/netfilter/xt_owner.c
--- linux-4.4.115/net/netfilter/xt_owner.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/net/netfilter/xt_owner.c	2019-01-22 16:16:29.143297480 +0100
@@ -29,6 +29,61 @@
 	return 0;
 }
 
+static int __owner_match_simple_gid(kgid_t gid, kgid_t gid_min, kgid_t gid_max)
+{
+	return gid_gte(gid, gid_min) && gid_lte(gid, gid_max);
+}
+
+/*
+ * see kernel/groups.c:groups_to_user() function, which inspired the
+ * content of this function.
+ */
+static int __owner_match_gid_groupinfo(const struct group_info *group_info,
+				       kgid_t gid_min, kgid_t gid_max)
+{
+	unsigned int count = group_info->ngroups;
+	unsigned int block;
+
+	for (block = 0; block < group_info->nblocks; ++block) {
+		unsigned int cp_count = min(NGROUPS_PER_BLOCK, count);
+		unsigned int i;
+
+		for (i = 0; i < cp_count; ++i) {
+			if (__owner_match_simple_gid(
+					     group_info->blocks[block][i],
+					     gid_min, gid_max)) {
+				return 1;
+			}
+		}
+		count -= cp_count;
+	}
+	return 0;
+}
+
+static int owner_match_gid(const struct file *filp,
+			   const struct xt_owner_match_info *info)
+{
+	kgid_t gid_min = make_kgid(&init_user_ns, info->gid_min);
+	kgid_t gid_max = make_kgid(&init_user_ns, info->gid_max);
+
+	/*
+	 * direct match, this is the simple and only case handled by
+	 * the old code, file fsgid matches info gid range.
+	 */
+	if (__owner_match_simple_gid(filp->f_cred->fsgid, gid_min, gid_max))
+		return 1;
+
+	/*
+	 * otherwise we need to have a look to the group list available
+	 * in f_cred->group_info.
+	 */
+	if (__owner_match_gid_groupinfo(filp->f_cred->group_info,
+					gid_min, gid_max))
+		return 1;
+
+	return 0;
+}
+
 static bool
 owner_mt(const struct sk_buff *skb, struct xt_action_param *par)
 {
@@ -60,10 +115,7 @@
 	}
 
 	if (info->match & XT_OWNER_GID) {
-		kgid_t gid_min = make_kgid(&init_user_ns, info->gid_min);
-		kgid_t gid_max = make_kgid(&init_user_ns, info->gid_max);
-		if ((gid_gte(filp->f_cred->fsgid, gid_min) &&
-		     gid_lte(filp->f_cred->fsgid, gid_max)) ^
+		if (owner_match_gid(filp, info) ^
 		    !(info->invert & XT_OWNER_GID))
 			return false;
 	}
diff -ruw linux-4.4.115/net/netlink/af_netlink.c linux-4.4.115-fbx/net/netlink/af_netlink.c
--- linux-4.4.115/net/netlink/af_netlink.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/net/netlink/af_netlink.c	2019-10-29 09:26:25.945225706 +0100
@@ -365,15 +365,6 @@
 
 static void netlink_sock_destruct(struct sock *sk)
 {
-	struct netlink_sock *nlk = nlk_sk(sk);
-
-	if (nlk->cb_running) {
-		if (nlk->cb.done)
-			nlk->cb.done(&nlk->cb);
-		module_put(nlk->cb.module);
-		kfree_skb(nlk->cb.skb);
-	}
-
 	skb_queue_purge(&sk->sk_receive_queue);
 
 	if (!sock_flag(sk, SOCK_DEAD)) {
@@ -386,14 +377,6 @@
 	WARN_ON(nlk_sk(sk)->groups);
 }
 
-static void netlink_sock_destruct_work(struct work_struct *work)
-{
-	struct netlink_sock *nlk = container_of(work, struct netlink_sock,
-						work);
-
-	sk_free(&nlk->sk);
-}
-
 /* This lock without WQ_FLAG_EXCLUSIVE is good on UP and it is _very_ bad on
  * SMP. Look, when several writers sleep and reader wakes them up, all but one
  * immediately hit write lock and grab all the cpus. Exclusive sleep solves
@@ -504,8 +487,9 @@
 
 	rcu_read_lock();
 	sk = __netlink_lookup(table, portid, net);
-	if (sk)
-		sock_hold(sk);
+	if (sk && !atomic_inc_not_zero(&sk->sk_refcnt))
+		sk = NULL;
+
 	rcu_read_unlock();
 
 	return sk;
@@ -632,6 +616,7 @@
 	}
 	init_waitqueue_head(&nlk->wait);
 
+	sock_set_flag(sk, SOCK_RCU_FREE);
 	sk->sk_destruct = netlink_sock_destruct;
 	sk->sk_protocol = protocol;
 	return 0;
@@ -696,23 +681,6 @@
 	goto out;
 }
 
-static void deferred_put_nlk_sk(struct rcu_head *head)
-{
-	struct netlink_sock *nlk = container_of(head, struct netlink_sock, rcu);
-	struct sock *sk = &nlk->sk;
-
-	if (!atomic_dec_and_test(&sk->sk_refcnt))
-		return;
-
-	if (nlk->cb_running && nlk->cb.done) {
-		INIT_WORK(&nlk->work, netlink_sock_destruct_work);
-		schedule_work(&nlk->work);
-		return;
-	}
-
-	sk_free(sk);
-}
-
 static int netlink_release(struct socket *sock)
 {
 	struct sock *sk = sock->sk;
@@ -785,7 +753,19 @@
 	local_bh_disable();
 	sock_prot_inuse_add(sock_net(sk), &netlink_proto, -1);
 	local_bh_enable();
-	call_rcu(&nlk->rcu, deferred_put_nlk_sk);
+	if (nlk->cb_running) {
+		mutex_lock(nlk->cb_mutex);
+		if (nlk->cb_running) {
+			if (nlk->cb.done)
+				nlk->cb.done(&nlk->cb);
+
+			module_put(nlk->cb.module);
+			kfree_skb(nlk->cb.skb);
+			nlk->cb_running = false;
+		}
+		mutex_unlock(nlk->cb_mutex);
+	}
+	sock_put(sk);
 	return 0;
 }
 
diff -ruw linux-4.4.115/net/netlink/af_netlink.h linux-4.4.115-fbx/net/netlink/af_netlink.h
--- linux-4.4.115/net/netlink/af_netlink.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/net/netlink/af_netlink.h	2019-01-22 16:16:29.151297552 +0100
@@ -48,7 +48,6 @@
 	struct module		*module;
 
 	struct rhash_head	node;
-	struct rcu_head		rcu;
 	struct work_struct	work;
 };
 
diff -ruw linux-4.4.115/net/netlink/genetlink.c linux-4.4.115-fbx/net/netlink/genetlink.c
--- linux-4.4.115/net/netlink/genetlink.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/net/netlink/genetlink.c	2019-10-29 09:26:25.949225745 +0100
@@ -1009,7 +1009,7 @@
 
 static int genl_bind(struct net *net, int group)
 {
-	int i, err = -ENOENT;
+	int i, err = 0;
 
 	down_read(&cb_lock);
 	for (i = 0; i < GENL_FAM_TAB_SIZE; i++) {
diff -ruw linux-4.4.115/net/packet/af_packet.c linux-4.4.115-fbx/net/packet/af_packet.c
--- linux-4.4.115/net/packet/af_packet.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/net/packet/af_packet.c	2019-10-29 09:26:25.953225784 +0100
@@ -1689,7 +1689,7 @@
 		match->flags = flags;
 		INIT_LIST_HEAD(&match->list);
 		spin_lock_init(&match->lock);
-		atomic_set(&match->sk_ref, 0);
+		refcount_set(&match->sk_ref, 0);
 		fanout_init_data(match);
 		match->prot_hook.type = po->prot_hook.type;
 		match->prot_hook.dev = po->prot_hook.dev;
@@ -1706,19 +1706,19 @@
 	    match->prot_hook.type == po->prot_hook.type &&
 	    match->prot_hook.dev == po->prot_hook.dev) {
 		err = -ENOSPC;
-		if (atomic_read(&match->sk_ref) < PACKET_FANOUT_MAX) {
+		if (refcount_read(&match->sk_ref) < PACKET_FANOUT_MAX) {
 			__dev_remove_pack(&po->prot_hook);
 			po->fanout = match;
 			po->rollover = rollover;
 			rollover = NULL;
-			atomic_inc(&match->sk_ref);
+			refcount_set(&match->sk_ref, refcount_read(&match->sk_ref) + 1);
 			__fanout_link(sk, po);
 			err = 0;
 		}
 	}
 	spin_unlock(&po->bind_lock);
 
-	if (err && !atomic_read(&match->sk_ref)) {
+	if (err && !refcount_read(&match->sk_ref)) {
 		list_del(&match->list);
 		kfree(match);
 	}
@@ -1744,7 +1744,7 @@
 	if (f) {
 		po->fanout = NULL;
 
-		if (atomic_dec_and_test(&f->sk_ref))
+		if (refcount_dec_and_test(&f->sk_ref))
 			list_del(&f->list);
 		else
 			f = NULL;
diff -ruw linux-4.4.115/net/packet/internal.h linux-4.4.115-fbx/net/packet/internal.h
--- linux-4.4.115/net/packet/internal.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/net/packet/internal.h	2019-10-29 09:26:25.953225784 +0100
@@ -1,6 +1,8 @@
 #ifndef __PACKET_INTERNAL_H__
 #define __PACKET_INTERNAL_H__
 
+#include <linux/refcount.h>
+
 struct packet_mclist {
 	struct packet_mclist	*next;
 	int			ifindex;
@@ -86,7 +88,7 @@
 	struct list_head	list;
 	struct sock		*arr[PACKET_FANOUT_MAX];
 	spinlock_t		lock;
-	atomic_t		sk_ref;
+	refcount_t		sk_ref;
 	struct packet_type	prot_hook ____cacheline_aligned_in_smp;
 };
 
diff -ruw linux-4.4.115/net/rfkill/core.c linux-4.4.115-fbx/net/rfkill/core.c
--- linux-4.4.115/net/rfkill/core.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/net/rfkill/core.c	2019-01-22 16:16:29.175297770 +0100
@@ -802,8 +802,7 @@
 }
 EXPORT_SYMBOL(rfkill_resume_polling);
 
-#ifdef CONFIG_PM_SLEEP
-static int rfkill_suspend(struct device *dev)
+static __maybe_unused int rfkill_suspend(struct device *dev)
 {
 	struct rfkill *rfkill = to_rfkill(dev);
 
@@ -812,7 +811,7 @@
 	return 0;
 }
 
-static int rfkill_resume(struct device *dev)
+static __maybe_unused int rfkill_resume(struct device *dev)
 {
 	struct rfkill *rfkill = to_rfkill(dev);
 	bool cur;
@@ -828,17 +827,13 @@
 }
 
 static SIMPLE_DEV_PM_OPS(rfkill_pm_ops, rfkill_suspend, rfkill_resume);
-#define RFKILL_PM_OPS (&rfkill_pm_ops)
-#else
-#define RFKILL_PM_OPS NULL
-#endif
 
 static struct class rfkill_class = {
 	.name		= "rfkill",
 	.dev_release	= rfkill_release,
 	.dev_groups	= rfkill_dev_groups,
 	.dev_uevent	= rfkill_dev_uevent,
-	.pm		= RFKILL_PM_OPS,
+	.pm		= IS_ENABLED(CONFIG_RFKILL_PM) ? &rfkill_pm_ops : NULL,
 };
 
 bool rfkill_blocked(struct rfkill *rfkill)
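
The pattern used above, in isolation: keep the handlers compiled (silencing unused-function warnings with __maybe_unused) and choose the ops at compile time with IS_ENABLED(), instead of compiling the functions out with #ifdef. A sketch, where example_* and CONFIG_EXAMPLE_PM are placeholders:

	#include <linux/device.h>
	#include <linux/pm.h>

	static __maybe_unused int example_suspend(struct device *dev)
	{
		return 0;
	}

	static __maybe_unused int example_resume(struct device *dev)
	{
		return 0;
	}

	static SIMPLE_DEV_PM_OPS(example_pm_ops, example_suspend, example_resume);
	/* .pm = IS_ENABLED(CONFIG_EXAMPLE_PM) ? &example_pm_ops : NULL */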
diff -ruw linux-4.4.115/net/rfkill/Kconfig linux-4.4.115-fbx/net/rfkill/Kconfig
--- linux-4.4.115/net/rfkill/Kconfig	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/net/rfkill/Kconfig	2019-01-22 16:16:29.171297734 +0100
@@ -10,6 +10,11 @@
 	  To compile this driver as a module, choose M here: the
 	  module will be called rfkill.
 
+config RFKILL_PM
+	bool "Power off on suspend"
+	depends on RFKILL && PM
+	default y
+
 # LED trigger support
 config RFKILL_LEDS
 	bool
diff -ruw linux-4.4.115/net/socket.c linux-4.4.115-fbx/net/socket.c
--- linux-4.4.115/net/socket.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/net/socket.c	2019-10-29 09:26:25.985226097 +0100
@@ -89,6 +89,8 @@
 #include <linux/magic.h>
 #include <linux/slab.h>
 #include <linux/xattr.h>
+#include <linux/seemp_api.h>
+#include <linux/seemp_instrumentation.h>
 
 #include <asm/uaccess.h>
 #include <asm/unistd.h>
@@ -115,6 +117,9 @@
 
 static ssize_t sock_read_iter(struct kiocb *iocb, struct iov_iter *to);
 static ssize_t sock_write_iter(struct kiocb *iocb, struct iov_iter *from);
+
+static BLOCKING_NOTIFIER_HEAD(sockev_notifier_list);
+
 static int sock_mmap(struct file *file, struct vm_area_struct *vma);
 
 static int sock_close(struct inode *inode, struct file *file);
@@ -169,6 +174,14 @@
 static DEFINE_PER_CPU(int, sockets_in_use);
 
 /*
+ * Socket Event framework helpers
+ */
+static void sockev_notify(unsigned long event, struct socket *sk)
+{
+	blocking_notifier_call_chain(&sockev_notifier_list, event, sk);
+}
+
+/**
  * Support routines.
  * Move socket addresses back and forth across the kernel/user
  * divide and look after the messy bits.
@@ -520,9 +533,23 @@
 	return used;
 }
 
+static int sockfs_setattr(struct dentry *dentry, struct iattr *iattr)
+{
+	int err = simple_setattr(dentry, iattr);
+
+	if (!err && (iattr->ia_valid & ATTR_UID)) {
+		struct socket *sock = SOCKET_I(d_inode(dentry));
+
+		sock->sk->sk_uid = iattr->ia_uid;
+	}
+
+	return err;
+}
+
 static const struct inode_operations sockfs_inode_ops = {
 	.getxattr = sockfs_getxattr,
 	.listxattr = sockfs_listxattr,
+	.setattr = sockfs_setattr,
 };
 
 /**
@@ -1234,6 +1261,9 @@
 	if (retval < 0)
 		goto out;
 
+	if (retval == 0)
+		sockev_notify(SOCKEV_SOCKET, sock);
+
 	retval = sock_map_fd(sock, flags & (O_CLOEXEC | O_NONBLOCK));
 	if (retval < 0)
 		goto out_release;
@@ -1378,6 +1408,13 @@
 						      (struct sockaddr *)
 						      &address, addrlen);
 		}
+		if (!err) {
+			if (sock->sk)
+				sock_hold(sock->sk);
+			sockev_notify(SOCKEV_BIND, sock);
+			if (sock->sk)
+				sock_put(sock->sk);
+		}
 		fput_light(sock->file, fput_needed);
 	}
 	return err;
@@ -1405,6 +1442,13 @@
 		if (!err)
 			err = sock->ops->listen(sock, backlog);
 
+		if (!err) {
+			if (sock->sk)
+				sock_hold(sock->sk);
+			sockev_notify(SOCKEV_LISTEN, sock);
+			if (sock->sk)
+				sock_put(sock->sk);
+		}
 		fput_light(sock->file, fput_needed);
 	}
 	return err;
@@ -1492,7 +1536,8 @@
 
 	fd_install(newfd, newfile);
 	err = newfd;
-
+	if (!err)
+		sockev_notify(SOCKEV_ACCEPT, sock);
 out_put:
 	fput_light(sock->file, fput_needed);
 out:
@@ -1542,6 +1587,8 @@
 
 	err = sock->ops->connect(sock, (struct sockaddr *)&address, addrlen,
 				 sock->file->f_flags);
+	if (!err)
+		sockev_notify(SOCKEV_CONNECT, sock);
 out_put:
 	fput_light(sock->file, fput_needed);
 out:
@@ -1627,6 +1674,13 @@
 	struct iovec iov;
 	int fput_needed;
 
+	seemp_logk_sendto(fd, buff, len, flags, addr, addr_len);
+
+	/* clamp oversized requests and verify the user buffer is
+	 * readable before building the iov
+	 */
+	if (len > INT_MAX)
+		len = INT_MAX;
+	if (unlikely(!access_ok(VERIFY_READ, buff, len)))
+		return -EFAULT;
+
 	err = import_single_range(WRITE, buff, len, &iov, &msg.msg_iter);
 	if (unlikely(err))
 		return err;
@@ -1683,9 +1737,14 @@
 	int err, err2;
 	int fput_needed;
 
+	/* same early clamp/validation as sendto, on the write side */
+	if (size > INT_MAX)
+		size = INT_MAX;
+	if (unlikely(!access_ok(VERIFY_WRITE, ubuf, size)))
+		return -EFAULT;
 	err = import_single_range(READ, ubuf, size, &iov, &msg.msg_iter);
 	if (unlikely(err))
 		return err;
+
 	sock = sockfd_lookup_light(fd, &err, &fput_needed);
 	if (!sock)
 		goto out;
@@ -1800,6 +1859,7 @@
 
 	sock = sockfd_lookup_light(fd, &err, &fput_needed);
 	if (sock != NULL) {
+		/* notified before the security check, so listeners may
+		 * see shutdown attempts that are subsequently denied
+		 */
+		sockev_notify(SOCKEV_SHUTDOWN, sock);
 		err = security_socket_shutdown(sock, how);
 		if (!err)
 			err = sock->ops->shutdown(sock, how);
@@ -3308,3 +3368,15 @@
 	return sock->ops->shutdown(sock, how);
 }
 EXPORT_SYMBOL(kernel_sock_shutdown);
+
+int sockev_register_notify(struct notifier_block *nb)
+{
+	return blocking_notifier_chain_register(&sockev_notifier_list, nb);
+}
+EXPORT_SYMBOL(sockev_register_notify);
+
+int sockev_unregister_notify(struct notifier_block *nb)
+{
+	return blocking_notifier_chain_unregister(&sockev_notifier_list, nb);
+}
+EXPORT_SYMBOL(sockev_unregister_notify);
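+
+/*
+ * Example consumer (sketch only, not part of this patch; the callback
+ * and notifier_block names are illustrative):
+ *
+ *	static int sockev_cb(struct notifier_block *nb,
+ *			     unsigned long event, void *data)
+ *	{
+ *		struct socket *sock = data;
+ *
+ *		if (event == SOCKEV_BIND && sock->sk)
+ *			pr_debug("bind: family %d\n", sock->sk->sk_family);
+ *		return NOTIFY_DONE;
+ *	}
+ *
+ *	static struct notifier_block sockev_nb = {
+ *		.notifier_call = sockev_cb,
+ *	};
+ *
+ *	sockev_register_notify(&sockev_nb);
+ */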
diff -ruw linux-4.4.115/net/unix/af_unix.c linux-4.4.115-fbx/net/unix/af_unix.c
--- linux-4.4.115/net/unix/af_unix.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/net/unix/af_unix.c	2019-10-29 09:26:25.997226215 +0100
@@ -991,7 +991,7 @@
 	struct sockaddr_un *sunaddr = (struct sockaddr_un *)uaddr;
 	char *sun_path = sunaddr->sun_path;
 	int err;
-	unsigned int hash;
+	unsigned int hash = 0;	/* silence a maybe-uninitialized warning */
 	struct unix_address *addr;
 	struct hlist_head *list;
 	struct path path = { NULL, NULL };
diff -ruw linux-4.4.115/net/wireless/ap.c linux-4.4.115-fbx/net/wireless/ap.c
--- linux-4.4.115/net/wireless/ap.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/net/wireless/ap.c	2019-01-22 16:16:29.239298349 +0100
@@ -25,13 +25,18 @@
 		return -ENOENT;
 
 	err = rdev_stop_ap(rdev, dev);
-	if (!err) {
-		wdev->beacon_interval = 0;
+	wdev->beacon_interval = 0;
+	if (!err) {
 		memset(&wdev->chandef, 0, sizeof(wdev->chandef));
 		wdev->ssid_len = 0;
 		rdev_set_qos_map(rdev, dev, NULL);
 		if (notify)
 			nl80211_send_ap_stopped(wdev);
+
+		/* Should we apply a grace period during beaconing
+		 * interface shutdown, too?
+		 */
+		cfg80211_sched_dfs_chan_update(rdev);
 	}
 
 	return err;
diff -ruw linux-4.4.115/net/wireless/chan.c linux-4.4.115-fbx/net/wireless/chan.c
--- linux-4.4.115/net/wireless/chan.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/net/wireless/chan.c	2019-01-22 16:16:29.239298349 +0100
@@ -319,7 +319,8 @@
 		if (!c)
 			return -EINVAL;
 
-		if (c->flags & IEEE80211_CHAN_RADAR)
+		if ((c->flags & IEEE80211_CHAN_RADAR) &&
+		    !(wiphy->flags & WIPHY_FLAG_DFS_OFFLOAD))
 			return 1;
 	}
 	return 0;
@@ -455,6 +456,105 @@
 	return (r1 + r2 > 0);
 }
 
+/*
+ * Checks whether the center frequency of chan falls within the
+ * bandwidth range of chandef.
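+ *
+ * E.g. an 80 MHz chandef with center_freq1 == 5210 covers the 20 MHz
+ * channels centred at 5180, 5200, 5220 and 5240.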
+ */
+bool cfg80211_is_sub_chan(struct cfg80211_chan_def *chandef,
+			  struct ieee80211_channel *chan)
+{
+	int width;
+	u32 cf_offset, freq;
+
+	if (chandef->chan->center_freq == chan->center_freq)
+		return true;
+
+	width = cfg80211_chandef_get_width(chandef);
+	if (width <= 20)
+		return false;
+
+	cf_offset = width / 2 - 10;
+
+	for (freq = chandef->center_freq1 - cf_offset;
+	     freq <= chandef->center_freq1 + cf_offset; freq += 20) {
+		if (chan->center_freq == freq)
+			return true;
+	}
+
+	if (!chandef->center_freq2)
+		return false;
+
+	for (freq = chandef->center_freq2 - cf_offset;
+	     freq <= chandef->center_freq2 + cf_offset; freq += 20) {
+		if (chan->center_freq == freq)
+			return true;
+	}
+
+	return false;
+}
+
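+/*
+ * True when the wdev is actively beaconing: an AP/GO with a beacon
+ * interval set, or a joined IBSS or mesh point.
+ */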
+bool cfg80211_beaconing_iface_active(struct wireless_dev *wdev)
+{
+	bool active = false;
+
+	ASSERT_WDEV_LOCK(wdev);
+
+	if (!wdev->chandef.chan)
+		return false;
+
+	switch (wdev->iftype) {
+	case NL80211_IFTYPE_AP:
+	case NL80211_IFTYPE_P2P_GO:
+		active = wdev->beacon_interval != 0;
+		break;
+	case NL80211_IFTYPE_ADHOC:
+		active = wdev->ssid_len != 0;
+		break;
+	case NL80211_IFTYPE_MESH_POINT:
+		active = wdev->mesh_id_len != 0;
+		break;
+	case NL80211_IFTYPE_STATION:
+	case NL80211_IFTYPE_OCB:
+	case NL80211_IFTYPE_P2P_CLIENT:
+	case NL80211_IFTYPE_MONITOR:
+	case NL80211_IFTYPE_AP_VLAN:
+	case NL80211_IFTYPE_WDS:
+	case NL80211_IFTYPE_P2P_DEVICE:
+		break;
+	case NL80211_IFTYPE_UNSPECIFIED:
+	case NUM_NL80211_IFTYPES:
+		WARN_ON(1);
+	}
+
+	return active;
+}
+
+bool cfg80211_any_wiphy_oper_chan(struct wiphy *wiphy,
+				  struct ieee80211_channel *chan)
+{
+	struct wireless_dev *wdev;
+
+	ASSERT_RTNL();
+
+	if (!(chan->flags & IEEE80211_CHAN_RADAR))
+		return false;
+
+	list_for_each_entry(wdev, &wiphy->wdev_list, list) {
+		wdev_lock(wdev);
+		if (!cfg80211_beaconing_iface_active(wdev)) {
+			wdev_unlock(wdev);
+			continue;
+		}
+
+		if (cfg80211_is_sub_chan(&wdev->chandef, chan)) {
+			wdev_unlock(wdev);
+			return true;
+		}
+		wdev_unlock(wdev);
+	}
+
+	return false;
+}
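+
+/*
+ * Sketch of the intended caller (an assumption, not taken from this
+ * hunk): before clearing DFS/CAC state, check whether any wdev on the
+ * wiphy is still operating on the radar channel, e.g.:
+ *
+ *	if (!cfg80211_any_wiphy_oper_chan(&rdev->wiphy, chan))
+ *		cfg80211_sched_dfs_chan_update(rdev);
+ */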
 
 static bool cfg80211_get_chans_dfs_available(struct wiphy *wiphy,
 					     u32 center_freq,
@@ -479,7 +579,9 @@
 		if (c->flags & IEEE80211_CHAN_DISABLED)
 			return false;
 
-		if ((c->flags & IEEE80211_CHAN_RADAR)  &&
+		/* skip the DFS-state check when radar handling is
+		 * offloaded to the driver
+		 */
+		if ((!(wiphy->flags & WIPHY_FLAG_DFS_OFFLOAD)) &&
+		    (c->flags & IEEE80211_CHAN_RADAR) &&
 		    (c->dfs_state != NL80211_DFS_AVAILABLE))
 			return false;
 	}
@@ -590,10 +692,17 @@
 
 	for (freq = start_freq; freq <= end_freq; freq += 20) {
 		c = ieee80211_get_channel(wiphy, freq);
-		if (!c || c->flags & prohibited_flags)
+
+		if (!c)
 			return false;
-	}
 
+		if ((!(wiphy->flags & WIPHY_FLAG_DFS_OFFLOAD)) &&
+		    (c->flags & prohibited_flags & IEEE80211_CHAN_RADAR))
+			return false;
+
+		if (c->flags & prohibited_flags & ~IEEE80211_CHAN_RADAR)
+			return false;
+	}
 	return true;
 }
 
@@ -739,7 +848,7 @@
 	 * and thus fail the GO instantiation, consider only the interfaces of
 	 * the current registered device.
 	 */
-	list_for_each_entry(wdev, &rdev->wdev_list, list) {
+	list_for_each_entry(wdev, &rdev->wiphy.wdev_list, list) {
 		struct ieee80211_channel *other_chan = NULL;
 		int r1, r2;
 
diff -ruw linux-4.4.115/net/wireless/core.c linux-4.4.115-fbx/net/wireless/core.c
--- linux-4.4.115/net/wireless/core.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/net/wireless/core.c	2019-10-29 09:26:26.001226254 +0100
@@ -3,6 +3,7 @@
  *
  * Copyright 2006-2010		Johannes Berg <johannes@sipsolutions.net>
  * Copyright 2013-2014  Intel Mobile Communications GmbH
+ * Copyright 2015	Intel Deutschland GmbH
  */
 
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -157,7 +158,7 @@
 	if (!(rdev->wiphy.flags & WIPHY_FLAG_NETNS_OK))
 		return -EOPNOTSUPP;
 
-	list_for_each_entry(wdev, &rdev->wdev_list, list) {
+	list_for_each_entry(wdev, &rdev->wiphy.wdev_list, list) {
 		if (!wdev->netdev)
 			continue;
 		wdev->netdev->features &= ~NETIF_F_NETNS_LOCAL;
@@ -171,7 +172,8 @@
 		/* failed -- clean up to old netns */
 		net = wiphy_net(&rdev->wiphy);
 
-		list_for_each_entry_continue_reverse(wdev, &rdev->wdev_list,
+		list_for_each_entry_continue_reverse(wdev,
+						     &rdev->wiphy.wdev_list,
 						     list) {
 			if (!wdev->netdev)
 				continue;
@@ -230,7 +232,7 @@
 
 	ASSERT_RTNL();
 
-	list_for_each_entry(wdev, &rdev->wdev_list, list) {
+	list_for_each_entry(wdev, &rdev->wiphy.wdev_list, list) {
 		if (wdev->netdev) {
 			dev_close(wdev->netdev);
 			continue;
@@ -298,7 +300,8 @@
 		kfree(item);
 		spin_unlock_irq(&rdev->destroy_list_lock);
 
-		list_for_each_entry_safe(wdev, tmp, &rdev->wdev_list, list) {
+		list_for_each_entry_safe(wdev, tmp,
+					 &rdev->wiphy.wdev_list, list) {
 			if (nlportid == wdev->owner_nlportid)
 				rdev_del_virtual_intf(rdev, wdev);
 		}
@@ -400,7 +403,7 @@
 		dev_set_name(&rdev->wiphy.dev, PHY_NAME "%d", rdev->wiphy_idx);
 	}
 
-	INIT_LIST_HEAD(&rdev->wdev_list);
+	INIT_LIST_HEAD(&rdev->wiphy.wdev_list);
 	INIT_LIST_HEAD(&rdev->beacon_registrations);
 	spin_lock_init(&rdev->beacon_registrations_lock);
 	spin_lock_init(&rdev->bss_lock);
@@ -616,6 +619,13 @@
 		     !rdev->ops->set_mac_acl)))
 		return -EINVAL;
 
+	/* Ensure the driver flags only valid BSS selection behaviours:
+	 * bit 0 would correspond to the invalid attribute 0, hence the
+	 * "BIT(...) - 2" mask, which allows bits 1..AFTER_LAST-1 only.
+	 */
+	if (WARN_ON(wiphy->bss_select_support &&
+		    (wiphy->bss_select_support & ~(BIT(__NL80211_BSS_SELECT_ATTR_AFTER_LAST) - 2))))
+		return -EINVAL;
+
 	if (wiphy->addresses)
 		memcpy(wiphy->perm_addr, wiphy->addresses[0].addr, ETH_ALEN);
 
@@ -730,6 +740,36 @@
 		nl80211_send_reg_change_event(&request);
 	}
 
+	/* Check that nobody globally advertises any capabilities they do not
+	 * advertise on all possible interface types.
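+	 * E.g. any bit set in wiphy->extended_capabilities[0] must also
+	 * be set in byte 0 of every wiphy->iftype_ext_capab[] entry.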
+	 */
+	if (wiphy->extended_capabilities_len &&
+	    wiphy->num_iftype_ext_capab &&
+	    wiphy->iftype_ext_capab) {
+		u8 supported_on_all, j;
+		const struct wiphy_iftype_ext_capab *capab;
+
+		capab = wiphy->iftype_ext_capab;
+		for (j = 0; j < wiphy->extended_capabilities_len; j++) {
+			if (capab[0].extended_capabilities_len > j)
+				supported_on_all =
+					capab[0].extended_capabilities[j];
+			else
+				supported_on_all = 0x00;
+			for (i = 1; i < wiphy->num_iftype_ext_capab; i++) {
+				if (j >= capab[i].extended_capabilities_len) {
+					supported_on_all = 0x00;
+					break;
+				}
+				supported_on_all &=
+					capab[i].extended_capabilities[j];
+			}
+			if (WARN_ON(wiphy->extended_capabilities[j] &
+				    ~supported_on_all))
+				break;
+		}
+	}
+
 	rdev->wiphy.registered = true;
 	rtnl_unlock();
 
@@ -782,7 +822,7 @@
 	nl80211_notify_wiphy(rdev, NL80211_CMD_DEL_WIPHY);
 	rdev->wiphy.registered = false;
 
-	WARN_ON(!list_empty(&rdev->wdev_list));
+	WARN_ON(!list_empty(&rdev->wiphy.wdev_list));
 
 	/*
 	 * First remove the hardware from everywhere, this makes
@@ -905,7 +945,6 @@
 		sched_scan_req = rtnl_dereference(rdev->sched_scan_req);
 		if (sched_scan_req && dev == sched_scan_req->dev)
 			__cfg80211_stop_sched_scan(rdev, false);
-
 #ifdef CONFIG_CFG80211_WEXT
 		kfree(wdev->wext.ie);
 		wdev->wext.ie = NULL;
@@ -914,6 +953,7 @@
 #endif
 		cfg80211_disconnect(rdev, dev,
 				    WLAN_REASON_DEAUTH_LEAVING, true);
+		cfg80211_mlme_down(rdev, dev);
 		break;
 	case NL80211_IFTYPE_MESH_POINT:
 		__cfg80211_leave_mesh(rdev, dev);
@@ -940,6 +980,7 @@
 		/* invalid */
 		break;
 	}
+	wdev->beacon_interval = 0;
 }
 
 void cfg80211_leave(struct cfg80211_registered_device *rdev,
@@ -1004,7 +1045,7 @@
 		spin_lock_init(&wdev->mgmt_registrations_lock);
 
 		wdev->identifier = ++rdev->wdev_id;
-		list_add_rcu(&wdev->list, &rdev->wdev_list);
+		list_add_rcu(&wdev->list, &rdev->wiphy.wdev_list);
 		rdev->devlist_generation++;
 		/* can only change netns with wiphy */
 		dev->features |= NETIF_F_NETNS_LOCAL;
diff -ruw linux-4.4.115/net/wireless/core.h linux-4.4.115-fbx/net/wireless/core.h
--- linux-4.4.115/net/wireless/core.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/net/wireless/core.h	2019-10-29 09:26:26.001226254 +0100
@@ -50,8 +50,7 @@
 	/* wiphy index, internal only */
 	int wiphy_idx;
 
-	/* associated wireless interfaces, protected by rtnl or RCU */
-	struct list_head wdev_list;
+	/* protected by RTNL */
 	int devlist_generation, wdev_id;
 	int opencount; /* also protected by devlist_mtx */
 	wait_queue_head_t dev_wait;
@@ -209,14 +208,7 @@
 	enum cfg80211_event_type type;
 
 	union {
-		struct {
-			u8 bssid[ETH_ALEN];
-			const u8 *req_ie;
-			const u8 *resp_ie;
-			size_t req_ie_len;
-			size_t resp_ie_len;
-			u16 status;
-		} cr;
+		struct cfg80211_connect_resp_params cr;
 		struct {
 			const u8 *req_ie;
 			const u8 *resp_ie;
@@ -334,7 +326,7 @@
 		       const u8 *ssid, int ssid_len,
 		       const u8 *ie, int ie_len,
 		       const u8 *key, int key_len, int key_idx,
-		       const u8 *sae_data, int sae_data_len);
+		       const u8 *auth_data, int auth_data_len);
 int cfg80211_mlme_assoc(struct cfg80211_registered_device *rdev,
 			struct net_device *dev,
 			struct ieee80211_channel *chan,
@@ -372,11 +364,9 @@
 		     struct cfg80211_connect_params *connect,
 		     struct cfg80211_cached_keys *connkeys,
 		     const u8 *prev_bssid);
-void __cfg80211_connect_result(struct net_device *dev, const u8 *bssid,
-			       const u8 *req_ie, size_t req_ie_len,
-			       const u8 *resp_ie, size_t resp_ie_len,
-			       u16 status, bool wextev,
-			       struct cfg80211_bss *bss);
+void __cfg80211_connect_result(struct net_device *dev,
+			       struct cfg80211_connect_resp_params *params,
+			       bool wextev);
 void __cfg80211_disconnected(struct net_device *dev, const u8 *ie,
 			     size_t ie_len, u16 reason, bool from_ap);
 int cfg80211_disconnect(struct cfg80211_registered_device *rdev,
@@ -448,6 +438,16 @@
 cfg80211_chandef_dfs_cac_time(struct wiphy *wiphy,
 			      const struct cfg80211_chan_def *chandef);
 
+void cfg80211_sched_dfs_chan_update(struct cfg80211_registered_device *rdev);
+
+bool cfg80211_any_wiphy_oper_chan(struct wiphy *wiphy,
+				  struct ieee80211_channel *chan);
+
+bool cfg80211_beaconing_iface_active(struct wireless_dev *wdev);
+
+bool cfg80211_is_sub_chan(struct cfg80211_chan_def *chandef,
+			  struct ieee80211_channel *chan);
+
 static inline unsigned int elapsed_jiffies_msecs(unsigned long start)
 {
 	unsigned long end = jiffies;
@@ -472,7 +472,7 @@
 			   u32 *mask);
 
 int cfg80211_validate_beacon_int(struct cfg80211_registered_device *rdev,
-				 u32 beacon_int);
+				 enum nl80211_iftype iftype, u32 beacon_int);
 
 void cfg80211_update_iface_num(struct cfg80211_registered_device *rdev,
 			       enum nl80211_iftype iftype, int num);
diff -ruw linux-4.4.115/net/wireless/db.txt linux-4.4.115-fbx/net/wireless/db.txt
--- linux-4.4.115/net/wireless/db.txt	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/net/wireless/db.txt	2019-10-29 09:26:26.001226254 +0100
@@ -1,17 +1,1536 @@
-#
-# This file is a placeholder to prevent accidental build breakage if someone
-# enables CONFIG_CFG80211_INTERNAL_REGDB.  Almost no one actually needs to
-# enable that build option.
-#
-# You should be using CRDA instead.  It is even better if you use the CRDA
-# package provided by your distribution, since they will probably keep it
-# up-to-date on your behalf.
-#
-# If you _really_ intend to use CONFIG_CFG80211_INTERNAL_REGDB then you will
-# need to replace this file with one containing appropriately formatted
-# regulatory rules that cover the regulatory domains you will be using.  Your
-# best option is to extract the db.txt file from the wireless-regdb git
-# repository:
-#
-#   git://git.kernel.org/pub/scm/linux/kernel/git/linville/wireless-regdb.git
-#
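+# Format sketch (wireless-regdb syntax, as used throughout this file):
+#   country <ISO 3166-1 alpha-2>: [DFS-<FCC|ETSI|JP>]
+#	(<start MHz> - <end MHz> @ <max bandwidth MHz>), (<max EIRP dBm>), <flags...>
+# where flags include DFS, AUTO-BW, NO-IR, NO-OUTDOOR, NO-OFDM,
+# PASSIVE-SCAN and NO-IBSS.
+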
+# This is the world regulatory domain
+country 00:
+	(2402 - 2472 @ 40), (20)
+	# Channel 12 - 13.
+	(2457 - 2482 @ 40), (20), PASSIVE-SCAN, NO-IBSS
+	# Channel 14. Only JP enables this and for 802.11b only
+	(2474 - 2494 @ 20), (20), PASSIVE-SCAN, NO-IBSS, NO-OFDM
+	# Channel 36 - 48
+	(5170 - 5250 @ 80), (20), PASSIVE-SCAN, NO-IBSS
+	(5250 - 5330 @ 80), (20), PASSIVE-SCAN, NO-IBSS
+	(5490 - 5710 @ 80), (20), PASSIVE-SCAN, NO-IBSS
+	# NB: 5260 MHz - 5700 MHz requires DFS
+	# Channel 149 - 165
+	(5735 - 5835 @ 80), (20), PASSIVE-SCAN, NO-IBSS
+	# IEEE 802.11ad (60GHz), channels 1..3
+	(57240 - 63720 @ 2160), (0)
+
+country AE: DFS-ETSI
+	(2402 - 2482 @ 40), (20)
+	(5170 - 5250 @ 80), (24), AUTO-BW
+	(5250 - 5330 @ 80), (24), DFS, AUTO-BW
+	(5490 - 5730 @ 160), (24), DFS
+	(5735 - 5835 @ 80), (30)
+
+country AF: DFS-ETSI
+	(2402 - 2482 @ 40), (20)
+	(5170 - 5250 @ 80), (23), AUTO-BW
+	(5250 - 5330 @ 80), (23), DFS, AUTO-BW
+	(5490 - 5710 @ 160), (30), DFS
+
+country AI: DFS-ETSI
+	(2402 - 2482 @ 40), (20)
+	(5170 - 5250 @ 80), (23), AUTO-BW
+	(5250 - 5330 @ 80), (23), DFS, AUTO-BW
+	(5490 - 5710 @ 160), (30), DFS
+
+country AL: DFS-ETSI
+	(2402 - 2482 @ 40), (20)
+	(5150 - 5250 @ 80), (23), AUTO-BW
+	(5250 - 5350 @ 80), (23), DFS, AUTO-BW
+	(5470 - 5710 @ 160), (30), DFS
+
+country AM: DFS-ETSI
+	(2402 - 2482 @ 40), (20)
+	(5170 - 5250 @ 20), (18)
+	(5250 - 5330 @ 20), (18), DFS
+
+country AN: DFS-ETSI
+	(2402 - 2482 @ 40), (20)
+	(5170 - 5250 @ 80), (23), AUTO-BW
+	(5250 - 5330 @ 80), (23), DFS, AUTO-BW
+	(5490 - 5710 @ 160), (30), DFS
+
+country AR:
+	(2402 - 2482 @ 40), (36)
+	(5170 - 5250 @ 80), (23), AUTO-BW
+	(5250 - 5330 @ 80), (36), AUTO-BW
+	(5490 - 5590 @ 80), (36)
+	(5650 - 5730 @ 80), (36)
+	(5735 - 5835 @ 80), (36)
+	# 60 gHz band channels 1-3
+	(57240 - 63720 @ 2160), (40), NO-OUTDOOR
+
+country AS: DFS-FCC
+	(2402 - 2472 @ 40), (30)
+	(5170 - 5250 @ 80), (24), AUTO-BW
+	(5250 - 5330 @ 80), (24), DFS, AUTO-BW
+	(5490 - 5730 @ 160), (24), DFS
+	(5735 - 5850 @ 80), (30)
+
+country AT: DFS-ETSI
+	(2402 - 2482 @ 40), (20)
+	(5170 - 5250 @ 80), (23), AUTO-BW
+	(5250 - 5330 @ 80), (23), DFS, AUTO-BW
+	(5490 - 5710 @ 160), (30), DFS
+	# 5.9ghz band
+	# reference: http://www.etsi.org/deliver/etsi_en/302500_302599/302571/01.02.00_20/en_302571v010200a.pdf
+	(5850 - 5870 @ 10), (30)
+	(5860 - 5880 @ 10), (30)
+	(5870 - 5890 @ 10), (30)
+	(5880 - 5900 @ 10), (30)
+	(5890 - 5910 @ 10), (30)
+	(5900 - 5920 @ 10), (30)
+	(5910 - 5930 @ 10), (30)
+	# 60 gHz band channels 1-4, ref: Etsi En 302 567
+	(57000 - 66000 @ 2160), (40)
+
+country AU: DFS-FCC
+	(2402 - 2482 @ 40), (20)
+	(5170 - 5250 @ 80), (24), AUTO-BW
+	(5250 - 5330 @ 80), (24), DFS, AUTO-BW
+	(5490 - 5590 @ 80), (24), DFS
+	(5650 - 5730 @ 80), (24), DFS
+	(5735 - 5835 @ 80), (30)
+	# 60 gHz band channels 1-4
+	(57240 - 65880 @ 2160), (43), NO-OUTDOOR
+
+country AW: DFS-ETSI
+	(2402 - 2482 @ 40), (20)
+	(5170 - 5250 @ 80), (23), AUTO-BW
+	(5250 - 5330 @ 80), (23), DFS, AUTO-BW
+	(5490 - 5710 @ 160), (30), DFS
+
+country AZ: DFS-ETSI
+	(2402 - 2482 @ 40), (20)
+	(5170 - 5250 @ 80), (18), AUTO-BW
+	(5250 - 5330 @ 80), (18), DFS, AUTO-BW
+
+country BA: DFS-ETSI
+	(2402 - 2482 @ 40), (20)
+	(5170 - 5250 @ 80), (23), AUTO-BW
+	(5250 - 5330 @ 80), (23), DFS, AUTO-BW
+	(5490 - 5710 @ 160), (30), DFS
+
+country BB: DFS-FCC
+	(2402 - 2482 @ 40), (20)
+	(5170 - 5250 @ 80), (23), AUTO-BW
+	(5250 - 5330 @ 80), (23), DFS, AUTO-BW
+	(5735 - 5835 @ 80), (30)
+
+country BD:
+	(2402 - 2482 @ 40), (20)
+	(5735 - 5835 @ 80), (30)
+
+country BE: DFS-ETSI
+	(2402 - 2482 @ 40), (20)
+	(5170 - 5250 @ 80), (23), AUTO-BW
+	(5250 - 5330 @ 80), (23), DFS, AUTO-BW
+	(5490 - 5710 @ 160), (30), DFS
+	# 5.9ghz band
+	# reference: http://www.etsi.org/deliver/etsi_en/302500_302599/302571/01.02.00_20/en_302571v010200a.pdf
+	(5850 - 5870 @ 10), (30)
+	(5860 - 5880 @ 10), (30)
+	(5870 - 5890 @ 10), (30)
+	(5880 - 5900 @ 10), (30)
+	(5890 - 5910 @ 10), (30)
+	(5900 - 5920 @ 10), (30)
+	(5910 - 5930 @ 10), (30)
+	# 60 gHz band channels 1-4, ref: Etsi En 302 567
+	(57000 - 66000 @ 2160), (40)
+
+country BF: DFS-FCC
+	(2402 - 2482 @ 40), (20)
+	(5170 - 5250 @ 80), (24), AUTO-BW
+	(5250 - 5330 @ 80), (24), DFS, AUTO-BW
+	(5490 - 5730 @ 160), (24), DFS
+	(5735 - 5835 @ 80), (30)
+
+country BG: DFS-ETSI
+	(2402 - 2482 @ 40), (20)
+	(5170 - 5250 @ 80), (23), AUTO-BW
+	(5250 - 5330 @ 80), (23), DFS, AUTO-BW
+	(5490 - 5710 @ 160), (30), DFS
+	# 5.9ghz band
+	# reference: http://www.etsi.org/deliver/etsi_en/302500_302599/302571/01.02.00_20/en_302571v010200a.pdf
+	(5850 - 5870 @ 10), (30)
+	(5860 - 5880 @ 10), (30)
+	(5870 - 5890 @ 10), (30)
+	(5880 - 5900 @ 10), (30)
+	(5890 - 5910 @ 10), (30)
+	(5900 - 5920 @ 10), (30)
+	(5910 - 5930 @ 10), (30)
+	# 60 gHz band channels 1-4, ref: Etsi En 302 567
+	(57000 - 66000 @ 2160), (40)
+
+country BH:
+	(2402 - 2482 @ 40), (20)
+	(5170 - 5330 @ 20), (23)
+	(5735 - 5835 @ 20), (33)
+
+country BL: DFS-ETSI
+	(2402 - 2482 @ 40), (20)
+	(5170 - 5250 @ 80), (23), AUTO-BW
+	(5250 - 5330 @ 80), (23), DFS, AUTO-BW
+	(5490 - 5710 @ 160), (30), DFS
+
+country BM: DFS-FCC
+	(2402 - 2472 @ 40), (30)
+	(5170 - 5250 @ 80), (24), AUTO-BW
+	(5250 - 5330 @ 80), (24), DFS, AUTO-BW
+	(5490 - 5730 @ 160), (24), DFS
+	(5735 - 5835 @ 80), (30)
+
+country BN: DFS-ETSI
+	(2402 - 2482 @ 40), (20)
+	(5170 - 5250 @ 80), (20), AUTO-BW
+	(5250 - 5330 @ 80), (20), DFS, AUTO-BW
+	(5735 - 5835 @ 80), (20)
+
+country BO: DFS-ETSI
+	(2402 - 2482 @ 40), (20)
+	(5250 - 5330 @ 80), (30), DFS
+	(5735 - 5835 @ 80), (30)
+	# 60 gHz band channels 1-3, FCC
+	(57240 - 63720 @ 2160), (40)
+
+country BR: DFS-FCC
+	(2402 - 2482 @ 40), (30)
+	(5170 - 5250 @ 80), (24), AUTO-BW
+	(5250 - 5330 @ 80), (24), DFS, AUTO-BW
+	(5490 - 5730 @ 160), (24), DFS
+	(5735 - 5835 @ 80), (30)
+	# 60 gHz band channels 1-3
+	(57240 - 63720 @ 2160), (40)
+
+country BS: DFS-FCC
+	(2402 - 2482 @ 40), (20)
+	(5170 - 5250 @ 80), (24), AUTO-BW
+	(5250 - 5330 @ 80), (24), DFS, AUTO-BW
+	(5490 - 5730 @ 160), (24), DFS
+	(5735 - 5835 @ 80), (30)
+
+country BT: DFS-ETSI
+	(2402 - 2482 @ 40), (20)
+	(5170 - 5250 @ 80), (23), AUTO-BW
+	(5250 - 5330 @ 80), (23), DFS, AUTO-BW
+	(5490 - 5710 @ 160), (30), DFS
+
+country BY: DFS-ETSI
+	(2402 - 2482 @ 40), (20)
+	(5170 - 5250 @ 80), (23), AUTO-BW
+	(5250 - 5330 @ 80), (23), DFS, AUTO-BW
+	(5490 - 5710 @ 160), (30), DFS
+
+country BZ:
+	(2402 - 2482 @ 40), (20)
+	(5170 - 5330 @ 160), (23)
+	(5490 - 5730 @ 160), (30)
+	(5735 - 5835 @ 80), (30)
+
+country CA: DFS-FCC
+	(2402 - 2472 @ 40), (30)
+	(5170 - 5250 @ 80), (24), AUTO-BW
+	(5250 - 5330 @ 80), (24), DFS, AUTO-BW
+	(5490 - 5730 @ 160), (24), DFS
+	(5735 - 5835 @ 80), (30)
+	# 60 gHz band channels 1-3
+	(57240 - 63720 @ 2160), (40)
+
+country CF: DFS-FCC
+	(2402 - 2482 @ 40), (20)
+	(5170 - 5250 @ 40), (24)
+	(5250 - 5330 @ 40), (24), DFS
+	(5490 - 5730 @ 40), (24), DFS
+	(5735 - 5835 @ 40), (30)
+
+country CH: DFS-ETSI
+	(2402 - 2482 @ 40), (20)
+	(5170 - 5250 @ 80), (23), AUTO-BW
+	(5250 - 5330 @ 80), (23), DFS, AUTO-BW
+	(5490 - 5710 @ 160), (30), DFS
+	# 5.9ghz band
+	# reference: http://www.etsi.org/deliver/etsi_en/302500_302599/302571/01.02.00_20/en_302571v010200a.pdf
+	(5850 - 5870 @ 10), (30)
+	(5860 - 5880 @ 10), (30)
+	(5870 - 5890 @ 10), (30)
+	(5880 - 5900 @ 10), (30)
+	(5890 - 5910 @ 10), (30)
+	(5900 - 5920 @ 10), (30)
+	(5910 - 5930 @ 10), (30)
+	# 60 gHz band channels 1-4, ref: Etsi En 302 567
+	(57000 - 66000 @ 2160), (40)
+
+country CI: DFS-FCC
+	(2402 - 2482 @ 40), (20)
+	(5170 - 5250 @ 80), (24), AUTO-BW
+	(5250 - 5330 @ 80), (24), DFS, AUTO-BW
+	(5490 - 5730 @ 160), (24), DFS
+	(5735 - 5835 @ 80), (30)
+
+country CL:
+	(2402 - 2482 @ 40), (20)
+	(5170 - 5330 @ 160), (20)
+	(5735 - 5835 @ 80), (20)
+	# 60 gHz band channels 1-3
+	(57240 - 63720 @ 2160), (50), NO-OUTDOOR
+
+country CN: DFS-FCC
+	(2402 - 2482 @ 40), (20)
+	(5170 - 5250 @ 80), (23), AUTO-BW
+	(5250 - 5330 @ 80), (23), DFS, AUTO-BW
+	(5735 - 5835 @ 80), (33)
+	# 60 gHz band channels 2,3: 44dBm
+	(59400 - 63720 @ 2160), (44)
+
+country CO: DFS-FCC
+	(2402 - 2482 @ 40), (20)
+	(5170 - 5250 @ 80), (24), AUTO-BW
+	(5250 - 5330 @ 80), (24), DFS, AUTO-BW
+	(5490 - 5730 @ 160), (24), DFS
+	(5735 - 5835 @ 80), (30)
+
+country CR: DFS-FCC
+	(2402 - 2482 @ 40), (20)
+	(5170 - 5250 @ 20), (24)
+	(5250 - 5330 @ 20), (24), DFS
+	(5490 - 5730 @ 20), (24), DFS
+	(5735 - 5835 @ 20), (30)
+	# 60 gHz band channels 1-3
+	(57240 - 63720 @ 2160), (30)
+
+country CX: DFS-FCC
+	(2402 - 2482 @ 40), (20)
+	(5170 - 5250 @ 80), (24), AUTO-BW
+	(5250 - 5330 @ 80), (24), DFS, AUTO-BW
+	(5490 - 5730 @ 160), (24), DFS
+	(5735 - 5835 @ 80), (30)
+
+country CY: DFS-ETSI
+	(2402 - 2482 @ 40), (20)
+	(5170 - 5250 @ 80), (23), AUTO-BW
+	(5250 - 5330 @ 80), (23), DFS, AUTO-BW
+	(5490 - 5710 @ 160), (30), DFS
+	# 5.9ghz band
+	# reference: http://www.etsi.org/deliver/etsi_en/302500_302599/302571/01.02.00_20/en_302571v010200a.pdf
+	(5850 - 5870 @ 10), (30)
+	(5860 - 5880 @ 10), (30)
+	(5870 - 5890 @ 10), (30)
+	(5880 - 5900 @ 10), (30)
+	(5890 - 5910 @ 10), (30)
+	(5900 - 5920 @ 10), (30)
+	(5910 - 5930 @ 10), (30)
+	# 60 gHz band channels 1-4, ref: Etsi En 302 567
+	(57000 - 66000 @ 2160), (40)
+
+# Data from http://www.ctu.eu/164/download/VOR/VOR-12-08-2005-34.pdf
+# and http://www.ctu.eu/164/download/VOR/VOR-12-05-2007-6-AN.pdf
+country CZ: DFS-ETSI
+	(2402 - 2482 @ 40), (20)
+	(5170 - 5250 @ 80), (23), AUTO-BW
+	(5250 - 5330 @ 80), (23), DFS, AUTO-BW
+	(5490 - 5710 @ 160), (30), DFS
+	# 5.9ghz band
+	# reference: http://www.etsi.org/deliver/etsi_en/302500_302599/302571/01.02.00_20/en_302571v010200a.pdf
+	(5850 - 5870 @ 10), (30)
+	(5860 - 5880 @ 10), (30)
+	(5870 - 5890 @ 10), (30)
+	(5880 - 5900 @ 10), (30)
+	(5890 - 5910 @ 10), (30)
+	(5900 - 5920 @ 10), (30)
+	(5910 - 5930 @ 10), (30)
+	# 60 gHz band channels 1-4, ref: Etsi En 302 567
+	(57000 - 66000 @ 2160), (40)
+
+# Data from "Frequenznutzungsplan" (as published in April 2008), downloaded from
+# http://www.bundesnetzagentur.de/cae/servlet/contentblob/38448/publicationFile/2659/Frequenznutzungsplan2008_Id17448pdf.pdf
+# For the 5GHz range also see
+# http://www.bundesnetzagentur.de/cae/servlet/contentblob/38216/publicationFile/6579/WLAN5GHzVfg7_2010_28042010pdf.pdf
+
+country DE: DFS-ETSI
+	# entries 279004 and 280006
+	(2402 - 2482 @ 40), (20)
+	(5170 - 5250 @ 80), (23), AUTO-BW
+	(5250 - 5330 @ 80), (23), DFS, AUTO-BW
+	(5490 - 5710 @ 160), (30), DFS
+	# 5.9ghz band
+	# reference: http://www.etsi.org/deliver/etsi_en/302500_302599/302571/01.02.00_20/en_302571v010200a.pdf
+	(5850 - 5870 @ 10), (30)
+	(5860 - 5880 @ 10), (30)
+	(5870 - 5890 @ 10), (30)
+	(5880 - 5900 @ 10), (30)
+	(5890 - 5910 @ 10), (30)
+	(5900 - 5920 @ 10), (30)
+	(5910 - 5930 @ 10), (30)
+	# 60 gHz band channels 1-4, ref: Etsi En 302 567
+	(57000 - 66000 @ 2160), (40)
+
+country DK: DFS-ETSI
+	(2402 - 2482 @ 40), (20)
+	(5170 - 5250 @ 80), (23), AUTO-BW
+	(5250 - 5330 @ 80), (23), DFS, AUTO-BW
+	(5490 - 5710 @ 160), (30), DFS
+	# 5.9ghz band
+	# reference: http://www.etsi.org/deliver/etsi_en/302500_302599/302571/01.02.00_20/en_302571v010200a.pdf
+	(5850 - 5870 @ 10), (30)
+	(5860 - 5880 @ 10), (30)
+	(5870 - 5890 @ 10), (30)
+	(5880 - 5900 @ 10), (30)
+	(5890 - 5910 @ 10), (30)
+	(5900 - 5920 @ 10), (30)
+	(5910 - 5930 @ 10), (30)
+	# 60 gHz band channels 1-4, ref: Etsi En 302 567
+	(57000 - 66000 @ 2160), (40)
+
+country DM: DFS-FCC
+	(2402 - 2472 @ 40), (30)
+	(5170 - 5250 @ 80), (23), AUTO-BW
+	(5250 - 5330 @ 80), (23), DFS, AUTO-BW
+	(5735 - 5835 @ 80), (30)
+
+country DO: DFS-FCC
+	(2402 - 2472 @ 40), (30)
+	(5170 - 5250 @ 80), (23), AUTO-BW
+	(5250 - 5330 @ 80), (23), DFS, AUTO-BW
+	(5735 - 5835 @ 80), (30)
+
+country DZ: DFS-ETSI
+	(2402 - 2482 @ 40), (20)
+	(5170 - 5250 @ 80), (23), AUTO-BW
+	(5250 - 5330 @ 80), (23), DFS, AUTO-BW
+	(5490 - 5670 @ 160), (23), DFS
+
+country EC: DFS-FCC
+	(2402 - 2482 @ 40), (20)
+	(5170 - 5250 @ 20), (24)
+	(5250 - 5330 @ 20), (24), DFS
+	(5490 - 5730 @ 20), (24), DFS
+	(5735 - 5835 @ 20), (30)
+	# 60 gHz band channels 1-3, FCC
+	(57240 - 63720 @ 2160), (40)
+
+country EE: DFS-ETSI
+	(2402 - 2482 @ 40), (20)
+	(5170 - 5250 @ 80), (23), AUTO-BW
+	(5250 - 5330 @ 80), (23), DFS, AUTO-BW
+	(5490 - 5710 @ 160), (30), DFS
+	# 5.9ghz band
+	# reference: http://www.etsi.org/deliver/etsi_en/302500_302599/302571/01.02.00_20/en_302571v010200a.pdf
+	(5850 - 5870 @ 10), (30)
+	(5860 - 5880 @ 10), (30)
+	(5870 - 5890 @ 10), (30)
+	(5880 - 5900 @ 10), (30)
+	(5890 - 5910 @ 10), (30)
+	(5900 - 5920 @ 10), (30)
+	(5910 - 5930 @ 10), (30)
+	# 60 gHz band channels 1-4, ref: Etsi En 302 567
+	(57000 - 66000 @ 2160), (40)
+
+country EG: DFS-ETSI
+	(2402 - 2482 @ 40), (20)
+	(5170 - 5250 @ 40), (23)
+	(5250 - 5330 @ 40), (23), DFS
+
+country ES: DFS-ETSI
+	(2402 - 2482 @ 40), (20)
+	(5170 - 5250 @ 80), (23), AUTO-BW
+	(5250 - 5330 @ 80), (23), DFS, AUTO-BW
+	(5490 - 5710 @ 160), (30), DFS
+	# 5.9ghz band
+	# reference: http://www.etsi.org/deliver/etsi_en/302500_302599/302571/01.02.00_20/en_302571v010200a.pdf
+	(5850 - 5870 @ 10), (30)
+	(5860 - 5880 @ 10), (30)
+	(5870 - 5890 @ 10), (30)
+	(5880 - 5900 @ 10), (30)
+	(5890 - 5910 @ 10), (30)
+	(5900 - 5920 @ 10), (30)
+	(5910 - 5930 @ 10), (30)
+	# 60 gHz band channels 1-4, ref: Etsi En 302 567
+	(57000 - 66000 @ 2160), (40)
+
+country ET: DFS-ETSI
+	(2402 - 2482 @ 40), (20)
+	(5170 - 5250 @ 80), (23), AUTO-BW
+	(5250 - 5330 @ 80), (23), DFS, AUTO-BW
+	(5490 - 5710 @ 160), (30), DFS
+
+country FI: DFS-ETSI
+	(2402 - 2482 @ 40), (20)
+	(5170 - 5250 @ 80), (23), AUTO-BW
+	(5250 - 5330 @ 80), (23), DFS, AUTO-BW
+	(5490 - 5710 @ 160), (30), DFS
+	# 5.9ghz band
+	# reference: http://www.etsi.org/deliver/etsi_en/302500_302599/302571/01.02.00_20/en_302571v010200a.pdf
+	(5850 - 5870 @ 10), (30)
+	(5860 - 5880 @ 10), (30)
+	(5870 - 5890 @ 10), (30)
+	(5880 - 5900 @ 10), (30)
+	(5890 - 5910 @ 10), (30)
+	(5900 - 5920 @ 10), (30)
+	(5910 - 5930 @ 10), (30)
+	# 60 gHz band channels 1-4, ref: Etsi En 302 567
+	(57000 - 66000 @ 2160), (40)
+
+country FM: DFS-FCC
+	(2402 - 2472 @ 40), (30)
+	(5170 - 5250 @ 80), (24), AUTO-BW
+	(5250 - 5330 @ 80), (24), DFS, AUTO-BW
+	(5490 - 5730 @ 160), (24), DFS
+	(5735 - 5835 @ 80), (30)
+
+country FR: DFS-ETSI
+	(2402 - 2482 @ 40), (20)
+	(5170 - 5250 @ 80), (23), AUTO-BW
+	(5250 - 5330 @ 80), (23), DFS, AUTO-BW
+	(5490 - 5710 @ 160), (30), DFS
+	# 5.9ghz band
+	# reference: http://www.etsi.org/deliver/etsi_en/302500_302599/302571/01.02.00_20/en_302571v010200a.pdf
+	(5850 - 5870 @ 10), (30)
+	(5860 - 5880 @ 10), (30)
+	(5870 - 5890 @ 10), (30)
+	(5880 - 5900 @ 10), (30)
+	(5890 - 5910 @ 10), (30)
+	(5900 - 5920 @ 10), (30)
+	(5910 - 5930 @ 10), (30)
+	# 60 gHz band channels 1-4, ref: Etsi En 302 567
+	(57000 - 66000 @ 2160), (40)
+
+country GB: DFS-ETSI
+	(2402 - 2482 @ 40), (20)
+	(5170 - 5250 @ 80), (23), AUTO-BW
+	(5250 - 5330 @ 80), (23), DFS, AUTO-BW
+	(5490 - 5710 @ 160), (30), DFS
+	# 5.9ghz band
+	# reference: http://www.etsi.org/deliver/etsi_en/302500_302599/302571/01.02.00_20/en_302571v010200a.pdf
+	(5850 - 5870 @ 10), (30)
+	(5860 - 5880 @ 10), (30)
+	(5870 - 5890 @ 10), (30)
+	(5880 - 5900 @ 10), (30)
+	(5890 - 5910 @ 10), (30)
+	(5900 - 5920 @ 10), (30)
+	(5910 - 5930 @ 10), (30)
+	# 60 gHz band channels 1-4, ref: Etsi En 302 567
+	(57000 - 66000 @ 2160), (40)
+
+country GD: DFS-FCC
+	(2402 - 2472 @ 40), (30)
+	(5170 - 5250 @ 80), (24), AUTO-BW
+	(5250 - 5330 @ 80), (24), DFS, AUTO-BW
+	(5490 - 5730 @ 160), (24), DFS
+	(5735 - 5835 @ 80), (30)
+
+country GE: DFS-ETSI
+	(2402 - 2482 @ 40), (20)
+	(5170 - 5250 @ 80), (18), AUTO-BW
+	(5250 - 5330 @ 80), (18), DFS, AUTO-BW
+
+country GF: DFS-ETSI
+	(2402 - 2482 @ 40), (20)
+	(5170 - 5250 @ 80), (23), AUTO-BW
+	(5250 - 5330 @ 80), (23), DFS, AUTO-BW
+	(5490 - 5710 @ 160), (30), DFS
+
+country GH: DFS-FCC
+	(2402 - 2482 @ 40), (20)
+	(5170 - 5250 @ 80), (24), AUTO-BW
+	(5250 - 5330 @ 80), (24), DFS, AUTO-BW
+	(5490 - 5730 @ 160), (24), DFS
+	(5735 - 5835 @ 80), (30)
+
+country GI: DFS-ETSI
+	(2402 - 2482 @ 40), (20)
+	(5170 - 5250 @ 80), (23), AUTO-BW
+	(5250 - 5330 @ 80), (23), DFS, AUTO-BW
+	(5490 - 5710 @ 160), (30), DFS
+
+country GL: DFS-ETSI
+	(2402 - 2482 @ 40), (20)
+	(5170 - 5250 @ 80), (23), AUTO-BW
+	(5250 - 5330 @ 80), (23), DFS, AUTO-BW
+	(5490 - 5710 @ 160), (30), DFS
+
+country GP: DFS-ETSI
+	(2402 - 2482 @ 40), (20)
+	(5170 - 5250 @ 80), (23), AUTO-BW
+	(5250 - 5330 @ 80), (23), DFS, AUTO-BW
+	(5490 - 5710 @ 160), (30), DFS
+
+country GR: DFS-ETSI
+	(2402 - 2482 @ 40), (20)
+	(5170 - 5250 @ 80), (23), AUTO-BW
+	(5250 - 5330 @ 80), (23), DFS, AUTO-BW
+	(5490 - 5710 @ 160), (30), DFS
+	# 5.9ghz band
+	# reference: http://www.etsi.org/deliver/etsi_en/302500_302599/302571/01.02.00_20/en_302571v010200a.pdf
+	(5850 - 5870 @ 10), (30)
+	(5860 - 5880 @ 10), (30)
+	(5870 - 5890 @ 10), (30)
+	(5880 - 5900 @ 10), (30)
+	(5890 - 5910 @ 10), (30)
+	(5900 - 5920 @ 10), (30)
+	(5910 - 5930 @ 10), (30)
+	# 60 gHz band channels 1-4, ref: Etsi En 302 567
+	(57000 - 66000 @ 2160), (40)
+
+country GT: DFS-ETSI
+	(2402 - 2482 @ 40), (20)
+	(5170 - 5250 @ 80), (23), AUTO-BW
+	(5250 - 5330 @ 80), (23), DFS, AUTO-BW
+	(5490 - 5710 @ 160), (30), DFS
+
+country GU: DFS-FCC
+	(2402 - 2472 @ 40), (30)
+	(5170 - 5250 @ 80), (24), AUTO-BW
+	(5250 - 5330 @ 80), (24), DFS, AUTO-BW
+	(5490 - 5730 @ 160), (24), DFS
+	(5735 - 5835 @ 80), (30)
+	# 60 gHz band channels 1-3, FCC
+	(57240 - 63720 @ 2160), (40)
+
+country GY:
+	(2402 - 2482 @ 40), (30)
+	(5735 - 5835 @ 80), (30)
+
+country HK: DFS-FCC
+	(2402 - 2482 @ 40), (20)
+	(5170 - 5250 @ 80), (24), AUTO-BW
+	(5250 - 5330 @ 80), (24), DFS, AUTO-BW
+	(5490 - 5730 @ 160), (24), DFS
+	(5735 - 5835 @ 80), (30)
+	# 60 gHz band channels 1-4, ref: FCC/EU
+	(57240 - 65880 @ 2160), (40)
+
+country HN:
+	(2402 - 2482 @ 40), (20)
+	(5170 - 5330 @ 160), (24)
+	(5490 - 5730 @ 160), (24)
+	(5735 - 5835 @ 80), (30)
+	# 60 gHz band channels 1-3, FCC
+	(57240 - 63720 @ 2160), (40)
+
+country HR: DFS-ETSI
+	(2402 - 2482 @ 40), (20)
+	(5170 - 5250 @ 80), (23), AUTO-BW
+	(5250 - 5330 @ 80), (23), DFS, AUTO-BW
+	(5490 - 5710 @ 160), (30), DFS
+	# 5.9ghz band
+	# reference: http://www.etsi.org/deliver/etsi_en/302500_302599/302571/01.02.00_20/en_302571v010200a.pdf
+	(5850 - 5870 @ 10), (30)
+	(5860 - 5880 @ 10), (30)
+	(5870 - 5890 @ 10), (30)
+	(5880 - 5900 @ 10), (30)
+	(5890 - 5910 @ 10), (30)
+	(5900 - 5920 @ 10), (30)
+	(5910 - 5930 @ 10), (30)
+	# 60 gHz band channels 1-4, ref: Etsi En 302 567
+	(57000 - 66000 @ 2160), (40)
+
+country HT: DFS-FCC
+	(2402 - 2472 @ 40), (30)
+	(5170 - 5250 @ 80), (24), AUTO-BW
+	(5250 - 5330 @ 80), (24), DFS, AUTO-BW
+	(5490 - 5730 @ 160), (24), DFS
+	(5735 - 5835 @ 80), (30)
+
+country HU: DFS-ETSI
+	(2402 - 2482 @ 40), (20)
+	(5170 - 5250 @ 80), (23), AUTO-BW
+	(5250 - 5330 @ 80), (23), DFS, AUTO-BW
+	(5490 - 5710 @ 160), (30), DFS
+	# 5.9ghz band
+	# reference: http://www.etsi.org/deliver/etsi_en/302500_302599/302571/01.02.00_20/en_302571v010200a.pdf
+	(5850 - 5870 @ 10), (30)
+	(5860 - 5880 @ 10), (30)
+	(5870 - 5890 @ 10), (30)
+	(5880 - 5900 @ 10), (30)
+	(5890 - 5910 @ 10), (30)
+	(5900 - 5920 @ 10), (30)
+	(5910 - 5930 @ 10), (30)
+	# 60 gHz band channels 1-4, ref: Etsi En 302 567
+	(57000 - 66000 @ 2160), (40)
+
+country ID:
+	# ref: http://www.postel.go.id/content/ID/regulasi/standardisasi/kepdir/bwa%205,8%20ghz.pdf
+	(2402 - 2482 @ 40), (30)
+	(5735 - 5815 @ 20), (30)
+
+country IE: DFS-ETSI
+	(2402 - 2482 @ 40), (20)
+	(5170 - 5250 @ 80), (23), AUTO-BW
+	(5250 - 5330 @ 80), (23), DFS, AUTO-BW
+	(5490 - 5710 @ 160), (30), DFS
+	# 5.9ghz band
+	# reference: http://www.etsi.org/deliver/etsi_en/302500_302599/302571/01.02.00_20/en_302571v010200a.pdf
+	(5850 - 5870 @ 10), (30)
+	(5860 - 5880 @ 10), (30)
+	(5870 - 5890 @ 10), (30)
+	(5880 - 5900 @ 10), (30)
+	(5890 - 5910 @ 10), (30)
+	(5900 - 5920 @ 10), (30)
+	(5910 - 5930 @ 10), (30)
+	# 60 gHz band channels 1-4, ref: Etsi En 302 567
+	(57000 - 66000 @ 2160), (40)
+
+country IL: DFS-ETSI
+	(2402 - 2482 @ 40), (20)
+	(5170 - 5250 @ 80), (23), AUTO-BW
+	(5250 - 5330 @ 80), (23), DFS, AUTO-BW
+	# 60 gHz band channels 1-4, based on Etsi En 302 567
+	(57000 - 66000 @ 2160), (40)
+
+country IN:
+	(2402 - 2482 @ 40), (20)
+	(5170 - 5330 @ 160), (23)
+	(5735 - 5835 @ 80), (33)
+
+country IQ: DFS-ETSI
+	(2402 - 2482 @ 40), (20)
+	(5170 - 5250 @ 80), (23), AUTO-BW
+	(5250 - 5330 @ 80), (23), DFS, AUTO-BW
+	(5490 - 5710 @ 160), (30), DFS
+
+country IS: DFS-ETSI
+	(2402 - 2482 @ 40), (20)
+	(5170 - 5250 @ 80), (23), AUTO-BW
+	(5250 - 5330 @ 80), (23), DFS, AUTO-BW
+	(5490 - 5710 @ 160), (30), DFS
+	# 5.9ghz band
+	# reference: http://www.etsi.org/deliver/etsi_en/302500_302599/302571/01.02.00_20/en_302571v010200a.pdf
+	(5850 - 5870 @ 10), (30)
+	(5860 - 5880 @ 10), (30)
+	(5870 - 5890 @ 10), (30)
+	(5880 - 5900 @ 10), (30)
+	(5890 - 5910 @ 10), (30)
+	(5900 - 5920 @ 10), (30)
+	(5910 - 5930 @ 10), (30)
+	# 60 gHz band channels 1-4, ref: Etsi En 302 567
+	(57000 - 66000 @ 2160), (40)
+
+country IT: DFS-ETSI
+	(2402 - 2482 @ 40), (20)
+	(5170 - 5250 @ 80), (23), AUTO-BW
+	(5250 - 5330 @ 80), (23), DFS, AUTO-BW
+	(5490 - 5710 @ 160), (30), DFS
+	# 5.9ghz band
+	# reference: http://www.etsi.org/deliver/etsi_en/302500_302599/302571/01.02.00_20/en_302571v010200a.pdf
+	(5850 - 5870 @ 10), (30)
+	(5860 - 5880 @ 10), (30)
+	(5870 - 5890 @ 10), (30)
+	(5880 - 5900 @ 10), (30)
+	(5890 - 5910 @ 10), (30)
+	(5900 - 5920 @ 10), (30)
+	(5910 - 5930 @ 10), (30)
+	# 60 gHz band channels 1-4, ref: Etsi En 302 567
+	(57000 - 66000 @ 2160), (40)
+
+country JM: DFS-FCC
+	(2402 - 2482 @ 40), (20)
+	(5170 - 5250 @ 80), (24), AUTO-BW
+	(5250 - 5330 @ 80), (24), DFS, AUTO-BW
+	(5490 - 5730 @ 160), (24), DFS
+	(5735 - 5835 @ 80), (30)
+	# 60 gHz band channels 1-3, FCC
+	(57240 - 63720 @ 2160), (40)
+
+country JO:
+	(2402 - 2482 @ 40), (20)
+	(5170 - 5250 @ 80), (23)
+	(5735 - 5835 @ 80), (23)
+	# 60 gHz band channels 1-4, ref: Etsi En 302 567
+	(57000 - 66000 @ 2160), (40)
+
+country JP: DFS-JP
+	(2402 - 2482 @ 40), (20)
+	(5170 - 5250 @ 80), (20), AUTO-BW, NO-OUTDOOR
+	(5250 - 5330 @ 80), (20), DFS, AUTO-BW, NO-OUTDOOR
+	(5490 - 5710 @ 160), (20), DFS
+	# 60 gHz band channels 1-4
+	(57240 - 65880 @ 2160), (40)
+
+country KE: DFS-ETSI
+	(2402 - 2482 @ 40), (20)
+	(5170 - 5250 @ 80), (23)
+	(5490 - 5570 @ 80), (30), DFS
+	(5735 - 5775 @ 40), (23)
+
+country KH: DFS-ETSI
+	(2402 - 2482 @ 40), (20)
+	(5170 - 5250 @ 80), (23), AUTO-BW
+	(5250 - 5330 @ 80), (23), DFS, AUTO-BW
+	(5490 - 5710 @ 160), (30), DFS
+
+country KN: DFS-FCC
+	(2402 - 2482 @ 40), (20)
+	(5170 - 5250 @ 80), (23), AUTO-BW
+	(5250 - 5330 @ 80), (30), DFS, AUTO-BW
+	(5490 - 5710 @ 160), (30), DFS
+	(5735 - 5815 @ 80), (30)
+
+country KR: DFS-ETSI
+	(2402 - 2482 @ 40), (20)
+	(5170 - 5250 @ 80), (20), AUTO-BW
+	(5250 - 5330 @ 80), (20), DFS, AUTO-BW
+	(5490 - 5730 @ 160), (30), DFS
+	(5735 - 5835 @ 80), (30)
+	# 60 GHz band channels 1-4,
+	# ref: http://www.law.go.kr/%ED%96%89%EC%A0%95%EA%B7%9C%EC%B9%99/%EB%AC%B4%EC%84%A0%EC%84%A4%EB%B9%84%EA%B7%9C%EC%B9%99
+	(57240 - 65880 @ 2160), (43)
+
+country KW: DFS-ETSI
+	(2402 - 2482 @ 40), (20)
+	(5170 - 5250 @ 80), (23), AUTO-BW
+	(5250 - 5330 @ 80), (23), DFS, AUTO-BW
+
+country KY: DFS-FCC
+	(2402 - 2482 @ 40), (20)
+	(5170 - 5250 @ 80), (24), AUTO-BW
+	(5250 - 5330 @ 80), (24), DFS, AUTO-BW
+	(5490 - 5730 @ 160), (24), DFS
+	(5735 - 5835 @ 80), (30)
+
+country KZ:
+	(2402 - 2482 @ 40), (20)
+
+country LB: DFS-FCC
+	(2402 - 2482 @ 40), (20)
+	(5170 - 5250 @ 80), (24), AUTO-BW
+	(5250 - 5330 @ 80), (24), DFS, AUTO-BW
+	(5490 - 5730 @ 160), (24), DFS
+	(5735 - 5835 @ 80), (30)
+
+country LC: DFS-FCC
+	(2402 - 2482 @ 40), (20)
+	(5170 - 5250 @ 80), (20), AUTO-BW
+	(5250 - 5330 @ 80), (30), DFS, AUTO-BW
+	(5490 - 5710 @ 160), (30), DFS
+	(5735 - 5815 @ 80), (30)
+
+country LI: DFS-ETSI
+	(2402 - 2482 @ 40), (20)
+	(5170 - 5250 @ 80), (23), AUTO-BW
+	(5250 - 5330 @ 80), (23), DFS, AUTO-BW
+	(5490 - 5710 @ 160), (30), DFS
+	# 5.9ghz band
+	# reference: http://www.etsi.org/deliver/etsi_en/302500_302599/302571/01.02.00_20/en_302571v010200a.pdf
+	(5850 - 5870 @ 10), (30)
+	(5860 - 5880 @ 10), (30)
+	(5870 - 5890 @ 10), (30)
+	(5880 - 5900 @ 10), (30)
+	(5890 - 5910 @ 10), (30)
+	(5900 - 5920 @ 10), (30)
+	(5910 - 5930 @ 10), (30)
+	# 60 gHz band channels 1-4, ref: Etsi En 302 567
+	(57000 - 66000 @ 2160), (40)
+
+country LK: DFS-FCC
+	(2402 - 2482 @ 40), (20)
+	(5170 - 5250 @ 20), (24)
+	(5250 - 5330 @ 20), (24), DFS
+	(5490 - 5730 @ 20), (24), DFS
+	(5735 - 5835 @ 20), (30)
+
+country LS: DFS-ETSI
+	(2402 - 2482 @ 40), (20)
+	(5170 - 5250 @ 80), (23), AUTO-BW
+	(5250 - 5330 @ 80), (23), DFS, AUTO-BW
+	(5490 - 5710 @ 160), (30), DFS
+
+country LT: DFS-ETSI
+	(2402 - 2482 @ 40), (20)
+	(5170 - 5250 @ 80), (23), AUTO-BW
+	(5250 - 5330 @ 80), (23), DFS, AUTO-BW
+	(5490 - 5710 @ 160), (30), DFS
+	# 5.9ghz band
+	# reference: http://www.etsi.org/deliver/etsi_en/302500_302599/302571/01.02.00_20/en_302571v010200a.pdf
+	(5850 - 5870 @ 10), (30)
+	(5860 - 5880 @ 10), (30)
+	(5870 - 5890 @ 10), (30)
+	(5880 - 5900 @ 10), (30)
+	(5890 - 5910 @ 10), (30)
+	(5900 - 5920 @ 10), (30)
+	(5910 - 5930 @ 10), (30)
+	# 60 gHz band channels 1-4, ref: Etsi En 302 567
+	(57000 - 66000 @ 2160), (40)
+
+country LU: DFS-ETSI
+	(2402 - 2482 @ 40), (20)
+	(5170 - 5250 @ 80), (23), AUTO-BW
+	(5250 - 5330 @ 80), (23), DFS, AUTO-BW
+	(5490 - 5710 @ 160), (30), DFS
+	# 5.9ghz band
+	# reference: http://www.etsi.org/deliver/etsi_en/302500_302599/302571/01.02.00_20/en_302571v010200a.pdf
+	(5850 - 5870 @ 10), (30)
+	(5860 - 5880 @ 10), (30)
+	(5870 - 5890 @ 10), (30)
+	(5880 - 5900 @ 10), (30)
+	(5890 - 5910 @ 10), (30)
+	(5900 - 5920 @ 10), (30)
+	(5910 - 5930 @ 10), (30)
+	# 60 gHz band channels 1-4, ref: Etsi En 302 567
+	(57000 - 66000 @ 2160), (40)
+
+country LV: DFS-ETSI
+	(2402 - 2482 @ 40), (20)
+	(5170 - 5250 @ 80), (23), AUTO-BW
+	(5250 - 5330 @ 80), (23), DFS, AUTO-BW
+	(5490 - 5710 @ 160), (30), DFS
+	# 5.9ghz band
+	# reference: http://www.etsi.org/deliver/etsi_en/302500_302599/302571/01.02.00_20/en_302571v010200a.pdf
+	(5850 - 5870 @ 10), (30)
+	(5860 - 5880 @ 10), (30)
+	(5870 - 5890 @ 10), (30)
+	(5880 - 5900 @ 10), (30)
+	(5890 - 5910 @ 10), (30)
+	(5900 - 5920 @ 10), (30)
+	(5910 - 5930 @ 10), (30)
+	# 60 gHz band channels 1-4, ref: Etsi En 302 567
+	(57000 - 66000 @ 2160), (40)
+
+country MA: DFS-ETSI
+	(2402 - 2482 @ 40), (20)
+	(5170 - 5250 @ 80), (23), AUTO-BW
+	(5250 - 5330 @ 80), (23), DFS, AUTO-BW
+	# 60 gHz band channels 1-4, ref: Etsi En 302 567
+	(57000 - 66000 @ 2160), (40)
+
+country MC: DFS-ETSI
+	(2402 - 2482 @ 40), (20)
+	(5170 - 5250 @ 80), (23), AUTO-BW
+	(5250 - 5330 @ 80), (23), DFS, AUTO-BW
+	(5490 - 5710 @ 160), (30), DFS
+
+country MD: DFS-ETSI
+	(2402 - 2482 @ 40), (20)
+	(5170 - 5250 @ 80), (23), AUTO-BW
+	(5250 - 5330 @ 80), (23), DFS, AUTO-BW
+	(5490 - 5710 @ 160), (30), DFS
+
+country ME: DFS-ETSI
+	(2402 - 2482 @ 40), (20)
+	(5170 - 5250 @ 80), (23), AUTO-BW
+	(5250 - 5330 @ 80), (23), DFS, AUTO-BW
+	(5490 - 5710 @ 160), (30), DFS
+
+country MF: DFS-ETSI
+	(2402 - 2482 @ 40), (20)
+	(5170 - 5250 @ 80), (23), AUTO-BW
+	(5250 - 5330 @ 80), (23), DFS, AUTO-BW
+	(5490 - 5710 @ 160), (30), DFS
+
+country MH: DFS-FCC
+	(2402 - 2472 @ 40), (30)
+	(5170 - 5250 @ 80), (24), AUTO-BW
+	(5250 - 5330 @ 80), (24), DFS, AUTO-BW
+	(5490 - 5730 @ 160), (24), DFS
+	(5735 - 5835 @ 80), (30)
+
+country MK: DFS-ETSI
+	(2402 - 2482 @ 40), (20)
+	(5170 - 5250 @ 80), (23), AUTO-BW
+	(5250 - 5330 @ 80), (23), DFS, AUTO-BW
+	(5490 - 5710 @ 160), (30), DFS
+
+country MN: DFS-FCC
+	(2402 - 2482 @ 40), (20)
+	(5170 - 5250 @ 80), (24), AUTO-BW
+	(5250 - 5330 @ 80), (24), DFS, AUTO-BW
+	(5490 - 5730 @ 160), (24), DFS
+	(5735 - 5835 @ 80), (30)
+
+country MO: DFS-FCC
+	(2402 - 2482 @ 40), (20)
+	(5170 - 5250 @ 80), (24), AUTO-BW
+	(5250 - 5330 @ 80), (24), DFS, AUTO-BW
+	(5490 - 5730 @ 160), (24), DFS
+	(5735 - 5835 @ 80), (30)
+
+country MP: DFS-FCC
+	(2402 - 2472 @ 40), (30)
+	(5170 - 5250 @ 80), (24), AUTO-BW
+	(5250 - 5330 @ 80), (24), DFS, AUTO-BW
+	(5490 - 5730 @ 160), (24), DFS
+	(5735 - 5835 @ 80), (30)
+
+country MQ: DFS-ETSI
+	(2402 - 2482 @ 40), (20)
+	(5170 - 5250 @ 80), (23), AUTO-BW
+	(5250 - 5330 @ 80), (23), DFS, AUTO-BW
+	(5490 - 5710 @ 160), (30), DFS
+
+country MR: DFS-ETSI
+	(2402 - 2482 @ 40), (20)
+	(5170 - 5250 @ 80), (23), AUTO-BW
+	(5250 - 5330 @ 80), (23), DFS, AUTO-BW
+	(5490 - 5710 @ 160), (30), DFS
+
+country MT: DFS-ETSI
+	(2402 - 2482 @ 40), (20)
+	(5170 - 5250 @ 80), (23), AUTO-BW
+	(5250 - 5330 @ 80), (23), DFS, AUTO-BW
+	(5490 - 5710 @ 160), (30), DFS
+	# 5.9ghz band
+	# reference: http://www.etsi.org/deliver/etsi_en/302500_302599/302571/01.02.00_20/en_302571v010200a.pdf
+	(5850 - 5870 @ 10), (30)
+	(5860 - 5880 @ 10), (30)
+	(5870 - 5890 @ 10), (30)
+	(5880 - 5900 @ 10), (30)
+	(5890 - 5910 @ 10), (30)
+	(5900 - 5920 @ 10), (30)
+	(5910 - 5930 @ 10), (30)
+	# 60 gHz band channels 1-4, ref: Etsi En 302 567
+	(57000 - 66000 @ 2160), (40)
+
+country MU: DFS-FCC
+	(2402 - 2482 @ 40), (20)
+	(5170 - 5250 @ 80), (24), AUTO-BW
+	(5250 - 5330 @ 80), (24), DFS, AUTO-BW
+	(5490 - 5730 @ 160), (24), DFS
+	(5735 - 5835 @ 80), (30)
+
+country MV: DFS-ETSI
+	(2402 - 2482 @ 40), (20)
+	(5170 - 5250 @ 80), (20), AUTO-BW
+	(5250 - 5330 @ 80), (20), DFS, AUTO-BW
+	(5735 - 5835 @ 80), (20)
+
+country MW: DFS-ETSI
+	(2402 - 2482 @ 40), (20)
+	(5170 - 5250 @ 80), (23), AUTO-BW
+	(5250 - 5330 @ 80), (23), DFS, AUTO-BW
+	(5490 - 5710 @ 160), (30), DFS
+
+country MX: DFS-FCC
+	(2402 - 2482 @ 40), (30)
+	(5170 - 5250 @ 80), (24), AUTO-BW
+	(5250 - 5330 @ 80), (24), DFS, AUTO-BW
+	(5490 - 5730 @ 160), (24), DFS
+	(5735 - 5835 @ 80), (30)
+	# 60 gHz band channels 1-3, FCC
+	(57240 - 63720 @ 2160), (40)
+
+country MY: DFS-FCC
+	(2402 - 2482 @ 40), (20)
+	(5170 - 5250 @ 80), (24), AUTO-BW
+	(5250 - 5330 @ 80), (24), DFS, AUTO-BW
+	(5490 - 5650 @ 160), (24), DFS
+	(5735 - 5835 @ 80), (24)
+	# 60 gHz band channels 1-3
+	(57240 - 63720 @ 2160), (40)
+
+country NA: DFS-ETSI
+	(2402 - 2482 @ 40), (20)
+	(5170 - 5250 @ 80), (23), AUTO-BW
+	(5250 - 5330 @ 80), (23), DFS, AUTO-BW
+	(5490 - 5730 @ 160), (30), DFS
+	(5735 - 5835 @ 80), (33)
+
+country NG: DFS-ETSI
+	(2402 - 2482 @ 40), (20)
+	(5250 - 5330 @ 80), (30), DFS
+	(5735 - 5835 @ 80), (30)
+
+country NI: DFS-FCC
+	(2402 - 2472 @ 40), (30)
+	(5170 - 5250 @ 80), (24), AUTO-BW
+	(5250 - 5330 @ 80), (24), DFS, AUTO-BW
+	(5490 - 5730 @ 160), (24), DFS
+	(5735 - 5835 @ 80), (30)
+	# 60 gHz band channels 1-3, FCC
+	(57240 - 63720 @ 2160), (40)
+
+country NL: DFS-ETSI
+	(2402 - 2482 @ 40), (20)
+	(5170 - 5250 @ 80), (23), AUTO-BW
+	(5250 - 5330 @ 80), (23), DFS, AUTO-BW
+	(5490 - 5710 @ 160), (30), DFS
+	# 5.9ghz band
+	# reference: http://www.etsi.org/deliver/etsi_en/302500_302599/302571/01.02.00_20/en_302571v010200a.pdf
+	(5850 - 5870 @ 10), (30)
+	(5860 - 5880 @ 10), (30)
+	(5870 - 5890 @ 10), (30)
+	(5880 - 5900 @ 10), (30)
+	(5890 - 5910 @ 10), (30)
+	(5900 - 5920 @ 10), (30)
+	(5910 - 5930 @ 10), (30)
+	# 60 gHz band channels 1-4, ref: Etsi En 302 567
+	(57000 - 66000 @ 2160), (40)
+
+country NO: DFS-ETSI
+	(2402 - 2482 @ 40), (20)
+	(5170 - 5250 @ 80), (23), AUTO-BW
+	(5250 - 5330 @ 80), (23), DFS, AUTO-BW
+	(5490 - 5710 @ 160), (30), DFS
+	# 5.9ghz band
+	# reference: http://www.etsi.org/deliver/etsi_en/302500_302599/302571/01.02.00_20/en_302571v010200a.pdf
+	(5850 - 5870 @ 10), (30)
+	(5860 - 5880 @ 10), (30)
+	(5870 - 5890 @ 10), (30)
+	(5880 - 5900 @ 10), (30)
+	(5890 - 5910 @ 10), (30)
+	(5900 - 5920 @ 10), (30)
+	(5910 - 5930 @ 10), (30)
+	# 60 gHz band channels 1-4, ref: Etsi En 302 567
+	(57000 - 66000 @ 2160), (40)
+
+country NP:
+	(2402 - 2482 @ 40), (20)
+	(5170 - 5330 @ 160), (20)
+	(5735 - 5835 @ 80), (20)
+
+country NZ: DFS-FCC
+	(2402 - 2482 @ 40), (30)
+	(5170 - 5250 @ 80), (24), AUTO-BW
+	(5250 - 5330 @ 80), (24), DFS, AUTO-BW
+	(5490 - 5730 @ 160), (24), DFS
+	(5735 - 5835 @ 80), (30)
+	# 60 gHz band channels 1-4, ref: Etsi En 302 567
+	(57000 - 66000 @ 2160), (40)
+
+country OM: DFS-ETSI
+	(2402 - 2482 @ 40), (20)
+	(5170 - 5250 @ 80), (23), AUTO-BW
+	(5250 - 5330 @ 80), (23), DFS, AUTO-BW
+	(5490 - 5710 @ 160), (30), DFS
+
+country PA:
+	(2402 - 2472 @ 40), (36)
+	(5170 - 5250 @ 80), (23), AUTO-BW
+	(5250 - 5330 @ 80), (30), AUTO-BW
+	(5735 - 5835 @ 80), (36)
+
+country PE: DFS-FCC
+	(2402 - 2482 @ 40), (20)
+	(5170 - 5250 @ 80), (24), AUTO-BW
+	(5250 - 5330 @ 80), (24), DFS, AUTO-BW
+	(5490 - 5730 @ 160), (24), DFS
+	(5735 - 5835 @ 80), (30)
+
+country PF: DFS-ETSI
+	(2402 - 2482 @ 40), (20)
+	(5170 - 5250 @ 80), (23), AUTO-BW
+	(5250 - 5330 @ 80), (23), DFS, AUTO-BW
+	(5490 - 5710 @ 160), (30), DFS
+
+country PG: DFS-FCC
+	(2402 - 2482 @ 40), (20)
+	(5170 - 5250 @ 80), (24), AUTO-BW
+	(5250 - 5330 @ 80), (24), DFS, AUTO-BW
+	(5490 - 5730 @ 160), (24), DFS
+	(5735 - 5835 @ 80), (30)
+
+country PH: DFS-FCC
+	(2402 - 2482 @ 40), (20)
+	(5170 - 5250 @ 80), (24), AUTO-BW
+	(5250 - 5330 @ 80), (24), DFS, AUTO-BW
+	(5490 - 5730 @ 160), (24), DFS
+	(5735 - 5835 @ 80), (30)
+	# 60 gHz band channels 1-4, ref: Etsi En 302 567
+	(57000 - 66000 @ 2160), (40)
+
+country PK:
+	(2402 - 2482 @ 40), (30)
+	(5735 - 5835 @ 80), (30)
+
+country PL: DFS-ETSI
+	(2402 - 2482 @ 40), (20)
+	(5170 - 5250 @ 80), (23), AUTO-BW
+	(5250 - 5330 @ 80), (23), DFS, AUTO-BW
+	(5490 - 5710 @ 160), (30), DFS
+	# 5.9ghz band
+	# reference: http://www.etsi.org/deliver/etsi_en/302500_302599/302571/01.02.00_20/en_302571v010200a.pdf
+	(5850 - 5870 @ 10), (30)
+	(5860 - 5880 @ 10), (30)
+	(5870 - 5890 @ 10), (30)
+	(5880 - 5900 @ 10), (30)
+	(5890 - 5910 @ 10), (30)
+	(5900 - 5920 @ 10), (30)
+	(5910 - 5930 @ 10), (30)
+	# 60 gHz band channels 1-4, ref: Etsi En 302 567
+	(57000 - 66000 @ 2160), (40)
+
+country PM: DFS-ETSI
+	(2402 - 2482 @ 40), (20)
+	(5170 - 5250 @ 80), (23), AUTO-BW
+	(5250 - 5330 @ 80), (23), DFS, AUTO-BW
+	(5490 - 5710 @ 160), (30), DFS
+
+country PR: DFS-FCC
+	(2402 - 2472 @ 40), (30)
+	(5170 - 5250 @ 80), (24), AUTO-BW
+	(5250 - 5330 @ 80), (24), DFS, AUTO-BW
+	(5490 - 5730 @ 160), (24), DFS
+	(5735 - 5835 @ 80), (30)
+
+# Public Safety FCCA, FCC4
+#  27dBm [4.9GHz 1/4 rate], 30dBm [1/2 rate], 33dBm [full rate], and 5GHz same as FCC1
+#  db.txt cannot express the limitation on 5G so disable all 5G channels for FCC4
+country PS: DFS-FCC
+	(2402 - 2472 @ 40), (30)
+	(4940 - 4990 @ 40), (33)
+
+country PT: DFS-ETSI
+	(2402 - 2482 @ 40), (20)
+	(5170 - 5250 @ 80), (23), AUTO-BW
+	(5250 - 5330 @ 80), (23), DFS, AUTO-BW
+	(5490 - 5710 @ 160), (30), DFS
+	# 5.9ghz band
+	# reference: http://www.etsi.org/deliver/etsi_en/302500_302599/302571/01.02.00_20/en_302571v010200a.pdf
+	(5850 - 5870 @ 10), (30)
+	(5860 - 5880 @ 10), (30)
+	(5870 - 5890 @ 10), (30)
+	(5880 - 5900 @ 10), (30)
+	(5890 - 5910 @ 10), (30)
+	(5900 - 5920 @ 10), (30)
+	(5910 - 5930 @ 10), (30)
+	# 60 gHz band channels 1-4, ref: Etsi En 302 567
+	(57000 - 66000 @ 2160), (40)
+
+country PW: DFS-FCC
+	(2402 - 2472 @ 40), (30)
+	(5170 - 5250 @ 80), (24), AUTO-BW
+	(5250 - 5330 @ 80), (24), DFS, AUTO-BW
+	(5490 - 5730 @ 160), (24), DFS
+	(5735 - 5835 @ 80), (30)
+
+country PY: DFS-FCC
+	(2402 - 2482 @ 40), (20)
+	(5170 - 5250 @ 80), (24), AUTO-BW
+	(5250 - 5330 @ 80), (24), DFS, AUTO-BW
+	(5490 - 5730 @ 160), (24), DFS
+	(5735 - 5835 @ 80), (30)
+	# 60 gHz band channels 1-3, FCC
+	(57240 - 63720 @ 2160), (40)
+
+country QA:
+	(2402 - 2482 @ 40), (20)
+	(5735 - 5835 @ 80), (30)
+
+country RE: DFS-ETSI
+	(2402 - 2482 @ 40), (20)
+	(5170 - 5250 @ 80), (23), AUTO-BW
+	(5250 - 5330 @ 80), (23), DFS, AUTO-BW
+	(5490 - 5710 @ 160), (30), DFS
+
+country RO: DFS-ETSI
+	(2402 - 2482 @ 40), (20)
+	(5170 - 5250 @ 80), (23), AUTO-BW
+	(5250 - 5330 @ 80), (23), DFS, AUTO-BW
+	(5490 - 5710 @ 160), (30), DFS
+	# 5.9ghz band
+	# reference: http://www.etsi.org/deliver/etsi_en/302500_302599/302571/01.02.00_20/en_302571v010200a.pdf
+	(5850 - 5870 @ 10), (30)
+	(5860 - 5880 @ 10), (30)
+	(5870 - 5890 @ 10), (30)
+	(5880 - 5900 @ 10), (30)
+	(5890 - 5910 @ 10), (30)
+	(5900 - 5920 @ 10), (30)
+	(5910 - 5930 @ 10), (30)
+	# 60 gHz band channels 1-4, ref: Etsi En 302 567
+	(57000 - 66000 @ 2160), (40)
+
+# Source:
+# http://www.ratel.rs/upload/documents/Plan_namene/Plan_namene-sl_glasnik.pdf
+country RS: DFS-ETSI
+	(2402 - 2482 @ 40), (20)
+	(5170 - 5250 @ 80), (23), AUTO-BW
+	(5250 - 5330 @ 80), (23), DFS, AUTO-BW
+	(5490 - 5710 @ 160), (30), DFS
+	# 60 gHz band channels 1-4, ref: Etsi En 302 567
+	(57000 - 66000 @ 2160), (40)
+
+country RU:
+	(2402 - 2482 @ 40), (20)
+	(5170 - 5330 @ 160), (23)
+	(5490 - 5730 @ 160), (30)
+	(5735 - 5835 @ 80), (30)
+	# 60 gHz band channels 1-4
+	(57240 - 65880 @ 2160), (40)
+
+country RW: DFS-FCC
+	(2402 - 2482 @ 40), (20)
+	(5170 - 5250 @ 80), (24), AUTO-BW
+	(5250 - 5330 @ 80), (24), DFS, AUTO-BW
+	(5490 - 5730 @ 160), (24), DFS
+	(5735 - 5835 @ 80), (30)
+
+country SA: DFS-ETSI
+	(2402 - 2482 @ 40), (20)
+	(5170 - 5250 @ 80), (23), AUTO-BW
+	(5250 - 5330 @ 80), (23), DFS, AUTO-BW
+	(5490 - 5710 @ 160), (30), DFS
+
+country SE: DFS-ETSI
+	(2402 - 2482 @ 40), (20)
+	(5170 - 5250 @ 80), (23), AUTO-BW
+	(5250 - 5330 @ 80), (23), DFS, AUTO-BW
+	(5490 - 5710 @ 160), (30), DFS
+	# 5.9ghz band
+	# reference: http://www.etsi.org/deliver/etsi_en/302500_302599/302571/01.02.00_20/en_302571v010200a.pdf
+	(5850 - 5870 @ 10), (30)
+	(5860 - 5880 @ 10), (30)
+	(5870 - 5890 @ 10), (30)
+	(5880 - 5900 @ 10), (30)
+	(5890 - 5910 @ 10), (30)
+	(5900 - 5920 @ 10), (30)
+	(5910 - 5930 @ 10), (30)
+	# 60 gHz band channels 1-4, ref: Etsi En 302 567
+	(57000 - 66000 @ 2160), (40)
+
+country SG: DFS-FCC
+	(2402 - 2482 @ 40), (20)
+	(5170 - 5250 @ 80), (24), AUTO-BW
+	(5250 - 5330 @ 80), (24), DFS, AUTO-BW
+	(5490 - 5730 @ 160), (24), DFS
+	(5735 - 5835 @ 80), (30)
+	# 60 gHz band channels 1-4, ref: Etsi En 302 567
+	(57000 - 66000 @ 2160), (40), NO-OUTDOOR
+
+country SI: DFS-ETSI
+	(2402 - 2482 @ 40), (20)
+	(5170 - 5250 @ 80), (23), AUTO-BW
+	(5250 - 5330 @ 80), (23), DFS, AUTO-BW
+	(5490 - 5710 @ 160), (30), DFS
+	# 5.9ghz band
+	# reference: http://www.etsi.org/deliver/etsi_en/302500_302599/302571/01.02.00_20/en_302571v010200a.pdf
+	(5850 - 5870 @ 10), (30)
+	(5860 - 5880 @ 10), (30)
+	(5870 - 5890 @ 10), (30)
+	(5880 - 5900 @ 10), (30)
+	(5890 - 5910 @ 10), (30)
+	(5900 - 5920 @ 10), (30)
+	(5910 - 5930 @ 10), (30)
+	# 60 gHz band channels 1-4, ref: Etsi En 302 567
+	(57000 - 66000 @ 2160), (40)
+
+country SK: DFS-ETSI
+	(2402 - 2482 @ 40), (20)
+	(5170 - 5250 @ 80), (23), AUTO-BW
+	(5250 - 5330 @ 80), (23), DFS, AUTO-BW
+	(5490 - 5710 @ 160), (30), DFS
+	# 5.9ghz band
+	# reference: http://www.etsi.org/deliver/etsi_en/302500_302599/302571/01.02.00_20/en_302571v010200a.pdf
+	(5850 - 5870 @ 10), (30)
+	(5860 - 5880 @ 10), (30)
+	(5870 - 5890 @ 10), (30)
+	(5880 - 5900 @ 10), (30)
+	(5890 - 5910 @ 10), (30)
+	(5900 - 5920 @ 10), (30)
+	(5910 - 5930 @ 10), (30)
+	# 60 gHz band channels 1-4, ref: Etsi En 302 567
+	(57000 - 66000 @ 2160), (40)
+
+country SN:
+	(2402 - 2482 @ 40), (20)
+	(5170 - 5330 @ 160), (24)
+	(5490 - 5730 @ 160), (24)
+	(5735 - 5835 @ 80), (30)
+
+country SR: DFS-ETSI
+	(2402 - 2482 @ 40), (20)
+	(5170 - 5250 @ 80), (23), AUTO-BW
+	(5250 - 5330 @ 80), (23), DFS, AUTO-BW
+	(5490 - 5710 @ 160), (30), DFS
+
+country SV: DFS-FCC
+	(2402 - 2482 @ 40), (20)
+	(5170 - 5250 @ 20), (23)
+	(5250 - 5330 @ 20), (23), DFS
+	(5735 - 5835 @ 20), (30)
+
+country TC: DFS-FCC
+	(2402 - 2482 @ 40), (20)
+	(5170 - 5250 @ 80), (24), AUTO-BW
+	(5250 - 5330 @ 80), (24), DFS, AUTO-BW
+	(5490 - 5730 @ 160), (24), DFS
+	(5735 - 5835 @ 80), (30)
+
+country TD: DFS-ETSI
+	(2402 - 2482 @ 40), (20)
+	(5170 - 5250 @ 80), (23), AUTO-BW
+	(5250 - 5330 @ 80), (23), DFS, AUTO-BW
+	(5490 - 5710 @ 160), (30), DFS
+
+country TG: DFS-ETSI
+	(2402 - 2482 @ 40), (20)
+	(5170 - 5250 @ 40), (23)
+	(5250 - 5330 @ 40), (23), DFS
+	(5490 - 5710 @ 40), (30), DFS
+
+country TH: DFS-FCC
+	(2402 - 2482 @ 40), (20)
+	(5170 - 5250 @ 80), (24), AUTO-BW
+	(5250 - 5330 @ 80), (24), DFS, AUTO-BW
+	(5490 - 5730 @ 160), (24), DFS
+	(5735 - 5835 @ 80), (30)
+	# 60 gHz band channels 1-4
+	(57240 - 65880 @ 2160), (40)
+
+country TN: DFS-ETSI
+	(2402 - 2482 @ 40), (20)
+	(5170 - 5250 @ 80), (23), AUTO-BW
+	(5250 - 5330 @ 80), (23), DFS, AUTO-BW
+
+country TR: DFS-ETSI
+	(2402 - 2482 @ 40), (20)
+	(5170 - 5250 @ 80), (23), AUTO-BW
+	(5250 - 5330 @ 80), (23), DFS, AUTO-BW
+	(5490 - 5710 @ 160), (30), DFS
+
+country TT:
+	(2402 - 2482 @ 40), (20)
+	(5170 - 5330 @ 160), (24)
+	(5490 - 5730 @ 160), (24)
+	(5735 - 5835 @ 80), (30)
+	# 60 gHz band channels 1-3, FCC
+	(57240 - 63720 @ 2160), (40)
+
+country TW: DFS-FCC
+	(2402 - 2472 @ 40), (30)
+	(5170 - 5250 @ 80), (24), AUTO-BW
+	(5250 - 5330 @ 80), (24), DFS, AUTO-BW
+	(5490 - 5730 @ 160), (24), DFS
+	(5735 - 5835 @ 80), (30)
+	# 60 gHz band channels 1-3, FCC
+	(57240 - 63720 @ 2160), (40)
+
+country TZ:
+	(2402 - 2482 @ 40), (20)
+	(5735 - 5835 @ 80), (30)
+
+# Source:
+# #914 / 06 Sep 2007: http://www.ucrf.gov.ua/uk/doc/nkrz/1196068874
+# #1174 / 23 Oct 2008: http://www.nkrz.gov.ua/uk/activities/ruling/1225269361
+# (appendix 8)
+# The listed 5GHz range is the lowest common denominator for all
+# related rules in the referenced laws; it is used because the
+# definitions there are ambiguous.
+country UA: DFS-ETSI
+	(2402 - 2482 @ 40), (20)
+	(5170 - 5250 @ 80), (20), AUTO-BW
+	(5250 - 5330 @ 80), (20), DFS, AUTO-BW
+	(5490 - 5670 @ 160), (20), DFS
+	(5735 - 5835 @ 80), (20)
+	# 60 gHz band channels 1-4, ref: Etsi En 302 567
+	(57240 - 65880 @ 2160), (20)
+
+country UG: DFS-FCC
+	(2402 - 2482 @ 40), (20)
+	(5170 - 5250 @ 80), (24), AUTO-BW
+	(5250 - 5330 @ 80), (24), DFS, AUTO-BW
+	(5490 - 5730 @ 160), (24), DFS
+	(5735 - 5835 @ 80), (30)
+
+country US: DFS-FCC
+	(2402 - 2472 @ 40), (30)
+	(5170 - 5250 @ 80), (30), AUTO-BW
+	(5250 - 5330 @ 80), (24), DFS, AUTO-BW
+	(5490 - 5730 @ 160), (24), DFS
+	(5735 - 5835 @ 80), (30)
+	# 5.9 GHz band
+	# reference: https://apps.fcc.gov/edocs_public/attachmatch/FCC-03-324A1.pdf
+	(5842 - 5863 @  5), (30)
+	(5850 - 5870 @ 10), (30)
+	(5860 - 5880 @ 10), (30)
+	(5865 - 5885 @ 20), (30)
+	(5870 - 5890 @ 10), (30)
+	(5880 - 5900 @ 10), (30)
+	(5890 - 5910 @ 10), (30)
+	(5895 - 5915 @ 20), (30)
+	(5900 - 5920 @ 10), (30)
+	(5910 - 5930 @ 10), (30)
+	# 60 GHz band
+	# reference: http://cfr.regstoday.com/47cfr15.aspx#47_CFR_15p255
+	# channels 1-6, EIRP = 40 dBm (43 dBm peak)
+	(57240 - 70200 @ 2160), (40)
+
+country UY: DFS-FCC
+	(2402 - 2482 @ 40), (20)
+	(5170 - 5250 @ 80), (23), AUTO-BW
+	(5250 - 5330 @ 80), (23), DFS, AUTO-BW
+	(5735 - 5835 @ 80), (30)
+	# 60 GHz band channels 1-4
+	(57240 - 65880 @ 2160), (40)
+
+country UZ: DFS-ETSI
+	(2402 - 2482 @ 40), (20)
+	(5170 - 5250 @ 80), (23), AUTO-BW
+	(5250 - 5330 @ 80), (23), DFS, AUTO-BW
+
+country VC: DFS-ETSI
+	(2402 - 2482 @ 40), (20)
+	(5170 - 5250 @ 80), (23), AUTO-BW
+	(5250 - 5330 @ 80), (23), DFS, AUTO-BW
+	(5490 - 5710 @ 160), (30), DFS
+
+country VE: DFS-FCC
+	(2402 - 2482 @ 40), (30)
+	(5170 - 5250 @ 80), (23), AUTO-BW
+	(5250 - 5330 @ 80), (23), DFS, AUTO-BW
+	(5735 - 5835 @ 80), (30)
+
+country VI: DFS-FCC
+	(2402 - 2472 @ 40), (30)
+	(5170 - 5250 @ 80), (24), AUTO-BW
+	(5250 - 5330 @ 80), (24), DFS, AUTO-BW
+	(5490 - 5730 @ 160), (24), DFS
+	(5735 - 5835 @ 80), (30)
+
+country VN: DFS-FCC
+	(2402 - 2482 @ 40), (20)
+	(5170 - 5250 @ 80), (24)
+	(5250 - 5330 @ 80), (24), DFS
+	(5490 - 5730 @ 80), (24), DFS
+	(5735 - 5835 @ 80), (30)
+	# 60 GHz band channels 1-4
+	(57240 - 65880 @ 2160), (40)
+
+country VU: DFS-FCC
+	(2402 - 2482 @ 40), (20)
+	(5170 - 5250 @ 80), (24), AUTO-BW
+	(5250 - 5330 @ 80), (24), DFS, AUTO-BW
+	(5490 - 5730 @ 160), (24), DFS
+	(5735 - 5835 @ 80), (30)
+
+country WF: DFS-ETSI
+	(2402 - 2482 @ 40), (20)
+	(5170 - 5250 @ 80), (23), AUTO-BW
+	(5250 - 5330 @ 80), (23), DFS, AUTO-BW
+	(5490 - 5710 @ 160), (30), DFS
+
+country WS: DFS-ETSI
+	(2402 - 2482 @ 40), (20)
+	(5170 - 5250 @ 40), (23)
+	(5250 - 5330 @ 40), (23), DFS
+	(5490 - 5710 @ 40), (30), DFS
+
+country XA: DFS-JP
+	(2402 - 2482 @ 40), (20)
+	(2474 - 2494 @ 20), (20), NO-OFDM
+	(5170 - 5250 @ 80), (20), NO-IR, AUTO-BW, NO-OUTDOOR
+	(5250 - 5330 @ 80), (20), DFS, AUTO-BW, NO-OUTDOOR
+	(5490 - 5710 @ 160), (20), DFS
+
+country YE:
+	(2402 - 2482 @ 40), (20)
+
+country YT: DFS-ETSI
+	(2402 - 2482 @ 40), (20)
+	(5170 - 5250 @ 80), (23), AUTO-BW
+	(5250 - 5330 @ 80), (23), DFS, AUTO-BW
+	(5490 - 5710 @ 160), (30), DFS
+
+country ZA: DFS-FCC
+	(2402 - 2482 @ 40), (20)
+	(5170 - 5250 @ 80), (24), AUTO-BW
+	(5250 - 5330 @ 80), (24), DFS, AUTO-BW
+	(5490 - 5730 @ 160), (24), DFS
+	(5735 - 5835 @ 80), (30)
+	# 60 GHz band channels 1-4
+	(57240 - 65880 @ 2160), (40), NO-OUTDOOR
+
+country ZW: DFS-ETSI
+	(2402 - 2482 @ 40), (20)
+	(5170 - 5250 @ 80), (23), AUTO-BW
+	(5250 - 5330 @ 80), (23), DFS, AUTO-BW
+	(5490 - 5710 @ 160), (30), DFS
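
For reference, the rule grammar used throughout this regulatory database is
"(start-MHz - end-MHz @ max-bandwidth-MHz), (max-EIRP-dBm)", optionally
followed by flags such as DFS, AUTO-BW, NO-IR, NO-OFDM or NO-OUTDOOR. A
minimal sketch of a hypothetical entry (country code XX and its values are
illustrative only, not part of this patch):

country XX: DFS-ETSI
	# 2.4 GHz: 2402-2482 MHz, channels up to 40 MHz wide, 20 dBm EIRP
	(2402 - 2482 @ 40), (20)
	# 5 GHz radar channels: DFS required; AUTO-BW lets adjacent rules
	# combine into wider channels
	(5250 - 5330 @ 80), (23), DFS, AUTO-BW
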
diff -ruw linux-4.4.115/net/wireless/ibss.c linux-4.4.115-fbx/net/wireless/ibss.c
--- linux-4.4.115/net/wireless/ibss.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/net/wireless/ibss.c	2019-01-22 16:16:29.239298349 +0100
@@ -186,6 +186,7 @@
 	if (!nowext)
 		wdev->wext.ibss.ssid_len = 0;
 #endif
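+	/* re-evaluate DFS channel state now that this interface has
+	 * stopped using its channel */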
+	cfg80211_sched_dfs_chan_update(rdev);
 }
 
 void cfg80211_clear_ibss(struct net_device *dev, bool nowext)
diff -ruw linux-4.4.115/net/wireless/mesh.c linux-4.4.115-fbx/net/wireless/mesh.c
--- linux-4.4.115/net/wireless/mesh.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/net/wireless/mesh.c	2019-01-22 16:16:29.239298349 +0100
@@ -260,6 +260,7 @@
 		wdev->mesh_id_len = 0;
 		memset(&wdev->chandef, 0, sizeof(wdev->chandef));
 		rdev_set_qos_map(rdev, dev, NULL);
+		cfg80211_sched_dfs_chan_update(rdev);
 	}
 
 	return err;
diff -ruw linux-4.4.115/net/wireless/mlme.c linux-4.4.115-fbx/net/wireless/mlme.c
--- linux-4.4.115/net/wireless/mlme.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/net/wireless/mlme.c	2019-10-29 09:26:26.001226254 +0100
@@ -26,9 +26,16 @@
 	struct wiphy *wiphy = wdev->wiphy;
 	struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
 	struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)buf;
-	u8 *ie = mgmt->u.assoc_resp.variable;
-	int ieoffs = offsetof(struct ieee80211_mgmt, u.assoc_resp.variable);
-	u16 status_code = le16_to_cpu(mgmt->u.assoc_resp.status_code);
+	struct cfg80211_connect_resp_params cr;
+
+	memset(&cr, 0, sizeof(cr));
+	cr.status = (int)le16_to_cpu(mgmt->u.assoc_resp.status_code);
+	cr.bssid = mgmt->bssid;
+	cr.bss = bss;
+	cr.resp_ie = mgmt->u.assoc_resp.variable;
+	cr.resp_ie_len =
+		len - offsetof(struct ieee80211_mgmt, u.assoc_resp.variable);
+	cr.timeout_reason = NL80211_TIMEOUT_UNSPECIFIED;
 
 	trace_cfg80211_send_rx_assoc(dev, bss);
 
@@ -38,7 +45,7 @@
 	 * and got a reject -- we only try again with an assoc
 	 * frame instead of reassoc.
 	 */
-	if (cfg80211_sme_rx_assoc_resp(wdev, status_code)) {
+	if (cfg80211_sme_rx_assoc_resp(wdev, cr.status)) {
 		cfg80211_unhold_bss(bss_from_pub(bss));
 		cfg80211_put_bss(wiphy, bss);
 		return;
@@ -46,9 +53,7 @@
 
 	nl80211_send_rx_assoc(rdev, dev, buf, len, GFP_KERNEL, uapsd_queues);
 	/* update current_bss etc., consumes the bss reference */
-	__cfg80211_connect_result(dev, mgmt->bssid, NULL, 0, ie, len - ieoffs,
-				  status_code,
-				  status_code == WLAN_STATUS_SUCCESS, bss);
+	__cfg80211_connect_result(dev, &cr, cr.status == WLAN_STATUS_SUCCESS);
 }
 EXPORT_SYMBOL(cfg80211_rx_assoc_resp);
 
@@ -216,14 +221,14 @@
 		       const u8 *ssid, int ssid_len,
 		       const u8 *ie, int ie_len,
 		       const u8 *key, int key_len, int key_idx,
-		       const u8 *sae_data, int sae_data_len)
+		       const u8 *auth_data, int auth_data_len)
 {
 	struct wireless_dev *wdev = dev->ieee80211_ptr;
 	struct cfg80211_auth_request req = {
 		.ie = ie,
 		.ie_len = ie_len,
-		.sae_data = sae_data,
-		.sae_data_len = sae_data_len,
+		.auth_data = auth_data,
+		.auth_data_len = auth_data_len,
 		.auth_type = auth_type,
 		.key = key,
 		.key_len = key_len,
@@ -656,8 +661,25 @@
 			return err;
 	}
 
-	if (!ether_addr_equal(mgmt->sa, wdev_address(wdev)))
+	if (!ether_addr_equal(mgmt->sa, wdev_address(wdev))) {
+		/* Allow random TA to be used with Public Action frames if the
+		 * driver has indicated support for this. Otherwise, only allow
+		 * the local address to be used.
+		 */
+		if (!ieee80211_is_action(mgmt->frame_control) ||
+		    mgmt->u.action.category != WLAN_CATEGORY_PUBLIC)
+			return -EINVAL;
+		if (!wdev->current_bss &&
+		    !wiphy_ext_feature_isset(
+			    &rdev->wiphy,
+			    NL80211_EXT_FEATURE_MGMT_TX_RANDOM_TA))
+			return -EINVAL;
+		if (wdev->current_bss &&
+		    !wiphy_ext_feature_isset(
+			    &rdev->wiphy,
+			    NL80211_EXT_FEATURE_MGMT_TX_RANDOM_TA_CONNECTED))
 		return -EINVAL;
+	}
 
 	/* Transmit the Action frame as requested by user space */
 	return rdev_mgmt_tx(rdev, wdev, params, cookie);
@@ -721,6 +743,12 @@
 }
 EXPORT_SYMBOL(cfg80211_rx_mgmt);
 
+void cfg80211_sched_dfs_chan_update(struct cfg80211_registered_device *rdev)
+{
+	cancel_delayed_work(&rdev->dfs_update_channels_wk);
+	queue_delayed_work(cfg80211_wq, &rdev->dfs_update_channels_wk, 0);
+}
+
 void cfg80211_dfs_channels_update_work(struct work_struct *work)
 {
 	struct delayed_work *delayed_work;
@@ -731,6 +759,8 @@
 	struct wiphy *wiphy;
 	bool check_again = false;
 	unsigned long timeout, next_time = 0;
+	unsigned long time_dfs_update;
+	enum nl80211_radar_event radar_event;
 	int bandid, i;
 
 	delayed_work = container_of(work, struct delayed_work, work);
@@ -747,11 +777,27 @@
 		for (i = 0; i < sband->n_channels; i++) {
 			c = &sband->channels[i];
 
-			if (c->dfs_state != NL80211_DFS_UNAVAILABLE)
+			if (!(c->flags & IEEE80211_CHAN_RADAR))
 				continue;
 
-			timeout = c->dfs_state_entered + msecs_to_jiffies(
-					IEEE80211_DFS_MIN_NOP_TIME_MS);
+			if (c->dfs_state != NL80211_DFS_UNAVAILABLE &&
+			    c->dfs_state != NL80211_DFS_AVAILABLE)
+				continue;
+
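+			/* A channel in the non-occupancy period (UNAVAILABLE)
+			 * ages back to USABLE; an AVAILABLE channel loses its
+			 * pre-CAC result after a grace period unless pre-CAC
+			 * is allowed in this regdomain or some wiphy is still
+			 * operating on the channel.
+			 */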
+			if (c->dfs_state == NL80211_DFS_UNAVAILABLE) {
+				time_dfs_update = IEEE80211_DFS_MIN_NOP_TIME_MS;
+				radar_event = NL80211_RADAR_NOP_FINISHED;
+			} else {
+				if (regulatory_pre_cac_allowed(wiphy) ||
+				    cfg80211_any_wiphy_oper_chan(wiphy, c))
+					continue;
+
+				time_dfs_update = REG_PRE_CAC_EXPIRY_GRACE_MS;
+				radar_event = NL80211_RADAR_PRE_CAC_EXPIRED;
+			}
+
+			timeout = c->dfs_state_entered +
+				  msecs_to_jiffies(time_dfs_update);
 
 			if (time_after_eq(jiffies, timeout)) {
 				c->dfs_state = NL80211_DFS_USABLE;
@@ -761,8 +807,8 @@
 							NL80211_CHAN_NO_HT);
 
 				nl80211_radar_notify(rdev, &chandef,
-						     NL80211_RADAR_NOP_FINISHED,
-						     NULL, GFP_ATOMIC);
+						     radar_event, NULL,
+						     GFP_ATOMIC);
 				continue;
 			}
 
@@ -787,7 +833,6 @@
 			  gfp_t gfp)
 {
 	struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
-	unsigned long timeout;
 
 	trace_cfg80211_radar_event(wiphy, chandef);
 
@@ -797,9 +842,7 @@
 	 */
 	cfg80211_set_dfs_state(wiphy, chandef, NL80211_DFS_UNAVAILABLE);
 
-	timeout = msecs_to_jiffies(IEEE80211_DFS_MIN_NOP_TIME_MS);
-	queue_delayed_work(cfg80211_wq, &rdev->dfs_update_channels_wk,
-			   timeout);
+	cfg80211_sched_dfs_chan_update(rdev);
 
 	nl80211_radar_notify(rdev, chandef, NL80211_RADAR_DETECTED, NULL, gfp);
 }
@@ -828,6 +871,7 @@
 			  msecs_to_jiffies(wdev->cac_time_ms);
 		WARN_ON(!time_after_eq(jiffies, timeout));
 		cfg80211_set_dfs_state(wiphy, chandef, NL80211_DFS_AVAILABLE);
+		cfg80211_sched_dfs_chan_update(rdev);
 		break;
 	case NL80211_RADAR_CAC_ABORTED:
 		break;
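
The random-TA checks added above are gated on two extended feature flags; a
driver that can transmit Public Action frames with a randomized source
address would advertise them when setting up its wiphy. A minimal sketch
(hypothetical driver setup code, not part of this patch):

static void example_setup_wiphy(struct wiphy *wiphy)
{
	/* random TA allowed while not associated */
	wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_MGMT_TX_RANDOM_TA);
	/* random TA also allowed while associated */
	wiphy_ext_feature_set(wiphy,
			      NL80211_EXT_FEATURE_MGMT_TX_RANDOM_TA_CONNECTED);
}
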
diff -ruw linux-4.4.115/net/wireless/nl80211.c linux-4.4.115-fbx/net/wireless/nl80211.c
--- linux-4.4.115/net/wireless/nl80211.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/net/wireless/nl80211.c	2019-10-29 09:26:26.009226332 +0100
@@ -103,7 +103,7 @@
 		if (have_wdev_id && rdev->wiphy_idx != wiphy_idx)
 			continue;
 
-		list_for_each_entry(wdev, &rdev->wdev_list, list) {
+		list_for_each_entry(wdev, &rdev->wiphy.wdev_list, list) {
 			if (have_ifidx && wdev->netdev &&
 			    wdev->netdev->ifindex == ifidx) {
 				result = wdev;
@@ -149,7 +149,7 @@
 		tmp = cfg80211_rdev_by_wiphy_idx(wdev_id >> 32);
 		if (tmp) {
 			/* make sure wdev exists */
-			list_for_each_entry(wdev, &tmp->wdev_list, list) {
+			list_for_each_entry(wdev, &tmp->wiphy.wdev_list, list) {
 				if (wdev->identifier != (u32)wdev_id)
 					continue;
 				found = true;
@@ -353,7 +353,7 @@
 	[NL80211_ATTR_BG_SCAN_PERIOD] = { .type = NLA_U16 },
 	[NL80211_ATTR_WDEV] = { .type = NLA_U64 },
 	[NL80211_ATTR_USER_REG_HINT_TYPE] = { .type = NLA_U32 },
-	[NL80211_ATTR_SAE_DATA] = { .type = NLA_BINARY, },
+	[NL80211_ATTR_AUTH_DATA] = { .type = NLA_BINARY, },
 	[NL80211_ATTR_VHT_CAPABILITY] = { .len = NL80211_VHT_CAPABILITY_LEN },
 	[NL80211_ATTR_SCAN_FLAGS] = { .type = NLA_U32 },
 	[NL80211_ATTR_P2P_CTWINDOW] = { .type = NLA_U8 },
@@ -401,6 +401,26 @@
 	[NL80211_ATTR_NETNS_FD] = { .type = NLA_U32 },
 	[NL80211_ATTR_SCHED_SCAN_DELAY] = { .type = NLA_U32 },
 	[NL80211_ATTR_REG_INDOOR] = { .type = NLA_FLAG },
+	[NL80211_ATTR_PBSS] = { .type = NLA_FLAG },
+	[NL80211_ATTR_BSS_SELECT] = { .type = NLA_NESTED },
+	[NL80211_ATTR_BSSID] = { .len = ETH_ALEN },
+	[NL80211_ATTR_SCHED_SCAN_RELATIVE_RSSI] = { .type = NLA_S8 },
+	[NL80211_ATTR_SCHED_SCAN_RSSI_ADJUST] = {
+		.len = sizeof(struct nl80211_bss_select_rssi_adjust)
+	},
+	[NL80211_ATTR_FILS_KEK] = { .type = NLA_BINARY,
+				    .len = FILS_MAX_KEK_LEN },
+	[NL80211_ATTR_FILS_NONCES] = { .len = 2 * FILS_NONCE_LEN },
+	[NL80211_ATTR_TIMEOUT_REASON] = { .type = NLA_U32 },
+	[NL80211_ATTR_FILS_ERP_USERNAME] = { .type = NLA_BINARY,
+					     .len = FILS_ERP_MAX_USERNAME_LEN },
+	[NL80211_ATTR_FILS_ERP_REALM] = { .type = NLA_BINARY,
+					  .len = FILS_ERP_MAX_REALM_LEN },
+	[NL80211_ATTR_FILS_ERP_NEXT_SEQ_NUM] = { .type = NLA_U16 },
+	[NL80211_ATTR_FILS_ERP_RRK] = { .type = NLA_BINARY,
+					.len = FILS_ERP_MAX_RRK_LEN },
+	[NL80211_ATTR_FILS_CACHE_ID] = { .len = 2 },
+	[NL80211_ATTR_PMK] = { .type = NLA_BINARY, .len = PMK_MAX_LEN },
 };
 
 /* policy for the key attributes */
@@ -467,7 +487,8 @@
 /* policy for GTK rekey offload attributes */
 static const struct nla_policy
 nl80211_rekey_policy[NUM_NL80211_REKEY_DATA] = {
-	[NL80211_REKEY_DATA_KEK] = { .len = NL80211_KEK_LEN },
+	[NL80211_REKEY_DATA_KEK] = { .type = NLA_BINARY,
+				     .len = FILS_MAX_KEK_LEN },
 	[NL80211_REKEY_DATA_KCK] = { .len = NL80211_KCK_LEN },
 	[NL80211_REKEY_DATA_REPLAY_CTR] = { .len = NL80211_REPLAY_CTR_LEN },
 };
@@ -485,6 +506,15 @@
 	[NL80211_SCHED_SCAN_PLAN_ITERATIONS] = { .type = NLA_U32 },
 };
 
+static const struct nla_policy
+nl80211_bss_select_policy[NL80211_BSS_SELECT_ATTR_MAX + 1] = {
+	[NL80211_BSS_SELECT_ATTR_RSSI] = { .type = NLA_FLAG },
+	[NL80211_BSS_SELECT_ATTR_BAND_PREF] = { .type = NLA_U32 },
+	[NL80211_BSS_SELECT_ATTR_RSSI_ADJUST] = {
+		.len = sizeof(struct nl80211_bss_select_rssi_adjust)
+	},
+};
+
 /* policy for packet pattern attributes */
 static const struct nla_policy
 nl80211_packet_pattern_policy[MAX_NL80211_PKTPAT + 1] = {
@@ -525,7 +555,7 @@
 		*rdev = wiphy_to_rdev(wiphy);
 		*wdev = NULL;
 
-		list_for_each_entry(tmp, &(*rdev)->wdev_list, list) {
+		list_for_each_entry(tmp, &(*rdev)->wiphy.wdev_list, list) {
 			if (tmp->identifier == cb->args[1]) {
 				*wdev = tmp;
 				break;
@@ -994,6 +1024,10 @@
 		     nla_put_u32(msg, NL80211_IFACE_COMB_RADAR_DETECT_REGIONS,
 				c->radar_detect_regions)))
 			goto nla_put_failure;
+		if (c->beacon_int_min_gcd &&
+		    nla_put_u32(msg, NL80211_IFACE_COMB_BI_MIN_GCD,
+				c->beacon_int_min_gcd))
+			goto nla_put_failure;
 
 		nla_nest_end(msg, nl_combi);
 	}
@@ -1244,7 +1278,7 @@
 struct nl80211_dump_wiphy_state {
 	s64 filter_wiphy;
 	long start;
-	long split_start, band_start, chan_start;
+	long split_start, band_start, chan_start, capa_start;
 	bool split;
 };
 
@@ -1538,6 +1572,7 @@
 			if (rdev->wiphy.features &
 					NL80211_FEATURE_SUPPORTS_WMM_ADMISSION)
 				CMD(add_tx_ts, ADD_TX_TS);
+			CMD(update_connect_params, UPDATE_CONNECT_PARAMS);
 		}
 		/* add into the if now */
 #undef CMD
@@ -1722,6 +1757,66 @@
 			    rdev->wiphy.ext_features))
 			goto nla_put_failure;
 
+		state->split_start++;
+		break;
+	case 13:
+		if (rdev->wiphy.num_iftype_ext_capab &&
+		    rdev->wiphy.iftype_ext_capab) {
+			struct nlattr *nested_ext_capab, *nested;
+
+			nested = nla_nest_start(msg,
+						NL80211_ATTR_IFTYPE_EXT_CAPA);
+			if (!nested)
+				goto nla_put_failure;
+
+			for (i = state->capa_start;
+			     i < rdev->wiphy.num_iftype_ext_capab; i++) {
+				const struct wiphy_iftype_ext_capab *capab;
+
+				capab = &rdev->wiphy.iftype_ext_capab[i];
+
+				nested_ext_capab = nla_nest_start(msg, i);
+				if (!nested_ext_capab ||
+				    nla_put_u32(msg, NL80211_ATTR_IFTYPE,
+						capab->iftype) ||
+				    nla_put(msg, NL80211_ATTR_EXT_CAPA,
+					    capab->extended_capabilities_len,
+					    capab->extended_capabilities) ||
+				    nla_put(msg, NL80211_ATTR_EXT_CAPA_MASK,
+					    capab->extended_capabilities_len,
+					    capab->extended_capabilities_mask))
+					goto nla_put_failure;
+
+				nla_nest_end(msg, nested_ext_capab);
+				if (state->split)
+					break;
+			}
+			nla_nest_end(msg, nested);
+			if (i < rdev->wiphy.num_iftype_ext_capab) {
+				state->capa_start = i + 1;
+				break;
+			}
+		}
+
+		if (rdev->wiphy.bss_select_support) {
+			struct nlattr *nested;
+			u32 bss_select_support = rdev->wiphy.bss_select_support;
+
+			nested = nla_nest_start(msg, NL80211_ATTR_BSS_SELECT);
+			if (!nested)
+				goto nla_put_failure;
+
+			i = 0;
+			while (bss_select_support) {
+				if ((bss_select_support & 1) &&
+				    nla_put_flag(msg, i))
+					goto nla_put_failure;
+				i++;
+				bss_select_support >>= 1;
+			}
+			nla_nest_end(msg, nested);
+		}
+
 		/* done */
 		state->split_start = 0;
 		break;
@@ -2450,7 +2545,7 @@
 		}
 		if_idx = 0;
 
-		list_for_each_entry(wdev, &rdev->wdev_list, list) {
+		list_for_each_entry(wdev, &rdev->wiphy.wdev_list, list) {
 			if (if_idx < if_start) {
 				if_idx++;
 				continue;
@@ -2722,7 +2817,7 @@
 		spin_lock_init(&wdev->mgmt_registrations_lock);
 
 		wdev->identifier = ++rdev->wdev_id;
-		list_add_rcu(&wdev->list, &rdev->wdev_list);
+		list_add_rcu(&wdev->list, &rdev->wiphy.wdev_list);
 		rdev->devlist_generation++;
 		break;
 	default:
@@ -3195,6 +3290,291 @@
 	return err;
 }
 
+static u32 rateset_to_mask(struct ieee80211_supported_band *sband,
+			   u8 *rates, u8 rates_len)
+{
+	u8 i;
+	u32 mask = 0;
+
+	for (i = 0; i < rates_len; i++) {
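+		/* rates[] entries are in units of 500 kbps with the
+		 * basic-rate flag in bit 0x80, while ieee80211_rate.bitrate
+		 * is in units of 100 kbps, hence the scaling by 5: e.g. a
+		 * basic 11 Mbps entry 0x96 gives (0x96 & 0x7f) * 5 == 110.
+		 */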
+		int rate = (rates[i] & 0x7f) * 5;
+		int ridx;
+
+		for (ridx = 0; ridx < sband->n_bitrates; ridx++) {
+			struct ieee80211_rate *srate =
+				&sband->bitrates[ridx];
+			if (rate == srate->bitrate) {
+				mask |= 1 << ridx;
+				break;
+			}
+		}
+		if (ridx == sband->n_bitrates)
+			return 0; /* rate not found */
+	}
+
+	return mask;
+}
+
+static bool ht_rateset_to_mask(struct ieee80211_supported_band *sband,
+			       u8 *rates, u8 rates_len,
+			       u8 mcs[IEEE80211_HT_MCS_MASK_LEN])
+{
+	u8 i;
+
+	memset(mcs, 0, IEEE80211_HT_MCS_MASK_LEN);
+
+	for (i = 0; i < rates_len; i++) {
+		int ridx, rbit;
+
+		ridx = rates[i] / 8;
+		rbit = BIT(rates[i] % 8);
+
+		/* check validity */
+		if ((ridx < 0) || (ridx >= IEEE80211_HT_MCS_MASK_LEN))
+			return false;
+
+		/* check availability */
+		if (sband->ht_cap.mcs.rx_mask[ridx] & rbit)
+			mcs[ridx] |= rbit;
+		else
+			return false;
+	}
+
+	return true;
+}
+
+static u16 vht_mcs_map_to_mcs_mask(u8 vht_mcs_map)
+{
+	u16 mcs_mask = 0;
+
+	switch (vht_mcs_map) {
+	case IEEE80211_VHT_MCS_NOT_SUPPORTED:
+		break;
+	case IEEE80211_VHT_MCS_SUPPORT_0_7:
+		mcs_mask = 0x00FF;
+		break;
+	case IEEE80211_VHT_MCS_SUPPORT_0_8:
+		mcs_mask = 0x01FF;
+		break;
+	case IEEE80211_VHT_MCS_SUPPORT_0_9:
+		mcs_mask = 0x03FF;
+		break;
+	default:
+		break;
+	}
+
+	return mcs_mask;
+}
+
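+/*
+ * The VHT MCS map packs two bits per spatial stream: 0 = MCS 0-7,
+ * 1 = MCS 0-8, 2 = MCS 0-9, 3 = not supported. For example, a TX MCS
+ * map of 0xfffa (two streams supporting MCS 0-9 each) expands to
+ * { 0x03FF, 0x03FF, 0, 0, 0, 0, 0, 0 }.
+ */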
+static void vht_build_mcs_mask(u16 vht_mcs_map,
+			       u16 vht_mcs_mask[NL80211_VHT_NSS_MAX])
+{
+	u8 nss;
+
+	for (nss = 0; nss < NL80211_VHT_NSS_MAX; nss++) {
+		vht_mcs_mask[nss] = vht_mcs_map_to_mcs_mask(vht_mcs_map & 0x03);
+		vht_mcs_map >>= 2;
+	}
+}
+
+static bool vht_set_mcs_mask(struct ieee80211_supported_band *sband,
+			     struct nl80211_txrate_vht *txrate,
+			     u16 mcs[NL80211_VHT_NSS_MAX])
+{
+	u16 tx_mcs_map = le16_to_cpu(sband->vht_cap.vht_mcs.tx_mcs_map);
+	u16 tx_mcs_mask[NL80211_VHT_NSS_MAX] = {};
+	u8 i;
+
+	if (!sband->vht_cap.vht_supported)
+		return false;
+
+	memset(mcs, 0, sizeof(u16) * NL80211_VHT_NSS_MAX);
+
+	/* Build vht_mcs_mask from VHT capabilities */
+	vht_build_mcs_mask(tx_mcs_map, tx_mcs_mask);
+
+	for (i = 0; i < NL80211_VHT_NSS_MAX; i++) {
+		if ((tx_mcs_mask[i] & txrate->mcs[i]) == txrate->mcs[i])
+			mcs[i] = txrate->mcs[i];
+		else
+			return false;
+	}
+
+	return true;
+}
+
+static const struct nla_policy nl80211_txattr_policy[NL80211_TXRATE_MAX + 1] = {
+	[NL80211_TXRATE_LEGACY] = { .type = NLA_BINARY,
+				    .len = NL80211_MAX_SUPP_RATES },
+	[NL80211_TXRATE_HT] = { .type = NLA_BINARY,
+				.len = NL80211_MAX_SUPP_HT_RATES },
+	[NL80211_TXRATE_VHT] = { .len = sizeof(struct nl80211_txrate_vht)},
+	[NL80211_TXRATE_GI] = { .type = NLA_U8 },
+};
+
+static int nl80211_parse_tx_bitrate_mask(struct genl_info *info,
+					 struct cfg80211_bitrate_mask *mask)
+{
+	struct nlattr *tb[NL80211_TXRATE_MAX + 1];
+	struct cfg80211_registered_device *rdev = info->user_ptr[0];
+	int rem, i;
+	struct nlattr *tx_rates;
+	struct ieee80211_supported_band *sband;
+	u16 vht_tx_mcs_map;
+
+	memset(mask, 0, sizeof(*mask));
+	/* Default to all rates enabled */
+	for (i = 0; i < IEEE80211_NUM_BANDS; i++) {
+		sband = rdev->wiphy.bands[i];
+
+		if (!sband)
+			continue;
+
+		mask->control[i].legacy = (1 << sband->n_bitrates) - 1;
+		memcpy(mask->control[i].ht_mcs,
+		       sband->ht_cap.mcs.rx_mask,
+		       sizeof(mask->control[i].ht_mcs));
+
+		if (!sband->vht_cap.vht_supported)
+			continue;
+
+		vht_tx_mcs_map = le16_to_cpu(sband->vht_cap.vht_mcs.tx_mcs_map);
+		vht_build_mcs_mask(vht_tx_mcs_map, mask->control[i].vht_mcs);
+	}
+
+	/* if no rates are given set it back to the defaults */
+	if (!info->attrs[NL80211_ATTR_TX_RATES])
+		goto out;
+
+	/* The nested attribute uses enum nl80211_band as the index. This maps
+	 * directly to the enum ieee80211_band values used in cfg80211.
+	 */
+	BUILD_BUG_ON(NL80211_MAX_SUPP_HT_RATES > IEEE80211_HT_MCS_MASK_LEN * 8);
+	nla_for_each_nested(tx_rates, info->attrs[NL80211_ATTR_TX_RATES], rem) {
+		enum ieee80211_band band = nla_type(tx_rates);
+		int err;
+
+		if (band < 0 || band >= IEEE80211_NUM_BANDS)
+			return -EINVAL;
+		sband = rdev->wiphy.bands[band];
+		if (sband == NULL)
+			return -EINVAL;
+		err = nla_parse(tb, NL80211_TXRATE_MAX, nla_data(tx_rates),
+				nla_len(tx_rates), nl80211_txattr_policy);
+		if (err)
+			return err;
+		if (tb[NL80211_TXRATE_LEGACY]) {
+			mask->control[band].legacy = rateset_to_mask(
+				sband,
+				nla_data(tb[NL80211_TXRATE_LEGACY]),
+				nla_len(tb[NL80211_TXRATE_LEGACY]));
+			if ((mask->control[band].legacy == 0) &&
+			    nla_len(tb[NL80211_TXRATE_LEGACY]))
+				return -EINVAL;
+		}
+		if (tb[NL80211_TXRATE_HT]) {
+			if (!ht_rateset_to_mask(
+					sband,
+					nla_data(tb[NL80211_TXRATE_HT]),
+					nla_len(tb[NL80211_TXRATE_HT]),
+					mask->control[band].ht_mcs))
+				return -EINVAL;
+		}
+		if (tb[NL80211_TXRATE_VHT]) {
+			if (!vht_set_mcs_mask(
+					sband,
+					nla_data(tb[NL80211_TXRATE_VHT]),
+					mask->control[band].vht_mcs))
+				return -EINVAL;
+		}
+		if (tb[NL80211_TXRATE_GI]) {
+			mask->control[band].gi =
+				nla_get_u8(tb[NL80211_TXRATE_GI]);
+			if (mask->control[band].gi > NL80211_TXRATE_FORCE_LGI)
+				return -EINVAL;
+		}
+
+		if (mask->control[band].legacy == 0) {
+			/* don't allow empty legacy rates if HT or VHT
+			 * are not even supported.
+			 */
+			if (!(rdev->wiphy.bands[band]->ht_cap.ht_supported ||
+			      rdev->wiphy.bands[band]->vht_cap.vht_supported))
+				return -EINVAL;
+
+			for (i = 0; i < IEEE80211_HT_MCS_MASK_LEN; i++)
+				if (mask->control[band].ht_mcs[i])
+					goto out;
+
+			for (i = 0; i < NL80211_VHT_NSS_MAX; i++)
+				if (mask->control[band].vht_mcs[i])
+					goto out;
+
+			/* legacy and MCS rates may not both be empty */
+			return -EINVAL;
+		}
+	}
+
+out:
+	return 0;
+}
+
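+/*
+ * Beacons are transmitted at a single fixed rate: exactly one of the
+ * legacy, HT or VHT masks may be set, it must select exactly one rate,
+ * and the driver must advertise the matching BEACON_RATE_* extended
+ * feature for that rate type.
+ */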
+static int validate_beacon_tx_rate(struct cfg80211_registered_device *rdev,
+				   enum nl80211_band band,
+				   struct cfg80211_bitrate_mask *beacon_rate)
+{
+	u32 count_ht, count_vht, i;
+	u32 rate = beacon_rate->control[band].legacy;
+
+	/* Allow only one rate */
+	if (hweight32(rate) > 1)
+		return -EINVAL;
+
+	count_ht = 0;
+	for (i = 0; i < IEEE80211_HT_MCS_MASK_LEN; i++) {
+		if (hweight8(beacon_rate->control[band].ht_mcs[i]) > 1) {
+			return -EINVAL;
+		} else if (beacon_rate->control[band].ht_mcs[i]) {
+			count_ht++;
+			if (count_ht > 1)
+				return -EINVAL;
+		}
+		if (count_ht && rate)
+			return -EINVAL;
+	}
+
+	count_vht = 0;
+	for (i = 0; i < NL80211_VHT_NSS_MAX; i++) {
+		if (hweight16(beacon_rate->control[band].vht_mcs[i]) > 1) {
+			return -EINVAL;
+		} else if (beacon_rate->control[band].vht_mcs[i]) {
+			count_vht++;
+			if (count_vht > 1)
+				return -EINVAL;
+		}
+		if (count_vht && rate)
+			return -EINVAL;
+	}
+
+	if ((count_ht && count_vht) || (!rate && !count_ht && !count_vht))
+		return -EINVAL;
+
+	if (rate &&
+	    !wiphy_ext_feature_isset(&rdev->wiphy,
+				     NL80211_EXT_FEATURE_BEACON_RATE_LEGACY))
+		return -EINVAL;
+	if (count_ht &&
+	    !wiphy_ext_feature_isset(&rdev->wiphy,
+				     NL80211_EXT_FEATURE_BEACON_RATE_HT))
+		return -EINVAL;
+	if (count_vht &&
+	    !wiphy_ext_feature_isset(&rdev->wiphy,
+				     NL80211_EXT_FEATURE_BEACON_RATE_VHT))
+		return -EINVAL;
+
+	return 0;
+}
+
 static int nl80211_parse_beacon(struct nlattr *attrs[],
 				struct cfg80211_beacon_data *bcn)
 {
@@ -3258,7 +3638,7 @@
 	struct wireless_dev *wdev;
 	bool ret = false;
 
-	list_for_each_entry(wdev, &rdev->wdev_list, list) {
+	list_for_each_entry(wdev, &rdev->wiphy.wdev_list, list) {
 		if (wdev->iftype != NL80211_IFTYPE_AP &&
 		    wdev->iftype != NL80211_IFTYPE_P2P_GO)
 			continue;
@@ -3286,12 +3666,36 @@
 		if (!(rdev->wiphy.features & NL80211_FEATURE_SAE) &&
 		    auth_type == NL80211_AUTHTYPE_SAE)
 			return false;
+		if (!wiphy_ext_feature_isset(&rdev->wiphy,
+					     NL80211_EXT_FEATURE_FILS_STA) &&
+		    (auth_type == NL80211_AUTHTYPE_FILS_SK ||
+		     auth_type == NL80211_AUTHTYPE_FILS_SK_PFS ||
+		     auth_type == NL80211_AUTHTYPE_FILS_PK))
+			return false;
 		return true;
 	case NL80211_CMD_CONNECT:
+		/* SAE not supported yet */
+		if (auth_type == NL80211_AUTHTYPE_SAE)
+			return false;
+		/* FILS with SK PFS or PK not supported yet */
+		if (auth_type == NL80211_AUTHTYPE_FILS_SK_PFS ||
+		    auth_type == NL80211_AUTHTYPE_FILS_PK)
+			return false;
+		if (!wiphy_ext_feature_isset(
+			    &rdev->wiphy,
+			    NL80211_EXT_FEATURE_FILS_SK_OFFLOAD) &&
+		    auth_type == NL80211_AUTHTYPE_FILS_SK)
+			return false;
+		return true;
 	case NL80211_CMD_START_AP:
 		/* SAE not supported yet */
 		if (auth_type == NL80211_AUTHTYPE_SAE)
 			return false;
+		/* FILS not supported yet */
+		if (auth_type == NL80211_AUTHTYPE_FILS_SK ||
+		    auth_type == NL80211_AUTHTYPE_FILS_SK_PFS ||
+		    auth_type == NL80211_AUTHTYPE_FILS_PK)
+			return false;
 		return true;
 	default:
 		return false;
@@ -3333,7 +3737,8 @@
 	params.dtim_period =
 		nla_get_u32(info->attrs[NL80211_ATTR_DTIM_PERIOD]);
 
-	err = cfg80211_validate_beacon_int(rdev, params.beacon_interval);
+	err = cfg80211_validate_beacon_int(rdev, dev->ieee80211_ptr->iftype,
+					   params.beacon_interval);
 	if (err)
 		return err;
 
@@ -3424,6 +3829,17 @@
 					   wdev->iftype))
 		return -EINVAL;
 
+	if (info->attrs[NL80211_ATTR_TX_RATES]) {
+		err = nl80211_parse_tx_bitrate_mask(info, &params.beacon_rate);
+		if (err)
+			return err;
+
+		err = validate_beacon_tx_rate(rdev, params.chandef.chan->band,
+					      &params.beacon_rate);
+		if (err)
+			return err;
+	}
+
 	if (info->attrs[NL80211_ATTR_SMPS_MODE]) {
 		params.smps_mode =
 			nla_get_u8(info->attrs[NL80211_ATTR_SMPS_MODE]);
@@ -3447,6 +3863,10 @@
 		params.smps_mode = NL80211_SMPS_OFF;
 	}
 
+	params.pbss = nla_get_flag(info->attrs[NL80211_ATTR_PBSS]);
+	if (params.pbss && !rdev->wiphy.bands[IEEE80211_BAND_60GHZ])
+		return -EOPNOTSUPP;
+
 	if (info->attrs[NL80211_ATTR_ACL_POLICY]) {
 		params.acl = parse_acl_data(&rdev->wiphy, info);
 		if (IS_ERR(params.acl))
@@ -5737,6 +6157,73 @@
 	return n_channels;
 }
 
+static bool is_band_valid(struct wiphy *wiphy, enum ieee80211_band b)
+{
+	return b < IEEE80211_NUM_BANDS && wiphy->bands[b];
+}
+
+static int parse_bss_select(struct nlattr *nla, struct wiphy *wiphy,
+			    struct cfg80211_bss_selection *bss_select)
+{
+	struct nlattr *attr[NL80211_BSS_SELECT_ATTR_MAX + 1];
+	struct nlattr *nest;
+	int err;
+	bool found = false;
+	int i;
+
+	/* only process one nested attribute */
+	nest = nla_data(nla);
+	if (!nla_ok(nest, nla_len(nest)))
+		return -EINVAL;
+
+	err = nla_parse(attr, NL80211_BSS_SELECT_ATTR_MAX, nla_data(nest),
+			nla_len(nest), nl80211_bss_select_policy);
+	if (err)
+		return err;
+
+	/* only one attribute may be given */
+	for (i = 0; i <= NL80211_BSS_SELECT_ATTR_MAX; i++) {
+		if (attr[i]) {
+			if (found)
+				return -EINVAL;
+			found = true;
+		}
+	}
+
+	bss_select->behaviour = __NL80211_BSS_SELECT_ATTR_INVALID;
+
+	if (attr[NL80211_BSS_SELECT_ATTR_RSSI])
+		bss_select->behaviour = NL80211_BSS_SELECT_ATTR_RSSI;
+
+	if (attr[NL80211_BSS_SELECT_ATTR_BAND_PREF]) {
+		bss_select->behaviour = NL80211_BSS_SELECT_ATTR_BAND_PREF;
+		bss_select->param.band_pref =
+			nla_get_u32(attr[NL80211_BSS_SELECT_ATTR_BAND_PREF]);
+		if (!is_band_valid(wiphy, bss_select->param.band_pref))
+			return -EINVAL;
+	}
+
+	if (attr[NL80211_BSS_SELECT_ATTR_RSSI_ADJUST]) {
+		struct nl80211_bss_select_rssi_adjust *adj_param;
+
+		adj_param = nla_data(attr[NL80211_BSS_SELECT_ATTR_RSSI_ADJUST]);
+		bss_select->behaviour = NL80211_BSS_SELECT_ATTR_RSSI_ADJUST;
+		bss_select->param.adjust.band = adj_param->band;
+		bss_select->param.adjust.delta = adj_param->delta;
+		if (!is_band_valid(wiphy, bss_select->param.adjust.band))
+			return -EINVAL;
+	}
+
+	/* user-space did not provide behaviour attribute */
+	if (bss_select->behaviour == __NL80211_BSS_SELECT_ATTR_INVALID)
+		return -EINVAL;
+
+	if (!(wiphy->bss_select_support & BIT(bss_select->behaviour)))
+		return -EINVAL;
+
+	return 0;
+}
+
 static int nl80211_parse_random_mac(struct nlattr **attrs,
 				    u8 *mac_addr, u8 *mac_addr_mask)
 {
@@ -5975,6 +6462,25 @@
 	request->no_cck =
 		nla_get_flag(info->attrs[NL80211_ATTR_TX_NO_CCK_RATE]);
 
+	/* Initial implementation used NL80211_ATTR_MAC to set the specific
+	 * BSSID to scan for. This was problematic because that same attribute
+	 * was already used for another purpose (local random MAC address). The
+	 * NL80211_ATTR_BSSID attribute was added to fix this. For backwards
+	 * compatibility with older userspace components, also use the
+	 * NL80211_ATTR_MAC value here if it can be determined to be used for
+	 * the specific BSSID use case instead of the random MAC address
+	 * (NL80211_ATTR_SCAN_FLAGS is used to enable random MAC address use).
+	 */
+	if (info->attrs[NL80211_ATTR_BSSID])
+		memcpy(request->bssid,
+		       nla_data(info->attrs[NL80211_ATTR_BSSID]), ETH_ALEN);
+	else if (!(request->flags & NL80211_SCAN_FLAG_RANDOM_ADDR) &&
+		 info->attrs[NL80211_ATTR_MAC])
+		memcpy(request->bssid, nla_data(info->attrs[NL80211_ATTR_MAC]),
+		       ETH_ALEN);
+	else
+		eth_broadcast_addr(request->bssid);
+
 	request->wdev = wdev;
 	request->wiphy = &rdev->wiphy;
 	request->scan_start = jiffies;
@@ -6188,6 +6694,12 @@
 	if (!n_plans || n_plans > wiphy->max_sched_scan_plans)
 		return ERR_PTR(-EINVAL);
 
+	if (!wiphy_ext_feature_isset(
+		    wiphy, NL80211_EXT_FEATURE_SCHED_SCAN_RELATIVE_RSSI) &&
+	    (attrs[NL80211_ATTR_SCHED_SCAN_RELATIVE_RSSI] ||
+	     attrs[NL80211_ATTR_SCHED_SCAN_RSSI_ADJUST]))
+		return ERR_PTR(-EINVAL);
+
 	request = kzalloc(sizeof(*request)
 			+ sizeof(*request->ssids) * n_ssids
 			+ sizeof(*request->match_sets) * n_match_sets
@@ -6393,6 +6905,26 @@
 		request->delay =
 			nla_get_u32(attrs[NL80211_ATTR_SCHED_SCAN_DELAY]);
 
+	if (attrs[NL80211_ATTR_SCHED_SCAN_RELATIVE_RSSI]) {
+		request->relative_rssi = nla_get_s8(
+			attrs[NL80211_ATTR_SCHED_SCAN_RELATIVE_RSSI]);
+		request->relative_rssi_set = true;
+	}
+
+	if (request->relative_rssi_set &&
+	    attrs[NL80211_ATTR_SCHED_SCAN_RSSI_ADJUST]) {
+		struct nl80211_bss_select_rssi_adjust *rssi_adjust;
+
+		rssi_adjust = nla_data(
+			attrs[NL80211_ATTR_SCHED_SCAN_RSSI_ADJUST]);
+		request->rssi_adjust.band = rssi_adjust->band;
+		request->rssi_adjust.delta = rssi_adjust->delta;
+		if (!is_band_valid(wiphy, request->rssi_adjust.band)) {
+			err = -EINVAL;
+			goto out_free;
+		}
+	}
+
 	err = nl80211_parse_sched_scan_plans(wiphy, n_plans, request, attrs);
 	if (err)
 		goto out_free;
@@ -6406,6 +6938,24 @@
 	return ERR_PTR(err);
 }
 
+static int nl80211_abort_scan(struct sk_buff *skb, struct genl_info *info)
+{
+	struct cfg80211_registered_device *rdev = info->user_ptr[0];
+	struct wireless_dev *wdev = info->user_ptr[1];
+
+	if (!rdev->ops->abort_scan)
+		return -EOPNOTSUPP;
+
+	if (rdev->scan_msg)
+		return 0;
+
+	if (!rdev->scan_req)
+		return -ENOENT;
+
+	rdev_abort_scan(rdev, wdev);
+	return 0;
+}
+
 static int nl80211_start_sched_scan(struct sk_buff *skb,
 				    struct genl_info *info)
 {
@@ -6482,6 +7032,9 @@
 	if (err)
 		return err;
 
+	if (rdev->wiphy.flags & WIPHY_FLAG_DFS_OFFLOAD)
+		return -EOPNOTSUPP;
+
 	if (netif_carrier_ok(dev))
 		return -EBUSY;
 
@@ -6976,8 +7529,8 @@
 	struct cfg80211_registered_device *rdev = info->user_ptr[0];
 	struct net_device *dev = info->user_ptr[1];
 	struct ieee80211_channel *chan;
-	const u8 *bssid, *ssid, *ie = NULL, *sae_data = NULL;
-	int err, ssid_len, ie_len = 0, sae_data_len = 0;
+	const u8 *bssid, *ssid, *ie = NULL, *auth_data = NULL;
+	int err, ssid_len, ie_len = 0, auth_data_len = 0;
 	enum nl80211_auth_type auth_type;
 	struct key_parse key;
 	bool local_state_change;
@@ -7056,17 +7609,23 @@
 	if (!nl80211_valid_auth_type(rdev, auth_type, NL80211_CMD_AUTHENTICATE))
 		return -EINVAL;
 
-	if (auth_type == NL80211_AUTHTYPE_SAE &&
-	    !info->attrs[NL80211_ATTR_SAE_DATA])
+	if ((auth_type == NL80211_AUTHTYPE_SAE ||
+	     auth_type == NL80211_AUTHTYPE_FILS_SK ||
+	     auth_type == NL80211_AUTHTYPE_FILS_SK_PFS ||
+	     auth_type == NL80211_AUTHTYPE_FILS_PK) &&
+	    !info->attrs[NL80211_ATTR_AUTH_DATA])
 		return -EINVAL;
 
-	if (info->attrs[NL80211_ATTR_SAE_DATA]) {
-		if (auth_type != NL80211_AUTHTYPE_SAE)
+	if (info->attrs[NL80211_ATTR_AUTH_DATA]) {
+		if (auth_type != NL80211_AUTHTYPE_SAE &&
+		    auth_type != NL80211_AUTHTYPE_FILS_SK &&
+		    auth_type != NL80211_AUTHTYPE_FILS_SK_PFS &&
+		    auth_type != NL80211_AUTHTYPE_FILS_PK)
 			return -EINVAL;
-		sae_data = nla_data(info->attrs[NL80211_ATTR_SAE_DATA]);
-		sae_data_len = nla_len(info->attrs[NL80211_ATTR_SAE_DATA]);
+		auth_data = nla_data(info->attrs[NL80211_ATTR_AUTH_DATA]);
+		auth_data_len = nla_len(info->attrs[NL80211_ATTR_AUTH_DATA]);
 		/* need to include at least Auth Transaction and Status Code */
-		if (sae_data_len < 4)
+		if (auth_data_len < 4)
 			return -EINVAL;
 	}
 
@@ -7083,7 +7642,7 @@
 	err = cfg80211_mlme_auth(rdev, dev, chan, auth_type, bssid,
 				 ssid, ssid_len, ie, ie_len,
 				 key.p.key, key.p.key_len, key.idx,
-				 sae_data, sae_data_len);
+				 auth_data, auth_data_len);
 	wdev_unlock(dev->ieee80211_ptr);
 	return err;
 }
@@ -7259,6 +7818,15 @@
 		req.flags |= ASSOC_REQ_USE_RRM;
 	}
 
+	if (info->attrs[NL80211_ATTR_FILS_KEK]) {
+		req.fils_kek = nla_data(info->attrs[NL80211_ATTR_FILS_KEK]);
+		req.fils_kek_len = nla_len(info->attrs[NL80211_ATTR_FILS_KEK]);
+		if (!info->attrs[NL80211_ATTR_FILS_NONCES])
+			return -EINVAL;
+		req.fils_nonces =
+			nla_data(info->attrs[NL80211_ATTR_FILS_NONCES]);
+	}
+
 	err = nl80211_crypto_settings(rdev, info, &req.crypto, 1);
 	if (!err) {
 		wdev_lock(dev->ieee80211_ptr);
@@ -7412,12 +7980,14 @@
 
 	ibss.beacon_interval = 100;
 
-	if (info->attrs[NL80211_ATTR_BEACON_INTERVAL]) {
+	if (info->attrs[NL80211_ATTR_BEACON_INTERVAL])
 		ibss.beacon_interval =
 			nla_get_u32(info->attrs[NL80211_ATTR_BEACON_INTERVAL]);
-		if (ibss.beacon_interval < 1 || ibss.beacon_interval > 10000)
-			return -EINVAL;
-	}
+
+	err = cfg80211_validate_beacon_int(rdev, NL80211_IFTYPE_ADHOC,
+					   ibss.beacon_interval);
+	if (err)
+		return err;
 
 	if (!rdev->ops->join_ibss)
 		return -EOPNOTSUPP;
@@ -7886,6 +8456,10 @@
 		connect.mfp = NL80211_MFP_NO;
 	}
 
+	if (info->attrs[NL80211_ATTR_PREV_BSSID])
+		connect.prev_bssid =
+			nla_data(info->attrs[NL80211_ATTR_PREV_BSSID]);
+
 	if (info->attrs[NL80211_ATTR_WIPHY_FREQ]) {
 		connect.channel = nl80211_get_valid_chan(
 			wiphy, info->attrs[NL80211_ATTR_WIPHY_FREQ]);
@@ -7951,6 +8525,56 @@
 		connect.flags |= ASSOC_REQ_USE_RRM;
 	}
 
+	connect.pbss = nla_get_flag(info->attrs[NL80211_ATTR_PBSS]);
+	if (connect.pbss && !rdev->wiphy.bands[IEEE80211_BAND_60GHZ]) {
+		kzfree(connkeys);
+		return -EOPNOTSUPP;
+	}
+
+	if (info->attrs[NL80211_ATTR_BSS_SELECT]) {
+		/* bss selection makes no sense if bssid is set */
+		if (connect.bssid) {
+			kzfree(connkeys);
+			return -EINVAL;
+		}
+
+		err = parse_bss_select(info->attrs[NL80211_ATTR_BSS_SELECT],
+				       wiphy, &connect.bss_select);
+		if (err) {
+			kzfree(connkeys);
+			return err;
+		}
+	}
+
+	if (wiphy_ext_feature_isset(&rdev->wiphy,
+				    NL80211_EXT_FEATURE_FILS_SK_OFFLOAD) &&
+	    info->attrs[NL80211_ATTR_FILS_ERP_USERNAME] &&
+	    info->attrs[NL80211_ATTR_FILS_ERP_REALM] &&
+	    info->attrs[NL80211_ATTR_FILS_ERP_NEXT_SEQ_NUM] &&
+	    info->attrs[NL80211_ATTR_FILS_ERP_RRK]) {
+		connect.fils_erp_username =
+			nla_data(info->attrs[NL80211_ATTR_FILS_ERP_USERNAME]);
+		connect.fils_erp_username_len =
+			nla_len(info->attrs[NL80211_ATTR_FILS_ERP_USERNAME]);
+		connect.fils_erp_realm =
+			nla_data(info->attrs[NL80211_ATTR_FILS_ERP_REALM]);
+		connect.fils_erp_realm_len =
+			nla_len(info->attrs[NL80211_ATTR_FILS_ERP_REALM]);
+		connect.fils_erp_next_seq_num =
+			nla_get_u16(
+			   info->attrs[NL80211_ATTR_FILS_ERP_NEXT_SEQ_NUM]);
+		connect.fils_erp_rrk =
+			nla_data(info->attrs[NL80211_ATTR_FILS_ERP_RRK]);
+		connect.fils_erp_rrk_len =
+			nla_len(info->attrs[NL80211_ATTR_FILS_ERP_RRK]);
+	} else if (info->attrs[NL80211_ATTR_FILS_ERP_USERNAME] ||
+		   info->attrs[NL80211_ATTR_FILS_ERP_REALM] ||
+		   info->attrs[NL80211_ATTR_FILS_ERP_NEXT_SEQ_NUM] ||
+		   info->attrs[NL80211_ATTR_FILS_ERP_RRK]) {
+		kzfree(connkeys);
+		return -EINVAL;
+	}
+
 	wdev_lock(dev->ieee80211_ptr);
 	err = cfg80211_connect(rdev, dev, &connect, connkeys, NULL);
 	wdev_unlock(dev->ieee80211_ptr);
@@ -7959,6 +8583,76 @@
 	return err;
 }
 
+static int nl80211_update_connect_params(struct sk_buff *skb,
+					 struct genl_info *info)
+{
+	struct cfg80211_connect_params connect = {};
+	struct cfg80211_registered_device *rdev = info->user_ptr[0];
+	struct net_device *dev = info->user_ptr[1];
+	struct wireless_dev *wdev = dev->ieee80211_ptr;
+	u32 changed = 0;
+	int ret;
+
+	if (!rdev->ops->update_connect_params)
+		return -EOPNOTSUPP;
+
+	if (info->attrs[NL80211_ATTR_IE]) {
+		if (!is_valid_ie_attr(info->attrs[NL80211_ATTR_IE]))
+			return -EINVAL;
+		connect.ie = nla_data(info->attrs[NL80211_ATTR_IE]);
+		connect.ie_len = nla_len(info->attrs[NL80211_ATTR_IE]);
+		changed |= UPDATE_ASSOC_IES;
+	}
+
+	if (wiphy_ext_feature_isset(&rdev->wiphy,
+				    NL80211_EXT_FEATURE_FILS_SK_OFFLOAD) &&
+	    info->attrs[NL80211_ATTR_FILS_ERP_USERNAME] &&
+	    info->attrs[NL80211_ATTR_FILS_ERP_REALM] &&
+	    info->attrs[NL80211_ATTR_FILS_ERP_NEXT_SEQ_NUM] &&
+	    info->attrs[NL80211_ATTR_FILS_ERP_RRK]) {
+		connect.fils_erp_username =
+			nla_data(info->attrs[NL80211_ATTR_FILS_ERP_USERNAME]);
+		connect.fils_erp_username_len =
+			nla_len(info->attrs[NL80211_ATTR_FILS_ERP_USERNAME]);
+		connect.fils_erp_realm =
+			nla_data(info->attrs[NL80211_ATTR_FILS_ERP_REALM]);
+		connect.fils_erp_realm_len =
+			nla_len(info->attrs[NL80211_ATTR_FILS_ERP_REALM]);
+		connect.fils_erp_next_seq_num =
+			nla_get_u16(
+			   info->attrs[NL80211_ATTR_FILS_ERP_NEXT_SEQ_NUM]);
+		connect.fils_erp_rrk =
+			nla_data(info->attrs[NL80211_ATTR_FILS_ERP_RRK]);
+		connect.fils_erp_rrk_len =
+			nla_len(info->attrs[NL80211_ATTR_FILS_ERP_RRK]);
+		changed |= UPDATE_FILS_ERP_INFO;
+	} else if (info->attrs[NL80211_ATTR_FILS_ERP_USERNAME] ||
+		   info->attrs[NL80211_ATTR_FILS_ERP_REALM] ||
+		   info->attrs[NL80211_ATTR_FILS_ERP_NEXT_SEQ_NUM] ||
+		   info->attrs[NL80211_ATTR_FILS_ERP_RRK]) {
+		return -EINVAL;
+	}
+
+	if (info->attrs[NL80211_ATTR_AUTH_TYPE]) {
+		u32 auth_type =
+			nla_get_u32(info->attrs[NL80211_ATTR_AUTH_TYPE]);
+		if (!nl80211_valid_auth_type(rdev, auth_type,
+					     NL80211_CMD_CONNECT))
+			return -EINVAL;
+		connect.auth_type = auth_type;
+		changed |= UPDATE_AUTH_TYPE;
+	}
+
+	wdev_lock(dev->ieee80211_ptr);
+	if (!wdev->current_bss)
+		ret = -ENOLINK;
+	else
+		ret = rdev_update_connect_params(rdev, dev, &connect, changed);
+	wdev_unlock(dev->ieee80211_ptr);
+
+	return ret;
+}
+
 static int nl80211_disconnect(struct sk_buff *skb, struct genl_info *info)
 {
 	struct cfg80211_registered_device *rdev = info->user_ptr[0];
@@ -8025,14 +8719,28 @@
 
 	memset(&pmksa, 0, sizeof(struct cfg80211_pmksa));
 
-	if (!info->attrs[NL80211_ATTR_MAC])
-		return -EINVAL;
-
 	if (!info->attrs[NL80211_ATTR_PMKID])
 		return -EINVAL;
 
 	pmksa.pmkid = nla_data(info->attrs[NL80211_ATTR_PMKID]);
+
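+	/* A PMKSA entry is keyed either by BSSID (classic RSN
+	 * pre-authentication) or, for FILS, by SSID plus cache identifier.
+	 */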
+	if (info->attrs[NL80211_ATTR_MAC]) {
 	pmksa.bssid = nla_data(info->attrs[NL80211_ATTR_MAC]);
+	} else if (info->attrs[NL80211_ATTR_SSID] &&
+		   info->attrs[NL80211_ATTR_FILS_CACHE_ID] &&
+		   (info->genlhdr->cmd == NL80211_CMD_DEL_PMKSA ||
+		    info->attrs[NL80211_ATTR_PMK])) {
+		pmksa.ssid = nla_data(info->attrs[NL80211_ATTR_SSID]);
+		pmksa.ssid_len = nla_len(info->attrs[NL80211_ATTR_SSID]);
+		pmksa.cache_id =
+			nla_data(info->attrs[NL80211_ATTR_FILS_CACHE_ID]);
+	} else {
+		return -EINVAL;
+	}
+	if (info->attrs[NL80211_ATTR_PMK]) {
+		pmksa.pmk = nla_data(info->attrs[NL80211_ATTR_PMK]);
+		pmksa.pmk_len = nla_len(info->attrs[NL80211_ATTR_PMK]);
+	}
 
 	if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_STATION &&
 	    dev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_CLIENT)
@@ -8212,237 +8920,21 @@
 	return rdev_cancel_remain_on_channel(rdev, wdev, cookie);
 }
 
-static u32 rateset_to_mask(struct ieee80211_supported_band *sband,
-			   u8 *rates, u8 rates_len)
-{
-	u8 i;
-	u32 mask = 0;
-
-	for (i = 0; i < rates_len; i++) {
-		int rate = (rates[i] & 0x7f) * 5;
-		int ridx;
-		for (ridx = 0; ridx < sband->n_bitrates; ridx++) {
-			struct ieee80211_rate *srate =
-				&sband->bitrates[ridx];
-			if (rate == srate->bitrate) {
-				mask |= 1 << ridx;
-				break;
-			}
-		}
-		if (ridx == sband->n_bitrates)
-			return 0; /* rate not found */
-	}
-
-	return mask;
-}
-
-static bool ht_rateset_to_mask(struct ieee80211_supported_band *sband,
-			       u8 *rates, u8 rates_len,
-			       u8 mcs[IEEE80211_HT_MCS_MASK_LEN])
-{
-	u8 i;
-
-	memset(mcs, 0, IEEE80211_HT_MCS_MASK_LEN);
-
-	for (i = 0; i < rates_len; i++) {
-		int ridx, rbit;
-
-		ridx = rates[i] / 8;
-		rbit = BIT(rates[i] % 8);
-
-		/* check validity */
-		if ((ridx < 0) || (ridx >= IEEE80211_HT_MCS_MASK_LEN))
-			return false;
-
-		/* check availability */
-		if (sband->ht_cap.mcs.rx_mask[ridx] & rbit)
-			mcs[ridx] |= rbit;
-		else
-			return false;
-	}
-
-	return true;
-}
-
-static u16 vht_mcs_map_to_mcs_mask(u8 vht_mcs_map)
-{
-	u16 mcs_mask = 0;
-
-	switch (vht_mcs_map) {
-	case IEEE80211_VHT_MCS_NOT_SUPPORTED:
-		break;
-	case IEEE80211_VHT_MCS_SUPPORT_0_7:
-		mcs_mask = 0x00FF;
-		break;
-	case IEEE80211_VHT_MCS_SUPPORT_0_8:
-		mcs_mask = 0x01FF;
-		break;
-	case IEEE80211_VHT_MCS_SUPPORT_0_9:
-		mcs_mask = 0x03FF;
-		break;
-	default:
-		break;
-	}
-
-	return mcs_mask;
-}
-
-static void vht_build_mcs_mask(u16 vht_mcs_map,
-			       u16 vht_mcs_mask[NL80211_VHT_NSS_MAX])
-{
-	u8 nss;
-
-	for (nss = 0; nss < NL80211_VHT_NSS_MAX; nss++) {
-		vht_mcs_mask[nss] = vht_mcs_map_to_mcs_mask(vht_mcs_map & 0x03);
-		vht_mcs_map >>= 2;
-	}
-}
-
-static bool vht_set_mcs_mask(struct ieee80211_supported_band *sband,
-			     struct nl80211_txrate_vht *txrate,
-			     u16 mcs[NL80211_VHT_NSS_MAX])
-{
-	u16 tx_mcs_map = le16_to_cpu(sband->vht_cap.vht_mcs.tx_mcs_map);
-	u16 tx_mcs_mask[NL80211_VHT_NSS_MAX] = {};
-	u8 i;
-
-	if (!sband->vht_cap.vht_supported)
-		return false;
-
-	memset(mcs, 0, sizeof(u16) * NL80211_VHT_NSS_MAX);
-
-	/* Build vht_mcs_mask from VHT capabilities */
-	vht_build_mcs_mask(tx_mcs_map, tx_mcs_mask);
-
-	for (i = 0; i < NL80211_VHT_NSS_MAX; i++) {
-		if ((tx_mcs_mask[i] & txrate->mcs[i]) == txrate->mcs[i])
-			mcs[i] = txrate->mcs[i];
-		else
-			return false;
-	}
-
-	return true;
-}
-
-static const struct nla_policy nl80211_txattr_policy[NL80211_TXRATE_MAX + 1] = {
-	[NL80211_TXRATE_LEGACY] = { .type = NLA_BINARY,
-				    .len = NL80211_MAX_SUPP_RATES },
-	[NL80211_TXRATE_HT] = { .type = NLA_BINARY,
-				.len = NL80211_MAX_SUPP_HT_RATES },
-	[NL80211_TXRATE_VHT] = { .len = sizeof(struct nl80211_txrate_vht)},
-	[NL80211_TXRATE_GI] = { .type = NLA_U8 },
-};
-
 static int nl80211_set_tx_bitrate_mask(struct sk_buff *skb,
 				       struct genl_info *info)
 {
-	struct nlattr *tb[NL80211_TXRATE_MAX + 1];
-	struct cfg80211_registered_device *rdev = info->user_ptr[0];
 	struct cfg80211_bitrate_mask mask;
-	int rem, i;
+	struct cfg80211_registered_device *rdev = info->user_ptr[0];
 	struct net_device *dev = info->user_ptr[1];
-	struct nlattr *tx_rates;
-	struct ieee80211_supported_band *sband;
-	u16 vht_tx_mcs_map;
+	int err;
 
 	if (!rdev->ops->set_bitrate_mask)
 		return -EOPNOTSUPP;
 
-	memset(&mask, 0, sizeof(mask));
-	/* Default to all rates enabled */
-	for (i = 0; i < IEEE80211_NUM_BANDS; i++) {
-		sband = rdev->wiphy.bands[i];
-
-		if (!sband)
-			continue;
-
-		mask.control[i].legacy = (1 << sband->n_bitrates) - 1;
-		memcpy(mask.control[i].ht_mcs,
-		       sband->ht_cap.mcs.rx_mask,
-		       sizeof(mask.control[i].ht_mcs));
-
-		if (!sband->vht_cap.vht_supported)
-			continue;
-
-		vht_tx_mcs_map = le16_to_cpu(sband->vht_cap.vht_mcs.tx_mcs_map);
-		vht_build_mcs_mask(vht_tx_mcs_map, mask.control[i].vht_mcs);
-	}
-
-	/* if no rates are given set it back to the defaults */
-	if (!info->attrs[NL80211_ATTR_TX_RATES])
-		goto out;
-
-	/*
-	 * The nested attribute uses enum nl80211_band as the index. This maps
-	 * directly to the enum ieee80211_band values used in cfg80211.
-	 */
-	BUILD_BUG_ON(NL80211_MAX_SUPP_HT_RATES > IEEE80211_HT_MCS_MASK_LEN * 8);
-	nla_for_each_nested(tx_rates, info->attrs[NL80211_ATTR_TX_RATES], rem) {
-		enum ieee80211_band band = nla_type(tx_rates);
-		int err;
-
-		if (band < 0 || band >= IEEE80211_NUM_BANDS)
-			return -EINVAL;
-		sband = rdev->wiphy.bands[band];
-		if (sband == NULL)
-			return -EINVAL;
-		err = nla_parse(tb, NL80211_TXRATE_MAX, nla_data(tx_rates),
-				nla_len(tx_rates), nl80211_txattr_policy);
+	err = nl80211_parse_tx_bitrate_mask(info, &mask);
 		if (err)
 			return err;
-		if (tb[NL80211_TXRATE_LEGACY]) {
-			mask.control[band].legacy = rateset_to_mask(
-				sband,
-				nla_data(tb[NL80211_TXRATE_LEGACY]),
-				nla_len(tb[NL80211_TXRATE_LEGACY]));
-			if ((mask.control[band].legacy == 0) &&
-			    nla_len(tb[NL80211_TXRATE_LEGACY]))
-				return -EINVAL;
-		}
-		if (tb[NL80211_TXRATE_HT]) {
-			if (!ht_rateset_to_mask(
-					sband,
-					nla_data(tb[NL80211_TXRATE_HT]),
-					nla_len(tb[NL80211_TXRATE_HT]),
-					mask.control[band].ht_mcs))
-				return -EINVAL;
-		}
-		if (tb[NL80211_TXRATE_VHT]) {
-			if (!vht_set_mcs_mask(
-					sband,
-					nla_data(tb[NL80211_TXRATE_VHT]),
-					mask.control[band].vht_mcs))
-				return -EINVAL;
-		}
-		if (tb[NL80211_TXRATE_GI]) {
-			mask.control[band].gi =
-				nla_get_u8(tb[NL80211_TXRATE_GI]);
-			if (mask.control[band].gi > NL80211_TXRATE_FORCE_LGI)
-				return -EINVAL;
-		}
 
-		if (mask.control[band].legacy == 0) {
-			/* don't allow empty legacy rates if HT or VHT
-			 * are not even supported.
-			 */
-			if (!(rdev->wiphy.bands[band]->ht_cap.ht_supported ||
-			      rdev->wiphy.bands[band]->vht_cap.vht_supported))
-				return -EINVAL;
-
-			for (i = 0; i < IEEE80211_HT_MCS_MASK_LEN; i++)
-				if (mask.control[band].ht_mcs[i])
-					goto out;
-
-			for (i = 0; i < NL80211_VHT_NSS_MAX; i++)
-				if (mask.control[band].vht_mcs[i])
-					goto out;
-
-			/* legacy and mcs rates may not be both empty */
-			return -EINVAL;
-		}
-	}
-
-out:
 	return rdev_set_bitrate_mask(rdev, dev, NULL, &mask);
 }
 
@@ -8861,9 +9353,12 @@
 	if (info->attrs[NL80211_ATTR_BEACON_INTERVAL]) {
 		setup.beacon_interval =
 			nla_get_u32(info->attrs[NL80211_ATTR_BEACON_INTERVAL]);
-		if (setup.beacon_interval < 10 ||
-		    setup.beacon_interval > 10000)
-			return -EINVAL;
+
+		err = cfg80211_validate_beacon_int(rdev,
+						   NL80211_IFTYPE_MESH_POINT,
+						   setup.beacon_interval);
+		if (err)
+			return err;
 	}
 
 	if (info->attrs[NL80211_ATTR_DTIM_PERIOD]) {
@@ -8909,6 +9404,17 @@
 			return err;
 	}
 
+	if (info->attrs[NL80211_ATTR_TX_RATES] && setup.chandef.chan != NULL) {
+		err = nl80211_parse_tx_bitrate_mask(info, &setup.beacon_rate);
+		if (err)
+			return err;
+
+		err = validate_beacon_tx_rate(rdev, setup.chandef.chan->band,
+					      &setup.beacon_rate);
+		if (err)
+			return err;
+	}
+
 	return cfg80211_join_mesh(rdev, dev, &setup, &cfg);
 }
 
@@ -9018,6 +9524,20 @@
 	if (nla_put_u32(msg, NL80211_ATTR_SCHED_SCAN_DELAY, req->delay))
 		return -ENOBUFS;
 
+	if (req->relative_rssi_set) {
+		struct nl80211_bss_select_rssi_adjust rssi_adjust;
+
+		if (nla_put_s8(msg, NL80211_ATTR_SCHED_SCAN_RELATIVE_RSSI,
+			       req->relative_rssi))
+			return -ENOBUFS;
+
+		rssi_adjust.band = req->rssi_adjust.band;
+		rssi_adjust.delta = req->rssi_adjust.delta;
+		if (nla_put(msg, NL80211_ATTR_SCHED_SCAN_RSSI_ADJUST,
+			    sizeof(rssi_adjust), &rssi_adjust))
+			return -ENOBUFS;
+	}
+
 	freqs = nla_nest_start(msg, NL80211_ATTR_SCAN_FREQUENCIES);
 	if (!freqs)
 		return -ENOBUFS;
@@ -9794,17 +10314,26 @@
 	if (err)
 		return err;
 
-	if (!tb[NL80211_REKEY_DATA_REPLAY_CTR] || !tb[NL80211_REKEY_DATA_KEK] ||
-	    !tb[NL80211_REKEY_DATA_KCK])
+	if (!tb[NL80211_REKEY_DATA_KEK] || !tb[NL80211_REKEY_DATA_REPLAY_CTR] ||
+	    (!wiphy_ext_feature_isset(&rdev->wiphy,
+				      NL80211_EXT_FEATURE_FILS_SK_OFFLOAD) &&
+	     !wiphy_ext_feature_isset(&rdev->wiphy,
+				      NL80211_EXT_FEATURE_FILS_STA) &&
+	     !tb[NL80211_REKEY_DATA_KCK]))
 		return -EINVAL;
+
 	if (nla_len(tb[NL80211_REKEY_DATA_REPLAY_CTR]) != NL80211_REPLAY_CTR_LEN)
 		return -ERANGE;
-	if (nla_len(tb[NL80211_REKEY_DATA_KEK]) != NL80211_KEK_LEN)
+	if (nla_len(tb[NL80211_REKEY_DATA_KEK]) < NL80211_KEK_LEN)
 		return -ERANGE;
-	if (nla_len(tb[NL80211_REKEY_DATA_KCK]) != NL80211_KCK_LEN)
+	if (tb[NL80211_REKEY_DATA_KCK] &&
+	    nla_len(tb[NL80211_REKEY_DATA_KCK]) != NL80211_KCK_LEN)
 		return -ERANGE;
 
+	memset(&rekey_data, 0, sizeof(rekey_data));
 	rekey_data.kek = nla_data(tb[NL80211_REKEY_DATA_KEK]);
+	rekey_data.kek_len = nla_len(tb[NL80211_REKEY_DATA_KEK]);
+	if (tb[NL80211_REKEY_DATA_KCK])
 	rekey_data.kck = nla_data(tb[NL80211_REKEY_DATA_KCK]);
 	rekey_data.replay_ctr = nla_data(tb[NL80211_REKEY_DATA_REPLAY_CTR]);
 
@@ -10010,6 +10539,7 @@
 		return -EOPNOTSUPP;
 
 	if (!info->attrs[NL80211_ATTR_MDID] ||
+	    !info->attrs[NL80211_ATTR_IE] ||
 	    !is_valid_ie_attr(info->attrs[NL80211_ATTR_IE]))
 		return -EINVAL;
 
@@ -10175,7 +10705,7 @@
 		*wdev = NULL;
 
 		if (cb->args[1]) {
-			list_for_each_entry(tmp, &(*rdev)->wdev_list, list) {
+			list_for_each_entry(tmp, &wiphy->wdev_list, list) {
 				if (tmp->identifier == cb->args[1] - 1) {
 					*wdev = tmp;
 					break;
@@ -10949,6 +11479,14 @@
 				  NL80211_FLAG_NEED_RTNL,
 	},
 	{
+		.cmd = NL80211_CMD_ABORT_SCAN,
+		.doit = nl80211_abort_scan,
+		.policy = nl80211_policy,
+		.flags = GENL_ADMIN_PERM,
+		.internal_flags = NL80211_FLAG_NEED_WDEV_UP |
+				  NL80211_FLAG_NEED_RTNL,
+	},
+	{
 		.cmd = NL80211_CMD_GET_SCAN,
 		.policy = nl80211_policy,
 		.dumpit = nl80211_dump_scan,
@@ -11038,6 +11576,14 @@
 				  NL80211_FLAG_NEED_RTNL,
 	},
 	{
+		.cmd = NL80211_CMD_UPDATE_CONNECT_PARAMS,
+		.doit = nl80211_update_connect_params,
+		.policy = nl80211_policy,
+		.flags = GENL_ADMIN_PERM,
+		.internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
+				  NL80211_FLAG_NEED_RTNL,
+	},
+	{
 		.cmd = NL80211_CMD_DISCONNECT,
 		.doit = nl80211_disconnect,
 		.policy = nl80211_policy,
@@ -11716,7 +12262,7 @@
 	struct sk_buff *msg;
 	void *hdr;
 
-	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp);
+	msg = nlmsg_new(100 + len, gfp);
 	if (!msg)
 		return;
 
@@ -11860,15 +12406,16 @@
 }
 
 void nl80211_send_connect_result(struct cfg80211_registered_device *rdev,
-				 struct net_device *netdev, const u8 *bssid,
-				 const u8 *req_ie, size_t req_ie_len,
-				 const u8 *resp_ie, size_t resp_ie_len,
-				 u16 status, gfp_t gfp)
+				 struct net_device *netdev,
+				 struct cfg80211_connect_resp_params *cr,
+				 gfp_t gfp)
 {
 	struct sk_buff *msg;
 	void *hdr;
 
-	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp);
+	msg = nlmsg_new(100 + cr->req_ie_len + cr->resp_ie_len +
+			cr->fils_kek_len + cr->pmk_len +
+			(cr->pmkid ? WLAN_PMKID_LEN : 0), gfp);
 	if (!msg)
 		return;
 
@@ -11880,12 +12427,31 @@
 
 	if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
 	    nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex) ||
-	    (bssid && nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, bssid)) ||
-	    nla_put_u16(msg, NL80211_ATTR_STATUS_CODE, status) ||
-	    (req_ie &&
-	     nla_put(msg, NL80211_ATTR_REQ_IE, req_ie_len, req_ie)) ||
-	    (resp_ie &&
-	     nla_put(msg, NL80211_ATTR_RESP_IE, resp_ie_len, resp_ie)))
+	    (cr->bssid &&
+	     nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, cr->bssid)) ||
+	    nla_put_u16(msg, NL80211_ATTR_STATUS_CODE,
+			cr->status < 0 ? WLAN_STATUS_UNSPECIFIED_FAILURE :
+			cr->status) ||
+	    (cr->status < 0 &&
+	     (nla_put_flag(msg, NL80211_ATTR_TIMED_OUT) ||
+	      nla_put_u32(msg, NL80211_ATTR_TIMEOUT_REASON,
+			  cr->timeout_reason))) ||
+	    (cr->req_ie &&
+	     nla_put(msg, NL80211_ATTR_REQ_IE, cr->req_ie_len, cr->req_ie)) ||
+	    (cr->resp_ie &&
+	     nla_put(msg, NL80211_ATTR_RESP_IE, cr->resp_ie_len,
+		     cr->resp_ie)) ||
+	    (cr->update_erp_next_seq_num &&
+	     nla_put_u16(msg, NL80211_ATTR_FILS_ERP_NEXT_SEQ_NUM,
+			 cr->fils_erp_next_seq_num)) ||
+	    (cr->status == WLAN_STATUS_SUCCESS &&
+	     ((cr->fils_kek &&
+	       nla_put(msg, NL80211_ATTR_FILS_KEK, cr->fils_kek_len,
+		       cr->fils_kek)) ||
+	      (cr->pmk &&
+	       nla_put(msg, NL80211_ATTR_PMK, cr->pmk_len, cr->pmk)) ||
+	      (cr->pmkid &&
+	       nla_put(msg, NL80211_ATTR_PMKID, WLAN_PMKID_LEN, cr->pmkid)))))
 		goto nla_put_failure;
 
 	genlmsg_end(msg, hdr);
@@ -11908,7 +12474,7 @@
 	struct sk_buff *msg;
 	void *hdr;
 
-	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp);
+	msg = nlmsg_new(100 + req_ie_len + resp_ie_len, gfp);
 	if (!msg)
 		return;
 
@@ -11946,7 +12512,7 @@
 	struct sk_buff *msg;
 	void *hdr;
 
-	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+	msg = nlmsg_new(100 + ie_len, GFP_KERNEL);
 	if (!msg)
 		return;
 
@@ -12023,7 +12589,7 @@
 
 	trace_cfg80211_notify_new_peer_candidate(dev, addr);
 
-	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp);
+	msg = nlmsg_new(100 + ie_len, gfp);
 	if (!msg)
 		return;
 
@@ -12392,7 +12958,7 @@
 	struct sk_buff *msg;
 	void *hdr;
 
-	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp);
+	msg = nlmsg_new(100 + len, gfp);
 	if (!msg)
 		return -ENOMEM;
 
@@ -12435,7 +13001,7 @@
 
 	trace_cfg80211_mgmt_tx_status(wdev, cookie, ack);
 
-	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp);
+	msg = nlmsg_new(100 + len, gfp);
 	if (!msg)
 		return;
 
@@ -13180,7 +13746,7 @@
 				schedule_work(&rdev->sched_scan_stop_wk);
 		}
 
-		list_for_each_entry_rcu(wdev, &rdev->wdev_list, list) {
+		list_for_each_entry_rcu(wdev, &rdev->wiphy.wdev_list, list) {
 			cfg80211_mlme_unregister_socket(wdev, notify->portid);
 
 			if (wdev->owner_nlportid == notify->portid)
@@ -13239,7 +13805,7 @@
 	if (!ft_event->target_ap)
 		return;
 
-	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+	msg = nlmsg_new(100 + ft_event->ric_ies_len, GFP_KERNEL);
 	if (!msg)
 		return;
 
@@ -13338,6 +13904,16 @@
 	nlmsg_free(msg);
 }
 
+void cfg80211_ap_stopped(struct net_device *netdev, gfp_t gfp)
+{
+	struct wireless_dev *wdev = netdev->ieee80211_ptr;
+	struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
+
+	nl80211_send_mlme_event(rdev, netdev, NULL, 0,
+				NL80211_CMD_STOP_AP, gfp, -1);
+}
+EXPORT_SYMBOL(cfg80211_ap_stopped);
+
 /* initialisation/exit functions */
 
 int nl80211_init(void)
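
The NL80211_CMD_ABORT_SCAN handler registered above takes no attributes
beyond the usual interface selector and requires CAP_NET_ADMIN
(GENL_ADMIN_PERM). A minimal user-space sketch using libnl-3 (error
handling elided; illustrative only, not part of this patch):

#include <netlink/genl/genl.h>
#include <netlink/genl/ctrl.h>
#include <linux/nl80211.h>
#include <net/if.h>

static int abort_scan(const char *ifname)
{
	struct nl_sock *sk = nl_socket_alloc();
	struct nl_msg *msg = nlmsg_alloc();
	int family, err;

	genl_connect(sk);
	family = genl_ctrl_resolve(sk, "nl80211");

	genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, family, 0, 0,
		    NL80211_CMD_ABORT_SCAN, 0);
	nla_put_u32(msg, NL80211_ATTR_IFINDEX, if_nametoindex(ifname));

	err = nl_send_auto(sk, msg);
	nlmsg_free(msg);
	nl_socket_free(sk);
	return err < 0 ? err : 0;
}
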
diff -ruw linux-4.4.115/net/wireless/nl80211.h linux-4.4.115-fbx/net/wireless/nl80211.h
--- linux-4.4.115/net/wireless/nl80211.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/net/wireless/nl80211.h	2019-01-22 16:16:29.247298422 +0100
@@ -52,10 +52,9 @@
 				struct net_device *netdev,
 				const u8 *addr, gfp_t gfp);
 void nl80211_send_connect_result(struct cfg80211_registered_device *rdev,
-				 struct net_device *netdev, const u8 *bssid,
-				 const u8 *req_ie, size_t req_ie_len,
-				 const u8 *resp_ie, size_t resp_ie_len,
-				 u16 status, gfp_t gfp);
+				 struct net_device *netdev,
+				 struct cfg80211_connect_resp_params *params,
+				 gfp_t gfp);
 void nl80211_send_roamed(struct cfg80211_registered_device *rdev,
 			 struct net_device *netdev, const u8 *bssid,
 			 const u8 *req_ie, size_t req_ie_len,
diff -ruw linux-4.4.115/net/wireless/rdev-ops.h linux-4.4.115-fbx/net/wireless/rdev-ops.h
--- linux-4.4.115/net/wireless/rdev-ops.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/net/wireless/rdev-ops.h	2019-10-29 09:26:26.009226332 +0100
@@ -427,6 +427,14 @@
 	return ret;
 }
 
+static inline void rdev_abort_scan(struct cfg80211_registered_device *rdev,
+				   struct wireless_dev *wdev)
+{
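+	/* callers (nl80211_abort_scan) have already verified that
+	 * ops->abort_scan is non-NULL, as with the other rdev_*() wrappers */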
+	trace_rdev_abort_scan(&rdev->wiphy, wdev);
+	rdev->ops->abort_scan(&rdev->wiphy, wdev);
+	trace_rdev_return_void(&rdev->wiphy);
+}
+
 static inline int rdev_auth(struct cfg80211_registered_device *rdev,
 			    struct net_device *dev,
 			    struct cfg80211_auth_request *req)
@@ -481,6 +489,18 @@
 	trace_rdev_return_int(&rdev->wiphy, ret);
 	return ret;
 }
+
+static inline int
+rdev_update_connect_params(struct cfg80211_registered_device *rdev,
+			   struct net_device *dev,
+			   struct cfg80211_connect_params *sme, u32 changed)
+{
+	int ret;
+	trace_rdev_update_connect_params(&rdev->wiphy, dev, sme, changed);
+	ret = rdev->ops->update_connect_params(&rdev->wiphy, dev, sme, changed);
+	trace_rdev_return_int(&rdev->wiphy, ret);
+	return ret;
+}
 
 static inline int rdev_disconnect(struct cfg80211_registered_device *rdev,
 				  struct net_device *dev, u16 reason_code)
diff -ruw linux-4.4.115/net/wireless/reg.c linux-4.4.115-fbx/net/wireless/reg.c
--- linux-4.4.115/net/wireless/reg.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/net/wireless/reg.c	2019-10-29 09:26:26.009226332 +0100
@@ -425,6 +425,11 @@
 	return true;
 }
 
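+/* True when the current cfg80211 regdomain is the result of an intersection. */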
+static bool is_cfg80211_regdom_intersected(void)
+{
+	return is_intersected_alpha2(get_cfg80211_regdom()->alpha2);
+}
+
 static const struct ieee80211_regdomain *
 reg_copy_regd(const struct ieee80211_regdomain *src_regd)
 {
@@ -1676,12 +1681,48 @@
 {
 	struct wireless_dev *wdev;
 	struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
+	struct net_device *dev;
+	struct cfg80211_sched_scan_request *sched_scan_req;
+	ASSERT_RTNL();
 
+	list_for_each_entry(wdev, &rdev->wiphy.wdev_list, list)
+		if (!reg_wdev_chan_valid(wiphy, wdev)) {
+			dev = wdev->netdev;
+			switch (wdev->iftype) {
+			case NL80211_IFTYPE_ADHOC:
+				cfg80211_leave_ibss(rdev, dev, true);
+				break;
+			case NL80211_IFTYPE_P2P_CLIENT:
+			case NL80211_IFTYPE_STATION:
 	ASSERT_RTNL();
+				sched_scan_req = rtnl_dereference(rdev->sched_scan_req);
+				if (sched_scan_req && dev == sched_scan_req->dev)
+					__cfg80211_stop_sched_scan(rdev, false);
 
-	list_for_each_entry(wdev, &rdev->wdev_list, list)
-		if (!reg_wdev_chan_valid(wiphy, wdev))
-			cfg80211_leave(rdev, wdev);
+				wdev_lock(wdev);
+#ifdef CONFIG_CFG80211_WEXT
+				kfree(wdev->wext.ie);
+				wdev->wext.ie = NULL;
+				wdev->wext.ie_len = 0;
+				wdev->wext.connect.auth_type =
+							NL80211_AUTHTYPE_AUTOMATIC;
+#endif
+				cfg80211_disconnect(rdev, dev,
+						WLAN_REASON_DEAUTH_LEAVING, true);
+				cfg80211_mlme_down(rdev, dev);
+				wdev_unlock(wdev);
+				break;
+			case NL80211_IFTYPE_MESH_POINT:
+				cfg80211_leave_mesh(rdev, dev);
+				break;
+			case NL80211_IFTYPE_AP:
+				cfg80211_stop_ap(rdev, dev, false);
+				break;
+			default:
+				break;
+			}
+			wdev->beacon_interval = 0;
+		}
 }
 
 static void reg_check_chans_work(struct work_struct *work)
@@ -1719,11 +1760,13 @@
 	if (ignore_reg_update(wiphy, initiator)) {
 		/*
 		 * Regulatory updates set by CORE are ignored for custom
-		 * regulatory cards. Let us notify the changes to the driver,
+		 * regulatory cards and for self-managed regulatory devices.
+		 * Let us notify the changes to the driver,
 		 * as some drivers used this to restore its orig_* reg domain.
 		 */
-		if (initiator == NL80211_REGDOM_SET_BY_CORE &&
-		    wiphy->regulatory_flags & REGULATORY_CUSTOM_REG)
+		if ((initiator == NL80211_REGDOM_SET_BY_CORE &&
+		     wiphy->regulatory_flags & REGULATORY_CUSTOM_REG) ||
+		    (wiphy->regulatory_flags & REGULATORY_WIPHY_SELF_MANAGED))
 			reg_call_notifier(wiphy, lr);
 		return;
 	}
@@ -1941,9 +1984,14 @@
 	 */
 	if ((lr->initiator == NL80211_REGDOM_SET_BY_CORE ||
 	     lr->initiator == NL80211_REGDOM_SET_BY_DRIVER ||
-	     lr->initiator == NL80211_REGDOM_SET_BY_USER) &&
-	    regdom_changes(lr->alpha2))
+	     lr->initiator == NL80211_REGDOM_SET_BY_USER)) {
+		if (lr->intersect) {
+			if (!is_cfg80211_regdom_intersected())
+				return REG_REQ_IGNORE;
+		} else if (regdom_changes(lr->alpha2)) {
 		return REG_REQ_IGNORE;
+		}
+	}
 
 	if (!regdom_changes(user_request->alpha2))
 		return REG_REQ_ALREADY_SET;
@@ -2201,7 +2249,7 @@
 	reg_free_request(reg_request);
 }
 
-static bool reg_only_self_managed_wiphys(void)
+static bool reg_only_self_managed_wiphys(struct regulatory_request *reg_request)
 {
 	struct cfg80211_registered_device *rdev;
 	struct wiphy *wiphy;
@@ -2211,11 +2259,13 @@
 
 	list_for_each_entry(rdev, &cfg80211_rdev_list, list) {
 		wiphy = &rdev->wiphy;
-		if (wiphy->regulatory_flags & REGULATORY_WIPHY_SELF_MANAGED)
+		if (wiphy->regulatory_flags & REGULATORY_WIPHY_SELF_MANAGED) {
 			self_managed_found = true;
-		else
+			reg_call_notifier(wiphy, reg_request);
+		} else {
 			return false;
 	}
+	}
 
 	/* make sure at least one self-managed wiphy exists */
 	return self_managed_found;
@@ -2252,7 +2302,7 @@
 
 	spin_unlock(&reg_requests_lock);
 
-	if (reg_only_self_managed_wiphys()) {
+	if (reg_only_self_managed_wiphys(reg_request)) {
 		reg_free_request(reg_request);
 		return;
 	}
@@ -2399,6 +2449,7 @@
 
 	return 0;
 }
+EXPORT_SYMBOL(regulatory_hint_user);
 
 int regulatory_hint_indoor(bool is_indoor, u32 portid)
 {
@@ -3200,6 +3251,34 @@
 	return reg_is_indoor;
 }
 
+bool regulatory_pre_cac_allowed(struct wiphy *wiphy)
+{
+	const struct ieee80211_regdomain *regd = NULL;
+	const struct ieee80211_regdomain *wiphy_regd = NULL;
+	bool pre_cac_allowed = false;
+
+	rcu_read_lock();
+
+	regd = rcu_dereference(cfg80211_regdomain);
+	wiphy_regd = rcu_dereference(wiphy->regd);
+	if (!wiphy_regd) {
+		if (regd->dfs_region == NL80211_DFS_ETSI)
+			pre_cac_allowed = true;
+
+		rcu_read_unlock();
+
+		return pre_cac_allowed;
+	}
+
+	if (regd->dfs_region == wiphy_regd->dfs_region &&
+	    wiphy_regd->dfs_region == NL80211_DFS_ETSI)
+		pre_cac_allowed = true;
+
+	rcu_read_unlock();
+
+	return pre_cac_allowed;
+}
+
 int __init regulatory_init(void)
 {
 	int err = 0;
diff -ruw linux-4.4.115/net/wireless/reg.h linux-4.4.115-fbx/net/wireless/reg.h
--- linux-4.4.115/net/wireless/reg.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/net/wireless/reg.h	2019-01-22 16:16:29.247298422 +0100
@@ -143,4 +143,18 @@
  */
 bool regulatory_indoor_allowed(void);
 
+/*
+ * Grace period after which pre-CAC results on DFS channels time out.
+ * This timeout value is used for non-ETSI domains.
+ * TODO: Maybe make this timeout available through regdb?
+ */
+#define REG_PRE_CAC_EXPIRY_GRACE_MS 2000
+
+/**
+ * regulatory_pre_cac_allowed - check if pre-CAC is allowed in the DFS domain
+ * @wiphy: wiphy for which pre-CAC capability is checked.
+ *
+ * Pre-CAC is allowed only in the ETSI domain.
+ */
+bool regulatory_pre_cac_allowed(struct wiphy *wiphy);
 #endif  /* __NET_WIRELESS_REG_H */
diff -ruw linux-4.4.115/net/wireless/scan.c linux-4.4.115-fbx/net/wireless/scan.c
--- linux-4.4.115/net/wireless/scan.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/net/wireless/scan.c	2019-01-22 16:16:29.247298422 +0100
@@ -1362,6 +1362,8 @@
 		if (wiphy->bands[i])
 			creq->rates[i] = (1 << wiphy->bands[i]->n_bitrates) - 1;
 
+	eth_broadcast_addr(creq->bssid);
+
 	rdev->scan_req = creq;
 	err = rdev_scan(rdev, creq);
 	if (err) {
diff -ruw linux-4.4.115/net/wireless/sme.c linux-4.4.115-fbx/net/wireless/sme.c
--- linux-4.4.115/net/wireless/sme.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/net/wireless/sme.c	2019-10-29 09:26:26.009226332 +0100
@@ -34,10 +34,11 @@
 		CFG80211_CONN_SCAN_AGAIN,
 		CFG80211_CONN_AUTHENTICATE_NEXT,
 		CFG80211_CONN_AUTHENTICATING,
-		CFG80211_CONN_AUTH_FAILED,
+		CFG80211_CONN_AUTH_FAILED_TIMEOUT,
 		CFG80211_CONN_ASSOCIATE_NEXT,
 		CFG80211_CONN_ASSOCIATING,
 		CFG80211_CONN_ASSOC_FAILED,
+		CFG80211_CONN_ASSOC_FAILED_TIMEOUT,
 		CFG80211_CONN_DEAUTH,
 		CFG80211_CONN_ABANDON,
 		CFG80211_CONN_CONNECTED,
@@ -48,6 +49,29 @@
 	bool auto_auth, prev_bssid_valid;
 };
 
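+/*
+ * Returns true only if every registered wiphy sets
+ * REGULATORY_COUNTRY_IE_IGNORE, i.e. no device acts on country IEs.
+ */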
+static bool cfg80211_is_all_countryie_ignore(void)
+{
+	struct cfg80211_registered_device *rdev;
+	struct wireless_dev *wdev;
+	bool is_all_countryie_ignore = true;
+
+	list_for_each_entry(rdev, &cfg80211_rdev_list, list) {
+		list_for_each_entry(wdev, &rdev->wiphy.wdev_list, list) {
+			wdev_lock(wdev);
+			if (!(wdev->wiphy->regulatory_flags &
+				REGULATORY_COUNTRY_IE_IGNORE)) {
+				is_all_countryie_ignore = false;
+				wdev_unlock(wdev);
+				goto out;
+			}
+			wdev_unlock(wdev);
+		}
+	}
+
+out:
+	return is_all_countryie_ignore;
+}
+
 static void cfg80211_sme_free(struct wireless_dev *wdev)
 {
 	if (!wdev->conn)
@@ -120,6 +144,8 @@
 		wdev->conn->params.ssid_len);
 	request->ssids[0].ssid_len = wdev->conn->params.ssid_len;
 
+	eth_broadcast_addr(request->bssid);
+
 	request->wdev = wdev;
 	request->wiphy = &rdev->wiphy;
 	request->scan_start = jiffies;
@@ -138,7 +164,8 @@
 	return err;
 }
 
-static int cfg80211_conn_do_work(struct wireless_dev *wdev)
+static int cfg80211_conn_do_work(struct wireless_dev *wdev,
+				 enum nl80211_timeout_reason *treason)
 {
 	struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
 	struct cfg80211_connect_params *params;
@@ -169,7 +196,8 @@
 					  NULL, 0,
 					  params->key, params->key_len,
 					  params->key_idx, NULL, 0);
-	case CFG80211_CONN_AUTH_FAILED:
+	case CFG80211_CONN_AUTH_FAILED_TIMEOUT:
+		*treason = NL80211_TIMEOUT_AUTH;
 		return -ENOTCONN;
 	case CFG80211_CONN_ASSOCIATE_NEXT:
 		if (WARN_ON(!rdev->ops->assoc))
@@ -196,6 +224,9 @@
 					     WLAN_REASON_DEAUTH_LEAVING,
 					     false);
 		return err;
+	case CFG80211_CONN_ASSOC_FAILED_TIMEOUT:
+		*treason = NL80211_TIMEOUT_ASSOC;
+		/* fall through */
 	case CFG80211_CONN_ASSOC_FAILED:
 		cfg80211_mlme_deauth(rdev, wdev->netdev, params->bssid,
 				     NULL, 0,
@@ -221,10 +252,11 @@
 		container_of(work, struct cfg80211_registered_device, conn_work);
 	struct wireless_dev *wdev;
 	u8 bssid_buf[ETH_ALEN], *bssid = NULL;
+	enum nl80211_timeout_reason treason;
 
 	rtnl_lock();
 
-	list_for_each_entry(wdev, &rdev->wdev_list, list) {
+	list_for_each_entry(wdev, &rdev->wiphy.wdev_list, list) {
 		if (!wdev->netdev)
 			continue;
 
@@ -242,12 +274,15 @@
 			memcpy(bssid_buf, wdev->conn->params.bssid, ETH_ALEN);
 			bssid = bssid_buf;
 		}
-		if (cfg80211_conn_do_work(wdev)) {
-			__cfg80211_connect_result(
-					wdev->netdev, bssid,
-					NULL, 0, NULL, 0,
-					WLAN_STATUS_UNSPECIFIED_FAILURE,
-					false, NULL);
+		treason = NL80211_TIMEOUT_UNSPECIFIED;
+		if (cfg80211_conn_do_work(wdev, &treason)) {
+			struct cfg80211_connect_resp_params cr;
+
+			memset(&cr, 0, sizeof(cr));
+			cr.status = -1;
+			cr.bssid = bssid;
+			cr.timeout_reason = treason;
+			__cfg80211_connect_result(wdev->netdev, &cr, false);
 		}
 		wdev_unlock(wdev);
 	}
@@ -267,7 +302,7 @@
 			       wdev->conn->params.bssid,
 			       wdev->conn->params.ssid,
 			       wdev->conn->params.ssid_len,
-			       IEEE80211_BSS_TYPE_ESS,
+			       wdev->conn_bss_type,
 			       IEEE80211_PRIVACY(wdev->conn->params.privacy));
 	if (!bss)
 		return NULL;
@@ -350,9 +385,13 @@
 		wdev->conn->state = CFG80211_CONN_AUTHENTICATE_NEXT;
 		schedule_work(&rdev->conn_work);
 	} else if (status_code != WLAN_STATUS_SUCCESS) {
-		__cfg80211_connect_result(wdev->netdev, mgmt->bssid,
-					  NULL, 0, NULL, 0,
-					  status_code, false, NULL);
+		struct cfg80211_connect_resp_params cr;
+
+		memset(&cr, 0, sizeof(cr));
+		cr.status = status_code;
+		cr.bssid = mgmt->bssid;
+		cr.timeout_reason = NL80211_TIMEOUT_UNSPECIFIED;
+		__cfg80211_connect_result(wdev->netdev, &cr, false);
 	} else if (wdev->conn->state == CFG80211_CONN_AUTHENTICATING) {
 		wdev->conn->state = CFG80211_CONN_ASSOCIATE_NEXT;
 		schedule_work(&rdev->conn_work);
@@ -400,7 +439,7 @@
 	if (!wdev->conn)
 		return;
 
-	wdev->conn->state = CFG80211_CONN_AUTH_FAILED;
+	wdev->conn->state = CFG80211_CONN_AUTH_FAILED_TIMEOUT;
 	schedule_work(&rdev->conn_work);
 }
 
@@ -422,7 +461,7 @@
 	if (!wdev->conn)
 		return;
 
-	wdev->conn->state = CFG80211_CONN_ASSOC_FAILED;
+	wdev->conn->state = CFG80211_CONN_ASSOC_FAILED_TIMEOUT;
 	schedule_work(&rdev->conn_work);
 }
 
@@ -554,7 +593,9 @@
 
 	/* we're good if we have a matching bss struct */
 	if (bss) {
-		err = cfg80211_conn_do_work(wdev);
+		enum nl80211_timeout_reason treason;
+
+		err = cfg80211_conn_do_work(wdev, &treason);
 		cfg80211_put_bss(wdev->wiphy, bss);
 	} else {
 		/* otherwise we'll need to scan for the AP first */
@@ -619,7 +660,7 @@
 	 * count as new regulatory hints.
 	 */
 	list_for_each_entry(rdev, &cfg80211_rdev_list, list) {
-		list_for_each_entry(wdev, &rdev->wdev_list, list) {
+		list_for_each_entry(wdev, &rdev->wiphy.wdev_list, list) {
 			wdev_lock(wdev);
 			if (wdev->conn || wdev->current_bss)
 				is_all_idle = false;
@@ -633,7 +674,8 @@
 static void disconnect_work(struct work_struct *work)
 {
 	rtnl_lock();
-	if (cfg80211_is_all_idle())
+	if (cfg80211_is_all_idle() &&
+	    !cfg80211_is_all_countryie_ignore())
 		regulatory_hint_disconnect();
 	rtnl_unlock();
 }
@@ -647,11 +689,9 @@
  */
 
 /* This method must consume bss one way or another */
-void __cfg80211_connect_result(struct net_device *dev, const u8 *bssid,
-			       const u8 *req_ie, size_t req_ie_len,
-			       const u8 *resp_ie, size_t resp_ie_len,
-			       u16 status, bool wextev,
-			       struct cfg80211_bss *bss)
+void __cfg80211_connect_result(struct net_device *dev,
+			       struct cfg80211_connect_resp_params *cr,
+			       bool wextev)
 {
 	struct wireless_dev *wdev = dev->ieee80211_ptr;
 	const u8 *country_ie;
@@ -663,48 +703,48 @@
 
 	if (WARN_ON(wdev->iftype != NL80211_IFTYPE_STATION &&
 		    wdev->iftype != NL80211_IFTYPE_P2P_CLIENT)) {
-		cfg80211_put_bss(wdev->wiphy, bss);
+		cfg80211_put_bss(wdev->wiphy, cr->bss);
 		return;
 	}
 
-	nl80211_send_connect_result(wiphy_to_rdev(wdev->wiphy), dev,
-				    bssid, req_ie, req_ie_len,
-				    resp_ie, resp_ie_len,
-				    status, GFP_KERNEL);
+	nl80211_send_connect_result(wiphy_to_rdev(wdev->wiphy), dev, cr,
+				    GFP_KERNEL);
 
 #ifdef CONFIG_CFG80211_WEXT
 	if (wextev) {
-		if (req_ie && status == WLAN_STATUS_SUCCESS) {
+		if (cr->req_ie && cr->status == WLAN_STATUS_SUCCESS) {
 			memset(&wrqu, 0, sizeof(wrqu));
-			wrqu.data.length = req_ie_len;
-			wireless_send_event(dev, IWEVASSOCREQIE, &wrqu, req_ie);
+			wrqu.data.length = cr->req_ie_len;
+			wireless_send_event(dev, IWEVASSOCREQIE, &wrqu,
+					    cr->req_ie);
 		}
 
-		if (resp_ie && status == WLAN_STATUS_SUCCESS) {
+		if (cr->resp_ie && cr->status == WLAN_STATUS_SUCCESS) {
 			memset(&wrqu, 0, sizeof(wrqu));
-			wrqu.data.length = resp_ie_len;
-			wireless_send_event(dev, IWEVASSOCRESPIE, &wrqu, resp_ie);
+			wrqu.data.length = cr->resp_ie_len;
+			wireless_send_event(dev, IWEVASSOCRESPIE, &wrqu,
+					    cr->resp_ie);
 		}
 
 		memset(&wrqu, 0, sizeof(wrqu));
 		wrqu.ap_addr.sa_family = ARPHRD_ETHER;
-		if (bssid && status == WLAN_STATUS_SUCCESS) {
-			memcpy(wrqu.ap_addr.sa_data, bssid, ETH_ALEN);
-			memcpy(wdev->wext.prev_bssid, bssid, ETH_ALEN);
+		if (cr->bssid && cr->status == WLAN_STATUS_SUCCESS) {
+			memcpy(wrqu.ap_addr.sa_data, cr->bssid, ETH_ALEN);
+			memcpy(wdev->wext.prev_bssid, cr->bssid, ETH_ALEN);
 			wdev->wext.prev_bssid_valid = true;
 		}
 		wireless_send_event(dev, SIOCGIWAP, &wrqu, NULL);
 	}
 #endif
 
-	if (!bss && (status == WLAN_STATUS_SUCCESS)) {
+	if (!cr->bss && (cr->status == WLAN_STATUS_SUCCESS)) {
 		WARN_ON_ONCE(!wiphy_to_rdev(wdev->wiphy)->ops->connect);
-		bss = cfg80211_get_bss(wdev->wiphy, NULL, bssid,
+		cr->bss = cfg80211_get_bss(wdev->wiphy, NULL, cr->bssid,
 				       wdev->ssid, wdev->ssid_len,
-				       IEEE80211_BSS_TYPE_ESS,
+					   wdev->conn_bss_type,
 				       IEEE80211_PRIVACY_ANY);
-		if (bss)
-			cfg80211_hold_bss(bss_from_pub(bss));
+		if (cr->bss)
+			cfg80211_hold_bss(bss_from_pub(cr->bss));
 	}
 
 	if (wdev->current_bss) {
@@ -713,27 +753,27 @@
 		wdev->current_bss = NULL;
 	}
 
-	if (status != WLAN_STATUS_SUCCESS) {
+	if (cr->status != WLAN_STATUS_SUCCESS) {
 		kzfree(wdev->connect_keys);
 		wdev->connect_keys = NULL;
 		wdev->ssid_len = 0;
-		if (bss) {
-			cfg80211_unhold_bss(bss_from_pub(bss));
-			cfg80211_put_bss(wdev->wiphy, bss);
+		if (cr->bss) {
+			cfg80211_unhold_bss(bss_from_pub(cr->bss));
+			cfg80211_put_bss(wdev->wiphy, cr->bss);
 		}
 		cfg80211_sme_free(wdev);
 		return;
 	}
 
-	if (WARN_ON(!bss))
+	if (WARN_ON(!cr->bss))
 		return;
 
-	wdev->current_bss = bss_from_pub(bss);
+	wdev->current_bss = bss_from_pub(cr->bss);
 
 	cfg80211_upload_connect_keys(wdev);
 
 	rcu_read_lock();
-	country_ie = ieee80211_bss_get_ie(bss, WLAN_EID_COUNTRY);
+	country_ie = ieee80211_bss_get_ie(cr->bss, WLAN_EID_COUNTRY);
 	if (!country_ie) {
 		rcu_read_unlock();
 		return;
@@ -750,46 +790,95 @@
 	 * - country_ie + 2, the start of the country ie data, and
 	 * - and country_ie[1] which is the IE length
 	 */
-	regulatory_hint_country_ie(wdev->wiphy, bss->channel->band,
+	regulatory_hint_country_ie(wdev->wiphy, cr->bss->channel->band,
 				   country_ie + 2, country_ie[1]);
 	kfree(country_ie);
 }
 
-void cfg80211_connect_result(struct net_device *dev, const u8 *bssid,
-			     const u8 *req_ie, size_t req_ie_len,
-			     const u8 *resp_ie, size_t resp_ie_len,
-			     u16 status, gfp_t gfp)
+/* Consumes bss object one way or another */
+void cfg80211_connect_done(struct net_device *dev,
+			   struct cfg80211_connect_resp_params *params,
+			   gfp_t gfp)
 {
 	struct wireless_dev *wdev = dev->ieee80211_ptr;
 	struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
 	struct cfg80211_event *ev;
 	unsigned long flags;
+	u8 *next;
 
-	ev = kzalloc(sizeof(*ev) + req_ie_len + resp_ie_len, gfp);
-	if (!ev)
+	if (params->bss) {
+		/* Make sure the bss entry provided by the driver is valid. */
+		struct cfg80211_internal_bss *ibss = bss_from_pub(params->bss);
+
+		if (WARN_ON(list_empty(&ibss->list))) {
+			cfg80211_put_bss(wdev->wiphy, params->bss);
 		return;
+		}
+	}
 
-	ev->type = EVENT_CONNECT_RESULT;
-	if (bssid)
-		memcpy(ev->cr.bssid, bssid, ETH_ALEN);
-	if (req_ie_len) {
-		ev->cr.req_ie = ((u8 *)ev) + sizeof(*ev);
-		ev->cr.req_ie_len = req_ie_len;
-		memcpy((void *)ev->cr.req_ie, req_ie, req_ie_len);
-	}
-	if (resp_ie_len) {
-		ev->cr.resp_ie = ((u8 *)ev) + sizeof(*ev) + req_ie_len;
-		ev->cr.resp_ie_len = resp_ie_len;
-		memcpy((void *)ev->cr.resp_ie, resp_ie, resp_ie_len);
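+	/*
+	 * Pack the event and all variable-length payloads (BSSID, IEs,
+	 * FILS KEK, PMK, PMKID) into a single allocation; 'next' tracks
+	 * the tail as each blob is copied in.
+	 */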
+	ev = kzalloc(sizeof(*ev) + (params->bssid ? ETH_ALEN : 0) +
+		     params->req_ie_len + params->resp_ie_len +
+		     params->fils_kek_len + params->pmk_len +
+		     (params->pmkid ? WLAN_PMKID_LEN : 0), gfp);
+	if (!ev) {
+		cfg80211_put_bss(wdev->wiphy, params->bss);
+		return;
 	}
-	ev->cr.status = status;
+
+	ev->type = EVENT_CONNECT_RESULT;
+	next = ((u8 *)ev) + sizeof(*ev);
+	if (params->bssid) {
+		ev->cr.bssid = next;
+		memcpy((void *)ev->cr.bssid, params->bssid, ETH_ALEN);
+		next += ETH_ALEN;
+	}
+	if (params->req_ie_len) {
+		ev->cr.req_ie = next;
+		ev->cr.req_ie_len = params->req_ie_len;
+		memcpy((void *)ev->cr.req_ie, params->req_ie,
+		       params->req_ie_len);
+		next += params->req_ie_len;
+	}
+	if (params->resp_ie_len) {
+		ev->cr.resp_ie = next;
+		ev->cr.resp_ie_len = params->resp_ie_len;
+		memcpy((void *)ev->cr.resp_ie, params->resp_ie,
+		       params->resp_ie_len);
+		next += params->resp_ie_len;
+	}
+	if (params->fils_kek_len) {
+		ev->cr.fils_kek = next;
+		ev->cr.fils_kek_len = params->fils_kek_len;
+		memcpy((void *)ev->cr.fils_kek, params->fils_kek,
+		       params->fils_kek_len);
+		next += params->fils_kek_len;
+	}
+	if (params->pmk_len) {
+		ev->cr.pmk = next;
+		ev->cr.pmk_len = params->pmk_len;
+		memcpy((void *)ev->cr.pmk, params->pmk, params->pmk_len);
+		next += params->pmk_len;
+	}
+	if (params->pmkid) {
+		ev->cr.pmkid = next;
+		memcpy((void *)ev->cr.pmkid, params->pmkid, WLAN_PMKID_LEN);
+		next += WLAN_PMKID_LEN;
+	}
+	ev->cr.update_erp_next_seq_num = params->update_erp_next_seq_num;
+	if (params->update_erp_next_seq_num)
+		ev->cr.fils_erp_next_seq_num = params->fils_erp_next_seq_num;
+	if (params->bss)
+		cfg80211_hold_bss(bss_from_pub(params->bss));
+	ev->cr.bss = params->bss;
+	ev->cr.status = params->status;
+	ev->cr.timeout_reason = params->timeout_reason;
 
 	spin_lock_irqsave(&wdev->event_lock, flags);
 	list_add_tail(&ev->list, &wdev->event_list);
 	spin_unlock_irqrestore(&wdev->event_lock, flags);
 	queue_work(cfg80211_wq, &rdev->event_work);
 }
-EXPORT_SYMBOL(cfg80211_connect_result);
+EXPORT_SYMBOL(cfg80211_connect_done);
 
 /* Consumes bss object one way or another */
 void __cfg80211_roamed(struct wireless_dev *wdev,
@@ -860,7 +949,7 @@
 
 	bss = cfg80211_get_bss(wdev->wiphy, channel, bssid, wdev->ssid,
 			       wdev->ssid_len,
-			       IEEE80211_BSS_TYPE_ESS, IEEE80211_PRIVACY_ANY);
+			       wdev->conn_bss_type, IEEE80211_PRIVACY_ANY);
 	if (WARN_ON(!bss))
 		return;
 
@@ -1031,6 +1120,9 @@
 	memcpy(wdev->ssid, connect->ssid, connect->ssid_len);
 	wdev->ssid_len = connect->ssid_len;
 
+	wdev->conn_bss_type = connect->pbss ? IEEE80211_BSS_TYPE_PBSS :
+					      IEEE80211_BSS_TYPE_ESS;
+
 	if (!rdev->ops->connect)
 		err = cfg80211_sme_connect(wdev, connect, prev_bssid);
 	else
diff -ruw linux-4.4.115/net/wireless/sysfs.c linux-4.4.115-fbx/net/wireless/sysfs.c
--- linux-4.4.115/net/wireless/sysfs.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/net/wireless/sysfs.c	2019-01-22 16:16:29.247298422 +0100
@@ -87,14 +87,6 @@
 }
 
 #ifdef CONFIG_PM_SLEEP
-static void cfg80211_leave_all(struct cfg80211_registered_device *rdev)
-{
-	struct wireless_dev *wdev;
-
-	list_for_each_entry(wdev, &rdev->wdev_list, list)
-		cfg80211_leave(rdev, wdev);
-}
-
 static int wiphy_suspend(struct device *dev)
 {
 	struct cfg80211_registered_device *rdev = dev_to_rdev(dev);
@@ -103,17 +95,9 @@
 	rdev->suspend_at = get_seconds();
 
 	rtnl_lock();
-	if (rdev->wiphy.registered) {
-		if (!rdev->wiphy.wowlan_config)
-			cfg80211_leave_all(rdev);
+	if (rdev->wiphy.registered)
 		if (rdev->ops->suspend)
 			ret = rdev_suspend(rdev, rdev->wiphy.wowlan_config);
-		if (ret == 1) {
-			/* Driver refuse to configure wowlan */
-			cfg80211_leave_all(rdev);
-			ret = rdev_suspend(rdev, NULL);
-		}
-	}
 	rtnl_unlock();
 
 	return ret;
diff -ruw linux-4.4.115/net/wireless/trace.h linux-4.4.115-fbx/net/wireless/trace.h
--- linux-4.4.115/net/wireless/trace.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/net/wireless/trace.h	2019-10-29 09:26:26.013226371 +0100
@@ -1221,6 +1221,7 @@
 		__field(bool, privacy)
 		__field(u32, wpa_versions)
 		__field(u32, flags)
+		MAC_ENTRY(prev_bssid)
 	),
 	TP_fast_assign(
 		WIPHY_ASSIGN;
@@ -1232,13 +1233,32 @@
 		__entry->privacy = sme->privacy;
 		__entry->wpa_versions = sme->crypto.wpa_versions;
 		__entry->flags = sme->flags;
+		MAC_ASSIGN(prev_bssid, sme->prev_bssid);
 	),
 	TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", bssid: " MAC_PR_FMT
 		  ", ssid: %s, auth type: %d, privacy: %s, wpa versions: %u, "
-		  "flags: %u",
+		  "flags: %u, previous bssid: " MAC_PR_FMT,
 		  WIPHY_PR_ARG, NETDEV_PR_ARG, MAC_PR_ARG(bssid), __entry->ssid,
 		  __entry->auth_type, BOOL_TO_STR(__entry->privacy),
-		  __entry->wpa_versions, __entry->flags)
+		  __entry->wpa_versions, __entry->flags, MAC_PR_ARG(prev_bssid))
+);
+
+TRACE_EVENT(rdev_update_connect_params,
+	TP_PROTO(struct wiphy *wiphy, struct net_device *netdev,
+		 struct cfg80211_connect_params *sme, u32 changed),
+	TP_ARGS(wiphy, netdev, sme, changed),
+	TP_STRUCT__entry(
+		WIPHY_ENTRY
+		NETDEV_ENTRY
+		__field(u32, changed)
+	),
+	TP_fast_assign(
+		WIPHY_ASSIGN;
+		NETDEV_ASSIGN;
+		__entry->changed = changed;
+	),
+	TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", parameters changed: %u",
+		  WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->changed)
 );
 
 TRACE_EVENT(rdev_set_cqm_rssi_config,
@@ -2803,6 +2823,11 @@
 		  WIPHY_PR_ARG, NETDEV_PR_ARG, MAC_PR_ARG(target_ap))
 );
 
+DEFINE_EVENT(wiphy_wdev_evt, rdev_abort_scan,
+	TP_PROTO(struct wiphy *wiphy, struct wireless_dev *wdev),
+	TP_ARGS(wiphy, wdev)
+);
+
 TRACE_EVENT(cfg80211_stop_iface,
 	TP_PROTO(struct wiphy *wiphy, struct wireless_dev *wdev),
 	TP_ARGS(wiphy, wdev),
diff -ruw linux-4.4.115/net/wireless/util.c linux-4.4.115-fbx/net/wireless/util.c
--- linux-4.4.115/net/wireless/util.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/net/wireless/util.c	2019-10-29 09:26:26.013226371 +0100
@@ -13,6 +13,7 @@
 #include <net/dsfield.h>
 #include <linux/if_vlan.h>
 #include <linux/mpls.h>
+#include <linux/gcd.h>
 #include "core.h"
 #include "rdev-ops.h"
 
@@ -857,7 +858,6 @@
 {
 	struct cfg80211_event *ev;
 	unsigned long flags;
-	const u8 *bssid = NULL;
 
 	spin_lock_irqsave(&wdev->event_lock, flags);
 	while (!list_empty(&wdev->event_list)) {
@@ -869,15 +869,10 @@
 		wdev_lock(wdev);
 		switch (ev->type) {
 		case EVENT_CONNECT_RESULT:
-			if (!is_zero_ether_addr(ev->cr.bssid))
-				bssid = ev->cr.bssid;
 			__cfg80211_connect_result(
-				wdev->netdev, bssid,
-				ev->cr.req_ie, ev->cr.req_ie_len,
-				ev->cr.resp_ie, ev->cr.resp_ie_len,
-				ev->cr.status,
-				ev->cr.status == WLAN_STATUS_SUCCESS,
-				NULL);
+				wdev->netdev,
+				&ev->cr,
+				ev->cr.status == WLAN_STATUS_SUCCESS);
 			break;
 		case EVENT_ROAMED:
 			__cfg80211_roamed(wdev, ev->rm.bss, ev->rm.req_ie,
@@ -913,7 +908,7 @@
 
 	ASSERT_RTNL();
 
-	list_for_each_entry(wdev, &rdev->wdev_list, list)
+	list_for_each_entry(wdev, &rdev->wiphy.wdev_list, list)
 		cfg80211_process_wdev_events(wdev);
 }
 
@@ -1082,7 +1077,7 @@
 		   58500000,
 		   65000000,
 		   78000000,
-		   0,
+		   86500000,
 		},
 		{  13500000,
 		   27000000,
@@ -1485,31 +1480,57 @@
 }
 EXPORT_SYMBOL(ieee80211_chandef_to_operating_class);
 
-int cfg80211_validate_beacon_int(struct cfg80211_registered_device *rdev,
-				 u32 beacon_int)
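+/*
+ * Compute the GCD of the beacon intervals of all beaconing interfaces on
+ * @wiphy (including @new_beacon_int when non-zero) and report whether any
+ * two of those intervals differ.
+ */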
+static void cfg80211_calculate_bi_data(struct wiphy *wiphy, u32 new_beacon_int,
+				       u32 *beacon_int_gcd,
+				       bool *beacon_int_different)
 {
 	struct wireless_dev *wdev;
-	int res = 0;
 
-	if (!beacon_int)
-		return -EINVAL;
+	*beacon_int_gcd = 0;
+	*beacon_int_different = false;
 
-	list_for_each_entry(wdev, &rdev->wdev_list, list) {
+	list_for_each_entry(wdev, &wiphy->wdev_list, list) {
 		if (!wdev->beacon_interval)
 			continue;
-		if (wdev->beacon_interval != beacon_int) {
-			res = -EINVAL;
-			break;
+
+		if (!*beacon_int_gcd) {
+			*beacon_int_gcd = wdev->beacon_interval;
+			continue;
+		}
+
+		if (wdev->beacon_interval == *beacon_int_gcd)
+			continue;
+
+		*beacon_int_different = true;
+		*beacon_int_gcd = gcd(*beacon_int_gcd, wdev->beacon_interval);
+	}
+
+	if (new_beacon_int && *beacon_int_gcd != new_beacon_int) {
+		if (*beacon_int_gcd)
+			*beacon_int_different = true;
+		*beacon_int_gcd = gcd(*beacon_int_gcd, new_beacon_int);
 		}
 	}
 
-	return res;
+int cfg80211_validate_beacon_int(struct cfg80211_registered_device *rdev,
+				 enum nl80211_iftype iftype, u32 beacon_int)
+{
+	/*
+	 * This is just a basic pre-condition check; if interface combinations
+	 * are possible the driver must already be checking those with a call
+	 * to cfg80211_check_combinations(), in which case we'll validate more
+	 * through the cfg80211_calculate_bi_data() call and code in
+	 * cfg80211_iter_combinations().
+	 */
+
+	if (beacon_int < 10 || beacon_int > 10000)
+		return -EINVAL;
+
+	return 0;
 }
 
 int cfg80211_iter_combinations(struct wiphy *wiphy,
-			       const int num_different_channels,
-			       const u8 radar_detect,
-			       const int iftype_num[NUM_NL80211_IFTYPES],
+			       struct iface_combination_params *params,
 			       void (*iter)(const struct ieee80211_iface_combination *c,
 					    void *data),
 			       void *data)
@@ -1519,8 +1540,23 @@
 	int i, j, iftype;
 	int num_interfaces = 0;
 	u32 used_iftypes = 0;
+	u32 beacon_int_gcd;
+	bool beacon_int_different;
+
+	/*
+	 * This is a bit strange, since the iteration used to rely only on
+	 * the data given by the driver, but here it now relies on context,
+	 * in form of the currently operating interfaces.
+	 * This is OK for all current users, and saves us from having to
+	 * push the GCD calculations into all the drivers.
+	 * In the future, this should probably rely more on data that's in
+	 * cfg80211 already - the only thing it's not would appear to be any new
+	 * interfaces (while being brought up) and channel/radar data.
+	 */
+	cfg80211_calculate_bi_data(wiphy, params->new_beacon_int,
+				   &beacon_int_gcd, &beacon_int_different);
 
-	if (radar_detect) {
+	if (params->radar_detect) {
 		rcu_read_lock();
 		regdom = rcu_dereference(cfg80211_regdomain);
 		if (regdom)
@@ -1529,8 +1565,8 @@
 	}
 
 	for (iftype = 0; iftype < NUM_NL80211_IFTYPES; iftype++) {
-		num_interfaces += iftype_num[iftype];
-		if (iftype_num[iftype] > 0 &&
+		num_interfaces += params->iftype_num[iftype];
+		if (params->iftype_num[iftype] > 0 &&
 		    !(wiphy->software_iftypes & BIT(iftype)))
 			used_iftypes |= BIT(iftype);
 	}
@@ -1544,7 +1580,7 @@
 
 		if (num_interfaces > c->max_interfaces)
 			continue;
-		if (num_different_channels > c->num_different_channels)
+		if (params->num_different_channels > c->num_different_channels)
 			continue;
 
 		limits = kmemdup(c->limits, sizeof(limits[0]) * c->n_limits,
@@ -1559,16 +1595,17 @@
 				all_iftypes |= limits[j].types;
 				if (!(limits[j].types & BIT(iftype)))
 					continue;
-				if (limits[j].max < iftype_num[iftype])
+				if (limits[j].max < params->iftype_num[iftype])
 					goto cont;
-				limits[j].max -= iftype_num[iftype];
+				limits[j].max -= params->iftype_num[iftype];
 			}
 		}
 
-		if (radar_detect != (c->radar_detect_widths & radar_detect))
+		if (params->radar_detect !=
+			(c->radar_detect_widths & params->radar_detect))
 			goto cont;
 
-		if (radar_detect && c->radar_detect_regions &&
+		if (params->radar_detect && c->radar_detect_regions &&
 		    !(c->radar_detect_regions & BIT(region)))
 			goto cont;
 
@@ -1580,6 +1617,14 @@
 		if ((all_iftypes & used_iftypes) != used_iftypes)
 			goto cont;
 
+		if (beacon_int_gcd) {
+			if (c->beacon_int_min_gcd &&
+			    beacon_int_gcd < c->beacon_int_min_gcd)
+				goto cont;
+			if (!c->beacon_int_min_gcd && beacon_int_different)
+				goto cont;
+		}
+
 		/* This combination covered all interface types and
 		 * supported the requested numbers, so we're good.
 		 */
@@ -1602,14 +1647,11 @@
 }
 
 int cfg80211_check_combinations(struct wiphy *wiphy,
-				const int num_different_channels,
-				const u8 radar_detect,
-				const int iftype_num[NUM_NL80211_IFTYPES])
+				struct iface_combination_params *params)
 {
 	int err, num = 0;
 
-	err = cfg80211_iter_combinations(wiphy, num_different_channels,
-					 radar_detect, iftype_num,
+	err = cfg80211_iter_combinations(wiphy, params,
 					 cfg80211_iter_sum_ifcombs, &num);
 	if (err)
 		return err;
@@ -1628,14 +1670,15 @@
 				 u8 radar_detect)
 {
 	struct wireless_dev *wdev_iter;
-	int num[NUM_NL80211_IFTYPES];
 	struct ieee80211_channel
 			*used_channels[CFG80211_MAX_NUM_DIFFERENT_CHANNELS];
 	struct ieee80211_channel *ch;
 	enum cfg80211_chan_mode chmode;
-	int num_different_channels = 0;
 	int total = 1;
 	int i;
+	struct iface_combination_params params = {
+		.radar_detect = radar_detect,
+	};
 
 	ASSERT_RTNL();
 
@@ -1652,10 +1695,9 @@
 		return 0;
 	}
 
-	memset(num, 0, sizeof(num));
 	memset(used_channels, 0, sizeof(used_channels));
 
-	num[iftype] = 1;
+	params.iftype_num[iftype] = 1;
 
 	/* TODO: We'll probably not need this anymore, since this
 	 * should only be called with CHAN_MODE_UNDEFINED. There are
@@ -1668,14 +1710,14 @@
 	case CHAN_MODE_SHARED:
 		WARN_ON(!chan);
 		used_channels[0] = chan;
-		num_different_channels++;
+		params.num_different_channels++;
 		break;
 	case CHAN_MODE_EXCLUSIVE:
-		num_different_channels++;
+		params.num_different_channels++;
 		break;
 	}
 
-	list_for_each_entry(wdev_iter, &rdev->wdev_list, list) {
+	list_for_each_entry(wdev_iter, &rdev->wiphy.wdev_list, list) {
 		if (wdev_iter == wdev)
 			continue;
 		if (wdev_iter->iftype == NL80211_IFTYPE_P2P_DEVICE) {
@@ -1699,7 +1741,8 @@
 		 */
 		mutex_lock_nested(&wdev_iter->mtx, 1);
 		__acquire(wdev_iter->mtx);
-		cfg80211_get_chan_state(wdev_iter, &ch, &chmode, &radar_detect);
+		cfg80211_get_chan_state(wdev_iter, &ch, &chmode,
+					&params.radar_detect);
 		wdev_unlock(wdev_iter);
 
 		switch (chmode) {
@@ -1715,23 +1758,22 @@
 
 			if (used_channels[i] == NULL) {
 				used_channels[i] = ch;
-				num_different_channels++;
+				params.num_different_channels++;
 			}
 			break;
 		case CHAN_MODE_EXCLUSIVE:
-			num_different_channels++;
+			params.num_different_channels++;
 			break;
 		}
 
-		num[wdev_iter->iftype]++;
+		params.iftype_num[wdev_iter->iftype]++;
 		total++;
 	}
 
-	if (total == 1 && !radar_detect)
+	if (total == 1 && !params.radar_detect)
 		return 0;
 
-	return cfg80211_check_combinations(&rdev->wiphy, num_different_channels,
-					   radar_detect, num);
+	return cfg80211_check_combinations(&rdev->wiphy, &params);
 }
 
 int ieee80211_get_ratemask(struct ieee80211_supported_band *sband,
@@ -1813,3 +1855,54 @@
 const unsigned char bridge_tunnel_header[] __aligned(2) =
 	{ 0xaa, 0xaa, 0x03, 0x00, 0x00, 0xf8 };
 EXPORT_SYMBOL(bridge_tunnel_header);
+
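+/*
+ * Return true if @skb is a gratuitous ARP (request or reply with sender IP
+ * equal to target IP) or an unsolicited IPv6 neighbour advertisement (source
+ * address equal to destination address).
+ */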
+bool cfg80211_is_gratuitous_arp_unsolicited_na(struct sk_buff *skb)
+{
+	const struct ethhdr *eth = (void *)skb->data;
+	const struct {
+		struct arphdr hdr;
+		u8 ar_sha[ETH_ALEN];
+		u8 ar_sip[4];
+		u8 ar_tha[ETH_ALEN];
+		u8 ar_tip[4];
+	} __packed *arp;
+	const struct ipv6hdr *ipv6;
+	const struct icmp6hdr *icmpv6;
+
+	switch (eth->h_proto) {
+	case cpu_to_be16(ETH_P_ARP):
+		/* can't say - but will probably be dropped later anyway */
+		if (!pskb_may_pull(skb, sizeof(*eth) + sizeof(*arp)))
+			return false;
+
+		arp = (void *)(eth + 1);
+
+		if ((arp->hdr.ar_op == cpu_to_be16(ARPOP_REPLY) ||
+		     arp->hdr.ar_op == cpu_to_be16(ARPOP_REQUEST)) &&
+		    !memcmp(arp->ar_sip, arp->ar_tip, sizeof(arp->ar_sip)))
+			return true;
+		break;
+	case cpu_to_be16(ETH_P_IPV6):
+		/* can't say - but will probably be dropped later anyway */
+		if (!pskb_may_pull(skb, sizeof(*eth) + sizeof(*ipv6) +
+					sizeof(*icmpv6)))
+			return false;
+
+		ipv6 = (void *)(eth + 1);
+		icmpv6 = (void *)(ipv6 + 1);
+
+		if (icmpv6->icmp6_type == NDISC_NEIGHBOUR_ADVERTISEMENT &&
+		    !memcmp(&ipv6->saddr, &ipv6->daddr, sizeof(ipv6->saddr)))
+			return true;
+		break;
+	default:
+		/*
+		 * no need to support other protocols, proxy service isn't
+		 * specified for any others
+		 */
+		break;
+	}
+
+	return false;
+}
+EXPORT_SYMBOL(cfg80211_is_gratuitous_arp_unsolicited_na);
diff -ruw linux-4.4.115/net/xfrm/xfrm_algo.c linux-4.4.115-fbx/net/xfrm/xfrm_algo.c
--- linux-4.4.115/net/xfrm/xfrm_algo.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/net/xfrm/xfrm_algo.c	2019-01-22 16:16:29.255298494 +0100
@@ -239,7 +239,7 @@
 
 	.uinfo = {
 		.auth = {
-			.icv_truncbits = 96,
+			.icv_truncbits = 128,
 			.icv_fullbits = 256,
 		}
 	},
diff -ruw linux-4.4.115/net/xfrm/xfrm_output.c linux-4.4.115-fbx/net/xfrm/xfrm_output.c
--- linux-4.4.115/net/xfrm/xfrm_output.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/net/xfrm/xfrm_output.c	2019-01-22 16:16:29.255298494 +0100
@@ -66,6 +66,9 @@
 			goto error_nolock;
 		}
 
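+		/* Override skb->mark with the SA's output mark, if one is set. */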
+		if (x->props.output_mark)
+			skb->mark = x->props.output_mark;
+
 		err = x->outer_mode->output(x, skb);
 		if (err) {
 			XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTSTATEMODEERROR);
diff -ruw linux-4.4.115/net/xfrm/xfrm_policy.c linux-4.4.115-fbx/net/xfrm/xfrm_policy.c
--- linux-4.4.115/net/xfrm/xfrm_policy.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/net/xfrm/xfrm_policy.c	2019-10-29 09:26:26.017226410 +0100
@@ -119,7 +119,7 @@
 						  int tos, int oif,
 						  const xfrm_address_t *saddr,
 						  const xfrm_address_t *daddr,
-						  int family)
+						  int family, u32 mark)
 {
 	struct xfrm_policy_afinfo *afinfo;
 	struct dst_entry *dst;
@@ -128,7 +128,7 @@
 	if (unlikely(afinfo == NULL))
 		return ERR_PTR(-EAFNOSUPPORT);
 
-	dst = afinfo->dst_lookup(net, tos, oif, saddr, daddr);
+	dst = afinfo->dst_lookup(net, tos, oif, saddr, daddr, mark);
 
 	xfrm_policy_put_afinfo(afinfo);
 
@@ -139,7 +139,7 @@
 						int tos, int oif,
 						xfrm_address_t *prev_saddr,
 						xfrm_address_t *prev_daddr,
-						int family)
+						int family, u32 mark)
 {
 	struct net *net = xs_net(x);
 	xfrm_address_t *saddr = &x->props.saddr;
@@ -155,7 +155,7 @@
 		daddr = x->coaddr;
 	}
 
-	dst = __xfrm_dst_lookup(net, tos, oif, saddr, daddr, family);
+	dst = __xfrm_dst_lookup(net, tos, oif, saddr, daddr, family, mark);
 
 	if (!IS_ERR(dst)) {
 		if (prev_saddr != saddr)
@@ -1225,9 +1225,15 @@
 	read_lock_bh(&net->xfrm.xfrm_policy_lock);
 	pol = rcu_dereference(sk->sk_policy[dir]);
 	if (pol != NULL) {
-		bool match = xfrm_selector_match(&pol->selector, fl, family);
+		bool match;
 		int err = 0;
 
+		if (pol->family != family) {
+			pol = NULL;
+			goto out;
+		}
+
+		match = xfrm_selector_match(&pol->selector, fl, family);
 		if (match) {
 			if ((sk->sk_mark & pol->mark.m) != pol->mark.v) {
 				pol = NULL;
@@ -1307,7 +1313,7 @@
 
 int xfrm_sk_policy_insert(struct sock *sk, int dir, struct xfrm_policy *pol)
 {
-	struct net *net = xp_net(pol);
+	struct net *net = sock_net(sk);
 	struct xfrm_policy *old_pol;
 
 #ifdef CONFIG_XFRM_SUB_POLICY
@@ -1396,14 +1402,14 @@
 
 static int
 xfrm_get_saddr(struct net *net, int oif, xfrm_address_t *local,
-	       xfrm_address_t *remote, unsigned short family)
+	       xfrm_address_t *remote, unsigned short family, u32 mark)
 {
 	int err;
 	struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
 
 	if (unlikely(afinfo == NULL))
 		return -EINVAL;
-	err = afinfo->get_saddr(net, oif, local, remote);
+	err = afinfo->get_saddr(net, oif, local, remote, mark);
 	xfrm_policy_put_afinfo(afinfo);
 	return err;
 }
@@ -1434,7 +1440,7 @@
 			if (xfrm_addr_any(local, tmpl->encap_family)) {
 				error = xfrm_get_saddr(net, fl->flowi_oif,
 						       &tmp, remote,
-						       tmpl->encap_family);
+						       tmpl->encap_family, 0);
 				if (error)
 					goto fail;
 				local = &tmp;
@@ -1713,7 +1719,8 @@
 		if (xfrm[i]->props.mode != XFRM_MODE_TRANSPORT) {
 			family = xfrm[i]->props.family;
 			dst = xfrm_dst_lookup(xfrm[i], tos, fl->flowi_oif,
-					      &saddr, &daddr, family);
+					      &saddr, &daddr, family,
+					      xfrm[i]->props.output_mark);
 			err = PTR_ERR(dst);
 			if (IS_ERR(dst))
 				goto put_states;
diff -ruw linux-4.4.115/net/xfrm/xfrm_state.c linux-4.4.115-fbx/net/xfrm/xfrm_state.c
--- linux-4.4.115/net/xfrm/xfrm_state.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/net/xfrm/xfrm_state.c	2019-10-29 09:26:26.017226410 +0100
@@ -1845,6 +1845,13 @@
 	struct xfrm_mgr *km;
 	struct xfrm_policy *pol = NULL;
 
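+	/* A NULL optval with zero optlen clears both per-socket policies. */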
+	if (!optval && !optlen) {
+		xfrm_sk_policy_insert(sk, XFRM_POLICY_IN, NULL);
+		xfrm_sk_policy_insert(sk, XFRM_POLICY_OUT, NULL);
+		__sk_dst_reset(sk);
+		return 0;
+	}
+
 	if (optlen <= 0 || optlen > PAGE_SIZE)
 		return -EMSGSIZE;
 
@@ -1869,6 +1876,7 @@
 	if (err >= 0) {
 		xfrm_sk_policy_insert(sk, err, pol);
 		xfrm_pol_put(pol);
+		__sk_dst_reset(sk);
 		err = 0;
 	}
 
diff -ruw linux-4.4.115/net/xfrm/xfrm_user.c linux-4.4.115-fbx/net/xfrm/xfrm_user.c
--- linux-4.4.115/net/xfrm/xfrm_user.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/net/xfrm/xfrm_user.c	2019-10-29 09:26:26.017226410 +0100
@@ -584,6 +584,9 @@
 
 	xfrm_mark_get(attrs, &x->mark);
 
+	if (attrs[XFRMA_OUTPUT_MARK])
+		x->props.output_mark = nla_get_u32(attrs[XFRMA_OUTPUT_MARK]);
+
 	err = __xfrm_init_state(x, false);
 	if (err)
 		goto error;
@@ -867,6 +870,11 @@
 		goto out;
 	if (x->security)
 		ret = copy_sec_ctx(x->security, skb);
+	if (x->props.output_mark) {
+		ret = nla_put_u32(skb, XFRMA_OUTPUT_MARK, x->props.output_mark);
+		if (ret)
+			goto out;
+	}
 out:
 	return ret;
 }
@@ -1376,11 +1384,14 @@
 
 static int validate_tmpl(int nr, struct xfrm_user_tmpl *ut, u16 family)
 {
+	u16 prev_family;
 	int i;
 
 	if (nr > XFRM_MAX_DEPTH)
 		return -EINVAL;
 
+	prev_family = family;
+
 	for (i = 0; i < nr; i++) {
 		/* We never validated the ut->family value, so many
 		 * applications simply leave it at zero.  The check was
@@ -1392,6 +1403,15 @@
 		if (!ut[i].family)
 			ut[i].family = family;
 
+		if ((ut[i].mode == XFRM_MODE_TRANSPORT) &&
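+		/*
+		 * Reject mode values outside the known range and refuse an
+		 * address-family switch within a transport-mode bundle.
+		 */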
+		    (ut[i].family != prev_family))
+			return -EINVAL;
+
+		if (ut[i].mode >= XFRM_MODE_MAX)
+			return -EINVAL;
+
+		prev_family = ut[i].family;
+
 		switch (ut[i].family) {
 		case AF_INET:
 			break;
@@ -1693,6 +1713,10 @@
 	struct sk_buff *skb;
 	int err;
 
+	err = verify_policy_dir(dir);
+	if (err)
+		return ERR_PTR(err);
+
 	skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
 	if (!skb)
 		return ERR_PTR(-ENOMEM);
@@ -2218,6 +2242,10 @@
 	int n = 0;
 	struct net *net = sock_net(skb->sk);
 
+	err = verify_policy_dir(pi->dir);
+	if (err)
+		return err;
+
 	if (attrs[XFRMA_MIGRATE] == NULL)
 		return -EINVAL;
 
@@ -2333,6 +2361,11 @@
 {
 	struct net *net = &init_net;
 	struct sk_buff *skb;
+	int err;
+
+	err = verify_policy_dir(dir);
+	if (err)
+		return err;
 
 	skb = nlmsg_new(xfrm_migrate_msgsize(num_migrate, !!k), GFP_ATOMIC);
 	if (skb == NULL)
@@ -2408,6 +2441,7 @@
 	[XFRMA_SA_EXTRA_FLAGS]	= { .type = NLA_U32 },
 	[XFRMA_PROTO]		= { .type = NLA_U8 },
 	[XFRMA_ADDRESS_FILTER]	= { .len = sizeof(struct xfrm_address_filter) },
+	[XFRMA_OUTPUT_MARK]	= { .type = NLA_U32 },
 };
 
 static const struct nla_policy xfrma_spd_policy[XFRMA_SPD_MAX+1] = {
@@ -2627,6 +2661,8 @@
 		l += nla_total_size(sizeof(*x->coaddr));
 	if (x->props.extra_flags)
 		l += nla_total_size(sizeof(x->props.extra_flags));
+	if (x->props.output_mark)
+		l += nla_total_size(sizeof(x->props.output_mark));
 
 	/* Must count x->lastused as it may become non-zero behind our back. */
 	l += nla_total_size(sizeof(u64));
@@ -2990,6 +3026,11 @@
 
 static int xfrm_send_policy_notify(struct xfrm_policy *xp, int dir, const struct km_event *c)
 {
+	int err;
+
+	err = verify_policy_dir(dir);
+	if (err)
+		return err;
 
 	switch (c->event) {
 	case XFRM_MSG_NEWPOLICY:
diff -ruw linux-4.4.115/scripts/dtc/libfdt/fdt.c linux-4.4.115-fbx/scripts/dtc/libfdt/fdt.c
--- linux-4.4.115/scripts/dtc/libfdt/fdt.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/scripts/dtc/libfdt/fdt.c	2019-10-29 09:26:26.025226489 +0100
@@ -71,6 +71,20 @@
 		return -FDT_ERR_BADMAGIC;
 	}
 
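+	/* Reject offset/size pairs that wrap around or exceed totalsize. */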
+	if (fdt_off_dt_struct(fdt) > (UINT_MAX - fdt_size_dt_struct(fdt)))
+		return -FDT_ERR_BADOFFSET;
+
+	if (fdt_off_dt_strings(fdt) > (UINT_MAX - fdt_size_dt_strings(fdt)))
+		return -FDT_ERR_BADOFFSET;
+
+	if ((fdt_off_dt_struct(fdt) + fdt_size_dt_struct(fdt))
+	    > fdt_totalsize(fdt))
+		return -FDT_ERR_BADOFFSET;
+
+	if ((fdt_off_dt_strings(fdt) + fdt_size_dt_strings(fdt))
+	    > fdt_totalsize(fdt))
+		return -FDT_ERR_BADOFFSET;
+
 	return 0;
 }
 
diff -ruw linux-4.4.115/scripts/dtc/libfdt/fdt_rw.c linux-4.4.115-fbx/scripts/dtc/libfdt/fdt_rw.c
--- linux-4.4.115/scripts/dtc/libfdt/fdt_rw.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/scripts/dtc/libfdt/fdt_rw.c	2019-10-29 09:26:26.025226489 +0100
@@ -394,7 +394,7 @@
 static void _fdt_packblocks(const char *old, char *new,
 			    int mem_rsv_size, int struct_size)
 {
-	int mem_rsv_off, struct_off, strings_off;
+	uint32_t mem_rsv_off, struct_off, strings_off;
 
 	mem_rsv_off = FDT_ALIGN(sizeof(struct fdt_header), 8);
 	struct_off = mem_rsv_off + mem_rsv_size;
diff -ruw linux-4.4.115/scripts/dtc/Makefile linux-4.4.115-fbx/scripts/dtc/Makefile
--- linux-4.4.115/scripts/dtc/Makefile	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/scripts/dtc/Makefile	2019-01-22 16:16:29.279298712 +0100
@@ -1,7 +1,9 @@
 # scripts/dtc makefile
 
 hostprogs-y	:= dtc
+ifeq ($(DTC_EXT),)
 always		:= $(hostprogs-y)
+endif
 
 dtc-objs	:= dtc.o flattree.o fstree.o data.o livetree.o treesource.o \
 		   srcpos.o checks.o util.o
diff -ruw linux-4.4.115/scripts/Kbuild.include linux-4.4.115-fbx/scripts/Kbuild.include
--- linux-4.4.115/scripts/Kbuild.include	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/scripts/Kbuild.include	2019-10-29 09:26:26.017226410 +0100
@@ -107,16 +107,30 @@
 as-instr = $(call try-run,\
 	printf "%b\n" "$(1)" | $(CC) $(KBUILD_AFLAGS) -c -x assembler -o "$$TMP" -,$(2),$(3))
 
+# __cc-option
+# Usage: MY_CFLAGS += $(call __cc-option,$(CC),$(MY_CFLAGS),-march=winchip-c6,-march=i586)
+__cc-option = $(call try-run,\
+	$(1) -Werror $(2) $(3) -c -x c /dev/null -o "$$TMP",$(3),$(4))
+
+# Do not attempt to build with gcc plugins during cc-option tests.
+# (And this uses delayed resolution so the flags will be up to date.)
+CC_OPTION_CFLAGS = $(filter-out $(GCC_PLUGINS_CFLAGS),$(KBUILD_CFLAGS))
+
 # cc-option
 # Usage: cflags-y += $(call cc-option,-march=winchip-c6,-march=i586)
 
-cc-option = $(call try-run,\
-	$(CC) $(KBUILD_CPPFLAGS) $(KBUILD_CFLAGS) $(1) -c -x c /dev/null -o "$$TMP",$(1),$(2))
+cc-option = $(call __cc-option, $(CC),\
+	$(KBUILD_CPPFLAGS) $(KBUILD_CFLAGS),$(1),$(2))
+
+# hostcc-option
+# Usage: cflags-y += $(call hostcc-option,-march=winchip-c6,-march=i586)
+hostcc-option = $(call __cc-option, $(HOSTCC),\
+	$(HOSTCFLAGS) $(HOST_EXTRACFLAGS),$(1),$(2))
 
 # cc-option-yn
 # Usage: flag := $(call cc-option-yn,-march=winchip-c6)
 cc-option-yn = $(call try-run,\
-	$(CC) $(KBUILD_CPPFLAGS) $(KBUILD_CFLAGS) $(1) -c -x c /dev/null -o "$$TMP",y,n)
+	$(CC) -Werror $(KBUILD_CPPFLAGS) $(KBUILD_CFLAGS) $(1) -c -x c /dev/null -o "$$TMP",y,n)
 
 # cc-option-align
 # Prefix align with either -falign or -malign
@@ -126,7 +140,7 @@
 # cc-disable-warning
 # Usage: cflags-y += $(call cc-disable-warning,unused-but-set-variable)
 cc-disable-warning = $(call try-run,\
-	$(CC) $(KBUILD_CPPFLAGS) $(KBUILD_CFLAGS) -W$(strip $(1)) -c -x c /dev/null -o "$$TMP",-Wno-$(strip $(1)))
+	$(CC) -Werror $(KBUILD_CPPFLAGS) $(KBUILD_CFLAGS) -W$(strip $(1)) -c -x c /dev/null -o "$$TMP",-Wno-$(strip $(1)))
 
 # cc-name
 # Expands to either gcc or clang
diff -ruw linux-4.4.115/scripts/Makefile.build linux-4.4.115-fbx/scripts/Makefile.build
--- linux-4.4.115/scripts/Makefile.build	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/scripts/Makefile.build	2019-01-22 16:16:29.267298603 +0100
@@ -64,6 +64,11 @@
 include scripts/Makefile.host
 endif
 
+# Do not include dtbo rules unless needed
+ifneq ($(dtbo-y),)
+include scripts/Makefile.dtbo
+endif
+
 ifneq ($(KBUILD_SRC),)
 # Create output directory if not already present
 _dummy := $(shell [ -d $(obj) ] || mkdir -p $(obj))
@@ -175,6 +180,14 @@
 $(obj)/%.symtypes : $(src)/%.c FORCE
 	$(call cmd,cc_symtypes_c)
 
+# LLVM assembly
+# Generate .ll files from .c
+quiet_cmd_cc_ll_c = CC $(quiet_modtag)  $@
+      cmd_cc_ll_c = $(CC) $(c_flags) -emit-llvm -S -o $@ $<
+
+$(obj)/%.ll: $(src)/%.c FORCE
+	$(call if_changed_dep,cc_ll_c)
+
 # C (.c) files
 # The C file is compiled and updated dependency information is generated.
 # (See cmd_cc_o_c + relevant part of rule_cc_o_c)
diff -ruw linux-4.4.115/scripts/Makefile.clean linux-4.4.115-fbx/scripts/Makefile.clean
--- linux-4.4.115/scripts/Makefile.clean	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/scripts/Makefile.clean	2019-01-22 16:16:29.267298603 +0100
@@ -11,7 +11,7 @@
 
 # The filename Kbuild has precedence over Makefile
 kbuild-dir := $(if $(filter /%,$(src)),$(src),$(srctree)/$(src))
-include $(if $(wildcard $(kbuild-dir)/Kbuild), $(kbuild-dir)/Kbuild, $(kbuild-dir)/Makefile)
+-include $(if $(wildcard $(kbuild-dir)/Kbuild), $(kbuild-dir)/Kbuild, $(kbuild-dir)/Makefile)
 
 # Figure out what we need to build from the various variables
 # ==========================================================================
diff -ruw linux-4.4.115/scripts/Makefile.extrawarn linux-4.4.115-fbx/scripts/Makefile.extrawarn
--- linux-4.4.115/scripts/Makefile.extrawarn	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/scripts/Makefile.extrawarn	2019-10-29 09:26:26.017226410 +0100
@@ -61,7 +61,6 @@
 KBUILD_CFLAGS += $(call cc-disable-warning, initializer-overrides)
 KBUILD_CFLAGS += $(call cc-disable-warning, unused-value)
 KBUILD_CFLAGS += $(call cc-disable-warning, format)
-KBUILD_CFLAGS += $(call cc-disable-warning, unknown-warning-option)
 KBUILD_CFLAGS += $(call cc-disable-warning, sign-compare)
 KBUILD_CFLAGS += $(call cc-disable-warning, format-zero-length)
 KBUILD_CFLAGS += $(call cc-disable-warning, uninitialized)
diff -ruw linux-4.4.115/scripts/Makefile.headersinst linux-4.4.115-fbx/scripts/Makefile.headersinst
--- linux-4.4.115/scripts/Makefile.headersinst	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/scripts/Makefile.headersinst	2019-01-22 16:16:29.267298603 +0100
@@ -124,7 +124,7 @@
 # Recursion
 .PHONY: $(subdirs)
 $(subdirs):
-	$(Q)$(MAKE) $(hdr-inst)=$(obj)/$@ dst=$(_dst)/$@
+	$(Q)$(MAKE) $(hdr-inst)=$(obj)/$@ dst=$(_dst)/$(patsubst ../../../drivers/%,%,$@)
 
 targets := $(wildcard $(sort $(targets)))
 cmd_files := $(wildcard \
diff -ruw linux-4.4.115/scripts/Makefile.lib linux-4.4.115-fbx/scripts/Makefile.lib
--- linux-4.4.115/scripts/Makefile.lib	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/scripts/Makefile.lib	2019-10-29 09:26:26.021226449 +0100
@@ -129,6 +129,18 @@
 		$(CFLAGS_KASAN))
 endif
 
+ifeq ($(CONFIG_UBSAN),y)
+_c_flags += $(if $(patsubst n%,, \
+		$(UBSAN_SANITIZE_$(basetarget).o)$(UBSAN_SANITIZE)$(CONFIG_UBSAN_SANITIZE_ALL)), \
+		$(CFLAGS_UBSAN))
+endif
+
+ifeq ($(CONFIG_KCOV),y)
+_c_flags += $(if $(patsubst n%,, \
+	$(KCOV_INSTRUMENT_$(basetarget).o)$(KCOV_INSTRUMENT)y), \
+	$(CFLAGS_KCOV))
+endif
+
 # If building the kernel in a separate objtree expand all occurrences
 # of -Idir to -I$(srctree)/dir except for absolute paths (starting with '/').
 
@@ -281,10 +293,16 @@
 $(obj)/%.dtb.S: $(obj)/%.dtb
 	$(call cmd,dt_S_dtb)
 
+ifneq ($(DTC_EXT),)
+DTC = $(DTC_EXT)
+else
+DTC = $(objtree)/scripts/dtc/dtc
+endif
+
 quiet_cmd_dtc = DTC     $@
 cmd_dtc = mkdir -p $(dir ${dtc-tmp}) ; \
 	$(CPP) $(dtc_cpp_flags) -x assembler-with-cpp -o $(dtc-tmp) $< ; \
-	$(objtree)/scripts/dtc/dtc -O dtb -o $@ -b 0 \
+	$(DTC) -O dtb -o $@ -b 0 \
 		-i $(dir $<) $(DTC_FLAGS) \
 		-d $(depfile).dtc.tmp $(dtc-tmp) ; \
 	cat $(depfile).pre.tmp $(depfile).dtc.tmp > $(depfile)
@@ -294,6 +312,24 @@
 
 dtc-tmp = $(subst $(comma),_,$(dot-target).dts.tmp)
 
+# Helper targets for Installing DTBs into the boot directory
+quiet_cmd_dtb_install =	INSTALL $<
+      cmd_dtb_install =	cp $< $(2)
+
+_dtbinst_pre_:
+	$(Q)if [ -d $(INSTALL_DTBS_PATH).old ]; then rm -rf $(INSTALL_DTBS_PATH).old; fi
+	$(Q)if [ -d $(INSTALL_DTBS_PATH) ]; then mv $(INSTALL_DTBS_PATH) $(INSTALL_DTBS_PATH).old; fi
+	$(Q)mkdir -p $(INSTALL_DTBS_PATH)
+
+%.dtb_dtbinst_: $(obj)/%.dtb _dtbinst_pre_
+	$(call cmd,dtb_install,$(INSTALL_DTBS_PATH))
+
+# cat
+# ---------------------------------------------------------------------------
+# Concatenate multiple files together
+quiet_cmd_cat = CAT     $@
+cmd_cat = (cat $(filter-out FORCE,$^) > $@) || (rm -f $@; false)
+
 # Bzip2
 # ---------------------------------------------------------------------------
 
@@ -388,3 +424,34 @@
 cmd_xzmisc = (cat $(filter-out FORCE,$^) | \
 	xz --check=crc32 --lzma2=dict=1MiB) > $@ || \
 	(rm -f $@ ; false)
+
+# ASM offsets
+# ---------------------------------------------------------------------------
+
+# Default sed regexp - multiline due to syntax constraints
+#
+# Use [:space:] because LLVM's integrated assembler inserts <tab> around
+# the .ascii directive whereas GCC keeps the <space> as-is.
+define sed-offsets
+	's:^[[:space:]]*\.ascii[[:space:]]*"\(.*\)".*:\1:; \
+	/^->/{s:->#\(.*\):/* \1 */:; \
+	s:^->\([^ ]*\) [\$$#]*\([^ ]*\) \(.*\):#define \1 \2 /* \3 */:; \
+	s:->::; p;}'
+endef
+
+# Use filechk to avoid rebuilds when a header changes, but the resulting file
+# does not
+define filechk_offsets
+	(set -e; \
+	 echo "#ifndef $2"; \
+	 echo "#define $2"; \
+	 echo "/*"; \
+	 echo " * DO NOT MODIFY."; \
+	 echo " *"; \
+	 echo " * This file was generated by Kbuild"; \
+	 echo " */"; \
+	 echo ""; \
+	 sed -ne $(sed-offsets); \
+	 echo ""; \
+	 echo "#endif" )
+endef
diff -ruw linux-4.4.115/scripts/mod/Makefile linux-4.4.115-fbx/scripts/mod/Makefile
--- linux-4.4.115/scripts/mod/Makefile	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/scripts/mod/Makefile	2019-01-22 16:16:29.299298893 +0100
@@ -5,32 +5,8 @@
 
 devicetable-offsets-file := devicetable-offsets.h
 
-define sed-y
-	"/^->/{s:->#\(.*\):/* \1 */:; \
-	s:^->\([^ ]*\) [\$$#]*\([-0-9]*\) \(.*\):#define \1 \2 /* \3 */:; \
-	s:^->\([^ ]*\) [\$$#]*\([^ ]*\) \(.*\):#define \1 \2 /* \3 */:; \
-	s:->::; p;}"
-endef
-
-quiet_cmd_offsets = GEN     $@
-define cmd_offsets
-	(set -e; \
-	 echo "#ifndef __DEVICETABLE_OFFSETS_H__"; \
-	 echo "#define __DEVICETABLE_OFFSETS_H__"; \
-	 echo "/*"; \
-	 echo " * DO NOT MODIFY."; \
-	 echo " *"; \
-	 echo " * This file was generated by Kbuild"; \
-	 echo " *"; \
-	 echo " */"; \
-	 echo ""; \
-	 sed -ne $(sed-y) $<; \
-	 echo ""; \
-	 echo "#endif" ) > $@
-endef
-
-$(obj)/$(devicetable-offsets-file): $(obj)/devicetable-offsets.s
-	$(call if_changed,offsets)
+$(obj)/$(devicetable-offsets-file): $(obj)/devicetable-offsets.s FORCE
+	$(call filechk,offsets,__DEVICETABLE_OFFSETS_H__)
 
 targets += $(devicetable-offsets-file) devicetable-offsets.s
 
diff -ruw linux-4.4.115/scripts/setlocalversion linux-4.4.115-fbx/scripts/setlocalversion
--- linux-4.4.115/scripts/setlocalversion	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/scripts/setlocalversion	2019-10-29 09:26:26.037226606 +0100
@@ -48,7 +48,20 @@
 
 		# If we are at a tagged commit (like "v2.6.30-rc6"), we ignore
 		# it, because this version is defined in the top level Makefile.
-		if [ -z "`git describe --exact-match 2>/dev/null`" ]; then
+		if atag="`git describe --exact-match --abbrev=0 2>/dev/null`"; then
+			# Make sure we're at the tag that matches the Makefile.
+			# If not, append the hash of the tag as well, e.g.
+			# v2.6.30-rc5-g314aef
+			if [ "x$atag" != "x$VERSION" ]; then
+				# If only the short version is requested,
+				# don't bother running further git commands
+				if $short; then
+					echo "+"
+					return
+				fi
+				printf '%s%s' -g "`git show-ref -s --abbrev --tags $atag 2>/dev/null`"
+			fi
+		else
 
 			# If only the short version is requested, don't bother
 			# running further git commands
@@ -57,10 +70,12 @@
 				return
 			fi
 			# If we are past a tagged commit (like
-			# "v2.6.30-rc5-302-g72357d5"), we pretty print it.
+			# "v2.6.30-rc5-302-g72357d5"), we pretty print it and
+			# include the hash of any new tag on top.
 			if atag="`git describe 2>/dev/null`"; then
-				echo "$atag" | awk -F- '{printf("-%05d-%s", $(NF-1),$(NF))}'
-
+				tag="`git describe --abbrev=0 2>/dev/null`"
+				commit="`echo "$atag" | awk -F- '{printf("-%05d-%s", $(NF-1),$(NF))}'`"
+				printf '%s%s%s' -g "`git show-ref -s --abbrev --tags $tag 2>/dev/null`" $commit
 			# If we don't have a tag at all we print -g{commitish}.
 			else
 				printf '%s%s' -g $head
diff -ruw linux-4.4.115/scripts/sortextable.c linux-4.4.115-fbx/scripts/sortextable.c
--- linux-4.4.115/scripts/sortextable.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/scripts/sortextable.c	2019-01-22 16:16:29.303298929 +0100
@@ -266,9 +266,9 @@
 		break;
 	}  /* end switch */
 	if (memcmp(ELFMAG, ehdr->e_ident, SELFMAG) != 0
-	||  r2(&ehdr->e_type) != ET_EXEC
+	||  (r2(&ehdr->e_type) != ET_EXEC && r2(&ehdr->e_type) != ET_DYN)
 	||  ehdr->e_ident[EI_VERSION] != EV_CURRENT) {
-		fprintf(stderr, "unrecognized ET_EXEC file %s\n", fname);
+		fprintf(stderr, "unrecognized ET_EXEC/ET_DYN file %s\n", fname);
 		fail_file();
 	}
 
@@ -282,12 +282,13 @@
 	case EM_386:
 	case EM_X86_64:
 	case EM_S390:
+	case EM_AARCH64:
+	case EM_PARISC:
 		custom_sort = sort_relative_table;
 		break;
 	case EM_ARCOMPACT:
 	case EM_ARCV2:
 	case EM_ARM:
-	case EM_AARCH64:
 	case EM_MICROBLAZE:
 	case EM_MIPS:
 	case EM_XTENSA:
@@ -304,7 +305,7 @@
 		if (r2(&ehdr->e_ehsize) != sizeof(Elf32_Ehdr)
 		||  r2(&ehdr->e_shentsize) != sizeof(Elf32_Shdr)) {
 			fprintf(stderr,
-				"unrecognized ET_EXEC file: %s\n", fname);
+				"unrecognized ET_EXEC/ET_DYN file: %s\n", fname);
 			fail_file();
 		}
 		do32(ehdr, fname, custom_sort);
@@ -314,7 +315,7 @@
 		if (r2(&ghdr->e_ehsize) != sizeof(Elf64_Ehdr)
 		||  r2(&ghdr->e_shentsize) != sizeof(Elf64_Shdr)) {
 			fprintf(stderr,
-				"unrecognized ET_EXEC file: %s\n", fname);
+				"unrecognized ET_EXEC/ET_DYN file: %s\n", fname);
 			fail_file();
 		}
 		do64(ghdr, fname, custom_sort);
diff -ruw linux-4.4.115/security/commoncap.c linux-4.4.115-fbx/security/commoncap.c
--- linux-4.4.115/security/commoncap.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/security/commoncap.c	2019-01-22 16:16:29.307298965 +0100
@@ -31,6 +31,10 @@
 #include <linux/binfmts.h>
 #include <linux/personality.h>
 
+#ifdef CONFIG_ANDROID_PARANOID_NETWORK
+#include <linux/android_aid.h>
+#endif
+
 /*
  * If a non-root user executes a setuid-root binary in
  * !secure(SECURE_NOROOT) mode, then we raise capabilities.
@@ -73,6 +77,13 @@
 {
 	struct user_namespace *ns = targ_ns;
 
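+	/* Android: membership in the matching AID_* group grants the capability. */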
+#ifdef CONFIG_ANDROID_PARANOID_NETWORK
+	if (cap == CAP_NET_RAW && in_egroup_p(AID_NET_RAW))
+		return 0;
+	if (cap == CAP_NET_ADMIN && in_egroup_p(AID_NET_ADMIN))
+		return 0;
+#endif
+
 	/* See if cred has the capability in the target user namespace
 	 * by examining the target user namespace and all of the target
 	 * user namespace's parents.
diff -ruw linux-4.4.115/security/Kconfig linux-4.4.115-fbx/security/Kconfig
--- linux-4.4.115/security/Kconfig	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/security/Kconfig	2019-01-22 16:16:29.303298929 +0100
@@ -6,6 +6,11 @@
 
 source security/keys/Kconfig
 
+if ARCH_QCOM
+source security/pfe/Kconfig
+endif
+
+
 config SECURITY_DMESG_RESTRICT
 	bool "Restrict unprivileged access to the kernel syslog"
 	default n
@@ -18,6 +23,15 @@
 
 	  If you are unsure how to answer this question, answer N.
 
+config SECURITY_PERF_EVENTS_RESTRICT
+	bool "Restrict unprivileged use of performance events"
+	depends on PERF_EVENTS
+	help
+	  If you say Y here, the kernel.perf_event_paranoid sysctl
+	  will default to 3, and unprivileged use of the
+	  perf_event_open syscall will be denied unless the sysctl
+	  is changed.
+
 config SECURITY
 	bool "Enable different security models"
 	depends on SYSFS
@@ -128,6 +142,46 @@
 	  this low address space will need the permission specific to the
 	  systems running LSM.
 
+config HAVE_HARDENED_USERCOPY_ALLOCATOR
+	bool
+	help
+	  The heap allocator implements __check_heap_object() for
+	  validating memory ranges against heap object sizes in
+	  support of CONFIG_HARDENED_USERCOPY.
+
+config HAVE_ARCH_HARDENED_USERCOPY
+	bool
+	help
+	  The architecture supports CONFIG_HARDENED_USERCOPY by
+	  calling check_object_size() just before performing the
+	  userspace copies in the low level implementation of
+	  copy_to_user() and copy_from_user().
+
+config HARDENED_USERCOPY
+	bool "Harden memory copies between kernel and userspace"
+	depends on HAVE_ARCH_HARDENED_USERCOPY
+	depends on HAVE_HARDENED_USERCOPY_ALLOCATOR
+	select BUG
+	help
+	  This option checks for obviously wrong memory regions when
+	  copying memory to/from the kernel (via copy_to_user() and
+	  copy_from_user() functions) by rejecting memory ranges that
+	  are larger than the specified heap object, span multiple
+	  separately allocated pages, are not on the process stack,
+	  or are part of the kernel text. This kills entire classes
+	  of heap overflow exploits and similar kernel memory exposures.
+
+config HARDENED_USERCOPY_PAGESPAN
+	bool "Refuse to copy allocations that span multiple pages"
+	depends on HARDENED_USERCOPY
+	depends on !COMPILE_TEST
+	help
+	  When a multi-page allocation is done without __GFP_COMP,
+	  hardened usercopy will reject attempts to copy it. There are,
+	  however, several cases of this in the kernel that have not all
+	  been removed. This config is intended to be used only while
+	  trying to find such users.
+
 source security/selinux/Kconfig
 source security/smack/Kconfig
 source security/tomoyo/Kconfig
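/*
 * Illustrative sketch, not part of the patch: the core idea behind
 * HARDENED_USERCOPY, reduced to userspace. Before copying, verify the
 * requested range lies entirely inside the known bounds of the heap
 * object; the bookkeeping here is a simplified stand-in for the
 * allocator's __check_heap_object().
 */
#include <stdio.h>
#include <stdlib.h>

struct tracked_obj {
	char *base;
	size_t size;
};

/* reject copies that start before the object or run past its end */
static int check_heap_object(const struct tracked_obj *o,
			     const char *ptr, size_t n)
{
	size_t off = (size_t)(ptr - o->base);

	if (ptr < o->base || off > o->size || n > o->size - off)
		return -1;
	return 0;
}

int main(void)
{
	struct tracked_obj o = { malloc(64), 64 };

	printf("copy of 64 bytes: %d\n", check_heap_object(&o, o.base, 64));
	printf("copy of 65 bytes: %d\n", check_heap_object(&o, o.base, 65));
	free(o.base);
	return 0;
}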
diff -ruw linux-4.4.115/security/lsm_audit.c linux-4.4.115-fbx/security/lsm_audit.c
--- linux-4.4.115/security/lsm_audit.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/security/lsm_audit.c	2019-10-29 09:26:26.045226684 +0100
@@ -220,7 +220,7 @@
 	 */
 	BUILD_BUG_ON(sizeof(a->u) > sizeof(void *)*2);
 
-	audit_log_format(ab, " pid=%d comm=", task_pid_nr(current));
+	audit_log_format(ab, " pid=%d comm=", task_tgid_nr(current));
 	audit_log_untrustedstring(ab, memcpy(comm, current->comm, sizeof(comm)));
 
 	switch (a->type) {
@@ -294,7 +294,7 @@
 	case LSM_AUDIT_DATA_TASK: {
 		struct task_struct *tsk = a->u.tsk;
 		if (tsk) {
-			pid_t pid = task_pid_nr(tsk);
+			pid_t pid = task_tgid_nr(tsk);
 			if (pid) {
 				char comm[sizeof(tsk->comm)];
 				audit_log_format(ab, " opid=%d ocomm=", pid);
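/*
 * Illustrative sketch, not part of the patch: why the hunks above switch
 * from task_pid_nr() to task_tgid_nr(). For a thread, the kernel-level
 * "pid" is the thread ID, while tools reading audit logs expect the
 * process (thread group) ID; the two differ in any secondary thread.
 * Build with -lpthread.
 */
#define _GNU_SOURCE
#include <stdio.h>
#include <unistd.h>
#include <pthread.h>
#include <sys/syscall.h>

static void *worker(void *arg)
{
	(void)arg;
	printf("thread: tid=%ld tgid=%d\n",
	       (long)syscall(SYS_gettid), getpid());
	return NULL;
}

int main(void)
{
	pthread_t t;

	printf("main:   tid=%ld tgid=%d\n",
	       (long)syscall(SYS_gettid), getpid());
	pthread_create(&t, NULL, worker, NULL);
	pthread_join(t, NULL);
	return 0;
}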
diff -ruw linux-4.4.115/security/Makefile linux-4.4.115-fbx/security/Makefile
--- linux-4.4.115/security/Makefile	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/security/Makefile	2019-01-22 16:16:29.303298929 +0100
@@ -8,6 +8,7 @@
 subdir-$(CONFIG_SECURITY_TOMOYO)        += tomoyo
 subdir-$(CONFIG_SECURITY_APPARMOR)	+= apparmor
 subdir-$(CONFIG_SECURITY_YAMA)		+= yama
+subdir-$(CONFIG_ARCH_QCOM)	+= pfe
 
 # always enable default capabilities
 obj-y					+= commoncap.o
@@ -22,6 +23,7 @@
 obj-$(CONFIG_SECURITY_TOMOYO)		+= tomoyo/
 obj-$(CONFIG_SECURITY_APPARMOR)		+= apparmor/
 obj-$(CONFIG_SECURITY_YAMA)		+= yama/
+obj-$(CONFIG_ARCH_QCOM)				+= pfe/
 obj-$(CONFIG_CGROUP_DEVICE)		+= device_cgroup.o
 
 # Object integrity file lists
diff -ruw linux-4.4.115/sound/core/compress_offload.c linux-4.4.115-fbx/sound/core/compress_offload.c
--- linux-4.4.115/sound/core/compress_offload.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/sound/core/compress_offload.c	2019-10-29 09:26:26.053226763 +0100
@@ -168,10 +168,13 @@
 static int snd_compr_update_tstamp(struct snd_compr_stream *stream,
 		struct snd_compr_tstamp *tstamp)
 {
+	int err = 0;
 	if (!stream->ops->pointer)
 		return -ENOTSUPP;
-	stream->ops->pointer(stream, tstamp);
-	pr_debug("dsp consumed till %d total %d bytes\n",
+	err = stream->ops->pointer(stream, tstamp);
+	if (err)
+		return err;
+	pr_debug("dsp consumed till %d total %llu bytes\n",
 		tstamp->byte_offset, tstamp->copied_total);
 	if (stream->direction == SND_COMPRESS_PLAYBACK)
 		stream->runtime->total_bytes_transferred = tstamp->copied_total;
@@ -251,8 +254,8 @@
 		      (app_pointer * runtime->buffer_size);
 
 	dstn = runtime->buffer + app_pointer;
-	pr_debug("copying %ld at %lld\n",
-			(unsigned long)count, app_pointer);
+	pr_debug("copying %zu at %lld\n",
+			count, app_pointer);
 	if (count < runtime->buffer_size - app_pointer) {
 		if (copy_from_user(dstn, buf, count))
 			return -EFAULT;
@@ -290,7 +293,7 @@
 	}
 
 	avail = snd_compr_get_avail(stream);
-	pr_debug("avail returned %ld\n", (unsigned long)avail);
+	pr_debug("avail returned %zu\n", avail);
 	/* calculate how much we can write to buffer */
 	if (avail > count)
 		avail = count;
@@ -345,7 +348,7 @@
 	}
 
 	avail = snd_compr_get_avail(stream);
-	pr_debug("avail returned %ld\n", (unsigned long)avail);
+	pr_debug("avail returned %zu\n", avail);
 	/* calculate how much we can read from buffer */
 	if (avail > count)
 		avail = count;
@@ -398,7 +401,7 @@
 	poll_wait(f, &stream->runtime->sleep, wait);
 
 	avail = snd_compr_get_avail(stream);
-	pr_debug("avail is %ld\n", (unsigned long)avail);
+	pr_debug("avail is %zu\n", avail);
 	/* check if we have at least one fragment to fill */
 	switch (stream->runtime->state) {
 	case SNDRV_PCM_STATE_DRAINING:
@@ -500,7 +503,7 @@
 {
 	/* first let's check the buffer parameter's */
 	if (params->buffer.fragment_size == 0 ||
-	    params->buffer.fragments > INT_MAX / params->buffer.fragment_size)
+	    params->buffer.fragments > U32_MAX / params->buffer.fragment_size)
 		return -EINVAL;
 
 	/* now codec parameters */
@@ -630,9 +633,10 @@
 static inline int
 snd_compr_tstamp(struct snd_compr_stream *stream, unsigned long arg)
 {
-	struct snd_compr_tstamp tstamp = {0};
+	struct snd_compr_tstamp tstamp;
 	int ret;
 
+	memset(&tstamp, 0, sizeof(tstamp));
 	ret = snd_compr_update_tstamp(stream, &tstamp);
 	if (ret == 0)
 		ret = copy_to_user((struct snd_compr_tstamp __user *)arg,
@@ -685,64 +689,39 @@
 		return -EPERM;
 	retval = stream->ops->trigger(stream, SNDRV_PCM_TRIGGER_STOP);
 	if (!retval) {
-		snd_compr_drain_notify(stream);
+		stream->runtime->state = SNDRV_PCM_STATE_SETUP;
+		wake_up(&stream->runtime->sleep);
 		stream->runtime->total_bytes_available = 0;
 		stream->runtime->total_bytes_transferred = 0;
 	}
 	return retval;
 }
 
-static int snd_compress_wait_for_drain(struct snd_compr_stream *stream)
-{
-	int ret;
-
-	/*
-	 * We are called with lock held. So drop the lock while we wait for
-	 * drain complete notfication from the driver
-	 *
-	 * It is expected that driver will notify the drain completion and then
-	 * stream will be moved to SETUP state, even if draining resulted in an
-	 * error. We can trigger next track after this.
+/* This function is called without the lock held; we change stream states
+ * here, so acquire the lock while inspecting the stream state, but release
+ * it before invoking the DSP, as that call may take a while.
 	 */
-	stream->runtime->state = SNDRV_PCM_STATE_DRAINING;
-	mutex_unlock(&stream->device->lock);
-
-	/* we wait for drain to complete here, drain can return when
-	 * interruption occurred, wait returned error or success.
-	 * For the first two cases we don't do anything different here and
-	 * return after waking up
-	 */
-
-	ret = wait_event_interruptible(stream->runtime->sleep,
-			(stream->runtime->state != SNDRV_PCM_STATE_DRAINING));
-	if (ret == -ERESTARTSYS)
-		pr_debug("wait aborted by a signal");
-	else if (ret)
-		pr_debug("wait for drain failed with %d\n", ret);
-
-
-	wake_up(&stream->runtime->sleep);
-	mutex_lock(&stream->device->lock);
-
-	return ret;
-}
-
 static int snd_compr_drain(struct snd_compr_stream *stream)
 {
 	int retval;
 
+	mutex_lock(&stream->device->lock);
 	if (stream->runtime->state == SNDRV_PCM_STATE_PREPARED ||
-			stream->runtime->state == SNDRV_PCM_STATE_SETUP)
-		return -EPERM;
-
+			stream->runtime->state == SNDRV_PCM_STATE_SETUP) {
+		retval = -EPERM;
+		goto ret;
+	}
+	mutex_unlock(&stream->device->lock);
 	retval = stream->ops->trigger(stream, SND_COMPR_TRIGGER_DRAIN);
-	if (retval) {
-		pr_debug("SND_COMPR_TRIGGER_DRAIN failed %d\n", retval);
+	mutex_lock(&stream->device->lock);
+	if (!retval) {
+		stream->runtime->state = SNDRV_PCM_STATE_DRAINING;
 		wake_up(&stream->runtime->sleep);
-		return retval;
 	}
 
-	return snd_compress_wait_for_drain(stream);
+ret:
+	mutex_unlock(&stream->device->lock);
+	return retval;
 }
 
 static int snd_compr_next_track(struct snd_compr_stream *stream)
@@ -770,22 +749,87 @@
 static int snd_compr_partial_drain(struct snd_compr_stream *stream)
 {
 	int retval;
+
+	mutex_lock(&stream->device->lock);
 	if (stream->runtime->state == SNDRV_PCM_STATE_PREPARED ||
-			stream->runtime->state == SNDRV_PCM_STATE_SETUP)
+			stream->runtime->state == SNDRV_PCM_STATE_SETUP) {
+		mutex_unlock(&stream->device->lock);
 		return -EPERM;
+	}
+	mutex_unlock(&stream->device->lock);
 	/* stream can be drained only when next track has been signalled */
 	if (stream->next_track == false)
 		return -EPERM;
 
 	retval = stream->ops->trigger(stream, SND_COMPR_TRIGGER_PARTIAL_DRAIN);
-	if (retval) {
-		pr_debug("Partial drain returned failure\n");
-		wake_up(&stream->runtime->sleep);
+
+	stream->next_track = false;
 		return retval;
 	}
 
-	stream->next_track = false;
-	return snd_compress_wait_for_drain(stream);
+static int snd_compr_set_next_track_param(struct snd_compr_stream *stream,
+		unsigned long arg)
+{
+	union snd_codec_options codec_options;
+	int retval;
+
+	/* set next track params when stream is running or has been setup */
+	if (stream->runtime->state != SNDRV_PCM_STATE_SETUP &&
+			stream->runtime->state != SNDRV_PCM_STATE_RUNNING)
+		return -EPERM;
+
+	if (copy_from_user(&codec_options, (void __user *)arg,
+				sizeof(codec_options)))
+		return -EFAULT;
+
+	retval = stream->ops->set_next_track_param(stream, &codec_options);
+	return retval;
+}
+
+static int snd_compress_simple_ioctls(struct file *file,
+				struct snd_compr_stream *stream,
+				unsigned int cmd, unsigned long arg)
+{
+	int retval = -ENOTTY;
+
+	switch (_IOC_NR(cmd)) {
+	case _IOC_NR(SNDRV_COMPRESS_IOCTL_VERSION):
+		retval = put_user(SNDRV_COMPRESS_VERSION,
+				(int __user *)arg) ? -EFAULT : 0;
+		break;
+
+	case _IOC_NR(SNDRV_COMPRESS_GET_CAPS):
+		retval = snd_compr_get_caps(stream, arg);
+		break;
+
+	case _IOC_NR(SNDRV_COMPRESS_GET_CODEC_CAPS):
+		retval = snd_compr_get_codec_caps(stream, arg);
+		break;
+
+
+	case _IOC_NR(SNDRV_COMPRESS_TSTAMP):
+		retval = snd_compr_tstamp(stream, arg);
+		break;
+
+	case _IOC_NR(SNDRV_COMPRESS_AVAIL):
+		retval = snd_compr_ioctl_avail(stream, arg);
+		break;
+
+	/* Drain and partial drain need special handling:
+	 * we must drop the locks here, as the stream may block on the DSP
+	 * until it is drained. The locking is handled inside the
+	 * respective functions.
+	 */
+	case _IOC_NR(SNDRV_COMPRESS_DRAIN):
+		retval = snd_compr_drain(stream);
+		break;
+
+	case _IOC_NR(SNDRV_COMPRESS_PARTIAL_DRAIN):
+		retval = snd_compr_partial_drain(stream);
+		break;
+	}
+
+	return retval;
 }
 
 static long snd_compr_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
@@ -799,15 +843,9 @@
 	stream = &data->stream;
 	if (snd_BUG_ON(!stream))
 		return -EFAULT;
+
 	mutex_lock(&stream->device->lock);
 	switch (_IOC_NR(cmd)) {
-	case _IOC_NR(SNDRV_COMPRESS_IOCTL_VERSION):
-		retval = put_user(SNDRV_COMPRESS_VERSION,
-				(int __user *)arg) ? -EFAULT : 0;
-		break;
-	case _IOC_NR(SNDRV_COMPRESS_GET_CAPS):
-		retval = snd_compr_get_caps(stream, arg);
-		break;
 #ifndef COMPR_CODEC_CAPS_OVERFLOW
 	case _IOC_NR(SNDRV_COMPRESS_GET_CODEC_CAPS):
 		retval = snd_compr_get_codec_caps(stream, arg);
@@ -816,44 +854,49 @@
 	case _IOC_NR(SNDRV_COMPRESS_SET_PARAMS):
 		retval = snd_compr_set_params(stream, arg);
 		break;
+
 	case _IOC_NR(SNDRV_COMPRESS_GET_PARAMS):
 		retval = snd_compr_get_params(stream, arg);
 		break;
+
 	case _IOC_NR(SNDRV_COMPRESS_SET_METADATA):
 		retval = snd_compr_set_metadata(stream, arg);
 		break;
+
 	case _IOC_NR(SNDRV_COMPRESS_GET_METADATA):
 		retval = snd_compr_get_metadata(stream, arg);
 		break;
-	case _IOC_NR(SNDRV_COMPRESS_TSTAMP):
-		retval = snd_compr_tstamp(stream, arg);
-		break;
-	case _IOC_NR(SNDRV_COMPRESS_AVAIL):
-		retval = snd_compr_ioctl_avail(stream, arg);
-		break;
+
 	case _IOC_NR(SNDRV_COMPRESS_PAUSE):
 		retval = snd_compr_pause(stream);
 		break;
+
 	case _IOC_NR(SNDRV_COMPRESS_RESUME):
 		retval = snd_compr_resume(stream);
 		break;
+
 	case _IOC_NR(SNDRV_COMPRESS_START):
 		retval = snd_compr_start(stream);
 		break;
+
 	case _IOC_NR(SNDRV_COMPRESS_STOP):
 		retval = snd_compr_stop(stream);
 		break;
-	case _IOC_NR(SNDRV_COMPRESS_DRAIN):
-		retval = snd_compr_drain(stream);
-		break;
-	case _IOC_NR(SNDRV_COMPRESS_PARTIAL_DRAIN):
-		retval = snd_compr_partial_drain(stream);
-		break;
+
 	case _IOC_NR(SNDRV_COMPRESS_NEXT_TRACK):
 		retval = snd_compr_next_track(stream);
 		break;
 
+	case _IOC_NR(SNDRV_COMPRESS_SET_NEXT_TRACK_PARAM):
+		retval = snd_compr_set_next_track_param(stream, arg);
+		break;
+
+	default:
+		mutex_unlock(&stream->device->lock);
+		return snd_compress_simple_ioctls(f, stream, cmd, arg);
+
 	}
+
 	mutex_unlock(&stream->device->lock);
 	return retval;
 }
@@ -865,6 +908,7 @@
 		.write =	snd_compr_write,
 		.read =		snd_compr_read,
 		.unlocked_ioctl = snd_compr_ioctl,
+		.compat_ioctl   = snd_compr_ioctl,
 		.mmap =		snd_compr_mmap,
 		.poll =		snd_compr_poll,
 };
@@ -937,6 +981,17 @@
 }
 EXPORT_SYMBOL_GPL(snd_compress_new);
 
+/**
+ * snd_compress_free - free a compress device
+ * @card: sound card pointer
+ * @compr: compress device pointer
+ */
+void snd_compress_free(struct snd_card *card, struct snd_compr *compr)
+{
+	snd_device_free(card, compr);
+}
+EXPORT_SYMBOL_GPL(snd_compress_free);
+
 static int snd_compress_add_device(struct snd_compr *device)
 {
 	int ret;
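/*
 * Illustrative sketch, not part of the patch: the locking shape of the
 * reworked snd_compr_drain() above — check state under the lock, drop it
 * across the potentially long-blocking trigger, then retake it to publish
 * the new state. All names here are illustrative, not ALSA API. Build
 * with -lpthread.
 */
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t device_lock = PTHREAD_MUTEX_INITIALIZER;
static int state;	/* 0 = setup, 1 = running, 2 = draining */

static int trigger_drain(void)
{
	usleep(1000);	/* stand-in for a slow DSP call */
	return 0;
}

static int drain(void)
{
	int ret;

	pthread_mutex_lock(&device_lock);
	if (state == 0) {			/* not started: refuse */
		pthread_mutex_unlock(&device_lock);
		return -1;
	}
	pthread_mutex_unlock(&device_lock);	/* drop across the DSP call */

	ret = trigger_drain();

	pthread_mutex_lock(&device_lock);
	if (!ret)
		state = 2;			/* publish DRAINING */
	pthread_mutex_unlock(&device_lock);
	return ret;
}

int main(void)
{
	state = 1;
	printf("drain -> %d, state=%d\n", drain(), state);
	return 0;
}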
diff -ruw linux-4.4.115/sound/core/info.c linux-4.4.115-fbx/sound/core/info.c
--- linux-4.4.115/sound/core/info.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/sound/core/info.c	2019-10-29 09:26:26.057226802 +0100
@@ -619,6 +619,36 @@
 	return 0;
 }
 
+/**
+ * snd_register_module_info - create and register a module entry
+ * @module: the module pointer
+ * @name: the module name
+ * @parent: the parent directory
+ *
+ * Creates and registers a new module entry.
+ *
+ * Return: The pointer to the new instance, or NULL on failure.
+ */
+struct snd_info_entry *snd_register_module_info(struct module *module,
+						const char *name,
+						struct snd_info_entry *parent)
+{
+	struct snd_info_entry *entry;
+
+	entry = snd_info_create_module_entry(module, name, parent);
+	if (!entry)
+		return NULL;
+
+	entry->mode = S_IFDIR | S_IRUGO | S_IXUGO;
+
+	if (snd_info_register(entry) < 0) {
+		snd_info_free_entry(entry);
+		return NULL;
+	}
+
+	return entry;
+}
+EXPORT_SYMBOL(snd_register_module_info);
 
 /**
  * snd_info_get_line - read one line from the procfs buffer
@@ -724,8 +754,11 @@
 	INIT_LIST_HEAD(&entry->children);
 	INIT_LIST_HEAD(&entry->list);
 	entry->parent = parent;
-	if (parent)
+	if (parent) {
+		mutex_lock(&parent->access);
 		list_add_tail(&entry->list, &parent->children);
+		mutex_unlock(&parent->access);
+	}
 	return entry;
 }
 
diff -ruw linux-4.4.115/sound/core/init.c linux-4.4.115-fbx/sound/core/init.c
--- linux-4.4.115/sound/core/init.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/sound/core/init.c	2019-10-29 09:26:26.057226802 +0100
@@ -58,6 +58,8 @@
 module_param_array(slots, charp, NULL, 0444);
 MODULE_PARM_DESC(slots, "Module names assigned to the slots.");
 
+#define SND_CARD_STATE_MAX_LEN 16
+
 /* return non-zero if the given index is reserved for the given
  * module via slots option
  */
@@ -107,9 +109,39 @@
 	snd_iprintf(buffer, "%s\n", entry->card->id);
 }
 
+static ssize_t snd_card_state_read(struct snd_info_entry *entry,
+			       void *file_private_data, struct file *file,
+			       char __user *buf, size_t count, loff_t pos)
+{
+	int len;
+	char buffer[SND_CARD_STATE_MAX_LEN];
+
+	/* make sure offline is updated prior to wake up */
+	rmb();
+	len = snprintf(buffer, sizeof(buffer), "%s\n",
+		       entry->card->offline ? "OFFLINE" : "ONLINE");
+	return simple_read_from_buffer(buf, count, &pos, buffer, len);
+}
+
+static unsigned int snd_card_state_poll(struct snd_info_entry *entry,
+					void *private_data, struct file *file,
+					poll_table *wait)
+{
+	poll_wait(file, &entry->card->offline_poll_wait, wait);
+	if (xchg(&entry->card->offline_change, 0))
+		return POLLIN | POLLPRI | POLLRDNORM;
+	else
+		return 0;
+}
+
+static struct snd_info_entry_ops snd_card_state_proc_ops = {
+	.read = snd_card_state_read,
+	.poll = snd_card_state_poll,
+};
+
 static int init_info_for_card(struct snd_card *card)
 {
-	struct snd_info_entry *entry;
+	struct snd_info_entry *entry, *entry_state;
 
 	entry = snd_info_create_card_entry(card, "id", card->proc_root);
 	if (!entry) {
@@ -119,6 +151,17 @@
 	entry->c.text.read = snd_card_id_read;
 	card->proc_id = entry;
 
+	entry_state = snd_info_create_card_entry(card, "state",
+						 card->proc_root);
+	if (!entry_state) {
+		dev_dbg(card->dev, "unable to create card state entry\n");
+		card->proc_id = NULL;
+		return -ENOMEM;
+	}
+	entry_state->size = SND_CARD_STATE_MAX_LEN;
+	entry_state->content = SNDRV_INFO_CONTENT_DATA;
+	entry_state->c.ops = &snd_card_state_proc_ops;
+
 	return snd_info_card_register(card);
 }
 #else /* !CONFIG_SND_PROC_FS */
@@ -258,6 +301,7 @@
 	init_waitqueue_head(&card->power_sleep);
 #endif
 
+	init_waitqueue_head(&card->offline_poll_wait);
 	device_initialize(&card->card_dev);
 	card->card_dev.parent = parent;
 	card->card_dev.class = sound_class;
@@ -972,6 +1016,35 @@
 
 EXPORT_SYMBOL(snd_card_file_remove);
 
+/**
+ * snd_card_change_online_state - mark card's online/offline state
+ * @card: Card to mark
+ * @online: whether online or offline
+ *
+ * Marks the card online/offline and wakes up anyone polling the card state.
+ */
+void snd_card_change_online_state(struct snd_card *card, int online)
+{
+	snd_printd("snd card %s state change %d -> %d\n",
+		   card->shortname, !card->offline, online);
+	card->offline = !online;
+	/* make sure offline is updated prior to wake up */
+	wmb();
+	xchg(&card->offline_change, 1);
+	wake_up_interruptible(&card->offline_poll_wait);
+}
+EXPORT_SYMBOL(snd_card_change_online_state);
+
+/**
+ * snd_card_is_online_state - return true if the card is online
+ * @card: Card to query
+ */
+bool snd_card_is_online_state(struct snd_card *card)
+{
+	return !card->offline;
+}
+EXPORT_SYMBOL(snd_card_is_online_state);
+
 #ifdef CONFIG_PM
 /**
  *  snd_power_wait - wait until the power-state is changed.
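/*
 * Illustrative sketch, not part of the patch: the publish/observe pattern
 * used by snd_card_change_online_state() above (wmb() before waking
 * pollers, rmb() before reading), expressed with C11 release/acquire
 * atomics in userspace.
 */
#include <stdatomic.h>
#include <stdio.h>

static int offline;			/* the payload */
static atomic_int offline_change;	/* the flag pollers test */

static void change_online_state(int online)
{
	offline = !online;
	/* release: payload becomes visible before the flag (cf. wmb()) */
	atomic_store_explicit(&offline_change, 1, memory_order_release);
}

static void poll_state(void)
{
	/* acquire: the flag read orders the payload read (cf. rmb()) */
	if (atomic_exchange_explicit(&offline_change, 0,
				     memory_order_acquire))
		printf("card is %s\n", offline ? "OFFLINE" : "ONLINE");
}

int main(void)
{
	change_online_state(0);
	poll_state();
	return 0;
}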
diff -ruw linux-4.4.115/sound/core/jack.c linux-4.4.115-fbx/sound/core/jack.c
--- linux-4.4.115/sound/core/jack.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/sound/core/jack.c	2019-01-22 16:16:29.343299291 +0100
@@ -32,13 +32,17 @@
 	unsigned int mask_bits; /* only masked status bits are reported via kctl */
 };
 
-static int jack_switch_types[SND_JACK_SWITCH_TYPES] = {
+static int jack_switch_types[] = {
 	SW_HEADPHONE_INSERT,
 	SW_MICROPHONE_INSERT,
 	SW_LINEOUT_INSERT,
 	SW_JACK_PHYSICAL_INSERT,
 	SW_VIDEOOUT_INSERT,
 	SW_LINEIN_INSERT,
+	SW_HPHL_OVERCURRENT,
+	SW_HPHR_OVERCURRENT,
+	SW_UNSUPPORT_INSERT,
+	SW_MICROPHONE2_INSERT,
 };
 
 static int snd_jack_dev_disconnect(struct snd_device *device)
@@ -240,7 +244,7 @@
 
 		jack->type = type;
 
-		for (i = 0; i < SND_JACK_SWITCH_TYPES; i++)
+		for (i = 0; i < ARRAY_SIZE(jack_switch_types); i++)
 			if (type & (1 << i))
 				input_set_capability(jack->input_dev, EV_SW,
 						     jack_switch_types[i]);
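/*
 * Illustrative sketch, not part of the patch: why the hunk above replaces
 * the fixed SND_JACK_SWITCH_TYPES bound with ARRAY_SIZE() — the loop then
 * tracks the table automatically as switch types are appended. The codes
 * mirror the SW_* values used in the table.
 */
#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

static const int jack_switch_types[] = {
	0x02, 0x04, 0x06, 0x07, 0x08, 0x0d,	/* original six entries */
	0x0f, 0x10, 0x12, 0x11,			/* newly appended entries */
};

int main(void)
{
	unsigned int type = (1u << 0) | (1u << 7);	/* example type mask */
	size_t i;

	for (i = 0; i < ARRAY_SIZE(jack_switch_types); i++)
		if (type & (1u << i))
			printf("enable switch 0x%02x\n", jack_switch_types[i]);
	return 0;
}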
diff -ruw linux-4.4.115/sound/core/pcm.c linux-4.4.115-fbx/sound/core/pcm.c
--- linux-4.4.115/sound/core/pcm.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/sound/core/pcm.c	2019-10-29 09:26:26.057226802 +0100
@@ -744,6 +744,7 @@
 		}
 		substream->group = &substream->self_group;
 		spin_lock_init(&substream->self_group.lock);
+		spin_lock_init(&substream->runtime_lock);
 		mutex_init(&substream->self_group.mutex);
 		INIT_LIST_HEAD(&substream->self_group.substreams);
 		list_add_tail(&substream->link_list, &substream->self_group.substreams);
@@ -857,6 +858,14 @@
 		snd_ctl_remove(pstr->pcm->card, pstr->chmap_kctl);
 		pstr->chmap_kctl = NULL;
 	}
+	if (pstr->vol_kctl) {
+		snd_ctl_remove(pstr->pcm->card, pstr->vol_kctl);
+		pstr->vol_kctl = NULL;
+	}
+	if (pstr->usr_kctl) {
+		snd_ctl_remove(pstr->pcm->card, pstr->usr_kctl);
+		pstr->usr_kctl = NULL;
+	}
 }
 
 static void snd_pcm_free_stream(struct snd_pcm_str * pstr)
@@ -1014,9 +1023,11 @@
 void snd_pcm_detach_substream(struct snd_pcm_substream *substream)
 {
 	struct snd_pcm_runtime *runtime;
+	unsigned long flags = 0;
 
 	if (PCM_RUNTIME_CHECK(substream))
 		return;
+	spin_lock_irqsave(&substream->runtime_lock, flags);
 	runtime = substream->runtime;
 	if (runtime->private_free != NULL)
 		runtime->private_free(runtime);
@@ -1030,6 +1041,7 @@
 	put_pid(substream->pid);
 	substream->pid = NULL;
 	substream->pstr->substream_opened--;
+	spin_unlock_irqrestore(&substream->runtime_lock, flags);
 }
 
 static ssize_t show_pcm_class(struct device *dev,
diff -ruw linux-4.4.115/sound/core/pcm_compat.c linux-4.4.115-fbx/sound/core/pcm_compat.c
--- linux-4.4.115/sound/core/pcm_compat.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/sound/core/pcm_compat.c	2019-10-29 09:26:26.057226802 +0100
@@ -655,6 +655,39 @@
 #endif /* CONFIG_X86_X32 */
 };
 
+static int snd_compressed_ioctl32(struct snd_pcm_substream *substream,
+				 unsigned int cmd, void __user *arg)
+{
+	struct snd_pcm_runtime *runtime;
+	int err = 0;
+
+	if (PCM_RUNTIME_CHECK(substream))
+		return -ENXIO;
+	runtime = substream->runtime;
+	if (substream->ops->compat_ioctl) {
+		err = substream->ops->compat_ioctl(substream, cmd, arg);
+	} else {
+		err = -ENOIOCTLCMD;
+		pr_err("%s failed cmd = %d\n", __func__, cmd);
+	}
+	pr_debug("%s called with cmd = %d\n", __func__, cmd);
+	return err;
+}
+static int snd_user_ioctl32(struct snd_pcm_substream *substream,
+			  unsigned int cmd, void __user *arg)
+{
+	struct snd_pcm_runtime *runtime;
+	int err = -ENOIOCTLCMD;
+
+	if (PCM_RUNTIME_CHECK(substream))
+		return -ENXIO;
+	runtime = substream->runtime;
+	if (substream->ops->compat_ioctl)
+		err = substream->ops->compat_ioctl(substream, cmd, arg);
+	return err;
+}
+
+
 static long snd_pcm_ioctl_compat(struct file *file, unsigned int cmd, unsigned long arg)
 {
 	struct snd_pcm_file *pcm_file;
@@ -734,6 +767,11 @@
 	case SNDRV_PCM_IOCTL_CHANNEL_INFO_X32:
 		return snd_pcm_ioctl_channel_info_x32(substream, argp);
 #endif /* CONFIG_X86_X32 */
+	default:
+		if (_IOC_TYPE(cmd) == 'C')
+			return snd_compressed_ioctl32(substream, cmd, argp);
+		else if (_IOC_TYPE(cmd) == 'U')
+			return snd_user_ioctl32(substream, cmd, argp);
 	}
 
 	return -ENOIOCTLCMD;
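/*
 * Illustrative sketch, not part of the patch: how the default: branch
 * above routes by ioctl magic. _IOC_TYPE() extracts the byte that encodes
 * the command family ('A' for ALSA PCM, with 'C' and 'U' claimed here for
 * compressed and user-defined commands). The command value below is a
 * made-up example.
 */
#include <stdio.h>
#include <linux/ioctl.h>

int main(void)
{
	unsigned int cmd = _IOR('C', 0x10, int);	/* hypothetical cmd */

	switch (_IOC_TYPE(cmd)) {
	case 'C':
		printf("compressed ioctl, nr=0x%x\n", _IOC_NR(cmd));
		break;
	case 'U':
		printf("user-defined ioctl, nr=0x%x\n", _IOC_NR(cmd));
		break;
	default:
		printf("not ours\n");
	}
	return 0;
}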
diff -ruw linux-4.4.115/sound/core/pcm_lib.c linux-4.4.115-fbx/sound/core/pcm_lib.c
--- linux-4.4.115/sound/core/pcm_lib.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/sound/core/pcm_lib.c	2019-10-29 09:26:26.057226802 +0100
@@ -41,6 +41,9 @@
 #define trace_hw_ptr_error(substream, reason)
 #endif
 
+#define STRING_LENGTH_OF_INT 12
+#define MAX_USR_CTRL_CNT 128
+
 /*
  * fill ring buffer with silence
  * runtime->silence_start: starting pointer to silence area
@@ -374,7 +377,8 @@
 		 * the elapsed time to detect xruns.
 		 */
 		jdelta = curr_jiffies - runtime->hw_ptr_jiffies;
-		if (jdelta < runtime->hw_ptr_buffer_jiffies / 2)
+		if ((jdelta < runtime->hw_ptr_buffer_jiffies / 2) ||
+		    (runtime->hw_ptr_buffer_jiffies <= 0))
 			goto no_delta_check;
 		hdelta = jdelta - delta * HZ / runtime->rate;
 		xrun_threshold = runtime->hw_ptr_buffer_jiffies / 2 + 1;
@@ -1791,6 +1795,11 @@
 	switch (runtime->access) {
 	case SNDRV_PCM_ACCESS_MMAP_INTERLEAVED:
 	case SNDRV_PCM_ACCESS_RW_INTERLEAVED:
+		if ((UINT_MAX/width) < info->channel) {
+			snd_printd("%s: integer overflow while multiply\n",
+				   __func__);
+			return -EINVAL;
+		}
 		info->first = info->channel * width;
 		info->step = runtime->channels * width;
 		break;
@@ -1798,6 +1807,12 @@
 	case SNDRV_PCM_ACCESS_RW_NONINTERLEAVED:
 	{
 		size_t size = runtime->dma_bytes / runtime->channels;
+
+		if ((size > 0) && ((UINT_MAX/(size * 8)) < info->channel)) {
+			snd_printd("%s: integer overflow while multiply\n",
+				   __func__);
+			return -EINVAL;
+		}
 		info->first = info->channel * size * 8;
 		info->step = width;
 		break;
@@ -1843,8 +1858,6 @@
 		      unsigned int cmd, void *arg)
 {
 	switch (cmd) {
-	case SNDRV_PCM_IOCTL1_INFO:
-		return 0;
 	case SNDRV_PCM_IOCTL1_RESET:
 		return snd_pcm_lib_ioctl_reset(substream, arg);
 	case SNDRV_PCM_IOCTL1_CHANNEL_INFO:
@@ -2116,6 +2129,9 @@
 	struct snd_pcm_runtime *runtime;
 	if (PCM_RUNTIME_CHECK(substream))
 		return -ENXIO;
+	/* TODO: consider returning -EINVAL here */
+	if (substream->hw_no_buffer)
+		snd_printd("%s: warning, this PCM is host-less\n", __func__);
 	runtime = substream->runtime;
 	if (snd_BUG_ON(!substream->ops->copy && !runtime->dma_area))
 		return -EINVAL;
@@ -2566,6 +2582,23 @@
 	kfree(info);
 }
 
+static int pcm_volume_ctl_info(struct snd_kcontrol *kcontrol,
+				struct snd_ctl_elem_info *uinfo)
+{
+	uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
+	uinfo->count = 1;
+	uinfo->value.integer.min = 0;
+	uinfo->value.integer.max = 0x2000;
+	return 0;
+}
+
+static void pcm_volume_ctl_private_free(struct snd_kcontrol *kcontrol)
+{
+	struct snd_pcm_volume *info = snd_kcontrol_chip(kcontrol);
+	info->pcm->streams[info->stream].vol_kctl = NULL;
+	kfree(info);
+}
+
 /**
  * snd_pcm_add_chmap_ctls - create channel-mapping control elements
  * @pcm: the assigned PCM instance
@@ -2625,3 +2658,166 @@
 	return 0;
 }
 EXPORT_SYMBOL_GPL(snd_pcm_add_chmap_ctls);
+
+/**
+ * snd_pcm_add_volume_ctls - create volume control elements
+ * @pcm: the assigned PCM instance
+ * @stream: stream direction
+ * @max_length: the max length of the volume parameter of stream
+ * @private_value: the value passed to each kcontrol's private_value field
+ * @info_ret: store struct snd_pcm_volume instance if non-NULL
+ *
+ * Create volume control elements assigned to the given PCM stream(s).
+ * Returns zero on success, or a negative error value.
+ */
+int snd_pcm_add_volume_ctls(struct snd_pcm *pcm, int stream,
+			   const struct snd_pcm_volume_elem *volume,
+			   int max_length,
+			   unsigned long private_value,
+			   struct snd_pcm_volume **info_ret)
+{
+	struct snd_pcm_volume *info;
+	struct snd_kcontrol_new knew = {
+		.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+		.access = SNDRV_CTL_ELEM_ACCESS_TLV_READ |
+			SNDRV_CTL_ELEM_ACCESS_READWRITE,
+		.info = pcm_volume_ctl_info,
+	};
+	int err;
+	int size;
+
+	info = kzalloc(sizeof(*info), GFP_KERNEL);
+	if (!info)
+		return -ENOMEM;
+	info->pcm = pcm;
+	info->stream = stream;
+	info->volume = volume;
+	info->max_length = max_length;
+	size = sizeof("Playback ") + sizeof(" Volume") +
+		STRING_LENGTH_OF_INT*sizeof(char) + 1;
+	knew.name = kzalloc(size, GFP_KERNEL);
+	if (!knew.name) {
+		kfree(info);
+		return -ENOMEM;
+	}
+	if (stream == SNDRV_PCM_STREAM_PLAYBACK)
+		snprintf((char *)knew.name, size, "%s %d %s",
+			"Playback", pcm->device, "Volume");
+	else
+		snprintf((char *)knew.name, size, "%s %d %s",
+			"Capture", pcm->device, "Volume");
+	knew.device = pcm->device;
+	knew.count = pcm->streams[stream].substream_count;
+	knew.private_value = private_value;
+	info->kctl = snd_ctl_new1(&knew, info);
+	if (!info->kctl) {
+		kfree(info);
+		kfree(knew.name);
+		return -ENOMEM;
+	}
+	info->kctl->private_free = pcm_volume_ctl_private_free;
+	err = snd_ctl_add(pcm->card, info->kctl);
+	if (err < 0) {
+		kfree(info);
+		kfree(knew.name);
+		return -ENOMEM;
+	}
+	pcm->streams[stream].vol_kctl = info->kctl;
+	if (info_ret)
+		*info_ret = info;
+	kfree(knew.name);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(snd_pcm_add_volume_ctls);
+
+static int pcm_usr_ctl_info(struct snd_kcontrol *kcontrol,
+			    struct snd_ctl_elem_info *uinfo)
+{
+	uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
+	uinfo->count = MAX_USR_CTRL_CNT;
+	uinfo->value.integer.min = 0;
+	uinfo->value.integer.max = INT_MAX;
+	return 0;
+}
+
+static void pcm_usr_ctl_private_free(struct snd_kcontrol *kcontrol)
+{
+	struct snd_pcm_usr *info = snd_kcontrol_chip(kcontrol);
+	info->pcm->streams[info->stream].usr_kctl = NULL;
+	kfree(info);
+}
+
+/**
+ * snd_pcm_add_usr_ctls - create user control elements
+ * @pcm: the assigned PCM instance
+ * @stream: stream direction
+ * @max_length: the max length of the user parameter of stream
+ * @private_value: the value passed to each kcontrol's private_value field
+ * @info_ret: store struct snd_pcm_usr instance if non-NULL
+ *
+ * Create user control elements assigned to the given PCM stream(s).
+ * Returns zero on success, or a negative error value.
+ */
+int snd_pcm_add_usr_ctls(struct snd_pcm *pcm, int stream,
+			 const struct snd_pcm_usr_elem *usr,
+			 int max_length, int max_kctrl_str_len,
+			 unsigned long private_value,
+			 struct snd_pcm_usr **info_ret)
+{
+	struct snd_pcm_usr *info;
+	struct snd_kcontrol_new knew = {
+		.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+		.access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+		.info = pcm_usr_ctl_info,
+	};
+	int err;
+	char *buf;
+
+	info = kzalloc(sizeof(*info), GFP_KERNEL);
+	if (!info) {
+		pr_err("%s: snd_pcm_usr alloc failed\n", __func__);
+		return -ENOMEM;
+	}
+	info->pcm = pcm;
+	info->stream = stream;
+	info->usr = usr;
+	info->max_length = max_length;
+	buf = kzalloc(max_kctrl_str_len, GFP_KERNEL);
+	if (!buf) {
+		pr_err("%s: buffer allocation failed\n", __func__);
+		kfree(info);
+		return -ENOMEM;
+	}
+	knew.name = buf;
+	if (stream == SNDRV_PCM_STREAM_PLAYBACK)
+		snprintf(buf, max_kctrl_str_len, "%s %d %s",
+			"Playback", pcm->device, "User kcontrol");
+	else
+		snprintf(buf, max_kctrl_str_len, "%s %d %s",
+			"Capture", pcm->device, "User kcontrol");
+	knew.device = pcm->device;
+	knew.count = pcm->streams[stream].substream_count;
+	knew.private_value = private_value;
+	info->kctl = snd_ctl_new1(&knew, info);
+	if (!info->kctl) {
+		kfree(info);
+		kfree(knew.name);
+		pr_err("%s: snd_ctl_new failed\n", __func__);
+		return -ENOMEM;
+	}
+	info->kctl->private_free = pcm_usr_ctl_private_free;
+	err = snd_ctl_add(pcm->card, info->kctl);
+	if (err < 0) {
+		kfree(info);
+		kfree(knew.name);
+		pr_err("%s: snd_ctl_add failed:%d\n", __func__,
+			err);
+		return -ENOMEM;
+	}
+	pcm->streams[stream].usr_kctl = info->kctl;
+	if (info_ret)
+		*info_ret = info;
+	kfree(knew.name);
+	return 0;
+}
+EXPORT_SYMBOL(snd_pcm_add_usr_ctls);
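/*
 * Illustrative sketch, not part of the patch: the overflow guard added in
 * snd_pcm_lib_ioctl_channel_info() above. Before computing a * b, check
 * a > MAX / b so the product cannot wrap.
 */
#include <stdio.h>
#include <limits.h>

/* store a*b in *res, or return -1 if it would exceed UINT_MAX */
static int mul_check(unsigned int a, unsigned int b, unsigned int *res)
{
	if (b && a > UINT_MAX / b)
		return -1;
	*res = a * b;
	return 0;
}

int main(void)
{
	unsigned int first;

	/* channel * width, as in the interleaved case above */
	if (mul_check(70000000, 64, &first))
		printf("rejected: integer overflow while multiply\n");
	if (!mul_check(2, 32, &first))
		printf("first = %u\n", first);
	return 0;
}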
diff -ruw linux-4.4.115/sound/core/pcm_misc.c linux-4.4.115-fbx/sound/core/pcm_misc.c
--- linux-4.4.115/sound/core/pcm_misc.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/sound/core/pcm_misc.c	2019-01-22 16:16:29.343299291 +0100
@@ -168,7 +168,9 @@
 		.le = -1, .signd = -1,
 	},
 	[SNDRV_PCM_FORMAT_SPECIAL] = {
-		.le = -1, .signd = -1,
+		/* set the width and phys the same as S16_LE */
+		.width = 16, .phys = 16, .le = -1, .signd = -1,
+		.silence = {},
 	},
 	[SNDRV_PCM_FORMAT_S24_3LE] = {
 		.width = 24, .phys = 24, .le = 1, .signd = 1,
diff -ruw linux-4.4.115/sound/core/pcm_native.c linux-4.4.115-fbx/sound/core/pcm_native.c
--- linux-4.4.115/sound/core/pcm_native.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/sound/core/pcm_native.c	2019-10-29 09:26:26.061226841 +0100
@@ -29,6 +29,7 @@
 #include <linux/dma-mapping.h>
 #include <sound/core.h>
 #include <sound/control.h>
+#include <sound/compress_offload.h>
 #include <sound/info.h>
 #include <sound/pcm.h>
 #include <sound/pcm_params.h>
@@ -196,7 +197,6 @@
 
 int snd_pcm_info(struct snd_pcm_substream *substream, struct snd_pcm_info *info)
 {
-	struct snd_pcm_runtime *runtime;
 	struct snd_pcm *pcm = substream->pcm;
 	struct snd_pcm_str *pstr = substream->pstr;
 
@@ -212,12 +212,7 @@
 	info->subdevices_count = pstr->substream_count;
 	info->subdevices_avail = pstr->substream_count - pstr->substream_opened;
 	strlcpy(info->subname, substream->name, sizeof(info->subname));
-	runtime = substream->runtime;
-	/* AB: FIXME!!! This is definitely nonsense */
-	if (runtime) {
-		info->sync = runtime->sync;
-		substream->ops->ioctl(substream, SNDRV_PCM_IOCTL1_INFO, info);
-	}
+
 	return 0;
 }
 
@@ -587,7 +582,8 @@
 	runtime->silence_threshold = 0;
 	runtime->silence_size = 0;
 	runtime->boundary = runtime->buffer_size;
-	while (runtime->boundary * 2 <= LONG_MAX - runtime->buffer_size)
+	while (runtime->boundary * 2 * runtime->channels <=
+					LONG_MAX - runtime->buffer_size)
 		runtime->boundary *= 2;
 
 	snd_pcm_timer_resolution_change(substream);
@@ -652,7 +648,9 @@
 	if (substream->ops->hw_free)
 		result = substream->ops->hw_free(substream);
 	snd_pcm_set_state(substream, SNDRV_PCM_STATE_OPEN);
+	if (pm_qos_request_active(&substream->latency_pm_qos_req))
 	pm_qos_remove_request(&substream->latency_pm_qos_req);
+
 	return result;
 }
 
@@ -1034,6 +1032,7 @@
 	if (runtime->status->state != SNDRV_PCM_STATE_PREPARED)
 		return -EBADFD;
 	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK &&
+	    !substream->hw_no_buffer &&
 	    !snd_pcm_playback_data(substream))
 		return -EPIPE;
 	runtime->trigger_tstamp_latched = false;
@@ -1087,6 +1086,33 @@
 			      SNDRV_PCM_STATE_RUNNING);
 }
 
+static int snd_compressed_ioctl(struct snd_pcm_substream *substream,
+				 unsigned int cmd, void __user *arg)
+{
+	struct snd_pcm_runtime *runtime;
+	int err = 0;
+
+	if (PCM_RUNTIME_CHECK(substream))
+		return -ENXIO;
+	runtime = substream->runtime;
+	pr_debug("%s called with cmd = %d\n", __func__, cmd);
+	err = substream->ops->ioctl(substream, cmd, arg);
+	return err;
+}
+
+static int snd_user_ioctl(struct snd_pcm_substream *substream,
+			  unsigned int cmd, void __user *arg)
+{
+	struct snd_pcm_runtime *runtime;
+	int err = 0;
+
+	if (PCM_RUNTIME_CHECK(substream))
+		return -ENXIO;
+	runtime = substream->runtime;
+	err = substream->ops->ioctl(substream, cmd, arg);
+	return err;
+}
+
 /*
  * stop callbacks
  */
@@ -1986,7 +2012,8 @@
 #endif
 
 static unsigned int rates[] = { 5512, 8000, 11025, 16000, 22050, 32000, 44100,
-                                 48000, 64000, 88200, 96000, 176400, 192000 };
+				48000, 64000, 88200, 96000, 176400, 192000,
+				352800, 384000 };
 
 const struct snd_pcm_hw_constraint_list snd_pcm_known_rates = {
 	.count = ARRAY_SIZE(rates),
@@ -2701,6 +2728,7 @@
 	volatile struct snd_pcm_mmap_status *status;
 	volatile struct snd_pcm_mmap_control *control;
 	int err;
+	snd_pcm_uframes_t hw_avail;
 
 	memset(&sync_ptr, 0, sizeof(sync_ptr));
 	if (get_user(sync_ptr.flags, (unsigned __user *)&(_sync_ptr->flags)))
@@ -2723,6 +2751,16 @@
 		control->avail_min = sync_ptr.c.control.avail_min;
 	else
 		sync_ptr.c.control.avail_min = control->avail_min;
+
+	if (runtime->render_flag & SNDRV_NON_DMA_MODE) {
+		hw_avail = snd_pcm_playback_hw_avail(runtime);
+		if ((hw_avail >= runtime->start_threshold)
+			&& (runtime->render_flag &
+				SNDRV_RENDER_STOPPED)) {
+			if (substream->ops->restart)
+				substream->ops->restart(substream);
+		}
+	}
 	sync_ptr.s.status.state = status->state;
 	sync_ptr.s.status.hw_ptr = status->hw_ptr;
 	sync_ptr.s.status.tstamp = status->tstamp;
@@ -2811,6 +2849,16 @@
 		snd_pcm_stream_unlock_irq(substream);
 		return res;
 	}
+	case SNDRV_COMPRESS_GET_CAPS:
+	case SNDRV_COMPRESS_GET_CODEC_CAPS:
+	case SNDRV_COMPRESS_SET_PARAMS:
+	case SNDRV_COMPRESS_GET_PARAMS:
+	case SNDRV_COMPRESS_TSTAMP:
+	case SNDRV_COMPRESS_DRAIN:
+		return snd_compressed_ioctl(substream, cmd, arg);
+	default:
+		if (((cmd >> 8) & 0xff) == 'U')
+			return snd_user_ioctl(substream, cmd, arg);
 	}
 	pcm_dbg(substream->pcm, "unknown ioctl = 0x%x\n", cmd);
 	return -ENOTTY;
@@ -2980,10 +3028,12 @@
 				   unsigned long arg)
 {
 	struct snd_pcm_file *pcm_file;
+	unsigned char ioctl_magic;
 
 	pcm_file = file->private_data;
+	ioctl_magic = ((cmd >> 8) & 0xff);
 
-	if (((cmd >> 8) & 0xff) != 'A')
+	if (ioctl_magic != 'A' && ioctl_magic != 'C' && ioctl_magic != 'U')
 		return -ENOTTY;
 
 	return snd_pcm_playback_ioctl1(file, pcm_file->substream, cmd,
@@ -2994,10 +3044,12 @@
 				  unsigned long arg)
 {
 	struct snd_pcm_file *pcm_file;
+	unsigned char ioctl_magic;
 
 	pcm_file = file->private_data;
+	ioctl_magic = ((cmd >> 8) & 0xff);
 
-	if (((cmd >> 8) & 0xff) != 'A')
+	if (ioctl_magic != 'A' && ioctl_magic != 'U')
 		return -ENOTTY;
 
 	return snd_pcm_capture_ioctl1(file, pcm_file->substream, cmd,
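/*
 * Illustrative sketch, not part of the patch: the boundary computation
 * changed above. boundary starts at buffer_size and doubles while the
 * condition holds; the patched condition additionally scales by the
 * channel count. The sizes below are arbitrary examples.
 */
#include <stdio.h>
#include <limits.h>

int main(void)
{
	unsigned long buffer_size = 4096, channels = 8;
	unsigned long boundary = buffer_size;

	while (boundary * 2 * channels <= LONG_MAX - buffer_size)
		boundary *= 2;
	printf("boundary = %lu (a power-of-two multiple of %lu)\n",
	       boundary, buffer_size);
	return 0;
}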
diff -ruw linux-4.4.115/sound/core/pcm_timer.c linux-4.4.115-fbx/sound/core/pcm_timer.c
--- linux-4.4.115/sound/core/pcm_timer.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/sound/core/pcm_timer.c	2019-01-22 16:16:29.347299327 +0100
@@ -65,9 +65,16 @@
 static unsigned long snd_pcm_timer_resolution(struct snd_timer * timer)
 {
 	struct snd_pcm_substream *substream;
+	unsigned long ret = 0, flags = 0;
 	
 	substream = timer->private_data;
-	return substream->runtime ? substream->runtime->timer_resolution : 0;
+	spin_lock_irqsave(&substream->runtime_lock, flags);
+	if (substream->runtime)
+		ret = substream->runtime->timer_resolution;
+	else
+		ret = 0;
+	spin_unlock_irqrestore(&substream->runtime_lock, flags);
+	return ret;
 }
 
 static int snd_pcm_timer_start(struct snd_timer * timer)
diff -ruw linux-4.4.115/sound/core/rawmidi.c linux-4.4.115-fbx/sound/core/rawmidi.c
--- linux-4.4.115/sound/core/rawmidi.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/sound/core/rawmidi.c	2019-10-29 09:26:26.061226841 +0100
@@ -115,6 +115,7 @@
 		return -ENOMEM;
 	runtime->substream = substream;
 	spin_lock_init(&runtime->lock);
+	mutex_init(&runtime->realloc_mutex);
 	init_waitqueue_head(&runtime->sleep);
 	INIT_WORK(&runtime->event_work, snd_rawmidi_input_event_work);
 	runtime->event = NULL;
@@ -636,7 +637,9 @@
 			      struct snd_rawmidi_params * params)
 {
 	char *newbuf;
+	char *oldbuf;
 	struct snd_rawmidi_runtime *runtime = substream->runtime;
+	unsigned long flags;
 	
 	if (substream->append && substream->use_count > 1)
 		return -EBUSY;
@@ -648,13 +651,22 @@
 		return -EINVAL;
 	}
 	if (params->buffer_size != runtime->buffer_size) {
-		newbuf = krealloc(runtime->buffer, params->buffer_size,
+		mutex_lock(&runtime->realloc_mutex);
+		newbuf = __krealloc(runtime->buffer, params->buffer_size,
 				  GFP_KERNEL);
-		if (!newbuf)
+		if (!newbuf) {
+			mutex_unlock(&runtime->realloc_mutex);
 			return -ENOMEM;
+		}
+		spin_lock_irqsave(&runtime->lock, flags);
+		oldbuf = runtime->buffer;
 		runtime->buffer = newbuf;
 		runtime->buffer_size = params->buffer_size;
 		runtime->avail = runtime->buffer_size;
+		spin_unlock_irqrestore(&runtime->lock, flags);
+		if (oldbuf != newbuf)
+			kfree(oldbuf);
+		mutex_unlock(&runtime->realloc_mutex);
 	}
 	runtime->avail_min = params->avail_min;
 	substream->active_sensing = !params->no_active_sensing;
@@ -666,7 +678,9 @@
 			     struct snd_rawmidi_params * params)
 {
 	char *newbuf;
+	char *oldbuf;
 	struct snd_rawmidi_runtime *runtime = substream->runtime;
+	unsigned long flags;
 
 	snd_rawmidi_drain_input(substream);
 	if (params->buffer_size < 32 || params->buffer_size > 1024L * 1024L) {
@@ -676,12 +690,21 @@
 		return -EINVAL;
 	}
 	if (params->buffer_size != runtime->buffer_size) {
-		newbuf = krealloc(runtime->buffer, params->buffer_size,
+		mutex_lock(&runtime->realloc_mutex);
+		newbuf = __krealloc(runtime->buffer, params->buffer_size,
 				  GFP_KERNEL);
-		if (!newbuf)
+		if (!newbuf) {
+			mutex_unlock(&runtime->realloc_mutex);
 			return -ENOMEM;
+		}
+		spin_lock_irqsave(&runtime->lock, flags);
+		oldbuf = runtime->buffer;
 		runtime->buffer = newbuf;
 		runtime->buffer_size = params->buffer_size;
+		spin_unlock_irqrestore(&runtime->lock, flags);
+		if (oldbuf != newbuf)
+			kfree(oldbuf);
+		mutex_unlock(&runtime->realloc_mutex);
 	}
 	runtime->avail_min = params->avail_min;
 	return 0;
@@ -953,6 +976,8 @@
 	struct snd_rawmidi_runtime *runtime = substream->runtime;
 	unsigned long appl_ptr;
 
+	if (userbuf)
+		mutex_lock(&runtime->realloc_mutex);
 	spin_lock_irqsave(&runtime->lock, flags);
 	while (count > 0 && runtime->avail) {
 		count1 = runtime->buffer_size - runtime->appl_ptr;
@@ -973,6 +998,7 @@
 			spin_unlock_irqrestore(&runtime->lock, flags);
 			if (copy_to_user(userbuf + result,
 					 runtime->buffer + appl_ptr, count1)) {
+				mutex_unlock(&runtime->realloc_mutex);
 				return result > 0 ? result : -EFAULT;
 			}
 			spin_lock_irqsave(&runtime->lock, flags);
@@ -981,6 +1007,8 @@
 		count -= count1;
 	}
 	spin_unlock_irqrestore(&runtime->lock, flags);
+	if (userbuf)
+		mutex_unlock(&runtime->realloc_mutex);
 	return result;
 }
 
@@ -1245,10 +1273,14 @@
 		return -EINVAL;
 
 	result = 0;
+	if (userbuf)
+		mutex_lock(&runtime->realloc_mutex);
 	spin_lock_irqsave(&runtime->lock, flags);
 	if (substream->append) {
 		if ((long)runtime->avail < count) {
 			spin_unlock_irqrestore(&runtime->lock, flags);
+			if (userbuf)
+				mutex_unlock(&runtime->realloc_mutex);
 			return -EAGAIN;
 		}
 	}
@@ -1284,6 +1316,8 @@
       __end:
 	count1 = runtime->avail < runtime->buffer_size;
 	spin_unlock_irqrestore(&runtime->lock, flags);
+	if (userbuf)
+		mutex_unlock(&runtime->realloc_mutex);
 	if (count1)
 		snd_rawmidi_output_trigger(substream, 1);
 	return result;
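/*
 * Illustrative sketch, not part of the patch: the resize pattern the
 * rawmidi hunks adopt. A plain krealloc() may free a buffer a concurrent
 * reader still holds; __krealloc() never frees its input, so the code
 * swaps the pointer under the lock and frees the old buffer afterwards.
 * A pthread mutex stands in for the spinlock. Build with -lpthread.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static char *buffer;
static size_t buffer_size;

static int resize_buffer(size_t new_size)
{
	char *newbuf, *oldbuf;

	newbuf = malloc(new_size);	/* allocate outside the lock */
	if (!newbuf)
		return -1;

	pthread_mutex_lock(&lock);
	oldbuf = buffer;
	memcpy(newbuf, oldbuf, buffer_size < new_size ? buffer_size : new_size);
	buffer = newbuf;		/* readers now see the new buffer */
	buffer_size = new_size;
	pthread_mutex_unlock(&lock);

	free(oldbuf);			/* safe: no reader can hold it now */
	return 0;
}

int main(void)
{
	buffer = calloc(1, buffer_size = 32);
	printf("resize -> %d, size=%zu\n", resize_buffer(64), buffer_size);
	free(buffer);
	return 0;
}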
diff -ruw linux-4.4.115/sound/core/timer.c linux-4.4.115-fbx/sound/core/timer.c
--- linux-4.4.115/sound/core/timer.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/sound/core/timer.c	2019-10-29 09:26:26.065226880 +0100
@@ -318,8 +318,6 @@
 	return 0;
 }
 
-static int _snd_timer_stop(struct snd_timer_instance *timeri, int event);
-
 /*
  * close a timer instance
  */
@@ -408,7 +406,6 @@
 static void snd_timer_notify1(struct snd_timer_instance *ti, int event)
 {
 	struct snd_timer *timer;
-	unsigned long flags;
 	unsigned long resolution = 0;
 	struct snd_timer_instance *ts;
 	struct timespec tstamp;
@@ -432,34 +429,66 @@
 		return;
 	if (timer->hw.flags & SNDRV_TIMER_HW_SLAVE)
 		return;
-	spin_lock_irqsave(&timer->lock, flags);
 	list_for_each_entry(ts, &ti->slave_active_head, active_list)
 		if (ts->ccallback)
 			ts->ccallback(ts, event + 100, &tstamp, resolution);
-	spin_unlock_irqrestore(&timer->lock, flags);
 }
 
-static int snd_timer_start1(struct snd_timer *timer, struct snd_timer_instance *timeri,
-			    unsigned long sticks)
+/* start/continue a master timer */
+static int snd_timer_start1(struct snd_timer_instance *timeri,
+			    bool start, unsigned long ticks)
 {
+	struct snd_timer *timer;
+	int result;
+	unsigned long flags;
+
+	timer = timeri->timer;
+	if (!timer)
+		return -EINVAL;
+
+	spin_lock_irqsave(&timer->lock, flags);
+	if (timer->card && timer->card->shutdown) {
+		result = -ENODEV;
+		goto unlock;
+	}
+	if (timeri->flags & (SNDRV_TIMER_IFLG_RUNNING |
+			     SNDRV_TIMER_IFLG_START)) {
+		result = -EBUSY;
+		goto unlock;
+	}
+
+	if (start)
+		timeri->ticks = timeri->cticks = ticks;
+	else if (!timeri->cticks)
+		timeri->cticks = 1;
+	timeri->pticks = 0;
+
 	list_move_tail(&timeri->active_list, &timer->active_list_head);
 	if (timer->running) {
 		if (timer->hw.flags & SNDRV_TIMER_HW_SLAVE)
 			goto __start_now;
 		timer->flags |= SNDRV_TIMER_FLG_RESCHED;
 		timeri->flags |= SNDRV_TIMER_IFLG_START;
-		return 1;	/* delayed start */
+		result = 1; /* delayed start */
 	} else {
-		timer->sticks = sticks;
+		if (start)
+			timer->sticks = ticks;
 		timer->hw.start(timer);
 	      __start_now:
 		timer->running++;
 		timeri->flags |= SNDRV_TIMER_IFLG_RUNNING;
-		return 0;
+		result = 0;
 	}
+	snd_timer_notify1(timeri, start ? SNDRV_TIMER_EVENT_START :
+			  SNDRV_TIMER_EVENT_CONTINUE);
+ unlock:
+	spin_unlock_irqrestore(&timer->lock, flags);
+	return result;
 }
 
-static int snd_timer_start_slave(struct snd_timer_instance *timeri)
+/* start/continue a slave timer */
+static int snd_timer_start_slave(struct snd_timer_instance *timeri,
+				 bool start)
 {
 	unsigned long flags;
 
@@ -473,107 +502,92 @@
 		spin_lock(&timeri->timer->lock);
 		list_add_tail(&timeri->active_list,
 			      &timeri->master->slave_active_head);
+		snd_timer_notify1(timeri, start ? SNDRV_TIMER_EVENT_START :
+				  SNDRV_TIMER_EVENT_CONTINUE);
 		spin_unlock(&timeri->timer->lock);
 	}
 	spin_unlock_irqrestore(&slave_active_lock, flags);
 	return 1; /* delayed start */
 }
 
-/*
- *  start the timer instance
- */
-int snd_timer_start(struct snd_timer_instance *timeri, unsigned int ticks)
+/* stop/pause a master timer */
+static int snd_timer_stop1(struct snd_timer_instance *timeri, bool stop)
 {
 	struct snd_timer *timer;
-	int result = -EINVAL;
+	int result = 0;
 	unsigned long flags;
 
-	if (timeri == NULL || ticks < 1)
-		return -EINVAL;
-	if (timeri->flags & SNDRV_TIMER_IFLG_SLAVE) {
-		result = snd_timer_start_slave(timeri);
-		if (result >= 0)
-			snd_timer_notify1(timeri, SNDRV_TIMER_EVENT_START);
-		return result;
-	}
 	timer = timeri->timer;
-	if (timer == NULL)
+	if (!timer)
 		return -EINVAL;
-	if (timer->card && timer->card->shutdown)
-		return -ENODEV;
 	spin_lock_irqsave(&timer->lock, flags);
-	if (timeri->flags & (SNDRV_TIMER_IFLG_RUNNING |
-			     SNDRV_TIMER_IFLG_START)) {
+	if (!(timeri->flags & (SNDRV_TIMER_IFLG_RUNNING |
+			       SNDRV_TIMER_IFLG_START))) {
 		result = -EBUSY;
 		goto unlock;
 	}
-	timeri->ticks = timeri->cticks = ticks;
+	list_del_init(&timeri->ack_list);
+	list_del_init(&timeri->active_list);
+	if (timer->card && timer->card->shutdown)
+		goto unlock;
+	if (stop) {
+		timeri->cticks = timeri->ticks;
 	timeri->pticks = 0;
-	result = snd_timer_start1(timer, timeri, ticks);
+	}
+	if ((timeri->flags & SNDRV_TIMER_IFLG_RUNNING) &&
+	    !(--timer->running)) {
+		timer->hw.stop(timer);
+		if (timer->flags & SNDRV_TIMER_FLG_RESCHED) {
+			timer->flags &= ~SNDRV_TIMER_FLG_RESCHED;
+			snd_timer_reschedule(timer, 0);
+			if (timer->flags & SNDRV_TIMER_FLG_CHANGE) {
+				timer->flags &= ~SNDRV_TIMER_FLG_CHANGE;
+				timer->hw.start(timer);
+			}
+		}
+	}
+	timeri->flags &= ~(SNDRV_TIMER_IFLG_RUNNING | SNDRV_TIMER_IFLG_START);
+	snd_timer_notify1(timeri, stop ? SNDRV_TIMER_EVENT_STOP :
+			  SNDRV_TIMER_EVENT_CONTINUE);
  unlock:
 	spin_unlock_irqrestore(&timer->lock, flags);
-	if (result >= 0)
-		snd_timer_notify1(timeri, SNDRV_TIMER_EVENT_START);
 	return result;
 }
 
-static int _snd_timer_stop(struct snd_timer_instance *timeri, int event)
+/* stop/pause a slave timer */
+static int snd_timer_stop_slave(struct snd_timer_instance *timeri, bool stop)
 {
-	struct snd_timer *timer;
 	unsigned long flags;
 
-	if (snd_BUG_ON(!timeri))
-		return -ENXIO;
-
-	if (timeri->flags & SNDRV_TIMER_IFLG_SLAVE) {
 		spin_lock_irqsave(&slave_active_lock, flags);
 		if (!(timeri->flags & SNDRV_TIMER_IFLG_RUNNING)) {
 			spin_unlock_irqrestore(&slave_active_lock, flags);
 			return -EBUSY;
 		}
-		if (timeri->timer)
-			spin_lock(&timeri->timer->lock);
 		timeri->flags &= ~SNDRV_TIMER_IFLG_RUNNING;
+	if (timeri->timer) {
+		spin_lock(&timeri->timer->lock);
 		list_del_init(&timeri->ack_list);
 		list_del_init(&timeri->active_list);
-		if (timeri->timer)
+		snd_timer_notify1(timeri, stop ? SNDRV_TIMER_EVENT_STOP :
+				  SNDRV_TIMER_EVENT_CONTINUE);
 			spin_unlock(&timeri->timer->lock);
-		spin_unlock_irqrestore(&slave_active_lock, flags);
-		goto __end;
 	}
-	timer = timeri->timer;
-	if (!timer)
-		return -EINVAL;
-	spin_lock_irqsave(&timer->lock, flags);
-	if (!(timeri->flags & (SNDRV_TIMER_IFLG_RUNNING |
-			       SNDRV_TIMER_IFLG_START))) {
-		spin_unlock_irqrestore(&timer->lock, flags);
-		return -EBUSY;
-	}
-	list_del_init(&timeri->ack_list);
-	list_del_init(&timeri->active_list);
-	if (timer->card && timer->card->shutdown) {
-		spin_unlock_irqrestore(&timer->lock, flags);
+	spin_unlock_irqrestore(&slave_active_lock, flags);
 		return 0;
 	}
-	if ((timeri->flags & SNDRV_TIMER_IFLG_RUNNING) &&
-	    !(--timer->running)) {
-		timer->hw.stop(timer);
-		if (timer->flags & SNDRV_TIMER_FLG_RESCHED) {
-			timer->flags &= ~SNDRV_TIMER_FLG_RESCHED;
-			snd_timer_reschedule(timer, 0);
-			if (timer->flags & SNDRV_TIMER_FLG_CHANGE) {
-				timer->flags &= ~SNDRV_TIMER_FLG_CHANGE;
-				timer->hw.start(timer);
-			}
-		}
-	}
-	timeri->flags &= ~(SNDRV_TIMER_IFLG_RUNNING | SNDRV_TIMER_IFLG_START);
-	spin_unlock_irqrestore(&timer->lock, flags);
-      __end:
-	if (event != SNDRV_TIMER_EVENT_RESOLUTION)
-		snd_timer_notify1(timeri, event);
-	return 0;
+
+/*
+ *  start the timer instance
+ */
+int snd_timer_start(struct snd_timer_instance *timeri, unsigned int ticks)
+{
+	if (timeri == NULL || ticks < 1)
+		return -EINVAL;
+	if (timeri->flags & SNDRV_TIMER_IFLG_SLAVE)
+		return snd_timer_start_slave(timeri, true);
+	else
+		return snd_timer_start1(timeri, true, ticks);
 }
 
 /*
@@ -583,21 +597,10 @@
  */
 int snd_timer_stop(struct snd_timer_instance *timeri)
 {
-	struct snd_timer *timer;
-	unsigned long flags;
-	int err;
-
-	err = _snd_timer_stop(timeri, SNDRV_TIMER_EVENT_STOP);
-	if (err < 0)
-		return err;
-	timer = timeri->timer;
-	if (!timer)
-		return -EINVAL;
-	spin_lock_irqsave(&timer->lock, flags);
-	timeri->cticks = timeri->ticks;
-	timeri->pticks = 0;
-	spin_unlock_irqrestore(&timer->lock, flags);
-	return 0;
+	if (timeri->flags & SNDRV_TIMER_IFLG_SLAVE)
+		return snd_timer_stop_slave(timeri, true);
+	else
+		return snd_timer_stop1(timeri, true);
 }
 
 /*
@@ -605,32 +608,10 @@
  */
 int snd_timer_continue(struct snd_timer_instance *timeri)
 {
-	struct snd_timer *timer;
-	int result = -EINVAL;
-	unsigned long flags;
-
-	if (timeri == NULL)
-		return result;
 	if (timeri->flags & SNDRV_TIMER_IFLG_SLAVE)
-		return snd_timer_start_slave(timeri);
-	timer = timeri->timer;
-	if (! timer)
-		return -EINVAL;
-	if (timer->card && timer->card->shutdown)
-		return -ENODEV;
-	spin_lock_irqsave(&timer->lock, flags);
-	if (timeri->flags & SNDRV_TIMER_IFLG_RUNNING) {
-		result = -EBUSY;
-		goto unlock;
-	}
-	if (!timeri->cticks)
-		timeri->cticks = 1;
-	timeri->pticks = 0;
-	result = snd_timer_start1(timer, timeri, timer->sticks);
- unlock:
-	spin_unlock_irqrestore(&timer->lock, flags);
-	snd_timer_notify1(timeri, SNDRV_TIMER_EVENT_CONTINUE);
-	return result;
+		return snd_timer_start_slave(timeri, false);
+	else
+		return snd_timer_start1(timeri, false, 0);
 }
 
 /*
@@ -638,7 +619,10 @@
  */
 int snd_timer_pause(struct snd_timer_instance * timeri)
 {
-	return _snd_timer_stop(timeri, SNDRV_TIMER_EVENT_PAUSE);
+	if (timeri->flags & SNDRV_TIMER_IFLG_SLAVE)
+		return snd_timer_stop_slave(timeri, false);
+	else
+		return snd_timer_stop1(timeri, false);
 }
 
 /*
@@ -1135,7 +1119,11 @@
 
 	mutex_lock(&register_mutex);
 	list_for_each_entry(timer, &snd_timer_list, device_list) {
-		if (timer->card && timer->card->shutdown)
+		if (timer->card == NULL) {
+			pr_debug("%s: timer->card is NULL\n", __func__);
+			continue;
+		}
+		if (timer->card->shutdown)
 			continue;
 		switch (timer->tmr_class) {
 		case SNDRV_TIMER_CLASS_GLOBAL:
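/*
 * Illustrative sketch, not part of the patch: the refactor shape used in
 * the timer hunks above, where snd_timer_start1()/snd_timer_stop1() take
 * a bool so start/continue (and stop/pause) share one path. Reduced to a
 * toy timer struct.
 */
#include <stdbool.h>
#include <stdio.h>

struct toy_timer {
	unsigned long ticks;	/* programmed interval */
	unsigned long cticks;	/* ticks left in the current interval */
};

static int timer_start1(struct toy_timer *t, bool start, unsigned long ticks)
{
	if (start)			/* fresh start: program the interval */
		t->ticks = t->cticks = ticks;
	else if (!t->cticks)		/* continue: resume what is left */
		t->cticks = 1;
	return 0;
}

int main(void)
{
	struct toy_timer t = { 0, 0 };

	timer_start1(&t, true, 100);	/* the snd_timer_start() path */
	t.cticks = 40;			/* pretend some ticks elapsed */
	timer_start1(&t, false, 0);	/* the snd_timer_continue() path */
	printf("ticks=%lu cticks=%lu\n", t.ticks, t.cticks);
	return 0;
}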
diff -ruw linux-4.4.115/sound/soc/codecs/Kconfig linux-4.4.115-fbx/sound/soc/codecs/Kconfig
--- linux-4.4.115/sound/soc/codecs/Kconfig	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/sound/soc/codecs/Kconfig	2019-10-29 09:26:26.109227311 +0100
@@ -81,6 +81,7 @@
 	select SND_SOC_MAX9877 if I2C
 	select SND_SOC_MC13783 if MFD_MC13XXX
 	select SND_SOC_ML26124 if I2C
+	select SND_SOC_HDMI_CODEC
 	select SND_SOC_NAU8825 if I2C
 	select SND_SOC_PCM1681 if I2C
 	select SND_SOC_PCM1792A if SPI_MASTER
@@ -97,6 +98,7 @@
 	select SND_SOC_RT5677 if I2C && SPI_MASTER
 	select SND_SOC_SGTL5000 if I2C
 	select SND_SOC_SI476X if MFD_SI476X_CORE
+	select SND_SOC_SIL9437 if I2C
 	select SND_SOC_SIRF_AUDIO_CODEC
 	select SND_SOC_SN95031 if INTEL_SCU_IPC
 	select SND_SOC_SPDIF
@@ -112,6 +114,7 @@
 	select SND_SOC_TAS2552 if I2C
 	select SND_SOC_TAS5086 if I2C
 	select SND_SOC_TAS571X if I2C
+	select SND_SOC_TAS5766 if I2C
 	select SND_SOC_TFA9879 if I2C
 	select SND_SOC_TLV320AIC23_I2C if I2C
 	select SND_SOC_TLV320AIC23_SPI if SPI_MASTER
@@ -454,6 +457,9 @@
 config SND_SOC_DMIC
 	tristate
 
+config SND_SOC_HDMI_CODEC
+       tristate "HDMI stub CODEC"
+	tristate "HDMI stub CODEC"
 config SND_SOC_ES8328
 	tristate "Everest Semi ES8328 CODEC"
 
@@ -582,6 +588,11 @@
 config SND_SOC_SI476X
 	tristate
 
+config SND_SOC_SIL9437
+	tristate "Silicon Image Sil9437 ARC/eARC receiver"
+	depends on I2C
+	select REGMAP_I2C
+
 config SND_SOC_SIGMADSP
 	tristate
 	select CRC32
@@ -656,6 +667,10 @@
 	tristate "Texas Instruments TAS5711/TAS5717/TAS5719 power amplifiers"
 	depends on I2C
 
+config SND_SOC_TAS5766
+	tristate "Texas Instruments TAS5766 power amplifiers"
+	depends on I2C
+
 config SND_SOC_TFA9879
 	tristate "NXP Semiconductors TFA9879 amplifier"
 	depends on I2C
@@ -709,6 +724,71 @@
 config SND_SOC_UDA1380
         tristate
 
+config SND_SOC_WCD934X_DSD
+        tristate
+
+config SND_SOC_WCD9320
+        tristate
+
+config SND_SOC_WCD9330
+        tristate
+	depends on WCD9330_CODEC
+
+config SND_SOC_WCD9335
+        tristate
+	depends on WCD9335_CODEC
+
+config SND_SOC_WCD934X
+        tristate
+	depends on WCD934X_CODEC
+	select SND_SOC_WCD9XXX_V2
+	select AUDIO_EXT_CLK
+	select SND_SOC_WCD_DSP_MGR
+	select SND_SOC_WCD_SPI
+	select SND_SOC_WCD934X_MBHC
+        select SND_SOC_WCD934X_DSD
+
+config SND_SOC_WCD934X_MBHC
+        tristate
+	depends on SND_SOC_WCD934X
+	select SND_SOC_WCD_MBHC
+
+config SND_SOC_WSA881X
+        tristate
+	select MSM_CDC_PINCTRL
+	select REGMAP_SWR
+
+config SND_SOC_WSA881X_ANALOG
+        tristate
+	select REGMAP_I2C
+
+config SND_SOC_WCD9XXX
+	tristate
+	default y if SND_SOC_WCD9320=y || SND_SOC_WCD9330=y || SND_SOC_WCD9335=y
+
+config SND_SOC_WCD9XXX_V2
+	tristate
+	default y if SND_SOC_WCD9335=y
+
+config SND_SOC_WCD_CPE
+	tristate
+	default y if SND_SOC_WCD9330=y || SND_SOC_WCD9335=y
+
+config AUDIO_EXT_CLK
+	tristate
+	default y if SND_SOC_WCD9335=y || SND_SOC_WCD9330=y || SND_SOC_SDM660_CDC=y
+
+config SND_SOC_WCD_MBHC
+	tristate
+	default y if (SND_SOC_MSM8909_WCD=y || SND_SOC_SDM660_CDC=y || SND_SOC_WCD9335=y) && SND_SOC_MDMCALIFORNIUM!=y
+
+config SND_SOC_WCD_DSP_MGR
+	tristate
+
+config SND_SOC_WCD_SPI
+	depends on SPI
+	tristate
+
 config SND_SOC_WL1273
 	tristate
 
@@ -915,4 +995,17 @@
 	tristate "Texas Instruments TPA6130A2 headphone amplifier"
 	depends on I2C
 
+config SND_SOC_MSM_STUB
+	tristate
+
+config SND_SOC_MSM_HDMI_CODEC_RX
+	bool "HDMI Audio Playback"
+	depends on FB_MSM_MDSS_HDMI_PANEL && (SND_SOC_APQ8084 || SND_SOC_MSM8994 || SND_SOC_MSM8996 || SND_SOC_MSM8998 || SND_SOC_SDM660_COMMON)
+	help
+	  HDMI audio drivers should be built only if the platform
+	  supports an HDMI panel.
+
+source "sound/soc/codecs/sdm660_cdc/Kconfig"
+source "sound/soc/codecs/msm_sdw/Kconfig"
+
 endmenu
diff -ruw linux-4.4.115/sound/soc/codecs/Makefile linux-4.4.115-fbx/sound/soc/codecs/Makefile
--- linux-4.4.115/sound/soc/codecs/Makefile	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/sound/soc/codecs/Makefile	2019-01-22 16:16:29.499300704 +0100
@@ -74,6 +74,7 @@
 snd-soc-max9850-objs := max9850.o
 snd-soc-mc13783-objs := mc13783.o
 snd-soc-ml26124-objs := ml26124.o
+snd-soc-hdmi-codec-objs := hdmi.o
 snd-soc-nau8825-objs := nau8825.o
 snd-soc-pcm1681-objs := pcm1681.o
 snd-soc-pcm1792a-codec-objs := pcm1792a.o
@@ -99,6 +100,7 @@
 snd-soc-sigmadsp-i2c-objs := sigmadsp-i2c.o
 snd-soc-sigmadsp-regmap-objs := sigmadsp-regmap.o
 snd-soc-si476x-objs := si476x.o
+snd-soc-sil9437-objs := sil9437.o
 snd-soc-sirf-audio-codec-objs := sirf-audio-codec.o
 snd-soc-sn95031-objs := sn95031.o
 snd-soc-spdif-tx-objs := spdif_transmitter.o
@@ -115,6 +117,7 @@
 snd-soc-sti-sas-objs := sti-sas.o
 snd-soc-tas5086-objs := tas5086.o
 snd-soc-tas571x-objs := tas571x.o
+snd-soc-tas5766-objs := tas5766.o
 snd-soc-tfa9879-objs := tfa9879.o
 snd-soc-tlv320aic23-objs := tlv320aic23.o
 snd-soc-tlv320aic23-i2c-objs := tlv320aic23-i2c.o
@@ -129,6 +132,27 @@
 snd-soc-twl6040-objs := twl6040.o
 snd-soc-uda134x-objs := uda134x.o
 snd-soc-uda1380-objs := uda1380.o
+snd-soc-wcd9320-objs := wcd9320.o wcd9320-tables.o
+snd-soc-wcd9330-objs := wcd9330.o wcd9330-tables.o
+snd-soc-wcd9335-objs := wcd9335.o
+snd-soc-wcd934x-objs := wcd934x.o
+snd-soc-wcd9xxx-objs := wcd9xxx-resmgr.o wcd9xxx-mbhc.o wcd9xxx-common.o wcdcal-hwdep.o
+snd-soc-wcd9xxx-v2-objs := wcd9xxx-common-v2.o wcd9xxx-resmgr-v2.o
+ifeq ($(CONFIG_COMMON_CLK_MSM), y)
+	audio-ext-clock-objs := audio-ext-clk.o
+endif
+
+ifeq ($(CONFIG_COMMON_CLK_QCOM), y)
+	audio-ext-clock-up-objs := audio-ext-clk-up.o
+endif
+snd-soc-wcd-cpe-objs := wcd_cpe_services.o wcd_cpe_core.o
+snd-soc-wsa881x-objs := wsa881x.o wsa881x-tables.o wsa881x-regmap.o wsa881x-temp-sensor.o
+snd-soc-wcd-mbhc-objs := wcd-mbhc-v2.o
+snd-soc-wsa881x-analog-objs := wsa881x-analog.o wsa881x-tables-analog.o
+snd-soc-wsa881x-analog-objs += wsa881x-regmap-analog.o wsa881x-irq.o
+snd-soc-wcd-dsp-utils-objs := wcd-dsp-utils.o
+snd-soc-wcd-dsp-mgr-objs := wcd-dsp-mgr.o
+snd-soc-wcd-spi-objs := wcd-spi.o
 snd-soc-wl1273-objs := wl1273.o
 snd-soc-wm-adsp-objs := wm_adsp.o
 snd-soc-wm0010-objs := wm0010.o
@@ -185,6 +209,8 @@
 snd-soc-wm9712-objs := wm9712.o
 snd-soc-wm9713-objs := wm9713.o
 snd-soc-wm-hubs-objs := wm_hubs.o
+snd-soc-msm-stub-objs := msm_stub.o
+obj-$(CONFIG_SND_SOC_MSM_HDMI_CODEC_RX) += msm_hdmi_codec_rx.o
 
 # Amp
 snd-soc-max9877-objs := max9877.o
@@ -269,6 +295,7 @@
 obj-$(CONFIG_SND_SOC_MAX9850)	+= snd-soc-max9850.o
 obj-$(CONFIG_SND_SOC_MC13783)	+= snd-soc-mc13783.o
 obj-$(CONFIG_SND_SOC_ML26124)	+= snd-soc-ml26124.o
+obj-$(CONFIG_SND_SOC_HDMI_CODEC) += snd-soc-hdmi-codec.o
 obj-$(CONFIG_SND_SOC_NAU8825)   += snd-soc-nau8825.o
 obj-$(CONFIG_SND_SOC_PCM1681)	+= snd-soc-pcm1681.o
 obj-$(CONFIG_SND_SOC_PCM1792A)	+= snd-soc-pcm1792a-codec.o
@@ -292,6 +319,7 @@
 obj-$(CONFIG_SND_SOC_SIGMADSP_I2C)	+= snd-soc-sigmadsp-i2c.o
 obj-$(CONFIG_SND_SOC_SIGMADSP_REGMAP)	+= snd-soc-sigmadsp-regmap.o
 obj-$(CONFIG_SND_SOC_SI476X)	+= snd-soc-si476x.o
+obj-$(CONFIG_SND_SOC_SIL9437)	+= snd-soc-sil9437.o
 obj-$(CONFIG_SND_SOC_SN95031)	+=snd-soc-sn95031.o
 obj-$(CONFIG_SND_SOC_SPDIF)	+= snd-soc-spdif-rx.o snd-soc-spdif-tx.o
 obj-$(CONFIG_SND_SOC_SSM2518)	+= snd-soc-ssm2518.o
@@ -307,6 +335,7 @@
 obj-$(CONFIG_SND_SOC_TAS2552)	+= snd-soc-tas2552.o
 obj-$(CONFIG_SND_SOC_TAS5086)	+= snd-soc-tas5086.o
 obj-$(CONFIG_SND_SOC_TAS571X)	+= snd-soc-tas571x.o
+obj-$(CONFIG_SND_SOC_TAS5766)	+= snd-soc-tas5766.o
 obj-$(CONFIG_SND_SOC_TFA9879)	+= snd-soc-tfa9879.o
 obj-$(CONFIG_SND_SOC_TLV320AIC23)	+= snd-soc-tlv320aic23.o
 obj-$(CONFIG_SND_SOC_TLV320AIC23_I2C)	+= snd-soc-tlv320aic23-i2c.o
@@ -321,7 +350,25 @@
 obj-$(CONFIG_SND_SOC_TWL6040)	+= snd-soc-twl6040.o
 obj-$(CONFIG_SND_SOC_UDA134X)	+= snd-soc-uda134x.o
 obj-$(CONFIG_SND_SOC_UDA1380)	+= snd-soc-uda1380.o
+obj-$(CONFIG_SND_SOC_WCD9320)	+= snd-soc-wcd9320.o
+obj-$(CONFIG_SND_SOC_WCD9330)	+= snd-soc-wcd9330.o
+obj-$(CONFIG_SND_SOC_WCD9335)	+= snd-soc-wcd9335.o
+obj-$(CONFIG_SND_SOC_WCD934X)	+= wcd934x/
+ifeq ($(CONFIG_COMMON_CLK_MSM), y)
+	obj-$(CONFIG_AUDIO_EXT_CLK)	+= audio-ext-clock.o
+endif
+ifeq ($(CONFIG_COMMON_CLK_QCOM), y)
+	obj-$(CONFIG_AUDIO_EXT_CLK)     += audio-ext-clock-up.o
+endif
+obj-$(CONFIG_SND_SOC_WCD9XXX)   += snd-soc-wcd9xxx.o
+obj-$(CONFIG_SND_SOC_WCD9XXX_V2) += snd-soc-wcd9xxx-v2.o
+obj-$(CONFIG_SND_SOC_WCD_CPE)   += snd-soc-wcd-cpe.o
+obj-$(CONFIG_SND_SOC_WCD_MBHC)  += snd-soc-wcd-mbhc.o
+obj-$(CONFIG_SND_SOC_WSA881X)	+= snd-soc-wsa881x.o
+obj-$(CONFIG_SND_SOC_WSA881X_ANALOG)	+= snd-soc-wsa881x-analog.o
 obj-$(CONFIG_SND_SOC_WL1273)	+= snd-soc-wl1273.o
+obj-$(CONFIG_SND_SOC_WCD_DSP_MGR)	+= snd-soc-wcd-dsp-mgr.o snd-soc-wcd-dsp-utils.o
+obj-$(CONFIG_SND_SOC_WCD_SPI)  += snd-soc-wcd-spi.o
 obj-$(CONFIG_SND_SOC_WM0010)	+= snd-soc-wm0010.o
 obj-$(CONFIG_SND_SOC_WM1250_EV1) += snd-soc-wm1250-ev1.o
 obj-$(CONFIG_SND_SOC_WM2000)	+= snd-soc-wm2000.o
@@ -377,7 +424,10 @@
 obj-$(CONFIG_SND_SOC_WM9713)	+= snd-soc-wm9713.o
 obj-$(CONFIG_SND_SOC_WM_ADSP)	+= snd-soc-wm-adsp.o
 obj-$(CONFIG_SND_SOC_WM_HUBS)	+= snd-soc-wm-hubs.o
+obj-$(CONFIG_SND_SOC_MSM_STUB)  += snd-soc-msm-stub.o
 
 # Amp
 obj-$(CONFIG_SND_SOC_MAX9877)	+= snd-soc-max9877.o
 obj-$(CONFIG_SND_SOC_TPA6130A2)	+= snd-soc-tpa6130a2.o
+obj-y += sdm660_cdc/
+obj-y += msm_sdw/
diff -ruw linux-4.4.115/sound/soc/codecs/wm8804.c linux-4.4.115-fbx/sound/soc/codecs/wm8804.c
--- linux-4.4.115/sound/soc/codecs/wm8804.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/sound/soc/codecs/wm8804.c	2019-07-17 21:26:01.529863250 +0200
@@ -542,7 +542,70 @@
 	.symmetric_rates = 1
 };
 
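+/*
+ * Threaded handler for the optional "wlf,irq" line requested in
+ * wm8804_codec_probe() below: when the unmasked UPD_AUDIO_N status
+ * (INTSTAT bit 4) fires and the S/PDIF status reports non-audio or
+ * non-PCM data, the ALWAYSVALID flag is cleared so the device
+ * outputs zeroes.
+ */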
+static irqreturn_t wm8804_irq(int irq, void *data)
+{
+	struct snd_soc_codec *codec = data;
+	int irq_val, spd_val;
+
+	irq_val = snd_soc_read(codec, WM8804_INTSTAT);
+	if (irq_val < 0) {
+		dev_err(codec->dev, "Failed to read IRQ status: %d\n",
+			irq_val);
+		return IRQ_NONE;
+	}
+
+	if (irq_val & BIT(4)) {
+		spd_val = snd_soc_read(codec, WM8804_SPDSTAT);
+		if ((spd_val & BIT(0)) || (spd_val & BIT(1))) {
+			/*
+			 * AUDIO_N or PCM_N is set: clear the ALWAYSVALID
+			 * flag so the device outputs zeroes until the
+			 * client enables it again.
+			 */
+			snd_soc_update_bits(codec, WM8804_PLL6, BIT(6), 0);
+		}
+	}
+
+	return IRQ_HANDLED;
+}
+
+static int wm8804_codec_probe(struct snd_soc_codec *codec)
+{
+	struct gpio_desc *gpio_int;
+	int irq;
+	int ret;
+
+	gpio_int = devm_gpiod_get_optional(codec->dev, "wlf,irq", GPIOD_IN);
+	if (IS_ERR(gpio_int)) {
+		dev_err(codec->dev, "Failed to get irq line\n");
+		return PTR_ERR(gpio_int);
+	}
+
+	if (gpio_int == NULL) {
+		dev_dbg(codec->dev, "No irq line\n");
+		return 0;
+	}
+
+	irq = gpiod_to_irq(gpio_int);
+	if (irq < 0) {
+		dev_err(codec->dev, "no irq resource found\n");
+		return irq;
+	}
+
+	ret = devm_request_threaded_irq(codec->dev, irq, NULL,
+					wm8804_irq,
+					IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+					"wm8804_irq", codec);
+	if (ret < 0) {
+		dev_err(codec->dev, "Failed to request IRQ: %d\n", ret);
+		return ret;
+	}
+
+	/* Unmask UPD_AUDIO_N */
+	snd_soc_write(codec, WM8804_INTMASK, ~BIT(4));
+
+	return 0;
+}
+
 static const struct snd_soc_codec_driver soc_codec_dev_wm8804 = {
+	.probe =	wm8804_codec_probe,
 	.idle_bias_off = true,
 
 	.dapm_widgets = wm8804_dapm_widgets,
diff -ruw linux-4.4.115/sound/soc/Kconfig linux-4.4.115-fbx/sound/soc/Kconfig
--- linux-4.4.115/sound/soc/Kconfig	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/sound/soc/Kconfig	2019-01-22 16:16:29.491300631 +0100
@@ -51,6 +51,7 @@
 source "sound/soc/omap/Kconfig"
 source "sound/soc/kirkwood/Kconfig"
 source "sound/soc/intel/Kconfig"
+source "sound/soc/msm/Kconfig"
 source "sound/soc/mediatek/Kconfig"
 source "sound/soc/mxs/Kconfig"
 source "sound/soc/pxa/Kconfig"
diff -ruw linux-4.4.115/sound/soc/Makefile linux-4.4.115-fbx/sound/soc/Makefile
--- linux-4.4.115/sound/soc/Makefile	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/sound/soc/Makefile	2019-01-22 16:16:29.491300631 +0100
@@ -28,6 +28,7 @@
 obj-$(CONFIG_SND_SOC)	+= fsl/
 obj-$(CONFIG_SND_SOC)	+= jz4740/
 obj-$(CONFIG_SND_SOC)	+= intel/
+obj-$(CONFIG_SND_SOC)	+= msm/
 obj-$(CONFIG_SND_SOC)	+= mediatek/
 obj-$(CONFIG_SND_SOC)	+= mxs/
 obj-$(CONFIG_SND_SOC)	+= nuc900/
diff -ruw linux-4.4.115/sound/soc/soc-compress.c linux-4.4.115-fbx/sound/soc/soc-compress.c
--- linux-4.4.115/sound/soc/soc-compress.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/sound/soc/soc-compress.c	2019-01-22 16:16:29.671302261 +0100
@@ -409,6 +409,37 @@
 	return ret;
 }
 
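+/*
+ * Run hw_params followed by prepare on a single compress BE. Used both
+ * synchronously and as an async callback below; errors are reported
+ * through fe->err_ops since async work cannot return a value.
+ */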
+static void dpcm_be_hw_params_prepare(void *data)
+{
+	struct snd_compr_stream *cstream = data;
+	struct snd_soc_pcm_runtime *fe = cstream->private_data;
+	struct snd_soc_pcm_runtime *be = cstream->be;
+	int stream, ret;
+
+	if (cstream->direction == SND_COMPRESS_PLAYBACK)
+		stream = SNDRV_PCM_STREAM_PLAYBACK;
+	else
+		stream = SNDRV_PCM_STREAM_CAPTURE;
+
+	ret = dpcm_fe_dai_hw_params_be(fe, be,
+		    &fe->dpcm[stream].hw_params, stream);
+	if (ret < 0) {
+		fe->err_ops = ret;
+		return;
+	}
+
+	ret = dpcm_fe_dai_prepare_be(fe, be, stream);
+	if (ret < 0) {
+		fe->err_ops = ret;
+		return;
+	}
+}
+
+static void dpcm_be_hw_params_prepare_async(void *data, async_cookie_t cookie)
+{
+	dpcm_be_hw_params_prepare(data);
+}
+
 static int soc_compr_set_params_fe(struct snd_compr_stream *cstream,
 					struct snd_compr_params *params)
 {
@@ -416,7 +447,11 @@
 	struct snd_pcm_substream *fe_substream =
 		 fe->pcm->streams[cstream->direction].substream;
 	struct snd_soc_platform *platform = fe->platform;
-	int ret = 0, stream;
+	struct snd_soc_pcm_runtime *be_list[DPCM_MAX_BE_USERS];
+	struct snd_soc_dpcm *dpcm;
+	int ret = 0, stream, i, j = 0;
+
+	ASYNC_DOMAIN_EXCLUSIVE(async_domain);
 
 	if (cstream->direction == SND_COMPRESS_PLAYBACK)
 		stream = SNDRV_PCM_STREAM_PLAYBACK;
@@ -425,22 +460,35 @@
 
 	mutex_lock_nested(&fe->card->mutex, SND_SOC_CARD_CLASS_RUNTIME);
 
-	if (platform->driver->compr_ops && platform->driver->compr_ops->set_params) {
-		ret = platform->driver->compr_ops->set_params(cstream, params);
+	if (!(fe->dai_link->async_ops & ASYNC_DPCM_SND_SOC_HW_PARAMS)) {
+		/*
+		 * First call set_params for the platform driver; this
+		 * should configure the SoC side. If the machine driver
+		 * has compress ops, call those as well: the expectation
+		 * is that the platform and machine drivers configure
+		 * everything for this compress path, such as the PCM
+		 * port for the codec.
+		 */
+		if (platform->driver->compr_ops &&
+				platform->driver->compr_ops->set_params) {
+			ret = platform->driver->compr_ops->set_params(cstream,
+								params);
 		if (ret < 0)
 			goto out;
 	}
 
-	if (fe->dai_link->compr_ops && fe->dai_link->compr_ops->set_params) {
+		if (fe->dai_link->compr_ops &&
+					fe->dai_link->compr_ops->set_params) {
 		ret = fe->dai_link->compr_ops->set_params(cstream);
 		if (ret < 0)
 			goto out;
 	}
 
 	/*
-	 * Create an empty hw_params for the BE as the machine driver must
-	 * fix this up to match DSP decoder and ASRC configuration.
-	 * I.e. machine driver fixup for compressed BE is mandatory.
+		 * Create an empty hw_params for the BE as the machine
+		 * driver must fix this up to match DSP decoder and
+		 * ASRC configuration.
+		 * I.e. machine driver fixup for compressed BE is
+		 * mandatory.
 	 */
 	memset(&fe->dpcm[fe_substream->stream].hw_params, 0,
 		sizeof(struct snd_pcm_hw_params));
@@ -454,7 +502,70 @@
 	ret = dpcm_be_dai_prepare(fe, stream);
 	if (ret < 0)
 		goto out;
+	} else {
+		/*
+		 * Create an empty hw_params for the BE as the machine
+		 * driver must fix this up to match DSP decoder and
+		 * ASRC configuration.
+		 * I.e. machine driver fixup for compressed BE is
+		 * mandatory.
+		 */
+		memset(&fe->dpcm[fe_substream->stream].hw_params, 0,
+				sizeof(struct snd_pcm_hw_params));
+
+		fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_FE;
 
+		list_for_each_entry(dpcm,
+				&fe->dpcm[stream].be_clients, list_be) {
+			struct snd_soc_pcm_runtime *be = dpcm->be;
+
+			if (be->dai_link->async_ops &
+				ASYNC_DPCM_SND_SOC_HW_PARAMS) {
+				cstream->be = be;
+				async_schedule_domain(
+				dpcm_be_hw_params_prepare_async,
+				cstream, &async_domain);
+			} else {
+				be_list[j++] = be;
+				if (j == DPCM_MAX_BE_USERS) {
+					dev_dbg(fe->dev,
+						"ASoC: MAX backend users!\n");
+					break;
+				}
+			}
+		}
+		for (i = 0; i < j; i++) {
+			cstream->be = be_list[i];
+			dpcm_be_hw_params_prepare(cstream);
+		}
+		/*
+		 * First call set_params for the platform driver; this
+		 * should configure the SoC side. If the machine driver
+		 * has compress ops, call those as well: the expectation
+		 * is that the platform and machine drivers configure
+		 * everything for this compress path, such as the PCM
+		 * port for the codec.
+		 */
+		if (platform->driver->compr_ops &&
+				platform->driver->compr_ops->set_params) {
+			ret = platform->driver->compr_ops->set_params(cstream,
+								    params);
+			if (ret < 0)
+				goto exit;
+		}
+
+		dpcm_dapm_stream_event(fe, stream, SND_SOC_DAPM_STREAM_START);
+
+		if (fe->dai_link->compr_ops &&
+				fe->dai_link->compr_ops->set_params) {
+			ret = fe->dai_link->compr_ops->set_params(cstream);
+			if (ret < 0)
+				goto exit;
+		}
+exit:
+		async_synchronize_full_domain(&async_domain);
+		if (fe->err_ops < 0 || ret < 0)
+			goto out;
+	}
 	dpcm_dapm_stream_event(fe, stream, SND_SOC_DAPM_STREAM_START);
 	fe->dpcm[stream].state = SND_SOC_DPCM_STATE_PREPARE;
 
@@ -532,14 +643,15 @@
 {
 	struct snd_soc_pcm_runtime *rtd = cstream->private_data;
 	struct snd_soc_platform *platform = rtd->platform;
+	int ret = 0;
 
 	mutex_lock_nested(&rtd->pcm_mutex, rtd->pcm_subclass);
 
 	if (platform->driver->compr_ops && platform->driver->compr_ops->pointer)
-		 platform->driver->compr_ops->pointer(cstream, tstamp);
+		ret = platform->driver->compr_ops->pointer(cstream, tstamp);
 
 	mutex_unlock(&rtd->pcm_mutex);
-	return 0;
+	return ret;
 }
 
 static int soc_compr_copy(struct snd_compr_stream *cstream,
@@ -558,6 +670,22 @@
 	return ret;
 }
 
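+/*
+ * Forward set_next_track_param (e.g. codec options for the next track
+ * in a gapless sequence) to the platform driver, if it implements it.
+ */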
+static int sst_compr_set_next_track_param(struct snd_compr_stream *cstream,
+				union snd_codec_options *codec_options)
+{
+	struct snd_soc_pcm_runtime *rtd = cstream->private_data;
+	struct snd_soc_platform *platform = rtd->platform;
+	int ret = 0;
+
+	if (platform->driver->compr_ops &&
+			platform->driver->compr_ops->set_next_track_param)
+		ret = platform->driver->compr_ops->set_next_track_param(cstream,
+								codec_options);
+
+	return ret;
+}
+
 static int soc_compr_set_metadata(struct snd_compr_stream *cstream,
 				struct snd_compr_metadata *metadata)
 {
@@ -590,6 +718,7 @@
 	.free		= soc_compr_free,
 	.set_params	= soc_compr_set_params,
 	.set_metadata   = soc_compr_set_metadata,
+	.set_next_track_param	= sst_compr_set_next_track_param,
 	.get_metadata	= soc_compr_get_metadata,
 	.get_params	= soc_compr_get_params,
 	.trigger	= soc_compr_trigger,
@@ -606,6 +735,7 @@
 	.set_params	= soc_compr_set_params_fe,
 	.get_params	= soc_compr_get_params,
 	.set_metadata   = soc_compr_set_metadata,
+	.set_next_track_param	= sst_compr_set_next_track_param,
 	.get_metadata	= soc_compr_get_metadata,
 	.trigger	= soc_compr_trigger_fe,
 	.pointer	= soc_compr_pointer,
@@ -721,8 +851,16 @@
 	rtd->compr = compr;
 	compr->private_data = rtd;
 
-	printk(KERN_INFO "compress asoc: %s <-> %s mapping ok\n", codec_dai->name,
-		cpu_dai->name);
+	if (platform->driver->pcm_new) {
+		ret = platform->driver->pcm_new(rtd);
+		if (ret < 0) {
+			pr_err("asoc: compress pcm constructor failed\n");
+			goto compr_err;
+		}
+	}
+
+	dev_dbg(rtd->card->dev, "compress asoc: %s <-> %s mapping ok\n",
+		codec_dai->name, cpu_dai->name);
 	return ret;
 
 compr_err:
diff -ruw linux-4.4.115/sound/soc/soc-core.c linux-4.4.115-fbx/sound/soc/soc-core.c
--- linux-4.4.115/sound/soc/soc-core.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/sound/soc/soc-core.c	2019-10-29 09:26:26.181228015 +0100
@@ -303,7 +303,7 @@
 	}
 
 	if (!component->debugfs_root) {
-		dev_warn(component->dev,
+		dev_dbg(component->dev,
 			"ASoC: Failed to create component debugfs directory\n");
 		return;
 	}
@@ -328,7 +328,7 @@
 						 codec->component.debugfs_root,
 						 codec, &codec_reg_fops);
 	if (!codec->debugfs_reg)
-		dev_warn(codec->dev,
+		dev_dbg(codec->dev,
 			"ASoC: Failed to create codec register debugfs file\n");
 }
 
@@ -861,11 +861,27 @@
 static const struct snd_soc_dai_ops null_dai_ops = {
 };
 
-static struct snd_soc_component *soc_find_component(
+/**
+ * soc_find_component - find a component in the ASoC core component_list
+ *
+ * @of_node: of_node of the component to query.
+ * @name: name of the component to query.
+ *
+ * Checks whether a component is already registered with the ASoC core.
+ *
+ * Return: the component handle on success, NULL otherwise.
+ */
+struct snd_soc_component *soc_find_component(
 	const struct device_node *of_node, const char *name)
 {
 	struct snd_soc_component *component;
 
+	if (!of_node && !name) {
+		pr_err("%s: Either of_node or name must be valid\n",
+			__func__);
+		return NULL;
+	}
+
 	lockdep_assert_held(&client_mutex);
 
 	list_for_each_entry(component, &component_list, list) {
@@ -879,6 +895,7 @@
 
 	return NULL;
 }
+EXPORT_SYMBOL(soc_find_component);
 
 static struct snd_soc_dai *snd_soc_find_dai(
 	const struct snd_soc_dai_link_component *dlc)
@@ -2294,8 +2311,7 @@
 
 	if (dai->driver->ops->mute_stream)
 		return dai->driver->ops->mute_stream(dai, mute, direction);
-	else if (direction == SNDRV_PCM_STREAM_PLAYBACK &&
-		 dai->driver->ops->digital_mute)
+	else if (dai->driver->ops->digital_mute)
 		return dai->driver->ops->digital_mute(dai, mute);
 	else
 		return -ENOTSUPP;
@@ -2437,6 +2453,7 @@
 	card->instantiated = 0;
 	mutex_init(&card->mutex);
 	mutex_init(&card->dapm_mutex);
+	mutex_init(&card->dapm_power_mutex);
 
 	ret = snd_soc_instantiate_card(card);
 	if (ret != 0)
@@ -3040,6 +3057,18 @@
 }
 
 /**
+ * snd_soc_card_change_online_state - Mark if soc card is online/offline
+ *
+ * @soc_card : soc_card to mark
+ * @online : non-zero to mark the card online, zero to mark it offline
+ */
+void snd_soc_card_change_online_state(struct snd_soc_card *soc_card, int online)
+{
+	if (soc_card && soc_card->snd_card)
+		snd_card_change_online_state(soc_card->snd_card, online);
+}
+EXPORT_SYMBOL(snd_soc_card_change_online_state);
+
+/**
  * snd_soc_register_codec - Register a codec with the ASoC core
  *
  * @dev: The parent device for this codec
@@ -3588,6 +3617,63 @@
 	return ret;
 }
 
+/**
+ * snd_soc_info_multi_ext - external single mixer info callback
+ * @kcontrol: mixer control
+ * @uinfo: control element information
+ *
+ * Callback to provide information about a single external mixer control
+ * that accepts multiple inputs.
+ *
+ * Returns 0 for success.
+ */
+int snd_soc_info_multi_ext(struct snd_kcontrol *kcontrol,
+	struct snd_ctl_elem_info *uinfo)
+{
+	struct soc_multi_mixer_control *mc =
+		(struct soc_multi_mixer_control *)kcontrol->private_value;
+	int platform_max;
+
+	if (!mc->platform_max)
+		mc->platform_max = mc->max;
+	platform_max = mc->platform_max;
+
+	if (platform_max == 1 && !strnstr(kcontrol->id.name, " Volume", 30))
+		uinfo->type = SNDRV_CTL_ELEM_TYPE_BOOLEAN;
+	else
+		uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
+
+	uinfo->count = mc->count;
+	uinfo->value.integer.min = 0;
+	uinfo->value.integer.max = platform_max;
+	return 0;
+}
+EXPORT_SYMBOL_GPL(snd_soc_info_multi_ext);
+
+/**
+ * snd_soc_dai_get_channel_map - get DAI audio channel map
+ * @dai: DAI
+ * @tx_num: number of TX channels
+ * @tx_slot: pointer to an array filled with the TX slot numbers used by
+ *           channels 0..tx_num-1
+ * @rx_num: number of RX channels
+ * @rx_slot: pointer to an array filled with the RX slot numbers used by
+ *           channels 0..rx_num-1
+ *
+ * Query the relationship between channel number and TDM slot number.
+ */
+int snd_soc_dai_get_channel_map(struct snd_soc_dai *dai,
+	unsigned int *tx_num, unsigned int *tx_slot,
+	unsigned int *rx_num, unsigned int *rx_slot)
+{
+	if (dai->driver && dai->driver->ops &&
+			dai->driver->ops->get_channel_map)
+		return dai->driver->ops->get_channel_map(dai, tx_num, tx_slot,
+			rx_num, rx_slot);
+	else
+		return -EINVAL;
+}
+EXPORT_SYMBOL_GPL(snd_soc_dai_get_channel_map);
+
 int snd_soc_of_get_dai_name(struct device_node *of_node,
 			    const char **dai_name)
 {
diff -ruw linux-4.4.115/sound/soc/soc-dapm.c linux-4.4.115-fbx/sound/soc/soc-dapm.c
--- linux-4.4.115/sound/soc/soc-dapm.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/sound/soc/soc-dapm.c	2019-10-29 09:26:26.181228015 +0100
@@ -78,8 +78,7 @@
 	[snd_soc_dapm_dai_link] = 2,
 	[snd_soc_dapm_dai_in] = 4,
 	[snd_soc_dapm_dai_out] = 4,
-	[snd_soc_dapm_aif_in] = 4,
-	[snd_soc_dapm_aif_out] = 4,
+	[snd_soc_dapm_adc] = 4,
 	[snd_soc_dapm_mic] = 5,
 	[snd_soc_dapm_mux] = 6,
 	[snd_soc_dapm_demux] = 6,
@@ -88,7 +87,8 @@
 	[snd_soc_dapm_mixer] = 8,
 	[snd_soc_dapm_mixer_named_ctl] = 8,
 	[snd_soc_dapm_pga] = 9,
-	[snd_soc_dapm_adc] = 10,
+	[snd_soc_dapm_aif_in] = 9,
+	[snd_soc_dapm_aif_out] = 9,
 	[snd_soc_dapm_out_drv] = 11,
 	[snd_soc_dapm_hp] = 11,
 	[snd_soc_dapm_spk] = 11,
@@ -100,7 +100,9 @@
 static int dapm_down_seq[] = {
 	[snd_soc_dapm_pre] = 0,
 	[snd_soc_dapm_kcontrol] = 1,
-	[snd_soc_dapm_adc] = 2,
+	[snd_soc_dapm_aif_in] = 2,
+	[snd_soc_dapm_aif_out] = 2,
+	[snd_soc_dapm_adc] = 5,
 	[snd_soc_dapm_hp] = 3,
 	[snd_soc_dapm_spk] = 3,
 	[snd_soc_dapm_line] = 3,
@@ -114,8 +116,6 @@
 	[snd_soc_dapm_micbias] = 8,
 	[snd_soc_dapm_mux] = 9,
 	[snd_soc_dapm_demux] = 9,
-	[snd_soc_dapm_aif_in] = 10,
-	[snd_soc_dapm_aif_out] = 10,
 	[snd_soc_dapm_dai_in] = 10,
 	[snd_soc_dapm_dai_out] = 10,
 	[snd_soc_dapm_dai_link] = 11,
@@ -282,6 +282,8 @@
 	mutex_lock(&card->dapm_mutex);
 
 	list_for_each_entry(w, &card->widgets, list) {
+		if (w->ignore_suspend)
+			continue;
 		if (w->is_ep) {
 			dapm_mark_dirty(w, "Rechecking endpoints");
 			if (w->is_ep & SND_SOC_DAPM_EP_SINK)
@@ -429,13 +431,14 @@
 	kfree(data);
 }
 
-static struct snd_soc_dapm_widget_list *dapm_kcontrol_get_wlist(
+struct snd_soc_dapm_widget_list *dapm_kcontrol_get_wlist(
 	const struct snd_kcontrol *kcontrol)
 {
 	struct dapm_kcontrol_data *data = snd_kcontrol_chip(kcontrol);
 
 	return data->wlist;
 }
+EXPORT_SYMBOL(dapm_kcontrol_get_wlist);
 
 static int dapm_kcontrol_add_widget(struct snd_kcontrol *kcontrol,
 	struct snd_soc_dapm_widget *widget)
@@ -740,7 +743,7 @@
 	unsigned int max = mc->max;
 	unsigned int mask = (1 << fls(max)) - 1;
 	unsigned int invert = mc->invert;
-	unsigned int val;
+	unsigned int val = 0;
 
 	if (reg != SND_SOC_NOPM) {
 		soc_dapm_read(p->sink->dapm, reg, &val);
@@ -1490,7 +1493,7 @@
 		/* Do we need to apply any queued changes? */
 		if (sort[w->id] != cur_sort || w->reg != cur_reg ||
 		    w->dapm != cur_dapm || w->subseq != cur_subseq) {
-			if (!list_empty(&pending))
+			if (cur_dapm && !list_empty(&pending))
 				dapm_seq_run_coalesced(card, &pending);
 
 			if (cur_dapm && cur_dapm->seq_notifier) {
@@ -1548,12 +1551,17 @@
 			break;
 		}
 
+		/* Log each widget power-up/power-down transition */
+		dev_dbg(w->dapm->dev, "dapm: powering %s widget %s\n",
+			power_up ? "up" : "down", w->name);
+
 		if (ret < 0)
 			dev_err(w->dapm->dev,
 				"ASoC: Failed to apply widget power: %d\n", ret);
 	}
 
-	if (!list_empty(&pending))
+	if (cur_dapm && !list_empty(&pending))
 		dapm_seq_run_coalesced(card, &pending);
 
 	if (cur_dapm && cur_dapm->seq_notifier) {
@@ -1785,10 +1793,13 @@
 	LIST_HEAD(down_list);
 	ASYNC_DOMAIN_EXCLUSIVE(async_domain);
 	enum snd_soc_bias_level bias;
+	struct snd_soc_platform *p;
+	struct snd_soc_codec *c;
 
 	lockdep_assert_held(&card->dapm_mutex);
 
 	trace_snd_soc_dapm_start(card);
+	mutex_lock(&card->dapm_power_mutex);
 
 	list_for_each_entry(d, &card->dapm_list, list) {
 		if (dapm_idle_bias_off(d))
@@ -1866,7 +1877,9 @@
 	dapm_pre_sequence_async(&card->dapm, 0);
 	/* Run other bias changes in parallel */
 	list_for_each_entry(d, &card->dapm_list, list) {
-		if (d != &card->dapm)
+		p = snd_soc_dapm_to_platform(d);
+		c = snd_soc_dapm_to_codec(d);
+		if ((d != &card->dapm) && (c || p))
 			async_schedule_domain(dapm_pre_sequence_async, d,
 						&async_domain);
 	}
@@ -1890,7 +1903,9 @@
 
 	/* Run all the bias changes in parallel */
 	list_for_each_entry(d, &card->dapm_list, list) {
-		if (d != &card->dapm)
+		p = snd_soc_dapm_to_platform(d);
+		c = snd_soc_dapm_to_codec(d);
+		if ((d != &card->dapm) && (c || p))
 			async_schedule_domain(dapm_post_sequence_async, d,
 						&async_domain);
 	}
@@ -1907,6 +1922,7 @@
 	pop_dbg(card->dev, card->pop_time,
 		"DAPM sequencing finished, waiting %dms\n", card->pop_time);
 	pop_wait(card->pop_time);
+	mutex_unlock(&card->dapm_power_mutex);
 
 	trace_snd_soc_dapm_done(card);
 
@@ -2616,7 +2632,6 @@
 		dapm_mark_dirty(widgets[dir], "Route added");
 	}
 
-	if (dapm->card->instantiated && path->connect)
 		dapm_path_invalidate(path);
 
 	return 0;
@@ -3905,6 +3920,9 @@
 	for (i = 0; i < rtd->num_codecs; i++) {
 		struct snd_soc_dai *codec_dai = rtd->codec_dais[i];
 
+		if (!cpu_dai->component->codec)
+			continue;
+
 		/* connect BE DAI playback if widgets are valid */
 		if (codec_dai->playback_widget && cpu_dai->playback_widget) {
 			source = cpu_dai->playback_widget;
diff -ruw linux-4.4.115/sound/soc/soc-ops.c linux-4.4.115-fbx/sound/soc/soc-ops.c
--- linux-4.4.115/sound/soc/soc-ops.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/sound/soc/soc-ops.c	2019-10-29 09:26:26.181228015 +0100
@@ -201,7 +201,10 @@
 
 	uinfo->count = snd_soc_volsw_is_stereo(mc) ? 2 : 1;
 	uinfo->value.integer.min = 0;
+	if (uinfo->type == SNDRV_CTL_ELEM_TYPE_INTEGER)
 	uinfo->value.integer.max = platform_max - mc->min;
+	else
+		uinfo->value.integer.max = platform_max;
 	return 0;
 }
 EXPORT_SYMBOL_GPL(snd_soc_info_volsw);
@@ -221,14 +224,12 @@
 int snd_soc_info_volsw_sx(struct snd_kcontrol *kcontrol,
 			  struct snd_ctl_elem_info *uinfo)
 {
-	struct soc_mixer_control *mc =
-		(struct soc_mixer_control *)kcontrol->private_value;
-
 	snd_soc_info_volsw(kcontrol, uinfo);
 	/* Max represents the number of levels in an SX control not the
-	 * maximum value, so add the minimum value back on
+	 * maximum value; uinfo->value.integer.max has already been set
+	 * to the number of levels by snd_soc_info_volsw() above, so no
+	 * further adjustment is necessary.
 	 */
-	uinfo->value.integer.max += mc->min;
 
 	return 0;
 }
@@ -779,11 +780,11 @@
 	switch (op_flag) {
 	case SNDRV_CTL_TLV_OP_READ:
 		if (params->get)
-			ret = params->get(tlv, count);
+			ret = params->get(kcontrol, tlv, count);
 		break;
 	case SNDRV_CTL_TLV_OP_WRITE:
 		if (params->put)
-			ret = params->put(tlv, count);
+			ret = params->put(kcontrol, tlv, count);
 		break;
 	}
 	return ret;
diff -ruw linux-4.4.115/sound/soc/soc-pcm.c linux-4.4.115-fbx/sound/soc/soc-pcm.c
--- linux-4.4.115/sound/soc/soc-pcm.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/sound/soc/soc-pcm.c	2019-10-29 09:26:26.185228054 +0100
@@ -25,6 +25,7 @@
 #include <linux/workqueue.h>
 #include <linux/export.h>
 #include <linux/debugfs.h>
+#include <linux/dma-mapping.h>
 #include <sound/core.h>
 #include <sound/pcm.h>
 #include <sound/pcm_params.h>
@@ -52,6 +53,26 @@
 	return codec_stream->rates;
 }
 
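+/*
+ * Minimal hardware constraints advertised for hostless dai links
+ * (SND_SOC_DAI_LINK_NO_HOST): no real host I/O takes place, the small
+ * buffer only exists to keep userspace and the ALSA core happy.
+ */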
+static const struct snd_pcm_hardware no_host_hardware = {
+	.info			= SNDRV_PCM_INFO_MMAP |
+					SNDRV_PCM_INFO_MMAP_VALID |
+					SNDRV_PCM_INFO_INTERLEAVED |
+					SNDRV_PCM_INFO_PAUSE |
+					SNDRV_PCM_INFO_RESUME,
+	.formats		= SNDRV_PCM_FMTBIT_S16_LE |
+					SNDRV_PCM_FMTBIT_S32_LE,
+	.period_bytes_min	= PAGE_SIZE >> 2,
+	.period_bytes_max	= PAGE_SIZE >> 1,
+	.periods_min		= 2,
+	.periods_max		= 4,
+	/*
+	 * Increase the max buffer bytes as PAGE_SIZE bytes is
+	 * not enough to encompass all the scenarios sent by
+	 * userspace.
+	 */
+	.buffer_bytes_max	= PAGE_SIZE * 4,
+};
+
 /**
  * snd_soc_runtime_activate() - Increment active count for PCM runtime components
  * @rtd: ASoC PCM runtime that is activated
@@ -156,6 +177,8 @@
 	const struct snd_pcm_hardware *hw)
 {
 	struct snd_pcm_runtime *runtime = substream->runtime;
+	if (!runtime)
+		return 0;
 	runtime->hw.info = hw->info;
 	runtime->hw.formats = hw->formats;
 	runtime->hw.period_bytes_min = hw->period_bytes_min;
@@ -182,8 +205,10 @@
 				be->dai_link->name, event, dir);
 
 		if ((event == SND_SOC_DAPM_STREAM_STOP) &&
-		    (be->dpcm[dir].users >= 1))
+		    (be->dpcm[dir].users >= 1)) {
+			pr_debug("%s: don't close BE\n", __func__);
 			continue;
+		}
 
 		snd_soc_dapm_stream_event(be, dir, event);
 	}
@@ -468,6 +493,8 @@
 	pm_runtime_get_sync(platform->dev);
 
 	mutex_lock_nested(&rtd->pcm_mutex, rtd->pcm_subclass);
+	if (rtd->dai_link->no_host_mode == SND_SOC_DAI_LINK_NO_HOST)
+		snd_soc_set_runtime_hwparams(substream, &no_host_hardware);
 
 	/* startup the audio subsystem */
 	if (cpu_dai->driver->ops && cpu_dai->driver->ops->startup) {
@@ -598,7 +625,7 @@
 		platform->driver->ops->close(substream);
 
 platform_err:
-	if (cpu_dai->driver->ops->shutdown)
+	if (cpu_dai->driver->ops && cpu_dai->driver->ops->shutdown)
 		cpu_dai->driver->ops->shutdown(substream, cpu_dai);
 out:
 	mutex_unlock(&rtd->pcm_mutex);
@@ -674,6 +701,20 @@
 
 	snd_soc_dai_digital_mute(cpu_dai, 1, substream->stream);
 
+	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+		if (snd_soc_runtime_ignore_pmdown_time(rtd)) {
+			/* powered down playback stream now */
+			snd_soc_dapm_stream_event(rtd,
+						  SNDRV_PCM_STREAM_PLAYBACK,
+						  SND_SOC_DAPM_STREAM_STOP);
+		} else {
+			/* start delayed pop wq here for playback streams */
+			rtd->pop_wait = 1;
+			queue_delayed_work(system_power_efficient_wq,
+					   &rtd->delayed_work,
+					   msecs_to_jiffies(rtd->pmdown_time));
+		}
+	}
 	if (cpu_dai->driver->ops->shutdown)
 		cpu_dai->driver->ops->shutdown(substream, cpu_dai);
 
@@ -689,20 +730,7 @@
 	if (platform->driver->ops && platform->driver->ops->close)
 		platform->driver->ops->close(substream);
 
-	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
-		if (snd_soc_runtime_ignore_pmdown_time(rtd)) {
-			/* powered down playback stream now */
-			snd_soc_dapm_stream_event(rtd,
-						  SNDRV_PCM_STREAM_PLAYBACK,
-						  SND_SOC_DAPM_STREAM_STOP);
-		} else {
-			/* start delayed pop wq here for playback streams */
-			rtd->pop_wait = 1;
-			queue_delayed_work(system_power_efficient_wq,
-					   &rtd->delayed_work,
-					   msecs_to_jiffies(rtd->pmdown_time));
-		}
-	} else {
+	if (substream->stream != SNDRV_PCM_STREAM_PLAYBACK) {
 		/* capture streams can be powered down now */
 		snd_soc_dapm_stream_event(rtd, SNDRV_PCM_STREAM_CAPTURE,
 					  SND_SOC_DAPM_STREAM_STOP);
@@ -739,6 +767,11 @@
 
 	mutex_lock_nested(&rtd->pcm_mutex, rtd->pcm_subclass);
 
+	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+		snd_soc_dapm_stream_event(rtd, SNDRV_PCM_STREAM_PLAYBACK,
+					  SND_SOC_DAPM_STREAM_START);
+
 	if (rtd->dai_link->ops && rtd->dai_link->ops->prepare) {
 		ret = rtd->dai_link->ops->prepare(substream);
 		if (ret < 0) {
@@ -787,8 +820,15 @@
 		cancel_delayed_work(&rtd->delayed_work);
 	}
 
-	snd_soc_dapm_stream_event(rtd, substream->stream,
+	if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) {
+		for (i = 0; i < rtd->num_codecs; i++) {
+			codec_dai = rtd->codec_dais[i];
+			if (codec_dai->capture_active == 1)
+				snd_soc_dapm_stream_event(rtd,
+				SNDRV_PCM_STREAM_CAPTURE,
 			SND_SOC_DAPM_STREAM_START);
+		}
+	}
 
 	for (i = 0; i < rtd->num_codecs; i++)
 		snd_soc_dai_digital_mute(rtd->codec_dais[i], 0,
@@ -796,6 +836,13 @@
 	snd_soc_dai_digital_mute(cpu_dai, 0, substream->stream);
 
 out:
+	if (ret < 0 && substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+		pr_err("%s: issuing stream stop for codec_dai due to op failure, ret = %d\n",
+		       __func__, ret);
+		snd_soc_dapm_stream_event(rtd, SNDRV_PCM_STREAM_PLAYBACK,
+					  SND_SOC_DAPM_STREAM_STOP);
+	}
 	mutex_unlock(&rtd->pcm_mutex);
 	return ret;
 }
@@ -844,10 +891,31 @@
 
 	mutex_lock_nested(&rtd->pcm_mutex, rtd->pcm_subclass);
 
+	/* perform any hw_params fixups */
+	if ((rtd->dai_link->no_host_mode == SND_SOC_DAI_LINK_NO_HOST) &&
+				rtd->dai_link->be_hw_params_fixup) {
+		ret = rtd->dai_link->be_hw_params_fixup(rtd,
+				params);
+		if (ret < 0)
+			dev_err(rtd->card->dev, "ASoC: fixup failed for %s\n",
+			rtd->dai_link->name);
+	}
+
 	ret = soc_pcm_params_symmetry(substream, params);
 	if (ret)
 		goto out;
 
 	if (rtd->dai_link->ops && rtd->dai_link->ops->hw_params) {
 		ret = rtd->dai_link->ops->hw_params(substream, params);
 		if (ret < 0) {
@@ -917,6 +985,22 @@
 	cpu_dai->channels = params_channels(params);
 	cpu_dai->sample_bits =
 		snd_pcm_format_physical_width(params_format(params));
+	/*
+	 * Allocate a page for hostless I/O.
+	 * FIXME: rework with alsa-lib changes so that this allocation
+	 * is not required.
+	 */
+	if (rtd->dai_link->no_host_mode == SND_SOC_DAI_LINK_NO_HOST) {
+		substream->dma_buffer.dev.type = SNDRV_DMA_TYPE_DEV;
+		substream->dma_buffer.dev.dev = rtd->dev;
+		substream->dma_buffer.dev.dev->coherent_dma_mask =
+					DMA_BIT_MASK(sizeof(dma_addr_t) * 8);
+		substream->dma_buffer.private_data = NULL;
+
+		arch_setup_dma_ops(substream->dma_buffer.dev.dev,
+				   0, 0, NULL, 0);
+		ret = snd_pcm_lib_malloc_pages(substream, PAGE_SIZE);
+		if (ret < 0)
+			goto platform_err;
+	}
 
 out:
 	mutex_unlock(&rtd->pcm_mutex);
@@ -1000,6 +1084,9 @@
 	if (cpu_dai->driver->ops && cpu_dai->driver->ops->hw_free)
 		cpu_dai->driver->ops->hw_free(substream, cpu_dai);
 
+	if (rtd->dai_link->no_host_mode == SND_SOC_DAI_LINK_NO_HOST)
+		snd_pcm_lib_free_pages(substream);
+
 	mutex_unlock(&rtd->pcm_mutex);
 	return 0;
 }
@@ -1096,6 +1183,9 @@
 	if (platform->driver->ops && platform->driver->ops->pointer)
 		offset = platform->driver->ops->pointer(substream);
 
+	if (platform->driver->delay_blk)
+		return offset;
+
 	if (cpu_dai->driver->ops && cpu_dai->driver->ops->delay)
 		delay += cpu_dai->driver->ops->delay(substream, cpu_dai);
 
@@ -1120,6 +1210,22 @@
 	return offset;
 }
 
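+/*
+ * Platform-specific delay query: when the platform driver provides
+ * delay_blk, soc_pcm_pointer() above skips the DAI delay accumulation
+ * and runtime->delay is filled in here instead.
+ */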
+static int soc_pcm_delay_blk(struct snd_pcm_substream *substream)
+{
+	struct snd_soc_pcm_runtime *rtd = substream->private_data;
+	struct snd_soc_platform *platform = rtd->platform;
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	snd_pcm_sframes_t delay = 0;
+
+	if (platform->driver->delay_blk)
+		delay = platform->driver->delay_blk(substream,
+				rtd->codec_dais[0]);
+
+	runtime->delay = delay;
+
+	return 0;
+}
+
 /* connect a FE and BE */
 static int dpcm_be_connect(struct snd_soc_pcm_runtime *fe,
 		struct snd_soc_pcm_runtime *be, int stream)
@@ -1226,7 +1332,11 @@
 			if (!be->dai_link->no_pcm)
 				continue;
 
-			if (be->cpu_dai->playback_widget == widget)
+			if ((be->cpu_dai->playback_widget == widget &&
+				(be->dai_link->stream_name &&
+				!strcmp(be->dai_link->stream_name,
+				    be->cpu_dai->playback_widget->sname))) ||
+				be->codec_dai->playback_widget == widget)
 				return be;
 
 			for (j = 0; j < be->num_codecs; j++) {
@@ -1243,7 +1353,11 @@
 			if (!be->dai_link->no_pcm)
 				continue;
 
-			if (be->cpu_dai->capture_widget == widget)
+			if ((be->cpu_dai->capture_widget == widget &&
+				(be->dai_link->stream_name &&
+				!strcmp(be->dai_link->stream_name,
+				    be->cpu_dai->capture_widget->sname))) ||
+				be->codec_dai->capture_widget == widget)
 				return be;
 
 			for (j = 0; j < be->num_codecs; j++) {
@@ -1703,14 +1817,14 @@
 
 	dpcm_set_fe_update_state(fe, stream, SND_SOC_DPCM_UPDATE_FE);
 
-	/* shutdown the BEs */
-	dpcm_be_dai_shutdown(fe, substream->stream);
-
 	dev_dbg(fe->dev, "ASoC: close FE %s\n", fe->dai_link->name);
 
 	/* now shutdown the frontend */
 	soc_pcm_close(substream);
 
+	/* shutdown the BEs */
+	dpcm_be_dai_shutdown(fe, substream->stream);
+
 	/* run the stream event for each BE */
 	dpcm_dapm_stream_event(fe, stream, SND_SOC_DAPM_STREAM_STOP);
 
@@ -1789,6 +1903,81 @@
 	return 0;
 }
 
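+/*
+ * Apply hw_params to a single BE on behalf of a FE, running the BE's
+ * hw_params fixup first. On failure, hw_free any already-configured
+ * BEs that no other connected FE is still using.
+ */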
+int dpcm_fe_dai_hw_params_be(struct snd_soc_pcm_runtime *fe,
+	struct snd_soc_pcm_runtime *be,
+	struct snd_pcm_hw_params *params, int stream)
+{
+	int ret;
+	struct snd_soc_dpcm *dpcm;
+	struct snd_pcm_substream *be_substream =
+		snd_soc_dpcm_get_substream(be, stream);
+
+	/* is this op for this BE ? */
+	if (!snd_soc_dpcm_be_can_update(fe, be, stream))
+		return 0;
+
+	/* only allow hw_params() if no connected FEs are running */
+	if (!snd_soc_dpcm_can_be_params(fe, be, stream))
+		return 0;
+
+	if ((be->dpcm[stream].state != SND_SOC_DPCM_STATE_OPEN) &&
+			(be->dpcm[stream].state !=
+				SND_SOC_DPCM_STATE_HW_PARAMS) &&
+			(be->dpcm[stream].state != SND_SOC_DPCM_STATE_HW_FREE))
+		return 0;
+
+	dev_dbg(be->dev, "ASoC: hw_params BE %s\n",
+			fe->dai_link->name);
+
+	/* perform any hw_params fixups */
+	if (be->dai_link->be_hw_params_fixup) {
+		ret = be->dai_link->be_hw_params_fixup(be,
+				params);
+		if (ret < 0) {
+			dev_err(be->dev,
+					"ASoC: hw_params BE fixup failed %d\n",
+					ret);
+			goto unwind;
+		}
+	}
+
+	ret = soc_pcm_hw_params(be_substream, params);
+	if (ret < 0) {
+		dev_err(be->dev, "ASoC: hw_params BE failed %d\n", ret);
+		goto unwind;
+	}
+
+	be->dpcm[stream].state = SND_SOC_DPCM_STATE_HW_PARAMS;
+	return 0;
+
+unwind:
+	/* disable any enabled and non active backends */
+	list_for_each_entry(dpcm, &fe->dpcm[stream].be_clients, list_be) {
+		struct snd_soc_pcm_runtime *be = dpcm->be;
+		struct snd_pcm_substream *be_substream =
+			snd_soc_dpcm_get_substream(be, stream);
+
+		if (!snd_soc_dpcm_be_can_update(fe, be, stream))
+			continue;
+
+		/* only allow hw_free() if no connected FEs are running */
+		if (!snd_soc_dpcm_can_be_free_stop(fe, be, stream))
+			continue;
+
+		if ((be->dpcm[stream].state != SND_SOC_DPCM_STATE_OPEN) &&
+			(be->dpcm[stream].state
+				!= SND_SOC_DPCM_STATE_HW_PARAMS) &&
+			(be->dpcm[stream].state
+				!= SND_SOC_DPCM_STATE_HW_FREE) &&
+			(be->dpcm[stream].state != SND_SOC_DPCM_STATE_STOP))
+			continue;
+
+		soc_pcm_hw_free(be_substream);
+	}
+
+	return ret;
+}
+
 int dpcm_be_dai_hw_params(struct snd_soc_pcm_runtime *fe, int stream)
 {
 	struct snd_soc_dpcm *dpcm;
@@ -2089,6 +2278,35 @@
 	return ret;
 }
 
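+/*
+ * Prepare a single BE on behalf of a FE; a no-op unless this FE is
+ * allowed to update the BE and the BE is in a state that needs it.
+ */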
+int dpcm_fe_dai_prepare_be(struct snd_soc_pcm_runtime *fe,
+		struct snd_soc_pcm_runtime *be, int stream)
+{
+	struct snd_pcm_substream *be_substream =
+		snd_soc_dpcm_get_substream(be, stream);
+	int ret = 0;
+
+	/* is this op for this BE ? */
+	if (!snd_soc_dpcm_be_can_update(fe, be, stream))
+		return 0;
+
+	if ((be->dpcm[stream].state != SND_SOC_DPCM_STATE_HW_PARAMS) &&
+			(be->dpcm[stream].state != SND_SOC_DPCM_STATE_STOP))
+		return 0;
+
+	dev_dbg(be->dev, "ASoC: prepare BE %s\n",
+			fe->dai_link->name);
+
+	ret = soc_pcm_prepare(be_substream);
+	if (ret < 0) {
+		dev_err(be->dev, "ASoC: backend prepare failed %d\n",
+				ret);
+		return ret;
+	}
+
+	be->dpcm[stream].state = SND_SOC_DPCM_STATE_PREPARE;
+	return ret;
+}
+
 static int dpcm_fe_dai_trigger(struct snd_pcm_substream *substream, int cmd)
 {
 	struct snd_soc_pcm_runtime *fe = substream->private_data;
@@ -2140,13 +2358,94 @@
 	return ret;
 }
 
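+/*
+ * async_schedule_domain() callback: prepare one BE outside the FE
+ * context. Failures are stashed in be->err_ops and collected after
+ * async_synchronize_full_domain() returns.
+ */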
+static void dpcm_be_async_prepare(void *data, async_cookie_t cookie)
+{
+	struct snd_soc_dpcm *dpcm = data;
+	struct snd_soc_pcm_runtime *be = dpcm->be;
+	int stream = dpcm->stream;
+	struct snd_pcm_substream *be_substream =
+		snd_soc_dpcm_get_substream(be, stream);
+	int ret;
+
+	dev_dbg(be->dev, "%s ASoC: prepare BE %s\n", __func__,
+					dpcm->fe->dai_link->name);
+	ret = soc_pcm_prepare(be_substream);
+	if (ret < 0) {
+		be->err_ops = ret;
+		dev_err(be->dev, "ASoC: backend prepare failed %d\n",
+				ret);
+		return;
+	}
+	be->dpcm[stream].state = SND_SOC_DPCM_STATE_PREPARE;
+}
+
+void dpcm_be_dai_prepare_async(struct snd_soc_pcm_runtime *fe, int stream,
+					    struct async_domain *domain)
+{
+	struct snd_soc_dpcm *dpcm;
+	struct snd_soc_dpcm *dpcm_async[DPCM_MAX_BE_USERS];
+	int i = 0, j;
+
+	list_for_each_entry(dpcm, &fe->dpcm[stream].be_clients, list_be) {
+		struct snd_soc_pcm_runtime *be = dpcm->be;
+
+		be->err_ops = 0;
+		/* is this op for this BE ? */
+		if (!snd_soc_dpcm_be_can_update(fe, be, stream))
+			continue;
+
+		if ((be->dpcm[stream].state != SND_SOC_DPCM_STATE_HW_PARAMS) &&
+			(be->dpcm[stream].state != SND_SOC_DPCM_STATE_STOP))
+			continue;
+
+		/* does this BE support async op ?*/
+		if ((fe->dai_link->async_ops & ASYNC_DPCM_SND_SOC_PREPARE) &&
+		    (be->dai_link->async_ops & ASYNC_DPCM_SND_SOC_PREPARE)) {
+			dpcm->stream = stream;
+			async_schedule_domain(dpcm_be_async_prepare,
+							    dpcm, domain);
+		} else {
+			dpcm_async[i++] = dpcm;
+			if (i == DPCM_MAX_BE_USERS) {
+				dev_dbg(fe->dev, "ASoC: MAX backend users!\n");
+				break;
+			}
+		}
+	}
+
+	for (j = 0; j < i; j++) {
+		struct snd_soc_dpcm *dpcm = dpcm_async[j];
+		struct snd_soc_pcm_runtime *be = dpcm->be;
+		struct snd_pcm_substream *be_substream =
+			snd_soc_dpcm_get_substream(be, stream);
+		int ret;
+
+		dev_dbg(be->dev, "ASoC: prepare BE %s\n",
+				dpcm->fe->dai_link->name);
+
+		ret = soc_pcm_prepare(be_substream);
+		if (ret < 0) {
+			dev_err(be->dev, "ASoC: backend prepare failed %d\n",
+					ret);
+			be->err_ops = ret;
+			return;
+		}
+
+		be->dpcm[stream].state = SND_SOC_DPCM_STATE_PREPARE;
+	}
+}
+
 static int dpcm_fe_dai_prepare(struct snd_pcm_substream *substream)
 {
 	struct snd_soc_pcm_runtime *fe = substream->private_data;
+	struct snd_soc_dpcm *dpcm;
 	int stream = substream->stream, ret = 0;
+	ASYNC_DOMAIN_EXCLUSIVE(async_domain);
 
 	mutex_lock_nested(&fe->card->mutex, SND_SOC_CARD_CLASS_RUNTIME);
 
+	fe->err_ops = 0;
+
 	dev_dbg(fe->dev, "ASoC: prepare FE %s\n", fe->dai_link->name);
 
 	dpcm_set_fe_update_state(fe, stream, SND_SOC_DPCM_UPDATE_FE);
@@ -2159,17 +2458,48 @@
 		goto out;
 	}
 
+	if (!(fe->dai_link->async_ops & ASYNC_DPCM_SND_SOC_PREPARE)) {
 	ret = dpcm_be_dai_prepare(fe, substream->stream);
 	if (ret < 0)
 		goto out;
+		/* call prepare on the frontend */
+		ret = soc_pcm_prepare(substream);
+		if (ret < 0) {
+			dev_err(fe->dev, "ASoC: prepare FE %s failed\n",
+					fe->dai_link->name);
+			goto out;
+		}
+	} else {
+		dpcm_be_dai_prepare_async(fe, substream->stream,
+							&async_domain);
 
 	/* call prepare on the frontend */
 	ret = soc_pcm_prepare(substream);
 	if (ret < 0) {
+			fe->err_ops = ret;
 		dev_err(fe->dev,"ASoC: prepare FE %s failed\n",
 			fe->dai_link->name);
+		}
+
+		async_synchronize_full_domain(&async_domain);
+
+		/* check if any BE failed */
+		list_for_each_entry(dpcm, &fe->dpcm[stream].be_clients,
+							    list_be) {
+			struct snd_soc_pcm_runtime *be = dpcm->be;
+
+			if (be->err_ops < 0) {
+				ret = be->err_ops;
 		goto out;
 	}
+		}
+
+		/* check if FE failed */
+		if (fe->err_ops < 0) {
+			ret = fe->err_ops;
+			goto out;
+		}
+	}
 
 	/* run the stream event for each BE */
 	dpcm_dapm_stream_event(fe, stream, SND_SOC_DAPM_STREAM_START);
@@ -2182,6 +2512,18 @@
 	return ret;
 }
 
+static int soc_pcm_compat_ioctl(struct snd_pcm_substream *substream,
+		     unsigned int cmd, void *arg)
+{
+	struct snd_soc_pcm_runtime *rtd = substream->private_data;
+	struct snd_soc_platform *platform = rtd->platform;
+
+	if (platform->driver->ops && platform->driver->ops->compat_ioctl)
+		return platform->driver->ops->compat_ioctl(substream,
+			cmd, arg);
+	return snd_pcm_lib_ioctl(substream, cmd, arg);
+}
+
 static int soc_pcm_ioctl(struct snd_pcm_substream *substream,
 		     unsigned int cmd, void *arg)
 {
@@ -2610,9 +2952,27 @@
 			pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream->private_data = rtd;
 		if (capture)
 			pcm->streams[SNDRV_PCM_STREAM_CAPTURE].substream->private_data = rtd;
+		if (platform->driver->pcm_new)
+			rtd->platform->driver->pcm_new(rtd);
 		goto out;
 	}
 
+	/* setup any hostless PCMs - i.e. no host IO is performed */
+	if (rtd->dai_link->no_host_mode) {
+		if (pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream) {
+			pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream->hw_no_buffer = 1;
+			snd_soc_set_runtime_hwparams(
+				pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream,
+				&no_host_hardware);
+		}
+		if (pcm->streams[SNDRV_PCM_STREAM_CAPTURE].substream) {
+			pcm->streams[SNDRV_PCM_STREAM_CAPTURE].substream->hw_no_buffer = 1;
+			snd_soc_set_runtime_hwparams(
+				pcm->streams[SNDRV_PCM_STREAM_CAPTURE].substream,
+				&no_host_hardware);
+		}
+	}
+
 	/* ASoC PCM operations */
 	if (rtd->dai_link->dynamic) {
 		rtd->ops.open		= dpcm_fe_dai_open;
@@ -2622,7 +2982,9 @@
 		rtd->ops.hw_free	= dpcm_fe_dai_hw_free;
 		rtd->ops.close		= dpcm_fe_dai_close;
 		rtd->ops.pointer	= soc_pcm_pointer;
+		rtd->ops.delay_blk	= soc_pcm_delay_blk;
 		rtd->ops.ioctl		= soc_pcm_ioctl;
+		rtd->ops.compat_ioctl   = soc_pcm_compat_ioctl;
 	} else {
 		rtd->ops.open		= soc_pcm_open;
 		rtd->ops.hw_params	= soc_pcm_hw_params;
@@ -2631,7 +2993,9 @@
 		rtd->ops.hw_free	= soc_pcm_hw_free;
 		rtd->ops.close		= soc_pcm_close;
 		rtd->ops.pointer	= soc_pcm_pointer;
+		rtd->ops.delay_blk	= soc_pcm_delay_blk;
 		rtd->ops.ioctl		= soc_pcm_ioctl;
+		rtd->ops.compat_ioctl   = soc_pcm_compat_ioctl;
 	}
 
 	if (platform->driver->ops) {
@@ -2660,7 +3024,7 @@
 
 	pcm->private_free = platform->driver->pcm_free;
 out:
-	dev_info(rtd->card->dev, "%s <-> %s mapping ok\n",
+	dev_dbg(rtd->card->dev, "%s <-> %s mapping ok\n",
 		 (rtd->num_codecs > 1) ? "multicodec" : rtd->codec_dai->name,
 		 cpu_dai->name);
 	return ret;
diff -ruw linux-4.4.115/sound/soc/soc-utils.c linux-4.4.115-fbx/sound/soc/soc-utils.c
--- linux-4.4.115/sound/soc/soc-utils.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/sound/soc/soc-utils.c	2019-01-22 16:16:29.679302334 +0100
@@ -139,6 +139,9 @@
 {
 	int ret;
 
+	memset(&dummy_codec, 0,
+		sizeof(struct snd_soc_codec_driver));
+
 	ret = snd_soc_register_codec(&pdev->dev, &dummy_codec, &dummy_dai, 1);
 	if (ret < 0)
 		return ret;
diff -ruw linux-4.4.115/sound/usb/card.c linux-4.4.115-fbx/sound/usb/card.c
--- linux-4.4.115/sound/usb/card.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/sound/usb/card.c	2019-10-29 09:26:26.189228093 +0100
@@ -45,6 +45,7 @@
 #include <linux/usb/audio.h>
 #include <linux/usb/audio-v2.h>
 #include <linux/module.h>
+#include <linux/usb/audio-v3.h>
 
 #include <sound/control.h>
 #include <sound/core.h>
@@ -110,6 +111,71 @@
 static struct snd_usb_audio *usb_chip[SNDRV_CARDS];
 static struct usb_driver usb_audio_driver;
 
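+/*
+ * Look up a substream by sound card number, PCM device index and
+ * stream direction, and optionally register a disconnect callback on
+ * the owning chip, for out-of-line callers such as the USB audio QMI
+ * service (CONFIG_SND_USB_AUDIO_QMI).
+ */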
+struct snd_usb_substream *find_snd_usb_substream(unsigned int card_num,
+	unsigned int pcm_idx, unsigned int direction, struct snd_usb_audio
+	**uchip, void (*disconnect_cb)(struct snd_usb_audio *chip))
+{
+	int idx;
+	struct snd_usb_stream *as;
+	struct snd_usb_substream *subs = NULL;
+	struct snd_usb_audio *chip = NULL;
+
+	mutex_lock(&register_mutex);
+	/*
+	 * legacy audio snd card number assignment is dynamic. Hence
+	 * search using chip->card->number
+	 */
+	for (idx = 0; idx < SNDRV_CARDS; idx++) {
+		if (!usb_chip[idx])
+			continue;
+		if (usb_chip[idx]->card->number == card_num) {
+			chip = usb_chip[idx];
+			break;
+		}
+	}
+
+	if (!chip || atomic_read(&chip->shutdown)) {
+		pr_debug("%s: instance of usb card #%d does not exist\n",
+			__func__, card_num);
+		goto err;
+	}
+
+	if (pcm_idx >= chip->pcm_devs) {
+		pr_err("%s: invalid pcm dev number %u > %d\n", __func__,
+			pcm_idx, chip->pcm_devs);
+		goto err;
+	}
+
+	if (direction > SNDRV_PCM_STREAM_CAPTURE) {
+		pr_err("%s: invalid direction %u\n", __func__, direction);
+		goto err;
+	}
+
+	list_for_each_entry(as, &chip->pcm_list, list) {
+		if (as->pcm_index == pcm_idx) {
+			subs = &as->substream[direction];
+			if (subs->interface < 0 && !subs->data_endpoint &&
+				!subs->sync_endpoint) {
+				pr_debug("%s: stream disconnected, bail out\n",
+					__func__);
+				subs = NULL;
+				goto err;
+			}
+			goto done;
+		}
+	}
+
+done:
+	chip->card_num = card_num;
+	chip->disconnect_cb = disconnect_cb;
+err:
+	*uchip = chip;
+	if (!subs)
+		pr_debug("%s: substream instance not found\n", __func__);
+	mutex_unlock(&register_mutex);
+	return subs;
+}
+
 /*
  * disconnect streams
  * called from usb_audio_disconnect()
@@ -215,18 +281,45 @@
 	struct usb_device *dev = chip->dev;
 	struct usb_host_interface *host_iface;
 	struct usb_interface_descriptor *altsd;
-	void *control_header;
+	struct usb_interface *usb_iface;
 	int i, protocol;
-	int rest_bytes;
+
+	usb_iface = usb_ifnum_to_if(dev, ctrlif);
+	if (!usb_iface) {
+		snd_printk(KERN_ERR "%d:%u : interface does not exist\n",
+					dev->devnum, ctrlif);
+		return -EINVAL;
+	}
 
 	/* find audiocontrol interface */
-	host_iface = &usb_ifnum_to_if(dev, ctrlif)->altsetting[0];
-	control_header = snd_usb_find_csint_desc(host_iface->extra,
-						 host_iface->extralen,
-						 NULL, UAC_HEADER);
+	host_iface = &usb_iface->altsetting[0];
+	if (!host_iface) {
+		snd_printk(KERN_ERR "Audio Control interface is not available.\n");
+		return -EINVAL;
+	}
+
 	altsd = get_iface_desc(host_iface);
 	protocol = altsd->bInterfaceProtocol;
 
+	/*
+	 * UAC 1.0 devices use AC HEADER Desc for linking AS interfaces;
+	 * UAC 2.0 and 3.0 devices use IAD for linking AS interfaces
+	 */
+
+	switch (protocol) {
+	default:
+		dev_warn(&dev->dev,
+			 "unknown interface protocol %#02x, assuming v1\n",
+			 protocol);
+		/* fall through */
+
+	case UAC_VERSION_1: {
+		void *control_header;
+		struct uac1_ac_header_descriptor *h1;
+		int rest_bytes;
+
+		control_header = snd_usb_find_csint_desc(host_iface->extra,
+					host_iface->extralen, NULL, UAC_HEADER);
 	if (!control_header) {
 		dev_err(&dev->dev, "cannot find UAC_HEADER\n");
 		return -EINVAL;
@@ -241,15 +334,7 @@
 		return -EINVAL;
 	}
 
-	switch (protocol) {
-	default:
-		dev_warn(&dev->dev,
-			 "unknown interface protocol %#02x, assuming v1\n",
-			 protocol);
-		/* fall through */
-
-	case UAC_VERSION_1: {
-		struct uac1_ac_header_descriptor *h1 = control_header;
+		h1 = control_header;
 
 		if (rest_bytes < sizeof(*h1)) {
 			dev_err(&dev->dev, "too short v1 buffer descriptor\n");
@@ -277,10 +362,10 @@
 		break;
 	}
 
-	case UAC_VERSION_2: {
+	case UAC_VERSION_2:
+	case UAC_VERSION_3: {
 		struct usb_interface_assoc_descriptor *assoc =
-			usb_ifnum_to_if(dev, ctrlif)->intf_assoc;
-
+						usb_iface->intf_assoc;
 		if (!assoc) {
 			/*
 			 * Firmware writers cannot count to three.  So to find
@@ -297,7 +382,8 @@
 		}
 
 		if (!assoc) {
-			dev_err(&dev->dev, "Audio class v2 interfaces need an interface association\n");
+			dev_err(&dev->dev, "Audio class V%d interfaces need an interface association\n",
+					protocol);
 			return -EINVAL;
 		}
 
@@ -329,6 +415,7 @@
 	list_for_each_entry_safe(ep, n, &chip->ep_list, list)
 		snd_usb_endpoint_free(ep);
 
+	mutex_destroy(&chip->dev_lock);
 	mutex_destroy(&chip->mutex);
 	kfree(chip);
 	return 0;
@@ -384,6 +471,7 @@
 	}
 
 	mutex_init(&chip->mutex);
+	mutex_init(&chip->dev_lock);
 	init_waitqueue_head(&chip->shutdown_wait);
 	chip->index = idx;
 	chip->dev = dev;
@@ -495,6 +583,15 @@
 	struct usb_host_interface *alts;
 	int ifnum;
 	u32 id;
+	struct usb_interface_assoc_descriptor *assoc;
+
+	assoc = intf->intf_assoc;
+	if (assoc && assoc->bFunctionClass == USB_CLASS_AUDIO &&
+	    assoc->bFunctionProtocol == UAC_VERSION_3 &&
+	    assoc->bFunctionSubClass == FULL_ADC_PROFILE) {
+		dev_info(&dev->dev, "No support for full-fledged ADC 3.0 yet!!\n");
+		return -EINVAL;
+	}
 
 	alts = &intf->altsetting[0];
 	ifnum = get_iface_desc(alts)->bInterfaceNumber;
@@ -583,6 +680,8 @@
 	usb_chip[chip->index] = chip;
 	chip->num_interfaces++;
 	usb_set_intfdata(intf, chip);
+	intf->needs_remote_wakeup = 1;
+	usb_enable_autosuspend(chip->dev);
 	atomic_dec(&chip->active);
 	mutex_unlock(&register_mutex);
 	return 0;
@@ -612,6 +711,9 @@
 
 	card = chip->card;
 
+	if (chip->disconnect_cb)
+		chip->disconnect_cb(chip);
+
 	mutex_lock(&register_mutex);
 	if (atomic_inc_return(&chip->shutdown) == 1) {
 		struct snd_usb_stream *as;
diff -ruw linux-4.4.115/sound/usb/card.h linux-4.4.115-fbx/sound/usb/card.h
--- linux-4.4.115/sound/usb/card.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/sound/usb/card.h	2019-01-22 16:16:29.695302479 +0100
@@ -167,4 +167,8 @@
 	struct list_head list;
 };
 
+struct snd_usb_substream *find_snd_usb_substream(unsigned int card_num,
+	unsigned int pcm_idx, unsigned int direction, struct snd_usb_audio
+	**uchip, void (*disconnect_cb)(struct snd_usb_audio *chip));
+
 #endif /* __USBAUDIO_CARD_H */
diff -ruw linux-4.4.115/sound/usb/clock.c linux-4.4.115-fbx/sound/usb/clock.c
--- linux-4.4.115/sound/usb/clock.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/sound/usb/clock.c	2019-01-22 16:16:29.695302479 +0100
@@ -427,6 +427,10 @@
 
 	case UAC_VERSION_2:
 		return set_sample_rate_v2(chip, iface, alts, fmt, rate);
+
+	/* Clock rate is fixed at 48 kHz for BADD devices */
+	case UAC_VERSION_3:
+		return 0;
 	}
 }
 
diff -ruw linux-4.4.115/sound/usb/endpoint.c linux-4.4.115-fbx/sound/usb/endpoint.c
--- linux-4.4.115/sound/usb/endpoint.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/sound/usb/endpoint.c	2019-01-22 16:16:29.695302479 +0100
@@ -357,7 +357,7 @@
 		err = usb_submit_urb(ctx->urb, GFP_ATOMIC);
 		if (err < 0)
 			usb_audio_err(ep->chip,
-				"Unable to submit urb #%d: %d (urb %p)\n",
+				"Unable to submit urb #%d: %d (urb %pK)\n",
 				ctx->index, err, ctx->urb);
 		else
 			set_bit(ctx->index, &ep->active_mask);
@@ -462,7 +462,7 @@
 		    ep->iface == alts->desc.bInterfaceNumber &&
 		    ep->altsetting == alts->desc.bAlternateSetting) {
 			usb_audio_dbg(ep->chip,
-				      "Re-using EP %x in iface %d,%d @%p\n",
+				      "Re-using EP %x in iface %d,%d @%pK\n",
 					ep_num, ep->iface, ep->altsetting, ep);
 			goto __exit_unlock;
 		}
diff -ruw linux-4.4.115/sound/usb/format.c linux-4.4.115-fbx/sound/usb/format.c
--- linux-4.4.115/sound/usb/format.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/sound/usb/format.c	2019-01-22 16:16:29.695302479 +0100
@@ -20,6 +20,7 @@
 #include <linux/usb.h>
 #include <linux/usb/audio.h>
 #include <linux/usb/audio-v2.h>
+#include <linux/usb/audio-v3.h>
 
 #include <sound/core.h>
 #include <sound/pcm.h>
@@ -69,6 +70,35 @@
 		format <<= 1;
 		break;
 	}
+
+	case UAC_VERSION_3: {
+		switch (fp->maxpacksize) {
+		case BADD_MAXPSIZE_SYNC_MONO_16:
+		case BADD_MAXPSIZE_SYNC_STEREO_16:
+		case BADD_MAXPSIZE_ASYNC_MONO_16:
+		case BADD_MAXPSIZE_ASYNC_STEREO_16: {
+			sample_width = BIT_RES_16_BIT;
+			sample_bytes = SUBSLOTSIZE_16_BIT;
+			break;
+		}
+
+		case BADD_MAXPSIZE_SYNC_MONO_24:
+		case BADD_MAXPSIZE_SYNC_STEREO_24:
+		case BADD_MAXPSIZE_ASYNC_MONO_24:
+		case BADD_MAXPSIZE_ASYNC_STEREO_24: {
+			sample_width = BIT_RES_24_BIT;
+			sample_bytes = SUBSLOTSIZE_24_BIT;
+			break;
+		}
+
+		default:
+			usb_audio_err(chip, "%u:%d : Invalid wMaxPacketSize\n",
+				      fp->iface, fp->altsetting);
+			return pcm_formats;
+		}
+		format = 1 << format;
+		break;
+	}
 	}
 
 	if ((pcm_formats == 0) &&
@@ -366,6 +396,22 @@
 	return ret;
 }
 
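+/*
+ * BADD (UAC3 Basic Audio Device Definition) devices run at a single
+ * fixed sample rate, so build a one-entry rate table instead of
+ * parsing rate descriptors.
+ */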
+static int badd_set_audio_rate_v3(struct snd_usb_audio *chip,
+		   struct audioformat *fp)
+{
+	unsigned int rate;
+
+	fp->rate_table = kmalloc(sizeof(int), GFP_KERNEL);
+	if (fp->rate_table == NULL)
+		return -ENOMEM;
+
+	fp->nr_rates = 1;
+	rate = BADD_SAMPLING_RATE;
+	fp->rate_min = fp->rate_max = fp->rate_table[0] = rate;
+	fp->rates |= snd_pcm_rate_to_rate_bit(rate);
+	return 0;
+}
+
 /*
  * parse the format type I and III descriptors
  */
@@ -415,6 +461,9 @@
 		/* fp->channels is already set in this case */
 		ret = parse_audio_format_rates_v2(chip, fp);
 		break;
+	case UAC_VERSION_3:
+		ret = badd_set_audio_rate_v3(chip, fp);
+		break;
 	}
 
 	if (fp->channels < 1) {
@@ -502,6 +551,9 @@
 			 fmt->bFormatType);
 		return -ENOTSUPP;
 	}
+	if (fp->protocol == UAC_VERSION_3)
+		fp->fmt_type = UAC_FORMAT_TYPE_I;
+	else
 	fp->fmt_type = fmt->bFormatType;
 	if (err < 0)
 		return err;
diff -ruw linux-4.4.115/sound/usb/Kconfig linux-4.4.115-fbx/sound/usb/Kconfig
--- linux-4.4.115/sound/usb/Kconfig	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/sound/usb/Kconfig	2019-01-22 16:16:29.691302442 +0100
@@ -162,5 +162,13 @@
 
 source "sound/usb/line6/Kconfig"
 
+config SND_USB_AUDIO_QMI
+	tristate "USB Audio QMI Service driver"
+	depends on MSM_QMI_INTERFACE
+	help
+	  Starts USB Audio QMI server to communicate with remote entity
+	  to perform operations like enable or disable particular audio
+	  stream on a connected USB device.
+
 endif	# SND_USB
 
diff -ruw linux-4.4.115/sound/usb/Makefile linux-4.4.115-fbx/sound/usb/Makefile
--- linux-4.4.115/sound/usb/Makefile	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/sound/usb/Makefile	2019-01-22 16:16:29.691302442 +0100
@@ -13,7 +13,8 @@
 			pcm.o \
 			proc.o \
 			quirks.o \
-			stream.o
+			stream.o \
+			badd.o
 
 snd-usbmidi-lib-objs := midi.o
 
@@ -26,3 +27,4 @@
 
 obj-$(CONFIG_SND) += misc/ usx2y/ caiaq/ 6fire/ hiface/ bcd2000/
 obj-$(CONFIG_SND_USB_LINE6)	+= line6/
+obj-$(CONFIG_SND_USB_AUDIO_QMI) += usb_audio_qmi_v01.o usb_audio_qmi_svc.o
diff -ruw linux-4.4.115/sound/usb/mixer.c linux-4.4.115-fbx/sound/usb/mixer.c
--- linux-4.4.115/sound/usb/mixer.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/sound/usb/mixer.c	2019-10-29 09:26:26.193228133 +0100
@@ -50,6 +50,7 @@
 #include <linux/usb.h>
 #include <linux/usb/audio.h>
 #include <linux/usb/audio-v2.h>
+#include <linux/usb/audio-v3.h>
 
 #include <sound/core.h>
 #include <sound/control.h>
@@ -184,6 +185,17 @@
 	/* we just parse the header */
 	struct uac_feature_unit_descriptor *hdr = NULL;
 
+	if (state->mixer->protocol == UAC_VERSION_3) {
+		int i;
+
+		for (i = 0; i < NUM_BADD_DESCS; i++) {
+			hdr = (void *)badd_desc_list[i];
+			if (hdr->bUnitID == unit)
+				return hdr;
+		}
+
+		return NULL;
+	}
 	while ((hdr = snd_usb_find_desc(state->buffer, state->buflen, hdr,
 					USB_DT_CS_INTERFACE)) != NULL) {
 		if (hdr->bLength >= 4 &&
@@ -203,11 +215,6 @@
 				    int index, char *buf, int maxlen)
 {
 	int len = usb_string(state->chip->dev, index, buf, maxlen - 1);
-
-	if (len < 0)
-		return 0;
-
-	buf[len] = 0;
 	return len;
 }
 
@@ -723,7 +730,7 @@
 				term->channels = d->bNrChannels;
 				term->chconfig = le16_to_cpu(d->wChannelConfig);
 				term->name = d->iTerminal;
-			} else { /* UAC_VERSION_2 */
+			} else if (state->mixer->protocol == UAC_VERSION_2) {
 				struct uac2_input_terminal_descriptor *d = p1;
 
 				/* call recursively to verify that the
@@ -740,6 +747,24 @@
 				term->channels = d->bNrChannels;
 				term->chconfig = le32_to_cpu(d->bmChannelConfig);
 				term->name = d->iTerminal;
+			} else { /* UAC_VERSION_3 */
+				struct uac3_input_terminal_descriptor *d = p1;
+
+				err = check_input_term(state,
+							d->bCSourceID, term);
+				if (err < 0)
+					return err;
+
+				term->id = id;
+				term->type = d->wTerminalType;
+				if (d->wClusterDescrID == CLUSTER_ID_MONO) {
+					term->channels = NUM_CHANNELS_MONO;
+					term->chconfig = BADD_CH_CONFIG_MONO;
+				} else {
+					term->channels = NUM_CHANNELS_STEREO;
+					term->chconfig = BADD_CH_CONFIG_STEREO;
+				}
+				term->name = d->wTerminalDescrStr;
 			}
 			return 0;
 		case UAC_FEATURE_UNIT: {
@@ -757,28 +782,61 @@
 			return 0;
 		}
 		case UAC_SELECTOR_UNIT:
-		case UAC2_CLOCK_SELECTOR: {
+		/* UAC3_MIXER_UNIT_V3 */
+		case UAC2_CLOCK_SELECTOR:
+		/* UAC3_CLOCK_SOURCE */ {
+			if (state->mixer->protocol == UAC_VERSION_3
+				&& hdr[2] == UAC3_CLOCK_SOURCE) {
+				struct uac3_clock_source_descriptor *d = p1;
+
+				term->type = d->bDescriptorSubtype << 16;
+				term->id = id;
+				term->name = d->wClockSourceStr;
+			} else if (state->mixer->protocol == UAC_VERSION_3
+					&& hdr[2] == UAC3_MIXER_UNIT_V3) {
+				struct uac3_mixer_unit_descriptor *d = p1;
+
+				term->type = d->bDescriptorSubtype << 16;
+				if (d->wClusterDescrID == CLUSTER_ID_MONO) {
+					term->channels = NUM_CHANNELS_MONO;
+					term->chconfig = BADD_CH_CONFIG_MONO;
+				} else {
+					term->channels = NUM_CHANNELS_STEREO;
+					term->chconfig = BADD_CH_CONFIG_STEREO;
+				}
+				term->name = d->wMixerDescrStr;
+			} else {
 			struct uac_selector_unit_descriptor *d = p1;
-			/* call recursively to retrieve the channel info */
-			err = check_input_term(state, d->baSourceID[0], term);
+				/* call recursively to retrieve channel info */
+				err = check_input_term(state,
+							d->baSourceID[0], term);
 			if (err < 0)
 				return err;
-			term->type = d->bDescriptorSubtype << 16; /* virtual type */
+				/* virtual type */
+				term->type = d->bDescriptorSubtype << 16;
 			term->id = id;
 			term->name = uac_selector_unit_iSelector(d);
+			}
 			return 0;
 		}
 		case UAC1_PROCESSING_UNIT:
 		case UAC1_EXTENSION_UNIT:
 		/* UAC2_PROCESSING_UNIT_V2 */
 		/* UAC2_EFFECT_UNIT */
+		/* UAC3_FEATURE_UNIT_V3 */
 		case UAC2_EXTENSION_UNIT_V2: {
+			if (state->mixer->protocol == UAC_VERSION_3) {
+				struct uac_feature_unit_descriptor *d = p1;
+
+				id = d->bSourceID;
+			} else {
 			struct uac_processing_unit_descriptor *d = p1;
 
 			if (state->mixer->protocol == UAC_VERSION_2 &&
 				hdr[2] == UAC2_EFFECT_UNIT) {
 				/* UAC2/UAC1 unit IDs overlap here in an
-				 * uncompatible way. Ignore this unit for now.
+					 * incompatible way. Ignore this unit
+					 * for now.
 				 */
 				return 0;
 			}
@@ -787,12 +845,19 @@
 				id = d->baSourceID[0];
 				break; /* continue to parse */
 			}
-			term->type = d->bDescriptorSubtype << 16; /* virtual type */
-			term->channels = uac_processing_unit_bNrChannels(d);
-			term->chconfig = uac_processing_unit_wChannelConfig(d, state->mixer->protocol);
-			term->name = uac_processing_unit_iProcessing(d, state->mixer->protocol);
+				/* virtual type */
+				term->type = d->bDescriptorSubtype << 16;
+				term->channels =
+					uac_processing_unit_bNrChannels(d);
+				term->chconfig =
+					uac_processing_unit_wChannelConfig(
+						d, state->mixer->protocol);
+				term->name = uac_processing_unit_iProcessing(
+						d, state->mixer->protocol);
 			return 0;
 		}
+			break;
+		}
 		case UAC2_CLOCK_SOURCE: {
 			struct uac_clock_source_descriptor *d = p1;
 			term->type = d->bDescriptorSubtype << 16; /* virtual type */
@@ -948,6 +1013,17 @@
 			cval->res = 384;
 		}
 		break;
+
+	case USB_ID(0x1130, 0x1620): /* Logitech Speakers S150 */
+	/* This audio device has 2 channels and explicitly requires the host
+	 * to send a SET_CUR command on the volume control of both channels.
+	 * 7936 = 0x1F00 is the default value.
+	 */
+		if (cval->channels == 2)
+			snd_usb_mixer_set_ctl_value(cval, UAC_SET_CUR,
+						(cval->control << 8) | 2, 7936);
+		break;
+
 	}
 }
 
@@ -1043,8 +1119,10 @@
 	/* USB descriptions contain the dB scale in 1/256 dB unit
 	 * while ALSA TLV contains in 1/100 dB unit
 	 */
-	cval->dBmin = (convert_signed_value(cval, cval->min) * 100) / 256;
-	cval->dBmax = (convert_signed_value(cval, cval->max) * 100) / 256;
+	cval->dBmin =
+		(convert_signed_value(cval, cval->min) * 100) / (cval->res);
+	cval->dBmax =
+		(convert_signed_value(cval, cval->max) * 100) / (cval->res);
 	if (cval->dBmin > cval->dBmax) {
 		/* something is wrong; assume it's either from/to 0dB */
 		if (cval->dBmin < 0)
@@ -1236,12 +1314,18 @@
 	struct usb_feature_control_info *ctl_info;
 	unsigned int len = 0;
 	int mapped_name = 0;
-	int nameid = uac_feature_unit_iFeature(desc);
+	int nameid;
 	struct snd_kcontrol *kctl;
 	struct usb_mixer_elem_info *cval;
 	const struct usbmix_name_map *map;
 	unsigned int range;
 
+	if (state->mixer->protocol == UAC_VERSION_3)
+		nameid = ((struct uac3_feature_unit_descriptor *)
+				raw_desc)->wFeatureDescrStr;
+	else
+		nameid = uac_feature_unit_iFeature(desc);
+
 	control++; /* change from zero-based to 1-based value */
 
 	if (control == UAC_FU_GRAPHIC_EQUALIZER) {
@@ -1262,7 +1346,7 @@
 	ctl_info = &audio_feature_info[control-1];
 	if (state->mixer->protocol == UAC_VERSION_1)
 		cval->val_type = ctl_info->type;
-	else /* UAC_VERSION_2 */
+	else /* UAC_VERSION_2 or UAC_VERSION_3 */
 		cval->val_type = ctl_info->type_uac2 >= 0 ?
 			ctl_info->type_uac2 : ctl_info->type;
 
@@ -1385,6 +1469,62 @@
 	snd_usb_mixer_add_control(&cval->head, kctl);
 }
 
+static int find_num_channels(struct mixer_build *state, int dir)
+{
+	int num_ch = -EINVAL, num, i, j, wMaxPacketSize;
+	int ctrlif = get_iface_desc(state->mixer->hostif)->bInterfaceNumber;
+	struct usb_interface *usb_iface	=
+			usb_ifnum_to_if(state->mixer->chip->dev, ctrlif);
+	struct usb_interface_assoc_descriptor *assoc = usb_iface->intf_assoc;
+	struct usb_host_interface *alts;
+
+	for (i = 0; i < assoc->bInterfaceCount; i++) {
+		int intf = assoc->bFirstInterface + i;
+
+		if (intf != ctrlif) {
+			struct usb_interface *iface =
+				usb_ifnum_to_if(state->mixer->chip->dev, intf);
+
+			alts = &iface->altsetting[1];
+			if (dir == USB_DIR_OUT &&
+				get_endpoint(alts, 0)->bEndpointAddress &
+				USB_DIR_IN)
+				continue;
+			if (dir == USB_DIR_IN &&
+				!(get_endpoint(alts, 0)->bEndpointAddress &
+				USB_DIR_IN))
+				continue;
+			num = iface->num_altsetting;
+			for (j = 1; j < num; j++) {
+				num_ch = NUM_CHANNELS_MONO;
+				alts = &iface->altsetting[j];
+				wMaxPacketSize = le16_to_cpu(
+							get_endpoint(alts, 0)->
+							wMaxPacketSize);
+				switch (wMaxPacketSize) {
+				case BADD_MAXPSIZE_SYNC_MONO_16:
+				case BADD_MAXPSIZE_SYNC_MONO_24:
+				case BADD_MAXPSIZE_ASYNC_MONO_16:
+				case BADD_MAXPSIZE_ASYNC_MONO_24:
+					break;
+				case BADD_MAXPSIZE_SYNC_STEREO_16:
+				case BADD_MAXPSIZE_SYNC_STEREO_24:
+				case BADD_MAXPSIZE_ASYNC_STEREO_16:
+				case BADD_MAXPSIZE_ASYNC_STEREO_24:
+					num_ch = NUM_CHANNELS_STEREO;
+					break;
+				}
+				if (num_ch == NUM_CHANNELS_MONO)
+					continue;
+				else
+					break;
+			}
+		}
+	}
+
+	return num_ch;
+}
+
 /*
  * parse a feature unit
  *
@@ -1422,7 +1562,7 @@
 				      unitid);
 			return -EINVAL;
 		}
-	} else {
+	} else if (state->mixer->protocol == UAC_VERSION_2) {
 		struct uac2_feature_unit_descriptor *ftr = _ftr;
 		if (hdr->bLength < 6) {
 			usb_audio_err(state->chip,
@@ -1439,11 +1579,118 @@
 				      unitid);
 			return -EINVAL;
 		}
+	} else {
+		struct usb_interface *usb_iface	=
+			usb_ifnum_to_if(state->mixer->chip->dev,
+			get_iface_desc(state->mixer->hostif)->bInterfaceNumber);
+		struct usb_interface_assoc_descriptor *assoc =
+							usb_iface->intf_assoc;
+
+		csize = 4;
+		switch (unitid) {
+		case BADD_FU_ID_BAIOF:
+			channels = NUM_CHANNELS_MONO;
+			bmaControls = monoControls;
+			badd_baif_in_term_desc.wClusterDescrID =
+						CLUSTER_ID_MONO;
+			break;
+
+		case BADD_FU_ID_BAOF:
+			switch (assoc->bFunctionSubClass) {
+			case PROF_HEADPHONE:
+			case PROF_HEADSET_ADAPTER:
+				channels = NUM_CHANNELS_STEREO;
+				bmaControls = stereoControls;
+				badd_baiof_mu_desc.wClusterDescrID =
+					CLUSTER_ID_MONO;
+				break;
+			case PROF_SPEAKERPHONE:
+				channels = NUM_CHANNELS_MONO;
+				bmaControls = monoControls;
+				badd_baof_in_term_desc.wClusterDescrID =
+					CLUSTER_ID_MONO;
+				break;
+			default:
+				channels = find_num_channels(state,
+								USB_DIR_OUT);
+				if (channels < 0) {
+					usb_audio_err(state->chip,
+						      "unit %u: Cant find num of channels\n",
+						      unitid);
+					return channels;
+				}
+
+				bmaControls = (channels == NUM_CHANNELS_MONO) ?
+						monoControls : stereoControls;
+				badd_baof_in_term_desc.wClusterDescrID =
+					(channels == NUM_CHANNELS_MONO) ?
+					CLUSTER_ID_MONO : CLUSTER_ID_STEREO;
+				break;
+			}
+			break;
+
+		case BADD_FU_ID_BAIF:
+			switch (assoc->bFunctionSubClass) {
+			case PROF_HEADSET:
+			case PROF_HEADSET_ADAPTER:
+			case PROF_SPEAKERPHONE:
+				channels = NUM_CHANNELS_MONO;
+				bmaControls = monoControls;
+				badd_baif_in_term_desc.wClusterDescrID =
+					CLUSTER_ID_MONO;
+				break;
+			default:
+				channels = find_num_channels(state, USB_DIR_IN);
+				if (channels < 0) {
+					usb_audio_err(state->chip,
+						      "unit %u: Cant find num of channels\n",
+						      unitid);
+					return channels;
+				}
+
+				bmaControls = (channels == NUM_CHANNELS_MONO) ?
+						 monoControls : stereoControls;
+				badd_baif_in_term_desc.wClusterDescrID =
+					(channels == NUM_CHANNELS_MONO) ?
+					CLUSTER_ID_MONO : CLUSTER_ID_STEREO;
+				break;
+			}
+			break;
+
+		default:
+			usb_audio_err(state->chip, "Invalid unit %u\n", unitid);
+			return -EINVAL;
+		}
 	}
 
 	/* parse the source unit */
-	if ((err = parse_audio_unit(state, hdr->bSourceID)) < 0)
+	if (state->mixer->protocol != UAC_VERSION_3) {
+		err = parse_audio_unit(state, hdr->bSourceID);
+		if (err < 0)
+			return err;
+	} else {
+		struct usb_interface *usb_iface	=
+			usb_ifnum_to_if(state->mixer->chip->dev,
+			get_iface_desc(state->mixer->hostif)->bInterfaceNumber);
+		struct usb_interface_assoc_descriptor *assoc =
+			usb_iface->intf_assoc;
+
+		switch (unitid) {
+		case BADD_FU_ID_BAOF:
+			switch (assoc->bFunctionSubClass) {
+			case PROF_HEADSET:
+			case PROF_HEADSET_ADAPTER:
+				hdr->bSourceID = BADD_MU_ID_BAIOF;
+				break;
+			default:
+				hdr->bSourceID = BADD_IN_TERM_ID_BAOF;
+				break;
+			}
+		}
+		err = parse_audio_unit(state, hdr->bSourceID);
+		if (err < 0)
 		return err;
+	}
 
 	/* determine the input source type and name */
 	err = check_input_term(state, hdr->bSourceID, &iterm);
@@ -1497,7 +1744,7 @@
 				build_feature_ctl(state, _ftr, 0, i, &iterm,
 						  unitid, 0);
 		}
-	} else { /* UAC_VERSION_2 */
+	} else { /* UAC_VERSION_2 or UAC_VERSION_3 */
 		for (i = 0; i < ARRAY_SIZE(audio_feature_info); i++) {
 			unsigned int ch_bits = 0;
 			unsigned int ch_read_only = 0;
@@ -1615,6 +1862,12 @@
 	int input_pins, num_ins, num_outs;
 	int pin, ich, err;
 
+	if (state->mixer->protocol == UAC_VERSION_3) {
+		input_pins = badd_baiof_mu_desc.bNrInPins;
+		num_outs =
+		   (badd_baiof_mu_desc.wClusterDescrID == CLUSTER_ID_MONO) ?
+		    NUM_CHANNELS_MONO : NUM_CHANNELS_STEREO;
+	} else {
 	if (desc->bLength < 11 || !(input_pins = desc->bNrInPins) ||
 	    !(num_outs = uac_mixer_unit_bNrChannels(desc))) {
 		usb_audio_err(state->chip,
@@ -1622,6 +1875,7 @@
 			      unitid);
 		return -EINVAL;
 	}
+	}
 
 	num_ins = 0;
 	ich = 0;
@@ -1640,9 +1894,14 @@
 			int och, ich_has_controls = 0;
 
 			for (och = 0; och < num_outs; och++) {
-				__u8 *c = uac_mixer_unit_bmControls(desc,
-						state->mixer->protocol);
+				__u8 *c = NULL;
 
+				if (state->mixer->protocol == UAC_VERSION_3)
+					c =
+					  &(badd_baiof_mu_desc.bmMixerControls);
+				else
+					c = uac_mixer_unit_bmControls(desc,
+							state->mixer->protocol);
 				if (check_matrix_bitmap(c, ich, och, num_outs)) {
 					ich_has_controls = 1;
 					break;
@@ -2157,16 +2416,28 @@
 	case UAC_MIXER_UNIT:
 		return parse_audio_mixer_unit(state, unitid, p1);
 	case UAC_SELECTOR_UNIT:
+	/*   UAC3_MIXER_UNIT_V3 has the same value */
 	case UAC2_CLOCK_SELECTOR:
+	/*   UAC3_CLOCK_SOURCE has the same value */
+		if (state->mixer->protocol == UAC_VERSION_3 &&
+			p1[2] == UAC3_CLOCK_SOURCE)
+			return 0; /* NOP */
+		else if (state->mixer->protocol == UAC_VERSION_3
+			&& p1[2] == UAC3_MIXER_UNIT_V3)
+			return parse_audio_mixer_unit(state, unitid, p1);
+		else
 		return parse_audio_selector_unit(state, unitid, p1);
 	case UAC_FEATURE_UNIT:
 		return parse_audio_feature_unit(state, unitid, p1);
 	case UAC1_PROCESSING_UNIT:
 	/*   UAC2_EFFECT_UNIT has the same value */
+	/*   UAC3_FEATURE_UNIT_V3 has the same value */
 		if (state->mixer->protocol == UAC_VERSION_1)
 			return parse_audio_processing_unit(state, unitid, p1);
-		else
+		else if (state->mixer->protocol == UAC_VERSION_2)
 			return 0; /* FIXME - effect units not implemented yet */
+		else
+			return parse_audio_feature_unit(state, unitid, p1);
 	case UAC1_EXTENSION_UNIT:
 	/*   UAC2_PROCESSING_UNIT_V2 has the same value */
 		if (state->mixer->protocol == UAC_VERSION_1)
@@ -2204,6 +2475,23 @@
 	return 0;
 }
 
+static int make_out_term(struct mixer_build state, int wTerminalType)
+{
+	struct uac3_output_terminal_descriptor *desc = NULL;
+
+	if (wTerminalType == UAC_TERMINAL_STREAMING)
+		desc = &badd_baif_out_term_desc;
+	else {
+		desc = &badd_baof_out_term_desc;
+		desc->wTerminalType = wTerminalType;
+	}
+	set_bit(desc->bTerminalID, state.unitbitmap);
+	state.oterm.id = desc->bTerminalID;
+	state.oterm.type = desc->wTerminalType;
+	state.oterm.name = desc->wTerminalDescrStr;
+	return parse_audio_unit(&state, desc->bSourceID);
+}
+
 /*
  * create mixer controls
  *
@@ -2212,9 +2500,8 @@
 static int snd_usb_mixer_controls(struct usb_mixer_interface *mixer)
 {
 	struct mixer_build state;
-	int err;
+	int err = -EINVAL;
 	const struct usbmix_ctl_map *map;
-	void *p;
 
 	memset(&state, 0, sizeof(state));
 	state.chip = mixer->chip;
@@ -2232,46 +2519,110 @@
 		}
 	}
 
+	if (mixer->protocol == UAC_VERSION_3) {
+		struct usb_interface *usb_iface	=
+			usb_ifnum_to_if(mixer->chip->dev,
+			get_iface_desc(mixer->hostif)->bInterfaceNumber);
+		struct usb_interface_assoc_descriptor *assoc =
+			usb_iface->intf_assoc;
+
+		switch (assoc->bFunctionSubClass) {
+		case PROF_GENERIC_IO: {
+			if (assoc->bInterfaceCount == 0x02) {
+				if (get_endpoint(mixer->hostif,
+					0)->bEndpointAddress | USB_DIR_IN)
+					err = make_out_term(state,
+							UAC_TERMINAL_STREAMING);
+				else
+					err = make_out_term(state,
+						UAC_OUTPUT_TERMINAL_UNDEFINED);
+			} else {
+				err = make_out_term(state,
+						UAC_OUTPUT_TERMINAL_UNDEFINED);
+				if (err < 0 && err != -EINVAL)
+					return err;
+				err = make_out_term(state,
+						UAC_TERMINAL_STREAMING);
+			}
+			break;
+		}
+
+		case PROF_HEADPHONE:
+			err = make_out_term(state,
+					UAC_OUTPUT_TERMINAL_HEADPHONES);
+			break;
+		case PROF_SPEAKER:
+			err = make_out_term(state, UAC_OUTPUT_TERMINAL_SPEAKER);
+			break;
+		case PROF_MICROPHONE:
+			err = make_out_term(state, UAC_TERMINAL_STREAMING);
+			break;
+		case PROF_HEADSET:
+		case PROF_HEADSET_ADAPTER:
+			err = make_out_term(state, UAC_BIDIR_TERMINAL_HEADSET);
+			if (err < 0 && err != -EINVAL)
+				return err;
+			err = make_out_term(state, UAC_TERMINAL_STREAMING);
+			break;
+		case PROF_SPEAKERPHONE:
+			err = make_out_term(state,
+					UAC_BIDIR_TERMINAL_SPEAKERPHONE);
+			if (err < 0 && err != -EINVAL)
+				return err;
+			err = make_out_term(state, UAC_TERMINAL_STREAMING);
+			break;
+		}
+		if (err < 0 && err != -EINVAL)
+			return err;
+	} else {
+		void *p;
+
 	p = NULL;
 	while ((p = snd_usb_find_csint_desc(mixer->hostif->extra,
-					    mixer->hostif->extralen,
-					    p, UAC_OUTPUT_TERMINAL)) != NULL) {
+						mixer->hostif->extralen, p,
+						UAC_OUTPUT_TERMINAL)) != NULL) {
 		if (mixer->protocol == UAC_VERSION_1) {
-			struct uac1_output_terminal_descriptor *desc = p;
+				struct uac1_output_terminal_descriptor *desc =
+									      p;
 
 			if (desc->bLength < sizeof(*desc))
 				continue; /* invalid descriptor? */
 			/* mark terminal ID as visited */
 			set_bit(desc->bTerminalID, state.unitbitmap);
 			state.oterm.id = desc->bTerminalID;
-			state.oterm.type = le16_to_cpu(desc->wTerminalType);
+				state.oterm.type =
+					le16_to_cpu(desc->wTerminalType);
 			state.oterm.name = desc->iTerminal;
 			err = parse_audio_unit(&state, desc->bSourceID);
 			if (err < 0 && err != -EINVAL)
 				return err;
 		} else { /* UAC_VERSION_2 */
-			struct uac2_output_terminal_descriptor *desc = p;
+				struct uac2_output_terminal_descriptor *desc =
+									      p;
 
 			if (desc->bLength < sizeof(*desc))
 				continue; /* invalid descriptor? */
 			/* mark terminal ID as visited */
 			set_bit(desc->bTerminalID, state.unitbitmap);
 			state.oterm.id = desc->bTerminalID;
-			state.oterm.type = le16_to_cpu(desc->wTerminalType);
+				state.oterm.type =
+					le16_to_cpu(desc->wTerminalType);
 			state.oterm.name = desc->iTerminal;
 			err = parse_audio_unit(&state, desc->bSourceID);
 			if (err < 0 && err != -EINVAL)
 				return err;
 
 			/*
-			 * For UAC2, use the same approach to also add the
-			 * clock selectors
+				 * For UAC2, use the same approach to also add
+				 * the clock selectors
 			 */
-			err = parse_audio_unit(&state, desc->bCSourceID);
+				err = parse_audio_unit(&state,
+							desc->bCSourceID);
 			if (err < 0 && err != -EINVAL)
 				return err;
 		}
 	}
+	}
 
 	return 0;
 }
@@ -2504,6 +2855,9 @@
 	case UAC_VERSION_2:
 		mixer->protocol = UAC_VERSION_2;
 		break;
+	case UAC_VERSION_3:
+		mixer->protocol = UAC_VERSION_3;
+		break;
 	}
 
 	if ((err = snd_usb_mixer_controls(mixer)) < 0 ||
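
The dBmin/dBmax hunk above sits in the conversion between USB Audio's
native 1/256 dB volume steps and the 1/100 dB units used by ALSA TLV; the
stock code divides by a constant 256, which this patch swaps for the
control's own resolution (cval->res). For reference, a one-line sketch of
the stock conversion (the function name is illustrative only):

/* USB Audio encodes volume in 1/256 dB steps, ALSA TLV in 1/100 dB. */
static int usb_raw_to_tlv_db(int raw)
{
	return raw * 100 / 256;		/* 0x1400 (+20 dB) -> 2000 */
}
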
diff -ruw linux-4.4.115/sound/usb/pcm.c linux-4.4.115-fbx/sound/usb/pcm.c
--- linux-4.4.115/sound/usb/pcm.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/sound/usb/pcm.c	2019-10-29 09:26:26.193228133 +0100
@@ -228,7 +228,7 @@
 	if (!test_and_set_bit(SUBSTREAM_FLAG_DATA_EP_STARTED, &subs->flags)) {
 		struct snd_usb_endpoint *ep = subs->data_endpoint;
 
-		dev_dbg(&subs->dev->dev, "Starting data EP @%p\n", ep);
+		dev_dbg(&subs->dev->dev, "Starting data EP @%pK\n", ep);
 
 		ep->data_subs = subs;
 		err = snd_usb_endpoint_start(ep);
@@ -257,7 +257,7 @@
 			}
 		}
 
-		dev_dbg(&subs->dev->dev, "Starting sync EP @%p\n", ep);
+		dev_dbg(&subs->dev->dev, "Starting sync EP @%pK\n", ep);
 
 		ep->sync_slave = subs->data_endpoint;
 		err = snd_usb_endpoint_start(ep);
@@ -554,6 +554,64 @@
 	return 0;
 }
 
+int snd_usb_enable_audio_stream(struct snd_usb_substream *subs,
+	bool enable)
+{
+	struct audioformat *fmt;
+	struct usb_host_interface *alts;
+	struct usb_interface *iface;
+	int ret;
+
+	if (!enable) {
+		if (subs->interface >= 0) {
+			usb_set_interface(subs->dev, subs->interface, 0);
+			subs->altset_idx = 0;
+			subs->interface = -1;
+			subs->cur_audiofmt = NULL;
+		}
+
+		snd_usb_autosuspend(subs->stream->chip);
+		return 0;
+	}
+
+	snd_usb_autoresume(subs->stream->chip);
+	fmt = find_format(subs);
+	if (!fmt) {
+		dev_err(&subs->dev->dev,
+		"cannot set format: format = %#x, rate = %d, channels = %d\n",
+			   subs->pcm_format, subs->cur_rate, subs->channels);
+		return -EINVAL;
+	}
+
+	subs->altset_idx = 0;
+	subs->interface = -1;
+	if (atomic_read(&subs->stream->chip->shutdown)) {
+		ret = -ENODEV;
+	} else {
+		ret = set_format(subs, fmt);
+		if (ret < 0)
+			return ret;
+
+		iface = usb_ifnum_to_if(subs->dev, subs->cur_audiofmt->iface);
+		alts = &iface->altsetting[subs->cur_audiofmt->altset_idx];
+		ret = snd_usb_init_sample_rate(subs->stream->chip,
+					       subs->cur_audiofmt->iface,
+					       alts,
+					       subs->cur_audiofmt,
+					       subs->cur_rate);
+		if (ret < 0) {
+			dev_err(&subs->dev->dev, "failed to set rate %d\n",
+				subs->cur_rate);
+			return ret;
+		}
+	}
+
+	subs->interface = fmt->iface;
+	subs->altset_idx = fmt->altset_idx;
+
+	return 0;
+}
+
 /*
  * Return the score of matching two audioformats.
  * Veto the audioformat if:
@@ -571,13 +629,13 @@
 
 	if (fp->channels < 1) {
 		dev_dbg(&subs->dev->dev,
-			"%s: (fmt @%p) no channels\n", __func__, fp);
+			"%s: (fmt @%pK) no channels\n", __func__, fp);
 		return 0;
 	}
 
 	if (!(fp->formats & pcm_format_to_bits(pcm_format))) {
 		dev_dbg(&subs->dev->dev,
-			"%s: (fmt @%p) no match for format %d\n", __func__,
+			"%s: (fmt @%pK) no match for format %d\n", __func__,
 			fp, pcm_format);
 		return 0;
 	}
@@ -590,7 +648,7 @@
 	}
 	if (!score) {
 		dev_dbg(&subs->dev->dev,
-			"%s: (fmt @%p) no match for rate %d\n", __func__,
+			"%s: (fmt @%pK) no match for rate %d\n", __func__,
 			fp, rate);
 		return 0;
 	}
@@ -599,7 +657,7 @@
 		score++;
 
 	dev_dbg(&subs->dev->dev,
-		"%s: (fmt @%p) score %d\n", __func__, fp, score);
+		"%s: (fmt @%pK) score %d\n", __func__, fp, score);
 
 	return score;
 }
diff -ruw linux-4.4.115/sound/usb/pcm.h linux-4.4.115-fbx/sound/usb/pcm.h
--- linux-4.4.115/sound/usb/pcm.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/sound/usb/pcm.h	2019-01-22 16:16:29.699302515 +0100
@@ -9,6 +9,7 @@
 int snd_usb_init_pitch(struct snd_usb_audio *chip, int iface,
 		       struct usb_host_interface *alts,
 		       struct audioformat *fmt);
-
+int snd_usb_enable_audio_stream(struct snd_usb_substream *subs,
+	bool enable);
 
 #endif /* __USBAUDIO_PCM_H */
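
snd_usb_enable_audio_stream() lets an external consumer (here, the QMI
service) bring a stream up or down without going through the normal ALSA
PCM ops. A hypothetical caller, sketched under the assumption that it
first fills in the substream fields that find_format() matches against,
as the error message in the pcm.c hunk indicates:

static int example_start_stop(struct snd_usb_substream *subs)
{
	int ret;

	/* parameters that find_format() will try to match */
	subs->pcm_format = SNDRV_PCM_FORMAT_S16_LE;
	subs->cur_rate = 48000;
	subs->channels = 2;

	ret = snd_usb_enable_audio_stream(subs, true);
	if (ret < 0)
		return ret;

	/* ... stream runs; tear it down again when done */
	return snd_usb_enable_audio_stream(subs, false);
}
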
diff -ruw linux-4.4.115/sound/usb/stream.c linux-4.4.115-fbx/sound/usb/stream.c
--- linux-4.4.115/sound/usb/stream.c	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/sound/usb/stream.c	2019-01-22 16:16:29.703302551 +0100
@@ -20,6 +20,7 @@
 #include <linux/usb.h>
 #include <linux/usb/audio.h>
 #include <linux/usb/audio-v2.h>
+#include <linux/usb/audio-v3.h>
 
 #include <sound/core.h>
 #include <sound/pcm.h>
@@ -69,9 +70,14 @@
 static void snd_usb_audio_pcm_free(struct snd_pcm *pcm)
 {
 	struct snd_usb_stream *stream = pcm->private_data;
+	struct snd_usb_audio *chip;
+
 	if (stream) {
+		mutex_lock(&stream->chip->dev_lock);
+		chip = stream->chip;
 		stream->pcm = NULL;
 		snd_usb_audio_stream_free(stream);
+		mutex_unlock(&chip->dev_lock);
 	}
 }
 
@@ -279,8 +285,6 @@
 		0 /* terminator */
 	};
 	struct snd_pcm_chmap_elem *chmap;
-	const unsigned int *maps;
-	int c;
 
 	if (channels > ARRAY_SIZE(chmap->map))
 		return NULL;
@@ -289,26 +293,41 @@
 	if (!chmap)
 		return NULL;
 
-	maps = protocol == UAC_VERSION_2 ? uac2_maps : uac1_maps;
 	chmap->channels = channels;
-	c = 0;
+
+	if (protocol == UAC_VERSION_3) {
+		switch (channels) {
+		case 1:
+			chmap->map[0] = SNDRV_CHMAP_MONO;
+			break;
+		case 2:
+			chmap->map[0] = SNDRV_CHMAP_FL;
+			chmap->map[1] = SNDRV_CHMAP_FR;
+			break;
+		}
+	} else {
+		int c = 0;
+		const unsigned int *maps =
+			protocol == UAC_VERSION_2 ? uac2_maps : uac1_maps;
 
 	if (bits) {
 		for (; bits && *maps; maps++, bits >>= 1)
 			if (bits & 1)
 				chmap->map[c++] = *maps;
 	} else {
-		/* If we're missing wChannelConfig, then guess something
-		    to make sure the channel map is not skipped entirely */
+			/*
+			 * If we're missing wChannelConfig, then guess something
+			 * to make sure the channel map is not skipped entirely
+			 */
 		if (channels == 1)
 			chmap->map[c++] = SNDRV_CHMAP_MONO;
 		else
 			for (; c < channels && *maps; maps++)
 				chmap->map[c++] = *maps;
 	}
-
 	for (; c < channels; c++)
 		chmap->map[c] = SNDRV_CHMAP_UNKNOWN;
+	}
 
 	return chmap;
 }
@@ -406,6 +425,9 @@
 	struct usb_interface_descriptor *altsd = get_iface_desc(alts);
 	int attributes = 0;
 
+	if (protocol == UAC_VERSION_3)
+		return 0;
+
 	csep = snd_usb_find_desc(alts->endpoint[0].extra, alts->endpoint[0].extralen, NULL, USB_DT_CS_ENDPOINT);
 
 	/* Creamware Noah has this descriptor after the 2nd endpoint */
@@ -626,6 +648,50 @@
 				iface_no, altno, as->bTerminalLink);
 			continue;
 		}
+
+		case UAC_VERSION_3: {
+			int wMaxPacketSize;
+
+			/*
+			 * Allocate a dummy instance of fmt and set format type
+			 * to UAC_FORMAT_TYPE_I for BADD support; free fmt
+			 * after its last usage
+			 */
+			fmt = kzalloc(sizeof(*fmt), GFP_KERNEL);
+			if (!fmt)
+				return -ENOMEM;
+
+			fmt->bFormatType = UAC_FORMAT_TYPE_I;
+			format = UAC_FORMAT_TYPE_I_PCM;
+			clock = BADD_CLOCK_SOURCE;
+			wMaxPacketSize = le16_to_cpu(get_endpoint(alts, 0)
+							->wMaxPacketSize);
+			switch (wMaxPacketSize) {
+			case BADD_MAXPSIZE_SYNC_MONO_16:
+			case BADD_MAXPSIZE_SYNC_MONO_24:
+			case BADD_MAXPSIZE_ASYNC_MONO_16:
+			case BADD_MAXPSIZE_ASYNC_MONO_24: {
+				num_channels = NUM_CHANNELS_MONO;
+				chconfig = BADD_CH_CONFIG_MONO;
+				goto populate_fp;
+			}
+
+			case BADD_MAXPSIZE_SYNC_STEREO_16:
+			case BADD_MAXPSIZE_SYNC_STEREO_24:
+			case BADD_MAXPSIZE_ASYNC_STEREO_16:
+			case BADD_MAXPSIZE_ASYNC_STEREO_24: {
+				num_channels = NUM_CHANNELS_STEREO;
+				chconfig = BADD_CH_CONFIG_STEREO;
+				goto populate_fp;
+			}
+			default:
+				dev_err(&dev->dev,
+					"%u:%d: invalid wMaxPacketSize\n",
+					iface_no, altno);
+				kfree(fmt);
+				continue;
+			}
+		}
 		}
 
 		/* get format type */
@@ -659,6 +725,7 @@
 							fp->maxpacksize * 2)
 			continue;
 
+populate_fp:
 		fp = kzalloc(sizeof(*fp), GFP_KERNEL);
 		if (! fp) {
 			dev_err(&dev->dev, "cannot malloc\n");
@@ -720,6 +787,8 @@
 			continue;
 		}
 
+		if (protocol == UAC_VERSION_3)
+			kfree(fmt);
 		/* Create chmap */
 		if (fp->channels != num_channels)
 			chconfig = 0;
diff -ruw linux-4.4.115/sound/usb/usbaudio.h linux-4.4.115-fbx/sound/usb/usbaudio.h
--- linux-4.4.115/sound/usb/usbaudio.h	2018-02-03 17:04:31.000000000 +0100
+++ linux-4.4.115-fbx/sound/usb/usbaudio.h	2019-01-22 16:16:29.703302551 +0100
@@ -60,6 +60,10 @@
 	bool autoclock;			/* from the 'autoclock' module param */
 
 	struct usb_host_interface *ctrl_intf;	/* the audio control interface */
+
+	struct mutex dev_lock;	/* to protect any race with disconnect */
+	int card_num;	/* cache pcm card number to use upon disconnect */
+	void (*disconnect_cb)(struct snd_usb_audio *chip);
 };
 
 #define usb_audio_err(chip, fmt, args...) \
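
The new dev_lock serializes PCM free against USB disconnect (see the
snd_usb_audio_pcm_free() hunk in stream.c above), while disconnect_cb and
the cached card_num give an out-of-tree consumer such as the QMI service a
chance to drop its references before the chip state goes away. A sketch of
the disconnect side of that pairing; this handler is illustrative, not
part of the patch, and assumes the callback is invoked with dev_lock held:

static void example_disconnect(struct snd_usb_audio *chip)
{
	mutex_lock(&chip->dev_lock);
	/* let the registered consumer drop its references first */
	if (chip->disconnect_cb)
		chip->disconnect_cb(chip);
	mutex_unlock(&chip->dev_lock);
}
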
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/arch/arm/boot/dts/include/dt-bindings/clock/audio-ext-clk.h	2019-01-22 16:16:28.167288642 +0100
@@ -0,0 +1,30 @@
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __AUDIO_EXT_CLK_H
+#define __AUDIO_EXT_CLK_H
+
+/* Audio External Clocks */
+#define AUDIO_PMI_CLK		0
+#define AUDIO_PMIC_LNBB_CLK	0
+#define AUDIO_AP_CLK		1
+#define AUDIO_AP_CLK2		2
+#define AUDIO_LPASS_MCLK	3
+#define AUDIO_LPASS_MCLK2	4
+
+#define clk_audio_ap_clk        0x9b5727cb
+#define clk_audio_pmi_clk       0xcbfe416d
+#define clk_audio_ap_clk2       0x454d1e91
+#define clk_audio_lpass_mclk    0xf0f2a284
+#define clk_audio_pmi_lnbb_clk   0x57312343
+
+#endif
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/arch/arm/boot/dts/include/dt-bindings/clock/msm-clocks-8996.h	2019-01-22 16:16:28.171288678 +0100
@@ -0,0 +1,574 @@
+/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MSM_CLOCKS_8996_H
+#define __MSM_CLOCKS_8996_H
+
+#include "audio-ext-clk.h"
+
+/* clock_gcc controlled clocks */
+#define clk_cxo_clk_src			0x79e95308
+#define clk_pnoc_clk			0x4325d220
+#define clk_pnoc_a_clk			0x2808c12b
+#define clk_bimc_clk			0x4b80bf00
+#define clk_bimc_a_clk			0x4b25668a
+#define clk_cnoc_clk			0xd5ccb7f4
+#define clk_cnoc_a_clk			0xd8fe2ccc
+#define clk_snoc_clk			0x2c341aa0
+#define clk_snoc_a_clk			0x8fcef2af
+#define clk_bb_clk1			0xf5304268
+#define clk_bb_clk1_ao			0xfa113810
+#define clk_bb_clk1_pin			0x6dd0a779
+#define clk_bb_clk1_pin_ao		0x9b637772
+#define clk_bb_clk2			0xfe15cb87
+#define clk_bb_clk2_ao			0x59682706
+#define clk_bb_clk2_pin			0x498938e5
+#define clk_bb_clk2_pin_ao		0x52513787
+#define clk_bimc_msmbus_clk		0xd212feea
+#define clk_bimc_msmbus_a_clk		0x71d1a499
+#define clk_ce1_a_clk			0x44a833fe
+#define clk_cnoc_msmbus_clk		0x62228b5d
+#define clk_cnoc_msmbus_a_clk		0x67442955
+#define clk_cxo_clk_src_ao		0x64eb6004
+#define clk_cxo_dwc3_clk		0xf79c19f6
+#define clk_cxo_lpm_clk			0x94adbf3d
+#define clk_cxo_otg_clk			0x4eec0bb9
+#define clk_cxo_pil_lpass_clk		0xe17f0ff6
+#define clk_cxo_pil_ssc_clk		0x81832015
+#define clk_div_clk1			0xaa1157a6
+#define clk_div_clk1_ao			0x6b943d68
+#define clk_div_clk2			0xd454019f
+#define clk_div_clk2_ao			0x53f9e788
+#define clk_div_clk3			0xa9a55a68
+#define clk_div_clk3_ao			0x3d6725a8
+#define clk_ipa_a_clk			0xeeec2919
+#define clk_ipa_clk			0xfa685cda
+#define clk_ln_bb_clk			0x3ab0b36d
+#define clk_ln_bb_a_clk			0xc7257ea8
+#define clk_ln_bb_clk_pin		0x1b1c476a
+#define clk_ln_bb_a_clk_pin		0x9cbb5411
+#define clk_mcd_ce1_clk			0xbb615d26
+#define clk_pnoc_keepalive_a_clk	0xf8f91f0b
+#define clk_pnoc_msmbus_clk		0x38b95c77
+#define clk_pnoc_msmbus_a_clk		0x8c9b4e93
+#define clk_pnoc_pm_clk			0xd6f7dfb9
+#define clk_pnoc_sps_clk		0xd482ecc7
+#define clk_qdss_a_clk			0xdd121669
+#define clk_qdss_clk			0x1492202a
+#define clk_rf_clk1			0xaabeea5a
+#define clk_rf_clk1_ao			0x72a10cb8
+#define clk_rf_clk1_pin			0x8f463562
+#define clk_rf_clk1_pin_ao		0x62549ff6
+#define clk_rf_clk2			0x24a30992
+#define clk_rf_clk2_ao			0x944d8bbd
+#define clk_rf_clk2_pin			0xa7c5602a
+#define clk_rf_clk2_pin_ao		0x2d75eb4d
+#define clk_snoc_msmbus_clk		0xe6900bb6
+#define clk_snoc_msmbus_a_clk		0x5d4683bd
+#define clk_mcd_ce1_clk			0xbb615d26
+#define clk_qcedev_ce1_clk		0x293f97b0
+#define clk_qcrypto_ce1_clk		0xa6ac14df
+#define clk_qseecom_ce1_clk		0xaa858373
+#define clk_scm_ce1_clk			0xd8ebcc62
+#define clk_ce1_clk			0x42229c55
+#define clk_gcc_ce1_ahb_m_clk		0x2eb28c01
+#define clk_gcc_ce1_axi_m_clk		0xc174dfba
+#define clk_measure_only_bimc_hmss_axi_clk	0xc1cc4f11
+#define clk_aggre1_noc_clk		0x049abba8
+#define clk_aggre1_noc_a_clk		0xc12e4220
+#define clk_aggre2_noc_clk		0xaa681404
+#define clk_aggre2_noc_a_clk		0xcab67089
+#define clk_mmssnoc_axi_rpm_clk		0x4d7f8cdc
+#define clk_mmssnoc_axi_rpm_a_clk	0xfbea899b
+#define clk_mmssnoc_axi_clk		0xdb4b31e6
+#define clk_mmssnoc_axi_a_clk		0xd4970614
+#define clk_mmssnoc_gds_clk		0x06a22afa
+
+#define clk_gpll0			0x1ebe3bc4
+#define clk_gpll0_ao			0xa1368304
+#define clk_gpll0_out_main		0xe9374de7
+#define clk_gpll4			0xb3b5d85b
+#define clk_gpll4_out_main		0xa9a0ab9d
+#define clk_ufs_axi_clk_src		0x297ca380
+#define clk_pcie_aux_clk_src		0xebc50566
+#define clk_usb30_master_clk_src	0xc6262f89
+#define clk_usb20_master_clk_src	0x5680ac83
+#define clk_ufs_ice_core_clk_src	0xda8e7119
+#define clk_blsp1_qup1_i2c_apps_clk_src 0x17f78f5e
+#define clk_blsp1_qup1_spi_apps_clk_src 0xf534c4fa
+#define clk_blsp1_qup2_i2c_apps_clk_src 0x8de71c79
+#define clk_blsp1_qup2_spi_apps_clk_src 0x33cf809a
+#define clk_blsp1_qup3_i2c_apps_clk_src 0xf161b902
+#define clk_blsp1_qup3_spi_apps_clk_src 0x5e95683f
+#define clk_blsp1_qup4_i2c_apps_clk_src 0xb2ecce68
+#define clk_blsp1_qup4_spi_apps_clk_src 0xddb5bbdb
+#define clk_blsp1_qup5_i2c_apps_clk_src 0x71ea7804
+#define clk_blsp1_qup5_spi_apps_clk_src 0x9752f35f
+#define clk_blsp1_qup6_i2c_apps_clk_src 0x28806803
+#define clk_blsp1_qup6_spi_apps_clk_src 0x44a1edc4
+#define clk_blsp1_uart1_apps_clk_src	0xf8146114
+#define clk_blsp1_uart2_apps_clk_src	0xfc9c2f73
+#define clk_blsp1_uart3_apps_clk_src	0x600497f2
+#define clk_blsp1_uart4_apps_clk_src	0x56bff15c
+#define clk_blsp1_uart5_apps_clk_src	0x218ef697
+#define clk_blsp1_uart6_apps_clk_src	0x8fbdbe4c
+#define clk_blsp2_qup1_i2c_apps_clk_src 0xd6d1e95d
+#define clk_blsp2_qup1_spi_apps_clk_src 0xcc1b8365
+#define clk_blsp2_qup2_i2c_apps_clk_src 0x603b5c51
+#define clk_blsp2_qup2_spi_apps_clk_src 0xd577dc44
+#define clk_blsp2_qup3_i2c_apps_clk_src 0xea82959c
+#define clk_blsp2_qup3_spi_apps_clk_src 0xd04b1e92
+#define clk_blsp2_qup4_i2c_apps_clk_src 0x73dc968c
+#define clk_blsp2_qup4_spi_apps_clk_src 0x25d4a2b1
+#define clk_blsp2_qup5_i2c_apps_clk_src 0xcc3698bd
+#define clk_blsp2_qup5_spi_apps_clk_src 0xfa0cf45e
+#define clk_blsp2_qup6_i2c_apps_clk_src 0x2fa53151
+#define clk_blsp2_qup6_spi_apps_clk_src 0x5ca86755
+#define clk_blsp2_uart1_apps_clk_src	0x562c66dc
+#define clk_blsp2_uart2_apps_clk_src	0xdd448080
+#define clk_blsp2_uart3_apps_clk_src	0x46b2e90f
+#define clk_blsp2_uart4_apps_clk_src	0x23a093d2
+#define clk_blsp2_uart5_apps_clk_src	0xe067616a
+#define clk_blsp2_uart6_apps_clk_src	0xe02d2829
+#define clk_gp1_clk_src			0xad85b97a
+#define clk_gp2_clk_src			0xfb1f0065
+#define clk_gp3_clk_src			0x63b693d6
+#define clk_hmss_rbcpr_clk_src		0xedd9a474
+#define clk_pdm2_clk_src		0x31e494fd
+#define clk_sdcc1_apps_clk_src		0xd4975db2
+#define clk_sdcc2_apps_clk_src		0xfc46c821
+#define clk_sdcc3_apps_clk_src		0xea34c7f4
+#define clk_sdcc4_apps_clk_src		0x7aaaaa0c
+#define clk_tsif_ref_clk_src		0x4e9042d1
+#define clk_usb20_mock_utmi_clk_src	0xc3aaeecb
+#define clk_usb30_mock_utmi_clk_src	0xa024a976
+#define clk_usb3_phy_aux_clk_src	0x15eec63c
+#define clk_gcc_qusb2phy_prim_reset	0x07550fa1
+#define clk_gcc_qusb2phy_sec_reset	0x3f3a87d0
+#define clk_gcc_periph_noc_usb20_ahb_clk	0xfb9f26e9
+#define clk_gcc_mmss_gcc_dbg_clk	0xe89d461c
+#define clk_cpu_dbg_clk			0x6550dfa9
+#define clk_gcc_blsp1_ahb_clk		0x8caa5b4f
+#define clk_gcc_blsp1_qup1_i2c_apps_clk 0xc303fae9
+#define clk_gcc_blsp1_qup1_spi_apps_clk 0x759a76b0
+#define clk_gcc_blsp1_qup2_i2c_apps_clk 0x1076f220
+#define clk_gcc_blsp1_qup2_spi_apps_clk 0x3e77d48f
+#define clk_gcc_blsp1_qup3_i2c_apps_clk 0x9e25ac82
+#define clk_gcc_blsp1_qup3_spi_apps_clk 0xfb978880
+#define clk_gcc_blsp1_qup4_i2c_apps_clk 0xd7f40f6f
+#define clk_gcc_blsp1_qup4_spi_apps_clk 0x80f8722f
+#define clk_gcc_blsp1_qup5_i2c_apps_clk 0xacae5604
+#define clk_gcc_blsp1_qup5_spi_apps_clk 0xbf3e15d7
+#define clk_gcc_blsp1_qup6_i2c_apps_clk 0x5c6ad820
+#define clk_gcc_blsp1_qup6_spi_apps_clk 0x780d9f85
+#define clk_gcc_blsp1_uart1_apps_clk	0xc7c62f90
+#define clk_gcc_blsp1_uart2_apps_clk	0xf8a61c96
+#define clk_gcc_blsp1_uart3_apps_clk	0xc3298bd7
+#define clk_gcc_blsp1_uart4_apps_clk	0x26be16c0
+#define clk_gcc_blsp1_uart5_apps_clk	0x28a6bc74
+#define clk_gcc_blsp1_uart6_apps_clk	0x28fd3466
+#define clk_gcc_blsp2_ahb_clk		0x8f283c1d
+#define clk_gcc_blsp2_qup1_i2c_apps_clk 0x9ace11dd
+#define clk_gcc_blsp2_qup1_spi_apps_clk 0xa32604cc
+#define clk_gcc_blsp2_qup2_i2c_apps_clk 0x1bf9a57e
+#define clk_gcc_blsp2_qup2_spi_apps_clk 0xbf54ca6d
+#define clk_gcc_blsp2_qup3_i2c_apps_clk 0x336d4170
+#define clk_gcc_blsp2_qup3_spi_apps_clk 0xc68509d6
+#define clk_gcc_blsp2_qup4_i2c_apps_clk 0xbd22539d
+#define clk_gcc_blsp2_qup4_spi_apps_clk 0x01a72b93
+#define clk_gcc_blsp2_qup5_i2c_apps_clk 0xe2b2ce1d
+#define clk_gcc_blsp2_qup5_spi_apps_clk 0xf40999cd
+#define clk_gcc_blsp2_qup6_i2c_apps_clk 0x894bcea4
+#define clk_gcc_blsp2_qup6_spi_apps_clk 0xfe1bd34a
+#define clk_gcc_blsp2_uart1_apps_clk	0x8c3512ff
+#define clk_gcc_blsp2_uart2_apps_clk	0x1e1965a3
+#define clk_gcc_blsp2_uart3_apps_clk	0x382415ab
+#define clk_gcc_blsp2_uart4_apps_clk	0x87a44b42
+#define clk_gcc_blsp2_uart5_apps_clk	0x5cd30649
+#define clk_gcc_blsp2_uart6_apps_clk	0x8feee5ab
+#define clk_gcc_boot_rom_ahb_clk	0xde2adeb1
+#define clk_gcc_gp1_clk			0x057f7b69
+#define clk_gcc_gp2_clk			0x9bf83ffd
+#define clk_gcc_gp3_clk			0xec6539ee
+#define clk_gcc_hmss_rbcpr_clk		0x699183be
+#define clk_gcc_mmss_noc_cfg_ahb_clk	0xb41a9d99
+#define clk_gcc_pcie_0_aux_clk		0x3d2e3ece
+#define clk_gcc_pcie_0_cfg_ahb_clk	0x4dd325c3
+#define clk_gcc_pcie_0_mstr_axi_clk	0x3f85285b
+#define clk_gcc_pcie_0_slv_axi_clk	0xd69638a1
+#define clk_gcc_pcie_0_pipe_clk		0x4f37621e
+#define clk_gcc_pcie_0_phy_reset	0xdc3201c1
+#define clk_gcc_pcie_1_aux_clk		0xc9bb962c
+#define clk_gcc_pcie_1_cfg_ahb_clk	0xb6338658
+#define clk_gcc_pcie_1_mstr_axi_clk	0xc20f6269
+#define clk_gcc_pcie_1_slv_axi_clk	0xd54e40d6
+#define clk_gcc_pcie_1_pipe_clk		0xc1627422
+#define clk_gcc_pcie_1_phy_reset	0x674481bb
+#define clk_gcc_pcie_2_aux_clk		0xa4dc7ae8
+#define clk_gcc_pcie_2_cfg_ahb_clk	0x4f1d3121
+#define clk_gcc_pcie_2_mstr_axi_clk	0x9e81724a
+#define clk_gcc_pcie_2_slv_axi_clk	0x7990d8b2
+#define clk_gcc_pcie_2_pipe_clk		0xa757a834
+#define clk_gcc_pcie_2_phy_reset	0x82634880
+#define clk_gcc_pcie_phy_reset		0x9bc3c959
+#define clk_gcc_pcie_phy_com_reset	0x8bf513e6
+#define clk_gcc_pcie_phy_nocsr_com_phy_reset	0x0c16a2da
+#define clk_gcc_pcie_phy_aux_clk	0x4746e74f
+#define clk_gcc_pcie_phy_cfg_ahb_clk	0x8533671a
+#define clk_gcc_pdm2_clk		0x99d55711
+#define clk_gcc_pdm_ahb_clk		0x365664f6
+#define clk_gcc_prng_ahb_clk		0x397e7eaa
+#define clk_gcc_sdcc1_ahb_clk		0x691e0caa
+#define clk_gcc_sdcc1_apps_clk		0x9ad6fb96
+#define clk_gcc_sdcc2_ahb_clk		0x23d5727f
+#define clk_gcc_sdcc2_apps_clk		0x861b20ac
+#define clk_gcc_sdcc3_ahb_clk		0x565b2c03
+#define clk_gcc_sdcc3_apps_clk		0x0b27aeac
+#define clk_gcc_sdcc4_ahb_clk		0x64f3e6a8
+#define clk_gcc_sdcc4_apps_clk		0xbf7c4dc8
+#define clk_gcc_tsif_ahb_clk		0x88d2822c
+#define clk_gcc_tsif_ref_clk		0x8f1ed2c2
+#define clk_gcc_ufs_ahb_clk		0x1914bb84
+#define clk_gcc_ufs_axi_clk		0x47c743a7
+#define clk_gcc_ufs_ice_core_clk	0x310b0710
+#define clk_gcc_ufs_rx_cfg_clk		0xa6747786
+#define clk_gcc_ufs_rx_symbol_0_clk	0x7f43251c
+#define clk_gcc_ufs_rx_symbol_1_clk	0x03182fde
+#define clk_gcc_ufs_tx_cfg_clk		0xba2cf8b5
+#define clk_gcc_ufs_tx_symbol_0_clk	0x6a9f747a
+#define clk_gcc_ufs_unipro_core_clk	0x2daf7fd2
+#define clk_gcc_ufs_sys_clk_core_clk	0x360e5ac8
+#define clk_gcc_ufs_tx_symbol_clk_core_clk	0xf6fb0df7
+#define clk_gcc_usb20_master_clk	0x24c3b66a
+#define clk_gcc_usb20_mock_utmi_clk	0xe8db8203
+#define clk_gcc_usb20_sleep_clk		0x6e8cb4b2
+#define clk_gcc_usb30_master_clk	0xb3b4e2cb
+#define clk_gcc_usb30_mock_utmi_clk	0xa800b65a
+#define clk_gcc_usb30_sleep_clk		0xd0b65c92
+#define clk_gcc_usb3_phy_aux_clk	0x0d9a36e0
+#define clk_gcc_usb3_phy_pipe_clk	0xf279aff2
+#define clk_gcc_usb_phy_cfg_ahb2phy_clk	0xd1231a0e
+#define clk_gcc_aggre0_cnoc_ahb_clk	0x53a35559
+#define clk_gcc_aggre0_snoc_axi_clk	0x3c446400
+#define clk_gcc_aggre0_noc_qosgen_extref_clk	0x8c4356ba
+#define clk_hlos1_vote_lpass_core_smmu_clk	0x3aaa1743
+#define clk_hlos1_vote_lpass_adsp_smmu_clk	0xc76f702f
+#define clk_gcc_usb3_phy_reset		0x03d559f1
+#define clk_gcc_usb3phy_phy_reset	0xb1a4f885
+#define clk_gcc_usb3_clkref_clk		0xb6cc8f01
+#define clk_gcc_hdmi_clkref_clk		0x4d4eec04
+#define clk_gcc_edp_clkref_clk		0xa8685c3f
+#define clk_gcc_ufs_clkref_clk		0x92aa126f
+#define clk_gcc_pcie_clkref_clk		0xa2e247fa
+#define clk_gcc_rx2_usb2_clkref_clk	0x27ec24ba
+#define clk_gcc_rx1_usb2_clkref_clk	0x53351d25
+#define clk_gcc_smmu_aggre0_ahb_clk	0x47a06ce4
+#define clk_gcc_smmu_aggre0_axi_clk	0x3cac4a6c
+#define clk_gcc_sys_noc_usb3_axi_clk	0x94d26800
+#define clk_gcc_sys_noc_ufs_axi_clk	0x19d38312
+#define clk_gcc_aggre2_usb3_axi_clk	0xd5822a8e
+#define clk_gcc_aggre2_ufs_axi_clk	0xb31e5191
+#define clk_gcc_mmss_gpll0_div_clk	0xdd06848d
+#define clk_gcc_mmss_bimc_gfx_clk	0xe4f28754
+#define clk_gcc_bimc_gfx_clk		0x3edd69ad
+#define clk_gcc_qspi_ahb_clk		0x96969dc8
+#define clk_gcc_qspi_ser_clk		0xfaf1e266
+#define clk_qspi_ser_clk_src		0x426676ee
+#define clk_sdcc1_ice_core_clk_src	0xfd6a4301
+#define clk_gcc_sdcc1_ice_core_clk	0x0fd5680a
+#define clk_gcc_mss_cfg_ahb_clk		0x111cde81
+#define clk_gcc_mss_snoc_axi_clk	0x0e71de85
+#define clk_gcc_mss_q6_bimc_axi_clk	0x67544d62
+#define clk_gcc_mss_mnoc_bimc_axi_clk	0xf665d03f
+#define clk_gpll0_out_msscc		0x7d794829
+#define clk_gcc_debug_mux_v2		0xf7e749f0
+#define clk_gcc_dcc_ahb_clk		0xfa14a88c
+#define clk_gcc_aggre0_noc_mpu_cfg_ahb_clk	0x5c1bb8e2
+
+/* clock_mmss controlled clocks */
+#define clk_mmsscc_xo			0x05e63704
+#define clk_mmsscc_gpll0		0xe900c515
+#define clk_mmsscc_gpll0_div		0x73892e05
+#define clk_mmsscc_mmssnoc_ahb		0x7b4bd6f7
+#define clk_mmpll0			0xdd83b751
+#define clk_mmpll0_out_main		0x2f996a31
+#define clk_mmpll1			0x6da7fb90
+#define clk_mmpll1_out_main		0xa0d3a7da
+#define clk_mmpll4			0x22c063c1
+#define clk_mmpll4_out_main		0xfb21c2fd
+#define clk_mmpll3			0x18c76899
+#define clk_mmpll3_out_main		0x6eb6328f
+#define clk_ahb_clk_src			0x86f49203
+#define clk_mmpll2			0x1190e4d8
+#define clk_mmpll2_out_main		0x1e9e24a8
+#define clk_mmpll8			0xd06ad45e
+#define clk_mmpll8_out_main		0x75b1f386
+#define clk_mmpll9			0x1c50684c
+#define clk_mmpll9_out_main		0x16b74937
+#define clk_mmpll5			0xa41e1936
+#define clk_mmpll5_out_main		0xcc1897bf
+#define clk_csi0_clk_src		0x227e65bc
+#define clk_vfe0_clk_src		0xa0c2bd8f
+#define clk_vfe1_clk_src		0x4e357366
+#define clk_csi1_clk_src		0x6a2a6c36
+#define clk_csi2_clk_src		0x4113589f
+#define clk_csi3_clk_src		0xfd934012
+#define clk_maxi_clk_src		0x52c09777
+#define clk_cpp_clk_src			0x8382f56d
+#define clk_jpeg0_clk_src		0x9a0a0ac3
+#define clk_jpeg2_clk_src		0x5ad927f3
+#define clk_jpeg_dma_clk_src		0xb68afcea
+#define clk_mdp_clk_src			0x6dc1f8f1
+#define clk_video_core_clk_src		0x8be4c944
+#define clk_fd_core_clk_src		0xe4799ab7
+#define clk_cci_clk_src			0x822f3d97
+#define clk_csiphy0_3p_clk_src		0xd2474b12
+#define clk_csiphy1_3p_clk_src		0x46a02aff
+#define clk_csiphy2_3p_clk_src		0x1447813f
+#define clk_camss_gp0_clk_src		0x6b57cfe6
+#define clk_camss_gp1_clk_src		0xf735368a
+#define clk_jpeg_dma_clk_src		0xb68afcea
+#define clk_mclk0_clk_src		0x266b3853
+#define clk_mclk1_clk_src		0xa73cad0c
+#define clk_mclk2_clk_src		0x42545468
+#define clk_mclk3_clk_src		0x2bfbb714
+#define clk_csi0phytimer_clk_src	0xc8a309be
+#define clk_csi1phytimer_clk_src	0x7c0fe23a
+#define clk_csi2phytimer_clk_src	0x62ffea9c
+#define clk_rbbmtimer_clk_src		0x17649ecc
+#define clk_esc0_clk_src		0xb41d7c38
+#define clk_esc1_clk_src		0x3b0afa42
+#define clk_hdmi_clk_src		0xb40aeea9
+#define clk_vsync_clk_src		0xecb43940
+#define clk_rbcpr_clk_src		0x2c2e9af2
+#define clk_video_subcore0_clk_src	0x88d79636
+#define clk_video_subcore1_clk_src	0x4966930c
+#define clk_mmss_bto_ahb_clk		0xfdf8c361
+#define clk_camss_ahb_clk		0xc4ff91d4
+#define clk_camss_cci_ahb_clk		0x04c4441a
+#define clk_camss_cci_clk		0xd6cb5eb9
+#define clk_camss_cpp_ahb_clk		0x12e9a87b
+#define clk_camss_cpp_clk		0xb82f366b
+#define clk_camss_cpp_axi_clk		0x5598c804
+#define clk_camss_cpp_vbif_ahb_clk	0xb5f31be4
+#define clk_camss_csi0_ahb_clk		0x6e29c972
+#define clk_camss_csi0_clk		0x30862ddb
+#define clk_camss_csi0phy_clk		0x2cecfb84
+#define clk_camss_csi0pix_clk		0x6946f77b
+#define clk_camss_csi0rdi_clk		0x83645ef5
+#define clk_camss_csi1_ahb_clk		0xccc15f06
+#define clk_camss_csi1_clk		0xb150f052
+#define clk_camss_csi1phy_clk		0xb989f06d
+#define clk_camss_csi1pix_clk		0x58d19bf3
+#define clk_camss_csi1rdi_clk		0x4d2f3352
+#define clk_camss_csi2_ahb_clk		0x92d02d75
+#define clk_camss_csi2_clk		0x74fc92e8
+#define clk_camss_csi2phy_clk		0xda05d9d8
+#define clk_camss_csi2pix_clk		0xf8ed0731
+#define clk_camss_csi2rdi_clk		0xdc1b2081
+#define clk_camss_csi3_ahb_clk		0xee5e459c
+#define clk_camss_csi3_clk		0x39488fdd
+#define clk_camss_csi3phy_clk		0x8b6063b9
+#define clk_camss_csi3pix_clk		0xd82bd467
+#define clk_camss_csi3rdi_clk		0xb6750046
+#define clk_camss_csi_vfe0_clk		0x3023937a
+#define clk_camss_csi_vfe1_clk		0xe66fa522
+#define clk_camss_csiphy0_3p_clk	0xf2a54f5a
+#define clk_camss_csiphy1_3p_clk	0x8bf70cb2
+#define clk_camss_csiphy2_3p_clk	0x1c14c939
+#define clk_camss_gp0_clk		0xcee7e51d
+#define clk_camss_gp1_clk		0x41f1c2e3
+#define clk_camss_ispif_ahb_clk		0x9a212c6d
+#define clk_camss_jpeg0_clk		0x0b0e2db7
+#define clk_camss_jpeg2_clk		0xd7291c8d
+#define clk_camss_jpeg_ahb_clk		0x1f47fd28
+#define clk_camss_jpeg_axi_clk		0x9e5545c8
+#define clk_camss_jpeg_dma_clk		0x2336e65d
+#define clk_camss_mclk0_clk		0xcf0c61e0
+#define clk_camss_mclk1_clk		0xd1410ed4
+#define clk_camss_mclk2_clk		0x851286f2
+#define clk_camss_mclk3_clk		0x4db11c45
+#define clk_camss_micro_ahb_clk		0x33a23277
+#define clk_camss_csi0phytimer_clk	0xff93b3c8
+#define clk_camss_csi1phytimer_clk	0x6c399ab6
+#define clk_camss_csi2phytimer_clk	0x24f47f49
+#define clk_camss_top_ahb_clk		0x8f8b2d33
+#define clk_camss_vfe_ahb_clk		0x595197bc
+#define clk_camss_vfe_axi_clk		0x273d4c31
+#define clk_camss_vfe0_ahb_clk		0x4652833c
+#define clk_camss_vfe0_clk		0x1e9bb8c4
+#define clk_camss_vfe0_stream_clk	0x22835fa4
+#define clk_camss_vfe1_ahb_clk		0x6a56abd3
+#define clk_camss_vfe1_clk		0x5bffa69b
+#define clk_camss_vfe1_stream_clk	0x92f849b9
+#define clk_fd_ahb_clk			0x868a2c5c
+#define clk_fd_core_clk			0x3badcae4
+#define clk_fd_core_uar_clk		0x7e624e15
+#define clk_gpu_ahb_clk			0xf97f1d43
+#define clk_gpu_aon_isense_clk		0xa9e9b297
+#define clk_gpu_gx_gfx3d_clk		0xb7ece823
+#define clk_gpu_mx_clk			0xb80ccedf
+#define clk_gpu_gx_rbbmtimer_clk	0xdeba634e
+#define clk_mdss_ahb_clk		0x684ccb41
+#define clk_mdss_axi_clk		0xcc07d687
+#define clk_mdss_esc0_clk		0x28cafbe6
+#define clk_mdss_esc1_clk		0xc22c6883
+#define clk_mdss_hdmi_ahb_clk		0x01cef516
+#define clk_mdss_hdmi_clk		0x097a6de9
+#define clk_mdss_mdp_clk		0x618336ac
+#define clk_mdss_vsync_clk		0x42a022d3
+#define clk_mmss_misc_ahb_clk		0xea30b0e7
+#define clk_mmss_misc_cxo_clk		0xe620cd80
+#define clk_mmagic_bimc_noc_cfg_ahb_clk 0x12d5ba72
+#define clk_mmagic_camss_axi_clk	0xa8b1c16b
+#define clk_mmagic_camss_noc_cfg_ahb_clk 0x5182c819
+#define clk_mmss_mmagic_cfg_ahb_clk	0x5e94a822
+#define clk_mmagic_mdss_axi_clk		0xa0359d10
+#define clk_mmagic_mdss_noc_cfg_ahb_clk 0x9c6d5482
+#define clk_mmagic_video_axi_clk	0x7b9219c3
+#define clk_mmagic_video_noc_cfg_ahb_clk 0x5124d256
+#define clk_mmss_mmagic_ahb_clk		0x3d15f2b0
+#define clk_mmss_mmagic_maxi_clk	0xbdaf5af7
+#define clk_mmss_rbcpr_ahb_clk		0x623ba55f
+#define clk_mmss_rbcpr_clk		0x69a23a6f
+#define clk_mmss_spdm_cpp_clk		0xefe35cd2
+#define clk_mmss_spdm_jpeg_dma_clk	0xcb7bd5a0
+#define clk_smmu_cpp_ahb_clk		0x3ad82d84
+#define clk_smmu_cpp_axi_clk		0xa6bb2f4a
+#define clk_smmu_jpeg_ahb_clk		0x10c436ec
+#define clk_smmu_jpeg_axi_clk		0x41112f37
+#define clk_smmu_mdp_ahb_clk		0x04994cb2
+#define clk_smmu_mdp_axi_clk		0x7fd71687
+#define clk_smmu_rot_ahb_clk		0xa30772c9
+#define clk_smmu_rot_axi_clk		0xfed7c078
+#define clk_smmu_vfe_ahb_clk		0x4dabebe7
+#define clk_smmu_vfe_axi_clk		0xde483725
+#define clk_smmu_video_ahb_clk		0x2d738e2c
+#define clk_smmu_video_axi_clk		0xe2b5b887
+#define clk_video_ahb_clk		0x90775cfb
+#define clk_video_axi_clk		0xe6c16dba
+#define clk_video_core_clk		0x7e876ec3
+#define clk_video_maxi_clk		0x97749db6
+#define clk_video_subcore0_clk		0xb6f63e6c
+#define clk_video_subcore1_clk		0x26c29cb4
+#define clk_vmem_ahb_clk		0xab6223ff
+#define clk_vmem_maxi_clk		0x15ef32db
+#define clk_mmss_debug_mux		0xe646ffda
+#define clk_mmss_gcc_dbg_clk		0xafa4d48a
+#define clk_gfx3d_clk_src		0x917f76ef
+#define clk_extpclk_clk_src		0xb2c31abd
+#define clk_mdss_byte0_clk		0xf5a03f64
+#define clk_mdss_byte1_clk		0xb8c7067d
+#define clk_mdss_extpclk_clk		0xfa5aadb0
+#define clk_mdss_pclk0_clk		0x3487234a
+#define clk_mdss_pclk1_clk		0xd5804246
+#define clk_gpu_gcc_dbg_clk		0x0ccc42cd
+#define clk_mdss_mdp_vote_clk		0x588460a4
+#define clk_mdss_rotator_vote_clk	0x5b1f675e
+#define clk_mmpll2_postdiv_clk		0x4fdeaaba
+#define clk_mmpll8_postdiv_clk		0xedf57882
+#define clk_mmpll9_postdiv_clk		0x3064b618
+#define clk_gfx3d_clk_src_v2		0x4210acb7
+#define clk_byte0_clk_src		0x75cc885b
+#define clk_byte1_clk_src		0x63c2c955
+#define clk_pclk0_clk_src		0xccac1f35
+#define clk_pclk1_clk_src		0x090f68ac
+#define clk_ext_byte0_clk_src		0xfb32f31e
+#define clk_ext_byte1_clk_src		0x585ef6d4
+#define clk_ext_pclk0_clk_src		0x087c1612
+#define clk_ext_pclk1_clk_src		0x8067c5a3
+
+/* clock_debug controlled clocks */
+#define clk_gcc_debug_mux		0x8121ac15
+
+/* external multimedia clocks */
+#define clk_dsi0pll_pixel_clk_mux	0x792379e1
+#define clk_dsi0pll_byte_clk_mux	0x60e83f06
+#define clk_dsi0pll_byte_clk_src	0xbbaa30be
+#define clk_dsi0pll_pixel_clk_src	0x45b3260f
+#define clk_dsi0pll_n2_div_clk		0x1474c213
+#define clk_dsi0pll_post_n1_div_clk	0xdab8c389
+#define clk_dsi0pll_vco_clk		0x15940d40
+#define clk_dsi1pll_pixel_clk_mux	0x36458019
+#define clk_dsi1pll_byte_clk_mux	0xb5a42b7b
+#define clk_dsi1pll_byte_clk_src	0x63930a8f
+#define clk_dsi1pll_pixel_clk_src	0x0e4c9b56
+#define clk_dsi1pll_n2_div_clk		0x2c9d4007
+#define clk_dsi1pll_post_n1_div_clk	0x03020041
+#define clk_dsi1pll_vco_clk		0x99797b50
+#define clk_mdss_dsi1_vco_clk_src	0xfcd15658
+#define clk_hdmi_vco_clk		0x66003284
+
+#define clk_dsi0pll_shadow_byte_clk_src	0x177c029c
+#define clk_dsi0pll_shadow_pixel_clk_src	0x98ae3c92
+#define clk_dsi0pll_shadow_n2_div_clk	0xd5f0dad9
+#define clk_dsi0pll_shadow_post_n1_div_clk	0x1f7c8cf8
+#define clk_dsi0pll_shadow_vco_clk	0xb100ca83
+#define clk_dsi1pll_shadow_byte_clk_src	0xfc021ce5
+#define clk_dsi1pll_shadow_pixel_clk_src	0xdcca3ffc
+#define clk_dsi1pll_shadow_n2_div_clk		0x189541bf
+#define clk_dsi1pll_shadow_post_n1_div_clk	0x1637020e
+#define clk_dsi1pll_shadow_vco_clk		0x68d8b6f7
+
+/* CPU clocks */
+#define clk_pwrcl_clk 0xc554130e
+#define clk_pwrcl_pll 0x25454ca1
+#define clk_pwrcl_alt_pll 0xc445471b
+#define clk_pwrcl_pll_main 0x28948e22
+#define clk_pwrcl_alt_pll_main 0x25c8270e
+#define clk_pwrcl_hf_mux 0x77706ae6
+#define clk_pwrcl_lf_mux 0xd99e334d
+#define clk_perfcl_clk 0x58869997
+#define clk_perfcl_pll 0x97dcec1c
+#define clk_perfcl_alt_pll 0xfe2eaea1
+#define clk_perfcl_pll_main 0x0dbf0c0b
+#define clk_perfcl_alt_pll_main 0x0b892aab
+#define clk_perfcl_hf_mux 0x9e8bbe59
+#define clk_perfcl_lf_mux 0x2f9c278d
+#define clk_cbf_pll 0xfe2e96a3
+#define clk_cbf_pll_main 0x2b05cf95
+#define clk_cbf_hf_mux 0x71244f73
+#define clk_cbf_clk 0x48e9e16b
+#define clk_xo_ao 0x428c856d
+#define clk_sys_apcsaux_clk 0x0b0dd513
+#define clk_cpu_debug_mux 0xc7acaa31
+
+/* GCC block resets */
+#define QUSB2PHY_PRIM_BCR		0
+#define QUSB2PHY_SEC_BCR		1
+#define BLSP1_BCR			2
+#define BLSP2_BCR			3
+#define BOOT_ROM_BCR			4
+#define PRNG_BCR			5
+#define UFS_BCR				6
+#define USB_20_BCR			7
+#define USB_30_BCR			8
+#define USB3_PHY_BCR			9
+#define USB3PHY_PHY_BCR			10
+#define PCIE_0_PHY_BCR			11
+#define PCIE_1_PHY_BCR			12
+#define PCIE_2_PHY_BCR			13
+#define PCIE_PHY_BCR			14
+#define PCIE_PHY_COM_BCR		15
+#define PCIE_PHY_NOCSR_COM_PHY_BCR	16
+
+/* MMSS Block resets */
+#define VIDEO_BCR			0
+#define MDSS_BCR			1
+#define CAMSS_MICRO_BCR			2
+#define CAMSS_JPEG_BCR			3
+#define CAMSS_VFE0_BCR			4
+#define CAMSS_VFE1_BCR			5
+#define FD_BCR				6
+#define GPU_GX_BCR			7
+
+#endif
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/arch/arm/boot/dts/include/dt-bindings/clock/msm-clocks-8998.h	2019-01-22 16:16:28.171288678 +0100
@@ -0,0 +1,534 @@
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MSM_CLOCKS_8998_H
+#define __MSM_CLOCKS_8998_H
+
+#include "audio-ext-clk.h"
+
+/* clock_rpm controlled clocks */
+#define clk_ce1_clk				0x42229c55
+#define clk_ce1_a_clk				0x44a833fe
+#define clk_cxo_clk_src				0x79e95308
+#define clk_bimc_clk				0x4b80bf00
+#define clk_bimc_a_clk				0x4b25668a
+#define clk_cnoc_clk				0xd5ccb7f4
+#define clk_cnoc_a_clk				0xd8fe2ccc
+#define clk_snoc_clk				0x2c341aa0
+#define clk_snoc_a_clk				0x8fcef2af
+#define clk_cnoc_periph_clk			0xb11e9cf9
+#define clk_cnoc_periph_a_clk			0x1d7faa2e
+#define clk_cnoc_periph_keepalive_a_clk		0x7287aef2
+#define clk_ln_bb_clk1				0xb867b147
+#define clk_ln_bb_clk1_ao			0x7f63a93a
+#define clk_ln_bb_clk1_pin			0x6fc5653c
+#define clk_ln_bb_clk1_pin_ao			0x25d625bf
+#define clk_ln_bb_clk2				0xf83e6387
+#define clk_ln_bb_clk2_ao			0x96f09628
+#define clk_ln_bb_clk2_pin			0xa9ebe8d5
+#define clk_ln_bb_clk2_pin_ao			0x89a1226f
+#define clk_ln_bb_clk3				0x4f52a39e
+#define clk_ln_bb_clk3_ao			0xb15eba76
+#define clk_ln_bb_clk3_pin			0xc4de7dad
+#define clk_ln_bb_clk3_pin_ao			0xc01022e8
+#define clk_bimc_msmbus_clk			0xd212feea
+#define clk_bimc_msmbus_a_clk			0x71d1a499
+#define clk_cnoc_msmbus_clk			0x62228b5d
+#define clk_cnoc_msmbus_a_clk			0x67442955
+#define clk_cxo_clk_src_ao			0x64eb6004
+#define clk_cxo_dwc3_clk			0xf79c19f6
+#define clk_cxo_lpm_clk				0x94adbf3d
+#define clk_cxo_otg_clk				0x4eec0bb9
+#define clk_cxo_pil_lpass_clk			0xe17f0ff6
+#define clk_cxo_pil_ssc_clk			0x81832015
+#define clk_cxo_pil_spss_clk			0x5cd71a61
+#define clk_div_clk1				0xaa1157a6
+#define clk_div_clk1_ao				0x6b943d68
+#define clk_div_clk2				0xd454019f
+#define clk_div_clk2_ao				0x53f9e788
+#define clk_div_clk3				0xa9a55a68
+#define clk_div_clk3_ao				0x3d6725a8
+#define clk_ipa_clk				0xfa685cda
+#define clk_ipa_a_clk				0xeeec2919
+#define clk_mcd_ce1_clk				0xbb615d26
+#define clk_mmssnoc_axi_clk			0xdb4b31e6
+#define clk_mmssnoc_axi_a_clk			0xd4970614
+#define clk_qcedev_ce1_clk			0x293f97b0
+#define clk_qcrypto_ce1_clk			0xa6ac14df
+#define clk_qdss_clk				0x1492202a
+#define clk_qdss_a_clk				0xdd121669
+#define clk_qseecom_ce1_clk			0xaa858373
+#define clk_rf_clk1				0xaabeea5a
+#define clk_rf_clk1_ao				0x72a10cb8
+#define clk_rf_clk1_pin				0x8f463562
+#define clk_rf_clk1_pin_ao			0x62549ff6
+#define clk_rf_clk2				0x24a30992
+#define clk_rf_clk2_ao				0x944d8bbd
+#define clk_rf_clk2_pin				0xa7c5602a
+#define clk_rf_clk2_pin_ao			0x2d75eb4d
+#define clk_rf_clk3				0xb673936b
+#define clk_rf_clk3_ao				0x038bb968
+#define clk_rf_clk3_pin				0x726f53f5
+#define clk_rf_clk3_pin_ao			0x76f9240f
+#define clk_scm_ce1_clk				0xd8ebcc62
+#define clk_snoc_msmbus_clk			0xe6900bb6
+#define clk_snoc_msmbus_a_clk			0x5d4683bd
+#define clk_gcc_ce1_ahb_m_clk			0x2eb28c01
+#define clk_gcc_ce1_axi_m_clk			0xc174dfba
+#define clk_aggre1_noc_clk			0x049abba8
+#define clk_aggre1_noc_a_clk			0xc12e4220
+#define clk_aggre2_noc_clk			0xaa681404
+#define clk_aggre2_noc_a_clk			0xcab67089
+#define clk_measure_only_bimc_hmss_axi_clk	0xc1cc4f11
+
+/* clock_gcc controlled clocks*/
+#define clk_debug_mmss_clk			0x977c99b6
+#define clk_debug_rpm_clk			0x8e2b07ca
+#define clk_debug_cpu_clk			0x0e696b2b
+#define clk_gpu_gcc_debug_clk			0x3eb88190
+#define clk_gfx_gcc_debug_clk			0xa3a64fec
+#define clk_gpll0				0x1ebe3bc4
+#define clk_gpll0_out_main			0xe9374de7
+#define clk_gpll0_ao				0xa1368304
+#define clk_gcc_mmss_gpll0_clk			0x8050f008
+#define clk_gcc_mmss_gpll0_div_clk		0xdd06848d
+#define clk_gcc_gpu_gpll0_clk			0xdad7a7a4
+#define clk_gcc_gpu_gpll0_div_clk		0x07d16c6a
+#define clk_gpll4				0xb3b5d85b
+#define clk_gpll4_out_main			0xa9a0ab9d
+#define clk_usb30_master_clk_src		0xc6262f89
+#define clk_pcie_aux_clk_src			0xebc50566
+#define clk_ufs_axi_clk_src			0x297ca380
+#define clk_blsp1_qup1_i2c_apps_clk_src		0x17f78f5e
+#define clk_blsp1_qup1_spi_apps_clk_src		0xf534c4fa
+#define clk_blsp1_qup2_i2c_apps_clk_src		0x8de71c79
+#define clk_blsp1_qup2_spi_apps_clk_src		0x33cf809a
+#define clk_blsp1_qup3_i2c_apps_clk_src		0xf161b902
+#define clk_blsp1_qup3_spi_apps_clk_src		0x5e95683f
+#define clk_blsp1_qup4_i2c_apps_clk_src		0xb2ecce68
+#define clk_blsp1_qup4_spi_apps_clk_src		0xddb5bbdb
+#define clk_blsp1_qup5_i2c_apps_clk_src		0x71ea7804
+#define clk_blsp1_qup5_spi_apps_clk_src		0x9752f35f
+#define clk_blsp1_qup6_i2c_apps_clk_src		0x28806803
+#define clk_blsp1_qup6_spi_apps_clk_src		0x44a1edc4
+#define clk_blsp1_uart1_apps_clk_src		0xf8146114
+#define clk_blsp1_uart2_apps_clk_src		0xfc9c2f73
+#define clk_blsp1_uart3_apps_clk_src		0x600497f2
+#define clk_blsp2_qup1_i2c_apps_clk_src		0xd6d1e95d
+#define clk_blsp2_qup1_spi_apps_clk_src		0xcc1b8365
+#define clk_blsp2_qup2_i2c_apps_clk_src		0x603b5c51
+#define clk_blsp2_qup2_spi_apps_clk_src		0xd577dc44
+#define clk_blsp2_qup3_i2c_apps_clk_src		0xea82959c
+#define clk_blsp2_qup3_spi_apps_clk_src		0xd04b1e92
+#define clk_blsp2_qup4_i2c_apps_clk_src		0x73dc968c
+#define clk_blsp2_qup4_spi_apps_clk_src		0x25d4a2b1
+#define clk_blsp2_qup5_i2c_apps_clk_src		0xcc3698bd
+#define clk_blsp2_qup5_spi_apps_clk_src		0xfa0cf45e
+#define clk_blsp2_qup6_i2c_apps_clk_src		0x2fa53151
+#define clk_blsp2_qup6_spi_apps_clk_src		0x5ca86755
+#define clk_blsp2_uart1_apps_clk_src		0x562c66dc
+#define clk_blsp2_uart2_apps_clk_src		0xdd448080
+#define clk_blsp2_uart3_apps_clk_src		0x46b2e90f
+#define clk_gp1_clk_src				0xad85b97a
+#define clk_gp2_clk_src				0xfb1f0065
+#define clk_gp3_clk_src				0x63b693d6
+#define clk_hmss_rbcpr_clk_src			0xedd9a474
+#define clk_pdm2_clk_src			0x31e494fd
+#define clk_sdcc2_apps_clk_src			0xfc46c821
+#define clk_sdcc4_apps_clk_src			0x7aaaaa0c
+#define clk_tsif_ref_clk_src			0x4e9042d1
+#define clk_ufs_ice_core_clk_src		0xda8e7119
+#define clk_ufs_phy_aux_clk_src			0xc6bca085
+#define clk_ufs_unipro_core_clk_src		0x179e80a9
+#define clk_usb30_mock_utmi_clk_src		0xa024a976
+#define clk_usb3_phy_aux_clk_src		0x15eec63c
+#define clk_qspi_ref_clk_src			0xfe6b8e11
+#define clk_gcc_pcie_phy_0_reset		0x6bb4df33
+#define clk_gcc_usb3_phy_reset			0x03d559f1
+#define clk_gcc_usb3phy_phy_reset		0xb1a4f885
+#define clk_gcc_aggre1_ufs_axi_clk		0x873459d8
+#define clk_gcc_aggre1_ufs_axi_hw_ctl_clk	0x117a6f39
+#define clk_gcc_aggre1_usb3_axi_clk		0xc5c3fbe8
+#define clk_gcc_bimc_mss_q6_axi_clk		0x7437988f
+#define clk_gcc_blsp1_ahb_clk			0x8caa5b4f
+#define clk_gcc_blsp1_qup1_i2c_apps_clk		0xc303fae9
+#define clk_gcc_blsp1_qup1_spi_apps_clk		0x759a76b0
+#define clk_gcc_blsp1_qup2_i2c_apps_clk		0x1076f220
+#define clk_gcc_blsp1_qup2_spi_apps_clk		0x3e77d48f
+#define clk_gcc_blsp1_qup3_i2c_apps_clk		0x9e25ac82
+#define clk_gcc_blsp1_qup3_spi_apps_clk		0xfb978880
+#define clk_gcc_blsp1_qup4_i2c_apps_clk		0xd7f40f6f
+#define clk_gcc_blsp1_qup4_spi_apps_clk		0x80f8722f
+#define clk_gcc_blsp1_qup5_i2c_apps_clk		0xacae5604
+#define clk_gcc_blsp1_qup5_spi_apps_clk		0xbf3e15d7
+#define clk_gcc_blsp1_qup6_i2c_apps_clk		0x5c6ad820
+#define clk_gcc_blsp1_qup6_spi_apps_clk		0x780d9f85
+#define clk_gcc_blsp1_uart1_apps_clk		0xc7c62f90
+#define clk_gcc_blsp1_uart2_apps_clk		0xf8a61c96
+#define clk_gcc_blsp1_uart3_apps_clk		0xc3298bd7
+#define clk_gcc_blsp2_ahb_clk			0x8f283c1d
+#define clk_gcc_blsp2_qup1_i2c_apps_clk		0x9ace11dd
+#define clk_gcc_blsp2_qup1_spi_apps_clk		0xa32604cc
+#define clk_gcc_blsp2_qup2_i2c_apps_clk		0x1bf9a57e
+#define clk_gcc_blsp2_qup2_spi_apps_clk		0xbf54ca6d
+#define clk_gcc_blsp2_qup3_i2c_apps_clk		0x336d4170
+#define clk_gcc_blsp2_qup3_spi_apps_clk		0xc68509d6
+#define clk_gcc_blsp2_qup4_i2c_apps_clk		0xbd22539d
+#define clk_gcc_blsp2_qup4_spi_apps_clk		0x01a72b93
+#define clk_gcc_blsp2_qup5_i2c_apps_clk		0xe2b2ce1d
+#define clk_gcc_blsp2_qup5_spi_apps_clk		0xf40999cd
+#define clk_gcc_blsp2_qup6_i2c_apps_clk		0x894bcea4
+#define clk_gcc_blsp2_qup6_spi_apps_clk		0xfe1bd34a
+#define clk_gcc_blsp2_uart1_apps_clk		0x8c3512ff
+#define clk_gcc_blsp2_uart2_apps_clk		0x1e1965a3
+#define clk_gcc_blsp2_uart3_apps_clk		0x382415ab
+#define clk_gcc_boot_rom_ahb_clk		0xde2adeb1
+#define clk_gcc_bimc_gfx_clk			0x3edd69ad
+#define clk_gcc_cfg_noc_usb3_axi_clk		0x9ea4c2d9
+#define clk_gcc_gp1_clk				0x057f7b69
+#define clk_gcc_gp2_clk				0x9bf83ffd
+#define clk_gcc_gp3_clk				0xec6539ee
+#define clk_gcc_gpu_bimc_gfx_clk		0x3909459b
+#define clk_gcc_gpu_cfg_ahb_clk			0x72f20a57
+#define clk_gcc_gpu_iref_clk			0xfd82abad
+#define clk_gcc_hmss_dvm_bus_clk		0x17cc8b53
+#define clk_gcc_hmss_rbcpr_clk			0x699183be
+#define clk_hmss_gpll0_clk_src			0x17eb05d0
+#define clk_hmss_gpll4_clk_src			0x20456cae
+#define clk_gcc_mmss_sys_noc_axi_clk		0x4467b15b
+#define clk_gcc_mss_at_clk			0x1692c5aa
+#define clk_nav_gcc_dbg_clk			0x2221c544
+#define clk_gcc_pcie_0_aux_clk			0x3d2e3ece
+#define clk_gcc_pcie_0_cfg_ahb_clk		0x4dd325c3
+#define clk_gcc_pcie_0_mstr_axi_clk		0x3f85285b
+#define clk_gcc_pcie_0_pipe_clk			0x4f37621e
+#define clk_gcc_pcie_0_slv_axi_clk		0xd69638a1
+#define clk_gcc_pcie_phy_aux_clk		0x4746e74f
+#define clk_gcc_pdm2_clk			0x99d55711
+#define clk_gcc_pdm_ahb_clk			0x365664f6
+#define clk_gcc_prng_ahb_clk			0x397e7eaa
+#define clk_gcc_sdcc2_ahb_clk			0x23d5727f
+#define clk_gcc_sdcc2_apps_clk			0x861b20ac
+#define clk_gcc_sdcc4_ahb_clk			0x64f3e6a8
+#define clk_gcc_sdcc4_apps_clk			0xbf7c4dc8
+#define clk_gcc_tsif_ahb_clk			0x88d2822c
+#define clk_gcc_tsif_ref_clk			0x8f1ed2c2
+#define clk_gcc_ufs_ahb_clk			0x1914bb84
+#define clk_gcc_ufs_axi_clk			0x47c743a7
+#define clk_gcc_ufs_axi_hw_ctl_clk		0x69385b45
+#define clk_gcc_ufs_ice_core_clk		0x310b0710
+#define clk_gcc_ufs_ice_core_hw_ctl_clk		0x84e15a5b
+#define clk_gcc_ufs_phy_aux_clk			0x17acc8fb
+#define clk_gcc_ufs_phy_aux_hw_ctl_clk		0x7dbdb2e2
+#define clk_gcc_ufs_rx_symbol_0_clk		0x7f43251c
+#define clk_gcc_ufs_rx_symbol_1_clk		0x03182fde
+#define clk_gcc_ufs_tx_symbol_0_clk		0x6a9f747a
+#define clk_ufs_tx_symbol_0_clk			0xb3fcd0f7
+#define clk_ufs_rx_symbol_0_clk			0x17a0f1cd
+#define clk_gcc_ufs_unipro_core_clk		0x2daf7fd2
+#define clk_gcc_ufs_unipro_core_hw_ctl_clk	0x4a4e0f3d
+#define clk_gcc_usb30_master_clk		0xb3b4e2cb
+#define clk_gcc_usb30_mock_utmi_clk		0xa800b65a
+#define clk_gcc_usb30_sleep_clk			0xd0b65c92
+#define clk_gcc_usb3_phy_aux_clk		0x0d9a36e0
+#define clk_gcc_usb3_phy_pipe_clk		0xf279aff2
+#define clk_gcc_usb3_clkref_clk			0xb6cc8f00
+#define clk_gcc_hdmi_clkref_clk			0x4d4eec04
+#define clk_gcc_edp_clkref_clk			0xa8685c3f
+#define clk_gcc_ufs_clkref_clk			0x92aa126f
+#define clk_gcc_pcie_clkref_clk			0xa2e247fa
+#define clk_gcc_rx2_qlink_clkref_clk		0xd0ba986d
+#define clk_gcc_rx1_usb2_clkref_clk		0x53351d25
+#define clk_gcc_pcie_phy_reset			0x9bc3c959
+#define clk_gcc_pcie_phy_com_reset		0x8bf513e6
+#define clk_gcc_pcie_phy_nocsr_com_phy_reset	0x0c16a2da
+#define clk_gcc_qusb2phy_prim_reset		0x07550fa1
+#define clk_gcc_qusb2phy_sec_reset		0x3f3a87d0
+#define clk_gcc_mmss_noc_cfg_ahb_clk		0xb41a9d99
+#define clk_gcc_dcc_ahb_clk			0xfa14a88c
+#define clk_hlos1_vote_lpass_core_smmu_clk	0x3aaa1743
+#define clk_hlos1_vote_lpass_adsp_smmu_clk	0xc76f702f
+#define clk_gcc_mss_cfg_ahb_clk			0x111cde81
+#define clk_gcc_mss_q6_bimc_axi_clk		0x67544d62
+#define clk_gcc_mss_mnoc_bimc_axi_clk		0xf665d03f
+#define clk_gpll0_out_msscc			0x7d794829
+#define clk_gcc_mss_snoc_axi_clk		0x0e71de85
+#define clk_gcc_qspi_ref_clk			0x766a0f7c
+#define clk_gcc_qspi_ahb_clk			0x96969dc8
+#define clk_gcc_debug_mux			0x8121ac15
+
+/* clock_mmss controlled clocks */
+#define clk_mmsscc_xo				0x05e63704
+#define clk_mmsscc_gpll0			0xe900c515
+#define clk_mmsscc_gpll0_div			0x73892e05
+#define clk_mmpll0_pll				0x361e3cfd
+#define clk_mmpll1_pll				0x198e426b
+#define clk_mmpll3_pll				0x18c76899
+#define clk_mmpll4_pll				0x22c063c1
+#define clk_mmpll5_pll				0xa41e1936
+#define clk_mmpll6_pll				0xc56fb440
+#define clk_mmpll7_pll				0x3ac216af
+#define clk_mmpll10_pll				0x2561263b
+#define clk_mmpll0_pll_out			0x1e9e24a8
+#define clk_mmpll1_pll_out			0x5fa32257
+#define clk_mmpll3_pll_out			0x6eb6328f
+#define clk_mmpll4_pll_out			0xfb21c2fd
+#define clk_mmpll5_pll_out			0xcc1897bf
+#define clk_mmpll6_pll_out			0xfb1060bd
+#define clk_mmpll7_pll_out			0x767758ed
+#define clk_mmpll10_pll_out			0x3c5668f3
+#define clk_ahb_clk_src				0x86f49203
+#define clk_csi0_clk_src			0x227e65bc
+#define clk_vfe0_clk_src			0xa0c2bd8f
+#define clk_vfe1_clk_src			0x4e357366
+#define clk_mdp_clk_src				0x6dc1f8f1
+#define clk_maxi_clk_src			0x52c09777
+#define clk_cpp_clk_src				0x8382f56d
+#define clk_jpeg0_clk_src			0x9a0a0ac3
+#define clk_rot_clk_src				0xce49b56c
+#define clk_video_core_clk_src			0x8be4c944
+#define clk_csi1_clk_src			0x6a2a6c36
+#define clk_csi2_clk_src			0x4113589f
+#define clk_csi3_clk_src			0xfd934012
+#define clk_fd_core_clk_src			0xe4799ab7
+#define clk_ext_dp_phy_pll_vco			0x441b576b
+#define clk_ext_dp_phy_pll_link			0xea12644c
+#define clk_dp_link_clk_src			0x370d0626
+#define clk_dp_crypto_clk_src			0xf8faa811
+#define clk_dp_pixel_clk_src			0xf5dfbabf
+#define clk_ext_extpclk_clk_src			0xe5b273af
+#define clk_ext_pclk0_clk_src			0x087c1612
+#define clk_ext_pclk1_clk_src			0x8067c5a3
+#define clk_pclk0_clk_src			0xccac1f35
+#define clk_pclk1_clk_src			0x090f68ac
+#define clk_video_subcore0_clk_src		0x88d79636
+#define clk_video_subcore1_clk_src		0x4966930c
+#define clk_cci_clk_src				0x822f3d97
+#define clk_camss_gp0_clk_src			0x43b063e9
+#define clk_camss_gp1_clk_src			0xa3315f1b
+#define clk_mclk0_clk_src			0x266b3853
+#define clk_mclk1_clk_src			0xa73cad0c
+#define clk_mclk2_clk_src			0x42545468
+#define clk_mclk3_clk_src			0x2bfbb714
+#define clk_csiphy_clk_src			0x8cceb70a
+#define clk_csi0phytimer_clk_src		0xc8a309be
+#define clk_csi1phytimer_clk_src		0x7c0fe23a
+#define clk_csi2phytimer_clk_src		0x62ffea9c
+#define clk_ext_byte0_clk_src			0xfb32f31e
+#define clk_ext_byte1_clk_src			0x585ef6d4
+#define clk_byte0_clk_src			0x75cc885b
+#define clk_byte1_clk_src			0x63c2c955
+#define clk_dp_aux_clk_src			0x2b6e972b
+#define clk_dp_gtc_clk_src			0xc5a86a42
+#define clk_esc0_clk_src			0xb41d7c38
+#define clk_esc1_clk_src			0x3b0afa42
+#define clk_extpclk_clk_src			0xb2c31abd
+#define clk_hdmi_clk_src			0xb40aeea9
+#define clk_vsync_clk_src			0xecb43940
+#define clk_mmss_bimc_smmu_ahb_clk		0x4825baf4
+#define clk_mmss_bimc_smmu_axi_clk		0xc365ac39
+#define clk_mmss_snoc_dvm_axi_clk		0x2c159a11
+#define clk_mmss_camss_ahb_clk			0xa51f2c1d
+#define clk_mmss_camss_cci_ahb_clk		0xfda8bb6a
+#define clk_mmss_camss_cci_clk			0x71bb5c97
+#define clk_mmss_camss_cpp_ahb_clk		0xd5554f15
+#define clk_mmss_camss_cpp_clk			0x8e99ef57
+#define clk_mmss_camss_cpp_axi_clk		0xd84e390b
+#define clk_mmss_camss_cpp_vbif_ahb_clk		0x1b33a88e
+#define clk_mmss_camss_cphy_csid0_clk		0x56114361
+#define clk_mmss_camss_csi0_ahb_clk		0x2b58d241
+#define clk_mmss_camss_csi0_clk			0xccfe39ef
+#define clk_mmss_camss_csi0pix_clk		0x9e26509d
+#define clk_mmss_camss_csi0rdi_clk		0x01d5bf83
+#define clk_mmss_camss_cphy_csid1_clk		0x79fbcd8a
+#define clk_mmss_camss_csi1_ahb_clk		0x7073244b
+#define clk_mmss_camss_csi1_clk			0x3eeeaac0
+#define clk_mmss_camss_csi1pix_clk		0xf1375139
+#define clk_mmss_camss_csi1rdi_clk		0x43185024
+#define clk_mmss_camss_cphy_csid2_clk		0xf295e3ef
+#define clk_mmss_camss_csi2_ahb_clk		0x681c1479
+#define clk_mmss_camss_csi2_clk			0x94524569
+#define clk_mmss_camss_csi2pix_clk		0xf4de617d
+#define clk_mmss_camss_csi2rdi_clk		0x4bf01dc5
+#define clk_mmss_camss_cphy_csid3_clk		0x100188e9
+#define clk_mmss_camss_csi3_ahb_clk		0xfae7c29b
+#define clk_mmss_camss_csi3_clk			0x55e4bbae
+#define clk_mmss_camss_csi3pix_clk		0xc166a015
+#define clk_mmss_camss_csi3rdi_clk		0x6983a4cd
+#define clk_mmss_camss_csi_vfe0_clk		0x3b30b798
+#define clk_mmss_camss_csi_vfe1_clk		0xfe729af7
+#define clk_mmss_camss_csiphy0_clk		0x96c81af8
+#define clk_mmss_camss_csiphy1_clk		0xee9ac2bb
+#define clk_mmss_camss_csiphy2_clk		0x3365e70e
+#define clk_mmss_fd_ahb_clk			0x4ff1da4d
+#define clk_mmss_fd_core_clk			0x749e7eb0
+#define clk_mmss_fd_core_uar_clk		0x8ea480c5
+#define clk_mmss_camss_gp0_clk			0x3f7f6c87
+#define clk_mmss_camss_gp1_clk			0xdccdd730
+#define clk_mmss_camss_ispif_ahb_clk		0xbda4f0e3
+#define clk_mmss_camss_jpeg0_clk		0x4cc73b07
+#define clk_mmss_camss_jpeg0_vote_clk		0xc9efa6ac
+#define clk_mmss_camss_jpeg0_dma_vote_clk	0x371ec109
+#define clk_mmss_camss_jpeg_ahb_clk		0xde1fece3
+#define clk_mmss_camss_jpeg_axi_clk		0x7534616b
+#define clk_mmss_camss_mclk0_clk		0x056293a7
+#define clk_mmss_camss_mclk1_clk		0x96c7b69b
+#define clk_mmss_camss_mclk2_clk		0x8820556e
+#define clk_mmss_camss_mclk3_clk		0xf90ffb67
+#define clk_mmss_camss_micro_ahb_clk		0x6c6fd3c7
+#define clk_mmss_camss_csi0phytimer_clk		0x7a78864e
+#define clk_mmss_camss_csi1phytimer_clk		0x6e6c1de5
+#define clk_mmss_camss_csi2phytimer_clk		0x0235e2de
+#define clk_mmss_camss_top_ahb_clk		0x120618d6
+#define clk_mmss_camss_vfe0_ahb_clk		0x137bd0bd
+#define clk_mmss_camss_vfe0_clk			0xead28288
+#define clk_mmss_camss_vfe0_stream_clk		0xa0428287
+#define clk_mmss_camss_vfe1_ahb_clk		0xac0154c0
+#define clk_mmss_camss_vfe1_clk			0xc216b14d
+#define clk_mmss_camss_vfe1_stream_clk		0x745af3b6
+#define clk_mmss_camss_vfe_vbif_ahb_clk		0x0109a9c6
+#define clk_mmss_camss_vfe_vbif_axi_clk		0xe626d8a1
+#define clk_mmss_mdss_ahb_clk			0x85d37ab5
+#define clk_mmss_mdss_axi_clk			0xdf04fc1d
+#define clk_mmss_mdss_byte0_clk			0x38105d25
+#define clk_mmss_mdss_byte0_intf_clk		0x38e5aa79
+#define clk_mmss_mdss_byte0_intf_div_clk	0x8604f181
+#define clk_mmss_mdss_byte1_clk			0xe0c21354
+#define clk_mmss_mdss_byte1_intf_clk		0xcf654d8e
+#define clk_mmss_mdss_byte1_intf_div_clk	0xcdf334c5
+#define clk_mmss_mdss_dp_aux_clk		0x23125eb6
+#define clk_mmss_mdss_dp_crypto_clk		0x9a072d4e
+#define clk_mmss_mdss_dp_link_clk		0x8dd302d1
+#define clk_mmss_mdss_dp_link_intf_clk		0x70e386e6
+#define clk_mmss_mdss_dp_pixel_clk		0xb707b765
+#define clk_mmss_mdss_dp_gtc_clk		0xb59c151a
+#define clk_mmss_mdss_esc0_clk			0x5721ff83
+#define clk_mmss_mdss_esc1_clk			0xc3d0376b
+#define clk_mmss_mdss_extpclk_clk		0x74d5a954
+#define clk_mmss_mdss_hdmi_clk			0x28460a6d
+#define clk_mmss_mdss_hdmi_dp_ahb_clk		0x5448519f
+#define clk_mmss_mdss_mdp_clk			0x43539b0e
+#define clk_mmss_mdss_mdp_lut_clk		0x00627b2b
+#define clk_mmss_mdss_pclk0_clk			0xcc0e909d
+#define clk_mmss_mdss_pclk1_clk			0x850d9146
+#define clk_mmss_mdss_rot_clk			0xbb7e71c4
+#define clk_mmss_mdss_vsync_clk			0x629b36dc
+#define clk_mmss_misc_ahb_clk			0xea30b0e7
+#define clk_mmss_misc_cxo_clk			0xe620cd80
+#define clk_mmss_mnoc_ahb_clk			0x49a394f4
+#define clk_mmss_mnoc_maxi_clk			0xd8b7278f
+#define clk_mmss_video_subcore0_clk		0x23fae359
+#define clk_mmss_video_subcore1_clk		0x5213a0c7
+#define clk_mmss_video_ahb_clk			0x94334ae9
+#define clk_mmss_video_axi_clk			0xf3178ba5
+#define clk_mmss_video_core_clk			0x78f14c85
+#define clk_mmss_video_maxi_clk			0x1785ef88
+#define clk_mmss_vmem_ahb_clk			0x4b18955b
+#define clk_mmss_vmem_maxi_clk			0xb6067889
+#define clk_mmss_debug_mux			0xe646ffda
+
+/* external multimedia clocks */
+#define clk_dsi0pll_byteclk_mux			0xecf2c434
+#define clk_dsi0pll_byteclk_src			0x6f6f740f
+#define clk_dsi0pll_pclk_mux			0x6c9da335
+#define clk_dsi0pll_pclk_src			0x5efd85d4
+#define clk_dsi0pll_pclk_src_mux		0x84b14663
+#define clk_dsi0pll_post_bit_div		0xf46dcf27
+#define clk_dsi0pll_pll_out_div1		0xeda5b7fe
+#define clk_dsi0pll_pll_out_div2		0x97fa476d
+#define clk_dsi0pll_pll_out_div4		0x90a98ce0
+#define clk_dsi0pll_pll_out_div8		0x9d9d85cf
+#define clk_dsi0pll_pll_out_mux			0x179c27ca
+#define clk_dsi0pll_post_vco_mux		0xfaf9bd1f
+#define clk_dsi0pll_post_vco_div1		0xabb50b2a
+#define clk_dsi0pll_post_vco_div4		0xbe51c091
+#define clk_dsi0pll_bitclk_src			0x36c3c437
+#define clk_dsi0pll_vco_clk			0x15940d40
+
+#define clk_dsi1pll_byteclk_mux			0x14e2f38f
+#define clk_dsi1pll_byteclk_src			0x4b65c298
+#define clk_dsi1pll_pclk_mux			0x4c0518b5
+#define clk_dsi1pll_pclk_src			0xeddcd80e
+#define clk_dsi1pll_pclk_src_mux		0x3651feb3
+#define clk_dsi1pll_post_bit_div		0x712f0260
+#define clk_dsi1pll_pll_out_div8		0x87628ddb
+#define clk_dsi1pll_pll_out_div4		0x0d9a384b
+#define clk_dsi1pll_pll_out_div2		0x0c9b5748
+#define clk_dsi1pll_pll_out_div1		0x3193164e
+#define clk_dsi1pll_pll_out_mux			0x171bf8fd
+#define clk_dsi1pll_post_vco_mux		0xc6a90d20
+#define clk_dsi1pll_post_vco_div1		0x6f47ca7d
+#define clk_dsi1pll_post_vco_div4		0x90628974
+#define clk_dsi1pll_bitclk_src			0x13ab045b
+#define clk_dsi1pll_vco_clk			0x99797b50
+
+#define clk_dp_vco_clk				0xfcaaeec7
+#define clk_dp_link_2x_clk_divsel_five		0xcfe3f5dd
+#define clk_vco_divsel_four_clk_src		0xe0da19c0
+#define clk_vco_divsel_two_clk_src		0xb5cfc6a8
+#define clk_vco_divided_clk_src_mux		0x3f8197c2
+#define clk_hdmi_vco_clk			0xbb7dc20d
+
+/* clock_gpu controlled clocks */
+#define clk_gpucc_xo				0xc4e1a890
+#define clk_gpucc_gpll0				0x0db0e37f
+#define clk_gfx3d_clk_src			0x917f76ef
+#define clk_rbbmtimer_clk_src			0x17649ecc
+#define clk_gfx3d_isense_clk_src		0xecc3eafa
+#define clk_rbcpr_clk_src			0x2c2e9af2
+#define clk_gpu_debug_div_clk			0x75d6f53f
+#define clk_gpucc_gfx3d_clk			0x95f01bd5
+#define clk_gpucc_rbbmtimer_clk			0x58a0a7ca
+#define clk_gpucc_gfx3d_isense_clk		0xb2678e80
+#define clk_gpucc_cxo_clk			0x6532dcae
+#define clk_gpucc_rbcpr_clk			0x7bd750e8
+#define clk_gpu_pll0_pll			0x0e61ab4d
+#define clk_gpu_pll0_pll_out_even		0xb0ed5009
+#define clk_gpu_pll0_pll_out_odd		0x08c5a8a5
+#define clk_gpu_pll0_postdiv_clk		0x76c19f3c
+#define clk_gpucc_mx_clk			0x1edbb879
+#define clk_gpucc_gcc_dbg_clk			0x9ae8cd3c
+#define clk_gfxcc_dbg_clk			0x3ed47625
+
+/* CPU clocks */
+#define clk_pwrcl_clk				0xc554130e
+#define clk_perfcl_clk				0x58869997
+#define clk_sys_apcsaux_clk_gcc			0xf905e862
+#define clk_xo_ao				0x428c856d
+#define clk_osm_clk_src				0xaabe68c3
+#define clk_cpu_debug_mux			0x3ae8bcb2
+
+/* Audio External Clocks */
+#define clk_audio_ap_clk			0x9b5727cb
+#define clk_audio_pmi_clk			0xcbfe416d
+#define clk_audio_ap_clk2			0x454d1e91
+
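The clock identifiers above are 32-bit hashes derived from the clock names
rather than small sequential indices, which is why the values look random. A
consumer's "clocks" cell in a device tree carries the hash, and the provider
driver resolves it by searching a lookup table instead of indexing an array.
A minimal sketch of that resolution step in C, assuming a hypothetical table
type (the struct, function, and entries here are illustrative, not the
driver's actual API; the two IDs are copied from the listing above):

#include <stdint.h>
#include <stddef.h>

/* Hypothetical provider-side table pairing hashed IDs with the clocks
 * they name; a real driver performs the equivalent lookup when it
 * translates a consumer's "clocks" cell. */
struct hashed_clk {
	uint32_t id;
	const char *name;
};

static const struct hashed_clk clk_table[] = {
	{ 0x8caa5b4f, "gcc_blsp1_ahb_clk" },        /* clk_gcc_blsp1_ahb_clk */
	{ 0xc7c62f90, "gcc_blsp1_uart1_apps_clk" }, /* clk_gcc_blsp1_uart1_apps_clk */
};

static const char *resolve_clk(uint32_t cell)
{
	size_t i;

	for (i = 0; i < sizeof(clk_table) / sizeof(clk_table[0]); i++)
		if (clk_table[i].id == cell)
			return clk_table[i].name;
	return NULL; /* the DT referenced a hash this provider lacks */
}
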
+/* GCC block resets */
+#define QUSB2PHY_PRIM_BCR			0
+#define QUSB2PHY_SEC_BCR			1
+#define BLSP1_BCR				2
+#define BLSP2_BCR				3
+#define BOOT_ROM_BCR				4
+#define PRNG_BCR				5
+#define UFS_BCR					6
+#define USB_30_BCR				7
+#define USB3_PHY_BCR				8
+#define USB3PHY_PHY_BCR				9
+#define PCIE_0_PHY_BCR				10
+#define PCIE_PHY_BCR				11
+#define PCIE_PHY_COM_BCR			12
+#define PCIE_PHY_NOCSR_COM_PHY_BCR		13
+
+/* MMSS block resets */
+#define CAMSS_MICRO_BCR				0
+
+#endif
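
In contrast to the hashed clock IDs, the GCC and MMSS block-reset macros
above are plain indices into the provider's reset lines; a device tree node
would reference one as, for example, resets = <&clock_gcc USB_30_BCR>. A
hedged sketch of a driver cycling such a reset through the stock kernel
reset framework follows; the reset_control_* calls are the real API, while
the helper name, the "core" reset-names entry, and the hold time are
assumptions:

#include <linux/delay.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/reset.h>

/* Pulse the block reset listed as "core" in this device's
 * "reset-names" property, e.g. the line behind USB_30_BCR. */
static int pulse_block_reset(struct device *dev)
{
	struct reset_control *rst = devm_reset_control_get(dev, "core");

	if (IS_ERR(rst))
		return PTR_ERR(rst);
	reset_control_assert(rst);
	udelay(10);		/* hold time is block-specific */
	return reset_control_deassert(rst);
}
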
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/arch/arm/boot/dts/include/dt-bindings/clock/msm-clocks-hwio-8998.h	2019-01-22 16:16:28.171288678 +0100
@@ -0,0 +1,395 @@
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define CLKFLAG_NO_RATE_CACHE					0x00004000
+
+#define FMAX_LOWER						0
+#define FMAX_LOW						1
+#define FMAX_NOM						2
+#define FMAX_TURBO						3
+
+#define HALT_CHECK_DELAY					5
+
+#define RPM_MISC_CLK_TYPE					0x306b6c63
+#define RPM_BUS_CLK_TYPE					0x316b6c63
+#define RPM_MEM_CLK_TYPE					0x326b6c63
+#define RPM_IPA_CLK_TYPE					0x617069
+#define RPM_CE_CLK_TYPE						0x6563
+#define RPM_AGGR_CLK_TYPE					0x72676761
+#define RPM_SMD_KEY_ENABLE					0x62616E45
+#define RPM_MMAXI_CLK_TYPE					0x69786d6d
+
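The RPM_*_CLK_TYPE values just above are not hashes like the clock IDs
elsewhere in these headers: each is a little-endian FourCC of a short ASCII
tag ("clk0", "clk1", "clk2", "ipa", "ce", "aggr", "mmxi", and "Enab" for the
SMD enable key). A small self-checking C sketch that reconstructs three of
them; the FOURCC macro is mine, not the header's:

#include <assert.h>

/* Little-endian FourCC: the first character lands in the low byte. */
#define FOURCC(a, b, c, d) \
	(((unsigned)(d) << 24) | ((unsigned)(c) << 16) | \
	 ((unsigned)(b) << 8)  |  (unsigned)(a))

int main(void)
{
	assert(FOURCC('c', 'l', 'k', '0') == 0x306b6c63); /* RPM_MISC_CLK_TYPE */
	assert(FOURCC('a', 'g', 'g', 'r') == 0x72676761); /* RPM_AGGR_CLK_TYPE */
	assert(FOURCC('E', 'n', 'a', 'b') == 0x62616E45); /* RPM_SMD_KEY_ENABLE */
	return 0;
}
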
+#define CXO_CLK_SRC_ID						0x0
+#define QDSS_CLK_ID						0x1
+#define SNOC_CLK_ID						0x1
+#define CNOC_CLK_ID						0x2
+#define CNOC_PERIPH_CLK_ID					0x0
+#define BIMC_CLK_ID						0x0
+#define IPA_CLK_ID						0x0
+#define CE1_CLK_ID						0x0
+#define RF_CLK1_ID						0x4
+#define RF_CLK2_ID						0x5
+#define RF_CLK3_ID						0x6
+#define LN_BB_CLK1_ID						0x1
+#define LN_BB_CLK2_ID						0x2
+#define LN_BB_CLK3_ID						0x3
+#define DIV_CLK1_ID						0xb
+#define DIV_CLK2_ID						0xc
+#define DIV_CLK3_ID						0xd
+#define RF_CLK1_PIN_ID						0x4
+#define RF_CLK2_PIN_ID						0x5
+#define RF_CLK3_PIN_ID						0x6
+#define LN_BB_CLK1_PIN_ID					0x1
+#define LN_BB_CLK2_PIN_ID					0x2
+#define LN_BB_CLK3_PIN_ID					0x3
+#define AGGR1_NOC_ID						0x1
+#define AGGR2_NOC_ID						0x2
+#define MMSSNOC_AXI_CLK_ID					0x0
+
+#define APCS_COMMON_LMH_CMD_RCGR				0x0012C
+
+#define GCC_APCS_GPLL_ENA_VOTE					0x52000
+#define GCC_APCS_CLOCK_BRANCH_ENA_VOTE				0x52004
+#define GCC_APCS_CLOCK_BRANCH_ENA_VOTE_1			0x5200C
+#define PLLTEST_PAD_CFG						0x6200C
+#define GCC_XO_DIV4_CBCR					0x43008
+#define CLOCK_FRQ_MEASURE_CTL					0x62004
+#define CLOCK_FRQ_MEASURE_STATUS				0x62008
+#define GCC_GPLL0_MODE						0x00000
+#define GCC_GPLL4_MODE						0x77000
+#define GCC_USB30_MASTER_CMD_RCGR				0x0F014
+#define GCC_PCIE_AUX_CMD_RCGR					0x6C000
+#define GCC_UFS_AXI_CMD_RCGR					0x75018
+#define GCC_BLSP1_QUP1_I2C_APPS_CMD_RCGR			0x19020
+#define GCC_BLSP1_QUP1_SPI_APPS_CMD_RCGR			0x1900C
+#define GCC_BLSP1_QUP2_I2C_APPS_CMD_RCGR			0x1B020
+#define GCC_BLSP1_QUP2_SPI_APPS_CMD_RCGR			0x1B00C
+#define GCC_BLSP1_QUP3_I2C_APPS_CMD_RCGR			0x1D020
+#define GCC_BLSP1_QUP3_SPI_APPS_CMD_RCGR			0x1D00C
+#define GCC_BLSP1_QUP4_I2C_APPS_CMD_RCGR			0x1F020
+#define GCC_BLSP1_QUP4_SPI_APPS_CMD_RCGR			0x1F00C
+#define GCC_BLSP1_QUP5_I2C_APPS_CMD_RCGR			0x21020
+#define GCC_BLSP1_QUP5_SPI_APPS_CMD_RCGR			0x2100C
+#define GCC_BLSP1_QUP6_I2C_APPS_CMD_RCGR			0x23020
+#define GCC_BLSP1_QUP6_SPI_APPS_CMD_RCGR			0x2300C
+#define GCC_BLSP1_UART1_APPS_CMD_RCGR				0x1A00C
+#define GCC_BLSP1_UART2_APPS_CMD_RCGR				0x1C00C
+#define GCC_BLSP1_UART3_APPS_CMD_RCGR				0x1E00C
+#define GCC_BLSP2_QUP1_I2C_APPS_CMD_RCGR			0x26020
+#define GCC_BLSP2_QUP2_I2C_APPS_CMD_RCGR			0x28020
+#define GCC_BLSP2_QUP3_I2C_APPS_CMD_RCGR			0x2A020
+#define GCC_BLSP2_QUP4_I2C_APPS_CMD_RCGR			0x2C020
+#define GCC_BLSP2_QUP5_I2C_APPS_CMD_RCGR			0x2E020
+#define GCC_BLSP2_QUP6_I2C_APPS_CMD_RCGR			0x30020
+#define GCC_BLSP2_QUP1_SPI_APPS_CMD_RCGR			0x2600C
+#define GCC_BLSP2_QUP2_SPI_APPS_CMD_RCGR			0x2800C
+#define GCC_BLSP2_QUP3_SPI_APPS_CMD_RCGR			0x2A00C
+#define GCC_BLSP2_QUP4_SPI_APPS_CMD_RCGR			0x2C00C
+#define GCC_BLSP2_QUP5_SPI_APPS_CMD_RCGR			0x2E00C
+#define GCC_BLSP2_QUP6_SPI_APPS_CMD_RCGR			0x3000C
+#define GCC_BLSP2_UART1_APPS_CMD_RCGR				0x2700C
+#define GCC_BLSP2_UART2_APPS_CMD_RCGR				0x2900C
+#define GCC_BLSP2_UART3_APPS_CMD_RCGR				0x2B00C
+#define GCC_GP1_CMD_RCGR					0x64004
+#define GCC_GP2_CMD_RCGR					0x65004
+#define GCC_GP3_CMD_RCGR					0x66004
+#define GCC_HMSS_RBCPR_CMD_RCGR					0x48044
+#define GCC_PDM2_CMD_RCGR					0x33010
+#define GCC_SDCC2_APPS_CMD_RCGR					0x14010
+#define GCC_SDCC4_APPS_CMD_RCGR					0x16010
+#define GCC_TSIF_REF_CMD_RCGR					0x36010
+#define GCC_UFS_ICE_CORE_CMD_RCGR				0x76010
+#define GCC_UFS_PHY_AUX_CMD_RCGR				0x76044
+#define GCC_UFS_UNIPRO_CORE_CMD_RCGR				0x76028
+#define GCC_USB30_MOCK_UTMI_CMD_RCGR				0x0F028
+#define GCC_USB3_PHY_AUX_CMD_RCGR				0x5000C
+#define GCC_QSPI_REF_CMD_RCGR					0x9000C
+#define GCC_PCIE_0_PHY_BCR					0x6C01C
+#define GCC_HDMI_CLKREF_EN					0x88000
+#define GCC_UFS_CLKREF_EN					0x88004
+#define GCC_USB3_CLKREF_EN					0x88008
+#define GCC_PCIE_CLKREF_EN					0x8800C
+#define GCC_RX1_USB2_CLKREF_EN					0x88014
+#define GCC_USB3_PHY_BCR					0x50020
+#define GCC_AGGRE1_NOC_XO_CBCR					0x8202C
+#define GCC_AGGRE1_UFS_AXI_CBCR					0x82028
+#define GCC_AGGRE1_USB3_AXI_CBCR				0x82024
+#define GCC_BIMC_MSS_Q6_AXI_CBCR				0x4401C
+#define GCC_BLSP1_AHB_CBCR					0x17004
+#define GCC_BLSP1_BCR						0x17000
+#define GCC_BLSP1_QUP1_SPI_APPS_CBCR				0x19004
+#define GCC_BLSP1_QUP1_I2C_APPS_CBCR				0x19008
+#define GCC_BLSP1_QUP2_SPI_APPS_CBCR				0x1B004
+#define GCC_BLSP1_QUP2_I2C_APPS_CBCR				0x1B008
+#define GCC_BLSP1_QUP3_SPI_APPS_CBCR				0x1D004
+#define GCC_BLSP1_QUP3_I2C_APPS_CBCR				0x1D008
+#define GCC_BLSP1_QUP4_SPI_APPS_CBCR				0x1F004
+#define GCC_BLSP1_QUP4_I2C_APPS_CBCR				0x1F008
+#define GCC_BLSP1_QUP5_SPI_APPS_CBCR				0x21004
+#define GCC_BLSP1_QUP5_I2C_APPS_CBCR				0x21008
+#define GCC_BLSP1_QUP6_SPI_APPS_CBCR				0x23004
+#define GCC_BLSP1_QUP6_I2C_APPS_CBCR				0x23008
+#define GCC_BLSP1_UART1_APPS_CBCR				0x1A004
+#define GCC_BLSP1_UART2_APPS_CBCR				0x1C004
+#define GCC_BLSP1_UART3_APPS_CBCR				0x1E004
+#define GCC_BLSP2_AHB_CBCR					0x25004
+#define GCC_BLSP2_BCR						0x25000
+#define GCC_BLSP2_QUP1_SPI_APPS_CBCR				0x26004
+#define GCC_BLSP2_QUP1_I2C_APPS_CBCR				0x26008
+#define GCC_BLSP2_QUP2_I2C_APPS_CBCR				0x28008
+#define GCC_BLSP2_QUP2_SPI_APPS_CBCR				0x28004
+#define GCC_BLSP2_QUP3_SPI_APPS_CBCR				0x2A004
+#define GCC_BLSP2_QUP3_I2C_APPS_CBCR				0x2A008
+#define GCC_BLSP2_QUP4_SPI_APPS_CBCR				0x2C004
+#define GCC_BLSP2_QUP4_I2C_APPS_CBCR				0x2C008
+#define GCC_BLSP2_QUP5_SPI_APPS_CBCR				0x2E004
+#define GCC_BLSP2_QUP5_I2C_APPS_CBCR				0x2E008
+#define GCC_BLSP2_QUP6_SPI_APPS_CBCR				0x30004
+#define GCC_BLSP2_QUP6_I2C_APPS_CBCR				0x30008
+#define GCC_BLSP2_UART1_APPS_CBCR				0x27004
+#define GCC_BLSP2_UART2_APPS_CBCR				0x29004
+#define GCC_BLSP2_UART3_APPS_CBCR				0x2B004
+#define GCC_BOOT_ROM_AHB_CBCR					0x38004
+#define GCC_BOOT_ROM_BCR					0x38000
+#define GCC_CFG_NOC_USB3_AXI_CBCR				0x05018
+#define GCC_BIMC_GFX_CBCR					0x46040
+#define GCC_GP1_CBCR						0x64000
+#define GCC_GP2_CBCR						0x65000
+#define GCC_GP3_CBCR						0x66000
+#define GCC_GPU_BIMC_GFX_CBCR					0x71010
+#define GCC_GPU_CFG_AHB_CBCR					0x71004
+#define GCC_GPU_IREF_EN						0x88010
+#define GCC_HMSS_DVM_BUS_CBCR					0x4808C
+#define GCC_HMSS_RBCPR_CBCR					0x48008
+#define GCC_MMSS_SYS_NOC_AXI_CBCR				0x09000
+#define GCC_MMSS_NOC_CFG_AHB_CBCR				0x09004
+#define GCC_PCIE_0_SLV_AXI_CBCR					0x6B008
+#define GCC_PCIE_0_MSTR_AXI_CBCR				0x6B00C
+#define GCC_PCIE_0_CFG_AHB_CBCR					0x6B010
+#define GCC_PCIE_0_AUX_CBCR					0x6B014
+#define GCC_PCIE_0_PIPE_CBCR					0x6B018
+#define GCC_PCIE_PHY_AUX_CBCR					0x6F004
+#define GCC_PCIE_PHY_BCR					0x6F000
+#define GCC_PCIE_PHY_COM_BCR					0x6F014
+#define GCC_PCIE_PHY_NOCSR_COM_PHY_BCR				0x6F00C
+#define GCC_QUSB2PHY_PRIM_BCR					0x12000
+#define GCC_QUSB2PHY_SEC_BCR					0x12004
+#define GCC_PDM2_CBCR						0x3300C
+#define GCC_PDM_AHB_CBCR					0x33004
+#define GCC_PRNG_AHB_CBCR					0x34004
+#define GCC_PRNG_BCR						0x34000
+#define GCC_SDCC2_APPS_CBCR					0x14004
+#define GCC_SDCC2_AHB_CBCR					0x14008
+#define GCC_SDCC4_APPS_CBCR					0x16004
+#define GCC_SDCC4_AHB_CBCR					0x16008
+#define GCC_TSIF_AHB_CBCR					0x36004
+#define GCC_TSIF_REF_CBCR					0x36008
+#define GCC_UFS_AXI_CBCR					0x75008
+#define GCC_UFS_BCR						0x75000
+#define GCC_UFS_AHB_CBCR					0x7500C
+#define GCC_UFS_TX_SYMBOL_0_CBCR				0x75010
+#define GCC_UFS_RX_SYMBOL_0_CBCR				0x75014
+#define GCC_UFS_RX_SYMBOL_1_CBCR				0x7605C
+#define GCC_UFS_UNIPRO_CORE_CBCR				0x76008
+#define GCC_UFS_ICE_CORE_CBCR					0x7600C
+#define GCC_UFS_PHY_AUX_CBCR					0x76040
+#define GCC_USB30_MASTER_CBCR					0x0F008
+#define GCC_USB30_SLEEP_CBCR					0x0F00C
+#define GCC_USB30_MOCK_UTMI_CBCR				0x0F010
+#define GCC_USB_30_BCR						0x0F000
+#define GCC_USB3_PHY_AUX_CBCR					0x50000
+#define GCC_USB3_PHY_PIPE_CBCR					0x50004
+#define GCC_USB3PHY_PHY_BCR					0x50024
+#define GCC_APCS_CLOCK_SLEEP_ENA_VOTE				0x52008
+#define GCC_MSS_CFG_AHB_CBCR					0x8A000
+#define GCC_MSS_Q6_BIMC_AXI_CBCR				0x8A040
+#define GCC_MSS_MNOC_BIMC_AXI_CBCR				0x8A004
+#define GCC_MSS_SNOC_AXI_CBCR					0x8A03C
+#define GCC_HMSS_GPLL0_CMD_RCGR					0x4805C
+#define GCC_DCC_AHB_CBCR					0x84004
+#define GCC_HLOS1_VOTE_LPASS_CORE_SMMU_CBCR			0x7D010
+#define GCC_HLOS1_VOTE_LPASS_ADSP_SMMU_CBCR			0x7D014
+#define GCC_QSPI_AHB_CBCR					0x90004
+#define GCC_QSPI_REF_CBCR					0x90008
+#define GCC_MMSS_MISC						0x0902C
+#define GCC_GPU_MISC						0x71028
+
+#define GPUCC_GPU_PLL0_PLL_MODE					0x00000
+#define GPUCC_GPU_PLL0_USER_CTL_MODE				0x0000C
+#define GPUCC_GFX3D_CMD_RCGR					0x01070
+#define GPUCC_RBBMTIMER_CMD_RCGR				0x010B0
+#define GPUCC_GFX3D_ISENSE_CMD_RCGR				0x01100
+#define GPUCC_RBCPR_CMD_RCGR					0x01030
+#define GPUCC_GFX3D_CBCR					0x01098
+#define GPUCC_RBBMTIMER_CBCR					0x010D0
+#define GPUCC_GFX3D_ISENSE_CBCR					0x01124
+#define GPUCC_CXO_CBCR						0x01020
+#define GPUCC_RBCPR_CBCR					0x01054
+#define GPU_GX_BCR						0x01090
+#define GPUCC_GX_DOMAIN_MISC					0x00130
+#define GPUCC_GPU_DD_WRAP_CTRL					0x00430
+#define GPUCC_DEBUG_CLK_CTL					0x00120
+
+#define MMSS_PLL_VOTE_APCS					0x001E0
+#define MMSS_MMPLL0_PLL_MODE					0x0C000
+#define MMSS_MMPLL1_PLL_MODE					0x0C050
+#define MMSS_MMPLL3_PLL_MODE					0x00000
+#define MMSS_MMPLL4_PLL_MODE					0x00050
+#define MMSS_MMPLL5_PLL_MODE					0x000A0
+#define MMSS_MMPLL6_PLL_MODE					0x000F0
+#define MMSS_MMPLL7_PLL_MODE					0x00140
+#define MMSS_MMPLL10_PLL_MODE					0x00190
+#define MMSS_AHB_CMD_RCGR					0x05000
+#define MMSS_CSI0_CMD_RCGR					0x03090
+#define MMSS_VFE0_CMD_RCGR					0x03600
+#define MMSS_VFE1_CMD_RCGR					0x03620
+#define MMSS_MDP_CMD_RCGR					0x02040
+#define MMSS_MAXI_CMD_RCGR					0x0F020
+#define MMSS_CPP_CMD_RCGR					0x03640
+#define MMSS_JPEG0_CMD_RCGR					0x03500
+#define MMSS_ROT_CMD_RCGR					0x021A0
+#define MMSS_VIDEO_CORE_CMD_RCGR				0x01000
+#define MMSS_CSI1_CMD_RCGR					0x03100
+#define MMSS_CSI2_CMD_RCGR					0x03160
+#define MMSS_CSI3_CMD_RCGR					0x031C0
+#define MMSS_FD_CORE_CMD_RCGR					0x03B00
+#define MMSS_BYTE0_CMD_RCGR					0x02120
+#define MMSS_BYTE1_CMD_RCGR					0x02140
+#define MMSS_PCLK0_CMD_RCGR					0x02000
+#define MMSS_PCLK1_CMD_RCGR					0x02020
+#define MMSS_VIDEO_SUBCORE0_CMD_RCGR				0x01060
+#define MMSS_VIDEO_SUBCORE1_CMD_RCGR				0x01080
+#define MMSS_CSIPHY_CMD_RCGR					0x03800
+#define MMSS_CCI_CMD_RCGR					0x03300
+#define MMSS_CAMSS_GP0_CMD_RCGR					0x03420
+#define MMSS_CAMSS_GP1_CMD_RCGR					0x03450
+#define MMSS_MCLK0_CMD_RCGR					0x03360
+#define MMSS_MCLK1_CMD_RCGR					0x03390
+#define MMSS_MCLK2_CMD_RCGR					0x033C0
+#define MMSS_MCLK3_CMD_RCGR					0x033F0
+#define MMSS_CAMSS_CSI2PHYTIMER_CBCR				0x03084
+#define MMSS_CSI0PHYTIMER_CMD_RCGR				0x03000
+#define MMSS_CSI1PHYTIMER_CMD_RCGR				0x03030
+#define MMSS_CSI2PHYTIMER_CMD_RCGR				0x03060
+#define MMSS_DP_GTC_CMD_RCGR					0x02280
+#define MMSS_ESC0_CMD_RCGR					0x02160
+#define MMSS_ESC1_CMD_RCGR					0x02180
+#define MMSS_EXTPCLK_CMD_RCGR					0x02060
+#define MMSS_HDMI_CMD_RCGR					0x02100
+#define MMSS_VSYNC_CMD_RCGR					0x02080
+#define MMSS_BIMC_SMMU_AHB_CBCR					0x0E004
+#define MMSS_BIMC_SMMU_AXI_CBCR					0x0E008
+#define MMSS_SNOC_DVM_AXI_CBCR					0x0E040
+#define MMSS_CAMSS_AHB_CBCR					0x0348C
+#define MMSS_CAMSS_CCI_AHB_CBCR					0x03348
+#define MMSS_CAMSS_CCI_CBCR					0x03344
+#define MMSS_CAMSS_CPP_AHB_CBCR					0x036B4
+#define MMSS_CAMSS_CPP_CBCR					0x036B0
+#define MMSS_CAMSS_CPP_AXI_CBCR					0x036C4
+#define MMSS_CAMSS_CPP_VBIF_AHB_CBCR				0x036C8
+#define MMSS_CAMSS_CPHY_CSID0_CBCR				0x03730
+#define MMSS_CAMSS_CSI0_AHB_CBCR				0x030BC
+#define MMSS_CAMSS_CSI0_CBCR					0x030B4
+#define MMSS_CAMSS_CSI0PIX_CBCR					0x030E4
+#define MMSS_CAMSS_CSI0RDI_CBCR					0x030D4
+#define MMSS_CAMSS_CPHY_CSID1_CBCR				0x03734
+#define MMSS_CAMSS_CSI1_AHB_CBCR				0x03128
+#define MMSS_CAMSS_CSI1_CBCR					0x03124
+#define MMSS_CAMSS_CSI1PIX_CBCR					0x03154
+#define MMSS_CAMSS_CSI1RDI_CBCR					0x03144
+#define MMSS_CAMSS_CPHY_CSID2_CBCR				0x03738
+#define MMSS_CAMSS_CSI2_AHB_CBCR				0x03188
+#define MMSS_CAMSS_CSI2_CBCR					0x03184
+#define MMSS_CAMSS_CSI2PIX_CBCR					0x031B4
+#define MMSS_CAMSS_CSI2RDI_CBCR					0x031A4
+#define MMSS_CAMSS_CPHY_CSID3_CBCR				0x0373C
+#define MMSS_CAMSS_CSI3_AHB_CBCR				0x031E8
+#define MMSS_CAMSS_CSI3_CBCR					0x031E4
+#define MMSS_CAMSS_CSI3PIX_CBCR					0x03214
+#define MMSS_CAMSS_CSI3RDI_CBCR					0x03204
+#define MMSS_CAMSS_CSI_VFE0_CBCR				0x03704
+#define MMSS_CAMSS_CSI_VFE1_CBCR				0x03714
+#define MMSS_CAMSS_CSIPHY0_CBCR					0x03740
+#define MMSS_CAMSS_CSIPHY1_CBCR					0x03744
+#define MMSS_CAMSS_CSIPHY2_CBCR					0x03748
+#define MMSS_FD_AHB_CBCR					0x03B74
+#define MMSS_FD_CORE_CBCR					0x03B68
+#define MMSS_FD_CORE_UAR_CBCR					0x03B6C
+#define MMSS_CAMSS_GP0_CBCR					0x03444
+#define MMSS_CAMSS_GP1_CBCR					0x03474
+#define MMSS_CAMSS_ISPIF_AHB_CBCR				0x03224
+#define MMSS_CAMSS_JPEG0_CBCR					0x035A8
+#define MMSS_CAMSS_JPEG_AHB_CBCR				0x035B4
+#define MMSS_CAMSS_JPEG_AXI_CBCR				0x035B8
+#define MMSS_CAMSS_MCLK0_CBCR					0x03384
+#define MMSS_CAMSS_MCLK1_CBCR					0x033B4
+#define MMSS_CAMSS_MCLK2_CBCR					0x033E4
+#define MMSS_CAMSS_MCLK3_CBCR					0x03414
+#define MMSS_CAMSS_MICRO_AHB_CBCR				0x03494
+#define MMSS_CAMSS_CSI0PHYTIMER_CBCR				0x03024
+#define MMSS_CAMSS_CSI1PHYTIMER_CBCR				0x03054
+#define MMSS_CAMSS_TOP_AHB_CBCR					0x03484
+#define MMSS_CAMSS_VFE0_AHB_CBCR				0x03668
+#define MMSS_CAMSS_VFE0_CBCR					0x036A8
+#define MMSS_CAMSS_VFE0_STREAM_CBCR				0x03720
+#define MMSS_CAMSS_VFE1_AHB_CBCR				0x03678
+#define MMSS_CAMSS_VFE1_CBCR					0x036AC
+#define MMSS_CAMSS_VFE1_STREAM_CBCR				0x03724
+#define MMSS_CAMSS_VFE_VBIF_AHB_CBCR				0x036B8
+#define MMSS_CAMSS_VFE_VBIF_AXI_CBCR				0x036BC
+#define MMSS_MDSS_AHB_CBCR					0x02308
+#define MMSS_MDSS_AXI_CBCR					0x02310
+#define MMSS_MDSS_BYTE0_CBCR					0x0233C
+#define MMSS_MDSS_BYTE0_INTF_CBCR				0x02374
+#define MMSS_MDSS_BYTE0_INTF_DIV				0x0237C
+#define MMSS_MDSS_BYTE1_CBCR					0x02340
+#define MMSS_MDSS_BYTE1_INTF_CBCR				0x02378
+#define MMSS_MDSS_BYTE1_INTF_DIV				0x02380
+#define MMSS_MDSS_DP_AUX_CBCR					0x02364
+#define MMSS_MDSS_DP_CRYPTO_CBCR				0x0235C
+#define MMSS_MDSS_DP_GTC_CBCR					0x02368
+#define MMSS_MDSS_DP_LINK_CBCR					0x02354
+#define MMSS_MDSS_DP_LINK_INTF_CBCR				0x02358
+#define MMSS_MDSS_DP_PIXEL_CBCR					0x02360
+#define MMSS_MDSS_ESC0_CBCR					0x02344
+#define MMSS_MDSS_ESC1_CBCR					0x02348
+#define MMSS_MDSS_EXTPCLK_CBCR					0x02324
+#define MMSS_MDSS_HDMI_CBCR					0x02338
+#define MMSS_MDSS_HDMI_DP_AHB_CBCR				0x0230C
+#define MMSS_MDSS_MDP_CBCR					0x0231C
+#define MMSS_MDSS_MDP_LUT_CBCR					0x02320
+#define MMSS_MDSS_PCLK0_CBCR					0x02314
+#define MMSS_MDSS_PCLK1_CBCR					0x02318
+#define MMSS_MDSS_ROT_CBCR					0x02350
+#define MMSS_MDSS_VSYNC_CBCR					0x02328
+#define MMSS_MISC_AHB_CBCR					0x00328
+#define MMSS_MISC_CXO_CBCR					0x00324
+#define MMSS_MNOC_AHB_CBCR					0x05024
+#define MMSS_MNOC_MAXI_CBCR					0x0F004
+#define MMSS_VIDEO_SUBCORE0_CBCR				0x01048
+#define MMSS_VIDEO_SUBCORE1_CBCR				0x0104C
+#define MMSS_VIDEO_AHB_CBCR					0x01030
+#define MMSS_VIDEO_AXI_CBCR					0x01034
+#define MMSS_VIDEO_CORE_CBCR					0x01028
+#define MMSS_VIDEO_MAXI_CBCR					0x01038
+#define MMSS_VMEM_AHB_CBCR					0x0F068
+#define MMSS_VMEM_MAXI_CBCR					0x0F064
+#define MMSS_DP_AUX_CMD_RCGR					0x02260
+#define MMSS_DP_CRYPTO_CMD_RCGR					0x02220
+#define MMSS_DP_LINK_CMD_RCGR					0x02200
+#define MMSS_DP_PIXEL_CMD_RCGR					0x02240
+#define MMSS_DEBUG_CLK_CTL					0x00900
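
The *_CBCR offsets in this header address qcom clock-branch control
registers relative to the controller's register base: bit 0 of a branch
register gates the clock on, and bit 31 (CLK_OFF) reads back the gate
status, so enabling a branch is a write followed by a status poll. A minimal
sketch under those assumptions; the helper name and the polling budget are
illustrative:

#include <stdint.h>

#define CBCR_CLK_ENABLE	(1u << 0)	/* write 1 to ungate the branch */
#define CBCR_CLK_OFF	(1u << 31)	/* reads 1 while the branch is off */

/* base points at the already-mapped GCC register block; cbcr is an
 * offset from this header, e.g. GCC_BLSP1_UART1_APPS_CBCR (0x1A004). */
static int branch_enable(volatile uint32_t *base, uint32_t cbcr)
{
	volatile uint32_t *reg = base + cbcr / sizeof(uint32_t);
	int spin = 200;

	*reg |= CBCR_CLK_ENABLE;
	while ((*reg & CBCR_CLK_OFF) && --spin)
		;		/* poll until the status bit clears */
	return spin ? 0 : -1;	/* -1: branch stayed stuck off */
}
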
diff -Nruw linux-4.4.115/arch/arm/boot/dts/include/dt-bindings/msm/msm-bus-ids.h linux-4.4.115-fbx/arch/arm/boot/dts/include/dt-bindings/msm/msm-bus-ids.h
--- linux-4.4.115/arch/arm/boot/dts/include/dt-bindings/msm/msm-bus-ids.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/arch/arm/boot/dts/include/dt-bindings/msm/msm-bus-ids.h	2019-01-22 16:16:28.179288750 +0100
@@ -0,0 +1,887 @@
+/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MSM_BUS_IDS_H
+#define __MSM_BUS_IDS_H
+
+/* Aggregation types */
+#define AGG_SCHEME_NONE	0
+#define AGG_SCHEME_LEG	1
+#define AGG_SCHEME_1	2
+
+/* Topology related enums */
+#define	MSM_BUS_FAB_DEFAULT 0
+#define	MSM_BUS_FAB_APPSS 0
+#define	MSM_BUS_FAB_SYSTEM 1024
+#define	MSM_BUS_FAB_MMSS 2048
+#define	MSM_BUS_FAB_SYSTEM_FPB 3072
+#define	MSM_BUS_FAB_CPSS_FPB 4096
+
+#define	MSM_BUS_FAB_BIMC 0
+#define	MSM_BUS_FAB_SYS_NOC 1024
+#define	MSM_BUS_FAB_MMSS_NOC 2048
+#define	MSM_BUS_FAB_OCMEM_NOC 3072
+#define	MSM_BUS_FAB_PERIPH_NOC 4096
+#define	MSM_BUS_FAB_CONFIG_NOC 5120
+#define	MSM_BUS_FAB_OCMEM_VNOC 6144
+#define	MSM_BUS_FAB_MMSS_AHB 2049
+#define	MSM_BUS_FAB_A0_NOC 6145
+#define	MSM_BUS_FAB_A1_NOC 6146
+#define	MSM_BUS_FAB_A2_NOC 6147
+#define	MSM_BUS_FAB_GNOC 6148
+#define	MSM_BUS_FAB_CR_VIRT 6149
+
+#define	MSM_BUS_MASTER_FIRST 1
+#define	MSM_BUS_MASTER_AMPSS_M0 1
+#define	MSM_BUS_MASTER_AMPSS_M1 2
+#define	MSM_BUS_APPSS_MASTER_FAB_MMSS 3
+#define	MSM_BUS_APPSS_MASTER_FAB_SYSTEM 4
+#define	MSM_BUS_SYSTEM_MASTER_FAB_APPSS 5
+#define	MSM_BUS_MASTER_SPS 6
+#define	MSM_BUS_MASTER_ADM_PORT0 7
+#define	MSM_BUS_MASTER_ADM_PORT1 8
+#define	MSM_BUS_SYSTEM_MASTER_ADM1_PORT0 9
+#define	MSM_BUS_MASTER_ADM1_PORT1 10
+#define	MSM_BUS_MASTER_LPASS_PROC 11
+#define	MSM_BUS_MASTER_MSS_PROCI 12
+#define	MSM_BUS_MASTER_MSS_PROCD 13
+#define	MSM_BUS_MASTER_MSS_MDM_PORT0 14
+#define	MSM_BUS_MASTER_LPASS 15
+#define	MSM_BUS_SYSTEM_MASTER_CPSS_FPB 16
+#define	MSM_BUS_SYSTEM_MASTER_SYSTEM_FPB 17
+#define	MSM_BUS_SYSTEM_MASTER_MMSS_FPB 18
+#define	MSM_BUS_MASTER_ADM1_CI 19
+#define	MSM_BUS_MASTER_ADM0_CI 20
+#define	MSM_BUS_MASTER_MSS_MDM_PORT1 21
+#define	MSM_BUS_MASTER_MDP_PORT0 22
+#define	MSM_BUS_MASTER_MDP_PORT1 23
+#define	MSM_BUS_MMSS_MASTER_ADM1_PORT0 24
+#define	MSM_BUS_MASTER_ROTATOR 25
+#define	MSM_BUS_MASTER_GRAPHICS_3D 26
+#define	MSM_BUS_MASTER_JPEG_DEC 27
+#define	MSM_BUS_MASTER_GRAPHICS_2D_CORE0 28
+#define	MSM_BUS_MASTER_VFE 29
+#define	MSM_BUS_MASTER_VFE0 MSM_BUS_MASTER_VFE
+#define	MSM_BUS_MASTER_VPE 30
+#define	MSM_BUS_MASTER_JPEG_ENC 31
+#define	MSM_BUS_MASTER_GRAPHICS_2D_CORE1 32
+#define	MSM_BUS_MMSS_MASTER_APPS_FAB 33
+#define	MSM_BUS_MASTER_HD_CODEC_PORT0 34
+#define	MSM_BUS_MASTER_HD_CODEC_PORT1 35
+#define	MSM_BUS_MASTER_SPDM 36
+#define	MSM_BUS_MASTER_RPM 37
+#define	MSM_BUS_MASTER_MSS 38
+#define	MSM_BUS_MASTER_RIVA 39
+#define	MSM_BUS_MASTER_SNOC_VMEM 40
+#define	MSM_BUS_MASTER_MSS_SW_PROC 41
+#define	MSM_BUS_MASTER_MSS_FW_PROC 42
+#define	MSM_BUS_MASTER_HMSS 43
+#define	MSM_BUS_MASTER_GSS_NAV 44
+#define	MSM_BUS_MASTER_PCIE 45
+#define	MSM_BUS_MASTER_SATA 46
+#define	MSM_BUS_MASTER_CRYPTO 47
+#define	MSM_BUS_MASTER_VIDEO_CAP 48
+#define	MSM_BUS_MASTER_GRAPHICS_3D_PORT1 49
+#define	MSM_BUS_MASTER_VIDEO_ENC 50
+#define	MSM_BUS_MASTER_VIDEO_DEC 51
+#define	MSM_BUS_MASTER_LPASS_AHB 52
+#define	MSM_BUS_MASTER_QDSS_BAM 53
+#define	MSM_BUS_MASTER_SNOC_CFG 54
+#define	MSM_BUS_MASTER_CRYPTO_CORE0 55
+#define	MSM_BUS_MASTER_CRYPTO_CORE1 56
+#define	MSM_BUS_MASTER_MSS_NAV 57
+#define	MSM_BUS_MASTER_OCMEM_DMA 58
+#define	MSM_BUS_MASTER_WCSS 59
+#define	MSM_BUS_MASTER_QDSS_ETR 60
+#define	MSM_BUS_MASTER_USB3 61
+#define	MSM_BUS_MASTER_JPEG 62
+#define	MSM_BUS_MASTER_VIDEO_P0 63
+#define	MSM_BUS_MASTER_VIDEO_P1 64
+#define	MSM_BUS_MASTER_MSS_PROC 65
+#define	MSM_BUS_MASTER_JPEG_OCMEM 66
+#define	MSM_BUS_MASTER_MDP_OCMEM 67
+#define	MSM_BUS_MASTER_VIDEO_P0_OCMEM 68
+#define	MSM_BUS_MASTER_VIDEO_P1_OCMEM 69
+#define	MSM_BUS_MASTER_VFE_OCMEM 70
+#define	MSM_BUS_MASTER_CNOC_ONOC_CFG 71
+#define	MSM_BUS_MASTER_RPM_INST 72
+#define	MSM_BUS_MASTER_RPM_DATA 73
+#define	MSM_BUS_MASTER_RPM_SYS 74
+#define	MSM_BUS_MASTER_DEHR 75
+#define	MSM_BUS_MASTER_QDSS_DAP 76
+#define	MSM_BUS_MASTER_TIC 77
+#define	MSM_BUS_MASTER_SDCC_1 78
+#define	MSM_BUS_MASTER_SDCC_3 79
+#define	MSM_BUS_MASTER_SDCC_4 80
+#define	MSM_BUS_MASTER_SDCC_2 81
+#define	MSM_BUS_MASTER_TSIF 82
+#define	MSM_BUS_MASTER_BAM_DMA 83
+#define	MSM_BUS_MASTER_BLSP_2 84
+#define	MSM_BUS_MASTER_USB_HSIC 85
+#define	MSM_BUS_MASTER_BLSP_1 86
+#define	MSM_BUS_MASTER_USB_HS 87
+#define	MSM_BUS_MASTER_PNOC_CFG 88
+#define	MSM_BUS_MASTER_V_OCMEM_GFX3D 89
+#define	MSM_BUS_MASTER_IPA 90
+#define	MSM_BUS_MASTER_QPIC 91
+#define	MSM_BUS_MASTER_MDPE 92
+#define	MSM_BUS_MASTER_USB_HS2 93
+#define	MSM_BUS_MASTER_VPU 94
+#define	MSM_BUS_MASTER_UFS 95
+#define	MSM_BUS_MASTER_BCAST 96
+#define	MSM_BUS_MASTER_CRYPTO_CORE2 97
+#define	MSM_BUS_MASTER_EMAC 98
+#define	MSM_BUS_MASTER_VPU_1 99
+#define	MSM_BUS_MASTER_PCIE_1 100
+#define	MSM_BUS_MASTER_USB3_1 101
+#define	MSM_BUS_MASTER_CNOC_MNOC_MMSS_CFG 102
+#define	MSM_BUS_MASTER_CNOC_MNOC_CFG 103
+#define	MSM_BUS_MASTER_TCU_0 104
+#define	MSM_BUS_MASTER_TCU_1 105
+#define	MSM_BUS_MASTER_CPP 106
+#define	MSM_BUS_MASTER_AUDIO 107
+#define	MSM_BUS_MASTER_PCIE_2 108
+#define	MSM_BUS_MASTER_VFE1 109
+#define	MSM_BUS_MASTER_XM_USB_HS1 110
+#define	MSM_BUS_MASTER_PCNOC_BIMC_1 111
+#define	MSM_BUS_MASTER_BIMC_PCNOC   112
+#define	MSM_BUS_MASTER_XI_USB_HSIC  113
+#define	MSM_BUS_MASTER_SGMII	    114
+#define	MSM_BUS_SPMI_FETCHER 115
+#define	MSM_BUS_MASTER_GNOC_BIMC 116
+#define	MSM_BUS_MASTER_CRVIRT_A2NOC 117
+#define	MSM_BUS_MASTER_CNOC_A2NOC 118
+#define	MSM_BUS_MASTER_WLAN 119
+#define	MSM_BUS_MASTER_MSS_CE 120
+#define	MSM_BUS_MASTER_CDSP_PROC 121
+#define	MSM_BUS_MASTER_GNOC_SNOC 122
+#define	MSM_BUS_MASTER_PIMEM 123
+#define	MSM_BUS_MASTER_MASTER_LAST 124
+
+#define	MSM_BUS_SYSTEM_FPB_MASTER_SYSTEM MSM_BUS_SYSTEM_MASTER_SYSTEM_FPB
+#define	MSM_BUS_CPSS_FPB_MASTER_SYSTEM MSM_BUS_SYSTEM_MASTER_CPSS_FPB
+
+#define	MSM_BUS_SNOC_MM_INT_0 10000
+#define	MSM_BUS_SNOC_MM_INT_1 10001
+#define	MSM_BUS_SNOC_MM_INT_2 10002
+#define	MSM_BUS_SNOC_MM_INT_BIMC 10003
+#define	MSM_BUS_SNOC_INT_0 10004
+#define	MSM_BUS_SNOC_INT_1 10005
+#define	MSM_BUS_SNOC_INT_BIMC 10006
+#define	MSM_BUS_SNOC_BIMC_0_MAS 10007
+#define	MSM_BUS_SNOC_BIMC_1_MAS 10008
+#define	MSM_BUS_SNOC_QDSS_INT 10009
+#define	MSM_BUS_PNOC_SNOC_MAS 10010
+#define	MSM_BUS_PNOC_SNOC_SLV 10011
+#define	MSM_BUS_PNOC_INT_0 10012
+#define	MSM_BUS_PNOC_INT_1 10013
+#define	MSM_BUS_PNOC_M_0 10014
+#define	MSM_BUS_PNOC_M_1 10015
+#define	MSM_BUS_BIMC_SNOC_MAS 10016
+#define	MSM_BUS_BIMC_SNOC_SLV 10017
+#define	MSM_BUS_PNOC_SLV_0 10018
+#define	MSM_BUS_PNOC_SLV_1 10019
+#define	MSM_BUS_PNOC_SLV_2 10020
+#define	MSM_BUS_PNOC_SLV_3 10021
+#define	MSM_BUS_PNOC_SLV_4 10022
+#define	MSM_BUS_PNOC_SLV_8 10023
+#define	MSM_BUS_PNOC_SLV_9 10024
+#define	MSM_BUS_SNOC_BIMC_0_SLV 10025
+#define	MSM_BUS_SNOC_BIMC_1_SLV 10026
+#define	MSM_BUS_MNOC_BIMC_MAS 10027
+#define	MSM_BUS_MNOC_BIMC_SLV 10028
+#define	MSM_BUS_BIMC_MNOC_MAS 10029
+#define	MSM_BUS_BIMC_MNOC_SLV 10030
+#define	MSM_BUS_SNOC_BIMC_MAS 10031
+#define	MSM_BUS_SNOC_BIMC_SLV 10032
+#define	MSM_BUS_CNOC_SNOC_MAS 10033
+#define	MSM_BUS_CNOC_SNOC_SLV 10034
+#define	MSM_BUS_SNOC_CNOC_MAS 10035
+#define	MSM_BUS_SNOC_CNOC_SLV 10036
+#define	MSM_BUS_OVNOC_SNOC_MAS 10037
+#define	MSM_BUS_OVNOC_SNOC_SLV 10038
+#define	MSM_BUS_SNOC_OVNOC_MAS 10039
+#define	MSM_BUS_SNOC_OVNOC_SLV 10040
+#define	MSM_BUS_SNOC_PNOC_MAS 10041
+#define	MSM_BUS_SNOC_PNOC_SLV 10042
+#define	MSM_BUS_BIMC_INT_APPS_EBI 10043
+#define	MSM_BUS_BIMC_INT_APPS_SNOC 10044
+#define	MSM_BUS_SNOC_BIMC_2_MAS 10045
+#define	MSM_BUS_SNOC_BIMC_2_SLV 10046
+#define	MSM_BUS_PNOC_SLV_5	10047
+#define	MSM_BUS_PNOC_SLV_7	10048
+#define	MSM_BUS_PNOC_INT_2 10049
+#define	MSM_BUS_PNOC_INT_3 10050
+#define	MSM_BUS_PNOC_INT_4 10051
+#define	MSM_BUS_PNOC_INT_5 10052
+#define	MSM_BUS_PNOC_INT_6 10053
+#define	MSM_BUS_PNOC_INT_7 10054
+#define	MSM_BUS_BIMC_SNOC_1_MAS 10055
+#define	MSM_BUS_BIMC_SNOC_1_SLV 10056
+#define	MSM_BUS_PNOC_A1NOC_MAS 10057
+#define	MSM_BUS_PNOC_A1NOC_SLV 10058
+#define	MSM_BUS_CNOC_A1NOC_MAS 10059
+#define	MSM_BUS_A0NOC_SNOC_MAS 10060
+#define	MSM_BUS_A0NOC_SNOC_SLV 10061
+#define	MSM_BUS_A1NOC_SNOC_SLV 10062
+#define	MSM_BUS_A1NOC_SNOC_MAS 10063
+#define	MSM_BUS_A2NOC_SNOC_MAS 10064
+#define	MSM_BUS_A2NOC_SNOC_SLV 10065
+#define	MSM_BUS_SNOC_INT_2 10066
+#define	MSM_BUS_A0NOC_QDSS_INT	10067
+#define	MSM_BUS_INT_LAST 10068
+
+#define	MSM_BUS_INT_TEST_ID	20000
+#define	MSM_BUS_INT_TEST_LAST	20050
+
+#define	MSM_BUS_SLAVE_FIRST 512
+#define	MSM_BUS_SLAVE_EBI_CH0 512
+#define	MSM_BUS_SLAVE_EBI_CH1 513
+#define	MSM_BUS_SLAVE_AMPSS_L2 514
+#define	MSM_BUS_APPSS_SLAVE_FAB_MMSS 515
+#define	MSM_BUS_APPSS_SLAVE_FAB_SYSTEM 516
+#define	MSM_BUS_SYSTEM_SLAVE_FAB_APPS 517
+#define	MSM_BUS_SLAVE_SPS 518
+#define	MSM_BUS_SLAVE_SYSTEM_IMEM 519
+#define	MSM_BUS_SLAVE_AMPSS 520
+#define	MSM_BUS_SLAVE_MSS 521
+#define	MSM_BUS_SLAVE_LPASS 522
+#define	MSM_BUS_SYSTEM_SLAVE_CPSS_FPB 523
+#define	MSM_BUS_SYSTEM_SLAVE_SYSTEM_FPB 524
+#define	MSM_BUS_SYSTEM_SLAVE_MMSS_FPB 525
+#define	MSM_BUS_SLAVE_CORESIGHT 526
+#define	MSM_BUS_SLAVE_RIVA 527
+#define	MSM_BUS_SLAVE_SMI 528
+#define	MSM_BUS_MMSS_SLAVE_FAB_APPS 529
+#define	MSM_BUS_MMSS_SLAVE_FAB_APPS_1 530
+#define	MSM_BUS_SLAVE_MM_IMEM 531
+#define	MSM_BUS_SLAVE_CRYPTO 532
+#define	MSM_BUS_SLAVE_SPDM 533
+#define	MSM_BUS_SLAVE_RPM 534
+#define	MSM_BUS_SLAVE_RPM_MSG_RAM 535
+#define	MSM_BUS_SLAVE_MPM 536
+#define	MSM_BUS_SLAVE_PMIC1_SSBI1_A 537
+#define	MSM_BUS_SLAVE_PMIC1_SSBI1_B 538
+#define	MSM_BUS_SLAVE_PMIC1_SSBI1_C 539
+#define	MSM_BUS_SLAVE_PMIC2_SSBI2_A 540
+#define	MSM_BUS_SLAVE_PMIC2_SSBI2_B 541
+#define	MSM_BUS_SLAVE_GSBI1_UART 542
+#define	MSM_BUS_SLAVE_GSBI2_UART 543
+#define	MSM_BUS_SLAVE_GSBI3_UART 544
+#define	MSM_BUS_SLAVE_GSBI4_UART 545
+#define	MSM_BUS_SLAVE_GSBI5_UART 546
+#define	MSM_BUS_SLAVE_GSBI6_UART 547
+#define	MSM_BUS_SLAVE_GSBI7_UART 548
+#define	MSM_BUS_SLAVE_GSBI8_UART 549
+#define	MSM_BUS_SLAVE_GSBI9_UART 550
+#define	MSM_BUS_SLAVE_GSBI10_UART 551
+#define	MSM_BUS_SLAVE_GSBI11_UART 552
+#define	MSM_BUS_SLAVE_GSBI12_UART 553
+#define	MSM_BUS_SLAVE_GSBI1_QUP 554
+#define	MSM_BUS_SLAVE_GSBI2_QUP 555
+#define	MSM_BUS_SLAVE_GSBI3_QUP 556
+#define	MSM_BUS_SLAVE_GSBI4_QUP 557
+#define	MSM_BUS_SLAVE_GSBI5_QUP 558
+#define	MSM_BUS_SLAVE_GSBI6_QUP 559
+#define	MSM_BUS_SLAVE_GSBI7_QUP 560
+#define	MSM_BUS_SLAVE_GSBI8_QUP 561
+#define	MSM_BUS_SLAVE_GSBI9_QUP 562
+#define	MSM_BUS_SLAVE_GSBI10_QUP 563
+#define	MSM_BUS_SLAVE_GSBI11_QUP 564
+#define	MSM_BUS_SLAVE_GSBI12_QUP 565
+#define	MSM_BUS_SLAVE_EBI2_NAND 566
+#define	MSM_BUS_SLAVE_EBI2_CS0 567
+#define	MSM_BUS_SLAVE_EBI2_CS1 568
+#define	MSM_BUS_SLAVE_EBI2_CS2 569
+#define	MSM_BUS_SLAVE_EBI2_CS3 570
+#define	MSM_BUS_SLAVE_EBI2_CS4 571
+#define	MSM_BUS_SLAVE_EBI2_CS5 572
+#define	MSM_BUS_SLAVE_USB_FS1 573
+#define	MSM_BUS_SLAVE_USB_FS2 574
+#define	MSM_BUS_SLAVE_TSIF 575
+#define	MSM_BUS_SLAVE_MSM_TSSC 576
+#define	MSM_BUS_SLAVE_MSM_PDM 577
+#define	MSM_BUS_SLAVE_MSM_DIMEM 578
+#define	MSM_BUS_SLAVE_MSM_TCSR 579
+#define	MSM_BUS_SLAVE_MSM_PRNG 580
+#define	MSM_BUS_SLAVE_GSS 581
+#define	MSM_BUS_SLAVE_SATA 582
+#define	MSM_BUS_SLAVE_USB3 583
+#define	MSM_BUS_SLAVE_WCSS 584
+#define	MSM_BUS_SLAVE_OCIMEM 585
+#define	MSM_BUS_SLAVE_SNOC_OCMEM 586
+#define	MSM_BUS_SLAVE_SERVICE_SNOC 587
+#define	MSM_BUS_SLAVE_QDSS_STM 588
+#define	MSM_BUS_SLAVE_CAMERA_CFG 589
+#define	MSM_BUS_SLAVE_DISPLAY_CFG 590
+#define	MSM_BUS_SLAVE_OCMEM_CFG 591
+#define	MSM_BUS_SLAVE_CPR_CFG 592
+#define	MSM_BUS_SLAVE_CPR_XPU_CFG 593
+#define	MSM_BUS_SLAVE_MISC_CFG 594
+#define	MSM_BUS_SLAVE_MISC_XPU_CFG 595
+#define	MSM_BUS_SLAVE_VENUS_CFG 596
+#define	MSM_BUS_SLAVE_MISC_VENUS_CFG 597
+#define	MSM_BUS_SLAVE_GRAPHICS_3D_CFG 598
+#define	MSM_BUS_SLAVE_MMSS_CLK_CFG 599
+#define	MSM_BUS_SLAVE_MMSS_CLK_XPU_CFG 600
+#define	MSM_BUS_SLAVE_MNOC_MPU_CFG 601
+#define	MSM_BUS_SLAVE_ONOC_MPU_CFG 602
+#define	MSM_BUS_SLAVE_SERVICE_MNOC 603
+#define	MSM_BUS_SLAVE_OCMEM 604
+#define	MSM_BUS_SLAVE_SERVICE_ONOC 605
+#define	MSM_BUS_SLAVE_SDCC_1 606
+#define	MSM_BUS_SLAVE_SDCC_3 607
+#define	MSM_BUS_SLAVE_SDCC_2 608
+#define	MSM_BUS_SLAVE_SDCC_4 609
+#define	MSM_BUS_SLAVE_BAM_DMA 610
+#define	MSM_BUS_SLAVE_BLSP_2 611
+#define	MSM_BUS_SLAVE_USB_HSIC 612
+#define	MSM_BUS_SLAVE_BLSP_1 613
+#define	MSM_BUS_SLAVE_USB_HS 614
+#define	MSM_BUS_SLAVE_PDM 615
+#define	MSM_BUS_SLAVE_PERIPH_APU_CFG 616
+#define	MSM_BUS_SLAVE_PNOC_MPU_CFG 617
+#define	MSM_BUS_SLAVE_PRNG 618
+#define	MSM_BUS_SLAVE_SERVICE_PNOC 619
+#define	MSM_BUS_SLAVE_CLK_CTL 620
+#define	MSM_BUS_SLAVE_CNOC_MSS 621
+#define	MSM_BUS_SLAVE_SECURITY 622
+#define	MSM_BUS_SLAVE_TCSR 623
+#define	MSM_BUS_SLAVE_TLMM 624
+#define	MSM_BUS_SLAVE_CRYPTO_0_CFG 625
+#define	MSM_BUS_SLAVE_CRYPTO_1_CFG 626
+#define	MSM_BUS_SLAVE_IMEM_CFG 627
+#define	MSM_BUS_SLAVE_MESSAGE_RAM 628
+#define	MSM_BUS_SLAVE_BIMC_CFG 629
+#define	MSM_BUS_SLAVE_BOOT_ROM 630
+#define	MSM_BUS_SLAVE_CNOC_MNOC_MMSS_CFG 631
+#define	MSM_BUS_SLAVE_PMIC_ARB 632
+#define	MSM_BUS_SLAVE_SPDM_WRAPPER 633
+#define	MSM_BUS_SLAVE_DEHR_CFG 634
+#define	MSM_BUS_SLAVE_QDSS_CFG 635
+#define	MSM_BUS_SLAVE_RBCPR_CFG 636
+#define	MSM_BUS_SLAVE_RBCPR_QDSS_APU_CFG 637
+#define	MSM_BUS_SLAVE_SNOC_MPU_CFG 638
+#define	MSM_BUS_SLAVE_CNOC_ONOC_CFG 639
+#define	MSM_BUS_SLAVE_CNOC_MNOC_CFG 640
+#define	MSM_BUS_SLAVE_PNOC_CFG 641
+#define	MSM_BUS_SLAVE_SNOC_CFG 642
+#define	MSM_BUS_SLAVE_EBI1_DLL_CFG 643
+#define	MSM_BUS_SLAVE_PHY_APU_CFG 644
+#define	MSM_BUS_SLAVE_EBI1_PHY_CFG 645
+#define	MSM_BUS_SLAVE_SERVICE_CNOC 646
+#define	MSM_BUS_SLAVE_IPS_CFG 647
+#define	MSM_BUS_SLAVE_QPIC 648
+#define	MSM_BUS_SLAVE_DSI_CFG 649
+#define	MSM_BUS_SLAVE_UFS_CFG 650
+#define	MSM_BUS_SLAVE_RBCPR_CX_CFG 651
+#define	MSM_BUS_SLAVE_RBCPR_MX_CFG 652
+#define	MSM_BUS_SLAVE_PCIE_CFG 653
+#define	MSM_BUS_SLAVE_USB_PHYS_CFG 654
+#define	MSM_BUS_SLAVE_VIDEO_CAP_CFG 655
+#define	MSM_BUS_SLAVE_AVSYNC_CFG 656
+#define	MSM_BUS_SLAVE_CRYPTO_2_CFG 657
+#define	MSM_BUS_SLAVE_VPU_CFG 658
+#define	MSM_BUS_SLAVE_BCAST_CFG 659
+#define	MSM_BUS_SLAVE_KLM_CFG 660
+#define	MSM_BUS_SLAVE_GENI_IR_CFG 661
+#define	MSM_BUS_SLAVE_OCMEM_GFX 662
+#define	MSM_BUS_SLAVE_CATS_128 663
+#define	MSM_BUS_SLAVE_OCMEM_64 664
+#define	MSM_BUS_SLAVE_PCIE_0 665
+#define	MSM_BUS_SLAVE_PCIE_1 666
+#define	MSM_BUS_SLAVE_PCIE_0_CFG 667
+#define	MSM_BUS_SLAVE_PCIE_1_CFG 668
+#define	MSM_BUS_SLAVE_SRVC_MNOC 669
+#define	MSM_BUS_SLAVE_USB_HS2 670
+#define	MSM_BUS_SLAVE_AUDIO 671
+#define	MSM_BUS_SLAVE_TCU 672
+#define	MSM_BUS_SLAVE_APPSS 673
+#define	MSM_BUS_SLAVE_PCIE_PARF 674
+#define	MSM_BUS_SLAVE_USB3_PHY_CFG 675
+#define	MSM_BUS_SLAVE_IPA_CFG 676
+#define	MSM_BUS_SLAVE_A0NOC_SNOC 677
+#define	MSM_BUS_SLAVE_A1NOC_SNOC 678
+#define	MSM_BUS_SLAVE_A2NOC_SNOC 679
+#define	MSM_BUS_SLAVE_HMSS_L3 680
+#define	MSM_BUS_SLAVE_PIMEM_CFG 681
+#define	MSM_BUS_SLAVE_DCC_CFG 682
+#define	MSM_BUS_SLAVE_QDSS_RBCPR_APU_CFG 683
+#define	MSM_BUS_SLAVE_PCIE_2_CFG 684
+#define	MSM_BUS_SLAVE_PCIE20_AHB2PHY 685
+#define	MSM_BUS_SLAVE_A0NOC_CFG 686
+#define	MSM_BUS_SLAVE_A1NOC_CFG 687
+#define	MSM_BUS_SLAVE_A2NOC_CFG 688
+#define	MSM_BUS_SLAVE_A1NOC_MPU_CFG 689
+#define	MSM_BUS_SLAVE_A2NOC_MPU_CFG 690
+#define	MSM_BUS_SLAVE_A0NOC_SMMU_CFG 691
+#define	MSM_BUS_SLAVE_A1NOC_SMMU_CFG 692
+#define	MSM_BUS_SLAVE_A2NOC_SMMU_CFG 693
+#define	MSM_BUS_SLAVE_LPASS_SMMU_CFG 694
+#define	MSM_BUS_SLAVE_MMAGIC_CFG 695
+#define	MSM_BUS_SLAVE_VENUS_THROTTLE_CFG 696
+#define	MSM_BUS_SLAVE_SSC_CFG 697
+#define	MSM_BUS_SLAVE_DSA_CFG 698
+#define	MSM_BUS_SLAVE_DSA_MPU_CFG 699
+#define	MSM_BUS_SLAVE_DISPLAY_THROTTLE_CFG 700
+#define	MSM_BUS_SLAVE_SMMU_CPP_CFG 701
+#define	MSM_BUS_SLAVE_SMMU_JPEG_CFG 702
+#define	MSM_BUS_SLAVE_SMMU_MDP_CFG 703
+#define	MSM_BUS_SLAVE_SMMU_ROTATOR_CFG 704
+#define	MSM_BUS_SLAVE_SMMU_VENUS_CFG 705
+#define	MSM_BUS_SLAVE_SMMU_VFE_CFG 706
+#define	MSM_BUS_SLAVE_A0NOC_MPU_CFG 707
+#define	MSM_BUS_SLAVE_VMEM_CFG 708
+#define	MSM_BUS_SLAVE_CAMERA_THROTTLE_CFG 709
+#define	MSM_BUS_SLAVE_VMEM 710
+#define	MSM_BUS_SLAVE_AHB2PHY 711
+#define	MSM_BUS_SLAVE_PIMEM 712
+#define	MSM_BUS_SLAVE_SNOC_VMEM 713
+#define	MSM_BUS_SLAVE_PCIE_2 714
+#define	MSM_BUS_SLAVE_RBCPR_MX 715
+#define	MSM_BUS_SLAVE_RBCPR_CX 716
+#define	MSM_BUS_SLAVE_BIMC_PCNOC 717
+#define	MSM_BUS_SLAVE_PCNOC_BIMC_1 718
+#define	MSM_BUS_SLAVE_SGMII 719
+#define	MSM_BUS_SLAVE_SPMI_FETCHER 720
+#define	MSM_BUS_PNOC_SLV_6 721
+#define	MSM_BUS_SLAVE_MMSS_SMMU_CFG 722
+#define	MSM_BUS_SLAVE_WLAN 723
+#define	MSM_BUS_SLAVE_CRVIRT_A2NOC 724
+#define	MSM_BUS_SLAVE_CNOC_A2NOC 725
+#define	MSM_BUS_SLAVE_GLM 726
+#define	MSM_BUS_SLAVE_GNOC_BIMC 727
+#define	MSM_BUS_SLAVE_GNOC_SNOC 728
+#define	MSM_BUS_SLAVE_QM_CFG 729
+#define	MSM_BUS_SLAVE_TLMM_EAST 730
+#define	MSM_BUS_SLAVE_TLMM_NORTH 731
+#define	MSM_BUS_SLAVE_TLMM_WEST 732
+#define	MSM_BUS_SLAVE_SKL 733
+#define	MSM_BUS_SLAVE_LPASS_TCM	734
+#define	MSM_BUS_SLAVE_TLMM_SOUTH 735
+#define	MSM_BUS_SLAVE_TLMM_CENTER 736
+#define	MSM_BUS_MSS_NAV_CE_MPU_CFG 737
+#define	MSM_BUS_SLAVE_A2NOC_THROTTLE_CFG 738
+#define	MSM_BUS_SLAVE_CDSP 739
+#define	MSM_BUS_SLAVE_CDSP_SMMU_CFG 740
+#define	MSM_BUS_SLAVE_LPASS_MPU_CFG 741
+#define	MSM_BUS_SLAVE_CSI_PHY_CFG 742
+#define	MSM_BUS_SLAVE_LAST 743
+
+#define	MSM_BUS_SYSTEM_FPB_SLAVE_SYSTEM  MSM_BUS_SYSTEM_SLAVE_SYSTEM_FPB
+#define	MSM_BUS_CPSS_FPB_SLAVE_SYSTEM MSM_BUS_SYSTEM_SLAVE_CPSS_FPB
+
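The master and slave IDs above serve as the endpoints of bandwidth votes: a
bus client names a (master, slave) path and attaches average and peak
bandwidth figures, which the bus driver aggregates per fabric. A hedged
sketch of one such vote, modeled on the msm_bus vector layout of kernels
from this era; the struct and field names here are illustrative rather than
the exact driver API, and the bandwidth numbers are placeholders:

#include <stdint.h>

/* One bandwidth vote along a (master, slave) path. */
struct bus_vote {
	int src;	/* a master ID, e.g. MSM_BUS_MASTER_BLSP_1 (86) */
	int dst;	/* a slave ID, e.g. MSM_BUS_SLAVE_EBI_CH0 (512) */
	uint64_t ab;	/* average bandwidth, bytes/s */
	uint64_t ib;	/* instantaneous (peak) bandwidth, bytes/s */
};

/* An active-state vote for a BLSP-to-DDR path. */
static const struct bus_vote uart_active = {
	.src = 86,	/* MSM_BUS_MASTER_BLSP_1 */
	.dst = 512,	/* MSM_BUS_SLAVE_EBI_CH0 */
	.ab = 500000,
	.ib = 800000,
};
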
+/*
+ * IDs used in RPM messages
+ */
+#define	ICBID_MASTER_APPSS_PROC 0
+#define	ICBID_MASTER_MSS_PROC 1
+#define	ICBID_MASTER_MNOC_BIMC 2
+#define	ICBID_MASTER_SNOC_BIMC 3
+#define	ICBID_MASTER_SNOC_BIMC_0 ICBID_MASTER_SNOC_BIMC
+#define	ICBID_MASTER_CNOC_MNOC_MMSS_CFG 4
+#define	ICBID_MASTER_CNOC_MNOC_CFG 5
+#define	ICBID_MASTER_GFX3D 6
+#define	ICBID_MASTER_JPEG 7
+#define	ICBID_MASTER_MDP 8
+#define	ICBID_MASTER_MDP0 ICBID_MASTER_MDP
+#define	ICBID_MASTER_MDPS ICBID_MASTER_MDP
+#define	ICBID_MASTER_VIDEO 9
+#define	ICBID_MASTER_VIDEO_P0 ICBID_MASTER_VIDEO
+#define	ICBID_MASTER_VIDEO_P1 10
+#define	ICBID_MASTER_VFE 11
+#define	ICBID_MASTER_VFE0 ICBID_MASTER_VFE
+#define	ICBID_MASTER_CNOC_ONOC_CFG 12
+#define	ICBID_MASTER_JPEG_OCMEM 13
+#define	ICBID_MASTER_MDP_OCMEM 14
+#define	ICBID_MASTER_VIDEO_P0_OCMEM 15
+#define	ICBID_MASTER_VIDEO_P1_OCMEM 16
+#define	ICBID_MASTER_VFE_OCMEM 17
+#define	ICBID_MASTER_LPASS_AHB 18
+#define	ICBID_MASTER_QDSS_BAM 19
+#define	ICBID_MASTER_SNOC_CFG 20
+#define	ICBID_MASTER_BIMC_SNOC 21
+#define	ICBID_MASTER_BIMC_SNOC_0 ICBID_MASTER_BIMC_SNOC
+#define	ICBID_MASTER_CNOC_SNOC 22
+#define	ICBID_MASTER_CRYPTO 23
+#define	ICBID_MASTER_CRYPTO_CORE0 ICBID_MASTER_CRYPTO
+#define	ICBID_MASTER_CRYPTO_CORE1 24
+#define	ICBID_MASTER_LPASS_PROC 25
+#define	ICBID_MASTER_MSS 26
+#define	ICBID_MASTER_MSS_NAV 27
+#define	ICBID_MASTER_OCMEM_DMA 28
+#define	ICBID_MASTER_PNOC_SNOC 29
+#define	ICBID_MASTER_WCSS 30
+#define	ICBID_MASTER_QDSS_ETR 31
+#define	ICBID_MASTER_USB3 32
+#define	ICBID_MASTER_USB3_0 ICBID_MASTER_USB3
+#define	ICBID_MASTER_SDCC_1 33
+#define	ICBID_MASTER_SDCC_3 34
+#define	ICBID_MASTER_SDCC_2 35
+#define	ICBID_MASTER_SDCC_4 36
+#define	ICBID_MASTER_TSIF 37
+#define	ICBID_MASTER_BAM_DMA 38
+#define	ICBID_MASTER_BLSP_2 39
+#define	ICBID_MASTER_USB_HSIC 40
+#define	ICBID_MASTER_BLSP_1 41
+#define	ICBID_MASTER_USB_HS 42
+#define	ICBID_MASTER_USB_HS1 ICBID_MASTER_USB_HS
+#define	ICBID_MASTER_PNOC_CFG 43
+#define	ICBID_MASTER_SNOC_PNOC 44
+#define	ICBID_MASTER_RPM_INST 45
+#define	ICBID_MASTER_RPM_DATA 46
+#define	ICBID_MASTER_RPM_SYS 47
+#define	ICBID_MASTER_DEHR 48
+#define	ICBID_MASTER_QDSS_DAP 49
+#define	ICBID_MASTER_SPDM 50
+#define	ICBID_MASTER_TIC 51
+#define	ICBID_MASTER_SNOC_CNOC 52
+#define	ICBID_MASTER_GFX3D_OCMEM 53
+#define	ICBID_MASTER_GFX3D_GMEM ICBID_MASTER_GFX3D_OCMEM
+#define	ICBID_MASTER_OVIRT_SNOC 54
+#define	ICBID_MASTER_SNOC_OVIRT 55
+#define	ICBID_MASTER_SNOC_GVIRT ICBID_MASTER_SNOC_OVIRT
+#define	ICBID_MASTER_ONOC_OVIRT 56
+#define	ICBID_MASTER_USB_HS2 57
+#define	ICBID_MASTER_QPIC 58
+#define	ICBID_MASTER_IPA 59
+#define	ICBID_MASTER_DSI 60
+#define	ICBID_MASTER_MDP1 61
+#define	ICBID_MASTER_MDPE ICBID_MASTER_MDP1
+#define	ICBID_MASTER_VPU_PROC 62
+#define	ICBID_MASTER_VPU 63
+#define	ICBID_MASTER_VPU0 ICBID_MASTER_VPU
+#define	ICBID_MASTER_CRYPTO_CORE2 64
+#define	ICBID_MASTER_PCIE_0 65
+#define	ICBID_MASTER_PCIE_1 66
+#define	ICBID_MASTER_SATA 67
+#define	ICBID_MASTER_UFS 68
+#define	ICBID_MASTER_USB3_1 69
+#define	ICBID_MASTER_VIDEO_OCMEM 70
+#define	ICBID_MASTER_VPU1 71
+#define	ICBID_MASTER_VCAP 72
+#define	ICBID_MASTER_EMAC 73
+#define	ICBID_MASTER_BCAST 74
+#define	ICBID_MASTER_MMSS_PROC 75
+#define	ICBID_MASTER_SNOC_BIMC_1 76
+#define	ICBID_MASTER_SNOC_PCNOC 77
+#define	ICBID_MASTER_AUDIO 78
+#define	ICBID_MASTER_MM_INT_0 79
+#define	ICBID_MASTER_MM_INT_1 80
+#define	ICBID_MASTER_MM_INT_2 81
+#define	ICBID_MASTER_MM_INT_BIMC 82
+#define	ICBID_MASTER_MSS_INT 83
+#define	ICBID_MASTER_PCNOC_CFG 84
+#define	ICBID_MASTER_PCNOC_INT_0 85
+#define	ICBID_MASTER_PCNOC_INT_1 86
+#define	ICBID_MASTER_PCNOC_M_0 87
+#define	ICBID_MASTER_PCNOC_M_1 88
+#define	ICBID_MASTER_PCNOC_S_0 89
+#define	ICBID_MASTER_PCNOC_S_1 90
+#define	ICBID_MASTER_PCNOC_S_2 91
+#define	ICBID_MASTER_PCNOC_S_3 92
+#define	ICBID_MASTER_PCNOC_S_4 93
+#define	ICBID_MASTER_PCNOC_S_6 94
+#define	ICBID_MASTER_PCNOC_S_7 95
+#define	ICBID_MASTER_PCNOC_S_8 96
+#define	ICBID_MASTER_PCNOC_S_9 97
+#define	ICBID_MASTER_QDSS_INT 98
+#define	ICBID_MASTER_SNOC_INT_0 99
+#define	ICBID_MASTER_SNOC_INT_1 100
+#define	ICBID_MASTER_SNOC_INT_BIMC 101
+#define	ICBID_MASTER_TCU_0 102
+#define	ICBID_MASTER_TCU_1 103
+#define	ICBID_MASTER_BIMC_INT_0 104
+#define	ICBID_MASTER_BIMC_INT_1 105
+#define	ICBID_MASTER_CAMERA 106
+#define	ICBID_MASTER_RICA 107
+#define	ICBID_MASTER_SNOC_BIMC_2 108
+#define	ICBID_MASTER_BIMC_SNOC_1 109
+#define	ICBID_MASTER_A0NOC_SNOC 110
+#define	ICBID_MASTER_A1NOC_SNOC 111
+#define	ICBID_MASTER_A2NOC_SNOC 112
+#define	ICBID_MASTER_PIMEM 113
+#define	ICBID_MASTER_SNOC_VMEM 114
+#define	ICBID_MASTER_CPP 115
+#define	ICBID_MASTER_CNOC_A1NOC 116
+#define	ICBID_MASTER_PNOC_A1NOC 117
+#define	ICBID_MASTER_HMSS 118
+#define	ICBID_MASTER_PCIE_2 119
+#define	ICBID_MASTER_ROTATOR 120
+#define	ICBID_MASTER_VENUS_VMEM 121
+#define	ICBID_MASTER_DCC 122
+#define	ICBID_MASTER_MCDMA 123
+#define	ICBID_MASTER_PCNOC_INT_2 124
+#define	ICBID_MASTER_PCNOC_INT_3 125
+#define	ICBID_MASTER_PCNOC_INT_4 126
+#define	ICBID_MASTER_PCNOC_INT_5 127
+#define	ICBID_MASTER_PCNOC_INT_6 128
+#define	ICBID_MASTER_PCNOC_S_5 129
+#define	ICBID_MASTER_SENSORS_AHB 130
+#define	ICBID_MASTER_SENSORS_PROC 131
+#define	ICBID_MASTER_QSPI 132
+#define	ICBID_MASTER_VFE1 133
+#define	ICBID_MASTER_SNOC_INT_2 134
+#define	ICBID_MASTER_SMMNOC_BIMC 135
+#define	ICBID_MASTER_CRVIRT_A1NOC 136
+#define	ICBID_MASTER_XM_USB_HS1 137
+#define	ICBID_MASTER_XI_USB_HS1 138
+#define	ICBID_MASTER_PCNOC_BIMC_1 139
+#define	ICBID_MASTER_BIMC_PCNOC 140
+#define	ICBID_MASTER_XI_HSIC 141
+#define	ICBID_MASTER_SGMII 142
+#define	ICBID_MASTER_SPMI_FETCHER 143
+#define	ICBID_MASTER_GNOC_BIMC 144
+#define	ICBID_MASTER_CRVIRT_A2NOC 145
+#define	ICBID_MASTER_CNOC_A2NOC 146
+#define	ICBID_MASTER_WLAN 147
+#define	ICBID_MASTER_MSS_CE 148
+#define	ICBID_MASTER_CDSP_PROC 149
+#define	ICBID_MASTER_GNOC_SNOC 150
+
+#define	ICBID_SLAVE_EBI1 0
+#define	ICBID_SLAVE_APPSS_L2 1
+#define	ICBID_SLAVE_BIMC_SNOC 2
+#define	ICBID_SLAVE_BIMC_SNOC_0 ICBID_SLAVE_BIMC_SNOC
+#define	ICBID_SLAVE_CAMERA_CFG 3
+#define	ICBID_SLAVE_DISPLAY_CFG 4
+#define	ICBID_SLAVE_OCMEM_CFG 5
+#define	ICBID_SLAVE_CPR_CFG 6
+#define	ICBID_SLAVE_CPR_XPU_CFG 7
+#define	ICBID_SLAVE_MISC_CFG 8
+#define	ICBID_SLAVE_MISC_XPU_CFG 9
+#define	ICBID_SLAVE_VENUS_CFG 10
+#define	ICBID_SLAVE_GFX3D_CFG 11
+#define	ICBID_SLAVE_MMSS_CLK_CFG 12
+#define	ICBID_SLAVE_MMSS_CLK_XPU_CFG 13
+#define	ICBID_SLAVE_MNOC_MPU_CFG 14
+#define	ICBID_SLAVE_ONOC_MPU_CFG 15
+#define	ICBID_SLAVE_MNOC_BIMC 16
+#define	ICBID_SLAVE_SERVICE_MNOC 17
+#define	ICBID_SLAVE_OCMEM 18
+#define	ICBID_SLAVE_GMEM ICBID_SLAVE_OCMEM
+#define	ICBID_SLAVE_SERVICE_ONOC 19
+#define	ICBID_SLAVE_APPSS 20
+#define	ICBID_SLAVE_LPASS 21
+#define	ICBID_SLAVE_USB3 22
+#define	ICBID_SLAVE_USB3_0 ICBID_SLAVE_USB3
+#define	ICBID_SLAVE_WCSS 23
+#define	ICBID_SLAVE_SNOC_BIMC 24
+#define	ICBID_SLAVE_SNOC_BIMC_0 ICBID_SLAVE_SNOC_BIMC
+#define	ICBID_SLAVE_SNOC_CNOC 25
+#define	ICBID_SLAVE_IMEM 26
+#define	ICBID_SLAVE_OCIMEM ICBID_SLAVE_IMEM
+#define	ICBID_SLAVE_SNOC_OVIRT 27
+#define	ICBID_SLAVE_SNOC_GVIRT ICBID_SLAVE_SNOC_OVIRT
+#define	ICBID_SLAVE_SNOC_PNOC 28
+#define	ICBID_SLAVE_SNOC_PCNOC ICBID_SLAVE_SNOC_PNOC
+#define	ICBID_SLAVE_SERVICE_SNOC 29
+#define	ICBID_SLAVE_QDSS_STM 30
+#define	ICBID_SLAVE_SDCC_1 31
+#define	ICBID_SLAVE_SDCC_3 32
+#define	ICBID_SLAVE_SDCC_2 33
+#define	ICBID_SLAVE_SDCC_4 34
+#define	ICBID_SLAVE_TSIF 35
+#define	ICBID_SLAVE_BAM_DMA 36
+#define	ICBID_SLAVE_BLSP_2 37
+#define	ICBID_SLAVE_USB_HSIC 38
+#define	ICBID_SLAVE_BLSP_1 39
+#define	ICBID_SLAVE_USB_HS 40
+#define	ICBID_SLAVE_USB_HS1 ICBID_SLAVE_USB_HS
+#define	ICBID_SLAVE_PDM 41
+#define	ICBID_SLAVE_PERIPH_APU_CFG 42
+#define	ICBID_SLAVE_PNOC_MPU_CFG 43
+#define	ICBID_SLAVE_PRNG 44
+#define	ICBID_SLAVE_PNOC_SNOC 45
+#define	ICBID_SLAVE_PCNOC_SNOC ICBID_SLAVE_PNOC_SNOC
+#define	ICBID_SLAVE_SERVICE_PNOC 46
+#define	ICBID_SLAVE_CLK_CTL 47
+#define	ICBID_SLAVE_CNOC_MSS 48
+#define	ICBID_SLAVE_PCNOC_MSS ICBID_SLAVE_CNOC_MSS
+#define	ICBID_SLAVE_SECURITY 49
+#define	ICBID_SLAVE_TCSR 50
+#define	ICBID_SLAVE_TLMM 51
+#define	ICBID_SLAVE_CRYPTO_0_CFG 52
+#define	ICBID_SLAVE_CRYPTO_1_CFG 53
+#define	ICBID_SLAVE_IMEM_CFG 54
+#define	ICBID_SLAVE_MESSAGE_RAM 55
+#define	ICBID_SLAVE_BIMC_CFG 56
+#define	ICBID_SLAVE_BOOT_ROM 57
+#define	ICBID_SLAVE_CNOC_MNOC_MMSS_CFG 58
+#define	ICBID_SLAVE_PMIC_ARB 59
+#define	ICBID_SLAVE_SPDM_WRAPPER 60
+#define	ICBID_SLAVE_DEHR_CFG 61
+#define	ICBID_SLAVE_MPM 62
+#define	ICBID_SLAVE_QDSS_CFG 63
+#define	ICBID_SLAVE_RBCPR_CFG 64
+#define	ICBID_SLAVE_RBCPR_CX_CFG ICBID_SLAVE_RBCPR_CFG
+#define	ICBID_SLAVE_RBCPR_QDSS_APU_CFG 65
+#define	ICBID_SLAVE_CNOC_MNOC_CFG 66
+#define	ICBID_SLAVE_SNOC_MPU_CFG 67
+#define	ICBID_SLAVE_CNOC_ONOC_CFG 68
+#define	ICBID_SLAVE_PNOC_CFG 69
+#define	ICBID_SLAVE_SNOC_CFG 70
+#define	ICBID_SLAVE_EBI1_DLL_CFG 71
+#define	ICBID_SLAVE_PHY_APU_CFG 72
+#define	ICBID_SLAVE_EBI1_PHY_CFG 73
+#define	ICBID_SLAVE_RPM 74
+#define	ICBID_SLAVE_CNOC_SNOC 75
+#define	ICBID_SLAVE_SERVICE_CNOC 76
+#define	ICBID_SLAVE_OVIRT_SNOC 77
+#define	ICBID_SLAVE_OVIRT_OCMEM 78
+#define	ICBID_SLAVE_USB_HS2 79
+#define	ICBID_SLAVE_QPIC 80
+#define	ICBID_SLAVE_IPS_CFG 81
+#define	ICBID_SLAVE_DSI_CFG 82
+#define	ICBID_SLAVE_USB3_1 83
+#define	ICBID_SLAVE_PCIE_0 84
+#define	ICBID_SLAVE_PCIE_1 85
+#define	ICBID_SLAVE_PSS_SMMU_CFG 86
+#define	ICBID_SLAVE_CRYPTO_2_CFG 87
+#define	ICBID_SLAVE_PCIE_0_CFG 88
+#define	ICBID_SLAVE_PCIE_1_CFG 89
+#define	ICBID_SLAVE_SATA_CFG 90
+#define	ICBID_SLAVE_SPSS_GENI_IR 91
+#define	ICBID_SLAVE_UFS_CFG 92
+#define	ICBID_SLAVE_AVSYNC_CFG 93
+#define	ICBID_SLAVE_VPU_CFG 94
+#define	ICBID_SLAVE_USB_PHY_CFG 95
+#define	ICBID_SLAVE_RBCPR_MX_CFG 96
+#define	ICBID_SLAVE_PCIE_PARF 97
+#define	ICBID_SLAVE_VCAP_CFG 98
+#define	ICBID_SLAVE_EMAC_CFG 99
+#define	ICBID_SLAVE_BCAST_CFG 100
+#define	ICBID_SLAVE_KLM_CFG 101
+#define	ICBID_SLAVE_DISPLAY_PWM 102
+#define	ICBID_SLAVE_GENI 103
+#define	ICBID_SLAVE_SNOC_BIMC_1 104
+#define	ICBID_SLAVE_AUDIO 105
+#define	ICBID_SLAVE_CATS_0 106
+#define	ICBID_SLAVE_CATS_1 107
+#define	ICBID_SLAVE_MM_INT_0 108
+#define	ICBID_SLAVE_MM_INT_1 109
+#define	ICBID_SLAVE_MM_INT_2 110
+#define	ICBID_SLAVE_MM_INT_BIMC 111
+#define	ICBID_SLAVE_MMU_MODEM_XPU_CFG 112
+#define	ICBID_SLAVE_MSS_INT 113
+#define	ICBID_SLAVE_PCNOC_INT_0 114
+#define	ICBID_SLAVE_PCNOC_INT_1 115
+#define	ICBID_SLAVE_PCNOC_M_0 116
+#define	ICBID_SLAVE_PCNOC_M_1 117
+#define	ICBID_SLAVE_PCNOC_S_0 118
+#define	ICBID_SLAVE_PCNOC_S_1 119
+#define	ICBID_SLAVE_PCNOC_S_2 120
+#define	ICBID_SLAVE_PCNOC_S_3 121
+#define	ICBID_SLAVE_PCNOC_S_4 122
+#define	ICBID_SLAVE_PCNOC_S_6 123
+#define	ICBID_SLAVE_PCNOC_S_7 124
+#define	ICBID_SLAVE_PCNOC_S_8 125
+#define	ICBID_SLAVE_PCNOC_S_9 126
+#define	ICBID_SLAVE_PRNG_XPU_CFG 127
+#define	ICBID_SLAVE_QDSS_INT 128
+#define	ICBID_SLAVE_RPM_XPU_CFG 129
+#define	ICBID_SLAVE_SNOC_INT_0 130
+#define	ICBID_SLAVE_SNOC_INT_1 131
+#define	ICBID_SLAVE_SNOC_INT_BIMC 132
+#define	ICBID_SLAVE_TCU 133
+#define	ICBID_SLAVE_BIMC_INT_0 134
+#define	ICBID_SLAVE_BIMC_INT_1 135
+#define	ICBID_SLAVE_RICA_CFG 136
+#define	ICBID_SLAVE_SNOC_BIMC_2 137
+#define	ICBID_SLAVE_BIMC_SNOC_1 138
+#define	ICBID_SLAVE_PNOC_A1NOC 139
+#define	ICBID_SLAVE_SNOC_VMEM 140
+#define	ICBID_SLAVE_A0NOC_SNOC 141
+#define	ICBID_SLAVE_A1NOC_SNOC 142
+#define	ICBID_SLAVE_A2NOC_SNOC 143
+#define	ICBID_SLAVE_A0NOC_CFG 144
+#define	ICBID_SLAVE_A0NOC_MPU_CFG 145
+#define	ICBID_SLAVE_A0NOC_SMMU_CFG 146
+#define	ICBID_SLAVE_A1NOC_CFG 147
+#define	ICBID_SLAVE_A1NOC_MPU_CFG 148
+#define	ICBID_SLAVE_A1NOC_SMMU_CFG 149
+#define	ICBID_SLAVE_A2NOC_CFG 150
+#define	ICBID_SLAVE_A2NOC_MPU_CFG 151
+#define	ICBID_SLAVE_A2NOC_SMMU_CFG 152
+#define	ICBID_SLAVE_AHB2PHY 153
+#define	ICBID_SLAVE_CAMERA_THROTTLE_CFG 154
+#define	ICBID_SLAVE_DCC_CFG 155
+#define	ICBID_SLAVE_DISPLAY_THROTTLE_CFG 156
+#define	ICBID_SLAVE_DSA_CFG 157
+#define	ICBID_SLAVE_DSA_MPU_CFG 158
+#define	ICBID_SLAVE_SSC_MPU_CFG 159
+#define	ICBID_SLAVE_HMSS_L3 160
+#define	ICBID_SLAVE_LPASS_SMMU_CFG 161
+#define	ICBID_SLAVE_MMAGIC_CFG 162
+#define	ICBID_SLAVE_PCIE20_AHB2PHY 163
+#define	ICBID_SLAVE_PCIE_2 164
+#define	ICBID_SLAVE_PCIE_2_CFG 165
+#define	ICBID_SLAVE_PIMEM 166
+#define	ICBID_SLAVE_PIMEM_CFG 167
+#define	ICBID_SLAVE_QDSS_RBCPR_APU_CFG 168
+#define	ICBID_SLAVE_RBCPR_CX 169
+#define	ICBID_SLAVE_RBCPR_MX 170
+#define	ICBID_SLAVE_SMMU_CPP_CFG 171
+#define	ICBID_SLAVE_SMMU_JPEG_CFG 172
+#define	ICBID_SLAVE_SMMU_MDP_CFG 173
+#define	ICBID_SLAVE_SMMU_ROTATOR_CFG 174
+#define	ICBID_SLAVE_SMMU_VENUS_CFG 175
+#define	ICBID_SLAVE_SMMU_VFE_CFG 176
+#define	ICBID_SLAVE_SSC_CFG 177
+#define	ICBID_SLAVE_VENUS_THROTTLE_CFG 178
+#define	ICBID_SLAVE_VMEM 179
+#define	ICBID_SLAVE_VMEM_CFG 180
+#define	ICBID_SLAVE_QDSS_MPU_CFG 181
+#define	ICBID_SLAVE_USB3_PHY_CFG 182
+#define	ICBID_SLAVE_IPA_CFG 183
+#define	ICBID_SLAVE_PCNOC_INT_2 184
+#define	ICBID_SLAVE_PCNOC_INT_3 185
+#define	ICBID_SLAVE_PCNOC_INT_4 186
+#define	ICBID_SLAVE_PCNOC_INT_5 187
+#define	ICBID_SLAVE_PCNOC_INT_6 188
+#define	ICBID_SLAVE_PCNOC_S_5 189
+#define	ICBID_SLAVE_QSPI 190
+#define	ICBID_SLAVE_A1NOC_MS_MPU_CFG 191
+#define	ICBID_SLAVE_A2NOC_MS_MPU_CFG 192
+#define	ICBID_SLAVE_MODEM_Q6_SMMU_CFG 193
+#define	ICBID_SLAVE_MSS_MPU_CFG 194
+#define	ICBID_SLAVE_MSS_PROC_MS_MPU_CFG 195
+#define	ICBID_SLAVE_SKL 196
+#define	ICBID_SLAVE_SNOC_INT_2 197
+#define	ICBID_SLAVE_SMMNOC_BIMC 198
+#define	ICBID_SLAVE_CRVIRT_A1NOC 199
+#define	ICBID_SLAVE_SGMII 200
+#define	ICBID_SLAVE_QHS4_APPS 201
+#define	ICBID_SLAVE_BIMC_PCNOC 202
+#define	ICBID_SLAVE_PCNOC_BIMC_1 203
+#define	ICBID_SLAVE_SPMI_FETCHER 204
+#define	ICBID_SLAVE_MMSS_SMMU_CFG 205
+#define	ICBID_SLAVE_WLAN 206
+#define	ICBID_SLAVE_CRVIRT_A2NOC 207
+#define	ICBID_SLAVE_CNOC_A2NOC 208
+#define	ICBID_SLAVE_GLM 209
+#define	ICBID_SLAVE_GNOC_BIMC 210
+#define	ICBID_SLAVE_GNOC_SNOC 211
+#define	ICBID_SLAVE_QM_CFG 212
+#define	ICBID_SLAVE_TLMM_EAST 213
+#define	ICBID_SLAVE_TLMM_NORTH 214
+#define	ICBID_SLAVE_TLMM_WEST 215
+#define	ICBID_SLAVE_LPASS_TCM 216
+#define	ICBID_SLAVE_TLMM_SOUTH 217
+#define	ICBID_SLAVE_TLMM_CENTER 218
+#define	ICBID_SLAVE_MSS_NAV_CE_MPU_CFG 219
+#define	ICBID_SLAVE_A2NOC_THROTTLE_CFG 220
+#define	ICBID_SLAVE_CDSP 221
+#define	ICBID_SLAVE_CDSP_SMMU_CFG 222
+#define	ICBID_SLAVE_LPASS_MPU_CFG 223
+#define	ICBID_SLAVE_CSI_PHY_CFG 224
+#endif
diff -Nruw linux-4.4.115-fbx/arch/arm/boot/dts/include/dt-bindings/msm./msm-bus-rule-ops.h linux-4.4.115-fbx/arch/arm/boot/dts/include/dt-bindings/msm/msm-bus-rule-ops.h
--- linux-4.4.115-fbx/arch/arm/boot/dts/include/dt-bindings/msm./msm-bus-rule-ops.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/arch/arm/boot/dts/include/dt-bindings/msm/msm-bus-rule-ops.h	2019-01-22 16:16:28.179288750 +0100
@@ -0,0 +1,34 @@
+/* Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MSM_BUS_RULE_OPS_H
+#define __MSM_BUS_RULE_OPS_H
+
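+/*
+ * Fields a bus-scaling rule can match on: instantaneous bandwidth (IB),
+ * average bandwidth (AB), or clock rate.  (Expansions assumed from the
+ * usual msm_bus naming; they are not spelled out in this header.)
+ */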
+#define FLD_IB	0
+#define FLD_AB	1
+#define FLD_CLK	2
+
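+/* Comparison operators a rule applies to the selected field (OP_NOOP: none). */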
+#define OP_LE	0
+#define OP_LT	1
+#define OP_GE	2
+#define OP_GT	3
+#define OP_NOOP	4
+
+#define RULE_STATE_NOT_APPLIED	0
+#define RULE_STATE_APPLIED	1
+
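+/*
+ * Throttling states a matched rule can request; exact semantics are
+ * defined by the msm_bus rule engine that consumes these values.
+ */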
+#define THROTTLE_ON	0
+#define THROTTLE_OFF	1
+#define THROTTLE_REG	2
+
+#endif
diff -Nruw linux-4.4.115-fbx/arch/arm/boot/dts/include/dt-bindings/msm./pm.h linux-4.4.115-fbx/arch/arm/boot/dts/include/dt-bindings/msm/pm.h
--- linux-4.4.115-fbx/arch/arm/boot/dts/include/dt-bindings/msm./pm.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/arch/arm/boot/dts/include/dt-bindings/msm/pm.h	2019-01-22 16:16:28.179288750 +0100
@@ -0,0 +1,25 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __DT_MSM_PM_H__
+#define __DT_MSM_PM_H__
+
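+/*
+ * Reset levels describe how much state a low-power mode loses, from none
+ * through retention and GDHS (gated head switch) to full power collapse.
+ * Affinity levels name the scope a mode applies to: a CPU core, an L2
+ * cluster, or the cache-coherent interconnect (CCI).  (Expansions assumed
+ * from the identifier names.)
+ */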
+#define LPM_RESET_LVL_NONE	0
+#define LPM_RESET_LVL_RET	1
+#define LPM_RESET_LVL_GDHS	2
+#define LPM_RESET_LVL_PC	3
+
+#define LPM_AFF_LVL_CPU		0
+#define LPM_AFF_LVL_L2		1
+#define LPM_AFF_LVL_CCI		2
+
+#endif
diff -Nruw linux-4.4.115-fbx/arch/arm/boot/dts/include/dt-bindings/msm./power-on.h linux-4.4.115-fbx/arch/arm/boot/dts/include/dt-bindings/msm/power-on.h
--- linux-4.4.115-fbx/arch/arm/boot/dts/include/dt-bindings/msm./power-on.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/arch/arm/boot/dts/include/dt-bindings/msm/power-on.h	2019-01-22 16:16:28.179288750 +0100
@@ -0,0 +1,24 @@
+/* Copyright (c) 2015, 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MSM_POWER_ON_H__
+#define __MSM_POWER_ON_H__
+
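+/*
+ * Power-off types selecting the PMIC's behavior on power-off: warm reset,
+ * shutdown, or hard reset; the DVDD variants presumably drop the digital
+ * supply as well.  (Summary inferred from the macro names.)
+ */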
+#define PON_POWER_OFF_RESERVED		0x00
+#define PON_POWER_OFF_WARM_RESET	0x01
+#define PON_POWER_OFF_SHUTDOWN		0x04
+#define PON_POWER_OFF_DVDD_SHUTDOWN	0x05
+#define PON_POWER_OFF_HARD_RESET	0x07
+#define PON_POWER_OFF_DVDD_HARD_RESET	0x08
+#define PON_POWER_OFF_MAX_TYPE		0x10
+
+#endif
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/arch/arm/boot/dts/include/dt-bindings/regulator/qcom,rpm-smd-regulator.h	2019-01-22 16:16:28.183288787 +0100
@@ -0,0 +1,28 @@
+/* Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __QCOM_RPM_SMD_REGULATOR_H
+#define __QCOM_RPM_SMD_REGULATOR_H
+
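+/*
+ * Abstract voltage-corner levels (retention up through turbo/binning)
+ * requested from the RPM over SMD; these are corner identifiers, not
+ * microvolt values.
+ */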
+#define RPM_SMD_REGULATOR_LEVEL_NONE		0
+#define RPM_SMD_REGULATOR_LEVEL_RETENTION	16
+#define RPM_SMD_REGULATOR_LEVEL_RETENTION_PLUS	32
+#define RPM_SMD_REGULATOR_LEVEL_MIN_SVS		48
+#define RPM_SMD_REGULATOR_LEVEL_LOW_SVS		64
+#define RPM_SMD_REGULATOR_LEVEL_SVS		128
+#define RPM_SMD_REGULATOR_LEVEL_SVS_PLUS	192
+#define RPM_SMD_REGULATOR_LEVEL_NOM		256
+#define RPM_SMD_REGULATOR_LEVEL_NOM_PLUS	320
+#define RPM_SMD_REGULATOR_LEVEL_TURBO		384
+#define RPM_SMD_REGULATOR_LEVEL_BINNING		512
+
+#endif
diff -Nruw linux-4.4.115-fbx/arch/arm/boot/dts/qcom./apq8098-freebox-batfish.dts linux-4.4.115-fbx/arch/arm/boot/dts/qcom/apq8098-freebox-batfish.dts
--- linux-4.4.115-fbx/arch/arm/boot/dts/qcom./apq8098-freebox-batfish.dts	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/arch/arm/boot/dts/qcom/apq8098-freebox-batfish.dts	2019-01-22 16:16:21.171225289 +0100
@@ -0,0 +1,692 @@
+/* Copyright (c) 2017, Freebox SAS. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/dts-v1/;
+
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/input/input.h>
+#include "apq8098-v2.1.dtsi"
+#include "msm8998-audio-freebox.dtsi"
+
+#include "msm8998-ramoops.dtsi"
+
+/ {
+	model = "Freebox Batfish";
+	compatible = "freebox,fbx7hd-batfish", "freebox,fbx7hd", "qcom,apq8098";
+	qcom,board-id = <5 2>;
+
+	ir: ir-receiver {
+		compatible = "gpio-ir-receiver";
+		gpios = <&tlmm 120 GPIO_ACTIVE_LOW>;
+		pinctrl-names = "default";
+		pinctrl-0 = <&ir_int>;
+		linux,rc-map-name = "rc-rc6-freebox";
+	};
+
+	fbxgpio@0 {
+		compatible = "fbx,fbxgpio";
+
+		pinctrl-names = "default";
+		pinctrl-0 = <
+			&hdmi_en
+			&hdmi_fault
+			&force_test_mode
+			&usb_hub_rst
+			&spdif_src_sel
+			>;
+
+		hdmi-enable {
+			name = "hdmi-enable";
+			gpio = <&tlmm 17 GPIO_ACTIVE_HIGH>;
+			output-low;
+		};
+
+		hdmi-fault {
+			name = "hdmi-fault";
+			gpio = <&tlmm 133 GPIO_ACTIVE_LOW>;
+			input;
+		};
+
+		test-mode {
+			name = "test-mode";
+			gpio = <&tlmm 29 GPIO_ACTIVE_LOW>;
+			input;
+		};
+
+		usb-reset {
+			name = "usb-reset";
+			gpio = <&tlmm 24 GPIO_ACTIVE_LOW>;
+			output-low;
+		};
+
+		spdif-src {
+			name = "spdif-src";
+			gpio = <&tlmm 8 GPIO_ACTIVE_LOW>;
+			output-low;
+		};
+	};
+
+	fbxgpio@1 {
+		compatible = "fbx,fbxgpio";
+
+		top-wcharging-ledr {
+			name = "top-wcharging-ledr";
+			gpio = <&exp1 0 GPIO_ACTIVE_LOW>;
+			input;
+		};
+
+		top-wcharging-ledg {
+			name = "top-wcharging-ledg";
+			gpio = <&exp1 1 GPIO_ACTIVE_LOW>;
+			input;
+		};
+
+		top-nfc-enable {
+			name = "top-nfc-enable";
+			gpio = <&exp1 2 GPIO_ACTIVE_LOW>;
+			output-low;
+		};
+
+		top-nfc-int {
+			name = "top-nfc-int";
+			gpio = <&exp1 3 GPIO_ACTIVE_LOW>;
+			input;
+		};
+
+		top-dsp-reset {
+			name = "top-dsp-reset";
+			gpio = <&exp1 4 GPIO_ACTIVE_LOW>;
+			output-low;
+		};
+
+		top-mic-mute {
+			name = "top-mic-mute";
+			gpio = <&exp1 7 GPIO_ACTIVE_HIGH>;
+			input;
+		};
+
+		top-wcharging-stby {
+			name = "top-wcharging-stby";
+			gpio = <&exp1 8 GPIO_ACTIVE_LOW>;
+			output-low;
+		};
+
+		top-dsp-int {
+			name = "top-dsp-int";
+			gpio = <&exp1 15 GPIO_ACTIVE_HIGH>;
+			input;
+		};
+	};
+
+	gpio_keys {
+		compatible = "gpio-keys";
+		#address-cells = <1>;
+		#size-cells = <0>;
+		pinctrl-0 = <&force_bank0>;
+		pinctrl-names = "default";
+
+		button@1 {
+			label = "Factory Reset Button";
+			linux,code = <KEY_SETUP>;
+			gpios = <&tlmm 18 GPIO_ACTIVE_LOW>;
+		};
+	};
+};
+
+&vendor {
+	bluetooth: bt_wcn3990 {
+		compatible = "qca,wcn3990";
+		qca,bt-vdd-io-supply = <&pm8998_s3>;
+		qca,bt-vdd-xtal-supply = <&pm8998_s5>;
+		qca,bt-vdd-core-supply = <&pm8998_l7>;
+		qca,bt-vdd-pa-supply = <&pm8998_l17>;
+		qca,bt-vdd-ldo-supply = <&pm8998_l25>;
+		//qca,bt-chip-pwd-supply = <&pmi8998_bob_pin1>;
+		clocks = <&clock_gcc clk_rf_clk2_pin>;
+		clock-names = "rf_clk2";
+
+		qca,bt-vdd-io-voltage-level = <1352000 1352000>;
+		qca,bt-vdd-xtal-voltage-level = <2040000 2040000>;
+		qca,bt-vdd-core-voltage-level = <1800000 1800000>;
+		qca,bt-vdd-pa-voltage-level = <1304000 1304000>;
+		qca,bt-vdd-ldo-voltage-level = <3312000 3312000>;
+		qca,bt-chip-pwd-voltage-level = <3600000 3600000>;
+
+		qca,bt-vdd-io-current-level = <1>; /* LPM/PFM */
+		qca,bt-vdd-xtal-current-level = <1>; /* LPM/PFM */
+		qca,bt-vdd-core-current-level = <1>; /* LPM/PFM */
+		qca,bt-vdd-pa-current-level = <1>; /* LPM/PFM */
+		qca,bt-vdd-ldo-current-level = <1>; /* LPM/PFM */
+	};
+};
+
+&blsp1_uart3_hs {
+	status = "ok";
+};
+
+&uartblsp2dm1 {
+	status = "ok";
+	pinctrl-names = "default";
+	pinctrl-0 = <&uart_console_active>;
+};
+
+&ufsphy1 {
+	vdda-phy-supply = <&pm8998_l1>;
+	vdda-pll-supply = <&pm8998_l2>;
+	vddp-ref-clk-supply = <&pm8998_l26>;
+	vdda-phy-max-microamp = <51400>;
+	vdda-pll-max-microamp = <14600>;
+	vddp-ref-clk-max-microamp = <100>;
+	vddp-ref-clk-always-on;
+	status = "ok";
+};
+
+&ufs1 {
+	vdd-hba-supply = <&gdsc_ufs>;
+	vdd-hba-fixed-regulator;
+	vcc-supply = <&pm8998_l20>;
+	vccq-supply = <&pm8998_l26>;
+	vccq2-supply = <&pm8998_s4>;
+	vcc-max-microamp = <750000>;
+	vccq-max-microamp = <560000>;
+	vccq2-max-microamp = <750000>;
+	status = "ok";
+};
+
+&ufs_ice {
+	status = "ok";
+};
+
+&i2c_5 {
+	status = "okay";
+
+	qcom,clk-freq-out = <100000>;
+
+	// ti8020@44
+	// usbmux@47
+	// si2157@60
+	// si2168@64
+	// leadtrend@68
+};
+
+&i2c_4 {
+	status = "okay";
+
+	qcom,clk-freq-out = <100000>;
+
+	lm75@48 {
+		compatible = "national,lm75";
+		reg = <0x48>;
+	};
+
+	tas5766@4c {
+		compatible = "ti,tas5766";
+		reg = <0x4c>;
+	};
+
+	tas5766@4d {
+		compatible = "ti,tas5766";
+		reg = <0x4d>;
+	};
+
+	tas5766@4e {
+		compatible = "ti,tas5766";
+		reg = <0x4e>;
+	};
+
+	// at24_audio@53
+	// at24_carrier@57
+};
+
+&i2c_6 {
+	status = "okay";
+
+	qcom,clk-freq-out = <100000>;
+
+	exp1: gpio@74 {
+		compatible = "ti,tca9539";
+		reg = <0x74>;
+		gpio-controller;
+		#gpio-cells = <2>;
+		reset-gpios = <&tlmm 96 GPIO_ACTIVE_LOW>;
+		pinctrl-names = "default";
+		pinctrl-0 = <&top_exp_rst>;
+	};
+
+	// at24_top@53
+};
+
+&i2c_7 {
+	status = "okay";
+
+	wm8804@3b {
+		compatible = "wlf,wm8804";
+		reg = <0x3b>;
+		wlf,reset-gpio = <&tlmm 100 GPIO_ACTIVE_HIGH>;
+		wlf,irq-gpio = <&tlmm 110 GPIO_ACTIVE_LOW>;
+	};
+
+	sil9437@31 {
+		compatible = "sil,sil9437";
+		reg = <0x31>;
+		sil,reset-gpio = <&tlmm 12 GPIO_ACTIVE_LOW>;
+		sil,irq-gpio = <&tlmm 118 GPIO_ACTIVE_LOW>;
+		sil,irq-open-drain;
+		sil,irq-active-low;
+	};
+
+	// hdmi_redriver@5e
+};
+
+&spi_1 {
+	status = "okay";
+
+	pinctrl-0 = <&spi_1_active &spi_1b_active &top_psoc_rst>;
+	pinctrl-1 = <&spi_1_sleep &spi_1b_sleep &top_psoc_rst>;
+	cs-gpios = <&tlmm 2 GPIO_ACTIVE_HIGH>, <&tlmm 23 GPIO_ACTIVE_HIGH>,
+		   <&tlmm 23 GPIO_ACTIVE_HIGH>, <&tlmm 23 GPIO_ACTIVE_HIGH>;
+
+	spidev@0 {
+		/* PSOC for LED & touch control */
+		compatible = "freebox,fbx7hd-top-psoc";
+		spi-max-frequency = <1200000>;
+		irq-gpio = <&tlmm 122 GPIO_ACTIVE_LOW>;
+		reset-gpio = <&tlmm 97 GPIO_ACTIVE_LOW>;
+		reg = <0>;
+	};
+
+	spidev@3 {
+		/* XMOS */
+		compatible = "spidev";
+		spi-max-frequency = <1200000>;
+		reg = <3>;
+	};
+};
+
+&spi_9 {
+	status = "okay";
+	switch@0 {
+		compatible = "realtek,rtl8367c-spi";
+		reg = <0>;
+		reset-gpios = <&tlmm 30 GPIO_ACTIVE_LOW>;
+		spi-max-frequency = <5000000>;
+	};
+};
+
+&tlmm {
+	force_bank0: force_bank0 {
+		mux {
+			pins = "gpio18";
+			function = "gpio";
+		};
+
+		config {
+			pins = "gpio18";
+			drive-strength = <2>;
+			bias-disable;
+		};
+	};
+
+	force_test_mode: force_test_mode {
+		mux {
+			pins = "gpio29";
+			function = "gpio";
+		};
+
+		config {
+			pins = "gpio29";
+			drive-strength = <2>;
+			bias-pull-down;
+		};
+	};
+
+	hdmi_en: hdmi_en {
+		mux {
+			pins = "gpio17";
+			function = "gpio";
+		};
+
+		config {
+			pins = "gpio17";
+			drive-strength = <2>;
+			bias-pull-down;
+		};
+	};
+
+	hdmi_fault: hdmi_fault {
+		mux {
+			pins = "gpio133";
+			function = "gpio";
+		};
+
+		config {
+			pins = "gpio133";
+			drive-strength = <2>;
+			bias-pull-up;
+		};
+	};
+
+	usb_hub_rst: usb_hub_rst {
+		mux {
+			pins = "gpio24";
+			function = "gpio";
+		};
+
+		config {
+			pins = "gpio24";
+			drive-strength = <2>;
+			bias-pull-down;
+		};
+	};
+
+	spdif_src_sel: spdif_src_sel {
+		mux {
+			pins = "gpio8";
+			function = "gpio";
+		};
+
+		config {
+			pins = "gpio8";
+			drive-strength = <2>;
+			bias-pull-down;
+		};
+	};
+
+	top_exp_rst: top_exp_rst {
+		mux {
+			pins = "gpio96";
+			function = "gpio";
+		};
+
+		config {
+			pins = "gpio96";
+			drive-strength = <2>;
+			bias-pull-down;
+		};
+	};
+
+	top_psoc_rst: top_psoc_rst {
+		mux {
+			pins = "gpio97";
+			function = "gpio";
+		};
+
+		config {
+			pins = "gpio97";
+			drive-strength = <2>;
+			bias-pull-down;
+		};
+	};
+
+	ir_int: ir_int {
+		mux {
+			pins = "gpio120";
+			function = "gpio";
+		};
+		config {
+			pins = "gpio120";
+			drive-strength = <6>;
+			bias-pull-up;
+		};
+	};
+
+	top_exp_psoc_irq: top_exp_psoc_irq {
+		mux {
+			pins = "gpio122";
+			function = "gpio";
+		};
+
+		config {
+			pins = "gpio122";
+			drive-strength = <2>;
+			bias-pull-down;
+		};
+	};
+};
+
+&usb3 {
+	/delete-property/ extcon;
+	dwc3@a800000 {
+		dr_mode = "host";
+	};
+};
+
+&sde_hdmi {
+	qcom,display-type = "primary";
+};
+
+&sde_hdmi_tx {
+	/* disable 5V HPD pin */
+	/delete-property/ qcom,hdmi-tx-hpd5v-gpio;
+	pinctrl-0 = <&mdss_hdmi_hpd_active
+		&mdss_hdmi_ddc_active>;
+	pinctrl-1 = <&mdss_hdmi_hpd_suspend
+		&mdss_hdmi_ddc_suspend>;
+};
+
+&spmi_bus {
+	qcom,pmi8998@2 {
+		status = "disabled";
+	};
+	qcom,pmi8998@3 {
+		status = "disabled";
+	};
+};
+
+&pmi8998_wled {
+	status = "disabled";
+};
+
+&pm8998_gpios {
+	gpio@cc00 { /* GPIO 13 */
+		qcom,mode = <1>;
+		qcom,output-type = <0>;
+		qcom,pull = <5>;
+		qcom,vin-sel = <0>;
+		qcom,out-strength = <1>;
+		qcom,src-sel = <3>;
+		qcom,master-en = <1>;
+		status = "okay";
+	};
+};
+
+&pm8998_vadc {
+	chan@83 {
+		label = "vph_pwr";
+		reg = <0x83>;
+		qcom,decimation = <2>;
+		qcom,pre-div-channel-scaling = <1>;
+		qcom,calibration-type = "absolute";
+		qcom,scale-function = <0>;
+		qcom,hw-settle-time = <0>;
+		qcom,fast-avg-setup = <0>;
+	};
+
+	chan@85 {
+		label = "vcoin";
+		reg = <0x85>;
+		qcom,decimation = <2>;
+		qcom,pre-div-channel-scaling = <1>;
+		qcom,calibration-type = "absolute";
+		qcom,scale-function = <0>;
+		qcom,hw-settle-time = <0>;
+		qcom,fast-avg-setup = <0>;
+	};
+
+	chan@4c {
+		label = "xo_therm";
+		reg = <0x4c>;
+		qcom,decimation = <2>;
+		qcom,pre-div-channel-scaling = <0>;
+		qcom,calibration-type = "ratiometric";
+		qcom,scale-function = <4>;
+		qcom,hw-settle-time = <2>;
+		qcom,fast-avg-setup = <0>;
+	};
+
+	chan@4d {
+		label = "msm_therm";
+		reg = <0x4d>;
+		qcom,decimation = <2>;
+		qcom,pre-div-channel-scaling = <0>;
+		qcom,calibration-type = "ratiometric";
+		qcom,scale-function = <2>;
+		qcom,hw-settle-time = <2>;
+		qcom,fast-avg-setup = <0>;
+	};
+
+	chan@51 {
+		label = "quiet_therm";
+		reg = <0x51>;
+		qcom,decimation = <2>;
+		qcom,pre-div-channel-scaling = <0>;
+		qcom,calibration-type = "ratiometric";
+		qcom,scale-function = <2>;
+		qcom,hw-settle-time = <2>;
+		qcom,fast-avg-setup = <0>;
+	};
+};
+
+&pm8998_adc_tm {
+	chan@83 {
+		label = "vph_pwr";
+		reg = <0x83>;
+		qcom,pre-div-channel-scaling = <1>;
+		qcom,calibration-type = "absolute";
+		qcom,scale-function = <0>;
+		qcom,hw-settle-time = <0>;
+		qcom,btm-channel-number = <0x60>;
+	};
+
+	chan@4d {
+		label = "msm_therm";
+		reg = <0x4d>;
+		qcom,pre-div-channel-scaling = <0>;
+		qcom,calibration-type = "ratiometric";
+		qcom,scale-function = <2>;
+		qcom,hw-settle-time = <2>;
+		qcom,btm-channel-number = <0x68>;
+		qcom,thermal-node;
+	};
+
+	chan@51 {
+		label = "quiet_therm";
+		reg = <0x51>;
+		qcom,pre-div-channel-scaling = <0>;
+		qcom,calibration-type = "ratiometric";
+		qcom,scale-function = <2>;
+		qcom,hw-settle-time = <2>;
+		qcom,btm-channel-number = <0x70>;
+		qcom,thermal-node;
+	};
+
+	chan@4c {
+		label = "xo_therm";
+		reg = <0x4c>;
+		qcom,pre-div-channel-scaling = <0>;
+		qcom,calibration-type = "ratiometric";
+		qcom,scale-function = <4>;
+		qcom,hw-settle-time = <2>;
+		qcom,btm-channel-number = <0x78>;
+		qcom,thermal-node;
+	};
+};
+
+&soc {
+	qcom,msm-dai-tdm-sec-rx {
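+		/*
+		 * The group/port IDs below select the secondary TDM RX AFE
+		 * interface (37136 = 0x9110, 36880 = 0x9010); the interface
+		 * name is inferred from this node's name, the hex values are
+		 * exact conversions.
+		 */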
+		qcom,msm-cpudai-tdm-group-id = <37136>;
+		qcom,msm-cpudai-tdm-group-num-ports = <1>;
+		qcom,msm-cpudai-tdm-group-port-id = <36880>;
+		qcom,msm-cpudai-tdm-clk-rate = <12288000>;
+		qcom,msm-cpudai-tdm-clk-internal = <1>;
+		qcom,msm-cpudai-tdm-sync-mode = <1>;
+		qcom,msm-cpudai-tdm-sync-src = <1>;
+		qcom,msm-cpudai-tdm-data-out = <1>;
+		qcom,msm-cpudai-tdm-invert-sync = <0>;
+		qcom,msm-cpudai-tdm-data-delay = <0>;
+		pinctrl-names = "default", "sleep";
+		pinctrl-0 = <&sec_aux_pcm_active
+				&sec_aux_pcm_dout_active>;
+		pinctrl-1 = <&sec_aux_pcm_sleep
+				&sec_aux_pcm_dout_sleep>;
+	};
+
+	qcom,msm-dai-mi2s {
+		dai_mi2s0: qcom,msm-dai-q6-mi2s-prim {
+			qcom,msm-mi2s-rx-lines = <1>;
+			qcom,msm-mi2s-tx-lines = <0>;
+			pinctrl-names = "default", "sleep";
+			pinctrl-0 = <&pri_mi2s_mclk_active
+					&pri_mi2s_sck_active
+					&pri_mi2s_ws_active
+					&pri_mi2s_sd0_active>;
+			pinctrl-1 = <&pri_mi2s_mclk_sleep
+					&pri_mi2s_sck_sleep
+					&pri_mi2s_ws_sleep
+					&pri_mi2s_sd0_sleep>;
+		};
+
+		dai_mi2s2: qcom,msm-dai-q6-mi2s-tert {
+			qcom,msm-mi2s-rx-lines = <0>;
+			qcom,msm-mi2s-tx-lines = <1>;
+			pinctrl-names = "default", "sleep";
+			pinctrl-0 = <&tert_mi2s_active
+					&tert_mi2s_sd0_active>;
+			pinctrl-1 = <&tert_mi2s_sleep
+					&tert_mi2s_sd0_sleep>;
+		};
+
+		dai_mi2s3: qcom,msm-dai-q6-mi2s-quat {
+			qcom,msm-mi2s-rx-lines = <0>;
+			qcom,msm-mi2s-tx-lines = <15>;
+			pinctrl-names = "default", "sleep";
+			pinctrl-0 = <&quat_mi2s_active
+					&quat_mi2s_sd0_active
+					&quat_mi2s_sd1_active
+					&quat_mi2s_sd2_active
+					&quat_mi2s_sd3_active>;
+			pinctrl-1 = <&quat_mi2s_sleep
+					&quat_mi2s_sd0_sleep
+					&quat_mi2s_sd1_sleep
+					&quat_mi2s_sd2_sleep
+					&quat_mi2s_sd3_sleep>;
+		};
+	};
+
+	sound-freebox {
+		fbx,spdif-wm8804;
+		fbx,arc-sil9437;
+	};
+};
+
+&soc {
+	qcom,bcl {
+		/delete-property/ qcom,bcl-enable;
+	};
+};
+
+&pcie0 {
+	qcom,boot-option = <0>;
+};
+
+&msm_ath10k_wlan {
+	status = "ok";
+};
+
+&tspp {
+	pinctrl-names = "disabled", "tsif0-mode1", "tsif0-mode2";
+
+	tsin0: port@0 {
+		tsin-num = <0>;
+		i2c-bus = <&i2c_5>;
+		reset-gpios = <&tlmm 84 GPIO_ACTIVE_LOW>;
+	};
+};
diff -Nruw linux-4.4.115-fbx/arch/arm/boot/dts/qcom./apq8098-freebox-oarfish.dts linux-4.4.115-fbx/arch/arm/boot/dts/qcom/apq8098-freebox-oarfish.dts
--- linux-4.4.115-fbx/arch/arm/boot/dts/qcom./apq8098-freebox-oarfish.dts	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/arch/arm/boot/dts/qcom/apq8098-freebox-oarfish.dts	2019-01-22 16:16:21.171225289 +0100
@@ -0,0 +1,402 @@
+/* Copyright (c) 2017, Freebox SAS. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/dts-v1/;
+
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/input/input.h>
+#include "apq8098-v2.1.dtsi"
+#include "msm8998-audio-freebox.dtsi"
+
+#include "msm8998-ramoops.dtsi"
+
+/ {
+	model = "Freebox Oarfish";
+	compatible = "freebox,fbx7hd-oarfish", "freebox,fbx7hd", "qcom,apq8098";
+	qcom,board-id = <5 3>;
+
+	ir: ir-receiver {
+		compatible = "gpio-ir-receiver";
+		gpios = <&tlmm 120 GPIO_ACTIVE_LOW>;
+		pinctrl-names = "default";
+		pinctrl-0 = <&ir_int>;
+		linux,rc-map-name = "rc-rc6-freebox";
+	};
+
+	fbxgpio@0 {
+		compatible = "fbx,fbxgpio";
+
+		pinctrl-names = "default";
+		pinctrl-0 = <
+			&hdmi_en
+			&hdmi_fault
+			&force_test_mode
+			>;
+
+		hdmi-enable {
+			name = "hdmi-enable";
+			gpio = <&tlmm 17 GPIO_ACTIVE_HIGH>;
+			output-low;
+		};
+
+		hdmi-fault {
+			name = "hdmi-fault";
+			gpio = <&tlmm 133 GPIO_ACTIVE_LOW>;
+			input;
+		};
+
+		test-mode {
+			name = "test-mode";
+			gpio = <&tlmm 29 GPIO_ACTIVE_LOW>;
+			input;
+		};
+	};
+
+	gpio_keys {
+		compatible = "gpio-keys";
+		#address-cells = <1>;
+		#size-cells = <0>;
+		pinctrl-0 = <&force_bank0>;
+		pinctrl-names = "default";
+
+		button@1 {
+			label = "Factory Reset Button";
+			linux,code = <KEY_SETUP>;
+			gpios = <&tlmm 18 GPIO_ACTIVE_LOW>;
+		};
+	};
+};
+
+&vendor {
+	bluetooth: bt_wcn3990 {
+		compatible = "qca,wcn3990";
+		qca,bt-vdd-io-supply = <&pm8998_s3>;
+		qca,bt-vdd-xtal-supply = <&pm8998_s5>;
+		qca,bt-vdd-core-supply = <&pm8998_l7>;
+		qca,bt-vdd-pa-supply = <&pm8998_l17>;
+		qca,bt-vdd-ldo-supply = <&pm8998_l25>;
+		//qca,bt-chip-pwd-supply = <&pmi8998_bob_pin1>;
+		clocks = <&clock_gcc clk_rf_clk2_pin>;
+		clock-names = "rf_clk2";
+
+		qca,bt-vdd-io-voltage-level = <1352000 1352000>;
+		qca,bt-vdd-xtal-voltage-level = <2040000 2040000>;
+		qca,bt-vdd-core-voltage-level = <1800000 1800000>;
+		qca,bt-vdd-pa-voltage-level = <1304000 1304000>;
+		qca,bt-vdd-ldo-voltage-level = <3312000 3312000>;
+		qca,bt-chip-pwd-voltage-level = <3600000 3600000>;
+
+		qca,bt-vdd-io-current-level = <1>; /* LPM/PFM */
+		qca,bt-vdd-xtal-current-level = <1>; /* LPM/PFM */
+		qca,bt-vdd-core-current-level = <1>; /* LPM/PFM */
+		qca,bt-vdd-pa-current-level = <1>; /* LPM/PFM */
+		qca,bt-vdd-ldo-current-level = <1>; /* LPM/PFM */
+	};
+};
+
+&blsp1_uart3_hs {
+	status = "ok";
+};
+
+&uartblsp2dm1 {
+	status = "ok";
+	pinctrl-names = "default";
+	pinctrl-0 = <&uart_console_active>;
+};
+
+&ufsphy1 {
+	vdda-phy-supply = <&pm8998_l1>;
+	vdda-pll-supply = <&pm8998_l2>;
+	vddp-ref-clk-supply = <&pm8998_l26>;
+	vdda-phy-max-microamp = <51400>;
+	vdda-pll-max-microamp = <14600>;
+	vddp-ref-clk-max-microamp = <100>;
+	vddp-ref-clk-always-on;
+	status = "ok";
+};
+
+&ufs1 {
+	vdd-hba-supply = <&gdsc_ufs>;
+	vdd-hba-fixed-regulator;
+	vcc-supply = <&pm8998_l20>;
+	vccq-supply = <&pm8998_l26>;
+	vccq2-supply = <&pm8998_s4>;
+	vcc-max-microamp = <750000>;
+	vccq-max-microamp = <560000>;
+	vccq2-max-microamp = <750000>;
+	status = "ok";
+};
+
+&ufs_ice {
+	status = "ok";
+};
+
+&i2c_5 {
+	status = "okay";
+
+	qcom,clk-freq-out = <100000>;
+
+	// usbmux@47
+	// si2168@64
+};
+
+&i2c_4 {
+	status = "okay";
+
+	qcom,clk-freq-out = <100000>;
+
+	// at24_carrier@57
+};
+
+&i2c_7 {
+	status = "okay";
+
+	// hdmi_redriver@5e
+};
+
+&tlmm {
+	force_bank0: force_bank0 {
+		mux {
+			pins = "gpio18";
+			function = "gpio";
+		};
+
+		config {
+			pins = "gpio18";
+			drive-strength = <2>;
+			bias-disable;
+		};
+	};
+
+	force_test_mode: force_test_mode {
+		mux {
+			pins = "gpio29";
+			function = "gpio";
+		};
+
+		config {
+			pins = "gpio29";
+			drive-strength = <2>;
+			bias-pull-down;
+		};
+	};
+
+	hdmi_en: hdmi_en {
+		mux {
+			pins = "gpio17";
+			function = "gpio";
+		};
+
+		config {
+			pins = "gpio17";
+			drive-strength = <2>;
+			bias-pull-down;
+		};
+	};
+
+	hdmi_fault: hdmi_fault {
+		mux {
+			pins = "gpio133";
+			function = "gpio";
+		};
+
+		config {
+			pins = "gpio133";
+			drive-strength = <2>;
+			bias-pull-up;
+		};
+	};
+
+	ir_int: ir_int {
+		mux {
+			pins = "gpio120";
+			function = "gpio";
+		};
+		config {
+			pins = "gpio120";
+			drive-strength = <6>;
+			bias-pull-up;
+		};
+	};
+};
+
+&usb3 {
+	/delete-property/ extcon;
+	dwc3@a800000 {
+		dr_mode = "host";
+	};
+};
+
+&sde_hdmi {
+	qcom,display-type = "primary";
+};
+
+&sde_hdmi_tx {
+	/* disable 5V HPD pin */
+	/delete-property/ qcom,hdmi-tx-hpd5v-gpio;
+	pinctrl-0 = <&mdss_hdmi_hpd_active
+		&mdss_hdmi_ddc_active>;
+	pinctrl-1 = <&mdss_hdmi_hpd_suspend
+		&mdss_hdmi_ddc_suspend>;
+};
+
+&spmi_bus {
+	qcom,pmi8998@2 {
+		status = "disabled";
+	};
+	qcom,pmi8998@3 {
+		status = "disabled";
+	};
+};
+
+&pmi8998_wled {
+	status = "disabled";
+};
+
+&pm8998_gpios {
+	gpio@cc00 { /* GPIO 13 */
+		qcom,mode = <1>;
+		qcom,output-type = <0>;
+		qcom,pull = <5>;
+		qcom,vin-sel = <0>;
+		qcom,out-strength = <1>;
+		qcom,src-sel = <3>;
+		qcom,master-en = <1>;
+		status = "okay";
+	};
+};
+
+&pm8998_vadc {
+	chan@83 {
+		label = "vph_pwr";
+		reg = <0x83>;
+		qcom,decimation = <2>;
+		qcom,pre-div-channel-scaling = <1>;
+		qcom,calibration-type = "absolute";
+		qcom,scale-function = <0>;
+		qcom,hw-settle-time = <0>;
+		qcom,fast-avg-setup = <0>;
+	};
+
+	chan@85 {
+		label = "vcoin";
+		reg = <0x85>;
+		qcom,decimation = <2>;
+		qcom,pre-div-channel-scaling = <1>;
+		qcom,calibration-type = "absolute";
+		qcom,scale-function = <0>;
+		qcom,hw-settle-time = <0>;
+		qcom,fast-avg-setup = <0>;
+	};
+
+	chan@4c {
+		label = "xo_therm";
+		reg = <0x4c>;
+		qcom,decimation = <2>;
+		qcom,pre-div-channel-scaling = <0>;
+		qcom,calibration-type = "ratiometric";
+		qcom,scale-function = <4>;
+		qcom,hw-settle-time = <2>;
+		qcom,fast-avg-setup = <0>;
+	};
+
+	chan@4d {
+		label = "msm_therm";
+		reg = <0x4d>;
+		qcom,decimation = <2>;
+		qcom,pre-div-channel-scaling = <0>;
+		qcom,calibration-type = "ratiometric";
+		qcom,scale-function = <2>;
+		qcom,hw-settle-time = <2>;
+		qcom,fast-avg-setup = <0>;
+	};
+
+	chan@51 {
+		label = "quiet_therm";
+		reg = <0x51>;
+		qcom,decimation = <2>;
+		qcom,pre-div-channel-scaling = <0>;
+		qcom,calibration-type = "ratiometric";
+		qcom,scale-function = <2>;
+		qcom,hw-settle-time = <2>;
+		qcom,fast-avg-setup = <0>;
+	};
+};
+
+&pm8998_adc_tm {
+	chan@83 {
+		label = "vph_pwr";
+		reg = <0x83>;
+		qcom,pre-div-channel-scaling = <1>;
+		qcom,calibration-type = "absolute";
+		qcom,scale-function = <0>;
+		qcom,hw-settle-time = <0>;
+		qcom,btm-channel-number = <0x60>;
+	};
+
+	chan@4d {
+		label = "msm_therm";
+		reg = <0x4d>;
+		qcom,pre-div-channel-scaling = <0>;
+		qcom,calibration-type = "ratiometric";
+		qcom,scale-function = <2>;
+		qcom,hw-settle-time = <2>;
+		qcom,btm-channel-number = <0x68>;
+		qcom,thermal-node;
+	};
+
+	chan@51 {
+		label = "quiet_therm";
+		reg = <0x51>;
+		qcom,pre-div-channel-scaling = <0>;
+		qcom,calibration-type = "ratiometric";
+		qcom,scale-function = <2>;
+		qcom,hw-settle-time = <2>;
+		qcom,btm-channel-number = <0x70>;
+		qcom,thermal-node;
+	};
+
+	chan@4c {
+		label = "xo_therm";
+		reg = <0x4c>;
+		qcom,pre-div-channel-scaling = <0>;
+		qcom,calibration-type = "ratiometric";
+		qcom,scale-function = <4>;
+		qcom,hw-settle-time = <2>;
+		qcom,btm-channel-number = <0x78>;
+		qcom,thermal-node;
+	};
+};
+
+&soc {
+	qcom,bcl {
+		/delete-property/ qcom,bcl-enable;
+	};
+};
+
+&pcie0 {
+	qcom,boot-option = <0>;
+};
+
+&msm_ath10k_wlan {
+	status = "ok";
+};
+
+&tspp {
+	pinctrl-names = "disabled", "tsif0-mode1", "tsif0-mode2";
+
+	tsin0: port@0 {
+		tsin-num = <0>;
+		i2c-bus = <&i2c_5>;
+		reset-gpios = <&tlmm 84 GPIO_ACTIVE_LOW>;
+	};
+};
diff -Nruw linux-4.4.115-fbx/arch/arm/boot/dts/qcom./apq8098-freebox-proto.dts linux-4.4.115-fbx/arch/arm/boot/dts/qcom/apq8098-freebox-proto.dts
--- linux-4.4.115-fbx/arch/arm/boot/dts/qcom./apq8098-freebox-proto.dts	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/arch/arm/boot/dts/qcom/apq8098-freebox-proto.dts	2019-01-22 16:16:21.171225289 +0100
@@ -0,0 +1,515 @@
+/* Copyright (c) 2017, Freebox SAS. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/dts-v1/;
+
+#include <dt-bindings/gpio/gpio.h>
+#include "apq8098-v2.1.dtsi"
+#include "msm8998-audio-freebox.dtsi"
+
+#include "msm8998-ramoops.dtsi"
+
+/ {
+	model = "Freebox Test Carrier";
+	compatible = "freebox,fbx7hd-carrier", "freebox,fbx7hd", "qcom,apq8098";
+	qcom,board-id = <5 1>;
+
+	ir: ir-receiver {
+		compatible = "gpio-ir-receiver";
+		gpios = <&tlmm 120 GPIO_ACTIVE_LOW>;
+		pinctrl-names = "default";
+		pinctrl-0 = <&ir_int>;
+		linux,rc-map-name = "rc-rc6-freebox";
+	};
+
+	fbxgpio@0 {
+		compatible = "fbx,fbxgpio";
+
+		pinctrl-names = "default";
+		pinctrl-0 = <
+			&hdmi_fault
+			&nfc_enable
+			&nfc_int
+			>;
+
+		hdmi-fault {
+			name = "hdmi-fault";
+			gpio = <&tlmm 133 GPIO_ACTIVE_LOW>;
+			input;
+		};
+
+		nfcc-enable {
+			name = "nfcc-enable";
+			gpio = <&tlmm 96 GPIO_ACTIVE_HIGH>;
+			output-low;
+		};
+
+		nfcc-irq {
+			name = "nfcc-irq";
+			gpio = <&tlmm 97 GPIO_ACTIVE_HIGH>;
+			input;
+		};
+	};
+};
+
+&vendor {
+	bluetooth: bt_wcn3990 {
+		compatible = "qca,wcn3990";
+		qca,bt-vdd-io-supply = <&pm8998_s3>;
+		qca,bt-vdd-xtal-supply = <&pm8998_s5>;
+		qca,bt-vdd-core-supply = <&pm8998_l7>;
+		qca,bt-vdd-pa-supply = <&pm8998_l17>;
+		qca,bt-vdd-ldo-supply = <&pm8998_l25>;
+		//qca,bt-chip-pwd-supply = <&pmi8998_bob_pin1>;
+		clocks = <&clock_gcc clk_rf_clk2_pin>;
+		clock-names = "rf_clk2";
+
+		qca,bt-vdd-io-voltage-level = <1352000 1352000>;
+		qca,bt-vdd-xtal-voltage-level = <2040000 2040000>;
+		qca,bt-vdd-core-voltage-level = <1800000 1800000>;
+		qca,bt-vdd-pa-voltage-level = <1304000 1304000>;
+		qca,bt-vdd-ldo-voltage-level = <3312000 3312000>;
+		qca,bt-chip-pwd-voltage-level = <3600000 3600000>;
+
+		qca,bt-vdd-io-current-level = <1>; /* LPM/PFM */
+		qca,bt-vdd-xtal-current-level = <1>; /* LPM/PFM */
+		qca,bt-vdd-core-current-level = <1>; /* LPM/PFM */
+		qca,bt-vdd-pa-current-level = <1>; /* LPM/PFM */
+		qca,bt-vdd-ldo-current-level = <1>; /* LPM/PFM */
+	};
+};
+
+&blsp1_uart3_hs {
+	status = "ok";
+};
+
+&uartblsp2dm1 {
+	status = "ok";
+	pinctrl-names = "default";
+	pinctrl-0 = <&uart_console_active>;
+};
+
+&ufsphy1 {
+	vdda-phy-supply = <&pm8998_l1>;
+	vdda-pll-supply = <&pm8998_l2>;
+	vddp-ref-clk-supply = <&pm8998_l26>;
+	vdda-phy-max-microamp = <51400>;
+	vdda-pll-max-microamp = <14600>;
+	vddp-ref-clk-max-microamp = <100>;
+	vddp-ref-clk-always-on;
+	status = "ok";
+};
+
+&ufs1 {
+	vdd-hba-supply = <&gdsc_ufs>;
+	vdd-hba-fixed-regulator;
+	vcc-supply = <&pm8998_l20>;
+	vccq-supply = <&pm8998_l26>;
+	vccq2-supply = <&pm8998_s4>;
+	vcc-max-microamp = <750000>;
+	vccq-max-microamp = <560000>;
+	vccq2-max-microamp = <750000>;
+	status = "ok";
+};
+
+&ufs_ice {
+	status = "ok";
+};
+
+&i2c_5 {
+	status = "okay";
+	// si2168@64
+};
+
+&i2c_4 {
+	status = "okay";
+
+	qcom,clk-freq-out = <100000>;
+
+	lm75@48 {
+		compatible = "national,lm75";
+		reg = <0x48>;
+	};
+
+	tas5766@4c {
+		compatible = "ti,tas5766";
+		reg = <0x4c>;
+	};
+
+	tas5766@4d {
+		compatible = "ti,tas5766";
+		reg = <0x4d>;
+	};
+
+	tas5766@4e {
+		compatible = "ti,tas5766";
+		reg = <0x4e>;
+	};
+};
+
+&i2c_6 {
+	status = "okay";
+};
+
+&i2c_7 {
+	status = "okay";
+
+	wm8804@3b {
+		compatible = "wlf,wm8804";
+		reg = <0x3b>;
+		wlf,reset-gpio = <&tlmm 100 GPIO_ACTIVE_HIGH>;
+	};
+
+	sil9437@31 {
+		compatible = "sil,sil9437";
+		reg = <0x31>;
+		sil,reset-gpio = <&tlmm 12 GPIO_ACTIVE_LOW>;
+		sil,irq-gpio = <&tlmm 118 GPIO_ACTIVE_LOW>;
+		sil,irq-open-drain;
+		sil,irq-active-low;
+	};
+
+	/delete-node/ qcom,smb138x@8;
+};
+
+&spi_9 {
+	status = "okay";
+	switch@0 {
+		compatible = "realtek,rtl8367c-spi";
+		reg = <0>;
+		reset-gpios = <&tlmm 30 GPIO_ACTIVE_LOW>;
+		spi-max-frequency = <5000000>;
+	};
+};
+
+&spi_10 {
+	status = "disabled";
+};
+
+&tlmm {
+	cdc_reset_ctrl_batfish {
+		cdc_reset_batfish_sleep: cdc_reset_batfish_sleep {
+			mux {
+				pins = "gpio25";
+				function = "gpio";
+			};
+			config {
+				pins = "gpio25";
+				drive-strength = <16>;
+				bias-disable;
+				output-low;
+			};
+		};
+		cdc_reset_batfish_active: cdc_reset_batfish_active {
+			mux {
+				pins = "gpio25";
+				function = "gpio";
+			};
+			config {
+				pins = "gpio25";
+				drive-strength = <16>;
+				bias-pull-down;
+				output-high;
+			};
+		};
+	};
+
+	nfc {
+		nfc_enable: nfc_enable {
+			mux {
+				pins = "gpio96";
+				function = "gpio";
+			};
+
+			config {
+				pins = "gpio96";
+				drive-strength = <2>;
+				bias-pull-down;
+			};
+		};
+
+		nfc_int: nfc_int {
+			mux {
+				pins = "gpio97";
+				function = "gpio";
+			};
+
+			config {
+				pins = "gpio97";
+				drive-strength = <2>;
+				bias-pull-down;
+			};
+		};
+	};
+
+	hdmi_fault: hdmi_fault {
+		mux {
+			pins = "gpio133";
+			function = "gpio";
+		};
+
+		config {
+			pins = "gpio133";
+			drive-strength = <2>;
+			bias-pull-up;
+		};
+	};
+
+	ir_int: ir_int {
+		mux {
+			pins = "gpio120";
+			function = "gpio";
+		};
+		config {
+			pins = "gpio120";
+			drive-strength = <6>;
+			bias-pull-up;
+		};
+	};
+};
+
+&usb3 {
+	/delete-property/ extcon;
+	dwc3@a800000 {
+		dr_mode = "host";
+	};
+};
+
+&sde_hdmi {
+	qcom,display-type = "primary";
+};
+
+&sde_hdmi_tx {
+	/* disable 5V HPD pin */
+	/delete-property/ qcom,hdmi-tx-hpd5v-gpio;
+	pinctrl-0 = <&mdss_hdmi_hpd_active
+		&mdss_hdmi_ddc_active>;
+	pinctrl-1 = <&mdss_hdmi_hpd_suspend
+		&mdss_hdmi_ddc_suspend>;
+};
+
+&spmi_bus {
+	qcom,pmi8998@2 {
+		status = "disabled";
+	};
+	qcom,pmi8998@3 {
+		status = "disabled";
+	};
+};
+
+&pmi8998_wled {
+	status = "disabled";
+};
+
+&pm8998_gpios {
+	gpio@cc00 { /* GPIO 13 */
+		qcom,mode = <1>;
+		qcom,output-type = <0>;
+		qcom,pull = <5>;
+		qcom,vin-sel = <0>;
+		qcom,out-strength = <1>;
+		qcom,src-sel = <3>;
+		qcom,master-en = <1>;
+		status = "okay";
+	};
+};
+
+&pm8998_vadc {
+	chan@83 {
+		label = "vph_pwr";
+		reg = <0x83>;
+		qcom,decimation = <2>;
+		qcom,pre-div-channel-scaling = <1>;
+		qcom,calibration-type = "absolute";
+		qcom,scale-function = <0>;
+		qcom,hw-settle-time = <0>;
+		qcom,fast-avg-setup = <0>;
+	};
+
+	chan@85 {
+		label = "vcoin";
+		reg = <0x85>;
+		qcom,decimation = <2>;
+		qcom,pre-div-channel-scaling = <1>;
+		qcom,calibration-type = "absolute";
+		qcom,scale-function = <0>;
+		qcom,hw-settle-time = <0>;
+		qcom,fast-avg-setup = <0>;
+	};
+
+	chan@4c {
+		label = "xo_therm";
+		reg = <0x4c>;
+		qcom,decimation = <2>;
+		qcom,pre-div-channel-scaling = <0>;
+		qcom,calibration-type = "ratiometric";
+		qcom,scale-function = <4>;
+		qcom,hw-settle-time = <2>;
+		qcom,fast-avg-setup = <0>;
+	};
+
+	chan@4d {
+		label = "msm_therm";
+		reg = <0x4d>;
+		qcom,decimation = <2>;
+		qcom,pre-div-channel-scaling = <0>;
+		qcom,calibration-type = "ratiometric";
+		qcom,scale-function = <2>;
+		qcom,hw-settle-time = <2>;
+		qcom,fast-avg-setup = <0>;
+	};
+
+	chan@51 {
+		label = "quiet_therm";
+		reg = <0x51>;
+		qcom,decimation = <2>;
+		qcom,pre-div-channel-scaling = <0>;
+		qcom,calibration-type = "ratiometric";
+		qcom,scale-function = <2>;
+		qcom,hw-settle-time = <2>;
+		qcom,fast-avg-setup = <0>;
+	};
+};
+
+&pm8998_adc_tm {
+	chan@83 {
+		label = "vph_pwr";
+		reg = <0x83>;
+		qcom,pre-div-channel-scaling = <1>;
+		qcom,calibration-type = "absolute";
+		qcom,scale-function = <0>;
+		qcom,hw-settle-time = <0>;
+		qcom,btm-channel-number = <0x60>;
+	};
+
+	chan@4d {
+		label = "msm_therm";
+		reg = <0x4d>;
+		qcom,pre-div-channel-scaling = <0>;
+		qcom,calibration-type = "ratiometric";
+		qcom,scale-function = <2>;
+		qcom,hw-settle-time = <2>;
+		qcom,btm-channel-number = <0x68>;
+		qcom,thermal-node;
+	};
+
+	chan@51 {
+		label = "quiet_therm";
+		reg = <0x51>;
+		qcom,pre-div-channel-scaling = <0>;
+		qcom,calibration-type = "ratiometric";
+		qcom,scale-function = <2>;
+		qcom,hw-settle-time = <2>;
+		qcom,btm-channel-number = <0x70>;
+		qcom,thermal-node;
+	};
+
+	chan@4c {
+		label = "xo_therm";
+		reg = <0x4c>;
+		qcom,pre-div-channel-scaling = <0>;
+		qcom,calibration-type = "ratiometric";
+		qcom,scale-function = <4>;
+		qcom,hw-settle-time = <2>;
+		qcom,btm-channel-number = <0x78>;
+		qcom,thermal-node;
+	};
+};
+
+&soc {
+	qcom,msm-dai-tdm-sec-rx {
+		qcom,msm-cpudai-tdm-group-id = <37136>;
+		qcom,msm-cpudai-tdm-group-num-ports = <1>;
+		qcom,msm-cpudai-tdm-group-port-id = <36880>;
+		qcom,msm-cpudai-tdm-clk-rate = <12288000>;
+		qcom,msm-cpudai-tdm-clk-internal = <1>;
+		qcom,msm-cpudai-tdm-sync-mode = <1>;
+		qcom,msm-cpudai-tdm-sync-src = <1>;
+		qcom,msm-cpudai-tdm-data-out = <1>;
+		qcom,msm-cpudai-tdm-invert-sync = <0>;
+		qcom,msm-cpudai-tdm-data-delay = <0>;
+		pinctrl-names = "default", "sleep";
+		pinctrl-0 = <&sec_aux_pcm_active
+				&sec_aux_pcm_dout_active>;
+		pinctrl-1 = <&sec_aux_pcm_sleep
+				&sec_aux_pcm_dout_sleep>;
+	};
+
+	qcom,msm-dai-mi2s {
+		dai_mi2s0: qcom,msm-dai-q6-mi2s-prim {
+			qcom,msm-mi2s-rx-lines = <1>;
+			qcom,msm-mi2s-tx-lines = <0>;
+			pinctrl-names = "default", "sleep";
+			pinctrl-0 = <&pri_mi2s_sck_active
+					&pri_mi2s_ws_active
+					&pri_mi2s_sd0_active>;
+			pinctrl-1 = <&pri_mi2s_sck_sleep
+					&pri_mi2s_ws_sleep
+					&pri_mi2s_sd0_sleep>;
+		};
+
+		dai_mi2s2: qcom,msm-dai-q6-mi2s-tert {
+			qcom,msm-mi2s-rx-lines = <0>;
+			qcom,msm-mi2s-tx-lines = <1>;
+			pinctrl-names = "default", "sleep";
+			pinctrl-0 = <&tert_mi2s_active
+					&tert_mi2s_sd0_active>;
+			pinctrl-1 = <&tert_mi2s_sleep
+					&tert_mi2s_sd0_sleep>;
+		};
+
+		dai_mi2s3: qcom,msm-dai-q6-mi2s-quat {
+			qcom,msm-mi2s-rx-lines = <0>;
+			qcom,msm-mi2s-tx-lines = <15>;
+			pinctrl-names = "default", "sleep";
+			pinctrl-0 = <&quat_mi2s_active
+					&quat_mi2s_sd0_active
+					&quat_mi2s_sd1_active
+					&quat_mi2s_sd2_active
+					&quat_mi2s_sd3_active>;
+			pinctrl-1 = <&quat_mi2s_sleep
+					&quat_mi2s_sd0_sleep
+					&quat_mi2s_sd1_sleep
+					&quat_mi2s_sd2_sleep
+					&quat_mi2s_sd3_sleep>;
+		};
+	};
+
+	sound-freebox {
+		fbx,spdif-wm8804;
+		fbx,arc-sil9437;
+	};
+};
+
+&soc {
+	qcom,bcl {
+		/delete-property/ qcom,bcl-enable;
+	};
+};
+
+&pcie0 {
+	qcom,boot-option = <0>;
+};
+
+&msm_ath10k_wlan {
+	status = "ok";
+};
+
+&tspp {
+	pinctrl-names = "disabled", "tsif0-mode1", "tsif0-mode2";
+
+	tsin0: port@0 {
+		tsin-num = <0>;
+		i2c-bus = <&i2c_5>;
+		reset-gpios = <&tlmm 84 GPIO_ACTIVE_LOW>;
+	};
+};
diff -Nruw linux-4.4.115-fbx/arch/arm/boot/dts/qcom./apq8098-v2.1.dtsi linux-4.4.115-fbx/arch/arm/boot/dts/qcom/apq8098-v2.1.dtsi
--- linux-4.4.115-fbx/arch/arm/boot/dts/qcom./apq8098-v2.1.dtsi	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/arch/arm/boot/dts/qcom/apq8098-v2.1.dtsi	2019-01-22 16:16:21.171225289 +0100
@@ -0,0 +1,28 @@
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include "msm8998-v2.1.dtsi"
+
+/ {
+	model = "Qualcomm Technologies, Inc. APQ 8098 V2.1";
+	qcom,msm-id = <319 0x20001>;
+};
+
+&soc {
+	qcom,rmnet-ipa {
+		status = "disabled";
+	};
+};
+
+&ipa_hw {
+	status = "disabled";
+};
diff -Nruw linux-4.4.115-fbx/arch/arm/boot/dts/qcom./apq8098-v2.1-mediabox.dts linux-4.4.115-fbx/arch/arm/boot/dts/qcom/apq8098-v2.1-mediabox.dts
--- linux-4.4.115-fbx/arch/arm/boot/dts/qcom./apq8098-v2.1-mediabox.dts	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/arch/arm/boot/dts/qcom/apq8098-v2.1-mediabox.dts	2019-10-29 09:26:22.905195956 +0100
@@ -0,0 +1,222 @@
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/dts-v1/;
+
+#include "apq8098-v2.1.dtsi"
+#include "msm8998-cdp.dtsi"
+
+/ {
+	model = "Qualcomm Technologies, Inc. APQ 8098 V2.1 mediabox";
+	compatible = "qcom,apq8098-cdp", "qcom,apq8098", "qcom,cdp";
+	qcom,board-id = <8 1>;
+};
+
+&spi_10 {
+	status = "disabled";
+};
+
+&pcie0 {
+	/delete-property/ qcom,boot-option;
+};
+
+&msm_ath10k_wlan {
+	status = "ok";
+};
+
+&sde_hdmi {
+	qcom,display-type = "primary";
+};
+
+&slim_aud {
+	tasha_codec {
+		wsa_spkr_sd1: msm_cdc_pinctrll {
+		      compatible = "qcom,msm-cdc-pinctrl";
+		      pinctrl-names = "aud_active", "aud_sleep";
+		      pinctrl-0 = <&spkr_1_sd_active_mediabox>;
+		      pinctrl-1 = <&spkr_1_sd_sleep_mediabox>;
+		};
+
+		wsa_spkr_sd2: msm_cdc_pinctrlr {
+		      compatible = "qcom,msm-cdc-pinctrl";
+		      pinctrl-names = "aud_active", "aud_sleep";
+		      pinctrl-0 = <&spkr_2_sd_active_mediabox>;
+		      pinctrl-1 = <&spkr_2_sd_sleep_mediabox>;
+		};
+	};
+};
+
+&sdhc_2 {
+	vdd-supply = <&pm8998_l21>;
+	qcom,vdd-voltage-level = <2950000 2960000>;
+	qcom,vdd-current-level = <200 800000>;
+
+	vdd-io-supply = <&pm8998_l13>;
+	qcom,vdd-io-voltage-level = <1808000 2960000>;
+	qcom,vdd-io-current-level = <200 22000>;
+
+	pinctrl-names = "active", "sleep";
+	pinctrl-0 = <&sdc2_clk_on  &sdc2_cmd_on &sdc2_data_on
+			&sdc2_cd_on_mediabox>;
+	pinctrl-1 = <&sdc2_clk_off &sdc2_cmd_off &sdc2_data_off
+			&sdc2_cd_off_mediabox>;
+
+	qcom,clk-rates = <400000 20000000 25000000
+				50000000 100000000 200000000>;
+	qcom,bus-speed-mode = "SDR12", "SDR25", "SDR50", "DDR50", "SDR104";
+
+	cd-gpios = <&tlmm 86 0x1>;
+
+	status = "ok";
+};
+
+&tspp {
+	qcom,lpass-timer-tts = <1>;
+};
+
+&snd_9335 {
+	qcom,msm-mi2s-master = <1>, <1>, <1>, <0>;
+	qcom,msm-mbhc-hphl-swh = <1>;
+};
+
+&wcd_usbc_analog_en1_gpio {
+	status = "disabled";
+};
+
+&wcd_usbc_analog_en2n_gpio {
+	status = "disabled";
+};
+
+&pcie0 {
+	qcom,boot-option = <0x0>;
+};
+
+&soc {
+	qcom,msm-dai-mi2s {
+		dai_mi2s3: qcom,msm-dai-q6-mi2s-quat {
+			qcom,msm-mi2s-rx-lines = <0>;
+			/* SD0|SD1|SD2|SD3: (1 << 0)|(1 << 1)|(1 << 2)|(1 << 3) = 15 */
+			qcom,msm-mi2s-tx-lines = <15>;
+			pinctrl-names = "default", "sleep";
+			pinctrl-0 = <&quat_mi2s_active &quat_mi2s_sd0_active
+					&quat_mi2s_sd1_active
+					&quat_mi2s_sd2_active
+					&quat_mi2s_sd3_active>;
+			pinctrl-1 = <&quat_mi2s_sleep &quat_mi2s_sd0_sleep
+					&quat_mi2s_sd1_sleep
+					&quat_mi2s_sd2_sleep
+					&quat_mi2s_sd3_sleep>;
+		};
+	};
+
+	ir: ir-receiver {
+		compatible = "gpio-ir-receiver";
+		gpios = <&tlmm 120 1>;
+		pinctrl-names = "default";
+		pinctrl-0 = <&ir_int>;
+		linux,rc-map-name = "rc-rc6-freebox";
+	};
+};
+
+&tlmm {
+	ir_int: ir_int {
+		mux {
+			pins = "gpio120";
+			function = "gpio";
+		};
+		config {
+			pins = "gpio120";
+			drive-strength = <6>;
+			bias-pull-up;
+		};
+	};
+
+	spkr_1_sd_mediabox {
+		spkr_1_sd_sleep_mediabox: spkr_1_sd_sleep_mediabox {
+			mux {
+				pins = "gpio85";
+				function = "gpio";
+			};
+			config {
+				pins = "gpio85";
+				drive-strength = <2>;   /* 2 mA */
+				bias-pull-down;
+				input-enable;
+			};
+		};
+		spkr_1_sd_active_mediabox: spkr_1_sd_active_mediabox {
+			mux {
+				pins = "gpio85";
+				function = "gpio";
+			};
+			config {
+				pins = "gpio85";
+				drive-strength = <8>;   /* 8 mA */
+				bias-disable;
+				output-high;
+			};
+		};
+	};
+
+	spkr_2_sd_mediabox {
+		spkr_2_sd_sleep_mediabox: spkr_2_sd_sleep_mediabox {
+			mux {
+				pins = "gpio112";
+				function = "gpio";
+			};
+			config {
+				pins = "gpio112";
+				drive-strength = <2>;   /* 2 mA */
+				bias-pull-down;
+				input-enable;
+			};
+		};
+		spkr_2_sd_active_mediabox: spkr_2_sd_active_mediabox {
+			mux {
+				pins = "gpio112";
+				function = "gpio";
+			};
+			config {
+				pins = "gpio112";
+				drive-strength = <8>;   /* 8 mA */
+				bias-disable;
+				output-high;
+			};
+		};
+	};
+
+	sdc2_cd_on_mediabox: sdc2_cd_on_mediabox {
+		mux {
+			pins = "gpio86";
+			function = "gpio";
+		};
+
+		config {
+			pins = "gpio86";
+			bias-pull-up;           /* pull up */
+			drive-strength = <2>;   /* 2 mA */
+		};
+	};
+
+	sdc2_cd_off_mediabox: sdc2_cd_off_mediabox {
+		mux {
+			pins = "gpio86";
+			function = "gpio";
+		};
+
+		config {
+			pins = "gpio86";
+			bias-pull-up;           /* pull up */
+			drive-strength = <2>;   /* 2 mA */
+		};
+	};
+};
diff -Nruw linux-4.4.115-fbx/arch/arm/boot/dts/qcom./Makefile linux-4.4.115-fbx/arch/arm/boot/dts/qcom/Makefile
--- linux-4.4.115-fbx/arch/arm/boot/dts/qcom./Makefile	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/arch/arm/boot/dts/qcom/Makefile	2019-10-29 09:26:22.901195917 +0100
@@ -0,0 +1,263 @@
+dtb-$(CONFIG_ARCH_MSM8996) += msm8996-v2-pmi8994-cdp.dtb \
+	msm8996-v2-pmi8994-mtp.dtb \
+	msm8996-v2-pmi8994-pmk8001-cdp.dtb \
+	msm8996-v2-pmi8994-pmk8001-mtp.dtb \
+	msm8996-v2-pmi8994-pm8004-cdp.dtb \
+	msm8996-v2-pmi8994-pm8004-mtp.dtb \
+	msm8996-v2-pmi8994-pm8004-pmk8001-cdp.dtb \
+	msm8996-v2-pmi8994-pm8004-pmk8001-mtp.dtb \
+	msm8996-v2-fluid.dtb \
+	msm8996-v2-liquid.dtb \
+	msm8996-v2-dtp.dtb \
+	msm8996-v3-auto-cdp.dtb \
+	msm8996-v3-auto-adp.dtb \
+	msm8996-v3-pmi8994-cdp.dtb \
+	msm8996-v3-pmi8994-mtp.dtb \
+	msm8996-v3-pmi8994-pmk8001-cdp.dtb \
+	msm8996-v3-pmi8994-pmk8001-mtp.dtb \
+	msm8996-v3-pmi8994-pm8004-cdp.dtb \
+	msm8996-v3-pmi8994-pm8004-mtp.dtb \
+	msm8996-v3-pmi8994-pm8004-pmk8001-cdp.dtb \
+	msm8996-v3-pmi8994-pm8004-pmk8001-mtp.dtb \
+	msm8996-v3-pmi8996-cdp.dtb \
+	msm8996-v3-pmi8996-mtp.dtb \
+	msm8996-v3-pmi8996-pmk8001-cdp.dtb \
+	msm8996-v3-pmi8996-pmk8001-mtp.dtb \
+	msm8996-v3-fluid.dtb \
+	msm8996-v3-liquid.dtb \
+	msm8996-v3-dtp.dtb \
+	msm8996-v3-pm8004-mmxf-adp.dtb \
+	msm8996-v3-pm8004-agave-adp.dtb \
+	msm8996-v3-pm8004-agave-adp-lite.dtb \
+	msm8996pro-auto-adp.dtb \
+	msm8996pro-auto-adp-lite.dtb \
+	msm8996pro-auto-cdp.dtb \
+	msm8996pro-auto-cv2x.dtb \
+	msm8996pro-pmi8994-cdp.dtb \
+	msm8996pro-pmi8994-mtp.dtb \
+	msm8996pro-pmi8994-pmk8001-cdp.dtb \
+	msm8996pro-pmi8994-pmk8001-mtp.dtb \
+	msm8996pro-pmi8994-pm8004-cdp.dtb \
+	msm8996pro-pmi8994-pm8004-mtp.dtb \
+	msm8996pro-pmi8994-pm8004-pmk8001-cdp.dtb \
+	msm8996pro-pmi8994-pm8004-pmk8001-mtp.dtb \
+	msm8996pro-pmi8996-cdp.dtb \
+	msm8996pro-pmi8996-mtp.dtb \
+	msm8996pro-pmi8996-pmk8001-cdp.dtb \
+	msm8996pro-pmi8996-pmk8001-mtp.dtb \
+	msm8996pro-v1.1-auto-cdp.dtb \
+	msm8996pro-v1.1-pmi8994-cdp.dtb \
+	msm8996pro-v1.1-pmi8994-mtp.dtb \
+	msm8996pro-v1.1-pmi8994-pmk8001-cdp.dtb \
+	msm8996pro-v1.1-pmi8994-pmk8001-mtp.dtb \
+	msm8996pro-v1.1-pmi8994-pm8004-cdp.dtb \
+	msm8996pro-v1.1-pmi8994-pm8004-mtp.dtb \
+	msm8996pro-v1.1-pmi8994-pm8004-pmk8001-cdp.dtb \
+	msm8996pro-v1.1-pmi8994-pm8004-pmk8001-mtp.dtb \
+	msm8996pro-v1.1-pmi8996-cdp.dtb \
+	msm8996pro-v1.1-pmi8996-mtp.dtb \
+	msm8996pro-v1.1-pmi8996-pmk8001-cdp.dtb \
+	msm8996pro-v1.1-pmi8996-pmk8001-mtp.dtb \
+	apq8096pro-auto-cdp.dtb \
+	apq8096pro-v1.1-auto-adp.dtb \
+	apq8096pro-v1.1-auto-adp-lite.dtb \
+	apq8096pro-liquid.dtb \
+	apq8096pro-v1.1-auto-cdp.dtb \
+	msm8996-v3.0-pmi8994-cdp.dtb \
+	msm8996-v3.0-pmi8994-mtp.dtb \
+	msm8996-v3.0-pmi8994-pm8004-cdp.dtb \
+	msm8996-v3.0-pmi8994-pm8004-mtp.dtb \
+	msm8996-v3.0-pmi8994-pm8004-pmk8001-cdp.dtb \
+	msm8996-v3.0-pmi8994-pmk8001-cdp.dtb \
+	msm8996-v3.0-pmi8996-cdp.dtb \
+	msm8996-v3.0-pmi8996-mtp.dtb \
+	msm8996-v3.0-fluid.dtb \
+	msm8996-v3.0-liquid.dtb \
+	msm8996-v3.0-dtp.dtb \
+	apq8096-v2-pmi8994-cdp.dtb \
+	apq8096-v2-pmi8994-mtp.dtb \
+	apq8096-v2-pmi8994-pmk8001-cdp.dtb \
+	apq8096-v2-pmi8994-pm8004-cdp.dtb \
+	apq8096-v2-pmi8994-pm8004-pmk8001-cdp.dtb \
+	apq8096-v2-liquid.dtb \
+	apq8096-v2-dragonboard.dtb \
+	apq8096-v2-auto-dragonboard.dtb \
+	apq8096-v3-pmi8994-cdp.dtb \
+	apq8096-v3-pmi8994-mtp.dtb \
+	apq8096-v3-pmi8994-pmk8001-cdp.dtb \
+	apq8096-v3-pmi8994-pm8004-cdp.dtb \
+	apq8096-v3-pmi8994-pm8004-pmk8001-cdp.dtb \
+	apq8096-v3-pmi8996-cdp.dtb \
+	apq8096-v3-pmi8996-mtp.dtb \
+	apq8096-v3-liquid.dtb \
+	apq8096-v3-dragonboard.dtb \
+	apq8096-v3-sbc.dtb \
+	apq8096-v3-auto-dragonboard.dtb \
+	apq8096-v3-auto-adp.dtb \
+	apq8096-v3-auto-cdp.dtb \
+	apq8096-v3.0-pmi8994-cdp.dtb \
+	apq8096-v3.0-pmi8994-mtp.dtb \
+	apq8096-v3.0-pmi8994-pm8004-cdp.dtb \
+	apq8096-v3.0-pmi8994-pm8004-pmk8001-cdp.dtb \
+	apq8096-v3.0-pmi8994-pmk8001-cdp.dtb \
+	apq8096-v3.0-pmi8996-cdp.dtb \
+	apq8096-v3.0-pmi8996-mtp.dtb \
+	apq8096-v3.0-liquid.dtb \
+	apq8096-v3.0-dragonboard.dtb \
+	apq8096-v3-pmi8994-mdm9x55-i2s-cdp.dtb \
+	apq8096-v3-pmi8994-pm8004-mdm9x55-i2s-cdp.dtb \
+	apq8096-v3-pmi8994-pm8004-pmk8001-mdm9x55-i2s-cdp.dtb \
+	apq8096-v3-pmi8994-pmk8001-mdm9x55-i2s-cdp.dtb \
+	apq8096-v3-pmi8996-mdm9x55-i2s-cdp.dtb \
+	apq8096-v3-pmi8994-mdm9x55-i2s-mtp.dtb \
+	apq8096-v3-pmi8994-mdm9x55-slimbus-mtp.dtb \
+	apq8096-v3-pmi8996-mdm9x55-i2s-mtp.dtb \
+	apq8096-v3-pmi8996-mdm9x55-slimbus-mtp.dtb \
+	apq8096-v3-pmi8996-dragonboard.dtb \
+	msm8996-auto-mizar.dtb
+
+dtb-$(CONFIG_MSM_GVM_QUIN) += vplatform-lfv-msm8996-telematics.dtb \
+	vplatform-lfv-msm8996-ivi.dtb \
+	vplatform-lfv-msm8996-baseline.dtb \
+	vplatform-lfv-msm8996-ivi-la.dtb \
+	vplatform-lfv-msm8996-ivi-lv-mt.dtb
+
+dtb-$(CONFIG_ARCH_MSM8998) += \
+	apq8098-v2.1-mediabox.dtb \
+	apq8098-freebox-proto.dtb \
+	apq8098-freebox-oarfish.dtb \
+	apq8098-freebox-batfish.dtb
+
+dtb-$(CONFIG_ARCH_MSMHAMSTER) += msmhamster-rumi.dtb
+
+dtb-$(CONFIG_ARCH_SDM660) += sdm660-sim.dtb \
+	sdm660-internal-codec-cdp.dtb \
+	sdm660-internal-codec-mtp.dtb \
+	sdm660-internal-codec-rcm.dtb \
+	sdm660-cdp.dtb \
+	sdm660-mtp.dtb \
+	sdm660-qrd.dtb \
+	sdm660-rcm.dtb \
+	sdm660-rumi.dtb \
+	sdm660-pm660a-cdp.dtb \
+	sdm660-pm660a-mtp.dtb \
+	sdm660-pm660a-qrd.dtb \
+	sdm660-pm660a-rcm.dtb \
+	sdm660-pm660a-rumi.dtb \
+	sdm660-internal-codec-pm660a-cdp.dtb \
+	sdm660-internal-codec-pm660a-mtp.dtb \
+	sdm660-internal-codec-pm660a-rcm.dtb \
+	sdm660-pm660a-sim.dtb \
+	sda660-cdp.dtb \
+	sda660-mtp.dtb \
+	sda660-rcm.dtb \
+	sda660-pm660a-cdp.dtb \
+	sda660-pm660a-mtp.dtb \
+	sda660-pm660a-rcm.dtb \
+	sda660-pm660a-qrd-hdk.dtb \
+	sdm660-headset-jacktype-no-cdp.dtb \
+	sdm660-headset-jacktype-no-rcm.dtb \
+	sdm660-pm660a-headset-jacktype-no-cdp.dtb \
+	sdm660-pm660a-headset-jacktype-no-rcm.dtb \
+	sdm660-usbc-audio-mtp.dtb \
+	sdm660-usbc-audio-rcm.dtb \
+	sdm660-fhd-cdp.dtb \
+	sdm660-pm660a-fhd-cdp.dtb \
+	sdm658-mtp.dtb \
+	sdm658-cdp.dtb \
+	sdm658-rcm.dtb \
+	sdm658-qrd.dtb \
+	sdm658-pm660a-mtp.dtb \
+	sdm658-pm660a-cdp.dtb \
+	sdm658-pm660a-rcm.dtb \
+	sdm658-pm660a-qrd.dtb \
+	sdm658-internal-codec-mtp.dtb \
+	sdm658-internal-codec-cdp.dtb \
+	sdm658-internal-codec-rcm.dtb \
+	sdm658-internal-codec-pm660a-mtp.dtb \
+	sdm658-internal-codec-pm660a-cdp.dtb \
+	sdm658-internal-codec-pm660a-rcm.dtb \
+	sda658-cdp.dtb \
+	sda658-mtp.dtb \
+	sda658-rcm.dtb \
+	sda658-pm660a-mtp.dtb \
+	sda658-pm660a-cdp.dtb \
+	sda658-pm660a-rcm.dtb \
+	sdm636-cdp.dtb \
+	sdm636-mtp.dtb \
+	sdm636-qrd.dtb \
+	sdm636-rcm.dtb \
+	sdm636-headset-jacktype-no-cdp.dtb \
+	sdm636-headset-jacktype-no-rcm.dtb \
+	sdm636-internal-codec-cdp.dtb \
+	sdm636-internal-codec-mtp.dtb \
+	sdm636-internal-codec-pm660a-cdp.dtb \
+	sdm636-internal-codec-pm660a-mtp.dtb \
+	sdm636-internal-codec-pm660a-rcm.dtb \
+	sdm636-internal-codec-rcm.dtb \
+	sdm636-pm660a-headset-jacktype-no-cdp.dtb \
+	sdm636-pm660a-headset-jacktype-no-rcm.dtb \
+	sdm636-pm660a-cdp.dtb \
+	sdm636-pm660a-mtp.dtb \
+	sdm636-pm660a-qrd.dtb \
+	sdm636-pm660a-rcm.dtb \
+	sdm636-usbc-audio-mtp.dtb \
+	sdm636-usbc-audio-rcm.dtb \
+	sda636-cdp.dtb \
+	sda636-mtp.dtb \
+	sda636-rcm.dtb \
+	sda636-pm660a-cdp.dtb \
+	sda636-pm660a-mtp.dtb \
+	sda636-pm660a-qrd-hdk.dtb \
+	sda636-pm660a-rcm.dtb
+
+dtb-$(CONFIG_ARCH_SDM630) += sdm630-rumi.dtb \
+	sdm630-pm660a-rumi.dtb \
+	sdm630-mtp.dtb \
+	sdm630-usbc-audio-mtp.dtb \
+	sdm630-usbc-audio-rcm.dtb \
+	sdm630-cdp.dtb \
+	sdm630-rcm.dtb \
+	sdm630-internal-codec-mtp.dtb \
+	sdm630-internal-codec-cdp.dtb \
+	sdm630-internal-codec-rcm.dtb \
+	sdm630-pm660a-cdp.dtb \
+	sdm630-pm660a-mtp.dtb \
+	sdm630-pm660a-rcm.dtb \
+	sdm630-pm660a-qrd.dtb \
+	sdm630-internal-codec-pm660a-cdp.dtb \
+	sdm630-internal-codec-pm660a-mtp.dtb \
+	sdm630-internal-codec-pm660a-rcm.dtb \
+	sda630-mtp.dtb \
+	sda630-cdp.dtb \
+	sda630-rcm.dtb \
+	sda630-pm660a-mtp.dtb \
+	sda630-pm660a-cdp.dtb \
+	sda630-pm660a-rcm.dtb \
+	sda630-pm660a-qrd-hdk.dtb \
+	sdm630-headset-jacktype-no-cdp.dtb \
+	sdm630-headset-jacktype-no-rcm.dtb \
+	sdm630-pm660a-headset-jacktype-no-cdp.dtb \
+	sdm630-pm660a-headset-jacktype-no-rcm.dtb
+
+ifeq ($(CONFIG_ARM64),y)
+always          := $(dtb-y) qcom_dtbs
+always          += $(dtbo-y)
+subdir-y        := $(dts-dirs)
+else
+targets += dtbs
+targets += $(addprefix ../, $(dtb-y))
+
+$(obj)/../%.dtb: $(src)/%.dts FORCE
+	$(call if_changed_dep,dtc)
+
+dtbs: $(addprefix $(obj)/../,$(dtb-y))
+endif
+clean-files := *.dtbo *.dtb qcom_dtbs qcom_dtbs.cmpxz
+
+cmd_dtbs		= ./scripts/dtbs.sh $@ $^
+quiet_cmd_dtbs		= DTBS    $@
+
+DTB_ALIGN=32
+$(obj)/qcom_dtbs: $(addprefix $(obj)/,$(dtb-y))
+	$(call cmd,dtbs)
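+# The qcom_dtbs rule above packs every generated dtb-y blob into a single
+# image via scripts/dtbs.sh; DTB_ALIGN is assumed to be the per-blob
+# alignment (in bytes) consumed by that script.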
diff -Nruw linux-4.4.115/arch/arm/boot/dts/qcom/msm8998-audio.dtsi linux-4.4.115-fbx/arch/arm/boot/dts/qcom/msm8998-audio.dtsi
--- linux-4.4.115/arch/arm/boot/dts/qcom/msm8998-audio.dtsi	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/arch/arm/boot/dts/qcom/msm8998-audio.dtsi	2019-10-29 09:26:22.917196073 +0100
@@ -0,0 +1,56 @@
+/*
+ * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+&msm_audio_ion {
+	qcom,smmu-version = <2>;
+	iommus = <&lpass_q6_smmu 1>;
+};
+
+&soc {
+	qcom,avtimer@170f7000 {
+		compatible = "qcom,avtimer";
+		reg = <0x170f700c 0x4>,
+		      <0x170f7010 0x4>;
+		reg-names = "avtimer_lsb_addr", "avtimer_msb_addr";
+		qcom,clk-div = <27>;
+	};
+
+	clock_audio: audio_ext_clk {
+		status = "ok";
+		compatible = "qcom,audio-ref-clk";
+		qcom,audio-ref-clk-gpio = <&pm8998_gpios 13 0>;
+		clock-names = "osr_clk";
+		clocks = <&clock_gcc clk_div_clk1>;
+		qcom,node_has_rpm_clock;
+		#clock-cells = <1>;
+		pinctrl-names = "sleep", "active";
+		pinctrl-0 = <&spkr_i2s_clk_sleep>;
+		pinctrl-1 = <&spkr_i2s_clk_active>;
+	};
+
+	clock_audio_lnbb: audio_ext_clk_lnbb {
+		status = "ok";
+		compatible = "qcom,audio-ref-clk";
+		clock-names = "osr_clk";
+		clocks = <&clock_gcc clk_ln_bb_clk2>;
+		qcom,node_has_rpm_clock;
+		#clock-cells = <1>;
+	};
+};
+
+&slim_aud {
+	msm_dai_slim {
+		compatible = "qcom,msm-dai-slim";
+		elemental-addr = [ff ff ff fe 17 02];
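+		/*
+		 * The six bytes above form the SLIMbus enumeration address,
+		 * least-significant byte first; the trailing "17 02" encodes
+		 * manufacturer ID 0x0217 (Qualcomm).
+		 */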
+	};
+};
diff -Nruw linux-4.4.115/arch/arm/boot/dts/qcom/msm8998-audio-freebox.dtsi linux-4.4.115-fbx/arch/arm/boot/dts/qcom/msm8998-audio-freebox.dtsi
--- linux-4.4.115/arch/arm/boot/dts/qcom/msm8998-audio-freebox.dtsi	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/arch/arm/boot/dts/qcom/msm8998-audio-freebox.dtsi	2019-01-22 16:16:21.191225470 +0100
@@ -0,0 +1,87 @@
+/*
+ * Copyright (c) 2018, Freebox SAS. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+&soc {
+	snd_freebox: sound-freebox {
+		compatible = "qcom,msm8998-asoc-snd-freebox";
+		qcom,model = "msm8998-freebox-snd-card";
+		qcom,ext-disp-audio-rx;
+		qcom,mi2s-audio-intf;
+		qcom,msm-mi2s-master = <1>, <1>, <0>, <0>;
+		qcom,msm-mi2s-ext-mclk = <1>, <0>, <0>, <0>;
+
+		reg = <0x1711a000 0x4>,
+		      <0x1711b000 0x4>,
+		      <0x1711c000 0x4>,
+		      <0x1711d000 0x4>;
+		reg-names = "lpaif_pri_mode_muxsel",
+			    "lpaif_sec_mode_muxsel",
+			    "lpaif_tert_mode_muxsel",
+			    "lpaif_quat_mode_muxsel";
+
+		asoc-platform = <&pcm0>, <&pcm1>, <&pcm2>, <&voip>, <&voice>,
+				<&loopback>, <&compress>, <&hostless>,
+				<&afe>, <&lsm>, <&routing>, <&compr>,
+				<&pcm_noirq>, <&trans_loopback>;
+		asoc-platform-names = "msm-pcm-dsp.0", "msm-pcm-dsp.1",
+				"msm-pcm-dsp.2", "msm-voip-dsp",
+				"msm-pcm-voice", "msm-pcm-loopback",
+				"msm-compress-dsp", "msm-pcm-hostless",
+				"msm-pcm-afe", "msm-lsm-client",
+				"msm-pcm-routing", "msm-compr-dsp",
+				"msm-pcm-dsp-noirq", "msm-transcode-loopback";
+		asoc-cpu = <&dai_hdmi>, <&dai_dp>,
+				<&dai_mi2s0>, <&dai_mi2s1>,
+				<&dai_mi2s2>, <&dai_mi2s3>,
+				<&dai_pri_auxpcm>, <&dai_sec_auxpcm>,
+				<&dai_tert_auxpcm>, <&dai_quat_auxpcm>,
+				<&sb_0_rx>, <&sb_0_tx>, <&sb_1_rx>, <&sb_1_tx>,
+				<&sb_2_rx>, <&sb_2_tx>, <&sb_3_rx>, <&sb_3_tx>,
+				<&sb_4_rx>, <&sb_4_tx>, <&sb_5_tx>,
+				<&afe_pcm_rx>, <&afe_pcm_tx>, <&afe_proxy_rx>,
+				<&afe_proxy_tx>, <&incall_record_rx>,
+				<&incall_record_tx>, <&incall_music_rx>,
+				<&incall_music_2_rx>, <&sb_5_rx>, <&sb_6_rx>,
+				<&sb_7_rx>, <&sb_7_tx>, <&sb_8_tx>,
+				<&usb_audio_rx>, <&usb_audio_tx>,
+				<&dai_pri_tdm_rx_0>, <&dai_pri_tdm_tx_0>,
+				<&dai_sec_tdm_rx_0>, <&dai_sec_tdm_tx_0>,
+				<&dai_tert_tdm_rx_0>, <&dai_tert_tdm_tx_0>,
+				<&dai_quat_tdm_rx_0>, <&dai_quat_tdm_tx_0>;
+		asoc-cpu-names = "msm-dai-q6-hdmi.8", "msm-dai-q6-dp.24608",
+				"msm-dai-q6-mi2s.0", "msm-dai-q6-mi2s.1",
+				"msm-dai-q6-mi2s.2", "msm-dai-q6-mi2s.3",
+				"msm-dai-q6-auxpcm.1", "msm-dai-q6-auxpcm.2",
+				"msm-dai-q6-auxpcm.3", "msm-dai-q6-auxpcm.4",
+				"msm-dai-q6-dev.16384", "msm-dai-q6-dev.16385",
+				"msm-dai-q6-dev.16386", "msm-dai-q6-dev.16387",
+				"msm-dai-q6-dev.16388", "msm-dai-q6-dev.16389",
+				"msm-dai-q6-dev.16390", "msm-dai-q6-dev.16391",
+				"msm-dai-q6-dev.16392", "msm-dai-q6-dev.16393",
+				"msm-dai-q6-dev.16395", "msm-dai-q6-dev.224",
+				"msm-dai-q6-dev.225", "msm-dai-q6-dev.241",
+				"msm-dai-q6-dev.240", "msm-dai-q6-dev.32771",
+				"msm-dai-q6-dev.32772", "msm-dai-q6-dev.32773",
+				"msm-dai-q6-dev.32770", "msm-dai-q6-dev.16394",
+				"msm-dai-q6-dev.16396", "msm-dai-q6-dev.16398",
+				"msm-dai-q6-dev.16399", "msm-dai-q6-dev.16401",
+				"msm-dai-q6-dev.28672", "msm-dai-q6-dev.28673",
+				"msm-dai-q6-tdm.36864", "msm-dai-q6-tdm.36865",
+				"msm-dai-q6-tdm.36880", "msm-dai-q6-tdm.36881",
+				"msm-dai-q6-tdm.36896", "msm-dai-q6-tdm.36897",
+				"msm-dai-q6-tdm.36912", "msm-dai-q6-tdm.36913";
+		asoc-codec = <&stub_codec>, <&ext_disp_audio_codec>;
+		asoc-codec-names = "msm-stub-codec.1",
+				   "msm-ext-disp-audio-codec-rx";
+	};
+};
diff -Nruw linux-4.4.115/arch/arm/boot/dts/qcom/msm8998-audio-wcd.dtsi linux-4.4.115-fbx/arch/arm/boot/dts/qcom/msm8998-audio-wcd.dtsi
--- linux-4.4.115/arch/arm/boot/dts/qcom/msm8998-audio-wcd.dtsi	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/arch/arm/boot/dts/qcom/msm8998-audio-wcd.dtsi	2019-01-22 16:16:21.191225470 +0100
@@ -0,0 +1,632 @@
+/*
+ * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include "msm8998-wsa881x.dtsi"
+
+&spi_10 {
+	status = "ok";
+};
+
+&soc {
+	snd_9335: sound-9335 {
+		compatible = "qcom,msm8998-asoc-snd-tasha";
+		qcom,model = "msm8998-tasha-snd-card";
+		qcom,ext-disp-audio-rx;
+		qcom,wcn-btfm;
+		qcom,mi2s-audio-intf;
+		qcom,auxpcm-audio-intf;
+		qcom,msm-mi2s-master = <1>, <1>, <1>, <1>;
+
+		reg = <0x1711a000 0x4>,
+		      <0x1711b000 0x4>,
+		      <0x1711c000 0x4>,
+		      <0x1711d000 0x4>;
+		reg-names = "lpaif_pri_mode_muxsel",
+			    "lpaif_sec_mode_muxsel",
+			    "lpaif_tert_mode_muxsel",
+			    "lpaif_quat_mode_muxsel";
+
+		qcom,audio-routing =
+			"AIF4 VI", "MCLK",
+			"RX_BIAS", "MCLK",
+			"MADINPUT", "MCLK",
+			"hifi amp", "LINEOUT1",
+			"hifi amp", "LINEOUT2",
+			"AMIC2", "MIC BIAS2",
+			"MIC BIAS2", "Headset Mic",
+			"AMIC3", "MIC BIAS2",
+			"MIC BIAS2", "ANCRight Headset Mic",
+			"AMIC4", "MIC BIAS2",
+			"MIC BIAS2", "ANCLeft Headset Mic",
+			"AMIC5", "MIC BIAS3",
+			"MIC BIAS3", "Handset Mic",
+			"AMIC6", "MIC BIAS4",
+			"MIC BIAS4", "Analog Mic6",
+			"DMIC0", "MIC BIAS1",
+			"MIC BIAS1", "Digital Mic0",
+			"DMIC1", "MIC BIAS1",
+			"MIC BIAS1", "Digital Mic1",
+			"DMIC2", "MIC BIAS3",
+			"MIC BIAS3", "Digital Mic2",
+			"DMIC3", "MIC BIAS3",
+			"MIC BIAS3", "Digital Mic3",
+			"DMIC4", "MIC BIAS4",
+			"MIC BIAS4", "Digital Mic4",
+			"DMIC5", "MIC BIAS4",
+			"MIC BIAS4", "Digital Mic5",
+			"SpkrLeft IN", "SPK1 OUT",
+			"SpkrRight IN", "SPK2 OUT";
+
+		qcom,msm-mbhc-hphl-swh = <0>;
+		qcom,msm-mbhc-gnd-swh = <0>;
+		qcom,us-euro-gpios = <&wcd_us_euro_gpio>;
+		qcom,hph-en0-gpio = <&hph_en0_gpio>;
+		qcom,hph-en1-gpio = <&hph_en1_gpio>;
+		qcom,tasha-mclk-clk-freq = <9600000>;
+		asoc-platform = <&pcm0>, <&pcm1>, <&pcm2>, <&voip>, <&voice>,
+				<&loopback>, <&compress>, <&hostless>,
+				<&afe>, <&lsm>, <&routing>, <&cpe>, <&compr>,
+				<&pcm_noirq>, <&cpe3>, <&trans_loopback>;
+		asoc-platform-names = "msm-pcm-dsp.0", "msm-pcm-dsp.1",
+				"msm-pcm-dsp.2", "msm-voip-dsp",
+				"msm-pcm-voice", "msm-pcm-loopback",
+				"msm-compress-dsp", "msm-pcm-hostless",
+				"msm-pcm-afe", "msm-lsm-client",
+				"msm-pcm-routing", "msm-cpe-lsm",
+				"msm-compr-dsp", "msm-pcm-dsp-noirq",
+				"msm-cpe-lsm.3", "msm-transcode-loopback";
+		asoc-cpu = <&dai_hdmi>, <&dai_dp>,
+				<&dai_mi2s0>, <&dai_mi2s1>,
+				<&dai_mi2s2>, <&dai_mi2s3>,
+				<&dai_pri_auxpcm>, <&dai_sec_auxpcm>,
+				<&dai_tert_auxpcm>, <&dai_quat_auxpcm>,
+				<&sb_0_rx>, <&sb_0_tx>, <&sb_1_rx>, <&sb_1_tx>,
+				<&sb_2_rx>, <&sb_2_tx>, <&sb_3_rx>, <&sb_3_tx>,
+				<&sb_4_rx>, <&sb_4_tx>, <&sb_5_tx>,
+				<&afe_pcm_rx>, <&afe_pcm_tx>, <&afe_proxy_rx>,
+				<&afe_proxy_tx>, <&incall_record_rx>,
+				<&incall_record_tx>, <&incall_music_rx>,
+				<&incall_music_2_rx>, <&sb_5_rx>, <&sb_6_rx>,
+				<&sb_7_rx>, <&sb_7_tx>, <&sb_8_tx>,
+				<&usb_audio_rx>, <&usb_audio_tx>,
+				<&dai_pri_tdm_rx_0>, <&dai_pri_tdm_tx_0>,
+				<&dai_sec_tdm_rx_0>, <&dai_sec_tdm_tx_0>,
+				<&dai_tert_tdm_rx_0>, <&dai_tert_tdm_tx_0>,
+				<&dai_quat_tdm_rx_0>, <&dai_quat_tdm_tx_0>;
+		asoc-cpu-names = "msm-dai-q6-hdmi.8", "msm-dai-q6-dp.24608",
+				"msm-dai-q6-mi2s.0", "msm-dai-q6-mi2s.1",
+				"msm-dai-q6-mi2s.2", "msm-dai-q6-mi2s.3",
+				"msm-dai-q6-auxpcm.1", "msm-dai-q6-auxpcm.2",
+				"msm-dai-q6-auxpcm.3", "msm-dai-q6-auxpcm.4",
+				"msm-dai-q6-dev.16384", "msm-dai-q6-dev.16385",
+				"msm-dai-q6-dev.16386", "msm-dai-q6-dev.16387",
+				"msm-dai-q6-dev.16388", "msm-dai-q6-dev.16389",
+				"msm-dai-q6-dev.16390", "msm-dai-q6-dev.16391",
+				"msm-dai-q6-dev.16392", "msm-dai-q6-dev.16393",
+				"msm-dai-q6-dev.16395", "msm-dai-q6-dev.224",
+				"msm-dai-q6-dev.225", "msm-dai-q6-dev.241",
+				"msm-dai-q6-dev.240", "msm-dai-q6-dev.32771",
+				"msm-dai-q6-dev.32772", "msm-dai-q6-dev.32773",
+				"msm-dai-q6-dev.32770", "msm-dai-q6-dev.16394",
+				"msm-dai-q6-dev.16396", "msm-dai-q6-dev.16398",
+				"msm-dai-q6-dev.16399", "msm-dai-q6-dev.16401",
+				"msm-dai-q6-dev.28672", "msm-dai-q6-dev.28673",
+				"msm-dai-q6-tdm.36864", "msm-dai-q6-tdm.36865",
+				"msm-dai-q6-tdm.36880", "msm-dai-q6-tdm.36881",
+				"msm-dai-q6-tdm.36896", "msm-dai-q6-tdm.36897",
+				"msm-dai-q6-tdm.36912", "msm-dai-q6-tdm.36913";
+		asoc-codec = <&stub_codec>, <&ext_disp_audio_codec>;
+		asoc-codec-names = "msm-stub-codec.1",
+				   "msm-ext-disp-audio-codec-rx";
+		qcom,wsa-max-devs = <2>;
+		qcom,wsa-devs = <&wsa881x_211>, <&wsa881x_212>,
+				<&wsa881x_213>, <&wsa881x_214>;
+		qcom,wsa-aux-dev-prefix = "SpkrLeft", "SpkrRight",
+					  "SpkrLeft", "SpkrRight";
+
+		hph_en0_gpio: msm_cdc_pinctrl@67 {
+			compatible = "qcom,msm-cdc-pinctrl";
+			pinctrl-names = "aud_active", "aud_sleep";
+			pinctrl-0 = <&hph_en0_active>;
+			pinctrl-1 = <&hph_en0_idle>;
+		};
+
+		hph_en1_gpio: msm_cdc_pinctrl@68 {
+			compatible = "qcom,msm-cdc-pinctrl";
+			pinctrl-names = "aud_active", "aud_sleep";
+			pinctrl-0 = <&hph_en1_active>;
+			pinctrl-1 = <&hph_en1_idle>;
+		};
+	};
+
+	snd_934x: sound-tavil {
+		compatible = "qcom,msm8998-asoc-snd-tavil";
+		qcom,model = "msm8998-tavil-snd-card";
+		qcom,ext-disp-audio-rx;
+		qcom,wcn-btfm;
+		qcom,mi2s-audio-intf;
+		qcom,auxpcm-audio-intf;
+		qcom,msm-mi2s-master = <1>, <1>, <1>, <1>;
+
+		reg = <0x1711a000 0x4>,
+		      <0x1711b000 0x4>,
+		      <0x1711c000 0x4>,
+		      <0x1711d000 0x4>;
+		reg-names = "lpaif_pri_mode_muxsel",
+			    "lpaif_sec_mode_muxsel",
+			    "lpaif_tert_mode_muxsel",
+			    "lpaif_quat_mode_muxsel";
+
+		qcom,audio-routing =
+			"AIF4 VI", "MCLK",
+			"RX_BIAS", "MCLK",
+			"MADINPUT", "MCLK",
+			"hifi amp", "LINEOUT1",
+			"hifi amp", "LINEOUT2",
+			"AMIC2", "MIC BIAS2",
+			"MIC BIAS2", "Headset Mic",
+			"AMIC3", "MIC BIAS2",
+			"MIC BIAS2", "ANCRight Headset Mic",
+			"AMIC4", "MIC BIAS2",
+			"MIC BIAS2", "ANCLeft Headset Mic",
+			"AMIC5", "MIC BIAS3",
+			"MIC BIAS3", "Handset Mic",
+			"DMIC0", "MIC BIAS1",
+			"MIC BIAS1", "Digital Mic0",
+			"DMIC1", "MIC BIAS1",
+			"MIC BIAS1", "Digital Mic1",
+			"DMIC2", "MIC BIAS3",
+			"MIC BIAS3", "Digital Mic2",
+			"DMIC3", "MIC BIAS3",
+			"MIC BIAS3", "Digital Mic3",
+			"DMIC4", "MIC BIAS4",
+			"MIC BIAS4", "Digital Mic4",
+			"DMIC5", "MIC BIAS4",
+			"MIC BIAS4", "Digital Mic5",
+			"SpkrLeft IN", "SPK1 OUT",
+			"SpkrRight IN", "SPK2 OUT";
+
+		qcom,msm-mbhc-hphl-swh = <0>;
+		qcom,msm-mbhc-gnd-swh = <0>;
+		qcom,us-euro-gpios = <&tavil_us_euro_sw>;
+		qcom,hph-en0-gpio = <&tavil_hph_en0>;
+		qcom,hph-en1-gpio = <&tavil_hph_en1>;
+		qcom,tavil-mclk-clk-freq = <9600000>;
+
+		qcom,usbc-analog-en1_gpio = <&wcd_usbc_analog_en1_gpio>;
+		qcom,usbc-analog-en2_n_gpio = <&wcd_usbc_analog_en2n_gpio>;
+
+		asoc-platform = <&pcm0>, <&pcm1>, <&pcm2>, <&voip>, <&voice>,
+				<&loopback>, <&compress>, <&hostless>,
+				<&afe>, <&lsm>, <&routing>, <&cpe>, <&compr>,
+				<&pcm_noirq>, <&trans_loopback>;
+		asoc-platform-names = "msm-pcm-dsp.0", "msm-pcm-dsp.1",
+				"msm-pcm-dsp.2", "msm-voip-dsp",
+				"msm-pcm-voice", "msm-pcm-loopback",
+				"msm-compress-dsp", "msm-pcm-hostless",
+				"msm-pcm-afe", "msm-lsm-client",
+				"msm-pcm-routing", "msm-cpe-lsm",
+				"msm-compr-dsp", "msm-pcm-dsp-noirq",
+				"msm-transcode-loopback";
+		asoc-cpu = <&dai_hdmi>, <&dai_dp>,
+				<&dai_mi2s0>, <&dai_mi2s1>,
+				<&dai_mi2s2>, <&dai_mi2s3>,
+				<&dai_pri_auxpcm>, <&dai_sec_auxpcm>,
+				<&dai_tert_auxpcm>, <&dai_quat_auxpcm>,
+				<&sb_0_rx>, <&sb_0_tx>, <&sb_1_rx>, <&sb_1_tx>,
+				<&sb_2_rx>, <&sb_2_tx>, <&sb_3_rx>, <&sb_3_tx>,
+				<&sb_4_rx>, <&sb_4_tx>, <&sb_5_tx>,
+				<&afe_pcm_rx>, <&afe_pcm_tx>, <&afe_proxy_rx>,
+				<&afe_proxy_tx>, <&incall_record_rx>,
+				<&incall_record_tx>, <&incall_music_rx>,
+				<&incall_music_2_rx>, <&sb_5_rx>, <&sb_6_rx>,
+				<&sb_7_rx>, <&sb_7_tx>, <&sb_8_tx>,
+				<&usb_audio_rx>, <&usb_audio_tx>,
+				<&dai_pri_tdm_rx_0>, <&dai_pri_tdm_tx_0>,
+				<&dai_sec_tdm_rx_0>, <&dai_sec_tdm_tx_0>,
+				<&dai_tert_tdm_rx_0>, <&dai_tert_tdm_tx_0>,
+				<&dai_quat_tdm_rx_0>, <&dai_quat_tdm_tx_0>;
+		asoc-cpu-names = "msm-dai-q6-hdmi.8",  "msm-dai-q6-dp.24608",
+				"msm-dai-q6-mi2s.0", "msm-dai-q6-mi2s.1",
+				"msm-dai-q6-mi2s.2", "msm-dai-q6-mi2s.3",
+				"msm-dai-q6-auxpcm.1", "msm-dai-q6-auxpcm.2",
+				"msm-dai-q6-auxpcm.3", "msm-dai-q6-auxpcm.4",
+				"msm-dai-q6-dev.16384", "msm-dai-q6-dev.16385",
+				"msm-dai-q6-dev.16386", "msm-dai-q6-dev.16387",
+				"msm-dai-q6-dev.16388", "msm-dai-q6-dev.16389",
+				"msm-dai-q6-dev.16390", "msm-dai-q6-dev.16391",
+				"msm-dai-q6-dev.16392", "msm-dai-q6-dev.16393",
+				"msm-dai-q6-dev.16395", "msm-dai-q6-dev.224",
+				"msm-dai-q6-dev.225", "msm-dai-q6-dev.241",
+				"msm-dai-q6-dev.240", "msm-dai-q6-dev.32771",
+				"msm-dai-q6-dev.32772", "msm-dai-q6-dev.32773",
+				"msm-dai-q6-dev.32770", "msm-dai-q6-dev.16394",
+				"msm-dai-q6-dev.16396", "msm-dai-q6-dev.16398",
+				"msm-dai-q6-dev.16399", "msm-dai-q6-dev.16401",
+				"msm-dai-q6-dev.28672", "msm-dai-q6-dev.28673",
+				"msm-dai-q6-tdm.36864", "msm-dai-q6-tdm.36865",
+				"msm-dai-q6-tdm.36880", "msm-dai-q6-tdm.36881",
+				"msm-dai-q6-tdm.36896", "msm-dai-q6-tdm.36897",
+				"msm-dai-q6-tdm.36912", "msm-dai-q6-tdm.36913";
+		asoc-codec = <&stub_codec>, <&ext_disp_audio_codec>;
+		asoc-codec-names = "msm-stub-codec.1",
+				   "msm-ext-disp-audio-codec-rx";
+		qcom,wsa-max-devs = <2>;
+		qcom,wsa-devs = <&wsa881x_0211>, <&wsa881x_0212>,
+				<&wsa881x_0213>, <&wsa881x_0214>;
+		qcom,wsa-aux-dev-prefix = "SpkrLeft", "SpkrRight",
+					  "SpkrLeft", "SpkrRight";
+	};
+
+	cpe: qcom,msm-cpe-lsm {
+		compatible = "qcom,msm-cpe-lsm";
+		qcom,msm-cpe-lsm-id = <1>;
+	};
+
+	cpe3: qcom,msm-cpe-lsm@3 {
+		compatible = "qcom,msm-cpe-lsm";
+		qcom,msm-cpe-lsm-id = <3>;
+	};
+
+	qcom,wcd-dsp-mgr {
+		compatible = "qcom,wcd-dsp-mgr";
+		qcom,wdsp-components = <&wcd934x_cdc 0>,
+				       <&wcd_spi_0 1>,
+				       <&glink_spi_xprt_wdsp 2>;
+		qcom,img-filename = "cpe_9340";
+	};
+
+	wcd_us_euro_gpio: msm_cdc_pinctrl@75 {
+		compatible = "qcom,msm-cdc-pinctrl";
+		pinctrl-names = "aud_active", "aud_sleep";
+		pinctrl-0 = <&wcd_gnd_mic_swap_active>;
+		pinctrl-1 = <&wcd_gnd_mic_swap_idle>;
+	};
+
+	wcd_usbc_analog_en1_gpio: msm_cdc_pinctrl@59 {
+		compatible = "qcom,msm-cdc-pinctrl";
+		pinctrl-names = "aud_active", "aud_sleep";
+		pinctrl-0 = <&wcd_usbc_analog_en1_active>;
+		pinctrl-1 = <&wcd_usbc_analog_en1_idle>;
+	};
+
+	wcd_usbc_analog_en2n_gpio: msm_cdc_pinctrl@60 {
+		compatible = "qcom,msm-cdc-pinctrl";
+		pinctrl-names = "aud_active", "aud_sleep";
+		pinctrl-0 = <&wcd_usbc_analog_en2n_active>;
+		pinctrl-1 = <&wcd_usbc_analog_en2n_idle>;
+	};
+
+	wcd9xxx_intc: wcd9xxx-irq {
+		status = "ok";
+		compatible = "qcom,wcd9xxx-irq";
+		interrupt-controller;
+		#interrupt-cells = <1>;
+		interrupt-parent = <&tlmm>;
+		qcom,gpio-connect = <&tlmm 54 0>;
+		pinctrl-names = "default";
+		pinctrl-0 = <&wcd_intr_default>;
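+		/*
+		 * The codec's internal interrupts are demultiplexed here: the
+		 * WCD9xxx raises a single line on TLMM GPIO 54 and this
+		 * controller fans it out to the slim_aud codec nodes below.
+		 */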
+	};
+
+	wcd_rst_gpio: msm_cdc_pinctrl@64 {
+		compatible = "qcom,msm-cdc-pinctrl";
+		qcom,cdc-rst-n-gpio = <&tlmm 64 0>;
+		pinctrl-names = "aud_active", "aud_sleep";
+		pinctrl-0 = <&cdc_reset_active>;
+		pinctrl-1 = <&cdc_reset_sleep>;
+	};
+
+	qcom,wcd-dsp-glink {
+		compatible = "qcom,wcd-dsp-glink";
+	};
+};
+
+&slim_aud {
+	tasha_codec {
+		compatible = "qcom,tasha-slim-pgd";
+		elemental-addr = [00 01 A0 01 17 02];
+
+		interrupt-parent = <&wcd9xxx_intc>;
+		interrupts = <0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16
+			      17 18 19 20 21 22 23 24 25 26 27 28 29
+			      30>;
+
+		qcom,wcd-rst-gpio-node = <&wcd_rst_gpio>;
+
+		clock-names = "wcd_clk", "wcd_native_clk";
+		clocks = <&clock_audio clk_audio_pmi_clk>,
+			 <&clock_audio clk_audio_ap_clk2>;
+
+		cdc-vdd-buck-supply = <&pm8998_s4>;
+		qcom,cdc-vdd-buck-voltage = <1800000 1800000>;
+		qcom,cdc-vdd-buck-current = <650000>;
+
+		cdc-buck-sido-supply = <&pm8998_s4>;
+		qcom,cdc-buck-sido-voltage = <1800000 1800000>;
+		qcom,cdc-buck-sido-current = <250000>;
+
+		cdc-vdd-tx-h-supply = <&pm8998_s4>;
+		qcom,cdc-vdd-tx-h-voltage = <1800000 1800000>;
+		qcom,cdc-vdd-tx-h-current = <25000>;
+
+		cdc-vdd-rx-h-supply = <&pm8998_s4>;
+		qcom,cdc-vdd-rx-h-voltage = <1800000 1800000>;
+		qcom,cdc-vdd-rx-h-current = <25000>;
+
+		cdc-vddpx-1-supply = <&pm8998_s4>;
+		qcom,cdc-vddpx-1-voltage = <1800000 1800000>;
+		qcom,cdc-vddpx-1-current = <10000>;
+
+		qcom,cdc-static-supplies = "cdc-vdd-buck",
+					   "cdc-buck-sido",
+					   "cdc-vdd-tx-h",
+					   "cdc-vdd-rx-h",
+					   "cdc-vddpx-1";
+
+		qcom,cdc-micbias1-mv = <1800>;
+		qcom,cdc-micbias2-mv = <1800>;
+		qcom,cdc-micbias3-mv = <1800>;
+		qcom,cdc-micbias4-mv = <1800>;
+
+		qcom,cdc-mclk-clk-rate = <9600000>;
+		qcom,cdc-slim-ifd = "tasha-slim-ifd";
+		qcom,cdc-slim-ifd-elemental-addr = [00 00 A0 01 17 02];
+		qcom,cdc-dmic-sample-rate = <4800000>;
+		qcom,cdc-mad-dmic-rate = <600000>;
+		qcom,cdc-ecpp-dmic-rate = <1200000>;
+	};
+
+	wcd934x_cdc: tavil_codec {
+		compatible = "qcom,tavil-slim-pgd";
+		elemental-addr = [00 01 50 02 17 02];
+
+		interrupt-parent = <&wcd9xxx_intc>;
+		interrupts = <0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16
+			      17 18 19 20 21 22 23 24 25 26 27 28 29
+			      30 31>;
+
+		qcom,wcd-rst-gpio-node = <&wcd_rst_gpio>;
+
+		clock-names = "wcd_clk";
+		clocks = <&clock_audio_lnbb clk_audio_pmi_lnbb_clk>;
+
+		cdc-vdd-buck-supply = <&pm8998_s4>;
+		qcom,cdc-vdd-buck-voltage = <1800000 1800000>;
+		qcom,cdc-vdd-buck-current = <650000>;
+
+		cdc-buck-sido-supply = <&pm8998_s4>;
+		qcom,cdc-buck-sido-voltage = <1800000 1800000>;
+		qcom,cdc-buck-sido-current = <250000>;
+
+		cdc-vdd-tx-h-supply = <&pm8998_s4>;
+		qcom,cdc-vdd-tx-h-voltage = <1800000 1800000>;
+		qcom,cdc-vdd-tx-h-current = <25000>;
+
+		cdc-vdd-rx-h-supply = <&pm8998_s4>;
+		qcom,cdc-vdd-rx-h-voltage = <1800000 1800000>;
+		qcom,cdc-vdd-rx-h-current = <25000>;
+
+		cdc-vddpx-1-supply = <&pm8998_s4>;
+		qcom,cdc-vddpx-1-voltage = <1800000 1800000>;
+		qcom,cdc-vddpx-1-current = <10000>;
+
+		qcom,cdc-static-supplies = "cdc-vdd-buck",
+					   "cdc-buck-sido",
+					   "cdc-vdd-tx-h",
+					   "cdc-vdd-rx-h",
+					   "cdc-vddpx-1";
+
+		qcom,cdc-micbias1-mv = <1800>;
+		qcom,cdc-micbias2-mv = <1800>;
+		qcom,cdc-micbias3-mv = <1800>;
+		qcom,cdc-micbias4-mv = <1800>;
+
+		qcom,cdc-mclk-clk-rate = <9600000>;
+		qcom,cdc-slim-ifd = "tavil-slim-ifd";
+		qcom,cdc-slim-ifd-elemental-addr = [00 00 50 02 17 02];
+		qcom,cdc-dmic-sample-rate = <4800000>;
+		qcom,cdc-mad-dmic-rate = <600000>;
+
+		qcom,wdsp-cmpnt-dev-name = "tavil_codec";
+
+		wcd_spi_0: wcd_spi {
+			compatible = "qcom,wcd-spi-v2";
+			qcom,master-bus-num = <10>;
+			qcom,chip-select = <0>;
+			qcom,max-frequency = <24000000>;
+			qcom,mem-base-addr = <0x100000>;
+		};
+	};
+};
+
+&tlmm {
+	wcd9xxx_intr {
+		wcd_intr_default: wcd_intr_default {
+			mux {
+				pins = "gpio54";
+				function = "gpio";
+			};
+
+			config {
+				pins = "gpio54";
+				drive-strength = <2>; /* 2 mA */
+				bias-pull-down; /* pull down */
+				input-enable;
+			};
+		};
+	};
+
+	hph_en0_ctrl {
+		hph_en0_idle: hph_en0_idle {
+			mux {
+				pins = "gpio67";
+				function = "gpio";
+			};
+			config {
+				pins = "gpio67";
+				drive-strength = <2>;
+				bias-pull-down;
+				output-low;
+			};
+		};
+		hph_en0_active: hph_en0_active {
+			mux {
+				pins = "gpio67";
+				function = "gpio";
+			};
+			config {
+				pins = "gpio67";
+				drive-strength = <2>;
+				bias-disable;
+				output-high;
+			};
+		};
+	};
+
+	hph_en1_ctrl {
+		hph_en1_idle: hph_en1_idle {
+			mux {
+				pins = "gpio68";
+				function = "gpio";
+			};
+			config {
+				pins = "gpio68";
+				drive-strength = <2>;
+				bias-pull-down;
+				output-low;
+			};
+		};
+		hph_en1_active: hph_en1_active {
+			mux {
+				pins = "gpio68";
+				function = "gpio";
+			};
+			config {
+				pins = "gpio68";
+				drive-strength = <2>;
+				bias-disable;
+				output-high;
+			};
+		};
+	};
+
+	wcd_gnd_mic_swap {
+		wcd_gnd_mic_swap_idle: wcd_gnd_mic_swap_idle {
+			mux {
+				pins = "gpio75";
+				function = "gpio";
+			};
+			config {
+				pins = "gpio75";
+				drive-strength = <2>;
+				bias-pull-down;
+				output-low;
+			};
+		};
+		wcd_gnd_mic_swap_active: wcd_gnd_mic_swap_active {
+			mux {
+				pins = "gpio75";
+				function = "gpio";
+			};
+			config {
+				pins = "gpio75";
+				drive-strength = <2>;
+				bias-disable;
+				output-high;
+			};
+		};
+	};
+
+	wcd_usbc_analog_en1 {
+		wcd_usbc_analog_en1_idle: wcd_usbc_ana_en1_idle {
+			mux {
+				pins = "gpio59";
+				function = "gpio";
+			};
+			config {
+				pins = "gpio59";
+				drive-strength = <2>;
+				bias-pull-down;
+				output-low;
+			};
+		};
+
+		wcd_usbc_analog_en1_active: wcd_usbc_ana_en1_active {
+			mux {
+				pins = "gpio59";
+				function = "gpio";
+			};
+			config {
+				pins = "gpio59";
+				drive-strength = <2>;
+				bias-disable;
+				output-high;
+			};
+		};
+	};
+
+	wcd_usbc_analog_en2n {
+		wcd_usbc_analog_en2n_idle: wcd_usbc_ana_en2n_idle {
+			mux {
+				pins = "gpio60";
+				function = "gpio";
+			};
+			config {
+				pins = "gpio60";
+				drive-strength = <2>;
+				bias-disable;
+				output-high;
+			};
+		};
+
+		wcd_usbc_analog_en2n_active: wcd_usbc_ana_en2n_active {
+			mux {
+				pins = "gpio60";
+				function = "gpio";
+			};
+			config {
+				pins = "gpio60";
+				drive-strength = <2>;
+				bias-pull-down;
+				output-low;
+			};
+		};
+	};
+
+	cdc_reset_ctrl {
+		cdc_reset_sleep: cdc_reset_sleep {
+			mux {
+				pins = "gpio64";
+				function = "gpio";
+			};
+			config {
+				pins = "gpio64";
+				drive-strength = <16>;
+				bias-disable;
+				output-low;
+			};
+		};
+		cdc_reset_active: cdc_reset_active {
+			mux {
+				pins = "gpio64";
+				function = "gpio";
+			};
+			config {
+				pins = "gpio64";
+				drive-strength = <16>;
+				bias-pull-down;
+				output-high;
+			};
+		};
+	};
+};
diff -Nruw linux-4.4.115/arch/arm/boot/dts/qcom/msm8998-blsp.dtsi linux-4.4.115-fbx/arch/arm/boot/dts/qcom/msm8998-blsp.dtsi
--- linux-4.4.115/arch/arm/boot/dts/qcom/msm8998-blsp.dtsi	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/arch/arm/boot/dts/qcom/msm8998-blsp.dtsi	2019-01-22 16:16:21.191225470 +0100
@@ -0,0 +1,863 @@
+/*
+ * Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+/ {
+	aliases {
+		i2c1 = &i2c_1;
+		i2c2 = &i2c_2;
+		i2c3 = &i2c_3;
+		i2c4 = &i2c_4;
+		i2c5 = &i2c_5;
+		i2c6 = &i2c_6;
+		i2c7 = &i2c_7;
+		i2c8 = &i2c_8;
+		i2c9 = &i2c_9;
+		i2c10 = &i2c_10;
+		i2c11 = &i2c_11;
+		i2c12 = &i2c_12;
+		spi1 = &spi_1;
+		spi2 = &spi_2;
+		spi3 = &spi_3;
+		spi4 = &spi_4;
+		spi5 = &spi_5;
+		spi6 = &spi_6;
+		spi7 = &spi_7;
+		spi8 = &spi_8;
+		spi9 = &spi_9;
+		spi10 = &spi_10;
+		spi11 = &spi_11;
+		spi12 = &spi_12;
+	};
+};
+
+#include "msm8998-pinctrl.dtsi"
+
+&soc {
+	dma_blsp1: qcom,sps-dma@0xc144000 { /* BLSP1 */
+		#dma-cells = <4>;
+		compatible = "qcom,sps-dma";
+		reg = <0xc144000 0x25000>;
+		interrupts = <0 238 0>;
+		qcom,summing-threshold = <0x10>;
+	};
+
+	dma_blsp2: qcom,sps-dma@0xc184000 { /* BLSP2 */
+		#dma-cells = <4>;
+		compatible = "qcom,sps-dma";
+		reg = <0xc184000 0x25000>;
+		interrupts = <0 239 0>;
+		qcom,summing-threshold = <0x10>;
+	};
+
+	i2c_1: i2c@c175000 { /* BLSP1 QUP1 */
+		compatible = "qcom,i2c-msm-v2";
+		#address-cells = <1>;
+		#size-cells = <0>;
+		reg-names = "qup_phys_addr";
+		reg = <0xC175000 0x600>;
+		interrupt-names = "qup_irq";
+		interrupts = <0 95 0>;
+		dmas = <&dma_blsp1 6 64 0x20000020 0x20>,
+			<&dma_blsp1 7 32 0x20000020 0x20>;
+		dma-names = "tx", "rx";
+		qcom,master-id = <86>;
+		qcom,clk-freq-out = <400000>;
+		qcom,clk-freq-in  = <19200000>;
+		clock-names = "iface_clk", "core_clk";
+		clocks = <&clock_gcc clk_gcc_blsp1_ahb_clk>,
+			 <&clock_gcc clk_gcc_blsp1_qup1_i2c_apps_clk>;
+		pinctrl-names = "i2c_active", "i2c_sleep";
+		pinctrl-0 = <&i2c_1_active>;
+		pinctrl-1 = <&i2c_1_sleep>;
+		status = "disabled";
+	};
+
+	i2c_2: i2c@c176000 { /* BLSP1 QUP2 */
+		compatible = "qcom,i2c-msm-v2";
+		#address-cells = <1>;
+		#size-cells = <0>;
+		reg-names = "qup_phys_addr";
+		reg = <0xC176000 0x600>;
+		interrupt-names = "qup_irq";
+		interrupts = <0 96 0>;
+		dmas = <&dma_blsp1 8 64 0x20000020 0x20>,
+			<&dma_blsp1 9 32 0x20000020 0x20>;
+		dma-names = "tx", "rx";
+		qcom,master-id = <86>;
+		qcom,clk-freq-out = <400000>;
+		qcom,clk-freq-in  = <19200000>;
+		clock-names = "iface_clk", "core_clk";
+		clocks = <&clock_gcc clk_gcc_blsp1_ahb_clk>,
+			 <&clock_gcc clk_gcc_blsp1_qup2_i2c_apps_clk>;
+		pinctrl-names = "i2c_active", "i2c_sleep";
+		pinctrl-0 = <&i2c_2_active>;
+		pinctrl-1 = <&i2c_2_sleep>;
+		status = "disabled";
+	};
+
+	i2c_3: i2c@c177000 { /* BLSP1 QUP3 */
+		compatible = "qcom,i2c-msm-v2";
+		#address-cells = <1>;
+		#size-cells = <0>;
+		reg-names = "qup_phys_addr";
+		reg = <0xC177000 0x600>;
+		interrupt-names = "qup_irq";
+		interrupts = <0 97 0>;
+		dmas = <&dma_blsp1 10 64 0x20000020 0x20>,
+			<&dma_blsp1 11 32 0x20000020 0x20>;
+		dma-names = "tx", "rx";
+		qcom,master-id = <86>;
+		qcom,clk-freq-out = <400000>;
+		qcom,clk-freq-in  = <19200000>;
+		clock-names = "iface_clk", "core_clk";
+		clocks = <&clock_gcc clk_gcc_blsp1_ahb_clk>,
+			 <&clock_gcc clk_gcc_blsp1_qup3_i2c_apps_clk>;
+		pinctrl-names = "i2c_active", "i2c_sleep";
+		pinctrl-0 = <&i2c_3_active>;
+		pinctrl-1 = <&i2c_3_sleep>;
+		status = "disabled";
+	};
+
+	i2c_4: i2c@c178000 { /* BLSP1 QUP4 */
+		compatible = "qcom,i2c-msm-v2";
+		#address-cells = <1>;
+		#size-cells = <0>;
+		reg-names = "qup_phys_addr";
+		reg = <0xC178000 0x600>;
+		interrupt-names = "qup_irq";
+		interrupts = <0 98 0>;
+		dmas = <&dma_blsp1 12 64 0x20000020 0x20>,
+			<&dma_blsp1 13 32 0x20000020 0x20>;
+		dma-names = "tx", "rx";
+		qcom,master-id = <86>;
+		qcom,clk-freq-out = <400000>;
+		qcom,clk-freq-in  = <19200000>;
+		clock-names = "iface_clk", "core_clk";
+		clocks = <&clock_gcc clk_gcc_blsp1_ahb_clk>,
+			 <&clock_gcc clk_gcc_blsp1_qup4_i2c_apps_clk>;
+		pinctrl-names = "i2c_active", "i2c_sleep";
+		pinctrl-0 = <&i2c_4_active>;
+		pinctrl-1 = <&i2c_4_sleep>;
+		status = "disabled";
+	};
+
+	i2c_5: i2c@c179000 { /* BLSP1 QUP5 */
+		compatible = "qcom,i2c-msm-v2";
+		#address-cells = <1>;
+		#size-cells = <0>;
+		reg-names = "qup_phys_addr";
+		reg = <0xC179000 0x600>;
+		interrupt-names = "qup_irq";
+		interrupts = <0 99 0>;
+		dmas = <&dma_blsp1 14 64 0x20000020 0x20>,
+			<&dma_blsp1 15 32 0x20000020 0x20>;
+		dma-names = "tx", "rx";
+		qcom,master-id = <86>;
+		qcom,clk-freq-out = <400000>;
+		qcom,clk-freq-in  = <19200000>;
+		clock-names = "iface_clk", "core_clk";
+		clocks = <&clock_gcc clk_gcc_blsp1_ahb_clk>,
+			 <&clock_gcc clk_gcc_blsp1_qup5_i2c_apps_clk>;
+		pinctrl-names = "i2c_active", "i2c_sleep";
+		pinctrl-0 = <&i2c_5_active>;
+		pinctrl-1 = <&i2c_5_sleep>;
+		status = "disabled";
+	};
+
+	i2c_6: i2c@c17a000 { /* BLSP1 QUP6 */
+		compatible = "qcom,i2c-msm-v2";
+		#address-cells = <1>;
+		#size-cells = <0>;
+		reg-names = "qup_phys_addr";
+		reg = <0xC17A000 0x600>;
+		interrupt-names = "qup_irq";
+		interrupts = <0 100 0>;
+		dmas = <&dma_blsp1 16 64 0x20000020 0x20>,
+			<&dma_blsp1 17 32 0x20000020 0x20>;
+		dma-names = "tx", "rx";
+		qcom,master-id = <86>;
+		qcom,clk-freq-out = <400000>;
+		qcom,clk-freq-in  = <19200000>;
+		clock-names = "iface_clk", "core_clk";
+		clocks = <&clock_gcc clk_gcc_blsp1_ahb_clk>,
+			 <&clock_gcc clk_gcc_blsp1_qup6_i2c_apps_clk>;
+		pinctrl-names = "i2c_active", "i2c_sleep";
+		pinctrl-0 = <&i2c_6_active>;
+		pinctrl-1 = <&i2c_6_sleep>;
+		status = "disabled";
+	};
+
+	i2c_7: i2c@c1b5000 { /* BLSP2 QUP1 */
+		compatible = "qcom,i2c-msm-v2";
+		#address-cells = <1>;
+		#size-cells = <0>;
+		reg-names = "qup_phys_addr";
+		reg = <0xC1B5000 0x600>;
+		interrupt-names = "qup_irq";
+		interrupts = <0 101 0>;
+		dmas = <&dma_blsp2 6 64 0x20000020 0x20>,
+			<&dma_blsp2 7 32 0x20000020 0x20>;
+		dma-names = "tx", "rx";
+		qcom,master-id = <84>;
+		qcom,clk-freq-out = <400000>;
+		qcom,clk-freq-in  = <19200000>;
+		clock-names = "iface_clk", "core_clk";
+		clocks = <&clock_gcc clk_gcc_blsp2_ahb_clk>,
+			 <&clock_gcc clk_gcc_blsp2_qup1_i2c_apps_clk>;
+		pinctrl-names = "i2c_active", "i2c_sleep";
+		pinctrl-0 = <&i2c_7_active>;
+		pinctrl-1 = <&i2c_7_sleep>;
+		status = "disabled";
+	};
+
+	i2c_8: i2c@c1b6000 { /* BLSP2 QUP2 */
+		compatible = "qcom,i2c-msm-v2";
+		#address-cells = <1>;
+		#size-cells = <0>;
+		reg-names = "qup_phys_addr";
+		reg = <0xC1B6000 0x600>;
+		interrupt-names = "qup_irq";
+		interrupts = <0 102 0>;
+		dmas = <&dma_blsp2 8 64 0x20000020 0x20>,
+			<&dma_blsp2 9 32 0x20000020 0x20>;
+		dma-names = "tx", "rx";
+		qcom,master-id = <84>;
+		qcom,clk-freq-out = <400000>;
+		qcom,clk-freq-in  = <19200000>;
+		clock-names = "iface_clk", "core_clk";
+		clocks = <&clock_gcc clk_gcc_blsp2_ahb_clk>,
+			 <&clock_gcc clk_gcc_blsp2_qup2_i2c_apps_clk>;
+		pinctrl-names = "i2c_active", "i2c_sleep";
+		pinctrl-0 = <&i2c_8_active>;
+		pinctrl-1 = <&i2c_8_sleep>;
+		status = "disabled";
+	};
+
+	i2c_9: i2c@c1b7000 { /* BLSP2 QUP3 */
+		compatible = "qcom,i2c-msm-v2";
+		#address-cells = <1>;
+		#size-cells = <0>;
+		reg-names = "qup_phys_addr";
+		reg = <0xC1B7000 0x600>;
+		interrupt-names = "qup_irq";
+		interrupts = <0 103 0>;
+		dmas = <&dma_blsp2 10 64 0x20000020 0x20>,
+			<&dma_blsp2 11 32 0x20000020 0x20>;
+		dma-names = "tx", "rx";
+		qcom,master-id = <84>;
+		qcom,clk-freq-out = <400000>;
+		qcom,clk-freq-in  = <19200000>;
+		clock-names = "iface_clk", "core_clk";
+		clocks = <&clock_gcc clk_gcc_blsp2_ahb_clk>,
+			 <&clock_gcc clk_gcc_blsp2_qup3_i2c_apps_clk>;
+		pinctrl-names = "i2c_active", "i2c_sleep";
+		pinctrl-0 = <&i2c_9_active>;
+		pinctrl-1 = <&i2c_9_sleep>;
+		status = "disabled";
+	};
+
+	i2c_10: i2c@c1b8000 { /* BLSP2 QUP4 */
+		compatible = "qcom,i2c-msm-v2";
+		#address-cells = <1>;
+		#size-cells = <0>;
+		reg-names = "qup_phys_addr";
+		reg = <0xC1B8000 0x600>;
+		interrupt-names = "qup_irq";
+		interrupts = <0 104 0>;
+		dmas = <&dma_blsp2 12 64 0x20000020 0x20>,
+			<&dma_blsp2 13 32 0x20000020 0x20>;
+		dma-names = "tx", "rx";
+		qcom,master-id = <84>;
+		qcom,clk-freq-out = <400000>;
+		qcom,clk-freq-in  = <19200000>;
+		clock-names = "iface_clk", "core_clk";
+		clocks = <&clock_gcc clk_gcc_blsp2_ahb_clk>,
+			 <&clock_gcc clk_gcc_blsp2_qup4_i2c_apps_clk>;
+		pinctrl-names = "i2c_active", "i2c_sleep";
+		pinctrl-0 = <&i2c_10_active>;
+		pinctrl-1 = <&i2c_10_sleep>;
+		status = "disabled";
+	};
+
+	i2c_11: i2c@c1b9000 { /* BLSP2 QUP5 */
+		compatible = "qcom,i2c-msm-v2";
+		#address-cells = <1>;
+		#size-cells = <0>;
+		reg-names = "qup_phys_addr";
+		reg = <0xC1B9000 0x600>;
+		interrupt-names = "qup_irq";
+		interrupts = <0 105 0>;
+		dmas = <&dma_blsp2 14 64 0x20000020 0x20>,
+			<&dma_blsp2 15 32 0x20000020 0x20>;
+		dma-names = "tx", "rx";
+		qcom,master-id = <84>;
+		qcom,clk-freq-out = <400000>;
+		qcom,clk-freq-in  = <19200000>;
+		clock-names = "iface_clk", "core_clk";
+		clocks = <&clock_gcc clk_gcc_blsp2_ahb_clk>,
+			 <&clock_gcc clk_gcc_blsp2_qup5_i2c_apps_clk>;
+		pinctrl-names = "i2c_active", "i2c_sleep";
+		pinctrl-0 = <&i2c_11_active>;
+		pinctrl-1 = <&i2c_11_sleep>;
+		status = "disabled";
+	};
+
+	i2c_12: i2c@c1ba000 { /* BLSP2 QUP6 */
+		compatible = "qcom,i2c-msm-v2";
+		#address-cells = <1>;
+		#size-cells = <0>;
+		reg-names = "qup_phys_addr";
+		reg = <0xC1BA000 0x600>;
+		interrupt-names = "qup_irq";
+		interrupts = <0 106 0>;
+		dmas = <&dma_blsp2 16 64 0x20000020 0x20>,
+			<&dma_blsp2 17 32 0x20000020 0x20>;
+		dma-names = "tx", "rx";
+		qcom,master-id = <84>;
+		qcom,clk-freq-out = <400000>;
+		qcom,clk-freq-in  = <19200000>;
+		clock-names = "iface_clk", "core_clk";
+		clocks = <&clock_gcc clk_gcc_blsp2_ahb_clk>,
+			 <&clock_gcc clk_gcc_blsp2_qup6_i2c_apps_clk>;
+		pinctrl-names = "i2c_active", "i2c_sleep";
+		pinctrl-0 = <&i2c_12_active>;
+		pinctrl-1 = <&i2c_12_sleep>;
+		status = "disabled";
+	};
+
+	spi_1: spi@c175000 { /* BLSP1 QUP1 */
+		compatible = "qcom,spi-qup-v2";
+		#address-cells = <1>;
+		#size-cells = <0>;
+		reg-names = "spi_physical", "spi_bam_physical";
+		reg = <0xC175000 0x600>,
+		      <0xC144000 0x25000>;
+		interrupt-names = "spi_irq", "spi_bam_irq";
+		interrupts = <0 95 0>, <0 238 0>;
+		spi-max-frequency = <50000000>;
+		qcom,use-bam;
+		qcom,ver-reg-exists;
+		qcom,bam-consumer-pipe-index = <6>;
+		qcom,bam-producer-pipe-index = <7>;
+		qcom,master-id = <86>;
+		qcom,use-pinctrl;
+		pinctrl-names = "spi_default", "spi_sleep";
+		pinctrl-0 = <&spi_1_active>;
+		pinctrl-1 = <&spi_1_sleep>;
+		clock-names = "iface_clk", "core_clk";
+		clocks = <&clock_gcc clk_gcc_blsp1_ahb_clk>,
+			 <&clock_gcc clk_gcc_blsp1_qup1_spi_apps_clk>;
+		status = "disabled";
+	};
+
+	spi_2: spi@c176000 { /* BLSP1 QUP2 */
+		compatible = "qcom,spi-qup-v2";
+		#address-cells = <1>;
+		#size-cells = <0>;
+		reg-names = "spi_physical", "spi_bam_physical";
+		reg = <0xC176000 0x600>,
+		      <0xC144000 0x25000>;
+		interrupt-names = "spi_irq", "spi_bam_irq";
+		interrupts = <0 96 0>, <0 238 0>;
+		spi-max-frequency = <50000000>;
+		qcom,use-bam;
+		qcom,ver-reg-exists;
+		qcom,bam-consumer-pipe-index = <8>;
+		qcom,bam-producer-pipe-index = <9>;
+		qcom,master-id = <86>;
+		qcom,use-pinctrl;
+		pinctrl-names = "spi_default", "spi_sleep";
+		pinctrl-0 = <&spi_2_active>;
+		pinctrl-1 = <&spi_2_sleep>;
+		clock-names = "iface_clk", "core_clk";
+		clocks = <&clock_gcc clk_gcc_blsp1_ahb_clk>,
+			 <&clock_gcc clk_gcc_blsp1_qup2_spi_apps_clk>;
+		status = "disabled";
+	};
+
+	spi_3: spi@c177000 { /* BLSP1 QUP3 */
+		compatible = "qcom,spi-qup-v2";
+		#address-cells = <1>;
+		#size-cells = <0>;
+		reg-names = "spi_physical", "spi_bam_physical";
+		reg = <0xC177000 0x600>,
+		      <0xC144000 0x25000>;
+		interrupt-names = "spi_irq", "spi_bam_irq";
+		interrupts = <0 97 0>, <0 238 0>;
+		spi-max-frequency = <50000000>;
+		qcom,use-bam;
+		qcom,ver-reg-exists;
+		qcom,bam-consumer-pipe-index = <10>;
+		qcom,bam-producer-pipe-index = <11>;
+		qcom,master-id = <86>;
+		qcom,use-pinctrl;
+		pinctrl-names = "spi_default", "spi_sleep";
+		pinctrl-0 = <&spi_3_active>;
+		pinctrl-1 = <&spi_3_sleep>;
+		clock-names = "iface_clk", "core_clk";
+		clocks = <&clock_gcc clk_gcc_blsp1_ahb_clk>,
+			 <&clock_gcc clk_gcc_blsp1_qup3_spi_apps_clk>;
+		status = "disabled";
+	};
+
+	spi_4: spi@c178000 { /* BLSP1 QUP4 */
+		compatible = "qcom,spi-qup-v2";
+		#address-cells = <1>;
+		#size-cells = <0>;
+		reg-names = "spi_physical", "spi_bam_physical";
+		reg = <0xC178000 0x600>,
+		      <0xC144000 0x25000>;
+		interrupt-names = "spi_irq", "spi_bam_irq";
+		interrupts = <0 98 0>, <0 238 0>;
+		spi-max-frequency = <50000000>;
+		qcom,use-bam;
+		qcom,ver-reg-exists;
+		qcom,bam-consumer-pipe-index = <12>;
+		qcom,bam-producer-pipe-index = <13>;
+		qcom,master-id = <86>;
+		qcom,use-pinctrl;
+		pinctrl-names = "spi_default", "spi_sleep";
+		pinctrl-0 = <&spi_4_active>;
+		pinctrl-1 = <&spi_4_sleep>;
+		clock-names = "iface_clk", "core_clk";
+		clocks = <&clock_gcc clk_gcc_blsp1_ahb_clk>,
+			 <&clock_gcc clk_gcc_blsp1_qup4_spi_apps_clk>;
+		status = "disabled";
+	};
+
+	spi_5: spi@c179000 { /* BLSP1 QUP5 */
+		compatible = "qcom,spi-qup-v2";
+		#address-cells = <1>;
+		#size-cells = <0>;
+		reg-names = "spi_physical", "spi_bam_physical";
+		reg = <0xC179000 0x600>,
+		      <0xC144000 0x25000>;
+		interrupt-names = "spi_irq", "spi_bam_irq";
+		interrupts = <0 99 0>, <0 238 0>;
+		spi-max-frequency = <50000000>;
+		qcom,use-bam;
+		qcom,ver-reg-exists;
+		qcom,bam-consumer-pipe-index = <14>;
+		qcom,bam-producer-pipe-index = <15>;
+		qcom,master-id = <86>;
+		qcom,use-pinctrl;
+		pinctrl-names = "spi_default", "spi_sleep";
+		pinctrl-0 = <&spi_5_active>;
+		pinctrl-1 = <&spi_5_sleep>;
+		clock-names = "iface_clk", "core_clk";
+		clocks = <&clock_gcc clk_gcc_blsp1_ahb_clk>,
+			 <&clock_gcc clk_gcc_blsp1_qup5_spi_apps_clk>;
+		status = "disabled";
+	};
+
+	spi_6: spi@c17a000 { /* BLSP1 QUP6 */
+		compatible = "qcom,spi-qup-v2";
+		#address-cells = <1>;
+		#size-cells = <0>;
+		reg-names = "spi_physical", "spi_bam_physical";
+		reg = <0xC17A000 0x600>,
+		      <0xC144000 0x25000>;
+		interrupt-names = "spi_irq", "spi_bam_irq";
+		interrupts = <0 100 0>, <0 238 0>;
+		spi-max-frequency = <50000000>;
+		qcom,use-bam;
+		qcom,ver-reg-exists;
+		qcom,bam-consumer-pipe-index = <16>;
+		qcom,bam-producer-pipe-index = <17>;
+		qcom,master-id = <86>;
+		qcom,use-pinctrl;
+		pinctrl-names = "spi_default", "spi_sleep";
+		pinctrl-0 = <&spi_6_active>;
+		pinctrl-1 = <&spi_6_sleep>;
+		clock-names = "iface_clk", "core_clk";
+		clocks = <&clock_gcc clk_gcc_blsp1_ahb_clk>,
+			 <&clock_gcc clk_gcc_blsp1_qup6_spi_apps_clk>;
+		status = "disabled";
+	};
+
+	spi_7: spi@c1b5000 { /* BLSP2 QUP1 */
+		compatible = "qcom,spi-qup-v2";
+		#address-cells = <1>;
+		#size-cells = <0>;
+		reg-names = "spi_physical", "spi_bam_physical";
+		reg = <0xC1B5000 0x600>,
+		      <0xC184000 0x25000>;
+		interrupt-names = "spi_irq", "spi_bam_irq";
+		interrupts = <0 101 0>, <0 239 0>;
+		spi-max-frequency = <50000000>;
+		qcom,use-bam;
+		qcom,ver-reg-exists;
+		qcom,bam-consumer-pipe-index = <6>;
+		qcom,bam-producer-pipe-index = <7>;
+		qcom,master-id = <84>;
+		qcom,use-pinctrl;
+		pinctrl-names = "spi_default", "spi_sleep";
+		pinctrl-0 = <&spi_7_active>;
+		pinctrl-1 = <&spi_7_sleep>;
+		clock-names = "iface_clk", "core_clk";
+		clocks = <&clock_gcc clk_gcc_blsp2_ahb_clk>,
+			 <&clock_gcc clk_gcc_blsp2_qup1_spi_apps_clk>;
+		status = "disabled";
+	};
+
+	spi_8: spi@c1b6000 { /* BLSP2 QUP2 */
+		compatible = "qcom,spi-qup-v2";
+		#address-cells = <1>;
+		#size-cells = <0>;
+		reg-names = "spi_physical", "spi_bam_physical";
+		reg = <0xC1B6000 0x600>,
+		      <0xC184000 0x25000>;
+		interrupt-names = "spi_irq", "spi_bam_irq";
+		interrupts = <0 102 0>, <0 239 0>;
+		spi-max-frequency = <50000000>;
+		qcom,use-bam;
+		qcom,ver-reg-exists;
+		qcom,bam-consumer-pipe-index = <8>;
+		qcom,bam-producer-pipe-index = <9>;
+		qcom,master-id = <84>;
+		qcom,use-pinctrl;
+		pinctrl-names = "spi_default", "spi_sleep";
+		pinctrl-0 = <&spi_8_active>;
+		pinctrl-1 = <&spi_8_sleep>;
+		clock-names = "iface_clk", "core_clk";
+		clocks = <&clock_gcc clk_gcc_blsp2_ahb_clk>,
+			 <&clock_gcc clk_gcc_blsp2_qup2_spi_apps_clk>;
+		status = "disabled";
+	};
+
+	spi_9: spi@c1b7000 { /* BLSP2 QUP3 */
+		compatible = "qcom,spi-qup-v2";
+		#address-cells = <1>;
+		#size-cells = <0>;
+		reg-names = "spi_physical", "spi_bam_physical";
+		reg = <0xC1B7000 0x600>,
+		      <0xC184000 0x25000>;
+		interrupt-names = "spi_irq", "spi_bam_irq";
+		interrupts = <0 103 0>, <0 239 0>;
+		spi-max-frequency = <50000000>;
+		qcom,use-bam;
+		qcom,ver-reg-exists;
+		qcom,bam-consumer-pipe-index = <10>;
+		qcom,bam-producer-pipe-index = <11>;
+		qcom,master-id = <84>;
+		qcom,use-pinctrl;
+		pinctrl-names = "spi_default", "spi_sleep";
+		pinctrl-0 = <&spi_9_active>;
+		pinctrl-1 = <&spi_9_sleep>;
+		clock-names = "iface_clk", "core_clk";
+		clocks = <&clock_gcc clk_gcc_blsp2_ahb_clk>,
+			 <&clock_gcc clk_gcc_blsp2_qup3_spi_apps_clk>;
+		status = "disabled";
+	};
+
+	spi_10: spi@c1b8000 { /* BLSP2 QUP4 */
+		compatible = "qcom,spi-qup-v2";
+		#address-cells = <1>;
+		#size-cells = <0>;
+		reg-names = "spi_physical", "spi_bam_physical";
+		reg = <0xC1B8000 0x600>,
+		      <0xC184000 0x25000>;
+		interrupt-names = "spi_irq", "spi_bam_irq";
+		interrupts = <0 104 0>, <0 239 0>;
+		spi-max-frequency = <50000000>;
+		qcom,use-bam;
+		qcom,ver-reg-exists;
+		qcom,bam-consumer-pipe-index = <12>;
+		qcom,bam-producer-pipe-index = <13>;
+		qcom,master-id = <84>;
+		qcom,use-pinctrl;
+		pinctrl-names = "spi_default", "spi_sleep";
+		pinctrl-0 = <&spi_10_active>;
+		pinctrl-1 = <&spi_10_sleep>;
+		clock-names = "iface_clk", "core_clk";
+		clocks = <&clock_gcc clk_gcc_blsp2_ahb_clk>,
+			 <&clock_gcc clk_gcc_blsp2_qup4_spi_apps_clk>;
+		status = "disabled";
+	};
+
+	spi_11: spi@c1b9000 { /* BLSP2 QUP5 */
+		compatible = "qcom,spi-qup-v2";
+		#address-cells = <1>;
+		#size-cells = <0>;
+		reg-names = "spi_physical", "spi_bam_physical";
+		reg = <0xC1B9000 0x600>,
+		      <0xC184000 0x25000>;
+		interrupt-names = "spi_irq", "spi_bam_irq";
+		interrupts = <0 105 0>, <0 239 0>;
+		spi-max-frequency = <50000000>;
+		qcom,use-bam;
+		qcom,ver-reg-exists;
+
+		qcom,bam-consumer-pipe-index = <14>;
+		qcom,bam-producer-pipe-index = <15>;
+		qcom,master-id = <84>;
+		qcom,use-pinctrl;
+		pinctrl-names = "spi_default", "spi_sleep";
+		pinctrl-0 = <&spi_11_active>;
+		pinctrl-1 = <&spi_11_sleep>;
+		clock-names = "iface_clk", "core_clk";
+		clocks = <&clock_gcc clk_gcc_blsp2_ahb_clk>,
+			 <&clock_gcc clk_gcc_blsp2_qup5_spi_apps_clk>;
+		status = "disabled";
+	};
+
+	spi_12: spi@c1ba000 { /* BLSP2 QUP6 */
+		compatible = "qcom,spi-qup-v2";
+		#address-cells = <1>;
+		#size-cells = <0>;
+		reg-names = "spi_physical", "spi_bam_physical";
+		reg = <0xC1BA000 0x600>,
+		      <0xC184000 0x25000>;
+		interrupt-names = "spi_irq", "spi_bam_irq";
+		interrupts = <0 106 0>, <0 239 0>;
+		spi-max-frequency = <50000000>;
+		qcom,use-bam;
+		qcom,ver-reg-exists;
+		qcom,bam-consumer-pipe-index = <16>;
+		qcom,bam-producer-pipe-index = <17>;
+		qcom,master-id = <84>;
+		qcom,use-pinctrl;
+		pinctrl-names = "spi_default", "spi_sleep";
+		pinctrl-0 = <&spi_12_active>;
+		pinctrl-1 = <&spi_12_sleep>;
+		clock-names = "iface_clk", "core_clk";
+		clocks = <&clock_gcc clk_gcc_blsp2_ahb_clk>,
+			 <&clock_gcc clk_gcc_blsp2_qup6_spi_apps_clk>;
+		status = "disabled";
+	};
+
+	blsp1_uart1_hs: uart@c16f000 { /* BLSP1 UART1 */
+		compatible = "qcom,msm-hsuart-v14";
+		reg = <0xC16F000 0x200>,
+		    <0xC144000 0x25000>;
+		reg-names = "core_mem", "bam_mem";
+		interrupt-names = "core_irq", "bam_irq", "wakeup_irq";
+		#address-cells = <0>;
+		interrupt-parent = <&blsp1_uart1_hs>;
+		interrupts = <0 1 2>;
+		#interrupt-cells = <1>;
+		interrupt-map-mask = <0xffffffff>;
+		interrupt-map = <0 &intc 0 0 107 0
+			    1 &intc 0 0 238 0
+			    2 &tlmm 1 0>;
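+		/*
+		 * The map above routes local IRQs 0/1/2 (core_irq, bam_irq,
+		 * wakeup_irq) to interrupts 107 and 238 at the GIC and to
+		 * TLMM GPIO 1, which doubles as the wakeup source.
+		 */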
+
+		qcom,inject-rx-on-wakeup;
+		qcom,rx-char-to-inject = <0xFD>;
+
+		qcom,bam-tx-ep-pipe-index = <0>;
+		qcom,bam-rx-ep-pipe-index = <1>;
+		qcom,master-id = <86>;
+		clock-names = "core_clk", "iface_clk";
+		clocks = <&clock_gcc clk_gcc_blsp1_uart1_apps_clk>,
+		    <&clock_gcc clk_gcc_blsp1_ahb_clk>;
+		pinctrl-names = "sleep", "default";
+		pinctrl-0 = <&blsp1_uart1_sleep>;
+		pinctrl-1 = <&blsp1_uart1_active>;
+
+		qcom,msm-bus,name = "buart1";
+		qcom,msm-bus,num-cases = <2>;
+		qcom,msm-bus,num-paths = <1>;
+		qcom,msm-bus,vectors-KBps =
+			    <86 512 0 0>,
+			    <86 512 500 800>;
+		status = "disabled";
+	};
+
+	blsp1_uart2_hs: uart@c170000 { /* BLSP1 UART2 */
+		compatible = "qcom,msm-hsuart-v14";
+		reg = <0xC170000 0x200>,
+		    <0xC144000 0x25000>;
+		reg-names = "core_mem", "bam_mem";
+		interrupt-names = "core_irq", "bam_irq", "wakeup_irq";
+		#address-cells = <0>;
+		interrupt-parent = <&blsp1_uart2_hs>;
+		interrupts = <0 1 2>;
+		#interrupt-cells = <1>;
+		interrupt-map-mask = <0xffffffff>;
+		interrupt-map = <0 &intc 0 0 108 0
+			    1 &intc 0 0 238 0
+			    2 &tlmm 34 0>;
+
+		qcom,inject-rx-on-wakeup;
+		qcom,rx-char-to-inject = <0xFD>;
+
+		qcom,bam-tx-ep-pipe-index = <2>;
+		qcom,bam-rx-ep-pipe-index = <3>;
+		qcom,master-id = <86>;
+		clock-names = "core_clk", "iface_clk";
+		clocks = <&clock_gcc clk_gcc_blsp1_uart2_apps_clk>,
+		    <&clock_gcc clk_gcc_blsp1_ahb_clk>;
+		pinctrl-names = "sleep", "default";
+		pinctrl-0 = <&blsp1_uart2_sleep>;
+		pinctrl-1 = <&blsp1_uart2_active>;
+
+		qcom,msm-bus,name = "buart2";
+		qcom,msm-bus,num-cases = <2>;
+		qcom,msm-bus,num-paths = <1>;
+		qcom,msm-bus,vectors-KBps =
+			    <86 512 0 0>,
+			    <86 512 500 800>;
+		status = "disabled";
+	};
+
+	blsp1_uart3_hs: uart@c171000 { /* BLSP1 UART3 */
+		compatible = "qcom,msm-hsuart-v14";
+		reg = <0xC171000 0x200>,
+		    <0xC144000 0x25000>;
+		reg-names = "core_mem", "bam_mem";
+		interrupt-names = "core_irq", "bam_irq", "wakeup_irq";
+		#address-cells = <0>;
+		interrupt-parent = <&blsp1_uart3_hs>;
+		interrupts = <0 1 2>;
+		#interrupt-cells = <1>;
+		interrupt-map-mask = <0xffffffff>;
+		interrupt-map = <0 &intc 0 0 109 0
+			    1 &intc 0 0 238 0
+			    2 &tlmm 46 0>;
+
+		qcom,inject-rx-on-wakeup;
+		qcom,rx-char-to-inject = <0xFD>;
+
+		qcom,bam-tx-ep-pipe-index = <4>;
+		qcom,bam-rx-ep-pipe-index = <5>;
+		qcom,master-id = <86>;
+		clock-names = "core_clk", "iface_clk";
+		clocks = <&clock_gcc clk_gcc_blsp1_uart3_apps_clk>,
+		    <&clock_gcc clk_gcc_blsp1_ahb_clk>;
+		pinctrl-names = "sleep", "default";
+		pinctrl-0 = <&blsp1_uart3_tx_sleep>, <&blsp1_uart3_rxcts_sleep>,
+					<&blsp1_uart3_rfr_sleep>;
+		pinctrl-1 = <&blsp1_uart3_tx_active>,
+			<&blsp1_uart3_rxcts_active>, <&blsp1_uart3_rfr_active>;
+
+		qcom,msm-bus,name = "buart3";
+		qcom,msm-bus,num-cases = <2>;
+		qcom,msm-bus,num-paths = <1>;
+		qcom,msm-bus,vectors-KBps =
+			    <86 512 0 0>,
+			    <86 512 500 800>;
+		status = "disabled";
+	};
+
+	blsp2_uart1_hs: uart@c1af000 { /* BLSP2 UART1 */
+		compatible = "qcom,msm-hsuart-v14";
+		reg = <0xC1AF000 0x200>,
+		    <0xC184000 0x25000>;
+		reg-names = "core_mem", "bam_mem";
+		interrupt-names = "core_irq", "bam_irq", "wakeup_irq";
+		#address-cells = <0>;
+		interrupt-parent = <&blsp2_uart1_hs>;
+		interrupts = <0 1 2>;
+		#interrupt-cells = <1>;
+		interrupt-map-mask = <0xffffffff>;
+		interrupt-map = <0 &intc 0 0 113 0
+			    1 &intc 0 0 239 0
+			    2 &tlmm 54 0>;
+
+		qcom,inject-rx-on-wakeup;
+		qcom,rx-char-to-inject = <0xFD>;
+
+		qcom,bam-tx-ep-pipe-index = <0>;
+		qcom,bam-rx-ep-pipe-index = <1>;
+		qcom,master-id = <84>;
+		clock-names = "core_clk", "iface_clk";
+		clocks = <&clock_gcc clk_gcc_blsp2_uart1_apps_clk>,
+		    <&clock_gcc clk_gcc_blsp2_ahb_clk>;
+		pinctrl-names = "sleep", "default";
+		pinctrl-0 = <&blsp2_uart1_sleep>;
+		pinctrl-1 = <&blsp2_uart1_active>;
+
+		qcom,msm-bus,name = "buart1";
+		qcom,msm-bus,num-cases = <2>;
+		qcom,msm-bus,num-paths = <1>;
+		qcom,msm-bus,vectors-KBps =
+			    <86 512 0 0>,
+			    <86 512 500 800>;
+		status = "disabled";
+	};
+
+	blsp2_uart2_hs: uart@c1b0000 { /* BLSP2 UART2 */
+		compatible = "qcom,msm-hsuart-v14";
+		reg = <0xC1B0000 0x200>,
+		    <0xC184000 0x25000>;
+		reg-names = "core_mem", "bam_mem";
+		interrupt-names = "core_irq", "bam_irq", "wakeup_irq";
+		#address-cells = <0>;
+		interrupt-parent = <&blsp2_uart2_hs>;
+		interrupts = <0 1 2>;
+		#interrupt-cells = <1>;
+		interrupt-map-mask = <0xffffffff>;
+		interrupt-map = <0 &intc 0 0 114 0
+			    1 &intc 0 0 239 0
+			    2 &tlmm 5 0>;
+
+		qcom,inject-rx-on-wakeup;
+		qcom,rx-char-to-inject = <0xFD>;
+
+		qcom,bam-tx-ep-pipe-index = <2>;
+		qcom,bam-rx-ep-pipe-index = <3>;
+		qcom,master-id = <84>;
+		clock-names = "core_clk", "iface_clk";
+		clocks = <&clock_gcc clk_gcc_blsp2_uart2_apps_clk>,
+		    <&clock_gcc clk_gcc_blsp2_ahb_clk>;
+		pinctrl-names = "sleep", "default";
+		pinctrl-0 = <&blsp2_uart2_sleep>;
+		pinctrl-1 = <&blsp2_uart2_active>;
+
+		qcom,msm-bus,name = "buart2";
+		qcom,msm-bus,num-cases = <2>;
+		qcom,msm-bus,num-paths = <1>;
+		qcom,msm-bus,vectors-KBps =
+			    <86 512 0 0>,
+			    <86 512 500 800>;
+		status = "disabled";
+	};
+
+	blsp2_uart3_hs: uart@c1b1000 { /* BLSP2 UART3 */
+		compatible = "qcom,msm-hsuart-v14";
+		reg = <0xC1B1000 0x200>,
+		    <0xC184000 0x25000>;
+		reg-names = "core_mem", "bam_mem";
+		interrupt-names = "core_irq", "bam_irq", "wakeup_irq";
+		#address-cells = <0>;
+		interrupt-parent = <&blsp2_uart3_hs>;
+		interrupts = <0 1 2>;
+		#interrupt-cells = <1>;
+		interrupt-map-mask = <0xffffffff>;
+		interrupt-map = <0 &intc 0 0 115 0
+			    1 &intc 0 0 239 0
+			    2 &tlmm 50 0>;
+
+		qcom,inject-rx-on-wakeup;
+		qcom,rx-char-to-inject = <0xFD>;
+
+		qcom,bam-tx-ep-pipe-index = <4>;
+		qcom,bam-rx-ep-pipe-index = <5>;
+		qcom,master-id = <84>;
+		clock-names = "core_clk", "iface_clk";
+		clocks = <&clock_gcc clk_gcc_blsp2_uart3_apps_clk>,
+		    <&clock_gcc clk_gcc_blsp2_ahb_clk>;
+		pinctrl-names = "sleep", "default";
+		pinctrl-0 = <&blsp2_uart3_sleep>;
+		pinctrl-1 = <&blsp2_uart3_active>;
+
+		qcom,msm-bus,name = "buart3";
+		qcom,msm-bus,num-cases = <2>;
+		qcom,msm-bus,num-paths = <1>;
+		qcom,msm-bus,vectors-KBps =
+			    <86 512 0 0>,
+			    <86 512 500 800>;
+		status = "disabled";
+	};
+};
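
All six high-speed UARTs are added with status = "disabled", so none of them probe until a board-level dts overrides that. A minimal sketch of such an override, assuming the labels defined above:

	/* hypothetical board .dts fragment */
	&blsp2_uart1_hs {
		status = "okay";
	};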
diff -Nruw linux-4.4.115-fbx/arch/arm/boot/dts/qcom./msm8998-bus.dtsi linux-4.4.115-fbx/arch/arm/boot/dts/qcom/msm8998-bus.dtsi
--- linux-4.4.115-fbx/arch/arm/boot/dts/qcom./msm8998-bus.dtsi	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/arch/arm/boot/dts/qcom/msm8998-bus.dtsi	2019-01-22 16:16:21.191225470 +0100
@@ -0,0 +1,1467 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <dt-bindings/msm/msm-bus-ids.h>
+
+&soc {
+	ad_hoc_bus: ad-hoc-bus {
+		/*Version = 4 */
+		compatible = "qcom,msm-bus-device";
+		reg = <0x1620000 0x40000>,
+			<0x1000000 0x80000>,
+			<0x1500000 0x10000>,
+			<0x1660000 0x60000>,
+			<0x1700000 0x60000>,
+			<0x17900000 0x10000>,
+			<0x1740000 0x10000>,
+			<0x1740000 0x10000>;
+
+		reg-names = "snoc-base", "bimc-base", "cnoc-base",
+			"a1noc-base", "a2noc-base", "gnoc-base",
+			"mmnoc-ahb-base", "mnoc-base";
+
+		/*Buses*/
+		fab_a1noc: fab-a1noc {
+			cell-id = <MSM_BUS_FAB_A1_NOC>;
+			label = "fab-a1noc";
+			qcom,fab-dev;
+			qcom,base-name = "a1noc-base";
+			qcom,bus-type = <1>;
+			qcom,qos-off = <4096>;
+			qcom,base-offset = <36864>;
+			clock-names = "bus_clk", "bus_a_clk";
+			clocks = <&clock_gcc clk_aggre1_noc_clk>,
+				<&clock_gcc clk_aggre1_noc_a_clk>;
+			qcom,node-qos-clks {
+				clock-names =
+				"clk-ufs-axi-clk",
+				"clk-aggre1-ufs-axi-no-rate",
+				"clk-aggre1-usb3-axi-cfg-no-rate",
+				"clk-blsp2-ahb-no-rate";
+				clocks =
+				<&clock_gcc clk_gcc_ufs_axi_clk>,
+				<&clock_gcc clk_gcc_aggre1_ufs_axi_clk>,
+				<&clock_gcc clk_gcc_aggre1_usb3_axi_clk>,
+				<&clock_gcc clk_gcc_blsp2_ahb_clk>;
+			};
+		};
+
+		fab_a2noc: fab-a2noc {
+			cell-id = <MSM_BUS_FAB_A2_NOC>;
+			label = "fab-a2noc";
+			qcom,fab-dev;
+			qcom,base-name = "a2noc-base";
+			qcom,bus-type = <1>;
+			qcom,qos-off = <4096>;
+			qcom,base-offset = <20480>;
+			clock-names = "bus_clk", "bus_a_clk";
+			clocks = <&clock_gcc clk_aggre2_noc_clk>,
+				<&clock_gcc clk_aggre2_noc_a_clk>;
+			qcom,node-qos-clks {
+				clock-names =
+				"clk-ipa-clk",
+				"clk-sdcc2-ahb-no-rate",
+				"clk-sdcc4-ahb-no-rate",
+				"clk-blsp1-ahb-no-rate";
+				clocks =
+				<&clock_gcc clk_ipa_clk>,
+				<&clock_gcc clk_gcc_sdcc2_ahb_clk>,
+				<&clock_gcc clk_gcc_sdcc4_ahb_clk>,
+				<&clock_gcc clk_gcc_blsp1_ahb_clk>;
+			};
+		};
+
+		fab_bimc: fab-bimc {
+			cell-id = <MSM_BUS_FAB_BIMC>;
+			label = "fab-bimc";
+			qcom,fab-dev;
+			qcom,base-name = "bimc-base";
+			qcom,bus-type = <2>;
+			qcom,util-fact = <153>;
+			clock-names = "bus_clk", "bus_a_clk";
+			clocks = <&clock_gcc clk_bimc_msmbus_clk>,
+				<&clock_gcc clk_bimc_msmbus_a_clk>;
+		};
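
fab_bimc is the one fabric here with qcom,bus-type = <2> (the BIMC DDR controller; the NoC fabrics use <1>), and its qcom,util-fact = <153> reads as a utilisation factor of roughly 1.53x applied before bandwidth is converted into a clock request:

	/* msm-bus clock derivation, as commonly implemented (an assumption, not stated in this patch): */
	/*   effective_bw = requested_bw * util_fact / 100                                             */
	/*   fabric_clk  >= effective_bw / buswidth                                                    */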
+
+		fab_cnoc: fab-cnoc {
+			cell-id = <MSM_BUS_FAB_CONFIG_NOC>;
+			label = "fab-cnoc";
+			qcom,fab-dev;
+			qcom,base-name = "cnoc-base";
+			qcom,bus-type = <1>;
+			clock-names = "bus_clk", "bus_a_clk";
+			clocks = <&clock_gcc clk_cnoc_clk>,
+				<&clock_gcc clk_cnoc_a_clk>;
+		};
+
+		fab_cr_virt: fab-cr_virt {
+			cell-id = <MSM_BUS_FAB_CR_VIRT>;
+			label = "fab-cr_virt";
+			qcom,virt-dev;
+			qcom,base-name = "cr_virt-base";
+			qcom,bypass-qos-prg;
+		};
+
+		fab_gnoc: fab-gnoc {
+			cell-id = <MSM_BUS_FAB_GNOC>;
+			label = "fab-gnoc";
+			qcom,virt-dev;
+			qcom,base-name = "gnoc-base";
+			qcom,bypass-qos-prg;
+		};
+
+		fab_mnoc: fab-mnoc {
+			cell-id = <MSM_BUS_FAB_MMSS_NOC>;
+			label = "fab-mnoc";
+			qcom,fab-dev;
+			qcom,base-name = "mnoc-base";
+			qcom,bus-type = <1>;
+			qcom,qos-off = <4096>;
+			qcom,base-offset = <16384>;
+			qcom,util-fact = <153>;
+			clock-names = "bus_clk", "bus_a_clk";
+			clocks = <&clock_gcc clk_mmssnoc_axi_clk>,
+				<&clock_gcc clk_mmssnoc_axi_a_clk>;
+			clk-camss-ahb-no-rate-supply =
+					<&gdsc_camss_top>;
+			clk-video-ahb-no-rate-supply =
+					<&gdsc_venus>;
+			clk-video-axi-no-rate-supply =
+					<&gdsc_venus>;
+			qcom,node-qos-clks {
+				clock-names =
+				"clk-noc-cfg-ahb-no-rate",
+				"clk-mnoc-ahb-no-rate",
+				"clk-camss-ahb-no-rate",
+				"clk-video-ahb-no-rate",
+				"clk-video-axi-no-rate";
+				clocks =
+				<&clock_gcc clk_mmssnoc_axi_clk>,
+				<&clock_gcc clk_gcc_mmss_noc_cfg_ahb_clk>,
+				<&clock_mmss clk_mmss_mnoc_ahb_clk>,
+				<&clock_mmss clk_mmss_camss_ahb_clk>,
+				<&clock_mmss clk_mmss_video_ahb_clk>,
+				<&clock_mmss clk_mmss_video_axi_clk>;
+			};
+
+		};
+
+		fab_snoc: fab-snoc {
+			cell-id = <MSM_BUS_FAB_SYS_NOC>;
+			label = "fab-snoc";
+			qcom,fab-dev;
+			qcom,base-name = "snoc-base";
+			qcom,bus-type = <1>;
+			qcom,qos-off = <4096>;
+			qcom,base-offset = <20480>;
+			clock-names = "bus_clk", "bus_a_clk";
+			clocks = <&clock_gcc clk_snoc_clk>,
+				<&clock_gcc clk_snoc_a_clk>;
+		};
+
+		fab_mnoc_ahb: fab-mnoc-ahb {
+			cell-id = <MSM_BUS_FAB_MMSS_AHB>;
+			label = "fab-mnoc-ahb";
+			qcom,fab-dev;
+			qcom,base-name = "mmnoc-ahb-base";
+			qcom,bypass-qos-prg;
+			qcom,setrate-only-clk;
+			qcom,bus-type = <1>;
+			clock-names = "bus_clk", "bus_a_clk";
+			clocks = <&clock_mmss clk_ahb_clk_src>,
+			     <&clock_mmss clk_ahb_clk_src>;
+		};
+
+
+		/*Masters*/
+
+		mas_pcie_0: mas-pcie-0 {
+			cell-id = <MSM_BUS_MASTER_PCIE>;
+			label = "mas-pcie-0";
+			qcom,buswidth = <16>;
+			qcom,agg-ports = <1>;
+			qcom,ap-owned;
+			qcom,qport = <1>;
+			qcom,qos-mode = "fixed";
+			qcom,connections = <&slv_a1noc_snoc>;
+			qcom,prio1 = <1>;
+			qcom,prio0 = <1>;
+			qcom,bus-dev = <&fab_a1noc>;
+			qcom,mas-rpm-id = <ICBID_MASTER_PCIE_0>;
+		};
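
Masters with qcom,qos-mode = "fixed" pin a hardware priority through qcom,prio0/qcom,prio1 at the port named by qcom,qport, while the "bypass" masters further down (GPU, MDP, Venus, VFE) skip QoS programming entirely and rely on bandwidth votes alone. The two patterns side by side:

	/* fixed: priority programmed into the NoC QoS registers at the qport */
	qcom,qport = <1>;
	qcom,qos-mode = "fixed";
	qcom,prio1 = <1>;
	qcom,prio0 = <1>;

	/* bypass: port left unprogrammed */
	qcom,qos-mode = "bypass";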
+
+		mas_usb3: mas-usb3 {
+			cell-id = <MSM_BUS_MASTER_USB3>;
+			label = "mas-usb3";
+			qcom,buswidth = <16>;
+			qcom,agg-ports = <1>;
+			qcom,ap-owned;
+			qcom,qport = <2>;
+			qcom,qos-mode = "fixed";
+			qcom,connections = <&slv_a1noc_snoc>;
+			qcom,prio1 = <1>;
+			qcom,prio0 = <1>;
+			qcom,bus-dev = <&fab_a1noc>;
+			qcom,mas-rpm-id = <ICBID_MASTER_USB3>;
+		};
+
+		mas_ufs: mas-ufs {
+			cell-id = <MSM_BUS_MASTER_UFS>;
+			label = "mas-ufs";
+			qcom,buswidth = <16>;
+			qcom,agg-ports = <1>;
+			qcom,ap-owned;
+			qcom,qport = <0>;
+			qcom,qos-mode = "fixed";
+			qcom,connections = <&slv_a1noc_snoc>;
+			qcom,prio1 = <1>;
+			qcom,prio0 = <1>;
+			qcom,bus-dev = <&fab_a1noc>;
+			qcom,mas-rpm-id = <ICBID_MASTER_UFS>;
+		};
+
+		mas_blsp_2: mas-blsp-2 {
+			cell-id = <MSM_BUS_MASTER_BLSP_2>;
+			label = "mas-blsp-2";
+			qcom,buswidth = <16>;
+			qcom,agg-ports = <1>;
+			qcom,qport = <4>;
+			qcom,qos-mode = "fixed";
+			qcom,connections = <&slv_a1noc_snoc>;
+			qcom,bus-dev = <&fab_a1noc>;
+			qcom,mas-rpm-id = <ICBID_MASTER_BLSP_2>;
+		};
+
+		mas_cnoc_a2noc: mas-cnoc-a2noc {
+			cell-id = <0>;
+			label = "mas-cnoc-a2noc";
+			qcom,buswidth = <8>;
+			qcom,agg-ports = <1>;
+			qcom,ap-owned;
+			qcom,connections = <&slv_a2noc_snoc>;
+			qcom,bus-dev = <&fab_a2noc>;
+			qcom,mas-rpm-id = <ICBID_MASTER_CNOC_A2NOC>;
+		};
+
+		mas_ipa: mas-ipa {
+			cell-id = <MSM_BUS_MASTER_IPA>;
+			label = "mas-ipa";
+			qcom,buswidth = <8>;
+			qcom,agg-ports = <1>;
+			qcom,ap-owned;
+			qcom,qport = <1>;
+			qcom,qos-mode = "fixed";
+			qcom,connections = <&slv_a2noc_snoc>;
+			qcom,prio1 = <1>;
+			qcom,prio0 = <1>;
+			qcom,bus-dev = <&fab_a2noc>;
+			qcom,mas-rpm-id = <ICBID_MASTER_IPA>;
+		};
+
+		mas_sdcc_2: mas-sdcc-2 {
+			cell-id = <MSM_BUS_MASTER_SDCC_2>;
+			label = "mas-sdcc-2";
+			qcom,buswidth = <8>;
+			qcom,agg-ports = <1>;
+			qcom,qport = <6>;
+			qcom,qos-mode = "fixed";
+			qcom,connections = <&slv_a2noc_snoc>;
+			qcom,bus-dev = <&fab_a2noc>;
+			qcom,mas-rpm-id = <ICBID_MASTER_SDCC_2>;
+		};
+
+		mas_sdcc_4: mas-sdcc-4 {
+			cell-id = <MSM_BUS_MASTER_SDCC_4>;
+			label = "mas-sdcc-4";
+			qcom,buswidth = <8>;
+			qcom,agg-ports = <1>;
+			qcom,qport = <7>;
+			qcom,qos-mode = "fixed";
+			qcom,connections = <&slv_a2noc_snoc>;
+			qcom,bus-dev = <&fab_a2noc>;
+			qcom,mas-rpm-id = <ICBID_MASTER_SDCC_4>;
+		};
+
+		mas_tsif: mas-tsif {
+			cell-id = <MSM_BUS_MASTER_TSIF>;
+			label = "mas-tsif";
+			qcom,buswidth = <4>;
+			qcom,agg-ports = <1>;
+			qcom,ap-owned;
+			qcom,connections = <&slv_a2noc_snoc>;
+			qcom,bus-dev = <&fab_a2noc>;
+			qcom,mas-rpm-id = <ICBID_MASTER_TSIF>;
+		};
+
+		mas_blsp_1: mas-blsp-1 {
+			cell-id = <MSM_BUS_MASTER_BLSP_1>;
+			label = "mas-blsp-1";
+			qcom,buswidth = <16>;
+			qcom,agg-ports = <1>;
+			qcom,qport = <8>;
+			qcom,qos-mode = "fixed";
+			qcom,connections = <&slv_a2noc_snoc>;
+			qcom,bus-dev = <&fab_a2noc>;
+			qcom,mas-rpm-id = <ICBID_MASTER_BLSP_1>;
+		};
+
+		mas_cr_virt_a2noc: mas-cr-virt-a2noc {
+			cell-id = <MSM_BUS_MASTER_CRVIRT_A2NOC>;
+			label = "mas-cr-virt-a2noc";
+			qcom,buswidth = <8>;
+			qcom,agg-ports = <1>;
+			qcom,qport = <9>;
+			qcom,qos-mode = "fixed";
+			qcom,connections = <&slv_a2noc_snoc>;
+			qcom,bus-dev = <&fab_a2noc>;
+			qcom,mas-rpm-id = <ICBID_MASTER_CRVIRT_A2NOC>;
+		};
+
+		mas_gnoc_bimc: mas-gnoc-bimc {
+			cell-id = <MSM_BUS_MASTER_GNOC_BIMC>;
+			label = "mas-gnoc-bimc";
+			qcom,buswidth = <8>;
+			qcom,agg-ports = <2>;
+			qcom,ap-owned;
+			qcom,qport = <0>;
+			qcom,qos-mode = "fixed";
+			qcom,connections = <&slv_ebi &slv_bimc_snoc_0>;
+			qcom,prio-lvl = <0>;
+			qcom,prio-rd = <0>;
+			qcom,prio-wr = <0>;
+			qcom,bus-dev = <&fab_bimc>;
+			qcom,mas-rpm-id = <ICBID_MASTER_GNOC_BIMC>;
+		};
+
+		mas_oxili: mas-oxili {
+			cell-id = <MSM_BUS_MASTER_GRAPHICS_3D>;
+			label = "mas-oxili";
+			qcom,buswidth = <8>;
+			qcom,agg-ports = <2>;
+			qcom,ap-owned;
+			qcom,qport = <1>;
+			qcom,qos-mode = "bypass";
+			qcom,connections = < &slv_bimc_snoc_1
+				&slv_hmss_l3 &slv_ebi &slv_bimc_snoc_0>;
+			qcom,bus-dev = <&fab_bimc>;
+			qcom,mas-rpm-id = <ICBID_MASTER_GFX3D>;
+		};
+
+		mas_mnoc_bimc: mas-mnoc-bimc {
+			cell-id = <MSM_BUS_MNOC_BIMC_MAS>;
+			label = "mas-mnoc-bimc";
+			qcom,buswidth = <8>;
+			qcom,agg-ports = <2>;
+			qcom,ap-owned;
+			qcom,qport = <2>;
+			qcom,qos-mode = "bypass";
+			qcom,connections = < &slv_bimc_snoc_1
+				&slv_hmss_l3 &slv_ebi &slv_bimc_snoc_0>;
+			qcom,bus-dev = <&fab_bimc>;
+			qcom,mas-rpm-id = <ICBID_MASTER_MNOC_BIMC>;
+		};
+
+		mas_snoc_bimc: mas-snoc-bimc {
+			cell-id = <MSM_BUS_SNOC_BIMC_MAS>;
+			label = "mas-snoc-bimc";
+			qcom,buswidth = <8>;
+			qcom,agg-ports = <2>;
+			qcom,qport = <3>;
+			qcom,qos-mode = "bypass";
+			qcom,connections = < &slv_hmss_l3 &slv_ebi>;
+			qcom,bus-dev = <&fab_bimc>;
+			qcom,mas-rpm-id = <ICBID_MASTER_SNOC_BIMC>;
+		};
+
+		mas_snoc_cnoc: mas-snoc-cnoc {
+			cell-id = <MSM_BUS_SNOC_CNOC_MAS>;
+			label = "mas-snoc-cnoc";
+			qcom,buswidth = <8>;
+			qcom,agg-ports = <1>;
+			qcom,ap-owned;
+			qcom,connections = < &slv_skl &slv_blsp_2
+				&slv_message_ram
+				&slv_tlmm_west &slv_tsif
+				&slv_mpm &slv_bimc_cfg
+				&slv_tlmm_east &slv_spdm
+				&slv_pimem_cfg &slv_a1noc_smmu_cfg
+				&slv_blsp_1 &slv_clk_ctl
+				&slv_prng &slv_usb3_0
+				&slv_qdss_cfg &slv_qm_cfg
+				&slv_a2noc_cfg &slv_pmic_arb
+				&slv_ufs_cfg &slv_srvc_cnoc
+				&slv_ahb2phy &slv_ipa
+				&slv_glm &slv_snoc_cfg
+				&slv_ssc_cfg &slv_sdcc_2
+				&slv_sdcc_4 &slv_pdm
+				&slv_cnoc_mnoc_mmss_cfg &slv_cnoc_mnoc_cfg
+				&slv_mss_cfg &slv_imem_cfg
+				&slv_a1noc_cfg &slv_gpuss_cfg
+				&slv_tcsr &slv_tlmm_north>;
+			qcom,bus-dev = <&fab_cnoc>;
+			qcom,mas-rpm-id = <ICBID_MASTER_SNOC_CNOC>;
+		};
+
+		mas_qdss_dap: mas-qdss-dap {
+			cell-id = <MSM_BUS_MASTER_QDSS_DAP>;
+			label = "mas-qdss-dap";
+			qcom,buswidth = <8>;
+			qcom,agg-ports = <1>;
+			qcom,ap-owned;
+			qcom,connections = < &slv_skl &slv_blsp_2
+				&slv_message_ram
+				&slv_tlmm_west &slv_tsif
+				&slv_mpm &slv_bimc_cfg
+				&slv_tlmm_east &slv_spdm
+				&slv_pimem_cfg &slv_a1noc_smmu_cfg
+				&slv_blsp_1 &slv_clk_ctl
+				&slv_prng &slv_usb3_0
+				&slv_qdss_cfg &slv_qm_cfg
+				&slv_a2noc_cfg &slv_pmic_arb
+				&slv_ufs_cfg &slv_srvc_cnoc
+				&slv_ahb2phy &slv_ipa
+				&slv_glm &slv_snoc_cfg
+				&slv_sdcc_2 &slv_sdcc_4
+				&slv_pdm &slv_cnoc_mnoc_mmss_cfg
+				&slv_cnoc_mnoc_cfg &slv_mss_cfg
+				&slv_imem_cfg &slv_a1noc_cfg
+				&slv_gpuss_cfg &slv_ssc_cfg
+				&slv_tcsr &slv_tlmm_north
+				&slv_cnoc_a2noc>;
+			qcom,bus-dev = <&fab_cnoc>;
+			qcom,mas-rpm-id = <ICBID_MASTER_QDSS_DAP>;
+		};
+
+		mas_crypto_c0: mas-crypto-c0 {
+			cell-id = <MSM_BUS_MASTER_CRYPTO_CORE0>;
+			label = "mas-crypto-c0";
+			qcom,buswidth = <650>;
+			qcom,agg-ports = <1>;
+			qcom,connections = <&slv_cr_virt_a2noc>;
+			qcom,bus-dev = <&fab_a2noc>;
+			qcom,mas-rpm-id = <ICBID_MASTER_CRYPTO_CORE0>;
+		};
+
+		mas_apps_proc: mas-apps-proc {
+			cell-id = <MSM_BUS_MASTER_AMPSS_M0>;
+			label = "mas-apps-proc";
+			qcom,buswidth = <32>;
+			qcom,agg-ports = <1>;
+			qcom,ap-owned;
+			qcom,connections = <&slv_gnoc_bimc>;
+			qcom,bus-dev = <&fab_gnoc>;
+			qcom,mas-rpm-id = <ICBID_MASTER_APPSS_PROC>;
+		};
+
+		mas_cnoc_mnoc_mmss_cfg: mas-cnoc-mnoc-mmss-cfg {
+			cell-id = <MSM_BUS_MASTER_CNOC_MNOC_MMSS_CFG>;
+			label = "mas-cnoc-mnoc-mmss-cfg";
+			qcom,buswidth = <8>;
+			qcom,agg-ports = <1>;
+			qcom,ap-owned;
+			qcom,connections = < &slv_camera_throttle_cfg
+				&slv_venus_cfg &slv_misc_cfg
+				&slv_camera_cfg &slv_display_throttle_cfg
+				&slv_venus_throttle_cfg &slv_display_cfg
+				&slv_mmss_clk_cfg &slv_vmem_cfg
+				&slv_mmss_clk_xpu_cfg &slv_smmu_cfg>;
+			qcom,bus-dev = <&fab_mnoc_ahb>;
+			qcom,mas-rpm-id = <ICBID_MASTER_CNOC_MNOC_MMSS_CFG>;
+		};
+
+		mas_cnoc_mnoc_cfg: mas-cnoc-mnoc-cfg {
+			cell-id = <MSM_BUS_MASTER_CNOC_MNOC_CFG>;
+			label = "mas-cnoc-mnoc-cfg";
+			qcom,buswidth = <8>;
+			qcom,agg-ports = <1>;
+			qcom,ap-owned;
+			qcom,connections = <&slv_srvc_mnoc>;
+			qcom,bus-dev = <&fab_mnoc>;
+			qcom,mas-rpm-id = <ICBID_MASTER_CNOC_MNOC_CFG>;
+		};
+
+		mas_cpp: mas-cpp {
+			cell-id = <MSM_BUS_MASTER_CPP>;
+			label = "mas-cpp";
+			qcom,buswidth = <32>;
+			qcom,agg-ports = <1>;
+			qcom,ap-owned;
+			qcom,qport = <5>;
+			qcom,qos-mode = "bypass";
+			qcom,connections = <&slv_mnoc_bimc>;
+			qcom,bus-dev = <&fab_mnoc>;
+			qcom,mas-rpm-id = <ICBID_MASTER_CPP>;
+		};
+
+		mas_jpeg: mas-jpeg {
+			cell-id = <MSM_BUS_MASTER_JPEG>;
+			label = "mas-jpeg";
+			qcom,buswidth = <32>;
+			qcom,agg-ports = <1>;
+			qcom,ap-owned;
+			qcom,qport = <7>;
+			qcom,qos-mode = "bypass";
+			qcom,connections = <&slv_mnoc_bimc>;
+			qcom,bus-dev = <&fab_mnoc>;
+			qcom,mas-rpm-id = <ICBID_MASTER_JPEG>;
+		};
+
+		mas_mdp_p0: mas-mdp-p0 {
+			cell-id = <MSM_BUS_MASTER_MDP_PORT0>;
+			label = "mas-mdp-p0";
+			qcom,buswidth = <32>;
+			qcom,agg-ports = <1>;
+			qcom,ap-owned;
+			qcom,qport = <1>;
+			qcom,qos-mode = "bypass";
+			qcom,connections = <&slv_mnoc_bimc>;
+			qcom,bus-dev = <&fab_mnoc>;
+			qcom,vrail-comp = <25>;
+			qcom,mas-rpm-id = <ICBID_MASTER_MDP0>;
+			clk-mdss-axi-no-rate-supply =
+					<&gdsc_mdss>;
+			clk-mdss-ahb-no-rate-supply =
+					<&gdsc_mdss>;
+			qcom,node-qos-clks {
+				clock-names =
+				"clk-mdss-ahb-no-rate",
+				"clk-mdss-axi-no-rate";
+				clocks =
+				<&clock_mmss clk_mmss_mdss_ahb_clk>,
+				<&clock_mmss clk_mmss_mdss_axi_clk>;
+			};
+
+		};
+
+		mas_mdp_p1: mas-mdp-p1 {
+			cell-id = <MSM_BUS_MASTER_MDP_PORT1>;
+			label = "mas-mdp-p1";
+			qcom,buswidth = <32>;
+			qcom,agg-ports = <1>;
+			qcom,ap-owned;
+			qcom,qport = <2>;
+			qcom,qos-mode = "bypass";
+			qcom,connections = <&slv_mnoc_bimc>;
+			qcom,bus-dev = <&fab_mnoc>;
+			qcom,vrail-comp = <25>;
+			qcom,mas-rpm-id = <ICBID_MASTER_MDP1>;
+		};
+
+		mas_rotator: mas-rotator {
+			cell-id = <MSM_BUS_MASTER_ROTATOR>;
+			label = "mas-rotator";
+			qcom,buswidth = <32>;
+			qcom,agg-ports = <1>;
+			qcom,ap-owned;
+			qcom,qport = <0>;
+			qcom,qos-mode = "bypass";
+			qcom,connections = <&slv_mnoc_bimc>;
+			qcom,bus-dev = <&fab_mnoc>;
+			qcom,mas-rpm-id = <ICBID_MASTER_ROTATOR>;
+		};
+
+		mas_venus: mas-venus {
+			cell-id = <MSM_BUS_MASTER_VIDEO_P0>;
+			label = "mas-venus";
+			qcom,buswidth = <32>;
+			qcom,agg-ports = <2>;
+			qcom,ap-owned;
+			qcom,qport = <3 4>;
+			qcom,qos-mode = "bypass";
+			qcom,connections = <&slv_mnoc_bimc>;
+			qcom,bus-dev = <&fab_mnoc>;
+			qcom,mas-rpm-id = <ICBID_MASTER_VIDEO>;
+		};
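
mas_venus is the only master in this file that lists two entries in qcom,qport: both hardware ports are named and qcom,agg-ports = <2> tells the driver to treat them as one aggregated endpoint, presumably spreading a vote across the pair:

	qcom,agg-ports = <2>;
	qcom,qport = <3 4>;	/* one logical master over NoC QoS ports 3 and 4 */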
+
+		mas_vfe: mas-vfe {
+			cell-id = <MSM_BUS_MASTER_VFE>;
+			label = "mas-vfe";
+			qcom,buswidth = <32>;
+			qcom,agg-ports = <1>;
+			qcom,ap-owned;
+			qcom,qport = <6>;
+			qcom,qos-mode = "bypass";
+			qcom,connections = <&slv_mnoc_bimc>;
+			qcom,bus-dev = <&fab_mnoc>;
+			qcom,mas-rpm-id = <ICBID_MASTER_VFE>;
+		};
+
+		mas_venus_vmem: mas-venus-vmem {
+			cell-id = <MSM_BUS_MASTER_VIDEO_P0_OCMEM>;
+			label = "mas-venus-vmem";
+			qcom,buswidth = <32>;
+			qcom,agg-ports = <1>;
+			qcom,ap-owned;
+			qcom,connections = <&slv_vmem>;
+			qcom,bus-dev = <&fab_mnoc>;
+			qcom,mas-rpm-id = <ICBID_MASTER_VENUS_VMEM>;
+		};
+
+		mas_hmss: mas-hmss {
+			cell-id = <MSM_BUS_MASTER_HMSS>;
+			label = "mas-hmss";
+			qcom,buswidth = <16>;
+			qcom,agg-ports = <1>;
+			qcom,ap-owned;
+			qcom,qport = <3>;
+			qcom,qos-mode = "fixed";
+			qcom,connections = < &slv_pimem &slv_imem
+				&slv_snoc_bimc>;
+			qcom,prio1 = <1>;
+			qcom,prio0 = <1>;
+			qcom,bus-dev = <&fab_snoc>;
+			qcom,mas-rpm-id = <ICBID_MASTER_HMSS>;
+		};
+
+		mas_qdss_bam: mas-qdss-bam {
+			cell-id = <MSM_BUS_MASTER_QDSS_BAM>;
+			label = "mas-qdss-bam";
+			qcom,buswidth = <16>;
+			qcom,agg-ports = <1>;
+			qcom,ap-owned;
+			qcom,qport = <1>;
+			qcom,qos-mode = "fixed";
+			qcom,connections = < &slv_imem &slv_pimem &slv_snoc_cnoc
+				&slv_snoc_bimc>;
+			qcom,prio1 = <1>;
+			qcom,prio0 = <1>;
+			qcom,bus-dev = <&fab_snoc>;
+			qcom,mas-rpm-id = <ICBID_MASTER_QDSS_BAM>;
+		};
+
+		mas_snoc_cfg: mas-snoc-cfg {
+			cell-id = <MSM_BUS_MASTER_SNOC_CFG>;
+			label = "mas-snoc-cfg";
+			qcom,buswidth = <16>;
+			qcom,agg-ports = <1>;
+			qcom,connections = <&slv_srvc_snoc>;
+			qcom,bus-dev = <&fab_snoc>;
+			qcom,mas-rpm-id = <ICBID_MASTER_SNOC_CFG>;
+		};
+
+		mas_bimc_snoc_0: mas-bimc-snoc-0 {
+			cell-id = <MSM_BUS_BIMC_SNOC_MAS>;
+			label = "mas-bimc-snoc-0";
+			qcom,buswidth = <16>;
+			qcom,agg-ports = <1>;
+			qcom,connections = < &slv_pimem &slv_lpass &slv_hmss
+				 &slv_wlan &slv_snoc_cnoc
+				 &slv_imem &slv_qdss_stm>;
+			qcom,bus-dev = <&fab_snoc>;
+			qcom,mas-rpm-id = <ICBID_MASTER_BIMC_SNOC_0>;
+		};
+
+		mas_bimc_snoc_1: mas-bimc-snoc-1 {
+			cell-id = <MSM_BUS_BIMC_SNOC_1_MAS>;
+			label = "mas-bimc-snoc-1";
+			qcom,buswidth = <16>;
+			qcom,agg-ports = <1>;
+			qcom,ap-owned;
+			qcom,connections = <&slv_pcie_0>;
+			qcom,bus-dev = <&fab_snoc>;
+			qcom,mas-rpm-id = <ICBID_MASTER_BIMC_SNOC_1>;
+		};
+
+		mas_a1noc_snoc: mas-a1noc-snoc {
+			cell-id = <MSM_BUS_A1NOC_SNOC_MAS>;
+			label = "mas-a1noc-snoc";
+			qcom,buswidth = <16>;
+			qcom,agg-ports = <1>;
+			qcom,connections = < &slv_pimem &slv_pcie_0 &slv_lpass
+				&slv_hmss &slv_snoc_bimc
+				 &slv_snoc_cnoc &slv_imem
+				 &slv_qdss_stm>;
+			qcom,bus-dev = <&fab_snoc>;
+			qcom,mas-rpm-id = <ICBID_MASTER_A1NOC_SNOC>;
+		};
+
+		mas_a2noc_snoc: mas-a2noc-snoc {
+			cell-id = <MSM_BUS_A2NOC_SNOC_MAS>;
+			label = "mas-a2noc-snoc";
+			qcom,buswidth = <16>;
+			qcom,agg-ports = <1>;
+			qcom,connections = < &slv_pimem &slv_pcie_0 &slv_lpass
+				&slv_hmss &slv_snoc_bimc
+				 &slv_wlan &slv_snoc_cnoc
+				 &slv_imem &slv_qdss_stm>;
+			qcom,bus-dev = <&fab_snoc>;
+			qcom,mas-rpm-id = <ICBID_MASTER_A2NOC_SNOC>;
+		};
+
+		mas_qdss_etr: mas-qdss-etr {
+			cell-id = <MSM_BUS_MASTER_QDSS_ETR>;
+			label = "mas-qdss-etr";
+			qcom,buswidth = <16>;
+			qcom,agg-ports = <1>;
+			qcom,ap-owned;
+			qcom,qport = <2>;
+			qcom,qos-mode = "fixed";
+			qcom,connections = < &slv_imem &slv_pimem &slv_snoc_cnoc
+				&slv_snoc_bimc>;
+			qcom,prio1 = <1>;
+			qcom,prio0 = <1>;
+			qcom,bus-dev = <&fab_snoc>;
+			qcom,mas-rpm-id = <ICBID_MASTER_QDSS_ETR>;
+		};
+
+		/*Internal nodes*/
+
+		/*Slaves*/
+
+		slv_a1noc_snoc:slv-a1noc-snoc {
+			cell-id = <MSM_BUS_A1NOC_SNOC_SLV>;
+			label = "slv-a1noc-snoc";
+			qcom,buswidth = <16>;
+			qcom,agg-ports = <1>;
+			qcom,bus-dev = <&fab_a1noc>;
+			qcom,connections = <&mas_a1noc_snoc>;
+			qcom,slv-rpm-id = <ICBID_SLAVE_A1NOC_SNOC>;
+		};
+
+		slv_a2noc_snoc:slv-a2noc-snoc {
+			cell-id = <MSM_BUS_A2NOC_SNOC_SLV>;
+			label = "slv-a2noc-snoc";
+			qcom,buswidth = <16>;
+			qcom,agg-ports = <1>;
+			qcom,bus-dev = <&fab_a2noc>;
+			qcom,connections = <&mas_a2noc_snoc>;
+			qcom,slv-rpm-id = <ICBID_SLAVE_A2NOC_SNOC>;
+		};
+
+		slv_ebi:slv-ebi {
+			cell-id = <MSM_BUS_SLAVE_EBI_CH0>;
+			label = "slv-ebi";
+			qcom,buswidth = <8>;
+			qcom,agg-ports = <2>;
+			qcom,bus-dev = <&fab_bimc>;
+			qcom,slv-rpm-id = <ICBID_SLAVE_EBI1>;
+		};
+
+		slv_hmss_l3:slv-hmss-l3 {
+			cell-id = <MSM_BUS_SLAVE_HMSS_L3>;
+			label = "slv-hmss-l3";
+			qcom,buswidth = <8>;
+			qcom,agg-ports = <1>;
+			qcom,bus-dev = <&fab_bimc>;
+			qcom,slv-rpm-id = <ICBID_SLAVE_HMSS_L3>;
+		};
+
+		slv_bimc_snoc_0:slv-bimc-snoc-0 {
+			cell-id = <MSM_BUS_BIMC_SNOC_SLV>;
+			label = "slv-bimc-snoc-0";
+			qcom,buswidth = <8>;
+			qcom,agg-ports = <1>;
+			qcom,bus-dev = <&fab_bimc>;
+			qcom,connections = <&mas_bimc_snoc_0>;
+			qcom,slv-rpm-id = <ICBID_SLAVE_BIMC_SNOC_0>;
+		};
+
+		slv_bimc_snoc_1:slv-bimc-snoc-1 {
+			cell-id = <MSM_BUS_BIMC_SNOC_1_SLV>;
+			label = "slv-bimc-snoc-1";
+			qcom,buswidth = <8>;
+			qcom,agg-ports = <1>;
+			qcom,ap-owned;
+			qcom,bus-dev = <&fab_bimc>;
+			qcom,connections = <&mas_bimc_snoc_1>;
+			qcom,slv-rpm-id = <ICBID_SLAVE_BIMC_SNOC_1>;
+		};
+
+		slv_cnoc_a2noc:slv-cnoc-a2noc {
+			cell-id = <MSM_BUS_CNOC_SNOC_SLV>;
+			label = "slv-cnoc-a2noc";
+			qcom,buswidth = <4>;
+			qcom,agg-ports = <1>;
+			qcom,ap-owned;
+			qcom,bus-dev = <&fab_cnoc>;
+			qcom,connections = <&mas_cnoc_a2noc>;
+			qcom,slv-rpm-id = <ICBID_SLAVE_CNOC_A2NOC>;
+		};
+
+		slv_ssc_cfg:slv-ssc-cfg {
+			cell-id = <MSM_BUS_SLAVE_SSC_CFG>;
+			label = "slv-ssc-cfg";
+			qcom,buswidth = <4>;
+			qcom,agg-ports = <1>;
+			qcom,ap-owned;
+			qcom,bus-dev = <&fab_cnoc>;
+			qcom,slv-rpm-id = <ICBID_SLAVE_SSC_CFG>;
+		};
+
+		slv_mpm:slv-mpm {
+			cell-id = <MSM_BUS_SLAVE_MPM>;
+			label = "slv-mpm";
+			qcom,buswidth = <4>;
+			qcom,agg-ports = <1>;
+			qcom,ap-owned;
+			qcom,bus-dev = <&fab_cnoc>;
+			qcom,slv-rpm-id = <ICBID_SLAVE_MPM>;
+		};
+
+		slv_pmic_arb:slv-pmic-arb {
+			cell-id = <MSM_BUS_SLAVE_PMIC_ARB>;
+			label = "slv-pmic-arb";
+			qcom,buswidth = <4>;
+			qcom,agg-ports = <1>;
+			qcom,ap-owned;
+			qcom,bus-dev = <&fab_cnoc>;
+			qcom,slv-rpm-id = <ICBID_SLAVE_PMIC_ARB>;
+		};
+
+		slv_tlmm_north:slv-tlmm-north {
+			cell-id = <MSM_BUS_SLAVE_TLMM_NORTH>;
+			label = "slv-tlmm-north";
+			qcom,buswidth = <4>;
+			qcom,agg-ports = <1>;
+			qcom,ap-owned;
+			qcom,bus-dev = <&fab_cnoc>;
+			qcom,slv-rpm-id = <ICBID_SLAVE_TLMM_NORTH>;
+		};
+
+		slv_pimem_cfg:slv-pimem-cfg {
+			cell-id = <MSM_BUS_SLAVE_PIMEM_CFG>;
+			label = "slv-pimem-cfg";
+			qcom,buswidth = <4>;
+			qcom,agg-ports = <1>;
+			qcom,ap-owned;
+			qcom,bus-dev = <&fab_cnoc>;
+			qcom,slv-rpm-id = <ICBID_SLAVE_PIMEM_CFG>;
+		};
+
+		slv_imem_cfg:slv-imem-cfg {
+			cell-id = <MSM_BUS_SLAVE_IMEM_CFG>;
+			label = "slv-imem-cfg";
+			qcom,buswidth = <4>;
+			qcom,agg-ports = <1>;
+			qcom,ap-owned;
+			qcom,bus-dev = <&fab_cnoc>;
+			qcom,slv-rpm-id = <ICBID_SLAVE_IMEM_CFG>;
+		};
+
+		slv_message_ram:slv-message-ram {
+			cell-id = <MSM_BUS_SLAVE_MESSAGE_RAM>;
+			label = "slv-message-ram";
+			qcom,buswidth = <4>;
+			qcom,agg-ports = <1>;
+			qcom,ap-owned;
+			qcom,bus-dev = <&fab_cnoc>;
+			qcom,slv-rpm-id = <ICBID_SLAVE_MESSAGE_RAM>;
+		};
+
+		slv_skl:slv-skl {
+			cell-id = <MSM_BUS_SLAVE_SKL>;
+			label = "slv-skl";
+			qcom,buswidth = <4>;
+			qcom,agg-ports = <1>;
+			qcom,ap-owned;
+			qcom,bus-dev = <&fab_cnoc>;
+			qcom,slv-rpm-id = <ICBID_SLAVE_SKL>;
+		};
+
+		slv_bimc_cfg:slv-bimc-cfg {
+			cell-id = <MSM_BUS_SLAVE_BIMC_CFG>;
+			label = "slv-bimc-cfg";
+			qcom,buswidth = <4>;
+			qcom,agg-ports = <1>;
+			qcom,ap-owned;
+			qcom,bus-dev = <&fab_cnoc>;
+			qcom,slv-rpm-id = <ICBID_SLAVE_BIMC_CFG>;
+		};
+
+		slv_prng:slv-prng {
+			cell-id = <MSM_BUS_SLAVE_PRNG>;
+			label = "slv-prng";
+			qcom,buswidth = <4>;
+			qcom,agg-ports = <1>;
+			qcom,ap-owned;
+			qcom,bus-dev = <&fab_cnoc>;
+			qcom,slv-rpm-id = <ICBID_SLAVE_PRNG>;
+		};
+
+		slv_a2noc_cfg:slv-a2noc-cfg {
+			cell-id = <MSM_BUS_SLAVE_A2NOC_CFG>;
+			label = "slv-a2noc-cfg";
+			qcom,buswidth = <4>;
+			qcom,agg-ports = <1>;
+			qcom,ap-owned;
+			qcom,bus-dev = <&fab_cnoc>;
+			qcom,slv-rpm-id = <ICBID_SLAVE_A2NOC_CFG>;
+		};
+
+		slv_ipa:slv-ipa {
+			cell-id = <MSM_BUS_SLAVE_IPA_CFG>;
+			label = "slv-ipa";
+			qcom,buswidth = <4>;
+			qcom,agg-ports = <1>;
+			qcom,ap-owned;
+			qcom,bus-dev = <&fab_cnoc>;
+			qcom,slv-rpm-id = <ICBID_SLAVE_IPA_CFG>;
+		};
+
+		slv_tcsr:slv-tcsr {
+			cell-id = <MSM_BUS_SLAVE_TCSR>;
+			label = "slv-tcsr";
+			qcom,buswidth = <4>;
+			qcom,agg-ports = <1>;
+			qcom,ap-owned;
+			qcom,bus-dev = <&fab_cnoc>;
+			qcom,slv-rpm-id = <ICBID_SLAVE_TCSR>;
+		};
+
+		slv_snoc_cfg:slv-snoc-cfg {
+			cell-id = <MSM_BUS_SLAVE_SNOC_CFG>;
+			label = "slv-snoc-cfg";
+			qcom,buswidth = <4>;
+			qcom,agg-ports = <1>;
+			qcom,ap-owned;
+			qcom,bus-dev = <&fab_cnoc>;
+			qcom,slv-rpm-id = <ICBID_SLAVE_SNOC_CFG>;
+		};
+
+		slv_clk_ctl:slv-clk-ctl {
+			cell-id = <MSM_BUS_SLAVE_CLK_CTL>;
+			label = "slv-clk-ctl";
+			qcom,buswidth = <4>;
+			qcom,agg-ports = <1>;
+			qcom,ap-owned;
+			qcom,bus-dev = <&fab_cnoc>;
+			qcom,slv-rpm-id = <ICBID_SLAVE_CLK_CTL>;
+		};
+
+		slv_glm:slv-glm {
+			cell-id = <MSM_BUS_SLAVE_GLM>;
+			label = "slv-glm";
+			qcom,buswidth = <4>;
+			qcom,agg-ports = <1>;
+			qcom,ap-owned;
+			qcom,bus-dev = <&fab_cnoc>;
+			qcom,slv-rpm-id = <ICBID_SLAVE_GLM>;
+		};
+
+		slv_spdm:slv-spdm {
+			cell-id = <MSM_BUS_SLAVE_SPDM_WRAPPER>;
+			label = "slv-spdm";
+			qcom,buswidth = <4>;
+			qcom,agg-ports = <1>;
+			qcom,ap-owned;
+			qcom,bus-dev = <&fab_cnoc>;
+			qcom,slv-rpm-id = <ICBID_SLAVE_SPDM_WRAPPER>;
+		};
+
+		slv_gpuss_cfg:slv-gpuss-cfg {
+			cell-id = <MSM_BUS_SLAVE_GRAPHICS_3D_CFG>;
+			label = "slv-gpuss-cfg";
+			qcom,buswidth = <4>;
+			qcom,agg-ports = <1>;
+			qcom,ap-owned;
+			qcom,bus-dev = <&fab_cnoc>;
+			qcom,slv-rpm-id = <ICBID_SLAVE_GFX3D_CFG>;
+		};
+
+		slv_cnoc_mnoc_cfg:slv-cnoc-mnoc-cfg {
+			cell-id = <MSM_BUS_SLAVE_CNOC_MNOC_CFG>;
+			label = "slv-cnoc-mnoc-cfg";
+			qcom,buswidth = <4>;
+			qcom,agg-ports = <1>;
+			qcom,ap-owned;
+			qcom,bus-dev = <&fab_cnoc>;
+			qcom,connections = <&mas_cnoc_mnoc_cfg>;
+			qcom,slv-rpm-id = <ICBID_SLAVE_CNOC_MNOC_CFG>;
+		};
+
+		slv_qm_cfg:slv-qm-cfg {
+			cell-id = <MSM_BUS_SLAVE_QM_CFG>;
+			label = "slv-qm-cfg";
+			qcom,buswidth = <4>;
+			qcom,agg-ports = <1>;
+			qcom,ap-owned;
+			qcom,bus-dev = <&fab_cnoc>;
+			qcom,slv-rpm-id = <ICBID_SLAVE_QM_CFG>;
+		};
+
+		slv_mss_cfg:slv-mss-cfg {
+			cell-id = <MSM_BUS_SLAVE_CNOC_MSS>;
+			label = "slv-mss-cfg";
+			qcom,buswidth = <4>;
+			qcom,agg-ports = <1>;
+			qcom,ap-owned;
+			qcom,bus-dev = <&fab_cnoc>;
+			qcom,slv-rpm-id = <ICBID_SLAVE_CNOC_MSS>;
+		};
+
+		slv_ufs_cfg:slv-ufs-cfg {
+			cell-id = <MSM_BUS_SLAVE_UFS_CFG>;
+			label = "slv-ufs-cfg";
+			qcom,buswidth = <4>;
+			qcom,agg-ports = <1>;
+			qcom,ap-owned;
+			qcom,bus-dev = <&fab_cnoc>;
+			qcom,slv-rpm-id = <ICBID_SLAVE_UFS_CFG>;
+		};
+
+		slv_tlmm_west:slv-tlmm-west {
+			cell-id = <MSM_BUS_SLAVE_TLMM_WEST>;
+			label = "slv-tlmm-west";
+			qcom,buswidth = <4>;
+			qcom,agg-ports = <1>;
+			qcom,ap-owned;
+			qcom,bus-dev = <&fab_cnoc>;
+			qcom,slv-rpm-id = <ICBID_SLAVE_TLMM_WEST>;
+		};
+
+		slv_a1noc_cfg:slv-a1noc-cfg {
+			cell-id = <MSM_BUS_SLAVE_A1NOC_CFG>;
+			label = "slv-a1noc-cfg";
+			qcom,buswidth = <4>;
+			qcom,agg-ports = <1>;
+			qcom,ap-owned;
+			qcom,bus-dev = <&fab_cnoc>;
+			qcom,slv-rpm-id = <ICBID_SLAVE_A1NOC_CFG>;
+		};
+
+		slv_ahb2phy:slv-ahb2phy {
+			cell-id = <MSM_BUS_SLAVE_PCIE20_AHB2PHY>;
+			label = "slv-ahb2phy";
+			qcom,buswidth = <4>;
+			qcom,agg-ports = <1>;
+			qcom,ap-owned;
+			qcom,bus-dev = <&fab_cnoc>;
+			qcom,slv-rpm-id = <ICBID_SLAVE_PCIE20_AHB2PHY>;
+		};
+
+		slv_blsp_2:slv-blsp-2 {
+			cell-id = <MSM_BUS_SLAVE_BLSP_2>;
+			label = "slv-blsp-2";
+			qcom,buswidth = <4>;
+			qcom,agg-ports = <1>;
+			qcom,ap-owned;
+			qcom,bus-dev = <&fab_cnoc>;
+			qcom,slv-rpm-id = <ICBID_SLAVE_BLSP_2>;
+		};
+
+		slv_pdm:slv-pdm {
+			cell-id = <MSM_BUS_SLAVE_PDM>;
+			label = "slv-pdm";
+			qcom,buswidth = <4>;
+			qcom,agg-ports = <1>;
+			qcom,ap-owned;
+			qcom,bus-dev = <&fab_cnoc>;
+			qcom,slv-rpm-id = <ICBID_SLAVE_PDM>;
+		};
+
+		slv_usb3_0:slv-usb3-0 {
+			cell-id = <MSM_BUS_SLAVE_USB3>;
+			label = "slv-usb3-0";
+			qcom,buswidth = <4>;
+			qcom,agg-ports = <1>;
+			qcom,ap-owned;
+			qcom,bus-dev = <&fab_cnoc>;
+			qcom,slv-rpm-id = <ICBID_SLAVE_USB3_0>;
+		};
+
+		slv_a1noc_smmu_cfg:slv-a1noc-smmu-cfg {
+			cell-id = <MSM_BUS_SLAVE_A1NOC_SMMU_CFG>;
+			label = "slv-a1noc-smmu-cfg";
+			qcom,buswidth = <8>;
+			qcom,agg-ports = <1>;
+			qcom,ap-owned;
+			qcom,bus-dev = <&fab_cnoc>;
+			qcom,slv-rpm-id = <ICBID_SLAVE_A1NOC_SMMU_CFG>;
+		};
+
+		slv_blsp_1:slv-blsp-1 {
+			cell-id = <MSM_BUS_SLAVE_BLSP_1>;
+			label = "slv-blsp-1";
+			qcom,buswidth = <4>;
+			qcom,agg-ports = <1>;
+			qcom,ap-owned;
+			qcom,bus-dev = <&fab_cnoc>;
+			qcom,slv-rpm-id = <ICBID_SLAVE_BLSP_1>;
+		};
+
+		slv_sdcc_2:slv-sdcc-2 {
+			cell-id = <MSM_BUS_SLAVE_SDCC_2>;
+			label = "slv-sdcc-2";
+			qcom,buswidth = <4>;
+			qcom,agg-ports = <1>;
+			qcom,ap-owned;
+			qcom,bus-dev = <&fab_cnoc>;
+			qcom,slv-rpm-id = <ICBID_SLAVE_SDCC_2>;
+		};
+
+		slv_sdcc_4:slv-sdcc-4 {
+			cell-id = <MSM_BUS_SLAVE_SDCC_4>;
+			label = "slv-sdcc-4";
+			qcom,buswidth = <4>;
+			qcom,agg-ports = <1>;
+			qcom,ap-owned;
+			qcom,bus-dev = <&fab_cnoc>;
+			qcom,slv-rpm-id = <ICBID_SLAVE_SDCC_4>;
+		};
+
+		slv_tsif:slv-tsif {
+			cell-id = <MSM_BUS_SLAVE_TSIF>;
+			label = "slv-tsif";
+			qcom,buswidth = <4>;
+			qcom,agg-ports = <1>;
+			qcom,ap-owned;
+			qcom,bus-dev = <&fab_cnoc>;
+			qcom,slv-rpm-id = <ICBID_SLAVE_TSIF>;
+		};
+
+		slv_qdss_cfg:slv-qdss-cfg {
+			cell-id = <MSM_BUS_SLAVE_QDSS_CFG>;
+			label = "slv-qdss-cfg";
+			qcom,buswidth = <4>;
+			qcom,agg-ports = <1>;
+			qcom,ap-owned;
+			qcom,bus-dev = <&fab_cnoc>;
+			qcom,slv-rpm-id = <ICBID_SLAVE_QDSS_CFG>;
+		};
+
+		slv_tlmm_east:slv-tlmm-east {
+			cell-id = <MSM_BUS_SLAVE_TLMM_EAST>;
+			label = "slv-tlmm-east";
+			qcom,buswidth = <4>;
+			qcom,agg-ports = <1>;
+			qcom,ap-owned;
+			qcom,bus-dev = <&fab_cnoc>;
+			qcom,slv-rpm-id = <ICBID_SLAVE_TLMM_EAST>;
+		};
+
+		slv_cnoc_mnoc_mmss_cfg:slv-cnoc-mnoc-mmss-cfg {
+			cell-id = <MSM_BUS_SLAVE_CNOC_MNOC_MMSS_CFG>;
+			label = "slv-cnoc-mnoc-mmss-cfg";
+			qcom,buswidth = <8>;
+			qcom,agg-ports = <1>;
+			qcom,ap-owned;
+			qcom,bus-dev = <&fab_cnoc>;
+			qcom,connections = <&mas_cnoc_mnoc_mmss_cfg>;
+			qcom,slv-rpm-id = <ICBID_SLAVE_CNOC_MNOC_MMSS_CFG>;
+		};
+
+		slv_srvc_cnoc:slv-srvc-cnoc {
+			cell-id = <MSM_BUS_SLAVE_SERVICE_CNOC>;
+			label = "slv-srvc-cnoc";
+			qcom,buswidth = <4>;
+			qcom,agg-ports = <1>;
+			qcom,ap-owned;
+			qcom,bus-dev = <&fab_cnoc>;
+			qcom,slv-rpm-id = <ICBID_SLAVE_SERVICE_CNOC>;
+		};
+
+		slv_cr_virt_a2noc:slv-cr-virt-a2noc {
+			cell-id = <MSM_BUS_SLAVE_CRVIRT_A2NOC>;
+			label = "slv-cr-virt-a2noc";
+			qcom,buswidth = <8>;
+			qcom,agg-ports = <1>;
+			qcom,ap-owned;
+			qcom,bus-dev = <&fab_a2noc>;
+			qcom,connections = <&mas_cr_virt_a2noc>;
+			qcom,slv-rpm-id = <ICBID_SLAVE_CRVIRT_A2NOC>;
+		};
+
+		slv_gnoc_bimc:slv-gnoc-bimc {
+			cell-id = <MSM_BUS_SLAVE_GNOC_BIMC>;
+			label = "slv-gnoc-bimc";
+			qcom,buswidth = <32>;
+			qcom,agg-ports = <1>;
+			qcom,ap-owned;
+			qcom,bus-dev = <&fab_gnoc>;
+			qcom,connections = <&mas_gnoc_bimc>;
+			qcom,slv-rpm-id = <ICBID_SLAVE_GNOC_BIMC>;
+		};
+
+		slv_camera_cfg:slv-camera-cfg {
+			cell-id = <MSM_BUS_SLAVE_CAMERA_CFG>;
+			label = "slv-camera-cfg";
+			qcom,buswidth = <8>;
+			qcom,agg-ports = <1>;
+			qcom,ap-owned;
+			qcom,bus-dev = <&fab_mnoc_ahb>;
+			qcom,slv-rpm-id = <ICBID_SLAVE_CAMERA_CFG>;
+		};
+
+		slv_camera_throttle_cfg:slv-camera-throttle-cfg {
+			cell-id = <MSM_BUS_SLAVE_CAMERA_THROTTLE_CFG>;
+			label = "slv-camera-throttle-cfg";
+			qcom,buswidth = <8>;
+			qcom,agg-ports = <1>;
+			qcom,ap-owned;
+			qcom,bus-dev = <&fab_mnoc_ahb>;
+			qcom,slv-rpm-id = <ICBID_SLAVE_CAMERA_THROTTLE_CFG>;
+		};
+
+		slv_misc_cfg:slv-misc-cfg {
+			cell-id = <MSM_BUS_SLAVE_MISC_CFG>;
+			label = "slv-misc-cfg";
+			qcom,buswidth = <8>;
+			qcom,agg-ports = <1>;
+			qcom,ap-owned;
+			qcom,bus-dev = <&fab_mnoc_ahb>;
+			qcom,slv-rpm-id = <ICBID_SLAVE_MISC_CFG>;
+		};
+
+		slv_venus_throttle_cfg:slv-venus-throttle-cfg {
+			cell-id = <MSM_BUS_SLAVE_VENUS_THROTTLE_CFG>;
+			label = "slv-venus-throttle-cfg";
+			qcom,buswidth = <8>;
+			qcom,agg-ports = <1>;
+			qcom,ap-owned;
+			qcom,bus-dev = <&fab_mnoc_ahb>;
+			qcom,slv-rpm-id = <ICBID_SLAVE_VENUS_THROTTLE_CFG>;
+		};
+
+		slv_venus_cfg:slv-venus-cfg {
+			cell-id = <MSM_BUS_SLAVE_VENUS_CFG>;
+			label = "slv-venus-cfg";
+			qcom,buswidth = <8>;
+			qcom,agg-ports = <1>;
+			qcom,ap-owned;
+			qcom,bus-dev = <&fab_mnoc_ahb>;
+			qcom,slv-rpm-id = <ICBID_SLAVE_VENUS_CFG>;
+		};
+
+		slv_vmem_cfg:slv-vmem-cfg {
+			cell-id = <MSM_BUS_SLAVE_VMEM_CFG>;
+			label = "slv-vmem-cfg";
+			qcom,buswidth = <8>;
+			qcom,agg-ports = <1>;
+			qcom,ap-owned;
+			qcom,bus-dev = <&fab_mnoc_ahb>;
+			qcom,slv-rpm-id = <ICBID_SLAVE_VMEM_CFG>;
+			qcom,enable-only-clk;
+			clock-names = "node_clk";
+			clocks = <&clock_mmss clk_mmss_mnoc_maxi_clk>;
+		};
+
+		slv_mmss_clk_xpu_cfg:slv-mmss-clk-xpu-cfg {
+			cell-id = <MSM_BUS_SLAVE_MMSS_CLK_XPU_CFG>;
+			label = "slv-mmss-clk-xpu-cfg";
+			qcom,buswidth = <8>;
+			qcom,agg-ports = <1>;
+			qcom,ap-owned;
+			qcom,bus-dev = <&fab_mnoc_ahb>;
+			qcom,slv-rpm-id = <ICBID_SLAVE_MMSS_CLK_XPU_CFG>;
+		};
+
+		slv_mmss_clk_cfg:slv-mmss-clk-cfg {
+			cell-id = <MSM_BUS_SLAVE_MMSS_CLK_CFG>;
+			label = "slv-mmss-clk-cfg";
+			qcom,buswidth = <8>;
+			qcom,agg-ports = <1>;
+			qcom,ap-owned;
+			qcom,bus-dev = <&fab_mnoc_ahb>;
+			qcom,slv-rpm-id = <ICBID_SLAVE_MMSS_CLK_CFG>;
+		};
+
+		slv_display_cfg:slv-display-cfg {
+			cell-id = <MSM_BUS_SLAVE_DISPLAY_CFG>;
+			label = "slv-display-cfg";
+			qcom,buswidth = <8>;
+			qcom,agg-ports = <1>;
+			qcom,ap-owned;
+			qcom,bus-dev = <&fab_mnoc_ahb>;
+			qcom,slv-rpm-id = <ICBID_SLAVE_DISPLAY_CFG>;
+		};
+
+		slv_display_throttle_cfg:slv-display-throttle-cfg {
+			cell-id = <MSM_BUS_SLAVE_DISPLAY_THROTTLE_CFG>;
+			label = "slv-display-throttle-cfg";
+			qcom,buswidth = <8>;
+			qcom,agg-ports = <1>;
+			qcom,ap-owned;
+			qcom,bus-dev = <&fab_mnoc_ahb>;
+			qcom,slv-rpm-id = <ICBID_SLAVE_DISPLAY_THROTTLE_CFG>;
+		};
+
+		slv_smmu_cfg:slv-smmu-cfg {
+			cell-id = <MSM_BUS_SLAVE_MMSS_SMMU_CFG>;
+			label = "slv-smmu-cfg";
+			qcom,buswidth = <8>;
+			qcom,agg-ports = <1>;
+			qcom,ap-owned;
+			qcom,bus-dev = <&fab_mnoc_ahb>;
+			qcom,slv-rpm-id = <ICBID_SLAVE_MMSS_SMMU_CFG>;
+		};
+
+		slv_mnoc_bimc:slv-mnoc-bimc {
+			cell-id = <MSM_BUS_MNOC_BIMC_SLV>;
+			label = "slv-mnoc-bimc";
+			qcom,buswidth = <32>;
+			qcom,agg-ports = <2>;
+			qcom,ap-owned;
+			qcom,bus-dev = <&fab_mnoc>;
+			qcom,connections = <&mas_mnoc_bimc>;
+			qcom,slv-rpm-id = <ICBID_SLAVE_MNOC_BIMC>;
+			qcom,enable-only-clk;
+			clock-names = "node_clk";
+			clocks = <&clock_gcc clk_mmssnoc_axi_clk>;
+		};
+
+		slv_vmem: slv-vmem {
+			cell-id = <MSM_BUS_SLAVE_VMEM>;
+			label = "slv-vmem";
+			qcom,buswidth = <32>;
+			qcom,agg-ports = <1>;
+			qcom,ap-owned;
+			qcom,bus-dev = <&fab_mnoc>;
+			qcom,slv-rpm-id = <ICBID_SLAVE_VMEM>;
+			clock-names = "node_clk";
+			clocks = <&clock_mmss clk_mmss_mnoc_maxi_clk>;
+		};
+
+		slv_srvc_mnoc:slv-srvc-mnoc {
+			cell-id = <MSM_BUS_SLAVE_SERVICE_MNOC>;
+			label = "slv-srvc-mnoc";
+			qcom,buswidth = <8>;
+			qcom,agg-ports = <1>;
+			qcom,ap-owned;
+			qcom,bus-dev = <&fab_mnoc>;
+			qcom,slv-rpm-id = <ICBID_SLAVE_SERVICE_MNOC>;
+		};
+
+		slv_hmss:slv-hmss {
+			cell-id = <MSM_BUS_SLAVE_APPSS>;
+			label = "slv-hmss";
+			qcom,buswidth = <16>;
+			qcom,agg-ports = <1>;
+			qcom,ap-owned;
+			qcom,bus-dev = <&fab_snoc>;
+			qcom,slv-rpm-id = <ICBID_SLAVE_APPSS>;
+		};
+
+		slv_lpass:slv-lpass {
+			cell-id = <MSM_BUS_SLAVE_LPASS>;
+			label = "slv-lpass";
+			qcom,buswidth = <16>;
+			qcom,agg-ports = <1>;
+			qcom,ap-owned;
+			qcom,bus-dev = <&fab_snoc>;
+			qcom,slv-rpm-id = <ICBID_SLAVE_LPASS>;
+		};
+
+		slv_wlan:slv-wlan {
+			cell-id = <MSM_BUS_SLAVE_WLAN>;
+			label = "slv-wlan";
+			qcom,buswidth = <16>;
+			qcom,agg-ports = <1>;
+			qcom,bus-dev = <&fab_snoc>;
+			qcom,slv-rpm-id = <ICBID_SLAVE_WLAN>;
+		};
+
+		slv_snoc_bimc:slv-snoc-bimc {
+			cell-id = <MSM_BUS_SNOC_BIMC_SLV>;
+			label = "slv-snoc-bimc";
+			qcom,buswidth = <32>;
+			qcom,agg-ports = <2>;
+			qcom,bus-dev = <&fab_snoc>;
+			qcom,connections = <&mas_snoc_bimc>;
+			qcom,slv-rpm-id = <ICBID_SLAVE_SNOC_BIMC>;
+		};
+
+		slv_snoc_cnoc:slv-snoc-cnoc {
+			cell-id = <MSM_BUS_SNOC_CNOC_SLV>;
+			label = "slv-snoc-cnoc";
+			qcom,buswidth = <16>;
+			qcom,agg-ports = <1>;
+			qcom,bus-dev = <&fab_snoc>;
+			qcom,connections = <&mas_snoc_cnoc>;
+			qcom,slv-rpm-id = <ICBID_SLAVE_SNOC_CNOC>;
+		};
+
+		slv_imem:slv-imem {
+			cell-id = <MSM_BUS_SLAVE_OCIMEM>;
+			label = "slv-imem";
+			qcom,buswidth = <16>;
+			qcom,agg-ports = <1>;
+			qcom,bus-dev = <&fab_snoc>;
+			qcom,slv-rpm-id = <ICBID_SLAVE_IMEM>;
+		};
+
+		slv_pimem:slv-pimem {
+			cell-id = <MSM_BUS_SLAVE_PIMEM>;
+			label = "slv-pimem";
+			qcom,buswidth = <16>;
+			qcom,agg-ports = <1>;
+			qcom,bus-dev = <&fab_snoc>;
+			qcom,slv-rpm-id = <ICBID_SLAVE_PIMEM>;
+		};
+
+		slv_qdss_stm:slv-qdss-stm {
+			cell-id = <MSM_BUS_SLAVE_QDSS_STM>;
+			label = "slv-qdss-stm";
+			qcom,buswidth = <16>;
+			qcom,agg-ports = <1>;
+			qcom,bus-dev = <&fab_snoc>;
+			qcom,slv-rpm-id = <ICBID_SLAVE_QDSS_STM>;
+		};
+
+		slv_pcie_0:slv-pcie-0 {
+			cell-id = <MSM_BUS_SLAVE_PCIE_0>;
+			label = "slv-pcie-0";
+			qcom,buswidth = <16>;
+			qcom,agg-ports = <1>;
+			qcom,ap-owned;
+			qcom,bus-dev = <&fab_snoc>;
+			qcom,slv-rpm-id = <ICBID_SLAVE_PCIE_0>;
+		};
+
+		slv_srvc_snoc:slv-srvc-snoc {
+			cell-id = <MSM_BUS_SLAVE_SERVICE_SNOC>;
+			label = "slv-srvc-snoc";
+			qcom,buswidth = <16>;
+			qcom,agg-ports = <1>;
+			qcom,bus-dev = <&fab_snoc>;
+			qcom,slv-rpm-id = <ICBID_SLAVE_SERVICE_SNOC>;
+		};
+	};
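
Client devices never reference these mas_*/slv_* nodes directly; they vote by the same numeric master/slave IDs through qcom,msm-bus,* properties, exactly as the UART and camera nodes in this patch do, and their drivers switch between the declared cases at runtime (msm_bus_scale_client_update_request() in this kernel generation). A minimal hypothetical client sketch:

	my_client: my-client {			/* hypothetical node and label */
		compatible = "vendor,my-client";	/* hypothetical compatible */
		qcom,msm-bus,name = "my-client";
		qcom,msm-bus,num-cases = <2>;	/* case 0 = idle, case 1 = active */
		qcom,msm-bus,num-paths = <1>;
		qcom,msm-bus,vectors-KBps =
			<86 512 0 0>,		/* master 86 -> slave 512, idle */
			<86 512 10000 20000>;	/* active: ab 10 MBps, ib 20 MBps */
	};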
+
+	devfreq_spdm_cpu {
+		compatible = "qcom,devfreq_spdm";
+		qcom,msm-bus,name = "devfreq_spdm";
+		qcom,msm-bus,num-cases = <2>;
+		qcom,msm-bus,num-paths = <1>;
+		qcom,msm-bus,vectors-KBps =
+				<1 512 0 0>,
+				<1 512 0 0>;
+		qcom,msm-bus,active-only;
+		qcom,spdm-client = <0>;
+
+		qcom,bw-upstep = <1000>;
+		qcom,bw-dwnstep = <1000>;
+		qcom,max-vote = <10000>;
+		qcom,up-step-multp = <2>;
+		qcom,spdm-interval = <100>;
+
+		qcom,ports = <24>;
+		qcom,alpha-up = <12>;
+		qcom,alpha-down = <15>;
+		qcom,bucket-size = <8>;
+
+		/*max pl1 freq, max pl2 freq*/
+		qcom,pl-freqs = <260000 770000>;
+
+		/* pl1 low, pl1 high, pl2 low, pl2 high, pl3 low, pl3 high */
+		qcom,reject-rate = <5000 5000 5000 5000 5000 5000>;
+		/* pl1 low, pl1 high, pl2 low, pl2 high, pl3 low, pl3 high */
+		qcom,response-time-us = <10000 10000 10000 10000 10000 10000>;
+		/* pl1 low, pl1 high, pl2 low, pl2 high, pl3 low, pl3 high */
+		qcom,cci-response-time-us = <10000 10000 10000
+						10000 10000 10000>;
+		qcom,max-cci-freq = <1036800>;
+	};
+
+	devfreq_spdm_gov {
+		compatible = "qcom,gov_spdm_hyp";
+		interrupt-names = "spdm-irq";
+		interrupts = <0 192 IRQ_TYPE_EDGE_RISING>;
+	};
+};
diff -Nruw linux-4.4.115-fbx/arch/arm/boot/dts/qcom./msm8998-camera.dtsi linux-4.4.115-fbx/arch/arm/boot/dts/qcom/msm8998-camera.dtsi
--- linux-4.4.115-fbx/arch/arm/boot/dts/qcom./msm8998-camera.dtsi	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/arch/arm/boot/dts/qcom/msm8998-camera.dtsi	2019-01-22 16:16:21.195225506 +0100
@@ -0,0 +1,909 @@
+/*
+ * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+&soc {
+	qcom,msm-cam@8c0000 {
+		compatible = "qcom,msm-cam";
+		reg = <0x8c0000 0x40000>;
+		reg-names = "msm-cam";
+		status = "ok";
+		bus-vectors = "suspend", "svs", "nominal", "turbo";
+		qcom,bus-votes = <0 300000000 640000000 640000000>;
+	};
+
+	qcom,csiphy@ca34000 {
+		cell-index = <0>;
+		compatible = "qcom,csiphy-v5.0", "qcom,csiphy";
+		reg = <0xca34000 0x1000>;
+		reg-names = "csiphy";
+		interrupts = <0 78 0>;
+		interrupt-names = "csiphy";
+		gdscr-supply = <&gdsc_camss_top>;
+		bimc_smmu-supply = <&gdsc_bimc_smmu>;
+		qcom,cam-vreg-name = "gdscr", "bimc_smmu";
+		clocks = <&clock_gcc clk_mmssnoc_axi_clk>,
+			<&clock_mmss clk_mmss_mnoc_ahb_clk>,
+			<&clock_mmss clk_mmss_bimc_smmu_ahb_clk>,
+			<&clock_mmss clk_mmss_bimc_smmu_axi_clk>,
+			<&clock_mmss clk_mmss_camss_ahb_clk>,
+			<&clock_mmss clk_mmss_camss_top_ahb_clk>,
+			<&clock_mmss clk_csi0_clk_src>,
+			<&clock_mmss clk_mmss_camss_csi0_clk>,
+			<&clock_mmss clk_mmss_camss_cphy_csid0_clk>,
+			<&clock_mmss clk_csi0phytimer_clk_src>,
+			<&clock_mmss clk_mmss_camss_csi0phytimer_clk>,
+			<&clock_mmss clk_mmss_camss_ispif_ahb_clk>,
+			<&clock_mmss clk_csiphy_clk_src>,
+			<&clock_mmss clk_mmss_camss_csiphy0_clk>;
+		clock-names = "mmssnoc_axi", "mnoc_ahb",
+			"bmic_smmu_ahb", "bmic_smmu_axi",
+			"camss_ahb_clk", "camss_top_ahb_clk",
+			"csi_src_clk", "csi_clk", "cphy_csid_clk",
+			"csiphy_timer_src_clk", "csiphy_timer_clk",
+			"camss_ispif_ahb_clk", "csiphy_clk_src", "csiphy_clk";
+		qcom,clock-rates = <0 0 0 0 0 0 274290000 0 0 200000000 0
+			0 274290000 0>;
+		status = "ok";
+	};
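
qcom,clock-rates pairs positionally with clock-names: 0 means "enable at whatever rate the clock already has", a non-zero value is a rate to set. For the csiphy nodes that reading gives:

	qcom,clock-rates = <0 0 0 0 0 0		/* bus/AHB clocks: enable only   */
		274290000 0 0			/* csi_src_clk at 274.29 MHz     */
		200000000 0 0			/* csiphy_timer_src_clk, 200 MHz */
		274290000 0>;			/* csiphy_clk_src at 274.29 MHz  */

Only the *_clk_src entries carry rates; the leaf branch clocks that follow each of them are enable-only and inherit the source rate.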
+
+	qcom,csiphy@ca35000 {
+		cell-index = <1>;
+		compatible = "qcom,csiphy-v5.0", "qcom,csiphy";
+		reg = <0xca35000 0x1000>;
+		reg-names = "csiphy";
+		interrupts = <0 79 0>;
+		interrupt-names = "csiphy";
+		gdscr-supply = <&gdsc_camss_top>;
+		bimc_smmu-supply = <&gdsc_bimc_smmu>;
+		qcom,cam-vreg-name = "gdscr", "bimc_smmu";
+		clocks = <&clock_gcc clk_mmssnoc_axi_clk>,
+			<&clock_mmss clk_mmss_mnoc_ahb_clk>,
+			<&clock_mmss clk_mmss_bimc_smmu_ahb_clk>,
+			<&clock_mmss clk_mmss_bimc_smmu_axi_clk>,
+			<&clock_mmss clk_mmss_camss_ahb_clk>,
+			<&clock_mmss clk_mmss_camss_top_ahb_clk>,
+			<&clock_mmss clk_csi1_clk_src>,
+			<&clock_mmss clk_mmss_camss_csi1_clk>,
+			<&clock_mmss clk_mmss_camss_cphy_csid1_clk>,
+			<&clock_mmss clk_csi1phytimer_clk_src>,
+			<&clock_mmss clk_mmss_camss_csi1phytimer_clk>,
+			<&clock_mmss clk_mmss_camss_ispif_ahb_clk>,
+			<&clock_mmss clk_csiphy_clk_src>,
+			<&clock_mmss clk_mmss_camss_csiphy1_clk>;
+		clock-names = "mmssnoc_axi", "mnoc_ahb",
+			"bmic_smmu_ahb", "bmic_smmu_axi",
+			"camss_ahb_clk", "camss_top_ahb_clk",
+			"csi_src_clk", "csi_clk", "cphy_csid_clk",
+			"csiphy_timer_src_clk", "csiphy_timer_clk",
+			"camss_ispif_ahb_clk", "csiphy_clk_src", "csiphy_clk";
+		qcom,clock-rates = <0 0 0 0 0 0 274290000 0 0 200000000 0
+			0 274290000 0>;
+		status = "ok";
+	};
+
+	qcom,csiphy@ca36000 {
+		cell-index = <2>;
+		compatible = "qcom,csiphy-v5.0", "qcom,csiphy";
+		reg = <0xca36000 0x1000>;
+		reg-names = "csiphy";
+		interrupts = <0 80 0>;
+		interrupt-names = "csiphy";
+		gdscr-supply = <&gdsc_camss_top>;
+		bimc_smmu-supply = <&gdsc_bimc_smmu>;
+		qcom,cam-vreg-name = "gdscr", "bimc_smmu";
+		clocks = <&clock_gcc clk_mmssnoc_axi_clk>,
+			<&clock_mmss clk_mmss_mnoc_ahb_clk>,
+			<&clock_mmss clk_mmss_bimc_smmu_ahb_clk>,
+			<&clock_mmss clk_mmss_bimc_smmu_axi_clk>,
+			<&clock_mmss clk_mmss_camss_ahb_clk>,
+			<&clock_mmss clk_mmss_camss_top_ahb_clk>,
+			<&clock_mmss clk_csi2_clk_src>,
+			<&clock_mmss clk_mmss_camss_csi2_clk>,
+			<&clock_mmss clk_mmss_camss_cphy_csid2_clk>,
+			<&clock_mmss clk_csi2phytimer_clk_src>,
+			<&clock_mmss clk_mmss_camss_csi2phytimer_clk>,
+			<&clock_mmss clk_mmss_camss_ispif_ahb_clk>,
+			<&clock_mmss clk_csiphy_clk_src>,
+			<&clock_mmss clk_mmss_camss_csiphy2_clk>;
+		clock-names = "mmssnoc_axi", "mnoc_ahb",
+			"bmic_smmu_ahb", "bmic_smmu_axi",
+			"camss_ahb_clk", "camss_top_ahb_clk",
+			"csi_src_clk", "csi_clk", "cphy_csid_clk",
+			"csiphy_timer_src_clk", "csiphy_timer_clk",
+			"camss_ispif_ahb_clk", "csiphy_clk_src", "csiphy_clk";
+		qcom,clock-rates = <0 0 0 0 0 0 274290000 0 0 200000000 0
+			0 274290000 0>;
+		status = "ok";
+	};
+
+	qcom,csid@ca30000  {
+		cell-index = <0>;
+		compatible = "qcom,csid-v5.0", "qcom,csid";
+		reg = <0xca30000 0x400>;
+		reg-names = "csid";
+		interrupts = <0 296 0>;
+		interrupt-names = "csid";
+		qcom,csi-vdd-voltage = <1200000>;
+		qcom,mipi-csi-vdd-supply = <&pm8998_l2>;
+		gdscr-supply = <&gdsc_camss_top>;
+		vdd_sec-supply = <&pm8998_l1>;
+		bimc_smmu-supply = <&gdsc_bimc_smmu>;
+		qcom,cam-vreg-name = "vdd_sec", "gdscr", "bimc_smmu";
+		clocks = <&clock_gcc clk_mmssnoc_axi_clk>,
+			<&clock_mmss clk_mmss_mnoc_ahb_clk>,
+			<&clock_mmss clk_mmss_bimc_smmu_ahb_clk>,
+			<&clock_mmss clk_mmss_bimc_smmu_axi_clk>,
+			<&clock_mmss clk_mmss_camss_ahb_clk>,
+			<&clock_mmss clk_mmss_camss_top_ahb_clk>,
+			<&clock_mmss clk_mmss_camss_ispif_ahb_clk>,
+			<&clock_mmss clk_csi0_clk_src>,
+			<&clock_mmss clk_csiphy_clk_src>,
+			<&clock_mmss clk_mmss_camss_csi0_clk>,
+			<&clock_mmss clk_mmss_camss_csi0_ahb_clk>,
+			<&clock_mmss clk_mmss_camss_csi0rdi_clk>,
+			<&clock_mmss clk_mmss_camss_csi0pix_clk>,
+			<&clock_mmss clk_mmss_camss_cphy_csid0_clk>;
+		clock-names = "mmssnoc_axi", "mnoc_ahb",
+			"bmic_smmu_ahb", "bmic_smmu_axi",
+			"camss_ahb_clk", "camss_top_ahb_clk",
+			"ispif_ahb_clk", "csi_src_clk", "csiphy_clk_src",
+			"csi_clk", "csi_ahb_clk", "csi_rdi_clk",
+			"csi_pix_clk", "cphy_csid_clk";
+		qcom,clock-rates = <0 0 0 0 0 0 0 274290000 274290000
+			0 0 0 0 0>;
+		status = "ok";
+	};
+
+	qcom,csid@ca30400 {
+		cell-index = <1>;
+		compatible = "qcom,csid-v5.0", "qcom,csid";
+		reg = <0xca30400 0x400>;
+		reg-names = "csid";
+		interrupts = <0 297 0>;
+		interrupt-names = "csid";
+		qcom,csi-vdd-voltage = <1200000>;
+		qcom,mipi-csi-vdd-supply = <&pm8998_l2>;
+		gdscr-supply = <&gdsc_camss_top>;
+		vdd_sec-supply = <&pm8998_l1>;
+		bimc_smmu-supply = <&gdsc_bimc_smmu>;
+		qcom,cam-vreg-name = "vdd_sec", "gdscr", "bimc_smmu";
+		clocks = <&clock_gcc clk_mmssnoc_axi_clk>,
+			<&clock_mmss clk_mmss_mnoc_ahb_clk>,
+			<&clock_mmss clk_mmss_bimc_smmu_ahb_clk>,
+			<&clock_mmss clk_mmss_bimc_smmu_axi_clk>,
+			<&clock_mmss clk_mmss_camss_ahb_clk>,
+			<&clock_mmss clk_mmss_camss_top_ahb_clk>,
+			<&clock_mmss clk_mmss_camss_ispif_ahb_clk>,
+			<&clock_mmss clk_csi1_clk_src>,
+			<&clock_mmss clk_csiphy_clk_src>,
+			<&clock_mmss clk_mmss_camss_csi1_clk>,
+			<&clock_mmss clk_mmss_camss_csi1_ahb_clk>,
+			<&clock_mmss clk_mmss_camss_csi1rdi_clk>,
+			<&clock_mmss clk_mmss_camss_csi1pix_clk>,
+			<&clock_mmss clk_mmss_camss_cphy_csid1_clk>;
+		clock-names = "mmssnoc_axi", "mnoc_ahb",
+			"bmic_smmu_ahb", "bmic_smmu_axi",
+			"camss_ahb_clk", "camss_top_ahb_clk",
+			"ispif_ahb_clk", "csi_src_clk", "csiphy_clk_src",
+			"csi_clk", "csi_ahb_clk", "csi_rdi_clk",
+			"csi_pix_clk", "cphy_csid_clk";
+		qcom,clock-rates = <0 0 0 0 0 0 0 274290000 274290000
+			 0 0 0 0 0>;
+		status = "ok";
+	};
+
+	qcom,csid@ca30800 {
+		cell-index = <2>;
+		compatible = "qcom,csid-v5.0", "qcom,csid";
+		reg = <0xca30800 0x400>;
+		reg-names = "csid";
+		interrupts = <0 298 0>;
+		interrupt-names = "csid";
+		qcom,csi-vdd-voltage = <1200000>;
+		qcom,mipi-csi-vdd-supply = <&pm8998_l2>;
+		gdscr-supply = <&gdsc_camss_top>;
+		vdd_sec-supply = <&pm8998_l1>;
+		bimc_smmu-supply = <&gdsc_bimc_smmu>;
+		qcom,cam-vreg-name = "vdd_sec", "gdscr", "bimc_smmu";
+		clocks = <&clock_gcc clk_mmssnoc_axi_clk>,
+			<&clock_mmss clk_mmss_mnoc_ahb_clk>,
+			<&clock_mmss clk_mmss_bimc_smmu_ahb_clk>,
+			<&clock_mmss clk_mmss_bimc_smmu_axi_clk>,
+			<&clock_mmss clk_mmss_camss_ahb_clk>,
+			<&clock_mmss clk_mmss_camss_top_ahb_clk>,
+			<&clock_mmss clk_mmss_camss_ispif_ahb_clk>,
+			<&clock_mmss clk_csi2_clk_src>,
+			<&clock_mmss clk_csiphy_clk_src>,
+			<&clock_mmss clk_mmss_camss_csi2_clk>,
+			<&clock_mmss clk_mmss_camss_csi2_ahb_clk>,
+			<&clock_mmss clk_mmss_camss_csi2rdi_clk>,
+			<&clock_mmss clk_mmss_camss_csi2pix_clk>,
+			<&clock_mmss clk_mmss_camss_cphy_csid2_clk>;
+		clock-names = "mmssnoc_axi", "mnoc_ahb",
+			"bmic_smmu_ahb", "bmic_smmu_axi",
+			"camss_ahb_clk", "camss_top_ahb_clk",
+			"ispif_ahb_clk", "csi_src_clk", "csiphy_clk_src",
+			"csi_clk", "csi_ahb_clk", "csi_rdi_clk",
+			"csi_pix_clk", "cphy_csid_clk";
+		qcom,clock-rates = <0 0 0 0 0 0 0 274290000 274290000
+			 0 0 0 0 0>;
+		status = "ok";
+	};
+
+	qcom,csid@ca30c00 {
+		cell-index = <3>;
+		compatible = "qcom,csid-v5.0", "qcom,csid";
+		reg = <0xca30c00 0x400>;
+		reg-names = "csid";
+		interrupts = <0 299 0>;
+		interrupt-names = "csid";
+		qcom,csi-vdd-voltage = <1200000>;
+		qcom,mipi-csi-vdd-supply = <&pm8998_l2>;
+		gdscr-supply = <&gdsc_camss_top>;
+		vdd_sec-supply = <&pm8998_l1>;
+		bimc_smmu-supply = <&gdsc_bimc_smmu>;
+		qcom,cam-vreg-name = "vdd_sec", "gdscr", "bimc_smmu";
+		clocks = <&clock_gcc clk_mmssnoc_axi_clk>,
+			<&clock_mmss clk_mmss_mnoc_ahb_clk>,
+			<&clock_mmss clk_mmss_bimc_smmu_ahb_clk>,
+			<&clock_mmss clk_mmss_bimc_smmu_axi_clk>,
+			<&clock_mmss clk_mmss_camss_ahb_clk>,
+			<&clock_mmss clk_mmss_camss_top_ahb_clk>,
+			<&clock_mmss clk_mmss_camss_ispif_ahb_clk>,
+			<&clock_mmss clk_csi3_clk_src>,
+			<&clock_mmss clk_csiphy_clk_src>,
+			<&clock_mmss clk_mmss_camss_csi3_clk>,
+			<&clock_mmss clk_mmss_camss_csi3_ahb_clk>,
+			<&clock_mmss clk_mmss_camss_csi3rdi_clk>,
+			<&clock_mmss clk_mmss_camss_csi3pix_clk>,
+			<&clock_mmss clk_mmss_camss_cphy_csid3_clk>;
+		clock-names = "mmssnoc_axi", "mnoc_ahb",
+			"bmic_smmu_ahb", "bmic_smmu_axi",
+			"camss_ahb_clk", "camss_top_ahb_clk",
+			"ispif_ahb_clk", "csi_src_clk", "csiphy_clk_src",
+			"csi_clk", "csi_ahb_clk", "csi_rdi_clk",
+			"csi_pix_clk", "cphy_csid_clk";
+		qcom,clock-rates = <0 0 0 0 0 0 0 274290000 274290000
+			 0 0 0 0 0>;
+		status = "ok";
+	};
+
+	qcom,cam_smmu {
+		compatible = "qcom,msm-cam-smmu";
+		status = "ok";
+
+		msm_cam_smmu_cb1 {
+			compatible = "qcom,msm-cam-smmu-cb";
+			iommus = <&mmss_smmu 0xc00>,
+					<&mmss_smmu 0xc01>,
+					<&mmss_smmu 0xc02>,
+					<&mmss_smmu 0xc03>;
+			label = "vfe";
+			qcom,scratch-buf-support;
+		};
+
+		msm_cam_smmu_cb2 {
+			compatible = "qcom,msm-cam-smmu-cb";
+			iommus = <&mmss_smmu 0xa00>;
+			label = "cpp";
+		};
+
+		msm_cam_smmu_cb3 {
+			compatible = "qcom,msm-cam-smmu-cb";
+			iommus = <&mmss_smmu 0xa01>;
+			label = "camera_fd";
+		};
+
+		msm_cam_smmu_cb4 {
+			compatible = "qcom,msm-cam-smmu-cb";
+			iommus = <&mmss_smmu 0x800>;
+			label = "jpeg_enc0";
+		};
+
+		msm_cam_smmu_cb5 {
+			compatible = "qcom,msm-cam-smmu-cb";
+			iommus = <&mmss_smmu 0x801>;
+			label = "jpeg_dma";
+		};
+	};
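
Each msm-cam-smmu-cb child binds one camera block to stream IDs on mmss_smmu: the vfe bank claims four SIDs (0xc00-0xc03), presumably one per VFE read/write port, while single-engine blocks get one each. Adding another bank would follow the same shape (hypothetical label and SID):

	msm_cam_smmu_cb6 {
		compatible = "qcom,msm-cam-smmu-cb";
		iommus = <&mmss_smmu 0x802>;	/* hypothetical stream ID */
		label = "jpeg_enc1";
	};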
+
+	qcom,fd@caa4000 {
+		cell-index = <0>;
+		compatible = "qcom,face-detection";
+		reg = <0xcaa4000 0x800>,
+			<0xcaa5000 0x400>,
+			<0xca80000 0x3000>;
+		reg-names = "fd_core", "fd_misc", "fd_vbif";
+		interrupts = <0 293 0>;
+		interrupt-names = "fd";
+		smmu-vdd-supply = <&gdsc_bimc_smmu>;
+		camss-vdd-supply = <&gdsc_camss_top>;
+		vdd-supply = <&gdsc_cpp>;
+		qcom,vdd-names = "smmu-vdd", "camss-vdd", "vdd";
+		clocks = <&clock_gcc clk_mmssnoc_axi_clk>,
+			<&clock_mmss clk_mmss_mnoc_ahb_clk>,
+			<&clock_mmss clk_mmss_bimc_smmu_ahb_clk>,
+			<&clock_mmss clk_mmss_bimc_smmu_axi_clk>,
+			<&clock_mmss clk_mmss_camss_ahb_clk>,
+			<&clock_mmss clk_mmss_camss_top_ahb_clk>,
+			<&clock_mmss clk_mmss_fd_core_clk>,
+			<&clock_mmss clk_mmss_fd_core_uar_clk>,
+			<&clock_mmss clk_mmss_fd_ahb_clk>,
+			<&clock_mmss clk_mmss_camss_cpp_axi_clk>,
+			<&clock_mmss clk_mmss_camss_cpp_vbif_ahb_clk>;
+		clock-names = "mmssnoc_axi",
+			"mmss_mnoc_ahb_clk",
+			"mmss_bimc_smmu_ahb_clk",
+			"mmss_bimc_smmu_axi_clk",
+			"mmss_camss_ahb_clk",
+			"mmss_camss_top_ahb_clk",
+			"mmss_fd_core_clk",
+			"mmss_fd_core_uar_clk",
+			"mmss_fd_ahb_clk",
+			"mmss_camss_cpp_axi_clk",
+			"mmss_camss_cpp_vbif_ahb_clk";
+		qcom,clock-rates =
+			<0 0 0 0 0 0 404000000 0 0 0 0>,
+			<0 0 0 0 0 0 100000000 0 0 0 0>,
+			<0 0 0 0 0 0 404000000 0 0 0 0>,
+			<0 0 0 0 0 0 404000000 0 0 0 0>;
+		qcom,msm-bus,name = "msm_camera_fd";
+		qcom,msm-bus,num-cases = <4>;
+		qcom,msm-bus,num-paths = <1>;
+		qcom,msm-bus,vectors-KBps = <106 512 0 0>,
+			<106 512 1625 0>,
+			<106 512 2995 0>,
+			<106 512 7200 0>;
+		qcom,fd-vbif-reg-settings = <0x20 0x10000000 0x30000000>,
+			<0x24 0x10000000 0x30000000>,
+			<0x28 0x10000000 0x30000000>,
+			<0x2c 0x10000000 0x30000000>;
+		qcom,fd-misc-reg-settings = <0x20 0x2 0x3>,
+			<0x24 0x2 0x3>;
+		status = "ok";
+	};
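
The face-detection node ties four qcom,msm-bus cases to four rows of qcom,clock-rates, so a single case index moves core clock and bandwidth together; each row sets only mmss_fd_core_clk and leaves the rest enable-only. With the same <master slave ab ib> reading as above, the cases scale the average-bandwidth vote on master 106:

	qcom,msm-bus,vectors-KBps = <106 512 0 0>,	/* case 0: off           */
		<106 512 1625 0>,			/* case 1: ab 1625 KBps  */
		<106 512 2995 0>,			/* case 2: ab 2995 KBps  */
		<106 512 7200 0>;			/* case 3: ab 7200 KBps  */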
+
+	qcom,cpp@ca04000 {
+		cell-index = <0>;
+		compatible = "qcom,cpp";
+		reg = <0xca04000 0x100>,
+			<0xca80000 0x3000>,
+			<0xca18000 0x3000>,
+			<0xc8c36D4 0x4>;
+		reg-names = "cpp", "cpp_vbif", "cpp_hw", "camss_cpp";
+		interrupts = <0 294 0>;
+		interrupt-names = "cpp";
+		smmu-vdd-supply = <&gdsc_bimc_smmu>;
+		camss-vdd-supply = <&gdsc_camss_top>;
+		vdd-supply = <&gdsc_cpp>;
+		qcom,vdd-names = "smmu-vdd", "camss-vdd", "vdd";
+		clocks = <&clock_gcc clk_mmssnoc_axi_clk>,
+			<&clock_mmss clk_mmss_mnoc_ahb_clk>,
+			<&clock_mmss clk_mmss_camss_ahb_clk>,
+			<&clock_mmss clk_mmss_camss_top_ahb_clk>,
+			<&clock_mmss clk_cpp_clk_src>,
+			<&clock_mmss clk_mmss_camss_cpp_clk>,
+			<&clock_mmss clk_mmss_camss_cpp_ahb_clk>,
+			<&clock_mmss clk_mmss_camss_cpp_axi_clk>,
+			<&clock_mmss clk_mmss_camss_micro_ahb_clk>,
+			<&clock_mmss clk_mmss_bimc_smmu_axi_clk>,
+			<&clock_mmss clk_mmss_camss_cpp_vbif_ahb_clk>;
+		clock-names = "mmssnoc_axi_clk",
+			"mnoc_ahb_clk",
+			"camss_ahb_clk", "camss_top_ahb_clk",
+			"cpp_src_clk",
+			"cpp_core_clk", "camss_cpp_ahb_clk",
+			"camss_cpp_axi_clk", "micro_iface_clk",
+			"mmss_smmu_axi_clk", "cpp_vbif_ahb_clk";
+		qcom,clock-rates = <0 0 0 0 200000000 200000000 0 0 0 0 0>;
+		qcom,min-clock-rate = <200000000>;
+		qcom,bus-master = <1>;
+		qcom,vbif-qos-setting = <0x550 0x33333333>,
+			<0x554 0x03333333>,
+			<0x558 0x33333333>,
+			<0x55c 0x03333333>,
+			<0x560 0x33333333>,
+			<0x564 0x03333333>,
+			<0x568 0x33333333>,
+			<0x56c 0x03333333>,
+			<0x570 0x33333333>,
+			<0x574 0x03333333>,
+			<0x578 0x33333333>,
+			<0x57c 0x03333333>,
+			<0x580 0x33333333>,
+			<0x584 0x03333333>,
+			<0x588 0x33333333>,
+			<0x58c 0x03333333>;
+		status = "ok";
+		qcom,msm-bus,name = "msm_camera_cpp";
+		qcom,msm-bus,num-cases = <2>;
+		qcom,msm-bus,num-paths = <1>;
+		qcom,msm-bus,vectors-KBps =
+			<106 512 0 0>,
+			<106 512 0 0>;
+		qcom,msm-bus-vector-dyn-vote;
+		resets = <&clock_mmss CAMSS_MICRO_BCR>;
+		reset-names = "micro_iface_reset";
+		qcom,src-clock-rates = <100000000 200000000 576000000
+			600000000>;
+		qcom,micro-reset;
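+		/*
+		 * Word offsets into the CPP firmware frame payload; these
+		 * values are tied to the CPP firmware version used on this
+		 * target.
+		 */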
+		qcom,cpp-fw-payload-info {
+			qcom,stripe-base = <790>;
+			qcom,plane-base = <715>;
+			qcom,stripe-size = <63>;
+			qcom,plane-size = <25>;
+			qcom,fe-ptr-off = <11>;
+			qcom,we-ptr-off = <23>;
+			qcom,ref-fe-ptr-off = <17>;
+			qcom,ref-we-ptr-off = <36>;
+			qcom,we-meta-ptr-off = <42>;
+			qcom,fe-mmu-pf-ptr-off = <7>;
+			qcom,ref-fe-mmu-pf-ptr-off = <10>;
+			qcom,we-mmu-pf-ptr-off = <13>;
+			qcom,dup-we-mmu-pf-ptr-off = <18>;
+			qcom,ref-we-mmu-pf-ptr-off = <23>;
+			qcom,set-group-buffer-len = <135>;
+			qcom,dup-frame-indicator-off = <70>;
+		};
+	};
+
+	qcom,ispif@ca31000 {
+		cell-index = <0>;
+		compatible = "qcom,ispif-v3.0", "qcom,ispif";
+		reg = <0xca31000 0xc00>,
+			<0xca00020 0x4>;
+		reg-names = "ispif", "csi_clk_mux";
+		interrupts = <0 309 0>;
+		interrupt-names = "ispif";
+		qcom,num-isps = <0x2>;
+		camss-vdd-supply = <&gdsc_camss_top>;
+		vfe0-vdd-supply = <&gdsc_vfe0>;
+		vfe1-vdd-supply = <&gdsc_vfe1>;
+		qcom,vdd-names = "camss-vdd", "vfe0-vdd",
+				"vfe1-vdd";
+		qcom,clock-cntl-support;
+		clocks = <&clock_gcc clk_mmssnoc_axi_clk>,
+			<&clock_mmss clk_mmss_mnoc_ahb_clk>,
+			<&clock_mmss clk_mmss_camss_ahb_clk>,
+			<&clock_mmss clk_mmss_camss_top_ahb_clk>,
+			<&clock_mmss clk_mmss_camss_ispif_ahb_clk>,
+			<&clock_mmss clk_csi0_clk_src>,
+			<&clock_mmss clk_csi1_clk_src>,
+			<&clock_mmss clk_csi2_clk_src>,
+			<&clock_mmss clk_csi3_clk_src>,
+			<&clock_mmss clk_mmss_camss_csi0rdi_clk>,
+			<&clock_mmss clk_mmss_camss_csi1rdi_clk>,
+			<&clock_mmss clk_mmss_camss_csi2rdi_clk>,
+			<&clock_mmss clk_mmss_camss_csi3rdi_clk>,
+			<&clock_mmss clk_mmss_camss_csi0pix_clk>,
+			<&clock_mmss clk_mmss_camss_csi1pix_clk>,
+			<&clock_mmss clk_mmss_camss_csi2pix_clk>,
+			<&clock_mmss clk_mmss_camss_csi3pix_clk>,
+			<&clock_mmss clk_mmss_camss_csi0_clk>,
+			<&clock_mmss clk_mmss_camss_csi1_clk>,
+			<&clock_mmss clk_mmss_camss_csi2_clk>,
+			<&clock_mmss clk_mmss_camss_csi3_clk>,
+			<&clock_mmss clk_vfe0_clk_src>,
+			<&clock_mmss clk_mmss_camss_vfe0_clk>,
+			<&clock_mmss clk_mmss_camss_csi_vfe0_clk>,
+			<&clock_mmss clk_vfe1_clk_src>,
+			<&clock_mmss clk_mmss_camss_vfe1_clk>,
+			<&clock_mmss clk_mmss_camss_csi_vfe1_clk>;
+		clock-names = "mmssnoc_axi", "mnoc_ahb_clk",
+			"camss_ahb_clk",
+			"camss_top_ahb_clk", "ispif_ahb_clk",
+			"csi0_src_clk", "csi1_src_clk",
+			"csi2_src_clk", "csi3_src_clk",
+			"csi0_rdi_clk", "csi1_rdi_clk",
+			"csi2_rdi_clk", "csi3_rdi_clk",
+			"csi0_pix_clk", "csi1_pix_clk",
+			"csi2_pix_clk", "csi3_pix_clk",
+			"camss_csi0_clk", "camss_csi1_clk",
+			"camss_csi2_clk", "camss_csi3_clk",
+			"vfe0_clk_src",
+			"camss_vfe_vfe0_clk",
+			"camss_csi_vfe0_clk",
+			"vfe1_clk_src",
+			"camss_vfe_vfe1_clk",
+			"camss_csi_vfe1_clk";
+		qcom,clock-rates = <0 0 0 0 0
+			0 0 0 0
+			0 0 0 0
+			0 0 0 0
+			0 0 0 0
+			0 0 0
+			0 0 0>;
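+		/*
+		 * INIT_RATE clocks are set to their table rate at init;
+		 * NO_SET_RATE clocks are only enabled, never rate-set.
+		 */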
+		qcom,clock-control = "INIT_RATE", "NO_SET_RATE",
+			"NO_SET_RATE", "NO_SET_RATE",
+			"NO_SET_RATE",
+			"INIT_RATE", "INIT_RATE",
+			"INIT_RATE", "INIT_RATE",
+			"NO_SET_RATE", "NO_SET_RATE",
+			"NO_SET_RATE", "NO_SET_RATE",
+			"NO_SET_RATE", "NO_SET_RATE",
+			"NO_SET_RATE", "NO_SET_RATE",
+			"NO_SET_RATE", "NO_SET_RATE",
+			"NO_SET_RATE", "NO_SET_RATE",
+			"INIT_RATE",
+			"NO_SET_RATE", "NO_SET_RATE",
+			"INIT_RATE",
+			"NO_SET_RATE", "NO_SET_RATE";
+		status = "ok";
+	};
+
+	vfe0: qcom,vfe0@ca10000 {
+		cell-index = <0>;
+		compatible = "qcom,vfe48";
+		reg = <0xca10000 0x4000>,
+			<0xca40000 0x3000>;
+		reg-names = "vfe", "vfe_vbif";
+		interrupts = <0 314 0>;
+		interrupt-names = "vfe";
+		vdd-supply = <&gdsc_vfe0>;
+		camss-vdd-supply = <&gdsc_camss_top>;
+		smmu-vdd-supply = <&gdsc_bimc_smmu>;
+		qcom,vdd-names = "vdd", "camss-vdd", "smmu-vdd";
+		clocks = <&clock_gcc clk_mmssnoc_axi_clk>,
+			<&clock_mmss clk_mmss_mnoc_ahb_clk>,
+			<&clock_mmss clk_mmss_bimc_smmu_ahb_clk>,
+			<&clock_mmss clk_mmss_bimc_smmu_axi_clk>,
+			<&clock_mmss clk_mmss_camss_ahb_clk>,
+			<&clock_mmss clk_mmss_camss_top_ahb_clk>,
+			<&clock_mmss clk_vfe0_clk_src>,
+			<&clock_mmss clk_mmss_camss_vfe0_clk>,
+			<&clock_mmss clk_mmss_camss_vfe0_stream_clk>,
+			<&clock_mmss clk_mmss_camss_vfe0_ahb_clk>,
+			<&clock_mmss clk_mmss_camss_vfe_vbif_ahb_clk>,
+			<&clock_mmss clk_mmss_camss_vfe_vbif_axi_clk>,
+			<&clock_mmss clk_mmss_camss_csi_vfe0_clk>;
+		clock-names = "mmssnoc_axi", "mnoc_ahb_clk",
+			"bimc_smmu_ahb_clk", "bimc_smmu_axi_clk",
+			"camss_ahb_clk", "camss_top_ahb_clk", "vfe_clk_src",
+			"camss_vfe_clk", "camss_vfe_stream_clk",
+			"camss_vfe_ahb_clk", "camss_vfe_vbif_ahb_clk",
+			"camss_vfe_vbif_axi_clk",
+			"camss_csi_vfe_clk";
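+		/*
+		 * One row per VFE performance level: vfe_clk_src at 480,
+		 * 576 and 600 MHz respectively.
+		 */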
+		qcom,clock-rates = <0 0 0 0 0 0 480000000 0 0 0 0 0 0
+					0 0 0 0 0 0 576000000 0 0 0 0 0 0
+					0 0 0 0 0 0 600000000 0 0 0 0 0 0>;
+		status = "ok";
+		qos-entries = <8>;
+		qos-regs = <0x404 0x408 0x40c 0x410 0x414 0x418
+			0x41c 0x420>;
+		qos-settings = <0xaaa9aaa9
+			0xaaa9aaa9
+			0xaaa9aaa9
+			0xaaa9aaa9
+			0xaaa9aaa9
+			0xaaa9aaa9
+			0xaaa9aaa9
+			0xaaa9aaa9>;
+		vbif-entries = <1>;
+		vbif-regs = <0x124>;
+		vbif-settings = <0x3>;
+		ds-entries = <17>;
+		ds-regs = <0x424 0x428 0x42c 0x430 0x434
+			0x438 0x43c 0x440 0x444 0x448 0x44c
+			0x450 0x454 0x458 0x45c 0x460 0x464>;
+		ds-settings = <0xcccc0011
+			0xcccc0011
+			0xcccc0011
+			0xcccc0011
+			0xcccc0011
+			0xcccc0011
+			0xcccc0011
+			0xcccc0011
+			0xcccc0011
+			0xcccc0011
+			0xcccc0011
+			0xcccc0011
+			0xcccc0011
+			0xcccc0011
+			0xcccc0011
+			0xcccc0011
+			0x40000103>;
+		qcom,msm-bus,name = "msm_camera_vfe";
+		qcom,msm-bus,num-cases = <2>;
+		qcom,msm-bus,num-paths = <1>;
+		qcom,msm-bus,vectors-KBps =
+			<29 512 0 0>,
+			<29 512 100000000 100000000>;
+		qcom,msm-bus-vector-dyn-vote;
+	};
+
+	vfe1: qcom,vfe1@ca14000 {
+		cell-index = <1>;
+		compatible = "qcom,vfe48";
+		reg = <0xca14000 0x4000>,
+			<0xca40000 0x3000>;
+		reg-names = "vfe", "vfe_vbif";
+		interrupts = <0 315 0>;
+		interrupt-names = "vfe";
+		vdd-supply = <&gdsc_vfe1>;
+		camss-vdd-supply = <&gdsc_camss_top>;
+		smmu-vdd-supply = <&gdsc_bimc_smmu>;
+		qcom,vdd-names = "vdd", "camss-vdd", "smmu-vdd";
+		clocks = <&clock_gcc clk_mmssnoc_axi_clk>,
+			<&clock_mmss clk_mmss_mnoc_ahb_clk>,
+			<&clock_mmss clk_mmss_bimc_smmu_ahb_clk>,
+			<&clock_mmss clk_mmss_bimc_smmu_axi_clk>,
+			<&clock_mmss clk_mmss_camss_ahb_clk>,
+			<&clock_mmss clk_mmss_camss_top_ahb_clk>,
+			<&clock_mmss clk_vfe1_clk_src>,
+			<&clock_mmss clk_mmss_camss_vfe1_clk>,
+			<&clock_mmss clk_mmss_camss_vfe1_stream_clk>,
+			<&clock_mmss clk_mmss_camss_vfe1_ahb_clk>,
+			<&clock_mmss clk_mmss_camss_vfe_vbif_ahb_clk>,
+			<&clock_mmss clk_mmss_camss_vfe_vbif_axi_clk>,
+			<&clock_mmss clk_mmss_camss_csi_vfe1_clk>;
+		clock-names = "mmssnoc_axi", "mnoc_ahb_clk",
+			"bimc_smmu_ahb_clk", "bimc_smmu_axi_clk",
+			"camss_ahb_clk", "camss_top_ahb_clk", "vfe_clk_src",
+			"camss_vfe_clk", "camss_vfe_stream_clk",
+			"camss_vfe_ahb_clk", "camss_vfe_vbif_ahb_clk",
+			"camss_vfe_vbif_axi_clk",
+			"camss_csi_vfe_clk";
+		qcom,clock-rates = <0 0 0 0 0 0 480000000 0 0 0 0 0 0
+					0 0 0 0 0 0 576000000 0 0 0 0 0 0
+					0 0 0 0 0 0 600000000 0 0 0 0 0 0>;
+		status = "ok";
+		qos-entries = <8>;
+		qos-regs = <0x404 0x408 0x40c 0x410 0x414 0x418
+			0x41c 0x420>;
+		qos-settings = <0xaaa9aaa9
+			0xaaa9aaa9
+			0xaaa9aaa9
+			0xaaa9aaa9
+			0xaaa9aaa9
+			0xaaa9aaa9
+			0xaaa9aaa9
+			0xaaa9aaa9>;
+		vbif-entries = <1>;
+		vbif-regs = <0x124>;
+		vbif-settings = <0x3>;
+		ds-entries = <17>;
+		ds-regs = <0x424 0x428 0x42c 0x430 0x434
+			0x438 0x43c 0x440 0x444 0x448 0x44c
+			0x450 0x454 0x458 0x45c 0x460 0x464>;
+		ds-settings = <0xcccc0011
+			0xcccc0011
+			0xcccc0011
+			0xcccc0011
+			0xcccc0011
+			0xcccc0011
+			0xcccc0011
+			0xcccc0011
+			0xcccc0011
+			0xcccc0011
+			0xcccc0011
+			0xcccc0011
+			0xcccc0011
+			0xcccc0011
+			0xcccc0011
+			0xcccc0011
+			0x40000103>;
+		qcom,msm-bus,name = "msm_camera_vfe";
+		qcom,msm-bus,num-cases = <2>;
+		qcom,msm-bus,num-paths = <1>;
+		qcom,msm-bus,vectors-KBps =
+			<29 512 0 0>,
+			<29 512 100000000 100000000>;
+		qcom,msm-bus-vector-dyn-vote;
+	};
+
+	qcom,vfe {
+		compatible = "qcom,vfe";
+		num_child = <2>;
+	};
+
+	cci: qcom,cci@ca0c000 {
+		cell-index = <0>;
+		compatible = "qcom,cci";
+		reg = <0xca0c000 0x4000>;
+		#address-cells = <1>;
+		#size-cells = <0>;
+		reg-names = "cci";
+		interrupts = <0 295 0>;
+		interrupt-names = "cci";
+		status = "ok";
+		mmagic-supply = <&gdsc_bimc_smmu>;
+		gdscr-supply = <&gdsc_camss_top>;
+		qcom,cam-vreg-name = "mmagic", "gdscr";
+		clocks = <&clock_gcc clk_mmssnoc_axi_clk>,
+			<&clock_mmss clk_mmss_mnoc_ahb_clk>,
+			<&clock_mmss clk_mmss_bimc_smmu_ahb_clk>,
+			<&clock_mmss clk_mmss_bimc_smmu_axi_clk>,
+			<&clock_mmss clk_mmss_camss_ahb_clk>,
+			<&clock_mmss clk_mmss_camss_top_ahb_clk>,
+			<&clock_mmss clk_cci_clk_src>,
+			<&clock_mmss clk_mmss_camss_cci_ahb_clk>,
+			<&clock_mmss clk_mmss_camss_cci_clk>;
+		clock-names = "mmssnoc_axi", "mnoc_ahb", "smmu_ahb", "smmu_axi",
+			"camss_ahb_clk", "camss_top_ahb_clk",
+			"cci_src_clk", "cci_ahb_clk", "camss_cci_clk";
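+		/* Two cci_src_clk operating points: 19.2 MHz and 37.5 MHz. */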
+		qcom,clock-rates = <0 0 0 0 0 0 19200000 0 0>,
+			<0 0 0 0 0 0 37500000 0 0>;
+		pinctrl-names = "cci_default", "cci_suspend";
+		pinctrl-0 = <&cci0_active &cci1_active>;
+		pinctrl-1 = <&cci0_suspend &cci1_suspend>;
+		gpios = <&tlmm 17 0>,
+			<&tlmm 18 0>,
+			<&tlmm 19 0>,
+			<&tlmm 20 0>;
+		qcom,gpio-tbl-num = <0 1 2 3>;
+		qcom,gpio-tbl-flags = <1 1 1 1>;
+		qcom,gpio-tbl-label = "CCI_I2C_DATA0",
+				      "CCI_I2C_CLK0",
+				      "CCI_I2C_DATA1",
+				      "CCI_I2C_CLK1";
+		i2c_freq_100Khz: qcom,i2c_standard_mode {
+			status = "disabled";
+		};
+		i2c_freq_400Khz: qcom,i2c_fast_mode {
+			status = "disabled";
+		};
+		i2c_freq_custom: qcom,i2c_custom_mode {
+			status = "disabled";
+		};
+		i2c_freq_1Mhz: qcom,i2c_fast_plus_mode {
+			status = "disabled";
+		};
+	};
+
+	qcom,jpeg@ca1c000 {
+		cell-index = <0>;
+		compatible = "qcom,jpeg";
+		reg = <0xca1c000 0x4000>,
+			<0xca60000 0x3000>;
+		reg-names = "jpeg_hw", "jpeg_vbif";
+		interrupts = <0 316 0>;
+		interrupt-names = "jpeg";
+		smmu-vdd-supply = <&gdsc_bimc_smmu>;
+		camss-vdd-supply = <&gdsc_camss_top>;
+		qcom,vdd-names = "smmu-vdd", "camss-vdd";
+		clock-names = "mmssnoc_axi",
+			"mmss_mnoc_ahb_clk",
+			"mmss_bimc_smmu_ahb_clk",
+			"mmss_bimc_smmu_axi_clk",
+			"mmss_camss_ahb_clk",
+			"mmss_camss_top_ahb_clk",
+			"core_clk",
+			"mmss_camss_jpeg_ahb_clk",
+			"mmss_camss_jpeg_axi_clk";
+		clocks = <&clock_gcc clk_mmssnoc_axi_clk>,
+			<&clock_mmss clk_mmss_mnoc_ahb_clk>,
+			<&clock_mmss clk_mmss_bimc_smmu_ahb_clk>,
+			<&clock_mmss clk_mmss_bimc_smmu_axi_clk>,
+			<&clock_mmss clk_mmss_camss_ahb_clk>,
+			<&clock_mmss clk_mmss_camss_top_ahb_clk>,
+			<&clock_mmss clk_mmss_camss_jpeg0_vote_clk>,
+			<&clock_mmss clk_mmss_camss_jpeg_ahb_clk>,
+			<&clock_mmss clk_mmss_camss_jpeg_axi_clk>;
+		qcom,clock-rates = <0 0 0 0 0 0 480000000 0 0>;
+		qcom,vbif-reg-settings = <0x4 0x1>;
+		qcom,prefetch-reg-settings = <0x30c 0x1111>,
+			<0x318 0x31>,
+			<0x324 0x31>,
+			<0x330 0x31>,
+			<0x33c 0x0>;
+		qcom,msm-bus,name = "msm_camera_jpeg0";
+		qcom,msm-bus,num-cases = <2>;
+		qcom,msm-bus,num-paths = <1>;
+		qcom,msm-bus,vectors-KBps = <62 512 0 0>,
+			<62 512 1920000 2880000>;
+		status = "ok";
+	};
+
+	qcom,jpeg@caa0000 {
+		cell-index = <3>;
+		compatible = "qcom,jpegdma";
+		reg = <0xcaa0000 0x4000>,
+			<0xca60000 0x3000>;
+		reg-names = "jpeg_hw", "jpeg_vbif";
+		interrupts = <0 304 0>;
+		interrupt-names = "jpeg";
+		smmu-vdd-supply = <&gdsc_bimc_smmu>;
+		camss-vdd-supply = <&gdsc_camss_top>;
+		qcom,vdd-names = "smmu-vdd", "camss-vdd";
+		clock-names = "mmssnoc_axi",
+			"mmss_mnoc_ahb_clk",
+			"mmss_bimc_smmu_ahb_clk",
+			"mmss_bimc_smmu_axi_clk",
+			"mmss_camss_ahb_clk",
+			"mmss_camss_top_ahb_clk",
+			"core_clk",
+			"mmss_camss_jpeg_ahb_clk",
+			"mmss_camss_jpeg_axi_clk";
+		clocks = <&clock_gcc clk_mmssnoc_axi_clk>,
+			<&clock_mmss clk_mmss_mnoc_ahb_clk>,
+			<&clock_mmss clk_mmss_bimc_smmu_ahb_clk>,
+			<&clock_mmss clk_mmss_bimc_smmu_axi_clk>,
+			<&clock_mmss clk_mmss_camss_ahb_clk>,
+			<&clock_mmss clk_mmss_camss_top_ahb_clk>,
+			<&clock_mmss clk_mmss_camss_jpeg0_dma_vote_clk>,
+			<&clock_mmss clk_mmss_camss_jpeg_ahb_clk>,
+			<&clock_mmss clk_mmss_camss_jpeg_axi_clk>;
+		qcom,clock-rates = <0 0 0 0 0 0 480000000 0 0>;
+		qcom,vbif-reg-settings = <0x4 0x1>;
+		qcom,prefetch-reg-settings = <0x18c 0x11>,
+			<0x1a0 0x31>,
+			<0x1b0 0x31>;
+		qcom,msm-bus,name = "msm_camera_jpeg_dma";
+		qcom,msm-bus,num-cases = <2>;
+		qcom,msm-bus,num-paths = <1>;
+		qcom,msm-bus,vectors-KBps = <62 512 0 0>,
+			<62 512 1920000 2880000>;
+		qcom,max-ds-factor = <128>;
+		status = "ok";
+	};
+};
+
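+/*
+ * CCI I2C timing overrides, enabling the modes declared disabled under
+ * the CCI controller. Values are in cycles of qcom,cci-clk-src: for the
+ * standard mode below, thigh + tlow = 201 + 174 = 375 cycles at
+ * 37.5 MHz, i.e. a 10 us SCL period (100 kHz nominal).
+ */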
+&i2c_freq_100Khz {
+	qcom,hw-thigh = <201>;
+	qcom,hw-tlow = <174>;
+	qcom,hw-tsu-sto = <204>;
+	qcom,hw-tsu-sta = <231>;
+	qcom,hw-thd-dat = <22>;
+	qcom,hw-thd-sta = <162>;
+	qcom,hw-tbuf = <227>;
+	qcom,hw-scl-stretch-en = <0>;
+	qcom,hw-trdhld = <6>;
+	qcom,hw-tsp = <3>;
+	qcom,cci-clk-src = <37500000>;
+	status = "ok";
+};
+
+&i2c_freq_400Khz {
+	qcom,hw-thigh = <38>;
+	qcom,hw-tlow = <56>;
+	qcom,hw-tsu-sto = <40>;
+	qcom,hw-tsu-sta = <40>;
+	qcom,hw-thd-dat = <22>;
+	qcom,hw-thd-sta = <35>;
+	qcom,hw-tbuf = <62>;
+	qcom,hw-scl-stretch-en = <0>;
+	qcom,hw-trdhld = <6>;
+	qcom,hw-tsp = <3>;
+	qcom,cci-clk-src = <37500000>;
+	status = "ok";
+};
+
+&i2c_freq_custom {
+	qcom,hw-thigh = <38>;
+	qcom,hw-tlow = <56>;
+	qcom,hw-tsu-sto = <40>;
+	qcom,hw-tsu-sta = <40>;
+	qcom,hw-thd-dat = <22>;
+	qcom,hw-thd-sta = <35>;
+	qcom,hw-tbuf = <62>;
+	qcom,hw-scl-stretch-en = <1>;
+	qcom,hw-trdhld = <6>;
+	qcom,hw-tsp = <3>;
+	qcom,cci-clk-src = <37500000>;
+	status = "ok";
+};
+
+&i2c_freq_1Mhz {
+	qcom,hw-thigh = <16>;
+	qcom,hw-tlow = <22>;
+	qcom,hw-tsu-sto = <17>;
+	qcom,hw-tsu-sta = <18>;
+	qcom,hw-thd-dat = <16>;
+	qcom,hw-thd-sta = <15>;
+	qcom,hw-tbuf = <24>;
+	qcom,hw-scl-stretch-en = <0>;
+	qcom,hw-trdhld = <3>;
+	qcom,hw-tsp = <3>;
+	qcom,cci-clk-src = <37500000>;
+	status = "ok";
+};
diff -Nruw linux-4.4.115-fbx/arch/arm/boot/dts/qcom./msm8998-cdp.dtsi linux-4.4.115-fbx/arch/arm/boot/dts/qcom/msm8998-cdp.dtsi
--- linux-4.4.115-fbx/arch/arm/boot/dts/qcom./msm8998-cdp.dtsi	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/arch/arm/boot/dts/qcom/msm8998-cdp.dtsi	2019-10-29 09:26:22.917196073 +0100
@@ -0,0 +1,348 @@
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include "msm8998-audio-wcd.dtsi"
+#include "msm-smb138x.dtsi"
+
+&vendor {
+	bluetooth: bt_wcn3990 {
+		compatible = "qca,wcn3990";
+		qca,bt-vdd-io-supply = <&pm8998_s3>;
+		qca,bt-vdd-xtal-supply = <&pm8998_s5>;
+		qca,bt-vdd-core-supply = <&pm8998_l7>;
+		qca,bt-vdd-pa-supply = <&pm8998_l17>;
+		qca,bt-vdd-ldo-supply = <&pm8998_l25>;
+		qca,bt-chip-pwd-supply = <&pmi8998_bob_pin1>;
+		clocks = <&clock_gcc clk_rf_clk2_pin>;
+		clock-names = "rf_clk2";
+
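+		/* Supply constraints are <min max> in microvolts. */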
+		qca,bt-vdd-io-voltage-level = <1352000 1352000>;
+		qca,bt-vdd-xtal-voltage-level = <2040000 2040000>;
+		qca,bt-vdd-core-voltage-level = <1800000 1800000>;
+		qca,bt-vdd-pa-voltage-level = <1304000 1304000>;
+		qca,bt-vdd-ldo-voltage-level = <3312000 3312000>;
+		qca,bt-chip-pwd-voltage-level = <3600000 3600000>;
+
+		qca,bt-vdd-io-current-level = <1>; /* LPM/PFM */
+		qca,bt-vdd-xtal-current-level = <1>; /* LPM/PFM */
+		qca,bt-vdd-core-current-level = <1>; /* LPM/PFM */
+		qca,bt-vdd-pa-current-level = <1>; /* LPM/PFM */
+		qca,bt-vdd-ldo-current-level = <1>; /* LPM/PFM */
+	};
+};
+
+&blsp1_uart3_hs {
+	status = "ok";
+};
+
+&ufsphy1 {
+	vdda-phy-supply = <&pm8998_l1>;
+	vdda-pll-supply = <&pm8998_l2>;
+	vddp-ref-clk-supply = <&pm8998_l26>;
+	vdda-phy-max-microamp = <51400>;
+	vdda-pll-max-microamp = <14600>;
+	vddp-ref-clk-max-microamp = <100>;
+	vddp-ref-clk-always-on;
+	status = "ok";
+};
+
+&ufs1 {
+	vdd-hba-supply = <&gdsc_ufs>;
+	vdd-hba-fixed-regulator;
+	vcc-supply = <&pm8998_l20>;
+	vccq-supply = <&pm8998_l26>;
+	vccq2-supply = <&pm8998_s4>;
+	vcc-max-microamp = <750000>;
+	vccq-max-microamp = <560000>;
+	vccq2-max-microamp = <750000>;
+	status = "ok";
+};
+
+&ufs_ice {
+	status = "ok";
+};
+
+&sdhc_2 {
+	vdd-supply = <&pm8998_l21>;
+	qcom,vdd-voltage-level = <2950000 2960000>;
+	qcom,vdd-current-level = <200 800000>;
+
+	vdd-io-supply = <&pm8998_l13>;
+	qcom,vdd-io-voltage-level = <1808000 2960000>;
+	qcom,vdd-io-current-level = <200 22000>;
+
+	pinctrl-names = "active", "sleep";
+	pinctrl-0 = <&sdc2_clk_on  &sdc2_cmd_on &sdc2_data_on &sdc2_cd_on>;
+	pinctrl-1 = <&sdc2_clk_off &sdc2_cmd_off &sdc2_data_off &sdc2_cd_off>;
+
+	qcom,clk-rates = <400000 20000000 25000000
+				50000000 100000000 200000000>;
+	qcom,bus-speed-mode = "SDR12", "SDR25", "SDR50", "DDR50", "SDR104";
+
+	cd-gpios = <&tlmm 95 0x1>; /* card detect, active low */
+
+	status = "ok";
+};
+
+&uartblsp2dm1 {
+	status = "ok";
+	pinctrl-names = "default";
+	pinctrl-0 = <&uart_console_active>;
+};
+
+&pm8998_gpios {
+	/* GPIO 5 for Home Key */
+	gpio@c400 {
+		status = "okay";
+		qcom,mode = <0>;		/* Input */
+		qcom,pull = <0>;		/* Pull up 30 uA */
+		qcom,vin-sel = <0>;		/* VIN0 */
+		qcom,src-sel = <0>;		/* GPIO */
+		qcom,out-strength = <1>;	/* Low drive strength */
+	};
+
+	/* GPIO 6 for Vol+ Key */
+	gpio@c500 {
+		status = "okay";
+		qcom,mode = <0>;
+		qcom,pull = <0>;
+		qcom,vin-sel = <0>;
+		qcom,src-sel = <0>;
+		qcom,out-strength = <1>;
+	};
+
+	/* GPIO 7 for Snapshot Key */
+	gpio@c600 {
+		status = "okay";
+		qcom,mode = <0>;
+		qcom,pull = <0>;
+		qcom,vin-sel = <0>;
+		qcom,src-sel = <0>;
+		qcom,out-strength = <1>;
+	};
+
+	/* GPIO 8 for Focus Key */
+	gpio@c700 {
+		status = "okay";
+		qcom,mode = <0>;
+		qcom,pull = <0>;
+		qcom,vin-sel = <0>;
+		qcom,src-sel = <0>;
+		qcom,out-strength = <1>;
+	};
+
+	gpio@cc00 { /* GPIO 13 */
+		qcom,mode = <1>;
+		qcom,output-type = <0>;
+		qcom,pull = <5>;
+		qcom,vin-sel = <0>;
+		qcom,out-strength = <1>;
+		qcom,src-sel = <3>;
+		qcom,master-en = <1>;
+		status = "okay";
+	};
+
+	gpio@d200 { /* GPIO 19 - wil6210 refclk3_en */
+		qcom,mode = <0>;		/* Input */
+		qcom,pull = <5>;		/* No Pull */
+		qcom,vin-sel = <1>;		/* VIN1 GPIO_MV */
+		qcom,src-sel = <0>;		/* GPIO */
+		qcom,invert = <0>;		/* Invert */
+		qcom,master-en = <1>;		/* Enable GPIO */
+		status = "okay";
+	};
+
+	/* GPIO 21 (NFC_CLK_REQ) */
+	gpio@d400 {
+		qcom,mode = <0>;
+		qcom,vin-sel = <1>;
+		qcom,src-sel = <0>;
+		qcom,master-en = <1>;
+		status = "okay";
+	};
+};
+
+&labibb {
+	status = "ok";
+	qcom,qpnp-labibb-mode = "lcd";
+};
+
+&pmi8998_wled {
+	qcom,led-strings-list = [00 01];
+};
+
+&pmi8998_charger {
+	qcom,batteryless-platform;
+};
+
+&pmi8998_haptics {
+	status = "okay";
+};
+
+&pm8998_vadc {
+	chan@83 {
+		label = "vph_pwr";
+		reg = <0x83>;
+		qcom,decimation = <2>;
+		qcom,pre-div-channel-scaling = <1>;
+		qcom,calibration-type = "absolute";
+		qcom,scale-function = <0>;
+		qcom,hw-settle-time = <0>;
+		qcom,fast-avg-setup = <0>;
+	};
+
+	chan@85 {
+		label = "vcoin";
+		reg = <0x85>;
+		qcom,decimation = <2>;
+		qcom,pre-div-channel-scaling = <1>;
+		qcom,calibration-type = "absolute";
+		qcom,scale-function = <0>;
+		qcom,hw-settle-time = <0>;
+		qcom,fast-avg-setup = <0>;
+	};
+
+	chan@4c {
+		label = "xo_therm";
+		reg = <0x4c>;
+		qcom,decimation = <2>;
+		qcom,pre-div-channel-scaling = <0>;
+		qcom,calibration-type = "ratiometric";
+		qcom,scale-function = <4>;
+		qcom,hw-settle-time = <2>;
+		qcom,fast-avg-setup = <0>;
+	};
+
+	chan@4d {
+		label = "msm_therm";
+		reg = <0x4d>;
+		qcom,decimation = <2>;
+		qcom,pre-div-channel-scaling = <0>;
+		qcom,calibration-type = "ratiometric";
+		qcom,scale-function = <2>;
+		qcom,hw-settle-time = <2>;
+		qcom,fast-avg-setup = <0>;
+	};
+
+	chan@51 {
+		label = "quiet_therm";
+		reg = <0x51>;
+		qcom,decimation = <2>;
+		qcom,pre-div-channel-scaling = <0>;
+		qcom,calibration-type = "ratiometric";
+		qcom,scale-function = <2>;
+		qcom,hw-settle-time = <2>;
+		qcom,fast-avg-setup = <0>;
+	};
+};
+
+&pm8998_adc_tm {
+	chan@83 {
+		label = "vph_pwr";
+		reg = <0x83>;
+		qcom,pre-div-channel-scaling = <1>;
+		qcom,calibration-type = "absolute";
+		qcom,scale-function = <0>;
+		qcom,hw-settle-time = <0>;
+		qcom,btm-channel-number = <0x60>;
+	};
+
+	chan@4d {
+		label = "msm_therm";
+		reg = <0x4d>;
+		qcom,pre-div-channel-scaling = <0>;
+		qcom,calibration-type = "ratiometric";
+		qcom,scale-function = <2>;
+		qcom,hw-settle-time = <2>;
+		qcom,btm-channel-number = <0x68>;
+		qcom,thermal-node;
+	};
+
+	chan@51 {
+		label = "quiet_therm";
+		reg = <0x51>;
+		qcom,pre-div-channel-scaling = <0>;
+		qcom,calibration-type = "ratiometric";
+		qcom,scale-function = <2>;
+		qcom,hw-settle-time = <2>;
+		qcom,btm-channel-number = <0x70>;
+		qcom,thermal-node;
+	};
+
+	chan@4c {
+		label = "xo_therm";
+		reg = <0x4c>;
+		qcom,pre-div-channel-scaling = <0>;
+		qcom,calibration-type = "ratiometric";
+		qcom,scale-function = <4>;
+		qcom,hw-settle-time = <2>;
+		qcom,btm-channel-number = <0x78>;
+		qcom,thermal-node;
+	};
+};
+
+&wil6210 {
+	status = "ok";
+};
+
+&snd_9335 {
+	qcom,mbhc-audio-jack-type = "6-pole-jack";
+};
+
+&snd_934x {
+	qcom,mbhc-audio-jack-type = "6-pole-jack";
+};
+
+&soc {
+	gpio_keys {
+		compatible = "gpio-keys";
+		input-name = "gpio-keys";
+		status = "okay";
+
+		home {
+			label = "home";
+			gpios = <&pm8998_gpios 5 0x1>;
+			linux,input-type = <1>;
+			linux,code = <102>; /* KEY_HOME */
+			gpio-key,wakeup;
+			debounce-interval = <15>;
+		};
+
+		vol_up {
+			label = "volume_up";
+			gpios = <&pm8998_gpios 6 0x1>;
+			linux,input-type = <1>;
+			linux,code = <115>; /* KEY_VOLUMEUP */
+			gpio-key,wakeup;
+			debounce-interval = <15>;
+		};
+
+		cam_snapshot {
+			label = "cam_snapshot";
+			gpios = <&pm8998_gpios 7 0x1>;
+			linux,input-type = <1>;
+			linux,code = <766>; /* KEY_CAMERA_SNAPSHOT (0x2fe) */
+			gpio-key,wakeup;
+			debounce-interval = <15>;
+		};
+
+		cam_focus {
+			label = "cam_focus";
+			gpios = <&pm8998_gpios 8 0x1>;
+			linux,input-type = <1>;
+			linux,code = <528>; /* KEY_CAMERA_FOCUS (0x210) */
+			gpio-key,wakeup;
+			debounce-interval = <15>;
+		};
+	};
+};
diff -Nruw linux-4.4.115-fbx/arch/arm/boot/dts/qcom./msm8998-coresight.dtsi linux-4.4.115-fbx/arch/arm/boot/dts/qcom/msm8998-coresight.dtsi
--- linux-4.4.115-fbx/arch/arm/boot/dts/qcom./msm8998-coresight.dtsi	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/arch/arm/boot/dts/qcom/msm8998-coresight.dtsi	2019-01-22 16:16:21.195225506 +0100
@@ -0,0 +1,1622 @@
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+&soc {
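+	/*
+	 * QDSS trace topology: sources (CPU ETMs, STM, remote ETMs and
+	 * TPDMs behind TPDAs) feed through funnels into the ETF, then
+	 * via the replicator to the ETR, which writes trace to DDR.
+	 */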
+	tmc_etr: tmc@6048000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b961>;
+
+		reg = <0x6048000 0x1000>,
+		      <0x6064000 0x15000>;
+		reg-names = "tmc-base", "bam-base";
+
+		arm,buffer-size = <0x400000>;
+		arm,sg-enable;
+
+		coresight-ctis = <&cti0 &cti8>;
+
+		coresight-name = "coresight-tmc-etr";
+
+		clocks = <&clock_gcc clk_qdss_clk>,
+			 <&clock_gcc clk_qdss_a_clk>;
+		clock-names = "apb_pclk", "core_a_clk";
+
+		port{
+			tmc_etr_in_replicator: endpoint {
+				slave-mode;
+				remote-endpoint = <&replicator_out_tmc_etr>;
+			};
+		};
+	};
+
+	replicator: replicator@6046000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b909>;
+
+		reg = <0x6046000 0x1000>;
+		reg-names = "replicator-base";
+
+		coresight-name = "coresight-replicator";
+
+		clocks = <&clock_gcc clk_qdss_clk>,
+			 <&clock_gcc clk_qdss_a_clk>;
+		clock-names = "apb_pclk", "core_a_clk";
+
+		ports {
+			#address-cells = <1>;
+			#size-cells = <0>;
+
+			port@0 {
+				reg = <0>;
+				replicator_out_tmc_etr:endpoint {
+					remote-endpoint =
+						<&tmc_etr_in_replicator>;
+				};
+			};
+			port@1 {
+				reg = <0>;
+				replicator_in_tmc_etf:endpoint {
+					slave-mode;
+					remote-endpoint =
+						<&tmc_etf_out_replicator>;
+				};
+			};
+		};
+	};
+
+	tmc_etf: tmc@6047000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b961>;
+
+		reg = <0x6047000 0x1000>;
+		reg-names = "tmc-base";
+
+		coresight-ctis = <&cti0 &cti8>;
+
+		coresight-name = "coresight-tmc-etf";
+
+		arm,default-sink;
+
+		clocks = <&clock_gcc clk_qdss_clk>,
+			 <&clock_gcc clk_qdss_a_clk>;
+		clock-names = "apb_pclk", "core_a_clk";
+
+		ports {
+			#address-cells = <1>;
+			#size-cells = <0>;
+
+			port@0 {
+				reg = <0>;
+				tmc_etf_out_replicator:endpoint {
+					remote-endpoint =
+						<&replicator_in_tmc_etf>;
+				};
+			};
+			port@1 {
+				reg = <0>;
+				tmc_etf_in_funnel_merg:endpoint {
+					slave-mode;
+					remote-endpoint =
+						<&funnel_merg_out_tmc_etf>;
+				};
+			};
+		};
+	};
+
+	funnel_merg: funnel@6045000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b908>;
+
+		reg = <0x6045000 0x1000>;
+		reg-names = "funnel-base";
+
+		coresight-name = "coresight-funnel-merg";
+
+		clocks = <&clock_gcc clk_qdss_clk>,
+			 <&clock_gcc clk_qdss_a_clk>;
+		clock-names = "apb_pclk", "core_a_clk";
+
+		ports {
+			#address-cells = <1>;
+			#size-cells = <0>;
+
+			port@0 {
+				reg = <0>;
+				funnel_merg_out_tmc_etf:endpoint {
+					remote-endpoint =
+						<&tmc_etf_in_funnel_merg>;
+				};
+			};
+			port@1 {
+				reg = <0>;
+				funnel_merg_in_funnel_in0:endpoint {
+					slave-mode;
+					remote-endpoint =
+						<&funnel_in0_out_funnel_merg>;
+				};
+			};
+			port@2 {
+				reg = <1>;
+				funnel_merg_in_funnel_in1:endpoint {
+					slave-mode;
+					remote-endpoint =
+						<&funnel_in1_out_funnel_merg>;
+				};
+			};
+		};
+	};
+
+	funnel_in0: funnel@6041000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b908>;
+
+		reg = <0x6041000 0x1000>;
+		reg-names = "funnel-base";
+
+		coresight-name = "coresight-funnel-in0";
+
+		clocks = <&clock_gcc clk_qdss_clk>,
+			 <&clock_gcc clk_qdss_a_clk>;
+		clock-names = "apb_pclk", "core_a_clk";
+
+		ports {
+			#address-cells = <1>;
+			#size-cells = <0>;
+
+			port@0 {
+				reg = <0>;
+				funnel_in0_out_funnel_merg: endpoint {
+					remote-endpoint =
+						<&funnel_merg_in_funnel_in0>;
+				};
+			};
+			port@1 {
+				reg = <0>;
+				funnel_in0_in_rpm_etm0: endpoint {
+					slave-mode;
+					remote-endpoint =
+						<&rpm_etm0_out_funnel_in0>;
+				};
+			};
+			port@2 {
+				reg = <3>;
+				funnel_in0_in_funnel_spss: endpoint {
+					slave-mode;
+					remote-endpoint =
+						<&funnel_spss_out_funnel_in0>;
+				};
+			};
+			port@3 {
+				reg = <6>;
+				funnel_in0_in_funnel_qatb: endpoint {
+					slave-mode;
+					remote-endpoint =
+						<&funnel_qatb_out_funnel_in0>;
+				};
+			};
+			port@4 {
+				reg = <7>;
+				funnel_in0_in_stm: endpoint {
+					slave-mode;
+					remote-endpoint = <&stm_out_funnel_in0>;
+				};
+			};
+		};
+	};
+
+	funnel_in1: funnel@6042000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b908>;
+
+		reg = <0x6042000 0x1000>;
+		reg-names = "funnel-base";
+
+		coresight-name = "coresight-funnel-in1";
+
+		clocks = <&clock_gcc clk_qdss_clk>,
+			 <&clock_gcc clk_qdss_a_clk>;
+		clock-names = "apb_pclk", "core_a_clk";
+
+		ports {
+			#address-cells = <1>;
+			#size-cells = <0>;
+
+			port@0 {
+				reg = <0>;
+				funnel_in1_out_funnel_merg: endpoint {
+					remote-endpoint =
+						<&funnel_merg_in_funnel_in1>;
+				};
+			};
+			port@1 {
+				reg = <2>;
+				funnel_in1_in_tpda_nav: endpoint {
+					slave-mode;
+					remote-endpoint =
+					    <&tpda_nav_out_funnel_in1>;
+				};
+			};
+			port@2 {
+				reg = <3>;
+				funnel_in1_in_tpda_mss: endpoint {
+					slave-mode;
+					remote-endpoint =
+					    <&tpda_mss_out_funnel_in1>;
+				};
+			};
+			port@3 {
+				reg = <4>;
+				funnel_in1_in_audio_etm0: endpoint {
+					slave-mode;
+					remote-endpoint =
+					    <&audio_etm0_out_funnel_in1>;
+				};
+			};
+			port@4 {
+				reg = <5>;
+				funnel_in1_in_modem_etm0: endpoint {
+					slave-mode;
+					remote-endpoint =
+					    <&modem_etm0_out_funnel_in1>;
+				};
+			};
+			port@5 {
+				reg = <6>;
+				funnel_in1_in_funnel_apss_merg: endpoint {
+					slave-mode;
+					remote-endpoint =
+					    <&funnel_apss_merg_out_funnel_in1>;
+				};
+			};
+			port@6 {
+				reg = <7>;
+				funnel_in1_in_gfx: endpoint {
+					slave-mode;
+					remote-endpoint =
+					    <&gfx_out_funnel_in1>;
+				};
+			};
+		};
+	};
+
+	funnel_apss_merg: funnel@7b70000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b908>;
+
+		reg = <0x7b70000 0x1000>;
+		reg-names = "funnel-base";
+
+		coresight-name = "coresight-funnel-apss-merg";
+
+		clocks = <&clock_gcc clk_qdss_clk>,
+			 <&clock_gcc clk_qdss_a_clk>;
+		clock-names = "apb_pclk", "core_a_clk";
+
+		ports {
+			#address-cells = <1>;
+			#size-cells = <0>;
+
+			port@0 {
+				reg = <0>;
+				funnel_apss_merg_out_funnel_in1: endpoint {
+					remote-endpoint =
+					    <&funnel_in1_in_funnel_apss_merg>;
+				};
+			};
+			port@1 {
+				reg = <0>;
+				funnel_apss_merg_in_funnel_apss: endpoint {
+					slave-mode;
+					remote-endpoint =
+					    <&funnel_apss_out_funnel_apss_merg>;
+				};
+			};
+			port@2 {
+				reg = <1>;
+				funnel_apss_merg_in_tpda_olc: endpoint {
+					slave-mode;
+					remote-endpoint =
+					    <&tpda_olc_out_funnel_apss_merg>;
+				};
+			};
+			port@3 {
+				reg = <3>;
+				funnel_apss_merg_in_tpda_apss: endpoint {
+					slave-mode;
+					remote-endpoint =
+					    <&tpda_apss_out_funnel_apss_merg>;
+				};
+			};
+		};
+	};
+
+	funnel_apss: funnel@7b60000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b908>;
+
+		reg = <0x7b60000 0x1000>;
+		reg-names = "funnel-base";
+
+		coresight-name = "coresight-funnel-apss";
+
+		clocks = <&clock_gcc clk_qdss_clk>,
+			 <&clock_gcc clk_qdss_a_clk>;
+		clock-names = "apb_pclk", "core_a_clk";
+
+		ports {
+			#address-cells = <1>;
+			#size-cells = <0>;
+
+			port@0 {
+				reg = <0>;
+				funnel_apss_out_funnel_apss_merg: endpoint {
+					remote-endpoint =
+					    <&funnel_apss_merg_in_funnel_apss>;
+				};
+			};
+			port@1 {
+				reg = <0>;
+				funnel_apss_in_etm0: endpoint {
+					slave-mode;
+					remote-endpoint =
+						<&etm0_out_funnel_apss>;
+				};
+			};
+			port@2 {
+				reg = <1>;
+				funnel_apss_in_etm1: endpoint {
+					slave-mode;
+					remote-endpoint =
+						<&etm1_out_funnel_apss>;
+				};
+			};
+			port@3 {
+				reg = <2>;
+				funnel_apss_in_etm2: endpoint {
+					slave-mode;
+					remote-endpoint =
+						<&etm2_out_funnel_apss>;
+				};
+			};
+			port@4 {
+				reg = <3>;
+				funnel_apss_in_etm3: endpoint {
+					slave-mode;
+					remote-endpoint =
+						<&etm3_out_funnel_apss>;
+				};
+			};
+			port@5 {
+				reg = <4>;
+				funnel_apss_in_etm4: endpoint {
+					slave-mode;
+					remote-endpoint =
+						<&etm4_out_funnel_apss>;
+				};
+			};
+			port@6 {
+				reg = <5>;
+				funnel_apss_in_etm5: endpoint {
+					slave-mode;
+					remote-endpoint =
+						<&etm5_out_funnel_apss>;
+				};
+			};
+			port@7 {
+				reg = <6>;
+				funnel_apss_in_etm6: endpoint {
+					slave-mode;
+					remote-endpoint =
+						<&etm6_out_funnel_apss>;
+				};
+			};
+			port@8 {
+				reg = <7>;
+				funnel_apss_in_etm7: endpoint {
+					slave-mode;
+					remote-endpoint =
+						<&etm7_out_funnel_apss>;
+				};
+			};
+		};
+	};
+
+	stm: stm@6002000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b962>;
+
+		reg = <0x6002000 0x1000>,
+		      <0x16280000 0x180000>;
+		reg-names = "stm-base", "stm-data-base";
+
+		coresight-name = "coresight-stm";
+
+		clocks = <&clock_gcc clk_qdss_clk>,
+			 <&clock_gcc clk_qdss_a_clk>;
+		clock-names = "apb_pclk", "core_a_clk";
+
+		port{
+			stm_out_funnel_in0: endpoint {
+				remote-endpoint = <&funnel_in0_in_stm>;
+			};
+		};
+	};
+
+	etm0: etm@7840000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b95d>;
+
+		reg = <0x7840000 0x1000>;
+		cpu = <&CPU0>;
+
+		coresight-name = "coresight-etm0";
+
+		clocks = <&clock_gcc clk_qdss_clk>,
+			 <&clock_gcc clk_qdss_a_clk>;
+		clock-names = "apb_pclk", "core_a_clk";
+
+		port{
+			etm0_out_funnel_apss: endpoint {
+				remote-endpoint = <&funnel_apss_in_etm0>;
+			};
+		};
+	};
+
+	etm1: etm@7940000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b95d>;
+
+		reg = <0x7940000 0x1000>;
+		cpu = <&CPU1>;
+
+		coresight-name = "coresight-etm1";
+
+		clocks = <&clock_gcc clk_qdss_clk>,
+			 <&clock_gcc clk_qdss_a_clk>;
+		clock-names = "apb_pclk", "core_a_clk";
+
+		port{
+			etm1_out_funnel_apss: endpoint {
+				remote-endpoint = <&funnel_apss_in_etm1>;
+			};
+		};
+	};
+
+	etm2: etm@7a40000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b95d>;
+
+		reg = <0x7a40000 0x1000>;
+		cpu = <&CPU2>;
+
+		coresight-name = "coresight-etm2";
+
+		clocks = <&clock_gcc clk_qdss_clk>,
+			 <&clock_gcc clk_qdss_a_clk>;
+		clock-names = "apb_pclk", "core_a_clk";
+
+		port{
+			etm2_out_funnel_apss: endpoint {
+				remote-endpoint = <&funnel_apss_in_etm2>;
+			};
+		};
+	};
+
+	etm3: etm@7b40000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b95d>;
+
+		reg = <0x7b40000 0x1000>;
+		cpu = <&CPU3>;
+
+		coresight-name = "coresight-etm3";
+
+		clocks = <&clock_gcc clk_qdss_clk>,
+			 <&clock_gcc clk_qdss_a_clk>;
+		clock-names = "apb_pclk", "core_a_clk";
+
+		port{
+			etm3_out_funnel_apss: endpoint {
+				remote-endpoint = <&funnel_apss_in_etm3>;
+			};
+		};
+	};
+
+	etm4: etm@7c40000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b95d>;
+
+		reg = <0x7c40000 0x1000>;
+		cpu = <&CPU4>;
+
+		coresight-name = "coresight-etm4";
+
+		clocks = <&clock_gcc clk_qdss_clk>,
+			 <&clock_gcc clk_qdss_a_clk>;
+		clock-names = "apb_pclk", "core_a_clk";
+
+		port{
+			etm4_out_funnel_apss: endpoint {
+				remote-endpoint = <&funnel_apss_in_etm4>;
+			};
+		};
+	};
+
+	etm5: etm@7d40000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b95d>;
+
+		reg = <0x7d40000 0x1000>;
+		cpu = <&CPU5>;
+
+		coresight-name = "coresight-etm5";
+
+		clocks = <&clock_gcc clk_qdss_clk>,
+			 <&clock_gcc clk_qdss_a_clk>;
+		clock-names = "apb_pclk", "core_a_clk";
+
+		port{
+			etm5_out_funnel_apss: endpoint {
+				remote-endpoint = <&funnel_apss_in_etm5>;
+			};
+		};
+	};
+
+	etm6: etm@7e40000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b95d>;
+
+		reg = <0x7e40000 0x1000>;
+		cpu = <&CPU6>;
+
+		coresight-name = "coresight-etm6";
+
+		clocks = <&clock_gcc clk_qdss_clk>,
+			 <&clock_gcc clk_qdss_a_clk>;
+		clock-names = "apb_pclk", "core_a_clk";
+
+		port{
+			etm6_out_funnel_apss: endpoint {
+				remote-endpoint = <&funnel_apss_in_etm6>;
+			};
+		};
+	};
+
+	etm7: etm@7f40000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b95d>;
+
+		reg = <0x7f40000 0x1000>;
+		cpu = <&CPU7>;
+
+		coresight-name = "coresight-etm7";
+
+		clocks = <&clock_gcc clk_qdss_clk>,
+			 <&clock_gcc clk_qdss_a_clk>;
+		clock-names = "apb_pclk", "core_a_clk";
+
+		port{
+			etm7_out_funnel_apss: endpoint {
+				remote-endpoint = <&funnel_apss_in_etm7>;
+			};
+		};
+	};
+
+	cti0: cti@6010000 {
+		compatible = "arm,coresight-cti";
+		reg = <0x6010000 0x1000>;
+		reg-names = "cti-base";
+
+		coresight-name = "coresight-cti0";
+
+		clocks = <&clock_gcc clk_qdss_clk>,
+			 <&clock_gcc clk_qdss_a_clk>;
+		clock-names = "core_clk", "core_a_clk";
+	};
+
+	cti1: cti@6011000 {
+		compatible = "arm,coresight-cti";
+		reg = <0x6011000 0x1000>;
+		reg-names = "cti-base";
+
+		coresight-name = "coresight-cti1";
+
+		clocks = <&clock_gcc clk_qdss_clk>,
+			 <&clock_gcc clk_qdss_a_clk>;
+		clock-names = "core_clk", "core_a_clk";
+	};
+
+	cti2: cti@6012000 {
+		compatible = "arm,coresight-cti";
+		reg = <0x6012000 0x1000>;
+		reg-names = "cti-base";
+
+		coresight-name = "coresight-cti2";
+
+		clocks = <&clock_gcc clk_qdss_clk>,
+			 <&clock_gcc clk_qdss_a_clk>;
+		clock-names = "core_clk", "core_a_clk";
+
+		qcom,cti-gpio-trigout = <4>;
+		pinctrl-names = "cti-trigout-pctrl";
+		pinctrl-0 = <&trigout_a>;
+	};
+
+	cti3: cti@6013000 {
+		compatible = "arm,coresight-cti";
+		reg = <0x6013000 0x1000>;
+		reg-names = "cti-base";
+
+		coresight-name = "coresight-cti3";
+
+		clocks = <&clock_gcc clk_qdss_clk>,
+			 <&clock_gcc clk_qdss_a_clk>;
+		clock-names = "core_clk", "core_a_clk";
+	};
+
+	cti4: cti@6014000 {
+		compatible = "arm,coresight-cti";
+		reg = <0x6014000 0x1000>;
+		reg-names = "cti-base";
+
+		coresight-name = "coresight-cti4";
+
+		clocks = <&clock_gcc clk_qdss_clk>,
+			 <&clock_gcc clk_qdss_a_clk>;
+		clock-names = "core_clk", "core_a_clk";
+	};
+
+	cti5: cti@6015000 {
+		compatible = "arm,coresight-cti";
+		reg = <0x6015000 0x1000>;
+		reg-names = "cti-base";
+
+		coresight-name = "coresight-cti5";
+
+		clocks = <&clock_gcc clk_qdss_clk>,
+			 <&clock_gcc clk_qdss_a_clk>;
+		clock-names = "core_clk", "core_a_clk";
+	};
+
+	cti6: cti@6016000 {
+		compatible = "arm,coresight-cti";
+		reg = <0x6016000 0x1000>;
+		reg-names = "cti-base";
+
+		coresight-name = "coresight-cti6";
+
+		clocks = <&clock_gcc clk_qdss_clk>,
+			 <&clock_gcc clk_qdss_a_clk>;
+		clock-names = "core_clk", "core_a_clk";
+	};
+
+	cti7: cti@6017000 {
+		compatible = "arm,coresight-cti";
+		reg = <0x6017000 0x1000>;
+		reg-names = "cti-base";
+
+		coresight-name = "coresight-cti7";
+
+		clocks = <&clock_gcc clk_qdss_clk>,
+			 <&clock_gcc clk_qdss_a_clk>;
+		clock-names = "core_clk", "core_a_clk";
+	};
+
+	cti8: cti@6018000 {
+		compatible = "arm,coresight-cti";
+		reg = <0x6018000 0x1000>;
+		reg-names = "cti-base";
+
+		coresight-name = "coresight-cti8";
+
+		clocks = <&clock_gcc clk_qdss_clk>,
+			 <&clock_gcc clk_qdss_a_clk>;
+		clock-names = "core_clk", "core_a_clk";
+	};
+
+	cti9: cti@6019000 {
+		compatible = "arm,coresight-cti";
+		reg = <0x6019000 0x1000>;
+		reg-names = "cti-base";
+
+		coresight-name = "coresight-cti9";
+
+		clocks = <&clock_gcc clk_qdss_clk>,
+			 <&clock_gcc clk_qdss_a_clk>;
+		clock-names = "core_clk", "core_a_clk";
+	};
+
+	cti10: cti@601a000 {
+		compatible = "arm,coresight-cti";
+		reg = <0x601a000 0x1000>;
+		reg-names = "cti-base";
+
+		coresight-name = "coresight-cti10";
+
+		clocks = <&clock_gcc clk_qdss_clk>,
+			 <&clock_gcc clk_qdss_a_clk>;
+		clock-names = "core_clk", "core_a_clk";
+	};
+
+	cti11: cti@601b000 {
+		compatible = "arm,coresight-cti";
+		reg = <0x601b000 0x1000>;
+		reg-names = "cti-base";
+
+		coresight-name = "coresight-cti11";
+
+		clocks = <&clock_gcc clk_qdss_clk>,
+			 <&clock_gcc clk_qdss_a_clk>;
+		clock-names = "core_clk", "core_a_clk";
+	};
+
+	cti12: cti@601c000 {
+		compatible = "arm,coresight-cti";
+		reg = <0x601c000 0x1000>;
+		reg-names = "cti-base";
+
+		coresight-name = "coresight-cti12";
+
+		clocks = <&clock_gcc clk_qdss_clk>,
+			 <&clock_gcc clk_qdss_a_clk>;
+		clock-names = "core_clk", "core_a_clk";
+	};
+
+	cti13: cti@601d000 {
+		compatible = "arm,coresight-cti";
+		reg = <0x601d000 0x1000>;
+		reg-names = "cti-base";
+
+		coresight-name = "coresight-cti13";
+
+		clocks = <&clock_gcc clk_qdss_clk>,
+			 <&clock_gcc clk_qdss_a_clk>;
+		clock-names = "core_clk", "core_a_clk";
+	};
+
+	cti14: cti@601e000 {
+		compatible = "arm,coresight-cti";
+		reg = <0x601e000 0x1000>;
+		reg-names = "cti-base";
+
+		coresight-name = "coresight-cti14";
+
+		clocks = <&clock_gcc clk_qdss_clk>,
+			 <&clock_gcc clk_qdss_a_clk>;
+		clock-names = "core_clk", "core_a_clk";
+	};
+
+	cti15: cti@601f000 {
+		compatible = "arm,coresight-cti";
+		reg = <0x601f000 0x1000>;
+		reg-names = "cti-base";
+
+		coresight-name = "coresight-cti15";
+
+		clocks = <&clock_gcc clk_qdss_clk>,
+			 <&clock_gcc clk_qdss_a_clk>;
+		clock-names = "core_clk", "core_a_clk";
+	};
+
+	cti_cpu0: cti@7820000 {
+		compatible = "arm,coresight-cti";
+		reg = <0x7820000 0x1000>;
+		reg-names = "cti-base";
+
+		coresight-name = "coresight-cti-cpu0";
+		cpu = <&CPU0>;
+
+		clocks = <&clock_gcc clk_qdss_clk>,
+			 <&clock_gcc clk_qdss_a_clk>;
+		clock-names = "core_clk", "core_a_clk";
+	};
+
+	cti_cpu1: cti@7920000 {
+		compatible = "arm,coresight-cti";
+		reg = <0x7920000 0x1000>;
+		reg-names = "cti-base";
+
+		coresight-name = "coresight-cti-cpu1";
+		cpu = <&CPU1>;
+
+		clocks = <&clock_gcc clk_qdss_clk>,
+			 <&clock_gcc clk_qdss_a_clk>;
+		clock-names = "core_clk", "core_a_clk";
+	};
+
+	cti_cpu2: cti@7a20000 {
+		compatible = "arm,coresight-cti";
+		reg = <0x7a20000 0x1000>;
+		reg-names = "cti-base";
+
+		coresight-name = "coresight-cti-cpu2";
+		cpu = <&CPU2>;
+
+		clocks = <&clock_gcc clk_qdss_clk>,
+			 <&clock_gcc clk_qdss_a_clk>;
+		clock-names = "core_clk", "core_a_clk";
+	};
+
+	cti_cpu3: cti@7b20000 {
+		compatible = "arm,coresight-cti";
+		reg = <0x7b20000 0x1000>;
+		reg-names = "cti-base";
+
+		coresight-name = "coresight-cti-cpu3";
+		cpu = <&CPU3>;
+
+		clocks = <&clock_gcc clk_qdss_clk>,
+			 <&clock_gcc clk_qdss_a_clk>;
+		clock-names = "core_clk", "core_a_clk";
+	};
+
+	cti_cpu4: cti@7c20000 {
+		compatible = "arm,coresight-cti";
+		reg = <0x7c20000 0x1000>;
+		reg-names = "cti-base";
+
+		coresight-name = "coresight-cti-cpu4";
+		cpu = <&CPU4>;
+
+		clocks = <&clock_gcc clk_qdss_clk>,
+			 <&clock_gcc clk_qdss_a_clk>;
+		clock-names = "core_clk", "core_a_clk";
+	};
+
+	cti_cpu5: cti@7d20000 {
+		compatible = "arm,coresight-cti";
+		reg = <0x7d20000 0x1000>;
+		reg-names = "cti-base";
+
+		coresight-name = "coresight-cti-cpu5";
+		cpu = <&CPU5>;
+
+		clocks = <&clock_gcc clk_qdss_clk>,
+			 <&clock_gcc clk_qdss_a_clk>;
+		clock-names = "core_clk", "core_a_clk";
+	};
+
+	cti_cpu6: cti@7e20000 {
+		compatible = "arm,coresight-cti";
+		reg = <0x7e20000 0x1000>;
+		reg-names = "cti-base";
+
+		coresight-name = "coresight-cti-cpu6";
+		cpu = <&CPU6>;
+
+		clocks = <&clock_gcc clk_qdss_clk>,
+			 <&clock_gcc clk_qdss_a_clk>;
+		clock-names = "core_clk", "core_a_clk";
+	};
+
+	cti_cpu7: cti@7f20000 {
+		compatible = "arm,coresight-cti";
+		reg = <0x7f20000 0x1000>;
+		reg-names = "cti-base";
+
+		coresight-name = "coresight-cti-cpu7";
+		cpu = <&CPU7>;
+
+		clocks = <&clock_gcc clk_qdss_clk>,
+			 <&clock_gcc clk_qdss_a_clk>;
+		clock-names = "core_clk", "core_a_clk";
+	};
+
+	cti_apss: cti@7b80000 {
+		compatible = "arm,coresight-cti";
+		reg = <0x7b80000 0x1000>;
+		reg-names = "cti-base";
+
+		coresight-name = "coresight-cti-apss";
+
+		clocks = <&clock_gcc clk_qdss_clk>,
+			 <&clock_gcc clk_qdss_a_clk>;
+		clock-names = "core_clk", "core_a_clk";
+	};
+
+	cti_apss_dl: cti@7bc1000 {
+		compatible = "arm,coresight-cti";
+		reg = <0x7bc1000 0x1000>;
+		reg-names = "cti-base";
+
+		coresight-name = "coresight-cti-apss-dl";
+
+		clocks = <&clock_gcc clk_qdss_clk>,
+			 <&clock_gcc clk_qdss_a_clk>;
+		clock-names = "core_clk", "core_a_clk";
+	};
+
+	cti_olc: cti@7b91000 {
+		compatible = "arm,coresight-cti";
+		reg = <0x7b91000 0x1000>;
+		reg-names = "cti-base";
+
+		coresight-name = "coresight-cti-olc";
+
+		clocks = <&clock_gcc clk_qdss_clk>,
+			 <&clock_gcc clk_qdss_a_clk>;
+		clock-names = "core_clk", "core_a_clk";
+	};
+
+	funnel_qatb: funnel@6005000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b908>;
+
+		reg = <0x6005000 0x1000>;
+		reg-names = "funnel-base";
+
+		coresight-name = "coresight-funnel-qatb";
+
+		clocks = <&clock_gcc clk_qdss_clk>,
+			 <&clock_gcc clk_qdss_a_clk>;
+		clock-names = "apb_pclk", "core_a_clk";
+
+		ports {
+			#address-cells = <1>;
+			#size-cells = <0>;
+
+			port@0 {
+				reg = <0>;
+				funnel_qatb_out_funnel_in0: endpoint {
+					remote-endpoint =
+					    <&funnel_in0_in_funnel_qatb>;
+				};
+			};
+			port@1 {
+				reg = <0>;
+				funnel_qatb_in_tpda: endpoint {
+					slave-mode;
+					remote-endpoint =
+						<&tpda_out_funnel_qatb>;
+				};
+			};
+			port@2 {
+				reg = <3>;
+				funnel_qatb_in_funnel_dlet_qatb: endpoint {
+					slave-mode;
+					remote-endpoint =
+					    <&funnel_dlet_qatb_out_funnel_qatb>;
+				};
+			};
+		};
+	};
+
+	tpda: tpda@6004000 {
+		compatible = "qcom,coresight-tpda";
+		reg = <0x6004000 0x1000>;
+		reg-names = "tpda-base";
+
+		coresight-name = "coresight-tpda";
+
+		qcom,tpda-atid = <65>;
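+		/* Each pair below is <TPDM input port> <element size in bits>. */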
+		qcom,bc-elem-size = <7 32>,
+				    <9 32>;
+		qcom,tc-elem-size = <3 32>,
+				    <6 32>,
+				    <9 32>;
+		qcom,dsb-elem-size = <7 32>,
+				     <9 32>;
+		qcom,cmb-elem-size = <3 32>,
+				     <4 32>,
+				     <5 32>,
+				     <9 64>;
+
+		clocks = <&clock_gcc clk_qdss_clk>,
+			 <&clock_gcc clk_qdss_a_clk>;
+		clock-names = "core_clk", "core_a_clk";
+
+		ports {
+			#address-cells = <1>;
+			#size-cells = <0>;
+			port@0 {
+				reg = <0>;
+				tpda_out_funnel_qatb: endpoint {
+					remote-endpoint =
+						<&funnel_qatb_in_tpda>;
+				};
+			};
+			port@1 {
+				reg = <3>;
+				tpda_in_tpdm_vsense: endpoint {
+					slave-mode;
+					remote-endpoint =
+						<&tpdm_vsense_out_tpda>;
+				};
+			};
+			port@2 {
+				reg = <4>;
+				tpda_in_tpdm_dcc: endpoint {
+					slave-mode;
+					remote-endpoint =
+						<&tpdm_dcc_out_tpda>;
+				};
+			};
+			port@3 {
+				reg = <5>;
+				tpda_in_tpdm_prng: endpoint {
+					slave-mode;
+					remote-endpoint =
+						<&tpdm_prng_out_tpda>;
+				};
+			};
+			port@4 {
+				reg = <7>;
+				tpda_in_tpdm_qm: endpoint {
+					slave-mode;
+					remote-endpoint =
+						<&tpdm_qm_out_tpda>;
+				};
+			};
+			port@5 {
+				reg = <9>;
+				tpda_in_tpdm_pimem: endpoint {
+					slave-mode;
+					remote-endpoint =
+						<&tpdm_pimem_out_tpda>;
+				};
+			};
+		};
+	};
+
+	tpdm_vsense: tpdm@7038000 {
+		compatible = "qcom,coresight-tpdm";
+		reg = <0x7038000 0x1000>;
+		reg-names = "tpdm-base";
+
+		coresight-name = "coresight-tpdm-vsense";
+
+		clocks = <&clock_gcc clk_qdss_clk>,
+			 <&clock_gcc clk_qdss_a_clk>;
+		clock-names = "core_clk", "core_a_clk";
+
+		port{
+			tpdm_vsense_out_tpda: endpoint {
+				remote-endpoint = <&tpda_in_tpdm_vsense>;
+			};
+		};
+	};
+
+	tpdm_dcc: tpdm@7054000 {
+		compatible = "qcom,coresight-tpdm";
+		reg = <0x7054000 0x1000>;
+		reg-names = "tpdm-base";
+
+		coresight-name = "coresight-tpdm-dcc";
+
+		clocks = <&clock_gcc clk_qdss_clk>,
+			 <&clock_gcc clk_qdss_a_clk>;
+		clock-names = "core_clk", "core_a_clk";
+
+		port{
+			tpdm_dcc_out_tpda: endpoint {
+				remote-endpoint = <&tpda_in_tpdm_dcc>;
+			};
+		};
+	};
+
+	tpdm_prng: tpdm@704c000 {
+		compatible = "qcom,coresight-tpdm";
+		reg = <0x704c000 0x1000>;
+		reg-names = "tpdm-base";
+
+		coresight-name = "coresight-tpdm-prng";
+
+		clocks = <&clock_gcc clk_qdss_clk>,
+			 <&clock_gcc clk_qdss_a_clk>;
+		clock-names = "core_clk", "core_a_clk";
+
+		port{
+			tpdm_prng_out_tpda: endpoint {
+				remote-endpoint = <&tpda_in_tpdm_prng>;
+			};
+		};
+	};
+
+	tpdm_qm: tpdm@71d0000 {
+		compatible = "qcom,coresight-tpdm";
+		reg = <0x71d0000 0x1000>;
+		reg-names = "tpdm-base";
+
+		coresight-name = "coresight-tpdm-qm";
+
+		clocks = <&clock_gcc clk_qdss_clk>,
+			 <&clock_gcc clk_qdss_a_clk>;
+		clock-names = "core_clk", "core_a_clk";
+
+		qcom,msr-fix-req;
+
+		port{
+			tpdm_qm_out_tpda: endpoint {
+				remote-endpoint = <&tpda_in_tpdm_qm>;
+			};
+		};
+	};
+
+	tpdm_pimem: tpdm@7050000 {
+		compatible = "qcom,coresight-tpdm";
+		reg = <0x7050000 0x1000>;
+		reg-names = "tpdm-base";
+
+		coresight-name = "coresight-tpdm-pimem";
+
+		clocks = <&clock_gcc clk_qdss_clk>,
+			 <&clock_gcc clk_qdss_a_clk>;
+		clock-names = "core_clk", "core_a_clk";
+
+		qcom,msr-fix-req;
+
+		port{
+			tpdm_pimem_out_tpda: endpoint {
+				remote-endpoint = <&tpda_in_tpdm_pimem>;
+			};
+		};
+	};
+
+	tpda_apss: tpda@7bc2000 {
+		compatible = "qcom,coresight-tpda";
+		reg = <0x7bc2000 0x1000>;
+		reg-names = "tpda-base";
+
+		coresight-name = "coresight-tpda-apss";
+
+		qcom,tpda-atid = <66>;
+		qcom,dsb-elem-size = <0 32>;
+
+		clocks = <&clock_gcc clk_qdss_clk>,
+			 <&clock_gcc clk_qdss_a_clk>;
+		clock-names = "core_clk", "core_a_clk";
+
+		ports {
+			#address-cells = <1>;
+			#size-cells = <0>;
+			port@0 {
+				reg = <0>;
+				tpda_apss_out_funnel_apss_merg: endpoint {
+					remote-endpoint =
+					       <&funnel_apss_merg_in_tpda_apss>;
+				};
+			};
+			port@1 {
+				reg = <0>;
+				tpda_apss_in_tpdm_apss: endpoint {
+					slave-mode;
+					remote-endpoint =
+						<&tpdm_apss_out_tpda_apss>;
+				};
+			};
+		};
+	};
+
+	tpdm_apss: tpdm@7bc0000 {
+		compatible = "qcom,coresight-tpdm";
+		reg = <0x7bc0000 0x1000>;
+		reg-names = "tpdm-base";
+
+		coresight-name = "coresight-tpdm-apss";
+
+		clocks = <&clock_gcc clk_qdss_clk>,
+			 <&clock_gcc clk_qdss_a_clk>;
+		clock-names = "core_clk", "core_a_clk";
+
+		qcom,msr-fix-req;
+
+		port{
+			tpdm_apss_out_tpda_apss: endpoint {
+				remote-endpoint = <&tpda_apss_in_tpdm_apss>;
+			};
+		};
+	};
+
+	tpda_mss: tpda@7043000 {
+		compatible = "qcom,coresight-tpda";
+		reg = <0x7043000 0x1000>;
+		reg-names = "tpda-base";
+
+		coresight-name = "coresight-tpda-mss";
+
+		qcom,tpda-atid = <67>;
+		qcom,dsb-elem-size = <0 32>;
+		qcom,cmb-elem-size = <0 32>;
+
+		clocks = <&clock_gcc clk_qdss_clk>,
+			 <&clock_gcc clk_qdss_a_clk>;
+		clock-names = "core_clk", "core_a_clk";
+
+		ports {
+			#address-cells = <1>;
+			#size-cells = <0>;
+			port@0 {
+				reg = <0>;
+				tpda_mss_out_funnel_in1: endpoint {
+					remote-endpoint =
+						<&funnel_in1_in_tpda_mss>;
+				};
+			};
+			port@1 {
+				reg = <0>;
+				tpda_mss_in_tpdm_mss: endpoint {
+					slave-mode;
+					remote-endpoint =
+						<&tpdm_mss_out_tpda_mss>;
+				};
+			};
+		};
+	};
+
+	tpdm_mss: tpdm@7042000 {
+		compatible = "qcom,coresight-tpdm";
+		reg = <0x7042000 0x1000>;
+		reg-names = "tpdm-base";
+
+		coresight-name = "coresight-tpdm-mss";
+
+		clocks = <&clock_gcc clk_qdss_clk>,
+			 <&clock_gcc clk_qdss_a_clk>;
+		clock-names = "core_clk", "core_a_clk";
+
+		qcom,msr-fix-req;
+
+		port{
+			tpdm_mss_out_tpda_mss: endpoint {
+				remote-endpoint = <&tpda_mss_in_tpdm_mss>;
+			};
+		};
+	};
+
+	tpda_nav: tpda@7191000 {
+		compatible = "qcom,coresight-tpda";
+		reg = <0x7191000 0x1000>;
+		reg-names = "tpda-base";
+
+		coresight-name = "coresight-tpda-nav";
+
+		qcom,tpda-atid = <68>;
+		qcom,cmb-elem-size = <0 32>;
+
+		clocks = <&clock_gcc clk_qdss_clk>,
+			 <&clock_gcc clk_qdss_a_clk>;
+		clock-names = "core_clk", "core_a_clk";
+
+		ports {
+			#address-cells = <1>;
+			#size-cells = <0>;
+			port@0 {
+				reg = <0>;
+				tpda_nav_out_funnel_in1: endpoint {
+					remote-endpoint =
+						<&funnel_in1_in_tpda_nav>;
+				};
+			};
+			port@1 {
+				reg = <0>;
+				tpda_nav_in_tpdm_nav: endpoint {
+					slave-mode;
+					remote-endpoint =
+						<&tpdm_nav_out_tpda_nav>;
+				};
+			};
+		};
+	};
+
+	tpdm_nav: tpdm@7190000 {
+		compatible = "qcom,coresight-tpdm";
+		reg = <0x7190000 0x1000>;
+		reg-names = "tpdm-base";
+
+		coresight-name = "coresight-tpdm-nav";
+
+		clocks = <&clock_gcc clk_qdss_clk>,
+			 <&clock_gcc clk_qdss_a_clk>;
+		clock-names = "core_clk", "core_a_clk";
+
+		port{
+			tpdm_nav_out_tpda_nav: endpoint {
+				remote-endpoint = <&tpda_nav_in_tpdm_nav>;
+			};
+		};
+	};
+
+	tpda_olc: tpda@7b92000 {
+		compatible = "qcom,coresight-tpda";
+		reg = <0x7b92000 0x1000>;
+		reg-names = "tpda-base";
+
+		coresight-name = "coresight-tpda-olc";
+
+		qcom,tpda-atid = <69>;
+		qcom,cmb-elem-size = <0 64>;
+
+		clocks = <&clock_gcc clk_qdss_clk>,
+			 <&clock_gcc clk_qdss_a_clk>;
+		clock-names = "core_clk", "core_a_clk";
+
+		ports {
+			#address-cells = <1>;
+			#size-cells = <0>;
+			port@0 {
+				reg = <0>;
+				tpda_olc_out_funnel_apss_merg: endpoint {
+					remote-endpoint =
+						<&funnel_apss_merg_in_tpda_olc>;
+				};
+			};
+			port@1 {
+				reg = <0>;
+				tpda_olc_in_tpdm_olc: endpoint {
+					slave-mode;
+					remote-endpoint =
+						<&tpdm_olc_out_tpda_olc>;
+				};
+			};
+		};
+	};
+
+	tpdm_olc: tpdm@7b90000 {
+		compatible = "qcom,coresight-tpdm";
+		reg = <0x7b90000 0x1000>;
+		reg-names = "tpdm-base";
+
+		coresight-name = "coresight-tpdm-olc";
+
+		clocks = <&clock_gcc clk_qdss_clk>,
+			 <&clock_gcc clk_qdss_a_clk>;
+		clock-names = "core_clk", "core_a_clk";
+
+		port{
+			tpdm_olc_out_tpda_olc: endpoint {
+				remote-endpoint = <&tpda_olc_in_tpdm_olc>;
+			};
+		};
+	};
+
+	funnel_spss: funnel@7083000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b908>;
+
+		reg = <0x7083000 0x1000>;
+		reg-names = "funnel-base";
+
+		coresight-name = "coresight-funnel-spss";
+
+		clocks = <&clock_gcc clk_qdss_clk>,
+			 <&clock_gcc clk_qdss_a_clk>;
+		clock-names = "apb_pclk", "core_a_clk";
+
+		ports {
+			#address-cells = <1>;
+			#size-cells = <0>;
+
+			port@0 {
+				reg = <0>;
+				funnel_spss_out_funnel_in0: endpoint {
+					remote-endpoint =
+					    <&funnel_in0_in_funnel_spss>;
+				};
+			};
+			port@1 {
+				reg = <0>;
+				funnel_spss_in_tpda_spss: endpoint {
+					slave-mode;
+					remote-endpoint =
+						<&tpda_spss_out_funnel_spss>;
+				};
+			};
+			port@2 {
+				reg = <1>;
+				funnel_spss_in_spss_etm0: endpoint {
+					slave-mode;
+					remote-endpoint =
+						<&spss_etm0_out_funnel_spss>;
+				};
+			};
+		};
+	};
+
+	tpda_spss: tpda@7082000 {
+		compatible = "qcom,coresight-tpda";
+		reg = <0x7082000 0x1000>;
+		reg-names = "tpda-base";
+
+		coresight-name = "coresight-tpda-spss";
+
+		qcom,tpda-atid = <70>;
+		qcom,dsb-elem-size = <0 32>;
+
+		clocks = <&clock_gcc clk_qdss_clk>,
+			 <&clock_gcc clk_qdss_a_clk>;
+		clock-names = "core_clk", "core_a_clk";
+
+		ports {
+			#address-cells = <1>;
+			#size-cells = <0>;
+			port@0 {
+				reg = <0>;
+				tpda_spss_out_funnel_spss: endpoint {
+					remote-endpoint =
+						<&funnel_spss_in_tpda_spss>;
+				};
+			};
+			port@1 {
+				reg = <0>;
+				tpda_spss_in_tpdm_spss: endpoint {
+					slave-mode;
+					remote-endpoint =
+						<&tpdm_spss_out_tpda_spss>;
+				};
+			};
+		};
+	};
+
+	tpdm_spss: tpdm@7080000 {
+		compatible = "qcom,coresight-tpdm";
+		reg = <0x7080000 0x1000>;
+		reg-names = "tpdm-base";
+
+		coresight-name = "coresight-tpdm-spss";
+
+		clocks = <&clock_gcc clk_qdss_clk>,
+			 <&clock_gcc clk_qdss_a_clk>;
+		clock-names = "core_clk", "core_a_clk";
+
+		qcom,msr-fix-req;
+
+		port{
+			tpdm_spss_out_tpda_spss: endpoint {
+				remote-endpoint = <&tpda_spss_in_tpdm_spss>;
+			};
+		};
+	};
+
+	hwevent: hwevent@158000 {
+		compatible = "qcom,coresight-hwevent";
+		reg = <0x158000 0x80>,
+		      <0x17091000 0x80>,
+		      <0x1730200c 0x4>,
+		      <0xc90137c 0x4>,
+		      <0xc828018 0x80>,
+		      <0x1c00058 0x80>,
+		      <0x5e02038 0x4>,
+		      <0x5e02028 0x10>,
+		      <0x1fcb360 0x80>,
+		      <0x1fcb760 0x80>,
+		      <0x1fcbf60 0x80>,
+		      <0xa8f8860 0x4>,
+		      <0x500c260 0x4>,
+		      <0x500d040 0x4>,
+		      <0x1da6400 0x80>;
+		reg-names = "gcc-ctrl", "lpass-stm", "lpass-qdsp", "mdss-mdp",
+			    "mdss-misc", "pcie0-hwev", "ssc-en", "ssc-hwev",
+			    "tcsr-qdss", "tcsr-mss0", "tcsr-mss1", "usb-ctrl",
+			    "vbif-stm", "vbif-stm-en", "ufs-mux";
+
+		coresight-name = "coresight-hwevent";
+
+		clocks = <&clock_gcc clk_qdss_clk>,
+			 <&clock_gcc clk_qdss_a_clk>,
+			 <&clock_mmss clk_mmss_misc_ahb_clk>;
+		clock-names = "core_clk", "core_a_clk", "core_mmss_clk";
+
+		qcom,hwevent-clks = "core_mmss_clk";
+	};
+
+	csr: csr@6001000 {
+		compatible = "qcom,coresight-csr";
+		reg = <0x6001000 0x1000>;
+		reg-names = "csr-base";
+
+		coresight-name = "coresight-csr";
+
+		qcom,blk-size = <1>;
+	};
+
+	modem_etm0 {
+		compatible = "qcom,coresight-remote-etm";
+
+		coresight-name = "coresight-modem-etm0";
+		qcom,inst-id = <2>;
+
+		port {
+			modem_etm0_out_funnel_in1: endpoint {
+				remote-endpoint = <&funnel_in1_in_modem_etm0>;
+			};
+		};
+	};
+
+	audio_etm0 {
+		compatible = "qcom,coresight-remote-etm";
+
+		coresight-name = "coresight-audio-etm0";
+		qcom,inst-id = <5>;
+
+		port {
+			audio_etm0_out_funnel_in1: endpoint {
+				remote-endpoint = <&funnel_in1_in_audio_etm0>;
+			};
+		};
+	};
+
+	rpm_etm0 {
+		compatible = "qcom,coresight-remote-etm";
+
+		coresight-name = "coresight-rpm-etm0";
+		qcom,inst-id = <4>;
+
+		port {
+			rpm_etm0_out_funnel_in0: endpoint {
+				remote-endpoint = <&funnel_in0_in_rpm_etm0>;
+			};
+		};
+	};
+
+	funnel_dlet_qatb: funnel@7225000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b908>;
+
+		reg = <0x7225000 0x1000>;
+		reg-names = "funnel-base";
+
+		coresight-name = "coresight-funnel-dlet-qatb";
+
+		clocks = <&clock_gcc clk_qdss_clk>,
+			 <&clock_gcc clk_qdss_a_clk>;
+		clock-names = "apb_pclk", "core_a_clk";
+
+		ports {
+			#address-cells = <1>;
+			#size-cells = <0>;
+
+			port@0 {
+				reg = <0>;
+				funnel_dlet_qatb_out_funnel_qatb: endpoint {
+					remote-endpoint =
+					    <&funnel_qatb_in_funnel_dlet_qatb>;
+				};
+			};
+			port@1 {
+				reg = <1>;
+				funnel_dlet_qatb_in_tpdm_wcss: endpoint {
+					slave-mode;
+					remote-endpoint =
+					      <&tpdm_wcss_out_funnel_dlet_qatb>;
+				};
+			};
+		};
+	};
+
+	dummy-tpdm-wcss {
+		compatible = "qcom,coresight-dummy";
+
+		coresight-name = "coresight-tpdm-wcss";
+
+		port {
+			tpdm_wcss_out_funnel_dlet_qatb: endpoint {
+				remote-endpoint =
+					<&funnel_dlet_qatb_in_tpdm_wcss>;
+			};
+		};
+	};
+
+	dummy-spss-etm0 {
+		compatible = "qcom,coresight-dummy";
+
+		coresight-name = "coresight-spss-etm0";
+
+		port {
+			spss_etm0_out_funnel_spss: endpoint {
+				remote-endpoint =
+					<&funnel_spss_in_spss_etm0>;
+			};
+		};
+	};
+};
diff -Nruw linux-4.4.115-fbx/arch/arm/boot/dts/qcom./msm8998.dtsi linux-4.4.115-fbx/arch/arm/boot/dts/qcom/msm8998.dtsi
--- linux-4.4.115-fbx/arch/arm/boot/dts/qcom./msm8998.dtsi	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/arch/arm/boot/dts/qcom/msm8998.dtsi	2019-10-29 09:26:22.921196113 +0100
@@ -0,0 +1,3351 @@
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include "skeleton64.dtsi"
+#include <dt-bindings/clock/msm-clocks-8998.h>
+#include <dt-bindings/regulator/qcom,rpm-smd-regulator.h>
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+
+/ {
+	model = "Qualcomm Technologies, Inc. MSM 8998";
+	compatible = "qcom,msm8998";
+	qcom,msm-id = <292 0x0>;
+	interrupt-parent = <&intc>;
+
+	aliases {
+		serial0 = &uartblsp2dm1;
+		pci-domain0 = &pcie0;
+		sdhc2 = &sdhc_2; /* SDC2 SD card slot */
+	};
+
+	psci {
+		compatible = "arm,psci-1.0";
+		method = "smc";
+	};
+
+	chosen {
+		stdout-path = "serial0";
+	};
+
+	cpus {
+		#address-cells = <2>;
+		#size-cells = <0>;
+
+		CPU0: cpu@0 {
+			device_type = "cpu";
+			compatible = "arm,armv8";
+			reg = <0x0 0x0>;
+			qcom,limits-info = <&mitigation_profile0>;
+			qcom,lmh-dcvs = <&lmh_dcvs0>;
+			enable-method = "psci";
+			efficiency = <1024>;
+			next-level-cache = <&L2_0>;
+			qcom,ea = <&ea0>;
+			L2_0: l2-cache {
+				compatible = "arm,arch-cache";
+				cache-level = <2>;
+				qcom,dump-size = <0x0>; /* A53 L2 dump not supported */
+			};
+			L1_I_0: l1-icache {
+				compatible = "arm,arch-cache";
+				qcom,dump-size = <0x9040>;
+			};
+			L1_D_0: l1-dcache {
+				compatible = "arm,arch-cache";
+				qcom,dump-size = <0x9040>;
+			};
+			L1_TLB_0: l1-tlb {
+				qcom,dump-size = <0x2000>;
+			};
+		};
+
+		CPU1: cpu@1 {
+			device_type = "cpu";
+			compatible = "arm,armv8";
+			reg = <0x0 0x1>;
+			qcom,limits-info = <&mitigation_profile1>;
+			qcom,lmh-dcvs = <&lmh_dcvs0>;
+			enable-method = "psci";
+			efficiency = <1024>;
+			next-level-cache = <&L2_0>;
+			qcom,ea = <&ea1>;
+			L1_I_1: l1-icache {
+				compatible = "arm,arch-cache";
+				qcom,dump-size = <0x9040>;
+			};
+			L1_D_1: l1-dcache {
+				compatible = "arm,arch-cache";
+				qcom,dump-size = <0x9040>;
+			};
+			L1_TLB_1: l1-tlb {
+				qcom,dump-size = <0x2000>;
+			};
+		};
+
+		CPU2: cpu@2 {
+			device_type = "cpu";
+			compatible = "arm,armv8";
+			reg = <0x0 0x2>;
+			qcom,limits-info = <&mitigation_profile2>;
+			qcom,lmh-dcvs = <&lmh_dcvs0>;
+			enable-method = "psci";
+			efficiency = <1024>;
+			next-level-cache = <&L2_0>;
+			qcom,ea = <&ea2>;
+			L1_I_2: l1-icache {
+				compatible = "arm,arch-cache";
+				qcom,dump-size = <0x9040>;
+			};
+			L1_D_2: l1-dcache {
+				compatible = "arm,arch-cache";
+				qcom,dump-size = <0x9040>;
+			};
+			L1_TLB_2: l1-tlb {
+				qcom,dump-size = <0x2000>;
+			};
+		};
+
+		CPU3: cpu@3 {
+			device_type = "cpu";
+			compatible = "arm,armv8";
+			reg = <0x0 0x3>;
+			qcom,limits-info = <&mitigation_profile3>;
+			qcom,lmh-dcvs = <&lmh_dcvs0>;
+			enable-method = "psci";
+			efficiency = <1024>;
+			next-level-cache = <&L2_0>;
+			qcom,ea = <&ea3>;
+			L1_I_3: l1-icache {
+				compatible = "arm,arch-cache";
+				qcom,dump-size = <0x9040>;
+			};
+			L1_D_3: l1-dcache {
+				compatible = "arm,arch-cache";
+				qcom,dump-size = <0x9040>;
+			};
+			L1_TLB_3: l1-tlb {
+				qcom,dump-size = <0x2000>;
+			};
+		};
+
+		CPU4: cpu@100 {
+			device_type = "cpu";
+			compatible = "arm,armv8";
+			reg = <0x0 0x100>;
+			qcom,limits-info = <&mitigation_profile4>;
+			qcom,lmh-dcvs = <&lmh_dcvs1>;
+			enable-method = "psci";
+			efficiency = <1536>;
+			next-level-cache = <&L2_1>;
+			qcom,ea = <&ea4>;
+			L2_1: l2-cache {
+				compatible = "arm,arch-cache";
+				cache-level = <2>;
+			};
+			L1_I_100: l1-icache {
+				compatible = "arm,arch-cache";
+				qcom,dump-size = <0x12000>;
+			};
+			L1_D_100: l1-dcache {
+				compatible = "arm,arch-cache";
+				qcom,dump-size = <0x12000>;
+			};
+			L1_TLB_100: l1-tlb {
+				qcom,dump-size = <0x4800>;
+			};
+		};
+
+		CPU5: cpu@101 {
+			device_type = "cpu";
+			compatible = "arm,armv8";
+			reg = <0x0 0x101>;
+			qcom,limits-info = <&mitigation_profile5>;
+			qcom,lmh-dcvs = <&lmh_dcvs1>;
+			enable-method = "psci";
+			efficiency = <1536>;
+			next-level-cache = <&L2_1>;
+			qcom,ea = <&ea5>;
+			L1_I_101: l1-icache {
+				compatible = "arm,arch-cache";
+				qcom,dump-size = <0x12000>;
+			};
+			L1_D_101: l1-dcache {
+				compatible = "arm,arch-cache";
+				qcom,dump-size = <0x12000>;
+			};
+			L1_TLB_101: l1-tlb {
+				qcom,dump-size = <0x4800>;
+			};
+		};
+
+		CPU6: cpu@102 {
+			device_type = "cpu";
+			compatible = "arm,armv8";
+			reg = <0x0 0x102>;
+			qcom,limits-info = <&mitigation_profile6>;
+			qcom,lmh-dcvs = <&lmh_dcvs1>;
+			enable-method = "psci";
+			efficiency = <1536>;
+			next-level-cache = <&L2_1>;
+			qcom,ea = <&ea6>;
+			L1_I_102: l1-icache {
+				compatible = "arm,arch-cache";
+				qcom,dump-size = <0x12000>;
+			};
+			L1_D_102: l1-dcache {
+				compatible = "arm,arch-cache";
+				qcom,dump-size = <0x12000>;
+			};
+			L1_TLB_102: l1-tlb {
+				qcom,dump-size = <0x4800>;
+			};
+		};
+
+		CPU7: cpu@103 {
+			device_type = "cpu";
+			compatible = "arm,armv8";
+			reg = <0x0 0x103>;
+			qcom,limits-info = <&mitigation_profile7>;
+			qcom,lmh-dcvs = <&lmh_dcvs1>;
+			enable-method = "psci";
+			efficiency = <1536>;
+			next-level-cache = <&L2_1>;
+			qcom,ea = <&ea7>;
+			L1_I_103: l1-icache {
+				compatible = "arm,arch-cache";
+				qcom,dump-size = <0x12000>;
+			};
+			L1_D_103: l1-dcache {
+				compatible = "arm,arch-cache";
+				qcom,dump-size = <0x12000>;
+			};
+			L1_TLB_103: l1-tlb {
+				qcom,dump-size = <0x4800>;
+			};
+		};
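+		/*
+		 * The "efficiency" values above (1024 for the little cluster,
+		 * 1536 for the big cluster) appear to be relative capacity
+		 * weights consumed by the scheduler's CPU topology code;
+		 * higher means more work per cycle at the same frequency.
+		 */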
+
+		cpu-map {
+			cluster0 {
+				core0 {
+					cpu = <&CPU0>;
+				};
+
+				core1 {
+					cpu = <&CPU1>;
+				};
+
+				core2 {
+					cpu = <&CPU2>;
+				};
+
+				core3 {
+					cpu = <&CPU3>;
+				};
+			};
+
+			cluster1 {
+				core0 {
+					cpu = <&CPU4>;
+				};
+
+				core1 {
+					cpu = <&CPU5>;
+				};
+
+				core2 {
+					cpu = <&CPU6>;
+				};
+
+				core3 {
+					cpu = <&CPU7>;
+				};
+			};
+		};
+	};
+
+	soc: soc { };
+
+	vendor: vendor {
+		#address-cells = <1>;
+		#size-cells = <1>;
+		ranges = <0 0 0 0xffffffff>;
+		compatible = "simple-bus";
+	};
+
+	reserved-memory {
+		#address-cells = <2>;
+		#size-cells = <2>;
+		ranges;
+
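+		/*
+		 * Pool semantics (per the reserved-memory binding as used in
+		 * msm trees): "removed-dma-pool" + "no-map" regions are
+		 * removed from the HLOS memory map outright, while
+		 * "shared-dma-pool" + "reusable" regions are CMA pools the
+		 * kernel may use until a client claims them.
+		 */
+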
+		removed_regions: removed_regions@85800000 {
+			compatible = "removed-dma-pool";
+			no-map;
+			reg = <0 0x85800000 0 0x3700000>;
+		};
+
+		pil_slpi_mem: pil_slpi_region@94400000 {
+			compatible = "removed-dma-pool";
+			no-map;
+			reg = <0 0x94400000 0 0xf00000>;
+		};
+
+		pil_ipa_gpu_mem: pil_ipa_gpu_region@94300000 {
+			compatible = "removed-dma-pool";
+			no-map;
+			reg = <0 0x94300000 0 0x100000>;
+		};
+
+		pil_mba_mem: pil_mba_region@94100000 {
+			compatible = "removed-dma-pool";
+			no-map;
+			reg = <0 0x94100000 0 0x200000>;
+		};
+
+		pil_video_mem: pil_video_region@93c00000 {
+			compatible = "removed-dma-pool";
+			no-map;
+			reg = <0 0x93c00000 0 0x500000>;
+		};
+
+		modem_mem: modem_region@8cc00000 {
+			compatible = "removed-dma-pool";
+			no-map;
+			reg = <0 0x8cc00000 0 0x7000000>;
+		};
+
+		pil_adsp_mem: pil_adsp_region@8b200000 {
+			compatible = "removed-dma-pool";
+			no-map;
+			reg = <0 0x8b200000 0 0x1a00000>;
+		};
+
+		spss_mem: spss_region@8ab00000 { /* for SPSS-PIL */
+			compatible = "removed-dma-pool";
+			no-map;
+			reg = <0 0x8ab00000 0 0x700000>;
+		};
+
+		adsp_mem: adsp_region {
+			compatible = "shared-dma-pool";
+			alloc-ranges = <0 0x00000000 0 0xffffffff>;
+			reusable;
+			alignment = <0 0x400000>;
+			size = <0 0x800000>;
+		};
+
+		qseecom_mem: qseecom_region {
+			compatible = "shared-dma-pool";
+			alloc-ranges = <0 0x00000000 0 0xffffffff>;
+			reusable;
+			alignment = <0 0x400000>;
+			size = <0 0x1400000>;
+		};
+
+		sp_mem: sp_region {  /* SPSS-HLOS ION shared mem */
+			compatible = "shared-dma-pool";
+			alloc-ranges = <0 0x00000000 0 0xffffffff>; /* 32-bit */
+			reusable;
+			alignment = <0 0x100000>;
+			size = <0 0x800000>;
+		};
+
+		secure_display_memory: secure_region {
+			compatible = "shared-dma-pool";
+			alloc-ranges = <0 0x00000000 0 0xffffffff>;
+			reusable;
+			alignment = <0 0x200000>;
+			size = <0 0x8000000>;
+		};
+
+		/* global autoconfigured region for contiguous allocations */
+		linux,cma {
+			compatible = "shared-dma-pool";
+			alloc-ranges = <0 0x00000000 0 0xffffffff>;
+			reusable;
+			alignment = <0 0x400000>;
+			size = <0 0x2000000>;
+			linux,cma-default;
+		};
+
+		cont_splash_mem: splash_region@9d600000 {
+			reg = <0x0 0x9d600000 0x0 0x02400000>;
+			label = "cont_splash_mem";
+		};
+	};
+};
+
+#include "msm8998-smp2p.dtsi"
+#include "msm-gdsc-8998.dtsi"
+
+&soc {
+	#address-cells = <1>;
+	#size-cells = <1>;
+	ranges = <0 0 0 0xffffffff>;
+	compatible = "simple-bus";
+
+	intc: interrupt-controller@17a00000 {
+		compatible = "arm,gic-v3";
+		reg = <0x17a00000 0x10000>,       /* GICD */
+		      <0x17b00000 0x100000>;      /* GICR * 8 */
+		#interrupt-cells = <3>;
+		#address-cells = <1>;
+		#size-cells = <1>;
+		ranges;
+		interrupt-controller;
+		#redistributor-regions = <1>;
+		redistributor-stride = <0x0 0x20000>;
+		interrupts = <1 9 4>;
+
+		gic-its@17a20000 {
+			compatible = "arm,gic-v3-its";
+			msi-controller;
+			reg = <0x17a20000 0x20000>;
+		};
+	};
+
+	timer {
+		compatible = "arm,armv8-timer";
+		interrupts = <1 1 0xf08>,
+			     <1 2 0xf08>,
+			     <1 3 0xf08>,
+			     <1 0 0xf08>;
+		clock-frequency = <19200000>;
+	};
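+
+	/*
+	 * PPI flags 0xf08 follow the GIC interrupt-cells encoding: bits
+	 * [15:8] are a CPU mask and bits [3:0] the trigger type (8 =
+	 * active-low level); with GICv3 the mask bits are ignored.
+	 */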
+
+	restart@10ac000 {
+		compatible = "qcom,pshold";
+		reg = <0x10ac000 0x4>,
+		      <0x1fd3000 0x4>;
+		reg-names = "pshold-base", "tcsr-boot-misc-detect";
+	};
+
+	spmi_bus: qcom,spmi@800f000 {
+		compatible = "qcom,spmi-pmic-arb";
+		reg =	<0x800f000 0x1000>,
+			<0x8400000 0x1000000>,
+			<0x9400000 0x1000000>,
+			<0xa400000 0x220000>,
+			<0x800a000 0x3000>;
+		reg-names = "core", "chnls", "obsrvr", "intr", "cnfg";
+		interrupt-names = "periph_irq";
+		interrupts = <GIC_SPI 326 IRQ_TYPE_NONE>;
+		qcom,ee = <0>;
+		qcom,channel = <0>;
+		qcom,reserved-chan = <511>;
+		#address-cells = <2>;
+		#size-cells = <0>;
+		interrupt-controller;
+		#interrupt-cells = <4>;
+		cell-index = <0>;
+	};
+
+	qcom,sps {
+		compatible = "qcom,msm_sps_4k";
+		qcom,device-type = <3>;
+		qcom,pipe-attr-ee;
+	};
+
+	uartblsp1dm1: serial@0c170000 {
+		compatible = "qcom,msm-uartdm-v1.4", "qcom,msm-uartdm";
+		reg = <0xc170000 0x1000>;
+		interrupts = <0 108 0>;
+		status = "disabled";
+		clocks = <&clock_gcc clk_gcc_blsp1_uart2_apps_clk>,
+			 <&clock_gcc clk_gcc_blsp1_ahb_clk>;
+		clock-names = "core", "iface";
+	};
+
+	uartblsp2dm1: serial@0c1b0000 {
+		compatible = "qcom,msm-uartdm-v1.4", "qcom,msm-uartdm";
+		reg = <0xc1b0000 0x1000>;
+		interrupts = <0 114 0>;
+		status = "disabled";
+		clocks = <&clock_gcc clk_gcc_blsp2_uart2_apps_clk>,
+			 <&clock_gcc clk_gcc_blsp2_ahb_clk>;
+		clock-names = "core", "iface";
+	};
+
+	slim_aud: slim@171c0000 {
+		cell-index = <1>;
+		compatible = "qcom,slim-ngd";
+		reg = <0x171c0000 0x2C000>,
+			<0x17184000 0x32000>;
+		reg-names = "slimbus_physical", "slimbus_bam_physical";
+		interrupts = <0 163 0>, <0 164 0>;
+		interrupt-names = "slimbus_irq", "slimbus_bam_irq";
+		qcom,apps-ch-pipes = <0x00001f80>;
+		qcom,ea-pc = <0x210>;
+	};
+
+	slim_qca: slim@17240000 {
+		status = "ok";
+		cell-index = <3>;
+		compatible = "qcom,slim-ngd";
+		reg = <0x17240000 0x2C000>,
+			<0x17204000 0x26000>;
+		reg-names = "slimbus_physical", "slimbus_bam_physical";
+		interrupts = <0 291 0>, <0 292 0>;
+		interrupt-names = "slimbus_irq", "slimbus_bam_irq";
+
+		/* Slimbus Slave DT for WCN3990 */
+		btfmslim_codec: wcn3990 {
+			compatible = "qcom,btfmslim_slave";
+			elemental-addr = [00 01 20 02 17 02];
+			qcom,btfm-slim-ifd = "btfmslim_slave_ifd";
+			qcom,btfm-slim-ifd-elemental-addr = [00 00 20 02 17 02];
+		};
+	};
+
+	timer@17920000 {
+		#address-cells = <1>;
+		#size-cells = <1>;
+		ranges;
+		compatible = "arm,armv7-timer-mem";
+		reg = <0x17920000 0x1000>;
+		clock-frequency = <19200000>;
+
+		frame@17921000 {
+			frame-number = <0>;
+			interrupts = <0 8 0x4>,
+				     <0 7 0x4>;
+			reg = <0x17921000 0x1000>,
+			      <0x17922000 0x1000>;
+		};
+
+		frame@17923000 {
+			frame-number = <1>;
+			interrupts = <0 9 0x4>;
+			reg = <0x17923000 0x1000>;
+			status = "disabled";
+		};
+
+		frame@17924000 {
+			frame-number = <2>;
+			interrupts = <0 10 0x4>;
+			reg = <0x17924000 0x1000>;
+			status = "disabled";
+		};
+
+		frame@17925000 {
+			frame-number = <3>;
+			interrupts = <0 11 0x4>;
+			reg = <0x17925000 0x1000>;
+			status = "disabled";
+		};
+
+		frame@17926000 {
+			frame-number = <4>;
+			interrupts = <0 12 0x4>;
+			reg = <0x17926000 0x1000>;
+			status = "disabled";
+		};
+
+		frame@17927000 {
+			frame-number = <5>;
+			interrupts = <0 13 0x4>;
+			reg = <0x17927000 0x1000>;
+			status = "disabled";
+		};
+
+		frame@17928000 {
+			frame-number = <6>;
+			interrupts = <0 14 0x4>;
+			reg = <0x17928000 0x1000>;
+			status = "disabled";
+		};
+	};
+
+	cpubw: qcom,cpubw {
+		compatible = "qcom,devbw";
+		governor = "performance";
+		qcom,src-dst-ports = <1 512>;
+		qcom,active-only;
+		qcom,bw-tbl =
+			<   762 /*  100 MHz */ >,
+			<  1144 /*  150 MHz */ >,
+			<  1525 /*  200 MHz */ >,
+			<  2288 /*  300 MHz */ >,
+			<  3143 /*  412 MHz */ >,
+			<  4173 /*  547 MHz */ >,
+			<  5195 /*  681 MHz */ >,
+			<  5859 /*  768 MHz */ >,
+			<  7759 /* 1017 MHz */ >,
+			<  9887 /* 1296 MHz */ >,
+			< 11863 /* 1555 MHz */ >,
+			< 13763 /* 1804 MHz */ >;
+	};
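+
+	/*
+	 * The qcom,bw-tbl values here and in the other devbw nodes below are
+	 * bandwidth votes in MiB/s and appear to equal the DDR frequency in
+	 * the comment times an 8-byte bus width, e.g. 100 MHz * 8 B =
+	 * 800 MB/s ~= 762 MiB/s.
+	 */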
+
+	bwmon: qcom,cpu-bwmon {
+		compatible = "qcom,bimc-bwmon3";
+		reg = <0x01008000 0x300>, <0x01001000 0x200>;
+		reg-names = "base", "global_base";
+		interrupts = <0 183 4>;
+		qcom,mport = <0>;
+		qcom,target-dev = <&cpubw>;
+	};
+
+	mincpubw: qcom,mincpubw {
+		compatible = "qcom,devbw";
+		governor = "powersave";
+		qcom,src-dst-ports = <1 512>;
+		qcom,active-only;
+		qcom,bw-tbl =
+			<   762 /*  100 MHz */ >,
+			<  1144 /*  150 MHz */ >,
+			<  1525 /*  200 MHz */ >,
+			<  2288 /*  300 MHz */ >,
+			<  3143 /*  412 MHz */ >,
+			<  4173 /*  547 MHz */ >,
+			<  5195 /*  681 MHz */ >,
+			<  5859 /*  768 MHz */ >,
+			<  7759 /* 1017 MHz */ >,
+			<  9887 /* 1296 MHz */ >,
+			< 11863 /* 1555 MHz */ >,
+			< 13763 /* 1804 MHz */ >;
+	};
+
+	memlat_cpu0: qcom,memlat-cpu0 {
+		compatible = "qcom,devbw";
+		governor = "powersave";
+		qcom,src-dst-ports = <1 512>;
+		qcom,active-only;
+		qcom,bw-tbl =
+			<   762 /*  100 MHz */ >,
+			<  1144 /*  150 MHz */ >,
+			<  1525 /*  200 MHz */ >,
+			<  2288 /*  300 MHz */ >,
+			<  3143 /*  412 MHz */ >,
+			<  4173 /*  547 MHz */ >,
+			<  5195 /*  681 MHz */ >,
+			<  5859 /*  768 MHz */ >,
+			<  7759 /* 1017 MHz */ >,
+			<  9887 /* 1296 MHz */ >,
+			< 11863 /* 1555 MHz */ >,
+			< 13763 /* 1804 MHz */ >;
+	};
+
+	memlat_cpu4: qcom,memlat-cpu4 {
+		compatible = "qcom,devbw";
+		governor = "powersave";
+		qcom,src-dst-ports = <1 512>;
+		qcom,active-only;
+		status = "ok";
+		qcom,bw-tbl =
+			<   762 /*  100 MHz */ >,
+			<  1144 /*  150 MHz */ >,
+			<  1525 /*  200 MHz */ >,
+			<  2288 /*  300 MHz */ >,
+			<  3143 /*  412 MHz */ >,
+			<  4173 /*  547 MHz */ >,
+			<  5195 /*  681 MHz */ >,
+			<  5859 /*  768 MHz */ >,
+			<  7759 /* 1017 MHz */ >,
+			<  9887 /* 1296 MHz */ >,
+			< 11863 /* 1555 MHz */ >,
+			< 13763 /* 1804 MHz */ >;
+	};
+
+	devfreq_memlat_0: qcom,arm-memlat-mon-0 {
+		compatible = "qcom,arm-memlat-mon";
+		qcom,cpulist =	<&CPU0 &CPU1 &CPU2 &CPU3>;
+		qcom,target-dev = <&memlat_cpu0>;
+		qcom,core-dev-table =
+			<  300000 1525 >,
+			<  499200 3143 >,
+			< 1113600 4173 >,
+			< 1881600 5859 >;
+	};
+
+	devfreq_memlat_4: qcom,arm-memlat-mon-4 {
+		compatible = "qcom,arm-memlat-mon";
+		qcom,cpulist =	<&CPU4 &CPU5 &CPU6 &CPU7>;
+		qcom,target-dev = <&memlat_cpu4>;
+		qcom,core-dev-table =
+			<  300000  1525 >,
+			<  480000  3143 >,
+			<  900000  4173 >,
+			< 1017000  7759 >,
+			< 1296000  9887 >,
+			< 1555000 11863 >,
+			< 1804000 13763 >;
+	};
+
+	devfreq_cpufreq: devfreq-cpufreq {
+		mincpubw-cpufreq {
+			target-dev = <&mincpubw>;
+			cpu-to-dev-map-0 =
+				< 1881600 1525 >;
+			cpu-to-dev-map-4 =
+				< 2016000 1525 >,
+				< 2092800 5195 >;
+		};
+	};
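+
+	/*
+	 * In the qcom,core-dev-table and cpu-to-dev-map-* tables above, the
+	 * first column is a CPU frequency in kHz and the second the devbw
+	 * bandwidth level (MiB/s) voted once that frequency is reached; the
+	 * column meanings are inferred from the matching qcom,bw-tbl and
+	 * cpufreq-table entries.
+	 */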
+
+	msm_cpufreq: qcom,msm-cpufreq {
+		compatible = "qcom,msm-cpufreq";
+		clock-names = "cpu0_clk", "cpu1_clk", "cpu2_clk",
+				"cpu3_clk", "cpu4_clk", "cpu5_clk",
+				"cpu6_clk", "cpu7_clk";
+		clocks = <&clock_cpu clk_pwrcl_clk>,
+			 <&clock_cpu clk_pwrcl_clk>,
+			 <&clock_cpu clk_pwrcl_clk>,
+			 <&clock_cpu clk_pwrcl_clk>,
+			 <&clock_cpu clk_perfcl_clk>,
+			 <&clock_cpu clk_perfcl_clk>,
+			 <&clock_cpu clk_perfcl_clk>,
+			 <&clock_cpu clk_perfcl_clk>;
+
+		qcom,governor-per-policy;
+
+		qcom,cpufreq-table-0 =
+			<  300000 >,
+			<  345600 >,
+			<  422400 >,
+			<  499200 >,
+			<  576000 >,
+			<  633600 >,
+			<  710400 >,
+			<  806400 >,
+			<  883200 >,
+			<  960000 >,
+			< 1036800 >,
+			< 1113600 >,
+			< 1190400 >,
+			< 1248000 >,
+			< 1324800 >,
+			< 1401600 >,
+			< 1478400 >,
+			< 1574400 >,
+			< 1651200 >,
+			< 1728000 >,
+			< 1804800 >,
+			< 1881600 >;
+
+		qcom,cpufreq-table-4 =
+			<  300000 >,
+			<  345600 >,
+			<  422400 >,
+			<  480000 >,
+			<  556800 >,
+			<  633600 >,
+			<  710400 >,
+			<  787200 >,
+			<  844800 >,
+			<  902400 >,
+			<  979200 >,
+			< 1056000 >,
+			< 1171200 >,
+			< 1248000 >,
+			< 1324800 >,
+			< 1401600 >,
+			< 1478400 >,
+			< 1536000 >,
+			< 1632000 >,
+			< 1708800 >,
+			< 1785600 >,
+			< 1862400 >,
+			< 1939200 >,
+			< 2016000 >,
+			< 2092800 >;
+	};
+
+	arm64-cpu-erp {
+		compatible = "arm,arm64-cpu-erp";
+		interrupts = <0 43 4>,
+			     <0 44 4>,
+			     <0 41 4>,
+			     <0 42 4>;
+
+		interrupt-names = "pri-dbe-irq",
+				  "sec-dbe-irq",
+				  "pri-ext-irq",
+				  "sec-ext-irq";
+
+		poll-delay-ms = <5000>;
+	};
+
+	clock_gcc: qcom,gcc@100000 {
+		compatible = "qcom,gcc-8998";
+		reg = <0x100000 0xb0000>;
+		reg-names = "cc_base";
+		vdd_dig-supply = <&pm8998_s1_level>;
+		vdd_dig_ao-supply = <&pm8998_s1_level_ao>;
+		#clock-cells = <1>;
+		#reset-cells = <1>;
+	};
+
+	clock_mmss: qcom,mmsscc@c8c0000 {
+		compatible = "qcom,mmsscc-8998";
+		reg = <0xc8c0000 0x40000>;
+		reg-names = "cc_base";
+		vdd_dig-supply = <&pm8998_s1_level>;
+		vdd_mmsscc_mx-supply = <&pm8998_s9_level>;
+		clock-names = "xo", "gpll0", "gpll0_div",
+				"pclk0_src", "pclk1_src",
+				"byte0_src", "byte1_src",
+				"dp_link_src", "dp_vco_div",
+				"extpclk_src";
+		clocks = <&clock_gcc clk_cxo_clk_src>,
+			 <&clock_gcc clk_gcc_mmss_gpll0_clk>,
+			 <&clock_gcc clk_gcc_mmss_gpll0_div_clk>,
+			 <&mdss_dsi0_pll clk_dsi0pll_pclk_mux>,
+			 <&mdss_dsi1_pll clk_dsi1pll_pclk_mux>,
+			 <&mdss_dsi0_pll clk_dsi0pll_byteclk_mux>,
+			 <&mdss_dsi1_pll clk_dsi1pll_byteclk_mux>,
+			 <&mdss_dp_pll clk_dp_link_2x_clk_divsel_five>,
+			 <&mdss_dp_pll clk_vco_divided_clk_src_mux>,
+			 <&mdss_hdmi_pll clk_hdmi_vco_clk>;
+		#clock-cells = <1>;
+		#reset-cells = <1>;
+	};
+
+	clock_gpu: qcom,gpucc@5065000 {
+		compatible = "qcom,gpucc-8998";
+		reg = <0x5065000 0x9000>;
+		reg-names = "cc_base";
+		vdd_dig-supply = <&pm8998_s1_level>;
+		clock-names = "xo_ao", "gpll0";
+		clocks = <&clock_gcc clk_cxo_clk_src_ao>,
+			<&clock_gcc clk_gcc_gpu_gpll0_clk>;
+		#clock-cells = <1>;
+	};
+
+	clock_gfx: qcom,gfxcc@5065000 {
+		compatible = "qcom,gfxcc-8998";
+		reg = <0x5065000 0x9000>;
+		reg-names = "cc_base";
+		vdd_gpucc-supply = <&gfx_vreg>;
+		vdd_mx-supply = <&pm8998_s9_level>;
+		vdd_gpu_mx-supply = <&pm8998_s9_level>;
+		qcom,gfx3d_clk_src-opp-handle = <&msm_gpu>;
+		qcom,gfxfreq-speedbin0 =
+			<	  0 0				0 >,
+			< 171000000 1 RPM_SMD_REGULATOR_LEVEL_SVS >,
+			< 251000000 2 RPM_SMD_REGULATOR_LEVEL_SVS >,
+			< 332000000 3 RPM_SMD_REGULATOR_LEVEL_SVS >,
+			< 403000000 4 RPM_SMD_REGULATOR_LEVEL_SVS >,
+			< 504000000 5 RPM_SMD_REGULATOR_LEVEL_NOM >,
+			< 650000000 6 RPM_SMD_REGULATOR_LEVEL_TURBO >;
+		qcom,gfxfreq-mx-speedbin0 =
+			<         0			      0 >,
+			< 171000000 RPM_SMD_REGULATOR_LEVEL_SVS >,
+			< 251000000 RPM_SMD_REGULATOR_LEVEL_SVS >,
+			< 332000000 RPM_SMD_REGULATOR_LEVEL_SVS >,
+			< 403000000 RPM_SMD_REGULATOR_LEVEL_SVS >,
+			< 504000000 RPM_SMD_REGULATOR_LEVEL_NOM >,
+			< 650000000 RPM_SMD_REGULATOR_LEVEL_TURBO >;
+		#clock-cells = <1>;
+	};
+
+	clock_cpu: qcom,cpu-clock-8998@179c0000 {
+		compatible = "qcom,cpu-clock-osm-msm8998-v1";
+		reg = <0x179c0000 0x4000>,
+		      <0x17916000 0x1000>,
+		      <0x17816000 0x1000>,
+		      <0x179d1000 0x1000>,
+		      <0x00784130 0x8>,
+		      <0x1791101c 0x8>;
+		reg-names = "osm", "pwrcl_pll", "perfcl_pll",
+			    "apcs_common", "perfcl_efuse", "debug";
+
+		vdd-pwrcl-supply = <&apc0_pwrcl_vreg>;
+		vdd-perfcl-supply = <&apc1_perfcl_vreg>;
+
+		interrupts = <GIC_SPI 35 IRQ_TYPE_EDGE_RISING>,
+			     <GIC_SPI 36 IRQ_TYPE_EDGE_RISING>;
+		interrupt-names = "pwrcl-irq", "perfcl-irq";
+
+		qcom,pwrcl-speedbin0-v0 =
+			<   300000000 0x0004000f 0x01200020 0x1 1 >,
+			<   345600000 0x05040012 0x02200020 0x1 2 >,
+			<   422400000 0x05040016 0x02200020 0x1 3 >,
+			<   499200000 0x0504001a 0x02200020 0x1 4 >,
+			<   576000000 0x0504001e 0x03200020 0x1 5 >,
+			<   633600000 0x05040021 0x03200020 0x1 6 >,
+			<   710400000 0x05040025 0x03200020 0x1 7 >,
+			<   806400000 0x0504002a 0x04200020 0x1 8 >,
+			<   883200000 0x0404002e 0x04250025 0x1 9 >,
+			<   960000000 0x04040032 0x05280028 0x1 10 >,
+			<  1036800000 0x04040036 0x052b002b 0x2 11 >,
+			<  1113600000 0x0404003a 0x052e002e 0x2 12 >,
+			<  1190400000 0x0404003e 0x06320032 0x2 13 >,
+			<  1248000000 0x04040041 0x06340034 0x2 14 >,
+			<  1324800000 0x04040045 0x06370037 0x2 15 >,
+			<  1401600000 0x04040049 0x073a003a 0x2 16 >,
+			<  1478400000 0x0404004d 0x073e003e 0x2 17 >,
+			<  1574400000 0x04040052 0x08420042 0x2 18 >,
+			<  1651200000 0x04040056 0x08450045 0x2 19 >,
+			<  1728000000 0x0404005a 0x08480048 0x2 20 >,
+			<  1804800000 0x0404005e 0x094b004b 0x3 21 >,
+			<  1881600000 0x04040062 0x094e004e 0x3 22 >;
+
+		qcom,perfcl-speedbin0-v0 =
+			<   300000000 0x0004000f 0x01200020 0x1 1 >,
+			<   345600000 0x05040012 0x02200020 0x1 2 >,
+			<   422400000 0x05040016 0x02200020 0x1 3 >,
+			<   480000000 0x05040019 0x02200020 0x1 4 >,
+			<   556800000 0x0504001d 0x03200020 0x1 5 >,
+			<   633600000 0x05040021 0x03200020 0x1 6 >,
+			<   710400000 0x05040025 0x03200020 0x1 7 >,
+			<   787200000 0x05040029 0x04200020 0x1 8 >,
+			<   844800000 0x0404002c 0x04230023 0x1 9 >,
+			<   902400000 0x0404002f 0x04260026 0x1 10 >,
+			<   979200000 0x04040033 0x05290029 0x1 11 >,
+			<  1056000000 0x04040037 0x052c002c 0x1 12 >,
+			<  1171200000 0x0404003d 0x06310031 0x2 13 >,
+			<  1248000000 0x04040041 0x06340034 0x2 14 >,
+			<  1324800000 0x04040045 0x06370037 0x2 15 >,
+			<  1401600000 0x04040049 0x073a003a 0x2 16 >,
+			<  1478400000 0x0404004d 0x073e003e 0x2 17 >,
+			<  1536000000 0x04040050 0x07400040 0x2 18 >,
+			<  1632000000 0x04040055 0x08440044 0x2 19 >,
+			<  1708800000 0x04040059 0x08470047 0x2 20 >,
+			<  1785600000 0x0404005d 0x094a004a 0x2 21 >,
+			<  1862400000 0x04040061 0x094e004e 0x2 22 >,
+			<  1939200000 0x04040065 0x09510051 0x3 23 >,
+			<  2016000000 0x04040069 0x0a540054 0x3 24 >,
+			<  2092800000 0x0404006d 0x0a570057 0x3 25 >;
+
+		qcom,up-timer =
+			<1000 1000>;
+		qcom,down-timer =
+			<1000 1000>;
+		qcom,pc-override-index =
+			<0 0>;
+		qcom,set-ret-inactive;
+		qcom,enable-llm-freq-vote;
+		qcom,llm-freq-up-timer =
+			<327675 327675>;
+		qcom,llm-freq-down-timer =
+			<327675 327675>;
+		qcom,enable-llm-volt-vote;
+		qcom,llm-volt-up-timer =
+			<327675 327675>;
+		qcom,llm-volt-down-timer =
+			<327675 327675>;
+		qcom,cc-reads = <10>;
+		qcom,cc-delay = <5>;
+		qcom,cc-factor = <100>;
+		qcom,osm-clk-rate = <200000000>;
+		qcom,xo-clk-rate = <19200000>;
+
+		qcom,l-val-base =
+			<0x17916004 0x17816004>;
+		qcom,apcs-itm-present =
+			<0x179d143c 0x179d143c>;
+		qcom,apcs-pll-user-ctl =
+			<0x1791600c 0x1781600c>;
+		qcom,apcs-cfg-rcgr =
+			<0x17911054 0x17811054>;
+		qcom,apcs-cmd-rcgr =
+			<0x17911050 0x17811050>;
+		qcom,apm-mode-ctl =
+			<0x179d0004 0x179d0010>;
+		qcom,apm-ctrl-status =
+			<0x179d000c 0x179d0018>;
+		qcom,llm-sw-overr =
+			<0x8fff0036 0x8fff003a 0x0fff0036>,
+			<0x8fff003d 0x8fff0041 0x0fff003d>;
+
+		qcom,apm-threshold-voltage = <832000>;
+		qcom,boost-fsm-en;
+		qcom,safe-fsm-en;
+		qcom,ps-fsm-en;
+		qcom,droop-fsm-en;
+		qcom,wfx-fsm-en;
+		qcom,pc-fsm-en;
+
+		qcom,pwrcl-apcs-mem-acc-cfg =
+			<0x179d1360 0x179d1364 0x179d1364>;
+		qcom,perfcl-apcs-mem-acc-cfg =
+			<0x179d1368 0x179d136C 0x179d1370>;
+		qcom,pwrcl-apcs-mem-acc-val =
+			<0x00000000 0x80000000 0x80000000>,
+			<0x00000000 0x00000000 0x00000000>,
+			<0x00000000 0x00000001 0x00000001>;
+		qcom,perfcl-apcs-mem-acc-val =
+			<0x00000000 0x00000000 0x80000000>,
+			<0x00000000 0x00000000 0x00000000>,
+			<0x00000000 0x00000000 0x00000001>;
+
+		clock-names = "aux_clk", "xo_ao";
+		clocks = <&clock_gcc clk_hmss_gpll0_clk_src>,
+			<&clock_gcc clk_cxo_clk_src_ao>;
+		#clock-cells = <1>;
+	};
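+
+	/*
+	 * Each qcom,*-speedbin0-v0 row above reads
+	 * < freq-hz pll-word cfg-word level index >; for most rows the low
+	 * byte of the pll-word matches the PLL L-value (frequency divided by
+	 * the 19.2 MHz XO), e.g. 0x1a = 26 and 26 * 19.2 MHz = 499.2 MHz.
+	 * The naming of the other fields is inferred, not authoritative.
+	 */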
+
+	clock_debug: qcom,debugcc@162000 {
+		compatible = "qcom,cc-debug-8998";
+		reg = <0x162000 0x4>;
+		reg-names = "cc_base";
+		clock-names = "debug_gpu_clk", "debug_gfx_clk",
+				"debug_mmss_clk", "debug_cpu_clk";
+		clocks = <&clock_gpu clk_gpucc_gcc_dbg_clk>,
+			 <&clock_gfx clk_gfxcc_dbg_clk>,
+			 <&clock_mmss clk_mmss_debug_mux>,
+			 <&clock_cpu clk_cpu_debug_mux>;
+		#clock-cells = <1>;
+	};
+
+	qcom,rmtfs_sharedmem@0 {
+		compatible = "qcom,sharedmem-uio";
+		reg = <0x0 0x00200000>;
+		reg-names = "rmtfs";
+		qcom,client-id = <0x00000001>;
+	};
+
+	qcom,msm_gsi {
+		compatible = "qcom,msm_gsi";
+	};
+
+	qcom,rmnet-ipa {
+		compatible = "qcom,rmnet-ipa3";
+		qcom,rmnet-ipa-ssr;
+		qcom,ipa-loaduC;
+		qcom,ipa-advertise-sg-support;
+	};
+
+	ipa_hw: qcom,ipa@01e00000 {
+		compatible = "qcom,ipa";
+		reg = <0x01e00000 0x34000>,
+			<0x01e84000 0x31fff>,
+			<0x01e04000 0x2c000>;
+		reg-names = "ipa-base", "bam-base", "gsi-base";
+		interrupts =
+			<0 333 0>,
+			<0 432 0>,
+			<0 432 0>;
+		interrupt-names = "ipa-irq", "bam-irq", "gsi-irq";
+		qcom,ipa-hw-ver = <11>; /* IPA core version = IPAv3.1 */
+		qcom,ipa-hw-mode = <0>; /* IPA hw type = Normal */
+		qcom,ee = <0>;
+		qcom,use-gsi;
+		qcom,use-ipa-tethering-bridge;
+		qcom,modem-cfg-emb-pipe-flt;
+		qcom,do-not-use-ch-gsi-20;
+		qcom,ipa-wdi2;
+		qcom,use-64-bit-dma-mask;
+		clocks = <&clock_gcc clk_ipa_clk>;
+		clock-names = "core_clk";
+		qcom,arm-smmu;
+		qcom,smmu-disable-htw;
+		qcom,smmu-s1-bypass;
+		qcom,msm-bus,name = "ipa";
+		qcom,msm-bus,num-cases = <4>;
+		qcom,msm-bus,num-paths = <4>;
+		qcom,msm-bus,vectors-KBps =
+		/* No vote */
+			<90 512 0 0>,
+			<90 585 0 0>,
+			<1 676 0 0>,
+			 /* SMMU smmu_aggre2_noc_clk */
+			<81 10065 0 0>,
+		/* SVS */
+			<90 512 80000 640000>,
+			<90 585 80000 640000>,
+			<1 676 80000 80000>,
+			/* SMMU smmu_aggre2_noc_clk */
+			<81 10065 0 16000>,
+		/* NOMINAL */
+			<90 512 206000 960000>,
+			<90 585 206000 960000>,
+			<1 676 206000 160000>,
+			/* SMMU smmu_aggre2_noc_clk */
+			<81 10065 0 16000>,
+		/* TURBO */
+			<90 512 206000 3600000>,
+			<90 585 206000 3600000>,
+			<1 676 206000 300000>,
+			/* SMMU smmu_aggre2_noc_clk */
+			<81 10065 0 16000>;
+		qcom,bus-vector-names = "MIN", "SVS", "NOMINAL", "TURBO";
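+
+		/*
+		 * msm-bus vectors are <master-id slave-id ab ib>, with ab
+		 * (average) and ib (instantaneous) bandwidth in KB/s; each
+		 * group of qcom,msm-bus,num-paths vectors forms one of the
+		 * named cases above.
+		 */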
+
+		/* IPA RAM mmap */
+		qcom,ipa-ram-mmap = <
+				0x280	/* ofst_start; */
+				0x0	/* nat_ofst; */
+				0x0	/* nat_size; */
+				0x288	/* v4_flt_hash_ofst; */
+				0x78	/* v4_flt_hash_size; */
+				0x4000	/* v4_flt_hash_size_ddr; */
+				0x308	/* v4_flt_nhash_ofst; */
+				0x78	/* v4_flt_nhash_size; */
+				0x4000	/* v4_flt_nhash_size_ddr; */
+				0x388	/* v6_flt_hash_ofst; */
+				0x78	/* v6_flt_hash_size; */
+				0x4000	/* v6_flt_hash_size_ddr; */
+				0x408	/* v6_flt_nhash_ofst; */
+				0x78	/* v6_flt_nhash_size; */
+				0x4000	/* v6_flt_nhash_size_ddr; */
+				0xf	/* v4_rt_num_index; */
+				0x0	/* v4_modem_rt_index_lo; */
+				0x7	/* v4_modem_rt_index_hi; */
+				0x8	/* v4_apps_rt_index_lo; */
+				0xe	/* v4_apps_rt_index_hi; */
+				0x488	/* v4_rt_hash_ofst; */
+				0x78	/* v4_rt_hash_size; */
+				0x4000	/* v4_rt_hash_size_ddr; */
+				0x508	/* v4_rt_nhash_ofst; */
+				0x78	/* v4_rt_nhash_size; */
+				0x4000	/* v4_rt_nhash_size_ddr; */
+				0xf	/* v6_rt_num_index; */
+				0x0	/* v6_modem_rt_index_lo; */
+				0x7	/* v6_modem_rt_index_hi; */
+				0x8	/* v6_apps_rt_index_lo; */
+				0xe	/* v6_apps_rt_index_hi; */
+				0x588	/* v6_rt_hash_ofst; */
+				0x78	/* v6_rt_hash_size; */
+				0x4000	/* v6_rt_hash_size_ddr; */
+				0x608	/* v6_rt_nhash_ofst; */
+				0x78	/* v6_rt_nhash_size; */
+				0x4000	/* v6_rt_nhash_size_ddr; */
+				0x688	/* modem_hdr_ofst; */
+				0x140	/* modem_hdr_size; */
+				0x7c8	/* apps_hdr_ofst; */
+				0x0	/* apps_hdr_size; */
+				0x800	/* apps_hdr_size_ddr; */
+				0x7d0	/* modem_hdr_proc_ctx_ofst; */
+				0x200	/* modem_hdr_proc_ctx_size; */
+				0x9d0	/* apps_hdr_proc_ctx_ofst; */
+				0x200	/* apps_hdr_proc_ctx_size; */
+				0x0	/* apps_hdr_proc_ctx_size_ddr; */
+				0x0	/* modem_comp_decomp_ofst; diff */
+				0x0	/* modem_comp_decomp_size; diff */
+				0xbd8	/* modem_ofst; */
+				0x1424	/* modem_size; */
+				0x1ffc	/* apps_v4_flt_hash_ofst; */
+				0x0	/* apps_v4_flt_hash_size; */
+				0x1ffc	/* apps_v4_flt_nhash_ofst; */
+				0x0	/* apps_v4_flt_nhash_size; */
+				0x1ffc	/* apps_v6_flt_hash_ofst; */
+				0x0	/* apps_v6_flt_hash_size; */
+				0x1ffc	/* apps_v6_flt_nhash_ofst; */
+				0x0	/* apps_v6_flt_nhash_size; */
+				0x80	/* uc_info_ofst; */
+				0x200	/* uc_info_size; */
+				0x2000	/* end_ofst; */
+				0x1ffc	/* apps_v4_rt_hash_ofst; */
+				0x0	/* apps_v4_rt_hash_size; */
+				0x1ffc	/* apps_v4_rt_nhash_ofst; */
+				0x0	/* apps_v4_rt_nhash_size; */
+				0x1ffc	/* apps_v6_rt_hash_ofst; */
+				0x0	/* apps_v6_rt_hash_size; */
+				0x1ffc	/* apps_v6_rt_nhash_ofst; */
+				0x0	/* apps_v6_rt_nhash_size; */
+				>;
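+		/*
+		 * The qcom,ipa-ram-mmap values are byte offsets and sizes in
+		 * IPA local memory, consumed positionally by the IPA driver
+		 * in exactly the order of the field-name comments.
+		 */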
+
+		/* smp2p gpio information */
+		qcom,smp2pgpio_map_ipa_1_out {
+			compatible = "qcom,smp2pgpio-map-ipa-1-out";
+			gpios = <&smp2pgpio_ipa_1_out 0 0>;
+		};
+
+		qcom,smp2pgpio_map_ipa_1_in {
+			compatible = "qcom,smp2pgpio-map-ipa-1-in";
+			gpios = <&smp2pgpio_ipa_1_in 0 0>;
+		};
+
+		ipa_smmu_ap: ipa_smmu_ap {
+			compatible = "qcom,ipa-smmu-ap-cb";
+			iommus = <&anoc2_smmu 0x18e0>;
+			qcom,iova-mapping = <0x10000000 0x40000000>;
+		};
+
+		ipa_smmu_wlan: ipa_smmu_wlan {
+			compatible = "qcom,ipa-smmu-wlan-cb";
+			iommus = <&anoc2_smmu 0x18e1>;
+		};
+
+		ipa_smmu_uc: ipa_smmu_uc {
+			compatible = "qcom,ipa-smmu-uc-cb";
+			iommus = <&anoc2_smmu 0x18e2>;
+			qcom,iova-mapping = <0x40000000 0x20000000>;
+		};
+	};
+
+	qcom,ipa_fws@1e08000 {
+		compatible = "qcom,pil-tz-generic";
+		qcom,pas-id = <0xF>;
+		qcom,firmware-name = "ipa_fws";
+	};
+
+	qcom,chd_silver {
+		compatible = "qcom,core-hang-detect";
+		label = "silver";
+		qcom,threshold-arr = <0x179880b0 0x179980b0
+		0x179a80b0 0x179b80b0>;
+		qcom,config-arr = <0x179880b8 0x179980b8
+		0x179a80b8 0x179b80b8>;
+	};
+
+	qcom,chd_gold {
+		compatible = "qcom,core-hang-detect";
+		label = "gold";
+		qcom,threshold-arr = <0x178880b0 0x178980b0
+		0x178a80b0 0x178b80b0>;
+		qcom,config-arr = <0x178880b8 0x178980b8
+		0x178a80b8 0x178b80b8>;
+	};
+
+	qcom,ipc-spinlock@1f40000 {
+		compatible = "qcom,ipc-spinlock-sfpb";
+		reg = <0x1f40000 0x8000>;
+		qcom,num-locks = <8>;
+	};
+
+	qcom,ghd {
+		compatible = "qcom,gladiator-hang-detect";
+		qcom,threshold-arr = <0x179d141c 0x179d1420
+		0x179d1424 0x179d1428 0x179d142c 0x179d1430>;
+		qcom,config-reg = <0x179d1434>;
+	};
+
+	qcom,msm-gladiator-v2@17900000 {
+		compatible = "qcom,msm-gladiator-v2";
+		reg = <0x17900000 0xe000>;
+		reg-names = "gladiator_base";
+		interrupts = <0 22 0>;
+		clock-names = "atb_clk";
+		clocks = <&clock_gcc clk_qdss_clk>;
+	};
+
+	qcom,smem@86000000 {
+		compatible = "qcom,smem";
+		reg = <0x86000000 0x200000>,
+			<0x17911008 0x4>,
+			<0x778000 0x7000>,
+			<0x1fd4000 0x8>;
+		reg-names = "smem", "irq-reg-base", "aux-mem1",
+			"smem_targ_info_reg";
+		qcom,mpu-enabled;
+	};
+
+	qcom,msm-adsprpc-mem {
+		compatible = "qcom,msm-adsprpc-mem-region";
+		memory-region = <&adsp_mem>;
+	};
+
+	qcom,msm_fastrpc {
+		compatible = "qcom,msm-fastrpc-adsp";
+		qcom,fastrpc-glink;
+
+		qcom,msm_fastrpc_cpz_cb1 {
+			compatible = "qcom,msm-fastrpc-compute-cb";
+			label = "adsprpc-smd";
+			iommus = <&lpass_q6_smmu 2>;
+			qcom,secure-context-bank;
+			dma-coherent;
+		};
+		qcom,msm_fastrpc_compute_cb1 {
+			compatible = "qcom,msm-fastrpc-compute-cb";
+			label = "adsprpc-smd";
+			iommus = <&lpass_q6_smmu 8>;
+			dma-coherent;
+		};
+		qcom,msm_fastrpc_compute_cb2 {
+			compatible = "qcom,msm-fastrpc-compute-cb";
+			label = "adsprpc-smd";
+			iommus = <&lpass_q6_smmu 9>;
+			dma-coherent;
+		};
+		qcom,msm_fastrpc_compute_cb3 {
+			compatible = "qcom,msm-fastrpc-compute-cb";
+			label = "adsprpc-smd";
+			iommus = <&lpass_q6_smmu 10>;
+			dma-coherent;
+		};
+		qcom,msm_fastrpc_compute_cb4 {
+			compatible = "qcom,msm-fastrpc-compute-cb";
+			label = "adsprpc-smd";
+			iommus = <&lpass_q6_smmu 11>;
+			dma-coherent;
+		};
+		qcom,msm_fastrpc_compute_cb6 {
+			compatible = "qcom,msm-fastrpc-compute-cb";
+			label = "adsprpc-smd";
+			iommus = <&lpass_q6_smmu 5>;
+			dma-coherent;
+		};
+		qcom,msm_fastrpc_compute_cb7 {
+			compatible = "qcom,msm-fastrpc-compute-cb";
+			label = "adsprpc-smd";
+			iommus = <&lpass_q6_smmu 6>;
+			dma-coherent;
+		};
+		qcom,msm_fastrpc_compute_cb8 {
+			compatible = "qcom,msm-fastrpc-compute-cb";
+			label = "adsprpc-smd";
+			iommus = <&lpass_q6_smmu 7>;
+			dma-coherent;
+		};
+	};
+
+	rpm_bus: qcom,rpm-smd {
+		compatible = "qcom,rpm-glink";
+		qcom,glink-edge = "rpm";
+		rpm-channel-name = "rpm_requests";
+	};
+
+	glink_mpss: qcom,glink-ssr-modem {
+		compatible = "qcom,glink_ssr";
+		label = "modem";
+		qcom,edge = "mpss";
+		qcom,notify-edges = <&glink_lpass>, <&glink_dsps>, <&glink_rpm>;
+		qcom,xprt = "smem";
+	};
+
+	glink_lpass: qcom,glink-ssr-adsp {
+		compatible = "qcom,glink_ssr";
+		label = "adsp";
+		qcom,edge = "lpass";
+		qcom,notify-edges = <&glink_mpss>, <&glink_dsps>, <&glink_rpm>;
+		qcom,xprt = "smem";
+	};
+
+	glink_dsps: qcom,glink-ssr-dsps {
+		compatible = "qcom,glink_ssr";
+		label = "slpi";
+		qcom,edge = "dsps";
+		qcom,notify-edges = <&glink_mpss>, <&glink_lpass>, <&glink_rpm>;
+		qcom,xprt = "smem";
+	};
+
+	glink_rpm: qcom,glink-ssr-rpm {
+		compatible = "qcom,glink_ssr";
+		label = "rpm";
+		qcom,edge = "rpm";
+		qcom,notify-edges = <&glink_lpass>, <&glink_mpss>,
+					<&glink_dsps>, <&glink_spss>;
+		qcom,xprt = "smem";
+	};
+
+	glink_spss: qcom,glink-ssr-spss {
+		compatible = "qcom,glink_ssr";
+		label = "spss";
+		qcom,edge = "spss";
+		qcom,notify-edges = <&glink_mpss>, <&glink_lpass>,
+				<&glink_dsps>, <&glink_rpm>;
+		qcom,xprt = "mailbox";
+	};
+
+	qcom,glink-smem-native-xprt-modem@86000000 {
+		compatible = "qcom,glink-smem-native-xprt";
+		reg = <0x86000000 0x200000>,
+			<0x17911008 0x4>;
+		reg-names = "smem", "irq-reg-base";
+		qcom,irq-mask = <0x8000>;
+		interrupts = <0 452 1>;
+		label = "mpss";
+	};
+
+	qcom,glink-smem-native-xprt-adsp@86000000 {
+		compatible = "qcom,glink-smem-native-xprt";
+		reg = <0x86000000 0x200000>,
+			<0x17911008 0x4>;
+		reg-names = "smem", "irq-reg-base";
+		qcom,irq-mask = <0x200>;
+		interrupts = <0 157 1>;
+		label = "lpass";
+		qcom,qos-config = <&glink_qos_adsp>;
+		qcom,ramp-time = <0xaf>;
+	};
+
+	glink_qos_adsp: qcom,glink-qos-config-adsp {
+		compatible = "qcom,glink-qos-config";
+		qcom,flow-info = <0x3c 0x0>,
+				<0x3c 0x0>,
+				<0x3c 0x0>,
+				<0x3c 0x0>;
+		qcom,mtu-size = <0x800>;
+		qcom,tput-stats-cycle = <0xa>;
+	};
+
+	qcom,glink-smem-native-xprt-dsps@86000000 {
+		compatible = "qcom,glink-smem-native-xprt";
+		reg = <0x86000000 0x200000>,
+			<0x17911008 0x4>;
+		reg-names = "smem", "irq-reg-base";
+		qcom,irq-mask = <0x8000000>;
+		interrupts = <0 179 1>;
+		label = "dsps";
+	};
+
+	qcom,glink-smem-native-xprt-rpm@778000 {
+		compatible = "qcom,glink-rpm-native-xprt";
+		reg = <0x778000 0x7000>,
+			<0x17911008 0x4>;
+		reg-names = "msgram", "irq-reg-base";
+		qcom,irq-mask = <0x1>;
+		interrupts = <0 168 1>;
+		label = "rpm";
+	};
+
+	qcom,glink-mailbox-xprt-spss@1d05008 {
+		compatible = "qcom,glink-mailbox-xprt";
+		reg = <0x1d05008 0x8>,
+			<0x1d05010 0x4>,
+			<0x1d0501c 0x4>,
+			<0x1d06008 0x4>;
+		reg-names = "mbox-loc-addr", "mbox-loc-size", "irq-reg-base",
+			"irq-rx-reset";
+		qcom,irq-mask = <0x1>;
+		interrupts = <0 348 4>;
+		label = "spss";
+		qcom,tx-ring-size = <0x800>;
+		qcom,rx-ring-size = <0x800>;
+	};
+
+	glink_spi_xprt_wdsp: qcom,glink-spi-xprt-wdsp {
+		compatible = "qcom,glink-spi-xprt";
+		label = "wdsp";
+		qcom,remote-fifo-config = <&glink_fifo_wdsp>;
+		qcom,qos-config = <&glink_qos_wdsp>;
+		qcom,ramp-time = <0x10>,
+				     <0x20>,
+				     <0x30>,
+				     <0x40>;
+	};
+
+	glink_fifo_wdsp: qcom,glink-fifo-config-wdsp {
+		compatible = "qcom,glink-fifo-config";
+		qcom,out-read-idx-reg = <0x12000>;
+		qcom,out-write-idx-reg = <0x12004>;
+		qcom,in-read-idx-reg = <0x1200C>;
+		qcom,in-write-idx-reg = <0x12010>;
+	};
+
+	glink_qos_wdsp: qcom,glink-qos-config-wdsp {
+		compatible = "qcom,glink-qos-config";
+		qcom,flow-info = <0x80 0x0>,
+				 <0x70 0x1>,
+				 <0x60 0x2>,
+				 <0x50 0x3>;
+		qcom,mtu-size = <0x800>;
+		qcom,tput-stats-cycle = <0xa>;
+	};
+
+	qcom,glink_pkt {
+		compatible = "qcom,glinkpkt";
+
+		qcom,glinkpkt-at-mdm0 {
+			qcom,glinkpkt-transport = "smem";
+			qcom,glinkpkt-edge = "mpss";
+			qcom,glinkpkt-ch-name = "DS";
+			qcom,glinkpkt-dev-name = "at_mdm0";
+		};
+
+		qcom,glinkpkt-loopback_cntl {
+			qcom,glinkpkt-transport = "lloop";
+			qcom,glinkpkt-edge = "local";
+			qcom,glinkpkt-ch-name = "LOCAL_LOOPBACK_CLNT";
+			qcom,glinkpkt-dev-name = "glink_pkt_loopback_ctrl";
+		};
+
+		qcom,glinkpkt-loopback_data {
+			qcom,glinkpkt-transport = "lloop";
+			qcom,glinkpkt-edge = "local";
+			qcom,glinkpkt-ch-name = "glink_pkt_lloop_CLNT";
+			qcom,glinkpkt-dev-name = "glink_pkt_loopback";
+		};
+
+		qcom,glinkpkt-apr-apps2 {
+			qcom,glinkpkt-transport = "smem";
+			qcom,glinkpkt-edge = "adsp";
+			qcom,glinkpkt-ch-name = "apr_apps2";
+			qcom,glinkpkt-dev-name = "apr_apps2";
+		};
+
+		qcom,glinkpkt-data40-cntl {
+			qcom,glinkpkt-transport = "smem";
+			qcom,glinkpkt-edge = "mpss";
+			qcom,glinkpkt-ch-name = "DATA40_CNTL";
+			qcom,glinkpkt-dev-name = "smdcntl8";
+		};
+
+		qcom,glinkpkt-data1 {
+			qcom,glinkpkt-transport = "smem";
+			qcom,glinkpkt-edge = "mpss";
+			qcom,glinkpkt-ch-name = "DATA1";
+			qcom,glinkpkt-dev-name = "smd7";
+		};
+
+		qcom,glinkpkt-data4 {
+			qcom,glinkpkt-transport = "smem";
+			qcom,glinkpkt-edge = "mpss";
+			qcom,glinkpkt-ch-name = "DATA4";
+			qcom,glinkpkt-dev-name = "smd8";
+		};
+
+		qcom,glinkpkt-data11 {
+			qcom,glinkpkt-transport = "smem";
+			qcom,glinkpkt-edge = "mpss";
+			qcom,glinkpkt-ch-name = "DATA11";
+			qcom,glinkpkt-dev-name = "smd11";
+		};
+	};
+
+	qcom,ipc_router {
+		compatible = "qcom,ipc_router";
+		qcom,node-id = <1>;
+	};
+
+	qcom,ipc_router_modem_xprt {
+		compatible = "qcom,ipc_router_glink_xprt";
+		qcom,ch-name = "IPCRTR";
+		qcom,xprt-remote = "mpss";
+		qcom,glink-xprt = "smem";
+		qcom,xprt-linkid = <1>;
+		qcom,xprt-version = <1>;
+		qcom,fragmented-data;
+	};
+
+	qcom,ipc_router_q6_xprt {
+		compatible = "qcom,ipc_router_glink_xprt";
+		qcom,ch-name = "IPCRTR";
+		qcom,xprt-remote = "lpass";
+		qcom,glink-xprt = "smem";
+		qcom,xprt-linkid = <1>;
+		qcom,xprt-version = <1>;
+		qcom,fragmented-data;
+	};
+
+	qcom,ipc_router_dsps_xprt {
+		compatible = "qcom,ipc_router_glink_xprt";
+		qcom,ch-name = "IPCRTR";
+		qcom,xprt-remote = "dsps";
+		qcom,glink-xprt = "smem";
+		qcom,xprt-linkid = <1>;
+		qcom,xprt-version = <1>;
+		qcom,fragmented-data;
+		qcom,dynamic-wakeup-source;
+	};
+
+	qcom,spcom {
+		compatible = "qcom,spcom";
+
+		/* predefined channels, remote side is server */
+		qcom,spcom-ch-names = "sp_kernel", "sp_ssr";
+		status = "ok";
+	};
+
+	spss_utils: qcom,spss_utils {
+		compatible = "qcom,spss-utils";
+		/* spss fuses physical address */
+		qcom,spss-fuse1-addr = <0x007841c4>;
+		qcom,spss-fuse1-bit = <27>;
+		qcom,spss-fuse2-addr = <0x0078413c>;
+		qcom,spss-fuse2-bit = <31>;
+		qcom,spss-test-firmware-name = "spss";    /* default name */
+		qcom,spss-prod-firmware-name = "spss1p";  /* 8 chars max */
+		qcom,spss-hybr-firmware-name = "spss1h";  /* 8 chars max */
+		qcom,spss-debug-reg-addr = <0x01d06020>;
+		status = "ok";
+	};
+
+	sdhc_2: sdhci@c0a4900 {
+		compatible = "qcom,sdhci-msm";
+		reg = <0xc0a4900 0x314>, <0xc0a4000 0x800>;
+		reg-names = "hc_mem", "core_mem";
+
+		interrupts = <0 125 0>, <0 221 0>;
+		interrupt-names = "hc_irq", "pwr_irq";
+
+		clock-names = "iface_clk", "core_clk";
+		clocks = <&clock_gcc clk_gcc_sdcc2_ahb_clk>,
+			 <&clock_gcc clk_gcc_sdcc2_apps_clk>;
+
+		qcom,large-address-bus;
+		qcom,bus-width = <4>;
+		qcom,cpu-dma-latency-us = <701>;
+
+		qcom,devfreq,freq-table = <52000000 200000000>;
+
+		qcom,msm-bus,name = "sdhc2";
+		qcom,msm-bus,num-cases = <8>;
+		qcom,msm-bus,num-paths = <1>;
+		qcom,msm-bus,vectors-KBps = <81 512 0 0>, /* No vote */
+				<81 512 1600 3200>,    /* 400 KB/s*/
+				<81 512 80000 160000>, /* 20 MB/s */
+				<81 512 100000 200000>, /* 25 MB/s */
+				<81 512 200000 400000>, /* 50 MB/s */
+				<81 512 400000 800000>, /* 100 MB/s */
+				<81 512 800000 800000>, /* 200 MB/s */
+				<81 512 2048000 4096000>; /* Max. bandwidth */
+		qcom,bus-bw-vectors-bps = <0 400000 20000000 25000000 50000000
+						100000000 200000000 4294967295>;
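+		/*
+		 * Each qcom,bus-bw-vectors-bps threshold (bytes/s) selects
+		 * the msm-bus vote with the same index above; 4294967295
+		 * (0xffffffff) caps the final, max-bandwidth case. The
+		 * pairing is inferred from the matching comments.
+		 */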
+
+		qcom,sdr104-wa;
+
+		status = "disabled";
+	};
+
+	ufsphy1: ufsphy@1da7000 {
+		compatible = "qcom,ufs-phy-qmp-v3";
+		reg = <0x1da7000 0xda8>;
+		reg-names = "phy_mem";
+		#phy-cells = <0>;
+		clock-names = "ref_clk_src",
+			"ref_clk",
+			"ref_aux_clk";
+		clocks = <&clock_gcc clk_ln_bb_clk1>,
+			<&clock_gcc clk_gcc_ufs_clkref_clk>,
+			<&clock_gcc clk_gcc_ufs_phy_aux_hw_ctl_clk>;
+		status = "disabled";
+	};
+
+	ufs_ice: ufsice@1db0000 {
+		compatible = "qcom,ice";
+		reg = <0x1db0000 0x8000>;
+		qcom,enable-ice-clk;
+		clock-names =   "ufs_core_clk",
+				"bus_clk",
+				"iface_clk",
+				"ice_core_clk";
+		clocks = <&clock_gcc clk_gcc_ufs_axi_clk>,
+			 <&clock_gcc clk_gcc_aggre1_ufs_axi_clk>,
+			 <&clock_gcc clk_gcc_ufs_ahb_clk>,
+			 <&clock_gcc clk_gcc_ufs_ice_core_clk>;
+		qcom,op-freq-hz =	<0>,
+					<0>,
+					<0>,
+					<300000000>;
+		vdd-hba-supply = <&gdsc_ufs>;
+		qcom,msm-bus,name = "ufs_ice_noc";
+		qcom,msm-bus,num-cases = <2>;
+		qcom,msm-bus,num-paths = <1>;
+		qcom,msm-bus,vectors-KBps =
+				<1 650 0 0>,    /* No vote */
+				<1 650 1000 0>; /* Max. bandwidth */
+		qcom,bus-vector-names = "MIN",
+					"MAX";
+		qcom,instance-type = "ufs";
+		status = "disabled";
+	};
+
+	ufs1: ufshc@1da4000 {
+		compatible = "qcom,ufshc";
+		reg = <0x1da4000 0x2500>;
+		interrupts = <0 265 0>;
+		phys = <&ufsphy1>;
+		phy-names = "ufsphy";
+		ufs-qcom-crypto = <&ufs_ice>;
+
+		clock-names =
+			"core_clk",
+			"bus_aggr_clk",
+			"iface_clk",
+			"core_clk_unipro",
+			"core_clk_ice",
+			"ref_clk",
+			"tx_lane0_sync_clk",
+			"rx_lane0_sync_clk";
+		clocks =
+			<&clock_gcc clk_gcc_ufs_axi_hw_ctl_clk>,
+			<&clock_gcc clk_gcc_aggre1_ufs_axi_hw_ctl_clk>,
+			<&clock_gcc clk_gcc_ufs_ahb_clk>,
+			<&clock_gcc clk_gcc_ufs_unipro_core_hw_ctl_clk>,
+			<&clock_gcc clk_gcc_ufs_ice_core_hw_ctl_clk>,
+			<&clock_gcc clk_ln_bb_clk1>,
+			<&clock_gcc clk_gcc_ufs_tx_symbol_0_clk>,
+			<&clock_gcc clk_gcc_ufs_rx_symbol_0_clk>;
+		freq-table-hz =
+			<50000000 200000000>,
+			<0 0>,
+			<0 0>,
+			<37500000 150000000>,
+			<75000000 300000000>,
+			<0 0>,
+			<0 0>,
+			<0 0>;
+
+		lanes-per-direction = <1>;
+
+		qcom,msm-bus,name = "ufs1";
+		qcom,msm-bus,num-cases = <22>;
+		qcom,msm-bus,num-paths = <2>;
+		qcom,msm-bus,vectors-KBps =
+		/*
+		 * During HS G3 UFS runs at nominal voltage corner, vote
+		 * higher bandwidth to push other buses in the data path
+		 * to run at nominal to achieve max throughput.
+		 * 4GBps pushes BIMC to run at nominal.
+		 * 200MBps pushes CNOC to run at nominal.
+		 * Vote for half of this bandwidth for HS G3 1-lane.
+		 * For max bandwidth, vote high enough to push the buses
+		 * to run in turbo voltage corner.
+		 */
+		<95 512 0 0>, <1 650 0 0>,          /* No vote */
+		<95 512 922 0>, <1 650 1000 0>,     /* PWM G1 */
+		<95 512 1844 0>, <1 650 1000 0>,    /* PWM G2 */
+		<95 512 3688 0>, <1 650 1000 0>,    /* PWM G3 */
+		<95 512 7376 0>, <1 650 1000 0>,    /* PWM G4 */
+		<95 512 1844 0>, <1 650 1000 0>,    /* PWM G1 L2 */
+		<95 512 3688 0>, <1 650 1000 0>,    /* PWM G2 L2 */
+		<95 512 7376 0>, <1 650 1000 0>,    /* PWM G3 L2 */
+		<95 512 14752 0>, <1 650 1000 0>,   /* PWM G4 L2 */
+		<95 512 127796 0>, <1 650 1000 0>,  /* HS G1 RA */
+		<95 512 255591 0>, <1 650 1000 0>,  /* HS G2 RA */
+		<95 512 2097152 0>, <1 650 102400 0>,  /* HS G3 RA */
+		<95 512 255591 0>, <1 650 1000 0>,  /* HS G1 RA L2 */
+		<95 512 511181 0>, <1 650 1000 0>,  /* HS G2 RA L2 */
+		<95 512 4194304 0>, <1 650 204800 0>, /* HS G3 RA L2 */
+		<95 512 149422 0>, <1 650 1000 0>,  /* HS G1 RB */
+		<95 512 298189 0>, <1 650 1000 0>,  /* HS G2 RB */
+		<95 512 2097152 0>, <1 650 102400 0>,  /* HS G3 RB */
+		<95 512 298189 0>, <1 650 1000 0>,  /* HS G1 RB L2 */
+		<95 512 596378 0>, <1 650 1000 0>,  /* HS G2 RB L2 */
+		<95 512 4194304 0>, <1 650 204800 0>, /* HS G3 RB L2 */
+		<95 512 7643136 0>, <1 650 307200 0>; /* Max. bandwidth */
+		qcom,bus-vector-names = "MIN",
+		"PWM_G1_L1", "PWM_G2_L1", "PWM_G3_L1", "PWM_G4_L1",
+		"PWM_G1_L2", "PWM_G2_L2", "PWM_G3_L2", "PWM_G4_L2",
+		"HS_RA_G1_L1", "HS_RA_G2_L1", "HS_RA_G3_L1",
+		"HS_RA_G1_L2", "HS_RA_G2_L2", "HS_RA_G3_L2",
+		"HS_RB_G1_L1", "HS_RB_G2_L1", "HS_RB_G3_L1",
+		"HS_RB_G1_L2", "HS_RB_G2_L2", "HS_RB_G3_L2",
+		"MAX";
+
+		/* PM QoS */
+		qcom,pm-qos-cpu-groups = <0x0F 0xF0>;
+		qcom,pm-qos-cpu-group-latency-us = <70 70>;
+		qcom,pm-qos-default-cpu = <0>;
+
+		pinctrl-names = "dev-reset-assert", "dev-reset-deassert";
+		pinctrl-0 = <&ufs_dev_reset_assert>;
+		pinctrl-1 = <&ufs_dev_reset_deassert>;
+
+		resets = <&clock_gcc UFS_BCR>;
+		reset-names = "core_reset";
+
+		status = "disabled";
+	};
+
+	usb3: ssusb@a800000 {
+		compatible = "qcom,dwc-usb3-msm";
+		reg = <0x0a800000 0xf8c00>,
+		      <0x0c016000 0x400>;
+		reg-names = "core_base", "ahb2phy_base";
+		#address-cells = <1>;
+		#size-cells = <1>;
+		ranges;
+
+		interrupts = <0 347 0>, <0 243 0>,  <0 180 0>;
+		interrupt-names = "hs_phy_irq", "ss_phy_irq", "pwr_event_irq";
+
+		USB3_GDSC-supply = <&gdsc_usb30>;
+		qcom,usb-dbm = <&dbm_1p5>;
+		qcom,msm-bus,name = "usb3";
+		qcom,msm-bus,num-cases = <2>;
+		qcom,msm-bus,num-paths = <1>;
+		qcom,msm-bus,vectors-KBps =
+					<61 512 0 0>,
+					<61 512 240000 800000>;
+
+		qcom,dwc-usb3-msm-tx-fifo-size = <21288>;
+		extcon = <&pmi8998_pdphy>;
+
+		clocks = <&clock_gcc clk_gcc_usb30_master_clk>,
+			<&clock_gcc clk_gcc_cfg_noc_usb3_axi_clk>,
+			<&clock_gcc clk_gcc_aggre1_usb3_axi_clk>,
+			<&clock_gcc clk_gcc_usb30_mock_utmi_clk>,
+			<&clock_gcc clk_gcc_usb30_sleep_clk>,
+			<&clock_gcc clk_cxo_dwc3_clk>;
+
+		clock-names = "core_clk", "iface_clk", "bus_aggr_clk",
+				"utmi_clk", "sleep_clk", "xo";
+
+		qcom,core-clk-rate = <120000000>;
+		qcom,core-clk-rate-hs = <60000000>;
+
+		resets = <&clock_gcc USB_30_BCR>;
+		reset-names = "core_reset";
+
+		dwc3@a800000 {
+			compatible = "snps,dwc3";
+			reg = <0x0a800000 0xcd00>;
+			interrupt-parent = <&intc>;
+			interrupts = <0 131 0>;
+			usb-phy = <&qusb_phy0>, <&ssphy>;
+			tx-fifo-resize;
+			snps,nominal-elastic-buffer;
+			snps,disable-clk-gating;
+			snps,has-lpm-erratum;
+			snps,hird-threshold = /bits/ 8 <0x10>;
+			snps,num-gsi-evt-buffs = <0x3>;
+		};
+
+		qcom,usbbam@a904000 {
+			compatible = "qcom,usb-bam-msm";
+			reg = <0xa904000 0x17000>;
+			interrupt-parent = <&intc>;
+			interrupts = <0 132 0>;
+
+			qcom,bam-type = <0>;
+			qcom,usb-bam-fifo-baseaddr = <0x146bb000>;
+			qcom,usb-bam-num-pipes = <8>;
+			qcom,ignore-core-reset-ack;
+			qcom,disable-clk-gating;
+			qcom,usb-bam-override-threshold = <0x4001>;
+			qcom,usb-bam-max-mbps-highspeed = <400>;
+			qcom,usb-bam-max-mbps-superspeed = <3600>;
+			qcom,reset-bam-on-connect;
+
+			qcom,pipe0 {
+				label = "ssusb-qdss-in-0";
+				qcom,usb-bam-mem-type = <2>;
+				qcom,dir = <1>;
+				qcom,pipe-num = <0>;
+				qcom,peer-bam = <0>;
+				qcom,peer-bam-physical-address = <0x6064000>;
+				qcom,src-bam-pipe-index = <0>;
+				qcom,dst-bam-pipe-index = <0>;
+				qcom,data-fifo-offset = <0x0>;
+				qcom,data-fifo-size = <0x1800>;
+				qcom,descriptor-fifo-offset = <0x1800>;
+				qcom,descriptor-fifo-size = <0x800>;
+			};
+		};
+	};
+
+	qusb_phy0: qusb@c012000 {
+		compatible = "qcom,qusb2phy-v2";
+		reg = <0x0c012000 0x2a8>,
+		      <0x01fcb24c 0x4>;
+		reg-names = "qusb_phy_base",
+				"tcsr_clamp_dig_n_1p8";
+		vdd-supply = <&pm8998_l1>;
+		vdda12-supply = <&pm8998_l2>;
+		vdda18-supply = <&pm8998_l12>;
+		vdda33-supply = <&pm8998_l24>;
+		qcom,vdd-voltage-level = <0 880000 880000>;
+		qcom,vdda33-voltage-level = <2400000 3088000 3088000>;
+		qcom,qusb-phy-init-seq =
+				/* <value reg_offset> */
+					<0x80 0x0
+					0x13 0x04
+					0x7c 0x18c
+					0x80 0x2c
+					0x0a 0x184
+					0x00 0x240>;
+		phy_type = "utmi";
+
+		clocks = <&clock_gcc clk_ln_bb_clk1>,
+			 <&clock_gcc clk_gcc_rx1_usb2_clkref_clk>;
+		clock-names = "ref_clk_src", "ref_clk";
+
+		resets = <&clock_gcc QUSB2PHY_PRIM_BCR>;
+		reset-names = "phy_reset";
+	};
+
+	ssphy: ssphy@c010000 {
+		compatible = "qcom,usb-ssphy-qmp-v2";
+		reg = <0x0c010000 0xe0c>,
+		      <0x01fcb244 0x4>,
+		      <0x01fcb248 0x4>;
+		reg-names = "qmp_phy_base",
+			    "vls_clamp_reg",
+			    "tcsr_usb3_dp_phymode";
+		vdd-supply = <&pm8998_l1>;
+		core-supply = <&pm8998_l2>;
+		qcom,vdd-voltage-level = <0 880000 880000>;
+		qcom,vbus-valid-override;
+		qcom,qmp-phy-init-seq =
+			/* <reg_offset, value, delay> */
+			<0x138 0x30 0x00
+			 0x034 0x04 0x01
+			 0x080 0x14 0x00
+			 0x03c 0x06 0x00
+			 0x08c 0x08 0x00
+			 0x15c 0x06 0x00
+			 0x164 0x01 0x00
+			 0x13c 0x80 0x00
+			 0x0b0 0x82 0x00
+			 0x0b8 0xab 0x00
+			 0x0bc 0xea 0x00
+			 0x0c0 0x02 0x00
+			 0x060 0x06 0x00
+			 0x068 0x16 0x00
+			 0x070 0x36 0x00
+			 0x0dc 0x00 0x00
+			 0x0d8 0x3f 0x00
+			 0x0f8 0x01 0x00
+			 0x0f4 0xc9 0x00
+			 0x148 0x0a 0x00
+			 0x0a0 0x00 0x00
+			 0x09c 0x34 0x00
+			 0x098 0x15 0x00
+			 0x090 0x04 0x00
+			 0x154 0x00 0x00
+			 0x094 0x00 0x00
+			 0x0f0 0x00 0x00
+			 0x00c 0x0a 0x00
+			 0x048 0x07 0x00
+			 0x0d0 0x80 0x00
+			 0x184 0x01 0x00
+			 0x010 0x01 0x00
+			 0x01c 0x31 0x00
+			 0x020 0x01 0x00
+			 0x014 0x00 0x00
+			 0x018 0x00 0x00
+			 0x024 0x85 0x00
+			 0x028 0x07 0x00
+			 0x430 0x0b 0x00
+			 0x4d4 0x0f 0x00
+			 0x4d8 0x4e 0x00
+			 0x4dc 0x18 0x00
+			 0x4f8 0x07 0x00
+			 0x4fc 0x80 0x00
+			 0x504 0x43 0x00
+			 0x50c 0x1c 0x00
+			 0x434 0x75 0x00
+			 0x43c 0x00 0x00
+			 0x440 0x00 0x00
+			 0x444 0x80 0x00
+			 0x408 0x0a 0x00
+			 0x414 0x06 0x00
+			 0x500 0x00 0x00
+			 0x4c0 0x03 0x00
+			 0x564 0x05 0x00
+			 0x830 0x0b 0x00
+			 0x8d4 0x0f 0x00
+			 0x8d8 0x4e 0x00
+			 0x8dc 0x18 0x00
+			 0x8f8 0x07 0x00
+			 0x8fc 0x80 0x00
+			 0x904 0x43 0x00
+			 0x90c 0x1c 0x00
+			 0x834 0x75 0x00
+			 0x83c 0x00 0x00
+			 0x840 0x00 0x00
+			 0x844 0x80 0x00
+			 0x808 0x0a 0x00
+			 0x814 0x06 0x00
+			 0x900 0x00 0x00
+			 0x8c0 0x03 0x00
+			 0x964 0x05 0x00
+			 0x260 0x10 0x00
+			 0x2a4 0x12 0x00
+			 0x28c 0x16 0x00
+			 0x244 0x00 0x00
+			 0x660 0x10 0x00
+			 0x6a4 0x12 0x00
+			 0x68c 0x16 0x00
+			 0x644 0x00 0x00
+			 0xcc8 0x83 0x00
+			 0xccc 0x09 0x00
+			 0xcd0 0xa2 0x00
+			 0xcd4 0x40 0x00
+			 0xcc4 0x02 0x00
+			 0xc80 0xd1 0x00
+			 0xc84 0x1f 0x00
+			 0xc88 0x47 0x00
+			 0xc64 0x1b 0x00
+			 0xc0c 0x9f 0x00
+			 0xc10 0x9f 0x00
+			 0xc14 0xb7 0x00
+			 0xc18 0x4e 0x00
+			 0xc1c 0x65 0x00
+			 0xc20 0x6b 0x00
+			 0xc24 0x15 0x00
+			 0xc28 0x0d 0x00
+			 0xc2c 0x15 0x00
+			 0xc30 0x0d 0x00
+			 0xc34 0x15 0x00
+			 0xc38 0x0d 0x00
+			 0xc3c 0x15 0x00
+			 0xc40 0x0d 0x00
+			 0xc44 0x15 0x00
+			 0xc48 0x0d 0x00
+			 0xc4c 0x15 0x00
+			 0xc50 0x0d 0x00
+			 0xc5c 0x02 0x00
+			 0xca0 0x04 0x00
+			 0xc8c 0x44 0x00
+			 0xc70 0xe7 0x00
+			 0xc74 0x03 0x00
+			 0xc78 0x40 0x00
+			 0xc7c 0x00 0x00
+			 0xdd8 0x8a 0x00
+			 0xcb8 0x75 0x00
+			 0xcb0 0x86 0x00
+			 0xcbc 0x13 0x00
+			 0xffffffff 0xffffffff 0x00>;
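+		/*
+		 * The final <0xffffffff 0xffffffff 0x00> entry appears to be
+		 * an end-of-sequence marker rather than a real register
+		 * write.
+		 */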
+
+		qcom,qmp-phy-reg-offset =
+				<0xd74 /* USB3_PHY_PCS_STATUS */
+				 0xcd8 /* USB3_PHY_AUTONOMOUS_MODE_CTRL */
+				 0xcdc /* USB3_PHY_LFPS_RXTERM_IRQ_CLEAR */
+				 0xc04 /* USB3_PHY_POWER_DOWN_CONTROL */
+				 0xc00 /* USB3_PHY_SW_RESET */
+				 0xc08 /* USB3_PHY_START */
+				 0xa00>; /* USB3PHY_PCS_MISC_TYPEC_CTRL */
+
+		clocks = <&clock_gcc clk_gcc_usb3_phy_aux_clk>,
+			 <&clock_gcc clk_gcc_usb3_phy_pipe_clk>,
+			 <&clock_gcc clk_ln_bb_clk1>,
+			 <&clock_gcc clk_gcc_usb3_clkref_clk>;
+
+		clock-names = "aux_clk", "pipe_clk", "ref_clk_src",
+				"ref_clk";
+
+		resets = <&clock_gcc USB3_PHY_BCR>,
+			 <&clock_gcc USB3PHY_PHY_BCR>;
+		reset-names = "phy_reset", "phy_phy_reset";
+	};
+
+	usb_audio_qmi_dev {
+		compatible = "qcom,usb-audio-qmi-dev";
+		iommus = <&lpass_q6_smmu 12>;
+		qcom,usb-audio-stream-id = <12>;
+		qcom,usb-audio-intr-num = <2>;
+	};
+
+	dbm_1p5: dbm@a8f8000 {
+		compatible = "qcom,usb-dbm-1p5";
+		reg = <0xa8f8000 0x300>;
+		qcom,reset-ep-after-lpm-resume;
+	};
+
+	usb_nop_phy: usb_nop_phy {
+		compatible = "usb-nop-xceiv";
+	};
+
+	qcom,lpass@17300000 {
+		compatible = "qcom,pil-tz-generic";
+		reg = <0x17300000 0x00100>;
+		interrupts = <0 162 1>;
+
+		vdd_cx-supply = <&pm8998_s1_level>;
+		qcom,proxy-reg-names = "vdd_cx";
+		qcom,vdd_cx-uV-uA = <RPM_SMD_REGULATOR_LEVEL_TURBO 100000>;
+
+		clocks = <&clock_gcc clk_cxo_pil_lpass_clk>;
+		clock-names = "xo";
+		qcom,proxy-clock-names = "xo";
+
+		qcom,pas-id = <1>;
+		qcom,proxy-timeout-ms = <10000>;
+		qcom,smem-id = <423>;
+		qcom,sysmon-id = <1>;
+		status = "ok";
+		qcom,ssctl-instance-id = <0x14>;
+		qcom,firmware-name = "adsp";
+		memory-region = <&pil_adsp_mem>;
+
+		/* GPIO inputs from lpass */
+		qcom,gpio-err-fatal = <&smp2pgpio_ssr_smp2p_2_in 0 0>;
+		qcom,gpio-proxy-unvote = <&smp2pgpio_ssr_smp2p_2_in 2 0>;
+		qcom,gpio-err-ready = <&smp2pgpio_ssr_smp2p_2_in 1 0>;
+		qcom,gpio-stop-ack = <&smp2pgpio_ssr_smp2p_2_in 3 0>;
+
+		/* GPIO output to lpass */
+		qcom,gpio-force-stop = <&smp2pgpio_ssr_smp2p_2_out 0 0>;
+	};
+
+	qcom,memshare {
+		compatible = "qcom,memshare";
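+		/* qcom,peripheral-size below is in bytes (0x200000 = 2 MiB) */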
+
+		qcom,client_1 {
+			compatible = "qcom,memshare-peripheral";
+			qcom,peripheral-size = <0x200000>;
+			qcom,client-id = <0>;
+			qcom,allocate-boot-time;
+			label = "modem";
+		};
+
+		qcom,client_2 {
+			compatible = "qcom,memshare-peripheral";
+			qcom,peripheral-size = <0x300000>;
+			qcom,client-id = <2>;
+			label = "modem";
+		};
+
+		mem_client_3_size: qcom,client_3 {
+			compatible = "qcom,memshare-peripheral";
+			qcom,peripheral-size = <0x0>;
+			qcom,client-id = <1>;
+			qcom,allocate-boot-time;
+			label = "modem";
+		};
+	};
+
+	pil_modem: qcom,mss@4080000 {
+		compatible = "qcom,pil-q6v55-mss";
+		reg = <0x4080000 0x100>,
+		      <0x1f63000 0x008>,
+		      <0x1f65000 0x008>,
+		      <0x1f64000 0x008>,
+		      <0x4180000 0x020>,
+		      <0x00179000 0x004>;
+		reg-names = "qdsp6_base", "halt_q6", "halt_modem",
+			    "halt_nc", "rmb_base", "restart_reg";
+
+		clocks = <&clock_gcc clk_cxo_clk_src>,
+			 <&clock_gcc clk_gcc_mss_cfg_ahb_clk>,
+			 <&clock_gcc clk_gcc_bimc_mss_q6_axi_clk>,
+			 <&clock_gcc clk_gcc_boot_rom_ahb_clk>,
+			 <&clock_gcc clk_gpll0_out_msscc>,
+			 <&clock_gcc clk_gcc_mss_snoc_axi_clk>,
+			 <&clock_gcc clk_gcc_mss_mnoc_bimc_axi_clk>,
+			 <&clock_gcc clk_qdss_clk>;
+		clock-names = "xo", "iface_clk", "bus_clk",
+			      "mem_clk", "gpll0_mss_clk", "snoc_axi_clk",
+			      "mnoc_axi_clk", "qdss_clk";
+		qcom,proxy-clock-names = "xo", "qdss_clk", "mem_clk";
+		qcom,active-clock-names = "iface_clk", "bus_clk",
+			"gpll0_mss_clk", "snoc_axi_clk", "mnoc_axi_clk";
+
+		interrupts = <0 448 1>;
+		vdd_cx-supply = <&pm8998_s1_level>;
+		vdd_cx-voltage = <RPM_SMD_REGULATOR_LEVEL_TURBO>;
+		vdd_mx-supply = <&pm8998_s9_level>;
+		vdd_mx-uV = <RPM_SMD_REGULATOR_LEVEL_TURBO>;
+		qcom,firmware-name = "modem";
+		qcom,pil-self-auth;
+		qcom,sysmon-id = <0>;
+		qcom,ssctl-instance-id = <0x12>;
+		qcom,qdsp6v62-1-2;
+		status = "ok";
+		memory-region = <&modem_mem>;
+		qcom,mem-protect-id = <0xF>;
+
+		/* GPIO inputs from mss */
+		qcom,gpio-err-fatal = <&smp2pgpio_ssr_smp2p_1_in 0 0>;
+		qcom,gpio-err-ready = <&smp2pgpio_ssr_smp2p_1_in 1 0>;
+		qcom,gpio-proxy-unvote = <&smp2pgpio_ssr_smp2p_1_in 2 0>;
+		qcom,gpio-stop-ack = <&smp2pgpio_ssr_smp2p_1_in 3 0>;
+		qcom,gpio-shutdown-ack = <&smp2pgpio_ssr_smp2p_1_in 7 0>;
+
+		/* GPIO output to mss */
+		qcom,gpio-force-stop = <&smp2pgpio_ssr_smp2p_1_out 0 0>;
+		qcom,mba-mem@0 {
+			compatible = "qcom,pil-mba-mem";
+			memory-region = <&pil_mba_mem>;
+		};
+	};
+
+	tsens0: tsens@10aa000 {
+		compatible = "qcom,msm8998-tsens";
+		reg = <0x10aa000 0x2000>;
+		reg-names = "tsens_physical";
+		interrupts = <0 458 0>, <0 445 0>;
+		interrupt-names = "tsens-upper-lower", "tsens-critical";
+		qcom,client-id = <0 1 2 3 4 7 8 9 10 11 12 13>;
+		qcom,sensor-id = <0 1 2 3 4 7 8 9 10 11 12 13>;
+		qcom,sensors = <12>;
+	};
+
+	tsens1: tsens@10ad000 {
+		compatible = "qcom,msm8998-tsens";
+		reg = <0x10ad000 0x2000>;
+		reg-names = "tsens_physical";
+		interrupts = <0 184 0>, <0 430 0>;
+		interrupt-names = "tsens-upper-lower", "tsens-critical";
+		qcom,client-id = <14 15 16 17 18 19 20 21>;
+		qcom,sensor-id = <0 1 3 4 5 6 7 2>;
+		qcom,sensors = <8>;
+	};
+
+	qcom,qbt1000 {
+		compatible = "qcom,qbt1000";
+		clock-names = "core", "iface";
+		clocks = <&clock_gcc clk_gcc_blsp2_qup6_spi_apps_clk>,
+			<&clock_gcc clk_gcc_blsp2_ahb_clk>;
+		clock-frequency = <15000000>;
+		qcom,ipc-gpio = <&tlmm 121 0>;
+		qcom,finger-detect-gpio = <&pm8998_gpios 2 0>;
+	};
+
+	qcom,sensor-information {
+		compatible = "qcom,sensor-information";
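+		/*
+		 * qcom,scaling-factor is assumed to be the divisor applied to
+		 * raw readings (tsens reports decidegrees Celsius, hence 10;
+		 * the PMIC alarm sensor uses 1000).
+		 */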
+		sensor_information0: qcom,sensor-information-0 {
+			qcom,sensor-type = "tsens";
+			qcom,sensor-name = "tsens_tz_sensor0";
+			qcom,scaling-factor = <10>;
+		};
+		sensor_information1: qcom,sensor-information-1 {
+			qcom,sensor-type = "tsens";
+			qcom,sensor-name = "tsens_tz_sensor1";
+			qcom,scaling-factor = <10>;
+		};
+		sensor_information2: qcom,sensor-information-2 {
+			qcom,sensor-type = "tsens";
+			qcom,sensor-name = "tsens_tz_sensor2";
+			qcom,scaling-factor = <10>;
+		};
+		sensor_information3: qcom,sensor-information-3 {
+			qcom,sensor-type = "tsens";
+			qcom,sensor-name = "tsens_tz_sensor3";
+			qcom,scaling-factor = <10>;
+		};
+		sensor_information4: qcom,sensor-information-4 {
+			qcom,sensor-type = "tsens";
+			qcom,sensor-name = "tsens_tz_sensor4";
+			qcom,scaling-factor = <10>;
+		};
+		sensor_information7: qcom,sensor-information-7 {
+			qcom,sensor-type = "tsens";
+			qcom,sensor-name = "tsens_tz_sensor7";
+			qcom,scaling-factor = <10>;
+		};
+		sensor_information8: qcom,sensor-information-8 {
+			qcom,sensor-type = "tsens";
+			qcom,sensor-name = "tsens_tz_sensor8";
+			qcom,scaling-factor = <10>;
+		};
+		sensor_information9: qcom,sensor-information-9 {
+			qcom,sensor-type = "tsens";
+			qcom,sensor-name = "tsens_tz_sensor9";
+			qcom,scaling-factor = <10>;
+		};
+		sensor_information10: qcom,sensor-information-10 {
+			qcom,sensor-type = "tsens";
+			qcom,sensor-name = "tsens_tz_sensor10";
+			qcom,scaling-factor = <10>;
+		};
+		sensor_information11: qcom,sensor-information-11 {
+			qcom,sensor-type = "tsens";
+			qcom,sensor-name = "tsens_tz_sensor11";
+			qcom,scaling-factor = <10>;
+		};
+		sensor_information12: qcom,sensor-information-12 {
+			qcom,sensor-type = "tsens";
+			qcom,sensor-name = "tsens_tz_sensor12";
+			qcom,scaling-factor = <10>;
+			qcom,alias-name = "gpu_1";
+		};
+		sensor_information13: qcom,sensor-information-13 {
+			qcom,sensor-type = "tsens";
+			qcom,sensor-name = "tsens_tz_sensor13";
+			qcom,scaling-factor = <10>;
+			qcom,alias-name = "gpu";
+		};
+		sensor_information14: qcom,sensor-information-14 {
+			qcom,sensor-type = "tsens";
+			qcom,sensor-name = "tsens_tz_sensor14";
+			qcom,scaling-factor = <10>;
+		};
+		sensor_information15: qcom,sensor-information-15 {
+			qcom,sensor-type = "tsens";
+			qcom,sensor-name = "tsens_tz_sensor15";
+			qcom,scaling-factor = <10>;
+			qcom,alias-name = "modem_dsp";
+		};
+		sensor_information16: qcom,sensor-information-16 {
+			qcom,sensor-type = "tsens";
+			qcom,sensor-name = "tsens_tz_sensor16";
+			qcom,scaling-factor = <10>;
+		};
+		sensor_information17: qcom,sensor-information-17 {
+			qcom,sensor-type = "tsens";
+			qcom,sensor-name = "tsens_tz_sensor17";
+			qcom,scaling-factor = <10>;
+			qcom,alias-name = "hvx";
+		};
+		sensor_information18: qcom,sensor-information-18 {
+			qcom,sensor-type = "tsens";
+			qcom,sensor-name = "tsens_tz_sensor18";
+			qcom,scaling-factor = <10>;
+			qcom,alias-name = "camera";
+		};
+		sensor_information19: qcom,sensor-information-19 {
+			qcom,sensor-type = "tsens";
+			qcom,sensor-name = "tsens_tz_sensor19";
+			qcom,scaling-factor = <10>;
+			qcom,alias-name = "multi_media_ss";
+		};
+		sensor_information20: qcom,sensor-information-20 {
+			qcom,sensor-type = "tsens";
+			qcom,sensor-name = "tsens_tz_sensor20";
+			qcom,scaling-factor = <10>;
+			qcom,alias-name = "modem";
+		};
+		sensor_information21: qcom,sensor-information-21 {
+			qcom,sensor-type = "tsens";
+			qcom,sensor-name = "tsens_tz_sensor21";
+			qcom,scaling-factor = <10>;
+			qcom,alias-name = "pop_mem";
+		};
+		sensor_information22: qcom,sensor-information-22 {
+			qcom,sensor-type = "alarm";
+			qcom,sensor-name = "pm8998_tz";
+			qcom,scaling-factor = <1000>;
+		};
+		sensor_information23: qcom,sensor-information-23 {
+			qcom,sensor-type = "adc";
+			qcom,sensor-name = "msm_therm";
+		};
+		sensor_information24: qcom,sensor-information-24 {
+			qcom,sensor-type = "adc";
+			qcom,sensor-name = "emmc_therm";
+		};
+		sensor_information25: qcom,sensor-information-25 {
+			qcom,sensor-type = "adc";
+			qcom,sensor-name = "pa_therm0";
+		};
+		sensor_information26: qcom,sensor-information-26 {
+			qcom,sensor-type = "adc";
+			qcom,sensor-name = "pa_therm1";
+		};
+		sensor_information27: qcom,sensor-information-27 {
+			qcom,sensor-type = "adc";
+			qcom,sensor-name = "quiet_therm";
+		};
+		sensor_information28: qcom,sensor-information-28 {
+			qcom,sensor-type = "llm";
+			qcom,sensor-name = "limits_sensor-01";
+		};
+		sensor_information29: qcom,sensor-information-29 {
+			qcom,sensor-type = "llm";
+			qcom,sensor-name = "limits_sensor-02";
+		};
+	};
+
+	qcom_seecom: qseecom@86600000 {
+		compatible = "qcom,qseecom";
+		reg = <0x86600000 0x2200000>;
+		reg-names = "secapp-region";
+		qcom,hlos-num-ce-hw-instances = <1>;
+		qcom,hlos-ce-hw-instance = <0>;
+		qcom,qsee-ce-hw-instance = <0>;
+		qcom,disk-encrypt-pipe-pair = <2>;
+		qcom,support-fde;
+		qcom,no-clock-support;
+		qcom,appsbl-qseecom-support;
+		qcom,fde-key-size;
+		qcom,commonlib64-loaded-by-uefi;
+		qcom,msm-bus,name = "qseecom-noc";
+		qcom,msm-bus,num-cases = <4>;
+		qcom,msm-bus,num-paths = <1>;
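+		/*
+		 * Bus vote format (per the msm-bus binding): each entry is
+		 * <master-id slave-id ab ib>, with ab/ib the arbitrated and
+		 * instantaneous bandwidth in KBps.
+		 */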
+		qcom,msm-bus,vectors-KBps =
+				<55 512 0 0>,
+				<55 512 0 0>,
+				<55 512 120000 1200000>,
+				<55 512 393600 3936000>;
+		clock-names = "core_clk_src", "core_clk",
+				"iface_clk", "bus_clk";
+		clocks = <&clock_gcc clk_ce1_clk>,
+			 <&clock_gcc clk_qseecom_ce1_clk>,
+			 <&clock_gcc clk_gcc_ce1_ahb_m_clk>,
+			 <&clock_gcc clk_gcc_ce1_axi_m_clk>;
+		qcom,ce-opp-freq = <171430000>;
+		qcom,qsee-reentrancy-support = <2>;
+	};
+
+	qcom_tzlog: tz-log@146BF720 {
+		compatible = "qcom,tz-log";
+		reg = <0x146BF720 0x3000>;
+		qcom,hyplog-enabled;
+		hyplog-address-offset = <0x410>; /* 0x066BFB30 */
+		hyplog-size-offset = <0x414>;    /* 0x066BFB34 */
+	};
+
+	qcom_msmhdcp: qcom,msm_hdcp {
+		compatible = "qcom,msm-hdcp";
+	};
+
+	qcom_crypto: qcrypto@1DE0000 {
+		compatible = "qcom,qcrypto";
+		reg = <0x1DE0000 0x20000>,
+		      <0x1DC4000 0x24000>;
+		reg-names = "crypto-base", "crypto-bam-base";
+		interrupts = <0 206 0>;
+		qcom,bam-pipe-pair = <2>;
+		qcom,ce-hw-instance = <0>;
+		qcom,ce-device = <0>;
+		qcom,bam-ee = <0>;
+		qcom,ce-hw-shared;
+		qcom,clk-mgmt-sus-res;
+		qcom,msm-bus,name = "qcrypto-noc";
+		qcom,msm-bus,num-cases = <2>;
+		qcom,msm-bus,num-paths = <1>;
+		qcom,msm-bus,vectors-KBps =
+				<55 512 0 0>,
+				<55 512 3936000 393600>;
+		clock-names = "core_clk_src", "core_clk",
+				"iface_clk", "bus_clk";
+		clocks = <&clock_gcc clk_qcrypto_ce1_clk>,
+			 <&clock_gcc clk_qcrypto_ce1_clk>,
+			 <&clock_gcc clk_gcc_ce1_ahb_m_clk>,
+			 <&clock_gcc clk_gcc_ce1_axi_m_clk>;
+		qcom,ce-opp-freq = <171430000>;
+		qcom,use-sw-aes-cbc-ecb-ctr-algo;
+		qcom,use-sw-aes-xts-algo;
+		qcom,use-sw-aes-ccm-algo;
+		qcom,use-sw-ahash-algo;
+		qcom,use-sw-aead-algo;
+		qcom,use-sw-hmac-algo;
+	};
+
+	qcom_cedev: qcedev@1DE0000 {
+		compatible = "qcom,qcedev";
+		reg = <0x1DE0000 0x20000>,
+		      <0x1DC4000 0x24000>;
+		reg-names = "crypto-base", "crypto-bam-base";
+		interrupts = <0 206 0>;
+		qcom,bam-pipe-pair = <1>;
+		qcom,ce-hw-instance = <0>;
+		qcom,ce-device = <0>;
+		qcom,ce-hw-shared;
+		qcom,bam-ee = <0>;
+		qcom,msm-bus,name = "qcedev-noc";
+		qcom,msm-bus,num-cases = <2>;
+		qcom,msm-bus,num-paths = <1>;
+		qcom,msm-bus,vectors-KBps =
+				<55 512 0 0>,
+				<55 512 3936000 393600>;
+		clock-names = "core_clk_src", "core_clk",
+				"iface_clk", "bus_clk";
+		clocks = <&clock_gcc clk_qcedev_ce1_clk>,
+			 <&clock_gcc clk_qcedev_ce1_clk>,
+			 <&clock_gcc clk_gcc_ce1_ahb_m_clk>,
+			 <&clock_gcc clk_gcc_ce1_axi_m_clk>;
+		qcom,ce-opp-freq = <171430000>;
+	};
+
+	qcom_rng: qrng@793000 {
+		compatible = "qcom,msm-rng";
+		reg = <0x793000 0x1000>;
+		qcom,msm-rng-iface-clk;
+		qcom,no-qrng-config;
+		qcom,msm-bus,name = "msm-rng-noc";
+		qcom,msm-bus,num-cases = <2>;
+		qcom,msm-bus,num-paths = <1>;
+		qcom,msm-bus,vectors-KBps =
+			<1 618 0 0>,	/* No vote */
+			<1 618 0 800>;	/* 100 MB/s */
+		clocks = <&clock_gcc clk_gcc_prng_ahb_clk>;
+		clock-names = "iface_clk";
+	};
+
+	mitigation_profile0: qcom,limit_info-0 {
+		qcom,temperature-sensor = <&sensor_information1>;
+		qcom,hotplug-mitigation-enable;
+	};
+
+	mitigation_profile1: qcom,limit_info-1 {
+		qcom,temperature-sensor = <&sensor_information2>;
+		qcom,hotplug-mitigation-enable;
+	};
+
+	mitigation_profile2: qcom,limit_info-2 {
+		qcom,temperature-sensor = <&sensor_information3>;
+		qcom,hotplug-mitigation-enable;
+	};
+
+	mitigation_profile3: qcom,limit_info-3 {
+		qcom,temperature-sensor = <&sensor_information4>;
+		qcom,hotplug-mitigation-enable;
+	};
+
+	mitigation_profile4: qcom,limit_info-4 {
+		qcom,temperature-sensor = <&sensor_information7>;
+		qcom,hotplug-mitigation-enable;
+	};
+
+	mitigation_profile5: qcom,limit_info-5 {
+		qcom,temperature-sensor = <&sensor_information8>;
+		qcom,hotplug-mitigation-enable;
+	};
+
+	mitigation_profile6: qcom,limit_info-6 {
+		qcom,temperature-sensor = <&sensor_information9>;
+		qcom,hotplug-mitigation-enable;
+	};
+
+	mitigation_profile7: qcom,limit_info-7 {
+		qcom,temperature-sensor = <&sensor_information10>;
+		qcom,hotplug-mitigation-enable;
+	};
+
+	qcom,lmh {
+		compatible = "qcom,lmh_v1";
+		interrupts = <GIC_SPI 134 IRQ_TYPE_LEVEL_HIGH>;
+	};
+
+	qcom,msm-thermal {
+		compatible = "qcom,msm-thermal";
+		qcom,sensor-id = <1>;
+		qcom,poll-ms = <100>;
+		qcom,therm-reset-temp = <115>;
+		qcom,core-limit-temp = <90>;
+		qcom,core-temp-hysteresis = <10>;
+		qcom,hotplug-temp = <105>;
+		qcom,hotplug-temp-hysteresis = <20>;
+		qcom,online-hotplug-core;
+		qcom,synchronous-cluster-id = <0 1>;
+		qcom,synchronous-cluster-map = <0 4 &CPU0 &CPU1 &CPU2 &CPU3>,
+						<1 4 &CPU4 &CPU5 &CPU6 &CPU7>;
+		clock-names = "osm";
+		clocks = <&clock_cpu clk_pwrcl_clk>;
+
+		qcom,vdd-restriction-temp = <5>;
+		qcom,vdd-restriction-temp-hysteresis = <10>;
+
+		vdd-dig-supply = <&pm8998_s1_floor_level>;
+		vdd-gfx-supply = <&gfx_vreg>;
+
+		qcom,vdd-dig-rstr {
+			qcom,vdd-rstr-reg = "vdd-dig";
+			qcom,levels = <RPM_SMD_REGULATOR_LEVEL_NOM
+					RPM_SMD_REGULATOR_LEVEL_TURBO
+					RPM_SMD_REGULATOR_LEVEL_TURBO>;
+				/* Nominal, Super Turbo, Super Turbo */
+			qcom,min-level = <RPM_SMD_REGULATOR_LEVEL_NONE>;
+				/* No Request */
+		};
+
+		qcom,vdd-gfx-rstr {
+			qcom,vdd-rstr-reg = "vdd-gfx";
+			qcom,levels = <5 6 6>; /* Nominal, Turbo, Turbo */
+			qcom,min-level = <1>; /* No Request */
+		};
+
+		msm_thermal_freq: qcom,vdd-apps-rstr {
+			qcom,vdd-rstr-reg = "vdd-apps";
+			qcom,levels = <1248000>;
+			qcom,freq-req;
+		};
+	};
+
+	pcie0: qcom,pcie@01c00000 {
+		compatible = "qcom,pci-msm";
+		cell-index = <0>;
+
+		reg = <0x1c00000 0x2000>,
+		      <0x1c06000 0x1000>,
+		      <0x1b000000 0xf1d>,
+		      <0x1b000f20 0xa8>,
+		      <0x1b100000 0x100000>,
+		      <0x1b200000 0x100000>,
+		      <0x1b300000 0xd00000>;
+
+		reg-names = "parf", "phy", "dm_core", "elbi",
+				"conf", "io", "bars";
+
+		#address-cells = <3>;
+		#size-cells = <2>;
+		ranges = <0x01000000 0x0 0x1b200000 0x1b200000 0x0 0x100000>,
+			<0x02000000 0x0 0x1b300000 0x1b300000 0x0 0xd00000>;
+		interrupt-parent = <&pcie0>;
+		interrupts = <0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19
+				20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35
+				36 37>;
+		#interrupt-cells = <1>;
+		interrupt-map-mask = <0 0 0 0xffffffff>;
+		interrupt-map = <0 0 0 0 &intc 0 0 405 0
+				0 0 0 1 &intc 0 0 135 0
+				0 0 0 2 &intc 0 0 136 0
+				0 0 0 3 &intc 0 0 138 0
+				0 0 0 4 &intc 0 0 139 0
+				0 0 0 5 &intc 0 0 278 0
+				0 0 0 6 &intc 0 0 576 0
+				0 0 0 7 &intc 0 0 577 0
+				0 0 0 8 &intc 0 0 578 0
+				0 0 0 9 &intc 0 0 579 0
+				0 0 0 10 &intc 0 0 580 0
+				0 0 0 11 &intc 0 0 581 0
+				0 0 0 12 &intc 0 0 582 0
+				0 0 0 13 &intc 0 0 583 0
+				0 0 0 14 &intc 0 0 584 0
+				0 0 0 15 &intc 0 0 585 0
+				0 0 0 16 &intc 0 0 586 0
+				0 0 0 17 &intc 0 0 587 0
+				0 0 0 18 &intc 0 0 588 0
+				0 0 0 19 &intc 0 0 589 0
+				0 0 0 20 &intc 0 0 590 0
+				0 0 0 21 &intc 0 0 591 0
+				0 0 0 22 &intc 0 0 592 0
+				0 0 0 23 &intc 0 0 593 0
+				0 0 0 24 &intc 0 0 594 0
+				0 0 0 25 &intc 0 0 595 0
+				0 0 0 26 &intc 0 0 596 0
+				0 0 0 27 &intc 0 0 597 0
+				0 0 0 28 &intc 0 0 598 0
+				0 0 0 29 &intc 0 0 599 0
+				0 0 0 30 &intc 0 0 600 0
+				0 0 0 31 &intc 0 0 601 0
+				0 0 0 32 &intc 0 0 602 0
+				0 0 0 33 &intc 0 0 603 0
+				0 0 0 34 &intc 0 0 604 0
+				0 0 0 35 &intc 0 0 605 0
+				0 0 0 36 &intc 0 0 606 0
+				0 0 0 37 &intc 0 0 607 0>;
+
+		interrupt-names = "int_msi", "int_a", "int_b", "int_c",
+				"int_d", "int_global_int",
+				"msi_0", "msi_1", "msi_2", "msi_3",
+				"msi_4", "msi_5", "msi_6", "msi_7",
+				"msi_8", "msi_9", "msi_10", "msi_11",
+				"msi_12", "msi_13", "msi_14", "msi_15",
+				"msi_16", "msi_17", "msi_18", "msi_19",
+				"msi_20", "msi_21", "msi_22", "msi_23",
+				"msi_24", "msi_25", "msi_26", "msi_27",
+				"msi_28", "msi_29", "msi_30", "msi_31";
+
+		qcom,phy-sequence = <0x804 0x01 0x00
+					0x034 0x14 0x00
+					0x138 0x30 0x00
+					0x048 0x0f 0x00
+					0x15c 0x06 0x00
+					0x090 0x01 0x00
+					0x088 0x20 0x00
+					0x0f0 0x00 0x00
+					0x0f8 0x01 0x00
+					0x0f4 0xc9 0x00
+					0x11c 0xff 0x00
+					0x120 0x3f 0x00
+					0x164 0x01 0x00
+					0x154 0x00 0x00
+					0x148 0x0a 0x00
+					0x05C 0x19 0x00
+					0x038 0x90 0x00
+					0x0b0 0x82 0x00
+					0x0c0 0x03 0x00
+					0x0bc 0x55 0x00
+					0x0b8 0x55 0x00
+					0x0a0 0x00 0x00
+					0x09c 0x0d 0x00
+					0x098 0x04 0x00
+					0x13c 0x00 0x00
+					0x060 0x08 0x00
+					0x068 0x16 0x00
+					0x070 0x34 0x00
+					0x15c 0x06 0x00
+					0x138 0x33 0x00
+					0x03c 0x02 0x00
+					0x040 0x0e 0x00
+					0x080 0x04 0x00
+					0x0dc 0x00 0x00
+					0x0d8 0x3f 0x00
+					0x00c 0x09 0x00
+					0x010 0x01 0x00
+					0x01c 0x40 0x00
+					0x020 0x01 0x00
+					0x014 0x02 0x00
+					0x018 0x00 0x00
+					0x024 0x7e 0x00
+					0x028 0x15 0x00
+					0x244 0x02 0x00
+					0x2a4 0x12 0x00
+					0x260 0x10 0x00
+					0x28c 0x06 0x00
+					0x504 0x03 0x00
+					0x500 0x10 0x00
+					0x50c 0x14 0x00
+					0x4d4 0x0a 0x00
+					0x4d8 0x04 0x00
+					0x4dc 0x1a 0x00
+					0x434 0x4b 0x00
+					0x414 0x04 0x00
+					0x40c 0x04 0x00
+					0x4f8 0x00 0x00
+					0x4fc 0x80 0x00
+					0x51c 0x40 0x00
+					0x444 0x71 0x00
+					0x43c 0x40 0x00
+					0x854 0x04 0x00
+					0x62c 0x52 0x00
+					0x9ac 0x00 0x00
+					0x8a0 0x01 0x00
+					0x9e0 0x00 0x00
+					0x9dc 0x20 0x00
+					0x9a8 0x00 0x00
+					0x8a4 0x01 0x00
+					0x8a8 0x73 0x00
+					0x9d8 0xaa 0x00
+					0x9b0 0x03 0x00
+					0x804 0x03 0x00
+					0x800 0x00 0x00
+					0x808 0x03 0x00>;
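+		/*
+		 * Assumed to use the same <register-offset value delay>
+		 * triplet layout as the QMP PHY sequence above.
+		 */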
+
+		pinctrl-names = "default", "sleep";
+		pinctrl-0 = <&pcie0_clkreq_default
+			&pcie0_perst_default
+			&pcie0_wake_default>;
+		pinctrl-1 = <&pcie0_clkreq_default
+			&pcie0_perst_default
+			&pcie0_wake_sleep>;
+
+		perst-gpio = <&tlmm 35 0>;
+		wake-gpio = <&tlmm 37 0>;
+
+		gdsc-vdd-supply = <&gdsc_pcie_0>;
+		vreg-1.8-supply = <&pm8998_l2>;
+		vreg-0.9-supply = <&pm8998_l1>;
+		vreg-cx-supply = <&pm8998_s1_level>;
+
+		qcom,vreg-1.8-voltage-level = <1200000 1200000 24000>;
+		qcom,vreg-0.9-voltage-level = <880000 880000 24000>;
+		qcom,vreg-cx-voltage-level = <RPM_SMD_REGULATOR_LEVEL_BINNING
+						RPM_SMD_REGULATOR_LEVEL_SVS 0>;
+
+		qcom,l1-supported;
+		qcom,l1ss-supported;
+		qcom,aux-clk-sync;
+
+		qcom,ep-latency = <10>;
+
+		qcom,boot-option = <0x1>;
+
+		linux,pci-domain = <0>;
+
+		qcom,msi-gicm-addr = <0x17a00040>;
+		qcom,msi-gicm-base = <0x260>;
+
+		qcom,pcie-phy-ver = <0x20>;
+		qcom,use-19p2mhz-aux-clk;
+
+		iommus = <&anoc1_smmu>;
+		qcom,smmu-exist;
+		qcom,smmu-sid-base = <0x1480>;
+
+		qcom,msm-bus,name = "pcie0";
+		qcom,msm-bus,num-cases = <2>;
+		qcom,msm-bus,num-paths = <1>;
+		qcom,msm-bus,vectors-KBps =
+				<45 512 0 0>,
+				<45 512 500 800>;
+
+		clocks = <&clock_gcc clk_gcc_pcie_0_pipe_clk>,
+			<&clock_gcc clk_ln_bb_clk1>,
+			<&clock_gcc clk_gcc_pcie_0_aux_clk>,
+			<&clock_gcc clk_gcc_pcie_0_cfg_ahb_clk>,
+			<&clock_gcc clk_gcc_pcie_0_mstr_axi_clk>,
+			<&clock_gcc clk_gcc_pcie_0_slv_axi_clk>,
+			<&clock_gcc clk_gcc_pcie_clkref_clk>;
+
+		clock-names = "pcie_0_pipe_clk", "pcie_0_ref_clk_src",
+				"pcie_0_aux_clk", "pcie_0_cfg_ahb_clk",
+				"pcie_0_mstr_axi_clk", "pcie_0_slv_axi_clk",
+				"pcie_0_ldo";
+
+		max-clock-frequency-hz = <0>, <0>, <19200000>,
+					<0>, <0>, <0>, <0>, <0>, <0>,
+					<0>, <0>, <0>, <0>, <0>, <0>,
+					<0>, <0>;
+
+		resets = <&clock_gcc PCIE_PHY_BCR>,
+			 <&clock_gcc PCIE_0_PHY_BCR>,
+			 <&clock_gcc PCIE_0_PHY_BCR>;
+
+		reset-names = "pcie_phy_reset",
+				"pcie_0_phy_reset",
+				"pcie_0_phy_pipe_reset";
+	};
+
+	qcom,bcl {
+		compatible = "qcom,bcl";
+		qcom,bcl-enable;
+		qcom,bcl-framework-interface;
+		qcom,bcl-freq-control-list = <&CPU4 &CPU5 &CPU6 &CPU7>;
+		qcom,bcl-hotplug-list = <&CPU4 &CPU5 &CPU6 &CPU7>;
+		qcom,bcl-soc-hotplug-list = <&CPU4 &CPU5 &CPU6 &CPU7>;
+		qcom,ibat-monitor {
+			qcom,low-threshold-uamp = <3400000>;
+			qcom,high-threshold-uamp = <4200000>;
+			qcom,mitigation-freq-khz = <576000>;
+			qcom,vph-high-threshold-uv = <3500000>;
+			qcom,vph-low-threshold-uv = <3300000>;
+			qcom,soc-low-threshold = <10>;
+			qcom,thermal-handle = <&msm_thermal_freq>;
+		};
+	};
+
+	qcom,ssc@5c00000 {
+		compatible = "qcom,pil-tz-generic";
+		reg = <0x5c00000 0x4000>;
+		interrupts = <0 390 1>;
+
+		vdd_cx-supply = <&pm8998_l27_level>;
+		vdd_px-supply = <&pm8998_lvs2>;
+		qcom,vdd_cx-uV-uA = <RPM_SMD_REGULATOR_LEVEL_TURBO 0>;
+		qcom,proxy-reg-names = "vdd_cx", "vdd_px";
+		qcom,keep-proxy-regs-on;
+
+		clocks = <&clock_gcc clk_cxo_pil_ssc_clk>,
+			 <&clock_gcc clk_aggre2_noc_clk>;
+		clock-names = "xo", "aggre2";
+		qcom,proxy-clock-names = "xo", "aggre2";
+
+		qcom,pas-id = <12>;
+		qcom,proxy-timeout-ms = <10000>;
+		qcom,smem-id = <424>;
+		qcom,sysmon-id = <3>;
+		qcom,ssctl-instance-id = <0x16>;
+		qcom,firmware-name = "slpi";
+		status = "ok";
+		memory-region = <&pil_slpi_mem>;
+
+		/* GPIO inputs from ssc */
+		qcom,gpio-err-fatal = <&smp2pgpio_ssr_smp2p_3_in 0 0>;
+		qcom,gpio-proxy-unvote = <&smp2pgpio_ssr_smp2p_3_in 2 0>;
+		qcom,gpio-err-ready = <&smp2pgpio_ssr_smp2p_3_in 1 0>;
+		qcom,gpio-stop-ack = <&smp2pgpio_ssr_smp2p_3_in 3 0>;
+
+		/* GPIO output to ssc */
+		qcom,gpio-force-stop = <&smp2pgpio_ssr_smp2p_3_out 0 0>;
+	};
+
+	qcom,venus@cce0000 {
+		compatible = "qcom,pil-tz-generic";
+		reg = <0xcce0000 0x4000>;
+
+		vdd-supply = <&gdsc_venus>;
+		qcom,proxy-reg-names = "vdd";
+
+		clocks = <&clock_mmss clk_mmss_video_core_clk>,
+			 <&clock_mmss clk_mmss_mnoc_ahb_clk>,
+			 <&clock_mmss clk_mmss_video_ahb_clk>,
+			 <&clock_gcc clk_mmssnoc_axi_clk>,
+			 <&clock_mmss clk_mmss_video_axi_clk>,
+			 <&clock_mmss clk_mmss_video_maxi_clk>;
+		clock-names = "core_clk", "mnoc_ahb_clk", "iface_clk",
+			      "noc_axi_clk", "bus_clk", "maxi_clk";
+		qcom,proxy-clock-names = "core_clk", "mnoc_ahb_clk",
+			      "iface_clk", "noc_axi_clk", "bus_clk", "maxi_clk";
+
+		qcom,pas-id = <9>;
+		qcom,msm-bus,name = "pil-venus";
+		qcom,msm-bus,num-cases = <2>;
+		qcom,msm-bus,num-paths = <1>;
+		qcom,msm-bus,vectors-KBps =
+			<63 512 0 0>,
+			<63 512 0 304000>;
+		qcom,proxy-timeout-ms = <100>;
+		qcom,firmware-name = "venus";
+		memory-region = <&pil_video_mem>;
+		status = "ok";
+	};
+
+	wdog: qcom,wdt@17817000 {
+		compatible = "qcom,msm-watchdog";
+		reg = <0x17817000 0x1000>;
+		reg-names = "wdt-base";
+		interrupts = <0 3 0>, <0 4 0>;
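+		/* bark/pet times below are in milliseconds */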
+		qcom,bark-time = <11000>;
+		qcom,pet-time = <10000>;
+		qcom,ipi-ping;
+		qcom,wakeup-enable;
+		qcom,scandump-size = <0x40000>;
+	};
+
+	qcom,spss@1d00000 {
+		compatible = "qcom,pil-tz-generic";
+		reg = <0x1d0101c 0x4>,
+		      <0x1d01024 0x4>,
+		      <0x1d01028 0x4>,
+		      <0x1d0103c 0x4>,
+		      <0x1d02030 0x4>;
+		reg-names = "sp2soc_irq_status", "sp2soc_irq_clr",
+			    "sp2soc_irq_mask", "rmb_err", "rmb_err_spare2";
+		interrupts = <0 352 1>;
+
+		vdd_cx-supply = <&pm8998_s1_level>;
+		qcom,proxy-reg-names = "vdd_cx";
+		qcom,vdd_cx-uV-uA = <RPM_SMD_REGULATOR_LEVEL_TURBO 100000>;
+
+		clocks = <&clock_gcc clk_cxo_pil_spss_clk>;
+		clock-names = "xo";
+		qcom,proxy-clock-names = "xo";
+		qcom,pil-generic-irq-handler;
+		status = "ok";
+
+		qcom,pas-id = <14>;
+		qcom,proxy-timeout-ms = <10000>;
+		qcom,firmware-name = "spss";
+		memory-region = <&spss_mem>;
+		qcom,spss-scsr-bits = <24 25>;
+	};
+
+	qcom,msm-rtb {
+		compatible = "qcom,msm-rtb";
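+		/* RTB (register trace buffer) size: 0x100000 = 1 MiB */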
+		qcom,rtb-size = <0x100000>;
+	};
+
+	qcom,mpm2-sleep-counter@10a3000 {
+		compatible = "qcom,mpm2-sleep-counter";
+		reg = <0x010a3000 0x1000>;
+		clock-frequency = <32768>;
+	};
+
+	qcom,msm-imem@146bf000 {
+		compatible = "qcom,msm-imem";
+		reg = <0x146bf000 0x1000>;
+		ranges = <0x0 0x146bf000 0x1000>;
+		#address-cells = <1>;
+		#size-cells = <1>;
+
+		mem_dump_table@10 {
+			compatible = "qcom,msm-imem-mem_dump_table";
+			reg = <0x10 8>;
+		};
+
+		dload_type@18 {
+			compatible = "qcom,msm-imem-dload-type";
+			reg = <0x18 4>;
+		};
+
+		restart_reason@65c {
+			compatible = "qcom,msm-imem-restart_reason";
+			reg = <0x65c 4>;
+		};
+
+		boot_stats@6b0 {
+			compatible = "qcom,msm-imem-boot_stats";
+			reg = <0x6b0 32>;
+		};
+
+		kaslr_offset@6d0 {
+			compatible = "qcom,msm-imem-kaslr_offset";
+			reg = <0x6d0 12>;
+		};
+
+		pil@94c {
+			compatible = "qcom,msm-imem-pil";
+			reg = <0x94c 200>;
+		};
+
+		diag_dload@c8 {
+			compatible = "qcom,msm-imem-diag-dload";
+			reg = <0xc8 200>;
+		};
+	};
+
+	cpu_pmu: cpu-pmu {
+		compatible = "arm,armv8-pmuv3";
+		qcom,irq-is-percpu;
+		interrupts = <1 6 4>;
+	};
+
+	cpuss_dump {
+		compatible = "qcom,cpuss-dump";
+		qcom,l1_i_cache0 {
+			qcom,dump-node = <&L1_I_0>;
+			qcom,dump-id = <0x60>;
+		};
+		qcom,l1_i_cache1 {
+			qcom,dump-node = <&L1_I_1>;
+			qcom,dump-id = <0x61>;
+		};
+		qcom,l1_i_cache2 {
+			qcom,dump-node = <&L1_I_2>;
+			qcom,dump-id = <0x62>;
+		};
+		qcom,l1_i_cache3 {
+			qcom,dump-node = <&L1_I_3>;
+			qcom,dump-id = <0x63>;
+		};
+		qcom,l1_i_cache100 {
+			qcom,dump-node = <&L1_I_100>;
+			qcom,dump-id = <0x64>;
+		};
+		qcom,l1_i_cache101 {
+			qcom,dump-node = <&L1_I_101>;
+			qcom,dump-id = <0x65>;
+		};
+		qcom,l1_i_cache102 {
+			qcom,dump-node = <&L1_I_102>;
+			qcom,dump-id = <0x66>;
+		};
+		qcom,l1_i_cache103 {
+			qcom,dump-node = <&L1_I_103>;
+			qcom,dump-id = <0x67>;
+		};
+		qcom,l1_d_cache0 {
+			qcom,dump-node = <&L1_D_0>;
+			qcom,dump-id = <0x80>;
+		};
+		qcom,l1_d_cache1 {
+			qcom,dump-node = <&L1_D_1>;
+			qcom,dump-id = <0x81>;
+		};
+		qcom,l1_d_cache2 {
+			qcom,dump-node = <&L1_D_2>;
+			qcom,dump-id = <0x82>;
+		};
+		qcom,l1_d_cache3 {
+			qcom,dump-node = <&L1_D_3>;
+			qcom,dump-id = <0x83>;
+		};
+		qcom,l1_d_cache100 {
+			qcom,dump-node = <&L1_D_100>;
+			qcom,dump-id = <0x84>;
+		};
+		qcom,l1_d_cache101 {
+			qcom,dump-node = <&L1_D_101>;
+			qcom,dump-id = <0x85>;
+		};
+		qcom,l1_d_cache102 {
+			qcom,dump-node = <&L1_D_102>;
+			qcom,dump-id = <0x86>;
+		};
+		qcom,l1_d_cache103 {
+			qcom,dump-node = <&L1_D_103>;
+			qcom,dump-id = <0x87>;
+		};
+		qcom,l1_tlb_dump0 {
+			qcom,dump-node = <&L1_TLB_0>;
+			qcom,dump-id = <0x20>;
+		};
+		qcom,l1_tlb_dump1 {
+			qcom,dump-node = <&L1_TLB_1>;
+			qcom,dump-id = <0x21>;
+		};
+		qcom,l1_tlb_dump2 {
+			qcom,dump-node = <&L1_TLB_2>;
+			qcom,dump-id = <0x22>;
+		};
+		qcom,l1_tlb_dump3 {
+			qcom,dump-node = <&L1_TLB_3>;
+			qcom,dump-id = <0x23>;
+		};
+		qcom,l1_tlb_dump100 {
+			qcom,dump-node = <&L1_TLB_100>;
+			qcom,dump-id = <0x24>;
+		};
+		qcom,l1_tlb_dump101 {
+			qcom,dump-node = <&L1_TLB_101>;
+			qcom,dump-id = <0x25>;
+		};
+		qcom,l1_tlb_dump102 {
+			qcom,dump-node = <&L1_TLB_102>;
+			qcom,dump-id = <0x26>;
+		};
+		qcom,l1_tlb_dump103 {
+			qcom,dump-node = <&L1_TLB_103>;
+			qcom,dump-id = <0x27>;
+		};
+	};
+
+	ssc_sensors: qcom,msm-ssc-sensors {
+		compatible = "qcom,msm-ssc-sensors";
+		status = "ok";
+		qcom,firmware-name = "slpi_v1";
+	};
+
+	dcc: dcc@10b3000 {
+		compatible = "qcom,dcc";
+		reg = <0x10b3000 0x1000>,
+		      <0x10b4000 0x2000>;
+		reg-names = "dcc-base", "dcc-ram-base";
+
+		clocks = <&clock_gcc clk_gcc_dcc_ahb_clk>;
+		clock-names = "dcc_clk";
+	};
+
+	qcom,msm-core@780000 {
+		compatible = "qcom,apss-core-ea";
+		reg = <0x780000 0x1000>;
+		qcom,low-hyst-temp = <100>;
+		qcom,high-hyst-temp = <100>;
+		qcom,polling-interval = <50>;
+
+		ea0: ea0 {
+			sensor = <&sensor_information1>;
+		};
+
+		ea1: ea1 {
+			sensor = <&sensor_information2>;
+		};
+
+		ea2: ea2 {
+			sensor = <&sensor_information3>;
+		};
+
+		ea3: ea3 {
+			sensor = <&sensor_information4>;
+		};
+
+		ea4: ea4 {
+			sensor = <&sensor_information7>;
+		};
+
+		ea5: ea5 {
+			sensor = <&sensor_information8>;
+		};
+
+		ea6: ea6 {
+			sensor = <&sensor_information9>;
+		};
+
+		ea7: ea7 {
+			sensor = <&sensor_information10>;
+		};
+
+	};
+
+	msm_ath10k_wlan: qcom,msm_ath10k_wlan {
+		status = "disabled";
+		compatible = "qcom,wcn3990-wifi";
+		reg = <0x18800000 0x800000>;
+		reg-names = "membase";
+		clocks = <&clock_gcc clk_rf_clk2_pin>;
+		clock-names = "cxo_ref_clk_pin";
+		interrupts =
+			<0 413 0 /* CE0 */ >,
+			<0 414 0 /* CE1 */ >,
+			<0 415 0 /* CE2 */ >,
+			<0 416 0 /* CE3 */ >,
+			<0 417 0 /* CE4 */ >,
+			<0 418 0 /* CE5 */ >,
+			<0 420 0 /* CE6 */ >,
+			<0 421 0 /* CE7 */ >,
+			<0 422 0 /* CE8 */ >,
+			<0 423 0 /* CE9 */ >,
+			<0 424 0 /* CE10 */ >,
+			<0 425 0 /* CE11 */ >;
+		vdd-0.8-cx-mx-supply = <&pm8998_l5>;
+		vdd-1.8-xo-supply = <&pm8998_l7_pin_ctrl>;
+		vdd-1.3-rfa-supply = <&pm8998_l17_pin_ctrl>;
+		vdd-3.3-ch0-supply = <&pm8998_l25_pin_ctrl>;
+		qcom,vdd-0.8-cx-mx-config = <800000 800000>;
+		qcom,vdd-3.3-ch0-config = <3104000 3312000>;
+	};
+
+	qcom,icnss@18800000 {
+		compatible = "qcom,icnss";
+		reg = <0x18800000 0x800000>,
+		      <0xa0000000 0x10000000>,
+		      <0xb0000000 0x10000>;
+		reg-names = "membase", "smmu_iova_base", "smmu_iova_ipa";
+		clocks = <&clock_gcc clk_rf_clk2_pin>;
+		clock-names = "cxo_ref_clk_pin";
+		iommus = <&anoc2_smmu 0x1900>,
+			 <&anoc2_smmu 0x1901>;
+		interrupts = <0 413 0 /* CE0 */ >,
+			     <0 414 0 /* CE1 */ >,
+			     <0 415 0 /* CE2 */ >,
+			     <0 416 0 /* CE3 */ >,
+			     <0 417 0 /* CE4 */ >,
+			     <0 418 0 /* CE5 */ >,
+			     <0 420 0 /* CE6 */ >,
+			     <0 421 0 /* CE7 */ >,
+			     <0 422 0 /* CE8 */ >,
+			     <0 423 0 /* CE9 */ >,
+			     <0 424 0 /* CE10 */ >,
+			     <0 425 0 /* CE11 */ >;
+		qcom,wlan-msa-memory = <0x100000>;
+		vdd-0.8-cx-mx-supply = <&pm8998_l5>;
+		vdd-1.8-xo-supply = <&pm8998_l7_pin_ctrl>;
+		vdd-1.3-rfa-supply = <&pm8998_l17_pin_ctrl>;
+		vdd-3.3-ch0-supply = <&pm8998_l25_pin_ctrl>;
+		qcom,vdd-0.8-cx-mx-config = <800000 800000>;
+		qcom,vdd-3.3-ch0-config = <3104000 3312000>;
+		qcom,icnss-vadc = <&pm8998_vadc>;
+		qcom,icnss-adc_tm = <&pm8998_adc_tm>;
+	};
+
+	tspp: msm_tspp@0c1e7000 {
+		compatible = "qcom,msm_tspp";
+		reg = <0x0c1e7000 0x200>, /* MSM_TSIF0_PHYS */
+		      <0x0c1e8000 0x200>, /* MSM_TSIF1_PHYS */
+		      <0x0c1e9000 0x1000>, /* MSM_TSPP_PHYS  */
+		      <0x0c1c4000 0x23000>; /* MSM_TSPP_BAM_PHYS */
+		reg-names = "MSM_TSIF0_PHYS",
+			"MSM_TSIF1_PHYS",
+			"MSM_TSPP_PHYS",
+			"MSM_TSPP_BAM_PHYS";
+		interrupts = <0 121 0>, /* TSIF_TSPP_IRQ */
+			<0 119 0>, /* TSIF0_IRQ */
+			<0 120 0>, /* TSIF1_IRQ */
+			<0 122 0>; /* TSIF_BAM_IRQ */
+		interrupt-names = "TSIF_TSPP_IRQ",
+			"TSIF0_IRQ",
+			"TSIF1_IRQ",
+			"TSIF_BAM_IRQ";
+
+		clock-names = "iface_clk", "ref_clk";
+		clocks = <&clock_gcc clk_gcc_tsif_ahb_clk>,
+			<&clock_gcc clk_gcc_tsif_ref_clk>;
+
+		qcom,msm-bus,name = "tsif";
+		qcom,msm-bus,num-cases = <2>;
+		qcom,msm-bus,num-paths = <1>;
+		qcom,msm-bus,vectors-KBps =
+				<82 512 0 0>, /* No vote */
+				<82 512 12288 24576>;
+				/* Max. bandwidth, 2xTSIF, each max of 96Mbps */
+
+		pinctrl-names = "disabled",
+			"tsif0-mode1", "tsif0-mode2",
+			"tsif1-mode1", "tsif1-mode2",
+			"dual-tsif-mode1", "dual-tsif-mode2";
+
+		pinctrl-0 = <>;				/* disabled */
+		pinctrl-1 = <&tsif0_signals_active>;	/* tsif0-mode1 */
+		pinctrl-2 = <&tsif0_signals_active
+			&tsif0_sync_active>;		/* tsif0-mode2 */
+		pinctrl-3 = <&tsif1_signals_active>;	/* tsif1-mode1 */
+		pinctrl-4 = <&tsif1_signals_active
+			&tsif1_sync_active>;		/* tsif1-mode2 */
+		pinctrl-5 = <&tsif0_signals_active
+			&tsif1_signals_active>;		/* dual-tsif-mode1 */
+		pinctrl-6 = <&tsif0_signals_active
+			&tsif0_sync_active
+			&tsif1_signals_active
+			&tsif1_sync_active>;		/* dual-tsif-mode2 */
+	};
+
+	wil6210: qcom,wil6210 {
+		status = "disabled";
+		compatible = "qcom,wil6210";
+		qcom,pcie-parent = <&pcie0>;
+		qcom,wigig-en = <&tlmm 80 0>;
+		qcom,msm-bus,name = "wil6210";
+		qcom,msm-bus,num-cases = <2>;
+		qcom,msm-bus,num-paths = <1>;
+		qcom,msm-bus,vectors-KBps =
+			<45 512 0 0>,
+			<45 512 600000 800000>; /* ~4.6Gbps (MCS12) */
+		qcom,use-ext-supply;
+		vdd-supply = <&pm8998_s7>;
+		vddio-supply = <&pm8998_s5>;
+		qcom,use-ext-clocks;
+		clocks = <&clock_gcc clk_rf_clk3>,
+			 <&clock_gcc clk_rf_clk3_pin>;
+		clock-names = "rf_clk3_clk", "rf_clk3_pin_clk";
+		qcom,smmu-support;
+		qcom,smmu-s1-en;
+		qcom,smmu-fast-map;
+		qcom,smmu-coherent;
+		qcom,smmu-mapping = <0x20000000 0xe0000000>;
+		qcom,keep-radio-on-during-sleep;
+	};
+
+	qcom,qsee_ipc_irq_bridge {
+		compatible = "qcom,qsee-ipc-irq-bridge";
+
+		qcom,qsee-ipc-irq-spss {
+			qcom,rx-irq-clr = <0x1d08008 0x4>;
+			qcom,rx-irq-clr-mask = <0x1>;
+			qcom,dev-name = "qsee_ipc_irq_spss";
+			interrupts = <0 349 4>;
+			label = "spss";
+		};
+	};
+};
+
+&clock_cpu {
+	lmh_dcvs0: qcom,limits-dcvs@0 {
+		compatible = "qcom,msm-hw-limits";
+		interrupts = <GIC_SPI 37 IRQ_TYPE_LEVEL_HIGH>;
+	};
+
+	lmh_dcvs1: qcom,limits-dcvs@1 {
+		compatible = "qcom,msm-hw-limits";
+		interrupts = <GIC_SPI 38 IRQ_TYPE_LEVEL_HIGH>;
+	};
+};
+
+&gdsc_usb30 {
+	status = "ok";
+};
+
+&gdsc_pcie_0 {
+	status = "ok";
+};
+
+&gdsc_ufs {
+	status = "ok";
+};
+
+&gdsc_bimc_smmu {
+	clock-names = "bus_clk";
+	clocks = <&clock_mmss clk_mmss_bimc_smmu_axi_clk>;
+	proxy-supply = <&gdsc_bimc_smmu>;
+	qcom,proxy-consumer-enable;
+	status = "ok";
+};
+
+&gdsc_hlos1_vote_lpass_adsp {
+	status = "ok";
+};
+
+&gdsc_hlos1_vote_lpass_core {
+	status = "ok";
+};
+
+&gdsc_venus {
+	status = "ok";
+};
+
+&gdsc_venus_core0 {
+	status = "ok";
+	qcom,support-hw-trigger;
+};
+
+&gdsc_venus_core1 {
+	status = "ok";
+	qcom,support-hw-trigger;
+};
+
+&gdsc_camss_top {
+	status = "ok";
+};
+
+&gdsc_vfe0 {
+	parent-supply = <&gdsc_camss_top>;
+	status = "ok";
+};
+
+&gdsc_vfe1 {
+	parent-supply = <&gdsc_camss_top>;
+	status = "ok";
+};
+
+&gdsc_cpp {
+	parent-supply = <&gdsc_camss_top>;
+	qcom,support-hw-trigger;
+	status = "ok";
+};
+
+&gdsc_mdss {
+	proxy-supply = <&gdsc_mdss>;
+	qcom,proxy-consumer-enable;
+	status = "ok";
+};
+
+&gdsc_gpu_gx {
+	clock-names = "core_root_clk";
+	clocks = <&clock_gfx clk_gfx3d_clk_src>;
+	qcom,force-enable-root-clk;
+	parent-supply = <&gfx_vreg>;
+	status = "ok";
+};
+
+&gdsc_gpu_cx {
+	status = "ok";
+};
+
+#include "msm-pm8998.dtsi"
+#include "msm-pmi8998.dtsi"
+#include "msm-pm8005.dtsi"
+#include "msm-pm8998-rpm-regulator.dtsi"
+#include "msm8998-regulator.dtsi"
+
+#include "msm8998-pm.dtsi"
+#include "msm-arm-smmu-8998.dtsi"
+#include "msm-arm-smmu-impl-defs-8998.dtsi"
+#include "msm8998-ion.dtsi"
+#include "msm8998-camera.dtsi"
+#include "msm8998-vidc.dtsi"
+#include "msm8998-coresight.dtsi"
+#include "msm8998-bus.dtsi"
+#include "msm8998-gpu.dtsi"
+#include "msm8998-pinctrl.dtsi"
+#include "msm-audio-lpass.dtsi"
+#include "msm8998-mdss-pll.dtsi"
+#include "msm-rdbg.dtsi"
+#include "msm8998-blsp.dtsi"
+#include "msm8998-audio.dtsi"
+#include "msm8998-sde.dtsi"
diff -Nruw linux-4.4.115-fbx/arch/arm/boot/dts/qcom./msm8998-gpu.dtsi linux-4.4.115-fbx/arch/arm/boot/dts/qcom/msm8998-gpu.dtsi
--- linux-4.4.115-fbx/arch/arm/boot/dts/qcom./msm8998-gpu.dtsi	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/arch/arm/boot/dts/qcom/msm8998-gpu.dtsi	2019-01-22 16:16:21.195225506 +0100
@@ -0,0 +1,277 @@
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+&soc {
+
+	pil_gpu: qcom,kgsl-hyp {
+		compatible = "qcom,pil-tz-generic";
+		qcom,pas-id = <13>;
+		qcom,firmware-name = "a540_zap";
+	};
+
+	msm_bus: qcom,kgsl-busmon {
+		label = "kgsl-busmon";
+		compatible = "qcom,kgsl-busmon";
+	};
+
+	gpubw: qcom,gpubw {
+		compatible = "qcom,devbw";
+		governor = "bw_vbif";
+		qcom,src-dst-ports = <26 512>;
+		/*
+		 * The active-only flag is used while registering the bus
+		 * governor. It helps release the bus vote when the CPU
+		 * subsystem is inactive.
+		 */
+		qcom,active-only;
+		qcom,bw-tbl =
+			<     0 /*  off     */ >,
+			<   762 /*  100 MHz */ >,
+			<  1144 /*  150 MHz */ >,
+			<  1525 /*  200 MHz */ >,
+			<  2288 /*  300 MHz */ >,
+			<  3143 /*  412 MHz */ >,
+			<  4173 /*  547 MHz */ >,
+			<  5195 /*  681 MHz */ >,
+			<  5859 /*  768 MHz */ >,
+			<  7759 /*  1017 MHz */ >,
+			<  9887 /*  1296 MHz */ >,
+			<  11863 /*  1555 MHz */ >,
+			<  13763 /*  1804 MHz */ >;
+	};
+
+	msm_gpu: qcom,kgsl-3d0@5000000 {
+		label = "kgsl-3d0";
+		compatible = "qcom,kgsl-3d0", "qcom,kgsl-3d";
+		status = "ok";
+		reg = <0x5000000 0x40000>;
+		reg-names = "kgsl_3d0_reg_memory";
+		interrupts = <0 300 0>;
+		interrupt-names = "kgsl_3d0_irq";
+		qcom,id = <0>;
+
+		qcom,chipid = <0x05040000>;
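+		/* chipid 0x05040000: Adreno A540, matching the a540_* firmware */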
+		qcom,gpu-efuse-leakage = <0x00070130 24>;
+		qcom,base-leakage-coefficient = <34>;
+		qcom,lm-limit = <6000>;
+
+		qcom,initial-pwrlevel = <4>;
+
+		qcom,idle-timeout = <80>; //<HZ/12>
+		qcom,no-nap;
+
+		qcom,highest-bank-bit = <15>;
+
+		qcom,snapshot-size = <1048576>; //bytes
+
+		qcom,gpu-qdss-stm = <0x161c0000 0x40000>; // base addr, size
+
+		qcom,gpu-qtimer = <0x17921000 0x1000>; // base addr, size
+
+		qcom,tsens-name = "tsens_tz_sensor12";
+
+		/* Avoid L2PC on big cluster CPUs (CPU 4,5,6,7) */
+		qcom,l2pc-cpu-mask = <0x000000f0>;
+
+		/* Quirks */
+		qcom,gpu-quirk-lmloadkill-disable;
+
+		/* DRM settings */
+		qcom,gpmu-tsens = <0x000c000d>;
+		qcom,max-power = <5448>;
+		qcom,gpmu-firmware = "a540_gpmu.fw2";
+		qcom,gpmu-version = <3 0>;
+		qcom,zap-shader = "a540_zap";
+
+		clocks = <&clock_gfx clk_gpucc_gfx3d_clk>,
+			<&clock_gcc clk_gcc_gpu_cfg_ahb_clk>,
+			<&clock_gpu clk_gpucc_rbbmtimer_clk>,
+			<&clock_gcc clk_gcc_bimc_gfx_clk>,
+			<&clock_gcc clk_gcc_gpu_bimc_gfx_clk>,
+			<&clock_gpu clk_gpucc_gfx3d_isense_clk>,
+			<&clock_gpu clk_gpucc_rbcpr_clk>,
+			<&clock_gcc clk_gcc_gpu_iref_clk>;
+
+		clock-names = "core_clk", "iface_clk", "rbbmtimer_clk",
+			"mem_clk", "mem_iface_clk", "isense_clk", "rbcpr_clk",
+			"iref_clk";
+
+		qcom,isense-clk-on-level = <1>;
+		/* Bus Scale Settings */
+		qcom,gpubw-dev = <&gpubw>;
+		qcom,bus-control;
+		qcom,msm-bus,name = "grp3d";
+		qcom,bus-width = <32>;
+		qcom,msm-bus,num-cases = <13>;
+		qcom,msm-bus,num-paths = <1>;
+		qcom,msm-bus,vectors-KBps =
+				<26 512 0 0>,
+
+				<26 512 0 800000>,      // 1 bus=100
+				<26 512 0 1200000>,     // 2 bus=150
+				<26 512 0 1600000>,     // 3 bus=200
+				<26 512 0 2400000>,     // 4 bus=300
+				<26 512 0 3296000>,     // 5 bus=412
+				<26 512 0 4376000>,     // 6 bus=547
+				<26 512 0 5448000>,     // 7 bus=681
+				<26 512 0 6144000>,     // 8 bus=768
+				<26 512 0 8136000>,     // 9 bus=1017
+				<26 512 0 10368000>,    // 10 bus=1296
+				<26 512 0 12440000>,    // 11 bus=1555
+				<26 512 0 14432000>;    // 12 bus=1804
+
+		/* GDSC regulator names */
+		regulator-names = "vddcx", "vdd";
+		/* GDSC oxili regulators */
+		vddcx-supply = <&gdsc_gpu_cx>;
+		vdd-supply = <&gdsc_gpu_gx>;
+
+		/* Trace bus */
+		coresight-name = "coresight-gfx";
+		coresight-atid = <3>;
+		port {
+			gfx_out_funnel_in1: endpoint {
+				remote-endpoint = <&funnel_in1_in_gfx>;
+			};
+		};
+
+		/* GPU Mempools */
+		qcom,gpu-mempools {
+			#address-cells = <1>;
+			#size-cells = <0>;
+			compatible = "qcom,gpu-mempools";
+
+			/* 4K Page Pool configuration */
+			qcom,gpu-mempool@0 {
+				reg = <0>;
+				qcom,mempool-page-size = <4096>;
+				qcom,mempool-reserved = <2048>;
+				qcom,mempool-allocate;
+			};
+			/* 8K Page Pool configuration */
+			qcom,gpu-mempool@1 {
+				reg = <1>;
+				qcom,mempool-page-size = <8192>;
+				qcom,mempool-reserved = <1024>;
+				qcom,mempool-allocate;
+			};
+			/* 64K Page Pool configuration */
+			qcom,gpu-mempool@2 {
+				reg = <2>;
+				qcom,mempool-page-size = <65536>;
+				qcom,mempool-reserved = <256>;
+			};
+			/* 1M Page Pool configuration */
+			qcom,gpu-mempool@3 {
+				reg = <3>;
+				qcom,mempool-page-size = <1048576>;
+				qcom,mempool-reserved = <32>;
+			};
+		};
+
+		/* Power levels */
+		qcom,gpu-pwrlevels {
+			#address-cells = <1>;
+			#size-cells = <0>;
+
+			compatible = "qcom,gpu-pwrlevels";
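+			/*
+			 * bus-freq/bus-min/bus-max below are assumed to be
+			 * indices into the 13-entry qcom,msm-bus,vectors-KBps
+			 * table, not frequencies.
+			 */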
+
+			qcom,gpu-pwrlevel@0 {
+				reg = <0>;
+				qcom,gpu-freq = <650000000>;
+				qcom,bus-freq = <12>;
+				qcom,bus-min = <11>;
+				qcom,bus-max = <12>;
+			};
+
+			qcom,gpu-pwrlevel@1 {
+				reg = <1>;
+				qcom,gpu-freq = <504000000>;
+				qcom,bus-freq = <11>;
+				qcom,bus-min = <10>;
+				qcom,bus-max = <12>;
+			};
+
+			qcom,gpu-pwrlevel@2 {
+				reg = <2>;
+				qcom,gpu-freq = <403000000>;
+				qcom,bus-freq = <10>;
+				qcom,bus-min = <9>;
+				qcom,bus-max = <11>;
+			};
+
+			qcom,gpu-pwrlevel@3 {
+				reg = <3>;
+				qcom,gpu-freq = <332000000>;
+				qcom,bus-freq = <7>;
+				qcom,bus-min = <6>;
+				qcom,bus-max = <8>;
+			};
+
+			qcom,gpu-pwrlevel@4 {
+				reg = <4>;
+				qcom,gpu-freq = <251000000>;
+				qcom,bus-freq = <4>;
+				qcom,bus-min = <3>;
+				qcom,bus-max = <5>;
+			};
+
+			qcom,gpu-pwrlevel@5 {
+				reg = <5>;
+				qcom,gpu-freq = <171000000>;
+				qcom,bus-freq = <3>;
+				qcom,bus-min = <1>;
+				qcom,bus-max = <4>;
+			};
+
+			qcom,gpu-pwrlevel@6 {
+				reg = <6>;
+				qcom,gpu-freq = <27000000>;
+				qcom,bus-freq = <0>;
+				qcom,bus-min = <0>;
+				qcom,bus-max = <0>;
+			};
+		};
+
+	};
+
+	kgsl_msm_iommu: qcom,kgsl-iommu {
+		compatible = "qcom,kgsl-smmu-v2";
+
+		reg = <0x05040000 0x10000>;
+		qcom,protect = <0x40000 0x10000>;
+		qcom,micro-mmu-control = <0x6000>;
+
+		clocks = <&clock_gcc clk_gcc_gpu_cfg_ahb_clk>,
+			<&clock_gcc clk_gcc_bimc_gfx_clk>,
+			<&clock_gcc clk_gcc_gpu_bimc_gfx_clk>;
+
+		clock-names = "iface_clk", "mem_clk", "mem_iface_clk";
+
+		qcom,secure_align_mask = <0xfff>;
+		qcom,retention;
+		qcom,hyp_secure_alloc;
+
+		gfx3d_user: gfx3d_user {
+			compatible = "qcom,smmu-kgsl-cb";
+			label = "gfx3d_user";
+			iommus = <&kgsl_smmu 0>;
+			qcom,gpu-offset = <0x48000>;
+		};
+
+		gfx3d_secure: gfx3d_secure {
+			compatible = "qcom,smmu-kgsl-cb";
+			iommus = <&kgsl_smmu 2>;
+		};
+	};
+};
diff -Nruw linux-4.4.115-fbx/arch/arm/boot/dts/qcom./msm8998-ion.dtsi linux-4.4.115-fbx/arch/arm/boot/dts/qcom/msm8998-ion.dtsi
--- linux-4.4.115-fbx/arch/arm/boot/dts/qcom./msm8998-ion.dtsi	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/arch/arm/boot/dts/qcom/msm8998-ion.dtsi	2019-01-22 16:16:21.195225506 +0100
@@ -0,0 +1,53 @@
+/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+&soc {
+	qcom,ion {
+		compatible = "qcom,msm-ion";
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		system_heap: qcom,ion-heap@25 {
+			reg = <25>;
+			qcom,ion-heap-type = "SYSTEM";
+		};
+
+		qcom,ion-heap@22 { /* ADSP HEAP */
+			reg = <22>;
+			memory-region = <&adsp_mem>;
+			qcom,ion-heap-type = "DMA";
+		};
+
+		qcom,ion-heap@27 { /* QSEECOM HEAP */
+			reg = <27>;
+			memory-region = <&qseecom_mem>;
+			qcom,ion-heap-type = "DMA";
+		};
+
+		qcom,ion-heap@13 { /* SPSS HEAP */
+			reg = <13>;
+			memory-region = <&sp_mem>;
+			qcom,ion-heap-type = "DMA";
+		};
+
+		qcom,ion-heap@10 { /* SECURE DISPLAY HEAP */
+			reg = <10>;
+			memory-region = <&secure_display_memory>;
+			qcom,ion-heap-type = "HYP_CMA";
+		};
+
+		qcom,ion-heap@9 {
+			reg = <9>;
+			qcom,ion-heap-type = "SYSTEM_SECURE";
+		};
+	};
+};
diff -Nruw linux-4.4.115-fbx/arch/arm/boot/dts/qcom./msm8998-mdss-pll.dtsi linux-4.4.115-fbx/arch/arm/boot/dts/qcom/msm8998-mdss-pll.dtsi
--- linux-4.4.115-fbx/arch/arm/boot/dts/qcom./msm8998-mdss-pll.dtsi	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/arch/arm/boot/dts/qcom/msm8998-mdss-pll.dtsi	2019-01-22 16:16:21.195225506 +0100
@@ -0,0 +1,173 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+&soc {
+	mdss_dsi0_pll: qcom,mdss_dsi_pll@c994400 {
+		compatible = "qcom,mdss_dsi_pll_8998";
+		status = "ok";
+		label = "MDSS DSI 0 PLL";
+		cell-index = <0>;
+		#clock-cells = <1>;
+
+		reg = <0xc994a00 0x1c0>,
+		      <0xc994400 0x7c0>,
+		      <0x0c8c2300 0x8>;
+		reg-names = "pll_base", "phy_base", "gdsc_base";
+
+		gdsc-supply = <&gdsc_mdss>;
+
+		clocks = <&clock_mmss clk_mmss_mdss_ahb_clk>;
+		clock-names = "iface_clk";
+		clock-rate = <0>;
+		qcom,dsi-pll-ssc-en;
+		qcom,dsi-pll-ssc-mode = "down-spread";
+
+		qcom,platform-supply-entries {
+			#address-cells = <1>;
+			#size-cells = <0>;
+
+			qcom,platform-supply-entry@0 {
+				reg = <0>;
+				qcom,supply-name = "gdsc";
+				qcom,supply-min-voltage = <0>;
+				qcom,supply-max-voltage = <0>;
+				qcom,supply-enable-load = <0>;
+				qcom,supply-disable-load = <0>;
+			};
+
+		};
+	};
+
+	mdss_dsi1_pll: qcom,mdss_dsi_pll@c996400 {
+		compatible = "qcom,mdss_dsi_pll_8998";
+		status = "ok";
+		label = "MDSS DSI 1 PLL";
+		cell-index = <1>;
+		#clock-cells = <1>;
+
+		reg = <0x0c996a00 0x1c0>,
+		      <0x0c996400 0x7c0>,
+		      <0x008c2300 0x8>;
+		reg-names = "pll_base", "phy_base", "gdsc_base";
+
+		gdsc-supply = <&gdsc_mdss>;
+
+		clocks = <&clock_mmss clk_mmss_mdss_ahb_clk>;
+		clock-names = "iface_clk";
+		clock-rate = <0>;
+		qcom,dsi-pll-ssc-en;
+		qcom,dsi-pll-ssc-mode = "down-spread";
+
+		qcom,platform-supply-entries {
+			#address-cells = <1>;
+			#size-cells = <0>;
+
+			qcom,platform-supply-entry@0 {
+				reg = <0>;
+				qcom,supply-name = "gdsc";
+				qcom,supply-min-voltage = <0>;
+				qcom,supply-max-voltage = <0>;
+				qcom,supply-enable-load = <0>;
+				qcom,supply-disable-load = <0>;
+			};
+		};
+	};
+
+	mdss_dp_pll: qcom,mdss_dp_pll@c011000 {
+		compatible = "qcom,mdss_dp_pll_8998";
+		status = "ok";
+		label = "MDSS DP PLL";
+		cell-index = <0>;
+		#clock-cells = <1>;
+
+		reg = <0xc011c00 0x190>,
+		      <0xc011000 0x910>,
+		      <0x0c8c2300 0x8>;
+		reg-names = "pll_base", "phy_base", "gdsc_base";
+
+		gdsc-supply = <&gdsc_mdss>;
+
+		clocks = <&clock_mmss clk_mmss_mdss_ahb_clk>,
+			 <&clock_gcc clk_ln_bb_clk1>,
+			 <&clock_gcc clk_gcc_usb3_clkref_clk>;
+		clock-names = "iface_clk", "ref_clk_src", "ref_clk";
+		clock-rate = <0>;
+
+		qcom,platform-supply-entries {
+			#address-cells = <1>;
+			#size-cells = <0>;
+
+			qcom,platform-supply-entry@0 {
+				reg = <0>;
+				qcom,supply-name = "gdsc";
+				qcom,supply-min-voltage = <0>;
+				qcom,supply-max-voltage = <0>;
+				qcom,supply-enable-load = <0>;
+				qcom,supply-disable-load = <0>;
+			};
+
+		};
+	};
+
+	mdss_hdmi_pll: qcom,mdss_hdmi_pll@c9a0600 {
+		compatible = "qcom,mdss_hdmi_pll_8998";
+		label = "MDSS HDMI PLL";
+		cell-index = <2>;
+		#clock-cells = <1>;
+
+		reg = <0xc9a0600 0xb10>,
+		      <0xc9a1200 0x0e4>,
+		      <0xc8c2300 0x8>;
+		reg-names = "pll_base", "phy_base", "gdsc_base";
+
+		gdsc-supply = <&gdsc_mdss>;
+		vdda-pll-supply = <&pm8998_l2>;
+		vdda-phy-supply = <&pm8998_l12>;
+
+		clocks = <&clock_mmss clk_mmss_mdss_ahb_clk>,
+			 <&clock_gcc clk_gcc_hdmi_clkref_clk>,
+			 <&clock_gcc clk_ln_bb_clk1>;
+		clock-names = "iface_clk", "ref_clk", "ref_clk_src";
+		clock-rate = <0>;
+
+		qcom,platform-supply-entries {
+			#address-cells = <1>;
+			#size-cells = <0>;
+
+			qcom,platform-supply-entry@0 {
+				reg = <0>;
+				qcom,supply-name = "gdsc";
+				qcom,supply-min-voltage = <0>;
+				qcom,supply-max-voltage = <0>;
+				qcom,supply-enable-load = <0>;
+				qcom,supply-disable-load = <0>;
+			};
+			qcom,platform-supply-entry@1 {
+				reg = <1>;
+				qcom,supply-name = "vdda-pll";
+				qcom,supply-min-voltage = <1200000>;
+				qcom,supply-max-voltage = <1200000>;
+				qcom,supply-enable-load = <14200>;
+				qcom,supply-disable-load = <1>;
+			};
+
+			qcom,platform-supply-entry@2 {
+				reg = <2>;
+				qcom,supply-name = "vdda-phy";
+				qcom,supply-min-voltage = <1800000>;
+				qcom,supply-max-voltage = <1800000>;
+				qcom,supply-enable-load = <13100>;
+				qcom,supply-disable-load = <4>;
+			};
+		};
+	};
+};
diff -Nruw linux-4.4.115-fbx/arch/arm/boot/dts/qcom./msm8998-pinctrl.dtsi linux-4.4.115-fbx/arch/arm/boot/dts/qcom/msm8998-pinctrl.dtsi
--- linux-4.4.115-fbx/arch/arm/boot/dts/qcom./msm8998-pinctrl.dtsi	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/arch/arm/boot/dts/qcom/msm8998-pinctrl.dtsi	2019-01-22 16:16:21.195225506 +0100
@@ -0,0 +1,2460 @@
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+&soc {
+	tlmm: pinctrl@03400000 {
+		compatible = "qcom,msm8998-pinctrl";
+		reg = <0x03400000 0xc00000>;
+		interrupts = <0 208 0>;
+		gpio-controller;
+		#gpio-cells = <2>;
+		interrupt-controller;
+		#interrupt-cells = <2>;
+
+		uart_console_active: uart_console_active {
+			mux {
+				pins = "gpio4", "gpio5";
+				function = "blsp_uart8_a";
+			};
+
+			config {
+				pins = "gpio4", "gpio5";
+				drive-strength = <2>;
+				bias-disable;
+			};
+		};
+
+		/* HS UART CONFIGURATION */
+		blsp1_uart1_active: blsp1_uart1_active {
+			mux {
+				pins = "gpio0", "gpio1", "gpio2", "gpio3";
+				function = "blsp_uart1_a";
+			};
+
+			config {
+				pins = "gpio0", "gpio1", "gpio2", "gpio3";
+				drive-strength = <2>;
+				bias-disable;
+			};
+		};
+
+		blsp1_uart1_sleep: blsp1_uart1_sleep {
+			mux {
+				pins = "gpio0", "gpio1", "gpio2", "gpio3";
+				function = "gpio";
+			};
+
+			config {
+				pins = "gpio0", "gpio1", "gpio2", "gpio3";
+				drive-strength = <2>;
+				bias-disable;
+			};
+		};
+
+		blsp1_uart2_active: blsp1_uart2_active {
+			mux {
+				pins = "gpio31", "gpio34", "gpio33", "gpio32";
+				function = "blsp_uart2_a";
+			};
+
+			config {
+				pins = "gpio31", "gpio34", "gpio33", "gpio32";
+				drive-strength = <2>;
+				bias-disable;
+			};
+		};
+
+		blsp1_uart2_sleep: blsp1_uart2_sleep {
+			mux {
+				pins = "gpio31", "gpio34", "gpio33", "gpio32";
+				function = "gpio";
+			};
+
+			config {
+				pins = "gpio31", "gpio34", "gpio33", "gpio32";
+				drive-strength = <2>;
+				bias-disable;
+			};
+		};
+
+		blsp1_uart3: blsp1_uart3 {
+			blsp1_uart3_tx_active: blsp1_uart3_tx_active {
+				mux {
+					pins = "gpio45";
+					function = "blsp_uart3_a";
+				};
+
+				config {
+					pins = "gpio45";
+					drive-strength = <2>;
+					bias-disable;
+				};
+			};
+
+			blsp1_uart3_tx_sleep: blsp1_uart3_tx_sleep {
+				mux {
+					pins = "gpio45";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio45";
+					drive-strength = <2>;
+					bias-pull-up;
+				};
+			};
+
+			blsp1_uart3_rxcts_active: blsp1_uart3_rxcts_active {
+				mux {
+					pins = "gpio46", "gpio47";
+					function = "blsp_uart3_a";
+				};
+
+				config {
+					pins = "gpio46", "gpio47";
+					drive-strength = <2>;
+					bias-disable;
+				};
+			};
+
+			blsp1_uart3_rxcts_sleep: blsp1_uart3_rxcts_sleep {
+				mux {
+					pins = "gpio46", "gpio47";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio46", "gpio47";
+					drive-strength = <2>;
+					bias-no-pull;
+				};
+			};
+
+			blsp1_uart3_rfr_active: blsp1_uart3_rfr_active {
+				mux {
+					pins = "gpio48";
+					function = "blsp_uart3_a";
+				};
+
+				config {
+					pins = "gpio48";
+					drive-strength = <2>;
+					bias-disable;
+				};
+			};
+
+			blsp1_uart3_rfr_sleep: blsp1_uart3_rfr_sleep {
+				mux {
+					pins = "gpio48";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio48";
+					drive-strength = <2>;
+					bias-no-pull;
+				};
+			};
+		};
+
+		blsp2_uart1_active: blsp2_uart1_active {
+			mux {
+				pins = "gpio53", "gpio54", "gpio55", "gpio56";
+				function = "blsp_uart7_a";
+			};
+
+			config {
+				pins = "gpio53", "gpio54", "gpio55", "gpio56";
+				drive-strength = <2>;
+				bias-disable;
+			};
+		};
+
+		blsp2_uart1_sleep: blsp2_uart1_sleep {
+			mux {
+				pins = "gpio53", "gpio54", "gpio55", "gpio56";
+				function = "gpio";
+			};
+
+			config {
+				pins = "gpio53", "gpio54", "gpio55", "gpio56";
+				drive-strength = <2>;
+				bias-disable;
+			};
+		};
+
+		blsp2_uart2_active: blsp2_uart2_active {
+			mux {
+				pins = "gpio4", "gpio5", "gpio6", "gpio7";
+				function = "blsp_uart8_a";
+			};
+
+			config {
+				pins = "gpio4", "gpio5", "gpio6", "gpio7";
+				drive-strength = <2>;
+				bias-disable;
+			};
+		};
+
+		blsp2_uart2_sleep: blsp2_uart2_sleep {
+			mux {
+				pins = "gpio4", "gpio5", "gpio6", "gpio7";
+				function = "gpio";
+			};
+
+			config {
+				pins = "gpio4", "gpio5", "gpio6", "gpio7";
+				drive-strength = <2>;
+				bias-disable;
+			};
+		};
+
+		blsp2_uart3_active: blsp2_uart3_active {
+			mux {
+				pins = "gpio49", "gpio50", "gpio51", "gpio52";
+				function = "blsp_uart9_a";
+			};
+
+			config {
+				pins = "gpio49", "gpio50", "gpio51", "gpio52";
+				drive-strength = <2>;
+				bias-disable;
+			};
+		};
+
+		blsp2_uart3_sleep: blsp2_uart3_sleep {
+			mux {
+				pins = "gpio49", "gpio50", "gpio51", "gpio52";
+				function = "gpio";
+			};
+
+			config {
+				pins = "gpio49", "gpio50", "gpio51", "gpio52";
+				drive-strength = <2>;
+				bias-disable;
+			};
+		};
+
+		/* PCIE CONFIGURATION */
+
+		pcie0 {
+			pcie0_clkreq_default: pcie0_clkreq_default {
+				mux {
+					pins = "gpio36";
+					function = "pci_e0";
+				};
+
+				config {
+					pins = "gpio36";
+					drive-strength = <2>;
+					bias-pull-up;
+				};
+			};
+
+			pcie0_perst_default: pcie0_perst_default {
+				mux {
+					pins = "gpio35";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio35";
+					drive-strength = <2>;
+					bias-pull-down;
+				};
+			};
+
+			pcie0_wake_default: pcie0_wake_default {
+				mux {
+					pins = "gpio37";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio37";
+					drive-strength = <2>;
+					bias-pull-up;
+				};
+			};
+
+			pcie0_wake_sleep: pcie0_wake_sleep {
+				mux {
+					pins = "gpio37";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio37";
+					drive-strength = <2>;
+					bias-pull-up;
+				};
+			};
+		};
+
+		/* I2C CONFIGURATION */
+		i2c_1 {
+			i2c_1_active: i2c_1_active {
+				mux {
+					pins = "gpio2", "gpio3";
+					function = "blsp_i2c1";
+				};
+
+				config {
+					pins = "gpio2", "gpio3";
+					drive-strength = <2>;
+					bias-disable;
+				};
+			};
+
+			i2c_1_sleep: i2c_1_sleep {
+				mux {
+					pins = "gpio2", "gpio3";
+					function = "blsp_i2c1";
+				};
+
+				config {
+					pins = "gpio2", "gpio3";
+					drive-strength = <2>;
+					bias-pull-up;
+				};
+			};
+		};
+
+		i2c_2 {
+			i2c_2_active: i2c_2_active {
+				mux {
+					pins = "gpio32", "gpio33";
+					function = "blsp_i2c2";
+				};
+
+				config {
+					pins = "gpio32", "gpio33";
+					drive-strength = <2>;
+					bias-disable;
+				};
+			};
+
+			i2c_2_sleep: i2c_2_sleep {
+				mux {
+					pins = "gpio32", "gpio33";
+					function = "blsp_i2c2";
+				};
+
+				config {
+					pins = "gpio32", "gpio33";
+					drive-strength = <2>;
+					bias-pull-up;
+				};
+			};
+		};
+
+		i2c_3 {
+			i2c_3_active: i2c_3_active {
+				mux {
+					pins = "gpio47", "gpio48";
+					function = "blsp_i2c3";
+				};
+
+				config {
+					pins = "gpio47", "gpio48";
+					drive-strength = <2>;
+					bias-disable;
+				};
+			};
+
+			i2c_3_sleep: i2c_3_sleep {
+				mux {
+					pins = "gpio47", "gpio48";
+					function = "blsp_i2c3";
+				};
+
+				config {
+					pins = "gpio47", "gpio48";
+					drive-strength = <2>;
+					bias-pull-up;
+				};
+			};
+		};
+
+		i2c_4 {
+			i2c_4_active: i2c_4_active {
+				mux {
+					pins = "gpio10", "gpio11";
+					function = "blsp_i2c4";
+				};
+
+				config {
+					pins = "gpio10", "gpio11";
+					drive-strength = <2>;
+					bias-disable;
+				};
+			};
+
+			i2c_4_sleep: i2c_4_sleep {
+				mux {
+					pins = "gpio10", "gpio11";
+					function = "blsp_i2c4";
+				};
+
+				config {
+					pins = "gpio10", "gpio11";
+					drive-strength = <2>;
+					bias-pull-up;
+				};
+			};
+		};
+
+		i2c_5 {
+			i2c_5_active: i2c_5_active {
+				mux {
+					pins = "gpio87", "gpio88";
+					function = "blsp_i2c5";
+				};
+
+				config {
+					pins = "gpio87", "gpio88";
+					drive-strength = <2>;
+					bias-disable;
+				};
+			};
+
+			i2c_5_sleep: i2c_5_sleep {
+				mux {
+					pins = "gpio87", "gpio88";
+					function = "blsp_i2c5";
+				};
+
+				config {
+					pins = "gpio87", "gpio88";
+					drive-strength = <2>;
+					bias-pull-up;
+				};
+			};
+		};
+
+		i2c_6 {
+			i2c_6_active: i2c_6_active {
+				mux {
+					pins = "gpio43", "gpio44";
+					function = "blsp_i2c6";
+				};
+
+				config {
+					pins = "gpio43", "gpio44";
+					drive-strength = <2>;
+					bias-disable;
+				};
+			};
+
+			i2c_6_sleep: i2c_6_sleep {
+				mux {
+					pins = "gpio43", "gpio44";
+					function = "blsp_i2c6";
+				};
+
+				config {
+					pins = "gpio43", "gpio44";
+					drive-strength = <2>;
+					bias-pull-up;
+				};
+			};
+		};
+
+		i2c_7 {
+			i2c_7_active: i2c_7_active {
+				mux {
+					pins = "gpio55", "gpio56";
+					function = "blsp_i2c7";
+				};
+
+				config {
+					pins = "gpio55", "gpio56";
+					drive-strength = <2>;
+					bias-disable;
+				};
+			};
+
+			i2c_7_sleep: i2c_7_sleep {
+				mux {
+					pins = "gpio55", "gpio56";
+					function = "blsp_i2c7";
+				};
+
+				config {
+					pins = "gpio55", "gpio56";
+					drive-strength = <2>;
+					bias-pull-up;
+				};
+			};
+		};
+
+		i2c_8 {
+			i2c_8_active: i2c_8_active {
+				mux {
+					pins = "gpio6", "gpio7";
+					function = "blsp_i2c8";
+				};
+
+				config {
+					pins = "gpio6", "gpio7";
+					drive-strength = <2>;
+					bias-disable;
+				};
+			};
+
+			i2c_8_sleep: i2c_8_sleep {
+				mux {
+					pins = "gpio6", "gpio7";
+					function = "blsp_i2c8";
+				};
+
+				config {
+					pins = "gpio6", "gpio7";
+					drive-strength = <2>;
+					bias-pull-up;
+				};
+			};
+		};
+
+		i2c_9 {
+			i2c_9_active: i2c_9_active {
+				mux {
+					pins = "gpio51", "gpio52";
+					function = "blsp_i2c9";
+				};
+
+				config {
+					pins = "gpio51", "gpio52";
+					drive-strength = <2>;
+					bias-disable;
+				};
+			};
+
+			i2c_9_sleep: i2c_9_sleep {
+				mux {
+					pins = "gpio51", "gpio52";
+					function = "blsp_i2c9";
+				};
+
+				config {
+					pins = "gpio51", "gpio52";
+					drive-strength = <2>;
+					bias-pull-up;
+				};
+			};
+		};
+
+		i2c_10 {
+			i2c_10_active: i2c_10_active {
+				mux {
+					pins = "gpio67", "gpio68";
+					function = "blsp_i2c10";
+				};
+
+				config {
+					pins = "gpio67", "gpio68";
+					drive-strength = <2>;
+					bias-disable;
+				};
+			};
+
+			i2c_10_sleep: i2c_10_sleep {
+				mux {
+					pins = "gpio67", "gpio68";
+					function = "blsp_i2c10";
+				};
+
+				config {
+					pins = "gpio67", "gpio68";
+					drive-strength = <2>;
+					bias-pull-up;
+				};
+			};
+		};
+
+		i2c_11 {
+			i2c_11_active: i2c_11_active {
+				mux {
+					pins = "gpio60", "gpio61";
+					function = "blsp_i2c11";
+				};
+
+				config {
+					pins = "gpio60", "gpio61";
+					drive-strength = <2>;
+					bias-disable;
+				};
+			};
+
+			i2c_11_sleep: i2c_11_sleep {
+				mux {
+					pins = "gpio60", "gpio61";
+					function = "blsp_i2c11";
+				};
+
+				config {
+					pins = "gpio60", "gpio61";
+					drive-strength = <2>;
+					bias-pull-up;
+				};
+			};
+		};
+
+		i2c_12 {
+			i2c_12_active: i2c_12_active {
+				mux {
+					pins = "gpio83", "gpio84";
+					function = "blsp_i2c12";
+				};
+
+				config {
+					pins = "gpio83", "gpio84";
+					drive-strength = <2>;
+					bias-disable;
+				};
+			};
+
+			i2c_12_sleep: i2c_12_sleep {
+				mux {
+					pins = "gpio83", "gpio84";
+					function = "blsp_i2c12";
+				};
+
+				config {
+					pins = "gpio83", "gpio84";
+					drive-strength = <2>;
+					bias-pull-up;
+				};
+			};
+		};
+
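+		/*
+		 * Usage sketch for the i2c_N pairs above (hypothetical
+		 * consumer; the controller driver's binding defines the
+		 * exact state strings):
+		 *
+		 *	pinctrl-names = "i2c_active", "i2c_sleep";
+		 *	pinctrl-0 = <&i2c_1_active>;
+		 *	pinctrl-1 = <&i2c_1_sleep>;
+		 *
+		 * The sleep states keep the blsp_i2cN function but switch to
+		 * bias-pull-up, so the bus lines idle high.
+		 */
+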
+		/* SPI CONFIGURATION */
+
+		spi_1 {
+			spi_1_active: spi_1_active {
+				mux {
+					pins = "gpio0", "gpio1",
+							"gpio2", "gpio3";
+					function = "blsp_spi1";
+				};
+
+				config {
+					pins = "gpio0", "gpio1",
+							"gpio2", "gpio3";
+					drive-strength = <6>;
+					bias-disable;
+				};
+			};
+
+			spi_1_sleep: spi_1_sleep {
+				mux {
+					pins = "gpio0", "gpio1",
+							"gpio2", "gpio3";
+					function = "blsp_spi1";
+				};
+
+				config {
+					pins = "gpio0", "gpio1",
+							"gpio2", "gpio3";
+					drive-strength = <6>;
+					bias-disable;
+				};
+			};
+
+			spi_1b_active: spi_1b_active {
+				mux {
+					pins = "gpio23", "gpio28";
+					function = "blsp1_spi_b";
+				};
+
+				config {
+					pins = "gpio23", "gpio28";
+					drive-strength = <6>;
+					bias-disable;
+				};
+			};
+
+			spi_1b_sleep: spi_1b_sleep {
+				mux {
+					pins = "gpio23", "gpio28";
+					function = "blsp1_spi_b";
+				};
+
+				config {
+					pins = "gpio23", "gpio28";
+					drive-strength = <6>;
+					bias-disable;
+				};
+			};
+		};
+
+		spi_2 {
+			spi_2_active: spi_2_active {
+				mux {
+					pins = "gpio31", "gpio34",
+							"gpio32", "gpio33";
+					function = "blsp_spi2";
+				};
+
+				config {
+					pins = "gpio31", "gpio34",
+							"gpio32", "gpio33";
+					drive-strength = <6>;
+					bias-disable;
+				};
+			};
+
+			spi_2_sleep: spi_2_sleep {
+				mux {
+					pins = "gpio31", "gpio34",
+							"gpio32", "gpio33";
+					function = "blsp_spi2";
+				};
+
+				config {
+					pins = "gpio31", "gpio34",
+							"gpio32", "gpio33";
+					drive-strength = <6>;
+					bias-disable;
+				};
+			};
+		};
+
+		spi_3 {
+			spi_3_active: spi_3_active {
+				mux {
+					pins = "gpio45", "gpio46",
+							"gpio47", "gpio48";
+					function = "blsp_spi3";
+				};
+
+				config {
+					pins = "gpio45", "gpio46",
+							"gpio47", "gpio48";
+					drive-strength = <6>;
+					bias-disable;
+				};
+			};
+
+			spi_3_sleep: spi_3_sleep {
+				mux {
+					pins = "gpio45", "gpio46",
+							"gpio47", "gpio48";
+					function = "blsp_spi3";
+				};
+
+				config {
+					pins = "gpio45", "gpio46",
+							"gpio47", "gpio48";
+					drive-strength = <6>;
+					bias-disable;
+				};
+			};
+		};
+
+		spi_4 {
+			spi_4_active: spi_4_active {
+				mux {
+					pins = "gpio8", "gpio9",
+							"gpio10", "gpio11";
+					function = "blsp_spi4";
+				};
+
+				config {
+					pins = "gpio8", "gpio9",
+							"gpio10", "gpio11";
+					drive-strength = <6>;
+					bias-disable;
+				};
+			};
+
+			spi_4_sleep: spi_4_sleep {
+				mux {
+					pins = "gpio8", "gpio9",
+							"gpio10", "gpio11";
+					function = "blsp_spi4";
+				};
+
+				config {
+					pins = "gpio8", "gpio9",
+							"gpio10", "gpio11";
+					drive-strength = <6>;
+					bias-disable;
+				};
+			};
+		};
+
+		spi_5 {
+			spi_5_active: spi_5_active {
+				mux {
+					pins = "gpio85", "gpio86",
+							"gpio87", "gpio88";
+					function = "blsp_spi5";
+				};
+
+				config {
+					pins = "gpio85", "gpio86",
+							"gpio87", "gpio88";
+					drive-strength = <6>;
+					bias-disable;
+				};
+			};
+
+			spi_5_sleep: spi_5_sleep {
+				mux {
+					pins = "gpio85", "gpio86",
+							"gpio87", "gpio88";
+					function = "blsp_spi5";
+				};
+
+				config {
+					pins = "gpio85", "gpio86",
+							"gpio87", "gpio88";
+					drive-strength = <6>;
+					bias-disable;
+				};
+			};
+		};
+
+		spi_6 {
+			spi_6_active: spi_6_active {
+				mux {
+					pins = "gpio41", "gpio42",
+							"gpio43", "gpio44";
+					function = "blsp_spi6";
+				};
+
+				config {
+					pins = "gpio41", "gpio42",
+							"gpio43", "gpio44";
+					drive-strength = <6>;
+					bias-disable;
+				};
+			};
+
+			spi_6_sleep: spi_6_sleep {
+				mux {
+					pins = "gpio41", "gpio42",
+							"gpio43", "gpio44";
+					function = "blsp_spi6";
+				};
+
+				config {
+					pins = "gpio41", "gpio42",
+							"gpio43", "gpio44";
+					drive-strength = <6>;
+					bias-disable;
+				};
+			};
+		};
+
+		spi_7 {
+			spi_7_active: spi_7_active {
+				mux {
+					pins = "gpio53", "gpio54",
+							"gpio55", "gpio56";
+					function = "blsp_spi7";
+				};
+
+				config {
+					pins = "gpio53", "gpio54",
+							"gpio55", "gpio56";
+					drive-strength = <6>;
+					bias-disable;
+				};
+			};
+
+			spi_7_sleep: spi_7_sleep {
+				mux {
+					pins = "gpio53", "gpio54",
+							"gpio55", "gpio56";
+					function = "blsp_spi7";
+				};
+
+				config {
+					pins = "gpio53", "gpio54",
+							"gpio55", "gpio56";
+					drive-strength = <6>;
+					bias-disable;
+				};
+			};
+		};
+
+		spi_8 {
+			spi_8_active: spi_8_active {
+				mux {
+					pins = "gpio4", "gpio5",
+							"gpio6", "gpio7";
+					function = "blsp_spi8";
+				};
+
+				config {
+					pins = "gpio4", "gpio5",
+							"gpio6", "gpio7";
+					drive-strength = <6>;
+					bias-disable;
+				};
+			};
+
+			spi_8_sleep: spi_8_sleep {
+				mux {
+					pins = "gpio4", "gpio5",
+							"gpio6", "gpio7";
+					function = "blsp_spi8";
+				};
+
+				config {
+					pins = "gpio4", "gpio5",
+							"gpio6", "gpio7";
+					drive-strength = <6>;
+					bias-disable;
+				};
+			};
+		};
+
+		spi_9 {
+			spi_9_active: spi_9_active {
+				mux {
+					pins = "gpio49", "gpio50",
+							"gpio51", "gpio52";
+					function = "blsp_spi9";
+				};
+
+				config {
+					pins = "gpio49", "gpio50",
+							"gpio51", "gpio52";
+					drive-strength = <6>;
+					bias-disable;
+				};
+			};
+
+			spi_9_sleep: spi_9_sleep {
+				mux {
+					pins = "gpio49", "gpio50",
+							"gpio51", "gpio52";
+					function = "blsp_spi9";
+				};
+
+				config {
+					pins = "gpio49", "gpio50",
+							"gpio51", "gpio52";
+					drive-strength = <6>;
+					bias-disable;
+				};
+			};
+		};
+
+		spi_10 {
+			spi_10_active: spi_10_active {
+				mux {
+					pins = "gpio65", "gpio66",
+							"gpio67", "gpio68";
+					function = "blsp_spi10";
+				};
+
+				config {
+					pins = "gpio65", "gpio66",
+							"gpio67", "gpio68";
+					drive-strength = <6>;
+					bias-disable;
+				};
+			};
+
+			spi_10_sleep: spi_10_sleep {
+				mux {
+					pins = "gpio65", "gpio66",
+							"gpio67", "gpio68";
+					function = "blsp_spi10";
+				};
+
+				config {
+					pins = "gpio65", "gpio66",
+							"gpio67", "gpio68";
+					drive-strength = <6>;
+					bias-disable;
+				};
+			};
+		};
+
+		spi_11 {
+			spi_11_active: spi_11_active {
+				mux {
+					pins = "gpio58", "gpio59",
+							"gpio60", "gpio61";
+					function = "blsp_spi11";
+				};
+
+				config {
+					pins = "gpio58", "gpio59",
+							"gpio60", "gpio61";
+					drive-strength = <6>;
+					bias-disable;
+				};
+			};
+
+			spi_11_sleep: spi_11_sleep {
+				mux {
+					pins = "gpio58", "gpio59",
+							"gpio60", "gpio61";
+					function = "blsp_spi11";
+				};
+
+				config {
+					pins = "gpio58", "gpio59",
+							"gpio60", "gpio61";
+					drive-strength = <6>;
+					bias-disable;
+				};
+			};
+		};
+
+		spi_12 {
+			spi_12_active: spi_12_active {
+				mux {
+					pins = "gpio81", "gpio82",
+							"gpio83", "gpio84";
+					function = "blsp_spi12";
+				};
+
+				config {
+					pins = "gpio81", "gpio82",
+							"gpio83", "gpio84";
+					drive-strength = <6>;
+					bias-disable;
+				};
+			};
+
+			spi_12_sleep: spi_12_sleep {
+				mux {
+					pins = "gpio81", "gpio82",
+							"gpio83", "gpio84";
+					function = "blsp_spi12";
+				};
+
+				config {
+					pins = "gpio81", "gpio82",
+							"gpio83", "gpio84";
+					drive-strength = <6>;
+					bias-disable;
+				};
+			};
+		};
+
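+		/*
+		 * Usage sketch for the spi_N pairs above (hypothetical
+		 * consumer node):
+		 *
+		 *	pinctrl-names = "spi_default", "spi_sleep";
+		 *	pinctrl-0 = <&spi_1_active>;
+		 *	pinctrl-1 = <&spi_1_sleep>;
+		 *
+		 * Here active and sleep carry identical 6 mA/no-pull
+		 * settings, so the split mainly keeps the naming symmetric
+		 * with the other interfaces.
+		 */
+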
+		/* CAMERA CONFIGURATION */
+
+		cci0_active: cci0_active {
+			mux {
+				/* CLK, DATA */
+				pins = "gpio17", "gpio18"; /* only 2 */
+				function = "cci_i2c";
+			};
+
+			config {
+				pins = "gpio17", "gpio18";
+				bias-pull-up; /* PULL UP */
+				drive-strength = <2>; /* 2 MA */
+			};
+		};
+
+		cci0_suspend: cci0_suspend {
+			mux {
+				/* CLK, DATA */
+				pins = "gpio17", "gpio18";
+				function = "cci_i2c";
+			};
+
+			config {
+				pins = "gpio17", "gpio18";
+				bias-pull-down; /* PULL DOWN */
+				drive-strength = <2>; /* 2 MA */
+			};
+		};
+
+		cci1_active: cci1_active {
+			mux {
+				/* CLK, DATA */
+				pins = "gpio19", "gpio20";
+				function = "cci_i2c";
+			};
+
+			config {
+				pins = "gpio19", "gpio20";
+				bias-pull-up; /* PULL UP */
+				drive-strength = <2>; /* 2 MA */
+			};
+		};
+
+		cci1_suspend: cci1_suspend {
+			mux {
+				/* CLK, DATA */
+				pins = "gpio19", "gpio20";
+				function = "cci_i2c";
+			};
+
+			config {
+				pins = "gpio19", "gpio20";
+				bias-pull-down; /* PULL DOWN */
+				drive-strength = <2>; /* 2 MA */
+			};
+		};
+
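+		/*
+		 * Usage sketch (hypothetical consumer; state names
+		 * illustrative): the CCI camera-control master would pair
+		 * these as
+		 *
+		 *	pinctrl-names = "cci_default", "cci_suspend";
+		 *	pinctrl-0 = <&cci0_active &cci1_active>;
+		 *	pinctrl-1 = <&cci0_suspend &cci1_suspend>;
+		 */
+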
+		/* MDSS CONFIGURATION */
+
+		mdss_dp_aux_active: mdss_dp_aux_active {
+			mux {
+				pins = "gpio77", "gpio78";
+				function = "gpio";
+			};
+
+			config {
+				pins = "gpio77", "gpio78";
+				bias-disable; /* no pull */
+				drive-strength = <8>;
+			};
+		};
+
+		mdss_dp_aux_suspend: mdss_dp_aux_suspend {
+			mux {
+				pins = "gpio77", "gpio78";
+				function = "gpio";
+			};
+
+			config {
+				pins = "gpio77", "gpio78";
+				bias-pull-down;
+				drive-strength = <2>;
+			};
+		};
+
+		mdss_dp_usbplug_cc_active: mdss_dp_usbplug_cc_active {
+			mux {
+				pins = "gpio38";
+				function = "gpio";
+			};
+
+			config {
+				pins = "gpio38";
+				bias-disable;
+				drive-strength = <16>;
+			};
+		};
+
+		mdss_dp_usbplug_cc_suspend: mdss_dp_usbplug_cc_suspend {
+			mux {
+				pins = "gpio38";
+				function = "gpio";
+			};
+
+			config {
+				pins = "gpio38";
+				bias-pull-down;
+				drive-strength = <2>;
+			};
+		};
+
+		mdss_dp_hpd_active: mdss_dp_hpd_active {
+			mux {
+				pins = "gpio34";
+				function = "edp_hot";
+			};
+
+			config {
+				pins = "gpio34";
+				bias-pull-down;
+				drive-strength = <16>;
+			};
+		};
+
+		mdss_dp_hpd_suspend: mdss_dp_hpd_suspend {
+			mux {
+				pins = "gpio34";
+				function = "edp_hot";
+			};
+
+			config {
+				pins = "gpio34";
+				bias-pull-down;
+				drive-strength = <2>;
+			};
+		};
+
+		mdss_hdmi_5v_active: mdss_hdmi_5v_active {
+			mux {
+				pins = "gpio133";
+				function = "gpio";
+			};
+
+			config {
+				pins = "gpio133";
+				bias-pull-up;
+				drive-strength = <16>;
+			};
+		};
+
+		mdss_hdmi_5v_suspend: mdss_hdmi_5v_suspend {
+			mux {
+				pins = "gpio133";
+				function = "gpio";
+			};
+
+			config {
+				pins = "gpio133";
+				bias-pull-down;
+				drive-strength = <2>;
+			};
+		};
+
+		mdss_hdmi_hpd_active: mdss_hdmi_hpd_active {
+			mux {
+				pins = "gpio34";
+				function = "hdmi_hot";
+			};
+
+			config {
+				pins = "gpio34";
+				bias-pull-down;
+				drive-strength = <16>;
+			};
+		};
+
+		mdss_hdmi_hpd_suspend: mdss_hdmi_hpd_suspend {
+			mux {
+				pins = "gpio34";
+				function = "hdmi_hot";
+			};
+
+			config {
+				pins = "gpio34";
+				bias-pull-down;
+				drive-strength = <2>;
+			};
+		};
+
+		mdss_hdmi_ddc_active: mdss_hdmi_ddc_active {
+			mux {
+				pins = "gpio32", "gpio33";
+				function = "hdmi_ddc";
+			};
+
+			config {
+				pins = "gpio32", "gpio33";
+				drive-strength = <2>;
+				bias-pull-up;
+			};
+		};
+
+		mdss_hdmi_ddc_suspend: mdss_hdmi_ddc_suspend {
+			mux {
+				pins = "gpio32", "gpio33";
+				function = "hdmi_ddc";
+			};
+
+			config {
+				pins = "gpio32", "gpio33";
+				drive-strength = <2>;
+				bias-pull-up;
+			};
+		};
+
+		mdss_hdmi_cec_active: mdss_hdmi_cec_active {
+			mux {
+				pins = "gpio31";
+				function = "hdmi_cec";
+			};
+
+			config {
+				pins = "gpio31";
+				drive-strength = <2>;
+				bias-pull-up;
+			};
+		};
+
+		mdss_hdmi_cec_suspend: mdss_hdmi_cec_suspend {
+			mux {
+				pins = "gpio31";
+				function = "hdmi_cec";
+			};
+
+			config {
+				pins = "gpio31";
+				drive-strength = <2>;
+				bias-pull-up;
+			};
+		};
+
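+		/*
+		 * Usage sketch (hypothetical consumer; the MDSS driver
+		 * defines the real state names): the HDMI TX node would
+		 * group the related actives, e.g.
+		 *
+		 *	pinctrl-0 = <&mdss_hdmi_hpd_active
+		 *			&mdss_hdmi_ddc_active
+		 *			&mdss_hdmi_cec_active>;
+		 *
+		 * with the matching *_suspend variants in its sleep state.
+		 */
+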
+		/* UFS CONFIGURATION */
+
+		ufs_dev_reset_assert: ufs_dev_reset_assert {
+			config {
+				pins = "ufs_reset";
+				bias-pull-down;		/* default: pull down */
+				/*
+				 * UFS_RESET drive strengths follow a
+				 * different scale than typical GPIO drive
+				 * strengths.
+				 *
+				 * The following table clarifies the mapping:
+				 *
+				 * HDRV value | UFS_RESET | Typical GPIO
+				 *   (dec)    |   (mA)    |    (mA)
+				 *     0      |   0.8     |    2
+				 *     1      |   1.55    |    4
+				 *     2      |   2.35    |    6
+				 *     3      |   3.1     |    8
+				 *     4      |   3.9     |    10
+				 *     5      |   4.65    |    12
+				 *     6      |   5.4     |    14
+				 *     7      |   6.15    |    16
+				 *
+				 * The POR value for UFS_RESET HDRV is 3
+				 * (3.1 mA) and we want to keep it. Hence
+				 * specify 8 mA via the "drive-strength"
+				 * binding, which results in 3 being written
+				 * to the HDRV field.
+				 */
+				drive-strength = <8>;	/* default: 3.1 mA */
+				output-low; /* active low reset */
+			};
+		};
+
+		ufs_dev_reset_deassert: ufs_dev_reset_deassert {
+			config {
+				pins = "ufs_reset";
+				bias-pull-down;		/* default: pull down */
+				/*
+				 * default: 3.1 mA
+				 * check comments under ufs_dev_reset_assert
+				 */
+				drive-strength = <8>;
+				output-high; /* active low reset */
+			};
+		};
+
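+		/*
+		 * Worked example of the table above: typical GPIO drive
+		 * strength is 2 * (HDRV + 1) mA, so asking for
+		 * drive-strength = <8> makes the TLMM driver write
+		 * HDRV = 8/2 - 1 = 3, i.e. 3.1 mA on the ufs_reset pad.
+		 * The assert/deassert states then differ only in output-low
+		 * vs output-high, matching the active-low reset line.
+		 */
+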
+		/* SDHCI CONFIGURATION */
+
+		sdc2_clk_on: sdc2_clk_on {
+			config {
+				pins = "sdc2_clk";
+				bias-disable;		/* NO pull */
+				drive-strength = <16>;	/* 16 MA */
+			};
+		};
+
+		sdc2_clk_off: sdc2_clk_off {
+			config {
+				pins = "sdc2_clk";
+				bias-disable;		/* NO pull */
+				drive-strength = <2>;	/* 2 MA */
+			};
+		};
+
+		sdc2_cmd_on: sdc2_cmd_on {
+			config {
+				pins = "sdc2_cmd";
+				bias-pull-up;		/* pull up */
+				drive-strength = <10>;	/* 10 MA */
+			};
+		};
+
+		sdc2_cmd_off: sdc2_cmd_off {
+			config {
+				pins = "sdc2_cmd";
+				bias-pull-up;		/* pull up */
+				drive-strength = <2>;	/* 2 MA */
+			};
+		};
+
+		sdc2_data_on: sdc2_data_on {
+			config {
+				pins = "sdc2_data";
+				bias-pull-up;		/* pull up */
+				drive-strength = <10>;	/* 10 MA */
+			};
+		};
+
+		sdc2_data_off: sdc2_data_off {
+			config {
+				pins = "sdc2_data";
+				bias-pull-up;		/* pull up */
+				drive-strength = <2>;	/* 2 MA */
+			};
+		};
+
+		sdc2_cd_on: sdc2_cd_on {
+			mux {
+				pins = "gpio95";
+				function = "gpio";
+			};
+
+			config {
+				pins = "gpio95";
+				bias-pull-up;           /* pull up */
+				drive-strength = <2>;   /* 2 MA */
+			};
+		};
+
+		sdc2_cd_off: sdc2_cd_off {
+			mux {
+				pins = "gpio95";
+				function = "gpio";
+			};
+
+			config {
+				pins = "gpio95";
+				bias-pull-up;           /* pull up */
+				drive-strength = <2>;   /* 2 MA */
+			};
+		};
+
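+		/*
+		 * Usage sketch (hypothetical consumer; state names
+		 * illustrative): the SDHC2 controller node would bundle
+		 * these per state, e.g.
+		 *
+		 *	pinctrl-names = "active", "sleep";
+		 *	pinctrl-0 = <&sdc2_clk_on &sdc2_cmd_on
+		 *			&sdc2_data_on &sdc2_cd_on>;
+		 *	pinctrl-1 = <&sdc2_clk_off &sdc2_cmd_off
+		 *			&sdc2_data_off &sdc2_cd_off>;
+		 *
+		 * Only the drive strength changes between on and off; the
+		 * biases stay the same.
+		 */
+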
+		/* CORESIGHT */
+
+		trigout_a: trigout_a {
+			mux {
+				pins = "gpio58";
+				function = "qdss_cti1_a";
+			};
+
+			config {
+				pins = "gpio58";
+				drive-strength = <2>;
+				bias-disable;
+			};
+		};
+
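+		/*
+		 * Usage sketch (assumption, not confirmed by this patch): a
+		 * CTI node routing its trigger output to GPIO58 could
+		 * reference this state as
+		 *
+		 *	pinctrl-names = "cti-trigout-pctrl";
+		 *	pinctrl-0 = <&trigout_a>;
+		 */
+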
+		/* TSPP CONFIGURATION */
+
+		tsif0_signals_active: tsif0_signals_active {
+			tsif1_clk {
+				pins = "gpio89"; /* TSIF0 CLK */
+				function = "tsif1_clk";
+			};
+			tsif1_en {
+				pins = "gpio90"; /* TSIF0 Enable */
+				function = "tsif1_en";
+			};
+			tsif1_data {
+				pins = "gpio91"; /* TSIF0 DATA */
+				function = "tsif1_data";
+			};
+			signals_cfg {
+				pins = "gpio89", "gpio90", "gpio91";
+				drive-strength = <2>;	/* 2 mA */
+				bias-pull-down;		/* pull down */
+			};
+		};
+
+		tsif0_sync_active: tsif0_sync_active {
+			tsif1_sync {
+				pins = "gpio9";	/* TSIF0 SYNC */
+				function = "tsif1_sync";
+				drive-strength = <2>;	/* 2 mA */
+				bias-pull-down;		/* pull down */
+			};
+		};
+
+		tsif1_signals_active: tsif1_signals_active {
+			tsif2_clk {
+				pins = "gpio93"; /* TSIF1 CLK */
+				function = "tsif2_clk";
+			};
+			tsif2_en {
+				pins = "gpio94"; /* TSIF1 Enable */
+				function = "tsif2_en";
+			};
+			tsif2_data {
+				pins = "gpio95"; /* TSIF1 DATA */
+				function = "tsif2_data";
+			};
+			signals_cfg {
+				pins = "gpio93", "gpio94", "gpio95";
+				drive-strength = <2>;	/* 2 mA */
+				bias-pull-down;		/* pull down */
+			};
+		};
+
+		tsif1_sync_active: tsif1_sync_active {
+			tsif2_sync {
+				pins = "gpio96";	/* TSIF1 SYNC */
+				function = "tsif2_sync";
+				drive-strength = <2>;	/* 2 mA */
+				bias-pull-down;		/* pull down */
+			};
+		};
+
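+		/*
+		 * Usage sketch (hypothetical state layout): the TSPP demux
+		 * node would pick the signal group per mode and add the SYNC
+		 * group only for modes that use it, e.g.
+		 *
+		 *	pinctrl-0 = <&tsif0_signals_active>;
+		 *	pinctrl-1 = <&tsif0_signals_active &tsif0_sync_active>;
+		 *
+		 * Note the tsif0_* labels mux the tsif1_* pad functions; the
+		 * comments above track the logical TSIF0 port.
+		 */
+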
+		/* DIGITAL AUDIO CONFIGURATION */
+
+		pri_aux_pcm_clk {
+			pri_aux_pcm_clk_sleep: pri_aux_pcm_clk_sleep {
+				mux {
+					pins = "gpio65";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio65";
+					drive-strength = <2>;   /* 2 mA */
+					bias-pull-down;         /* PULL DOWN */
+					input-enable;
+				};
+			};
+
+			pri_aux_pcm_clk_active: pri_aux_pcm_clk_active {
+				mux {
+					pins = "gpio65";
+					function = "pri_mi2s";
+				};
+
+				config {
+					pins = "gpio65";
+					drive-strength = <8>;   /* 8 mA */
+					bias-disable;           /* NO PULL */
+					output-high;
+				};
+			};
+		};
+
+		pri_aux_pcm_sync {
+			pri_aux_pcm_sync_sleep: pri_aux_pcm_sync_sleep {
+				mux {
+					pins = "gpio66";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio66";
+					drive-strength = <2>;   /* 2 mA */
+					bias-pull-down;         /* PULL DOWN */
+					input-enable;
+				};
+			};
+
+			pri_aux_pcm_sync_active: pri_aux_pcm_sync_active {
+				mux {
+					pins = "gpio66";
+					function = "pri_mi2s_ws";
+				};
+
+				config {
+					pins = "gpio66";
+					drive-strength = <8>;   /* 8 mA */
+					bias-disable;           /* NO PULL */
+					output-high;
+				};
+			};
+		};
+
+		pri_aux_pcm_din {
+			pri_aux_pcm_din_sleep: pri_aux_pcm_din_sleep {
+				mux {
+					pins = "gpio67";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio67";
+					drive-strength = <2>;   /* 2 mA */
+					bias-pull-down;         /* PULL DOWN */
+					input-enable;
+				};
+			};
+
+			pri_aux_pcm_din_active: pri_aux_pcm_din_active {
+				mux {
+					pins = "gpio67";
+					function = "pri_mi2s";
+				};
+
+				config {
+					pins = "gpio67";
+					drive-strength = <8>;   /* 8 mA */
+					bias-disable;           /* NO PULL */
+				};
+			};
+		};
+
+		pri_aux_pcm_dout {
+			pri_aux_pcm_dout_sleep: pri_aux_pcm_dout_sleep {
+				mux {
+					pins = "gpio68";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio68";
+					drive-strength = <2>;   /* 2 mA */
+					bias-pull-down;         /* PULL DOWN */
+					input-enable;
+				};
+			};
+
+			pri_aux_pcm_dout_active: pri_aux_pcm_dout_active {
+				mux {
+					pins = "gpio68";
+					function = "pri_mi2s";
+				};
+
+				config {
+					pins = "gpio68";
+					drive-strength = <8>;   /* 8 mA */
+					bias-disable;           /* NO PULL */
+				};
+			};
+		};
+
+		sec_aux_pcm {
+			sec_aux_pcm_sleep: sec_aux_pcm_sleep {
+				mux {
+					pins = "gpio80", "gpio81";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio80", "gpio81";
+					drive-strength = <2>;   /* 2 mA */
+					bias-pull-down;         /* PULL DOWN */
+					input-enable;
+				};
+			};
+
+			sec_aux_pcm_active: sec_aux_pcm_active {
+				mux {
+					pins = "gpio80", "gpio81";
+					function = "sec_mi2s";
+				};
+
+				config {
+					pins = "gpio80", "gpio81";
+					drive-strength = <8>;   /* 8 mA */
+					bias-disable;           /* NO PULL */
+				};
+			};
+		};
+
+		sec_aux_pcm_din {
+			sec_aux_pcm_din_sleep: sec_aux_pcm_din_sleep {
+				mux {
+					pins = "gpio82";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio82";
+					drive-strength = <2>;   /* 2 mA */
+					bias-pull-down;         /* PULL DOWN */
+					input-enable;
+				};
+			};
+
+			sec_aux_pcm_din_active: sec_aux_pcm_din_active {
+				mux {
+					pins = "gpio82";
+					function = "sec_mi2s";
+				};
+
+				config {
+					pins = "gpio82";
+					drive-strength = <8>;   /* 8 mA */
+					bias-disable;           /* NO PULL */
+				};
+			};
+		};
+
+		sec_aux_pcm_dout {
+			sec_aux_pcm_dout_sleep: sec_aux_pcm_dout_sleep {
+				mux {
+					pins = "gpio83";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio83";
+					drive-strength = <2>;   /* 2 mA */
+					bias-pull-down;         /* PULL DOWN */
+					input-enable;
+				};
+			};
+
+			sec_aux_pcm_dout_active: sec_aux_pcm_dout_active {
+				mux {
+					pins = "gpio83";
+					function = "sec_mi2s";
+				};
+
+				config {
+					pins = "gpio83";
+					drive-strength = <8>;   /* 8 mA */
+					bias-disable;           /* NO PULL */
+				};
+			};
+		};
+
+		tert_aux_pcm {
+			tert_aux_pcm_sleep: tert_aux_pcm_sleep {
+				mux {
+					pins = "gpio75", "gpio76";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio75", "gpio76";
+					drive-strength = <2>;   /* 2 mA */
+					bias-pull-down;         /* PULL DOWN */
+					input-enable;
+				};
+			};
+
+			tert_aux_pcm_active: tert_aux_pcm_active {
+				mux {
+					pins = "gpio75", "gpio76";
+					function = "ter_mi2s";
+				};
+
+				config {
+					pins = "gpio75", "gpio76";
+					drive-strength = <8>;   /* 8 mA */
+					bias-disable;           /* NO PULL */
+					output-high;
+				};
+			};
+		};
+
+		tert_aux_pcm_din {
+			tert_aux_pcm_din_sleep: tert_aux_pcm_din_sleep {
+				mux {
+					pins = "gpio77";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio77";
+					drive-strength = <2>;   /* 2 mA */
+					bias-pull-down;         /* PULL DOWN */
+					input-enable;
+				};
+			};
+
+			tert_aux_pcm_din_active: tert_aux_pcm_din_active {
+				mux {
+					pins = "gpio77";
+					function = "ter_mi2s";
+				};
+
+				config {
+					pins = "gpio77";
+					drive-strength = <8>;   /* 8 mA */
+					bias-disable;           /* NO PULL */
+				};
+			};
+		};
+
+		tert_aux_pcm_dout {
+			tert_aux_pcm_dout_sleep: tert_aux_pcm_dout_sleep {
+				mux {
+					pins = "gpio78";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio78";
+					drive-strength = <2>;   /* 2 mA */
+					bias-pull-down;         /* PULL DOWN */
+					input-enable;
+				};
+			};
+
+			tert_aux_pcm_dout_active: tert_aux_pcm_dout_active {
+				mux {
+					pins = "gpio78";
+					function = "ter_mi2s";
+				};
+
+				config {
+					pins = "gpio78";
+					drive-strength = <8>;   /* 8 mA */
+					bias-disable;           /* NO PULL */
+				};
+			};
+		};
+
+		quat_aux_pcm {
+			quat_aux_pcm_sleep: quat_aux_pcm_sleep {
+				mux {
+					pins = "gpio58", "gpio59";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio58", "gpio59";
+					drive-strength = <2>;   /* 2 mA */
+					bias-pull-down;         /* PULL DOWN */
+					input-enable;
+				};
+			};
+
+			quat_aux_pcm_active: quat_aux_pcm_active {
+				mux {
+					pins = "gpio58", "gpio59";
+					function = "qua_mi2s";
+				};
+
+				config {
+					pins = "gpio58", "gpio59";
+					drive-strength = <8>;   /* 8 mA */
+					bias-disable;           /* NO PULL */
+					output-high;
+				};
+			};
+		};
+
+		quat_aux_pcm_din {
+			quat_aux_pcm_din_sleep: quat_aux_pcm_din_sleep {
+				mux {
+					pins = "gpio60";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio60";
+					drive-strength = <2>;   /* 2 mA */
+					bias-pull-down;         /* PULL DOWN */
+					input-enable;
+				};
+			};
+
+			quat_aux_pcm_din_active: quat_aux_pcm_din_active {
+				mux {
+					pins = "gpio60";
+					function = "qua_mi2s";
+				};
+
+				config {
+					pins = "gpio60";
+					drive-strength = <8>;   /* 8 mA */
+					bias-disable;           /* NO PULL */
+				};
+			};
+		};
+
+		quat_aux_pcm_dout {
+			quat_aux_pcm_dout_sleep: quat_aux_pcm_dout_sleep {
+				mux {
+					pins = "gpio61";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio61";
+					drive-strength = <2>;   /* 2 mA */
+					bias-pull-down;         /* PULL DOWN */
+					input-enable;
+				};
+			};
+
+			quat_aux_pcm_dout_active: quat_aux_pcm_dout_active {
+				mux {
+					pins = "gpio61";
+					function = "qua_mi2s";
+				};
+
+				config {
+					pins = "gpio61";
+					drive-strength = <8>;   /* 8 mA */
+					bias-disable;           /* NO PULL */
+				};
+			};
+		};
+
+		pri_mi2s_mclk {
+			pri_mi2s_mclk_sleep: pri_mi2s_mclk_sleep {
+				mux {
+					pins = "gpio64";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio64";
+					drive-strength = <2>;   /* 2 mA */
+					bias-pull-down;         /* PULL DOWN */
+					input-enable;
+				};
+			};
+
+			pri_mi2s_mclk_active: pri_mi2s_mclk_active {
+				mux {
+					pins = "gpio64";
+					function = "pri_mi2s";
+				};
+
+				config {
+					pins = "gpio64";
+					drive-strength = <8>;   /* 8 mA */
+					bias-disable;           /* NO PULL */
+					output-high;
+				};
+			};
+		};
+
+		pri_mi2s_sck {
+			pri_mi2s_sck_sleep: pri_mi2s_sck_sleep {
+				mux {
+					pins = "gpio65";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio65";
+					drive-strength = <2>;   /* 2 mA */
+					bias-pull-down;         /* PULL DOWN */
+					input-enable;
+				};
+			};
+
+			pri_mi2s_sck_active: pri_mi2s_sck_active {
+				mux {
+					pins = "gpio65";
+					function = "pri_mi2s";
+				};
+
+				config {
+					pins = "gpio65";
+					drive-strength = <8>;   /* 8 mA */
+					bias-disable;           /* NO PULL */
+					output-high;
+				};
+			};
+		};
+
+		pri_mi2s_ws {
+			pri_mi2s_ws_sleep: pri_mi2s_ws_sleep {
+				mux {
+					pins = "gpio66";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio66";
+					drive-strength = <2>;   /* 2 mA */
+					bias-pull-down;         /* PULL DOWN */
+					input-enable;
+				};
+			};
+
+			pri_mi2s_ws_active: pri_mi2s_ws_active {
+				mux {
+					pins = "gpio66";
+					function = "pri_mi2s_ws";
+				};
+
+				config {
+					pins = "gpio66";
+					drive-strength = <8>;   /* 8 mA */
+					bias-disable;           /* NO PULL */
+					output-high;
+				};
+			};
+		};
+
+		pri_mi2s_sd0 {
+			pri_mi2s_sd0_sleep: pri_mi2s_sd0_sleep {
+				mux {
+					pins = "gpio67";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio67";
+					drive-strength = <2>;   /* 2 mA */
+					bias-pull-down;         /* PULL DOWN */
+					input-enable;
+				};
+			};
+
+			pri_mi2s_sd0_active: pri_mi2s_sd0_active {
+				mux {
+					pins = "gpio67";
+					function = "pri_mi2s";
+				};
+
+				config {
+					pins = "gpio67";
+					drive-strength = <8>;   /* 8 mA */
+					bias-disable;           /* NO PULL */
+				};
+			};
+		};
+
+		pri_mi2s_sd1 {
+			pri_mi2s_sd1_sleep: pri_mi2s_sd1_sleep {
+				mux {
+					pins = "gpio68";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio68";
+					drive-strength = <2>;   /* 2 mA */
+					bias-pull-down;         /* PULL DOWN */
+					input-enable;
+				};
+			};
+
+			pri_mi2s_sd1_active: pri_mi2s_sd1_active {
+				mux {
+					pins = "gpio68";
+					function = "pri_mi2s";
+				};
+
+				config {
+					pins = "gpio68";
+					drive-strength = <8>;   /* 8 mA */
+					bias-disable;           /* NO PULL */
+				};
+			};
+		};
+
+		sec_mi2s_mclk {
+			sec_mi2s_mclk_sleep: sec_mi2s_mclk_sleep {
+				mux {
+					pins = "gpio79";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio79";
+					drive-strength = <2>;   /* 2 mA */
+					bias-pull-down;         /* PULL DOWN */
+					input-enable;
+				};
+			};
+
+			sec_mi2s_mclk_active: sec_mi2s_mclk_active {
+				mux {
+					pins = "gpio79";
+					function = "sec_mi2s";
+				};
+
+				config {
+					pins = "gpio79";
+					drive-strength = <8>;   /* 8 mA */
+					bias-disable;           /* NO PULL */
+				};
+			};
+		};
+
+		sec_mi2s {
+			sec_mi2s_sleep: sec_mi2s_sleep {
+				mux {
+					pins = "gpio80", "gpio81";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio80", "gpio81";
+					drive-strength = <2>;   /* 2 mA */
+					bias-pull-down;         /* PULL DOWN */
+					input-enable;
+				};
+			};
+
+			sec_mi2s_active: sec_mi2s_active {
+				mux {
+					pins = "gpio80", "gpio81";
+					function = "sec_mi2s";
+				};
+
+				config {
+					pins = "gpio80", "gpio81";
+					drive-strength = <8>;   /* 8 mA */
+					bias-disable;           /* NO PULL */
+				};
+			};
+		};
+
+		sec_mi2s_sd0 {
+			sec_mi2s_sd0_sleep: sec_mi2s_sd0_sleep {
+				mux {
+					pins = "gpio82";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio82";
+					drive-strength = <2>;   /* 2 mA */
+					bias-pull-down;         /* PULL DOWN */
+					input-enable;
+				};
+			};
+
+			sec_mi2s_sd0_active: sec_mi2s_sd0_active {
+				mux {
+					pins = "gpio82";
+					function = "sec_mi2s";
+				};
+
+				config {
+					pins = "gpio82";
+					drive-strength = <8>;   /* 8 mA */
+					bias-disable;           /* NO PULL */
+				};
+			};
+		};
+
+		sec_mi2s_sd1 {
+			sec_mi2s_sd1_sleep: sec_mi2s_sd1_sleep {
+				mux {
+					pins = "gpio83";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio83";
+					drive-strength = <2>;   /* 2 mA */
+					bias-pull-down;         /* PULL DOWN */
+					input-enable;
+				};
+			};
+
+			sec_mi2s_sd1_active: sec_mi2s_sd1_active {
+				mux {
+					pins = "gpio83";
+					function = "sec_mi2s";
+				};
+
+				config {
+					pins = "gpio83";
+					drive-strength = <8>;   /* 8 mA */
+					bias-disable;           /* NO PULL */
+				};
+			};
+		};
+
+		tert_mi2s_mclk {
+			tert_mi2s_mclk_sleep: tert_mi2s_mclk_sleep {
+				mux {
+					pins = "gpio74";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio74";
+					drive-strength = <2>;   /* 2 mA */
+					bias-pull-down;         /* PULL DOWN */
+					input-enable;
+				};
+			};
+
+			tert_mi2s_mclk_active: tert_mi2s_mclk_active {
+				mux {
+					pins = "gpio74";
+					function = "ter_mi2s";
+				};
+
+				config {
+					pins = "gpio74";
+					drive-strength = <8>;   /* 8 mA */
+					bias-disable;           /* NO PULL */
+				};
+			};
+		};
+
+		tert_mi2s {
+			tert_mi2s_sleep: tert_mi2s_sleep {
+				mux {
+					pins = "gpio75", "gpio76";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio75", "gpio76";
+					drive-strength = <2>;   /* 2 mA */
+					bias-pull-down;         /* PULL DOWN */
+					input-enable;
+				};
+			};
+
+			tert_mi2s_active: tert_mi2s_active {
+				mux {
+					pins = "gpio75", "gpio76";
+					function = "ter_mi2s";
+				};
+
+				config {
+					pins = "gpio75", "gpio76";
+					drive-strength = <8>;   /* 8 mA */
+					bias-disable;           /* NO PULL */
+				};
+			};
+		};
+
+		tert_mi2s_sd0 {
+			tert_mi2s_sd0_sleep: tert_mi2s_sd0_sleep {
+				mux {
+					pins = "gpio77";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio77";
+					drive-strength = <2>;   /* 2 mA */
+					bias-pull-down;         /* PULL DOWN */
+					input-enable;
+				};
+			};
+
+			tert_mi2s_sd0_active: tert_mi2s_sd0_active {
+				mux {
+					pins = "gpio77";
+					function = "ter_mi2s";
+				};
+
+				config {
+					pins = "gpio77";
+					drive-strength = <8>;   /* 8 mA */
+					bias-disable;           /* NO PULL */
+				};
+			};
+		};
+
+		tert_mi2s_sd1 {
+			tert_mi2s_sd1_sleep: tert_mi2s_sd1_sleep {
+				mux {
+					pins = "gpio78";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio78";
+					drive-strength = <2>;   /* 2 mA */
+					bias-pull-down;         /* PULL DOWN */
+					input-enable;
+				};
+			};
+
+			tert_mi2s_sd1_active: tert_mi2s_sd1_active {
+				mux {
+					pins = "gpio78";
+					function = "ter_mi2s";
+				};
+
+				config {
+					pins = "gpio78";
+					drive-strength = <8>;   /* 8 mA */
+					bias-disable;           /* NO PULL */
+				};
+			};
+		};
+
+		quat_mi2s_mclk {
+			quat_mi2s_mclk_sleep: quat_mi2s_mclk_sleep {
+				mux {
+					pins = "gpio57";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio57";
+					drive-strength = <2>;   /* 2 mA */
+					bias-pull-down;         /* PULL DOWN */
+					input-enable;
+				};
+			};
+
+			quat_mi2s_mclk_active: quat_mi2s_mclk_active {
+				mux {
+					pins = "gpio57";
+					function = "qua_mi2s";
+				};
+
+				config {
+					pins = "gpio57";
+					drive-strength = <8>;   /* 8 mA */
+					bias-disable;           /* NO PULL */
+				};
+			};
+		};
+
+		quat_mi2s {
+			quat_mi2s_sleep: quat_mi2s_sleep {
+				mux {
+					pins = "gpio58", "gpio59";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio58", "gpio59";
+					drive-strength = <2>;   /* 2 mA */
+					bias-pull-down;         /* PULL DOWN */
+					input-enable;
+				};
+			};
+
+			quat_mi2s_active: quat_mi2s_active {
+				mux {
+					pins = "gpio58", "gpio59";
+					function = "qua_mi2s";
+				};
+
+				config {
+					pins = "gpio58", "gpio59";
+					drive-strength = <8>;   /* 8 mA */
+					bias-disable;           /* NO PULL */
+					output-high;
+				};
+			};
+		};
+
+		quat_mi2s_sd0 {
+			quat_mi2s_sd0_sleep: quat_mi2s_sd0_sleep {
+				mux {
+					pins = "gpio60";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio60";
+					drive-strength = <2>;   /* 2 mA */
+					bias-pull-down;         /* PULL DOWN */
+					input-enable;
+				};
+			};
+
+			quat_mi2s_sd0_active: quat_mi2s_sd0_active {
+				mux {
+					pins = "gpio60";
+					function = "qua_mi2s";
+				};
+
+				config {
+					pins = "gpio60";
+					drive-strength = <8>;   /* 8 mA */
+					bias-disable;           /* NO PULL */
+				};
+			};
+		};
+
+		quat_mi2s_sd1 {
+			quat_mi2s_sd1_sleep: quat_mi2s_sd1_sleep {
+				mux {
+					pins = "gpio61";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio61";
+					drive-strength = <2>;   /* 2 mA */
+					bias-pull-down;         /* PULL DOWN */
+					input-enable;
+				};
+			};
+
+			quat_mi2s_sd1_active: quat_mi2s_sd1_active {
+				mux {
+					pins = "gpio61";
+					function = "qua_mi2s";
+				};
+
+				config {
+					pins = "gpio61";
+					drive-strength = <8>;   /* 8 mA */
+					bias-disable;           /* NO PULL */
+				};
+			};
+		};
+
+		quat_mi2s_sd2 {
+			quat_mi2s_sd2_sleep: quat_mi2s_sd2_sleep {
+				mux {
+					pins = "gpio62";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio62";
+					drive-strength = <2>;   /* 2 mA */
+					bias-pull-down;         /* PULL DOWN */
+					input-enable;
+				};
+			};
+
+			quat_mi2s_sd2_active: quat_mi2s_sd2_active {
+				mux {
+					pins = "gpio62";
+					function = "qua_mi2s";
+				};
+
+				config {
+					pins = "gpio62";
+					drive-strength = <8>;   /* 8 mA */
+					bias-disable;           /* NO PULL */
+				};
+			};
+		};
+
+		quat_mi2s_sd3 {
+			quat_mi2s_sd3_sleep: quat_mi2s_sd3_sleep {
+				mux {
+					pins = "gpio63";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio63";
+					drive-strength = <2>;   /* 2 mA */
+					bias-pull-down;         /* PULL DOWN */
+					input-enable;
+				};
+			};
+
+			quat_mi2s_sd3_active: quat_mi2s_sd3_active {
+				mux {
+					pins = "gpio63";
+					function = "qua_mi2s";
+				};
+
+				config {
+					pins = "gpio63";
+					drive-strength = <8>;   /* 8 mA */
+					bias-disable;           /* NO PULL */
+				};
+			};
+		};
+
+		spkr_i2s_clk_pin {
+			spkr_i2s_clk_sleep: spkr_i2s_clk_sleep {
+				mux {
+					pins = "gpio69";
+					function = "spkr_i2s";
+				};
+
+				config {
+					pins = "gpio69";
+					drive-strength = <2>; /* 2 mA */
+					bias-pull-down;       /* PULL DOWN */
+				};
+			};
+
+			spkr_i2s_clk_active: spkr_i2s_clk_active {
+				mux {
+					pins = "gpio69";
+					function = "spkr_i2s";
+				};
+
+				config {
+					pins = "gpio69";
+					drive-strength = <8>; /* 8 mA */
+					bias-disable;         /* NO PULL */
+				};
+			};
+		};
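+
+		/*
+		 * Note on the audio groups above: the AUX PCM and MI2S sleep
+		 * states all park their pads as pulled-down, 2 mA GPIO
+		 * inputs, while the active states mux the interface function
+		 * at 8 mA with no pull (spkr_i2s keeps its function in both
+		 * states). A machine-driver node (sketch) therefore only
+		 * needs to flip between its "sleep" and "default" pinctrl
+		 * states per interface.
+		 */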
+	};
+};
diff -Nruw linux-4.4.115-fbx/arch/arm/boot/dts/qcom./msm8998-pm.dtsi linux-4.4.115-fbx/arch/arm/boot/dts/qcom/msm8998-pm.dtsi
--- linux-4.4.115-fbx/arch/arm/boot/dts/qcom./msm8998-pm.dtsi	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/arch/arm/boot/dts/qcom/msm8998-pm.dtsi	2019-01-22 16:16:21.195225506 +0100
@@ -0,0 +1,807 @@
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+
+&soc {
+	qcom,spm@17812000 {
+		compatible = "qcom,spm-v2";
+		#address-cells = <1>;
+		#size-cells = <1>;
+		reg = <0x17812000 0x1000>;
+		qcom,name = "gold-l2"; /* Gold L2 SAW */
+		qcom,saw2-ver-reg = <0xfd0>;
+		qcom,cpu-vctl-list = <&CPU4 &CPU5 &CPU6 &CPU7>;
+		qcom,vctl-timeout-us = <500>;
+		qcom,vctl-port = <0x0>;
+		qcom,phase-port = <0x1>;
+		qcom,saw2-avs-ctl = <0x1010031>;
+		qcom,saw2-avs-limit = <0x4580458>;
+		qcom,pfm-port = <0x2>;
+	};
+
+	qcom,spm@17912000 {
+		compatible = "qcom,spm-v2";
+		#address-cells = <1>;
+		#size-cells = <1>;
+		reg = <0x17912000 0x1000>;
+		qcom,name = "silver-l2"; /* Silver L2 SAW */
+		qcom,saw2-ver-reg = <0xfd0>;
+		qcom,cpu-vctl-list = <&CPU0 &CPU1 &CPU2 &CPU3>;
+		qcom,vctl-timeout-us = <500>;
+		qcom,vctl-port = <0x0>;
+		qcom,phase-port = <0x1>;
+		qcom,saw2-avs-ctl = <0x1010031>;
+		qcom,saw2-avs-limit = <0x4580458>;
+		qcom,pfm-port = <0x2>;
+	};
+
+	qcom,lpm-levels {
+		compatible = "qcom,lpm-levels";
+		qcom,use-psci;
+		#address-cells = <1>;
+		#size-cells = <0>;
+		qcom,pm-cluster@0 {
+			reg = <0>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+			label = "system";
+			qcom,spm-device-names = "cci";
+			qcom,psci-mode-shift = <8>;
+			qcom,psci-mode-mask = <0xf>;
+
+			qcom,pm-cluster-level@0{
+				reg = <0>;
+				label = "system-wfi";
+				qcom,psci-mode = <0x0>;
+				qcom,latency-us = <100>;
+				qcom,ss-power = <725>;
+				qcom,energy-overhead = <85000>;
+				qcom,time-overhead = <120>;
+			};
+
+			qcom,pm-cluster-level@1{ /* E3 */
+				reg = <1>;
+				label = "system-pc";
+				qcom,psci-mode = <0x3>;
+				qcom,latency-us = <5534>;
+				qcom,ss-power = <399>;
+				qcom,energy-overhead = <3340281>;
+				qcom,time-overhead = <16744>;
+				qcom,min-child-idx = <3>;
+				qcom,is-reset;
+				qcom,notify-rpm;
+			};
+
+			qcom,pm-cluster@0{
+				reg = <0>;
+				#address-cells = <1>;
+				#size-cells = <0>;
+				label = "pwr";
+				qcom,spm-device-names = "l2";
+				qcom,cpu = <&CPU0 &CPU1 &CPU2 &CPU3>;
+				qcom,psci-mode-shift = <4>;
+				qcom,psci-mode-mask = <0xf>;
+
+				qcom,pm-cluster-level@0{ /* D1 */
+					reg = <0>;
+					label = "pwr-l2-wfi";
+					qcom,psci-mode = <0x1>;
+					qcom,latency-us = <51>;
+					qcom,ss-power = <452>;
+					qcom,energy-overhead = <69355>;
+					qcom,time-overhead = <99>;
+				};
+				qcom,pm-cluster-level@1{ /* D2D */
+					reg = <1>;
+					label = "pwr-l2-dynret";
+					qcom,psci-mode = <0x2>;
+					qcom,latency-us = <659>;
+					qcom,ss-power = <434>;
+					qcom,energy-overhead = <465725>;
+					qcom,time-overhead = <976>;
+					qcom,min-child-idx = <1>;
+				};
+
+				qcom,pm-cluster-level@2{ /* D2E */
+					reg = <2>;
+					label = "pwr-l2-ret";
+					qcom,psci-mode = <0x3>;
+					qcom,latency-us = <743>;
+					qcom,ss-power = <425>;
+					qcom,energy-overhead = <629936>;
+					qcom,time-overhead = <1312>;
+					qcom,min-child-idx = <2>;
+				};
+
+				qcom,pm-cluster-level@3{ /* D4 */
+					reg = <3>;
+					label = "pwr-l2-pc";
+					qcom,psci-mode = <0x4>;
+					qcom,latency-us = <4562>;
+					qcom,ss-power = <408>;
+					qcom,energy-overhead = <2421840>;
+					qcom,time-overhead = <5376>;
+					qcom,min-child-idx = <2>;
+					qcom,is-reset;
+				};
+
+				qcom,pm-cpu {
+					#address-cells = <1>;
+					#size-cells = <0>;
+					qcom,psci-mode-shift = <0>;
+					qcom,psci-mode-mask = <0xf>;
+
+					qcom,pm-cpu-level@0 { /* C1 */
+						reg = <0>;
+						qcom,spm-cpu-mode = "wfi";
+						qcom,psci-cpu-mode = <0x1>;
+						qcom,latency-us = <43>;
+						qcom,ss-power = <454>;
+						qcom,energy-overhead = <38639>;
+						qcom,time-overhead = <83>;
+					};
+
+					qcom,pm-cpu-level@1 { /* C2D */
+						reg = <1>;
+						qcom,psci-cpu-mode = <0x2>;
+						qcom,spm-cpu-mode = "ret";
+						qcom,latency-us = <86>;
+						qcom,ss-power = <449>;
+						qcom,energy-overhead = <78456>;
+						qcom,time-overhead = <167>;
+					};
+
+					qcom,pm-cpu-level@2 {  /* C3 */
+						reg = <2>;
+						qcom,spm-cpu-mode = "pc";
+						qcom,psci-cpu-mode = <0x3>;
+						qcom,latency-us = <612>;
+						qcom,ss-power = <436>;
+						qcom,energy-overhead = <418225>;
+						qcom,time-overhead = <885>;
+						qcom,is-reset;
+					};
+				};
+			};
+
+			qcom,pm-cluster@1{
+				reg = <1>;
+				#address-cells = <1>;
+				#size-cells = <0>;
+				label = "perf";
+				qcom,spm-device-names = "l2";
+				qcom,cpu = <&CPU4 &CPU5 &CPU6 &CPU7>;
+				qcom,psci-mode-shift = <4>;
+				qcom,psci-mode-mask = <0xf>;
+
+				qcom,pm-cluster-level@0{  /* D1 */
+					reg = <0>;
+					label = "perf-l2-wfi";
+					qcom,psci-mode = <0x1>;
+					qcom,latency-us = <51>;
+					qcom,ss-power = <512>;
+					qcom,energy-overhead = <99986>;
+					qcom,time-overhead = <99>;
+				};
+
+				qcom,pm-cluster-level@1{ /* D2D */
+					reg = <1>;
+					label = "perf-l2-dynret";
+					qcom,psci-mode = <0x2>;
+					qcom,latency-us = <529>;
+					qcom,ss-power = <468>;
+					qcom,energy-overhead = <496783>;
+					qcom,time-overhead = <871>;
+					qcom,min-child-idx = <1>;
+				};
+
+				qcom,pm-cluster-level@2{ /* D2E */
+					reg = <2>;
+					label = "perf-l2-ret";
+					qcom,psci-mode = <0x3>;
+					qcom,latency-us = <605>;
+					qcom,ss-power = <456>;
+					qcom,energy-overhead = <597126>;
+					qcom,time-overhead = <1025>;
+					qcom,min-child-idx = <2>;
+				};
+
+				qcom,pm-cluster-level@3{ /* D4 */
+					reg = <3>;
+					label = "perf-l2-pc";
+					qcom,psci-mode = <0x4>;
+					qcom,latency-us = <2027>;
+					qcom,ss-power = <420>;
+					qcom,energy-overhead = <1624216>;
+					qcom,time-overhead = <2751>;
+					qcom,min-child-idx = <2>;
+					qcom,is-reset;
+				};
+
+				qcom,pm-cpu {
+					#address-cells = <1>;
+					#size-cells = <0>;
+					qcom,psci-mode-shift = <0>;
+					qcom,psci-mode-mask = <0xf>;
+
+					qcom,pm-cpu-level@0 { /* C1 */
+						reg = <0>;
+						qcom,spm-cpu-mode = "wfi";
+						qcom,psci-cpu-mode = <0x1>;
+						qcom,latency-us = <43>;
+						qcom,ss-power = <515>;
+						qcom,energy-overhead = <48502>;
+						qcom,time-overhead = <86>;
+					};
+
+					qcom,pm-cpu-level@1 { /* C2D */
+						reg = <1>;
+						qcom,psci-cpu-mode = <0x2>;
+						qcom,spm-cpu-mode = "ret";
+						qcom,latency-us = <82>;
+						qcom,ss-power = <505>;
+						qcom,energy-overhead = <98530>;
+						qcom,time-overhead = <161>;
+					};
+
+					qcom,pm-cpu-level@2 { /* C3 */
+						reg = <2>;
+						qcom,spm-cpu-mode = "pc";
+						qcom,psci-cpu-mode = <0x3>;
+						qcom,latency-us = <525>;
+						qcom,ss-power = <476>;
+						qcom,energy-overhead = <485037>;
+						qcom,time-overhead = <861>;
+						qcom,is-reset;
+					};
+				};
+			};
+		};
+	};
+
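+	/*
+	 * Worked example (derived from the qcom,psci-mode-shift/-mask
+	 * values above): the composite PSCI state id is assembled as
+	 *
+	 *	id = (system_mode << 8) | (cluster_mode << 4) | cpu_mode
+	 *
+	 * so "system-pc" (0x3) with "pwr-l2-pc" (0x4) and a CPU in "pc"
+	 * (0x3) yields 0x343.
+	 */
+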
+	qcom,rpm-stats@200000 {
+		compatible = "qcom,rpm-stats";
+		reg = <0x200000 0x1000>,
+			<0x290014 0x4>,
+			<0x29001c 0x4>;
+		reg-names = "phys_addr_base",
+			"offset_addr",
+			"heap_phys_addrbase";
+		qcom,sleep-stats-version = <2>;
+	};
+
+	qcom,rpm-rail-stats@200000 {
+		compatible = "qcom,rpm-rail-stats";
+		reg = <0x200000 0x100>,
+			<0x29000c 0x4>;
+		reg-names = "phys_addr_base",
+			    "offset_addr";
+	};
+
+	qcom,rpm-log@200000 {
+		compatible = "qcom,rpm-log";
+		reg = <0x200000 0x4000>,
+			<0x290018 0x4>;
+		qcom,rpm-addr-phys = <0x200000>;
+		qcom,offset-version = <4>;
+		qcom,offset-page-buffer-addr = <36>;
+		qcom,offset-log-len = <40>;
+		qcom,offset-log-len-mask = <44>;
+		qcom,offset-page-indices = <56>;
+	};
+
+	qcom,rpm-master-stats@778150 {
+		compatible = "qcom,rpm-master-stats";
+		reg = <0x778150 0x5000>;
+		qcom,masters = "APSS", "MPSS", "ADSP", "SLPI", "TZ", "SPSS";
+		qcom,master-stats-version = <2>;
+		qcom,master-offset = <4096>;
+	};
+
+	rpm_msg_ram: memory@200000 {
+		compatible = "qcom,rpm-msg-ram";
+		reg = <0x200000 0x1000>,
+			<0x290000 0x1000>;
+	};
+
+	rpm_code_ram: rpm-memory@778000 {
+		compatible = "qcom,rpm-code-ram";
+		reg = <0x778000 0x5000>;
+	};
+
+	qcom,system-stats {
+		compatible = "qcom,system-stats";
+		qcom,rpm-msg-ram = <&rpm_msg_ram>;
+		qcom,rpm-code-ram = <&rpm_code_ram>;
+		qcom,masters = "APSS", "MPSS", "ADSP", "SLPI", "TZ", "SPSS";
+	};
+
+	qcom,mpm@7781b8 {
+		compatible = "qcom,mpm-v2";
+		reg = <0x7781b8 0x1000>, /* MSM_RPM_MPM_BASE 4K */
+		    <0x17911008 0x4>;   /* MSM_APCS_GCC_BASE 4K */
+		reg-names = "vmpm", "ipc";
+		interrupts = <GIC_SPI 171 IRQ_TYPE_EDGE_RISING>;
+		clocks = <&clock_gcc clk_cxo_lpm_clk>;
+		clock-names = "xo";
+		qcom,num-mpm-irqs = <96>;
+
+		qcom,ipc-bit-offset = <1>;
+
+		qcom,gic-parent = <&intc>;
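+		/*
+		 * Format note (interpretation, not taken from the binding
+		 * document): each qcom,gic-map entry below pairs an MPM pin
+		 * number with a GIC SPI number; entries with pin 0xff appear
+		 * to cover SPIs that have no dedicated MPM wakeup pin.
+		 */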
+		qcom,gic-map =
+			<0x1f 212>, /* usb30_power_event_irq	*/
+			<0x2 216>, /* tsens1_upper_lower_int	*/
+			<0x34 275>, /* qmp_usb3_lfps_rxterm_irq_cx	*/
+			<0x57 358>, /*  spmi_periph_irq[0]	*/
+			<0x4f 379>, /* usb2phy_intr: qusb2phy_dmse_hv	*/
+			<0x51 379>, /* usb2phy_intr: qusb2phy_dpse_hv	*/
+			<0x50 384>, /* sp_rmb_sp2soc_irq	*/
+			<0xff 16>, /* APC0_qgicQTmrHypPhysIrptReq	*/
+			<0xff 17>, /* APC3_qgicQTmrSecPhysIrptReq	*/
+			<0xff 18>, /* APC0_qgicQTmrNonSecPhysIrptReq	*/
+			<0xff 19>, /* APC3_qgicQTmrVirtIrptReq	*/
+			<0xff 20>, /* APC0_dbgCommRxFull	*/
+			<0xff 21>, /* APC3_dbgCommTxEmpty	*/
+			<0xff 22>, /* APC0_qgicPerfMonIrptReq	*/
+			<0xff 23>, /* corespm_vote_int[7]	*/
+			<0xff 24>, /* APC0_qgicExtFaultIrptReq	*/
+			<0xff 28>, /* qgicWakeupSync	*/
+			<0xff 29>, /* APCC_cti_SPI_intx	*/
+			<0xff 30>, /* APCC_cti_SPI_inty	*/
+			<0xff 32>, /* l2spm_vote_int[0]	*/
+			<0xff 33>, /* l2spm_vote_int[1]	*/
+			<0xff 34>, /* APCC_qgicL2ErrorIrptReq	*/
+			<0xff 35>, /* WDT_barkInt	*/
+			<0xff 36>, /* WDT_biteExpired	*/
+			<0xff 39>, /* QTMR_qgicFrm0VirtIrq	*/
+			<0xff 40>, /* QTMR_qgicFrm0PhyIrq	*/
+			<0xff 41>, /* QTMR_qgicFrm1PhyIrq	*/
+			<0xff 42>, /* QTMR_qgicFrm2PhyIrq	*/
+			<0xff 43>, /* QTMR_qgicFrm3PhyIrq	*/
+			<0xff 44>, /* QTMR_qgicFrm4PhyIrq	*/
+			<0xff 45>, /* QTMR_qgicFrm5PhyIrq	*/
+			<0xff 46>, /* QTMR_qgicFrm6PhyIrq	*/
+			<0xff 47>, /* rbif_Irq[0]	*/
+			<0xff 48>, /* rbif_Irq[1]	*/
+			<0xff 52>, /* cci_spm_vote_summary_int	*/
+			<0xff 54>, /* ~nERRORIRQ	*/
+			<0xff 55>, /* nEVNTCNTOVERFLOW_cci	*/
+			<0xff 56>, /* QTMR_qgicFrm0VirtIrq	*/
+			<0xff 57>, /* QTMR_qgicFrm0PhyIrq	*/
+			<0xff 58>, /* QTMR_qgicFrm1PhyIrq	*/
+			<0xff 59>, /* QTMR_qgicFrm2PhyIrq	*/
+			<0xff 60>, /* QTMR_qgicFrm3PhyIrq	*/
+			<0xff 61>, /* QTMR_qgicFrm4PhyIrq	*/
+			<0xff 62>, /* QTMR_qgicFrm5PhyIrq	*/
+			<0xff 63>, /* QTMR_qgicFrm6PhyIrq	*/
+			<0xff 64>, /* wakeup_counter_irq_OR	*/
+			<0xff 65>, /* apc0_vs_alarm	*/
+			<0xff 66>, /* apc1_vs_alarm	*/
+			<0xff 67>, /* o_pwr_osm_irq	*/
+			<0xff 68>, /* o_perf_osm_irq	*/
+			<0xff 69>, /* o_pwr_dcvsh_interrupt	*/
+			<0xff 70>, /* o_perf_dcvsh_interrupt	*/
+			<0xff 73>, /* L2_EXTERRIRQ_C0	*/
+			<0xff 74>, /* L2_EXTERRIRQ_C1	*/
+			<0xff 75>, /* L2_INTERRIRQ_C0	*/
+			<0xff 76>, /* L2_INTERRIRQ_C1	*/
+			<0xff 77>, /* L2SPM_svicInt[0]	*/
+			<0xff 78>, /* L2SPM_svicInt[1]	*/
+			<0xff 79>, /* L2SPM_svicIntSwDone[0]	*/
+			<0xff 80>, /* L2SPM_svicIntSwDone[1]	*/
+			<0xff 81>, /* l2_avs_err[0]	*/
+			<0xff 82>, /* l2_avs_err[1]	*/
+			<0xff 83>, /* l2_avs_ack[0]	*/
+			<0xff 84>, /* l2_avs_ack[1]	*/
+			<0xff 96>, /* uart_dm_intr	*/
+			<0xff 97>, /* uart_dm_intr	*/
+			<0xff 98>, /* o_qm_interrupt	*/
+			<0xff 100>, /* jpeg_vbif_irpt	*/
+			<0xff 101>, /* processor_1_user_int	*/
+			<0xff 102>, /* processor_1_kernel_int	*/
+			<0xff 106>, /* dir_conn_irq_lpa_dsp_2	*/
+			<0xff 107>, /* dir_conn_irq_lpa_dsp_1	*/
+			<0xff 109>, /* camss_vbif_0_irq	*/
+			<0xff 110>, /* csiphy_irq	*/
+			<0xff 111>, /* csiphy_irq	*/
+			<0xff 112>, /* csiphy_irq	*/
+			<0xff 115>, /* mdp_irq	*/
+			<0xff 116>, /* vbif_irpt	*/
+			<0xff 117>, /* dir_conn_irq_lpa_dsp_0	*/
+			<0xff 119>, /* lcc_audio_wrapper_q6	*/
+			<0xff 122>, /* PIMEM TPDM BC interrupt	*/
+			<0xff 123>, /* PIMEM TPDM TC interrupt	*/
+			<0xff 124>, /* dir_conn_irq_sensors_1	*/
+			<0xff 125>, /* dir_conn_irq_sensors_0	*/
+			<0xff 126>, /* qup_irq	*/
+			<0xff 127>, /* qup_irq	*/
+			<0xff 128>, /* qup_irq	*/
+			<0xff 129>, /* qup_irq	*/
+			<0xff 130>, /* qup_irq	*/
+			<0xff 131>, /* qup_irq	*/
+			<0xff 132>, /* qup_irq	*/
+			<0xff 133>, /* qup_irq	*/
+			<0xff 134>, /* qup_irq	*/
+			<0xff 135>, /* qup_irq	*/
+			<0xff 136>, /* qup_irq	*/
+			<0xff 137>, /* qup_irq	*/
+			<0xff 138>, /* qup_irq	*/
+			<0xff 139>, /* uart_dm_intr	*/
+			<0xff 140>, /* uart_dm_intr	*/
+			<0xff 141>, /* uart_dm_intr	*/
+			<0xff 145>, /* uart_dm_intr	*/
+			<0xff 146>, /* uart_dm_intr	*/
+			<0xff 147>, /* uart_dm_intr	*/
+			<0xff 148>, /* osmmu_Cirpt[4]	*/
+			<0xff 149>, /* osmmu_Cirpt[5]	*/
+			<0xff 151>, /* tsif_irq[0]	*/
+			<0xff 152>, /* tsif_irq[1]	*/
+			<0xff 153>, /* tspp_irq	*/
+			<0xff 154>, /* bam_irq	*/
+			<0xff 155>, /* dir_conn_irq_lpa_dsp_5	*/
+			<0xff 156>, /* dir_conn_irq_lpa_dsp_4	*/
+			<0xff 157>, /* sdcc_irq	*/
+			<0xff 158>, /* sdcc_irq	*/
+			<0xff 159>, /* lpass_qos_apps_interrupt	*/
+			<0xff 160>, /* smmu_PMIrpt	*/
+			<0xff 161>, /* sdcc_irq	*/
+			<0xff 162>, /* sdcc_irq	*/
+			<0xff 163>, /* usb30_ctrl_irq[0]	*/
+			<0xff 164>, /* usb30_bam_irq	*/
+			<0xff 165>, /* usb30_hs_phy_irq	*/
+			<0xff 166>, /* o_lm_int_2qgic	*/
+			<0xff 167>, /* pcie20_inta	*/
+			<0xff 168>, /* pcie20_intb	*/
+			<0xff 169>, /* smmu_Cirpt[12]	*/
+			<0xff 170>, /* pcie20_intc	*/
+			<0xff 171>, /* pcie20_intd	*/
+			<0xff 172>, /* dcvs_int(8)	*/
+			<0xff 173>, /* dcvs_int(9)	*/
+			<0xff 184>, /* dir_conn_irq_lpa_dsp_3	*/
+			<0xff 185>, /* camss_vbif_2_irq	*/
+			<0xff 186>, /* mnoc_obs_mainFault	*/
+			<0xff 188>, /* q6ss_irq_out(4)	*/
+			<0xff 189>, /* q6ss_irq_out(5)	*/
+			<0xff 190>, /* q6ss_irq_out(6)	*/
+			<0xff 191>, /* q6ss_irq_out(7)	*/
+			<0xff 192>, /* audio_out0_irq	*/
+			<0xff 194>, /* q6ss_wdog_exp_irq	*/
+			<0xff 195>, /* lpass_slimbus_core_ee1_irq	*/
+			<0xff 196>, /* lpass_slimbus_bam_ee1_irq	*/
+			<0xff 197>, /* resampler_irq[0]	*/
+			<0xff 199>, /* qdss_usb_trace_bam_irq	*/
+			<0xff 200>, /* rpm_ipc[4]	*/
+			<0xff 201>, /* rpm_ipc[5]	*/
+			<0xff 202>, /* rpm_ipc[6]	*/
+			<0xff 203>, /* rpm_ipc[7]	*/
+			<0xff 204>, /* rpm_ipc[20]	*/
+			<0xff 205>, /* rpm_ipc[21]	*/
+			<0xff 206>, /* rpm_ipc[22]	*/
+			<0xff 207>, /* rpm_ipc[23]	*/
+			<0xff 208>, /* q6ss_irq_out(4)	*/
+			<0xff 209>, /* q6ss_irq_out(5)	*/
+			<0xff 210>, /* q6ss_irq_out(6)	*/
+			<0xff 211>, /* q6ss_irq_out(7)	*/
+			<0xff 213>, /* secure_wdog_bark_irq	*/
+			<0xff 214>, /* tsens1_tsens_max_min_int	*/
+			<0xff 215>, /* o_bimc_intr[0]	*/
+			<0xff 217>, /* ocimem_nonsec_irq	*/
+			<0xff 218>, /* sscaon_tmr_timeout_irq	*/
+			<0xff 219>, /* q6ss_irq_out(28)	*/
+			<0xff 220>, /* spmi_protocol_irq	*/
+			<0xff 221>, /* q6ss_irq_out(29)	*/
+			<0xff 222>, /* q6ss_irq_out(30)	*/
+			<0xff 223>, /* spdm_offline_irq	*/
+			<0xff 224>, /* spdm_realtime_irq	*/
+			<0xff 225>, /* snoc_obs_mainFault	*/
+			<0xff 226>, /* cnoc_obs_mainFault	*/
+			<0xff 227>, /* o_ss_xpu3_sec_intr	*/
+			<0xff 228>, /* o_tcsr_xpu3_non_sec_summary_intr	*/
+			<0xff 229>, /* o_timeout_slave_kpss_summary_intr */
+			<0xff 230>, /* o_tcsr_vmidmt_client_sec_summary_intr */
+			<0xff 231>, /* o_tcsr_vmidmt_client_non_sec */
+			<0xff 232>, /* o_tcsr_vmidmt_cfg_sec_summary_intr */
+			<0xff 233>, /* o_tcsr_vmidmt_cfg_non_sec */
+			<0xff 234>, /* q6ss_irq_out(31)	*/
+			<0xff 235>, /* cpr_irq[0]	*/
+			<0xff 236>, /* crypto_core_irq[0]	*/
+			<0xff 237>, /* crypto_core_irq[1]	*/
+			<0xff 238>, /* crypto_bam_irq[0]	*/
+			<0xff 239>, /* crypto_bam_irq[1]	*/
+			<0xff 240>, /* summary_irq_hmss	*/
+			<0xff 241>, /* dir_conn_irq_hmss_7	*/
+			<0xff 242>, /* dir_conn_irq_hmss_6	*/
+			<0xff 243>, /* dir_conn_irq_hmss_5	*/
+			<0xff 244>, /* dir_conn_irq_hmss_4	*/
+			<0xff 245>, /* dir_conn_irq_hmss_3	*/
+			<0xff 246>, /* dir_conn_irq_hmss_2	*/
+			<0xff 247>, /* dir_conn_irq_hmss_1	*/
+			<0xff 248>, /* dir_conn_irq_hmss_0	*/
+			<0xff 249>, /* summary_irq_hmss_tz	*/
+			<0xff 250>, /* cpr_irq[3]	*/
+			<0xff 251>, /* cpr_irq[2]	*/
+			<0xff 252>, /* cpr_irq[1]	*/
+			<0xff 253>, /* sdcc_pwr_cmd_irq	*/
+			<0xff 254>, /* sdio_wakeup_irq	*/
+			<0xff 255>, /* cpr_irq[0]	*/
+			<0xff 256>, /* smmu_Cirpt[13]	*/
+			<0xff 257>, /* smmu_Cirpt[14]	*/
+			<0xff 258>, /* smmu_Cirpt[0]	*/
+			<0xff 259>, /* sdcc_pwr_cmd_irq	*/
+			<0xff 260>, /* sdio_wakeup_irq	*/
+			<0xff 261>, /* o_tcsr_mmu_nsgcfglrpt_summary_intr */
+			<0xff 262>, /* o_tcsr_mmu_gcfglrpt_summary_intr	*/
+			<0xff 263>, /* o_tcsr_mmu_nsglrpt_summary_intr	*/
+			<0xff 264>, /* o_tcsr_mmu_glrpt_summary_intr	*/
+			<0xff 265>, /* vbif_irpt	*/
+			<0xff 266>, /* smmu_PMIrpt	*/
+			<0xff 267>, /* smmu_Cirpt[3]	*/
+			<0xff 268>, /* q6ss_irq_out(31)	*/
+			<0xff 269>, /* rpm_wdog_expired_irq	*/
+			<0xff 270>, /* bam_irq	*/
+			<0xff 271>, /* bam_irq	*/
+			<0xff 272>, /* q6ss_irq_out(28)	*/
+			<0xff 273>, /* q6ss_irq_out(29)	*/
+			<0xff 274>, /* q6ss_irq_out(30)	*/
+			<0xff 276>, /* osmmu_Cirpt[4]	*/
+			<0xff 277>, /* osmmu_Cirpt[5]	*/
+			<0xff 278>, /* usb30_ctrl_irq[1]	*/
+			<0xff 279>, /* osmmu_Cirpt[6]	*/
+			<0xff 280>, /* osmmu_Cirpt[7]	*/
+			<0xff 281>, /* osmmu_Cirpt[8]	*/
+			<0xff 282>, /* osmmu_Cirpt[9]	*/
+			<0xff 283>, /* osmmu_Cirpt[10]	*/
+			<0xff 284>, /* osmmu_Cirpt[11]	*/
+			<0xff 285>, /* osmmu_Cirpt[12]	*/
+			<0xff 286>, /* osmmu_Cirpt[13]	*/
+			<0xff 287>, /* osmmu_Cirpt[14]	*/
+			<0xff 288>, /* osmmu_Cirpt[15]	*/
+			<0xff 289>, /* ufs_ice_sec_level_irq	*/
+			<0xff 290>, /* cpr_irq[4]	*/
+			<0xff 291>, /* smmu_Cirpt[2]	*/
+			<0xff 292>, /* osmmu_Cirpt[16]	*/
+			<0xff 293>, /* osmmu_Cirpt[17]	*/
+			<0xff 294>, /* osmmu_Cirpt[18]	*/
+			<0xff 295>, /* osmmu_Cirpt[0]	*/
+			<0xff 296>, /* osmmu_PMIrpt	*/
+			<0xff 297>, /* ufs_intrq	*/
+			<0xff 298>, /* osmmu_Cirpt[1]	*/
+			<0xff 299>, /* osmmu_Cirpt[2]	*/
+			<0xff 300>, /* osmmu_Cirpt[3]	*/
+			<0xff 301>, /* smmu_Cirpt[1]	*/
+			<0xff 302>, /* qdss_etrbytecnt_irq	*/
+			<0xff 303>, /* smmu_Cirpt[0]	*/
+			<0xff 304>, /* osmmu_Cirpt[19]	*/
+			<0xff 305>, /* osmmu_Cirpt[20]	*/
+			<0xff 306>, /* osmmu_Cirpt[21]	*/
+			<0xff 307>, /* osmmu_Cirpt[22]	*/
+			<0xff 308>, /* osmmu_Cirpt[23]	*/
+			<0xff 310>, /* pcie20_global_int	*/
+			<0xff 311>, /* pcie20_int_edma_int	*/
+			<0xff 316>, /* lpass_hdmitx_interrupt_ext	*/
+			<0xff 317>, /* rbif_irq	*/
+			<0xff 318>, /* gpu_cc_gpu_cx_gds_hw_ctrl_irq_out */
+			<0xff 319>, /* VENUS_IRQ	*/
+			<0xff 323>, /* lpass_slimbus1_core_ee1_irq	*/
+			<0xff 324>, /* lpass_slimbus1_bam_ee1_irq	*/
+			<0xff 325>, /* camss_irq18	*/
+			<0xff 326>, /* camss_irq0	*/
+			<0xff 327>, /* camss_irq1	*/
+			<0xff 328>, /* camss_irq2	*/
+			<0xff 329>, /* camss_irq3	*/
+			<0xff 330>, /* camss_irq4	*/
+			<0xff 331>, /* camss_irq5	*/
+			<0xff 332>, /* GC_SYS_irq_0	*/
+			<0xff 333>, /* GC_SYS_irq_1	*/
+			<0xff 334>, /* GC_SYS_irq_2	*/
+			<0xff 335>, /* GC_SYS_irq_3	*/
+			<0xff 336>, /* camss_irq13	*/
+			<0xff 337>, /* camss_irq14	*/
+			<0xff 338>, /* camss_irq15	*/
+			<0xff 339>, /* camss_irq16	*/
+			<0xff 340>, /* camss_irq17	*/
+			<0xff 341>, /* camss_irq6	*/
+			<0xff 342>, /* smmu_Cirpt[15]	*/
+			<0xff 343>, /* bam_irq[0]	*/
+			<0xff 344>, /* uart_dm_intr	*/
+			<0xff 345>, /* camss_irq7	*/
+			<0xff 346>, /* camss_irq8	*/
+			<0xff 347>, /* camss_irq9	*/
+			<0xff 348>, /* camss_irq10	*/
+			<0xff 350>, /* camss_irq12	*/
+			<0xff 351>, /* sif_aud_dec_out_irq_ext	*/
+			<0xff 356>, /* vbif_nrt_irpt	*/
+			<0xff 357>, /* Nonfatal pIMEM interrupt	*/
+			<0xff 359>, /* spmi_periph_irq[1]	*/
+			<0xff 360>, /* Fatal pIMEM interrupt	*/
+			<0xff 361>, /* osmmu_Cirpt[0]	*/
+			<0xff 362>, /* osmmu_Cirpt[1]	*/
+			<0xff 363>, /* osmmu_Cirpt[2]	*/
+			<0xff 364>, /* osmmu_Cirpt[3]	*/
+			<0xff 365>, /* ipa_irq(0)	*/
+			<0xff 366>, /* osmmu_PMIrpt	*/
+			<0xff 380>, /* sp_sp2apps_irq[0]	*/
+			<0xff 381>, /* sp_sp2apps_irq[1]	*/
+			<0xff 382>, /* sp_sp2apps_irq[2]	*/
+			<0xff 383>, /* sp_sp2apps_irq[3]	*/
+			<0xff 385>, /* osmmu_CIrpt[12]	*/
+			<0xff 386>, /* osmmu_CIrpt[13]	*/
+			<0xff 387>, /* osmmu_CIrpt[14]	*/
+			<0xff 388>, /* osmmu_CIrpt[15]	*/
+			<0xff 389>, /* osmmu_CIrpt[16]	*/
+			<0xff 390>, /* osmmu_CIrpt[17]	*/
+			<0xff 391>, /* osmmu_CIrpt[18]	*/
+			<0xff 392>, /* osmmu_CIrpt[19]	*/
+			<0xff 393>, /* o_dcc_crc_fail_int	*/
+			<0xff 395>, /* aggre1_obs_mainfault	*/
+			<0xff 396>, /* aggr1_smmu_cirpt[0]	*/
+			<0xff 397>, /* aggr1_smmu_cirpt[1]	*/
+			<0xff 398>, /* aggr1_smmu_cirpt[2]	*/
+			<0xff 399>, /* aggr1_smmu_cirpt[3]	*/
+			<0xff 400>, /* aggr1_smmu_cirpt[4]	*/
+			<0xff 401>, /* aggr1_smmu_cirpt[5]	*/
+			<0xff 402>, /* aggr1_smmu_cirpt[6]	*/
+			<0xff 403>, /* aggr1_smmu_pmirpt	*/
+			<0xff 404>, /* aggre2noc_obs_mainFault	*/
+			<0xff 405>, /* osmmu_CIrpt[0]	*/
+			<0xff 406>, /* osmmu_CIrpt[1]	*/
+			<0xff 407>, /* osmmu_CIrpt[2]	*/
+			<0xff 408>, /* osmmu_CIrpt[3]	*/
+			<0xff 409>, /* osmmu_CIrpt[4]	*/
+			<0xff 410>, /* osmmu_CIrpt[5]	*/
+			<0xff 411>, /* o_dcc_task_done_int	*/
+			<0xff 412>, /* vsense_alarm_irq	*/
+			<0xff 413>, /* osmmu_PMIrpt	*/
+			<0xff 414>, /* pmic_arb_trans_done_irq[0]	*/
+			<0xff 415>, /* pmic_arb_trans_done_irq[1]	*/
+			<0xff 416>, /* rpm_ipc[28]	*/
+			<0xff 417>, /* rpm_ipc[29]	*/
+			<0xff 418>, /* rpm_ipc[30]	*/
+			<0xff 419>, /* rpm_ipc[31]	*/
+			<0xff 420>, /* qup_irq	*/
+			<0xff 421>, /* qup_irq	*/
+			<0xff 422>, /* wd_bite_apps	*/
+			<0xff 423>, /* lpass_qos_apps_interrupt	*/
+			<0xff 424>, /* ipa_irq(2)	*/
+			<0xff 425>, /* smmu_Cirpt[1]	*/
+			<0xff 426>, /* smmu_Cirpt[2]	*/
+			<0xff 427>, /* smmu_Cirpt[3]	*/
+			<0xff 428>, /* smmu_Cirpt[4]	*/
+			<0xff 429>, /* smmu_Cirpt[5]	*/
+			<0xff 430>, /* smmu_Cirpt[6]	*/
+			<0xff 431>, /* smmu_Cirpt[7]	*/
+			<0xff 432>, /* smmu_Cirpt[8]	*/
+			<0xff 433>, /* smmu_Cirpt[9]	*/
+			<0xff 434>, /* smmu_Cirpt[10]	*/
+			<0xff 435>, /* smmu_Cirpt[11]	*/
+			<0xff 436>, /* smmu_Cirpt[16]	*/
+			<0xff 437>, /* pcie20_0_int_msi_dev0	*/
+			<0xff 438>, /* pcie20_0_int_msi_dev1	*/
+			<0xff 439>, /* pcie20_0_int_msi_dev2	*/
+			<0xff 440>, /* pcie20_0_int_msi_dev3	*/
+			<0xff 441>, /* pcie20_0_int_msi_dev4	*/
+			<0xff 442>, /* pcie20_0_int_msi_dev5	*/
+			<0xff 443>, /* pcie20_0_int_msi_dev6	*/
+			<0xff 444>, /* pcie20_0_int_msi_dev7	*/
+			<0xff 445>, /* o_wcss_apps_intr[0]	*/
+			<0xff 446>, /* o_wcss_apps_intr[1]	*/
+			<0xff 447>, /* o_wcss_apps_intr[2]	*/
+			<0xff 448>, /* o_wcss_apps_intr[3]	*/
+			<0xff 449>, /* o_wcss_apps_intr[4]	*/
+			<0xff 450>, /* o_wcss_apps_intr[5]	*/
+			<0xff 452>, /* o_wcss_apps_intr[6]	*/
+			<0xff 453>, /* o_wcss_apps_intr[7]	*/
+			<0xff 454>, /* o_wcss_apps_intr[8]	*/
+			<0xff 455>, /* o_wcss_apps_intr[9]	*/
+			<0xff 456>, /* o_wcss_apps_intr[10]	*/
+			<0xff 457>, /* o_wcss_apps_intr[11]	*/
+			<0xff 458>, /* o_wcss_apps_intr[12]	*/
+			<0xff 461>, /* o_ocimem_nonsec_irq	*/
+			<0xff 462>, /* tsens1_tsens_critical_int	*/
+			<0xff 463>, /* aggr1_smmu_cirpt[7]	*/
+			<0xff 464>, /* ipa_bam_irq(0)	*/
+			<0xff 465>, /* ipa_bam_irq(2)	*/
+			<0xff 466>, /* ssc_uart_int	*/
+			<0xff 468>, /* cri_cm_irq_tz	*/
+			<0xff 469>, /* cri_cm_irq_hyp	*/
+			<0xff 471>, /* mmss_bimc_smmu_gds_hw_ctrl_irq_out */
+			<0xff 472>, /* gcc_gds_hw_ctrl_irq_out	*/
+			<0xff 473>, /* lcc_audio_core_smmu_gds_hw_ctrl */
+			<0xff 477>, /* tsens0_tsens_critical_int	*/
+			<0xff 478>, /* tsens0_tsens_max_min_int	*/
+			<0xff 480>, /* q6ss_wdog_expired_irq	*/
+			<0xff 481>, /* mss_ipc_out_irq[4]	*/
+			<0xff 482>, /* mss_ipc_out_irq[5]	*/
+			<0xff 483>, /* mss_ipc_out_irq[6]	*/
+			<0xff 484>, /* mss_ipc_out_irq[7]	*/
+			<0xff 485>, /* mss_ipc_out_irq[28]	*/
+			<0xff 486>, /* mss_ipc_out_irq[29]	*/
+			<0xff 487>, /* mss_ipc_out_irq[30]	*/
+			<0xff 488>, /* mss_ipc_out_irq[31]	*/
+			<0xff 489>, /* skl_core_irq	*/
+			<0xff 490>, /* tsens0_upper_lower_int	*/
+			<0xff 494>, /* osmmu_CIrpt[6]	*/
+			<0xff 495>, /* osmmu_CIrpt[7]	*/
+			<0xff 496>, /* osmmu_CIrpt[8]	*/
+			<0xff 497>, /* osmmu_CIrpt[9]	*/
+			<0xff 498>, /* osmmu_CIrpt[10]	*/
+			<0xff 499>, /* osmmu_CIrpt[11]	*/
+			<0xff 503>; /* o_bimc_intr[1]	*/
+
+		qcom,gpio-parent = <&tlmm>;
+		qcom,gpio-map = <3  1>,
+			<4  5>,
+			<5  9>,
+			<6  11>,
+			<7  66>,
+			<8  22>,
+			<9  24>,
+			<10  26>,
+			<11  34>,
+			<12  36>,
+			<13  37>, /* PCIe0 */
+			<14  38>,
+			<15  40>,
+			<16  42>,
+			<17  46>,
+			<18  50>,
+			<19  53>,
+			<20  54>,
+			<21  56>,
+			<22  57>,
+			<23  58>,
+			<24  59>,
+			<25  60>,
+			<26  61>,
+			<27  62>,
+			<28  63>,
+			<29  64>,
+			<30  71>,
+			<31  73>,
+			<32  77>,
+			<33  78>,
+			<34  79>,
+			<35  80>,
+			<36  82>,
+			<37  86>,
+			<38  91>,
+			<39  92>,
+			<40  95>,
+			<41  97>,
+			<42  101>,
+			<43  104>,
+			<44  106>,
+			<45  108>,
+			<46  112>,
+			<47  113>,
+			<48  110>,
+			<50  127>,
+			<51  115>,
+			<54  116>, /* PCIe2 */
+			<55  117>,
+			<56  118>,
+			<57  119>,
+			<58  120>,
+			<59  121>,
+			<60  122>,
+			<61  123>,
+			<62  124>,
+			<63  125>,
+			<64  126>,
+			<65  129>,
+			<66  131>,
+			<67  132>, /* PCIe1 */
+			<68  133>,
+			<69  145>;
+	};
+};
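Each qcom,gpio-map pair in the block above binds one MPM wakeup pin to a TLMM GPIO number (via qcom,gpio-parent), so only the listed GPIOs can wake the SoC from system sleep; the inline comments flag the pins routed to the three PCIe ports. Extending the map for one more wakeup-capable GPIO means appending a pair on a free MPM pin — a sketch, where pin 70 and GPIO 135 are purely illustrative values:

			<68  133>,
			<69  145>,
			<70  135>;	/* illustrative: MPM pin 70 <- TLMM GPIO 135 */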
diff -Nruw linux-4.4.115-fbx/arch/arm/boot/dts/qcom./msm8998-ramoops.dtsi linux-4.4.115-fbx/arch/arm/boot/dts/qcom/msm8998-ramoops.dtsi
--- linux-4.4.115-fbx/arch/arm/boot/dts/qcom./msm8998-ramoops.dtsi	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/arch/arm/boot/dts/qcom/msm8998-ramoops.dtsi	2019-01-22 16:16:21.199225542 +0100
@@ -0,0 +1,21 @@
+
+/ {
+	reserved-memory {
+
+		/* pstore test */
+		ramoops_mem: ramoops_mem@88f00000 {
+			compatible = "removed-dma-pool";
+			no-map;
+			reg = <0x0 0x88f00000 0x0 0x100000>; /* 1 MB */
+		};
+	};
+
+	/* pstore test */
+	ramoops {
+		compatible = "ramoops";
+		memory-region = <&ramoops_mem>;
+		record-size = <0x00100000>; /* 1 MB */
+		ecc-size = <16>;
+		no-dump-oops;
+	};
+};
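The two nodes above reserve a 1 MB no-map region at 0x88f00000 and hand it to ramoops as a single whole-region dump record with 16-byte ECC, with oops dumps suppressed (no-dump-oops) so only panics are persisted. A variant that also keeps console and ftrace history would split the region into zones; a minimal sketch, assuming this tree's ramoops backport honours the usual console-size/ftrace-size/pmsg-size properties of the upstream binding:

	ramoops {
		compatible = "ramoops";
		memory-region = <&ramoops_mem>;
		record-size  = <0x20000>;	/* 128 KB per panic dump zone */
		console-size = <0x40000>;	/* 256 KB rolling kernel log */
		ftrace-size  = <0x20000>;	/* 128 KB ftrace buffer */
		pmsg-size    = <0x20000>;	/* 128 KB userspace pmsg */
		ecc-size = <16>;
		no-dump-oops;
	};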
diff -Nruw linux-4.4.115-fbx/arch/arm/boot/dts/qcom./msm8998-regulator.dtsi linux-4.4.115-fbx/arch/arm/boot/dts/qcom/msm8998-regulator.dtsi
--- linux-4.4.115-fbx/arch/arm/boot/dts/qcom./msm8998-regulator.dtsi	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/arch/arm/boot/dts/qcom/msm8998-regulator.dtsi	2019-01-22 16:16:21.199225542 +0100
@@ -0,0 +1,1145 @@
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <dt-bindings/clock/msm-clocks-8998.h>
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+
+&rpm_bus {
+	/* PM8998 S1 + S6 = VDD_CX supply */
+	rpm-regulator-smpa1 {
+		status = "okay";
+		pm8998_s1_level: regulator-s1-level {
+			compatible = "qcom,rpm-smd-regulator";
+			regulator-name = "pm8998_s1_level";
+			qcom,set = <3>;
+			regulator-min-microvolt =
+				<RPM_SMD_REGULATOR_LEVEL_RETENTION>;
+			regulator-max-microvolt =
+				<RPM_SMD_REGULATOR_LEVEL_TURBO>;
+			qcom,use-voltage-level;
+		};
+
+		pm8998_s1_floor_level: regulator-s1-floor-level {
+			compatible = "qcom,rpm-smd-regulator";
+			regulator-name = "pm8998_s1_floor_level";
+			qcom,set = <3>;
+			regulator-min-microvolt =
+				<RPM_SMD_REGULATOR_LEVEL_RETENTION>;
+			regulator-max-microvolt =
+				<RPM_SMD_REGULATOR_LEVEL_TURBO>;
+			qcom,use-voltage-floor-level;
+			qcom,always-send-voltage;
+		};
+
+		pm8998_s1_level_ao: regulator-s1-level-ao {
+			compatible = "qcom,rpm-smd-regulator";
+			regulator-name = "pm8998_s1_level_ao";
+			qcom,set = <1>;
+			regulator-min-microvolt =
+				<RPM_SMD_REGULATOR_LEVEL_RETENTION>;
+			regulator-max-microvolt =
+				<RPM_SMD_REGULATOR_LEVEL_TURBO>;
+			qcom,use-voltage-level;
+		};
+	};
+
+	rpm-regulator-smpa2 {
+		status = "okay";
+		pm8998_s2: regulator-s2 {
+			regulator-min-microvolt = <1128000>;
+			regulator-max-microvolt = <1128000>;
+			status = "okay";
+		};
+	};
+
+	rpm-regulator-smpa3 {
+		status = "okay";
+		pm8998_s3: regulator-s3 {
+			regulator-min-microvolt = <1352000>;
+			regulator-max-microvolt = <1352000>;
+			status = "okay";
+		};
+	};
+
+	rpm-regulator-smpa4 {
+		status = "okay";
+		pm8998_s4: regulator-s4 {
+			regulator-min-microvolt = <1800000>;
+			regulator-max-microvolt = <1800000>;
+			status = "okay";
+		};
+	};
+
+	rpm-regulator-smpa5 {
+		status = "okay";
+		pm8998_s5: regulator-s5 {
+			regulator-min-microvolt = <1904000>;
+			regulator-max-microvolt = <2040000>;
+			qcom,init-pin-ctrl-mode = <8>;		/* PMIC_AWAKE */
+			qcom,send-defaults;
+			status = "okay";
+		};
+	};
+
+	rpm-regulator-smpa7 {
+		status = "okay";
+		pm8998_s7: regulator-s7 {
+			regulator-min-microvolt = <900000>;
+			regulator-max-microvolt = <1028000>;
+			qcom,init-pin-ctrl-mode = <8>;		/* PMIC_AWAKE */
+			qcom,send-defaults;
+			status = "okay";
+		};
+	};
+
+	rpm-regulator-smpa8 {
+		status = "okay";
+		pm8998_s8: regulator-s8 {
+			regulator-min-microvolt = <800000>;
+			regulator-max-microvolt = <800000>;
+			status = "okay";
+		};
+	};
+
+	/* PM8998 S9 = VDD_MX supply */
+	rpm-regulator-smpa9 {
+		status = "okay";
+		pm8998_s9_level: regulator-s9-level {
+			compatible = "qcom,rpm-smd-regulator";
+			regulator-name = "pm8998_s9_level";
+			qcom,set = <3>;
+			regulator-min-microvolt =
+				<RPM_SMD_REGULATOR_LEVEL_RETENTION>;
+			regulator-max-microvolt =
+				<RPM_SMD_REGULATOR_LEVEL_TURBO>;
+			qcom,use-voltage-level;
+		};
+
+		pm8998_s9_floor_level: regulator-s9-floor-level {
+			compatible = "qcom,rpm-smd-regulator";
+			regulator-name = "pm8998_s9_floor_level";
+			qcom,set = <3>;
+			regulator-min-microvolt =
+				<RPM_SMD_REGULATOR_LEVEL_RETENTION>;
+			regulator-max-microvolt =
+				<RPM_SMD_REGULATOR_LEVEL_TURBO>;
+			qcom,use-voltage-floor-level;
+			qcom,always-send-voltage;
+		};
+
+		pm8998_s9_level_ao: regulator-s9-level-ao {
+			compatible = "qcom,rpm-smd-regulator";
+			regulator-name = "pm8998_s9_level_ao";
+			qcom,set = <1>;
+			regulator-min-microvolt =
+				<RPM_SMD_REGULATOR_LEVEL_RETENTION>;
+			regulator-max-microvolt =
+				<RPM_SMD_REGULATOR_LEVEL_TURBO>;
+			qcom,use-voltage-level;
+		};
+	};
+
+	rpm-regulator-ldoa1 {
+		status = "okay";
+		pm8998_l1: regulator-l1 {
+			regulator-min-microvolt = <880000>;
+			regulator-max-microvolt = <880000>;
+			proxy-supply = <&pm8998_l1>;
+			qcom,proxy-consumer-enable;
+			qcom,proxy-consumer-current = <73400>;
+			status = "okay";
+		};
+	};
+
+	rpm-regulator-ldoa2 {
+		status = "okay";
+		pm8998_l2: regulator-l2 {
+			regulator-min-microvolt = <1200000>;
+			regulator-max-microvolt = <1200000>;
+			proxy-supply = <&pm8998_l2>;
+			qcom,proxy-consumer-enable;
+			qcom,proxy-consumer-current = <12560>;
+			status = "okay";
+		};
+	};
+
+	rpm-regulator-ldoa3 {
+		status = "okay";
+		pm8998_l3: regulator-l3 {
+			regulator-min-microvolt = <1000000>;
+			regulator-max-microvolt = <1000000>;
+			status = "okay";
+		};
+	};
+
+	/* PM8998 L4 = VDD_SSC_MX supply */
+	rpm-regulator-ldoa4 {
+		status = "okay";
+		pm8998_l4_level: regulator-l4-level {
+			compatible = "qcom,rpm-smd-regulator";
+			regulator-name = "pm8998_l4_level";
+			qcom,set = <3>;
+			regulator-min-microvolt =
+				<RPM_SMD_REGULATOR_LEVEL_RETENTION>;
+			regulator-max-microvolt =
+				<RPM_SMD_REGULATOR_LEVEL_TURBO>;
+			qcom,use-voltage-level;
+		};
+
+		pm8998_l4_floor_level: regulator-l4-floor-level {
+			compatible = "qcom,rpm-smd-regulator";
+			regulator-name = "pm8998_l4_floor_level";
+			qcom,set = <3>;
+			regulator-min-microvolt =
+				<RPM_SMD_REGULATOR_LEVEL_RETENTION>;
+			regulator-max-microvolt =
+				<RPM_SMD_REGULATOR_LEVEL_TURBO>;
+			qcom,use-voltage-floor-level;
+			qcom,always-send-voltage;
+		};
+	};
+
+	rpm-regulator-ldoa5 {
+		status = "okay";
+		pm8998_l5: regulator-l5 {
+			regulator-min-microvolt = <800000>;
+			regulator-max-microvolt = <800000>;
+			/* Force NPM follows HW0_EN */
+			qcom,init-pin-ctrl-mode = <1>;
+			status = "okay";
+		};
+	};
+
+	rpm-regulator-ldoa6 {
+		status = "okay";
+		pm8998_l6: regulator-l6 {
+			regulator-min-microvolt = <1808000>;
+			regulator-max-microvolt = <1808000>;
+			status = "okay";
+		};
+	};
+
+	rpm-regulator-ldoa7 {
+		status = "okay";
+		pm8998_l7: regulator-l7 {
+			regulator-min-microvolt = <1800000>;
+			regulator-max-microvolt = <1800000>;
+			status = "okay";
+		};
+
+		pm8998_l7_pin_ctrl: regulator-l7-pin-ctrl {
+			compatible = "qcom,rpm-smd-regulator";
+			regulator-name = "pm8998_l7_pin_ctrl";
+			qcom,set = <3>;
+			regulator-min-microvolt = <1800000>;
+			regulator-max-microvolt = <1800000>;
+			/* Force NPM follows HW_EN2 */
+			qcom,init-pin-ctrl-mode = <4>;
+			/* Enable follows HW_EN2 */
+			qcom,enable-with-pin-ctrl = <0 4>;
+		};
+	};
+
+	rpm-regulator-ldoa8 {
+		status = "okay";
+		pm8998_l8: regulator-l8 {
+			regulator-min-microvolt = <1200000>;
+			regulator-max-microvolt = <1200000>;
+			status = "okay";
+		};
+	};
+
+	rpm-regulator-ldoa9 {
+		status = "okay";
+		pm8998_l9: regulator-l9 {
+			regulator-min-microvolt = <1808000>;
+			regulator-max-microvolt = <2960000>;
+			status = "okay";
+		};
+	};
+
+	rpm-regulator-ldoa10 {
+		status = "okay";
+		pm8998_l10: regulator-l10 {
+			regulator-min-microvolt = <1808000>;
+			regulator-max-microvolt = <2960000>;
+			status = "okay";
+		};
+	};
+
+	rpm-regulator-ldoa11 {
+		status = "okay";
+		pm8998_l11: regulator-l11 {
+			regulator-min-microvolt = <1000000>;
+			regulator-max-microvolt = <1000000>;
+			status = "okay";
+		};
+	};
+
+	rpm-regulator-ldoa12 {
+		status = "okay";
+		pm8998_l12: regulator-l12 {
+			regulator-min-microvolt = <1800000>;
+			regulator-max-microvolt = <1800000>;
+			status = "okay";
+		};
+	};
+
+	rpm-regulator-ldoa13 {
+		status = "okay";
+		pm8998_l13: regulator-l13 {
+			regulator-min-microvolt = <1808000>;
+			regulator-max-microvolt = <2960000>;
+			status = "okay";
+		};
+	};
+
+	rpm-regulator-ldoa14 {
+		status = "okay";
+		pm8998_l14: regulator-l14 {
+			regulator-min-microvolt = <1880000>;
+			regulator-max-microvolt = <1880000>;
+			proxy-supply = <&pm8998_l14>;
+			qcom,proxy-consumer-enable;
+			qcom,proxy-consumer-current = <32000>;
+			status = "okay";
+		};
+	};
+
+	rpm-regulator-ldoa15 {
+		status = "okay";
+		pm8998_l15: regulator-l15 {
+			regulator-min-microvolt = <1800000>;
+			regulator-max-microvolt = <1800000>;
+			status = "okay";
+		};
+	};
+
+	rpm-regulator-ldoa16 {
+		status = "okay";
+		pm8998_l16: regulator-l16 {
+			regulator-min-microvolt = <2704000>;
+			regulator-max-microvolt = <2704000>;
+			status = "okay";
+		};
+	};
+
+	rpm-regulator-ldoa17 {
+		status = "okay";
+		pm8998_l17: regulator-l17 {
+			regulator-min-microvolt = <1304000>;
+			regulator-max-microvolt = <1304000>;
+			status = "okay";
+		};
+
+		pm8998_l17_pin_ctrl: regulator-l17-pin-ctrl {
+			compatible = "qcom,rpm-smd-regulator";
+			regulator-name = "pm8998_l17_pin_ctrl";
+			qcom,set = <3>;
+			regulator-min-microvolt = <1304000>;
+			regulator-max-microvolt = <1304000>;
+			/* Force NPM follows HW_EN2 */
+			qcom,init-pin-ctrl-mode = <4>;
+			/* Enable follows HW_EN2 */
+			qcom,enable-with-pin-ctrl = <0 4>;
+		};
+	};
+
+	rpm-regulator-ldoa18 {
+		status = "okay";
+		pm8998_l18: regulator-l18 {
+			regulator-min-microvolt = <2704000>;
+			regulator-max-microvolt = <2704000>;
+			status = "okay";
+		};
+	};
+
+	rpm-regulator-ldoa19 {
+		status = "okay";
+		pm8998_l19: regulator-l19 {
+			regulator-min-microvolt = <3008000>;
+			regulator-max-microvolt = <3008000>;
+			status = "okay";
+		};
+	};
+
+	rpm-regulator-ldoa20 {
+		status = "okay";
+		pm8998_l20: regulator-l20 {
+			regulator-min-microvolt = <2960000>;
+			regulator-max-microvolt = <2960000>;
+			status = "okay";
+		};
+	};
+
+	rpm-regulator-ldoa21 {
+		status = "okay";
+		pm8998_l21: regulator-l21 {
+			regulator-min-microvolt = <2960000>;
+			regulator-max-microvolt = <2960000>;
+			status = "okay";
+		};
+	};
+
+	rpm-regulator-ldoa22 {
+		status = "okay";
+		pm8998_l22: regulator-l22 {
+			regulator-min-microvolt = <2864000>;
+			regulator-max-microvolt = <2864000>;
+			status = "okay";
+		};
+	};
+
+	rpm-regulator-ldoa23 {
+		status = "okay";
+		pm8998_l23: regulator-l23 {
+			regulator-min-microvolt = <3312000>;
+			regulator-max-microvolt = <3312000>;
+			status = "okay";
+		};
+	};
+
+	rpm-regulator-ldoa24 {
+		status = "okay";
+		pm8998_l24: regulator-l24 {
+			regulator-min-microvolt = <1848000>;
+			regulator-max-microvolt = <3088000>;
+			parent-supply = <&pm8998_l12>;
+			status = "okay";
+		};
+	};
+	rpm-regulator-ldoa25 {
+		status = "okay";
+		pm8998_l25: regulator-l25 {
+			regulator-min-microvolt = <3104000>;
+			regulator-max-microvolt = <3312000>;
+			status = "okay";
+		};
+
+		pm8998_l25_pin_ctrl: regulator-l25-pin-ctrl {
+			compatible = "qcom,rpm-smd-regulator";
+			regulator-name = "pm8998_l25_pin_ctrl";
+			qcom,set = <3>;
+			regulator-min-microvolt = <3104000>;
+			regulator-max-microvolt = <3312000>;
+			/* Force NPM follows HW_EN2 */
+			qcom,init-pin-ctrl-mode = <4>;
+			/* Enable follows HW_EN2 */
+			qcom,enable-with-pin-ctrl = <0 4>;
+		};
+	};
+
+	rpm-regulator-ldoa26 {
+		status = "okay";
+		pm8998_l26: regulator-l26 {
+			regulator-min-microvolt = <1200000>;
+			regulator-max-microvolt = <1200000>;
+			status = "okay";
+		};
+	};
+
+	/* PM8998 L27 = VDD_SSC_CX supply */
+	rpm-regulator-ldoa27 {
+		status = "okay";
+		pm8998_l27_level: regulator-l27-level {
+			compatible = "qcom,rpm-smd-regulator";
+			regulator-name = "pm8998_l27_level";
+			qcom,set = <3>;
+			regulator-min-microvolt =
+				<RPM_SMD_REGULATOR_LEVEL_RETENTION>;
+			regulator-max-microvolt =
+				<RPM_SMD_REGULATOR_LEVEL_TURBO>;
+			qcom,use-voltage-level;
+		};
+
+		pm8998_l27_floor_level: regulator-l27-floor-level {
+			compatible = "qcom,rpm-smd-regulator";
+			regulator-name = "pm8998_l27_floor_level";
+			qcom,set = <3>;
+			regulator-min-microvolt =
+				<RPM_SMD_REGULATOR_LEVEL_RETENTION>;
+			regulator-max-microvolt =
+				<RPM_SMD_REGULATOR_LEVEL_TURBO>;
+			qcom,use-voltage-floor-level;
+			qcom,always-send-voltage;
+		};
+	};
+
+	rpm-regulator-ldoa28 {
+		status = "okay";
+		pm8998_l28: regulator-l28 {
+			regulator-min-microvolt = <3008000>;
+			regulator-max-microvolt = <3008000>;
+			status = "okay";
+		};
+	};
+
+	rpm-regulator-vsa1 {
+		status = "okay";
+		pm8998_lvs1: regulator-lvs1 {
+			status = "okay";
+		};
+	};
+
+	rpm-regulator-vsa2 {
+		status = "okay";
+		pm8998_lvs2: regulator-lvs2 {
+			status = "okay";
+		};
+	};
+
+	rpm-regulator-bobb {
+		status = "okay";
+		pmi8998_bob: regulator-bob {
+			regulator-min-microvolt = <3312000>;
+			regulator-max-microvolt = <3600000>;
+			qcom,pwm-threshold-current = <2000000>;
+			qcom,init-bob-mode = <2>;
+			status = "okay";
+		};
+		pmi8998_bob_pin1: regulator-bob-pin1 {
+			compatible = "qcom,rpm-smd-regulator";
+			regulator-name = "pm8998_bob_pin1";
+			qcom,set = <3>;
+			regulator-min-microvolt = <3312000>;
+			regulator-max-microvolt = <3600000>;
+			qcom,pwm-threshold-current = <2000000>;
+			qcom,init-bob-mode = <2>;
+			qcom,use-pin-ctrl-voltage1;
+		};
+		pmi8998_bob_pin2: regulator-bob-pin2 {
+			compatible = "qcom,rpm-smd-regulator";
+			regulator-name = "pm8998_bob_pin2";
+			qcom,set = <3>;
+			regulator-min-microvolt = <3312000>;
+			regulator-max-microvolt = <3600000>;
+			qcom,pwm-threshold-current = <2000000>;
+			qcom,init-bob-mode = <2>;
+			qcom,use-pin-ctrl-voltage2;
+		};
+		pmi8998_bob_pin3: regulator-bob-pin3 {
+			compatible = "qcom,rpm-smd-regulator";
+			regulator-name = "pm8998_bob_pin3";
+			qcom,set = <3>;
+			regulator-min-microvolt = <3312000>;
+			regulator-max-microvolt = <3600000>;
+			qcom,pwm-threshold-current = <2000000>;
+			qcom,init-bob-mode = <2>;
+			qcom,use-pin-ctrl-voltage3;
+		};
+	};
+};
+
+&spmi_bus {
+	qcom,pm8005@5 {
+		/* PM8005 S1 + S4 = 2 phase VDD_GFX supply */
+		pm8005_s1: regulator@1400 {
+			regulator-name = "pm8005_s1";
+			status = "okay";
+			regulator-min-microvolt = <524000>;
+			regulator-max-microvolt = <1100000>;
+			qcom,enable-time = <500>;
+		};
+	};
+
+	qcom,pm8998@1 {
+		pm8998_s10: regulator@2f00 {
+			compatible = "qcom,qpnp-regulator";
+			reg = <0x2f00 0x100>;
+			regulator-name = "pm8998_s10";
+			regulator-min-microvolt = <572000>;
+			regulator-max-microvolt = <1112000>;
+			qcom,enable-time = <500>;
+			regulator-always-on;
+		};
+
+		pm8998_s13: regulator@3800 {
+			compatible = "qcom,qpnp-regulator";
+			reg = <0x3800 0x100>;
+			regulator-name = "pm8998_s13";
+			regulator-min-microvolt = <572000>;
+			regulator-max-microvolt = <1112000>;
+			qcom,enable-time = <500>;
+			regulator-always-on;
+		};
+	};
+};
+
+/* Stub regulators */
+
+/ {
+	gfx_stub_vreg: regulator-gfx-stub {
+		compatible = "qcom,stub-regulator";
+		regulator-name = "gfx_stub_corner";
+		qcom,hpm-min-load = <100000>;
+		regulator-min-microvolt = <1>;
+		regulator-max-microvolt = <6>;
+		status = "disabled";
+	};
+};
+
+&soc {
+/* CPR controller regulators */
+	apc0_cpr: cprh-ctrl@179c8000 {
+		compatible = "qcom,cprh-msm8998-v1-kbss-regulator";
+		reg = <0x179c8000 0x4000>, <0x00784000 0x1000>;
+		reg-names = "cpr_ctrl", "fuse_base";
+		clocks = <&clock_gcc clk_gcc_hmss_rbcpr_clk>;
+		clock-names = "core_clk";
+		qcom,cpr-ctrl-name = "apc0";
+		qcom,cpr-controller-id = <0>;
+
+		qcom,cpr-sensor-time = <1000>;
+		qcom,cpr-loop-time = <5000000>;
+		qcom,cpr-idle-cycles = <15>;
+		qcom,cpr-up-down-delay-time = <3000>;
+		qcom,cpr-step-quot-init-min = <11>;
+		qcom,cpr-step-quot-init-max = <12>;
+		qcom,cpr-count-mode = <0>;		/* All at once */
+		qcom,cpr-count-repeat = <1>;
+		qcom,cpr-down-error-step-limit = <1>;
+		qcom,cpr-up-error-step-limit = <1>;
+		qcom,cpr-corner-switch-delay-time = <209>;
+		qcom,cpr-voltage-settling-time = <1760>;
+
+		qcom,apm-threshold-voltage = <832000>;
+		qcom,apm-crossover-voltage = <880000>;
+		qcom,apm-hysteresis-voltage = <32000>;
+		qcom,voltage-step = <4000>;
+		qcom,voltage-base = <352000>;
+		qcom,cpr-saw-use-unit-mV;
+
+		qcom,cpr-enable;
+		qcom,cpr-hw-closed-loop;
+
+		qcom,cpr-panic-reg-addr-list =
+			<0x179cbaa4 0x17912c18>;
+		qcom,cpr-panic-reg-name-list =
+			"PWR_CPRH_STATUS", "APCLUS0_L2_SAW4_PMIC_STS";
+
+		qcom,cpr-aging-ref-voltage = <1112000>;
+		vdd-supply = <&pm8998_s10>;
+
+		thread@0 {
+			qcom,cpr-thread-id = <0>;
+			qcom,cpr-consecutive-up = <0>;
+			qcom,cpr-consecutive-down = <2>;
+			qcom,cpr-up-threshold = <2>;
+			qcom,cpr-down-threshold = <2>;
+
+			apc0_pwrcl_vreg: regulator {
+				regulator-name = "apc0_pwrcl_corner";
+				regulator-min-microvolt = <1>;
+				regulator-max-microvolt = <23>;
+
+				qcom,cpr-fuse-corners = <4>;
+				qcom,cpr-fuse-combos = <8>;
+				qcom,cpr-corners = <22>;
+
+				qcom,cpr-corner-fmax-map = <7 10 17 22>;
+
+				qcom,cpr-voltage-ceiling =
+					<896000  896000  896000  896000  896000
+					 896000  896000  896000  896000  896000
+					 896000  896000  896000  896000  896000
+					 896000  896000 1032000 1032000 1032000
+					1112000 1112000>;
+
+				qcom,cpr-voltage-floor =
+					<896000  896000  896000  896000  896000
+					 896000  896000  896000  896000  896000
+					 896000  896000  896000  896000  896000
+					 896000  896000  896000  896000  896000
+					 896000  896000>,
+					<572000  572000  572000  572000  572000
+					 572000  572000  572000  572000  572000
+					 664000  664000  664000  664000  664000
+					 664000  664000  752000  752000  752000
+					 752000  752000>,
+					<572000  572000  572000  572000  572000
+					 572000  572000  572000  572000  572000
+					 664000  664000  664000  664000  664000
+					 664000  664000  752000  752000  752000
+					 752000  752000>,
+					<572000  572000  572000  572000  572000
+					 572000  572000  572000  572000  572000
+					 664000  664000  664000  664000  664000
+					 664000  664000  752000  752000  752000
+					 752000  752000>,
+					<572000  572000  572000  572000  572000
+					 572000  572000  572000  572000  572000
+					 664000  664000  664000  664000  664000
+					 664000  664000  752000  752000  752000
+					 752000  752000>,
+					<572000  572000  572000  572000  572000
+					 572000  572000  572000  572000  572000
+					 664000  664000  664000  664000  664000
+					 664000  664000  752000  752000  752000
+					 752000  752000>,
+					<572000  572000  572000  572000  572000
+					 572000  572000  572000  572000  572000
+					 664000  664000  664000  664000  664000
+					 664000  664000  752000  752000  752000
+					 752000  752000>,
+					<572000  572000  572000  572000  572000
+					 572000  572000  572000  572000  572000
+					 664000  664000  664000  664000  664000
+					 664000  664000  752000  752000  752000
+					 752000  752000>;
+
+				qcom,cpr-floor-to-ceiling-max-range =
+					<55000  55000  55000  55000
+					 55000  55000  55000  55000
+					 55000  55000  65000  65000
+					 65000  65000  65000  65000
+					 65000  65000  65000  65000
+					 65000  65000>;
+
+				qcom,corner-frequencies =
+					<300000000  345600000  422400000
+					 499200000  576000000  633600000
+					 710400000  806400000  883200000
+					 960000000 1036800000 1113600000
+					1190400000 1248000000 1324800000
+					1401600000 1478400000 1574400000
+					1651200000 1728000000 1804800000
+					1881600000>;
+
+				qcom,cpr-ro-scaling-factor =
+					<3430 3512 3262 3426 3100 3238 2463
+					 2725 1749 2891 4058 4014 3195 2866
+					    0    0>,
+					<3430 3512 3262 3426 3100 3238 2463
+					 2725 1749 2891 4058 4014 3195 2866
+					    0    0>,
+					<3162 3153 3163 3261 3037 3135 2645
+					 2857 1864 2417 3499 3706 3315 2771
+					    0    0>,
+					<2632 2539 2835 2857 2767 2813 2690
+					 2827 1857 1632 2596 3068 3212 2454
+					    0    0>;
+
+				qcom,cpr-open-loop-voltage-fuse-adjustment =
+					<0 0 0 0>,
+					<40000 24000 0 0>,
+					<40000 24000 0 0>,
+					<40000 24000 0 0>,
+					<40000 24000 0 0>,
+					<40000 24000 0 0>,
+					<40000 24000 0 0>,
+					<40000 24000 0 0>;
+
+				qcom,cpr-closed-loop-voltage-fuse-adjustment =
+					<0 0 0 0>,
+					<20000 26000 0 0>,
+					<20000 26000 0 0>,
+					<20000 26000 0 0>,
+					<20000 26000 0 0>,
+					<20000 26000 0 0>,
+					<20000 26000 0 0>,
+					<20000 26000 0 0>;
+
+				qcom,allow-voltage-interpolation;
+				qcom,allow-quotient-interpolation;
+				qcom,cpr-scaled-open-loop-voltage-as-ceiling;
+
+				qcom,cpr-aging-max-voltage-adjustment = <15000>;
+				qcom,cpr-aging-ref-corner = <22>;
+				qcom,cpr-aging-ro-scaling-factor = <1620>;
+				qcom,allow-aging-voltage-adjustment = <0>;
+				qcom,allow-aging-open-loop-voltage-adjustment =
+					<1>;
+			};
+		};
+	};
+
+	apc1_cpr: cprh-ctrl@179c4000 {
+		compatible = "qcom,cprh-msm8998-v1-kbss-regulator";
+		reg = <0x179c4000 0x4000>, <0x00784000 0x1000>;
+		reg-names = "cpr_ctrl", "fuse_base";
+		clocks = <&clock_gcc clk_gcc_hmss_rbcpr_clk>;
+		clock-names = "core_clk";
+		qcom,cpr-ctrl-name = "apc1";
+		qcom,cpr-controller-id = <1>;
+
+		qcom,cpr-sensor-time = <1000>;
+		qcom,cpr-loop-time = <5000000>;
+		qcom,cpr-idle-cycles = <15>;
+		qcom,cpr-up-down-delay-time = <3000>;
+		qcom,cpr-step-quot-init-min = <9>;
+		qcom,cpr-step-quot-init-max = <14>;
+		qcom,cpr-count-mode = <0>;		/* All at once */
+		qcom,cpr-count-repeat = <1>;
+		qcom,cpr-down-error-step-limit = <1>;
+		qcom,cpr-up-error-step-limit = <1>;
+		qcom,cpr-corner-switch-delay-time = <209>;
+		qcom,cpr-voltage-settling-time = <1760>;
+
+		qcom,apm-threshold-voltage = <832000>;
+		qcom,apm-crossover-voltage = <880000>;
+		qcom,apm-hysteresis-voltage = <32000>;
+		qcom,voltage-step = <4000>;
+		qcom,voltage-base = <352000>;
+		qcom,cpr-saw-use-unit-mV;
+
+		qcom,cpr-enable;
+		qcom,cpr-hw-closed-loop;
+
+		qcom,cpr-panic-reg-addr-list =
+			<0x179c7aa4 0x17812c18>;
+		qcom,cpr-panic-reg-name-list =
+			"PERF_CPRH_STATUS", "APCLUS1_L2_SAW4_PMIC_STS";
+
+		qcom,cpr-aging-ref-voltage = <1112000>;
+		vdd-supply = <&pm8998_s13>;
+
+		thread@0 {
+			qcom,cpr-thread-id = <0>;
+			qcom,cpr-consecutive-up = <0>;
+			qcom,cpr-consecutive-down = <2>;
+			qcom,cpr-up-threshold = <2>;
+			qcom,cpr-down-threshold = <2>;
+
+			apc1_perfcl_vreg: regulator {
+				regulator-name = "apc1_perfcl_corner";
+				regulator-min-microvolt = <1>;
+				regulator-max-microvolt = <26>;
+
+				qcom,cpr-fuse-corners = <4>;
+				qcom,cpr-fuse-combos = <8>;
+				qcom,cpr-corners = <25>;
+
+				qcom,cpr-corner-fmax-map = <8 12 18 25>;
+
+				qcom,cpr-voltage-ceiling =
+					<896000  896000  896000  896000
+					 896000  896000  896000  896000
+					 896000  896000  896000  896000
+					 896000  896000  896000  896000
+					 896000  896000 1032000 1032000
+					1032000 1032000 1112000 1112000
+					1112000>;
+
+				qcom,cpr-voltage-floor =
+					<896000  896000  896000  896000
+					 896000  896000  896000  896000
+					 896000  896000  896000  896000
+					 896000  896000  896000  896000
+					 896000  896000  896000  896000
+					 896000  896000  896000  896000
+					 896000>,
+					<572000  572000  572000  572000
+					 572000  572000  572000  572000
+					 572000  572000  572000  572000
+					 664000  664000  664000  664000
+					 664000  664000  752000  752000
+					 752000  752000  752000  752000
+					 752000>,
+					<572000  572000  572000  572000
+					 572000  572000  572000  572000
+					 572000  572000  572000  572000
+					 664000  664000  664000  664000
+					 664000  664000  752000  752000
+					 752000  752000  752000  752000
+					 752000>,
+					<572000  572000  572000  572000
+					 572000  572000  572000  572000
+					 572000  572000  572000  572000
+					 664000  664000  664000  664000
+					 664000  664000  752000  752000
+					 752000  752000  752000  752000
+					 752000>,
+					<572000  572000  572000  572000
+					 572000  572000  572000  572000
+					 572000  572000  572000  572000
+					 664000  664000  664000  664000
+					 664000  664000  752000  752000
+					 752000  752000  752000  752000
+					 752000>,
+					<572000  572000  572000  572000
+					 572000  572000  572000  572000
+					 572000  572000  572000  572000
+					 664000  664000  664000  664000
+					 664000  664000  752000  752000
+					 752000  752000  752000  752000
+					 752000>,
+					<572000  572000  572000  572000
+					 572000  572000  572000  572000
+					 572000  572000  572000  572000
+					 664000  664000  664000  664000
+					 664000  664000  752000  752000
+					 752000  752000  752000  752000
+					 752000>,
+					<572000  572000  572000  572000
+					 572000  572000  572000  572000
+					 572000  572000  572000  572000
+					 664000  664000  664000  664000
+					 664000  664000  752000  752000
+					 752000  752000  752000  752000
+					 752000>;
+
+				qcom,cpr-floor-to-ceiling-max-range =
+					<55000  55000  55000  55000
+					 55000  55000  55000  55000
+					 55000  55000  55000  55000
+					 65000  65000  65000  65000
+					 65000  65000  65000  65000
+					 65000  65000  65000  65000
+					 65000>;
+
+				qcom,corner-frequencies =
+					<300000000  345600000  422400000
+					 480000000  556800000  633600000
+					 710400000  787200000  844800000
+					 902400000  979200000 1056000000
+					1171200000 1248000000 1324800000
+					1401600000 1478400000 1536000000
+					1632000000 1708800000 1785600000
+					1862400000 1939200000 2016000000
+					2092800000>;
+
+				qcom,cpr-ro-scaling-factor =
+					<3430 3512 3262 3426 3100 3238 2463
+					 2725 1749 2891 4058 4014 3195 2866
+					    0    0>,
+					<3430 3512 3262 3426 3100 3238 2463
+					 2725 1749 2891 4058 4014 3195 2866
+					    0    0>,
+					<3162 3153 3163 3261 3037 3135 2645
+					 2857 1864 2417 3499 3706 3315 2771
+					    0    0>,
+					<2632 2539 2835 2857 2767 2813 2690
+					 2827 1857 1632 2596 3068 3212 2454
+					    0    0>;
+
+				qcom,cpr-open-loop-voltage-fuse-adjustment =
+					<0 0 0 0>,
+					<8000 0 0 52000>,
+					<8000 0 0 52000>,
+					<8000 0 0 52000>,
+					<8000 0 0 52000>,
+					<8000 0 0 52000>,
+					<8000 0 0 52000>,
+					<8000 0 0 52000>;
+
+				qcom,cpr-closed-loop-voltage-fuse-adjustment =
+					<0 0 0 0>,
+					<0 0 0 50000>,
+					<0 0 0 50000>,
+					<0 0 0 50000>,
+					<0 0 0 50000>,
+					<0 0 0 50000>,
+					<0 0 0 50000>,
+					<0 0 0 50000>;
+
+				qcom,allow-voltage-interpolation;
+				qcom,allow-quotient-interpolation;
+				qcom,cpr-scaled-open-loop-voltage-as-ceiling;
+
+				qcom,cpr-aging-max-voltage-adjustment = <15000>;
+				qcom,cpr-aging-ref-corner = <25>;
+				qcom,cpr-aging-ro-scaling-factor = <1700>;
+				qcom,allow-aging-voltage-adjustment = <0>;
+				qcom,allow-aging-open-loop-voltage-adjustment =
+					<1>;
+			};
+		};
+	};
+
+	gfx_cpr: cpr4-ctrl@5061000 {
+		compatible = "qcom,cpr4-msm8998-v1-mmss-regulator";
+		reg =	<0x05061000 0x4000>,
+			<0x00784000 0x1000>,
+			<0x05065204 0x4>;
+		reg-names = "cpr_ctrl", "fuse_base", "aging_allowed";
+		clocks = <&clock_gpu clk_gpucc_rbcpr_clk>,
+			 <&clock_gcc clk_cnoc_clk>;
+		clock-names = "core_clk", "bus_clk";
+		interrupts = <GIC_SPI 285 IRQ_TYPE_EDGE_RISING>;
+		interrupt-names = "cpr";
+		qcom,cpr-ctrl-name = "gfx";
+
+		qcom,cpr-sensor-time = <1000>;
+		qcom,cpr-loop-time = <5000000>;
+		qcom,cpr-idle-cycles = <15>;
+		qcom,cpr-step-quot-init-min = <8>;
+		qcom,cpr-step-quot-init-max = <12>;
+		qcom,cpr-count-mode = <0>;		/* All-at-once min */
+		qcom,cpr-count-repeat = <1>;
+
+		vdd-supply = <&pm8005_s1>;
+		qcom,voltage-step = <4000>;
+		mem-acc-supply = <&gfx_mem_acc_vreg>;
+		qcom,cpr-aging-ref-voltage = <1032000>;
+		qcom,cpr-aging-allowed-reg-mask  = <0x00000003>;
+		qcom,cpr-aging-allowed-reg-value = <0x00000003>;
+
+		qcom,cpr-enable;
+
+		thread@0 {
+			qcom,cpr-thread-id = <0>;
+			qcom,cpr-consecutive-up = <0>;
+			qcom,cpr-consecutive-down = <2>;
+			qcom,cpr-up-threshold = <0>;
+			qcom,cpr-down-threshold = <2>;
+
+			gfx_vreg: regulator {
+				regulator-name = "gfx_corner";
+				regulator-min-microvolt = <1>;
+				regulator-max-microvolt = <6>;
+
+				qcom,cpr-fuse-corners = <4>;
+				qcom,cpr-fuse-combos = <8>;
+				qcom,cpr-corners = <6>;
+
+				qcom,cpr-corner-fmax-map = <1 3 5 6>;
+
+				qcom,cpr-voltage-ceiling =
+					<896000  896000  896000  896000  896000
+					1032000>,
+					<672000  740000  800000  868000  976000
+					1100000>,
+					<672000  740000  800000  868000  976000
+					1100000>,
+					<672000  740000  800000  868000  976000
+					1100000>,
+					<672000  740000  800000  868000  976000
+					1100000>,
+					<672000  740000  800000  868000  976000
+					1100000>,
+					<672000  740000  800000  868000  976000
+					1100000>,
+					<672000  740000  800000  868000  976000
+					1100000>;
+
+				qcom,cpr-voltage-floor =
+					<896000  896000  896000  896000  896000
+					 896000>,
+					<528000  528000  568000  612000  664000
+					 752000>,
+					<528000  528000  568000  612000  664000
+					 752000>,
+					<528000  528000  568000  612000  664000
+					 752000>,
+					<528000  528000  568000  612000  664000
+					 752000>,
+					<528000  528000  568000  612000  664000
+					 752000>,
+					<528000  528000  568000  612000  664000
+					 752000>,
+					<528000  528000  568000  612000  664000
+					 752000>;
+
+				qcom,mem-acc-voltage = <1 1 1 2 2 2>;
+
+				qcom,corner-frequencies =
+					<171000000 251000000 332000000
+					 403000000 504000000 650000000>;
+
+				qcom,cpr-target-quotients =
+				      <   0    0  404  478  363  411  140  176
+					105    0    0    0    0    0    0    0>,
+				      <   0    0  574  651  532  584  266  319
+					196    0    0    0    0    0    0    0>,
+				      <   0    0  743  830  693  753  389  456
+					285    0    0    0    0    0    0    0>,
+				      <   0    0  879  977  829  893  495  570
+					365    0    0    0    0    0    0    0>,
+				      <   0    0 1168 1270 1097 1150    0    0
+					  0    0    0 1406  899  805    0    0>,
+				      <1669 1757    0    0    0    0    0    0
+					  0 1359 1902 1740    0 1033    0    0>;
+
+				qcom,cpr-ro-scaling-factor =
+				      <2389 2287 2985 3112 2873 2904 2159 2399
+				       1580 1602 2158 3042 2780 2069    0    0>,
+				      <2389 2287 2985 3112 2873 2904 2159 2399
+				       1580 1602 2158 3042 2780 2069    0    0>,
+				      <2389 2287 2985 3112 2873 2904 2159 2399
+				       1580 1602 2158 3042 2780 2069    0    0>,
+				      <2389 2287 2985 3112 2873 2904 2159 2399
+				       1580 1602 2158 3042 2780 2069    0    0>,
+				      <2389 2287 2985 3112 2873 2904 2159 2399
+				       1580 1602 2158 3042 2780 2069    0    0>,
+				      <2389 2287 2985 3112 2873 2904 2159 2399
+				       1580 1602 2158 3042 2780 2069    0    0>;
+
+				qcom,cpr-open-loop-voltage-fuse-adjustment =
+					<   72000        0        0        0>,
+					<   72000        0        0        0>,
+					<   72000        0        0        0>,
+					<   72000        0        0        0>,
+					<   72000        0        0        0>,
+					<   72000        0        0        0>,
+					<   72000        0        0        0>,
+					<   72000        0        0        0>;
+
+				qcom,cpr-closed-loop-voltage-adjustment =
+					<   65000    26000     8000        0
+						0        0>,
+					<   65000    26000     8000        0
+						0        0>,
+					<   65000    26000     8000        0
+						0        0>,
+					<   65000    26000     8000        0
+						0        0>,
+					<   65000    26000     8000        0
+						0        0>,
+					<   65000    26000     8000        0
+						0        0>,
+					<   65000    26000     8000        0
+						0        0>,
+					<   65000    26000     8000        0
+						0        0>;
+
+				qcom,cpr-floor-to-ceiling-max-range =
+				       <75000 75000 75000 75000 75000 75000>;
+
+			     qcom,cpr-fused-closed-loop-voltage-adjustment-map =
+					<0 0 1 2 3 4>;
+
+				qcom,allow-voltage-interpolation;
+				qcom,cpr-scaled-open-loop-voltage-as-ceiling;
+
+				qcom,cpr-aging-max-voltage-adjustment = <15000>;
+				qcom,cpr-aging-ref-corner = <6>;
+				qcom,cpr-aging-ro-scaling-factor = <2950>;
+				qcom,allow-aging-voltage-adjustment = <0>;
+				qcom,allow-aging-open-loop-voltage-adjustment =
+					<1>;
+			};
+		};
+	};
+
+	gfx_mem_acc_vreg: regulator@1fcf004 {
+		compatible = "qcom,mem-acc-regulator";
+		reg = <0x01fcf004 0x4>;
+		reg-names = "acc-sel-l1";
+		regulator-name = "gfx_mem_acc_corner";
+		regulator-min-microvolt = <1>;
+		regulator-max-microvolt = <2>;
+
+		qcom,corner-acc-map = <0x1 0x0>;
+		qcom,acc-sel-l1-bit-pos = <0>;
+		qcom,acc-sel-l1-bit-size = <1>;
+	};
+};
+
+&pmi8998_charger {
+	smb2_vbus: qcom,smb2-vbus {
+		regulator-name = "smb2-vbus";
+	};
+
+	smb2_vconn: qcom,smb2-vconn {
+		regulator-name = "smb2-vconn";
+	};
+};
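Every regulator defined above is consumed by phandle: a client node names the supply it needs and may constrain it within the window declared here, and the RPM aggregates all clients' votes. A minimal consumer sketch — the I2C bus label, device, and supply names are hypothetical, not taken from this tree:

&i2c_5 {
	temp-sensor@48 {
		compatible = "acme,example-sensor";	/* hypothetical device */
		reg = <0x48>;
		vdd-supply = <&pm8998_l6>;	/* 1.808 V LDO defined above */
		vio-supply = <&pm8998_l12>;	/* 1.8 V I/O rail defined above */
	};
};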
diff -Nruw linux-4.4.115-fbx/arch/arm/boot/dts/qcom./msm8998-sde-display.dtsi linux-4.4.115-fbx/arch/arm/boot/dts/qcom/msm8998-sde-display.dtsi
--- linux-4.4.115-fbx/arch/arm/boot/dts/qcom./msm8998-sde-display.dtsi	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/arch/arm/boot/dts/qcom/msm8998-sde-display.dtsi	2019-01-22 16:16:21.199225542 +0100
@@ -0,0 +1,75 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+&soc {
+
+	sde_wb: qcom,wb-display@0 {
+		compatible = "qcom,wb-display";
+		cell-index = <0>;
+		label = "wb_display";
+	};
+
+	msm_ext_disp: qcom,msm_ext_disp {
+		compatible = "qcom,msm-ext-disp";
+
+		ext_disp_audio_codec: qcom,msm-ext-disp-audio-codec-rx {
+			compatible = "qcom,msm-ext-disp-audio-codec-rx";
+			qcom,msm_ext_disp = <&msm_ext_disp>;
+		};
+	};
+
+	sde_hdmi: qcom,hdmi-display {
+		compatible = "qcom,hdmi-display";
+		label = "sde_hdmi";
+		qcom,display-type = "secondary";
+		qcom,msm_ext_disp = <&msm_ext_disp>;
+	};
+
+	sde_hdmi_cec: qcom,hdmi-cec@c9a0000 {
+		compatible = "qcom,hdmi-cec";
+		label = "sde_hdmi_cec";
+		qcom,hdmi-dev = <&sde_hdmi>;
+		interrupt-parent = <&sde_hdmi_tx>;
+		interrupts = <1 0>;
+
+		reg = <0xc9a0000 0x50c>;
+		reg-names = "hdmi_cec";
+
+		clocks = <&clock_mmss clk_mmss_mnoc_ahb_clk>,
+		       <&clock_mmss clk_mmss_mdss_ahb_clk>,
+		       <&clock_mmss clk_mmss_mdss_hdmi_clk>;
+		clock-names = "cec_mnoc_clk", "cec_iface_clk", "cec_core_clk";
+
+		pinctrl-names = "cec_active", "cec_sleep";
+		pinctrl-0 = <&mdss_hdmi_cec_active>;
+		pinctrl-1 = <&mdss_hdmi_cec_suspend>;
+
+		cec-gdsc-supply = <&gdsc_mdss>;
+		qcom,platform-supply-entries {
+			#address-cells = <1>;
+			#size-cells = <0>;
+
+			qcom,platform-supply-entry@0 {
+				reg = <0>;
+				qcom,supply-name = "cec-gdsc";
+				qcom,supply-min-voltage = <0>;
+				qcom,supply-max-voltage = <0>;
+				qcom,supply-enable-load = <0>;
+				qcom,supply-disable-load = <0>;
+			};
+		};
+	};
+};
+
+&sde_kms {
+	connectors = <&sde_hdmi_tx &sde_hdmi &sde_wb>;
+};
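A board variant without the HDMI output would disable the display node and shrink the connector list from its own .dts rather than editing this file; a sketch of such an override, using standard devicetree label references and status properties:

&sde_hdmi {
	status = "disabled";
};

&sde_kms {
	connectors = <&sde_wb>;
};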
diff -Nruw linux-4.4.115-fbx/arch/arm/boot/dts/qcom./msm8998-sde.dtsi linux-4.4.115-fbx/arch/arm/boot/dts/qcom/msm8998-sde.dtsi
--- linux-4.4.115-fbx/arch/arm/boot/dts/qcom./msm8998-sde.dtsi	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/arch/arm/boot/dts/qcom/msm8998-sde.dtsi	2019-01-22 16:16:21.199225542 +0100
@@ -0,0 +1,245 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+&soc {
+	sde_kms: qcom,sde_kms@c900000 {
+		compatible = "qcom,sde-kms";
+		reg = <0x0c900000 0x90000>,
+		      <0x0c9b0000 0x2008>;
+		reg-names = "mdp_phys", "vbif_phys";
+
+		/* clock and supply entries */
+		clocks = <&clock_gcc clk_mmssnoc_axi_clk>,
+			 <&clock_mmss clk_mmss_mnoc_ahb_clk>,
+			 <&clock_mmss clk_mmss_bimc_smmu_ahb_clk>,
+			 <&clock_mmss clk_mmss_bimc_smmu_axi_clk>,
+			 <&clock_mmss clk_mmss_mnoc_ahb_clk>,
+			 <&clock_mmss clk_mmss_mdss_ahb_clk>,
+			 <&clock_mmss clk_mmss_mdss_axi_clk>,
+			 <&clock_mmss clk_mdp_clk_src>,
+			 <&clock_mmss clk_mmss_mdss_mdp_clk>,
+			 <&clock_mmss clk_mmss_mdss_vsync_clk>,
+			 <&clock_mmss clk_mmss_mdss_mdp_lut_clk>;
+		clock-names = "mmss_noc_axi_clk",
+			      "mmss_noc_ahb_clk",
+			      "mmss_smmu_ahb_clk",
+			      "mmss_smmu_axi_clk",
+			      "mnoc_clk", "iface_clk", "bus_clk",
+			      "core_clk_src", "core_clk", "vsync_clk",
+			      "lut_clk";
+		clock-rate = <0 0 0 0 0 0 0 330000000 0 0 0 0>;
+		clock-max-rate = <0 0 0 0 0 0 412500000 412500000 0 0 0 0>;
+		qcom,sde-max-bw-low-kbps = <6700000>;
+		qcom,sde-max-bw-high-kbps = <6700000>;
+
+		/* interrupt config */
+		interrupt-parent = <&intc>;
+		interrupts = <0 83 0>;
+		interrupt-controller;
+		#interrupt-cells = <1>;
+		iommus = <&mmss_smmu 0>;
+
+		gpus = <&msm_gpu>;
+
+		/* hw blocks */
+		qcom,sde-off = <0x1000>;
+		qcom,sde-len = <0x458>;
+
+		qcom,sde-ctl-off = <0x2000 0x2200 0x2400
+				     0x2600 0x2800>;
+		qcom,sde-ctl-size = <0x94>;
+
+		qcom,sde-mixer-off = <0x45000 0x46000 0x47000
+				      0x48000 0x49000 0x4a000>;
+		qcom,sde-mixer-size = <0x31c>;
+
+		qcom,sde-dspp-off = <0x55000 0x57000>;
+		qcom,sde-dspp-size = <0x17e0>;
+
+		qcom,sde-wb-off = <0x66000>;
+		qcom,sde-wb-size = <0x2dc>;
+
+		qcom,sde-wb-id = <2>;
+		qcom,sde-wb-xin-id = <6>;
+		qcom,sde-wb-clk-ctrl = <0x2bc 0x10>;
+		qcom,sde-intf-off = <0x6b000 0x6b800
+					0x6c000 0x6c800>;
+		qcom,sde-intf-size = <0x280>;
+
+		qcom,sde-intf-type = "dp", "dsi", "dsi", "hdmi";
+
+		qcom,sde-pp-off = <0x71000 0x71800
+				0x72000 0x72800 0x73000>;
+		qcom,sde-pp-slave = <0x0 0x0 0x0 0x0 0x1>;
+
+		qcom,sde-pp-size = <0xd4>;
+
+		qcom,sde-te2-off = <0x2000 0x2000 0x0 0x0 0x0>;
+		qcom,sde-cdm-off = <0x7a200>;
+		qcom,sde-cdm-size = <0x224>;
+
+		qcom,sde-dsc-off = <0x81000 0x81400>;
+		qcom,sde-dsc-size = <0x140>;
+
+		qcom,sde-intf-max-prefetch-lines = <0x15 0x15 0x15 0x15>;
+
+		qcom,sde-sspp-type =  "vig", "vig", "vig", "vig",
+						"dma", "dma", "dma", "dma",
+						"cursor", "cursor";
+
+		qcom,sde-sspp-off = <0x5000 0x7000 0x9000 0xb000
+						0x25000 0x27000 0x29000 0x2b000
+						0x35000 0x37000>;
+		qcom,sde-sspp-src-size = <0x1ac>;
+
+		qcom,sde-sspp-xin-id = <0 4 8 12 1 5 9 13 2 10>;
+
+		/* offsets are relative to "mdp_phys" + "qcom,sde-off" */
+		qcom,sde-sspp-clk-ctrl = <0x2ac 0x8>, <0x2b4 0x8>,
+				  <0x2c4 0x8>, <0x2c4 0xc>, <0x3a8 0x10>,
+				  <0x3b0 0x10>;
+
+		qcom,sde-qseed-type = "qseedv3";
+		qcom,sde-mixer-linewidth = <2560>;
+		qcom,sde-sspp-linewidth = <2560>;
+		qcom,sde-mixer-blendstages = <0x7>;
+		qcom,sde-highest-bank-bit = <0x2>;
+		qcom,sde-panic-per-pipe;
+		qcom,sde-has-cdp;
+		qcom,sde-has-src-split;
+		qcom,sde-sspp-danger-lut = <0x000f 0xffff 0x0000>;
+		qcom,sde-sspp-safe-lut = <0xfffc 0xff00 0xffff>;
+
+		qcom,sde-vbif-off = <0>;
+		qcom,sde-vbif-id = <0>;
+		qcom,sde-vbif-default-ot-rd-limit = <32>;
+		qcom,sde-vbif-default-ot-wr-limit = <32>;
+		qcom,sde-vbif-dynamic-ot-rd-limit = <62208000 2>,
+			<124416000 4>, <248832000 16>;
+		qcom,sde-vbif-dynamic-ot-wr-limit = <62208000 2>,
+			<124416000 4>, <248832000 16>;
+
+		vdd-supply = <&gdsc_mdss>;
+		gdsc-mmagic-mdss-supply = <&gdsc_bimc_smmu>;
+		qcom,sde-csc-type = "csc-10bit";
+
+		qcom,sde-sspp-vig-blocks {
+			qcom,sde-vig-csc-off = <0x1a00>;
+			qcom,sde-vig-qseed-off = <0xa00>;
+			qcom,sde-vig-qseed-size = <0xa0>;
+		};
+
+		qcom,platform-supply-entries {
+			#address-cells = <1>;
+			#size-cells = <0>;
+
+			qcom,platform-supply-entry@0 {
+				reg = <0>;
+				qcom,supply-name = "gdsc-mmagic-mdss";
+				qcom,supply-min-voltage = <0>;
+				qcom,supply-max-voltage = <0>;
+				qcom,supply-enable-load = <0>;
+				qcom,supply-disable-load = <0>;
+			};
+
+			qcom,platform-supply-entry@1 {
+				reg = <1>;
+				qcom,supply-name = "vdd";
+				qcom,supply-min-voltage = <0>;
+				qcom,supply-max-voltage = <0>;
+				qcom,supply-enable-load = <0>;
+				qcom,supply-disable-load = <0>;
+			};
+		};
+
+		smmu_kms_unsec: qcom,smmu_kms_unsec_cb {
+			compatible = "qcom,smmu_sde_unsec";
+			iommus = <&mmss_smmu 0>;
+		};
+
+		smmu_kms_sec: qcom,smmu_kms_sec_cb {
+			compatible = "qcom,smmu_sde_sec";
+			iommus = <&mmss_smmu 1>;
+		};
+
+		/* data and reg bus scale settings */
+		qcom,sde-data-bus {
+			qcom,msm-bus,name = "mdss_sde";
+			qcom,msm-bus,num-cases = <3>;
+			qcom,msm-bus,num-paths = <2>;
+			qcom,msm-bus,vectors-KBps =
+				<22 512 0 0>, <23 512 0 0>,
+				<22 512 0 6400000>, <23 512 0 6400000>,
+				<22 512 0 6400000>, <23 512 0 6400000>;
+		};
+		qcom,sde-reg-bus {
+			qcom,msm-bus,name = "mdss_reg";
+			qcom,msm-bus,num-cases = <4>;
+			qcom,msm-bus,num-paths = <1>;
+			qcom,msm-bus,active-only;
+			qcom,msm-bus,vectors-KBps =
+				<1 590 0 0>,
+				<1 590 0 76800>,
+				<1 590 0 160000>,
+				<1 590 0 320000>;
+		};
+	};
+
+	sde_hdmi_tx: qcom,hdmi_tx_8998@c9a0000 {
+		cell-index = <0>;
+		compatible = "qcom,hdmi-tx-8998";
+		reg =	<0xc9a0000 0x50c>,
+			<0x780000 0x621c>,
+			<0xc9e0000 0x28>;
+		reg-names = "core_physical", "qfprom_physical", "hdcp_physical";
+		interrupt-parent = <&sde_kms>;
+		interrupts = <8 0>;
+		interrupt-controller;
+		#interrupt-cells = <1>;
+		qcom,hdmi-tx-ddc-clk-gpio = <&tlmm 32 0>;
+		qcom,hdmi-tx-ddc-data-gpio = <&tlmm 33 0>;
+		qcom,hdmi-tx-hpd-gpio = <&tlmm 34 0>;
+		qcom,hdmi-tx-hpd5v-gpio = <&tlmm 133 0>;
+		pinctrl-names = "default", "sleep";
+		pinctrl-0 = <&mdss_hdmi_hpd_active
+			&mdss_hdmi_ddc_active
+			&mdss_hdmi_5v_active>;
+		pinctrl-1 = <&mdss_hdmi_hpd_suspend
+			&mdss_hdmi_ddc_suspend
+			&mdss_hdmi_5v_suspend>;
+		hpd-gdsc-supply = <&gdsc_mdss>;
+		qcom,supply-names = "hpd-gdsc";
+		qcom,min-voltage-level = <0>;
+		qcom,max-voltage-level = <0>;
+		qcom,enable-load = <0>;
+		qcom,disable-load = <0>;
+
+		clocks = <&clock_mmss clk_mmss_mnoc_ahb_clk>,
+			 <&clock_mmss clk_mmss_mdss_ahb_clk>,
+			 <&clock_mmss clk_mmss_mdss_hdmi_clk>,
+			 <&clock_mmss clk_mmss_mdss_mdp_clk>,
+			 <&clock_mmss clk_mmss_mdss_hdmi_dp_ahb_clk>,
+			 <&clock_mmss clk_mmss_mdss_extpclk_clk>,
+			 <&clock_mmss clk_mmss_mnoc_ahb_clk>,
+			 <&clock_mmss clk_mmss_misc_ahb_clk>,
+			 <&clock_mmss clk_mmss_mdss_axi_clk>;
+		clock-names = "hpd_mnoc_clk", "hpd_iface_clk",
+				"hpd_core_clk", "hpd_mdp_core_clk",
+				"hpd_alt_iface_clk", "core_extp_clk",
+				"mnoc_clk", "hpd_misc_ahb_clk",
+				"hpd_bus_clk";
+
+		/*qcom,mdss-fb-map = <&mdss_fb2>;*/
+		qcom,pluggable;
+	};
+};
+#include "msm8998-sde-display.dtsi"
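In the bus-scaling tables above, each qcom,msm-bus,vectors-KBps entry is a <master-id slave-id ab ib> quadruple (arbitrated and instantaneous bandwidth in KBps), with num-paths quadruples per row and num-cases rows. Adding a fifth vote level to the register bus would therefore look like the sketch below — the 640000 figure is illustrative only:

		qcom,sde-reg-bus {
			qcom,msm-bus,name = "mdss_reg";
			qcom,msm-bus,num-cases = <5>;
			qcom,msm-bus,num-paths = <1>;
			qcom,msm-bus,active-only;
			qcom,msm-bus,vectors-KBps =
				<1 590 0 0>,
				<1 590 0 76800>,
				<1 590 0 160000>,
				<1 590 0 320000>,
				<1 590 0 640000>;	/* illustrative extra level */
		};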
diff -Nruw linux-4.4.115-fbx/arch/arm/boot/dts/qcom./msm8998-smp2p.dtsi linux-4.4.115-fbx/arch/arm/boot/dts/qcom/msm8998-smp2p.dtsi
--- linux-4.4.115-fbx/arch/arm/boot/dts/qcom./msm8998-smp2p.dtsi	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/arch/arm/boot/dts/qcom/msm8998-smp2p.dtsi	2019-01-22 16:16:21.199225542 +0100
@@ -0,0 +1,267 @@
+/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+&soc {
+	qcom,smp2p-modem@17911008 {
+		compatible = "qcom,smp2p";
+		reg = <0x17911008 0x4>;
+		qcom,remote-pid = <1>;
+		qcom,irq-bitmask = <0x4000>;
+		interrupts = <0 451 1>;
+	};
+
+	qcom,smp2p-adsp@17911008 {
+		compatible = "qcom,smp2p";
+		reg = <0x17911008 0x4>;
+		qcom,remote-pid = <2>;
+		qcom,irq-bitmask = <0x400>;
+		interrupts = <0 158 1>;
+	};
+
+	qcom,smp2p-dsps@17911008 {
+		compatible = "qcom,smp2p";
+		reg = <0x17911008 0x4>;
+		qcom,remote-pid = <3>;
+		qcom,irq-bitmask = <0x4000000>;
+		interrupts = <0 178 1>;
+	};
+
+	smp2pgpio_smp2p_15_in: qcom,smp2pgpio-smp2p-15-in {
+		compatible = "qcom,smp2pgpio";
+		qcom,entry-name = "smp2p";
+		qcom,remote-pid = <15>;
+		qcom,is-inbound;
+		gpio-controller;
+		#gpio-cells = <2>;
+		interrupt-controller;
+		#interrupt-cells = <2>;
+	};
+
+	qcom,smp2pgpio_test_smp2p_15_in {
+		compatible = "qcom,smp2pgpio_test_smp2p_15_in";
+		gpios = <&smp2pgpio_smp2p_15_in 0 0>;
+	};
+
+	smp2pgpio_smp2p_15_out: qcom,smp2pgpio-smp2p-15-out {
+		compatible = "qcom,smp2pgpio";
+		qcom,entry-name = "smp2p";
+		qcom,remote-pid = <15>;
+		gpio-controller;
+		#gpio-cells = <2>;
+		interrupt-controller;
+		#interrupt-cells = <2>;
+	};
+
+	qcom,smp2pgpio_test_smp2p_15_out {
+		compatible = "qcom,smp2pgpio_test_smp2p_15_out";
+		gpios = <&smp2pgpio_smp2p_15_out 0 0>;
+	};
+
+	smp2pgpio_smp2p_1_in: qcom,smp2pgpio-smp2p-1-in {
+		compatible = "qcom,smp2pgpio";
+		qcom,entry-name = "smp2p";
+		qcom,remote-pid = <1>;
+		qcom,is-inbound;
+		gpio-controller;
+		#gpio-cells = <2>;
+		interrupt-controller;
+		#interrupt-cells = <2>;
+	};
+
+	qcom,smp2pgpio_test_smp2p_1_in {
+		compatible = "qcom,smp2pgpio_test_smp2p_1_in";
+		gpios = <&smp2pgpio_smp2p_1_in 0 0>;
+	};
+
+	smp2pgpio_smp2p_1_out: qcom,smp2pgpio-smp2p-1-out {
+		compatible = "qcom,smp2pgpio";
+		qcom,entry-name = "smp2p";
+		qcom,remote-pid = <1>;
+		gpio-controller;
+		#gpio-cells = <2>;
+		interrupt-controller;
+		#interrupt-cells = <2>;
+	};
+
+	qcom,smp2pgpio_test_smp2p_1_out {
+		compatible = "qcom,smp2pgpio_test_smp2p_1_out";
+		gpios = <&smp2pgpio_smp2p_1_out 0 0>;
+	};
+
+	smp2pgpio_smp2p_2_in: qcom,smp2pgpio-smp2p-2-in {
+		compatible = "qcom,smp2pgpio";
+		qcom,entry-name = "smp2p";
+		qcom,remote-pid = <2>;
+		qcom,is-inbound;
+		gpio-controller;
+		#gpio-cells = <2>;
+		interrupt-controller;
+		#interrupt-cells = <2>;
+	};
+
+	qcom,smp2pgpio_test_smp2p_2_in {
+		compatible = "qcom,smp2pgpio_test_smp2p_2_in";
+		gpios = <&smp2pgpio_smp2p_2_in 0 0>;
+	};
+
+	smp2pgpio_smp2p_2_out: qcom,smp2pgpio-smp2p-2-out {
+		compatible = "qcom,smp2pgpio";
+		qcom,entry-name = "smp2p";
+		qcom,remote-pid = <2>;
+		gpio-controller;
+		#gpio-cells = <2>;
+		interrupt-controller;
+		#interrupt-cells = <2>;
+	};
+
+	qcom,smp2pgpio_test_smp2p_2_out {
+		compatible = "qcom,smp2pgpio_test_smp2p_2_out";
+		gpios = <&smp2pgpio_smp2p_2_out 0 0>;
+	};
+
+	smp2pgpio_smp2p_3_in: qcom,smp2pgpio-smp2p-3-in {
+		compatible = "qcom,smp2pgpio";
+		qcom,entry-name = "smp2p";
+		qcom,remote-pid = <3>;
+		qcom,is-inbound;
+		gpio-controller;
+		#gpio-cells = <2>;
+		interrupt-controller;
+		#interrupt-cells = <2>;
+	};
+
+	qcom,smp2pgpio_test_smp2p_3_in {
+		compatible = "qcom,smp2pgpio_test_smp2p_3_in";
+		gpios = <&smp2pgpio_smp2p_3_in 0 0>;
+	};
+
+	smp2pgpio_smp2p_3_out: qcom,smp2pgpio-smp2p-3-out {
+		compatible = "qcom,smp2pgpio";
+		qcom,entry-name = "smp2p";
+		qcom,remote-pid = <3>;
+		gpio-controller;
+		#gpio-cells = <2>;
+		interrupt-controller;
+		#interrupt-cells = <2>;
+	};
+
+	/* ssr - inbound entry from mss */
+	smp2pgpio_ssr_smp2p_1_in: qcom,smp2pgpio-ssr-smp2p-1-in {
+		compatible = "qcom,smp2pgpio";
+		qcom,entry-name = "slave-kernel";
+		qcom,remote-pid = <1>;
+		qcom,is-inbound;
+		gpio-controller;
+		#gpio-cells = <2>;
+		interrupt-controller;
+		#interrupt-cells = <2>;
+	};
+
+	/* ssr - outbound entry to mss */
+	smp2pgpio_ssr_smp2p_1_out: qcom,smp2pgpio-ssr-smp2p-1-out {
+		compatible = "qcom,smp2pgpio";
+		qcom,entry-name = "master-kernel";
+		qcom,remote-pid = <1>;
+		gpio-controller;
+		#gpio-cells = <2>;
+		interrupt-controller;
+		#interrupt-cells = <2>;
+	};
+
+	/* ssr - inbound entry from lpass */
+	smp2pgpio_ssr_smp2p_2_in: qcom,smp2pgpio-ssr-smp2p-2-in {
+		compatible = "qcom,smp2pgpio";
+		qcom,entry-name = "slave-kernel";
+		qcom,remote-pid = <2>;
+		qcom,is-inbound;
+		gpio-controller;
+		#gpio-cells = <2>;
+		interrupt-controller;
+		#interrupt-cells = <2>;
+	};
+
+	/* ssr - outbound entry to lpass */
+	smp2pgpio_ssr_smp2p_2_out: qcom,smp2pgpio-ssr-smp2p-2-out {
+		compatible = "qcom,smp2pgpio";
+		qcom,entry-name = "master-kernel";
+		qcom,remote-pid = <2>;
+		gpio-controller;
+		#gpio-cells = <2>;
+		interrupt-controller;
+		#interrupt-cells = <2>;
+	};
+
+	/* ssr - inbound entry from ssc */
+	smp2pgpio_ssr_smp2p_3_in: qcom,smp2pgpio-ssr-smp2p-3-in {
+		compatible = "qcom,smp2pgpio";
+		qcom,entry-name = "slave-kernel";
+		qcom,remote-pid = <3>;
+		qcom,is-inbound;
+		gpio-controller;
+		#gpio-cells = <2>;
+		interrupt-controller;
+		#interrupt-cells = <2>;
+	};
+
+	/* ssr - outbound entry to ssc */
+	smp2pgpio_ssr_smp2p_3_out: qcom,smp2pgpio-ssr-smp2p-3-out {
+		compatible = "qcom,smp2pgpio";
+		qcom,entry-name = "master-kernel";
+		qcom,remote-pid = <3>;
+		gpio-controller;
+		#gpio-cells = <2>;
+		interrupt-controller;
+		#interrupt-cells = <2>;
+	};
+
+	qcom,smp2pgpio_test_smp2p_3_out {
+		compatible = "qcom,smp2pgpio_test_smp2p_3_out";
+		gpios = <&smp2pgpio_smp2p_3_out 0 0>;
+	};
+
+	smp2pgpio_sleepstate_3_out: qcom,smp2pgpio-sleepstate-gpio-3-out {
+		compatible = "qcom,smp2pgpio";
+		qcom,entry-name = "sleepstate";
+		qcom,remote-pid = <3>;
+		gpio-controller;
+		#gpio-cells = <2>;
+		interrupt-controller;
+		#interrupt-cells = <2>;
+	};
+
+	qcom,smp2pgpio-sleepstate-3-out {
+		compatible = "qcom,smp2pgpio_sleepstate_3_out";
+		gpios = <&smp2pgpio_sleepstate_3_out 0 0>;
+	};
+
+	/* ipa - outbound entry to mss */
+	smp2pgpio_ipa_1_out: qcom,smp2pgpio-ipa-1-out {
+		compatible = "qcom,smp2pgpio";
+		qcom,entry-name = "ipa";
+		qcom,remote-pid = <1>;
+		gpio-controller;
+		#gpio-cells = <2>;
+		interrupt-controller;
+		#interrupt-cells = <2>;
+	};
+
+	/* ipa - inbound entry from mss */
+	smp2pgpio_ipa_1_in: qcom,smp2pgpio-ipa-1-in {
+		compatible = "qcom,smp2pgpio";
+		qcom,entry-name = "ipa";
+		qcom,remote-pid = <1>;
+		qcom,is-inbound;
+		gpio-controller;
+		#gpio-cells = <2>;
+		interrupt-controller;
+		#interrupt-cells = <2>;
+	};
+};
diff -Nruw linux-4.4.115-fbx/arch/arm/boot/dts/qcom./msm8998-v2.1.dtsi linux-4.4.115-fbx/arch/arm/boot/dts/qcom/msm8998-v2.1.dtsi
--- linux-4.4.115-fbx/arch/arm/boot/dts/qcom./msm8998-v2.1.dtsi	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/arch/arm/boot/dts/qcom/msm8998-v2.1.dtsi	2019-01-22 16:16:21.199225542 +0100
@@ -0,0 +1,18 @@
+/* Copyright (c) 2016 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include "msm8998-v2.dtsi"
+
+/ {
+	model = "Qualcomm Technologies, Inc. MSM8998 v2.1";
+	qcom,msm-id = <292 0x20001>;
+};
diff -Nruw linux-4.4.115-fbx/arch/arm/boot/dts/qcom./msm8998-v2-camera.dtsi linux-4.4.115-fbx/arch/arm/boot/dts/qcom/msm8998-v2-camera.dtsi
--- linux-4.4.115-fbx/arch/arm/boot/dts/qcom./msm8998-v2-camera.dtsi	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/arch/arm/boot/dts/qcom/msm8998-v2-camera.dtsi	2019-01-22 16:16:21.199225542 +0100
@@ -0,0 +1,192 @@
+/*
+ * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+&soc {
+	qcom,csiphy@ca34000 {
+		cell-index = <0>;
+		compatible = "qcom,csiphy-v5.01", "qcom,csiphy";
+		reg = <0xca34000 0x1000>;
+		reg-names = "csiphy";
+		interrupts = <0 78 0>;
+		interrupt-names = "csiphy";
+		gdscr-supply = <&gdsc_camss_top>;
+		bimc_smmu-supply = <&gdsc_bimc_smmu>;
+		qcom,cam-vreg-name = "gdscr", "bimc_smmu";
+		clocks = <&clock_gcc clk_mmssnoc_axi_clk>,
+			<&clock_mmss clk_mmss_mnoc_ahb_clk>,
+			<&clock_mmss clk_mmss_bimc_smmu_ahb_clk>,
+			<&clock_mmss clk_mmss_bimc_smmu_axi_clk>,
+			<&clock_mmss clk_mmss_camss_ahb_clk>,
+			<&clock_mmss clk_mmss_camss_top_ahb_clk>,
+			<&clock_mmss clk_csi0_clk_src>,
+			<&clock_mmss clk_mmss_camss_csi0_clk>,
+			<&clock_mmss clk_mmss_camss_cphy_csid0_clk>,
+			<&clock_mmss clk_csi0phytimer_clk_src>,
+			<&clock_mmss clk_mmss_camss_csi0phytimer_clk>,
+			<&clock_mmss clk_mmss_camss_ispif_ahb_clk>,
+			<&clock_mmss clk_csiphy_clk_src>,
+			<&clock_mmss clk_mmss_camss_csiphy0_clk>;
+		clock-names = "mmssnoc_axi", "mnoc_ahb",
+			"bmic_smmu_ahb", "bmic_smmu_axi",
+			"camss_ahb_clk", "camss_top_ahb_clk",
+			"csi_src_clk", "csi_clk", "cphy_csid_clk",
+			"csiphy_timer_src_clk", "csiphy_timer_clk",
+			"camss_ispif_ahb_clk", "csiphy_clk_src", "csiphy_clk";
+		qcom,clock-rates = <0 0 0 0 0 0 274290000 0 0 200000000 0
+			0 274290000 0>;
+		status = "ok";
+	};
+
+	qcom,csiphy@ca35000 {
+		cell-index = <1>;
+		compatible = "qcom,csiphy-v5.01", "qcom,csiphy";
+		reg = <0xca35000 0x1000>;
+		reg-names = "csiphy";
+		interrupts = <0 79 0>;
+		interrupt-names = "csiphy";
+		gdscr-supply = <&gdsc_camss_top>;
+		bimc_smmu-supply = <&gdsc_bimc_smmu>;
+		qcom,cam-vreg-name = "gdscr", "bimc_smmu";
+		clocks = <&clock_gcc clk_mmssnoc_axi_clk>,
+			<&clock_mmss clk_mmss_mnoc_ahb_clk>,
+			<&clock_mmss clk_mmss_bimc_smmu_ahb_clk>,
+			<&clock_mmss clk_mmss_bimc_smmu_axi_clk>,
+			<&clock_mmss clk_mmss_camss_ahb_clk>,
+			<&clock_mmss clk_mmss_camss_top_ahb_clk>,
+			<&clock_mmss clk_csi1_clk_src>,
+			<&clock_mmss clk_mmss_camss_csi1_clk>,
+			<&clock_mmss clk_mmss_camss_cphy_csid1_clk>,
+			<&clock_mmss clk_csi1phytimer_clk_src>,
+			<&clock_mmss clk_mmss_camss_csi1phytimer_clk>,
+			<&clock_mmss clk_mmss_camss_ispif_ahb_clk>,
+			<&clock_mmss clk_csiphy_clk_src>,
+			<&clock_mmss clk_mmss_camss_csiphy1_clk>;
+		clock-names = "mmssnoc_axi", "mnoc_ahb",
+			"bmic_smmu_ahb", "bmic_smmu_axi",
+			"camss_ahb_clk", "camss_top_ahb_clk",
+			"csi_src_clk", "csi_clk", "cphy_csid_clk",
+			"csiphy_timer_src_clk", "csiphy_timer_clk",
+			"camss_ispif_ahb_clk", "csiphy_clk_src", "csiphy_clk";
+		qcom,clock-rates = <0 0 0 0 0 0 274290000 0 0 200000000 0
+			0 274290000 0>;
+		status = "ok";
+	};
+
+	qcom,csiphy@ca36000 {
+		cell-index = <2>;
+		compatible = "qcom,csiphy-v5.01", "qcom,csiphy";
+		reg = <0xca36000 0x1000>;
+		reg-names = "csiphy";
+		interrupts = <0 80 0>;
+		interrupt-names = "csiphy";
+		gdscr-supply = <&gdsc_camss_top>;
+		bimc_smmu-supply = <&gdsc_bimc_smmu>;
+		qcom,cam-vreg-name = "gdscr", "bimc_smmu";
+		clocks = <&clock_gcc clk_mmssnoc_axi_clk>,
+			<&clock_mmss clk_mmss_mnoc_ahb_clk>,
+			<&clock_mmss clk_mmss_bimc_smmu_ahb_clk>,
+			<&clock_mmss clk_mmss_bimc_smmu_axi_clk>,
+			<&clock_mmss clk_mmss_camss_ahb_clk>,
+			<&clock_mmss clk_mmss_camss_top_ahb_clk>,
+			<&clock_mmss clk_csi2_clk_src>,
+			<&clock_mmss clk_mmss_camss_csi2_clk>,
+			<&clock_mmss clk_mmss_camss_cphy_csid2_clk>,
+			<&clock_mmss clk_csi2phytimer_clk_src>,
+			<&clock_mmss clk_mmss_camss_csi2phytimer_clk>,
+			<&clock_mmss clk_mmss_camss_ispif_ahb_clk>,
+			<&clock_mmss clk_csiphy_clk_src>,
+			<&clock_mmss clk_mmss_camss_csiphy2_clk>;
+		clock-names = "mmssnoc_axi", "mnoc_ahb",
+			"bmic_smmu_ahb", "bmic_smmu_axi",
+			"camss_ahb_clk", "camss_top_ahb_clk",
+			"csi_src_clk", "csi_clk", "cphy_csid_clk",
+			"csiphy_timer_src_clk", "csiphy_timer_clk",
+			"camss_ispif_ahb_clk", "csiphy_clk_src", "csiphy_clk";
+		qcom,clock-rates = <0 0 0 0 0 0 274290000 0 0 200000000 0
+			0 274290000 0>;
+		status = "ok";
+	};
+
+	qcom,cpp@ca04000 {
+		cell-index = <0>;
+		compatible = "qcom,cpp";
+		reg = <0xca04000 0x100>,
+			<0xca80000 0x3000>,
+			<0xca18000 0x3000>,
+			<0xc8c36d4 0x4>;
+		reg-names = "cpp", "cpp_vbif", "cpp_hw", "camss_cpp";
+		interrupts = <0 294 0>;
+		interrupt-names = "cpp";
+		smmu-vdd-supply = <&gdsc_bimc_smmu>;
+		camss-vdd-supply = <&gdsc_camss_top>;
+		vdd-supply = <&gdsc_cpp>;
+		qcom,vdd-names = "smmu-vdd", "camss-vdd", "vdd";
+		clocks = <&clock_gcc clk_mmssnoc_axi_clk>,
+			<&clock_mmss clk_mmss_mnoc_ahb_clk>,
+			<&clock_mmss clk_mmss_camss_ahb_clk>,
+			<&clock_mmss clk_mmss_camss_top_ahb_clk>,
+			<&clock_mmss clk_cpp_clk_src>,
+			<&clock_mmss clk_mmss_camss_cpp_clk>,
+			<&clock_mmss clk_mmss_camss_cpp_ahb_clk>,
+			<&clock_mmss clk_mmss_camss_cpp_axi_clk>,
+			<&clock_mmss clk_mmss_camss_micro_ahb_clk>,
+			<&clock_mmss clk_mmss_bimc_smmu_axi_clk>,
+			<&clock_mmss clk_mmss_camss_cpp_vbif_ahb_clk>;
+		clock-names = "mmssnoc_axi_clk",
+			"mnoc_ahb_clk",
+			"camss_ahb_clk", "camss_top_ahb_clk",
+			"cpp_src_clk",
+			"cpp_core_clk", "camss_cpp_ahb_clk",
+			"camss_cpp_axi_clk", "micro_iface_clk",
+			"mmss_smmu_axi_clk", "cpp_vbif_ahb_clk";
+		qcom,clock-rates = <0 0 0 0 200000000 200000000 0 0 0 0 0>;
+		qcom,min-clock-rate = <200000000>;
+		qcom,bus-master = <1>;
+		qcom,vbif-qos-setting = <0x20 0x10000000>,
+			<0x24 0x10000000>,
+			<0x28 0x10000000>,
+			<0x2C 0x10000000>;
+		status = "ok";
+		qcom,msm-bus,name = "msm_camera_cpp";
+		qcom,msm-bus,num-cases = <2>;
+		qcom,msm-bus,num-paths = <1>;
+		qcom,msm-bus,vectors-KBps =
+			<106 512 0 0>,
+			<106 512 0 0>;
+		qcom,msm-bus-vector-dyn-vote;
+		resets = <&clock_mmss CAMSS_MICRO_BCR>;
+		reset-names = "micro_iface_reset";
+		qcom,src-clock-rates = <100000000 200000000 384000000 404000000
+			480000000 576000000 600000000>;
+		qcom,micro-reset;
+		qcom,cpp-fw-payload-info {
+			qcom,stripe-base = <790>;
+			qcom,plane-base = <715>;
+			qcom,stripe-size = <63>;
+			qcom,plane-size = <25>;
+			qcom,fe-ptr-off = <11>;
+			qcom,we-ptr-off = <23>;
+			qcom,ref-fe-ptr-off = <17>;
+			qcom,ref-we-ptr-off = <36>;
+			qcom,we-meta-ptr-off = <42>;
+			qcom,fe-mmu-pf-ptr-off = <7>;
+			qcom,ref-fe-mmu-pf-ptr-off = <10>;
+			qcom,we-mmu-pf-ptr-off = <13>;
+			qcom,dup-we-mmu-pf-ptr-off = <18>;
+			qcom,ref-we-mmu-pf-ptr-off = <23>;
+			qcom,set-group-buffer-len = <135>;
+			qcom,dup-frame-indicator-off = <70>;
+		};
+	};
+};
+
diff -Nruw linux-4.4.115-fbx/arch/arm/boot/dts/qcom./msm8998-v2.dtsi linux-4.4.115-fbx/arch/arm/boot/dts/qcom/msm8998-v2.dtsi
--- linux-4.4.115-fbx/arch/arm/boot/dts/qcom./msm8998-v2.dtsi	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/arch/arm/boot/dts/qcom/msm8998-v2.dtsi	2019-01-22 16:16:21.203225578 +0100
@@ -0,0 +1,1312 @@
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/*
+ * As a general rule, only version-specific property overrides should be placed
+ * inside this file. Common device definitions should be placed inside the
+ * msm8998.dtsi file.
+ */
+
+#include "msm8998.dtsi"
+#include "msm8998-v2-camera.dtsi"
+
+/ {
+	model = "Qualcomm Technologies, Inc. MSM8998 v2";
+	qcom,msm-id = <292 0x20000>;
+};
+
+&clock_cpu {
+	compatible = "qcom,cpu-clock-osm-msm8998-v2";
+	reg = <0x179c0000 0x4000>,
+	      <0x17916000 0x1000>,
+	      <0x17816000 0x1000>,
+	      <0x179d1000 0x1000>,
+	      <0x17914800 0x800>,
+	      <0x17814800 0x800>,
+	      <0x00784130 0x8>,
+	      <0x1791101c 0x8>;
+	reg-names = "osm", "pwrcl_pll", "perfcl_pll",
+		    "apcs_common", "pwrcl_acd", "perfcl_acd",
+		    "perfcl_efuse", "debug";
+
+	qcom,acdtd-val = <0x00009611 0x00009611>;
+	qcom,acdcr-val = <0x002b5ffd 0x002b5ffd>;
+	qcom,acdsscr-val = <0x00000501 0x00000501>;
+	qcom,acdextint0-val = <0x2cf9ae8 0x2cf9ae8>;
+	qcom,acdextint1-val = <0x2cf9afe 0x2cf9afe>;
+	qcom,acdautoxfer-val = <0x00000015 0x00000015>;
+	qcom,pwrcl-apcs-mem-acc-threshold-voltage = <852000>;
+	qcom,perfcl-apcs-mem-acc-threshold-voltage = <852000>;
+	qcom,apm-threshold-voltage = <800000>;
+
+	/delete-property/ qcom,llm-sw-overr;
+	qcom,pwrcl-speedbin0-v0 =
+		<   300000000 0x0004000f 0x01200020 0x1 1 >,
+		<   364800000 0x05040013 0x01200020 0x1 2 >,
+		<   441600000 0x05040017 0x02200020 0x1 3 >,
+		<   518400000 0x0504001b 0x02200020 0x1 4 >,
+		<   595200000 0x0504001f 0x02200020 0x1 5 >,
+		<   672000000 0x05040023 0x03200020 0x1 6 >,
+		<   748800000 0x05040027 0x03200020 0x1 7 >,
+		<   825600000 0x0404002b 0x03220022 0x1 8 >,
+		<   883200000 0x0404002e 0x04250025 0x1 9 >,
+		<   960000000 0x04040032 0x04280028 0x1 10 >,
+		<  1036800000 0x04040036 0x042b002b 0x1 11 >,
+		<  1094400000 0x04040039 0x052e002e 0x2 12 >,
+		<  1171200000 0x0404003d 0x05310031 0x2 13 >,
+		<  1248000000 0x04040041 0x05340034 0x2 14 >,
+		<  1324800000 0x04040045 0x06370037 0x2 15 >,
+		<  1401600000 0x04040049 0x063a003a 0x2 16 >,
+		<  1478400000 0x0404004d 0x073e003e 0x2 17 >,
+		<  1555200000 0x04040051 0x07410041 0x2 18 >,
+		<  1670400000 0x04040057 0x08460046 0x2 19 >,
+		<  1747200000 0x0404005b 0x08490049 0x2 20 >,
+		<  1824000000 0x0404005f 0x084c004c 0x3 21 >,
+		<  1900800000 0x04040063 0x094f004f 0x3 22 >;
+
+	qcom,perfcl-speedbin0-v0 =
+		<   300000000 0x0004000f 0x01200020 0x1 1 >,
+		<   345600000 0x05040012 0x01200020 0x1 2 >,
+		<   422400000 0x05040016 0x02200020 0x1 3 >,
+		<   499200000 0x0504001a 0x02200020 0x1 4 >,
+		<   576000000 0x0504001e 0x02200020 0x1 5 >,
+		<   652800000 0x05040022 0x03200020 0x1 6 >,
+		<   729600000 0x05040026 0x03200020 0x1 7 >,
+		<   806400000 0x0504002a 0x03220022 0x1 8 >,
+		<   902400000 0x0404002f 0x04260026 0x1 9 >,
+		<   979200000 0x04040033 0x04290029 0x1 10 >,
+		<  1056000000 0x04040037 0x052c002c 0x1 11 >,
+		<  1132800000 0x0404003b 0x052f002f 0x1 12 >,
+		<  1190400000 0x0404003e 0x05320032 0x2 13 >,
+		<  1267200000 0x04040042 0x06350035 0x2 14 >,
+		<  1344000000 0x04040046 0x06380038 0x2 15 >,
+		<  1420800000 0x0404004a 0x063b003b 0x2 16 >,
+		<  1497600000 0x0404004e 0x073e003e 0x2 17 >,
+		<  1574400000 0x04040052 0x07420042 0x2 18 >,
+		<  1651200000 0x04040056 0x07450045 0x2 19 >,
+		<  1728000000 0x0404005a 0x08480048 0x2 20 >,
+		<  1804800000 0x0404005e 0x084b004b 0x2 21 >,
+		<  1881600000 0x04040062 0x094e004e 0x2 22 >,
+		<  1958400000 0x04040066 0x09520052 0x2 23 >,
+		<  2035200000 0x0404006a 0x09550055 0x3 24 >,
+		<  2112000000 0x0404006e 0x0a580058 0x3 25 >,
+		<  2208000000 0x04040073 0x0a5c005c 0x3 26 >,
+		<  2265600000 0x04010076 0x0a5e005e 0x3 26 >,
+		<  2265600000 0x04040076 0x0a5e005e 0x3 27 >,
+		<  2342400000 0x0401007a 0x0a620062 0x3 27 >,
+		<  2342400000 0x0404007a 0x0a620062 0x3 28 >,
+		<  2419200000 0x0401007e 0x0a650065 0x3 28 >,
+		<  2419200000 0x0404007e 0x0a650065 0x3 29 >,
+		<  2496000000 0x04010082 0x0a680068 0x3 29 >,
+		<  2457600000 0x04040080 0x0a660066 0x3 30 >,
+		<  2553600000 0x04010085 0x0a6a006a 0x3 30 >,
+		<  2476800000 0x04040081 0x0a670067 0x3 31 >,
+		<  2572800000 0x04010086 0x0a6b006b 0x3 31 >,
+		<  2496000000 0x04040082 0x0a680068 0x3 32 >,
+		<  2592000000 0x04010087 0x0a6c006c 0x3 32 >;
+
+	qcom,perfcl-speedbin1-v0 =
+		<   300000000 0x0004000f 0x01200020 0x1 1 >,
+		<   345600000 0x05040012 0x01200020 0x1 2 >,
+		<   422400000 0x05040016 0x02200020 0x1 3 >,
+		<   499200000 0x0504001a 0x02200020 0x1 4 >,
+		<   576000000 0x0504001e 0x02200020 0x1 5 >,
+		<   652800000 0x05040022 0x03200020 0x1 6 >,
+		<   729600000 0x05040026 0x03200020 0x1 7 >,
+		<   806400000 0x0504002a 0x03220022 0x1 8 >,
+		<   902400000 0x0404002f 0x04260026 0x1 9 >,
+		<   979200000 0x04040033 0x04290029 0x1 10 >,
+		<  1056000000 0x04040037 0x052c002c 0x1 11 >,
+		<  1132800000 0x0404003b 0x052f002f 0x1 12 >,
+		<  1190400000 0x0404003e 0x05320032 0x2 13 >,
+		<  1267200000 0x04040042 0x06350035 0x2 14 >,
+		<  1344000000 0x04040046 0x06380038 0x2 15 >,
+		<  1420800000 0x0404004a 0x063b003b 0x2 16 >,
+		<  1497600000 0x0404004e 0x073e003e 0x2 17 >,
+		<  1574400000 0x04040052 0x07420042 0x2 18 >,
+		<  1651200000 0x04040056 0x07450045 0x2 19 >,
+		<  1728000000 0x0404005a 0x08480048 0x2 20 >,
+		<  1804800000 0x0404005e 0x084b004b 0x2 21 >,
+		<  1881600000 0x04040062 0x094e004e 0x2 22 >,
+		<  1958400000 0x04040066 0x09520052 0x2 23 >,
+		<  2035200000 0x0404006a 0x09550055 0x3 24 >,
+		<  2112000000 0x0404006e 0x0a580058 0x3 25 >,
+		<  2208000000 0x04040073 0x0a5c005c 0x3 26 >,
+		<  2304000000 0x04010078 0x0a600060 0x3 26 >;
+
+	qcom,perfcl-speedbin2-v0 =
+		<   300000000 0x0004000f 0x01200020 0x1 1 >,
+		<   345600000 0x05040012 0x01200020 0x1 2 >,
+		<   422400000 0x05040016 0x02200020 0x1 3 >,
+		<   499200000 0x0504001a 0x02200020 0x1 4 >,
+		<   576000000 0x0504001e 0x02200020 0x1 5 >,
+		<   652800000 0x05040022 0x03200020 0x1 6 >,
+		<   729600000 0x05040026 0x03200020 0x1 7 >,
+		<   806400000 0x0504002a 0x03220022 0x1 8 >,
+		<   902400000 0x0404002f 0x04260026 0x1 9 >,
+		<   979200000 0x04040033 0x04290029 0x1 10 >,
+		<  1056000000 0x04040037 0x052c002c 0x1 11 >,
+		<  1132800000 0x0404003b 0x052f002f 0x1 12 >,
+		<  1190400000 0x0404003e 0x05320032 0x2 13 >,
+		<  1267200000 0x04040042 0x06350035 0x2 14 >,
+		<  1344000000 0x04040046 0x06380038 0x2 15 >,
+		<  1420800000 0x0404004a 0x063b003b 0x2 16 >,
+		<  1497600000 0x0404004e 0x073e003e 0x2 17 >,
+		<  1574400000 0x04040052 0x07420042 0x2 18 >,
+		<  1651200000 0x04040056 0x07450045 0x2 19 >,
+		<  1728000000 0x0404005a 0x08480048 0x2 20 >,
+		<  1804800000 0x0404005e 0x084b004b 0x2 21 >,
+		<  1881600000 0x04040062 0x094e004e 0x2 22 >,
+		<  1958400000 0x04040066 0x09520052 0x2 23 >,
+		<  2035200000 0x0404006a 0x09550055 0x3 24 >,
+		<  2112000000 0x0404006e 0x0a580058 0x3 25 >,
+		<  2208000000 0x04040073 0x0a5c005c 0x3 26 >,
+		<  2265600000 0x04010076 0x0a5e005e 0x3 26 >,
+		<  2265600000 0x04040076 0x0a5e005e 0x3 27 >,
+		<  2342400000 0x0401007a 0x0a620062 0x3 27 >,
+		<  2323200000 0x04040079 0x0a610061 0x3 28 >,
+		<  2419200000 0x0401007e 0x0a650065 0x3 28 >,
+		<  2342400000 0x0404007a 0x0a620062 0x3 29 >,
+		<  2438400000 0x0401007f 0x0a660066 0x3 29 >,
+		<  2361600000 0x0404007b 0x0a620062 0x3 30 >,
+		<  2457600000 0x04010080 0x0a660066 0x3 30 >;
+
+	qcom,perfcl-speedbin3-v0 =
+		<   300000000 0x0004000f 0x01200020 0x1 1 >,
+		<   345600000 0x05040012 0x01200020 0x1 2 >,
+		<   422400000 0x05040016 0x02200020 0x1 3 >,
+		<   499200000 0x0504001a 0x02200020 0x1 4 >,
+		<   576000000 0x0504001e 0x02200020 0x1 5 >,
+		<   652800000 0x05040022 0x03200020 0x1 6 >,
+		<   729600000 0x05040026 0x03200020 0x1 7 >,
+		<   806400000 0x0504002a 0x03220022 0x1 8 >,
+		<   902400000 0x0404002f 0x04260026 0x1 9 >,
+		<   979200000 0x04040033 0x04290029 0x1 10 >,
+		<  1056000000 0x04040037 0x052c002c 0x1 11 >,
+		<  1132800000 0x0404003b 0x052f002f 0x1 12 >,
+		<  1190400000 0x0404003e 0x05320032 0x2 13 >,
+		<  1267200000 0x04040042 0x06350035 0x2 14 >,
+		<  1344000000 0x04040046 0x06380038 0x2 15 >,
+		<  1420800000 0x0404004a 0x063b003b 0x2 16 >,
+		<  1497600000 0x0404004e 0x073e003e 0x2 17 >,
+		<  1574400000 0x04040052 0x07420042 0x2 18 >,
+		<  1651200000 0x04040056 0x07450045 0x2 19 >,
+		<  1728000000 0x0404005a 0x08480048 0x2 20 >,
+		<  1804800000 0x0404005e 0x084b004b 0x2 21 >,
+		<  1881600000 0x04040062 0x094e004e 0x2 22 >,
+		<  1958400000 0x04040066 0x09520052 0x2 23 >,
+		<  2035200000 0x0404006a 0x09550055 0x3 24 >,
+		<  2112000000 0x0404006e 0x0a580058 0x3 25 >,
+		<  2208000000 0x04040073 0x0a5c005c 0x3 26 >,
+		<  2265600000 0x04010076 0x0a5e005e 0x3 26 >,
+		<  2265600000 0x04040076 0x0a5e005e 0x3 27 >,
+		<  2342400000 0x0401007a 0x0a620062 0x3 27 >,
+		<  2323200000 0x04040079 0x0a610061 0x3 28 >,
+		<  2419200000 0x0401007e 0x0a650065 0x3 28 >,
+		<  2342400000 0x0404007a 0x0a620062 0x3 29 >,
+		<  2438400000 0x0401007f 0x0a660066 0x3 29 >,
+		<  2361600000 0x0404007b 0x0a620062 0x3 30 >,
+		<  2457600000 0x04010080 0x0a660066 0x3 30 >;
+};
+
+&msm_cpufreq {
+	qcom,cpufreq-table-0 =
+		<   300000 >,
+		<   364800 >,
+		<   441600 >,
+		<   518400 >,
+		<   595200 >,
+		<   672000 >,
+		<   748800 >,
+		<   825600 >,
+		<   883200 >,
+		<   960000 >,
+		<  1036800 >,
+		<  1094400 >,
+		<  1171200 >,
+		<  1248000 >,
+		<  1324800 >,
+		<  1401600 >,
+		<  1478400 >,
+		<  1555200 >,
+		<  1670400 >,
+		<  1747200 >,
+		<  1824000 >,
+		<  1900800 >;
+
+	qcom,cpufreq-table-4 =
+		<   300000 >,
+		<   345600 >,
+		<   422400 >,
+		<   499200 >,
+		<   576000 >,
+		<   652800 >,
+		<   729600 >,
+		<   806400 >,
+		<   902400 >,
+		<   979200 >,
+		<  1056000 >,
+		<  1132800 >,
+		<  1190400 >,
+		<  1267200 >,
+		<  1344000 >,
+		<  1420800 >,
+		<  1497600 >,
+		<  1574400 >,
+		<  1651200 >,
+		<  1728000 >,
+		<  1804800 >,
+		<  1881600 >,
+		<  1958400 >,
+		<  2035200 >,
+		<  2112000 >,
+		<  2208000 >,
+		<  2265600 >,
+		<  2304000 >,
+		<  2323200 >,
+		<  2342400 >,
+		<  2361600 >,
+		<  2419200 >,
+		<  2457600 >,
+		<  2476800 >,
+		<  2496000 >,
+		<  2592000 >;
+};
+
+&bwmon {
+	compatible = "qcom,bimc-bwmon4";
+	qcom,hw-timer-hz = <19200000>;
+};
+
+&devfreq_cpufreq {
+	mincpubw-cpufreq {
+		cpu-to-dev-map-0 =
+			< 1900800 1525 >;
+		cpu-to-dev-map-4 =
+			< 2112000 1525 >,
+			< 2342400 5195 >,
+			< 2496000 13763 >;
+	};
+};
+
+&devfreq_memlat_0 {
+	qcom,core-dev-table =
+		<  300000 1525 >,
+		<  595200 3143 >,
+		< 1324800 4173 >,
+		< 1555200 5859 >,
+		< 1747200 5859 >,
+		< 1900800 7759 >;
+};
+
+&devfreq_memlat_4 {
+	qcom,core-dev-table =
+		<  576000  3143 >,
+		< 1132800  4173 >,
+		< 1344000  5859 >,
+		< 1728000  7759 >,
+		< 1958400 11863 >,
+		< 2208000 13763 >;
+};
+
+&clock_gcc {
+	compatible = "qcom,gcc-8998-v2";
+};
+
+&clock_mmss {
+	compatible = "qcom,mmsscc-8998-v2";
+};
+
+&clock_gpu {
+	compatible = "qcom,gpucc-8998-v2";
+};
+
+&clock_gfx {
+	compatible = "qcom,gfxcc-8998-v2";
+	qcom,gfxfreq-speedbin0 =
+		<         0 0                           0 >,
+		< 180000000 1 RPM_SMD_REGULATOR_LEVEL_SVS >,
+		< 257000000 2 RPM_SMD_REGULATOR_LEVEL_SVS >,
+		< 342000000 3 RPM_SMD_REGULATOR_LEVEL_SVS >,
+		< 414000000 4 RPM_SMD_REGULATOR_LEVEL_SVS >,
+		< 515000000 5 RPM_SMD_REGULATOR_LEVEL_NOM >,
+		< 596000000 6 RPM_SMD_REGULATOR_LEVEL_NOM >,
+		< 670000000 7 RPM_SMD_REGULATOR_LEVEL_TURBO >,
+		< 710000000 8 RPM_SMD_REGULATOR_LEVEL_TURBO >;
+	qcom,gfxfreq-mx-speedbin0 =
+		<         0                           0 >,
+		< 180000000 RPM_SMD_REGULATOR_LEVEL_SVS >,
+		< 257000000 RPM_SMD_REGULATOR_LEVEL_SVS >,
+		< 342000000 RPM_SMD_REGULATOR_LEVEL_SVS >,
+		< 414000000 RPM_SMD_REGULATOR_LEVEL_SVS >,
+		< 515000000 RPM_SMD_REGULATOR_LEVEL_NOM >,
+		< 596000000 RPM_SMD_REGULATOR_LEVEL_NOM >,
+		< 670000000 RPM_SMD_REGULATOR_LEVEL_TURBO >,
+		< 710000000 RPM_SMD_REGULATOR_LEVEL_TURBO >;
+};
+
+&pm8998_s10 {
+	regulator-min-microvolt = <568000>;
+	regulator-max-microvolt = <1056000>;
+};
+
+&pm8998_s13 {
+	regulator-min-microvolt = <568000>;
+	regulator-max-microvolt = <1136000>;
+};
+
+&pcie0 {
+	qcom,phy-sequence = <0x804 0x01 0x00
+				0x034 0x14 0x00
+				0x138 0x30 0x00
+				0x048 0x0f 0x00
+				0x15c 0x06 0x00
+				0x090 0x01 0x00
+				0x088 0x20 0x00
+				0x0f0 0x00 0x00
+				0x0f8 0x01 0x00
+				0x0f4 0xc9 0x00
+				0x11c 0xff 0x00
+				0x120 0x3f 0x00
+				0x164 0x01 0x00
+				0x154 0x00 0x00
+				0x148 0x0a 0x00
+				0x05c 0x19 0x00
+				0x038 0x90 0x00
+				0x0b0 0x82 0x00
+				0x0c0 0x03 0x00
+				0x0bc 0x55 0x00
+				0x0b8 0x55 0x00
+				0x0a0 0x00 0x00
+				0x09c 0x0d 0x00
+				0x098 0x04 0x00
+				0x13c 0x00 0x00
+				0x060 0x08 0x00
+				0x068 0x16 0x00
+				0x070 0x34 0x00
+				0x15c 0x06 0x00
+				0x138 0x33 0x00
+				0x03c 0x02 0x00
+				0x040 0x07 0x00
+				0x080 0x04 0x00
+				0x0dc 0x00 0x00
+				0x0d8 0x3f 0x00
+				0x00c 0x09 0x00
+				0x010 0x01 0x00
+				0x01c 0x40 0x00
+				0x020 0x01 0x00
+				0x014 0x02 0x00
+				0x018 0x00 0x00
+				0x024 0x7e 0x00
+				0x028 0x15 0x00
+				0x244 0x02 0x00
+				0x2a4 0x12 0x00
+				0x260 0x10 0x00
+				0x28c 0x06 0x00
+				0x504 0x03 0x00
+				0x500 0x1c 0x00
+				0x50c 0x14 0x00
+				0x4d4 0x0a 0x00
+				0x4d8 0x04 0x00
+				0x4dc 0x1a 0x00
+				0x434 0x4b 0x00
+				0x414 0x04 0x00
+				0x40c 0x04 0x00
+				0x4f8 0x00 0x00
+				0x4fc 0x80 0x00
+				0x51c 0x40 0x00
+				0x444 0x71 0x00
+				0x43c 0x40 0x00
+				0x854 0x04 0x00
+				0x62c 0x52 0x00
+				0x9ac 0x00 0x00
+				0x8a0 0x01 0x00
+				0x9e0 0x00 0x00
+				0x9dc 0x20 0x00
+				0x9a8 0x00 0x00
+				0x8a4 0x01 0x00
+				0x8a8 0x73 0x00
+				0x9d8 0x99 0x00
+				0x9b0 0x03 0x00
+				0x804 0x03 0x00
+				0x800 0x00 0x00
+				0x808 0x03 0x00>;
+};
+
+&apc0_cpr {
+	compatible = "qcom,cprh-msm8998-v2-kbss-regulator";
+	qcom,cpr-corner-switch-delay-time = <1042>;
+	qcom,cpr-aging-ref-voltage = <1056000>;
+	qcom,apm-threshold-voltage = <800000>;
+	qcom,apm-hysteresis-voltage = <0>;
+	qcom,mem-acc-threshold-voltage = <852000>;
+	qcom,mem-acc-crossover-voltage = <852000>;
+};
+
+&apc0_pwrcl_vreg {
+	regulator-max-microvolt = <23>;
+
+	qcom,cpr-fuse-combos = <32>;
+	qcom,cpr-speed-bins = <4>;
+	qcom,cpr-speed-bin-corners = <22 22 22 22>;
+	qcom,cpr-corners = <22>;
+
+	qcom,cpr-corner-fmax-map = <8 11 18 22>;
+
+	qcom,cpr-voltage-ceiling =
+		<828000  828000  828000  828000  828000
+		 828000  828000  828000  828000  828000
+		 828000  900000  900000  900000  900000
+		 900000  900000  900000  952000  952000
+		1056000 1056000>;
+
+	qcom,cpr-voltage-floor =
+		<568000  568000  568000  568000  568000
+		 568000  568000  568000  568000  568000
+		 568000  632000  632000  632000  632000
+		 632000  632000  632000  712000  712000
+		 772000  772000>;
+
+	qcom,cpr-floor-to-ceiling-max-range =
+		<32000  32000  32000  32000
+		 32000  32000  32000  32000
+		 32000  32000  32000  32000
+		 32000  32000  32000  32000
+		 32000  32000  40000  40000
+		 40000  40000>;
+
+	qcom,corner-frequencies =
+		<300000000  364800000  441600000
+		 518400000  595200000  672000000
+		 748800000  825600000  883200000
+		 960000000 1036800000 1094400000
+		1171200000 1248000000 1324800000
+		1401600000 1478400000 1555200000
+		1670400000 1747200000 1824000000
+		1900800000>;
+
+	qcom,cpr-ro-scaling-factor =
+		<2595 2794 2577 2762 2471 2674 2199
+		 2553 3189 3255 3192 2962 3054 2982
+		 2042 2945>,
+		<2595 2794 2577 2762 2471 2674 2199
+		 2553 3189 3255 3192 2962 3054 2982
+		 2042 2945>,
+		<2391 2550 2483 2638 2382 2564 2259
+		 2555 2766 3041 2988 2935 2873 2688
+		 2013 2784>,
+		<2066 2153 2300 2434 2220 2386 2288
+		 2465 2028 2511 2487 2734 2554 2117
+		 1892 2377>;
+
+	qcom,cpr-open-loop-voltage-fuse-adjustment =
+		/* Speed bin 0 */
+		<40000  24000   12000  30000>,
+		<40000  24000   12000  30000>,
+		<40000  24000   12000  30000>,
+		<40000  24000   12000  30000>,
+		<40000  24000   12000  30000>,
+		<40000  24000   12000  30000>,
+		<40000  24000   12000  30000>,
+		<40000  24000   12000  30000>,
+		/* Speed bin 1 */
+		<40000  24000   12000  30000>,
+		<40000  24000   12000  30000>,
+		<40000  24000   12000  30000>,
+		<40000  24000   12000  30000>,
+		<40000  24000   12000  30000>,
+		<40000  24000   12000  30000>,
+		<40000  24000   12000  30000>,
+		<40000  24000   12000  30000>,
+		/* Speed bin 2 */
+		<40000  24000   12000  30000>,
+		<40000  24000   12000  30000>,
+		<40000  24000   12000  30000>,
+		<40000  24000   12000  30000>,
+		<40000  24000   12000  30000>,
+		<40000  24000   12000  30000>,
+		<40000  24000   12000  30000>,
+		<40000  24000   12000  30000>,
+		/* Speed bin 3 */
+		<40000  24000   12000  30000>,
+		<40000  24000   12000  30000>,
+		<40000  24000   12000  30000>,
+		<40000  24000   12000  30000>,
+		<40000  24000   12000  30000>,
+		<40000  24000   12000  30000>,
+		<40000  24000   12000  30000>,
+		<40000  24000   12000  30000>;
+
+	qcom,cpr-closed-loop-voltage-fuse-adjustment =
+		/* Speed bin 0 */
+		<20000  26000   12000  30000>,
+		<20000  26000   12000  30000>,
+		<20000  26000   12000  30000>,
+		<20000  26000   12000  30000>,
+		<20000  26000   12000  30000>,
+		<20000  26000   12000  30000>,
+		<20000  26000   12000  30000>,
+		<20000  26000   12000  30000>,
+		/* Speed bin 1 */
+		<20000  26000   12000  30000>,
+		<20000  26000   12000  30000>,
+		<20000  26000   12000  30000>,
+		<20000  26000   12000  30000>,
+		<20000  26000   12000  30000>,
+		<20000  26000   12000  30000>,
+		<20000  26000   12000  30000>,
+		<20000  26000   12000  30000>,
+		/* Speed bin 2 */
+		<20000  26000   12000  30000>,
+		<20000  26000   12000  30000>,
+		<20000  26000   12000  30000>,
+		<20000  26000   12000  30000>,
+		<20000  26000   12000  30000>,
+		<20000  26000   12000  30000>,
+		<20000  26000   12000  30000>,
+		<20000  26000   12000  30000>,
+		/* Speed bin 3 */
+		<20000  26000   12000  30000>,
+		<20000  26000   12000  30000>,
+		<20000  26000   12000  30000>,
+		<20000  26000   12000  30000>,
+		<20000  26000   12000  30000>,
+		<20000  26000   12000  30000>,
+		<20000  26000   12000  30000>,
+		<20000  26000   12000  30000>;
+
+	qcom,cpr-open-loop-voltage-adjustment =
+		/* Speed bin 0 */
+		<     0        0        0        0        0
+		      0        0        0        0        0
+		      0  (-12000) (-12000) (-12000) (-12000)
+		(-12000) (-16000) (-16000) (-20000) (-24000)
+		(-28000) (-28000)>,
+		/* Speed bin 1 */
+		<     0        0        0        0        0
+		      0        0        0        0        0
+		      0  (-12000) (-12000) (-12000) (-12000)
+		(-12000) (-16000) (-16000) (-20000) (-24000)
+		(-28000) (-28000)>,
+		/* Speed bin 2 */
+		<     0        0        0        0        0
+		      0        0        0        0        0
+		      0  (-12000) (-12000) (-12000) (-12000)
+		(-12000) (-16000) (-16000) (-20000) (-24000)
+		(-28000) (-28000)>,
+		/* Speed bin 3 */
+		<     0        0        0        0        0
+		      0        0        0        0        0
+		      0  (-12000) (-12000) (-12000) (-12000)
+		(-12000) (-16000) (-16000) (-20000) (-24000)
+		(-28000) (-28000)>;
+
+	qcom,cpr-closed-loop-voltage-adjustment =
+		/* Speed bin 0 */
+		<     0        0        0        0        0
+		      0        0        0        0        0
+		      0  (-10000) (-11000) (-12000) (-13000)
+		(-14000) (-14000) (-15000) (-21000) (-24000)
+		(-26000) (-28000)>,
+		/* Speed bin 1 */
+		<     0        0        0        0        0
+		      0        0        0        0        0
+		      0  (-10000) (-11000) (-12000) (-13000)
+		(-14000) (-14000) (-15000) (-21000) (-24000)
+		(-26000) (-28000)>,
+		/* Speed bin 2 */
+		<     0        0        0        0        0
+		      0        0        0        0        0
+		      0  (-10000) (-11000) (-12000) (-13000)
+		(-14000) (-14000) (-15000) (-21000) (-24000)
+		(-26000) (-28000)>,
+		/* Speed bin 3 */
+		<     0        0        0        0        0
+		      0        0        0        0        0
+		      0  (-10000) (-11000) (-12000) (-13000)
+		(-14000) (-14000) (-15000) (-21000) (-24000)
+		(-26000) (-28000)>;
+
+	qcom,allow-voltage-interpolation;
+	qcom,allow-quotient-interpolation;
+	qcom,cpr-scaled-open-loop-voltage-as-ceiling;
+
+	qcom,cpr-aging-ref-corner = <22>;
+	qcom,cpr-aging-ro-scaling-factor = <1620>;
+	qcom,allow-aging-voltage-adjustment = <0>;
+};
+
+&apc1_cpr {
+	compatible = "qcom,cprh-msm8998-v2-kbss-regulator";
+	qcom,cpr-corner-switch-delay-time = <1042>;
+	qcom,cpr-aging-ref-voltage = <1136000>;
+	qcom,apm-threshold-voltage = <800000>;
+	qcom,apm-hysteresis-voltage = <0>;
+	qcom,mem-acc-threshold-voltage = <852000>;
+	qcom,mem-acc-crossover-voltage = <852000>;
+};
+
+&apc1_perfcl_vreg {
+	regulator-max-microvolt = <34>;
+
+	qcom,cpr-fuse-combos = <32>;
+	qcom,cpr-speed-bins = <4>;
+	qcom,cpr-speed-bin-corners = <32 26 30 31>;
+	qcom,cpr-corners =
+		/* Speed bin 0 */
+		<32 32 32 32 32 32 32 32>,
+		/* Speed bin 1 */
+		<26 26 26 26 26 26 26 26>,
+		/* Speed bin 2 */
+		<30 30 30 30 30 30 30 30>,
+		/* Speed bin 3 */
+		<31 31 31 31 31 31 31 31>;
+
+	qcom,cpr-corner-fmax-map =
+		/* Speed bin 0 */
+		<8 12 20 32>,
+		/* Speed bin 1 */
+		<8 12 20 26>,
+		/* Speed bin 2 */
+		<8 12 20 30>,
+		/* Speed bin 3 */
+		<8 12 20 31>;
+
+	qcom,cpr-voltage-ceiling =
+		/* Speed bin 0 */
+		<828000  828000  828000  828000  828000
+		 828000  828000  828000  828000  828000
+		 828000  828000  900000  900000  900000
+		 900000  900000  900000  900000  900000
+		 952000  952000  952000 1136000 1136000
+		1136000 1136000 1136000 1136000 1136000
+		1136000 1136000>,
+		/* Speed bin 1 */
+		<828000  828000  828000  828000  828000
+		 828000  828000  828000  828000  828000
+		 828000  828000  900000  900000  900000
+		 900000  900000  900000  900000  900000
+		 952000  952000  952000 1136000 1136000
+		1136000>,
+		/* Speed bin 2 */
+		<828000  828000  828000  828000  828000
+		 828000  828000  828000  828000  828000
+		 828000  828000  900000  900000  900000
+		 900000  900000  900000  900000  900000
+		 952000  952000  952000 1136000 1136000
+		1136000 1136000 1136000 1136000 1136000>,
+		/* Speed bin 3 */
+		<828000  828000  828000  828000  828000
+		 828000  828000  828000  828000  828000
+		 828000  828000  900000  900000  900000
+		 900000  900000  900000  900000  900000
+		 952000  952000  952000 1136000 1136000
+		1136000 1136000 1136000 1136000 1136000
+		1136000>;
+
+	qcom,cpr-voltage-floor =
+		/* Speed bin 0 */
+		<568000  568000  568000  568000  568000
+		 568000  568000  568000  568000  568000
+		 568000  568000  632000  632000  632000
+		 632000  632000  632000  632000  632000
+		 712000  712000  712000  772000  772000
+		 772000  772000  772000  772000  772000
+		 772000  772000>,
+		/* Speed bin 1 */
+		<568000  568000  568000  568000  568000
+		 568000  568000  568000  568000  568000
+		 568000  568000  632000  632000  632000
+		 632000  632000  632000  632000  632000
+		 712000  712000  712000  772000  772000
+		 772000>,
+		/* Speed bin 2 */
+		<568000  568000  568000  568000  568000
+		 568000  568000  568000  568000  568000
+		 568000  568000  632000  632000  632000
+		 632000  632000  632000  632000  632000
+		 712000  712000  712000  772000  772000
+		 772000  772000  772000  772000  772000>,
+		/* Speed bin 3 */
+		<568000  568000  568000  568000  568000
+		 568000  568000  568000  568000  568000
+		 568000  568000  632000  632000  632000
+		 632000  632000  632000  632000  632000
+		 712000  712000  712000  772000  772000
+		 772000  772000  772000  772000  772000
+		 772000>;
+
+	qcom,cpr-floor-to-ceiling-max-range =
+		/* Speed bin 0 */
+		<32000  32000  32000  32000
+		 32000  32000  32000  32000
+		 32000  32000  32000  32000
+		 32000  32000  32000  32000
+		 32000  32000  32000  32000
+		 40000  40000  40000  40000
+		 40000  40000  40000  40000
+		 40000  40000  40000  40000>,
+		/* Speed bin 1 */
+		<32000  32000  32000  32000
+		 32000  32000  32000  32000
+		 32000  32000  32000  32000
+		 32000  32000  32000  32000
+		 32000  32000  32000  32000
+		 40000  40000  40000  40000
+		 40000  40000>,
+		/* Speed bin 2 */
+		<32000  32000  32000  32000
+		 32000  32000  32000  32000
+		 32000  32000  32000  32000
+		 32000  32000  32000  32000
+		 32000  32000  32000  32000
+		 40000  40000  40000  40000
+		 40000  40000  40000  40000
+		 40000  40000>,
+		/* Speed bin 3 */
+		<32000  32000  32000  32000
+		 32000  32000  32000  32000
+		 32000  32000  32000  32000
+		 32000  32000  32000  32000
+		 32000  32000  32000  32000
+		 40000  40000  40000  40000
+		 40000  40000  40000  40000
+		 40000  40000  40000>;
+
+	qcom,corner-frequencies =
+		/* Speed bin 0 */
+		<300000000  345600000  422400000
+		 499200000  576000000  652800000
+		 729600000  806400000  902400000
+		 979200000 1056000000 1132800000
+		1190400000 1267200000 1344000000
+		1420800000 1497600000 1574400000
+		1651200000 1728000000 1804800000
+		1881600000 1958400000 2035200000
+		2112000000 2208000000 2265600000
+		2342400000 2419200000 2457600000
+		2476800000 2496000000>,
+		/* Speed bin 1 */
+		<300000000  345600000  422400000
+		 499200000  576000000  652800000
+		 729600000  806400000  902400000
+		 979200000 1056000000 1132800000
+		1190400000 1267200000 1344000000
+		1420800000 1497600000 1574400000
+		1651200000 1728000000 1804800000
+		1881600000 1958400000 2035200000
+		2112000000 2208000000>,
+		/* Speed bin 2 */
+		<300000000  345600000  422400000
+		 499200000  576000000  652800000
+		 729600000  806400000  902400000
+		 979200000 1056000000 1132800000
+		1190400000 1267200000 1344000000
+		1420800000 1497600000 1574400000
+		1651200000 1728000000 1804800000
+		1881600000 1958400000 2035200000
+		2112000000 2208000000 2265600000
+		2323200000 2342400000 2361600000>,
+		/* Speed bin 3 */
+		<300000000  345600000  422400000
+		 499200000  576000000  652800000
+		 729600000  806400000  902400000
+		 979200000 1056000000 1132800000
+		1190400000 1267200000 1344000000
+		1420800000 1497600000 1574400000
+		1651200000 1728000000 1804800000
+		1881600000 1958400000 2035200000
+		2112000000 2208000000 2265600000
+		2323200000 2342400000 2361600000
+		2457600000>;
+
+	qcom,cpr-ro-scaling-factor =
+		<2857 3057 2828 2952 2699 2798 2446
+		 2631 2629 2578 2244 3344 3289 3137
+		 3164 2655>,
+		<2857 3057 2828 2952 2699 2798 2446
+		 2631 2629 2578 2244 3344 3289 3137
+		 3164 2655>,
+		<2603 2755 2676 2777 2573 2685 2465
+		 2610 2312 2423 2243 3104 3022 3036
+		 2740 2303>,
+		<1901 2016 2096 2228 2034 2161 2077
+		 2188 1565 1870 1925 2235 2205 2413
+		 1762 1478>;
+
+	qcom,cpr-open-loop-voltage-fuse-adjustment =
+		/* Speed bin 0 */
+		<  8000        0   12000  52000>,
+		<  8000        0   12000  52000>,
+		<  8000        0   12000  52000>,
+		<  8000        0   12000  52000>,
+		<  8000        0   12000  52000>,
+		<  8000        0   12000  52000>,
+		<  8000        0   12000  52000>,
+		<  8000        0   12000  52000>,
+		/* Speed bin 1 */
+		<  8000        0   12000  52000>,
+		<  8000        0   12000  52000>,
+		<  8000        0   12000  52000>,
+		<  8000        0   12000  52000>,
+		<  8000        0   12000  52000>,
+		<  8000        0   12000  52000>,
+		<  8000        0   12000  52000>,
+		<  8000        0   12000  52000>,
+		/* Speed bin 2 */
+		<  8000        0   12000  52000>,
+		<  8000        0   12000  52000>,
+		<  8000        0   12000  52000>,
+		<  8000        0   12000  52000>,
+		<  8000        0   12000  52000>,
+		<  8000        0   12000  52000>,
+		<  8000        0   12000  52000>,
+		<  8000        0   12000  52000>,
+		/* Speed bin 3 */
+		<  8000        0   12000  52000>,
+		<  8000        0   12000  52000>,
+		<  8000        0   12000  52000>,
+		<  8000        0   12000  52000>,
+		<  8000        0   12000  52000>,
+		<  8000        0   12000  52000>,
+		<  8000        0   12000  52000>,
+		<  8000        0   12000  52000>;
+
+	qcom,cpr-closed-loop-voltage-fuse-adjustment =
+		/* Speed bin 0 */
+		<      0        0   12000  50000>,
+		<      0        0   12000  50000>,
+		<      0        0   12000  50000>,
+		<      0        0   12000  50000>,
+		<      0        0   12000  50000>,
+		<      0        0   12000  50000>,
+		<      0        0   12000  50000>,
+		<      0        0   12000  50000>,
+		/* Speed bin 1 */
+		<      0        0   12000  50000>,
+		<      0        0   12000  50000>,
+		<      0        0   12000  50000>,
+		<      0        0   12000  50000>,
+		<      0        0   12000  50000>,
+		<      0        0   12000  50000>,
+		<      0        0   12000  50000>,
+		<      0        0   12000  50000>,
+		/* Speed bin 2 */
+		<      0        0   12000  50000>,
+		<      0        0   12000  50000>,
+		<      0        0   12000  50000>,
+		<      0        0   12000  50000>,
+		<      0        0   12000  50000>,
+		<      0        0   12000  50000>,
+		<      0        0   12000  50000>,
+		<      0        0   12000  50000>,
+		/* Speed bin 3 */
+		<      0        0   12000  50000>,
+		<      0        0   12000  50000>,
+		<      0        0   12000  50000>,
+		<      0        0   12000  50000>,
+		<      0        0   12000  50000>,
+		<      0        0   12000  50000>,
+		<      0        0   12000  50000>,
+		<      0        0   12000  50000>;
+
+	qcom,cpr-open-loop-voltage-adjustment =
+		/* Speed bin 0 */
+		<     0        0        0        0        0
+		      0        0        0        0        0
+		      0   (-8000) (-12000) (-12000) (-12000)
+		(-12000) (-12000) (-12000) (-16000) (-16000)
+		(-20000) (-16000) (-16000) (-16000) (-12000)
+		(-28000) (-28000) (-28000) (-28000) (-28000)
+		(-28000) (-28000)>,
+		/* Speed bin 1 */
+		<     0        0        0        0        0
+		      0        0        0        0        0
+		      0   (-8000) (-12000) (-12000) (-12000)
+		(-12000) (-12000) (-12000) (-16000) (-16000)
+		(-20000) (-16000) (-16000) (-16000) (-16000)
+		(-28000)>,
+		/* Speed bin 2 */
+		<     0        0        0        0        0
+		      0        0        0        0        0
+		      0   (-8000) (-12000) (-12000) (-12000)
+		(-12000) (-12000) (-12000) (-16000) (-16000)
+		(-20000) (-16000) (-16000) (-16000) (-12000)
+		(-28000) (-28000) (-28000) (-28000) (-28000)>,
+		/* Speed bin 3 */
+		<     0        0        0        0        0
+		      0        0        0        0        0
+		      0   (-8000) (-12000) (-12000) (-12000)
+		(-12000) (-12000) (-12000) (-16000) (-16000)
+		(-20000) (-16000) (-16000) (-16000) (-12000)
+		(-28000) (-28000) (-28000) (-28000) (-28000)
+		(-28000)>;
+
+	qcom,cpr-closed-loop-voltage-adjustment =
+		/* Speed bin 0 */
+		<     0        0        0        0        0
+		      0        0        0        0        0
+		      0  (-10000) (-10000) (-11000) (-12000)
+		(-12000) (-13000) (-14000) (-14000) (-15000)
+		(-16000) (-16000) (-17000) (-15000) (-13000)
+		(-26000) (-26000) (-27000) (-27000) (-28000)
+		(-28000) (-28000)>,
+		/* Speed bin 1 */
+		<     0        0        0        0        0
+		      0        0        0        0        0
+		      0  (-10000) (-10000) (-11000) (-12000)
+		(-12000) (-13000) (-14000) (-14000) (-15000)
+		(-16000) (-16000) (-17000) (-16000) (-15000)
+		(-28000)>,
+		/* Speed bin 2 */
+		<     0        0        0        0        0
+		      0        0        0        0        0
+		      0  (-10000) (-10000) (-11000) (-12000)
+		(-12000) (-13000) (-14000) (-14000) (-15000)
+		(-16000) (-16000) (-17000) (-15000) (-14000)
+		(-27000) (-27000) (-28000) (-28000) (-28000)>,
+		/* Speed bin 3 */
+		<     0        0        0        0        0
+		      0        0        0        0        0
+		      0  (-10000) (-10000) (-11000) (-12000)
+		(-12000) (-13000) (-14000) (-14000) (-15000)
+		(-16000) (-16000) (-17000) (-15000) (-14000)
+		(-26000) (-27000) (-27000) (-28000) (-28000)
+		(-28000)>;
+
+	qcom,allow-voltage-interpolation;
+	qcom,allow-quotient-interpolation;
+	qcom,cpr-scaled-open-loop-voltage-as-ceiling;
+
+	qcom,cpr-aging-ref-corner = <32 26 30 31>;
+	qcom,cpr-aging-ro-scaling-factor = <1700>;
+	qcom,allow-aging-voltage-adjustment = <0>;
+};
+
+&pm8005_s1 {
+	regulator-min-microvolt = <516000>;
+	regulator-max-microvolt = <1088000>;
+};
+
+&gfx_cpr {
+	compatible = "qcom,cpr4-msm8998-v2-mmss-regulator";
+	qcom,cpr-aging-ref-voltage = <1088000>;
+};
+
+&gfx_vreg {
+	regulator-min-microvolt = <1>;
+	regulator-max-microvolt = <8>;
+
+	qcom,cpr-fuse-corners = <4>;
+	qcom,cpr-fuse-combos = <8>;
+	qcom,cpr-corners = <8>;
+
+	qcom,cpr-corner-fmax-map = <1 3 5 8>;
+
+	qcom,cpr-voltage-ceiling =
+		<716000 716000 772000 880000 908000 948000 1016000 1088000>,
+		<724000 724000 772000 832000 916000 968000 1024000 1088000>,
+		<724000 724000 772000 832000 916000 968000 1024000 1088000>,
+		<724000 724000 772000 832000 916000 968000 1024000 1088000>,
+		<724000 724000 772000 832000 916000 968000 1024000 1088000>,
+		<724000 724000 772000 832000 916000 968000 1024000 1088000>,
+		<724000 724000 772000 832000 916000 968000 1024000 1088000>,
+		<724000 724000 772000 832000 916000 968000 1024000 1088000>;
+
+	qcom,cpr-voltage-floor =
+		<516000 516000 532000 584000 632000 672000 712000 756000>;
+
+	qcom,mem-acc-voltage = <1 1 1 2 2 2 2 2>;
+
+	qcom,corner-frequencies =
+		<180000000 257000000 342000000 414000000
+		 515000000 596000000 670000000 710000000>;
+
+	qcom,cpr-target-quotients =
+		<   0    0    0    0  331  357    0    0
+		    0    0    0    0    0    0  115    0>,
+		<   0    0    0    0  467  500    0    0
+		    0    0    0    0    0    0  199    0>,
+		<   0    0    0    0  628  665    0    0
+		    0    0    0    0    0    0  290    0>,
+		<   0    0    0    0  762  805    0    0
+		    0    0    0    0    0    0  397    0>,
+		<   0    0    0    0  964 1013    0    0
+		    0    0 1143    0 1138 1055    0    0>,
+		<   0    0    0    0    0    0    0    0
+		    0    0 1306    0 1289 1168    0    0>,
+		<   0    0    0    0    0    0    0    0
+		    0    0 1468    0 1429 1256    0    0>,
+		<   0    0    0    0    0    0    0    0
+		    0    0 1627    0 1578 1353    0    0>;
+
+	qcom,cpr-ro-scaling-factor =
+		<   0    0    0    0 2377 2571    0    0
+		    0    0 2168    0 2209 1849 1997    0>,
+		<   0    0    0    0 2377 2571    0    0
+		    0    0 2168    0 2209 1849 1997    0>,
+		<   0    0    0    0 2377 2571    0    0
+		    0    0 2168    0 2209 1849 1997    0>,
+		<   0    0    0    0 2377 2571    0    0
+		    0    0 2168    0 2209 1849 1997    0>,
+		<   0    0    0    0 2377 2571    0    0
+		    0    0 2168    0 2209 1849 1997    0>,
+		<   0    0    0    0 2377 2571    0    0
+		    0    0 2168    0 2209 1849 1997    0>,
+		<   0    0    0    0 2377 2571    0    0
+		    0    0 2168    0 2209 1849 1997    0>,
+		<   0    0    0    0 2377 2571    0    0
+		    0    0 2168    0 2209 1849 1997    0>;
+
+	qcom,cpr-open-loop-voltage-fuse-adjustment =
+		<   60000        0        0        0>,
+		<   60000        0        0        0>,
+		<   60000        0        0        0>,
+		<   60000        0        0        0>,
+		<   60000        0        0        0>,
+		<   60000        0        0        0>,
+		<   60000        0        0        0>,
+		<   60000        0        0        0>;
+
+	qcom,cpr-closed-loop-voltage-adjustment =
+		<   90000    38000    28000     8000
+			0    29000    11000        0>,
+		<   90000    38000    28000     8000
+			0    29000    11000        0>,
+		<   90000    38000    28000     8000
+			0    29000    11000        0>,
+		<   90000    38000    28000     8000
+			0    29000    11000        0>,
+		<   90000    38000    28000     8000
+			0    29000    11000        0>,
+		<   90000    38000    28000     8000
+			0    29000    11000        0>,
+		<   90000    38000    28000     8000
+			0    29000    11000        0>,
+		<   90000    38000    28000     8000
+			0    29000    11000        0>;
+
+	qcom,cpr-floor-to-ceiling-max-range =
+	       <40000 40000 40000 40000 40000 40000 50000 50000>;
+
+	qcom,cpr-fused-closed-loop-voltage-adjustment-map =
+		<0 0 0 0 1 2 3 4>;
+
+	qcom,allow-voltage-interpolation;
+	qcom,cpr-scaled-open-loop-voltage-as-ceiling;
+
+	qcom,cpr-aging-max-voltage-adjustment = <15000>;
+	qcom,cpr-aging-ref-corner = <8>;
+	qcom,cpr-aging-ro-scaling-factor = <1620>;
+	qcom,allow-aging-voltage-adjustment = <0>;
+};
+
+&qusb_phy0 {
+	reg = <0x0c012000 0x2a8>,
+	      <0x01fcb24c 0x4>,
+	      <0x00784238 0x4>;
+	reg-names = "qusb_phy_base",
+			"tcsr_clamp_dig_n_1p8",
+			"efuse_addr";
+	qcom,efuse-bit-pos = <16>;
+	qcom,efuse-num-bits = <4>;
+	qcom,qusb-phy-init-seq =
+			/* <value reg_offset> */
+				<0x13 0x04 /* analog_controls_two */
+				0x7c 0x18c /* pll_clock_inverter */
+				0x80 0x2c /* pll_cmode */
+				0x0a 0x184 /* pll_lock_delay */
+				0xa5 0x23c /* tune1 */
+				0x09 0x240 /* tune2 */
+				0x19 0xb4>; /* digital_timers_two */
+};
+
+&msm_vidc {
+	qcom,load-freq-tbl =
+		/* Encoders */
+		<1105920 533000000 0x55555555>, /* 4kx2304@30 */ /*TURBO*/
+		<1036800 444000000 0x55555555>, /* 720p@240, 1080p@120,
+						 * 1440p@60, UHD@30 */ /*NOMINAL*/
+		< 829440 355200000 0x55555555>, /* UHD/4096x2160@30 SVSL1 */
+		< 489600 269330000 0x55555555>, /* 1080p@60, 720p@120 SVS */
+		< 345600 200000000 0x55555555>, /* 2560x1440@24, 1080p@30 */
+						/* SVS2 */
+
+		/* Decoders */
+		<2211840 533000000 0xffffffff>, /* 4kx2304@60, 1080p@240 */
+						/* TURBO */
+		<1728000 444000000 0xffffffff>, /* 2560x1440@120 */
+						/* NOMINAL */
+		<1675472 355200000 0xffffffff>, /* 4kx2304@44 */ /*SVSL1*/
+		<1105920 269330000 0xffffffff>, /* UHD/4k2304@30, 1080p@120 */
+						/* SVS */
+		< 829440 200000000 0xffffffff>; /* 720p@120, 1080p@60 */
+						/* SVS2 */
+
+	qcom,imem-ab-tbl =
+		<200000000 1560000>, /* imem @ svs2 freq 75 MHz */
+		<269330000 3570000>, /* imem @ svs freq 171 MHz */
+		<355200000 3570000>, /* imem @ svs freq 171 MHz */
+		<444000000 6750000>, /* imem @ nom freq 323 MHz */
+		<533000000 8490000>; /* imem @ turbo freq 406 MHz */
+
+	qcom,dcvs-tbl = /* minLoad LoadLow LoadHigh CodecCheck */
+		/* Decode */
+		/* Load > Nominal, Nominal <-> Turbo Eg.3840x2160@60 */
+		<1728000 1728000 2211840 0x3f00000c>,
+		/* Encoder */
+		/* Load > Nominal, Nominal <-> Turbo Eg. 4kx2304@30 */
+		<1036800 1036800 1105920 0x04000004>,
+		/* Load > SVSL1, SVSL1<-> Nominal Eg. 3840x2160@30 */
+		< 829440  829440 1036800 0x04000004>,
+		/* Load > SVS , SVS <-> SVSL1 Eg. 4kx2304@24 */
+		< 489600  489600  829440 0x04000004>;
+
+	qcom,dcvs-limit = /* Min Frame size, Min MBs/sec */
+		<32400 30>, /* Encoder 3840x2160@30 */
+		<32400 60>; /* Decoder 3840x2160@60 */
+};
+
+&soc {
+	/* Gold L2 SAW */
+	qcom,spm@178120000 {
+		qcom,saw2-avs-limit = <0x4700470>;
+	};
+
+	/* Silver L2 SAW */
+	qcom,spm@179120000 {
+		qcom,saw2-avs-limit = <0x4200420>;
+	};
+};
+
+/* GPU overrides */
+&msm_gpu {
+	/* Updated chip ID */
+	qcom,chipid = <0x05040001>;
+	qcom,initial-pwrlevel = <6>;
+
+	qcom,gpu-pwrlevels {
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		compatible = "qcom,gpu-pwrlevels";
+
+		qcom,gpu-pwrlevel@0 {
+			reg = <0>;
+			qcom,gpu-freq = <710000000>;
+			qcom,bus-freq = <12>;
+			qcom,bus-min = <12>;
+			qcom,bus-max = <12>;
+		};
+
+		qcom,gpu-pwrlevel@1 {
+			reg = <1>;
+			qcom,gpu-freq = <670000000>;
+			qcom,bus-freq = <12>;
+			qcom,bus-min = <11>;
+			qcom,bus-max = <12>;
+		};
+
+		qcom,gpu-pwrlevel@2 {
+			reg = <2>;
+			qcom,gpu-freq = <596000000>;
+			qcom,bus-freq = <11>;
+			qcom,bus-min = <9>;
+			qcom,bus-max = <12>;
+		};
+
+		qcom,gpu-pwrlevel@3 {
+			reg = <3>;
+			qcom,gpu-freq = <515000000>;
+			qcom,bus-freq = <11>;
+			qcom,bus-min = <9>;
+			qcom,bus-max = <12>;
+		};
+
+		qcom,gpu-pwrlevel@4 {
+			reg = <4>;
+			qcom,gpu-freq = <414000000>;
+			qcom,bus-freq = <9>;
+			qcom,bus-min = <8>;
+			qcom,bus-max = <11>;
+		};
+
+		qcom,gpu-pwrlevel@5 {
+			reg = <5>;
+			qcom,gpu-freq = <342000000>;
+			qcom,bus-freq = <8>;
+			qcom,bus-min = <5>;
+			qcom,bus-max = <9>;
+		};
+
+		qcom,gpu-pwrlevel@6 {
+			reg = <6>;
+			qcom,gpu-freq = <257000000>;
+			qcom,bus-freq = <5>;
+			qcom,bus-min = <3>;
+			qcom,bus-max = <8>;
+		};
+
+		qcom,gpu-pwrlevel@7 {
+			reg = <7>;
+			qcom,gpu-freq = <27000000>;
+			qcom,bus-freq = <0>;
+			qcom,bus-min = <0>;
+			qcom,bus-max = <0>;
+		};
+	};
+};
+
+&spss_utils {
+	qcom,spss-test-firmware-name = "spss2t";	/* 8 chars max */
+	qcom,spss-prod-firmware-name = "spss2p";	/* 8 chars max */
+	qcom,spss-hybr-firmware-name = "spss2h";	/* 8 chars max */
+};
+
+&ufs1 {
+	clock-names =
+		"core_clk",
+		"bus_aggr_clk",
+		"iface_clk",
+		"core_clk_unipro",
+		"core_clk_ice",
+		"ref_clk",
+		"tx_lane0_sync_clk",
+		"rx_lane0_sync_clk",
+		"rx_lane1_sync_clk";
+	clocks =
+		<&clock_gcc clk_gcc_ufs_axi_hw_ctl_clk>,
+		<&clock_gcc clk_gcc_aggre1_ufs_axi_clk>,
+		<&clock_gcc clk_gcc_ufs_ahb_clk>,
+		<&clock_gcc clk_gcc_ufs_unipro_core_hw_ctl_clk>,
+		<&clock_gcc clk_gcc_ufs_ice_core_hw_ctl_clk>,
+		<&clock_gcc clk_ln_bb_clk1>,
+		<&clock_gcc clk_gcc_ufs_tx_symbol_0_clk>,
+		<&clock_gcc clk_gcc_ufs_rx_symbol_0_clk>,
+		<&clock_gcc clk_gcc_ufs_rx_symbol_1_clk>;
+	freq-table-hz =
+		<50000000 200000000>,
+		<0 0>,
+		<0 0>,
+		<37500000 150000000>,
+		<75000000 300000000>,
+		<0 0>,
+		<0 0>,
+		<0 0>,
+		<0 0>;
+
+	lanes-per-direction = <2>;
+};
+
+&ssc_sensors {
+	qcom,firmware-name = "slpi_v2";
+};
diff -Nruw linux-4.4.115-fbx/arch/arm/boot/dts/qcom./msm8998-vidc.dtsi linux-4.4.115-fbx/arch/arm/boot/dts/qcom/msm8998-vidc.dtsi
--- linux-4.4.115-fbx/arch/arm/boot/dts/qcom./msm8998-vidc.dtsi	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/arch/arm/boot/dts/qcom/msm8998-vidc.dtsi	2019-01-22 16:16:21.203225578 +0100
@@ -0,0 +1,251 @@
+/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+#include <dt-bindings/msm/msm-bus-ids.h>
+#include <dt-bindings/clock/msm-clocks-8998.h>
+
+&soc {
+	msm_vidc: qcom,vidc@cc00000 {
+		compatible = "qcom,msm-vidc";
+		status = "ok";
+		reg = <0xcc00000 0x100000>;
+		interrupts = <GIC_SPI 287 IRQ_TYPE_LEVEL_HIGH>;
+		qcom,hfi = "venus";
+		qcom,hfi-version = "3xx";
+		qcom,firmware-name = "venus";
+		qcom,never-unload-fw;
+		qcom,sw-power-collapse;
+		qcom,max-secure-instances = <5>;
+		qcom,reg-presets =
+			<0x80124 0x00000003>,
+			<0x80550 0x01111111>,
+			<0x80560 0x01111111>,
+			<0x80568 0x01111111>,
+			<0x80570 0x01111111>,
+			<0x80580 0x01111111>,
+			<0x80588 0x01111111>,
+			<0xe2010 0x00000000>;
+
+		qcom,imem-size = <524288>; /* 512 kB */
+		qcom,max-hw-load = <2563200>; /* Full 4k @ 60 + 1080p @ 60 */
+		qcom,power-conf = <8294400>; /* WxH - 3840*2160 */
+		qcom,load-freq-tbl =
+			/* Encoders */
+			<972000 465000000 0x55555555>, /* 4k UHD @ 30 */
+			<489600 360000000 0x55555555>, /* 1080p @ 60 */
+			<244800 186000000 0x55555555>, /* 1080p @ 30 */
+			<108000 100000000 0x55555555>, /* 720p @ 30 */
+
+			/* Decoders */
+			<1944000 465000000 0xffffffff>, /* 4k UHD @ 60 */
+			< 972000 360000000 0xffffffff>, /* 4k UHD @ 30 */
+			< 489600 186000000 0xffffffff>, /* 1080p @ 60 */
+			< 244800 100000000 0xffffffff>; /* 1080p @ 30 */
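+
+		/*
+		 * Loads are macroblocks/sec, (W x H / 256) x fps with
+		 * 16-aligned dimensions: 3840x2160/256 x 30 = 972000,
+		 * 1920x1088/256 x 60 = 489600. max-hw-load 2563200 is
+		 * DCI 4k60 (4096x2160/256 x 60 = 2073600) plus 1080p60.
+		 */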
+
+		qcom,dcvs-tbl =
+			<972000 972000  1944000 0x3f00000c>, /* UHD 30 */
+			<489600 489600   972000 0x3f00000c>, /* 1080p 60 */
+			<244800 244800   489600 0x3f00000c>, /* 1080p 30 */
+			<829440 489600   972000 0x04000004>; /* DCI 24 */
+
+		qcom,dcvs-limit =
+			<32400 30>, /* Encoder UHD */
+			<14400 30>; /* Decoder WQHD */
+
+		/*
+		 * Table lists <video_core_freq imem_ab> pairs.
+		 * imem_ab value determines the imem clock frequency for the
+		 * corresponding video core frequency.
+		 */
+		qcom,imem-ab-tbl =
+			<100000000 1560000>, /* imem @ svs2 freq 75 MHz */
+			<186000000 3570000>, /* imem @ svs freq 171 MHz */
+			<360000000 6750000>, /* imem @ nom freq 323 MHz */
+			<465000000 8490000>; /* imem @ turbo freq 406 MHz */
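+		/*
+		 * Note the ab values scale near-linearly with the target
+		 * imem clock, at roughly 21 KBps per kHz across all four
+		 * levels.
+		 */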
+
+		/* Regulators */
+		smmu-vdd-supply = <&gdsc_bimc_smmu>;
+		venus-supply = <&gdsc_venus>;
+		venus-core0-supply = <&gdsc_venus_core0>;
+		venus-core1-supply = <&gdsc_venus_core1>;
+
+		/* Clocks */
+		clock-names = "sys_noc_axi_clk",
+			"noc_axi_clk", "mnoc_ahb_clk",
+			"smmu_ahb_clk", "smmu_axi_clk",
+			"mnoc_maxi_clk",
+			"core_clk", "iface_clk", "bus_clk",
+			"maxi_clk", "core0_clk", "core1_clk";
+		clocks = <&clock_gcc clk_gcc_mmss_sys_noc_axi_clk>,
+			<&clock_gcc clk_mmssnoc_axi_clk>,
+			<&clock_mmss clk_mmss_mnoc_ahb_clk>,
+			<&clock_mmss clk_mmss_bimc_smmu_ahb_clk>,
+			<&clock_mmss clk_mmss_bimc_smmu_axi_clk>,
+			<&clock_mmss clk_mmss_mnoc_maxi_clk>,
+			<&clock_mmss clk_mmss_video_core_clk>,
+			<&clock_mmss clk_mmss_video_ahb_clk>,
+			<&clock_mmss clk_mmss_video_axi_clk>,
+			<&clock_mmss clk_mmss_video_maxi_clk>,
+			<&clock_mmss clk_mmss_video_subcore0_clk>,
+			<&clock_mmss clk_mmss_video_subcore1_clk>;
+		qcom,clock-configs = <0x0 0x0 0x0 0x0 0x0 0x0
+				0x3 0x0 0x2 0x2 0x3 0x3>;
+
+		/* Buses */
+		bus_cnoc {
+			compatible = "qcom,msm-vidc,bus";
+			label = "cnoc";
+			qcom,bus-master = <MSM_BUS_MASTER_AMPSS_M0>;
+			qcom,bus-slave = <MSM_BUS_SLAVE_VENUS_CFG>;
+			qcom,bus-governor = "performance";
+			qcom,bus-range-kbps = <1 1>;
+		};
+
+		venus_bus_ddr {
+			compatible = "qcom,msm-vidc,bus";
+			label = "venus-ddr";
+			qcom,bus-master = <MSM_BUS_MASTER_VIDEO_P0>;
+			qcom,bus-slave = <MSM_BUS_SLAVE_EBI_CH0>;
+			qcom,bus-governor = "msm-vidc-ddr";
+			qcom,bus-range-kbps = <1000 4946000>;
+		};
+
+		venus_bus_vmem {
+			compatible = "qcom,msm-vidc,bus";
+			label = "venus-vmem";
+			qcom,bus-master = <MSM_BUS_MASTER_VIDEO_P0_OCMEM>;
+			qcom,bus-slave = <MSM_BUS_SLAVE_VMEM>;
+			qcom,bus-governor = "msm-vidc-vmem+";
+			qcom,bus-range-kbps = <1000 8490000>;
+		};
+
+		arm9_bus_ddr {
+			compatible = "qcom,msm-vidc,bus";
+			label = "venus-arm9-ddr";
+			qcom,bus-master = <MSM_BUS_MASTER_VIDEO_P0>;
+			qcom,bus-slave = <MSM_BUS_SLAVE_EBI_CH0>;
+			qcom,bus-governor = "performance";
+			qcom,bus-range-kbps = <1 1>;
+		};
+
+		/* MMUs */
+		non_secure_cb {
+			compatible = "qcom,msm-vidc,context-bank";
+			label = "venus_ns";
+			iommus =
+				<&mmss_smmu 0x400>,
+				<&mmss_smmu 0x401>,
+				<&mmss_smmu 0x40a>,
+				<&mmss_smmu 0x407>,
+				<&mmss_smmu 0x40e>,
+				<&mmss_smmu 0x40f>,
+				<&mmss_smmu 0x408>,
+				<&mmss_smmu 0x409>,
+				<&mmss_smmu 0x40b>,
+				<&mmss_smmu 0x40c>,
+				<&mmss_smmu 0x40d>,
+				<&mmss_smmu 0x410>,
+				<&mmss_smmu 0x421>,
+				<&mmss_smmu 0x428>,
+				<&mmss_smmu 0x429>,
+				<&mmss_smmu 0x42b>,
+				<&mmss_smmu 0x42c>,
+				<&mmss_smmu 0x42d>,
+				<&mmss_smmu 0x411>,
+				<&mmss_smmu 0x431>;
+			buffer-types = <0xfff>;
+			virtual-addr-pool = <0x70800000 0x6f800000>;
+		};
+
+		firmware_cb {
+			compatible = "qcom,msm-vidc,context-bank";
+			qcom,fw-context-bank;
+			iommus = <&mmss_smmu 0x580>,
+				<&mmss_smmu 0x586>;
+		};
+		secure_bitstream_cb {
+			compatible = "qcom,msm-vidc,context-bank";
+			label = "venus_sec_bitstream";
+			iommus = <&mmss_smmu 0x500>,
+				<&mmss_smmu 0x502>,
+				<&mmss_smmu 0x509>,
+				<&mmss_smmu 0x50a>,
+				<&mmss_smmu 0x50b>,
+				<&mmss_smmu 0x50e>,
+				<&mmss_smmu 0x526>,
+				<&mmss_smmu 0x529>,
+				<&mmss_smmu 0x52b>;
+			buffer-types = <0x241>;
+			virtual-addr-pool = <0x4b000000 0x25800000>;
+			qcom,secure-context-bank;
+		};
+
+		venus_secure_pixel_cb: secure_pixel_cb {
+			compatible = "qcom,msm-vidc,context-bank";
+			label = "venus_sec_pixel";
+			iommus = <&mmss_smmu 0x504>,
+				<&mmss_smmu 0x50c>,
+				<&mmss_smmu 0x510>,
+				<&mmss_smmu 0x52c>;
+			buffer-types = <0x106>;
+			virtual-addr-pool = <0x25800000 0x25800000>;
+			qcom,secure-context-bank;
+		};
+
+		venus_secure_non_pixel_cb: secure_non_pixel_cb {
+			compatible = "qcom,msm-vidc,context-bank";
+			label = "venus_sec_non_pixel";
+			iommus = <&mmss_smmu 0x505>,
+				<&mmss_smmu 0x507>,
+				<&mmss_smmu 0x508>,
+				<&mmss_smmu 0x50d>,
+				<&mmss_smmu 0x50f>,
+				<&mmss_smmu 0x525>,
+				<&mmss_smmu 0x528>,
+				<&mmss_smmu 0x52d>,
+				<&mmss_smmu 0x540>;
+			buffer-types = <0x480>;
+			virtual-addr-pool = <0x1000000 0x24800000>;
+			qcom,secure-context-bank;
+		};
+	};
+
+	qcom,vmem@c880000 {
+		compatible = "qcom,msm-vmem";
+		status = "ok";
+		interrupts = <GIC_SPI 429 IRQ_TYPE_LEVEL_HIGH>;
+
+		reg = <0xc880000 0x6b>,
+		      <0x14800000 0x80000>;
+		reg-names = "reg-base", "mem-base";
+
+		clocks = <&clock_mmss clk_mmss_mnoc_ahb_clk>,
+			<&clock_mmss clk_mmss_mnoc_maxi_clk>,
+			<&clock_mmss clk_mmss_vmem_ahb_clk>,
+			<&clock_mmss clk_mmss_vmem_maxi_clk>;
+		clock-names = "mnoc_ahb","mnoc_maxi",
+			"ahb", "maxi";
+		clock-config = <0x0 0x0 0x0 0x1>;
+
+		qcom,msm-bus,name = "vmem";
+		qcom,msm-bus,num-cases = <2>;
+		qcom,msm-bus,num-paths = <2>;
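+		/*
+		 * Two cases x two paths: the first two rows are the idle
+		 * (zero) vote, the last two the active vote; each row is
+		 * <master slave ab ib> in KBps.
+		 */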
+		qcom,msm-bus,vectors-KBps =
+		<MSM_BUS_MASTER_VIDEO_P0_OCMEM MSM_BUS_SLAVE_VMEM    0    0>,
+		<MSM_BUS_MASTER_AMPSS_M0 MSM_BUS_SLAVE_VMEM_CFG   0   0>,
+		<MSM_BUS_MASTER_VIDEO_P0_OCMEM MSM_BUS_SLAVE_VMEM 1000 1000>,
+		<MSM_BUS_MASTER_AMPSS_M0 MSM_BUS_SLAVE_VMEM_CFG 500 800>;
+
+		qcom,bank-size = <131072>; /* 128 kB */
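+		/* mem-base spans 0x80000 bytes, i.e. four 128 kB banks. */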
+	};
+};
diff -Nruw linux-4.4.115/arch/arm/boot/dts/qcom/msm8998-wcd.dtsi linux-4.4.115-fbx/arch/arm/boot/dts/qcom/msm8998-wcd.dtsi
--- linux-4.4.115/arch/arm/boot/dts/qcom/msm8998-wcd.dtsi	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/arch/arm/boot/dts/qcom/msm8998-wcd.dtsi	2019-01-22 16:16:21.203225578 +0100
@@ -0,0 +1,239 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+&slim_aud {
+	tasha_codec {
+		wsa_spkr_sd1: msm_cdc_pinctrll {
+		      compatible = "qcom,msm-cdc-pinctrl";
+		      pinctrl-names = "aud_active", "aud_sleep";
+		      pinctrl-0 = <&spkr_1_sd_n_active>;
+		      pinctrl-1 = <&spkr_1_sd_n_sleep>;
+		};
+
+		wsa_spkr_sd2: msm_cdc_pinctrlr {
+		      compatible = "qcom,msm-cdc-pinctrl";
+		      pinctrl-names = "aud_active", "aud_sleep";
+		      pinctrl-0 = <&spkr_2_sd_n_active>;
+		      pinctrl-1 = <&spkr_2_sd_n_sleep>;
+		};
+	};
+
+	tavil_codec {
+		wcd: wcd_pinctrl@5 {
+			compatible = "qcom,wcd-pinctrl";
+			qcom,num-gpios = <5>;
+			gpio-controller;
+			#gpio-cells = <2>;
+
+			us_euro_sw_wcd_active: us_euro_sw_wcd_active {
+				mux {
+					pins = "gpio1";
+				};
+
+				config {
+					pins = "gpio1";
+					output-high;
+				};
+			};
+
+			us_euro_sw_wcd_sleep: us_euro_sw_wcd_sleep {
+				mux {
+					pins = "gpio1";
+				};
+
+				config {
+					pins = "gpio1";
+					output-low;
+				};
+			};
+
+			spkr_1_wcd_en_active: spkr_1_wcd_en_active {
+				mux {
+					pins = "gpio2";
+				};
+
+				config {
+					pins = "gpio2";
+					output-high;
+				};
+			};
+
+			spkr_1_wcd_en_sleep: spkr_1_wcd_en_sleep {
+				mux {
+					pins = "gpio2";
+				};
+
+				config {
+					pins = "gpio2";
+					input-enable;
+				};
+			};
+
+			spkr_2_wcd_en_active: spkr_2_sd_n_active {
+				mux {
+					pins = "gpio3";
+				};
+
+				config {
+					pins = "gpio3";
+					output-high;
+				};
+			};
+
+			spkr_2_wcd_en_sleep: spkr_2_sd_n_sleep {
+				mux {
+					pins = "gpio3";
+				};
+
+				config {
+					pins = "gpio3";
+					input-enable;
+				};
+			};
+
+			hph_en0_wcd_active: hph_en0_wcd_active {
+				mux {
+					pins = "gpio4";
+				};
+
+				config {
+					pins = "gpio4";
+					output-high;
+				};
+			};
+
+			hph_en0_wcd_sleep: hph_en0_wcd_sleep {
+				mux {
+					pins = "gpio4";
+				};
+
+				config {
+					pins = "gpio4";
+					output-low;
+				};
+			};
+
+			hph_en1_wcd_active: hph_en1_wcd_active {
+				mux {
+					pins = "gpio5";
+				};
+
+				config {
+					pins = "gpio5";
+					output-high;
+				};
+			};
+
+			hph_en1_wcd_sleep: hph_en1_wcd_sleep {
+				mux {
+					pins = "gpio5";
+				};
+
+				config {
+					pins = "gpio5";
+					output-low;
+				};
+			};
+		};
+
+		wsa_spkr_wcd_sd1: msm_cdc_pinctrll {
+		      compatible = "qcom,msm-cdc-pinctrl";
+		      pinctrl-names = "aud_active", "aud_sleep";
+		      pinctrl-0 = <&spkr_1_wcd_en_active>;
+		      pinctrl-1 = <&spkr_1_wcd_en_sleep>;
+		};
+
+		wsa_spkr_wcd_sd2: msm_cdc_pinctrlr {
+		      compatible = "qcom,msm-cdc-pinctrl";
+		      pinctrl-names = "aud_active", "aud_sleep";
+		      pinctrl-0 = <&spkr_2_wcd_en_active>;
+		      pinctrl-1 = <&spkr_2_wcd_en_sleep>;
+		};
+
+		tavil_us_euro_sw: msm_cdc_pinctrl_us_euro_sw {
+		      compatible = "qcom,msm-cdc-pinctrl";
+		      pinctrl-names = "aud_active", "aud_sleep";
+		      pinctrl-0 = <&us_euro_sw_wcd_active>;
+		      pinctrl-1 = <&us_euro_sw_wcd_sleep>;
+		};
+
+		tavil_hph_en0: msm_cdc_pinctrl_hph_en0 {
+		      compatible = "qcom,msm-cdc-pinctrl";
+		      pinctrl-names = "aud_active", "aud_sleep";
+		      pinctrl-0 = <&hph_en0_wcd_active>;
+		      pinctrl-1 = <&hph_en0_wcd_sleep>;
+		};
+
+		tavil_hph_en1: msm_cdc_pinctrl_hph_en1 {
+		      compatible = "qcom,msm-cdc-pinctrl";
+		      pinctrl-names = "aud_active", "aud_sleep";
+		      pinctrl-0 = <&hph_en1_wcd_active>;
+		      pinctrl-1 = <&hph_en1_wcd_sleep>;
+		};
+	};
+};
+
+&tlmm {
+	spkr_1_sd_n {
+		spkr_1_sd_n_sleep: spkr_1_sd_n_sleep {
+			mux {
+				pins = "gpio65";
+				function = "gpio";
+			};
+			config {
+				pins = "gpio65";
+				drive-strength = <2>;   /* 2 mA */
+				bias-pull-down;
+				input-enable;
+			};
+		};
+		spkr_1_sd_n_active: spkr_1_sd_n_active {
+			mux {
+				pins = "gpio65";
+				function = "gpio";
+			};
+			config {
+				pins = "gpio65";
+				drive-strength = <16>;   /* 16 mA */
+				bias-disable;
+				output-high;
+			};
+		};
+	};
+
+	spkr_2_sd_n {
+		spkr_2_sd_n_sleep: spkr_2_sd_n_sleep {
+			mux {
+				pins = "gpio66";
+				function = "gpio";
+			};
+			config {
+				pins = "gpio66";
+				drive-strength = <2>;   /* 2 mA */
+				bias-pull-down;
+				input-enable;
+			};
+		};
+		spkr_2_sd_n_active: spkr_2_sd_n_active {
+			mux {
+				pins = "gpio66";
+				function = "gpio";
+			};
+			config {
+				pins = "gpio66";
+				drive-strength = <16>;   /* 16 mA */
+				bias-disable;
+				output-high;
+			};
+		};
+	};
+};
diff -Nruw linux-4.4.115/arch/arm/boot/dts/qcom/msm8998-wsa881x.dtsi linux-4.4.115-fbx/arch/arm/boot/dts/qcom/msm8998-wsa881x.dtsi
--- linux-4.4.115/arch/arm/boot/dts/qcom/msm8998-wsa881x.dtsi	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/arch/arm/boot/dts/qcom/msm8998-wsa881x.dtsi	2019-01-22 16:16:21.203225578 +0100
@@ -0,0 +1,78 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#include "msm8998-wcd.dtsi"
+
+&slim_aud {
+	tasha_codec {
+		swr_master {
+			compatible = "qcom,swr-wcd";
+			#address-cells = <2>;
+			#size-cells = <0>;
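+
+			/*
+			 * reg appears to hold each amp's SoundWire
+			 * enumeration address, split across the two address
+			 * cells as <addr-high addr-low>.
+			 */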
+
+			wsa881x_211: wsa881x@20170211 {
+				compatible = "qcom,wsa881x";
+				reg = <0x00 0x20170211>;
+				qcom,spkr-sd-n-node = <&wsa_spkr_sd1>;
+			};
+
+			wsa881x_212: wsa881x@20170212 {
+				compatible = "qcom,wsa881x";
+				reg = <0x00 0x20170212>;
+				qcom,spkr-sd-n-node = <&wsa_spkr_sd2>;
+			};
+
+			wsa881x_213: wsa881x@21170213 {
+				compatible = "qcom,wsa881x";
+				reg = <0x00 0x21170213>;
+				qcom,spkr-sd-n-node = <&wsa_spkr_sd1>;
+			};
+
+			wsa881x_214: wsa881x@21170214 {
+				compatible = "qcom,wsa881x";
+				reg = <0x00 0x21170214>;
+				qcom,spkr-sd-n-node = <&wsa_spkr_sd2>;
+			};
+		};
+	};
+
+	tavil_codec {
+		swr_master {
+			compatible = "qcom,swr-wcd";
+			#address-cells = <2>;
+			#size-cells = <0>;
+
+			wsa881x_0211: wsa881x@20170211 {
+				compatible = "qcom,wsa881x";
+				reg = <0x00 0x20170211>;
+				qcom,spkr-sd-n-node = <&wsa_spkr_wcd_sd1>;
+			};
+
+			wsa881x_0212: wsa881x@20170212 {
+				compatible = "qcom,wsa881x";
+				reg = <0x00 0x20170212>;
+				qcom,spkr-sd-n-node = <&wsa_spkr_wcd_sd2>;
+			};
+
+			wsa881x_0213: wsa881x@21170213 {
+				compatible = "qcom,wsa881x";
+				reg = <0x00 0x21170213>;
+				qcom,spkr-sd-n-node = <&wsa_spkr_wcd_sd1>;
+			};
+
+			wsa881x_0214: wsa881x@21170214 {
+				compatible = "qcom,wsa881x";
+				reg = <0x00 0x21170214>;
+				qcom,spkr-sd-n-node = <&wsa_spkr_wcd_sd2>;
+			};
+		};
+	};
+};
diff -Nruw linux-4.4.115/arch/arm/boot/dts/qcom/msm-arm-smmu-8998.dtsi linux-4.4.115-fbx/arch/arm/boot/dts/qcom/msm-arm-smmu-8998.dtsi
--- linux-4.4.115/arch/arm/boot/dts/qcom/msm-arm-smmu-8998.dtsi	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/arch/arm/boot/dts/qcom/msm-arm-smmu-8998.dtsi	2019-01-22 16:16:21.179225361 +0100
@@ -0,0 +1,192 @@
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <dt-bindings/clock/msm-clocks-8998.h>
+#include <dt-bindings/msm/msm-bus-ids.h>
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+
+&soc {
+	anoc1_smmu: arm,smmu-anoc1@1680000 {
+		status = "ok";
+		compatible = "qcom,smmu-v2";
+		reg = <0x1680000 0x10000>;
+		#iommu-cells = <0>;
+		qcom,register-save;
+		qcom,skip-init;
+		#global-interrupts = <0>;
+		interrupts = <GIC_SPI 364 IRQ_TYPE_EDGE_RISING>,
+			   <GIC_SPI 365 IRQ_TYPE_EDGE_RISING>,
+			   <GIC_SPI 366 IRQ_TYPE_EDGE_RISING>,
+			   <GIC_SPI 367 IRQ_TYPE_EDGE_RISING>,
+			   <GIC_SPI 368 IRQ_TYPE_EDGE_RISING>,
+			   <GIC_SPI 369 IRQ_TYPE_EDGE_RISING>;
+		qcom,msm-bus,name = "smmu-bus-client-anoc1";
+		qcom,msm-bus,num-cases = <2>;
+		qcom,msm-bus,active-only;
+		qcom,msm-bus,num-paths = <1>;
+		/* aggre1_noc_clk */
+		qcom,msm-bus,vectors-KBps =
+				<84 10062 0 0>,
+				<84 10062 0 1000>;
+	};
+
+	anoc2_smmu: arm,smmu-anoc2@16c0000 {
+		status = "ok";
+		compatible = "qcom,smmu-v2";
+		reg = <0x16c0000 0x40000>;
+		#iommu-cells = <1>;
+		qcom,register-save;
+		qcom,skip-init;
+		#global-interrupts = <0>;
+		interrupts = <GIC_SPI 373 IRQ_TYPE_EDGE_RISING>,
+			   <GIC_SPI 374 IRQ_TYPE_EDGE_RISING>,
+			   <GIC_SPI 375 IRQ_TYPE_EDGE_RISING>,
+			   <GIC_SPI 376 IRQ_TYPE_EDGE_RISING>,
+			   <GIC_SPI 377 IRQ_TYPE_EDGE_RISING>,
+			   <GIC_SPI 378 IRQ_TYPE_EDGE_RISING>,
+			   <GIC_SPI 462 IRQ_TYPE_EDGE_RISING>,
+			   <GIC_SPI 463 IRQ_TYPE_EDGE_RISING>,
+			   <GIC_SPI 464 IRQ_TYPE_EDGE_RISING>,
+			   <GIC_SPI 465 IRQ_TYPE_EDGE_RISING>;
+		qcom,msm-bus,name = "smmu-bus-client-anoc2";
+		qcom,msm-bus,num-cases = <2>;
+		qcom,msm-bus,active-only;
+		qcom,msm-bus,num-paths = <1>;
+		/* aggre2_noc_clk */
+		qcom,msm-bus,vectors-KBps =
+				<117 10065 0 0>,
+				<117 10065 0 1000>;
+	};
+
+	lpass_q6_smmu: arm,smmu-lpass_q6@5100000 {
+		status = "ok";
+		compatible = "qcom,smmu-v2";
+		reg = <0x5100000 0x40000>;
+		#iommu-cells = <1>;
+		qcom,tz-device-id = "LPASS";
+		qcom,register-save;
+		qcom,skip-init;
+		#global-interrupts = <0>;
+		interrupts = <GIC_SPI 226 IRQ_TYPE_LEVEL_HIGH>,
+			   <GIC_SPI 393 IRQ_TYPE_LEVEL_HIGH>,
+			   <GIC_SPI 394 IRQ_TYPE_LEVEL_HIGH>,
+			   <GIC_SPI 395 IRQ_TYPE_LEVEL_HIGH>,
+			   <GIC_SPI 396 IRQ_TYPE_LEVEL_HIGH>,
+			   <GIC_SPI 397 IRQ_TYPE_LEVEL_HIGH>,
+			   <GIC_SPI 398 IRQ_TYPE_LEVEL_HIGH>,
+			   <GIC_SPI 399 IRQ_TYPE_LEVEL_HIGH>,
+			   <GIC_SPI 400 IRQ_TYPE_LEVEL_HIGH>,
+			   <GIC_SPI 401 IRQ_TYPE_LEVEL_HIGH>,
+			   <GIC_SPI 402 IRQ_TYPE_LEVEL_HIGH>,
+			   <GIC_SPI 403 IRQ_TYPE_LEVEL_HIGH>,
+			   <GIC_SPI 137 IRQ_TYPE_LEVEL_HIGH>;
+		vdd-supply = <&gdsc_hlos1_vote_lpass_adsp>;
+		clocks = <&clock_gcc clk_hlos1_vote_lpass_adsp_smmu_clk>;
+		clock-names = "lpass_q6_smmu_clk";
+		#clock-cells = <1>;
+	};
+
+	mmss_smmu: arm,smmu-mmss@cd00000 {
+		status = "ok";
+		compatible = "qcom,smmu-v2";
+		reg = <0xcd00000 0x40000>;
+		#iommu-cells = <1>;
+		qcom,register-save;
+		qcom,no-smr-check;
+		qcom,skip-init;
+		#global-interrupts = <0>;
+		interrupts = <GIC_SPI 263 IRQ_TYPE_LEVEL_HIGH>,
+			   <GIC_SPI 266 IRQ_TYPE_LEVEL_HIGH>,
+			   <GIC_SPI 267 IRQ_TYPE_LEVEL_HIGH>,
+			   <GIC_SPI 268 IRQ_TYPE_LEVEL_HIGH>,
+			   <GIC_SPI 244 IRQ_TYPE_LEVEL_HIGH>,
+			   <GIC_SPI 245 IRQ_TYPE_LEVEL_HIGH>,
+			   <GIC_SPI 247 IRQ_TYPE_LEVEL_HIGH>,
+			   <GIC_SPI 248 IRQ_TYPE_LEVEL_HIGH>,
+			   <GIC_SPI 249 IRQ_TYPE_LEVEL_HIGH>,
+			   <GIC_SPI 250 IRQ_TYPE_LEVEL_HIGH>,
+			   <GIC_SPI 251 IRQ_TYPE_LEVEL_HIGH>,
+			   <GIC_SPI 252 IRQ_TYPE_LEVEL_HIGH>,
+			   <GIC_SPI 253 IRQ_TYPE_LEVEL_HIGH>,
+			   <GIC_SPI 254 IRQ_TYPE_LEVEL_HIGH>,
+			   <GIC_SPI 255 IRQ_TYPE_LEVEL_HIGH>,
+			   <GIC_SPI 256 IRQ_TYPE_LEVEL_HIGH>,
+			   <GIC_SPI 260 IRQ_TYPE_LEVEL_HIGH>,
+			   <GIC_SPI 261 IRQ_TYPE_LEVEL_HIGH>,
+			   <GIC_SPI 262 IRQ_TYPE_LEVEL_HIGH>,
+			   <GIC_SPI 272 IRQ_TYPE_LEVEL_HIGH>;
+		vdd-supply = <&gdsc_bimc_smmu>;
+		clocks = <&clock_mmss clk_mmss_mnoc_ahb_clk>,
+			<&clock_mmss clk_mmss_bimc_smmu_ahb_clk>,
+			<&clock_mmss clk_mmss_bimc_smmu_axi_clk>;
+		clock-names = "mmss_mnoc_ahb_clk",
+			"mmss_bimc_smmu_ahb_clk",
+			"mmss_bimc_smmu_axi_clk";
+		#clock-cells = <1>;
+		qcom,msm-bus,name = "smmu-bus-client-mmss";
+		qcom,msm-bus,num-cases = <2>;
+		qcom,msm-bus,active-only;
+		qcom,msm-bus,num-paths = <2>;
+		/* ahb_clk_src, mmssnoc_axi_clk */
+		qcom,msm-bus,vectors-KBps =
+				<102 722 0 0>, <29 512 0 0>,
+				<102 722 0 1000>, <29 512 0 1000>;
+	};
+
+	kgsl_smmu: arm,smmu-kgsl@5040000 {
+		status = "ok";
+		compatible = "qcom,smmu-v2";
+		reg = <0x5040000 0x10000>;
+		#iommu-cells = <1>;
+		qcom,tz-device-id = "GPU";
+		qcom,dynamic;
+		qcom,register-save;
+		qcom,skip-init;
+		#global-interrupts = <0>;
+		interrupts = <GIC_SPI 329 IRQ_TYPE_LEVEL_HIGH>,
+			   <GIC_SPI 330 IRQ_TYPE_LEVEL_HIGH>,
+			   <GIC_SPI 331 IRQ_TYPE_EDGE_RISING>;
+		qcom,deferred-regulator-disable-delay = <80>;
+		vdd-supply = <&gdsc_gpu_cx>;
+		clocks = <&clock_gcc clk_gcc_gpu_cfg_ahb_clk>,
+			<&clock_gcc clk_gcc_bimc_gfx_clk>,
+			<&clock_gcc clk_gcc_gpu_bimc_gfx_clk>;
+
+		clock-names = "gcc_gpu_cfg_ahb_clk",
+			"gcc_bimc_gfx_clk",
+			"gcc_gpu_bimc_gfx_clk";
+		#clock-cells = <1>;
+	};
+
+	iommu_test_device {
+		compatible = "iommu-debug-test";
+		/*
+		 * 42 shouldn't be used by anyone on the mmss_smmu.  We just
+		 * need _something_ here to get this node recognized by the
+		 * SMMU driver. Our test uses ATOS, which doesn't use SIDs
+		 * anyway, so using a dummy value is ok.
+		 */
+		iommus = <&mmss_smmu 42>;
+	};
+
+	iommu_coherent_test_device {
+		compatible = "iommu-debug-test";
+		/*
+		 * 43 shouldn't be used by anyone on the mmss_smmu.  We just
+		 * need _something_ here to get this node recognized by the
+		 * SMMU driver. Our test uses ATOS, which doesn't use SIDs
+		 * anyway, so using a dummy value is ok.
+		 */
+		iommus = <&mmss_smmu 43>;
+		dma-coherent;
+	};
+};
diff -Nruw linux-4.4.115/arch/arm/boot/dts/qcom/msm-arm-smmu-impl-defs-8998.dtsi linux-4.4.115-fbx/arch/arm/boot/dts/qcom/msm-arm-smmu-impl-defs-8998.dtsi
--- linux-4.4.115/arch/arm/boot/dts/qcom/msm-arm-smmu-impl-defs-8998.dtsi	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/arch/arm/boot/dts/qcom/msm-arm-smmu-impl-defs-8998.dtsi	2019-01-22 16:16:21.179225361 +0100
@@ -0,0 +1,565 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+&kgsl_smmu {
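+	/*
+	 * Each tuple is an <offset value> pair programmed into the SMMU's
+	 * implementation-defined register space when a domain is attached.
+	 */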
+	attach-impl-defs = <0x6000 0x2378>,
+		<0x6060 0x1055>,
+		<0x6470 0x110011>,
+		<0x6478 0x0>,
+		<0x647c 0x1000100>,
+		<0x6480 0x81108110>,
+		<0x6484 0x81108110>,
+		<0x6488 0x3e003e0>,
+		<0x648c 0x3e003e0>,
+		<0x6490 0x80008010>,
+		<0x6494 0x8020>,
+		<0x649c 0x6>,
+		<0x6800 0x6>,
+		<0x6900 0x3ff>,
+		<0x6924 0x604>,
+		<0x6928 0x11000>,
+		<0x6930 0x800>,
+		<0x6960 0x3>,
+		<0x6b64 0x1a5551>,
+		<0x6b68 0x2aaa2f82>;
+};
+
+&lpass_q6_smmu {
+	attach-impl-defs = <0x6000 0x3270>,
+		<0x6060 0x1055>,
+		<0x6070 0xe0>,
+		<0x6074 0xe0>,
+		<0x6078 0xe0>,
+		<0x607c 0xe0>,
+		<0x60f0 0xc0>,
+		<0x60f4 0xc8>,
+		<0x60f8 0xd0>,
+		<0x60fc 0xd8>,
+		<0x6170 0x0>,
+		<0x6174 0x30>,
+		<0x6178 0x60>,
+		<0x617c 0x90>,
+		<0x6270 0x0>,
+		<0x6274 0x2>,
+		<0x6278 0x4>,
+		<0x627c 0x6>,
+		<0x62f0 0x8>,
+		<0x62f4 0xe>,
+		<0x62f8 0x14>,
+		<0x62fc 0x1a>,
+		<0x6370 0x20>,
+		<0x6374 0x40>,
+		<0x6378 0x60>,
+		<0x637c 0x80>,
+		<0x67a0 0x0>,
+		<0x67a4 0x0>,
+		<0x67a8 0x20>,
+		<0x67b0 0x0>,
+		<0x67b4 0x8>,
+		<0x67b8 0xc8>,
+		<0x67d0 0x4>,
+		<0x67dc 0x8>,
+		<0x67e0 0x8>,
+		<0x6800 0x6>,
+		<0x6900 0x3ff>,
+		<0x6924 0x202>,
+		<0x6928 0x10a00>,
+		<0x6930 0x500>,
+		<0x6b64 0x121151>,
+		<0x6b68 0x8a840080>,
+		<0x6c00 0x0>,
+		<0x6c04 0x0>,
+		<0x6c08 0x0>,
+		<0x6c0c 0x0>,
+		<0x6c10 0x1>,
+		<0x6c14 0x1>,
+		<0x6c18 0x1>,
+		<0x6c1c 0x1>,
+		<0x6c20 0x2>,
+		<0x6c24 0x2>,
+		<0x6c28 0x2>,
+		<0x6c2c 0x2>,
+		<0x6c30 0x3>,
+		<0x6c34 0x3>,
+		<0x6c38 0x3>,
+		<0x6c3c 0x3>;
+};
+
+&mmss_smmu {
+	attach-impl-defs = <0x6000 0x3270>,
+		<0x6060 0x1055>,
+		<0x6800 0x6>,
+		<0x6900 0x3ff>,
+		<0x6924 0x204>,
+		<0x6928 0x11002>,
+		<0x6930 0x800>,
+		<0x6960 0xffffffff>,
+		<0x6964 0xffffffff>,
+		<0x6968 0xffffffff>,
+		<0x696c 0xffffffff>,
+		<0x6b48 0x330330>,
+		<0x6b4c 0x81>,
+		<0x6b50 0x3333>,
+		<0x6b54 0x3333>,
+		<0x6b64 0x1a5555>,
+		<0x6b68 0x9aaa892a>,
+		<0x6b70 0x10100002>,
+		<0x6b74 0x10100002>,
+		<0x6b78 0x10100002>,
+		<0x6b80 0x20042004>,
+		<0x6b84 0x20042004>;
+};
+
+&anoc1_smmu {
+	attach-impl-defs = <0x6000 0x3270>,
+		<0x6060 0x1055>,
+		<0x6070 0x19>,
+		<0x6074 0x39>,
+		<0x6078 0x41>,
+		<0x607c 0x59>,
+		<0x6080 0x95>,
+		<0x6084 0x98>,
+		<0x6088 0xc0>,
+		<0x608c 0xc0>,
+		<0x60f0 0x0>,
+		<0x60f4 0x2>,
+		<0x60f8 0x5>,
+		<0x60fc 0x8>,
+		<0x6100 0x16>,
+		<0x6104 0x17>,
+		<0x6108 0x19>,
+		<0x610c 0x19>,
+		<0x6170 0x0>,
+		<0x6174 0x0>,
+		<0x6178 0x0>,
+		<0x617c 0x0>,
+		<0x6180 0x0>,
+		<0x6184 0x0>,
+		<0x6188 0x0>,
+		<0x618c 0x0>,
+		<0x6270 0x0>,
+		<0x6274 0xd>,
+		<0x6278 0xe>,
+		<0x627c 0x12>,
+		<0x6280 0x16>,
+		<0x6284 0x16>,
+		<0x6288 0x18>,
+		<0x628c 0x18>,
+		<0x62f0 0x18>,
+		<0x62f4 0x1b>,
+		<0x62f8 0x1c>,
+		<0x62fc 0x24>,
+		<0x6300 0x28>,
+		<0x6304 0x2b>,
+		<0x6308 0x31>,
+		<0x630c 0x31>,
+		<0x6370 0x31>,
+		<0x6374 0x34>,
+		<0x6378 0x35>,
+		<0x637c 0x47>,
+		<0x6380 0x4f>,
+		<0x6384 0x54>,
+		<0x6388 0x60>,
+		<0x638c 0x60>,
+		<0x67a0 0x0>,
+		<0x67a4 0xa7>,
+		<0x67a8 0xc0>,
+		<0x67b0 0x0>,
+		<0x67b4 0x18>,
+		<0x67b8 0x7c>,
+		<0x67d0 0x0>,
+		<0x67dc 0x4>,
+		<0x67e0 0x8>,
+		<0x6800 0x6>,
+		<0x6900 0x3ff>,
+		<0x6b64 0x121151>,
+		<0x6b68 0xbb804080>,
+		<0x6c00 0x0>,
+		<0x6c04 0x0>,
+		<0x6c08 0x0>,
+		<0x6c0c 0x0>,
+		<0x6c10 0x0>,
+		<0x6c14 0x0>,
+		<0x6c18 0x0>,
+		<0x6c1c 0x0>,
+		<0x6c20 0x0>,
+		<0x6c24 0x0>,
+		<0x6c28 0x0>,
+		<0x6c2c 0x0>,
+		<0x6c30 0x0>,
+		<0x6c34 0x0>,
+		<0x6c38 0x0>,
+		<0x6c3c 0x0>,
+		<0x6c40 0x0>,
+		<0x6c44 0x0>,
+		<0x6c48 0x0>,
+		<0x6c4c 0x0>,
+		<0x6c50 0x0>,
+		<0x6c54 0x0>,
+		<0x6c58 0x0>,
+		<0x6c5c 0x0>,
+		<0x6c60 0x0>,
+		<0x6c64 0x0>,
+		<0x6c68 0x0>,
+		<0x6c6c 0x0>,
+		<0x6c70 0x0>,
+		<0x6c74 0x0>,
+		<0x6c78 0x0>,
+		<0x6c7c 0x0>,
+		<0x6c80 0x0>,
+		<0x6c84 0x1>,
+		<0x6c88 0x0>,
+		<0x6c8c 0x0>,
+		<0x6c90 0x4>,
+		<0x6c94 0x3>,
+		<0x6c98 0x2>,
+		<0x6c9c 0x0>,
+		<0x6ca0 0x5>,
+		<0x6ca4 0x5>,
+		<0x6ca8 0x0>,
+		<0x6cac 0x0>,
+		<0x6cb0 0x0>,
+		<0x6cb4 0x0>,
+		<0x6cb8 0x0>,
+		<0x6cbc 0x0>,
+		<0x6cc0 0x0>,
+		<0x6cc4 0x0>,
+		<0x6cc8 0x0>,
+		<0x6ccc 0x0>,
+		<0x6cd0 0x0>,
+		<0x6cd4 0x0>,
+		<0x6cd8 0x0>,
+		<0x6cdc 0x0>,
+		<0x6ce0 0x0>,
+		<0x6ce4 0x0>,
+		<0x6ce8 0x0>,
+		<0x6cec 0x0>,
+		<0x6cf0 0x0>,
+		<0x6cf4 0x0>,
+		<0x6cf8 0x0>,
+		<0x6cfc 0x0>,
+		<0x6d00 0x0>,
+		<0x6d04 0x0>,
+		<0x6d08 0x0>,
+		<0x6d0c 0x0>,
+		<0x6d10 0x0>,
+		<0x6d14 0x0>,
+		<0x6d18 0x0>,
+		<0x6d1c 0x0>,
+		<0x6d20 0x0>,
+		<0x6d24 0x0>,
+		<0x6d28 0x0>,
+		<0x6d2c 0x0>,
+		<0x6d30 0x0>,
+		<0x6d34 0x0>,
+		<0x6d38 0x0>,
+		<0x6d3c 0x0>,
+		<0x6d40 0x0>,
+		<0x6d44 0x0>,
+		<0x6d48 0x0>,
+		<0x6d4c 0x0>,
+		<0x6d50 0x0>,
+		<0x6d54 0x0>,
+		<0x6d58 0x0>,
+		<0x6d5c 0x0>,
+		<0x6d60 0x0>,
+		<0x6d64 0x0>,
+		<0x6d68 0x0>,
+		<0x6d6c 0x0>,
+		<0x6d70 0x0>,
+		<0x6d74 0x0>,
+		<0x6d78 0x0>,
+		<0x6d7c 0x0>,
+		<0x6d80 0x0>,
+		<0x6d84 0x0>,
+		<0x6d88 0x0>,
+		<0x6d8c 0x0>,
+		<0x6d90 0x0>,
+		<0x6d94 0x0>,
+		<0x6d98 0x0>,
+		<0x6d9c 0x0>,
+		<0x6da0 0x0>,
+		<0x6da4 0x0>,
+		<0x6da8 0x0>,
+		<0x6dac 0x0>,
+		<0x6db0 0x0>,
+		<0x6db4 0x0>,
+		<0x6db8 0x0>,
+		<0x6dbc 0x0>,
+		<0x6dc0 0x0>,
+		<0x6dc4 0x0>,
+		<0x6dc8 0x0>,
+		<0x6dcc 0x0>,
+		<0x6dd0 0x0>,
+		<0x6dd4 0x0>,
+		<0x6dd8 0x0>,
+		<0x6ddc 0x0>,
+		<0x6de0 0x0>,
+		<0x6de4 0x0>,
+		<0x6de8 0x0>,
+		<0x6dec 0x0>,
+		<0x6df0 0x0>,
+		<0x6df4 0x0>,
+		<0x6df8 0x0>,
+		<0x6dfc 0x0>;
+};
+
+&anoc2_smmu {
+	attach-impl-defs = <0x6000 0x3270>,
+		<0x6060 0x1055>,
+		<0x6070 0x12>,
+		<0x6074 0x26>,
+		<0x6078 0x3a>,
+		<0x607c 0x3c>,
+		<0x6080 0x3f>,
+		<0x6084 0x67>,
+		<0x6088 0x6c>,
+		<0x608c 0x74>,
+		<0x6090 0x7c>,
+		<0x6094 0x80>,
+		<0x6098 0xa0>,
+		<0x609c 0xa0>,
+		<0x60a0 0xa0>,
+		<0x60a4 0xa0>,
+		<0x60a8 0xa0>,
+		<0x60ac 0xa0>,
+		<0x60f0 0x0>,
+		<0x60f4 0x1>,
+		<0x60f8 0x3>,
+		<0x60fc 0x4>,
+		<0x6100 0x5>,
+		<0x6104 0x7>,
+		<0x6108 0x8>,
+		<0x610c 0x10>,
+		<0x6110 0x10>,
+		<0x6114 0x10>,
+		<0x6118 0x12>,
+		<0x611c 0x12>,
+		<0x6120 0x12>,
+		<0x6124 0x12>,
+		<0x6128 0x12>,
+		<0x612c 0x12>,
+		<0x6170 0x0>,
+		<0x6174 0x0>,
+		<0x6178 0x0>,
+		<0x617c 0x0>,
+		<0x6180 0x0>,
+		<0x6184 0x0>,
+		<0x6188 0x0>,
+		<0x618c 0x0>,
+		<0x6190 0x0>,
+		<0x6194 0x0>,
+		<0x6198 0x0>,
+		<0x619c 0x0>,
+		<0x61a0 0x0>,
+		<0x61a4 0x0>,
+		<0x61a8 0x0>,
+		<0x61ac 0x0>,
+		<0x6270 0x0>,
+		<0x6274 0x1>,
+		<0x6278 0x2>,
+		<0x627c 0x4>,
+		<0x6280 0x4>,
+		<0x6284 0x6>,
+		<0x6288 0x6>,
+		<0x628c 0x18>,
+		<0x6290 0x1a>,
+		<0x6294 0x1a>,
+		<0x6298 0x1e>,
+		<0x629c 0x1e>,
+		<0x62a0 0x1e>,
+		<0x62a4 0x1e>,
+		<0x62a8 0x1e>,
+		<0x62ac 0x1e>,
+		<0x62f0 0x1e>,
+		<0x62f4 0x24>,
+		<0x62f8 0x2a>,
+		<0x62fc 0x2c>,
+		<0x6300 0x2d>,
+		<0x6304 0x33>,
+		<0x6308 0x34>,
+		<0x630c 0x3a>,
+		<0x6310 0x3c>,
+		<0x6314 0x44>,
+		<0x6318 0x48>,
+		<0x631c 0x48>,
+		<0x6320 0x48>,
+		<0x6324 0x48>,
+		<0x6328 0x48>,
+		<0x632c 0x48>,
+		<0x6370 0x48>,
+		<0x6374 0x4d>,
+		<0x6378 0x52>,
+		<0x637c 0x56>,
+		<0x6380 0x59>,
+		<0x6384 0x63>,
+		<0x6388 0x68>,
+		<0x638c 0x70>,
+		<0x6390 0x78>,
+		<0x6394 0x88>,
+		<0x6398 0x90>,
+		<0x639c 0x90>,
+		<0x63a0 0x90>,
+		<0x63a4 0x90>,
+		<0x63a8 0x90>,
+		<0x63ac 0x90>,
+		<0x67a0 0x0>,
+		<0x67a4 0x8e>,
+		<0x67a8 0xa0>,
+		<0x67b0 0x0>,
+		<0x67b4 0x1e>,
+		<0x67b8 0xc6>,
+		<0x67d0 0x0>,
+		<0x67dc 0x4>,
+		<0x67e0 0x8>,
+		<0x6800 0x6>,
+		<0x6900 0x3ff>,
+		<0x6b48 0x330331>,
+		<0x6b4c 0x81>,
+		<0x6b50 0x1313>,
+		<0x6b64 0x121155>,
+		<0x6b68 0xea880920>,
+		<0x6b70 0x10100101>,
+		<0x6b74 0xc0c0000>,
+		<0x6b78 0xc0c0000>,
+		<0x6b80 0x20012001>,
+		<0x6b84 0x20012001>,
+		<0x6c00 0x5>,
+		<0x6c04 0x0>,
+		<0x6c08 0x5>,
+		<0x6c0c 0x0>,
+		<0x6c10 0x5>,
+		<0x6c14 0x0>,
+		<0x6c18 0x5>,
+		<0x6c1c 0x0>,
+		<0x6c20 0x5>,
+		<0x6c24 0x0>,
+		<0x6c28 0x0>,
+		<0x6c2c 0x0>,
+		<0x6c30 0x0>,
+		<0x6c34 0x0>,
+		<0x6c38 0x0>,
+		<0x6c3c 0x0>,
+		<0x6c40 0x0>,
+		<0x6c44 0x0>,
+		<0x6c48 0x0>,
+		<0x6c4c 0x0>,
+		<0x6c50 0x0>,
+		<0x6c54 0x0>,
+		<0x6c58 0x0>,
+		<0x6c5c 0x0>,
+		<0x6c60 0x0>,
+		<0x6c64 0x0>,
+		<0x6c68 0x0>,
+		<0x6c6c 0x0>,
+		<0x6c70 0x0>,
+		<0x6c74 0x0>,
+		<0x6c78 0x0>,
+		<0x6c7c 0x0>,
+		<0x6c80 0x0>,
+		<0x6c84 0x0>,
+		<0x6c88 0x0>,
+		<0x6c8c 0x0>,
+		<0x6c90 0x0>,
+		<0x6c94 0x0>,
+		<0x6c98 0x0>,
+		<0x6c9c 0x0>,
+		<0x6ca0 0x0>,
+		<0x6ca4 0x0>,
+		<0x6ca8 0x0>,
+		<0x6cac 0x0>,
+		<0x6cb0 0x0>,
+		<0x6cb4 0x0>,
+		<0x6cb8 0x0>,
+		<0x6cbc 0x0>,
+		<0x6cc0 0x0>,
+		<0x6cc4 0x0>,
+		<0x6cc8 0x0>,
+		<0x6ccc 0x0>,
+		<0x6cd0 0x0>,
+		<0x6cd4 0x0>,
+		<0x6cd8 0x0>,
+		<0x6cdc 0x0>,
+		<0x6ce0 0x0>,
+		<0x6ce4 0x0>,
+		<0x6ce8 0x0>,
+		<0x6cec 0x0>,
+		<0x6cf0 0x0>,
+		<0x6cf4 0x0>,
+		<0x6cf8 0x0>,
+		<0x6cfc 0x0>,
+		<0x6d00 0x8>,
+		<0x6d04 0x0>,
+		<0x6d08 0x8>,
+		<0x6d0c 0x0>,
+		<0x6d10 0x7>,
+		<0x6d14 0x0>,
+		<0x6d18 0x3>,
+		<0x6d1c 0x2>,
+		<0x6d20 0x4>,
+		<0x6d24 0x0>,
+		<0x6d28 0x4>,
+		<0x6d2c 0x0>,
+		<0x6d30 0x6>,
+		<0x6d34 0x0>,
+		<0x6d38 0x9>,
+		<0x6d3c 0x0>,
+		<0x6d40 0x0>,
+		<0x6d44 0x1>,
+		<0x6d48 0x4>,
+		<0x6d4c 0x0>,
+		<0x6d50 0x4>,
+		<0x6d54 0x0>,
+		<0x6d58 0x0>,
+		<0x6d5c 0x0>,
+		<0x6d60 0x0>,
+		<0x6d64 0x0>,
+		<0x6d68 0x0>,
+		<0x6d6c 0x0>,
+		<0x6d70 0x0>,
+		<0x6d74 0x0>,
+		<0x6d78 0x0>,
+		<0x6d7c 0x0>,
+		<0x6d80 0x0>,
+		<0x6d84 0x0>,
+		<0x6d88 0x0>,
+		<0x6d8c 0x0>,
+		<0x6d90 0x0>,
+		<0x6d94 0x0>,
+		<0x6d98 0x0>,
+		<0x6d9c 0x0>,
+		<0x6da0 0x0>,
+		<0x6da4 0x0>,
+		<0x6da8 0x0>,
+		<0x6dac 0x0>,
+		<0x6db0 0x0>,
+		<0x6db4 0x0>,
+		<0x6db8 0x0>,
+		<0x6dbc 0x0>,
+		<0x6dc0 0x0>,
+		<0x6dc4 0x0>,
+		<0x6dc8 0x0>,
+		<0x6dcc 0x0>,
+		<0x6dd0 0x0>,
+		<0x6dd4 0x0>,
+		<0x6dd8 0x0>,
+		<0x6ddc 0x0>,
+		<0x6de0 0x0>,
+		<0x6de4 0x0>,
+		<0x6de8 0x0>,
+		<0x6dec 0x0>,
+		<0x6df0 0x0>,
+		<0x6df4 0x0>,
+		<0x6df8 0x0>,
+		<0x6dfc 0x0>;
+};
diff -Nruw linux-4.4.115/arch/arm/boot/dts/qcom/msm-audio-lpass.dtsi linux-4.4.115-fbx/arch/arm/boot/dts/qcom/msm-audio-lpass.dtsi
--- linux-4.4.115/arch/arm/boot/dts/qcom/msm-audio-lpass.dtsi	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/arch/arm/boot/dts/qcom/msm-audio-lpass.dtsi	2019-10-29 09:26:22.905195956 +0100
@@ -0,0 +1,529 @@
+/*
+ * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+&soc {
+
+	pcm0: qcom,msm-pcm {
+		compatible = "qcom,msm-pcm-dsp";
+		qcom,msm-pcm-dsp-id = <0>;
+	};
+
+	routing: qcom,msm-pcm-routing {
+		compatible = "qcom,msm-pcm-routing";
+	};
+
+	compr: qcom,msm-compr-dsp {
+		compatible = "qcom,msm-compr-dsp";
+	};
+
+	pcm1: qcom,msm-pcm-low-latency {
+		compatible = "qcom,msm-pcm-dsp";
+		qcom,msm-pcm-dsp-id = <1>;
+		qcom,msm-pcm-low-latency;
+		qcom,latency-level = "regular";
+	};
+
+	pcm2: qcom,msm-ultra-low-latency {
+		compatible = "qcom,msm-pcm-dsp";
+		qcom,msm-pcm-dsp-id = <2>;
+		qcom,msm-pcm-low-latency;
+		qcom,latency-level = "ultra";
+	};
+
+	pcm_noirq: qcom,msm-pcm-dsp-noirq {
+		compatible = "qcom,msm-pcm-dsp-noirq";
+		qcom,msm-pcm-low-latency;
+		qcom,latency-level = "ultra";
+	};
+
+	compress: qcom,msm-compress-dsp {
+		compatible = "qcom,msm-compress-dsp";
+	};
+
+	voip: qcom,msm-voip-dsp {
+		compatible = "qcom,msm-voip-dsp";
+	};
+
+	voice: qcom,msm-pcm-voice {
+		compatible = "qcom,msm-pcm-voice";
+		qcom,destroy-cvd;
+	};
+
+	stub_codec: qcom,msm-stub-codec {
+		compatible = "qcom,msm-stub-codec";
+	};
+
+	qcom,msm-dai-fe {
+		compatible = "qcom,msm-dai-fe";
+	};
+
+	afe: qcom,msm-pcm-afe {
+		compatible = "qcom,msm-pcm-afe";
+	};
+
+	dai_hdmi: qcom,msm-dai-q6-hdmi {
+		compatible = "qcom,msm-dai-q6-hdmi";
+		qcom,msm-dai-q6-dev-id = <8>;
+	};
+
+	dai_dp: qcom,msm-dai-q6-dp {
+		compatible = "qcom,msm-dai-q6-hdmi";
+		qcom,msm-dai-q6-dev-id = <24608>;
+	};
+
+	loopback: qcom,msm-pcm-loopback {
+		compatible = "qcom,msm-pcm-loopback";
+	};
+
+	trans_loopback: qcom,msm-transcode-loopback {
+		compatible = "qcom,msm-transcode-loopback";
+	};
+
+	qcom,msm-dai-mi2s {
+		compatible = "qcom,msm-dai-mi2s";
+		dai_mi2s0: qcom,msm-dai-q6-mi2s-prim {
+			compatible = "qcom,msm-dai-q6-mi2s";
+			qcom,msm-dai-q6-mi2s-dev-id = <0>;
+			qcom,msm-mi2s-rx-lines = <3>;
+			qcom,msm-mi2s-tx-lines = <0>;
+		};
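+
+		/*
+		 * The rx/tx-lines values are SD-line bitmasks: <3> drives
+		 * SD0 and SD1, <1> only SD0, <0> leaves the path unused.
+		 */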
+
+		dai_mi2s1: qcom,msm-dai-q6-mi2s-sec {
+			compatible = "qcom,msm-dai-q6-mi2s";
+			qcom,msm-dai-q6-mi2s-dev-id = <1>;
+			qcom,msm-mi2s-rx-lines = <1>;
+			qcom,msm-mi2s-tx-lines = <0>;
+		};
+
+		dai_mi2s2: qcom,msm-dai-q6-mi2s-tert {
+			compatible = "qcom,msm-dai-q6-mi2s";
+			qcom,msm-dai-q6-mi2s-dev-id = <2>;
+			qcom,msm-mi2s-rx-lines = <0>;
+			qcom,msm-mi2s-tx-lines = <3>;
+		};
+
+		dai_mi2s3: qcom,msm-dai-q6-mi2s-quat {
+			compatible = "qcom,msm-dai-q6-mi2s";
+			qcom,msm-dai-q6-mi2s-dev-id = <3>;
+			qcom,msm-mi2s-rx-lines = <1>;
+			qcom,msm-mi2s-tx-lines = <2>;
+		};
+
+		dai_mi2s4: qcom,msm-dai-q6-mi2s-quin {
+			compatible = "qcom,msm-dai-q6-mi2s";
+			qcom,msm-dai-q6-mi2s-dev-id = <5>;
+			qcom,msm-mi2s-rx-lines = <1>;
+			qcom,msm-mi2s-tx-lines = <2>;
+		};
+
+		dai_mi2s5: qcom,msm-dai-q6-mi2s-senary {
+			compatible = "qcom,msm-dai-q6-mi2s";
+			qcom,msm-dai-q6-mi2s-dev-id = <6>;
+			qcom,msm-mi2s-rx-lines = <0>;
+			qcom,msm-mi2s-tx-lines = <3>;
+		};
+	};
+
+	lsm: qcom,msm-lsm-client {
+		compatible = "qcom,msm-lsm-client";
+	};
+
+	qcom,msm-dai-q6 {
+		compatible = "qcom,msm-dai-q6";
+		sb_0_rx: qcom,msm-dai-q6-sb-0-rx {
+			compatible = "qcom,msm-dai-q6-dev";
+			qcom,msm-dai-q6-dev-id = <16384>;
+		};
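+
+		/*
+		 * dev-id values are ADSP AFE port IDs: 16384/16385 are
+		 * SLIMBUS_0_RX/TX, and each later SLIMbus port pair counts
+		 * up by two from there.
+		 */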
+
+		sb_0_tx: qcom,msm-dai-q6-sb-0-tx {
+			compatible = "qcom,msm-dai-q6-dev";
+			qcom,msm-dai-q6-dev-id = <16385>;
+		};
+
+		sb_1_rx: qcom,msm-dai-q6-sb-1-rx {
+			compatible = "qcom,msm-dai-q6-dev";
+			qcom,msm-dai-q6-dev-id = <16386>;
+		};
+
+		sb_1_tx: qcom,msm-dai-q6-sb-1-tx {
+			compatible = "qcom,msm-dai-q6-dev";
+			qcom,msm-dai-q6-dev-id = <16387>;
+		};
+
+		sb_2_rx: qcom,msm-dai-q6-sb-2-rx {
+			compatible = "qcom,msm-dai-q6-dev";
+			qcom,msm-dai-q6-dev-id = <16388>;
+		};
+
+		sb_2_tx: qcom,msm-dai-q6-sb-2-tx {
+			compatible = "qcom,msm-dai-q6-dev";
+			qcom,msm-dai-q6-dev-id = <16389>;
+		};
+
+		sb_3_rx: qcom,msm-dai-q6-sb-3-rx {
+			compatible = "qcom,msm-dai-q6-dev";
+			qcom,msm-dai-q6-dev-id = <16390>;
+		};
+
+		sb_3_tx: qcom,msm-dai-q6-sb-3-tx {
+			compatible = "qcom,msm-dai-q6-dev";
+			qcom,msm-dai-q6-dev-id = <16391>;
+		};
+
+		sb_4_rx: qcom,msm-dai-q6-sb-4-rx {
+			compatible = "qcom,msm-dai-q6-dev";
+			qcom,msm-dai-q6-dev-id = <16392>;
+		};
+
+		sb_4_tx: qcom,msm-dai-q6-sb-4-tx {
+			compatible = "qcom,msm-dai-q6-dev";
+			qcom,msm-dai-q6-dev-id = <16393>;
+		};
+
+		sb_5_tx: qcom,msm-dai-q6-sb-5-tx {
+			compatible = "qcom,msm-dai-q6-dev";
+			qcom,msm-dai-q6-dev-id = <16395>;
+		};
+
+		sb_5_rx: qcom,msm-dai-q6-sb-5-rx {
+			compatible = "qcom,msm-dai-q6-dev";
+			qcom,msm-dai-q6-dev-id = <16394>;
+		};
+
+		sb_6_rx: qcom,msm-dai-q6-sb-6-rx {
+			compatible = "qcom,msm-dai-q6-dev";
+			qcom,msm-dai-q6-dev-id = <16396>;
+		};
+
+		sb_7_rx: qcom,msm-dai-q6-sb-7-rx {
+			compatible = "qcom,msm-dai-q6-dev";
+			qcom,msm-dai-q6-dev-id = <16398>;
+		};
+
+		sb_7_tx: qcom,msm-dai-q6-sb-7-tx {
+			compatible = "qcom,msm-dai-q6-dev";
+			qcom,msm-dai-q6-dev-id = <16399>;
+		};
+
+		sb_8_rx: qcom,msm-dai-q6-sb-8-rx {
+			compatible = "qcom,msm-dai-q6-dev";
+			qcom,msm-dai-q6-dev-id = <16400>;
+		};
+
+		sb_8_tx: qcom,msm-dai-q6-sb-8-tx {
+			compatible = "qcom,msm-dai-q6-dev";
+			qcom,msm-dai-q6-dev-id = <16401>;
+		};
+
+		bt_sco_rx: qcom,msm-dai-q6-bt-sco-rx {
+			compatible = "qcom,msm-dai-q6-dev";
+			qcom,msm-dai-q6-dev-id = <12288>;
+		};
+
+		bt_sco_tx: qcom,msm-dai-q6-bt-sco-tx {
+			compatible = "qcom,msm-dai-q6-dev";
+			qcom,msm-dai-q6-dev-id = <12289>;
+		};
+
+		int_fm_rx: qcom,msm-dai-q6-int-fm-rx {
+			compatible = "qcom,msm-dai-q6-dev";
+			qcom,msm-dai-q6-dev-id = <12292>;
+		};
+
+		int_fm_tx: qcom,msm-dai-q6-int-fm-tx {
+			compatible = "qcom,msm-dai-q6-dev";
+			qcom,msm-dai-q6-dev-id = <12293>;
+		};
+
+		afe_pcm_rx: qcom,msm-dai-q6-be-afe-pcm-rx {
+			compatible = "qcom,msm-dai-q6-dev";
+			qcom,msm-dai-q6-dev-id = <224>;
+		};
+
+		afe_pcm_tx: qcom,msm-dai-q6-be-afe-pcm-tx {
+			compatible = "qcom,msm-dai-q6-dev";
+			qcom,msm-dai-q6-dev-id = <225>;
+		};
+
+		afe_proxy_rx: qcom,msm-dai-q6-afe-proxy-rx {
+			compatible = "qcom,msm-dai-q6-dev";
+			qcom,msm-dai-q6-dev-id = <241>;
+		};
+
+		afe_proxy_tx: qcom,msm-dai-q6-afe-proxy-tx {
+			compatible = "qcom,msm-dai-q6-dev";
+			qcom,msm-dai-q6-dev-id = <240>;
+		};
+
+		incall_record_rx: qcom,msm-dai-q6-incall-record-rx {
+			compatible = "qcom,msm-dai-q6-dev";
+			qcom,msm-dai-q6-dev-id = <32771>;
+		};
+
+		incall_record_tx: qcom,msm-dai-q6-incall-record-tx {
+			compatible = "qcom,msm-dai-q6-dev";
+			qcom,msm-dai-q6-dev-id = <32772>;
+		};
+
+		incall_music_rx: qcom,msm-dai-q6-incall-music-rx {
+			compatible = "qcom,msm-dai-q6-dev";
+			qcom,msm-dai-q6-dev-id = <32773>;
+		};
+
+		incall_music_2_rx: qcom,msm-dai-q6-incall-music-2-rx {
+			compatible = "qcom,msm-dai-q6-dev";
+			qcom,msm-dai-q6-dev-id = <32770>;
+		};
+
+		usb_audio_rx: qcom,msm-dai-q6-usb-audio-rx {
+			compatible = "qcom,msm-dai-q6-dev";
+			qcom,msm-dai-q6-dev-id = <28672>;
+		};
+
+		usb_audio_tx: qcom,msm-dai-q6-usb-audio-tx {
+			compatible = "qcom,msm-dai-q6-dev";
+			qcom,msm-dai-q6-dev-id = <28673>;
+		};
+	};
+
+	hostless: qcom,msm-pcm-hostless {
+		compatible = "qcom,msm-pcm-hostless";
+	};
+
+	dai_pri_auxpcm: qcom,msm-pri-auxpcm {
+		compatible = "qcom,msm-auxpcm-dev";
+		qcom,msm-cpudai-auxpcm-mode = <0>, <0>;
+		qcom,msm-cpudai-auxpcm-sync = <1>, <1>;
+		qcom,msm-cpudai-auxpcm-frame = <5>, <4>;
+		qcom,msm-cpudai-auxpcm-quant = <2>, <2>;
+		qcom,msm-cpudai-auxpcm-num-slots = <1>, <1>;
+		qcom,msm-cpudai-auxpcm-slot-mapping = <1>, <1>;
+		qcom,msm-cpudai-auxpcm-data = <0>, <0>;
+		qcom,msm-cpudai-auxpcm-pcm-clk-rate = <2048000>, <2048000>;
+		qcom,msm-auxpcm-interface = "primary";
+		qcom,msm-cpudai-afe-clk-ver = <2>;
+	};
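+
+	/*
+	 * Each AUXPCM property holds an <8 kHz, 16 kHz> pair; the frame
+	 * values <5>, <4> select 256- and 128-bit frames, matching the
+	 * 2.048 MHz PCM clock at either sample rate.
+	 */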
+
+	dai_sec_auxpcm: qcom,msm-sec-auxpcm {
+		compatible = "qcom,msm-auxpcm-dev";
+		qcom,msm-cpudai-auxpcm-mode = <0>, <0>;
+		qcom,msm-cpudai-auxpcm-sync = <1>, <1>;
+		qcom,msm-cpudai-auxpcm-frame = <5>, <4>;
+		qcom,msm-cpudai-auxpcm-quant = <2>, <2>;
+		qcom,msm-cpudai-auxpcm-num-slots = <1>, <1>;
+		qcom,msm-cpudai-auxpcm-slot-mapping = <1>, <1>;
+		qcom,msm-cpudai-auxpcm-data = <0>, <0>;
+		qcom,msm-cpudai-auxpcm-pcm-clk-rate = <2048000>, <2048000>;
+		qcom,msm-auxpcm-interface = "secondary";
+		qcom,msm-cpudai-afe-clk-ver = <2>;
+	};
+
+	dai_tert_auxpcm: qcom,msm-tert-auxpcm {
+		compatible = "qcom,msm-auxpcm-dev";
+		qcom,msm-cpudai-auxpcm-mode = <0>, <0>;
+		qcom,msm-cpudai-auxpcm-sync = <1>, <1>;
+		qcom,msm-cpudai-auxpcm-frame = <5>, <4>;
+		qcom,msm-cpudai-auxpcm-quant = <2>, <2>;
+		qcom,msm-cpudai-auxpcm-num-slots = <1>, <1>;
+		qcom,msm-cpudai-auxpcm-slot-mapping = <1>, <1>;
+		qcom,msm-cpudai-auxpcm-data = <0>, <0>;
+		qcom,msm-cpudai-auxpcm-pcm-clk-rate = <2048000>, <2048000>;
+		qcom,msm-auxpcm-interface = "tertiary";
+		qcom,msm-cpudai-afe-clk-ver = <2>;
+	};
+
+	dai_quat_auxpcm: qcom,msm-quat-auxpcm {
+		compatible = "qcom,msm-auxpcm-dev";
+		qcom,msm-cpudai-auxpcm-mode = <0>, <0>;
+		qcom,msm-cpudai-auxpcm-sync = <1>, <1>;
+		qcom,msm-cpudai-auxpcm-frame = <5>, <4>;
+		qcom,msm-cpudai-auxpcm-quant = <2>, <2>;
+		qcom,msm-cpudai-auxpcm-num-slots = <1>, <1>;
+		qcom,msm-cpudai-auxpcm-slot-mapping = <1>, <1>;
+		qcom,msm-cpudai-auxpcm-data = <0>, <0>;
+		qcom,msm-cpudai-auxpcm-pcm-clk-rate = <2048000>, <2048000>;
+		qcom,msm-auxpcm-interface = "quaternary";
+		qcom,msm-cpudai-afe-clk-ver = <2>;
+	};
+
+	hdmi_dba: qcom,msm-hdmi-dba-codec-rx {
+		compatible = "qcom,msm-hdmi-dba-codec-rx";
+		qcom,dba-bridge-chip = "adv7533";
+	};
+
+	msm_audio_ion: qcom,msm-audio-ion {
+		compatible = "qcom,msm-audio-ion";
+		qcom,smmu-version = <1>;
+		qcom,smmu-enabled;
+		iommus = <&adsp_io 1>;
+	};
+
+	qcom,msm-adsp-loader {
+		status = "ok";
+		compatible = "qcom,adsp-loader";
+		qcom,adsp-state = <0>;
+	};
+
+	qcom,msm-dai-tdm-pri-rx {
+		compatible = "qcom,msm-dai-tdm";
+		qcom,msm-cpudai-tdm-group-id = <37120>;
+		qcom,msm-cpudai-tdm-group-num-ports = <1>;
+		qcom,msm-cpudai-tdm-group-port-id = <36864>;
+		qcom,msm-cpudai-tdm-clk-rate = <1536000>;
+		qcom,msm-cpudai-tdm-clk-internal = <1>;
+		qcom,msm-cpudai-tdm-sync-mode = <1>;
+		qcom,msm-cpudai-tdm-sync-src = <1>;
+		qcom,msm-cpudai-tdm-data-out = <0>;
+		qcom,msm-cpudai-tdm-invert-sync = <1>;
+		qcom,msm-cpudai-tdm-data-delay = <1>;
+		dai_pri_tdm_rx_0: qcom,msm-dai-q6-tdm-pri-rx-0 {
+			compatible = "qcom,msm-dai-q6-tdm";
+			qcom,msm-cpudai-tdm-dev-id = <36864>;
+			qcom,msm-cpudai-tdm-data-align = <0>;
+		};
+	};
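+
+	/*
+	 * TDM IDs follow AFE numbering: port 36864 (0x9000) is primary
+	 * TDM RX, TX is +1, and each later interface starts 16 ports
+	 * higher (0x9010 secondary, 0x9020 tertiary, 0x9030 quaternary).
+	 */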
+
+	qcom,msm-dai-tdm-pri-tx {
+		compatible = "qcom,msm-dai-tdm";
+		qcom,msm-cpudai-tdm-group-id = <37121>;
+		qcom,msm-cpudai-tdm-group-num-ports = <1>;
+		qcom,msm-cpudai-tdm-group-port-id = <36865>;
+		qcom,msm-cpudai-tdm-clk-rate = <1536000>;
+		qcom,msm-cpudai-tdm-clk-internal = <1>;
+		qcom,msm-cpudai-tdm-sync-mode = <1>;
+		qcom,msm-cpudai-tdm-sync-src = <1>;
+		qcom,msm-cpudai-tdm-data-out = <0>;
+		qcom,msm-cpudai-tdm-invert-sync = <1>;
+		qcom,msm-cpudai-tdm-data-delay = <1>;
+		dai_pri_tdm_tx_0: qcom,msm-dai-q6-tdm-pri-tx-0 {
+			compatible = "qcom,msm-dai-q6-tdm";
+			qcom,msm-cpudai-tdm-dev-id = <36865>;
+			qcom,msm-cpudai-tdm-data-align = <0>;
+		};
+	};
+
+	qcom,msm-dai-tdm-sec-rx {
+		compatible = "qcom,msm-dai-tdm";
+		qcom,msm-cpudai-tdm-group-id = <37136>;
+		qcom,msm-cpudai-tdm-group-num-ports = <1>;
+		qcom,msm-cpudai-tdm-group-port-id = <36880>;
+		qcom,msm-cpudai-tdm-clk-rate = <1536000>;
+		qcom,msm-cpudai-tdm-clk-internal = <1>;
+		qcom,msm-cpudai-tdm-sync-mode = <1>;
+		qcom,msm-cpudai-tdm-sync-src = <1>;
+		qcom,msm-cpudai-tdm-data-out = <0>;
+		qcom,msm-cpudai-tdm-invert-sync = <1>;
+		qcom,msm-cpudai-tdm-data-delay = <1>;
+		dai_sec_tdm_rx_0: qcom,msm-dai-q6-tdm-sec-rx-0 {
+			compatible = "qcom,msm-dai-q6-tdm";
+			qcom,msm-cpudai-tdm-dev-id = <36880>;
+			qcom,msm-cpudai-tdm-data-align = <0>;
+		};
+	};
+
+	qcom,msm-dai-tdm-sec-tx {
+		compatible = "qcom,msm-dai-tdm";
+		qcom,msm-cpudai-tdm-group-id = <37137>;
+		qcom,msm-cpudai-tdm-group-num-ports = <1>;
+		qcom,msm-cpudai-tdm-group-port-id = <36881>;
+		qcom,msm-cpudai-tdm-clk-rate = <1536000>;
+		qcom,msm-cpudai-tdm-clk-internal = <1>;
+		qcom,msm-cpudai-tdm-sync-mode = <1>;
+		qcom,msm-cpudai-tdm-sync-src = <1>;
+		qcom,msm-cpudai-tdm-data-out = <0>;
+		qcom,msm-cpudai-tdm-invert-sync = <1>;
+		qcom,msm-cpudai-tdm-data-delay = <1>;
+		dai_sec_tdm_tx_0: qcom,msm-dai-q6-tdm-sec-tx-0 {
+			compatible = "qcom,msm-dai-q6-tdm";
+			qcom,msm-cpudai-tdm-dev-id = <36881>;
+			qcom,msm-cpudai-tdm-data-align = <0>;
+		};
+	};
+
+	qcom,msm-dai-tdm-tert-rx {
+		compatible = "qcom,msm-dai-tdm";
+		qcom,msm-cpudai-tdm-group-id = <37152>;
+		qcom,msm-cpudai-tdm-group-num-ports = <1>;
+		qcom,msm-cpudai-tdm-group-port-id = <36896>;
+		qcom,msm-cpudai-tdm-clk-rate = <1536000>;
+		qcom,msm-cpudai-tdm-clk-internal = <1>;
+		qcom,msm-cpudai-tdm-sync-mode = <1>;
+		qcom,msm-cpudai-tdm-sync-src = <1>;
+		qcom,msm-cpudai-tdm-data-out = <0>;
+		qcom,msm-cpudai-tdm-invert-sync = <1>;
+		qcom,msm-cpudai-tdm-data-delay = <1>;
+		dai_tert_tdm_rx_0: qcom,msm-dai-q6-tdm-tert-rx-0 {
+			compatible = "qcom,msm-dai-q6-tdm";
+			qcom,msm-cpudai-tdm-dev-id = <36896>;
+			qcom,msm-cpudai-tdm-data-align = <0>;
+		};
+	};
+
+	qcom,msm-dai-tdm-tert-tx {
+		compatible = "qcom,msm-dai-tdm";
+		qcom,msm-cpudai-tdm-group-id = <37153>;
+		qcom,msm-cpudai-tdm-group-num-ports = <1>;
+		qcom,msm-cpudai-tdm-group-port-id = <36897>;
+		qcom,msm-cpudai-tdm-clk-rate = <1536000>;
+		qcom,msm-cpudai-tdm-clk-internal = <1>;
+		qcom,msm-cpudai-tdm-sync-mode = <1>;
+		qcom,msm-cpudai-tdm-sync-src = <1>;
+		qcom,msm-cpudai-tdm-data-out = <0>;
+		qcom,msm-cpudai-tdm-invert-sync = <1>;
+		qcom,msm-cpudai-tdm-data-delay = <1>;
+		dai_tert_tdm_tx_0: qcom,msm-dai-q6-tdm-tert-tx-0 {
+			compatible = "qcom,msm-dai-q6-tdm";
+			qcom,msm-cpudai-tdm-dev-id = <36897>;
+			qcom,msm-cpudai-tdm-data-align = <0>;
+		};
+	};
+
+	qcom,msm-dai-tdm-quat-rx {
+		compatible = "qcom,msm-dai-tdm";
+		qcom,msm-cpudai-tdm-group-id = <37168>;
+		qcom,msm-cpudai-tdm-group-num-ports = <1>;
+		qcom,msm-cpudai-tdm-group-port-id = <36912>;
+		qcom,msm-cpudai-tdm-clk-rate = <1536000>;
+		qcom,msm-cpudai-tdm-clk-internal = <1>;
+		qcom,msm-cpudai-tdm-sync-mode = <1>;
+		qcom,msm-cpudai-tdm-sync-src = <1>;
+		qcom,msm-cpudai-tdm-data-out = <0>;
+		qcom,msm-cpudai-tdm-invert-sync = <1>;
+		qcom,msm-cpudai-tdm-data-delay = <1>;
+		dai_quat_tdm_rx_0: qcom,msm-dai-q6-tdm-quat-rx-0 {
+			compatible = "qcom,msm-dai-q6-tdm";
+			qcom,msm-cpudai-tdm-dev-id = <36912>;
+			qcom,msm-cpudai-tdm-data-align = <0>;
+		};
+	};
+
+	qcom,msm-dai-tdm-quat-tx {
+		compatible = "qcom,msm-dai-tdm";
+		qcom,msm-cpudai-tdm-group-id = <37169>;
+		qcom,msm-cpudai-tdm-group-num-ports = <1>;
+		qcom,msm-cpudai-tdm-group-port-id = <36913>;
+		qcom,msm-cpudai-tdm-clk-rate = <1536000>;
+		qcom,msm-cpudai-tdm-clk-internal = <1>;
+		qcom,msm-cpudai-tdm-sync-mode = <1>;
+		qcom,msm-cpudai-tdm-sync-src = <1>;
+		qcom,msm-cpudai-tdm-data-out = <0>;
+		qcom,msm-cpudai-tdm-invert-sync = <1>;
+		qcom,msm-cpudai-tdm-data-delay = <1>;
+		dai_quat_tdm_tx_0: qcom,msm-dai-q6-tdm-quat-tx-0 {
+			compatible = "qcom,msm-dai-q6-tdm";
+			qcom,msm-cpudai-tdm-dev-id = <36913>;
+			qcom,msm-cpudai-tdm-data-align = <0>;
+		};
+	};
+};
diff -Nruw linux-4.4.115/arch/arm/boot/dts/qcom/msm-gdsc-8998.dtsi linux-4.4.115-fbx/arch/arm/boot/dts/qcom/msm-gdsc-8998.dtsi
--- linux-4.4.115/arch/arm/boot/dts/qcom/msm-gdsc-8998.dtsi	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/arch/arm/boot/dts/qcom/msm-gdsc-8998.dtsi	2019-01-22 16:16:21.179225361 +0100
@@ -0,0 +1,146 @@
+/*
+ * Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+&soc {
+	/* GCC GDSCs */
+	gdsc_usb30: qcom,gdsc@10f004 {
+		compatible = "qcom,gdsc";
+		regulator-name = "gdsc_usb30";
+		reg = <0x10f004 0x4>;
+		status = "disabled";
+	};
+
+	gdsc_pcie_0: qcom,gdsc@16b004 {
+		compatible = "qcom,gdsc";
+		regulator-name = "gdsc_pcie_0";
+		reg = <0x16b004 0x4>;
+		status = "disabled";
+	};
+
+	gdsc_ufs: qcom,gdsc@175004 {
+		compatible = "qcom,gdsc";
+		regulator-name = "gdsc_ufs";
+		reg = <0x175004 0x4>;
+		status = "disabled";
+	};
+
+	gdsc_hlos1_vote_lpass_adsp: qcom,gdsc@17d034 {
+		compatible = "qcom,gdsc";
+		regulator-name = "gdsc_hlos1_vote_lpass_adsp";
+		reg = <0x17d034 0x4>;
+		qcom,no-status-check-on-disable;
+		qcom,gds-timeout = <500>;
+		status = "disabled";
+	};
+
+	gdsc_hlos1_vote_lpass_core: qcom,gdsc@17d038 {
+		compatible = "qcom,gdsc";
+		regulator-name = "gdsc_hlos1_vote_lpass_core";
+		reg = <0x17d038 0x4>;
+		qcom,no-status-check-on-disable;
+		qcom,gds-timeout = <500>;
+		status = "disabled";
+	};
+
+	/* MMSS GDSCs */
+	gdsc_bimc_smmu: qcom,gdsc@c8ce020 {
+		compatible = "qcom,gdsc";
+		regulator-name = "gdsc_bimc_smmu";
+		reg = <0xc8ce020 0x4>,
+		      <0xc8ce024 0x4>;
+		reg-names = "base", "hw_ctrl_addr";
+		qcom,no-status-check-on-disable;
+		qcom,gds-timeout = <500>;
+		status = "disabled";
+	};
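+
+	/*
+	 * GDSCs with a second "hw_ctrl_addr" reg cell can be handed over
+	 * to hardware control; qcom,no-status-check-on-disable then skips
+	 * polling the status bit on disable, since hardware may keep the
+	 * domain up.
+	 */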
+
+	gdsc_venus: qcom,gdsc@c8c1024 {
+		compatible = "qcom,gdsc";
+		regulator-name = "gdsc_venus";
+		reg = <0xc8c1024 0x4>;
+		status = "disabled";
+	};
+
+	gdsc_venus_core0: qcom,gdsc@c8c1040 {
+		compatible = "qcom,gdsc";
+		regulator-name = "gdsc_venus_core0";
+		reg = <0xc8c1040 0x4>;
+		status = "disabled";
+	};
+
+	gdsc_venus_core1: qcom,gdsc@c8c1044 {
+		compatible = "qcom,gdsc";
+		regulator-name = "gdsc_venus_core1";
+		reg = <0xc8c1044 0x4>;
+		status = "disabled";
+	};
+
+	gdsc_camss_top: qcom,gdsc@c8c34a0 {
+		compatible = "qcom,gdsc";
+		regulator-name = "gdsc_camss_top";
+		reg = <0xc8c34a0 0x4>;
+		status = "disabled";
+	};
+
+	gdsc_vfe0: qcom,gdsc@c8c3664 {
+		compatible = "qcom,gdsc";
+		regulator-name = "gdsc_vfe0";
+		reg = <0xc8c3664 0x4>;
+		status = "disabled";
+	};
+
+	gdsc_vfe1: qcom,gdsc@c8c3674 {
+		compatible = "qcom,gdsc";
+		regulator-name = "gdsc_vfe1";
+		reg = <0xc8c3674 0x4>;
+		status = "disabled";
+	};
+
+	gdsc_cpp: qcom,gdsc@c8c36d4 {
+		compatible = "qcom,gdsc";
+		regulator-name = "gdsc_cpp";
+		reg = <0xc8c36d4 0x4>;
+		status = "disabled";
+	};
+
+	gdsc_mdss: qcom,gdsc@c8c2304 {
+		compatible = "qcom,gdsc";
+		regulator-name = "gdsc_mdss";
+		reg = <0xc8c2304 0x4>;
+		status = "disabled";
+	};
+
+	/* GPU GDSCs */
+	gdsc_gpu_cx: qcom,gdsc@5066004 {
+		compatible = "qcom,gdsc";
+		regulator-name = "gdsc_gpu_cx";
+		reg = <0x5066004 0x4>,
+		      <0x5066008 0x4>;
+		reg-names = "base", "hw_ctrl_addr";
+		qcom,no-status-check-on-disable;
+		qcom,gds-timeout = <500>;
+		status = "disabled";
+	};
+
+	gdsc_gpu_gx: qcom,gdsc@5066094 {
+		compatible = "qcom,gdsc";
+		regulator-name = "gdsc_gpu_gx";
+		reg = <0x5066094 0x4>,
+		      <0x5065130 0x4>,
+		      <0x5066090 0x4>;
+		reg-names = "base", "domain_addr", "sw_reset";
+		qcom,retain-periph;
+		qcom,reset-aon-logic;
+		status = "disabled";
+	};
+};
diff -Nruw linux-4.4.115/arch/arm/boot/dts/qcom/msm-pm8005.dtsi linux-4.4.115-fbx/arch/arm/boot/dts/qcom/msm-pm8005.dtsi
--- linux-4.4.115/arch/arm/boot/dts/qcom/msm-pm8005.dtsi	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/arch/arm/boot/dts/qcom/msm-pm8005.dtsi	2019-01-22 16:16:21.179225361 +0100
@@ -0,0 +1,103 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <dt-bindings/spmi/spmi.h>
+#include <dt-bindings/interrupt-controller/irq.h>
+
+&spmi_bus {
+	qcom,pm8005@4 {
+		compatible = "qcom,spmi-pmic";
+		reg = <0x4 SPMI_USID>;
+		#address-cells = <2>;
+		#size-cells = <0>;
+
+		pm8005_revid: qcom,revid@100 {
+			compatible = "qcom,qpnp-revid";
+			reg = <0x100 0x100>;
+		};
+
+		qcom,temp-alarm@2400 {
+			compatible = "qcom,qpnp-temp-alarm";
+			reg = <0x2400 0x100>;
+			interrupts = <0x4 0x24 0x0 IRQ_TYPE_EDGE_RISING>;
+			label = "pm8005_tz";
+		};
+
+		pm8005_gpios: gpios {
+			compatible = "qcom,qpnp-pin";
+			gpio-controller;
+			#gpio-cells = <2>;
+			#address-cells = <1>;
+			#size-cells = <1>;
+			label = "pm8005-gpio";
+
+			gpio@c000 {
+				reg = <0xc000 0x100>;
+				qcom,pin-num = <1>;
+				status = "disabled";
+			};
+
+			gpio@c100 {
+				reg = <0xc100 0x100>;
+				qcom,pin-num = <2>;
+				status = "disabled";
+			};
+
+			gpio@c200 {
+				reg = <0xc200 0x100>;
+				qcom,pin-num = <3>;
+				status = "disabled";
+			};
+
+			gpio@c300 {
+				reg = <0xc300 0x100>;
+				qcom,pin-num = <4>;
+				status = "disabled";
+			};
+		};
+	};
+
+	qcom,pm8005@5 {
+		compatible ="qcom,spmi-pmic";
+		reg = <0x5 SPMI_USID>;
+		#address-cells = <2>;
+		#size-cells = <0>;
+
+		regulator@1400 {
+			compatible = "qcom,qpnp-regulator";
+			reg = <0x1400 0x100>;
+			regulator-name = "pm8005_s1";
+			status = "disabled";
+		};
+
+		regulator@1700 {
+			compatible = "qcom,qpnp-regulator";
+			reg = <0x1700 0x100>;
+			regulator-name = "pm8005_s2";
+			status = "disabled";
+		};
+
+		regulator@1a00 {
+			compatible = "qcom,qpnp-regulator";
+			reg = <0x1a00 0x100>;
+			regulator-name = "pm8005_s3";
+			status = "disabled";
+		};
+
+		regulator@1d00 {
+			compatible = "qcom,qpnp-regulator";
+			reg = <0x1d00 0x100>;
+			regulator-name = "pm8005_s4";
+			status = "disabled";
+		};
+	};
+};
diff -Nruw linux-4.4.115/arch/arm/boot/dts/qcom/msm-pm8998.dtsi linux-4.4.115-fbx/arch/arm/boot/dts/qcom/msm-pm8998.dtsi
--- linux-4.4.115/arch/arm/boot/dts/qcom/msm-pm8998.dtsi	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/arch/arm/boot/dts/qcom/msm-pm8998.dtsi	2019-01-22 16:16:21.179225361 +0100
@@ -0,0 +1,328 @@
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <dt-bindings/spmi/spmi.h>
+#include <dt-bindings/interrupt-controller/irq.h>
+#include <dt-bindings/msm/power-on.h>
+
+&spmi_bus {
+	qcom,pm8998@0 {
+		compatible ="qcom,spmi-pmic";
+		reg = <0x0 SPMI_USID>;
+		#address-cells = <2>;
+		#size-cells = <0>;
+
+		pm8998_revid: qcom,revid@100 {
+			compatible = "qcom,qpnp-revid";
+			reg = <0x100 0x100>;
+		};
+
+		qcom,power-on@800 {
+			compatible = "qcom,qpnp-power-on";
+			reg = <0x800 0x100>;
+			interrupts = <0x0 0x8 0x0 IRQ_TYPE_NONE>,
+				     <0x0 0x8 0x1 IRQ_TYPE_NONE>,
+				     <0x0 0x8 0x4 IRQ_TYPE_NONE>,
+				     <0x0 0x8 0x5 IRQ_TYPE_NONE>;
+			interrupt-names = "kpdpwr", "resin",
+					"resin-bark", "kpdpwr-resin-bark";
+			qcom,pon-dbc-delay = <15625>;
+			qcom,system-reset;
+			qcom,store-hard-reset-reason;
+
+			qcom,pon_1 {
+				qcom,pon-type = <0>;
+				qcom,pull-up = <1>;
+				linux,code = <116>;	/* KEY_POWER */
+			};
+
+			qcom,pon_2 {
+				qcom,pon-type = <1>;
+				qcom,pull-up = <1>;
+				linux,code = <114>;	/* KEY_VOLUMEDOWN */
+			};
+
+			qcom,pon_3 {
+				qcom,pon-type = <3>;
+				qcom,support-reset = <1>;
+				qcom,pull-up = <1>;
+				qcom,s1-timer = <6720>;
+				qcom,s2-timer = <2000>;
+				qcom,s2-type = <PON_POWER_OFF_DVDD_HARD_RESET>;
+				qcom,use-bark;
+			};
+		};
+
+		qcom,temp-alarm@2400 {
+			compatible = "qcom,qpnp-temp-alarm";
+			reg = <0x2400 0x100>;
+			interrupts = <0x0 0x24 0x0 IRQ_TYPE_EDGE_RISING>;
+			label = "pm8998_tz";
+			qcom,channel-num = <6>;
+			qcom,temp_alarm-vadc = <&pm8998_vadc>;
+		};
+
+		pm8998_gpios: gpios {
+			compatible = "qcom,qpnp-pin";
+			gpio-controller;
+			#gpio-cells = <2>;
+			#address-cells = <1>;
+			#size-cells = <1>;
+			label = "pm8998-gpio";
+
+			gpio@c000 {
+				reg = <0xc000 0x100>;
+				qcom,pin-num = <1>;
+				status = "disabled";
+			};
+
+			gpio@c100 {
+				reg = <0xc100 0x100>;
+				qcom,pin-num = <2>;
+				status = "disabled";
+			};
+
+			gpio@c200 {
+				reg = <0xc200 0x100>;
+				qcom,pin-num = <3>;
+				status = "disabled";
+			};
+
+			gpio@c300 {
+				reg = <0xc300 0x100>;
+				qcom,pin-num = <4>;
+				status = "disabled";
+			};
+
+			gpio@c400 {
+				reg = <0xc400 0x100>;
+				qcom,pin-num = <5>;
+				status = "disabled";
+			};
+
+			gpio@c500 {
+				reg = <0xc500 0x100>;
+				qcom,pin-num = <6>;
+				status = "disabled";
+			};
+
+			gpio@c600 {
+				reg = <0xc600 0x100>;
+				qcom,pin-num = <7>;
+				status = "disabled";
+			};
+
+			gpio@c700 {
+				reg = <0xc700 0x100>;
+				qcom,pin-num = <8>;
+				status = "disabled";
+			};
+
+			gpio@c800 {
+				reg = <0xc800 0x100>;
+				qcom,pin-num = <9>;
+				status = "disabled";
+			};
+
+			gpio@c900 {
+				reg = <0xc900 0x100>;
+				qcom,pin-num = <10>;
+				status = "disabled";
+			};
+
+			gpio@ca00 {
+				reg = <0xca00 0x100>;
+				qcom,pin-num = <11>;
+				status = "disabled";
+			};
+
+			gpio@cb00 {
+				reg = <0xcb00 0x100>;
+				qcom,pin-num = <12>;
+				status = "disabled";
+			};
+
+			gpio@cc00 {
+				reg = <0xcc00 0x100>;
+				qcom,pin-num = <13>;
+				status = "disabled";
+			};
+
+			gpio@cd00 {
+				reg = <0xcd00 0x100>;
+				qcom,pin-num = <14>;
+				status = "disabled";
+			};
+
+			gpio@ce00 {
+				reg = <0xce00 0x100>;
+				qcom,pin-num = <15>;
+				status = "disabled";
+			};
+
+			gpio@cf00 {
+				reg = <0xcf00 0x100>;
+				qcom,pin-num = <16>;
+				status = "disabled";
+			};
+
+			gpio@d000 {
+				reg = <0xd000 0x100>;
+				qcom,pin-num = <17>;
+				status = "disabled";
+			};
+
+			gpio@d100 {
+				reg = <0xd100 0x100>;
+				qcom,pin-num = <18>;
+				status = "disabled";
+			};
+
+			gpio@d200 {
+				reg = <0xd200 0x100>;
+				qcom,pin-num = <19>;
+				status = "disabled";
+			};
+
+			gpio@d300 {
+				reg = <0xd300 0x100>;
+				qcom,pin-num = <20>;
+				status = "disabled";
+			};
+
+			gpio@d400 {
+				reg = <0xd400 0x100>;
+				qcom,pin-num = <21>;
+				status = "disabled";
+			};
+
+			gpio@d500 {
+				reg = <0xd500 0x100>;
+				qcom,pin-num = <22>;
+				status = "disabled";
+			};
+
+			gpio@d600 {
+				reg = <0xd600 0x100>;
+				qcom,pin-num = <23>;
+				status = "disabled";
+			};
+
+			gpio@d700 {
+				reg = <0xd700 0x100>;
+				qcom,pin-num = <24>;
+				status = "disabled";
+			};
+
+			gpio@d800 {
+				reg = <0xd800 0x100>;
+				qcom,pin-num = <25>;
+				status = "disabled";
+			};
+
+			gpio@d900 {
+				reg = <0xd900 0x100>;
+				qcom,pin-num = <26>;
+				status = "disabled";
+			};
+		};
+
+		pm8998_coincell: qcom,coincell@2800 {
+			compatible = "qcom,qpnp-coincell";
+			reg = <0x2800 0x100>;
+		};
+
+		pm8998_rtc: qcom,pm8998_rtc {
+			compatible = "qcom,qpnp-rtc";
+			#address-cells = <1>;
+			#size-cells = <1>;
+			qcom,qpnp-rtc-write = <0>;
+			qcom,qpnp-rtc-alarm-pwrup = <0>;
+
+			qcom,pm8998_rtc_rw@6000 {
+				reg = <0x6000 0x100>;
+			};
+			qcom,pm8998_rtc_alarm@6100 {
+				reg = <0x6100 0x100>;
+				interrupts = <0x0 0x61 0x1 IRQ_TYPE_NONE>;
+			};
+		};
+
+		pm8998_vadc: vadc@3100 {
+			compatible = "qcom,qpnp-vadc-hc";
+			reg = <0x3100 0x100>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+			interrupts = <0x0 0x31 0x0 IRQ_TYPE_EDGE_RISING>;
+			interrupt-names = "eoc-int-en-set";
+			qcom,adc-bit-resolution = <15>;
+			qcom,adc-vdd-reference = <1875>;
+
+			chan@6 {
+				label = "die_temp";
+				reg = <6>;
+				qcom,decimation = <2>;
+				qcom,pre-div-channel-scaling = <0>;
+				qcom,calibration-type = "absolute";
+				qcom,scale-function = <3>;
+				qcom,hw-settle-time = <0>;
+				qcom,fast-avg-setup = <0>;
+				qcom,cal-val = <0>;
+			};
+
+			chan@0 {
+				label = "ref_gnd";
+				reg = <0>;
+				qcom,decimation = <2>;
+				qcom,pre-div-channel-scaling = <0>;
+				qcom,calibration-type = "absolute";
+				qcom,scale-function = <0>;
+				qcom,hw-settle-time = <0>;
+				qcom,fast-avg-setup = <0>;
+				qcom,cal-val = <0>;
+			};
+
+			chan@1 {
+				label = "ref_1250v";
+				reg = <1>;
+				qcom,decimation = <2>;
+				qcom,pre-div-channel-scaling = <0>;
+				qcom,calibration-type = "absolute";
+				qcom,scale-function = <0>;
+				qcom,hw-settle-time = <0>;
+				qcom,fast-avg-setup = <0>;
+				qcom,cal-val = <0>;
+			};
+		};
+
+		pm8998_adc_tm: vadc@3400 {
+			compatible = "qcom,qpnp-adc-tm-hc";
+			reg = <0x3400 0x100>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+			interrupts = <0x0 0x34 0x0 IRQ_TYPE_EDGE_RISING>;
+			interrupt-names = "eoc-int-en-set";
+			qcom,adc-bit-resolution = <15>;
+			qcom,adc-vdd-reference = <1875>;
+			qcom,adc_tm-vadc = <&pm8998_vadc>;
+			qcom,decimation = <0>;
+			qcom,fast-avg-setup = <0>;
+		};
+	};
+
+	qcom,pm8998@1 {
+		compatible ="qcom,spmi-pmic";
+		reg = <0x1 SPMI_USID>;
+		#address-cells = <2>;
+		#size-cells = <0>;
+	};
+};
diff -Nruw linux-4.4.115-fbx/arch/arm/boot/dts/qcom./msm-pm8998-rpm-regulator.dtsi linux-4.4.115-fbx/arch/arm/boot/dts/qcom/msm-pm8998-rpm-regulator.dtsi
--- linux-4.4.115-fbx/arch/arm/boot/dts/qcom./msm-pm8998-rpm-regulator.dtsi	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/arch/arm/boot/dts/qcom/msm-pm8998-rpm-regulator.dtsi	2019-01-22 16:16:21.179225361 +0100
@@ -0,0 +1,600 @@
+/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+&rpm_bus {
+	/* PM8998 S1 + S6 = VDD_CX supply */
+	rpm-regulator-smpa1 {
+		compatible = "qcom,rpm-smd-regulator-resource";
+		qcom,resource-name = "rwcx";
+		qcom,resource-id = <0>;
+		qcom,regulator-type = <1>;
+		status = "disabled";
+
+		regulator-s1 {
+			compatible = "qcom,rpm-smd-regulator";
+			regulator-name = "pm8998_s1";
+			qcom,set = <3>;
+			status = "disabled";
+		};
+	};
+
+	rpm-regulator-smpa2 {
+		compatible = "qcom,rpm-smd-regulator-resource";
+		qcom,resource-name = "smpa";
+		qcom,resource-id = <2>;
+		qcom,regulator-type = <1>;
+		status = "disabled";
+
+		regulator-s2 {
+			compatible = "qcom,rpm-smd-regulator";
+			regulator-name = "pm8998_s2";
+			qcom,set = <3>;
+			status = "disabled";
+		};
+	};
+
+	rpm-regulator-smpa3 {
+		compatible = "qcom,rpm-smd-regulator-resource";
+		qcom,resource-name = "smpa";
+		qcom,resource-id = <3>;
+		qcom,regulator-type = <1>;
+		status = "disabled";
+
+		regulator-s3 {
+			compatible = "qcom,rpm-smd-regulator";
+			regulator-name = "pm8998_s3";
+			qcom,set = <3>;
+			status = "disabled";
+		};
+	};
+
+	rpm-regulator-smpa4 {
+		compatible = "qcom,rpm-smd-regulator-resource";
+		qcom,resource-name = "smpa";
+		qcom,resource-id = <4>;
+		qcom,regulator-type = <1>;
+		status = "disabled";
+
+		regulator-s4 {
+			compatible = "qcom,rpm-smd-regulator";
+			regulator-name = "pm8998_s4";
+			qcom,set = <3>;
+			status = "disabled";
+		};
+	};
+
+	rpm-regulator-smpa5 {
+		compatible = "qcom,rpm-smd-regulator-resource";
+		qcom,resource-name = "smpa";
+		qcom,resource-id = <5>;
+		qcom,regulator-type = <1>;
+		status = "disabled";
+
+		regulator-s5 {
+			compatible = "qcom,rpm-smd-regulator";
+			regulator-name = "pm8998_s5";
+			qcom,set = <3>;
+			status = "disabled";
+		};
+	};
+
+	rpm-regulator-smpa7 {
+		compatible = "qcom,rpm-smd-regulator-resource";
+		qcom,resource-name = "smpa";
+		qcom,resource-id = <7>;
+		qcom,regulator-type = <1>;
+		status = "disabled";
+
+		regulator-s7 {
+			compatible = "qcom,rpm-smd-regulator";
+			regulator-name = "pm8998_s7";
+			qcom,set = <3>;
+			status = "disabled";
+		};
+	};
+
+	rpm-regulator-smpa8 {
+		compatible = "qcom,rpm-smd-regulator-resource";
+		qcom,resource-name = "smpa";
+		qcom,resource-id = <8>;
+		qcom,regulator-type = <1>;
+		status = "disabled";
+
+		regulator-s8 {
+			compatible = "qcom,rpm-smd-regulator";
+			regulator-name = "pm8998_s8";
+			qcom,set = <3>;
+			status = "disabled";
+		};
+	};
+
+	/* PM8998 S9 = VDD_MX supply */
+	rpm-regulator-smpa9 {
+		compatible = "qcom,rpm-smd-regulator-resource";
+		qcom,resource-name = "rwmx";
+		qcom,resource-id = <0>;
+		qcom,regulator-type = <1>;
+		status = "disabled";
+
+		regulator-s9 {
+			compatible = "qcom,rpm-smd-regulator";
+			regulator-name = "pm8998_s9";
+			qcom,set = <3>;
+			status = "disabled";
+		};
+	};
+
+	rpm-regulator-ldoa1 {
+		compatible = "qcom,rpm-smd-regulator-resource";
+		qcom,resource-name = "ldoa";
+		qcom,resource-id = <1>;
+		qcom,regulator-type = <0>;
+		status = "disabled";
+
+		regulator-l1 {
+			compatible = "qcom,rpm-smd-regulator";
+			regulator-name = "pm8998_l1";
+			qcom,set = <3>;
+			status = "disabled";
+		};
+	};
+
+	rpm-regulator-ldoa2 {
+		compatible = "qcom,rpm-smd-regulator-resource";
+		qcom,resource-name = "ldoa";
+		qcom,resource-id = <2>;
+		qcom,regulator-type = <0>;
+		status = "disabled";
+
+		regulator-l2 {
+			compatible = "qcom,rpm-smd-regulator";
+			regulator-name = "pm8998_l2";
+			qcom,set = <3>;
+			status = "disabled";
+		};
+	};
+
+	rpm-regulator-ldoa3 {
+		compatible = "qcom,rpm-smd-regulator-resource";
+		qcom,resource-name = "ldoa";
+		qcom,resource-id = <3>;
+		qcom,regulator-type = <0>;
+		status = "disabled";
+
+		regulator-l3 {
+			compatible = "qcom,rpm-smd-regulator";
+			regulator-name = "pm8998_l3";
+			qcom,set = <3>;
+			status = "disabled";
+		};
+	};
+
+	rpm-regulator-ldoa4 {
+		compatible = "qcom,rpm-smd-regulator-resource";
+		qcom,resource-name = "rwsm";
+		qcom,resource-id = <0>;
+		qcom,regulator-type = <0>;
+		status = "disabled";
+
+		regulator-l4 {
+			compatible = "qcom,rpm-smd-regulator";
+			regulator-name = "pm8998_l4";
+			qcom,set = <3>;
+			status = "disabled";
+		};
+	};
+
+	rpm-regulator-ldoa5 {
+		compatible = "qcom,rpm-smd-regulator-resource";
+		qcom,resource-name = "ldoa";
+		qcom,resource-id = <5>;
+		qcom,regulator-type = <0>;
+		status = "disabled";
+
+		regulator-l5 {
+			compatible = "qcom,rpm-smd-regulator";
+			regulator-name = "pm8998_l5";
+			qcom,set = <3>;
+			status = "disabled";
+		};
+	};
+
+	rpm-regulator-ldoa6 {
+		compatible = "qcom,rpm-smd-regulator-resource";
+		qcom,resource-name = "ldoa";
+		qcom,resource-id = <6>;
+		qcom,regulator-type = <0>;
+		status = "disabled";
+
+		regulator-l6 {
+			compatible = "qcom,rpm-smd-regulator";
+			regulator-name = "pm8998_l6";
+			qcom,set = <3>;
+			status = "disabled";
+		};
+	};
+
+	rpm-regulator-ldoa7 {
+		compatible = "qcom,rpm-smd-regulator-resource";
+		qcom,resource-name = "ldoa";
+		qcom,resource-id = <7>;
+		qcom,regulator-type = <0>;
+		status = "disabled";
+
+		regulator-l7 {
+			compatible = "qcom,rpm-smd-regulator";
+			regulator-name = "pm8998_l7";
+			qcom,set = <3>;
+			status = "disabled";
+		};
+	};
+
+	rpm-regulator-ldoa8 {
+		compatible = "qcom,rpm-smd-regulator-resource";
+		qcom,resource-name = "ldoa";
+		qcom,resource-id = <8>;
+		qcom,regulator-type = <0>;
+		status = "disabled";
+
+		regulator-l8 {
+			compatible = "qcom,rpm-smd-regulator";
+			regulator-name = "pm8998_l8";
+			qcom,set = <3>;
+			status = "disabled";
+		};
+	};
+
+	rpm-regulator-ldoa9 {
+		compatible = "qcom,rpm-smd-regulator-resource";
+		qcom,resource-name = "ldoa";
+		qcom,resource-id = <9>;
+		qcom,regulator-type = <0>;
+		status = "disabled";
+
+		regulator-l9 {
+			compatible = "qcom,rpm-smd-regulator";
+			regulator-name = "pm8998_l9";
+			qcom,set = <3>;
+			status = "disabled";
+		};
+	};
+
+	rpm-regulator-ldoa10 {
+		compatible = "qcom,rpm-smd-regulator-resource";
+		qcom,resource-name = "ldoa";
+		qcom,resource-id = <10>;
+		qcom,regulator-type = <0>;
+		status = "disabled";
+
+		regulator-l10 {
+			compatible = "qcom,rpm-smd-regulator";
+			regulator-name = "pm8998_l10";
+			qcom,set = <3>;
+			status = "disabled";
+		};
+	};
+
+	rpm-regulator-ldoa11 {
+		compatible = "qcom,rpm-smd-regulator-resource";
+		qcom,resource-name = "ldoa";
+		qcom,resource-id = <11>;
+		qcom,regulator-type = <0>;
+		status = "disabled";
+
+		regulator-l11 {
+			compatible = "qcom,rpm-smd-regulator";
+			regulator-name = "pm8998_l11";
+			qcom,set = <3>;
+			status = "disabled";
+		};
+	};
+
+	rpm-regulator-ldoa12 {
+		compatible = "qcom,rpm-smd-regulator-resource";
+		qcom,resource-name = "ldoa";
+		qcom,resource-id = <12>;
+		qcom,regulator-type = <0>;
+		status = "disabled";
+
+		regulator-l12 {
+			compatible = "qcom,rpm-smd-regulator";
+			regulator-name = "pm8998_l12";
+			qcom,set = <3>;
+			status = "disabled";
+		};
+	};
+
+	rpm-regulator-ldoa13 {
+		compatible = "qcom,rpm-smd-regulator-resource";
+		qcom,resource-name = "ldoa";
+		qcom,resource-id = <13>;
+		qcom,regulator-type = <0>;
+		status = "disabled";
+
+		regulator-l13 {
+			compatible = "qcom,rpm-smd-regulator";
+			regulator-name = "pm8998_l13";
+			qcom,set = <3>;
+			status = "disabled";
+		};
+	};
+
+	rpm-regulator-ldoa14 {
+		compatible = "qcom,rpm-smd-regulator-resource";
+		qcom,resource-name = "ldoa";
+		qcom,resource-id = <14>;
+		qcom,regulator-type = <0>;
+		status = "disabled";
+
+		regulator-l14 {
+			compatible = "qcom,rpm-smd-regulator";
+			regulator-name = "pm8998_l14";
+			qcom,set = <3>;
+			status = "disabled";
+		};
+	};
+
+	rpm-regulator-ldoa15 {
+		compatible = "qcom,rpm-smd-regulator-resource";
+		qcom,resource-name = "ldoa";
+		qcom,resource-id = <15>;
+		qcom,regulator-type = <0>;
+		status = "disabled";
+
+		regulator-l15 {
+			compatible = "qcom,rpm-smd-regulator";
+			regulator-name = "pm8998_l15";
+			qcom,set = <3>;
+			status = "disabled";
+		};
+	};
+
+	rpm-regulator-ldoa16 {
+		compatible = "qcom,rpm-smd-regulator-resource";
+		qcom,resource-name = "ldoa";
+		qcom,resource-id = <16>;
+		qcom,regulator-type = <0>;
+		status = "disabled";
+
+		regulator-l16 {
+			compatible = "qcom,rpm-smd-regulator";
+			regulator-name = "pm8998_l16";
+			qcom,set = <3>;
+			status = "disabled";
+		};
+	};
+
+	rpm-regulator-ldoa17 {
+		compatible = "qcom,rpm-smd-regulator-resource";
+		qcom,resource-name = "ldoa";
+		qcom,resource-id = <17>;
+		qcom,regulator-type = <0>;
+		status = "disabled";
+
+		regulator-l17 {
+			compatible = "qcom,rpm-smd-regulator";
+			regulator-name = "pm8998_l17";
+			qcom,set = <3>;
+			status = "disabled";
+		};
+	};
+
+	rpm-regulator-ldoa18 {
+		compatible = "qcom,rpm-smd-regulator-resource";
+		qcom,resource-name = "ldoa";
+		qcom,resource-id = <18>;
+		qcom,regulator-type = <0>;
+		status = "disabled";
+
+		regulator-l18 {
+			compatible = "qcom,rpm-smd-regulator";
+			regulator-name = "pm8998_l18";
+			qcom,set = <3>;
+			status = "disabled";
+		};
+	};
+
+	rpm-regulator-ldoa19 {
+		compatible = "qcom,rpm-smd-regulator-resource";
+		qcom,resource-name = "ldoa";
+		qcom,resource-id = <19>;
+		qcom,regulator-type = <0>;
+		status = "disabled";
+
+		regulator-l19 {
+			compatible = "qcom,rpm-smd-regulator";
+			regulator-name = "pm8998_l19";
+			qcom,set = <3>;
+			status = "disabled";
+		};
+	};
+
+	rpm-regulator-ldoa20 {
+		compatible = "qcom,rpm-smd-regulator-resource";
+		qcom,resource-name = "ldoa";
+		qcom,resource-id = <20>;
+		qcom,regulator-type = <0>;
+		status = "disabled";
+
+		regulator-l20 {
+			compatible = "qcom,rpm-smd-regulator";
+			regulator-name = "pm8998_l20";
+			qcom,set = <3>;
+			status = "disabled";
+		};
+	};
+
+	rpm-regulator-ldoa21 {
+		compatible = "qcom,rpm-smd-regulator-resource";
+		qcom,resource-name = "ldoa";
+		qcom,resource-id = <21>;
+		qcom,regulator-type = <0>;
+		status = "disabled";
+
+		regulator-l21 {
+			compatible = "qcom,rpm-smd-regulator";
+			regulator-name = "pm8998_l21";
+			qcom,set = <3>;
+			status = "disabled";
+		};
+	};
+
+	rpm-regulator-ldoa22 {
+		compatible = "qcom,rpm-smd-regulator-resource";
+		qcom,resource-name = "ldoa";
+		qcom,resource-id = <22>;
+		qcom,regulator-type = <0>;
+		status = "disabled";
+
+		regulator-l22 {
+			compatible = "qcom,rpm-smd-regulator";
+			regulator-name = "pm8998_l22";
+			qcom,set = <3>;
+			status = "disabled";
+		};
+	};
+
+	rpm-regulator-ldoa23 {
+		compatible = "qcom,rpm-smd-regulator-resource";
+		qcom,resource-name = "ldoa";
+		qcom,resource-id = <23>;
+		qcom,regulator-type = <0>;
+		status = "disabled";
+
+		regulator-l23 {
+			compatible = "qcom,rpm-smd-regulator";
+			regulator-name = "pm8998_l23";
+			qcom,set = <3>;
+			status = "disabled";
+		};
+	};
+
+	rpm-regulator-ldoa24 {
+		compatible = "qcom,rpm-smd-regulator-resource";
+		qcom,resource-name = "ldoa";
+		qcom,resource-id = <24>;
+		qcom,regulator-type = <0>;
+		status = "disabled";
+
+		regulator-l24 {
+			compatible = "qcom,rpm-smd-regulator";
+			regulator-name = "pm8998_l24";
+			qcom,set = <3>;
+			status = "disabled";
+		};
+	};
+
+	rpm-regulator-ldoa25 {
+		compatible = "qcom,rpm-smd-regulator-resource";
+		qcom,resource-name = "ldoa";
+		qcom,resource-id = <25>;
+		qcom,regulator-type = <0>;
+		status = "disabled";
+
+		regulator-l25 {
+			compatible = "qcom,rpm-smd-regulator";
+			regulator-name = "pm8998_l25";
+			qcom,set = <3>;
+			status = "disabled";
+		};
+	};
+
+	rpm-regulator-ldoa26 {
+		compatible = "qcom,rpm-smd-regulator-resource";
+		qcom,resource-name = "ldoa";
+		qcom,resource-id = <26>;
+		qcom,regulator-type = <0>;
+		status = "disabled";
+
+		regulator-l26 {
+			compatible = "qcom,rpm-smd-regulator";
+			regulator-name = "pm8998_l26";
+			qcom,set = <3>;
+			status = "disabled";
+		};
+	};
+
+	rpm-regulator-ldoa27 {
+		compatible = "qcom,rpm-smd-regulator-resource";
+		qcom,resource-name = "rwsc";
+		qcom,resource-id = <0>;
+		qcom,regulator-type = <0>;
+		status = "disabled";
+
+		regulator-l27 {
+			compatible = "qcom,rpm-smd-regulator";
+			regulator-name = "pm8998_l27";
+			qcom,set = <3>;
+			status = "disabled";
+		};
+	};
+
+	rpm-regulator-ldoa28 {
+		compatible = "qcom,rpm-smd-regulator-resource";
+		qcom,resource-name = "ldoa";
+		qcom,resource-id = <28>;
+		qcom,regulator-type = <0>;
+		status = "disabled";
+
+		regulator-l28 {
+			compatible = "qcom,rpm-smd-regulator";
+			regulator-name = "pm8998_l28";
+			qcom,set = <3>;
+			status = "disabled";
+		};
+	};
+
+	rpm-regulator-vsa1 {
+		compatible = "qcom,rpm-smd-regulator-resource";
+		qcom,resource-name = "vsa";
+		qcom,resource-id = <1>;
+		qcom,regulator-type = <2>;
+		status = "disabled";
+
+		regulator-lvs1 {
+			compatible = "qcom,rpm-smd-regulator";
+			regulator-name = "pm8998_lvs1";
+			qcom,set = <3>;
+			status = "disabled";
+		};
+	};
+
+	rpm-regulator-vsa2 {
+		compatible = "qcom,rpm-smd-regulator-resource";
+		qcom,resource-name = "vsa";
+		qcom,resource-id = <2>;
+		qcom,regulator-type = <2>;
+		status = "disabled";
+
+		regulator-lvs2 {
+			compatible = "qcom,rpm-smd-regulator";
+			regulator-name = "pm8998_lvs2";
+			qcom,set = <3>;
+			status = "disabled";
+		};
+	};
+
+	rpm-regulator-bobb {
+		compatible = "qcom,rpm-smd-regulator-resource";
+		qcom,resource-name = "bobb";
+		qcom,resource-id = <1>;
+		qcom,regulator-type = <4>;
+		status = "disabled";
+
+		regulator-bob {
+			compatible = "qcom,rpm-smd-regulator";
+			regulator-name = "pm8998_bob";
+			qcom,set = <3>;
+			status = "disabled";
+		};
+	};
+};
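Note that both each resource node and its child regulator carry status = "disabled", so a board override has to flip both and add voltage constraints before the RPM-managed supply becomes usable. A sketch under that assumption (the 1.8 V figures are illustrative):

&rpm_bus {
	rpm-regulator-ldoa6 {
		status = "okay";
		pm8998_l6: regulator-l6 {
			regulator-min-microvolt = <1808000>;
			regulator-max-microvolt = <1808000>;
			qcom,init-voltage = <1808000>;
			status = "okay";
		};
	};
};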
diff -Nruw linux-4.4.115-fbx/arch/arm/boot/dts/qcom./msm-pmi8998.dtsi linux-4.4.115-fbx/arch/arm/boot/dts/qcom/msm-pmi8998.dtsi
--- linux-4.4.115-fbx/arch/arm/boot/dts/qcom./msm-pmi8998.dtsi	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/arch/arm/boot/dts/qcom/msm-pmi8998.dtsi	2019-01-22 16:16:21.179225361 +0100
@@ -0,0 +1,777 @@
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <dt-bindings/interrupt-controller/irq.h>
+#include <dt-bindings/spmi/spmi.h>
+
+&spmi_bus {
+	qcom,pmi8998@2 {
+		compatible = "qcom,spmi-pmic";
+		reg = <0x2 SPMI_USID>;
+		#address-cells = <2>;
+		#size-cells = <0>;
+
+		pmi8998_revid: qcom,revid@100 {
+			compatible = "qcom,qpnp-revid";
+			reg = <0x100 0x100>;
+			qcom,fab-id-valid;
+		};
+
+		qcom,power-on@800 {
+			compatible = "qcom,qpnp-power-on";
+			reg = <0x800 0x100>;
+		};
+
+		pmi8998_misc: qcom,misc@900 {
+			compatible = "qcom,qpnp-misc";
+			reg = <0x900 0x100>;
+		};
+
+		qcom,temp-alarm@2400 {
+			compatible = "qcom,qpnp-temp-alarm";
+			reg = <0x2400 0x100>;
+			interrupts = <0x2 0x24 0x0 IRQ_TYPE_EDGE_RISING>;
+			label = "pmi8998_tz";
+		};
+
+		pmi8998_gpios: gpios {
+			compatible = "qcom,qpnp-pin";
+			gpio-controller;
+			#gpio-cells = <2>;
+			#address-cells = <1>;
+			#size-cells = <1>;
+			label = "pmi8998-gpio";
+
+			gpio@c000 {
+				reg = <0xc000 0x100>;
+				qcom,pin-num = <1>;
+				status = "disabled";
+			};
+
+			gpio@c100 {
+				reg = <0xc100 0x100>;
+				qcom,pin-num = <2>;
+				status = "disabled";
+			};
+
+			gpio@c200 {
+				reg = <0xc200 0x100>;
+				qcom,pin-num = <3>;
+				status = "disabled";
+			};
+
+			gpio@c300 {
+				reg = <0xc300 0x100>;
+				qcom,pin-num = <4>;
+				status = "disabled";
+			};
+
+			gpio@c400 {
+				reg = <0xc400 0x100>;
+				qcom,pin-num = <5>;
+				status = "disabled";
+			};
+
+			gpio@c500 {
+				reg = <0xc500 0x100>;
+				qcom,pin-num = <6>;
+				status = "disabled";
+			};
+
+			gpio@c600 {
+				reg = <0xc600 0x100>;
+				qcom,pin-num = <7>;
+				status = "disabled";
+			};
+
+			gpio@c700 {
+				reg = <0xc700 0x100>;
+				qcom,pin-num = <8>;
+				status = "disabled";
+			};
+
+			gpio@c800 {
+				reg = <0xc800 0x100>;
+				qcom,pin-num = <9>;
+				status = "disabled";
+			};
+
+			gpio@c900 {
+				reg = <0xc900 0x100>;
+				qcom,pin-num = <10>;
+				status = "disabled";
+			};
+
+			gpio@ca00 {
+				reg = <0xca00 0x100>;
+				qcom,pin-num = <11>;
+				status = "disabled";
+			};
+
+			gpio@cb00 {
+				reg = <0xcb00 0x100>;
+				qcom,pin-num = <12>;
+				status = "disabled";
+			};
+
+			gpio@cc00 {
+				reg = <0xcc00 0x100>;
+				qcom,pin-num = <13>;
+				status = "disabled";
+			};
+
+			gpio@cd00 {
+				reg = <0xcd00 0x100>;
+				qcom,pin-num = <14>;
+				status = "disabled";
+			};
+		};
+
+		qcom,qpnp-qnovo@1500 {
+			compatible = "qcom,qpnp-qnovo";
+			reg = <0x1500 0x100>;
+			interrupts = <0x2 0x15 0x0 IRQ_TYPE_NONE>;
+			interrupt-names = "ptrain-done";
+			qcom,pmic-revid = <&pmi8998_revid>;
+		};
+
+		pmi8998_charger: qcom,qpnp-smb2 {
+			compatible = "qcom,qpnp-smb2";
+			#address-cells = <1>;
+			#size-cells = <1>;
+
+			qcom,pmic-revid = <&pmi8998_revid>;
+
+			io-channels = <&pmi8998_rradc 8>,
+				      <&pmi8998_rradc 10>,
+				      <&pmi8998_rradc 3>,
+				      <&pmi8998_rradc 4>;
+			io-channel-names = "charger_temp",
+					   "charger_temp_max",
+					   "usbin_i",
+					   "usbin_v";
+
+			qcom,boost-threshold-ua = <100000>;
+			qcom,wipower-max-uw = <5000000>;
+			dpdm-supply = <&qusb_phy0>;
+
+			qcom,thermal-mitigation
+					= <3000000 1500000 1000000 500000>;
+
+			qcom,chgr@1000 {
+				reg = <0x1000 0x100>;
+				interrupts =
+					<0x2 0x10 0x0 IRQ_TYPE_EDGE_RISING>,
+					<0x2 0x10 0x1 IRQ_TYPE_EDGE_RISING>,
+					<0x2 0x10 0x2 IRQ_TYPE_EDGE_RISING>,
+					<0x2 0x10 0x3 IRQ_TYPE_EDGE_RISING>,
+					<0x2 0x10 0x4 IRQ_TYPE_EDGE_RISING>;
+
+				interrupt-names = "chg-error",
+						  "chg-state-change",
+						  "step-chg-state-change",
+						  "step-chg-soc-update-fail",
+						  "step-chg-soc-update-request";
+			};
+
+			qcom,otg@1100 {
+				reg = <0x1100 0x100>;
+				interrupts = <0x2 0x11 0x0 IRQ_TYPE_EDGE_BOTH>,
+					     <0x2 0x11 0x1 IRQ_TYPE_EDGE_BOTH>,
+					     <0x2 0x11 0x2 IRQ_TYPE_EDGE_BOTH>,
+					     <0x2 0x11 0x3 IRQ_TYPE_EDGE_BOTH>;
+
+				interrupt-names = "otg-fail",
+						  "otg-overcurrent",
+						  "otg-oc-dis-sw-sts",
+						  "testmode-change-detect";
+			};
+
+			qcom,bat-if@1200 {
+				reg = <0x1200 0x100>;
+				interrupts =
+					<0x2 0x12 0x0 IRQ_TYPE_EDGE_RISING>,
+					<0x2 0x12 0x1 IRQ_TYPE_EDGE_BOTH>,
+					<0x2 0x12 0x2 IRQ_TYPE_EDGE_BOTH>,
+					<0x2 0x12 0x3 IRQ_TYPE_EDGE_BOTH>,
+					<0x2 0x12 0x4 IRQ_TYPE_EDGE_BOTH>,
+					<0x2 0x12 0x5 IRQ_TYPE_EDGE_BOTH>;
+
+				interrupt-names = "bat-temp",
+						  "bat-ocp",
+						  "bat-ov",
+						  "bat-low",
+						  "bat-therm-or-id-missing",
+						  "bat-terminal-missing";
+			};
+
+			qcom,usb-chgpth@1300 {
+				reg = <0x1300 0x100>;
+				interrupts =
+					<0x2 0x13 0x0 IRQ_TYPE_EDGE_BOTH>,
+					<0x2 0x13 0x1 IRQ_TYPE_EDGE_BOTH>,
+					<0x2 0x13 0x2 IRQ_TYPE_EDGE_BOTH>,
+					<0x2 0x13 0x3 IRQ_TYPE_EDGE_BOTH>,
+					<0x2 0x13 0x4 IRQ_TYPE_EDGE_BOTH>,
+					<0x2 0x13 0x5 IRQ_TYPE_EDGE_RISING>,
+					<0x2 0x13 0x6 IRQ_TYPE_EDGE_RISING>,
+					<0x2 0x13 0x7 IRQ_TYPE_EDGE_RISING>;
+
+				interrupt-names = "usbin-collapse",
+						  "usbin-lt-3p6v",
+						  "usbin-uv",
+						  "usbin-ov",
+						  "usbin-plugin",
+						  "usbin-src-change",
+						  "usbin-icl-change",
+						  "type-c-change";
+			};
+
+			qcom,dc-chgpth@1400 {
+				reg = <0x1400 0x100>;
+				interrupts =
+					<0x2 0x14 0x0 IRQ_TYPE_EDGE_BOTH>,
+					<0x2 0x14 0x1 IRQ_TYPE_EDGE_BOTH>,
+					<0x2 0x14 0x2 IRQ_TYPE_EDGE_BOTH>,
+					<0x2 0x14 0x3 IRQ_TYPE_EDGE_BOTH>,
+					<0x2 0x14 0x4 IRQ_TYPE_EDGE_BOTH>,
+					<0x2 0x14 0x5 IRQ_TYPE_EDGE_BOTH>,
+					<0x2 0x14 0x6 IRQ_TYPE_EDGE_RISING>;
+
+				interrupt-names = "dcin-collapse",
+						  "dcin-lt-3p6v",
+						  "dcin-uv",
+						  "dcin-ov",
+						  "dcin-plugin",
+						  "div2-en-dg",
+						  "dcin-icl-change";
+			};
+
+			qcom,chgr-misc@1600 {
+				reg = <0x1600 0x100>;
+				interrupts =
+					<0x2 0x16 0x0 IRQ_TYPE_EDGE_RISING>,
+					<0x2 0x16 0x1 IRQ_TYPE_EDGE_RISING>,
+					<0x2 0x16 0x2 IRQ_TYPE_EDGE_BOTH>,
+					<0x2 0x16 0x3 IRQ_TYPE_EDGE_BOTH>,
+					<0x2 0x16 0x4 IRQ_TYPE_EDGE_BOTH>,
+					<0x2 0x16 0x5 IRQ_TYPE_EDGE_BOTH>,
+					<0x2 0x16 0x6 IRQ_TYPE_EDGE_FALLING>,
+					<0x2 0x16 0x7 IRQ_TYPE_EDGE_BOTH>;
+
+				interrupt-names = "wdog-snarl",
+						  "wdog-bark",
+						  "aicl-fail",
+						  "aicl-done",
+						  "high-duty-cycle",
+						  "input-current-limiting",
+						  "temperature-change",
+						  "switcher-power-ok";
+			};
+		};
+
+		pmi8998_pdphy: qcom,usb-pdphy@1700 {
+			compatible = "qcom,qpnp-pdphy";
+			reg = <0x1700 0x100>;
+			vdd-pdphy-supply = <&pm8998_l24>;
+			vbus-supply = <&smb2_vbus>;
+			vconn-supply = <&smb2_vconn>;
+			interrupts = <0x2 0x17 0x0 IRQ_TYPE_EDGE_RISING>,
+				     <0x2 0x17 0x1 IRQ_TYPE_EDGE_RISING>,
+				     <0x2 0x17 0x2 IRQ_TYPE_EDGE_RISING>,
+				     <0x2 0x17 0x3 IRQ_TYPE_EDGE_RISING>,
+				     <0x2 0x17 0x4 IRQ_TYPE_EDGE_RISING>,
+				     <0x2 0x17 0x5 IRQ_TYPE_EDGE_RISING>,
+				     <0x2 0x17 0x6 IRQ_TYPE_EDGE_RISING>;
+
+			interrupt-names = "sig-tx",
+					  "sig-rx",
+					  "msg-tx",
+					  "msg-rx",
+					  "msg-tx-failed",
+					  "msg-tx-discarded",
+					  "msg-rx-discarded";
+
+			qcom,default-sink-caps = <5000 3000>, /* 5V @ 3A */
+						 <9000 3000>, /* 9V @ 3A */
+						 <12000 2250>; /* 12V @ 2.25A */
+		};
+
+		bcl@4200 {
+			compatible = "qcom,msm-bcl-lmh";
+			reg = <0x4200 0xff>,
+				<0x4300 0xff>;
+			reg-names = "fg_user_adc",
+					"fg_lmh";
+			interrupts = <0x2 0x42 0x0 IRQ_TYPE_NONE>,
+					<0x2 0x42 0x2 IRQ_TYPE_NONE>;
+			interrupt-names = "bcl-high-ibat-int",
+					"bcl-low-vbat-int";
+			qcom,vbat-polling-delay-ms = <100>;
+			qcom,ibat-polling-delay-ms = <100>;
+		};
+
+		pmi8998_rradc: rradc@4500 {
+			compatible = "qcom,rradc";
+			reg = <0x4500 0x100>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+			#io-channel-cells = <1>;
+			qcom,pmic-revid = <&pmi8998_revid>;
+		};
+
+		pmi8998_fg: qpnp,fg {
+			compatible = "qcom,fg-gen3";
+			#address-cells = <1>;
+			#size-cells = <1>;
+			qcom,pmic-revid = <&pmi8998_revid>;
+			io-channels = <&pmi8998_rradc 0>;
+			io-channel-names = "rradc_batt_id";
+			qcom,rradc-base = <0x4500>;
+			qcom,fg-esr-timer-awake = <96 96>;
+			qcom,fg-esr-timer-asleep = <256 256>;
+			qcom,fg-esr-timer-charging = <0 96>;
+			qcom,cycle-counter-en;
+			status = "okay";
+
+			qcom,fg-batt-soc@4000 {
+				status = "okay";
+				reg = <0x4000 0x100>;
+				interrupts = <0x2 0x40 0x0 IRQ_TYPE_EDGE_BOTH>,
+					     <0x2 0x40 0x1 IRQ_TYPE_EDGE_BOTH>,
+					     <0x2 0x40 0x2
+							IRQ_TYPE_EDGE_RISING>,
+					     <0x2 0x40 0x3
+							IRQ_TYPE_EDGE_RISING>,
+					     <0x2 0x40 0x4 IRQ_TYPE_EDGE_BOTH>,
+					     <0x2 0x40 0x5
+							IRQ_TYPE_EDGE_RISING>,
+					     <0x2 0x40 0x6 IRQ_TYPE_EDGE_BOTH>,
+					     <0x2 0x40 0x7 IRQ_TYPE_EDGE_BOTH>;
+				interrupt-names = "soc-update",
+						  "soc-ready",
+						  "bsoc-delta",
+						  "msoc-delta",
+						  "msoc-low",
+						  "msoc-empty",
+						  "msoc-high",
+						  "msoc-full";
+			};
+
+			qcom,fg-batt-info@4100 {
+				status = "okay";
+				reg = <0x4100 0x100>;
+				interrupts = <0x2 0x41 0x0 IRQ_TYPE_EDGE_BOTH>,
+					     <0x2 0x41 0x1 IRQ_TYPE_EDGE_BOTH>,
+					     <0x2 0x41 0x2 IRQ_TYPE_EDGE_BOTH>,
+					     <0x2 0x41 0x3 IRQ_TYPE_EDGE_BOTH>,
+					     <0x2 0x41 0x6 IRQ_TYPE_EDGE_BOTH>;
+				interrupt-names = "vbatt-pred-delta",
+						  "vbatt-low",
+						  "esr-delta",
+						  "batt-missing",
+						  "batt-temp-delta";
+			};
+
+			qcom,fg-memif@4400 {
+				status = "okay";
+				reg = <0x4400 0x100>;
+				interrupts = <0x2 0x44 0x0 IRQ_TYPE_EDGE_BOTH>,
+					     <0x2 0x44 0x1 IRQ_TYPE_EDGE_BOTH>,
+					     <0x2 0x44 0x2 IRQ_TYPE_EDGE_BOTH>;
+				interrupt-names = "ima-rdy",
+						  "mem-xcp",
+						  "dma-grant";
+			};
+		};
+	};
+
+	qcom,pmi8998@3 {
+		compatible ="qcom,spmi-pmic";
+		reg = <0x3 SPMI_USID>;
+		#address-cells = <2>;
+		#size-cells = <0>;
+
+		pmi8998_pwm_1: pwm@b100 {
+			compatible = "qcom,qpnp-pwm";
+			reg = <0xb100 0x100>,
+			      <0xb042 0x7e>;
+			reg-names = "qpnp-lpg-channel-base",
+					"qpnp-lpg-lut-base";
+			qcom,lpg-lut-size = <0x7e>;
+			qcom,channel-id = <1>;
+			qcom,supported-sizes = <6>, <9>;
+			qcom,ramp-index = <0>;
+			#pwm-cells = <2>;
+			status = "disabled";
+		};
+
+		pmi8998_pwm_2: pwm@b200 {
+			compatible = "qcom,qpnp-pwm";
+			reg = <0xb200 0x100>,
+			      <0xb042 0x7e>;
+			reg-names = "qpnp-lpg-channel-base",
+					 "qpnp-lpg-lut-base";
+			qcom,lpg-lut-size = <0x7e>;
+			qcom,channel-id = <2>;
+			qcom,supported-sizes = <6>, <9>;
+			qcom,ramp-index = <1>;
+			#pwm-cells = <2>;
+			status = "disabled";
+		};
+
+		pmi8998_pwm_3: pwm@b300 {
+			compatible = "qcom,qpnp-pwm";
+			reg = <0xb300 0x100>,
+			      <0xb042 0x7e>;
+			reg-names = "qpnp-lpg-channel-base",
+					 "qpnp-lpg-lut-base";
+			qcom,lpg-lut-size = <0x7e>;
+			qcom,channel-id = <3>;
+			qcom,supported-sizes = <6>, <9>;
+			qcom,ramp-index = <2>;
+			#pwm-cells = <2>;
+		};
+
+		pmi8998_pwm_4: pwm@b400 {
+			compatible = "qcom,qpnp-pwm";
+			reg = <0xb400 0x100>,
+			      <0xb042 0x7e>;
+			reg-names = "qpnp-lpg-channel-base",
+					 "qpnp-lpg-lut-base";
+			qcom,lpg-lut-size = <0x7e>;
+			qcom,channel-id = <4>;
+			qcom,supported-sizes = <6>, <9>;
+			qcom,ramp-index = <3>;
+			#pwm-cells = <2>;
+		};
+
+		pmi8998_pwm_5: pwm@b500 {
+			compatible = "qcom,qpnp-pwm";
+			reg = <0xb500 0x100>,
+			      <0xb042 0x7e>;
+			reg-names = "qpnp-lpg-channel-base",
+					 "qpnp-lpg-lut-base";
+			qcom,lpg-lut-size = <0x7e>;
+			qcom,channel-id = <5>;
+			qcom,supported-sizes = <6>, <9>;
+			qcom,ramp-index = <4>;
+			#pwm-cells = <2>;
+		};
+
+		pmi8998_pwm_6: pwm@b600 {
+			compatible = "qcom,qpnp-pwm";
+			reg = <0xb600 0x100>,
+			      <0xb042 0x7e>;
+			reg-names = "qpnp-lpg-channel-base",
+					 "qpnp-lpg-lut-base";
+			qcom,lpg-lut-size = <0x7e>;
+			qcom,channel-id = <6>;
+			qcom,supported-sizes = <6>, <9>;
+			qcom,ramp-index = <5>;
+			#pwm-cells = <2>;
+			status = "disabled";
+		};
+
+		qcom,leds@d000 {
+			compatible = "qcom,leds-qpnp";
+			reg = <0xd000 0x100>;
+			label = "rgb";
+			status = "okay";
+
+			red_led: qcom,rgb_0 {
+				label = "rgb";
+				qcom,id = <3>;
+				qcom,mode = "pwm";
+				pwms = <&pmi8998_pwm_5 0 0>;
+				qcom,pwm-us = <1000>;
+				qcom,max-current = <12>;
+				qcom,default-state = "off";
+				linux,name = "red";
+				linux,default-trigger =
+					"battery-charging";
+			};
+
+			green_led: qcom,rgb_1 {
+				label = "rgb";
+				qcom,id = <4>;
+				qcom,mode = "pwm";
+				pwms = <&pmi8998_pwm_4 0 0>;
+				qcom,pwm-us = <1000>;
+				qcom,max-current = <12>;
+				qcom,default-state = "off";
+				linux,name = "green";
+				linux,default-trigger = "battery-full";
+			};
+
+			blue_led: qcom,rgb_2 {
+				label = "rgb";
+				qcom,id = <5>;
+				qcom,mode = "pwm";
+				pwms = <&pmi8998_pwm_3 0 0>;
+				qcom,pwm-us = <1000>;
+				qcom,max-current = <12>;
+				qcom,default-state = "off";
+				linux,name = "blue";
+				linux,default-trigger = "boot-indication";
+			};
+		};
+
+		labibb: qpnp-labibb-regulator {
+			compatible = "qcom,qpnp-labibb-regulator";
+			#address-cells = <1>;
+			#size-cells = <1>;
+			qcom,pmic-revid = <&pmi8998_revid>;
+			status = "disabled";
+
+			ibb_regulator: qcom,ibb@dc00 {
+				reg = <0xdc00 0x100>;
+				reg-names = "ibb_reg";
+				regulator-name = "ibb_reg";
+
+				regulator-min-microvolt = <4600000>;
+				regulator-max-microvolt = <6000000>;
+
+				interrupts = <0x3 0xdc 0x2
+						IRQ_TYPE_EDGE_RISING>;
+				interrupt-names = "ibb-sc-err";
+
+				qcom,qpnp-ibb-min-voltage = <1400000>;
+				qcom,qpnp-ibb-step-size = <100000>;
+				qcom,qpnp-ibb-slew-rate = <2000000>;
+				qcom,qpnp-ibb-use-default-voltage;
+				qcom,qpnp-ibb-init-voltage = <5500000>;
+				qcom,qpnp-ibb-init-amoled-voltage = <4000000>;
+				qcom,qpnp-ibb-init-lcd-voltage = <5500000>;
+
+				qcom,qpnp-ibb-soft-start = <1000>;
+
+				qcom,qpnp-ibb-lab-pwrup-delay = <8000>;
+				qcom,qpnp-ibb-lab-pwrdn-delay = <8000>;
+				qcom,qpnp-ibb-en-discharge;
+
+				qcom,qpnp-ibb-full-pull-down;
+				qcom,qpnp-ibb-pull-down-enable;
+				qcom,qpnp-ibb-switching-clock-frequency =
+									<1480>;
+				qcom,qpnp-ibb-limit-maximum-current = <1550>;
+				qcom,qpnp-ibb-debounce-cycle = <16>;
+				qcom,qpnp-ibb-limit-max-current-enable;
+				qcom,qpnp-ibb-ps-enable;
+			};
+
+			lab_regulator: qcom,lab@de00 {
+				reg = <0xde00 0x100>;
+				reg-names = "lab";
+				regulator-name = "lab_reg";
+
+				regulator-min-microvolt = <4600000>;
+				regulator-max-microvolt = <6000000>;
+
+				interrupts = <0x3 0xde 0x0
+						IRQ_TYPE_EDGE_RISING>,
+					     <0x3 0xde 0x1
+						IRQ_TYPE_EDGE_RISING>;
+				interrupt-names = "lab-vreg-ok", "lab-sc-err";
+
+				qcom,qpnp-lab-min-voltage = <4600000>;
+				qcom,qpnp-lab-step-size = <100000>;
+				qcom,qpnp-lab-slew-rate = <5000>;
+				qcom,qpnp-lab-use-default-voltage;
+				qcom,qpnp-lab-init-voltage = <5500000>;
+				qcom,qpnp-lab-init-amoled-voltage = <4600000>;
+				qcom,qpnp-lab-init-lcd-voltage = <5500000>;
+
+				qcom,qpnp-lab-soft-start = <800>;
+
+				qcom,qpnp-lab-full-pull-down;
+				qcom,qpnp-lab-pull-down-enable;
+				qcom,qpnp-lab-switching-clock-frequency =
+									<1600>;
+				qcom,qpnp-lab-limit-maximum-current = <1600>;
+				qcom,qpnp-lab-limit-max-current-enable;
+				qcom,qpnp-lab-ps-threshold = <70>;
+				qcom,qpnp-lab-ps-enable;
+				qcom,qpnp-lab-nfet-size = <100>;
+				qcom,qpnp-lab-pfet-size = <100>;
+				qcom,qpnp-lab-max-precharge-time = <500>;
+			};
+		};
+
+		pmi8998_wled: qcom,leds@d800 {
+			compatible = "qcom,qpnp-wled";
+			reg = <0xd800 0x100>,
+				<0xd900 0x100>;
+			reg-names = "qpnp-wled-ctrl-base",
+					"qpnp-wled-sink-base";
+			interrupts = <0x3 0xd8 0x1 IRQ_TYPE_EDGE_RISING>,
+					<0x3 0xd8 0x2 IRQ_TYPE_EDGE_RISING>;
+			interrupt-names = "ovp-irq", "sc-irq";
+			linux,name = "wled";
+			linux,default-trigger = "bkl-trigger";
+			qcom,fdbk-output = "auto";
+			qcom,vref-uv = <127500>;
+			qcom,switch-freq-khz = <800>;
+			qcom,ovp-mv = <29600>;
+			qcom,ilim-ma = <970>;
+			qcom,boost-duty-ns = <26>;
+			qcom,mod-freq-khz = <9600>;
+			qcom,dim-mode = "hybrid";
+			qcom,hyb-thres = <625>;
+			qcom,sync-dly-us = <800>;
+			qcom,fs-curr-ua = <25000>;
+			qcom,cons-sync-write-delay-us = <1000>;
+			qcom,led-strings-list = [00 01 02 03];
+			qcom,en-ext-pfet-sc-pro;
+			qcom,pmic-revid = <&pmi8998_revid>;
+			qcom,loop-auto-gm-en;
+			qcom,auto-calibration-enable;
+			status = "okay";
+		};
+
+		pmi8998_haptics: qcom,haptic@c000 {
+			status = "disabled";
+			compatible = "qcom,qpnp-haptic";
+			reg = <0xc000 0x100>;
+			interrupts = <0x3 0xc0 0x0 IRQ_TYPE_EDGE_BOTH>,
+				     <0x3 0xc0 0x1 IRQ_TYPE_EDGE_BOTH>;
+			interrupt-names = "sc-irq", "play-irq";
+			qcom,pmic-revid = <&pmi8998_revid>;
+			qcom,pmic-misc = <&pmi8998_misc>;
+			qcom,misc-clk-trim-error-reg = <0xf3>;
+			qcom,actuator-type = "lra";
+			qcom,play-mode = "direct";
+			qcom,vmax-mv = <3200>;
+			qcom,ilim-ma = <800>;
+			qcom,wave-shape = "square";
+			qcom,wave-play-rate-us = <6667>;
+			qcom,int-pwm-freq-khz = <505>;
+			qcom,sc-deb-cycles = <8>;
+			qcom,en-brake;
+			qcom,brake-pattern = [03 03 00 00];
+			qcom,lra-high-z = "opt1";
+			qcom,lra-auto-res-mode = "qwd";
+			qcom,lra-res-cal-period = <4>;
+			qcom,correct-lra-drive-freq;
+		};
+
+		flash_led: qcom,leds@d300 {
+			compatible = "qcom,qpnp-flash-led-v2";
+			status = "okay";
+			reg = <0xd300 0x100>;
+			label = "flash";
+			interrupts = <0x3 0xd3 0x0 IRQ_TYPE_EDGE_RISING>,
+					<0x3 0xd3 0x3 IRQ_TYPE_EDGE_RISING>,
+					<0x3 0xd3 0x4 IRQ_TYPE_EDGE_RISING>;
+			interrupt-names = "led-fault-irq",
+					"all-ramp-down-done-irq",
+					"all-ramp-up-done-irq";
+			qcom,hdrm-auto-mode;
+			qcom,short-circuit-det;
+			qcom,open-circuit-det;
+			qcom,vph-droop-det;
+			qcom,thermal-derate-en;
+			qcom,thermal-derate-current = <200 500 1000>;
+			qcom,isc-delay = <192>;
+			qcom,pmic-revid = <&pmi8998_revid>;
+
+			pmi8998_flash0: qcom,flash_0 {
+				label = "flash";
+				qcom,led-name = "led:flash_0";
+				qcom,max-current = <1500>;
+				qcom,default-led-trigger = "flash0_trigger";
+				qcom,id = <0>;
+				qcom,current-ma = <1000>;
+				qcom,duration-ms = <1280>;
+				qcom,ires-ua = <12500>;
+				qcom,hdrm-voltage-mv = <325>;
+				qcom,hdrm-vol-hi-lo-win-mv = <100>;
+			};
+
+			pmi8998_flash1: qcom,flash_1 {
+				label = "flash";
+				qcom,led-name = "led:flash_1";
+				qcom,max-current = <1500>;
+				qcom,default-led-trigger = "flash1_trigger";
+				qcom,id = <1>;
+				qcom,current-ma = <1000>;
+				qcom,duration-ms = <1280>;
+				qcom,ires-ua = <12500>;
+				qcom,hdrm-voltage-mv = <325>;
+				qcom,hdrm-vol-hi-lo-win-mv = <100>;
+			};
+
+			pmi8998_flash2: qcom,flash_2 {
+				label = "flash";
+				qcom,led-name = "led:flash_2";
+				qcom,max-current = <750>;
+				qcom,default-led-trigger = "flash2_trigger";
+				qcom,id = <2>;
+				qcom,current-ma = <500>;
+				qcom,duration-ms = <1280>;
+				qcom,ires-ua = <12500>;
+				qcom,hdrm-voltage-mv = <325>;
+				qcom,hdrm-vol-hi-lo-win-mv = <100>;
+			};
+
+			pmi8998_torch0: qcom,torch_0 {
+				label = "torch";
+				qcom,led-name = "led:torch_0";
+				qcom,max-current = <500>;
+				qcom,default-led-trigger = "torch0_trigger";
+				qcom,id = <0>;
+				qcom,current-ma = <300>;
+				qcom,ires-ua = <12500>;
+				qcom,hdrm-voltage-mv = <325>;
+				qcom,hdrm-vol-hi-lo-win-mv = <100>;
+			};
+
+			pmi8998_torch1: qcom,torch_1 {
+				label = "torch";
+				qcom,led-name = "led:torch_1";
+				qcom,max-current = <500>;
+				qcom,default-led-trigger = "torch1_trigger";
+				qcom,id = <1>;
+				qcom,current-ma = <300>;
+				qcom,ires-ua = <12500>;
+				qcom,hdrm-voltage-mv = <325>;
+				qcom,hdrm-vol-hi-lo-win-mv = <100>;
+			};
+
+			pmi8998_torch2: qcom,torch_2 {
+				label = "torch";
+				qcom,led-name = "led:torch_2";
+				qcom,max-current = <500>;
+				qcom,default-led-trigger = "torch2_trigger";
+				qcom,id = <2>;
+				qcom,current-ma = <300>;
+				qcom,ires-ua = <12500>;
+				qcom,hdrm-voltage-mv = <325>;
+				qcom,hdrm-vol-hi-lo-win-mv = <100>;
+			};
+
+			pmi8998_switch0: qcom,led_switch_0 {
+				label = "switch";
+				qcom,led-name = "led:switch_0";
+				qcom,led-mask = <3>;
+				qcom,default-led-trigger = "switch0_trigger";
+			};
+
+			pmi8998_switch1: qcom,led_switch_1 {
+				label = "switch";
+				qcom,led-name = "led:switch_1";
+				qcom,led-mask = <4>;
+				qcom,default-led-trigger = "switch1_trigger";
+			};
+		};
+	};
+};
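The labibb node above is likewise shipped disabled; a board with a display panel enables it and selects the rail mode in its own dts. A hedged sketch, assuming an LCD panel:

&labibb {
	status = "okay";
	qcom,qpnp-labibb-mode = "lcd";
};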
diff -Nruw linux-4.4.115-fbx/arch/arm/boot/dts/qcom./msm-rdbg.dtsi linux-4.4.115-fbx/arch/arm/boot/dts/qcom/msm-rdbg.dtsi
--- linux-4.4.115-fbx/arch/arm/boot/dts/qcom./msm-rdbg.dtsi	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/arch/arm/boot/dts/qcom/msm-rdbg.dtsi	2019-01-22 16:16:21.179225361 +0100
@@ -0,0 +1,106 @@
+/* Copyright (c) 2015, 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+&soc {
+	smp2pgpio_rdbg_2_in: qcom,smp2pgpio-rdbg-2-in {
+		compatible = "qcom,smp2pgpio";
+		qcom,entry-name = "rdbg";
+		qcom,remote-pid = <2>;
+		qcom,is-inbound;
+		gpio-controller;
+		#gpio-cells = <2>;
+		interrupt-controller;
+		#interrupt-cells = <2>;
+	};
+
+	qcom,smp2pgpio_client_rdbg_2_in {
+		compatible = "qcom,smp2pgpio_client_rdbg_2_in";
+		gpios = <&smp2pgpio_rdbg_2_in 0 0>;
+	};
+
+	smp2pgpio_rdbg_2_out: qcom,smp2pgpio-rdbg-2-out {
+		compatible = "qcom,smp2pgpio";
+		qcom,entry-name = "rdbg";
+		qcom,remote-pid = <2>;
+		gpio-controller;
+		#gpio-cells = <2>;
+		interrupt-controller;
+		#interrupt-cells = <2>;
+	};
+
+	qcom,smp2pgpio_client_rdbg_2_out {
+		compatible = "qcom,smp2pgpio_client_rdbg_2_out";
+		gpios = <&smp2pgpio_rdbg_2_out 0 0>;
+	};
+
+	smp2pgpio_rdbg_1_in: qcom,smp2pgpio-rdbg-1-in {
+		compatible = "qcom,smp2pgpio";
+		qcom,entry-name = "rdbg";
+		qcom,remote-pid = <1>;
+		qcom,is-inbound;
+		gpio-controller;
+		#gpio-cells = <2>;
+		interrupt-controller;
+		#interrupt-cells = <2>;
+	};
+
+	qcom,smp2pgpio_client_rdbg_1_in {
+		compatible = "qcom,smp2pgpio_client_rdbg_1_in";
+		gpios = <&smp2pgpio_rdbg_1_in 0 0>;
+	};
+
+	smp2pgpio_rdbg_1_out: qcom,smp2pgpio-rdbg-1-out {
+		compatible = "qcom,smp2pgpio";
+		qcom,entry-name = "rdbg";
+		qcom,remote-pid = <1>;
+		gpio-controller;
+		#gpio-cells = <2>;
+		interrupt-controller;
+		#interrupt-cells = <2>;
+	};
+
+	qcom,smp2pgpio_client_rdbg_1_out {
+		compatible = "qcom,smp2pgpio_client_rdbg_1_out";
+		gpios = <&smp2pgpio_rdbg_1_out 0 0>;
+	};
+
+	smp2pgpio_rdbg_5_in: qcom,smp2pgpio-rdbg-5-in {
+		compatible = "qcom,smp2pgpio";
+		qcom,entry-name = "rdbg";
+		qcom,remote-pid = <5>;
+		qcom,is-inbound;
+		gpio-controller;
+		#gpio-cells = <2>;
+		interrupt-controller;
+		#interrupt-cells = <2>;
+	};
+
+	qcom,smp2pgpio_client_rdbg_5_in {
+		compatible = "qcom,smp2pgpio_client_rdbg_5_in";
+		gpios = <&smp2pgpio_rdbg_5_in 0 0>;
+	};
+
+	smp2pgpio_rdbg_5_out: qcom,smp2pgpio-rdbg-5-out {
+		compatible = "qcom,smp2pgpio";
+		qcom,entry-name = "rdbg";
+		qcom,remote-pid = <5>;
+		gpio-controller;
+		#gpio-cells = <2>;
+		interrupt-controller;
+		#interrupt-cells = <2>;
+	};
+
+	qcom,smp2pgpio_client_rdbg_5_out {
+		compatible = "qcom,smp2pgpio_client_rdbg_5_out";
+		gpios = <&smp2pgpio_rdbg_5_out 0 0>;
+	};
+};
diff -Nruw linux-4.4.115-fbx/arch/arm/boot/dts/qcom./msm-smb138x.dtsi linux-4.4.115-fbx/arch/arm/boot/dts/qcom/msm-smb138x.dtsi
--- linux-4.4.115-fbx/arch/arm/boot/dts/qcom./msm-smb138x.dtsi	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/arch/arm/boot/dts/qcom/msm-smb138x.dtsi	2019-01-22 16:16:21.179225361 +0100
@@ -0,0 +1,137 @@
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <dt-bindings/interrupt-controller/irq.h>
+
+&i2c_7 {
+	status = "okay";
+	smb138x: qcom,smb138x@8 {
+		compatible = "qcom,i2c-pmic";
+		reg = <0x8>;
+		#address-cells = <1>;
+		#size-cells = <0>;
+		interrupt-parent = <&spmi_bus>;
+		interrupts = <0x0 0xd1 0x0 IRQ_TYPE_LEVEL_LOW>;
+		interrupt_names = "smb138x";
+		interrupt-controller;
+		#interrupt-cells = <3>;
+		qcom,periph-map = <0x10 0x11 0x12 0x13 0x14 0x16 0x36>;
+
+		smb138x_revid: qcom,revid@100 {
+			compatible = "qcom,qpnp-revid";
+			reg = <0x100 0x100>;
+		};
+
+		smb138x_tadc: qcom,tadc@3600 {
+			compatible = "qcom,tadc";
+			reg = <0x3600 0x100>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+			#io-channel-cells = <1>;
+			interrupt-parent = <&smb138x>;
+			interrupts = <0x36 0x0 IRQ_TYPE_EDGE_BOTH>;
+			interrupt-names = "eoc";
+
+			batt_temp@0 {
+				reg = <0>;
+				qcom,rbias = <68100>;
+				qcom,rtherm-at-25degc = <68000>;
+				qcom,beta-coefficient = <3450>;
+			};
+
+			skin_temp@1 {
+				reg = <1>;
+				qcom,rbias = <33000>;
+				qcom,rtherm-at-25degc = <68000>;
+				qcom,beta-coefficient = <3450>;
+			};
+
+			die_temp@2 {
+				reg = <2>;
+				qcom,scale = <(-1306)>;
+				qcom,offset = <397904>;
+			};
+
+			batt_i@3 {
+				reg = <3>;
+				qcom,channel = <3>;
+				qcom,scale = <(-20000000)>;
+			};
+
+			batt_v@4 {
+				reg = <4>;
+				qcom,scale = <5000000>;
+			};
+
+			input_i@5 {
+				reg = <5>;
+				qcom,scale = <14285714>;
+			};
+
+			input_v@6 {
+				reg = <6>;
+				qcom,scale = <25000000>;
+			};
+
+			otg_i@7 {
+				reg = <7>;
+				qcom,scale = <5714286>;
+			};
+		};
+
+		smb1381_charger: qcom,smb1381-charger@1000 {
+			compatible = "qcom,smb138x-parallel-slave";
+			qcom,pmic-revid = <&smb138x_revid>;
+			reg = <0x1000 0x700>;
+			#address-cells = <1>;
+			#size-cells = <1>;
+			interrupt-parent = <&smb138x>;
+			io-channels =
+				<&smb138x_tadc 1>,
+				<&smb138x_tadc 2>,
+				<&smb138x_tadc 3>,
+				<&smb138x_tadc 14>,
+				<&smb138x_tadc 15>,
+				<&smb138x_tadc 16>,
+				<&smb138x_tadc 17>;
+			io-channel-names =
+				"connector_temp",
+				"charger_temp",
+				"batt_i",
+				"connector_temp_thr1",
+				"connector_temp_thr2",
+				"connector_temp_thr3",
+				"charger_temp_max";
+
+			qcom,chgr@1000 {
+				reg = <0x1000 0x100>;
+				interrupts = <0x10 0x1 IRQ_TYPE_EDGE_RISING>;
+				interrupt-names = "chg-state-change";
+			};
+
+			qcom,chgr-misc@1600 {
+				reg = <0x1600 0x100>;
+				interrupts = <0x16 0x1 IRQ_TYPE_EDGE_RISING>,
+					     <0x16 0x6 IRQ_TYPE_EDGE_RISING>;
+				interrupt-names = "wdog-bark",
+						  "temperature-change";
+			};
+		};
+	};
+};
+
+&smb1381_charger {
+	smb138x_vbus: qcom,smb138x-vbus {
+		status = "disabled";
+		regulator-name = "smb138x-vbus";
+	};
+};
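smb138x_vbus is declared disabled; a board that sources OTG VBUS from the SMB138x would simply enable it:

&smb138x_vbus {
	status = "okay";
};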
diff -Nruw linux-4.4.115-fbx/arch/arm/boot/dts/qcom./skeleton64.dtsi linux-4.4.115-fbx/arch/arm/boot/dts/qcom/skeleton64.dtsi
--- linux-4.4.115-fbx/arch/arm/boot/dts/qcom./skeleton64.dtsi	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/arch/arm/boot/dts/qcom/skeleton64.dtsi	2019-01-22 16:16:21.215225687 +0100
@@ -0,0 +1,15 @@
+/*
+ * Skeleton device tree in the 64 bits version; the bare minimum
+ * needed to boot; just include and add a compatible value.  The
+ * bootloader will typically populate the memory node.
+ */
+
+/ {
+	#address-cells = <2>;
+	#size-cells = <2>;
+	cpus { };
+	soc { };
+	chosen { };
+	aliases { };
+	memory { device_type = "memory"; reg = <0 0 0 0>; };
+};
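As the comment says, a platform dts only needs to include this skeleton and add a compatible, with the bootloader filling in the memory node. A minimal sketch of such a consumer (model and compatible strings are placeholders):

/dts-v1/;
#include "skeleton64.dtsi"

/ {
	model = "Example 64-bit board";
	compatible = "qcom,example-board";
};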
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/arch/arm64/boot/dts/include/dt-bindings/clock/audio-ext-clk.h	2019-01-22 16:16:28.167288642 +0100
@@ -0,0 +1,30 @@
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __AUDIO_EXT_CLK_H
+#define __AUDIO_EXT_CLK_H
+
+/* Audio External Clocks */
+#define AUDIO_PMI_CLK		0
+#define AUDIO_PMIC_LNBB_CLK	0
+#define AUDIO_AP_CLK		1
+#define AUDIO_AP_CLK2		2
+#define AUDIO_LPASS_MCLK	3
+#define AUDIO_LPASS_MCLK2	4
+
+#define clk_audio_ap_clk	0x9b5727cb
+#define clk_audio_pmi_clk	0xcbfe416d
+#define clk_audio_ap_clk2	0x454d1e91
+#define clk_audio_lpass_mclk	0xf0f2a284
+#define clk_audio_pmi_lnbb_clk	0x57312343
+
+#endif
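The AUDIO_* indices are intended as clock-specifier cells in DT, while the clk_audio_* values are the hashed identifiers matched by the MSM clock framework. A hedged consumer sketch, assuming a provider labeled clock_audio with #clock-cells = <1> (the consumer label and clock-names are illustrative):

&sound_codec {
	clocks = <&clock_audio AUDIO_PMI_CLK>;
	clock-names = "mclk";
};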
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/arch/arm64/boot/dts/include/dt-bindings/clock/msm-clocks-8996.h	2019-01-22 16:16:28.171288678 +0100
@@ -0,0 +1,574 @@
+/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MSM_CLOCKS_8996_H
+#define __MSM_CLOCKS_8996_H
+
+#include "audio-ext-clk.h"
+
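The clk_* values below are precomputed hashes that DT consumers pass as the specifier cell when referencing a clock controller. A sketch of the usual pattern, assuming a GCC controller labeled clock_gcc and an I2C consumer (the labels and clock-names are illustrative):

&i2c_1 {
	clocks = <&clock_gcc clk_gcc_blsp1_ahb_clk>,
		 <&clock_gcc clk_gcc_blsp1_qup1_i2c_apps_clk>;
	clock-names = "iface_clk", "core_clk";
};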
+/* clock_gcc controlled clocks */
+#define clk_cxo_clk_src			0x79e95308
+#define clk_pnoc_clk			0x4325d220
+#define clk_pnoc_a_clk			0x2808c12b
+#define clk_bimc_clk			0x4b80bf00
+#define clk_bimc_a_clk			0x4b25668a
+#define clk_cnoc_clk			0xd5ccb7f4
+#define clk_cnoc_a_clk			0xd8fe2ccc
+#define clk_snoc_clk			0x2c341aa0
+#define clk_snoc_a_clk			0x8fcef2af
+#define clk_bb_clk1			0xf5304268
+#define clk_bb_clk1_ao			0xfa113810
+#define clk_bb_clk1_pin			0x6dd0a779
+#define clk_bb_clk1_pin_ao		0x9b637772
+#define clk_bb_clk2			0xfe15cb87
+#define clk_bb_clk2_ao			0x59682706
+#define clk_bb_clk2_pin			0x498938e5
+#define clk_bb_clk2_pin_ao		0x52513787
+#define clk_bimc_msmbus_clk		0xd212feea
+#define clk_bimc_msmbus_a_clk		0x71d1a499
+#define clk_ce1_a_clk			0x44a833fe
+#define clk_cnoc_msmbus_clk		0x62228b5d
+#define clk_cnoc_msmbus_a_clk		0x67442955
+#define clk_cxo_clk_src_ao		0x64eb6004
+#define clk_cxo_dwc3_clk		0xf79c19f6
+#define clk_cxo_lpm_clk			0x94adbf3d
+#define clk_cxo_otg_clk			0x4eec0bb9
+#define clk_cxo_pil_lpass_clk		0xe17f0ff6
+#define clk_cxo_pil_ssc_clk		0x81832015
+#define clk_div_clk1			0xaa1157a6
+#define clk_div_clk1_ao			0x6b943d68
+#define clk_div_clk2			0xd454019f
+#define clk_div_clk2_ao			0x53f9e788
+#define clk_div_clk3			0xa9a55a68
+#define clk_div_clk3_ao			0x3d6725a8
+#define clk_ipa_a_clk			0xeeec2919
+#define clk_ipa_clk			0xfa685cda
+#define clk_ln_bb_clk			0x3ab0b36d
+#define clk_ln_bb_a_clk			0xc7257ea8
+#define clk_ln_bb_clk_pin		0x1b1c476a
+#define clk_ln_bb_a_clk_pin		0x9cbb5411
+#define clk_mcd_ce1_clk			0xbb615d26
+#define clk_pnoc_keepalive_a_clk	0xf8f91f0b
+#define clk_pnoc_msmbus_clk		0x38b95c77
+#define clk_pnoc_msmbus_a_clk		0x8c9b4e93
+#define clk_pnoc_pm_clk			0xd6f7dfb9
+#define clk_pnoc_sps_clk		0xd482ecc7
+#define clk_qdss_a_clk			0xdd121669
+#define clk_qdss_clk			0x1492202a
+#define clk_rf_clk1			0xaabeea5a
+#define clk_rf_clk1_ao			0x72a10cb8
+#define clk_rf_clk1_pin			0x8f463562
+#define clk_rf_clk1_pin_ao		0x62549ff6
+#define clk_rf_clk2			0x24a30992
+#define clk_rf_clk2_ao			0x944d8bbd
+#define clk_rf_clk2_pin			0xa7c5602a
+#define clk_rf_clk2_pin_ao		0x2d75eb4d
+#define clk_snoc_msmbus_clk		0xe6900bb6
+#define clk_snoc_msmbus_a_clk		0x5d4683bd
+#define clk_mcd_ce1_clk			0xbb615d26
+#define clk_qcedev_ce1_clk		0x293f97b0
+#define clk_qcrypto_ce1_clk		0xa6ac14df
+#define clk_qseecom_ce1_clk		0xaa858373
+#define clk_scm_ce1_clk			0xd8ebcc62
+#define clk_ce1_clk			0x42229c55
+#define clk_gcc_ce1_ahb_m_clk		0x2eb28c01
+#define clk_gcc_ce1_axi_m_clk		0xc174dfba
+#define clk_measure_only_bimc_hmss_axi_clk	0xc1cc4f11
+#define clk_aggre1_noc_clk		0x049abba8
+#define clk_aggre1_noc_a_clk		0xc12e4220
+#define clk_aggre2_noc_clk		0xaa681404
+#define clk_aggre2_noc_a_clk		0xcab67089
+#define clk_mmssnoc_axi_rpm_clk		0x4d7f8cdc
+#define clk_mmssnoc_axi_rpm_a_clk	0xfbea899b
+#define clk_mmssnoc_axi_clk		0xdb4b31e6
+#define clk_mmssnoc_axi_a_clk		0xd4970614
+#define clk_mmssnoc_gds_clk		0x06a22afa
+
+#define clk_gpll0			0x1ebe3bc4
+#define clk_gpll0_ao			0xa1368304
+#define clk_gpll0_out_main		0xe9374de7
+#define clk_gpll4			0xb3b5d85b
+#define clk_gpll4_out_main		0xa9a0ab9d
+#define clk_ufs_axi_clk_src		0x297ca380
+#define clk_pcie_aux_clk_src		0xebc50566
+#define clk_usb30_master_clk_src	0xc6262f89
+#define clk_usb20_master_clk_src	0x5680ac83
+#define clk_ufs_ice_core_clk_src	0xda8e7119
+#define clk_blsp1_qup1_i2c_apps_clk_src 0x17f78f5e
+#define clk_blsp1_qup1_spi_apps_clk_src 0xf534c4fa
+#define clk_blsp1_qup2_i2c_apps_clk_src 0x8de71c79
+#define clk_blsp1_qup2_spi_apps_clk_src 0x33cf809a
+#define clk_blsp1_qup3_i2c_apps_clk_src 0xf161b902
+#define clk_blsp1_qup3_spi_apps_clk_src 0x5e95683f
+#define clk_blsp1_qup4_i2c_apps_clk_src 0xb2ecce68
+#define clk_blsp1_qup4_spi_apps_clk_src 0xddb5bbdb
+#define clk_blsp1_qup5_i2c_apps_clk_src 0x71ea7804
+#define clk_blsp1_qup5_spi_apps_clk_src 0x9752f35f
+#define clk_blsp1_qup6_i2c_apps_clk_src 0x28806803
+#define clk_blsp1_qup6_spi_apps_clk_src 0x44a1edc4
+#define clk_blsp1_uart1_apps_clk_src	0xf8146114
+#define clk_blsp1_uart2_apps_clk_src	0xfc9c2f73
+#define clk_blsp1_uart3_apps_clk_src	0x600497f2
+#define clk_blsp1_uart4_apps_clk_src	0x56bff15c
+#define clk_blsp1_uart5_apps_clk_src	0x218ef697
+#define clk_blsp1_uart6_apps_clk_src	0x8fbdbe4c
+#define clk_blsp2_qup1_i2c_apps_clk_src 0xd6d1e95d
+#define clk_blsp2_qup1_spi_apps_clk_src 0xcc1b8365
+#define clk_blsp2_qup2_i2c_apps_clk_src 0x603b5c51
+#define clk_blsp2_qup2_spi_apps_clk_src 0xd577dc44
+#define clk_blsp2_qup3_i2c_apps_clk_src 0xea82959c
+#define clk_blsp2_qup3_spi_apps_clk_src 0xd04b1e92
+#define clk_blsp2_qup4_i2c_apps_clk_src 0x73dc968c
+#define clk_blsp2_qup4_spi_apps_clk_src 0x25d4a2b1
+#define clk_blsp2_qup5_i2c_apps_clk_src 0xcc3698bd
+#define clk_blsp2_qup5_spi_apps_clk_src 0xfa0cf45e
+#define clk_blsp2_qup6_i2c_apps_clk_src 0x2fa53151
+#define clk_blsp2_qup6_spi_apps_clk_src 0x5ca86755
+#define clk_blsp2_uart1_apps_clk_src	0x562c66dc
+#define clk_blsp2_uart2_apps_clk_src	0xdd448080
+#define clk_blsp2_uart3_apps_clk_src	0x46b2e90f
+#define clk_blsp2_uart4_apps_clk_src	0x23a093d2
+#define clk_blsp2_uart5_apps_clk_src	0xe067616a
+#define clk_blsp2_uart6_apps_clk_src	0xe02d2829
+#define clk_gp1_clk_src			0xad85b97a
+#define clk_gp2_clk_src			0xfb1f0065
+#define clk_gp3_clk_src			0x63b693d6
+#define clk_hmss_rbcpr_clk_src		0xedd9a474
+#define clk_pdm2_clk_src		0x31e494fd
+#define clk_sdcc1_apps_clk_src		0xd4975db2
+#define clk_sdcc2_apps_clk_src		0xfc46c821
+#define clk_sdcc3_apps_clk_src		0xea34c7f4
+#define clk_sdcc4_apps_clk_src		0x7aaaaa0c
+#define clk_tsif_ref_clk_src		0x4e9042d1
+#define clk_usb20_mock_utmi_clk_src	0xc3aaeecb
+#define clk_usb30_mock_utmi_clk_src	0xa024a976
+#define clk_usb3_phy_aux_clk_src	0x15eec63c
+#define clk_gcc_qusb2phy_prim_reset	0x07550fa1
+#define clk_gcc_qusb2phy_sec_reset	0x3f3a87d0
+#define clk_gcc_periph_noc_usb20_ahb_clk	0xfb9f26e9
+#define clk_gcc_mmss_gcc_dbg_clk	0xe89d461c
+#define clk_cpu_dbg_clk			0x6550dfa9
+#define clk_gcc_blsp1_ahb_clk		0x8caa5b4f
+#define clk_gcc_blsp1_qup1_i2c_apps_clk 0xc303fae9
+#define clk_gcc_blsp1_qup1_spi_apps_clk 0x759a76b0
+#define clk_gcc_blsp1_qup2_i2c_apps_clk 0x1076f220
+#define clk_gcc_blsp1_qup2_spi_apps_clk 0x3e77d48f
+#define clk_gcc_blsp1_qup3_i2c_apps_clk 0x9e25ac82
+#define clk_gcc_blsp1_qup3_spi_apps_clk 0xfb978880
+#define clk_gcc_blsp1_qup4_i2c_apps_clk 0xd7f40f6f
+#define clk_gcc_blsp1_qup4_spi_apps_clk 0x80f8722f
+#define clk_gcc_blsp1_qup5_i2c_apps_clk 0xacae5604
+#define clk_gcc_blsp1_qup5_spi_apps_clk 0xbf3e15d7
+#define clk_gcc_blsp1_qup6_i2c_apps_clk 0x5c6ad820
+#define clk_gcc_blsp1_qup6_spi_apps_clk 0x780d9f85
+#define clk_gcc_blsp1_uart1_apps_clk	0xc7c62f90
+#define clk_gcc_blsp1_uart2_apps_clk	0xf8a61c96
+#define clk_gcc_blsp1_uart3_apps_clk	0xc3298bd7
+#define clk_gcc_blsp1_uart4_apps_clk	0x26be16c0
+#define clk_gcc_blsp1_uart5_apps_clk	0x28a6bc74
+#define clk_gcc_blsp1_uart6_apps_clk	0x28fd3466
+#define clk_gcc_blsp2_ahb_clk		0x8f283c1d
+#define clk_gcc_blsp2_qup1_i2c_apps_clk 0x9ace11dd
+#define clk_gcc_blsp2_qup1_spi_apps_clk 0xa32604cc
+#define clk_gcc_blsp2_qup2_i2c_apps_clk 0x1bf9a57e
+#define clk_gcc_blsp2_qup2_spi_apps_clk 0xbf54ca6d
+#define clk_gcc_blsp2_qup3_i2c_apps_clk 0x336d4170
+#define clk_gcc_blsp2_qup3_spi_apps_clk 0xc68509d6
+#define clk_gcc_blsp2_qup4_i2c_apps_clk 0xbd22539d
+#define clk_gcc_blsp2_qup4_spi_apps_clk 0x01a72b93
+#define clk_gcc_blsp2_qup5_i2c_apps_clk 0xe2b2ce1d
+#define clk_gcc_blsp2_qup5_spi_apps_clk 0xf40999cd
+#define clk_gcc_blsp2_qup6_i2c_apps_clk 0x894bcea4
+#define clk_gcc_blsp2_qup6_spi_apps_clk 0xfe1bd34a
+#define clk_gcc_blsp2_uart1_apps_clk	0x8c3512ff
+#define clk_gcc_blsp2_uart2_apps_clk	0x1e1965a3
+#define clk_gcc_blsp2_uart3_apps_clk	0x382415ab
+#define clk_gcc_blsp2_uart4_apps_clk	0x87a44b42
+#define clk_gcc_blsp2_uart5_apps_clk	0x5cd30649
+#define clk_gcc_blsp2_uart6_apps_clk	0x8feee5ab
+#define clk_gcc_boot_rom_ahb_clk	0xde2adeb1
+#define clk_gcc_gp1_clk			0x057f7b69
+#define clk_gcc_gp2_clk			0x9bf83ffd
+#define clk_gcc_gp3_clk			0xec6539ee
+#define clk_gcc_hmss_rbcpr_clk		0x699183be
+#define clk_gcc_mmss_noc_cfg_ahb_clk	0xb41a9d99
+#define clk_gcc_pcie_0_aux_clk		0x3d2e3ece
+#define clk_gcc_pcie_0_cfg_ahb_clk	0x4dd325c3
+#define clk_gcc_pcie_0_mstr_axi_clk	0x3f85285b
+#define clk_gcc_pcie_0_slv_axi_clk	0xd69638a1
+#define clk_gcc_pcie_0_pipe_clk		0x4f37621e
+#define clk_gcc_pcie_0_phy_reset	0xdc3201c1
+#define clk_gcc_pcie_1_aux_clk		0xc9bb962c
+#define clk_gcc_pcie_1_cfg_ahb_clk	0xb6338658
+#define clk_gcc_pcie_1_mstr_axi_clk	0xc20f6269
+#define clk_gcc_pcie_1_slv_axi_clk	0xd54e40d6
+#define clk_gcc_pcie_1_pipe_clk		0xc1627422
+#define clk_gcc_pcie_1_phy_reset	0x674481bb
+#define clk_gcc_pcie_2_aux_clk		0xa4dc7ae8
+#define clk_gcc_pcie_2_cfg_ahb_clk	0x4f1d3121
+#define clk_gcc_pcie_2_mstr_axi_clk	0x9e81724a
+#define clk_gcc_pcie_2_slv_axi_clk	0x7990d8b2
+#define clk_gcc_pcie_2_pipe_clk		0xa757a834
+#define clk_gcc_pcie_2_phy_reset	0x82634880
+#define clk_gcc_pcie_phy_reset		0x9bc3c959
+#define clk_gcc_pcie_phy_com_reset	0x8bf513e6
+#define clk_gcc_pcie_phy_nocsr_com_phy_reset	0x0c16a2da
+#define clk_gcc_pcie_phy_aux_clk	0x4746e74f
+#define clk_gcc_pcie_phy_cfg_ahb_clk	0x8533671a
+#define clk_gcc_pdm2_clk		0x99d55711
+#define clk_gcc_pdm_ahb_clk		0x365664f6
+#define clk_gcc_prng_ahb_clk		0x397e7eaa
+#define clk_gcc_sdcc1_ahb_clk		0x691e0caa
+#define clk_gcc_sdcc1_apps_clk		0x9ad6fb96
+#define clk_gcc_sdcc2_ahb_clk		0x23d5727f
+#define clk_gcc_sdcc2_apps_clk		0x861b20ac
+#define clk_gcc_sdcc3_ahb_clk		0x565b2c03
+#define clk_gcc_sdcc3_apps_clk		0x0b27aeac
+#define clk_gcc_sdcc4_ahb_clk		0x64f3e6a8
+#define clk_gcc_sdcc4_apps_clk		0xbf7c4dc8
+#define clk_gcc_tsif_ahb_clk		0x88d2822c
+#define clk_gcc_tsif_ref_clk		0x8f1ed2c2
+#define clk_gcc_ufs_ahb_clk		0x1914bb84
+#define clk_gcc_ufs_axi_clk		0x47c743a7
+#define clk_gcc_ufs_ice_core_clk	0x310b0710
+#define clk_gcc_ufs_rx_cfg_clk		0xa6747786
+#define clk_gcc_ufs_rx_symbol_0_clk	0x7f43251c
+#define clk_gcc_ufs_rx_symbol_1_clk	0x03182fde
+#define clk_gcc_ufs_tx_cfg_clk		0xba2cf8b5
+#define clk_gcc_ufs_tx_symbol_0_clk	0x6a9f747a
+#define clk_gcc_ufs_unipro_core_clk	0x2daf7fd2
+#define clk_gcc_ufs_sys_clk_core_clk	0x360e5ac8
+#define clk_gcc_ufs_tx_symbol_clk_core_clk	0xf6fb0df7
+#define clk_gcc_usb20_master_clk	0x24c3b66a
+#define clk_gcc_usb20_mock_utmi_clk	0xe8db8203
+#define clk_gcc_usb20_sleep_clk		0x6e8cb4b2
+#define clk_gcc_usb30_master_clk	0xb3b4e2cb
+#define clk_gcc_usb30_mock_utmi_clk	0xa800b65a
+#define clk_gcc_usb30_sleep_clk		0xd0b65c92
+#define clk_gcc_usb3_phy_aux_clk	0x0d9a36e0
+#define clk_gcc_usb3_phy_pipe_clk	0xf279aff2
+#define clk_gcc_usb_phy_cfg_ahb2phy_clk	0xd1231a0e
+#define clk_gcc_aggre0_cnoc_ahb_clk	0x53a35559
+#define clk_gcc_aggre0_snoc_axi_clk	0x3c446400
+#define clk_gcc_aggre0_noc_qosgen_extref_clk	0x8c4356ba
+#define clk_hlos1_vote_lpass_core_smmu_clk	0x3aaa1743
+#define clk_hlos1_vote_lpass_adsp_smmu_clk	0xc76f702f
+#define clk_gcc_usb3_phy_reset		0x03d559f1
+#define clk_gcc_usb3phy_phy_reset	0xb1a4f885
+#define clk_gcc_usb3_clkref_clk		0xb6cc8f01
+#define clk_gcc_hdmi_clkref_clk		0x4d4eec04
+#define clk_gcc_edp_clkref_clk		0xa8685c3f
+#define clk_gcc_ufs_clkref_clk		0x92aa126f
+#define clk_gcc_pcie_clkref_clk		0xa2e247fa
+#define clk_gcc_rx2_usb2_clkref_clk	0x27ec24ba
+#define clk_gcc_rx1_usb2_clkref_clk	0x53351d25
+#define clk_gcc_smmu_aggre0_ahb_clk	0x47a06ce4
+#define clk_gcc_smmu_aggre0_axi_clk	0x3cac4a6c
+#define clk_gcc_sys_noc_usb3_axi_clk	0x94d26800
+#define clk_gcc_sys_noc_ufs_axi_clk	0x19d38312
+#define clk_gcc_aggre2_usb3_axi_clk	0xd5822a8e
+#define clk_gcc_aggre2_ufs_axi_clk	0xb31e5191
+#define clk_gcc_mmss_gpll0_div_clk	0xdd06848d
+#define clk_gcc_mmss_bimc_gfx_clk	0xe4f28754
+#define clk_gcc_bimc_gfx_clk		0x3edd69ad
+#define clk_gcc_qspi_ahb_clk		0x96969dc8
+#define clk_gcc_qspi_ser_clk		0xfaf1e266
+#define clk_qspi_ser_clk_src		0x426676ee
+#define clk_sdcc1_ice_core_clk_src	0xfd6a4301
+#define clk_gcc_sdcc1_ice_core_clk	0x0fd5680a
+#define clk_gcc_mss_cfg_ahb_clk		0x111cde81
+#define clk_gcc_mss_snoc_axi_clk	0x0e71de85
+#define clk_gcc_mss_q6_bimc_axi_clk	0x67544d62
+#define clk_gcc_mss_mnoc_bimc_axi_clk	0xf665d03f
+#define clk_gpll0_out_msscc		0x7d794829
+#define clk_gcc_debug_mux_v2		0xf7e749f0
+#define clk_gcc_dcc_ahb_clk		0xfa14a88c
+#define clk_gcc_aggre0_noc_mpu_cfg_ahb_clk	0x5c1bb8e2
+
+/* clock_mmss controlled clocks */
+#define clk_mmsscc_xo			0x05e63704
+#define clk_mmsscc_gpll0		0xe900c515
+#define clk_mmsscc_gpll0_div		0x73892e05
+#define clk_mmsscc_mmssnoc_ahb		0x7b4bd6f7
+#define clk_mmpll0			0xdd83b751
+#define clk_mmpll0_out_main		0x2f996a31
+#define clk_mmpll1			0x6da7fb90
+#define clk_mmpll1_out_main		0xa0d3a7da
+#define clk_mmpll4			0x22c063c1
+#define clk_mmpll4_out_main		0xfb21c2fd
+#define clk_mmpll3			0x18c76899
+#define clk_mmpll3_out_main		0x6eb6328f
+#define clk_ahb_clk_src			0x86f49203
+#define clk_mmpll2			0x1190e4d8
+#define clk_mmpll2_out_main		0x1e9e24a8
+#define clk_mmpll8			0xd06ad45e
+#define clk_mmpll8_out_main		0x75b1f386
+#define clk_mmpll9			0x1c50684c
+#define clk_mmpll9_out_main		0x16b74937
+#define clk_mmpll5			0xa41e1936
+#define clk_mmpll5_out_main		0xcc1897bf
+#define clk_csi0_clk_src		0x227e65bc
+#define clk_vfe0_clk_src		0xa0c2bd8f
+#define clk_vfe1_clk_src		0x4e357366
+#define clk_csi1_clk_src		0x6a2a6c36
+#define clk_csi2_clk_src		0x4113589f
+#define clk_csi3_clk_src		0xfd934012
+#define clk_maxi_clk_src		0x52c09777
+#define clk_cpp_clk_src			0x8382f56d
+#define clk_jpeg0_clk_src		0x9a0a0ac3
+#define clk_jpeg2_clk_src		0x5ad927f3
+#define clk_jpeg_dma_clk_src		0xb68afcea
+#define clk_mdp_clk_src			0x6dc1f8f1
+#define clk_video_core_clk_src		0x8be4c944
+#define clk_fd_core_clk_src		0xe4799ab7
+#define clk_cci_clk_src			0x822f3d97
+#define clk_csiphy0_3p_clk_src		0xd2474b12
+#define clk_csiphy1_3p_clk_src		0x46a02aff
+#define clk_csiphy2_3p_clk_src		0x1447813f
+#define clk_camss_gp0_clk_src		0x6b57cfe6
+#define clk_camss_gp1_clk_src		0xf735368a
+#define clk_mclk0_clk_src		0x266b3853
+#define clk_mclk1_clk_src		0xa73cad0c
+#define clk_mclk2_clk_src		0x42545468
+#define clk_mclk3_clk_src		0x2bfbb714
+#define clk_csi0phytimer_clk_src	0xc8a309be
+#define clk_csi1phytimer_clk_src	0x7c0fe23a
+#define clk_csi2phytimer_clk_src	0x62ffea9c
+#define clk_rbbmtimer_clk_src		0x17649ecc
+#define clk_esc0_clk_src		0xb41d7c38
+#define clk_esc1_clk_src		0x3b0afa42
+#define clk_hdmi_clk_src		0xb40aeea9
+#define clk_vsync_clk_src		0xecb43940
+#define clk_rbcpr_clk_src		0x2c2e9af2
+#define clk_video_subcore0_clk_src	0x88d79636
+#define clk_video_subcore1_clk_src	0x4966930c
+#define clk_mmss_bto_ahb_clk		0xfdf8c361
+#define clk_camss_ahb_clk		0xc4ff91d4
+#define clk_camss_cci_ahb_clk		0x04c4441a
+#define clk_camss_cci_clk		0xd6cb5eb9
+#define clk_camss_cpp_ahb_clk		0x12e9a87b
+#define clk_camss_cpp_clk		0xb82f366b
+#define clk_camss_cpp_axi_clk		0x5598c804
+#define clk_camss_cpp_vbif_ahb_clk	0xb5f31be4
+#define clk_camss_csi0_ahb_clk		0x6e29c972
+#define clk_camss_csi0_clk		0x30862ddb
+#define clk_camss_csi0phy_clk		0x2cecfb84
+#define clk_camss_csi0pix_clk		0x6946f77b
+#define clk_camss_csi0rdi_clk		0x83645ef5
+#define clk_camss_csi1_ahb_clk		0xccc15f06
+#define clk_camss_csi1_clk		0xb150f052
+#define clk_camss_csi1phy_clk		0xb989f06d
+#define clk_camss_csi1pix_clk		0x58d19bf3
+#define clk_camss_csi1rdi_clk		0x4d2f3352
+#define clk_camss_csi2_ahb_clk		0x92d02d75
+#define clk_camss_csi2_clk		0x74fc92e8
+#define clk_camss_csi2phy_clk		0xda05d9d8
+#define clk_camss_csi2pix_clk		0xf8ed0731
+#define clk_camss_csi2rdi_clk		0xdc1b2081
+#define clk_camss_csi3_ahb_clk		0xee5e459c
+#define clk_camss_csi3_clk		0x39488fdd
+#define clk_camss_csi3phy_clk		0x8b6063b9
+#define clk_camss_csi3pix_clk		0xd82bd467
+#define clk_camss_csi3rdi_clk		0xb6750046
+#define clk_camss_csi_vfe0_clk		0x3023937a
+#define clk_camss_csi_vfe1_clk		0xe66fa522
+#define clk_camss_csiphy0_3p_clk	0xf2a54f5a
+#define clk_camss_csiphy1_3p_clk	0x8bf70cb2
+#define clk_camss_csiphy2_3p_clk	0x1c14c939
+#define clk_camss_gp0_clk		0xcee7e51d
+#define clk_camss_gp1_clk		0x41f1c2e3
+#define clk_camss_ispif_ahb_clk		0x9a212c6d
+#define clk_camss_jpeg0_clk		0x0b0e2db7
+#define clk_camss_jpeg2_clk		0xd7291c8d
+#define clk_camss_jpeg_ahb_clk		0x1f47fd28
+#define clk_camss_jpeg_axi_clk		0x9e5545c8
+#define clk_camss_jpeg_dma_clk		0x2336e65d
+#define clk_camss_mclk0_clk		0xcf0c61e0
+#define clk_camss_mclk1_clk		0xd1410ed4
+#define clk_camss_mclk2_clk		0x851286f2
+#define clk_camss_mclk3_clk		0x4db11c45
+#define clk_camss_micro_ahb_clk		0x33a23277
+#define clk_camss_csi0phytimer_clk	0xff93b3c8
+#define clk_camss_csi1phytimer_clk	0x6c399ab6
+#define clk_camss_csi2phytimer_clk	0x24f47f49
+#define clk_camss_top_ahb_clk		0x8f8b2d33
+#define clk_camss_vfe_ahb_clk		0x595197bc
+#define clk_camss_vfe_axi_clk		0x273d4c31
+#define clk_camss_vfe0_ahb_clk		0x4652833c
+#define clk_camss_vfe0_clk		0x1e9bb8c4
+#define clk_camss_vfe0_stream_clk	0x22835fa4
+#define clk_camss_vfe1_ahb_clk		0x6a56abd3
+#define clk_camss_vfe1_clk		0x5bffa69b
+#define clk_camss_vfe1_stream_clk	0x92f849b9
+#define clk_fd_ahb_clk			0x868a2c5c
+#define clk_fd_core_clk			0x3badcae4
+#define clk_fd_core_uar_clk		0x7e624e15
+#define clk_gpu_ahb_clk			0xf97f1d43
+#define clk_gpu_aon_isense_clk		0xa9e9b297
+#define clk_gpu_gx_gfx3d_clk		0xb7ece823
+#define clk_gpu_mx_clk			0xb80ccedf
+#define clk_gpu_gx_rbbmtimer_clk	0xdeba634e
+#define clk_mdss_ahb_clk		0x684ccb41
+#define clk_mdss_axi_clk		0xcc07d687
+#define clk_mdss_esc0_clk		0x28cafbe6
+#define clk_mdss_esc1_clk		0xc22c6883
+#define clk_mdss_hdmi_ahb_clk		0x01cef516
+#define clk_mdss_hdmi_clk		0x097a6de9
+#define clk_mdss_mdp_clk		0x618336ac
+#define clk_mdss_vsync_clk		0x42a022d3
+#define clk_mmss_misc_ahb_clk		0xea30b0e7
+#define clk_mmss_misc_cxo_clk		0xe620cd80
+#define clk_mmagic_bimc_noc_cfg_ahb_clk 0x12d5ba72
+#define clk_mmagic_camss_axi_clk	0xa8b1c16b
+#define clk_mmagic_camss_noc_cfg_ahb_clk 0x5182c819
+#define clk_mmss_mmagic_cfg_ahb_clk	0x5e94a822
+#define clk_mmagic_mdss_axi_clk		0xa0359d10
+#define clk_mmagic_mdss_noc_cfg_ahb_clk 0x9c6d5482
+#define clk_mmagic_video_axi_clk	0x7b9219c3
+#define clk_mmagic_video_noc_cfg_ahb_clk 0x5124d256
+#define clk_mmss_mmagic_ahb_clk		0x3d15f2b0
+#define clk_mmss_mmagic_maxi_clk	0xbdaf5af7
+#define clk_mmss_rbcpr_ahb_clk		0x623ba55f
+#define clk_mmss_rbcpr_clk		0x69a23a6f
+#define clk_mmss_spdm_cpp_clk		0xefe35cd2
+#define clk_mmss_spdm_jpeg_dma_clk	0xcb7bd5a0
+#define clk_smmu_cpp_ahb_clk		0x3ad82d84
+#define clk_smmu_cpp_axi_clk		0xa6bb2f4a
+#define clk_smmu_jpeg_ahb_clk		0x10c436ec
+#define clk_smmu_jpeg_axi_clk		0x41112f37
+#define clk_smmu_mdp_ahb_clk		0x04994cb2
+#define clk_smmu_mdp_axi_clk		0x7fd71687
+#define clk_smmu_rot_ahb_clk		0xa30772c9
+#define clk_smmu_rot_axi_clk		0xfed7c078
+#define clk_smmu_vfe_ahb_clk		0x4dabebe7
+#define clk_smmu_vfe_axi_clk		0xde483725
+#define clk_smmu_video_ahb_clk		0x2d738e2c
+#define clk_smmu_video_axi_clk		0xe2b5b887
+#define clk_video_ahb_clk		0x90775cfb
+#define clk_video_axi_clk		0xe6c16dba
+#define clk_video_core_clk		0x7e876ec3
+#define clk_video_maxi_clk		0x97749db6
+#define clk_video_subcore0_clk		0xb6f63e6c
+#define clk_video_subcore1_clk		0x26c29cb4
+#define clk_vmem_ahb_clk		0xab6223ff
+#define clk_vmem_maxi_clk		0x15ef32db
+#define clk_mmss_debug_mux		0xe646ffda
+#define clk_mmss_gcc_dbg_clk		0xafa4d48a
+#define clk_gfx3d_clk_src		0x917f76ef
+#define clk_extpclk_clk_src		0xb2c31abd
+#define clk_mdss_byte0_clk		0xf5a03f64
+#define clk_mdss_byte1_clk		0xb8c7067d
+#define clk_mdss_extpclk_clk		0xfa5aadb0
+#define clk_mdss_pclk0_clk		0x3487234a
+#define clk_mdss_pclk1_clk		0xd5804246
+#define clk_gpu_gcc_dbg_clk		0x0ccc42cd
+#define clk_mdss_mdp_vote_clk		0x588460a4
+#define clk_mdss_rotator_vote_clk	0x5b1f675e
+#define clk_mmpll2_postdiv_clk		0x4fdeaaba
+#define clk_mmpll8_postdiv_clk		0xedf57882
+#define clk_mmpll9_postdiv_clk		0x3064b618
+#define clk_gfx3d_clk_src_v2		0x4210acb7
+#define clk_byte0_clk_src		0x75cc885b
+#define clk_byte1_clk_src		0x63c2c955
+#define clk_pclk0_clk_src		0xccac1f35
+#define clk_pclk1_clk_src		0x090f68ac
+#define clk_ext_byte0_clk_src		0xfb32f31e
+#define clk_ext_byte1_clk_src		0x585ef6d4
+#define clk_ext_pclk0_clk_src		0x087c1612
+#define clk_ext_pclk1_clk_src		0x8067c5a3
+
+/* clock_debug controlled clocks */
+#define clk_gcc_debug_mux		0x8121ac15
+
+/* external multimedia clocks */
+#define clk_dsi0pll_pixel_clk_mux	0x792379e1
+#define clk_dsi0pll_byte_clk_mux	0x60e83f06
+#define clk_dsi0pll_byte_clk_src	0xbbaa30be
+#define clk_dsi0pll_pixel_clk_src	0x45b3260f
+#define clk_dsi0pll_n2_div_clk		0x1474c213
+#define clk_dsi0pll_post_n1_div_clk	0xdab8c389
+#define clk_dsi0pll_vco_clk		0x15940d40
+#define clk_dsi1pll_pixel_clk_mux	0x36458019
+#define clk_dsi1pll_byte_clk_mux	0xb5a42b7b
+#define clk_dsi1pll_byte_clk_src	0x63930a8f
+#define clk_dsi1pll_pixel_clk_src	0x0e4c9b56
+#define clk_dsi1pll_n2_div_clk		0x2c9d4007
+#define clk_dsi1pll_post_n1_div_clk	0x03020041
+#define clk_dsi1pll_vco_clk		0x99797b50
+#define clk_mdss_dsi1_vco_clk_src	0xfcd15658
+#define clk_hdmi_vco_clk		0x66003284
+
+#define clk_dsi0pll_shadow_byte_clk_src	0x177c029c
+#define clk_dsi0pll_shadow_pixel_clk_src	0x98ae3c92
+#define clk_dsi0pll_shadow_n2_div_clk	0xd5f0dad9
+#define clk_dsi0pll_shadow_post_n1_div_clk	0x1f7c8cf8
+#define clk_dsi0pll_shadow_vco_clk	0xb100ca83
+#define clk_dsi1pll_shadow_byte_clk_src	0xfc021ce5
+#define clk_dsi1pll_shadow_pixel_clk_src	0xdcca3ffc
+#define clk_dsi1pll_shadow_n2_div_clk		0x189541bf
+#define clk_dsi1pll_shadow_post_n1_div_clk	0x1637020e
+#define clk_dsi1pll_shadow_vco_clk		0x68d8b6f7
+
+/* CPU clocks */
+#define clk_pwrcl_clk			0xc554130e
+#define clk_pwrcl_pll			0x25454ca1
+#define clk_pwrcl_alt_pll		0xc445471b
+#define clk_pwrcl_pll_main		0x28948e22
+#define clk_pwrcl_alt_pll_main		0x25c8270e
+#define clk_pwrcl_hf_mux		0x77706ae6
+#define clk_pwrcl_lf_mux		0xd99e334d
+#define clk_perfcl_clk			0x58869997
+#define clk_perfcl_pll			0x97dcec1c
+#define clk_perfcl_alt_pll		0xfe2eaea1
+#define clk_perfcl_pll_main		0x0dbf0c0b
+#define clk_perfcl_alt_pll_main		0x0b892aab
+#define clk_perfcl_hf_mux		0x9e8bbe59
+#define clk_perfcl_lf_mux		0x2f9c278d
+#define clk_cbf_pll			0xfe2e96a3
+#define clk_cbf_pll_main		0x2b05cf95
+#define clk_cbf_hf_mux			0x71244f73
+#define clk_cbf_clk			0x48e9e16b
+#define clk_xo_ao			0x428c856d
+#define clk_sys_apcsaux_clk		0x0b0dd513
+#define clk_cpu_debug_mux		0xc7acaa31
+
+/* GCC block resets */
+#define QUSB2PHY_PRIM_BCR		0
+#define QUSB2PHY_SEC_BCR		1
+#define BLSP1_BCR			2
+#define BLSP2_BCR			3
+#define BOOT_ROM_BCR			4
+#define PRNG_BCR			5
+#define UFS_BCR				6
+#define USB_20_BCR			7
+#define USB_30_BCR			8
+#define USB3_PHY_BCR			9
+#define USB3PHY_PHY_BCR			10
+#define PCIE_0_PHY_BCR			11
+#define PCIE_1_PHY_BCR			12
+#define PCIE_2_PHY_BCR			13
+#define PCIE_PHY_BCR			14
+#define PCIE_PHY_COM_BCR		15
+#define PCIE_PHY_NOCSR_COM_PHY_BCR	16
+
+/* MMSS Block resets */
+#define VIDEO_BCR			0
+#define MDSS_BCR			1
+#define CAMSS_MICRO_BCR			2
+#define CAMSS_JPEG_BCR			3
+#define CAMSS_VFE0_BCR			4
+#define CAMSS_VFE1_BCR			5
+#define FD_BCR				6
+#define GPU_GX_BCR			7
+
+#endif
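
[editor's note: the 32-bit values in the tables above are identifiers derived from the clock names rather than register offsets; a device tree node selects a clock by passing one of them in its "clocks" property (e.g. clocks = <&clock_gcc clk_gcc_blsp1_ahb_clk>;), and the driver then looks the clock up by the matching "clock-names" entry. A minimal sketch of that driver side, assuming a node that maps "iface_clk" to one of these IDs — the probe function, name, and &clock_gcc label are illustrative, not taken from this patch:]

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	struct clk *iface;

	/* Resolved through the node's clocks/clock-names pair. */
	iface = devm_clk_get(&pdev->dev, "iface_clk");
	if (IS_ERR(iface))
		return PTR_ERR(iface);

	/* Callers must balance this with clk_disable_unprepare(). */
	return clk_prepare_enable(iface);
}
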
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/arch/arm64/boot/dts/include/dt-bindings/clock/msm-clocks-8998.h	2019-01-22 16:16:28.171288678 +0100
@@ -0,0 +1,534 @@
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MSM_CLOCKS_8998_H
+#define __MSM_CLOCKS_8998_H
+
+#include "audio-ext-clk.h"
+
+/* clock_rpm controlled clocks */
+#define clk_ce1_clk				0x42229c55
+#define clk_ce1_a_clk				0x44a833fe
+#define clk_cxo_clk_src				0x79e95308
+#define clk_bimc_clk				0x4b80bf00
+#define clk_bimc_a_clk				0x4b25668a
+#define clk_cnoc_clk				0xd5ccb7f4
+#define clk_cnoc_a_clk				0xd8fe2ccc
+#define clk_snoc_clk				0x2c341aa0
+#define clk_snoc_a_clk				0x8fcef2af
+#define clk_cnoc_periph_clk			0xb11e9cf9
+#define clk_cnoc_periph_a_clk			0x1d7faa2e
+#define clk_cnoc_periph_keepalive_a_clk		0x7287aef2
+#define clk_ln_bb_clk1				0xb867b147
+#define clk_ln_bb_clk1_ao			0x7f63a93a
+#define clk_ln_bb_clk1_pin			0x6fc5653c
+#define clk_ln_bb_clk1_pin_ao			0x25d625bf
+#define clk_ln_bb_clk2				0xf83e6387
+#define clk_ln_bb_clk2_ao			0x96f09628
+#define clk_ln_bb_clk2_pin			0xa9ebe8d5
+#define clk_ln_bb_clk2_pin_ao			0x89a1226f
+#define clk_ln_bb_clk3				0x4f52a39e
+#define clk_ln_bb_clk3_ao			0xb15eba76
+#define clk_ln_bb_clk3_pin			0xc4de7dad
+#define clk_ln_bb_clk3_pin_ao			0xc01022e8
+#define clk_bimc_msmbus_clk			0xd212feea
+#define clk_bimc_msmbus_a_clk			0x71d1a499
+#define clk_cnoc_msmbus_clk			0x62228b5d
+#define clk_cnoc_msmbus_a_clk			0x67442955
+#define clk_cxo_clk_src_ao			0x64eb6004
+#define clk_cxo_dwc3_clk			0xf79c19f6
+#define clk_cxo_lpm_clk				0x94adbf3d
+#define clk_cxo_otg_clk				0x4eec0bb9
+#define clk_cxo_pil_lpass_clk			0xe17f0ff6
+#define clk_cxo_pil_ssc_clk			0x81832015
+#define clk_cxo_pil_spss_clk			0x5cd71a61
+#define clk_div_clk1				0xaa1157a6
+#define clk_div_clk1_ao				0x6b943d68
+#define clk_div_clk2				0xd454019f
+#define clk_div_clk2_ao				0x53f9e788
+#define clk_div_clk3				0xa9a55a68
+#define clk_div_clk3_ao				0x3d6725a8
+#define clk_ipa_clk				0xfa685cda
+#define clk_ipa_a_clk				0xeeec2919
+#define clk_mcd_ce1_clk				0xbb615d26
+#define clk_mmssnoc_axi_clk			0xdb4b31e6
+#define clk_mmssnoc_axi_a_clk			0xd4970614
+#define clk_qcedev_ce1_clk			0x293f97b0
+#define clk_qcrypto_ce1_clk			0xa6ac14df
+#define clk_qdss_clk				0x1492202a
+#define clk_qdss_a_clk				0xdd121669
+#define clk_qseecom_ce1_clk			0xaa858373
+#define clk_rf_clk1				0xaabeea5a
+#define clk_rf_clk1_ao				0x72a10cb8
+#define clk_rf_clk1_pin				0x8f463562
+#define clk_rf_clk1_pin_ao			0x62549ff6
+#define clk_rf_clk2				0x24a30992
+#define clk_rf_clk2_ao				0x944d8bbd
+#define clk_rf_clk2_pin				0xa7c5602a
+#define clk_rf_clk2_pin_ao			0x2d75eb4d
+#define clk_rf_clk3				0xb673936b
+#define clk_rf_clk3_ao				0x038bb968
+#define clk_rf_clk3_pin				0x726f53f5
+#define clk_rf_clk3_pin_ao			0x76f9240f
+#define clk_scm_ce1_clk				0xd8ebcc62
+#define clk_snoc_msmbus_clk			0xe6900bb6
+#define clk_snoc_msmbus_a_clk			0x5d4683bd
+#define clk_gcc_ce1_ahb_m_clk			0x2eb28c01
+#define clk_gcc_ce1_axi_m_clk			0xc174dfba
+#define clk_aggre1_noc_clk			0x049abba8
+#define clk_aggre1_noc_a_clk			0xc12e4220
+#define clk_aggre2_noc_clk			0xaa681404
+#define clk_aggre2_noc_a_clk			0xcab67089
+#define clk_measure_only_bimc_hmss_axi_clk	0xc1cc4f11
+
+/* clock_gcc controlled clocks */
+#define clk_debug_mmss_clk			0x977c99b6
+#define clk_debug_rpm_clk			0x8e2b07ca
+#define clk_debug_cpu_clk			0x0e696b2b
+#define clk_gpu_gcc_debug_clk			0x3eb88190
+#define clk_gfx_gcc_debug_clk			0xa3a64fec
+#define clk_gpll0				0x1ebe3bc4
+#define clk_gpll0_out_main			0xe9374de7
+#define clk_gpll0_ao				0xa1368304
+#define clk_gcc_mmss_gpll0_clk			0x8050f008
+#define clk_gcc_mmss_gpll0_div_clk		0xdd06848d
+#define clk_gcc_gpu_gpll0_clk			0xdad7a7a4
+#define clk_gcc_gpu_gpll0_div_clk		0x07d16c6a
+#define clk_gpll4				0xb3b5d85b
+#define clk_gpll4_out_main			0xa9a0ab9d
+#define clk_usb30_master_clk_src		0xc6262f89
+#define clk_pcie_aux_clk_src			0xebc50566
+#define clk_ufs_axi_clk_src			0x297ca380
+#define clk_blsp1_qup1_i2c_apps_clk_src		0x17f78f5e
+#define clk_blsp1_qup1_spi_apps_clk_src		0xf534c4fa
+#define clk_blsp1_qup2_i2c_apps_clk_src		0x8de71c79
+#define clk_blsp1_qup2_spi_apps_clk_src		0x33cf809a
+#define clk_blsp1_qup3_i2c_apps_clk_src		0xf161b902
+#define clk_blsp1_qup3_spi_apps_clk_src		0x5e95683f
+#define clk_blsp1_qup4_i2c_apps_clk_src		0xb2ecce68
+#define clk_blsp1_qup4_spi_apps_clk_src		0xddb5bbdb
+#define clk_blsp1_qup5_i2c_apps_clk_src		0x71ea7804
+#define clk_blsp1_qup5_spi_apps_clk_src		0x9752f35f
+#define clk_blsp1_qup6_i2c_apps_clk_src		0x28806803
+#define clk_blsp1_qup6_spi_apps_clk_src		0x44a1edc4
+#define clk_blsp1_uart1_apps_clk_src		0xf8146114
+#define clk_blsp1_uart2_apps_clk_src		0xfc9c2f73
+#define clk_blsp1_uart3_apps_clk_src		0x600497f2
+#define clk_blsp2_qup1_i2c_apps_clk_src		0xd6d1e95d
+#define clk_blsp2_qup1_spi_apps_clk_src		0xcc1b8365
+#define clk_blsp2_qup2_i2c_apps_clk_src		0x603b5c51
+#define clk_blsp2_qup2_spi_apps_clk_src		0xd577dc44
+#define clk_blsp2_qup3_i2c_apps_clk_src		0xea82959c
+#define clk_blsp2_qup3_spi_apps_clk_src		0xd04b1e92
+#define clk_blsp2_qup4_i2c_apps_clk_src		0x73dc968c
+#define clk_blsp2_qup4_spi_apps_clk_src		0x25d4a2b1
+#define clk_blsp2_qup5_i2c_apps_clk_src		0xcc3698bd
+#define clk_blsp2_qup5_spi_apps_clk_src		0xfa0cf45e
+#define clk_blsp2_qup6_i2c_apps_clk_src		0x2fa53151
+#define clk_blsp2_qup6_spi_apps_clk_src		0x5ca86755
+#define clk_blsp2_uart1_apps_clk_src		0x562c66dc
+#define clk_blsp2_uart2_apps_clk_src		0xdd448080
+#define clk_blsp2_uart3_apps_clk_src		0x46b2e90f
+#define clk_gp1_clk_src				0xad85b97a
+#define clk_gp2_clk_src				0xfb1f0065
+#define clk_gp3_clk_src				0x63b693d6
+#define clk_hmss_rbcpr_clk_src			0xedd9a474
+#define clk_pdm2_clk_src			0x31e494fd
+#define clk_sdcc2_apps_clk_src			0xfc46c821
+#define clk_sdcc4_apps_clk_src			0x7aaaaa0c
+#define clk_tsif_ref_clk_src			0x4e9042d1
+#define clk_ufs_ice_core_clk_src		0xda8e7119
+#define clk_ufs_phy_aux_clk_src			0xc6bca085
+#define clk_ufs_unipro_core_clk_src		0x179e80a9
+#define clk_usb30_mock_utmi_clk_src		0xa024a976
+#define clk_usb3_phy_aux_clk_src		0x15eec63c
+#define clk_qspi_ref_clk_src			0xfe6b8e11
+#define clk_gcc_pcie_phy_0_reset		0x6bb4df33
+#define clk_gcc_usb3_phy_reset			0x03d559f1
+#define clk_gcc_usb3phy_phy_reset		0xb1a4f885
+#define clk_gcc_aggre1_ufs_axi_clk		0x873459d8
+#define clk_gcc_aggre1_ufs_axi_hw_ctl_clk	0x117a6f39
+#define clk_gcc_aggre1_usb3_axi_clk		0xc5c3fbe8
+#define clk_gcc_bimc_mss_q6_axi_clk		0x7437988f
+#define clk_gcc_blsp1_ahb_clk			0x8caa5b4f
+#define clk_gcc_blsp1_qup1_i2c_apps_clk		0xc303fae9
+#define clk_gcc_blsp1_qup1_spi_apps_clk		0x759a76b0
+#define clk_gcc_blsp1_qup2_i2c_apps_clk		0x1076f220
+#define clk_gcc_blsp1_qup2_spi_apps_clk		0x3e77d48f
+#define clk_gcc_blsp1_qup3_i2c_apps_clk		0x9e25ac82
+#define clk_gcc_blsp1_qup3_spi_apps_clk		0xfb978880
+#define clk_gcc_blsp1_qup4_i2c_apps_clk		0xd7f40f6f
+#define clk_gcc_blsp1_qup4_spi_apps_clk		0x80f8722f
+#define clk_gcc_blsp1_qup5_i2c_apps_clk		0xacae5604
+#define clk_gcc_blsp1_qup5_spi_apps_clk		0xbf3e15d7
+#define clk_gcc_blsp1_qup6_i2c_apps_clk		0x5c6ad820
+#define clk_gcc_blsp1_qup6_spi_apps_clk		0x780d9f85
+#define clk_gcc_blsp1_uart1_apps_clk		0xc7c62f90
+#define clk_gcc_blsp1_uart2_apps_clk		0xf8a61c96
+#define clk_gcc_blsp1_uart3_apps_clk		0xc3298bd7
+#define clk_gcc_blsp2_ahb_clk			0x8f283c1d
+#define clk_gcc_blsp2_qup1_i2c_apps_clk		0x9ace11dd
+#define clk_gcc_blsp2_qup1_spi_apps_clk		0xa32604cc
+#define clk_gcc_blsp2_qup2_i2c_apps_clk		0x1bf9a57e
+#define clk_gcc_blsp2_qup2_spi_apps_clk		0xbf54ca6d
+#define clk_gcc_blsp2_qup3_i2c_apps_clk		0x336d4170
+#define clk_gcc_blsp2_qup3_spi_apps_clk		0xc68509d6
+#define clk_gcc_blsp2_qup4_i2c_apps_clk		0xbd22539d
+#define clk_gcc_blsp2_qup4_spi_apps_clk		0x01a72b93
+#define clk_gcc_blsp2_qup5_i2c_apps_clk		0xe2b2ce1d
+#define clk_gcc_blsp2_qup5_spi_apps_clk		0xf40999cd
+#define clk_gcc_blsp2_qup6_i2c_apps_clk		0x894bcea4
+#define clk_gcc_blsp2_qup6_spi_apps_clk		0xfe1bd34a
+#define clk_gcc_blsp2_uart1_apps_clk		0x8c3512ff
+#define clk_gcc_blsp2_uart2_apps_clk		0x1e1965a3
+#define clk_gcc_blsp2_uart3_apps_clk		0x382415ab
+#define clk_gcc_boot_rom_ahb_clk		0xde2adeb1
+#define clk_gcc_bimc_gfx_clk			0x3edd69ad
+#define clk_gcc_cfg_noc_usb3_axi_clk		0x9ea4c2d9
+#define clk_gcc_gp1_clk				0x057f7b69
+#define clk_gcc_gp2_clk				0x9bf83ffd
+#define clk_gcc_gp3_clk				0xec6539ee
+#define clk_gcc_gpu_bimc_gfx_clk		0x3909459b
+#define clk_gcc_gpu_cfg_ahb_clk			0x72f20a57
+#define clk_gcc_gpu_iref_clk			0xfd82abad
+#define clk_gcc_hmss_dvm_bus_clk		0x17cc8b53
+#define clk_gcc_hmss_rbcpr_clk			0x699183be
+#define clk_hmss_gpll0_clk_src			0x17eb05d0
+#define clk_hmss_gpll4_clk_src			0x20456cae
+#define clk_gcc_mmss_sys_noc_axi_clk		0x4467b15b
+#define clk_gcc_mss_at_clk			0x1692c5aa
+#define clk_nav_gcc_dbg_clk			0x2221c544
+#define clk_gcc_pcie_0_aux_clk			0x3d2e3ece
+#define clk_gcc_pcie_0_cfg_ahb_clk		0x4dd325c3
+#define clk_gcc_pcie_0_mstr_axi_clk		0x3f85285b
+#define clk_gcc_pcie_0_pipe_clk			0x4f37621e
+#define clk_gcc_pcie_0_slv_axi_clk		0xd69638a1
+#define clk_gcc_pcie_phy_aux_clk		0x4746e74f
+#define clk_gcc_pdm2_clk			0x99d55711
+#define clk_gcc_pdm_ahb_clk			0x365664f6
+#define clk_gcc_prng_ahb_clk			0x397e7eaa
+#define clk_gcc_sdcc2_ahb_clk			0x23d5727f
+#define clk_gcc_sdcc2_apps_clk			0x861b20ac
+#define clk_gcc_sdcc4_ahb_clk			0x64f3e6a8
+#define clk_gcc_sdcc4_apps_clk			0xbf7c4dc8
+#define clk_gcc_tsif_ahb_clk			0x88d2822c
+#define clk_gcc_tsif_ref_clk			0x8f1ed2c2
+#define clk_gcc_ufs_ahb_clk			0x1914bb84
+#define clk_gcc_ufs_axi_clk			0x47c743a7
+#define clk_gcc_ufs_axi_hw_ctl_clk		0x69385b45
+#define clk_gcc_ufs_ice_core_clk		0x310b0710
+#define clk_gcc_ufs_ice_core_hw_ctl_clk		0x84e15a5b
+#define clk_gcc_ufs_phy_aux_clk			0x17acc8fb
+#define clk_gcc_ufs_phy_aux_hw_ctl_clk		0x7dbdb2e2
+#define clk_gcc_ufs_rx_symbol_0_clk		0x7f43251c
+#define clk_gcc_ufs_rx_symbol_1_clk		0x03182fde
+#define clk_gcc_ufs_tx_symbol_0_clk		0x6a9f747a
+#define clk_ufs_tx_symbol_0_clk			0xb3fcd0f7
+#define clk_ufs_rx_symbol_0_clk			0x17a0f1cd
+#define clk_gcc_ufs_unipro_core_clk		0x2daf7fd2
+#define clk_gcc_ufs_unipro_core_hw_ctl_clk	0x4a4e0f3d
+#define clk_gcc_usb30_master_clk		0xb3b4e2cb
+#define clk_gcc_usb30_mock_utmi_clk		0xa800b65a
+#define clk_gcc_usb30_sleep_clk			0xd0b65c92
+#define clk_gcc_usb3_phy_aux_clk		0x0d9a36e0
+#define clk_gcc_usb3_phy_pipe_clk		0xf279aff2
+#define clk_gcc_usb3_clkref_clk			0xb6cc8f00
+#define clk_gcc_hdmi_clkref_clk			0x4d4eec04
+#define clk_gcc_edp_clkref_clk			0xa8685c3f
+#define clk_gcc_ufs_clkref_clk			0x92aa126f
+#define clk_gcc_pcie_clkref_clk			0xa2e247fa
+#define clk_gcc_rx2_qlink_clkref_clk		0xd0ba986d
+#define clk_gcc_rx1_usb2_clkref_clk		0x53351d25
+#define clk_gcc_pcie_phy_reset			0x9bc3c959
+#define clk_gcc_pcie_phy_com_reset		0x8bf513e6
+#define clk_gcc_pcie_phy_nocsr_com_phy_reset	0x0c16a2da
+#define clk_gcc_qusb2phy_prim_reset		0x07550fa1
+#define clk_gcc_qusb2phy_sec_reset		0x3f3a87d0
+#define clk_gcc_mmss_noc_cfg_ahb_clk		0xb41a9d99
+#define clk_gcc_dcc_ahb_clk			0xfa14a88c
+#define clk_hlos1_vote_lpass_core_smmu_clk	0x3aaa1743
+#define clk_hlos1_vote_lpass_adsp_smmu_clk	0xc76f702f
+#define clk_gcc_mss_cfg_ahb_clk			0x111cde81
+#define clk_gcc_mss_q6_bimc_axi_clk		0x67544d62
+#define clk_gcc_mss_mnoc_bimc_axi_clk		0xf665d03f
+#define clk_gpll0_out_msscc			0x7d794829
+#define clk_gcc_mss_snoc_axi_clk		0x0e71de85
+#define clk_gcc_qspi_ref_clk			0x766a0f7c
+#define clk_gcc_qspi_ahb_clk			0x96969dc8
+#define clk_gcc_debug_mux			0x8121ac15
+
+/* clock_mmss controlled clocks */
+#define clk_mmsscc_xo				0x05e63704
+#define clk_mmsscc_gpll0			0xe900c515
+#define clk_mmsscc_gpll0_div			0x73892e05
+#define clk_mmpll0_pll				0x361e3cfd
+#define clk_mmpll1_pll				0x198e426b
+#define clk_mmpll3_pll				0x18c76899
+#define clk_mmpll4_pll				0x22c063c1
+#define clk_mmpll5_pll				0xa41e1936
+#define clk_mmpll6_pll				0xc56fb440
+#define clk_mmpll7_pll				0x3ac216af
+#define clk_mmpll10_pll				0x2561263b
+#define clk_mmpll0_pll_out			0x1e9e24a8
+#define clk_mmpll1_pll_out			0x5fa32257
+#define clk_mmpll3_pll_out			0x6eb6328f
+#define clk_mmpll4_pll_out			0xfb21c2fd
+#define clk_mmpll5_pll_out			0xcc1897bf
+#define clk_mmpll6_pll_out			0xfb1060bd
+#define clk_mmpll7_pll_out			0x767758ed
+#define clk_mmpll10_pll_out			0x3c5668f3
+#define clk_ahb_clk_src				0x86f49203
+#define clk_csi0_clk_src			0x227e65bc
+#define clk_vfe0_clk_src			0xa0c2bd8f
+#define clk_vfe1_clk_src			0x4e357366
+#define clk_mdp_clk_src				0x6dc1f8f1
+#define clk_maxi_clk_src			0x52c09777
+#define clk_cpp_clk_src				0x8382f56d
+#define clk_jpeg0_clk_src			0x9a0a0ac3
+#define clk_rot_clk_src				0xce49b56c
+#define clk_video_core_clk_src			0x8be4c944
+#define clk_csi1_clk_src			0x6a2a6c36
+#define clk_csi2_clk_src			0x4113589f
+#define clk_csi3_clk_src			0xfd934012
+#define clk_fd_core_clk_src			0xe4799ab7
+#define clk_ext_dp_phy_pll_vco			0x441b576b
+#define clk_ext_dp_phy_pll_link			0xea12644c
+#define clk_dp_link_clk_src			0x370d0626
+#define clk_dp_crypto_clk_src			0xf8faa811
+#define clk_dp_pixel_clk_src			0xf5dfbabf
+#define clk_ext_extpclk_clk_src			0xe5b273af
+#define clk_ext_pclk0_clk_src			0x087c1612
+#define clk_ext_pclk1_clk_src			0x8067c5a3
+#define clk_pclk0_clk_src			0xccac1f35
+#define clk_pclk1_clk_src			0x090f68ac
+#define clk_video_subcore0_clk_src		0x88d79636
+#define clk_video_subcore1_clk_src		0x4966930c
+#define clk_cci_clk_src				0x822f3d97
+#define clk_camss_gp0_clk_src			0x43b063e9
+#define clk_camss_gp1_clk_src			0xa3315f1b
+#define clk_mclk0_clk_src			0x266b3853
+#define clk_mclk1_clk_src			0xa73cad0c
+#define clk_mclk2_clk_src			0x42545468
+#define clk_mclk3_clk_src			0x2bfbb714
+#define clk_csiphy_clk_src			0x8cceb70a
+#define clk_csi0phytimer_clk_src		0xc8a309be
+#define clk_csi1phytimer_clk_src		0x7c0fe23a
+#define clk_csi2phytimer_clk_src		0x62ffea9c
+#define clk_ext_byte0_clk_src			0xfb32f31e
+#define clk_ext_byte1_clk_src			0x585ef6d4
+#define clk_byte0_clk_src			0x75cc885b
+#define clk_byte1_clk_src			0x63c2c955
+#define clk_dp_aux_clk_src			0x2b6e972b
+#define clk_dp_gtc_clk_src			0xc5a86a42
+#define clk_esc0_clk_src			0xb41d7c38
+#define clk_esc1_clk_src			0x3b0afa42
+#define clk_extpclk_clk_src			0xb2c31abd
+#define clk_hdmi_clk_src			0xb40aeea9
+#define clk_vsync_clk_src			0xecb43940
+#define clk_mmss_bimc_smmu_ahb_clk		0x4825baf4
+#define clk_mmss_bimc_smmu_axi_clk		0xc365ac39
+#define clk_mmss_snoc_dvm_axi_clk		0x2c159a11
+#define clk_mmss_camss_ahb_clk			0xa51f2c1d
+#define clk_mmss_camss_cci_ahb_clk		0xfda8bb6a
+#define clk_mmss_camss_cci_clk			0x71bb5c97
+#define clk_mmss_camss_cpp_ahb_clk		0xd5554f15
+#define clk_mmss_camss_cpp_clk			0x8e99ef57
+#define clk_mmss_camss_cpp_axi_clk		0xd84e390b
+#define clk_mmss_camss_cpp_vbif_ahb_clk		0x1b33a88e
+#define clk_mmss_camss_cphy_csid0_clk		0x56114361
+#define clk_mmss_camss_csi0_ahb_clk		0x2b58d241
+#define clk_mmss_camss_csi0_clk			0xccfe39ef
+#define clk_mmss_camss_csi0pix_clk		0x9e26509d
+#define clk_mmss_camss_csi0rdi_clk		0x01d5bf83
+#define clk_mmss_camss_cphy_csid1_clk		0x79fbcd8a
+#define clk_mmss_camss_csi1_ahb_clk		0x7073244b
+#define clk_mmss_camss_csi1_clk			0x3eeeaac0
+#define clk_mmss_camss_csi1pix_clk		0xf1375139
+#define clk_mmss_camss_csi1rdi_clk		0x43185024
+#define clk_mmss_camss_cphy_csid2_clk		0xf295e3ef
+#define clk_mmss_camss_csi2_ahb_clk		0x681c1479
+#define clk_mmss_camss_csi2_clk			0x94524569
+#define clk_mmss_camss_csi2pix_clk		0xf4de617d
+#define clk_mmss_camss_csi2rdi_clk		0x4bf01dc5
+#define clk_mmss_camss_cphy_csid3_clk		0x100188e9
+#define clk_mmss_camss_csi3_ahb_clk		0xfae7c29b
+#define clk_mmss_camss_csi3_clk			0x55e4bbae
+#define clk_mmss_camss_csi3pix_clk		0xc166a015
+#define clk_mmss_camss_csi3rdi_clk		0x6983a4cd
+#define clk_mmss_camss_csi_vfe0_clk		0x3b30b798
+#define clk_mmss_camss_csi_vfe1_clk		0xfe729af7
+#define clk_mmss_camss_csiphy0_clk		0x96c81af8
+#define clk_mmss_camss_csiphy1_clk		0xee9ac2bb
+#define clk_mmss_camss_csiphy2_clk		0x3365e70e
+#define clk_mmss_fd_ahb_clk			0x4ff1da4d
+#define clk_mmss_fd_core_clk			0x749e7eb0
+#define clk_mmss_fd_core_uar_clk		0x8ea480c5
+#define clk_mmss_camss_gp0_clk			0x3f7f6c87
+#define clk_mmss_camss_gp1_clk			0xdccdd730
+#define clk_mmss_camss_ispif_ahb_clk		0xbda4f0e3
+#define clk_mmss_camss_jpeg0_clk		0x4cc73b07
+#define clk_mmss_camss_jpeg0_vote_clk		0xc9efa6ac
+#define clk_mmss_camss_jpeg0_dma_vote_clk	0x371ec109
+#define clk_mmss_camss_jpeg_ahb_clk		0xde1fece3
+#define clk_mmss_camss_jpeg_axi_clk		0x7534616b
+#define clk_mmss_camss_mclk0_clk		0x056293a7
+#define clk_mmss_camss_mclk1_clk		0x96c7b69b
+#define clk_mmss_camss_mclk2_clk		0x8820556e
+#define clk_mmss_camss_mclk3_clk		0xf90ffb67
+#define clk_mmss_camss_micro_ahb_clk		0x6c6fd3c7
+#define clk_mmss_camss_csi0phytimer_clk		0x7a78864e
+#define clk_mmss_camss_csi1phytimer_clk		0x6e6c1de5
+#define clk_mmss_camss_csi2phytimer_clk		0x0235e2de
+#define clk_mmss_camss_top_ahb_clk		0x120618d6
+#define clk_mmss_camss_vfe0_ahb_clk		0x137bd0bd
+#define clk_mmss_camss_vfe0_clk			0xead28288
+#define clk_mmss_camss_vfe0_stream_clk		0xa0428287
+#define clk_mmss_camss_vfe1_ahb_clk		0xac0154c0
+#define clk_mmss_camss_vfe1_clk			0xc216b14d
+#define clk_mmss_camss_vfe1_stream_clk		0x745af3b6
+#define clk_mmss_camss_vfe_vbif_ahb_clk		0x0109a9c6
+#define clk_mmss_camss_vfe_vbif_axi_clk		0xe626d8a1
+#define clk_mmss_mdss_ahb_clk			0x85d37ab5
+#define clk_mmss_mdss_axi_clk			0xdf04fc1d
+#define clk_mmss_mdss_byte0_clk			0x38105d25
+#define clk_mmss_mdss_byte0_intf_clk		0x38e5aa79
+#define clk_mmss_mdss_byte0_intf_div_clk	0x8604f181
+#define clk_mmss_mdss_byte1_clk			0xe0c21354
+#define clk_mmss_mdss_byte1_intf_clk		0xcf654d8e
+#define clk_mmss_mdss_byte1_intf_div_clk	0xcdf334c5
+#define clk_mmss_mdss_dp_aux_clk		0x23125eb6
+#define clk_mmss_mdss_dp_crypto_clk		0x9a072d4e
+#define clk_mmss_mdss_dp_link_clk		0x8dd302d1
+#define clk_mmss_mdss_dp_link_intf_clk		0x70e386e6
+#define clk_mmss_mdss_dp_pixel_clk		0xb707b765
+#define clk_mmss_mdss_dp_gtc_clk		0xb59c151a
+#define clk_mmss_mdss_esc0_clk			0x5721ff83
+#define clk_mmss_mdss_esc1_clk			0xc3d0376b
+#define clk_mmss_mdss_extpclk_clk		0x74d5a954
+#define clk_mmss_mdss_hdmi_clk			0x28460a6d
+#define clk_mmss_mdss_hdmi_dp_ahb_clk		0x5448519f
+#define clk_mmss_mdss_mdp_clk			0x43539b0e
+#define clk_mmss_mdss_mdp_lut_clk		0x00627b2b
+#define clk_mmss_mdss_pclk0_clk			0xcc0e909d
+#define clk_mmss_mdss_pclk1_clk			0x850d9146
+#define clk_mmss_mdss_rot_clk			0xbb7e71c4
+#define clk_mmss_mdss_vsync_clk			0x629b36dc
+#define clk_mmss_misc_ahb_clk			0xea30b0e7
+#define clk_mmss_misc_cxo_clk			0xe620cd80
+#define clk_mmss_mnoc_ahb_clk			0x49a394f4
+#define clk_mmss_mnoc_maxi_clk			0xd8b7278f
+#define clk_mmss_video_subcore0_clk		0x23fae359
+#define clk_mmss_video_subcore1_clk		0x5213a0c7
+#define clk_mmss_video_ahb_clk			0x94334ae9
+#define clk_mmss_video_axi_clk			0xf3178ba5
+#define clk_mmss_video_core_clk			0x78f14c85
+#define clk_mmss_video_maxi_clk			0x1785ef88
+#define clk_mmss_vmem_ahb_clk			0x4b18955b
+#define clk_mmss_vmem_maxi_clk			0xb6067889
+#define clk_mmss_debug_mux			0xe646ffda
+
+/* external multimedia clocks */
+#define clk_dsi0pll_byteclk_mux			0xecf2c434
+#define clk_dsi0pll_byteclk_src			0x6f6f740f
+#define clk_dsi0pll_pclk_mux			0x6c9da335
+#define clk_dsi0pll_pclk_src			0x5efd85d4
+#define clk_dsi0pll_pclk_src_mux		0x84b14663
+#define clk_dsi0pll_post_bit_div		0xf46dcf27
+#define clk_dsi0pll_pll_out_div1		0xeda5b7fe
+#define clk_dsi0pll_pll_out_div2		0x97fa476d
+#define clk_dsi0pll_pll_out_div4		0x90a98ce0
+#define clk_dsi0pll_pll_out_div8		0x9d9d85cf
+#define clk_dsi0pll_pll_out_mux			0x179c27ca
+#define clk_dsi0pll_post_vco_mux		0xfaf9bd1f
+#define clk_dsi0pll_post_vco_div1		0xabb50b2a
+#define clk_dsi0pll_post_vco_div4		0xbe51c091
+#define clk_dsi0pll_bitclk_src			0x36c3c437
+#define clk_dsi0pll_vco_clk			0x15940d40
+
+#define clk_dsi1pll_byteclk_mux			0x14e2f38f
+#define clk_dsi1pll_byteclk_src			0x4b65c298
+#define clk_dsi1pll_pclk_mux			0x4c0518b5
+#define clk_dsi1pll_pclk_src			0xeddcd80e
+#define clk_dsi1pll_pclk_src_mux		0x3651feb3
+#define clk_dsi1pll_post_bit_div		0x712f0260
+#define clk_dsi1pll_pll_out_div8		0x87628ddb
+#define clk_dsi1pll_pll_out_div4		0x0d9a384b
+#define clk_dsi1pll_pll_out_div2		0x0c9b5748
+#define clk_dsi1pll_pll_out_div1		0x3193164e
+#define clk_dsi1pll_pll_out_mux			0x171bf8fd
+#define clk_dsi1pll_post_vco_mux		0xc6a90d20
+#define clk_dsi1pll_post_vco_div1		0x6f47ca7d
+#define clk_dsi1pll_post_vco_div4		0x90628974
+#define clk_dsi1pll_bitclk_src			0x13ab045b
+#define clk_dsi1pll_vco_clk			0x99797b50
+
+#define clk_dp_vco_clk				0xfcaaeec7
+#define clk_dp_link_2x_clk_divsel_five		0xcfe3f5dd
+#define clk_vco_divsel_four_clk_src		0xe0da19c0
+#define clk_vco_divsel_two_clk_src		0xb5cfc6a8
+#define clk_vco_divided_clk_src_mux		0x3f8197c2
+#define clk_hdmi_vco_clk			0xbb7dc20d
+
+/* clock_gpu controlled clocks */
+#define clk_gpucc_xo				0xc4e1a890
+#define clk_gpucc_gpll0				0x0db0e37f
+#define clk_gfx3d_clk_src			0x917f76ef
+#define clk_rbbmtimer_clk_src			0x17649ecc
+#define clk_gfx3d_isense_clk_src		0xecc3eafa
+#define clk_rbcpr_clk_src			0x2c2e9af2
+#define clk_gpu_debug_div_clk			0x75d6f53f
+#define clk_gpucc_gfx3d_clk			0x95f01bd5
+#define clk_gpucc_rbbmtimer_clk			0x58a0a7ca
+#define clk_gpucc_gfx3d_isense_clk		0xb2678e80
+#define clk_gpucc_cxo_clk			0x6532dcae
+#define clk_gpucc_rbcpr_clk			0x7bd750e8
+#define clk_gpu_pll0_pll			0x0e61ab4d
+#define clk_gpu_pll0_pll_out_even		0xb0ed5009
+#define clk_gpu_pll0_pll_out_odd		0x08c5a8a5
+#define clk_gpu_pll0_postdiv_clk		0x76c19f3c
+#define clk_gpucc_mx_clk			0x1edbb879
+#define clk_gpucc_gcc_dbg_clk			0x9ae8cd3c
+#define clk_gfxcc_dbg_clk			0x3ed47625
+
+/* CPU clocks */
+#define clk_pwrcl_clk				0xc554130e
+#define clk_perfcl_clk				0x58869997
+#define clk_sys_apcsaux_clk_gcc			0xf905e862
+#define clk_xo_ao				0x428c856d
+#define clk_osm_clk_src				0xaabe68c3
+#define clk_cpu_debug_mux			0x3ae8bcb2
+
+/* Audio External Clocks */
+#define clk_audio_ap_clk			0x9b5727cb
+#define clk_audio_pmi_clk			0xcbfe416d
+#define clk_audio_ap_clk2			0x454d1e91
+
+/* GCC block resets */
+#define QUSB2PHY_PRIM_BCR			0
+#define QUSB2PHY_SEC_BCR			1
+#define BLSP1_BCR				2
+#define BLSP2_BCR				3
+#define BOOT_ROM_BCR				4
+#define PRNG_BCR				5
+#define UFS_BCR					6
+#define USB_30_BCR				7
+#define USB3_PHY_BCR				8
+#define USB3PHY_PHY_BCR				9
+#define PCIE_0_PHY_BCR				10
+#define PCIE_PHY_BCR				11
+#define PCIE_PHY_COM_BCR			12
+#define PCIE_PHY_NOCSR_COM_PHY_BCR		13
+
+/* MMSS block resets */
+#define CAMSS_MICRO_BCR				0
+
+#endif
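
[editor's note: unlike the hashed clock IDs, the BCR (block control reset) indices above are small consecutive integers consumed through a node's "resets" property, e.g. resets = <&clock_gcc USB_30_BCR>;. A sketch of the driver side under that assumption — the "core_reset" name and surrounding device are illustrative:]

#include <linux/delay.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/reset.h>

static int example_block_reset(struct device *dev)
{
	struct reset_control *rst;

	/* Matched against the node's resets/reset-names pair. */
	rst = devm_reset_control_get(dev, "core_reset");
	if (IS_ERR(rst))
		return PTR_ERR(rst);

	reset_control_assert(rst);
	usleep_range(10, 20);		/* hold time is block specific */
	return reset_control_deassert(rst);
}
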
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/arch/arm64/boot/dts/include/dt-bindings/clock/msm-clocks-hwio-8998.h	2019-01-22 16:16:28.171288678 +0100
@@ -0,0 +1,395 @@
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define CLKFLAG_NO_RATE_CACHE					0x00004000
+
+#define FMAX_LOWER						0
+#define FMAX_LOW						1
+#define FMAX_NOM						2
+#define FMAX_TURBO						3
+
+#define HALT_CHECK_DELAY					5
+
+#define RPM_MISC_CLK_TYPE					0x306b6c63
+#define RPM_BUS_CLK_TYPE					0x316b6c63
+#define RPM_MEM_CLK_TYPE					0x326b6c63
+#define RPM_IPA_CLK_TYPE					0x617069
+#define RPM_CE_CLK_TYPE						0x6563
+#define RPM_AGGR_CLK_TYPE					0x72676761
+#define RPM_SMD_KEY_ENABLE					0x62616E45
+#define RPM_MMAXI_CLK_TYPE					0x69786d6d
+
+#define CXO_CLK_SRC_ID						0x0
+#define QDSS_CLK_ID						0x1
+#define SNOC_CLK_ID						0x1
+#define CNOC_CLK_ID						0x2
+#define CNOC_PERIPH_CLK_ID					0x0
+#define BIMC_CLK_ID						0x0
+#define IPA_CLK_ID						0x0
+#define CE1_CLK_ID						0x0
+#define RF_CLK1_ID						0x4
+#define RF_CLK2_ID						0x5
+#define RF_CLK3_ID						0x6
+#define LN_BB_CLK1_ID						0x1
+#define LN_BB_CLK2_ID						0x2
+#define LN_BB_CLK3_ID						0x3
+#define DIV_CLK1_ID						0xb
+#define DIV_CLK2_ID						0xc
+#define DIV_CLK3_ID						0xd
+#define RF_CLK1_PIN_ID						0x4
+#define RF_CLK2_PIN_ID						0x5
+#define RF_CLK3_PIN_ID						0x6
+#define LN_BB_CLK1_PIN_ID					0x1
+#define LN_BB_CLK2_PIN_ID					0x2
+#define LN_BB_CLK3_PIN_ID					0x3
+#define AGGR1_NOC_ID						0x1
+#define AGGR2_NOC_ID						0x2
+#define MMSSNOC_AXI_CLK_ID					0x0
+
+#define APCS_COMMON_LMH_CMD_RCGR				0x0012C
+
+#define GCC_APCS_GPLL_ENA_VOTE					0x52000
+#define GCC_APCS_CLOCK_BRANCH_ENA_VOTE				0x52004
+#define GCC_APCS_CLOCK_BRANCH_ENA_VOTE_1			0x5200C
+#define PLLTEST_PAD_CFG						0x6200C
+#define GCC_XO_DIV4_CBCR					0x43008
+#define CLOCK_FRQ_MEASURE_CTL					0x62004
+#define CLOCK_FRQ_MEASURE_STATUS				0x62008
+#define GCC_GPLL0_MODE						0x00000
+#define GCC_GPLL4_MODE						0x77000
+#define GCC_USB30_MASTER_CMD_RCGR				0x0F014
+#define GCC_PCIE_AUX_CMD_RCGR					0x6C000
+#define GCC_UFS_AXI_CMD_RCGR					0x75018
+#define GCC_BLSP1_QUP1_I2C_APPS_CMD_RCGR			0x19020
+#define GCC_BLSP1_QUP1_SPI_APPS_CMD_RCGR			0x1900C
+#define GCC_BLSP1_QUP2_I2C_APPS_CMD_RCGR			0x1B020
+#define GCC_BLSP1_QUP2_SPI_APPS_CMD_RCGR			0x1B00C
+#define GCC_BLSP1_QUP3_I2C_APPS_CMD_RCGR			0x1D020
+#define GCC_BLSP1_QUP3_SPI_APPS_CMD_RCGR			0x1D00C
+#define GCC_BLSP1_QUP4_I2C_APPS_CMD_RCGR			0x1F020
+#define GCC_BLSP1_QUP4_SPI_APPS_CMD_RCGR			0x1F00C
+#define GCC_BLSP1_QUP5_I2C_APPS_CMD_RCGR			0x21020
+#define GCC_BLSP1_QUP5_SPI_APPS_CMD_RCGR			0x2100C
+#define GCC_BLSP1_QUP6_I2C_APPS_CMD_RCGR			0x23020
+#define GCC_BLSP1_QUP6_SPI_APPS_CMD_RCGR			0x2300C
+#define GCC_BLSP1_UART1_APPS_CMD_RCGR				0x1A00C
+#define GCC_BLSP1_UART2_APPS_CMD_RCGR				0x1C00C
+#define GCC_BLSP1_UART3_APPS_CMD_RCGR				0x1E00C
+#define GCC_BLSP2_QUP1_I2C_APPS_CMD_RCGR			0x26020
+#define GCC_BLSP2_QUP2_I2C_APPS_CMD_RCGR			0x28020
+#define GCC_BLSP2_QUP3_I2C_APPS_CMD_RCGR			0x2A020
+#define GCC_BLSP2_QUP4_I2C_APPS_CMD_RCGR			0x2C020
+#define GCC_BLSP2_QUP5_I2C_APPS_CMD_RCGR			0x2E020
+#define GCC_BLSP2_QUP6_I2C_APPS_CMD_RCGR			0x30020
+#define GCC_BLSP2_QUP1_SPI_APPS_CMD_RCGR			0x2600C
+#define GCC_BLSP2_QUP2_SPI_APPS_CMD_RCGR			0x2800C
+#define GCC_BLSP2_QUP3_SPI_APPS_CMD_RCGR			0x2A00C
+#define GCC_BLSP2_QUP4_SPI_APPS_CMD_RCGR			0x2C00C
+#define GCC_BLSP2_QUP5_SPI_APPS_CMD_RCGR			0x2E00C
+#define GCC_BLSP2_QUP6_SPI_APPS_CMD_RCGR			0x3000C
+#define GCC_BLSP2_UART1_APPS_CMD_RCGR				0x2700C
+#define GCC_BLSP2_UART2_APPS_CMD_RCGR				0x2900C
+#define GCC_BLSP2_UART3_APPS_CMD_RCGR				0x2B00C
+#define GCC_GP1_CMD_RCGR					0x64004
+#define GCC_GP2_CMD_RCGR					0x65004
+#define GCC_GP3_CMD_RCGR					0x66004
+#define GCC_HMSS_RBCPR_CMD_RCGR					0x48044
+#define GCC_PDM2_CMD_RCGR					0x33010
+#define GCC_SDCC2_APPS_CMD_RCGR					0x14010
+#define GCC_SDCC4_APPS_CMD_RCGR					0x16010
+#define GCC_TSIF_REF_CMD_RCGR					0x36010
+#define GCC_UFS_ICE_CORE_CMD_RCGR				0x76010
+#define GCC_UFS_PHY_AUX_CMD_RCGR				0x76044
+#define GCC_UFS_UNIPRO_CORE_CMD_RCGR				0x76028
+#define GCC_USB30_MOCK_UTMI_CMD_RCGR				0x0F028
+#define GCC_USB3_PHY_AUX_CMD_RCGR				0x5000C
+#define GCC_QSPI_REF_CMD_RCGR					0x9000C
+#define GCC_PCIE_0_PHY_BCR					0x6C01C
+#define GCC_HDMI_CLKREF_EN					0x88000
+#define GCC_UFS_CLKREF_EN					0x88004
+#define GCC_USB3_CLKREF_EN					0x88008
+#define GCC_PCIE_CLKREF_EN					0x8800C
+#define GCC_RX1_USB2_CLKREF_EN					0x88014
+#define GCC_USB3_PHY_BCR					0x50020
+#define GCC_AGGRE1_NOC_XO_CBCR					0x8202C
+#define GCC_AGGRE1_UFS_AXI_CBCR					0x82028
+#define GCC_AGGRE1_USB3_AXI_CBCR				0x82024
+#define GCC_BIMC_MSS_Q6_AXI_CBCR				0x4401C
+#define GCC_BLSP1_AHB_CBCR					0x17004
+#define GCC_BLSP1_BCR						0x17000
+#define GCC_BLSP1_QUP1_SPI_APPS_CBCR				0x19004
+#define GCC_BLSP1_QUP1_I2C_APPS_CBCR				0x19008
+#define GCC_BLSP1_QUP2_SPI_APPS_CBCR				0x1B004
+#define GCC_BLSP1_QUP2_I2C_APPS_CBCR				0x1B008
+#define GCC_BLSP1_QUP3_SPI_APPS_CBCR				0x1D004
+#define GCC_BLSP1_QUP3_I2C_APPS_CBCR				0x1D008
+#define GCC_BLSP1_QUP4_SPI_APPS_CBCR				0x1F004
+#define GCC_BLSP1_QUP4_I2C_APPS_CBCR				0x1F008
+#define GCC_BLSP1_QUP5_SPI_APPS_CBCR				0x21004
+#define GCC_BLSP1_QUP5_I2C_APPS_CBCR				0x21008
+#define GCC_BLSP1_QUP6_SPI_APPS_CBCR				0x23004
+#define GCC_BLSP1_QUP6_I2C_APPS_CBCR				0x23008
+#define GCC_BLSP1_UART1_APPS_CBCR				0x1A004
+#define GCC_BLSP1_UART2_APPS_CBCR				0x1C004
+#define GCC_BLSP1_UART3_APPS_CBCR				0x1E004
+#define GCC_BLSP2_AHB_CBCR					0x25004
+#define GCC_BLSP2_BCR						0x25000
+#define GCC_BLSP2_QUP1_SPI_APPS_CBCR				0x26004
+#define GCC_BLSP2_QUP1_I2C_APPS_CBCR				0x26008
+#define GCC_BLSP2_QUP2_I2C_APPS_CBCR				0x28008
+#define GCC_BLSP2_QUP2_SPI_APPS_CBCR				0x28004
+#define GCC_BLSP2_QUP3_SPI_APPS_CBCR				0x2A004
+#define GCC_BLSP2_QUP3_I2C_APPS_CBCR				0x2A008
+#define GCC_BLSP2_QUP4_SPI_APPS_CBCR				0x2C004
+#define GCC_BLSP2_QUP4_I2C_APPS_CBCR				0x2C008
+#define GCC_BLSP2_QUP5_SPI_APPS_CBCR				0x2E004
+#define GCC_BLSP2_QUP5_I2C_APPS_CBCR				0x2E008
+#define GCC_BLSP2_QUP6_SPI_APPS_CBCR				0x30004
+#define GCC_BLSP2_QUP6_I2C_APPS_CBCR				0x30008
+#define GCC_BLSP2_UART1_APPS_CBCR				0x27004
+#define GCC_BLSP2_UART2_APPS_CBCR				0x29004
+#define GCC_BLSP2_UART3_APPS_CBCR				0x2B004
+#define GCC_BOOT_ROM_AHB_CBCR					0x38004
+#define GCC_BOOT_ROM_BCR					0x38000
+#define GCC_CFG_NOC_USB3_AXI_CBCR				0x05018
+#define GCC_BIMC_GFX_CBCR					0x46040
+#define GCC_GP1_CBCR						0x64000
+#define GCC_GP2_CBCR						0x65000
+#define GCC_GP3_CBCR						0x66000
+#define GCC_GPU_BIMC_GFX_CBCR					0x71010
+#define GCC_GPU_CFG_AHB_CBCR					0x71004
+#define GCC_GPU_IREF_EN						0x88010
+#define GCC_HMSS_DVM_BUS_CBCR					0x4808C
+#define GCC_HMSS_RBCPR_CBCR					0x48008
+#define GCC_MMSS_SYS_NOC_AXI_CBCR				0x09000
+#define GCC_MMSS_NOC_CFG_AHB_CBCR				0x09004
+#define GCC_PCIE_0_SLV_AXI_CBCR					0x6B008
+#define GCC_PCIE_0_MSTR_AXI_CBCR				0x6B00C
+#define GCC_PCIE_0_CFG_AHB_CBCR					0x6B010
+#define GCC_PCIE_0_AUX_CBCR					0x6B014
+#define GCC_PCIE_0_PIPE_CBCR					0x6B018
+#define GCC_PCIE_PHY_AUX_CBCR					0x6F004
+#define GCC_PCIE_PHY_BCR					0x6F000
+#define GCC_PCIE_PHY_COM_BCR					0x6F014
+#define GCC_PCIE_PHY_NOCSR_COM_PHY_BCR				0x6F00C
+#define GCC_QUSB2PHY_PRIM_BCR					0x12000
+#define GCC_QUSB2PHY_SEC_BCR					0x12004
+#define GCC_PDM2_CBCR						0x3300C
+#define GCC_PDM_AHB_CBCR					0x33004
+#define GCC_PRNG_AHB_CBCR					0x34004
+#define GCC_PRNG_BCR						0x34000
+#define GCC_SDCC2_APPS_CBCR					0x14004
+#define GCC_SDCC2_AHB_CBCR					0x14008
+#define GCC_SDCC4_APPS_CBCR					0x16004
+#define GCC_SDCC4_AHB_CBCR					0x16008
+#define GCC_TSIF_AHB_CBCR					0x36004
+#define GCC_TSIF_REF_CBCR					0x36008
+#define GCC_UFS_AXI_CBCR					0x75008
+#define GCC_UFS_BCR						0x75000
+#define GCC_UFS_AHB_CBCR					0x7500C
+#define GCC_UFS_TX_SYMBOL_0_CBCR				0x75010
+#define GCC_UFS_RX_SYMBOL_0_CBCR				0x75014
+#define GCC_UFS_RX_SYMBOL_1_CBCR				0x7605C
+#define GCC_UFS_UNIPRO_CORE_CBCR				0x76008
+#define GCC_UFS_ICE_CORE_CBCR					0x7600C
+#define GCC_UFS_PHY_AUX_CBCR					0x76040
+#define GCC_USB30_MASTER_CBCR					0x0F008
+#define GCC_USB30_SLEEP_CBCR					0x0F00C
+#define GCC_USB30_MOCK_UTMI_CBCR				0x0F010
+#define GCC_USB_30_BCR						0x0F000
+#define GCC_USB3_PHY_AUX_CBCR					0x50000
+#define GCC_USB3_PHY_PIPE_CBCR					0x50004
+#define GCC_USB3PHY_PHY_BCR					0x50024
+#define GCC_APCS_CLOCK_SLEEP_ENA_VOTE				0x52008
+#define GCC_MSS_CFG_AHB_CBCR					0x8A000
+#define GCC_MSS_Q6_BIMC_AXI_CBCR				0x8A040
+#define GCC_MSS_MNOC_BIMC_AXI_CBCR				0x8A004
+#define GCC_MSS_SNOC_AXI_CBCR					0x8A03C
+#define GCC_HMSS_GPLL0_CMD_RCGR					0x4805C
+#define GCC_DCC_AHB_CBCR					0x84004
+#define GCC_HLOS1_VOTE_LPASS_CORE_SMMU_CBCR			0x7D010
+#define GCC_HLOS1_VOTE_LPASS_ADSP_SMMU_CBCR			0x7D014
+#define GCC_QSPI_AHB_CBCR					0x90004
+#define GCC_QSPI_REF_CBCR					0x90008
+#define GCC_MMSS_MISC						0x0902C
+#define GCC_GPU_MISC						0x71028
+
+#define GPUCC_GPU_PLL0_PLL_MODE					0x00000
+#define GPUCC_GPU_PLL0_USER_CTL_MODE				0x0000C
+#define GPUCC_GFX3D_CMD_RCGR					0x01070
+#define GPUCC_RBBMTIMER_CMD_RCGR				0x010B0
+#define GPUCC_GFX3D_ISENSE_CMD_RCGR				0x01100
+#define GPUCC_RBCPR_CMD_RCGR					0x01030
+#define GPUCC_GFX3D_CBCR					0x01098
+#define GPUCC_RBBMTIMER_CBCR					0x010D0
+#define GPUCC_GFX3D_ISENSE_CBCR					0x01124
+#define GPUCC_CXO_CBCR						0x01020
+#define GPUCC_RBCPR_CBCR					0x01054
+#define GPU_GX_BCR						0x01090
+#define GPUCC_GX_DOMAIN_MISC					0x00130
+#define GPUCC_GPU_DD_WRAP_CTRL					0x00430
+#define GPUCC_DEBUG_CLK_CTL					0x00120
+
+#define MMSS_PLL_VOTE_APCS					0x001E0
+#define MMSS_MMPLL0_PLL_MODE					0x0C000
+#define MMSS_MMPLL1_PLL_MODE					0x0C050
+#define MMSS_MMPLL3_PLL_MODE					0x00000
+#define MMSS_MMPLL4_PLL_MODE					0x00050
+#define MMSS_MMPLL5_PLL_MODE					0x000A0
+#define MMSS_MMPLL6_PLL_MODE					0x000F0
+#define MMSS_MMPLL7_PLL_MODE					0x00140
+#define MMSS_MMPLL10_PLL_MODE					0x00190
+#define MMSS_AHB_CMD_RCGR					0x05000
+#define MMSS_CSI0_CMD_RCGR					0x03090
+#define MMSS_VFE0_CMD_RCGR					0x03600
+#define MMSS_VFE1_CMD_RCGR					0x03620
+#define MMSS_MDP_CMD_RCGR					0x02040
+#define MMSS_MAXI_CMD_RCGR					0x0F020
+#define MMSS_CPP_CMD_RCGR					0x03640
+#define MMSS_JPEG0_CMD_RCGR					0x03500
+#define MMSS_ROT_CMD_RCGR					0x021A0
+#define MMSS_VIDEO_CORE_CMD_RCGR				0x01000
+#define MMSS_CSI1_CMD_RCGR					0x03100
+#define MMSS_CSI2_CMD_RCGR					0x03160
+#define MMSS_CSI3_CMD_RCGR					0x031C0
+#define MMSS_FD_CORE_CMD_RCGR					0x03B00
+#define MMSS_BYTE0_CMD_RCGR					0x02120
+#define MMSS_BYTE1_CMD_RCGR					0x02140
+#define MMSS_PCLK0_CMD_RCGR					0x02000
+#define MMSS_PCLK1_CMD_RCGR					0x02020
+#define MMSS_VIDEO_SUBCORE0_CMD_RCGR				0x01060
+#define MMSS_VIDEO_SUBCORE1_CMD_RCGR				0x01080
+#define MMSS_CSIPHY_CMD_RCGR					0x03800
+#define MMSS_CCI_CMD_RCGR					0x03300
+#define MMSS_CAMSS_GP0_CMD_RCGR					0x03420
+#define MMSS_CAMSS_GP1_CMD_RCGR					0x03450
+#define MMSS_MCLK0_CMD_RCGR					0x03360
+#define MMSS_MCLK1_CMD_RCGR					0x03390
+#define MMSS_MCLK2_CMD_RCGR					0x033C0
+#define MMSS_MCLK3_CMD_RCGR					0x033F0
+#define MMSS_CAMSS_CSI2PHYTIMER_CBCR				0x03084
+#define MMSS_CSI0PHYTIMER_CMD_RCGR				0x03000
+#define MMSS_CSI1PHYTIMER_CMD_RCGR				0x03030
+#define MMSS_CSI2PHYTIMER_CMD_RCGR				0x03060
+#define MMSS_DP_GTC_CMD_RCGR					0x02280
+#define MMSS_ESC0_CMD_RCGR					0x02160
+#define MMSS_ESC1_CMD_RCGR					0x02180
+#define MMSS_EXTPCLK_CMD_RCGR					0x02060
+#define MMSS_HDMI_CMD_RCGR					0x02100
+#define MMSS_VSYNC_CMD_RCGR					0x02080
+#define MMSS_BIMC_SMMU_AHB_CBCR					0x0E004
+#define MMSS_BIMC_SMMU_AXI_CBCR					0x0E008
+#define MMSS_SNOC_DVM_AXI_CBCR					0x0E040
+#define MMSS_CAMSS_AHB_CBCR					0x0348C
+#define MMSS_CAMSS_CCI_AHB_CBCR					0x03348
+#define MMSS_CAMSS_CCI_CBCR					0x03344
+#define MMSS_CAMSS_CPP_AHB_CBCR					0x036B4
+#define MMSS_CAMSS_CPP_CBCR					0x036B0
+#define MMSS_CAMSS_CPP_AXI_CBCR					0x036C4
+#define MMSS_CAMSS_CPP_VBIF_AHB_CBCR				0x036C8
+#define MMSS_CAMSS_CPHY_CSID0_CBCR				0x03730
+#define MMSS_CAMSS_CSI0_AHB_CBCR				0x030BC
+#define MMSS_CAMSS_CSI0_CBCR					0x030B4
+#define MMSS_CAMSS_CSI0PIX_CBCR					0x030E4
+#define MMSS_CAMSS_CSI0RDI_CBCR					0x030D4
+#define MMSS_CAMSS_CPHY_CSID1_CBCR				0x03734
+#define MMSS_CAMSS_CSI1_AHB_CBCR				0x03128
+#define MMSS_CAMSS_CSI1_CBCR					0x03124
+#define MMSS_CAMSS_CSI1PIX_CBCR					0x03154
+#define MMSS_CAMSS_CSI1RDI_CBCR					0x03144
+#define MMSS_CAMSS_CPHY_CSID2_CBCR				0x03738
+#define MMSS_CAMSS_CSI2_AHB_CBCR				0x03188
+#define MMSS_CAMSS_CSI2_CBCR					0x03184
+#define MMSS_CAMSS_CSI2PIX_CBCR					0x031B4
+#define MMSS_CAMSS_CSI2RDI_CBCR					0x031A4
+#define MMSS_CAMSS_CPHY_CSID3_CBCR				0x0373C
+#define MMSS_CAMSS_CSI3_AHB_CBCR				0x031E8
+#define MMSS_CAMSS_CSI3_CBCR					0x031E4
+#define MMSS_CAMSS_CSI3PIX_CBCR					0x03214
+#define MMSS_CAMSS_CSI3RDI_CBCR					0x03204
+#define MMSS_CAMSS_CSI_VFE0_CBCR				0x03704
+#define MMSS_CAMSS_CSI_VFE1_CBCR				0x03714
+#define MMSS_CAMSS_CSIPHY0_CBCR					0x03740
+#define MMSS_CAMSS_CSIPHY1_CBCR					0x03744
+#define MMSS_CAMSS_CSIPHY2_CBCR					0x03748
+#define MMSS_FD_AHB_CBCR					0x03B74
+#define MMSS_FD_CORE_CBCR					0x03B68
+#define MMSS_FD_CORE_UAR_CBCR					0x03B6C
+#define MMSS_CAMSS_GP0_CBCR					0x03444
+#define MMSS_CAMSS_GP1_CBCR					0x03474
+#define MMSS_CAMSS_ISPIF_AHB_CBCR				0x03224
+#define MMSS_CAMSS_JPEG0_CBCR					0x035A8
+#define MMSS_CAMSS_JPEG_AHB_CBCR				0x035B4
+#define MMSS_CAMSS_JPEG_AXI_CBCR				0x035B8
+#define MMSS_CAMSS_MCLK0_CBCR					0x03384
+#define MMSS_CAMSS_MCLK1_CBCR					0x033B4
+#define MMSS_CAMSS_MCLK2_CBCR					0x033E4
+#define MMSS_CAMSS_MCLK3_CBCR					0x03414
+#define MMSS_CAMSS_MICRO_AHB_CBCR				0x03494
+#define MMSS_CAMSS_CSI0PHYTIMER_CBCR				0x03024
+#define MMSS_CAMSS_CSI1PHYTIMER_CBCR				0x03054
+#define MMSS_CAMSS_TOP_AHB_CBCR					0x03484
+#define MMSS_CAMSS_VFE0_AHB_CBCR				0x03668
+#define MMSS_CAMSS_VFE0_CBCR					0x036A8
+#define MMSS_CAMSS_VFE0_STREAM_CBCR				0x03720
+#define MMSS_CAMSS_VFE1_AHB_CBCR				0x03678
+#define MMSS_CAMSS_VFE1_CBCR					0x036AC
+#define MMSS_CAMSS_VFE1_STREAM_CBCR				0x03724
+#define MMSS_CAMSS_VFE_VBIF_AHB_CBCR				0x036B8
+#define MMSS_CAMSS_VFE_VBIF_AXI_CBCR				0x036BC
+#define MMSS_MDSS_AHB_CBCR					0x02308
+#define MMSS_MDSS_AXI_CBCR					0x02310
+#define MMSS_MDSS_BYTE0_CBCR					0x0233C
+#define MMSS_MDSS_BYTE0_INTF_CBCR				0x02374
+#define MMSS_MDSS_BYTE0_INTF_DIV				0x0237C
+#define MMSS_MDSS_BYTE1_CBCR					0x02340
+#define MMSS_MDSS_BYTE1_INTF_CBCR				0x02378
+#define MMSS_MDSS_BYTE1_INTF_DIV				0x02380
+#define MMSS_MDSS_DP_AUX_CBCR					0x02364
+#define MMSS_MDSS_DP_CRYPTO_CBCR				0x0235C
+#define MMSS_MDSS_DP_GTC_CBCR					0x02368
+#define MMSS_MDSS_DP_LINK_CBCR					0x02354
+#define MMSS_MDSS_DP_LINK_INTF_CBCR				0x02358
+#define MMSS_MDSS_DP_PIXEL_CBCR					0x02360
+#define MMSS_MDSS_ESC0_CBCR					0x02344
+#define MMSS_MDSS_ESC1_CBCR					0x02348
+#define MMSS_MDSS_EXTPCLK_CBCR					0x02324
+#define MMSS_MDSS_HDMI_CBCR					0x02338
+#define MMSS_MDSS_HDMI_DP_AHB_CBCR				0x0230C
+#define MMSS_MDSS_MDP_CBCR					0x0231C
+#define MMSS_MDSS_MDP_LUT_CBCR					0x02320
+#define MMSS_MDSS_PCLK0_CBCR					0x02314
+#define MMSS_MDSS_PCLK1_CBCR					0x02318
+#define MMSS_MDSS_ROT_CBCR					0x02350
+#define MMSS_MDSS_VSYNC_CBCR					0x02328
+#define MMSS_MISC_AHB_CBCR					0x00328
+#define MMSS_MISC_CXO_CBCR					0x00324
+#define MMSS_MNOC_AHB_CBCR					0x05024
+#define MMSS_MNOC_MAXI_CBCR					0x0F004
+#define MMSS_VIDEO_SUBCORE0_CBCR				0x01048
+#define MMSS_VIDEO_SUBCORE1_CBCR				0x0104C
+#define MMSS_VIDEO_AHB_CBCR					0x01030
+#define MMSS_VIDEO_AXI_CBCR					0x01034
+#define MMSS_VIDEO_CORE_CBCR					0x01028
+#define MMSS_VIDEO_MAXI_CBCR					0x01038
+#define MMSS_VMEM_AHB_CBCR					0x0F068
+#define MMSS_VMEM_MAXI_CBCR					0x0F064
+#define MMSS_DP_AUX_CMD_RCGR					0x02260
+#define MMSS_DP_CRYPTO_CMD_RCGR					0x02220
+#define MMSS_DP_LINK_CMD_RCGR					0x02200
+#define MMSS_DP_PIXEL_CMD_RCGR					0x02240
+#define MMSS_DEBUG_CLK_CTL					0x00900
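
[editor's note: this hwio header holds raw MMIO offsets rather than DT binding IDs: each CMD_RCGR sits at the head of a root clock generator and each CBCR gates one branch clock. Enabling a branch follows the usual Qualcomm pattern of setting bit 0 of the CBCR and polling bit 31 (CLK_OFF) until the branch reports running; a sketch, with the ioremap of the controller base and the loop bound left as assumptions:]

#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/io.h>

#define CBCR_CLK_ENABLE		BIT(0)
#define CBCR_CLK_OFF		BIT(31)

static int branch_clk_enable(void __iomem *base, u32 cbcr_offset)
{
	int loops = 200;	/* arbitrary bound, ~200us worst case */
	u32 val;

	val = readl_relaxed(base + cbcr_offset);
	writel_relaxed(val | CBCR_CLK_ENABLE, base + cbcr_offset);

	/* CLK_OFF reads back as 0 once the branch is actually running. */
	while (readl_relaxed(base + cbcr_offset) & CBCR_CLK_OFF) {
		if (!loops--)
			return -ETIMEDOUT;
		udelay(1);
	}
	return 0;
}
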
diff -Nruw linux-4.4.115-fbx/arch/arm64/boot/dts/include/dt-bindings/msm/msm-bus-ids.h linux-4.4.115-fbx/arch/arm64/boot/dts/include/dt-bindings/msm/msm-bus-ids.h
--- linux-4.4.115-fbx/arch/arm64/boot/dts/include/dt-bindings/msm/msm-bus-ids.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/arch/arm64/boot/dts/include/dt-bindings/msm/msm-bus-ids.h	2019-01-22 16:16:28.179288750 +0100
@@ -0,0 +1,887 @@
+/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MSM_BUS_IDS_H
+#define __MSM_BUS_IDS_H
+
+/* Aggregation types */
+#define AGG_SCHEME_NONE	0
+#define AGG_SCHEME_LEG	1
+#define AGG_SCHEME_1	2
+
+/* Topology related enums */
+#define	MSM_BUS_FAB_DEFAULT 0
+#define	MSM_BUS_FAB_APPSS 0
+#define	MSM_BUS_FAB_SYSTEM 1024
+#define	MSM_BUS_FAB_MMSS 2048
+#define	MSM_BUS_FAB_SYSTEM_FPB 3072
+#define	MSM_BUS_FAB_CPSS_FPB 4096
+
+#define	MSM_BUS_FAB_BIMC 0
+#define	MSM_BUS_FAB_SYS_NOC 1024
+#define	MSM_BUS_FAB_MMSS_NOC 2048
+#define	MSM_BUS_FAB_OCMEM_NOC 3072
+#define	MSM_BUS_FAB_PERIPH_NOC 4096
+#define	MSM_BUS_FAB_CONFIG_NOC 5120
+#define	MSM_BUS_FAB_OCMEM_VNOC 6144
+#define	MSM_BUS_FAB_MMSS_AHB 2049
+#define	MSM_BUS_FAB_A0_NOC 6145
+#define	MSM_BUS_FAB_A1_NOC 6146
+#define	MSM_BUS_FAB_A2_NOC 6147
+#define	MSM_BUS_FAB_GNOC 6148
+#define	MSM_BUS_FAB_CR_VIRT 6149
+
+#define	MSM_BUS_MASTER_FIRST 1
+#define	MSM_BUS_MASTER_AMPSS_M0 1
+#define	MSM_BUS_MASTER_AMPSS_M1 2
+#define	MSM_BUS_APPSS_MASTER_FAB_MMSS 3
+#define	MSM_BUS_APPSS_MASTER_FAB_SYSTEM 4
+#define	MSM_BUS_SYSTEM_MASTER_FAB_APPSS 5
+#define	MSM_BUS_MASTER_SPS 6
+#define	MSM_BUS_MASTER_ADM_PORT0 7
+#define	MSM_BUS_MASTER_ADM_PORT1 8
+#define	MSM_BUS_SYSTEM_MASTER_ADM1_PORT0 9
+#define	MSM_BUS_MASTER_ADM1_PORT1 10
+#define	MSM_BUS_MASTER_LPASS_PROC 11
+#define	MSM_BUS_MASTER_MSS_PROCI 12
+#define	MSM_BUS_MASTER_MSS_PROCD 13
+#define	MSM_BUS_MASTER_MSS_MDM_PORT0 14
+#define	MSM_BUS_MASTER_LPASS 15
+#define	MSM_BUS_SYSTEM_MASTER_CPSS_FPB 16
+#define	MSM_BUS_SYSTEM_MASTER_SYSTEM_FPB 17
+#define	MSM_BUS_SYSTEM_MASTER_MMSS_FPB 18
+#define	MSM_BUS_MASTER_ADM1_CI 19
+#define	MSM_BUS_MASTER_ADM0_CI 20
+#define	MSM_BUS_MASTER_MSS_MDM_PORT1 21
+#define	MSM_BUS_MASTER_MDP_PORT0 22
+#define	MSM_BUS_MASTER_MDP_PORT1 23
+#define	MSM_BUS_MMSS_MASTER_ADM1_PORT0 24
+#define	MSM_BUS_MASTER_ROTATOR 25
+#define	MSM_BUS_MASTER_GRAPHICS_3D 26
+#define	MSM_BUS_MASTER_JPEG_DEC 27
+#define	MSM_BUS_MASTER_GRAPHICS_2D_CORE0 28
+#define	MSM_BUS_MASTER_VFE 29
+#define	MSM_BUS_MASTER_VFE0 MSM_BUS_MASTER_VFE
+#define	MSM_BUS_MASTER_VPE 30
+#define	MSM_BUS_MASTER_JPEG_ENC 31
+#define	MSM_BUS_MASTER_GRAPHICS_2D_CORE1 32
+#define	MSM_BUS_MMSS_MASTER_APPS_FAB 33
+#define	MSM_BUS_MASTER_HD_CODEC_PORT0 34
+#define	MSM_BUS_MASTER_HD_CODEC_PORT1 35
+#define	MSM_BUS_MASTER_SPDM 36
+#define	MSM_BUS_MASTER_RPM 37
+#define	MSM_BUS_MASTER_MSS 38
+#define	MSM_BUS_MASTER_RIVA 39
+#define	MSM_BUS_MASTER_SNOC_VMEM 40
+#define	MSM_BUS_MASTER_MSS_SW_PROC 41
+#define	MSM_BUS_MASTER_MSS_FW_PROC 42
+#define	MSM_BUS_MASTER_HMSS 43
+#define	MSM_BUS_MASTER_GSS_NAV 44
+#define	MSM_BUS_MASTER_PCIE 45
+#define	MSM_BUS_MASTER_SATA 46
+#define	MSM_BUS_MASTER_CRYPTO 47
+#define	MSM_BUS_MASTER_VIDEO_CAP 48
+#define	MSM_BUS_MASTER_GRAPHICS_3D_PORT1 49
+#define	MSM_BUS_MASTER_VIDEO_ENC 50
+#define	MSM_BUS_MASTER_VIDEO_DEC 51
+#define	MSM_BUS_MASTER_LPASS_AHB 52
+#define	MSM_BUS_MASTER_QDSS_BAM 53
+#define	MSM_BUS_MASTER_SNOC_CFG 54
+#define	MSM_BUS_MASTER_CRYPTO_CORE0 55
+#define	MSM_BUS_MASTER_CRYPTO_CORE1 56
+#define	MSM_BUS_MASTER_MSS_NAV 57
+#define	MSM_BUS_MASTER_OCMEM_DMA 58
+#define	MSM_BUS_MASTER_WCSS 59
+#define	MSM_BUS_MASTER_QDSS_ETR 60
+#define	MSM_BUS_MASTER_USB3 61
+#define	MSM_BUS_MASTER_JPEG 62
+#define	MSM_BUS_MASTER_VIDEO_P0 63
+#define	MSM_BUS_MASTER_VIDEO_P1 64
+#define	MSM_BUS_MASTER_MSS_PROC 65
+#define	MSM_BUS_MASTER_JPEG_OCMEM 66
+#define	MSM_BUS_MASTER_MDP_OCMEM 67
+#define	MSM_BUS_MASTER_VIDEO_P0_OCMEM 68
+#define	MSM_BUS_MASTER_VIDEO_P1_OCMEM 69
+#define	MSM_BUS_MASTER_VFE_OCMEM 70
+#define	MSM_BUS_MASTER_CNOC_ONOC_CFG 71
+#define	MSM_BUS_MASTER_RPM_INST 72
+#define	MSM_BUS_MASTER_RPM_DATA 73
+#define	MSM_BUS_MASTER_RPM_SYS 74
+#define	MSM_BUS_MASTER_DEHR 75
+#define	MSM_BUS_MASTER_QDSS_DAP 76
+#define	MSM_BUS_MASTER_TIC 77
+#define	MSM_BUS_MASTER_SDCC_1 78
+#define	MSM_BUS_MASTER_SDCC_3 79
+#define	MSM_BUS_MASTER_SDCC_4 80
+#define	MSM_BUS_MASTER_SDCC_2 81
+#define	MSM_BUS_MASTER_TSIF 82
+#define	MSM_BUS_MASTER_BAM_DMA 83
+#define	MSM_BUS_MASTER_BLSP_2 84
+#define	MSM_BUS_MASTER_USB_HSIC 85
+#define	MSM_BUS_MASTER_BLSP_1 86
+#define	MSM_BUS_MASTER_USB_HS 87
+#define	MSM_BUS_MASTER_PNOC_CFG 88
+#define	MSM_BUS_MASTER_V_OCMEM_GFX3D 89
+#define	MSM_BUS_MASTER_IPA 90
+#define	MSM_BUS_MASTER_QPIC 91
+#define	MSM_BUS_MASTER_MDPE 92
+#define	MSM_BUS_MASTER_USB_HS2 93
+#define	MSM_BUS_MASTER_VPU 94
+#define	MSM_BUS_MASTER_UFS 95
+#define	MSM_BUS_MASTER_BCAST 96
+#define	MSM_BUS_MASTER_CRYPTO_CORE2 97
+#define	MSM_BUS_MASTER_EMAC 98
+#define	MSM_BUS_MASTER_VPU_1 99
+#define	MSM_BUS_MASTER_PCIE_1 100
+#define	MSM_BUS_MASTER_USB3_1 101
+#define	MSM_BUS_MASTER_CNOC_MNOC_MMSS_CFG 102
+#define	MSM_BUS_MASTER_CNOC_MNOC_CFG 103
+#define	MSM_BUS_MASTER_TCU_0 104
+#define	MSM_BUS_MASTER_TCU_1 105
+#define	MSM_BUS_MASTER_CPP 106
+#define	MSM_BUS_MASTER_AUDIO 107
+#define	MSM_BUS_MASTER_PCIE_2 108
+#define	MSM_BUS_MASTER_VFE1 109
+#define	MSM_BUS_MASTER_XM_USB_HS1 110
+#define	MSM_BUS_MASTER_PCNOC_BIMC_1 111
+#define	MSM_BUS_MASTER_BIMC_PCNOC   112
+#define	MSM_BUS_MASTER_XI_USB_HSIC  113
+#define	MSM_BUS_MASTER_SGMII	    114
+#define	MSM_BUS_SPMI_FETCHER 115
+#define	MSM_BUS_MASTER_GNOC_BIMC 116
+#define	MSM_BUS_MASTER_CRVIRT_A2NOC 117
+#define	MSM_BUS_MASTER_CNOC_A2NOC 118
+#define	MSM_BUS_MASTER_WLAN 119
+#define	MSM_BUS_MASTER_MSS_CE 120
+#define	MSM_BUS_MASTER_CDSP_PROC 121
+#define	MSM_BUS_MASTER_GNOC_SNOC 122
+#define	MSM_BUS_MASTER_PIMEM 123
+#define	MSM_BUS_MASTER_MASTER_LAST 124
+
+#define	MSM_BUS_SYSTEM_FPB_MASTER_SYSTEM MSM_BUS_SYSTEM_MASTER_SYSTEM_FPB
+#define	MSM_BUS_CPSS_FPB_MASTER_SYSTEM MSM_BUS_SYSTEM_MASTER_CPSS_FPB
+
+#define	MSM_BUS_SNOC_MM_INT_0 10000
+#define	MSM_BUS_SNOC_MM_INT_1 10001
+#define	MSM_BUS_SNOC_MM_INT_2 10002
+#define	MSM_BUS_SNOC_MM_INT_BIMC 10003
+#define	MSM_BUS_SNOC_INT_0 10004
+#define	MSM_BUS_SNOC_INT_1 10005
+#define	MSM_BUS_SNOC_INT_BIMC 10006
+#define	MSM_BUS_SNOC_BIMC_0_MAS 10007
+#define	MSM_BUS_SNOC_BIMC_1_MAS 10008
+#define	MSM_BUS_SNOC_QDSS_INT 10009
+#define	MSM_BUS_PNOC_SNOC_MAS 10010
+#define	MSM_BUS_PNOC_SNOC_SLV 10011
+#define	MSM_BUS_PNOC_INT_0 10012
+#define	MSM_BUS_PNOC_INT_1 10013
+#define	MSM_BUS_PNOC_M_0 10014
+#define	MSM_BUS_PNOC_M_1 10015
+#define	MSM_BUS_BIMC_SNOC_MAS 10016
+#define	MSM_BUS_BIMC_SNOC_SLV 10017
+#define	MSM_BUS_PNOC_SLV_0 10018
+#define	MSM_BUS_PNOC_SLV_1 10019
+#define	MSM_BUS_PNOC_SLV_2 10020
+#define	MSM_BUS_PNOC_SLV_3 10021
+#define	MSM_BUS_PNOC_SLV_4 10022
+#define	MSM_BUS_PNOC_SLV_8 10023
+#define	MSM_BUS_PNOC_SLV_9 10024
+#define	MSM_BUS_SNOC_BIMC_0_SLV 10025
+#define	MSM_BUS_SNOC_BIMC_1_SLV 10026
+#define	MSM_BUS_MNOC_BIMC_MAS 10027
+#define	MSM_BUS_MNOC_BIMC_SLV 10028
+#define	MSM_BUS_BIMC_MNOC_MAS 10029
+#define	MSM_BUS_BIMC_MNOC_SLV 10030
+#define	MSM_BUS_SNOC_BIMC_MAS 10031
+#define	MSM_BUS_SNOC_BIMC_SLV 10032
+#define	MSM_BUS_CNOC_SNOC_MAS 10033
+#define	MSM_BUS_CNOC_SNOC_SLV 10034
+#define	MSM_BUS_SNOC_CNOC_MAS 10035
+#define	MSM_BUS_SNOC_CNOC_SLV 10036
+#define	MSM_BUS_OVNOC_SNOC_MAS 10037
+#define	MSM_BUS_OVNOC_SNOC_SLV 10038
+#define	MSM_BUS_SNOC_OVNOC_MAS 10039
+#define	MSM_BUS_SNOC_OVNOC_SLV 10040
+#define	MSM_BUS_SNOC_PNOC_MAS 10041
+#define	MSM_BUS_SNOC_PNOC_SLV 10042
+#define	MSM_BUS_BIMC_INT_APPS_EBI 10043
+#define	MSM_BUS_BIMC_INT_APPS_SNOC 10044
+#define	MSM_BUS_SNOC_BIMC_2_MAS 10045
+#define	MSM_BUS_SNOC_BIMC_2_SLV 10046
+#define	MSM_BUS_PNOC_SLV_5	10047
+#define	MSM_BUS_PNOC_SLV_7	10048
+#define	MSM_BUS_PNOC_INT_2 10049
+#define	MSM_BUS_PNOC_INT_3 10050
+#define	MSM_BUS_PNOC_INT_4 10051
+#define	MSM_BUS_PNOC_INT_5 10052
+#define	MSM_BUS_PNOC_INT_6 10053
+#define	MSM_BUS_PNOC_INT_7 10054
+#define	MSM_BUS_BIMC_SNOC_1_MAS 10055
+#define	MSM_BUS_BIMC_SNOC_1_SLV 10056
+#define	MSM_BUS_PNOC_A1NOC_MAS 10057
+#define	MSM_BUS_PNOC_A1NOC_SLV 10058
+#define	MSM_BUS_CNOC_A1NOC_MAS 10059
+#define	MSM_BUS_A0NOC_SNOC_MAS 10060
+#define	MSM_BUS_A0NOC_SNOC_SLV 10061
+#define	MSM_BUS_A1NOC_SNOC_SLV 10062
+#define	MSM_BUS_A1NOC_SNOC_MAS 10063
+#define	MSM_BUS_A2NOC_SNOC_MAS 10064
+#define	MSM_BUS_A2NOC_SNOC_SLV 10065
+#define	MSM_BUS_SNOC_INT_2 10066
+#define	MSM_BUS_A0NOC_QDSS_INT	10067
+#define	MSM_BUS_INT_LAST 10068
+
+#define	MSM_BUS_INT_TEST_ID	20000
+#define	MSM_BUS_INT_TEST_LAST	20050
+
+#define	MSM_BUS_SLAVE_FIRST 512
+#define	MSM_BUS_SLAVE_EBI_CH0 512
+#define	MSM_BUS_SLAVE_EBI_CH1 513
+#define	MSM_BUS_SLAVE_AMPSS_L2 514
+#define	MSM_BUS_APPSS_SLAVE_FAB_MMSS 515
+#define	MSM_BUS_APPSS_SLAVE_FAB_SYSTEM 516
+#define	MSM_BUS_SYSTEM_SLAVE_FAB_APPS 517
+#define	MSM_BUS_SLAVE_SPS 518
+#define	MSM_BUS_SLAVE_SYSTEM_IMEM 519
+#define	MSM_BUS_SLAVE_AMPSS 520
+#define	MSM_BUS_SLAVE_MSS 521
+#define	MSM_BUS_SLAVE_LPASS 522
+#define	MSM_BUS_SYSTEM_SLAVE_CPSS_FPB 523
+#define	MSM_BUS_SYSTEM_SLAVE_SYSTEM_FPB 524
+#define	MSM_BUS_SYSTEM_SLAVE_MMSS_FPB 525
+#define	MSM_BUS_SLAVE_CORESIGHT 526
+#define	MSM_BUS_SLAVE_RIVA 527
+#define	MSM_BUS_SLAVE_SMI 528
+#define	MSM_BUS_MMSS_SLAVE_FAB_APPS 529
+#define	MSM_BUS_MMSS_SLAVE_FAB_APPS_1 530
+#define	MSM_BUS_SLAVE_MM_IMEM 531
+#define	MSM_BUS_SLAVE_CRYPTO 532
+#define	MSM_BUS_SLAVE_SPDM 533
+#define	MSM_BUS_SLAVE_RPM 534
+#define	MSM_BUS_SLAVE_RPM_MSG_RAM 535
+#define	MSM_BUS_SLAVE_MPM 536
+#define	MSM_BUS_SLAVE_PMIC1_SSBI1_A 537
+#define	MSM_BUS_SLAVE_PMIC1_SSBI1_B 538
+#define	MSM_BUS_SLAVE_PMIC1_SSBI1_C 539
+#define	MSM_BUS_SLAVE_PMIC2_SSBI2_A 540
+#define	MSM_BUS_SLAVE_PMIC2_SSBI2_B 541
+#define	MSM_BUS_SLAVE_GSBI1_UART 542
+#define	MSM_BUS_SLAVE_GSBI2_UART 543
+#define	MSM_BUS_SLAVE_GSBI3_UART 544
+#define	MSM_BUS_SLAVE_GSBI4_UART 545
+#define	MSM_BUS_SLAVE_GSBI5_UART 546
+#define	MSM_BUS_SLAVE_GSBI6_UART 547
+#define	MSM_BUS_SLAVE_GSBI7_UART 548
+#define	MSM_BUS_SLAVE_GSBI8_UART 549
+#define	MSM_BUS_SLAVE_GSBI9_UART 550
+#define	MSM_BUS_SLAVE_GSBI10_UART 551
+#define	MSM_BUS_SLAVE_GSBI11_UART 552
+#define	MSM_BUS_SLAVE_GSBI12_UART 553
+#define	MSM_BUS_SLAVE_GSBI1_QUP 554
+#define	MSM_BUS_SLAVE_GSBI2_QUP 555
+#define	MSM_BUS_SLAVE_GSBI3_QUP 556
+#define	MSM_BUS_SLAVE_GSBI4_QUP 557
+#define	MSM_BUS_SLAVE_GSBI5_QUP 558
+#define	MSM_BUS_SLAVE_GSBI6_QUP 559
+#define	MSM_BUS_SLAVE_GSBI7_QUP 560
+#define	MSM_BUS_SLAVE_GSBI8_QUP 561
+#define	MSM_BUS_SLAVE_GSBI9_QUP 562
+#define	MSM_BUS_SLAVE_GSBI10_QUP 563
+#define	MSM_BUS_SLAVE_GSBI11_QUP 564
+#define	MSM_BUS_SLAVE_GSBI12_QUP 565
+#define	MSM_BUS_SLAVE_EBI2_NAND 566
+#define	MSM_BUS_SLAVE_EBI2_CS0 567
+#define	MSM_BUS_SLAVE_EBI2_CS1 568
+#define	MSM_BUS_SLAVE_EBI2_CS2 569
+#define	MSM_BUS_SLAVE_EBI2_CS3 570
+#define	MSM_BUS_SLAVE_EBI2_CS4 571
+#define	MSM_BUS_SLAVE_EBI2_CS5 572
+#define	MSM_BUS_SLAVE_USB_FS1 573
+#define	MSM_BUS_SLAVE_USB_FS2 574
+#define	MSM_BUS_SLAVE_TSIF 575
+#define	MSM_BUS_SLAVE_MSM_TSSC 576
+#define	MSM_BUS_SLAVE_MSM_PDM 577
+#define	MSM_BUS_SLAVE_MSM_DIMEM 578
+#define	MSM_BUS_SLAVE_MSM_TCSR 579
+#define	MSM_BUS_SLAVE_MSM_PRNG 580
+#define	MSM_BUS_SLAVE_GSS 581
+#define	MSM_BUS_SLAVE_SATA 582
+#define	MSM_BUS_SLAVE_USB3 583
+#define	MSM_BUS_SLAVE_WCSS 584
+#define	MSM_BUS_SLAVE_OCIMEM 585
+#define	MSM_BUS_SLAVE_SNOC_OCMEM 586
+#define	MSM_BUS_SLAVE_SERVICE_SNOC 587
+#define	MSM_BUS_SLAVE_QDSS_STM 588
+#define	MSM_BUS_SLAVE_CAMERA_CFG 589
+#define	MSM_BUS_SLAVE_DISPLAY_CFG 590
+#define	MSM_BUS_SLAVE_OCMEM_CFG 591
+#define	MSM_BUS_SLAVE_CPR_CFG 592
+#define	MSM_BUS_SLAVE_CPR_XPU_CFG 593
+#define	MSM_BUS_SLAVE_MISC_CFG 594
+#define	MSM_BUS_SLAVE_MISC_XPU_CFG 595
+#define	MSM_BUS_SLAVE_VENUS_CFG 596
+#define	MSM_BUS_SLAVE_MISC_VENUS_CFG 597
+#define	MSM_BUS_SLAVE_GRAPHICS_3D_CFG 598
+#define	MSM_BUS_SLAVE_MMSS_CLK_CFG 599
+#define	MSM_BUS_SLAVE_MMSS_CLK_XPU_CFG 600
+#define	MSM_BUS_SLAVE_MNOC_MPU_CFG 601
+#define	MSM_BUS_SLAVE_ONOC_MPU_CFG 602
+#define	MSM_BUS_SLAVE_SERVICE_MNOC 603
+#define	MSM_BUS_SLAVE_OCMEM 604
+#define	MSM_BUS_SLAVE_SERVICE_ONOC 605
+#define	MSM_BUS_SLAVE_SDCC_1 606
+#define	MSM_BUS_SLAVE_SDCC_3 607
+#define	MSM_BUS_SLAVE_SDCC_2 608
+#define	MSM_BUS_SLAVE_SDCC_4 609
+#define	MSM_BUS_SLAVE_BAM_DMA 610
+#define	MSM_BUS_SLAVE_BLSP_2 611
+#define	MSM_BUS_SLAVE_USB_HSIC 612
+#define	MSM_BUS_SLAVE_BLSP_1 613
+#define	MSM_BUS_SLAVE_USB_HS 614
+#define	MSM_BUS_SLAVE_PDM 615
+#define	MSM_BUS_SLAVE_PERIPH_APU_CFG 616
+#define	MSM_BUS_SLAVE_PNOC_MPU_CFG 617
+#define	MSM_BUS_SLAVE_PRNG 618
+#define	MSM_BUS_SLAVE_SERVICE_PNOC 619
+#define	MSM_BUS_SLAVE_CLK_CTL 620
+#define	MSM_BUS_SLAVE_CNOC_MSS 621
+#define	MSM_BUS_SLAVE_SECURITY 622
+#define	MSM_BUS_SLAVE_TCSR 623
+#define	MSM_BUS_SLAVE_TLMM 624
+#define	MSM_BUS_SLAVE_CRYPTO_0_CFG 625
+#define	MSM_BUS_SLAVE_CRYPTO_1_CFG 626
+#define	MSM_BUS_SLAVE_IMEM_CFG 627
+#define	MSM_BUS_SLAVE_MESSAGE_RAM 628
+#define	MSM_BUS_SLAVE_BIMC_CFG 629
+#define	MSM_BUS_SLAVE_BOOT_ROM 630
+#define	MSM_BUS_SLAVE_CNOC_MNOC_MMSS_CFG 631
+#define	MSM_BUS_SLAVE_PMIC_ARB 632
+#define	MSM_BUS_SLAVE_SPDM_WRAPPER 633
+#define	MSM_BUS_SLAVE_DEHR_CFG 634
+#define	MSM_BUS_SLAVE_QDSS_CFG 635
+#define	MSM_BUS_SLAVE_RBCPR_CFG 636
+#define	MSM_BUS_SLAVE_RBCPR_QDSS_APU_CFG 637
+#define	MSM_BUS_SLAVE_SNOC_MPU_CFG 638
+#define	MSM_BUS_SLAVE_CNOC_ONOC_CFG 639
+#define	MSM_BUS_SLAVE_CNOC_MNOC_CFG 640
+#define	MSM_BUS_SLAVE_PNOC_CFG 641
+#define	MSM_BUS_SLAVE_SNOC_CFG 642
+#define	MSM_BUS_SLAVE_EBI1_DLL_CFG 643
+#define	MSM_BUS_SLAVE_PHY_APU_CFG 644
+#define	MSM_BUS_SLAVE_EBI1_PHY_CFG 645
+#define	MSM_BUS_SLAVE_SERVICE_CNOC 646
+#define	MSM_BUS_SLAVE_IPS_CFG 647
+#define	MSM_BUS_SLAVE_QPIC 648
+#define	MSM_BUS_SLAVE_DSI_CFG 649
+#define	MSM_BUS_SLAVE_UFS_CFG 650
+#define	MSM_BUS_SLAVE_RBCPR_CX_CFG 651
+#define	MSM_BUS_SLAVE_RBCPR_MX_CFG 652
+#define	MSM_BUS_SLAVE_PCIE_CFG 653
+#define	MSM_BUS_SLAVE_USB_PHYS_CFG 654
+#define	MSM_BUS_SLAVE_VIDEO_CAP_CFG 655
+#define	MSM_BUS_SLAVE_AVSYNC_CFG 656
+#define	MSM_BUS_SLAVE_CRYPTO_2_CFG 657
+#define	MSM_BUS_SLAVE_VPU_CFG 658
+#define	MSM_BUS_SLAVE_BCAST_CFG 659
+#define	MSM_BUS_SLAVE_KLM_CFG 660
+#define	MSM_BUS_SLAVE_GENI_IR_CFG 661
+#define	MSM_BUS_SLAVE_OCMEM_GFX 662
+#define	MSM_BUS_SLAVE_CATS_128 663
+#define	MSM_BUS_SLAVE_OCMEM_64 664
+#define	MSM_BUS_SLAVE_PCIE_0 665
+#define	MSM_BUS_SLAVE_PCIE_1 666
+#define	MSM_BUS_SLAVE_PCIE_0_CFG 667
+#define	MSM_BUS_SLAVE_PCIE_1_CFG 668
+#define	MSM_BUS_SLAVE_SRVC_MNOC 669
+#define	MSM_BUS_SLAVE_USB_HS2 670
+#define	MSM_BUS_SLAVE_AUDIO 671
+#define	MSM_BUS_SLAVE_TCU 672
+#define	MSM_BUS_SLAVE_APPSS 673
+#define	MSM_BUS_SLAVE_PCIE_PARF 674
+#define	MSM_BUS_SLAVE_USB3_PHY_CFG 675
+#define	MSM_BUS_SLAVE_IPA_CFG 676
+#define	MSM_BUS_SLAVE_A0NOC_SNOC 677
+#define	MSM_BUS_SLAVE_A1NOC_SNOC 678
+#define	MSM_BUS_SLAVE_A2NOC_SNOC 679
+#define	MSM_BUS_SLAVE_HMSS_L3 680
+#define	MSM_BUS_SLAVE_PIMEM_CFG 681
+#define	MSM_BUS_SLAVE_DCC_CFG 682
+#define	MSM_BUS_SLAVE_QDSS_RBCPR_APU_CFG 683
+#define	MSM_BUS_SLAVE_PCIE_2_CFG 684
+#define	MSM_BUS_SLAVE_PCIE20_AHB2PHY 685
+#define	MSM_BUS_SLAVE_A0NOC_CFG 686
+#define	MSM_BUS_SLAVE_A1NOC_CFG 687
+#define	MSM_BUS_SLAVE_A2NOC_CFG 688
+#define	MSM_BUS_SLAVE_A1NOC_MPU_CFG 689
+#define	MSM_BUS_SLAVE_A2NOC_MPU_CFG 690
+#define	MSM_BUS_SLAVE_A0NOC_SMMU_CFG 691
+#define	MSM_BUS_SLAVE_A1NOC_SMMU_CFG 692
+#define	MSM_BUS_SLAVE_A2NOC_SMMU_CFG 693
+#define	MSM_BUS_SLAVE_LPASS_SMMU_CFG 694
+#define	MSM_BUS_SLAVE_MMAGIC_CFG 695
+#define	MSM_BUS_SLAVE_VENUS_THROTTLE_CFG 696
+#define	MSM_BUS_SLAVE_SSC_CFG 697
+#define	MSM_BUS_SLAVE_DSA_CFG 698
+#define	MSM_BUS_SLAVE_DSA_MPU_CFG 699
+#define	MSM_BUS_SLAVE_DISPLAY_THROTTLE_CFG 700
+#define	MSM_BUS_SLAVE_SMMU_CPP_CFG 701
+#define	MSM_BUS_SLAVE_SMMU_JPEG_CFG 702
+#define	MSM_BUS_SLAVE_SMMU_MDP_CFG 703
+#define	MSM_BUS_SLAVE_SMMU_ROTATOR_CFG 704
+#define	MSM_BUS_SLAVE_SMMU_VENUS_CFG 705
+#define	MSM_BUS_SLAVE_SMMU_VFE_CFG 706
+#define	MSM_BUS_SLAVE_A0NOC_MPU_CFG 707
+#define	MSM_BUS_SLAVE_VMEM_CFG 708
+#define	MSM_BUS_SLAVE_CAMERA_THROTTLE_CFG 709
+#define	MSM_BUS_SLAVE_VMEM 710
+#define	MSM_BUS_SLAVE_AHB2PHY 711
+#define	MSM_BUS_SLAVE_PIMEM 712
+#define	MSM_BUS_SLAVE_SNOC_VMEM 713
+#define	MSM_BUS_SLAVE_PCIE_2 714
+#define	MSM_BUS_SLAVE_RBCPR_MX 715
+#define	MSM_BUS_SLAVE_RBCPR_CX 716
+#define	MSM_BUS_SLAVE_BIMC_PCNOC 717
+#define	MSM_BUS_SLAVE_PCNOC_BIMC_1 718
+#define	MSM_BUS_SLAVE_SGMII 719
+#define	MSM_BUS_SLAVE_SPMI_FETCHER 720
+#define	MSM_BUS_PNOC_SLV_6 721
+#define	MSM_BUS_SLAVE_MMSS_SMMU_CFG 722
+#define	MSM_BUS_SLAVE_WLAN 723
+#define	MSM_BUS_SLAVE_CRVIRT_A2NOC 724
+#define	MSM_BUS_SLAVE_CNOC_A2NOC 725
+#define	MSM_BUS_SLAVE_GLM 726
+#define	MSM_BUS_SLAVE_GNOC_BIMC 727
+#define	MSM_BUS_SLAVE_GNOC_SNOC 728
+#define	MSM_BUS_SLAVE_QM_CFG 729
+#define	MSM_BUS_SLAVE_TLMM_EAST 730
+#define	MSM_BUS_SLAVE_TLMM_NORTH 731
+#define	MSM_BUS_SLAVE_TLMM_WEST 732
+#define	MSM_BUS_SLAVE_SKL 733
+#define	MSM_BUS_SLAVE_LPASS_TCM	734
+#define	MSM_BUS_SLAVE_TLMM_SOUTH 735
+#define	MSM_BUS_SLAVE_TLMM_CENTER 736
+#define	MSM_BUS_MSS_NAV_CE_MPU_CFG 737
+#define	MSM_BUS_SLAVE_A2NOC_THROTTLE_CFG 738
+#define	MSM_BUS_SLAVE_CDSP 739
+#define	MSM_BUS_SLAVE_CDSP_SMMU_CFG 740
+#define	MSM_BUS_SLAVE_LPASS_MPU_CFG 741
+#define	MSM_BUS_SLAVE_CSI_PHY_CFG 742
+#define	MSM_BUS_SLAVE_LAST 743
+
+#define	MSM_BUS_SYSTEM_FPB_SLAVE_SYSTEM  MSM_BUS_SYSTEM_SLAVE_SYSTEM_FPB
+#define	MSM_BUS_CPSS_FPB_SLAVE_SYSTEM MSM_BUS_SYSTEM_SLAVE_CPSS_FPB
+
+/*
+ * IDs used in RPM messages
+ */
+#define	ICBID_MASTER_APPSS_PROC 0
+#define	ICBID_MASTER_MSS_PROC 1
+#define	ICBID_MASTER_MNOC_BIMC 2
+#define	ICBID_MASTER_SNOC_BIMC 3
+#define	ICBID_MASTER_SNOC_BIMC_0 ICBID_MASTER_SNOC_BIMC
+#define	ICBID_MASTER_CNOC_MNOC_MMSS_CFG 4
+#define	ICBID_MASTER_CNOC_MNOC_CFG 5
+#define	ICBID_MASTER_GFX3D 6
+#define	ICBID_MASTER_JPEG 7
+#define	ICBID_MASTER_MDP 8
+#define	ICBID_MASTER_MDP0 ICBID_MASTER_MDP
+#define	ICBID_MASTER_MDPS ICBID_MASTER_MDP
+#define	ICBID_MASTER_VIDEO 9
+#define	ICBID_MASTER_VIDEO_P0 ICBID_MASTER_VIDEO
+#define	ICBID_MASTER_VIDEO_P1 10
+#define	ICBID_MASTER_VFE 11
+#define	ICBID_MASTER_VFE0 ICBID_MASTER_VFE
+#define	ICBID_MASTER_CNOC_ONOC_CFG 12
+#define	ICBID_MASTER_JPEG_OCMEM 13
+#define	ICBID_MASTER_MDP_OCMEM 14
+#define	ICBID_MASTER_VIDEO_P0_OCMEM 15
+#define	ICBID_MASTER_VIDEO_P1_OCMEM 16
+#define	ICBID_MASTER_VFE_OCMEM 17
+#define	ICBID_MASTER_LPASS_AHB 18
+#define	ICBID_MASTER_QDSS_BAM 19
+#define	ICBID_MASTER_SNOC_CFG 20
+#define	ICBID_MASTER_BIMC_SNOC 21
+#define	ICBID_MASTER_BIMC_SNOC_0 ICBID_MASTER_BIMC_SNOC
+#define	ICBID_MASTER_CNOC_SNOC 22
+#define	ICBID_MASTER_CRYPTO 23
+#define	ICBID_MASTER_CRYPTO_CORE0 ICBID_MASTER_CRYPTO
+#define	ICBID_MASTER_CRYPTO_CORE1 24
+#define	ICBID_MASTER_LPASS_PROC 25
+#define	ICBID_MASTER_MSS 26
+#define	ICBID_MASTER_MSS_NAV 27
+#define	ICBID_MASTER_OCMEM_DMA 28
+#define	ICBID_MASTER_PNOC_SNOC 29
+#define	ICBID_MASTER_WCSS 30
+#define	ICBID_MASTER_QDSS_ETR 31
+#define	ICBID_MASTER_USB3 32
+#define	ICBID_MASTER_USB3_0 ICBID_MASTER_USB3
+#define	ICBID_MASTER_SDCC_1 33
+#define	ICBID_MASTER_SDCC_3 34
+#define	ICBID_MASTER_SDCC_2 35
+#define	ICBID_MASTER_SDCC_4 36
+#define	ICBID_MASTER_TSIF 37
+#define	ICBID_MASTER_BAM_DMA 38
+#define	ICBID_MASTER_BLSP_2 39
+#define	ICBID_MASTER_USB_HSIC 40
+#define	ICBID_MASTER_BLSP_1 41
+#define	ICBID_MASTER_USB_HS 42
+#define	ICBID_MASTER_USB_HS1 ICBID_MASTER_USB_HS
+#define	ICBID_MASTER_PNOC_CFG 43
+#define	ICBID_MASTER_SNOC_PNOC 44
+#define	ICBID_MASTER_RPM_INST 45
+#define	ICBID_MASTER_RPM_DATA 46
+#define	ICBID_MASTER_RPM_SYS 47
+#define	ICBID_MASTER_DEHR 48
+#define	ICBID_MASTER_QDSS_DAP 49
+#define	ICBID_MASTER_SPDM 50
+#define	ICBID_MASTER_TIC 51
+#define	ICBID_MASTER_SNOC_CNOC 52
+#define	ICBID_MASTER_GFX3D_OCMEM 53
+#define	ICBID_MASTER_GFX3D_GMEM ICBID_MASTER_GFX3D_OCMEM
+#define	ICBID_MASTER_OVIRT_SNOC 54
+#define	ICBID_MASTER_SNOC_OVIRT 55
+#define	ICBID_MASTER_SNOC_GVIRT ICBID_MASTER_SNOC_OVIRT
+#define	ICBID_MASTER_ONOC_OVIRT 56
+#define	ICBID_MASTER_USB_HS2 57
+#define	ICBID_MASTER_QPIC 58
+#define	ICBID_MASTER_IPA 59
+#define	ICBID_MASTER_DSI 60
+#define	ICBID_MASTER_MDP1 61
+#define	ICBID_MASTER_MDPE ICBID_MASTER_MDP1
+#define	ICBID_MASTER_VPU_PROC 62
+#define	ICBID_MASTER_VPU 63
+#define	ICBID_MASTER_VPU0 ICBID_MASTER_VPU
+#define	ICBID_MASTER_CRYPTO_CORE2 64
+#define	ICBID_MASTER_PCIE_0 65
+#define	ICBID_MASTER_PCIE_1 66
+#define	ICBID_MASTER_SATA 67
+#define	ICBID_MASTER_UFS 68
+#define	ICBID_MASTER_USB3_1 69
+#define	ICBID_MASTER_VIDEO_OCMEM 70
+#define	ICBID_MASTER_VPU1 71
+#define	ICBID_MASTER_VCAP 72
+#define	ICBID_MASTER_EMAC 73
+#define	ICBID_MASTER_BCAST 74
+#define	ICBID_MASTER_MMSS_PROC 75
+#define	ICBID_MASTER_SNOC_BIMC_1 76
+#define	ICBID_MASTER_SNOC_PCNOC 77
+#define	ICBID_MASTER_AUDIO 78
+#define	ICBID_MASTER_MM_INT_0 79
+#define	ICBID_MASTER_MM_INT_1 80
+#define	ICBID_MASTER_MM_INT_2 81
+#define	ICBID_MASTER_MM_INT_BIMC 82
+#define	ICBID_MASTER_MSS_INT 83
+#define	ICBID_MASTER_PCNOC_CFG 84
+#define	ICBID_MASTER_PCNOC_INT_0 85
+#define	ICBID_MASTER_PCNOC_INT_1 86
+#define	ICBID_MASTER_PCNOC_M_0 87
+#define	ICBID_MASTER_PCNOC_M_1 88
+#define	ICBID_MASTER_PCNOC_S_0 89
+#define	ICBID_MASTER_PCNOC_S_1 90
+#define	ICBID_MASTER_PCNOC_S_2 91
+#define	ICBID_MASTER_PCNOC_S_3 92
+#define	ICBID_MASTER_PCNOC_S_4 93
+#define	ICBID_MASTER_PCNOC_S_6 94
+#define	ICBID_MASTER_PCNOC_S_7 95
+#define	ICBID_MASTER_PCNOC_S_8 96
+#define	ICBID_MASTER_PCNOC_S_9 97
+#define	ICBID_MASTER_QDSS_INT 98
+#define	ICBID_MASTER_SNOC_INT_0	99
+#define	ICBID_MASTER_SNOC_INT_1 100
+#define	ICBID_MASTER_SNOC_INT_BIMC 101
+#define	ICBID_MASTER_TCU_0 102
+#define	ICBID_MASTER_TCU_1 103
+#define	ICBID_MASTER_BIMC_INT_0 104
+#define	ICBID_MASTER_BIMC_INT_1 105
+#define	ICBID_MASTER_CAMERA 106
+#define	ICBID_MASTER_RICA 107
+#define	ICBID_MASTER_SNOC_BIMC_2 108
+#define	ICBID_MASTER_BIMC_SNOC_1 109
+#define	ICBID_MASTER_A0NOC_SNOC 110
+#define	ICBID_MASTER_A1NOC_SNOC 111
+#define	ICBID_MASTER_A2NOC_SNOC 112
+#define	ICBID_MASTER_PIMEM 113
+#define	ICBID_MASTER_SNOC_VMEM 114
+#define	ICBID_MASTER_CPP 115
+#define	ICBID_MASTER_CNOC_A1NOC 116
+#define	ICBID_MASTER_PNOC_A1NOC 117
+#define	ICBID_MASTER_HMSS 118
+#define	ICBID_MASTER_PCIE_2 119
+#define	ICBID_MASTER_ROTATOR 120
+#define	ICBID_MASTER_VENUS_VMEM 121
+#define	ICBID_MASTER_DCC 122
+#define	ICBID_MASTER_MCDMA 123
+#define	ICBID_MASTER_PCNOC_INT_2 124
+#define	ICBID_MASTER_PCNOC_INT_3 125
+#define	ICBID_MASTER_PCNOC_INT_4 126
+#define	ICBID_MASTER_PCNOC_INT_5 127
+#define	ICBID_MASTER_PCNOC_INT_6 128
+#define	ICBID_MASTER_PCNOC_S_5 129
+#define	ICBID_MASTER_SENSORS_AHB 130
+#define	ICBID_MASTER_SENSORS_PROC 131
+#define	ICBID_MASTER_QSPI 132
+#define	ICBID_MASTER_VFE1 133
+#define	ICBID_MASTER_SNOC_INT_2 134
+#define	ICBID_MASTER_SMMNOC_BIMC 135
+#define	ICBID_MASTER_CRVIRT_A1NOC 136
+#define	ICBID_MASTER_XM_USB_HS1 137
+#define	ICBID_MASTER_XI_USB_HS1 138
+#define	ICBID_MASTER_PCNOC_BIMC_1 139
+#define	ICBID_MASTER_BIMC_PCNOC 140
+#define	ICBID_MASTER_XI_HSIC 141
+#define	ICBID_MASTER_SGMII  142
+#define	ICBID_MASTER_SPMI_FETCHER 143
+#define	ICBID_MASTER_GNOC_BIMC 144
+#define	ICBID_MASTER_CRVIRT_A2NOC 145
+#define	ICBID_MASTER_CNOC_A2NOC 146
+#define	ICBID_MASTER_WLAN 147
+#define	ICBID_MASTER_MSS_CE 148
+#define	ICBID_MASTER_CDSP_PROC 149
+#define	ICBID_MASTER_GNOC_SNOC 150
+
+#define	ICBID_SLAVE_EBI1 0
+#define	ICBID_SLAVE_APPSS_L2 1
+#define	ICBID_SLAVE_BIMC_SNOC 2
+#define	ICBID_SLAVE_BIMC_SNOC_0 ICBID_SLAVE_BIMC_SNOC
+#define	ICBID_SLAVE_CAMERA_CFG 3
+#define	ICBID_SLAVE_DISPLAY_CFG 4
+#define	ICBID_SLAVE_OCMEM_CFG 5
+#define	ICBID_SLAVE_CPR_CFG 6
+#define	ICBID_SLAVE_CPR_XPU_CFG 7
+#define	ICBID_SLAVE_MISC_CFG 8
+#define	ICBID_SLAVE_MISC_XPU_CFG 9
+#define	ICBID_SLAVE_VENUS_CFG 10
+#define	ICBID_SLAVE_GFX3D_CFG 11
+#define	ICBID_SLAVE_MMSS_CLK_CFG 12
+#define	ICBID_SLAVE_MMSS_CLK_XPU_CFG 13
+#define	ICBID_SLAVE_MNOC_MPU_CFG 14
+#define	ICBID_SLAVE_ONOC_MPU_CFG 15
+#define	ICBID_SLAVE_MNOC_BIMC 16
+#define	ICBID_SLAVE_SERVICE_MNOC 17
+#define	ICBID_SLAVE_OCMEM 18
+#define	ICBID_SLAVE_GMEM ICBID_SLAVE_OCMEM
+#define	ICBID_SLAVE_SERVICE_ONOC 19
+#define	ICBID_SLAVE_APPSS 20
+#define	ICBID_SLAVE_LPASS 21
+#define	ICBID_SLAVE_USB3 22
+#define	ICBID_SLAVE_USB3_0 ICBID_SLAVE_USB3
+#define	ICBID_SLAVE_WCSS 23
+#define	ICBID_SLAVE_SNOC_BIMC 24
+#define	ICBID_SLAVE_SNOC_BIMC_0 ICBID_SLAVE_SNOC_BIMC
+#define	ICBID_SLAVE_SNOC_CNOC 25
+#define	ICBID_SLAVE_IMEM 26
+#define	ICBID_SLAVE_OCIMEM ICBID_SLAVE_IMEM
+#define	ICBID_SLAVE_SNOC_OVIRT 27
+#define	ICBID_SLAVE_SNOC_GVIRT ICBID_SLAVE_SNOC_OVIRT
+#define	ICBID_SLAVE_SNOC_PNOC 28
+#define	ICBID_SLAVE_SNOC_PCNOC ICBID_SLAVE_SNOC_PNOC
+#define	ICBID_SLAVE_SERVICE_SNOC 29
+#define	ICBID_SLAVE_QDSS_STM 30
+#define	ICBID_SLAVE_SDCC_1 31
+#define	ICBID_SLAVE_SDCC_3 32
+#define	ICBID_SLAVE_SDCC_2 33
+#define	ICBID_SLAVE_SDCC_4 34
+#define	ICBID_SLAVE_TSIF 35
+#define	ICBID_SLAVE_BAM_DMA 36
+#define	ICBID_SLAVE_BLSP_2 37
+#define	ICBID_SLAVE_USB_HSIC 38
+#define	ICBID_SLAVE_BLSP_1 39
+#define	ICBID_SLAVE_USB_HS 40
+#define	ICBID_SLAVE_USB_HS1 ICBID_SLAVE_USB_HS
+#define	ICBID_SLAVE_PDM 41
+#define	ICBID_SLAVE_PERIPH_APU_CFG 42
+#define	ICBID_SLAVE_PNOC_MPU_CFG 43
+#define	ICBID_SLAVE_PRNG 44
+#define	ICBID_SLAVE_PNOC_SNOC 45
+#define	ICBID_SLAVE_PCNOC_SNOC ICBID_SLAVE_PNOC_SNOC
+#define	ICBID_SLAVE_SERVICE_PNOC 46
+#define	ICBID_SLAVE_CLK_CTL 47
+#define	ICBID_SLAVE_CNOC_MSS 48
+#define	ICBID_SLAVE_PCNOC_MSS ICBID_SLAVE_CNOC_MSS
+#define	ICBID_SLAVE_SECURITY 49
+#define	ICBID_SLAVE_TCSR 50
+#define	ICBID_SLAVE_TLMM 51
+#define	ICBID_SLAVE_CRYPTO_0_CFG 52
+#define	ICBID_SLAVE_CRYPTO_1_CFG 53
+#define	ICBID_SLAVE_IMEM_CFG 54
+#define	ICBID_SLAVE_MESSAGE_RAM 55
+#define	ICBID_SLAVE_BIMC_CFG 56
+#define	ICBID_SLAVE_BOOT_ROM 57
+#define	ICBID_SLAVE_CNOC_MNOC_MMSS_CFG 58
+#define	ICBID_SLAVE_PMIC_ARB 59
+#define	ICBID_SLAVE_SPDM_WRAPPER 60
+#define	ICBID_SLAVE_DEHR_CFG 61
+#define	ICBID_SLAVE_MPM 62
+#define	ICBID_SLAVE_QDSS_CFG 63
+#define	ICBID_SLAVE_RBCPR_CFG 64
+#define	ICBID_SLAVE_RBCPR_CX_CFG ICBID_SLAVE_RBCPR_CFG
+#define	ICBID_SLAVE_RBCPR_QDSS_APU_CFG 65
+#define	ICBID_SLAVE_CNOC_MNOC_CFG 66
+#define	ICBID_SLAVE_SNOC_MPU_CFG 67
+#define	ICBID_SLAVE_CNOC_ONOC_CFG 68
+#define	ICBID_SLAVE_PNOC_CFG 69
+#define	ICBID_SLAVE_SNOC_CFG 70
+#define	ICBID_SLAVE_EBI1_DLL_CFG 71
+#define	ICBID_SLAVE_PHY_APU_CFG 72
+#define	ICBID_SLAVE_EBI1_PHY_CFG 73
+#define	ICBID_SLAVE_RPM 74
+#define	ICBID_SLAVE_CNOC_SNOC 75
+#define	ICBID_SLAVE_SERVICE_CNOC 76
+#define	ICBID_SLAVE_OVIRT_SNOC 77
+#define	ICBID_SLAVE_OVIRT_OCMEM 78
+#define	ICBID_SLAVE_USB_HS2 79
+#define	ICBID_SLAVE_QPIC 80
+#define	ICBID_SLAVE_IPS_CFG 81
+#define	ICBID_SLAVE_DSI_CFG 82
+#define	ICBID_SLAVE_USB3_1 83
+#define	ICBID_SLAVE_PCIE_0 84
+#define	ICBID_SLAVE_PCIE_1 85
+#define	ICBID_SLAVE_PSS_SMMU_CFG 86
+#define	ICBID_SLAVE_CRYPTO_2_CFG 87
+#define	ICBID_SLAVE_PCIE_0_CFG 88
+#define	ICBID_SLAVE_PCIE_1_CFG 89
+#define	ICBID_SLAVE_SATA_CFG 90
+#define	ICBID_SLAVE_SPSS_GENI_IR 91
+#define	ICBID_SLAVE_UFS_CFG 92
+#define	ICBID_SLAVE_AVSYNC_CFG 93
+#define	ICBID_SLAVE_VPU_CFG 94
+#define	ICBID_SLAVE_USB_PHY_CFG 95
+#define	ICBID_SLAVE_RBCPR_MX_CFG 96
+#define	ICBID_SLAVE_PCIE_PARF 97
+#define	ICBID_SLAVE_VCAP_CFG 98
+#define	ICBID_SLAVE_EMAC_CFG 99
+#define	ICBID_SLAVE_BCAST_CFG 100
+#define	ICBID_SLAVE_KLM_CFG 101
+#define	ICBID_SLAVE_DISPLAY_PWM 102
+#define	ICBID_SLAVE_GENI 103
+#define	ICBID_SLAVE_SNOC_BIMC_1 104
+#define	ICBID_SLAVE_AUDIO 105
+#define	ICBID_SLAVE_CATS_0 106
+#define	ICBID_SLAVE_CATS_1 107
+#define	ICBID_SLAVE_MM_INT_0 108
+#define	ICBID_SLAVE_MM_INT_1 109
+#define	ICBID_SLAVE_MM_INT_2 110
+#define	ICBID_SLAVE_MM_INT_BIMC 111
+#define	ICBID_SLAVE_MMU_MODEM_XPU_CFG 112
+#define	ICBID_SLAVE_MSS_INT 113
+#define	ICBID_SLAVE_PCNOC_INT_0 114
+#define	ICBID_SLAVE_PCNOC_INT_1 115
+#define	ICBID_SLAVE_PCNOC_M_0 116
+#define	ICBID_SLAVE_PCNOC_M_1 117
+#define	ICBID_SLAVE_PCNOC_S_0 118
+#define	ICBID_SLAVE_PCNOC_S_1 119
+#define	ICBID_SLAVE_PCNOC_S_2 120
+#define	ICBID_SLAVE_PCNOC_S_3 121
+#define	ICBID_SLAVE_PCNOC_S_4 122
+#define	ICBID_SLAVE_PCNOC_S_6 123
+#define	ICBID_SLAVE_PCNOC_S_7 124
+#define	ICBID_SLAVE_PCNOC_S_8 125
+#define	ICBID_SLAVE_PCNOC_S_9 126
+#define	ICBID_SLAVE_PRNG_XPU_CFG 127
+#define	ICBID_SLAVE_QDSS_INT 128
+#define	ICBID_SLAVE_RPM_XPU_CFG 129
+#define	ICBID_SLAVE_SNOC_INT_0 130
+#define	ICBID_SLAVE_SNOC_INT_1 131
+#define	ICBID_SLAVE_SNOC_INT_BIMC 132
+#define	ICBID_SLAVE_TCU 133
+#define	ICBID_SLAVE_BIMC_INT_0 134
+#define	ICBID_SLAVE_BIMC_INT_1 135
+#define	ICBID_SLAVE_RICA_CFG 136
+#define	ICBID_SLAVE_SNOC_BIMC_2 137
+#define	ICBID_SLAVE_BIMC_SNOC_1 138
+#define	ICBID_SLAVE_PNOC_A1NOC 139
+#define	ICBID_SLAVE_SNOC_VMEM 140
+#define	ICBID_SLAVE_A0NOC_SNOC 141
+#define	ICBID_SLAVE_A1NOC_SNOC 142
+#define	ICBID_SLAVE_A2NOC_SNOC 143
+#define	ICBID_SLAVE_A0NOC_CFG 144
+#define	ICBID_SLAVE_A0NOC_MPU_CFG 145
+#define	ICBID_SLAVE_A0NOC_SMMU_CFG 146
+#define	ICBID_SLAVE_A1NOC_CFG 147
+#define	ICBID_SLAVE_A1NOC_MPU_CFG 148
+#define	ICBID_SLAVE_A1NOC_SMMU_CFG 149
+#define	ICBID_SLAVE_A2NOC_CFG 150
+#define	ICBID_SLAVE_A2NOC_MPU_CFG 151
+#define	ICBID_SLAVE_A2NOC_SMMU_CFG 152
+#define	ICBID_SLAVE_AHB2PHY 153
+#define	ICBID_SLAVE_CAMERA_THROTTLE_CFG 154
+#define	ICBID_SLAVE_DCC_CFG 155
+#define	ICBID_SLAVE_DISPLAY_THROTTLE_CFG 156
+#define	ICBID_SLAVE_DSA_CFG 157
+#define	ICBID_SLAVE_DSA_MPU_CFG 158
+#define	ICBID_SLAVE_SSC_MPU_CFG 159
+#define	ICBID_SLAVE_HMSS_L3 160
+#define	ICBID_SLAVE_LPASS_SMMU_CFG 161
+#define	ICBID_SLAVE_MMAGIC_CFG 162
+#define	ICBID_SLAVE_PCIE20_AHB2PHY 163
+#define	ICBID_SLAVE_PCIE_2 164
+#define	ICBID_SLAVE_PCIE_2_CFG 165
+#define	ICBID_SLAVE_PIMEM 166
+#define	ICBID_SLAVE_PIMEM_CFG 167
+#define	ICBID_SLAVE_QDSS_RBCPR_APU_CFG 168
+#define	ICBID_SLAVE_RBCPR_CX 169
+#define	ICBID_SLAVE_RBCPR_MX 170
+#define	ICBID_SLAVE_SMMU_CPP_CFG 171
+#define	ICBID_SLAVE_SMMU_JPEG_CFG 172
+#define	ICBID_SLAVE_SMMU_MDP_CFG 173
+#define	ICBID_SLAVE_SMMU_ROTATOR_CFG 174
+#define	ICBID_SLAVE_SMMU_VENUS_CFG 175
+#define	ICBID_SLAVE_SMMU_VFE_CFG 176
+#define	ICBID_SLAVE_SSC_CFG 177
+#define	ICBID_SLAVE_VENUS_THROTTLE_CFG 178
+#define	ICBID_SLAVE_VMEM 179
+#define	ICBID_SLAVE_VMEM_CFG 180
+#define	ICBID_SLAVE_QDSS_MPU_CFG 181
+#define	ICBID_SLAVE_USB3_PHY_CFG 182
+#define	ICBID_SLAVE_IPA_CFG 183
+#define	ICBID_SLAVE_PCNOC_INT_2 184
+#define	ICBID_SLAVE_PCNOC_INT_3 185
+#define	ICBID_SLAVE_PCNOC_INT_4 186
+#define	ICBID_SLAVE_PCNOC_INT_5 187
+#define	ICBID_SLAVE_PCNOC_INT_6 188
+#define	ICBID_SLAVE_PCNOC_S_5 189
+#define	ICBID_SLAVE_QSPI 190
+#define	ICBID_SLAVE_A1NOC_MS_MPU_CFG 191
+#define	ICBID_SLAVE_A2NOC_MS_MPU_CFG 192
+#define	ICBID_SLAVE_MODEM_Q6_SMMU_CFG 193
+#define	ICBID_SLAVE_MSS_MPU_CFG 194
+#define	ICBID_SLAVE_MSS_PROC_MS_MPU_CFG 195
+#define	ICBID_SLAVE_SKL 196
+#define	ICBID_SLAVE_SNOC_INT_2 197
+#define	ICBID_SLAVE_SMMNOC_BIMC 198
+#define	ICBID_SLAVE_CRVIRT_A1NOC 199
+#define	ICBID_SLAVE_SGMII	 200
+#define	ICBID_SLAVE_QHS4_APPS	 201
+#define	ICBID_SLAVE_BIMC_PCNOC   202
+#define	ICBID_SLAVE_PCNOC_BIMC_1 203
+#define	ICBID_SLAVE_SPMI_FETCHER 204
+#define	ICBID_SLAVE_MMSS_SMMU_CFG 205
+#define	ICBID_SLAVE_WLAN 206
+#define	ICBID_SLAVE_CRVIRT_A2NOC 207
+#define	ICBID_SLAVE_CNOC_A2NOC 208
+#define	ICBID_SLAVE_GLM 209
+#define	ICBID_SLAVE_GNOC_BIMC 210
+#define	ICBID_SLAVE_GNOC_SNOC 211
+#define	ICBID_SLAVE_QM_CFG 212
+#define	ICBID_SLAVE_TLMM_EAST 213
+#define	ICBID_SLAVE_TLMM_NORTH 214
+#define	ICBID_SLAVE_TLMM_WEST 215
+#define	ICBID_SLAVE_LPASS_TCM	216
+#define	ICBID_SLAVE_TLMM_SOUTH	217
+#define	ICBID_SLAVE_TLMM_CENTER	218
+#define	ICBID_SLAVE_MSS_NAV_CE_MPU_CFG	219
+#define	ICBID_SLAVE_A2NOC_THROTTLE_CFG	220
+#define	ICBID_SLAVE_CDSP	221
+#define	ICBID_SLAVE_CDSP_SMMU_CFG	222
+#define	ICBID_SLAVE_LPASS_MPU_CFG	223
+#define	ICBID_SLAVE_CSI_PHY_CFG	224
+#endif
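The MSM_BUS_MASTER_* and MSM_BUS_SLAVE_* IDs above are consumed in pairs to describe a bandwidth request from a bus master to a slave endpoint. A minimal sketch, assuming a vote is a (src, dst, average-bandwidth, instantaneous-bandwidth) tuple as the msm-bus naming suggests; the struct layout and values here are illustrative, not taken from this patch:

#include <stdint.h>

#define MSM_BUS_MASTER_AMPSS_M0 1	/* from the header above */
#define MSM_BUS_SLAVE_EBI_CH0   512	/* from the header above */

struct bus_vote {
	uint32_t src;	/* MSM_BUS_MASTER_* */
	uint32_t dst;	/* MSM_BUS_SLAVE_* */
	uint64_t ab;	/* average bandwidth, bytes/s (assumed unit) */
	uint64_t ib;	/* instantaneous bandwidth, bytes/s (assumed unit) */
};

/* Hypothetical vote: CPU to DDR, peak 800 MB/s, no average floor. */
static const struct bus_vote cpu_to_ddr = {
	.src = MSM_BUS_MASTER_AMPSS_M0,
	.dst = MSM_BUS_SLAVE_EBI_CH0,
	.ab  = 0,
	.ib  = 800000000ULL,
};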
diff -Nruw linux-4.4.115/arch/arm64/boot/dts/include/dt-bindings/msm/msm-bus-rule-ops.h linux-4.4.115-fbx/arch/arm64/boot/dts/include/dt-bindings/msm/msm-bus-rule-ops.h
--- linux-4.4.115/arch/arm64/boot/dts/include/dt-bindings/msm/msm-bus-rule-ops.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/arch/arm64/boot/dts/include/dt-bindings/msm/msm-bus-rule-ops.h	2019-01-22 16:16:28.179288750 +0100
@@ -0,0 +1,34 @@
+/* Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MSM_BUS_RULE_OPS_H
+#define __MSM_BUS_RULE_OPS_H
+
+#define FLD_IB	0
+#define FLD_AB	1
+#define FLD_CLK	2
+
+#define OP_LE	0
+#define OP_LT	1
+#define OP_GE	2
+#define OP_GT	3
+#define OP_NOOP	4
+
+#define RULE_STATE_NOT_APPLIED	0
+#define RULE_STATE_APPLIED	1
+
+#define THROTTLE_ON	0
+#define THROTTLE_OFF	1
+#define THROTTLE_REG	2
+
+
+#endif
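The FLD_* and OP_* values above encode threshold rules over a bus node's instantaneous bandwidth (IB), arbitrated bandwidth (AB), or clock field. A minimal evaluator sketch for the OP_* comparators; how the bus driver actually selects fields and applies matching rules is an assumption here, only the comparator encoding is taken from the header:

#include <stdbool.h>
#include <stdint.h>

#define OP_LE	0
#define OP_LT	1
#define OP_GE	2
#define OP_GT	3
#define OP_NOOP	4

/* Illustrative only: evaluate one rule comparator against a threshold. */
static bool rule_matches(int op, uint64_t field, uint64_t threshold)
{
	switch (op) {
	case OP_LE: return field <= threshold;
	case OP_LT: return field < threshold;
	case OP_GE: return field >= threshold;
	case OP_GT: return field > threshold;
	default:    return false;	/* OP_NOOP and unknown ops never match */
	}
}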
diff -Nruw linux-4.4.115/arch/arm64/boot/dts/include/dt-bindings/msm/pm.h linux-4.4.115-fbx/arch/arm64/boot/dts/include/dt-bindings/msm/pm.h
--- linux-4.4.115/arch/arm64/boot/dts/include/dt-bindings/msm/pm.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/arch/arm64/boot/dts/include/dt-bindings/msm/pm.h	2019-01-22 16:16:28.179288750 +0100
@@ -0,0 +1,25 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __DT_MSM_PM_H__
+#define __DT_MSM_PM_H__
+
+#define LPM_RESET_LVL_NONE	0
+#define LPM_RESET_LVL_RET	1
+#define LPM_RESET_LVL_GDHS	2
+#define LPM_RESET_LVL_PC	3
+
+#define LPM_AFF_LVL_CPU		0
+#define LPM_AFF_LVL_L2		1
+#define LPM_AFF_LVL_CCI		2
+
+#endif
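The LPM_RESET_LVL_* values order low-power modes from none through retention and GDHS to full power collapse, while LPM_AFF_LVL_* names the affinity level (CPU, L2, CCI) a mode applies to. A small debug-printing sketch; the printable names are expansions of the macro suffixes and otherwise assumed:

#define LPM_RESET_LVL_NONE	0
#define LPM_RESET_LVL_RET	1
#define LPM_RESET_LVL_GDHS	2
#define LPM_RESET_LVL_PC	3

/* Illustrative only: map LPM reset levels to printable names. */
static const char *lpm_reset_name(int lvl)
{
	switch (lvl) {
	case LPM_RESET_LVL_NONE: return "none";
	case LPM_RESET_LVL_RET:  return "retention";
	case LPM_RESET_LVL_GDHS: return "gdhs";
	case LPM_RESET_LVL_PC:   return "power-collapse";
	default:                 return "unknown";
	}
}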
diff -Nruw linux-4.4.115/arch/arm64/boot/dts/include/dt-bindings/msm/power-on.h linux-4.4.115-fbx/arch/arm64/boot/dts/include/dt-bindings/msm/power-on.h
--- linux-4.4.115/arch/arm64/boot/dts/include/dt-bindings/msm/power-on.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/arch/arm64/boot/dts/include/dt-bindings/msm/power-on.h	2019-01-22 16:16:28.179288750 +0100
@@ -0,0 +1,24 @@
+/* Copyright (c) 2015, 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MSM_POWER_ON_H__
+#define __MSM_POWER_ON_H__
+
+#define PON_POWER_OFF_RESERVED		0x00
+#define PON_POWER_OFF_WARM_RESET	0x01
+#define PON_POWER_OFF_SHUTDOWN		0x04
+#define PON_POWER_OFF_DVDD_SHUTDOWN	0x05
+#define PON_POWER_OFF_HARD_RESET	0x07
+#define PON_POWER_OFF_DVDD_HARD_RESET	0x08
+#define PON_POWER_OFF_MAX_TYPE		0x10
+
+#endif
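The PON_POWER_OFF_* codes select the PMIC power-on block's power-off behaviour (warm reset, shutdown, hard reset, and their DVDD variants). A small validity-check sketch, under the assumption that PON_POWER_OFF_MAX_TYPE is an exclusive upper bound and that 0x00 is reserved:

#include <stdbool.h>

#define PON_POWER_OFF_RESERVED	0x00
#define PON_POWER_OFF_MAX_TYPE	0x10

/* Illustrative range check only; unassigned codes inside the range
 * (e.g. 0x02, 0x03) would still pass and a real driver must reject them. */
static bool pon_type_in_range(unsigned int type)
{
	return type > PON_POWER_OFF_RESERVED && type < PON_POWER_OFF_MAX_TYPE;
}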
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/arch/arm64/boot/dts/include/dt-bindings/regulator/qcom,rpm-smd-regulator.h	2019-01-22 16:16:28.183288787 +0100
@@ -0,0 +1,28 @@
+/* Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __QCOM_RPM_SMD_REGULATOR_H
+#define __QCOM_RPM_SMD_REGULATOR_H
+
+#define RPM_SMD_REGULATOR_LEVEL_NONE		0
+#define RPM_SMD_REGULATOR_LEVEL_RETENTION	16
+#define RPM_SMD_REGULATOR_LEVEL_RETENTION_PLUS	32
+#define RPM_SMD_REGULATOR_LEVEL_MIN_SVS		48
+#define RPM_SMD_REGULATOR_LEVEL_LOW_SVS		64
+#define RPM_SMD_REGULATOR_LEVEL_SVS		128
+#define RPM_SMD_REGULATOR_LEVEL_SVS_PLUS	192
+#define RPM_SMD_REGULATOR_LEVEL_NOM		256
+#define RPM_SMD_REGULATOR_LEVEL_NOM_PLUS	320
+#define RPM_SMD_REGULATOR_LEVEL_TURBO		384
+#define RPM_SMD_REGULATOR_LEVEL_BINNING		512
+
+#endif
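The RPM_SMD_REGULATOR_LEVEL_* constants form an ordered scale of performance corners (retention up through SVS, NOM, turbo, and binning) rather than literal microvolt values; that reading is inferred from their naming and spacing, not stated in the header. Treating them as an ordered scale, "at least SVS" reduces to an integer comparison, as in this sketch:

#define RPM_SMD_REGULATOR_LEVEL_SVS	128	/* from the header above */
#define RPM_SMD_REGULATOR_LEVEL_NOM	256	/* from the header above */

/* Illustrative: clamp a requested corner vote to a minimum floor. */
static int corner_with_floor(int requested, int floor_level)
{
	return requested > floor_level ? requested : floor_level;
}

/* e.g. corner_with_floor(vote, RPM_SMD_REGULATOR_LEVEL_SVS) */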
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/arch/arm64/boot/dts/qcom/apq8098-freebox-batfish.dts	2019-01-22 16:16:21.171225289 +0100
@@ -0,0 +1,692 @@
+/* Copyright (c) 2017, Freebox SAS. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/dts-v1/;
+
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/input/input.h>
+#include "apq8098-v2.1.dtsi"
+#include "msm8998-audio-freebox.dtsi"
+
+#include "msm8998-ramoops.dtsi"
+
+/ {
+	model = "Freebox Batfish";
+	compatible = "freebox,fbx7hd-batfish", "freebox,fbx7hd", "qcom,apq8098";
+	qcom,board-id = <5 2>;
+
+	ir: ir-receiver {
+		compatible = "gpio-ir-receiver";
+		gpios = <&tlmm 120 1>;
+		pinctrl-names = "default";
+		pinctrl-0 = <&ir_int>;
+		linux,rc-map-name = "rc-rc6-freebox";
+	};
+
+	fbxgpio@0 {
+		compatible = "fbx,fbxgpio";
+
+		pinctrl-names = "default";
+		pinctrl-0 = <
+			&hdmi_en
+			&hdmi_fault
+			&force_test_mode
+			&usb_hub_rst
+			&spdif_src_sel
+			>;
+
+		hdmi-enable {
+			name = "hdmi-enable";
+			gpio = <&tlmm 17 GPIO_ACTIVE_HIGH>;
+			output-low;
+		};
+
+		hdmi-fault {
+			name = "hdmi-fault";
+			gpio = <&tlmm 133 GPIO_ACTIVE_LOW>;
+			input;
+		};
+
+		test-mode {
+			name = "test-mode";
+			gpio = <&tlmm 29 GPIO_ACTIVE_LOW>;
+			input;
+		};
+
+		usb-reset {
+			name = "usb-reset";
+			gpio = <&tlmm 24 GPIO_ACTIVE_LOW>;
+			output-low;
+		};
+
+		spdif-src {
+			name = "spdif-src";
+			gpio = <&tlmm 8 GPIO_ACTIVE_LOW>;
+			output-low;
+		};
+	};
+
+	fbxgpio@1 {
+		compatible = "fbx,fbxgpio";
+
+		top-wcharging-ledr {
+			name = "top-wcharging-ledr";
+			gpio = <&exp1 0 GPIO_ACTIVE_LOW>;
+			input;
+		};
+
+		top-wcharging-ledg {
+			name = "top-wcharging-ledg";
+			gpio = <&exp1 1 GPIO_ACTIVE_LOW>;
+			input;
+		};
+
+		top-nfc-enable {
+			name = "top-nfc-enable";
+			gpio = <&exp1 2 GPIO_ACTIVE_LOW>;
+			output-low;
+		};
+
+		top-nfc-int {
+			name = "top-nfc-int";
+			gpio = <&exp1 3 GPIO_ACTIVE_LOW>;
+			input;
+		};
+
+		top-dsp-reset {
+			name = "top-dsp-reset";
+			gpio = <&exp1 4 GPIO_ACTIVE_LOW>;
+			output-low;
+		};
+
+		top-mic-mute {
+			name = "top-mic-mute";
+			gpio = <&exp1 7 GPIO_ACTIVE_HIGH>;
+			input;
+		};
+
+		top-wcharging-stby {
+			name = "top-wcharging-stby";
+			gpio = <&exp1 8 GPIO_ACTIVE_LOW>;
+			output-low;
+		};
+
+		top-dsp-int {
+			name = "top-dsp-int";
+			gpio = <&exp1 15 GPIO_ACTIVE_HIGH>;
+			input;
+		};
+	};
+
+	gpio_keys {
+		compatible = "gpio-keys";
+		#address-cells = <1>;
+		#size-cells = <0>;
+		pinctrl-0 = <&force_bank0>;
+		pinctrl-names = "default";
+
+		button@1 {
+			label = "Factory Reset Button";
+			linux,code = <KEY_SETUP>;
+			gpios = <&tlmm 18 GPIO_ACTIVE_LOW>;
+		};
+	};
+};
+
+&vendor {
+	bluetooth: bt_wcn3990 {
+		compatible = "qca,wcn3990";
+		qca,bt-vdd-io-supply = <&pm8998_s3>;
+		qca,bt-vdd-xtal-supply = <&pm8998_s5>;
+		qca,bt-vdd-core-supply = <&pm8998_l7>;
+		qca,bt-vdd-pa-supply = <&pm8998_l17>;
+		qca,bt-vdd-ldo-supply = <&pm8998_l25>;
+		//qca,bt-chip-pwd-supply = <&pmi8998_bob_pin1>;
+		clocks = <&clock_gcc clk_rf_clk2_pin>;
+		clock-names = "rf_clk2";
+
+		qca,bt-vdd-io-voltage-level = <1352000 1352000>;
+		qca,bt-vdd-xtal-voltage-level = <2040000 2040000>;
+		qca,bt-vdd-core-voltage-level = <1800000 1800000>;
+		qca,bt-vdd-pa-voltage-level = <1304000 1304000>;
+		qca,bt-vdd-ldo-voltage-level = <3312000 3312000>;
+		qca,bt-chip-pwd-voltage-level = <3600000 3600000>;
+
+		qca,bt-vdd-io-current-level = <1>; /* LPM/PFM */
+		qca,bt-vdd-xtal-current-level = <1>; /* LPM/PFM */
+		qca,bt-vdd-core-current-level = <1>; /* LPM/PFM */
+		qca,bt-vdd-pa-current-level = <1>; /* LPM/PFM */
+		qca,bt-vdd-ldo-current-level = <1>; /* LPM/PFM */
+	};
+};
+
+&blsp1_uart3_hs {
+	status = "ok";
+};
+
+&uartblsp2dm1 {
+	status = "ok";
+	pinctrl-names = "default";
+	pinctrl-0 = <&uart_console_active>;
+};
+
+&ufsphy1 {
+	vdda-phy-supply = <&pm8998_l1>;
+	vdda-pll-supply = <&pm8998_l2>;
+	vddp-ref-clk-supply = <&pm8998_l26>;
+	vdda-phy-max-microamp = <51400>;
+	vdda-pll-max-microamp = <14600>;
+	vddp-ref-clk-max-microamp = <100>;
+	vddp-ref-clk-always-on;
+	status = "ok";
+};
+
+&ufs1 {
+	vdd-hba-supply = <&gdsc_ufs>;
+	vdd-hba-fixed-regulator;
+	vcc-supply = <&pm8998_l20>;
+	vccq-supply = <&pm8998_l26>;
+	vccq2-supply = <&pm8998_s4>;
+	vcc-max-microamp = <750000>;
+	vccq-max-microamp = <560000>;
+	vccq2-max-microamp = <750000>;
+	status = "ok";
+};
+
+&ufs_ice {
+	status = "ok";
+};
+
+&i2c_5 {
+	status = "okay";
+
+	qcom,clk-freq-out = <100000>;
+
+	// ti8020@44
+	// usbmux@47
+	// si2157@60
+	// si2168@64
+	// leadtrend@68
+};
+
+&i2c_4 {
+	status = "okay";
+
+	qcom,clk-freq-out = <100000>;
+
+	lm75@48 {
+		compatible = "national,lm75";
+		reg = <0x48>;
+	};
+
+	tas5766@4c {
+		compatible = "ti,tas5766";
+		reg = <0x4c>;
+	};
+
+	tas5766@4d {
+		compatible = "ti,tas5766";
+		reg = <0x4d>;
+	};
+
+	tas5766@4e {
+		compatible = "ti,tas5766";
+		reg = <0x4e>;
+	};
+
+	// at24_audio@53
+	// at24_carrier@57
+};
+
+&i2c_6 {
+	status = "okay";
+
+	qcom,clk-freq-out = <100000>;
+
+	exp1: gpio@74 {
+		compatible = "ti,tca9539";
+		reg = <0x74>;
+		gpio-controller;
+		#gpio-cells = <2>;
+		reset-gpios = <&tlmm 96 GPIO_ACTIVE_LOW>;
+		pinctrl-names = "default";
+		pinctrl-0 = <&top_exp_rst>;
+	};
+
+	// at24_top@53
+};
+
+&i2c_7 {
+	status = "okay";
+
+	wm8804@3b {
+		compatible = "wlf,wm8804";
+		reg = <0x3b>;
+		wlf,reset-gpio = <&tlmm 100 GPIO_ACTIVE_HIGH>;
+		wlf,irq-gpio = <&tlmm 110 GPIO_ACTIVE_LOW>;
+	};
+
+	sil9437@31 {
+		compatible = "sil,sil9437";
+		reg = <0x31>;
+		sil,reset-gpio = <&tlmm 12 GPIO_ACTIVE_LOW>;
+		sil,irq-gpio = <&tlmm 118 GPIO_ACTIVE_LOW>;
+		sil,irq-open-drain;
+		sil,irq-active-low;
+	};
+
+	// hdmi_redriver@5e
+};
+
+&spi_1 {
+	status = "okay";
+
+	pinctrl-0 = <&spi_1_active &spi_1b_active &top_psoc_rst>;
+	pinctrl-1 = <&spi_1_sleep &spi_1b_sleep &top_psoc_rst>;
+	cs-gpios = <&tlmm 2 GPIO_ACTIVE_HIGH>, <&tlmm 23 GPIO_ACTIVE_HIGH>, <&tlmm 23 GPIO_ACTIVE_HIGH>, <&tlmm 23 GPIO_ACTIVE_HIGH>;
+
+	spidev@0 {
+		/* PSOC for LED & touch control */
+		compatible = "freebox,fbx7hd-top-psoc";
+		spi-max-frequency = <1200000>;
+		irq-gpio = <&tlmm 122 GPIO_ACTIVE_LOW>;
+		reset-gpio = <&tlmm 97 GPIO_ACTIVE_LOW>;
+		reg = <0>;
+	};
+
+	spidev@3 {
+		/* XMOS */
+		compatible = "spidev";
+		spi-max-frequency = <1200000>;
+		reg = <3>;
+	};
+};
+
+&spi_9 {
+	status = "okay";
+	switch@0 {
+		compatible = "realtek,rtl8367c-spi";
+		reg = <0>;
+		reset-gpios = <&tlmm 30 GPIO_ACTIVE_LOW>;
+		spi-max-frequency = <5000000>;
+	};
+};
+
+&tlmm {
+	force_bank0: force_bank0 {
+		mux {
+			pins = "gpio18";
+			function = "gpio";
+		};
+
+		config {
+			pins = "gpio18";
+			drive-strength = <2>;
+			bias-disable;
+		};
+	};
+
+	force_test_mode: force_test_mode {
+		mux {
+			pins = "gpio29";
+			function = "gpio";
+		};
+
+		config {
+			pins = "gpio29";
+			drive-strength = <2>;
+			bias-pull-down;
+		};
+	};
+
+	hdmi_en: hdmi_en {
+		mux {
+			pins = "gpio17";
+			function = "gpio";
+		};
+
+		config {
+			pins = "gpio17";
+			drive-strength = <2>;
+			bias-pull-down;
+		};
+	};
+
+	hdmi_fault: hdmi_fault {
+		mux {
+			pins = "gpio133";
+			function = "gpio";
+		};
+
+		config {
+			pins = "gpio133";
+			drive-strength = <2>;
+			bias-pull-up;
+		};
+	};
+
+	usb_hub_rst: usb_hub_rst {
+		mux {
+			pins = "gpio24";
+			function = "gpio";
+		};
+
+		config {
+			pins = "gpio24";
+			drive-strength = <2>;
+			bias-pull-down;
+		};
+	};
+
+	spdif_src_sel: spdif_src_sel {
+		mux {
+			pins = "gpio8";
+			function = "gpio";
+		};
+
+		config {
+			pins = "gpio8";
+			drive-strength = <2>;
+			bias-pull-down;
+		};
+	};
+
+	top_exp_rst: top_exp_rst {
+		mux {
+			pins = "gpio96";
+			function = "gpio";
+		};
+
+		config {
+			pins = "gpio96";
+			drive-strength = <2>;
+			bias-pull-down;
+		};
+	};
+
+	top_psoc_rst: top_psoc_rst {
+		mux {
+			pins = "gpio97";
+			function = "gpio";
+		};
+
+		config {
+			pins = "gpio97";
+			drive-strength = <2>;
+			bias-pull-down;
+		};
+	};
+
+	ir_int: ir_int {
+		mux {
+			pins = "gpio120";
+			function = "gpio";
+		};
+		config {
+			pins = "gpio120";
+			drive-strength = <6>;
+			bias-pull-up;
+		};
+	};
+
+	top_exp_psoc_irq: top_exp_psoc_irq {
+		mux {
+			pins = "gpio122";
+			function = "gpio";
+		};
+
+		config {
+			pins = "gpio122";
+			drive-strength = <2>;
+			bias-pull-down;
+		};
+	};
+};
+
+&usb3 {
+	/delete-property/ extcon;
+	dwc3@a800000 {
+		dr_mode = "host";
+	};
+};
+
+&sde_hdmi {
+	qcom,display-type = "primary";
+};
+
+&sde_hdmi_tx {
+	/* disable 5V HPD pin */
+	/delete-property/ qcom,hdmi-tx-hpd5v-gpio;
+	pinctrl-0 = <&mdss_hdmi_hpd_active
+		&mdss_hdmi_ddc_active>;
+	pinctrl-1 = <&mdss_hdmi_hpd_suspend
+		&mdss_hdmi_ddc_suspend>;
+};
+
+&spmi_bus {
+	qcom,pmi8998@2 {
+		status = "disabled";
+	};
+	qcom,pmi8998@3 {
+		status = "disabled";
+	};
+};
+
+&pmi8998_wled {
+	status = "disabled";
+};
+
+&pm8998_gpios {
+	gpio@cc00 { /* GPIO 13 */
+		qcom,mode = <1>;
+		qcom,output-type = <0>;
+		qcom,pull = <5>;
+		qcom,vin-sel = <0>;
+		qcom,out-strength = <1>;
+		qcom,src-sel = <3>;
+		qcom,master-en = <1>;
+		status = "okay";
+	};
+};
+
+&pm8998_vadc {
+	chan@83 {
+		label = "vph_pwr";
+		reg = <0x83>;
+		qcom,decimation = <2>;
+		qcom,pre-div-channel-scaling = <1>;
+		qcom,calibration-type = "absolute";
+		qcom,scale-function = <0>;
+		qcom,hw-settle-time = <0>;
+		qcom,fast-avg-setup = <0>;
+	};
+
+	chan@85 {
+		label = "vcoin";
+		reg = <0x85>;
+		qcom,decimation = <2>;
+		qcom,pre-div-channel-scaling = <1>;
+		qcom,calibration-type = "absolute";
+		qcom,scale-function = <0>;
+		qcom,hw-settle-time = <0>;
+		qcom,fast-avg-setup = <0>;
+	};
+
+	chan@4c {
+		label = "xo_therm";
+		reg = <0x4c>;
+		qcom,decimation = <2>;
+		qcom,pre-div-channel-scaling = <0>;
+		qcom,calibration-type = "ratiometric";
+		qcom,scale-function = <4>;
+		qcom,hw-settle-time = <2>;
+		qcom,fast-avg-setup = <0>;
+	};
+
+	chan@4d {
+		label = "msm_therm";
+		reg = <0x4d>;
+		qcom,decimation = <2>;
+		qcom,pre-div-channel-scaling = <0>;
+		qcom,calibration-type = "ratiometric";
+		qcom,scale-function = <2>;
+		qcom,hw-settle-time = <2>;
+		qcom,fast-avg-setup = <0>;
+	};
+
+	chan@51 {
+		label = "quiet_therm";
+		reg = <0x51>;
+		qcom,decimation = <2>;
+		qcom,pre-div-channel-scaling = <0>;
+		qcom,calibration-type = "ratiometric";
+		qcom,scale-function = <2>;
+		qcom,hw-settle-time = <2>;
+		qcom,fast-avg-setup = <0>;
+	};
+};
+
+&pm8998_adc_tm {
+	chan@83 {
+		label = "vph_pwr";
+		reg = <0x83>;
+		qcom,pre-div-channel-scaling = <1>;
+		qcom,calibration-type = "absolute";
+		qcom,scale-function = <0>;
+		qcom,hw-settle-time = <0>;
+		qcom,btm-channel-number = <0x60>;
+	};
+
+	chan@4d {
+		label = "msm_therm";
+		reg = <0x4d>;
+		qcom,pre-div-channel-scaling = <0>;
+		qcom,calibration-type = "ratiometric";
+		qcom,scale-function = <2>;
+		qcom,hw-settle-time = <2>;
+		qcom,btm-channel-number = <0x68>;
+		qcom,thermal-node;
+	};
+
+	chan@51 {
+		label = "quiet_therm";
+		reg = <0x51>;
+		qcom,pre-div-channel-scaling = <0>;
+		qcom,calibration-type = "ratiometric";
+		qcom,scale-function = <2>;
+		qcom,hw-settle-time = <2>;
+		qcom,btm-channel-number = <0x70>;
+		qcom,thermal-node;
+	};
+
+	chan@4c {
+		label = "xo_therm";
+		reg = <0x4c>;
+		qcom,pre-div-channel-scaling = <0>;
+		qcom,calibration-type = "ratiometric";
+		qcom,scale-function = <4>;
+		qcom,hw-settle-time = <2>;
+		qcom,btm-channel-number = <0x78>;
+		qcom,thermal-node;
+	};
+};
+
+&soc {
+	qcom,msm-dai-tdm-sec-rx {
+		qcom,msm-cpudai-tdm-group-id = <37136>;
+		qcom,msm-cpudai-tdm-group-num-ports = <1>;
+		qcom,msm-cpudai-tdm-group-port-id = <36880>;
+		qcom,msm-cpudai-tdm-clk-rate = <12288000>;
+		qcom,msm-cpudai-tdm-clk-internal = <1>;
+		qcom,msm-cpudai-tdm-sync-mode = <1>;
+		qcom,msm-cpudai-tdm-sync-src = <1>;
+		qcom,msm-cpudai-tdm-data-out = <1>;
+		qcom,msm-cpudai-tdm-invert-sync = <0>;
+		qcom,msm-cpudai-tdm-data-delay = <0>;
+		pinctrl-names = "default", "sleep";
+		pinctrl-0 = <&sec_aux_pcm_active
+				&sec_aux_pcm_dout_active>;
+		pinctrl-1 = <&sec_aux_pcm_sleep
+				&sec_aux_pcm_dout_sleep>;
+	};
+
+	qcom,msm-dai-mi2s {
+		dai_mi2s0: qcom,msm-dai-q6-mi2s-prim {
+			qcom,msm-mi2s-rx-lines = <1>;
+			qcom,msm-mi2s-tx-lines = <0>;
+			pinctrl-names = "default", "sleep";
+			pinctrl-0 = <&pri_mi2s_mclk_active
+					&pri_mi2s_sck_active
+					&pri_mi2s_ws_active
+					&pri_mi2s_sd0_active>;
+			pinctrl-1 = <&pri_mi2s_mclk_sleep
+					&pri_mi2s_sck_sleep
+					&pri_mi2s_ws_sleep
+					&pri_mi2s_sd0_sleep>;
+		};
+
+		dai_mi2s2: qcom,msm-dai-q6-mi2s-tert {
+			qcom,msm-mi2s-rx-lines = <0>;
+			qcom,msm-mi2s-tx-lines = <1>;
+			pinctrl-names = "default", "sleep";
+			pinctrl-0 = <&tert_mi2s_active
+					&tert_mi2s_sd0_active>;
+			pinctrl-1 = <&tert_mi2s_sleep
+					&tert_mi2s_sd0_sleep>;
+		};
+
+		dai_mi2s3: qcom,msm-dai-q6-mi2s-quat {
+			qcom,msm-mi2s-rx-lines = <0>;
+			qcom,msm-mi2s-tx-lines = <15>;
+			pinctrl-names = "default", "sleep";
+			pinctrl-0 = <&quat_mi2s_active
+					&quat_mi2s_sd0_active
+					&quat_mi2s_sd1_active
+					&quat_mi2s_sd2_active
+					&quat_mi2s_sd3_active>;
+			pinctrl-1 = <&quat_mi2s_sleep
+					&quat_mi2s_sd0_sleep
+					&quat_mi2s_sd1_sleep
+					&quat_mi2s_sd2_sleep
+					&quat_mi2s_sd3_sleep>;
+		};
+	};
+
+	sound-freebox {
+		fbx,spdif-wm8804;
+		fbx,arc-sil9437;
+	};
+};
+
+&soc {
+	qcom,bcl {
+		/delete-property/ qcom,bcl-enable;
+	};
+};
+
+&pcie0 {
+	qcom,boot-option = <0>;
+};
+
+&msm_ath10k_wlan {
+	status = "ok";
+};
+
+&tspp {
+	pinctrl-names = "disabled", "tsif0-mode1", "tsif0-mode2";
+
+	tsin0: port@0 {
+		tsin-num = <0>;
+		i2c-bus = <&i2c_5>;
+		reset-gpios = <&tlmm 84 GPIO_ACTIVE_LOW>;
+	};
+};
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/arch/arm64/boot/dts/qcom/apq8098-freebox-oarfish.dts	2019-01-22 16:16:21.171225289 +0100
@@ -0,0 +1,402 @@
+/* Copyright (c) 2017, Freebox SAS. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/dts-v1/;
+
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/input/input.h>
+#include "apq8098-v2.1.dtsi"
+#include "msm8998-audio-freebox.dtsi"
+
+#include "msm8998-ramoops.dtsi"
+
+/ {
+	model = "Freebox Oarfish";
+	compatible = "freebox,fbx7hd-oarfish", "freebox,fbx7hd", "qcom,apq8098";
+	qcom,board-id = <5 3>;
+
+	ir: ir-receiver {
+		compatible = "gpio-ir-receiver";
+		gpios = <&tlmm 120 1>;
+		pinctrl-names = "default";
+		pinctrl-0 = <&ir_int>;
+		linux,rc-map-name = "rc-rc6-freebox";
+	};
+
+	fbxgpio@0 {
+		compatible = "fbx,fbxgpio";
+
+		pinctrl-names = "default";
+		pinctrl-0 = <
+			&hdmi_en
+			&hdmi_fault
+			&force_test_mode
+			>;
+
+		hdmi-enable {
+			name = "hdmi-enable";
+			gpio = <&tlmm 17 GPIO_ACTIVE_HIGH>;
+			output-low;
+		};
+
+		hdmi-fault {
+			name = "hdmi-fault";
+			gpio = <&tlmm 133 GPIO_ACTIVE_LOW>;
+			input;
+		};
+
+		test-mode {
+			name = "test-mode";
+			gpio = <&tlmm 29 GPIO_ACTIVE_LOW>;
+			input;
+		};
+	};
+
+	gpio_keys {
+		compatible = "gpio-keys";
+		#address-cells = <1>;
+		#size-cells = <0>;
+		pinctrl-0 = <&force_bank0>;
+		pinctrl-names = "default";
+
+		button@1 {
+			label = "Factory Reset Button";
+			linux,code = <KEY_SETUP>;
+			gpios = <&tlmm 18 GPIO_ACTIVE_LOW>;
+		};
+	};
+};
+
+&vendor {
+	bluetooth: bt_wcn3990 {
+		compatible = "qca,wcn3990";
+		qca,bt-vdd-io-supply = <&pm8998_s3>;
+		qca,bt-vdd-xtal-supply = <&pm8998_s5>;
+		qca,bt-vdd-core-supply = <&pm8998_l7>;
+		qca,bt-vdd-pa-supply = <&pm8998_l17>;
+		qca,bt-vdd-ldo-supply = <&pm8998_l25>;
+		//qca,bt-chip-pwd-supply = <&pmi8998_bob_pin1>;
+		clocks = <&clock_gcc clk_rf_clk2_pin>;
+		clock-names = "rf_clk2";
+
+		qca,bt-vdd-io-voltage-level = <1352000 1352000>;
+		qca,bt-vdd-xtal-voltage-level = <2040000 2040000>;
+		qca,bt-vdd-core-voltage-level = <1800000 1800000>;
+		qca,bt-vdd-pa-voltage-level = <1304000 1304000>;
+		qca,bt-vdd-ldo-voltage-level = <3312000 3312000>;
+		qca,bt-chip-pwd-voltage-level = <3600000 3600000>;
+
+		qca,bt-vdd-io-current-level = <1>; /* LPM/PFM */
+		qca,bt-vdd-xtal-current-level = <1>; /* LPM/PFM */
+		qca,bt-vdd-core-current-level = <1>; /* LPM/PFM */
+		qca,bt-vdd-pa-current-level = <1>; /* LPM/PFM */
+		qca,bt-vdd-ldo-current-level = <1>; /* LPM/PFM */
+	};
+};
+
+&blsp1_uart3_hs {
+	status = "ok";
+};
+
+&uartblsp2dm1 {
+	status = "ok";
+	pinctrl-names = "default";
+	pinctrl-0 = <&uart_console_active>;
+};
+
+&ufsphy1 {
+	vdda-phy-supply = <&pm8998_l1>;
+	vdda-pll-supply = <&pm8998_l2>;
+	vddp-ref-clk-supply = <&pm8998_l26>;
+	vdda-phy-max-microamp = <51400>;
+	vdda-pll-max-microamp = <14600>;
+	vddp-ref-clk-max-microamp = <100>;
+	vddp-ref-clk-always-on;
+	status = "ok";
+};
+
+&ufs1 {
+	vdd-hba-supply = <&gdsc_ufs>;
+	vdd-hba-fixed-regulator;
+	vcc-supply = <&pm8998_l20>;
+	vccq-supply = <&pm8998_l26>;
+	vccq2-supply = <&pm8998_s4>;
+	vcc-max-microamp = <750000>;
+	vccq-max-microamp = <560000>;
+	vccq2-max-microamp = <750000>;
+	status = "ok";
+};
+
+&ufs_ice {
+	status = "ok";
+};
+
+&i2c_5 {
+	status = "okay";
+
+	qcom,clk-freq-out = <100000>;
+
+	// usbmux@47
+	// si2168@64
+};
+
+&i2c_4 {
+	status = "okay";
+
+	qcom,clk-freq-out = <100000>;
+
+	// at24_carrier@57
+};
+
+&i2c_7 {
+	status = "okay";
+
+	// hdmi_redriver@5e
+};
+
+&tlmm {
+	force_bank0: force_bank0 {
+		mux {
+			pins = "gpio18";
+			function = "gpio";
+		};
+
+		config {
+			pins = "gpio18";
+			drive-strength = <2>;
+			bias-disable;
+		};
+	};
+
+	force_test_mode: force_test_mode {
+		mux {
+			pins = "gpio29";
+			function = "gpio";
+		};
+
+		config {
+			pins = "gpio29";
+			drive-strength = <2>;
+			bias-pull-down;
+		};
+	};
+
+	hdmi_en: hdmi_en {
+		mux {
+			pins = "gpio17";
+			function = "gpio";
+		};
+
+		config {
+			pins = "gpio17";
+			drive-strength = <2>;
+			bias-pull-down;
+		};
+	};
+
+	hdmi_fault: hdmi_fault {
+		mux {
+			pins = "gpio133";
+			function = "gpio";
+		};
+
+		config {
+			pins = "gpio133";
+			drive-strength = <2>;
+			bias-pull-up;
+		};
+	};
+
+	ir_int: ir_int {
+		mux {
+			pins = "gpio120";
+			function = "gpio";
+		};
+		config {
+			pins = "gpio120";
+			drive-strength = <6>;
+			bias-pull-up;
+		};
+	};
+};
+
+&usb3 {
+	/delete-property/ extcon;
+	dwc3@a800000 {
+		dr_mode = "host";
+	};
+};
+
+&sde_hdmi {
+	qcom,display-type = "primary";
+};
+
+&sde_hdmi_tx {
+	/* disable 5V HPD pin */
+	/delete-property/ qcom,hdmi-tx-hpd5v-gpio;
+	pinctrl-0 = <&mdss_hdmi_hpd_active
+		&mdss_hdmi_ddc_active>;
+	pinctrl-1 = <&mdss_hdmi_hpd_suspend
+		&mdss_hdmi_ddc_suspend>;
+};
+
+&spmi_bus {
+	qcom,pmi8998@2 {
+		status = "disabled";
+	};
+	qcom,pmi8998@3 {
+		status = "disabled";
+	};
+};
+
+&pmi8998_wled {
+	status = "disabled";
+};
+
+&pm8998_gpios {
+	gpio@cc00 { /* GPIO 13 */
+		qcom,mode = <1>;
+		qcom,output-type = <0>;
+		qcom,pull = <5>;
+		qcom,vin-sel = <0>;
+		qcom,out-strength = <1>;
+		qcom,src-sel = <3>;
+		qcom,master-en = <1>;
+		status = "okay";
+	};
+};
+
+&pm8998_vadc {
+	chan@83 {
+		label = "vph_pwr";
+		reg = <0x83>;
+		qcom,decimation = <2>;
+		qcom,pre-div-channel-scaling = <1>;
+		qcom,calibration-type = "absolute";
+		qcom,scale-function = <0>;
+		qcom,hw-settle-time = <0>;
+		qcom,fast-avg-setup = <0>;
+	};
+
+	chan@85 {
+		label = "vcoin";
+		reg = <0x85>;
+		qcom,decimation = <2>;
+		qcom,pre-div-channel-scaling = <1>;
+		qcom,calibration-type = "absolute";
+		qcom,scale-function = <0>;
+		qcom,hw-settle-time = <0>;
+		qcom,fast-avg-setup = <0>;
+	};
+
+	chan@4c {
+		label = "xo_therm";
+		reg = <0x4c>;
+		qcom,decimation = <2>;
+		qcom,pre-div-channel-scaling = <0>;
+		qcom,calibration-type = "ratiometric";
+		qcom,scale-function = <4>;
+		qcom,hw-settle-time = <2>;
+		qcom,fast-avg-setup = <0>;
+	};
+
+	chan@4d {
+		label = "msm_therm";
+		reg = <0x4d>;
+		qcom,decimation = <2>;
+		qcom,pre-div-channel-scaling = <0>;
+		qcom,calibration-type = "ratiometric";
+		qcom,scale-function = <2>;
+		qcom,hw-settle-time = <2>;
+		qcom,fast-avg-setup = <0>;
+	};
+
+	chan@51 {
+		label = "quiet_therm";
+		reg = <0x51>;
+		qcom,decimation = <2>;
+		qcom,pre-div-channel-scaling = <0>;
+		qcom,calibration-type = "ratiometric";
+		qcom,scale-function = <2>;
+		qcom,hw-settle-time = <2>;
+		qcom,fast-avg-setup = <0>;
+	};
+};
+
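+/* ADC_TM mirrors the VADC channels and adds BTM slots for threshold monitoring */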
+&pm8998_adc_tm {
+	chan@83 {
+		label = "vph_pwr";
+		reg = <0x83>;
+		qcom,pre-div-channel-scaling = <1>;
+		qcom,calibration-type = "absolute";
+		qcom,scale-function = <0>;
+		qcom,hw-settle-time = <0>;
+		qcom,btm-channel-number = <0x60>;
+	};
+
+	chan@4d {
+		label = "msm_therm";
+		reg = <0x4d>;
+		qcom,pre-div-channel-scaling = <0>;
+		qcom,calibration-type = "ratiometric";
+		qcom,scale-function = <2>;
+		qcom,hw-settle-time = <2>;
+		qcom,btm-channel-number = <0x68>;
+		qcom,thermal-node;
+	};
+
+	chan@51 {
+		label = "quiet_therm";
+		reg = <0x51>;
+		qcom,pre-div-channel-scaling = <0>;
+		qcom,calibration-type = "ratiometric";
+		qcom,scale-function = <2>;
+		qcom,hw-settle-time = <2>;
+		qcom,btm-channel-number = <0x70>;
+		qcom,thermal-node;
+	};
+
+	chan@4c {
+		label = "xo_therm";
+		reg = <0x4c>;
+		qcom,pre-div-channel-scaling = <0>;
+		qcom,calibration-type = "ratiometric";
+		qcom,scale-function = <4>;
+		qcom,hw-settle-time = <2>;
+		qcom,btm-channel-number = <0x78>;
+		qcom,thermal-node;
+	};
+};
+
+&soc {
+	qcom,bcl {
+		/delete-property/ qcom,bcl-enable;
+	};
+};
+
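+/* PCIe RC0 hosts the ath10k WLAN enabled below; boot-option 0 keeps boot-time link bring-up (assumed) */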
+&pcie0 {
+	qcom,boot-option = <0>;
+};
+
+&msm_ath10k_wlan {
+	status = "ok";
+};
+
+&tspp {
+	pinctrl-names = "disabled", "tsif0-mode1", "tsif0-mode2";
+
+	tsin0: port@0 {
+		tsin-num = <0>;
+		i2c-bus = <&i2c_5>;
+		reset-gpios = <&tlmm 84 GPIO_ACTIVE_LOW>;
+	};
+};
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/arch/arm64/boot/dts/qcom/apq8098-freebox-proto.dts	2019-01-22 16:16:21.171225289 +0100
@@ -0,0 +1,515 @@
+/* Copyright (c) 2017, Freebox SAS. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/dts-v1/;
+
+#include <dt-bindings/gpio/gpio.h>
+#include "apq8098-v2.1.dtsi"
+#include "msm8998-audio-freebox.dtsi"
+
+#include "msm8998-ramoops.dtsi"
+
+/ {
+	model = "Freebox Test Carrier";
+	compatible = "freebox,fbx7hd-carrier", "freebox,fbx7hd", "qcom,apq8098";
+	qcom,board-id = <5 1>;
+
+	ir: ir-receiver {
+		compatible = "gpio-ir-receiver";
+		gpios = <&tlmm 120 GPIO_ACTIVE_LOW>;
+		pinctrl-names = "default";
+		pinctrl-0 = <&ir_int>;
+		linux,rc-map-name = "rc-rc6-freebox";
+	};
+
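+	/* fbx,fbxgpio: Freebox helper exposing fixed-direction board GPIOs (assumed driver behaviour) */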
+	fbxgpio@0 {
+		compatible = "fbx,fbxgpio";
+
+		pinctrl-names = "default";
+		pinctrl-0 = <
+			&hdmi_fault
+			&nfc_enable
+			&nfc_int
+			>;
+
+		hdmi-fault {
+			name = "hdmi-fault";
+			gpio = <&tlmm 133 GPIO_ACTIVE_LOW>;
+			input;
+		};
+
+		nfcc-enable {
+			name = "nfcc-enable";
+			gpio = <&tlmm 96 GPIO_ACTIVE_HIGH>;
+			output-low;
+		};
+
+		nfcc-irq {
+			name = "nfcc-irq";
+			gpio = <&tlmm 97 GPIO_ACTIVE_HIGH>;
+			input;
+		};
+	};
+};
+
+&vendor {
+	bluetooth: bt_wcn3990 {
+		compatible = "qca,wcn3990";
+		qca,bt-vdd-io-supply = <&pm8998_s3>;
+		qca,bt-vdd-xtal-supply = <&pm8998_s5>;
+		qca,bt-vdd-core-supply = <&pm8998_l7>;
+		qca,bt-vdd-pa-supply = <&pm8998_l17>;
+		qca,bt-vdd-ldo-supply = <&pm8998_l25>;
+		//qca,bt-chip-pwd-supply = <&pmi8998_bob_pin1>;
+		clocks = <&clock_gcc clk_rf_clk2_pin>;
+		clock-names = "rf_clk2";
+
+		qca,bt-vdd-io-voltage-level = <1352000 1352000>;
+		qca,bt-vdd-xtal-voltage-level = <2040000 2040000>;
+		qca,bt-vdd-core-voltage-level = <1800000 1800000>;
+		qca,bt-vdd-pa-voltage-level = <1304000 1304000>;
+		qca,bt-vdd-ldo-voltage-level = <3312000 3312000>;
+		qca,bt-chip-pwd-voltage-level = <3600000 3600000>;
+
+		qca,bt-vdd-io-current-level = <1>; /* LPM/PFM */
+		qca,bt-vdd-xtal-current-level = <1>; /* LPM/PFM */
+		qca,bt-vdd-core-current-level = <1>; /* LPM/PFM */
+		qca,bt-vdd-pa-current-level = <1>; /* LPM/PFM */
+		qca,bt-vdd-ldo-current-level = <1>; /* LPM/PFM */
+	};
+};
+
+&blsp1_uart3_hs {
+	status = "ok";
+};
+
+&uartblsp2dm1 {
+	status = "ok";
+	pinctrl-names = "default";
+	pinctrl-0 = <&uart_console_active>;
+};
+
+&ufsphy1 {
+	vdda-phy-supply = <&pm8998_l1>;
+	vdda-pll-supply = <&pm8998_l2>;
+	vddp-ref-clk-supply = <&pm8998_l26>;
+	vdda-phy-max-microamp = <51400>;
+	vdda-pll-max-microamp = <14600>;
+	vddp-ref-clk-max-microamp = <100>;
+	vddp-ref-clk-always-on;
+	status = "ok";
+};
+
+&ufs1 {
+	vdd-hba-supply = <&gdsc_ufs>;
+	vdd-hba-fixed-regulator;
+	vcc-supply = <&pm8998_l20>;
+	vccq-supply = <&pm8998_l26>;
+	vccq2-supply = <&pm8998_s4>;
+	vcc-max-microamp = <750000>;
+	vccq-max-microamp = <560000>;
+	vccq2-max-microamp = <750000>;
+	status = "ok";
+};
+
+&ufs_ice {
+	status = "ok";
+};
+
+&i2c_5 {
+	status = "okay";
+	// si2168@64
+};
+
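+/* Audio/monitoring I2C: LM75 temperature sensor and three TAS5766 class-D amplifiers */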
+&i2c_4 {
+	status = "okay";
+
+	qcom,clk-freq-out = <100000>;
+
+	lm75@48 {
+		compatible = "national,lm75";
+		reg = <0x48>;
+	};
+
+	tas5766@4c {
+		compatible = "ti,tas5766";
+		reg = <0x4c>;
+	};
+
+	tas5766@4d {
+		compatible = "ti,tas5766";
+		reg = <0x4d>;
+	};
+
+	tas5766@4e {
+		compatible = "ti,tas5766";
+		reg = <0x4e>;
+	};
+};
+
+&i2c_6 {
+	status = "okay";
+};
+
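+/* S/PDIF and ARC front-ends: WM8804 digital transceiver and SiI9437 eARC receiver */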
+&i2c_7 {
+	status = "okay";
+
+	wm8804@3b {
+		compatible = "wlf,wm8804";
+		reg = <0x3b>;
+		wlf,reset-gpio = <&tlmm 100 GPIO_ACTIVE_HIGH>;
+	};
+
+	sil9437@31 {
+		compatible = "sil,sil9437";
+		reg = <0x31>;
+		sil,reset-gpio = <&tlmm 12 GPIO_ACTIVE_LOW>;
+		sil,irq-gpio = <&tlmm 118 GPIO_ACTIVE_LOW>;
+		sil,irq-open-drain;
+		sil,irq-active-low;
+	};
+
+	/delete-node/ qcom,smb138x@8;
+};
+
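+/* External RTL8367C Ethernet switch on SPI9, hard reset on TLMM GPIO 30 */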
+&spi_9 {
+	status = "okay";
+	switch@0 {
+		compatible = "realtek,rtl8367c-spi";
+		reg = <0>;
+		reset-gpios = <&tlmm 30 GPIO_ACTIVE_LOW>;
+		spi-max-frequency = <5000000>;
+	};
+};
+
+&spi_10 {
+	status = "disabled";
+};
+
+&tlmm {
+	cdc_reset_ctrl_batfish {
+		cdc_reset_batfish_sleep: cdc_reset_batfish_sleep {
+			mux {
+				pins = "gpio25";
+				function = "gpio";
+			};
+			config {
+				pins = "gpio25";
+				drive-strength = <16>;
+				bias-disable;
+				output-low;
+			};
+		};
+		cdc_reset_batfish_active: cdc_reset_batfish_active {
+			mux {
+				pins = "gpio25";
+				function = "gpio";
+			};
+			config {
+				pins = "gpio25";
+				drive-strength = <16>;
+				bias-pull-down;
+				output-high;
+			};
+		};
+	};
+
+	nfc {
+		nfc_enable: nfc_enable {
+			mux {
+				pins = "gpio96";
+				function = "gpio";
+			};
+
+			config {
+				pins = "gpio96";
+				drive-strength = <2>;
+				bias-pull-down;
+			};
+		};
+
+		nfc_int: nfc_int {
+			mux {
+				pins = "gpio97";
+				function = "gpio";
+			};
+
+			config {
+				pins = "gpio97";
+				drive-strength = <2>;
+				bias-pull-down;
+			};
+		};
+	};
+
+	hdmi_fault: hdmi_fault {
+		mux {
+			pins = "gpio133";
+			function = "gpio";
+		};
+
+		config {
+			pins = "gpio133";
+			drive-strength = <2>;
+			bias-pull-up;
+		};
+	};
+
+	ir_int: ir_int {
+		mux {
+			pins = "gpio120";
+			function = "gpio";
+		};
+		config {
+			pins = "gpio120";
+			drive-strength = <6>;
+			bias-pull-up;
+		};
+	};
+};
+
+&usb3 {
+	/delete-property/ extcon;
+	dwc3@a800000 {
+		dr_mode = "host";
+	};
+};
+
+&sde_hdmi {
+	qcom,display-type = "primary";
+};
+
+&sde_hdmi_tx {
+	/* disable 5V HPD pin */
+	/delete-property/ qcom,hdmi-tx-hpd5v-gpio;
+	pinctrl-0 = <&mdss_hdmi_hpd_active
+		&mdss_hdmi_ddc_active>;
+	pinctrl-1 = <&mdss_hdmi_hpd_suspend
+		&mdss_hdmi_ddc_suspend>;
+};
+
+&spmi_bus {
+	qcom,pmi8998@2 {
+		status = "disabled";
+	};
+	qcom,pmi8998@3 {
+		status = "disabled";
+	};
+};
+
+&pmi8998_wled {
+	status = "disabled";
+};
+
+&pm8998_gpios {
+	gpio@cc00 { /* GPIO 13 */
+		qcom,mode = <1>;
+		qcom,output-type = <0>;
+		qcom,pull = <5>;
+		qcom,vin-sel = <0>;
+		qcom,out-strength = <1>;
+		qcom,src-sel = <3>;
+		qcom,master-en = <1>;
+		status = "okay";
+	};
+};
+
+&pm8998_vadc {
+	chan@83 {
+		label = "vph_pwr";
+		reg = <0x83>;
+		qcom,decimation = <2>;
+		qcom,pre-div-channel-scaling = <1>;
+		qcom,calibration-type = "absolute";
+		qcom,scale-function = <0>;
+		qcom,hw-settle-time = <0>;
+		qcom,fast-avg-setup = <0>;
+	};
+
+	chan@85 {
+		label = "vcoin";
+		reg = <0x85>;
+		qcom,decimation = <2>;
+		qcom,pre-div-channel-scaling = <1>;
+		qcom,calibration-type = "absolute";
+		qcom,scale-function = <0>;
+		qcom,hw-settle-time = <0>;
+		qcom,fast-avg-setup = <0>;
+	};
+
+	chan@4c {
+		label = "xo_therm";
+		reg = <0x4c>;
+		qcom,decimation = <2>;
+		qcom,pre-div-channel-scaling = <0>;
+		qcom,calibration-type = "ratiometric";
+		qcom,scale-function = <4>;
+		qcom,hw-settle-time = <2>;
+		qcom,fast-avg-setup = <0>;
+	};
+
+	chan@4d {
+		label = "msm_therm";
+		reg = <0x4d>;
+		qcom,decimation = <2>;
+		qcom,pre-div-channel-scaling = <0>;
+		qcom,calibration-type = "ratiometric";
+		qcom,scale-function = <2>;
+		qcom,hw-settle-time = <2>;
+		qcom,fast-avg-setup = <0>;
+	};
+
+	chan@51 {
+		label = "quiet_therm";
+		reg = <0x51>;
+		qcom,decimation = <2>;
+		qcom,pre-div-channel-scaling = <0>;
+		qcom,calibration-type = "ratiometric";
+		qcom,scale-function = <2>;
+		qcom,hw-settle-time = <2>;
+		qcom,fast-avg-setup = <0>;
+	};
+};
+
+&pm8998_adc_tm {
+	chan@83 {
+		label = "vph_pwr";
+		reg = <0x83>;
+		qcom,pre-div-channel-scaling = <1>;
+		qcom,calibration-type = "absolute";
+		qcom,scale-function = <0>;
+		qcom,hw-settle-time = <0>;
+		qcom,btm-channel-number = <0x60>;
+	};
+
+	chan@4d {
+		label = "msm_therm";
+		reg = <0x4d>;
+		qcom,pre-div-channel-scaling = <0>;
+		qcom,calibration-type = "ratiometric";
+		qcom,scale-function = <2>;
+		qcom,hw-settle-time = <2>;
+		qcom,btm-channel-number = <0x68>;
+		qcom,thermal-node;
+	};
+
+	chan@51 {
+		label = "quiet_therm";
+		reg = <0x51>;
+		qcom,pre-div-channel-scaling = <0>;
+		qcom,calibration-type = "ratiometric";
+		qcom,scale-function = <2>;
+		qcom,hw-settle-time = <2>;
+		qcom,btm-channel-number = <0x70>;
+		qcom,thermal-node;
+	};
+
+	chan@4c {
+		label = "xo_therm";
+		reg = <0x4c>;
+		qcom,pre-div-channel-scaling = <0>;
+		qcom,calibration-type = "ratiometric";
+		qcom,scale-function = <4>;
+		qcom,hw-settle-time = <2>;
+		qcom,btm-channel-number = <0x78>;
+		qcom,thermal-node;
+	};
+};
+
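+/*
+ * DAI wiring: secondary TDM output (internally clocked), primary MI2S for
+ * playback, tertiary MI2S for capture, quaternary MI2S capture on all four
+ * data lines.
+ */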
+&soc {
+	qcom,msm-dai-tdm-sec-rx {
+		qcom,msm-cpudai-tdm-group-id = <37136>;
+		qcom,msm-cpudai-tdm-group-num-ports = <1>;
+		qcom,msm-cpudai-tdm-group-port-id = <36880>;
+		qcom,msm-cpudai-tdm-clk-rate = <12288000>;
+		qcom,msm-cpudai-tdm-clk-internal = <1>;
+		qcom,msm-cpudai-tdm-sync-mode = <1>;
+		qcom,msm-cpudai-tdm-sync-src = <1>;
+		qcom,msm-cpudai-tdm-data-out = <1>;
+		qcom,msm-cpudai-tdm-invert-sync = <0>;
+		qcom,msm-cpudai-tdm-data-delay = <0>;
+		pinctrl-names = "default", "sleep";
+		pinctrl-0 = <&sec_aux_pcm_active
+				&sec_aux_pcm_dout_active>;
+		pinctrl-1 = <&sec_aux_pcm_sleep
+				&sec_aux_pcm_dout_sleep>;
+	};
+
+	qcom,msm-dai-mi2s {
+		dai_mi2s0: qcom,msm-dai-q6-mi2s-prim {
+			qcom,msm-mi2s-rx-lines = <1>;
+			qcom,msm-mi2s-tx-lines = <0>;
+			pinctrl-names = "default", "sleep";
+			pinctrl-0 = <&pri_mi2s_sck_active
+					&pri_mi2s_ws_active
+					&pri_mi2s_sd0_active>;
+			pinctrl-1 = <&pri_mi2s_sck_sleep
+					&pri_mi2s_ws_sleep
+					&pri_mi2s_sd0_sleep>;
+		};
+
+		dai_mi2s2: qcom,msm-dai-q6-mi2s-tert {
+			qcom,msm-mi2s-rx-lines = <0>;
+			qcom,msm-mi2s-tx-lines = <1>;
+			pinctrl-names = "default", "sleep";
+			pinctrl-0 = <&tert_mi2s_active
+					&tert_mi2s_sd0_active>;
+			pinctrl-1 = <&tert_mi2s_sleep
+					&tert_mi2s_sd0_sleep>;
+		};
+
+		dai_mi2s3: qcom,msm-dai-q6-mi2s-quat {
+			qcom,msm-mi2s-rx-lines = <0>;
+			qcom,msm-mi2s-tx-lines = <15>;
+			pinctrl-names = "default", "sleep";
+			pinctrl-0 = <&quat_mi2s_active
+					&quat_mi2s_sd0_active
+					&quat_mi2s_sd1_active
+					&quat_mi2s_sd2_active
+					&quat_mi2s_sd3_active>;
+			pinctrl-1 = <&quat_mi2s_sleep
+					&quat_mi2s_sd0_sleep
+					&quat_mi2s_sd1_sleep
+					&quat_mi2s_sd2_sleep
+					&quat_mi2s_sd3_sleep>;
+		};
+	};
+
+	sound-freebox {
+		fbx,spdif-wm8804;
+		fbx,arc-sil9437;
+	};
+};
+
+&soc {
+	qcom,bcl {
+		/delete-property/ qcom,bcl-enable;
+	};
+};
+
+&pcie0 {
+	qcom,boot-option = <0>;
+};
+
+&msm_ath10k_wlan {
+	status = "ok";
+};
+
+&tspp {
+	pinctrl-names = "disabled", "tsif0-mode1", "tsif0-mode2";
+
+	tsin0: port@0 {
+		tsin-num = <0>;
+		i2c-bus = <&i2c_5>;
+		reset-gpios = <&tlmm 84 GPIO_ACTIVE_LOW>;
+	};
+};
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/arch/arm64/boot/dts/qcom/apq8098-v2.1.dtsi	2019-01-22 16:16:21.171225289 +0100
@@ -0,0 +1,28 @@
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include "msm8998-v2.1.dtsi"
+
+/ {
+	model = "Qualcomm Technologies, Inc. APQ 8098 V2.1";
+	qcom,msm-id = <319 0x20001>;
+};
+
+&soc {
+	qcom,rmnet-ipa {
+		status = "disabled";
+	};
+};
+
+&ipa_hw {
+	status = "disabled";
+};
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/arch/arm64/boot/dts/qcom/apq8098-v2.1-mediabox.dts	2019-10-29 09:26:22.905195956 +0100
@@ -0,0 +1,222 @@
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/dts-v1/;
+
+#include "apq8098-v2.1.dtsi"
+#include "msm8998-cdp.dtsi"
+
+/ {
+	model = "Qualcomm Technologies, Inc. APQ 8098 V2.1 mediabox";
+	compatible = "qcom,apq8098-cdp", "qcom,apq8098", "qcom,cdp";
+	qcom,board-id = <8 1>;
+};
+
+&spi_10 {
+	status = "disabled";
+};
+
+&pcie0 {
+	/delete-property/ qcom,boot-option;
+};
+
+&msm_ath10k_wlan {
+	status = "ok";
+};
+
+&sde_hdmi {
+	qcom,display-type = "primary";
+};
+
+&slim_aud {
+	tasha_codec {
+		wsa_spkr_sd1: msm_cdc_pinctrll {
+		      compatible = "qcom,msm-cdc-pinctrl";
+		      pinctrl-names = "aud_active", "aud_sleep";
+		      pinctrl-0 = <&spkr_1_sd_active_mediabox>;
+		      pinctrl-1 = <&spkr_1_sd_sleep_mediabox>;
+		};
+
+		wsa_spkr_sd2: msm_cdc_pinctrlr {
+		      compatible = "qcom,msm-cdc-pinctrl";
+		      pinctrl-names = "aud_active", "aud_sleep";
+		      pinctrl-0 = <&spkr_2_sd_active_mediabox>;
+		      pinctrl-1 = <&spkr_2_sd_sleep_mediabox>;
+		};
+	};
+};
+
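+/* Removable SD slot on SDHC2, up to SDR104; card detect on TLMM GPIO 86 */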
+&sdhc_2 {
+	vdd-supply = <&pm8998_l21>;
+	qcom,vdd-voltage-level = <2950000 2960000>;
+	qcom,vdd-current-level = <200 800000>;
+
+	vdd-io-supply = <&pm8998_l13>;
+	qcom,vdd-io-voltage-level = <1808000 2960000>;
+	qcom,vdd-io-current-level = <200 22000>;
+
+	pinctrl-names = "active", "sleep";
+	pinctrl-0 = <&sdc2_clk_on  &sdc2_cmd_on &sdc2_data_on
+			&sdc2_cd_on_mediabox>;
+	pinctrl-1 = <&sdc2_clk_off &sdc2_cmd_off &sdc2_data_off
+			&sdc2_cd_off_mediabox>;
+
+	qcom,clk-rates = <400000 20000000 25000000
+				50000000 100000000 200000000>;
+	qcom,bus-speed-mode = "SDR12", "SDR25", "SDR50", "DDR50", "SDR104";
+
+	cd-gpios = <&tlmm 86 0x1>;
+
+	status = "ok";
+};
+
+&tspp {
+	qcom,lpass-timer-tts = <1>;
+};
+
+&snd_9335 {
+	qcom,msm-mi2s-master = <1>, <1>, <1>, <0>;
+	qcom,msm-mbhc-hphl-swh = <1>;
+};
+
+&wcd_usbc_analog_en1_gpio {
+	status = "disabled";
+};
+
+&wcd_usbc_analog_en2n_gpio {
+	status = "disabled";
+};
+
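+/* Restores the qcom,boot-option removed earlier in this file, pinned to 0 */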
+&pcie0 {
+	qcom,boot-option = <0x0>;
+};
+
+&soc {
+	qcom,msm-dai-mi2s {
+		dai_mi2s3: qcom,msm-dai-q6-mi2s-quat {
+			qcom,msm-mi2s-rx-lines = <0>;
+			/* SD0..SD3: (1 << 0) | (1 << 1) | (1 << 2) | (1 << 3) */
+			qcom,msm-mi2s-tx-lines = <15>;
+			pinctrl-names = "default", "sleep";
+			pinctrl-0 = <&quat_mi2s_active &quat_mi2s_sd0_active
+					&quat_mi2s_sd1_active
+					&quat_mi2s_sd2_active
+					&quat_mi2s_sd3_active>;
+			pinctrl-1 = <&quat_mi2s_sleep &quat_mi2s_sd0_sleep
+					&quat_mi2s_sd1_sleep
+					&quat_mi2s_sd2_sleep
+					&quat_mi2s_sd3_sleep>;
+		};
+	};
+
+	ir: ir-receiver {
+		compatible = "gpio-ir-receiver";
+		gpios = <&tlmm 120 1>;
+		pinctrl-names = "default";
+		pinctrl-0 = <&ir_int>;
+		linux,rc-map-name = "rc-rc6-freebox";
+	};
+};
+
+&tlmm {
+	ir_int: ir_int {
+		mux {
+			pins = "gpio120";
+			function = "gpio";
+		};
+		config {
+			pins = "gpio120";
+			drive-strength = <6>;
+			bias-pull-up;
+		};
+	};
+
+	spkr_1_sd_mediabox {
+		spkr_1_sd_sleep_mediabox: spkr_1_sd_sleep_mediabox {
+			mux {
+				pins = "gpio85";
+				function = "gpio";
+			};
+			config {
+				pins = "gpio85";
+				drive-strength = <2>;   /* 2 mA */
+				bias-pull-down;
+				input-enable;
+			};
+		};
+		spkr_1_sd_active_mediabox: spkr_1_sd_active_mediabox {
+			mux {
+				pins = "gpio85";
+				function = "gpio";
+			};
+			config {
+				pins = "gpio85";
+				drive-strength = <8>;   /* 8 mA */
+				bias-disable;
+				output-high;
+			};
+		};
+	};
+
+	spkr_2_sd_mediabox {
+		spkr_2_sd_sleep_mediabox: spkr_2_sd_sleep_mediabox {
+			mux {
+				pins = "gpio112";
+				function = "gpio";
+			};
+			config {
+				pins = "gpio112";
+				drive-strength = <2>;   /* 2 mA */
+				bias-pull-down;
+				input-enable;
+			};
+		};
+		spkr_2_sd_active_mediabox: spkr_2_sd_active_mediabox {
+			mux {
+				pins = "gpio112";
+				function = "gpio";
+			};
+			config {
+				pins = "gpio112";
+				drive-strength = <8>;   /* 8 mA */
+				bias-disable;
+				output-high;
+			};
+		};
+	};
+
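+	/* Card-detect pad keeps its pull-up in both states so removal is seen in suspend (assumed intent) */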
+	sdc2_cd_on_mediabox: sdc2_cd_on_mediabox {
+		mux {
+			pins = "gpio86";
+			function = "gpio";
+		};
+
+		config {
+			pins = "gpio86";
+			bias-pull-up;           /* pull up */
+			drive-strength = <2>;   /* 2 mA */
+		};
+	};
+
+	sdc2_cd_off_mediabox: sdc2_cd_off_mediabox {
+		mux {
+			pins = "gpio86";
+			function = "gpio";
+		};
+
+		config {
+			pins = "gpio86";
+			bias-pull-up;           /* pull up */
+			drive-strength = <2>;   /* 2 mA */
+		};
+	};
+};
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/arch/arm64/boot/dts/qcom/msm8998-audio.dtsi	2019-10-29 09:26:22.917196073 +0100
@@ -0,0 +1,56 @@
+/*
+ * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+&msm_audio_ion {
+	qcom,smmu-version = <2>;
+	iommus = <&lpass_q6_smmu 1>;
+};
+
+&soc {
+	qcom,avtimer@170f700c {
+		compatible = "qcom,avtimer";
+		reg = <0x170f700c 0x4>,
+		      <0x170f7010 0x4>;
+		reg-names = "avtimer_lsb_addr", "avtimer_msb_addr";
+		qcom,clk-div = <27>;
+	};
+
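+	/* Two external audio reference clocks: div_clk1 pinned out on PM8998 GPIO 13, and LN BB CLK2 */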
+	clock_audio: audio_ext_clk {
+		status = "ok";
+		compatible = "qcom,audio-ref-clk";
+		qcom,audio-ref-clk-gpio = <&pm8998_gpios 13 0>;
+		clock-names = "osr_clk";
+		clocks = <&clock_gcc clk_div_clk1>;
+		qcom,node_has_rpm_clock;
+		#clock-cells = <1>;
+		pinctrl-names = "sleep", "active";
+		pinctrl-0 = <&spkr_i2s_clk_sleep>;
+		pinctrl-1 = <&spkr_i2s_clk_active>;
+	};
+
+	clock_audio_lnbb: audio_ext_clk_lnbb {
+		status = "ok";
+		compatible = "qcom,audio-ref-clk";
+		clock-names = "osr_clk";
+		clocks = <&clock_gcc clk_ln_bb_clk2>;
+		qcom,node_has_rpm_clock;
+		#clock-cells = <1>;
+	};
+};
+
+&slim_aud {
+	msm_dai_slim {
+		compatible = "qcom,msm-dai-slim";
+		elemental-addr = [ff ff ff fe 17 02];
+	};
+};
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/arch/arm64/boot/dts/qcom/msm8998-audio-freebox.dtsi	2019-01-22 16:16:21.191225470 +0100
@@ -0,0 +1,87 @@
+/*
+ * Copyright (c) 2018, Freebox SAS. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
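+/*
+ * Freebox ASoC card: reuses the stock msm8998 Q6 AFE front-ends but attaches
+ * only the stub and external-display codecs (no SLIMbus WCD codec here).
+ */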
+&soc {
+	snd_freebox: sound-freebox {
+		compatible = "qcom,msm8998-asoc-snd-freebox";
+		qcom,model = "msm8998-freebox-snd-card";
+		qcom,ext-disp-audio-rx;
+		qcom,mi2s-audio-intf;
+		qcom,msm-mi2s-master = <1>, <1>, <0>, <0>;
+		qcom,msm-mi2s-ext-mclk = <1>, <0>, <0>, <0>;
+
+		reg = <0x1711a000 0x4>,
+		      <0x1711b000 0x4>,
+		      <0x1711c000 0x4>,
+		      <0x1711d000 0x4>;
+		reg-names = "lpaif_pri_mode_muxsel",
+			    "lpaif_sec_mode_muxsel",
+			    "lpaif_tert_mode_muxsel",
+			    "lpaif_quat_mode_muxsel";
+
+		asoc-platform = <&pcm0>, <&pcm1>, <&pcm2>, <&voip>, <&voice>,
+				<&loopback>, <&compress>, <&hostless>,
+				<&afe>, <&lsm>, <&routing>, <&compr>,
+				<&pcm_noirq>, <&trans_loopback>;
+		asoc-platform-names = "msm-pcm-dsp.0", "msm-pcm-dsp.1",
+				"msm-pcm-dsp.2", "msm-voip-dsp",
+				"msm-pcm-voice", "msm-pcm-loopback",
+				"msm-compress-dsp", "msm-pcm-hostless",
+				"msm-pcm-afe", "msm-lsm-client",
+				"msm-pcm-routing", "msm-compr-dsp",
+				"msm-pcm-dsp-noirq", "msm-transcode-loopback";
+		asoc-cpu = <&dai_hdmi>, <&dai_dp>,
+				<&dai_mi2s0>, <&dai_mi2s1>,
+				<&dai_mi2s2>, <&dai_mi2s3>,
+				<&dai_pri_auxpcm>, <&dai_sec_auxpcm>,
+				<&dai_tert_auxpcm>, <&dai_quat_auxpcm>,
+				<&sb_0_rx>, <&sb_0_tx>, <&sb_1_rx>, <&sb_1_tx>,
+				<&sb_2_rx>, <&sb_2_tx>, <&sb_3_rx>, <&sb_3_tx>,
+				<&sb_4_rx>, <&sb_4_tx>, <&sb_5_tx>,
+				<&afe_pcm_rx>, <&afe_pcm_tx>, <&afe_proxy_rx>,
+				<&afe_proxy_tx>, <&incall_record_rx>,
+				<&incall_record_tx>, <&incall_music_rx>,
+				<&incall_music_2_rx>, <&sb_5_rx>, <&sb_6_rx>,
+				<&sb_7_rx>, <&sb_7_tx>, <&sb_8_tx>,
+				<&usb_audio_rx>, <&usb_audio_tx>,
+				<&dai_pri_tdm_rx_0>, <&dai_pri_tdm_tx_0>,
+				<&dai_sec_tdm_rx_0>, <&dai_sec_tdm_tx_0>,
+				<&dai_tert_tdm_rx_0>, <&dai_tert_tdm_tx_0>,
+				<&dai_quat_tdm_rx_0>, <&dai_quat_tdm_tx_0>;
+		asoc-cpu-names = "msm-dai-q6-hdmi.8", "msm-dai-q6-dp.24608",
+				"msm-dai-q6-mi2s.0", "msm-dai-q6-mi2s.1",
+				"msm-dai-q6-mi2s.2", "msm-dai-q6-mi2s.3",
+				"msm-dai-q6-auxpcm.1", "msm-dai-q6-auxpcm.2",
+				"msm-dai-q6-auxpcm.3", "msm-dai-q6-auxpcm.4",
+				"msm-dai-q6-dev.16384", "msm-dai-q6-dev.16385",
+				"msm-dai-q6-dev.16386", "msm-dai-q6-dev.16387",
+				"msm-dai-q6-dev.16388", "msm-dai-q6-dev.16389",
+				"msm-dai-q6-dev.16390", "msm-dai-q6-dev.16391",
+				"msm-dai-q6-dev.16392", "msm-dai-q6-dev.16393",
+				"msm-dai-q6-dev.16395", "msm-dai-q6-dev.224",
+				"msm-dai-q6-dev.225", "msm-dai-q6-dev.241",
+				"msm-dai-q6-dev.240", "msm-dai-q6-dev.32771",
+				"msm-dai-q6-dev.32772", "msm-dai-q6-dev.32773",
+				"msm-dai-q6-dev.32770", "msm-dai-q6-dev.16394",
+				"msm-dai-q6-dev.16396", "msm-dai-q6-dev.16398",
+				"msm-dai-q6-dev.16399", "msm-dai-q6-dev.16401",
+				"msm-dai-q6-dev.28672", "msm-dai-q6-dev.28673",
+				"msm-dai-q6-tdm.36864", "msm-dai-q6-tdm.36865",
+				"msm-dai-q6-tdm.36880", "msm-dai-q6-tdm.36881",
+				"msm-dai-q6-tdm.36896", "msm-dai-q6-tdm.36897",
+				"msm-dai-q6-tdm.36912", "msm-dai-q6-tdm.36913";
+		asoc-codec = <&stub_codec>, <&ext_disp_audio_codec>;
+		asoc-codec-names = "msm-stub-codec.1",
+				   "msm-ext-disp-audio-codec-rx";
+	};
+};
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/arch/arm64/boot/dts/qcom/msm8998-audio-wcd.dtsi	2019-01-22 16:16:21.191225470 +0100
@@ -0,0 +1,632 @@
+/*
+ * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include "msm8998-wsa881x.dtsi"
+
+&spi_10 {
+	status = "ok";
+};
+
+&soc {
+	snd_9335: sound-9335 {
+		compatible = "qcom,msm8998-asoc-snd-tasha";
+		qcom,model = "msm8998-tasha-snd-card";
+		qcom,ext-disp-audio-rx;
+		qcom,wcn-btfm;
+		qcom,mi2s-audio-intf;
+		qcom,auxpcm-audio-intf;
+		qcom,msm-mi2s-master = <1>, <1>, <1>, <1>;
+
+		reg = <0x1711a000 0x4>,
+		      <0x1711b000 0x4>,
+		      <0x1711c000 0x4>,
+		      <0x1711d000 0x4>;
+		reg-names = "lpaif_pri_mode_muxsel",
+			    "lpaif_sec_mode_muxsel",
+			    "lpaif_tert_mode_muxsel",
+			    "lpaif_quat_mode_muxsel";
+
+		qcom,audio-routing =
+			"AIF4 VI", "MCLK",
+			"RX_BIAS", "MCLK",
+			"MADINPUT", "MCLK",
+			"hifi amp", "LINEOUT1",
+			"hifi amp", "LINEOUT2",
+			"AMIC2", "MIC BIAS2",
+			"MIC BIAS2", "Headset Mic",
+			"AMIC3", "MIC BIAS2",
+			"MIC BIAS2", "ANCRight Headset Mic",
+			"AMIC4", "MIC BIAS2",
+			"MIC BIAS2", "ANCLeft Headset Mic",
+			"AMIC5", "MIC BIAS3",
+			"MIC BIAS3", "Handset Mic",
+			"AMIC6", "MIC BIAS4",
+			"MIC BIAS4", "Analog Mic6",
+			"DMIC0", "MIC BIAS1",
+			"MIC BIAS1", "Digital Mic0",
+			"DMIC1", "MIC BIAS1",
+			"MIC BIAS1", "Digital Mic1",
+			"DMIC2", "MIC BIAS3",
+			"MIC BIAS3", "Digital Mic2",
+			"DMIC3", "MIC BIAS3",
+			"MIC BIAS3", "Digital Mic3",
+			"DMIC4", "MIC BIAS4",
+			"MIC BIAS4", "Digital Mic4",
+			"DMIC5", "MIC BIAS4",
+			"MIC BIAS4", "Digital Mic5",
+			"SpkrLeft IN", "SPK1 OUT",
+			"SpkrRight IN", "SPK2 OUT";
+
+		qcom,msm-mbhc-hphl-swh = <0>;
+		qcom,msm-mbhc-gnd-swh = <0>;
+		qcom,us-euro-gpios = <&wcd_us_euro_gpio>;
+		qcom,hph-en0-gpio = <&hph_en0_gpio>;
+		qcom,hph-en1-gpio = <&hph_en1_gpio>;
+		qcom,tasha-mclk-clk-freq = <9600000>;
+		asoc-platform = <&pcm0>, <&pcm1>, <&pcm2>, <&voip>, <&voice>,
+				<&loopback>, <&compress>, <&hostless>,
+				<&afe>, <&lsm>, <&routing>, <&cpe>, <&compr>,
+				<&pcm_noirq>, <&cpe3>, <&trans_loopback>;
+		asoc-platform-names = "msm-pcm-dsp.0", "msm-pcm-dsp.1",
+				"msm-pcm-dsp.2", "msm-voip-dsp",
+				"msm-pcm-voice", "msm-pcm-loopback",
+				"msm-compress-dsp", "msm-pcm-hostless",
+				"msm-pcm-afe", "msm-lsm-client",
+				"msm-pcm-routing", "msm-cpe-lsm",
+				"msm-compr-dsp", "msm-pcm-dsp-noirq",
+				"msm-cpe-lsm.3", "msm-transcode-loopback";
+		asoc-cpu = <&dai_hdmi>, <&dai_dp>,
+				<&dai_mi2s0>, <&dai_mi2s1>,
+				<&dai_mi2s2>, <&dai_mi2s3>,
+				<&dai_pri_auxpcm>, <&dai_sec_auxpcm>,
+				<&dai_tert_auxpcm>, <&dai_quat_auxpcm>,
+				<&sb_0_rx>, <&sb_0_tx>, <&sb_1_rx>, <&sb_1_tx>,
+				<&sb_2_rx>, <&sb_2_tx>, <&sb_3_rx>, <&sb_3_tx>,
+				<&sb_4_rx>, <&sb_4_tx>, <&sb_5_tx>,
+				<&afe_pcm_rx>, <&afe_pcm_tx>, <&afe_proxy_rx>,
+				<&afe_proxy_tx>, <&incall_record_rx>,
+				<&incall_record_tx>, <&incall_music_rx>,
+				<&incall_music_2_rx>, <&sb_5_rx>, <&sb_6_rx>,
+				<&sb_7_rx>, <&sb_7_tx>, <&sb_8_tx>,
+				<&usb_audio_rx>, <&usb_audio_tx>,
+				<&dai_pri_tdm_rx_0>, <&dai_pri_tdm_tx_0>,
+				<&dai_sec_tdm_rx_0>, <&dai_sec_tdm_tx_0>,
+				<&dai_tert_tdm_rx_0>, <&dai_tert_tdm_tx_0>,
+				<&dai_quat_tdm_rx_0>, <&dai_quat_tdm_tx_0>;
+		asoc-cpu-names = "msm-dai-q6-hdmi.8", "msm-dai-q6-dp.24608",
+				"msm-dai-q6-mi2s.0", "msm-dai-q6-mi2s.1",
+				"msm-dai-q6-mi2s.2", "msm-dai-q6-mi2s.3",
+				"msm-dai-q6-auxpcm.1", "msm-dai-q6-auxpcm.2",
+				"msm-dai-q6-auxpcm.3", "msm-dai-q6-auxpcm.4",
+				"msm-dai-q6-dev.16384", "msm-dai-q6-dev.16385",
+				"msm-dai-q6-dev.16386", "msm-dai-q6-dev.16387",
+				"msm-dai-q6-dev.16388", "msm-dai-q6-dev.16389",
+				"msm-dai-q6-dev.16390", "msm-dai-q6-dev.16391",
+				"msm-dai-q6-dev.16392", "msm-dai-q6-dev.16393",
+				"msm-dai-q6-dev.16395", "msm-dai-q6-dev.224",
+				"msm-dai-q6-dev.225", "msm-dai-q6-dev.241",
+				"msm-dai-q6-dev.240", "msm-dai-q6-dev.32771",
+				"msm-dai-q6-dev.32772", "msm-dai-q6-dev.32773",
+				"msm-dai-q6-dev.32770", "msm-dai-q6-dev.16394",
+				"msm-dai-q6-dev.16396", "msm-dai-q6-dev.16398",
+				"msm-dai-q6-dev.16399", "msm-dai-q6-dev.16401",
+				"msm-dai-q6-dev.28672", "msm-dai-q6-dev.28673",
+				"msm-dai-q6-tdm.36864", "msm-dai-q6-tdm.36865",
+				"msm-dai-q6-tdm.36880", "msm-dai-q6-tdm.36881",
+				"msm-dai-q6-tdm.36896", "msm-dai-q6-tdm.36897",
+				"msm-dai-q6-tdm.36912", "msm-dai-q6-tdm.36913";
+		asoc-codec = <&stub_codec>, <&ext_disp_audio_codec>;
+		asoc-codec-names = "msm-stub-codec.1",
+				   "msm-ext-disp-audio-codec-rx";
+		qcom,wsa-max-devs = <2>;
+		qcom,wsa-devs = <&wsa881x_211>, <&wsa881x_212>,
+				<&wsa881x_213>, <&wsa881x_214>;
+		qcom,wsa-aux-dev-prefix = "SpkrLeft", "SpkrRight",
+					  "SpkrLeft", "SpkrRight";
+
+		hph_en0_gpio: msm_cdc_pinctrl@67 {
+			compatible = "qcom,msm-cdc-pinctrl";
+			pinctrl-names = "aud_active", "aud_sleep";
+			pinctrl-0 = <&hph_en0_active>;
+			pinctrl-1 = <&hph_en0_idle>;
+		};
+
+		hph_en1_gpio: msm_cdc_pinctrl@68 {
+			compatible = "qcom,msm-cdc-pinctrl";
+			pinctrl-names = "aud_active", "aud_sleep";
+			pinctrl-0 = <&hph_en1_active>;
+			pinctrl-1 = <&hph_en1_idle>;
+		};
+	};
+
+	snd_934x: sound-tavil {
+		compatible = "qcom,msm8998-asoc-snd-tavil";
+		qcom,model = "msm8998-tavil-snd-card";
+		qcom,ext-disp-audio-rx;
+		qcom,wcn-btfm;
+		qcom,mi2s-audio-intf;
+		qcom,auxpcm-audio-intf;
+		qcom,msm-mi2s-master = <1>, <1>, <1>, <1>;
+
+		reg = <0x1711a000 0x4>,
+		      <0x1711b000 0x4>,
+		      <0x1711c000 0x4>,
+		      <0x1711d000 0x4>;
+		reg-names = "lpaif_pri_mode_muxsel",
+			    "lpaif_sec_mode_muxsel",
+			    "lpaif_tert_mode_muxsel",
+			    "lpaif_quat_mode_muxsel";
+
+		qcom,audio-routing =
+			"AIF4 VI", "MCLK",
+			"RX_BIAS", "MCLK",
+			"MADINPUT", "MCLK",
+			"hifi amp", "LINEOUT1",
+			"hifi amp", "LINEOUT2",
+			"AMIC2", "MIC BIAS2",
+			"MIC BIAS2", "Headset Mic",
+			"AMIC3", "MIC BIAS2",
+			"MIC BIAS2", "ANCRight Headset Mic",
+			"AMIC4", "MIC BIAS2",
+			"MIC BIAS2", "ANCLeft Headset Mic",
+			"AMIC5", "MIC BIAS3",
+			"MIC BIAS3", "Handset Mic",
+			"DMIC0", "MIC BIAS1",
+			"MIC BIAS1", "Digital Mic0",
+			"DMIC1", "MIC BIAS1",
+			"MIC BIAS1", "Digital Mic1",
+			"DMIC2", "MIC BIAS3",
+			"MIC BIAS3", "Digital Mic2",
+			"DMIC3", "MIC BIAS3",
+			"MIC BIAS3", "Digital Mic3",
+			"DMIC4", "MIC BIAS4",
+			"MIC BIAS4", "Digital Mic4",
+			"DMIC5", "MIC BIAS4",
+			"MIC BIAS4", "Digital Mic5",
+			"SpkrLeft IN", "SPK1 OUT",
+			"SpkrRight IN", "SPK2 OUT";
+
+		qcom,msm-mbhc-hphl-swh = <0>;
+		qcom,msm-mbhc-gnd-swh = <0>;
+		qcom,us-euro-gpios = <&tavil_us_euro_sw>;
+		qcom,hph-en0-gpio = <&tavil_hph_en0>;
+		qcom,hph-en1-gpio = <&tavil_hph_en1>;
+		qcom,tavil-mclk-clk-freq = <9600000>;
+
+		qcom,usbc-analog-en1_gpio = <&wcd_usbc_analog_en1_gpio>;
+		qcom,usbc-analog-en2_n_gpio = <&wcd_usbc_analog_en2n_gpio>;
+
+		asoc-platform = <&pcm0>, <&pcm1>, <&pcm2>, <&voip>, <&voice>,
+				<&loopback>, <&compress>, <&hostless>,
+				<&afe>, <&lsm>, <&routing>, <&cpe>, <&compr>,
+				<&pcm_noirq>, <&trans_loopback>;
+		asoc-platform-names = "msm-pcm-dsp.0", "msm-pcm-dsp.1",
+				"msm-pcm-dsp.2", "msm-voip-dsp",
+				"msm-pcm-voice", "msm-pcm-loopback",
+				"msm-compress-dsp", "msm-pcm-hostless",
+				"msm-pcm-afe", "msm-lsm-client",
+				"msm-pcm-routing", "msm-cpe-lsm",
+				"msm-compr-dsp", "msm-pcm-dsp-noirq",
+				"msm-transcode-loopback";
+		asoc-cpu = <&dai_hdmi>, <&dai_dp>,
+				<&dai_mi2s0>, <&dai_mi2s1>,
+				<&dai_mi2s2>, <&dai_mi2s3>,
+				<&dai_pri_auxpcm>, <&dai_sec_auxpcm>,
+				<&dai_tert_auxpcm>, <&dai_quat_auxpcm>,
+				<&sb_0_rx>, <&sb_0_tx>, <&sb_1_rx>, <&sb_1_tx>,
+				<&sb_2_rx>, <&sb_2_tx>, <&sb_3_rx>, <&sb_3_tx>,
+				<&sb_4_rx>, <&sb_4_tx>, <&sb_5_tx>,
+				<&afe_pcm_rx>, <&afe_pcm_tx>, <&afe_proxy_rx>,
+				<&afe_proxy_tx>, <&incall_record_rx>,
+				<&incall_record_tx>, <&incall_music_rx>,
+				<&incall_music_2_rx>, <&sb_5_rx>, <&sb_6_rx>,
+				<&sb_7_rx>, <&sb_7_tx>, <&sb_8_tx>,
+				<&usb_audio_rx>, <&usb_audio_tx>,
+				<&dai_pri_tdm_rx_0>, <&dai_pri_tdm_tx_0>,
+				<&dai_sec_tdm_rx_0>, <&dai_sec_tdm_tx_0>,
+				<&dai_tert_tdm_rx_0>, <&dai_tert_tdm_tx_0>,
+				<&dai_quat_tdm_rx_0>, <&dai_quat_tdm_tx_0>;
+		asoc-cpu-names = "msm-dai-q6-hdmi.8",  "msm-dai-q6-dp.24608",
+				"msm-dai-q6-mi2s.0", "msm-dai-q6-mi2s.1",
+				"msm-dai-q6-mi2s.2", "msm-dai-q6-mi2s.3",
+				"msm-dai-q6-auxpcm.1", "msm-dai-q6-auxpcm.2",
+				"msm-dai-q6-auxpcm.3", "msm-dai-q6-auxpcm.4",
+				"msm-dai-q6-dev.16384", "msm-dai-q6-dev.16385",
+				"msm-dai-q6-dev.16386", "msm-dai-q6-dev.16387",
+				"msm-dai-q6-dev.16388", "msm-dai-q6-dev.16389",
+				"msm-dai-q6-dev.16390", "msm-dai-q6-dev.16391",
+				"msm-dai-q6-dev.16392", "msm-dai-q6-dev.16393",
+				"msm-dai-q6-dev.16395", "msm-dai-q6-dev.224",
+				"msm-dai-q6-dev.225", "msm-dai-q6-dev.241",
+				"msm-dai-q6-dev.240", "msm-dai-q6-dev.32771",
+				"msm-dai-q6-dev.32772", "msm-dai-q6-dev.32773",
+				"msm-dai-q6-dev.32770", "msm-dai-q6-dev.16394",
+				"msm-dai-q6-dev.16396", "msm-dai-q6-dev.16398",
+				"msm-dai-q6-dev.16399", "msm-dai-q6-dev.16401",
+				"msm-dai-q6-dev.28672", "msm-dai-q6-dev.28673",
+				"msm-dai-q6-tdm.36864", "msm-dai-q6-tdm.36865",
+				"msm-dai-q6-tdm.36880", "msm-dai-q6-tdm.36881",
+				"msm-dai-q6-tdm.36896", "msm-dai-q6-tdm.36897",
+				"msm-dai-q6-tdm.36912", "msm-dai-q6-tdm.36913";
+		asoc-codec = <&stub_codec>, <&ext_disp_audio_codec>;
+		asoc-codec-names = "msm-stub-codec.1",
+				   "msm-ext-disp-audio-codec-rx";
+		qcom,wsa-max-devs = <2>;
+		qcom,wsa-devs = <&wsa881x_0211>, <&wsa881x_0212>,
+				<&wsa881x_0213>, <&wsa881x_0214>;
+		qcom,wsa-aux-dev-prefix = "SpkrLeft", "SpkrRight",
+					  "SpkrLeft", "SpkrRight";
+	};
+
+	cpe: qcom,msm-cpe-lsm {
+		compatible = "qcom,msm-cpe-lsm";
+		qcom,msm-cpe-lsm-id = <1>;
+	};
+
+	cpe3: qcom,msm-cpe-lsm@3 {
+		compatible = "qcom,msm-cpe-lsm";
+		qcom,msm-cpe-lsm-id = <3>;
+	};
+
+	qcom,wcd-dsp-mgr {
+		compatible = "qcom,wcd-dsp-mgr";
+		qcom,wdsp-components = <&wcd934x_cdc 0>,
+				       <&wcd_spi_0 1>,
+				       <&glink_spi_xprt_wdsp 2>;
+		qcom,img-filename = "cpe_9340";
+	};
+
+	wcd_us_euro_gpio: msm_cdc_pinctrl@75 {
+		compatible = "qcom,msm-cdc-pinctrl";
+		pinctrl-names = "aud_active", "aud_sleep";
+		pinctrl-0 = <&wcd_gnd_mic_swap_active>;
+		pinctrl-1 = <&wcd_gnd_mic_swap_idle>;
+	};
+
+	wcd_usbc_analog_en1_gpio: msm_cdc_pinctrl@59 {
+		compatible = "qcom,msm-cdc-pinctrl";
+		pinctrl-names = "aud_active", "aud_sleep";
+		pinctrl-0 = <&wcd_usbc_analog_en1_active>;
+		pinctrl-1 = <&wcd_usbc_analog_en1_idle>;
+	};
+
+	wcd_usbc_analog_en2n_gpio: msm_cdc_pinctrl@60 {
+		compatible = "qcom,msm-cdc-pinctrl";
+		pinctrl-names = "aud_active", "aud_sleep";
+		pinctrl-0 = <&wcd_usbc_analog_en2n_active>;
+		pinctrl-1 = <&wcd_usbc_analog_en2n_idle>;
+	};
+
+	wcd9xxx_intc: wcd9xxx-irq {
+		status = "ok";
+		compatible = "qcom,wcd9xxx-irq";
+		interrupt-controller;
+		#interrupt-cells = <1>;
+		interrupt-parent = <&tlmm>;
+		qcom,gpio-connect = <&tlmm 54 0>;
+		pinctrl-names = "default";
+		pinctrl-0 = <&wcd_intr_default>;
+	};
+
+	wcd_rst_gpio: msm_cdc_pinctrl@64 {
+		compatible = "qcom,msm-cdc-pinctrl";
+		qcom,cdc-rst-n-gpio = <&tlmm 64 0>;
+		pinctrl-names = "aud_active", "aud_sleep";
+		pinctrl-0 = <&cdc_reset_active>;
+		pinctrl-1 = <&cdc_reset_sleep>;
+	};
+
+	qcom,wcd-dsp-glink {
+		compatible = "qcom,wcd-dsp-glink";
+	};
+};
+
+&slim_aud {
+	tasha_codec {
+		compatible = "qcom,tasha-slim-pgd";
+		elemental-addr = [00 01 A0 01 17 02];
+
+		interrupt-parent = <&wcd9xxx_intc>;
+		interrupts = <0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16
+			      17 18 19 20 21 22 23 24 25 26 27 28 29
+			      30>;
+
+		qcom,wcd-rst-gpio-node = <&wcd_rst_gpio>;
+
+		clock-names = "wcd_clk", "wcd_native_clk";
+		clocks = <&clock_audio clk_audio_pmi_clk>,
+			 <&clock_audio clk_audio_ap_clk2>;
+
+		cdc-vdd-buck-supply = <&pm8998_s4>;
+		qcom,cdc-vdd-buck-voltage = <1800000 1800000>;
+		qcom,cdc-vdd-buck-current = <650000>;
+
+		cdc-buck-sido-supply = <&pm8998_s4>;
+		qcom,cdc-buck-sido-voltage = <1800000 1800000>;
+		qcom,cdc-buck-sido-current = <250000>;
+
+		cdc-vdd-tx-h-supply = <&pm8998_s4>;
+		qcom,cdc-vdd-tx-h-voltage = <1800000 1800000>;
+		qcom,cdc-vdd-tx-h-current = <25000>;
+
+		cdc-vdd-rx-h-supply = <&pm8998_s4>;
+		qcom,cdc-vdd-rx-h-voltage = <1800000 1800000>;
+		qcom,cdc-vdd-rx-h-current = <25000>;
+
+		cdc-vddpx-1-supply = <&pm8998_s4>;
+		qcom,cdc-vddpx-1-voltage = <1800000 1800000>;
+		qcom,cdc-vddpx-1-current = <10000>;
+
+		qcom,cdc-static-supplies = "cdc-vdd-buck",
+					   "cdc-buck-sido",
+					   "cdc-vdd-tx-h",
+					   "cdc-vdd-rx-h",
+					   "cdc-vddpx-1";
+
+		qcom,cdc-micbias1-mv = <1800>;
+		qcom,cdc-micbias2-mv = <1800>;
+		qcom,cdc-micbias3-mv = <1800>;
+		qcom,cdc-micbias4-mv = <1800>;
+
+		qcom,cdc-mclk-clk-rate = <9600000>;
+		qcom,cdc-slim-ifd = "tasha-slim-ifd";
+		qcom,cdc-slim-ifd-elemental-addr = [00 00 A0 01 17 02];
+		qcom,cdc-dmic-sample-rate = <4800000>;
+		qcom,cdc-mad-dmic-rate = <600000>;
+		qcom,cdc-ecpp-dmic-rate = <1200000>;
+	};
+
+	wcd934x_cdc: tavil_codec {
+		compatible = "qcom,tavil-slim-pgd";
+		elemental-addr = [00 01 50 02 17 02];
+
+		interrupt-parent = <&wcd9xxx_intc>;
+		interrupts = <0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16
+			      17 18 19 20 21 22 23 24 25 26 27 28 29
+			      30 31>;
+
+		qcom,wcd-rst-gpio-node = <&wcd_rst_gpio>;
+
+		clock-names = "wcd_clk";
+		clocks = <&clock_audio_lnbb clk_audio_pmi_lnbb_clk>;
+
+		cdc-vdd-buck-supply = <&pm8998_s4>;
+		qcom,cdc-vdd-buck-voltage = <1800000 1800000>;
+		qcom,cdc-vdd-buck-current = <650000>;
+
+		cdc-buck-sido-supply = <&pm8998_s4>;
+		qcom,cdc-buck-sido-voltage = <1800000 1800000>;
+		qcom,cdc-buck-sido-current = <250000>;
+
+		cdc-vdd-tx-h-supply = <&pm8998_s4>;
+		qcom,cdc-vdd-tx-h-voltage = <1800000 1800000>;
+		qcom,cdc-vdd-tx-h-current = <25000>;
+
+		cdc-vdd-rx-h-supply = <&pm8998_s4>;
+		qcom,cdc-vdd-rx-h-voltage = <1800000 1800000>;
+		qcom,cdc-vdd-rx-h-current = <25000>;
+
+		cdc-vddpx-1-supply = <&pm8998_s4>;
+		qcom,cdc-vddpx-1-voltage = <1800000 1800000>;
+		qcom,cdc-vddpx-1-current = <10000>;
+
+		qcom,cdc-static-supplies = "cdc-vdd-buck",
+					   "cdc-buck-sido",
+					   "cdc-vdd-tx-h",
+					   "cdc-vdd-rx-h",
+					   "cdc-vddpx-1";
+
+		qcom,cdc-micbias1-mv = <1800>;
+		qcom,cdc-micbias2-mv = <1800>;
+		qcom,cdc-micbias3-mv = <1800>;
+		qcom,cdc-micbias4-mv = <1800>;
+
+		qcom,cdc-mclk-clk-rate = <9600000>;
+		qcom,cdc-slim-ifd = "tavil-slim-ifd";
+		qcom,cdc-slim-ifd-elemental-addr = [00 00 50 02 17 02];
+		qcom,cdc-dmic-sample-rate = <4800000>;
+		qcom,cdc-mad-dmic-rate = <600000>;
+
+		qcom,wdsp-cmpnt-dev-name = "tavil_codec";
+
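+		/* WDSP image is downloaded over SPI (bus 10, CS0); mem-base-addr is the codec-side load offset (assumed) */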
+		wcd_spi_0: wcd_spi {
+			compatible = "qcom,wcd-spi-v2";
+			qcom,master-bus-num = <10>;
+			qcom,chip-select = <0>;
+			qcom,max-frequency = <24000000>;
+			qcom,mem-base-addr = <0x100000>;
+		};
+	};
+};
+
+&tlmm {
+	wcd9xxx_intr {
+		wcd_intr_default: wcd_intr_default {
+			mux {
+				pins = "gpio54";
+				function = "gpio";
+			};
+
+			config {
+				pins = "gpio54";
+				drive-strength = <2>; /* 2 mA */
+				bias-pull-down; /* pull down */
+				input-enable;
+			};
+		};
+	};
+
+	hph_en0_ctrl {
+		hph_en0_idle: hph_en0_idle {
+			mux {
+				pins = "gpio67";
+				function = "gpio";
+			};
+			config {
+				pins = "gpio67";
+				drive-strength = <2>;
+				bias-pull-down;
+				output-low;
+			};
+		};
+		hph_en0_active: hph_en0_active {
+			mux {
+				pins = "gpio67";
+				function = "gpio";
+			};
+			config {
+				pins = "gpio67";
+				drive-strength = <2>;
+				bias-disable;
+				output-high;
+			};
+		};
+	};
+
+	hph_en1_ctrl {
+		hph_en1_idle: hph_en1_idle {
+			mux {
+				pins = "gpio68";
+				function = "gpio";
+			};
+			config {
+				pins = "gpio68";
+				drive-strength = <2>;
+				bias-pull-down;
+				output-low;
+			};
+		};
+		hph_en1_active: hph_en1_active {
+			mux {
+				pins = "gpio68";
+				function = "gpio";
+			};
+			config {
+				pins = "gpio68";
+				drive-strength = <2>;
+				bias-disable;
+				output-high;
+			};
+		};
+	};
+
+	wcd_gnd_mic_swap {
+		wcd_gnd_mic_swap_idle: wcd_gnd_mic_swap_idle {
+			mux {
+				pins = "gpio75";
+				function = "gpio";
+			};
+			config {
+				pins = "gpio75";
+				drive-strength = <2>;
+				bias-pull-down;
+				output-low;
+			};
+		};
+		wcd_gnd_mic_swap_active: wcd_gnd_mic_swap_active {
+			mux {
+				pins = "gpio75";
+				function = "gpio";
+			};
+			config {
+				pins = "gpio75";
+				drive-strength = <2>;
+				bias-disable;
+				output-high;
+			};
+		};
+	};
+
+	wcd_usbc_analog_en1 {
+		wcd_usbc_analog_en1_idle: wcd_usbc_ana_en1_idle {
+			mux {
+				pins = "gpio59";
+				function = "gpio";
+			};
+			config {
+				pins = "gpio59";
+				drive-strength = <2>;
+				bias-pull-down;
+				output-low;
+			};
+		};
+
+		wcd_usbc_analog_en1_active: wcd_usbc_ana_en1_active {
+			mux {
+				pins = "gpio59";
+				function = "gpio";
+			};
+			config {
+				pins = "gpio59";
+				drive-strength = <2>;
+				bias-disable;
+				output-high;
+			};
+		};
+	};
+
+	wcd_usbc_analog_en2n {
+		wcd_usbc_analog_en2n_idle: wcd_usbc_ana_en2n_idle {
+			mux {
+				pins = "gpio60";
+				function = "gpio";
+			};
+			config {
+				pins = "gpio60";
+				drive-strength = <2>;
+				bias-disable;
+				output-high;
+			};
+		};
+
+		wcd_usbc_analog_en2n_active: wcd_usbc_ana_en2n_active {
+			mux {
+				pins = "gpio60";
+				function = "gpio";
+			};
+			config {
+				pins = "gpio60";
+				drive-strength = <2>;
+				bias-pull-down;
+				output-low;
+			};
+		};
+	};
+
+	cdc_reset_ctrl {
+		cdc_reset_sleep: cdc_reset_sleep {
+			mux {
+				pins = "gpio64";
+				function = "gpio";
+			};
+			config {
+				pins = "gpio64";
+				drive-strength = <16>;
+				bias-disable;
+				output-low;
+			};
+		};
+		cdc_reset_active: cdc_reset_active {
+			mux {
+				pins = "gpio64";
+				function = "gpio";
+			};
+			config {
+				pins = "gpio64";
+				drive-strength = <16>;
+				bias-pull-down;
+				output-high;
+			};
+		};
+	};
+};
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/arch/arm64/boot/dts/qcom/msm8998-blsp.dtsi	2019-01-22 16:16:21.191225470 +0100
@@ -0,0 +1,863 @@
+/*
+ * Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+/ {
+	aliases {
+		i2c1 = &i2c_1;
+		i2c2 = &i2c_2;
+		i2c3 = &i2c_3;
+		i2c4 = &i2c_4;
+		i2c5 = &i2c_5;
+		i2c6 = &i2c_6;
+		i2c7 = &i2c_7;
+		i2c8 = &i2c_8;
+		i2c9 = &i2c_9;
+		i2c10 = &i2c_10;
+		i2c11 = &i2c_11;
+		i2c12 = &i2c_12;
+		spi1 = &spi_1;
+		spi2 = &spi_2;
+		spi3 = &spi_3;
+		spi4 = &spi_4;
+		spi5 = &spi_5;
+		spi6 = &spi_6;
+		spi7 = &spi_7;
+		spi8 = &spi_8;
+		spi9 = &spi_9;
+		spi10 = &spi_10;
+		spi11 = &spi_11;
+		spi12 = &spi_12;
+	};
+};
+
+#include "msm8998-pinctrl.dtsi"
+
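+/*
+ * Each BLSP QUP is declared twice below, once as I2C and once as SPI, over
+ * the same register window; a board enables at most one personality per QUP.
+ * All controllers default to "disabled".
+ */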
+&soc {
+	dma_blsp1: qcom,sps-dma@0xc144000 { /* BLSP1 */
+		#dma-cells = <4>;
+		compatible = "qcom,sps-dma";
+		reg = <0xc144000 0x25000>;
+		interrupts = <0 238 0>;
+		qcom,summing-threshold = <0x10>;
+	};
+
+	dma_blsp2: qcom,sps-dma@0xc184000 { /* BLSP2 */
+		#dma-cells = <4>;
+		compatible = "qcom,sps-dma";
+		reg = <0xc184000 0x25000>;
+		interrupts = <0 239 0>;
+		qcom,summing-threshold = <0x10>;
+	};
+
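+	/* The four dma-cells select the BAM pipe and transfer parameters (meaning defined by the qcom,sps-dma binding) */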
+	i2c_1: i2c@c175000 { /* BLSP1 QUP1 */
+		compatible = "qcom,i2c-msm-v2";
+		#address-cells = <1>;
+		#size-cells = <0>;
+		reg-names = "qup_phys_addr";
+		reg = <0xC175000 0x600>;
+		interrupt-names = "qup_irq";
+		interrupts = <0 95 0>;
+		dmas = <&dma_blsp1 6 64 0x20000020 0x20>,
+			<&dma_blsp1 7 32 0x20000020 0x20>;
+		dma-names = "tx", "rx";
+		qcom,master-id = <86>;
+		qcom,clk-freq-out = <400000>;
+		qcom,clk-freq-in  = <19200000>;
+		clock-names = "iface_clk", "core_clk";
+		clocks = <&clock_gcc clk_gcc_blsp1_ahb_clk>,
+			 <&clock_gcc clk_gcc_blsp1_qup1_i2c_apps_clk>;
+		pinctrl-names = "i2c_active", "i2c_sleep";
+		pinctrl-0 = <&i2c_1_active>;
+		pinctrl-1 = <&i2c_1_sleep>;
+		status = "disabled";
+	};
+
+	i2c_2: i2c@c176000 { /* BLSP1 QUP2 */
+		compatible = "qcom,i2c-msm-v2";
+		#address-cells = <1>;
+		#size-cells = <0>;
+		reg-names = "qup_phys_addr";
+		reg = <0xC176000 0x600>;
+		interrupt-names = "qup_irq";
+		interrupts = <0 96 0>;
+		dmas = <&dma_blsp1 8 64 0x20000020 0x20>,
+			<&dma_blsp1 9 32 0x20000020 0x20>;
+		dma-names = "tx", "rx";
+		qcom,master-id = <86>;
+		qcom,clk-freq-out = <400000>;
+		qcom,clk-freq-in  = <19200000>;
+		clock-names = "iface_clk", "core_clk";
+		clocks = <&clock_gcc clk_gcc_blsp1_ahb_clk>,
+			 <&clock_gcc clk_gcc_blsp1_qup2_i2c_apps_clk>;
+		pinctrl-names = "i2c_active", "i2c_sleep";
+		pinctrl-0 = <&i2c_2_active>;
+		pinctrl-1 = <&i2c_2_sleep>;
+		status = "disabled";
+	};
+
+	i2c_3: i2c@c177000 { /* BLSP1 QUP3 */
+		compatible = "qcom,i2c-msm-v2";
+		#address-cells = <1>;
+		#size-cells = <0>;
+		reg-names = "qup_phys_addr";
+		reg = <0xC177000 0x600>;
+		interrupt-names = "qup_irq";
+		interrupts = <0 97 0>;
+		dmas = <&dma_blsp1 10 64 0x20000020 0x20>,
+			<&dma_blsp1 11 32 0x20000020 0x20>;
+		dma-names = "tx", "rx";
+		qcom,master-id = <86>;
+		qcom,clk-freq-out = <400000>;
+		qcom,clk-freq-in  = <19200000>;
+		clock-names = "iface_clk", "core_clk";
+		clocks = <&clock_gcc clk_gcc_blsp1_ahb_clk>,
+			 <&clock_gcc clk_gcc_blsp1_qup3_i2c_apps_clk>;
+		pinctrl-names = "i2c_active", "i2c_sleep";
+		pinctrl-0 = <&i2c_3_active>;
+		pinctrl-1 = <&i2c_3_sleep>;
+		status = "disabled";
+	};
+
+	i2c_4: i2c@c178000 { /* BLSP1 QUP4 */
+		compatible = "qcom,i2c-msm-v2";
+		#address-cells = <1>;
+		#size-cells = <0>;
+		reg-names = "qup_phys_addr";
+		reg = <0xC178000 0x600>;
+		interrupt-names = "qup_irq";
+		interrupts = <0 98 0>;
+		dmas = <&dma_blsp1 12 64 0x20000020 0x20>,
+			<&dma_blsp1 13 32 0x20000020 0x20>;
+		dma-names = "tx", "rx";
+		qcom,master-id = <86>;
+		qcom,clk-freq-out = <400000>;
+		qcom,clk-freq-in  = <19200000>;
+		clock-names = "iface_clk", "core_clk";
+		clocks = <&clock_gcc clk_gcc_blsp1_ahb_clk>,
+			 <&clock_gcc clk_gcc_blsp1_qup4_i2c_apps_clk>;
+		pinctrl-names = "i2c_active", "i2c_sleep";
+		pinctrl-0 = <&i2c_4_active>;
+		pinctrl-1 = <&i2c_4_sleep>;
+		status = "disabled";
+	};
+
+	i2c_5: i2c@c179000 { /* BLSP1 QUP5 */
+		compatible = "qcom,i2c-msm-v2";
+		#address-cells = <1>;
+		#size-cells = <0>;
+		reg-names = "qup_phys_addr";
+		reg = <0xC179000 0x600>;
+		interrupt-names = "qup_irq";
+		interrupts = <0 99 0>;
+		dmas = <&dma_blsp1 14 64 0x20000020 0x20>,
+			<&dma_blsp1 15 32 0x20000020 0x20>;
+		dma-names = "tx", "rx";
+		qcom,master-id = <86>;
+		qcom,clk-freq-out = <400000>;
+		qcom,clk-freq-in  = <19200000>;
+		clock-names = "iface_clk", "core_clk";
+		clocks = <&clock_gcc clk_gcc_blsp1_ahb_clk>,
+			 <&clock_gcc clk_gcc_blsp1_qup5_i2c_apps_clk>;
+		pinctrl-names = "i2c_active", "i2c_sleep";
+		pinctrl-0 = <&i2c_5_active>;
+		pinctrl-1 = <&i2c_5_sleep>;
+		status = "disabled";
+	};
+
+	i2c_6: i2c@c17a000 { /* BLSP1 QUP6 */
+		compatible = "qcom,i2c-msm-v2";
+		#address-cells = <1>;
+		#size-cells = <0>;
+		reg-names = "qup_phys_addr";
+		reg = <0xC17A000 0x600>;
+		interrupt-names = "qup_irq";
+		interrupts = <0 100 0>;
+		dmas = <&dma_blsp1 16 64 0x20000020 0x20>,
+			<&dma_blsp1 17 32 0x20000020 0x20>;
+		dma-names = "tx", "rx";
+		qcom,master-id = <86>;
+		qcom,clk-freq-out = <400000>;
+		qcom,clk-freq-in  = <19200000>;
+		clock-names = "iface_clk", "core_clk";
+		clocks = <&clock_gcc clk_gcc_blsp1_ahb_clk>,
+			 <&clock_gcc clk_gcc_blsp1_qup6_i2c_apps_clk>;
+		pinctrl-names = "i2c_active", "i2c_sleep";
+		pinctrl-0 = <&i2c_6_active>;
+		pinctrl-1 = <&i2c_6_sleep>;
+		status = "disabled";
+	};
+
+	i2c_7: i2c@c1b5000 { /* BLSP2 QUP1 */
+		compatible = "qcom,i2c-msm-v2";
+		#address-cells = <1>;
+		#size-cells = <0>;
+		reg-names = "qup_phys_addr";
+		reg = <0xC1B5000 0x600>;
+		interrupt-names = "qup_irq";
+		interrupts = <0 101 0>;
+		dmas = <&dma_blsp2 6 64 0x20000020 0x20>,
+			<&dma_blsp2 7 32 0x20000020 0x20>;
+		dma-names = "tx", "rx";
+		qcom,master-id = <84>;
+		qcom,clk-freq-out = <400000>;
+		qcom,clk-freq-in  = <19200000>;
+		clock-names = "iface_clk", "core_clk";
+		clocks = <&clock_gcc clk_gcc_blsp2_ahb_clk>,
+			 <&clock_gcc clk_gcc_blsp2_qup1_i2c_apps_clk>;
+		pinctrl-names = "i2c_active", "i2c_sleep";
+		pinctrl-0 = <&i2c_7_active>;
+		pinctrl-1 = <&i2c_7_sleep>;
+		status = "disabled";
+	};
+
+	i2c_8: i2c@c1b6000 { /* BLSP2 QUP2 */
+		compatible = "qcom,i2c-msm-v2";
+		#address-cells = <1>;
+		#size-cells = <0>;
+		reg-names = "qup_phys_addr";
+		reg = <0xC1B6000 0x600>;
+		interrupt-names = "qup_irq";
+		interrupts = <0 102 0>;
+		dmas = <&dma_blsp2 8 64 0x20000020 0x20>,
+			<&dma_blsp2 9 32 0x20000020 0x20>;
+		dma-names = "tx", "rx";
+		qcom,master-id = <84>;
+		qcom,clk-freq-out = <400000>;
+		qcom,clk-freq-in  = <19200000>;
+		clock-names = "iface_clk", "core_clk";
+		clocks = <&clock_gcc clk_gcc_blsp2_ahb_clk>,
+			 <&clock_gcc clk_gcc_blsp2_qup2_i2c_apps_clk>;
+		pinctrl-names = "i2c_active", "i2c_sleep";
+		pinctrl-0 = <&i2c_8_active>;
+		pinctrl-1 = <&i2c_8_sleep>;
+		status = "disabled";
+	};
+
+	i2c_9: i2c@c1b7000 { /* BLSP2 QUP3 */
+		compatible = "qcom,i2c-msm-v2";
+		#address-cells = <1>;
+		#size-cells = <0>;
+		reg-names = "qup_phys_addr";
+		reg = <0xC1B7000 0x600>;
+		interrupt-names = "qup_irq";
+		interrupts = <0 103 0>;
+		dmas = <&dma_blsp2 10 64 0x20000020 0x20>,
+			<&dma_blsp2 11 32 0x20000020 0x20>;
+		dma-names = "tx", "rx";
+		qcom,master-id = <84>;
+		qcom,clk-freq-out = <400000>;
+		qcom,clk-freq-in  = <19200000>;
+		clock-names = "iface_clk", "core_clk";
+		clocks = <&clock_gcc clk_gcc_blsp2_ahb_clk>,
+			 <&clock_gcc clk_gcc_blsp2_qup3_i2c_apps_clk>;
+		pinctrl-names = "i2c_active", "i2c_sleep";
+		pinctrl-0 = <&i2c_9_active>;
+		pinctrl-1 = <&i2c_9_sleep>;
+		status = "disabled";
+	};
+
+	i2c_10: i2c@c1b8000 { /* BLSP2 QUP4 */
+		compatible = "qcom,i2c-msm-v2";
+		#address-cells = <1>;
+		#size-cells = <0>;
+		reg-names = "qup_phys_addr";
+		reg = <0xC1B8000 0x600>;
+		interrupt-names = "qup_irq";
+		interrupts = <0 104 0>;
+		dmas = <&dma_blsp2 12 64 0x20000020 0x20>,
+			<&dma_blsp2 13 32 0x20000020 0x20>;
+		dma-names = "tx", "rx";
+		qcom,master-id = <84>;
+		qcom,clk-freq-out = <400000>;
+		qcom,clk-freq-in  = <19200000>;
+		clock-names = "iface_clk", "core_clk";
+		clocks = <&clock_gcc clk_gcc_blsp2_ahb_clk>,
+			 <&clock_gcc clk_gcc_blsp2_qup4_i2c_apps_clk>;
+		pinctrl-names = "i2c_active", "i2c_sleep";
+		pinctrl-0 = <&i2c_10_active>;
+		pinctrl-1 = <&i2c_10_sleep>;
+		status = "disabled";
+	};
+
+	i2c_11: i2c@c1b9000 { /* BLSP2 QUP5 */
+		compatible = "qcom,i2c-msm-v2";
+		#address-cells = <1>;
+		#size-cells = <0>;
+		reg-names = "qup_phys_addr";
+		reg = <0xC1B9000 0x600>;
+		interrupt-names = "qup_irq";
+		interrupts = <0 105 0>;
+		dmas = <&dma_blsp2 14 64 0x20000020 0x20>,
+			<&dma_blsp2 15 32 0x20000020 0x20>;
+		dma-names = "tx", "rx";
+		qcom,master-id = <84>;
+		qcom,clk-freq-out = <400000>;
+		qcom,clk-freq-in  = <19200000>;
+		clock-names = "iface_clk", "core_clk";
+		clocks = <&clock_gcc clk_gcc_blsp2_ahb_clk>,
+			 <&clock_gcc clk_gcc_blsp2_qup5_i2c_apps_clk>;
+		pinctrl-names = "i2c_active", "i2c_sleep";
+		pinctrl-0 = <&i2c_11_active>;
+		pinctrl-1 = <&i2c_11_sleep>;
+		status = "disabled";
+	};
+
+	i2c_12: i2c@c1ba000 { /* BLSP2 QUP6 */
+		compatible = "qcom,i2c-msm-v2";
+		#address-cells = <1>;
+		#size-cells = <0>;
+		reg-names = "qup_phys_addr";
+		reg = <0xC1BA000 0x600>;
+		interrupt-names = "qup_irq";
+		interrupts = <0 106 0>;
+		dmas = <&dma_blsp2 16 64 0x20000020 0x20>,
+			<&dma_blsp2 17 32 0x20000020 0x20>;
+		dma-names = "tx", "rx";
+		qcom,master-id = <84>;
+		qcom,clk-freq-out = <400000>;
+		qcom,clk-freq-in  = <19200000>;
+		clock-names = "iface_clk", "core_clk";
+		clocks = <&clock_gcc clk_gcc_blsp2_ahb_clk>,
+			 <&clock_gcc clk_gcc_blsp2_qup6_i2c_apps_clk>;
+		pinctrl-names = "i2c_active", "i2c_sleep";
+		pinctrl-0 = <&i2c_12_active>;
+		pinctrl-1 = <&i2c_12_sleep>;
+		status = "disabled";
+	};
+
+	spi_1: spi@c175000 { /* BLSP1 QUP1 */
+		compatible = "qcom,spi-qup-v2";
+		#address-cells = <1>;
+		#size-cells = <0>;
+		reg-names = "spi_physical", "spi_bam_physical";
+		reg = <0xC175000 0x600>,
+		      <0xC144000 0x25000>;
+		interrupt-names = "spi_irq", "spi_bam_irq";
+		interrupts = <0 95 0>, <0 238 0>;
+		spi-max-frequency = <50000000>;
+		qcom,use-bam;
+		qcom,ver-reg-exists;
+		qcom,bam-consumer-pipe-index = <6>;
+		qcom,bam-producer-pipe-index = <7>;
+		qcom,master-id = <86>;
+		qcom,use-pinctrl;
+		pinctrl-names = "spi_default", "spi_sleep";
+		pinctrl-0 = <&spi_1_active>;
+		pinctrl-1 = <&spi_1_sleep>;
+		clock-names = "iface_clk", "core_clk";
+		clocks = <&clock_gcc clk_gcc_blsp1_ahb_clk>,
+			 <&clock_gcc clk_gcc_blsp1_qup1_spi_apps_clk>;
+		status = "disabled";
+	};
+
+	spi_2: spi@c176000 { /* BLSP1 QUP2 */
+		compatible = "qcom,spi-qup-v2";
+		#address-cells = <1>;
+		#size-cells = <0>;
+		reg-names = "spi_physical", "spi_bam_physical";
+		reg = <0xC176000 0x600>,
+		      <0xC144000 0x25000>;
+		interrupt-names = "spi_irq", "spi_bam_irq";
+		interrupts = <0 96 0>, <0 238 0>;
+		spi-max-frequency = <50000000>;
+		qcom,use-bam;
+		qcom,ver-reg-exists;
+		qcom,bam-consumer-pipe-index = <8>;
+		qcom,bam-producer-pipe-index = <9>;
+		qcom,master-id = <86>;
+		qcom,use-pinctrl;
+		pinctrl-names = "spi_default", "spi_sleep";
+		pinctrl-0 = <&spi_2_active>;
+		pinctrl-1 = <&spi_2_sleep>;
+		clock-names = "iface_clk", "core_clk";
+		clocks = <&clock_gcc clk_gcc_blsp1_ahb_clk>,
+			 <&clock_gcc clk_gcc_blsp1_qup2_spi_apps_clk>;
+		status = "disabled";
+	};
+
+	spi_3: spi@c177000 { /* BLSP1 QUP3 */
+		compatible = "qcom,spi-qup-v2";
+		#address-cells = <1>;
+		#size-cells = <0>;
+		reg-names = "spi_physical", "spi_bam_physical";
+		reg = <0xC177000 0x600>,
+		      <0xC144000 0x25000>;
+		interrupt-names = "spi_irq", "spi_bam_irq";
+		interrupts = <0 97 0>, <0 238 0>;
+		spi-max-frequency = <50000000>;
+		qcom,use-bam;
+		qcom,ver-reg-exists;
+		qcom,bam-consumer-pipe-index = <10>;
+		qcom,bam-producer-pipe-index = <11>;
+		qcom,master-id = <86>;
+		qcom,use-pinctrl;
+		pinctrl-names = "spi_default", "spi_sleep";
+		pinctrl-0 = <&spi_3_active>;
+		pinctrl-1 = <&spi_3_sleep>;
+		clock-names = "iface_clk", "core_clk";
+		clocks = <&clock_gcc clk_gcc_blsp1_ahb_clk>,
+			 <&clock_gcc clk_gcc_blsp1_qup3_spi_apps_clk>;
+		status = "disabled";
+	};
+
+	spi_4: spi@c178000 { /* BLSP1 QUP4 */
+		compatible = "qcom,spi-qup-v2";
+		#address-cells = <1>;
+		#size-cells = <0>;
+		reg-names = "spi_physical", "spi_bam_physical";
+		reg = <0xC178000 0x600>,
+		      <0xC144000 0x25000>;
+		interrupt-names = "spi_irq", "spi_bam_irq";
+		interrupts = <0 98 0>, <0 238 0>;
+		spi-max-frequency = <50000000>;
+		qcom,use-bam;
+		qcom,ver-reg-exists;
+		qcom,bam-consumer-pipe-index = <12>;
+		qcom,bam-producer-pipe-index = <13>;
+		qcom,master-id = <86>;
+		qcom,use-pinctrl;
+		pinctrl-names = "spi_default", "spi_sleep";
+		pinctrl-0 = <&spi_4_active>;
+		pinctrl-1 = <&spi_4_sleep>;
+		clock-names = "iface_clk", "core_clk";
+		clocks = <&clock_gcc clk_gcc_blsp1_ahb_clk>,
+			 <&clock_gcc clk_gcc_blsp1_qup4_spi_apps_clk>;
+		status = "disabled";
+	};
+
+	spi_5: spi@c179000 { /* BLSP1 QUP5 */
+		compatible = "qcom,spi-qup-v2";
+		#address-cells = <1>;
+		#size-cells = <0>;
+		reg-names = "spi_physical", "spi_bam_physical";
+		reg = <0xC179000 0x600>,
+		      <0xC144000 0x25000>;
+		interrupt-names = "spi_irq", "spi_bam_irq";
+		interrupts = <0 99 0>, <0 238 0>;
+		spi-max-frequency = <50000000>;
+		qcom,use-bam;
+		qcom,ver-reg-exists;
+		qcom,bam-consumer-pipe-index = <14>;
+		qcom,bam-producer-pipe-index = <15>;
+		qcom,master-id = <86>;
+		qcom,use-pinctrl;
+		pinctrl-names = "spi_default", "spi_sleep";
+		pinctrl-0 = <&spi_5_active>;
+		pinctrl-1 = <&spi_5_sleep>;
+		clock-names = "iface_clk", "core_clk";
+		clocks = <&clock_gcc clk_gcc_blsp1_ahb_clk>,
+			 <&clock_gcc clk_gcc_blsp1_qup5_spi_apps_clk>;
+		status = "disabled";
+	};
+
+	spi_6: spi@c17a000 { /* BLSP1 QUP6 */
+		compatible = "qcom,spi-qup-v2";
+		#address-cells = <1>;
+		#size-cells = <0>;
+		reg-names = "spi_physical", "spi_bam_physical";
+		reg = <0xC17A000 0x600>,
+		      <0xC144000 0x25000>;
+		interrupt-names = "spi_irq", "spi_bam_irq";
+		interrupts = <0 100 0>, <0 238 0>;
+		spi-max-frequency = <50000000>;
+		qcom,use-bam;
+		qcom,ver-reg-exists;
+		qcom,bam-consumer-pipe-index = <16>;
+		qcom,bam-producer-pipe-index = <17>;
+		qcom,master-id = <86>;
+		qcom,use-pinctrl;
+		pinctrl-names = "spi_default", "spi_sleep";
+		pinctrl-0 = <&spi_6_active>;
+		pinctrl-1 = <&spi_6_sleep>;
+		clock-names = "iface_clk", "core_clk";
+		clocks = <&clock_gcc clk_gcc_blsp1_ahb_clk>,
+			 <&clock_gcc clk_gcc_blsp1_qup6_spi_apps_clk>;
+		status = "disabled";
+	};
+
+	spi_7: spi@c1b5000 { /* BLSP2 QUP1 */
+		compatible = "qcom,spi-qup-v2";
+		#address-cells = <1>;
+		#size-cells = <0>;
+		reg-names = "spi_physical", "spi_bam_physical";
+		reg = <0xC1B5000 0x600>,
+		      <0xC184000 0x25000>;
+		interrupt-names = "spi_irq", "spi_bam_irq";
+		interrupts = <0 101 0>, <0 239 0>;
+		spi-max-frequency = <50000000>;
+		qcom,use-bam;
+		qcom,ver-reg-exists;
+		qcom,bam-consumer-pipe-index = <6>;
+		qcom,bam-producer-pipe-index = <7>;
+		qcom,master-id = <84>;
+		qcom,use-pinctrl;
+		pinctrl-names = "spi_default", "spi_sleep";
+		pinctrl-0 = <&spi_7_active>;
+		pinctrl-1 = <&spi_7_sleep>;
+		clock-names = "iface_clk", "core_clk";
+		clocks = <&clock_gcc clk_gcc_blsp2_ahb_clk>,
+			 <&clock_gcc clk_gcc_blsp2_qup1_spi_apps_clk>;
+		status = "disabled";
+	};
+
+	spi_8: spi@c1b6000 { /* BLSP2 QUP2 */
+		compatible = "qcom,spi-qup-v2";
+		#address-cells = <1>;
+		#size-cells = <0>;
+		reg-names = "spi_physical", "spi_bam_physical";
+		reg = <0xC1B6000 0x600>,
+		      <0xC184000 0x25000>;
+		interrupt-names = "spi_irq", "spi_bam_irq";
+		interrupts = <0 102 0>, <0 239 0>;
+		spi-max-frequency = <50000000>;
+		qcom,use-bam;
+		qcom,ver-reg-exists;
+		qcom,bam-consumer-pipe-index = <8>;
+		qcom,bam-producer-pipe-index = <9>;
+		qcom,master-id = <84>;
+		qcom,use-pinctrl;
+		pinctrl-names = "spi_default", "spi_sleep";
+		pinctrl-0 = <&spi_8_active>;
+		pinctrl-1 = <&spi_8_sleep>;
+		clock-names = "iface_clk", "core_clk";
+		clocks = <&clock_gcc clk_gcc_blsp2_ahb_clk>,
+			 <&clock_gcc clk_gcc_blsp2_qup2_spi_apps_clk>;
+		status = "disabled";
+	};
+
+	spi_9: spi@c1b7000 { /* BLSP2 QUP3 */
+		compatible = "qcom,spi-qup-v2";
+		#address-cells = <1>;
+		#size-cells = <0>;
+		reg-names = "spi_physical", "spi_bam_physical";
+		reg = <0xC1B7000 0x600>,
+		      <0xC184000 0x25000>;
+		interrupt-names = "spi_irq", "spi_bam_irq";
+		interrupts = <0 103 0>, <0 239 0>;
+		spi-max-frequency = <50000000>;
+		qcom,use-bam;
+		qcom,ver-reg-exists;
+		qcom,bam-consumer-pipe-index = <10>;
+		qcom,bam-producer-pipe-index = <11>;
+		qcom,master-id = <84>;
+		qcom,use-pinctrl;
+		pinctrl-names = "spi_default", "spi_sleep";
+		pinctrl-0 = <&spi_9_active>;
+		pinctrl-1 = <&spi_9_sleep>;
+		clock-names = "iface_clk", "core_clk";
+		clocks = <&clock_gcc clk_gcc_blsp2_ahb_clk>,
+			 <&clock_gcc clk_gcc_blsp2_qup3_spi_apps_clk>;
+		status = "disabled";
+	};
+
+	spi_10: spi@c1b8000 { /* BLSP2 QUP4 */
+		compatible = "qcom,spi-qup-v2";
+		#address-cells = <1>;
+		#size-cells = <0>;
+		reg-names = "spi_physical", "spi_bam_physical";
+		reg = <0xC1B8000 0x600>,
+		      <0xC184000 0x25000>;
+		interrupt-names = "spi_irq", "spi_bam_irq";
+		interrupts = <0 104 0>, <0 239 0>;
+		spi-max-frequency = <50000000>;
+		qcom,use-bam;
+		qcom,ver-reg-exists;
+		qcom,bam-consumer-pipe-index = <12>;
+		qcom,bam-producer-pipe-index = <13>;
+		qcom,master-id = <84>;
+		qcom,use-pinctrl;
+		pinctrl-names = "spi_default", "spi_sleep";
+		pinctrl-0 = <&spi_10_active>;
+		pinctrl-1 = <&spi_10_sleep>;
+		clock-names = "iface_clk", "core_clk";
+		clocks = <&clock_gcc clk_gcc_blsp2_ahb_clk>,
+			 <&clock_gcc clk_gcc_blsp2_qup4_spi_apps_clk>;
+		status = "disabled";
+	};
+
+	spi_11: spi@c1b9000 { /* BLSP2 QUP5 */
+		compatible = "qcom,spi-qup-v2";
+		#address-cells = <1>;
+		#size-cells = <0>;
+		reg-names = "spi_physical", "spi_bam_physical";
+		reg = <0xC1B9000 0x600>,
+		      <0xC184000 0x25000>;
+		interrupt-names = "spi_irq", "spi_bam_irq";
+		interrupts = <0 105 0>, <0 239 0>;
+		spi-max-frequency = <50000000>;
+		qcom,use-bam;
+		qcom,ver-reg-exists;
+		qcom,bam-consumer-pipe-index = <14>;
+		qcom,bam-producer-pipe-index = <15>;
+		qcom,master-id = <84>;
+		qcom,use-pinctrl;
+		pinctrl-names = "spi_default", "spi_sleep";
+		pinctrl-0 = <&spi_11_active>;
+		pinctrl-1 = <&spi_11_sleep>;
+		clock-names = "iface_clk", "core_clk";
+		clocks = <&clock_gcc clk_gcc_blsp2_ahb_clk>,
+			 <&clock_gcc clk_gcc_blsp2_qup5_spi_apps_clk>;
+		status = "disabled";
+	};
+
+	spi_12: spi@c1ba000 { /* BLSP2 QUP6 */
+		compatible = "qcom,spi-qup-v2";
+		#address-cells = <1>;
+		#size-cells = <0>;
+		reg-names = "spi_physical", "spi_bam_physical";
+		reg = <0xC1BA000 0x600>,
+		      <0xC184000 0x25000>;
+		interrupt-names = "spi_irq", "spi_bam_irq";
+		interrupts = <0 106 0>, <0 239 0>;
+		spi-max-frequency = <50000000>;
+		qcom,use-bam;
+		qcom,ver-reg-exists;
+		qcom,bam-consumer-pipe-index = <16>;
+		qcom,bam-producer-pipe-index = <17>;
+		qcom,master-id = <84>;
+		qcom,use-pinctrl;
+		pinctrl-names = "spi_default", "spi_sleep";
+		pinctrl-0 = <&spi_12_active>;
+		pinctrl-1 = <&spi_12_sleep>;
+		clock-names = "iface_clk", "core_clk";
+		clocks = <&clock_gcc clk_gcc_blsp2_ahb_clk>,
+			 <&clock_gcc clk_gcc_blsp2_qup6_spi_apps_clk>;
+		status = "disabled";
+	};
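+
+	/*
+	 * Each QUP is bound to a BAM pipe pair on its BLSP: the
+	 * consumer/producer indexes step by two per instance (6/7 on QUP1
+	 * up to 16/17 on QUP6), and qcom,master-id names the bus master
+	 * port used for bandwidth votes (86 for BLSP1, 84 for BLSP2).
+	 */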
+
+	blsp1_uart1_hs: uart@c16f000 { /* BLSP1 UART1 */
+		compatible = "qcom,msm-hsuart-v14";
+		reg = <0xC16F000 0x200>,
+		    <0xC144000 0x25000>;
+		reg-names = "core_mem", "bam_mem";
+		interrupt-names = "core_irq", "bam_irq", "wakeup_irq";
+		#address-cells = <0>;
+		interrupt-parent = <&blsp1_uart1_hs>;
+		interrupts = <0 1 2>;
+		#interrupt-cells = <1>;
+		interrupt-map-mask = <0xffffffff>;
+		interrupt-map = <0 &intc 0 0 107 0
+			    1 &intc 0 0 238 0
+			    2 &tlmm 1 0>;
+
+		qcom,inject-rx-on-wakeup;
+		qcom,rx-char-to-inject = <0xFD>;
+
+		qcom,bam-tx-ep-pipe-index = <0>;
+		qcom,bam-rx-ep-pipe-index = <1>;
+		qcom,master-id = <86>;
+		clock-names = "core_clk", "iface_clk";
+		clocks = <&clock_gcc clk_gcc_blsp1_uart1_apps_clk>,
+		    <&clock_gcc clk_gcc_blsp1_ahb_clk>;
+		pinctrl-names = "sleep", "default";
+		pinctrl-0 = <&blsp1_uart1_sleep>;
+		pinctrl-1 = <&blsp1_uart1_active>;
+
+		qcom,msm-bus,name = "buart1";
+		qcom,msm-bus,num-cases = <2>;
+		qcom,msm-bus,num-paths = <1>;
+		qcom,msm-bus,vectors-KBps =
+			    <86 512 0 0>,
+			    <86 512 500 800>;
+		status = "disabled";
+	};
+
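+	/*
+	 * Each hsuart node is its own interrupt nexus: the local indexes in
+	 * interrupts = <0 1 2> are routed by interrupt-map to the GIC for
+	 * the core and BAM IRQs and to a TLMM GPIO for the RX wakeup. On
+	 * wakeup the 0xFD character is injected into the RX stream, per
+	 * qcom,inject-rx-on-wakeup / qcom,rx-char-to-inject, and the two
+	 * msm-bus cases vote zero or 500/800 KBps on master port 86.
+	 */
+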
+	blsp1_uart2_hs: uart@c170000 { /* BLSP1 UART2 */
+		compatible = "qcom,msm-hsuart-v14";
+		reg = <0xC170000 0x200>,
+		    <0xC144000 0x25000>;
+		reg-names = "core_mem", "bam_mem";
+		interrupt-names = "core_irq", "bam_irq", "wakeup_irq";
+		#address-cells = <0>;
+		interrupt-parent = <&blsp1_uart2_hs>;
+		interrupts = <0 1 2>;
+		#interrupt-cells = <1>;
+		interrupt-map-mask = <0xffffffff>;
+		interrupt-map = <0 &intc 0 0 108 0
+			    1 &intc 0 0 238 0
+			    2 &tlmm 34 0>;
+
+		qcom,inject-rx-on-wakeup;
+		qcom,rx-char-to-inject = <0xFD>;
+
+		qcom,bam-tx-ep-pipe-index = <2>;
+		qcom,bam-rx-ep-pipe-index = <3>;
+		qcom,master-id = <86>;
+		clock-names = "core_clk", "iface_clk";
+		clocks = <&clock_gcc clk_gcc_blsp1_uart2_apps_clk>,
+		    <&clock_gcc clk_gcc_blsp1_ahb_clk>;
+		pinctrl-names = "sleep", "default";
+		pinctrl-0 = <&blsp1_uart2_sleep>;
+		pinctrl-1 = <&blsp1_uart2_active>;
+
+		qcom,msm-bus,name = "buart2";
+		qcom,msm-bus,num-cases = <2>;
+		qcom,msm-bus,num-paths = <1>;
+		qcom,msm-bus,vectors-KBps =
+			    <86 512 0 0>,
+			    <86 512 500 800>;
+		status = "disabled";
+	};
+
+	blsp1_uart3_hs: uart@c171000 { /* BLSP1 UART3 */
+		compatible = "qcom,msm-hsuart-v14";
+		reg = <0xC171000 0x200>,
+		    <0xC144000 0x25000>;
+		reg-names = "core_mem", "bam_mem";
+		interrupt-names = "core_irq", "bam_irq", "wakeup_irq";
+		#address-cells = <0>;
+		interrupt-parent = <&blsp1_uart3_hs>;
+		interrupts = <0 1 2>;
+		#interrupt-cells = <1>;
+		interrupt-map-mask = <0xffffffff>;
+		interrupt-map = <0 &intc 0 0 109 0
+			    1 &intc 0 0 238 0
+			    2 &tlmm 46 0>;
+
+		qcom,inject-rx-on-wakeup;
+		qcom,rx-char-to-inject = <0xFD>;
+
+		qcom,bam-tx-ep-pipe-index = <4>;
+		qcom,bam-rx-ep-pipe-index = <5>;
+		qcom,master-id = <86>;
+		clock-names = "core_clk", "iface_clk";
+		clocks = <&clock_gcc clk_gcc_blsp1_uart3_apps_clk>,
+		    <&clock_gcc clk_gcc_blsp1_ahb_clk>;
+		pinctrl-names = "sleep", "default";
+		pinctrl-0 = <&blsp1_uart3_tx_sleep>, <&blsp1_uart3_rxcts_sleep>,
+					<&blsp1_uart3_rfr_sleep>;
+		pinctrl-1 = <&blsp1_uart3_tx_active>,
+			<&blsp1_uart3_rxcts_active>, <&blsp1_uart3_rfr_active>;
+
+		qcom,msm-bus,name = "buart3";
+		qcom,msm-bus,num-cases = <2>;
+		qcom,msm-bus,num-paths = <1>;
+		qcom,msm-bus,vectors-KBps =
+			    <86 512 0 0>,
+			    <86 512 500 800>;
+		status = "disabled";
+	};
+
+	blsp2_uart1_hs: uart@c1af000 { /* BLSP2 UART1 */
+		compatible = "qcom,msm-hsuart-v14";
+		reg = <0xC1AF000 0x200>,
+		    <0xC184000 0x25000>;
+		reg-names = "core_mem", "bam_mem";
+		interrupt-names = "core_irq", "bam_irq", "wakeup_irq";
+		#address-cells = <0>;
+		interrupt-parent = <&blsp2_uart1_hs>;
+		interrupts = <0 1 2>;
+		#interrupt-cells = <1>;
+		interrupt-map-mask = <0xffffffff>;
+		interrupt-map = <0 &intc 0 0 113 0
+			    1 &intc 0 0 239 0
+			    2 &tlmm 54 0>;
+
+		qcom,inject-rx-on-wakeup;
+		qcom,rx-char-to-inject = <0xFD>;
+
+		qcom,bam-tx-ep-pipe-index = <0>;
+		qcom,bam-rx-ep-pipe-index = <1>;
+		qcom,master-id = <84>;
+		clock-names = "core_clk", "iface_clk";
+		clocks = <&clock_gcc clk_gcc_blsp2_uart1_apps_clk>,
+		    <&clock_gcc clk_gcc_blsp2_ahb_clk>;
+		pinctrl-names = "sleep", "default";
+		pinctrl-0 = <&blsp2_uart1_sleep>;
+		pinctrl-1 = <&blsp2_uart1_active>;
+
+		qcom,msm-bus,name = "buart1";
+		qcom,msm-bus,num-cases = <2>;
+		qcom,msm-bus,num-paths = <1>;
+		qcom,msm-bus,vectors-KBps =
+			    <86 512 0 0>,
+			    <86 512 500 800>;
+		status = "disabled";
+	};
+
+	blsp2_uart2_hs: uart@c1b0000 { /* BLSP2 UART2 */
+		compatible = "qcom,msm-hsuart-v14";
+		reg = <0xC1B0000 0x200>,
+		    <0xC184000 0x25000>;
+		reg-names = "core_mem", "bam_mem";
+		interrupt-names = "core_irq", "bam_irq", "wakeup_irq";
+		#address-cells = <0>;
+		interrupt-parent = <&blsp2_uart2_hs>;
+		interrupts = <0 1 2>;
+		#interrupt-cells = <1>;
+		interrupt-map-mask = <0xffffffff>;
+		interrupt-map = <0 &intc 0 0 114 0
+			    1 &intc 0 0 239 0
+			    2 &tlmm 5 0>;
+
+		qcom,inject-rx-on-wakeup;
+		qcom,rx-char-to-inject = <0xFD>;
+
+		qcom,bam-tx-ep-pipe-index = <2>;
+		qcom,bam-rx-ep-pipe-index = <3>;
+		qcom,master-id = <84>;
+		clock-names = "core_clk", "iface_clk";
+		clocks = <&clock_gcc clk_gcc_blsp2_uart2_apps_clk>,
+		    <&clock_gcc clk_gcc_blsp2_ahb_clk>;
+		pinctrl-names = "sleep", "default";
+		pinctrl-0 = <&blsp2_uart2_sleep>;
+		pinctrl-1 = <&blsp2_uart2_active>;
+
+		qcom,msm-bus,name = "buart2";
+		qcom,msm-bus,num-cases = <2>;
+		qcom,msm-bus,num-paths = <1>;
+		qcom,msm-bus,vectors-KBps =
+			    <86 512 0 0>,
+			    <86 512 500 800>;
+		status = "disabled";
+	};
+
+	blsp2_uart3_hs: uart@c1b1000 { /* BLSP2 UART3 */
+		compatible = "qcom,msm-hsuart-v14";
+		reg = <0xC1B1000 0x200>,
+		    <0xC184000 0x25000>;
+		reg-names = "core_mem", "bam_mem";
+		interrupt-names = "core_irq", "bam_irq", "wakeup_irq";
+		#address-cells = <0>;
+		interrupt-parent = <&blsp2_uart3_hs>;
+		interrupts = <0 1 2>;
+		#interrupt-cells = <1>;
+		interrupt-map-mask = <0xffffffff>;
+		interrupt-map = <0 &intc 0 0 115 0
+			    1 &intc 0 0 239 0
+			    2 &tlmm 50 0>;
+
+		qcom,inject-rx-on-wakeup;
+		qcom,rx-char-to-inject = <0xFD>;
+
+		qcom,bam-tx-ep-pipe-index = <4>;
+		qcom,bam-rx-ep-pipe-index = <5>;
+		qcom,master-id = <84>;
+		clock-names = "core_clk", "iface_clk";
+		clocks = <&clock_gcc clk_gcc_blsp2_uart3_apps_clk>,
+		    <&clock_gcc clk_gcc_blsp2_ahb_clk>;
+		pinctrl-names = "sleep", "default";
+		pinctrl-0 = <&blsp2_uart3_sleep>;
+		pinctrl-1 = <&blsp2_uart3_active>;
+
+		qcom,msm-bus,name = "buart3";
+		qcom,msm-bus,num-cases = <2>;
+		qcom,msm-bus,num-paths = <1>;
+		qcom,msm-bus,vectors-KBps =
+			    <86 512 0 0>,
+			    <86 512 500 800>;
+		status = "disabled";
+	};
+};
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/arch/arm64/boot/dts/qcom/msm8998-bus.dtsi	2019-01-22 16:16:21.191225470 +0100
@@ -0,0 +1,1467 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <dt-bindings/msm/msm-bus-ids.h>
+
+&soc {
+	ad_hoc_bus: ad-hoc-bus {
+		/* Version = 4 */
+		compatible = "qcom,msm-bus-device";
+		reg = <0x1620000 0x40000>,
+			<0x1000000 0x80000>,
+			<0x1500000 0x10000>,
+			<0x1660000 0x60000>,
+			<0x1700000 0x60000>,
+			<0x17900000 0x10000>,
+			<0x1740000 0x10000>,
+			<0x1740000 0x10000>;
+
+		reg-names = "snoc-base", "bimc-base", "cnoc-base",
+			"a1noc-base", "a2noc-base", "gnoc-base",
+			"mmnoc-ahb-base", "mnoc-base";
+
+		/* Buses */
+		fab_a1noc: fab-a1noc {
+			cell-id = <MSM_BUS_FAB_A1_NOC>;
+			label = "fab-a1noc";
+			qcom,fab-dev;
+			qcom,base-name = "a1noc-base";
+			qcom,bus-type = <1>;
+			qcom,qos-off = <4096>;
+			qcom,base-offset = <36864>;
+			clock-names = "bus_clk", "bus_a_clk";
+			clocks = <&clock_gcc clk_aggre1_noc_clk>,
+				<&clock_gcc clk_aggre1_noc_a_clk>;
+			qcom,node-qos-clks {
+				clock-names =
+				"clk-ufs-axi-clk",
+				"clk-aggre1-ufs-axi-no-rate",
+				"clk-aggre1-usb3-axi-cfg-no-rate",
+				"clk-blsp2-ahb-no-rate";
+				clocks =
+				<&clock_gcc clk_gcc_ufs_axi_clk>,
+				<&clock_gcc clk_gcc_aggre1_ufs_axi_clk>,
+				<&clock_gcc clk_gcc_aggre1_usb3_axi_clk>,
+				<&clock_gcc clk_gcc_blsp2_ahb_clk>;
+			};
+		};
+
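+		/*
+		 * Physical fabrics share this shape: qcom,bus-type selects
+		 * the flavour (1 = NoC, 2 = BIMC), qos-off/base-offset
+		 * locate the per-port QoS registers inside the "*-base"
+		 * window, and qcom,node-qos-clks lists clocks that must run
+		 * while QoS is programmed. The virtual fabrics (cr_virt,
+		 * gnoc) skip all of this via qcom,bypass-qos-prg.
+		 */
+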
+		fab_a2noc: fab-a2noc {
+			cell-id = <MSM_BUS_FAB_A2_NOC>;
+			label = "fab-a2noc";
+			qcom,fab-dev;
+			qcom,base-name = "a2noc-base";
+			qcom,bus-type = <1>;
+			qcom,qos-off = <4096>;
+			qcom,base-offset = <20480>;
+			clock-names = "bus_clk", "bus_a_clk";
+			clocks = <&clock_gcc clk_aggre2_noc_clk>,
+				<&clock_gcc clk_aggre2_noc_a_clk>;
+			qcom,node-qos-clks {
+				clock-names =
+				"clk-ipa-clk",
+				"clk-sdcc2-ahb-no-rate",
+				"clk-sdcc4-ahb-no-rate",
+				"clk-blsp1-ahb-no-rate";
+				clocks =
+				<&clock_gcc clk_ipa_clk>,
+				<&clock_gcc clk_gcc_sdcc2_ahb_clk>,
+				<&clock_gcc clk_gcc_sdcc4_ahb_clk>,
+				<&clock_gcc clk_gcc_blsp1_ahb_clk>;
+			};
+		};
+
+		fab_bimc: fab-bimc {
+			cell-id = <MSM_BUS_FAB_BIMC>;
+			label = "fab-bimc";
+			qcom,fab-dev;
+			qcom,base-name = "bimc-base";
+			qcom,bus-type = <2>;
+			qcom,util-fact = <153>;
+			clock-names = "bus_clk", "bus_a_clk";
+			clocks = <&clock_gcc clk_bimc_msmbus_clk>,
+				<&clock_gcc clk_bimc_msmbus_a_clk>;
+		};
+
+		fab_cnoc: fab-cnoc {
+			cell-id = <MSM_BUS_FAB_CONFIG_NOC>;
+			label = "fab-cnoc";
+			qcom,fab-dev;
+			qcom,base-name = "cnoc-base";
+			qcom,bus-type = <1>;
+			clock-names = "bus_clk", "bus_a_clk";
+			clocks = <&clock_gcc clk_cnoc_clk>,
+				<&clock_gcc clk_cnoc_a_clk>;
+		};
+
+		fab_cr_virt: fab-cr_virt {
+			cell-id = <MSM_BUS_FAB_CR_VIRT>;
+			label = "fab-cr_virt";
+			qcom,virt-dev;
+			qcom,base-name = "cr_virt-base";
+			qcom,bypass-qos-prg;
+		};
+
+		fab_gnoc: fab-gnoc {
+			cell-id = <MSM_BUS_FAB_GNOC>;
+			label = "fab-gnoc";
+			qcom,virt-dev;
+			qcom,base-name = "gnoc-base";
+			qcom,bypass-qos-prg;
+		};
+
+		fab_mnoc: fab-mnoc {
+			cell-id = <MSM_BUS_FAB_MMSS_NOC>;
+			label = "fab-mnoc";
+			qcom,fab-dev;
+			qcom,base-name = "mnoc-base";
+			qcom,bus-type = <1>;
+			qcom,qos-off = <4096>;
+			qcom,base-offset = <16384>;
+			qcom,util-fact = <153>;
+			clock-names = "bus_clk", "bus_a_clk";
+			clocks = <&clock_gcc clk_mmssnoc_axi_clk>,
+				<&clock_gcc clk_mmssnoc_axi_a_clk>;
+			clk-camss-ahb-no-rate-supply =
+					<&gdsc_camss_top>;
+			clk-video-ahb-no-rate-supply =
+					<&gdsc_venus>;
+			clk-video-axi-no-rate-supply =
+					<&gdsc_venus>;
+			qcom,node-qos-clks {
+				clock-names =
+				"clk-noc-cfg-ahb-no-rate",
+				"clk-mnoc-ahb-no-rate",
+				"clk-camss-ahb-no-rate",
+				"clk-video-ahb-no-rate",
+				"clk-video-axi-no-rate";
+				clocks =
+				<&clock_gcc clk_gcc_mmss_noc_cfg_ahb_clk>,
+				<&clock_mmss clk_mmss_mnoc_ahb_clk>,
+				<&clock_mmss clk_mmss_camss_ahb_clk>,
+				<&clock_mmss clk_mmss_video_ahb_clk>,
+				<&clock_mmss clk_mmss_video_axi_clk>;
+			};
+		};
+
+		fab_snoc: fab-snoc {
+			cell-id = <MSM_BUS_FAB_SYS_NOC>;
+			label = "fab-snoc";
+			qcom,fab-dev;
+			qcom,base-name = "snoc-base";
+			qcom,bus-type = <1>;
+			qcom,qos-off = <4096>;
+			qcom,base-offset = <20480>;
+			clock-names = "bus_clk", "bus_a_clk";
+			clocks = <&clock_gcc clk_snoc_clk>,
+				<&clock_gcc clk_snoc_a_clk>;
+		};
+
+		fab_mnoc_ahb: fab-mnoc-ahb {
+			cell-id = <MSM_BUS_FAB_MMSS_AHB>;
+			label = "fab-mnoc-ahb";
+			qcom,fab-dev;
+			qcom,base-name = "mmnoc-ahb-base";
+			qcom,bypass-qos-prg;
+			qcom,setrate-only-clk;
+			qcom,bus-type = <1>;
+			clock-names = "bus_clk", "bus_a_clk";
+			clocks = <&clock_mmss clk_ahb_clk_src>,
+			     <&clock_mmss clk_ahb_clk_src>;
+		};
+
+		/* Masters */
+
+		mas_pcie_0: mas-pcie-0 {
+			cell-id = <MSM_BUS_MASTER_PCIE>;
+			label = "mas-pcie-0";
+			qcom,buswidth = <16>;
+			qcom,agg-ports = <1>;
+			qcom,ap-owned;
+			qcom,qport = <1>;
+			qcom,qos-mode = "fixed";
+			qcom,connections = <&slv_a1noc_snoc>;
+			qcom,prio1 = <1>;
+			qcom,prio0 = <1>;
+			qcom,bus-dev = <&fab_a1noc>;
+			qcom,mas-rpm-id = <ICBID_MASTER_PCIE_0>;
+		};
+
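+		/*
+		 * Master (mas-*) nodes attach to one fabric via qcom,bus-dev
+		 * and name their QoS port with qcom,qport. qos-mode "fixed"
+		 * programs the prio0/prio1 priorities given here, while the
+		 * many masters below that use "bypass" keep the NoC default
+		 * arbitration instead.
+		 */
+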
+		mas_usb3: mas-usb3 {
+			cell-id = <MSM_BUS_MASTER_USB3>;
+			label = "mas-usb3";
+			qcom,buswidth = <16>;
+			qcom,agg-ports = <1>;
+			qcom,ap-owned;
+			qcom,qport = <2>;
+			qcom,qos-mode = "fixed";
+			qcom,connections = <&slv_a1noc_snoc>;
+			qcom,prio1 = <1>;
+			qcom,prio0 = <1>;
+			qcom,bus-dev = <&fab_a1noc>;
+			qcom,mas-rpm-id = <ICBID_MASTER_USB3>;
+		};
+
+		mas_ufs: mas-ufs {
+			cell-id = <MSM_BUS_MASTER_UFS>;
+			label = "mas-ufs";
+			qcom,buswidth = <16>;
+			qcom,agg-ports = <1>;
+			qcom,ap-owned;
+			qcom,qport = <0>;
+			qcom,qos-mode = "fixed";
+			qcom,connections = <&slv_a1noc_snoc>;
+			qcom,prio1 = <1>;
+			qcom,prio0 = <1>;
+			qcom,bus-dev = <&fab_a1noc>;
+			qcom,mas-rpm-id = <ICBID_MASTER_UFS>;
+		};
+
+		mas_blsp_2: mas-blsp-2 {
+			cell-id = <MSM_BUS_MASTER_BLSP_2>;
+			label = "mas-blsp-2";
+			qcom,buswidth = <16>;
+			qcom,agg-ports = <1>;
+			qcom,qport = <4>;
+			qcom,qos-mode = "fixed";
+			qcom,connections = <&slv_a1noc_snoc>;
+			qcom,bus-dev = <&fab_a1noc>;
+			qcom,mas-rpm-id = <ICBID_MASTER_BLSP_2>;
+		};
+
+		mas_cnoc_a2noc: mas-cnoc-a2noc {
+			cell-id = <0>;
+			label = "mas-cnoc-a2noc";
+			qcom,buswidth = <8>;
+			qcom,agg-ports = <1>;
+			qcom,ap-owned;
+			qcom,connections = <&slv_a2noc_snoc>;
+			qcom,bus-dev = <&fab_a2noc>;
+			qcom,mas-rpm-id = <ICBID_MASTER_CNOC_A2NOC>;
+		};
+
+		mas_ipa: mas-ipa {
+			cell-id = <MSM_BUS_MASTER_IPA>;
+			label = "mas-ipa";
+			qcom,buswidth = <8>;
+			qcom,agg-ports = <1>;
+			qcom,ap-owned;
+			qcom,qport = <1>;
+			qcom,qos-mode = "fixed";
+			qcom,connections = <&slv_a2noc_snoc>;
+			qcom,prio1 = <1>;
+			qcom,prio0 = <1>;
+			qcom,bus-dev = <&fab_a2noc>;
+			qcom,mas-rpm-id = <ICBID_MASTER_IPA>;
+		};
+
+		mas_sdcc_2: mas-sdcc-2 {
+			cell-id = <MSM_BUS_MASTER_SDCC_2>;
+			label = "mas-sdcc-2";
+			qcom,buswidth = <8>;
+			qcom,agg-ports = <1>;
+			qcom,qport = <6>;
+			qcom,qos-mode = "fixed";
+			qcom,connections = <&slv_a2noc_snoc>;
+			qcom,bus-dev = <&fab_a2noc>;
+			qcom,mas-rpm-id = <ICBID_MASTER_SDCC_2>;
+		};
+
+		mas_sdcc_4: mas-sdcc-4 {
+			cell-id = <MSM_BUS_MASTER_SDCC_4>;
+			label = "mas-sdcc-4";
+			qcom,buswidth = <8>;
+			qcom,agg-ports = <1>;
+			qcom,qport = <7>;
+			qcom,qos-mode = "fixed";
+			qcom,connections = <&slv_a2noc_snoc>;
+			qcom,bus-dev = <&fab_a2noc>;
+			qcom,mas-rpm-id = <ICBID_MASTER_SDCC_4>;
+		};
+
+		mas_tsif: mas-tsif {
+			cell-id = <MSM_BUS_MASTER_TSIF>;
+			label = "mas-tsif";
+			qcom,buswidth = <4>;
+			qcom,agg-ports = <1>;
+			qcom,ap-owned;
+			qcom,connections = <&slv_a2noc_snoc>;
+			qcom,bus-dev = <&fab_a2noc>;
+			qcom,mas-rpm-id = <ICBID_MASTER_TSIF>;
+		};
+
+		mas_blsp_1: mas-blsp-1 {
+			cell-id = <MSM_BUS_MASTER_BLSP_1>;
+			label = "mas-blsp-1";
+			qcom,buswidth = <16>;
+			qcom,agg-ports = <1>;
+			qcom,qport = <8>;
+			qcom,qos-mode = "fixed";
+			qcom,connections = <&slv_a2noc_snoc>;
+			qcom,bus-dev = <&fab_a2noc>;
+			qcom,mas-rpm-id = <ICBID_MASTER_BLSP_1>;
+		};
+
+		mas_cr_virt_a2noc: mas-cr-virt-a2noc {
+			cell-id = <MSM_BUS_MASTER_CRVIRT_A2NOC>;
+			label = "mas-cr-virt-a2noc";
+			qcom,buswidth = <8>;
+			qcom,agg-ports = <1>;
+			qcom,qport = <9>;
+			qcom,qos-mode = "fixed";
+			qcom,connections = <&slv_a2noc_snoc>;
+			qcom,bus-dev = <&fab_a2noc>;
+			qcom,mas-rpm-id = <ICBID_MASTER_CRVIRT_A2NOC>;
+		};
+
+		mas_gnoc_bimc: mas-gnoc-bimc {
+			cell-id = <MSM_BUS_MASTER_GNOC_BIMC>;
+			label = "mas-gnoc-bimc";
+			qcom,buswidth = <8>;
+			qcom,agg-ports = <2>;
+			qcom,ap-owned;
+			qcom,qport = <0>;
+			qcom,qos-mode = "fixed";
+			qcom,connections = <&slv_ebi &slv_bimc_snoc_0>;
+			qcom,prio-lvl = <0>;
+			qcom,prio-rd = <0>;
+			qcom,prio-wr = <0>;
+			qcom,bus-dev = <&fab_bimc>;
+			qcom,mas-rpm-id = <ICBID_MASTER_GNOC_BIMC>;
+		};
+
+		mas_oxili: mas-oxili {
+			cell-id = <MSM_BUS_MASTER_GRAPHICS_3D>;
+			label = "mas-oxili";
+			qcom,buswidth = <8>;
+			qcom,agg-ports = <2>;
+			qcom,ap-owned;
+			qcom,qport = <1>;
+			qcom,qos-mode = "bypass";
+			qcom,connections = < &slv_bimc_snoc_1
+				&slv_hmss_l3 &slv_ebi &slv_bimc_snoc_0>;
+			qcom,bus-dev = <&fab_bimc>;
+			qcom,mas-rpm-id = <ICBID_MASTER_GFX3D>;
+		};
+
+		mas_mnoc_bimc: mas-mnoc-bimc {
+			cell-id = <MSM_BUS_MNOC_BIMC_MAS>;
+			label = "mas-mnoc-bimc";
+			qcom,buswidth = <8>;
+			qcom,agg-ports = <2>;
+			qcom,ap-owned;
+			qcom,qport = <2>;
+			qcom,qos-mode = "bypass";
+			qcom,connections = < &slv_bimc_snoc_1
+				&slv_hmss_l3 &slv_ebi &slv_bimc_snoc_0>;
+			qcom,bus-dev = <&fab_bimc>;
+			qcom,mas-rpm-id = <ICBID_MASTER_MNOC_BIMC>;
+		};
+
+		mas_snoc_bimc: mas-snoc-bimc {
+			cell-id = <MSM_BUS_SNOC_BIMC_MAS>;
+			label = "mas-snoc-bimc";
+			qcom,buswidth = <8>;
+			qcom,agg-ports = <2>;
+			qcom,qport = <3>;
+			qcom,qos-mode = "bypass";
+			qcom,connections = < &slv_hmss_l3 &slv_ebi>;
+			qcom,bus-dev = <&fab_bimc>;
+			qcom,mas-rpm-id = <ICBID_MASTER_SNOC_BIMC>;
+		};
+
+		mas_snoc_cnoc: mas-snoc-cnoc {
+			cell-id = <MSM_BUS_SNOC_CNOC_MAS>;
+			label = "mas-snoc-cnoc";
+			qcom,buswidth = <8>;
+			qcom,agg-ports = <1>;
+			qcom,ap-owned;
+			qcom,connections = < &slv_skl &slv_blsp_2
+				&slv_message_ram
+				&slv_tlmm_west &slv_tsif
+				&slv_mpm &slv_bimc_cfg
+				&slv_tlmm_east &slv_spdm
+				&slv_pimem_cfg &slv_a1noc_smmu_cfg
+				&slv_blsp_1 &slv_clk_ctl
+				&slv_prng &slv_usb3_0
+				&slv_qdss_cfg &slv_qm_cfg
+				&slv_a2noc_cfg &slv_pmic_arb
+				&slv_ufs_cfg &slv_srvc_cnoc
+				&slv_ahb2phy &slv_ipa
+				&slv_glm &slv_snoc_cfg
+				&slv_ssc_cfg &slv_sdcc_2
+				&slv_sdcc_4 &slv_pdm
+				&slv_cnoc_mnoc_mmss_cfg &slv_cnoc_mnoc_cfg
+				&slv_mss_cfg &slv_imem_cfg
+				&slv_a1noc_cfg &slv_gpuss_cfg
+				&slv_tcsr &slv_tlmm_north>;
+			qcom,bus-dev = <&fab_cnoc>;
+			qcom,mas-rpm-id = <ICBID_MASTER_SNOC_CNOC>;
+		};
+
+		mas_qdss_dap: mas-qdss-dap {
+			cell-id = <MSM_BUS_MASTER_QDSS_DAP>;
+			label = "mas-qdss-dap";
+			qcom,buswidth = <8>;
+			qcom,agg-ports = <1>;
+			qcom,ap-owned;
+			qcom,connections = < &slv_skl &slv_blsp_2
+				&slv_message_ram
+				&slv_tlmm_west &slv_tsif
+				&slv_mpm &slv_bimc_cfg
+				&slv_tlmm_east &slv_spdm
+				&slv_pimem_cfg &slv_a1noc_smmu_cfg
+				&slv_blsp_1 &slv_clk_ctl
+				&slv_prng &slv_usb3_0
+				&slv_qdss_cfg &slv_qm_cfg
+				&slv_a2noc_cfg &slv_pmic_arb
+				&slv_ufs_cfg &slv_srvc_cnoc
+				&slv_ahb2phy &slv_ipa
+				&slv_glm &slv_snoc_cfg
+				&slv_sdcc_2 &slv_sdcc_4
+				&slv_pdm &slv_cnoc_mnoc_mmss_cfg
+				&slv_cnoc_mnoc_cfg &slv_mss_cfg
+				&slv_imem_cfg &slv_a1noc_cfg
+				&slv_gpuss_cfg &slv_ssc_cfg
+				&slv_tcsr &slv_tlmm_north
+				&slv_cnoc_a2noc>;
+			qcom,bus-dev = <&fab_cnoc>;
+			qcom,mas-rpm-id = <ICBID_MASTER_QDSS_DAP>;
+		};
+
+		mas_crypto_c0: mas-crypto-c0 {
+			cell-id = <MSM_BUS_MASTER_CRYPTO_CORE0>;
+			label = "mas-crypto-c0";
+			qcom,buswidth = <650>;
+			qcom,agg-ports = <1>;
+			qcom,connections = <&slv_cr_virt_a2noc>;
+			qcom,bus-dev = <&fab_a2noc>;
+			qcom,mas-rpm-id = <ICBID_MASTER_CRYPTO_CORE0>;
+		};
+
+		mas_apps_proc: mas-apps-proc {
+			cell-id = <MSM_BUS_MASTER_AMPSS_M0>;
+			label = "mas-apps-proc";
+			qcom,buswidth = <32>;
+			qcom,agg-ports = <1>;
+			qcom,ap-owned;
+			qcom,connections = <&slv_gnoc_bimc>;
+			qcom,bus-dev = <&fab_gnoc>;
+			qcom,mas-rpm-id = <ICBID_MASTER_APPSS_PROC>;
+		};
+
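+		/*
+		 * qcom,connections chains votes through gateway pairs: CPU
+		 * traffic goes mas_apps_proc -> slv_gnoc_bimc ->
+		 * mas_gnoc_bimc -> slv_ebi, so a single DDR bandwidth
+		 * request scales the GNOC and BIMC legs of the path together.
+		 */
+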
+		mas_cnoc_mnoc_mmss_cfg: mas-cnoc-mnoc-mmss-cfg {
+			cell-id = <MSM_BUS_MASTER_CNOC_MNOC_MMSS_CFG>;
+			label = "mas-cnoc-mnoc-mmss-cfg";
+			qcom,buswidth = <8>;
+			qcom,agg-ports = <1>;
+			qcom,ap-owned;
+			qcom,connections = < &slv_camera_throttle_cfg
+				&slv_venus_cfg &slv_misc_cfg
+				&slv_camera_cfg &slv_display_throttle_cfg
+				&slv_venus_throttle_cfg &slv_display_cfg
+				&slv_mmss_clk_cfg &slv_vmem_cfg
+				&slv_mmss_clk_xpu_cfg &slv_smmu_cfg>;
+			qcom,bus-dev = <&fab_mnoc_ahb>;
+			qcom,mas-rpm-id = <ICBID_MASTER_CNOC_MNOC_MMSS_CFG>;
+		};
+
+		mas_cnoc_mnoc_cfg: mas-cnoc-mnoc-cfg {
+			cell-id = <MSM_BUS_MASTER_CNOC_MNOC_CFG>;
+			label = "mas-cnoc-mnoc-cfg";
+			qcom,buswidth = <8>;
+			qcom,agg-ports = <1>;
+			qcom,ap-owned;
+			qcom,connections = <&slv_srvc_mnoc>;
+			qcom,bus-dev = <&fab_mnoc>;
+			qcom,mas-rpm-id = <ICBID_MASTER_CNOC_MNOC_CFG>;
+		};
+
+		mas_cpp: mas-cpp {
+			cell-id = <MSM_BUS_MASTER_CPP>;
+			label = "mas-cpp";
+			qcom,buswidth = <32>;
+			qcom,agg-ports = <1>;
+			qcom,ap-owned;
+			qcom,qport = <5>;
+			qcom,qos-mode = "bypass";
+			qcom,connections = <&slv_mnoc_bimc>;
+			qcom,bus-dev = <&fab_mnoc>;
+			qcom,mas-rpm-id = <ICBID_MASTER_CPP>;
+		};
+
+		mas_jpeg: mas-jpeg {
+			cell-id = <MSM_BUS_MASTER_JPEG>;
+			label = "mas-jpeg";
+			qcom,buswidth = <32>;
+			qcom,agg-ports = <1>;
+			qcom,ap-owned;
+			qcom,qport = <7>;
+			qcom,qos-mode = "bypass";
+			qcom,connections = <&slv_mnoc_bimc>;
+			qcom,bus-dev = <&fab_mnoc>;
+			qcom,mas-rpm-id = <ICBID_MASTER_JPEG>;
+		};
+
+		mas_mdp_p0: mas-mdp-p0 {
+			cell-id = <MSM_BUS_MASTER_MDP_PORT0>;
+			label = "mas-mdp-p0";
+			qcom,buswidth = <32>;
+			qcom,agg-ports = <1>;
+			qcom,ap-owned;
+			qcom,qport = <1>;
+			qcom,qos-mode = "bypass";
+			qcom,connections = <&slv_mnoc_bimc>;
+			qcom,bus-dev = <&fab_mnoc>;
+			qcom,vrail-comp = <25>;
+			qcom,mas-rpm-id = <ICBID_MASTER_MDP0>;
+			clk-mdss-axi-no-rate-supply =
+					<&gdsc_mdss>;
+			clk-mdss-ahb-no-rate-supply =
+					<&gdsc_mdss>;
+			qcom,node-qos-clks {
+				clock-names =
+				"clk-mdss-ahb-no-rate",
+				"clk-mdss-axi-no-rate";
+				clocks =
+				<&clock_mmss clk_mmss_mdss_ahb_clk>,
+				<&clock_mmss clk_mmss_mdss_axi_clk>;
+			};
+		};
+
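+		/*
+		 * The *-supply entries above tie each node-qos clock to the
+		 * MDSS GDSC so the clock is only touched while that power
+		 * domain is up; qcom,vrail-comp (25) appears to be a vote
+		 * compensation factor applied when aggregating this master's
+		 * bandwidth.
+		 */
+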
+		mas_mdp_p1: mas-mdp-p1 {
+			cell-id = <MSM_BUS_MASTER_MDP_PORT1>;
+			label = "mas-mdp-p1";
+			qcom,buswidth = <32>;
+			qcom,agg-ports = <1>;
+			qcom,ap-owned;
+			qcom,qport = <2>;
+			qcom,qos-mode = "bypass";
+			qcom,connections = <&slv_mnoc_bimc>;
+			qcom,bus-dev = <&fab_mnoc>;
+			qcom,vrail-comp = <25>;
+			qcom,mas-rpm-id = <ICBID_MASTER_MDP1>;
+		};
+
+		mas_rotator: mas-rotator {
+			cell-id = <MSM_BUS_MASTER_ROTATOR>;
+			label = "mas-rotator";
+			qcom,buswidth = <32>;
+			qcom,agg-ports = <1>;
+			qcom,ap-owned;
+			qcom,qport = <0>;
+			qcom,qos-mode = "bypass";
+			qcom,connections = <&slv_mnoc_bimc>;
+			qcom,bus-dev = <&fab_mnoc>;
+			qcom,mas-rpm-id = <ICBID_MASTER_ROTATOR>;
+		};
+
+		mas_venus: mas-venus {
+			cell-id = <MSM_BUS_MASTER_VIDEO_P0>;
+			label = "mas-venus";
+			qcom,buswidth = <32>;
+			qcom,agg-ports = <2>;
+			qcom,ap-owned;
+			qcom,qport = <3 4>;
+			qcom,qos-mode = "bypass";
+			qcom,connections = <&slv_mnoc_bimc>;
+			qcom,bus-dev = <&fab_mnoc>;
+			qcom,mas-rpm-id = <ICBID_MASTER_VIDEO>;
+		};
+
+		mas_vfe: mas-vfe {
+			cell-id = <MSM_BUS_MASTER_VFE>;
+			label = "mas-vfe";
+			qcom,buswidth = <32>;
+			qcom,agg-ports = <1>;
+			qcom,ap-owned;
+			qcom,qport = <6>;
+			qcom,qos-mode = "bypass";
+			qcom,connections = <&slv_mnoc_bimc>;
+			qcom,bus-dev = <&fab_mnoc>;
+			qcom,mas-rpm-id = <ICBID_MASTER_VFE>;
+		};
+
+		mas_venus_vmem: mas-venus-vmem {
+			cell-id = <MSM_BUS_MASTER_VIDEO_P0_OCMEM>;
+			label = "mas-venus-vmem";
+			qcom,buswidth = <32>;
+			qcom,agg-ports = <1>;
+			qcom,ap-owned;
+			qcom,connections = <&slv_vmem>;
+			qcom,bus-dev = <&fab_mnoc>;
+			qcom,mas-rpm-id = <ICBID_MASTER_VENUS_VMEM>;
+		};
+
+		mas_hmss: mas-hmss {
+			cell-id = <MSM_BUS_MASTER_HMSS>;
+			label = "mas-hmss";
+			qcom,buswidth = <16>;
+			qcom,agg-ports = <1>;
+			qcom,ap-owned;
+			qcom,qport = <3>;
+			qcom,qos-mode = "fixed";
+			qcom,connections = < &slv_pimem &slv_imem
+				&slv_snoc_bimc>;
+			qcom,prio1 = <1>;
+			qcom,prio0 = <1>;
+			qcom,bus-dev = <&fab_snoc>;
+			qcom,mas-rpm-id = <ICBID_MASTER_HMSS>;
+		};
+
+		mas_qdss_bam: mas-qdss-bam {
+			cell-id = <MSM_BUS_MASTER_QDSS_BAM>;
+			label = "mas-qdss-bam";
+			qcom,buswidth = <16>;
+			qcom,agg-ports = <1>;
+			qcom,ap-owned;
+			qcom,qport = <1>;
+			qcom,qos-mode = "fixed";
+			qcom,connections = < &slv_imem &slv_pimem &slv_snoc_cnoc
+				&slv_snoc_bimc>;
+			qcom,prio1 = <1>;
+			qcom,prio0 = <1>;
+			qcom,bus-dev = <&fab_snoc>;
+			qcom,mas-rpm-id = <ICBID_MASTER_QDSS_BAM>;
+		};
+
+		mas_snoc_cfg: mas-snoc-cfg {
+			cell-id = <MSM_BUS_MASTER_SNOC_CFG>;
+			label = "mas-snoc-cfg";
+			qcom,buswidth = <16>;
+			qcom,agg-ports = <1>;
+			qcom,connections = <&slv_srvc_snoc>;
+			qcom,bus-dev = <&fab_snoc>;
+			qcom,mas-rpm-id = <ICBID_MASTER_SNOC_CFG>;
+		};
+
+		mas_bimc_snoc_0: mas-bimc-snoc-0 {
+			cell-id = <MSM_BUS_BIMC_SNOC_MAS>;
+			label = "mas-bimc-snoc-0";
+			qcom,buswidth = <16>;
+			qcom,agg-ports = <1>;
+			qcom,connections = < &slv_pimem &slv_lpass &slv_hmss
+				 &slv_wlan &slv_snoc_cnoc
+				 &slv_imem &slv_qdss_stm>;
+			qcom,bus-dev = <&fab_snoc>;
+			qcom,mas-rpm-id = <ICBID_MASTER_BIMC_SNOC_0>;
+		};
+
+		mas_bimc_snoc_1: mas-bimc-snoc-1 {
+			cell-id = <MSM_BUS_BIMC_SNOC_1_MAS>;
+			label = "mas-bimc-snoc-1";
+			qcom,buswidth = <16>;
+			qcom,agg-ports = <1>;
+			qcom,ap-owned;
+			qcom,connections = <&slv_pcie_0>;
+			qcom,bus-dev = <&fab_snoc>;
+			qcom,mas-rpm-id = <ICBID_MASTER_BIMC_SNOC_1>;
+		};
+
+		mas_a1noc_snoc: mas-a1noc-snoc {
+			cell-id = <MSM_BUS_A1NOC_SNOC_MAS>;
+			label = "mas-a1noc-snoc";
+			qcom,buswidth = <16>;
+			qcom,agg-ports = <1>;
+			qcom,connections = < &slv_pimem &slv_pcie_0 &slv_lpass
+				&slv_hmss &slv_snoc_bimc
+				 &slv_snoc_cnoc &slv_imem
+				 &slv_qdss_stm>;
+			qcom,bus-dev = <&fab_snoc>;
+			qcom,mas-rpm-id = <ICBID_MASTER_A1NOC_SNOC>;
+		};
+
+		mas_a2noc_snoc: mas-a2noc-snoc {
+			cell-id = <MSM_BUS_A2NOC_SNOC_MAS>;
+			label = "mas-a2noc-snoc";
+			qcom,buswidth = <16>;
+			qcom,agg-ports = <1>;
+			qcom,connections = < &slv_pimem &slv_pcie_0 &slv_lpass
+				&slv_hmss &slv_snoc_bimc
+				 &slv_wlan &slv_snoc_cnoc
+				 &slv_imem &slv_qdss_stm>;
+			qcom,bus-dev = <&fab_snoc>;
+			qcom,mas-rpm-id = <ICBID_MASTER_A2NOC_SNOC>;
+		};
+
+		mas_qdss_etr: mas-qdss-etr {
+			cell-id = <MSM_BUS_MASTER_QDSS_ETR>;
+			label = "mas-qdss-etr";
+			qcom,buswidth = <16>;
+			qcom,agg-ports = <1>;
+			qcom,ap-owned;
+			qcom,qport = <2>;
+			qcom,qos-mode = "fixed";
+			qcom,connections = < &slv_imem &slv_pimem &slv_snoc_cnoc
+				&slv_snoc_bimc>;
+			qcom,prio1 = <1>;
+			qcom,prio0 = <1>;
+			qcom,bus-dev = <&fab_snoc>;
+			qcom,mas-rpm-id = <ICBID_MASTER_QDSS_ETR>;
+		};
+
+		/* Internal nodes */
+
+		/* Slaves */
+
+		slv_a1noc_snoc:slv-a1noc-snoc {
+			cell-id = <MSM_BUS_A1NOC_SNOC_SLV>;
+			label = "slv-a1noc-snoc";
+			qcom,buswidth = <16>;
+			qcom,agg-ports = <1>;
+			qcom,bus-dev = <&fab_a1noc>;
+			qcom,connections = <&mas_a1noc_snoc>;
+			qcom,slv-rpm-id = <ICBID_SLAVE_A1NOC_SNOC>;
+		};
+
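+		/*
+		 * Slave (slv-*) nodes that carry qcom,connections are
+		 * gateways into the next fabric (this one loops back into
+		 * mas_a1noc_snoc on the system NoC); leaf slaves such as
+		 * slv_ebi or the *-cfg blocks have none. The ICBID
+		 * slv-rpm-id forwards votes to the RPM, while qcom,ap-owned
+		 * nodes are managed from the application processor.
+		 */
+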
+		slv_a2noc_snoc:slv-a2noc-snoc {
+			cell-id = <MSM_BUS_A2NOC_SNOC_SLV>;
+			label = "slv-a2noc-snoc";
+			qcom,buswidth = <16>;
+			qcom,agg-ports = <1>;
+			qcom,bus-dev = <&fab_a2noc>;
+			qcom,connections = <&mas_a2noc_snoc>;
+			qcom,slv-rpm-id = <ICBID_SLAVE_A2NOC_SNOC>;
+		};
+
+		slv_ebi:slv-ebi {
+			cell-id = <MSM_BUS_SLAVE_EBI_CH0>;
+			label = "slv-ebi";
+			qcom,buswidth = <8>;
+			qcom,agg-ports = <2>;
+			qcom,bus-dev = <&fab_bimc>;
+			qcom,slv-rpm-id = <ICBID_SLAVE_EBI1>;
+		};
+
+		slv_hmss_l3:slv-hmss-l3 {
+			cell-id = <MSM_BUS_SLAVE_HMSS_L3>;
+			label = "slv-hmss-l3";
+			qcom,buswidth = <8>;
+			qcom,agg-ports = <1>;
+			qcom,bus-dev = <&fab_bimc>;
+			qcom,slv-rpm-id = <ICBID_SLAVE_HMSS_L3>;
+		};
+
+		slv_bimc_snoc_0:slv-bimc-snoc-0 {
+			cell-id = <MSM_BUS_BIMC_SNOC_SLV>;
+			label = "slv-bimc-snoc-0";
+			qcom,buswidth = <8>;
+			qcom,agg-ports = <1>;
+			qcom,bus-dev = <&fab_bimc>;
+			qcom,connections = <&mas_bimc_snoc_0>;
+			qcom,slv-rpm-id = <ICBID_SLAVE_BIMC_SNOC_0>;
+		};
+
+		slv_bimc_snoc_1:slv-bimc-snoc-1 {
+			cell-id = <MSM_BUS_BIMC_SNOC_1_SLV>;
+			label = "slv-bimc-snoc-1";
+			qcom,buswidth = <8>;
+			qcom,agg-ports = <1>;
+			qcom,ap-owned;
+			qcom,bus-dev = <&fab_bimc>;
+			qcom,connections = <&mas_bimc_snoc_1>;
+			qcom,slv-rpm-id = <ICBID_SLAVE_BIMC_SNOC_1>;
+		};
+
+		slv_cnoc_a2noc:slv-cnoc-a2noc {
+			cell-id = <MSM_BUS_CNOC_SNOC_SLV>;
+			label = "slv-cnoc-a2noc";
+			qcom,buswidth = <4>;
+			qcom,agg-ports = <1>;
+			qcom,ap-owned;
+			qcom,bus-dev = <&fab_cnoc>;
+			qcom,connections = <&mas_cnoc_a2noc>;
+			qcom,slv-rpm-id = <ICBID_SLAVE_CNOC_A2NOC>;
+		};
+
+		slv_ssc_cfg:slv-ssc-cfg {
+			cell-id = <MSM_BUS_SLAVE_SSC_CFG>;
+			label = "slv-ssc-cfg";
+			qcom,buswidth = <4>;
+			qcom,agg-ports = <1>;
+			qcom,ap-owned;
+			qcom,bus-dev = <&fab_cnoc>;
+			qcom,slv-rpm-id = <ICBID_SLAVE_SSC_CFG>;
+		};
+
+		slv_mpm:slv-mpm {
+			cell-id = <MSM_BUS_SLAVE_MPM>;
+			label = "slv-mpm";
+			qcom,buswidth = <4>;
+			qcom,agg-ports = <1>;
+			qcom,ap-owned;
+			qcom,bus-dev = <&fab_cnoc>;
+			qcom,slv-rpm-id = <ICBID_SLAVE_MPM>;
+		};
+
+		slv_pmic_arb:slv-pmic-arb {
+			cell-id = <MSM_BUS_SLAVE_PMIC_ARB>;
+			label = "slv-pmic-arb";
+			qcom,buswidth = <4>;
+			qcom,agg-ports = <1>;
+			qcom,ap-owned;
+			qcom,bus-dev = <&fab_cnoc>;
+			qcom,slv-rpm-id = <ICBID_SLAVE_PMIC_ARB>;
+		};
+
+		slv_tlmm_north:slv-tlmm-north {
+			cell-id = <MSM_BUS_SLAVE_TLMM_NORTH>;
+			label = "slv-tlmm-north";
+			qcom,buswidth = <4>;
+			qcom,agg-ports = <1>;
+			qcom,ap-owned;
+			qcom,bus-dev = <&fab_cnoc>;
+			qcom,slv-rpm-id = <ICBID_SLAVE_TLMM_NORTH>;
+		};
+
+		slv_pimem_cfg:slv-pimem-cfg {
+			cell-id = <MSM_BUS_SLAVE_PIMEM_CFG>;
+			label = "slv-pimem-cfg";
+			qcom,buswidth = <4>;
+			qcom,agg-ports = <1>;
+			qcom,ap-owned;
+			qcom,bus-dev = <&fab_cnoc>;
+			qcom,slv-rpm-id = <ICBID_SLAVE_PIMEM_CFG>;
+		};
+
+		slv_imem_cfg:slv-imem-cfg {
+			cell-id = <MSM_BUS_SLAVE_IMEM_CFG>;
+			label = "slv-imem-cfg";
+			qcom,buswidth = <4>;
+			qcom,agg-ports = <1>;
+			qcom,ap-owned;
+			qcom,bus-dev = <&fab_cnoc>;
+			qcom,slv-rpm-id = <ICBID_SLAVE_IMEM_CFG>;
+		};
+
+		slv_message_ram:slv-message-ram {
+			cell-id = <MSM_BUS_SLAVE_MESSAGE_RAM>;
+			label = "slv-message-ram";
+			qcom,buswidth = <4>;
+			qcom,agg-ports = <1>;
+			qcom,ap-owned;
+			qcom,bus-dev = <&fab_cnoc>;
+			qcom,slv-rpm-id = <ICBID_SLAVE_MESSAGE_RAM>;
+		};
+
+		slv_skl:slv-skl {
+			cell-id = <MSM_BUS_SLAVE_SKL>;
+			label = "slv-skl";
+			qcom,buswidth = <4>;
+			qcom,agg-ports = <1>;
+			qcom,ap-owned;
+			qcom,bus-dev = <&fab_cnoc>;
+			qcom,slv-rpm-id = <ICBID_SLAVE_SKL>;
+		};
+
+		slv_bimc_cfg:slv-bimc-cfg {
+			cell-id = <MSM_BUS_SLAVE_BIMC_CFG>;
+			label = "slv-bimc-cfg";
+			qcom,buswidth = <4>;
+			qcom,agg-ports = <1>;
+			qcom,ap-owned;
+			qcom,bus-dev = <&fab_cnoc>;
+			qcom,slv-rpm-id = <ICBID_SLAVE_BIMC_CFG>;
+		};
+
+		slv_prng:slv-prng {
+			cell-id = <MSM_BUS_SLAVE_PRNG>;
+			label = "slv-prng";
+			qcom,buswidth = <4>;
+			qcom,agg-ports = <1>;
+			qcom,ap-owned;
+			qcom,bus-dev = <&fab_cnoc>;
+			qcom,slv-rpm-id = <ICBID_SLAVE_PRNG>;
+		};
+
+		slv_a2noc_cfg:slv-a2noc-cfg {
+			cell-id = <MSM_BUS_SLAVE_A2NOC_CFG>;
+			label = "slv-a2noc-cfg";
+			qcom,buswidth = <4>;
+			qcom,agg-ports = <1>;
+			qcom,ap-owned;
+			qcom,bus-dev = <&fab_cnoc>;
+			qcom,slv-rpm-id = <ICBID_SLAVE_A2NOC_CFG>;
+		};
+
+		slv_ipa:slv-ipa {
+			cell-id = <MSM_BUS_SLAVE_IPA_CFG>;
+			label = "slv-ipa";
+			qcom,buswidth = <4>;
+			qcom,agg-ports = <1>;
+			qcom,ap-owned;
+			qcom,bus-dev = <&fab_cnoc>;
+			qcom,slv-rpm-id = <ICBID_SLAVE_IPA_CFG>;
+		};
+
+		slv_tcsr:slv-tcsr {
+			cell-id = <MSM_BUS_SLAVE_TCSR>;
+			label = "slv-tcsr";
+			qcom,buswidth = <4>;
+			qcom,agg-ports = <1>;
+			qcom,ap-owned;
+			qcom,bus-dev = <&fab_cnoc>;
+			qcom,slv-rpm-id = <ICBID_SLAVE_TCSR>;
+		};
+
+		slv_snoc_cfg:slv-snoc-cfg {
+			cell-id = <MSM_BUS_SLAVE_SNOC_CFG>;
+			label = "slv-snoc-cfg";
+			qcom,buswidth = <4>;
+			qcom,agg-ports = <1>;
+			qcom,ap-owned;
+			qcom,bus-dev = <&fab_cnoc>;
+			qcom,slv-rpm-id = <ICBID_SLAVE_SNOC_CFG>;
+		};
+
+		slv_clk_ctl:slv-clk-ctl {
+			cell-id = <MSM_BUS_SLAVE_CLK_CTL>;
+			label = "slv-clk-ctl";
+			qcom,buswidth = <4>;
+			qcom,agg-ports = <1>;
+			qcom,ap-owned;
+			qcom,bus-dev = <&fab_cnoc>;
+			qcom,slv-rpm-id = <ICBID_SLAVE_CLK_CTL>;
+		};
+
+		slv_glm:slv-glm {
+			cell-id = <MSM_BUS_SLAVE_GLM>;
+			label = "slv-glm";
+			qcom,buswidth = <4>;
+			qcom,agg-ports = <1>;
+			qcom,ap-owned;
+			qcom,bus-dev = <&fab_cnoc>;
+			qcom,slv-rpm-id = <ICBID_SLAVE_GLM>;
+		};
+
+		slv_spdm:slv-spdm {
+			cell-id = <MSM_BUS_SLAVE_SPDM_WRAPPER>;
+			label = "slv-spdm";
+			qcom,buswidth = <4>;
+			qcom,agg-ports = <1>;
+			qcom,ap-owned;
+			qcom,bus-dev = <&fab_cnoc>;
+			qcom,slv-rpm-id = <ICBID_SLAVE_SPDM_WRAPPER>;
+		};
+
+		slv_gpuss_cfg:slv-gpuss-cfg {
+			cell-id = <MSM_BUS_SLAVE_GRAPHICS_3D_CFG>;
+			label = "slv-gpuss-cfg";
+			qcom,buswidth = <4>;
+			qcom,agg-ports = <1>;
+			qcom,ap-owned;
+			qcom,bus-dev = <&fab_cnoc>;
+			qcom,slv-rpm-id = <ICBID_SLAVE_GFX3D_CFG>;
+		};
+
+		slv_cnoc_mnoc_cfg:slv-cnoc-mnoc-cfg {
+			cell-id = <MSM_BUS_SLAVE_CNOC_MNOC_CFG>;
+			label = "slv-cnoc-mnoc-cfg";
+			qcom,buswidth = <4>;
+			qcom,agg-ports = <1>;
+			qcom,ap-owned;
+			qcom,bus-dev = <&fab_cnoc>;
+			qcom,connections = <&mas_cnoc_mnoc_cfg>;
+			qcom,slv-rpm-id = <ICBID_SLAVE_CNOC_MNOC_CFG>;
+		};
+
+		slv_qm_cfg:slv-qm-cfg {
+			cell-id = <MSM_BUS_SLAVE_QM_CFG>;
+			label = "slv-qm-cfg";
+			qcom,buswidth = <4>;
+			qcom,agg-ports = <1>;
+			qcom,ap-owned;
+			qcom,bus-dev = <&fab_cnoc>;
+			qcom,slv-rpm-id = <ICBID_SLAVE_QM_CFG>;
+		};
+
+		slv_mss_cfg:slv-mss-cfg {
+			cell-id = <MSM_BUS_SLAVE_CNOC_MSS>;
+			label = "slv-mss-cfg";
+			qcom,buswidth = <4>;
+			qcom,agg-ports = <1>;
+			qcom,ap-owned;
+			qcom,bus-dev = <&fab_cnoc>;
+			qcom,slv-rpm-id = <ICBID_SLAVE_CNOC_MSS>;
+		};
+
+		slv_ufs_cfg:slv-ufs-cfg {
+			cell-id = <MSM_BUS_SLAVE_UFS_CFG>;
+			label = "slv-ufs-cfg";
+			qcom,buswidth = <4>;
+			qcom,agg-ports = <1>;
+			qcom,ap-owned;
+			qcom,bus-dev = <&fab_cnoc>;
+			qcom,slv-rpm-id = <ICBID_SLAVE_UFS_CFG>;
+		};
+
+		slv_tlmm_west:slv-tlmm-west {
+			cell-id = <MSM_BUS_SLAVE_TLMM_WEST>;
+			label = "slv-tlmm-west";
+			qcom,buswidth = <4>;
+			qcom,agg-ports = <1>;
+			qcom,ap-owned;
+			qcom,bus-dev = <&fab_cnoc>;
+			qcom,slv-rpm-id = <ICBID_SLAVE_TLMM_WEST>;
+		};
+
+		slv_a1noc_cfg:slv-a1noc-cfg {
+			cell-id = <MSM_BUS_SLAVE_A1NOC_CFG>;
+			label = "slv-a1noc-cfg";
+			qcom,buswidth = <4>;
+			qcom,agg-ports = <1>;
+			qcom,ap-owned;
+			qcom,bus-dev = <&fab_cnoc>;
+			qcom,slv-rpm-id = <ICBID_SLAVE_A1NOC_CFG>;
+		};
+
+		slv_ahb2phy:slv-ahb2phy {
+			cell-id = <MSM_BUS_SLAVE_PCIE20_AHB2PHY>;
+			label = "slv-ahb2phy";
+			qcom,buswidth = <4>;
+			qcom,agg-ports = <1>;
+			qcom,ap-owned;
+			qcom,bus-dev = <&fab_cnoc>;
+			qcom,slv-rpm-id = <ICBID_SLAVE_PCIE20_AHB2PHY>;
+		};
+
+		slv_blsp_2:slv-blsp-2 {
+			cell-id = <MSM_BUS_SLAVE_BLSP_2>;
+			label = "slv-blsp-2";
+			qcom,buswidth = <4>;
+			qcom,agg-ports = <1>;
+			qcom,ap-owned;
+			qcom,bus-dev = <&fab_cnoc>;
+			qcom,slv-rpm-id = <ICBID_SLAVE_BLSP_2>;
+		};
+
+		slv_pdm:slv-pdm {
+			cell-id = <MSM_BUS_SLAVE_PDM>;
+			label = "slv-pdm";
+			qcom,buswidth = <4>;
+			qcom,agg-ports = <1>;
+			qcom,ap-owned;
+			qcom,bus-dev = <&fab_cnoc>;
+			qcom,slv-rpm-id = <ICBID_SLAVE_PDM>;
+		};
+
+		slv_usb3_0:slv-usb3-0 {
+			cell-id = <MSM_BUS_SLAVE_USB3>;
+			label = "slv-usb3-0";
+			qcom,buswidth = <4>;
+			qcom,agg-ports = <1>;
+			qcom,ap-owned;
+			qcom,bus-dev = <&fab_cnoc>;
+			qcom,slv-rpm-id = <ICBID_SLAVE_USB3_0>;
+		};
+
+		slv_a1noc_smmu_cfg:slv-a1noc-smmu-cfg {
+			cell-id = <MSM_BUS_SLAVE_A1NOC_SMMU_CFG>;
+			label = "slv-a1noc-smmu-cfg";
+			qcom,buswidth = <8>;
+			qcom,agg-ports = <1>;
+			qcom,ap-owned;
+			qcom,bus-dev = <&fab_cnoc>;
+			qcom,slv-rpm-id = <ICBID_SLAVE_A1NOC_SMMU_CFG>;
+		};
+
+		slv_blsp_1:slv-blsp-1 {
+			cell-id = <MSM_BUS_SLAVE_BLSP_1>;
+			label = "slv-blsp-1";
+			qcom,buswidth = <4>;
+			qcom,agg-ports = <1>;
+			qcom,ap-owned;
+			qcom,bus-dev = <&fab_cnoc>;
+			qcom,slv-rpm-id = <ICBID_SLAVE_BLSP_1>;
+		};
+
+		slv_sdcc_2:slv-sdcc-2 {
+			cell-id = <MSM_BUS_SLAVE_SDCC_2>;
+			label = "slv-sdcc-2";
+			qcom,buswidth = <4>;
+			qcom,agg-ports = <1>;
+			qcom,ap-owned;
+			qcom,bus-dev = <&fab_cnoc>;
+			qcom,slv-rpm-id = <ICBID_SLAVE_SDCC_2>;
+		};
+
+		slv_sdcc_4:slv-sdcc-4 {
+			cell-id = <MSM_BUS_SLAVE_SDCC_4>;
+			label = "slv-sdcc-4";
+			qcom,buswidth = <4>;
+			qcom,agg-ports = <1>;
+			qcom,ap-owned;
+			qcom,bus-dev = <&fab_cnoc>;
+			qcom,slv-rpm-id = <ICBID_SLAVE_SDCC_4>;
+		};
+
+		slv_tsif:slv-tsif {
+			cell-id = <MSM_BUS_SLAVE_TSIF>;
+			label = "slv-tsif";
+			qcom,buswidth = <4>;
+			qcom,agg-ports = <1>;
+			qcom,ap-owned;
+			qcom,bus-dev = <&fab_cnoc>;
+			qcom,slv-rpm-id = <ICBID_SLAVE_TSIF>;
+		};
+
+		slv_qdss_cfg:slv-qdss-cfg {
+			cell-id = <MSM_BUS_SLAVE_QDSS_CFG>;
+			label = "slv-qdss-cfg";
+			qcom,buswidth = <4>;
+			qcom,agg-ports = <1>;
+			qcom,ap-owned;
+			qcom,bus-dev = <&fab_cnoc>;
+			qcom,slv-rpm-id = <ICBID_SLAVE_QDSS_CFG>;
+		};
+
+		slv_tlmm_east:slv-tlmm-east {
+			cell-id = <MSM_BUS_SLAVE_TLMM_EAST>;
+			label = "slv-tlmm-east";
+			qcom,buswidth = <4>;
+			qcom,agg-ports = <1>;
+			qcom,ap-owned;
+			qcom,bus-dev = <&fab_cnoc>;
+			qcom,slv-rpm-id = <ICBID_SLAVE_TLMM_EAST>;
+		};
+
+		slv_cnoc_mnoc_mmss_cfg:slv-cnoc-mnoc-mmss-cfg {
+			cell-id = <MSM_BUS_SLAVE_CNOC_MNOC_MMSS_CFG>;
+			label = "slv-cnoc-mnoc-mmss-cfg";
+			qcom,buswidth = <8>;
+			qcom,agg-ports = <1>;
+			qcom,ap-owned;
+			qcom,bus-dev = <&fab_cnoc>;
+			qcom,connections = <&mas_cnoc_mnoc_mmss_cfg>;
+			qcom,slv-rpm-id = <ICBID_SLAVE_CNOC_MNOC_MMSS_CFG>;
+		};
+
+		slv_srvc_cnoc:slv-srvc-cnoc {
+			cell-id = <MSM_BUS_SLAVE_SERVICE_CNOC>;
+			label = "slv-srvc-cnoc";
+			qcom,buswidth = <4>;
+			qcom,agg-ports = <1>;
+			qcom,ap-owned;
+			qcom,bus-dev = <&fab_cnoc>;
+			qcom,slv-rpm-id = <ICBID_SLAVE_SERVICE_CNOC>;
+		};
+
+		slv_cr_virt_a2noc:slv-cr-virt-a2noc {
+			cell-id = <MSM_BUS_SLAVE_CRVIRT_A2NOC>;
+			label = "slv-cr-virt-a2noc";
+			qcom,buswidth = <8>;
+			qcom,agg-ports = <1>;
+			qcom,ap-owned;
+			qcom,bus-dev = <&fab_a2noc>;
+			qcom,connections = <&mas_cr_virt_a2noc>;
+			qcom,slv-rpm-id = <ICBID_SLAVE_CRVIRT_A2NOC>;
+		};
+
+		slv_gnoc_bimc:slv-gnoc-bimc {
+			cell-id = <MSM_BUS_SLAVE_GNOC_BIMC>;
+			label = "slv-gnoc-bimc";
+			qcom,buswidth = <32>;
+			qcom,agg-ports = <1>;
+			qcom,ap-owned;
+			qcom,bus-dev = <&fab_gnoc>;
+			qcom,connections = <&mas_gnoc_bimc>;
+			qcom,slv-rpm-id = <ICBID_SLAVE_GNOC_BIMC>;
+		};
+
+		slv_camera_cfg:slv-camera-cfg {
+			cell-id = <MSM_BUS_SLAVE_CAMERA_CFG>;
+			label = "slv-camera-cfg";
+			qcom,buswidth = <8>;
+			qcom,agg-ports = <1>;
+			qcom,ap-owned;
+			qcom,bus-dev = <&fab_mnoc_ahb>;
+			qcom,slv-rpm-id = <ICBID_SLAVE_CAMERA_CFG>;
+		};
+
+		slv_camera_throttle_cfg:slv-camera-throttle-cfg {
+			cell-id = <MSM_BUS_SLAVE_CAMERA_THROTTLE_CFG>;
+			label = "slv-camera-throttle-cfg";
+			qcom,buswidth = <8>;
+			qcom,agg-ports = <1>;
+			qcom,ap-owned;
+			qcom,bus-dev = <&fab_mnoc_ahb>;
+			qcom,slv-rpm-id = <ICBID_SLAVE_CAMERA_THROTTLE_CFG>;
+		};
+
+		slv_misc_cfg:slv-misc-cfg {
+			cell-id = <MSM_BUS_SLAVE_MISC_CFG>;
+			label = "slv-misc-cfg";
+			qcom,buswidth = <8>;
+			qcom,agg-ports = <1>;
+			qcom,ap-owned;
+			qcom,bus-dev = <&fab_mnoc_ahb>;
+			qcom,slv-rpm-id = <ICBID_SLAVE_MISC_CFG>;
+		};
+
+		slv_venus_throttle_cfg:slv-venus-throttle-cfg {
+			cell-id = <MSM_BUS_SLAVE_VENUS_THROTTLE_CFG>;
+			label = "slv-venus-throttle-cfg";
+			qcom,buswidth = <8>;
+			qcom,agg-ports = <1>;
+			qcom,ap-owned;
+			qcom,bus-dev = <&fab_mnoc_ahb>;
+			qcom,slv-rpm-id = <ICBID_SLAVE_VENUS_THROTTLE_CFG>;
+		};
+
+		slv_venus_cfg:slv-venus-cfg {
+			cell-id = <MSM_BUS_SLAVE_VENUS_CFG>;
+			label = "slv-venus-cfg";
+			qcom,buswidth = <8>;
+			qcom,agg-ports = <1>;
+			qcom,ap-owned;
+			qcom,bus-dev = <&fab_mnoc_ahb>;
+			qcom,slv-rpm-id = <ICBID_SLAVE_VENUS_CFG>;
+		};
+
+		slv_vmem_cfg:slv-vmem-cfg {
+			cell-id = <MSM_BUS_SLAVE_VMEM_CFG>;
+			label = "slv-vmem-cfg";
+			qcom,buswidth = <8>;
+			qcom,agg-ports = <1>;
+			qcom,ap-owned;
+			qcom,bus-dev = <&fab_mnoc_ahb>;
+			qcom,slv-rpm-id = <ICBID_SLAVE_VMEM_CFG>;
+			qcom,enable-only-clk;
+			clock-names = "node_clk";
+			clocks = <&clock_mmss clk_mmss_mnoc_maxi_clk>;
+		};
+
+		slv_mmss_clk_xpu_cfg:slv-mmss-clk-xpu-cfg {
+			cell-id = <MSM_BUS_SLAVE_MMSS_CLK_XPU_CFG>;
+			label = "slv-mmss-clk-xpu-cfg";
+			qcom,buswidth = <8>;
+			qcom,agg-ports = <1>;
+			qcom,ap-owned;
+			qcom,bus-dev = <&fab_mnoc_ahb>;
+			qcom,slv-rpm-id = <ICBID_SLAVE_MMSS_CLK_XPU_CFG>;
+		};
+
+		slv_mmss_clk_cfg:slv-mmss-clk-cfg {
+			cell-id = <MSM_BUS_SLAVE_MMSS_CLK_CFG>;
+			label = "slv-mmss-clk-cfg";
+			qcom,buswidth = <8>;
+			qcom,agg-ports = <1>;
+			qcom,ap-owned;
+			qcom,bus-dev = <&fab_mnoc_ahb>;
+			qcom,slv-rpm-id = <ICBID_SLAVE_MMSS_CLK_CFG>;
+		};
+
+		slv_display_cfg:slv-display-cfg {
+			cell-id = <MSM_BUS_SLAVE_DISPLAY_CFG>;
+			label = "slv-display-cfg";
+			qcom,buswidth = <8>;
+			qcom,agg-ports = <1>;
+			qcom,ap-owned;
+			qcom,bus-dev = <&fab_mnoc_ahb>;
+			qcom,slv-rpm-id = <ICBID_SLAVE_DISPLAY_CFG>;
+		};
+
+		slv_display_throttle_cfg:slv-display-throttle-cfg {
+			cell-id = <MSM_BUS_SLAVE_DISPLAY_THROTTLE_CFG>;
+			label = "slv-display-throttle-cfg";
+			qcom,buswidth = <8>;
+			qcom,agg-ports = <1>;
+			qcom,ap-owned;
+			qcom,bus-dev = <&fab_mnoc_ahb>;
+			qcom,slv-rpm-id = <ICBID_SLAVE_DISPLAY_THROTTLE_CFG>;
+		};
+
+		slv_smmu_cfg:slv-smmu-cfg {
+			cell-id = <MSM_BUS_SLAVE_MMSS_SMMU_CFG>;
+			label = "slv-smmu-cfg";
+			qcom,buswidth = <8>;
+			qcom,agg-ports = <1>;
+			qcom,ap-owned;
+			qcom,bus-dev = <&fab_mnoc_ahb>;
+			qcom,slv-rpm-id = <ICBID_SLAVE_MMSS_SMMU_CFG>;
+		};
+
+		slv_mnoc_bimc:slv-mnoc-bimc {
+			cell-id = <MSM_BUS_MNOC_BIMC_SLV>;
+			label = "slv-mnoc-bimc";
+			qcom,buswidth = <32>;
+			qcom,agg-ports = <2>;
+			qcom,ap-owned;
+			qcom,bus-dev = <&fab_mnoc>;
+			qcom,connections = <&mas_mnoc_bimc>;
+			qcom,slv-rpm-id = <ICBID_SLAVE_MNOC_BIMC>;
+			qcom,enable-only-clk;
+			clock-names = "node_clk";
+			clocks = <&clock_gcc clk_mmssnoc_axi_clk>;
+		};
+
+		slv_vmem: slv-vmem {
+			cell-id = <MSM_BUS_SLAVE_VMEM>;
+			label = "slv-vmem";
+			qcom,buswidth = <32>;
+			qcom,agg-ports = <1>;
+			qcom,ap-owned;
+			qcom,bus-dev = <&fab_mnoc>;
+			qcom,slv-rpm-id = <ICBID_SLAVE_VMEM>;
+			clock-names = "node_clk";
+			clocks = <&clock_mmss clk_mmss_mnoc_maxi_clk>;
+		};
+
+		slv_srvc_mnoc:slv-srvc-mnoc {
+			cell-id = <MSM_BUS_SLAVE_SERVICE_MNOC>;
+			label = "slv-srvc-mnoc";
+			qcom,buswidth = <8>;
+			qcom,agg-ports = <1>;
+			qcom,ap-owned;
+			qcom,bus-dev = <&fab_mnoc>;
+			qcom,slv-rpm-id = <ICBID_SLAVE_SERVICE_MNOC>;
+		};
+
+		slv_hmss:slv-hmss {
+			cell-id = <MSM_BUS_SLAVE_APPSS>;
+			label = "slv-hmss";
+			qcom,buswidth = <16>;
+			qcom,agg-ports = <1>;
+			qcom,ap-owned;
+			qcom,bus-dev = <&fab_snoc>;
+			qcom,slv-rpm-id = <ICBID_SLAVE_APPSS>;
+		};
+
+		slv_lpass:slv-lpass {
+			cell-id = <MSM_BUS_SLAVE_LPASS>;
+			label = "slv-lpass";
+			qcom,buswidth = <16>;
+			qcom,agg-ports = <1>;
+			qcom,ap-owned;
+			qcom,bus-dev = <&fab_snoc>;
+			qcom,slv-rpm-id = <ICBID_SLAVE_LPASS>;
+		};
+
+		slv_wlan:slv-wlan {
+			cell-id = <MSM_BUS_SLAVE_WLAN>;
+			label = "slv-wlan";
+			qcom,buswidth = <16>;
+			qcom,agg-ports = <1>;
+			qcom,bus-dev = <&fab_snoc>;
+			qcom,slv-rpm-id = <ICBID_SLAVE_WLAN>;
+		};
+
+		slv_snoc_bimc:slv-snoc-bimc {
+			cell-id = <MSM_BUS_SNOC_BIMC_SLV>;
+			label = "slv-snoc-bimc";
+			qcom,buswidth = <32>;
+			qcom,agg-ports = <2>;
+			qcom,bus-dev = <&fab_snoc>;
+			qcom,connections = <&mas_snoc_bimc>;
+			qcom,slv-rpm-id = <ICBID_SLAVE_SNOC_BIMC>;
+		};
+
+		slv_snoc_cnoc:slv-snoc-cnoc {
+			cell-id = <MSM_BUS_SNOC_CNOC_SLV>;
+			label = "slv-snoc-cnoc";
+			qcom,buswidth = <16>;
+			qcom,agg-ports = <1>;
+			qcom,bus-dev = <&fab_snoc>;
+			qcom,connections = <&mas_snoc_cnoc>;
+			qcom,slv-rpm-id = <ICBID_SLAVE_SNOC_CNOC>;
+		};
+
+		slv_imem:slv-imem {
+			cell-id = <MSM_BUS_SLAVE_OCIMEM>;
+			label = "slv-imem";
+			qcom,buswidth = <16>;
+			qcom,agg-ports = <1>;
+			qcom,bus-dev = <&fab_snoc>;
+			qcom,slv-rpm-id = <ICBID_SLAVE_IMEM>;
+		};
+
+		slv_pimem:slv-pimem {
+			cell-id = <MSM_BUS_SLAVE_PIMEM>;
+			label = "slv-pimem";
+			qcom,buswidth = <16>;
+			qcom,agg-ports = <1>;
+			qcom,bus-dev = <&fab_snoc>;
+			qcom,slv-rpm-id = <ICBID_SLAVE_PIMEM>;
+		};
+
+		slv_qdss_stm:slv-qdss-stm {
+			cell-id = <MSM_BUS_SLAVE_QDSS_STM>;
+			label = "slv-qdss-stm";
+			qcom,buswidth = <16>;
+			qcom,agg-ports = <1>;
+			qcom,bus-dev = <&fab_snoc>;
+			qcom,slv-rpm-id = <ICBID_SLAVE_QDSS_STM>;
+		};
+
+		slv_pcie_0:slv-pcie-0 {
+			cell-id = <MSM_BUS_SLAVE_PCIE_0>;
+			label = "slv-pcie-0";
+			qcom,buswidth = <16>;
+			qcom,agg-ports = <1>;
+			qcom,ap-owned;
+			qcom,bus-dev = <&fab_snoc>;
+			qcom,slv-rpm-id = <ICBID_SLAVE_PCIE_0>;
+		};
+
+		slv_srvc_snoc:slv-srvc-snoc {
+			cell-id = <MSM_BUS_SLAVE_SERVICE_SNOC>;
+			label = "slv-srvc-snoc";
+			qcom,buswidth = <16>;
+			qcom,agg-ports = <1>;
+			qcom,bus-dev = <&fab_snoc>;
+			qcom,slv-rpm-id = <ICBID_SLAVE_SERVICE_SNOC>;
+		};
+	};
+
+	devfreq_spdm_cpu {
+		compatible = "qcom,devfreq_spdm";
+		qcom,msm-bus,name = "devfreq_spdm";
+		qcom,msm-bus,num-cases = <2>;
+		qcom,msm-bus,num-paths = <1>;
+		qcom,msm-bus,vectors-KBps =
+				<1 512 0 0>,
+				<1 512 0 0>;
+		qcom,msm-bus,active-only;
+		qcom,spdm-client = <0>;
+
+		qcom,bw-upstep = <1000>;
+		qcom,bw-dwnstep = <1000>;
+		qcom,max-vote = <10000>;
+		qcom,up-step-multp = <2>;
+		qcom,spdm-interval = <100>;
+
+		qcom,ports = <24>;
+		qcom,alpha-up = <12>;
+		qcom,alpha-down = <15>;
+		qcom,bucket-size = <8>;
+
+		/* max pl1 freq, max pl2 freq */
+		qcom,pl-freqs = <260000 770000>;
+
+		/* pl1 low, pl1 high, pl2 low, pl2 high, pl3 low, pl3 high */
+		qcom,reject-rate = <5000 5000 5000 5000 5000 5000>;
+		/* pl1 low, pl1 high, pl2 low, pl2 high, pl3 low, pl3 high */
+		qcom,response-time-us = <10000 10000 10000 10000 10000 10000>;
+		/* pl1 low, pl1 high, pl2 low, pl2 high, pl3 low, pl3 high */
+		qcom,cci-response-time-us = <10000 10000 10000
+						10000 10000 10000>;
+		qcom,max-cci-freq = <1036800>;
+	};
+
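+	/*
+	 * Each qcom,msm-bus,vectors-KBps row reads <master slave ab ib>: a
+	 * master/slave port pair plus arbitrated and instantaneous
+	 * bandwidth in KBps, one row per (num-cases x num-paths). Both SPDM
+	 * cases start at zero; the governor then presumably moves the vote
+	 * in bw-upstep/bw-dwnstep increments, capped by qcom,max-vote.
+	 */
+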
+	devfreq_spdm_gov {
+		compatible = "qcom,gov_spdm_hyp";
+		interrupt-names = "spdm-irq";
+		interrupts = <0 192 IRQ_TYPE_EDGE_RISING>;
+	};
+};
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/arch/arm64/boot/dts/qcom/msm8998-camera.dtsi	2019-01-22 16:16:21.195225506 +0100
@@ -0,0 +1,909 @@
+/*
+ * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+&soc {
+	qcom,msm-cam@8c0000 {
+		compatible = "qcom,msm-cam";
+		reg = <0x8c0000 0x40000>;
+		reg-names = "msm-cam";
+		status = "ok";
+		bus-vectors = "suspend", "svs", "nominal", "turbo";
+		qcom,bus-votes = <0 300000000 640000000 640000000>;
+	};
+
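+	/*
+	 * bus-vectors names pair positionally with qcom,bus-votes: suspend
+	 * votes 0 while svs, nominal and turbo vote 300 MHz, 640 MHz and
+	 * 640 MHz, letting the camera driver select a bus level by name.
+	 */
+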
+	qcom,csiphy@ca34000 {
+		cell-index = <0>;
+		compatible = "qcom,csiphy-v5.0", "qcom,csiphy";
+		reg = <0xca34000 0x1000>;
+		reg-names = "csiphy";
+		interrupts = <0 78 0>;
+		interrupt-names = "csiphy";
+		gdscr-supply = <&gdsc_camss_top>;
+		bimc_smmu-supply = <&gdsc_bimc_smmu>;
+		qcom,cam-vreg-name = "gdscr", "bimc_smmu";
+		clocks = <&clock_gcc clk_mmssnoc_axi_clk>,
+			<&clock_mmss clk_mmss_mnoc_ahb_clk>,
+			<&clock_mmss clk_mmss_bimc_smmu_ahb_clk>,
+			<&clock_mmss clk_mmss_bimc_smmu_axi_clk>,
+			<&clock_mmss clk_mmss_camss_ahb_clk>,
+			<&clock_mmss clk_mmss_camss_top_ahb_clk>,
+			<&clock_mmss clk_csi0_clk_src>,
+			<&clock_mmss clk_mmss_camss_csi0_clk>,
+			<&clock_mmss clk_mmss_camss_cphy_csid0_clk>,
+			<&clock_mmss clk_csi0phytimer_clk_src>,
+			<&clock_mmss clk_mmss_camss_csi0phytimer_clk>,
+			<&clock_mmss clk_mmss_camss_ispif_ahb_clk>,
+			<&clock_mmss clk_csiphy_clk_src>,
+			<&clock_mmss clk_mmss_camss_csiphy0_clk>;
+		clock-names = "mmssnoc_axi", "mnoc_ahb",
+			"bmic_smmu_ahb", "bmic_smmu_axi",
+			"camss_ahb_clk", "camss_top_ahb_clk",
+			"csi_src_clk", "csi_clk", "cphy_csid_clk",
+			"csiphy_timer_src_clk", "csiphy_timer_clk",
+			"camss_ispif_ahb_clk", "csiphy_clk_src", "csiphy_clk";
+		qcom,clock-rates = <0 0 0 0 0 0 274290000 0 0 200000000 0
+			0 274290000 0>;
+		status = "ok";
+	};
+
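+	/*
+	 * qcom,clock-rates lines up index-for-index with clock-names: a 0
+	 * leaves that clock at its existing rate, a non-zero entry is a
+	 * rate in Hz (274.29 MHz for the CSI and CSIPHY source clocks,
+	 * 200 MHz for the PHY timer). The csiphy@ca35000 and @ca36000
+	 * nodes below repeat the template with the csi1/csi2 clock set.
+	 */
+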
+	qcom,csiphy@ca35000 {
+		cell-index = <1>;
+		compatible = "qcom,csiphy-v5.0", "qcom,csiphy";
+		reg = <0xca35000 0x1000>;
+		reg-names = "csiphy";
+		interrupts = <0 79 0>;
+		interrupt-names = "csiphy";
+		gdscr-supply = <&gdsc_camss_top>;
+		bimc_smmu-supply = <&gdsc_bimc_smmu>;
+		qcom,cam-vreg-name = "gdscr", "bimc_smmu";
+		clocks = <&clock_gcc clk_mmssnoc_axi_clk>,
+			<&clock_mmss clk_mmss_mnoc_ahb_clk>,
+			<&clock_mmss clk_mmss_bimc_smmu_ahb_clk>,
+			<&clock_mmss clk_mmss_bimc_smmu_axi_clk>,
+			<&clock_mmss clk_mmss_camss_ahb_clk>,
+			<&clock_mmss clk_mmss_camss_top_ahb_clk>,
+			<&clock_mmss clk_csi1_clk_src>,
+			<&clock_mmss clk_mmss_camss_csi1_clk>,
+			<&clock_mmss clk_mmss_camss_cphy_csid1_clk>,
+			<&clock_mmss clk_csi1phytimer_clk_src>,
+			<&clock_mmss clk_mmss_camss_csi1phytimer_clk>,
+			<&clock_mmss clk_mmss_camss_ispif_ahb_clk>,
+			<&clock_mmss clk_csiphy_clk_src>,
+			<&clock_mmss clk_mmss_camss_csiphy1_clk>;
+		clock-names = "mmssnoc_axi", "mnoc_ahb",
+			"bmic_smmu_ahb", "bmic_smmu_axi",
+			"camss_ahb_clk", "camss_top_ahb_clk",
+			"csi_src_clk", "csi_clk", "cphy_csid_clk",
+			"csiphy_timer_src_clk", "csiphy_timer_clk",
+			"camss_ispif_ahb_clk", "csiphy_clk_src", "csiphy_clk";
+		qcom,clock-rates = <0 0 0 0 0 0 274290000 0 0 200000000 0
+			0 274290000 0>;
+		status = "ok";
+	};
+
+	qcom,csiphy@ca36000 {
+		cell-index = <2>;
+		compatible = "qcom,csiphy-v5.0", "qcom,csiphy";
+		reg = <0xca36000 0x1000>;
+		reg-names = "csiphy";
+		interrupts = <0 80 0>;
+		interrupt-names = "csiphy";
+		gdscr-supply = <&gdsc_camss_top>;
+		bimc_smmu-supply = <&gdsc_bimc_smmu>;
+		qcom,cam-vreg-name = "gdscr", "bimc_smmu";
+		clocks = <&clock_gcc clk_mmssnoc_axi_clk>,
+			<&clock_mmss clk_mmss_mnoc_ahb_clk>,
+			<&clock_mmss clk_mmss_bimc_smmu_ahb_clk>,
+			<&clock_mmss clk_mmss_bimc_smmu_axi_clk>,
+			<&clock_mmss clk_mmss_camss_ahb_clk>,
+			<&clock_mmss clk_mmss_camss_top_ahb_clk>,
+			<&clock_mmss clk_csi2_clk_src>,
+			<&clock_mmss clk_mmss_camss_csi2_clk>,
+			<&clock_mmss clk_mmss_camss_cphy_csid2_clk>,
+			<&clock_mmss clk_csi2phytimer_clk_src>,
+			<&clock_mmss clk_mmss_camss_csi2phytimer_clk>,
+			<&clock_mmss clk_mmss_camss_ispif_ahb_clk>,
+			<&clock_mmss clk_csiphy_clk_src>,
+			<&clock_mmss clk_mmss_camss_csiphy2_clk>;
+		clock-names = "mmssnoc_axi", "mnoc_ahb",
+			"bmic_smmu_ahb", "bmic_smmu_axi",
+			"camss_ahb_clk", "camss_top_ahb_clk",
+			"csi_src_clk", "csi_clk", "cphy_csid_clk",
+			"csiphy_timer_src_clk", "csiphy_timer_clk",
+			"camss_ispif_ahb_clk", "csiphy_clk_src", "csiphy_clk";
+		qcom,clock-rates = <0 0 0 0 0 0 274290000 0 0 200000000 0
+			0 274290000 0>;
+		status = "ok";
+	};
+
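+	/*
+	 * CSID (CSI decoder) cores 0-3: demultiplex virtual channels and
+	 * data types from the PHYs onto the RDI/PIX paths consumed by the
+	 * ISPIF.
+	 */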
+	qcom,csid@ca30000 {
+		cell-index = <0>;
+		compatible = "qcom,csid-v5.0", "qcom,csid";
+		reg = <0xca30000 0x400>;
+		reg-names = "csid";
+		interrupts = <0 296 0>;
+		interrupt-names = "csid";
+		qcom,csi-vdd-voltage = <1200000>;
+		qcom,mipi-csi-vdd-supply = <&pm8998_l2>;
+		gdscr-supply = <&gdsc_camss_top>;
+		vdd_sec-supply = <&pm8998_l1>;
+		bimc_smmu-supply = <&gdsc_bimc_smmu>;
+		qcom,cam-vreg-name = "vdd_sec", "gdscr", "bimc_smmu";
+		clocks = <&clock_gcc clk_mmssnoc_axi_clk>,
+			<&clock_mmss clk_mmss_mnoc_ahb_clk>,
+			<&clock_mmss clk_mmss_bimc_smmu_ahb_clk>,
+			<&clock_mmss clk_mmss_bimc_smmu_axi_clk>,
+			<&clock_mmss clk_mmss_camss_ahb_clk>,
+			<&clock_mmss clk_mmss_camss_top_ahb_clk>,
+			<&clock_mmss clk_mmss_camss_ispif_ahb_clk>,
+			<&clock_mmss clk_csi0_clk_src>,
+			<&clock_mmss clk_csiphy_clk_src>,
+			<&clock_mmss clk_mmss_camss_csi0_clk>,
+			<&clock_mmss clk_mmss_camss_csi0_ahb_clk>,
+			<&clock_mmss clk_mmss_camss_csi0rdi_clk>,
+			<&clock_mmss clk_mmss_camss_csi0pix_clk>,
+			<&clock_mmss clk_mmss_camss_cphy_csid0_clk>;
+		clock-names = "mmssnoc_axi", "mnoc_ahb",
+			"bmic_smmu_ahb", "bmic_smmu_axi",
+			"camss_ahb_clk", "camss_top_ahb_clk",
+			"ispif_ahb_clk", "csi_src_clk", "csiphy_clk_src",
+			"csi_clk", "csi_ahb_clk", "csi_rdi_clk",
+			"csi_pix_clk", "cphy_csid_clk";
+		qcom,clock-rates = <0 0 0 0 0 0 0 274290000 274290000
+			0 0 0 0 0>;
+		status = "ok";
+	};
+
+	qcom,csid@ca30400 {
+		cell-index = <1>;
+		compatible = "qcom,csid-v5.0", "qcom,csid";
+		reg = <0xca30400 0x400>;
+		reg-names = "csid";
+		interrupts = <0 297 0>;
+		interrupt-names = "csid";
+		qcom,csi-vdd-voltage = <1200000>;
+		qcom,mipi-csi-vdd-supply = <&pm8998_l2>;
+		gdscr-supply = <&gdsc_camss_top>;
+		vdd_sec-supply = <&pm8998_l1>;
+		bimc_smmu-supply = <&gdsc_bimc_smmu>;
+		qcom,cam-vreg-name = "vdd_sec", "gdscr", "bimc_smmu";
+		clocks = <&clock_gcc clk_mmssnoc_axi_clk>,
+			<&clock_mmss clk_mmss_mnoc_ahb_clk>,
+			<&clock_mmss clk_mmss_bimc_smmu_ahb_clk>,
+			<&clock_mmss clk_mmss_bimc_smmu_axi_clk>,
+			<&clock_mmss clk_mmss_camss_ahb_clk>,
+			<&clock_mmss clk_mmss_camss_top_ahb_clk>,
+			<&clock_mmss clk_mmss_camss_ispif_ahb_clk>,
+			<&clock_mmss clk_csi1_clk_src>,
+			<&clock_mmss clk_csiphy_clk_src>,
+			<&clock_mmss clk_mmss_camss_csi1_clk>,
+			<&clock_mmss clk_mmss_camss_csi1_ahb_clk>,
+			<&clock_mmss clk_mmss_camss_csi1rdi_clk>,
+			<&clock_mmss clk_mmss_camss_csi1pix_clk>,
+			<&clock_mmss clk_mmss_camss_cphy_csid1_clk>;
+		clock-names = "mmssnoc_axi", "mnoc_ahb",
+			"bmic_smmu_ahb", "bmic_smmu_axi",
+			"camss_ahb_clk", "camss_top_ahb_clk",
+			"ispif_ahb_clk", "csi_src_clk", "csiphy_clk_src",
+			"csi_clk", "csi_ahb_clk", "csi_rdi_clk",
+			"csi_pix_clk", "cphy_csid_clk";
+		qcom,clock-rates = <0 0 0 0 0 0 0 274290000 274290000
+			0 0 0 0 0>;
+		status = "ok";
+	};
+
+	qcom,csid@ca30800 {
+		cell-index = <2>;
+		compatible = "qcom,csid-v5.0", "qcom,csid";
+		reg = <0xca30800 0x400>;
+		reg-names = "csid";
+		interrupts = <0 298 0>;
+		interrupt-names = "csid";
+		qcom,csi-vdd-voltage = <1200000>;
+		qcom,mipi-csi-vdd-supply = <&pm8998_l2>;
+		gdscr-supply = <&gdsc_camss_top>;
+		vdd_sec-supply = <&pm8998_l1>;
+		bimc_smmu-supply = <&gdsc_bimc_smmu>;
+		qcom,cam-vreg-name = "vdd_sec", "gdscr", "bimc_smmu";
+		clocks = <&clock_gcc clk_mmssnoc_axi_clk>,
+			<&clock_mmss clk_mmss_mnoc_ahb_clk>,
+			<&clock_mmss clk_mmss_bimc_smmu_ahb_clk>,
+			<&clock_mmss clk_mmss_bimc_smmu_axi_clk>,
+			<&clock_mmss clk_mmss_camss_ahb_clk>,
+			<&clock_mmss clk_mmss_camss_top_ahb_clk>,
+			<&clock_mmss clk_mmss_camss_ispif_ahb_clk>,
+			<&clock_mmss clk_csi2_clk_src>,
+			<&clock_mmss clk_csiphy_clk_src>,
+			<&clock_mmss clk_mmss_camss_csi2_clk>,
+			<&clock_mmss clk_mmss_camss_csi2_ahb_clk>,
+			<&clock_mmss clk_mmss_camss_csi2rdi_clk>,
+			<&clock_mmss clk_mmss_camss_csi2pix_clk>,
+			<&clock_mmss clk_mmss_camss_cphy_csid2_clk>;
+		clock-names = "mmssnoc_axi", "mnoc_ahb",
+			"bmic_smmu_ahb", "bmic_smmu_axi",
+			"camss_ahb_clk", "camss_top_ahb_clk",
+			"ispif_ahb_clk", "csi_src_clk", "csiphy_clk_src",
+			"csi_clk", "csi_ahb_clk", "csi_rdi_clk",
+			"csi_pix_clk", "cphy_csid_clk";
+		qcom,clock-rates = <0 0 0 0 0 0 0 274290000 274290000
+			0 0 0 0 0>;
+		status = "ok";
+	};
+
+	qcom,csid@ca30c00 {
+		cell-index = <3>;
+		compatible = "qcom,csid-v5.0", "qcom,csid";
+		reg = <0xca30c00 0x400>;
+		reg-names = "csid";
+		interrupts = <0 299 0>;
+		interrupt-names = "csid";
+		qcom,csi-vdd-voltage = <1200000>;
+		qcom,mipi-csi-vdd-supply = <&pm8998_l2>;
+		gdscr-supply = <&gdsc_camss_top>;
+		vdd_sec-supply = <&pm8998_l1>;
+		bimc_smmu-supply = <&gdsc_bimc_smmu>;
+		qcom,cam-vreg-name = "vdd_sec", "gdscr", "bimc_smmu";
+		clocks = <&clock_gcc clk_mmssnoc_axi_clk>,
+			<&clock_mmss clk_mmss_mnoc_ahb_clk>,
+			<&clock_mmss clk_mmss_bimc_smmu_ahb_clk>,
+			<&clock_mmss clk_mmss_bimc_smmu_axi_clk>,
+			<&clock_mmss clk_mmss_camss_ahb_clk>,
+			<&clock_mmss clk_mmss_camss_top_ahb_clk>,
+			<&clock_mmss clk_mmss_camss_ispif_ahb_clk>,
+			<&clock_mmss clk_csi3_clk_src>,
+			<&clock_mmss clk_csiphy_clk_src>,
+			<&clock_mmss clk_mmss_camss_csi3_clk>,
+			<&clock_mmss clk_mmss_camss_csi3_ahb_clk>,
+			<&clock_mmss clk_mmss_camss_csi3rdi_clk>,
+			<&clock_mmss clk_mmss_camss_csi3pix_clk>,
+			<&clock_mmss clk_mmss_camss_cphy_csid3_clk>;
+		clock-names = "mmssnoc_axi", "mnoc_ahb",
+			"bmic_smmu_ahb", "bmic_smmu_axi",
+			"camss_ahb_clk", "camss_top_ahb_clk",
+			"ispif_ahb_clk", "csi_src_clk", "csiphy_clk_src",
+			"csi_clk", "csi_ahb_clk", "csi_rdi_clk",
+			"csi_pix_clk", "cphy_csid_clk";
+		qcom,clock-rates = <0 0 0 0 0 0 0 274290000 274290000
+			0 0 0 0 0>;
+		status = "ok";
+	};
+
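+	/* SMMU context banks for the camera bus masters (VFE, CPP, FD, JPEG). */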
+	qcom,cam_smmu {
+		compatible = "qcom,msm-cam-smmu";
+		status = "ok";
+
+		msm_cam_smmu_cb1 {
+			compatible = "qcom,msm-cam-smmu-cb";
+			iommus = <&mmss_smmu 0xc00>,
+					<&mmss_smmu 0xc01>,
+					<&mmss_smmu 0xc02>,
+					<&mmss_smmu 0xc03>;
+			label = "vfe";
+			qcom,scratch-buf-support;
+		};
+
+		msm_cam_smmu_cb2 {
+			compatible = "qcom,msm-cam-smmu-cb";
+			iommus = <&mmss_smmu 0xa00>;
+			label = "cpp";
+		};
+
+		msm_cam_smmu_cb3 {
+			compatible = "qcom,msm-cam-smmu-cb";
+			iommus = <&mmss_smmu 0xa01>;
+			label = "camera_fd";
+		};
+
+		msm_cam_smmu_cb4 {
+			compatible = "qcom,msm-cam-smmu-cb";
+			iommus = <&mmss_smmu 0x800>;
+			label = "jpeg_enc0";
+		};
+
+		msm_cam_smmu_cb5 {
+			compatible = "qcom,msm-cam-smmu-cb";
+			iommus = <&mmss_smmu 0x801>;
+			label = "jpeg_dma";
+		};
+	};
+
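+	/*
+	 * Face-detection block; the four qcom,clock-rates rows correspond
+	 * to the four bus-vote cases declared below.
+	 */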
+	qcom,fd@caa4000 {
+		cell-index = <0>;
+		compatible = "qcom,face-detection";
+		reg = <0xcaa4000 0x800>,
+			<0xcaa5000 0x400>,
+			<0xca80000 0x3000>;
+		reg-names = "fd_core", "fd_misc", "fd_vbif";
+		interrupts = <0 293 0>;
+		interrupt-names = "fd";
+		smmu-vdd-supply = <&gdsc_bimc_smmu>;
+		camss-vdd-supply = <&gdsc_camss_top>;
+		vdd-supply = <&gdsc_cpp>;
+		qcom,vdd-names = "smmu-vdd", "camss-vdd", "vdd";
+		clocks = <&clock_gcc clk_mmssnoc_axi_clk>,
+			<&clock_mmss clk_mmss_mnoc_ahb_clk>,
+			<&clock_mmss clk_mmss_bimc_smmu_ahb_clk>,
+			<&clock_mmss clk_mmss_bimc_smmu_axi_clk>,
+			<&clock_mmss clk_mmss_camss_ahb_clk>,
+			<&clock_mmss clk_mmss_camss_top_ahb_clk>,
+			<&clock_mmss clk_mmss_fd_core_clk>,
+			<&clock_mmss clk_mmss_fd_core_uar_clk>,
+			<&clock_mmss clk_mmss_fd_ahb_clk>,
+			<&clock_mmss clk_mmss_camss_cpp_axi_clk>,
+			<&clock_mmss clk_mmss_camss_cpp_vbif_ahb_clk>;
+		clock-names = "mmssnoc_axi",
+			"mmss_mnoc_ahb_clk",
+			"mmss_bimc_smmu_ahb_clk",
+			"mmss_bimc_smmu_axi_clk",
+			"mmss_camss_ahb_clk",
+			"mmss_camss_top_ahb_clk",
+			"mmss_fd_core_clk",
+			"mmss_fd_core_uar_clk",
+			"mmss_fd_ahb_clk",
+			"mmss_camss_cpp_axi_clk",
+			"mmss_camss_cpp_vbif_ahb_clk";
+		qcom,clock-rates =
+			<0 0 0 0 0 0 404000000 0 0 0 0>,
+			<0 0 0 0 0 0 100000000 0 0 0 0>,
+			<0 0 0 0 0 0 404000000 0 0 0 0>,
+			<0 0 0 0 0 0 404000000 0 0 0 0>;
+		qcom,msm-bus,name = "msm_camera_fd";
+		qcom,msm-bus,num-cases = <4>;
+		qcom,msm-bus,num-paths = <1>;
+		qcom,msm-bus,vectors-KBps = <106 512 0 0>,
+			<106 512 1625 0>,
+			<106 512 2995 0>,
+			<106 512 7200 0>;
+		qcom,fd-vbif-reg-settings = <0x20 0x10000000 0x30000000>,
+			<0x24 0x10000000 0x30000000>,
+			<0x28 0x10000000 0x30000000>,
+			<0x2c 0x10000000 0x30000000>;
+		qcom,fd-misc-reg-settings = <0x20 0x2 0x3>,
+			<0x24 0x2 0x3>;
+		status = "ok";
+	};
+
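+	/*
+	 * CPP (camera post-processor), driven by its own microcontroller
+	 * (micro_iface_clk, CAMSS_MICRO_BCR reset).
+	 */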
+	qcom,cpp@ca04000 {
+		cell-index = <0>;
+		compatible = "qcom,cpp";
+		reg = <0xca04000 0x100>,
+			<0xca80000 0x3000>,
+			<0xca18000 0x3000>,
+			<0xc8c36d4 0x4>;
+		reg-names = "cpp", "cpp_vbif", "cpp_hw", "camss_cpp";
+		interrupts = <0 294 0>;
+		interrupt-names = "cpp";
+		smmu-vdd-supply = <&gdsc_bimc_smmu>;
+		camss-vdd-supply = <&gdsc_camss_top>;
+		vdd-supply = <&gdsc_cpp>;
+		qcom,vdd-names = "smmu-vdd", "camss-vdd", "vdd";
+		clocks = <&clock_gcc clk_mmssnoc_axi_clk>,
+			<&clock_mmss clk_mmss_mnoc_ahb_clk>,
+			<&clock_mmss clk_mmss_camss_ahb_clk>,
+			<&clock_mmss clk_mmss_camss_top_ahb_clk>,
+			<&clock_mmss clk_cpp_clk_src>,
+			<&clock_mmss clk_mmss_camss_cpp_clk>,
+			<&clock_mmss clk_mmss_camss_cpp_ahb_clk>,
+			<&clock_mmss clk_mmss_camss_cpp_axi_clk>,
+			<&clock_mmss clk_mmss_camss_micro_ahb_clk>,
+			<&clock_mmss clk_mmss_bimc_smmu_axi_clk>,
+			<&clock_mmss clk_mmss_camss_cpp_vbif_ahb_clk>;
+		clock-names = "mmssnoc_axi_clk",
+			"mnoc_ahb_clk",
+			"camss_ahb_clk", "camss_top_ahb_clk",
+			"cpp_src_clk",
+			"cpp_core_clk", "camss_cpp_ahb_clk",
+			"camss_cpp_axi_clk", "micro_iface_clk",
+			"mmss_smmu_axi_clk", "cpp_vbif_ahb_clk";
+		qcom,clock-rates = <0 0 0 0 200000000 200000000 0 0 0 0 0>;
+		qcom,min-clock-rate = <200000000>;
+		qcom,bus-master = <1>;
+		qcom,vbif-qos-setting = <0x550 0x33333333>,
+			<0x554 0x03333333>,
+			<0x558 0x33333333>,
+			<0x55c 0x03333333>,
+			<0x560 0x33333333>,
+			<0x564 0x03333333>,
+			<0x568 0x33333333>,
+			<0x56c 0x03333333>,
+			<0x570 0x33333333>,
+			<0x574 0x03333333>,
+			<0x578 0x33333333>,
+			<0x57c 0x03333333>,
+			<0x580 0x33333333>,
+			<0x584 0x03333333>,
+			<0x588 0x33333333>,
+			<0x58c 0x03333333>;
+		status = "ok";
+		qcom,msm-bus,name = "msm_camera_cpp";
+		qcom,msm-bus,num-cases = <2>;
+		qcom,msm-bus,num-paths = <1>;
+		qcom,msm-bus,vectors-KBps =
+			<106 512 0 0>,
+			<106 512 0 0>;
+		qcom,msm-bus-vector-dyn-vote;
+		resets = <&clock_mmss CAMSS_MICRO_BCR>;
+		reset-names = "micro_iface_reset";
+		qcom,src-clock-rates = <100000000 200000000 576000000
+			600000000>;
+		qcom,micro-reset;
+		qcom,cpp-fw-payload-info {
+			qcom,stripe-base = <790>;
+			qcom,plane-base = <715>;
+			qcom,stripe-size = <63>;
+			qcom,plane-size = <25>;
+			qcom,fe-ptr-off = <11>;
+			qcom,we-ptr-off = <23>;
+			qcom,ref-fe-ptr-off = <17>;
+			qcom,ref-we-ptr-off = <36>;
+			qcom,we-meta-ptr-off = <42>;
+			qcom,fe-mmu-pf-ptr-off = <7>;
+			qcom,ref-fe-mmu-pf-ptr-off = <10>;
+			qcom,we-mmu-pf-ptr-off = <13>;
+			qcom,dup-we-mmu-pf-ptr-off = <18>;
+			qcom,ref-we-mmu-pf-ptr-off = <23>;
+			qcom,set-group-buffer-len = <135>;
+			qcom,dup-frame-indicator-off = <70>;
+		};
+	};
+
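+	/*
+	 * ISPIF: routes CSID output streams to the VFEs. The
+	 * qcom,clock-control entries mark, per clock, whether only an
+	 * initial rate is programmed (INIT_RATE) or no rate is set at all
+	 * (NO_SET_RATE).
+	 */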
+	qcom,ispif@ca31000 {
+		cell-index = <0>;
+		compatible = "qcom,ispif-v3.0", "qcom,ispif";
+		reg = <0xca31000 0xc00>,
+			<0xca00020 0x4>;
+		reg-names = "ispif", "csi_clk_mux";
+		interrupts = <0 309 0>;
+		interrupt-names = "ispif";
+		qcom,num-isps = <0x2>;
+		camss-vdd-supply = <&gdsc_camss_top>;
+		vfe0-vdd-supply = <&gdsc_vfe0>;
+		vfe1-vdd-supply = <&gdsc_vfe1>;
+		qcom,vdd-names = "camss-vdd", "vfe0-vdd",
+				"vfe1-vdd";
+		qcom,clock-cntl-support;
+		clocks = <&clock_gcc clk_mmssnoc_axi_clk>,
+			<&clock_mmss clk_mmss_mnoc_ahb_clk>,
+			<&clock_mmss clk_mmss_camss_ahb_clk>,
+			<&clock_mmss clk_mmss_camss_top_ahb_clk>,
+			<&clock_mmss clk_mmss_camss_ispif_ahb_clk>,
+			<&clock_mmss clk_csi0_clk_src>,
+			<&clock_mmss clk_csi1_clk_src>,
+			<&clock_mmss clk_csi2_clk_src>,
+			<&clock_mmss clk_csi3_clk_src>,
+			<&clock_mmss clk_mmss_camss_csi0rdi_clk>,
+			<&clock_mmss clk_mmss_camss_csi1rdi_clk>,
+			<&clock_mmss clk_mmss_camss_csi2rdi_clk>,
+			<&clock_mmss clk_mmss_camss_csi3rdi_clk>,
+			<&clock_mmss clk_mmss_camss_csi0pix_clk>,
+			<&clock_mmss clk_mmss_camss_csi1pix_clk>,
+			<&clock_mmss clk_mmss_camss_csi2pix_clk>,
+			<&clock_mmss clk_mmss_camss_csi3pix_clk>,
+			<&clock_mmss clk_mmss_camss_csi0_clk>,
+			<&clock_mmss clk_mmss_camss_csi1_clk>,
+			<&clock_mmss clk_mmss_camss_csi2_clk>,
+			<&clock_mmss clk_mmss_camss_csi3_clk>,
+			<&clock_mmss clk_vfe0_clk_src>,
+			<&clock_mmss clk_mmss_camss_vfe0_clk>,
+			<&clock_mmss clk_mmss_camss_csi_vfe0_clk>,
+			<&clock_mmss clk_vfe1_clk_src>,
+			<&clock_mmss clk_mmss_camss_vfe1_clk>,
+			<&clock_mmss clk_mmss_camss_csi_vfe1_clk>;
+		clock-names = "mmssnoc_axi", "mnoc_ahb_clk",
+			"camss_ahb_clk",
+			"camss_top_ahb_clk", "ispif_ahb_clk",
+			"csi0_src_clk", "csi1_src_clk",
+			"csi2_src_clk", "csi3_src_clk",
+			"csi0_rdi_clk", "csi1_rdi_clk",
+			"csi2_rdi_clk", "csi3_rdi_clk",
+			"csi0_pix_clk", "csi1_pix_clk",
+			"csi2_pix_clk", "csi3_pix_clk",
+			"camss_csi0_clk", "camss_csi1_clk",
+			"camss_csi2_clk", "camss_csi3_clk",
+			"vfe0_clk_src",
+			"camss_vfe_vfe0_clk",
+			"camss_csi_vfe0_clk",
+			"vfe1_clk_src",
+			"camss_vfe_vfe1_clk",
+			"camss_csi_vfe1_clk";
+		qcom,clock-rates = <0 0 0 0 0
+			0 0 0 0
+			0 0 0 0
+			0 0 0 0
+			0 0 0 0
+			0 0 0
+			0 0 0>;
+		qcom,clock-control = "INIT_RATE", "NO_SET_RATE",
+			"NO_SET_RATE", "NO_SET_RATE",
+			"NO_SET_RATE",
+			"INIT_RATE", "INIT_RATE",
+			"INIT_RATE", "INIT_RATE",
+			"NO_SET_RATE", "NO_SET_RATE",
+			"NO_SET_RATE", "NO_SET_RATE",
+			"NO_SET_RATE", "NO_SET_RATE",
+			"NO_SET_RATE", "NO_SET_RATE",
+			"NO_SET_RATE", "NO_SET_RATE",
+			"NO_SET_RATE", "NO_SET_RATE",
+			"INIT_RATE",
+			"NO_SET_RATE", "NO_SET_RATE",
+			"INIT_RATE",
+			"NO_SET_RATE", "NO_SET_RATE";
+		status = "ok";
+	};
+
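+	/*
+	 * VFE image processors; the three qcom,clock-rates rows give the
+	 * 480/576/600 MHz operating levels of vfe_clk_src.
+	 */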
+	vfe0: qcom,vfe0@ca10000 {
+		cell-index = <0>;
+		compatible = "qcom,vfe48";
+		reg = <0xca10000 0x4000>,
+			<0xca40000 0x3000>;
+		reg-names = "vfe", "vfe_vbif";
+		interrupts = <0 314 0>;
+		interrupt-names = "vfe";
+		vdd-supply = <&gdsc_vfe0>;
+		camss-vdd-supply = <&gdsc_camss_top>;
+		smmu-vdd-supply = <&gdsc_bimc_smmu>;
+		qcom,vdd-names = "vdd", "camss-vdd", "smmu-vdd";
+		clocks = <&clock_gcc clk_mmssnoc_axi_clk>,
+			<&clock_mmss clk_mmss_mnoc_ahb_clk>,
+			<&clock_mmss clk_mmss_bimc_smmu_ahb_clk>,
+			<&clock_mmss clk_mmss_bimc_smmu_axi_clk>,
+			<&clock_mmss clk_mmss_camss_ahb_clk>,
+			<&clock_mmss clk_mmss_camss_top_ahb_clk>,
+			<&clock_mmss clk_vfe0_clk_src>,
+			<&clock_mmss clk_mmss_camss_vfe0_clk>,
+			<&clock_mmss clk_mmss_camss_vfe0_stream_clk>,
+			<&clock_mmss clk_mmss_camss_vfe0_ahb_clk>,
+			<&clock_mmss clk_mmss_camss_vfe_vbif_ahb_clk>,
+			<&clock_mmss clk_mmss_camss_vfe_vbif_axi_clk>,
+			<&clock_mmss clk_mmss_camss_csi_vfe0_clk>;
+		clock-names = "mmssnoc_axi", "mnoc_ahb_clk",
+			"bimc_smmu_ahb_clk", "bimc_smmu_axi_clk",
+			"camss_ahb_clk", "camss_top_ahb_clk", "vfe_clk_src",
+			"camss_vfe_clk", "camss_vfe_stream_clk",
+			"camss_vfe_ahb_clk", "camss_vfe_vbif_ahb_clk",
+			"camss_vfe_vbif_axi_clk",
+			"camss_csi_vfe_clk";
+		qcom,clock-rates = <0 0 0 0 0 0 480000000 0 0 0 0 0 0
+					0 0 0 0 0 0 576000000 0 0 0 0 0 0
+					0 0 0 0 0 0 600000000 0 0 0 0 0 0>;
+		status = "ok";
+		qos-entries = <8>;
+		qos-regs = <0x404 0x408 0x40c 0x410 0x414 0x418
+			0x41c 0x420>;
+		qos-settings = <0xaaa9aaa9
+			0xaaa9aaa9
+			0xaaa9aaa9
+			0xaaa9aaa9
+			0xaaa9aaa9
+			0xaaa9aaa9
+			0xaaa9aaa9
+			0xaaa9aaa9>;
+		vbif-entries = <1>;
+		vbif-regs = <0x124>;
+		vbif-settings = <0x3>;
+		ds-entries = <17>;
+		ds-regs = <0x424 0x428 0x42c 0x430 0x434
+			0x438 0x43c 0x440 0x444 0x448 0x44c
+			0x450 0x454 0x458 0x45c 0x460 0x464>;
+		ds-settings = <0xcccc0011
+			0xcccc0011
+			0xcccc0011
+			0xcccc0011
+			0xcccc0011
+			0xcccc0011
+			0xcccc0011
+			0xcccc0011
+			0xcccc0011
+			0xcccc0011
+			0xcccc0011
+			0xcccc0011
+			0xcccc0011
+			0xcccc0011
+			0xcccc0011
+			0xcccc0011
+			0x40000103>;
+		qcom,msm-bus,name = "msm_camera_vfe";
+		qcom,msm-bus,num-cases = <2>;
+		qcom,msm-bus,num-paths = <1>;
+		qcom,msm-bus,vectors-KBps =
+			<29 512 0 0>,
+			<29 512 100000000 100000000>;
+		qcom,msm-bus-vector-dyn-vote;
+	};
+
+	vfe1: qcom,vfe1@ca14000 {
+		cell-index = <1>;
+		compatible = "qcom,vfe48";
+		reg = <0xca14000 0x4000>,
+			<0xca40000 0x3000>;
+		reg-names = "vfe", "vfe_vbif";
+		interrupts = <0 315 0>;
+		interrupt-names = "vfe";
+		vdd-supply = <&gdsc_vfe1>;
+		camss-vdd-supply = <&gdsc_camss_top>;
+		smmu-vdd-supply = <&gdsc_bimc_smmu>;
+		qcom,vdd-names = "vdd", "camss-vdd", "smmu-vdd";
+		clocks = <&clock_gcc clk_mmssnoc_axi_clk>,
+			<&clock_mmss clk_mmss_mnoc_ahb_clk>,
+			<&clock_mmss clk_mmss_bimc_smmu_ahb_clk>,
+			<&clock_mmss clk_mmss_bimc_smmu_axi_clk>,
+			<&clock_mmss clk_mmss_camss_ahb_clk>,
+			<&clock_mmss clk_mmss_camss_top_ahb_clk>,
+			<&clock_mmss clk_vfe1_clk_src>,
+			<&clock_mmss clk_mmss_camss_vfe1_clk>,
+			<&clock_mmss clk_mmss_camss_vfe1_stream_clk>,
+			<&clock_mmss clk_mmss_camss_vfe1_ahb_clk>,
+			<&clock_mmss clk_mmss_camss_vfe_vbif_ahb_clk>,
+			<&clock_mmss clk_mmss_camss_vfe_vbif_axi_clk>,
+			<&clock_mmss clk_mmss_camss_csi_vfe1_clk>;
+		clock-names = "mmssnoc_axi", "mnoc_ahb_clk",
+			"bimc_smmu_ahb_clk", "bimc_smmu_axi_clk",
+			"camss_ahb_clk", "camss_top_ahb_clk", "vfe_clk_src",
+			"camss_vfe_clk", "camss_vfe_stream_clk",
+			"camss_vfe_ahb_clk", "camss_vfe_vbif_ahb_clk",
+			"camss_vfe_vbif_axi_clk",
+			"camss_csi_vfe_clk";
+		qcom,clock-rates = <0 0 0 0 0 0 480000000 0 0 0 0 0 0
+					0 0 0 0 0 0 576000000 0 0 0 0 0 0
+					0 0 0 0 0 0 600000000 0 0 0 0 0 0>;
+		status = "ok";
+		qos-entries = <8>;
+		qos-regs = <0x404 0x408 0x40c 0x410 0x414 0x418
+			0x41c 0x420>;
+		qos-settings = <0xaaa9aaa9
+			0xaaa9aaa9
+			0xaaa9aaa9
+			0xaaa9aaa9
+			0xaaa9aaa9
+			0xaaa9aaa9
+			0xaaa9aaa9
+			0xaaa9aaa9>;
+		vbif-entries = <1>;
+		vbif-regs = <0x124>;
+		vbif-settings = <0x3>;
+		ds-entries = <17>;
+		ds-regs = <0x424 0x428 0x42c 0x430 0x434
+			0x438 0x43c 0x440 0x444 0x448 0x44c
+			0x450 0x454 0x458 0x45c 0x460 0x464>;
+		ds-settings = <0xcccc0011
+			0xcccc0011
+			0xcccc0011
+			0xcccc0011
+			0xcccc0011
+			0xcccc0011
+			0xcccc0011
+			0xcccc0011
+			0xcccc0011
+			0xcccc0011
+			0xcccc0011
+			0xcccc0011
+			0xcccc0011
+			0xcccc0011
+			0xcccc0011
+			0xcccc0011
+			0x40000103>;
+		qcom,msm-bus,name = "msm_camera_vfe";
+		qcom,msm-bus,num-cases = <2>;
+		qcom,msm-bus,num-paths = <1>;
+		qcom,msm-bus,vectors-KBps =
+			<29 512 0 0>,
+			<29 512 100000000 100000000>;
+		qcom,msm-bus-vector-dyn-vote;
+	};
+
+	qcom,vfe {
+		compatible = "qcom,vfe";
+		num_child = <2>;
+	};
+
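+	/*
+	 * CCI I2C controller for sensor/actuator buses 0 and 1; the
+	 * per-mode timing child nodes are populated by the &i2c_freq_*
+	 * overrides at the end of this file.
+	 */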
+	cci: qcom,cci@ca0c000 {
+		cell-index = <0>;
+		compatible = "qcom,cci";
+		reg = <0xca0c000 0x4000>;
+		#address-cells = <1>;
+		#size-cells = <0>;
+		reg-names = "cci";
+		interrupts = <0 295 0>;
+		interrupt-names = "cci";
+		status = "ok";
+		mmagic-supply = <&gdsc_bimc_smmu>;
+		gdscr-supply = <&gdsc_camss_top>;
+		qcom,cam-vreg-name = "mmagic", "gdscr";
+		clocks = <&clock_gcc clk_mmssnoc_axi_clk>,
+			<&clock_mmss clk_mmss_mnoc_ahb_clk>,
+			<&clock_mmss clk_mmss_bimc_smmu_ahb_clk>,
+			<&clock_mmss clk_mmss_bimc_smmu_axi_clk>,
+			<&clock_mmss clk_mmss_camss_ahb_clk>,
+			<&clock_mmss clk_mmss_camss_top_ahb_clk>,
+			<&clock_mmss clk_cci_clk_src>,
+			<&clock_mmss clk_mmss_camss_cci_ahb_clk>,
+			<&clock_mmss clk_mmss_camss_cci_clk>;
+		clock-names = "mmssnoc_axi", "mnoc_ahb", "smmu_ahb", "smmu_axi",
+			"camss_ahb_clk", "camss_top_ahb_clk",
+			"cci_src_clk", "cci_ahb_clk", "camss_cci_clk";
+		qcom,clock-rates = <0 0 0 0 0 0 19200000 0 0>,
+			<0 0 0 0 0 0 37500000 0 0>;
+		pinctrl-names = "cci_default", "cci_suspend";
+		pinctrl-0 = <&cci0_active &cci1_active>;
+		pinctrl-1 = <&cci0_suspend &cci1_suspend>;
+		gpios = <&tlmm 17 0>,
+			<&tlmm 18 0>,
+			<&tlmm 19 0>,
+			<&tlmm 20 0>;
+		qcom,gpio-tbl-num = <0 1 2 3>;
+		qcom,gpio-tbl-flags = <1 1 1 1>;
+		qcom,gpio-tbl-label = "CCI_I2C_DATA0",
+				      "CCI_I2C_CLK0",
+				      "CCI_I2C_DATA1",
+				      "CCI_I2C_CLK1";
+		i2c_freq_100Khz: qcom,i2c_standard_mode {
+			status = "disabled";
+		};
+		i2c_freq_400Khz: qcom,i2c_fast_mode {
+			status = "disabled";
+		};
+		i2c_freq_custom: qcom,i2c_custom_mode {
+			status = "disabled";
+		};
+		i2c_freq_1Mhz: qcom,i2c_fast_plus_mode {
+			status = "disabled";
+		};
+	};
+
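+	/* JPEG encoder and JPEG DMA engines, sharing the VBIF at 0xca60000. */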
+	qcom,jpeg@ca1c000 {
+		cell-index = <0>;
+		compatible = "qcom,jpeg";
+		reg = <0xca1c000 0x4000>,
+			<0xca60000 0x3000>;
+		reg-names = "jpeg_hw", "jpeg_vbif";
+		interrupts = <0 316 0>;
+		interrupt-names = "jpeg";
+		smmu-vdd-supply = <&gdsc_bimc_smmu>;
+		camss-vdd-supply = <&gdsc_camss_top>;
+		qcom,vdd-names = "smmu-vdd", "camss-vdd";
+		clock-names = "mmssnoc_axi",
+			"mmss_mnoc_ahb_clk",
+			"mmss_bimc_smmu_ahb_clk",
+			"mmss_bimc_smmu_axi_clk",
+			"mmss_camss_ahb_clk",
+			"mmss_camss_top_ahb_clk",
+			"core_clk",
+			"mmss_camss_jpeg_ahb_clk",
+			"mmss_camss_jpeg_axi_clk";
+		clocks = <&clock_gcc clk_mmssnoc_axi_clk>,
+			<&clock_mmss clk_mmss_mnoc_ahb_clk>,
+			<&clock_mmss clk_mmss_bimc_smmu_ahb_clk>,
+			<&clock_mmss clk_mmss_bimc_smmu_axi_clk>,
+			<&clock_mmss clk_mmss_camss_ahb_clk>,
+			<&clock_mmss clk_mmss_camss_top_ahb_clk>,
+			<&clock_mmss clk_mmss_camss_jpeg0_vote_clk>,
+			<&clock_mmss clk_mmss_camss_jpeg_ahb_clk>,
+			<&clock_mmss clk_mmss_camss_jpeg_axi_clk>;
+		qcom,clock-rates = <0 0 0 0 0 0 480000000 0 0>;
+		qcom,vbif-reg-settings = <0x4 0x1>;
+		qcom,prefetch-reg-settings = <0x30c 0x1111>,
+			<0x318 0x31>,
+			<0x324 0x31>,
+			<0x330 0x31>,
+			<0x33c 0x0>;
+		qcom,msm-bus,name = "msm_camera_jpeg0";
+		qcom,msm-bus,num-cases = <2>;
+		qcom,msm-bus,num-paths = <1>;
+		qcom,msm-bus,vectors-KBps = <62 512 0 0>,
+			<62 512 1920000 2880000>;
+		status = "ok";
+	};
+
+	qcom,jpeg@caa0000 {
+		cell-index = <3>;
+		compatible = "qcom,jpegdma";
+		reg = <0xcaa0000 0x4000>,
+			<0xca60000 0x3000>;
+		reg-names = "jpeg_hw", "jpeg_vbif";
+		interrupts = <0 304 0>;
+		interrupt-names = "jpeg";
+		smmu-vdd-supply = <&gdsc_bimc_smmu>;
+		camss-vdd-supply = <&gdsc_camss_top>;
+		qcom,vdd-names = "smmu-vdd", "camss-vdd";
+		clock-names = "mmssnoc_axi",
+			"mmss_mnoc_ahb_clk",
+			"mmss_bimc_smmu_ahb_clk",
+			"mmss_bimc_smmu_axi_clk",
+			"mmss_camss_ahb_clk",
+			"mmss_camss_top_ahb_clk",
+			"core_clk",
+			"mmss_camss_jpeg_ahb_clk",
+			"mmss_camss_jpeg_axi_clk";
+		clocks = <&clock_gcc clk_mmssnoc_axi_clk>,
+			<&clock_mmss clk_mmss_mnoc_ahb_clk>,
+			<&clock_mmss clk_mmss_bimc_smmu_ahb_clk>,
+			<&clock_mmss clk_mmss_bimc_smmu_axi_clk>,
+			<&clock_mmss clk_mmss_camss_ahb_clk>,
+			<&clock_mmss clk_mmss_camss_top_ahb_clk>,
+			<&clock_mmss clk_mmss_camss_jpeg0_dma_vote_clk>,
+			<&clock_mmss clk_mmss_camss_jpeg_ahb_clk>,
+			<&clock_mmss clk_mmss_camss_jpeg_axi_clk>;
+		qcom,clock-rates = <0 0 0 0 0 0 480000000 0 0>;
+		qcom,vbif-reg-settings = <0x4 0x1>;
+		qcom,prefetch-reg-settings = <0x18c 0x11>,
+			<0x1a0 0x31>,
+			<0x1b0 0x31>;
+		qcom,msm-bus,name = "msm_camera_jpeg_dma";
+		qcom,msm-bus,num-cases = <2>;
+		qcom,msm-bus,num-paths = <1>;
+		qcom,msm-bus,vectors-KBps = <62 512 0 0>,
+			<62 512 1920000 2880000>;
+		qcom,max-ds-factor = <128>;
+		status = "ok";
+	};
+};
+
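+/*
+ * CCI I2C timing parameters for standard (100 kHz), fast (400 kHz),
+ * custom and fast-plus (1 MHz) modes; values are expressed in cycles of
+ * the 37.5 MHz CCI source clock.
+ */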
+&i2c_freq_100Khz {
+	qcom,hw-thigh = <201>;
+	qcom,hw-tlow = <174>;
+	qcom,hw-tsu-sto = <204>;
+	qcom,hw-tsu-sta = <231>;
+	qcom,hw-thd-dat = <22>;
+	qcom,hw-thd-sta = <162>;
+	qcom,hw-tbuf = <227>;
+	qcom,hw-scl-stretch-en = <0>;
+	qcom,hw-trdhld = <6>;
+	qcom,hw-tsp = <3>;
+	qcom,cci-clk-src = <37500000>;
+	status = "ok";
+};
+
+&i2c_freq_400Khz {
+	qcom,hw-thigh = <38>;
+	qcom,hw-tlow = <56>;
+	qcom,hw-tsu-sto = <40>;
+	qcom,hw-tsu-sta = <40>;
+	qcom,hw-thd-dat = <22>;
+	qcom,hw-thd-sta = <35>;
+	qcom,hw-tbuf = <62>;
+	qcom,hw-scl-stretch-en = <0>;
+	qcom,hw-trdhld = <6>;
+	qcom,hw-tsp = <3>;
+	qcom,cci-clk-src = <37500000>;
+	status = "ok";
+};
+
+&i2c_freq_custom {
+	qcom,hw-thigh = <38>;
+	qcom,hw-tlow = <56>;
+	qcom,hw-tsu-sto = <40>;
+	qcom,hw-tsu-sta = <40>;
+	qcom,hw-thd-dat = <22>;
+	qcom,hw-thd-sta = <35>;
+	qcom,hw-tbuf = <62>;
+	qcom,hw-scl-stretch-en = <1>;
+	qcom,hw-trdhld = <6>;
+	qcom,hw-tsp = <3>;
+	qcom,cci-clk-src = <37500000>;
+	status = "ok";
+};
+
+&i2c_freq_1Mhz {
+	qcom,hw-thigh = <16>;
+	qcom,hw-tlow = <22>;
+	qcom,hw-tsu-sto = <17>;
+	qcom,hw-tsu-sta = <18>;
+	qcom,hw-thd-dat = <16>;
+	qcom,hw-thd-sta = <15>;
+	qcom,hw-tbuf = <24>;
+	qcom,hw-scl-stretch-en = <0>;
+	qcom,hw-trdhld = <3>;
+	qcom,hw-tsp = <3>;
+	qcom,cci-clk-src = <37500000>;
+	status = "ok";
+};
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/arch/arm64/boot/dts/qcom/msm8998-cdp.dtsi	2019-10-29 09:26:22.917196073 +0100
@@ -0,0 +1,348 @@
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include "msm8998-audio-wcd.dtsi"
+#include "msm-smb138x.dtsi"
+
+&vendor {
+	bluetooth: bt_wcn3990 {
+		compatible = "qca,wcn3990";
+		qca,bt-vdd-io-supply = <&pm8998_s3>;
+		qca,bt-vdd-xtal-supply = <&pm8998_s5>;
+		qca,bt-vdd-core-supply = <&pm8998_l7>;
+		qca,bt-vdd-pa-supply = <&pm8998_l17>;
+		qca,bt-vdd-ldo-supply = <&pm8998_l25>;
+		qca,bt-chip-pwd-supply = <&pmi8998_bob_pin1>;
+		clocks = <&clock_gcc clk_rf_clk2_pin>;
+		clock-names = "rf_clk2";
+
+		qca,bt-vdd-io-voltage-level = <1352000 1352000>;
+		qca,bt-vdd-xtal-voltage-level = <2040000 2040000>;
+		qca,bt-vdd-core-voltage-level = <1800000 1800000>;
+		qca,bt-vdd-pa-voltage-level = <1304000 1304000>;
+		qca,bt-vdd-ldo-voltage-level = <3312000 3312000>;
+		qca,bt-chip-pwd-voltage-level = <3600000 3600000>;
+
+		qca,bt-vdd-io-current-level = <1>; /* LPM/PFM */
+		qca,bt-vdd-xtal-current-level = <1>; /* LPM/PFM */
+		qca,bt-vdd-core-current-level = <1>; /* LPM/PFM */
+		qca,bt-vdd-pa-current-level = <1>; /* LPM/PFM */
+		qca,bt-vdd-ldo-current-level = <1>; /* LPM/PFM */
+	};
+};
+
+&blsp1_uart3_hs {
+	status = "ok";
+};
+
+&ufsphy1 {
+	vdda-phy-supply = <&pm8998_l1>;
+	vdda-pll-supply = <&pm8998_l2>;
+	vddp-ref-clk-supply = <&pm8998_l26>;
+	vdda-phy-max-microamp = <51400>;
+	vdda-pll-max-microamp = <14600>;
+	vddp-ref-clk-max-microamp = <100>;
+	vddp-ref-clk-always-on;
+	status = "ok";
+};
+
+&ufs1 {
+	vdd-hba-supply = <&gdsc_ufs>;
+	vdd-hba-fixed-regulator;
+	vcc-supply = <&pm8998_l20>;
+	vccq-supply = <&pm8998_l26>;
+	vccq2-supply = <&pm8998_s4>;
+	vcc-max-microamp = <750000>;
+	vccq-max-microamp = <560000>;
+	vccq2-max-microamp = <750000>;
+	status = "ok";
+};
+
+&ufs_ice {
+	status = "ok";
+};
+
+&sdhc_2 {
+	vdd-supply = <&pm8998_l21>;
+	qcom,vdd-voltage-level = <2950000 2960000>;
+	qcom,vdd-current-level = <200 800000>;
+
+	vdd-io-supply = <&pm8998_l13>;
+	qcom,vdd-io-voltage-level = <1808000 2960000>;
+	qcom,vdd-io-current-level = <200 22000>;
+
+	pinctrl-names = "active", "sleep";
+	pinctrl-0 = <&sdc2_clk_on &sdc2_cmd_on &sdc2_data_on &sdc2_cd_on>;
+	pinctrl-1 = <&sdc2_clk_off &sdc2_cmd_off &sdc2_data_off &sdc2_cd_off>;
+
+	qcom,clk-rates = <400000 20000000 25000000
+				50000000 100000000 200000000>;
+	qcom,bus-speed-mode = "SDR12", "SDR25", "SDR50", "DDR50", "SDR104";
+
+	cd-gpios = <&tlmm 95 0x1>;
+
+	status = "ok";
+};
+
+&uartblsp2dm1 {
+	status = "ok";
+	pinctrl-names = "default";
+	pinctrl-0 = <&uart_console_active>;
+};
+
+&pm8998_gpios {
+	/* GPIO 5 for Home Key */
+	gpio@c400 {
+		status = "okay";
+		qcom,mode = <0>;
+		qcom,pull = <0>;
+		qcom,vin-sel = <0>;
+		qcom,src-sel = <0>;
+		qcom,out-strength = <1>;
+	};
+
+	/* GPIO 6 for Vol+ Key */
+	gpio@c500 {
+		status = "okay";
+		qcom,mode = <0>;
+		qcom,pull = <0>;
+		qcom,vin-sel = <0>;
+		qcom,src-sel = <0>;
+		qcom,out-strength = <1>;
+	};
+
+	/* GPIO 7 for Snapshot Key */
+	gpio@c600 {
+		status = "okay";
+		qcom,mode = <0>;
+		qcom,pull = <0>;
+		qcom,vin-sel = <0>;
+		qcom,src-sel = <0>;
+		qcom,out-strength = <1>;
+	};
+
+	/* GPIO 8 for Focus Key */
+	gpio@c700 {
+		status = "okay";
+		qcom,mode = <0>;
+		qcom,pull = <0>;
+		qcom,vin-sel = <0>;
+		qcom,src-sel = <0>;
+		qcom,out-strength = <1>;
+	};
+
+	gpio@cc00 { /* GPIO 13 */
+		qcom,mode = <1>;
+		qcom,output-type = <0>;
+		qcom,pull = <5>;
+		qcom,vin-sel = <0>;
+		qcom,out-strength = <1>;
+		qcom,src-sel = <3>;
+		qcom,master-en = <1>;
+		status = "okay";
+	};
+
+	gpio@d200 { /* GPIO 19 - wil6210 refclk3_en */
+		qcom,mode = <0>;		/* Input */
+		qcom,pull = <5>;		/* No Pull */
+		qcom,vin-sel = <1>;		/* VIN1 GPIO_MV */
+		qcom,src-sel = <0>;		/* GPIO */
+		qcom,invert = <0>;		/* Invert */
+		qcom,master-en = <1>;		/* Enable GPIO */
+		status = "okay";
+	};
+
+	/* GPIO 21 (NFC_CLK_REQ) */
+	gpio@d400 {
+		qcom,mode = <0>;
+		qcom,vin-sel = <1>;
+		qcom,src-sel = <0>;
+		qcom,master-en = <1>;
+		status = "okay";
+	};
+};
+
+&labibb {
+	status = "ok";
+	qcom,qpnp-labibb-mode = "lcd";
+};
+
+&pmi8998_wled {
+	qcom,led-strings-list = [00 01];
+};
+
+&pmi8998_charger {
+	qcom,batteryless-platform;
+};
+
+&pmi8998_haptics {
+	status = "okay";
+};
+
+&pm8998_vadc {
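+	/*
+	 * VADC channels: vph_pwr and vcoin are absolute-calibrated voltage
+	 * rails; the *_therm channels are ratiometric thermistor inputs.
+	 */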
+	chan@83 {
+		label = "vph_pwr";
+		reg = <0x83>;
+		qcom,decimation = <2>;
+		qcom,pre-div-channel-scaling = <1>;
+		qcom,calibration-type = "absolute";
+		qcom,scale-function = <0>;
+		qcom,hw-settle-time = <0>;
+		qcom,fast-avg-setup = <0>;
+	};
+
+	chan@85 {
+		label = "vcoin";
+		reg = <0x85>;
+		qcom,decimation = <2>;
+		qcom,pre-div-channel-scaling = <1>;
+		qcom,calibration-type = "absolute";
+		qcom,scale-function = <0>;
+		qcom,hw-settle-time = <0>;
+		qcom,fast-avg-setup = <0>;
+	};
+
+	chan@4c {
+		label = "xo_therm";
+		reg = <0x4c>;
+		qcom,decimation = <2>;
+		qcom,pre-div-channel-scaling = <0>;
+		qcom,calibration-type = "ratiometric";
+		qcom,scale-function = <4>;
+		qcom,hw-settle-time = <2>;
+		qcom,fast-avg-setup = <0>;
+	};
+
+	chan@4d {
+		label = "msm_therm";
+		reg = <0x4d>;
+		qcom,decimation = <2>;
+		qcom,pre-div-channel-scaling = <0>;
+		qcom,calibration-type = "ratiometric";
+		qcom,scale-function = <2>;
+		qcom,hw-settle-time = <2>;
+		qcom,fast-avg-setup = <0>;
+	};
+
+	chan@51 {
+		label = "quiet_therm";
+		reg = <0x51>;
+		qcom,decimation = <2>;
+		qcom,pre-div-channel-scaling = <0>;
+		qcom,calibration-type = "ratiometric";
+		qcom,scale-function = <2>;
+		qcom,hw-settle-time = <2>;
+		qcom,fast-avg-setup = <0>;
+	};
+};
+
+&pm8998_adc_tm {
+	chan@83 {
+		label = "vph_pwr";
+		reg = <0x83>;
+		qcom,pre-div-channel-scaling = <1>;
+		qcom,calibration-type = "absolute";
+		qcom,scale-function = <0>;
+		qcom,hw-settle-time = <0>;
+		qcom,btm-channel-number = <0x60>;
+	};
+
+	chan@4d {
+		label = "msm_therm";
+		reg = <0x4d>;
+		qcom,pre-div-channel-scaling = <0>;
+		qcom,calibration-type = "ratiometric";
+		qcom,scale-function = <2>;
+		qcom,hw-settle-time = <2>;
+		qcom,btm-channel-number = <0x68>;
+		qcom,thermal-node;
+	};
+
+	chan@51 {
+		label = "quiet_therm";
+		reg = <0x51>;
+		qcom,pre-div-channel-scaling = <0>;
+		qcom,calibration-type = "ratiometric";
+		qcom,scale-function = <2>;
+		qcom,hw-settle-time = <2>;
+		qcom,btm-channel-number = <0x70>;
+		qcom,thermal-node;
+	};
+
+	chan@4c {
+		label = "xo_therm";
+		reg = <0x4c>;
+		qcom,pre-div-channel-scaling = <0>;
+		qcom,calibration-type = "ratiometric";
+		qcom,scale-function = <4>;
+		qcom,hw-settle-time = <2>;
+		qcom,btm-channel-number = <0x78>;
+		qcom,thermal-node;
+	};
+};
+
+&wil6210 {
+	status = "ok";
+};
+
+&snd_9335 {
+	qcom,mbhc-audio-jack-type = "6-pole-jack";
+};
+
+&snd_934x {
+	qcom,mbhc-audio-jack-type = "6-pole-jack";
+};
+
+&soc {
+	gpio_keys {
+		compatible = "gpio-keys";
+		input-name = "gpio-keys";
+		status = "okay";
+
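+		/*
+		 * linux,code values: 102 = KEY_HOME, 115 = KEY_VOLUMEUP,
+		 * 528 = KEY_CAMERA_FOCUS; 766 is the qcom-specific
+		 * camera-snapshot code.
+		 */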
+		home {
+			label = "home";
+			gpios = <&pm8998_gpios 5 0x1>;
+			linux,input-type = <1>;
+			linux,code = <102>;
+			gpio-key,wakeup;
+			debounce-interval = <15>;
+		};
+
+		vol_up {
+			label = "volume_up";
+			gpios = <&pm8998_gpios 6 0x1>;
+			linux,input-type = <1>;
+			linux,code = <115>;
+			gpio-key,wakeup;
+			debounce-interval = <15>;
+		};
+
+		cam_snapshot {
+			label = "cam_snapshot";
+			gpios = <&pm8998_gpios 7 0x1>;
+			linux,input-type = <1>;
+			linux,code = <766>;
+			gpio-key,wakeup;
+			debounce-interval = <15>;
+		};
+
+		cam_focus {
+			label = "cam_focus";
+			gpios = <&pm8998_gpios 8 0x1>;
+			linux,input-type = <1>;
+			linux,code = <528>;
+			gpio-key,wakeup;
+			debounce-interval = <15>;
+		};
+	};
+};
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/arch/arm64/boot/dts/qcom/msm8998-coresight.dtsi	2019-01-22 16:16:21.195225506 +0100
@@ -0,0 +1,1622 @@
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+&soc {
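+	/*
+	 * CoreSight trace topology: the per-CPU ETMs feed funnel_apss ->
+	 * funnel_apss_merg -> funnel_in1; funnel_in0 collects STM, QATB
+	 * and other system sources. Both meet in funnel_merg, whose
+	 * output goes to the ETF FIFO and, through the replicator, to the
+	 * ETR for capture in system memory.
+	 */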
+	tmc_etr: tmc@6048000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b961>;
+
+		reg = <0x6048000 0x1000>,
+		      <0x6064000 0x15000>;
+		reg-names = "tmc-base", "bam-base";
+
+		arm,buffer-size = <0x400000>;
+		arm,sg-enable;
+
+		coresight-ctis = <&cti0 &cti8>;
+
+		coresight-name = "coresight-tmc-etr";
+
+		clocks = <&clock_gcc clk_qdss_clk>,
+			 <&clock_gcc clk_qdss_a_clk>;
+		clock-names = "apb_pclk", "core_a_clk";
+
+		port{
+			tmc_etr_in_replicator: endpoint {
+				slave-mode;
+				remote-endpoint = <&replicator_out_tmc_etr>;
+			};
+		};
+	};
+
+	replicator: replicator@6046000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b909>;
+
+		reg = <0x6046000 0x1000>;
+		reg-names = "replicator-base";
+
+		coresight-name = "coresight-replicator";
+
+		clocks = <&clock_gcc clk_qdss_clk>,
+			 <&clock_gcc clk_qdss_a_clk>;
+		clock-names = "apb_pclk", "core_a_clk";
+
+		ports {
+			#address-cells = <1>;
+			#size-cells = <0>;
+
+			port@0 {
+				reg = <0>;
+				replicator_out_tmc_etr: endpoint {
+					remote-endpoint =
+						<&tmc_etr_in_replicator>;
+				};
+			};
+			port@1 {
+				reg = <0>;
+				replicator_in_tmc_etf: endpoint {
+					slave-mode;
+					remote-endpoint =
+						<&tmc_etf_out_replicator>;
+				};
+			};
+		};
+	};
+
+	tmc_etf: tmc@6047000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b961>;
+
+		reg = <0x6047000 0x1000>;
+		reg-names = "tmc-base";
+
+		coresight-ctis = <&cti0 &cti8>;
+
+		coresight-name = "coresight-tmc-etf";
+
+		arm,default-sink;
+
+		clocks = <&clock_gcc clk_qdss_clk>,
+			 <&clock_gcc clk_qdss_a_clk>;
+		clock-names = "apb_pclk", "core_a_clk";
+
+		ports {
+			#address-cells = <1>;
+			#size-cells = <0>;
+
+			port@0 {
+				reg = <0>;
+				tmc_etf_out_replicator: endpoint {
+					remote-endpoint =
+						<&replicator_in_tmc_etf>;
+				};
+			};
+			port@1 {
+				reg = <0>;
+				tmc_etf_in_funnel_merg: endpoint {
+					slave-mode;
+					remote-endpoint =
+						<&funnel_merg_out_tmc_etf>;
+				};
+			};
+		};
+	};
+
+	funnel_merg: funnel@6045000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b908>;
+
+		reg = <0x6045000 0x1000>;
+		reg-names = "funnel-base";
+
+		coresight-name = "coresight-funnel-merg";
+
+		clocks = <&clock_gcc clk_qdss_clk>,
+			 <&clock_gcc clk_qdss_a_clk>;
+		clock-names = "apb_pclk", "core_a_clk";
+
+		ports {
+			#address-cells = <1>;
+			#size-cells = <0>;
+
+			port@0 {
+				reg = <0>;
+				funnel_merg_out_tmc_etf: endpoint {
+					remote-endpoint =
+						<&tmc_etf_in_funnel_merg>;
+				};
+			};
+			port@1 {
+				reg = <0>;
+				funnel_merg_in_funnel_in0: endpoint {
+					slave-mode;
+					remote-endpoint =
+						<&funnel_in0_out_funnel_merg>;
+				};
+			};
+			port@2 {
+				reg = <1>;
+				funnel_merg_in_funnel_in1: endpoint {
+					slave-mode;
+					remote-endpoint =
+						<&funnel_in1_out_funnel_merg>;
+				};
+			};
+		};
+	};
+
+	funnel_in0: funnel@6041000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b908>;
+
+		reg = <0x6041000 0x1000>;
+		reg-names = "funnel-base";
+
+		coresight-name = "coresight-funnel-in0";
+
+		clocks = <&clock_gcc clk_qdss_clk>,
+			 <&clock_gcc clk_qdss_a_clk>;
+		clock-names = "apb_pclk", "core_a_clk";
+
+		ports {
+			#address-cells = <1>;
+			#size-cells = <0>;
+
+			port@0 {
+				reg = <0>;
+				funnel_in0_out_funnel_merg: endpoint {
+					remote-endpoint =
+						<&funnel_merg_in_funnel_in0>;
+				};
+			};
+			port@1 {
+				reg = <0>;
+				funnel_in0_in_rpm_etm0: endpoint {
+					slave-mode;
+					remote-endpoint =
+						<&rpm_etm0_out_funnel_in0>;
+				};
+			};
+			port@2 {
+				reg = <3>;
+				funnel_in0_in_funnel_spss: endpoint {
+					slave-mode;
+					remote-endpoint =
+						<&funnel_spss_out_funnel_in0>;
+				};
+			};
+			port@3 {
+				reg = <6>;
+				funnel_in0_in_funnel_qatb: endpoint {
+					slave-mode;
+					remote-endpoint =
+						<&funnel_qatb_out_funnel_in0>;
+				};
+			};
+			port@4 {
+				reg = <7>;
+				funnel_in0_in_stm: endpoint {
+					slave-mode;
+					remote-endpoint = <&stm_out_funnel_in0>;
+				};
+			};
+		};
+	};
+
+	funnel_in1: funnel@6042000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b908>;
+
+		reg = <0x6042000 0x1000>;
+		reg-names = "funnel-base";
+
+		coresight-name = "coresight-funnel-in1";
+
+		clocks = <&clock_gcc clk_qdss_clk>,
+			 <&clock_gcc clk_qdss_a_clk>;
+		clock-names = "apb_pclk", "core_a_clk";
+
+		ports {
+			#address-cells = <1>;
+			#size-cells = <0>;
+
+			port@0 {
+				reg = <0>;
+				funnel_in1_out_funnel_merg: endpoint {
+					remote-endpoint =
+						<&funnel_merg_in_funnel_in1>;
+				};
+			};
+			port@1 {
+				reg = <2>;
+				funnel_in1_in_tpda_nav: endpoint {
+					slave-mode;
+					remote-endpoint =
+					    <&tpda_nav_out_funnel_in1>;
+				};
+			};
+			port@2 {
+				reg = <3>;
+				funnel_in1_in_tpda_mss: endpoint {
+					slave-mode;
+					remote-endpoint =
+					    <&tpda_mss_out_funnel_in1>;
+				};
+			};
+			port@3 {
+				reg = <4>;
+				funnel_in1_in_audio_etm0: endpoint {
+					slave-mode;
+					remote-endpoint =
+					    <&audio_etm0_out_funnel_in1>;
+				};
+			};
+			port@4 {
+				reg = <5>;
+				funnel_in1_in_modem_etm0: endpoint {
+					slave-mode;
+					remote-endpoint =
+					    <&modem_etm0_out_funnel_in1>;
+				};
+			};
+			port@5 {
+				reg = <6>;
+				funnel_in1_in_funnel_apss_merg: endpoint {
+					slave-mode;
+					remote-endpoint =
+					    <&funnel_apss_merg_out_funnel_in1>;
+				};
+			};
+			port@6 {
+				reg = <7>;
+				funnel_in1_in_gfx: endpoint {
+					slave-mode;
+					remote-endpoint =
+					    <&gfx_out_funnel_in1>;
+				};
+			};
+		};
+	};
+
+	funnel_apss_merg: funnel@7b70000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b908>;
+
+		reg = <0x7b70000 0x1000>;
+		reg-names = "funnel-base";
+
+		coresight-name = "coresight-funnel-apss-merg";
+
+		clocks = <&clock_gcc clk_qdss_clk>,
+			 <&clock_gcc clk_qdss_a_clk>;
+		clock-names = "apb_pclk", "core_a_clk";
+
+		ports {
+			#address-cells = <1>;
+			#size-cells = <0>;
+
+			port@0 {
+				reg = <0>;
+				funnel_apss_merg_out_funnel_in1: endpoint {
+					remote-endpoint =
+					    <&funnel_in1_in_funnel_apss_merg>;
+				};
+			};
+			port@1 {
+				reg = <0>;
+				funnel_apss_merg_in_funnel_apss: endpoint {
+					slave-mode;
+					remote-endpoint =
+					    <&funnel_apss_out_funnel_apss_merg>;
+				};
+			};
+			port@2 {
+				reg = <1>;
+				funnel_apss_merg_in_tpda_olc: endpoint {
+					slave-mode;
+					remote-endpoint =
+					    <&tpda_olc_out_funnel_apss_merg>;
+				};
+			};
+			port@3 {
+				reg = <3>;
+				funnel_apss_merg_in_tpda_apss: endpoint {
+					slave-mode;
+					remote-endpoint =
+					    <&tpda_apss_out_funnel_apss_merg>;
+				};
+			};
+		};
+	};
+
+	funnel_apss: funnel@7b60000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b908>;
+
+		reg = <0x7b60000 0x1000>;
+		reg-names = "funnel-base";
+
+		coresight-name = "coresight-funnel-apss";
+
+		clocks = <&clock_gcc clk_qdss_clk>,
+			 <&clock_gcc clk_qdss_a_clk>;
+		clock-names = "apb_pclk", "core_a_clk";
+
+		ports {
+			#address-cells = <1>;
+			#size-cells = <0>;
+
+			port@0 {
+				reg = <0>;
+				funnel_apss_out_funnel_apss_merg: endpoint {
+					remote-endpoint =
+					    <&funnel_apss_merg_in_funnel_apss>;
+				};
+			};
+			port@1 {
+				reg = <0>;
+				funnel_apss_in_etm0: endpoint {
+					slave-mode;
+					remote-endpoint =
+						<&etm0_out_funnel_apss>;
+				};
+			};
+			port@2 {
+				reg = <1>;
+				funnel_apss_in_etm1: endpoint {
+					slave-mode;
+					remote-endpoint =
+						<&etm1_out_funnel_apss>;
+				};
+			};
+			port@3 {
+				reg = <2>;
+				funnel_apss_in_etm2: endpoint {
+					slave-mode;
+					remote-endpoint =
+						<&etm2_out_funnel_apss>;
+				};
+			};
+			port@4 {
+				reg = <3>;
+				funnel_apss_in_etm3: endpoint {
+					slave-mode;
+					remote-endpoint =
+						<&etm3_out_funnel_apss>;
+				};
+			};
+			port@5 {
+				reg = <4>;
+				funnel_apss_in_etm4: endpoint {
+					slave-mode;
+					remote-endpoint =
+						<&etm4_out_funnel_apss>;
+				};
+			};
+			port@6 {
+				reg = <5>;
+				funnel_apss_in_etm5: endpoint {
+					slave-mode;
+					remote-endpoint =
+						<&etm5_out_funnel_apss>;
+				};
+			};
+			port@7 {
+				reg = <6>;
+				funnel_apss_in_etm6: endpoint {
+					slave-mode;
+					remote-endpoint =
+						<&etm6_out_funnel_apss>;
+				};
+			};
+			port@8 {
+				reg = <7>;
+				funnel_apss_in_etm7: endpoint {
+					slave-mode;
+					remote-endpoint =
+						<&etm7_out_funnel_apss>;
+				};
+			};
+		};
+	};
+
+	stm: stm@6002000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b962>;
+
+		reg = <0x6002000 0x1000>,
+		      <0x16280000 0x180000>;
+		reg-names = "stm-base", "stm-data-base";
+
+		coresight-name = "coresight-stm";
+
+		clocks = <&clock_gcc clk_qdss_clk>,
+			 <&clock_gcc clk_qdss_a_clk>;
+		clock-names = "apb_pclk", "core_a_clk";
+
+		port{
+			stm_out_funnel_in0: endpoint {
+				remote-endpoint = <&funnel_in0_in_stm>;
+			};
+		};
+	};
+
+	etm0: etm@7840000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b95d>;
+
+		reg = <0x7840000 0x1000>;
+		cpu = <&CPU0>;
+
+		coresight-name = "coresight-etm0";
+
+		clocks = <&clock_gcc clk_qdss_clk>,
+			 <&clock_gcc clk_qdss_a_clk>;
+		clock-names = "apb_pclk", "core_a_clk";
+
+		port{
+			etm0_out_funnel_apss: endpoint {
+				remote-endpoint = <&funnel_apss_in_etm0>;
+			};
+		};
+	};
+
+	etm1: etm@7940000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b95d>;
+
+		reg = <0x7940000 0x1000>;
+		cpu = <&CPU1>;
+
+		coresight-name = "coresight-etm1";
+
+		clocks = <&clock_gcc clk_qdss_clk>,
+			 <&clock_gcc clk_qdss_a_clk>;
+		clock-names = "apb_pclk", "core_a_clk";
+
+		port{
+			etm1_out_funnel_apss: endpoint {
+				remote-endpoint = <&funnel_apss_in_etm1>;
+			};
+		};
+	};
+
+	etm2: etm@7a40000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b95d>;
+
+		reg = <0x7a40000 0x1000>;
+		cpu = <&CPU2>;
+
+		coresight-name = "coresight-etm2";
+
+		clocks = <&clock_gcc clk_qdss_clk>,
+			 <&clock_gcc clk_qdss_a_clk>;
+		clock-names = "apb_pclk", "core_a_clk";
+
+		port{
+			etm2_out_funnel_apss: endpoint {
+				remote-endpoint = <&funnel_apss_in_etm2>;
+			};
+		};
+	};
+
+	etm3: etm@7b40000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b95d>;
+
+		reg = <0x7b40000 0x1000>;
+		cpu = <&CPU3>;
+
+		coresight-name = "coresight-etm3";
+
+		clocks = <&clock_gcc clk_qdss_clk>,
+			 <&clock_gcc clk_qdss_a_clk>;
+		clock-names = "apb_pclk", "core_a_clk";
+
+		port{
+			etm3_out_funnel_apss: endpoint {
+				remote-endpoint = <&funnel_apss_in_etm3>;
+			};
+		};
+	};
+
+	etm4: etm@7c40000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b95d>;
+
+		reg = <0x7c40000 0x1000>;
+		cpu = <&CPU4>;
+
+		coresight-name = "coresight-etm4";
+
+		clocks = <&clock_gcc clk_qdss_clk>,
+			 <&clock_gcc clk_qdss_a_clk>;
+		clock-names = "apb_pclk", "core_a_clk";
+
+		port{
+			etm4_out_funnel_apss: endpoint {
+				remote-endpoint = <&funnel_apss_in_etm4>;
+			};
+		};
+	};
+
+	etm5: etm@7d40000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b95d>;
+
+		reg = <0x7d40000 0x1000>;
+		cpu = <&CPU5>;
+
+		coresight-name = "coresight-etm5";
+
+		clocks = <&clock_gcc clk_qdss_clk>,
+			 <&clock_gcc clk_qdss_a_clk>;
+		clock-names = "apb_pclk", "core_a_clk";
+
+		port{
+			etm5_out_funnel_apss: endpoint {
+				remote-endpoint = <&funnel_apss_in_etm5>;
+			};
+		};
+	};
+
+	etm6: etm@7e40000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b95d>;
+
+		reg = <0x7e40000 0x1000>;
+		cpu = <&CPU6>;
+
+		coresight-name = "coresight-etm6";
+
+		clocks = <&clock_gcc clk_qdss_clk>,
+			 <&clock_gcc clk_qdss_a_clk>;
+		clock-names = "apb_pclk", "core_a_clk";
+
+		port{
+			etm6_out_funnel_apss: endpoint {
+				remote-endpoint = <&funnel_apss_in_etm6>;
+			};
+		};
+	};
+
+	etm7: etm@7f40000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b95d>;
+
+		reg = <0x7f40000 0x1000>;
+		cpu = <&CPU7>;
+
+		coresight-name = "coresight-etm7";
+
+		clocks = <&clock_gcc clk_qdss_clk>,
+			 <&clock_gcc clk_qdss_a_clk>;
+		clock-names = "apb_pclk", "core_a_clk";
+
+		port{
+			etm7_out_funnel_apss: endpoint {
+				remote-endpoint = <&funnel_apss_in_etm7>;
+			};
+		};
+	};
+
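+	/* Cross-trigger interfaces: system CTIs 0-15 plus per-CPU and APSS instances. */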
+	cti0: cti@6010000 {
+		compatible = "arm,coresight-cti";
+		reg = <0x6010000 0x1000>;
+		reg-names = "cti-base";
+
+		coresight-name = "coresight-cti0";
+
+		clocks = <&clock_gcc clk_qdss_clk>,
+			 <&clock_gcc clk_qdss_a_clk>;
+		clock-names = "core_clk", "core_a_clk";
+	};
+
+	cti1: cti@6011000 {
+		compatible = "arm,coresight-cti";
+		reg = <0x6011000 0x1000>;
+		reg-names = "cti-base";
+
+		coresight-name = "coresight-cti1";
+
+		clocks = <&clock_gcc clk_qdss_clk>,
+			 <&clock_gcc clk_qdss_a_clk>;
+		clock-names = "core_clk", "core_a_clk";
+	};
+
+	cti2: cti@6012000 {
+		compatible = "arm,coresight-cti";
+		reg = <0x6012000 0x1000>;
+		reg-names = "cti-base";
+
+		coresight-name = "coresight-cti2";
+
+		clocks = <&clock_gcc clk_qdss_clk>,
+			 <&clock_gcc clk_qdss_a_clk>;
+		clock-names = "core_clk", "core_a_clk";
+
+		qcom,cti-gpio-trigout = <4>;
+		pinctrl-names = "cti-trigout-pctrl";
+		pinctrl-0 = <&trigout_a>;
+	};
+
+	cti3: cti@6013000 {
+		compatible = "arm,coresight-cti";
+		reg = <0x6013000 0x1000>;
+		reg-names = "cti-base";
+
+		coresight-name = "coresight-cti3";
+
+		clocks = <&clock_gcc clk_qdss_clk>,
+			 <&clock_gcc clk_qdss_a_clk>;
+		clock-names = "core_clk", "core_a_clk";
+	};
+
+	cti4: cti@6014000 {
+		compatible = "arm,coresight-cti";
+		reg = <0x6014000 0x1000>;
+		reg-names = "cti-base";
+
+		coresight-name = "coresight-cti4";
+
+		clocks = <&clock_gcc clk_qdss_clk>,
+			 <&clock_gcc clk_qdss_a_clk>;
+		clock-names = "core_clk", "core_a_clk";
+	};
+
+	cti5: cti@6015000 {
+		compatible = "arm,coresight-cti";
+		reg = <0x6015000 0x1000>;
+		reg-names = "cti-base";
+
+		coresight-name = "coresight-cti5";
+
+		clocks = <&clock_gcc clk_qdss_clk>,
+			 <&clock_gcc clk_qdss_a_clk>;
+		clock-names = "core_clk", "core_a_clk";
+	};
+
+	cti6: cti@6016000 {
+		compatible = "arm,coresight-cti";
+		reg = <0x6016000 0x1000>;
+		reg-names = "cti-base";
+
+		coresight-name = "coresight-cti6";
+
+		clocks = <&clock_gcc clk_qdss_clk>,
+			 <&clock_gcc clk_qdss_a_clk>;
+		clock-names = "core_clk", "core_a_clk";
+	};
+
+	cti7: cti@6017000 {
+		compatible = "arm,coresight-cti";
+		reg = <0x6017000 0x1000>;
+		reg-names = "cti-base";
+
+		coresight-name = "coresight-cti7";
+
+		clocks = <&clock_gcc clk_qdss_clk>,
+			 <&clock_gcc clk_qdss_a_clk>;
+		clock-names = "core_clk", "core_a_clk";
+	};
+
+	cti8: cti@6018000 {
+		compatible = "arm,coresight-cti";
+		reg = <0x6018000 0x1000>;
+		reg-names = "cti-base";
+
+		coresight-name = "coresight-cti8";
+
+		clocks = <&clock_gcc clk_qdss_clk>,
+			 <&clock_gcc clk_qdss_a_clk>;
+		clock-names = "core_clk", "core_a_clk";
+	};
+
+	cti9: cti@6019000 {
+		compatible = "arm,coresight-cti";
+		reg = <0x6019000 0x1000>;
+		reg-names = "cti-base";
+
+		coresight-name = "coresight-cti9";
+
+		clocks = <&clock_gcc clk_qdss_clk>,
+			 <&clock_gcc clk_qdss_a_clk>;
+		clock-names = "core_clk", "core_a_clk";
+	};
+
+	cti10: cti@601a000 {
+		compatible = "arm,coresight-cti";
+		reg = <0x601a000 0x1000>;
+		reg-names = "cti-base";
+
+		coresight-name = "coresight-cti10";
+
+		clocks = <&clock_gcc clk_qdss_clk>,
+			 <&clock_gcc clk_qdss_a_clk>;
+		clock-names = "core_clk", "core_a_clk";
+	};
+
+	cti11: cti@601b000 {
+		compatible = "arm,coresight-cti";
+		reg = <0x601b000 0x1000>;
+		reg-names = "cti-base";
+
+		coresight-name = "coresight-cti11";
+
+		clocks = <&clock_gcc clk_qdss_clk>,
+			 <&clock_gcc clk_qdss_a_clk>;
+		clock-names = "core_clk", "core_a_clk";
+	};
+
+	cti12: cti@601c000 {
+		compatible = "arm,coresight-cti";
+		reg = <0x601c000 0x1000>;
+		reg-names = "cti-base";
+
+		coresight-name = "coresight-cti12";
+
+		clocks = <&clock_gcc clk_qdss_clk>,
+			 <&clock_gcc clk_qdss_a_clk>;
+		clock-names = "core_clk", "core_a_clk";
+	};
+
+	cti13: cti@601d000 {
+		compatible = "arm,coresight-cti";
+		reg = <0x601d000 0x1000>;
+		reg-names = "cti-base";
+
+		coresight-name = "coresight-cti13";
+
+		clocks = <&clock_gcc clk_qdss_clk>,
+			 <&clock_gcc clk_qdss_a_clk>;
+		clock-names = "core_clk", "core_a_clk";
+	};
+
+	cti14: cti@601e000 {
+		compatible = "arm,coresight-cti";
+		reg = <0x601e000 0x1000>;
+		reg-names = "cti-base";
+
+		coresight-name = "coresight-cti14";
+
+		clocks = <&clock_gcc clk_qdss_clk>,
+			 <&clock_gcc clk_qdss_a_clk>;
+		clock-names = "core_clk", "core_a_clk";
+	};
+
+	cti15: cti@601f000 {
+		compatible = "arm,coresight-cti";
+		reg = <0x601f000 0x1000>;
+		reg-names = "cti-base";
+
+		coresight-name = "coresight-cti15";
+
+		clocks = <&clock_gcc clk_qdss_clk>,
+			 <&clock_gcc clk_qdss_a_clk>;
+		clock-names = "core_clk", "core_a_clk";
+	};
+
+	cti_cpu0: cti@7820000 {
+		compatible = "arm,coresight-cti";
+		reg = <0x7820000 0x1000>;
+		reg-names = "cti-base";
+
+		coresight-name = "coresight-cti-cpu0";
+		cpu = <&CPU0>;
+
+		clocks = <&clock_gcc clk_qdss_clk>,
+			 <&clock_gcc clk_qdss_a_clk>;
+		clock-names = "core_clk", "core_a_clk";
+	};
+
+	cti_cpu1: cti@7920000 {
+		compatible = "arm,coresight-cti";
+		reg = <0x7920000 0x1000>;
+		reg-names = "cti-base";
+
+		coresight-name = "coresight-cti-cpu1";
+		cpu = <&CPU1>;
+
+		clocks = <&clock_gcc clk_qdss_clk>,
+			 <&clock_gcc clk_qdss_a_clk>;
+		clock-names = "core_clk", "core_a_clk";
+	};
+
+	cti_cpu2: cti@7a20000 {
+		compatible = "arm,coresight-cti";
+		reg = <0x7a20000 0x1000>;
+		reg-names = "cti-base";
+
+		coresight-name = "coresight-cti-cpu2";
+		cpu = <&CPU2>;
+
+		clocks = <&clock_gcc clk_qdss_clk>,
+			 <&clock_gcc clk_qdss_a_clk>;
+		clock-names = "core_clk", "core_a_clk";
+	};
+
+	cti_cpu3: cti@7b20000 {
+		compatible = "arm,coresight-cti";
+		reg = <0x7b20000 0x1000>;
+		reg-names = "cti-base";
+
+		coresight-name = "coresight-cti-cpu3";
+		cpu = <&CPU3>;
+
+		clocks = <&clock_gcc clk_qdss_clk>,
+			 <&clock_gcc clk_qdss_a_clk>;
+		clock-names = "core_clk", "core_a_clk";
+	};
+
+	cti_cpu4: cti@7c20000 {
+		compatible = "arm,coresight-cti";
+		reg = <0x7c20000 0x1000>;
+		reg-names = "cti-base";
+
+		coresight-name = "coresight-cti-cpu4";
+		cpu = <&CPU4>;
+
+		clocks = <&clock_gcc clk_qdss_clk>,
+			 <&clock_gcc clk_qdss_a_clk>;
+		clock-names = "core_clk", "core_a_clk";
+	};
+
+	cti_cpu5: cti@7d20000 {
+		compatible = "arm,coresight-cti";
+		reg = <0x7d20000 0x1000>;
+		reg-names = "cti-base";
+
+		coresight-name = "coresight-cti-cpu5";
+		cpu = <&CPU5>;
+
+		clocks = <&clock_gcc clk_qdss_clk>,
+			 <&clock_gcc clk_qdss_a_clk>;
+		clock-names = "core_clk", "core_a_clk";
+	};
+
+	cti_cpu6: cti@7e20000 {
+		compatible = "arm,coresight-cti";
+		reg = <0x7e20000 0x1000>;
+		reg-names = "cti-base";
+
+		coresight-name = "coresight-cti-cpu6";
+		cpu = <&CPU6>;
+
+		clocks = <&clock_gcc clk_qdss_clk>,
+			 <&clock_gcc clk_qdss_a_clk>;
+		clock-names = "core_clk", "core_a_clk";
+	};
+
+	cti_cpu7: cti@7f20000 {
+		compatible = "arm,coresight-cti";
+		reg = <0x7f20000 0x1000>;
+		reg-names = "cti-base";
+
+		coresight-name = "coresight-cti-cpu7";
+		cpu = <&CPU7>;
+
+		clocks = <&clock_gcc clk_qdss_clk>,
+			 <&clock_gcc clk_qdss_a_clk>;
+		clock-names = "core_clk", "core_a_clk";
+	};
+
+	cti_apss: cti@7b80000 {
+		compatible = "arm,coresight-cti";
+		reg = <0x7b80000 0x1000>;
+		reg-names = "cti-base";
+
+		coresight-name = "coresight-cti-apss";
+
+		clocks = <&clock_gcc clk_qdss_clk>,
+			 <&clock_gcc clk_qdss_a_clk>;
+		clock-names = "core_clk", "core_a_clk";
+	};
+
+	cti_apss_dl: cti@7bc1000 {
+		compatible = "arm,coresight-cti";
+		reg = <0x7bc1000 0x1000>;
+		reg-names = "cti-base";
+
+		coresight-name = "coresight-cti-apss-dl";
+
+		clocks = <&clock_gcc clk_qdss_clk>,
+			 <&clock_gcc clk_qdss_a_clk>;
+		clock-names = "core_clk", "core_a_clk";
+	};
+
+	cti_olc: cti@7b91000 {
+		compatible = "arm,coresight-cti";
+		reg = <0x7b91000 0x1000>;
+		reg-names = "cti-base";
+
+		coresight-name = "coresight-cti-olc";
+
+		clocks = <&clock_gcc clk_qdss_clk>,
+			 <&clock_gcc clk_qdss_a_clk>;
+		clock-names = "core_clk", "core_a_clk";
+	};
+
+	funnel_qatb: funnel@6005000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b908>;
+
+		reg = <0x6005000 0x1000>;
+		reg-names = "funnel-base";
+
+		coresight-name = "coresight-funnel-qatb";
+
+		clocks = <&clock_gcc clk_qdss_clk>,
+			 <&clock_gcc clk_qdss_a_clk>;
+		clock-names = "apb_pclk", "core_a_clk";
+
+		ports {
+			#address-cells = <1>;
+			#size-cells = <0>;
+
+			port@0 {
+				reg = <0>;
+				funnel_qatb_out_funnel_in0: endpoint {
+					remote-endpoint =
+					    <&funnel_in0_in_funnel_qatb>;
+				};
+			};
+			port@1 {
+				reg = <0>;
+				funnel_qatb_in_tpda: endpoint {
+					slave-mode;
+					remote-endpoint =
+						<&tpda_out_funnel_qatb>;
+				};
+			};
+			port@2 {
+				reg = <3>;
+				funnel_qatb_in_funnel_dlet_qatb: endpoint {
+					slave-mode;
+					remote-endpoint =
+					    <&funnel_dlet_qatb_out_funnel_qatb>;
+				};
+			};
+		};
+	};
+
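+	/*
+	 * TPDA: aggregates the TPDM hardware-event/data monitors below
+	 * onto a single trace stream with ATID 65.
+	 */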
+	tpda: tpda@6004000 {
+		compatible = "qcom,coresight-tpda";
+		reg = <0x6004000 0x1000>;
+		reg-names = "tpda-base";
+
+		coresight-name = "coresight-tpda";
+
+		qcom,tpda-atid = <65>;
+		qcom,bc-elem-size = <7 32>,
+				    <9 32>;
+		qcom,tc-elem-size = <3 32>,
+				    <6 32>,
+				    <9 32>;
+		qcom,dsb-elem-size = <7 32>,
+				     <9 32>;
+		qcom,cmb-elem-size = <3 32>,
+				     <4 32>,
+				     <5 32>,
+				     <9 64>;
+
+		clocks = <&clock_gcc clk_qdss_clk>,
+			 <&clock_gcc clk_qdss_a_clk>;
+		clock-names = "core_clk", "core_a_clk";
+
+		ports {
+			#address-cells = <1>;
+			#size-cells = <0>;
+			port@0 {
+				reg = <0>;
+				tpda_out_funnel_qatb: endpoint {
+					remote-endpoint =
+						<&funnel_qatb_in_tpda>;
+				};
+			};
+			port@1 {
+				reg = <3>;
+				tpda_in_tpdm_vsense: endpoint {
+					slave-mode;
+					remote-endpoint =
+						<&tpdm_vsense_out_tpda>;
+				};
+			};
+			port@2 {
+				reg = <4>;
+				tpda_in_tpdm_dcc: endpoint {
+					slave-mode;
+					remote-endpoint =
+						<&tpdm_dcc_out_tpda>;
+				};
+			};
+			port@3 {
+				reg = <5>;
+				tpda_in_tpdm_prng: endpoint {
+					slave-mode;
+					remote-endpoint =
+						<&tpdm_prng_out_tpda>;
+				};
+			};
+			port@4 {
+				reg = <7>;
+				tpda_in_tpdm_qm: endpoint {
+					slave-mode;
+					remote-endpoint =
+						<&tpdm_qm_out_tpda>;
+				};
+			};
+			port@5 {
+				reg = <9>;
+				tpda_in_tpdm_pimem: endpoint {
+					slave-mode;
+					remote-endpoint =
+						<&tpdm_pimem_out_tpda>;
+				};
+			};
+		};
+	};
+
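+	/*
+	 * Each TPDM below is a trace source feeding one TPDA input port
+	 * (matching the port@N/reg pairs above); the TPDA tags the merged
+	 * stream with its qcom,tpda-atid so the sink can separate sources.
+	 */
+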
+	tpdm_vsense: tpdm@7038000 {
+		compatible = "qcom,coresight-tpdm";
+		reg = <0x7038000 0x1000>;
+		reg-names = "tpdm-base";
+
+		coresight-name = "coresight-tpdm-vsense";
+
+		clocks = <&clock_gcc clk_qdss_clk>,
+			 <&clock_gcc clk_qdss_a_clk>;
+		clock-names = "core_clk", "core_a_clk";
+
+		port {
+			tpdm_vsense_out_tpda: endpoint {
+				remote-endpoint = <&tpda_in_tpdm_vsense>;
+			};
+		};
+	};
+
+	tpdm_dcc: tpdm@7054000 {
+		compatible = "qcom,coresight-tpdm";
+		reg = <0x7054000 0x1000>;
+		reg-names = "tpdm-base";
+
+		coresight-name = "coresight-tpdm-dcc";
+
+		clocks = <&clock_gcc clk_qdss_clk>,
+			 <&clock_gcc clk_qdss_a_clk>;
+		clock-names = "core_clk", "core_a_clk";
+
+		port {
+			tpdm_dcc_out_tpda: endpoint {
+				remote-endpoint = <&tpda_in_tpdm_dcc>;
+			};
+		};
+	};
+
+	tpdm_prng: tpdm@704c000 {
+		compatible = "qcom,coresight-tpdm";
+		reg = <0x704c000 0x1000>;
+		reg-names = "tpdm-base";
+
+		coresight-name = "coresight-tpdm-prng";
+
+		clocks = <&clock_gcc clk_qdss_clk>,
+			 <&clock_gcc clk_qdss_a_clk>;
+		clock-names = "core_clk", "core_a_clk";
+
+		port {
+			tpdm_prng_out_tpda: endpoint {
+				remote-endpoint = <&tpda_in_tpdm_prng>;
+			};
+		};
+	};
+
+	tpdm_qm: tpdm@71d0000 {
+		compatible = "qcom,coresight-tpdm";
+		reg = <0x71d0000 0x1000>;
+		reg-names = "tpdm-base";
+
+		coresight-name = "coresight-tpdm-qm";
+
+		clocks = <&clock_gcc clk_qdss_clk>,
+			 <&clock_gcc clk_qdss_a_clk>;
+		clock-names = "core_clk", "core_a_clk";
+
+		qcom,msr-fix-req;
+
+		port {
+			tpdm_qm_out_tpda: endpoint {
+				remote-endpoint = <&tpda_in_tpdm_qm>;
+			};
+		};
+
+	};
+
+	tpdm_pimem: tpdm@7050000 {
+		compatible = "qcom,coresight-tpdm";
+		reg = <0x7050000 0x1000>;
+		reg-names = "tpdm-base";
+
+		coresight-name = "coresight-tpdm-pimem";
+
+		clocks = <&clock_gcc clk_qdss_clk>,
+			 <&clock_gcc clk_qdss_a_clk>;
+		clock-names = "core_clk", "core_a_clk";
+
+		qcom,msr-fix-req;
+
+		port {
+			tpdm_pimem_out_tpda: endpoint {
+				remote-endpoint = <&tpda_in_tpdm_pimem>;
+			};
+		};
+
+	};
+
+	tpda_apss: tpda@7bc2000 {
+		compatible = "qcom,coresight-tpda";
+		reg = <0x7bc2000 0x1000>;
+		reg-names = "tpda-base";
+
+		coresight-name = "coresight-tpda-apss";
+
+		qcom,tpda-atid = <66>;
+		qcom,dsb-elem-size = <0 32>;
+
+		clocks = <&clock_gcc clk_qdss_clk>,
+			 <&clock_gcc clk_qdss_a_clk>;
+		clock-names = "core_clk", "core_a_clk";
+
+		ports {
+			#address-cells = <1>;
+			#size-cells = <0>;
+			port@0 {
+				reg = <0>;
+				tpda_apss_out_funnel_apss_merg: endpoint {
+					remote-endpoint =
+					       <&funnel_apss_merg_in_tpda_apss>;
+				};
+			};
+			port@1 {
+				reg = <0>;
+				tpda_apss_in_tpdm_apss: endpoint {
+					slave-mode;
+					remote-endpoint =
+						<&tpdm_apss_out_tpda_apss>;
+				};
+			};
+		};
+	};
+
+	tpdm_apss: tpdm@7bc0000 {
+		compatible = "qcom,coresight-tpdm";
+		reg = <0x7bc0000 0x1000>;
+		reg-names = "tpdm-base";
+
+		coresight-name = "coresight-tpdm-apss";
+
+		clocks = <&clock_gcc clk_qdss_clk>,
+			 <&clock_gcc clk_qdss_a_clk>;
+		clock-names = "core_clk", "core_a_clk";
+
+		qcom,msr-fix-req;
+
+		port {
+			tpdm_apss_out_tpda_apss: endpoint {
+				remote-endpoint = <&tpda_apss_in_tpdm_apss>;
+			};
+		};
+	};
+
+	tpda_mss: tpda@7043000 {
+		compatible = "qcom,coresight-tpda";
+		reg = <0x7043000 0x1000>;
+		reg-names = "tpda-base";
+
+		coresight-name = "coresight-tpda-mss";
+
+		qcom,tpda-atid = <67>;
+		qcom,dsb-elem-size = <0 32>;
+		qcom,cmb-elem-size = <0 32>;
+
+		clocks = <&clock_gcc clk_qdss_clk>,
+			 <&clock_gcc clk_qdss_a_clk>;
+		clock-names = "core_clk", "core_a_clk";
+
+		ports {
+			#address-cells = <1>;
+			#size-cells = <0>;
+			port@0 {
+				reg = <0>;
+				tpda_mss_out_funnel_in1: endpoint {
+					remote-endpoint =
+						<&funnel_in1_in_tpda_mss>;
+				};
+			};
+			port@1 {
+				reg = <0>;
+				tpda_mss_in_tpdm_mss: endpoint {
+					slave-mode;
+					remote-endpoint =
+						<&tpdm_mss_out_tpda_mss>;
+				};
+			};
+		};
+	};
+
+	tpdm_mss: tpdm@7042000 {
+		compatible = "qcom,coresight-tpdm";
+		reg = <0x7042000 0x1000>;
+		reg-names = "tpdm-base";
+
+		coresight-name = "coresight-tpdm-mss";
+
+		clocks = <&clock_gcc clk_qdss_clk>,
+			 <&clock_gcc clk_qdss_a_clk>;
+		clock-names = "core_clk", "core_a_clk";
+
+		qcom,msr-fix-req;
+
+		port {
+			tpdm_mss_out_tpda_mss: endpoint {
+				remote-endpoint = <&tpda_mss_in_tpdm_mss>;
+			};
+		};
+	};
+
+	tpda_nav: tpda@7191000 {
+		compatible = "qcom,coresight-tpda";
+		reg = <0x7191000 0x1000>;
+		reg-names = "tpda-base";
+
+		coresight-name = "coresight-tpda-nav";
+
+		qcom,tpda-atid = <68>;
+		qcom,cmb-elem-size = <0 32>;
+
+		clocks = <&clock_gcc clk_qdss_clk>,
+			 <&clock_gcc clk_qdss_a_clk>;
+		clock-names = "core_clk", "core_a_clk";
+
+		ports {
+			#address-cells = <1>;
+			#size-cells = <0>;
+			port@0 {
+				reg = <0>;
+				tpda_nav_out_funnel_in1: endpoint {
+					remote-endpoint =
+						<&funnel_in1_in_tpda_nav>;
+				};
+			};
+			port@1 {
+				reg = <0>;
+				tpda_nav_in_tpdm_nav: endpoint {
+					slave-mode;
+					remote-endpoint =
+						<&tpdm_nav_out_tpda_nav>;
+				};
+			};
+		};
+	};
+
+	tpdm_nav: tpdm@7190000 {
+		compatible = "qcom,coresight-tpdm";
+		reg = <0x7190000 0x1000>;
+		reg-names = "tpdm-base";
+
+		coresight-name = "coresight-tpdm-nav";
+
+		clocks = <&clock_gcc clk_qdss_clk>,
+			 <&clock_gcc clk_qdss_a_clk>;
+		clock-names = "core_clk", "core_a_clk";
+
+		port {
+			tpdm_nav_out_tpda_nav: endpoint {
+				remote-endpoint = <&tpda_nav_in_tpdm_nav>;
+			};
+		};
+	};
+
+	tpda_olc: tpda@7b92000 {
+		compatible = "qcom,coresight-tpda";
+		reg = <0x7b92000 0x1000>;
+		reg-names = "tpda-base";
+
+		coresight-name = "coresight-tpda-olc";
+
+		qcom,tpda-atid = <69>;
+		qcom,cmb-elem-size = <0 64>;
+
+		clocks = <&clock_gcc clk_qdss_clk>,
+			 <&clock_gcc clk_qdss_a_clk>;
+		clock-names = "core_clk", "core_a_clk";
+
+		ports {
+			#address-cells = <1>;
+			#size-cells = <0>;
+			port@0 {
+				reg = <0>;
+				tpda_olc_out_funnel_apss_merg: endpoint {
+					remote-endpoint =
+						<&funnel_apss_merg_in_tpda_olc>;
+				};
+			};
+			port@1 {
+				reg = <0>;
+				tpda_olc_in_tpdm_olc: endpoint {
+					slave-mode;
+					remote-endpoint =
+						<&tpdm_olc_out_tpda_olc>;
+				};
+			};
+		};
+	};
+
+	tpdm_olc: tpdm@7b90000 {
+		compatible = "qcom,coresight-tpdm";
+		reg = <0x7b90000 0x1000>;
+		reg-names = "tpdm-base";
+
+		coresight-name = "coresight-tpdm-olc";
+
+		clocks = <&clock_gcc clk_qdss_clk>,
+			 <&clock_gcc clk_qdss_a_clk>;
+		clock-names = "core_clk", "core_a_clk";
+
+		port {
+			tpdm_olc_out_tpda_olc: endpoint {
+				remote-endpoint = <&tpda_olc_in_tpdm_olc>;
+			};
+		};
+	};
+
+	funnel_spss: funnel@7083000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b908>;
+
+		reg = <0x7083000 0x1000>;
+		reg-names = "funnel-base";
+
+		coresight-name = "coresight-funnel-spss";
+
+		clocks = <&clock_gcc clk_qdss_clk>,
+			 <&clock_gcc clk_qdss_a_clk>;
+		clock-names = "apb_pclk", "core_a_clk";
+
+		ports {
+			#address-cells = <1>;
+			#size-cells = <0>;
+
+			port@0 {
+				reg = <0>;
+				funnel_spss_out_funnel_in0: endpoint {
+					remote-endpoint =
+					    <&funnel_in0_in_funnel_spss>;
+				};
+			};
+			port@1 {
+				reg = <0>;
+				funnel_spss_in_tpda_spss: endpoint {
+					slave-mode;
+					remote-endpoint =
+						<&tpda_spss_out_funnel_spss>;
+				};
+			};
+			port@2 {
+				reg = <1>;
+				funnel_spss_in_spss_etm0: endpoint {
+					slave-mode;
+					remote-endpoint =
+						<&spss_etm0_out_funnel_spss>;
+				};
+			};
+		};
+	};
+
+	tpda_spss: tpda@7082000 {
+		compatible = "qcom,coresight-tpda";
+		reg = <0x7082000 0x1000>;
+		reg-names = "tpda-base";
+
+		coresight-name = "coresight-tpda-spss";
+
+		qcom,tpda-atid = <70>;
+		qcom,dsb-elem-size = <0 32>;
+
+		clocks = <&clock_gcc clk_qdss_clk>,
+			 <&clock_gcc clk_qdss_a_clk>;
+		clock-names = "core_clk", "core_a_clk";
+
+		ports {
+			#address-cells = <1>;
+			#size-cells = <0>;
+			port@0 {
+				reg = <0>;
+				tpda_spss_out_funnel_spss: endpoint {
+					remote-endpoint =
+						<&funnel_spss_in_tpda_spss>;
+				};
+			};
+			port@1 {
+				reg = <0>;
+				tpda_spss_in_tpdm_spss: endpoint {
+					slave-mode;
+					remote-endpoint =
+						<&tpdm_spss_out_tpda_spss>;
+				};
+			};
+		};
+	};
+
+	tpdm_spss: tpdm@7080000 {
+		compatible = "qcom,coresight-tpdm";
+		reg = <0x7080000 0x1000>;
+		reg-names = "tpdm-base";
+
+		coresight-name = "coresight-tpdm-spss";
+
+		clocks = <&clock_gcc clk_qdss_clk>,
+			 <&clock_gcc clk_qdss_a_clk>;
+		clock-names = "core_clk", "core_a_clk";
+
+		qcom,msr-fix-req;
+
+		port {
+			tpdm_spss_out_tpda_spss: endpoint {
+				remote-endpoint = <&tpda_spss_in_tpdm_spss>;
+			};
+		};
+	};
+
+	hwevent: hwevent@158000 {
+		compatible = "qcom,coresight-hwevent";
+		reg = <0x158000 0x80>,
+		      <0x17091000 0x80>,
+		      <0x1730200c 0x4>,
+		      <0xc90137c 0x4>,
+		      <0xc828018 0x80>,
+		      <0x1c00058 0x80>,
+		      <0x5e02038 0x4>,
+		      <0x5e02028 0x10>,
+		      <0x1fcb360 0x80>,
+		      <0x1fcb760 0x80>,
+		      <0x1fcbf60 0x80>,
+		      <0xa8f8860 0x4>,
+		      <0x500c260 0x4>,
+		      <0x500d040 0x4>,
+		      <0x1da6400 0x80>;
+		reg-names = "gcc-ctrl", "lpass-stm", "lpass-qdsp", "mdss-mdp",
+			    "mdss-misc", "pcie0-hwev", "ssc-en", "ssc-hwev",
+			    "tcsr-qdss", "tcsr-mss0", "tcsr-mss1", "usb-ctrl",
+			    "vbif-stm", "vbif-stm-en", "ufs-mux";
+
+		coresight-name = "coresight-hwevent";
+
+		clocks = <&clock_gcc clk_qdss_clk>,
+			 <&clock_gcc clk_qdss_a_clk>,
+			 <&clock_mmss clk_mmss_misc_ahb_clk>;
+		clock-names = "core_clk", "core_a_clk", "core_mmss_clk";
+
+		qcom,hwevent-clks = "core_mmss_clk";
+	};
+
+	csr: csr@6001000 {
+		compatible = "qcom,coresight-csr";
+		reg = <0x6001000 0x1000>;
+		reg-names = "csr-base";
+
+		coresight-name = "coresight-csr";
+
+		qcom,blk-size = <1>;
+	};
+
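+	/*
+	 * The *_etm0 nodes below have no local registers: each describes a
+	 * trace source running on a remote processor (modem, audio, RPM)
+	 * that is addressed through its qcom,inst-id.
+	 */
+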
+	modem_etm0 {
+		compatible = "qcom,coresight-remote-etm";
+
+		coresight-name = "coresight-modem-etm0";
+		qcom,inst-id = <2>;
+
+		port {
+			modem_etm0_out_funnel_in1: endpoint {
+				remote-endpoint = <&funnel_in1_in_modem_etm0>;
+			};
+		};
+	};
+
+	audio_etm0 {
+		compatible = "qcom,coresight-remote-etm";
+
+		coresight-name = "coresight-audio-etm0";
+		qcom,inst-id = <5>;
+
+		port {
+			audio_etm0_out_funnel_in1: endpoint {
+				remote-endpoint = <&funnel_in1_in_audio_etm0>;
+			};
+		};
+	};
+
+	rpm_etm0 {
+		compatible = "qcom,coresight-remote-etm";
+
+		coresight-name = "coresight-rpm-etm0";
+		qcom,inst-id = <4>;
+
+		port {
+			rpm_etm0_out_funnel_in0: endpoint {
+				remote-endpoint = <&funnel_in0_in_rpm_etm0>;
+			};
+		};
+	};
+
+	funnel_dlet_qatb: funnel@7225000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b908>;
+
+		reg = <0x7225000 0x1000>;
+		reg-names = "funnel-base";
+
+		coresight-name = "coresight-funnel-dlet-qatb";
+
+		clocks = <&clock_gcc clk_qdss_clk>,
+			 <&clock_gcc clk_qdss_a_clk>;
+		clock-names = "apb_pclk", "core_a_clk";
+
+		ports {
+			#address-cells = <1>;
+			#size-cells = <0>;
+
+			port@0 {
+				reg = <0>;
+				funnel_dlet_qatb_out_funnel_qatb: endpoint {
+					remote-endpoint =
+					    <&funnel_qatb_in_funnel_dlet_qatb>;
+				};
+			};
+			port@1 {
+				reg = <1>;
+				funnel_dlet_qatb_in_tpdm_wcss: endpoint {
+					slave-mode;
+					remote-endpoint =
+					      <&tpdm_wcss_out_funnel_dlet_qatb>;
+				};
+			};
+		};
+	};
+
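+	/*
+	 * The qcom,coresight-dummy nodes stand in for sources the HLOS
+	 * cannot program directly; they only keep the trace interconnect
+	 * graph complete so paths through these funnel ports resolve.
+	 */
+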
+	dummy-tpdm-wcss {
+		compatible = "qcom,coresight-dummy";
+
+		coresight-name = "coresight-tpdm-wcss";
+
+		port {
+			tpdm_wcss_out_funnel_dlet_qatb: endpoint {
+				remote-endpoint =
+					<&funnel_dlet_qatb_in_tpdm_wcss>;
+			};
+		};
+	};
+
+	dummy-spss-etm0 {
+		compatible = "qcom,coresight-dummy";
+
+		coresight-name = "coresight-spss-etm0";
+
+		port {
+			spss_etm0_out_funnel_spss: endpoint {
+				remote-endpoint =
+					<&funnel_spss_in_spss_etm0>;
+			};
+		};
+	};
+};
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/arch/arm64/boot/dts/qcom/msm8998.dtsi	2019-10-29 09:26:22.921196113 +0100
@@ -0,0 +1,3351 @@
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include "skeleton64.dtsi"
+#include <dt-bindings/clock/msm-clocks-8998.h>
+#include <dt-bindings/regulator/qcom,rpm-smd-regulator.h>
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+
+/ {
+	model = "Qualcomm Technologies, Inc. MSM 8998";
+	compatible = "qcom,msm8998";
+	qcom,msm-id = <292 0x0>;
+	interrupt-parent = <&intc>;
+
+	aliases {
+		serial0 = &uartblsp2dm1;
+		pci-domain0 = &pcie0;
+		sdhc2 = &sdhc_2; /* SDC2 SD card slot */
+	};
+
+	psci {
+		compatible = "arm,psci-1.0";
+		method = "smc";
+	};
+
+	chosen {
+		stdout-path = "serial0";
+	};
+
+	cpus {
+		#address-cells = <2>;
+		#size-cells = <0>;
+
+		CPU0: cpu@0 {
+			device_type = "cpu";
+			compatible = "arm,armv8";
+			reg = <0x0 0x0>;
+			qcom,limits-info = <&mitigation_profile0>;
+			qcom,lmh-dcvs = <&lmh_dcvs0>;
+			enable-method = "psci";
+			efficiency = <1024>;
+			next-level-cache = <&L2_0>;
+			qcom,ea = <&ea0>;
+			L2_0: l2-cache {
+				compatible = "arm,arch-cache";
+				cache-level = <2>;
+				qcom,dump-size = <0x0>; /* A53 L2 dump not supported */
+			};
+			L1_I_0: l1-icache {
+				compatible = "arm,arch-cache";
+				qcom,dump-size = <0x9040>;
+			};
+			L1_D_0: l1-dcache {
+				compatible = "arm,arch-cache";
+				qcom,dump-size = <0x9040>;
+			};
+			L1_TLB_0: l1-tlb {
+				qcom,dump-size = <0x2000>;
+			};
+		};
+
+		CPU1: cpu@1 {
+			device_type = "cpu";
+			compatible = "arm,armv8";
+			reg = <0x0 0x1>;
+			qcom,limits-info = <&mitigation_profile1>;
+			qcom,lmh-dcvs = <&lmh_dcvs0>;
+			enable-method = "psci";
+			efficiency = <1024>;
+			next-level-cache = <&L2_0>;
+			qcom,ea = <&ea1>;
+			L1_I_1: l1-icache {
+				compatible = "arm,arch-cache";
+				qcom,dump-size = <0x9040>;
+			};
+			L1_D_1: l1-dcache {
+				compatible = "arm,arch-cache";
+				qcom,dump-size = <0x9040>;
+			};
+			L1_TLB_1: l1-tlb {
+				qcom,dump-size = <0x2000>;
+			};
+		};
+
+		CPU2: cpu@2 {
+			device_type = "cpu";
+			compatible = "arm,armv8";
+			reg = <0x0 0x2>;
+			qcom,limits-info = <&mitigation_profile2>;
+			qcom,lmh-dcvs = <&lmh_dcvs0>;
+			enable-method = "psci";
+			efficiency = <1024>;
+			next-level-cache = <&L2_0>;
+			qcom,ea = <&ea2>;
+			L1_I_2: l1-icache {
+				compatible = "arm,arch-cache";
+				qcom,dump-size = <0x9040>;
+			};
+			L1_D_2: l1-dcache {
+				compatible = "arm,arch-cache";
+				qcom,dump-size = <0x9040>;
+			};
+			L1_TLB_2: l1-tlb {
+				qcom,dump-size = <0x2000>;
+			};
+		};
+
+		CPU3: cpu@3 {
+			device_type = "cpu";
+			compatible = "arm,armv8";
+			reg = <0x0 0x3>;
+			qcom,limits-info = <&mitigation_profile3>;
+			qcom,lmh-dcvs = <&lmh_dcvs0>;
+			enable-method = "psci";
+			efficiency = <1024>;
+			next-level-cache = <&L2_0>;
+			qcom,ea = <&ea3>;
+			L1_I_3: l1-icache {
+				compatible = "arm,arch-cache";
+				qcom,dump-size = <0x9040>;
+			};
+			L1_D_3: l1-dcache {
+				compatible = "arm,arch-cache";
+				qcom,dump-size = <0x9040>;
+			};
+			L1_TLB_3: l1-tlb {
+				qcom,dump-size = <0x2000>;
+			};
+		};
+
+		CPU4: cpu@100 {
+			device_type = "cpu";
+			compatible = "arm,armv8";
+			reg = <0x0 0x100>;
+			qcom,limits-info = <&mitigation_profile4>;
+			qcom,lmh-dcvs = <&lmh_dcvs1>;
+			enable-method = "psci";
+			efficiency = <1536>;
+			next-level-cache = <&L2_1>;
+			qcom,ea = <&ea4>;
+			L2_1: l2-cache {
+				compatible = "arm,arch-cache";
+				cache-level = <2>;
+			};
+			L1_I_100: l1-icache {
+				compatible = "arm,arch-cache";
+				qcom,dump-size = <0x12000>;
+			};
+			L1_D_100: l1-dcache {
+				compatible = "arm,arch-cache";
+				qcom,dump-size = <0x12000>;
+			};
+			L1_TLB_100: l1-tlb {
+				qcom,dump-size = <0x4800>;
+			};
+		};
+
+		CPU5: cpu@101 {
+			device_type = "cpu";
+			compatible = "arm,armv8";
+			reg = <0x0 0x101>;
+			qcom,limits-info = <&mitigation_profile5>;
+			qcom,lmh-dcvs = <&lmh_dcvs1>;
+			enable-method = "psci";
+			efficiency = <1536>;
+			next-level-cache = <&L2_1>;
+			qcom,ea = <&ea5>;
+			L1_I_101: l1-icache {
+				compatible = "arm,arch-cache";
+				qcom,dump-size = <0x12000>;
+			};
+			L1_D_101: l1-dcache {
+				compatible = "arm,arch-cache";
+				qcom,dump-size = <0x12000>;
+			};
+			L1_TLB_101: l1-tlb {
+				qcom,dump-size = <0x4800>;
+			};
+		};
+
+		CPU6: cpu@102 {
+			device_type = "cpu";
+			compatible = "arm,armv8";
+			reg = <0x0 0x102>;
+			qcom,limits-info = <&mitigation_profile6>;
+			qcom,lmh-dcvs = <&lmh_dcvs1>;
+			enable-method = "psci";
+			efficiency = <1536>;
+			next-level-cache = <&L2_1>;
+			qcom,ea = <&ea6>;
+			L1_I_102: l1-icache {
+				compatible = "arm,arch-cache";
+				qcom,dump-size = <0x12000>;
+			};
+			L1_D_102: l1-dcache {
+				compatible = "arm,arch-cache";
+				qcom,dump-size = <0x12000>;
+			};
+			L1_TLB_102: l1-tlb {
+				qcom,dump-size = <0x4800>;
+			};
+		};
+
+		CPU7: cpu@103 {
+			device_type = "cpu";
+			compatible = "arm,armv8";
+			reg = <0x0 0x103>;
+			qcom,limits-info = <&mitigation_profile7>;
+			qcom,lmh-dcvs = <&lmh_dcvs1>;
+			enable-method = "psci";
+			efficiency = <1536>;
+			next-level-cache = <&L2_1>;
+			qcom,ea = <&ea7>;
+			L1_I_103: l1-icache {
+				compatible = "arm,arch-cache";
+				qcom,dump-size = <0x12000>;
+			};
+			L1_D_103: l1-dcache {
+				compatible = "arm,arch-cache";
+				qcom,dump-size = <0x12000>;
+			};
+			L1_TLB_103: l1-tlb {
+				qcom,dump-size = <0x4800>;
+			};
+		};
+
+		cpu-map {
+			cluster0 {
+				core0 {
+					cpu = <&CPU0>;
+				};
+
+				core1 {
+					cpu = <&CPU1>;
+				};
+
+				core2 {
+					cpu = <&CPU2>;
+				};
+
+				core3 {
+					cpu = <&CPU3>;
+				};
+			};
+
+			cluster1 {
+				core0 {
+					cpu = <&CPU4>;
+				};
+
+				core1 {
+					cpu = <&CPU5>;
+				};
+
+				core2 {
+					cpu = <&CPU6>;
+				};
+
+				core3 {
+					cpu = <&CPU7>;
+				};
+			};
+		};
+	};
+
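+	/*
+	 * The efficiency values above (1024 for CPU0-3, 1536 for CPU4-7)
+	 * give the scheduler the relative capacity of the two clusters;
+	 * cpu-map groups the cores into cluster0/cluster1 accordingly.
+	 */
+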
+	soc: soc { };
+
+	vendor: vendor {
+		#address-cells = <1>;
+		#size-cells = <1>;
+		ranges = <0 0 0 0xffffffff>;
+		compatible = "simple-bus";
+	};
+
+	reserved-memory {
+		#address-cells = <2>;
+		#size-cells = <2>;
+		ranges;
+
+		removed_regions: removed_regions@85800000 {
+			compatible = "removed-dma-pool";
+			no-map;
+			reg = <0 0x85800000 0 0x3700000>;
+		};
+
+		pil_slpi_mem: pil_slpi_region@94400000 {
+			compatible = "removed-dma-pool";
+			no-map;
+			reg = <0 0x94400000 0 0xf00000>;
+		};
+
+		pil_ipa_gpu_mem: pil_ipa_gpu_region@94300000 {
+			compatible = "removed-dma-pool";
+			no-map;
+			reg = <0 0x94300000 0 0x100000>;
+		};
+
+		pil_mba_mem: pil_mba_region@94100000 {
+			compatible = "removed-dma-pool";
+			no-map;
+			reg = <0 0x94100000 0 0x200000>;
+		};
+
+		pil_video_mem: pil_video_region@93c00000 {
+			compatible = "removed-dma-pool";
+			no-map;
+			reg = <0 0x93c00000 0 0x500000>;
+		};
+
+		modem_mem: modem_region@8cc00000 {
+			compatible = "removed-dma-pool";
+			no-map;
+			reg = <0 0x8cc00000 0 0x7000000>;
+		};
+
+		pil_adsp_mem: pil_adsp_region@8b200000 {
+			compatible = "removed-dma-pool";
+			no-map;
+			reg = <0 0x8b200000 0 0x1a00000>;
+		};
+
+		spss_mem: spss_region@8ab00000 { /* for SPSS-PIL */
+			compatible = "removed-dma-pool";
+			no-map;
+			reg = <0 0x8ab00000 0 0x700000>;
+		};
+
+		adsp_mem: adsp_region {
+			compatible = "shared-dma-pool";
+			alloc-ranges = <0 0x00000000 0 0xffffffff>;
+			reusable;
+			alignment = <0 0x400000>;
+			size = <0 0x800000>;
+		};
+
+		qseecom_mem: qseecom_region {
+			compatible = "shared-dma-pool";
+			alloc-ranges = <0 0x00000000 0 0xffffffff>;
+			reusable;
+			alignment = <0 0x400000>;
+			size = <0 0x1400000>;
+		};
+
+		sp_mem: sp_region {  /* SPSS-HLOS ION shared mem */
+			compatible = "shared-dma-pool";
+			alloc-ranges = <0 0x00000000 0 0xffffffff>; /* 32-bit */
+			reusable;
+			alignment = <0 0x100000>;
+			size = <0 0x800000>;
+		};
+
+		secure_display_memory: secure_region {
+			compatible = "shared-dma-pool";
+			alloc-ranges = <0 0x00000000 0 0xffffffff>;
+			reusable;
+			alignment = <0 0x200000>;
+			size = <0 0x8000000>;
+		};
+
+		/* global autoconfigured region for contiguous allocations */
+		linux,cma {
+			compatible = "shared-dma-pool";
+			alloc-ranges = <0 0x00000000 0 0xffffffff>;
+			reusable;
+			alignment = <0 0x400000>;
+			size = <0 0x2000000>;
+			linux,cma-default;
+		};
+
+		cont_splash_mem: splash_region@9d600000 {
+			reg = <0x0 0x9d600000 0x0 0x02400000>;
+			label = "cont_splash_mem";
+		};
+	};
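+
+	/*
+	 * Two pool types are used above: "removed-dma-pool" regions are
+	 * carved out and never mapped by the HLOS (firmware/PIL carveouts),
+	 * while "shared-dma-pool" regions are CMA-style pools the kernel
+	 * can reuse while the client is not holding them.
+	 */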
+};
+
+#include "msm8998-smp2p.dtsi"
+#include "msm-gdsc-8998.dtsi"
+
+&soc {
+	#address-cells = <1>;
+	#size-cells = <1>;
+	ranges = <0 0 0 0xffffffff>;
+	compatible = "simple-bus";
+
+	intc: interrupt-controller@17a00000 {
+		compatible = "arm,gic-v3";
+		reg = <0x17a00000 0x10000>,       /* GICD */
+		      <0x17b00000 0x100000>;      /* GICR * 8 */
+		#interrupt-cells = <3>;
+		#address-cells = <1>;
+		#size-cells = <1>;
+		ranges;
+		interrupt-controller;
+		#redistributor-regions = <1>;
+		redistributor-stride = <0x0 0x20000>;
+		interrupts = <1 9 4>;
+
+		gic-its@17a20000 {
+			compatible = "arm,gic-v3-its";
+			msi-controller;
+			reg = <0x17a20000 0x20000>;
+		};
+	};
+
+	timer {
+		compatible = "arm,armv8-timer";
+		interrupts = <1 1 0xf08>,
+			     <1 2 0xf08>,
+			     <1 3 0xf08>,
+			     <1 0 0xf08>;
+		clock-frequency = <19200000>;
+	};
+
+	restart@10ac000 {
+		compatible = "qcom,pshold";
+		reg = <0x10ac000 0x4>,
+		      <0x1fd3000 0x4>;
+		reg-names = "pshold-base", "tcsr-boot-misc-detect";
+	};
+
+	spmi_bus: qcom,spmi@800f000 {
+		compatible = "qcom,spmi-pmic-arb";
+		reg =	<0x800f000 0x1000>,
+			<0x8400000 0x1000000>,
+			<0x9400000 0x1000000>,
+			<0xa400000 0x220000>,
+			<0x800a000 0x3000>;
+		reg-names = "core", "chnls", "obsrvr", "intr", "cnfg";
+		interrupt-names = "periph_irq";
+		interrupts = <GIC_SPI 326 IRQ_TYPE_NONE>;
+		qcom,ee = <0>;
+		qcom,channel = <0>;
+		qcom,reserved-chan = <511>;
+		#address-cells = <2>;
+		#size-cells = <0>;
+		interrupt-controller;
+		#interrupt-cells = <4>;
+		cell-index = <0>;
+	};
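+
+	/*
+	 * #interrupt-cells = <4>: per the qcom,spmi-pmic-arb binding, PMIC
+	 * peripheral interrupts are encoded as
+	 * <slave-id peripheral-id irq-number irq-flags>.
+	 */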
+
+	qcom,sps {
+		compatible = "qcom,msm_sps_4k";
+		qcom,device-type = <3>;
+		qcom,pipe-attr-ee;
+	};
+
+	uartblsp1dm1: serial@0c170000 {
+		compatible = "qcom,msm-uartdm-v1.4", "qcom,msm-uartdm";
+		reg = <0xc170000 0x1000>;
+		interrupts = <0 108 0>;
+		status = "disabled";
+		clocks = <&clock_gcc clk_gcc_blsp1_uart2_apps_clk>,
+			 <&clock_gcc clk_gcc_blsp1_ahb_clk>;
+		clock-names = "core", "iface";
+	};
+
+	uartblsp2dm1: serial@0c1b0000 {
+		compatible = "qcom,msm-uartdm-v1.4", "qcom,msm-uartdm";
+		reg = <0xc1b0000 0x1000>;
+		interrupts = <0 114 0>;
+		status = "disabled";
+		clocks = <&clock_gcc clk_gcc_blsp2_uart2_apps_clk>,
+			 <&clock_gcc clk_gcc_blsp2_ahb_clk>;
+		clock-names = "core", "iface";
+	};
+
+	slim_aud: slim@171c0000 {
+		cell-index = <1>;
+		compatible = "qcom,slim-ngd";
+		reg = <0x171c0000 0x2C000>,
+			<0x17184000 0x32000>;
+		reg-names = "slimbus_physical", "slimbus_bam_physical";
+		interrupts = <0 163 0>, <0 164 0>;
+		interrupt-names = "slimbus_irq", "slimbus_bam_irq";
+		qcom,apps-ch-pipes = <0x00001f80>;
+		qcom,ea-pc = <0x210>;
+	};
+
+	slim_qca: slim@17240000 {
+		status = "ok";
+		cell-index = <3>;
+		compatible = "qcom,slim-ngd";
+		reg = <0x17240000 0x2C000>,
+			<0x17204000 0x26000>;
+		reg-names = "slimbus_physical", "slimbus_bam_physical";
+		interrupts = <0 291 0>, <0 292 0>;
+		interrupt-names = "slimbus_irq", "slimbus_bam_irq";
+
+		/* Slimbus Slave DT for WCN3990 */
+		btfmslim_codec: wcn3990 {
+			compatible = "qcom,btfmslim_slave";
+			elemental-addr = [00 01 20 02 17 02];
+			qcom,btfm-slim-ifd = "btfmslim_slave_ifd";
+			qcom,btfm-slim-ifd-elemental-addr = [00 00 20 02 17 02];
+		};
+	};
+
+	timer@17920000 {
+		#address-cells = <1>;
+		#size-cells = <1>;
+		ranges;
+		compatible = "arm,armv7-timer-mem";
+		reg = <0x17920000 0x1000>;
+		clock-frequency = <19200000>;
+
+		frame@17921000 {
+			frame-number = <0>;
+			interrupts = <0 8 0x4>,
+				     <0 7 0x4>;
+			reg = <0x17921000 0x1000>,
+			      <0x17922000 0x1000>;
+		};
+
+		frame@17923000 {
+			frame-number = <1>;
+			interrupts = <0 9 0x4>;
+			reg = <0x17923000 0x1000>;
+			status = "disabled";
+		};
+
+		frame@17924000 {
+			frame-number = <2>;
+			interrupts = <0 10 0x4>;
+			reg = <0x17924000 0x1000>;
+			status = "disabled";
+		};
+
+		frame@17925000 {
+			frame-number = <3>;
+			interrupts = <0 11 0x4>;
+			reg = <0x17925000 0x1000>;
+			status = "disabled";
+		};
+
+		frame@17926000 {
+			frame-number = <4>;
+			interrupts = <0 12 0x4>;
+			reg = <0x17926000 0x1000>;
+			status = "disabled";
+		};
+
+		frame@17927000 {
+			frame-number = <5>;
+			interrupts = <0 13 0x4>;
+			reg = <0x17927000 0x1000>;
+			status = "disabled";
+		};
+
+		frame@17928000 {
+			frame-number = <6>;
+			interrupts = <0 14 0x4>;
+			reg = <0x17928000 0x1000>;
+			status = "disabled";
+		};
+	};
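+
+	/*
+	 * Only frame 0 is enabled: it is the memory-mapped timer frame the
+	 * HLOS uses, while frames 1-6 stay disabled, presumably reserved
+	 * for other execution environments.
+	 */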
+
+	cpubw: qcom,cpubw {
+		compatible = "qcom,devbw";
+		governor = "performance";
+		qcom,src-dst-ports = <1 512>;
+		qcom,active-only;
+		qcom,bw-tbl =
+			<   762 /*  100 MHz */ >,
+			<  1144 /*  150 MHz */ >,
+			<  1525 /*  200 MHz */ >,
+			<  2288 /*  300 MHz */ >,
+			<  3143 /*  412 MHz */ >,
+			<  4173 /*  547 MHz */ >,
+			<  5195 /*  681 MHz */ >,
+			<  5859 /*  768 MHz */ >,
+			<  7759 /* 1017 MHz */ >,
+			<  9887 /* 1296 MHz */ >,
+			< 11863 /* 1555 MHz */ >,
+			< 13763 /* 1804 MHz */ >;
+	};
+
+	bwmon: qcom,cpu-bwmon {
+		compatible = "qcom,bimc-bwmon3";
+		reg = <0x01008000 0x300>, <0x01001000 0x200>;
+		reg-names = "base", "global_base";
+		interrupts = <0 183 4>;
+		qcom,mport = <0>;
+		qcom,target-dev = <&cpubw>;
+	};
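+
+	/*
+	 * bwmon counts CPU (qcom,mport 0) traffic to the BIMC and drives
+	 * bandwidth votes on its qcom,target-dev (cpubw); the qcom,bw-tbl
+	 * entries are the selectable vote levels, annotated with the DDR
+	 * frequency each one corresponds to.
+	 */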
+
+	mincpubw: qcom,mincpubw {
+		compatible = "qcom,devbw";
+		governor = "powersave";
+		qcom,src-dst-ports = <1 512>;
+		qcom,active-only;
+		qcom,bw-tbl =
+			<   762 /*  100 MHz */ >,
+			<  1144 /*  150 MHz */ >,
+			<  1525 /*  200 MHz */ >,
+			<  2288 /*  300 MHz */ >,
+			<  3143 /*  412 MHz */ >,
+			<  4173 /*  547 MHz */ >,
+			<  5195 /*  681 MHz */ >,
+			<  5859 /*  768 MHz */ >,
+			<  7759 /* 1017 MHz */ >,
+			<  9887 /* 1296 MHz */ >,
+			< 11863 /* 1555 MHz */ >,
+			< 13763 /* 1804 MHz */ >;
+	};
+
+	memlat_cpu0: qcom,memlat-cpu0 {
+		compatible = "qcom,devbw";
+		governor = "powersave";
+		qcom,src-dst-ports = <1 512>;
+		qcom,active-only;
+		qcom,bw-tbl =
+			<   762 /*  100 MHz */ >,
+			<  1144 /*  150 MHz */ >,
+			<  1525 /*  200 MHz */ >,
+			<  2288 /*  300 MHz */ >,
+			<  3143 /*  412 MHz */ >,
+			<  4173 /*  547 MHz */ >,
+			<  5195 /*  681 MHz */ >,
+			<  5859 /*  768 MHz */ >,
+			<  7759 /* 1017 MHz */ >,
+			<  9887 /* 1296 MHz */ >,
+			< 11863 /* 1555 MHz */ >,
+			< 13763 /* 1804 MHz */ >;
+	};
+
+	memlat_cpu4: qcom,memlat-cpu4 {
+		compatible = "qcom,devbw";
+		governor = "powersave";
+		qcom,src-dst-ports = <1 512>;
+		qcom,active-only;
+		status = "ok";
+		qcom,bw-tbl =
+			<   762 /*  100 MHz */ >,
+			<  1144 /*  150 MHz */ >,
+			<  1525 /*  200 MHz */ >,
+			<  2288 /*  300 MHz */ >,
+			<  3143 /*  412 MHz */ >,
+			<  4173 /*  547 MHz */ >,
+			<  5195 /*  681 MHz */ >,
+			<  5859 /*  768 MHz */ >,
+			<  7759 /* 1017 MHz */ >,
+			<  9887 /* 1296 MHz */ >,
+			< 11863 /* 1555 MHz */ >,
+			< 13763 /* 1804 MHz */ >;
+	};
+
+	devfreq_memlat_0: qcom,arm-memlat-mon-0 {
+		compatible = "qcom,arm-memlat-mon";
+		qcom,cpulist =	<&CPU0 &CPU1 &CPU2 &CPU3>;
+		qcom,target-dev = <&memlat_cpu0>;
+		qcom,core-dev-table =
+			<  300000 1525 >,
+			<  499200 3143 >,
+			< 1113600 4173 >,
+			< 1881600 5859 >;
+	};
+
+	devfreq_memlat_4: qcom,arm-memlat-mon-4 {
+		compatible = "qcom,arm-memlat-mon";
+		qcom,cpulist =	<&CPU4 &CPU5 &CPU6 &CPU7>;
+		qcom,target-dev = <&memlat_cpu4>;
+		qcom,core-dev-table =
+			<  300000  1525 >,
+			<  480000  3143 >,
+			<  900000  4173 >,
+			< 1017000  7759 >,
+			< 1296000  9887 >,
+			< 1555000 11863 >,
+			< 1804000 13763 >;
+	};
+
+	devfreq_cpufreq: devfreq-cpufreq {
+		mincpubw-cpufreq {
+			target-dev = <&mincpubw>;
+			cpu-to-dev-map-0 =
+				< 1881600 1525 >;
+			cpu-to-dev-map-4 =
+				< 2016000 1525 >,
+				< 2092800 5195 >;
+		};
+	};
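+
+	/*
+	 * The cpu-to-dev-map-* pairs read as <cpufreq-kHz bandwidth-vote>:
+	 * once a CPU in the cluster reaches the listed frequency, mincpubw
+	 * floors the bus vote at the paired qcom,bw-tbl level.
+	 */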
+
+	msm_cpufreq: qcom,msm-cpufreq {
+		compatible = "qcom,msm-cpufreq";
+		clock-names = "cpu0_clk", "cpu1_clk", "cpu2_clk",
+				"cpu3_clk", "cpu4_clk", "cpu5_clk",
+				"cpu6_clk", "cpu7_clk";
+		clocks = <&clock_cpu clk_pwrcl_clk>,
+			 <&clock_cpu clk_pwrcl_clk>,
+			 <&clock_cpu clk_pwrcl_clk>,
+			 <&clock_cpu clk_pwrcl_clk>,
+			 <&clock_cpu clk_perfcl_clk>,
+			 <&clock_cpu clk_perfcl_clk>,
+			 <&clock_cpu clk_perfcl_clk>,
+			 <&clock_cpu clk_perfcl_clk>;
+
+		qcom,governor-per-policy;
+
+		qcom,cpufreq-table-0 =
+			<  300000 >,
+			<  345600 >,
+			<  422400 >,
+			<  499200 >,
+			<  576000 >,
+			<  633600 >,
+			<  710400 >,
+			<  806400 >,
+			<  883200 >,
+			<  960000 >,
+			< 1036800 >,
+			< 1113600 >,
+			< 1190400 >,
+			< 1248000 >,
+			< 1324800 >,
+			< 1401600 >,
+			< 1478400 >,
+			< 1574400 >,
+			< 1651200 >,
+			< 1728000 >,
+			< 1804800 >,
+			< 1881600 >;
+
+		qcom,cpufreq-table-4 =
+			<  300000 >,
+			<  345600 >,
+			<  422400 >,
+			<  480000 >,
+			<  556800 >,
+			<  633600 >,
+			<  710400 >,
+			<  787200 >,
+			<  844800 >,
+			<  902400 >,
+			<  979200 >,
+			< 1056000 >,
+			< 1171200 >,
+			< 1248000 >,
+			< 1324800 >,
+			< 1401600 >,
+			< 1478400 >,
+			< 1536000 >,
+			< 1632000 >,
+			< 1708800 >,
+			< 1785600 >,
+			< 1862400 >,
+			< 1939200 >,
+			< 2016000 >,
+			< 2092800 >;
+	};
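+
+	/*
+	 * qcom,cpufreq-table-0 and -4 are the frequency lists for the power
+	 * (CPU0-3, pwrcl_clk) and performance (CPU4-7, perfcl_clk)
+	 * clusters, matching the per-cpu clock handles above.
+	 */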
+
+	arm64-cpu-erp {
+		compatible = "arm,arm64-cpu-erp";
+		interrupts = <0 43 4>,
+			     <0 44 4>,
+			     <0 41 4>,
+			     <0 42 4>;
+
+		interrupt-names = "pri-dbe-irq",
+				  "sec-dbe-irq",
+				  "pri-ext-irq",
+				  "sec-ext-irq";
+
+		poll-delay-ms = <5000>;
+	};
+
+	clock_gcc: qcom,gcc@100000 {
+		compatible = "qcom,gcc-8998";
+		reg = <0x100000 0xb0000>;
+		reg-names = "cc_base";
+		vdd_dig-supply = <&pm8998_s1_level>;
+		vdd_dig_ao-supply = <&pm8998_s1_level_ao>;
+		#clock-cells = <1>;
+		#reset-cells = <1>;
+	};
+
+	clock_mmss: qcom,mmsscc@c8c0000 {
+		compatible = "qcom,mmsscc-8998";
+		reg = <0xc8c0000 0x40000>;
+		reg-names = "cc_base";
+		vdd_dig-supply = <&pm8998_s1_level>;
+		vdd_mmsscc_mx-supply = <&pm8998_s9_level>;
+		clock-names = "xo", "gpll0", "gpll0_div",
+				"pclk0_src", "pclk1_src",
+				"byte0_src", "byte1_src",
+				"dp_link_src", "dp_vco_div",
+				"extpclk_src";
+		clocks = <&clock_gcc clk_cxo_clk_src>,
+			 <&clock_gcc clk_gcc_mmss_gpll0_clk>,
+			 <&clock_gcc clk_gcc_mmss_gpll0_div_clk>,
+			 <&mdss_dsi0_pll clk_dsi0pll_pclk_mux>,
+			 <&mdss_dsi1_pll clk_dsi1pll_pclk_mux>,
+			 <&mdss_dsi0_pll clk_dsi0pll_byteclk_mux>,
+			 <&mdss_dsi1_pll clk_dsi1pll_byteclk_mux>,
+			 <&mdss_dp_pll clk_dp_link_2x_clk_divsel_five>,
+			 <&mdss_dp_pll clk_vco_divided_clk_src_mux>,
+			 <&mdss_hdmi_pll clk_hdmi_vco_clk>;
+		#clock-cells = <1>;
+		#reset-cells = <1>;
+	};
+
+	clock_gpu: qcom,gpucc@5065000 {
+		compatible = "qcom,gpucc-8998";
+		reg = <0x5065000 0x9000>;
+		reg-names = "cc_base";
+		vdd_dig-supply = <&pm8998_s1_level>;
+		clock-names = "xo_ao", "gpll0";
+		clocks = <&clock_gcc clk_cxo_clk_src_ao>,
+			<&clock_gcc clk_gcc_gpu_gpll0_clk>;
+		#clock-cells = <1>;
+	};
+
+	clock_gfx: qcom,gfxcc@5065000 {
+		compatible = "qcom,gfxcc-8998";
+		reg = <0x5065000 0x9000>;
+		reg-names = "cc_base";
+		vdd_gpucc-supply = <&gfx_vreg>;
+		vdd_mx-supply = <&pm8998_s9_level>;
+		vdd_gpu_mx-supply = <&pm8998_s9_level>;
+		qcom,gfx3d_clk_src-opp-handle = <&msm_gpu>;
+		qcom,gfxfreq-speedbin0 =
+			<	  0 0				0 >,
+			< 171000000 1 RPM_SMD_REGULATOR_LEVEL_SVS >,
+			< 251000000 2 RPM_SMD_REGULATOR_LEVEL_SVS >,
+			< 332000000 3 RPM_SMD_REGULATOR_LEVEL_SVS >,
+			< 403000000 4 RPM_SMD_REGULATOR_LEVEL_SVS >,
+			< 504000000 5 RPM_SMD_REGULATOR_LEVEL_NOM >,
+			< 650000000 6 RPM_SMD_REGULATOR_LEVEL_TURBO >;
+		qcom,gfxfreq-mx-speedbin0 =
+			<         0			      0 >,
+			< 171000000 RPM_SMD_REGULATOR_LEVEL_SVS >,
+			< 251000000 RPM_SMD_REGULATOR_LEVEL_SVS >,
+			< 332000000 RPM_SMD_REGULATOR_LEVEL_SVS >,
+			< 403000000 RPM_SMD_REGULATOR_LEVEL_SVS >,
+			< 504000000 RPM_SMD_REGULATOR_LEVEL_NOM >,
+			< 650000000 RPM_SMD_REGULATOR_LEVEL_TURBO >;
+		#clock-cells = <1>;
+	};
+
+	clock_cpu: qcom,cpu-clock-8998@179c0000 {
+		compatible = "qcom,cpu-clock-osm-msm8998-v1";
+		reg = <0x179c0000 0x4000>,
+		      <0x17916000 0x1000>,
+		      <0x17816000 0x1000>,
+		      <0x179d1000 0x1000>,
+		      <0x00784130 0x8>,
+		      <0x1791101c 0x8>;
+		reg-names = "osm", "pwrcl_pll", "perfcl_pll",
+			    "apcs_common", "perfcl_efuse", "debug";
+
+		vdd-pwrcl-supply = <&apc0_pwrcl_vreg>;
+		vdd-perfcl-supply = <&apc1_perfcl_vreg>;
+
+		interrupts = <GIC_SPI 35 IRQ_TYPE_EDGE_RISING>,
+			     <GIC_SPI 36 IRQ_TYPE_EDGE_RISING>;
+		interrupt-names = "pwrcl-irq", "perfcl-irq";
+
+		qcom,pwrcl-speedbin0-v0 =
+			<   300000000 0x0004000f 0x01200020 0x1 1 >,
+			<   345600000 0x05040012 0x02200020 0x1 2 >,
+			<   422400000 0x05040016 0x02200020 0x1 3 >,
+			<   499200000 0x0504001a 0x02200020 0x1 4 >,
+			<   576000000 0x0504001e 0x03200020 0x1 5 >,
+			<   633600000 0x05040021 0x03200020 0x1 6 >,
+			<   710400000 0x05040025 0x03200020 0x1 7 >,
+			<   806400000 0x0504002a 0x04200020 0x1 8 >,
+			<   883200000 0x0404002e 0x04250025 0x1 9 >,
+			<   960000000 0x04040032 0x05280028 0x1 10 >,
+			<  1036800000 0x04040036 0x052b002b 0x2 11 >,
+			<  1113600000 0x0404003a 0x052e002e 0x2 12 >,
+			<  1190400000 0x0404003e 0x06320032 0x2 13 >,
+			<  1248000000 0x04040041 0x06340034 0x2 14 >,
+			<  1324800000 0x04040045 0x06370037 0x2 15 >,
+			<  1401600000 0x04040049 0x073a003a 0x2 16 >,
+			<  1478400000 0x0404004d 0x073e003e 0x2 17 >,
+			<  1574400000 0x04040052 0x08420042 0x2 18 >,
+			<  1651200000 0x04040056 0x08450045 0x2 19 >,
+			<  1728000000 0x0404005a 0x08480048 0x2 20 >,
+			<  1804800000 0x0404005e 0x094b004b 0x3 21 >,
+			<  1881600000 0x04040062 0x094e004e 0x3 22 >;
+
+		qcom,perfcl-speedbin0-v0 =
+			<   300000000 0x0004000f 0x01200020 0x1 1 >,
+			<   345600000 0x05040012 0x02200020 0x1 2 >,
+			<   422400000 0x05040016 0x02200020 0x1 3 >,
+			<   480000000 0x05040019 0x02200020 0x1 4 >,
+			<   556800000 0x0504001d 0x03200020 0x1 5 >,
+			<   633600000 0x05040021 0x03200020 0x1 6 >,
+			<   710400000 0x05040025 0x03200020 0x1 7 >,
+			<   787200000 0x05040029 0x04200020 0x1 8 >,
+			<   844800000 0x0404002c 0x04230023 0x1 9 >,
+			<   902400000 0x0404002f 0x04260026 0x1 10 >,
+			<   979200000 0x04040033 0x05290029 0x1 11 >,
+			<  1056000000 0x04040037 0x052c002c 0x1 12 >,
+			<  1171200000 0x0404003d 0x06310031 0x2 13 >,
+			<  1248000000 0x04040041 0x06340034 0x2 14 >,
+			<  1324800000 0x04040045 0x06370037 0x2 15 >,
+			<  1401600000 0x04040049 0x073a003a 0x2 16 >,
+			<  1478400000 0x0404004d 0x073e003e 0x2 17 >,
+			<  1536000000 0x04040050 0x07400040 0x2 18 >,
+			<  1632000000 0x04040055 0x08440044 0x2 19 >,
+			<  1708800000 0x04040059 0x08470047 0x2 20 >,
+			<  1785600000 0x0404005d 0x094a004a 0x2 21 >,
+			<  1862400000 0x04040061 0x094e004e 0x2 22 >,
+			<  1939200000 0x04040065 0x09510051 0x3 23 >,
+			<  2016000000 0x04040069 0x0a540054 0x3 24 >,
+			<  2092800000 0x0404006d 0x0a570057 0x3 25 >;
+
+		qcom,up-timer =
+			<1000 1000>;
+		qcom,down-timer =
+			<1000 1000>;
+		qcom,pc-override-index =
+			<0 0>;
+		qcom,set-ret-inactive;
+		qcom,enable-llm-freq-vote;
+		qcom,llm-freq-up-timer =
+			<327675 327675>;
+		qcom,llm-freq-down-timer =
+			<327675 327675>;
+		qcom,enable-llm-volt-vote;
+		qcom,llm-volt-up-timer =
+			<327675 327675>;
+		qcom,llm-volt-down-timer =
+			<327675 327675>;
+		qcom,cc-reads = <10>;
+		qcom,cc-delay = <5>;
+		qcom,cc-factor = <100>;
+		qcom,osm-clk-rate = <200000000>;
+		qcom,xo-clk-rate = <19200000>;
+
+		qcom,l-val-base =
+			<0x17916004 0x17816004>;
+		qcom,apcs-itm-present =
+			<0x179d143c 0x179d143c>;
+		qcom,apcs-pll-user-ctl =
+			<0x1791600c 0x1781600c>;
+		qcom,apcs-cfg-rcgr =
+			<0x17911054 0x17811054>;
+		qcom,apcs-cmd-rcgr =
+			<0x17911050 0x17811050>;
+		qcom,apm-mode-ctl =
+			<0x179d0004 0x179d0010>;
+		qcom,apm-ctrl-status =
+			<0x179d000c 0x179d0018>;
+		qcom,llm-sw-overr =
+			<0x8fff0036 0x8fff003a 0x0fff0036>,
+			<0x8fff003d 0x8fff0041 0x0fff003d>;
+
+		qcom,apm-threshold-voltage = <832000>;
+		qcom,boost-fsm-en;
+		qcom,safe-fsm-en;
+		qcom,ps-fsm-en;
+		qcom,droop-fsm-en;
+		qcom,wfx-fsm-en;
+		qcom,pc-fsm-en;
+
+		qcom,pwrcl-apcs-mem-acc-cfg =
+			<0x179d1360 0x179d1364 0x179d1364>;
+		qcom,perfcl-apcs-mem-acc-cfg =
+			<0x179d1368 0x179d136C 0x179d1370>;
+		qcom,pwrcl-apcs-mem-acc-val =
+			<0x00000000 0x80000000 0x80000000>,
+			<0x00000000 0x00000000 0x00000000>,
+			<0x00000000 0x00000001 0x00000001>;
+		qcom,perfcl-apcs-mem-acc-val =
+			<0x00000000 0x00000000 0x80000000>,
+			<0x00000000 0x00000000 0x00000000>,
+			<0x00000000 0x00000000 0x00000001>;
+
+		clock-names = "aux_clk", "xo_ao";
+		clocks = <&clock_gcc clk_hmss_gpll0_clk_src>,
+			<&clock_gcc clk_cxo_clk_src_ao>;
+		#clock-cells = <1>;
+	};
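+
+	/*
+	 * Each speedbin row above appears to pair a cluster frequency with
+	 * OSM LUT programming words; the trailing column is the 1-based
+	 * operating-point index (1-22 for pwrcl, 1-25 for perfcl).
+	 */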
+
+	clock_debug: qcom,debugcc@162000 {
+		compatible = "qcom,cc-debug-8998";
+		reg = <0x162000 0x4>;
+		reg-names = "cc_base";
+		clock-names = "debug_gpu_clk", "debug_gfx_clk",
+				"debug_mmss_clk", "debug_cpu_clk";
+		clocks = <&clock_gpu clk_gpucc_gcc_dbg_clk>,
+			 <&clock_gfx clk_gfxcc_dbg_clk>,
+			 <&clock_mmss clk_mmss_debug_mux>,
+			 <&clock_cpu clk_cpu_debug_mux>;
+		#clock-cells = <1>;
+	};
+
+	qcom,rmtfs_sharedmem@0 {
+		compatible = "qcom,sharedmem-uio";
+		reg = <0x0 0x00200000>;
+		reg-names = "rmtfs";
+		qcom,client-id = <0x00000001>;
+	};
+
+	qcom,msm_gsi {
+		compatible = "qcom,msm_gsi";
+	};
+
+	qcom,rmnet-ipa {
+		compatible = "qcom,rmnet-ipa3";
+		qcom,rmnet-ipa-ssr;
+		qcom,ipa-loaduC;
+		qcom,ipa-advertise-sg-support;
+	};
+
+	ipa_hw: qcom,ipa@01e00000 {
+		compatible = "qcom,ipa";
+		reg = <0x01e00000 0x34000>,
+			<0x01e84000 0x31fff>,
+			<0x01e04000 0x2c000>;
+		reg-names = "ipa-base", "bam-base", "gsi-base";
+		interrupts =
+			<0 333 0>,
+			<0 432 0>,
+			<0 432 0>;
+		interrupt-names = "ipa-irq", "bam-irq", "gsi-irq";
+		qcom,ipa-hw-ver = <11>; /* IPA core version = IPAv3.1 */
+		qcom,ipa-hw-mode = <0>; /* IPA hw type = Normal */
+		qcom,ee = <0>;
+		qcom,use-gsi;
+		qcom,use-ipa-tethering-bridge;
+		qcom,modem-cfg-emb-pipe-flt;
+		qcom,do-not-use-ch-gsi-20;
+		qcom,ipa-wdi2;
+		qcom,use-64-bit-dma-mask;
+		clocks = <&clock_gcc clk_ipa_clk>;
+		clock-names = "core_clk";
+		qcom,arm-smmu;
+		qcom,smmu-disable-htw;
+		qcom,smmu-s1-bypass;
+		qcom,msm-bus,name = "ipa";
+		qcom,msm-bus,num-cases = <4>;
+		qcom,msm-bus,num-paths = <4>;
+		qcom,msm-bus,vectors-KBps =
+		/* No vote */
+			<90 512 0 0>,
+			<90 585 0 0>,
+			<1 676 0 0>,
+			/* SMMU smmu_aggre2_noc_clk */
+			<81 10065 0 0>,
+		/* SVS */
+			<90 512 80000 640000>,
+			<90 585 80000 640000>,
+			<1 676 80000 80000>,
+			/* SMMU smmu_aggre2_noc_clk */
+			<81 10065 0 16000>,
+		/* NOMINAL */
+			<90 512 206000 960000>,
+			<90 585 206000 960000>,
+			<1 676 206000 160000>,
+			/* SMMU smmu_aggre2_noc_clk */
+			<81 10065 0 16000>,
+		/* TURBO */
+			<90 512 206000 3600000>,
+			<90 585 206000 3600000>,
+			<1 676 206000 300000>,
+			/* SMMU smmu_aggre2_noc_clk */
+			<81 10065 0 16000>;
+		qcom,bus-vector-names = "MIN", "SVS", "NOMINAL", "TURBO";
+
+		/* IPA RAM mmap */
+		qcom,ipa-ram-mmap = <
+				0x280	/* ofst_start; */
+				0x0	/* nat_ofst; */
+				0x0	/* nat_size; */
+				0x288	/* v4_flt_hash_ofst; */
+				0x78	/* v4_flt_hash_size; */
+				0x4000	/* v4_flt_hash_size_ddr; */
+				0x308	/* v4_flt_nhash_ofst; */
+				0x78	/* v4_flt_nhash_size; */
+				0x4000	/* v4_flt_nhash_size_ddr; */
+				0x388	/* v6_flt_hash_ofst; */
+				0x78	/* v6_flt_hash_size; */
+				0x4000	/* v6_flt_hash_size_ddr; */
+				0x408	/* v6_flt_nhash_ofst; */
+				0x78	/* v6_flt_nhash_size; */
+				0x4000	/* v6_flt_nhash_size_ddr; */
+				0xf	/* v4_rt_num_index; */
+				0x0	/* v4_modem_rt_index_lo; */
+				0x7	/* v4_modem_rt_index_hi; */
+				0x8	/* v4_apps_rt_index_lo; */
+				0xe	/* v4_apps_rt_index_hi; */
+				0x488	/* v4_rt_hash_ofst; */
+				0x78	/* v4_rt_hash_size; */
+				0x4000	/* v4_rt_hash_size_ddr; */
+				0x508	/* v4_rt_nhash_ofst; */
+				0x78	/* v4_rt_nhash_size; */
+				0x4000	/* v4_rt_nhash_size_ddr; */
+				0xf	/* v6_rt_num_index; */
+				0x0	/* v6_modem_rt_index_lo; */
+				0x7	/* v6_modem_rt_index_hi; */
+				0x8	/* v6_apps_rt_index_lo; */
+				0xe	/* v6_apps_rt_index_hi; */
+				0x588	/* v6_rt_hash_ofst; */
+				0x78	/* v6_rt_hash_size; */
+				0x4000	/* v6_rt_hash_size_ddr; */
+				0x608	/* v6_rt_nhash_ofst; */
+				0x78	/* v6_rt_nhash_size; */
+				0x4000	/* v6_rt_nhash_size_ddr; */
+				0x688	/* modem_hdr_ofst; */
+				0x140	/* modem_hdr_size; */
+				0x7c8	/* apps_hdr_ofst; */
+				0x0	/* apps_hdr_size; */
+				0x800	/* apps_hdr_size_ddr; */
+				0x7d0	/* modem_hdr_proc_ctx_ofst; */
+				0x200	/* modem_hdr_proc_ctx_size; */
+				0x9d0	/* apps_hdr_proc_ctx_ofst; */
+				0x200	/* apps_hdr_proc_ctx_size; */
+				0x0	/* apps_hdr_proc_ctx_size_ddr; */
+				0x0	/* modem_comp_decomp_ofst; diff */
+				0x0	/* modem_comp_decomp_size; diff */
+				0xbd8	/* modem_ofst; */
+				0x1424	/* modem_size; */
+				0x1ffc	/* apps_v4_flt_hash_ofst; */
+				0x0	/* apps_v4_flt_hash_size; */
+				0x1ffc	/* apps_v4_flt_nhash_ofst; */
+				0x0	/* apps_v4_flt_nhash_size; */
+				0x1ffc	/* apps_v6_flt_hash_ofst; */
+				0x0	/* apps_v6_flt_hash_size; */
+				0x1ffc	/* apps_v6_flt_nhash_ofst; */
+				0x0	/* apps_v6_flt_nhash_size; */
+				0x80	/* uc_info_ofst; */
+				0x200	/* uc_info_size; */
+				0x2000	/* end_ofst; */
+				0x1ffc	/* apps_v4_rt_hash_ofst; */
+				0x0	/* apps_v4_rt_hash_size; */
+				0x1ffc	/* apps_v4_rt_nhash_ofst; */
+				0x0	/* apps_v4_rt_nhash_size; */
+				0x1ffc	/* apps_v6_rt_hash_ofst; */
+				0x0	/* apps_v6_rt_hash_size; */
+				0x1ffc	/* apps_v6_rt_nhash_ofst; */
+				0x0	/* apps_v6_rt_nhash_size; */
+				>;
+
+		/* smp2p gpio information */
+		qcom,smp2pgpio_map_ipa_1_out {
+			compatible = "qcom,smp2pgpio-map-ipa-1-out";
+			gpios = <&smp2pgpio_ipa_1_out 0 0>;
+		};
+
+		qcom,smp2pgpio_map_ipa_1_in {
+			compatible = "qcom,smp2pgpio-map-ipa-1-in";
+			gpios = <&smp2pgpio_ipa_1_in 0 0>;
+		};
+
+		ipa_smmu_ap: ipa_smmu_ap {
+			compatible = "qcom,ipa-smmu-ap-cb";
+			iommus = <&anoc2_smmu 0x18e0>;
+			qcom,iova-mapping = <0x10000000 0x40000000>;
+		};
+
+		ipa_smmu_wlan: ipa_smmu_wlan {
+			compatible = "qcom,ipa-smmu-wlan-cb";
+			iommus = <&anoc2_smmu 0x18e1>;
+		};
+
+		ipa_smmu_uc: ipa_smmu_uc {
+			compatible = "qcom,ipa-smmu-uc-cb";
+			iommus = <&anoc2_smmu 0x18e2>;
+			qcom,iova-mapping = <0x40000000 0x20000000>;
+		};
+	};
+
+	qcom,ipa_fws@1e08000 {
+		compatible = "qcom,pil-tz-generic";
+		qcom,pas-id = <0xF>;
+		qcom,firmware-name = "ipa_fws";
+	};
+
+	qcom,chd_silver {
+		compatible = "qcom,core-hang-detect";
+		label = "silver";
+		qcom,threshold-arr = <0x179880b0 0x179980b0
+				      0x179a80b0 0x179b80b0>;
+		qcom,config-arr = <0x179880b8 0x179980b8
+				   0x179a80b8 0x179b80b8>;
+	};
+
+	qcom,chd_gold {
+		compatible = "qcom,core-hang-detect";
+		label = "gold";
+		qcom,threshold-arr = <0x178880b0 0x178980b0
+				      0x178a80b0 0x178b80b0>;
+		qcom,config-arr = <0x178880b8 0x178980b8
+				   0x178a80b8 0x178b80b8>;
+	};
+
+	qcom,ipc-spinlock@1f40000 {
+		compatible = "qcom,ipc-spinlock-sfpb";
+		reg = <0x1f40000 0x8000>;
+		qcom,num-locks = <8>;
+	};
+
+	qcom,ghd {
+		compatible = "qcom,gladiator-hang-detect";
+		qcom,threshold-arr = <0x179d141c 0x179d1420 0x179d1424
+				      0x179d1428 0x179d142c 0x179d1430>;
+		qcom,config-reg = <0x179d1434>;
+	};
+
+	qcom,msm-gladiator-v2@17900000 {
+		compatible = "qcom,msm-gladiator-v2";
+		reg = <0x17900000 0xe000>;
+		reg-names = "gladiator_base";
+		interrupts = <0 22 0>;
+		clock-names = "atb_clk";
+		clocks = <&clock_gcc clk_qdss_clk>;
+	};
+
+	qcom,smem@86000000 {
+		compatible = "qcom,smem";
+		reg = <0x86000000 0x200000>,
+			<0x17911008 0x4>,
+			<0x778000 0x7000>,
+			<0x1fd4000 0x8>;
+		reg-names = "smem", "irq-reg-base", "aux-mem1",
+			"smem_targ_info_reg";
+		qcom,mpu-enabled;
+	};
+
+	qcom,msm-adsprpc-mem {
+		compatible = "qcom,msm-adsprpc-mem-region";
+		memory-region = <&adsp_mem>;
+	};
+
+	qcom,msm_fastrpc {
+		compatible = "qcom,msm-fastrpc-adsp";
+		qcom,fastrpc-glink;
+
+		qcom,msm_fastrpc_cpz_cb1 {
+			compatible = "qcom,msm-fastrpc-compute-cb";
+			label = "adsprpc-smd";
+			iommus = <&lpass_q6_smmu 2>;
+			qcom,secure-context-bank;
+			dma-coherent;
+		};
+		qcom,msm_fastrpc_compute_cb1 {
+			compatible = "qcom,msm-fastrpc-compute-cb";
+			label = "adsprpc-smd";
+			iommus = <&lpass_q6_smmu 8>;
+			dma-coherent;
+		};
+		qcom,msm_fastrpc_compute_cb2 {
+			compatible = "qcom,msm-fastrpc-compute-cb";
+			label = "adsprpc-smd";
+			iommus = <&lpass_q6_smmu 9>;
+			dma-coherent;
+		};
+		qcom,msm_fastrpc_compute_cb3 {
+			compatible = "qcom,msm-fastrpc-compute-cb";
+			label = "adsprpc-smd";
+			iommus = <&lpass_q6_smmu 10>;
+			dma-coherent;
+		};
+		qcom,msm_fastrpc_compute_cb4 {
+			compatible = "qcom,msm-fastrpc-compute-cb";
+			label = "adsprpc-smd";
+			iommus = <&lpass_q6_smmu 11>;
+			dma-coherent;
+		};
+		qcom,msm_fastrpc_compute_cb6 {
+			compatible = "qcom,msm-fastrpc-compute-cb";
+			label = "adsprpc-smd";
+			iommus = <&lpass_q6_smmu 5>;
+			dma-coherent;
+		};
+		qcom,msm_fastrpc_compute_cb7 {
+			compatible = "qcom,msm-fastrpc-compute-cb";
+			label = "adsprpc-smd";
+			iommus = <&lpass_q6_smmu 6>;
+			dma-coherent;
+		};
+		qcom,msm_fastrpc_compute_cb8 {
+			compatible = "qcom,msm-fastrpc-compute-cb";
+			label = "adsprpc-smd";
+			iommus = <&lpass_q6_smmu 7>;
+			dma-coherent;
+		};
+	};
+
+	rpm_bus: qcom,rpm-smd {
+		compatible = "qcom,rpm-glink";
+		qcom,glink-edge = "rpm";
+		rpm-channel-name = "rpm_requests";
+	};
+
+	glink_mpss: qcom,glink-ssr-modem {
+		compatible = "qcom,glink_ssr";
+		label = "modem";
+		qcom,edge = "mpss";
+		qcom,notify-edges = <&glink_lpass>, <&glink_dsps>, <&glink_rpm>;
+		qcom,xprt = "smem";
+	};
+
+	glink_lpass: qcom,glink-ssr-adsp {
+		compatible = "qcom,glink_ssr";
+		label = "adsp";
+		qcom,edge = "lpass";
+		qcom,notify-edges = <&glink_mpss>, <&glink_dsps>, <&glink_rpm>;
+		qcom,xprt = "smem";
+	};
+
+	glink_dsps: qcom,glink-ssr-dsps {
+		compatible = "qcom,glink_ssr";
+		label = "slpi";
+		qcom,edge = "dsps";
+		qcom,notify-edges = <&glink_mpss>, <&glink_lpass>, <&glink_rpm>;
+		qcom,xprt = "smem";
+	};
+
+	glink_rpm: qcom,glink-ssr-rpm {
+		compatible = "qcom,glink_ssr";
+		label = "rpm";
+		qcom,edge = "rpm";
+		qcom,notify-edges = <&glink_lpass>, <&glink_mpss>,
+					<&glink_dsps>, <&glink_spss>;
+		qcom,xprt = "smem";
+	};
+
+	glink_spss: qcom,glink-ssr-spss {
+		compatible = "qcom,glink_ssr";
+		label = "spss";
+		qcom,edge = "spss";
+		qcom,notify-edges = <&glink_mpss>, <&glink_lpass>,
+				<&glink_dsps>, <&glink_rpm>;
+		qcom,xprt = "mailbox";
+	};
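+
+	/*
+	 * The glink_ssr nodes form a notification mesh: when the labelled
+	 * subsystem restarts, every edge listed in qcom,notify-edges is
+	 * told, so stale G-Link state on those links can be torn down.
+	 */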
+
+	qcom,glink-smem-native-xprt-modem@86000000 {
+		compatible = "qcom,glink-smem-native-xprt";
+		reg = <0x86000000 0x200000>,
+			<0x17911008 0x4>;
+		reg-names = "smem", "irq-reg-base";
+		qcom,irq-mask = <0x8000>;
+		interrupts = <0 452 1>;
+		label = "mpss";
+	};
+
+	qcom,glink-smem-native-xprt-adsp@86000000 {
+		compatible = "qcom,glink-smem-native-xprt";
+		reg = <0x86000000 0x200000>,
+			<0x17911008 0x4>;
+		reg-names = "smem", "irq-reg-base";
+		qcom,irq-mask = <0x200>;
+		interrupts = <0 157 1>;
+		label = "lpass";
+		qcom,qos-config = <&glink_qos_adsp>;
+		qcom,ramp-time = <0xaf>;
+	};
+
+	glink_qos_adsp: qcom,glink-qos-config-adsp {
+		compatible = "qcom,glink-qos-config";
+		qcom,flow-info = <0x3c 0x0>,
+				<0x3c 0x0>,
+				<0x3c 0x0>,
+				<0x3c 0x0>;
+		qcom,mtu-size = <0x800>;
+		qcom,tput-stats-cycle = <0xa>;
+	};
+
+	qcom,glink-smem-native-xprt-dsps@86000000 {
+		compatible = "qcom,glink-smem-native-xprt";
+		reg = <0x86000000 0x200000>,
+			<0x17911008 0x4>;
+		reg-names = "smem", "irq-reg-base";
+		qcom,irq-mask = <0x8000000>;
+		interrupts = <0 179 1>;
+		label = "dsps";
+	};
+
+	qcom,glink-smem-native-xprt-rpm@778000 {
+		compatible = "qcom,glink-rpm-native-xprt";
+		reg = <0x778000 0x7000>,
+			<0x17911008 0x4>;
+		reg-names = "msgram", "irq-reg-base";
+		qcom,irq-mask = <0x1>;
+		interrupts = <0 168 1>;
+		label = "rpm";
+	};
+
+	qcom,glink-mailbox-xprt-spss@1d05008 {
+		compatible = "qcom,glink-mailbox-xprt";
+		reg = <0x1d05008 0x8>,
+			<0x1d05010 0x4>,
+			<0x1d0501c 0x4>,
+			<0x1d06008 0x4>;
+		reg-names = "mbox-loc-addr", "mbox-loc-size", "irq-reg-base",
+			"irq-rx-reset";
+		qcom,irq-mask = <0x1>;
+		interrupts = <0 348 4>;
+		label = "spss";
+		qcom,tx-ring-size = <0x800>;
+		qcom,rx-ring-size = <0x800>;
+	};
+
+	glink_spi_xprt_wdsp: qcom,glink-spi-xprt-wdsp {
+		compatible = "qcom,glink-spi-xprt";
+		label = "wdsp";
+		qcom,remote-fifo-config = <&glink_fifo_wdsp>;
+		qcom,qos-config = <&glink_qos_wdsp>;
+		qcom,ramp-time = <0x10>,
+				 <0x20>,
+				 <0x30>,
+				 <0x40>;
+	};
+
+	glink_fifo_wdsp: qcom,glink-fifo-config-wdsp {
+		compatible = "qcom,glink-fifo-config";
+		qcom,out-read-idx-reg = <0x12000>;
+		qcom,out-write-idx-reg = <0x12004>;
+		qcom,in-read-idx-reg = <0x1200C>;
+		qcom,in-write-idx-reg = <0x12010>;
+	};
+
+	glink_qos_wdsp: qcom,glink-qos-config-wdsp {
+		compatible = "qcom,glink-qos-config";
+		qcom,flow-info = <0x80 0x0>,
+				 <0x70 0x1>,
+				 <0x60 0x2>,
+				 <0x50 0x3>;
+		qcom,mtu-size = <0x800>;
+		qcom,tput-stats-cycle = <0xa>;
+	};
+
+	qcom,glink_pkt {
+		compatible = "qcom,glinkpkt";
+
+		qcom,glinkpkt-at-mdm0 {
+			qcom,glinkpkt-transport = "smem";
+			qcom,glinkpkt-edge = "mpss";
+			qcom,glinkpkt-ch-name = "DS";
+			qcom,glinkpkt-dev-name = "at_mdm0";
+		};
+
+		qcom,glinkpkt-loopback_cntl {
+			qcom,glinkpkt-transport = "lloop";
+			qcom,glinkpkt-edge = "local";
+			qcom,glinkpkt-ch-name = "LOCAL_LOOPBACK_CLNT";
+			qcom,glinkpkt-dev-name = "glink_pkt_loopback_ctrl";
+		};
+
+		qcom,glinkpkt-loopback_data {
+			qcom,glinkpkt-transport = "lloop";
+			qcom,glinkpkt-edge = "local";
+			qcom,glinkpkt-ch-name = "glink_pkt_lloop_CLNT";
+			qcom,glinkpkt-dev-name = "glink_pkt_loopback";
+		};
+
+		qcom,glinkpkt-apr-apps2 {
+			qcom,glinkpkt-transport = "smem";
+			qcom,glinkpkt-edge = "adsp";
+			qcom,glinkpkt-ch-name = "apr_apps2";
+			qcom,glinkpkt-dev-name = "apr_apps2";
+		};
+
+		qcom,glinkpkt-data40-cntl {
+			qcom,glinkpkt-transport = "smem";
+			qcom,glinkpkt-edge = "mpss";
+			qcom,glinkpkt-ch-name = "DATA40_CNTL";
+			qcom,glinkpkt-dev-name = "smdcntl8";
+		};
+
+		qcom,glinkpkt-data1 {
+			qcom,glinkpkt-transport = "smem";
+			qcom,glinkpkt-edge = "mpss";
+			qcom,glinkpkt-ch-name = "DATA1";
+			qcom,glinkpkt-dev-name = "smd7";
+		};
+
+		qcom,glinkpkt-data4 {
+			qcom,glinkpkt-transport = "smem";
+			qcom,glinkpkt-edge = "mpss";
+			qcom,glinkpkt-ch-name = "DATA4";
+			qcom,glinkpkt-dev-name = "smd8";
+		};
+
+		qcom,glinkpkt-data11 {
+			qcom,glinkpkt-transport = "smem";
+			qcom,glinkpkt-edge = "mpss";
+			qcom,glinkpkt-ch-name = "DATA11";
+			qcom,glinkpkt-dev-name = "smd11";
+		};
+	};
+
+	qcom,ipc_router {
+		compatible = "qcom,ipc_router";
+		qcom,node-id = <1>;
+	};
+
+	qcom,ipc_router_modem_xprt {
+		compatible = "qcom,ipc_router_glink_xprt";
+		qcom,ch-name = "IPCRTR";
+		qcom,xprt-remote = "mpss";
+		qcom,glink-xprt = "smem";
+		qcom,xprt-linkid = <1>;
+		qcom,xprt-version = <1>;
+		qcom,fragmented-data;
+	};
+
+	qcom,ipc_router_q6_xprt {
+		compatible = "qcom,ipc_router_glink_xprt";
+		qcom,ch-name = "IPCRTR";
+		qcom,xprt-remote = "lpass";
+		qcom,glink-xprt = "smem";
+		qcom,xprt-linkid = <1>;
+		qcom,xprt-version = <1>;
+		qcom,fragmented-data;
+	};
+
+	qcom,ipc_router_dsps_xprt {
+		compatible = "qcom,ipc_router_glink_xprt";
+		qcom,ch-name = "IPCRTR";
+		qcom,xprt-remote = "dsps";
+		qcom,glink-xprt = "smem";
+		qcom,xprt-linkid = <1>;
+		qcom,xprt-version = <1>;
+		qcom,fragmented-data;
+		qcom,dynamic-wakeup-source;
+	};
+
+	qcom,spcom {
+		compatible = "qcom,spcom";
+
+		/* predefined channels, remote side is server */
+		qcom,spcom-ch-names = "sp_kernel", "sp_ssr";
+		status = "ok";
+	};
+
+	spss_utils: qcom,spss_utils {
+		compatible = "qcom,spss-utils";
+		/* spss fuses physical address */
+		qcom,spss-fuse1-addr = <0x007841c4>;
+		qcom,spss-fuse1-bit = <27>;
+		qcom,spss-fuse2-addr = <0x0078413c>;
+		qcom,spss-fuse2-bit = <31>;
+		qcom,spss-test-firmware-name = "spss";    /* default name */
+		qcom,spss-prod-firmware-name = "spss1p";  /* 8 chars max */
+		qcom,spss-hybr-firmware-name = "spss1h";  /* 8 chars max */
+		qcom,spss-debug-reg-addr = <0x01d06020>;
+		status = "ok";
+	};
+
+	sdhc_2: sdhci@c0a4900 {
+		compatible = "qcom,sdhci-msm";
+		reg = <0xc0a4900 0x314>, <0xc0a4000 0x800>;
+		reg-names = "hc_mem", "core_mem";
+
+		interrupts = <0 125 0>, <0 221 0>;
+		interrupt-names = "hc_irq", "pwr_irq";
+
+		clock-names = "iface_clk", "core_clk";
+		clocks = <&clock_gcc clk_gcc_sdcc2_ahb_clk>,
+			 <&clock_gcc clk_gcc_sdcc2_apps_clk>;
+
+		qcom,large-address-bus;
+		qcom,bus-width = <4>;
+		qcom,cpu-dma-latency-us = <701>;
+
+		qcom,devfreq,freq-table = <52000000 200000000>;
+
+		qcom,msm-bus,name = "sdhc2";
+		qcom,msm-bus,num-cases = <8>;
+		qcom,msm-bus,num-paths = <1>;
+		qcom,msm-bus,vectors-KBps = <81 512 0 0>, /* No vote */
+				<81 512 1600 3200>,    /* 400 KB/s*/
+				<81 512 80000 160000>, /* 20 MB/s */
+				<81 512 100000 200000>, /* 25 MB/s */
+				<81 512 200000 400000>, /* 50 MB/s */
+				<81 512 400000 800000>, /* 100 MB/s */
+				<81 512 800000 800000>, /* 200 MB/s */
+				<81 512 2048000 4096000>; /* Max. bandwidth */
+		qcom,bus-bw-vectors-bps = <0 400000 20000000 25000000 50000000
+						100000000 200000000 4294967295>;
+
+		qcom,sdr104-wa;
+
+		status = "disabled";
+	};
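+
+	/*
+	 * sdhc_2 stays disabled here; a board dts is expected to enable it
+	 * and wire up the slot. A minimal, hypothetical board overlay
+	 * (the regulator name is illustrative, not taken from this file):
+	 *
+	 *	&sdhc_2 {
+	 *		vdd-supply = <&pm8998_l21>;
+	 *		status = "ok";
+	 *	};
+	 */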
+
+	ufsphy1: ufsphy@1da7000 {
+		compatible = "qcom,ufs-phy-qmp-v3";
+		reg = <0x1da7000 0xda8>;
+		reg-names = "phy_mem";
+		#phy-cells = <0>;
+		clock-names = "ref_clk_src",
+			"ref_clk",
+			"ref_aux_clk";
+		clocks = <&clock_gcc clk_ln_bb_clk1>,
+			<&clock_gcc clk_gcc_ufs_clkref_clk>,
+			<&clock_gcc clk_gcc_ufs_phy_aux_hw_ctl_clk>;
+		status = "disabled";
+	};
+
+	ufs_ice: ufsice@1db0000 {
+		compatible = "qcom,ice";
+		reg = <0x1db0000 0x8000>;
+		qcom,enable-ice-clk;
+		clock-names =   "ufs_core_clk",
+				"bus_clk",
+				"iface_clk",
+				"ice_core_clk";
+		clocks = <&clock_gcc clk_gcc_ufs_axi_clk>,
+			 <&clock_gcc clk_gcc_aggre1_ufs_axi_clk>,
+			 <&clock_gcc clk_gcc_ufs_ahb_clk>,
+			 <&clock_gcc clk_gcc_ufs_ice_core_clk>;
+		qcom,op-freq-hz =	<0>,
+					<0>,
+					<0>,
+					<300000000>;
+		vdd-hba-supply = <&gdsc_ufs>;
+		qcom,msm-bus,name = "ufs_ice_noc";
+		qcom,msm-bus,num-cases = <2>;
+		qcom,msm-bus,num-paths = <1>;
+		qcom,msm-bus,vectors-KBps =
+				<1 650 0 0>,    /* No vote */
+				<1 650 1000 0>; /* Max. bandwidth */
+		qcom,bus-vector-names = "MIN",
+					"MAX";
+		qcom,instance-type = "ufs";
+		status = "disabled";
+	};
+
+	ufs1: ufshc@1da4000 {
+		compatible = "qcom,ufshc";
+		reg = <0x1da4000 0x2500>;
+		interrupts = <0 265 0>;
+		phys = <&ufsphy1>;
+		phy-names = "ufsphy";
+		ufs-qcom-crypto = <&ufs_ice>;
+
+		clock-names =
+			"core_clk",
+			"bus_aggr_clk",
+			"iface_clk",
+			"core_clk_unipro",
+			"core_clk_ice",
+			"ref_clk",
+			"tx_lane0_sync_clk",
+			"rx_lane0_sync_clk";
+		clocks =
+			<&clock_gcc clk_gcc_ufs_axi_hw_ctl_clk>,
+			<&clock_gcc clk_gcc_aggre1_ufs_axi_hw_ctl_clk>,
+			<&clock_gcc clk_gcc_ufs_ahb_clk>,
+			<&clock_gcc clk_gcc_ufs_unipro_core_hw_ctl_clk>,
+			<&clock_gcc clk_gcc_ufs_ice_core_hw_ctl_clk>,
+			<&clock_gcc clk_ln_bb_clk1>,
+			<&clock_gcc clk_gcc_ufs_tx_symbol_0_clk>,
+			<&clock_gcc clk_gcc_ufs_rx_symbol_0_clk>;
+		freq-table-hz =
+			<50000000 200000000>,
+			<0 0>,
+			<0 0>,
+			<37500000 150000000>,
+			<75000000 300000000>,
+			<0 0>,
+			<0 0>,
+			<0 0>;
+
+		lanes-per-direction = <1>;
+
+		qcom,msm-bus,name = "ufs1";
+		qcom,msm-bus,num-cases = <22>;
+		qcom,msm-bus,num-paths = <2>;
+		qcom,msm-bus,vectors-KBps =
+		/*
+		 * During HS G3 the UFS controller runs at the nominal voltage
+		 * corner, so vote bandwidth high enough to push the other
+		 * buses in the data path to nominal as well and achieve max
+		 * throughput:
+		 * 4GBps pushes BIMC to run at nominal.
+		 * 200MBps pushes CNOC to run at nominal.
+		 * Vote for half of this bandwidth for HS G3 1-lane.
+		 * For max bandwidth, vote high enough to push the buses
+		 * to run in the turbo voltage corner.
+		 */
+		<95 512 0 0>, <1 650 0 0>,          /* No vote */
+		<95 512 922 0>, <1 650 1000 0>,     /* PWM G1 */
+		<95 512 1844 0>, <1 650 1000 0>,    /* PWM G2 */
+		<95 512 3688 0>, <1 650 1000 0>,    /* PWM G3 */
+		<95 512 7376 0>, <1 650 1000 0>,    /* PWM G4 */
+		<95 512 1844 0>, <1 650 1000 0>,    /* PWM G1 L2 */
+		<95 512 3688 0>, <1 650 1000 0>,    /* PWM G2 L2 */
+		<95 512 7376 0>, <1 650 1000 0>,    /* PWM G3 L2 */
+		<95 512 14752 0>, <1 650 1000 0>,   /* PWM G4 L2 */
+		<95 512 127796 0>, <1 650 1000 0>,  /* HS G1 RA */
+		<95 512 255591 0>, <1 650 1000 0>,  /* HS G2 RA */
+		<95 512 2097152 0>, <1 650 102400 0>,  /* HS G3 RA */
+		<95 512 255591 0>, <1 650 1000 0>,  /* HS G1 RA L2 */
+		<95 512 511181 0>, <1 650 1000 0>,  /* HS G2 RA L2 */
+		<95 512 4194304 0>, <1 650 204800 0>, /* HS G3 RA L2 */
+		<95 512 149422 0>, <1 650 1000 0>,  /* HS G1 RB */
+		<95 512 298189 0>, <1 650 1000 0>,  /* HS G2 RB */
+		<95 512 2097152 0>, <1 650 102400 0>,  /* HS G3 RB */
+		<95 512 298189 0>, <1 650 1000 0>,  /* HS G1 RB L2 */
+		<95 512 596378 0>, <1 650 1000 0>,  /* HS G2 RB L2 */
+		<95 512 4194304 0>, <1 650 204800 0>, /* HS G3 RB L2 */
+		<95 512 7643136 0>, <1 650 307200 0>; /* Max. bandwidth */
+		qcom,bus-vector-names = "MIN",
+		"PWM_G1_L1", "PWM_G2_L1", "PWM_G3_L1", "PWM_G4_L1",
+		"PWM_G1_L2", "PWM_G2_L2", "PWM_G3_L2", "PWM_G4_L2",
+		"HS_RA_G1_L1", "HS_RA_G2_L1", "HS_RA_G3_L1",
+		"HS_RA_G1_L2", "HS_RA_G2_L2", "HS_RA_G3_L2",
+		"HS_RB_G1_L1", "HS_RB_G2_L1", "HS_RB_G3_L1",
+		"HS_RB_G1_L2", "HS_RB_G2_L2", "HS_RB_G3_L2",
+		"MAX";
+
+		/* PM QoS */
+		qcom,pm-qos-cpu-groups = <0x0F 0xF0>;
+		qcom,pm-qos-cpu-group-latency-us = <70 70>;
+		qcom,pm-qos-default-cpu = <0>;
+
+		pinctrl-names = "dev-reset-assert", "dev-reset-deassert";
+		pinctrl-0 = <&ufs_dev_reset_assert>;
+		pinctrl-1 = <&ufs_dev_reset_deassert>;
+
+		resets = <&clock_gcc UFS_BCR>;
+		reset-names = "core_reset";
+
+		status = "disabled";
+	};
+
+	usb3: ssusb@a800000 {
+		compatible = "qcom,dwc-usb3-msm";
+		reg = <0x0a800000 0xf8c00>,
+		      <0x0c016000 0x400>;
+		reg-names = "core_base", "ahb2phy_base";
+		#address-cells = <1>;
+		#size-cells = <1>;
+		ranges;
+
+		interrupts = <0 347 0>, <0 243 0>, <0 180 0>;
+		interrupt-names = "hs_phy_irq", "ss_phy_irq", "pwr_event_irq";
+
+		USB3_GDSC-supply = <&gdsc_usb30>;
+		qcom,usb-dbm = <&dbm_1p5>;
+		qcom,msm-bus,name = "usb3";
+		qcom,msm-bus,num-cases = <2>;
+		qcom,msm-bus,num-paths = <1>;
+		qcom,msm-bus,vectors-KBps =
+					<61 512 0 0>,
+					<61 512 240000 800000>;
+
+		qcom,dwc-usb3-msm-tx-fifo-size = <21288>;
+		extcon = <&pmi8998_pdphy>;
+
+		clocks = <&clock_gcc clk_gcc_usb30_master_clk>,
+			<&clock_gcc clk_gcc_cfg_noc_usb3_axi_clk>,
+			<&clock_gcc clk_gcc_aggre1_usb3_axi_clk>,
+			<&clock_gcc clk_gcc_usb30_mock_utmi_clk>,
+			<&clock_gcc clk_gcc_usb30_sleep_clk>,
+			<&clock_gcc clk_cxo_dwc3_clk>;
+
+		clock-names = "core_clk", "iface_clk", "bus_aggr_clk",
+				"utmi_clk", "sleep_clk", "xo";
+
+		qcom,core-clk-rate = <120000000>;
+		qcom,core-clk-rate-hs = <60000000>;
+
+		resets = <&clock_gcc USB_30_BCR>;
+		reset-names = "core_reset";
+
+		dwc3@a800000 {
+			compatible = "snps,dwc3";
+			reg = <0x0a800000 0xcd00>;
+			interrupt-parent = <&intc>;
+			interrupts = <0 131 0>;
+			usb-phy = <&qusb_phy0>, <&ssphy>;
+			tx-fifo-resize;
+			snps,nominal-elastic-buffer;
+			snps,disable-clk-gating;
+			snps,has-lpm-erratum;
+			snps,hird-threshold = /bits/ 8 <0x10>;
+			snps,num-gsi-evt-buffs = <0x3>;
+		};
+
+		qcom,usbbam@a904000 {
+			compatible = "qcom,usb-bam-msm";
+			reg = <0xa904000 0x17000>;
+			interrupt-parent = <&intc>;
+			interrupts = <0 132 0>;
+
+			qcom,bam-type = <0>;
+			qcom,usb-bam-fifo-baseaddr = <0x146bb000>;
+			qcom,usb-bam-num-pipes = <8>;
+			qcom,ignore-core-reset-ack;
+			qcom,disable-clk-gating;
+			qcom,usb-bam-override-threshold = <0x4001>;
+			qcom,usb-bam-max-mbps-highspeed = <400>;
+			qcom,usb-bam-max-mbps-superspeed = <3600>;
+			qcom,reset-bam-on-connect;
+
+			qcom,pipe0 {
+				label = "ssusb-qdss-in-0";
+				qcom,usb-bam-mem-type = <2>;
+				qcom,dir = <1>;
+				qcom,pipe-num = <0>;
+				qcom,peer-bam = <0>;
+				qcom,peer-bam-physical-address = <0x6064000>;
+				qcom,src-bam-pipe-index = <0>;
+				qcom,dst-bam-pipe-index = <0>;
+				qcom,data-fifo-offset = <0x0>;
+				qcom,data-fifo-size = <0x1800>;
+				qcom,descriptor-fifo-offset = <0x1800>;
+				qcom,descriptor-fifo-size = <0x800>;
+			};
+		};
+	};
+
+	qusb_phy0: qusb@c012000 {
+		compatible = "qcom,qusb2phy-v2";
+		reg = <0x0c012000 0x2a8>,
+		      <0x01fcb24c 0x4>;
+		reg-names = "qusb_phy_base",
+				"tcsr_clamp_dig_n_1p8";
+		vdd-supply = <&pm8998_l1>;
+		vdda12-supply = <&pm8998_l2>;
+		vdda18-supply = <&pm8998_l12>;
+		vdda33-supply = <&pm8998_l24>;
+		qcom,vdd-voltage-level = <0 880000 880000>;
+		qcom,vdda33-voltage-level = <2400000 3088000 3088000>;
+		qcom,qusb-phy-init-seq =
+				/* <value reg_offset> */
+					<0x80 0x0
+					0x13 0x04
+					0x7c 0x18c
+					0x80 0x2c
+					0x0a 0x184
+					0x00 0x240>;
+		phy_type = "utmi";
+
+		clocks = <&clock_gcc clk_ln_bb_clk1>,
+			 <&clock_gcc clk_gcc_rx1_usb2_clkref_clk>;
+		clock-names = "ref_clk_src", "ref_clk";
+
+		resets = <&clock_gcc QUSB2PHY_PRIM_BCR>;
+		reset-names = "phy_reset";
+	};
+
+	ssphy: ssphy@c010000 {
+		compatible = "qcom,usb-ssphy-qmp-v2";
+		reg = <0x0c010000 0xe0c>,
+		      <0x01fcb244 0x4>,
+		      <0x01fcb248 0x4>;
+		reg-names = "qmp_phy_base",
+			    "vls_clamp_reg",
+			    "tcsr_usb3_dp_phymode";
+		vdd-supply = <&pm8998_l1>;
+		core-supply = <&pm8998_l2>;
+		qcom,vdd-voltage-level = <0 880000 880000>;
+		qcom,vbus-valid-override;
+		qcom,qmp-phy-init-seq =
+			/* <reg_offset, value, delay> */
+			<0x138 0x30 0x00
+			 0x034 0x04 0x01
+			 0x080 0x14 0x00
+			 0x03c 0x06 0x00
+			 0x08c 0x08 0x00
+			 0x15c 0x06 0x00
+			 0x164 0x01 0x00
+			 0x13c 0x80 0x00
+			 0x0b0 0x82 0x00
+			 0x0b8 0xab 0x00
+			 0x0bc 0xea 0x00
+			 0x0c0 0x02 0x00
+			 0x060 0x06 0x00
+			 0x068 0x16 0x00
+			 0x070 0x36 0x00
+			 0x0dc 0x00 0x00
+			 0x0d8 0x3f 0x00
+			 0x0f8 0x01 0x00
+			 0x0f4 0xc9 0x00
+			 0x148 0x0a 0x00
+			 0x0a0 0x00 0x00
+			 0x09c 0x34 0x00
+			 0x098 0x15 0x00
+			 0x090 0x04 0x00
+			 0x154 0x00 0x00
+			 0x094 0x00 0x00
+			 0x0f0 0x00 0x00
+			 0x00c 0x0a 0x00
+			 0x048 0x07 0x00
+			 0x0d0 0x80 0x00
+			 0x184 0x01 0x00
+			 0x010 0x01 0x00
+			 0x01c 0x31 0x00
+			 0x020 0x01 0x00
+			 0x014 0x00 0x00
+			 0x018 0x00 0x00
+			 0x024 0x85 0x00
+			 0x028 0x07 0x00
+			 0x430 0x0b 0x00
+			 0x4d4 0x0f 0x00
+			 0x4d8 0x4e 0x00
+			 0x4dc 0x18 0x00
+			 0x4f8 0x07 0x00
+			 0x4fc 0x80 0x00
+			 0x504 0x43 0x00
+			 0x50c 0x1c 0x00
+			 0x434 0x75 0x00
+			 0x43c 0x00 0x00
+			 0x440 0x00 0x00
+			 0x444 0x80 0x00
+			 0x408 0x0a 0x00
+			 0x414 0x06 0x00
+			 0x500 0x00 0x00
+			 0x4c0 0x03 0x00
+			 0x564 0x05 0x00
+			 0x830 0x0b 0x00
+			 0x8d4 0x0f 0x00
+			 0x8d8 0x4e 0x00
+			 0x8dc 0x18 0x00
+			 0x8f8 0x07 0x00
+			 0x8fc 0x80 0x00
+			 0x904 0x43 0x00
+			 0x90c 0x1c 0x00
+			 0x834 0x75 0x00
+			 0x83c 0x00 0x00
+			 0x840 0x00 0x00
+			 0x844 0x80 0x00
+			 0x808 0x0a 0x00
+			 0x814 0x06 0x00
+			 0x900 0x00 0x00
+			 0x8c0 0x03 0x00
+			 0x964 0x05 0x00
+			 0x260 0x10 0x00
+			 0x2a4 0x12 0x00
+			 0x28c 0x16 0x00
+			 0x244 0x00 0x00
+			 0x660 0x10 0x00
+			 0x6a4 0x12 0x00
+			 0x68c 0x16 0x00
+			 0x644 0x00 0x00
+			 0xcc8 0x83 0x00
+			 0xccc 0x09 0x00
+			 0xcd0 0xa2 0x00
+			 0xcd4 0x40 0x00
+			 0xcc4 0x02 0x00
+			 0xc80 0xd1 0x00
+			 0xc84 0x1f 0x00
+			 0xc88 0x47 0x00
+			 0xc64 0x1b 0x00
+			 0xc0c 0x9f 0x00
+			 0xc10 0x9f 0x00
+			 0xc14 0xb7 0x00
+			 0xc18 0x4e 0x00
+			 0xc1c 0x65 0x00
+			 0xc20 0x6b 0x00
+			 0xc24 0x15 0x00
+			 0xc28 0x0d 0x00
+			 0xc2c 0x15 0x00
+			 0xc30 0x0d 0x00
+			 0xc34 0x15 0x00
+			 0xc38 0x0d 0x00
+			 0xc3c 0x15 0x00
+			 0xc40 0x0d 0x00
+			 0xc44 0x15 0x00
+			 0xc48 0x0d 0x00
+			 0xc4c 0x15 0x00
+			 0xc50 0x0d 0x00
+			 0xc5c 0x02 0x00
+			 0xca0 0x04 0x00
+			 0xc8c 0x44 0x00
+			 0xc70 0xe7 0x00
+			 0xc74 0x03 0x00
+			 0xc78 0x40 0x00
+			 0xc7c 0x00 0x00
+			 0xdd8 0x8a 0x00
+			 0xcb8 0x75 0x00
+			 0xcb0 0x86 0x00
+			 0xcbc 0x13 0x00
+			 0xffffffff 0xffffffff 0x00>;
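+		/*
+		 * The all-ones entry above is assumed to be a sentinel that
+		 * terminates the init sequence.
+		 */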
+
+		qcom,qmp-phy-reg-offset =
+				<0xd74 /* USB3_PHY_PCS_STATUS */
+				 0xcd8 /* USB3_PHY_AUTONOMOUS_MODE_CTRL */
+				 0xcdc /* USB3_PHY_LFPS_RXTERM_IRQ_CLEAR */
+				 0xc04 /* USB3_PHY_POWER_DOWN_CONTROL */
+				 0xc00 /* USB3_PHY_SW_RESET */
+				 0xc08 /* USB3_PHY_START */
+				 0xa00>; /* USB3PHY_PCS_MISC_TYPEC_CTRL */
+
+		clocks = <&clock_gcc clk_gcc_usb3_phy_aux_clk>,
+			 <&clock_gcc clk_gcc_usb3_phy_pipe_clk>,
+			 <&clock_gcc clk_ln_bb_clk1>,
+			 <&clock_gcc clk_gcc_usb3_clkref_clk>;
+
+		clock-names = "aux_clk", "pipe_clk", "ref_clk_src",
+				"ref_clk";
+
+		resets = <&clock_gcc USB3_PHY_BCR>,
+			 <&clock_gcc USB3PHY_PHY_BCR>;
+		reset-names = "phy_reset", "phy_phy_reset";
+	};
+
+	usb_audio_qmi_dev {
+		compatible = "qcom,usb-audio-qmi-dev";
+		iommus = <&lpass_q6_smmu 12>;
+		qcom,usb-audio-stream-id = <12>;
+		qcom,usb-audio-intr-num = <2>;
+	};
+
+	dbm_1p5: dbm@a8f8000 {
+		compatible = "qcom,usb-dbm-1p5";
+		reg = <0xa8f8000 0x300>;
+		qcom,reset-ep-after-lpm-resume;
+	};
+
+	usb_nop_phy: usb_nop_phy {
+		compatible = "usb-nop-xceiv";
+	};
+
+	qcom,lpass@17300000 {
+		compatible = "qcom,pil-tz-generic";
+		reg = <0x17300000 0x00100>;
+		interrupts = <0 162 1>;
+
+		vdd_cx-supply = <&pm8998_s1_level>;
+		qcom,proxy-reg-names = "vdd_cx";
+		qcom,vdd_cx-uV-uA = <RPM_SMD_REGULATOR_LEVEL_TURBO 100000>;
+
+		clocks = <&clock_gcc clk_cxo_pil_lpass_clk>;
+		clock-names = "xo";
+		qcom,proxy-clock-names = "xo";
+
+		qcom,pas-id = <1>;
+		qcom,proxy-timeout-ms = <10000>;
+		qcom,smem-id = <423>;
+		qcom,sysmon-id = <1>;
+		status = "ok";
+		qcom,ssctl-instance-id = <0x14>;
+		qcom,firmware-name = "adsp";
+		memory-region = <&pil_adsp_mem>;
+
+		/* GPIO inputs from lpass */
+		qcom,gpio-err-fatal = <&smp2pgpio_ssr_smp2p_2_in 0 0>;
+		qcom,gpio-proxy-unvote = <&smp2pgpio_ssr_smp2p_2_in 2 0>;
+		qcom,gpio-err-ready = <&smp2pgpio_ssr_smp2p_2_in 1 0>;
+		qcom,gpio-stop-ack = <&smp2pgpio_ssr_smp2p_2_in 3 0>;
+
+		/* GPIO output to lpass */
+		qcom,gpio-force-stop = <&smp2pgpio_ssr_smp2p_2_out 0 0>;
+	};
+
+	qcom,memshare {
+		compatible = "qcom,memshare";
+
+		qcom,client_1 {
+			compatible = "qcom,memshare-peripheral";
+			qcom,peripheral-size = <0x200000>;
+			qcom,client-id = <0>;
+			qcom,allocate-boot-time;
+			label = "modem";
+		};
+
+		qcom,client_2 {
+			compatible = "qcom,memshare-peripheral";
+			qcom,peripheral-size = <0x300000>;
+			qcom,client-id = <2>;
+			label = "modem";
+		};
+
+		mem_client_3_size: qcom,client_3 {
+			compatible = "qcom,memshare-peripheral";
+			qcom,peripheral-size = <0x0>;
+			qcom,client-id = <1>;
+			qcom,allocate-boot-time;
+			label = "modem";
+		};
+	};
+
+	pil_modem: qcom,mss@4080000 {
+		compatible = "qcom,pil-q6v55-mss";
+		reg = <0x4080000 0x100>,
+		      <0x1f63000 0x008>,
+		      <0x1f65000 0x008>,
+		      <0x1f64000 0x008>,
+		      <0x4180000 0x020>,
+		      <0x00179000 0x004>;
+		reg-names = "qdsp6_base", "halt_q6", "halt_modem",
+			    "halt_nc", "rmb_base", "restart_reg";
+
+		clocks = <&clock_gcc clk_cxo_clk_src>,
+			 <&clock_gcc clk_gcc_mss_cfg_ahb_clk>,
+			 <&clock_gcc clk_gcc_bimc_mss_q6_axi_clk>,
+			 <&clock_gcc clk_gcc_boot_rom_ahb_clk>,
+			 <&clock_gcc clk_gpll0_out_msscc>,
+			 <&clock_gcc clk_gcc_mss_snoc_axi_clk>,
+			 <&clock_gcc clk_gcc_mss_mnoc_bimc_axi_clk>,
+			 <&clock_gcc clk_qdss_clk>;
+		clock-names = "xo", "iface_clk", "bus_clk",
+			      "mem_clk", "gpll0_mss_clk", "snoc_axi_clk",
+			      "mnoc_axi_clk", "qdss_clk";
+		qcom,proxy-clock-names = "xo", "qdss_clk", "mem_clk";
+		qcom,active-clock-names = "iface_clk", "bus_clk",
+			"gpll0_mss_clk", "snoc_axi_clk", "mnoc_axi_clk";
+
+		interrupts = <0 448 1>;
+		vdd_cx-supply = <&pm8998_s1_level>;
+		vdd_cx-voltage = <RPM_SMD_REGULATOR_LEVEL_TURBO>;
+		vdd_mx-supply = <&pm8998_s9_level>;
+		vdd_mx-uV = <RPM_SMD_REGULATOR_LEVEL_TURBO>;
+		qcom,firmware-name = "modem";
+		qcom,pil-self-auth;
+		qcom,sysmon-id = <0>;
+		qcom,ssctl-instance-id = <0x12>;
+		qcom,qdsp6v62-1-2;
+		status = "ok";
+		memory-region = <&modem_mem>;
+		qcom,mem-protect-id = <0xF>;
+
+		/* GPIO inputs from mss */
+		qcom,gpio-err-fatal = <&smp2pgpio_ssr_smp2p_1_in 0 0>;
+		qcom,gpio-err-ready = <&smp2pgpio_ssr_smp2p_1_in 1 0>;
+		qcom,gpio-proxy-unvote = <&smp2pgpio_ssr_smp2p_1_in 2 0>;
+		qcom,gpio-stop-ack = <&smp2pgpio_ssr_smp2p_1_in 3 0>;
+		qcom,gpio-shutdown-ack = <&smp2pgpio_ssr_smp2p_1_in 7 0>;
+
+		/* GPIO output to mss */
+		qcom,gpio-force-stop = <&smp2pgpio_ssr_smp2p_1_out 0 0>;
+		qcom,mba-mem@0 {
+			compatible = "qcom,pil-mba-mem";
+			memory-region = <&pil_mba_mem>;
+		};
+	};
+
+	tsens0: tsens@10aa000 {
+		compatible = "qcom,msm8998-tsens";
+		reg = <0x10aa000 0x2000>;
+		reg-names = "tsens_physical";
+		interrupts = <0 458 0>, <0 445 0>;
+		interrupt-names = "tsens-upper-lower", "tsens-critical";
+		qcom,client-id = <0 1 2 3 4 7 8 9 10 11 12 13>;
+		qcom,sensor-id = <0 1 2 3 4 7 8 9 10 11 12 13>;
+		qcom,sensors = <12>;
+	};
+
+	tsens1: tsens@10ad000 {
+		compatible = "qcom,msm8998-tsens";
+		reg = <0x10ad000 0x2000>;
+		reg-names = "tsens_physical";
+		interrupts = <0 184 0>, <0 430 0>;
+		interrupt-names = "tsens-upper-lower", "tsens-critical";
+		qcom,client-id = <14 15 16 17 18 19 20 21>;
+		qcom,sensor-id = <0 1 3 4 5 6 7 2>;
+		qcom,sensors = <8>;
+	};
+
+	qcom,qbt1000 {
+		compatible = "qcom,qbt1000";
+		clock-names = "core", "iface";
+		clocks = <&clock_gcc clk_gcc_blsp2_qup6_spi_apps_clk>,
+			<&clock_gcc clk_gcc_blsp2_ahb_clk>;
+		clock-frequency = <15000000>;
+		qcom,ipc-gpio = <&tlmm 121 0>;
+		qcom,finger-detect-gpio = <&pm8998_gpios 2 0>;
+	};
+
+	qcom,sensor-information {
+		compatible = "qcom,sensor-information";
+		sensor_information0: qcom,sensor-information-0 {
+			qcom,sensor-type = "tsens";
+			qcom,sensor-name = "tsens_tz_sensor0";
+			qcom,scaling-factor = <10>;
+		};
+		sensor_information1: qcom,sensor-information-1 {
+			qcom,sensor-type = "tsens";
+			qcom,sensor-name = "tsens_tz_sensor1";
+			qcom,scaling-factor = <10>;
+		};
+		sensor_information2: qcom,sensor-information-2 {
+			qcom,sensor-type = "tsens";
+			qcom,sensor-name = "tsens_tz_sensor2";
+			qcom,scaling-factor = <10>;
+		};
+		sensor_information3: qcom,sensor-information-3 {
+			qcom,sensor-type = "tsens";
+			qcom,sensor-name = "tsens_tz_sensor3";
+			qcom,scaling-factor = <10>;
+		};
+		sensor_information4: qcom,sensor-information-4 {
+			qcom,sensor-type = "tsens";
+			qcom,sensor-name = "tsens_tz_sensor4";
+			qcom,scaling-factor = <10>;
+		};
+		sensor_information7: qcom,sensor-information-7 {
+			qcom,sensor-type = "tsens";
+			qcom,sensor-name = "tsens_tz_sensor7";
+			qcom,scaling-factor = <10>;
+		};
+		sensor_information8: qcom,sensor-information-8 {
+			qcom,sensor-type = "tsens";
+			qcom,sensor-name = "tsens_tz_sensor8";
+			qcom,scaling-factor = <10>;
+		};
+		sensor_information9: qcom,sensor-information-9 {
+			qcom,sensor-type = "tsens";
+			qcom,sensor-name = "tsens_tz_sensor9";
+			qcom,scaling-factor = <10>;
+		};
+		sensor_information10: qcom,sensor-information-10 {
+			qcom,sensor-type = "tsens";
+			qcom,sensor-name = "tsens_tz_sensor10";
+			qcom,scaling-factor = <10>;
+		};
+		sensor_information11: qcom,sensor-information-11 {
+			qcom,sensor-type = "tsens";
+			qcom,sensor-name = "tsens_tz_sensor11";
+			qcom,scaling-factor = <10>;
+		};
+		sensor_information12: qcom,sensor-information-12 {
+			qcom,sensor-type = "tsens";
+			qcom,sensor-name = "tsens_tz_sensor12";
+			qcom,scaling-factor = <10>;
+			qcom,alias-name = "gpu_1";
+		};
+		sensor_information13: qcom,sensor-information-13 {
+			qcom,sensor-type = "tsens";
+			qcom,sensor-name = "tsens_tz_sensor13";
+			qcom,scaling-factor = <10>;
+			qcom,alias-name = "gpu";
+		};
+		sensor_information14: qcom,sensor-information-14 {
+			qcom,sensor-type = "tsens";
+			qcom,sensor-name = "tsens_tz_sensor14";
+			qcom,scaling-factor = <10>;
+		};
+		sensor_information15: qcom,sensor-information-15 {
+			qcom,sensor-type = "tsens";
+			qcom,sensor-name = "tsens_tz_sensor15";
+			qcom,scaling-factor = <10>;
+			qcom,alias-name = "modem_dsp";
+		};
+		sensor_information16: qcom,sensor-information-16 {
+			qcom,sensor-type = "tsens";
+			qcom,sensor-name = "tsens_tz_sensor16";
+			qcom,scaling-factor = <10>;
+		};
+		sensor_information17: qcom,sensor-information-17 {
+			qcom,sensor-type = "tsens";
+			qcom,sensor-name = "tsens_tz_sensor17";
+			qcom,scaling-factor = <10>;
+			qcom,alias-name = "hvx";
+		};
+		sensor_information18: qcom,sensor-information-18 {
+			qcom,sensor-type = "tsens";
+			qcom,sensor-name = "tsens_tz_sensor18";
+			qcom,scaling-factor = <10>;
+			qcom,alias-name = "camera";
+		};
+		sensor_information19: qcom,sensor-information-19 {
+			qcom,sensor-type = "tsens";
+			qcom,sensor-name = "tsens_tz_sensor19";
+			qcom,scaling-factor = <10>;
+			qcom,alias-name = "multi_media_ss";
+		};
+		sensor_information20: qcom,sensor-information-20 {
+			qcom,sensor-type = "tsens";
+			qcom,sensor-name = "tsens_tz_sensor20";
+			qcom,scaling-factor = <10>;
+			qcom,alias-name = "modem";
+		};
+		sensor_information21: qcom,sensor-information-21 {
+			qcom,sensor-type = "tsens";
+			qcom,sensor-name = "tsens_tz_sensor21";
+			qcom,scaling-factor = <10>;
+			qcom,alias-name = "pop_mem";
+		};
+		sensor_information22: qcom,sensor-information-22 {
+			qcom,sensor-type = "alarm";
+			qcom,sensor-name = "pm8998_tz";
+			qcom,scaling-factor = <1000>;
+		};
+		sensor_information23: qcom,sensor-information-23 {
+			qcom,sensor-type = "adc";
+			qcom,sensor-name = "msm_therm";
+		};
+		sensor_information24: qcom,sensor-information-24 {
+			qcom,sensor-type = "adc";
+			qcom,sensor-name = "emmc_therm";
+		};
+		sensor_information25: qcom,sensor-information-25 {
+			qcom,sensor-type = "adc";
+			qcom,sensor-name = "pa_therm0";
+		};
+		sensor_information26: qcom,sensor-information-26 {
+			qcom,sensor-type = "adc";
+			qcom,sensor-name = "pa_therm1";
+		};
+		sensor_information27: qcom,sensor-information-27 {
+			qcom,sensor-type = "adc";
+			qcom,sensor-name = "quiet_therm";
+		};
+		sensor_information28: qcom,sensor-information-28 {
+			qcom,sensor-type = "llm";
+			qcom,sensor-name = "limits_sensor-01";
+		};
+		sensor_information29: qcom,sensor-information-29 {
+			qcom,sensor-type = "llm";
+			qcom,sensor-name = "limits_sensor-02";
+		};
+	};
+
+	qcom_seecom: qseecom@86600000 {
+		compatible = "qcom,qseecom";
+		reg = <0x86600000 0x2200000>;
+		reg-names = "secapp-region";
+		qcom,hlos-num-ce-hw-instances = <1>;
+		qcom,hlos-ce-hw-instance = <0>;
+		qcom,qsee-ce-hw-instance = <0>;
+		qcom,disk-encrypt-pipe-pair = <2>;
+		qcom,support-fde;
+		qcom,no-clock-support;
+		qcom,appsbl-qseecom-support;
+		qcom,fde-key-size;
+		qcom,commonlib64-loaded-by-uefi;
+		qcom,msm-bus,name = "qseecom-noc";
+		qcom,msm-bus,num-cases = <4>;
+		qcom,msm-bus,num-paths = <1>;
+		qcom,msm-bus,vectors-KBps =
+				<55 512 0 0>,
+				<55 512 0 0>,
+				<55 512 120000 1200000>,
+				<55 512 393600 3936000>;
+		clock-names = "core_clk_src", "core_clk",
+				"iface_clk", "bus_clk";
+		clocks = <&clock_gcc clk_ce1_clk>,
+			 <&clock_gcc clk_qseecom_ce1_clk>,
+			 <&clock_gcc clk_gcc_ce1_ahb_m_clk>,
+			 <&clock_gcc clk_gcc_ce1_axi_m_clk>;
+		qcom,ce-opp-freq = <171430000>;
+		qcom,qsee-reentrancy-support = <2>;
+	};
+
+	qcom_tzlog: tz-log@146BF720 {
+		compatible = "qcom,tz-log";
+		reg = <0x146BF720 0x3000>;
+		qcom,hyplog-enabled;
+		hyplog-address-offset = <0x410>; /* 0x066BFB30 */
+		hyplog-size-offset = <0x414>;    /* 0x066BFB34 */
+	};
+
+	qcom_msmhdcp: qcom,msm_hdcp {
+		compatible = "qcom,msm-hdcp";
+	};
+
+	qcom_crypto: qcrypto@1DE0000 {
+		compatible = "qcom,qcrypto";
+		reg = <0x1DE0000 0x20000>,
+		      <0x1DC4000 0x24000>;
+		reg-names = "crypto-base","crypto-bam-base";
+		interrupts = <0 206 0>;
+		qcom,bam-pipe-pair = <2>;
+		qcom,ce-hw-instance = <0>;
+		qcom,ce-device = <0>;
+		qcom,bam-ee = <0>;
+		qcom,ce-hw-shared;
+		qcom,clk-mgmt-sus-res;
+		qcom,msm-bus,name = "qcrypto-noc";
+		qcom,msm-bus,num-cases = <2>;
+		qcom,msm-bus,num-paths = <1>;
+		qcom,msm-bus,vectors-KBps =
+				<55 512 0 0>,
+				<55 512 3936000 393600>;
+		clock-names = "core_clk_src", "core_clk",
+				"iface_clk", "bus_clk";
+		clocks = <&clock_gcc clk_qcrypto_ce1_clk>,
+			 <&clock_gcc clk_qcrypto_ce1_clk>,
+			 <&clock_gcc clk_gcc_ce1_ahb_m_clk>,
+			 <&clock_gcc clk_gcc_ce1_axi_m_clk>;
+		qcom,ce-opp-freq = <171430000>;
+		qcom,use-sw-aes-cbc-ecb-ctr-algo;
+		qcom,use-sw-aes-xts-algo;
+		qcom,use-sw-aes-ccm-algo;
+		qcom,use-sw-ahash-algo;
+		qcom,use-sw-aead-algo;
+		qcom,use-sw-hmac-algo;
+	};
+
+	qcom_cedev: qcedev@1DE0000 {
+		compatible = "qcom,qcedev";
+		reg = <0x1DE0000 0x20000>,
+		      <0x1DC4000 0x24000>;
+		reg-names = "crypto-base", "crypto-bam-base";
+		interrupts = <0 206 0>;
+		qcom,bam-pipe-pair = <1>;
+		qcom,ce-hw-instance = <0>;
+		qcom,ce-device = <0>;
+		qcom,ce-hw-shared;
+		qcom,bam-ee = <0>;
+		qcom,msm-bus,name = "qcedev-noc";
+		qcom,msm-bus,num-cases = <2>;
+		qcom,msm-bus,num-paths = <1>;
+		qcom,msm-bus,vectors-KBps =
+				<55 512 0 0>,
+				<55 512 3936000 393600>;
+		clock-names = "core_clk_src", "core_clk",
+				"iface_clk", "bus_clk";
+		clocks = <&clock_gcc clk_qcedev_ce1_clk>,
+			 <&clock_gcc clk_qcedev_ce1_clk>,
+			 <&clock_gcc clk_gcc_ce1_ahb_m_clk>,
+			 <&clock_gcc clk_gcc_ce1_axi_m_clk>;
+		qcom,ce-opp-freq = <171430000>;
+	};
+
+	qcom_rng: qrng@793000 {
+		compatible = "qcom,msm-rng";
+		reg = <0x793000 0x1000>;
+		qcom,msm-rng-iface-clk;
+		qcom,no-qrng-config;
+		qcom,msm-bus,name = "msm-rng-noc";
+		qcom,msm-bus,num-cases = <2>;
+		qcom,msm-bus,num-paths = <1>;
+		qcom,msm-bus,vectors-KBps =
+			<1 618 0 0>,	/* No vote */
+			<1 618 0 800>;	/* 100 MB/s */
+		clocks = <&clock_gcc clk_gcc_prng_ahb_clk>;
+		clock-names = "iface_clk";
+	};
+
+	mitigation_profile0: qcom,limit_info-0 {
+		qcom,temperature-sensor = <&sensor_information1>;
+		qcom,hotplug-mitigation-enable;
+	};
+
+	mitigation_profile1: qcom,limit_info-1 {
+		qcom,temperature-sensor = <&sensor_information2>;
+		qcom,hotplug-mitigation-enable;
+	};
+
+	mitigation_profile2: qcom,limit_info-2 {
+		qcom,temperature-sensor = <&sensor_information3>;
+		qcom,hotplug-mitigation-enable;
+	};
+
+	mitigation_profile3: qcom,limit_info-3 {
+		qcom,temperature-sensor = <&sensor_information4>;
+		qcom,hotplug-mitigation-enable;
+	};
+
+	mitigation_profile4: qcom,limit_info-4 {
+		qcom,temperature-sensor = <&sensor_information7>;
+		qcom,hotplug-mitigation-enable;
+	};
+
+	mitigation_profile5: qcom,limit_info-5 {
+		qcom,temperature-sensor = <&sensor_information8>;
+		qcom,hotplug-mitigation-enable;
+	};
+
+	mitigation_profile6: qcom,limit_info-6 {
+		qcom,temperature-sensor = <&sensor_information9>;
+		qcom,hotplug-mitigation-enable;
+	};
+
+	mitigation_profile7: qcom,limit_info-7 {
+		qcom,temperature-sensor = <&sensor_information10>;
+		qcom,hotplug-mitigation-enable;
+	};
+
+	qcom,lmh {
+		compatible = "qcom,lmh_v1";
+		interrupts = <GIC_SPI 134 IRQ_TYPE_LEVEL_HIGH>;
+	};
+
+	qcom,msm-thermal {
+		compatible = "qcom,msm-thermal";
+		qcom,sensor-id = <1>;
+		qcom,poll-ms = <100>;
+		qcom,therm-reset-temp = <115>;
+		qcom,core-limit-temp = <90>;
+		qcom,core-temp-hysteresis = <10>;
+		qcom,hotplug-temp = <105>;
+		qcom,hotplug-temp-hysteresis = <20>;
+		qcom,online-hotplug-core;
+		qcom,synchronous-cluster-id = <0 1>;
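+		/*
+		 * Assumed map format: <cluster-id cpu-count cpu-phandles...>,
+		 * so mitigation is applied synchronously across each
+		 * four-CPU cluster.
+		 */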
+		qcom,synchronous-cluster-map = <0 4 &CPU0 &CPU1 &CPU2 &CPU3>,
+						<1 4 &CPU4 &CPU5 &CPU6 &CPU7>;
+		clock-names = "osm";
+		clocks = <&clock_cpu clk_pwrcl_clk>;
+
+		qcom,vdd-restriction-temp = <5>;
+		qcom,vdd-restriction-temp-hysteresis = <10>;
+
+		vdd-dig-supply = <&pm8998_s1_floor_level>;
+		vdd-gfx-supply = <&gfx_vreg>;
+
+		qcom,vdd-dig-rstr {
+			qcom,vdd-rstr-reg = "vdd-dig";
+			qcom,levels = <RPM_SMD_REGULATOR_LEVEL_NOM
+					RPM_SMD_REGULATOR_LEVEL_TURBO
+					RPM_SMD_REGULATOR_LEVEL_TURBO>;
+				/* Nominal, Super Turbo, Super Turbo */
+			qcom,min-level = <RPM_SMD_REGULATOR_LEVEL_NONE>;
+				/* No Request */
+		};
+
+		qcom,vdd-gfx-rstr {
+			qcom,vdd-rstr-reg = "vdd-gfx";
+			qcom,levels = <5 6 6>; /* Nominal, Turbo, Turbo */
+			qcom,min-level = <1>; /* No Request */
+		};
+
+		msm_thermal_freq: qcom,vdd-apps-rstr {
+			qcom,vdd-rstr-reg = "vdd-apps";
+			qcom,levels = <1248000>;
+			qcom,freq-req;
+		};
+	};
+
+	pcie0: qcom,pcie@01c00000 {
+		compatible = "qcom,pci-msm";
+		cell-index = <0>;
+
+		reg = <0x1c00000 0x2000>,
+		      <0x1c06000 0x1000>,
+		      <0x1b000000 0xf1d>,
+		      <0x1b000f20 0xa8>,
+		      <0x1b100000 0x100000>,
+		      <0x1b200000 0x100000>,
+		      <0x1b300000 0xd00000>;
+
+		reg-names = "parf", "phy", "dm_core", "elbi",
+				"conf", "io", "bars";
+
+		#address-cells = <3>;
+		#size-cells = <2>;
+		ranges = <0x01000000 0x0 0x1b200000 0x1b200000 0x0 0x100000>,
+			<0x02000000 0x0 0x1b300000 0x1b300000 0x0 0xd00000>;
+		interrupt-parent = <&pcie0>;
+		interrupts = <0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19
+				20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35
+				36 37>;
+		#interrupt-cells = <1>;
+		interrupt-map-mask = <0 0 0 0xffffffff>;
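+		/*
+		 * Standard interrupt-map layout: a child interrupt specifier
+		 * (three address cells plus one interrupt cell here), the
+		 * parent controller phandle, then the parent address and
+		 * interrupt specifier; the mask above matches on the child
+		 * interrupt number only.
+		 */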
+		interrupt-map = <0 0 0 0 &intc 0 0 405 0
+				0 0 0 1 &intc 0 0 135 0
+				0 0 0 2 &intc 0 0 136 0
+				0 0 0 3 &intc 0 0 138 0
+				0 0 0 4 &intc 0 0 139 0
+				0 0 0 5 &intc 0 0 278 0
+				0 0 0 6 &intc 0 0 576 0
+				0 0 0 7 &intc 0 0 577 0
+				0 0 0 8 &intc 0 0 578 0
+				0 0 0 9 &intc 0 0 579 0
+				0 0 0 10 &intc 0 0 580 0
+				0 0 0 11 &intc 0 0 581 0
+				0 0 0 12 &intc 0 0 582 0
+				0 0 0 13 &intc 0 0 583 0
+				0 0 0 14 &intc 0 0 584 0
+				0 0 0 15 &intc 0 0 585 0
+				0 0 0 16 &intc 0 0 586 0
+				0 0 0 17 &intc 0 0 587 0
+				0 0 0 18 &intc 0 0 588 0
+				0 0 0 19 &intc 0 0 589 0
+				0 0 0 20 &intc 0 0 590 0
+				0 0 0 21 &intc 0 0 591 0
+				0 0 0 22 &intc 0 0 592 0
+				0 0 0 23 &intc 0 0 593 0
+				0 0 0 24 &intc 0 0 594 0
+				0 0 0 25 &intc 0 0 595 0
+				0 0 0 26 &intc 0 0 596 0
+				0 0 0 27 &intc 0 0 597 0
+				0 0 0 28 &intc 0 0 598 0
+				0 0 0 29 &intc 0 0 599 0
+				0 0 0 30 &intc 0 0 600 0
+				0 0 0 31 &intc 0 0 601 0
+				0 0 0 32 &intc 0 0 602 0
+				0 0 0 33 &intc 0 0 603 0
+				0 0 0 34 &intc 0 0 604 0
+				0 0 0 35 &intc 0 0 605 0
+				0 0 0 36 &intc 0 0 606 0
+				0 0 0 37 &intc 0 0 607 0>;
+
+		interrupt-names = "int_msi", "int_a", "int_b", "int_c",
+				"int_d", "int_global_int",
+				"msi_0", "msi_1", "msi_2", "msi_3",
+				"msi_4", "msi_5", "msi_6", "msi_7",
+				"msi_8", "msi_9", "msi_10", "msi_11",
+				"msi_12", "msi_13", "msi_14", "msi_15",
+				"msi_16", "msi_17", "msi_18", "msi_19",
+				"msi_20", "msi_21", "msi_22", "msi_23",
+				"msi_24", "msi_25", "msi_26", "msi_27",
+				"msi_28", "msi_29", "msi_30", "msi_31";
+
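+		/*
+		 * Assumed to use the same triplet format as the QMP PHY init
+		 * sequence above: <reg_offset value delay>.
+		 */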
+		qcom,phy-sequence = <0x804 0x01 0x00
+					0x034 0x14 0x00
+					0x138 0x30 0x00
+					0x048 0x0f 0x00
+					0x15c 0x06 0x00
+					0x090 0x01 0x00
+					0x088 0x20 0x00
+					0x0f0 0x00 0x00
+					0x0f8 0x01 0x00
+					0x0f4 0xc9 0x00
+					0x11c 0xff 0x00
+					0x120 0x3f 0x00
+					0x164 0x01 0x00
+					0x154 0x00 0x00
+					0x148 0x0a 0x00
+					0x05C 0x19 0x00
+					0x038 0x90 0x00
+					0x0b0 0x82 0x00
+					0x0c0 0x03 0x00
+					0x0bc 0x55 0x00
+					0x0b8 0x55 0x00
+					0x0a0 0x00 0x00
+					0x09c 0x0d 0x00
+					0x098 0x04 0x00
+					0x13c 0x00 0x00
+					0x060 0x08 0x00
+					0x068 0x16 0x00
+					0x070 0x34 0x00
+					0x15c 0x06 0x00
+					0x138 0x33 0x00
+					0x03c 0x02 0x00
+					0x040 0x0e 0x00
+					0x080 0x04 0x00
+					0x0dc 0x00 0x00
+					0x0d8 0x3f 0x00
+					0x00c 0x09 0x00
+					0x010 0x01 0x00
+					0x01c 0x40 0x00
+					0x020 0x01 0x00
+					0x014 0x02 0x00
+					0x018 0x00 0x00
+					0x024 0x7e 0x00
+					0x028 0x15 0x00
+					0x244 0x02 0x00
+					0x2a4 0x12 0x00
+					0x260 0x10 0x00
+					0x28c 0x06 0x00
+					0x504 0x03 0x00
+					0x500 0x10 0x00
+					0x50c 0x14 0x00
+					0x4d4 0x0a 0x00
+					0x4d8 0x04 0x00
+					0x4dc 0x1a 0x00
+					0x434 0x4b 0x00
+					0x414 0x04 0x00
+					0x40c 0x04 0x00
+					0x4f8 0x00 0x00
+					0x4fc 0x80 0x00
+					0x51c 0x40 0x00
+					0x444 0x71 0x00
+					0x43c 0x40 0x00
+					0x854 0x04 0x00
+					0x62c 0x52 0x00
+					0x9ac 0x00 0x00
+					0x8a0 0x01 0x00
+					0x9e0 0x00 0x00
+					0x9dc 0x20 0x00
+					0x9a8 0x00 0x00
+					0x8a4 0x01 0x00
+					0x8a8 0x73 0x00
+					0x9d8 0xaa 0x00
+					0x9b0 0x03 0x00
+					0x804 0x03 0x00
+					0x800 0x00 0x00
+					0x808 0x03 0x00>;
+
+		pinctrl-names = "default", "sleep";
+		pinctrl-0 = <&pcie0_clkreq_default
+			&pcie0_perst_default
+			&pcie0_wake_default>;
+		pinctrl-1 = <&pcie0_clkreq_default
+			&pcie0_perst_default
+			&pcie0_wake_sleep>;
+
+		perst-gpio = <&tlmm 35 0>;
+		wake-gpio = <&tlmm 37 0>;
+
+		gdsc-vdd-supply = <&gdsc_pcie_0>;
+		vreg-1.8-supply = <&pm8998_l2>;
+		vreg-0.9-supply = <&pm8998_l1>;
+		vreg-cx-supply = <&pm8998_s1_level>;
+
+		qcom,vreg-1.8-voltage-level = <1200000 1200000 24000>;
+		qcom,vreg-0.9-voltage-level = <880000 880000 24000>;
+		qcom,vreg-cx-voltage-level = <RPM_SMD_REGULATOR_LEVEL_BINNING
+						RPM_SMD_REGULATOR_LEVEL_SVS 0>;
+
+		qcom,l1-supported;
+		qcom,l1ss-supported;
+		qcom,aux-clk-sync;
+
+		qcom,ep-latency = <10>;
+
+		qcom,boot-option = <0x1>;
+
+		linux,pci-domain = <0>;
+
+		qcom,msi-gicm-addr = <0x17a00040>;
+		qcom,msi-gicm-base = <0x260>;
+
+		qcom,pcie-phy-ver = <0x20>;
+		qcom,use-19p2mhz-aux-clk;
+
+		iommus = <&anoc1_smmu>;
+		qcom,smmu-exist;
+		qcom,smmu-sid-base = <0x1480>;
+
+		qcom,msm-bus,name = "pcie0";
+		qcom,msm-bus,num-cases = <2>;
+		qcom,msm-bus,num-paths = <1>;
+		qcom,msm-bus,vectors-KBps =
+				<45 512 0 0>,
+				<45 512 500 800>;
+
+		clocks = <&clock_gcc clk_gcc_pcie_0_pipe_clk>,
+			<&clock_gcc clk_ln_bb_clk1>,
+			<&clock_gcc clk_gcc_pcie_0_aux_clk>,
+			<&clock_gcc clk_gcc_pcie_0_cfg_ahb_clk>,
+			<&clock_gcc clk_gcc_pcie_0_mstr_axi_clk>,
+			<&clock_gcc clk_gcc_pcie_0_slv_axi_clk>,
+			<&clock_gcc clk_gcc_pcie_clkref_clk>;
+
+		clock-names = "pcie_0_pipe_clk", "pcie_0_ref_clk_src",
+				"pcie_0_aux_clk", "pcie_0_cfg_ahb_clk",
+				"pcie_0_mstr_axi_clk", "pcie_0_slv_axi_clk",
+				"pcie_0_ldo";
+
+		max-clock-frequency-hz = <0>, <0>, <19200000>,
+					<0>, <0>, <0>, <0>, <0>, <0>,
+					<0>, <0>, <0>, <0>, <0>, <0>,
+					<0>, <0>;
+
+		resets = <&clock_gcc PCIE_PHY_BCR>,
+			 <&clock_gcc PCIE_0_PHY_BCR>,
+			 <&clock_gcc PCIE_0_PHY_BCR>;
+
+		reset-names = "pcie_phy_reset",
+				"pcie_0_phy_reset",
+				"pcie_0_phy_pipe_reset";
+	};
+
+	qcom,bcl {
+		compatible = "qcom,bcl";
+		qcom,bcl-enable;
+		qcom,bcl-framework-interface;
+		qcom,bcl-freq-control-list = <&CPU4 &CPU5 &CPU6 &CPU7>;
+		qcom,bcl-hotplug-list = <&CPU4 &CPU5 &CPU6 &CPU7>;
+		qcom,bcl-soc-hotplug-list = <&CPU4 &CPU5 &CPU6 &CPU7>;
+		qcom,ibat-monitor {
+			qcom,low-threshold-uamp = <3400000>;
+			qcom,high-threshold-uamp = <4200000>;
+			qcom,mitigation-freq-khz = <576000>;
+			qcom,vph-high-threshold-uv = <3500000>;
+			qcom,vph-low-threshold-uv = <3300000>;
+			qcom,soc-low-threshold = <10>;
+			qcom,thermal-handle = <&msm_thermal_freq>;
+		};
+	};
+
+	qcom,ssc@5c00000 {
+		compatible = "qcom,pil-tz-generic";
+		reg = <0x5c00000 0x4000>;
+		interrupts = <0 390 1>;
+
+		vdd_cx-supply = <&pm8998_l27_level>;
+		vdd_px-supply = <&pm8998_lvs2>;
+		qcom,vdd_cx-uV-uA = <RPM_SMD_REGULATOR_LEVEL_TURBO 0>;
+		qcom,proxy-reg-names = "vdd_cx", "vdd_px";
+		qcom,keep-proxy-regs-on;
+
+		clocks = <&clock_gcc clk_cxo_pil_ssc_clk>,
+			 <&clock_gcc clk_aggre2_noc_clk>;
+		clock-names = "xo", "aggre2";
+		qcom,proxy-clock-names = "xo", "aggre2";
+
+		qcom,pas-id = <12>;
+		qcom,proxy-timeout-ms = <10000>;
+		qcom,smem-id = <424>;
+		qcom,sysmon-id = <3>;
+		qcom,ssctl-instance-id = <0x16>;
+		qcom,firmware-name = "slpi";
+		status = "ok";
+		memory-region = <&pil_slpi_mem>;
+
+		/* GPIO inputs from ssc */
+		qcom,gpio-err-fatal = <&smp2pgpio_ssr_smp2p_3_in 0 0>;
+		qcom,gpio-proxy-unvote = <&smp2pgpio_ssr_smp2p_3_in 2 0>;
+		qcom,gpio-err-ready = <&smp2pgpio_ssr_smp2p_3_in 1 0>;
+		qcom,gpio-stop-ack = <&smp2pgpio_ssr_smp2p_3_in 3 0>;
+
+		/* GPIO output to ssc */
+		qcom,gpio-force-stop = <&smp2pgpio_ssr_smp2p_3_out 0 0>;
+	};
+
+	qcom,venus@cce0000 {
+		compatible = "qcom,pil-tz-generic";
+		reg = <0xcce0000 0x4000>;
+
+		vdd-supply = <&gdsc_venus>;
+		qcom,proxy-reg-names = "vdd";
+
+		clocks = <&clock_mmss clk_mmss_video_core_clk>,
+			 <&clock_mmss clk_mmss_mnoc_ahb_clk>,
+			 <&clock_mmss clk_mmss_video_ahb_clk>,
+			 <&clock_gcc clk_mmssnoc_axi_clk>,
+			 <&clock_mmss clk_mmss_video_axi_clk>,
+			 <&clock_mmss clk_mmss_video_maxi_clk>;
+		clock-names = "core_clk", "mnoc_ahb_clk", "iface_clk",
+			      "noc_axi_clk", "bus_clk", "maxi_clk";
+		qcom,proxy-clock-names = "core_clk", "mnoc_ahb_clk",
+			"iface_clk", "noc_axi_clk", "bus_clk", "maxi_clk";
+
+		qcom,pas-id = <9>;
+		qcom,msm-bus,name = "pil-venus";
+		qcom,msm-bus,num-cases = <2>;
+		qcom,msm-bus,num-paths = <1>;
+		qcom,msm-bus,vectors-KBps =
+			<63 512 0 0>,
+			<63 512 0 304000>;
+		qcom,proxy-timeout-ms = <100>;
+		qcom,firmware-name = "venus";
+		memory-region = <&pil_video_mem>;
+		status = "ok";
+	};
+
+	wdog: qcom,wdt@17817000 {
+		compatible = "qcom,msm-watchdog";
+		reg = <0x17817000 0x1000>;
+		reg-names = "wdt-base";
+		interrupts = <0 3 0>, <0 4 0>;
+		qcom,bark-time = <11000>;
+		qcom,pet-time = <10000>;
+		qcom,ipi-ping;
+		qcom,wakeup-enable;
+		qcom,scandump-size = <0x40000>;
+	};
+
+	qcom,spss@1d00000 {
+		compatible = "qcom,pil-tz-generic";
+		reg = <0x1d0101c 0x4>,
+		      <0x1d01024 0x4>,
+		      <0x1d01028 0x4>,
+		      <0x1d0103c 0x4>,
+		      <0x1d02030 0x4>;
+		reg-names = "sp2soc_irq_status", "sp2soc_irq_clr",
+			    "sp2soc_irq_mask", "rmb_err", "rmb_err_spare2";
+		interrupts = <0 352 1>;
+
+		vdd_cx-supply = <&pm8998_s1_level>;
+		qcom,proxy-reg-names = "vdd_cx";
+		qcom,vdd_cx-uV-uA = <RPM_SMD_REGULATOR_LEVEL_TURBO 100000>;
+
+		clocks = <&clock_gcc clk_cxo_pil_spss_clk>;
+		clock-names = "xo";
+		qcom,proxy-clock-names = "xo";
+		qcom,pil-generic-irq-handler;
+		status = "ok";
+
+		qcom,pas-id = <14>;
+		qcom,proxy-timeout-ms = <10000>;
+		qcom,firmware-name = "spss";
+		memory-region = <&spss_mem>;
+		qcom,spss-scsr-bits = <24 25>;
+	};
+
+	qcom,msm-rtb {
+		compatible = "qcom,msm-rtb";
+		qcom,rtb-size = <0x100000>;
+	};
+
+	qcom,mpm2-sleep-counter@10a3000 {
+		compatible = "qcom,mpm2-sleep-counter";
+		reg = <0x010a3000 0x1000>;
+		clock-frequency = <32768>;
+	};
+
+	qcom,msm-imem@146bf000 {
+		compatible = "qcom,msm-imem";
+		reg = <0x146bf000 0x1000>;
+		ranges = <0x0 0x146bf000 0x1000>;
+		#address-cells = <1>;
+		#size-cells = <1>;
+
+		mem_dump_table@10 {
+			compatible = "qcom,msm-imem-mem_dump_table";
+			reg = <0x10 8>;
+		};
+
+		dload_type@18 {
+			compatible = "qcom,msm-imem-dload-type";
+			reg = <0x18 4>;
+		};
+
+		restart_reason@65c {
+			compatible = "qcom,msm-imem-restart_reason";
+			reg = <0x65c 4>;
+		};
+
+		boot_stats@6b0 {
+			compatible = "qcom,msm-imem-boot_stats";
+			reg = <0x6b0 32>;
+		};
+
+		kaslr_offset@6d0 {
+			compatible = "qcom,msm-imem-kaslr_offset";
+			reg = <0x6d0 12>;
+		};
+
+		pil@94c {
+			compatible = "qcom,msm-imem-pil";
+			reg = <0x94c 200>;
+		};
+
+		diag_dload@c8 {
+			compatible = "qcom,msm-imem-diag-dload";
+			reg = <0xc8 200>;
+		};
+	};
+
+	cpu_pmu: cpu-pmu {
+		compatible = "arm,armv8-pmuv3";
+		qcom,irq-is-percpu;
+		interrupts = <1 6 4>;
+	};
+
+	cpuss_dump {
+		compatible = "qcom,cpuss-dump";
+		qcom,l1_i_cache0 {
+			qcom,dump-node = <&L1_I_0>;
+			qcom,dump-id = <0x60>;
+		};
+		qcom,l1_i_cache1 {
+			qcom,dump-node = <&L1_I_1>;
+			qcom,dump-id = <0x61>;
+		};
+		qcom,l1_i_cache2 {
+			qcom,dump-node = <&L1_I_2>;
+			qcom,dump-id = <0x62>;
+		};
+		qcom,l1_i_cache3 {
+			qcom,dump-node = <&L1_I_3>;
+			qcom,dump-id = <0x63>;
+		};
+		qcom,l1_i_cache100 {
+			qcom,dump-node = <&L1_I_100>;
+			qcom,dump-id = <0x64>;
+		};
+		qcom,l1_i_cache101 {
+			qcom,dump-node = <&L1_I_101>;
+			qcom,dump-id = <0x65>;
+		};
+		qcom,l1_i_cache102 {
+			qcom,dump-node = <&L1_I_102>;
+			qcom,dump-id = <0x66>;
+		};
+		qcom,l1_i_cache103 {
+			qcom,dump-node = <&L1_I_103>;
+			qcom,dump-id = <0x67>;
+		};
+		qcom,l1_d_cache0 {
+			qcom,dump-node = <&L1_D_0>;
+			qcom,dump-id = <0x80>;
+		};
+		qcom,l1_d_cache1 {
+			qcom,dump-node = <&L1_D_1>;
+			qcom,dump-id = <0x81>;
+		};
+		qcom,l1_d_cache2 {
+			qcom,dump-node = <&L1_D_2>;
+			qcom,dump-id = <0x82>;
+		};
+		qcom,l1_d_cache3 {
+			qcom,dump-node = <&L1_D_3>;
+			qcom,dump-id = <0x83>;
+		};
+		qcom,l1_d_cache100 {
+			qcom,dump-node = <&L1_D_100>;
+			qcom,dump-id = <0x84>;
+		};
+		qcom,l1_d_cache101 {
+			qcom,dump-node = <&L1_D_101>;
+			qcom,dump-id = <0x85>;
+		};
+		qcom,l1_d_cache102 {
+			qcom,dump-node = <&L1_D_102>;
+			qcom,dump-id = <0x86>;
+		};
+		qcom,l1_d_cache103 {
+			qcom,dump-node = <&L1_D_103>;
+			qcom,dump-id = <0x87>;
+		};
+		qcom,l1_tlb_dump0 {
+			qcom,dump-node = <&L1_TLB_0>;
+			qcom,dump-id = <0x20>;
+		};
+		qcom,l1_tlb_dump1 {
+			qcom,dump-node = <&L1_TLB_1>;
+			qcom,dump-id = <0x21>;
+		};
+		qcom,l1_tlb_dump2 {
+			qcom,dump-node = <&L1_TLB_2>;
+			qcom,dump-id = <0x22>;
+		};
+		qcom,l1_tlb_dump3 {
+			qcom,dump-node = <&L1_TLB_3>;
+			qcom,dump-id = <0x23>;
+		};
+		qcom,l1_tlb_dump100 {
+			qcom,dump-node = <&L1_TLB_100>;
+			qcom,dump-id = <0x24>;
+		};
+		qcom,l1_tlb_dump101 {
+			qcom,dump-node = <&L1_TLB_101>;
+			qcom,dump-id = <0x25>;
+		};
+		qcom,l1_tlb_dump102 {
+			qcom,dump-node = <&L1_TLB_102>;
+			qcom,dump-id = <0x26>;
+		};
+		qcom,l1_tlb_dump103 {
+			qcom,dump-node = <&L1_TLB_103>;
+			qcom,dump-id = <0x27>;
+		};
+	};
+
+	ssc_sensors: qcom,msm-ssc-sensors {
+		compatible = "qcom,msm-ssc-sensors";
+		status = "ok";
+		qcom,firmware-name = "slpi_v1";
+	};
+
+	dcc: dcc@10b3000 {
+		compatible = "qcom,dcc";
+		reg = <0x10b3000 0x1000>,
+		      <0x10b4000 0x2000>;
+		reg-names = "dcc-base", "dcc-ram-base";
+
+		clocks = <&clock_gcc clk_gcc_dcc_ahb_clk>;
+		clock-names = "dcc_clk";
+	};
+
+	qcom,msm-core@780000 {
+		compatible = "qcom,apss-core-ea";
+		reg = <0x780000 0x1000>;
+		qcom,low-hyst-temp = <100>;
+		qcom,high-hyst-temp = <100>;
+		qcom,polling-interval = <50>;
+
+		ea0: ea0 {
+			sensor = <&sensor_information1>;
+		};
+
+		ea1: ea1 {
+			sensor = <&sensor_information2>;
+		};
+
+		ea2: ea2 {
+			sensor = <&sensor_information3>;
+		};
+
+		ea3: ea3 {
+			sensor = <&sensor_information4>;
+		};
+
+		ea4: ea4 {
+			sensor = <&sensor_information7>;
+		};
+
+		ea5: ea5 {
+			sensor = <&sensor_information8>;
+		};
+
+		ea6: ea6 {
+			sensor = <&sensor_information9>;
+		};
+
+		ea7: ea7 {
+			sensor = <&sensor_information10>;
+		};
+	};
+
+	msm_ath10k_wlan: qcom,msm_ath10k_wlan {
+		status = "disabled";
+		compatible = "qcom,wcn3990-wifi";
+		reg = <0x18800000 0x800000>;
+		reg-names = "membase";
+		clocks = <&clock_gcc clk_rf_clk2_pin>;
+		clock-names = "cxo_ref_clk_pin";
+		interrupts =
+			<0 413 0 /* CE0 */ >,
+			<0 414 0 /* CE1 */ >,
+			<0 415 0 /* CE2 */ >,
+			<0 416 0 /* CE3 */ >,
+			<0 417 0 /* CE4 */ >,
+			<0 418 0 /* CE5 */ >,
+			<0 420 0 /* CE6 */ >,
+			<0 421 0 /* CE7 */ >,
+			<0 422 0 /* CE8 */ >,
+			<0 423 0 /* CE9 */ >,
+			<0 424 0 /* CE10 */ >,
+			<0 425 0 /* CE11 */ >;
+		vdd-0.8-cx-mx-supply = <&pm8998_l5>;
+		vdd-1.8-xo-supply = <&pm8998_l7_pin_ctrl>;
+		vdd-1.3-rfa-supply = <&pm8998_l17_pin_ctrl>;
+		vdd-3.3-ch0-supply = <&pm8998_l25_pin_ctrl>;
+		qcom,vdd-0.8-cx-mx-config = <800000 800000>;
+		qcom,vdd-3.3-ch0-config = <3104000 3312000>;
+	};
+
+	qcom,icnss@18800000 {
+		compatible = "qcom,icnss";
+		reg = <0x18800000 0x800000>,
+		      <0xa0000000 0x10000000>,
+		      <0xb0000000 0x10000>;
+		reg-names = "membase", "smmu_iova_base", "smmu_iova_ipa";
+		clocks = <&clock_gcc clk_rf_clk2_pin>;
+		clock-names = "cxo_ref_clk_pin";
+		iommus = <&anoc2_smmu 0x1900>,
+			 <&anoc2_smmu 0x1901>;
+		interrupts = <0 413 0 /* CE0 */ >,
+			     <0 414 0 /* CE1 */ >,
+			     <0 415 0 /* CE2 */ >,
+			     <0 416 0 /* CE3 */ >,
+			     <0 417 0 /* CE4 */ >,
+			     <0 418 0 /* CE5 */ >,
+			     <0 420 0 /* CE6 */ >,
+			     <0 421 0 /* CE7 */ >,
+			     <0 422 0 /* CE8 */ >,
+			     <0 423 0 /* CE9 */ >,
+			     <0 424 0 /* CE10 */ >,
+			     <0 425 0 /* CE11 */ >;
+		qcom,wlan-msa-memory = <0x100000>;
+		vdd-0.8-cx-mx-supply = <&pm8998_l5>;
+		vdd-1.8-xo-supply = <&pm8998_l7_pin_ctrl>;
+		vdd-1.3-rfa-supply = <&pm8998_l17_pin_ctrl>;
+		vdd-3.3-ch0-supply = <&pm8998_l25_pin_ctrl>;
+		qcom,vdd-0.8-cx-mx-config = <800000 800000>;
+		qcom,vdd-3.3-ch0-config = <3104000 3312000>;
+		qcom,icnss-vadc = <&pm8998_vadc>;
+		qcom,icnss-adc_tm = <&pm8998_adc_tm>;
+	};
+
+	tspp: msm_tspp@0c1e7000 {
+		compatible = "qcom,msm_tspp";
+		reg = <0x0c1e7000 0x200>, /* MSM_TSIF0_PHYS */
+		      <0x0c1e8000 0x200>, /* MSM_TSIF1_PHYS */
+		      <0x0c1e9000 0x1000>, /* MSM_TSPP_PHYS  */
+		      <0x0c1c4000 0x23000>; /* MSM_TSPP_BAM_PHYS */
+		reg-names = "MSM_TSIF0_PHYS",
+			"MSM_TSIF1_PHYS",
+			"MSM_TSPP_PHYS",
+			"MSM_TSPP_BAM_PHYS";
+		interrupts = <0 121 0>, /* TSIF_TSPP_IRQ */
+			<0 119 0>, /* TSIF0_IRQ */
+			<0 120 0>, /* TSIF1_IRQ */
+			<0 122 0>; /* TSIF_BAM_IRQ */
+		interrupt-names = "TSIF_TSPP_IRQ",
+			"TSIF0_IRQ",
+			"TSIF1_IRQ",
+			"TSIF_BAM_IRQ";
+
+		clock-names = "iface_clk", "ref_clk";
+		clocks = <&clock_gcc clk_gcc_tsif_ahb_clk>,
+			<&clock_gcc clk_gcc_tsif_ref_clk>;
+
+		qcom,msm-bus,name = "tsif";
+		qcom,msm-bus,num-cases = <2>;
+		qcom,msm-bus,num-paths = <1>;
+		qcom,msm-bus,vectors-KBps =
+				<82 512 0 0>, /* No vote */
+				<82 512 12288 24576>;
+				/* Max. bandwidth, 2xTSIF, each max of 96Mbps */
+
+		pinctrl-names = "disabled",
+			"tsif0-mode1", "tsif0-mode2",
+			"tsif1-mode1", "tsif1-mode2",
+			"dual-tsif-mode1", "dual-tsif-mode2";
+
+		pinctrl-0 = <>;				/* disabled */
+		pinctrl-1 = <&tsif0_signals_active>;	/* tsif0-mode1 */
+		pinctrl-2 = <&tsif0_signals_active
+			&tsif0_sync_active>;		/* tsif0-mode2 */
+		pinctrl-3 = <&tsif1_signals_active>;	/* tsif1-mode1 */
+		pinctrl-4 = <&tsif1_signals_active
+			&tsif1_sync_active>;		/* tsif1-mode2 */
+		pinctrl-5 = <&tsif0_signals_active
+			&tsif1_signals_active>;		/* dual-tsif-mode1 */
+		pinctrl-6 = <&tsif0_signals_active
+			&tsif0_sync_active
+			&tsif1_signals_active
+			&tsif1_sync_active>;		/* dual-tsif-mode2 */
+	};
+
+	wil6210: qcom,wil6210 {
+		status = "disabled";
+		compatible = "qcom,wil6210";
+		qcom,pcie-parent = <&pcie0>;
+		qcom,wigig-en = <&tlmm 80 0>;
+		qcom,msm-bus,name = "wil6210";
+		qcom,msm-bus,num-cases = <2>;
+		qcom,msm-bus,num-paths = <1>;
+		qcom,msm-bus,vectors-KBps =
+			<45 512 0 0>,
+			<45 512 600000 800000>; /* ~4.6Gbps (MCS12) */
+		qcom,use-ext-supply;
+		vdd-supply = <&pm8998_s7>;
+		vddio-supply = <&pm8998_s5>;
+		qcom,use-ext-clocks;
+		clocks = <&clock_gcc clk_rf_clk3>,
+			 <&clock_gcc clk_rf_clk3_pin>;
+		clock-names = "rf_clk3_clk", "rf_clk3_pin_clk";
+		qcom,smmu-support;
+		qcom,smmu-s1-en;
+		qcom,smmu-fast-map;
+		qcom,smmu-coherent;
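+		/* Assumed <base size> of the IOVA window used for mappings. */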
+		qcom,smmu-mapping = <0x20000000 0xe0000000>;
+		qcom,keep-radio-on-during-sleep;
+	};
+
+	qcom,qsee_ipc_irq_bridge {
+		compatible = "qcom,qsee-ipc-irq-bridge";
+
+		qcom,qsee-ipc-irq-spss {
+			qcom,rx-irq-clr = <0x1d08008 0x4>;
+			qcom,rx-irq-clr-mask = <0x1>;
+			qcom,dev-name = "qsee_ipc_irq_spss";
+			interrupts = <0 349 4>;
+			label = "spss";
+		};
+	};
+};
+
+&clock_cpu {
+	lmh_dcvs0: qcom,limits-dcvs@0 {
+		compatible = "qcom,msm-hw-limits";
+		interrupts = <GIC_SPI 37 IRQ_TYPE_LEVEL_HIGH>;
+	};
+
+	lmh_dcvs1: qcom,limits-dcvs@1 {
+		compatible = "qcom,msm-hw-limits";
+		interrupts = <GIC_SPI 38 IRQ_TYPE_LEVEL_HIGH>;
+	};
+};
+
+&gdsc_usb30 {
+	status = "ok";
+};
+
+&gdsc_pcie_0 {
+	status = "ok";
+};
+
+&gdsc_ufs {
+	status = "ok";
+};
+
+&gdsc_bimc_smmu {
+	clock-names = "bus_clk";
+	clocks = <&clock_mmss clk_mmss_bimc_smmu_axi_clk>;
+	proxy-supply = <&gdsc_bimc_smmu>;
+	qcom,proxy-consumer-enable;
+	status = "ok";
+};
+
+&gdsc_hlos1_vote_lpass_adsp {
+	status = "ok";
+};
+
+&gdsc_hlos1_vote_lpass_core {
+	status = "ok";
+};
+
+&gdsc_venus {
+	status = "ok";
+};
+
+&gdsc_venus_core0 {
+	status = "ok";
+	qcom,support-hw-trigger;
+};
+
+&gdsc_venus_core1 {
+	status = "ok";
+	qcom,support-hw-trigger;
+};
+
+&gdsc_camss_top {
+	status = "ok";
+};
+
+&gdsc_vfe0 {
+	parent-supply = <&gdsc_camss_top>;
+	status = "ok";
+};
+
+&gdsc_vfe1 {
+	parent-supply = <&gdsc_camss_top>;
+	status = "ok";
+};
+
+&gdsc_cpp {
+	parent-supply = <&gdsc_camss_top>;
+	qcom,support-hw-trigger;
+	status = "ok";
+};
+
+&gdsc_mdss {
+	proxy-supply = <&gdsc_mdss>;
+	qcom,proxy-consumer-enable;
+	status = "ok";
+};
+
+&gdsc_gpu_gx {
+	clock-names = "core_root_clk";
+	clocks = <&clock_gfx clk_gfx3d_clk_src>;
+	qcom,force-enable-root-clk;
+	parent-supply = <&gfx_vreg>;
+	status = "ok";
+};
+
+&gdsc_gpu_cx {
+	status = "ok";
+};
+
+#include "msm-pm8998.dtsi"
+#include "msm-pmi8998.dtsi"
+#include "msm-pm8005.dtsi"
+#include "msm-pm8998-rpm-regulator.dtsi"
+#include "msm8998-regulator.dtsi"
+
+#include "msm8998-pm.dtsi"
+#include "msm-arm-smmu-8998.dtsi"
+#include "msm-arm-smmu-impl-defs-8998.dtsi"
+#include "msm8998-ion.dtsi"
+#include "msm8998-camera.dtsi"
+#include "msm8998-vidc.dtsi"
+#include "msm8998-coresight.dtsi"
+#include "msm8998-bus.dtsi"
+#include "msm8998-gpu.dtsi"
+#include "msm8998-pinctrl.dtsi"
+#include "msm-audio-lpass.dtsi"
+#include "msm8998-mdss-pll.dtsi"
+#include "msm-rdbg.dtsi"
+#include "msm8998-blsp.dtsi"
+#include "msm8998-audio.dtsi"
+#include "msm8998-sde.dtsi"
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/arch/arm64/boot/dts/qcom/msm8998-gpu.dtsi	2019-01-22 16:16:21.195225506 +0100
@@ -0,0 +1,277 @@
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+&soc {
+
+	pil_gpu: qcom,kgsl-hyp {
+		compatible = "qcom,pil-tz-generic";
+		qcom,pas-id = <13>;
+		qcom,firmware-name = "a540_zap";
+	};
+
+	msm_bus: qcom,kgsl-busmon {
+		label = "kgsl-busmon";
+		compatible = "qcom,kgsl-busmon";
+	};
+
+	gpubw: qcom,gpubw {
+		compatible = "qcom,devbw";
+		governor = "bw_vbif";
+		qcom,src-dst-ports = <26 512>;
+		/*
+		 * The active-only flag is used while registering the bus
+		 * governor. It helps release the bus vote when the CPU
+		 * subsystem is inactive.
+		 */
+		qcom,active-only;
+		qcom,bw-tbl =
+			<     0 /*  off     */ >,
+			<   762 /*  100 MHz */ >,
+			<  1144 /*  150 MHz */ >,
+			<  1525 /*  200 MHz */ >,
+			<  2288 /*  300 MHz */ >,
+			<  3143 /*  412 MHz */ >,
+			<  4173 /*  547 MHz */ >,
+			<  5195 /*  681 MHz */ >,
+			<  5859 /*  768 MHz */ >,
+			<  7759 /*  1017 MHz */ >,
+			<  9887 /*  1296 MHz */ >,
+			<  11863 /*  1555 MHz */ >,
+			<  13763 /*  1804 MHz */ >;
+	};
+
+	msm_gpu: qcom,kgsl-3d0@5000000 {
+		label = "kgsl-3d0";
+		compatible = "qcom,kgsl-3d0", "qcom,kgsl-3d";
+		status = "ok";
+		reg = <0x5000000 0x40000>;
+		reg-names = "kgsl_3d0_reg_memory";
+		interrupts = <0 300 0>;
+		interrupt-names = "kgsl_3d0_irq";
+		qcom,id = <0>;
+
+		qcom,chipid = <0x05040000>;
+		qcom,gpu-efuse-leakage = <0x00070130 24>;
+		qcom,base-leakage-coefficient = <34>;
+		qcom,lm-limit = <6000>;
+
+		qcom,initial-pwrlevel = <4>;
+
+		qcom,idle-timeout = <80>; // <HZ/12>
+		qcom,no-nap;
+
+		qcom,highest-bank-bit = <15>;
+
+		qcom,snapshot-size = <1048576>; // bytes
+
+		qcom,gpu-qdss-stm = <0x161c0000 0x40000>; // base addr, size
+
+		qcom,gpu-qtimer = <0x17921000 0x1000>; // base addr, size
+
+		qcom,tsens-name = "tsens_tz_sensor12";
+
+		/* Avoid L2PC on big cluster CPUs (CPU 4,5,6,7) */
+		qcom,l2pc-cpu-mask = <0x000000f0>;
+
+		/* Quirks */
+		qcom,gpu-quirk-lmloadkill-disable;
+
+		/* DRM settings */
+		qcom,gpmu-tsens = <0x000c000d>;
+		qcom,max-power = <5448>;
+		qcom,gpmu-firmware = "a540_gpmu.fw2";
+		qcom,gpmu-version = <3 0>;
+		qcom,zap-shader = "a540_zap";
+
+		clocks = <&clock_gfx clk_gpucc_gfx3d_clk>,
+			<&clock_gcc clk_gcc_gpu_cfg_ahb_clk>,
+			<&clock_gpu clk_gpucc_rbbmtimer_clk>,
+			<&clock_gcc clk_gcc_bimc_gfx_clk>,
+			<&clock_gcc clk_gcc_gpu_bimc_gfx_clk>,
+			<&clock_gpu clk_gpucc_gfx3d_isense_clk>,
+			<&clock_gpu clk_gpucc_rbcpr_clk>,
+			<&clock_gcc clk_gcc_gpu_iref_clk>;
+
+		clock-names = "core_clk", "iface_clk", "rbbmtimer_clk",
+			"mem_clk", "mem_iface_clk", "isense_clk", "rbcpr_clk",
+			"iref_clk";
+
+		qcom,isense-clk-on-level = <1>;
+		/* Bus Scale Settings */
+		qcom,gpubw-dev = <&gpubw>;
+		qcom,bus-control;
+		qcom,msm-bus,name = "grp3d";
+		qcom,bus-width = <32>;
+		qcom,msm-bus,num-cases = <13>;
+		qcom,msm-bus,num-paths = <1>;
+		qcom,msm-bus,vectors-KBps =
+				<26 512 0 0>,
+
+				<26 512 0 800000>,      // 1 bus=100
+				<26 512 0 1200000>,     // 2 bus=150
+				<26 512 0 1600000>,     // 3 bus=200
+				<26 512 0 2400000>,     // 4 bus=300
+				<26 512 0 3296000>,     // 5 bus=412
+				<26 512 0 4376000>,     // 6 bus=547
+				<26 512 0 5448000>,     // 7 bus=681
+				<26 512 0 6144000>,     // 8 bus=768
+				<26 512 0 8136000>,     // 9 bus=1017
+				<26 512 0 10368000>,    // 10 bus=1296
+				<26 512 0 12440000>,    // 11 bus=1555
+				<26 512 0 14432000>;    // 12 bus=1804
+
+		/* GDSC regulator names */
+		regulator-names = "vddcx", "vdd";
+		/* GDSC oxili regulators */
+		vddcx-supply = <&gdsc_gpu_cx>;
+		vdd-supply = <&gdsc_gpu_gx>;
+
+		/* Trace bus */
+		coresight-name = "coresight-gfx";
+		coresight-atid = <3>;
+		port {
+			gfx_out_funnel_in1: endpoint {
+				remote-endpoint = <&funnel_in1_in_gfx>;
+			};
+		};
+
+		/* GPU Mempools */
+		qcom,gpu-mempools {
+			#address-cells = <1>;
+			#size-cells = <0>;
+			compatible = "qcom,gpu-mempools";
+
+			/* 4K Page Pool configuration */
+			qcom,gpu-mempool@0 {
+				reg = <0>;
+				qcom,mempool-page-size = <4096>;
+				qcom,mempool-reserved = <2048>;
+				qcom,mempool-allocate;
+			};
+			/* 8K Page Pool configuration */
+			qcom,gpu-mempool@1 {
+				reg = <1>;
+				qcom,mempool-page-size = <8192>;
+				qcom,mempool-reserved = <1024>;
+				qcom,mempool-allocate;
+			};
+			/* 64K Page Pool configuration */
+			qcom,gpu-mempool@2 {
+				reg = <2>;
+				qcom,mempool-page-size = <65536>;
+				qcom,mempool-reserved = <256>;
+			};
+			/* 1M Page Pool configuration */
+			qcom,gpu-mempool@3 {
+				reg = <3>;
+				qcom,mempool-page-size = <1048576>;
+				qcom,mempool-reserved = <32>;
+			};
+		};
+
+		/* Power levels */
+		qcom,gpu-pwrlevels {
+			#address-cells = <1>;
+			#size-cells = <0>;
+
+			compatible = "qcom,gpu-pwrlevels";
+
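+			/*
+			 * Each level pairs a GPU core clock rate with default,
+			 * minimum and maximum bus-vote indices into the grp3d
+			 * msm-bus vector table above (assumed mapping).
+			 */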
+			qcom,gpu-pwrlevel@0 {
+				reg = <0>;
+				qcom,gpu-freq = <650000000>;
+				qcom,bus-freq = <12>;
+				qcom,bus-min = <11>;
+				qcom,bus-max = <12>;
+			};
+
+			qcom,gpu-pwrlevel@1 {
+				reg = <1>;
+				qcom,gpu-freq = <504000000>;
+				qcom,bus-freq = <11>;
+				qcom,bus-min = <10>;
+				qcom,bus-max = <12>;
+			};
+
+			qcom,gpu-pwrlevel@2 {
+				reg = <2>;
+				qcom,gpu-freq = <403000000>;
+				qcom,bus-freq = <10>;
+				qcom,bus-min = <9>;
+				qcom,bus-max = <11>;
+			};
+
+			qcom,gpu-pwrlevel@3 {
+				reg = <3>;
+				qcom,gpu-freq = <332000000>;
+				qcom,bus-freq = <7>;
+				qcom,bus-min = <6>;
+				qcom,bus-max = <8>;
+			};
+
+			qcom,gpu-pwrlevel@4 {
+				reg = <4>;
+				qcom,gpu-freq = <251000000>;
+				qcom,bus-freq = <4>;
+				qcom,bus-min = <3>;
+				qcom,bus-max = <5>;
+			};
+
+			qcom,gpu-pwrlevel@5 {
+				reg = <5>;
+				qcom,gpu-freq = <171000000>;
+				qcom,bus-freq = <3>;
+				qcom,bus-min = <1>;
+				qcom,bus-max = <4>;
+			};
+
+			qcom,gpu-pwrlevel@6 {
+				reg = <6>;
+				qcom,gpu-freq = <27000000>;
+				qcom,bus-freq = <0>;
+				qcom,bus-min = <0>;
+				qcom,bus-max = <0>;
+			};
+		};
+
+	};
+
+	kgsl_msm_iommu: qcom,kgsl-iommu {
+		compatible = "qcom,kgsl-smmu-v2";
+
+		reg = <0x05040000 0x10000>;
+		qcom,protect = <0x40000 0x10000>;
+		qcom,micro-mmu-control = <0x6000>;
+
+		clocks = <&clock_gcc clk_gcc_gpu_cfg_ahb_clk>,
+			<&clock_gcc clk_gcc_bimc_gfx_clk>,
+			<&clock_gcc clk_gcc_gpu_bimc_gfx_clk>;
+
+		clock-names = "iface_clk", "mem_clk", "mem_iface_clk";
+
+		qcom,secure_align_mask = <0xfff>;
+		qcom,retention;
+		qcom,hyp_secure_alloc;
+
+		gfx3d_user: gfx3d_user {
+			compatible = "qcom,smmu-kgsl-cb";
+			label = "gfx3d_user";
+			iommus = <&kgsl_smmu 0>;
+			qcom,gpu-offset = <0x48000>;
+		};
+
+		gfx3d_secure: gfx3d_secure {
+			compatible = "qcom,smmu-kgsl-cb";
+			iommus = <&kgsl_smmu 2>;
+		};
+	};
+};
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/arch/arm64/boot/dts/qcom/msm8998-ion.dtsi	2019-01-22 16:16:21.195225506 +0100
@@ -0,0 +1,53 @@
+/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+&soc {
+	qcom,ion {
+		compatible = "qcom,msm-ion";
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		system_heap: qcom,ion-heap@25 {
+			reg = <25>;
+			qcom,ion-heap-type = "SYSTEM";
+		};
+
+		qcom,ion-heap@22 { /* ADSP HEAP */
+			reg = <22>;
+			memory-region = <&adsp_mem>;
+			qcom,ion-heap-type = "DMA";
+		};
+
+		qcom,ion-heap@27 { /* QSEECOM HEAP */
+			reg = <27>;
+			memory-region = <&qseecom_mem>;
+			qcom,ion-heap-type = "DMA";
+		};
+
+		qcom,ion-heap@13 { /* SPSS HEAP */
+			reg = <13>;
+			memory-region = <&sp_mem>;
+			qcom,ion-heap-type = "DMA";
+		};
+
+		qcom,ion-heap@10 { /* SECURE DISPLAY HEAP */
+			reg = <10>;
+			memory-region = <&secure_display_memory>;
+			qcom,ion-heap-type = "HYP_CMA";
+		};
+
+		qcom,ion-heap@9 {
+			reg = <9>;
+			qcom,ion-heap-type = "SYSTEM_SECURE";
+		};
+	};
+};
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/arch/arm64/boot/dts/qcom/msm8998-mdss-pll.dtsi	2019-01-22 16:16:21.195225506 +0100
@@ -0,0 +1,173 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+&soc {
+	mdss_dsi0_pll: qcom,mdss_dsi_pll@c994400 {
+		compatible = "qcom,mdss_dsi_pll_8998";
+		status = "ok";
+		label = "MDSS DSI 0 PLL";
+		cell-index = <0>;
+		#clock-cells = <1>;
+
+		reg = <0xc994a00 0x1c0>,
+		      <0xc994400 0x7c0>,
+		      <0x0c8c2300 0x8>;
+		reg-names = "pll_base", "phy_base", "gdsc_base";
+
+		gdsc-supply = <&gdsc_mdss>;
+
+		clocks = <&clock_mmss clk_mmss_mdss_ahb_clk>;
+		clock-names = "iface_clk";
+		clock-rate = <0>;
+		qcom,dsi-pll-ssc-en;
+		qcom,dsi-pll-ssc-mode = "down-spread";
+
+		qcom,platform-supply-entries {
+			#address-cells = <1>;
+			#size-cells = <0>;
+
+			qcom,platform-supply-entry@0 {
+				reg = <0>;
+				qcom,supply-name = "gdsc";
+				qcom,supply-min-voltage = <0>;
+				qcom,supply-max-voltage = <0>;
+				qcom,supply-enable-load = <0>;
+				qcom,supply-disable-load = <0>;
+			};
+
+		};
+	};
+
+	mdss_dsi1_pll: qcom,mdss_dsi_pll@c996400 {
+		compatible = "qcom,mdss_dsi_pll_8998";
+		status = "ok";
+		label = "MDSS DSI 1 PLL";
+		cell-index = <1>;
+		#clock-cells = <1>;
+
+		reg = <0x0c996a00 0x1c0>,
+		      <0x0c996400 0x7c0>,
+		      <0x008c2300 0x8>;
+		reg-names = "pll_base", "phy_base", "gdsc_base";
+
+		gdsc-supply = <&gdsc_mdss>;
+
+		clocks = <&clock_mmss clk_mmss_mdss_ahb_clk>;
+		clock-names = "iface_clk";
+		clock-rate = <0>;
+		qcom,dsi-pll-ssc-en;
+		qcom,dsi-pll-ssc-mode = "down-spread";
+
+		qcom,platform-supply-entries {
+			#address-cells = <1>;
+			#size-cells = <0>;
+
+			qcom,platform-supply-entry@0 {
+				reg = <0>;
+				qcom,supply-name = "gdsc";
+				qcom,supply-min-voltage = <0>;
+				qcom,supply-max-voltage = <0>;
+				qcom,supply-enable-load = <0>;
+				qcom,supply-disable-load = <0>;
+			};
+		};
+	};
+
+	mdss_dp_pll: qcom,mdss_dp_pll@c011000 {
+		compatible = "qcom,mdss_dp_pll_8998";
+		status = "ok";
+		label = "MDSS DP PLL";
+		cell-index = <0>;
+		#clock-cells = <1>;
+
+		reg = <0xc011c00 0x190>,
+		      <0xc011000 0x910>,
+		      <0x0c8c2300 0x8>;
+		reg-names = "pll_base", "phy_base", "gdsc_base";
+
+		gdsc-supply = <&gdsc_mdss>;
+
+		clocks = <&clock_mmss clk_mmss_mdss_ahb_clk>,
+			 <&clock_gcc clk_ln_bb_clk1>,
+			 <&clock_gcc clk_gcc_usb3_clkref_clk>;
+		clock-names = "iface_clk", "ref_clk_src", "ref_clk";
+		clock-rate = <0>;
+
+		qcom,platform-supply-entries {
+			#address-cells = <1>;
+			#size-cells = <0>;
+
+			qcom,platform-supply-entry@0 {
+				reg = <0>;
+				qcom,supply-name = "gdsc";
+				qcom,supply-min-voltage = <0>;
+				qcom,supply-max-voltage = <0>;
+				qcom,supply-enable-load = <0>;
+				qcom,supply-disable-load = <0>;
+			};
+		};
+	};
+
+	mdss_hdmi_pll: qcom,mdss_hdmi_pll@c9a0600 {
+		compatible = "qcom,mdss_hdmi_pll_8998";
+		label = "MDSS HDMI PLL";
+		cell-index = <2>;
+		#clock-cells = <1>;
+
+		reg = <0xc9a0600 0xb10>,
+		      <0xc9a1200 0x0e4>,
+		      <0xc8c2300 0x8>;
+		reg-names = "pll_base", "phy_base", "gdsc_base";
+
+		gdsc-supply = <&gdsc_mdss>;
+		vdda-pll-supply = <&pm8998_l2>;
+		vdda-phy-supply = <&pm8998_l12>;
+
+		clocks = <&clock_mmss clk_mmss_mdss_ahb_clk>,
+			 <&clock_gcc clk_gcc_hdmi_clkref_clk>,
+			 <&clock_gcc clk_ln_bb_clk1>;
+		clock-names = "iface_clk", "ref_clk", "ref_clk_src";
+		clock-rate = <0>;
+
+		qcom,platform-supply-entries {
+			#address-cells = <1>;
+			#size-cells = <0>;
+
+			qcom,platform-supply-entry@0 {
+				reg = <0>;
+				qcom,supply-name = "gdsc";
+				qcom,supply-min-voltage = <0>;
+				qcom,supply-max-voltage = <0>;
+				qcom,supply-enable-load = <0>;
+				qcom,supply-disable-load = <0>;
+			};
+			qcom,platform-supply-entry@1 {
+				reg = <1>;
+				qcom,supply-name = "vdda-pll";
+				qcom,supply-min-voltage = <1200000>;
+				qcom,supply-max-voltage = <1200000>;
+				qcom,supply-enable-load = <14200>;
+				qcom,supply-disable-load = <1>;
+			};
+
+			qcom,platform-supply-entry@2 {
+				reg = <2>;
+				qcom,supply-name = "vdda-phy";
+				qcom,supply-min-voltage = <1800000>;
+				qcom,supply-max-voltage = <1800000>;
+				qcom,supply-enable-load = <13100>;
+				qcom,supply-disable-load = <4>;
+			};
+		};
+	};
+};
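
The qcom,platform-supply-entries tables above are consumed by the MDSS PLL driver at probe time. As a hedged sketch of how such a table can be walked, the helper below uses only standard OF accessors; the function name walk_supply_entries() is hypothetical and not part of this patch.

#include <linux/of.h>
#include <linux/printk.h>

static int walk_supply_entries(struct device_node *pll)
{
	struct device_node *root, *entry;

	root = of_get_child_by_name(pll, "qcom,platform-supply-entries");
	if (!root)
		return -ENODEV;

	for_each_child_of_node(root, entry) {
		const char *name;
		u32 min_uv = 0, max_uv = 0;

		if (of_property_read_string(entry, "qcom,supply-name", &name))
			continue;	/* skip malformed entries */
		of_property_read_u32(entry, "qcom,supply-min-voltage", &min_uv);
		of_property_read_u32(entry, "qcom,supply-max-voltage", &max_uv);
		pr_info("supply %s: %u..%u uV\n", name, min_uv, max_uv);
	}
	of_node_put(root);
	return 0;
}
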
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/arch/arm64/boot/dts/qcom/msm8998-pinctrl.dtsi	2019-01-22 16:16:21.195225506 +0100
@@ -0,0 +1,2460 @@
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+&soc {
+	tlmm: pinctrl@03400000 {
+		compatible = "qcom,msm8998-pinctrl";
+		reg = <0x03400000 0xc00000>;
+		interrupts = <0 208 0>;
+		gpio-controller;
+		#gpio-cells = <2>;
+		interrupt-controller;
+		#interrupt-cells = <2>;
+
+		uart_console_active: uart_console_active {
+			mux {
+				pins = "gpio4", "gpio5";
+				function = "blsp_uart8_a";
+			};
+
+			config {
+				pins = "gpio4", "gpio5";
+				drive-strength = <2>;
+				bias-disable;
+			};
+		};
+
+		/* HS UART CONFIGURATION */
+		blsp1_uart1_active: blsp1_uart1_active {
+			mux {
+				pins = "gpio0", "gpio1", "gpio2", "gpio3";
+				function = "blsp_uart1_a";
+			};
+
+			config {
+				pins = "gpio0", "gpio1", "gpio2", "gpio3";
+				drive-strength = <2>;
+				bias-disable;
+			};
+		};
+
+		blsp1_uart1_sleep: blsp1_uart1_sleep {
+			mux {
+				pins = "gpio0", "gpio1", "gpio2", "gpio3";
+				function = "gpio";
+			};
+
+			config {
+				pins = "gpio0", "gpio1", "gpio2", "gpio3";
+				drive-strength = <2>;
+				bias-disable;
+			};
+		};
+
+		blsp1_uart2_active: blsp1_uart2_active {
+			mux {
+				pins = "gpio31", "gpio34", "gpio33", "gpio32";
+				function = "blsp_uart2_a";
+			};
+
+			config {
+				pins = "gpio31", "gpio34", "gpio33", "gpio32";
+				drive-strength = <2>;
+				bias-disable;
+			};
+		};
+
+		blsp1_uart2_sleep: blsp1_uart2_sleep {
+			mux {
+				pins = "gpio31", "gpio34", "gpio33", "gpio32";
+				function = "gpio";
+			};
+
+			config {
+				pins = "gpio31", "gpio34", "gpio33", "gpio32";
+				drive-strength = <2>;
+				bias-disable;
+			};
+		};
+
+		blsp1_uart3: blsp1_uart3 {
+			blsp1_uart3_tx_active: blsp1_uart3_tx_active {
+				mux {
+					pins = "gpio45";
+					function = "blsp_uart3_a";
+				};
+
+				config {
+					pins = "gpio45";
+					drive-strength = <2>;
+					bias-disable;
+				};
+			};
+
+			blsp1_uart3_tx_sleep: blsp1_uart3_tx_sleep {
+				mux {
+					pins = "gpio45";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio45";
+					drive-strength = <2>;
+					bias-pull-up;
+				};
+			};
+
+			blsp1_uart3_rxcts_active: blsp1_uart3_rxcts_active {
+				mux {
+					pins = "gpio46", "gpio47";
+					function = "blsp_uart3_a";
+				};
+
+				config {
+					pins = "gpio46", "gpio47";
+					drive-strength = <2>;
+					bias-disable;
+				};
+			};
+
+			blsp1_uart3_rxcts_sleep: blsp1_uart3_rxcts_sleep {
+				mux {
+					pins = "gpio46", "gpio47";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio46", "gpio47";
+					drive-strength = <2>;
+					bias-disable;
+				};
+			};
+
+			blsp1_uart3_rfr_active: blsp1_uart3_rfr_active {
+				mux {
+					pins = "gpio48";
+					function = "blsp_uart3_a";
+				};
+
+				config {
+					pins = "gpio48";
+					drive-strength = <2>;
+					bias-disable;
+				};
+			};
+
+			blsp1_uart3_rfr_sleep: blsp1_uart3_rfr_sleep {
+				mux {
+					pins = "gpio48";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio48";
+					drive-strength = <2>;
+					bias-disable;
+				};
+			};
+		};
+
+		blsp2_uart1_active: blsp2_uart1_active {
+			mux {
+				pins = "gpio53", "gpio54", "gpio55", "gpio56";
+				function = "blsp_uart7_a";
+			};
+
+			config {
+				pins = "gpio53", "gpio54", "gpio55", "gpio56";
+				drive-strength = <2>;
+				bias-disable;
+			};
+		};
+
+		blsp2_uart1_sleep: blsp2_uart1_sleep {
+			mux {
+				pins = "gpio53", "gpio54", "gpio55", "gpio56";
+				function = "gpio";
+			};
+
+			config {
+				pins = "gpio53", "gpio54", "gpio55", "gpio56";
+				drive-strength = <2>;
+				bias-disable;
+			};
+		};
+
+		blsp2_uart2_active: blsp2_uart2_active {
+			mux {
+				pins = "gpio4", "gpio5", "gpio6", "gpio7";
+				function = "blsp_uart8_a";
+			};
+
+			config {
+				pins = "gpio4", "gpio5", "gpio6", "gpio7";
+				drive-strength = <2>;
+				bias-disable;
+			};
+		};
+
+		blsp2_uart2_sleep: blsp2_uart2_sleep {
+			mux {
+				pins = "gpio4", "gpio5", "gpio6", "gpio7";
+				function = "gpio";
+			};
+
+			config {
+				pins = "gpio4", "gpio5", "gpio6", "gpio7";
+				drive-strength = <2>;
+				bias-disable;
+			};
+		};
+
+		blsp2_uart3_active: blsp2_uart3_active {
+			mux {
+				pins = "gpio49", "gpio50", "gpio51", "gpio52";
+				function = "blsp_uart9_a";
+			};
+
+			config {
+				pins = "gpio49", "gpio50", "gpio51", "gpio52";
+				drive-strength = <2>;
+				bias-disable;
+			};
+		};
+
+		blsp2_uart3_sleep: blsp2_uart3_sleep {
+			mux {
+				pins = "gpio49", "gpio50", "gpio51", "gpio52";
+				function = "gpio";
+			};
+
+			config {
+				pins = "gpio49", "gpio50", "gpio51", "gpio52";
+				drive-strength = <2>;
+				bias-disable;
+			};
+		};
+
+		/* PCIE CONFIGURATION */
+
+		pcie0 {
+			pcie0_clkreq_default: pcie0_clkreq_default {
+				mux {
+					pins = "gpio36";
+					function = "pci_e0";
+				};
+
+				config {
+					pins = "gpio36";
+					drive-strength = <2>;
+					bias-pull-up;
+				};
+			};
+
+			pcie0_perst_default: pcie0_perst_default {
+				mux {
+					pins = "gpio35";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio35";
+					drive-strength = <2>;
+					bias-pull-down;
+				};
+			};
+
+			pcie0_wake_default: pcie0_wake_default {
+				mux {
+					pins = "gpio37";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio37";
+					drive-strength = <2>;
+					bias-pull-up;
+				};
+			};
+
+			pcie0_wake_sleep: pcie0_wake_sleep {
+				mux {
+					pins = "gpio37";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio37";
+					drive-strength = <2>;
+					bias-pull-up;
+				};
+			};
+		};
+
+		/* I2C CONFIGURATION */
+		i2c_1 {
+			i2c_1_active: i2c_1_active {
+				mux {
+					pins = "gpio2", "gpio3";
+					function = "blsp_i2c1";
+				};
+
+				config {
+					pins = "gpio2", "gpio3";
+					drive-strength = <2>;
+					bias-disable;
+				};
+			};
+
+			i2c_1_sleep: i2c_1_sleep {
+				mux {
+					pins = "gpio2", "gpio3";
+					function = "blsp_i2c1";
+				};
+
+				config {
+					pins = "gpio2", "gpio3";
+					drive-strength = <2>;
+					bias-pull-up;
+				};
+			};
+		};
+
+		i2c_2 {
+			i2c_2_active: i2c_2_active {
+				mux {
+					pins = "gpio32", "gpio33";
+					function = "blsp_i2c2";
+				};
+
+				config {
+					pins = "gpio32", "gpio33";
+					drive-strength = <2>;
+					bias-disable;
+				};
+			};
+
+			i2c_2_sleep: i2c_2_sleep {
+				mux {
+					pins = "gpio32", "gpio33";
+					function = "blsp_i2c2";
+				};
+
+				config {
+					pins = "gpio32", "gpio33";
+					drive-strength = <2>;
+					bias-pull-up;
+				};
+			};
+		};
+
+		i2c_3 {
+			i2c_3_active: i2c_3_active {
+				mux {
+					pins = "gpio47", "gpio48";
+					function = "blsp_i2c3";
+				};
+
+				config {
+					pins = "gpio47", "gpio48";
+					drive-strength = <2>;
+					bias-disable;
+				};
+			};
+
+			i2c_3_sleep: i2c_3_sleep {
+				mux {
+					pins = "gpio47", "gpio48";
+					function = "blsp_i2c3";
+				};
+
+				config {
+					pins = "gpio47", "gpio48";
+					drive-strength = <2>;
+					bias-pull-up;
+				};
+			};
+		};
+
+		i2c_4 {
+			i2c_4_active: i2c_4_active {
+				mux {
+					pins = "gpio10", "gpio11";
+					function = "blsp_i2c4";
+				};
+
+				config {
+					pins = "gpio10", "gpio11";
+					drive-strength = <2>;
+					bias-disable;
+				};
+			};
+
+			i2c_4_sleep: i2c_4_sleep {
+				mux {
+					pins = "gpio10", "gpio11";
+					function = "blsp_i2c4";
+				};
+
+				config {
+					pins = "gpio10", "gpio11";
+					drive-strength = <2>;
+					bias-pull-up;
+				};
+			};
+		};
+
+		i2c_5 {
+			i2c_5_active: i2c_5_active {
+				mux {
+					pins = "gpio87", "gpio88";
+					function = "blsp_i2c5";
+				};
+
+				config {
+					pins = "gpio87", "gpio88";
+					drive-strength = <2>;
+					bias-disable;
+				};
+			};
+
+			i2c_5_sleep: i2c_5_sleep {
+				mux {
+					pins = "gpio87", "gpio88";
+					function = "blsp_i2c5";
+				};
+
+				config {
+					pins = "gpio87", "gpio88";
+					drive-strength = <2>;
+					bias-pull-up;
+				};
+			};
+		};
+
+		i2c_6 {
+			i2c_6_active: i2c_6_active {
+				mux {
+					pins = "gpio43", "gpio44";
+					function = "blsp_i2c6";
+				};
+
+				config {
+					pins = "gpio43", "gpio44";
+					drive-strength = <2>;
+					bias-disable;
+				};
+			};
+
+			i2c_6_sleep: i2c_6_sleep {
+				mux {
+					pins = "gpio43", "gpio44";
+					function = "blsp_i2c6";
+				};
+
+				config {
+					pins = "gpio43", "gpio44";
+					drive-strength = <2>;
+					bias-pull-up;
+				};
+			};
+		};
+
+		i2c_7 {
+			i2c_7_active: i2c_7_active {
+				mux {
+					pins = "gpio55", "gpio56";
+					function = "blsp_i2c7";
+				};
+
+				config {
+					pins = "gpio55", "gpio56";
+					drive-strength = <2>;
+					bias-disable;
+				};
+			};
+
+			i2c_7_sleep: i2c_7_sleep {
+				mux {
+					pins = "gpio55", "gpio56";
+					function = "blsp_i2c7";
+				};
+
+				config {
+					pins = "gpio55", "gpio56";
+					drive-strength = <2>;
+					bias-pull-up;
+				};
+			};
+		};
+
+		i2c_8 {
+			i2c_8_active: i2c_8_active {
+				mux {
+					pins = "gpio6", "gpio7";
+					function = "blsp_i2c8";
+				};
+
+				config {
+					pins = "gpio6", "gpio7";
+					drive-strength = <2>;
+					bias-disable;
+				};
+			};
+
+			i2c_8_sleep: i2c_8_sleep {
+				mux {
+					pins = "gpio6", "gpio7";
+					function = "blsp_i2c8";
+				};
+
+				config {
+					pins = "gpio6", "gpio7";
+					drive-strength = <2>;
+					bias-pull-up;
+				};
+			};
+		};
+
+		i2c_9 {
+			i2c_9_active: i2c_9_active {
+				mux {
+					pins = "gpio51", "gpio52";
+					function = "blsp_i2c9";
+				};
+
+				config {
+					pins = "gpio51", "gpio52";
+					drive-strength = <2>;
+					bias-disable;
+				};
+			};
+
+			i2c_9_sleep: i2c_9_sleep {
+				mux {
+					pins = "gpio51", "gpio52";
+					function = "blsp_i2c9";
+				};
+
+				config {
+					pins = "gpio51", "gpio52";
+					drive-strength = <2>;
+					bias-pull-up;
+				};
+			};
+		};
+
+		i2c_10 {
+			i2c_10_active: i2c_10_active {
+				mux {
+					pins = "gpio67", "gpio68";
+					function = "blsp_i2c10";
+				};
+
+				config {
+					pins = "gpio67", "gpio68";
+					drive-strength = <2>;
+					bias-disable;
+				};
+			};
+
+			i2c_10_sleep: i2c_10_sleep {
+				mux {
+					pins = "gpio67", "gpio68";
+					function = "blsp_i2c10";
+				};
+
+				config {
+					pins = "gpio67", "gpio68";
+					drive-strength = <2>;
+					bias-pull-up;
+				};
+			};
+		};
+
+		i2c_11 {
+			i2c_11_active: i2c_11_active {
+				mux {
+					pins = "gpio60", "gpio61";
+					function = "blsp_i2c11";
+				};
+
+				config {
+					pins = "gpio60", "gpio61";
+					drive-strength = <2>;
+					bias-disable;
+				};
+			};
+
+			i2c_11_sleep: i2c_11_sleep {
+				mux {
+					pins = "gpio60", "gpio61";
+					function = "blsp_i2c11";
+				};
+
+				config {
+					pins = "gpio60", "gpio61";
+					drive-strength = <2>;
+					bias-pull-up;
+				};
+			};
+		};
+
+		i2c_12 {
+			i2c_12_active: i2c_12_active {
+				mux {
+					pins = "gpio83", "gpio84";
+					function = "blsp_i2c12";
+				};
+
+				config {
+					pins = "gpio83", "gpio84";
+					drive-strength = <2>;
+					bias-disable;
+				};
+			};
+
+			i2c_12_sleep: i2c_12_sleep {
+				mux {
+					pins = "gpio83", "gpio84";
+					function = "blsp_i2c12";
+				};
+
+				config {
+					pins = "gpio83", "gpio84";
+					drive-strength = <2>;
+					bias-pull-up;
+				};
+			};
+		};
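
Each bus above exposes a paired active/sleep state that the owning BLSP driver switches at runtime. Below is a hedged sketch using the generic pinctrl consumer API; the select_pin_state() helper and the "active"/"sleep" pinctrl-names are illustrative assumptions, not taken from this patch.

#include <linux/device.h>
#include <linux/err.h>
#include <linux/pinctrl/consumer.h>

/* Assumes the consumer node declares, e.g. for the first I2C bus:
 *
 *	pinctrl-names = "active", "sleep";
 *	pinctrl-0 = <&i2c_1_active>;
 *	pinctrl-1 = <&i2c_1_sleep>;
 */
static int select_pin_state(struct device *dev, bool active)
{
	struct pinctrl *p;
	struct pinctrl_state *s;

	p = devm_pinctrl_get(dev);
	if (IS_ERR(p))
		return PTR_ERR(p);

	s = pinctrl_lookup_state(p, active ? "active" : "sleep");
	if (IS_ERR(s))
		return PTR_ERR(s);

	return pinctrl_select_state(p, s);
}
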
+
+		/* SPI CONFIGURATION */
+
+		spi_1 {
+			spi_1_active: spi_1_active {
+				mux {
+					pins = "gpio0", "gpio1",
+							"gpio2", "gpio3";
+					function = "blsp_spi1";
+				};
+
+				config {
+					pins = "gpio0", "gpio1",
+							"gpio2", "gpio3";
+					drive-strength = <6>;
+					bias-disable;
+				};
+			};
+
+			spi_1_sleep: spi_1_sleep {
+				mux {
+					pins = "gpio0", "gpio1",
+							"gpio2", "gpio3";
+					function = "blsp_spi1";
+				};
+
+				config {
+					pins = "gpio0", "gpio1",
+							"gpio2", "gpio3";
+					drive-strength = <6>;
+					bias-disable;
+				};
+			};
+
+			spi_1b_active: spi_1b_active {
+				mux {
+					pins = "gpio23", "gpio28";
+					function = "blsp1_spi_b";
+				};
+
+				config {
+					pins = "gpio23", "gpio28";
+					drive-strength = <6>;
+					bias-disable;
+				};
+			};
+
+			spi_1b_sleep: spi_1b_sleep {
+				mux {
+					pins = "gpio23", "gpio28";
+					function = "blsp1_spi_b";
+				};
+
+				config {
+					pins = "gpio23", "gpio28";
+					drive-strength = <6>;
+					bias-disable;
+				};
+			};
+		};
+
+		spi_2 {
+			spi_2_active: spi_2_active {
+				mux {
+					pins = "gpio31", "gpio34",
+							"gpio32", "gpio33";
+					function = "blsp_spi2";
+				};
+
+				config {
+					pins = "gpio31", "gpio34",
+							"gpio32", "gpio33";
+					drive-strength = <6>;
+					bias-disable;
+				};
+			};
+
+			spi_2_sleep: spi_2_sleep {
+				mux {
+					pins = "gpio31", "gpio34",
+							"gpio32", "gpio33";
+					function = "blsp_spi2";
+				};
+
+				config {
+					pins = "gpio31", "gpio34",
+							"gpio32", "gpio33";
+					drive-strength = <6>;
+					bias-disable;
+				};
+			};
+		};
+
+		spi_3 {
+			spi_3_active: spi_3_active {
+				mux {
+					pins = "gpio45", "gpio46",
+							"gpio47", "gpio48";
+					function = "blsp_spi3";
+				};
+
+				config {
+					pins = "gpio45", "gpio46",
+							"gpio47", "gpio48";
+					drive-strength = <6>;
+					bias-disable;
+				};
+			};
+
+			spi_3_sleep: spi_3_sleep {
+				mux {
+					pins = "gpio45", "gpio46",
+							"gpio47", "gpio48";
+					function = "blsp_spi3";
+				};
+
+				config {
+					pins = "gpio45", "gpio46",
+							"gpio47", "gpio48";
+					drive-strength = <6>;
+					bias-disable;
+				};
+			};
+		};
+
+		spi_4 {
+			spi_4_active: spi_4_active {
+				mux {
+					pins = "gpio8", "gpio9",
+							"gpio10", "gpio11";
+					function = "blsp_spi4";
+				};
+
+				config {
+					pins = "gpio8", "gpio9",
+							"gpio10", "gpio11";
+					drive-strength = <6>;
+					bias-disable;
+				};
+			};
+
+			spi_4_sleep: spi_4_sleep {
+				mux {
+					pins = "gpio8", "gpio9",
+							"gpio10", "gpio11";
+					function = "blsp_spi4";
+				};
+
+				config {
+					pins = "gpio8", "gpio9",
+							"gpio10", "gpio11";
+					drive-strength = <6>;
+					bias-disable;
+				};
+			};
+		};
+
+		spi_5 {
+			spi_5_active: spi_5_active {
+				mux {
+					pins = "gpio85", "gpio86",
+							"gpio87", "gpio88";
+					function = "blsp_spi5";
+				};
+
+				config {
+					pins = "gpio85", "gpio86",
+							"gpio87", "gpio88";
+					drive-strength = <6>;
+					bias-disable;
+				};
+			};
+
+			spi_5_sleep: spi_5_sleep {
+				mux {
+					pins = "gpio85", "gpio86",
+							"gpio87", "gpio88";
+					function = "blsp_spi5";
+				};
+
+				config {
+					pins = "gpio85", "gpio86",
+							"gpio87", "gpio88";
+					drive-strength = <6>;
+					bias-disable;
+				};
+			};
+		};
+
+		spi_6 {
+			spi_6_active: spi_6_active {
+				mux {
+					pins = "gpio41", "gpio42",
+							"gpio43", "gpio44";
+					function = "blsp_spi6";
+				};
+
+				config {
+					pins = "gpio41", "gpio42",
+							"gpio43", "gpio44";
+					drive-strength = <6>;
+					bias-disable;
+				};
+			};
+
+			spi_6_sleep: spi_6_sleep {
+				mux {
+					pins = "gpio41", "gpio42",
+							"gpio43", "gpio44";
+					function = "blsp_spi6";
+				};
+
+				config {
+					pins = "gpio41", "gpio42",
+							"gpio43", "gpio44";
+					drive-strength = <6>;
+					bias-disable;
+				};
+			};
+		};
+
+		spi_7 {
+			spi_7_active: spi_7_active {
+				mux {
+					pins = "gpio53", "gpio54",
+							"gpio55", "gpio56";
+					function = "blsp_spi7";
+				};
+
+				config {
+					pins = "gpio53", "gpio54",
+							"gpio55", "gpio56";
+					drive-strength = <6>;
+					bias-disable;
+				};
+			};
+
+			spi_7_sleep: spi_7_sleep {
+				mux {
+					pins = "gpio53", "gpio54",
+							"gpio55", "gpio56";
+					function = "blsp_spi7";
+				};
+
+				config {
+					pins = "gpio53", "gpio54",
+							"gpio55", "gpio56";
+					drive-strength = <6>;
+					bias-disable;
+				};
+			};
+		};
+
+		spi_8 {
+			spi_8_active: spi_8_active {
+				mux {
+					pins = "gpio4", "gpio5",
+							"gpio6", "gpio7";
+					function = "blsp_spi8";
+				};
+
+				config {
+					pins = "gpio4", "gpio5",
+							"gpio6", "gpio7";
+					drive-strength = <6>;
+					bias-disable;
+				};
+			};
+
+			spi_8_sleep: spi_8_sleep {
+				mux {
+					pins = "gpio4", "gpio5",
+							"gpio6", "gpio7";
+					function = "blsp_spi8";
+				};
+
+				config {
+					pins = "gpio4", "gpio5",
+							"gpio6", "gpio7";
+					drive-strength = <6>;
+					bias-disable;
+				};
+			};
+		};
+
+		spi_9 {
+			spi_9_active: spi_9_active {
+				mux {
+					pins = "gpio49", "gpio50",
+							"gpio51", "gpio52";
+					function = "blsp_spi9";
+				};
+
+				config {
+					pins = "gpio49", "gpio50",
+							"gpio51", "gpio52";
+					drive-strength = <6>;
+					bias-disable;
+				};
+			};
+
+			spi_9_sleep: spi_9_sleep {
+				mux {
+					pins = "gpio49", "gpio50",
+							"gpio51", "gpio52";
+					function = "blsp_spi9";
+				};
+
+				config {
+					pins = "gpio49", "gpio50",
+							"gpio51", "gpio52";
+					drive-strength = <6>;
+					bias-disable;
+				};
+			};
+		};
+
+		spi_10 {
+			spi_10_active: spi_10_active {
+				mux {
+					pins = "gpio65", "gpio66",
+							"gpio67", "gpio68";
+					function = "blsp_spi10";
+				};
+
+				config {
+					pins = "gpio65", "gpio66",
+							"gpio67", "gpio68";
+					drive-strength = <6>;
+					bias-disable;
+				};
+			};
+
+			spi_10_sleep: spi_10_sleep {
+				mux {
+					pins = "gpio65", "gpio66",
+							"gpio67", "gpio68";
+					function = "blsp_spi10";
+				};
+
+				config {
+					pins = "gpio65", "gpio66",
+							"gpio67", "gpio68";
+					drive-strength = <6>;
+					bias-disable;
+				};
+			};
+		};
+
+		spi_11 {
+			spi_11_active: spi_11_active {
+				mux {
+					pins = "gpio58", "gpio59",
+							"gpio60", "gpio61";
+					function = "blsp_spi11";
+				};
+
+				config {
+					pins = "gpio58", "gpio59",
+							"gpio60", "gpio61";
+					drive-strength = <6>;
+					bias-disable;
+				};
+			};
+
+			spi_11_sleep: spi_11_sleep {
+				mux {
+					pins = "gpio58", "gpio59",
+							"gpio60", "gpio61";
+					function = "blsp_spi11";
+				};
+
+				config {
+					pins = "gpio58", "gpio59",
+							"gpio60", "gpio61";
+					drive-strength = <6>;
+					bias-disable;
+				};
+			};
+		};
+
+		spi_12 {
+			spi_12_active: spi_12_active {
+				mux {
+					pins = "gpio81", "gpio82",
+							"gpio83", "gpio84";
+					function = "blsp_spi12";
+				};
+
+				config {
+					pins = "gpio81", "gpio82",
+							"gpio83", "gpio84";
+					drive-strength = <6>;
+					bias-disable;
+				};
+			};
+
+			spi_12_sleep: spi_12_sleep {
+				mux {
+					pins = "gpio81", "gpio82",
+							"gpio83", "gpio84";
+					function = "blsp_spi12";
+				};
+
+				config {
+					pins = "gpio81", "gpio82",
+							"gpio83", "gpio84";
+					drive-strength = <6>;
+					bias-disable;
+				};
+			};
+		};
+
+		/* CAMERA CONFIGURATION */
+
+		cci0_active: cci0_active {
+			mux {
+				/* CLK, DATA */
+				pins = "gpio17","gpio18"; // Only 2
+				function = "cci_i2c";
+			};
+
+			config {
+				pins = "gpio17","gpio18";
+				bias-pull-up; /* PULL UP*/
+				drive-strength = <2>; /* 2 MA */
+			};
+		};
+
+		cci0_suspend: cci0_suspend {
+			mux {
+				/* CLK, DATA */
+				pins = "gpio17","gpio18";
+				function = "cci_i2c";
+			};
+
+			config {
+				pins = "gpio17","gpio18";
+				bias-pull-down; /* PULL DOWN */
+				drive-strength = <2>; /* 2 MA */
+			};
+		};
+
+		cci1_active: cci1_active {
+			mux {
+				/* CLK, DATA */
+				pins = "gpio19","gpio20";
+				function = "cci_i2c";
+			};
+
+			config {
+				pins = "gpio19","gpio20";
+				bias-pull-up; /* PULL UP*/
+				drive-strength = <2>; /* 2 MA */
+			};
+		};
+
+		cci1_suspend: cci1_suspend {
+			mux {
+				/* CLK, DATA */
+				pins = "gpio19","gpio20";
+				function = "cci_i2c";
+			};
+
+			config {
+				pins = "gpio19","gpio20";
+				bias-pull-down; /* PULL DOWN */
+				drive-strength = <2>; /* 2 MA */
+			};
+		};
+
+		/* MDSS CONFIGURATION */
+
+		mdss_dp_aux_active: mdss_dp_aux_active {
+			mux {
+				pins = "gpio77", "gpio78";
+				function = "gpio";
+			};
+
+			config {
+				pins = "gpio77", "gpio78";
+				bias-disable; /* no pull */
+				drive-strength = <8>;
+			};
+		};
+
+		mdss_dp_aux_suspend: mdss_dp_aux_suspend {
+			mux {
+				pins = "gpio77", "gpio78";
+				function = "gpio";
+			};
+
+			config {
+				pins = "gpio77", "gpio78";
+				bias-pull-down;
+				drive-strength = <2>;
+			};
+		};
+
+		mdss_dp_usbplug_cc_active: mdss_dp_usbplug_cc_active {
+			mux {
+				pins = "gpio38";
+				function = "gpio";
+			};
+
+			config {
+				pins = "gpio38";
+				bias-disable;
+				drive-strength = <16>;
+			};
+		};
+
+		mdss_dp_usbplug_cc_suspend: mdss_dp_usbplug_cc_suspend {
+			mux {
+				pins = "gpio38";
+				function = "gpio";
+			};
+
+			config {
+				pins = "gpio38";
+				bias-pull-down;
+				drive-strength = <2>;
+			};
+		};
+
+		mdss_dp_hpd_active: mdss_dp_hpd_active {
+			mux {
+				pins = "gpio34";
+				function = "edp_hot";
+			};
+
+			config {
+				pins = "gpio34";
+				bias-pull-down;
+				drive-strength = <16>;
+			};
+		};
+
+		mdss_dp_hpd_suspend: mdss_dp_hpd_suspend {
+			mux {
+				pins = "gpio34";
+				function = "edp_hot";
+			};
+
+			config {
+				pins = "gpio34";
+				bias-pull-down;
+				drive-strength = <2>;
+			};
+		};
+
+		mdss_hdmi_5v_active: mdss_hdmi_5v_active {
+			mux {
+				pins = "gpio133";
+				function = "gpio";
+			};
+
+			config {
+				pins = "gpio133";
+				bias-pull-up;
+				drive-strength = <16>;
+			};
+		};
+
+		mdss_hdmi_5v_suspend: mdss_hdmi_5v_suspend {
+			mux {
+				pins = "gpio133";
+				function = "gpio";
+			};
+
+			config {
+				pins = "gpio133";
+				bias-pull-down;
+				drive-strength = <2>;
+			};
+		};
+
+		mdss_hdmi_hpd_active: mdss_hdmi_hpd_active {
+			mux {
+				pins = "gpio34";
+				function = "hdmi_hot";
+			};
+
+			config {
+				pins = "gpio34";
+				bias-pull-down;
+				drive-strength = <16>;
+			};
+		};
+
+		mdss_hdmi_hpd_suspend: mdss_hdmi_hpd_suspend {
+			mux {
+				pins = "gpio34";
+				function = "hdmi_hot";
+			};
+
+			config {
+				pins = "gpio34";
+				bias-pull-down;
+				drive-strength = <2>;
+			};
+		};
+
+		mdss_hdmi_ddc_active: mdss_hdmi_ddc_active {
+			mux {
+				pins = "gpio32", "gpio33";
+				function = "hdmi_ddc";
+			};
+
+			config {
+				pins = "gpio32", "gpio33";
+				drive-strength = <2>;
+				bias-pull-up;
+			};
+		};
+
+		mdss_hdmi_ddc_suspend: mdss_hdmi_ddc_suspend {
+			mux {
+				pins = "gpio32", "gpio33";
+				function = "hdmi_ddc";
+			};
+
+			config {
+				pins = "gpio32", "gpio33";
+				drive-strength = <2>;
+				bias-pull-up;
+			};
+		};
+
+		mdss_hdmi_cec_active: mdss_hdmi_cec_active {
+			mux {
+				pins = "gpio31";
+				function = "hdmi_cec";
+			};
+
+			config {
+				pins = "gpio31";
+				drive-strength = <2>;
+				bias-pull-up;
+			};
+		};
+
+		mdss_hdmi_cec_suspend: mdss_hdmi_cec_suspend {
+			mux {
+				pins = "gpio31";
+				function = "hdmi_cec";
+			};
+
+			config {
+				pins = "gpio31";
+				drive-strength = <2>;
+				bias-pull-up;
+			};
+		};
+
+		/* UFS CONFIGURATION */
+
+		ufs_dev_reset_assert: ufs_dev_reset_assert {
+			config {
+				pins = "ufs_reset";
+				bias-pull-down;		/* default: pull down */
+				/*
+				 * UFS_RESET driver strengths are having
+				 * different values/steps compared to typical
+				 * GPIO drive strengths.
+				 *
+				 * Following table clarifies:
+				 *
+				 * HDRV value | UFS_RESET | Typical GPIO
+				 *   (dec)    |   (mA)    |    (mA)
+				 *     0      |   0.8     |    2
+				 *     1      |   1.55    |    4
+				 *     2      |   2.35    |    6
+				 *     3      |   3.1     |    8
+				 *     4      |   3.9     |    10
+				 *     5      |   4.65    |    12
+				 *     6      |   5.4     |    14
+				 *     7      |   6.15    |    16
+				 *
+				 * POR value for UFS_RESET HDRV is 3 which means
+				 * 3.1mA and we want to use that. Hence just
+				 * specify 8mA to "drive-strength" binding and
+				 * that should result into writing 3 to HDRV
+				 * field.
+				 */
+				drive-strength = <8>;	/* default: 3.1 mA */
+				output-low; /* active low reset */
+			};
+		};
+
+		ufs_dev_reset_deassert: ufs_dev_reset_deassert {
+			config {
+				pins = "ufs_reset";
+				bias-pull-down;		/* default: pull down */
+				/*
+				 * default: 3.1 mA
+				 * check comments under ufs_dev_reset_assert
+				 */
+				drive-strength = <8>;
+				output-high; /* active low reset */
+			};
+		};
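
The HDRV table in the comment above can be checked mechanically. This standalone sketch (values transcribed from that table; nothing here comes from the driver itself) maps a generic "drive-strength" request back to the HDRV field and the UFS_RESET current it implies:

#include <stdio.h>

/* Transcribed from the UFS_RESET comment table above. */
static const unsigned int gpio_ma[8]      = { 2, 4, 6, 8, 10, 12, 14, 16 };
static const unsigned int ufs_reset_ca[8] = { 80, 155, 235, 310, 390, 465, 540, 615 }; /* 1/100 mA */

/* Map a "drive-strength" mA value (typical-GPIO scale) to the HDRV field. */
static int ma_to_hdrv(unsigned int ma)
{
	for (int i = 0; i < 8; i++)
		if (gpio_ma[i] == ma)
			return i;
	return -1;	/* unsupported value */
}

int main(void)
{
	int hdrv = ma_to_hdrv(8);	/* drive-strength = <8> above */

	if (hdrv >= 0)	/* prints: HDRV=3 -> UFS_RESET 3.10 mA */
		printf("HDRV=%d -> UFS_RESET %u.%02u mA\n", hdrv,
		       ufs_reset_ca[hdrv] / 100, ufs_reset_ca[hdrv] % 100);
	return 0;
}
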
+
+		/* SDHCI CONFIGURATION */
+
+		sdc2_clk_on: sdc2_clk_on {
+			config {
+				pins = "sdc2_clk";
+				bias-disable;		/* NO pull */
+				drive-strength = <16>;	/* 16 MA */
+			};
+		};
+
+		sdc2_clk_off: sdc2_clk_off {
+			config {
+				pins = "sdc2_clk";
+				bias-disable;		/* NO pull */
+				drive-strength = <2>;	/* 2 MA */
+			};
+		};
+
+		sdc2_cmd_on: sdc2_cmd_on {
+			config {
+				pins = "sdc2_cmd";
+				bias-pull-up;		/* pull up */
+				drive-strength = <10>;	/* 10 MA */
+			};
+		};
+
+		sdc2_cmd_off: sdc2_cmd_off {
+			config {
+				pins = "sdc2_cmd";
+				bias-pull-up;		/* pull up */
+				drive-strength = <2>;	/* 2 MA */
+			};
+		};
+
+		sdc2_data_on: sdc2_data_on {
+			config {
+				pins = "sdc2_data";
+				bias-pull-up;		/* pull up */
+				drive-strength = <10>;	/* 10 MA */
+			};
+		};
+
+		sdc2_data_off: sdc2_data_off {
+			config {
+				pins = "sdc2_data";
+				bias-pull-up;		/* pull up */
+				drive-strength = <2>;	/* 2 MA */
+			};
+		};
+
+		sdc2_cd_on: sdc2_cd_on {
+			mux {
+				pins = "gpio95";
+				function = "gpio";
+			};
+
+			config {
+				pins = "gpio95";
+				bias-pull-up;           /* pull up */
+				drive-strength = <2>;   /* 2 MA */
+			};
+		};
+
+		sdc2_cd_off: sdc2_cd_off {
+			mux {
+				pins = "gpio95";
+				function = "gpio";
+			};
+
+			config {
+				pins = "gpio95";
+				bias-pull-up;           /* pull up */
+				drive-strength = <2>;   /* 2 MA */
+			};
+		};
+
+		/* CORESIGHT */
+
+		trigout_a: trigout_a {
+			mux {
+				pins = "gpio58";
+				function = "qdss_cti1_a";
+			};
+
+			config {
+				pins = "gpio58";
+				drive-strength = <2>;
+				bias-disable;
+			};
+		};
+
+		/* TSPP CONFIGURATION */
+
+		tsif0_signals_active: tsif0_signals_active {
+			tsif1_clk {
+				pins = "gpio89"; /* TSIF0 CLK */
+				function = "tsif1_clk";
+			};
+			tsif1_en {
+				pins = "gpio90"; /* TSIF0 Enable */
+				function = "tsif1_en";
+			};
+			tsif1_data {
+				pins = "gpio91"; /* TSIF0 DATA */
+				function = "tsif1_data";
+			};
+			signals_cfg {
+				pins = "gpio89", "gpio90", "gpio91";
+				drive-strength = <2>;	/* 2 mA */
+				bias-pull-down;		/* pull down */
+			};
+		};
+
+		tsif0_sync_active: tsif0_sync_active {
+			tsif1_sync {
+				pins = "gpio9";	/* TSIF0 SYNC */
+				function = "tsif1_sync";
+				drive-strength = <2>;	/* 2 mA */
+				bias-pull-down;		/* pull down */
+			};
+		};
+
+		tsif1_signals_active: tsif1_signals_active {
+			tsif2_clk {
+				pins = "gpio93"; /* TSIF1 CLK */
+				function = "tsif2_clk";
+			};
+			tsif2_en {
+				pins = "gpio94"; /* TSIF1 Enable */
+				function = "tsif2_en";
+			};
+			tsif2_data {
+				pins = "gpio95"; /* TSIF1 DATA */
+				function = "tsif2_data";
+			};
+			signals_cfg {
+				pins = "gpio93", "gpio94", "gpio95";
+				drive-strength = <2>;	/* 2 mA */
+				bias-pull-down;		/* pull down */
+			};
+		};
+
+		tsif1_sync_active: tsif1_sync_active {
+			tsif2_sync {
+				pins = "gpio96";	/* TSIF1 SYNC */
+				function = "tsif2_sync";
+				drive-strength = <2>;	/* 2 mA */
+				bias-pull-down;		/* pull down */
+			};
+		};
+
+		/* DIGITAL AUDIO CONFIGURATION */
+
+		pri_aux_pcm_clk {
+			pri_aux_pcm_clk_sleep: pri_aux_pcm_clk_sleep {
+				mux {
+					pins = "gpio65";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio65";
+					drive-strength = <2>;   /* 2 mA */
+					bias-pull-down;         /* PULL DOWN */
+					input-enable;
+				};
+			};
+
+			pri_aux_pcm_clk_active: pri_aux_pcm_clk_active {
+				mux {
+					pins = "gpio65";
+					function = "pri_mi2s";
+				};
+
+				config {
+					pins = "gpio65";
+					drive-strength = <8>;   /* 8 mA */
+					bias-disable;           /* NO PULL */
+					output-high;
+				};
+			};
+		};
+
+		pri_aux_pcm_sync {
+			pri_aux_pcm_sync_sleep: pri_aux_pcm_sync_sleep {
+				mux {
+					pins = "gpio66";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio66";
+					drive-strength = <2>;   /* 2 mA */
+					bias-pull-down;         /* PULL DOWN */
+					input-enable;
+				};
+			};
+
+			pri_aux_pcm_sync_active: pri_aux_pcm_sync_active {
+				mux {
+					pins = "gpio66";
+					function = "pri_mi2s_ws";
+				};
+
+				config {
+					pins = "gpio66";
+					drive-strength = <8>;   /* 8 mA */
+					bias-disable;           /* NO PULL */
+					output-high;
+				};
+			};
+		};
+
+		pri_aux_pcm_din {
+			pri_aux_pcm_din_sleep: pri_aux_pcm_din_sleep {
+				mux {
+					pins = "gpio67";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio67";
+					drive-strength = <2>;   /* 2 mA */
+					bias-pull-down;         /* PULL DOWN */
+					input-enable;
+				};
+			};
+
+			pri_aux_pcm_din_active: pri_aux_pcm_din_active {
+				mux {
+					pins = "gpio67";
+					function = "pri_mi2s";
+				};
+
+				config {
+					pins = "gpio67";
+					drive-strength = <8>;   /* 8 mA */
+					bias-disable;           /* NO PULL */
+				};
+			};
+		};
+
+		pri_aux_pcm_dout {
+			pri_aux_pcm_dout_sleep: pri_aux_pcm_dout_sleep {
+				mux {
+					pins = "gpio68";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio68";
+					drive-strength = <2>;   /* 2 mA */
+					bias-pull-down;         /* PULL DOWN */
+					input-enable;
+				};
+			};
+
+			pri_aux_pcm_dout_active: pri_aux_pcm_dout_active {
+				mux {
+					pins = "gpio68";
+					function = "pri_mi2s";
+				};
+
+				config {
+					pins = "gpio68";
+					drive-strength = <8>;   /* 8 mA */
+					bias-disable;           /* NO PULL */
+				};
+			};
+		};
+
+		sec_aux_pcm {
+			sec_aux_pcm_sleep: sec_aux_pcm_sleep {
+				mux {
+					pins = "gpio80", "gpio81";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio80", "gpio81";
+					drive-strength = <2>;   /* 2 mA */
+					bias-pull-down;         /* PULL DOWN */
+					input-enable;
+				};
+			};
+
+			sec_aux_pcm_active: sec_aux_pcm_active {
+				mux {
+					pins = "gpio80", "gpio81";
+					function = "sec_mi2s";
+				};
+
+				config {
+					pins = "gpio80", "gpio81";
+					drive-strength = <8>;   /* 8 mA */
+					bias-disable;           /* NO PULL */
+				};
+			};
+		};
+
+		sec_aux_pcm_din {
+			sec_aux_pcm_din_sleep: sec_aux_pcm_din_sleep {
+				mux {
+					pins = "gpio82";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio82";
+					drive-strength = <2>;   /* 2 mA */
+					bias-pull-down;         /* PULL DOWN */
+					input-enable;
+				};
+			};
+
+			sec_aux_pcm_din_active: sec_aux_pcm_din_active {
+				mux {
+					pins = "gpio82";
+					function = "sec_mi2s";
+				};
+
+				config {
+					pins = "gpio82";
+					drive-strength = <8>;   /* 8 mA */
+					bias-disable;           /* NO PULL */
+				};
+			};
+		};
+
+		sec_aux_pcm_dout {
+			sec_aux_pcm_dout_sleep: sec_aux_pcm_dout_sleep {
+				mux {
+					pins = "gpio83";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio83";
+					drive-strength = <2>;   /* 2 mA */
+					bias-pull-down;         /* PULL DOWN */
+					input-enable;
+				};
+			};
+
+			sec_aux_pcm_dout_active: sec_aux_pcm_dout_active {
+				mux {
+					pins = "gpio83";
+					function = "sec_mi2s";
+				};
+
+				config {
+					pins = "gpio83";
+					drive-strength = <8>;   /* 8 mA */
+					bias-disable;           /* NO PULL */
+				};
+			};
+		};
+
+		tert_aux_pcm {
+			tert_aux_pcm_sleep: tert_aux_pcm_sleep {
+				mux {
+					pins = "gpio75", "gpio76";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio75", "gpio76";
+					drive-strength = <2>;   /* 2 mA */
+					bias-pull-down;         /* PULL DOWN */
+					input-enable;
+				};
+			};
+
+			tert_aux_pcm_active: tert_aux_pcm_active {
+				mux {
+					pins = "gpio75", "gpio76";
+					function = "ter_mi2s";
+				};
+
+				config {
+					pins = "gpio75", "gpio76";
+					drive-strength = <8>;   /* 8 mA */
+					bias-disable;           /* NO PULL */
+					output-high;
+				};
+			};
+		};
+
+		tert_aux_pcm_din {
+			tert_aux_pcm_din_sleep: tert_aux_pcm_din_sleep {
+				mux {
+					pins = "gpio77";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio77";
+					drive-strength = <2>;   /* 2 mA */
+					bias-pull-down;         /* PULL DOWN */
+					input-enable;
+				};
+			};
+
+			tert_aux_pcm_din_active: tert_aux_pcm_din_active {
+				mux {
+					pins = "gpio77";
+					function = "ter_mi2s";
+				};
+
+				config {
+					pins = "gpio77";
+					drive-strength = <8>;   /* 8 mA */
+					bias-disable;           /* NO PULL */
+				};
+			};
+		};
+
+		tert_aux_pcm_dout {
+			tert_aux_pcm_dout_sleep: tert_aux_pcm_dout_sleep {
+				mux {
+					pins = "gpio78";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio78";
+					drive-strength = <2>;   /* 2 mA */
+					bias-pull-down;         /* PULL DOWN */
+					input-enable;
+				};
+			};
+
+			tert_aux_pcm_dout_active: tert_aux_pcm_dout_active {
+				mux {
+					pins = "gpio78";
+					function = "ter_mi2s";
+				};
+
+				config {
+					pins = "gpio78";
+					drive-strength = <8>;   /* 8 mA */
+					bias-disable;           /* NO PULL */
+				};
+			};
+		};
+
+		quat_aux_pcm {
+			quat_aux_pcm_sleep: quat_aux_pcm_sleep {
+				mux {
+					pins = "gpio58", "gpio59";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio58", "gpio59";
+					drive-strength = <2>;   /* 2 mA */
+					bias-pull-down;         /* PULL DOWN */
+					input-enable;
+				};
+			};
+
+			quat_aux_pcm_active: quat_aux_pcm_active {
+				mux {
+					pins = "gpio58", "gpio59";
+					function = "qua_mi2s";
+				};
+
+				config {
+					pins = "gpio58", "gpio59";
+					drive-strength = <8>;   /* 8 mA */
+					bias-disable;           /* NO PULL */
+					output-high;
+				};
+			};
+		};
+
+		quat_aux_pcm_din {
+			quat_aux_pcm_din_sleep: quat_aux_pcm_din_sleep {
+				mux {
+					pins = "gpio60";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio60";
+					drive-strength = <2>;   /* 2 mA */
+					bias-pull-down;         /* PULL DOWN */
+					input-enable;
+				};
+			};
+
+			quat_aux_pcm_din_active: quat_aux_pcm_din_active {
+				mux {
+					pins = "gpio60";
+					function = "qua_mi2s";
+				};
+
+				config {
+					pins = "gpio60";
+					drive-strength = <8>;   /* 8 mA */
+					bias-disable;           /* NO PULL */
+				};
+			};
+		};
+
+		quat_aux_pcm_dout {
+			quat_aux_pcm_dout_sleep: quat_aux_pcm_dout_sleep {
+				mux {
+					pins = "gpio61";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio61";
+					drive-strength = <2>;   /* 2 mA */
+					bias-pull-down;         /* PULL DOWN */
+					input-enable;
+				};
+			};
+
+			quat_aux_pcm_dout_active: quat_aux_pcm_dout_active {
+				mux {
+					pins = "gpio61";
+					function = "qua_mi2s";
+				};
+
+				config {
+					pins = "gpio61";
+					drive-strength = <8>;   /* 8 mA */
+					bias-disable;           /* NO PULL */
+				};
+			};
+		};
+
+		pri_mi2s_mclk {
+			pri_mi2s_mclk_sleep: pri_mi2s_mclk_sleep {
+				mux {
+					pins = "gpio64";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio64";
+					drive-strength = <2>;   /* 2 mA */
+					bias-pull-down;         /* PULL DOWN */
+					input-enable;
+				};
+			};
+
+			pri_mi2s_mclk_active: pri_mi2s_mclk_active {
+				mux {
+					pins = "gpio64";
+					function = "pri_mi2s";
+				};
+
+				config {
+					pins = "gpio64";
+					drive-strength = <8>;   /* 8 mA */
+					bias-disable;           /* NO PULL */
+					output-high;
+				};
+			};
+		};
+
+		pri_mi2s_sck {
+			pri_mi2s_sck_sleep: pri_mi2s_sck_sleep {
+				mux {
+					pins = "gpio65";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio65";
+					drive-strength = <2>;   /* 2 mA */
+					bias-pull-down;         /* PULL DOWN */
+					input-enable;
+				};
+			};
+
+			pri_mi2s_sck_active: pri_mi2s_sck_active {
+				mux {
+					pins = "gpio65";
+					function = "pri_mi2s";
+				};
+
+				config {
+					pins = "gpio65";
+					drive-strength = <8>;   /* 8 mA */
+					bias-disable;           /* NO PULL */
+					output-high;
+				};
+			};
+		};
+
+		pri_mi2s_ws {
+			pri_mi2s_ws_sleep: pri_mi2s_ws_sleep {
+				mux {
+					pins = "gpio66";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio66";
+					drive-strength = <2>;   /* 2 mA */
+					bias-pull-down;         /* PULL DOWN */
+					input-enable;
+				};
+			};
+
+			pri_mi2s_ws_active: pri_mi2s_ws_active {
+				mux {
+					pins = "gpio66";
+					function = "pri_mi2s_ws";
+				};
+
+				config {
+					pins = "gpio66";
+					drive-strength = <8>;   /* 8 mA */
+					bias-disable;           /* NO PULL */
+					output-high;
+				};
+			};
+		};
+
+		pri_mi2s_sd0 {
+			pri_mi2s_sd0_sleep: pri_mi2s_sd0_sleep {
+				mux {
+					pins = "gpio67";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio67";
+					drive-strength = <2>;   /* 2 mA */
+					bias-pull-down;         /* PULL DOWN */
+					input-enable;
+				};
+			};
+
+			pri_mi2s_sd0_active: pri_mi2s_sd0_active {
+				mux {
+					pins = "gpio67";
+					function = "pri_mi2s";
+				};
+
+				config {
+					pins = "gpio67";
+					drive-strength = <8>;   /* 8 mA */
+					bias-disable;           /* NO PULL */
+				};
+			};
+		};
+
+		pri_mi2s_sd1 {
+			pri_mi2s_sd1_sleep: pri_mi2s_sd1_sleep {
+				mux {
+					pins = "gpio68";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio68";
+					drive-strength = <2>;   /* 2 mA */
+					bias-pull-down;         /* PULL DOWN */
+					input-enable;
+				};
+			};
+
+			pri_mi2s_sd1_active: pri_mi2s_sd1_active {
+				mux {
+					pins = "gpio68";
+					function = "pri_mi2s";
+				};
+
+				config {
+					pins = "gpio68";
+					drive-strength = <8>;   /* 8 mA */
+					bias-disable;           /* NO PULL */
+				};
+			};
+		};
+
+		sec_mi2s_mclk {
+			sec_mi2s_mclk_sleep: sec_mi2s_mclk_sleep {
+				mux {
+					pins = "gpio79";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio79";
+					drive-strength = <2>;   /* 2 mA */
+					bias-pull-down;         /* PULL DOWN */
+					input-enable;
+				};
+			};
+
+			sec_mi2s_mclk_active: sec_mi2s_mclk_active {
+				mux {
+					pins = "gpio79";
+					function = "sec_mi2s";
+				};
+
+				config {
+					pins = "gpio79";
+					drive-strength = <8>;   /* 8 mA */
+					bias-disable;           /* NO PULL */
+				};
+			};
+		};
+
+		sec_mi2s {
+			sec_mi2s_sleep: sec_mi2s_sleep {
+				mux {
+					pins = "gpio80", "gpio81";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio80", "gpio81";
+					drive-strength = <2>;   /* 2 mA */
+					bias-pull-down;         /* PULL DOWN */
+					input-enable;
+				};
+			};
+
+			sec_mi2s_active: sec_mi2s_active {
+				mux {
+					pins = "gpio80", "gpio81";
+					function = "sec_mi2s";
+				};
+
+				config {
+					pins = "gpio80", "gpio81";
+					drive-strength = <8>;   /* 8 mA */
+					bias-disable;           /* NO PULL */
+				};
+			};
+		};
+
+		sec_mi2s_sd0 {
+			sec_mi2s_sd0_sleep: sec_mi2s_sd0_sleep {
+				mux {
+					pins = "gpio82";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio82";
+					drive-strength = <2>;   /* 2 mA */
+					bias-pull-down;         /* PULL DOWN */
+					input-enable;
+				};
+			};
+
+			sec_mi2s_sd0_active: sec_mi2s_sd0_active {
+				mux {
+					pins = "gpio82";
+					function = "sec_mi2s";
+				};
+
+				config {
+					pins = "gpio82";
+					drive-strength = <8>;   /* 8 mA */
+					bias-disable;           /* NO PULL */
+				};
+			};
+		};
+
+		sec_mi2s_sd1 {
+			sec_mi2s_sd1_sleep: sec_mi2s_sd1_sleep {
+				mux {
+					pins = "gpio83";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio83";
+					drive-strength = <2>;   /* 2 mA */
+					bias-pull-down;         /* PULL DOWN */
+					input-enable;
+				};
+			};
+
+			sec_mi2s_sd1_active: sec_mi2s_sd1_active {
+				mux {
+					pins = "gpio83";
+					function = "sec_mi2s";
+				};
+
+				config {
+					pins = "gpio83";
+					drive-strength = <8>;   /* 8 mA */
+					bias-disable;           /* NO PULL */
+				};
+			};
+		};
+
+		tert_mi2s_mclk {
+			tert_mi2s_mclk_sleep: tert_mi2s_mclk_sleep {
+				mux {
+					pins = "gpio74";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio74";
+					drive-strength = <2>;   /* 2 mA */
+					bias-pull-down;         /* PULL DOWN */
+					input-enable;
+				};
+			};
+
+			tert_mi2s_mclk_active: tert_mi2s_mclk_active {
+				mux {
+					pins = "gpio74";
+					function = "ter_mi2s";
+				};
+
+				config {
+					pins = "gpio74";
+					drive-strength = <8>;   /* 8 mA */
+					bias-disable;           /* NO PULL */
+				};
+			};
+		};
+
+		tert_mi2s {
+			tert_mi2s_sleep: tert_mi2s_sleep {
+				mux {
+					pins = "gpio75", "gpio76";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio75", "gpio76";
+					drive-strength = <2>;   /* 2 mA */
+					bias-pull-down;         /* PULL DOWN */
+					input-enable;
+				};
+			};
+
+			tert_mi2s_active: tert_mi2s_active {
+				mux {
+					pins = "gpio75", "gpio76";
+					function = "ter_mi2s";
+				};
+
+				config {
+					pins = "gpio75", "gpio76";
+					drive-strength = <8>;   /* 8 mA */
+					bias-disable;           /* NO PULL */
+				};
+			};
+		};
+
+		tert_mi2s_sd0 {
+			tert_mi2s_sd0_sleep: tert_mi2s_sd0_sleep {
+				mux {
+					pins = "gpio77";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio77";
+					drive-strength = <2>;   /* 2 mA */
+					bias-pull-down;         /* PULL DOWN */
+					input-enable;
+				};
+			};
+
+			tert_mi2s_sd0_active: tert_mi2s_sd0_active {
+				mux {
+					pins = "gpio77";
+					function = "ter_mi2s";
+				};
+
+				config {
+					pins = "gpio77";
+					drive-strength = <8>;   /* 8 mA */
+					bias-disable;           /* NO PULL */
+				};
+			};
+		};
+
+		tert_mi2s_sd1 {
+			tert_mi2s_sd1_sleep: tert_mi2s_sd1_sleep {
+				mux {
+					pins = "gpio78";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio78";
+					drive-strength = <2>;   /* 2 mA */
+					bias-pull-down;         /* PULL DOWN */
+					input-enable;
+				};
+			};
+
+			tert_mi2s_sd1_active: tert_mi2s_sd1_active {
+				mux {
+					pins = "gpio78";
+					function = "ter_mi2s";
+				};
+
+				config {
+					pins = "gpio78";
+					drive-strength = <8>;   /* 8 mA */
+					bias-disable;           /* NO PULL */
+				};
+			};
+		};
+
+		quat_mi2s_mclk {
+			quat_mi2s_mclk_sleep: quat_mi2s_mclk_sleep {
+				mux {
+					pins = "gpio57";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio57";
+					drive-strength = <2>;   /* 2 mA */
+					bias-pull-down;         /* PULL DOWN */
+					input-enable;
+				};
+			};
+
+			quat_mi2s_mclk_active: quat_mi2s_mclk_active {
+				mux {
+					pins = "gpio57";
+					function = "qua_mi2s";
+				};
+
+				config {
+					pins = "gpio57";
+					drive-strength = <8>;   /* 8 mA */
+					bias-disable;           /* NO PULL */
+				};
+			};
+		};
+
+		quat_mi2s {
+			quat_mi2s_sleep: quat_mi2s_sleep {
+				mux {
+					pins = "gpio58", "gpio59";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio58", "gpio59";
+					drive-strength = <2>;   /* 2 mA */
+					bias-pull-down;         /* PULL DOWN */
+					input-enable;
+				};
+			};
+
+			quat_mi2s_active: quat_mi2s_active {
+				mux {
+					pins = "gpio58", "gpio59";
+					function = "qua_mi2s";
+				};
+
+				config {
+					pins = "gpio58", "gpio59";
+					drive-strength = <8>;   /* 8 mA */
+					bias-disable;           /* NO PULL */
+					output-high;
+				};
+			};
+		};
+
+		quat_mi2s_sd0 {
+			quat_mi2s_sd0_sleep: quat_mi2s_sd0_sleep {
+				mux {
+					pins = "gpio60";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio60";
+					drive-strength = <2>;   /* 2 mA */
+					bias-pull-down;         /* PULL DOWN */
+					input-enable;
+				};
+			};
+
+			quat_mi2s_sd0_active: quat_mi2s_sd0_active {
+				mux {
+					pins = "gpio60";
+					function = "qua_mi2s";
+				};
+
+				config {
+					pins = "gpio60";
+					drive-strength = <8>;   /* 8 mA */
+					bias-disable;           /* NO PULL */
+				};
+			};
+		};
+
+		quat_mi2s_sd1 {
+			quat_mi2s_sd1_sleep: quat_mi2s_sd1_sleep {
+				mux {
+					pins = "gpio61";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio61";
+					drive-strength = <2>;   /* 2 mA */
+					bias-pull-down;         /* PULL DOWN */
+					input-enable;
+				};
+			};
+
+			quat_mi2s_sd1_active: quat_mi2s_sd1_active {
+				mux {
+					pins = "gpio61";
+					function = "qua_mi2s";
+				};
+
+				config {
+					pins = "gpio61";
+					drive-strength = <8>;   /* 8 mA */
+					bias-disable;           /* NO PULL */
+				};
+			};
+		};
+
+		quat_mi2s_sd2 {
+			quat_mi2s_sd2_sleep: quat_mi2s_sd2_sleep {
+				mux {
+					pins = "gpio62";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio62";
+					drive-strength = <2>;   /* 2 mA */
+					bias-pull-down;         /* PULL DOWN */
+					input-enable;
+				};
+			};
+
+			quat_mi2s_sd2_active: quat_mi2s_sd2_active {
+				mux {
+					pins = "gpio62";
+					function = "qua_mi2s";
+				};
+
+				config {
+					pins = "gpio62";
+					drive-strength = <8>;   /* 8 mA */
+					bias-disable;           /* NO PULL */
+				};
+			};
+		};
+
+		quat_mi2s_sd3 {
+			quat_mi2s_sd3_sleep: quat_mi2s_sd3_sleep {
+				mux {
+					pins = "gpio63";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio63";
+					drive-strength = <2>;   /* 2 mA */
+					bias-pull-down;         /* PULL DOWN */
+					input-enable;
+				};
+			};
+
+			quat_mi2s_sd3_active: quat_mi2s_sd3_active {
+				mux {
+					pins = "gpio63";
+					function = "qua_mi2s";
+				};
+
+				config {
+					pins = "gpio63";
+					drive-strength = <8>;   /* 8 mA */
+					bias-disable;           /* NO PULL */
+				};
+			};
+		};
+
+		spkr_i2s_clk_pin {
+			spkr_i2s_clk_sleep: spkr_i2s_clk_sleep {
+				mux {
+					pins = "gpio69";
+					function = "spkr_i2s";
+				};
+
+				config {
+					pins = "gpio69";
+					drive-strength = <2>; /* 2 mA */
+					bias-pull-down;       /* PULL DOWN */
+				};
+			};
+
+			spkr_i2s_clk_active: spkr_i2s_clk_active {
+				mux {
+					pins = "gpio69";
+					function = "spkr_i2s";
+				};
+
+				config {
+					pins = "gpio69";
+					drive-strength = <8>; /* 8 mA */
+					bias-disable;         /* NO PULL */
+				};
+			};
+		};
+	};
+};
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/arch/arm64/boot/dts/qcom/msm8998-pm.dtsi	2019-01-22 16:16:21.195225506 +0100
@@ -0,0 +1,807 @@
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+
+&soc {
+	qcom,spm@17812000 {
+		compatible = "qcom,spm-v2";
+		#address-cells = <1>;
+		#size-cells = <1>;
+		reg = <0x17812000 0x1000>;
+		qcom,name = "gold-l2"; /* Gold L2 SAW */
+		qcom,saw2-ver-reg = <0xfd0>;
+		qcom,cpu-vctl-list = <&CPU4 &CPU5 &CPU6 &CPU7>;
+		qcom,vctl-timeout-us = <500>;
+		qcom,vctl-port = <0x0>;
+		qcom,phase-port = <0x1>;
+		qcom,saw2-avs-ctl = <0x1010031>;
+		qcom,saw2-avs-limit = <0x4580458>;
+		qcom,pfm-port = <0x2>;
+	};
+
+	qcom,spm@17912000 {
+		compatible = "qcom,spm-v2";
+		#address-cells = <1>;
+		#size-cells = <1>;
+		reg = <0x17912000 0x1000>;
+		qcom,name = "silver-l2"; /* Silver L2 SAW */
+		qcom,saw2-ver-reg = <0xfd0>;
+		qcom,cpu-vctl-list = <&CPU0 &CPU1 &CPU2 &CPU3>;
+		qcom,vctl-timeout-us = <500>;
+		qcom,vctl-port = <0x0>;
+		qcom,phase-port = <0x1>;
+		qcom,saw2-avs-ctl = <0x1010031>;
+		qcom,saw2-avs-limit = <0x4580458>;
+		qcom,pfm-port = <0x2>;
+	};
+
+	qcom,lpm-levels {
+		compatible = "qcom,lpm-levels";
+		qcom,use-psci;
+		#address-cells = <1>;
+		#size-cells = <0>;
+		qcom,pm-cluster@0 {
+			reg = <0>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+			label = "system";
+			qcom,spm-device-names = "cci";
+			qcom,psci-mode-shift = <8>;
+			qcom,psci-mode-mask = <0xf>;
+
+			qcom,pm-cluster-level@0 {
+				reg = <0>;
+				label = "system-wfi";
+				qcom,psci-mode = <0x0>;
+				qcom,latency-us = <100>;
+				qcom,ss-power = <725>;
+				qcom,energy-overhead = <85000>;
+				qcom,time-overhead = <120>;
+			};
+
+			qcom,pm-cluster-level@1 { /* E3 */
+				reg = <1>;
+				label = "system-pc";
+				qcom,psci-mode = <0x3>;
+				qcom,latency-us = <5534>;
+				qcom,ss-power = <399>;
+				qcom,energy-overhead = <3340281>;
+				qcom,time-overhead = <16744>;
+				qcom,min-child-idx = <3>;
+				qcom,is-reset;
+				qcom,notify-rpm;
+			};
+
+			qcom,pm-cluster@0 {
+				reg = <0>;
+				#address-cells = <1>;
+				#size-cells = <0>;
+				label = "pwr";
+				qcom,spm-device-names = "l2";
+				qcom,cpu = <&CPU0 &CPU1 &CPU2 &CPU3>;
+				qcom,psci-mode-shift = <4>;
+				qcom,psci-mode-mask = <0xf>;
+
+				qcom,pm-cluster-level@0 { /* D1 */
+					reg = <0>;
+					label = "pwr-l2-wfi";
+					qcom,psci-mode = <0x1>;
+					qcom,latency-us = <51>;
+					qcom,ss-power = <452>;
+					qcom,energy-overhead = <69355>;
+					qcom,time-overhead = <99>;
+				};
+
+				qcom,pm-cluster-level@1 { /* D2D */
+					reg = <1>;
+					label = "pwr-l2-dynret";
+					qcom,psci-mode = <0x2>;
+					qcom,latency-us = <659>;
+					qcom,ss-power = <434>;
+					qcom,energy-overhead = <465725>;
+					qcom,time-overhead = <976>;
+					qcom,min-child-idx = <1>;
+				};
+
+				qcom,pm-cluster-level@2 { /* D2E */
+					reg = <2>;
+					label = "pwr-l2-ret";
+					qcom,psci-mode = <0x3>;
+					qcom,latency-us = <743>;
+					qcom,ss-power = <425>;
+					qcom,energy-overhead = <629936>;
+					qcom,time-overhead = <1312>;
+					qcom,min-child-idx = <2>;
+				};
+
+				qcom,pm-cluster-level@3 { /* D4 */
+					reg = <3>;
+					label = "pwr-l2-pc";
+					qcom,psci-mode = <0x4>;
+					qcom,latency-us = <4562>;
+					qcom,ss-power = <408>;
+					qcom,energy-overhead = <2421840>;
+					qcom,time-overhead = <5376>;
+					qcom,min-child-idx = <2>;
+					qcom,is-reset;
+				};
+
+				qcom,pm-cpu {
+					#address-cells = <1>;
+					#size-cells = <0>;
+					qcom,psci-mode-shift = <0>;
+					qcom,psci-mode-mask = <0xf>;
+
+					qcom,pm-cpu-level@0 { /* C1 */
+						reg = <0>;
+						qcom,spm-cpu-mode = "wfi";
+						qcom,psci-cpu-mode = <0x1>;
+						qcom,latency-us = <43>;
+						qcom,ss-power = <454>;
+						qcom,energy-overhead = <38639>;
+						qcom,time-overhead = <83>;
+					};
+
+					qcom,pm-cpu-level@1 { /* C2D */
+						reg = <1>;
+						qcom,psci-cpu-mode = <0x2>;
+						qcom,spm-cpu-mode = "ret";
+						qcom,latency-us = <86>;
+						qcom,ss-power = <449>;
+						qcom,energy-overhead = <78456>;
+						qcom,time-overhead = <167>;
+					};
+
+					qcom,pm-cpu-level@2 {  /* C3 */
+						reg = <2>;
+						qcom,spm-cpu-mode = "pc";
+						qcom,psci-cpu-mode = <0x3>;
+						qcom,latency-us = <612>;
+						qcom,ss-power = <436>;
+						qcom,energy-overhead = <418225>;
+						qcom,time-overhead = <885>;
+						qcom,is-reset;
+					};
+				};
+			};
+
+			qcom,pm-cluster@1 {
+				reg = <1>;
+				#address-cells = <1>;
+				#size-cells = <0>;
+				label = "perf";
+				qcom,spm-device-names = "l2";
+				qcom,cpu = <&CPU4 &CPU5 &CPU6 &CPU7>;
+				qcom,psci-mode-shift = <4>;
+				qcom,psci-mode-mask = <0xf>;
+
+				qcom,pm-cluster-level@0 { /* D1 */
+					reg = <0>;
+					label = "perf-l2-wfi";
+					qcom,psci-mode = <0x1>;
+					qcom,latency-us = <51>;
+					qcom,ss-power = <512>;
+					qcom,energy-overhead = <99986>;
+					qcom,time-overhead = <99>;
+				};
+
+				qcom,pm-cluster-level@1 { /* D2D */
+					reg = <1>;
+					label = "perf-l2-dynret";
+					qcom,psci-mode = <0x2>;
+					qcom,latency-us = <529>;
+					qcom,ss-power = <468>;
+					qcom,energy-overhead = <496783>;
+					qcom,time-overhead = <871>;
+					qcom,min-child-idx = <1>;
+				};
+
+				qcom,pm-cluster-level@2 { /* D2E */
+					reg = <2>;
+					label = "perf-l2-ret";
+					qcom,psci-mode = <0x3>;
+					qcom,latency-us = <605>;
+					qcom,ss-power = <456>;
+					qcom,energy-overhead = <597126>;
+					qcom,time-overhead = <1025>;
+					qcom,min-child-idx = <2>;
+				};
+
+				qcom,pm-cluster-level@3 { /* D4 */
+					reg = <3>;
+					label = "perf-l2-pc";
+					qcom,psci-mode = <0x4>;
+					qcom,latency-us = <2027>;
+					qcom,ss-power = <420>;
+					qcom,energy-overhead = <1624216>;
+					qcom,time-overhead = <2751>;
+					qcom,min-child-idx = <2>;
+					qcom,is-reset;
+				};
+
+				qcom,pm-cpu {
+					#address-cells = <1>;
+					#size-cells = <0>;
+					qcom,psci-mode-shift = <0>;
+					qcom,psci-mode-mask = <0xf>;
+
+					qcom,pm-cpu-level@0 { /* C1 */
+						reg = <0>;
+						qcom,spm-cpu-mode = "wfi";
+						qcom,psci-cpu-mode = <0x1>;
+						qcom,latency-us = <43>;
+						qcom,ss-power = <515>;
+						qcom,energy-overhead = <48502>;
+						qcom,time-overhead = <86>;
+					};
+
+					qcom,pm-cpu-level@1 { /* C2D */
+						reg = <1>;
+						qcom,psci-cpu-mode = <0x2>;
+						qcom,spm-cpu-mode = "ret";
+						qcom,latency-us = <82>;
+						qcom,ss-power = <505>;
+						qcom,energy-overhead = <98530>;
+						qcom,time-overhead = <161>;
+					};
+
+					qcom,pm-cpu-level@2 { /* C3 */
+						reg = <2>;
+						qcom,spm-cpu-mode = "pc";
+						qcom,psci-cpu-mode = <0x3>;
+						qcom,latency-us = <525>;
+						qcom,ss-power = <476>;
+						qcom,energy-overhead = <485037>;
+						qcom,time-overhead = <861>;
+						qcom,is-reset;
+					};
+				};
+			};
+		};
+	};
+
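+	/*
+	 * The nodes below expose statistics that the RPM firmware publishes
+	 * in its message RAM.  As understood from the downstream
+	 * qcom,rpm-stats drivers (not stated by this patch): "phys_addr_base"
+	 * points at the stats area and "offset_addr" at the word holding its
+	 * offset; the drivers look these reg-names strings up verbatim.
+	 */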
+	qcom,rpm-stats@200000 {
+		compatible = "qcom,rpm-stats";
+		reg = <0x200000 0x1000>,
+			<0x290014 0x4>,
+			<0x29001c 0x4>;
+		reg-names = "phys_addr_base",
+			"offset_addr",
+			"heap_phys_addrbase";
+		qcom,sleep-stats-version = <2>;
+	};
+
+	qcom,rpm-rail-stats@200000 {
+		compatible = "qcom,rpm-rail-stats";
+		reg = <0x200000 0x100>,
+			<0x29000c 0x4>;
+		reg-names = "phys_addr_base",
+			    "offset_addr";
+	};
+
+	qcom,rpm-log@200000 {
+		compatible = "qcom,rpm-log";
+		reg = <0x200000 0x4000>,
+			<0x290018 0x4>;
+		qcom,rpm-addr-phys = <0x200000>;
+		qcom,offset-version = <4>;
+		qcom,offset-page-buffer-addr = <36>;
+		qcom,offset-log-len = <40>;
+		qcom,offset-log-len-mask = <44>;
+		qcom,offset-page-indices = <56>;
+	};
+
+	qcom,rpm-master-stats@778150 {
+		compatible = "qcom,rpm-master-stats";
+		reg = <0x778150 0x5000>;
+		qcom,masters = "APSS", "MPSS", "ADSP", "SLPI", "TZ", "SPSS";
+		qcom,master-stats-version = <2>;
+		qcom,master-offset = <4096>;
+	};
+
+	rpm_msg_ram: memory@200000 {
+		compatible = "qcom,rpm-msg-ram";
+		reg = <0x200000 0x1000>,
+			<0x290000 0x1000>;
+	};
+
+	rpm_code_ram: rpm-memory@778000 {
+		compatible = "qcom,rpm-code-ram";
+		reg = <0x778000 0x5000>;
+	};
+
+	qcom,system-stats {
+		compatible = "qcom,system-stats";
+		qcom,rpm-msg-ram = <&rpm_msg_ram>;
+		qcom,rpm-code-ram = <&rpm_code_ram>;
+		qcom,masters = "APSS", "MPSS", "ADSP", "SLPI", "TZ", "SPSS";
+	};
+
+	qcom,mpm@7781b8 {
+		compatible = "qcom,mpm-v2";
+		reg = <0x7781b8 0x1000>, /* MSM_RPM_MPM_BASE 4K */
+			<0x17911008 0x4>;   /* MSM_APCS_GCC_BASE IPC reg, 4 bytes */
+		reg-names = "vmpm", "ipc";
+		interrupts = <GIC_SPI 171 IRQ_TYPE_EDGE_RISING>;
+		clocks = <&clock_gcc clk_cxo_lpm_clk>;
+		clock-names = "xo";
+		qcom,num-mpm-irqs = <96>;
+
+		qcom,ipc-bit-offset = <1>;
+
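+		/*
+		 * Each qcom,gic-map entry appears to be a <MPM-pin GIC-SPI#>
+		 * pair, e.g. <0x1f 212> ties MPM pin 31 to SPI 212.  A pin
+		 * value of 0xff seems to mean "no dedicated MPM pin": the
+		 * SPI is known to the driver but cannot wake the SoC from
+		 * deep sleep.  This is a reading of the downstream mpm-v2
+		 * driver, not something the patch itself states.
+		 */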
+		qcom,gic-parent = <&intc>;
+		qcom,gic-map =
+			<0x1f 212>, /* usb30_power_event_irq	*/
+			<0x2 216>, /* tsens1_upper_lower_int	*/
+			<0x34 275>, /* qmp_usb3_lfps_rxterm_irq_cx	*/
+			<0x57 358>, /*  spmi_periph_irq[0]	*/
+			<0x4f 379>, /* usb2phy_intr: qusb2phy_dmse_hv	*/
+			<0x51 379>, /* usb2phy_intr: qusb2phy_dpse_hv	*/
+			<0x50 384>, /* sp_rmb_sp2soc_irq	*/
+			<0xff 16>, /* APC0_qgicQTmrHypPhysIrptReq	*/
+			<0xff 17>, /* APC3_qgicQTmrSecPhysIrptReq	*/
+			<0xff 18>, /* APC0_qgicQTmrNonSecPhysIrptReq	*/
+			<0xff 19>, /* APC3_qgicQTmrVirtIrptReq	*/
+			<0xff 20>, /* APC0_dbgCommRxFull	*/
+			<0xff 21>, /* APC3_dbgCommTxEmpty	*/
+			<0xff 22>, /* APC0_qgicPerfMonIrptReq	*/
+			<0xff 23>, /* corespm_vote_int[7]	*/
+			<0xff 24>, /* APC0_qgicExtFaultIrptReq	*/
+			<0xff 28>, /* qgicWakeupSync	*/
+			<0xff 29>, /* APCC_cti_SPI_intx	*/
+			<0xff 30>, /* APCC_cti_SPI_inty	*/
+			<0xff 32>, /* l2spm_vote_int[0]	*/
+			<0xff 33>, /* l2spm_vote_int[1]	*/
+			<0xff 34>, /* APCC_qgicL2ErrorIrptReq	*/
+			<0xff 35>, /* WDT_barkInt	*/
+			<0xff 36>, /* WDT_biteExpired	*/
+			<0xff 39>, /* QTMR_qgicFrm0VirtIrq	*/
+			<0xff 40>, /* QTMR_qgicFrm0PhyIrq	*/
+			<0xff 41>, /* QTMR_qgicFrm1PhyIrq	*/
+			<0xff 42>, /* QTMR_qgicFrm2PhyIrq	*/
+			<0xff 43>, /* QTMR_qgicFrm3PhyIrq	*/
+			<0xff 44>, /* QTMR_qgicFrm4PhyIrq	*/
+			<0xff 45>, /* QTMR_qgicFrm5PhyIrq	*/
+			<0xff 46>, /* QTMR_qgicFrm6PhyIrq	*/
+			<0xff 47>, /* rbif_Irq[0]	*/
+			<0xff 48>, /* rbif_Irq[1]	*/
+			<0xff 52>, /* cci_spm_vote_summary_int	*/
+			<0xff 54>, /* ~nERRORIRQ	*/
+			<0xff 55>, /* nEVNTCNTOVERFLOW_cci	*/
+			<0xff 56>, /* QTMR_qgicFrm0VirtIrq	*/
+			<0xff 57>, /* QTMR_qgicFrm0PhyIrq	*/
+			<0xff 58>, /* QTMR_qgicFrm1PhyIrq	*/
+			<0xff 59>, /* QTMR_qgicFrm2PhyIrq	*/
+			<0xff 60>, /* QTMR_qgicFrm3PhyIrq	*/
+			<0xff 61>, /* QTMR_qgicFrm4PhyIrq	*/
+			<0xff 62>, /* QTMR_qgicFrm5PhyIrq	*/
+			<0xff 63>, /* QTMR_qgicFrm6PhyIrq	*/
+			<0xff 64>, /* wakeup_counter_irq_OR	*/
+			<0xff 65>, /* apc0_vs_alarm	*/
+			<0xff 66>, /* apc1_vs_alarm	*/
+			<0xff 67>, /* o_pwr_osm_irq	*/
+			<0xff 68>, /* o_perf_osm_irq	*/
+			<0xff 69>, /* o_pwr_dcvsh_interrupt	*/
+			<0xff 70>, /* o_perf_dcvsh_interrupt	*/
+			<0xff 73>, /* L2_EXTERRIRQ_C0	*/
+			<0xff 74>, /* L2_EXTERRIRQ_C1	*/
+			<0xff 75>, /* L2_INTERRIRQ_C0	*/
+			<0xff 76>, /* L2_INTERRIRQ_C1	*/
+			<0xff 77>, /* L2SPM_svicInt[0]	*/
+			<0xff 78>, /* L2SPM_svicInt[1]	*/
+			<0xff 79>, /* L2SPM_svicIntSwDone[0]	*/
+			<0xff 80>, /* L2SPM_svicIntSwDone[1]	*/
+			<0xff 81>, /* l2_avs_err[0]	*/
+			<0xff 82>, /* l2_avs_err[1]	*/
+			<0xff 83>, /* l2_avs_ack[0]	*/
+			<0xff 84>, /* l2_avs_ack[1]	*/
+			<0xff 96>, /* uart_dm_intr	*/
+			<0xff 97>, /* uart_dm_intr	*/
+			<0xff 98>, /* o_qm_interrupt	*/
+			<0xff 100>, /* jpeg_vbif_irpt	*/
+			<0xff 101>, /* processor_1_user_int	*/
+			<0xff 102>, /* processor_1_kernel_int	*/
+			<0xff 106>, /* dir_conn_irq_lpa_dsp_2	*/
+			<0xff 107>, /* dir_conn_irq_lpa_dsp_1	*/
+			<0xff 109>, /* camss_vbif_0_irq	*/
+			<0xff 110>, /* csiphy_irq	*/
+			<0xff 111>, /* csiphy_irq	*/
+			<0xff 112>, /* csiphy_irq	*/
+			<0xff 115>, /* mdp_irq	*/
+			<0xff 116>, /* vbif_irpt	*/
+			<0xff 117>, /* dir_conn_irq_lpa_dsp_0	*/
+			<0xff 119>, /* lcc_audio_wrapper_q6	*/
+			<0xff 122>, /* PIMEM TPDM BC interrupt	*/
+			<0xff 123>, /* PIMEM TPDM TC interrupt	*/
+			<0xff 124>, /* dir_conn_irq_sensors_1	*/
+			<0xff 125>, /* dir_conn_irq_sensors_0	*/
+			<0xff 126>, /* qup_irq	*/
+			<0xff 127>, /* qup_irq	*/
+			<0xff 128>, /* qup_irq	*/
+			<0xff 129>, /* qup_irq	*/
+			<0xff 130>, /* qup_irq	*/
+			<0xff 131>, /* qup_irq	*/
+			<0xff 132>, /* qup_irq	*/
+			<0xff 133>, /* qup_irq	*/
+			<0xff 134>, /* qup_irq	*/
+			<0xff 135>, /* qup_irq	*/
+			<0xff 136>, /* qup_irq	*/
+			<0xff 137>, /* qup_irq	*/
+			<0xff 138>, /* qup_irq	*/
+			<0xff 139>, /* uart_dm_intr	*/
+			<0xff 140>, /* uart_dm_intr	*/
+			<0xff 141>, /* uart_dm_intr	*/
+			<0xff 145>, /* uart_dm_intr	*/
+			<0xff 146>, /* uart_dm_intr	*/
+			<0xff 147>, /* uart_dm_intr	*/
+			<0xff 148>, /* osmmu_Cirpt[4]	*/
+			<0xff 149>, /* osmmu_Cirpt[5]	*/
+			<0xff 151>, /* tsif_irq[0]	*/
+			<0xff 152>, /* tsif_irq[1]	*/
+			<0xff 153>, /* tspp_irq	*/
+			<0xff 154>, /* bam_irq	*/
+			<0xff 155>, /* dir_conn_irq_lpa_dsp_5	*/
+			<0xff 156>, /* dir_conn_irq_lpa_dsp_4	*/
+			<0xff 157>, /* sdcc_irq	*/
+			<0xff 158>, /* sdcc_irq	*/
+			<0xff 159>, /* lpass_qos_apps_interrupt	*/
+			<0xff 160>, /* smmu_PMIrpt	*/
+			<0xff 161>, /* sdcc_irq	*/
+			<0xff 162>, /* sdcc_irq	*/
+			<0xff 163>, /* usb30_ctrl_irq[0]	*/
+			<0xff 164>, /* usb30_bam_irq	*/
+			<0xff 165>, /* usb30_hs_phy_irq	*/
+			<0xff 166>, /* o_lm_int_2qgic	*/
+			<0xff 167>, /* pcie20_inta	*/
+			<0xff 168>, /* pcie20_intb	*/
+			<0xff 169>, /* smmu_Cirpt[12]	*/
+			<0xff 170>, /* pcie20_intc	*/
+			<0xff 171>, /* pcie20_intd	*/
+			<0xff 172>, /* dcvs_int(8)	*/
+			<0xff 173>, /* dcvs_int(9)	*/
+			<0xff 184>, /* dir_conn_irq_lpa_dsp_3	*/
+			<0xff 185>, /* camss_vbif_2_irq	*/
+			<0xff 186>, /* mnoc_obs_mainFault	*/
+			<0xff 188>, /* q6ss_irq_out(4)	*/
+			<0xff 189>, /* q6ss_irq_out(5)	*/
+			<0xff 190>, /* q6ss_irq_out(6)	*/
+			<0xff 191>, /* q6ss_irq_out(7)	*/
+			<0xff 192>, /* audio_out0_irq	*/
+			<0xff 194>, /* q6ss_wdog_exp_irq	*/
+			<0xff 195>, /* lpass_slimbus_core_ee1_irq	*/
+			<0xff 196>, /* lpass_slimbus_bam_ee1_irq	*/
+			<0xff 197>, /* resampler_irq[0]	*/
+			<0xff 199>, /* qdss_usb_trace_bam_irq	*/
+			<0xff 200>, /* rpm_ipc[4]	*/
+			<0xff 201>, /* rpm_ipc[5]	*/
+			<0xff 202>, /* rpm_ipc[6]	*/
+			<0xff 203>, /* rpm_ipc[7]	*/
+			<0xff 204>, /* rpm_ipc[20]	*/
+			<0xff 205>, /* rpm_ipc[21]	*/
+			<0xff 206>, /* rpm_ipc[22]	*/
+			<0xff 207>, /* rpm_ipc[23]	*/
+			<0xff 208>, /* q6ss_irq_out(4)	*/
+			<0xff 209>, /* q6ss_irq_out(5)	*/
+			<0xff 210>, /* q6ss_irq_out(6)	*/
+			<0xff 211>, /* q6ss_irq_out(7)	*/
+			<0xff 213>, /* secure_wdog_bark_irq	*/
+			<0xff 214>, /* tsens1_tsens_max_min_int	*/
+			<0xff 215>, /* o_bimc_intr[0]	*/
+			<0xff 217>, /* ocimem_nonsec_irq	*/
+			<0xff 218>, /* sscaon_tmr_timeout_irq	*/
+			<0xff 219>, /* q6ss_irq_out(28)	*/
+			<0xff 220>, /* spmi_protocol_irq	*/
+			<0xff 221>, /* q6ss_irq_out(29)	*/
+			<0xff 222>, /* q6ss_irq_out(30)	*/
+			<0xff 223>, /* spdm_offline_irq	*/
+			<0xff 224>, /* spdm_realtime_irq	*/
+			<0xff 225>, /* snoc_obs_mainFault	*/
+			<0xff 226>, /* cnoc_obs_mainFault	*/
+			<0xff 227>, /* o_ss_xpu3_sec_intr	*/
+			<0xff 228>, /* o_tcsr_xpu3_non_sec_summary_intr	*/
+			<0xff 229>, /* o_timeout_slave_kpss_summary_intr */
+			<0xff 230>, /* o_tcsr_vmidmt_client_sec_summary_intr */
+			<0xff 231>, /* o_tcsr_vmidmt_client_non_sec */
+			<0xff 232>, /* o_tcsr_vmidmt_cfg_sec_summary_intr */
+			<0xff 233>, /* o_tcsr_vmidmt_cfg_non_sec */
+			<0xff 234>, /* q6ss_irq_out(31)	*/
+			<0xff 235>, /* cpr_irq[0]	*/
+			<0xff 236>, /* crypto_core_irq[0]	*/
+			<0xff 237>, /* crypto_core_irq[1]	*/
+			<0xff 238>, /* crypto_bam_irq[0]	*/
+			<0xff 239>, /* crypto_bam_irq[1]	*/
+			<0xff 240>, /* summary_irq_hmss	*/
+			<0xff 241>, /* dir_conn_irq_hmss_7	*/
+			<0xff 242>, /* dir_conn_irq_hmss_6	*/
+			<0xff 243>, /* dir_conn_irq_hmss_5	*/
+			<0xff 244>, /* dir_conn_irq_hmss_4	*/
+			<0xff 245>, /* dir_conn_irq_hmss_3	*/
+			<0xff 246>, /* dir_conn_irq_hmss_2	*/
+			<0xff 247>, /* dir_conn_irq_hmss_1	*/
+			<0xff 248>, /* dir_conn_irq_hmss_0	*/
+			<0xff 249>, /* summary_irq_hmss_tz	*/
+			<0xff 250>, /* cpr_irq[3]	*/
+			<0xff 251>, /* cpr_irq[2]	*/
+			<0xff 252>, /* cpr_irq[1]	*/
+			<0xff 253>, /* sdcc_pwr_cmd_irq	*/
+			<0xff 254>, /* sdio_wakeup_irq	*/
+			<0xff 255>, /* cpr_irq[0]	*/
+			<0xff 256>, /* smmu_Cirpt[13]	*/
+			<0xff 257>, /* smmu_Cirpt[14]	*/
+			<0xff 258>, /* smmu_Cirpt[0]	*/
+			<0xff 259>, /* sdcc_pwr_cmd_irq	*/
+			<0xff 260>, /* sdio_wakeup_irq	*/
+			<0xff 261>, /* o_tcsr_mmu_nsgcfglrpt_summary_intr */
+			<0xff 262>, /* o_tcsr_mmu_gcfglrpt_summary_intr	*/
+			<0xff 263>, /* o_tcsr_mmu_nsglrpt_summary_intr	*/
+			<0xff 264>, /* o_tcsr_mmu_glrpt_summary_intr	*/
+			<0xff 265>, /* vbif_irpt	*/
+			<0xff 266>, /* smmu_PMIrpt	*/
+			<0xff 267>, /* smmu_Cirpt[3]	*/
+			<0xff 268>, /* q6ss_irq_out(31)	*/
+			<0xff 269>, /* rpm_wdog_expired_irq	*/
+			<0xff 270>, /* bam_irq	*/
+			<0xff 271>, /* bam_irq	*/
+			<0xff 272>, /* q6ss_irq_out(28)	*/
+			<0xff 273>, /* q6ss_irq_out(29)	*/
+			<0xff 274>, /* q6ss_irq_out(30)	*/
+			<0xff 276>, /* osmmu_Cirpt [4]	*/
+			<0xff 277>, /* osmmu_Cirpt [5]	*/
+			<0xff 278>, /* usb30_ctrl_irq[1]	*/
+			<0xff 279>, /* osmmu_Cirpt [6]	*/
+			<0xff 280>, /* osmmu_Cirpt [7]	*/
+			<0xff 281>, /* osmmu_Cirpt [8]	*/
+			<0xff 282>, /* osmmu_Cirpt [9]	*/
+			<0xff 283>, /* osmmu_Cirpt [10]	*/
+			<0xff 284>, /* osmmu_Cirpt [11]	*/
+			<0xff 285>, /* osmmu_Cirpt [12]	*/
+			<0xff 286>, /* osmmu_Cirpt [13]	*/
+			<0xff 287>, /* osmmu_Cirpt [14]	*/
+			<0xff 288>, /* osmmu_Cirpt [15]	*/
+			<0xff 289>, /* ufs_ice_sec_level_irq	*/
+			<0xff 290>, /* cpr_irq[4]	*/
+			<0xff 291>, /* smmu_Cirpt[2]	*/
+			<0xff 292>, /* osmmu_Cirpt [16]	*/
+			<0xff 293>, /* osmmu_Cirpt [17]	*/
+			<0xff 294>, /* osmmu_Cirpt [18]	*/
+			<0xff 295>, /* osmmu_Cirpt [0]	*/
+			<0xff 296>, /* osmmu_PMIrpt	*/
+			<0xff 297>, /* ufs_intrq	*/
+			<0xff 298>, /* osmmu_Cirpt [1]	*/
+			<0xff 299>, /* osmmu_Cirpt [2]	*/
+			<0xff 300>, /* osmmu_Cirpt [3]	*/
+			<0xff 301>, /* smmu_Cirpt[1]	*/
+			<0xff 302>, /* qdss_etrbytecnt_irq	*/
+			<0xff 303>, /* smmu_Cirpt[0]	*/
+			<0xff 304>, /* osmmu_Cirpt [19]	*/
+			<0xff 305>, /* osmmu_Cirpt [20]	*/
+			<0xff 306>, /* osmmu_Cirpt [21]	*/
+			<0xff 307>, /* osmmu_Cirpt [22]	*/
+			<0xff 308>, /* osmmu_Cirpt [23]	*/
+			<0xff 310>, /* pcie20_global_int	*/
+			<0xff 311>, /* pcie20_int_edma_int	*/
+			<0xff 316>, /* lpass_hdmitx_interrupt_ext	*/
+			<0xff 317>, /* rbif_irq	*/
+			<0xff 318>, /* gpu_cc_gpu_cx_gds_hw_ctrl_irq_out */
+			<0xff 319>, /* VENUS_IRQ	*/
+			<0xff 323>, /* lpass_slimbus1_core_ee1_irq	*/
+			<0xff 324>, /* lpass_slimbus1_bam_ee1_irq	*/
+			<0xff 325>, /* camss_irq18	*/
+			<0xff 326>, /* camss_irq0	*/
+			<0xff 327>, /* camss_irq1	*/
+			<0xff 328>, /* camss_irq2	*/
+			<0xff 329>, /* camss_irq3	*/
+			<0xff 330>, /* camss_irq4	*/
+			<0xff 331>, /* camss_irq5	*/
+			<0xff 332>, /* GC_SYS_irq_0	*/
+			<0xff 333>, /* GC_SYS_irq_1	*/
+			<0xff 334>, /* GC_SYS_irq_2	*/
+			<0xff 335>, /* GC_SYS_irq_3	*/
+			<0xff 336>, /* camss_irq13	*/
+			<0xff 337>, /* camss_irq14	*/
+			<0xff 338>, /* camss_irq15	*/
+			<0xff 339>, /* camss_irq16	*/
+			<0xff 340>, /* camss_irq17	*/
+			<0xff 341>, /* camss_irq6	*/
+			<0xff 342>, /* smmu_Cirpt[15]	*/
+			<0xff 343>, /* bam_irq[0]	*/
+			<0xff 344>, /* uart_dm_intr	*/
+			<0xff 345>, /* camss_irq7	*/
+			<0xff 346>, /* camss_irq8	*/
+			<0xff 347>, /* camss_irq9	*/
+			<0xff 348>, /* camss_irq10	*/
+			<0xff 350>, /* camss_irq12	*/
+			<0xff 351>, /* sif_aud_dec_out_irq_ext	*/
+			<0xff 356>, /* vbif_nrt_irpt	*/
+			<0xff 357>, /* Nonfatal pIMEM interrupt	*/
+			<0xff 359>, /* spmi_periph_irq[1]	*/
+			<0xff 360>, /* fatal pIMEM interrupt	*/
+			<0xff 361>, /* osmmu_Cirpt[0]	*/
+			<0xff 362>, /* osmmu_Cirpt[1]	*/
+			<0xff 363>, /* osmmu_Cirpt[2]	*/
+			<0xff 364>, /* osmmu_Cirpt[3]	*/
+			<0xff 365>, /* ipa_irq(0)	*/
+			<0xff 366>, /* osmmu_PMIrpt	*/
+			<0xff 380>, /* sp_sp2apps_irq[0]	*/
+			<0xff 381>, /* sp_sp2apps_irq[1]	*/
+			<0xff 382>, /* sp_sp2apps_irq[2]	*/
+			<0xff 383>, /* sp_sp2apps_irq[3]	*/
+			<0xff 385>, /* osmmu_CIrpt[12]	*/
+			<0xff 386>, /* osmmu_CIrpt[13]	*/
+			<0xff 387>, /* osmmu_CIrpt[14]	*/
+			<0xff 388>, /* osmmu_CIrpt[15]	*/
+			<0xff 389>, /* osmmu_CIrpt[16]	*/
+			<0xff 390>, /* osmmu_CIrpt[17]	*/
+			<0xff 391>, /* osmmu_CIrpt[18]	*/
+			<0xff 392>, /* osmmu_CIrpt[19]	*/
+			<0xff 393>, /* o_dcc_crc_fail_int	*/
+			<0xff 395>, /* aggre1_obs_mainfault	*/
+			<0xff 396>, /* aggr1_smmu_cirpt[0]	*/
+			<0xff 397>, /* aggr1_smmu_cirpt[1]	*/
+			<0xff 398>, /* aggr1_smmu_cirpt[2]	*/
+			<0xff 399>, /* aggr1_smmu_cirpt[3]	*/
+			<0xff 400>, /* aggr1_smmu_cirpt[4]	*/
+			<0xff 401>, /* aggr1_smmu_cirpt[5]	*/
+			<0xff 402>, /* aggr1_smmu_cirpt[6]	*/
+			<0xff 403>, /* aggr1_smmu_pmirpt	*/
+			<0xff 404>, /* aggre2noc_obs_mainFault	*/
+			<0xff 405>, /* osmmu_CIrpt[0]	*/
+			<0xff 406>, /* osmmu_CIrpt[1]	*/
+			<0xff 407>, /* osmmu_CIrpt[2]	*/
+			<0xff 408>, /* osmmu_CIrpt[3]	*/
+			<0xff 409>, /* osmmu_CIrpt[4]	*/
+			<0xff 410>, /* osmmu_CIrpt[5]	*/
+			<0xff 411>, /* o_dcc_task_done_int	*/
+			<0xff 412>, /* vsense_alarm_irq	*/
+			<0xff 413>, /* osmmu_PMIrpt	*/
+			<0xff 414>, /* pmic_arb_trans_done_irq[0]	*/
+			<0xff 415>, /* pmic_arb_trans_done_irq[1]	*/
+			<0xff 416>, /* rpm_ipc[28]	*/
+			<0xff 417>, /* rpm_ipc[29]	*/
+			<0xff 418>, /* rpm_ipc[30]	*/
+			<0xff 419>, /* rpm_ipc[31]	*/
+			<0xff 420>, /* qup_irq	*/
+			<0xff 421>, /* qup_irq	*/
+			<0xff 422>, /* wd_bite_apps	*/
+			<0xff 423>, /* lpass_qos_apps_interrupt	*/
+			<0xff 424>, /* ipa_irq(2)	*/
+			<0xff 425>, /* smmu_Cirpt[1]	*/
+			<0xff 426>, /* smmu_Cirpt[2]	*/
+			<0xff 427>, /* smmu_Cirpt[3]	*/
+			<0xff 428>, /* smmu_Cirpt[4]	*/
+			<0xff 429>, /* smmu_Cirpt[5]	*/
+			<0xff 430>, /* smmu_Cirpt[6]	*/
+			<0xff 431>, /* smmu_Cirpt[7]	*/
+			<0xff 432>, /* smmu_Cirpt[8]	*/
+			<0xff 433>, /* smmu_Cirpt[9]	*/
+			<0xff 434>, /* smmu_Cirpt[10]	*/
+			<0xff 435>, /* smmu_Cirpt[11]	*/
+			<0xff 436>, /* smmu_Cirpt[16]	*/
+			<0xff 437>, /* pcie20_0_int_msi_dev0	*/
+			<0xff 438>, /* pcie20_0_int_msi_dev1	*/
+			<0xff 439>, /* pcie20_0_int_msi_dev2	*/
+			<0xff 440>, /* pcie20_0_int_msi_dev3	*/
+			<0xff 441>, /* pcie20_0_int_msi_dev4	*/
+			<0xff 442>, /* pcie20_0_int_msi_dev5	*/
+			<0xff 443>, /* pcie20_0_int_msi_dev6	*/
+			<0xff 444>, /* pcie20_0_int_msi_dev7	*/
+			<0xff 445>, /* o_wcss_apps_intr[0]	*/
+			<0xff 446>, /* o_wcss_apps_intr[1]	*/
+			<0xff 447>, /* o_wcss_apps_intr[2]	*/
+			<0xff 448>, /* o_wcss_apps_intr[3]	*/
+			<0xff 449>, /* o_wcss_apps_intr[4]	*/
+			<0xff 450>, /* o_wcss_apps_intr[5]	*/
+			<0xff 452>, /* o_wcss_apps_intr[6]	*/
+			<0xff 453>, /* o_wcss_apps_intr[7]	*/
+			<0xff 454>, /* o_wcss_apps_intr[8]	*/
+			<0xff 455>, /* o_wcss_apps_intr[9]	*/
+			<0xff 456>, /* o_wcss_apps_intr[10]	*/
+			<0xff 457>, /* o_wcss_apps_intr[11]	*/
+			<0xff 458>, /* o_wcss_apps_intr[12]	*/
+			<0xff 461>, /* o_ocimem_nonsec_irq	*/
+			<0xff 462>, /* tsens1_tsens_critical_int	*/
+			<0xff 463>, /* aggr1_smmu_cirpt[7]	*/
+			<0xff 464>, /* ipa_bam_irq(0)	*/
+			<0xff 465>, /* ipa_bam_irq(2)	*/
+			<0xff 466>, /* ssc_uart_int	*/
+			<0xff 468>, /* cri_cm_irq_tz	*/
+			<0xff 469>, /* cri_cm_irq_hyp	*/
+			<0xff 471>, /* mmss_bimc_smmu_gds_hw_ctrl_irq_out */
+			<0xff 472>, /* gcc_gds_hw_ctrl_irq_out	*/
+			<0xff 473>, /* lcc_audio_core_smmu_gds_hw_ctrl */
+			<0xff 477>, /* tsens0_tsens_critical_int	*/
+			<0xff 478>, /* tsens0_tsens_max_min_int	*/
+			<0xff 480>, /* q6ss_wdog_expired_irq	*/
+			<0xff 481>, /* mss_ipc_out_irq[4]	*/
+			<0xff 482>, /* mss_ipc_out_irq[5]	*/
+			<0xff 483>, /* mss_ipc_out_irq[6]	*/
+			<0xff 484>, /* mss_ipc_out_irq[7]	*/
+			<0xff 485>, /* mss_ipc_out_irq[28]	*/
+			<0xff 486>, /* mss_ipc_out_irq[29]	*/
+			<0xff 487>, /* mss_ipc_out_irq[30]	*/
+			<0xff 488>, /* mss_ipc_out_irq[31]	*/
+			<0xff 489>, /* skl_core_irq	*/
+			<0xff 490>, /* tsens0_upper_lower_int	*/
+			<0xff 494>, /* osmmu_CIrpt[6]	*/
+			<0xff 495>, /* osmmu_CIrpt[7]	*/
+			<0xff 496>, /* osmmu_CIrpt[8]	*/
+			<0xff 497>, /* osmmu_CIrpt[9]	*/
+			<0xff 498>, /* osmmu_CIrpt[10]	*/
+			<0xff 499>, /* osmmu_CIrpt[11]	*/
+			<0xff 503>; /* o_bimc_intr[1]	*/
+
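+		/*
+		 * qcom,gpio-map entries read the same way, apparently as
+		 * <MPM-pin TLMM-GPIO#> pairs: e.g. <13 37> would let TLMM
+		 * GPIO 37 (PCIe0 wake, per the comment above) wake the SoC
+		 * through MPM pin 13.
+		 */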
+		qcom,gpio-parent = <&tlmm>;
+		qcom,gpio-map = <3  1>,
+			<4  5>,
+			<5  9>,
+			<6  11>,
+			<7  66>,
+			<8  22>,
+			<9  24>,
+			<10  26>,
+			<11  34>,
+			<12  36>,
+			<13  37>, /* PCIe0 */
+			<14  38>,
+			<15  40>,
+			<16  42>,
+			<17  46>,
+			<18  50>,
+			<19  53>,
+			<20  54>,
+			<21  56>,
+			<22  57>,
+			<23  58>,
+			<24  59>,
+			<25  60>,
+			<26  61>,
+			<27  62>,
+			<28  63>,
+			<29  64>,
+			<30  71>,
+			<31  73>,
+			<32  77>,
+			<33  78>,
+			<34  79>,
+			<35  80>,
+			<36  82>,
+			<37  86>,
+			<38  91>,
+			<39  92>,
+			<40  95>,
+			<41  97>,
+			<42  101>,
+			<43  104>,
+			<44  106>,
+			<45  108>,
+			<46  112>,
+			<47  113>,
+			<48  110>,
+			<50  127>,
+			<51  115>,
+			<54  116>, /* PCIe2 */
+			<55  117>,
+			<56  118>,
+			<57  119>,
+			<58  120>,
+			<59  121>,
+			<60  122>,
+			<61  123>,
+			<62  124>,
+			<63  125>,
+			<64  126>,
+			<65  129>,
+			<66  131>,
+			<67  132>, /* PCIe1 */
+			<68  133>,
+			<69  145>;
+	};
+};
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/arch/arm64/boot/dts/qcom/msm8998-ramoops.dtsi	2019-01-22 16:16:21.199225542 +0100
@@ -0,0 +1,21 @@
+/ {
+	reserved-memory {
+
+		/* pstore test */
+		ramoops_mem: ramoops_mem@88f00000 {
+			compatible = "removed-dma-pool";
+			no-map;
+			reg = <0x0 0x88f00000 0x0 0x100000>; /* 1 MB */
+		};
+	};
+
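+	/*
+	 * Per the upstream ramoops bindings: record-size is the size of each
+	 * kmsg-dump slot carved from the memory-region, ecc-size enables ECC
+	 * and gives its buffer size in bytes, and no-dump-oops records
+	 * panics only, skipping mere oopses.
+	 */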
+	/* pstore test */
+	ramoops {
+		compatible = "ramoops";
+		memory-region = <&ramoops_mem>;
+		record-size = <0x00100000>; /* 1 MB */
+		ecc-size = <16>;
+		no-dump-oops;
+	};
+};
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/arch/arm64/boot/dts/qcom/msm8998-regulator.dtsi	2019-01-22 16:16:21.199225542 +0100
@@ -0,0 +1,1145 @@
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <dt-bindings/clock/msm-clocks-8998.h>
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+
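+/*
+ * Most rails below follow the downstream rpm-smd-regulator pattern: a
+ * "_level" node votes logic-corner levels (the RPM_SMD_REGULATOR_LEVEL_*
+ * constants stand in for microvolts), "_floor_level" votes only a floor,
+ * and "_ao" casts an active-only vote.  Per that binding -- not this
+ * patch -- qcom,set = <1> targets the active set and <3> both the active
+ * and sleep sets.
+ */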
+&rpm_bus {
+	/* PM8998 S1 + S6 = VDD_CX supply */
+	rpm-regulator-smpa1 {
+		status = "okay";
+		pm8998_s1_level: regulator-s1-level {
+			compatible = "qcom,rpm-smd-regulator";
+			regulator-name = "pm8998_s1_level";
+			qcom,set = <3>;
+			regulator-min-microvolt =
+				<RPM_SMD_REGULATOR_LEVEL_RETENTION>;
+			regulator-max-microvolt =
+				<RPM_SMD_REGULATOR_LEVEL_TURBO>;
+			qcom,use-voltage-level;
+		};
+
+		pm8998_s1_floor_level: regulator-s1-floor-level {
+			compatible = "qcom,rpm-smd-regulator";
+			regulator-name = "pm8998_s1_floor_level";
+			qcom,set = <3>;
+			regulator-min-microvolt =
+				<RPM_SMD_REGULATOR_LEVEL_RETENTION>;
+			regulator-max-microvolt =
+				<RPM_SMD_REGULATOR_LEVEL_TURBO>;
+			qcom,use-voltage-floor-level;
+			qcom,always-send-voltage;
+		};
+
+		pm8998_s1_level_ao: regulator-s1-level-ao {
+			compatible = "qcom,rpm-smd-regulator";
+			regulator-name = "pm8998_s1_level_ao";
+			qcom,set = <1>;
+			regulator-min-microvolt =
+				<RPM_SMD_REGULATOR_LEVEL_RETENTION>;
+			regulator-max-microvolt =
+				<RPM_SMD_REGULATOR_LEVEL_TURBO>;
+			qcom,use-voltage-level;
+		};
+	};
+
+	rpm-regulator-smpa2 {
+		status = "okay";
+		pm8998_s2: regulator-s2 {
+			regulator-min-microvolt = <1128000>;
+			regulator-max-microvolt = <1128000>;
+			status = "okay";
+		};
+	};
+
+	rpm-regulator-smpa3 {
+		status = "okay";
+		pm8998_s3: regulator-s3 {
+			regulator-min-microvolt = <1352000>;
+			regulator-max-microvolt = <1352000>;
+			status = "okay";
+		};
+	};
+
+	rpm-regulator-smpa4 {
+		status = "okay";
+		pm8998_s4: regulator-s4 {
+			regulator-min-microvolt = <1800000>;
+			regulator-max-microvolt = <1800000>;
+			status = "okay";
+		};
+	};
+
+	rpm-regulator-smpa5 {
+		status = "okay";
+		pm8998_s5: regulator-s5 {
+			regulator-min-microvolt = <1904000>;
+			regulator-max-microvolt = <2040000>;
+			qcom,init-pin-ctrl-mode = <8>;		/* PMIC_AWAKE */
+			qcom,send-defaults;
+			status = "okay";
+		};
+	};
+
+	rpm-regulator-smpa7 {
+		status = "okay";
+		pm8998_s7: regulator-s7 {
+			regulator-min-microvolt = <900000>;
+			regulator-max-microvolt = <1028000>;
+			qcom,init-pin-ctrl-mode = <8>;		/* PMIC_AWAKE */
+			qcom,send-defaults;
+			status = "okay";
+		};
+	};
+
+	rpm-regulator-smpa8 {
+		status = "okay";
+		pm8998_s8: regulator-s8 {
+			regulator-min-microvolt = <800000>;
+			regulator-max-microvolt = <800000>;
+			status = "okay";
+		};
+	};
+
+	/* PM8998 S9 = VDD_MX supply */
+	rpm-regulator-smpa9 {
+		status = "okay";
+		pm8998_s9_level: regulator-s9-level {
+			compatible = "qcom,rpm-smd-regulator";
+			regulator-name = "pm8998_s9_level";
+			qcom,set = <3>;
+			regulator-min-microvolt =
+				<RPM_SMD_REGULATOR_LEVEL_RETENTION>;
+			regulator-max-microvolt =
+				<RPM_SMD_REGULATOR_LEVEL_TURBO>;
+			qcom,use-voltage-level;
+		};
+
+		pm8998_s9_floor_level: regulator-s9-floor-level {
+			compatible = "qcom,rpm-smd-regulator";
+			regulator-name = "pm8998_s9_floor_level";
+			qcom,set = <3>;
+			regulator-min-microvolt =
+				<RPM_SMD_REGULATOR_LEVEL_RETENTION>;
+			regulator-max-microvolt =
+				<RPM_SMD_REGULATOR_LEVEL_TURBO>;
+			qcom,use-voltage-floor-level;
+			qcom,always-send-voltage;
+		};
+
+		pm8998_s9_level_ao: regulator-s9-level-ao {
+			compatible = "qcom,rpm-smd-regulator";
+			regulator-name = "pm8998_s9_level_ao";
+			qcom,set = <1>;
+			regulator-min-microvolt =
+				<RPM_SMD_REGULATOR_LEVEL_RETENTION>;
+			regulator-max-microvolt =
+				<RPM_SMD_REGULATOR_LEVEL_TURBO>;
+			qcom,use-voltage-level;
+		};
+	};
+
+	rpm-regulator-ldoa1 {
+		status = "okay";
+		pm8998_l1: regulator-l1 {
+			regulator-min-microvolt = <880000>;
+			regulator-max-microvolt = <880000>;
+			proxy-supply = <&pm8998_l1>;
+			qcom,proxy-consumer-enable;
+			qcom,proxy-consumer-current = <73400>;
+			status = "okay";
+		};
+	};
+
+	rpm-regulator-ldoa2 {
+		status = "okay";
+		pm8998_l2: regulator-l2 {
+			regulator-min-microvolt = <1200000>;
+			regulator-max-microvolt = <1200000>;
+			proxy-supply = <&pm8998_l2>;
+			qcom,proxy-consumer-enable;
+			qcom,proxy-consumer-current = <12560>;
+			status = "okay";
+		};
+	};
+
+	rpm-regulator-ldoa3 {
+		status = "okay";
+		pm8998_l3: regulator-l3 {
+			regulator-min-microvolt = <1000000>;
+			regulator-max-microvolt = <1000000>;
+			status = "okay";
+		};
+	};
+
+	/* PM8998 L4 = VDD_SSC_MX supply */
+	rpm-regulator-ldoa4 {
+		status = "okay";
+		pm8998_l4_level: regulator-l4-level {
+			compatible = "qcom,rpm-smd-regulator";
+			regulator-name = "pm8998_l4_level";
+			qcom,set = <3>;
+			regulator-min-microvolt =
+				<RPM_SMD_REGULATOR_LEVEL_RETENTION>;
+			regulator-max-microvolt =
+				<RPM_SMD_REGULATOR_LEVEL_TURBO>;
+			qcom,use-voltage-level;
+		};
+
+		pm8998_l4_floor_level: regulator-l4-floor-level {
+			compatible = "qcom,rpm-smd-regulator";
+			regulator-name = "pm8998_l4_floor_level";
+			qcom,set = <3>;
+			regulator-min-microvolt =
+				<RPM_SMD_REGULATOR_LEVEL_RETENTION>;
+			regulator-max-microvolt =
+				<RPM_SMD_REGULATOR_LEVEL_TURBO>;
+			qcom,use-voltage-floor-level;
+			qcom,always-send-voltage;
+		};
+	};
+
+	rpm-regulator-ldoa5 {
+		status = "okay";
+		pm8998_l5: regulator-l5 {
+			regulator-min-microvolt = <800000>;
+			regulator-max-microvolt = <800000>;
+			/* Force NPM follows HW0_EN */
+			qcom,init-pin-ctrl-mode = <1>;
+			status = "okay";
+		};
+	};
+
+	rpm-regulator-ldoa6 {
+		status = "okay";
+		pm8998_l6: regulator-l6 {
+			regulator-min-microvolt = <1808000>;
+			regulator-max-microvolt = <1808000>;
+			status = "okay";
+		};
+	};
+
+	rpm-regulator-ldoa7 {
+		status = "okay";
+		pm8998_l7: regulator-l7 {
+			regulator-min-microvolt = <1800000>;
+			regulator-max-microvolt = <1800000>;
+			status = "okay";
+		};
+
+		pm8998_l7_pin_ctrl: regulator-l7-pin-ctrl {
+			compatible = "qcom,rpm-smd-regulator";
+			regulator-name = "pm8998_l7_pin_ctrl";
+			qcom,set = <3>;
+			regulator-min-microvolt = <1800000>;
+			regulator-max-microvolt = <1800000>;
+			/* Force NPM follows HW_EN2 */
+			qcom,init-pin-ctrl-mode = <4>;
+			/* Enable follows HW_EN2 */
+			qcom,enable-with-pin-ctrl = <0 4>;
+		};
+	};
+
+	rpm-regulator-ldoa8 {
+		status = "okay";
+		pm8998_l8: regulator-l8 {
+			regulator-min-microvolt = <1200000>;
+			regulator-max-microvolt = <1200000>;
+			status = "okay";
+		};
+	};
+
+	rpm-regulator-ldoa9 {
+		status = "okay";
+		pm8998_l9: regulator-l9 {
+			regulator-min-microvolt = <1808000>;
+			regulator-max-microvolt = <2960000>;
+			status = "okay";
+		};
+	};
+
+	rpm-regulator-ldoa10 {
+		status = "okay";
+		pm8998_l10: regulator-l10 {
+			regulator-min-microvolt = <1808000>;
+			regulator-max-microvolt = <2960000>;
+			status = "okay";
+		};
+	};
+
+	rpm-regulator-ldoa11 {
+		status = "okay";
+		pm8998_l11: regulator-l11 {
+			regulator-min-microvolt = <1000000>;
+			regulator-max-microvolt = <1000000>;
+			status = "okay";
+		};
+	};
+
+	rpm-regulator-ldoa12 {
+		status = "okay";
+		pm8998_l12: regulator-l12 {
+			regulator-min-microvolt = <1800000>;
+			regulator-max-microvolt = <1800000>;
+			status = "okay";
+		};
+	};
+
+	rpm-regulator-ldoa13 {
+		status = "okay";
+		pm8998_l13: regulator-l13 {
+			regulator-min-microvolt = <1808000>;
+			regulator-max-microvolt = <2960000>;
+			status = "okay";
+		};
+	};
+
+	rpm-regulator-ldoa14 {
+		status = "okay";
+		pm8998_l14: regulator-l14 {
+			regulator-min-microvolt = <1880000>;
+			regulator-max-microvolt = <1880000>;
+			proxy-supply = <&pm8998_l14>;
+			qcom,proxy-consumer-enable;
+			qcom,proxy-consumer-current = <32000>;
+			status = "okay";
+		};
+	};
+
+	rpm-regulator-ldoa15 {
+		status = "okay";
+		pm8998_l15: regulator-l15 {
+			regulator-min-microvolt = <1800000>;
+			regulator-max-microvolt = <1800000>;
+			status = "okay";
+		};
+	};
+
+	rpm-regulator-ldoa16 {
+		status = "okay";
+		pm8998_l16: regulator-l16 {
+			regulator-min-microvolt = <2704000>;
+			regulator-max-microvolt = <2704000>;
+			status = "okay";
+		};
+	};
+
+	rpm-regulator-ldoa17 {
+		status = "okay";
+		pm8998_l17: regulator-l17 {
+			regulator-min-microvolt = <1304000>;
+			regulator-max-microvolt = <1304000>;
+			status = "okay";
+		};
+
+		pm8998_l17_pin_ctrl: regulator-l17-pin-ctrl {
+			compatible = "qcom,rpm-smd-regulator";
+			regulator-name = "pm8998_l17_pin_ctrl";
+			qcom,set = <3>;
+			regulator-min-microvolt = <1304000>;
+			regulator-max-microvolt = <1304000>;
+			/* Force NPM follows HW_EN2 */
+			qcom,init-pin-ctrl-mode = <4>;
+			/* Enable follows HW_EN2 */
+			qcom,enable-with-pin-ctrl = <0 4>;
+		};
+	};
+
+	rpm-regulator-ldoa18 {
+		status = "okay";
+		pm8998_l18: regulator-l18 {
+			regulator-min-microvolt = <2704000>;
+			regulator-max-microvolt = <2704000>;
+			status = "okay";
+		};
+	};
+
+	rpm-regulator-ldoa19 {
+		status = "okay";
+		pm8998_l19: regulator-l19 {
+			regulator-min-microvolt = <3008000>;
+			regulator-max-microvolt = <3008000>;
+			status = "okay";
+		};
+	};
+
+	rpm-regulator-ldoa20 {
+		status = "okay";
+		pm8998_l20: regulator-l20 {
+			regulator-min-microvolt = <2960000>;
+			regulator-max-microvolt = <2960000>;
+			status = "okay";
+		};
+	};
+
+	rpm-regulator-ldoa21 {
+		status = "okay";
+		pm8998_l21: regulator-l21 {
+			regulator-min-microvolt = <2960000>;
+			regulator-max-microvolt = <2960000>;
+			status = "okay";
+		};
+	};
+
+	rpm-regulator-ldoa22 {
+		status = "okay";
+		pm8998_l22: regulator-l22 {
+			regulator-min-microvolt = <2864000>;
+			regulator-max-microvolt = <2864000>;
+			status = "okay";
+		};
+	};
+
+	rpm-regulator-ldoa23 {
+		status = "okay";
+		pm8998_l23: regulator-l23 {
+			regulator-min-microvolt = <3312000>;
+			regulator-max-microvolt = <3312000>;
+			status = "okay";
+		};
+	};
+
+	rpm-regulator-ldoa24 {
+		status = "okay";
+		pm8998_l24: regulator-l24 {
+			regulator-min-microvolt = <1848000>;
+			regulator-max-microvolt = <3088000>;
+			parent-supply = <&pm8998_l12>;
+			status = "okay";
+		};
+	};
+
+	rpm-regulator-ldoa25 {
+		status = "okay";
+		pm8998_l25: regulator-l25 {
+			regulator-min-microvolt = <3104000>;
+			regulator-max-microvolt = <3312000>;
+			status = "okay";
+		};
+
+		pm8998_l25_pin_ctrl: regulator-l25-pin-ctrl {
+			compatible = "qcom,rpm-smd-regulator";
+			regulator-name = "pm8998_l25_pin_ctrl";
+			qcom,set = <3>;
+			regulator-min-microvolt = <3104000>;
+			regulator-max-microvolt = <3312000>;
+			/* Force NPM follows HW_EN2 */
+			qcom,init-pin-ctrl-mode = <4>;
+			/* Enable follows HW_EN2 */
+			qcom,enable-with-pin-ctrl = <0 4>;
+		};
+	};
+
+	rpm-regulator-ldoa26 {
+		status = "okay";
+		pm8998_l26: regulator-l26 {
+			regulator-min-microvolt = <1200000>;
+			regulator-max-microvolt = <1200000>;
+			status = "okay";
+		};
+	};
+
+	/* PM8998 L27 = VDD_SSC_CX supply */
+	rpm-regulator-ldoa27 {
+		status = "okay";
+		pm8998_l27_level: regulator-l27-level {
+			compatible = "qcom,rpm-smd-regulator";
+			regulator-name = "pm8998_l27_level";
+			qcom,set = <3>;
+			regulator-min-microvolt =
+				<RPM_SMD_REGULATOR_LEVEL_RETENTION>;
+			regulator-max-microvolt =
+				<RPM_SMD_REGULATOR_LEVEL_TURBO>;
+			qcom,use-voltage-level;
+		};
+
+		pm8998_l27_floor_level: regulator-l27-floor-level {
+			compatible = "qcom,rpm-smd-regulator";
+			regulator-name = "pm8998_l27_floor_level";
+			qcom,set = <3>;
+			regulator-min-microvolt =
+				<RPM_SMD_REGULATOR_LEVEL_RETENTION>;
+			regulator-max-microvolt =
+				<RPM_SMD_REGULATOR_LEVEL_TURBO>;
+			qcom,use-voltage-floor-level;
+			qcom,always-send-voltage;
+		};
+	};
+
+	rpm-regulator-ldoa28 {
+		status = "okay";
+		pm8998_l28: regulator-l28 {
+			regulator-min-microvolt = <3008000>;
+			regulator-max-microvolt = <3008000>;
+			status = "okay";
+		};
+	};
+
+	rpm-regulator-vsa1 {
+		status = "okay";
+		pm8998_lvs1: regulator-lvs1 {
+			status = "okay";
+		};
+	};
+
+	rpm-regulator-vsa2 {
+		status = "okay";
+		pm8998_lvs2: regulator-lvs2 {
+			status = "okay";
+		};
+	};
+
+	rpm-regulator-bobb {
+		status = "okay";
+		pmi8998_bob: regulator-bob {
+			regulator-min-microvolt = <3312000>;
+			regulator-max-microvolt = <3600000>;
+			qcom,pwm-threshold-current = <2000000>;
+			qcom,init-bob-mode = <2>;
+			status = "okay";
+		};
+		pmi8998_bob_pin1: regulator-bob-pin1 {
+			compatible = "qcom,rpm-smd-regulator";
+			regulator-name = "pmi8998_bob_pin1";
+			qcom,set = <3>;
+			regulator-min-microvolt = <3312000>;
+			regulator-max-microvolt = <3600000>;
+			qcom,pwm-threshold-current = <2000000>;
+			qcom,init-bob-mode = <2>;
+			qcom,use-pin-ctrl-voltage1;
+		};
+		pmi8998_bob_pin2: regulator-bob-pin2 {
+			compatible = "qcom,rpm-smd-regulator";
+			regulator-name = "pmi8998_bob_pin2";
+			qcom,set = <3>;
+			regulator-min-microvolt = <3312000>;
+			regulator-max-microvolt = <3600000>;
+			qcom,pwm-threshold-current = <2000000>;
+			qcom,init-bob-mode = <2>;
+			qcom,use-pin-ctrl-voltage2;
+		};
+		pmi8998_bob_pin3: regulator-bob-pin3 {
+			compatible = "qcom,rpm-smd-regulator";
+			regulator-name = "pmi8998_bob_pin3";
+			qcom,set = <3>;
+			regulator-min-microvolt = <3312000>;
+			regulator-max-microvolt = <3600000>;
+			qcom,pwm-threshold-current = <2000000>;
+			qcom,init-bob-mode = <2>;
+			qcom,use-pin-ctrl-voltage3;
+		};
+	};
+};
+
+&spmi_bus {
+	qcom,pm8005@5 {
+		/* PM8005 S1 + S4 = 2 phase VDD_GFX supply */
+		pm8005_s1: regulator@1400 {
+			regulator-name = "pm8005_s1";
+			status = "okay";
+			regulator-min-microvolt = <524000>;
+			regulator-max-microvolt = <1100000>;
+			qcom,enable-time = <500>;
+		};
+	};
+
+	qcom,pm8998@1 {
+		pm8998_s10: regulator@2f00 {
+			compatible = "qcom,qpnp-regulator";
+			reg = <0x2f00 0x100>;
+			regulator-name = "pm8998_s10";
+			regulator-min-microvolt = <572000>;
+			regulator-max-microvolt = <1112000>;
+			qcom,enable-time = <500>;
+			regulator-always-on;
+		};
+
+		pm8998_s13: regulator@3800 {
+			compatible = "qcom,qpnp-regulator";
+			reg = <0x3800 0x100>;
+			regulator-name = "pm8998_s13";
+			regulator-min-microvolt = <572000>;
+			regulator-max-microvolt = <1112000>;
+			qcom,enable-time = <500>;
+			regulator-always-on;
+		};
+	};
+};
+
+/* Stub regulators */
+
+/ {
+	gfx_stub_vreg: regulator-gfx-stub {
+		compatible = "qcom,stub-regulator";
+		regulator-name = "gfx_stub_corner";
+		qcom,hpm-min-load = <100000>;
+		regulator-min-microvolt = <1>;
+		regulator-max-microvolt = <6>;
+		status = "disabled";
+	};
+};
+
+&soc {
+	/* CPR controller regulators */
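+	/*
+	 * For the CPR(H) corner regulators below, regulator-min/max-microvolt
+	 * encode corner indices rather than microvolts: each corner is
+	 * bounded by qcom,cpr-voltage-ceiling/-floor, tied to a CPU frequency
+	 * in qcom,corner-frequencies, and servoed between floor and ceiling
+	 * from the fused ring-oscillator targets.  (A summary of the
+	 * downstream CPR framework, not text from the original patch.)
+	 */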
+	apc0_cpr: cprh-ctrl@179c8000 {
+		compatible = "qcom,cprh-msm8998-v1-kbss-regulator";
+		reg = <0x179c8000 0x4000>, <0x00784000 0x1000>;
+		reg-names = "cpr_ctrl", "fuse_base";
+		clocks = <&clock_gcc clk_gcc_hmss_rbcpr_clk>;
+		clock-names = "core_clk";
+		qcom,cpr-ctrl-name = "apc0";
+		qcom,cpr-controller-id = <0>;
+
+		qcom,cpr-sensor-time = <1000>;
+		qcom,cpr-loop-time = <5000000>;
+		qcom,cpr-idle-cycles = <15>;
+		qcom,cpr-up-down-delay-time = <3000>;
+		qcom,cpr-step-quot-init-min = <11>;
+		qcom,cpr-step-quot-init-max = <12>;
+		qcom,cpr-count-mode = <0>;		/* All at once */
+		qcom,cpr-count-repeat = <1>;
+		qcom,cpr-down-error-step-limit = <1>;
+		qcom,cpr-up-error-step-limit = <1>;
+		qcom,cpr-corner-switch-delay-time = <209>;
+		qcom,cpr-voltage-settling-time = <1760>;
+
+		qcom,apm-threshold-voltage = <832000>;
+		qcom,apm-crossover-voltage = <880000>;
+		qcom,apm-hysteresis-voltage = <32000>;
+		qcom,voltage-step = <4000>;
+		qcom,voltage-base = <352000>;
+		qcom,cpr-saw-use-unit-mV;
+
+		qcom,cpr-enable;
+		qcom,cpr-hw-closed-loop;
+
+		qcom,cpr-panic-reg-addr-list =
+			<0x179cbaa4 0x17912c18>;
+		qcom,cpr-panic-reg-name-list =
+			"PWR_CPRH_STATUS", "APCLUS0_L2_SAW4_PMIC_STS";
+
+		qcom,cpr-aging-ref-voltage = <1112000>;
+		vdd-supply = <&pm8998_s10>;
+
+		thread@0 {
+			qcom,cpr-thread-id = <0>;
+			qcom,cpr-consecutive-up = <0>;
+			qcom,cpr-consecutive-down = <2>;
+			qcom,cpr-up-threshold = <2>;
+			qcom,cpr-down-threshold = <2>;
+
+			apc0_pwrcl_vreg: regulator {
+				regulator-name = "apc0_pwrcl_corner";
+				regulator-min-microvolt = <1>;
+				regulator-max-microvolt = <23>;
+
+				qcom,cpr-fuse-corners = <4>;
+				qcom,cpr-fuse-combos = <8>;
+				qcom,cpr-corners = <22>;
+
+				qcom,cpr-corner-fmax-map = <7 10 17 22>;
+
+				qcom,cpr-voltage-ceiling =
+					<896000  896000  896000  896000  896000
+					 896000  896000  896000  896000  896000
+					 896000  896000  896000  896000  896000
+					 896000  896000 1032000 1032000 1032000
+					1112000 1112000>;
+
+				qcom,cpr-voltage-floor =
+					<896000  896000  896000  896000  896000
+					 896000  896000  896000  896000  896000
+					 896000  896000  896000  896000  896000
+					 896000  896000  896000  896000  896000
+					 896000  896000>,
+					<572000  572000  572000  572000  572000
+					 572000  572000  572000  572000  572000
+					 664000  664000  664000  664000  664000
+					 664000  664000  752000  752000  752000
+					 752000  752000>,
+					<572000  572000  572000  572000  572000
+					 572000  572000  572000  572000  572000
+					 664000  664000  664000  664000  664000
+					 664000  664000  752000  752000  752000
+					 752000  752000>,
+					<572000  572000  572000  572000  572000
+					 572000  572000  572000  572000  572000
+					 664000  664000  664000  664000  664000
+					 664000  664000  752000  752000  752000
+					 752000  752000>,
+					<572000  572000  572000  572000  572000
+					 572000  572000  572000  572000  572000
+					 664000  664000  664000  664000  664000
+					 664000  664000  752000  752000  752000
+					 752000  752000>,
+					<572000  572000  572000  572000  572000
+					 572000  572000  572000  572000  572000
+					 664000  664000  664000  664000  664000
+					 664000  664000  752000  752000  752000
+					 752000  752000>,
+					<572000  572000  572000  572000  572000
+					 572000  572000  572000  572000  572000
+					 664000  664000  664000  664000  664000
+					 664000  664000  752000  752000  752000
+					 752000  752000>,
+					<572000  572000  572000  572000  572000
+					 572000  572000  572000  572000  572000
+					 664000  664000  664000  664000  664000
+					 664000  664000  752000  752000  752000
+					 752000  752000>;
+
+				qcom,cpr-floor-to-ceiling-max-range =
+					<55000  55000  55000  55000
+					 55000  55000  55000  55000
+					 55000  55000  65000  65000
+					 65000  65000  65000  65000
+					 65000  65000  65000  65000
+					 65000  65000>;
+
+				qcom,corner-frequencies =
+					<300000000  345600000  422400000
+					 499200000  576000000  633600000
+					 710400000  806400000  883200000
+					 960000000 1036800000 1113600000
+					1190400000 1248000000 1324800000
+					1401600000 1478400000 1574400000
+					1651200000 1728000000 1804800000
+					1881600000>;
+
+				qcom,cpr-ro-scaling-factor =
+					<3430 3512 3262 3426 3100 3238 2463
+					 2725 1749 2891 4058 4014 3195 2866
+					    0    0>,
+					<3430 3512 3262 3426 3100 3238 2463
+					 2725 1749 2891 4058 4014 3195 2866
+					    0    0>,
+					<3162 3153 3163 3261 3037 3135 2645
+					 2857 1864 2417 3499 3706 3315 2771
+					    0    0>,
+					<2632 2539 2835 2857 2767 2813 2690
+					 2827 1857 1632 2596 3068 3212 2454
+					    0    0>;
+
+				qcom,cpr-open-loop-voltage-fuse-adjustment =
+					<0 0 0 0>,
+					<40000 24000 0 0>,
+					<40000 24000 0 0>,
+					<40000 24000 0 0>,
+					<40000 24000 0 0>,
+					<40000 24000 0 0>,
+					<40000 24000 0 0>,
+					<40000 24000 0 0>;
+
+				qcom,cpr-closed-loop-voltage-fuse-adjustment =
+					<0 0 0 0>,
+					<20000 26000 0 0>,
+					<20000 26000 0 0>,
+					<20000 26000 0 0>,
+					<20000 26000 0 0>,
+					<20000 26000 0 0>,
+					<20000 26000 0 0>,
+					<20000 26000 0 0>;
+
+				qcom,allow-voltage-interpolation;
+				qcom,allow-quotient-interpolation;
+				qcom,cpr-scaled-open-loop-voltage-as-ceiling;
+
+				qcom,cpr-aging-max-voltage-adjustment = <15000>;
+				qcom,cpr-aging-ref-corner = <22>;
+				qcom,cpr-aging-ro-scaling-factor = <1620>;
+				qcom,allow-aging-voltage-adjustment = <0>;
+				qcom,allow-aging-open-loop-voltage-adjustment =
+					<1>;
+			};
+		};
+	};
+
+	apc1_cpr: cprh-ctrl@179c4000 {
+		compatible = "qcom,cprh-msm8998-v1-kbss-regulator";
+		reg = <0x179c4000 0x4000>, <0x00784000 0x1000>;
+		reg-names = "cpr_ctrl", "fuse_base";
+		clocks = <&clock_gcc clk_gcc_hmss_rbcpr_clk>;
+		clock-names = "core_clk";
+		qcom,cpr-ctrl-name = "apc1";
+		qcom,cpr-controller-id = <1>;
+
+		qcom,cpr-sensor-time = <1000>;
+		qcom,cpr-loop-time = <5000000>;
+		qcom,cpr-idle-cycles = <15>;
+		qcom,cpr-up-down-delay-time = <3000>;
+		qcom,cpr-step-quot-init-min = <9>;
+		qcom,cpr-step-quot-init-max = <14>;
+		qcom,cpr-count-mode = <0>;		/* All at once */
+		qcom,cpr-count-repeat = <1>;
+		qcom,cpr-down-error-step-limit = <1>;
+		qcom,cpr-up-error-step-limit = <1>;
+		qcom,cpr-corner-switch-delay-time = <209>;
+		qcom,cpr-voltage-settling-time = <1760>;
+
+		qcom,apm-threshold-voltage = <832000>;
+		qcom,apm-crossover-voltage = <880000>;
+		qcom,apm-hysteresis-voltage = <32000>;
+		qcom,voltage-step = <4000>;
+		qcom,voltage-base = <352000>;
+		qcom,cpr-saw-use-unit-mV;
+
+		qcom,cpr-enable;
+		qcom,cpr-hw-closed-loop;
+
+		qcom,cpr-panic-reg-addr-list =
+			<0x179c7aa4 0x17812c18>;
+		qcom,cpr-panic-reg-name-list =
+			"PERF_CPRH_STATUS", "APCLUS1_L2_SAW4_PMIC_STS";
+
+		qcom,cpr-aging-ref-voltage = <1112000>;
+		vdd-supply = <&pm8998_s13>;
+
+		thread@0 {
+			qcom,cpr-thread-id = <0>;
+			qcom,cpr-consecutive-up = <0>;
+			qcom,cpr-consecutive-down = <2>;
+			qcom,cpr-up-threshold = <2>;
+			qcom,cpr-down-threshold = <2>;
+
+			apc1_perfcl_vreg: regulator {
+				regulator-name = "apc1_perfcl_corner";
+				regulator-min-microvolt = <1>;
+				regulator-max-microvolt = <26>;
+
+				qcom,cpr-fuse-corners = <4>;
+				qcom,cpr-fuse-combos = <8>;
+				qcom,cpr-corners = <25>;
+
+				qcom,cpr-corner-fmax-map = <8 12 18 25>;
+
+				qcom,cpr-voltage-ceiling =
+					<896000  896000  896000  896000
+					 896000  896000  896000  896000
+					 896000  896000  896000  896000
+					 896000  896000  896000  896000
+					 896000  896000 1032000 1032000
+					1032000 1032000 1112000 1112000
+					1112000>;
+
+				qcom,cpr-voltage-floor =
+					<896000  896000  896000  896000
+					 896000  896000  896000  896000
+					 896000  896000  896000  896000
+					 896000  896000  896000  896000
+					 896000  896000  896000  896000
+					 896000  896000  896000  896000
+					 896000>,
+					<572000  572000  572000  572000
+					 572000  572000  572000  572000
+					 572000  572000  572000  572000
+					 664000  664000  664000  664000
+					 664000  664000  752000  752000
+					 752000  752000  752000  752000
+					 752000>,
+					<572000  572000  572000  572000
+					 572000  572000  572000  572000
+					 572000  572000  572000  572000
+					 664000  664000  664000  664000
+					 664000  664000  752000  752000
+					 752000  752000  752000  752000
+					 752000>,
+					<572000  572000  572000  572000
+					 572000  572000  572000  572000
+					 572000  572000  572000  572000
+					 664000  664000  664000  664000
+					 664000  664000  752000  752000
+					 752000  752000  752000  752000
+					 752000>,
+					<572000  572000  572000  572000
+					 572000  572000  572000  572000
+					 572000  572000  572000  572000
+					 664000  664000  664000  664000
+					 664000  664000  752000  752000
+					 752000  752000  752000  752000
+					 752000>,
+					<572000  572000  572000  572000
+					 572000  572000  572000  572000
+					 572000  572000  572000  572000
+					 664000  664000  664000  664000
+					 664000  664000  752000  752000
+					 752000  752000  752000  752000
+					 752000>,
+					<572000  572000  572000  572000
+					 572000  572000  572000  572000
+					 572000  572000  572000  572000
+					 664000  664000  664000  664000
+					 664000  664000  752000  752000
+					 752000  752000  752000  752000
+					 752000>,
+					<572000  572000  572000  572000
+					 572000  572000  572000  572000
+					 572000  572000  572000  572000
+					 664000  664000  664000  664000
+					 664000  664000  752000  752000
+					 752000  752000  752000  752000
+					 752000>;
+
+				qcom,cpr-floor-to-ceiling-max-range =
+					<55000  55000  55000  55000
+					 55000  55000  55000  55000
+					 55000  55000  55000  55000
+					 65000  65000  65000  65000
+					 65000  65000  65000  65000
+					 65000  65000  65000  65000
+					 65000>;
+
+				qcom,corner-frequencies =
+					<300000000  345600000  422400000
+					 480000000  556800000  633600000
+					 710400000  787200000  844800000
+					 902400000  979200000 1056000000
+					1171200000 1248000000 1324800000
+					1401600000 1478400000 1536000000
+					1632000000 1708800000 1785600000
+					1862400000 1939200000 2016000000
+					2092800000>;
+
+				qcom,cpr-ro-scaling-factor =
+					<3430 3512 3262 3426 3100 3238 2463
+					 2725 1749 2891 4058 4014 3195 2866
+					    0    0>,
+					<3430 3512 3262 3426 3100 3238 2463
+					 2725 1749 2891 4058 4014 3195 2866
+					    0    0>,
+					<3162 3153 3163 3261 3037 3135 2645
+					 2857 1864 2417 3499 3706 3315 2771
+					    0    0>,
+					<2632 2539 2835 2857 2767 2813 2690
+					 2827 1857 1632 2596 3068 3212 2454
+					    0    0>;
+
+				qcom,cpr-open-loop-voltage-fuse-adjustment =
+					<0 0 0 0>,
+					<8000 0 0 52000>,
+					<8000 0 0 52000>,
+					<8000 0 0 52000>,
+					<8000 0 0 52000>,
+					<8000 0 0 52000>,
+					<8000 0 0 52000>,
+					<8000 0 0 52000>;
+
+				qcom,cpr-closed-loop-voltage-fuse-adjustment =
+					<0 0 0 0>,
+					<0 0 0 50000>,
+					<0 0 0 50000>,
+					<0 0 0 50000>,
+					<0 0 0 50000>,
+					<0 0 0 50000>,
+					<0 0 0 50000>,
+					<0 0 0 50000>;
+
+				qcom,allow-voltage-interpolation;
+				qcom,allow-quotient-interpolation;
+				qcom,cpr-scaled-open-loop-voltage-as-ceiling;
+
+				qcom,cpr-aging-max-voltage-adjustment = <15000>;
+				qcom,cpr-aging-ref-corner = <25>;
+				qcom,cpr-aging-ro-scaling-factor = <1700>;
+				qcom,allow-aging-voltage-adjustment = <0>;
+				qcom,allow-aging-open-loop-voltage-adjustment =
+					<1>;
+			};
+		};
+	};
+
+	gfx_cpr: cpr4-ctrl@5061000 {
+		compatible = "qcom,cpr4-msm8998-v1-mmss-regulator";
+		reg =	<0x05061000 0x4000>,
+			<0x00784000 0x1000>,
+			<0x05065204 0x4>;
+		reg-names = "cpr_ctrl", "fuse_base", "aging_allowed";
+		clocks = <&clock_gpu clk_gpucc_rbcpr_clk>,
+			 <&clock_gcc clk_cnoc_clk>;
+		clock-names = "core_clk", "bus_clk";
+		interrupts = <GIC_SPI 285 IRQ_TYPE_EDGE_RISING>;
+		interrupt-names = "cpr";
+		qcom,cpr-ctrl-name = "gfx";
+
+		qcom,cpr-sensor-time = <1000>;
+		qcom,cpr-loop-time = <5000000>;
+		qcom,cpr-idle-cycles = <15>;
+		qcom,cpr-step-quot-init-min = <8>;
+		qcom,cpr-step-quot-init-max = <12>;
+		qcom,cpr-count-mode = <0>;		/* All-at-once min */
+		qcom,cpr-count-repeat = <1>;
+
+		vdd-supply = <&pm8005_s1>;
+		qcom,voltage-step = <4000>;
+		mem-acc-supply = <&gfx_mem_acc_vreg>;
+		qcom,cpr-aging-ref-voltage = <1032000>;
+		qcom,cpr-aging-allowed-reg-mask  = <0x00000003>;
+		qcom,cpr-aging-allowed-reg-value = <0x00000003>;
+
+		qcom,cpr-enable;
+
+		thread@0 {
+			qcom,cpr-thread-id = <0>;
+			qcom,cpr-consecutive-up = <0>;
+			qcom,cpr-consecutive-down = <2>;
+			qcom,cpr-up-threshold = <0>;
+			qcom,cpr-down-threshold = <2>;
+
+			gfx_vreg: regulator {
+				regulator-name = "gfx_corner";
+				regulator-min-microvolt = <1>;
+				regulator-max-microvolt = <6>;
+
+				qcom,cpr-fuse-corners = <4>;
+				qcom,cpr-fuse-combos = <8>;
+				qcom,cpr-corners = <6>;
+
+				qcom,cpr-corner-fmax-map = <1 3 5 6>;
+
+				qcom,cpr-voltage-ceiling =
+					<896000  896000  896000  896000  896000
+					1032000>,
+					<672000  740000  800000  868000  976000
+					1100000>,
+					<672000  740000  800000  868000  976000
+					1100000>,
+					<672000  740000  800000  868000  976000
+					1100000>,
+					<672000  740000  800000  868000  976000
+					1100000>,
+					<672000  740000  800000  868000  976000
+					1100000>,
+					<672000  740000  800000  868000  976000
+					1100000>,
+					<672000  740000  800000  868000  976000
+					1100000>;
+
+				qcom,cpr-voltage-floor =
+					<896000  896000  896000  896000  896000
+					 896000>,
+					<528000  528000  568000  612000  664000
+					 752000>,
+					<528000  528000  568000  612000  664000
+					 752000>,
+					<528000  528000  568000  612000  664000
+					 752000>,
+					<528000  528000  568000  612000  664000
+					 752000>,
+					<528000  528000  568000  612000  664000
+					 752000>,
+					<528000  528000  568000  612000  664000
+					 752000>,
+					<528000  528000  568000  612000  664000
+					 752000>;
+
+				qcom,mem-acc-voltage = <1 1 1 2 2 2>;
+
+				qcom,corner-frequencies =
+					<171000000 251000000 332000000
+					 403000000 504000000 650000000>;
+
+				qcom,cpr-target-quotients =
+				      <   0    0  404  478  363  411  140  176
+					105    0    0    0    0    0    0    0>,
+				      <   0    0  574  651  532  584  266  319
+					196    0    0    0    0    0    0    0>,
+				      <   0    0  743  830  693  753  389  456
+					285    0    0    0    0    0    0    0>,
+				      <   0    0  879  977  829  893  495  570
+					365    0    0    0    0    0    0    0>,
+				      <   0    0 1168 1270 1097 1150    0    0
+					  0    0    0 1406  899  805    0    0>,
+				      <1669 1757    0    0    0    0    0    0
+					  0 1359 1902 1740    0 1033    0    0>;
+
+				qcom,cpr-ro-scaling-factor =
+				      <2389 2287 2985 3112 2873 2904 2159 2399
+				       1580 1602 2158 3042 2780 2069    0    0>,
+				      <2389 2287 2985 3112 2873 2904 2159 2399
+				       1580 1602 2158 3042 2780 2069    0    0>,
+				      <2389 2287 2985 3112 2873 2904 2159 2399
+				       1580 1602 2158 3042 2780 2069    0    0>,
+				      <2389 2287 2985 3112 2873 2904 2159 2399
+				       1580 1602 2158 3042 2780 2069    0    0>,
+				      <2389 2287 2985 3112 2873 2904 2159 2399
+				       1580 1602 2158 3042 2780 2069    0    0>,
+				      <2389 2287 2985 3112 2873 2904 2159 2399
+				       1580 1602 2158 3042 2780 2069    0    0>;
+
+				qcom,cpr-open-loop-voltage-fuse-adjustment =
+					<   72000        0        0        0>,
+					<   72000        0        0        0>,
+					<   72000        0        0        0>,
+					<   72000        0        0        0>,
+					<   72000        0        0        0>,
+					<   72000        0        0        0>,
+					<   72000        0        0        0>,
+					<   72000        0        0        0>;
+
+				qcom,cpr-closed-loop-voltage-adjustment =
+					<   65000    26000     8000        0
+						0        0>,
+					<   65000    26000     8000        0
+						0        0>,
+					<   65000    26000     8000        0
+						0        0>,
+					<   65000    26000     8000        0
+						0        0>,
+					<   65000    26000     8000        0
+						0        0>,
+					<   65000    26000     8000        0
+						0        0>,
+					<   65000    26000     8000        0
+						0        0>,
+					<   65000    26000     8000        0
+						0        0>;
+
+				qcom,cpr-floor-to-ceiling-max-range =
+				       <75000 75000 75000 75000 75000 75000>;
+
+			     qcom,cpr-fused-closed-loop-voltage-adjustment-map =
+					<0 0 1 2 3 4>;
+
+				qcom,allow-voltage-interpolation;
+				qcom,cpr-scaled-open-loop-voltage-as-ceiling;
+
+				qcom,cpr-aging-max-voltage-adjustment = <15000>;
+				qcom,cpr-aging-ref-corner = <6>;
+				qcom,cpr-aging-ro-scaling-factor = <2950>;
+				qcom,allow-aging-voltage-adjustment = <0>;
+				qcom,allow-aging-open-loop-voltage-adjustment =
+					<1>;
+			};
+		};
+	};
+
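+	/*
+	 * The mem-acc "regulator" below is not a power rail: selecting its
+	 * corner rewrites the ACC (memory accelerator) select bit at
+	 * 0x01fcf004 per qcom,corner-acc-map, so the memory arrays are
+	 * reconfigured as the GFX rail crosses corners -- an assumption
+	 * based on the downstream mem-acc-regulator binding.
+	 */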
+	gfx_mem_acc_vreg: regulator@1fcf004 {
+		compatible = "qcom,mem-acc-regulator";
+		reg = <0x01fcf004 0x4>;
+		reg-names = "acc-sel-l1";
+		regulator-name = "gfx_mem_acc_corner";
+		regulator-min-microvolt = <1>;
+		regulator-max-microvolt = <2>;
+
+		qcom,corner-acc-map = <0x1 0x0>;
+		qcom,acc-sel-l1-bit-pos = <0>;
+		qcom,acc-sel-l1-bit-size = <1>;
+	};
+};
+
+&pmi8998_charger {
+	smb2_vbus: qcom,smb2-vbus {
+		regulator-name = "smb2-vbus";
+	};
+
+	smb2_vconn: qcom,smb2-vconn {
+		regulator-name = "smb2-vconn";
+	};
+};
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/arch/arm64/boot/dts/qcom/msm8998-sde-display.dtsi	2019-01-22 16:16:21.199225542 +0100
@@ -0,0 +1,75 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+&soc {
+
+	sde_wb: qcom,wb-display@0 {
+		compatible = "qcom,wb-display";
+		cell-index = <0>;
+		label = "wb_display";
+	};
+
+	msm_ext_disp: qcom,msm_ext_disp {
+		compatible = "qcom,msm-ext-disp";
+
+		ext_disp_audio_codec: qcom,msm-ext-disp-audio-codec-rx {
+			compatible = "qcom,msm-ext-disp-audio-codec-rx";
+			qcom,msm_ext_disp = <&msm_ext_disp>;
+		};
+	};
+
+	sde_hdmi: qcom,hdmi-display {
+		compatible = "qcom,hdmi-display";
+		label = "sde_hdmi";
+		qcom,display-type = "secondary";
+		qcom,msm_ext_disp = <&msm_ext_disp>;
+	};
+
+	sde_hdmi_cec: qcom,hdmi-cec@c9a0000 {
+		compatible = "qcom,hdmi-cec";
+		label = "sde_hdmi_cec";
+		qcom,hdmi-dev = <&sde_hdmi>;
+		interrupt-parent = <&sde_hdmi_tx>;
+		interrupts = <1 0>;
+
+		reg = <0xc9a0000 0x50c>;
+		reg-names = "hdmi_cec";
+
+		clocks = <&clock_mmss clk_mmss_mnoc_ahb_clk>,
+		       <&clock_mmss clk_mmss_mdss_ahb_clk>,
+		       <&clock_mmss clk_mmss_mdss_hdmi_clk>;
+		clock-names = "cec_mnoc_clk", "cec_iface_clk", "cec_core_clk";
+
+		pinctrl-names = "cec_active", "cec_sleep";
+		pinctrl-0 = <&mdss_hdmi_cec_active>;
+		pinctrl-1 = <&mdss_hdmi_cec_suspend>;
+
+		cec-gdsc-supply = <&gdsc_mdss>;
+		qcom,platform-supply-entries {
+			#address-cells = <1>;
+			#size-cells = <0>;
+
+			qcom,platform-supply-entry@0 {
+				reg = <0>;
+				qcom,supply-name = "cec-gdsc";
+				qcom,supply-min-voltage = <0>;
+				qcom,supply-max-voltage = <0>;
+				qcom,supply-enable-load = <0>;
+				qcom,supply-disable-load = <0>;
+			};
+		};
+	};
+};
+
+&sde_kms {
+	connectors = <&sde_hdmi_tx &sde_hdmi &sde_wb>;
+};
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/arch/arm64/boot/dts/qcom/msm8998-sde.dtsi	2019-01-22 16:16:21.199225542 +0100
@@ -0,0 +1,245 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+&soc {
+	sde_kms: qcom,sde_kms@c900000 {
+		compatible = "qcom,sde-kms";
+		reg = <0x0c900000 0x90000>,
+		      <0x0c9b0000 0x2008>;
+		reg-names = "mdp_phys", "vbif_phys";
+
+		/* clock and supply entries */
+		clocks = <&clock_gcc clk_mmssnoc_axi_clk>,
+			<&clock_mmss clk_mmss_mnoc_ahb_clk>,
+			<&clock_mmss clk_mmss_bimc_smmu_ahb_clk>,
+			<&clock_mmss clk_mmss_bimc_smmu_axi_clk>,
+			<&clock_mmss clk_mmss_mnoc_ahb_clk>,
+			<&clock_mmss clk_mmss_mdss_ahb_clk>,
+			<&clock_mmss clk_mmss_mdss_axi_clk>,
+			<&clock_mmss clk_mdp_clk_src>,
+			<&clock_mmss clk_mmss_mdss_mdp_clk>,
+			<&clock_mmss clk_mmss_mdss_vsync_clk>,
+			<&clock_mmss clk_mmss_mdss_mdp_lut_clk>;
+		clock-names = "mmss_noc_axi_clk",
+			"mmss_noc_ahb_clk",
+			"mmss_smmu_ahb_clk",
+			"mmss_smmu_axi_clk",
+			"mnoc_clk", "iface_clk", "bus_clk",
+			"core_clk_src", "core_clk", "vsync_clk",
+			"lut_clk";
+		clock-rate = <0 0 0 0 0 0 0 330000000 0 0 0 0>;
+		clock-max-rate = <0 0 0 0 0 0 412500000 412500000 0 0 0 0>;
+		qcom,sde-max-bw-low-kbps = <6700000>;
+		qcom,sde-max-bw-high-kbps = <6700000>;
+
+		/* interrupt config */
+		interrupt-parent = <&intc>;
+		interrupts = <0 83 0>;
+		interrupt-controller;
+		#interrupt-cells = <1>;
+		iommus = <&mmss_smmu 0>;
+
+		gpus = <&msm_gpu>;
+
+		/* hw blocks */
+		qcom,sde-off = <0x1000>;
+		qcom,sde-len = <0x458>;
+
+		qcom,sde-ctl-off = <0x2000 0x2200 0x2400
+				     0x2600 0x2800>;
+		qcom,sde-ctl-size = <0x94>;
+
+		qcom,sde-mixer-off = <0x45000 0x46000 0x47000
+				      0x48000 0x49000 0x4a000>;
+		qcom,sde-mixer-size = <0x31c>;
+
+		qcom,sde-dspp-off = <0x55000 0x57000>;
+		qcom,sde-dspp-size = <0x17e0>;
+
+		qcom,sde-wb-off = <0x66000>;
+		qcom,sde-wb-size = <0x2dc>;
+
+		qcom,sde-wb-id = <2>;
+		qcom,sde-wb-xin-id = <6>;
+		qcom,sde-wb-clk-ctrl = <0x2bc 0x10>;
+		qcom,sde-intf-off = <0x6b000 0x6b800
+					0x6c000 0x6c800>;
+		qcom,sde-intf-size = <0x280>;
+
+		qcom,sde-intf-type = "dp", "dsi", "dsi", "hdmi";
+
+		qcom,sde-pp-off = <0x71000 0x71800
+				0x72000 0x72800 0x73000>;
+		qcom,sde-pp-slave = <0x0 0x0 0x0 0x0 0x1>;
+
+		qcom,sde-pp-size = <0xd4>;
+
+		qcom,sde-te2-off = <0x2000 0x2000 0x0 0x0 0x0>;
+		qcom,sde-cdm-off = <0x7a200>;
+		qcom,sde-cdm-size = <0x224>;
+
+		qcom,sde-dsc-off = <0x81000 0x81400>;
+		qcom,sde-dsc-size = <0x140>;
+
+		qcom,sde-intf-max-prefetch-lines = <0x15 0x15 0x15 0x15>;
+
+		qcom,sde-sspp-type =  "vig", "vig", "vig", "vig",
+						"dma", "dma", "dma", "dma",
+						"cursor", "cursor";
+
+		qcom,sde-sspp-off = <0x5000 0x7000 0x9000 0xb000
+						0x25000 0x27000 0x29000 0x2b000
+						0x35000 0x37000>;
+		qcom,sde-sspp-src-size = <0x1ac>;
+
+		qcom,sde-sspp-xin-id = <0 4 8 12 1 5 9 13 2 10>;
+
+		/* offsets are relative to "mdp_phys" + qcom,sde-off */
+		qcom,sde-sspp-clk-ctrl = <0x2ac 0x8>, <0x2b4 0x8>,
+				  <0x2c4 0x8>, <0x2c4 0xc>, <0x3a8 0x10>,
+				  <0x3b0 0x10>;
+
+		qcom,sde-qseed-type = "qseedv3";
+		qcom,sde-mixer-linewidth = <2560>;
+		qcom,sde-sspp-linewidth = <2560>;
+		qcom,sde-mixer-blendstages = <0x7>;
+		qcom,sde-highest-bank-bit = <0x2>;
+		qcom,sde-panic-per-pipe;
+		qcom,sde-has-cdp;
+		qcom,sde-has-src-split;
+		qcom,sde-sspp-danger-lut = <0x000f 0xffff 0x0000>;
+		qcom,sde-sspp-safe-lut = <0xfffc 0xff00 0xffff>;
+
+		qcom,sde-vbif-off = <0>;
+		qcom,sde-vbif-id = <0>;
+		qcom,sde-vbif-default-ot-rd-limit = <32>;
+		qcom,sde-vbif-default-ot-wr-limit = <32>;
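+		/*
+		 * Dynamic OT limits are <pixels-per-second, OT limit> pairs:
+		 * higher pixel rates allow more outstanding VBIF transactions.
+		 */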
+		qcom,sde-vbif-dynamic-ot-rd-limit = <62208000 2>,
+			<124416000 4>, <248832000 16>;
+		qcom,sde-vbif-dynamic-ot-wr-limit = <62208000 2>,
+			<124416000 4>, <248832000 16>;
+
+		vdd-supply = <&gdsc_mdss>;
+		gdsc-mmagic-mdss-supply = <&gdsc_bimc_smmu>;
+		qcom,sde-csc-type = "csc-10bit";
+
+		qcom,sde-sspp-vig-blocks {
+			qcom,sde-vig-csc-off = <0x1a00>;
+			qcom,sde-vig-qseed-off = <0xa00>;
+			qcom,sde-vig-qseed-size = <0xa0>;
+		};
+
+		qcom,platform-supply-entries {
+			#address-cells = <1>;
+			#size-cells = <0>;
+
+			qcom,platform-supply-entry@0 {
+				reg = <0>;
+				qcom,supply-name = "gdsc-mmagic-mdss";
+				qcom,supply-min-voltage = <0>;
+				qcom,supply-max-voltage = <0>;
+				qcom,supply-enable-load = <0>;
+				qcom,supply-disable-load = <0>;
+			};
+
+			qcom,platform-supply-entry@1 {
+				reg = <1>;
+				qcom,supply-name = "vdd";
+				qcom,supply-min-voltage = <0>;
+				qcom,supply-max-voltage = <0>;
+				qcom,supply-enable-load = <0>;
+				qcom,supply-disable-load = <0>;
+			};
+		};
+
+		smmu_kms_unsec: qcom,smmu_kms_unsec_cb {
+			compatible = "qcom,smmu_sde_unsec";
+			iommus = <&mmss_smmu 0>;
+		};
+
+		smmu_kms_sec: qcom,smmu_kms_sec_cb {
+			compatible = "qcom,smmu_sde_sec";
+			iommus = <&mmss_smmu 1>;
+		};
+
+		/* data and reg bus scale settings */
+		qcom,sde-data-bus {
+			qcom,msm-bus,name = "mdss_sde";
+			qcom,msm-bus,num-cases = <3>;
+			qcom,msm-bus,num-paths = <2>;
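+			/* vectors are <master-id slave-id ab ib> tuples, KBps */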
+			qcom,msm-bus,vectors-KBps =
+				<22 512 0 0>, <23 512 0 0>,
+				<22 512 0 6400000>, <23 512 0 6400000>,
+				<22 512 0 6400000>, <23 512 0 6400000>;
+		};
+		qcom,sde-reg-bus {
+			qcom,msm-bus,name = "mdss_reg";
+			qcom,msm-bus,num-cases = <4>;
+			qcom,msm-bus,num-paths = <1>;
+			qcom,msm-bus,active-only;
+			qcom,msm-bus,vectors-KBps =
+				<1 590 0 0>,
+				<1 590 0 76800>,
+				<1 590 0 160000>,
+				<1 590 0 320000>;
+		};
+	};
+
+	sde_hdmi_tx: qcom,hdmi_tx_8998@c9a0000 {
+		cell-index = <0>;
+		compatible = "qcom,hdmi-tx-8998";
+		reg =	<0xc9a0000 0x50c>,
+			<0x780000 0x621c>,
+			<0xc9e0000 0x28>;
+		reg-names = "core_physical", "qfprom_physical", "hdcp_physical";
+		interrupt-parent = <&sde_kms>;
+		interrupts = <8 0>;
+		interrupt-controller;
+		#interrupt-cells = <1>;
+		qcom,hdmi-tx-ddc-clk-gpio = <&tlmm 32 0>;
+		qcom,hdmi-tx-ddc-data-gpio = <&tlmm 33 0>;
+		qcom,hdmi-tx-hpd-gpio = <&tlmm 34 0>;
+		qcom,hdmi-tx-hpd5v-gpio = <&tlmm 133 0>;
+		pinctrl-names = "default", "sleep";
+		pinctrl-0 = <&mdss_hdmi_hpd_active
+			&mdss_hdmi_ddc_active
+			&mdss_hdmi_5v_active>;
+		pinctrl-1 = <&mdss_hdmi_hpd_suspend
+			&mdss_hdmi_ddc_suspend
+			&mdss_hdmi_5v_suspend>;
+		hpd-gdsc-supply = <&gdsc_mdss>;
+		qcom,supply-names = "hpd-gdsc";
+		qcom,min-voltage-level = <0>;
+		qcom,max-voltage-level = <0>;
+		qcom,enable-load = <0>;
+		qcom,disable-load = <0>;
+
+		clocks = <&clock_mmss clk_mmss_mnoc_ahb_clk>,
+			 <&clock_mmss clk_mmss_mdss_ahb_clk>,
+			 <&clock_mmss clk_mmss_mdss_hdmi_clk>,
+			 <&clock_mmss clk_mmss_mdss_mdp_clk>,
+			 <&clock_mmss clk_mmss_mdss_hdmi_dp_ahb_clk>,
+			 <&clock_mmss clk_mmss_mdss_extpclk_clk>,
+			 <&clock_mmss clk_mmss_mnoc_ahb_clk>,
+			 <&clock_mmss clk_mmss_misc_ahb_clk>,
+			 <&clock_mmss clk_mmss_mdss_axi_clk>;
+		clock-names = "hpd_mnoc_clk", "hpd_iface_clk",
+				"hpd_core_clk", "hpd_mdp_core_clk",
+				"hpd_alt_iface_clk", "core_extp_clk",
+				"mnoc_clk", "hpd_misc_ahb_clk",
+				"hpd_bus_clk";
+
+		/*qcom,mdss-fb-map = <&mdss_fb2>;*/
+		qcom,pluggable;
+	};
+};
+#include "msm8998-sde-display.dtsi"
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/arch/arm64/boot/dts/qcom/msm8998-smp2p.dtsi	2019-01-22 16:16:21.199225542 +0100
@@ -0,0 +1,267 @@
+/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+&soc {
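+	/*
+	 * One smp2p entry per remote subsystem: remote-pid 1, 2 and 3 are
+	 * the modem, adsp and dsps (sensors) processors; irq-bitmask picks
+	 * the outgoing IPC bit in the APCS register given by "reg".
+	 */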
+	qcom,smp2p-modem@17911008 {
+		compatible = "qcom,smp2p";
+		reg = <0x17911008 0x4>;
+		qcom,remote-pid = <1>;
+		qcom,irq-bitmask = <0x4000>;
+		interrupts = <0 451 1>;
+	};
+
+	qcom,smp2p-adsp@17911008 {
+		compatible = "qcom,smp2p";
+		reg = <0x17911008 0x4>;
+		qcom,remote-pid = <2>;
+		qcom,irq-bitmask = <0x400>;
+		interrupts = <0 158 1>;
+	};
+
+	qcom,smp2p-dsps@17911008 {
+		compatible = "qcom,smp2p";
+		reg = <0x17911008 0x4>;
+		qcom,remote-pid = <3>;
+		qcom,irq-bitmask = <0x4000000>;
+		interrupts = <0 178 1>;
+	};
+
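+	/*
+	 * remote-pid 15 is the smp2p loopback host; the *_test_* nodes
+	 * below exercise these entries from the smp2p test driver.
+	 */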
+	smp2pgpio_smp2p_15_in: qcom,smp2pgpio-smp2p-15-in {
+		compatible = "qcom,smp2pgpio";
+		qcom,entry-name = "smp2p";
+		qcom,remote-pid = <15>;
+		qcom,is-inbound;
+		gpio-controller;
+		#gpio-cells = <2>;
+		interrupt-controller;
+		#interrupt-cells = <2>;
+	};
+
+	qcom,smp2pgpio_test_smp2p_15_in {
+		compatible = "qcom,smp2pgpio_test_smp2p_15_in";
+		gpios = <&smp2pgpio_smp2p_15_in 0 0>;
+	};
+
+	smp2pgpio_smp2p_15_out: qcom,smp2pgpio-smp2p-15-out {
+		compatible = "qcom,smp2pgpio";
+		qcom,entry-name = "smp2p";
+		qcom,remote-pid = <15>;
+		gpio-controller;
+		#gpio-cells = <2>;
+		interrupt-controller;
+		#interrupt-cells = <2>;
+	};
+
+	qcom,smp2pgpio_test_smp2p_15_out {
+		compatible = "qcom,smp2pgpio_test_smp2p_15_out";
+		gpios = <&smp2pgpio_smp2p_15_out 0 0>;
+	};
+
+	smp2pgpio_smp2p_1_in: qcom,smp2pgpio-smp2p-1-in {
+		compatible = "qcom,smp2pgpio";
+		qcom,entry-name = "smp2p";
+		qcom,remote-pid = <1>;
+		qcom,is-inbound;
+		gpio-controller;
+		#gpio-cells = <2>;
+		interrupt-controller;
+		#interrupt-cells = <2>;
+	};
+
+	qcom,smp2pgpio_test_smp2p_1_in {
+		compatible = "qcom,smp2pgpio_test_smp2p_1_in";
+		gpios = <&smp2pgpio_smp2p_1_in 0 0>;
+	};
+
+	smp2pgpio_smp2p_1_out: qcom,smp2pgpio-smp2p-1-out {
+		compatible = "qcom,smp2pgpio";
+		qcom,entry-name = "smp2p";
+		qcom,remote-pid = <1>;
+		gpio-controller;
+		#gpio-cells = <2>;
+		interrupt-controller;
+		#interrupt-cells = <2>;
+	};
+
+	qcom,smp2pgpio_test_smp2p_1_out {
+		compatible = "qcom,smp2pgpio_test_smp2p_1_out";
+		gpios = <&smp2pgpio_smp2p_1_out 0 0>;
+	};
+
+	smp2pgpio_smp2p_2_in: qcom,smp2pgpio-smp2p-2-in {
+		compatible = "qcom,smp2pgpio";
+		qcom,entry-name = "smp2p";
+		qcom,remote-pid = <2>;
+		qcom,is-inbound;
+		gpio-controller;
+		#gpio-cells = <2>;
+		interrupt-controller;
+		#interrupt-cells = <2>;
+	};
+
+	qcom,smp2pgpio_test_smp2p_2_in {
+		compatible = "qcom,smp2pgpio_test_smp2p_2_in";
+		gpios = <&smp2pgpio_smp2p_2_in 0 0>;
+	};
+
+	smp2pgpio_smp2p_2_out: qcom,smp2pgpio-smp2p-2-out {
+		compatible = "qcom,smp2pgpio";
+		qcom,entry-name = "smp2p";
+		qcom,remote-pid = <2>;
+		gpio-controller;
+		#gpio-cells = <2>;
+		interrupt-controller;
+		#interrupt-cells = <2>;
+	};
+
+	qcom,smp2pgpio_test_smp2p_2_out {
+		compatible = "qcom,smp2pgpio_test_smp2p_2_out";
+		gpios = <&smp2pgpio_smp2p_2_out 0 0>;
+	};
+
+	smp2pgpio_smp2p_3_in: qcom,smp2pgpio-smp2p-3-in {
+		compatible = "qcom,smp2pgpio";
+		qcom,entry-name = "smp2p";
+		qcom,remote-pid = <3>;
+		qcom,is-inbound;
+		gpio-controller;
+		#gpio-cells = <2>;
+		interrupt-controller;
+		#interrupt-cells = <2>;
+	};
+
+	qcom,smp2pgpio_test_smp2p_3_in {
+		compatible = "qcom,smp2pgpio_test_smp2p_3_in";
+		gpios = <&smp2pgpio_smp2p_3_in 0 0>;
+	};
+
+	smp2pgpio_smp2p_3_out: qcom,smp2pgpio-smp2p-3-out {
+		compatible = "qcom,smp2pgpio";
+		qcom,entry-name = "smp2p";
+		qcom,remote-pid = <3>;
+		gpio-controller;
+		#gpio-cells = <2>;
+		interrupt-controller;
+		#interrupt-cells = <2>;
+	};
+
+	/* ssr - inbound entry from mss */
+	smp2pgpio_ssr_smp2p_1_in: qcom,smp2pgpio-ssr-smp2p-1-in {
+		compatible = "qcom,smp2pgpio";
+		qcom,entry-name = "slave-kernel";
+		qcom,remote-pid = <1>;
+		qcom,is-inbound;
+		gpio-controller;
+		#gpio-cells = <2>;
+		interrupt-controller;
+		#interrupt-cells = <2>;
+	};
+
+	/* ssr - outbound entry to mss */
+	smp2pgpio_ssr_smp2p_1_out: qcom,smp2pgpio-ssr-smp2p-1-out {
+		compatible = "qcom,smp2pgpio";
+		qcom,entry-name = "master-kernel";
+		qcom,remote-pid = <1>;
+		gpio-controller;
+		#gpio-cells = <2>;
+		interrupt-controller;
+		#interrupt-cells = <2>;
+	};
+
+	/* ssr - inbound entry from lpass */
+	smp2pgpio_ssr_smp2p_2_in: qcom,smp2pgpio-ssr-smp2p-2-in {
+		compatible = "qcom,smp2pgpio";
+		qcom,entry-name = "slave-kernel";
+		qcom,remote-pid = <2>;
+		qcom,is-inbound;
+		gpio-controller;
+		#gpio-cells = <2>;
+		interrupt-controller;
+		#interrupt-cells = <2>;
+	};
+
+	/* ssr - outbound entry to lpass */
+	smp2pgpio_ssr_smp2p_2_out: qcom,smp2pgpio-ssr-smp2p-2-out {
+		compatible = "qcom,smp2pgpio";
+		qcom,entry-name = "master-kernel";
+		qcom,remote-pid = <2>;
+		gpio-controller;
+		#gpio-cells = <2>;
+		interrupt-controller;
+		#interrupt-cells = <2>;
+	};
+
+	/* ssr - inbound entry from ssc */
+	smp2pgpio_ssr_smp2p_3_in: qcom,smp2pgpio-ssr-smp2p-3-in {
+		compatible = "qcom,smp2pgpio";
+		qcom,entry-name = "slave-kernel";
+		qcom,remote-pid = <3>;
+		qcom,is-inbound;
+		gpio-controller;
+		#gpio-cells = <2>;
+		interrupt-controller;
+		#interrupt-cells = <2>;
+	};
+
+	/* ssr - outbound entry to ssc */
+	smp2pgpio_ssr_smp2p_3_out: qcom,smp2pgpio-ssr-smp2p-3-out {
+		compatible = "qcom,smp2pgpio";
+		qcom,entry-name = "master-kernel";
+		qcom,remote-pid = <3>;
+		gpio-controller;
+		#gpio-cells = <2>;
+		interrupt-controller;
+		#interrupt-cells = <2>;
+	};
+
+	qcom,smp2pgpio_test_smp2p_3_out {
+		compatible = "qcom,smp2pgpio_test_smp2p_3_out";
+		gpios = <&smp2pgpio_smp2p_3_out 0 0>;
+	};
+
+	smp2pgpio_sleepstate_3_out: qcom,smp2pgpio-sleepstate-gpio-3-out {
+		compatible = "qcom,smp2pgpio";
+		qcom,entry-name = "sleepstate";
+		qcom,remote-pid = <3>;
+		gpio-controller;
+		#gpio-cells = <2>;
+		interrupt-controller;
+		#interrupt-cells = <2>;
+	};
+
+	qcom,smp2pgpio-sleepstate-3-out {
+		compatible = "qcom,smp2pgpio_sleepstate_3_out";
+		gpios = <&smp2pgpio_sleepstate_3_out 0 0>;
+	};
+
+	/* ipa - outbound entry to mss */
+	smp2pgpio_ipa_1_out: qcom,smp2pgpio-ipa-1-out {
+		compatible = "qcom,smp2pgpio";
+		qcom,entry-name = "ipa";
+		qcom,remote-pid = <1>;
+		gpio-controller;
+		#gpio-cells = <2>;
+		interrupt-controller;
+		#interrupt-cells = <2>;
+	};
+
+	/* ipa - inbound entry from mss */
+	smp2pgpio_ipa_1_in: qcom,smp2pgpio-ipa-1-in {
+		compatible = "qcom,smp2pgpio";
+		qcom,entry-name = "ipa";
+		qcom,remote-pid = <1>;
+		qcom,is-inbound;
+		gpio-controller;
+		#gpio-cells = <2>;
+		interrupt-controller;
+		#interrupt-cells = <2>;
+	};
+};
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/arch/arm64/boot/dts/qcom/msm8998-v2.1.dtsi	2019-01-22 16:16:21.199225542 +0100
@@ -0,0 +1,18 @@
+/* Copyright (c) 2016 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include "msm8998-v2.dtsi"
+
+/ {
+	model = "Qualcomm Technologies, Inc. MSM8998 v2.1";
+	qcom,msm-id = <292 0x20001>;
+};
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/arch/arm64/boot/dts/qcom/msm8998-v2-camera.dtsi	2019-01-22 16:16:21.199225542 +0100
@@ -0,0 +1,192 @@
+/*
+ * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+&soc {
+	qcom,csiphy@ca34000 {
+		cell-index = <0>;
+		compatible = "qcom,csiphy-v5.01", "qcom,csiphy";
+		reg = <0xca34000 0x1000>;
+		reg-names = "csiphy";
+		interrupts = <0 78 0>;
+		interrupt-names = "csiphy";
+		gdscr-supply = <&gdsc_camss_top>;
+		bimc_smmu-supply = <&gdsc_bimc_smmu>;
+		qcom,cam-vreg-name = "gdscr", "bimc_smmu";
+		clocks = <&clock_gcc clk_mmssnoc_axi_clk>,
+			<&clock_mmss clk_mmss_mnoc_ahb_clk>,
+			<&clock_mmss clk_mmss_bimc_smmu_ahb_clk>,
+			<&clock_mmss clk_mmss_bimc_smmu_axi_clk>,
+			<&clock_mmss clk_mmss_camss_ahb_clk>,
+			<&clock_mmss clk_mmss_camss_top_ahb_clk>,
+			<&clock_mmss clk_csi0_clk_src>,
+			<&clock_mmss clk_mmss_camss_csi0_clk>,
+			<&clock_mmss clk_mmss_camss_cphy_csid0_clk>,
+			<&clock_mmss clk_csi0phytimer_clk_src>,
+			<&clock_mmss clk_mmss_camss_csi0phytimer_clk>,
+			<&clock_mmss clk_mmss_camss_ispif_ahb_clk>,
+			<&clock_mmss clk_csiphy_clk_src>,
+			<&clock_mmss clk_mmss_camss_csiphy0_clk>;
+		clock-names = "mmssnoc_axi", "mnoc_ahb",
+			"bmic_smmu_ahb", "bmic_smmu_axi",
+			"camss_ahb_clk", "camss_top_ahb_clk",
+			"csi_src_clk", "csi_clk", "cphy_csid_clk",
+			"csiphy_timer_src_clk", "csiphy_timer_clk",
+			"camss_ispif_ahb_clk", "csiphy_clk_src", "csiphy_clk";
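+		/* one rate per clock above; 0 leaves a clock at its default
+		 * rate (same layout in the other csiphy nodes below) */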
+		qcom,clock-rates = <0 0 0 0 0 0 274290000 0 0 200000000 0
+			0 274290000 0>;
+		status = "ok";
+	};
+
+	qcom,csiphy@ca35000 {
+		cell-index = <1>;
+		compatible = "qcom,csiphy-v5.01", "qcom,csiphy";
+		reg = <0xca35000 0x1000>;
+		reg-names = "csiphy";
+		interrupts = <0 79 0>;
+		interrupt-names = "csiphy";
+		gdscr-supply = <&gdsc_camss_top>;
+		bimc_smmu-supply = <&gdsc_bimc_smmu>;
+		qcom,cam-vreg-name = "gdscr", "bimc_smmu";
+		clocks = <&clock_gcc clk_mmssnoc_axi_clk>,
+			<&clock_mmss clk_mmss_mnoc_ahb_clk>,
+			<&clock_mmss clk_mmss_bimc_smmu_ahb_clk>,
+			<&clock_mmss clk_mmss_bimc_smmu_axi_clk>,
+			<&clock_mmss clk_mmss_camss_ahb_clk>,
+			<&clock_mmss clk_mmss_camss_top_ahb_clk>,
+			<&clock_mmss clk_csi1_clk_src>,
+			<&clock_mmss clk_mmss_camss_csi1_clk>,
+			<&clock_mmss clk_mmss_camss_cphy_csid1_clk>,
+			<&clock_mmss clk_csi1phytimer_clk_src>,
+			<&clock_mmss clk_mmss_camss_csi1phytimer_clk>,
+			<&clock_mmss clk_mmss_camss_ispif_ahb_clk>,
+			<&clock_mmss clk_csiphy_clk_src>,
+			<&clock_mmss clk_mmss_camss_csiphy1_clk>;
+		clock-names = "mmssnoc_axi", "mnoc_ahb",
+			"bmic_smmu_ahb", "bmic_smmu_axi",
+			"camss_ahb_clk", "camss_top_ahb_clk",
+			"csi_src_clk", "csi_clk", "cphy_csid_clk",
+			"csiphy_timer_src_clk", "csiphy_timer_clk",
+			"camss_ispif_ahb_clk", "csiphy_clk_src", "csiphy_clk";
+		qcom,clock-rates = <0 0 0 0 0 0 274290000 0 0 200000000 0
+			0 274290000 0>;
+		status = "ok";
+	};
+
+	qcom,csiphy@ca36000 {
+		cell-index = <2>;
+		compatible = "qcom,csiphy-v5.01", "qcom,csiphy";
+		reg = <0xca36000 0x1000>;
+		reg-names = "csiphy";
+		interrupts = <0 80 0>;
+		interrupt-names = "csiphy";
+		gdscr-supply = <&gdsc_camss_top>;
+		bimc_smmu-supply = <&gdsc_bimc_smmu>;
+		qcom,cam-vreg-name = "gdscr", "bimc_smmu";
+		clocks = <&clock_gcc clk_mmssnoc_axi_clk>,
+			<&clock_mmss clk_mmss_mnoc_ahb_clk>,
+			<&clock_mmss clk_mmss_bimc_smmu_ahb_clk>,
+			<&clock_mmss clk_mmss_bimc_smmu_axi_clk>,
+			<&clock_mmss clk_mmss_camss_ahb_clk>,
+			<&clock_mmss clk_mmss_camss_top_ahb_clk>,
+			<&clock_mmss clk_csi2_clk_src>,
+			<&clock_mmss clk_mmss_camss_csi2_clk>,
+			<&clock_mmss clk_mmss_camss_cphy_csid2_clk>,
+			<&clock_mmss clk_csi2phytimer_clk_src>,
+			<&clock_mmss clk_mmss_camss_csi2phytimer_clk>,
+			<&clock_mmss clk_mmss_camss_ispif_ahb_clk>,
+			<&clock_mmss clk_csiphy_clk_src>,
+			<&clock_mmss clk_mmss_camss_csiphy2_clk>;
+		clock-names = "mmssnoc_axi", "mnoc_ahb",
+			"bmic_smmu_ahb", "bmic_smmu_axi",
+			"camss_ahb_clk", "camss_top_ahb_clk",
+			"csi_src_clk", "csi_clk", "cphy_csid_clk",
+			"csiphy_timer_src_clk", "csiphy_timer_clk",
+			"camss_ispif_ahb_clk", "csiphy_clk_src", "csiphy_clk";
+		qcom,clock-rates = <0 0 0 0 0 0 274290000 0 0 200000000 0
+			0 274290000 0>;
+		status = "ok";
+	};
+
+	qcom,cpp@ca04000 {
+		cell-index = <0>;
+		compatible = "qcom,cpp";
+		reg = <0xca04000 0x100>,
+			<0xca80000 0x3000>,
+			<0xca18000 0x3000>,
+			<0xc8c36d4 0x4>;
+		reg-names = "cpp", "cpp_vbif", "cpp_hw", "camss_cpp";
+		interrupts = <0 294 0>;
+		interrupt-names = "cpp";
+		smmu-vdd-supply = <&gdsc_bimc_smmu>;
+		camss-vdd-supply = <&gdsc_camss_top>;
+		vdd-supply = <&gdsc_cpp>;
+		qcom,vdd-names = "smmu-vdd", "camss-vdd", "vdd";
+		clocks = <&clock_gcc clk_mmssnoc_axi_clk>,
+			<&clock_mmss clk_mmss_mnoc_ahb_clk>,
+			<&clock_mmss clk_mmss_camss_ahb_clk>,
+			<&clock_mmss clk_mmss_camss_top_ahb_clk>,
+			<&clock_mmss clk_cpp_clk_src>,
+			<&clock_mmss clk_mmss_camss_cpp_clk>,
+			<&clock_mmss clk_mmss_camss_cpp_ahb_clk>,
+			<&clock_mmss clk_mmss_camss_cpp_axi_clk>,
+			<&clock_mmss clk_mmss_camss_micro_ahb_clk>,
+			<&clock_mmss clk_mmss_bimc_smmu_axi_clk>,
+			<&clock_mmss clk_mmss_camss_cpp_vbif_ahb_clk>;
+		clock-names = "mmssnoc_axi_clk",
+			"mnoc_ahb_clk",
+			"camss_ahb_clk", "camss_top_ahb_clk",
+			"cpp_src_clk",
+			"cpp_core_clk", "camss_cpp_ahb_clk",
+			"camss_cpp_axi_clk", "micro_iface_clk",
+			"mmss_smmu_axi_clk", "cpp_vbif_ahb_clk";
+		qcom,clock-rates = <0 0 0 0 200000000 200000000 0 0 0 0 0>;
+		qcom,min-clock-rate = <200000000>;
+		qcom,bus-master = <1>;
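+		/* <register offset, value> pairs for the CPP VBIF QoS setup */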
+		qcom,vbif-qos-setting = <0x20 0x10000000>,
+			<0x24 0x10000000>,
+			<0x28 0x10000000>,
+			<0x2C 0x10000000>;
+		status = "ok";
+		qcom,msm-bus,name = "msm_camera_cpp";
+		qcom,msm-bus,num-cases = <2>;
+		qcom,msm-bus,num-paths = <1>;
+		qcom,msm-bus,vectors-KBps =
+			<106 512 0 0>,
+			<106 512 0 0>;
+		qcom,msm-bus-vector-dyn-vote;
+		resets = <&clock_mmss CAMSS_MICRO_BCR>;
+		reset-names = "micro_iface_reset";
+		qcom,src-clock-rates = <100000000 200000000 384000000 404000000
+			480000000 576000000 600000000>;
+		qcom,micro-reset;
+		qcom,cpp-fw-payload-info {
+			qcom,stripe-base = <790>;
+			qcom,plane-base = <715>;
+			qcom,stripe-size = <63>;
+			qcom,plane-size = <25>;
+			qcom,fe-ptr-off = <11>;
+			qcom,we-ptr-off = <23>;
+			qcom,ref-fe-ptr-off = <17>;
+			qcom,ref-we-ptr-off = <36>;
+			qcom,we-meta-ptr-off = <42>;
+			qcom,fe-mmu-pf-ptr-off = <7>;
+			qcom,ref-fe-mmu-pf-ptr-off = <10>;
+			qcom,we-mmu-pf-ptr-off = <13>;
+			qcom,dup-we-mmu-pf-ptr-off = <18>;
+			qcom,ref-we-mmu-pf-ptr-off = <23>;
+			qcom,set-group-buffer-len = <135>;
+			qcom,dup-frame-indicator-off = <70>;
+		};
+	};
+};
+
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/arch/arm64/boot/dts/qcom/msm8998-v2.dtsi	2019-01-22 16:16:21.203225578 +0100
@@ -0,0 +1,1312 @@
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/*
+ * As a general rule, only version-specific property overrides should be placed
+ * inside this file. Common device definitions should be placed inside the
+ * msm8998.dtsi file.
+ */
+
+#include "msm8998.dtsi"
+#include "msm8998-v2-camera.dtsi"
+
+/ {
+	model = "Qualcomm Technologies, Inc. MSM8998 v2";
+	qcom,msm-id = <292 0x20000>;
+};
+
+&clock_cpu {
+	compatible = "qcom,cpu-clock-osm-msm8998-v2";
+	reg = <0x179c0000 0x4000>,
+	      <0x17916000 0x1000>,
+	      <0x17816000 0x1000>,
+	      <0x179d1000 0x1000>,
+	      <0x17914800 0x800>,
+	      <0x17814800 0x800>,
+	      <0x00784130 0x8>,
+	      <0x1791101c 0x8>;
+	reg-names = "osm", "pwrcl_pll", "perfcl_pll",
+		    "apcs_common", "pwrcl_acd", "perfcl_acd",
+		    "perfcl_efuse", "debug";
+
+	qcom,acdtd-val = <0x00009611 0x00009611>;
+	qcom,acdcr-val = <0x002b5ffd 0x002b5ffd>;
+	qcom,acdsscr-val = <0x00000501 0x00000501>;
+	qcom,acdextint0-val = <0x2cf9ae8 0x2cf9ae8>;
+	qcom,acdextint1-val = <0x2cf9afe 0x2cf9afe>;
+	qcom,acdautoxfer-val = <0x00000015 0x00000015>;
+	qcom,pwrcl-apcs-mem-acc-threshold-voltage = <852000>;
+	qcom,perfcl-apcs-mem-acc-threshold-voltage = <852000>;
+	qcom,apm-threshold-voltage = <800000>;
+
+	/delete-property/ qcom,llm-sw-overr;
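+
+	/*
+	 * Each row pairs a cluster frequency in Hz with raw OSM
+	 * look-up-table programming words; the trailing cell appears to be
+	 * the performance-state index (it lines up with the CPR corners
+	 * defined further below).
+	 */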
+	qcom,pwrcl-speedbin0-v0 =
+		<   300000000 0x0004000f 0x01200020 0x1 1 >,
+		<   364800000 0x05040013 0x01200020 0x1 2 >,
+		<   441600000 0x05040017 0x02200020 0x1 3 >,
+		<   518400000 0x0504001b 0x02200020 0x1 4 >,
+		<   595200000 0x0504001f 0x02200020 0x1 5 >,
+		<   672000000 0x05040023 0x03200020 0x1 6 >,
+		<   748800000 0x05040027 0x03200020 0x1 7 >,
+		<   825600000 0x0404002b 0x03220022 0x1 8 >,
+		<   883200000 0x0404002e 0x04250025 0x1 9 >,
+		<   960000000 0x04040032 0x04280028 0x1 10 >,
+		<  1036800000 0x04040036 0x042b002b 0x1 11 >,
+		<  1094400000 0x04040039 0x052e002e 0x2 12 >,
+		<  1171200000 0x0404003d 0x05310031 0x2 13 >,
+		<  1248000000 0x04040041 0x05340034 0x2 14 >,
+		<  1324800000 0x04040045 0x06370037 0x2 15 >,
+		<  1401600000 0x04040049 0x063a003a 0x2 16 >,
+		<  1478400000 0x0404004d 0x073e003e 0x2 17 >,
+		<  1555200000 0x04040051 0x07410041 0x2 18 >,
+		<  1670400000 0x04040057 0x08460046 0x2 19 >,
+		<  1747200000 0x0404005b 0x08490049 0x2 20 >,
+		<  1824000000 0x0404005f 0x084c004c 0x3 21 >,
+		<  1900800000 0x04040063 0x094f004f 0x3 22 >;
+
+	qcom,perfcl-speedbin0-v0 =
+		<   300000000 0x0004000f 0x01200020 0x1 1 >,
+		<   345600000 0x05040012 0x01200020 0x1 2 >,
+		<   422400000 0x05040016 0x02200020 0x1 3 >,
+		<   499200000 0x0504001a 0x02200020 0x1 4 >,
+		<   576000000 0x0504001e 0x02200020 0x1 5 >,
+		<   652800000 0x05040022 0x03200020 0x1 6 >,
+		<   729600000 0x05040026 0x03200020 0x1 7 >,
+		<   806400000 0x0504002a 0x03220022 0x1 8 >,
+		<   902400000 0x0404002f 0x04260026 0x1 9 >,
+		<   979200000 0x04040033 0x04290029 0x1 10 >,
+		<  1056000000 0x04040037 0x052c002c 0x1 11 >,
+		<  1132800000 0x0404003b 0x052f002f 0x1 12 >,
+		<  1190400000 0x0404003e 0x05320032 0x2 13 >,
+		<  1267200000 0x04040042 0x06350035 0x2 14 >,
+		<  1344000000 0x04040046 0x06380038 0x2 15 >,
+		<  1420800000 0x0404004a 0x063b003b 0x2 16 >,
+		<  1497600000 0x0404004e 0x073e003e 0x2 17 >,
+		<  1574400000 0x04040052 0x07420042 0x2 18 >,
+		<  1651200000 0x04040056 0x07450045 0x2 19 >,
+		<  1728000000 0x0404005a 0x08480048 0x2 20 >,
+		<  1804800000 0x0404005e 0x084b004b 0x2 21 >,
+		<  1881600000 0x04040062 0x094e004e 0x2 22 >,
+		<  1958400000 0x04040066 0x09520052 0x2 23 >,
+		<  2035200000 0x0404006a 0x09550055 0x3 24 >,
+		<  2112000000 0x0404006e 0x0a580058 0x3 25 >,
+		<  2208000000 0x04040073 0x0a5c005c 0x3 26 >,
+		<  2265600000 0x04010076 0x0a5e005e 0x3 26 >,
+		<  2265600000 0x04040076 0x0a5e005e 0x3 27 >,
+		<  2342400000 0x0401007a 0x0a620062 0x3 27 >,
+		<  2342400000 0x0404007a 0x0a620062 0x3 28 >,
+		<  2419200000 0x0401007e 0x0a650065 0x3 28 >,
+		<  2419200000 0x0404007e 0x0a650065 0x3 29 >,
+		<  2496000000 0x04010082 0x0a680068 0x3 29 >,
+		<  2457600000 0x04040080 0x0a660066 0x3 30 >,
+		<  2553600000 0x04010085 0x0a6a006a 0x3 30 >,
+		<  2476800000 0x04040081 0x0a670067 0x3 31 >,
+		<  2572800000 0x04010086 0x0a6b006b 0x3 31 >,
+		<  2496000000 0x04040082 0x0a680068 0x3 32 >,
+		<  2592000000 0x04010087 0x0a6c006c 0x3 32 >;
+
+	qcom,perfcl-speedbin1-v0 =
+		<   300000000 0x0004000f 0x01200020 0x1 1 >,
+		<   345600000 0x05040012 0x01200020 0x1 2 >,
+		<   422400000 0x05040016 0x02200020 0x1 3 >,
+		<   499200000 0x0504001a 0x02200020 0x1 4 >,
+		<   576000000 0x0504001e 0x02200020 0x1 5 >,
+		<   652800000 0x05040022 0x03200020 0x1 6 >,
+		<   729600000 0x05040026 0x03200020 0x1 7 >,
+		<   806400000 0x0504002a 0x03220022 0x1 8 >,
+		<   902400000 0x0404002f 0x04260026 0x1 9 >,
+		<   979200000 0x04040033 0x04290029 0x1 10 >,
+		<  1056000000 0x04040037 0x052c002c 0x1 11 >,
+		<  1132800000 0x0404003b 0x052f002f 0x1 12 >,
+		<  1190400000 0x0404003e 0x05320032 0x2 13 >,
+		<  1267200000 0x04040042 0x06350035 0x2 14 >,
+		<  1344000000 0x04040046 0x06380038 0x2 15 >,
+		<  1420800000 0x0404004a 0x063b003b 0x2 16 >,
+		<  1497600000 0x0404004e 0x073e003e 0x2 17 >,
+		<  1574400000 0x04040052 0x07420042 0x2 18 >,
+		<  1651200000 0x04040056 0x07450045 0x2 19 >,
+		<  1728000000 0x0404005a 0x08480048 0x2 20 >,
+		<  1804800000 0x0404005e 0x084b004b 0x2 21 >,
+		<  1881600000 0x04040062 0x094e004e 0x2 22 >,
+		<  1958400000 0x04040066 0x09520052 0x2 23 >,
+		<  2035200000 0x0404006a 0x09550055 0x3 24 >,
+		<  2112000000 0x0404006e 0x0a580058 0x3 25 >,
+		<  2208000000 0x04040073 0x0a5c005c 0x3 26 >,
+		<  2304000000 0x04010078 0x0a600060 0x3 26 >;
+
+	qcom,perfcl-speedbin2-v0 =
+		<   300000000 0x0004000f 0x01200020 0x1 1 >,
+		<   345600000 0x05040012 0x01200020 0x1 2 >,
+		<   422400000 0x05040016 0x02200020 0x1 3 >,
+		<   499200000 0x0504001a 0x02200020 0x1 4 >,
+		<   576000000 0x0504001e 0x02200020 0x1 5 >,
+		<   652800000 0x05040022 0x03200020 0x1 6 >,
+		<   729600000 0x05040026 0x03200020 0x1 7 >,
+		<   806400000 0x0504002a 0x03220022 0x1 8 >,
+		<   902400000 0x0404002f 0x04260026 0x1 9 >,
+		<   979200000 0x04040033 0x04290029 0x1 10 >,
+		<  1056000000 0x04040037 0x052c002c 0x1 11 >,
+		<  1132800000 0x0404003b 0x052f002f 0x1 12 >,
+		<  1190400000 0x0404003e 0x05320032 0x2 13 >,
+		<  1267200000 0x04040042 0x06350035 0x2 14 >,
+		<  1344000000 0x04040046 0x06380038 0x2 15 >,
+		<  1420800000 0x0404004a 0x063b003b 0x2 16 >,
+		<  1497600000 0x0404004e 0x073e003e 0x2 17 >,
+		<  1574400000 0x04040052 0x07420042 0x2 18 >,
+		<  1651200000 0x04040056 0x07450045 0x2 19 >,
+		<  1728000000 0x0404005a 0x08480048 0x2 20 >,
+		<  1804800000 0x0404005e 0x084b004b 0x2 21 >,
+		<  1881600000 0x04040062 0x094e004e 0x2 22 >,
+		<  1958400000 0x04040066 0x09520052 0x2 23 >,
+		<  2035200000 0x0404006a 0x09550055 0x3 24 >,
+		<  2112000000 0x0404006e 0x0a580058 0x3 25 >,
+		<  2208000000 0x04040073 0x0a5c005c 0x3 26 >,
+		<  2265600000 0x04010076 0x0a5e005e 0x3 26 >,
+		<  2265600000 0x04040076 0x0a5e005e 0x3 27 >,
+		<  2342400000 0x0401007a 0x0a620062 0x3 27 >,
+		<  2323200000 0x04040079 0x0a610061 0x3 28 >,
+		<  2419200000 0x0401007e 0x0a650065 0x3 28 >,
+		<  2342400000 0x0404007a 0x0a620062 0x3 29 >,
+		<  2438400000 0x0401007f 0x0a660066 0x3 29 >,
+		<  2361600000 0x0404007b 0x0a620062 0x3 30 >,
+		<  2457600000 0x04010080 0x0a660066 0x3 30 >;
+
+	qcom,perfcl-speedbin3-v0 =
+		<   300000000 0x0004000f 0x01200020 0x1 1 >,
+		<   345600000 0x05040012 0x01200020 0x1 2 >,
+		<   422400000 0x05040016 0x02200020 0x1 3 >,
+		<   499200000 0x0504001a 0x02200020 0x1 4 >,
+		<   576000000 0x0504001e 0x02200020 0x1 5 >,
+		<   652800000 0x05040022 0x03200020 0x1 6 >,
+		<   729600000 0x05040026 0x03200020 0x1 7 >,
+		<   806400000 0x0504002a 0x03220022 0x1 8 >,
+		<   902400000 0x0404002f 0x04260026 0x1 9 >,
+		<   979200000 0x04040033 0x04290029 0x1 10 >,
+		<  1056000000 0x04040037 0x052c002c 0x1 11 >,
+		<  1132800000 0x0404003b 0x052f002f 0x1 12 >,
+		<  1190400000 0x0404003e 0x05320032 0x2 13 >,
+		<  1267200000 0x04040042 0x06350035 0x2 14 >,
+		<  1344000000 0x04040046 0x06380038 0x2 15 >,
+		<  1420800000 0x0404004a 0x063b003b 0x2 16 >,
+		<  1497600000 0x0404004e 0x073e003e 0x2 17 >,
+		<  1574400000 0x04040052 0x07420042 0x2 18 >,
+		<  1651200000 0x04040056 0x07450045 0x2 19 >,
+		<  1728000000 0x0404005a 0x08480048 0x2 20 >,
+		<  1804800000 0x0404005e 0x084b004b 0x2 21 >,
+		<  1881600000 0x04040062 0x094e004e 0x2 22 >,
+		<  1958400000 0x04040066 0x09520052 0x2 23 >,
+		<  2035200000 0x0404006a 0x09550055 0x3 24 >,
+		<  2112000000 0x0404006e 0x0a580058 0x3 25 >,
+		<  2208000000 0x04040073 0x0a5c005c 0x3 26 >,
+		<  2265600000 0x04010076 0x0a5e005e 0x3 26 >,
+		<  2265600000 0x04040076 0x0a5e005e 0x3 27 >,
+		<  2342400000 0x0401007a 0x0a620062 0x3 27 >,
+		<  2323200000 0x04040079 0x0a610061 0x3 28 >,
+		<  2419200000 0x0401007e 0x0a650065 0x3 28 >,
+		<  2342400000 0x0404007a 0x0a620062 0x3 29 >,
+		<  2438400000 0x0401007f 0x0a660066 0x3 29 >,
+		<  2361600000 0x0404007b 0x0a620062 0x3 30 >,
+		<  2457600000 0x04010080 0x0a660066 0x3 30 >;
+};
+
+&msm_cpufreq {
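+	/*
+	 * Available OPPs in kHz; table-0 appears to serve the power cluster
+	 * (CPU0-3) and table-4 the performance cluster (CPU4-7), matching
+	 * the pwrcl/perfcl OSM tables above.
+	 */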
+	qcom,cpufreq-table-0 =
+		<   300000 >,
+		<   364800 >,
+		<   441600 >,
+		<   518400 >,
+		<   595200 >,
+		<   672000 >,
+		<   748800 >,
+		<   825600 >,
+		<   883200 >,
+		<   960000 >,
+		<  1036800 >,
+		<  1094400 >,
+		<  1171200 >,
+		<  1248000 >,
+		<  1324800 >,
+		<  1401600 >,
+		<  1478400 >,
+		<  1555200 >,
+		<  1670400 >,
+		<  1747200 >,
+		<  1824000 >,
+		<  1900800 >;
+
+	qcom,cpufreq-table-4 =
+		<   300000 >,
+		<   345600 >,
+		<   422400 >,
+		<   499200 >,
+		<   576000 >,
+		<   652800 >,
+		<   729600 >,
+		<   806400 >,
+		<   902400 >,
+		<   979200 >,
+		<  1056000 >,
+		<  1132800 >,
+		<  1190400 >,
+		<  1267200 >,
+		<  1344000 >,
+		<  1420800 >,
+		<  1497600 >,
+		<  1574400 >,
+		<  1651200 >,
+		<  1728000 >,
+		<  1804800 >,
+		<  1881600 >,
+		<  1958400 >,
+		<  2035200 >,
+		<  2112000 >,
+		<  2208000 >,
+		<  2265600 >,
+		<  2304000 >,
+		<  2323200 >,
+		<  2342400 >,
+		<  2361600 >,
+		<  2419200 >,
+		<  2457600 >,
+		<  2476800 >,
+		<  2496000 >,
+		<  2592000 >;
+};
+
+&bwmon {
+	compatible = "qcom,bimc-bwmon4";
+	qcom,hw-timer-hz = <19200000>;
+};
+
+&devfreq_cpufreq {
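+	/*
+	 * cpu-to-dev-map entries are <cpu-freq-kHz, bw-vote> pairs: once a
+	 * core reaches the listed frequency, the corresponding minimum DDR
+	 * bandwidth floor (apparently MB/s) is requested.
+	 */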
+	mincpubw-cpufreq {
+		cpu-to-dev-map-0 =
+			< 1900800 1525 >;
+		cpu-to-dev-map-4 =
+			< 2112000 1525 >,
+			< 2342400 5195 >,
+			< 2496000 13763 >;
+	};
+};
+
+&devfreq_memlat_0 {
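+	/*
+	 * Memory-latency governor map, <cpu-freq-kHz, bus-vote> pairs; the
+	 * same layout applies to devfreq_memlat_4 below.
+	 */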
+	qcom,core-dev-table =
+		<  300000 1525 >,
+		<  595200 3143 >,
+		< 1324800 4173 >,
+		< 1555200 5859 >,
+		< 1747200 5859 >,
+		< 1900800 7759 >;
+};
+
+&devfreq_memlat_4 {
+	qcom,core-dev-table =
+		<  576000  3143 >,
+		< 1132800  4173 >,
+		< 1344000  5859 >,
+		< 1728000  7759 >,
+		< 1958400 11863 >,
+		< 2208000 13763 >;
+};
+
+&clock_gcc {
+	compatible = "qcom,gcc-8998-v2";
+};
+
+&clock_mmss {
+	compatible = "qcom,mmsscc-8998-v2";
+};
+
+&clock_gpu {
+	compatible = "qcom,gpucc-8998-v2";
+};
+
+&clock_gfx {
+	compatible = "qcom,gfxcc-8998-v2";
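+	/*
+	 * <gpu-freq, level-index, voltage corner> rows; the -mx variant
+	 * lists the matching MX rail corner for each frequency.
+	 */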
+	qcom,gfxfreq-speedbin0 =
+		<         0 0                           0 >,
+		< 180000000 1 RPM_SMD_REGULATOR_LEVEL_SVS >,
+		< 257000000 2 RPM_SMD_REGULATOR_LEVEL_SVS >,
+		< 342000000 3 RPM_SMD_REGULATOR_LEVEL_SVS >,
+		< 414000000 4 RPM_SMD_REGULATOR_LEVEL_SVS >,
+		< 515000000 5 RPM_SMD_REGULATOR_LEVEL_NOM >,
+		< 596000000 6 RPM_SMD_REGULATOR_LEVEL_NOM >,
+		< 670000000 7 RPM_SMD_REGULATOR_LEVEL_TURBO >,
+		< 710000000 8 RPM_SMD_REGULATOR_LEVEL_TURBO >;
+	qcom,gfxfreq-mx-speedbin0 =
+		<         0                           0 >,
+		< 180000000 RPM_SMD_REGULATOR_LEVEL_SVS >,
+		< 257000000 RPM_SMD_REGULATOR_LEVEL_SVS >,
+		< 342000000 RPM_SMD_REGULATOR_LEVEL_SVS >,
+		< 414000000 RPM_SMD_REGULATOR_LEVEL_SVS >,
+		< 515000000 RPM_SMD_REGULATOR_LEVEL_NOM >,
+		< 596000000 RPM_SMD_REGULATOR_LEVEL_NOM >,
+		< 670000000 RPM_SMD_REGULATOR_LEVEL_TURBO >,
+		< 710000000 RPM_SMD_REGULATOR_LEVEL_TURBO >;
+};
+
+&pm8998_s10 {
+	regulator-min-microvolt = <568000>;
+	regulator-max-microvolt = <1056000>;
+};
+
+&pm8998_s13 {
+	regulator-min-microvolt = <568000>;
+	regulator-max-microvolt = <1136000>;
+};
+
+&pcie0 {
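+	/*
+	 * PHY bring-up sequence; each triple appears to be
+	 * <register offset, value, delay>, written in order at PHY init.
+	 */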
+	qcom,phy-sequence = <0x804 0x01 0x00
+				0x034 0x14 0x00
+				0x138 0x30 0x00
+				0x048 0x0f 0x00
+				0x15c 0x06 0x00
+				0x090 0x01 0x00
+				0x088 0x20 0x00
+				0x0f0 0x00 0x00
+				0x0f8 0x01 0x00
+				0x0f4 0xc9 0x00
+				0x11c 0xff 0x00
+				0x120 0x3f 0x00
+				0x164 0x01 0x00
+				0x154 0x00 0x00
+				0x148 0x0a 0x00
+				0x05C 0x19 0x00
+				0x038 0x90 0x00
+				0x0b0 0x82 0x00
+				0x0c0 0x03 0x00
+				0x0bc 0x55 0x00
+				0x0b8 0x55 0x00
+				0x0a0 0x00 0x00
+				0x09c 0x0d 0x00
+				0x098 0x04 0x00
+				0x13c 0x00 0x00
+				0x060 0x08 0x00
+				0x068 0x16 0x00
+				0x070 0x34 0x00
+				0x15c 0x06 0x00
+				0x138 0x33 0x00
+				0x03c 0x02 0x00
+				0x040 0x07 0x00
+				0x080 0x04 0x00
+				0x0dc 0x00 0x00
+				0x0d8 0x3f 0x00
+				0x00c 0x09 0x00
+				0x010 0x01 0x00
+				0x01c 0x40 0x00
+				0x020 0x01 0x00
+				0x014 0x02 0x00
+				0x018 0x00 0x00
+				0x024 0x7e 0x00
+				0x028 0x15 0x00
+				0x244 0x02 0x00
+				0x2a4 0x12 0x00
+				0x260 0x10 0x00
+				0x28c 0x06 0x00
+				0x504 0x03 0x00
+				0x500 0x1c 0x00
+				0x50c 0x14 0x00
+				0x4d4 0x0a 0x00
+				0x4d8 0x04 0x00
+				0x4dc 0x1a 0x00
+				0x434 0x4b 0x00
+				0x414 0x04 0x00
+				0x40c 0x04 0x00
+				0x4f8 0x00 0x00
+				0x4fc 0x80 0x00
+				0x51c 0x40 0x00
+				0x444 0x71 0x00
+				0x43c 0x40 0x00
+				0x854 0x04 0x00
+				0x62c 0x52 0x00
+				0x9ac 0x00 0x00
+				0x8a0 0x01 0x00
+				0x9e0 0x00 0x00
+				0x9dc 0x20 0x00
+				0x9a8 0x00 0x00
+				0x8a4 0x01 0x00
+				0x8a8 0x73 0x00
+				0x9d8 0x99 0x00
+				0x9b0 0x03 0x00
+				0x804 0x03 0x00
+				0x800 0x00 0x00
+				0x808 0x03 0x00>;
+};
+
+&apc0_cpr {
+	compatible = "qcom,cprh-msm8998-v2-kbss-regulator";
+	qcom,cpr-corner-switch-delay-time = <1042>;
+	qcom,cpr-aging-ref-voltage = <1056000>;
+	qcom,apm-threshold-voltage = <800000>;
+	qcom,apm-hysteresis-voltage = <0>;
+	qcom,mem-acc-threshold-voltage = <852000>;
+	qcom,mem-acc-crossover-voltage = <852000>;
+};
+
+&apc0_pwrcl_vreg {
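+	/*
+	 * 22 CPR corners for the power cluster: per-corner ceilings and
+	 * floors in microvolts, with fmax-map tying the four fuse corners
+	 * to corners 8/11/18/22. apc1_perfcl_vreg below follows the same
+	 * layout with per-speed-bin corner counts.
+	 */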
+	regulator-max-microvolt = <23>;
+
+	qcom,cpr-fuse-combos = <32>;
+	qcom,cpr-speed-bins = <4>;
+	qcom,cpr-speed-bin-corners = <22 22 22 22>;
+	qcom,cpr-corners = <22>;
+
+	qcom,cpr-corner-fmax-map = <8 11 18 22>;
+
+	qcom,cpr-voltage-ceiling =
+		<828000  828000  828000  828000  828000
+		 828000  828000  828000  828000  828000
+		 828000  900000  900000  900000  900000
+		 900000  900000  900000  952000  952000
+		1056000 1056000>;
+
+	qcom,cpr-voltage-floor =
+		<568000  568000  568000  568000  568000
+		 568000  568000  568000  568000  568000
+		 568000  632000  632000  632000  632000
+		 632000  632000  632000  712000  712000
+		 772000  772000>;
+
+	qcom,cpr-floor-to-ceiling-max-range =
+		<32000  32000  32000  32000
+		 32000  32000  32000  32000
+		 32000  32000  32000  32000
+		 32000  32000  32000  32000
+		 32000  32000  40000  40000
+		 40000  40000>;
+
+	qcom,corner-frequencies =
+		<300000000  364800000  441600000
+		 518400000  595200000  672000000
+		 748800000  825600000  883200000
+		 960000000 1036800000 1094400000
+		1171200000 1248000000 1324800000
+		1401600000 1478400000 1555200000
+		1670400000 1747200000 1824000000
+		1900800000>;
+
+	qcom,cpr-ro-scaling-factor =
+		<2595 2794 2577 2762 2471 2674 2199
+		 2553 3189 3255 3192 2962 3054 2982
+		 2042 2945>,
+		<2595 2794 2577 2762 2471 2674 2199
+		 2553 3189 3255 3192 2962 3054 2982
+		 2042 2945>,
+		<2391 2550 2483 2638 2382 2564 2259
+		 2555 2766 3041 2988 2935 2873 2688
+		 2013 2784>,
+		<2066 2153 2300 2434 2220 2386 2288
+		 2465 2028 2511 2487 2734 2554 2117
+		 1892 2377>;
+
+	qcom,cpr-open-loop-voltage-fuse-adjustment =
+		/* Speed bin 0 */
+		<40000  24000   12000  30000>,
+		<40000  24000   12000  30000>,
+		<40000  24000   12000  30000>,
+		<40000  24000   12000  30000>,
+		<40000  24000   12000  30000>,
+		<40000  24000   12000  30000>,
+		<40000  24000   12000  30000>,
+		<40000  24000   12000  30000>,
+		/* Speed bin 1 */
+		<40000  24000   12000  30000>,
+		<40000  24000   12000  30000>,
+		<40000  24000   12000  30000>,
+		<40000  24000   12000  30000>,
+		<40000  24000   12000  30000>,
+		<40000  24000   12000  30000>,
+		<40000  24000   12000  30000>,
+		<40000  24000   12000  30000>,
+		/* Speed bin 2 */
+		<40000  24000   12000  30000>,
+		<40000  24000   12000  30000>,
+		<40000  24000   12000  30000>,
+		<40000  24000   12000  30000>,
+		<40000  24000   12000  30000>,
+		<40000  24000   12000  30000>,
+		<40000  24000   12000  30000>,
+		<40000  24000   12000  30000>,
+		/* Speed bin 3 */
+		<40000  24000   12000  30000>,
+		<40000  24000   12000  30000>,
+		<40000  24000   12000  30000>,
+		<40000  24000   12000  30000>,
+		<40000  24000   12000  30000>,
+		<40000  24000   12000  30000>,
+		<40000  24000   12000  30000>,
+		<40000  24000   12000  30000>;
+
+	qcom,cpr-closed-loop-voltage-fuse-adjustment =
+		/* Speed bin 0 */
+		<20000  26000   12000  30000>,
+		<20000  26000   12000  30000>,
+		<20000  26000   12000  30000>,
+		<20000  26000   12000  30000>,
+		<20000  26000   12000  30000>,
+		<20000  26000   12000  30000>,
+		<20000  26000   12000  30000>,
+		<20000  26000   12000  30000>,
+		/* Speed bin 1 */
+		<20000  26000   12000  30000>,
+		<20000  26000   12000  30000>,
+		<20000  26000   12000  30000>,
+		<20000  26000   12000  30000>,
+		<20000  26000   12000  30000>,
+		<20000  26000   12000  30000>,
+		<20000  26000   12000  30000>,
+		<20000  26000   12000  30000>,
+		/* Speed bin 2 */
+		<20000  26000   12000  30000>,
+		<20000  26000   12000  30000>,
+		<20000  26000   12000  30000>,
+		<20000  26000   12000  30000>,
+		<20000  26000   12000  30000>,
+		<20000  26000   12000  30000>,
+		<20000  26000   12000  30000>,
+		<20000  26000   12000  30000>,
+		/* Speed bin 3 */
+		<20000  26000   12000  30000>,
+		<20000  26000   12000  30000>,
+		<20000  26000   12000  30000>,
+		<20000  26000   12000  30000>,
+		<20000  26000   12000  30000>,
+		<20000  26000   12000  30000>,
+		<20000  26000   12000  30000>,
+		<20000  26000   12000  30000>;
+
+	qcom,cpr-open-loop-voltage-adjustment =
+		/* Speed bin 0 */
+		<     0        0        0        0        0
+		      0        0        0        0        0
+		      0  (-12000) (-12000) (-12000) (-12000)
+		(-12000) (-16000) (-16000) (-20000) (-24000)
+		(-28000) (-28000)>,
+		/* Speed bin 1 */
+		<     0        0        0        0        0
+		      0        0        0        0        0
+		      0  (-12000) (-12000) (-12000) (-12000)
+		(-12000) (-16000) (-16000) (-20000) (-24000)
+		(-28000) (-28000)>,
+		/* Speed bin 2 */
+		<     0        0        0        0        0
+		      0        0        0        0        0
+		      0  (-12000) (-12000) (-12000) (-12000)
+		(-12000) (-16000) (-16000) (-20000) (-24000)
+		(-28000) (-28000)>,
+		/* Speed bin 3 */
+		<     0        0        0        0        0
+		      0        0        0        0        0
+		      0  (-12000) (-12000) (-12000) (-12000)
+		(-12000) (-16000) (-16000) (-20000) (-24000)
+		(-28000) (-28000)>;
+
+	qcom,cpr-closed-loop-voltage-adjustment =
+		/* Speed bin 0 */
+		<     0        0        0        0        0
+		      0        0        0        0        0
+		      0  (-10000) (-11000) (-12000) (-13000)
+		(-14000) (-14000) (-15000) (-21000) (-24000)
+		(-26000) (-28000)>,
+		/* Speed bin 1 */
+		<     0        0        0        0        0
+		      0        0        0        0        0
+		      0  (-10000) (-11000) (-12000) (-13000)
+		(-14000) (-14000) (-15000) (-21000) (-24000)
+		(-26000) (-28000)>,
+		/* Speed bin 2 */
+		<     0        0        0        0        0
+		      0        0        0        0        0
+		      0  (-10000) (-11000) (-12000) (-13000)
+		(-14000) (-14000) (-15000) (-21000) (-24000)
+		(-26000) (-28000)>,
+		/* Speed bin 3 */
+		<     0        0        0        0        0
+		      0        0        0        0        0
+		      0  (-10000) (-11000) (-12000) (-13000)
+		(-14000) (-14000) (-15000) (-21000) (-24000)
+		(-26000) (-28000)>;
+
+	qcom,allow-voltage-interpolation;
+	qcom,allow-quotient-interpolation;
+	qcom,cpr-scaled-open-loop-voltage-as-ceiling;
+
+	qcom,cpr-aging-ref-corner = <22>;
+	qcom,cpr-aging-ro-scaling-factor = <1620>;
+	qcom,allow-aging-voltage-adjustment = <0>;
+};
+
+&apc1_cpr {
+	compatible = "qcom,cprh-msm8998-v2-kbss-regulator";
+	qcom,cpr-corner-switch-delay-time = <1042>;
+	qcom,cpr-aging-ref-voltage = <1136000>;
+	qcom,apm-threshold-voltage = <800000>;
+	qcom,apm-hysteresis-voltage = <0>;
+	qcom,mem-acc-threshold-voltage = <852000>;
+	qcom,mem-acc-crossover-voltage = <852000>;
+};
+
+&apc1_perfcl_vreg {
+	regulator-max-microvolt = <34>;
+
+	qcom,cpr-fuse-combos = <32>;
+	qcom,cpr-speed-bins = <4>;
+	qcom,cpr-speed-bin-corners = <32 26 30 31>;
+	qcom,cpr-corners =
+		/* Speed bin 0 */
+		<32 32 32 32 32 32 32 32>,
+		/* Speed bin 1 */
+		<26 26 26 26 26 26 26 26>,
+		/* Speed bin 2 */
+		<30 30 30 30 30 30 30 30>,
+		/* Speed bin 3 */
+		<31 31 31 31 31 31 31 31>;
+
+	qcom,cpr-corner-fmax-map =
+		/* Speed bin 0 */
+		<8 12 20 32>,
+		/* Speed bin 1 */
+		<8 12 20 26>,
+		/* Speed bin 2 */
+		<8 12 20 30>,
+		/* Speed bin 3 */
+		<8 12 20 31>;
+
+	qcom,cpr-voltage-ceiling =
+		/* Speed bin 0 */
+		<828000  828000  828000  828000  828000
+		 828000  828000  828000  828000  828000
+		 828000  828000  900000  900000  900000
+		 900000  900000  900000  900000  900000
+		 952000  952000  952000 1136000 1136000
+		1136000 1136000 1136000 1136000 1136000
+		1136000 1136000>,
+		/* Speed bin 1 */
+		<828000  828000  828000  828000  828000
+		 828000  828000  828000  828000  828000
+		 828000  828000  900000  900000  900000
+		 900000  900000  900000  900000  900000
+		 952000  952000  952000 1136000 1136000
+		1136000>,
+		/* Speed bin 2 */
+		<828000  828000  828000  828000  828000
+		 828000  828000  828000  828000  828000
+		 828000  828000  900000  900000  900000
+		 900000  900000  900000  900000  900000
+		 952000  952000  952000 1136000 1136000
+		1136000 1136000 1136000 1136000 1136000>,
+		/* Speed bin 3 */
+		<828000  828000  828000  828000  828000
+		 828000  828000  828000  828000  828000
+		 828000  828000  900000  900000  900000
+		 900000  900000  900000  900000  900000
+		 952000  952000  952000 1136000 1136000
+		1136000 1136000 1136000 1136000 1136000
+		1136000>;
+
+	qcom,cpr-voltage-floor =
+		/* Speed bin 0 */
+		<568000  568000  568000  568000  568000
+		 568000  568000  568000  568000  568000
+		 568000  568000  632000  632000  632000
+		 632000  632000  632000  632000  632000
+		 712000  712000  712000  772000  772000
+		 772000  772000  772000  772000  772000
+		 772000  772000>,
+		/* Speed bin 1 */
+		<568000  568000  568000  568000  568000
+		 568000  568000  568000  568000  568000
+		 568000  568000  632000  632000  632000
+		 632000  632000  632000  632000  632000
+		 712000  712000  712000  772000  772000
+		 772000>,
+		/* Speed bin 2 */
+		<568000  568000  568000  568000  568000
+		 568000  568000  568000  568000  568000
+		 568000  568000  632000  632000  632000
+		 632000  632000  632000  632000  632000
+		 712000  712000  712000  772000  772000
+		 772000  772000  772000  772000  772000>,
+		/* Speed bin 3 */
+		<568000  568000  568000  568000  568000
+		 568000  568000  568000  568000  568000
+		 568000  568000  632000  632000  632000
+		 632000  632000  632000  632000  632000
+		 712000  712000  712000  772000  772000
+		 772000  772000  772000  772000  772000
+		 772000>;
+
+	qcom,cpr-floor-to-ceiling-max-range =
+		/* Speed bin 0 */
+		<32000  32000  32000  32000
+		 32000  32000  32000  32000
+		 32000  32000  32000  32000
+		 32000  32000  32000  32000
+		 32000  32000  32000  32000
+		 40000  40000  40000  40000
+		 40000  40000  40000  40000
+		 40000  40000  40000  40000>,
+		/* Speed bin 1 */
+		<32000  32000  32000  32000
+		 32000  32000  32000  32000
+		 32000  32000  32000  32000
+		 32000  32000  32000  32000
+		 32000  32000  32000  32000
+		 40000  40000  40000  40000
+		 40000  40000>,
+		/* Speed bin 2 */
+		<32000  32000  32000  32000
+		 32000  32000  32000  32000
+		 32000  32000  32000  32000
+		 32000  32000  32000  32000
+		 32000  32000  32000  32000
+		 40000  40000  40000  40000
+		 40000  40000  40000  40000
+		 40000  40000>,
+		/* Speed bin 3 */
+		<32000  32000  32000  32000
+		 32000  32000  32000  32000
+		 32000  32000  32000  32000
+		 32000  32000  32000  32000
+		 32000  32000  32000  32000
+		 40000  40000  40000  40000
+		 40000  40000  40000  40000
+		 40000  40000  40000>;
+
+	qcom,corner-frequencies =
+		/* Speed bin 0 */
+		<300000000  345600000  422400000
+		 499200000  576000000  652800000
+		 729600000  806400000  902400000
+		 979200000 1056000000 1132800000
+		1190400000 1267200000 1344000000
+		1420800000 1497600000 1574400000
+		1651200000 1728000000 1804800000
+		1881600000 1958400000 2035200000
+		2112000000 2208000000 2265600000
+		2342400000 2419200000 2457600000
+		2476800000 2496000000>,
+		/* Speed bin 1 */
+		<300000000  345600000  422400000
+		 499200000  576000000  652800000
+		 729600000  806400000  902400000
+		 979200000 1056000000 1132800000
+		1190400000 1267200000 1344000000
+		1420800000 1497600000 1574400000
+		1651200000 1728000000 1804800000
+		1881600000 1958400000 2035200000
+		2112000000 2208000000>,
+		/* Speed bin 2 */
+		<300000000  345600000  422400000
+		 499200000  576000000  652800000
+		 729600000  806400000  902400000
+		 979200000 1056000000 1132800000
+		1190400000 1267200000 1344000000
+		1420800000 1497600000 1574400000
+		1651200000 1728000000 1804800000
+		1881600000 1958400000 2035200000
+		2112000000 2208000000 2265600000
+		2323200000 2342400000 2361600000>,
+		/* Speed bin 3 */
+		<300000000  345600000  422400000
+		 499200000  576000000  652800000
+		 729600000  806400000  902400000
+		 979200000 1056000000 1132800000
+		1190400000 1267200000 1344000000
+		1420800000 1497600000 1574400000
+		1651200000 1728000000 1804800000
+		1881600000 1958400000 2035200000
+		2112000000 2208000000 2265600000
+		2323200000 2342400000 2361600000
+		2457600000>;
+
+	qcom,cpr-ro-scaling-factor =
+		<2857 3057 2828 2952 2699 2798 2446
+		 2631 2629 2578 2244 3344 3289 3137
+		 3164 2655>,
+		<2857 3057 2828 2952 2699 2798 2446
+		 2631 2629 2578 2244 3344 3289 3137
+		 3164 2655>,
+		<2603 2755 2676 2777 2573 2685 2465
+		 2610 2312 2423 2243 3104 3022 3036
+		 2740 2303>,
+		<1901 2016 2096 2228 2034 2161 2077
+		 2188 1565 1870 1925 2235 2205 2413
+		 1762 1478>;
+
+	qcom,cpr-open-loop-voltage-fuse-adjustment =
+		/* Speed bin 0 */
+		<  8000        0   12000  52000>,
+		<  8000        0   12000  52000>,
+		<  8000        0   12000  52000>,
+		<  8000        0   12000  52000>,
+		<  8000        0   12000  52000>,
+		<  8000        0   12000  52000>,
+		<  8000        0   12000  52000>,
+		<  8000        0   12000  52000>,
+		/* Speed bin 1 */
+		<  8000        0   12000  52000>,
+		<  8000        0   12000  52000>,
+		<  8000        0   12000  52000>,
+		<  8000        0   12000  52000>,
+		<  8000        0   12000  52000>,
+		<  8000        0   12000  52000>,
+		<  8000        0   12000  52000>,
+		<  8000        0   12000  52000>,
+		/* Speed bin 2 */
+		<  8000        0   12000  52000>,
+		<  8000        0   12000  52000>,
+		<  8000        0   12000  52000>,
+		<  8000        0   12000  52000>,
+		<  8000        0   12000  52000>,
+		<  8000        0   12000  52000>,
+		<  8000        0   12000  52000>,
+		<  8000        0   12000  52000>,
+		/* Speed bin 3 */
+		<  8000        0   12000  52000>,
+		<  8000        0   12000  52000>,
+		<  8000        0   12000  52000>,
+		<  8000        0   12000  52000>,
+		<  8000        0   12000  52000>,
+		<  8000        0   12000  52000>,
+		<  8000        0   12000  52000>,
+		<  8000        0   12000  52000>;
+
+	qcom,cpr-closed-loop-voltage-fuse-adjustment =
+		/* Speed bin 0 */
+		<      0        0   12000  50000>,
+		<      0        0   12000  50000>,
+		<      0        0   12000  50000>,
+		<      0        0   12000  50000>,
+		<      0        0   12000  50000>,
+		<      0        0   12000  50000>,
+		<      0        0   12000  50000>,
+		<      0        0   12000  50000>,
+		/* Speed bin 1 */
+		<      0        0   12000  50000>,
+		<      0        0   12000  50000>,
+		<      0        0   12000  50000>,
+		<      0        0   12000  50000>,
+		<      0        0   12000  50000>,
+		<      0        0   12000  50000>,
+		<      0        0   12000  50000>,
+		<      0        0   12000  50000>,
+		/* Speed bin 2 */
+		<      0        0   12000  50000>,
+		<      0        0   12000  50000>,
+		<      0        0   12000  50000>,
+		<      0        0   12000  50000>,
+		<      0        0   12000  50000>,
+		<      0        0   12000  50000>,
+		<      0        0   12000  50000>,
+		<      0        0   12000  50000>,
+		/* Speed bin 3 */
+		<      0        0   12000  50000>,
+		<      0        0   12000  50000>,
+		<      0        0   12000  50000>,
+		<      0        0   12000  50000>,
+		<      0        0   12000  50000>,
+		<      0        0   12000  50000>,
+		<      0        0   12000  50000>,
+		<      0        0   12000  50000>;
+
+	qcom,cpr-open-loop-voltage-adjustment =
+		/* Speed bin 0 */
+		<     0        0        0        0        0
+		      0        0        0        0        0
+		      0   (-8000) (-12000) (-12000) (-12000)
+		(-12000) (-12000) (-12000) (-16000) (-16000)
+		(-20000) (-16000) (-16000) (-16000) (-12000)
+		(-28000) (-28000) (-28000) (-28000) (-28000)
+		(-28000) (-28000)>,
+		/* Speed bin 1 */
+		<     0        0        0        0        0
+		      0        0        0        0        0
+		      0   (-8000) (-12000) (-12000) (-12000)
+		(-12000) (-12000) (-12000) (-16000) (-16000)
+		(-20000) (-16000) (-16000) (-16000) (-16000)
+		(-28000)>,
+		/* Speed bin 2 */
+		<     0        0        0        0        0
+		      0        0        0        0        0
+		      0   (-8000) (-12000) (-12000) (-12000)
+		(-12000) (-12000) (-12000) (-16000) (-16000)
+		(-20000) (-16000) (-16000) (-16000) (-12000)
+		(-28000) (-28000) (-28000) (-28000) (-28000)>,
+		/* Speed bin 3 */
+		<     0        0        0        0        0
+		      0        0        0        0        0
+		      0   (-8000) (-12000) (-12000) (-12000)
+		(-12000) (-12000) (-12000) (-16000) (-16000)
+		(-20000) (-16000) (-16000) (-16000) (-12000)
+		(-28000) (-28000) (-28000) (-28000) (-28000)
+		(-28000)>;
+
+	qcom,cpr-closed-loop-voltage-adjustment =
+		/* Speed bin 0 */
+		<     0        0        0        0        0
+		      0        0        0        0        0
+		      0  (-10000) (-10000) (-11000) (-12000)
+		(-12000) (-13000) (-14000) (-14000) (-15000)
+		(-16000) (-16000) (-17000) (-15000) (-13000)
+		(-26000) (-26000) (-27000) (-27000) (-28000)
+		(-28000) (-28000)>,
+		/* Speed bin 1 */
+		<     0        0        0        0        0
+		      0        0        0        0        0
+		      0  (-10000) (-10000) (-11000) (-12000)
+		(-12000) (-13000) (-14000) (-14000) (-15000)
+		(-16000) (-16000) (-17000) (-16000) (-15000)
+		(-28000)>,
+		/* Speed bin 2 */
+		<     0        0        0        0        0
+		      0        0        0        0        0
+		      0  (-10000) (-10000) (-11000) (-12000)
+		(-12000) (-13000) (-14000) (-14000) (-15000)
+		(-16000) (-16000) (-17000) (-15000) (-14000)
+		(-27000) (-27000) (-28000) (-28000) (-28000)>,
+		/* Speed bin 3 */
+		<     0        0        0        0        0
+		      0        0        0        0        0
+		      0  (-10000) (-10000) (-11000) (-12000)
+		(-12000) (-13000) (-14000) (-14000) (-15000)
+		(-16000) (-16000) (-17000) (-15000) (-14000)
+		(-26000) (-27000) (-27000) (-28000) (-28000)
+		(-28000)>;
+
+	qcom,allow-voltage-interpolation;
+	qcom,allow-quotient-interpolation;
+	qcom,cpr-scaled-open-loop-voltage-as-ceiling;
+
+	qcom,cpr-aging-ref-corner = <32 26 30 31>;
+	qcom,cpr-aging-ro-scaling-factor = <1700>;
+	qcom,allow-aging-voltage-adjustment = <0>;
+};
+
+&pm8005_s1 {
+	regulator-min-microvolt = <516000>;
+	regulator-max-microvolt = <1088000>;
+};
+
+&gfx_cpr {
+	compatible = "qcom,cpr4-msm8998-v2-mmss-regulator";
+	qcom,cpr-aging-ref-voltage = <1088000>;
+};
+
+&gfx_vreg {
+	regulator-min-microvolt = <1>;
+	regulator-max-microvolt = <8>;
+
+	qcom,cpr-fuse-corners = <4>;
+	qcom,cpr-fuse-combos = <8>;
+	qcom,cpr-corners = <8>;
+
+	qcom,cpr-corner-fmax-map = <1 3 5 8>;
+
+	qcom,cpr-voltage-ceiling =
+		<716000 716000 772000 880000 908000 948000 1016000 1088000>,
+		<724000 724000 772000 832000 916000 968000 1024000 1088000>,
+		<724000 724000 772000 832000 916000 968000 1024000 1088000>,
+		<724000 724000 772000 832000 916000 968000 1024000 1088000>,
+		<724000 724000 772000 832000 916000 968000 1024000 1088000>,
+		<724000 724000 772000 832000 916000 968000 1024000 1088000>,
+		<724000 724000 772000 832000 916000 968000 1024000 1088000>,
+		<724000 724000 772000 832000 916000 968000 1024000 1088000>;
+
+	qcom,cpr-voltage-floor =
+		<516000 516000 532000 584000 632000 672000 712000 756000>;
+
+	qcom,mem-acc-voltage = <1 1 1 2 2 2 2 2>;
+
+	qcom,corner-frequencies =
+		<180000000 257000000 342000000 414000000
+		 515000000 596000000 670000000 710000000>;
+
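+	/*
+	 * One 16-entry ring-oscillator quotient row per corner (8 corners);
+	 * zero means the RO is unused at that corner.
+	 */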
+	qcom,cpr-target-quotients =
+		<   0    0    0    0  331  357    0    0
+		    0    0    0    0    0    0  115    0>,
+		<   0    0    0    0  467  500    0    0
+		    0    0    0    0    0    0  199    0>,
+		<   0    0    0    0  628  665    0    0
+		    0    0    0    0    0    0  290    0>,
+		<   0    0    0    0  762  805    0    0
+		    0    0    0    0    0    0  397    0>,
+		<   0    0    0    0  964 1013    0    0
+		    0    0 1143    0 1138 1055    0    0>,
+		<   0    0    0    0    0    0    0    0
+		    0    0 1306    0 1289 1168    0    0>,
+		<   0    0    0    0    0    0    0    0
+		    0    0 1468    0 1429 1256    0    0>,
+		<   0    0    0    0    0    0    0    0
+		    0    0 1627    0 1578 1353    0    0>;
+
+	qcom,cpr-ro-scaling-factor =
+		<   0    0    0    0 2377 2571    0    0
+		    0    0 2168    0 2209 1849 1997    0>,
+		<   0    0    0    0 2377 2571    0    0
+		    0    0 2168    0 2209 1849 1997    0>,
+		<   0    0    0    0 2377 2571    0    0
+		    0    0 2168    0 2209 1849 1997    0>,
+		<   0    0    0    0 2377 2571    0    0
+		    0    0 2168    0 2209 1849 1997    0>,
+		<   0    0    0    0 2377 2571    0    0
+		    0    0 2168    0 2209 1849 1997    0>,
+		<   0    0    0    0 2377 2571    0    0
+		    0    0 2168    0 2209 1849 1997    0>,
+		<   0    0    0    0 2377 2571    0    0
+		    0    0 2168    0 2209 1849 1997    0>,
+		<   0    0    0    0 2377 2571    0    0
+		    0    0 2168    0 2209 1849 1997    0>;
+
+	qcom,cpr-open-loop-voltage-fuse-adjustment =
+		<   60000        0        0        0>,
+		<   60000        0        0        0>,
+		<   60000        0        0        0>,
+		<   60000        0        0        0>,
+		<   60000        0        0        0>,
+		<   60000        0        0        0>,
+		<   60000        0        0        0>,
+		<   60000        0        0        0>;
+
+	qcom,cpr-closed-loop-voltage-adjustment =
+		<   90000    38000    28000     8000
+			0    29000    11000        0>,
+		<   90000    38000    28000     8000
+			0    29000    11000        0>,
+		<   90000    38000    28000     8000
+			0    29000    11000        0>,
+		<   90000    38000    28000     8000
+			0    29000    11000        0>,
+		<   90000    38000    28000     8000
+			0    29000    11000        0>,
+		<   90000    38000    28000     8000
+			0    29000    11000        0>,
+		<   90000    38000    28000     8000
+			0    29000    11000        0>,
+		<   90000    38000    28000     8000
+			0    29000    11000        0>;
+
+	qcom,cpr-floor-to-ceiling-max-range =
+	       <40000 40000 40000 40000 40000 40000 50000 50000>;
+
+	qcom,cpr-fused-closed-loop-voltage-adjustment-map =
+		<0 0 0 0 1 2 3 4>;
+
+	qcom,allow-voltage-interpolation;
+	qcom,cpr-scaled-open-loop-voltage-as-ceiling;
+
+	qcom,cpr-aging-max-voltage-adjustment = <15000>;
+	qcom,cpr-aging-ref-corner = <8>;
+	qcom,cpr-aging-ro-scaling-factor = <1620>;
+	qcom,allow-aging-voltage-adjustment = <0>;
+};
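+
+/*
+ * The gfx_vreg range above is corner-based: regulator-min/max-microvolt
+ * of 1 and 8 are corner indices, not microvolts. A minimal consumer
+ * sketch, assuming the CPR corner-as-voltage convention and a
+ * hypothetical "vdd-gfx" supply name:
+ *
+ *	struct regulator *vdd = devm_regulator_get(dev, "vdd-gfx");
+ *
+ *	if (!IS_ERR(vdd)) {
+ *		regulator_set_voltage(vdd, 5, INT_MAX);
+ *		regulator_enable(vdd);
+ *	}
+ *
+ * where corner 5 maps to 515 MHz via qcom,corner-frequencies.
+ */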
+
+&qusb_phy0 {
+	reg = <0x0c012000 0x2a8>,
+	      <0x01fcb24c 0x4>,
+	      <0x00784238 0x4>;
+	reg-names = "qusb_phy_base",
+			"tcsr_clamp_dig_n_1p8",
+			"efuse_addr";
+	qcom,efuse-bit-pos = <16>;
+	qcom,efuse-num-bits = <4>;
+	qcom,qusb-phy-init-seq =
+			/* <value reg_offset> */
+				<0x13 0x04 /* analog_controls_two */
+				0x7c 0x18c /* pll_clock_inverter */
+				0x80 0x2c /* pll_cmode */
+				0x0a 0x184 /* pll_lock_delay */
+				0xa5 0x23c /* tune1 */
+				0x09 0x240 /* tune2 */
+				0x19 0xb4>; /* digital_timers_two */
+};
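+
+/*
+ * The init sequence above is <value reg_offset> pairs. A sketch of a
+ * driver applying it verbatim (an assumption, not the exact QUSB2 code):
+ *
+ *	u32 seq[14];
+ *
+ *	if (!of_property_read_u32_array(np, "qcom,qusb-phy-init-seq",
+ *					seq, ARRAY_SIZE(seq)))
+ *		for (i = 0; i < ARRAY_SIZE(seq); i += 2)
+ *			writel(seq[i], base + seq[i + 1]);
+ */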
+
+&msm_vidc {
+	qcom,load-freq-tbl =
+		/* Encoders */
+		<1105920 533000000 0x55555555>, /* 4kx2304@30 */ /* TURBO */
+		<1036800 444000000 0x55555555>, /* 720p@240, 1080p@120, 1440p@60,
+						 * UHD@30 */ /* NOMINAL */
+		< 829440 355200000 0x55555555>, /* UHD/4096x2160@30 SVSL1 */
+		< 489600 269330000 0x55555555>, /* 1080p@60, 720p@120 SVS */
+		< 345600 200000000 0x55555555>, /* 2560x1440@24, 1080p@30 */
+						/* SVS2 */
+
+		/* Decoders */
+		<2211840 533000000 0xffffffff>, /* 4kx2304@60, 1080p@240 */
+						/* TURBO */
+		<1728000 444000000 0xffffffff>, /* 2560x1440@120 */
+						/* NOMINAL */
+		<1675472 355200000 0xffffffff>, /* 4kx2304@44 */ /* SVSL1 */
+		<1105920 269330000 0xffffffff>, /* UHD/4k2304@30, 1080p@120 */
+						/* SVS */
+		< 829440 200000000 0xffffffff>; /* 720p@120, 1080p@60 */
+						/* SVS2 */
+
+	qcom,imem-ab-tbl =
+		<200000000 1560000>, /* imem @ svs2 freq 75 MHz */
+		<269330000 3570000>, /* imem @ svs freq 171 MHz */
+		<355200000 3570000>, /* imem @ svs freq 171 MHz */
+		<444000000 6750000>, /* imem @ nom freq 323 MHz */
+		<533000000 8490000>; /* imem @ turbo freq 406 MHz */
+
+	qcom,dcvs-tbl = /* minLoad LoadLow LoadHigh CodecCheck */
+		/* Decode */
+		/* Load > Nominal, Nominal <-> Turbo Eg.3840x2160@60 */
+		<1728000 1728000 2211840 0x3f00000c>,
+		/* Encoder */
+		/* Load > Nominal, Nominal <-> Turbo Eg. 4kx2304@30 */
+		<1036800 1036800 1105920 0x04000004>,
+		/* Load > SVSL1, SVSL1<-> Nominal Eg. 3840x2160@30 */
+		< 829440  829440 1036800 0x04000004>,
+		/* Load > SVS , SVS <-> SVSL1 Eg. 4kx2304@24 */
+		< 489600  489600  829440 0x04000004>;
+
+	qcom,dcvs-limit = /* Min Frame size, Min MBs/sec */
+		<32400 30>, /* Encoder 3840x2160@30 */
+		<32400 60>; /* Decoder 3840x2160@60 */
+};
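+
+/*
+ * Each load-freq-tbl row is <load_in_macroblocks_per_sec core_freq
+ * codec_mask>, sorted from highest load down. A sketch of the implied
+ * lookup (the tbl[] layout is an assumption): the last row that still
+ * covers the session load yields the lowest sufficient frequency.
+ *
+ *	for (i = 0; i < n; i++)
+ *		if ((tbl[i].codec_mask & codec_bit) && tbl[i].load >= load)
+ *			freq = tbl[i].freq;
+ */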
+
+&soc {
+	/* Gold L2 SAW */
+	qcom,spm@178120000 {
+		qcom,saw2-avs-limit = <0x4700470>;
+	};
+
+	/* Silver L2 SAW */
+	qcom,spm@179120000 {
+		qcom,saw2-avs-limit = <0x4200420>;
+	};
+};
+
+/* GPU overrides */
+&msm_gpu {
+	/* Updated chip ID */
+	qcom,chipid = <0x05040001>;
+	qcom,initial-pwrlevel = <6>;
+
+	qcom,gpu-pwrlevels {
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		compatible = "qcom,gpu-pwrlevels";
+
+		qcom,gpu-pwrlevel@0 {
+			reg = <0>;
+			qcom,gpu-freq = <710000000>;
+			qcom,bus-freq = <12>;
+			qcom,bus-min = <12>;
+			qcom,bus-max = <12>;
+		};
+
+		qcom,gpu-pwrlevel@1 {
+			reg = <1>;
+			qcom,gpu-freq = <670000000>;
+			qcom,bus-freq = <12>;
+			qcom,bus-min = <11>;
+			qcom,bus-max = <12>;
+		};
+
+		qcom,gpu-pwrlevel@2 {
+			reg = <2>;
+			qcom,gpu-freq = <596000000>;
+			qcom,bus-freq = <11>;
+			qcom,bus-min = <9>;
+			qcom,bus-max = <12>;
+		};
+
+		qcom,gpu-pwrlevel@3 {
+			reg = <3>;
+			qcom,gpu-freq = <515000000>;
+			qcom,bus-freq = <11>;
+			qcom,bus-min = <9>;
+			qcom,bus-max = <12>;
+		};
+
+		qcom,gpu-pwrlevel@4 {
+			reg = <4>;
+			qcom,gpu-freq = <414000000>;
+			qcom,bus-freq = <9>;
+			qcom,bus-min = <8>;
+			qcom,bus-max = <11>;
+		};
+
+		qcom,gpu-pwrlevel@5 {
+			reg = <5>;
+			qcom,gpu-freq = <342000000>;
+			qcom,bus-freq = <8>;
+			qcom,bus-min = <5>;
+			qcom,bus-max = <9>;
+		};
+
+		qcom,gpu-pwrlevel@6 {
+			reg = <6>;
+			qcom,gpu-freq = <257000000>;
+			qcom,bus-freq = <5>;
+			qcom,bus-min = <3>;
+			qcom,bus-max = <8>;
+		};
+
+		qcom,gpu-pwrlevel@7 {
+			reg = <7>;
+			qcom,gpu-freq = <27000000>;
+			qcom,bus-freq = <0>;
+			qcom,bus-min = <0>;
+			qcom,bus-max = <0>;
+		};
+	};
+};
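+
+/*
+ * qcom,initial-pwrlevel = <6> selects the child whose reg is 6, i.e. the
+ * 257 MHz entry; the bus-freq/bus-min/bus-max values index the GPU's
+ * bus-bandwidth table. A sketch of walking the children (the levels[]
+ * array is an assumed destination):
+ *
+ *	for_each_child_of_node(pwrlevels_np, child) {
+ *		if (of_property_read_u32(child, "reg", &idx) ||
+ *		    of_property_read_u32(child, "qcom,gpu-freq", &freq))
+ *			continue;
+ *		levels[idx].gpu_freq = freq;
+ *	}
+ */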
+
+&spss_utils {
+	qcom,spss-test-firmware-name = "spss2t";	/* 8 chars max */
+	qcom,spss-prod-firmware-name = "spss2p";	/* 8 chars max */
+	qcom,spss-hybr-firmware-name = "spss2h";	/* 8 chars max */
+};
+
+&ufs1 {
+	clock-names =
+		"core_clk",
+		"bus_aggr_clk",
+		"iface_clk",
+		"core_clk_unipro",
+		"core_clk_ice",
+		"ref_clk",
+		"tx_lane0_sync_clk",
+		"rx_lane0_sync_clk",
+		"rx_lane1_sync_clk";
+	clocks =
+		<&clock_gcc clk_gcc_ufs_axi_hw_ctl_clk>,
+		<&clock_gcc clk_gcc_aggre1_ufs_axi_clk>,
+		<&clock_gcc clk_gcc_ufs_ahb_clk>,
+		<&clock_gcc clk_gcc_ufs_unipro_core_hw_ctl_clk>,
+		<&clock_gcc clk_gcc_ufs_ice_core_hw_ctl_clk>,
+		<&clock_gcc clk_ln_bb_clk1>,
+		<&clock_gcc clk_gcc_ufs_tx_symbol_0_clk>,
+		<&clock_gcc clk_gcc_ufs_rx_symbol_0_clk>,
+		<&clock_gcc clk_gcc_ufs_rx_symbol_1_clk>;
+	freq-table-hz =
+		<50000000 200000000>,
+		<0 0>,
+		<0 0>,
+		<37500000 150000000>,
+		<75000000 300000000>,
+		<0 0>,
+		<0 0>,
+		<0 0>,
+		<0 0>;
+
+	lanes-per-direction = <2>;
+};
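+
+/*
+ * freq-table-hz rows pair positionally with clock-names as <min max>;
+ * <0 0> marks a clock that is never rescaled. A sketch of the pairing,
+ * with i tracking the clock-names index (an assumption about the
+ * driver's bookkeeping):
+ *
+ *	of_property_read_u32_index(np, "freq-table-hz", 2 * i, &minf);
+ *	of_property_read_u32_index(np, "freq-table-hz", 2 * i + 1, &maxf);
+ *	if (minf && maxf)
+ *		clk_set_rate(clk[i], scale_up ? maxf : minf);
+ */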
+
+&ssc_sensors {
+	qcom,firmware-name = "slpi_v2";
+};
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/arch/arm64/boot/dts/qcom/msm8998-vidc.dtsi	2019-01-22 16:16:21.203225578 +0100
@@ -0,0 +1,251 @@
+/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+#include <dt-bindings/msm/msm-bus-ids.h>
+#include <dt-bindings/clock/msm-clocks-8998.h>
+
+&soc {
+	msm_vidc: qcom,vidc@cc00000 {
+		compatible = "qcom,msm-vidc";
+		status = "ok";
+		reg = <0xcc00000 0x100000>;
+		interrupts = <GIC_SPI 287 IRQ_TYPE_LEVEL_HIGH>;
+		qcom,hfi = "venus";
+		qcom,hfi-version = "3xx";
+		qcom,firmware-name = "venus";
+		qcom,never-unload-fw;
+		qcom,sw-power-collapse;
+		qcom,max-secure-instances = <5>;
+		qcom,reg-presets =
+			<0x80124 0x00000003>,
+			<0x80550 0x01111111>,
+			<0x80560 0x01111111>,
+			<0x80568 0x01111111>,
+			<0x80570 0x01111111>,
+			<0x80580 0x01111111>,
+			<0x80588 0x01111111>,
+			<0xe2010 0x00000000>;
+
+		qcom,imem-size = <524288>; /* 512 kB */
+		qcom,max-hw-load = <2563200>; /* Full 4k @ 60 + 1080p @ 60 */
+		qcom,power-conf = <8294400>; /* WxH - 3840*2160 */
+		qcom,load-freq-tbl =
+			/* Encoders */
+			<972000 465000000 0x55555555>, /* 4k UHD @ 30 */
+			<489600 360000000 0x55555555>, /* 1080p @ 60 */
+			<244800 186000000 0x55555555>, /* 1080p @ 30 */
+			<108000 100000000 0x55555555>, /* 720p @ 30 */
+
+			/* Decoders */
+			<1944000 465000000 0xffffffff>, /* 4k UHD @ 60 */
+			< 972000 360000000 0xffffffff>, /* 4k UHD @ 30 */
+			< 489600 186000000 0xffffffff>, /* 1080p @ 60 */
+			< 244800 100000000 0xffffffff>; /* 1080p @ 30 */
+
+		qcom,dcvs-tbl =
+			<972000 972000  1944000 0x3f00000c>, /* UHD 30 */
+			<489600 489600   972000 0x3f00000c>, /* 1080p 60 */
+			<244800 244800   489600 0x3f00000c>, /* 1080p 30 */
+			<829440 489600   972000 0x04000004>; /* DCI 24 */
+
+		qcom,dcvs-limit =
+			<32400 30>, /* Encoder UHD */
+			<14400 30>; /* Decoder WQHD */
+
+		/* Table lists <video_core_freq imem_ab> pairs.
+		 * imem_ab value determines the imem clock frequency for the
+		 * corresponding video core frequency.
+		 */
+		qcom,imem-ab-tbl =
+			<100000000 1560000>, /* imem @ svs2 freq 75 MHz */
+			<186000000 3570000>, /* imem @ svs freq 171 MHz */
+			<360000000 6750000>, /* imem @ nom freq 323 MHz */
+			<465000000 8490000>; /* imem @ turbo freq 406 MHz */
+
+		/* Regulators */
+		smmu-vdd-supply = <&gdsc_bimc_smmu>;
+		venus-supply = <&gdsc_venus>;
+		venus-core0-supply = <&gdsc_venus_core0>;
+		venus-core1-supply = <&gdsc_venus_core1>;
+
+		/* Clocks */
+		clock-names = "sys_noc_axi_clk",
+			"noc_axi_clk", "mnoc_ahb_clk",
+			"smmu_ahb_clk", "smmu_axi_clk",
+			"mnoc_maxi_clk",
+			"core_clk", "iface_clk", "bus_clk",
+			"maxi_clk", "core0_clk", "core1_clk";
+		clocks = <&clock_gcc clk_gcc_mmss_sys_noc_axi_clk>,
+			<&clock_gcc clk_mmssnoc_axi_clk>,
+			<&clock_mmss clk_mmss_mnoc_ahb_clk>,
+			<&clock_mmss clk_mmss_bimc_smmu_ahb_clk>,
+			<&clock_mmss clk_mmss_bimc_smmu_axi_clk>,
+			<&clock_mmss clk_mmss_mnoc_maxi_clk>,
+			<&clock_mmss clk_mmss_video_core_clk>,
+			<&clock_mmss clk_mmss_video_ahb_clk>,
+			<&clock_mmss clk_mmss_video_axi_clk>,
+			<&clock_mmss clk_mmss_video_maxi_clk>,
+			<&clock_mmss clk_mmss_video_subcore0_clk>,
+			<&clock_mmss clk_mmss_video_subcore1_clk>;
+		qcom,clock-configs = <0x0 0x0 0x0 0x0 0x0 0x0
+				0x3 0x0 0x2 0x2 0x3 0x3>;
+
+		/* Buses */
+		bus_cnoc {
+			compatible = "qcom,msm-vidc,bus";
+			label = "cnoc";
+			qcom,bus-master = <MSM_BUS_MASTER_AMPSS_M0>;
+			qcom,bus-slave = <MSM_BUS_SLAVE_VENUS_CFG>;
+			qcom,bus-governor = "performance";
+			qcom,bus-range-kbps = <1 1>;
+		};
+
+		venus_bus_ddr {
+			compatible = "qcom,msm-vidc,bus";
+			label = "venus-ddr";
+			qcom,bus-master = <MSM_BUS_MASTER_VIDEO_P0>;
+			qcom,bus-slave = <MSM_BUS_SLAVE_EBI_CH0>;
+			qcom,bus-governor = "msm-vidc-ddr";
+			qcom,bus-range-kbps = <1000 4946000>;
+		};
+
+		venus_bus_vmem {
+			compatible = "qcom,msm-vidc,bus";
+			label = "venus-vmem";
+			qcom,bus-master = <MSM_BUS_MASTER_VIDEO_P0_OCMEM>;
+			qcom,bus-slave = <MSM_BUS_SLAVE_VMEM>;
+			qcom,bus-governor = "msm-vidc-vmem+";
+			qcom,bus-range-kbps = <1000 8490000>;
+		};
+
+		arm9_bus_ddr {
+			compatible = "qcom,msm-vidc,bus";
+			label = "venus-arm9-ddr";
+			qcom,bus-master = <MSM_BUS_MASTER_VIDEO_P0>;
+			qcom,bus-slave = <MSM_BUS_SLAVE_EBI_CH0>;
+			qcom,bus-governor = "performance";
+			qcom,bus-range-kbps = <1 1>;
+		};
+
+		/* MMUs */
+		non_secure_cb {
+			compatible = "qcom,msm-vidc,context-bank";
+			label = "venus_ns";
+			iommus =
+				<&mmss_smmu 0x400>,
+				<&mmss_smmu 0x401>,
+				<&mmss_smmu 0x40a>,
+				<&mmss_smmu 0x407>,
+				<&mmss_smmu 0x40e>,
+				<&mmss_smmu 0x40f>,
+				<&mmss_smmu 0x408>,
+				<&mmss_smmu 0x409>,
+				<&mmss_smmu 0x40b>,
+				<&mmss_smmu 0x40c>,
+				<&mmss_smmu 0x40d>,
+				<&mmss_smmu 0x410>,
+				<&mmss_smmu 0x421>,
+				<&mmss_smmu 0x428>,
+				<&mmss_smmu 0x429>,
+				<&mmss_smmu 0x42b>,
+				<&mmss_smmu 0x42c>,
+				<&mmss_smmu 0x42d>,
+				<&mmss_smmu 0x411>,
+				<&mmss_smmu 0x431>;
+			buffer-types = <0xfff>;
+			virtual-addr-pool = <0x70800000 0x6f800000>;
+		};
+
+		firmware_cb {
+			compatible = "qcom,msm-vidc,context-bank";
+			qcom,fw-context-bank;
+			iommus = <&mmss_smmu 0x580>,
+				<&mmss_smmu 0x586>;
+		};
+		secure_bitstream_cb {
+			compatible = "qcom,msm-vidc,context-bank";
+			label = "venus_sec_bitstream";
+			iommus = <&mmss_smmu 0x500>,
+				<&mmss_smmu 0x502>,
+				<&mmss_smmu 0x509>,
+				<&mmss_smmu 0x50a>,
+				<&mmss_smmu 0x50b>,
+				<&mmss_smmu 0x50e>,
+				<&mmss_smmu 0x526>,
+				<&mmss_smmu 0x529>,
+				<&mmss_smmu 0x52b>;
+			buffer-types = <0x241>;
+			virtual-addr-pool = <0x4b000000 0x25800000>;
+			qcom,secure-context-bank;
+		};
+
+		venus_secure_pixel_cb: secure_pixel_cb {
+			compatible = "qcom,msm-vidc,context-bank";
+			label = "venus_sec_pixel";
+			iommus = <&mmss_smmu 0x504>,
+				<&mmss_smmu 0x50c>,
+				<&mmss_smmu 0x510>,
+				<&mmss_smmu 0x52c>;
+			buffer-types = <0x106>;
+			virtual-addr-pool = <0x25800000 0x25800000>;
+			qcom,secure-context-bank;
+		};
+
+		venus_secure_non_pixel_cb: secure_non_pixel_cb {
+			compatible = "qcom,msm-vidc,context-bank";
+			label = "venus_sec_non_pixel";
+			iommus = <&mmss_smmu 0x505>,
+				<&mmss_smmu 0x507>,
+				<&mmss_smmu 0x508>,
+				<&mmss_smmu 0x50d>,
+				<&mmss_smmu 0x50f>,
+				<&mmss_smmu 0x525>,
+				<&mmss_smmu 0x528>,
+				<&mmss_smmu 0x52d>,
+				<&mmss_smmu 0x540>;
+			buffer-types = <0x480>;
+			virtual-addr-pool = <0x1000000 0x24800000>;
+			qcom,secure-context-bank;
+		};
+	};
+
+	qcom,vmem@c880000 {
+		compatible = "qcom,msm-vmem";
+		status = "ok";
+		interrupts = <GIC_SPI 429 IRQ_TYPE_LEVEL_HIGH>;
+
+		reg = <0xc880000 0x6b>,
+		    <0x14800000 0x80000>;
+		reg-names = "reg-base", "mem-base";
+
+		clocks = <&clock_mmss clk_mmss_mnoc_ahb_clk>,
+			<&clock_mmss clk_mmss_mnoc_maxi_clk>,
+			<&clock_mmss clk_mmss_vmem_ahb_clk>,
+			<&clock_mmss clk_mmss_vmem_maxi_clk>;
+		clock-names = "mnoc_ahb","mnoc_maxi",
+			"ahb", "maxi";
+		clock-config = <0x0 0x0 0x0 0x1>;
+
+		qcom,msm-bus,name = "vmem";
+		qcom,msm-bus,num-cases = <2>;
+		qcom,msm-bus,num-paths = <2>;
+		qcom,msm-bus,vectors-KBps =
+		<MSM_BUS_MASTER_VIDEO_P0_OCMEM MSM_BUS_SLAVE_VMEM        0    0>,
+		<MSM_BUS_MASTER_AMPSS_M0       MSM_BUS_SLAVE_VMEM_CFG    0    0>,
+		<MSM_BUS_MASTER_VIDEO_P0_OCMEM MSM_BUS_SLAVE_VMEM     1000 1000>,
+		<MSM_BUS_MASTER_AMPSS_M0       MSM_BUS_SLAVE_VMEM_CFG  500  800>;
+
+		qcom,bank-size = <131072>; /* 128 kB */
+	};
+};
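+
+/*
+ * Sanity arithmetic for the vmem node above: the "mem-base" window is
+ * 0x80000 bytes (512 kB) and qcom,bank-size is 131072 (128 kB), so,
+ * assuming the driver derives the bank count from these two values:
+ *
+ *	banks = 0x80000 / 131072;	(= 4 banks of 128 kB)
+ *
+ * which matches the 512 kB qcom,imem-size advertised in the msm_vidc node.
+ */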
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/arch/arm64/boot/dts/qcom/msm8998-wcd.dtsi	2019-01-22 16:16:21.203225578 +0100
@@ -0,0 +1,239 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+&slim_aud {
+	tasha_codec {
+		wsa_spkr_sd1: msm_cdc_pinctrll {
+		      compatible = "qcom,msm-cdc-pinctrl";
+		      pinctrl-names = "aud_active", "aud_sleep";
+		      pinctrl-0 = <&spkr_1_sd_n_active>;
+		      pinctrl-1 = <&spkr_1_sd_n_sleep>;
+		};
+
+		wsa_spkr_sd2: msm_cdc_pinctrlr {
+		      compatible = "qcom,msm-cdc-pinctrl";
+		      pinctrl-names = "aud_active", "aud_sleep";
+		      pinctrl-0 = <&spkr_2_sd_n_active>;
+		      pinctrl-1 = <&spkr_2_sd_n_sleep>;
+		};
+	};
+
+	tavil_codec {
+		wcd: wcd_pinctrl@5 {
+			compatible = "qcom,wcd-pinctrl";
+			qcom,num-gpios = <5>;
+			gpio-controller;
+			#gpio-cells = <2>;
+
+			us_euro_sw_wcd_active: us_euro_sw_wcd_active {
+				mux {
+					pins = "gpio1";
+				};
+
+				config {
+					pins = "gpio1";
+					output-high;
+				};
+			};
+
+			us_euro_sw_wcd_sleep: us_euro_sw_wcd_sleep {
+				mux {
+					pins = "gpio1";
+				};
+
+				config {
+					pins = "gpio1";
+					output-low;
+				};
+			};
+
+			spkr_1_wcd_en_active: spkr_1_wcd_en_active {
+				mux {
+					pins = "gpio2";
+				};
+
+				config {
+					pins = "gpio2";
+					output-high;
+				};
+			};
+
+			spkr_1_wcd_en_sleep: spkr_1_wcd_en_sleep {
+				mux {
+					pins = "gpio2";
+				};
+
+				config {
+					pins = "gpio2";
+					input-enable;
+				};
+			};
+
+			spkr_2_wcd_en_active: spkr_2_sd_n_active {
+				mux {
+					pins = "gpio3";
+				};
+
+				config {
+					pins = "gpio3";
+					output-high;
+				};
+			};
+
+			spkr_2_wcd_en_sleep: spkr_2_sd_n_sleep {
+				mux {
+					pins = "gpio3";
+				};
+
+				config {
+					pins = "gpio3";
+					input-enable;
+				};
+			};
+
+			hph_en0_wcd_active: hph_en0_wcd_active {
+				mux {
+					pins = "gpio4";
+				};
+
+				config {
+					pins = "gpio4";
+					output-high;
+				};
+			};
+
+			hph_en0_wcd_sleep: hph_en0_wcd_sleep {
+				mux {
+					pins = "gpio4";
+				};
+
+				config {
+					pins = "gpio4";
+					output-low;
+				};
+			};
+
+			hph_en1_wcd_active: hph_en1_wcd_active {
+				mux {
+					pins = "gpio5";
+				};
+
+				config {
+					pins = "gpio5";
+					output-high;
+				};
+			};
+
+			hph_en1_wcd_sleep: hph_en1_wcd_sleep {
+				mux {
+					pins = "gpio5";
+				};
+
+				config {
+					pins = "gpio5";
+					output-low;
+				};
+			};
+		};
+
+		wsa_spkr_wcd_sd1: msm_cdc_pinctrll {
+		      compatible = "qcom,msm-cdc-pinctrl";
+		      pinctrl-names = "aud_active", "aud_sleep";
+		      pinctrl-0 = <&spkr_1_wcd_en_active>;
+		      pinctrl-1 = <&spkr_1_wcd_en_sleep>;
+		};
+
+		wsa_spkr_wcd_sd2: msm_cdc_pinctrlr {
+		      compatible = "qcom,msm-cdc-pinctrl";
+		      pinctrl-names = "aud_active", "aud_sleep";
+		      pinctrl-0 = <&spkr_2_wcd_en_active>;
+		      pinctrl-1 = <&spkr_2_wcd_en_sleep>;
+		};
+
+		tavil_us_euro_sw: msm_cdc_pinctrl_us_euro_sw {
+		      compatible = "qcom,msm-cdc-pinctrl";
+		      pinctrl-names = "aud_active", "aud_sleep";
+		      pinctrl-0 = <&us_euro_sw_wcd_active>;
+		      pinctrl-1 = <&us_euro_sw_wcd_sleep>;
+		};
+
+		tavil_hph_en0: msm_cdc_pinctrl_hph_en0 {
+		      compatible = "qcom,msm-cdc-pinctrl";
+		      pinctrl-names = "aud_active", "aud_sleep";
+		      pinctrl-0 = <&hph_en0_wcd_active>;
+		      pinctrl-1 = <&hph_en0_wcd_sleep>;
+		};
+
+		tavil_hph_en1: msm_cdc_pinctrl_hph_en1 {
+		      compatible = "qcom,msm-cdc-pinctrl";
+		      pinctrl-names = "aud_active", "aud_sleep";
+		      pinctrl-0 = <&hph_en1_wcd_active>;
+		      pinctrl-1 = <&hph_en1_wcd_sleep>;
+		};
+	};
+};
+
+&tlmm {
+	spkr_1_sd_n {
+		spkr_1_sd_n_sleep: spkr_1_sd_n_sleep {
+			mux {
+				pins = "gpio65";
+				function = "gpio";
+			};
+			config {
+				pins = "gpio65";
+				drive-strength = <2>;   /* 2 mA */
+				bias-pull-down;
+				input-enable;
+			};
+		};
+		spkr_1_sd_n_active: spkr_1_sd_n_active {
+			mux {
+				pins = "gpio65";
+				function = "gpio";
+			};
+			config {
+				pins = "gpio65";
+				drive-strength = <16>;   /* 16 mA */
+				bias-disable;
+				output-high;
+			};
+		};
+	};
+
+	spkr_2_sd_n {
+		spkr_2_sd_n_sleep: spkr_2_sd_n_sleep {
+			mux {
+				pins = "gpio66";
+				function = "gpio";
+			};
+			config {
+				pins = "gpio66";
+				drive-strength = <2>;   /* 2 mA */
+				bias-pull-down;
+				input-enable;
+			};
+		};
+		spkr_2_sd_n_active: spkr_2_sd_n_active {
+			mux {
+				pins = "gpio66";
+				function = "gpio";
+			};
+			config {
+				pins = "gpio66";
+				drive-strength = <16>;   /* 16 mA */
+				bias-disable;
+				output-high;
+			};
+		};
+	};
+};
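+
+/*
+ * Every msm-cdc-pinctrl node above exposes exactly two states,
+ * "aud_active" and "aud_sleep". A minimal consumer sketch using the
+ * standard pinctrl API:
+ *
+ *	struct pinctrl *p = devm_pinctrl_get(dev);
+ *	struct pinctrl_state *on = pinctrl_lookup_state(p, "aud_active");
+ *	struct pinctrl_state *off = pinctrl_lookup_state(p, "aud_sleep");
+ *
+ *	pinctrl_select_state(p, enable ? on : off);
+ */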
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/arch/arm64/boot/dts/qcom/msm8998-wsa881x.dtsi	2019-01-22 16:16:21.203225578 +0100
@@ -0,0 +1,78 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#include "msm8998-wcd.dtsi"
+
+&slim_aud {
+	tasha_codec {
+		swr_master {
+			compatible = "qcom,swr-wcd";
+			#address-cells = <2>;
+			#size-cells = <0>;
+
+			wsa881x_211: wsa881x@20170211 {
+				compatible = "qcom,wsa881x";
+				reg = <0x00 0x20170211>;
+				qcom,spkr-sd-n-node = <&wsa_spkr_sd1>;
+			};
+
+			wsa881x_212: wsa881x@20170212 {
+				compatible = "qcom,wsa881x";
+				reg = <0x00 0x20170212>;
+				qcom,spkr-sd-n-node = <&wsa_spkr_sd2>;
+			};
+
+			wsa881x_213: wsa881x@21170213 {
+				compatible = "qcom,wsa881x";
+				reg = <0x00 0x21170213>;
+				qcom,spkr-sd-n-node = <&wsa_spkr_sd1>;
+			};
+
+			wsa881x_214: wsa881x@21170214 {
+				compatible = "qcom,wsa881x";
+				reg = <0x00 0x21170214>;
+				qcom,spkr-sd-n-node = <&wsa_spkr_sd2>;
+			};
+		};
+	};
+
+	tavil_codec {
+		swr_master {
+			compatible = "qcom,swr-wcd";
+			#address-cells = <2>;
+			#size-cells = <0>;
+
+			wsa881x_0211: wsa881x@20170211 {
+				compatible = "qcom,wsa881x";
+				reg = <0x00 0x20170211>;
+				qcom,spkr-sd-n-node = <&wsa_spkr_wcd_sd1>;
+			};
+
+			wsa881x_0212: wsa881x@20170212 {
+				compatible = "qcom,wsa881x";
+				reg = <0x00 0x20170212>;
+				qcom,spkr-sd-n-node = <&wsa_spkr_wcd_sd2>;
+			};
+
+			wsa881x_0213: wsa881x@21170213 {
+				compatible = "qcom,wsa881x";
+				reg = <0x00 0x21170213>;
+				qcom,spkr-sd-n-node = <&wsa_spkr_wcd_sd1>;
+			};
+
+			wsa881x_0214: wsa881x@21170214 {
+				compatible = "qcom,wsa881x";
+				reg = <0x00 0x21170214>;
+				qcom,spkr-sd-n-node = <&wsa_spkr_wcd_sd2>;
+			};
+		};
+	};
+};
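+
+/*
+ * With #address-cells = <2>, each speaker's reg is <0x00 enum_addr>, the
+ * second cell being the device's unique SoundWire enumeration address.
+ * The shutdown-pin helper referenced by qcom,spkr-sd-n-node resolves as
+ * in this sketch:
+ *
+ *	struct device_node *sd_n =
+ *		of_parse_phandle(np, "qcom,spkr-sd-n-node", 0);
+ */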
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/arch/arm64/boot/dts/qcom/msm-arm-smmu-8998.dtsi	2019-01-22 16:16:21.179225361 +0100
@@ -0,0 +1,192 @@
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <dt-bindings/clock/msm-clocks-8998.h>
+#include <dt-bindings/msm/msm-bus-ids.h>
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+
+&soc {
+	anoc1_smmu: arm,smmu-anoc1@1680000 {
+		status = "ok";
+		compatible = "qcom,smmu-v2";
+		reg = <0x1680000 0x10000>;
+		#iommu-cells = <0>;
+		qcom,register-save;
+		qcom,skip-init;
+		#global-interrupts = <0>;
+		interrupts = <GIC_SPI 364 IRQ_TYPE_EDGE_RISING>,
+			   <GIC_SPI 365 IRQ_TYPE_EDGE_RISING>,
+			   <GIC_SPI 366 IRQ_TYPE_EDGE_RISING>,
+			   <GIC_SPI 367 IRQ_TYPE_EDGE_RISING>,
+			   <GIC_SPI 368 IRQ_TYPE_EDGE_RISING>,
+			   <GIC_SPI 369 IRQ_TYPE_EDGE_RISING>;
+		qcom,msm-bus,name = "smmu-bus-client-anoc1";
+		qcom,msm-bus,num-cases = <2>;
+		qcom,msm-bus,active-only;
+		qcom,msm-bus,num-paths = <1>;
+		/* aggre1_noc_clk */
+		qcom,msm-bus,vectors-KBps =
+				<84 10062 0 0>,
+				<84 10062 0 1000>;
+	};
+
+	anoc2_smmu: arm,smmu-anoc2@16c0000 {
+		status = "ok";
+		compatible = "qcom,smmu-v2";
+		reg = <0x16c0000 0x40000>;
+		#iommu-cells = <1>;
+		qcom,register-save;
+		qcom,skip-init;
+		#global-interrupts = <0>;
+		interrupts = <GIC_SPI 373 IRQ_TYPE_EDGE_RISING>,
+			   <GIC_SPI 374 IRQ_TYPE_EDGE_RISING>,
+			   <GIC_SPI 375 IRQ_TYPE_EDGE_RISING>,
+			   <GIC_SPI 376 IRQ_TYPE_EDGE_RISING>,
+			   <GIC_SPI 377 IRQ_TYPE_EDGE_RISING>,
+			   <GIC_SPI 378 IRQ_TYPE_EDGE_RISING>,
+			   <GIC_SPI 462 IRQ_TYPE_EDGE_RISING>,
+			   <GIC_SPI 463 IRQ_TYPE_EDGE_RISING>,
+			   <GIC_SPI 464 IRQ_TYPE_EDGE_RISING>,
+			   <GIC_SPI 465 IRQ_TYPE_EDGE_RISING>;
+		qcom,msm-bus,name = "smmu-bus-client-anoc2";
+		qcom,msm-bus,num-cases = <2>;
+		qcom,msm-bus,active-only;
+		qcom,msm-bus,num-paths = <1>;
+		/* aggre2_noc_clk */
+		qcom,msm-bus,vectors-KBps =
+				<117 10065 0 0>,
+				<117 10065 0 1000>;
+	};
+
+	lpass_q6_smmu: arm,smmu-lpass_q6@5100000 {
+		status = "ok";
+		compatible = "qcom,smmu-v2";
+		reg = <0x5100000 0x40000>;
+		#iommu-cells = <1>;
+		qcom,tz-device-id = "LPASS";
+		qcom,register-save;
+		qcom,skip-init;
+		#global-interrupts = <0>;
+		interrupts = <GIC_SPI 226 IRQ_TYPE_LEVEL_HIGH>,
+			   <GIC_SPI 393 IRQ_TYPE_LEVEL_HIGH>,
+			   <GIC_SPI 394 IRQ_TYPE_LEVEL_HIGH>,
+			   <GIC_SPI 395 IRQ_TYPE_LEVEL_HIGH>,
+			   <GIC_SPI 396 IRQ_TYPE_LEVEL_HIGH>,
+			   <GIC_SPI 397 IRQ_TYPE_LEVEL_HIGH>,
+			   <GIC_SPI 398 IRQ_TYPE_LEVEL_HIGH>,
+			   <GIC_SPI 399 IRQ_TYPE_LEVEL_HIGH>,
+			   <GIC_SPI 400 IRQ_TYPE_LEVEL_HIGH>,
+			   <GIC_SPI 401 IRQ_TYPE_LEVEL_HIGH>,
+			   <GIC_SPI 402 IRQ_TYPE_LEVEL_HIGH>,
+			   <GIC_SPI 403 IRQ_TYPE_LEVEL_HIGH>,
+			   <GIC_SPI 137 IRQ_TYPE_LEVEL_HIGH>;
+		vdd-supply = <&gdsc_hlos1_vote_lpass_adsp>;
+		clocks = <&clock_gcc clk_hlos1_vote_lpass_adsp_smmu_clk>;
+		clock-names = "lpass_q6_smmu_clk";
+		#clock-cells = <1>;
+	};
+
+	mmss_smmu: arm,smmu-mmss@cd00000 {
+		status = "ok";
+		compatible = "qcom,smmu-v2";
+		reg = <0xcd00000 0x40000>;
+		#iommu-cells = <1>;
+		qcom,register-save;
+		qcom,no-smr-check;
+		qcom,skip-init;
+		#global-interrupts = <0>;
+		interrupts = <GIC_SPI 263 IRQ_TYPE_LEVEL_HIGH>,
+			   <GIC_SPI 266 IRQ_TYPE_LEVEL_HIGH>,
+			   <GIC_SPI 267 IRQ_TYPE_LEVEL_HIGH>,
+			   <GIC_SPI 268 IRQ_TYPE_LEVEL_HIGH>,
+			   <GIC_SPI 244 IRQ_TYPE_LEVEL_HIGH>,
+			   <GIC_SPI 245 IRQ_TYPE_LEVEL_HIGH>,
+			   <GIC_SPI 247 IRQ_TYPE_LEVEL_HIGH>,
+			   <GIC_SPI 248 IRQ_TYPE_LEVEL_HIGH>,
+			   <GIC_SPI 249 IRQ_TYPE_LEVEL_HIGH>,
+			   <GIC_SPI 250 IRQ_TYPE_LEVEL_HIGH>,
+			   <GIC_SPI 251 IRQ_TYPE_LEVEL_HIGH>,
+			   <GIC_SPI 252 IRQ_TYPE_LEVEL_HIGH>,
+			   <GIC_SPI 253 IRQ_TYPE_LEVEL_HIGH>,
+			   <GIC_SPI 254 IRQ_TYPE_LEVEL_HIGH>,
+			   <GIC_SPI 255 IRQ_TYPE_LEVEL_HIGH>,
+			   <GIC_SPI 256 IRQ_TYPE_LEVEL_HIGH>,
+			   <GIC_SPI 260 IRQ_TYPE_LEVEL_HIGH>,
+			   <GIC_SPI 261 IRQ_TYPE_LEVEL_HIGH>,
+			   <GIC_SPI 262 IRQ_TYPE_LEVEL_HIGH>,
+			   <GIC_SPI 272 IRQ_TYPE_LEVEL_HIGH>;
+		vdd-supply = <&gdsc_bimc_smmu>;
+		clocks = <&clock_mmss clk_mmss_mnoc_ahb_clk>,
+			<&clock_mmss clk_mmss_bimc_smmu_ahb_clk>,
+			<&clock_mmss clk_mmss_bimc_smmu_axi_clk>;
+		clock-names = "mmss_mnoc_ahb_clk",
+			"mmss_bimc_smmu_ahb_clk",
+			"mmss_bimc_smmu_axi_clk";
+		#clock-cells = <1>;
+		qcom,msm-bus,name = "smmu-bus-client-mmss";
+		qcom,msm-bus,num-cases = <2>;
+		qcom,msm-bus,active-only;
+		qcom,msm-bus,num-paths = <2>;
+		/* ahb_clk_src, mmssnoc_axi_clk */
+		qcom,msm-bus,vectors-KBps =
+				<102 722 0 0>, <29 512 0 0>,
+				<102 722 0 1000>, <29 512 0 1000>;
+	};
+
+	kgsl_smmu: arm,smmu-kgsl@5040000 {
+		status = "ok";
+		compatible = "qcom,smmu-v2";
+		reg = <0x5040000 0x10000>;
+		#iommu-cells = <1>;
+		qcom-tz-device-id = "GPU";
+		qcom,dynamic;
+		qcom,register-save;
+		qcom,skip-init;
+		#global-interrupts = <0>;
+		interrupts = <GIC_SPI 329 IRQ_TYPE_LEVEL_HIGH>,
+			   <GIC_SPI 330 IRQ_TYPE_LEVEL_HIGH>,
+			   <GIC_SPI 331 IRQ_TYPE_EDGE_RISING>;
+		qcom,deferred-regulator-disable-delay = <80>;
+		vdd-supply = <&gdsc_gpu_cx>;
+		clocks = <&clock_gcc clk_gcc_gpu_cfg_ahb_clk>,
+			<&clock_gcc clk_gcc_bimc_gfx_clk>,
+			<&clock_gcc clk_gcc_gpu_bimc_gfx_clk>;
+
+		clock-names = "gcc_gpu_cfg_ahb_clk",
+			"gcc_bimc_gfx_clk",
+			"gcc_gpu_bimc_gfx_clk";
+		#clock-cells = <1>;
+	};
+
+	iommu_test_device {
+		compatible = "iommu-debug-test";
+		/*
+		 * 42 shouldn't be used by anyone on the mmss_smmu.  We just
+		 * need _something_ here to get this node recognized by the
+		 * SMMU driver. Our test uses ATOS, which doesn't use SIDs
+		 * anyway, so using a dummy value is ok.
+		 */
+		iommus = <&mmss_smmu 42>;
+	};
+
+	iommu_coherent_test_device {
+		compatible = "iommu-debug-test";
+		/*
+		 * 43 shouldn't be used by anyone on the mmss_smmu.  We just
+		 * need _something_ here to get this node recognized by the
+		 * SMMU driver. Our test uses ATOS, which doesn't use SIDs
+		 * anyway, so using a dummy value is ok.
+		 */
+		iommus = <&mmss_smmu 43>;
+		dma-coherent;
+	};
+};
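+
+/*
+ * With #iommu-cells = <1>, every iommus entry carries a single stream ID.
+ * A sketch of how a client's SIDs can be enumerated (the SMMU driver's
+ * actual bookkeeping may differ):
+ *
+ *	struct of_phandle_args args;
+ *	int i = 0;
+ *
+ *	while (!of_parse_phandle_with_args(np, "iommus", "#iommu-cells",
+ *					   i++, &args)) {
+ *		u32 sid = args.args[0];
+ *		of_node_put(args.np);
+ *	}
+ */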
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/arch/arm64/boot/dts/qcom/msm-arm-smmu-impl-defs-8998.dtsi	2019-01-22 16:16:21.179225361 +0100
@@ -0,0 +1,565 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+
+&kgsl_smmu {
+	attach-impl-defs = <0x6000 0x2378>,
+		<0x6060 0x1055>,
+		<0x6470 0x110011>,
+		<0x6478 0x0>,
+		<0x647c 0x1000100>,
+		<0x6480 0x81108110>,
+		<0x6484 0x81108110>,
+		<0x6488 0x3e003e0>,
+		<0x648c 0x3e003e0>,
+		<0x6490 0x80008010>,
+		<0x6494 0x8020>,
+		<0x649c 0x6>,
+		<0x6800 0x6>,
+		<0x6900 0x3ff>,
+		<0x6924 0x604>,
+		<0x6928 0x11000>,
+		<0x6930 0x800>,
+		<0x6960 0x3>,
+		<0x6b64 0x1a5551>,
+		<0x6b68 0x2aaa2f82>;
+};
+
+&lpass_q6_smmu {
+	attach-impl-defs = <0x6000 0x3270>,
+		<0x6060 0x1055>,
+		<0x6070 0xe0>,
+		<0x6074 0xe0>,
+		<0x6078 0xe0>,
+		<0x607c 0xe0>,
+		<0x60f0 0xc0>,
+		<0x60f4 0xc8>,
+		<0x60f8 0xd0>,
+		<0x60fc 0xd8>,
+		<0x6170 0x0>,
+		<0x6174 0x30>,
+		<0x6178 0x60>,
+		<0x617c 0x90>,
+		<0x6270 0x0>,
+		<0x6274 0x2>,
+		<0x6278 0x4>,
+		<0x627c 0x6>,
+		<0x62f0 0x8>,
+		<0x62f4 0xe>,
+		<0x62f8 0x14>,
+		<0x62fc 0x1a>,
+		<0x6370 0x20>,
+		<0x6374 0x40>,
+		<0x6378 0x60>,
+		<0x637c 0x80>,
+		<0x67a0 0x0>,
+		<0x67a4 0x0>,
+		<0x67a8 0x20>,
+		<0x67b0 0x0>,
+		<0x67b4 0x8>,
+		<0x67b8 0xc8>,
+		<0x67d0 0x4>,
+		<0x67dc 0x8>,
+		<0x67e0 0x8>,
+		<0x6800 0x6>,
+		<0x6900 0x3ff>,
+		<0x6924 0x202>,
+		<0x6928 0x10a00>,
+		<0x6930 0x500>,
+		<0x6b64 0x121151>,
+		<0x6b68 0x8a840080>,
+		<0x6c00 0x0>,
+		<0x6c04 0x0>,
+		<0x6c08 0x0>,
+		<0x6c0c 0x0>,
+		<0x6c10 0x1>,
+		<0x6c14 0x1>,
+		<0x6c18 0x1>,
+		<0x6c1c 0x1>,
+		<0x6c20 0x2>,
+		<0x6c24 0x2>,
+		<0x6c28 0x2>,
+		<0x6c2c 0x2>,
+		<0x6c30 0x3>,
+		<0x6c34 0x3>,
+		<0x6c38 0x3>,
+		<0x6c3c 0x3>;
+};
+
+&mmss_smmu {
+	attach-impl-defs = <0x6000 0x3270>,
+		<0x6060 0x1055>,
+		<0x6800 0x6>,
+		<0x6900 0x3ff>,
+		<0x6924 0x204>,
+		<0x6928 0x11002>,
+		<0x6930 0x800>,
+		<0x6960 0xffffffff>,
+		<0x6964 0xffffffff>,
+		<0x6968 0xffffffff>,
+		<0x696c 0xffffffff>,
+		<0x6b48 0x330330>,
+		<0x6b4c 0x81>,
+		<0x6b50 0x3333>,
+		<0x6b54 0x3333>,
+		<0x6b64 0x1a5555>,
+		<0x6b68 0x9aaa892a>,
+		<0x6b70 0x10100002>,
+		<0x6b74 0x10100002>,
+		<0x6b78 0x10100002>,
+		<0x6b80 0x20042004>,
+		<0x6b84 0x20042004>;
+};
+
+&anoc1_smmu {
+	attach-impl-defs = <0x6000 0x3270>,
+		<0x6060 0x1055>,
+		<0x6070 0x19>,
+		<0x6074 0x39>,
+		<0x6078 0x41>,
+		<0x607c 0x59>,
+		<0x6080 0x95>,
+		<0x6084 0x98>,
+		<0x6088 0xc0>,
+		<0x608c 0xc0>,
+		<0x60f0 0x0>,
+		<0x60f4 0x2>,
+		<0x60f8 0x5>,
+		<0x60fc 0x8>,
+		<0x6100 0x16>,
+		<0x6104 0x17>,
+		<0x6108 0x19>,
+		<0x610c 0x19>,
+		<0x6170 0x0>,
+		<0x6174 0x0>,
+		<0x6178 0x0>,
+		<0x617c 0x0>,
+		<0x6180 0x0>,
+		<0x6184 0x0>,
+		<0x6188 0x0>,
+		<0x618c 0x0>,
+		<0x6270 0x0>,
+		<0x6274 0xd>,
+		<0x6278 0xe>,
+		<0x627c 0x12>,
+		<0x6280 0x16>,
+		<0x6284 0x16>,
+		<0x6288 0x18>,
+		<0x628c 0x18>,
+		<0x62f0 0x18>,
+		<0x62f4 0x1b>,
+		<0x62f8 0x1c>,
+		<0x62fc 0x24>,
+		<0x6300 0x28>,
+		<0x6304 0x2b>,
+		<0x6308 0x31>,
+		<0x630c 0x31>,
+		<0x6370 0x31>,
+		<0x6374 0x34>,
+		<0x6378 0x35>,
+		<0x637c 0x47>,
+		<0x6380 0x4f>,
+		<0x6384 0x54>,
+		<0x6388 0x60>,
+		<0x638c 0x60>,
+		<0x67a0 0x0>,
+		<0x67a4 0xa7>,
+		<0x67a8 0xc0>,
+		<0x67b0 0x0>,
+		<0x67b4 0x18>,
+		<0x67b8 0x7c>,
+		<0x67d0 0x0>,
+		<0x67dc 0x4>,
+		<0x67e0 0x8>,
+		<0x6800 0x6>,
+		<0x6900 0x3ff>,
+		<0x6b64 0x121151>,
+		<0x6b68 0xbb804080>,
+		<0x6c00 0x0>,
+		<0x6c04 0x0>,
+		<0x6c08 0x0>,
+		<0x6c0c 0x0>,
+		<0x6c10 0x0>,
+		<0x6c14 0x0>,
+		<0x6c18 0x0>,
+		<0x6c1c 0x0>,
+		<0x6c20 0x0>,
+		<0x6c24 0x0>,
+		<0x6c28 0x0>,
+		<0x6c2c 0x0>,
+		<0x6c30 0x0>,
+		<0x6c34 0x0>,
+		<0x6c38 0x0>,
+		<0x6c3c 0x0>,
+		<0x6c40 0x0>,
+		<0x6c44 0x0>,
+		<0x6c48 0x0>,
+		<0x6c4c 0x0>,
+		<0x6c50 0x0>,
+		<0x6c54 0x0>,
+		<0x6c58 0x0>,
+		<0x6c5c 0x0>,
+		<0x6c60 0x0>,
+		<0x6c64 0x0>,
+		<0x6c68 0x0>,
+		<0x6c6c 0x0>,
+		<0x6c70 0x0>,
+		<0x6c74 0x0>,
+		<0x6c78 0x0>,
+		<0x6c7c 0x0>,
+		<0x6c80 0x0>,
+		<0x6c84 0x1>,
+		<0x6c88 0x0>,
+		<0x6c8c 0x0>,
+		<0x6c90 0x4>,
+		<0x6c94 0x3>,
+		<0x6c98 0x2>,
+		<0x6c9c 0x0>,
+		<0x6ca0 0x5>,
+		<0x6ca4 0x5>,
+		<0x6ca8 0x0>,
+		<0x6cac 0x0>,
+		<0x6cb0 0x0>,
+		<0x6cb4 0x0>,
+		<0x6cb8 0x0>,
+		<0x6cbc 0x0>,
+		<0x6cc0 0x0>,
+		<0x6cc4 0x0>,
+		<0x6cc8 0x0>,
+		<0x6ccc 0x0>,
+		<0x6cd0 0x0>,
+		<0x6cd4 0x0>,
+		<0x6cd8 0x0>,
+		<0x6cdc 0x0>,
+		<0x6ce0 0x0>,
+		<0x6ce4 0x0>,
+		<0x6ce8 0x0>,
+		<0x6cec 0x0>,
+		<0x6cf0 0x0>,
+		<0x6cf4 0x0>,
+		<0x6cf8 0x0>,
+		<0x6cfc 0x0>,
+		<0x6d00 0x0>,
+		<0x6d04 0x0>,
+		<0x6d08 0x0>,
+		<0x6d0c 0x0>,
+		<0x6d10 0x0>,
+		<0x6d14 0x0>,
+		<0x6d18 0x0>,
+		<0x6d1c 0x0>,
+		<0x6d20 0x0>,
+		<0x6d24 0x0>,
+		<0x6d28 0x0>,
+		<0x6d2c 0x0>,
+		<0x6d30 0x0>,
+		<0x6d34 0x0>,
+		<0x6d38 0x0>,
+		<0x6d3c 0x0>,
+		<0x6d40 0x0>,
+		<0x6d44 0x0>,
+		<0x6d48 0x0>,
+		<0x6d4c 0x0>,
+		<0x6d50 0x0>,
+		<0x6d54 0x0>,
+		<0x6d58 0x0>,
+		<0x6d5c 0x0>,
+		<0x6d60 0x0>,
+		<0x6d64 0x0>,
+		<0x6d68 0x0>,
+		<0x6d6c 0x0>,
+		<0x6d70 0x0>,
+		<0x6d74 0x0>,
+		<0x6d78 0x0>,
+		<0x6d7c 0x0>,
+		<0x6d80 0x0>,
+		<0x6d84 0x0>,
+		<0x6d88 0x0>,
+		<0x6d8c 0x0>,
+		<0x6d90 0x0>,
+		<0x6d94 0x0>,
+		<0x6d98 0x0>,
+		<0x6d9c 0x0>,
+		<0x6da0 0x0>,
+		<0x6da4 0x0>,
+		<0x6da8 0x0>,
+		<0x6dac 0x0>,
+		<0x6db0 0x0>,
+		<0x6db4 0x0>,
+		<0x6db8 0x0>,
+		<0x6dbc 0x0>,
+		<0x6dc0 0x0>,
+		<0x6dc4 0x0>,
+		<0x6dc8 0x0>,
+		<0x6dcc 0x0>,
+		<0x6dd0 0x0>,
+		<0x6dd4 0x0>,
+		<0x6dd8 0x0>,
+		<0x6ddc 0x0>,
+		<0x6de0 0x0>,
+		<0x6de4 0x0>,
+		<0x6de8 0x0>,
+		<0x6dec 0x0>,
+		<0x6df0 0x0>,
+		<0x6df4 0x0>,
+		<0x6df8 0x0>,
+		<0x6dfc 0x0>;
+};
+
+&anoc2_smmu {
+	attach-impl-defs = <0x6000 0x3270>,
+		<0x6060 0x1055>,
+		<0x6070 0x12>,
+		<0x6074 0x26>,
+		<0x6078 0x3a>,
+		<0x607c 0x3c>,
+		<0x6080 0x3f>,
+		<0x6084 0x67>,
+		<0x6088 0x6c>,
+		<0x608c 0x74>,
+		<0x6090 0x7c>,
+		<0x6094 0x80>,
+		<0x6098 0xa0>,
+		<0x609c 0xa0>,
+		<0x60a0 0xa0>,
+		<0x60a4 0xa0>,
+		<0x60a8 0xa0>,
+		<0x60ac 0xa0>,
+		<0x60f0 0x0>,
+		<0x60f4 0x1>,
+		<0x60f8 0x3>,
+		<0x60fc 0x4>,
+		<0x6100 0x5>,
+		<0x6104 0x7>,
+		<0x6108 0x8>,
+		<0x610c 0x10>,
+		<0x6110 0x10>,
+		<0x6114 0x10>,
+		<0x6118 0x12>,
+		<0x611c 0x12>,
+		<0x6120 0x12>,
+		<0x6124 0x12>,
+		<0x6128 0x12>,
+		<0x612c 0x12>,
+		<0x6170 0x0>,
+		<0x6174 0x0>,
+		<0x6178 0x0>,
+		<0x617c 0x0>,
+		<0x6180 0x0>,
+		<0x6184 0x0>,
+		<0x6188 0x0>,
+		<0x618c 0x0>,
+		<0x6190 0x0>,
+		<0x6194 0x0>,
+		<0x6198 0x0>,
+		<0x619c 0x0>,
+		<0x61a0 0x0>,
+		<0x61a4 0x0>,
+		<0x61a8 0x0>,
+		<0x61ac 0x0>,
+		<0x6270 0x0>,
+		<0x6274 0x1>,
+		<0x6278 0x2>,
+		<0x627c 0x4>,
+		<0x6280 0x4>,
+		<0x6284 0x6>,
+		<0x6288 0x6>,
+		<0x628c 0x18>,
+		<0x6290 0x1a>,
+		<0x6294 0x1a>,
+		<0x6298 0x1e>,
+		<0x629c 0x1e>,
+		<0x62a0 0x1e>,
+		<0x62a4 0x1e>,
+		<0x62a8 0x1e>,
+		<0x62ac 0x1e>,
+		<0x62f0 0x1e>,
+		<0x62f4 0x24>,
+		<0x62f8 0x2a>,
+		<0x62fc 0x2c>,
+		<0x6300 0x2d>,
+		<0x6304 0x33>,
+		<0x6308 0x34>,
+		<0x630c 0x3a>,
+		<0x6310 0x3c>,
+		<0x6314 0x44>,
+		<0x6318 0x48>,
+		<0x631c 0x48>,
+		<0x6320 0x48>,
+		<0x6324 0x48>,
+		<0x6328 0x48>,
+		<0x632c 0x48>,
+		<0x6370 0x48>,
+		<0x6374 0x4d>,
+		<0x6378 0x52>,
+		<0x637c 0x56>,
+		<0x6380 0x59>,
+		<0x6384 0x63>,
+		<0x6388 0x68>,
+		<0x638c 0x70>,
+		<0x6390 0x78>,
+		<0x6394 0x88>,
+		<0x6398 0x90>,
+		<0x639c 0x90>,
+		<0x63a0 0x90>,
+		<0x63a4 0x90>,
+		<0x63a8 0x90>,
+		<0x63ac 0x90>,
+		<0x67a0 0x0>,
+		<0x67a4 0x8e>,
+		<0x67a8 0xa0>,
+		<0x67b0 0x0>,
+		<0x67b4 0x1e>,
+		<0x67b8 0xc6>,
+		<0x67d0 0x0>,
+		<0x67dc 0x4>,
+		<0x67e0 0x8>,
+		<0x6800 0x6>,
+		<0x6900 0x3ff>,
+		<0x6b48 0x330331>,
+		<0x6b4c 0x81>,
+		<0x6b50 0x1313>,
+		<0x6b64 0x121155>,
+		<0x6b68 0xea880920>,
+		<0x6b70 0x10100101>,
+		<0x6b74 0xc0c0000>,
+		<0x6b78 0xc0c0000>,
+		<0x6b80 0x20012001>,
+		<0x6b84 0x20012001>,
+		<0x6c00 0x5>,
+		<0x6c04 0x0>,
+		<0x6c08 0x5>,
+		<0x6c0c 0x0>,
+		<0x6c10 0x5>,
+		<0x6c14 0x0>,
+		<0x6c18 0x5>,
+		<0x6c1c 0x0>,
+		<0x6c20 0x5>,
+		<0x6c24 0x0>,
+		<0x6c28 0x0>,
+		<0x6c2c 0x0>,
+		<0x6c30 0x0>,
+		<0x6c34 0x0>,
+		<0x6c38 0x0>,
+		<0x6c3c 0x0>,
+		<0x6c40 0x0>,
+		<0x6c44 0x0>,
+		<0x6c48 0x0>,
+		<0x6c4c 0x0>,
+		<0x6c50 0x0>,
+		<0x6c54 0x0>,
+		<0x6c58 0x0>,
+		<0x6c5c 0x0>,
+		<0x6c60 0x0>,
+		<0x6c64 0x0>,
+		<0x6c68 0x0>,
+		<0x6c6c 0x0>,
+		<0x6c70 0x0>,
+		<0x6c74 0x0>,
+		<0x6c78 0x0>,
+		<0x6c7c 0x0>,
+		<0x6c80 0x0>,
+		<0x6c84 0x0>,
+		<0x6c88 0x0>,
+		<0x6c8c 0x0>,
+		<0x6c90 0x0>,
+		<0x6c94 0x0>,
+		<0x6c98 0x0>,
+		<0x6c9c 0x0>,
+		<0x6ca0 0x0>,
+		<0x6ca4 0x0>,
+		<0x6ca8 0x0>,
+		<0x6cac 0x0>,
+		<0x6cb0 0x0>,
+		<0x6cb4 0x0>,
+		<0x6cb8 0x0>,
+		<0x6cbc 0x0>,
+		<0x6cc0 0x0>,
+		<0x6cc4 0x0>,
+		<0x6cc8 0x0>,
+		<0x6ccc 0x0>,
+		<0x6cd0 0x0>,
+		<0x6cd4 0x0>,
+		<0x6cd8 0x0>,
+		<0x6cdc 0x0>,
+		<0x6ce0 0x0>,
+		<0x6ce4 0x0>,
+		<0x6ce8 0x0>,
+		<0x6cec 0x0>,
+		<0x6cf0 0x0>,
+		<0x6cf4 0x0>,
+		<0x6cf8 0x0>,
+		<0x6cfc 0x0>,
+		<0x6d00 0x8>,
+		<0x6d04 0x0>,
+		<0x6d08 0x8>,
+		<0x6d0c 0x0>,
+		<0x6d10 0x7>,
+		<0x6d14 0x0>,
+		<0x6d18 0x3>,
+		<0x6d1c 0x2>,
+		<0x6d20 0x4>,
+		<0x6d24 0x0>,
+		<0x6d28 0x4>,
+		<0x6d2c 0x0>,
+		<0x6d30 0x6>,
+		<0x6d34 0x0>,
+		<0x6d38 0x9>,
+		<0x6d3c 0x0>,
+		<0x6d40 0x0>,
+		<0x6d44 0x1>,
+		<0x6d48 0x4>,
+		<0x6d4c 0x0>,
+		<0x6d50 0x4>,
+		<0x6d54 0x0>,
+		<0x6d58 0x0>,
+		<0x6d5c 0x0>,
+		<0x6d60 0x0>,
+		<0x6d64 0x0>,
+		<0x6d68 0x0>,
+		<0x6d6c 0x0>,
+		<0x6d70 0x0>,
+		<0x6d74 0x0>,
+		<0x6d78 0x0>,
+		<0x6d7c 0x0>,
+		<0x6d80 0x0>,
+		<0x6d84 0x0>,
+		<0x6d88 0x0>,
+		<0x6d8c 0x0>,
+		<0x6d90 0x0>,
+		<0x6d94 0x0>,
+		<0x6d98 0x0>,
+		<0x6d9c 0x0>,
+		<0x6da0 0x0>,
+		<0x6da4 0x0>,
+		<0x6da8 0x0>,
+		<0x6dac 0x0>,
+		<0x6db0 0x0>,
+		<0x6db4 0x0>,
+		<0x6db8 0x0>,
+		<0x6dbc 0x0>,
+		<0x6dc0 0x0>,
+		<0x6dc4 0x0>,
+		<0x6dc8 0x0>,
+		<0x6dcc 0x0>,
+		<0x6dd0 0x0>,
+		<0x6dd4 0x0>,
+		<0x6dd8 0x0>,
+		<0x6ddc 0x0>,
+		<0x6de0 0x0>,
+		<0x6de4 0x0>,
+		<0x6de8 0x0>,
+		<0x6dec 0x0>,
+		<0x6df0 0x0>,
+		<0x6df4 0x0>,
+		<0x6df8 0x0>,
+		<0x6dfc 0x0>;
+};
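+
+/*
+ * attach-impl-defs pairs are <register_offset value> (the reverse of the
+ * qusb-phy-init-seq ordering) for the SMMU's implementation-defined
+ * register space. A sketch, assuming a verbatim write loop at domain
+ * attach:
+ *
+ *	for (i = 0; i < n; i += 2)
+ *		writel_relaxed(impl[i + 1], smmu_base + impl[i]);
+ */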
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/arch/arm64/boot/dts/qcom/msm-audio-lpass.dtsi	2019-10-29 09:26:22.905195956 +0100
@@ -0,0 +1,529 @@
+/*
+ * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+&soc {
+
+	pcm0: qcom,msm-pcm {
+		compatible = "qcom,msm-pcm-dsp";
+		qcom,msm-pcm-dsp-id = <0>;
+	};
+
+	routing: qcom,msm-pcm-routing {
+		compatible = "qcom,msm-pcm-routing";
+	};
+
+	compr: qcom,msm-compr-dsp {
+		compatible = "qcom,msm-compr-dsp";
+	};
+
+	pcm1: qcom,msm-pcm-low-latency {
+		compatible = "qcom,msm-pcm-dsp";
+		qcom,msm-pcm-dsp-id = <1>;
+		qcom,msm-pcm-low-latency;
+		qcom,latency-level = "regular";
+	};
+
+	pcm2: qcom,msm-ultra-low-latency {
+		compatible = "qcom,msm-pcm-dsp";
+		qcom,msm-pcm-dsp-id = <2>;
+		qcom,msm-pcm-low-latency;
+		qcom,latency-level = "ultra";
+	};
+
+	pcm_noirq: qcom,msm-pcm-dsp-noirq {
+		compatible = "qcom,msm-pcm-dsp-noirq";
+		qcom,msm-pcm-low-latency;
+		qcom,latency-level = "ultra";
+	};
+
+	compress: qcom,msm-compress-dsp {
+		compatible = "qcom,msm-compress-dsp";
+	};
+
+	voip: qcom,msm-voip-dsp {
+		compatible = "qcom,msm-voip-dsp";
+	};
+
+	voice: qcom,msm-pcm-voice {
+		compatible = "qcom,msm-pcm-voice";
+		qcom,destroy-cvd;
+	};
+
+	stub_codec: qcom,msm-stub-codec {
+		compatible = "qcom,msm-stub-codec";
+	};
+
+	qcom,msm-dai-fe {
+		compatible = "qcom,msm-dai-fe";
+	};
+
+	afe: qcom,msm-pcm-afe {
+		compatible = "qcom,msm-pcm-afe";
+	};
+
+	dai_hdmi: qcom,msm-dai-q6-hdmi {
+		compatible = "qcom,msm-dai-q6-hdmi";
+		qcom,msm-dai-q6-dev-id = <8>;
+	};
+
+	dai_dp: qcom,msm-dai-q6-dp {
+		compatible = "qcom,msm-dai-q6-hdmi";
+		qcom,msm-dai-q6-dev-id = <24608>;
+	};
+
+	loopback: qcom,msm-pcm-loopback {
+		compatible = "qcom,msm-pcm-loopback";
+	};
+
+	trans_loopback: qcom,msm-transcode-loopback {
+		compatible = "qcom,msm-transcode-loopback";
+	};
+
+	qcom,msm-dai-mi2s {
+		compatible = "qcom,msm-dai-mi2s";
+		dai_mi2s0: qcom,msm-dai-q6-mi2s-prim {
+			compatible = "qcom,msm-dai-q6-mi2s";
+			qcom,msm-dai-q6-mi2s-dev-id = <0>;
+			qcom,msm-mi2s-rx-lines = <3>;
+			qcom,msm-mi2s-tx-lines = <0>;
+		};
+
+		dai_mi2s1: qcom,msm-dai-q6-mi2s-sec {
+			compatible = "qcom,msm-dai-q6-mi2s";
+			qcom,msm-dai-q6-mi2s-dev-id = <1>;
+			qcom,msm-mi2s-rx-lines = <1>;
+			qcom,msm-mi2s-tx-lines = <0>;
+		};
+
+		dai_mi2s2: qcom,msm-dai-q6-mi2s-tert {
+			compatible = "qcom,msm-dai-q6-mi2s";
+			qcom,msm-dai-q6-mi2s-dev-id = <2>;
+			qcom,msm-mi2s-rx-lines = <0>;
+			qcom,msm-mi2s-tx-lines = <3>;
+		};
+
+		dai_mi2s3: qcom,msm-dai-q6-mi2s-quat {
+			compatible = "qcom,msm-dai-q6-mi2s";
+			qcom,msm-dai-q6-mi2s-dev-id = <3>;
+			qcom,msm-mi2s-rx-lines = <1>;
+			qcom,msm-mi2s-tx-lines = <2>;
+		};
+
+		dai_mi2s4: qcom,msm-dai-q6-mi2s-quin {
+			compatible = "qcom,msm-dai-q6-mi2s";
+			qcom,msm-dai-q6-mi2s-dev-id = <5>;
+			qcom,msm-mi2s-rx-lines = <1>;
+			qcom,msm-mi2s-tx-lines = <2>;
+		};
+
+		dai_mi2s5: qcom,msm-dai-q6-mi2s-senary {
+			compatible = "qcom,msm-dai-q6-mi2s";
+			qcom,msm-dai-q6-mi2s-dev-id = <6>;
+			qcom,msm-mi2s-rx-lines = <0>;
+			qcom,msm-mi2s-tx-lines = <3>;
+		};
+	};
+
+	lsm: qcom,msm-lsm-client {
+		compatible = "qcom,msm-lsm-client";
+	};
+
+	qcom,msm-dai-q6 {
+		compatible = "qcom,msm-dai-q6";
+		sb_0_rx: qcom,msm-dai-q6-sb-0-rx {
+			compatible = "qcom,msm-dai-q6-dev";
+			qcom,msm-dai-q6-dev-id = <16384>;
+		};
+
+		sb_0_tx: qcom,msm-dai-q6-sb-0-tx {
+			compatible = "qcom,msm-dai-q6-dev";
+			qcom,msm-dai-q6-dev-id = <16385>;
+		};
+
+		sb_1_rx: qcom,msm-dai-q6-sb-1-rx {
+			compatible = "qcom,msm-dai-q6-dev";
+			qcom,msm-dai-q6-dev-id = <16386>;
+		};
+
+		sb_1_tx: qcom,msm-dai-q6-sb-1-tx {
+			compatible = "qcom,msm-dai-q6-dev";
+			qcom,msm-dai-q6-dev-id = <16387>;
+		};
+
+		sb_2_rx: qcom,msm-dai-q6-sb-2-rx {
+			compatible = "qcom,msm-dai-q6-dev";
+			qcom,msm-dai-q6-dev-id = <16388>;
+		};
+
+		sb_2_tx: qcom,msm-dai-q6-sb-2-tx {
+			compatible = "qcom,msm-dai-q6-dev";
+			qcom,msm-dai-q6-dev-id = <16389>;
+		};
+
+		sb_3_rx: qcom,msm-dai-q6-sb-3-rx {
+			compatible = "qcom,msm-dai-q6-dev";
+			qcom,msm-dai-q6-dev-id = <16390>;
+		};
+
+		sb_3_tx: qcom,msm-dai-q6-sb-3-tx {
+			compatible = "qcom,msm-dai-q6-dev";
+			qcom,msm-dai-q6-dev-id = <16391>;
+		};
+
+		sb_4_rx: qcom,msm-dai-q6-sb-4-rx {
+			compatible = "qcom,msm-dai-q6-dev";
+			qcom,msm-dai-q6-dev-id = <16392>;
+		};
+
+		sb_4_tx: qcom,msm-dai-q6-sb-4-tx {
+			compatible = "qcom,msm-dai-q6-dev";
+			qcom,msm-dai-q6-dev-id = <16393>;
+		};
+
+		sb_5_tx: qcom,msm-dai-q6-sb-5-tx {
+			compatible = "qcom,msm-dai-q6-dev";
+			qcom,msm-dai-q6-dev-id = <16395>;
+		};
+
+		sb_5_rx: qcom,msm-dai-q6-sb-5-rx {
+			compatible = "qcom,msm-dai-q6-dev";
+			qcom,msm-dai-q6-dev-id = <16394>;
+		};
+
+		sb_6_rx: qcom,msm-dai-q6-sb-6-rx {
+			compatible = "qcom,msm-dai-q6-dev";
+			qcom,msm-dai-q6-dev-id = <16396>;
+		};
+
+		sb_7_rx: qcom,msm-dai-q6-sb-7-rx {
+			compatible = "qcom,msm-dai-q6-dev";
+			qcom,msm-dai-q6-dev-id = <16398>;
+		};
+
+		sb_7_tx: qcom,msm-dai-q6-sb-7-tx {
+			compatible = "qcom,msm-dai-q6-dev";
+			qcom,msm-dai-q6-dev-id = <16399>;
+		};
+
+		sb_8_rx: qcom,msm-dai-q6-sb-8-rx {
+			compatible = "qcom,msm-dai-q6-dev";
+			qcom,msm-dai-q6-dev-id = <16400>;
+		};
+
+		sb_8_tx: qcom,msm-dai-q6-sb-8-tx {
+			compatible = "qcom,msm-dai-q6-dev";
+			qcom,msm-dai-q6-dev-id = <16401>;
+		};
+
+		bt_sco_rx: qcom,msm-dai-q6-bt-sco-rx {
+			compatible = "qcom,msm-dai-q6-dev";
+			qcom,msm-dai-q6-dev-id = <12288>;
+		};
+
+		bt_sco_tx: qcom,msm-dai-q6-bt-sco-tx {
+			compatible = "qcom,msm-dai-q6-dev";
+			qcom,msm-dai-q6-dev-id = <12289>;
+		};
+
+		int_fm_rx: qcom,msm-dai-q6-int-fm-rx {
+			compatible = "qcom,msm-dai-q6-dev";
+			qcom,msm-dai-q6-dev-id = <12292>;
+		};
+
+		int_fm_tx: qcom,msm-dai-q6-int-fm-tx {
+			compatible = "qcom,msm-dai-q6-dev";
+			qcom,msm-dai-q6-dev-id = <12293>;
+		};
+
+		afe_pcm_rx: qcom,msm-dai-q6-be-afe-pcm-rx {
+			compatible = "qcom,msm-dai-q6-dev";
+			qcom,msm-dai-q6-dev-id = <224>;
+		};
+
+		afe_pcm_tx: qcom,msm-dai-q6-be-afe-pcm-tx {
+			compatible = "qcom,msm-dai-q6-dev";
+			qcom,msm-dai-q6-dev-id = <225>;
+		};
+
+		afe_proxy_rx: qcom,msm-dai-q6-afe-proxy-rx {
+			compatible = "qcom,msm-dai-q6-dev";
+			qcom,msm-dai-q6-dev-id = <241>;
+		};
+
+		afe_proxy_tx: qcom,msm-dai-q6-afe-proxy-tx {
+			compatible = "qcom,msm-dai-q6-dev";
+			qcom,msm-dai-q6-dev-id = <240>;
+		};
+
+		incall_record_rx: qcom,msm-dai-q6-incall-record-rx {
+			compatible = "qcom,msm-dai-q6-dev";
+			qcom,msm-dai-q6-dev-id = <32771>;
+		};
+
+		incall_record_tx: qcom,msm-dai-q6-incall-record-tx {
+			compatible = "qcom,msm-dai-q6-dev";
+			qcom,msm-dai-q6-dev-id = <32772>;
+		};
+
+		incall_music_rx: qcom,msm-dai-q6-incall-music-rx {
+			compatible = "qcom,msm-dai-q6-dev";
+			qcom,msm-dai-q6-dev-id = <32773>;
+		};
+
+		incall_music_2_rx: qcom,msm-dai-q6-incall-music-2-rx {
+			compatible = "qcom,msm-dai-q6-dev";
+			qcom,msm-dai-q6-dev-id = <32770>;
+		};
+
+		usb_audio_rx: qcom,msm-dai-q6-usb-audio-rx {
+			compatible = "qcom,msm-dai-q6-dev";
+			qcom,msm-dai-q6-dev-id = <28672>;
+		};
+
+		usb_audio_tx: qcom,msm-dai-q6-usb-audio-tx {
+			compatible = "qcom,msm-dai-q6-dev";
+			qcom,msm-dai-q6-dev-id = <28673>;
+		};
+	};
+
+	hostless: qcom,msm-pcm-hostless {
+		compatible = "qcom,msm-pcm-hostless";
+	};
+
+	dai_pri_auxpcm: qcom,msm-pri-auxpcm {
+		compatible = "qcom,msm-auxpcm-dev";
+		qcom,msm-cpudai-auxpcm-mode = <0>, <0>;
+		qcom,msm-cpudai-auxpcm-sync = <1>, <1>;
+		qcom,msm-cpudai-auxpcm-frame = <5>, <4>;
+		qcom,msm-cpudai-auxpcm-quant = <2>, <2>;
+		qcom,msm-cpudai-auxpcm-num-slots = <1>, <1>;
+		qcom,msm-cpudai-auxpcm-slot-mapping = <1>, <1>;
+		qcom,msm-cpudai-auxpcm-data = <0>, <0>;
+		qcom,msm-cpudai-auxpcm-pcm-clk-rate = <2048000>, <2048000>;
+		qcom,msm-auxpcm-interface = "primary";
+		qcom,msm-cpudai-afe-clk-ver = <2>;
+	};
+
+	dai_sec_auxpcm: qcom,msm-sec-auxpcm {
+		compatible = "qcom,msm-auxpcm-dev";
+		qcom,msm-cpudai-auxpcm-mode = <0>, <0>;
+		qcom,msm-cpudai-auxpcm-sync = <1>, <1>;
+		qcom,msm-cpudai-auxpcm-frame = <5>, <4>;
+		qcom,msm-cpudai-auxpcm-quant = <2>, <2>;
+		qcom,msm-cpudai-auxpcm-num-slots = <1>, <1>;
+		qcom,msm-cpudai-auxpcm-slot-mapping = <1>, <1>;
+		qcom,msm-cpudai-auxpcm-data = <0>, <0>;
+		qcom,msm-cpudai-auxpcm-pcm-clk-rate = <2048000>, <2048000>;
+		qcom,msm-auxpcm-interface = "secondary";
+		qcom,msm-cpudai-afe-clk-ver = <2>;
+	};
+
+	dai_tert_auxpcm: qcom,msm-tert-auxpcm {
+		compatible = "qcom,msm-auxpcm-dev";
+		qcom,msm-cpudai-auxpcm-mode = <0>, <0>;
+		qcom,msm-cpudai-auxpcm-sync = <1>, <1>;
+		qcom,msm-cpudai-auxpcm-frame = <5>, <4>;
+		qcom,msm-cpudai-auxpcm-quant = <2>, <2>;
+		qcom,msm-cpudai-auxpcm-num-slots = <1>, <1>;
+		qcom,msm-cpudai-auxpcm-slot-mapping = <1>, <1>;
+		qcom,msm-cpudai-auxpcm-data = <0>, <0>;
+		qcom,msm-cpudai-auxpcm-pcm-clk-rate = <2048000>, <2048000>;
+		qcom,msm-auxpcm-interface = "tertiary";
+		qcom,msm-cpudai-afe-clk-ver = <2>;
+	};
+
+	dai_quat_auxpcm: qcom,msm-quat-auxpcm {
+		compatible = "qcom,msm-auxpcm-dev";
+		qcom,msm-cpudai-auxpcm-mode = <0>, <0>;
+		qcom,msm-cpudai-auxpcm-sync = <1>, <1>;
+		qcom,msm-cpudai-auxpcm-frame = <5>, <4>;
+		qcom,msm-cpudai-auxpcm-quant = <2>, <2>;
+		qcom,msm-cpudai-auxpcm-num-slots = <1>, <1>;
+		qcom,msm-cpudai-auxpcm-slot-mapping = <1>, <1>;
+		qcom,msm-cpudai-auxpcm-data = <0>, <0>;
+		qcom,msm-cpudai-auxpcm-pcm-clk-rate = <2048000>, <2048000>;
+		qcom,msm-auxpcm-interface = "quaternary";
+		qcom,msm-cpudai-afe-clk-ver = <2>;
+	};
+
+	hdmi_dba: qcom,msm-hdmi-dba-codec-rx {
+		compatible = "qcom,msm-hdmi-dba-codec-rx";
+		qcom,dba-bridge-chip = "adv7533";
+	};
+
+	msm_audio_ion: qcom,msm-audio-ion {
+		compatible = "qcom,msm-audio-ion";
+		qcom,smmu-version = <1>;
+		qcom,smmu-enabled;
+		iommus = <&adsp_io 1>;
+	};
+
+	qcom,msm-adsp-loader {
+		status = "ok";
+		compatible = "qcom,adsp-loader";
+		qcom,adsp-state = <0>;
+	};
+
+	qcom,msm-dai-tdm-pri-rx {
+		compatible = "qcom,msm-dai-tdm";
+		qcom,msm-cpudai-tdm-group-id = <37120>;
+		qcom,msm-cpudai-tdm-group-num-ports = <1>;
+		qcom,msm-cpudai-tdm-group-port-id = <36864>;
+		qcom,msm-cpudai-tdm-clk-rate = <1536000>;
+		qcom,msm-cpudai-tdm-clk-internal = <1>;
+		qcom,msm-cpudai-tdm-sync-mode = <1>;
+		qcom,msm-cpudai-tdm-sync-src = <1>;
+		qcom,msm-cpudai-tdm-data-out = <0>;
+		qcom,msm-cpudai-tdm-invert-sync = <1>;
+		qcom,msm-cpudai-tdm-data-delay = <1>;
+		dai_pri_tdm_rx_0: qcom,msm-dai-q6-tdm-pri-rx-0 {
+			compatible = "qcom,msm-dai-q6-tdm";
+			qcom,msm-cpudai-tdm-dev-id = <36864>;
+			qcom,msm-cpudai-tdm-data-align = <0>;
+		};
+	};
+
+	qcom,msm-dai-tdm-pri-tx {
+		compatible = "qcom,msm-dai-tdm";
+		qcom,msm-cpudai-tdm-group-id = <37121>;
+		qcom,msm-cpudai-tdm-group-num-ports = <1>;
+		qcom,msm-cpudai-tdm-group-port-id = <36865>;
+		qcom,msm-cpudai-tdm-clk-rate = <1536000>;
+		qcom,msm-cpudai-tdm-clk-internal = <1>;
+		qcom,msm-cpudai-tdm-sync-mode = <1>;
+		qcom,msm-cpudai-tdm-sync-src = <1>;
+		qcom,msm-cpudai-tdm-data-out = <0>;
+		qcom,msm-cpudai-tdm-invert-sync = <1>;
+		qcom,msm-cpudai-tdm-data-delay = <1>;
+		dai_pri_tdm_tx_0: qcom,msm-dai-q6-tdm-pri-tx-0 {
+			compatible = "qcom,msm-dai-q6-tdm";
+			qcom,msm-cpudai-tdm-dev-id = <36865>;
+			qcom,msm-cpudai-tdm-data-align = <0>;
+		};
+	};
+
+	qcom,msm-dai-tdm-sec-rx {
+		compatible = "qcom,msm-dai-tdm";
+		qcom,msm-cpudai-tdm-group-id = <37136>;
+		qcom,msm-cpudai-tdm-group-num-ports = <1>;
+		qcom,msm-cpudai-tdm-group-port-id = <36880>;
+		qcom,msm-cpudai-tdm-clk-rate = <1536000>;
+		qcom,msm-cpudai-tdm-clk-internal = <1>;
+		qcom,msm-cpudai-tdm-sync-mode = <1>;
+		qcom,msm-cpudai-tdm-sync-src = <1>;
+		qcom,msm-cpudai-tdm-data-out = <0>;
+		qcom,msm-cpudai-tdm-invert-sync = <1>;
+		qcom,msm-cpudai-tdm-data-delay = <1>;
+		dai_sec_tdm_rx_0: qcom,msm-dai-q6-tdm-sec-rx-0 {
+			compatible = "qcom,msm-dai-q6-tdm";
+			qcom,msm-cpudai-tdm-dev-id = <36880>;
+			qcom,msm-cpudai-tdm-data-align = <0>;
+		};
+	};
+
+	qcom,msm-dai-tdm-sec-tx {
+		compatible = "qcom,msm-dai-tdm";
+		qcom,msm-cpudai-tdm-group-id = <37137>;
+		qcom,msm-cpudai-tdm-group-num-ports = <1>;
+		qcom,msm-cpudai-tdm-group-port-id = <36881>;
+		qcom,msm-cpudai-tdm-clk-rate = <1536000>;
+		qcom,msm-cpudai-tdm-clk-internal = <1>;
+		qcom,msm-cpudai-tdm-sync-mode = <1>;
+		qcom,msm-cpudai-tdm-sync-src = <1>;
+		qcom,msm-cpudai-tdm-data-out = <0>;
+		qcom,msm-cpudai-tdm-invert-sync = <1>;
+		qcom,msm-cpudai-tdm-data-delay = <1>;
+		dai_sec_tdm_tx_0: qcom,msm-dai-q6-tdm-sec-tx-0 {
+			compatible = "qcom,msm-dai-q6-tdm";
+			qcom,msm-cpudai-tdm-dev-id = <36881>;
+			qcom,msm-cpudai-tdm-data-align = <0>;
+		};
+	};
+
+	qcom,msm-dai-tdm-tert-rx {
+		compatible = "qcom,msm-dai-tdm";
+		qcom,msm-cpudai-tdm-group-id = <37152>;
+		qcom,msm-cpudai-tdm-group-num-ports = <1>;
+		qcom,msm-cpudai-tdm-group-port-id = <36896>;
+		qcom,msm-cpudai-tdm-clk-rate = <1536000>;
+		qcom,msm-cpudai-tdm-clk-internal = <1>;
+		qcom,msm-cpudai-tdm-sync-mode = <1>;
+		qcom,msm-cpudai-tdm-sync-src = <1>;
+		qcom,msm-cpudai-tdm-data-out = <0>;
+		qcom,msm-cpudai-tdm-invert-sync = <1>;
+		qcom,msm-cpudai-tdm-data-delay = <1>;
+		dai_tert_tdm_rx_0: qcom,msm-dai-q6-tdm-tert-rx-0 {
+			compatible = "qcom,msm-dai-q6-tdm";
+			qcom,msm-cpudai-tdm-dev-id = <36896>;
+			qcom,msm-cpudai-tdm-data-align = <0>;
+		};
+	};
+
+	qcom,msm-dai-tdm-tert-tx {
+		compatible = "qcom,msm-dai-tdm";
+		qcom,msm-cpudai-tdm-group-id = <37153>;
+		qcom,msm-cpudai-tdm-group-num-ports = <1>;
+		qcom,msm-cpudai-tdm-group-port-id = <36897>;
+		qcom,msm-cpudai-tdm-clk-rate = <1536000>;
+		qcom,msm-cpudai-tdm-clk-internal = <1>;
+		qcom,msm-cpudai-tdm-sync-mode = <1>;
+		qcom,msm-cpudai-tdm-sync-src = <1>;
+		qcom,msm-cpudai-tdm-data-out = <0>;
+		qcom,msm-cpudai-tdm-invert-sync = <1>;
+		qcom,msm-cpudai-tdm-data-delay = <1>;
+		dai_tert_tdm_tx_0: qcom,msm-dai-q6-tdm-tert-tx-0 {
+			compatible = "qcom,msm-dai-q6-tdm";
+			qcom,msm-cpudai-tdm-dev-id = <36897>;
+			qcom,msm-cpudai-tdm-data-align = <0>;
+		};
+	};
+
+	qcom,msm-dai-tdm-quat-rx {
+		compatible = "qcom,msm-dai-tdm";
+		qcom,msm-cpudai-tdm-group-id = <37168>;
+		qcom,msm-cpudai-tdm-group-num-ports = <1>;
+		qcom,msm-cpudai-tdm-group-port-id = <36912>;
+		qcom,msm-cpudai-tdm-clk-rate = <1536000>;
+		qcom,msm-cpudai-tdm-clk-internal = <1>;
+		qcom,msm-cpudai-tdm-sync-mode = <1>;
+		qcom,msm-cpudai-tdm-sync-src = <1>;
+		qcom,msm-cpudai-tdm-data-out = <0>;
+		qcom,msm-cpudai-tdm-invert-sync = <1>;
+		qcom,msm-cpudai-tdm-data-delay = <1>;
+		dai_quat_tdm_rx_0: qcom,msm-dai-q6-tdm-quat-rx-0 {
+			compatible = "qcom,msm-dai-q6-tdm";
+			qcom,msm-cpudai-tdm-dev-id = <36912>;
+			qcom,msm-cpudai-tdm-data-align = <0>;
+		};
+	};
+
+	qcom,msm-dai-tdm-quat-tx {
+		compatible = "qcom,msm-dai-tdm";
+		qcom,msm-cpudai-tdm-group-id = <37169>;
+		qcom,msm-cpudai-tdm-group-num-ports = <1>;
+		qcom,msm-cpudai-tdm-group-port-id = <36913>;
+		qcom,msm-cpudai-tdm-clk-rate = <1536000>;
+		qcom,msm-cpudai-tdm-clk-internal = <1>;
+		qcom,msm-cpudai-tdm-sync-mode = <1>;
+		qcom,msm-cpudai-tdm-sync-src = <1>;
+		qcom,msm-cpudai-tdm-data-out = <0>;
+		qcom,msm-cpudai-tdm-invert-sync = <1>;
+		qcom,msm-cpudai-tdm-data-delay = <1>;
+		dai_quat_tdm_tx_0: qcom,msm-dai-q6-tdm-quat-tx-0 {
+			compatible = "qcom,msm-dai-q6-tdm";
+			qcom,msm-cpudai-tdm-dev-id = <36913>;
+			qcom,msm-cpudai-tdm-data-align = <0>;
+		};
+	};
+};
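+
+/*
+ * Frame-size arithmetic for the bit clocks above, assuming the usual
+ * 8 kHz PCM sample rate (plain division, not a driver detail):
+ *
+ *	2048000 / 8000 = 256 bits per frame	(auxpcm pcm-clk-rate)
+ *	1536000 / 8000 = 192 bits per frame	(TDM clk-rate, e.g. 12
+ *						 slots of 16 bits)
+ */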
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/arch/arm64/boot/dts/qcom/msm-gdsc-8998.dtsi	2019-01-22 16:16:21.179225361 +0100
@@ -0,0 +1,146 @@
+/*
+ * Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+&soc {
+	/* GCC GDSCs */
+	gdsc_usb30: qcom,gdsc@10f004 {
+		compatible = "qcom,gdsc";
+		regulator-name = "gdsc_usb30";
+		reg = <0x10f004 0x4>;
+		status = "disabled";
+	};
+
+	gdsc_pcie_0: qcom,gdsc@16b004 {
+		compatible = "qcom,gdsc";
+		regulator-name = "gdsc_pcie_0";
+		reg = <0x16b004 0x4>;
+		status = "disabled";
+	};
+
+	gdsc_ufs: qcom,gdsc@175004 {
+		compatible = "qcom,gdsc";
+		regulator-name = "gdsc_ufs";
+		reg = <0x175004 0x4>;
+		status = "disabled";
+	};
+
+	gdsc_hlos1_vote_lpass_adsp: qcom,gdsc@17d034 {
+		compatible = "qcom,gdsc";
+		regulator-name = "gdsc_hlos1_vote_lpass_adsp";
+		reg = <0x17d034 0x4>;
+		qcom,no-status-check-on-disable;
+		qcom,gds-timeout = <500>;
+		status = "disabled";
+	};
+
+	gdsc_hlos1_vote_lpass_core: qcom,gdsc@17d038 {
+		compatible = "qcom,gdsc";
+		regulator-name = "gdsc_hlos1_vote_lpass_core";
+		reg = <0x17d038 0x4>;
+		qcom,no-status-check-on-disable;
+		qcom,gds-timeout = <500>;
+		status = "disabled";
+	};
+
+	/* MMSS GDSCs */
+	gdsc_bimc_smmu: qcom,gdsc@c8ce020 {
+		compatible = "qcom,gdsc";
+		regulator-name = "gdsc_bimc_smmu";
+		reg = <0xc8ce020 0x4>,
+		      <0xc8ce024 0x4>;
+		reg-names = "base", "hw_ctrl_addr";
+		qcom,no-status-check-on-disable;
+		qcom,gds-timeout = <500>;
+		status = "disabled";
+	};
+
+	gdsc_venus: qcom,gdsc@c8c1024 {
+		compatible = "qcom,gdsc";
+		regulator-name = "gdsc_venus";
+		reg = <0xc8c1024 0x4>;
+		status = "disabled";
+	};
+
+	gdsc_venus_core0: qcom,gdsc@c8c1040 {
+		compatible = "qcom,gdsc";
+		regulator-name = "gdsc_venus_core0";
+		reg = <0xc8c1040 0x4>;
+		status = "disabled";
+	};
+
+	gdsc_venus_core1: qcom,gdsc@c8c1044 {
+		compatible = "qcom,gdsc";
+		regulator-name = "gdsc_venus_core1";
+		reg = <0xc8c1044 0x4>;
+		status = "disabled";
+	};
+
+	gdsc_camss_top: qcom,gdsc@c8c34a0 {
+		compatible = "qcom,gdsc";
+		regulator-name = "gdsc_camss_top";
+		reg = <0xc8c34a0 0x4>;
+		status = "disabled";
+	};
+
+	gdsc_vfe0: qcom,gdsc@c8c3664 {
+		compatible = "qcom,gdsc";
+		regulator-name = "gdsc_vfe0";
+		reg = <0xc8c3664 0x4>;
+		status = "disabled";
+	};
+
+	gdsc_vfe1: qcom,gdsc@c8c3674 {
+		compatible = "qcom,gdsc";
+		regulator-name = "gdsc_vfe1";
+		reg = <0xc8c3674 0x4>;
+		status = "disabled";
+	};
+
+	gdsc_cpp: qcom,gdsc@c8c36d4 {
+		compatible = "qcom,gdsc";
+		regulator-name = "gdsc_cpp";
+		reg = <0xc8c36d4 0x4>;
+		status = "disabled";
+	};
+
+	gdsc_mdss: qcom,gdsc@c8c2304 {
+		compatible = "qcom,gdsc";
+		regulator-name = "gdsc_mdss";
+		reg = <0xc8c2304 0x4>;
+		status = "disabled";
+	};
+
+	/* GPU GDSCs */
+	gdsc_gpu_cx: qcom,gdsc@5066004 {
+		compatible = "qcom,gdsc";
+		regulator-name = "gdsc_gpu_cx";
+		reg = <0x5066004 0x4>,
+		      <0x5066008 0x4>;
+		reg-names = "base", "hw_ctrl_addr";
+		qcom,no-status-check-on-disable;
+		qcom,gds-timeout = <500>;
+		status = "disabled";
+	};
+
+	gdsc_gpu_gx: qcom,gdsc@5066094 {
+		compatible = "qcom,gdsc";
+		regulator-name = "gdsc_gpu_gx";
+		reg = <0x5066094 0x4>,
+		      <0x5065130 0x4>,
+		      <0x5066090 0x4>;
+		reg-names = "base", "domain_addr", "sw_reset";
+		qcom,retain-periph;
+		qcom,reset-aon-logic;
+		status = "disabled";
+	};
+};
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/arch/arm64/boot/dts/qcom/msm-pm8005.dtsi	2019-01-22 16:16:21.179225361 +0100
@@ -0,0 +1,103 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <dt-bindings/spmi/spmi.h>
+#include <dt-bindings/interrupt-controller/irq.h>
+
+&spmi_bus {
+	qcom,pm8005@4 {
+		compatible = "qcom,spmi-pmic";
+		reg = <0x4 SPMI_USID>;
+		#address-cells = <2>;
+		#size-cells = <0>;
+
+		pm8005_revid: qcom,revid@100 {
+			compatible = "qcom,qpnp-revid";
+			reg = <0x100 0x100>;
+		};
+
+		qcom,temp-alarm@2400 {
+			compatible = "qcom,qpnp-temp-alarm";
+			reg = <0x2400 0x100>;
+			interrupts = <0x4 0x24 0x0 IRQ_TYPE_EDGE_RISING>;
+			label = "pm8005_tz";
+		};
+
+		pm8005_gpios: gpios {
+			compatible = "qcom,qpnp-pin";
+			gpio-controller;
+			#gpio-cells = <2>;
+			#address-cells = <1>;
+			#size-cells = <1>;
+			label = "pm8005-gpio";
+
+			gpio@c000 {
+				reg = <0xc000 0x100>;
+				qcom,pin-num = <1>;
+				status = "disabled";
+			};
+
+			gpio@c100 {
+				reg = <0xc100 0x100>;
+				qcom,pin-num = <2>;
+				status = "disabled";
+			};
+
+			gpio@c200 {
+				reg = <0xc200 0x100>;
+				qcom,pin-num = <3>;
+				status = "disabled";
+			};
+
+			gpio@c300 {
+				reg = <0xc300 0x100>;
+				qcom,pin-num = <4>;
+				status = "disabled";
+			};
+		};
+	};
+
+	qcom,pm8005@5 {
+		compatible ="qcom,spmi-pmic";
+		reg = <0x5 SPMI_USID>;
+		#address-cells = <2>;
+		#size-cells = <0>;
+
+		regulator@1400 {
+			compatible = "qcom,qpnp-regulator";
+			reg = <0x1400 0x100>;
+			regulator-name = "pm8005_s1";
+			status = "disabled";
+		};
+
+		regulator@1700 {
+			compatible = "qcom,qpnp-regulator";
+			reg = <0x1700 0x100>;
+			regulator-name = "pm8005_s2";
+			status = "disabled";
+		};
+
+		regulator@1a00 {
+			compatible = "qcom,qpnp-regulator";
+			reg = <0x1a00 0x100>;
+			regulator-name = "pm8005_s3";
+			status = "disabled";
+		};
+
+		regulator@1d00 {
+			compatible = "qcom,qpnp-regulator";
+			reg = <0x1d00 0x100>;
+			regulator-name = "pm8005_s4";
+			status = "disabled";
+		};
+	};
+};
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/arch/arm64/boot/dts/qcom/msm-pm8998.dtsi	2019-01-22 16:16:21.179225361 +0100
@@ -0,0 +1,328 @@
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <dt-bindings/spmi/spmi.h>
+#include <dt-bindings/interrupt-controller/irq.h>
+#include <dt-bindings/msm/power-on.h>
+
+&spmi_bus {
+	qcom,pm8998@0 {
+		compatible ="qcom,spmi-pmic";
+		reg = <0x0 SPMI_USID>;
+		#address-cells = <2>;
+		#size-cells = <0>;
+
+		pm8998_revid: qcom,revid@100 {
+			compatible = "qcom,qpnp-revid";
+			reg = <0x100 0x100>;
+		};
+
+		qcom,power-on@800 {
+			compatible = "qcom,qpnp-power-on";
+			reg = <0x800 0x100>;
+			interrupts = <0x0 0x8 0x0 IRQ_TYPE_NONE>,
+				     <0x0 0x8 0x1 IRQ_TYPE_NONE>,
+				     <0x0 0x8 0x4 IRQ_TYPE_NONE>,
+				     <0x0 0x8 0x5 IRQ_TYPE_NONE>;
+			interrupt-names = "kpdpwr", "resin",
+					"resin-bark", "kpdpwr-resin-bark";
+			qcom,pon-dbc-delay = <15625>;
+			qcom,system-reset;
+			qcom,store-hard-reset-reason;
+
+			qcom,pon_1 {
+				qcom,pon-type = <0>;
+				qcom,pull-up = <1>;
+				linux,code = <116>;
+			};
+
+			qcom,pon_2 {
+				qcom,pon-type = <1>;
+				qcom,pull-up = <1>;
+				linux,code = <114>;
+			};
+
+			qcom,pon_3 {
+				qcom,pon-type = <3>;
+				qcom,support-reset = <1>;
+				qcom,pull-up = <1>;
+				qcom,s1-timer = <6720>;
+				qcom,s2-timer = <2000>;
+				qcom,s2-type = <PON_POWER_OFF_DVDD_HARD_RESET>;
+				qcom,use-bark;
+			};
+		};
+
+		qcom,temp-alarm@2400 {
+			compatible = "qcom,qpnp-temp-alarm";
+			reg = <0x2400 0x100>;
+			interrupts = <0x0 0x24 0x0 IRQ_TYPE_EDGE_RISING>;
+			label = "pm8998_tz";
+			qcom,channel-num = <6>;
+			qcom,temp_alarm-vadc = <&pm8998_vadc>;
+		};
+
+		pm8998_gpios: gpios {
+			compatible = "qcom,qpnp-pin";
+			gpio-controller;
+			#gpio-cells = <2>;
+			#address-cells = <1>;
+			#size-cells = <1>;
+			label = "pm8998-gpio";
+
+			gpio@c000 {
+				reg = <0xc000 0x100>;
+				qcom,pin-num = <1>;
+				status = "disabled";
+			};
+
+			gpio@c100 {
+				reg = <0xc100 0x100>;
+				qcom,pin-num = <2>;
+				status = "disabled";
+			};
+
+			gpio@c200 {
+				reg = <0xc200 0x100>;
+				qcom,pin-num = <3>;
+				status = "disabled";
+			};
+
+			gpio@c300 {
+				reg = <0xc300 0x100>;
+				qcom,pin-num = <4>;
+				status = "disabled";
+			};
+
+			gpio@c400 {
+				reg = <0xc400 0x100>;
+				qcom,pin-num = <5>;
+				status = "disabled";
+			};
+
+			gpio@c500 {
+				reg = <0xc500 0x100>;
+				qcom,pin-num = <6>;
+				status = "disabled";
+			};
+
+			gpio@c600 {
+				reg = <0xc600 0x100>;
+				qcom,pin-num = <7>;
+				status = "disabled";
+			};
+
+			gpio@c700 {
+				reg = <0xc700 0x100>;
+				qcom,pin-num = <8>;
+				status = "disabled";
+			};
+
+			gpio@c800 {
+				reg = <0xc800 0x100>;
+				qcom,pin-num = <9>;
+				status = "disabled";
+			};
+
+			gpio@c900 {
+				reg = <0xc900 0x100>;
+				qcom,pin-num = <10>;
+				status = "disabled";
+			};
+
+			gpio@ca00 {
+				reg = <0xca00 0x100>;
+				qcom,pin-num = <11>;
+				status = "disabled";
+			};
+
+			gpio@cb00 {
+				reg = <0xcb00 0x100>;
+				qcom,pin-num = <12>;
+				status = "disabled";
+			};
+
+			gpio@cc00 {
+				reg = <0xcc00 0x100>;
+				qcom,pin-num = <13>;
+				status = "disabled";
+			};
+
+			gpio@cd00 {
+				reg = <0xcd00 0x100>;
+				qcom,pin-num = <14>;
+				status = "disabled";
+			};
+
+			gpio@ce00 {
+				reg = <0xce00 0x100>;
+				qcom,pin-num = <15>;
+				status = "disabled";
+			};
+
+			gpio@cf00 {
+				reg = <0xcf00 0x100>;
+				qcom,pin-num = <16>;
+				status = "disabled";
+			};
+
+			gpio@d000 {
+				reg = <0xd000 0x100>;
+				qcom,pin-num = <17>;
+				status = "disabled";
+			};
+
+			gpio@d100 {
+				reg = <0xd100 0x100>;
+				qcom,pin-num = <18>;
+				status = "disabled";
+			};
+
+			gpio@d200 {
+				reg = <0xd200 0x100>;
+				qcom,pin-num = <19>;
+				status = "disabled";
+			};
+
+			gpio@d300 {
+				reg = <0xd300 0x100>;
+				qcom,pin-num = <20>;
+				status = "disabled";
+			};
+
+			gpio@d400 {
+				reg = <0xd400 0x100>;
+				qcom,pin-num = <21>;
+				status = "disabled";
+			};
+
+			gpio@d500 {
+				reg = <0xd500 0x100>;
+				qcom,pin-num = <22>;
+				status = "disabled";
+			};
+
+			gpio@d600 {
+				reg = <0xd600 0x100>;
+				qcom,pin-num = <23>;
+				status = "disabled";
+			};
+
+			gpio@d700 {
+				reg = <0xd700 0x100>;
+				qcom,pin-num = <24>;
+				status = "disabled";
+			};
+
+			gpio@d800 {
+				reg = <0xd800 0x100>;
+				qcom,pin-num = <25>;
+				status = "disabled";
+			};
+
+			gpio@d900 {
+				reg = <0xd900 0x100>;
+				qcom,pin-num = <26>;
+				status = "disabled";
+			};
+		};
+
+		pm8998_coincell: qcom,coincell@2800 {
+			compatible = "qcom,qpnp-coincell";
+			reg = <0x2800 0x100>;
+		};
+
+		pm8998_rtc: qcom,pm8998_rtc {
+			compatible = "qcom,qpnp-rtc";
+			#address-cells = <1>;
+			#size-cells = <1>;
+			qcom,qpnp-rtc-write = <0>;
+			qcom,qpnp-rtc-alarm-pwrup = <0>;
+
+			qcom,pm8998_rtc_rw@6000 {
+				reg = <0x6000 0x100>;
+			};
+			qcom,pm8998_rtc_alarm@6100 {
+				reg = <0x6100 0x100>;
+				interrupts = <0x0 0x61 0x1 IRQ_TYPE_NONE>;
+			};
+		};
+
+		pm8998_vadc: vadc@3100 {
+			compatible = "qcom,qpnp-vadc-hc";
+			reg = <0x3100 0x100>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+			interrupts = <0x0 0x31 0x0 IRQ_TYPE_EDGE_RISING>;
+			interrupt-names = "eoc-int-en-set";
+			qcom,adc-bit-resolution = <15>;
+			qcom,adc-vdd-reference = <1875>;
+
+			chan@6 {
+				label = "die_temp";
+				reg = <6>;
+				qcom,decimation = <2>;
+				qcom,pre-div-channel-scaling = <0>;
+				qcom,calibration-type = "absolute";
+				qcom,scale-function = <3>;
+				qcom,hw-settle-time = <0>;
+				qcom,fast-avg-setup = <0>;
+				qcom,cal-val = <0>;
+			};
+
+			chan@0 {
+				label = "ref_gnd";
+				reg = <0>;
+				qcom,decimation = <2>;
+				qcom,pre-div-channel-scaling = <0>;
+				qcom,calibration-type = "absolute";
+				qcom,scale-function = <0>;
+				qcom,hw-settle-time = <0>;
+				qcom,fast-avg-setup = <0>;
+				qcom,cal-val = <0>;
+			};
+
+			chan@1 {
+				label = "ref_1250v";
+				reg = <1>;
+				qcom,decimation = <2>;
+				qcom,pre-div-channel-scaling = <0>;
+				qcom,calibration-type = "absolute";
+				qcom,scale-function = <0>;
+				qcom,hw-settle-time = <0>;
+				qcom,fast-avg-setup = <0>;
+				qcom,cal-val = <0>;
+			};
+		};
+
+		pm8998_adc_tm: vadc@3400 {
+			compatible = "qcom,qpnp-adc-tm-hc";
+			reg = <0x3400 0x100>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+			interrupts = <0x0 0x34 0x0 IRQ_TYPE_EDGE_RISING>;
+			interrupt-names = "eoc-int-en-set";
+			qcom,adc-bit-resolution = <15>;
+			qcom,adc-vdd-reference = <1875>;
+			qcom,adc_tm-vadc = <&pm8998_vadc>;
+			qcom,decimation = <0>;
+			qcom,fast-avg-setup = <0>;
+		};
+	};
+
+	qcom,pm8998@1 {
+		compatible ="qcom,spmi-pmic";
+		reg = <0x1 SPMI_USID>;
+		#address-cells = <2>;
+		#size-cells = <0>;
+	};
+};
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/arch/arm64/boot/dts/qcom/msm-pm8998-rpm-regulator.dtsi	2019-01-22 16:16:21.179225361 +0100
@@ -0,0 +1,600 @@
+/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+&rpm_bus {
+	/* PM8998 S1 + S6 = VDD_CX supply */
+	rpm-regulator-smpa1 {
+		compatible = "qcom,rpm-smd-regulator-resource";
+		qcom,resource-name = "rwcx";
+		qcom,resource-id = <0>;
+		qcom,regulator-type = <1>;
+		status = "disabled";
+
+		regulator-s1 {
+			compatible = "qcom,rpm-smd-regulator";
+			regulator-name = "pm8998_s1";
+			qcom,set = <3>;
+			status = "disabled";
+		};
+	};
+
+	rpm-regulator-smpa2 {
+		compatible = "qcom,rpm-smd-regulator-resource";
+		qcom,resource-name = "smpa";
+		qcom,resource-id = <2>;
+		qcom,regulator-type = <1>;
+		status = "disabled";
+
+		regulator-s2 {
+			compatible = "qcom,rpm-smd-regulator";
+			regulator-name = "pm8998_s2";
+			qcom,set = <3>;
+			status = "disabled";
+		};
+	};
+
+	rpm-regulator-smpa3 {
+		compatible = "qcom,rpm-smd-regulator-resource";
+		qcom,resource-name = "smpa";
+		qcom,resource-id = <3>;
+		qcom,regulator-type = <1>;
+		status = "disabled";
+
+		regulator-s3 {
+			compatible = "qcom,rpm-smd-regulator";
+			regulator-name = "pm8998_s3";
+			qcom,set = <3>;
+			status = "disabled";
+		};
+	};
+
+	rpm-regulator-smpa4 {
+		compatible = "qcom,rpm-smd-regulator-resource";
+		qcom,resource-name = "smpa";
+		qcom,resource-id = <4>;
+		qcom,regulator-type = <1>;
+		status = "disabled";
+
+		regulator-s4 {
+			compatible = "qcom,rpm-smd-regulator";
+			regulator-name = "pm8998_s4";
+			qcom,set = <3>;
+			status = "disabled";
+		};
+	};
+
+	rpm-regulator-smpa5 {
+		compatible = "qcom,rpm-smd-regulator-resource";
+		qcom,resource-name = "smpa";
+		qcom,resource-id = <5>;
+		qcom,regulator-type = <1>;
+		status = "disabled";
+
+		regulator-s5 {
+			compatible = "qcom,rpm-smd-regulator";
+			regulator-name = "pm8998_s5";
+			qcom,set = <3>;
+			status = "disabled";
+		};
+	};
+
+	rpm-regulator-smpa7 {
+		compatible = "qcom,rpm-smd-regulator-resource";
+		qcom,resource-name = "smpa";
+		qcom,resource-id = <7>;
+		qcom,regulator-type = <1>;
+		status = "disabled";
+
+		regulator-s7 {
+			compatible = "qcom,rpm-smd-regulator";
+			regulator-name = "pm8998_s7";
+			qcom,set = <3>;
+			status = "disabled";
+		};
+	};
+
+	rpm-regulator-smpa8 {
+		compatible = "qcom,rpm-smd-regulator-resource";
+		qcom,resource-name = "smpa";
+		qcom,resource-id = <8>;
+		qcom,regulator-type = <1>;
+		status = "disabled";
+
+		regulator-s8 {
+			compatible = "qcom,rpm-smd-regulator";
+			regulator-name = "pm8998_s8";
+			qcom,set = <3>;
+			status = "disabled";
+		};
+	};
+
+	/* PM8998 S9 = VDD_MX supply */
+	rpm-regulator-smpa9 {
+		compatible = "qcom,rpm-smd-regulator-resource";
+		qcom,resource-name = "rwmx";
+		qcom,resource-id = <0>;
+		qcom,regulator-type = <1>;
+		status = "disabled";
+
+		regulator-s9 {
+			compatible = "qcom,rpm-smd-regulator";
+			regulator-name = "pm8998_s9";
+			qcom,set = <3>;
+			status = "disabled";
+		};
+	};
+
+	rpm-regulator-ldoa1 {
+		compatible = "qcom,rpm-smd-regulator-resource";
+		qcom,resource-name = "ldoa";
+		qcom,resource-id = <1>;
+		qcom,regulator-type = <0>;
+		status = "disabled";
+
+		regulator-l1 {
+			compatible = "qcom,rpm-smd-regulator";
+			regulator-name = "pm8998_l1";
+			qcom,set = <3>;
+			status = "disabled";
+		};
+	};
+
+	rpm-regulator-ldoa2 {
+		compatible = "qcom,rpm-smd-regulator-resource";
+		qcom,resource-name = "ldoa";
+		qcom,resource-id = <2>;
+		qcom,regulator-type = <0>;
+		status = "disabled";
+
+		regulator-l2 {
+			compatible = "qcom,rpm-smd-regulator";
+			regulator-name = "pm8998_l2";
+			qcom,set = <3>;
+			status = "disabled";
+		};
+	};
+
+	rpm-regulator-ldoa3 {
+		compatible = "qcom,rpm-smd-regulator-resource";
+		qcom,resource-name = "ldoa";
+		qcom,resource-id = <3>;
+		qcom,regulator-type = <0>;
+		status = "disabled";
+
+		regulator-l3 {
+			compatible = "qcom,rpm-smd-regulator";
+			regulator-name = "pm8998_l3";
+			qcom,set = <3>;
+			status = "disabled";
+		};
+	};
+
+	rpm-regulator-ldoa4 {
+		compatible = "qcom,rpm-smd-regulator-resource";
+		qcom,resource-name = "rwsm";
+		qcom,resource-id = <0>;
+		qcom,regulator-type = <0>;
+		status = "disabled";
+
+		regulator-l4 {
+			compatible = "qcom,rpm-smd-regulator";
+			regulator-name = "pm8998_l4";
+			qcom,set = <3>;
+			status = "disabled";
+		};
+	};
+
+	rpm-regulator-ldoa5 {
+		compatible = "qcom,rpm-smd-regulator-resource";
+		qcom,resource-name = "ldoa";
+		qcom,resource-id = <5>;
+		qcom,regulator-type = <0>;
+		status = "disabled";
+
+		regulator-l5 {
+			compatible = "qcom,rpm-smd-regulator";
+			regulator-name = "pm8998_l5";
+			qcom,set = <3>;
+			status = "disabled";
+		};
+	};
+
+	rpm-regulator-ldoa6 {
+		compatible = "qcom,rpm-smd-regulator-resource";
+		qcom,resource-name = "ldoa";
+		qcom,resource-id = <6>;
+		qcom,regulator-type = <0>;
+		status = "disabled";
+
+		regulator-l6 {
+			compatible = "qcom,rpm-smd-regulator";
+			regulator-name = "pm8998_l6";
+			qcom,set = <3>;
+			status = "disabled";
+		};
+	};
+
+	rpm-regulator-ldoa7 {
+		compatible = "qcom,rpm-smd-regulator-resource";
+		qcom,resource-name = "ldoa";
+		qcom,resource-id = <7>;
+		qcom,regulator-type = <0>;
+		status = "disabled";
+
+		regulator-l7 {
+			compatible = "qcom,rpm-smd-regulator";
+			regulator-name = "pm8998_l7";
+			qcom,set = <3>;
+			status = "disabled";
+		};
+	};
+
+	rpm-regulator-ldoa8 {
+		compatible = "qcom,rpm-smd-regulator-resource";
+		qcom,resource-name = "ldoa";
+		qcom,resource-id = <8>;
+		qcom,regulator-type = <0>;
+		status = "disabled";
+
+		regulator-l8 {
+			compatible = "qcom,rpm-smd-regulator";
+			regulator-name = "pm8998_l8";
+			qcom,set = <3>;
+			status = "disabled";
+		};
+	};
+
+	rpm-regulator-ldoa9 {
+		compatible = "qcom,rpm-smd-regulator-resource";
+		qcom,resource-name = "ldoa";
+		qcom,resource-id = <9>;
+		qcom,regulator-type = <0>;
+		status = "disabled";
+
+		regulator-l9 {
+			compatible = "qcom,rpm-smd-regulator";
+			regulator-name = "pm8998_l9";
+			qcom,set = <3>;
+			status = "disabled";
+		};
+	};
+
+	rpm-regulator-ldoa10 {
+		compatible = "qcom,rpm-smd-regulator-resource";
+		qcom,resource-name = "ldoa";
+		qcom,resource-id = <10>;
+		qcom,regulator-type = <0>;
+		status = "disabled";
+
+		regulator-l10 {
+			compatible = "qcom,rpm-smd-regulator";
+			regulator-name = "pm8998_l10";
+			qcom,set = <3>;
+			status = "disabled";
+		};
+	};
+
+	rpm-regulator-ldoa11 {
+		compatible = "qcom,rpm-smd-regulator-resource";
+		qcom,resource-name = "ldoa";
+		qcom,resource-id = <11>;
+		qcom,regulator-type = <0>;
+		status = "disabled";
+
+		regulator-l11 {
+			compatible = "qcom,rpm-smd-regulator";
+			regulator-name = "pm8998_l11";
+			qcom,set = <3>;
+			status = "disabled";
+		};
+	};
+
+	rpm-regulator-ldoa12 {
+		compatible = "qcom,rpm-smd-regulator-resource";
+		qcom,resource-name = "ldoa";
+		qcom,resource-id = <12>;
+		qcom,regulator-type = <0>;
+		status = "disabled";
+
+		regulator-l12 {
+			compatible = "qcom,rpm-smd-regulator";
+			regulator-name = "pm8998_l12";
+			qcom,set = <3>;
+			status = "disabled";
+		};
+	};
+
+	rpm-regulator-ldoa13 {
+		compatible = "qcom,rpm-smd-regulator-resource";
+		qcom,resource-name = "ldoa";
+		qcom,resource-id = <13>;
+		qcom,regulator-type = <0>;
+		status = "disabled";
+
+		regulator-l13 {
+			compatible = "qcom,rpm-smd-regulator";
+			regulator-name = "pm8998_l13";
+			qcom,set = <3>;
+			status = "disabled";
+		};
+	};
+
+	rpm-regulator-ldoa14 {
+		compatible = "qcom,rpm-smd-regulator-resource";
+		qcom,resource-name = "ldoa";
+		qcom,resource-id = <14>;
+		qcom,regulator-type = <0>;
+		status = "disabled";
+
+		regulator-l14 {
+			compatible = "qcom,rpm-smd-regulator";
+			regulator-name = "pm8998_l14";
+			qcom,set = <3>;
+			status = "disabled";
+		};
+	};
+
+	rpm-regulator-ldoa15 {
+		compatible = "qcom,rpm-smd-regulator-resource";
+		qcom,resource-name = "ldoa";
+		qcom,resource-id = <15>;
+		qcom,regulator-type = <0>;
+		status = "disabled";
+
+		regulator-l15 {
+			compatible = "qcom,rpm-smd-regulator";
+			regulator-name = "pm8998_l15";
+			qcom,set = <3>;
+			status = "disabled";
+		};
+	};
+
+	rpm-regulator-ldoa16 {
+		compatible = "qcom,rpm-smd-regulator-resource";
+		qcom,resource-name = "ldoa";
+		qcom,resource-id = <16>;
+		qcom,regulator-type = <0>;
+		status = "disabled";
+
+		regulator-l16 {
+			compatible = "qcom,rpm-smd-regulator";
+			regulator-name = "pm8998_l16";
+			qcom,set = <3>;
+			status = "disabled";
+		};
+	};
+
+	rpm-regulator-ldoa17 {
+		compatible = "qcom,rpm-smd-regulator-resource";
+		qcom,resource-name = "ldoa";
+		qcom,resource-id = <17>;
+		qcom,regulator-type = <0>;
+		status = "disabled";
+
+		regulator-l17 {
+			compatible = "qcom,rpm-smd-regulator";
+			regulator-name = "pm8998_l17";
+			qcom,set = <3>;
+			status = "disabled";
+		};
+	};
+
+	rpm-regulator-ldoa18 {
+		compatible = "qcom,rpm-smd-regulator-resource";
+		qcom,resource-name = "ldoa";
+		qcom,resource-id = <18>;
+		qcom,regulator-type = <0>;
+		status = "disabled";
+
+		regulator-l18 {
+			compatible = "qcom,rpm-smd-regulator";
+			regulator-name = "pm8998_l18";
+			qcom,set = <3>;
+			status = "disabled";
+		};
+	};
+
+	rpm-regulator-ldoa19 {
+		compatible = "qcom,rpm-smd-regulator-resource";
+		qcom,resource-name = "ldoa";
+		qcom,resource-id = <19>;
+		qcom,regulator-type = <0>;
+		status = "disabled";
+
+		regulator-l19 {
+			compatible = "qcom,rpm-smd-regulator";
+			regulator-name = "pm8998_l19";
+			qcom,set = <3>;
+			status = "disabled";
+		};
+	};
+
+	rpm-regulator-ldoa20 {
+		compatible = "qcom,rpm-smd-regulator-resource";
+		qcom,resource-name = "ldoa";
+		qcom,resource-id = <20>;
+		qcom,regulator-type = <0>;
+		status = "disabled";
+
+		regulator-l20 {
+			compatible = "qcom,rpm-smd-regulator";
+			regulator-name = "pm8998_l20";
+			qcom,set = <3>;
+			status = "disabled";
+		};
+	};
+
+	rpm-regulator-ldoa21 {
+		compatible = "qcom,rpm-smd-regulator-resource";
+		qcom,resource-name = "ldoa";
+		qcom,resource-id = <21>;
+		qcom,regulator-type = <0>;
+		status = "disabled";
+
+		regulator-l21 {
+			compatible = "qcom,rpm-smd-regulator";
+			regulator-name = "pm8998_l21";
+			qcom,set = <3>;
+			status = "disabled";
+		};
+	};
+
+	rpm-regulator-ldoa22 {
+		compatible = "qcom,rpm-smd-regulator-resource";
+		qcom,resource-name = "ldoa";
+		qcom,resource-id = <22>;
+		qcom,regulator-type = <0>;
+		status = "disabled";
+
+		regulator-l22 {
+			compatible = "qcom,rpm-smd-regulator";
+			regulator-name = "pm8998_l22";
+			qcom,set = <3>;
+			status = "disabled";
+		};
+	};
+
+	rpm-regulator-ldoa23 {
+		compatible = "qcom,rpm-smd-regulator-resource";
+		qcom,resource-name = "ldoa";
+		qcom,resource-id = <23>;
+		qcom,regulator-type = <0>;
+		status = "disabled";
+
+		regulator-l23 {
+			compatible = "qcom,rpm-smd-regulator";
+			regulator-name = "pm8998_l23";
+			qcom,set = <3>;
+			status = "disabled";
+		};
+	};
+
+	rpm-regulator-ldoa24 {
+		compatible = "qcom,rpm-smd-regulator-resource";
+		qcom,resource-name = "ldoa";
+		qcom,resource-id = <24>;
+		qcom,regulator-type = <0>;
+		status = "disabled";
+
+		regulator-l24 {
+			compatible = "qcom,rpm-smd-regulator";
+			regulator-name = "pm8998_l24";
+			qcom,set = <3>;
+			status = "disabled";
+		};
+	};
+
+	rpm-regulator-ldoa25 {
+		compatible = "qcom,rpm-smd-regulator-resource";
+		qcom,resource-name = "ldoa";
+		qcom,resource-id = <25>;
+		qcom,regulator-type = <0>;
+		status = "disabled";
+
+		regulator-l25 {
+			compatible = "qcom,rpm-smd-regulator";
+			regulator-name = "pm8998_l25";
+			qcom,set = <3>;
+			status = "disabled";
+		};
+	};
+
+	rpm-regulator-ldoa26 {
+		compatible = "qcom,rpm-smd-regulator-resource";
+		qcom,resource-name = "ldoa";
+		qcom,resource-id = <26>;
+		qcom,regulator-type = <0>;
+		status = "disabled";
+
+		regulator-l26 {
+			compatible = "qcom,rpm-smd-regulator";
+			regulator-name = "pm8998_l26";
+			qcom,set = <3>;
+			status = "disabled";
+		};
+	};
+
+	rpm-regulator-ldoa27 {
+		compatible = "qcom,rpm-smd-regulator-resource";
+		qcom,resource-name = "rwsc";
+		qcom,resource-id = <0>;
+		qcom,regulator-type = <0>;
+		status = "disabled";
+
+		regulator-l27 {
+			compatible = "qcom,rpm-smd-regulator";
+			regulator-name = "pm8998_l27";
+			qcom,set = <3>;
+			status = "disabled";
+		};
+	};
+
+	rpm-regulator-ldoa28 {
+		compatible = "qcom,rpm-smd-regulator-resource";
+		qcom,resource-name = "ldoa";
+		qcom,resource-id = <28>;
+		qcom,regulator-type = <0>;
+		status = "disabled";
+
+		regulator-l28 {
+			compatible = "qcom,rpm-smd-regulator";
+			regulator-name = "pm8998_l28";
+			qcom,set = <3>;
+			status = "disabled";
+		};
+	};
+
+	rpm-regulator-vsa1 {
+		compatible = "qcom,rpm-smd-regulator-resource";
+		qcom,resource-name = "vsa";
+		qcom,resource-id = <1>;
+		qcom,regulator-type = <2>;
+		status = "disabled";
+
+		regulator-lvs1 {
+			compatible = "qcom,rpm-smd-regulator";
+			regulator-name = "pm8998_lvs1";
+			qcom,set = <3>;
+			status = "disabled";
+		};
+	};
+
+	rpm-regulator-vsa2 {
+		compatible = "qcom,rpm-smd-regulator-resource";
+		qcom,resource-name = "vsa";
+		qcom,resource-id = <2>;
+		qcom,regulator-type = <2>;
+		status = "disabled";
+
+		regulator-lvs2 {
+			compatible = "qcom,rpm-smd-regulator";
+			regulator-name = "pm8998_lvs2";
+			qcom,set = <3>;
+			status = "disabled";
+		};
+	};
+
+	rpm-regulator-bobb {
+		compatible = "qcom,rpm-smd-regulator-resource";
+		qcom,resource-name = "bobb";
+		qcom,resource-id = <1>;
+		qcom,regulator-type = <4>;
+		status = "disabled";
+
+		regulator-bob {
+			compatible = "qcom,rpm-smd-regulator";
+			regulator-name = "pm8998_bob";
+			qcom,set = <3>;
+			status = "disabled";
+		};
+	};
+};
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/arch/arm64/boot/dts/qcom/msm-pmi8998.dtsi	2019-01-22 16:16:21.179225361 +0100
@@ -0,0 +1,777 @@
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <dt-bindings/interrupt-controller/irq.h>
+#include <dt-bindings/spmi/spmi.h>
+
+&spmi_bus {
+	qcom,pmi8998@2 {
+		compatible = "qcom,spmi-pmic";
+		reg = <0x2 SPMI_USID>;
+		#address-cells = <2>;
+		#size-cells = <0>;
+
+		pmi8998_revid: qcom,revid@100 {
+			compatible = "qcom,qpnp-revid";
+			reg = <0x100 0x100>;
+			qcom,fab-id-valid;
+		};
+
+		qcom,power-on@800 {
+			compatible = "qcom,qpnp-power-on";
+			reg = <0x800 0x100>;
+		};
+
+		pmi8998_misc: qcom,misc@900 {
+			compatible = "qcom,qpnp-misc";
+			reg = <0x900 0x100>;
+		};
+
+		qcom,temp-alarm@2400 {
+			compatible = "qcom,qpnp-temp-alarm";
+			reg = <0x2400 0x100>;
+			interrupts = <0x2 0x24 0x0 IRQ_TYPE_EDGE_RISING>;
+			label = "pmi8998_tz";
+		};
+
+		pmi8998_gpios: gpios {
+			compatible = "qcom,qpnp-pin";
+			gpio-controller;
+			#gpio-cells = <2>;
+			#address-cells = <1>;
+			#size-cells = <1>;
+			label = "pmi8998-gpio";
+
+			gpio@c000 {
+				reg = <0xc000 0x100>;
+				qcom,pin-num = <1>;
+				status = "disabled";
+			};
+
+			gpio@c100 {
+				reg = <0xc100 0x100>;
+				qcom,pin-num = <2>;
+				status = "disabled";
+			};
+
+			gpio@c200 {
+				reg = <0xc200 0x100>;
+				qcom,pin-num = <3>;
+				status = "disabled";
+			};
+
+			gpio@c300 {
+				reg = <0xc300 0x100>;
+				qcom,pin-num = <4>;
+				status = "disabled";
+			};
+
+			gpio@c400 {
+				reg = <0xc400 0x100>;
+				qcom,pin-num = <5>;
+				status = "disabled";
+			};
+
+			gpio@c500 {
+				reg = <0xc500 0x100>;
+				qcom,pin-num = <6>;
+				status = "disabled";
+			};
+
+			gpio@c600 {
+				reg = <0xc600 0x100>;
+				qcom,pin-num = <7>;
+				status = "disabled";
+			};
+
+			gpio@c700 {
+				reg = <0xc700 0x100>;
+				qcom,pin-num = <8>;
+				status = "disabled";
+			};
+
+			gpio@c800 {
+				reg = <0xc800 0x100>;
+				qcom,pin-num = <9>;
+				status = "disabled";
+			};
+
+			gpio@c900 {
+				reg = <0xc900 0x100>;
+				qcom,pin-num = <10>;
+				status = "disabled";
+			};
+
+			gpio@ca00 {
+				reg = <0xca00 0x100>;
+				qcom,pin-num = <11>;
+				status = "disabled";
+			};
+
+			gpio@cb00 {
+				reg = <0xcb00 0x100>;
+				qcom,pin-num = <12>;
+				status = "disabled";
+			};
+
+			gpio@cc00 {
+				reg = <0xcc00 0x100>;
+				qcom,pin-num = <13>;
+				status = "disabled";
+			};
+
+			gpio@cd00 {
+				reg = <0xcd00 0x100>;
+				qcom,pin-num = <14>;
+				status = "disabled";
+			};
+		};
+
+		qcom,qpnp-qnovo@1500 {
+			compatible = "qcom,qpnp-qnovo";
+			reg = <0x1500 0x100>;
+			interrupts = <0x2 0x15 0x0 IRQ_TYPE_NONE>;
+			interrupt-names = "ptrain-done";
+			qcom,pmic-revid = <&pmi8998_revid>;
+		};
+
+		pmi8998_charger: qcom,qpnp-smb2 {
+			compatible = "qcom,qpnp-smb2";
+			#address-cells = <1>;
+			#size-cells = <1>;
+
+			qcom,pmic-revid = <&pmi8998_revid>;
+
+			io-channels = <&pmi8998_rradc 8>,
+				      <&pmi8998_rradc 10>,
+				      <&pmi8998_rradc 3>,
+				      <&pmi8998_rradc 4>;
+			io-channel-names = "charger_temp",
+					   "charger_temp_max",
+					   "usbin_i",
+					   "usbin_v";
+
+			qcom,boost-threshold-ua = <100000>;
+			qcom,wipower-max-uw = <5000000>;
+			dpdm-supply = <&qusb_phy0>;
+
+			qcom,thermal-mitigation
+					= <3000000 1500000 1000000 500000>;
+
+			qcom,chgr@1000 {
+				reg = <0x1000 0x100>;
+				interrupts =
+					<0x2 0x10 0x0 IRQ_TYPE_EDGE_RISING>,
+					<0x2 0x10 0x1 IRQ_TYPE_EDGE_RISING>,
+					<0x2 0x10 0x2 IRQ_TYPE_EDGE_RISING>,
+					<0x2 0x10 0x3 IRQ_TYPE_EDGE_RISING>,
+					<0x2 0x10 0x4 IRQ_TYPE_EDGE_RISING>;
+
+				interrupt-names = "chg-error",
+						  "chg-state-change",
+						  "step-chg-state-change",
+						  "step-chg-soc-update-fail",
+						  "step-chg-soc-update-request";
+			};
+
+			qcom,otg@1100 {
+				reg = <0x1100 0x100>;
+				interrupts = <0x2 0x11 0x0 IRQ_TYPE_EDGE_BOTH>,
+					     <0x2 0x11 0x1 IRQ_TYPE_EDGE_BOTH>,
+					     <0x2 0x11 0x2 IRQ_TYPE_EDGE_BOTH>,
+					     <0x2 0x11 0x3 IRQ_TYPE_EDGE_BOTH>;
+
+				interrupt-names = "otg-fail",
+						  "otg-overcurrent",
+						  "otg-oc-dis-sw-sts",
+						  "testmode-change-detect";
+			};
+
+			qcom,bat-if@1200 {
+				reg = <0x1200 0x100>;
+				interrupts =
+					<0x2 0x12 0x0 IRQ_TYPE_EDGE_RISING>,
+					<0x2 0x12 0x1 IRQ_TYPE_EDGE_BOTH>,
+					<0x2 0x12 0x2 IRQ_TYPE_EDGE_BOTH>,
+					<0x2 0x12 0x3 IRQ_TYPE_EDGE_BOTH>,
+					<0x2 0x12 0x4 IRQ_TYPE_EDGE_BOTH>,
+					<0x2 0x12 0x5 IRQ_TYPE_EDGE_BOTH>;
+
+				interrupt-names = "bat-temp",
+						  "bat-ocp",
+						  "bat-ov",
+						  "bat-low",
+						  "bat-therm-or-id-missing",
+						  "bat-terminal-missing";
+			};
+
+			qcom,usb-chgpth@1300 {
+				reg = <0x1300 0x100>;
+				interrupts =
+					<0x2 0x13 0x0 IRQ_TYPE_EDGE_BOTH>,
+					<0x2 0x13 0x1 IRQ_TYPE_EDGE_BOTH>,
+					<0x2 0x13 0x2 IRQ_TYPE_EDGE_BOTH>,
+					<0x2 0x13 0x3 IRQ_TYPE_EDGE_BOTH>,
+					<0x2 0x13 0x4 IRQ_TYPE_EDGE_BOTH>,
+					<0x2 0x13 0x5 IRQ_TYPE_EDGE_RISING>,
+					<0x2 0x13 0x6 IRQ_TYPE_EDGE_RISING>,
+					<0x2 0x13 0x7 IRQ_TYPE_EDGE_RISING>;
+
+				interrupt-names = "usbin-collapse",
+						  "usbin-lt-3p6v",
+						  "usbin-uv",
+						  "usbin-ov",
+						  "usbin-plugin",
+						  "usbin-src-change",
+						  "usbin-icl-change",
+						  "type-c-change";
+			};
+
+			qcom,dc-chgpth@1400 {
+				reg = <0x1400 0x100>;
+				interrupts =
+					<0x2 0x14 0x0 IRQ_TYPE_EDGE_BOTH>,
+					<0x2 0x14 0x1 IRQ_TYPE_EDGE_BOTH>,
+					<0x2 0x14 0x2 IRQ_TYPE_EDGE_BOTH>,
+					<0x2 0x14 0x3 IRQ_TYPE_EDGE_BOTH>,
+					<0x2 0x14 0x4 IRQ_TYPE_EDGE_BOTH>,
+					<0x2 0x14 0x5 IRQ_TYPE_EDGE_BOTH>,
+					<0x2 0x14 0x6 IRQ_TYPE_EDGE_RISING>;
+
+				interrupt-names = "dcin-collapse",
+						  "dcin-lt-3p6v",
+						  "dcin-uv",
+						  "dcin-ov",
+						  "dcin-plugin",
+						  "div2-en-dg",
+						  "dcin-icl-change";
+			};
+
+			qcom,chgr-misc@1600 {
+				reg = <0x1600 0x100>;
+				interrupts =
+					<0x2 0x16 0x0 IRQ_TYPE_EDGE_RISING>,
+					<0x2 0x16 0x1 IRQ_TYPE_EDGE_RISING>,
+					<0x2 0x16 0x2 IRQ_TYPE_EDGE_BOTH>,
+					<0x2 0x16 0x3 IRQ_TYPE_EDGE_BOTH>,
+					<0x2 0x16 0x4 IRQ_TYPE_EDGE_BOTH>,
+					<0x2 0x16 0x5 IRQ_TYPE_EDGE_BOTH>,
+					<0x2 0x16 0x6 IRQ_TYPE_EDGE_FALLING>,
+					<0x2 0x16 0x7 IRQ_TYPE_EDGE_BOTH>;
+
+				interrupt-names = "wdog-snarl",
+						  "wdog-bark",
+						  "aicl-fail",
+						  "aicl-done",
+						  "high-duty-cycle",
+						  "input-current-limiting",
+						  "temperature-change",
+						  "switcher-power-ok";
+			};
+		};
+
+		pmi8998_pdphy: qcom,usb-pdphy@1700 {
+			compatible = "qcom,qpnp-pdphy";
+			reg = <0x1700 0x100>;
+			vdd-pdphy-supply = <&pm8998_l24>;
+			vbus-supply = <&smb2_vbus>;
+			vconn-supply = <&smb2_vconn>;
+			interrupts = <0x2 0x17 0x0 IRQ_TYPE_EDGE_RISING>,
+				     <0x2 0x17 0x1 IRQ_TYPE_EDGE_RISING>,
+				     <0x2 0x17 0x2 IRQ_TYPE_EDGE_RISING>,
+				     <0x2 0x17 0x3 IRQ_TYPE_EDGE_RISING>,
+				     <0x2 0x17 0x4 IRQ_TYPE_EDGE_RISING>,
+				     <0x2 0x17 0x5 IRQ_TYPE_EDGE_RISING>,
+				     <0x2 0x17 0x6 IRQ_TYPE_EDGE_RISING>;
+
+			interrupt-names = "sig-tx",
+					  "sig-rx",
+					  "msg-tx",
+					  "msg-rx",
+					  "msg-tx-failed",
+					  "msg-tx-discarded",
+					  "msg-rx-discarded";
+
+			qcom,default-sink-caps = <5000 3000>, /* 5V @ 3A */
+						 <9000 3000>, /* 9V @ 3A */
+						 <12000 2250>; /* 12V @ 2.25A */
+		};
+
+		bcl@4200 {
+			compatible = "qcom,msm-bcl-lmh";
+			reg = <0x4200 0xff>,
+				<0x4300 0xff>;
+			reg-names = "fg_user_adc",
+					"fg_lmh";
+			interrupts = <0x2 0x42 0x0 IRQ_TYPE_NONE>,
+					<0x2 0x42 0x2 IRQ_TYPE_NONE>;
+			interrupt-names = "bcl-high-ibat-int",
+					"bcl-low-vbat-int";
+			qcom,vbat-polling-delay-ms = <100>;
+			qcom,ibat-polling-delay-ms = <100>;
+		};
+
+		pmi8998_rradc: rradc@4500 {
+			compatible = "qcom,rradc";
+			reg = <0x4500 0x100>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+			#io-channel-cells = <1>;
+			qcom,pmic-revid = <&pmi8998_revid>;
+		};
+
+		pmi8998_fg: qpnp,fg {
+			compatible = "qcom,fg-gen3";
+			#address-cells = <1>;
+			#size-cells = <1>;
+			qcom,pmic-revid = <&pmi8998_revid>;
+			io-channels = <&pmi8998_rradc 0>;
+			io-channel-names = "rradc_batt_id";
+			qcom,rradc-base = <0x4500>;
+			qcom,fg-esr-timer-awake = <96 96>;
+			qcom,fg-esr-timer-asleep = <256 256>;
+			qcom,fg-esr-timer-charging = <0 96>;
+			qcom,cycle-counter-en;
+			status = "okay";
+
+			qcom,fg-batt-soc@4000 {
+				status = "okay";
+				reg = <0x4000 0x100>;
+				interrupts = <0x2 0x40 0x0 IRQ_TYPE_EDGE_BOTH>,
+					     <0x2 0x40 0x1 IRQ_TYPE_EDGE_BOTH>,
+					     <0x2 0x40 0x2
+							IRQ_TYPE_EDGE_RISING>,
+					     <0x2 0x40 0x3
+							IRQ_TYPE_EDGE_RISING>,
+					     <0x2 0x40 0x4 IRQ_TYPE_EDGE_BOTH>,
+					     <0x2 0x40 0x5
+							IRQ_TYPE_EDGE_RISING>,
+					     <0x2 0x40 0x6 IRQ_TYPE_EDGE_BOTH>,
+					     <0x2 0x40 0x7 IRQ_TYPE_EDGE_BOTH>;
+				interrupt-names = "soc-update",
+						  "soc-ready",
+						  "bsoc-delta",
+						  "msoc-delta",
+						  "msoc-low",
+						  "msoc-empty",
+						  "msoc-high",
+						  "msoc-full";
+			};
+
+			qcom,fg-batt-info@4100 {
+				status = "okay";
+				reg = <0x4100 0x100>;
+				interrupts = <0x2 0x41 0x0 IRQ_TYPE_EDGE_BOTH>,
+					     <0x2 0x41 0x1 IRQ_TYPE_EDGE_BOTH>,
+					     <0x2 0x41 0x2 IRQ_TYPE_EDGE_BOTH>,
+					     <0x2 0x41 0x3 IRQ_TYPE_EDGE_BOTH>,
+					     <0x2 0x41 0x6 IRQ_TYPE_EDGE_BOTH>;
+				interrupt-names = "vbatt-pred-delta",
+						  "vbatt-low",
+						  "esr-delta",
+						  "batt-missing",
+						  "batt-temp-delta";
+			};
+
+			qcom,fg-memif@4400 {
+				status = "okay";
+				reg = <0x4400 0x100>;
+				interrupts = <0x2 0x44 0x0 IRQ_TYPE_EDGE_BOTH>,
+					     <0x2 0x44 0x1 IRQ_TYPE_EDGE_BOTH>,
+					     <0x2 0x44 0x2 IRQ_TYPE_EDGE_BOTH>;
+				interrupt-names = "ima-rdy",
+						  "mem-xcp",
+						  "dma-grant";
+			};
+		};
+	};
+
+	qcom,pmi8998@3 {
+		compatible ="qcom,spmi-pmic";
+		reg = <0x3 SPMI_USID>;
+		#address-cells = <2>;
+		#size-cells = <0>;
+
+		pmi8998_pwm_1: pwm@b100 {
+			compatible = "qcom,qpnp-pwm";
+			reg = <0xb100 0x100>,
+			      <0xb042 0x7e>;
+			reg-names = "qpnp-lpg-channel-base",
+					"qpnp-lpg-lut-base";
+			qcom,lpg-lut-size = <0x7e>;
+			qcom,channel-id = <1>;
+			qcom,supported-sizes = <6>, <9>;
+			qcom,ramp-index = <0>;
+			#pwm-cells = <2>;
+			status = "disabled";
+		};
+
+		pmi8998_pwm_2: pwm@b200 {
+			compatible = "qcom,qpnp-pwm";
+			reg = <0xb200 0x100>,
+			      <0xb042 0x7e>;
+			reg-names = "qpnp-lpg-channel-base",
+					 "qpnp-lpg-lut-base";
+			qcom,lpg-lut-size = <0x7e>;
+			qcom,channel-id = <2>;
+			qcom,supported-sizes = <6>, <9>;
+			qcom,ramp-index = <1>;
+			#pwm-cells = <2>;
+			status = "disabled";
+		};
+
+		pmi8998_pwm_3: pwm@b300 {
+			compatible = "qcom,qpnp-pwm";
+			reg = <0xb300 0x100>,
+			      <0xb042 0x7e>;
+			reg-names = "qpnp-lpg-channel-base",
+					 "qpnp-lpg-lut-base";
+			qcom,lpg-lut-size = <0x7e>;
+			qcom,channel-id = <3>;
+			qcom,supported-sizes = <6>, <9>;
+			qcom,ramp-index = <2>;
+			#pwm-cells = <2>;
+		};
+
+		pmi8998_pwm_4: pwm@b400 {
+			compatible = "qcom,qpnp-pwm";
+			reg = <0xb400 0x100>,
+			      <0xb042 0x7e>;
+			reg-names = "qpnp-lpg-channel-base",
+					 "qpnp-lpg-lut-base";
+			qcom,lpg-lut-size = <0x7e>;
+			qcom,channel-id = <4>;
+			qcom,supported-sizes = <6>, <9>;
+			qcom,ramp-index = <3>;
+			#pwm-cells = <2>;
+		};
+
+		pmi8998_pwm_5: pwm@b500 {
+			compatible = "qcom,qpnp-pwm";
+			reg = <0xb500 0x100>,
+			      <0xb042 0x7e>;
+			reg-names = "qpnp-lpg-channel-base",
+					 "qpnp-lpg-lut-base";
+			qcom,lpg-lut-size = <0x7e>;
+			qcom,channel-id = <5>;
+			qcom,supported-sizes = <6>, <9>;
+			qcom,ramp-index = <4>;
+			#pwm-cells = <2>;
+		};
+
+		pmi8998_pwm_6: pwm@b600 {
+			compatible = "qcom,qpnp-pwm";
+			reg = <0xb600 0x100>,
+			      <0xb042 0x7e>;
+			reg-names = "qpnp-lpg-channel-base",
+					 "qpnp-lpg-lut-base";
+			qcom,lpg-lut-size = <0x7e>;
+			qcom,channel-id = <6>;
+			qcom,supported-sizes = <6>, <9>;
+			qcom,ramp-index = <5>;
+			#pwm-cells = <2>;
+			status = "disabled";
+		};
+
+		qcom,leds@d000 {
+			compatible = "qcom,leds-qpnp";
+			reg = <0xd000 0x100>;
+			label = "rgb";
+			status = "okay";
+
+			red_led: qcom,rgb_0 {
+				label = "rgb";
+				qcom,id = <3>;
+				qcom,mode = "pwm";
+				pwms = <&pmi8998_pwm_5 0 0>;
+				qcom,pwm-us = <1000>;
+				qcom,max-current = <12>;
+				qcom,default-state = "off";
+				linux,name = "red";
+				linux,default-trigger =
+					"battery-charging";
+			};
+
+			green_led: qcom,rgb_1 {
+				label = "rgb";
+				qcom,id = <4>;
+				qcom,mode = "pwm";
+				pwms = <&pmi8998_pwm_4 0 0>;
+				qcom,pwm-us = <1000>;
+				qcom,max-current = <12>;
+				qcom,default-state = "off";
+				linux,name = "green";
+				linux,default-trigger = "battery-full";
+			};
+
+			blue_led: qcom,rgb_2 {
+				label = "rgb";
+				qcom,id = <5>;
+				qcom,mode = "pwm";
+				pwms = <&pmi8998_pwm_3 0 0>;
+				qcom,pwm-us = <1000>;
+				qcom,max-current = <12>;
+				qcom,default-state = "off";
+				linux,name = "blue";
+				linux,default-trigger = "boot-indication";
+			};
+		};
+
+		labibb: qpnp-labibb-regulator {
+			compatible = "qcom,qpnp-labibb-regulator";
+			#address-cells = <1>;
+			#size-cells = <1>;
+			qcom,pmic-revid = <&pmi8998_revid>;
+			status = "disabled";
+
+			ibb_regulator: qcom,ibb@dc00 {
+				reg = <0xdc00 0x100>;
+				reg-names = "ibb_reg";
+				regulator-name = "ibb_reg";
+
+				regulator-min-microvolt = <4600000>;
+				regulator-max-microvolt = <6000000>;
+
+				interrupts = <0x3 0xdc 0x2
+						IRQ_TYPE_EDGE_RISING>;
+				interrupt-names = "ibb-sc-err";
+
+				qcom,qpnp-ibb-min-voltage = <1400000>;
+				qcom,qpnp-ibb-step-size = <100000>;
+				qcom,qpnp-ibb-slew-rate = <2000000>;
+				qcom,qpnp-ibb-use-default-voltage;
+				qcom,qpnp-ibb-init-voltage = <5500000>;
+				qcom,qpnp-ibb-init-amoled-voltage = <4000000>;
+				qcom,qpnp-ibb-init-lcd-voltage = <5500000>;
+
+				qcom,qpnp-ibb-soft-start = <1000>;
+
+				qcom,qpnp-ibb-lab-pwrup-delay = <8000>;
+				qcom,qpnp-ibb-lab-pwrdn-delay = <8000>;
+				qcom,qpnp-ibb-en-discharge;
+
+				qcom,qpnp-ibb-full-pull-down;
+				qcom,qpnp-ibb-pull-down-enable;
+				qcom,qpnp-ibb-switching-clock-frequency =
+									<1480>;
+				qcom,qpnp-ibb-limit-maximum-current = <1550>;
+				qcom,qpnp-ibb-debounce-cycle = <16>;
+				qcom,qpnp-ibb-limit-max-current-enable;
+				qcom,qpnp-ibb-ps-enable;
+			};
+
+			lab_regulator: qcom,lab@de00 {
+				reg = <0xde00 0x100>;
+				reg-names = "lab";
+				regulator-name = "lab_reg";
+
+				regulator-min-microvolt = <4600000>;
+				regulator-max-microvolt = <6000000>;
+
+				interrupts = <0x3 0xde 0x0
+						IRQ_TYPE_EDGE_RISING>,
+					     <0x3 0xde 0x1
+						IRQ_TYPE_EDGE_RISING>;
+				interrupt-names = "lab-vreg-ok", "lab-sc-err";
+
+				qcom,qpnp-lab-min-voltage = <4600000>;
+				qcom,qpnp-lab-step-size = <100000>;
+				qcom,qpnp-lab-slew-rate = <5000>;
+				qcom,qpnp-lab-use-default-voltage;
+				qcom,qpnp-lab-init-voltage = <5500000>;
+				qcom,qpnp-lab-init-amoled-voltage = <4600000>;
+				qcom,qpnp-lab-init-lcd-voltage = <5500000>;
+
+				qcom,qpnp-lab-soft-start = <800>;
+
+				qcom,qpnp-lab-full-pull-down;
+				qcom,qpnp-lab-pull-down-enable;
+				qcom,qpnp-lab-switching-clock-frequency =
+									<1600>;
+				qcom,qpnp-lab-limit-maximum-current = <1600>;
+				qcom,qpnp-lab-limit-max-current-enable;
+				qcom,qpnp-lab-ps-threshold = <70>;
+				qcom,qpnp-lab-ps-enable;
+				qcom,qpnp-lab-nfet-size = <100>;
+				qcom,qpnp-lab-pfet-size = <100>;
+				qcom,qpnp-lab-max-precharge-time = <500>;
+			};
+		};
+
+		pmi8998_wled: qcom,leds@d800 {
+			compatible = "qcom,qpnp-wled";
+			reg = <0xd800 0x100>,
+				<0xd900 0x100>;
+			reg-names = "qpnp-wled-ctrl-base",
+					"qpnp-wled-sink-base";
+			interrupts = <0x3 0xd8 0x1 IRQ_TYPE_EDGE_RISING>,
+					<0x3 0xd8 0x2 IRQ_TYPE_EDGE_RISING>;
+			interrupt-names = "ovp-irq", "sc-irq";
+			linux,name = "wled";
+			linux,default-trigger = "bkl-trigger";
+			qcom,fdbk-output = "auto";
+			qcom,vref-uv = <127500>;
+			qcom,switch-freq-khz = <800>;
+			qcom,ovp-mv = <29600>;
+			qcom,ilim-ma = <970>;
+			qcom,boost-duty-ns = <26>;
+			qcom,mod-freq-khz = <9600>;
+			qcom,dim-mode = "hybrid";
+			qcom,hyb-thres = <625>;
+			qcom,sync-dly-us = <800>;
+			qcom,fs-curr-ua = <25000>;
+			qcom,cons-sync-write-delay-us = <1000>;
+			qcom,led-strings-list = [00 01 02 03];
+			qcom,en-ext-pfet-sc-pro;
+			qcom,pmic-revid = <&pmi8998_revid>;
+			qcom,loop-auto-gm-en;
+			qcom,auto-calibration-enable;
+			status = "okay";
+		};
+
+		pmi8998_haptics: qcom,haptic@c000 {
+			status = "disabled";
+			compatible = "qcom,qpnp-haptic";
+			reg = <0xc000 0x100>;
+			interrupts = <0x3 0xc0 0x0 IRQ_TYPE_EDGE_BOTH>,
+				     <0x3 0xc0 0x1 IRQ_TYPE_EDGE_BOTH>;
+			interrupt-names = "sc-irq", "play-irq";
+			qcom,pmic-revid = <&pmi8998_revid>;
+			qcom,pmic-misc = <&pmi8998_misc>;
+			qcom,misc-clk-trim-error-reg = <0xf3>;
+			qcom,actuator-type = "lra";
+			qcom,play-mode = "direct";
+			qcom,vmax-mv = <3200>;
+			qcom,ilim-ma = <800>;
+			qcom,wave-shape = "square";
+			qcom,wave-play-rate-us = <6667>;
+			qcom,int-pwm-freq-khz = <505>;
+			qcom,sc-deb-cycles = <8>;
+			qcom,en-brake;
+			qcom,brake-pattern = [03 03 00 00];
+			qcom,lra-high-z = "opt1";
+			qcom,lra-auto-res-mode = "qwd";
+			qcom,lra-res-cal-period = <4>;
+			qcom,correct-lra-drive-freq;
+		};
+
+		flash_led: qcom,leds@d300 {
+			compatible = "qcom,qpnp-flash-led-v2";
+			status = "okay";
+			reg = <0xd300 0x100>;
+			label = "flash";
+			interrupts = <0x3 0xd3 0x0 IRQ_TYPE_EDGE_RISING>,
+					<0x3 0xd3 0x3 IRQ_TYPE_EDGE_RISING>,
+					<0x3 0xd3 0x4 IRQ_TYPE_EDGE_RISING>;
+			interrupt-names = "led-fault-irq",
+					"all-ramp-down-done-irq",
+					"all-ramp-up-done-irq";
+			qcom,hdrm-auto-mode;
+			qcom,short-circuit-det;
+			qcom,open-circuit-det;
+			qcom,vph-droop-det;
+			qcom,thermal-derate-en;
+			qcom,thermal-derate-current = <200 500 1000>;
+			qcom,isc-delay = <192>;
+			qcom,pmic-revid = <&pmi8998_revid>;
+
+			pmi8998_flash0: qcom,flash_0 {
+				label = "flash";
+				qcom,led-name = "led:flash_0";
+				qcom,max-current = <1500>;
+				qcom,default-led-trigger = "flash0_trigger";
+				qcom,id = <0>;
+				qcom,current-ma = <1000>;
+				qcom,duration-ms = <1280>;
+				qcom,ires-ua = <12500>;
+				qcom,hdrm-voltage-mv = <325>;
+				qcom,hdrm-vol-hi-lo-win-mv = <100>;
+			};
+
+			pmi8998_flash1: qcom,flash_1 {
+				label = "flash";
+				qcom,led-name = "led:flash_1";
+				qcom,max-current = <1500>;
+				qcom,default-led-trigger = "flash1_trigger";
+				qcom,id = <1>;
+				qcom,current-ma = <1000>;
+				qcom,duration-ms = <1280>;
+				qcom,ires-ua = <12500>;
+				qcom,hdrm-voltage-mv = <325>;
+				qcom,hdrm-vol-hi-lo-win-mv = <100>;
+			};
+
+			pmi8998_flash2: qcom,flash_2 {
+				label = "flash";
+				qcom,led-name = "led:flash_2";
+				qcom,max-current = <750>;
+				qcom,default-led-trigger = "flash2_trigger";
+				qcom,id = <2>;
+				qcom,current-ma = <500>;
+				qcom,duration-ms = <1280>;
+				qcom,ires-ua = <12500>;
+				qcom,hdrm-voltage-mv = <325>;
+				qcom,hdrm-vol-hi-lo-win-mv = <100>;
+			};
+
+			pmi8998_torch0: qcom,torch_0 {
+				label = "torch";
+				qcom,led-name = "led:torch_0";
+				qcom,max-current = <500>;
+				qcom,default-led-trigger = "torch0_trigger";
+				qcom,id = <0>;
+				qcom,current-ma = <300>;
+				qcom,ires-ua = <12500>;
+				qcom,hdrm-voltage-mv = <325>;
+				qcom,hdrm-vol-hi-lo-win-mv = <100>;
+			};
+
+			pmi8998_torch1: qcom,torch_1 {
+				label = "torch";
+				qcom,led-name = "led:torch_1";
+				qcom,max-current = <500>;
+				qcom,default-led-trigger = "torch1_trigger";
+				qcom,id = <1>;
+				qcom,current-ma = <300>;
+				qcom,ires-ua = <12500>;
+				qcom,hdrm-voltage-mv = <325>;
+				qcom,hdrm-vol-hi-lo-win-mv = <100>;
+			};
+
+			pmi8998_torch2: qcom,torch_2 {
+				label = "torch";
+				qcom,led-name = "led:torch_2";
+				qcom,max-current = <500>;
+				qcom,default-led-trigger = "torch2_trigger";
+				qcom,id = <2>;
+				qcom,current-ma = <300>;
+				qcom,ires-ua = <12500>;
+				qcom,hdrm-voltage-mv = <325>;
+				qcom,hdrm-vol-hi-lo-win-mv = <100>;
+			};
+
+			pmi8998_switch0: qcom,led_switch_0 {
+				label = "switch";
+				qcom,led-name = "led:switch_0";
+				qcom,led-mask = <3>;
+				qcom,default-led-trigger = "switch0_trigger";
+			};
+
+			pmi8998_switch1: qcom,led_switch_1 {
+				label = "switch";
+				qcom,led-name = "led:switch_1";
+				qcom,led-mask = <4>;
+				qcom,default-led-trigger = "switch1_trigger";
+			};
+		};
+	};
+};
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/arch/arm64/boot/dts/qcom/msm-rdbg.dtsi	2019-01-22 16:16:21.179225361 +0100
@@ -0,0 +1,106 @@
+/* Copyright (c) 2015, 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+&soc {
+	smp2pgpio_rdbg_2_in: qcom,smp2pgpio-rdbg-2-in {
+		compatible = "qcom,smp2pgpio";
+		qcom,entry-name = "rdbg";
+		qcom,remote-pid = <2>;
+		qcom,is-inbound;
+		gpio-controller;
+		#gpio-cells = <2>;
+		interrupt-controller;
+		#interrupt-cells = <2>;
+	};
+
+	qcom,smp2pgpio_client_rdbg_2_in {
+		compatible = "qcom,smp2pgpio_client_rdbg_2_in";
+		gpios = <&smp2pgpio_rdbg_2_in 0 0>;
+	};
+
+	smp2pgpio_rdbg_2_out: qcom,smp2pgpio-rdbg-2-out {
+		compatible = "qcom,smp2pgpio";
+		qcom,entry-name = "rdbg";
+		qcom,remote-pid = <2>;
+		gpio-controller;
+		#gpio-cells = <2>;
+		interrupt-controller;
+		#interrupt-cells = <2>;
+	};
+
+	qcom,smp2pgpio_client_rdbg_2_out {
+		compatible = "qcom,smp2pgpio_client_rdbg_2_out";
+		gpios = <&smp2pgpio_rdbg_2_out 0 0>;
+	};
+
+	smp2pgpio_rdbg_1_in: qcom,smp2pgpio-rdbg-1-in {
+		compatible = "qcom,smp2pgpio";
+		qcom,entry-name = "rdbg";
+		qcom,remote-pid = <1>;
+		qcom,is-inbound;
+		gpio-controller;
+		#gpio-cells = <2>;
+		interrupt-controller;
+		#interrupt-cells = <2>;
+	};
+
+	qcom,smp2pgpio_client_rdbg_1_in {
+		compatible = "qcom,smp2pgpio_client_rdbg_1_in";
+		gpios = <&smp2pgpio_rdbg_1_in 0 0>;
+	};
+
+	smp2pgpio_rdbg_1_out: qcom,smp2pgpio-rdbg-1-out {
+		compatible = "qcom,smp2pgpio";
+		qcom,entry-name = "rdbg";
+		qcom,remote-pid = <1>;
+		gpio-controller;
+		#gpio-cells = <2>;
+		interrupt-controller;
+		#interrupt-cells = <2>;
+	};
+
+	qcom,smp2pgpio_client_rdbg_1_out {
+		compatible = "qcom,smp2pgpio_client_rdbg_1_out";
+		gpios = <&smp2pgpio_rdbg_1_out 0 0>;
+	};
+
+	smp2pgpio_rdbg_5_in: qcom,smp2pgpio-rdbg-5-in {
+		compatible = "qcom,smp2pgpio";
+		qcom,entry-name = "rdbg";
+		qcom,remote-pid = <5>;
+		qcom,is-inbound;
+		gpio-controller;
+		#gpio-cells = <2>;
+		interrupt-controller;
+		#interrupt-cells = <2>;
+	};
+
+	qcom,smp2pgpio_client_rdbg_5_in {
+		compatible = "qcom,smp2pgpio_client_rdbg_5_in";
+		gpios = <&smp2pgpio_rdbg_5_in 0 0>;
+	};
+
+	smp2pgpio_rdbg_5_out: qcom,smp2pgpio-rdbg-5-out {
+		compatible = "qcom,smp2pgpio";
+		qcom,entry-name = "rdbg";
+		qcom,remote-pid = <5>;
+		gpio-controller;
+		#gpio-cells = <2>;
+		interrupt-controller;
+		#interrupt-cells = <2>;
+	};
+
+	qcom,smp2pgpio_client_rdbg_5_out {
+		compatible = "qcom,smp2pgpio_client_rdbg_5_out";
+		gpios = <&smp2pgpio_rdbg_5_out 0 0>;
+	};
+};
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/arch/arm64/boot/dts/qcom/msm-smb138x.dtsi	2019-01-22 16:16:21.179225361 +0100
@@ -0,0 +1,137 @@
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <dt-bindings/interrupt-controller/irq.h>
+
+&i2c_7 {
+	status = "okay";
+	smb138x: qcom,smb138x@8 {
+		compatible = "qcom,i2c-pmic";
+		reg = <0x8>;
+		#address-cells = <1>;
+		#size-cells = <0>;
+		interrupt-parent = <&spmi_bus>;
+		interrupts = <0x0 0xd1 0x0 IRQ_TYPE_LEVEL_LOW>;
+		interrupt-names = "smb138x";
+		interrupt-controller;
+		#interrupt-cells = <3>;
+		qcom,periph-map = <0x10 0x11 0x12 0x13 0x14 0x16 0x36>;
+
+		smb138x_revid: qcom,revid@100 {
+			compatible = "qcom,qpnp-revid";
+			reg = <0x100 0x100>;
+		};
+
+		smb138x_tadc: qcom,tadc@3600 {
+			compatible = "qcom,tadc";
+			reg = <0x3600 0x100>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+			#io-channel-cells = <1>;
+			interrupt-parent = <&smb138x>;
+			interrupts = <0x36 0x0 IRQ_TYPE_EDGE_BOTH>;
+			interrupt-names = "eoc";
+
+			batt_temp@0 {
+				reg = <0>;
+				qcom,rbias = <68100>;
+				qcom,rtherm-at-25degc = <68000>;
+				qcom,beta-coefficient = <3450>;
+			};
+
+			skin_temp@1 {
+				reg = <1>;
+				qcom,rbias = <33000>;
+				qcom,rtherm-at-25degc = <68000>;
+				qcom,beta-coefficient = <3450>;
+			};
+
+			die_temp@2 {
+				reg = <2>;
+				qcom,scale = <(-1306)>;
+				qcom,offset = <397904>;
+			};
+
+			batt_i@3 {
+				reg = <3>;
+				qcom,channel = <3>;
+				qcom,scale = <(-20000000)>;
+			};
+
+			batt_v@4 {
+				reg = <4>;
+				qcom,scale = <5000000>;
+			};
+
+			input_i@5 {
+				reg = <5>;
+				qcom,scale = <14285714>;
+			};
+
+			input_v@6 {
+				reg = <6>;
+				qcom,scale = <25000000>;
+			};
+
+			otg_i@7 {
+				reg = <7>;
+				qcom,scale = <5714286>;
+			};
+		};
+
+		smb1381_charger: qcom,smb1381-charger@1000 {
+			compatible = "qcom,smb138x-parallel-slave";
+			qcom,pmic-revid = <&smb138x_revid>;
+			reg = <0x1000 0x700>;
+			#address-cells = <1>;
+			#size-cells = <1>;
+			interrupt-parent = <&smb138x>;
+			io-channels =
+				<&smb138x_tadc 1>,
+				<&smb138x_tadc 2>,
+				<&smb138x_tadc 3>,
+				<&smb138x_tadc 14>,
+				<&smb138x_tadc 15>,
+				<&smb138x_tadc 16>,
+				<&smb138x_tadc 17>;
+			io-channel-names =
+				"connector_temp",
+				"charger_temp",
+				"batt_i",
+				"connector_temp_thr1",
+				"connector_temp_thr2",
+				"connector_temp_thr3",
+				"charger_temp_max";
+
+			qcom,chgr@1000 {
+				reg = <0x1000 0x100>;
+				interrupts = <0x10 0x1 IRQ_TYPE_EDGE_RISING>;
+				interrupt-names = "chg-state-change";
+			};
+
+			qcom,chgr-misc@1600 {
+				reg = <0x1600 0x100>;
+				interrupts = <0x16 0x1 IRQ_TYPE_EDGE_RISING>,
+					     <0x16 0x6 IRQ_TYPE_EDGE_RISING>;
+				interrupt-names = "wdog-bark",
+						  "temperature-change";
+			};
+		};
+	};
+};
+
+&smb1381_charger {
+	smb138x_vbus: qcom,smb138x-vbus {
+		status = "disabled";
+		regulator-name = "smb138x-vbus";
+	};
+};
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/arch/arm64/boot/dts/qcom/skeleton64.dtsi	2019-01-22 16:16:21.215225687 +0100
@@ -0,0 +1,15 @@
+/*
+ * Skeleton device tree in the 64-bit version; the bare minimum
+ * needed to boot; just include and add a compatible value.  The
+ * bootloader will typically populate the memory node.
+ */
+
+/ {
+	#address-cells = <2>;
+	#size-cells = <2>;
+	cpus { };
+	soc { };
+	chosen { };
+	aliases { };
+	memory { device_type = "memory"; reg = <0 0 0 0>; };
+};
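+
+/*
+ * Illustrative use from a board .dts (the model/compatible values here are
+ * placeholders):
+ *
+ *	#include "skeleton64.dtsi"
+ *	/ {
+ *		model = "Example Board";
+ *		compatible = "vendor,board";
+ *	};
+ */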
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/arch/arm64/crypto/poly-hash-ce-core.S	2019-01-22 16:16:21.535228585 +0100
@@ -0,0 +1,163 @@
+/*
+ * Accelerated poly_hash implementation with ARMv8 PMULL instructions.
+ *
+ * Based on ghash-ce-core.S.
+ *
+ * Copyright (C) 2014 Linaro Ltd. <ard.biesheuvel@linaro.org>
+ * Copyright (C) 2017 Google, Inc. <ebiggers@google.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ */
+
+#include <linux/linkage.h>
+#include <asm/assembler.h>
+
+	KEY	.req	v0
+	KEY2	.req	v1
+	T1	.req	v2
+	T2	.req	v3
+	GSTAR	.req	v4
+	XL	.req	v5
+	XM	.req	v6
+	XH	.req	v7
+
+	.text
+	.arch		armv8-a+crypto
+
+	/* 16-byte aligned (2**4 = 16); not required, but might as well */
+	.align		4
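+	/* 0x87 = 10000111b = x^7 + x^2 + x + 1, one copy per 64-bit lane */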
+.Lgstar:
+	.quad		0x87, 0x87
+
+/*
+ * void pmull_poly_hash_update(le128 *digest, const le128 *key,
+ *			       const u8 *src, unsigned int blocks,
+ *			       unsigned int partial);
+ */
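+/*
+ * Arguments arrive per the AAPCS64 calling convention: x0 = digest,
+ * x1 = key, x2 = src, w3 = blocks, w4 = partial.  A nonzero 'partial'
+ * means the caller already XORed a pending block into the digest, so
+ * only the multiplication remains for that block.
+ */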
+ENTRY(pmull_poly_hash_update)
+
+	/* Load digest into XL */
+	ld1		{XL.16b}, [x0]
+
+	/* Load key into KEY */
+	ld1		{KEY.16b}, [x1]
+
+	/* Load g*(x) = g(x) + x^128 = x^7 + x^2 + x + 1 into both halves of
+	 * GSTAR */
+	adr		x1, .Lgstar
+	ld1		{GSTAR.2d}, [x1]
+
+	/* Set KEY2 to (KEY[1]+KEY[0]):(KEY[1]+KEY[0]).  This is needed for
+	 * Karatsuba multiplication. */
+	ext		KEY2.16b, KEY.16b, KEY.16b, #8
+	eor		KEY2.16b, KEY2.16b, KEY.16b
+
+	/* If 'partial' is nonzero, then we're finishing a pending block and
+	 * should go right to the multiplication. */
+	cbnz		w4, 1f
+
+0:
+	/* Add the next block from 'src' to the digest */
+	ld1		{T1.16b}, [x2], #16
+	eor		XL.16b, XL.16b, T1.16b
+	sub		w3, w3, #1
+
+1:
+	/*
+	 * Multiply the current 128-bit digest (a1:a0, in XL) by the 128-bit key
+	 * (b1:b0, in KEY) using Karatsuba multiplication.
+	 */
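+
+	/*
+	 * Karatsuba needs only three 64x64-bit carryless multiplies instead
+	 * of four, via the identity (all additions are XOR):
+	 *
+	 *	(a1*x^64 + a0)*(b1*x^64 + b0) =
+	 *		a1*b1*x^128 +
+	 *		((a1+a0)*(b1+b0) + a1*b1 + a0*b0)*x^64 +
+	 *		a0*b0
+	 *
+	 * Below, XH = a1*b1, XL = a0*b0, and XM = (a1+a0)*(b1+b0); XH and XL
+	 * are then folded into XM to form the middle term.
+	 */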
+
+	/* T1 = (a1+a0):(a1+a0) */
+	ext		T1.16b, XL.16b, XL.16b, #8
+	eor		T1.16b, T1.16b, XL.16b
+
+	/* XH = a1 * b1 */
+	pmull2		XH.1q, XL.2d, KEY.2d
+
+	/* XL = a0 * b0 */
+	pmull		XL.1q, XL.1d, KEY.1d
+
+	/* XM = (a1+a0) * (b1+b0) */
+	pmull		XM.1q, T1.1d, KEY2.1d
+
+	/* XM += (XH[0]:XL[1]) + XL + XH */
+	ext		T1.16b, XL.16b, XH.16b, #8
+	eor		T2.16b, XL.16b, XH.16b
+	eor		XM.16b, XM.16b, T1.16b
+	eor		XM.16b, XM.16b, T2.16b
+
+	/*
+	 * Now the 256-bit product is in XH[1]:XM:XL[0].  It represents a
+	 * polynomial over GF(2) with degree as large as 255.  We need to
+	 * compute its remainder modulo g(x) = x^128+x^7+x^2+x+1.  For this it
+	 * is sufficient to compute the remainder of the high half 'c(x)x^128'
+	 * add it to the low half.  To reduce the high half we use the Barrett
+	 * reduction method.  The basic idea is that we can express the
+	 * remainder p(x) as g(x)q(x) mod x^128, where q(x) = (c(x)x^128)/g(x).
+	 * As detailed in [1], to avoid having to divide by g(x) at runtime the
+	 * following equivalent expression can be derived:
+	 *
+	 *	p(x) = [ g*(x)((c(x)q+(x))/x^128) ] mod x^128
+	 *
+	 * where g*(x) = x^128+g(x) = x^7+x^2+x+1, and q+(x) = x^256/g(x) = g(x)
+	 * in this case.  This is also equivalent to:
+	 *
+	 *	p(x) = [ g*(x)((c(x)(x^128 + g*(x)))/x^128) ] mod x^128
+	 *	     = [ g*(x)(c(x) + (c(x)g*(x))/x^128) ] mod x^128
+	 *
+	 * Since deg g*(x) < 64:
+	 *
+	 *	p(x) = [ g*(x)(c(x) + ((c(x)/x^64)g*(x))/x^64) ] mod x^128
+	 *	     = [ g*(x)((c(x)/x^64)x^64 + (c(x) mod x^64) +
+	 *				((c(x)/x^64)g*(x))/x^64) ] mod x^128
+	 *
+	 * Letting t(x) = g*(x)(c(x)/x^64):
+	 *
+	 *	p(x) = [ t(x)x^64 + g*(x)((c(x) mod x^64) + t(x)/x^64) ] mod x^128
+	 *
+	 * Therefore, to do the reduction we only need to issue two 64-bit =>
+	 * 128-bit carryless multiplications: g*(x) times c(x)/x^64, and g*(x)
+	 * times ((c(x) mod x^64) + t(x)/x^64).  (Multiplication by x^64 doesn't
+	 * count since it is simply a shift or move.)
+	 *
+	 * An alternate reduction method, also based on Barrett reduction and
+	 * described in [1], uses only shifts and XORs --- no multiplications.
+	 * However, the method with multiplications requires fewer instructions
+	 * and is faster on processors with fast carryless multiplication.
+	 *
+	 * [1] "Intel Carry-Less Multiplication Instruction and its Usage for
+	 * Computing the GCM Mode",
+	 * https://software.intel.com/sites/default/files/managed/72/cc/clmul-wp-rev-2.02-2014-04-20.pdf
+	 */
+
+	/* 256-bit product is XH[1]:XM:XL[0], so c(x) is XH[1]:XM[1] */
+
+	/* T1 = t(x) = g*(x)(c(x)/x^64) */
+	pmull2		T1.1q, GSTAR.2d, XH.2d
+
+	/* T2 = g*(x)((c(x) mod x^64) + t(x)/x^64) */
+	eor		T2.16b, XM.16b, T1.16b
+	pmull2		T2.1q, GSTAR.2d, T2.2d
+
+	/* Make XL[0] be the low half of the 128-bit result by adding the low 64
+	 * bits of the T2 term to what was already there.  The 't(x)x^64' term
+	 * makes no difference, so skip it. */
+	eor		XL.16b, XL.16b, T2.16b
+
+	/* Make XL[1] be the high half of the 128-bit result by adding the high
+	 * 64 bits of the 't(x)x^64' and T2 terms to what was already in XM[0],
+	 * then moving XM[0] to XL[1]. */
+	eor		XM.16b, XM.16b, T1.16b
+	ext		T2.16b, T2.16b, T2.16b, #8
+	eor		XM.16b, XM.16b, T2.16b
+	mov		XL.d[1], XM.d[0]
+
+	/* If more blocks remain, then loop back to process the next block;
+	 * else, store the digest and return. */
+	cbnz		w3, 0b
+	st1		{XL.16b}, [x0]
+	ret
+ENDPROC(pmull_poly_hash_update)
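
For readers checking the arithmetic, the multiply-and-reduce step above can be
modelled in portable C.  This is a minimal sketch, not code from the patch:
u128, clmul64() and poly_hash_step() are hypothetical names, clmul64() stands
in for a single PMULL/PMULL2 instruction, and the 128x128-bit multiply is done
schoolbook-style with four products where the assembly saves one by using
Karatsuba.  The two trailing clmul64() calls mirror the T1 and T2 steps.

	#include <stdint.h>

	typedef struct { uint64_t lo, hi; } u128;

	/* Bit-serial 64x64 -> 128-bit carry-less multiply (models PMULL). */
	static u128 clmul64(uint64_t a, uint64_t b)
	{
		u128 r = { 0, 0 };
		int i;

		for (i = 0; i < 64; i++) {
			if ((b >> i) & 1) {
				r.lo ^= a << i;
				if (i)
					r.hi ^= a >> (64 - i);
			}
		}
		return r;
	}

	/* One Horner step: digest = (digest + block) * key mod g(x). */
	static u128 poly_hash_step(u128 digest, u128 key, u128 block)
	{
		uint64_t p0, p1, p2, p3;
		u128 lo, m0, m1, hi, t, u, res;

		digest.lo ^= block.lo;	/* addition in GF(2^128) is XOR */
		digest.hi ^= block.hi;

		lo = clmul64(digest.lo, key.lo);	/* a0 * b0 */
		m0 = clmul64(digest.lo, key.hi);	/* a0 * b1 */
		m1 = clmul64(digest.hi, key.lo);	/* a1 * b0 */
		hi = clmul64(digest.hi, key.hi);	/* a1 * b1 */
		p0 = lo.lo;
		p1 = lo.hi ^ m0.lo ^ m1.lo;
		p2 = hi.lo ^ m0.hi ^ m1.hi;
		p3 = hi.hi;

		/* Barrett reduction mod g(x) = x^128+x^7+x^2+x+1;
		 * 0x87 is g*(x), t and u correspond to T1 and T2. */
		t = clmul64(0x87, p3);
		u = clmul64(0x87, p2 ^ t.hi);
		res.lo = p0 ^ u.lo;
		res.hi = p1 ^ t.lo ^ u.hi;
		return res;
	}
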
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/arch/arm64/crypto/poly-hash-ce-glue.c	2019-01-22 16:16:21.535228585 +0100
@@ -0,0 +1,166 @@
+/*
+ * Accelerated poly_hash implementation with ARMv8 PMULL instructions.
+ *
+ * Based on ghash-ce-glue.c.
+ *
+ * poly_hash is part of the HEH (Hash-Encrypt-Hash) encryption mode, proposed in
+ * Internet Draft https://tools.ietf.org/html/draft-cope-heh-01.
+ *
+ * poly_hash is very similar to GHASH: both algorithms are keyed hashes which
+ * interpret their input data as coefficients of a polynomial over GF(2^128),
+ * then calculate a hash value by evaluating that polynomial at the point given
+ * by the key, e.g. using Horner's rule.  The difference is that poly_hash uses
+ * the more natural "ble" convention to represent GF(2^128) elements, whereas
+ * GHASH uses the less natural "lle" convention (see include/crypto/gf128mul.h).
+ * The ble convention makes it simpler to implement GF(2^128) multiplication.
+ *
+ * Copyright (C) 2014 Linaro Ltd. <ard.biesheuvel@linaro.org>
+ * Copyright (C) 2017 Google Inc. <ebiggers@google.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ */
+
+#include <asm/neon.h>
+#include <crypto/b128ops.h>
+#include <crypto/internal/hash.h>
+#include <linux/cpufeature.h>
+#include <linux/crypto.h>
+#include <linux/module.h>
+
+/*
+ * Note: in this algorithm we currently use 'le128' to represent GF(2^128)
+ * elements, even though poly_hash-generic uses 'be128'.  Both types are
+ * actually "wrong" because the elements are actually in 'ble' format, and there
+ * should be a ble type to represent this --- as well as lle, bbe, and lbe types
+ * for the other conventions for representing GF(2^128) elements.  But
+ * practically it doesn't matter which type we choose here, so we just use le128
+ * since it's arguably more accurate, while poly_hash-generic still has to use
+ * be128 because the generic GF(2^128) multiplication functions all take be128.
+ */
+
+struct poly_hash_desc_ctx {
+	le128 digest;
+	unsigned int count;
+};
+
+asmlinkage void pmull_poly_hash_update(le128 *digest, const le128 *key,
+				       const u8 *src, unsigned int blocks,
+				       unsigned int partial);
+
+static int poly_hash_setkey(struct crypto_shash *tfm,
+			    const u8 *key, unsigned int keylen)
+{
+	if (keylen != sizeof(le128)) {
+		crypto_shash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+		return -EINVAL;
+	}
+
+	memcpy(crypto_shash_ctx(tfm), key, sizeof(le128));
+	return 0;
+}
+
+static int poly_hash_init(struct shash_desc *desc)
+{
+	struct poly_hash_desc_ctx *ctx = shash_desc_ctx(desc);
+
+	ctx->digest = (le128) { 0 };
+	ctx->count = 0;
+	return 0;
+}
+
+static int poly_hash_update(struct shash_desc *desc, const u8 *src,
+			    unsigned int len)
+{
+	struct poly_hash_desc_ctx *ctx = shash_desc_ctx(desc);
+	unsigned int partial = ctx->count % sizeof(le128);
+	u8 *dst = (u8 *)&ctx->digest + partial;
+
+	ctx->count += len;
+
+	/* Finishing at least one block? */
+	if (partial + len >= sizeof(le128)) {
+		const le128 *key = crypto_shash_ctx(desc->tfm);
+
+		if (partial) {
+			/* Finish the pending block. */
+			unsigned int n = sizeof(le128) - partial;
+
+			len -= n;
+			do {
+				*dst++ ^= *src++;
+			} while (--n);
+		}
+
+		/*
+		 * Do the real work.  If 'partial' is nonzero, this starts by
+		 * multiplying 'digest' by 'key'.  Then for each additional full
+		 * block it adds the block to 'digest' and multiplies by 'key'.
+		 */
+		kernel_neon_begin_partial(8);
+		pmull_poly_hash_update(&ctx->digest, key, src,
+				       len / sizeof(le128), partial);
+		kernel_neon_end();
+
+		src += len - (len % sizeof(le128));
+		len %= sizeof(le128);
+		dst = (u8 *)&ctx->digest;
+	}
+
+	/* Continue adding the next block to 'digest'. */
+	while (len--)
+		*dst++ ^= *src++;
+	return 0;
+}
+
+static int poly_hash_final(struct shash_desc *desc, u8 *out)
+{
+	struct poly_hash_desc_ctx *ctx = shash_desc_ctx(desc);
+	unsigned int partial = ctx->count % sizeof(le128);
+
+	/* Finish the last block if needed. */
+	if (partial) {
+		const le128 *key = crypto_shash_ctx(desc->tfm);
+
+		kernel_neon_begin_partial(8);
+		pmull_poly_hash_update(&ctx->digest, key, NULL, 0, partial);
+		kernel_neon_end();
+	}
+
+	memcpy(out, &ctx->digest, sizeof(le128));
+	return 0;
+}
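
Taken together, init/update/final evaluate the message polynomial by Horner's
rule: each full block does digest = (digest + block) * key, and final()
multiplies in the zero-padded trailing partial block that update() buffered by
XOR-ing it into the digest.  A hypothetical in-kernel caller would drive this
through the generic shash API; the helper below is an illustrative sketch
(only the crypto_* calls are standard API), not code from the patch.

	#include <crypto/hash.h>

	static int poly_hash_digest_example(const u8 *key, const u8 *data,
					    unsigned int len, u8 *out)
	{
		struct crypto_shash *tfm;
		int err;

		tfm = crypto_alloc_shash("poly_hash", 0, 0);
		if (IS_ERR(tfm))
			return PTR_ERR(tfm);

		err = crypto_shash_setkey(tfm, key, 16); /* keylen must be 16 */
		if (!err) {
			SHASH_DESC_ON_STACK(desc, tfm);

			desc->tfm = tfm;
			desc->flags = 0;
			err = crypto_shash_digest(desc, data, len, out);
		}
		crypto_free_shash(tfm);
		return err;
	}
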
+
+static struct shash_alg poly_hash_alg = {
+	.digestsize	= sizeof(le128),
+	.init		= poly_hash_init,
+	.update		= poly_hash_update,
+	.final		= poly_hash_final,
+	.setkey		= poly_hash_setkey,
+	.descsize	= sizeof(struct poly_hash_desc_ctx),
+	.base		= {
+		.cra_name		= "poly_hash",
+		.cra_driver_name	= "poly_hash-ce",
+		.cra_priority		= 300,
+		.cra_ctxsize		= sizeof(le128),
+		.cra_module		= THIS_MODULE,
+	},
+};
+
+static int __init poly_hash_ce_mod_init(void)
+{
+	return crypto_register_shash(&poly_hash_alg);
+}
+
+static void __exit poly_hash_ce_mod_exit(void)
+{
+	crypto_unregister_shash(&poly_hash_alg);
+}
+
+MODULE_DESCRIPTION("Polynomial evaluation hash using ARMv8 Crypto Extensions");
+MODULE_AUTHOR("Eric Biggers <ebiggers@google.com>");
+MODULE_LICENSE("GPL v2");
+
+module_cpu_feature_match(PMULL, poly_hash_ce_mod_init);
+module_exit(poly_hash_ce_mod_exit);
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/arch/arm64/include/asm/app_api.h	2019-01-22 16:16:21.535228585 +0100
@@ -0,0 +1,50 @@
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __ASM_APP_API_H
+#define __ASM_APP_API_H
+
+#include <linux/types.h>
+#include <linux/sched.h>
+#include <linux/fs.h>
+
+#define APP_SETTING_BIT		30
+#define MAX_ENTRIES		10
+
+/*
+ * APIs to set / clear the app setting bits
+ * in the register.
+ */
+#ifdef CONFIG_MSM_APP_API
+extern void set_app_setting_bit(uint32_t bit);
+extern void clear_app_setting_bit(uint32_t bit);
+extern void set_app_setting_bit_for_32bit_apps(void);
+extern void clear_app_setting_bit_for_32bit_apps(void);
+#else
+static inline void set_app_setting_bit(uint32_t bit) {}
+static inline void clear_app_setting_bit(uint32_t bit) {}
+static inline void set_app_setting_bit_for_32bit_apps(void) {}
+static inline void clear_app_setting_bit_for_32bit_apps(void) {}
+#endif
+
+#ifdef CONFIG_MSM_APP_SETTINGS
+extern void switch_app_setting_bit(struct task_struct *prev,
+				   struct task_struct *next);
+extern void switch_32bit_app_setting_bit(struct task_struct *prev,
+				   struct task_struct *next);
+extern void apply_app_setting_bit(struct file *file);
+extern bool use_app_setting;
+extern bool use_32bit_app_setting;
+extern bool use_32bit_app_setting_pro;
+#endif
+
+#endif
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/arch/arm64/include/asm/brk-imm.h	2019-01-22 16:16:21.535228585 +0100
@@ -0,0 +1,25 @@
+/*
+ * Copyright (C) 2012 ARM Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ASM_BRK_IMM_H
+#define __ASM_BRK_IMM_H
+
+/*
+ * #imm16 values used for BRK instruction generation
+ * Allowed values for kgdb are 0x400 - 0x7ff
+ * 0x100: for triggering a fault on purpose (reserved)
+ * 0x400: for dynamic BRK instruction
+ * 0x401: for compile time BRK instruction
+ * 0x800: kernel-mode BUG() and WARN() traps
+ */
+#define FAULT_BRK_IMM			0x100
+#define KGDB_DYN_DBG_BRK_IMM		0x400
+#define KGDB_COMPILED_DBG_BRK_IMM	0x401
+#define BUG_BRK_IMM			0x800
+
+#endif
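
For context, an illustrative sketch (not part of this header): arm64's BUG()
is built on this mechanism, embedding BUG_BRK_IMM as the immediate of a BRK
instruction that the kernel's debug-monitors code later decodes.

	/* Simplified from the arm64 BUG() implementation. */
	asm volatile("brk %[imm]" : : [imm] "i" (BUG_BRK_IMM));
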
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/arch/arm64/include/asm/checksum.h	2019-01-22 16:16:21.535228585 +0100
@@ -0,0 +1,51 @@
+/*
+ * Copyright (C) 2016 ARM Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef __ASM_CHECKSUM_H
+#define __ASM_CHECKSUM_H
+
+#include <linux/types.h>
+
+static inline __sum16 csum_fold(__wsum csum)
+{
+	u32 sum = (__force u32)csum;
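+	/* Adding the sum to itself rotated by 16 bits leaves the 16-bit
+	 * ones' complement sum (end-around carry included) in the upper
+	 * half, which is returned inverted. */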
+	sum += (sum >> 16) | (sum << 16);
+	return ~(__force __sum16)(sum >> 16);
+}
+#define csum_fold csum_fold
+
+static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
+{
+	__uint128_t tmp;
+	u64 sum;
+
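+	/* Sum the first 16 bytes of the header as a single 128-bit load;
+	 * the rotate-and-add below folds that to 64 bits using the same
+	 * end-around-carry trick as csum_fold().  The remaining 32-bit
+	 * words (at least one, since a valid ihl is >= 5) are then
+	 * accumulated into 'sum' before the final fold. */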
+	tmp = *(const __uint128_t *)iph;
+	iph += 16;
+	ihl -= 4;
+	tmp += ((tmp >> 64) | (tmp << 64));
+	sum = tmp >> 64;
+	do {
+		sum += *(const u32 *)iph;
+		iph += 4;
+	} while (--ihl);
+
+	sum += ((sum >> 32) | (sum << 32));
+	return csum_fold(sum >> 32);
+}
+#define ip_fast_csum ip_fast_csum
+
+#include <asm-generic/checksum.h>
+
+#endif	/* __ASM_CHECKSUM_H */
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/arch/arm64/include/asm/current.h	2019-01-22 16:16:21.535228585 +0100
@@ -0,0 +1,35 @@
+#ifndef __ASM_CURRENT_H
+#define __ASM_CURRENT_H
+
+#include <linux/compiler.h>
+
+#include <asm/sysreg.h>
+
+#ifndef __ASSEMBLY__
+
+#ifdef CONFIG_THREAD_INFO_IN_TASK
+struct task_struct;
+
+/*
+ * We don't use read_sysreg() as we want the compiler to cache the value where
+ * possible.
+ */
+static __always_inline struct task_struct *get_current(void)
+{
+	unsigned long sp_el0;
+
+	asm ("mrs %0, sp_el0" : "=r" (sp_el0));
+
+	return (struct task_struct *)sp_el0;
+}
+#define current get_current()
+#else
+#include <linux/thread_info.h>
+#define get_current() (current_thread_info()->task)
+#define current get_current()
+#endif
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* __ASM_CURRENT_H */
+
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/arch/arm64/include/asm/debugv8.h	2019-01-22 16:16:21.539228621 +0100
@@ -0,0 +1,229 @@
+/* Copyright (c) 2014, 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __ASM_DEBUGV8_H
+#define __ASM_DEBUGV8_H
+
+#include <linux/types.h>
+
+/* 32-bit register reads for AArch64 */
+#define dbg_readl(reg)			RSYSL_##reg()
+/* 64-bit register reads for AArch64 */
+#define dbg_readq(reg)			RSYSQ_##reg()
+/* 32- and 64-bit register writes for AArch64 */
+#define dbg_write(val, reg)		WSYS_##reg(val)
+
+#define MRSL(reg)				\
+({						\
+uint32_t val;					\
+asm volatile("mrs %0, "#reg : "=r" (val));	\
+val;						\
+})
+
+#define MRSQ(reg)				\
+({						\
+uint64_t val;					\
+asm volatile("mrs %0, "#reg : "=r" (val));	\
+val;						\
+})
+
+#define MSR(val, reg)				\
+({						\
+asm volatile("msr "#reg", %0" : : "r" (val));	\
+})
+
+/*
+ * Debug Feature Register
+ *
+ * Read only
+ */
+#define RSYSQ_ID_AA64DFR0_EL1()		MRSQ(ID_AA64DFR0_EL1)
+
+/*
+ * Debug Registers
+ *
+ * Available only in DBGv8
+ *
+ * Read only
+ * MDCCSR_EL0, MDRAR_EL1, OSLSR_EL1, DBGDTRRX_EL0, DBGAUTHSTATUS_EL1
+ *
+ * Write only
+ * DBGDTRTX_EL0, OSLAR_EL1
+ */
+/* 32 bit registers */
+#define RSYSL_DBGDTRRX_EL0()		MRSL(DBGDTRRX_EL0)
+#define RSYSL_MDCCSR_EL0()		MRSL(MDCCSR_EL0)
+#define RSYSL_MDSCR_EL1()		MRSL(MDSCR_EL1)
+#define RSYSL_OSDTRRX_EL1()		MRSL(OSDTRRX_EL1)
+#define RSYSL_OSDTRTX_EL1()		MRSL(OSDTRTX_EL1)
+#define RSYSL_OSDLR_EL1()		MRSL(OSDLR_EL1)
+#define RSYSL_OSLSR_EL1()		MRSL(OSLSR_EL1)
+#define RSYSL_MDCCINT_EL1()		MRSL(MDCCINT_EL1)
+#define RSYSL_OSECCR_EL1()		MRSL(OSECCR_EL1)
+#define RSYSL_DBGPRCR_EL1()		MRSL(DBGPRCR_EL1)
+#define RSYSL_DBGBCR0_EL1()		MRSL(DBGBCR0_EL1)
+#define RSYSL_DBGBCR1_EL1()		MRSL(DBGBCR1_EL1)
+#define RSYSL_DBGBCR2_EL1()		MRSL(DBGBCR2_EL1)
+#define RSYSL_DBGBCR3_EL1()		MRSL(DBGBCR3_EL1)
+#define RSYSL_DBGBCR4_EL1()		MRSL(DBGBCR4_EL1)
+#define RSYSL_DBGBCR5_EL1()		MRSL(DBGBCR5_EL1)
+#define RSYSL_DBGBCR6_EL1()		MRSL(DBGBCR6_EL1)
+#define RSYSL_DBGBCR7_EL1()		MRSL(DBGBCR7_EL1)
+#define RSYSL_DBGBCR8_EL1()		MRSL(DBGBCR8_EL1)
+#define RSYSL_DBGBCR9_EL1()		MRSL(DBGBCR9_EL1)
+#define RSYSL_DBGBCR10_EL1()		MRSL(DBGBCR10_EL1)
+#define RSYSL_DBGBCR11_EL1()		MRSL(DBGBCR11_EL1)
+#define RSYSL_DBGBCR12_EL1()		MRSL(DBGBCR12_EL1)
+#define RSYSL_DBGBCR13_EL1()		MRSL(DBGBCR13_EL1)
+#define RSYSL_DBGBCR14_EL1()		MRSL(DBGBCR14_EL1)
+#define RSYSL_DBGBCR15_EL1()		MRSL(DBGBCR15_EL1)
+#define RSYSL_DBGWCR0_EL1()		MRSL(DBGWCR0_EL1)
+#define RSYSL_DBGWCR1_EL1()		MRSL(DBGWCR1_EL1)
+#define RSYSL_DBGWCR2_EL1()		MRSL(DBGWCR2_EL1)
+#define RSYSL_DBGWCR3_EL1()		MRSL(DBGWCR3_EL1)
+#define RSYSL_DBGWCR4_EL1()		MRSL(DBGWCR4_EL1)
+#define RSYSL_DBGWCR5_EL1()		MRSL(DBGWCR5_EL1)
+#define RSYSL_DBGWCR6_EL1()		MRSL(DBGWCR6_EL1)
+#define RSYSL_DBGWCR7_EL1()		MRSL(DBGWCR7_EL1)
+#define RSYSL_DBGWCR8_EL1()		MRSL(DBGWCR8_EL1)
+#define RSYSL_DBGWCR9_EL1()		MRSL(DBGWCR9_EL1)
+#define RSYSL_DBGWCR10_EL1()		MRSL(DBGWCR10_EL1)
+#define RSYSL_DBGWCR11_EL1()		MRSL(DBGWCR11_EL1)
+#define RSYSL_DBGWCR12_EL1()		MRSL(DBGWCR12_EL1)
+#define RSYSL_DBGWCR13_EL1()		MRSL(DBGWCR13_EL1)
+#define RSYSL_DBGWCR14_EL1()		MRSL(DBGWCR14_EL1)
+#define RSYSL_DBGWCR15_EL1()		MRSL(DBGWCR15_EL1)
+#define RSYSL_DBGCLAIMSET_EL1()		MRSL(DBGCLAIMSET_EL1)
+#define RSYSL_DBGCLAIMCLR_EL1()		MRSL(DBGCLAIMCLR_EL1)
+#define RSYSL_DBGAUTHSTATUS_EL1()	MRSL(DBGAUTHSTATUS_EL1)
+#define RSYSL_DBGVCR32_EL2()		MRSL(DBGVCR32_EL2)
+#define RSYSL_MDCR_EL2()		MRSL(MDCR_EL2)
+#define RSYSL_MDCR_EL3()		MRSL(MDCR_EL3)
+/* 64 bit registers */
+#define RSYSQ_DBGDTR_EL0()		MRSQ(DBGDTR_EL0)
+#define RSYSQ_MDRAR_EL1()		MRSQ(MDRAR_EL1)
+#define RSYSQ_DBGBVR0_EL1()		MRSQ(DBGBVR0_EL1)
+#define RSYSQ_DBGBVR1_EL1()		MRSQ(DBGBVR1_EL1)
+#define RSYSQ_DBGBVR2_EL1()		MRSQ(DBGBVR2_EL1)
+#define RSYSQ_DBGBVR3_EL1()		MRSQ(DBGBVR3_EL1)
+#define RSYSQ_DBGBVR4_EL1()		MRSQ(DBGBVR4_EL1)
+#define RSYSQ_DBGBVR5_EL1()		MRSQ(DBGBVR5_EL1)
+#define RSYSQ_DBGBVR6_EL1()		MRSQ(DBGBVR6_EL1)
+#define RSYSQ_DBGBVR7_EL1()		MRSQ(DBGBVR7_EL1)
+#define RSYSQ_DBGBVR8_EL1()		MRSQ(DBGBVR8_EL1)
+#define RSYSQ_DBGBVR9_EL1()		MRSQ(DBGBVR9_EL1)
+#define RSYSQ_DBGBVR10_EL1()		MRSQ(DBGBVR10_EL1)
+#define RSYSQ_DBGBVR11_EL1()		MRSQ(DBGBVR11_EL1)
+#define RSYSQ_DBGBVR12_EL1()		MRSQ(DBGBVR12_EL1)
+#define RSYSQ_DBGBVR13_EL1()		MRSQ(DBGBVR13_EL1)
+#define RSYSQ_DBGBVR14_EL1()		MRSQ(DBGBVR14_EL1)
+#define RSYSQ_DBGBVR15_EL1()		MRSQ(DBGBVR15_EL1)
+#define RSYSQ_DBGWVR0_EL1()		MRSQ(DBGWVR0_EL1)
+#define RSYSQ_DBGWVR1_EL1()		MRSQ(DBGWVR1_EL1)
+#define RSYSQ_DBGWVR2_EL1()		MRSQ(DBGWVR2_EL1)
+#define RSYSQ_DBGWVR3_EL1()		MRSQ(DBGWVR3_EL1)
+#define RSYSQ_DBGWVR4_EL1()		MRSQ(DBGWVR4_EL1)
+#define RSYSQ_DBGWVR5_EL1()		MRSQ(DBGWVR5_EL1)
+#define RSYSQ_DBGWVR6_EL1()		MRSQ(DBGWVR6_EL1)
+#define RSYSQ_DBGWVR7_EL1()		MRSQ(DBGWVR7_EL1)
+#define RSYSQ_DBGWVR8_EL1()		MRSQ(DBGWVR8_EL1)
+#define RSYSQ_DBGWVR9_EL1()		MRSQ(DBGWVR9_EL1)
+#define RSYSQ_DBGWVR10_EL1()		MRSQ(DBGWVR10_EL1)
+#define RSYSQ_DBGWVR11_EL1()		MRSQ(DBGWVR11_EL1)
+#define RSYSQ_DBGWVR12_EL1()		MRSQ(DBGWVR12_EL1)
+#define RSYSQ_DBGWVR13_EL1()		MRSQ(DBGWVR13_EL1)
+#define RSYSQ_DBGWVR14_EL1()		MRSQ(DBGWVR14_EL1)
+#define RSYSQ_DBGWVR15_EL1()		MRSQ(DBGWVR15_EL1)
+
+/* 32 bit registers */
+#define WSYS_DBGDTRTX_EL0(val)		MSR(val, DBGDTRTX_EL0)
+#define WSYS_MDCCINT_EL1(val)		MSR(val, MDCCINT_EL1)
+#define WSYS_MDSCR_EL1(val)		MSR(val, MDSCR_EL1)
+#define WSYS_OSDTRRX_EL1(val)		MSR(val, OSDTRRX_EL1)
+#define WSYS_OSDTRTX_EL1(val)		MSR(val, OSDTRTX_EL1)
+#define WSYS_OSDLR_EL1(val)		MSR(val, OSDLR_EL1)
+#define WSYS_OSECCR_EL1(val)		MSR(val, OSECCR_EL1)
+#define WSYS_DBGPRCR_EL1(val)		MSR(val, DBGPRCR_EL1)
+#define WSYS_DBGBCR0_EL1(val)		MSR(val, DBGBCR0_EL1)
+#define WSYS_DBGBCR1_EL1(val)		MSR(val, DBGBCR1_EL1)
+#define WSYS_DBGBCR2_EL1(val)		MSR(val, DBGBCR2_EL1)
+#define WSYS_DBGBCR3_EL1(val)		MSR(val, DBGBCR3_EL1)
+#define WSYS_DBGBCR4_EL1(val)		MSR(val, DBGBCR4_EL1)
+#define WSYS_DBGBCR5_EL1(val)		MSR(val, DBGBCR5_EL1)
+#define WSYS_DBGBCR6_EL1(val)		MSR(val, DBGBCR6_EL1)
+#define WSYS_DBGBCR7_EL1(val)		MSR(val, DBGBCR7_EL1)
+#define WSYS_DBGBCR8_EL1(val)		MSR(val, DBGBCR8_EL1)
+#define WSYS_DBGBCR9_EL1(val)		MSR(val, DBGBCR9_EL1)
+#define WSYS_DBGBCR10_EL1(val)		MSR(val, DBGBCR10_EL1)
+#define WSYS_DBGBCR11_EL1(val)		MSR(val, DBGBCR11_EL1)
+#define WSYS_DBGBCR12_EL1(val)		MSR(val, DBGBCR12_EL1)
+#define WSYS_DBGBCR13_EL1(val)		MSR(val, DBGBCR13_EL1)
+#define WSYS_DBGBCR14_EL1(val)		MSR(val, DBGBCR14_EL1)
+#define WSYS_DBGBCR15_EL1(val)		MSR(val, DBGBCR15_EL1)
+#define WSYS_DBGWCR0_EL1(val)		MSR(val, DBGWCR0_EL1)
+#define WSYS_DBGWCR1_EL1(val)		MSR(val, DBGWCR1_EL1)
+#define WSYS_DBGWCR2_EL1(val)		MSR(val, DBGWCR2_EL1)
+#define WSYS_DBGWCR3_EL1(val)		MSR(val, DBGWCR3_EL1)
+#define WSYS_DBGWCR4_EL1(val)		MSR(val, DBGWCR4_EL1)
+#define WSYS_DBGWCR5_EL1(val)		MSR(val, DBGWCR5_EL1)
+#define WSYS_DBGWCR6_EL1(val)		MSR(val, DBGWCR6_EL1)
+#define WSYS_DBGWCR7_EL1(val)		MSR(val, DBGWCR7_EL1)
+#define WSYS_DBGWCR8_EL1(val)		MSR(val, DBGWCR8_EL1)
+#define WSYS_DBGWCR9_EL1(val)		MSR(val, DBGWCR9_EL1)
+#define WSYS_DBGWCR10_EL1(val)		MSR(val, DBGWCR10_EL1)
+#define WSYS_DBGWCR11_EL1(val)		MSR(val, DBGWCR11_EL1)
+#define WSYS_DBGWCR12_EL1(val)		MSR(val, DBGWCR12_EL1)
+#define WSYS_DBGWCR13_EL1(val)		MSR(val, DBGWCR13_EL1)
+#define WSYS_DBGWCR14_EL1(val)		MSR(val, DBGWCR14_EL1)
+#define WSYS_DBGWCR15_EL1(val)		MSR(val, DBGWCR15_EL1)
+#define WSYS_DBGCLAIMSET_EL1(val)	MSR(val, DBGCLAIMSET_EL1)
+#define WSYS_DBGCLAIMCLR_EL1(val)	MSR(val, DBGCLAIMCLR_EL1)
+#define WSYS_OSLAR_EL1(val)		MSR(val, OSLAR_EL1)
+#define WSYS_DBGVCR32_EL2(val)		MSR(val, DBGVCR32_EL2)
+#define WSYS_MDCR_EL2(val)		MSR(val, MDCR_EL2)
+#define WSYS_MDCR_EL3(val)		MSR(val, MDCR_EL3)
+/* 64 bit registers */
+#define WSYS_DBGDTR_EL0(val)		MSR(val, DBGDTR_EL0)
+#define WSYS_DBGBVR0_EL1(val)		MSR(val, DBGBVR0_EL1)
+#define WSYS_DBGBVR1_EL1(val)		MSR(val, DBGBVR1_EL1)
+#define WSYS_DBGBVR2_EL1(val)		MSR(val, DBGBVR2_EL1)
+#define WSYS_DBGBVR3_EL1(val)		MSR(val, DBGBVR3_EL1)
+#define WSYS_DBGBVR4_EL1(val)		MSR(val, DBGBVR4_EL1)
+#define WSYS_DBGBVR5_EL1(val)		MSR(val, DBGBVR5_EL1)
+#define WSYS_DBGBVR6_EL1(val)		MSR(val, DBGBVR6_EL1)
+#define WSYS_DBGBVR7_EL1(val)		MSR(val, DBGBVR7_EL1)
+#define WSYS_DBGBVR8_EL1(val)		MSR(val, DBGBVR8_EL1)
+#define WSYS_DBGBVR9_EL1(val)		MSR(val, DBGBVR9_EL1)
+#define WSYS_DBGBVR10_EL1(val)		MSR(val, DBGBVR10_EL1)
+#define WSYS_DBGBVR11_EL1(val)		MSR(val, DBGBVR11_EL1)
+#define WSYS_DBGBVR12_EL1(val)		MSR(val, DBGBVR12_EL1)
+#define WSYS_DBGBVR13_EL1(val)		MSR(val, DBGBVR13_EL1)
+#define WSYS_DBGBVR14_EL1(val)		MSR(val, DBGBVR14_EL1)
+#define WSYS_DBGBVR15_EL1(val)		MSR(val, DBGBVR15_EL1)
+#define WSYS_DBGWVR0_EL1(val)		MSR(val, DBGWVR0_EL1)
+#define WSYS_DBGWVR1_EL1(val)		MSR(val, DBGWVR1_EL1)
+#define WSYS_DBGWVR2_EL1(val)		MSR(val, DBGWVR2_EL1)
+#define WSYS_DBGWVR3_EL1(val)		MSR(val, DBGWVR3_EL1)
+#define WSYS_DBGWVR4_EL1(val)		MSR(val, DBGWVR4_EL1)
+#define WSYS_DBGWVR5_EL1(val)		MSR(val, DBGWVR5_EL1)
+#define WSYS_DBGWVR6_EL1(val)		MSR(val, DBGWVR6_EL1)
+#define WSYS_DBGWVR7_EL1(val)		MSR(val, DBGWVR7_EL1)
+#define WSYS_DBGWVR8_EL1(val)		MSR(val, DBGWVR8_EL1)
+#define WSYS_DBGWVR9_EL1(val)		MSR(val, DBGWVR9_EL1)
+#define WSYS_DBGWVR10_EL1(val)		MSR(val, DBGWVR10_EL1)
+#define WSYS_DBGWVR11_EL1(val)		MSR(val, DBGWVR11_EL1)
+#define WSYS_DBGWVR12_EL1(val)		MSR(val, DBGWVR12_EL1)
+#define WSYS_DBGWVR13_EL1(val)		MSR(val, DBGWVR13_EL1)
+#define WSYS_DBGWVR14_EL1(val)		MSR(val, DBGWVR14_EL1)
+#define WSYS_DBGWVR15_EL1(val)		MSR(val, DBGWVR15_EL1)
+
+#endif
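
The net effect is one strongly named accessor per register, so a caller never
spells out raw mrs/msr strings.  A hypothetical fragment (illustrative only,
not from this patch):

	uint32_t mdscr;

	mdscr = dbg_readl(MDSCR_EL1);	/* expands to RSYSL_MDSCR_EL1(): mrs */
	dbg_write(mdscr, MDSCR_EL1);	/* expands to WSYS_MDSCR_EL1(): msr */
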
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/arch/arm64/include/asm/dma-contiguous.h	2019-01-22 16:16:21.539228621 +0100
@@ -0,0 +1,24 @@
+/*
+ * Copyright (c) 2013,2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _ASM_DMA_CONTIGUOUS_H
+#define _ASM_DMA_CONTIGUOUS_H
+
+#ifdef __KERNEL__
+
+#include <linux/types.h>
+
+void dma_contiguous_early_fixup(phys_addr_t base, unsigned long size);
+
+#endif
+#endif
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/arch/arm64/include/asm/dma-iommu.h	2019-01-22 16:16:21.539228621 +0100
@@ -0,0 +1,64 @@
+#ifndef ASMARM_DMA_IOMMU_H
+#define ASMARM_DMA_IOMMU_H
+
+#ifdef __KERNEL__
+
+#include <linux/err.h>
+#include <linux/mm_types.h>
+#include <linux/scatterlist.h>
+#include <linux/dma-debug.h>
+#include <linux/kmemcheck.h>
+#include <linux/kref.h>
+
+struct dma_iommu_mapping {
+	/* iommu specific data */
+	struct iommu_domain	*domain;
+
+	void			*bitmap;
+	size_t			bits;
+	dma_addr_t		base;
+
+	spinlock_t		lock;
+	struct kref		kref;
+#ifdef CONFIG_IOMMU_IO_PGTABLE_FAST
+	struct dma_fast_smmu_mapping *fast;
+#endif
+};
+
+#ifdef CONFIG_ARM64_DMA_USE_IOMMU
+
+struct dma_iommu_mapping *
+arm_iommu_create_mapping(struct bus_type *bus, dma_addr_t base, size_t size);
+
+void arm_iommu_release_mapping(struct dma_iommu_mapping *mapping);
+
+int arm_iommu_attach_device(struct device *dev,
+					struct dma_iommu_mapping *mapping);
+void arm_iommu_detach_device(struct device *dev);
+
+#else  /* !CONFIG_ARM64_DMA_USE_IOMMU */
+
+static inline struct dma_iommu_mapping *
+arm_iommu_create_mapping(struct bus_type *bus, dma_addr_t base, size_t size)
+{
+	return ERR_PTR(-ENOMEM);
+}
+
+static inline void arm_iommu_release_mapping(struct dma_iommu_mapping *mapping)
+{
+}
+
+static inline int arm_iommu_attach_device(struct device *dev,
+			struct dma_iommu_mapping *mapping)
+{
+	return -ENODEV;
+}
+
+static inline void arm_iommu_detach_device(struct device *dev)
+{
+}
+
+#endif	/* CONFIG_ARM64_DMA_USE_IOMMU */
+
+#endif /* __KERNEL__ */
+#endif
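
Usage follows the 32-bit ARM dma-iommu API.  A hypothetical driver fragment
(a sketch only: the IOVA base and size are made up, SZ_256M comes from
linux/sizes.h, and 'dev' is the client device):

	struct dma_iommu_mapping *mapping;
	int ret;

	/* Reserve a 256 MB IOVA window starting at 0x80000000. */
	mapping = arm_iommu_create_mapping(&platform_bus_type, 0x80000000,
					   SZ_256M);
	if (IS_ERR(mapping))
		return PTR_ERR(mapping);

	/* Route the device's DMA through the IOMMU mapping. */
	ret = arm_iommu_attach_device(dev, mapping);
	if (ret)
		arm_iommu_release_mapping(mapping);
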
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/arch/arm64/include/asm/edac.h	2019-01-22 16:16:21.539228621 +0100
@@ -0,0 +1,28 @@
+/* Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef ASM_EDAC_H
+#define ASM_EDAC_H
+
+#if defined(CONFIG_EDAC_CORTEX_ARM64) && \
+	!defined(CONFIG_EDAC_CORTEX_ARM64_DBE_IRQ_ONLY)
+void arm64_check_cache_ecc(void *info);
+#else
+static inline void arm64_check_cache_ecc(void *info) { }
+#endif
+
+static inline void atomic_scrub(void *addr, int size)
+{
+	return;
+}
+
+#endif
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/arch/arm64/include/asm/etmv4x.h	2019-01-22 16:16:21.539228621 +0100
@@ -0,0 +1,385 @@
+/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __ASM_ETMV4X_H
+#define __ASM_ETMV4X_H
+
+#include <linux/types.h>
+
+/* 32 bit register reads for AArch64 */
+#define trc_readl(reg)			RSYSL_##reg()
+/* 64 bit register reads for AArch64 */
+#define trc_readq(reg)			RSYSQ_##reg()
+/* 32 and 64 bit register writes for AArch64 */
+#define trc_write(val, reg)		WSYS_##reg(val)
+
+#define MRSL(op0, op1, crn, crm, op2)					     \
+({									     \
+uint32_t val;								     \
+asm volatile("mrs %0, S"#op0"_"#op1"_"#crn"_"#crm"_"#op2 : "=r" (val));      \
+val;									     \
+})
+
+#define MRSQ(op0, op1, crn, crm, op2)					     \
+({									     \
+uint64_t val;								     \
+asm volatile("mrs %0, S"#op0"_"#op1"_"#crn"_"#crm"_"#op2 : "=r" (val));      \
+val;									     \
+})
+
+#define MSR(val, op0, op1, crn, crm, op2)				     \
+({									     \
+asm volatile("msr S"#op0"_"#op1"_"#crn"_"#crm"_"#op2", %0" : : "r" (val));   \
+})
+
+/* Clock and Power Management Register */
+#define RSYSL_CPMR_EL1()		MRSL(3, 7, c15, c0, 5)
+#define WSYS_CPMR_EL1(val)		MSR(val, 3, 7, c15, c0, 5)
+
+/*
+ * ETMv4 Registers
+ *
+ * Read only
+ * ETMAUTHSTATUS, ETMDEVARCH, ETMDEVID, ETMIDRn[0-13], ETMOSLSR, ETMSTATR
+ *
+ * Write only
+ * ETMOSLAR
+ */
+/* 32 bit registers */
+#define RSYSL_ETMAUTHSTATUS()		MRSL(2, 1, c7, c14, 6)
+#define RSYSL_ETMAUXCTLR()		MRSL(2, 1, c0, c6, 0)
+#define RSYSL_ETMCCCTLR()		MRSL(2, 1, c0, c14, 0)
+#define RSYSL_ETMCIDCCTLR0()		MRSL(2, 1, c3, c0, 2)
+#define RSYSL_ETMCNTCTLR0()		MRSL(2, 1, c0, c4, 5)
+#define RSYSL_ETMCNTCTLR1()		MRSL(2, 1, c0, c5, 5)
+#define RSYSL_ETMCNTCTLR2()		MRSL(2, 1, c0, c6, 5)
+#define RSYSL_ETMCNTCTLR3()		MRSL(2, 1, c0, c7, 5)
+#define RSYSL_ETMCNTRLDVR0()		MRSL(2, 1, c0, c0, 5)
+#define RSYSL_ETMCNTRLDVR1()		MRSL(2, 1, c0, c1, 5)
+#define RSYSL_ETMCNTRLDVR2()		MRSL(2, 1, c0, c2, 5)
+#define RSYSL_ETMCNTRLDVR3()		MRSL(2, 1, c0, c3, 5)
+#define RSYSL_ETMCNTVR0()		MRSL(2, 1, c0, c8, 5)
+#define RSYSL_ETMCNTVR1()		MRSL(2, 1, c0, c9, 5)
+#define RSYSL_ETMCNTVR2()		MRSL(2, 1, c0, c10, 5)
+#define RSYSL_ETMCNTVR3()		MRSL(2, 1, c0, c11, 5)
+#define RSYSL_ETMCONFIGR()		MRSL(2, 1, c0, c4, 0)
+#define RSYSL_ETMDEVARCH()		MRSL(2, 1, c7, c15, 6)
+#define RSYSL_ETMDEVID()		MRSL(2, 1, c7, c2, 7)
+#define RSYSL_ETMEVENTCTL0R()		MRSL(2, 1, c0, c8, 0)
+#define RSYSL_ETMEVENTCTL1R()		MRSL(2, 1, c0, c9, 0)
+#define RSYSL_ETMEXTINSELR()		MRSL(2, 1, c0, c8, 4)
+#define RSYSL_ETMIDR0()			MRSL(2, 1, c0, c8, 7)
+#define RSYSL_ETMIDR1()			MRSL(2, 1, c0, c9, 7)
+#define RSYSL_ETMIDR10()		MRSL(2, 1, c0, c2, 6)
+#define RSYSL_ETMIDR11()		MRSL(2, 1, c0, c3, 6)
+#define RSYSL_ETMIDR12()		MRSL(2, 1, c0, c4, 6)
+#define RSYSL_ETMIDR13()		MRSL(2, 1, c0, c5, 6)
+#define RSYSL_ETMIDR2()			MRSL(2, 1, c0, c10, 7)
+#define RSYSL_ETMIDR3()			MRSL(2, 1, c0, c11, 7)
+#define RSYSL_ETMIDR4()			MRSL(2, 1, c0, c12, 7)
+#define RSYSL_ETMIDR5()			MRSL(2, 1, c0, c13, 7)
+#define RSYSL_ETMIDR6()			MRSL(2, 1, c0, c14, 7)
+#define RSYSL_ETMIDR7()			MRSL(2, 1, c0, c15, 7)
+#define RSYSL_ETMIDR8()			MRSL(2, 1, c0, c0, 6)
+#define RSYSL_ETMIDR9()			MRSL(2, 1, c0, c1, 6)
+#define RSYSL_ETMIMSPEC0()		MRSL(2, 1, c0, c0, 7)
+#define RSYSL_ETMOSLSR()		MRSL(2, 1, c1, c1, 4)
+#define RSYSL_ETMPRGCTLR()		MRSL(2, 1, c0, c1, 0)
+#define RSYSL_ETMRSCTLR10()		MRSL(2, 1, c1, c10, 0)
+#define RSYSL_ETMRSCTLR11()		MRSL(2, 1, c1, c11, 0)
+#define RSYSL_ETMRSCTLR12()		MRSL(2, 1, c1, c12, 0)
+#define RSYSL_ETMRSCTLR13()		MRSL(2, 1, c1, c13, 0)
+#define RSYSL_ETMRSCTLR14()		MRSL(2, 1, c1, c14, 0)
+#define RSYSL_ETMRSCTLR15()		MRSL(2, 1, c1, c15, 0)
+#define RSYSL_ETMRSCTLR2()		MRSL(2, 1, c1, c2, 0)
+#define RSYSL_ETMRSCTLR3()		MRSL(2, 1, c1, c3, 0)
+#define RSYSL_ETMRSCTLR4()		MRSL(2, 1, c1, c4, 0)
+#define RSYSL_ETMRSCTLR5()		MRSL(2, 1, c1, c5, 0)
+#define RSYSL_ETMRSCTLR6()		MRSL(2, 1, c1, c6, 0)
+#define RSYSL_ETMRSCTLR7()		MRSL(2, 1, c1, c7, 0)
+#define RSYSL_ETMRSCTLR8()		MRSL(2, 1, c1, c8, 0)
+#define RSYSL_ETMRSCTLR9()		MRSL(2, 1, c1, c9, 0)
+#define RSYSL_ETMRSCTLR16()		MRSL(2, 1, c1, c0, 1)
+#define RSYSL_ETMRSCTLR17()		MRSL(2, 1, c1, c1, 1)
+#define RSYSL_ETMRSCTLR18()		MRSL(2, 1, c1, c2, 1)
+#define RSYSL_ETMRSCTLR19()		MRSL(2, 1, c1, c3, 1)
+#define RSYSL_ETMRSCTLR20()		MRSL(2, 1, c1, c4, 1)
+#define RSYSL_ETMRSCTLR21()		MRSL(2, 1, c1, c5, 1)
+#define RSYSL_ETMRSCTLR22()		MRSL(2, 1, c1, c6, 1)
+#define RSYSL_ETMRSCTLR23()		MRSL(2, 1, c1, c7, 1)
+#define RSYSL_ETMRSCTLR24()		MRSL(2, 1, c1, c8, 1)
+#define RSYSL_ETMRSCTLR25()		MRSL(2, 1, c1, c9, 1)
+#define RSYSL_ETMRSCTLR26()		MRSL(2, 1, c1, c10, 1)
+#define RSYSL_ETMRSCTLR27()		MRSL(2, 1, c1, c11, 1)
+#define RSYSL_ETMRSCTLR28()		MRSL(2, 1, c1, c12, 1)
+#define RSYSL_ETMRSCTLR29()		MRSL(2, 1, c1, c13, 1)
+#define RSYSL_ETMRSCTLR30()		MRSL(2, 1, c1, c14, 1)
+#define RSYSL_ETMRSCTLR31()		MRSL(2, 1, c1, c15, 1)
+#define RSYSL_ETMSEQEVR0()		MRSL(2, 1, c0, c0, 4)
+#define RSYSL_ETMSEQEVR1()		MRSL(2, 1, c0, c1, 4)
+#define RSYSL_ETMSEQEVR2()		MRSL(2, 1, c0, c2, 4)
+#define RSYSL_ETMSEQRSTEVR()		MRSL(2, 1, c0, c6, 4)
+#define RSYSL_ETMSEQSTR()		MRSL(2, 1, c0, c7, 4)
+#define RSYSL_ETMSTALLCTLR()		MRSL(2, 1, c0, c11, 0)
+#define RSYSL_ETMSTATR()		MRSL(2, 1, c0, c3, 0)
+#define RSYSL_ETMSYNCPR()		MRSL(2, 1, c0, c13, 0)
+#define RSYSL_ETMTRACEIDR()		MRSL(2, 1, c0, c0, 1)
+#define RSYSL_ETMTSCTLR()		MRSL(2, 1, c0, c12, 0)
+#define RSYSL_ETMVICTLR()		MRSL(2, 1, c0, c0, 2)
+#define RSYSL_ETMVIIECTLR()		MRSL(2, 1, c0, c1, 2)
+#define RSYSL_ETMVISSCTLR()		MRSL(2, 1, c0, c2, 2)
+#define RSYSL_ETMSSCCR0()		MRSL(2, 1, c1, c0, 2)
+#define RSYSL_ETMSSCCR1()		MRSL(2, 1, c1, c1, 2)
+#define RSYSL_ETMSSCCR2()		MRSL(2, 1, c1, c2, 2)
+#define RSYSL_ETMSSCCR3()		MRSL(2, 1, c1, c3, 2)
+#define RSYSL_ETMSSCCR4()		MRSL(2, 1, c1, c4, 2)
+#define RSYSL_ETMSSCCR5()		MRSL(2, 1, c1, c5, 2)
+#define RSYSL_ETMSSCCR6()		MRSL(2, 1, c1, c6, 2)
+#define RSYSL_ETMSSCCR7()		MRSL(2, 1, c1, c7, 2)
+#define RSYSL_ETMSSCSR0()		MRSL(2, 1, c1, c8, 2)
+#define RSYSL_ETMSSCSR1()		MRSL(2, 1, c1, c9, 2)
+#define RSYSL_ETMSSCSR2()		MRSL(2, 1, c1, c10, 2)
+#define RSYSL_ETMSSCSR3()		MRSL(2, 1, c1, c11, 2)
+#define RSYSL_ETMSSCSR4()		MRSL(2, 1, c1, c12, 2)
+#define RSYSL_ETMSSCSR5()		MRSL(2, 1, c1, c13, 2)
+#define RSYSL_ETMSSCSR6()		MRSL(2, 1, c1, c14, 2)
+#define RSYSL_ETMSSCSR7()		MRSL(2, 1, c1, c15, 2)
+#define RSYSL_ETMSSPCICR0()		MRSL(2, 1, c1, c0, 3)
+#define RSYSL_ETMSSPCICR1()		MRSL(2, 1, c1, c1, 3)
+#define RSYSL_ETMSSPCICR2()		MRSL(2, 1, c1, c2, 3)
+#define RSYSL_ETMSSPCICR3()		MRSL(2, 1, c1, c3, 3)
+#define RSYSL_ETMSSPCICR4()		MRSL(2, 1, c1, c4, 3)
+#define RSYSL_ETMSSPCICR5()		MRSL(2, 1, c1, c5, 3)
+#define RSYSL_ETMSSPCICR6()		MRSL(2, 1, c1, c6, 3)
+#define RSYSL_ETMSSPCICR7()		MRSL(2, 1, c1, c7, 3)
+
+/* 64 bit registers */
+#define RSYSQ_ETMACATR0()		MRSQ(2, 1, c2, c0, 2)
+#define RSYSQ_ETMACATR1()		MRSQ(2, 1, c2, c2, 2)
+#define RSYSQ_ETMACATR2()		MRSQ(2, 1, c2, c4, 2)
+#define RSYSQ_ETMACATR3()		MRSQ(2, 1, c2, c6, 2)
+#define RSYSQ_ETMACATR4()		MRSQ(2, 1, c2, c8, 2)
+#define RSYSQ_ETMACATR5()		MRSQ(2, 1, c2, c10, 2)
+#define RSYSQ_ETMACATR6()		MRSQ(2, 1, c2, c12, 2)
+#define RSYSQ_ETMACATR7()		MRSQ(2, 1, c2, c14, 2)
+#define RSYSQ_ETMACATR8()		MRSQ(2, 1, c2, c0, 3)
+#define RSYSQ_ETMACATR9()		MRSQ(2, 1, c2, c2, 3)
+#define RSYSQ_ETMACATR10()		MRSQ(2, 1, c2, c4, 3)
+#define RSYSQ_ETMACATR11()		MRSQ(2, 1, c2, c6, 3)
+#define RSYSQ_ETMACATR12()		MRSQ(2, 1, c2, c8, 3)
+#define RSYSQ_ETMACATR13()		MRSQ(2, 1, c2, c10, 3)
+#define RSYSQ_ETMACATR14()		MRSQ(2, 1, c2, c12, 3)
+#define RSYSQ_ETMACATR15()		MRSQ(2, 1, c2, c14, 3)
+#define RSYSQ_ETMCIDCVR0()		MRSQ(2, 1, c3, c0, 0)
+#define RSYSQ_ETMCIDCVR1()		MRSQ(2, 1, c3, c2, 0)
+#define RSYSQ_ETMCIDCVR2()		MRSQ(2, 1, c3, c4, 0)
+#define RSYSQ_ETMCIDCVR3()		MRSQ(2, 1, c3, c6, 0)
+#define RSYSQ_ETMCIDCVR4()		MRSQ(2, 1, c3, c8, 0)
+#define RSYSQ_ETMCIDCVR5()		MRSQ(2, 1, c3, c10, 0)
+#define RSYSQ_ETMCIDCVR6()		MRSQ(2, 1, c3, c12, 0)
+#define RSYSQ_ETMCIDCVR7()		MRSQ(2, 1, c3, c14, 0)
+#define RSYSQ_ETMACVR0()		MRSQ(2, 1, c2, c0, 0)
+#define RSYSQ_ETMACVR1()		MRSQ(2, 1, c2, c2, 0)
+#define RSYSQ_ETMACVR2()		MRSQ(2, 1, c2, c4, 0)
+#define RSYSQ_ETMACVR3()		MRSQ(2, 1, c2, c6, 0)
+#define RSYSQ_ETMACVR4()		MRSQ(2, 1, c2, c8, 0)
+#define RSYSQ_ETMACVR5()		MRSQ(2, 1, c2, c10, 0)
+#define RSYSQ_ETMACVR6()		MRSQ(2, 1, c2, c12, 0)
+#define RSYSQ_ETMACVR7()		MRSQ(2, 1, c2, c14, 0)
+#define RSYSQ_ETMACVR8()		MRSQ(2, 1, c2, c0, 1)
+#define RSYSQ_ETMACVR9()		MRSQ(2, 1, c2, c2, 1)
+#define RSYSQ_ETMACVR10()		MRSQ(2, 1, c2, c4, 1)
+#define RSYSQ_ETMACVR11()		MRSQ(2, 1, c2, c6, 1)
+#define RSYSQ_ETMACVR12()		MRSQ(2, 1, c2, c8, 1)
+#define RSYSQ_ETMACVR13()		MRSQ(2, 1, c2, c10, 1)
+#define RSYSQ_ETMACVR14()		MRSQ(2, 1, c2, c12, 1)
+#define RSYSQ_ETMACVR15()		MRSQ(2, 1, c2, c14, 1)
+#define RSYSQ_ETMVMIDCVR0()		MRSQ(2, 1, c3, c0, 1)
+#define RSYSQ_ETMVMIDCVR1()		MRSQ(2, 1, c3, c2, 1)
+#define RSYSQ_ETMVMIDCVR2()		MRSQ(2, 1, c3, c4, 1)
+#define RSYSQ_ETMVMIDCVR3()		MRSQ(2, 1, c3, c6, 1)
+#define RSYSQ_ETMVMIDCVR4()		MRSQ(2, 1, c3, c8, 1)
+#define RSYSQ_ETMVMIDCVR5()		MRSQ(2, 1, c3, c10, 1)
+#define RSYSQ_ETMVMIDCVR6()		MRSQ(2, 1, c3, c12, 1)
+#define RSYSQ_ETMVMIDCVR7()		MRSQ(2, 1, c3, c14, 1)
+#define RSYSQ_ETMDVCVR0()		MRSQ(2, 1, c2, c0, 4)
+#define RSYSQ_ETMDVCVR1()		MRSQ(2, 1, c2, c4, 4)
+#define RSYSQ_ETMDVCVR2()		MRSQ(2, 1, c2, c8, 4)
+#define RSYSQ_ETMDVCVR3()		MRSQ(2, 1, c2, c12, 4)
+#define RSYSQ_ETMDVCVR4()		MRSQ(2, 1, c2, c0, 5)
+#define RSYSQ_ETMDVCVR5()		MRSQ(2, 1, c2, c4, 5)
+#define RSYSQ_ETMDVCVR6()		MRSQ(2, 1, c2, c8, 5)
+#define RSYSQ_ETMDVCVR7()		MRSQ(2, 1, c2, c12, 5)
+#define RSYSQ_ETMDVCMR0()		MRSQ(2, 1, c2, c0, 6)
+#define RSYSQ_ETMDVCMR1()		MRSQ(2, 1, c2, c4, 6)
+#define RSYSQ_ETMDVCMR2()		MRSQ(2, 1, c2, c8, 6)
+#define RSYSQ_ETMDVCMR3()		MRSQ(2, 1, c2, c12, 6)
+#define RSYSQ_ETMDVCMR4()		MRSQ(2, 1, c2, c0, 7)
+#define RSYSQ_ETMDVCMR5()		MRSQ(2, 1, c2, c4, 7)
+#define RSYSQ_ETMDVCMR6()		MRSQ(2, 1, c2, c8, 7)
+#define RSYSQ_ETMDVCMR7()		MRSQ(2, 1, c2, c12, 7)
+
+/* 32 and 64 bit registers */
+#define WSYS_ETMAUXCTLR(val)		MSR(val, 2, 1, c0, c6, 0)
+#define WSYS_ETMACATR0(val)		MSR(val, 2, 1, c2, c0, 2)
+#define WSYS_ETMACATR1(val)		MSR(val, 2, 1, c2, c2, 2)
+#define WSYS_ETMACATR2(val)		MSR(val, 2, 1, c2, c4, 2)
+#define WSYS_ETMACATR3(val)		MSR(val, 2, 1, c2, c6, 2)
+#define WSYS_ETMACATR4(val)		MSR(val, 2, 1, c2, c8, 2)
+#define WSYS_ETMACATR5(val)		MSR(val, 2, 1, c2, c10, 2)
+#define WSYS_ETMACATR6(val)		MSR(val, 2, 1, c2, c12, 2)
+#define WSYS_ETMACATR7(val)		MSR(val, 2, 1, c2, c14, 2)
+#define WSYS_ETMACATR8(val)		MSR(val, 2, 1, c2, c0, 3)
+#define WSYS_ETMACATR9(val)		MSR(val, 2, 1, c2, c2, 3)
+#define WSYS_ETMACATR10(val)		MSR(val, 2, 1, c2, c4, 3)
+#define WSYS_ETMACATR11(val)		MSR(val, 2, 1, c2, c6, 3)
+#define WSYS_ETMACATR12(val)		MSR(val, 2, 1, c2, c8, 3)
+#define WSYS_ETMACATR13(val)		MSR(val, 2, 1, c2, c10, 3)
+#define WSYS_ETMACATR14(val)		MSR(val, 2, 1, c2, c12, 3)
+#define WSYS_ETMACATR15(val)		MSR(val, 2, 1, c2, c14, 3)
+#define WSYS_ETMACVR0(val)		MSR(val, 2, 1, c2, c0, 0)
+#define WSYS_ETMACVR1(val)		MSR(val, 2, 1, c2, c2, 0)
+#define WSYS_ETMACVR2(val)		MSR(val, 2, 1, c2, c4, 0)
+#define WSYS_ETMACVR3(val)		MSR(val, 2, 1, c2, c6, 0)
+#define WSYS_ETMACVR4(val)		MSR(val, 2, 1, c2, c8, 0)
+#define WSYS_ETMACVR5(val)		MSR(val, 2, 1, c2, c10, 0)
+#define WSYS_ETMACVR6(val)		MSR(val, 2, 1, c2, c12, 0)
+#define WSYS_ETMACVR7(val)		MSR(val, 2, 1, c2, c14, 0)
+#define WSYS_ETMACVR8(val)		MSR(val, 2, 1, c2, c0, 1)
+#define WSYS_ETMACVR9(val)		MSR(val, 2, 1, c2, c2, 1)
+#define WSYS_ETMACVR10(val)		MSR(val, 2, 1, c2, c4, 1)
+#define WSYS_ETMACVR11(val)		MSR(val, 2, 1, c2, c6, 1)
+#define WSYS_ETMACVR12(val)		MSR(val, 2, 1, c2, c8, 1)
+#define WSYS_ETMACVR13(val)		MSR(val, 2, 1, c2, c10, 1)
+#define WSYS_ETMACVR14(val)		MSR(val, 2, 1, c2, c12, 1)
+#define WSYS_ETMACVR15(val)		MSR(val, 2, 1, c2, c14, 1)
+#define WSYS_ETMCCCTLR(val)		MSR(val, 2, 1, c0, c14, 0)
+#define WSYS_ETMCIDCCTLR0(val)		MSR(val, 2, 1, c3, c0, 2)
+#define WSYS_ETMCIDCVR0(val)		MSR(val, 2, 1, c3, c0, 0)
+#define WSYS_ETMCIDCVR1(val)		MSR(val, 2, 1, c3, c2, 0)
+#define WSYS_ETMCIDCVR2(val)		MSR(val, 2, 1, c3, c4, 0)
+#define WSYS_ETMCIDCVR3(val)		MSR(val, 2, 1, c3, c6, 0)
+#define WSYS_ETMCIDCVR4(val)		MSR(val, 2, 1, c3, c8, 0)
+#define WSYS_ETMCIDCVR5(val)		MSR(val, 2, 1, c3, c10, 0)
+#define WSYS_ETMCIDCVR6(val)		MSR(val, 2, 1, c3, c12, 0)
+#define WSYS_ETMCIDCVR7(val)		MSR(val, 2, 1, c3, c14, 0)
+#define WSYS_ETMCNTCTLR0(val)		MSR(val, 2, 1, c0, c4, 5)
+#define WSYS_ETMCNTCTLR1(val)		MSR(val, 2, 1, c0, c5, 5)
+#define WSYS_ETMCNTCTLR2(val)		MSR(val, 2, 1, c0, c6, 5)
+#define WSYS_ETMCNTCTLR3(val)		MSR(val, 2, 1, c0, c7, 5)
+#define WSYS_ETMCNTRLDVR0(val)		MSR(val, 2, 1, c0, c0, 5)
+#define WSYS_ETMCNTRLDVR1(val)		MSR(val, 2, 1, c0, c1, 5)
+#define WSYS_ETMCNTRLDVR2(val)		MSR(val, 2, 1, c0, c2, 5)
+#define WSYS_ETMCNTRLDVR3(val)		MSR(val, 2, 1, c0, c3, 5)
+#define WSYS_ETMCNTVR0(val)		MSR(val, 2, 1, c0, c8, 5)
+#define WSYS_ETMCNTVR1(val)		MSR(val, 2, 1, c0, c9, 5)
+#define WSYS_ETMCNTVR2(val)		MSR(val, 2, 1, c0, c10, 5)
+#define WSYS_ETMCNTVR3(val)		MSR(val, 2, 1, c0, c11, 5)
+#define WSYS_ETMCONFIGR(val)		MSR(val, 2, 1, c0, c4, 0)
+#define WSYS_ETMEVENTCTL0R(val)		MSR(val, 2, 1, c0, c8, 0)
+#define WSYS_ETMEVENTCTL1R(val)		MSR(val, 2, 1, c0, c9, 0)
+#define WSYS_ETMEXTINSELR(val)		MSR(val, 2, 1, c0, c8, 4)
+#define WSYS_ETMIMSPEC0(val)		MSR(val, 2, 1, c0, c0, 7)
+#define WSYS_ETMOSLAR(val)		MSR(val, 2, 1, c1, c0, 4)
+#define WSYS_ETMPRGCTLR(val)		MSR(val, 2, 1, c0, c1, 0)
+#define WSYS_ETMRSCTLR10(val)		MSR(val, 2, 1, c1, c10, 0)
+#define WSYS_ETMRSCTLR11(val)		MSR(val, 2, 1, c1, c11, 0)
+#define WSYS_ETMRSCTLR12(val)		MSR(val, 2, 1, c1, c12, 0)
+#define WSYS_ETMRSCTLR13(val)		MSR(val, 2, 1, c1, c13, 0)
+#define WSYS_ETMRSCTLR14(val)		MSR(val, 2, 1, c1, c14, 0)
+#define WSYS_ETMRSCTLR15(val)		MSR(val, 2, 1, c1, c15, 0)
+#define WSYS_ETMRSCTLR2(val)		MSR(val, 2, 1, c1, c2, 0)
+#define WSYS_ETMRSCTLR3(val)		MSR(val, 2, 1, c1, c3, 0)
+#define WSYS_ETMRSCTLR4(val)		MSR(val, 2, 1, c1, c4, 0)
+#define WSYS_ETMRSCTLR5(val)		MSR(val, 2, 1, c1, c5, 0)
+#define WSYS_ETMRSCTLR6(val)		MSR(val, 2, 1, c1, c6, 0)
+#define WSYS_ETMRSCTLR7(val)		MSR(val, 2, 1, c1, c7, 0)
+#define WSYS_ETMRSCTLR8(val)		MSR(val, 2, 1, c1, c8, 0)
+#define WSYS_ETMRSCTLR9(val)		MSR(val, 2, 1, c1, c9, 0)
+#define WSYS_ETMRSCTLR16(val)		MSR(val, 2, 1, c1, c0, 1)
+#define WSYS_ETMRSCTLR17(val)		MSR(val, 2, 1, c1, c1, 1)
+#define WSYS_ETMRSCTLR18(val)		MSR(val, 2, 1, c1, c2, 1)
+#define WSYS_ETMRSCTLR19(val)		MSR(val, 2, 1, c1, c3, 1)
+#define WSYS_ETMRSCTLR20(val)		MSR(val, 2, 1, c1, c4, 1)
+#define WSYS_ETMRSCTLR21(val)		MSR(val, 2, 1, c1, c5, 1)
+#define WSYS_ETMRSCTLR22(val)		MSR(val, 2, 1, c1, c6, 1)
+#define WSYS_ETMRSCTLR23(val)		MSR(val, 2, 1, c1, c7, 1)
+#define WSYS_ETMRSCTLR24(val)		MSR(val, 2, 1, c1, c8, 1)
+#define WSYS_ETMRSCTLR25(val)		MSR(val, 2, 1, c1, c9, 1)
+#define WSYS_ETMRSCTLR26(val)		MSR(val, 2, 1, c1, c10, 1)
+#define WSYS_ETMRSCTLR27(val)		MSR(val, 2, 1, c1, c11, 1)
+#define WSYS_ETMRSCTLR28(val)		MSR(val, 2, 1, c1, c12, 1)
+#define WSYS_ETMRSCTLR29(val)		MSR(val, 2, 1, c1, c13, 1)
+#define WSYS_ETMRSCTLR30(val)		MSR(val, 2, 1, c1, c14, 1)
+#define WSYS_ETMRSCTLR31(val)		MSR(val, 2, 1, c1, c15, 1)
+#define WSYS_ETMSEQEVR0(val)		MSR(val, 2, 1, c0, c0, 4)
+#define WSYS_ETMSEQEVR1(val)		MSR(val, 2, 1, c0, c1, 4)
+#define WSYS_ETMSEQEVR2(val)		MSR(val, 2, 1, c0, c2, 4)
+#define WSYS_ETMSEQRSTEVR(val)		MSR(val, 2, 1, c0, c6, 4)
+#define WSYS_ETMSEQSTR(val)		MSR(val, 2, 1, c0, c7, 4)
+#define WSYS_ETMSTALLCTLR(val)		MSR(val, 2, 1, c0, c11, 0)
+#define WSYS_ETMSYNCPR(val)		MSR(val, 2, 1, c0, c13, 0)
+#define WSYS_ETMTRACEIDR(val)		MSR(val, 2, 1, c0, c0, 1)
+#define WSYS_ETMTSCTLR(val)		MSR(val, 2, 1, c0, c12, 0)
+#define WSYS_ETMVICTLR(val)		MSR(val, 2, 1, c0, c0, 2)
+#define WSYS_ETMVIIECTLR(val)		MSR(val, 2, 1, c0, c1, 2)
+#define WSYS_ETMVISSCTLR(val)		MSR(val, 2, 1, c0, c2, 2)
+#define WSYS_ETMVMIDCVR0(val)		MSR(val, 2, 1, c3, c0, 1)
+#define WSYS_ETMVMIDCVR1(val)		MSR(val, 2, 1, c3, c2, 1)
+#define WSYS_ETMVMIDCVR2(val)		MSR(val, 2, 1, c3, c4, 1)
+#define WSYS_ETMVMIDCVR3(val)		MSR(val, 2, 1, c3, c6, 1)
+#define WSYS_ETMVMIDCVR4(val)		MSR(val, 2, 1, c3, c8, 1)
+#define WSYS_ETMVMIDCVR5(val)		MSR(val, 2, 1, c3, c10, 1)
+#define WSYS_ETMVMIDCVR6(val)		MSR(val, 2, 1, c3, c12, 1)
+#define WSYS_ETMVMIDCVR7(val)		MSR(val, 2, 1, c3, c14, 1)
+#define WSYS_ETMDVCVR0(val)		MSR(val, 2, 1, c2, c0, 4)
+#define WSYS_ETMDVCVR1(val)		MSR(val, 2, 1, c2, c4, 4)
+#define WSYS_ETMDVCVR2(val)		MSR(val, 2, 1, c2, c8, 4)
+#define WSYS_ETMDVCVR3(val)		MSR(val, 2, 1, c2, c12, 4)
+#define WSYS_ETMDVCVR4(val)		MSR(val, 2, 1, c2, c0, 5)
+#define WSYS_ETMDVCVR5(val)		MSR(val, 2, 1, c2, c4, 5)
+#define WSYS_ETMDVCVR6(val)		MSR(val, 2, 1, c2, c8, 5)
+#define WSYS_ETMDVCVR7(val)		MSR(val, 2, 1, c2, c12, 5)
+#define WSYS_ETMDVCMR0(val)		MSR(val, 2, 1, c2, c0, 6)
+#define WSYS_ETMDVCMR1(val)		MSR(val, 2, 1, c2, c4, 6)
+#define WSYS_ETMDVCMR2(val)		MSR(val, 2, 1, c2, c8, 6)
+#define WSYS_ETMDVCMR3(val)		MSR(val, 2, 1, c2, c12, 6)
+#define WSYS_ETMDVCMR4(val)		MSR(val, 2, 1, c2, c0, 7)
+#define WSYS_ETMDVCMR5(val)		MSR(val, 2, 1, c2, c4, 7)
+#define WSYS_ETMDVCMR6(val)		MSR(val, 2, 1, c2, c8, 7)
+#define WSYS_ETMDVCMR7(val)		MSR(val, 2, 1, c2, c12, 7)
+#define WSYS_ETMSSCCR0(val)		MSR(val, 2, 1, c1, c0, 2)
+#define WSYS_ETMSSCCR1(val)		MSR(val, 2, 1, c1, c1, 2)
+#define WSYS_ETMSSCCR2(val)		MSR(val, 2, 1, c1, c2, 2)
+#define WSYS_ETMSSCCR3(val)		MSR(val, 2, 1, c1, c3, 2)
+#define WSYS_ETMSSCCR4(val)		MSR(val, 2, 1, c1, c4, 2)
+#define WSYS_ETMSSCCR5(val)		MSR(val, 2, 1, c1, c5, 2)
+#define WSYS_ETMSSCCR6(val)		MSR(val, 2, 1, c1, c6, 2)
+#define WSYS_ETMSSCCR7(val)		MSR(val, 2, 1, c1, c7, 2)
+#define WSYS_ETMSSCSR0(val)		MSR(val, 2, 1, c1, c8, 2)
+#define WSYS_ETMSSCSR1(val)		MSR(val, 2, 1, c1, c9, 2)
+#define WSYS_ETMSSCSR2(val)		MSR(val, 2, 1, c1, c10, 2)
+#define WSYS_ETMSSCSR3(val)		MSR(val, 2, 1, c1, c11, 2)
+#define WSYS_ETMSSCSR4(val)		MSR(val, 2, 1, c1, c12, 2)
+#define WSYS_ETMSSCSR5(val)		MSR(val, 2, 1, c1, c13, 2)
+#define WSYS_ETMSSCSR6(val)		MSR(val, 2, 1, c1, c14, 2)
+#define WSYS_ETMSSCSR7(val)		MSR(val, 2, 1, c1, c15, 2)
+#define WSYS_ETMSSPCICR0(val)		MSR(val, 2, 1, c1, c0, 3)
+#define WSYS_ETMSSPCICR1(val)		MSR(val, 2, 1, c1, c1, 3)
+#define WSYS_ETMSSPCICR2(val)		MSR(val, 2, 1, c1, c2, 3)
+#define WSYS_ETMSSPCICR3(val)		MSR(val, 2, 1, c1, c3, 3)
+#define WSYS_ETMSSPCICR4(val)		MSR(val, 2, 1, c1, c4, 3)
+#define WSYS_ETMSSPCICR5(val)		MSR(val, 2, 1, c1, c5, 3)
+#define WSYS_ETMSSPCICR6(val)		MSR(val, 2, 1, c1, c6, 3)
+#define WSYS_ETMSSPCICR7(val)		MSR(val, 2, 1, c1, c7, 3)
+
+#endif
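
Because ETMv4 trace registers have no architected names that older assemblers
accept, each accessor spells out the full S<op0>_<op1>_<CRn>_<CRm>_<op2>
encoding.  A hypothetical save/restore fragment (illustrative only):

	uint32_t trace_id;

	trace_id = trc_readl(ETMTRACEIDR);	/* mrs xN, S2_1_c0_c0_1 */
	trc_write(trace_id, ETMTRACEIDR);	/* msr S2_1_c0_c0_1, xN */
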
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/arch/arm64/include/asm/gpio.h	2019-01-22 16:16:21.539228621 +0100
@@ -0,0 +1,32 @@
+#ifndef _ARCH_ARM64_GPIO_H
+#define _ARCH_ARM64_GPIO_H
+
+#if CONFIG_ARCH_NR_GPIO > 0
+#define ARCH_NR_GPIOS CONFIG_ARCH_NR_GPIO
+#endif
+
+/* not all ARM64 platforms necessarily support this API ... */
+#ifdef CONFIG_NEED_MACH_GPIO_H
+#include <mach/gpio.h>
+#endif
+
+#ifndef __ARM64_GPIOLIB_COMPLEX
+/* Note: this may rely upon the value of ARCH_NR_GPIOS set in mach/gpio.h */
+#include <asm-generic/gpio.h>
+
+/* The trivial gpiolib dispatchers */
+#define gpio_get_value  __gpio_get_value
+#define gpio_set_value  __gpio_set_value
+#define gpio_cansleep   __gpio_cansleep
+#endif
+
+/*
+ * Provide a default gpio_to_irq() which should satisfy every case.
+ * However, some platforms want to do this differently, so allow them
+ * to override it.
+ */
+#ifndef gpio_to_irq
+#define gpio_to_irq	__gpio_to_irq
+#endif
+
+#endif /* _ARCH_ARM64_GPIO_H */
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/arch/arm64/include/asm/stack_pointer.h	2019-01-22 16:16:21.543228657 +0100
@@ -0,0 +1,9 @@
+#ifndef __ASM_STACK_POINTER_H
+#define __ASM_STACK_POINTER_H
+
+/*
+ * how to get the current stack pointer from C
+ */
+register unsigned long current_stack_pointer asm ("sp");
+
+#endif /* __ASM_STACK_POINTER_H */
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/arch/arm64/kernel/perf_debug.c	2019-01-22 16:16:21.551228730 +0100
@@ -0,0 +1,73 @@
+/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/types.h>
+#include <linux/uaccess.h>
+#include <linux/debugfs.h>
+
+/*
+ * Subsequent patches should add an entry to the end of this string.
+ * Format is incrementing sequence number followed by text of
+ * patch commit title with newline.
+ * Note trailing ';' is on its own line to simplify addition of
+ * future strings.
+ */
+static char *descriptions =
+	" 0 arm64: perf: add debug patch logging framework\n"
+	" 1 Perf: arm64: Add L1 counters to tracepoints\n"
+	" 5 Perf: arm64: add perf user-mode permissions\n"
+	" 6 Perf: arm64: Add debugfs node to clear PMU\n"
+	" 7 Perf: arm64: Update PMU force reset\n"
+	"10 Perf: arm64: tracectr: initialize counts after hotplug\n"
+	"11 Perf: arm64: Refine disable/enable in tracecounters\n"
+	"15 Perf: arm64: make debug dir handle exportable\n"
+	"16 Perf: arm64: add perf trace user\n"
+	"17 Perf: arm64: add support for kryo pmu\n"
+;
+
+static ssize_t desc_read(struct file *fp, char __user *buf,
+			 size_t count, loff_t *pos)
+{
+	return simple_read_from_buffer(buf, count, pos, descriptions,
+				       strlen(descriptions));
+}
+
+static const struct file_operations perf_debug_desc_fops = {
+	.read = desc_read,
+};
+
+static int perf_debugfs_init(void)
+{
+	int ret = 0;
+	struct dentry *dir;
+	struct dentry *file;
+
+	dir = debugfs_create_dir("msm-perf-patches", NULL);
+	if (IS_ERR_OR_NULL(dir)) {
+		pr_err("failed to create msm-perf-patches dir in debugfs\n");
+		ret = dir ? PTR_ERR(dir) : -ENOMEM;
+		goto init_exit;
+	}
+
+	file = debugfs_create_file("descriptions", 0444, dir, NULL,
+				   &perf_debug_desc_fops);
+	if (IS_ERR_OR_NULL(file)) {
+		debugfs_remove(dir);
+		pr_err("failed to create descriptions file for msm-perf-patches\n");
+		ret = file ? PTR_ERR(file) : -ENOMEM;
+		goto init_exit;
+	}
+
+init_exit:
+	return ret;
+}
+late_initcall(perf_debugfs_init);
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/arch/arm64/kernel/perf_trace_counters.c	2019-01-22 16:16:21.551228730 +0100
@@ -0,0 +1,180 @@
+/* Copyright (c) 2013-2014, 2017 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#include <linux/uaccess.h>
+#include <linux/debugfs.h>
+#include <linux/cpu.h>
+#include <linux/tracepoint.h>
+#include <trace/events/sched.h>
+#define CREATE_TRACE_POINTS
+#include "perf_trace_counters.h"
+
+static unsigned int tp_pid_state;
+
+DEFINE_PER_CPU(u32, cntenset_val);
+DEFINE_PER_CPU(u32, previous_ccnt);
+DEFINE_PER_CPU(u32[NUM_L1_CTRS], previous_l1_cnts);
+DEFINE_PER_CPU(u32, old_pid);
+DEFINE_PER_CPU(u32, hotplug_flag);
+
+static int tracectr_cpu_hotplug_notifier(struct notifier_block *self,
+					 unsigned long action, void *hcpu)
+{
+	unsigned long cpu = (unsigned long)hcpu;
+
+	if ((action & (~CPU_TASKS_FROZEN)) == CPU_STARTING)
+		per_cpu(hotplug_flag, cpu) = 1;
+
+	return NOTIFY_OK;
+}
+
+static struct notifier_block tracectr_cpu_hotplug_notifier_block = {
+	.notifier_call = tracectr_cpu_hotplug_notifier,
+};
+
+static void setup_prev_cnts(u32 cpu, u32 cnten_val)
+{
+	int i;
+
+	if (cnten_val & CC)
+		asm volatile("mrs %0, pmccntr_el0"
+			: "=r"(per_cpu(previous_ccnt, cpu)));
+
+	for (i = 0; i < NUM_L1_CTRS; i++) {
+		if (cnten_val & (1 << i)) {
+			/* Select */
+			asm volatile("msr pmselr_el0, %0" : : "r"(i));
+			isb();
+			/* Read value */
+			asm volatile("mrs %0, pmxevcntr_el0"
+				: "=r"(per_cpu(previous_l1_cnts[i], cpu)));
+		}
+	}
+}
+
+void tracectr_notifier(void *ignore, bool preempt,
+			struct task_struct *prev, struct task_struct *next)
+{
+	u32 cnten_val;
+	int current_pid;
+	u32 cpu = task_cpu(next);
+
+	if (tp_pid_state != 1)
+		return;
+	current_pid = next->pid;
+	if (per_cpu(old_pid, cpu) != -1) {
+		asm volatile("mrs %0, pmcntenset_el0" : "=r" (cnten_val));
+		per_cpu(cntenset_val, cpu) = cnten_val;
+		/* Disable all the counters that were enabled */
+		asm volatile("msr pmcntenclr_el0, %0" : : "r" (cnten_val));
+
+		if (per_cpu(hotplug_flag, cpu) == 1) {
+			per_cpu(hotplug_flag, cpu) = 0;
+			setup_prev_cnts(cpu, cnten_val);
+		} else {
+			trace_sched_switch_with_ctrs(per_cpu(old_pid, cpu),
+						     current_pid);
+		}
+
+		/* Enable all the counters that were disabled */
+		asm volatile("msr pmcntenset_el0, %0" : : "r" (cnten_val));
+	}
+	per_cpu(old_pid, cpu) = current_pid;
+}
+
+static void enable_tp_pid(void)
+{
+	if (tp_pid_state == 0) {
+		tp_pid_state = 1;
+		register_trace_sched_switch(tracectr_notifier, NULL);
+	}
+}
+
+static void disable_tp_pid(void)
+{
+	if (tp_pid_state == 1) {
+		tp_pid_state = 0;
+		unregister_trace_sched_switch(tracectr_notifier, NULL);
+	}
+}
+
+static ssize_t read_enabled_perftp_file_bool(struct file *file,
+		char __user *user_buf, size_t count, loff_t *ppos)
+{
+	char buf[2];
+	buf[1] = '\n';
+	if (tp_pid_state == 0)
+		buf[0] = '0';
+	else
+		buf[0] = '1';
+	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
+}
+
+static ssize_t write_enabled_perftp_file_bool(struct file *file,
+		const char __user *user_buf, size_t count, loff_t *ppos)
+{
+	char buf[32];
+	size_t buf_size;
+
+	buf[0] = 0;
+	buf_size = min(count, (sizeof(buf)-1));
+	if (copy_from_user(buf, user_buf, buf_size))
+		return -EFAULT;
+	switch (buf[0]) {
+	case 'y':
+	case 'Y':
+	case '1':
+		enable_tp_pid();
+		break;
+	case 'n':
+	case 'N':
+	case '0':
+		disable_tp_pid();
+		break;
+	}
+
+	return count;
+}
+
+static const struct file_operations fops_perftp = {
+	.read =		read_enabled_perftp_file_bool,
+	.write =	write_enabled_perftp_file_bool,
+	.llseek =	default_llseek,
+};
+
+int __init init_tracecounters(void)
+{
+	struct dentry *dir;
+	struct dentry *file;
+	unsigned int value = 1;
+	int cpu;
+
+	dir = debugfs_create_dir("perf_debug_tp", NULL);
+	if (!dir)
+		return -ENOMEM;
+	file = debugfs_create_file("enabled", 0660, dir,
+		&value, &fops_perftp);
+	if (!file) {
+		debugfs_remove(dir);
+		return -ENOMEM;
+	}
+	for_each_possible_cpu(cpu)
+		per_cpu(old_pid, cpu) = -1;
+	register_cpu_notifier(&tracectr_cpu_hotplug_notifier_block);
+	return 0;
+}
+
+int __exit exit_tracecounters(void)
+{
+	unregister_cpu_notifier(&tracectr_cpu_hotplug_notifier_block);
+	return 0;
+}
+late_initcall(init_tracecounters);
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/arch/arm64/kernel/perf_trace_counters.h	2019-01-22 16:16:21.551228730 +0100
@@ -0,0 +1,111 @@
+/* Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM perf_trace_counters
+
+#if !defined(_PERF_TRACE_COUNTERS_H_) || defined(TRACE_HEADER_MULTI_READ)
+#define _PERF_TRACE_COUNTERS_H_
+
+/* Counter enable bits in PMCNTENSET/PMCNTENCLR_EL0: CC (bit 31) is the
+ * cycle counter, C0-C5 (bits 0-5) are the L1 event counters. */
+#define CC 0x80000000
+#define C0 0x1
+#define C1 0x2
+#define C2 0x4
+#define C3 0x8
+#define C4 0x10
+#define C5 0x20
+#define C_ALL (CC | C0 | C1 | C2 | C3 | C4 | C5)
+#define NUM_L1_CTRS 6
+
+#include <linux/sched.h>
+#include <linux/cpumask.h>
+#include <linux/tracepoint.h>
+
+DECLARE_PER_CPU(u32, cntenset_val);
+DECLARE_PER_CPU(u32, previous_ccnt);
+DECLARE_PER_CPU(u32[NUM_L1_CTRS], previous_l1_cnts);
+TRACE_EVENT(sched_switch_with_ctrs,
+
+		TP_PROTO(pid_t prev, pid_t next),
+
+		TP_ARGS(prev, next),
+
+		TP_STRUCT__entry(
+			__field(pid_t,	old_pid)
+			__field(pid_t,	new_pid)
+			__field(u32, cctr)
+			__field(u32, ctr0)
+			__field(u32, ctr1)
+			__field(u32, ctr2)
+			__field(u32, ctr3)
+			__field(u32, ctr4)
+			__field(u32, ctr5)
+		),
+
+		TP_fast_assign(
+			u32 cpu = smp_processor_id();
+			u32 i;
+			u32 cnten_val;
+			u32 total_ccnt = 0;
+			u32 total_cnt = 0;
+			u32 delta_l1_cnts[NUM_L1_CTRS];
+			__entry->old_pid	= prev;
+			__entry->new_pid	= next;
+
+			cnten_val = per_cpu(cntenset_val, cpu);
+
+			if (cnten_val & CC) {
+				asm volatile("mrs %0, pmccntr_el0"
+							: "=r" (total_ccnt));
+				/* Read value */
+				__entry->cctr = total_ccnt -
+					per_cpu(previous_ccnt, cpu);
+				per_cpu(previous_ccnt, cpu) = total_ccnt;
+			}
+			for (i = 0; i < NUM_L1_CTRS; i++) {
+				if (cnten_val & (1 << i)) {
+					/* Select */
+					asm volatile("msr pmselr_el0, %0"
+							: : "r" (i));
+					isb();
+					asm volatile("mrs %0, pmxevcntr_el0"
+							: "=r" (total_cnt));
+					/* Read value */
+					delta_l1_cnts[i] = total_cnt -
+					  per_cpu(previous_l1_cnts[i], cpu);
+					per_cpu(previous_l1_cnts[i], cpu) =
+						total_cnt;
+				} else
+					delta_l1_cnts[i] = 0;
+			}
+
+			__entry->ctr0 = delta_l1_cnts[0];
+			__entry->ctr1 = delta_l1_cnts[1];
+			__entry->ctr2 = delta_l1_cnts[2];
+			__entry->ctr3 = delta_l1_cnts[3];
+			__entry->ctr4 = delta_l1_cnts[4];
+			__entry->ctr5 = delta_l1_cnts[5];
+		),
+
+		TP_printk("prev_pid=%d, next_pid=%d, CCNTR: %u, CTR0: %u, CTR1: %u, CTR2: %u, CTR3: %u, CTR4: %u, CTR5: %u",
+				__entry->old_pid, __entry->new_pid,
+				__entry->cctr, __entry->ctr0, __entry->ctr1,
+				__entry->ctr2, __entry->ctr3,
+				__entry->ctr4, __entry->ctr5)
+);
+
+#endif
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH ../../arch/arm64/kernel
+#define TRACE_INCLUDE_FILE perf_trace_counters
+#include <trace/define_trace.h>
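The TRACE_EVENT above records per-context-switch deltas: it snapshots PMCCNTR_EL0 and each enabled PMXEVCNTR_EL0, subtracts the per-cpu previous_* values, and stores the differences, so every record carries the counts accrued since the last switch on that CPU. The companion .c file presumably wires this into the scheduler; a sketch of such a probe, with the registration path assumed rather than shown:

	/* Hypothetical wiring: a probe on the existing sched_switch
	 * tracepoint re-emits it with counter deltas.  Registration via
	 * register_trace_sched_switch() from the "enabled" debugfs
	 * handler is an assumption. */
	#include <trace/events/sched.h>
	#include "perf_trace_counters.h"

	static void tracectr_notifier(void *ignore, bool preempt,
				      struct task_struct *prev,
				      struct task_struct *next)
	{
		trace_sched_switch_with_ctrs(prev->pid, next->pid);
	}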
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/arch/arm64/kernel/perf_trace_user.c	2019-01-22 16:16:21.551228730 +0100
@@ -0,0 +1,96 @@
+/* Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#include <linux/perf_event.h>
+#include <linux/types.h>
+#include <linux/tracepoint.h>
+#include <linux/fs.h>
+#include <linux/debugfs.h>
+#include <linux/preempt.h>
+#include <linux/stat.h>
+#include <asm/uaccess.h>
+
+#define CREATE_TRACE_POINTS
+#include "perf_trace_user.h"
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM perf_trace_counters
+
+#define TRACE_USER_MAX_BUF_SIZE 100
+
+static ssize_t perf_trace_write(struct file *file,
+				const char __user *user_string_in,
+				size_t len, loff_t *ppos)
+{
+	u32 cnten_val;
+	int rc;
+	char buf[TRACE_USER_MAX_BUF_SIZE + 1];
+	ssize_t length;
+
+	if (len == 0)
+		return 0;
+
+	length = len > TRACE_USER_MAX_BUF_SIZE ? TRACE_USER_MAX_BUF_SIZE : len;
+
+	rc = copy_from_user(buf, user_string_in, length);
+	if (rc) {
+		pr_err("%s copy_from_user failed, rc=%d\n", __func__, rc);
+		return -EFAULT;
+	}
+
+	/* Remove any trailing newline and make sure string is terminated */
+	if (buf[length - 1] == '\n')
+		buf[length - 1] = '\0';
+	else
+		buf[length] = '\0';
+
+	/*
+	 * Disable preemption to ensure that all the performance counter
+	 * accesses happen on the same cpu
+	 */
+	preempt_disable();
+	/* stop counters, call the trace function, restart them */
+
+	asm volatile("mrs %0, pmcntenset_el0" : "=r" (cnten_val));
+	/* Disable all the counters that were enabled */
+	asm volatile("msr pmcntenclr_el0, %0" : : "r" (cnten_val));
+
+	trace_perf_trace_user(buf, cnten_val);
+
+	/* Enable all the counters that were disabled */
+	asm volatile("msr pmcntenset_el0, %0" : : "r" (cnten_val));
+	preempt_enable();
+
+	return length;
+}
+
+static const struct file_operations perf_trace_fops = {
+	.write = perf_trace_write
+};
+
+static int __init init_perf_trace(void)
+{
+	struct dentry *dir;
+	struct dentry *file;
+	unsigned int value = 1;
+
+	dir = perf_create_debug_dir();
+	if (!dir)
+		return -ENOMEM;
+	file = debugfs_create_file("trace_marker", S_IWUSR | S_IWGRP, dir,
+		&value, &perf_trace_fops);
+	if (!file)
+		return -ENOMEM;
+
+	return 0;
+}
+
+late_initcall(init_perf_trace);
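perf_trace_write() above copies at most TRACE_USER_MAX_BUF_SIZE bytes of the user string, strips a single trailing newline, and emits it through trace_perf_trace_user() together with the PMCNTENSET_EL0 snapshot; the counters are halted around the tracepoint call so the instrumentation does not count itself, and preemption is disabled so the stop/trace/restart sequence stays on one CPU. Note that the `value` passed to debugfs_create_file() is never consulted by the write handler. From userspace the file behaves like a trace marker; a sketch, where the directory name returned by perf_create_debug_dir() (defined elsewhere) is assumed to be "perf_debug":

	/* Hypothetical marker write; the "perf_debug" directory name is
	 * an assumption. */
	#include <fcntl.h>
	#include <string.h>
	#include <unistd.h>

	static void perf_marker(const char *msg)
	{
		int fd = open("/sys/kernel/debug/perf_debug/trace_marker",
			      O_WRONLY);

		if (fd >= 0) {
			write(fd, msg, strlen(msg));
			close(fd);
		}
	}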
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/arch/arm64/kernel/perf_trace_user.h	2019-01-22 16:16:21.551228730 +0100
@@ -0,0 +1,85 @@
+/* Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#if !defined(_PERF_TRACE_USER_H_) || defined(TRACE_HEADER_MULTI_READ)
+#define _PERF_TRACE_USER_H_
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM perf_trace_counters
+
+#include <linux/tracepoint.h>
+
+#define CNTENSET_CC    0x80000000
+#define NUM_L1_CTRS             4
+
+TRACE_EVENT(perf_trace_user,
+	TP_PROTO(char *string, u32 cnten_val),
+	TP_ARGS(string, cnten_val),
+
+	TP_STRUCT__entry(
+		__field(u32, cctr)
+		__field(u32, ctr0)
+		__field(u32, ctr1)
+		__field(u32, ctr2)
+		__field(u32, ctr3)
+		__field(u32, lctr0)
+		__field(u32, lctr1)
+		__string(user_string, string)
+		),
+
+	TP_fast_assign(
+		u32 cnt;
+		u32 l1_cnts[NUM_L1_CTRS];
+		int i;
+
+		if (cnten_val & CNTENSET_CC) {
+			/* Read value */
+			asm volatile("mrs %0, pmccntr_el0" : "=r" (cnt));
+			__entry->cctr = cnt;
+		} else
+			__entry->cctr = 0;
+		for (i = 0; i < NUM_L1_CTRS; i++) {
+			if (cnten_val & (1 << i)) {
+				/* Select */
+				asm volatile("msr pmselr_el0, %0"
+					     : : "r" (i));
+				isb();
+				/* Read value */
+				asm volatile("mrs %0, pmxevcntr_el0"
+					     : "=r" (cnt));
+				l1_cnts[i] = cnt;
+			} else {
+				l1_cnts[i] = 0;
+			}
+		}
+
+		__entry->ctr0 = l1_cnts[0];
+		__entry->ctr1 = l1_cnts[1];
+		__entry->ctr2 = l1_cnts[2];
+		__entry->ctr3 = l1_cnts[3];
+		__entry->lctr0 = 0;
+		__entry->lctr1 = 0;
+		__assign_str(user_string, string);
+		),
+
+		TP_printk("CCNTR: %u, CTR0: %u, CTR1: %u, CTR2: %u, CTR3: %u, L2CTR0: %u, L2CTR1: %u, MSG=%s",
+			  __entry->cctr, __entry->ctr0, __entry->ctr1,
+			  __entry->ctr2, __entry->ctr3,
+			  __entry->lctr0, __entry->lctr1,
+			  __get_str(user_string)
+			)
+	);
+
+#endif
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH ../../arch/arm64/kernel
+#define TRACE_INCLUDE_FILE perf_trace_user
+#include <trace/define_trace.h>
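Unlike sched_switch_with_ctrs, the TP_fast_assign here samples the four L1 counters in place (raw values, no deltas) and pins lctr0/lctr1 to zero, so the two L2CTR columns always print 0; note also that NUM_L1_CTRS is 4 in this header versus 6 in perf_trace_counters.h. A hypothetical line as it would land in the ftrace buffer, all values invented:

	<...>-1234  [002] ....  84.912345: perf_trace_user: CCNTR: 182736, CTR0: 421, CTR1: 0, CTR2: 97, CTR3: 0, L2CTR0: 0, L2CTR1: 0, MSG=frame_start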
diff -Nruw linux-4.4.115/arch/arm64/kernel/probes/Makefile linux-4.4.115-fbx/arch/arm64/kernel/probes/Makefile
--- linux-4.4.115/arch/arm64/kernel/probes/Makefile	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/arch/arm64/kernel/probes/Makefile	2019-01-22 16:16:21.551228730 +0100
@@ -0,0 +1,3 @@
+obj-$(CONFIG_KPROBES)		+= kprobes.o decode-insn.o	\
+				   kprobes_trampoline.o		\
+				   simulate-insn.o
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/arch/arm64/kernel/smccc-call.S	2019-01-22 16:16:21.555228766 +0100
@@ -0,0 +1,43 @@
+/*
+ * Copyright (c) 2015, Linaro Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License Version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/linkage.h>
+#include <asm/asm-offsets.h>
+
+	.macro SMCCC instr
+	.cfi_startproc
+	\instr	#0
+	ldr	x4, [sp]
+	stp	x0, x1, [x4, #ARM_SMCCC_RES_X0_OFFS]
+	stp	x2, x3, [x4, #ARM_SMCCC_RES_X2_OFFS]
+	ret
+	.cfi_endproc
+	.endm
+
+/*
+ * void arm_smccc_smc(unsigned long a0, unsigned long a1, unsigned long a2,
+ *		  unsigned long a3, unsigned long a4, unsigned long a5,
+ *		  unsigned long a6, unsigned long a7, struct arm_smccc_res *res)
+ */
+ENTRY(arm_smccc_smc)
+	SMCCC	smc
+ENDPROC(arm_smccc_smc)
+
+/*
+ * void arm_smccc_hvc(unsigned long a0, unsigned long a1, unsigned long a2,
+ *		  unsigned long a3, unsigned long a4, unsigned long a5,
+ *		  unsigned long a6, unsigned long a7, struct arm_smccc_res *res)
+ */
+ENTRY(arm_smccc_hvc)
+	SMCCC	hvc
+ENDPROC(arm_smccc_hvc)
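The SMCCC macro implements the SMC Calling Convention: the eight arguments arrive in x0-x7, the `smc`/`hvc` instruction traps to firmware or the hypervisor, and the results come back in x0-x3, which the macro stores through the `res` pointer loaded from the stack (the ninth argument per the AAPCS64). A caller sketch against the C prototypes in the comments above, assuming the matching linux/arm-smccc.h from this patch series; the function ID is only a placeholder:

	/* Illustrative firmware call; 0x80000000 is the SMCCC version
	 * query in later revisions of the spec, used here purely as an
	 * example function ID. */
	#include <linux/arm-smccc.h>

	static unsigned long firmware_call_example(void)
	{
		struct arm_smccc_res res;

		arm_smccc_smc(0x80000000, 0, 0, 0, 0, 0, 0, 0, &res);
		return res.a0;	/* primary result register (x0) */
	}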
diff -Nruw linux-4.4.115/arch/arm64/kvm/hyp/Makefile linux-4.4.115-fbx/arch/arm64/kvm/hyp/Makefile
--- linux-4.4.115/arch/arm64/kvm/hyp/Makefile	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/arch/arm64/kvm/hyp/Makefile	2019-01-22 16:16:21.555228766 +0100
@@ -0,0 +1,14 @@
+#
+# Makefile for Kernel-based Virtual Machine module, HYP part
+#
+
+obj-$(CONFIG_KVM_ARM_HOST) += vgic-v2-sr.o
+obj-$(CONFIG_KVM_ARM_HOST) += vgic-v3-sr.o
+obj-$(CONFIG_KVM_ARM_HOST) += timer-sr.o
+obj-$(CONFIG_KVM_ARM_HOST) += sysreg-sr.o
+obj-$(CONFIG_KVM_ARM_HOST) += debug-sr.o
+obj-$(CONFIG_KVM_ARM_HOST) += entry.o
+obj-$(CONFIG_KVM_ARM_HOST) += switch.o
+obj-$(CONFIG_KVM_ARM_HOST) += fpsimd.o
+obj-$(CONFIG_KVM_ARM_HOST) += tlb.o
+obj-$(CONFIG_KVM_ARM_HOST) += hyp-entry.o
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/arch/cris/boot/dts/include/dt-bindings/clock/audio-ext-clk.h	2019-01-22 16:16:28.167288642 +0100
@@ -0,0 +1,30 @@
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __AUDIO_EXT_CLK_H
+#define __AUDIO_EXT_CLK_H
+
+/* Audio External Clocks */
+#define AUDIO_PMI_CLK		0
+#define AUDIO_PMIC_LNBB_CLK	0
+#define AUDIO_AP_CLK		1
+#define AUDIO_AP_CLK2		2
+#define AUDIO_LPASS_MCLK	3
+#define AUDIO_LPASS_MCLK2	4
+
+#define clk_audio_ap_clk        0x9b5727cb
+#define clk_audio_pmi_clk       0xcbfe416d
+#define clk_audio_ap_clk2       0x454d1e91
+#define clk_audio_lpass_mclk    0xf0f2a284
+#define clk_audio_pmi_lnbb_clk   0x57312343
+
+#endif
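These dt-bindings constants are consumer-side indices: a device tree node selects one of the external audio clocks by including this header and passing the define as the clock specifier cell. A sketch of such a consumer; the node and the "clock_audio" controller label are illustrative assumptions:

	/* Hypothetical DTS fragment. */
	#include <dt-bindings/clock/audio-ext-clk.h>

	sound: sound {
		clocks = <&clock_audio AUDIO_PMI_CLK>;
		clock-names = "ext_clk";
	};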
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/arch/cris/boot/dts/include/dt-bindings/clock/msm-clocks-8996.h	2019-01-22 16:16:28.171288678 +0100
@@ -0,0 +1,574 @@
+/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MSM_CLOCKS_8996_H
+#define __MSM_CLOCKS_8996_H
+
+#include "audio-ext-clk.h"
+
+/* clock_gcc controlled clocks */
+#define clk_cxo_clk_src			0x79e95308
+#define clk_pnoc_clk			0x4325d220
+#define clk_pnoc_a_clk			0x2808c12b
+#define clk_bimc_clk			0x4b80bf00
+#define clk_bimc_a_clk			0x4b25668a
+#define clk_cnoc_clk			0xd5ccb7f4
+#define clk_cnoc_a_clk			0xd8fe2ccc
+#define clk_snoc_clk			0x2c341aa0
+#define clk_snoc_a_clk			0x8fcef2af
+#define clk_bb_clk1			0xf5304268
+#define clk_bb_clk1_ao			0xfa113810
+#define clk_bb_clk1_pin			0x6dd0a779
+#define clk_bb_clk1_pin_ao		0x9b637772
+#define clk_bb_clk2			0xfe15cb87
+#define clk_bb_clk2_ao			0x59682706
+#define clk_bb_clk2_pin			0x498938e5
+#define clk_bb_clk2_pin_ao		0x52513787
+#define clk_bimc_msmbus_clk		0xd212feea
+#define clk_bimc_msmbus_a_clk		0x71d1a499
+#define clk_ce1_a_clk			0x44a833fe
+#define clk_cnoc_msmbus_clk		0x62228b5d
+#define clk_cnoc_msmbus_a_clk		0x67442955
+#define clk_cxo_clk_src_ao		0x64eb6004
+#define clk_cxo_dwc3_clk		0xf79c19f6
+#define clk_cxo_lpm_clk			0x94adbf3d
+#define clk_cxo_otg_clk			0x4eec0bb9
+#define clk_cxo_pil_lpass_clk		0xe17f0ff6
+#define clk_cxo_pil_ssc_clk		0x81832015
+#define clk_div_clk1			0xaa1157a6
+#define clk_div_clk1_ao			0x6b943d68
+#define clk_div_clk2			0xd454019f
+#define clk_div_clk2_ao			0x53f9e788
+#define clk_div_clk3			0xa9a55a68
+#define clk_div_clk3_ao			0x3d6725a8
+#define clk_ipa_a_clk			0xeeec2919
+#define clk_ipa_clk			0xfa685cda
+#define clk_ln_bb_clk			0x3ab0b36d
+#define clk_ln_bb_a_clk			0xc7257ea8
+#define clk_ln_bb_clk_pin		0x1b1c476a
+#define clk_ln_bb_a_clk_pin		0x9cbb5411
+#define clk_mcd_ce1_clk			0xbb615d26
+#define clk_pnoc_keepalive_a_clk	0xf8f91f0b
+#define clk_pnoc_msmbus_clk		0x38b95c77
+#define clk_pnoc_msmbus_a_clk		0x8c9b4e93
+#define clk_pnoc_pm_clk			0xd6f7dfb9
+#define clk_pnoc_sps_clk		0xd482ecc7
+#define clk_qdss_a_clk			0xdd121669
+#define clk_qdss_clk			0x1492202a
+#define clk_rf_clk1			0xaabeea5a
+#define clk_rf_clk1_ao			0x72a10cb8
+#define clk_rf_clk1_pin			0x8f463562
+#define clk_rf_clk1_pin_ao		0x62549ff6
+#define clk_rf_clk2			0x24a30992
+#define clk_rf_clk2_ao			0x944d8bbd
+#define clk_rf_clk2_pin			0xa7c5602a
+#define clk_rf_clk2_pin_ao		0x2d75eb4d
+#define clk_snoc_msmbus_clk		0xe6900bb6
+#define clk_snoc_msmbus_a_clk		0x5d4683bd
+#define clk_mcd_ce1_clk			0xbb615d26
+#define clk_qcedev_ce1_clk		0x293f97b0
+#define clk_qcrypto_ce1_clk		0xa6ac14df
+#define clk_qseecom_ce1_clk		0xaa858373
+#define clk_scm_ce1_clk			0xd8ebcc62
+#define clk_ce1_clk			0x42229c55
+#define clk_gcc_ce1_ahb_m_clk		0x2eb28c01
+#define clk_gcc_ce1_axi_m_clk		0xc174dfba
+#define clk_measure_only_bimc_hmss_axi_clk	0xc1cc4f11
+#define clk_aggre1_noc_clk		0x049abba8
+#define clk_aggre1_noc_a_clk		0xc12e4220
+#define clk_aggre2_noc_clk		0xaa681404
+#define clk_aggre2_noc_a_clk		0xcab67089
+#define clk_mmssnoc_axi_rpm_clk		0x4d7f8cdc
+#define clk_mmssnoc_axi_rpm_a_clk	0xfbea899b
+#define clk_mmssnoc_axi_clk		0xdb4b31e6
+#define clk_mmssnoc_axi_a_clk		0xd4970614
+#define clk_mmssnoc_gds_clk		0x06a22afa
+
+#define clk_gpll0			0x1ebe3bc4
+#define clk_gpll0_ao			0xa1368304
+#define clk_gpll0_out_main		0xe9374de7
+#define clk_gpll4			0xb3b5d85b
+#define clk_gpll4_out_main		0xa9a0ab9d
+#define clk_ufs_axi_clk_src		0x297ca380
+#define clk_pcie_aux_clk_src		0xebc50566
+#define clk_usb30_master_clk_src	0xc6262f89
+#define clk_usb20_master_clk_src	0x5680ac83
+#define clk_ufs_ice_core_clk_src	0xda8e7119
+#define clk_blsp1_qup1_i2c_apps_clk_src 0x17f78f5e
+#define clk_blsp1_qup1_spi_apps_clk_src 0xf534c4fa
+#define clk_blsp1_qup2_i2c_apps_clk_src 0x8de71c79
+#define clk_blsp1_qup2_spi_apps_clk_src 0x33cf809a
+#define clk_blsp1_qup3_i2c_apps_clk_src 0xf161b902
+#define clk_blsp1_qup3_spi_apps_clk_src 0x5e95683f
+#define clk_blsp1_qup4_i2c_apps_clk_src 0xb2ecce68
+#define clk_blsp1_qup4_spi_apps_clk_src 0xddb5bbdb
+#define clk_blsp1_qup5_i2c_apps_clk_src 0x71ea7804
+#define clk_blsp1_qup5_spi_apps_clk_src 0x9752f35f
+#define clk_blsp1_qup6_i2c_apps_clk_src 0x28806803
+#define clk_blsp1_qup6_spi_apps_clk_src 0x44a1edc4
+#define clk_blsp1_uart1_apps_clk_src	0xf8146114
+#define clk_blsp1_uart2_apps_clk_src	0xfc9c2f73
+#define clk_blsp1_uart3_apps_clk_src	0x600497f2
+#define clk_blsp1_uart4_apps_clk_src	0x56bff15c
+#define clk_blsp1_uart5_apps_clk_src	0x218ef697
+#define clk_blsp1_uart6_apps_clk_src	0x8fbdbe4c
+#define clk_blsp2_qup1_i2c_apps_clk_src 0xd6d1e95d
+#define clk_blsp2_qup1_spi_apps_clk_src 0xcc1b8365
+#define clk_blsp2_qup2_i2c_apps_clk_src 0x603b5c51
+#define clk_blsp2_qup2_spi_apps_clk_src 0xd577dc44
+#define clk_blsp2_qup3_i2c_apps_clk_src 0xea82959c
+#define clk_blsp2_qup3_spi_apps_clk_src 0xd04b1e92
+#define clk_blsp2_qup4_i2c_apps_clk_src 0x73dc968c
+#define clk_blsp2_qup4_spi_apps_clk_src 0x25d4a2b1
+#define clk_blsp2_qup5_i2c_apps_clk_src 0xcc3698bd
+#define clk_blsp2_qup5_spi_apps_clk_src 0xfa0cf45e
+#define clk_blsp2_qup6_i2c_apps_clk_src 0x2fa53151
+#define clk_blsp2_qup6_spi_apps_clk_src 0x5ca86755
+#define clk_blsp2_uart1_apps_clk_src	0x562c66dc
+#define clk_blsp2_uart2_apps_clk_src	0xdd448080
+#define clk_blsp2_uart3_apps_clk_src	0x46b2e90f
+#define clk_blsp2_uart4_apps_clk_src	0x23a093d2
+#define clk_blsp2_uart5_apps_clk_src	0xe067616a
+#define clk_blsp2_uart6_apps_clk_src	0xe02d2829
+#define clk_gp1_clk_src			0xad85b97a
+#define clk_gp2_clk_src			0xfb1f0065
+#define clk_gp3_clk_src			0x63b693d6
+#define clk_hmss_rbcpr_clk_src		0xedd9a474
+#define clk_pdm2_clk_src		0x31e494fd
+#define clk_sdcc1_apps_clk_src		0xd4975db2
+#define clk_sdcc2_apps_clk_src		0xfc46c821
+#define clk_sdcc3_apps_clk_src		0xea34c7f4
+#define clk_sdcc4_apps_clk_src		0x7aaaaa0c
+#define clk_tsif_ref_clk_src		0x4e9042d1
+#define clk_usb20_mock_utmi_clk_src	0xc3aaeecb
+#define clk_usb30_mock_utmi_clk_src	0xa024a976
+#define clk_usb3_phy_aux_clk_src	0x15eec63c
+#define clk_gcc_qusb2phy_prim_reset	0x07550fa1
+#define clk_gcc_qusb2phy_sec_reset	0x3f3a87d0
+#define clk_gcc_periph_noc_usb20_ahb_clk	0xfb9f26e9
+#define clk_gcc_mmss_gcc_dbg_clk	0xe89d461c
+#define clk_cpu_dbg_clk			0x6550dfa9
+#define clk_gcc_blsp1_ahb_clk		0x8caa5b4f
+#define clk_gcc_blsp1_qup1_i2c_apps_clk 0xc303fae9
+#define clk_gcc_blsp1_qup1_spi_apps_clk 0x759a76b0
+#define clk_gcc_blsp1_qup2_i2c_apps_clk 0x1076f220
+#define clk_gcc_blsp1_qup2_spi_apps_clk 0x3e77d48f
+#define clk_gcc_blsp1_qup3_i2c_apps_clk 0x9e25ac82
+#define clk_gcc_blsp1_qup3_spi_apps_clk 0xfb978880
+#define clk_gcc_blsp1_qup4_i2c_apps_clk 0xd7f40f6f
+#define clk_gcc_blsp1_qup4_spi_apps_clk 0x80f8722f
+#define clk_gcc_blsp1_qup5_i2c_apps_clk 0xacae5604
+#define clk_gcc_blsp1_qup5_spi_apps_clk 0xbf3e15d7
+#define clk_gcc_blsp1_qup6_i2c_apps_clk 0x5c6ad820
+#define clk_gcc_blsp1_qup6_spi_apps_clk 0x780d9f85
+#define clk_gcc_blsp1_uart1_apps_clk	0xc7c62f90
+#define clk_gcc_blsp1_uart2_apps_clk	0xf8a61c96
+#define clk_gcc_blsp1_uart3_apps_clk	0xc3298bd7
+#define clk_gcc_blsp1_uart4_apps_clk	0x26be16c0
+#define clk_gcc_blsp1_uart5_apps_clk	0x28a6bc74
+#define clk_gcc_blsp1_uart6_apps_clk	0x28fd3466
+#define clk_gcc_blsp2_ahb_clk		0x8f283c1d
+#define clk_gcc_blsp2_qup1_i2c_apps_clk 0x9ace11dd
+#define clk_gcc_blsp2_qup1_spi_apps_clk 0xa32604cc
+#define clk_gcc_blsp2_qup2_i2c_apps_clk 0x1bf9a57e
+#define clk_gcc_blsp2_qup2_spi_apps_clk 0xbf54ca6d
+#define clk_gcc_blsp2_qup3_i2c_apps_clk 0x336d4170
+#define clk_gcc_blsp2_qup3_spi_apps_clk 0xc68509d6
+#define clk_gcc_blsp2_qup4_i2c_apps_clk 0xbd22539d
+#define clk_gcc_blsp2_qup4_spi_apps_clk 0x01a72b93
+#define clk_gcc_blsp2_qup5_i2c_apps_clk 0xe2b2ce1d
+#define clk_gcc_blsp2_qup5_spi_apps_clk 0xf40999cd
+#define clk_gcc_blsp2_qup6_i2c_apps_clk 0x894bcea4
+#define clk_gcc_blsp2_qup6_spi_apps_clk 0xfe1bd34a
+#define clk_gcc_blsp2_uart1_apps_clk	0x8c3512ff
+#define clk_gcc_blsp2_uart2_apps_clk	0x1e1965a3
+#define clk_gcc_blsp2_uart3_apps_clk	0x382415ab
+#define clk_gcc_blsp2_uart4_apps_clk	0x87a44b42
+#define clk_gcc_blsp2_uart5_apps_clk	0x5cd30649
+#define clk_gcc_blsp2_uart6_apps_clk	0x8feee5ab
+#define clk_gcc_boot_rom_ahb_clk	0xde2adeb1
+#define clk_gcc_gp1_clk			0x057f7b69
+#define clk_gcc_gp2_clk			0x9bf83ffd
+#define clk_gcc_gp3_clk			0xec6539ee
+#define clk_gcc_hmss_rbcpr_clk		0x699183be
+#define clk_gcc_mmss_noc_cfg_ahb_clk	0xb41a9d99
+#define clk_gcc_pcie_0_aux_clk		0x3d2e3ece
+#define clk_gcc_pcie_0_cfg_ahb_clk	0x4dd325c3
+#define clk_gcc_pcie_0_mstr_axi_clk	0x3f85285b
+#define clk_gcc_pcie_0_slv_axi_clk	0xd69638a1
+#define clk_gcc_pcie_0_pipe_clk		0x4f37621e
+#define clk_gcc_pcie_0_phy_reset	0xdc3201c1
+#define clk_gcc_pcie_1_aux_clk		0xc9bb962c
+#define clk_gcc_pcie_1_cfg_ahb_clk	0xb6338658
+#define clk_gcc_pcie_1_mstr_axi_clk	0xc20f6269
+#define clk_gcc_pcie_1_slv_axi_clk	0xd54e40d6
+#define clk_gcc_pcie_1_pipe_clk		0xc1627422
+#define clk_gcc_pcie_1_phy_reset	0x674481bb
+#define clk_gcc_pcie_2_aux_clk		0xa4dc7ae8
+#define clk_gcc_pcie_2_cfg_ahb_clk	0x4f1d3121
+#define clk_gcc_pcie_2_mstr_axi_clk	0x9e81724a
+#define clk_gcc_pcie_2_slv_axi_clk	0x7990d8b2
+#define clk_gcc_pcie_2_pipe_clk		0xa757a834
+#define clk_gcc_pcie_2_phy_reset	0x82634880
+#define clk_gcc_pcie_phy_reset		0x9bc3c959
+#define clk_gcc_pcie_phy_com_reset	0x8bf513e6
+#define clk_gcc_pcie_phy_nocsr_com_phy_reset	0x0c16a2da
+#define clk_gcc_pcie_phy_aux_clk	0x4746e74f
+#define clk_gcc_pcie_phy_cfg_ahb_clk	0x8533671a
+#define clk_gcc_pdm2_clk		0x99d55711
+#define clk_gcc_pdm_ahb_clk		0x365664f6
+#define clk_gcc_prng_ahb_clk		0x397e7eaa
+#define clk_gcc_sdcc1_ahb_clk		0x691e0caa
+#define clk_gcc_sdcc1_apps_clk		0x9ad6fb96
+#define clk_gcc_sdcc2_ahb_clk		0x23d5727f
+#define clk_gcc_sdcc2_apps_clk		0x861b20ac
+#define clk_gcc_sdcc3_ahb_clk		0x565b2c03
+#define clk_gcc_sdcc3_apps_clk		0x0b27aeac
+#define clk_gcc_sdcc4_ahb_clk		0x64f3e6a8
+#define clk_gcc_sdcc4_apps_clk		0xbf7c4dc8
+#define clk_gcc_tsif_ahb_clk		0x88d2822c
+#define clk_gcc_tsif_ref_clk		0x8f1ed2c2
+#define clk_gcc_ufs_ahb_clk		0x1914bb84
+#define clk_gcc_ufs_axi_clk		0x47c743a7
+#define clk_gcc_ufs_ice_core_clk	0x310b0710
+#define clk_gcc_ufs_rx_cfg_clk		0xa6747786
+#define clk_gcc_ufs_rx_symbol_0_clk	0x7f43251c
+#define clk_gcc_ufs_rx_symbol_1_clk	0x03182fde
+#define clk_gcc_ufs_tx_cfg_clk		0xba2cf8b5
+#define clk_gcc_ufs_tx_symbol_0_clk	0x6a9f747a
+#define clk_gcc_ufs_unipro_core_clk	0x2daf7fd2
+#define clk_gcc_ufs_sys_clk_core_clk	0x360e5ac8
+#define clk_gcc_ufs_tx_symbol_clk_core_clk	0xf6fb0df7
+#define clk_gcc_usb20_master_clk	0x24c3b66a
+#define clk_gcc_usb20_mock_utmi_clk	0xe8db8203
+#define clk_gcc_usb20_sleep_clk		0x6e8cb4b2
+#define clk_gcc_usb30_master_clk	0xb3b4e2cb
+#define clk_gcc_usb30_mock_utmi_clk	0xa800b65a
+#define clk_gcc_usb30_sleep_clk		0xd0b65c92
+#define clk_gcc_usb3_phy_aux_clk	0x0d9a36e0
+#define clk_gcc_usb3_phy_pipe_clk	0xf279aff2
+#define clk_gcc_usb_phy_cfg_ahb2phy_clk	0xd1231a0e
+#define clk_gcc_aggre0_cnoc_ahb_clk	0x53a35559
+#define clk_gcc_aggre0_snoc_axi_clk	0x3c446400
+#define clk_gcc_aggre0_noc_qosgen_extref_clk	0x8c4356ba
+#define clk_hlos1_vote_lpass_core_smmu_clk	0x3aaa1743
+#define clk_hlos1_vote_lpass_adsp_smmu_clk	0xc76f702f
+#define clk_gcc_usb3_phy_reset		0x03d559f1
+#define clk_gcc_usb3phy_phy_reset	0xb1a4f885
+#define clk_gcc_usb3_clkref_clk		0xb6cc8f01
+#define clk_gcc_hdmi_clkref_clk		0x4d4eec04
+#define clk_gcc_edp_clkref_clk		0xa8685c3f
+#define clk_gcc_ufs_clkref_clk		0x92aa126f
+#define clk_gcc_pcie_clkref_clk		0xa2e247fa
+#define clk_gcc_rx2_usb2_clkref_clk	0x27ec24ba
+#define clk_gcc_rx1_usb2_clkref_clk	0x53351d25
+#define clk_gcc_smmu_aggre0_ahb_clk	0x47a06ce4
+#define clk_gcc_smmu_aggre0_axi_clk	0x3cac4a6c
+#define clk_gcc_sys_noc_usb3_axi_clk	0x94d26800
+#define clk_gcc_sys_noc_ufs_axi_clk	0x19d38312
+#define clk_gcc_aggre2_usb3_axi_clk	0xd5822a8e
+#define clk_gcc_aggre2_ufs_axi_clk	0xb31e5191
+#define clk_gcc_mmss_gpll0_div_clk	0xdd06848d
+#define clk_gcc_mmss_bimc_gfx_clk	0xe4f28754
+#define clk_gcc_bimc_gfx_clk		0x3edd69ad
+#define clk_gcc_qspi_ahb_clk		0x96969dc8
+#define clk_gcc_qspi_ser_clk		0xfaf1e266
+#define clk_qspi_ser_clk_src		0x426676ee
+#define clk_sdcc1_ice_core_clk_src	0xfd6a4301
+#define clk_gcc_sdcc1_ice_core_clk	0x0fd5680a
+#define clk_gcc_mss_cfg_ahb_clk		0x111cde81
+#define clk_gcc_mss_snoc_axi_clk	0x0e71de85
+#define clk_gcc_mss_q6_bimc_axi_clk	0x67544d62
+#define clk_gcc_mss_mnoc_bimc_axi_clk	0xf665d03f
+#define clk_gpll0_out_msscc		0x7d794829
+#define clk_gcc_debug_mux_v2		0xf7e749f0
+#define clk_gcc_dcc_ahb_clk		0xfa14a88c
+#define clk_gcc_aggre0_noc_mpu_cfg_ahb_clk	0x5c1bb8e2
+
+/* clock_mmss controlled clocks */
+#define clk_mmsscc_xo			0x05e63704
+#define clk_mmsscc_gpll0		0xe900c515
+#define clk_mmsscc_gpll0_div		0x73892e05
+#define clk_mmsscc_mmssnoc_ahb		0x7b4bd6f7
+#define clk_mmpll0			0xdd83b751
+#define clk_mmpll0_out_main		0x2f996a31
+#define clk_mmpll1			0x6da7fb90
+#define clk_mmpll1_out_main		0xa0d3a7da
+#define clk_mmpll4			0x22c063c1
+#define clk_mmpll4_out_main		0xfb21c2fd
+#define clk_mmpll3			0x18c76899
+#define clk_mmpll3_out_main		0x6eb6328f
+#define clk_ahb_clk_src			0x86f49203
+#define clk_mmpll2			0x1190e4d8
+#define clk_mmpll2_out_main		0x1e9e24a8
+#define clk_mmpll8			0xd06ad45e
+#define clk_mmpll8_out_main		0x75b1f386
+#define clk_mmpll9			0x1c50684c
+#define clk_mmpll9_out_main		0x16b74937
+#define clk_mmpll5			0xa41e1936
+#define clk_mmpll5_out_main		0xcc1897bf
+#define clk_csi0_clk_src		0x227e65bc
+#define clk_vfe0_clk_src		0xa0c2bd8f
+#define clk_vfe1_clk_src		0x4e357366
+#define clk_csi1_clk_src		0x6a2a6c36
+#define clk_csi2_clk_src		0x4113589f
+#define clk_csi3_clk_src		0xfd934012
+#define clk_maxi_clk_src		0x52c09777
+#define clk_cpp_clk_src			0x8382f56d
+#define clk_jpeg0_clk_src		0x9a0a0ac3
+#define clk_jpeg2_clk_src		0x5ad927f3
+#define clk_jpeg_dma_clk_src		0xb68afcea
+#define clk_mdp_clk_src			0x6dc1f8f1
+#define clk_video_core_clk_src		0x8be4c944
+#define clk_fd_core_clk_src		0xe4799ab7
+#define clk_cci_clk_src			0x822f3d97
+#define clk_csiphy0_3p_clk_src		0xd2474b12
+#define clk_csiphy1_3p_clk_src		0x46a02aff
+#define clk_csiphy2_3p_clk_src		0x1447813f
+#define clk_camss_gp0_clk_src		0x6b57cfe6
+#define clk_camss_gp1_clk_src		0xf735368a
+#define clk_jpeg_dma_clk_src		0xb68afcea
+#define clk_mclk0_clk_src		0x266b3853
+#define clk_mclk1_clk_src		0xa73cad0c
+#define clk_mclk2_clk_src		0x42545468
+#define clk_mclk3_clk_src		0x2bfbb714
+#define clk_csi0phytimer_clk_src	0xc8a309be
+#define clk_csi1phytimer_clk_src	0x7c0fe23a
+#define clk_csi2phytimer_clk_src	0x62ffea9c
+#define clk_rbbmtimer_clk_src		0x17649ecc
+#define clk_esc0_clk_src		0xb41d7c38
+#define clk_esc1_clk_src		0x3b0afa42
+#define clk_hdmi_clk_src		0xb40aeea9
+#define clk_vsync_clk_src		0xecb43940
+#define clk_rbcpr_clk_src		0x2c2e9af2
+#define clk_video_subcore0_clk_src	0x88d79636
+#define clk_video_subcore1_clk_src	0x4966930c
+#define clk_mmss_bto_ahb_clk		0xfdf8c361
+#define clk_camss_ahb_clk		0xc4ff91d4
+#define clk_camss_cci_ahb_clk		0x04c4441a
+#define clk_camss_cci_clk		0xd6cb5eb9
+#define clk_camss_cpp_ahb_clk		0x12e9a87b
+#define clk_camss_cpp_clk		0xb82f366b
+#define clk_camss_cpp_axi_clk		0x5598c804
+#define clk_camss_cpp_vbif_ahb_clk	0xb5f31be4
+#define clk_camss_csi0_ahb_clk		0x6e29c972
+#define clk_camss_csi0_clk		0x30862ddb
+#define clk_camss_csi0phy_clk		0x2cecfb84
+#define clk_camss_csi0pix_clk		0x6946f77b
+#define clk_camss_csi0rdi_clk		0x83645ef5
+#define clk_camss_csi1_ahb_clk		0xccc15f06
+#define clk_camss_csi1_clk		0xb150f052
+#define clk_camss_csi1phy_clk		0xb989f06d
+#define clk_camss_csi1pix_clk		0x58d19bf3
+#define clk_camss_csi1rdi_clk		0x4d2f3352
+#define clk_camss_csi2_ahb_clk		0x92d02d75
+#define clk_camss_csi2_clk		0x74fc92e8
+#define clk_camss_csi2phy_clk		0xda05d9d8
+#define clk_camss_csi2pix_clk		0xf8ed0731
+#define clk_camss_csi2rdi_clk		0xdc1b2081
+#define clk_camss_csi3_ahb_clk		0xee5e459c
+#define clk_camss_csi3_clk		0x39488fdd
+#define clk_camss_csi3phy_clk		0x8b6063b9
+#define clk_camss_csi3pix_clk		0xd82bd467
+#define clk_camss_csi3rdi_clk		0xb6750046
+#define clk_camss_csi_vfe0_clk		0x3023937a
+#define clk_camss_csi_vfe1_clk		0xe66fa522
+#define clk_camss_csiphy0_3p_clk	0xf2a54f5a
+#define clk_camss_csiphy1_3p_clk	0x8bf70cb2
+#define clk_camss_csiphy2_3p_clk	0x1c14c939
+#define clk_camss_gp0_clk		0xcee7e51d
+#define clk_camss_gp1_clk		0x41f1c2e3
+#define clk_camss_ispif_ahb_clk		0x9a212c6d
+#define clk_camss_jpeg0_clk		0x0b0e2db7
+#define clk_camss_jpeg2_clk		0xd7291c8d
+#define clk_camss_jpeg_ahb_clk		0x1f47fd28
+#define clk_camss_jpeg_axi_clk		0x9e5545c8
+#define clk_camss_jpeg_dma_clk		0x2336e65d
+#define clk_camss_mclk0_clk		0xcf0c61e0
+#define clk_camss_mclk1_clk		0xd1410ed4
+#define clk_camss_mclk2_clk		0x851286f2
+#define clk_camss_mclk3_clk		0x4db11c45
+#define clk_camss_micro_ahb_clk		0x33a23277
+#define clk_camss_csi0phytimer_clk	0xff93b3c8
+#define clk_camss_csi1phytimer_clk	0x6c399ab6
+#define clk_camss_csi2phytimer_clk	0x24f47f49
+#define clk_camss_top_ahb_clk		0x8f8b2d33
+#define clk_camss_vfe_ahb_clk		0x595197bc
+#define clk_camss_vfe_axi_clk		0x273d4c31
+#define clk_camss_vfe0_ahb_clk		0x4652833c
+#define clk_camss_vfe0_clk		0x1e9bb8c4
+#define clk_camss_vfe0_stream_clk	0x22835fa4
+#define clk_camss_vfe1_ahb_clk		0x6a56abd3
+#define clk_camss_vfe1_clk		0x5bffa69b
+#define clk_camss_vfe1_stream_clk	0x92f849b9
+#define clk_fd_ahb_clk			0x868a2c5c
+#define clk_fd_core_clk			0x3badcae4
+#define clk_fd_core_uar_clk		0x7e624e15
+#define clk_gpu_ahb_clk			0xf97f1d43
+#define clk_gpu_aon_isense_clk		0xa9e9b297
+#define clk_gpu_gx_gfx3d_clk		0xb7ece823
+#define clk_gpu_mx_clk			0xb80ccedf
+#define clk_gpu_gx_rbbmtimer_clk	0xdeba634e
+#define clk_mdss_ahb_clk		0x684ccb41
+#define clk_mdss_axi_clk		0xcc07d687
+#define clk_mdss_esc0_clk		0x28cafbe6
+#define clk_mdss_esc1_clk		0xc22c6883
+#define clk_mdss_hdmi_ahb_clk		0x01cef516
+#define clk_mdss_hdmi_clk		0x097a6de9
+#define clk_mdss_mdp_clk		0x618336ac
+#define clk_mdss_vsync_clk		0x42a022d3
+#define clk_mmss_misc_ahb_clk		0xea30b0e7
+#define clk_mmss_misc_cxo_clk		0xe620cd80
+#define clk_mmagic_bimc_noc_cfg_ahb_clk 0x12d5ba72
+#define clk_mmagic_camss_axi_clk	0xa8b1c16b
+#define clk_mmagic_camss_noc_cfg_ahb_clk 0x5182c819
+#define clk_mmss_mmagic_cfg_ahb_clk	0x5e94a822
+#define clk_mmagic_mdss_axi_clk		0xa0359d10
+#define clk_mmagic_mdss_noc_cfg_ahb_clk 0x9c6d5482
+#define clk_mmagic_video_axi_clk	0x7b9219c3
+#define clk_mmagic_video_noc_cfg_ahb_clk 0x5124d256
+#define clk_mmss_mmagic_ahb_clk		0x3d15f2b0
+#define clk_mmss_mmagic_maxi_clk	0xbdaf5af7
+#define clk_mmss_rbcpr_ahb_clk		0x623ba55f
+#define clk_mmss_rbcpr_clk		0x69a23a6f
+#define clk_mmss_spdm_cpp_clk		0xefe35cd2
+#define clk_mmss_spdm_jpeg_dma_clk	0xcb7bd5a0
+#define clk_smmu_cpp_ahb_clk		0x3ad82d84
+#define clk_smmu_cpp_axi_clk		0xa6bb2f4a
+#define clk_smmu_jpeg_ahb_clk		0x10c436ec
+#define clk_smmu_jpeg_axi_clk		0x41112f37
+#define clk_smmu_mdp_ahb_clk		0x04994cb2
+#define clk_smmu_mdp_axi_clk		0x7fd71687
+#define clk_smmu_rot_ahb_clk		0xa30772c9
+#define clk_smmu_rot_axi_clk		0xfed7c078
+#define clk_smmu_vfe_ahb_clk		0x4dabebe7
+#define clk_smmu_vfe_axi_clk		0xde483725
+#define clk_smmu_video_ahb_clk		0x2d738e2c
+#define clk_smmu_video_axi_clk		0xe2b5b887
+#define clk_video_ahb_clk		0x90775cfb
+#define clk_video_axi_clk		0xe6c16dba
+#define clk_video_core_clk		0x7e876ec3
+#define clk_video_maxi_clk		0x97749db6
+#define clk_video_subcore0_clk		0xb6f63e6c
+#define clk_video_subcore1_clk		0x26c29cb4
+#define clk_vmem_ahb_clk		0xab6223ff
+#define clk_vmem_maxi_clk		0x15ef32db
+#define clk_mmss_debug_mux		0xe646ffda
+#define clk_mmss_gcc_dbg_clk		0xafa4d48a
+#define clk_gfx3d_clk_src		0x917f76ef
+#define clk_extpclk_clk_src		0xb2c31abd
+#define clk_mdss_byte0_clk		0xf5a03f64
+#define clk_mdss_byte1_clk		0xb8c7067d
+#define clk_mdss_extpclk_clk		0xfa5aadb0
+#define clk_mdss_pclk0_clk		0x3487234a
+#define clk_mdss_pclk1_clk		0xd5804246
+#define clk_gpu_gcc_dbg_clk		0x0ccc42cd
+#define clk_mdss_mdp_vote_clk		0x588460a4
+#define clk_mdss_rotator_vote_clk	0x5b1f675e
+#define clk_mmpll2_postdiv_clk		0x4fdeaaba
+#define clk_mmpll8_postdiv_clk		0xedf57882
+#define clk_mmpll9_postdiv_clk		0x3064b618
+#define clk_gfx3d_clk_src_v2		0x4210acb7
+#define clk_byte0_clk_src		0x75cc885b
+#define clk_byte1_clk_src		0x63c2c955
+#define clk_pclk0_clk_src		0xccac1f35
+#define clk_pclk1_clk_src		0x090f68ac
+#define clk_ext_byte0_clk_src		0xfb32f31e
+#define clk_ext_byte1_clk_src		0x585ef6d4
+#define clk_ext_pclk0_clk_src		0x087c1612
+#define clk_ext_pclk1_clk_src		0x8067c5a3
+
+/* clock_debug controlled clocks */
+#define clk_gcc_debug_mux		0x8121ac15
+
+/* external multimedia clocks */
+#define clk_dsi0pll_pixel_clk_mux	0x792379e1
+#define clk_dsi0pll_byte_clk_mux	0x60e83f06
+#define clk_dsi0pll_byte_clk_src	0xbbaa30be
+#define clk_dsi0pll_pixel_clk_src	0x45b3260f
+#define clk_dsi0pll_n2_div_clk		0x1474c213
+#define clk_dsi0pll_post_n1_div_clk	0xdab8c389
+#define clk_dsi0pll_vco_clk		0x15940d40
+#define clk_dsi1pll_pixel_clk_mux	0x36458019
+#define clk_dsi1pll_byte_clk_mux	0xb5a42b7b
+#define clk_dsi1pll_byte_clk_src	0x63930a8f
+#define clk_dsi1pll_pixel_clk_src	0x0e4c9b56
+#define clk_dsi1pll_n2_div_clk		0x2c9d4007
+#define clk_dsi1pll_post_n1_div_clk	0x03020041
+#define clk_dsi1pll_vco_clk		0x99797b50
+#define clk_mdss_dsi1_vco_clk_src	0xfcd15658
+#define clk_hdmi_vco_clk		0x66003284
+
+#define clk_dsi0pll_shadow_byte_clk_src	0x177c029c
+#define clk_dsi0pll_shadow_pixel_clk_src	0x98ae3c92
+#define clk_dsi0pll_shadow_n2_div_clk	0xd5f0dad9
+#define clk_dsi0pll_shadow_post_n1_div_clk	0x1f7c8cf8
+#define clk_dsi0pll_shadow_vco_clk	0xb100ca83
+#define clk_dsi1pll_shadow_byte_clk_src	0xfc021ce5
+#define clk_dsi1pll_shadow_pixel_clk_src	0xdcca3ffc
+#define clk_dsi1pll_shadow_n2_div_clk		0x189541bf
+#define clk_dsi1pll_shadow_post_n1_div_clk	0x1637020e
+#define clk_dsi1pll_shadow_vco_clk		0x68d8b6f7
+
+/* CPU clocks */
+#define clk_pwrcl_clk 0xc554130e
+#define clk_pwrcl_pll 0x25454ca1
+#define clk_pwrcl_alt_pll 0xc445471b
+#define clk_pwrcl_pll_main 0x28948e22
+#define clk_pwrcl_alt_pll_main 0x25c8270e
+#define clk_pwrcl_hf_mux 0x77706ae6
+#define clk_pwrcl_lf_mux 0xd99e334d
+#define clk_perfcl_clk 0x58869997
+#define clk_perfcl_pll 0x97dcec1c
+#define clk_perfcl_alt_pll 0xfe2eaea1
+#define clk_perfcl_pll_main 0x0dbf0c0b
+#define clk_perfcl_alt_pll_main 0x0b892aab
+#define clk_perfcl_hf_mux 0x9e8bbe59
+#define clk_perfcl_lf_mux 0x2f9c278d
+#define clk_cbf_pll 0xfe2e96a3
+#define clk_cbf_pll_main 0x2b05cf95
+#define clk_cbf_hf_mux 0x71244f73
+#define clk_cbf_clk 0x48e9e16b
+#define clk_xo_ao 0x428c856d
+#define clk_sys_apcsaux_clk 0x0b0dd513
+#define clk_cpu_debug_mux 0xc7acaa31
+
+/* GCC block resets */
+#define QUSB2PHY_PRIM_BCR		0
+#define QUSB2PHY_SEC_BCR		1
+#define BLSP1_BCR			2
+#define BLSP2_BCR			3
+#define BOOT_ROM_BCR			4
+#define PRNG_BCR			5
+#define UFS_BCR				6
+#define USB_20_BCR			7
+#define USB_30_BCR			8
+#define USB3_PHY_BCR			9
+#define USB3PHY_PHY_BCR			10
+#define PCIE_0_PHY_BCR			11
+#define PCIE_1_PHY_BCR			12
+#define PCIE_2_PHY_BCR			13
+#define PCIE_PHY_BCR			14
+#define PCIE_PHY_COM_BCR		15
+#define PCIE_PHY_NOCSR_COM_PHY_BCR	16
+
+/* MMSS Block resets */
+#define VIDEO_BCR			0
+#define MDSS_BCR			1
+#define CAMSS_MICRO_BCR			2
+#define CAMSS_JPEG_BCR			3
+#define CAMSS_VFE0_BCR			4
+#define CAMSS_VFE1_BCR			5
+#define FD_BCR				6
+#define GPU_GX_BCR			7
+
+#endif
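The clk_* macros in this header are not small sequential indices like the BCR reset defines at its end; each is a unique 32-bit identifier that the MSM clock driver apparently matches against its registered clocks, so DT consumers reference clocks by value rather than by position. A hedged consumer sketch; the node name, unit address, and controller label are illustrative assumptions:

	/* Hypothetical msm8996 UART consumer. */
	#include <dt-bindings/clock/msm-clocks-8996.h>

	uart: serial@75b0000 {
		clocks = <&clock_gcc clk_gcc_blsp2_uart2_apps_clk>,
			 <&clock_gcc clk_gcc_blsp2_ahb_clk>;
		clock-names = "core", "iface";
	};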
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/arch/cris/boot/dts/include/dt-bindings/clock/msm-clocks-8998.h	2019-01-22 16:16:28.171288678 +0100
@@ -0,0 +1,534 @@
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MSM_CLOCKS_8998_H
+#define __MSM_CLOCKS_8998_H
+
+#include "audio-ext-clk.h"
+
+/* clock_rpm controlled clocks */
+#define clk_ce1_clk				0x42229c55
+#define clk_ce1_a_clk				0x44a833fe
+#define clk_cxo_clk_src				0x79e95308
+#define clk_bimc_clk				0x4b80bf00
+#define clk_bimc_a_clk				0x4b25668a
+#define clk_cnoc_clk				0xd5ccb7f4
+#define clk_cnoc_a_clk				0xd8fe2ccc
+#define clk_snoc_clk				0x2c341aa0
+#define clk_snoc_a_clk				0x8fcef2af
+#define clk_cnoc_periph_clk			0xb11e9cf9
+#define clk_cnoc_periph_a_clk			0x1d7faa2e
+#define clk_cnoc_periph_keepalive_a_clk		0x7287aef2
+#define clk_ln_bb_clk1				0xb867b147
+#define clk_ln_bb_clk1_ao			0x7f63a93a
+#define clk_ln_bb_clk1_pin			0x6fc5653c
+#define clk_ln_bb_clk1_pin_ao			0x25d625bf
+#define clk_ln_bb_clk2				0xf83e6387
+#define clk_ln_bb_clk2_ao			0x96f09628
+#define clk_ln_bb_clk2_pin			0xa9ebe8d5
+#define clk_ln_bb_clk2_pin_ao			0x89a1226f
+#define clk_ln_bb_clk3				0x4f52a39e
+#define clk_ln_bb_clk3_ao			0xb15eba76
+#define clk_ln_bb_clk3_pin			0xc4de7dad
+#define clk_ln_bb_clk3_pin_ao			0xc01022e8
+#define clk_bimc_msmbus_clk			0xd212feea
+#define clk_bimc_msmbus_a_clk			0x71d1a499
+#define clk_cnoc_msmbus_clk			0x62228b5d
+#define clk_cnoc_msmbus_a_clk			0x67442955
+#define clk_cxo_clk_src_ao			0x64eb6004
+#define clk_cxo_dwc3_clk			0xf79c19f6
+#define clk_cxo_lpm_clk				0x94adbf3d
+#define clk_cxo_otg_clk				0x4eec0bb9
+#define clk_cxo_pil_lpass_clk			0xe17f0ff6
+#define clk_cxo_pil_ssc_clk			0x81832015
+#define clk_cxo_pil_spss_clk			0x5cd71a61
+#define clk_div_clk1				0xaa1157a6
+#define clk_div_clk1_ao				0x6b943d68
+#define clk_div_clk2				0xd454019f
+#define clk_div_clk2_ao				0x53f9e788
+#define clk_div_clk3				0xa9a55a68
+#define clk_div_clk3_ao				0x3d6725a8
+#define clk_ipa_clk				0xfa685cda
+#define clk_ipa_a_clk				0xeeec2919
+#define clk_mcd_ce1_clk				0xbb615d26
+#define clk_mmssnoc_axi_clk			0xdb4b31e6
+#define clk_mmssnoc_axi_a_clk			0xd4970614
+#define clk_qcedev_ce1_clk			0x293f97b0
+#define clk_qcrypto_ce1_clk			0xa6ac14df
+#define clk_qdss_clk				0x1492202a
+#define clk_qdss_a_clk				0xdd121669
+#define clk_qseecom_ce1_clk			0xaa858373
+#define clk_rf_clk1				0xaabeea5a
+#define clk_rf_clk1_ao				0x72a10cb8
+#define clk_rf_clk1_pin				0x8f463562
+#define clk_rf_clk1_pin_ao			0x62549ff6
+#define clk_rf_clk2				0x24a30992
+#define clk_rf_clk2_ao				0x944d8bbd
+#define clk_rf_clk2_pin				0xa7c5602a
+#define clk_rf_clk2_pin_ao			0x2d75eb4d
+#define clk_rf_clk3				0xb673936b
+#define clk_rf_clk3_ao				0x038bb968
+#define clk_rf_clk3_pin				0x726f53f5
+#define clk_rf_clk3_pin_ao			0x76f9240f
+#define clk_scm_ce1_clk				0xd8ebcc62
+#define clk_snoc_msmbus_clk			0xe6900bb6
+#define clk_snoc_msmbus_a_clk			0x5d4683bd
+#define clk_gcc_ce1_ahb_m_clk			0x2eb28c01
+#define clk_gcc_ce1_axi_m_clk			0xc174dfba
+#define clk_aggre1_noc_clk			0x049abba8
+#define clk_aggre1_noc_a_clk			0xc12e4220
+#define clk_aggre2_noc_clk			0xaa681404
+#define clk_aggre2_noc_a_clk			0xcab67089
+#define clk_measure_only_bimc_hmss_axi_clk	0xc1cc4f11
+
+/* clock_gcc controlled clocks */
+#define clk_debug_mmss_clk			0x977c99b6
+#define clk_debug_rpm_clk			0x8e2b07ca
+#define clk_debug_cpu_clk			0x0e696b2b
+#define clk_gpu_gcc_debug_clk			0x3eb88190
+#define clk_gfx_gcc_debug_clk			0xa3a64fec
+#define clk_gpll0				0x1ebe3bc4
+#define clk_gpll0_out_main			0xe9374de7
+#define clk_gpll0_ao				0xa1368304
+#define clk_gcc_mmss_gpll0_clk			0x8050f008
+#define clk_gcc_mmss_gpll0_div_clk		0xdd06848d
+#define clk_gcc_gpu_gpll0_clk			0xdad7a7a4
+#define clk_gcc_gpu_gpll0_div_clk		0x07d16c6a
+#define clk_gpll4				0xb3b5d85b
+#define clk_gpll4_out_main			0xa9a0ab9d
+#define clk_usb30_master_clk_src		0xc6262f89
+#define clk_pcie_aux_clk_src			0xebc50566
+#define clk_ufs_axi_clk_src			0x297ca380
+#define clk_blsp1_qup1_i2c_apps_clk_src		0x17f78f5e
+#define clk_blsp1_qup1_spi_apps_clk_src		0xf534c4fa
+#define clk_blsp1_qup2_i2c_apps_clk_src		0x8de71c79
+#define clk_blsp1_qup2_spi_apps_clk_src		0x33cf809a
+#define clk_blsp1_qup3_i2c_apps_clk_src		0xf161b902
+#define clk_blsp1_qup3_spi_apps_clk_src		0x5e95683f
+#define clk_blsp1_qup4_i2c_apps_clk_src		0xb2ecce68
+#define clk_blsp1_qup4_spi_apps_clk_src		0xddb5bbdb
+#define clk_blsp1_qup5_i2c_apps_clk_src		0x71ea7804
+#define clk_blsp1_qup5_spi_apps_clk_src		0x9752f35f
+#define clk_blsp1_qup6_i2c_apps_clk_src		0x28806803
+#define clk_blsp1_qup6_spi_apps_clk_src		0x44a1edc4
+#define clk_blsp1_uart1_apps_clk_src		0xf8146114
+#define clk_blsp1_uart2_apps_clk_src		0xfc9c2f73
+#define clk_blsp1_uart3_apps_clk_src		0x600497f2
+#define clk_blsp2_qup1_i2c_apps_clk_src		0xd6d1e95d
+#define clk_blsp2_qup1_spi_apps_clk_src		0xcc1b8365
+#define clk_blsp2_qup2_i2c_apps_clk_src		0x603b5c51
+#define clk_blsp2_qup2_spi_apps_clk_src		0xd577dc44
+#define clk_blsp2_qup3_i2c_apps_clk_src		0xea82959c
+#define clk_blsp2_qup3_spi_apps_clk_src		0xd04b1e92
+#define clk_blsp2_qup4_i2c_apps_clk_src		0x73dc968c
+#define clk_blsp2_qup4_spi_apps_clk_src		0x25d4a2b1
+#define clk_blsp2_qup5_i2c_apps_clk_src		0xcc3698bd
+#define clk_blsp2_qup5_spi_apps_clk_src		0xfa0cf45e
+#define clk_blsp2_qup6_i2c_apps_clk_src		0x2fa53151
+#define clk_blsp2_qup6_spi_apps_clk_src		0x5ca86755
+#define clk_blsp2_uart1_apps_clk_src		0x562c66dc
+#define clk_blsp2_uart2_apps_clk_src		0xdd448080
+#define clk_blsp2_uart3_apps_clk_src		0x46b2e90f
+#define clk_gp1_clk_src				0xad85b97a
+#define clk_gp2_clk_src				0xfb1f0065
+#define clk_gp3_clk_src				0x63b693d6
+#define clk_hmss_rbcpr_clk_src			0xedd9a474
+#define clk_pdm2_clk_src			0x31e494fd
+#define clk_sdcc2_apps_clk_src			0xfc46c821
+#define clk_sdcc4_apps_clk_src			0x7aaaaa0c
+#define clk_tsif_ref_clk_src			0x4e9042d1
+#define clk_ufs_ice_core_clk_src		0xda8e7119
+#define clk_ufs_phy_aux_clk_src			0xc6bca085
+#define clk_ufs_unipro_core_clk_src		0x179e80a9
+#define clk_usb30_mock_utmi_clk_src		0xa024a976
+#define clk_usb3_phy_aux_clk_src		0x15eec63c
+#define clk_qspi_ref_clk_src			0xfe6b8e11
+#define clk_gcc_pcie_phy_0_reset		0x6bb4df33
+#define clk_gcc_usb3_phy_reset			0x03d559f1
+#define clk_gcc_usb3phy_phy_reset		0xb1a4f885
+#define clk_gcc_aggre1_ufs_axi_clk		0x873459d8
+#define clk_gcc_aggre1_ufs_axi_hw_ctl_clk	0x117a6f39
+#define clk_gcc_aggre1_usb3_axi_clk		0xc5c3fbe8
+#define clk_gcc_bimc_mss_q6_axi_clk		0x7437988f
+#define clk_gcc_blsp1_ahb_clk			0x8caa5b4f
+#define clk_gcc_blsp1_qup1_i2c_apps_clk		0xc303fae9
+#define clk_gcc_blsp1_qup1_spi_apps_clk		0x759a76b0
+#define clk_gcc_blsp1_qup2_i2c_apps_clk		0x1076f220
+#define clk_gcc_blsp1_qup2_spi_apps_clk		0x3e77d48f
+#define clk_gcc_blsp1_qup3_i2c_apps_clk		0x9e25ac82
+#define clk_gcc_blsp1_qup3_spi_apps_clk		0xfb978880
+#define clk_gcc_blsp1_qup4_i2c_apps_clk		0xd7f40f6f
+#define clk_gcc_blsp1_qup4_spi_apps_clk		0x80f8722f
+#define clk_gcc_blsp1_qup5_i2c_apps_clk		0xacae5604
+#define clk_gcc_blsp1_qup5_spi_apps_clk		0xbf3e15d7
+#define clk_gcc_blsp1_qup6_i2c_apps_clk		0x5c6ad820
+#define clk_gcc_blsp1_qup6_spi_apps_clk		0x780d9f85
+#define clk_gcc_blsp1_uart1_apps_clk		0xc7c62f90
+#define clk_gcc_blsp1_uart2_apps_clk		0xf8a61c96
+#define clk_gcc_blsp1_uart3_apps_clk		0xc3298bd7
+#define clk_gcc_blsp2_ahb_clk			0x8f283c1d
+#define clk_gcc_blsp2_qup1_i2c_apps_clk		0x9ace11dd
+#define clk_gcc_blsp2_qup1_spi_apps_clk		0xa32604cc
+#define clk_gcc_blsp2_qup2_i2c_apps_clk		0x1bf9a57e
+#define clk_gcc_blsp2_qup2_spi_apps_clk		0xbf54ca6d
+#define clk_gcc_blsp2_qup3_i2c_apps_clk		0x336d4170
+#define clk_gcc_blsp2_qup3_spi_apps_clk		0xc68509d6
+#define clk_gcc_blsp2_qup4_i2c_apps_clk		0xbd22539d
+#define clk_gcc_blsp2_qup4_spi_apps_clk		0x01a72b93
+#define clk_gcc_blsp2_qup5_i2c_apps_clk		0xe2b2ce1d
+#define clk_gcc_blsp2_qup5_spi_apps_clk		0xf40999cd
+#define clk_gcc_blsp2_qup6_i2c_apps_clk		0x894bcea4
+#define clk_gcc_blsp2_qup6_spi_apps_clk		0xfe1bd34a
+#define clk_gcc_blsp2_uart1_apps_clk		0x8c3512ff
+#define clk_gcc_blsp2_uart2_apps_clk		0x1e1965a3
+#define clk_gcc_blsp2_uart3_apps_clk		0x382415ab
+#define clk_gcc_boot_rom_ahb_clk		0xde2adeb1
+#define clk_gcc_bimc_gfx_clk			0x3edd69ad
+#define clk_gcc_cfg_noc_usb3_axi_clk		0x9ea4c2d9
+#define clk_gcc_gp1_clk				0x057f7b69
+#define clk_gcc_gp2_clk				0x9bf83ffd
+#define clk_gcc_gp3_clk				0xec6539ee
+#define clk_gcc_gpu_bimc_gfx_clk		0x3909459b
+#define clk_gcc_gpu_cfg_ahb_clk			0x72f20a57
+#define clk_gcc_gpu_iref_clk			0xfd82abad
+#define clk_gcc_hmss_dvm_bus_clk		0x17cc8b53
+#define clk_gcc_hmss_rbcpr_clk			0x699183be
+#define clk_hmss_gpll0_clk_src			0x17eb05d0
+#define clk_hmss_gpll4_clk_src			0x20456cae
+#define clk_gcc_mmss_sys_noc_axi_clk		0x4467b15b
+#define clk_gcc_mss_at_clk			0x1692c5aa
+#define clk_nav_gcc_dbg_clk			0x2221c544
+#define clk_gcc_pcie_0_aux_clk			0x3d2e3ece
+#define clk_gcc_pcie_0_cfg_ahb_clk		0x4dd325c3
+#define clk_gcc_pcie_0_mstr_axi_clk		0x3f85285b
+#define clk_gcc_pcie_0_pipe_clk			0x4f37621e
+#define clk_gcc_pcie_0_slv_axi_clk		0xd69638a1
+#define clk_gcc_pcie_phy_aux_clk		0x4746e74f
+#define clk_gcc_pdm2_clk			0x99d55711
+#define clk_gcc_pdm_ahb_clk			0x365664f6
+#define clk_gcc_prng_ahb_clk			0x397e7eaa
+#define clk_gcc_sdcc2_ahb_clk			0x23d5727f
+#define clk_gcc_sdcc2_apps_clk			0x861b20ac
+#define clk_gcc_sdcc4_ahb_clk			0x64f3e6a8
+#define clk_gcc_sdcc4_apps_clk			0xbf7c4dc8
+#define clk_gcc_tsif_ahb_clk			0x88d2822c
+#define clk_gcc_tsif_ref_clk			0x8f1ed2c2
+#define clk_gcc_ufs_ahb_clk			0x1914bb84
+#define clk_gcc_ufs_axi_clk			0x47c743a7
+#define clk_gcc_ufs_axi_hw_ctl_clk		0x69385b45
+#define clk_gcc_ufs_ice_core_clk		0x310b0710
+#define clk_gcc_ufs_ice_core_hw_ctl_clk		0x84e15a5b
+#define clk_gcc_ufs_phy_aux_clk			0x17acc8fb
+#define clk_gcc_ufs_phy_aux_hw_ctl_clk		0x7dbdb2e2
+#define clk_gcc_ufs_rx_symbol_0_clk		0x7f43251c
+#define clk_gcc_ufs_rx_symbol_1_clk		0x03182fde
+#define clk_gcc_ufs_tx_symbol_0_clk		0x6a9f747a
+#define clk_ufs_tx_symbol_0_clk			0xb3fcd0f7
+#define clk_ufs_rx_symbol_0_clk			0x17a0f1cd
+#define clk_gcc_ufs_unipro_core_clk		0x2daf7fd2
+#define clk_gcc_ufs_unipro_core_hw_ctl_clk	0x4a4e0f3d
+#define clk_gcc_usb30_master_clk		0xb3b4e2cb
+#define clk_gcc_usb30_mock_utmi_clk		0xa800b65a
+#define clk_gcc_usb30_sleep_clk			0xd0b65c92
+#define clk_gcc_usb3_phy_aux_clk		0x0d9a36e0
+#define clk_gcc_usb3_phy_pipe_clk		0xf279aff2
+#define clk_gcc_usb3_clkref_clk			0xb6cc8f00
+#define clk_gcc_hdmi_clkref_clk			0x4d4eec04
+#define clk_gcc_edp_clkref_clk			0xa8685c3f
+#define clk_gcc_ufs_clkref_clk			0x92aa126f
+#define clk_gcc_pcie_clkref_clk			0xa2e247fa
+#define clk_gcc_rx2_qlink_clkref_clk		0xd0ba986d
+#define clk_gcc_rx1_usb2_clkref_clk		0x53351d25
+#define clk_gcc_pcie_phy_reset			0x9bc3c959
+#define clk_gcc_pcie_phy_com_reset		0x8bf513e6
+#define clk_gcc_pcie_phy_nocsr_com_phy_reset	0x0c16a2da
+#define clk_gcc_qusb2phy_prim_reset		0x07550fa1
+#define clk_gcc_qusb2phy_sec_reset		0x3f3a87d0
+#define clk_gcc_mmss_noc_cfg_ahb_clk		0xb41a9d99
+#define clk_gcc_dcc_ahb_clk			0xfa14a88c
+#define clk_hlos1_vote_lpass_core_smmu_clk	0x3aaa1743
+#define clk_hlos1_vote_lpass_adsp_smmu_clk	0xc76f702f
+#define clk_gcc_mss_cfg_ahb_clk			0x111cde81
+#define clk_gcc_mss_q6_bimc_axi_clk		0x67544d62
+#define clk_gcc_mss_mnoc_bimc_axi_clk		0xf665d03f
+#define clk_gpll0_out_msscc			0x7d794829
+#define clk_gcc_mss_snoc_axi_clk		0x0e71de85
+#define clk_gcc_qspi_ref_clk			0x766a0f7c
+#define clk_gcc_qspi_ahb_clk			0x96969dc8
+#define clk_gcc_debug_mux			0x8121ac15
+
+/* clock_mmss controlled clocks */
+#define clk_mmsscc_xo				0x05e63704
+#define clk_mmsscc_gpll0			0xe900c515
+#define clk_mmsscc_gpll0_div			0x73892e05
+#define clk_mmpll0_pll				0x361e3cfd
+#define clk_mmpll1_pll				0x198e426b
+#define clk_mmpll3_pll				0x18c76899
+#define clk_mmpll4_pll				0x22c063c1
+#define clk_mmpll5_pll				0xa41e1936
+#define clk_mmpll6_pll				0xc56fb440
+#define clk_mmpll7_pll				0x3ac216af
+#define clk_mmpll10_pll				0x2561263b
+#define clk_mmpll0_pll_out			0x1e9e24a8
+#define clk_mmpll1_pll_out			0x5fa32257
+#define clk_mmpll3_pll_out			0x6eb6328f
+#define clk_mmpll4_pll_out			0xfb21c2fd
+#define clk_mmpll5_pll_out			0xcc1897bf
+#define clk_mmpll6_pll_out			0xfb1060bd
+#define clk_mmpll7_pll_out			0x767758ed
+#define clk_mmpll10_pll_out			0x3c5668f3
+#define clk_ahb_clk_src				0x86f49203
+#define clk_csi0_clk_src			0x227e65bc
+#define clk_vfe0_clk_src			0xa0c2bd8f
+#define clk_vfe1_clk_src			0x4e357366
+#define clk_mdp_clk_src				0x6dc1f8f1
+#define clk_maxi_clk_src			0x52c09777
+#define clk_cpp_clk_src				0x8382f56d
+#define clk_jpeg0_clk_src			0x9a0a0ac3
+#define clk_rot_clk_src				0xce49b56c
+#define clk_video_core_clk_src			0x8be4c944
+#define clk_csi1_clk_src			0x6a2a6c36
+#define clk_csi2_clk_src			0x4113589f
+#define clk_csi3_clk_src			0xfd934012
+#define clk_fd_core_clk_src			0xe4799ab7
+#define clk_ext_dp_phy_pll_vco			0x441b576b
+#define clk_ext_dp_phy_pll_link			0xea12644c
+#define clk_dp_link_clk_src			0x370d0626
+#define clk_dp_crypto_clk_src			0xf8faa811
+#define clk_dp_pixel_clk_src			0xf5dfbabf
+#define clk_ext_extpclk_clk_src			0xe5b273af
+#define clk_ext_pclk0_clk_src			0x087c1612
+#define clk_ext_pclk1_clk_src			0x8067c5a3
+#define clk_pclk0_clk_src			0xccac1f35
+#define clk_pclk1_clk_src			0x090f68ac
+#define clk_video_subcore0_clk_src		0x88d79636
+#define clk_video_subcore1_clk_src		0x4966930c
+#define clk_cci_clk_src				0x822f3d97
+#define clk_camss_gp0_clk_src			0x43b063e9
+#define clk_camss_gp1_clk_src			0xa3315f1b
+#define clk_mclk0_clk_src			0x266b3853
+#define clk_mclk1_clk_src			0xa73cad0c
+#define clk_mclk2_clk_src			0x42545468
+#define clk_mclk3_clk_src			0x2bfbb714
+#define clk_csiphy_clk_src			0x8cceb70a
+#define clk_csi0phytimer_clk_src		0xc8a309be
+#define clk_csi1phytimer_clk_src		0x7c0fe23a
+#define clk_csi2phytimer_clk_src		0x62ffea9c
+#define clk_ext_byte0_clk_src			0xfb32f31e
+#define clk_ext_byte1_clk_src			0x585ef6d4
+#define clk_byte0_clk_src			0x75cc885b
+#define clk_byte1_clk_src			0x63c2c955
+#define clk_dp_aux_clk_src			0x2b6e972b
+#define clk_dp_gtc_clk_src			0xc5a86a42
+#define clk_esc0_clk_src			0xb41d7c38
+#define clk_esc1_clk_src			0x3b0afa42
+#define clk_extpclk_clk_src			0xb2c31abd
+#define clk_hdmi_clk_src			0xb40aeea9
+#define clk_vsync_clk_src			0xecb43940
+#define clk_mmss_bimc_smmu_ahb_clk		0x4825baf4
+#define clk_mmss_bimc_smmu_axi_clk		0xc365ac39
+#define clk_mmss_snoc_dvm_axi_clk		0x2c159a11
+#define clk_mmss_camss_ahb_clk			0xa51f2c1d
+#define clk_mmss_camss_cci_ahb_clk		0xfda8bb6a
+#define clk_mmss_camss_cci_clk			0x71bb5c97
+#define clk_mmss_camss_cpp_ahb_clk		0xd5554f15
+#define clk_mmss_camss_cpp_clk			0x8e99ef57
+#define clk_mmss_camss_cpp_axi_clk		0xd84e390b
+#define clk_mmss_camss_cpp_vbif_ahb_clk		0x1b33a88e
+#define clk_mmss_camss_cphy_csid0_clk		0x56114361
+#define clk_mmss_camss_csi0_ahb_clk		0x2b58d241
+#define clk_mmss_camss_csi0_clk			0xccfe39ef
+#define clk_mmss_camss_csi0pix_clk		0x9e26509d
+#define clk_mmss_camss_csi0rdi_clk		0x01d5bf83
+#define clk_mmss_camss_cphy_csid1_clk		0x79fbcd8a
+#define clk_mmss_camss_csi1_ahb_clk		0x7073244b
+#define clk_mmss_camss_csi1_clk			0x3eeeaac0
+#define clk_mmss_camss_csi1pix_clk		0xf1375139
+#define clk_mmss_camss_csi1rdi_clk		0x43185024
+#define clk_mmss_camss_cphy_csid2_clk		0xf295e3ef
+#define clk_mmss_camss_csi2_ahb_clk		0x681c1479
+#define clk_mmss_camss_csi2_clk			0x94524569
+#define clk_mmss_camss_csi2pix_clk		0xf4de617d
+#define clk_mmss_camss_csi2rdi_clk		0x4bf01dc5
+#define clk_mmss_camss_cphy_csid3_clk		0x100188e9
+#define clk_mmss_camss_csi3_ahb_clk		0xfae7c29b
+#define clk_mmss_camss_csi3_clk			0x55e4bbae
+#define clk_mmss_camss_csi3pix_clk		0xc166a015
+#define clk_mmss_camss_csi3rdi_clk		0x6983a4cd
+#define clk_mmss_camss_csi_vfe0_clk		0x3b30b798
+#define clk_mmss_camss_csi_vfe1_clk		0xfe729af7
+#define clk_mmss_camss_csiphy0_clk		0x96c81af8
+#define clk_mmss_camss_csiphy1_clk		0xee9ac2bb
+#define clk_mmss_camss_csiphy2_clk		0x3365e70e
+#define clk_mmss_fd_ahb_clk			0x4ff1da4d
+#define clk_mmss_fd_core_clk			0x749e7eb0
+#define clk_mmss_fd_core_uar_clk		0x8ea480c5
+#define clk_mmss_camss_gp0_clk			0x3f7f6c87
+#define clk_mmss_camss_gp1_clk			0xdccdd730
+#define clk_mmss_camss_ispif_ahb_clk		0xbda4f0e3
+#define clk_mmss_camss_jpeg0_clk		0x4cc73b07
+#define clk_mmss_camss_jpeg0_vote_clk		0xc9efa6ac
+#define clk_mmss_camss_jpeg0_dma_vote_clk	0x371ec109
+#define clk_mmss_camss_jpeg_ahb_clk		0xde1fece3
+#define clk_mmss_camss_jpeg_axi_clk		0x7534616b
+#define clk_mmss_camss_mclk0_clk		0x056293a7
+#define clk_mmss_camss_mclk1_clk		0x96c7b69b
+#define clk_mmss_camss_mclk2_clk		0x8820556e
+#define clk_mmss_camss_mclk3_clk		0xf90ffb67
+#define clk_mmss_camss_micro_ahb_clk		0x6c6fd3c7
+#define clk_mmss_camss_csi0phytimer_clk		0x7a78864e
+#define clk_mmss_camss_csi1phytimer_clk		0x6e6c1de5
+#define clk_mmss_camss_csi2phytimer_clk		0x0235e2de
+#define clk_mmss_camss_top_ahb_clk		0x120618d6
+#define clk_mmss_camss_vfe0_ahb_clk		0x137bd0bd
+#define clk_mmss_camss_vfe0_clk			0xead28288
+#define clk_mmss_camss_vfe0_stream_clk		0xa0428287
+#define clk_mmss_camss_vfe1_ahb_clk		0xac0154c0
+#define clk_mmss_camss_vfe1_clk			0xc216b14d
+#define clk_mmss_camss_vfe1_stream_clk		0x745af3b6
+#define clk_mmss_camss_vfe_vbif_ahb_clk		0x0109a9c6
+#define clk_mmss_camss_vfe_vbif_axi_clk		0xe626d8a1
+#define clk_mmss_mdss_ahb_clk			0x85d37ab5
+#define clk_mmss_mdss_axi_clk			0xdf04fc1d
+#define clk_mmss_mdss_byte0_clk			0x38105d25
+#define clk_mmss_mdss_byte0_intf_clk		0x38e5aa79
+#define clk_mmss_mdss_byte0_intf_div_clk	0x8604f181
+#define clk_mmss_mdss_byte1_clk			0xe0c21354
+#define clk_mmss_mdss_byte1_intf_clk		0xcf654d8e
+#define clk_mmss_mdss_byte1_intf_div_clk	0xcdf334c5
+#define clk_mmss_mdss_dp_aux_clk		0x23125eb6
+#define clk_mmss_mdss_dp_crypto_clk		0x9a072d4e
+#define clk_mmss_mdss_dp_link_clk		0x8dd302d1
+#define clk_mmss_mdss_dp_link_intf_clk		0x70e386e6
+#define clk_mmss_mdss_dp_pixel_clk		0xb707b765
+#define clk_mmss_mdss_dp_gtc_clk		0xb59c151a
+#define clk_mmss_mdss_esc0_clk			0x5721ff83
+#define clk_mmss_mdss_esc1_clk			0xc3d0376b
+#define clk_mmss_mdss_extpclk_clk		0x74d5a954
+#define clk_mmss_mdss_hdmi_clk			0x28460a6d
+#define clk_mmss_mdss_hdmi_dp_ahb_clk		0x5448519f
+#define clk_mmss_mdss_mdp_clk			0x43539b0e
+#define clk_mmss_mdss_mdp_lut_clk		0x00627b2b
+#define clk_mmss_mdss_pclk0_clk			0xcc0e909d
+#define clk_mmss_mdss_pclk1_clk			0x850d9146
+#define clk_mmss_mdss_rot_clk			0xbb7e71c4
+#define clk_mmss_mdss_vsync_clk			0x629b36dc
+#define clk_mmss_misc_ahb_clk			0xea30b0e7
+#define clk_mmss_misc_cxo_clk			0xe620cd80
+#define clk_mmss_mnoc_ahb_clk			0x49a394f4
+#define clk_mmss_mnoc_maxi_clk			0xd8b7278f
+#define clk_mmss_video_subcore0_clk		0x23fae359
+#define clk_mmss_video_subcore1_clk		0x5213a0c7
+#define clk_mmss_video_ahb_clk			0x94334ae9
+#define clk_mmss_video_axi_clk			0xf3178ba5
+#define clk_mmss_video_core_clk			0x78f14c85
+#define clk_mmss_video_maxi_clk			0x1785ef88
+#define clk_mmss_vmem_ahb_clk			0x4b18955b
+#define clk_mmss_vmem_maxi_clk			0xb6067889
+#define clk_mmss_debug_mux			0xe646ffda
+
+/* external multimedia clocks */
+#define clk_dsi0pll_byteclk_mux			0xecf2c434
+#define clk_dsi0pll_byteclk_src			0x6f6f740f
+#define clk_dsi0pll_pclk_mux			0x6c9da335
+#define clk_dsi0pll_pclk_src			0x5efd85d4
+#define clk_dsi0pll_pclk_src_mux		0x84b14663
+#define clk_dsi0pll_post_bit_div		0xf46dcf27
+#define clk_dsi0pll_pll_out_div1		0xeda5b7fe
+#define clk_dsi0pll_pll_out_div2		0x97fa476d
+#define clk_dsi0pll_pll_out_div4		0x90a98ce0
+#define clk_dsi0pll_pll_out_div8		0x9d9d85cf
+#define clk_dsi0pll_pll_out_mux			0x179c27ca
+#define clk_dsi0pll_post_vco_mux		0xfaf9bd1f
+#define clk_dsi0pll_post_vco_div1		0xabb50b2a
+#define clk_dsi0pll_post_vco_div4		0xbe51c091
+#define clk_dsi0pll_bitclk_src			0x36c3c437
+#define clk_dsi0pll_vco_clk			0x15940d40
+
+#define clk_dsi1pll_byteclk_mux			0x14e2f38f
+#define clk_dsi1pll_byteclk_src			0x4b65c298
+#define clk_dsi1pll_pclk_mux			0x4c0518b5
+#define clk_dsi1pll_pclk_src			0xeddcd80e
+#define clk_dsi1pll_pclk_src_mux		0x3651feb3
+#define clk_dsi1pll_post_bit_div		0x712f0260
+#define clk_dsi1pll_pll_out_div8		0x87628ddb
+#define clk_dsi1pll_pll_out_div4		0x0d9a384b
+#define clk_dsi1pll_pll_out_div2		0x0c9b5748
+#define clk_dsi1pll_pll_out_div1		0x3193164e
+#define clk_dsi1pll_pll_out_mux			0x171bf8fd
+#define clk_dsi1pll_post_vco_mux		0xc6a90d20
+#define clk_dsi1pll_post_vco_div1		0x6f47ca7d
+#define clk_dsi1pll_post_vco_div4		0x90628974
+#define clk_dsi1pll_bitclk_src			0x13ab045b
+#define clk_dsi1pll_vco_clk			0x99797b50
+
+#define clk_dp_vco_clk				0xfcaaeec7
+#define clk_dp_link_2x_clk_divsel_five		0xcfe3f5dd
+#define clk_vco_divsel_four_clk_src		0xe0da19c0
+#define clk_vco_divsel_two_clk_src		0xb5cfc6a8
+#define clk_vco_divided_clk_src_mux		0x3f8197c2
+#define clk_hdmi_vco_clk			0xbb7dc20d
+
+/* clock_gpu controlled clocks */
+#define clk_gpucc_xo				0xc4e1a890
+#define clk_gpucc_gpll0				0x0db0e37f
+#define clk_gfx3d_clk_src			0x917f76ef
+#define clk_rbbmtimer_clk_src			0x17649ecc
+#define clk_gfx3d_isense_clk_src		0xecc3eafa
+#define clk_rbcpr_clk_src			0x2c2e9af2
+#define clk_gpu_debug_div_clk			0x75d6f53f
+#define clk_gpucc_gfx3d_clk			0x95f01bd5
+#define clk_gpucc_rbbmtimer_clk			0x58a0a7ca
+#define clk_gpucc_gfx3d_isense_clk		0xb2678e80
+#define clk_gpucc_cxo_clk			0x6532dcae
+#define clk_gpucc_rbcpr_clk			0x7bd750e8
+#define clk_gpu_pll0_pll			0x0e61ab4d
+#define clk_gpu_pll0_pll_out_even		0xb0ed5009
+#define clk_gpu_pll0_pll_out_odd		0x08c5a8a5
+#define clk_gpu_pll0_postdiv_clk		0x76c19f3c
+#define clk_gpucc_mx_clk			0x1edbb879
+#define clk_gpucc_gcc_dbg_clk			0x9ae8cd3c
+#define clk_gfxcc_dbg_clk			0x3ed47625
+
+/* CPU clocks */
+#define clk_pwrcl_clk				0xc554130e
+#define clk_perfcl_clk				0x58869997
+#define clk_sys_apcsaux_clk_gcc			0xf905e862
+#define clk_xo_ao				0x428c856d
+#define clk_osm_clk_src				0xaabe68c3
+#define clk_cpu_debug_mux			0x3ae8bcb2
+
+/* Audio External Clocks */
+#define clk_audio_ap_clk			0x9b5727cb
+#define clk_audio_pmi_clk			0xcbfe416d
+#define clk_audio_ap_clk2			0x454d1e91
+
+/* GCC block resets */
+#define QUSB2PHY_PRIM_BCR			0
+#define QUSB2PHY_SEC_BCR			1
+#define BLSP1_BCR				2
+#define BLSP2_BCR				3
+#define BOOT_ROM_BCR				4
+#define PRNG_BCR				5
+#define UFS_BCR					6
+#define USB_30_BCR				7
+#define USB3_PHY_BCR				8
+#define USB3PHY_PHY_BCR				9
+#define PCIE_0_PHY_BCR				10
+#define PCIE_PHY_BCR				11
+#define PCIE_PHY_COM_BCR			12
+#define PCIE_PHY_NOCSR_COM_PHY_BCR		13
+
+/* MMSS block resets */
+#define CAMSS_MICRO_BCR				0
+
+#endif
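Unlike the 32-bit clock identifiers, the GCC/MMSS block reset defines at the end of this header are plain sequential indices, so a consumer passes them as ordinary specifier cells to the reset controller. A sketch under the same caveats as above (node, address, and the assumption that &clock_gcc also acts as the reset provider are all illustrative):

	/* Hypothetical reset consumer. */
	#include <dt-bindings/clock/msm-clocks-8998.h>

	usb3: usb@a800000 {
		resets = <&clock_gcc USB_30_BCR>;
		reset-names = "core";
	};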
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/arch/cris/boot/dts/include/dt-bindings/clock/msm-clocks-hwio-8998.h	2019-01-22 16:16:28.171288678 +0100
@@ -0,0 +1,395 @@
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define CLKFLAG_NO_RATE_CACHE					0x00004000
+
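+/*
+ * Voltage-corner indices, ordered from the lowest (LOWER) to the highest
+ * (TURBO) corner, used to index per-corner maximum-frequency tables.
+ */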
+#define FMAX_LOWER						0
+#define FMAX_LOW						1
+#define FMAX_NOM						2
+#define FMAX_TURBO						3
+
+#define HALT_CHECK_DELAY					5
+
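+/*
+ * RPM clock type identifiers: little-endian ASCII tags as carried in RPM
+ * SMD messages, e.g. 0x306b6c63 is "clk0" and 0x62616E45 is "Enab".
+ */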
+#define RPM_MISC_CLK_TYPE					0x306b6c63
+#define RPM_BUS_CLK_TYPE					0x316b6c63
+#define RPM_MEM_CLK_TYPE					0x326b6c63
+#define RPM_IPA_CLK_TYPE					0x617069
+#define RPM_CE_CLK_TYPE						0x6563
+#define RPM_AGGR_CLK_TYPE					0x72676761
+#define RPM_SMD_KEY_ENABLE					0x62616E45
+#define RPM_MMAXI_CLK_TYPE					0x69786d6d
+
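+/*
+ * Per-type RPM resource indices: each *_ID is local to one of the clock
+ * types above, which is why small values repeat (e.g. SNOC_CLK_ID and
+ * QDSS_CLK_ID are both 0x1 under different types).
+ */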
+#define CXO_CLK_SRC_ID						0x0
+#define QDSS_CLK_ID						0x1
+#define SNOC_CLK_ID						0x1
+#define CNOC_CLK_ID						0x2
+#define CNOC_PERIPH_CLK_ID					0x0
+#define BIMC_CLK_ID						0x0
+#define IPA_CLK_ID						0x0
+#define CE1_CLK_ID						0x0
+#define RF_CLK1_ID						0x4
+#define RF_CLK2_ID						0x5
+#define RF_CLK3_ID						0x6
+#define LN_BB_CLK1_ID						0x1
+#define LN_BB_CLK2_ID						0x2
+#define LN_BB_CLK3_ID						0x3
+#define DIV_CLK1_ID						0xb
+#define DIV_CLK2_ID						0xc
+#define DIV_CLK3_ID						0xd
+#define RF_CLK1_PIN_ID						0x4
+#define RF_CLK2_PIN_ID						0x5
+#define RF_CLK3_PIN_ID						0x6
+#define LN_BB_CLK1_PIN_ID					0x1
+#define LN_BB_CLK2_PIN_ID					0x2
+#define LN_BB_CLK3_PIN_ID					0x3
+#define AGGR1_NOC_ID						0x1
+#define AGGR2_NOC_ID						0x2
+#define MMSSNOC_AXI_CLK_ID					0x0
+
+#define APCS_COMMON_LMH_CMD_RCGR				0x0012C
+
+#define GCC_APCS_GPLL_ENA_VOTE					0x52000
+#define GCC_APCS_CLOCK_BRANCH_ENA_VOTE				0x52004
+#define GCC_APCS_CLOCK_BRANCH_ENA_VOTE_1			0x5200C
+#define PLLTEST_PAD_CFG						0x6200C
+#define GCC_XO_DIV4_CBCR				0x43008
+#define CLOCK_FRQ_MEASURE_CTL				0x62004
+#define CLOCK_FRQ_MEASURE_STATUS			0x62008
+#define GCC_GPLL0_MODE						0x00000
+#define GCC_GPLL4_MODE						0x77000
+#define GCC_USB30_MASTER_CMD_RCGR				0x0F014
+#define GCC_PCIE_AUX_CMD_RCGR					0x6C000
+#define GCC_UFS_AXI_CMD_RCGR					0x75018
+#define GCC_BLSP1_QUP1_I2C_APPS_CMD_RCGR			0x19020
+#define GCC_BLSP1_QUP1_SPI_APPS_CMD_RCGR			0x1900C
+#define GCC_BLSP1_QUP2_I2C_APPS_CMD_RCGR			0x1B020
+#define GCC_BLSP1_QUP2_SPI_APPS_CMD_RCGR			0x1B00C
+#define GCC_BLSP1_QUP3_I2C_APPS_CMD_RCGR			0x1D020
+#define GCC_BLSP1_QUP3_SPI_APPS_CMD_RCGR			0x1D00C
+#define GCC_BLSP1_QUP4_I2C_APPS_CMD_RCGR			0x1F020
+#define GCC_BLSP1_QUP4_SPI_APPS_CMD_RCGR			0x1F00C
+#define GCC_BLSP1_QUP5_I2C_APPS_CMD_RCGR			0x21020
+#define GCC_BLSP1_QUP5_SPI_APPS_CMD_RCGR			0x2100C
+#define GCC_BLSP1_QUP6_I2C_APPS_CMD_RCGR			0x23020
+#define GCC_BLSP1_QUP6_SPI_APPS_CMD_RCGR			0x2300C
+#define GCC_BLSP1_UART1_APPS_CMD_RCGR				0x1A00C
+#define GCC_BLSP1_UART2_APPS_CMD_RCGR				0x1C00C
+#define GCC_BLSP1_UART3_APPS_CMD_RCGR				0x1E00C
+#define GCC_BLSP2_QUP1_I2C_APPS_CMD_RCGR			0x26020
+#define GCC_BLSP2_QUP2_I2C_APPS_CMD_RCGR			0x28020
+#define GCC_BLSP2_QUP3_I2C_APPS_CMD_RCGR			0x2A020
+#define GCC_BLSP2_QUP4_I2C_APPS_CMD_RCGR			0x2C020
+#define GCC_BLSP2_QUP5_I2C_APPS_CMD_RCGR			0x2E020
+#define GCC_BLSP2_QUP6_I2C_APPS_CMD_RCGR			0x30020
+#define GCC_BLSP2_QUP1_SPI_APPS_CMD_RCGR			0x2600C
+#define GCC_BLSP2_QUP2_SPI_APPS_CMD_RCGR			0x2800C
+#define GCC_BLSP2_QUP3_SPI_APPS_CMD_RCGR			0x2A00C
+#define GCC_BLSP2_QUP4_SPI_APPS_CMD_RCGR			0x2C00C
+#define GCC_BLSP2_QUP5_SPI_APPS_CMD_RCGR			0x2E00C
+#define GCC_BLSP2_QUP6_SPI_APPS_CMD_RCGR			0x3000C
+#define GCC_BLSP2_UART1_APPS_CMD_RCGR				0x2700C
+#define GCC_BLSP2_UART2_APPS_CMD_RCGR				0x2900C
+#define GCC_BLSP2_UART3_APPS_CMD_RCGR				0x2B00C
+#define GCC_GP1_CMD_RCGR					0x64004
+#define GCC_GP2_CMD_RCGR					0x65004
+#define GCC_GP3_CMD_RCGR					0x66004
+#define GCC_HMSS_RBCPR_CMD_RCGR					0x48044
+#define GCC_PDM2_CMD_RCGR					0x33010
+#define GCC_SDCC2_APPS_CMD_RCGR					0x14010
+#define GCC_SDCC4_APPS_CMD_RCGR					0x16010
+#define GCC_TSIF_REF_CMD_RCGR					0x36010
+#define GCC_UFS_ICE_CORE_CMD_RCGR				0x76010
+#define GCC_UFS_PHY_AUX_CMD_RCGR				0x76044
+#define GCC_UFS_UNIPRO_CORE_CMD_RCGR				0x76028
+#define GCC_USB30_MOCK_UTMI_CMD_RCGR				0x0F028
+#define GCC_USB3_PHY_AUX_CMD_RCGR				0x5000C
+#define GCC_QSPI_REF_CMD_RCGR					0x9000C
+#define GCC_PCIE_0_PHY_BCR					0x6C01C
+#define GCC_HDMI_CLKREF_EN					0x88000
+#define GCC_UFS_CLKREF_EN					0x88004
+#define GCC_USB3_CLKREF_EN					0x88008
+#define GCC_PCIE_CLKREF_EN					0x8800C
+#define GCC_RX1_USB2_CLKREF_EN					0x88014
+#define GCC_USB3_PHY_BCR					0x50020
+#define GCC_AGGRE1_NOC_XO_CBCR					0x8202C
+#define GCC_AGGRE1_UFS_AXI_CBCR					0x82028
+#define GCC_AGGRE1_USB3_AXI_CBCR				0x82024
+#define GCC_BIMC_MSS_Q6_AXI_CBCR				0x4401C
+#define GCC_BLSP1_AHB_CBCR					0x17004
+#define GCC_BLSP1_BCR						0x17000
+#define GCC_BLSP1_QUP1_SPI_APPS_CBCR				0x19004
+#define GCC_BLSP1_QUP1_I2C_APPS_CBCR				0x19008
+#define GCC_BLSP1_QUP2_SPI_APPS_CBCR				0x1B004
+#define GCC_BLSP1_QUP2_I2C_APPS_CBCR				0x1B008
+#define GCC_BLSP1_QUP3_SPI_APPS_CBCR				0x1D004
+#define GCC_BLSP1_QUP3_I2C_APPS_CBCR				0x1D008
+#define GCC_BLSP1_QUP4_SPI_APPS_CBCR				0x1F004
+#define GCC_BLSP1_QUP4_I2C_APPS_CBCR				0x1F008
+#define GCC_BLSP1_QUP5_SPI_APPS_CBCR				0x21004
+#define GCC_BLSP1_QUP5_I2C_APPS_CBCR				0x21008
+#define GCC_BLSP1_QUP6_SPI_APPS_CBCR				0x23004
+#define GCC_BLSP1_QUP6_I2C_APPS_CBCR				0x23008
+#define GCC_BLSP1_UART1_APPS_CBCR				0x1A004
+#define GCC_BLSP1_UART2_APPS_CBCR				0x1C004
+#define GCC_BLSP1_UART3_APPS_CBCR				0x1E004
+#define GCC_BLSP2_AHB_CBCR					0x25004
+#define GCC_BLSP2_BCR						0x25000
+#define GCC_BLSP2_QUP1_SPI_APPS_CBCR				0x26004
+#define GCC_BLSP2_QUP1_I2C_APPS_CBCR				0x26008
+#define GCC_BLSP2_QUP2_I2C_APPS_CBCR				0x28008
+#define GCC_BLSP2_QUP2_SPI_APPS_CBCR				0x28004
+#define GCC_BLSP2_QUP3_SPI_APPS_CBCR				0x2A004
+#define GCC_BLSP2_QUP3_I2C_APPS_CBCR				0x2A008
+#define GCC_BLSP2_QUP4_SPI_APPS_CBCR				0x2C004
+#define GCC_BLSP2_QUP4_I2C_APPS_CBCR				0x2C008
+#define GCC_BLSP2_QUP5_SPI_APPS_CBCR				0x2E004
+#define GCC_BLSP2_QUP5_I2C_APPS_CBCR				0x2E008
+#define GCC_BLSP2_QUP6_SPI_APPS_CBCR				0x30004
+#define GCC_BLSP2_QUP6_I2C_APPS_CBCR				0x30008
+#define GCC_BLSP2_UART1_APPS_CBCR				0x27004
+#define GCC_BLSP2_UART2_APPS_CBCR				0x29004
+#define GCC_BLSP2_UART3_APPS_CBCR				0x2B004
+#define GCC_BOOT_ROM_AHB_CBCR					0x38004
+#define GCC_BOOT_ROM_BCR					0x38000
+#define GCC_CFG_NOC_USB3_AXI_CBCR				0x05018
+#define GCC_BIMC_GFX_CBCR					0x46040
+#define GCC_GP1_CBCR						0x64000
+#define GCC_GP2_CBCR						0x65000
+#define GCC_GP3_CBCR						0x66000
+#define GCC_GPU_BIMC_GFX_CBCR					0x71010
+#define GCC_GPU_CFG_AHB_CBCR					0x71004
+#define GCC_GPU_IREF_EN						0x88010
+#define GCC_HMSS_DVM_BUS_CBCR					0x4808C
+#define GCC_HMSS_RBCPR_CBCR					0x48008
+#define GCC_MMSS_SYS_NOC_AXI_CBCR				0x09000
+#define GCC_MMSS_NOC_CFG_AHB_CBCR				0x09004
+#define GCC_PCIE_0_SLV_AXI_CBCR					0x6B008
+#define GCC_PCIE_0_MSTR_AXI_CBCR				0x6B00C
+#define GCC_PCIE_0_CFG_AHB_CBCR					0x6B010
+#define GCC_PCIE_0_AUX_CBCR					0x6B014
+#define GCC_PCIE_0_PIPE_CBCR					0x6B018
+#define GCC_PCIE_PHY_AUX_CBCR					0x6F004
+#define GCC_PCIE_PHY_BCR					0x6F000
+#define GCC_PCIE_PHY_COM_BCR					0x6F014
+#define GCC_PCIE_PHY_NOCSR_COM_PHY_BCR				0x6F00C
+#define GCC_QUSB2PHY_PRIM_BCR					0x12000
+#define GCC_QUSB2PHY_SEC_BCR					0x12004
+#define GCC_PDM2_CBCR						0x3300C
+#define GCC_PDM_AHB_CBCR					0x33004
+#define GCC_PRNG_AHB_CBCR					0x34004
+#define GCC_PRNG_BCR						0x34000
+#define GCC_SDCC2_APPS_CBCR					0x14004
+#define GCC_SDCC2_AHB_CBCR					0x14008
+#define GCC_SDCC4_APPS_CBCR					0x16004
+#define GCC_SDCC4_AHB_CBCR					0x16008
+#define GCC_TSIF_AHB_CBCR					0x36004
+#define GCC_TSIF_REF_CBCR					0x36008
+#define GCC_UFS_AXI_CBCR					0x75008
+#define GCC_UFS_BCR						0x75000
+#define GCC_UFS_AHB_CBCR					0x7500C
+#define GCC_UFS_TX_SYMBOL_0_CBCR				0x75010
+#define GCC_UFS_RX_SYMBOL_0_CBCR				0x75014
+#define GCC_UFS_RX_SYMBOL_1_CBCR				0x7605C
+#define GCC_UFS_UNIPRO_CORE_CBCR				0x76008
+#define GCC_UFS_ICE_CORE_CBCR					0x7600C
+#define GCC_UFS_PHY_AUX_CBCR					0x76040
+#define GCC_USB30_MASTER_CBCR					0x0F008
+#define GCC_USB30_SLEEP_CBCR					0x0F00C
+#define GCC_USB30_MOCK_UTMI_CBCR				0x0F010
+#define GCC_USB_30_BCR						0x0F000
+#define GCC_USB3_PHY_AUX_CBCR					0x50000
+#define GCC_USB3_PHY_PIPE_CBCR					0x50004
+#define GCC_USB3PHY_PHY_BCR					0x50024
+#define GCC_APCS_CLOCK_SLEEP_ENA_VOTE				0x52008
+#define GCC_MSS_CFG_AHB_CBCR					0x8A000
+#define GCC_MSS_Q6_BIMC_AXI_CBCR				0x8A040
+#define GCC_MSS_MNOC_BIMC_AXI_CBCR				0x8A004
+#define GCC_MSS_SNOC_AXI_CBCR					0x8A03C
+#define GCC_HMSS_GPLL0_CMD_RCGR					0x4805C
+#define GCC_DCC_AHB_CBCR					0x84004
+#define GCC_HLOS1_VOTE_LPASS_CORE_SMMU_CBCR			0x7D010
+#define GCC_HLOS1_VOTE_LPASS_ADSP_SMMU_CBCR			0x7D014
+#define GCC_QSPI_AHB_CBCR					0x90004
+#define GCC_QSPI_REF_CBCR					0x90008
+#define GCC_MMSS_MISC						0x0902C
+#define GCC_GPU_MISC						0x71028
+
+#define GPUCC_GPU_PLL0_PLL_MODE					0x00000
+#define GPUCC_GPU_PLL0_USER_CTL_MODE				0x0000C
+#define GPUCC_GFX3D_CMD_RCGR					0x01070
+#define GPUCC_RBBMTIMER_CMD_RCGR				0x010B0
+#define GPUCC_GFX3D_ISENSE_CMD_RCGR				0x01100
+#define GPUCC_RBCPR_CMD_RCGR					0x01030
+#define GPUCC_GFX3D_CBCR					0x01098
+#define GPUCC_RBBMTIMER_CBCR					0x010D0
+#define GPUCC_GFX3D_ISENSE_CBCR					0x01124
+#define GPUCC_CXO_CBCR						0x01020
+#define GPUCC_RBCPR_CBCR					0x01054
+#define GPU_GX_BCR						0x01090
+#define GPUCC_GX_DOMAIN_MISC					0x00130
+#define GPUCC_GPU_DD_WRAP_CTRL					0x00430
+#define GPUCC_DEBUG_CLK_CTL					0x00120
+
+#define MMSS_PLL_VOTE_APCS					0x001E0
+#define MMSS_MMPLL0_PLL_MODE					0x0C000
+#define MMSS_MMPLL1_PLL_MODE					0x0C050
+#define MMSS_MMPLL3_PLL_MODE					0x00000
+#define MMSS_MMPLL4_PLL_MODE					0x00050
+#define MMSS_MMPLL5_PLL_MODE					0x000A0
+#define MMSS_MMPLL6_PLL_MODE					0x000F0
+#define MMSS_MMPLL7_PLL_MODE					0x00140
+#define MMSS_MMPLL10_PLL_MODE					0x00190
+#define MMSS_AHB_CMD_RCGR					0x05000
+#define MMSS_CSI0_CMD_RCGR					0x03090
+#define MMSS_VFE0_CMD_RCGR					0x03600
+#define MMSS_VFE1_CMD_RCGR					0x03620
+#define MMSS_MDP_CMD_RCGR					0x02040
+#define MMSS_MAXI_CMD_RCGR					0x0F020
+#define MMSS_CPP_CMD_RCGR					0x03640
+#define MMSS_JPEG0_CMD_RCGR					0x03500
+#define MMSS_ROT_CMD_RCGR					0x021A0
+#define MMSS_VIDEO_CORE_CMD_RCGR				0x01000
+#define MMSS_CSI1_CMD_RCGR					0x03100
+#define MMSS_CSI2_CMD_RCGR					0x03160
+#define MMSS_CSI3_CMD_RCGR					0x031C0
+#define MMSS_FD_CORE_CMD_RCGR					0x03B00
+#define MMSS_BYTE0_CMD_RCGR					0x02120
+#define MMSS_BYTE1_CMD_RCGR					0x02140
+#define MMSS_PCLK0_CMD_RCGR					0x02000
+#define MMSS_PCLK1_CMD_RCGR					0x02020
+#define MMSS_VIDEO_SUBCORE0_CMD_RCGR				0x01060
+#define MMSS_VIDEO_SUBCORE1_CMD_RCGR				0x01080
+#define MMSS_CSIPHY_CMD_RCGR					0x03800
+#define MMSS_CCI_CMD_RCGR					0x03300
+#define MMSS_CAMSS_GP0_CMD_RCGR					0x03420
+#define MMSS_CAMSS_GP1_CMD_RCGR					0x03450
+#define MMSS_MCLK0_CMD_RCGR					0x03360
+#define MMSS_MCLK1_CMD_RCGR					0x03390
+#define MMSS_MCLK2_CMD_RCGR					0x033C0
+#define MMSS_MCLK3_CMD_RCGR					0x033F0
+#define MMSS_CAMSS_CSI2PHYTIMER_CBCR				0x03084
+#define MMSS_CSI0PHYTIMER_CMD_RCGR				0x03000
+#define MMSS_CSI1PHYTIMER_CMD_RCGR				0x03030
+#define MMSS_CSI2PHYTIMER_CMD_RCGR				0x03060
+#define MMSS_DP_GTC_CMD_RCGR					0x02280
+#define MMSS_ESC0_CMD_RCGR					0x02160
+#define MMSS_ESC1_CMD_RCGR					0x02180
+#define MMSS_EXTPCLK_CMD_RCGR					0x02060
+#define MMSS_HDMI_CMD_RCGR					0x02100
+#define MMSS_VSYNC_CMD_RCGR					0x02080
+#define MMSS_BIMC_SMMU_AHB_CBCR					0x0E004
+#define MMSS_BIMC_SMMU_AXI_CBCR					0x0E008
+#define MMSS_SNOC_DVM_AXI_CBCR					0x0E040
+#define MMSS_CAMSS_AHB_CBCR					0x0348C
+#define MMSS_CAMSS_CCI_AHB_CBCR					0x03348
+#define MMSS_CAMSS_CCI_CBCR					0x03344
+#define MMSS_CAMSS_CPP_AHB_CBCR					0x036B4
+#define MMSS_CAMSS_CPP_CBCR					0x036B0
+#define MMSS_CAMSS_CPP_AXI_CBCR					0x036C4
+#define MMSS_CAMSS_CPP_VBIF_AHB_CBCR				0x036C8
+#define MMSS_CAMSS_CPHY_CSID0_CBCR				0x03730
+#define MMSS_CAMSS_CSI0_AHB_CBCR				0x030BC
+#define MMSS_CAMSS_CSI0_CBCR					0x030B4
+#define MMSS_CAMSS_CSI0PIX_CBCR					0x030E4
+#define MMSS_CAMSS_CSI0RDI_CBCR					0x030D4
+#define MMSS_CAMSS_CPHY_CSID1_CBCR				0x03734
+#define MMSS_CAMSS_CSI1_AHB_CBCR				0x03128
+#define MMSS_CAMSS_CSI1_CBCR					0x03124
+#define MMSS_CAMSS_CSI1PIX_CBCR					0x03154
+#define MMSS_CAMSS_CSI1RDI_CBCR					0x03144
+#define MMSS_CAMSS_CPHY_CSID2_CBCR				0x03738
+#define MMSS_CAMSS_CSI2_AHB_CBCR				0x03188
+#define MMSS_CAMSS_CSI2_CBCR					0x03184
+#define MMSS_CAMSS_CSI2PIX_CBCR					0x031B4
+#define MMSS_CAMSS_CSI2RDI_CBCR					0x031A4
+#define MMSS_CAMSS_CPHY_CSID3_CBCR				0x0373C
+#define MMSS_CAMSS_CSI3_AHB_CBCR				0x031E8
+#define MMSS_CAMSS_CSI3_CBCR					0x031E4
+#define MMSS_CAMSS_CSI3PIX_CBCR					0x03214
+#define MMSS_CAMSS_CSI3RDI_CBCR					0x03204
+#define MMSS_CAMSS_CSI_VFE0_CBCR				0x03704
+#define MMSS_CAMSS_CSI_VFE1_CBCR				0x03714
+#define MMSS_CAMSS_CSIPHY0_CBCR					0x03740
+#define MMSS_CAMSS_CSIPHY1_CBCR					0x03744
+#define MMSS_CAMSS_CSIPHY2_CBCR					0x03748
+#define MMSS_FD_AHB_CBCR					0x03B74
+#define MMSS_FD_CORE_CBCR					0x03B68
+#define MMSS_FD_CORE_UAR_CBCR					0x03B6C
+#define MMSS_CAMSS_GP0_CBCR					0x03444
+#define MMSS_CAMSS_GP1_CBCR					0x03474
+#define MMSS_CAMSS_ISPIF_AHB_CBCR				0x03224
+#define MMSS_CAMSS_JPEG0_CBCR					0x035A8
+#define MMSS_CAMSS_JPEG_AHB_CBCR				0x035B4
+#define MMSS_CAMSS_JPEG_AXI_CBCR				0x035B8
+#define MMSS_CAMSS_MCLK0_CBCR					0x03384
+#define MMSS_CAMSS_MCLK1_CBCR					0x033B4
+#define MMSS_CAMSS_MCLK2_CBCR					0x033E4
+#define MMSS_CAMSS_MCLK3_CBCR					0x03414
+#define MMSS_CAMSS_MICRO_AHB_CBCR				0x03494
+#define MMSS_CAMSS_CSI0PHYTIMER_CBCR				0x03024
+#define MMSS_CAMSS_CSI1PHYTIMER_CBCR				0x03054
+#define MMSS_CAMSS_TOP_AHB_CBCR					0x03484
+#define MMSS_CAMSS_VFE0_AHB_CBCR				0x03668
+#define MMSS_CAMSS_VFE0_CBCR					0x036A8
+#define MMSS_CAMSS_VFE0_STREAM_CBCR				0x03720
+#define MMSS_CAMSS_VFE1_AHB_CBCR				0x03678
+#define MMSS_CAMSS_VFE1_CBCR					0x036AC
+#define MMSS_CAMSS_VFE1_STREAM_CBCR				0x03724
+#define MMSS_CAMSS_VFE_VBIF_AHB_CBCR				0x036B8
+#define MMSS_CAMSS_VFE_VBIF_AXI_CBCR				0x036BC
+#define MMSS_MDSS_AHB_CBCR					0x02308
+#define MMSS_MDSS_AXI_CBCR					0x02310
+#define MMSS_MDSS_BYTE0_CBCR					0x0233C
+#define MMSS_MDSS_BYTE0_INTF_CBCR				0x02374
+#define MMSS_MDSS_BYTE0_INTF_DIV				0x0237C
+#define MMSS_MDSS_BYTE1_CBCR					0x02340
+#define MMSS_MDSS_BYTE1_INTF_CBCR				0x02378
+#define MMSS_MDSS_BYTE1_INTF_DIV				0x02380
+#define MMSS_MDSS_DP_AUX_CBCR					0x02364
+#define MMSS_MDSS_DP_CRYPTO_CBCR				0x0235C
+#define MMSS_MDSS_DP_GTC_CBCR					0x02368
+#define MMSS_MDSS_DP_LINK_CBCR					0x02354
+#define MMSS_MDSS_DP_LINK_INTF_CBCR				0x02358
+#define MMSS_MDSS_DP_PIXEL_CBCR					0x02360
+#define MMSS_MDSS_ESC0_CBCR					0x02344
+#define MMSS_MDSS_ESC1_CBCR					0x02348
+#define MMSS_MDSS_EXTPCLK_CBCR					0x02324
+#define MMSS_MDSS_HDMI_CBCR					0x02338
+#define MMSS_MDSS_HDMI_DP_AHB_CBCR				0x0230C
+#define MMSS_MDSS_MDP_CBCR					0x0231C
+#define MMSS_MDSS_MDP_LUT_CBCR					0x02320
+#define MMSS_MDSS_PCLK0_CBCR					0x02314
+#define MMSS_MDSS_PCLK1_CBCR					0x02318
+#define MMSS_MDSS_ROT_CBCR					0x02350
+#define MMSS_MDSS_VSYNC_CBCR					0x02328
+#define MMSS_MISC_AHB_CBCR					0x00328
+#define MMSS_MISC_CXO_CBCR					0x00324
+#define MMSS_MNOC_AHB_CBCR					0x05024
+#define MMSS_MNOC_MAXI_CBCR					0x0F004
+#define MMSS_VIDEO_SUBCORE0_CBCR				0x01048
+#define MMSS_VIDEO_SUBCORE1_CBCR				0x0104C
+#define MMSS_VIDEO_AHB_CBCR					0x01030
+#define MMSS_VIDEO_AXI_CBCR					0x01034
+#define MMSS_VIDEO_CORE_CBCR					0x01028
+#define MMSS_VIDEO_MAXI_CBCR					0x01038
+#define MMSS_VMEM_AHB_CBCR					0x0F068
+#define MMSS_VMEM_MAXI_CBCR					0x0F064
+#define MMSS_DP_AUX_CMD_RCGR					0x02260
+#define MMSS_DP_CRYPTO_CMD_RCGR					0x02220
+#define MMSS_DP_LINK_CMD_RCGR					0x02200
+#define MMSS_DP_PIXEL_CMD_RCGR					0x02240
+#define MMSS_DEBUG_CLK_CTL					0x00900
diff -Nruw linux-4.4.115-fbx/arch/cris/boot/dts/include/dt-bindings/msm./msm-bus-ids.h linux-4.4.115-fbx/arch/cris/boot/dts/include/dt-bindings/msm/msm-bus-ids.h
--- linux-4.4.115-fbx/arch/cris/boot/dts/include/dt-bindings/msm./msm-bus-ids.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/arch/cris/boot/dts/include/dt-bindings/msm/msm-bus-ids.h	2019-01-22 16:16:28.179288750 +0100
@@ -0,0 +1,887 @@
+/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MSM_BUS_IDS_H
+#define __MSM_BUS_IDS_H
+
+/* Aggregation types */
+#define AGG_SCHEME_NONE	0
+#define AGG_SCHEME_LEG	1
+#define AGG_SCHEME_1	2
+
+/* Topology related enums */
+#define	MSM_BUS_FAB_DEFAULT 0
+#define	MSM_BUS_FAB_APPSS 0
+#define	MSM_BUS_FAB_SYSTEM 1024
+#define	MSM_BUS_FAB_MMSS 2048
+#define	MSM_BUS_FAB_SYSTEM_FPB 3072
+#define	MSM_BUS_FAB_CPSS_FPB 4096
+
+#define	MSM_BUS_FAB_BIMC 0
+#define	MSM_BUS_FAB_SYS_NOC 1024
+#define	MSM_BUS_FAB_MMSS_NOC 2048
+#define	MSM_BUS_FAB_OCMEM_NOC 3072
+#define	MSM_BUS_FAB_PERIPH_NOC 4096
+#define	MSM_BUS_FAB_CONFIG_NOC 5120
+#define	MSM_BUS_FAB_OCMEM_VNOC 6144
+#define	MSM_BUS_FAB_MMSS_AHB 2049
+#define	MSM_BUS_FAB_A0_NOC 6145
+#define	MSM_BUS_FAB_A1_NOC 6146
+#define	MSM_BUS_FAB_A2_NOC 6147
+#define	MSM_BUS_FAB_GNOC 6148
+#define	MSM_BUS_FAB_CR_VIRT 6149
+
+#define	MSM_BUS_MASTER_FIRST 1
+#define	MSM_BUS_MASTER_AMPSS_M0 1
+#define	MSM_BUS_MASTER_AMPSS_M1 2
+#define	MSM_BUS_APPSS_MASTER_FAB_MMSS 3
+#define	MSM_BUS_APPSS_MASTER_FAB_SYSTEM 4
+#define	MSM_BUS_SYSTEM_MASTER_FAB_APPSS 5
+#define	MSM_BUS_MASTER_SPS 6
+#define	MSM_BUS_MASTER_ADM_PORT0 7
+#define	MSM_BUS_MASTER_ADM_PORT1 8
+#define	MSM_BUS_SYSTEM_MASTER_ADM1_PORT0 9
+#define	MSM_BUS_MASTER_ADM1_PORT1 10
+#define	MSM_BUS_MASTER_LPASS_PROC 11
+#define	MSM_BUS_MASTER_MSS_PROCI 12
+#define	MSM_BUS_MASTER_MSS_PROCD 13
+#define	MSM_BUS_MASTER_MSS_MDM_PORT0 14
+#define	MSM_BUS_MASTER_LPASS 15
+#define	MSM_BUS_SYSTEM_MASTER_CPSS_FPB 16
+#define	MSM_BUS_SYSTEM_MASTER_SYSTEM_FPB 17
+#define	MSM_BUS_SYSTEM_MASTER_MMSS_FPB 18
+#define	MSM_BUS_MASTER_ADM1_CI 19
+#define	MSM_BUS_MASTER_ADM0_CI 20
+#define	MSM_BUS_MASTER_MSS_MDM_PORT1 21
+#define	MSM_BUS_MASTER_MDP_PORT0 22
+#define	MSM_BUS_MASTER_MDP_PORT1 23
+#define	MSM_BUS_MMSS_MASTER_ADM1_PORT0 24
+#define	MSM_BUS_MASTER_ROTATOR 25
+#define	MSM_BUS_MASTER_GRAPHICS_3D 26
+#define	MSM_BUS_MASTER_JPEG_DEC 27
+#define	MSM_BUS_MASTER_GRAPHICS_2D_CORE0 28
+#define	MSM_BUS_MASTER_VFE 29
+#define	MSM_BUS_MASTER_VFE0 MSM_BUS_MASTER_VFE
+#define	MSM_BUS_MASTER_VPE 30
+#define	MSM_BUS_MASTER_JPEG_ENC 31
+#define	MSM_BUS_MASTER_GRAPHICS_2D_CORE1 32
+#define	MSM_BUS_MMSS_MASTER_APPS_FAB 33
+#define	MSM_BUS_MASTER_HD_CODEC_PORT0 34
+#define	MSM_BUS_MASTER_HD_CODEC_PORT1 35
+#define	MSM_BUS_MASTER_SPDM 36
+#define	MSM_BUS_MASTER_RPM 37
+#define	MSM_BUS_MASTER_MSS 38
+#define	MSM_BUS_MASTER_RIVA 39
+#define	MSM_BUS_MASTER_SNOC_VMEM 40
+#define	MSM_BUS_MASTER_MSS_SW_PROC 41
+#define	MSM_BUS_MASTER_MSS_FW_PROC 42
+#define	MSM_BUS_MASTER_HMSS 43
+#define	MSM_BUS_MASTER_GSS_NAV 44
+#define	MSM_BUS_MASTER_PCIE 45
+#define	MSM_BUS_MASTER_SATA 46
+#define	MSM_BUS_MASTER_CRYPTO 47
+#define	MSM_BUS_MASTER_VIDEO_CAP 48
+#define	MSM_BUS_MASTER_GRAPHICS_3D_PORT1 49
+#define	MSM_BUS_MASTER_VIDEO_ENC 50
+#define	MSM_BUS_MASTER_VIDEO_DEC 51
+#define	MSM_BUS_MASTER_LPASS_AHB 52
+#define	MSM_BUS_MASTER_QDSS_BAM 53
+#define	MSM_BUS_MASTER_SNOC_CFG 54
+#define	MSM_BUS_MASTER_CRYPTO_CORE0 55
+#define	MSM_BUS_MASTER_CRYPTO_CORE1 56
+#define	MSM_BUS_MASTER_MSS_NAV 57
+#define	MSM_BUS_MASTER_OCMEM_DMA 58
+#define	MSM_BUS_MASTER_WCSS 59
+#define	MSM_BUS_MASTER_QDSS_ETR 60
+#define	MSM_BUS_MASTER_USB3 61
+#define	MSM_BUS_MASTER_JPEG 62
+#define	MSM_BUS_MASTER_VIDEO_P0 63
+#define	MSM_BUS_MASTER_VIDEO_P1 64
+#define	MSM_BUS_MASTER_MSS_PROC 65
+#define	MSM_BUS_MASTER_JPEG_OCMEM 66
+#define	MSM_BUS_MASTER_MDP_OCMEM 67
+#define	MSM_BUS_MASTER_VIDEO_P0_OCMEM 68
+#define	MSM_BUS_MASTER_VIDEO_P1_OCMEM 69
+#define	MSM_BUS_MASTER_VFE_OCMEM 70
+#define	MSM_BUS_MASTER_CNOC_ONOC_CFG 71
+#define	MSM_BUS_MASTER_RPM_INST 72
+#define	MSM_BUS_MASTER_RPM_DATA 73
+#define	MSM_BUS_MASTER_RPM_SYS 74
+#define	MSM_BUS_MASTER_DEHR 75
+#define	MSM_BUS_MASTER_QDSS_DAP 76
+#define	MSM_BUS_MASTER_TIC 77
+#define	MSM_BUS_MASTER_SDCC_1 78
+#define	MSM_BUS_MASTER_SDCC_3 79
+#define	MSM_BUS_MASTER_SDCC_4 80
+#define	MSM_BUS_MASTER_SDCC_2 81
+#define	MSM_BUS_MASTER_TSIF 82
+#define	MSM_BUS_MASTER_BAM_DMA 83
+#define	MSM_BUS_MASTER_BLSP_2 84
+#define	MSM_BUS_MASTER_USB_HSIC 85
+#define	MSM_BUS_MASTER_BLSP_1 86
+#define	MSM_BUS_MASTER_USB_HS 87
+#define	MSM_BUS_MASTER_PNOC_CFG 88
+#define	MSM_BUS_MASTER_V_OCMEM_GFX3D 89
+#define	MSM_BUS_MASTER_IPA 90
+#define	MSM_BUS_MASTER_QPIC 91
+#define	MSM_BUS_MASTER_MDPE 92
+#define	MSM_BUS_MASTER_USB_HS2 93
+#define	MSM_BUS_MASTER_VPU 94
+#define	MSM_BUS_MASTER_UFS 95
+#define	MSM_BUS_MASTER_BCAST 96
+#define	MSM_BUS_MASTER_CRYPTO_CORE2 97
+#define	MSM_BUS_MASTER_EMAC 98
+#define	MSM_BUS_MASTER_VPU_1 99
+#define	MSM_BUS_MASTER_PCIE_1 100
+#define	MSM_BUS_MASTER_USB3_1 101
+#define	MSM_BUS_MASTER_CNOC_MNOC_MMSS_CFG 102
+#define	MSM_BUS_MASTER_CNOC_MNOC_CFG 103
+#define	MSM_BUS_MASTER_TCU_0 104
+#define	MSM_BUS_MASTER_TCU_1 105
+#define	MSM_BUS_MASTER_CPP 106
+#define	MSM_BUS_MASTER_AUDIO 107
+#define	MSM_BUS_MASTER_PCIE_2 108
+#define	MSM_BUS_MASTER_VFE1 109
+#define	MSM_BUS_MASTER_XM_USB_HS1 110
+#define	MSM_BUS_MASTER_PCNOC_BIMC_1 111
+#define	MSM_BUS_MASTER_BIMC_PCNOC   112
+#define	MSM_BUS_MASTER_XI_USB_HSIC  113
+#define	MSM_BUS_MASTER_SGMII	    114
+#define	MSM_BUS_SPMI_FETCHER 115
+#define	MSM_BUS_MASTER_GNOC_BIMC 116
+#define	MSM_BUS_MASTER_CRVIRT_A2NOC 117
+#define	MSM_BUS_MASTER_CNOC_A2NOC 118
+#define	MSM_BUS_MASTER_WLAN 119
+#define	MSM_BUS_MASTER_MSS_CE 120
+#define	MSM_BUS_MASTER_CDSP_PROC 121
+#define	MSM_BUS_MASTER_GNOC_SNOC 122
+#define	MSM_BUS_MASTER_PIMEM 123
+#define	MSM_BUS_MASTER_MASTER_LAST 124
+
+#define	MSM_BUS_SYSTEM_FPB_MASTER_SYSTEM MSM_BUS_SYSTEM_MASTER_SYSTEM_FPB
+#define	MSM_BUS_CPSS_FPB_MASTER_SYSTEM MSM_BUS_SYSTEM_MASTER_CPSS_FPB
+
+#define	MSM_BUS_SNOC_MM_INT_0 10000
+#define	MSM_BUS_SNOC_MM_INT_1 10001
+#define	MSM_BUS_SNOC_MM_INT_2 10002
+#define	MSM_BUS_SNOC_MM_INT_BIMC 10003
+#define	MSM_BUS_SNOC_INT_0 10004
+#define	MSM_BUS_SNOC_INT_1 10005
+#define	MSM_BUS_SNOC_INT_BIMC 10006
+#define	MSM_BUS_SNOC_BIMC_0_MAS 10007
+#define	MSM_BUS_SNOC_BIMC_1_MAS 10008
+#define	MSM_BUS_SNOC_QDSS_INT 10009
+#define	MSM_BUS_PNOC_SNOC_MAS 10010
+#define	MSM_BUS_PNOC_SNOC_SLV 10011
+#define	MSM_BUS_PNOC_INT_0 10012
+#define	MSM_BUS_PNOC_INT_1 10013
+#define	MSM_BUS_PNOC_M_0 10014
+#define	MSM_BUS_PNOC_M_1 10015
+#define	MSM_BUS_BIMC_SNOC_MAS 10016
+#define	MSM_BUS_BIMC_SNOC_SLV 10017
+#define	MSM_BUS_PNOC_SLV_0 10018
+#define	MSM_BUS_PNOC_SLV_1 10019
+#define	MSM_BUS_PNOC_SLV_2 10020
+#define	MSM_BUS_PNOC_SLV_3 10021
+#define	MSM_BUS_PNOC_SLV_4 10022
+#define	MSM_BUS_PNOC_SLV_8 10023
+#define	MSM_BUS_PNOC_SLV_9 10024
+#define	MSM_BUS_SNOC_BIMC_0_SLV 10025
+#define	MSM_BUS_SNOC_BIMC_1_SLV 10026
+#define	MSM_BUS_MNOC_BIMC_MAS 10027
+#define	MSM_BUS_MNOC_BIMC_SLV 10028
+#define	MSM_BUS_BIMC_MNOC_MAS 10029
+#define	MSM_BUS_BIMC_MNOC_SLV 10030
+#define	MSM_BUS_SNOC_BIMC_MAS 10031
+#define	MSM_BUS_SNOC_BIMC_SLV 10032
+#define	MSM_BUS_CNOC_SNOC_MAS 10033
+#define	MSM_BUS_CNOC_SNOC_SLV 10034
+#define	MSM_BUS_SNOC_CNOC_MAS 10035
+#define	MSM_BUS_SNOC_CNOC_SLV 10036
+#define	MSM_BUS_OVNOC_SNOC_MAS 10037
+#define	MSM_BUS_OVNOC_SNOC_SLV 10038
+#define	MSM_BUS_SNOC_OVNOC_MAS 10039
+#define	MSM_BUS_SNOC_OVNOC_SLV 10040
+#define	MSM_BUS_SNOC_PNOC_MAS 10041
+#define	MSM_BUS_SNOC_PNOC_SLV 10042
+#define	MSM_BUS_BIMC_INT_APPS_EBI 10043
+#define	MSM_BUS_BIMC_INT_APPS_SNOC 10044
+#define	MSM_BUS_SNOC_BIMC_2_MAS 10045
+#define	MSM_BUS_SNOC_BIMC_2_SLV 10046
+#define	MSM_BUS_PNOC_SLV_5	10047
+#define	MSM_BUS_PNOC_SLV_7	10048
+#define	MSM_BUS_PNOC_INT_2 10049
+#define	MSM_BUS_PNOC_INT_3 10050
+#define	MSM_BUS_PNOC_INT_4 10051
+#define	MSM_BUS_PNOC_INT_5 10052
+#define	MSM_BUS_PNOC_INT_6 10053
+#define	MSM_BUS_PNOC_INT_7 10054
+#define	MSM_BUS_BIMC_SNOC_1_MAS 10055
+#define	MSM_BUS_BIMC_SNOC_1_SLV 10056
+#define	MSM_BUS_PNOC_A1NOC_MAS 10057
+#define	MSM_BUS_PNOC_A1NOC_SLV 10058
+#define	MSM_BUS_CNOC_A1NOC_MAS 10059
+#define	MSM_BUS_A0NOC_SNOC_MAS 10060
+#define	MSM_BUS_A0NOC_SNOC_SLV 10061
+#define	MSM_BUS_A1NOC_SNOC_SLV 10062
+#define	MSM_BUS_A1NOC_SNOC_MAS 10063
+#define	MSM_BUS_A2NOC_SNOC_MAS 10064
+#define	MSM_BUS_A2NOC_SNOC_SLV 10065
+#define	MSM_BUS_SNOC_INT_2 10066
+#define	MSM_BUS_A0NOC_QDSS_INT	10067
+#define	MSM_BUS_INT_LAST 10068
+
+#define	MSM_BUS_INT_TEST_ID	20000
+#define	MSM_BUS_INT_TEST_LAST	20050
+
+#define	MSM_BUS_SLAVE_FIRST 512
+#define	MSM_BUS_SLAVE_EBI_CH0 512
+#define	MSM_BUS_SLAVE_EBI_CH1 513
+#define	MSM_BUS_SLAVE_AMPSS_L2 514
+#define	MSM_BUS_APPSS_SLAVE_FAB_MMSS 515
+#define	MSM_BUS_APPSS_SLAVE_FAB_SYSTEM 516
+#define	MSM_BUS_SYSTEM_SLAVE_FAB_APPS 517
+#define	MSM_BUS_SLAVE_SPS 518
+#define	MSM_BUS_SLAVE_SYSTEM_IMEM 519
+#define	MSM_BUS_SLAVE_AMPSS 520
+#define	MSM_BUS_SLAVE_MSS 521
+#define	MSM_BUS_SLAVE_LPASS 522
+#define	MSM_BUS_SYSTEM_SLAVE_CPSS_FPB 523
+#define	MSM_BUS_SYSTEM_SLAVE_SYSTEM_FPB 524
+#define	MSM_BUS_SYSTEM_SLAVE_MMSS_FPB 525
+#define	MSM_BUS_SLAVE_CORESIGHT 526
+#define	MSM_BUS_SLAVE_RIVA 527
+#define	MSM_BUS_SLAVE_SMI 528
+#define	MSM_BUS_MMSS_SLAVE_FAB_APPS 529
+#define	MSM_BUS_MMSS_SLAVE_FAB_APPS_1 530
+#define	MSM_BUS_SLAVE_MM_IMEM 531
+#define	MSM_BUS_SLAVE_CRYPTO 532
+#define	MSM_BUS_SLAVE_SPDM 533
+#define	MSM_BUS_SLAVE_RPM 534
+#define	MSM_BUS_SLAVE_RPM_MSG_RAM 535
+#define	MSM_BUS_SLAVE_MPM 536
+#define	MSM_BUS_SLAVE_PMIC1_SSBI1_A 537
+#define	MSM_BUS_SLAVE_PMIC1_SSBI1_B 538
+#define	MSM_BUS_SLAVE_PMIC1_SSBI1_C 539
+#define	MSM_BUS_SLAVE_PMIC2_SSBI2_A 540
+#define	MSM_BUS_SLAVE_PMIC2_SSBI2_B 541
+#define	MSM_BUS_SLAVE_GSBI1_UART 542
+#define	MSM_BUS_SLAVE_GSBI2_UART 543
+#define	MSM_BUS_SLAVE_GSBI3_UART 544
+#define	MSM_BUS_SLAVE_GSBI4_UART 545
+#define	MSM_BUS_SLAVE_GSBI5_UART 546
+#define	MSM_BUS_SLAVE_GSBI6_UART 547
+#define	MSM_BUS_SLAVE_GSBI7_UART 548
+#define	MSM_BUS_SLAVE_GSBI8_UART 549
+#define	MSM_BUS_SLAVE_GSBI9_UART 550
+#define	MSM_BUS_SLAVE_GSBI10_UART 551
+#define	MSM_BUS_SLAVE_GSBI11_UART 552
+#define	MSM_BUS_SLAVE_GSBI12_UART 553
+#define	MSM_BUS_SLAVE_GSBI1_QUP 554
+#define	MSM_BUS_SLAVE_GSBI2_QUP 555
+#define	MSM_BUS_SLAVE_GSBI3_QUP 556
+#define	MSM_BUS_SLAVE_GSBI4_QUP 557
+#define	MSM_BUS_SLAVE_GSBI5_QUP 558
+#define	MSM_BUS_SLAVE_GSBI6_QUP 559
+#define	MSM_BUS_SLAVE_GSBI7_QUP 560
+#define	MSM_BUS_SLAVE_GSBI8_QUP 561
+#define	MSM_BUS_SLAVE_GSBI9_QUP 562
+#define	MSM_BUS_SLAVE_GSBI10_QUP 563
+#define	MSM_BUS_SLAVE_GSBI11_QUP 564
+#define	MSM_BUS_SLAVE_GSBI12_QUP 565
+#define	MSM_BUS_SLAVE_EBI2_NAND 566
+#define	MSM_BUS_SLAVE_EBI2_CS0 567
+#define	MSM_BUS_SLAVE_EBI2_CS1 568
+#define	MSM_BUS_SLAVE_EBI2_CS2 569
+#define	MSM_BUS_SLAVE_EBI2_CS3 570
+#define	MSM_BUS_SLAVE_EBI2_CS4 571
+#define	MSM_BUS_SLAVE_EBI2_CS5 572
+#define	MSM_BUS_SLAVE_USB_FS1 573
+#define	MSM_BUS_SLAVE_USB_FS2 574
+#define	MSM_BUS_SLAVE_TSIF 575
+#define	MSM_BUS_SLAVE_MSM_TSSC 576
+#define	MSM_BUS_SLAVE_MSM_PDM 577
+#define	MSM_BUS_SLAVE_MSM_DIMEM 578
+#define	MSM_BUS_SLAVE_MSM_TCSR 579
+#define	MSM_BUS_SLAVE_MSM_PRNG 580
+#define	MSM_BUS_SLAVE_GSS 581
+#define	MSM_BUS_SLAVE_SATA 582
+#define	MSM_BUS_SLAVE_USB3 583
+#define	MSM_BUS_SLAVE_WCSS 584
+#define	MSM_BUS_SLAVE_OCIMEM 585
+#define	MSM_BUS_SLAVE_SNOC_OCMEM 586
+#define	MSM_BUS_SLAVE_SERVICE_SNOC 587
+#define	MSM_BUS_SLAVE_QDSS_STM 588
+#define	MSM_BUS_SLAVE_CAMERA_CFG 589
+#define	MSM_BUS_SLAVE_DISPLAY_CFG 590
+#define	MSM_BUS_SLAVE_OCMEM_CFG 591
+#define	MSM_BUS_SLAVE_CPR_CFG 592
+#define	MSM_BUS_SLAVE_CPR_XPU_CFG 593
+#define	MSM_BUS_SLAVE_MISC_CFG 594
+#define	MSM_BUS_SLAVE_MISC_XPU_CFG 595
+#define	MSM_BUS_SLAVE_VENUS_CFG 596
+#define	MSM_BUS_SLAVE_MISC_VENUS_CFG 597
+#define	MSM_BUS_SLAVE_GRAPHICS_3D_CFG 598
+#define	MSM_BUS_SLAVE_MMSS_CLK_CFG 599
+#define	MSM_BUS_SLAVE_MMSS_CLK_XPU_CFG 600
+#define	MSM_BUS_SLAVE_MNOC_MPU_CFG 601
+#define	MSM_BUS_SLAVE_ONOC_MPU_CFG 602
+#define	MSM_BUS_SLAVE_SERVICE_MNOC 603
+#define	MSM_BUS_SLAVE_OCMEM 604
+#define	MSM_BUS_SLAVE_SERVICE_ONOC 605
+#define	MSM_BUS_SLAVE_SDCC_1 606
+#define	MSM_BUS_SLAVE_SDCC_3 607
+#define	MSM_BUS_SLAVE_SDCC_2 608
+#define	MSM_BUS_SLAVE_SDCC_4 609
+#define	MSM_BUS_SLAVE_BAM_DMA 610
+#define	MSM_BUS_SLAVE_BLSP_2 611
+#define	MSM_BUS_SLAVE_USB_HSIC 612
+#define	MSM_BUS_SLAVE_BLSP_1 613
+#define	MSM_BUS_SLAVE_USB_HS 614
+#define	MSM_BUS_SLAVE_PDM 615
+#define	MSM_BUS_SLAVE_PERIPH_APU_CFG 616
+#define	MSM_BUS_SLAVE_PNOC_MPU_CFG 617
+#define	MSM_BUS_SLAVE_PRNG 618
+#define	MSM_BUS_SLAVE_SERVICE_PNOC 619
+#define	MSM_BUS_SLAVE_CLK_CTL 620
+#define	MSM_BUS_SLAVE_CNOC_MSS 621
+#define	MSM_BUS_SLAVE_SECURITY 622
+#define	MSM_BUS_SLAVE_TCSR 623
+#define	MSM_BUS_SLAVE_TLMM 624
+#define	MSM_BUS_SLAVE_CRYPTO_0_CFG 625
+#define	MSM_BUS_SLAVE_CRYPTO_1_CFG 626
+#define	MSM_BUS_SLAVE_IMEM_CFG 627
+#define	MSM_BUS_SLAVE_MESSAGE_RAM 628
+#define	MSM_BUS_SLAVE_BIMC_CFG 629
+#define	MSM_BUS_SLAVE_BOOT_ROM 630
+#define	MSM_BUS_SLAVE_CNOC_MNOC_MMSS_CFG 631
+#define	MSM_BUS_SLAVE_PMIC_ARB 632
+#define	MSM_BUS_SLAVE_SPDM_WRAPPER 633
+#define	MSM_BUS_SLAVE_DEHR_CFG 634
+#define	MSM_BUS_SLAVE_QDSS_CFG 635
+#define	MSM_BUS_SLAVE_RBCPR_CFG 636
+#define	MSM_BUS_SLAVE_RBCPR_QDSS_APU_CFG 637
+#define	MSM_BUS_SLAVE_SNOC_MPU_CFG 638
+#define	MSM_BUS_SLAVE_CNOC_ONOC_CFG 639
+#define	MSM_BUS_SLAVE_CNOC_MNOC_CFG 640
+#define	MSM_BUS_SLAVE_PNOC_CFG 641
+#define	MSM_BUS_SLAVE_SNOC_CFG 642
+#define	MSM_BUS_SLAVE_EBI1_DLL_CFG 643
+#define	MSM_BUS_SLAVE_PHY_APU_CFG 644
+#define	MSM_BUS_SLAVE_EBI1_PHY_CFG 645
+#define	MSM_BUS_SLAVE_SERVICE_CNOC 646
+#define	MSM_BUS_SLAVE_IPS_CFG 647
+#define	MSM_BUS_SLAVE_QPIC 648
+#define	MSM_BUS_SLAVE_DSI_CFG 649
+#define	MSM_BUS_SLAVE_UFS_CFG 650
+#define	MSM_BUS_SLAVE_RBCPR_CX_CFG 651
+#define	MSM_BUS_SLAVE_RBCPR_MX_CFG 652
+#define	MSM_BUS_SLAVE_PCIE_CFG 653
+#define	MSM_BUS_SLAVE_USB_PHYS_CFG 654
+#define	MSM_BUS_SLAVE_VIDEO_CAP_CFG 655
+#define	MSM_BUS_SLAVE_AVSYNC_CFG 656
+#define	MSM_BUS_SLAVE_CRYPTO_2_CFG 657
+#define	MSM_BUS_SLAVE_VPU_CFG 658
+#define	MSM_BUS_SLAVE_BCAST_CFG 659
+#define	MSM_BUS_SLAVE_KLM_CFG 660
+#define	MSM_BUS_SLAVE_GENI_IR_CFG 661
+#define	MSM_BUS_SLAVE_OCMEM_GFX 662
+#define	MSM_BUS_SLAVE_CATS_128 663
+#define	MSM_BUS_SLAVE_OCMEM_64 664
+#define	MSM_BUS_SLAVE_PCIE_0 665
+#define	MSM_BUS_SLAVE_PCIE_1 666
+#define	MSM_BUS_SLAVE_PCIE_0_CFG 667
+#define	MSM_BUS_SLAVE_PCIE_1_CFG 668
+#define	MSM_BUS_SLAVE_SRVC_MNOC 669
+#define	MSM_BUS_SLAVE_USB_HS2 670
+#define	MSM_BUS_SLAVE_AUDIO 671
+#define	MSM_BUS_SLAVE_TCU 672
+#define	MSM_BUS_SLAVE_APPSS 673
+#define	MSM_BUS_SLAVE_PCIE_PARF 674
+#define	MSM_BUS_SLAVE_USB3_PHY_CFG 675
+#define	MSM_BUS_SLAVE_IPA_CFG 676
+#define	MSM_BUS_SLAVE_A0NOC_SNOC 677
+#define	MSM_BUS_SLAVE_A1NOC_SNOC 678
+#define	MSM_BUS_SLAVE_A2NOC_SNOC 679
+#define	MSM_BUS_SLAVE_HMSS_L3 680
+#define	MSM_BUS_SLAVE_PIMEM_CFG 681
+#define	MSM_BUS_SLAVE_DCC_CFG 682
+#define	MSM_BUS_SLAVE_QDSS_RBCPR_APU_CFG 683
+#define	MSM_BUS_SLAVE_PCIE_2_CFG 684
+#define	MSM_BUS_SLAVE_PCIE20_AHB2PHY 685
+#define	MSM_BUS_SLAVE_A0NOC_CFG 686
+#define	MSM_BUS_SLAVE_A1NOC_CFG 687
+#define	MSM_BUS_SLAVE_A2NOC_CFG 688
+#define	MSM_BUS_SLAVE_A1NOC_MPU_CFG 689
+#define	MSM_BUS_SLAVE_A2NOC_MPU_CFG 690
+#define	MSM_BUS_SLAVE_A0NOC_SMMU_CFG 691
+#define	MSM_BUS_SLAVE_A1NOC_SMMU_CFG 692
+#define	MSM_BUS_SLAVE_A2NOC_SMMU_CFG 693
+#define	MSM_BUS_SLAVE_LPASS_SMMU_CFG 694
+#define	MSM_BUS_SLAVE_MMAGIC_CFG 695
+#define	MSM_BUS_SLAVE_VENUS_THROTTLE_CFG 696
+#define	MSM_BUS_SLAVE_SSC_CFG 697
+#define	MSM_BUS_SLAVE_DSA_CFG 698
+#define	MSM_BUS_SLAVE_DSA_MPU_CFG 699
+#define	MSM_BUS_SLAVE_DISPLAY_THROTTLE_CFG 700
+#define	MSM_BUS_SLAVE_SMMU_CPP_CFG 701
+#define	MSM_BUS_SLAVE_SMMU_JPEG_CFG 702
+#define	MSM_BUS_SLAVE_SMMU_MDP_CFG 703
+#define	MSM_BUS_SLAVE_SMMU_ROTATOR_CFG 704
+#define	MSM_BUS_SLAVE_SMMU_VENUS_CFG 705
+#define	MSM_BUS_SLAVE_SMMU_VFE_CFG 706
+#define	MSM_BUS_SLAVE_A0NOC_MPU_CFG 707
+#define	MSM_BUS_SLAVE_VMEM_CFG 708
+#define	MSM_BUS_SLAVE_CAMERA_THROTTLE_CFG 709
+#define	MSM_BUS_SLAVE_VMEM 710
+#define	MSM_BUS_SLAVE_AHB2PHY 711
+#define	MSM_BUS_SLAVE_PIMEM 712
+#define	MSM_BUS_SLAVE_SNOC_VMEM 713
+#define	MSM_BUS_SLAVE_PCIE_2 714
+#define	MSM_BUS_SLAVE_RBCPR_MX 715
+#define	MSM_BUS_SLAVE_RBCPR_CX 716
+#define	MSM_BUS_SLAVE_BIMC_PCNOC 717
+#define	MSM_BUS_SLAVE_PCNOC_BIMC_1 718
+#define	MSM_BUS_SLAVE_SGMII 719
+#define	MSM_BUS_SLAVE_SPMI_FETCHER 720
+#define	MSM_BUS_PNOC_SLV_6 721
+#define	MSM_BUS_SLAVE_MMSS_SMMU_CFG 722
+#define	MSM_BUS_SLAVE_WLAN 723
+#define	MSM_BUS_SLAVE_CRVIRT_A2NOC 724
+#define	MSM_BUS_SLAVE_CNOC_A2NOC 725
+#define	MSM_BUS_SLAVE_GLM 726
+#define	MSM_BUS_SLAVE_GNOC_BIMC 727
+#define	MSM_BUS_SLAVE_GNOC_SNOC 728
+#define	MSM_BUS_SLAVE_QM_CFG 729
+#define	MSM_BUS_SLAVE_TLMM_EAST 730
+#define	MSM_BUS_SLAVE_TLMM_NORTH 731
+#define	MSM_BUS_SLAVE_TLMM_WEST 732
+#define	MSM_BUS_SLAVE_SKL 733
+#define	MSM_BUS_SLAVE_LPASS_TCM	734
+#define	MSM_BUS_SLAVE_TLMM_SOUTH 735
+#define	MSM_BUS_SLAVE_TLMM_CENTER 736
+#define	MSM_BUS_MSS_NAV_CE_MPU_CFG 737
+#define	MSM_BUS_SLAVE_A2NOC_THROTTLE_CFG 738
+#define	MSM_BUS_SLAVE_CDSP 739
+#define	MSM_BUS_SLAVE_CDSP_SMMU_CFG 740
+#define	MSM_BUS_SLAVE_LPASS_MPU_CFG 741
+#define	MSM_BUS_SLAVE_CSI_PHY_CFG 742
+#define	MSM_BUS_SLAVE_LAST 743
+
+#define	MSM_BUS_SYSTEM_FPB_SLAVE_SYSTEM  MSM_BUS_SYSTEM_SLAVE_SYSTEM_FPB
+#define	MSM_BUS_CPSS_FPB_SLAVE_SYSTEM MSM_BUS_SYSTEM_SLAVE_CPSS_FPB
+
+/*
+ * IDs used in RPM messages
+ */
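+/*
+ * Unlike the MSM_BUS_* IDs above, which are local to the AP-side bus
+ * driver, the ICBID_* master/slave numbers below are the port IDs
+ * carried in bandwidth votes sent to the RPM.
+ */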
+#define	ICBID_MASTER_APPSS_PROC 0
+#define	ICBID_MASTER_MSS_PROC 1
+#define	ICBID_MASTER_MNOC_BIMC 2
+#define	ICBID_MASTER_SNOC_BIMC 3
+#define	ICBID_MASTER_SNOC_BIMC_0 ICBID_MASTER_SNOC_BIMC
+#define	ICBID_MASTER_CNOC_MNOC_MMSS_CFG 4
+#define	ICBID_MASTER_CNOC_MNOC_CFG 5
+#define	ICBID_MASTER_GFX3D 6
+#define	ICBID_MASTER_JPEG 7
+#define	ICBID_MASTER_MDP 8
+#define	ICBID_MASTER_MDP0 ICBID_MASTER_MDP
+#define	ICBID_MASTER_MDPS ICBID_MASTER_MDP
+#define	ICBID_MASTER_VIDEO 9
+#define	ICBID_MASTER_VIDEO_P0 ICBID_MASTER_VIDEO
+#define	ICBID_MASTER_VIDEO_P1 10
+#define	ICBID_MASTER_VFE 11
+#define	ICBID_MASTER_VFE0 ICBID_MASTER_VFE
+#define	ICBID_MASTER_CNOC_ONOC_CFG 12
+#define	ICBID_MASTER_JPEG_OCMEM 13
+#define	ICBID_MASTER_MDP_OCMEM 14
+#define	ICBID_MASTER_VIDEO_P0_OCMEM 15
+#define	ICBID_MASTER_VIDEO_P1_OCMEM 16
+#define	ICBID_MASTER_VFE_OCMEM 17
+#define	ICBID_MASTER_LPASS_AHB 18
+#define	ICBID_MASTER_QDSS_BAM 19
+#define	ICBID_MASTER_SNOC_CFG 20
+#define	ICBID_MASTER_BIMC_SNOC 21
+#define	ICBID_MASTER_BIMC_SNOC_0 ICBID_MASTER_BIMC_SNOC
+#define	ICBID_MASTER_CNOC_SNOC 22
+#define	ICBID_MASTER_CRYPTO 23
+#define	ICBID_MASTER_CRYPTO_CORE0 ICBID_MASTER_CRYPTO
+#define	ICBID_MASTER_CRYPTO_CORE1 24
+#define	ICBID_MASTER_LPASS_PROC 25
+#define	ICBID_MASTER_MSS 26
+#define	ICBID_MASTER_MSS_NAV 27
+#define	ICBID_MASTER_OCMEM_DMA 28
+#define	ICBID_MASTER_PNOC_SNOC 29
+#define	ICBID_MASTER_WCSS 30
+#define	ICBID_MASTER_QDSS_ETR 31
+#define	ICBID_MASTER_USB3 32
+#define	ICBID_MASTER_USB3_0 ICBID_MASTER_USB3
+#define	ICBID_MASTER_SDCC_1 33
+#define	ICBID_MASTER_SDCC_3 34
+#define	ICBID_MASTER_SDCC_2 35
+#define	ICBID_MASTER_SDCC_4 36
+#define	ICBID_MASTER_TSIF 37
+#define	ICBID_MASTER_BAM_DMA 38
+#define	ICBID_MASTER_BLSP_2 39
+#define	ICBID_MASTER_USB_HSIC 40
+#define	ICBID_MASTER_BLSP_1 41
+#define	ICBID_MASTER_USB_HS 42
+#define	ICBID_MASTER_USB_HS1 ICBID_MASTER_USB_HS
+#define	ICBID_MASTER_PNOC_CFG 43
+#define	ICBID_MASTER_SNOC_PNOC 44
+#define	ICBID_MASTER_RPM_INST 45
+#define	ICBID_MASTER_RPM_DATA 46
+#define	ICBID_MASTER_RPM_SYS 47
+#define	ICBID_MASTER_DEHR 48
+#define	ICBID_MASTER_QDSS_DAP 49
+#define	ICBID_MASTER_SPDM 50
+#define	ICBID_MASTER_TIC 51
+#define	ICBID_MASTER_SNOC_CNOC 52
+#define	ICBID_MASTER_GFX3D_OCMEM 53
+#define	ICBID_MASTER_GFX3D_GMEM ICBID_MASTER_GFX3D_OCMEM
+#define	ICBID_MASTER_OVIRT_SNOC 54
+#define	ICBID_MASTER_SNOC_OVIRT 55
+#define	ICBID_MASTER_SNOC_GVIRT ICBID_MASTER_SNOC_OVIRT
+#define	ICBID_MASTER_ONOC_OVIRT 56
+#define	ICBID_MASTER_USB_HS2 57
+#define	ICBID_MASTER_QPIC 58
+#define	ICBID_MASTER_IPA 59
+#define	ICBID_MASTER_DSI 60
+#define	ICBID_MASTER_MDP1 61
+#define	ICBID_MASTER_MDPE ICBID_MASTER_MDP1
+#define	ICBID_MASTER_VPU_PROC 62
+#define	ICBID_MASTER_VPU 63
+#define	ICBID_MASTER_VPU0 ICBID_MASTER_VPU
+#define	ICBID_MASTER_CRYPTO_CORE2 64
+#define	ICBID_MASTER_PCIE_0 65
+#define	ICBID_MASTER_PCIE_1 66
+#define	ICBID_MASTER_SATA 67
+#define	ICBID_MASTER_UFS 68
+#define	ICBID_MASTER_USB3_1 69
+#define	ICBID_MASTER_VIDEO_OCMEM 70
+#define	ICBID_MASTER_VPU1 71
+#define	ICBID_MASTER_VCAP 72
+#define	ICBID_MASTER_EMAC 73
+#define	ICBID_MASTER_BCAST 74
+#define	ICBID_MASTER_MMSS_PROC 75
+#define	ICBID_MASTER_SNOC_BIMC_1 76
+#define	ICBID_MASTER_SNOC_PCNOC 77
+#define	ICBID_MASTER_AUDIO 78
+#define	ICBID_MASTER_MM_INT_0 79
+#define	ICBID_MASTER_MM_INT_1 80
+#define	ICBID_MASTER_MM_INT_2 81
+#define	ICBID_MASTER_MM_INT_BIMC 82
+#define	ICBID_MASTER_MSS_INT 83
+#define	ICBID_MASTER_PCNOC_CFG 84
+#define	ICBID_MASTER_PCNOC_INT_0 85
+#define	ICBID_MASTER_PCNOC_INT_1 86
+#define	ICBID_MASTER_PCNOC_M_0 87
+#define	ICBID_MASTER_PCNOC_M_1 88
+#define	ICBID_MASTER_PCNOC_S_0 89
+#define	ICBID_MASTER_PCNOC_S_1 90
+#define	ICBID_MASTER_PCNOC_S_2 91
+#define	ICBID_MASTER_PCNOC_S_3 92
+#define	ICBID_MASTER_PCNOC_S_4 93
+#define	ICBID_MASTER_PCNOC_S_6 94
+#define	ICBID_MASTER_PCNOC_S_7 95
+#define	ICBID_MASTER_PCNOC_S_8 96
+#define	ICBID_MASTER_PCNOC_S_9 97
+#define	ICBID_MASTER_QDSS_INT 98
+#define	ICBID_MASTER_SNOC_INT_0	99
+#define	ICBID_MASTER_SNOC_INT_1 100
+#define	ICBID_MASTER_SNOC_INT_BIMC 101
+#define	ICBID_MASTER_TCU_0 102
+#define	ICBID_MASTER_TCU_1 103
+#define	ICBID_MASTER_BIMC_INT_0 104
+#define	ICBID_MASTER_BIMC_INT_1 105
+#define	ICBID_MASTER_CAMERA 106
+#define	ICBID_MASTER_RICA 107
+#define	ICBID_MASTER_SNOC_BIMC_2 108
+#define	ICBID_MASTER_BIMC_SNOC_1 109
+#define	ICBID_MASTER_A0NOC_SNOC 110
+#define	ICBID_MASTER_A1NOC_SNOC 111
+#define	ICBID_MASTER_A2NOC_SNOC 112
+#define	ICBID_MASTER_PIMEM 113
+#define	ICBID_MASTER_SNOC_VMEM 114
+#define	ICBID_MASTER_CPP 115
+#define	ICBID_MASTER_CNOC_A1NOC 116
+#define	ICBID_MASTER_PNOC_A1NOC 117
+#define	ICBID_MASTER_HMSS 118
+#define	ICBID_MASTER_PCIE_2 119
+#define	ICBID_MASTER_ROTATOR 120
+#define	ICBID_MASTER_VENUS_VMEM 121
+#define	ICBID_MASTER_DCC 122
+#define	ICBID_MASTER_MCDMA 123
+#define	ICBID_MASTER_PCNOC_INT_2 124
+#define	ICBID_MASTER_PCNOC_INT_3 125
+#define	ICBID_MASTER_PCNOC_INT_4 126
+#define	ICBID_MASTER_PCNOC_INT_5 127
+#define	ICBID_MASTER_PCNOC_INT_6 128
+#define	ICBID_MASTER_PCNOC_S_5 129
+#define	ICBID_MASTER_SENSORS_AHB 130
+#define	ICBID_MASTER_SENSORS_PROC 131
+#define	ICBID_MASTER_QSPI 132
+#define	ICBID_MASTER_VFE1 133
+#define	ICBID_MASTER_SNOC_INT_2 134
+#define	ICBID_MASTER_SMMNOC_BIMC 135
+#define	ICBID_MASTER_CRVIRT_A1NOC 136
+#define	ICBID_MASTER_XM_USB_HS1 137
+#define	ICBID_MASTER_XI_USB_HS1 138
+#define	ICBID_MASTER_PCNOC_BIMC_1 139
+#define	ICBID_MASTER_BIMC_PCNOC 140
+#define	ICBID_MASTER_XI_HSIC 141
+#define	ICBID_MASTER_SGMII  142
+#define	ICBID_MASTER_SPMI_FETCHER 143
+#define	ICBID_MASTER_GNOC_BIMC 144
+#define	ICBID_MASTER_CRVIRT_A2NOC 145
+#define	ICBID_MASTER_CNOC_A2NOC 146
+#define	ICBID_MASTER_WLAN 147
+#define	ICBID_MASTER_MSS_CE 148
+#define	ICBID_MASTER_CDSP_PROC 149
+#define	ICBID_MASTER_GNOC_SNOC 150
+
+#define	ICBID_SLAVE_EBI1 0
+#define	ICBID_SLAVE_APPSS_L2 1
+#define	ICBID_SLAVE_BIMC_SNOC 2
+#define	ICBID_SLAVE_BIMC_SNOC_0 ICBID_SLAVE_BIMC_SNOC
+#define	ICBID_SLAVE_CAMERA_CFG 3
+#define	ICBID_SLAVE_DISPLAY_CFG 4
+#define	ICBID_SLAVE_OCMEM_CFG 5
+#define	ICBID_SLAVE_CPR_CFG 6
+#define	ICBID_SLAVE_CPR_XPU_CFG 7
+#define	ICBID_SLAVE_MISC_CFG 8
+#define	ICBID_SLAVE_MISC_XPU_CFG 9
+#define	ICBID_SLAVE_VENUS_CFG 10
+#define	ICBID_SLAVE_GFX3D_CFG 11
+#define	ICBID_SLAVE_MMSS_CLK_CFG 12
+#define	ICBID_SLAVE_MMSS_CLK_XPU_CFG 13
+#define	ICBID_SLAVE_MNOC_MPU_CFG 14
+#define	ICBID_SLAVE_ONOC_MPU_CFG 15
+#define	ICBID_SLAVE_MNOC_BIMC 16
+#define	ICBID_SLAVE_SERVICE_MNOC 17
+#define	ICBID_SLAVE_OCMEM 18
+#define	ICBID_SLAVE_GMEM ICBID_SLAVE_OCMEM
+#define	ICBID_SLAVE_SERVICE_ONOC 19
+#define	ICBID_SLAVE_APPSS 20
+#define	ICBID_SLAVE_LPASS 21
+#define	ICBID_SLAVE_USB3 22
+#define	ICBID_SLAVE_USB3_0 ICBID_SLAVE_USB3
+#define	ICBID_SLAVE_WCSS 23
+#define	ICBID_SLAVE_SNOC_BIMC 24
+#define	ICBID_SLAVE_SNOC_BIMC_0 ICBID_SLAVE_SNOC_BIMC
+#define	ICBID_SLAVE_SNOC_CNOC 25
+#define	ICBID_SLAVE_IMEM 26
+#define	ICBID_SLAVE_OCIMEM ICBID_SLAVE_IMEM
+#define	ICBID_SLAVE_SNOC_OVIRT 27
+#define	ICBID_SLAVE_SNOC_GVIRT ICBID_SLAVE_SNOC_OVIRT
+#define	ICBID_SLAVE_SNOC_PNOC 28
+#define	ICBID_SLAVE_SNOC_PCNOC ICBID_SLAVE_SNOC_PNOC
+#define	ICBID_SLAVE_SERVICE_SNOC 29
+#define	ICBID_SLAVE_QDSS_STM 30
+#define	ICBID_SLAVE_SDCC_1 31
+#define	ICBID_SLAVE_SDCC_3 32
+#define	ICBID_SLAVE_SDCC_2 33
+#define	ICBID_SLAVE_SDCC_4 34
+#define	ICBID_SLAVE_TSIF 35
+#define	ICBID_SLAVE_BAM_DMA 36
+#define	ICBID_SLAVE_BLSP_2 37
+#define	ICBID_SLAVE_USB_HSIC 38
+#define	ICBID_SLAVE_BLSP_1 39
+#define	ICBID_SLAVE_USB_HS 40
+#define	ICBID_SLAVE_USB_HS1 ICBID_SLAVE_USB_HS
+#define	ICBID_SLAVE_PDM 41
+#define	ICBID_SLAVE_PERIPH_APU_CFG 42
+#define	ICBID_SLAVE_PNOC_MPU_CFG 43
+#define	ICBID_SLAVE_PRNG 44
+#define	ICBID_SLAVE_PNOC_SNOC 45
+#define	ICBID_SLAVE_PCNOC_SNOC ICBID_SLAVE_PNOC_SNOC
+#define	ICBID_SLAVE_SERVICE_PNOC 46
+#define	ICBID_SLAVE_CLK_CTL 47
+#define	ICBID_SLAVE_CNOC_MSS 48
+#define	ICBID_SLAVE_PCNOC_MSS ICBID_SLAVE_CNOC_MSS
+#define	ICBID_SLAVE_SECURITY 49
+#define	ICBID_SLAVE_TCSR 50
+#define	ICBID_SLAVE_TLMM 51
+#define	ICBID_SLAVE_CRYPTO_0_CFG 52
+#define	ICBID_SLAVE_CRYPTO_1_CFG 53
+#define	ICBID_SLAVE_IMEM_CFG 54
+#define	ICBID_SLAVE_MESSAGE_RAM 55
+#define	ICBID_SLAVE_BIMC_CFG 56
+#define	ICBID_SLAVE_BOOT_ROM 57
+#define	ICBID_SLAVE_CNOC_MNOC_MMSS_CFG 58
+#define	ICBID_SLAVE_PMIC_ARB 59
+#define	ICBID_SLAVE_SPDM_WRAPPER 60
+#define	ICBID_SLAVE_DEHR_CFG 61
+#define	ICBID_SLAVE_MPM 62
+#define	ICBID_SLAVE_QDSS_CFG 63
+#define	ICBID_SLAVE_RBCPR_CFG 64
+#define	ICBID_SLAVE_RBCPR_CX_CFG ICBID_SLAVE_RBCPR_CFG
+#define	ICBID_SLAVE_RBCPR_QDSS_APU_CFG 65
+#define	ICBID_SLAVE_CNOC_MNOC_CFG 66
+#define	ICBID_SLAVE_SNOC_MPU_CFG 67
+#define	ICBID_SLAVE_CNOC_ONOC_CFG 68
+#define	ICBID_SLAVE_PNOC_CFG 69
+#define	ICBID_SLAVE_SNOC_CFG 70
+#define	ICBID_SLAVE_EBI1_DLL_CFG 71
+#define	ICBID_SLAVE_PHY_APU_CFG 72
+#define	ICBID_SLAVE_EBI1_PHY_CFG 73
+#define	ICBID_SLAVE_RPM 74
+#define	ICBID_SLAVE_CNOC_SNOC 75
+#define	ICBID_SLAVE_SERVICE_CNOC 76
+#define	ICBID_SLAVE_OVIRT_SNOC 77
+#define	ICBID_SLAVE_OVIRT_OCMEM 78
+#define	ICBID_SLAVE_USB_HS2 79
+#define	ICBID_SLAVE_QPIC 80
+#define	ICBID_SLAVE_IPS_CFG 81
+#define	ICBID_SLAVE_DSI_CFG 82
+#define	ICBID_SLAVE_USB3_1 83
+#define	ICBID_SLAVE_PCIE_0 84
+#define	ICBID_SLAVE_PCIE_1 85
+#define	ICBID_SLAVE_PSS_SMMU_CFG 86
+#define	ICBID_SLAVE_CRYPTO_2_CFG 87
+#define	ICBID_SLAVE_PCIE_0_CFG 88
+#define	ICBID_SLAVE_PCIE_1_CFG 89
+#define	ICBID_SLAVE_SATA_CFG 90
+#define	ICBID_SLAVE_SPSS_GENI_IR 91
+#define	ICBID_SLAVE_UFS_CFG 92
+#define	ICBID_SLAVE_AVSYNC_CFG 93
+#define	ICBID_SLAVE_VPU_CFG 94
+#define	ICBID_SLAVE_USB_PHY_CFG 95
+#define	ICBID_SLAVE_RBCPR_MX_CFG 96
+#define	ICBID_SLAVE_PCIE_PARF 97
+#define	ICBID_SLAVE_VCAP_CFG 98
+#define	ICBID_SLAVE_EMAC_CFG 99
+#define	ICBID_SLAVE_BCAST_CFG 100
+#define	ICBID_SLAVE_KLM_CFG 101
+#define	ICBID_SLAVE_DISPLAY_PWM 102
+#define	ICBID_SLAVE_GENI 103
+#define	ICBID_SLAVE_SNOC_BIMC_1 104
+#define	ICBID_SLAVE_AUDIO 105
+#define	ICBID_SLAVE_CATS_0 106
+#define	ICBID_SLAVE_CATS_1 107
+#define	ICBID_SLAVE_MM_INT_0 108
+#define	ICBID_SLAVE_MM_INT_1 109
+#define	ICBID_SLAVE_MM_INT_2 110
+#define	ICBID_SLAVE_MM_INT_BIMC 111
+#define	ICBID_SLAVE_MMU_MODEM_XPU_CFG 112
+#define	ICBID_SLAVE_MSS_INT 113
+#define	ICBID_SLAVE_PCNOC_INT_0 114
+#define	ICBID_SLAVE_PCNOC_INT_1 115
+#define	ICBID_SLAVE_PCNOC_M_0 116
+#define	ICBID_SLAVE_PCNOC_M_1 117
+#define	ICBID_SLAVE_PCNOC_S_0 118
+#define	ICBID_SLAVE_PCNOC_S_1 119
+#define	ICBID_SLAVE_PCNOC_S_2 120
+#define	ICBID_SLAVE_PCNOC_S_3 121
+#define	ICBID_SLAVE_PCNOC_S_4 122
+#define	ICBID_SLAVE_PCNOC_S_6 123
+#define	ICBID_SLAVE_PCNOC_S_7 124
+#define	ICBID_SLAVE_PCNOC_S_8 125
+#define	ICBID_SLAVE_PCNOC_S_9 126
+#define	ICBID_SLAVE_PRNG_XPU_CFG 127
+#define	ICBID_SLAVE_QDSS_INT 128
+#define	ICBID_SLAVE_RPM_XPU_CFG 129
+#define	ICBID_SLAVE_SNOC_INT_0 130
+#define	ICBID_SLAVE_SNOC_INT_1 131
+#define	ICBID_SLAVE_SNOC_INT_BIMC 132
+#define	ICBID_SLAVE_TCU 133
+#define	ICBID_SLAVE_BIMC_INT_0 134
+#define	ICBID_SLAVE_BIMC_INT_1 135
+#define	ICBID_SLAVE_RICA_CFG 136
+#define	ICBID_SLAVE_SNOC_BIMC_2 137
+#define	ICBID_SLAVE_BIMC_SNOC_1 138
+#define	ICBID_SLAVE_PNOC_A1NOC 139
+#define	ICBID_SLAVE_SNOC_VMEM 140
+#define	ICBID_SLAVE_A0NOC_SNOC 141
+#define	ICBID_SLAVE_A1NOC_SNOC 142
+#define	ICBID_SLAVE_A2NOC_SNOC 143
+#define	ICBID_SLAVE_A0NOC_CFG 144
+#define	ICBID_SLAVE_A0NOC_MPU_CFG 145
+#define	ICBID_SLAVE_A0NOC_SMMU_CFG 146
+#define	ICBID_SLAVE_A1NOC_CFG 147
+#define	ICBID_SLAVE_A1NOC_MPU_CFG 148
+#define	ICBID_SLAVE_A1NOC_SMMU_CFG 149
+#define	ICBID_SLAVE_A2NOC_CFG 150
+#define	ICBID_SLAVE_A2NOC_MPU_CFG 151
+#define	ICBID_SLAVE_A2NOC_SMMU_CFG 152
+#define	ICBID_SLAVE_AHB2PHY 153
+#define	ICBID_SLAVE_CAMERA_THROTTLE_CFG 154
+#define	ICBID_SLAVE_DCC_CFG 155
+#define	ICBID_SLAVE_DISPLAY_THROTTLE_CFG 156
+#define	ICBID_SLAVE_DSA_CFG 157
+#define	ICBID_SLAVE_DSA_MPU_CFG 158
+#define	ICBID_SLAVE_SSC_MPU_CFG 159
+#define	ICBID_SLAVE_HMSS_L3 160
+#define	ICBID_SLAVE_LPASS_SMMU_CFG 161
+#define	ICBID_SLAVE_MMAGIC_CFG 162
+#define	ICBID_SLAVE_PCIE20_AHB2PHY 163
+#define	ICBID_SLAVE_PCIE_2 164
+#define	ICBID_SLAVE_PCIE_2_CFG 165
+#define	ICBID_SLAVE_PIMEM 166
+#define	ICBID_SLAVE_PIMEM_CFG 167
+#define	ICBID_SLAVE_QDSS_RBCPR_APU_CFG 168
+#define	ICBID_SLAVE_RBCPR_CX 169
+#define	ICBID_SLAVE_RBCPR_MX 170
+#define	ICBID_SLAVE_SMMU_CPP_CFG 171
+#define	ICBID_SLAVE_SMMU_JPEG_CFG 172
+#define	ICBID_SLAVE_SMMU_MDP_CFG 173
+#define	ICBID_SLAVE_SMMU_ROTATOR_CFG 174
+#define	ICBID_SLAVE_SMMU_VENUS_CFG 175
+#define	ICBID_SLAVE_SMMU_VFE_CFG 176
+#define	ICBID_SLAVE_SSC_CFG 177
+#define	ICBID_SLAVE_VENUS_THROTTLE_CFG 178
+#define	ICBID_SLAVE_VMEM 179
+#define	ICBID_SLAVE_VMEM_CFG 180
+#define	ICBID_SLAVE_QDSS_MPU_CFG 181
+#define	ICBID_SLAVE_USB3_PHY_CFG 182
+#define	ICBID_SLAVE_IPA_CFG 183
+#define	ICBID_SLAVE_PCNOC_INT_2 184
+#define	ICBID_SLAVE_PCNOC_INT_3 185
+#define	ICBID_SLAVE_PCNOC_INT_4 186
+#define	ICBID_SLAVE_PCNOC_INT_5 187
+#define	ICBID_SLAVE_PCNOC_INT_6 188
+#define	ICBID_SLAVE_PCNOC_S_5 189
+#define	ICBID_SLAVE_QSPI 190
+#define	ICBID_SLAVE_A1NOC_MS_MPU_CFG 191
+#define	ICBID_SLAVE_A2NOC_MS_MPU_CFG 192
+#define	ICBID_SLAVE_MODEM_Q6_SMMU_CFG 193
+#define	ICBID_SLAVE_MSS_MPU_CFG 194
+#define	ICBID_SLAVE_MSS_PROC_MS_MPU_CFG 195
+#define	ICBID_SLAVE_SKL 196
+#define	ICBID_SLAVE_SNOC_INT_2 197
+#define	ICBID_SLAVE_SMMNOC_BIMC 198
+#define	ICBID_SLAVE_CRVIRT_A1NOC 199
+#define	ICBID_SLAVE_SGMII	 200
+#define	ICBID_SLAVE_QHS4_APPS	 201
+#define	ICBID_SLAVE_BIMC_PCNOC   202
+#define	ICBID_SLAVE_PCNOC_BIMC_1 203
+#define	ICBID_SLAVE_SPMI_FETCHER 204
+#define	ICBID_SLAVE_MMSS_SMMU_CFG 205
+#define	ICBID_SLAVE_WLAN 206
+#define	ICBID_SLAVE_CRVIRT_A2NOC 207
+#define	ICBID_SLAVE_CNOC_A2NOC 208
+#define	ICBID_SLAVE_GLM 209
+#define	ICBID_SLAVE_GNOC_BIMC 210
+#define	ICBID_SLAVE_GNOC_SNOC 211
+#define	ICBID_SLAVE_QM_CFG 212
+#define	ICBID_SLAVE_TLMM_EAST 213
+#define	ICBID_SLAVE_TLMM_NORTH 214
+#define	ICBID_SLAVE_TLMM_WEST 215
+#define	ICBID_SLAVE_LPASS_TCM	216
+#define	ICBID_SLAVE_TLMM_SOUTH	217
+#define	ICBID_SLAVE_TLMM_CENTER	218
+#define	ICBID_SLAVE_MSS_NAV_CE_MPU_CFG	219
+#define	ICBID_SLAVE_A2NOC_THROTTLE_CFG	220
+#define	ICBID_SLAVE_CDSP	221
+#define	ICBID_SLAVE_CDSP_SMMU_CFG	222
+#define	ICBID_SLAVE_LPASS_MPU_CFG	223
+#define	ICBID_SLAVE_CSI_PHY_CFG	224
+#endif
diff -Nruw linux-4.4.115-fbx/arch/cris/boot/dts/include/dt-bindings/msm./msm-bus-rule-ops.h linux-4.4.115-fbx/arch/cris/boot/dts/include/dt-bindings/msm/msm-bus-rule-ops.h
--- linux-4.4.115-fbx/arch/cris/boot/dts/include/dt-bindings/msm./msm-bus-rule-ops.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/arch/cris/boot/dts/include/dt-bindings/msm/msm-bus-rule-ops.h	2019-01-22 16:16:28.179288750 +0100
@@ -0,0 +1,34 @@
+/* Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MSM_BUS_RULE_OPS_H
+#define __MSM_BUS_RULE_OPS_H
+
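+/*
+ * A bus rule compares one of the FLD_* fields (instantaneous bandwidth,
+ * average bandwidth or clock rate) against a threshold using an OP_*
+ * operator.
+ */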
+#define FLD_IB	0
+#define FLD_AB	1
+#define FLD_CLK	2
+
+#define OP_LE	0
+#define OP_LT	1
+#define OP_GE	2
+#define OP_GT	3
+#define OP_NOOP	4
+
+#define RULE_STATE_NOT_APPLIED	0
+#define RULE_STATE_APPLIED	1
+
+#define THROTTLE_ON	0
+#define THROTTLE_OFF	1
+#define THROTTLE_REG	2
+
+#endif
diff -Nruw linux-4.4.115-fbx/arch/cris/boot/dts/include/dt-bindings/msm./pm.h linux-4.4.115-fbx/arch/cris/boot/dts/include/dt-bindings/msm/pm.h
--- linux-4.4.115-fbx/arch/cris/boot/dts/include/dt-bindings/msm./pm.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/arch/cris/boot/dts/include/dt-bindings/msm/pm.h	2019-01-22 16:16:28.179288750 +0100
@@ -0,0 +1,25 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __DT_MSM_PM_H__
+#define __DT_MSM_PM_H__
+
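+/*
+ * LPM_RESET_LVL_* encode how deeply a low-power mode resets state (none,
+ * retention, GDHS, full power collapse); LPM_AFF_LVL_* encode the
+ * affinity level (single CPU, L2 cluster, CCI) a mode applies to.
+ */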
+#define LPM_RESET_LVL_NONE	0
+#define LPM_RESET_LVL_RET	1
+#define LPM_RESET_LVL_GDHS	2
+#define LPM_RESET_LVL_PC	3
+
+#define LPM_AFF_LVL_CPU		0
+#define LPM_AFF_LVL_L2		1
+#define LPM_AFF_LVL_CCI		2
+
+#endif
diff -Nruw linux-4.4.115-fbx/arch/cris/boot/dts/include/dt-bindings/msm./power-on.h linux-4.4.115-fbx/arch/cris/boot/dts/include/dt-bindings/msm/power-on.h
--- linux-4.4.115-fbx/arch/cris/boot/dts/include/dt-bindings/msm./power-on.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/arch/cris/boot/dts/include/dt-bindings/msm/power-on.h	2019-01-22 16:16:28.179288750 +0100
@@ -0,0 +1,24 @@
+/* Copyright (c) 2015, 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MSM_POWER_ON_H__
+#define __MSM_POWER_ON_H__
+
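+/*
+ * Power-off types programmed into the PMIC PON block to select what a
+ * subsequent PS_HOLD de-assertion does (warm reset, shutdown, hard reset).
+ */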
+#define PON_POWER_OFF_RESERVED		0x00
+#define PON_POWER_OFF_WARM_RESET	0x01
+#define PON_POWER_OFF_SHUTDOWN		0x04
+#define PON_POWER_OFF_DVDD_SHUTDOWN	0x05
+#define PON_POWER_OFF_HARD_RESET	0x07
+#define PON_POWER_OFF_DVDD_HARD_RESET	0x08
+#define PON_POWER_OFF_MAX_TYPE		0x10
+
+#endif
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/arch/cris/boot/dts/include/dt-bindings/regulator/qcom,rpm-smd-regulator.h	2019-01-22 16:16:28.183288787 +0100
@@ -0,0 +1,28 @@
+/* Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __QCOM_RPM_SMD_REGULATOR_H
+#define __QCOM_RPM_SMD_REGULATOR_H
+
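+/*
+ * Voltage-corner levels carried in RPM SMD regulator requests; the values
+ * are spaced out so new corners can be added without renumbering.
+ */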
+#define RPM_SMD_REGULATOR_LEVEL_NONE		0
+#define RPM_SMD_REGULATOR_LEVEL_RETENTION	16
+#define RPM_SMD_REGULATOR_LEVEL_RETENTION_PLUS	32
+#define RPM_SMD_REGULATOR_LEVEL_MIN_SVS		48
+#define RPM_SMD_REGULATOR_LEVEL_LOW_SVS		64
+#define RPM_SMD_REGULATOR_LEVEL_SVS		128
+#define RPM_SMD_REGULATOR_LEVEL_SVS_PLUS	192
+#define RPM_SMD_REGULATOR_LEVEL_NOM		256
+#define RPM_SMD_REGULATOR_LEVEL_NOM_PLUS	320
+#define RPM_SMD_REGULATOR_LEVEL_TURBO		384
+#define RPM_SMD_REGULATOR_LEVEL_BINNING		512
+
+#endif
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/arch/metag/boot/dts/include/dt-bindings/clock/audio-ext-clk.h	2019-01-22 16:16:28.167288642 +0100
@@ -0,0 +1,30 @@
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __AUDIO_EXT_CLK_H
+#define __AUDIO_EXT_CLK_H
+
+/* Audio External Clocks */
+#define AUDIO_PMI_CLK		0
+#define AUDIO_PMIC_LNBB_CLK	0
+#define AUDIO_AP_CLK		1
+#define AUDIO_AP_CLK2		2
+#define AUDIO_LPASS_MCLK	3
+#define AUDIO_LPASS_MCLK2	4
+
+#define clk_audio_ap_clk	0x9b5727cb
+#define clk_audio_pmi_clk	0xcbfe416d
+#define clk_audio_ap_clk2	0x454d1e91
+#define clk_audio_lpass_mclk	0xf0f2a284
+#define clk_audio_pmi_lnbb_clk	0x57312343
+
+#endif
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/arch/metag/boot/dts/include/dt-bindings/clock/msm-clocks-8996.h	2019-01-22 16:16:28.171288678 +0100
@@ -0,0 +1,574 @@
+/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MSM_CLOCKS_8996_H
+#define __MSM_CLOCKS_8996_H
+
+#include "audio-ext-clk.h"
+
+/* clock_gcc controlled clocks */
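+/*
+ * The clk_* values are hashes derived from the clock names; the clock
+ * framework looks clocks up by hash rather than by sequential index.
+ */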
+#define clk_cxo_clk_src			0x79e95308
+#define clk_pnoc_clk			0x4325d220
+#define clk_pnoc_a_clk			0x2808c12b
+#define clk_bimc_clk			0x4b80bf00
+#define clk_bimc_a_clk			0x4b25668a
+#define clk_cnoc_clk			0xd5ccb7f4
+#define clk_cnoc_a_clk			0xd8fe2ccc
+#define clk_snoc_clk			0x2c341aa0
+#define clk_snoc_a_clk			0x8fcef2af
+#define clk_bb_clk1			0xf5304268
+#define clk_bb_clk1_ao			0xfa113810
+#define clk_bb_clk1_pin			0x6dd0a779
+#define clk_bb_clk1_pin_ao		0x9b637772
+#define clk_bb_clk2			0xfe15cb87
+#define clk_bb_clk2_ao			0x59682706
+#define clk_bb_clk2_pin			0x498938e5
+#define clk_bb_clk2_pin_ao		0x52513787
+#define clk_bimc_msmbus_clk		0xd212feea
+#define clk_bimc_msmbus_a_clk		0x71d1a499
+#define clk_ce1_a_clk			0x44a833fe
+#define clk_cnoc_msmbus_clk		0x62228b5d
+#define clk_cnoc_msmbus_a_clk		0x67442955
+#define clk_cxo_clk_src_ao		0x64eb6004
+#define clk_cxo_dwc3_clk		0xf79c19f6
+#define clk_cxo_lpm_clk			0x94adbf3d
+#define clk_cxo_otg_clk			0x4eec0bb9
+#define clk_cxo_pil_lpass_clk		0xe17f0ff6
+#define clk_cxo_pil_ssc_clk		0x81832015
+#define clk_div_clk1			0xaa1157a6
+#define clk_div_clk1_ao			0x6b943d68
+#define clk_div_clk2			0xd454019f
+#define clk_div_clk2_ao			0x53f9e788
+#define clk_div_clk3			0xa9a55a68
+#define clk_div_clk3_ao			0x3d6725a8
+#define clk_ipa_a_clk			0xeeec2919
+#define clk_ipa_clk			0xfa685cda
+#define clk_ln_bb_clk			0x3ab0b36d
+#define clk_ln_bb_a_clk			0xc7257ea8
+#define clk_ln_bb_clk_pin		0x1b1c476a
+#define clk_ln_bb_a_clk_pin		0x9cbb5411
+#define clk_mcd_ce1_clk			0xbb615d26
+#define clk_pnoc_keepalive_a_clk	0xf8f91f0b
+#define clk_pnoc_msmbus_clk		0x38b95c77
+#define clk_pnoc_msmbus_a_clk		0x8c9b4e93
+#define clk_pnoc_pm_clk			0xd6f7dfb9
+#define clk_pnoc_sps_clk		0xd482ecc7
+#define clk_qdss_a_clk			0xdd121669
+#define clk_qdss_clk			0x1492202a
+#define clk_rf_clk1			0xaabeea5a
+#define clk_rf_clk1_ao			0x72a10cb8
+#define clk_rf_clk1_pin			0x8f463562
+#define clk_rf_clk1_pin_ao		0x62549ff6
+#define clk_rf_clk2			0x24a30992
+#define clk_rf_clk2_ao			0x944d8bbd
+#define clk_rf_clk2_pin			0xa7c5602a
+#define clk_rf_clk2_pin_ao		0x2d75eb4d
+#define clk_snoc_msmbus_clk		0xe6900bb6
+#define clk_snoc_msmbus_a_clk		0x5d4683bd
+#define clk_qcedev_ce1_clk		0x293f97b0
+#define clk_qcrypto_ce1_clk		0xa6ac14df
+#define clk_qseecom_ce1_clk		0xaa858373
+#define clk_scm_ce1_clk			0xd8ebcc62
+#define clk_ce1_clk			0x42229c55
+#define clk_gcc_ce1_ahb_m_clk		0x2eb28c01
+#define clk_gcc_ce1_axi_m_clk		0xc174dfba
+#define clk_measure_only_bimc_hmss_axi_clk	0xc1cc4f11
+#define clk_aggre1_noc_clk		0x049abba8
+#define clk_aggre1_noc_a_clk		0xc12e4220
+#define clk_aggre2_noc_clk		0xaa681404
+#define clk_aggre2_noc_a_clk		0xcab67089
+#define clk_mmssnoc_axi_rpm_clk		0x4d7f8cdc
+#define clk_mmssnoc_axi_rpm_a_clk	0xfbea899b
+#define clk_mmssnoc_axi_clk		0xdb4b31e6
+#define clk_mmssnoc_axi_a_clk		0xd4970614
+#define clk_mmssnoc_gds_clk		0x06a22afa
+
+#define clk_gpll0			0x1ebe3bc4
+#define clk_gpll0_ao			0xa1368304
+#define clk_gpll0_out_main		0xe9374de7
+#define clk_gpll4			0xb3b5d85b
+#define clk_gpll4_out_main		0xa9a0ab9d
+#define clk_ufs_axi_clk_src		0x297ca380
+#define clk_pcie_aux_clk_src		0xebc50566
+#define clk_usb30_master_clk_src	0xc6262f89
+#define clk_usb20_master_clk_src	0x5680ac83
+#define clk_ufs_ice_core_clk_src	0xda8e7119
+#define clk_blsp1_qup1_i2c_apps_clk_src 0x17f78f5e
+#define clk_blsp1_qup1_spi_apps_clk_src 0xf534c4fa
+#define clk_blsp1_qup2_i2c_apps_clk_src 0x8de71c79
+#define clk_blsp1_qup2_spi_apps_clk_src 0x33cf809a
+#define clk_blsp1_qup3_i2c_apps_clk_src 0xf161b902
+#define clk_blsp1_qup3_spi_apps_clk_src 0x5e95683f
+#define clk_blsp1_qup4_i2c_apps_clk_src 0xb2ecce68
+#define clk_blsp1_qup4_spi_apps_clk_src 0xddb5bbdb
+#define clk_blsp1_qup5_i2c_apps_clk_src 0x71ea7804
+#define clk_blsp1_qup5_spi_apps_clk_src 0x9752f35f
+#define clk_blsp1_qup6_i2c_apps_clk_src 0x28806803
+#define clk_blsp1_qup6_spi_apps_clk_src 0x44a1edc4
+#define clk_blsp1_uart1_apps_clk_src	0xf8146114
+#define clk_blsp1_uart2_apps_clk_src	0xfc9c2f73
+#define clk_blsp1_uart3_apps_clk_src	0x600497f2
+#define clk_blsp1_uart4_apps_clk_src	0x56bff15c
+#define clk_blsp1_uart5_apps_clk_src	0x218ef697
+#define clk_blsp1_uart6_apps_clk_src	0x8fbdbe4c
+#define clk_blsp2_qup1_i2c_apps_clk_src 0xd6d1e95d
+#define clk_blsp2_qup1_spi_apps_clk_src 0xcc1b8365
+#define clk_blsp2_qup2_i2c_apps_clk_src 0x603b5c51
+#define clk_blsp2_qup2_spi_apps_clk_src 0xd577dc44
+#define clk_blsp2_qup3_i2c_apps_clk_src 0xea82959c
+#define clk_blsp2_qup3_spi_apps_clk_src 0xd04b1e92
+#define clk_blsp2_qup4_i2c_apps_clk_src 0x73dc968c
+#define clk_blsp2_qup4_spi_apps_clk_src 0x25d4a2b1
+#define clk_blsp2_qup5_i2c_apps_clk_src 0xcc3698bd
+#define clk_blsp2_qup5_spi_apps_clk_src 0xfa0cf45e
+#define clk_blsp2_qup6_i2c_apps_clk_src 0x2fa53151
+#define clk_blsp2_qup6_spi_apps_clk_src 0x5ca86755
+#define clk_blsp2_uart1_apps_clk_src	0x562c66dc
+#define clk_blsp2_uart2_apps_clk_src	0xdd448080
+#define clk_blsp2_uart3_apps_clk_src	0x46b2e90f
+#define clk_blsp2_uart4_apps_clk_src	0x23a093d2
+#define clk_blsp2_uart5_apps_clk_src	0xe067616a
+#define clk_blsp2_uart6_apps_clk_src	0xe02d2829
+#define clk_gp1_clk_src			0xad85b97a
+#define clk_gp2_clk_src			0xfb1f0065
+#define clk_gp3_clk_src			0x63b693d6
+#define clk_hmss_rbcpr_clk_src		0xedd9a474
+#define clk_pdm2_clk_src		0x31e494fd
+#define clk_sdcc1_apps_clk_src		0xd4975db2
+#define clk_sdcc2_apps_clk_src		0xfc46c821
+#define clk_sdcc3_apps_clk_src		0xea34c7f4
+#define clk_sdcc4_apps_clk_src		0x7aaaaa0c
+#define clk_tsif_ref_clk_src		0x4e9042d1
+#define clk_usb20_mock_utmi_clk_src	0xc3aaeecb
+#define clk_usb30_mock_utmi_clk_src	0xa024a976
+#define clk_usb3_phy_aux_clk_src	0x15eec63c
+#define clk_gcc_qusb2phy_prim_reset	0x07550fa1
+#define clk_gcc_qusb2phy_sec_reset	0x3f3a87d0
+#define clk_gcc_periph_noc_usb20_ahb_clk	0xfb9f26e9
+#define clk_gcc_mmss_gcc_dbg_clk	0xe89d461c
+#define clk_cpu_dbg_clk			0x6550dfa9
+#define clk_gcc_blsp1_ahb_clk		0x8caa5b4f
+#define clk_gcc_blsp1_qup1_i2c_apps_clk 0xc303fae9
+#define clk_gcc_blsp1_qup1_spi_apps_clk 0x759a76b0
+#define clk_gcc_blsp1_qup2_i2c_apps_clk 0x1076f220
+#define clk_gcc_blsp1_qup2_spi_apps_clk 0x3e77d48f
+#define clk_gcc_blsp1_qup3_i2c_apps_clk 0x9e25ac82
+#define clk_gcc_blsp1_qup3_spi_apps_clk 0xfb978880
+#define clk_gcc_blsp1_qup4_i2c_apps_clk 0xd7f40f6f
+#define clk_gcc_blsp1_qup4_spi_apps_clk 0x80f8722f
+#define clk_gcc_blsp1_qup5_i2c_apps_clk 0xacae5604
+#define clk_gcc_blsp1_qup5_spi_apps_clk 0xbf3e15d7
+#define clk_gcc_blsp1_qup6_i2c_apps_clk 0x5c6ad820
+#define clk_gcc_blsp1_qup6_spi_apps_clk 0x780d9f85
+#define clk_gcc_blsp1_uart1_apps_clk	0xc7c62f90
+#define clk_gcc_blsp1_uart2_apps_clk	0xf8a61c96
+#define clk_gcc_blsp1_uart3_apps_clk	0xc3298bd7
+#define clk_gcc_blsp1_uart4_apps_clk	0x26be16c0
+#define clk_gcc_blsp1_uart5_apps_clk	0x28a6bc74
+#define clk_gcc_blsp1_uart6_apps_clk	0x28fd3466
+#define clk_gcc_blsp2_ahb_clk		0x8f283c1d
+#define clk_gcc_blsp2_qup1_i2c_apps_clk 0x9ace11dd
+#define clk_gcc_blsp2_qup1_spi_apps_clk 0xa32604cc
+#define clk_gcc_blsp2_qup2_i2c_apps_clk 0x1bf9a57e
+#define clk_gcc_blsp2_qup2_spi_apps_clk 0xbf54ca6d
+#define clk_gcc_blsp2_qup3_i2c_apps_clk 0x336d4170
+#define clk_gcc_blsp2_qup3_spi_apps_clk 0xc68509d6
+#define clk_gcc_blsp2_qup4_i2c_apps_clk 0xbd22539d
+#define clk_gcc_blsp2_qup4_spi_apps_clk 0x01a72b93
+#define clk_gcc_blsp2_qup5_i2c_apps_clk 0xe2b2ce1d
+#define clk_gcc_blsp2_qup5_spi_apps_clk 0xf40999cd
+#define clk_gcc_blsp2_qup6_i2c_apps_clk 0x894bcea4
+#define clk_gcc_blsp2_qup6_spi_apps_clk 0xfe1bd34a
+#define clk_gcc_blsp2_uart1_apps_clk	0x8c3512ff
+#define clk_gcc_blsp2_uart2_apps_clk	0x1e1965a3
+#define clk_gcc_blsp2_uart3_apps_clk	0x382415ab
+#define clk_gcc_blsp2_uart4_apps_clk	0x87a44b42
+#define clk_gcc_blsp2_uart5_apps_clk	0x5cd30649
+#define clk_gcc_blsp2_uart6_apps_clk	0x8feee5ab
+#define clk_gcc_boot_rom_ahb_clk	0xde2adeb1
+#define clk_gcc_gp1_clk			0x057f7b69
+#define clk_gcc_gp2_clk			0x9bf83ffd
+#define clk_gcc_gp3_clk			0xec6539ee
+#define clk_gcc_hmss_rbcpr_clk		0x699183be
+#define clk_gcc_mmss_noc_cfg_ahb_clk	0xb41a9d99
+#define clk_gcc_pcie_0_aux_clk		0x3d2e3ece
+#define clk_gcc_pcie_0_cfg_ahb_clk	0x4dd325c3
+#define clk_gcc_pcie_0_mstr_axi_clk	0x3f85285b
+#define clk_gcc_pcie_0_slv_axi_clk	0xd69638a1
+#define clk_gcc_pcie_0_pipe_clk		0x4f37621e
+#define clk_gcc_pcie_0_phy_reset	0xdc3201c1
+#define clk_gcc_pcie_1_aux_clk		0xc9bb962c
+#define clk_gcc_pcie_1_cfg_ahb_clk	0xb6338658
+#define clk_gcc_pcie_1_mstr_axi_clk	0xc20f6269
+#define clk_gcc_pcie_1_slv_axi_clk	0xd54e40d6
+#define clk_gcc_pcie_1_pipe_clk		0xc1627422
+#define clk_gcc_pcie_1_phy_reset	0x674481bb
+#define clk_gcc_pcie_2_aux_clk		0xa4dc7ae8
+#define clk_gcc_pcie_2_cfg_ahb_clk	0x4f1d3121
+#define clk_gcc_pcie_2_mstr_axi_clk	0x9e81724a
+#define clk_gcc_pcie_2_slv_axi_clk	0x7990d8b2
+#define clk_gcc_pcie_2_pipe_clk		0xa757a834
+#define clk_gcc_pcie_2_phy_reset	0x82634880
+#define clk_gcc_pcie_phy_reset		0x9bc3c959
+#define clk_gcc_pcie_phy_com_reset	0x8bf513e6
+#define clk_gcc_pcie_phy_nocsr_com_phy_reset	0x0c16a2da
+#define clk_gcc_pcie_phy_aux_clk	0x4746e74f
+#define clk_gcc_pcie_phy_cfg_ahb_clk	0x8533671a
+#define clk_gcc_pdm2_clk		0x99d55711
+#define clk_gcc_pdm_ahb_clk		0x365664f6
+#define clk_gcc_prng_ahb_clk		0x397e7eaa
+#define clk_gcc_sdcc1_ahb_clk		0x691e0caa
+#define clk_gcc_sdcc1_apps_clk		0x9ad6fb96
+#define clk_gcc_sdcc2_ahb_clk		0x23d5727f
+#define clk_gcc_sdcc2_apps_clk		0x861b20ac
+#define clk_gcc_sdcc3_ahb_clk		0x565b2c03
+#define clk_gcc_sdcc3_apps_clk		0x0b27aeac
+#define clk_gcc_sdcc4_ahb_clk		0x64f3e6a8
+#define clk_gcc_sdcc4_apps_clk		0xbf7c4dc8
+#define clk_gcc_tsif_ahb_clk		0x88d2822c
+#define clk_gcc_tsif_ref_clk		0x8f1ed2c2
+#define clk_gcc_ufs_ahb_clk		0x1914bb84
+#define clk_gcc_ufs_axi_clk		0x47c743a7
+#define clk_gcc_ufs_ice_core_clk	0x310b0710
+#define clk_gcc_ufs_rx_cfg_clk		0xa6747786
+#define clk_gcc_ufs_rx_symbol_0_clk	0x7f43251c
+#define clk_gcc_ufs_rx_symbol_1_clk	0x03182fde
+#define clk_gcc_ufs_tx_cfg_clk		0xba2cf8b5
+#define clk_gcc_ufs_tx_symbol_0_clk	0x6a9f747a
+#define clk_gcc_ufs_unipro_core_clk	0x2daf7fd2
+#define clk_gcc_ufs_sys_clk_core_clk	0x360e5ac8
+#define clk_gcc_ufs_tx_symbol_clk_core_clk	0xf6fb0df7
+#define clk_gcc_usb20_master_clk	0x24c3b66a
+#define clk_gcc_usb20_mock_utmi_clk	0xe8db8203
+#define clk_gcc_usb20_sleep_clk		0x6e8cb4b2
+#define clk_gcc_usb30_master_clk	0xb3b4e2cb
+#define clk_gcc_usb30_mock_utmi_clk	0xa800b65a
+#define clk_gcc_usb30_sleep_clk		0xd0b65c92
+#define clk_gcc_usb3_phy_aux_clk	0x0d9a36e0
+#define clk_gcc_usb3_phy_pipe_clk	0xf279aff2
+#define clk_gcc_usb_phy_cfg_ahb2phy_clk	0xd1231a0e
+#define clk_gcc_aggre0_cnoc_ahb_clk	0x53a35559
+#define clk_gcc_aggre0_snoc_axi_clk	0x3c446400
+#define clk_gcc_aggre0_noc_qosgen_extref_clk	0x8c4356ba
+#define clk_hlos1_vote_lpass_core_smmu_clk	0x3aaa1743
+#define clk_hlos1_vote_lpass_adsp_smmu_clk	0xc76f702f
+#define clk_gcc_usb3_phy_reset		0x03d559f1
+#define clk_gcc_usb3phy_phy_reset	0xb1a4f885
+#define clk_gcc_usb3_clkref_clk		0xb6cc8f01
+#define clk_gcc_hdmi_clkref_clk		0x4d4eec04
+#define clk_gcc_edp_clkref_clk		0xa8685c3f
+#define clk_gcc_ufs_clkref_clk		0x92aa126f
+#define clk_gcc_pcie_clkref_clk		0xa2e247fa
+#define clk_gcc_rx2_usb2_clkref_clk	0x27ec24ba
+#define clk_gcc_rx1_usb2_clkref_clk	0x53351d25
+#define clk_gcc_smmu_aggre0_ahb_clk	0x47a06ce4
+#define clk_gcc_smmu_aggre0_axi_clk	0x3cac4a6c
+#define clk_gcc_sys_noc_usb3_axi_clk	0x94d26800
+#define clk_gcc_sys_noc_ufs_axi_clk	0x19d38312
+#define clk_gcc_aggre2_usb3_axi_clk	0xd5822a8e
+#define clk_gcc_aggre2_ufs_axi_clk	0xb31e5191
+#define clk_gcc_mmss_gpll0_div_clk	0xdd06848d
+#define clk_gcc_mmss_bimc_gfx_clk	0xe4f28754
+#define clk_gcc_bimc_gfx_clk		0x3edd69ad
+#define clk_gcc_qspi_ahb_clk		0x96969dc8
+#define clk_gcc_qspi_ser_clk		0xfaf1e266
+#define clk_qspi_ser_clk_src		0x426676ee
+#define clk_sdcc1_ice_core_clk_src	0xfd6a4301
+#define clk_gcc_sdcc1_ice_core_clk	0x0fd5680a
+#define clk_gcc_mss_cfg_ahb_clk		0x111cde81
+#define clk_gcc_mss_snoc_axi_clk	0x0e71de85
+#define clk_gcc_mss_q6_bimc_axi_clk	0x67544d62
+#define clk_gcc_mss_mnoc_bimc_axi_clk	0xf665d03f
+#define clk_gpll0_out_msscc		0x7d794829
+#define clk_gcc_debug_mux_v2		0xf7e749f0
+#define clk_gcc_dcc_ahb_clk		0xfa14a88c
+#define clk_gcc_aggre0_noc_mpu_cfg_ahb_clk	0x5c1bb8e2
+
+/* clock_mmss controlled clocks */
+#define clk_mmsscc_xo			0x05e63704
+#define clk_mmsscc_gpll0		0xe900c515
+#define clk_mmsscc_gpll0_div		0x73892e05
+#define clk_mmsscc_mmssnoc_ahb		0x7b4bd6f7
+#define clk_mmpll0			0xdd83b751
+#define clk_mmpll0_out_main		0x2f996a31
+#define clk_mmpll1			0x6da7fb90
+#define clk_mmpll1_out_main		0xa0d3a7da
+#define clk_mmpll4			0x22c063c1
+#define clk_mmpll4_out_main		0xfb21c2fd
+#define clk_mmpll3			0x18c76899
+#define clk_mmpll3_out_main		0x6eb6328f
+#define clk_ahb_clk_src			0x86f49203
+#define clk_mmpll2			0x1190e4d8
+#define clk_mmpll2_out_main		0x1e9e24a8
+#define clk_mmpll8			0xd06ad45e
+#define clk_mmpll8_out_main		0x75b1f386
+#define clk_mmpll9			0x1c50684c
+#define clk_mmpll9_out_main		0x16b74937
+#define clk_mmpll5			0xa41e1936
+#define clk_mmpll5_out_main		0xcc1897bf
+#define clk_csi0_clk_src		0x227e65bc
+#define clk_vfe0_clk_src		0xa0c2bd8f
+#define clk_vfe1_clk_src		0x4e357366
+#define clk_csi1_clk_src		0x6a2a6c36
+#define clk_csi2_clk_src		0x4113589f
+#define clk_csi3_clk_src		0xfd934012
+#define clk_maxi_clk_src		0x52c09777
+#define clk_cpp_clk_src			0x8382f56d
+#define clk_jpeg0_clk_src		0x9a0a0ac3
+#define clk_jpeg2_clk_src		0x5ad927f3
+#define clk_jpeg_dma_clk_src		0xb68afcea
+#define clk_mdp_clk_src			0x6dc1f8f1
+#define clk_video_core_clk_src		0x8be4c944
+#define clk_fd_core_clk_src		0xe4799ab7
+#define clk_cci_clk_src			0x822f3d97
+#define clk_csiphy0_3p_clk_src		0xd2474b12
+#define clk_csiphy1_3p_clk_src		0x46a02aff
+#define clk_csiphy2_3p_clk_src		0x1447813f
+#define clk_camss_gp0_clk_src		0x6b57cfe6
+#define clk_camss_gp1_clk_src		0xf735368a
+#define clk_mclk0_clk_src		0x266b3853
+#define clk_mclk1_clk_src		0xa73cad0c
+#define clk_mclk2_clk_src		0x42545468
+#define clk_mclk3_clk_src		0x2bfbb714
+#define clk_csi0phytimer_clk_src	0xc8a309be
+#define clk_csi1phytimer_clk_src	0x7c0fe23a
+#define clk_csi2phytimer_clk_src	0x62ffea9c
+#define clk_rbbmtimer_clk_src		0x17649ecc
+#define clk_esc0_clk_src		0xb41d7c38
+#define clk_esc1_clk_src		0x3b0afa42
+#define clk_hdmi_clk_src		0xb40aeea9
+#define clk_vsync_clk_src		0xecb43940
+#define clk_rbcpr_clk_src		0x2c2e9af2
+#define clk_video_subcore0_clk_src	0x88d79636
+#define clk_video_subcore1_clk_src	0x4966930c
+#define clk_mmss_bto_ahb_clk		0xfdf8c361
+#define clk_camss_ahb_clk		0xc4ff91d4
+#define clk_camss_cci_ahb_clk		0x04c4441a
+#define clk_camss_cci_clk		0xd6cb5eb9
+#define clk_camss_cpp_ahb_clk		0x12e9a87b
+#define clk_camss_cpp_clk		0xb82f366b
+#define clk_camss_cpp_axi_clk		0x5598c804
+#define clk_camss_cpp_vbif_ahb_clk	0xb5f31be4
+#define clk_camss_csi0_ahb_clk		0x6e29c972
+#define clk_camss_csi0_clk		0x30862ddb
+#define clk_camss_csi0phy_clk		0x2cecfb84
+#define clk_camss_csi0pix_clk		0x6946f77b
+#define clk_camss_csi0rdi_clk		0x83645ef5
+#define clk_camss_csi1_ahb_clk		0xccc15f06
+#define clk_camss_csi1_clk		0xb150f052
+#define clk_camss_csi1phy_clk		0xb989f06d
+#define clk_camss_csi1pix_clk		0x58d19bf3
+#define clk_camss_csi1rdi_clk		0x4d2f3352
+#define clk_camss_csi2_ahb_clk		0x92d02d75
+#define clk_camss_csi2_clk		0x74fc92e8
+#define clk_camss_csi2phy_clk		0xda05d9d8
+#define clk_camss_csi2pix_clk		0xf8ed0731
+#define clk_camss_csi2rdi_clk		0xdc1b2081
+#define clk_camss_csi3_ahb_clk		0xee5e459c
+#define clk_camss_csi3_clk		0x39488fdd
+#define clk_camss_csi3phy_clk		0x8b6063b9
+#define clk_camss_csi3pix_clk		0xd82bd467
+#define clk_camss_csi3rdi_clk		0xb6750046
+#define clk_camss_csi_vfe0_clk		0x3023937a
+#define clk_camss_csi_vfe1_clk		0xe66fa522
+#define clk_camss_csiphy0_3p_clk	0xf2a54f5a
+#define clk_camss_csiphy1_3p_clk	0x8bf70cb2
+#define clk_camss_csiphy2_3p_clk	0x1c14c939
+#define clk_camss_gp0_clk		0xcee7e51d
+#define clk_camss_gp1_clk		0x41f1c2e3
+#define clk_camss_ispif_ahb_clk		0x9a212c6d
+#define clk_camss_jpeg0_clk		0x0b0e2db7
+#define clk_camss_jpeg2_clk		0xd7291c8d
+#define clk_camss_jpeg_ahb_clk		0x1f47fd28
+#define clk_camss_jpeg_axi_clk		0x9e5545c8
+#define clk_camss_jpeg_dma_clk		0x2336e65d
+#define clk_camss_mclk0_clk		0xcf0c61e0
+#define clk_camss_mclk1_clk		0xd1410ed4
+#define clk_camss_mclk2_clk		0x851286f2
+#define clk_camss_mclk3_clk		0x4db11c45
+#define clk_camss_micro_ahb_clk		0x33a23277
+#define clk_camss_csi0phytimer_clk	0xff93b3c8
+#define clk_camss_csi1phytimer_clk	0x6c399ab6
+#define clk_camss_csi2phytimer_clk	0x24f47f49
+#define clk_camss_top_ahb_clk		0x8f8b2d33
+#define clk_camss_vfe_ahb_clk		0x595197bc
+#define clk_camss_vfe_axi_clk		0x273d4c31
+#define clk_camss_vfe0_ahb_clk		0x4652833c
+#define clk_camss_vfe0_clk		0x1e9bb8c4
+#define clk_camss_vfe0_stream_clk	0x22835fa4
+#define clk_camss_vfe1_ahb_clk		0x6a56abd3
+#define clk_camss_vfe1_clk		0x5bffa69b
+#define clk_camss_vfe1_stream_clk	0x92f849b9
+#define clk_fd_ahb_clk			0x868a2c5c
+#define clk_fd_core_clk			0x3badcae4
+#define clk_fd_core_uar_clk		0x7e624e15
+#define clk_gpu_ahb_clk			0xf97f1d43
+#define clk_gpu_aon_isense_clk		0xa9e9b297
+#define clk_gpu_gx_gfx3d_clk		0xb7ece823
+#define clk_gpu_mx_clk			0xb80ccedf
+#define clk_gpu_gx_rbbmtimer_clk	0xdeba634e
+#define clk_mdss_ahb_clk		0x684ccb41
+#define clk_mdss_axi_clk		0xcc07d687
+#define clk_mdss_esc0_clk		0x28cafbe6
+#define clk_mdss_esc1_clk		0xc22c6883
+#define clk_mdss_hdmi_ahb_clk		0x01cef516
+#define clk_mdss_hdmi_clk		0x097a6de9
+#define clk_mdss_mdp_clk		0x618336ac
+#define clk_mdss_vsync_clk		0x42a022d3
+#define clk_mmss_misc_ahb_clk		0xea30b0e7
+#define clk_mmss_misc_cxo_clk		0xe620cd80
+#define clk_mmagic_bimc_noc_cfg_ahb_clk 0x12d5ba72
+#define clk_mmagic_camss_axi_clk	0xa8b1c16b
+#define clk_mmagic_camss_noc_cfg_ahb_clk 0x5182c819
+#define clk_mmss_mmagic_cfg_ahb_clk	0x5e94a822
+#define clk_mmagic_mdss_axi_clk		0xa0359d10
+#define clk_mmagic_mdss_noc_cfg_ahb_clk 0x9c6d5482
+#define clk_mmagic_video_axi_clk	0x7b9219c3
+#define clk_mmagic_video_noc_cfg_ahb_clk 0x5124d256
+#define clk_mmss_mmagic_ahb_clk		0x3d15f2b0
+#define clk_mmss_mmagic_maxi_clk	0xbdaf5af7
+#define clk_mmss_rbcpr_ahb_clk		0x623ba55f
+#define clk_mmss_rbcpr_clk		0x69a23a6f
+#define clk_mmss_spdm_cpp_clk		0xefe35cd2
+#define clk_mmss_spdm_jpeg_dma_clk	0xcb7bd5a0
+#define clk_smmu_cpp_ahb_clk		0x3ad82d84
+#define clk_smmu_cpp_axi_clk		0xa6bb2f4a
+#define clk_smmu_jpeg_ahb_clk		0x10c436ec
+#define clk_smmu_jpeg_axi_clk		0x41112f37
+#define clk_smmu_mdp_ahb_clk		0x04994cb2
+#define clk_smmu_mdp_axi_clk		0x7fd71687
+#define clk_smmu_rot_ahb_clk		0xa30772c9
+#define clk_smmu_rot_axi_clk		0xfed7c078
+#define clk_smmu_vfe_ahb_clk		0x4dabebe7
+#define clk_smmu_vfe_axi_clk		0xde483725
+#define clk_smmu_video_ahb_clk		0x2d738e2c
+#define clk_smmu_video_axi_clk		0xe2b5b887
+#define clk_video_ahb_clk		0x90775cfb
+#define clk_video_axi_clk		0xe6c16dba
+#define clk_video_core_clk		0x7e876ec3
+#define clk_video_maxi_clk		0x97749db6
+#define clk_video_subcore0_clk		0xb6f63e6c
+#define clk_video_subcore1_clk		0x26c29cb4
+#define clk_vmem_ahb_clk		0xab6223ff
+#define clk_vmem_maxi_clk		0x15ef32db
+#define clk_mmss_debug_mux		0xe646ffda
+#define clk_mmss_gcc_dbg_clk		0xafa4d48a
+#define clk_gfx3d_clk_src		0x917f76ef
+#define clk_extpclk_clk_src		0xb2c31abd
+#define clk_mdss_byte0_clk		0xf5a03f64
+#define clk_mdss_byte1_clk		0xb8c7067d
+#define clk_mdss_extpclk_clk		0xfa5aadb0
+#define clk_mdss_pclk0_clk		0x3487234a
+#define clk_mdss_pclk1_clk		0xd5804246
+#define clk_gpu_gcc_dbg_clk		0x0ccc42cd
+#define clk_mdss_mdp_vote_clk		0x588460a4
+#define clk_mdss_rotator_vote_clk	0x5b1f675e
+#define clk_mmpll2_postdiv_clk		0x4fdeaaba
+#define clk_mmpll8_postdiv_clk		0xedf57882
+#define clk_mmpll9_postdiv_clk		0x3064b618
+#define clk_gfx3d_clk_src_v2		0x4210acb7
+#define clk_byte0_clk_src		0x75cc885b
+#define clk_byte1_clk_src		0x63c2c955
+#define clk_pclk0_clk_src		0xccac1f35
+#define clk_pclk1_clk_src		0x090f68ac
+#define clk_ext_byte0_clk_src		0xfb32f31e
+#define clk_ext_byte1_clk_src		0x585ef6d4
+#define clk_ext_pclk0_clk_src		0x087c1612
+#define clk_ext_pclk1_clk_src		0x8067c5a3
+
+/* clock_debug controlled clocks */
+#define clk_gcc_debug_mux		0x8121ac15
+
+/* external multimedia clocks */
+#define clk_dsi0pll_pixel_clk_mux	0x792379e1
+#define clk_dsi0pll_byte_clk_mux	0x60e83f06
+#define clk_dsi0pll_byte_clk_src	0xbbaa30be
+#define clk_dsi0pll_pixel_clk_src	0x45b3260f
+#define clk_dsi0pll_n2_div_clk		0x1474c213
+#define clk_dsi0pll_post_n1_div_clk	0xdab8c389
+#define clk_dsi0pll_vco_clk		0x15940d40
+#define clk_dsi1pll_pixel_clk_mux	0x36458019
+#define clk_dsi1pll_byte_clk_mux	0xb5a42b7b
+#define clk_dsi1pll_byte_clk_src	0x63930a8f
+#define clk_dsi1pll_pixel_clk_src	0x0e4c9b56
+#define clk_dsi1pll_n2_div_clk		0x2c9d4007
+#define clk_dsi1pll_post_n1_div_clk	0x03020041
+#define clk_dsi1pll_vco_clk		0x99797b50
+#define clk_mdss_dsi1_vco_clk_src	0xfcd15658
+#define clk_hdmi_vco_clk		0x66003284
+
+#define clk_dsi0pll_shadow_byte_clk_src	0x177c029c
+#define clk_dsi0pll_shadow_pixel_clk_src	0x98ae3c92
+#define clk_dsi0pll_shadow_n2_div_clk	0xd5f0dad9
+#define clk_dsi0pll_shadow_post_n1_div_clk	0x1f7c8cf8
+#define clk_dsi0pll_shadow_vco_clk	0xb100ca83
+#define clk_dsi1pll_shadow_byte_clk_src	0xfc021ce5
+#define clk_dsi1pll_shadow_pixel_clk_src	0xdcca3ffc
+#define clk_dsi1pll_shadow_n2_div_clk		0x189541bf
+#define clk_dsi1pll_shadow_post_n1_div_clk	0x1637020e
+#define clk_dsi1pll_shadow_vco_clk		0x68d8b6f7
+
+/* CPU clocks */
+#define clk_pwrcl_clk 0xc554130e
+#define clk_pwrcl_pll 0x25454ca1
+#define clk_pwrcl_alt_pll 0xc445471b
+#define clk_pwrcl_pll_main 0x28948e22
+#define clk_pwrcl_alt_pll_main 0x25c8270e
+#define clk_pwrcl_hf_mux 0x77706ae6
+#define clk_pwrcl_lf_mux 0xd99e334d
+#define clk_perfcl_clk 0x58869997
+#define clk_perfcl_pll 0x97dcec1c
+#define clk_perfcl_alt_pll 0xfe2eaea1
+#define clk_perfcl_pll_main 0x0dbf0c0b
+#define clk_perfcl_alt_pll_main 0x0b892aab
+#define clk_perfcl_hf_mux 0x9e8bbe59
+#define clk_perfcl_lf_mux 0x2f9c278d
+#define clk_cbf_pll 0xfe2e96a3
+#define clk_cbf_pll_main 0x2b05cf95
+#define clk_cbf_hf_mux 0x71244f73
+#define clk_cbf_clk 0x48e9e16b
+#define clk_xo_ao 0x428c856d
+#define clk_sys_apcsaux_clk 0x0b0dd513
+#define clk_cpu_debug_mux 0xc7acaa31
+
+/* GCC block resets */
+#define QUSB2PHY_PRIM_BCR		0
+#define QUSB2PHY_SEC_BCR		1
+#define BLSP1_BCR			2
+#define BLSP2_BCR			3
+#define BOOT_ROM_BCR			4
+#define PRNG_BCR			5
+#define UFS_BCR				6
+#define USB_20_BCR			7
+#define USB_30_BCR			8
+#define USB3_PHY_BCR			9
+#define USB3PHY_PHY_BCR			10
+#define PCIE_0_PHY_BCR			11
+#define PCIE_1_PHY_BCR			12
+#define PCIE_2_PHY_BCR			13
+#define PCIE_PHY_BCR			14
+#define PCIE_PHY_COM_BCR		15
+#define PCIE_PHY_NOCSR_COM_PHY_BCR	16
+
+/* MMSS block resets */
+#define VIDEO_BCR			0
+#define MDSS_BCR			1
+#define CAMSS_MICRO_BCR			2
+#define CAMSS_JPEG_BCR			3
+#define CAMSS_VFE0_BCR			4
+#define CAMSS_VFE1_BCR			5
+#define FD_BCR				6
+#define GPU_GX_BCR			7
+
+#endif
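
Unlike most dt-bindings clock headers, the clk_* macros above expand to unique 32-bit identifiers rather than small indices, so a consumer passes the macro directly in its clock specifier; the *_BCR macros at the end are conventional reset indices. A minimal sketch of a consumer node (unit address, labels, and the chosen clocks are illustrative assumptions):

	uart2: serial@75b0000 {
		clocks = <&clock_gcc clk_gcc_blsp1_uart2_apps_clk>,
			 <&clock_gcc clk_gcc_blsp1_ahb_clk>;
		clock-names = "core", "iface";
		resets = <&clock_gcc BLSP1_BCR>;
	};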
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/arch/metag/boot/dts/include/dt-bindings/clock/msm-clocks-8998.h	2019-01-22 16:16:28.171288678 +0100
@@ -0,0 +1,534 @@
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MSM_CLOCKS_8998_H
+#define __MSM_CLOCKS_8998_H
+
+#include "audio-ext-clk.h"
+
+/* clock_rpm controlled clocks */
+#define clk_ce1_clk				0x42229c55
+#define clk_ce1_a_clk				0x44a833fe
+#define clk_cxo_clk_src				0x79e95308
+#define clk_bimc_clk				0x4b80bf00
+#define clk_bimc_a_clk				0x4b25668a
+#define clk_cnoc_clk				0xd5ccb7f4
+#define clk_cnoc_a_clk				0xd8fe2ccc
+#define clk_snoc_clk				0x2c341aa0
+#define clk_snoc_a_clk				0x8fcef2af
+#define clk_cnoc_periph_clk			0xb11e9cf9
+#define clk_cnoc_periph_a_clk			0x1d7faa2e
+#define clk_cnoc_periph_keepalive_a_clk		0x7287aef2
+#define clk_ln_bb_clk1				0xb867b147
+#define clk_ln_bb_clk1_ao			0x7f63a93a
+#define clk_ln_bb_clk1_pin			0x6fc5653c
+#define clk_ln_bb_clk1_pin_ao			0x25d625bf
+#define clk_ln_bb_clk2				0xf83e6387
+#define clk_ln_bb_clk2_ao			0x96f09628
+#define clk_ln_bb_clk2_pin			0xa9ebe8d5
+#define clk_ln_bb_clk2_pin_ao			0x89a1226f
+#define clk_ln_bb_clk3				0x4f52a39e
+#define clk_ln_bb_clk3_ao			0xb15eba76
+#define clk_ln_bb_clk3_pin			0xc4de7dad
+#define clk_ln_bb_clk3_pin_ao			0xc01022e8
+#define clk_bimc_msmbus_clk			0xd212feea
+#define clk_bimc_msmbus_a_clk			0x71d1a499
+#define clk_cnoc_msmbus_clk			0x62228b5d
+#define clk_cnoc_msmbus_a_clk			0x67442955
+#define clk_cxo_clk_src_ao			0x64eb6004
+#define clk_cxo_dwc3_clk			0xf79c19f6
+#define clk_cxo_lpm_clk				0x94adbf3d
+#define clk_cxo_otg_clk				0x4eec0bb9
+#define clk_cxo_pil_lpass_clk			0xe17f0ff6
+#define clk_cxo_pil_ssc_clk			0x81832015
+#define clk_cxo_pil_spss_clk			0x5cd71a61
+#define clk_div_clk1				0xaa1157a6
+#define clk_div_clk1_ao				0x6b943d68
+#define clk_div_clk2				0xd454019f
+#define clk_div_clk2_ao				0x53f9e788
+#define clk_div_clk3				0xa9a55a68
+#define clk_div_clk3_ao				0x3d6725a8
+#define clk_ipa_clk				0xfa685cda
+#define clk_ipa_a_clk				0xeeec2919
+#define clk_mcd_ce1_clk				0xbb615d26
+#define clk_mmssnoc_axi_clk			0xdb4b31e6
+#define clk_mmssnoc_axi_a_clk			0xd4970614
+#define clk_qcedev_ce1_clk			0x293f97b0
+#define clk_qcrypto_ce1_clk			0xa6ac14df
+#define clk_qdss_clk				0x1492202a
+#define clk_qdss_a_clk				0xdd121669
+#define clk_qseecom_ce1_clk			0xaa858373
+#define clk_rf_clk1				0xaabeea5a
+#define clk_rf_clk1_ao				0x72a10cb8
+#define clk_rf_clk1_pin				0x8f463562
+#define clk_rf_clk1_pin_ao			0x62549ff6
+#define clk_rf_clk2				0x24a30992
+#define clk_rf_clk2_ao				0x944d8bbd
+#define clk_rf_clk2_pin				0xa7c5602a
+#define clk_rf_clk2_pin_ao			0x2d75eb4d
+#define clk_rf_clk3				0xb673936b
+#define clk_rf_clk3_ao				0x038bb968
+#define clk_rf_clk3_pin				0x726f53f5
+#define clk_rf_clk3_pin_ao			0x76f9240f
+#define clk_scm_ce1_clk				0xd8ebcc62
+#define clk_snoc_msmbus_clk			0xe6900bb6
+#define clk_snoc_msmbus_a_clk			0x5d4683bd
+#define clk_gcc_ce1_ahb_m_clk			0x2eb28c01
+#define clk_gcc_ce1_axi_m_clk			0xc174dfba
+#define clk_aggre1_noc_clk			0x049abba8
+#define clk_aggre1_noc_a_clk			0xc12e4220
+#define clk_aggre2_noc_clk			0xaa681404
+#define clk_aggre2_noc_a_clk			0xcab67089
+#define clk_measure_only_bimc_hmss_axi_clk	0xc1cc4f11
+
+/* clock_gcc controlled clocks */
+#define clk_debug_mmss_clk			0x977c99b6
+#define clk_debug_rpm_clk			0x8e2b07ca
+#define clk_debug_cpu_clk			0x0e696b2b
+#define clk_gpu_gcc_debug_clk			0x3eb88190
+#define clk_gfx_gcc_debug_clk			0xa3a64fec
+#define clk_gpll0				0x1ebe3bc4
+#define clk_gpll0_out_main			0xe9374de7
+#define clk_gpll0_ao				0xa1368304
+#define clk_gcc_mmss_gpll0_clk			0x8050f008
+#define clk_gcc_mmss_gpll0_div_clk		0xdd06848d
+#define clk_gcc_gpu_gpll0_clk			0xdad7a7a4
+#define clk_gcc_gpu_gpll0_div_clk		0x07d16c6a
+#define clk_gpll4				0xb3b5d85b
+#define clk_gpll4_out_main			0xa9a0ab9d
+#define clk_usb30_master_clk_src		0xc6262f89
+#define clk_pcie_aux_clk_src			0xebc50566
+#define clk_ufs_axi_clk_src			0x297ca380
+#define clk_blsp1_qup1_i2c_apps_clk_src		0x17f78f5e
+#define clk_blsp1_qup1_spi_apps_clk_src		0xf534c4fa
+#define clk_blsp1_qup2_i2c_apps_clk_src		0x8de71c79
+#define clk_blsp1_qup2_spi_apps_clk_src		0x33cf809a
+#define clk_blsp1_qup3_i2c_apps_clk_src		0xf161b902
+#define clk_blsp1_qup3_spi_apps_clk_src		0x5e95683f
+#define clk_blsp1_qup4_i2c_apps_clk_src		0xb2ecce68
+#define clk_blsp1_qup4_spi_apps_clk_src		0xddb5bbdb
+#define clk_blsp1_qup5_i2c_apps_clk_src		0x71ea7804
+#define clk_blsp1_qup5_spi_apps_clk_src		0x9752f35f
+#define clk_blsp1_qup6_i2c_apps_clk_src		0x28806803
+#define clk_blsp1_qup6_spi_apps_clk_src		0x44a1edc4
+#define clk_blsp1_uart1_apps_clk_src		0xf8146114
+#define clk_blsp1_uart2_apps_clk_src		0xfc9c2f73
+#define clk_blsp1_uart3_apps_clk_src		0x600497f2
+#define clk_blsp2_qup1_i2c_apps_clk_src		0xd6d1e95d
+#define clk_blsp2_qup1_spi_apps_clk_src		0xcc1b8365
+#define clk_blsp2_qup2_i2c_apps_clk_src		0x603b5c51
+#define clk_blsp2_qup2_spi_apps_clk_src		0xd577dc44
+#define clk_blsp2_qup3_i2c_apps_clk_src		0xea82959c
+#define clk_blsp2_qup3_spi_apps_clk_src		0xd04b1e92
+#define clk_blsp2_qup4_i2c_apps_clk_src		0x73dc968c
+#define clk_blsp2_qup4_spi_apps_clk_src		0x25d4a2b1
+#define clk_blsp2_qup5_i2c_apps_clk_src		0xcc3698bd
+#define clk_blsp2_qup5_spi_apps_clk_src		0xfa0cf45e
+#define clk_blsp2_qup6_i2c_apps_clk_src		0x2fa53151
+#define clk_blsp2_qup6_spi_apps_clk_src		0x5ca86755
+#define clk_blsp2_uart1_apps_clk_src		0x562c66dc
+#define clk_blsp2_uart2_apps_clk_src		0xdd448080
+#define clk_blsp2_uart3_apps_clk_src		0x46b2e90f
+#define clk_gp1_clk_src				0xad85b97a
+#define clk_gp2_clk_src				0xfb1f0065
+#define clk_gp3_clk_src				0x63b693d6
+#define clk_hmss_rbcpr_clk_src			0xedd9a474
+#define clk_pdm2_clk_src			0x31e494fd
+#define clk_sdcc2_apps_clk_src			0xfc46c821
+#define clk_sdcc4_apps_clk_src			0x7aaaaa0c
+#define clk_tsif_ref_clk_src			0x4e9042d1
+#define clk_ufs_ice_core_clk_src		0xda8e7119
+#define clk_ufs_phy_aux_clk_src			0xc6bca085
+#define clk_ufs_unipro_core_clk_src		0x179e80a9
+#define clk_usb30_mock_utmi_clk_src		0xa024a976
+#define clk_usb3_phy_aux_clk_src		0x15eec63c
+#define clk_qspi_ref_clk_src			0xfe6b8e11
+#define clk_gcc_pcie_phy_0_reset		0x6bb4df33
+#define clk_gcc_usb3_phy_reset			0x03d559f1
+#define clk_gcc_usb3phy_phy_reset		0xb1a4f885
+#define clk_gcc_aggre1_ufs_axi_clk		0x873459d8
+#define clk_gcc_aggre1_ufs_axi_hw_ctl_clk	0x117a6f39
+#define clk_gcc_aggre1_usb3_axi_clk		0xc5c3fbe8
+#define clk_gcc_bimc_mss_q6_axi_clk		0x7437988f
+#define clk_gcc_blsp1_ahb_clk			0x8caa5b4f
+#define clk_gcc_blsp1_qup1_i2c_apps_clk		0xc303fae9
+#define clk_gcc_blsp1_qup1_spi_apps_clk		0x759a76b0
+#define clk_gcc_blsp1_qup2_i2c_apps_clk		0x1076f220
+#define clk_gcc_blsp1_qup2_spi_apps_clk		0x3e77d48f
+#define clk_gcc_blsp1_qup3_i2c_apps_clk		0x9e25ac82
+#define clk_gcc_blsp1_qup3_spi_apps_clk		0xfb978880
+#define clk_gcc_blsp1_qup4_i2c_apps_clk		0xd7f40f6f
+#define clk_gcc_blsp1_qup4_spi_apps_clk		0x80f8722f
+#define clk_gcc_blsp1_qup5_i2c_apps_clk		0xacae5604
+#define clk_gcc_blsp1_qup5_spi_apps_clk		0xbf3e15d7
+#define clk_gcc_blsp1_qup6_i2c_apps_clk		0x5c6ad820
+#define clk_gcc_blsp1_qup6_spi_apps_clk		0x780d9f85
+#define clk_gcc_blsp1_uart1_apps_clk		0xc7c62f90
+#define clk_gcc_blsp1_uart2_apps_clk		0xf8a61c96
+#define clk_gcc_blsp1_uart3_apps_clk		0xc3298bd7
+#define clk_gcc_blsp2_ahb_clk			0x8f283c1d
+#define clk_gcc_blsp2_qup1_i2c_apps_clk		0x9ace11dd
+#define clk_gcc_blsp2_qup1_spi_apps_clk		0xa32604cc
+#define clk_gcc_blsp2_qup2_i2c_apps_clk		0x1bf9a57e
+#define clk_gcc_blsp2_qup2_spi_apps_clk		0xbf54ca6d
+#define clk_gcc_blsp2_qup3_i2c_apps_clk		0x336d4170
+#define clk_gcc_blsp2_qup3_spi_apps_clk		0xc68509d6
+#define clk_gcc_blsp2_qup4_i2c_apps_clk		0xbd22539d
+#define clk_gcc_blsp2_qup4_spi_apps_clk		0x01a72b93
+#define clk_gcc_blsp2_qup5_i2c_apps_clk		0xe2b2ce1d
+#define clk_gcc_blsp2_qup5_spi_apps_clk		0xf40999cd
+#define clk_gcc_blsp2_qup6_i2c_apps_clk		0x894bcea4
+#define clk_gcc_blsp2_qup6_spi_apps_clk		0xfe1bd34a
+#define clk_gcc_blsp2_uart1_apps_clk		0x8c3512ff
+#define clk_gcc_blsp2_uart2_apps_clk		0x1e1965a3
+#define clk_gcc_blsp2_uart3_apps_clk		0x382415ab
+#define clk_gcc_boot_rom_ahb_clk		0xde2adeb1
+#define clk_gcc_bimc_gfx_clk			0x3edd69ad
+#define clk_gcc_cfg_noc_usb3_axi_clk		0x9ea4c2d9
+#define clk_gcc_gp1_clk				0x057f7b69
+#define clk_gcc_gp2_clk				0x9bf83ffd
+#define clk_gcc_gp3_clk				0xec6539ee
+#define clk_gcc_gpu_bimc_gfx_clk		0x3909459b
+#define clk_gcc_gpu_cfg_ahb_clk			0x72f20a57
+#define clk_gcc_gpu_iref_clk			0xfd82abad
+#define clk_gcc_hmss_dvm_bus_clk		0x17cc8b53
+#define clk_gcc_hmss_rbcpr_clk			0x699183be
+#define clk_hmss_gpll0_clk_src			0x17eb05d0
+#define clk_hmss_gpll4_clk_src			0x20456cae
+#define clk_gcc_mmss_sys_noc_axi_clk		0x4467b15b
+#define clk_gcc_mss_at_clk			0x1692c5aa
+#define clk_nav_gcc_dbg_clk			0x2221c544
+#define clk_gcc_pcie_0_aux_clk			0x3d2e3ece
+#define clk_gcc_pcie_0_cfg_ahb_clk		0x4dd325c3
+#define clk_gcc_pcie_0_mstr_axi_clk		0x3f85285b
+#define clk_gcc_pcie_0_pipe_clk			0x4f37621e
+#define clk_gcc_pcie_0_slv_axi_clk		0xd69638a1
+#define clk_gcc_pcie_phy_aux_clk		0x4746e74f
+#define clk_gcc_pdm2_clk			0x99d55711
+#define clk_gcc_pdm_ahb_clk			0x365664f6
+#define clk_gcc_prng_ahb_clk			0x397e7eaa
+#define clk_gcc_sdcc2_ahb_clk			0x23d5727f
+#define clk_gcc_sdcc2_apps_clk			0x861b20ac
+#define clk_gcc_sdcc4_ahb_clk			0x64f3e6a8
+#define clk_gcc_sdcc4_apps_clk			0xbf7c4dc8
+#define clk_gcc_tsif_ahb_clk			0x88d2822c
+#define clk_gcc_tsif_ref_clk			0x8f1ed2c2
+#define clk_gcc_ufs_ahb_clk			0x1914bb84
+#define clk_gcc_ufs_axi_clk			0x47c743a7
+#define clk_gcc_ufs_axi_hw_ctl_clk		0x69385b45
+#define clk_gcc_ufs_ice_core_clk		0x310b0710
+#define clk_gcc_ufs_ice_core_hw_ctl_clk		0x84e15a5b
+#define clk_gcc_ufs_phy_aux_clk			0x17acc8fb
+#define clk_gcc_ufs_phy_aux_hw_ctl_clk		0x7dbdb2e2
+#define clk_gcc_ufs_rx_symbol_0_clk		0x7f43251c
+#define clk_gcc_ufs_rx_symbol_1_clk		0x03182fde
+#define clk_gcc_ufs_tx_symbol_0_clk		0x6a9f747a
+#define clk_ufs_tx_symbol_0_clk			0xb3fcd0f7
+#define clk_ufs_rx_symbol_0_clk			0x17a0f1cd
+#define clk_gcc_ufs_unipro_core_clk		0x2daf7fd2
+#define clk_gcc_ufs_unipro_core_hw_ctl_clk	0x4a4e0f3d
+#define clk_gcc_usb30_master_clk		0xb3b4e2cb
+#define clk_gcc_usb30_mock_utmi_clk		0xa800b65a
+#define clk_gcc_usb30_sleep_clk			0xd0b65c92
+#define clk_gcc_usb3_phy_aux_clk		0x0d9a36e0
+#define clk_gcc_usb3_phy_pipe_clk		0xf279aff2
+#define clk_gcc_usb3_clkref_clk			0xb6cc8f00
+#define clk_gcc_hdmi_clkref_clk			0x4d4eec04
+#define clk_gcc_edp_clkref_clk			0xa8685c3f
+#define clk_gcc_ufs_clkref_clk			0x92aa126f
+#define clk_gcc_pcie_clkref_clk			0xa2e247fa
+#define clk_gcc_rx2_qlink_clkref_clk		0xd0ba986d
+#define clk_gcc_rx1_usb2_clkref_clk		0x53351d25
+#define clk_gcc_pcie_phy_reset			0x9bc3c959
+#define clk_gcc_pcie_phy_com_reset		0x8bf513e6
+#define clk_gcc_pcie_phy_nocsr_com_phy_reset	0x0c16a2da
+#define clk_gcc_qusb2phy_prim_reset		0x07550fa1
+#define clk_gcc_qusb2phy_sec_reset		0x3f3a87d0
+#define clk_gcc_mmss_noc_cfg_ahb_clk		0xb41a9d99
+#define clk_gcc_dcc_ahb_clk			0xfa14a88c
+#define clk_hlos1_vote_lpass_core_smmu_clk	0x3aaa1743
+#define clk_hlos1_vote_lpass_adsp_smmu_clk	0xc76f702f
+#define clk_gcc_mss_cfg_ahb_clk			0x111cde81
+#define clk_gcc_mss_q6_bimc_axi_clk		0x67544d62
+#define clk_gcc_mss_mnoc_bimc_axi_clk		0xf665d03f
+#define clk_gpll0_out_msscc			0x7d794829
+#define clk_gcc_mss_snoc_axi_clk		0x0e71de85
+#define clk_gcc_qspi_ref_clk			0x766a0f7c
+#define clk_gcc_qspi_ahb_clk			0x96969dc8
+#define clk_gcc_debug_mux			0x8121ac15
+
+/* clock_mmss controlled clocks */
+#define clk_mmsscc_xo				0x05e63704
+#define clk_mmsscc_gpll0			0xe900c515
+#define clk_mmsscc_gpll0_div			0x73892e05
+#define clk_mmpll0_pll				0x361e3cfd
+#define clk_mmpll1_pll				0x198e426b
+#define clk_mmpll3_pll				0x18c76899
+#define clk_mmpll4_pll				0x22c063c1
+#define clk_mmpll5_pll				0xa41e1936
+#define clk_mmpll6_pll				0xc56fb440
+#define clk_mmpll7_pll				0x3ac216af
+#define clk_mmpll10_pll				0x2561263b
+#define clk_mmpll0_pll_out			0x1e9e24a8
+#define clk_mmpll1_pll_out			0x5fa32257
+#define clk_mmpll3_pll_out			0x6eb6328f
+#define clk_mmpll4_pll_out			0xfb21c2fd
+#define clk_mmpll5_pll_out			0xcc1897bf
+#define clk_mmpll6_pll_out			0xfb1060bd
+#define clk_mmpll7_pll_out			0x767758ed
+#define clk_mmpll10_pll_out			0x3c5668f3
+#define clk_ahb_clk_src				0x86f49203
+#define clk_csi0_clk_src			0x227e65bc
+#define clk_vfe0_clk_src			0xa0c2bd8f
+#define clk_vfe1_clk_src			0x4e357366
+#define clk_mdp_clk_src				0x6dc1f8f1
+#define clk_maxi_clk_src			0x52c09777
+#define clk_cpp_clk_src				0x8382f56d
+#define clk_jpeg0_clk_src			0x9a0a0ac3
+#define clk_rot_clk_src				0xce49b56c
+#define clk_video_core_clk_src			0x8be4c944
+#define clk_csi1_clk_src			0x6a2a6c36
+#define clk_csi2_clk_src			0x4113589f
+#define clk_csi3_clk_src			0xfd934012
+#define clk_fd_core_clk_src			0xe4799ab7
+#define clk_ext_dp_phy_pll_vco			0x441b576b
+#define clk_ext_dp_phy_pll_link			0xea12644c
+#define clk_dp_link_clk_src			0x370d0626
+#define clk_dp_crypto_clk_src			0xf8faa811
+#define clk_dp_pixel_clk_src			0xf5dfbabf
+#define clk_ext_extpclk_clk_src			0xe5b273af
+#define clk_ext_pclk0_clk_src			0x087c1612
+#define clk_ext_pclk1_clk_src			0x8067c5a3
+#define clk_pclk0_clk_src			0xccac1f35
+#define clk_pclk1_clk_src			0x090f68ac
+#define clk_video_subcore0_clk_src		0x88d79636
+#define clk_video_subcore1_clk_src		0x4966930c
+#define clk_cci_clk_src				0x822f3d97
+#define clk_camss_gp0_clk_src			0x43b063e9
+#define clk_camss_gp1_clk_src			0xa3315f1b
+#define clk_mclk0_clk_src			0x266b3853
+#define clk_mclk1_clk_src			0xa73cad0c
+#define clk_mclk2_clk_src			0x42545468
+#define clk_mclk3_clk_src			0x2bfbb714
+#define clk_csiphy_clk_src			0x8cceb70a
+#define clk_csi0phytimer_clk_src		0xc8a309be
+#define clk_csi1phytimer_clk_src		0x7c0fe23a
+#define clk_csi2phytimer_clk_src		0x62ffea9c
+#define clk_ext_byte0_clk_src			0xfb32f31e
+#define clk_ext_byte1_clk_src			0x585ef6d4
+#define clk_byte0_clk_src			0x75cc885b
+#define clk_byte1_clk_src			0x63c2c955
+#define clk_dp_aux_clk_src			0x2b6e972b
+#define clk_dp_gtc_clk_src			0xc5a86a42
+#define clk_esc0_clk_src			0xb41d7c38
+#define clk_esc1_clk_src			0x3b0afa42
+#define clk_extpclk_clk_src			0xb2c31abd
+#define clk_hdmi_clk_src			0xb40aeea9
+#define clk_vsync_clk_src			0xecb43940
+#define clk_mmss_bimc_smmu_ahb_clk		0x4825baf4
+#define clk_mmss_bimc_smmu_axi_clk		0xc365ac39
+#define clk_mmss_snoc_dvm_axi_clk		0x2c159a11
+#define clk_mmss_camss_ahb_clk			0xa51f2c1d
+#define clk_mmss_camss_cci_ahb_clk		0xfda8bb6a
+#define clk_mmss_camss_cci_clk			0x71bb5c97
+#define clk_mmss_camss_cpp_ahb_clk		0xd5554f15
+#define clk_mmss_camss_cpp_clk			0x8e99ef57
+#define clk_mmss_camss_cpp_axi_clk		0xd84e390b
+#define clk_mmss_camss_cpp_vbif_ahb_clk		0x1b33a88e
+#define clk_mmss_camss_cphy_csid0_clk		0x56114361
+#define clk_mmss_camss_csi0_ahb_clk		0x2b58d241
+#define clk_mmss_camss_csi0_clk			0xccfe39ef
+#define clk_mmss_camss_csi0pix_clk		0x9e26509d
+#define clk_mmss_camss_csi0rdi_clk		0x01d5bf83
+#define clk_mmss_camss_cphy_csid1_clk		0x79fbcd8a
+#define clk_mmss_camss_csi1_ahb_clk		0x7073244b
+#define clk_mmss_camss_csi1_clk			0x3eeeaac0
+#define clk_mmss_camss_csi1pix_clk		0xf1375139
+#define clk_mmss_camss_csi1rdi_clk		0x43185024
+#define clk_mmss_camss_cphy_csid2_clk		0xf295e3ef
+#define clk_mmss_camss_csi2_ahb_clk		0x681c1479
+#define clk_mmss_camss_csi2_clk			0x94524569
+#define clk_mmss_camss_csi2pix_clk		0xf4de617d
+#define clk_mmss_camss_csi2rdi_clk		0x4bf01dc5
+#define clk_mmss_camss_cphy_csid3_clk		0x100188e9
+#define clk_mmss_camss_csi3_ahb_clk		0xfae7c29b
+#define clk_mmss_camss_csi3_clk			0x55e4bbae
+#define clk_mmss_camss_csi3pix_clk		0xc166a015
+#define clk_mmss_camss_csi3rdi_clk		0x6983a4cd
+#define clk_mmss_camss_csi_vfe0_clk		0x3b30b798
+#define clk_mmss_camss_csi_vfe1_clk		0xfe729af7
+#define clk_mmss_camss_csiphy0_clk		0x96c81af8
+#define clk_mmss_camss_csiphy1_clk		0xee9ac2bb
+#define clk_mmss_camss_csiphy2_clk		0x3365e70e
+#define clk_mmss_fd_ahb_clk			0x4ff1da4d
+#define clk_mmss_fd_core_clk			0x749e7eb0
+#define clk_mmss_fd_core_uar_clk		0x8ea480c5
+#define clk_mmss_camss_gp0_clk			0x3f7f6c87
+#define clk_mmss_camss_gp1_clk			0xdccdd730
+#define clk_mmss_camss_ispif_ahb_clk		0xbda4f0e3
+#define clk_mmss_camss_jpeg0_clk		0x4cc73b07
+#define clk_mmss_camss_jpeg0_vote_clk		0xc9efa6ac
+#define clk_mmss_camss_jpeg0_dma_vote_clk	0x371ec109
+#define clk_mmss_camss_jpeg_ahb_clk		0xde1fece3
+#define clk_mmss_camss_jpeg_axi_clk		0x7534616b
+#define clk_mmss_camss_mclk0_clk		0x056293a7
+#define clk_mmss_camss_mclk1_clk		0x96c7b69b
+#define clk_mmss_camss_mclk2_clk		0x8820556e
+#define clk_mmss_camss_mclk3_clk		0xf90ffb67
+#define clk_mmss_camss_micro_ahb_clk		0x6c6fd3c7
+#define clk_mmss_camss_csi0phytimer_clk		0x7a78864e
+#define clk_mmss_camss_csi1phytimer_clk		0x6e6c1de5
+#define clk_mmss_camss_csi2phytimer_clk		0x0235e2de
+#define clk_mmss_camss_top_ahb_clk		0x120618d6
+#define clk_mmss_camss_vfe0_ahb_clk		0x137bd0bd
+#define clk_mmss_camss_vfe0_clk			0xead28288
+#define clk_mmss_camss_vfe0_stream_clk		0xa0428287
+#define clk_mmss_camss_vfe1_ahb_clk		0xac0154c0
+#define clk_mmss_camss_vfe1_clk			0xc216b14d
+#define clk_mmss_camss_vfe1_stream_clk		0x745af3b6
+#define clk_mmss_camss_vfe_vbif_ahb_clk		0x0109a9c6
+#define clk_mmss_camss_vfe_vbif_axi_clk		0xe626d8a1
+#define clk_mmss_mdss_ahb_clk			0x85d37ab5
+#define clk_mmss_mdss_axi_clk			0xdf04fc1d
+#define clk_mmss_mdss_byte0_clk			0x38105d25
+#define clk_mmss_mdss_byte0_intf_clk		0x38e5aa79
+#define clk_mmss_mdss_byte0_intf_div_clk	0x8604f181
+#define clk_mmss_mdss_byte1_clk			0xe0c21354
+#define clk_mmss_mdss_byte1_intf_clk		0xcf654d8e
+#define clk_mmss_mdss_byte1_intf_div_clk	0xcdf334c5
+#define clk_mmss_mdss_dp_aux_clk		0x23125eb6
+#define clk_mmss_mdss_dp_crypto_clk		0x9a072d4e
+#define clk_mmss_mdss_dp_link_clk		0x8dd302d1
+#define clk_mmss_mdss_dp_link_intf_clk		0x70e386e6
+#define clk_mmss_mdss_dp_pixel_clk		0xb707b765
+#define clk_mmss_mdss_dp_gtc_clk		0xb59c151a
+#define clk_mmss_mdss_esc0_clk			0x5721ff83
+#define clk_mmss_mdss_esc1_clk			0xc3d0376b
+#define clk_mmss_mdss_extpclk_clk		0x74d5a954
+#define clk_mmss_mdss_hdmi_clk			0x28460a6d
+#define clk_mmss_mdss_hdmi_dp_ahb_clk		0x5448519f
+#define clk_mmss_mdss_mdp_clk			0x43539b0e
+#define clk_mmss_mdss_mdp_lut_clk		0x00627b2b
+#define clk_mmss_mdss_pclk0_clk			0xcc0e909d
+#define clk_mmss_mdss_pclk1_clk			0x850d9146
+#define clk_mmss_mdss_rot_clk			0xbb7e71c4
+#define clk_mmss_mdss_vsync_clk			0x629b36dc
+#define clk_mmss_misc_ahb_clk			0xea30b0e7
+#define clk_mmss_misc_cxo_clk			0xe620cd80
+#define clk_mmss_mnoc_ahb_clk			0x49a394f4
+#define clk_mmss_mnoc_maxi_clk			0xd8b7278f
+#define clk_mmss_video_subcore0_clk		0x23fae359
+#define clk_mmss_video_subcore1_clk		0x5213a0c7
+#define clk_mmss_video_ahb_clk			0x94334ae9
+#define clk_mmss_video_axi_clk			0xf3178ba5
+#define clk_mmss_video_core_clk			0x78f14c85
+#define clk_mmss_video_maxi_clk			0x1785ef88
+#define clk_mmss_vmem_ahb_clk			0x4b18955b
+#define clk_mmss_vmem_maxi_clk			0xb6067889
+#define clk_mmss_debug_mux			0xe646ffda
+
+/* external multimedia clocks */
+#define clk_dsi0pll_byteclk_mux			0xecf2c434
+#define clk_dsi0pll_byteclk_src			0x6f6f740f
+#define clk_dsi0pll_pclk_mux			0x6c9da335
+#define clk_dsi0pll_pclk_src			0x5efd85d4
+#define clk_dsi0pll_pclk_src_mux		0x84b14663
+#define clk_dsi0pll_post_bit_div		0xf46dcf27
+#define clk_dsi0pll_pll_out_div1		0xeda5b7fe
+#define clk_dsi0pll_pll_out_div2		0x97fa476d
+#define clk_dsi0pll_pll_out_div4		0x90a98ce0
+#define clk_dsi0pll_pll_out_div8		0x9d9d85cf
+#define clk_dsi0pll_pll_out_mux			0x179c27ca
+#define clk_dsi0pll_post_vco_mux		0xfaf9bd1f
+#define clk_dsi0pll_post_vco_div1		0xabb50b2a
+#define clk_dsi0pll_post_vco_div4		0xbe51c091
+#define clk_dsi0pll_bitclk_src			0x36c3c437
+#define clk_dsi0pll_vco_clk			0x15940d40
+
+#define clk_dsi1pll_byteclk_mux			0x14e2f38f
+#define clk_dsi1pll_byteclk_src			0x4b65c298
+#define clk_dsi1pll_pclk_mux			0x4c0518b5
+#define clk_dsi1pll_pclk_src			0xeddcd80e
+#define clk_dsi1pll_pclk_src_mux		0x3651feb3
+#define clk_dsi1pll_post_bit_div		0x712f0260
+#define clk_dsi1pll_pll_out_div8		0x87628ddb
+#define clk_dsi1pll_pll_out_div4		0x0d9a384b
+#define clk_dsi1pll_pll_out_div2		0x0c9b5748
+#define clk_dsi1pll_pll_out_div1		0x3193164e
+#define clk_dsi1pll_pll_out_mux			0x171bf8fd
+#define clk_dsi1pll_post_vco_mux		0xc6a90d20
+#define clk_dsi1pll_post_vco_div1		0x6f47ca7d
+#define clk_dsi1pll_post_vco_div4		0x90628974
+#define clk_dsi1pll_bitclk_src			0x13ab045b
+#define clk_dsi1pll_vco_clk			0x99797b50
+
+#define clk_dp_vco_clk				0xfcaaeec7
+#define clk_dp_link_2x_clk_divsel_five		0xcfe3f5dd
+#define clk_vco_divsel_four_clk_src		0xe0da19c0
+#define clk_vco_divsel_two_clk_src		0xb5cfc6a8
+#define clk_vco_divided_clk_src_mux		0x3f8197c2
+#define clk_hdmi_vco_clk			0xbb7dc20d
+
+/* clock_gpu controlled clocks */
+#define clk_gpucc_xo				0xc4e1a890
+#define clk_gpucc_gpll0				0x0db0e37f
+#define clk_gfx3d_clk_src			0x917f76ef
+#define clk_rbbmtimer_clk_src			0x17649ecc
+#define clk_gfx3d_isense_clk_src		0xecc3eafa
+#define clk_rbcpr_clk_src			0x2c2e9af2
+#define clk_gpu_debug_div_clk			0x75d6f53f
+#define clk_gpucc_gfx3d_clk			0x95f01bd5
+#define clk_gpucc_rbbmtimer_clk			0x58a0a7ca
+#define clk_gpucc_gfx3d_isense_clk		0xb2678e80
+#define clk_gpucc_cxo_clk			0x6532dcae
+#define clk_gpucc_rbcpr_clk			0x7bd750e8
+#define clk_gpu_pll0_pll			0x0e61ab4d
+#define clk_gpu_pll0_pll_out_even		0xb0ed5009
+#define clk_gpu_pll0_pll_out_odd		0x08c5a8a5
+#define clk_gpu_pll0_postdiv_clk		0x76c19f3c
+#define clk_gpucc_mx_clk			0x1edbb879
+#define clk_gpucc_gcc_dbg_clk			0x9ae8cd3c
+#define clk_gfxcc_dbg_clk			0x3ed47625
+
+/* CPU clocks */
+#define clk_pwrcl_clk				0xc554130e
+#define clk_perfcl_clk				0x58869997
+#define clk_sys_apcsaux_clk_gcc			0xf905e862
+#define clk_xo_ao				0x428c856d
+#define clk_osm_clk_src				0xaabe68c3
+#define clk_cpu_debug_mux			0x3ae8bcb2
+
+/* Audio External Clocks */
+#define clk_audio_ap_clk			0x9b5727cb
+#define clk_audio_pmi_clk			0xcbfe416d
+#define clk_audio_ap_clk2			0x454d1e91
+
+/* GCC block resets */
+#define QUSB2PHY_PRIM_BCR			0
+#define QUSB2PHY_SEC_BCR			1
+#define BLSP1_BCR				2
+#define BLSP2_BCR				3
+#define BOOT_ROM_BCR				4
+#define PRNG_BCR				5
+#define UFS_BCR					6
+#define USB_30_BCR				7
+#define USB3_PHY_BCR				8
+#define USB3PHY_PHY_BCR				9
+#define PCIE_0_PHY_BCR				10
+#define PCIE_PHY_BCR				11
+#define PCIE_PHY_COM_BCR			12
+#define PCIE_PHY_NOCSR_COM_PHY_BCR		13
+
+/* MMSS block resets */
+#define CAMSS_MICRO_BCR				0
+
+#endif
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/arch/metag/boot/dts/include/dt-bindings/clock/msm-clocks-hwio-8998.h	2019-01-22 16:16:28.171288678 +0100
@@ -0,0 +1,395 @@
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define CLKFLAG_NO_RATE_CACHE					0x00004000
+
+#define FMAX_LOWER						0
+#define FMAX_LOW						1
+#define FMAX_NOM						2
+#define FMAX_TURBO						3
+
+#define HALT_CHECK_DELAY					5
+
+#define RPM_MISC_CLK_TYPE					0x306b6c63
+#define RPM_BUS_CLK_TYPE					0x316b6c63
+#define RPM_MEM_CLK_TYPE					0x326b6c63
+#define RPM_IPA_CLK_TYPE					0x617069
+#define RPM_CE_CLK_TYPE						0x6563
+#define RPM_AGGR_CLK_TYPE					0x72676761
+#define RPM_SMD_KEY_ENABLE					0x62616E45
+#define RPM_MMAXI_CLK_TYPE					0x69786d6d
+
+#define CXO_CLK_SRC_ID						0x0
+#define QDSS_CLK_ID						0x1
+#define SNOC_CLK_ID						0x1
+#define CNOC_CLK_ID						0x2
+#define CNOC_PERIPH_CLK_ID					0x0
+#define BIMC_CLK_ID						0x0
+#define IPA_CLK_ID						0x0
+#define CE1_CLK_ID						0x0
+#define RF_CLK1_ID						0x4
+#define RF_CLK2_ID						0x5
+#define RF_CLK3_ID						0x6
+#define LN_BB_CLK1_ID						0x1
+#define LN_BB_CLK2_ID						0x2
+#define LN_BB_CLK3_ID						0x3
+#define DIV_CLK1_ID						0xb
+#define DIV_CLK2_ID						0xc
+#define DIV_CLK3_ID						0xd
+#define RF_CLK1_PIN_ID						0x4
+#define RF_CLK2_PIN_ID						0x5
+#define RF_CLK3_PIN_ID						0x6
+#define LN_BB_CLK1_PIN_ID					0x1
+#define LN_BB_CLK2_PIN_ID					0x2
+#define LN_BB_CLK3_PIN_ID					0x3
+#define AGGR1_NOC_ID						0x1
+#define AGGR2_NOC_ID						0x2
+#define MMSSNOC_AXI_CLK_ID					0x0
+
+#define APCS_COMMON_LMH_CMD_RCGR				0x0012C
+
+#define GCC_APCS_GPLL_ENA_VOTE					0x52000
+#define GCC_APCS_CLOCK_BRANCH_ENA_VOTE				0x52004
+#define GCC_APCS_CLOCK_BRANCH_ENA_VOTE_1			0x5200C
+#define PLLTEST_PAD_CFG						0x6200C
+#define GCC_XO_DIV4_CBCR					0x43008
+#define CLOCK_FRQ_MEASURE_CTL					0x62004
+#define CLOCK_FRQ_MEASURE_STATUS				0x62008
+#define GCC_GPLL0_MODE						0x00000
+#define GCC_GPLL4_MODE						0x77000
+#define GCC_USB30_MASTER_CMD_RCGR				0x0F014
+#define GCC_PCIE_AUX_CMD_RCGR					0x6C000
+#define GCC_UFS_AXI_CMD_RCGR					0x75018
+#define GCC_BLSP1_QUP1_I2C_APPS_CMD_RCGR			0x19020
+#define GCC_BLSP1_QUP1_SPI_APPS_CMD_RCGR			0x1900C
+#define GCC_BLSP1_QUP2_I2C_APPS_CMD_RCGR			0x1B020
+#define GCC_BLSP1_QUP2_SPI_APPS_CMD_RCGR			0x1B00C
+#define GCC_BLSP1_QUP3_I2C_APPS_CMD_RCGR			0x1D020
+#define GCC_BLSP1_QUP3_SPI_APPS_CMD_RCGR			0x1D00C
+#define GCC_BLSP1_QUP4_I2C_APPS_CMD_RCGR			0x1F020
+#define GCC_BLSP1_QUP4_SPI_APPS_CMD_RCGR			0x1F00C
+#define GCC_BLSP1_QUP5_I2C_APPS_CMD_RCGR			0x21020
+#define GCC_BLSP1_QUP5_SPI_APPS_CMD_RCGR			0x2100C
+#define GCC_BLSP1_QUP6_I2C_APPS_CMD_RCGR			0x23020
+#define GCC_BLSP1_QUP6_SPI_APPS_CMD_RCGR			0x2300C
+#define GCC_BLSP1_UART1_APPS_CMD_RCGR				0x1A00C
+#define GCC_BLSP1_UART2_APPS_CMD_RCGR				0x1C00C
+#define GCC_BLSP1_UART3_APPS_CMD_RCGR				0x1E00C
+#define GCC_BLSP2_QUP1_I2C_APPS_CMD_RCGR			0x26020
+#define GCC_BLSP2_QUP2_I2C_APPS_CMD_RCGR			0x28020
+#define GCC_BLSP2_QUP3_I2C_APPS_CMD_RCGR			0x2A020
+#define GCC_BLSP2_QUP4_I2C_APPS_CMD_RCGR			0x2C020
+#define GCC_BLSP2_QUP5_I2C_APPS_CMD_RCGR			0x2E020
+#define GCC_BLSP2_QUP6_I2C_APPS_CMD_RCGR			0x30020
+#define GCC_BLSP2_QUP1_SPI_APPS_CMD_RCGR			0x2600C
+#define GCC_BLSP2_QUP2_SPI_APPS_CMD_RCGR			0x2800C
+#define GCC_BLSP2_QUP3_SPI_APPS_CMD_RCGR			0x2A00C
+#define GCC_BLSP2_QUP4_SPI_APPS_CMD_RCGR			0x2C00C
+#define GCC_BLSP2_QUP5_SPI_APPS_CMD_RCGR			0x2E00C
+#define GCC_BLSP2_QUP6_SPI_APPS_CMD_RCGR			0x3000C
+#define GCC_BLSP2_UART1_APPS_CMD_RCGR				0x2700C
+#define GCC_BLSP2_UART2_APPS_CMD_RCGR				0x2900C
+#define GCC_BLSP2_UART3_APPS_CMD_RCGR				0x2B00C
+#define GCC_GP1_CMD_RCGR					0x64004
+#define GCC_GP2_CMD_RCGR					0x65004
+#define GCC_GP3_CMD_RCGR					0x66004
+#define GCC_HMSS_RBCPR_CMD_RCGR					0x48044
+#define GCC_PDM2_CMD_RCGR					0x33010
+#define GCC_SDCC2_APPS_CMD_RCGR					0x14010
+#define GCC_SDCC4_APPS_CMD_RCGR					0x16010
+#define GCC_TSIF_REF_CMD_RCGR					0x36010
+#define GCC_UFS_ICE_CORE_CMD_RCGR				0x76010
+#define GCC_UFS_PHY_AUX_CMD_RCGR				0x76044
+#define GCC_UFS_UNIPRO_CORE_CMD_RCGR				0x76028
+#define GCC_USB30_MOCK_UTMI_CMD_RCGR				0x0F028
+#define GCC_USB3_PHY_AUX_CMD_RCGR				0x5000C
+#define GCC_QSPI_REF_CMD_RCGR					0x9000C
+#define GCC_PCIE_0_PHY_BCR					0x6C01C
+#define GCC_HDMI_CLKREF_EN					0x88000
+#define GCC_UFS_CLKREF_EN					0x88004
+#define GCC_USB3_CLKREF_EN					0x88008
+#define GCC_PCIE_CLKREF_EN					0x8800C
+#define GCC_RX1_USB2_CLKREF_EN					0x88014
+#define GCC_USB3_PHY_BCR					0x50020
+#define GCC_AGGRE1_NOC_XO_CBCR					0x8202C
+#define GCC_AGGRE1_UFS_AXI_CBCR					0x82028
+#define GCC_AGGRE1_USB3_AXI_CBCR				0x82024
+#define GCC_BIMC_MSS_Q6_AXI_CBCR				0x4401C
+#define GCC_BLSP1_AHB_CBCR					0x17004
+#define GCC_BLSP1_BCR						0x17000
+#define GCC_BLSP1_QUP1_SPI_APPS_CBCR				0x19004
+#define GCC_BLSP1_QUP1_I2C_APPS_CBCR				0x19008
+#define GCC_BLSP1_QUP2_SPI_APPS_CBCR				0x1B004
+#define GCC_BLSP1_QUP2_I2C_APPS_CBCR				0x1B008
+#define GCC_BLSP1_QUP3_SPI_APPS_CBCR				0x1D004
+#define GCC_BLSP1_QUP3_I2C_APPS_CBCR				0x1D008
+#define GCC_BLSP1_QUP4_SPI_APPS_CBCR				0x1F004
+#define GCC_BLSP1_QUP4_I2C_APPS_CBCR				0x1F008
+#define GCC_BLSP1_QUP5_SPI_APPS_CBCR				0x21004
+#define GCC_BLSP1_QUP5_I2C_APPS_CBCR				0x21008
+#define GCC_BLSP1_QUP6_SPI_APPS_CBCR				0x23004
+#define GCC_BLSP1_QUP6_I2C_APPS_CBCR				0x23008
+#define GCC_BLSP1_UART1_APPS_CBCR				0x1A004
+#define GCC_BLSP1_UART2_APPS_CBCR				0x1C004
+#define GCC_BLSP1_UART3_APPS_CBCR				0x1E004
+#define GCC_BLSP2_AHB_CBCR					0x25004
+#define GCC_BLSP2_BCR						0x25000
+#define GCC_BLSP2_QUP1_SPI_APPS_CBCR				0x26004
+#define GCC_BLSP2_QUP1_I2C_APPS_CBCR				0x26008
+#define GCC_BLSP2_QUP2_I2C_APPS_CBCR				0x28008
+#define GCC_BLSP2_QUP2_SPI_APPS_CBCR				0x28004
+#define GCC_BLSP2_QUP3_SPI_APPS_CBCR				0x2A004
+#define GCC_BLSP2_QUP3_I2C_APPS_CBCR				0x2A008
+#define GCC_BLSP2_QUP4_SPI_APPS_CBCR				0x2C004
+#define GCC_BLSP2_QUP4_I2C_APPS_CBCR				0x2C008
+#define GCC_BLSP2_QUP5_SPI_APPS_CBCR				0x2E004
+#define GCC_BLSP2_QUP5_I2C_APPS_CBCR				0x2E008
+#define GCC_BLSP2_QUP6_SPI_APPS_CBCR				0x30004
+#define GCC_BLSP2_QUP6_I2C_APPS_CBCR				0x30008
+#define GCC_BLSP2_UART1_APPS_CBCR				0x27004
+#define GCC_BLSP2_UART2_APPS_CBCR				0x29004
+#define GCC_BLSP2_UART3_APPS_CBCR				0x2B004
+#define GCC_BOOT_ROM_AHB_CBCR					0x38004
+#define GCC_BOOT_ROM_BCR					0x38000
+#define GCC_CFG_NOC_USB3_AXI_CBCR				0x05018
+#define GCC_BIMC_GFX_CBCR					0x46040
+#define GCC_GP1_CBCR						0x64000
+#define GCC_GP2_CBCR						0x65000
+#define GCC_GP3_CBCR						0x66000
+#define GCC_GPU_BIMC_GFX_CBCR					0x71010
+#define GCC_GPU_CFG_AHB_CBCR					0x71004
+#define GCC_GPU_IREF_EN						0x88010
+#define GCC_HMSS_DVM_BUS_CBCR					0x4808C
+#define GCC_HMSS_RBCPR_CBCR					0x48008
+#define GCC_MMSS_SYS_NOC_AXI_CBCR				0x09000
+#define GCC_MMSS_NOC_CFG_AHB_CBCR				0x09004
+#define GCC_PCIE_0_SLV_AXI_CBCR					0x6B008
+#define GCC_PCIE_0_MSTR_AXI_CBCR				0x6B00C
+#define GCC_PCIE_0_CFG_AHB_CBCR					0x6B010
+#define GCC_PCIE_0_AUX_CBCR					0x6B014
+#define GCC_PCIE_0_PIPE_CBCR					0x6B018
+#define GCC_PCIE_PHY_AUX_CBCR					0x6F004
+#define GCC_PCIE_PHY_BCR					0x6F000
+#define GCC_PCIE_PHY_COM_BCR					0x6F014
+#define GCC_PCIE_PHY_NOCSR_COM_PHY_BCR				0x6F00C
+#define GCC_QUSB2PHY_PRIM_BCR					0x12000
+#define GCC_QUSB2PHY_SEC_BCR					0x12004
+#define GCC_PDM2_CBCR						0x3300C
+#define GCC_PDM_AHB_CBCR					0x33004
+#define GCC_PRNG_AHB_CBCR					0x34004
+#define GCC_PRNG_BCR						0x34000
+#define GCC_SDCC2_APPS_CBCR					0x14004
+#define GCC_SDCC2_AHB_CBCR					0x14008
+#define GCC_SDCC4_APPS_CBCR					0x16004
+#define GCC_SDCC4_AHB_CBCR					0x16008
+#define GCC_TSIF_AHB_CBCR					0x36004
+#define GCC_TSIF_REF_CBCR					0x36008
+#define GCC_UFS_AXI_CBCR					0x75008
+#define GCC_UFS_BCR						0x75000
+#define GCC_UFS_AHB_CBCR					0x7500C
+#define GCC_UFS_TX_SYMBOL_0_CBCR				0x75010
+#define GCC_UFS_RX_SYMBOL_0_CBCR				0x75014
+#define GCC_UFS_RX_SYMBOL_1_CBCR				0x7605C
+#define GCC_UFS_UNIPRO_CORE_CBCR				0x76008
+#define GCC_UFS_ICE_CORE_CBCR					0x7600C
+#define GCC_UFS_PHY_AUX_CBCR					0x76040
+#define GCC_USB30_MASTER_CBCR					0x0F008
+#define GCC_USB30_SLEEP_CBCR					0x0F00C
+#define GCC_USB30_MOCK_UTMI_CBCR				0x0F010
+#define GCC_USB_30_BCR						0x0F000
+#define GCC_USB3_PHY_AUX_CBCR					0x50000
+#define GCC_USB3_PHY_PIPE_CBCR					0x50004
+#define GCC_USB3PHY_PHY_BCR					0x50024
+#define GCC_APCS_CLOCK_SLEEP_ENA_VOTE				0x52008
+#define GCC_MSS_CFG_AHB_CBCR					0x8A000
+#define GCC_MSS_Q6_BIMC_AXI_CBCR				0x8A040
+#define GCC_MSS_MNOC_BIMC_AXI_CBCR				0x8A004
+#define GCC_MSS_SNOC_AXI_CBCR					0x8A03C
+#define GCC_HMSS_GPLL0_CMD_RCGR					0x4805C
+#define GCC_DCC_AHB_CBCR					0x84004
+#define GCC_HLOS1_VOTE_LPASS_CORE_SMMU_CBCR			0x7D010
+#define GCC_HLOS1_VOTE_LPASS_ADSP_SMMU_CBCR			0x7D014
+#define GCC_QSPI_AHB_CBCR					0x90004
+#define GCC_QSPI_REF_CBCR					0x90008
+#define GCC_MMSS_MISC						0x0902C
+#define GCC_GPU_MISC						0x71028
+
+#define GPUCC_GPU_PLL0_PLL_MODE					0x00000
+#define GPUCC_GPU_PLL0_USER_CTL_MODE				0x0000C
+#define GPUCC_GFX3D_CMD_RCGR					0x01070
+#define GPUCC_RBBMTIMER_CMD_RCGR				0x010B0
+#define GPUCC_GFX3D_ISENSE_CMD_RCGR				0x01100
+#define GPUCC_RBCPR_CMD_RCGR					0x01030
+#define GPUCC_GFX3D_CBCR					0x01098
+#define GPUCC_RBBMTIMER_CBCR					0x010D0
+#define GPUCC_GFX3D_ISENSE_CBCR					0x01124
+#define GPUCC_CXO_CBCR						0x01020
+#define GPUCC_RBCPR_CBCR					0x01054
+#define GPU_GX_BCR						0x01090
+#define GPUCC_GX_DOMAIN_MISC					0x00130
+#define GPUCC_GPU_DD_WRAP_CTRL					0x00430
+#define GPUCC_DEBUG_CLK_CTL					0x00120
+
+#define MMSS_PLL_VOTE_APCS					0x001E0
+#define MMSS_MMPLL0_PLL_MODE					0x0C000
+#define MMSS_MMPLL1_PLL_MODE					0x0C050
+#define MMSS_MMPLL3_PLL_MODE					0x00000
+#define MMSS_MMPLL4_PLL_MODE					0x00050
+#define MMSS_MMPLL5_PLL_MODE					0x000A0
+#define MMSS_MMPLL6_PLL_MODE					0x000F0
+#define MMSS_MMPLL7_PLL_MODE					0x00140
+#define MMSS_MMPLL10_PLL_MODE					0x00190
+#define MMSS_AHB_CMD_RCGR					0x05000
+#define MMSS_CSI0_CMD_RCGR					0x03090
+#define MMSS_VFE0_CMD_RCGR					0x03600
+#define MMSS_VFE1_CMD_RCGR					0x03620
+#define MMSS_MDP_CMD_RCGR					0x02040
+#define MMSS_MAXI_CMD_RCGR					0x0F020
+#define MMSS_CPP_CMD_RCGR					0x03640
+#define MMSS_JPEG0_CMD_RCGR					0x03500
+#define MMSS_ROT_CMD_RCGR					0x021A0
+#define MMSS_VIDEO_CORE_CMD_RCGR				0x01000
+#define MMSS_CSI1_CMD_RCGR					0x03100
+#define MMSS_CSI2_CMD_RCGR					0x03160
+#define MMSS_CSI3_CMD_RCGR					0x031C0
+#define MMSS_FD_CORE_CMD_RCGR					0x03B00
+#define MMSS_BYTE0_CMD_RCGR					0x02120
+#define MMSS_BYTE1_CMD_RCGR					0x02140
+#define MMSS_PCLK0_CMD_RCGR					0x02000
+#define MMSS_PCLK1_CMD_RCGR					0x02020
+#define MMSS_VIDEO_SUBCORE0_CMD_RCGR				0x01060
+#define MMSS_VIDEO_SUBCORE1_CMD_RCGR				0x01080
+#define MMSS_CSIPHY_CMD_RCGR					0x03800
+#define MMSS_CCI_CMD_RCGR					0x03300
+#define MMSS_CAMSS_GP0_CMD_RCGR					0x03420
+#define MMSS_CAMSS_GP1_CMD_RCGR					0x03450
+#define MMSS_MCLK0_CMD_RCGR					0x03360
+#define MMSS_MCLK1_CMD_RCGR					0x03390
+#define MMSS_MCLK2_CMD_RCGR					0x033C0
+#define MMSS_MCLK3_CMD_RCGR					0x033F0
+#define MMSS_CAMSS_CSI2PHYTIMER_CBCR				0x03084
+#define MMSS_CSI0PHYTIMER_CMD_RCGR				0x03000
+#define MMSS_CSI1PHYTIMER_CMD_RCGR				0x03030
+#define MMSS_CSI2PHYTIMER_CMD_RCGR				0x03060
+#define MMSS_DP_GTC_CMD_RCGR					0x02280
+#define MMSS_ESC0_CMD_RCGR					0x02160
+#define MMSS_ESC1_CMD_RCGR					0x02180
+#define MMSS_EXTPCLK_CMD_RCGR					0x02060
+#define MMSS_HDMI_CMD_RCGR					0x02100
+#define MMSS_VSYNC_CMD_RCGR					0x02080
+#define MMSS_BIMC_SMMU_AHB_CBCR					0x0E004
+#define MMSS_BIMC_SMMU_AXI_CBCR					0x0E008
+#define MMSS_SNOC_DVM_AXI_CBCR					0x0E040
+#define MMSS_CAMSS_AHB_CBCR					0x0348C
+#define MMSS_CAMSS_CCI_AHB_CBCR					0x03348
+#define MMSS_CAMSS_CCI_CBCR					0x03344
+#define MMSS_CAMSS_CPP_AHB_CBCR					0x036B4
+#define MMSS_CAMSS_CPP_CBCR					0x036B0
+#define MMSS_CAMSS_CPP_AXI_CBCR					0x036C4
+#define MMSS_CAMSS_CPP_VBIF_AHB_CBCR				0x036C8
+#define MMSS_CAMSS_CPHY_CSID0_CBCR				0x03730
+#define MMSS_CAMSS_CSI0_AHB_CBCR				0x030BC
+#define MMSS_CAMSS_CSI0_CBCR					0x030B4
+#define MMSS_CAMSS_CSI0PIX_CBCR					0x030E4
+#define MMSS_CAMSS_CSI0RDI_CBCR					0x030D4
+#define MMSS_CAMSS_CPHY_CSID1_CBCR				0x03734
+#define MMSS_CAMSS_CSI1_AHB_CBCR				0x03128
+#define MMSS_CAMSS_CSI1_CBCR					0x03124
+#define MMSS_CAMSS_CSI1PIX_CBCR					0x03154
+#define MMSS_CAMSS_CSI1RDI_CBCR					0x03144
+#define MMSS_CAMSS_CPHY_CSID2_CBCR				0x03738
+#define MMSS_CAMSS_CSI2_AHB_CBCR				0x03188
+#define MMSS_CAMSS_CSI2_CBCR					0x03184
+#define MMSS_CAMSS_CSI2PIX_CBCR					0x031B4
+#define MMSS_CAMSS_CSI2RDI_CBCR					0x031A4
+#define MMSS_CAMSS_CPHY_CSID3_CBCR				0x0373C
+#define MMSS_CAMSS_CSI3_AHB_CBCR				0x031E8
+#define MMSS_CAMSS_CSI3_CBCR					0x031E4
+#define MMSS_CAMSS_CSI3PIX_CBCR					0x03214
+#define MMSS_CAMSS_CSI3RDI_CBCR					0x03204
+#define MMSS_CAMSS_CSI_VFE0_CBCR				0x03704
+#define MMSS_CAMSS_CSI_VFE1_CBCR				0x03714
+#define MMSS_CAMSS_CSIPHY0_CBCR					0x03740
+#define MMSS_CAMSS_CSIPHY1_CBCR					0x03744
+#define MMSS_CAMSS_CSIPHY2_CBCR					0x03748
+#define MMSS_FD_AHB_CBCR					0x03B74
+#define MMSS_FD_CORE_CBCR					0x03B68
+#define MMSS_FD_CORE_UAR_CBCR					0x03B6C
+#define MMSS_CAMSS_GP0_CBCR					0x03444
+#define MMSS_CAMSS_GP1_CBCR					0x03474
+#define MMSS_CAMSS_ISPIF_AHB_CBCR				0x03224
+#define MMSS_CAMSS_JPEG0_CBCR					0x035A8
+#define MMSS_CAMSS_JPEG_AHB_CBCR				0x035B4
+#define MMSS_CAMSS_JPEG_AXI_CBCR				0x035B8
+#define MMSS_CAMSS_MCLK0_CBCR					0x03384
+#define MMSS_CAMSS_MCLK1_CBCR					0x033B4
+#define MMSS_CAMSS_MCLK2_CBCR					0x033E4
+#define MMSS_CAMSS_MCLK3_CBCR					0x03414
+#define MMSS_CAMSS_MICRO_AHB_CBCR				0x03494
+#define MMSS_CAMSS_CSI0PHYTIMER_CBCR				0x03024
+#define MMSS_CAMSS_CSI1PHYTIMER_CBCR				0x03054
+#define MMSS_CAMSS_TOP_AHB_CBCR					0x03484
+#define MMSS_CAMSS_VFE0_AHB_CBCR				0x03668
+#define MMSS_CAMSS_VFE0_CBCR					0x036A8
+#define MMSS_CAMSS_VFE0_STREAM_CBCR				0x03720
+#define MMSS_CAMSS_VFE1_AHB_CBCR				0x03678
+#define MMSS_CAMSS_VFE1_CBCR					0x036AC
+#define MMSS_CAMSS_VFE1_STREAM_CBCR				0x03724
+#define MMSS_CAMSS_VFE_VBIF_AHB_CBCR				0x036B8
+#define MMSS_CAMSS_VFE_VBIF_AXI_CBCR				0x036BC
+#define MMSS_MDSS_AHB_CBCR					0x02308
+#define MMSS_MDSS_AXI_CBCR					0x02310
+#define MMSS_MDSS_BYTE0_CBCR					0x0233C
+#define MMSS_MDSS_BYTE0_INTF_CBCR				0x02374
+#define MMSS_MDSS_BYTE0_INTF_DIV				0x0237C
+#define MMSS_MDSS_BYTE1_CBCR					0x02340
+#define MMSS_MDSS_BYTE1_INTF_CBCR				0x02378
+#define MMSS_MDSS_BYTE1_INTF_DIV				0x02380
+#define MMSS_MDSS_DP_AUX_CBCR					0x02364
+#define MMSS_MDSS_DP_CRYPTO_CBCR				0x0235C
+#define MMSS_MDSS_DP_GTC_CBCR					0x02368
+#define MMSS_MDSS_DP_LINK_CBCR					0x02354
+#define MMSS_MDSS_DP_LINK_INTF_CBCR				0x02358
+#define MMSS_MDSS_DP_PIXEL_CBCR					0x02360
+#define MMSS_MDSS_ESC0_CBCR					0x02344
+#define MMSS_MDSS_ESC1_CBCR					0x02348
+#define MMSS_MDSS_EXTPCLK_CBCR					0x02324
+#define MMSS_MDSS_HDMI_CBCR					0x02338
+#define MMSS_MDSS_HDMI_DP_AHB_CBCR				0x0230C
+#define MMSS_MDSS_MDP_CBCR					0x0231C
+#define MMSS_MDSS_MDP_LUT_CBCR					0x02320
+#define MMSS_MDSS_PCLK0_CBCR					0x02314
+#define MMSS_MDSS_PCLK1_CBCR					0x02318
+#define MMSS_MDSS_ROT_CBCR					0x02350
+#define MMSS_MDSS_VSYNC_CBCR					0x02328
+#define MMSS_MISC_AHB_CBCR					0x00328
+#define MMSS_MISC_CXO_CBCR					0x00324
+#define MMSS_MNOC_AHB_CBCR					0x05024
+#define MMSS_MNOC_MAXI_CBCR					0x0F004
+#define MMSS_VIDEO_SUBCORE0_CBCR				0x01048
+#define MMSS_VIDEO_SUBCORE1_CBCR				0x0104C
+#define MMSS_VIDEO_AHB_CBCR					0x01030
+#define MMSS_VIDEO_AXI_CBCR					0x01034
+#define MMSS_VIDEO_CORE_CBCR					0x01028
+#define MMSS_VIDEO_MAXI_CBCR					0x01038
+#define MMSS_VMEM_AHB_CBCR					0x0F068
+#define MMSS_VMEM_MAXI_CBCR					0x0F064
+#define MMSS_DP_AUX_CMD_RCGR					0x02260
+#define MMSS_DP_CRYPTO_CMD_RCGR					0x02220
+#define MMSS_DP_LINK_CMD_RCGR					0x02200
+#define MMSS_DP_PIXEL_CMD_RCGR					0x02240
+#define MMSS_DEBUG_CLK_CTL					0x00900
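For orientation: in qcom naming, *_CBCR offsets are clock branch control registers, *_CMD_RCGR offsets are root clock generator command registers, and *_BCR offsets are block reset registers. A minimal sketch of the branch-enable protocol in C, assuming the common GCC bit layout (enable request in bit 0, CLK_OFF status in bit 31) and substituting a plain variable for the memory-mapped register:

    #include <stdint.h>
    #include <stdio.h>

    #define GCC_SDCC2_APPS_CBCR 0x14004     /* offset as defined above */
    #define CBCR_CLK_ENABLE     (1u << 0)   /* assumed: branch enable request */
    #define CBCR_CLK_OFF        (1u << 31)  /* assumed: reads 1 while branch is gated */

    /* stand-in for readl/writel at (ioremapped GCC base + offset) */
    static uint32_t fake_cbcr = CBCR_CLK_OFF;

    static void branch_enable(volatile uint32_t *cbcr)
    {
        *cbcr |= CBCR_CLK_ENABLE;   /* request the branch on */
        *cbcr &= ~CBCR_CLK_OFF;     /* hardware would clear this; real code polls it */
    }

    int main(void)
    {
        branch_enable(&fake_cbcr);
        printf("CBCR @ 0x%05x = 0x%08x\n", (unsigned)GCC_SDCC2_APPS_CBCR, fake_cbcr);
        return 0;
    }

Real driver code would ioremap the GCC base, access base + offset with readl/writel, and poll CLK_OFF with a timeout before declaring the branch enabled.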
diff -Nruw linux-4.4.115-fbx/arch/metag/boot/dts/include/dt-bindings/msm/msm-bus-ids.h linux-4.4.115-fbx/arch/metag/boot/dts/include/dt-bindings/msm/msm-bus-ids.h
--- linux-4.4.115-fbx/arch/metag/boot/dts/include/dt-bindings/msm/msm-bus-ids.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/arch/metag/boot/dts/include/dt-bindings/msm/msm-bus-ids.h	2019-01-22 16:16:28.179288750 +0100
@@ -0,0 +1,887 @@
+/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MSM_BUS_IDS_H
+#define __MSM_BUS_IDS_H
+
+/* Aggregation types */
+#define AGG_SCHEME_NONE	0
+#define AGG_SCHEME_LEG	1
+#define AGG_SCHEME_1	2
+
+/* Topology related enums */
+#define	MSM_BUS_FAB_DEFAULT 0
+#define	MSM_BUS_FAB_APPSS 0
+#define	MSM_BUS_FAB_SYSTEM 1024
+#define	MSM_BUS_FAB_MMSS 2048
+#define	MSM_BUS_FAB_SYSTEM_FPB 3072
+#define	MSM_BUS_FAB_CPSS_FPB 4096
+
+#define	MSM_BUS_FAB_BIMC 0
+#define	MSM_BUS_FAB_SYS_NOC 1024
+#define	MSM_BUS_FAB_MMSS_NOC 2048
+#define	MSM_BUS_FAB_OCMEM_NOC 3072
+#define	MSM_BUS_FAB_PERIPH_NOC 4096
+#define	MSM_BUS_FAB_CONFIG_NOC 5120
+#define	MSM_BUS_FAB_OCMEM_VNOC 6144
+#define	MSM_BUS_FAB_MMSS_AHB 2049
+#define	MSM_BUS_FAB_A0_NOC 6145
+#define	MSM_BUS_FAB_A1_NOC 6146
+#define	MSM_BUS_FAB_A2_NOC 6147
+#define	MSM_BUS_FAB_GNOC 6148
+#define	MSM_BUS_FAB_CR_VIRT 6149
+
+#define	MSM_BUS_MASTER_FIRST 1
+#define	MSM_BUS_MASTER_AMPSS_M0 1
+#define	MSM_BUS_MASTER_AMPSS_M1 2
+#define	MSM_BUS_APPSS_MASTER_FAB_MMSS 3
+#define	MSM_BUS_APPSS_MASTER_FAB_SYSTEM 4
+#define	MSM_BUS_SYSTEM_MASTER_FAB_APPSS 5
+#define	MSM_BUS_MASTER_SPS 6
+#define	MSM_BUS_MASTER_ADM_PORT0 7
+#define	MSM_BUS_MASTER_ADM_PORT1 8
+#define	MSM_BUS_SYSTEM_MASTER_ADM1_PORT0 9
+#define	MSM_BUS_MASTER_ADM1_PORT1 10
+#define	MSM_BUS_MASTER_LPASS_PROC 11
+#define	MSM_BUS_MASTER_MSS_PROCI 12
+#define	MSM_BUS_MASTER_MSS_PROCD 13
+#define	MSM_BUS_MASTER_MSS_MDM_PORT0 14
+#define	MSM_BUS_MASTER_LPASS 15
+#define	MSM_BUS_SYSTEM_MASTER_CPSS_FPB 16
+#define	MSM_BUS_SYSTEM_MASTER_SYSTEM_FPB 17
+#define	MSM_BUS_SYSTEM_MASTER_MMSS_FPB 18
+#define	MSM_BUS_MASTER_ADM1_CI 19
+#define	MSM_BUS_MASTER_ADM0_CI 20
+#define	MSM_BUS_MASTER_MSS_MDM_PORT1 21
+#define	MSM_BUS_MASTER_MDP_PORT0 22
+#define	MSM_BUS_MASTER_MDP_PORT1 23
+#define	MSM_BUS_MMSS_MASTER_ADM1_PORT0 24
+#define	MSM_BUS_MASTER_ROTATOR 25
+#define	MSM_BUS_MASTER_GRAPHICS_3D 26
+#define	MSM_BUS_MASTER_JPEG_DEC 27
+#define	MSM_BUS_MASTER_GRAPHICS_2D_CORE0 28
+#define	MSM_BUS_MASTER_VFE 29
+#define	MSM_BUS_MASTER_VFE0 MSM_BUS_MASTER_VFE
+#define	MSM_BUS_MASTER_VPE 30
+#define	MSM_BUS_MASTER_JPEG_ENC 31
+#define	MSM_BUS_MASTER_GRAPHICS_2D_CORE1 32
+#define	MSM_BUS_MMSS_MASTER_APPS_FAB 33
+#define	MSM_BUS_MASTER_HD_CODEC_PORT0 34
+#define	MSM_BUS_MASTER_HD_CODEC_PORT1 35
+#define	MSM_BUS_MASTER_SPDM 36
+#define	MSM_BUS_MASTER_RPM 37
+#define	MSM_BUS_MASTER_MSS 38
+#define	MSM_BUS_MASTER_RIVA 39
+#define	MSM_BUS_MASTER_SNOC_VMEM 40
+#define	MSM_BUS_MASTER_MSS_SW_PROC 41
+#define	MSM_BUS_MASTER_MSS_FW_PROC 42
+#define	MSM_BUS_MASTER_HMSS 43
+#define	MSM_BUS_MASTER_GSS_NAV 44
+#define	MSM_BUS_MASTER_PCIE 45
+#define	MSM_BUS_MASTER_SATA 46
+#define	MSM_BUS_MASTER_CRYPTO 47
+#define	MSM_BUS_MASTER_VIDEO_CAP 48
+#define	MSM_BUS_MASTER_GRAPHICS_3D_PORT1 49
+#define	MSM_BUS_MASTER_VIDEO_ENC 50
+#define	MSM_BUS_MASTER_VIDEO_DEC 51
+#define	MSM_BUS_MASTER_LPASS_AHB 52
+#define	MSM_BUS_MASTER_QDSS_BAM 53
+#define	MSM_BUS_MASTER_SNOC_CFG 54
+#define	MSM_BUS_MASTER_CRYPTO_CORE0 55
+#define	MSM_BUS_MASTER_CRYPTO_CORE1 56
+#define	MSM_BUS_MASTER_MSS_NAV 57
+#define	MSM_BUS_MASTER_OCMEM_DMA 58
+#define	MSM_BUS_MASTER_WCSS 59
+#define	MSM_BUS_MASTER_QDSS_ETR 60
+#define	MSM_BUS_MASTER_USB3 61
+#define	MSM_BUS_MASTER_JPEG 62
+#define	MSM_BUS_MASTER_VIDEO_P0 63
+#define	MSM_BUS_MASTER_VIDEO_P1 64
+#define	MSM_BUS_MASTER_MSS_PROC 65
+#define	MSM_BUS_MASTER_JPEG_OCMEM 66
+#define	MSM_BUS_MASTER_MDP_OCMEM 67
+#define	MSM_BUS_MASTER_VIDEO_P0_OCMEM 68
+#define	MSM_BUS_MASTER_VIDEO_P1_OCMEM 69
+#define	MSM_BUS_MASTER_VFE_OCMEM 70
+#define	MSM_BUS_MASTER_CNOC_ONOC_CFG 71
+#define	MSM_BUS_MASTER_RPM_INST 72
+#define	MSM_BUS_MASTER_RPM_DATA 73
+#define	MSM_BUS_MASTER_RPM_SYS 74
+#define	MSM_BUS_MASTER_DEHR 75
+#define	MSM_BUS_MASTER_QDSS_DAP 76
+#define	MSM_BUS_MASTER_TIC 77
+#define	MSM_BUS_MASTER_SDCC_1 78
+#define	MSM_BUS_MASTER_SDCC_3 79
+#define	MSM_BUS_MASTER_SDCC_4 80
+#define	MSM_BUS_MASTER_SDCC_2 81
+#define	MSM_BUS_MASTER_TSIF 82
+#define	MSM_BUS_MASTER_BAM_DMA 83
+#define	MSM_BUS_MASTER_BLSP_2 84
+#define	MSM_BUS_MASTER_USB_HSIC 85
+#define	MSM_BUS_MASTER_BLSP_1 86
+#define	MSM_BUS_MASTER_USB_HS 87
+#define	MSM_BUS_MASTER_PNOC_CFG 88
+#define	MSM_BUS_MASTER_V_OCMEM_GFX3D 89
+#define	MSM_BUS_MASTER_IPA 90
+#define	MSM_BUS_MASTER_QPIC 91
+#define	MSM_BUS_MASTER_MDPE 92
+#define	MSM_BUS_MASTER_USB_HS2 93
+#define	MSM_BUS_MASTER_VPU 94
+#define	MSM_BUS_MASTER_UFS 95
+#define	MSM_BUS_MASTER_BCAST 96
+#define	MSM_BUS_MASTER_CRYPTO_CORE2 97
+#define	MSM_BUS_MASTER_EMAC 98
+#define	MSM_BUS_MASTER_VPU_1 99
+#define	MSM_BUS_MASTER_PCIE_1 100
+#define	MSM_BUS_MASTER_USB3_1 101
+#define	MSM_BUS_MASTER_CNOC_MNOC_MMSS_CFG 102
+#define	MSM_BUS_MASTER_CNOC_MNOC_CFG 103
+#define	MSM_BUS_MASTER_TCU_0 104
+#define	MSM_BUS_MASTER_TCU_1 105
+#define	MSM_BUS_MASTER_CPP 106
+#define	MSM_BUS_MASTER_AUDIO 107
+#define	MSM_BUS_MASTER_PCIE_2 108
+#define	MSM_BUS_MASTER_VFE1 109
+#define	MSM_BUS_MASTER_XM_USB_HS1 110
+#define	MSM_BUS_MASTER_PCNOC_BIMC_1 111
+#define	MSM_BUS_MASTER_BIMC_PCNOC   112
+#define	MSM_BUS_MASTER_XI_USB_HSIC  113
+#define	MSM_BUS_MASTER_SGMII	    114
+#define	MSM_BUS_SPMI_FETCHER 115
+#define	MSM_BUS_MASTER_GNOC_BIMC 116
+#define	MSM_BUS_MASTER_CRVIRT_A2NOC 117
+#define	MSM_BUS_MASTER_CNOC_A2NOC 118
+#define	MSM_BUS_MASTER_WLAN 119
+#define	MSM_BUS_MASTER_MSS_CE 120
+#define	MSM_BUS_MASTER_CDSP_PROC 121
+#define	MSM_BUS_MASTER_GNOC_SNOC 122
+#define	MSM_BUS_MASTER_PIMEM 123
+#define	MSM_BUS_MASTER_MASTER_LAST 124
+
+#define	MSM_BUS_SYSTEM_FPB_MASTER_SYSTEM MSM_BUS_SYSTEM_MASTER_SYSTEM_FPB
+#define	MSM_BUS_CPSS_FPB_MASTER_SYSTEM MSM_BUS_SYSTEM_MASTER_CPSS_FPB
+
+#define	MSM_BUS_SNOC_MM_INT_0 10000
+#define	MSM_BUS_SNOC_MM_INT_1 10001
+#define	MSM_BUS_SNOC_MM_INT_2 10002
+#define	MSM_BUS_SNOC_MM_INT_BIMC 10003
+#define	MSM_BUS_SNOC_INT_0 10004
+#define	MSM_BUS_SNOC_INT_1 10005
+#define	MSM_BUS_SNOC_INT_BIMC 10006
+#define	MSM_BUS_SNOC_BIMC_0_MAS 10007
+#define	MSM_BUS_SNOC_BIMC_1_MAS 10008
+#define	MSM_BUS_SNOC_QDSS_INT 10009
+#define	MSM_BUS_PNOC_SNOC_MAS 10010
+#define	MSM_BUS_PNOC_SNOC_SLV 10011
+#define	MSM_BUS_PNOC_INT_0 10012
+#define	MSM_BUS_PNOC_INT_1 10013
+#define	MSM_BUS_PNOC_M_0 10014
+#define	MSM_BUS_PNOC_M_1 10015
+#define	MSM_BUS_BIMC_SNOC_MAS 10016
+#define	MSM_BUS_BIMC_SNOC_SLV 10017
+#define	MSM_BUS_PNOC_SLV_0 10018
+#define	MSM_BUS_PNOC_SLV_1 10019
+#define	MSM_BUS_PNOC_SLV_2 10020
+#define	MSM_BUS_PNOC_SLV_3 10021
+#define	MSM_BUS_PNOC_SLV_4 10022
+#define	MSM_BUS_PNOC_SLV_8 10023
+#define	MSM_BUS_PNOC_SLV_9 10024
+#define	MSM_BUS_SNOC_BIMC_0_SLV 10025
+#define	MSM_BUS_SNOC_BIMC_1_SLV 10026
+#define	MSM_BUS_MNOC_BIMC_MAS 10027
+#define	MSM_BUS_MNOC_BIMC_SLV 10028
+#define	MSM_BUS_BIMC_MNOC_MAS 10029
+#define	MSM_BUS_BIMC_MNOC_SLV 10030
+#define	MSM_BUS_SNOC_BIMC_MAS 10031
+#define	MSM_BUS_SNOC_BIMC_SLV 10032
+#define	MSM_BUS_CNOC_SNOC_MAS 10033
+#define	MSM_BUS_CNOC_SNOC_SLV 10034
+#define	MSM_BUS_SNOC_CNOC_MAS 10035
+#define	MSM_BUS_SNOC_CNOC_SLV 10036
+#define	MSM_BUS_OVNOC_SNOC_MAS 10037
+#define	MSM_BUS_OVNOC_SNOC_SLV 10038
+#define	MSM_BUS_SNOC_OVNOC_MAS 10039
+#define	MSM_BUS_SNOC_OVNOC_SLV 10040
+#define	MSM_BUS_SNOC_PNOC_MAS 10041
+#define	MSM_BUS_SNOC_PNOC_SLV 10042
+#define	MSM_BUS_BIMC_INT_APPS_EBI 10043
+#define	MSM_BUS_BIMC_INT_APPS_SNOC 10044
+#define	MSM_BUS_SNOC_BIMC_2_MAS 10045
+#define	MSM_BUS_SNOC_BIMC_2_SLV 10046
+#define	MSM_BUS_PNOC_SLV_5	10047
+#define	MSM_BUS_PNOC_SLV_7	10048
+#define	MSM_BUS_PNOC_INT_2 10049
+#define	MSM_BUS_PNOC_INT_3 10050
+#define	MSM_BUS_PNOC_INT_4 10051
+#define	MSM_BUS_PNOC_INT_5 10052
+#define	MSM_BUS_PNOC_INT_6 10053
+#define	MSM_BUS_PNOC_INT_7 10054
+#define	MSM_BUS_BIMC_SNOC_1_MAS 10055
+#define	MSM_BUS_BIMC_SNOC_1_SLV 10056
+#define	MSM_BUS_PNOC_A1NOC_MAS 10057
+#define	MSM_BUS_PNOC_A1NOC_SLV 10058
+#define	MSM_BUS_CNOC_A1NOC_MAS 10059
+#define	MSM_BUS_A0NOC_SNOC_MAS 10060
+#define	MSM_BUS_A0NOC_SNOC_SLV 10061
+#define	MSM_BUS_A1NOC_SNOC_SLV 10062
+#define	MSM_BUS_A1NOC_SNOC_MAS 10063
+#define	MSM_BUS_A2NOC_SNOC_MAS 10064
+#define	MSM_BUS_A2NOC_SNOC_SLV 10065
+#define	MSM_BUS_SNOC_INT_2 10066
+#define	MSM_BUS_A0NOC_QDSS_INT	10067
+#define	MSM_BUS_INT_LAST 10068
+
+#define	MSM_BUS_INT_TEST_ID	20000
+#define	MSM_BUS_INT_TEST_LAST	20050
+
+#define	MSM_BUS_SLAVE_FIRST 512
+#define	MSM_BUS_SLAVE_EBI_CH0 512
+#define	MSM_BUS_SLAVE_EBI_CH1 513
+#define	MSM_BUS_SLAVE_AMPSS_L2 514
+#define	MSM_BUS_APPSS_SLAVE_FAB_MMSS 515
+#define	MSM_BUS_APPSS_SLAVE_FAB_SYSTEM 516
+#define	MSM_BUS_SYSTEM_SLAVE_FAB_APPS 517
+#define	MSM_BUS_SLAVE_SPS 518
+#define	MSM_BUS_SLAVE_SYSTEM_IMEM 519
+#define	MSM_BUS_SLAVE_AMPSS 520
+#define	MSM_BUS_SLAVE_MSS 521
+#define	MSM_BUS_SLAVE_LPASS 522
+#define	MSM_BUS_SYSTEM_SLAVE_CPSS_FPB 523
+#define	MSM_BUS_SYSTEM_SLAVE_SYSTEM_FPB 524
+#define	MSM_BUS_SYSTEM_SLAVE_MMSS_FPB 525
+#define	MSM_BUS_SLAVE_CORESIGHT 526
+#define	MSM_BUS_SLAVE_RIVA 527
+#define	MSM_BUS_SLAVE_SMI 528
+#define	MSM_BUS_MMSS_SLAVE_FAB_APPS 529
+#define	MSM_BUS_MMSS_SLAVE_FAB_APPS_1 530
+#define	MSM_BUS_SLAVE_MM_IMEM 531
+#define	MSM_BUS_SLAVE_CRYPTO 532
+#define	MSM_BUS_SLAVE_SPDM 533
+#define	MSM_BUS_SLAVE_RPM 534
+#define	MSM_BUS_SLAVE_RPM_MSG_RAM 535
+#define	MSM_BUS_SLAVE_MPM 536
+#define	MSM_BUS_SLAVE_PMIC1_SSBI1_A 537
+#define	MSM_BUS_SLAVE_PMIC1_SSBI1_B 538
+#define	MSM_BUS_SLAVE_PMIC1_SSBI1_C 539
+#define	MSM_BUS_SLAVE_PMIC2_SSBI2_A 540
+#define	MSM_BUS_SLAVE_PMIC2_SSBI2_B 541
+#define	MSM_BUS_SLAVE_GSBI1_UART 542
+#define	MSM_BUS_SLAVE_GSBI2_UART 543
+#define	MSM_BUS_SLAVE_GSBI3_UART 544
+#define	MSM_BUS_SLAVE_GSBI4_UART 545
+#define	MSM_BUS_SLAVE_GSBI5_UART 546
+#define	MSM_BUS_SLAVE_GSBI6_UART 547
+#define	MSM_BUS_SLAVE_GSBI7_UART 548
+#define	MSM_BUS_SLAVE_GSBI8_UART 549
+#define	MSM_BUS_SLAVE_GSBI9_UART 550
+#define	MSM_BUS_SLAVE_GSBI10_UART 551
+#define	MSM_BUS_SLAVE_GSBI11_UART 552
+#define	MSM_BUS_SLAVE_GSBI12_UART 553
+#define	MSM_BUS_SLAVE_GSBI1_QUP 554
+#define	MSM_BUS_SLAVE_GSBI2_QUP 555
+#define	MSM_BUS_SLAVE_GSBI3_QUP 556
+#define	MSM_BUS_SLAVE_GSBI4_QUP 557
+#define	MSM_BUS_SLAVE_GSBI5_QUP 558
+#define	MSM_BUS_SLAVE_GSBI6_QUP 559
+#define	MSM_BUS_SLAVE_GSBI7_QUP 560
+#define	MSM_BUS_SLAVE_GSBI8_QUP 561
+#define	MSM_BUS_SLAVE_GSBI9_QUP 562
+#define	MSM_BUS_SLAVE_GSBI10_QUP 563
+#define	MSM_BUS_SLAVE_GSBI11_QUP 564
+#define	MSM_BUS_SLAVE_GSBI12_QUP 565
+#define	MSM_BUS_SLAVE_EBI2_NAND 566
+#define	MSM_BUS_SLAVE_EBI2_CS0 567
+#define	MSM_BUS_SLAVE_EBI2_CS1 568
+#define	MSM_BUS_SLAVE_EBI2_CS2 569
+#define	MSM_BUS_SLAVE_EBI2_CS3 570
+#define	MSM_BUS_SLAVE_EBI2_CS4 571
+#define	MSM_BUS_SLAVE_EBI2_CS5 572
+#define	MSM_BUS_SLAVE_USB_FS1 573
+#define	MSM_BUS_SLAVE_USB_FS2 574
+#define	MSM_BUS_SLAVE_TSIF 575
+#define	MSM_BUS_SLAVE_MSM_TSSC 576
+#define	MSM_BUS_SLAVE_MSM_PDM 577
+#define	MSM_BUS_SLAVE_MSM_DIMEM 578
+#define	MSM_BUS_SLAVE_MSM_TCSR 579
+#define	MSM_BUS_SLAVE_MSM_PRNG 580
+#define	MSM_BUS_SLAVE_GSS 581
+#define	MSM_BUS_SLAVE_SATA 582
+#define	MSM_BUS_SLAVE_USB3 583
+#define	MSM_BUS_SLAVE_WCSS 584
+#define	MSM_BUS_SLAVE_OCIMEM 585
+#define	MSM_BUS_SLAVE_SNOC_OCMEM 586
+#define	MSM_BUS_SLAVE_SERVICE_SNOC 587
+#define	MSM_BUS_SLAVE_QDSS_STM 588
+#define	MSM_BUS_SLAVE_CAMERA_CFG 589
+#define	MSM_BUS_SLAVE_DISPLAY_CFG 590
+#define	MSM_BUS_SLAVE_OCMEM_CFG 591
+#define	MSM_BUS_SLAVE_CPR_CFG 592
+#define	MSM_BUS_SLAVE_CPR_XPU_CFG 593
+#define	MSM_BUS_SLAVE_MISC_CFG 594
+#define	MSM_BUS_SLAVE_MISC_XPU_CFG 595
+#define	MSM_BUS_SLAVE_VENUS_CFG 596
+#define	MSM_BUS_SLAVE_MISC_VENUS_CFG 597
+#define	MSM_BUS_SLAVE_GRAPHICS_3D_CFG 598
+#define	MSM_BUS_SLAVE_MMSS_CLK_CFG 599
+#define	MSM_BUS_SLAVE_MMSS_CLK_XPU_CFG 600
+#define	MSM_BUS_SLAVE_MNOC_MPU_CFG 601
+#define	MSM_BUS_SLAVE_ONOC_MPU_CFG 602
+#define	MSM_BUS_SLAVE_SERVICE_MNOC 603
+#define	MSM_BUS_SLAVE_OCMEM 604
+#define	MSM_BUS_SLAVE_SERVICE_ONOC 605
+#define	MSM_BUS_SLAVE_SDCC_1 606
+#define	MSM_BUS_SLAVE_SDCC_3 607
+#define	MSM_BUS_SLAVE_SDCC_2 608
+#define	MSM_BUS_SLAVE_SDCC_4 609
+#define	MSM_BUS_SLAVE_BAM_DMA 610
+#define	MSM_BUS_SLAVE_BLSP_2 611
+#define	MSM_BUS_SLAVE_USB_HSIC 612
+#define	MSM_BUS_SLAVE_BLSP_1 613
+#define	MSM_BUS_SLAVE_USB_HS 614
+#define	MSM_BUS_SLAVE_PDM 615
+#define	MSM_BUS_SLAVE_PERIPH_APU_CFG 616
+#define	MSM_BUS_SLAVE_PNOC_MPU_CFG 617
+#define	MSM_BUS_SLAVE_PRNG 618
+#define	MSM_BUS_SLAVE_SERVICE_PNOC 619
+#define	MSM_BUS_SLAVE_CLK_CTL 620
+#define	MSM_BUS_SLAVE_CNOC_MSS 621
+#define	MSM_BUS_SLAVE_SECURITY 622
+#define	MSM_BUS_SLAVE_TCSR 623
+#define	MSM_BUS_SLAVE_TLMM 624
+#define	MSM_BUS_SLAVE_CRYPTO_0_CFG 625
+#define	MSM_BUS_SLAVE_CRYPTO_1_CFG 626
+#define	MSM_BUS_SLAVE_IMEM_CFG 627
+#define	MSM_BUS_SLAVE_MESSAGE_RAM 628
+#define	MSM_BUS_SLAVE_BIMC_CFG 629
+#define	MSM_BUS_SLAVE_BOOT_ROM 630
+#define	MSM_BUS_SLAVE_CNOC_MNOC_MMSS_CFG 631
+#define	MSM_BUS_SLAVE_PMIC_ARB 632
+#define	MSM_BUS_SLAVE_SPDM_WRAPPER 633
+#define	MSM_BUS_SLAVE_DEHR_CFG 634
+#define	MSM_BUS_SLAVE_QDSS_CFG 635
+#define	MSM_BUS_SLAVE_RBCPR_CFG 636
+#define	MSM_BUS_SLAVE_RBCPR_QDSS_APU_CFG 637
+#define	MSM_BUS_SLAVE_SNOC_MPU_CFG 638
+#define	MSM_BUS_SLAVE_CNOC_ONOC_CFG 639
+#define	MSM_BUS_SLAVE_CNOC_MNOC_CFG 640
+#define	MSM_BUS_SLAVE_PNOC_CFG 641
+#define	MSM_BUS_SLAVE_SNOC_CFG 642
+#define	MSM_BUS_SLAVE_EBI1_DLL_CFG 643
+#define	MSM_BUS_SLAVE_PHY_APU_CFG 644
+#define	MSM_BUS_SLAVE_EBI1_PHY_CFG 645
+#define	MSM_BUS_SLAVE_SERVICE_CNOC 646
+#define	MSM_BUS_SLAVE_IPS_CFG 647
+#define	MSM_BUS_SLAVE_QPIC 648
+#define	MSM_BUS_SLAVE_DSI_CFG 649
+#define	MSM_BUS_SLAVE_UFS_CFG 650
+#define	MSM_BUS_SLAVE_RBCPR_CX_CFG 651
+#define	MSM_BUS_SLAVE_RBCPR_MX_CFG 652
+#define	MSM_BUS_SLAVE_PCIE_CFG 653
+#define	MSM_BUS_SLAVE_USB_PHYS_CFG 654
+#define	MSM_BUS_SLAVE_VIDEO_CAP_CFG 655
+#define	MSM_BUS_SLAVE_AVSYNC_CFG 656
+#define	MSM_BUS_SLAVE_CRYPTO_2_CFG 657
+#define	MSM_BUS_SLAVE_VPU_CFG 658
+#define	MSM_BUS_SLAVE_BCAST_CFG 659
+#define	MSM_BUS_SLAVE_KLM_CFG 660
+#define	MSM_BUS_SLAVE_GENI_IR_CFG 661
+#define	MSM_BUS_SLAVE_OCMEM_GFX 662
+#define	MSM_BUS_SLAVE_CATS_128 663
+#define	MSM_BUS_SLAVE_OCMEM_64 664
+#define	MSM_BUS_SLAVE_PCIE_0 665
+#define	MSM_BUS_SLAVE_PCIE_1 666
+#define	MSM_BUS_SLAVE_PCIE_0_CFG 667
+#define	MSM_BUS_SLAVE_PCIE_1_CFG 668
+#define	MSM_BUS_SLAVE_SRVC_MNOC 669
+#define	MSM_BUS_SLAVE_USB_HS2 670
+#define	MSM_BUS_SLAVE_AUDIO 671
+#define	MSM_BUS_SLAVE_TCU 672
+#define	MSM_BUS_SLAVE_APPSS 673
+#define	MSM_BUS_SLAVE_PCIE_PARF 674
+#define	MSM_BUS_SLAVE_USB3_PHY_CFG 675
+#define	MSM_BUS_SLAVE_IPA_CFG 676
+#define	MSM_BUS_SLAVE_A0NOC_SNOC 677
+#define	MSM_BUS_SLAVE_A1NOC_SNOC 678
+#define	MSM_BUS_SLAVE_A2NOC_SNOC 679
+#define	MSM_BUS_SLAVE_HMSS_L3 680
+#define	MSM_BUS_SLAVE_PIMEM_CFG 681
+#define	MSM_BUS_SLAVE_DCC_CFG 682
+#define	MSM_BUS_SLAVE_QDSS_RBCPR_APU_CFG 683
+#define	MSM_BUS_SLAVE_PCIE_2_CFG 684
+#define	MSM_BUS_SLAVE_PCIE20_AHB2PHY 685
+#define	MSM_BUS_SLAVE_A0NOC_CFG 686
+#define	MSM_BUS_SLAVE_A1NOC_CFG 687
+#define	MSM_BUS_SLAVE_A2NOC_CFG 688
+#define	MSM_BUS_SLAVE_A1NOC_MPU_CFG 689
+#define	MSM_BUS_SLAVE_A2NOC_MPU_CFG 690
+#define	MSM_BUS_SLAVE_A0NOC_SMMU_CFG 691
+#define	MSM_BUS_SLAVE_A1NOC_SMMU_CFG 692
+#define	MSM_BUS_SLAVE_A2NOC_SMMU_CFG 693
+#define	MSM_BUS_SLAVE_LPASS_SMMU_CFG 694
+#define	MSM_BUS_SLAVE_MMAGIC_CFG 695
+#define	MSM_BUS_SLAVE_VENUS_THROTTLE_CFG 696
+#define	MSM_BUS_SLAVE_SSC_CFG 697
+#define	MSM_BUS_SLAVE_DSA_CFG 698
+#define	MSM_BUS_SLAVE_DSA_MPU_CFG 699
+#define	MSM_BUS_SLAVE_DISPLAY_THROTTLE_CFG 700
+#define	MSM_BUS_SLAVE_SMMU_CPP_CFG 701
+#define	MSM_BUS_SLAVE_SMMU_JPEG_CFG 702
+#define	MSM_BUS_SLAVE_SMMU_MDP_CFG 703
+#define	MSM_BUS_SLAVE_SMMU_ROTATOR_CFG 704
+#define	MSM_BUS_SLAVE_SMMU_VENUS_CFG 705
+#define	MSM_BUS_SLAVE_SMMU_VFE_CFG 706
+#define	MSM_BUS_SLAVE_A0NOC_MPU_CFG 707
+#define	MSM_BUS_SLAVE_VMEM_CFG 708
+#define	MSM_BUS_SLAVE_CAMERA_THROTTLE_CFG 709
+#define	MSM_BUS_SLAVE_VMEM 710
+#define	MSM_BUS_SLAVE_AHB2PHY 711
+#define	MSM_BUS_SLAVE_PIMEM 712
+#define	MSM_BUS_SLAVE_SNOC_VMEM 713
+#define	MSM_BUS_SLAVE_PCIE_2 714
+#define	MSM_BUS_SLAVE_RBCPR_MX 715
+#define	MSM_BUS_SLAVE_RBCPR_CX 716
+#define	MSM_BUS_SLAVE_BIMC_PCNOC 717
+#define	MSM_BUS_SLAVE_PCNOC_BIMC_1 718
+#define	MSM_BUS_SLAVE_SGMII 719
+#define	MSM_BUS_SLAVE_SPMI_FETCHER 720
+#define	MSM_BUS_PNOC_SLV_6 721
+#define	MSM_BUS_SLAVE_MMSS_SMMU_CFG 722
+#define	MSM_BUS_SLAVE_WLAN 723
+#define	MSM_BUS_SLAVE_CRVIRT_A2NOC 724
+#define	MSM_BUS_SLAVE_CNOC_A2NOC 725
+#define	MSM_BUS_SLAVE_GLM 726
+#define	MSM_BUS_SLAVE_GNOC_BIMC 727
+#define	MSM_BUS_SLAVE_GNOC_SNOC 728
+#define	MSM_BUS_SLAVE_QM_CFG 729
+#define	MSM_BUS_SLAVE_TLMM_EAST 730
+#define	MSM_BUS_SLAVE_TLMM_NORTH 731
+#define	MSM_BUS_SLAVE_TLMM_WEST 732
+#define	MSM_BUS_SLAVE_SKL 733
+#define	MSM_BUS_SLAVE_LPASS_TCM	734
+#define	MSM_BUS_SLAVE_TLMM_SOUTH 735
+#define	MSM_BUS_SLAVE_TLMM_CENTER 736
+#define	MSM_BUS_MSS_NAV_CE_MPU_CFG 737
+#define	MSM_BUS_SLAVE_A2NOC_THROTTLE_CFG 738
+#define	MSM_BUS_SLAVE_CDSP 739
+#define	MSM_BUS_SLAVE_CDSP_SMMU_CFG 740
+#define	MSM_BUS_SLAVE_LPASS_MPU_CFG 741
+#define	MSM_BUS_SLAVE_CSI_PHY_CFG 742
+#define	MSM_BUS_SLAVE_LAST 743
+
+#define	MSM_BUS_SYSTEM_FPB_SLAVE_SYSTEM  MSM_BUS_SYSTEM_SLAVE_SYSTEM_FPB
+#define	MSM_BUS_CPSS_FPB_SLAVE_SYSTEM MSM_BUS_SYSTEM_SLAVE_CPSS_FPB
+
+/*
+ * IDs used in RPM messages
+ */
+#define	ICBID_MASTER_APPSS_PROC 0
+#define	ICBID_MASTER_MSS_PROC 1
+#define	ICBID_MASTER_MNOC_BIMC 2
+#define	ICBID_MASTER_SNOC_BIMC 3
+#define	ICBID_MASTER_SNOC_BIMC_0 ICBID_MASTER_SNOC_BIMC
+#define	ICBID_MASTER_CNOC_MNOC_MMSS_CFG 4
+#define	ICBID_MASTER_CNOC_MNOC_CFG 5
+#define	ICBID_MASTER_GFX3D 6
+#define	ICBID_MASTER_JPEG 7
+#define	ICBID_MASTER_MDP 8
+#define	ICBID_MASTER_MDP0 ICBID_MASTER_MDP
+#define	ICBID_MASTER_MDPS ICBID_MASTER_MDP
+#define	ICBID_MASTER_VIDEO 9
+#define	ICBID_MASTER_VIDEO_P0 ICBID_MASTER_VIDEO
+#define	ICBID_MASTER_VIDEO_P1 10
+#define	ICBID_MASTER_VFE 11
+#define	ICBID_MASTER_VFE0 ICBID_MASTER_VFE
+#define	ICBID_MASTER_CNOC_ONOC_CFG 12
+#define	ICBID_MASTER_JPEG_OCMEM 13
+#define	ICBID_MASTER_MDP_OCMEM 14
+#define	ICBID_MASTER_VIDEO_P0_OCMEM 15
+#define	ICBID_MASTER_VIDEO_P1_OCMEM 16
+#define	ICBID_MASTER_VFE_OCMEM 17
+#define	ICBID_MASTER_LPASS_AHB 18
+#define	ICBID_MASTER_QDSS_BAM 19
+#define	ICBID_MASTER_SNOC_CFG 20
+#define	ICBID_MASTER_BIMC_SNOC 21
+#define	ICBID_MASTER_BIMC_SNOC_0 ICBID_MASTER_BIMC_SNOC
+#define	ICBID_MASTER_CNOC_SNOC 22
+#define	ICBID_MASTER_CRYPTO 23
+#define	ICBID_MASTER_CRYPTO_CORE0 ICBID_MASTER_CRYPTO
+#define	ICBID_MASTER_CRYPTO_CORE1 24
+#define	ICBID_MASTER_LPASS_PROC 25
+#define	ICBID_MASTER_MSS 26
+#define	ICBID_MASTER_MSS_NAV 27
+#define	ICBID_MASTER_OCMEM_DMA 28
+#define	ICBID_MASTER_PNOC_SNOC 29
+#define	ICBID_MASTER_WCSS 30
+#define	ICBID_MASTER_QDSS_ETR 31
+#define	ICBID_MASTER_USB3 32
+#define	ICBID_MASTER_USB3_0 ICBID_MASTER_USB3
+#define	ICBID_MASTER_SDCC_1 33
+#define	ICBID_MASTER_SDCC_3 34
+#define	ICBID_MASTER_SDCC_2 35
+#define	ICBID_MASTER_SDCC_4 36
+#define	ICBID_MASTER_TSIF 37
+#define	ICBID_MASTER_BAM_DMA 38
+#define	ICBID_MASTER_BLSP_2 39
+#define	ICBID_MASTER_USB_HSIC 40
+#define	ICBID_MASTER_BLSP_1 41
+#define	ICBID_MASTER_USB_HS 42
+#define	ICBID_MASTER_USB_HS1 ICBID_MASTER_USB_HS
+#define	ICBID_MASTER_PNOC_CFG 43
+#define	ICBID_MASTER_SNOC_PNOC 44
+#define	ICBID_MASTER_RPM_INST 45
+#define	ICBID_MASTER_RPM_DATA 46
+#define	ICBID_MASTER_RPM_SYS 47
+#define	ICBID_MASTER_DEHR 48
+#define	ICBID_MASTER_QDSS_DAP 49
+#define	ICBID_MASTER_SPDM 50
+#define	ICBID_MASTER_TIC 51
+#define	ICBID_MASTER_SNOC_CNOC 52
+#define	ICBID_MASTER_GFX3D_OCMEM 53
+#define	ICBID_MASTER_GFX3D_GMEM ICBID_MASTER_GFX3D_OCMEM
+#define	ICBID_MASTER_OVIRT_SNOC 54
+#define	ICBID_MASTER_SNOC_OVIRT 55
+#define	ICBID_MASTER_SNOC_GVIRT ICBID_MASTER_SNOC_OVIRT
+#define	ICBID_MASTER_ONOC_OVIRT 56
+#define	ICBID_MASTER_USB_HS2 57
+#define	ICBID_MASTER_QPIC 58
+#define	ICBID_MASTER_IPA 59
+#define	ICBID_MASTER_DSI 60
+#define	ICBID_MASTER_MDP1 61
+#define	ICBID_MASTER_MDPE ICBID_MASTER_MDP1
+#define	ICBID_MASTER_VPU_PROC 62
+#define	ICBID_MASTER_VPU 63
+#define	ICBID_MASTER_VPU0 ICBID_MASTER_VPU
+#define	ICBID_MASTER_CRYPTO_CORE2 64
+#define	ICBID_MASTER_PCIE_0 65
+#define	ICBID_MASTER_PCIE_1 66
+#define	ICBID_MASTER_SATA 67
+#define	ICBID_MASTER_UFS 68
+#define	ICBID_MASTER_USB3_1 69
+#define	ICBID_MASTER_VIDEO_OCMEM 70
+#define	ICBID_MASTER_VPU1 71
+#define	ICBID_MASTER_VCAP 72
+#define	ICBID_MASTER_EMAC 73
+#define	ICBID_MASTER_BCAST 74
+#define	ICBID_MASTER_MMSS_PROC 75
+#define	ICBID_MASTER_SNOC_BIMC_1 76
+#define	ICBID_MASTER_SNOC_PCNOC 77
+#define	ICBID_MASTER_AUDIO 78
+#define	ICBID_MASTER_MM_INT_0 79
+#define	ICBID_MASTER_MM_INT_1 80
+#define	ICBID_MASTER_MM_INT_2 81
+#define	ICBID_MASTER_MM_INT_BIMC 82
+#define	ICBID_MASTER_MSS_INT 83
+#define	ICBID_MASTER_PCNOC_CFG 84
+#define	ICBID_MASTER_PCNOC_INT_0 85
+#define	ICBID_MASTER_PCNOC_INT_1 86
+#define	ICBID_MASTER_PCNOC_M_0 87
+#define	ICBID_MASTER_PCNOC_M_1 88
+#define	ICBID_MASTER_PCNOC_S_0 89
+#define	ICBID_MASTER_PCNOC_S_1 90
+#define	ICBID_MASTER_PCNOC_S_2 91
+#define	ICBID_MASTER_PCNOC_S_3 92
+#define	ICBID_MASTER_PCNOC_S_4 93
+#define	ICBID_MASTER_PCNOC_S_6 94
+#define	ICBID_MASTER_PCNOC_S_7 95
+#define	ICBID_MASTER_PCNOC_S_8 96
+#define	ICBID_MASTER_PCNOC_S_9 97
+#define	ICBID_MASTER_QDSS_INT 98
+#define	ICBID_MASTER_SNOC_INT_0	99
+#define	ICBID_MASTER_SNOC_INT_1 100
+#define	ICBID_MASTER_SNOC_INT_BIMC 101
+#define	ICBID_MASTER_TCU_0 102
+#define	ICBID_MASTER_TCU_1 103
+#define	ICBID_MASTER_BIMC_INT_0 104
+#define	ICBID_MASTER_BIMC_INT_1 105
+#define	ICBID_MASTER_CAMERA 106
+#define	ICBID_MASTER_RICA 107
+#define	ICBID_MASTER_SNOC_BIMC_2 108
+#define	ICBID_MASTER_BIMC_SNOC_1 109
+#define	ICBID_MASTER_A0NOC_SNOC 110
+#define	ICBID_MASTER_A1NOC_SNOC 111
+#define	ICBID_MASTER_A2NOC_SNOC 112
+#define	ICBID_MASTER_PIMEM 113
+#define	ICBID_MASTER_SNOC_VMEM 114
+#define	ICBID_MASTER_CPP 115
+#define	ICBID_MASTER_CNOC_A1NOC 116
+#define	ICBID_MASTER_PNOC_A1NOC 117
+#define	ICBID_MASTER_HMSS 118
+#define	ICBID_MASTER_PCIE_2 119
+#define	ICBID_MASTER_ROTATOR 120
+#define	ICBID_MASTER_VENUS_VMEM 121
+#define	ICBID_MASTER_DCC 122
+#define	ICBID_MASTER_MCDMA 123
+#define	ICBID_MASTER_PCNOC_INT_2 124
+#define	ICBID_MASTER_PCNOC_INT_3 125
+#define	ICBID_MASTER_PCNOC_INT_4 126
+#define	ICBID_MASTER_PCNOC_INT_5 127
+#define	ICBID_MASTER_PCNOC_INT_6 128
+#define	ICBID_MASTER_PCNOC_S_5 129
+#define	ICBID_MASTER_SENSORS_AHB 130
+#define	ICBID_MASTER_SENSORS_PROC 131
+#define	ICBID_MASTER_QSPI 132
+#define	ICBID_MASTER_VFE1 133
+#define	ICBID_MASTER_SNOC_INT_2 134
+#define	ICBID_MASTER_SMMNOC_BIMC 135
+#define	ICBID_MASTER_CRVIRT_A1NOC 136
+#define	ICBID_MASTER_XM_USB_HS1 137
+#define	ICBID_MASTER_XI_USB_HS1 138
+#define	ICBID_MASTER_PCNOC_BIMC_1 139
+#define	ICBID_MASTER_BIMC_PCNOC 140
+#define	ICBID_MASTER_XI_HSIC 141
+#define	ICBID_MASTER_SGMII  142
+#define	ICBID_MASTER_SPMI_FETCHER 143
+#define	ICBID_MASTER_GNOC_BIMC 144
+#define	ICBID_MASTER_CRVIRT_A2NOC 145
+#define	ICBID_MASTER_CNOC_A2NOC 146
+#define	ICBID_MASTER_WLAN 147
+#define	ICBID_MASTER_MSS_CE 148
+#define	ICBID_MASTER_CDSP_PROC 149
+#define	ICBID_MASTER_GNOC_SNOC 150
+
+#define	ICBID_SLAVE_EBI1 0
+#define	ICBID_SLAVE_APPSS_L2 1
+#define	ICBID_SLAVE_BIMC_SNOC 2
+#define	ICBID_SLAVE_BIMC_SNOC_0 ICBID_SLAVE_BIMC_SNOC
+#define	ICBID_SLAVE_CAMERA_CFG 3
+#define	ICBID_SLAVE_DISPLAY_CFG 4
+#define	ICBID_SLAVE_OCMEM_CFG 5
+#define	ICBID_SLAVE_CPR_CFG 6
+#define	ICBID_SLAVE_CPR_XPU_CFG 7
+#define	ICBID_SLAVE_MISC_CFG 8
+#define	ICBID_SLAVE_MISC_XPU_CFG 9
+#define	ICBID_SLAVE_VENUS_CFG 10
+#define	ICBID_SLAVE_GFX3D_CFG 11
+#define	ICBID_SLAVE_MMSS_CLK_CFG 12
+#define	ICBID_SLAVE_MMSS_CLK_XPU_CFG 13
+#define	ICBID_SLAVE_MNOC_MPU_CFG 14
+#define	ICBID_SLAVE_ONOC_MPU_CFG 15
+#define	ICBID_SLAVE_MNOC_BIMC 16
+#define	ICBID_SLAVE_SERVICE_MNOC 17
+#define	ICBID_SLAVE_OCMEM 18
+#define	ICBID_SLAVE_GMEM ICBID_SLAVE_OCMEM
+#define	ICBID_SLAVE_SERVICE_ONOC 19
+#define	ICBID_SLAVE_APPSS 20
+#define	ICBID_SLAVE_LPASS 21
+#define	ICBID_SLAVE_USB3 22
+#define	ICBID_SLAVE_USB3_0 ICBID_SLAVE_USB3
+#define	ICBID_SLAVE_WCSS 23
+#define	ICBID_SLAVE_SNOC_BIMC 24
+#define	ICBID_SLAVE_SNOC_BIMC_0 ICBID_SLAVE_SNOC_BIMC
+#define	ICBID_SLAVE_SNOC_CNOC 25
+#define	ICBID_SLAVE_IMEM 26
+#define	ICBID_SLAVE_OCIMEM ICBID_SLAVE_IMEM
+#define	ICBID_SLAVE_SNOC_OVIRT 27
+#define	ICBID_SLAVE_SNOC_GVIRT ICBID_SLAVE_SNOC_OVIRT
+#define	ICBID_SLAVE_SNOC_PNOC 28
+#define	ICBID_SLAVE_SNOC_PCNOC ICBID_SLAVE_SNOC_PNOC
+#define	ICBID_SLAVE_SERVICE_SNOC 29
+#define	ICBID_SLAVE_QDSS_STM 30
+#define	ICBID_SLAVE_SDCC_1 31
+#define	ICBID_SLAVE_SDCC_3 32
+#define	ICBID_SLAVE_SDCC_2 33
+#define	ICBID_SLAVE_SDCC_4 34
+#define	ICBID_SLAVE_TSIF 35
+#define	ICBID_SLAVE_BAM_DMA 36
+#define	ICBID_SLAVE_BLSP_2 37
+#define	ICBID_SLAVE_USB_HSIC 38
+#define	ICBID_SLAVE_BLSP_1 39
+#define	ICBID_SLAVE_USB_HS 40
+#define	ICBID_SLAVE_USB_HS1 ICBID_SLAVE_USB_HS
+#define	ICBID_SLAVE_PDM 41
+#define	ICBID_SLAVE_PERIPH_APU_CFG 42
+#define	ICBID_SLAVE_PNOC_MPU_CFG 43
+#define	ICBID_SLAVE_PRNG 44
+#define	ICBID_SLAVE_PNOC_SNOC 45
+#define	ICBID_SLAVE_PCNOC_SNOC ICBID_SLAVE_PNOC_SNOC
+#define	ICBID_SLAVE_SERVICE_PNOC 46
+#define	ICBID_SLAVE_CLK_CTL 47
+#define	ICBID_SLAVE_CNOC_MSS 48
+#define	ICBID_SLAVE_PCNOC_MSS ICBID_SLAVE_CNOC_MSS
+#define	ICBID_SLAVE_SECURITY 49
+#define	ICBID_SLAVE_TCSR 50
+#define	ICBID_SLAVE_TLMM 51
+#define	ICBID_SLAVE_CRYPTO_0_CFG 52
+#define	ICBID_SLAVE_CRYPTO_1_CFG 53
+#define	ICBID_SLAVE_IMEM_CFG 54
+#define	ICBID_SLAVE_MESSAGE_RAM 55
+#define	ICBID_SLAVE_BIMC_CFG 56
+#define	ICBID_SLAVE_BOOT_ROM 57
+#define	ICBID_SLAVE_CNOC_MNOC_MMSS_CFG 58
+#define	ICBID_SLAVE_PMIC_ARB 59
+#define	ICBID_SLAVE_SPDM_WRAPPER 60
+#define	ICBID_SLAVE_DEHR_CFG 61
+#define	ICBID_SLAVE_MPM 62
+#define	ICBID_SLAVE_QDSS_CFG 63
+#define	ICBID_SLAVE_RBCPR_CFG 64
+#define	ICBID_SLAVE_RBCPR_CX_CFG ICBID_SLAVE_RBCPR_CFG
+#define	ICBID_SLAVE_RBCPR_QDSS_APU_CFG 65
+#define	ICBID_SLAVE_CNOC_MNOC_CFG 66
+#define	ICBID_SLAVE_SNOC_MPU_CFG 67
+#define	ICBID_SLAVE_CNOC_ONOC_CFG 68
+#define	ICBID_SLAVE_PNOC_CFG 69
+#define	ICBID_SLAVE_SNOC_CFG 70
+#define	ICBID_SLAVE_EBI1_DLL_CFG 71
+#define	ICBID_SLAVE_PHY_APU_CFG 72
+#define	ICBID_SLAVE_EBI1_PHY_CFG 73
+#define	ICBID_SLAVE_RPM 74
+#define	ICBID_SLAVE_CNOC_SNOC 75
+#define	ICBID_SLAVE_SERVICE_CNOC 76
+#define	ICBID_SLAVE_OVIRT_SNOC 77
+#define	ICBID_SLAVE_OVIRT_OCMEM 78
+#define	ICBID_SLAVE_USB_HS2 79
+#define	ICBID_SLAVE_QPIC 80
+#define	ICBID_SLAVE_IPS_CFG 81
+#define	ICBID_SLAVE_DSI_CFG 82
+#define	ICBID_SLAVE_USB3_1 83
+#define	ICBID_SLAVE_PCIE_0 84
+#define	ICBID_SLAVE_PCIE_1 85
+#define	ICBID_SLAVE_PSS_SMMU_CFG 86
+#define	ICBID_SLAVE_CRYPTO_2_CFG 87
+#define	ICBID_SLAVE_PCIE_0_CFG 88
+#define	ICBID_SLAVE_PCIE_1_CFG 89
+#define	ICBID_SLAVE_SATA_CFG 90
+#define	ICBID_SLAVE_SPSS_GENI_IR 91
+#define	ICBID_SLAVE_UFS_CFG 92
+#define	ICBID_SLAVE_AVSYNC_CFG 93
+#define	ICBID_SLAVE_VPU_CFG 94
+#define	ICBID_SLAVE_USB_PHY_CFG 95
+#define	ICBID_SLAVE_RBCPR_MX_CFG 96
+#define	ICBID_SLAVE_PCIE_PARF 97
+#define	ICBID_SLAVE_VCAP_CFG 98
+#define	ICBID_SLAVE_EMAC_CFG 99
+#define	ICBID_SLAVE_BCAST_CFG 100
+#define	ICBID_SLAVE_KLM_CFG 101
+#define	ICBID_SLAVE_DISPLAY_PWM 102
+#define	ICBID_SLAVE_GENI 103
+#define	ICBID_SLAVE_SNOC_BIMC_1 104
+#define	ICBID_SLAVE_AUDIO 105
+#define	ICBID_SLAVE_CATS_0 106
+#define	ICBID_SLAVE_CATS_1 107
+#define	ICBID_SLAVE_MM_INT_0 108
+#define	ICBID_SLAVE_MM_INT_1 109
+#define	ICBID_SLAVE_MM_INT_2 110
+#define	ICBID_SLAVE_MM_INT_BIMC 111
+#define	ICBID_SLAVE_MMU_MODEM_XPU_CFG 112
+#define	ICBID_SLAVE_MSS_INT 113
+#define	ICBID_SLAVE_PCNOC_INT_0 114
+#define	ICBID_SLAVE_PCNOC_INT_1 115
+#define	ICBID_SLAVE_PCNOC_M_0 116
+#define	ICBID_SLAVE_PCNOC_M_1 117
+#define	ICBID_SLAVE_PCNOC_S_0 118
+#define	ICBID_SLAVE_PCNOC_S_1 119
+#define	ICBID_SLAVE_PCNOC_S_2 120
+#define	ICBID_SLAVE_PCNOC_S_3 121
+#define	ICBID_SLAVE_PCNOC_S_4 122
+#define	ICBID_SLAVE_PCNOC_S_6 123
+#define	ICBID_SLAVE_PCNOC_S_7 124
+#define	ICBID_SLAVE_PCNOC_S_8 125
+#define	ICBID_SLAVE_PCNOC_S_9 126
+#define	ICBID_SLAVE_PRNG_XPU_CFG 127
+#define	ICBID_SLAVE_QDSS_INT 128
+#define	ICBID_SLAVE_RPM_XPU_CFG 129
+#define	ICBID_SLAVE_SNOC_INT_0 130
+#define	ICBID_SLAVE_SNOC_INT_1 131
+#define	ICBID_SLAVE_SNOC_INT_BIMC 132
+#define	ICBID_SLAVE_TCU 133
+#define	ICBID_SLAVE_BIMC_INT_0 134
+#define	ICBID_SLAVE_BIMC_INT_1 135
+#define	ICBID_SLAVE_RICA_CFG 136
+#define	ICBID_SLAVE_SNOC_BIMC_2 137
+#define	ICBID_SLAVE_BIMC_SNOC_1 138
+#define	ICBID_SLAVE_PNOC_A1NOC 139
+#define	ICBID_SLAVE_SNOC_VMEM 140
+#define	ICBID_SLAVE_A0NOC_SNOC 141
+#define	ICBID_SLAVE_A1NOC_SNOC 142
+#define	ICBID_SLAVE_A2NOC_SNOC 143
+#define	ICBID_SLAVE_A0NOC_CFG 144
+#define	ICBID_SLAVE_A0NOC_MPU_CFG 145
+#define	ICBID_SLAVE_A0NOC_SMMU_CFG 146
+#define	ICBID_SLAVE_A1NOC_CFG 147
+#define	ICBID_SLAVE_A1NOC_MPU_CFG 148
+#define	ICBID_SLAVE_A1NOC_SMMU_CFG 149
+#define	ICBID_SLAVE_A2NOC_CFG 150
+#define	ICBID_SLAVE_A2NOC_MPU_CFG 151
+#define	ICBID_SLAVE_A2NOC_SMMU_CFG 152
+#define	ICBID_SLAVE_AHB2PHY 153
+#define	ICBID_SLAVE_CAMERA_THROTTLE_CFG 154
+#define	ICBID_SLAVE_DCC_CFG 155
+#define	ICBID_SLAVE_DISPLAY_THROTTLE_CFG 156
+#define	ICBID_SLAVE_DSA_CFG 157
+#define	ICBID_SLAVE_DSA_MPU_CFG 158
+#define	ICBID_SLAVE_SSC_MPU_CFG 159
+#define	ICBID_SLAVE_HMSS_L3 160
+#define	ICBID_SLAVE_LPASS_SMMU_CFG 161
+#define	ICBID_SLAVE_MMAGIC_CFG 162
+#define	ICBID_SLAVE_PCIE20_AHB2PHY 163
+#define	ICBID_SLAVE_PCIE_2 164
+#define	ICBID_SLAVE_PCIE_2_CFG 165
+#define	ICBID_SLAVE_PIMEM 166
+#define	ICBID_SLAVE_PIMEM_CFG 167
+#define	ICBID_SLAVE_QDSS_RBCPR_APU_CFG 168
+#define	ICBID_SLAVE_RBCPR_CX 169
+#define	ICBID_SLAVE_RBCPR_MX 170
+#define	ICBID_SLAVE_SMMU_CPP_CFG 171
+#define	ICBID_SLAVE_SMMU_JPEG_CFG 172
+#define	ICBID_SLAVE_SMMU_MDP_CFG 173
+#define	ICBID_SLAVE_SMMU_ROTATOR_CFG 174
+#define	ICBID_SLAVE_SMMU_VENUS_CFG 175
+#define	ICBID_SLAVE_SMMU_VFE_CFG 176
+#define	ICBID_SLAVE_SSC_CFG 177
+#define	ICBID_SLAVE_VENUS_THROTTLE_CFG 178
+#define	ICBID_SLAVE_VMEM 179
+#define	ICBID_SLAVE_VMEM_CFG 180
+#define	ICBID_SLAVE_QDSS_MPU_CFG 181
+#define	ICBID_SLAVE_USB3_PHY_CFG 182
+#define	ICBID_SLAVE_IPA_CFG 183
+#define	ICBID_SLAVE_PCNOC_INT_2 184
+#define	ICBID_SLAVE_PCNOC_INT_3 185
+#define	ICBID_SLAVE_PCNOC_INT_4 186
+#define	ICBID_SLAVE_PCNOC_INT_5 187
+#define	ICBID_SLAVE_PCNOC_INT_6 188
+#define	ICBID_SLAVE_PCNOC_S_5 189
+#define	ICBID_SLAVE_QSPI 190
+#define	ICBID_SLAVE_A1NOC_MS_MPU_CFG 191
+#define	ICBID_SLAVE_A2NOC_MS_MPU_CFG 192
+#define	ICBID_SLAVE_MODEM_Q6_SMMU_CFG 193
+#define	ICBID_SLAVE_MSS_MPU_CFG 194
+#define	ICBID_SLAVE_MSS_PROC_MS_MPU_CFG 195
+#define	ICBID_SLAVE_SKL 196
+#define	ICBID_SLAVE_SNOC_INT_2 197
+#define	ICBID_SLAVE_SMMNOC_BIMC 198
+#define	ICBID_SLAVE_CRVIRT_A1NOC 199
+#define	ICBID_SLAVE_SGMII	 200
+#define	ICBID_SLAVE_QHS4_APPS	 201
+#define	ICBID_SLAVE_BIMC_PCNOC   202
+#define	ICBID_SLAVE_PCNOC_BIMC_1 203
+#define	ICBID_SLAVE_SPMI_FETCHER 204
+#define	ICBID_SLAVE_MMSS_SMMU_CFG 205
+#define	ICBID_SLAVE_WLAN 206
+#define	ICBID_SLAVE_CRVIRT_A2NOC 207
+#define	ICBID_SLAVE_CNOC_A2NOC 208
+#define	ICBID_SLAVE_GLM 209
+#define	ICBID_SLAVE_GNOC_BIMC 210
+#define	ICBID_SLAVE_GNOC_SNOC 211
+#define	ICBID_SLAVE_QM_CFG 212
+#define	ICBID_SLAVE_TLMM_EAST 213
+#define	ICBID_SLAVE_TLMM_NORTH 214
+#define	ICBID_SLAVE_TLMM_WEST 215
+#define	ICBID_SLAVE_LPASS_TCM	216
+#define	ICBID_SLAVE_TLMM_SOUTH	217
+#define	ICBID_SLAVE_TLMM_CENTER	218
+#define	ICBID_SLAVE_MSS_NAV_CE_MPU_CFG	219
+#define	ICBID_SLAVE_A2NOC_THROTTLE_CFG	220
+#define	ICBID_SLAVE_CDSP	221
+#define	ICBID_SLAVE_CDSP_SMMU_CFG	222
+#define	ICBID_SLAVE_LPASS_MPU_CFG	223
+#define	ICBID_SLAVE_CSI_PHY_CFG	224
+#endif
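The MSM_BUS_MASTER_* / MSM_BUS_SLAVE_* IDs above name the two endpoints of an interconnect bandwidth vote: a client picks a source master and a destination slave and attaches average and instantaneous bandwidth figures. A hypothetical vote record in C (the vendor msm_bus_scale structures differ in detail), using two IDs from this header:

    #include <stdint.h>
    #include <stdio.h>

    /* IDs as defined in the header above */
    #define MSM_BUS_MASTER_AMPSS_M0 1
    #define MSM_BUS_SLAVE_EBI_CH0   512

    /* hypothetical vote record for illustration only */
    struct bus_vote {
        int      src;  /* MSM_BUS_MASTER_* endpoint */
        int      dst;  /* MSM_BUS_SLAVE_* endpoint  */
        uint64_t ab;   /* average bandwidth, bytes/s */
        uint64_t ib;   /* instantaneous (peak) bandwidth, bytes/s */
    };

    int main(void)
    {
        /* e.g. the CPU cluster voting for DDR bandwidth */
        struct bus_vote vote = {
            .src = MSM_BUS_MASTER_AMPSS_M0,
            .dst = MSM_BUS_SLAVE_EBI_CH0,
            .ab  = 100000000,
            .ib  = 800000000,
        };
        printf("vote %d -> %d: ab=%llu ib=%llu\n", vote.src, vote.dst,
               (unsigned long long)vote.ab, (unsigned long long)vote.ib);
        return 0;
    }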
diff -Nruw linux-4.4.115-fbx/arch/metag/boot/dts/include/dt-bindings/msm/msm-bus-rule-ops.h linux-4.4.115-fbx/arch/metag/boot/dts/include/dt-bindings/msm/msm-bus-rule-ops.h
--- linux-4.4.115-fbx/arch/metag/boot/dts/include/dt-bindings/msm/msm-bus-rule-ops.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/arch/metag/boot/dts/include/dt-bindings/msm/msm-bus-rule-ops.h	2019-01-22 16:16:28.179288750 +0100
@@ -0,0 +1,33 @@
+/* Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MSM_BUS_RULE_OPS_H
+#define __MSM_BUS_RULE_OPS_H
+
+#define FLD_IB	0
+#define FLD_AB	1
+#define FLD_CLK	2
+
+#define OP_LE	0
+#define OP_LT	1
+#define OP_GE	2
+#define OP_GT	3
+#define OP_NOOP	4
+
+#define RULE_STATE_NOT_APPLIED	0
+#define RULE_STATE_APPLIED	1
+
+#define THROTTLE_ON	0
+#define THROTTLE_OFF	1
+#define THROTTLE_REG	2
+
+#endif
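Read together, FLD_* selects the quantity a rule tests (instantaneous bandwidth, average bandwidth, or clock rate), OP_* gives the comparison, and a rule that fires moves from RULE_STATE_NOT_APPLIED to RULE_STATE_APPLIED to drive one of the THROTTLE_* actions. A self-contained sketch of how such a comparison plausibly evaluates:

    #include <stdint.h>
    #include <stdio.h>

    /* constants as defined in the header above */
    #define OP_LE 0
    #define OP_LT 1
    #define OP_GE 2
    #define OP_GT 3
    #define OP_NOOP 4
    #define RULE_STATE_NOT_APPLIED 0
    #define RULE_STATE_APPLIED 1

    /* hypothetical evaluator: does `field <op> thresh` hold? */
    static int eval_rule(int op, uint64_t field, uint64_t thresh)
    {
        int hit;

        switch (op) {
        case OP_LE: hit = field <= thresh; break;
        case OP_LT: hit = field <  thresh; break;
        case OP_GE: hit = field >= thresh; break;
        case OP_GT: hit = field >  thresh; break;
        default:    hit = 0;               break; /* OP_NOOP never fires */
        }
        return hit ? RULE_STATE_APPLIED : RULE_STATE_NOT_APPLIED;
    }

    int main(void)
    {
        /* e.g. throttle when instantaneous bandwidth exceeds 1 GB/s */
        printf("state = %d\n", eval_rule(OP_GT, 1200000000ull, 1000000000ull));
        return 0;
    }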
diff -Nruw linux-4.4.115-fbx/arch/metag/boot/dts/include/dt-bindings/msm/pm.h linux-4.4.115-fbx/arch/metag/boot/dts/include/dt-bindings/msm/pm.h
--- linux-4.4.115-fbx/arch/metag/boot/dts/include/dt-bindings/msm/pm.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/arch/metag/boot/dts/include/dt-bindings/msm/pm.h	2019-01-22 16:16:28.179288750 +0100
@@ -0,0 +1,25 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __DT_MSM_PM_H__
+#define __DT_MSM_PM_H__
+
+#define LPM_RESET_LVL_NONE	0
+#define LPM_RESET_LVL_RET	1
+#define LPM_RESET_LVL_GDHS	2
+#define LPM_RESET_LVL_PC	3
+
+#define LPM_AFF_LVL_CPU		0
+#define LPM_AFF_LVL_L2		1
+#define LPM_AFF_LVL_CCI		2
+
+#endif
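The reset levels form an ordering from no state loss up to full power collapse, and the affinity levels name the scope a low-power state applies to (single CPU, L2 cluster, or CCI). A small illustrative table in C, assuming the usual reading of the abbreviations (RET retention, GDHS globally distributed head switch, PC power collapse):

    #include <stdio.h>

    /* constants as defined in the header above */
    #define LPM_RESET_LVL_NONE 0
    #define LPM_RESET_LVL_RET  1
    #define LPM_RESET_LVL_GDHS 2
    #define LPM_RESET_LVL_PC   3

    /* assumed meanings, deeper level = more state lost, more power saved */
    static const char *const lpm_reset_names[] = {
        [LPM_RESET_LVL_NONE] = "none (clock gating only)",
        [LPM_RESET_LVL_RET]  = "retention (state kept at low voltage)",
        [LPM_RESET_LVL_GDHS] = "GDHS (logic off, memory retained)",
        [LPM_RESET_LVL_PC]   = "power collapse (full state loss)",
    };

    int main(void)
    {
        for (int lvl = LPM_RESET_LVL_NONE; lvl <= LPM_RESET_LVL_PC; lvl++)
            printf("%d: %s\n", lvl, lpm_reset_names[lvl]);
        return 0;
    }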
diff -Nruw linux-4.4.115-fbx/arch/metag/boot/dts/include/dt-bindings/msm/power-on.h linux-4.4.115-fbx/arch/metag/boot/dts/include/dt-bindings/msm/power-on.h
--- linux-4.4.115-fbx/arch/metag/boot/dts/include/dt-bindings/msm/power-on.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/arch/metag/boot/dts/include/dt-bindings/msm/power-on.h	2019-01-22 16:16:28.179288750 +0100
@@ -0,0 +1,24 @@
+/* Copyright (c) 2015, 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MSM_POWER_ON_H__
+#define __MSM_POWER_ON_H__
+
+#define PON_POWER_OFF_RESERVED		0x00
+#define PON_POWER_OFF_WARM_RESET	0x01
+#define PON_POWER_OFF_SHUTDOWN		0x04
+#define PON_POWER_OFF_DVDD_SHUTDOWN	0x05
+#define PON_POWER_OFF_HARD_RESET	0x07
+#define PON_POWER_OFF_DVDD_HARD_RESET	0x08
+#define PON_POWER_OFF_MAX_TYPE		0x10
+
+#endif
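These values select how the PMIC takes the system down: a warm reset preserves enough state for a fast restart, while the shutdown and hard reset variants drop progressively more rails. A hypothetical policy helper in C (in practice the qpnp power-on driver chooses the type from the reboot reason):

    #include <stdio.h>

    /* constants as defined in the header above */
    #define PON_POWER_OFF_WARM_RESET 0x01
    #define PON_POWER_OFF_SHUTDOWN   0x04
    #define PON_POWER_OFF_HARD_RESET 0x07

    /* hypothetical policy: map a reboot request to a PON type */
    static int pon_type_for(int is_reboot, int force_cold)
    {
        if (!is_reboot)
            return PON_POWER_OFF_SHUTDOWN;
        return force_cold ? PON_POWER_OFF_HARD_RESET
                          : PON_POWER_OFF_WARM_RESET;
    }

    int main(void)
    {
        printf("reboot      -> 0x%02x\n", pon_type_for(1, 0));
        printf("cold reboot -> 0x%02x\n", pon_type_for(1, 1));
        printf("power off   -> 0x%02x\n", pon_type_for(0, 0));
        return 0;
    }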
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/arch/metag/boot/dts/include/dt-bindings/regulator/qcom,rpm-smd-regulator.h	2019-01-22 16:16:28.183288787 +0100
@@ -0,0 +1,28 @@
+/* Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __QCOM_RPM_SMD_REGULATOR_H
+#define __QCOM_RPM_SMD_REGULATOR_H
+
+#define RPM_SMD_REGULATOR_LEVEL_NONE		0
+#define RPM_SMD_REGULATOR_LEVEL_RETENTION	16
+#define RPM_SMD_REGULATOR_LEVEL_RETENTION_PLUS	32
+#define RPM_SMD_REGULATOR_LEVEL_MIN_SVS		48
+#define RPM_SMD_REGULATOR_LEVEL_LOW_SVS		64
+#define RPM_SMD_REGULATOR_LEVEL_SVS		128
+#define RPM_SMD_REGULATOR_LEVEL_SVS_PLUS	192
+#define RPM_SMD_REGULATOR_LEVEL_NOM		256
+#define RPM_SMD_REGULATOR_LEVEL_NOM_PLUS	320
+#define RPM_SMD_REGULATOR_LEVEL_TURBO		384
+#define RPM_SMD_REGULATOR_LEVEL_BINNING		512
+
+#endif
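These are abstract voltage corners, not microvolt values: the RPM translates each corner to a real voltage per chip, and the gaps in the numbering (16, 32, 48, 64, 128, ...) simply keep the ladder ordered with room for later insertions. A sketch that rounds a requested level up to the next defined corner, assuming that ordering:

    #include <stdio.h>

    /* corner ladder as defined in the header above, ascending */
    static const int rpm_corners[] = { 0, 16, 32, 48, 64, 128, 192, 256, 320, 384, 512 };
    #define N_CORNERS (sizeof(rpm_corners) / sizeof(rpm_corners[0]))

    /* round a requested level up to the next supported corner */
    static int snap_corner(int level)
    {
        for (unsigned i = 0; i < N_CORNERS; i++)
            if (level <= rpm_corners[i])
                return rpm_corners[i];
        return rpm_corners[N_CORNERS - 1];
    }

    int main(void)
    {
        printf("request 100 -> corner %d (SVS)\n", snap_corner(100));
        printf("request 300 -> corner %d (NOM_PLUS)\n", snap_corner(300));
        return 0;
    }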
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/arch/mips/boot/dts/include/dt-bindings/clock/audio-ext-clk.h	2019-01-22 16:16:28.167288642 +0100
@@ -0,0 +1,30 @@
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __AUDIO_EXT_CLK_H
+#define __AUDIO_EXT_CLK_H
+
+/* Audio External Clocks */
+#define AUDIO_PMI_CLK		0
+#define AUDIO_PMIC_LNBB_CLK	0
+#define AUDIO_AP_CLK		1
+#define AUDIO_AP_CLK2		2
+#define AUDIO_LPASS_MCLK	3
+#define AUDIO_LPASS_MCLK2	4
+
+#define clk_audio_ap_clk        0x9b5727cb
+#define clk_audio_pmi_clk       0xcbfe416d
+#define clk_audio_ap_clk2       0x454d1e91
+#define clk_audio_lpass_mclk    0xf0f2a284
+#define clk_audio_pmi_lnbb_clk   0x57312343
+
+#endif
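This header mixes two namespaces: small AUDIO_* cell indices for device-tree clock specifiers, and 32-bit clk_* values that appear to be per-name unique identifiers for the msm clock framework. Note that AUDIO_PMI_CLK and AUDIO_PMIC_LNBB_CLK share index 0, presumably as board-dependent alternatives for the same slot. A small illustrative lookup in C tying the two together:

    #include <stdint.h>
    #include <stdio.h>

    /* values as defined in the header above */
    #define AUDIO_PMI_CLK 0
    #define AUDIO_AP_CLK  1
    #define AUDIO_AP_CLK2 2

    static const struct { int cell; uint32_t id; const char *name; } audio_clks[] = {
        { AUDIO_PMI_CLK, 0xcbfe416d, "clk_audio_pmi_clk" },
        { AUDIO_AP_CLK,  0x9b5727cb, "clk_audio_ap_clk"  },
        { AUDIO_AP_CLK2, 0x454d1e91, "clk_audio_ap_clk2" },
    };

    int main(void)
    {
        for (unsigned i = 0; i < sizeof(audio_clks) / sizeof(audio_clks[0]); i++)
            printf("cell %d -> id 0x%08x (%s)\n",
                   audio_clks[i].cell, audio_clks[i].id, audio_clks[i].name);
        return 0;
    }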
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/arch/mips/boot/dts/include/dt-bindings/clock/msm-clocks-8996.h	2019-01-22 16:16:28.171288678 +0100
@@ -0,0 +1,572 @@
+/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MSM_CLOCKS_8996_H
+#define __MSM_CLOCKS_8996_H
+
+#include "audio-ext-clk.h"
+
+/* clock_gcc controlled clocks */
+#define clk_cxo_clk_src			0x79e95308
+#define clk_pnoc_clk			0x4325d220
+#define clk_pnoc_a_clk			0x2808c12b
+#define clk_bimc_clk			0x4b80bf00
+#define clk_bimc_a_clk			0x4b25668a
+#define clk_cnoc_clk			0xd5ccb7f4
+#define clk_cnoc_a_clk			0xd8fe2ccc
+#define clk_snoc_clk			0x2c341aa0
+#define clk_snoc_a_clk			0x8fcef2af
+#define clk_bb_clk1			0xf5304268
+#define clk_bb_clk1_ao			0xfa113810
+#define clk_bb_clk1_pin			0x6dd0a779
+#define clk_bb_clk1_pin_ao		0x9b637772
+#define clk_bb_clk2			0xfe15cb87
+#define clk_bb_clk2_ao			0x59682706
+#define clk_bb_clk2_pin			0x498938e5
+#define clk_bb_clk2_pin_ao		0x52513787
+#define clk_bimc_msmbus_clk		0xd212feea
+#define clk_bimc_msmbus_a_clk		0x71d1a499
+#define clk_ce1_a_clk			0x44a833fe
+#define clk_cnoc_msmbus_clk		0x62228b5d
+#define clk_cnoc_msmbus_a_clk		0x67442955
+#define clk_cxo_clk_src_ao		0x64eb6004
+#define clk_cxo_dwc3_clk		0xf79c19f6
+#define clk_cxo_lpm_clk			0x94adbf3d
+#define clk_cxo_otg_clk			0x4eec0bb9
+#define clk_cxo_pil_lpass_clk		0xe17f0ff6
+#define clk_cxo_pil_ssc_clk		0x81832015
+#define clk_div_clk1			0xaa1157a6
+#define clk_div_clk1_ao			0x6b943d68
+#define clk_div_clk2			0xd454019f
+#define clk_div_clk2_ao			0x53f9e788
+#define clk_div_clk3			0xa9a55a68
+#define clk_div_clk3_ao			0x3d6725a8
+#define clk_ipa_a_clk			0xeeec2919
+#define clk_ipa_clk			0xfa685cda
+#define clk_ln_bb_clk			0x3ab0b36d
+#define clk_ln_bb_a_clk			0xc7257ea8
+#define clk_ln_bb_clk_pin		0x1b1c476a
+#define clk_ln_bb_a_clk_pin		0x9cbb5411
+#define clk_mcd_ce1_clk			0xbb615d26
+#define clk_pnoc_keepalive_a_clk	0xf8f91f0b
+#define clk_pnoc_msmbus_clk		0x38b95c77
+#define clk_pnoc_msmbus_a_clk		0x8c9b4e93
+#define clk_pnoc_pm_clk			0xd6f7dfb9
+#define clk_pnoc_sps_clk		0xd482ecc7
+#define clk_qdss_a_clk			0xdd121669
+#define clk_qdss_clk			0x1492202a
+#define clk_rf_clk1			0xaabeea5a
+#define clk_rf_clk1_ao			0x72a10cb8
+#define clk_rf_clk1_pin			0x8f463562
+#define clk_rf_clk1_pin_ao		0x62549ff6
+#define clk_rf_clk2			0x24a30992
+#define clk_rf_clk2_ao			0x944d8bbd
+#define clk_rf_clk2_pin			0xa7c5602a
+#define clk_rf_clk2_pin_ao		0x2d75eb4d
+#define clk_snoc_msmbus_clk		0xe6900bb6
+#define clk_snoc_msmbus_a_clk		0x5d4683bd
+#define clk_qcedev_ce1_clk		0x293f97b0
+#define clk_qcrypto_ce1_clk		0xa6ac14df
+#define clk_qseecom_ce1_clk		0xaa858373
+#define clk_scm_ce1_clk			0xd8ebcc62
+#define clk_ce1_clk			0x42229c55
+#define clk_gcc_ce1_ahb_m_clk		0x2eb28c01
+#define clk_gcc_ce1_axi_m_clk		0xc174dfba
+#define clk_measure_only_bimc_hmss_axi_clk	0xc1cc4f11
+#define clk_aggre1_noc_clk		0x049abba8
+#define clk_aggre1_noc_a_clk		0xc12e4220
+#define clk_aggre2_noc_clk		0xaa681404
+#define clk_aggre2_noc_a_clk		0xcab67089
+#define clk_mmssnoc_axi_rpm_clk		0x4d7f8cdc
+#define clk_mmssnoc_axi_rpm_a_clk	0xfbea899b
+#define clk_mmssnoc_axi_clk		0xdb4b31e6
+#define clk_mmssnoc_axi_a_clk		0xd4970614
+#define clk_mmssnoc_gds_clk		0x06a22afa
+
+#define clk_gpll0			0x1ebe3bc4
+#define clk_gpll0_ao			0xa1368304
+#define clk_gpll0_out_main		0xe9374de7
+#define clk_gpll4			0xb3b5d85b
+#define clk_gpll4_out_main		0xa9a0ab9d
+#define clk_ufs_axi_clk_src		0x297ca380
+#define clk_pcie_aux_clk_src		0xebc50566
+#define clk_usb30_master_clk_src	0xc6262f89
+#define clk_usb20_master_clk_src	0x5680ac83
+#define clk_ufs_ice_core_clk_src	0xda8e7119
+#define clk_blsp1_qup1_i2c_apps_clk_src 0x17f78f5e
+#define clk_blsp1_qup1_spi_apps_clk_src 0xf534c4fa
+#define clk_blsp1_qup2_i2c_apps_clk_src 0x8de71c79
+#define clk_blsp1_qup2_spi_apps_clk_src 0x33cf809a
+#define clk_blsp1_qup3_i2c_apps_clk_src 0xf161b902
+#define clk_blsp1_qup3_spi_apps_clk_src 0x5e95683f
+#define clk_blsp1_qup4_i2c_apps_clk_src 0xb2ecce68
+#define clk_blsp1_qup4_spi_apps_clk_src 0xddb5bbdb
+#define clk_blsp1_qup5_i2c_apps_clk_src 0x71ea7804
+#define clk_blsp1_qup5_spi_apps_clk_src 0x9752f35f
+#define clk_blsp1_qup6_i2c_apps_clk_src 0x28806803
+#define clk_blsp1_qup6_spi_apps_clk_src 0x44a1edc4
+#define clk_blsp1_uart1_apps_clk_src	0xf8146114
+#define clk_blsp1_uart2_apps_clk_src	0xfc9c2f73
+#define clk_blsp1_uart3_apps_clk_src	0x600497f2
+#define clk_blsp1_uart4_apps_clk_src	0x56bff15c
+#define clk_blsp1_uart5_apps_clk_src	0x218ef697
+#define clk_blsp1_uart6_apps_clk_src	0x8fbdbe4c
+#define clk_blsp2_qup1_i2c_apps_clk_src 0xd6d1e95d
+#define clk_blsp2_qup1_spi_apps_clk_src 0xcc1b8365
+#define clk_blsp2_qup2_i2c_apps_clk_src 0x603b5c51
+#define clk_blsp2_qup2_spi_apps_clk_src 0xd577dc44
+#define clk_blsp2_qup3_i2c_apps_clk_src 0xea82959c
+#define clk_blsp2_qup3_spi_apps_clk_src 0xd04b1e92
+#define clk_blsp2_qup4_i2c_apps_clk_src 0x73dc968c
+#define clk_blsp2_qup4_spi_apps_clk_src 0x25d4a2b1
+#define clk_blsp2_qup5_i2c_apps_clk_src 0xcc3698bd
+#define clk_blsp2_qup5_spi_apps_clk_src 0xfa0cf45e
+#define clk_blsp2_qup6_i2c_apps_clk_src 0x2fa53151
+#define clk_blsp2_qup6_spi_apps_clk_src 0x5ca86755
+#define clk_blsp2_uart1_apps_clk_src	0x562c66dc
+#define clk_blsp2_uart2_apps_clk_src	0xdd448080
+#define clk_blsp2_uart3_apps_clk_src	0x46b2e90f
+#define clk_blsp2_uart4_apps_clk_src	0x23a093d2
+#define clk_blsp2_uart5_apps_clk_src	0xe067616a
+#define clk_blsp2_uart6_apps_clk_src	0xe02d2829
+#define clk_gp1_clk_src			0xad85b97a
+#define clk_gp2_clk_src			0xfb1f0065
+#define clk_gp3_clk_src			0x63b693d6
+#define clk_hmss_rbcpr_clk_src		0xedd9a474
+#define clk_pdm2_clk_src		0x31e494fd
+#define clk_sdcc1_apps_clk_src		0xd4975db2
+#define clk_sdcc2_apps_clk_src		0xfc46c821
+#define clk_sdcc3_apps_clk_src		0xea34c7f4
+#define clk_sdcc4_apps_clk_src		0x7aaaaa0c
+#define clk_tsif_ref_clk_src		0x4e9042d1
+#define clk_usb20_mock_utmi_clk_src	0xc3aaeecb
+#define clk_usb30_mock_utmi_clk_src	0xa024a976
+#define clk_usb3_phy_aux_clk_src	0x15eec63c
+#define clk_gcc_qusb2phy_prim_reset	0x07550fa1
+#define clk_gcc_qusb2phy_sec_reset	0x3f3a87d0
+#define clk_gcc_periph_noc_usb20_ahb_clk	0xfb9f26e9
+#define clk_gcc_mmss_gcc_dbg_clk	0xe89d461c
+#define clk_cpu_dbg_clk			0x6550dfa9
+#define clk_gcc_blsp1_ahb_clk		0x8caa5b4f
+#define clk_gcc_blsp1_qup1_i2c_apps_clk 0xc303fae9
+#define clk_gcc_blsp1_qup1_spi_apps_clk 0x759a76b0
+#define clk_gcc_blsp1_qup2_i2c_apps_clk 0x1076f220
+#define clk_gcc_blsp1_qup2_spi_apps_clk 0x3e77d48f
+#define clk_gcc_blsp1_qup3_i2c_apps_clk 0x9e25ac82
+#define clk_gcc_blsp1_qup3_spi_apps_clk 0xfb978880
+#define clk_gcc_blsp1_qup4_i2c_apps_clk 0xd7f40f6f
+#define clk_gcc_blsp1_qup4_spi_apps_clk 0x80f8722f
+#define clk_gcc_blsp1_qup5_i2c_apps_clk 0xacae5604
+#define clk_gcc_blsp1_qup5_spi_apps_clk 0xbf3e15d7
+#define clk_gcc_blsp1_qup6_i2c_apps_clk 0x5c6ad820
+#define clk_gcc_blsp1_qup6_spi_apps_clk 0x780d9f85
+#define clk_gcc_blsp1_uart1_apps_clk	0xc7c62f90
+#define clk_gcc_blsp1_uart2_apps_clk	0xf8a61c96
+#define clk_gcc_blsp1_uart3_apps_clk	0xc3298bd7
+#define clk_gcc_blsp1_uart4_apps_clk	0x26be16c0
+#define clk_gcc_blsp1_uart5_apps_clk	0x28a6bc74
+#define clk_gcc_blsp1_uart6_apps_clk	0x28fd3466
+#define clk_gcc_blsp2_ahb_clk		0x8f283c1d
+#define clk_gcc_blsp2_qup1_i2c_apps_clk 0x9ace11dd
+#define clk_gcc_blsp2_qup1_spi_apps_clk 0xa32604cc
+#define clk_gcc_blsp2_qup2_i2c_apps_clk 0x1bf9a57e
+#define clk_gcc_blsp2_qup2_spi_apps_clk 0xbf54ca6d
+#define clk_gcc_blsp2_qup3_i2c_apps_clk 0x336d4170
+#define clk_gcc_blsp2_qup3_spi_apps_clk 0xc68509d6
+#define clk_gcc_blsp2_qup4_i2c_apps_clk 0xbd22539d
+#define clk_gcc_blsp2_qup4_spi_apps_clk 0x01a72b93
+#define clk_gcc_blsp2_qup5_i2c_apps_clk 0xe2b2ce1d
+#define clk_gcc_blsp2_qup5_spi_apps_clk 0xf40999cd
+#define clk_gcc_blsp2_qup6_i2c_apps_clk 0x894bcea4
+#define clk_gcc_blsp2_qup6_spi_apps_clk 0xfe1bd34a
+#define clk_gcc_blsp2_uart1_apps_clk	0x8c3512ff
+#define clk_gcc_blsp2_uart2_apps_clk	0x1e1965a3
+#define clk_gcc_blsp2_uart3_apps_clk	0x382415ab
+#define clk_gcc_blsp2_uart4_apps_clk	0x87a44b42
+#define clk_gcc_blsp2_uart5_apps_clk	0x5cd30649
+#define clk_gcc_blsp2_uart6_apps_clk	0x8feee5ab
+#define clk_gcc_boot_rom_ahb_clk	0xde2adeb1
+#define clk_gcc_gp1_clk			0x057f7b69
+#define clk_gcc_gp2_clk			0x9bf83ffd
+#define clk_gcc_gp3_clk			0xec6539ee
+#define clk_gcc_hmss_rbcpr_clk		0x699183be
+#define clk_gcc_mmss_noc_cfg_ahb_clk	0xb41a9d99
+#define clk_gcc_pcie_0_aux_clk		0x3d2e3ece
+#define clk_gcc_pcie_0_cfg_ahb_clk	0x4dd325c3
+#define clk_gcc_pcie_0_mstr_axi_clk	0x3f85285b
+#define clk_gcc_pcie_0_slv_axi_clk	0xd69638a1
+#define clk_gcc_pcie_0_pipe_clk		0x4f37621e
+#define clk_gcc_pcie_0_phy_reset	0xdc3201c1
+#define clk_gcc_pcie_1_aux_clk		0xc9bb962c
+#define clk_gcc_pcie_1_cfg_ahb_clk	0xb6338658
+#define clk_gcc_pcie_1_mstr_axi_clk	0xc20f6269
+#define clk_gcc_pcie_1_slv_axi_clk	0xd54e40d6
+#define clk_gcc_pcie_1_pipe_clk		0xc1627422
+#define clk_gcc_pcie_1_phy_reset	0x674481bb
+#define clk_gcc_pcie_2_aux_clk		0xa4dc7ae8
+#define clk_gcc_pcie_2_cfg_ahb_clk	0x4f1d3121
+#define clk_gcc_pcie_2_mstr_axi_clk	0x9e81724a
+#define clk_gcc_pcie_2_slv_axi_clk	0x7990d8b2
+#define clk_gcc_pcie_2_pipe_clk		0xa757a834
+#define clk_gcc_pcie_2_phy_reset	0x82634880
+#define clk_gcc_pcie_phy_reset		0x9bc3c959
+#define clk_gcc_pcie_phy_com_reset	0x8bf513e6
+#define clk_gcc_pcie_phy_nocsr_com_phy_reset	0x0c16a2da
+#define clk_gcc_pcie_phy_aux_clk	0x4746e74f
+#define clk_gcc_pcie_phy_cfg_ahb_clk	0x8533671a
+#define clk_gcc_pdm2_clk		0x99d55711
+#define clk_gcc_pdm_ahb_clk		0x365664f6
+#define clk_gcc_prng_ahb_clk		0x397e7eaa
+#define clk_gcc_sdcc1_ahb_clk		0x691e0caa
+#define clk_gcc_sdcc1_apps_clk		0x9ad6fb96
+#define clk_gcc_sdcc2_ahb_clk		0x23d5727f
+#define clk_gcc_sdcc2_apps_clk		0x861b20ac
+#define clk_gcc_sdcc3_ahb_clk		0x565b2c03
+#define clk_gcc_sdcc3_apps_clk		0x0b27aeac
+#define clk_gcc_sdcc4_ahb_clk		0x64f3e6a8
+#define clk_gcc_sdcc4_apps_clk		0xbf7c4dc8
+#define clk_gcc_tsif_ahb_clk		0x88d2822c
+#define clk_gcc_tsif_ref_clk		0x8f1ed2c2
+#define clk_gcc_ufs_ahb_clk		0x1914bb84
+#define clk_gcc_ufs_axi_clk		0x47c743a7
+#define clk_gcc_ufs_ice_core_clk	0x310b0710
+#define clk_gcc_ufs_rx_cfg_clk		0xa6747786
+#define clk_gcc_ufs_rx_symbol_0_clk	0x7f43251c
+#define clk_gcc_ufs_rx_symbol_1_clk	0x03182fde
+#define clk_gcc_ufs_tx_cfg_clk		0xba2cf8b5
+#define clk_gcc_ufs_tx_symbol_0_clk	0x6a9f747a
+#define clk_gcc_ufs_unipro_core_clk	0x2daf7fd2
+#define clk_gcc_ufs_sys_clk_core_clk	0x360e5ac8
+#define clk_gcc_ufs_tx_symbol_clk_core_clk	0xf6fb0df7
+#define clk_gcc_usb20_master_clk	0x24c3b66a
+#define clk_gcc_usb20_mock_utmi_clk	0xe8db8203
+#define clk_gcc_usb20_sleep_clk		0x6e8cb4b2
+#define clk_gcc_usb30_master_clk	0xb3b4e2cb
+#define clk_gcc_usb30_mock_utmi_clk	0xa800b65a
+#define clk_gcc_usb30_sleep_clk		0xd0b65c92
+#define clk_gcc_usb3_phy_aux_clk	0x0d9a36e0
+#define clk_gcc_usb3_phy_pipe_clk	0xf279aff2
+#define clk_gcc_usb_phy_cfg_ahb2phy_clk	0xd1231a0e
+#define clk_gcc_aggre0_cnoc_ahb_clk	0x53a35559
+#define clk_gcc_aggre0_snoc_axi_clk	0x3c446400
+#define clk_gcc_aggre0_noc_qosgen_extref_clk	0x8c4356ba
+#define clk_hlos1_vote_lpass_core_smmu_clk	0x3aaa1743
+#define clk_hlos1_vote_lpass_adsp_smmu_clk	0xc76f702f
+#define clk_gcc_usb3_phy_reset		0x03d559f1
+#define clk_gcc_usb3phy_phy_reset	0xb1a4f885
+#define clk_gcc_usb3_clkref_clk		0xb6cc8f01
+#define clk_gcc_hdmi_clkref_clk		0x4d4eec04
+#define clk_gcc_edp_clkref_clk		0xa8685c3f
+#define clk_gcc_ufs_clkref_clk		0x92aa126f
+#define clk_gcc_pcie_clkref_clk		0xa2e247fa
+#define clk_gcc_rx2_usb2_clkref_clk	0x27ec24ba
+#define clk_gcc_rx1_usb2_clkref_clk	0x53351d25
+#define clk_gcc_smmu_aggre0_ahb_clk	0x47a06ce4
+#define clk_gcc_smmu_aggre0_axi_clk	0x3cac4a6c
+#define clk_gcc_sys_noc_usb3_axi_clk	0x94d26800
+#define clk_gcc_sys_noc_ufs_axi_clk	0x19d38312
+#define clk_gcc_aggre2_usb3_axi_clk	0xd5822a8e
+#define clk_gcc_aggre2_ufs_axi_clk	0xb31e5191
+#define clk_gcc_mmss_gpll0_div_clk	0xdd06848d
+#define clk_gcc_mmss_bimc_gfx_clk	0xe4f28754
+#define clk_gcc_bimc_gfx_clk		0x3edd69ad
+#define clk_gcc_qspi_ahb_clk		0x96969dc8
+#define clk_gcc_qspi_ser_clk		0xfaf1e266
+#define clk_qspi_ser_clk_src		0x426676ee
+#define clk_sdcc1_ice_core_clk_src	0xfd6a4301
+#define clk_gcc_sdcc1_ice_core_clk	0x0fd5680a
+#define clk_gcc_mss_cfg_ahb_clk		0x111cde81
+#define clk_gcc_mss_snoc_axi_clk	0x0e71de85
+#define clk_gcc_mss_q6_bimc_axi_clk	0x67544d62
+#define clk_gcc_mss_mnoc_bimc_axi_clk	0xf665d03f
+#define clk_gpll0_out_msscc		0x7d794829
+#define clk_gcc_debug_mux_v2		0xf7e749f0
+#define clk_gcc_dcc_ahb_clk		0xfa14a88c
+#define clk_gcc_aggre0_noc_mpu_cfg_ahb_clk	0x5c1bb8e2
+
+/* clock_mmss controlled clocks */
+#define clk_mmsscc_xo			0x05e63704
+#define clk_mmsscc_gpll0		0xe900c515
+#define clk_mmsscc_gpll0_div		0x73892e05
+#define clk_mmsscc_mmssnoc_ahb		0x7b4bd6f7
+#define clk_mmpll0			0xdd83b751
+#define clk_mmpll0_out_main		0x2f996a31
+#define clk_mmpll1			0x6da7fb90
+#define clk_mmpll1_out_main		0xa0d3a7da
+#define clk_mmpll4			0x22c063c1
+#define clk_mmpll4_out_main		0xfb21c2fd
+#define clk_mmpll3			0x18c76899
+#define clk_mmpll3_out_main		0x6eb6328f
+#define clk_ahb_clk_src			0x86f49203
+#define clk_mmpll2			0x1190e4d8
+#define clk_mmpll2_out_main		0x1e9e24a8
+#define clk_mmpll8			0xd06ad45e
+#define clk_mmpll8_out_main		0x75b1f386
+#define clk_mmpll9			0x1c50684c
+#define clk_mmpll9_out_main		0x16b74937
+#define clk_mmpll5			0xa41e1936
+#define clk_mmpll5_out_main		0xcc1897bf
+#define clk_csi0_clk_src		0x227e65bc
+#define clk_vfe0_clk_src		0xa0c2bd8f
+#define clk_vfe1_clk_src		0x4e357366
+#define clk_csi1_clk_src		0x6a2a6c36
+#define clk_csi2_clk_src		0x4113589f
+#define clk_csi3_clk_src		0xfd934012
+#define clk_maxi_clk_src		0x52c09777
+#define clk_cpp_clk_src			0x8382f56d
+#define clk_jpeg0_clk_src		0x9a0a0ac3
+#define clk_jpeg2_clk_src		0x5ad927f3
+#define clk_jpeg_dma_clk_src		0xb68afcea
+#define clk_mdp_clk_src			0x6dc1f8f1
+#define clk_video_core_clk_src		0x8be4c944
+#define clk_fd_core_clk_src		0xe4799ab7
+#define clk_cci_clk_src			0x822f3d97
+#define clk_csiphy0_3p_clk_src		0xd2474b12
+#define clk_csiphy1_3p_clk_src		0x46a02aff
+#define clk_csiphy2_3p_clk_src		0x1447813f
+#define clk_camss_gp0_clk_src		0x6b57cfe6
+#define clk_camss_gp1_clk_src		0xf735368a
+#define clk_mclk0_clk_src		0x266b3853
+#define clk_mclk1_clk_src		0xa73cad0c
+#define clk_mclk2_clk_src		0x42545468
+#define clk_mclk3_clk_src		0x2bfbb714
+#define clk_csi0phytimer_clk_src	0xc8a309be
+#define clk_csi1phytimer_clk_src	0x7c0fe23a
+#define clk_csi2phytimer_clk_src	0x62ffea9c
+#define clk_rbbmtimer_clk_src		0x17649ecc
+#define clk_esc0_clk_src		0xb41d7c38
+#define clk_esc1_clk_src		0x3b0afa42
+#define clk_hdmi_clk_src		0xb40aeea9
+#define clk_vsync_clk_src		0xecb43940
+#define clk_rbcpr_clk_src		0x2c2e9af2
+#define clk_video_subcore0_clk_src	0x88d79636
+#define clk_video_subcore1_clk_src	0x4966930c
+#define clk_mmss_bto_ahb_clk		0xfdf8c361
+#define clk_camss_ahb_clk		0xc4ff91d4
+#define clk_camss_cci_ahb_clk		0x04c4441a
+#define clk_camss_cci_clk		0xd6cb5eb9
+#define clk_camss_cpp_ahb_clk		0x12e9a87b
+#define clk_camss_cpp_clk		0xb82f366b
+#define clk_camss_cpp_axi_clk		0x5598c804
+#define clk_camss_cpp_vbif_ahb_clk	0xb5f31be4
+#define clk_camss_csi0_ahb_clk		0x6e29c972
+#define clk_camss_csi0_clk		0x30862ddb
+#define clk_camss_csi0phy_clk		0x2cecfb84
+#define clk_camss_csi0pix_clk		0x6946f77b
+#define clk_camss_csi0rdi_clk		0x83645ef5
+#define clk_camss_csi1_ahb_clk		0xccc15f06
+#define clk_camss_csi1_clk		0xb150f052
+#define clk_camss_csi1phy_clk		0xb989f06d
+#define clk_camss_csi1pix_clk		0x58d19bf3
+#define clk_camss_csi1rdi_clk		0x4d2f3352
+#define clk_camss_csi2_ahb_clk		0x92d02d75
+#define clk_camss_csi2_clk		0x74fc92e8
+#define clk_camss_csi2phy_clk		0xda05d9d8
+#define clk_camss_csi2pix_clk		0xf8ed0731
+#define clk_camss_csi2rdi_clk		0xdc1b2081
+#define clk_camss_csi3_ahb_clk		0xee5e459c
+#define clk_camss_csi3_clk		0x39488fdd
+#define clk_camss_csi3phy_clk		0x8b6063b9
+#define clk_camss_csi3pix_clk		0xd82bd467
+#define clk_camss_csi3rdi_clk		0xb6750046
+#define clk_camss_csi_vfe0_clk		0x3023937a
+#define clk_camss_csi_vfe1_clk		0xe66fa522
+#define clk_camss_csiphy0_3p_clk	0xf2a54f5a
+#define clk_camss_csiphy1_3p_clk	0x8bf70cb2
+#define clk_camss_csiphy2_3p_clk	0x1c14c939
+#define clk_camss_gp0_clk		0xcee7e51d
+#define clk_camss_gp1_clk		0x41f1c2e3
+#define clk_camss_ispif_ahb_clk		0x9a212c6d
+#define clk_camss_jpeg0_clk		0x0b0e2db7
+#define clk_camss_jpeg2_clk		0xd7291c8d
+#define clk_camss_jpeg_ahb_clk		0x1f47fd28
+#define clk_camss_jpeg_axi_clk		0x9e5545c8
+#define clk_camss_jpeg_dma_clk		0x2336e65d
+#define clk_camss_mclk0_clk		0xcf0c61e0
+#define clk_camss_mclk1_clk		0xd1410ed4
+#define clk_camss_mclk2_clk		0x851286f2
+#define clk_camss_mclk3_clk		0x4db11c45
+#define clk_camss_micro_ahb_clk		0x33a23277
+#define clk_camss_csi0phytimer_clk	0xff93b3c8
+#define clk_camss_csi1phytimer_clk	0x6c399ab6
+#define clk_camss_csi2phytimer_clk	0x24f47f49
+#define clk_camss_top_ahb_clk		0x8f8b2d33
+#define clk_camss_vfe_ahb_clk		0x595197bc
+#define clk_camss_vfe_axi_clk		0x273d4c31
+#define clk_camss_vfe0_ahb_clk		0x4652833c
+#define clk_camss_vfe0_clk		0x1e9bb8c4
+#define clk_camss_vfe0_stream_clk	0x22835fa4
+#define clk_camss_vfe1_ahb_clk		0x6a56abd3
+#define clk_camss_vfe1_clk		0x5bffa69b
+#define clk_camss_vfe1_stream_clk	0x92f849b9
+#define clk_fd_ahb_clk			0x868a2c5c
+#define clk_fd_core_clk			0x3badcae4
+#define clk_fd_core_uar_clk		0x7e624e15
+#define clk_gpu_ahb_clk			0xf97f1d43
+#define clk_gpu_aon_isense_clk		0xa9e9b297
+#define clk_gpu_gx_gfx3d_clk		0xb7ece823
+#define clk_gpu_mx_clk			0xb80ccedf
+#define clk_gpu_gx_rbbmtimer_clk	0xdeba634e
+#define clk_mdss_ahb_clk		0x684ccb41
+#define clk_mdss_axi_clk		0xcc07d687
+#define clk_mdss_esc0_clk		0x28cafbe6
+#define clk_mdss_esc1_clk		0xc22c6883
+#define clk_mdss_hdmi_ahb_clk		0x01cef516
+#define clk_mdss_hdmi_clk		0x097a6de9
+#define clk_mdss_mdp_clk		0x618336ac
+#define clk_mdss_vsync_clk		0x42a022d3
+#define clk_mmss_misc_ahb_clk		0xea30b0e7
+#define clk_mmss_misc_cxo_clk		0xe620cd80
+#define clk_mmagic_bimc_noc_cfg_ahb_clk 0x12d5ba72
+#define clk_mmagic_camss_axi_clk	0xa8b1c16b
+#define clk_mmagic_camss_noc_cfg_ahb_clk 0x5182c819
+#define clk_mmss_mmagic_cfg_ahb_clk	0x5e94a822
+#define clk_mmagic_mdss_axi_clk		0xa0359d10
+#define clk_mmagic_mdss_noc_cfg_ahb_clk 0x9c6d5482
+#define clk_mmagic_video_axi_clk	0x7b9219c3
+#define clk_mmagic_video_noc_cfg_ahb_clk 0x5124d256
+#define clk_mmss_mmagic_ahb_clk		0x3d15f2b0
+#define clk_mmss_mmagic_maxi_clk	0xbdaf5af7
+#define clk_mmss_rbcpr_ahb_clk		0x623ba55f
+#define clk_mmss_rbcpr_clk		0x69a23a6f
+#define clk_mmss_spdm_cpp_clk		0xefe35cd2
+#define clk_mmss_spdm_jpeg_dma_clk	0xcb7bd5a0
+#define clk_smmu_cpp_ahb_clk		0x3ad82d84
+#define clk_smmu_cpp_axi_clk		0xa6bb2f4a
+#define clk_smmu_jpeg_ahb_clk		0x10c436ec
+#define clk_smmu_jpeg_axi_clk		0x41112f37
+#define clk_smmu_mdp_ahb_clk		0x04994cb2
+#define clk_smmu_mdp_axi_clk		0x7fd71687
+#define clk_smmu_rot_ahb_clk		0xa30772c9
+#define clk_smmu_rot_axi_clk		0xfed7c078
+#define clk_smmu_vfe_ahb_clk		0x4dabebe7
+#define clk_smmu_vfe_axi_clk		0xde483725
+#define clk_smmu_video_ahb_clk		0x2d738e2c
+#define clk_smmu_video_axi_clk		0xe2b5b887
+#define clk_video_ahb_clk		0x90775cfb
+#define clk_video_axi_clk		0xe6c16dba
+#define clk_video_core_clk		0x7e876ec3
+#define clk_video_maxi_clk		0x97749db6
+#define clk_video_subcore0_clk		0xb6f63e6c
+#define clk_video_subcore1_clk		0x26c29cb4
+#define clk_vmem_ahb_clk		0xab6223ff
+#define clk_vmem_maxi_clk		0x15ef32db
+#define clk_mmss_debug_mux		0xe646ffda
+#define clk_mmss_gcc_dbg_clk		0xafa4d48a
+#define clk_gfx3d_clk_src		0x917f76ef
+#define clk_extpclk_clk_src		0xb2c31abd
+#define clk_mdss_byte0_clk		0xf5a03f64
+#define clk_mdss_byte1_clk		0xb8c7067d
+#define clk_mdss_extpclk_clk		0xfa5aadb0
+#define clk_mdss_pclk0_clk		0x3487234a
+#define clk_mdss_pclk1_clk		0xd5804246
+#define clk_gpu_gcc_dbg_clk		0x0ccc42cd
+#define clk_mdss_mdp_vote_clk		0x588460a4
+#define clk_mdss_rotator_vote_clk	0x5b1f675e
+#define clk_mmpll2_postdiv_clk		0x4fdeaaba
+#define clk_mmpll8_postdiv_clk		0xedf57882
+#define clk_mmpll9_postdiv_clk		0x3064b618
+#define clk_gfx3d_clk_src_v2		0x4210acb7
+#define clk_byte0_clk_src		0x75cc885b
+#define clk_byte1_clk_src		0x63c2c955
+#define clk_pclk0_clk_src		0xccac1f35
+#define clk_pclk1_clk_src		0x090f68ac
+#define clk_ext_byte0_clk_src		0xfb32f31e
+#define clk_ext_byte1_clk_src		0x585ef6d4
+#define clk_ext_pclk0_clk_src		0x087c1612
+#define clk_ext_pclk1_clk_src		0x8067c5a3
+
+/* clock_debug controlled clocks */
+#define clk_gcc_debug_mux		0x8121ac15
+
+/* external multimedia clocks */
+#define clk_dsi0pll_pixel_clk_mux	0x792379e1
+#define clk_dsi0pll_byte_clk_mux	0x60e83f06
+#define clk_dsi0pll_byte_clk_src	0xbbaa30be
+#define clk_dsi0pll_pixel_clk_src	0x45b3260f
+#define clk_dsi0pll_n2_div_clk		0x1474c213
+#define clk_dsi0pll_post_n1_div_clk	0xdab8c389
+#define clk_dsi0pll_vco_clk		0x15940d40
+#define clk_dsi1pll_pixel_clk_mux	0x36458019
+#define clk_dsi1pll_byte_clk_mux	0xb5a42b7b
+#define clk_dsi1pll_byte_clk_src	0x63930a8f
+#define clk_dsi1pll_pixel_clk_src	0x0e4c9b56
+#define clk_dsi1pll_n2_div_clk		0x2c9d4007
+#define clk_dsi1pll_post_n1_div_clk	0x03020041
+#define clk_dsi1pll_vco_clk		0x99797b50
+#define clk_mdss_dsi1_vco_clk_src	0xfcd15658
+#define clk_hdmi_vco_clk		0x66003284
+
+#define clk_dsi0pll_shadow_byte_clk_src	0x177c029c
+#define clk_dsi0pll_shadow_pixel_clk_src	0x98ae3c92
+#define clk_dsi0pll_shadow_n2_div_clk	0xd5f0dad9
+#define clk_dsi0pll_shadow_post_n1_div_clk	0x1f7c8cf8
+#define clk_dsi0pll_shadow_vco_clk	0xb100ca83
+#define clk_dsi1pll_shadow_byte_clk_src	0xfc021ce5
+#define clk_dsi1pll_shadow_pixel_clk_src	0xdcca3ffc
+#define clk_dsi1pll_shadow_n2_div_clk		0x189541bf
+#define clk_dsi1pll_shadow_post_n1_div_clk	0x1637020e
+#define clk_dsi1pll_shadow_vco_clk		0x68d8b6f7
+
+/* CPU clocks */
+#define clk_pwrcl_clk 0xc554130e
+#define clk_pwrcl_pll 0x25454ca1
+#define clk_pwrcl_alt_pll 0xc445471b
+#define clk_pwrcl_pll_main 0x28948e22
+#define clk_pwrcl_alt_pll_main 0x25c8270e
+#define clk_pwrcl_hf_mux 0x77706ae6
+#define clk_pwrcl_lf_mux 0xd99e334d
+#define clk_perfcl_clk 0x58869997
+#define clk_perfcl_pll 0x97dcec1c
+#define clk_perfcl_alt_pll 0xfe2eaea1
+#define clk_perfcl_pll_main 0x0dbf0c0b
+#define clk_perfcl_alt_pll_main 0x0b892aab
+#define clk_perfcl_hf_mux 0x9e8bbe59
+#define clk_perfcl_lf_mux 0x2f9c278d
+#define clk_cbf_pll 0xfe2e96a3
+#define clk_cbf_pll_main 0x2b05cf95
+#define clk_cbf_hf_mux 0x71244f73
+#define clk_cbf_clk 0x48e9e16b
+#define clk_xo_ao 0x428c856d
+#define clk_sys_apcsaux_clk 0x0b0dd513
+#define clk_cpu_debug_mux 0xc7acaa31
+
+/* GCC block resets */
+#define QUSB2PHY_PRIM_BCR		0
+#define QUSB2PHY_SEC_BCR		1
+#define BLSP1_BCR			2
+#define BLSP2_BCR			3
+#define BOOT_ROM_BCR			4
+#define PRNG_BCR			5
+#define UFS_BCR				6
+#define USB_20_BCR			7
+#define USB_30_BCR			8
+#define USB3_PHY_BCR			9
+#define USB3PHY_PHY_BCR			10
+#define PCIE_0_PHY_BCR			11
+#define PCIE_1_PHY_BCR			12
+#define PCIE_2_PHY_BCR			13
+#define PCIE_PHY_BCR			14
+#define PCIE_PHY_COM_BCR		15
+#define PCIE_PHY_NOCSR_COM_PHY_BCR	16
+
+/* MMSS Block resets */
+#define VIDEO_BCR			0
+#define MDSS_BCR			1
+#define CAMSS_MICRO_BCR			2
+#define CAMSS_JPEG_BCR			3
+#define CAMSS_VFE0_BCR			4
+#define CAMSS_VFE1_BCR			5
+#define FD_BCR				6
+#define GPU_GX_BCR			7
+
+#endif
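
(Annotation: the hashed constants above are consumed from DTS sources, which include this header and pass the ID in a clock specifier. A minimal consumer sketch, assuming a downstream clock_gcc provider node and an sdhc_1 host node; the labels and clock-names strings are illustrative, not taken from this patch:)

	#include <dt-bindings/clock/msm-clocks-8996.h>

	&sdhc_1 {
		clocks = <&clock_gcc clk_gcc_sdcc1_ahb_clk>,
			 <&clock_gcc clk_gcc_sdcc1_apps_clk>;
		clock-names = "iface_clk", "core_clk";
	};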
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/arch/mips/boot/dts/include/dt-bindings/clock/msm-clocks-8998.h	2019-01-22 16:16:28.171288678 +0100
@@ -0,0 +1,534 @@
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MSM_CLOCKS_8998_H
+#define __MSM_CLOCKS_8998_H
+
+#include "audio-ext-clk.h"
+
+/* clock_rpm controlled clocks */
+#define clk_ce1_clk				0x42229c55
+#define clk_ce1_a_clk				0x44a833fe
+#define clk_cxo_clk_src				0x79e95308
+#define clk_bimc_clk				0x4b80bf00
+#define clk_bimc_a_clk				0x4b25668a
+#define clk_cnoc_clk				0xd5ccb7f4
+#define clk_cnoc_a_clk				0xd8fe2ccc
+#define clk_snoc_clk				0x2c341aa0
+#define clk_snoc_a_clk				0x8fcef2af
+#define clk_cnoc_periph_clk			0xb11e9cf9
+#define clk_cnoc_periph_a_clk			0x1d7faa2e
+#define clk_cnoc_periph_keepalive_a_clk		0x7287aef2
+#define clk_ln_bb_clk1				0xb867b147
+#define clk_ln_bb_clk1_ao			0x7f63a93a
+#define clk_ln_bb_clk1_pin			0x6fc5653c
+#define clk_ln_bb_clk1_pin_ao			0x25d625bf
+#define clk_ln_bb_clk2				0xf83e6387
+#define clk_ln_bb_clk2_ao			0x96f09628
+#define clk_ln_bb_clk2_pin			0xa9ebe8d5
+#define clk_ln_bb_clk2_pin_ao			0x89a1226f
+#define clk_ln_bb_clk3				0x4f52a39e
+#define clk_ln_bb_clk3_ao			0xb15eba76
+#define clk_ln_bb_clk3_pin			0xc4de7dad
+#define clk_ln_bb_clk3_pin_ao			0xc01022e8
+#define clk_bimc_msmbus_clk			0xd212feea
+#define clk_bimc_msmbus_a_clk			0x71d1a499
+#define clk_cnoc_msmbus_clk			0x62228b5d
+#define clk_cnoc_msmbus_a_clk			0x67442955
+#define clk_cxo_clk_src_ao			0x64eb6004
+#define clk_cxo_dwc3_clk			0xf79c19f6
+#define clk_cxo_lpm_clk				0x94adbf3d
+#define clk_cxo_otg_clk				0x4eec0bb9
+#define clk_cxo_pil_lpass_clk			0xe17f0ff6
+#define clk_cxo_pil_ssc_clk			0x81832015
+#define clk_cxo_pil_spss_clk			0x5cd71a61
+#define clk_div_clk1				0xaa1157a6
+#define clk_div_clk1_ao				0x6b943d68
+#define clk_div_clk2				0xd454019f
+#define clk_div_clk2_ao				0x53f9e788
+#define clk_div_clk3				0xa9a55a68
+#define clk_div_clk3_ao				0x3d6725a8
+#define clk_ipa_clk				0xfa685cda
+#define clk_ipa_a_clk				0xeeec2919
+#define clk_mcd_ce1_clk				0xbb615d26
+#define clk_mmssnoc_axi_clk			0xdb4b31e6
+#define clk_mmssnoc_axi_a_clk			0xd4970614
+#define clk_qcedev_ce1_clk			0x293f97b0
+#define clk_qcrypto_ce1_clk			0xa6ac14df
+#define clk_qdss_clk				0x1492202a
+#define clk_qdss_a_clk				0xdd121669
+#define clk_qseecom_ce1_clk			0xaa858373
+#define clk_rf_clk1				0xaabeea5a
+#define clk_rf_clk1_ao				0x72a10cb8
+#define clk_rf_clk1_pin				0x8f463562
+#define clk_rf_clk1_pin_ao			0x62549ff6
+#define clk_rf_clk2				0x24a30992
+#define clk_rf_clk2_ao				0x944d8bbd
+#define clk_rf_clk2_pin				0xa7c5602a
+#define clk_rf_clk2_pin_ao			0x2d75eb4d
+#define clk_rf_clk3				0xb673936b
+#define clk_rf_clk3_ao				0x038bb968
+#define clk_rf_clk3_pin				0x726f53f5
+#define clk_rf_clk3_pin_ao			0x76f9240f
+#define clk_scm_ce1_clk				0xd8ebcc62
+#define clk_snoc_msmbus_clk			0xe6900bb6
+#define clk_snoc_msmbus_a_clk			0x5d4683bd
+#define clk_gcc_ce1_ahb_m_clk			0x2eb28c01
+#define clk_gcc_ce1_axi_m_clk			0xc174dfba
+#define clk_aggre1_noc_clk			0x049abba8
+#define clk_aggre1_noc_a_clk			0xc12e4220
+#define clk_aggre2_noc_clk			0xaa681404
+#define clk_aggre2_noc_a_clk			0xcab67089
+#define clk_measure_only_bimc_hmss_axi_clk	0xc1cc4f11
+
+/* clock_gcc controlled clocks */
+#define clk_debug_mmss_clk			0x977c99b6
+#define clk_debug_rpm_clk			0x8e2b07ca
+#define clk_debug_cpu_clk			0x0e696b2b
+#define clk_gpu_gcc_debug_clk			0x3eb88190
+#define clk_gfx_gcc_debug_clk			0xa3a64fec
+#define clk_gpll0				0x1ebe3bc4
+#define clk_gpll0_out_main			0xe9374de7
+#define clk_gpll0_ao				0xa1368304
+#define clk_gcc_mmss_gpll0_clk			0x8050f008
+#define clk_gcc_mmss_gpll0_div_clk		0xdd06848d
+#define clk_gcc_gpu_gpll0_clk			0xdad7a7a4
+#define clk_gcc_gpu_gpll0_div_clk		0x07d16c6a
+#define clk_gpll4				0xb3b5d85b
+#define clk_gpll4_out_main			0xa9a0ab9d
+#define clk_usb30_master_clk_src		0xc6262f89
+#define clk_pcie_aux_clk_src			0xebc50566
+#define clk_ufs_axi_clk_src			0x297ca380
+#define clk_blsp1_qup1_i2c_apps_clk_src		0x17f78f5e
+#define clk_blsp1_qup1_spi_apps_clk_src		0xf534c4fa
+#define clk_blsp1_qup2_i2c_apps_clk_src		0x8de71c79
+#define clk_blsp1_qup2_spi_apps_clk_src		0x33cf809a
+#define clk_blsp1_qup3_i2c_apps_clk_src		0xf161b902
+#define clk_blsp1_qup3_spi_apps_clk_src		0x5e95683f
+#define clk_blsp1_qup4_i2c_apps_clk_src		0xb2ecce68
+#define clk_blsp1_qup4_spi_apps_clk_src		0xddb5bbdb
+#define clk_blsp1_qup5_i2c_apps_clk_src		0x71ea7804
+#define clk_blsp1_qup5_spi_apps_clk_src		0x9752f35f
+#define clk_blsp1_qup6_i2c_apps_clk_src		0x28806803
+#define clk_blsp1_qup6_spi_apps_clk_src		0x44a1edc4
+#define clk_blsp1_uart1_apps_clk_src		0xf8146114
+#define clk_blsp1_uart2_apps_clk_src		0xfc9c2f73
+#define clk_blsp1_uart3_apps_clk_src		0x600497f2
+#define clk_blsp2_qup1_i2c_apps_clk_src		0xd6d1e95d
+#define clk_blsp2_qup1_spi_apps_clk_src		0xcc1b8365
+#define clk_blsp2_qup2_i2c_apps_clk_src		0x603b5c51
+#define clk_blsp2_qup2_spi_apps_clk_src		0xd577dc44
+#define clk_blsp2_qup3_i2c_apps_clk_src		0xea82959c
+#define clk_blsp2_qup3_spi_apps_clk_src		0xd04b1e92
+#define clk_blsp2_qup4_i2c_apps_clk_src		0x73dc968c
+#define clk_blsp2_qup4_spi_apps_clk_src		0x25d4a2b1
+#define clk_blsp2_qup5_i2c_apps_clk_src		0xcc3698bd
+#define clk_blsp2_qup5_spi_apps_clk_src		0xfa0cf45e
+#define clk_blsp2_qup6_i2c_apps_clk_src		0x2fa53151
+#define clk_blsp2_qup6_spi_apps_clk_src		0x5ca86755
+#define clk_blsp2_uart1_apps_clk_src		0x562c66dc
+#define clk_blsp2_uart2_apps_clk_src		0xdd448080
+#define clk_blsp2_uart3_apps_clk_src		0x46b2e90f
+#define clk_gp1_clk_src				0xad85b97a
+#define clk_gp2_clk_src				0xfb1f0065
+#define clk_gp3_clk_src				0x63b693d6
+#define clk_hmss_rbcpr_clk_src			0xedd9a474
+#define clk_pdm2_clk_src			0x31e494fd
+#define clk_sdcc2_apps_clk_src			0xfc46c821
+#define clk_sdcc4_apps_clk_src			0x7aaaaa0c
+#define clk_tsif_ref_clk_src			0x4e9042d1
+#define clk_ufs_ice_core_clk_src		0xda8e7119
+#define clk_ufs_phy_aux_clk_src			0xc6bca085
+#define clk_ufs_unipro_core_clk_src		0x179e80a9
+#define clk_usb30_mock_utmi_clk_src		0xa024a976
+#define clk_usb3_phy_aux_clk_src		0x15eec63c
+#define clk_qspi_ref_clk_src			0xfe6b8e11
+#define clk_gcc_pcie_phy_0_reset		0x6bb4df33
+#define clk_gcc_usb3_phy_reset			0x03d559f1
+#define clk_gcc_usb3phy_phy_reset		0xb1a4f885
+#define clk_gcc_aggre1_ufs_axi_clk		0x873459d8
+#define clk_gcc_aggre1_ufs_axi_hw_ctl_clk	0x117a6f39
+#define clk_gcc_aggre1_usb3_axi_clk		0xc5c3fbe8
+#define clk_gcc_bimc_mss_q6_axi_clk		0x7437988f
+#define clk_gcc_blsp1_ahb_clk			0x8caa5b4f
+#define clk_gcc_blsp1_qup1_i2c_apps_clk		0xc303fae9
+#define clk_gcc_blsp1_qup1_spi_apps_clk		0x759a76b0
+#define clk_gcc_blsp1_qup2_i2c_apps_clk		0x1076f220
+#define clk_gcc_blsp1_qup2_spi_apps_clk		0x3e77d48f
+#define clk_gcc_blsp1_qup3_i2c_apps_clk		0x9e25ac82
+#define clk_gcc_blsp1_qup3_spi_apps_clk		0xfb978880
+#define clk_gcc_blsp1_qup4_i2c_apps_clk		0xd7f40f6f
+#define clk_gcc_blsp1_qup4_spi_apps_clk		0x80f8722f
+#define clk_gcc_blsp1_qup5_i2c_apps_clk		0xacae5604
+#define clk_gcc_blsp1_qup5_spi_apps_clk		0xbf3e15d7
+#define clk_gcc_blsp1_qup6_i2c_apps_clk		0x5c6ad820
+#define clk_gcc_blsp1_qup6_spi_apps_clk		0x780d9f85
+#define clk_gcc_blsp1_uart1_apps_clk		0xc7c62f90
+#define clk_gcc_blsp1_uart2_apps_clk		0xf8a61c96
+#define clk_gcc_blsp1_uart3_apps_clk		0xc3298bd7
+#define clk_gcc_blsp2_ahb_clk			0x8f283c1d
+#define clk_gcc_blsp2_qup1_i2c_apps_clk		0x9ace11dd
+#define clk_gcc_blsp2_qup1_spi_apps_clk		0xa32604cc
+#define clk_gcc_blsp2_qup2_i2c_apps_clk		0x1bf9a57e
+#define clk_gcc_blsp2_qup2_spi_apps_clk		0xbf54ca6d
+#define clk_gcc_blsp2_qup3_i2c_apps_clk		0x336d4170
+#define clk_gcc_blsp2_qup3_spi_apps_clk		0xc68509d6
+#define clk_gcc_blsp2_qup4_i2c_apps_clk		0xbd22539d
+#define clk_gcc_blsp2_qup4_spi_apps_clk		0x01a72b93
+#define clk_gcc_blsp2_qup5_i2c_apps_clk		0xe2b2ce1d
+#define clk_gcc_blsp2_qup5_spi_apps_clk		0xf40999cd
+#define clk_gcc_blsp2_qup6_i2c_apps_clk		0x894bcea4
+#define clk_gcc_blsp2_qup6_spi_apps_clk		0xfe1bd34a
+#define clk_gcc_blsp2_uart1_apps_clk		0x8c3512ff
+#define clk_gcc_blsp2_uart2_apps_clk		0x1e1965a3
+#define clk_gcc_blsp2_uart3_apps_clk		0x382415ab
+#define clk_gcc_boot_rom_ahb_clk		0xde2adeb1
+#define clk_gcc_bimc_gfx_clk			0x3edd69ad
+#define clk_gcc_cfg_noc_usb3_axi_clk		0x9ea4c2d9
+#define clk_gcc_gp1_clk				0x057f7b69
+#define clk_gcc_gp2_clk				0x9bf83ffd
+#define clk_gcc_gp3_clk				0xec6539ee
+#define clk_gcc_gpu_bimc_gfx_clk		0x3909459b
+#define clk_gcc_gpu_cfg_ahb_clk			0x72f20a57
+#define clk_gcc_gpu_iref_clk			0xfd82abad
+#define clk_gcc_hmss_dvm_bus_clk		0x17cc8b53
+#define clk_gcc_hmss_rbcpr_clk			0x699183be
+#define clk_hmss_gpll0_clk_src			0x17eb05d0
+#define clk_hmss_gpll4_clk_src			0x20456cae
+#define clk_gcc_mmss_sys_noc_axi_clk		0x4467b15b
+#define clk_gcc_mss_at_clk			0x1692c5aa
+#define clk_nav_gcc_dbg_clk			0x2221c544
+#define clk_gcc_pcie_0_aux_clk			0x3d2e3ece
+#define clk_gcc_pcie_0_cfg_ahb_clk		0x4dd325c3
+#define clk_gcc_pcie_0_mstr_axi_clk		0x3f85285b
+#define clk_gcc_pcie_0_pipe_clk			0x4f37621e
+#define clk_gcc_pcie_0_slv_axi_clk		0xd69638a1
+#define clk_gcc_pcie_phy_aux_clk		0x4746e74f
+#define clk_gcc_pdm2_clk			0x99d55711
+#define clk_gcc_pdm_ahb_clk			0x365664f6
+#define clk_gcc_prng_ahb_clk			0x397e7eaa
+#define clk_gcc_sdcc2_ahb_clk			0x23d5727f
+#define clk_gcc_sdcc2_apps_clk			0x861b20ac
+#define clk_gcc_sdcc4_ahb_clk			0x64f3e6a8
+#define clk_gcc_sdcc4_apps_clk			0xbf7c4dc8
+#define clk_gcc_tsif_ahb_clk			0x88d2822c
+#define clk_gcc_tsif_ref_clk			0x8f1ed2c2
+#define clk_gcc_ufs_ahb_clk			0x1914bb84
+#define clk_gcc_ufs_axi_clk			0x47c743a7
+#define clk_gcc_ufs_axi_hw_ctl_clk		0x69385b45
+#define clk_gcc_ufs_ice_core_clk		0x310b0710
+#define clk_gcc_ufs_ice_core_hw_ctl_clk		0x84e15a5b
+#define clk_gcc_ufs_phy_aux_clk			0x17acc8fb
+#define clk_gcc_ufs_phy_aux_hw_ctl_clk		0x7dbdb2e2
+#define clk_gcc_ufs_rx_symbol_0_clk		0x7f43251c
+#define clk_gcc_ufs_rx_symbol_1_clk		0x03182fde
+#define clk_gcc_ufs_tx_symbol_0_clk		0x6a9f747a
+#define clk_ufs_tx_symbol_0_clk			0xb3fcd0f7
+#define clk_ufs_rx_symbol_0_clk			0x17a0f1cd
+#define clk_gcc_ufs_unipro_core_clk		0x2daf7fd2
+#define clk_gcc_ufs_unipro_core_hw_ctl_clk	0x4a4e0f3d
+#define clk_gcc_usb30_master_clk		0xb3b4e2cb
+#define clk_gcc_usb30_mock_utmi_clk		0xa800b65a
+#define clk_gcc_usb30_sleep_clk			0xd0b65c92
+#define clk_gcc_usb3_phy_aux_clk		0x0d9a36e0
+#define clk_gcc_usb3_phy_pipe_clk		0xf279aff2
+#define clk_gcc_usb3_clkref_clk			0xb6cc8f00
+#define clk_gcc_hdmi_clkref_clk			0x4d4eec04
+#define clk_gcc_edp_clkref_clk			0xa8685c3f
+#define clk_gcc_ufs_clkref_clk			0x92aa126f
+#define clk_gcc_pcie_clkref_clk			0xa2e247fa
+#define clk_gcc_rx2_qlink_clkref_clk		0xd0ba986d
+#define clk_gcc_rx1_usb2_clkref_clk		0x53351d25
+#define clk_gcc_pcie_phy_reset			0x9bc3c959
+#define clk_gcc_pcie_phy_com_reset		0x8bf513e6
+#define clk_gcc_pcie_phy_nocsr_com_phy_reset	0x0c16a2da
+#define clk_gcc_qusb2phy_prim_reset		0x07550fa1
+#define clk_gcc_qusb2phy_sec_reset		0x3f3a87d0
+#define clk_gcc_mmss_noc_cfg_ahb_clk		0xb41a9d99
+#define clk_gcc_dcc_ahb_clk			0xfa14a88c
+#define clk_hlos1_vote_lpass_core_smmu_clk	0x3aaa1743
+#define clk_hlos1_vote_lpass_adsp_smmu_clk	0xc76f702f
+#define clk_gcc_mss_cfg_ahb_clk			0x111cde81
+#define clk_gcc_mss_q6_bimc_axi_clk		0x67544d62
+#define clk_gcc_mss_mnoc_bimc_axi_clk		0xf665d03f
+#define clk_gpll0_out_msscc			0x7d794829
+#define clk_gcc_mss_snoc_axi_clk		0x0e71de85
+#define clk_gcc_qspi_ref_clk			0x766a0f7c
+#define clk_gcc_qspi_ahb_clk			0x96969dc8
+#define clk_gcc_debug_mux			0x8121ac15
+
+/* clock_mmss controlled clocks */
+#define clk_mmsscc_xo				0x05e63704
+#define clk_mmsscc_gpll0			0xe900c515
+#define clk_mmsscc_gpll0_div			0x73892e05
+#define clk_mmpll0_pll				0x361e3cfd
+#define clk_mmpll1_pll				0x198e426b
+#define clk_mmpll3_pll				0x18c76899
+#define clk_mmpll4_pll				0x22c063c1
+#define clk_mmpll5_pll				0xa41e1936
+#define clk_mmpll6_pll				0xc56fb440
+#define clk_mmpll7_pll				0x3ac216af
+#define clk_mmpll10_pll				0x2561263b
+#define clk_mmpll0_pll_out			0x1e9e24a8
+#define clk_mmpll1_pll_out			0x5fa32257
+#define clk_mmpll3_pll_out			0x6eb6328f
+#define clk_mmpll4_pll_out			0xfb21c2fd
+#define clk_mmpll5_pll_out			0xcc1897bf
+#define clk_mmpll6_pll_out			0xfb1060bd
+#define clk_mmpll7_pll_out			0x767758ed
+#define clk_mmpll10_pll_out			0x3c5668f3
+#define clk_ahb_clk_src				0x86f49203
+#define clk_csi0_clk_src			0x227e65bc
+#define clk_vfe0_clk_src			0xa0c2bd8f
+#define clk_vfe1_clk_src			0x4e357366
+#define clk_mdp_clk_src				0x6dc1f8f1
+#define clk_maxi_clk_src			0x52c09777
+#define clk_cpp_clk_src				0x8382f56d
+#define clk_jpeg0_clk_src			0x9a0a0ac3
+#define clk_rot_clk_src				0xce49b56c
+#define clk_video_core_clk_src			0x8be4c944
+#define clk_csi1_clk_src			0x6a2a6c36
+#define clk_csi2_clk_src			0x4113589f
+#define clk_csi3_clk_src			0xfd934012
+#define clk_fd_core_clk_src			0xe4799ab7
+#define clk_ext_dp_phy_pll_vco			0x441b576b
+#define clk_ext_dp_phy_pll_link			0xea12644c
+#define clk_dp_link_clk_src			0x370d0626
+#define clk_dp_crypto_clk_src			0xf8faa811
+#define clk_dp_pixel_clk_src			0xf5dfbabf
+#define clk_ext_extpclk_clk_src			0xe5b273af
+#define clk_ext_pclk0_clk_src			0x087c1612
+#define clk_ext_pclk1_clk_src			0x8067c5a3
+#define clk_pclk0_clk_src			0xccac1f35
+#define clk_pclk1_clk_src			0x090f68ac
+#define clk_video_subcore0_clk_src		0x88d79636
+#define clk_video_subcore1_clk_src		0x4966930c
+#define clk_cci_clk_src				0x822f3d97
+#define clk_camss_gp0_clk_src			0x43b063e9
+#define clk_camss_gp1_clk_src			0xa3315f1b
+#define clk_mclk0_clk_src			0x266b3853
+#define clk_mclk1_clk_src			0xa73cad0c
+#define clk_mclk2_clk_src			0x42545468
+#define clk_mclk3_clk_src			0x2bfbb714
+#define clk_csiphy_clk_src			0x8cceb70a
+#define clk_csi0phytimer_clk_src		0xc8a309be
+#define clk_csi1phytimer_clk_src		0x7c0fe23a
+#define clk_csi2phytimer_clk_src		0x62ffea9c
+#define clk_ext_byte0_clk_src			0xfb32f31e
+#define clk_ext_byte1_clk_src			0x585ef6d4
+#define clk_byte0_clk_src			0x75cc885b
+#define clk_byte1_clk_src			0x63c2c955
+#define clk_dp_aux_clk_src			0x2b6e972b
+#define clk_dp_gtc_clk_src			0xc5a86a42
+#define clk_esc0_clk_src			0xb41d7c38
+#define clk_esc1_clk_src			0x3b0afa42
+#define clk_extpclk_clk_src			0xb2c31abd
+#define clk_hdmi_clk_src			0xb40aeea9
+#define clk_vsync_clk_src			0xecb43940
+#define clk_mmss_bimc_smmu_ahb_clk		0x4825baf4
+#define clk_mmss_bimc_smmu_axi_clk		0xc365ac39
+#define clk_mmss_snoc_dvm_axi_clk		0x2c159a11
+#define clk_mmss_camss_ahb_clk			0xa51f2c1d
+#define clk_mmss_camss_cci_ahb_clk		0xfda8bb6a
+#define clk_mmss_camss_cci_clk			0x71bb5c97
+#define clk_mmss_camss_cpp_ahb_clk		0xd5554f15
+#define clk_mmss_camss_cpp_clk			0x8e99ef57
+#define clk_mmss_camss_cpp_axi_clk		0xd84e390b
+#define clk_mmss_camss_cpp_vbif_ahb_clk		0x1b33a88e
+#define clk_mmss_camss_cphy_csid0_clk		0x56114361
+#define clk_mmss_camss_csi0_ahb_clk		0x2b58d241
+#define clk_mmss_camss_csi0_clk			0xccfe39ef
+#define clk_mmss_camss_csi0pix_clk		0x9e26509d
+#define clk_mmss_camss_csi0rdi_clk		0x01d5bf83
+#define clk_mmss_camss_cphy_csid1_clk		0x79fbcd8a
+#define clk_mmss_camss_csi1_ahb_clk		0x7073244b
+#define clk_mmss_camss_csi1_clk			0x3eeeaac0
+#define clk_mmss_camss_csi1pix_clk		0xf1375139
+#define clk_mmss_camss_csi1rdi_clk		0x43185024
+#define clk_mmss_camss_cphy_csid2_clk		0xf295e3ef
+#define clk_mmss_camss_csi2_ahb_clk		0x681c1479
+#define clk_mmss_camss_csi2_clk			0x94524569
+#define clk_mmss_camss_csi2pix_clk		0xf4de617d
+#define clk_mmss_camss_csi2rdi_clk		0x4bf01dc5
+#define clk_mmss_camss_cphy_csid3_clk		0x100188e9
+#define clk_mmss_camss_csi3_ahb_clk		0xfae7c29b
+#define clk_mmss_camss_csi3_clk			0x55e4bbae
+#define clk_mmss_camss_csi3pix_clk		0xc166a015
+#define clk_mmss_camss_csi3rdi_clk		0x6983a4cd
+#define clk_mmss_camss_csi_vfe0_clk		0x3b30b798
+#define clk_mmss_camss_csi_vfe1_clk		0xfe729af7
+#define clk_mmss_camss_csiphy0_clk		0x96c81af8
+#define clk_mmss_camss_csiphy1_clk		0xee9ac2bb
+#define clk_mmss_camss_csiphy2_clk		0x3365e70e
+#define clk_mmss_fd_ahb_clk			0x4ff1da4d
+#define clk_mmss_fd_core_clk			0x749e7eb0
+#define clk_mmss_fd_core_uar_clk		0x8ea480c5
+#define clk_mmss_camss_gp0_clk			0x3f7f6c87
+#define clk_mmss_camss_gp1_clk			0xdccdd730
+#define clk_mmss_camss_ispif_ahb_clk		0xbda4f0e3
+#define clk_mmss_camss_jpeg0_clk		0x4cc73b07
+#define clk_mmss_camss_jpeg0_vote_clk		0xc9efa6ac
+#define clk_mmss_camss_jpeg0_dma_vote_clk	0x371ec109
+#define clk_mmss_camss_jpeg_ahb_clk		0xde1fece3
+#define clk_mmss_camss_jpeg_axi_clk		0x7534616b
+#define clk_mmss_camss_mclk0_clk		0x056293a7
+#define clk_mmss_camss_mclk1_clk		0x96c7b69b
+#define clk_mmss_camss_mclk2_clk		0x8820556e
+#define clk_mmss_camss_mclk3_clk		0xf90ffb67
+#define clk_mmss_camss_micro_ahb_clk		0x6c6fd3c7
+#define clk_mmss_camss_csi0phytimer_clk		0x7a78864e
+#define clk_mmss_camss_csi1phytimer_clk		0x6e6c1de5
+#define clk_mmss_camss_csi2phytimer_clk		0x0235e2de
+#define clk_mmss_camss_top_ahb_clk		0x120618d6
+#define clk_mmss_camss_vfe0_ahb_clk		0x137bd0bd
+#define clk_mmss_camss_vfe0_clk			0xead28288
+#define clk_mmss_camss_vfe0_stream_clk		0xa0428287
+#define clk_mmss_camss_vfe1_ahb_clk		0xac0154c0
+#define clk_mmss_camss_vfe1_clk			0xc216b14d
+#define clk_mmss_camss_vfe1_stream_clk		0x745af3b6
+#define clk_mmss_camss_vfe_vbif_ahb_clk		0x0109a9c6
+#define clk_mmss_camss_vfe_vbif_axi_clk		0xe626d8a1
+#define clk_mmss_mdss_ahb_clk			0x85d37ab5
+#define clk_mmss_mdss_axi_clk			0xdf04fc1d
+#define clk_mmss_mdss_byte0_clk			0x38105d25
+#define clk_mmss_mdss_byte0_intf_clk		0x38e5aa79
+#define clk_mmss_mdss_byte0_intf_div_clk	0x8604f181
+#define clk_mmss_mdss_byte1_clk			0xe0c21354
+#define clk_mmss_mdss_byte1_intf_clk		0xcf654d8e
+#define clk_mmss_mdss_byte1_intf_div_clk	0xcdf334c5
+#define clk_mmss_mdss_dp_aux_clk		0x23125eb6
+#define clk_mmss_mdss_dp_crypto_clk		0x9a072d4e
+#define clk_mmss_mdss_dp_link_clk		0x8dd302d1
+#define clk_mmss_mdss_dp_link_intf_clk		0x70e386e6
+#define clk_mmss_mdss_dp_pixel_clk		0xb707b765
+#define clk_mmss_mdss_dp_gtc_clk		0xb59c151a
+#define clk_mmss_mdss_esc0_clk			0x5721ff83
+#define clk_mmss_mdss_esc1_clk			0xc3d0376b
+#define clk_mmss_mdss_extpclk_clk		0x74d5a954
+#define clk_mmss_mdss_hdmi_clk			0x28460a6d
+#define clk_mmss_mdss_hdmi_dp_ahb_clk		0x5448519f
+#define clk_mmss_mdss_mdp_clk			0x43539b0e
+#define clk_mmss_mdss_mdp_lut_clk		0x00627b2b
+#define clk_mmss_mdss_pclk0_clk			0xcc0e909d
+#define clk_mmss_mdss_pclk1_clk			0x850d9146
+#define clk_mmss_mdss_rot_clk			0xbb7e71c4
+#define clk_mmss_mdss_vsync_clk			0x629b36dc
+#define clk_mmss_misc_ahb_clk			0xea30b0e7
+#define clk_mmss_misc_cxo_clk			0xe620cd80
+#define clk_mmss_mnoc_ahb_clk			0x49a394f4
+#define clk_mmss_mnoc_maxi_clk			0xd8b7278f
+#define clk_mmss_video_subcore0_clk		0x23fae359
+#define clk_mmss_video_subcore1_clk		0x5213a0c7
+#define clk_mmss_video_ahb_clk			0x94334ae9
+#define clk_mmss_video_axi_clk			0xf3178ba5
+#define clk_mmss_video_core_clk			0x78f14c85
+#define clk_mmss_video_maxi_clk			0x1785ef88
+#define clk_mmss_vmem_ahb_clk			0x4b18955b
+#define clk_mmss_vmem_maxi_clk			0xb6067889
+#define clk_mmss_debug_mux			0xe646ffda
+
+/* external multimedia clocks */
+#define clk_dsi0pll_byteclk_mux			0xecf2c434
+#define clk_dsi0pll_byteclk_src			0x6f6f740f
+#define clk_dsi0pll_pclk_mux			0x6c9da335
+#define clk_dsi0pll_pclk_src			0x5efd85d4
+#define clk_dsi0pll_pclk_src_mux		0x84b14663
+#define clk_dsi0pll_post_bit_div		0xf46dcf27
+#define clk_dsi0pll_pll_out_div1		0xeda5b7fe
+#define clk_dsi0pll_pll_out_div2		0x97fa476d
+#define clk_dsi0pll_pll_out_div4		0x90a98ce0
+#define clk_dsi0pll_pll_out_div8		0x9d9d85cf
+#define clk_dsi0pll_pll_out_mux			0x179c27ca
+#define clk_dsi0pll_post_vco_mux		0xfaf9bd1f
+#define clk_dsi0pll_post_vco_div1		0xabb50b2a
+#define clk_dsi0pll_post_vco_div4		0xbe51c091
+#define clk_dsi0pll_bitclk_src			0x36c3c437
+#define clk_dsi0pll_vco_clk			0x15940d40
+
+#define clk_dsi1pll_byteclk_mux			0x14e2f38f
+#define clk_dsi1pll_byteclk_src			0x4b65c298
+#define clk_dsi1pll_pclk_mux			0x4c0518b5
+#define clk_dsi1pll_pclk_src			0xeddcd80e
+#define clk_dsi1pll_pclk_src_mux		0x3651feb3
+#define clk_dsi1pll_post_bit_div		0x712f0260
+#define clk_dsi1pll_pll_out_div8		0x87628ddb
+#define clk_dsi1pll_pll_out_div4		0x0d9a384b
+#define clk_dsi1pll_pll_out_div2		0x0c9b5748
+#define clk_dsi1pll_pll_out_div1		0x3193164e
+#define clk_dsi1pll_pll_out_mux			0x171bf8fd
+#define clk_dsi1pll_post_vco_mux		0xc6a90d20
+#define clk_dsi1pll_post_vco_div1		0x6f47ca7d
+#define clk_dsi1pll_post_vco_div4		0x90628974
+#define clk_dsi1pll_bitclk_src			0x13ab045b
+#define clk_dsi1pll_vco_clk			0x99797b50
+
+#define clk_dp_vco_clk				0xfcaaeec7
+#define clk_dp_link_2x_clk_divsel_five		0xcfe3f5dd
+#define clk_vco_divsel_four_clk_src		0xe0da19c0
+#define clk_vco_divsel_two_clk_src		0xb5cfc6a8
+#define clk_vco_divided_clk_src_mux		0x3f8197c2
+#define clk_hdmi_vco_clk			0xbb7dc20d
+
+/* clock_gpu controlled clocks */
+#define clk_gpucc_xo				0xc4e1a890
+#define clk_gpucc_gpll0				0x0db0e37f
+#define clk_gfx3d_clk_src			0x917f76ef
+#define clk_rbbmtimer_clk_src			0x17649ecc
+#define clk_gfx3d_isense_clk_src		0xecc3eafa
+#define clk_rbcpr_clk_src			0x2c2e9af2
+#define clk_gpu_debug_div_clk			0x75d6f53f
+#define clk_gpucc_gfx3d_clk			0x95f01bd5
+#define clk_gpucc_rbbmtimer_clk			0x58a0a7ca
+#define clk_gpucc_gfx3d_isense_clk		0xb2678e80
+#define clk_gpucc_cxo_clk			0x6532dcae
+#define clk_gpucc_rbcpr_clk			0x7bd750e8
+#define clk_gpu_pll0_pll			0x0e61ab4d
+#define clk_gpu_pll0_pll_out_even		0xb0ed5009
+#define clk_gpu_pll0_pll_out_odd		0x08c5a8a5
+#define clk_gpu_pll0_postdiv_clk		0x76c19f3c
+#define clk_gpucc_mx_clk			0x1edbb879
+#define clk_gpucc_gcc_dbg_clk			0x9ae8cd3c
+#define clk_gfxcc_dbg_clk			0x3ed47625
+
+/* CPU clocks */
+#define clk_pwrcl_clk				0xc554130e
+#define clk_perfcl_clk				0x58869997
+#define clk_sys_apcsaux_clk_gcc			0xf905e862
+#define clk_xo_ao				0x428c856d
+#define clk_osm_clk_src				0xaabe68c3
+#define clk_cpu_debug_mux			0x3ae8bcb2
+
+/* Audio External Clocks */
+#define clk_audio_ap_clk			0x9b5727cb
+#define clk_audio_pmi_clk			0xcbfe416d
+#define clk_audio_ap_clk2			0x454d1e91
+
+/* GCC block resets */
+#define QUSB2PHY_PRIM_BCR			0
+#define QUSB2PHY_SEC_BCR			1
+#define BLSP1_BCR				2
+#define BLSP2_BCR				3
+#define BOOT_ROM_BCR				4
+#define PRNG_BCR				5
+#define UFS_BCR					6
+#define USB_30_BCR				7
+#define USB3_PHY_BCR				8
+#define USB3PHY_PHY_BCR				9
+#define PCIE_0_PHY_BCR				10
+#define PCIE_PHY_BCR				11
+#define PCIE_PHY_COM_BCR			12
+#define PCIE_PHY_NOCSR_COM_PHY_BCR		13
+
+/* MMSS block resets */
+#define CAMSS_MICRO_BCR				0
+
+#endif
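
(Annotation: the GCC reset indices above pair with a reset specifier in the same way, assuming the downstream clock controller also acts as the reset provider with #reset-cells = <1>. A sketch under that assumption; the qusb_phy0 label and reset-names string are illustrative, not confirmed by this patch:)

	#include <dt-bindings/clock/msm-clocks-8998.h>

	&qusb_phy0 {
		resets = <&clock_gcc QUSB2PHY_PRIM_BCR>;
		reset-names = "phy_reset";
	};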
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/arch/mips/boot/dts/include/dt-bindings/clock/msm-clocks-hwio-8998.h	2019-01-22 16:16:28.171288678 +0100
@@ -0,0 +1,395 @@
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define CLKFLAG_NO_RATE_CACHE					0x00004000
+
+#define FMAX_LOWER						0
+#define FMAX_LOW						1
+#define FMAX_NOM						2
+#define FMAX_TURBO						3
+
+#define HALT_CHECK_DELAY					5
+
+#define RPM_MISC_CLK_TYPE					0x306b6c63
+#define RPM_BUS_CLK_TYPE					0x316b6c63
+#define RPM_MEM_CLK_TYPE					0x326b6c63
+#define RPM_IPA_CLK_TYPE					0x617069
+#define RPM_CE_CLK_TYPE						0x6563
+#define RPM_AGGR_CLK_TYPE					0x72676761
+#define RPM_SMD_KEY_ENABLE					0x62616E45
+#define RPM_MMAXI_CLK_TYPE					0x69786d6d
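+/*
+ * Note on encoding (comment added for clarity; the FOURCC helper below
+ * is illustrative, not part of this header): the RPM_* type values
+ * above are little-endian ASCII four-character codes, e.g. 0x306b6c63
+ * is "clk0", 0x617069 is "ipa" and 0x62616E45 is "Enab". They could be
+ * built with a helper such as:
+ *
+ *   #define FOURCC(a, b, c, d) \
+ *           ((a) | ((b) << 8) | ((c) << 16) | ((d) << 24))
+ *
+ * so that FOURCC('c', 'l', 'k', '0') == RPM_MISC_CLK_TYPE.
+ */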
+
+#define CXO_CLK_SRC_ID						0x0
+#define QDSS_CLK_ID						0x1
+#define SNOC_CLK_ID						0x1
+#define CNOC_CLK_ID						0x2
+#define CNOC_PERIPH_CLK_ID					0x0
+#define BIMC_CLK_ID						0x0
+#define IPA_CLK_ID						0x0
+#define CE1_CLK_ID						0x0
+#define RF_CLK1_ID						0x4
+#define RF_CLK2_ID						0x5
+#define RF_CLK3_ID						0x6
+#define LN_BB_CLK1_ID						0x1
+#define LN_BB_CLK2_ID						0x2
+#define LN_BB_CLK3_ID						0x3
+#define DIV_CLK1_ID						0xb
+#define DIV_CLK2_ID						0xc
+#define DIV_CLK3_ID						0xd
+#define RF_CLK1_PIN_ID						0x4
+#define RF_CLK2_PIN_ID						0x5
+#define RF_CLK3_PIN_ID						0x6
+#define LN_BB_CLK1_PIN_ID					0x1
+#define LN_BB_CLK2_PIN_ID					0x2
+#define LN_BB_CLK3_PIN_ID					0x3
+#define AGGR1_NOC_ID						0x1
+#define AGGR2_NOC_ID						0x2
+#define MMSSNOC_AXI_CLK_ID					0x0
+
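+/*
+ * Register-name conventions used below (gloss added for readability):
+ * *_CMD_RCGR is the command/update register of a root clock generator
+ * (RCG), *_CBCR is a clock branch control register (branch enable and
+ * halt status), and *_BCR is a block control register used to assert
+ * the block's reset.
+ */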
+#define APCS_COMMON_LMH_CMD_RCGR				0x0012C
+
+#define GCC_APCS_GPLL_ENA_VOTE					0x52000
+#define GCC_APCS_CLOCK_BRANCH_ENA_VOTE				0x52004
+#define GCC_APCS_CLOCK_BRANCH_ENA_VOTE_1			0x5200C
+#define PLLTEST_PAD_CFG						0x6200C
+#define GCC_XO_DIV4_CBCR					0x43008
+#define CLOCK_FRQ_MEASURE_CTL					0x62004
+#define CLOCK_FRQ_MEASURE_STATUS				0x62008
+#define GCC_GPLL0_MODE						0x00000
+#define GCC_GPLL4_MODE						0x77000
+#define GCC_USB30_MASTER_CMD_RCGR				0x0F014
+#define GCC_PCIE_AUX_CMD_RCGR					0x6C000
+#define GCC_UFS_AXI_CMD_RCGR					0x75018
+#define GCC_BLSP1_QUP1_I2C_APPS_CMD_RCGR			0x19020
+#define GCC_BLSP1_QUP1_SPI_APPS_CMD_RCGR			0x1900C
+#define GCC_BLSP1_QUP2_I2C_APPS_CMD_RCGR			0x1B020
+#define GCC_BLSP1_QUP2_SPI_APPS_CMD_RCGR			0x1B00C
+#define GCC_BLSP1_QUP3_I2C_APPS_CMD_RCGR			0x1D020
+#define GCC_BLSP1_QUP3_SPI_APPS_CMD_RCGR			0x1D00C
+#define GCC_BLSP1_QUP4_I2C_APPS_CMD_RCGR			0x1F020
+#define GCC_BLSP1_QUP4_SPI_APPS_CMD_RCGR			0x1F00C
+#define GCC_BLSP1_QUP5_I2C_APPS_CMD_RCGR			0x21020
+#define GCC_BLSP1_QUP5_SPI_APPS_CMD_RCGR			0x2100C
+#define GCC_BLSP1_QUP6_I2C_APPS_CMD_RCGR			0x23020
+#define GCC_BLSP1_QUP6_SPI_APPS_CMD_RCGR			0x2300C
+#define GCC_BLSP1_UART1_APPS_CMD_RCGR				0x1A00C
+#define GCC_BLSP1_UART2_APPS_CMD_RCGR				0x1C00C
+#define GCC_BLSP1_UART3_APPS_CMD_RCGR				0x1E00C
+#define GCC_BLSP2_QUP1_I2C_APPS_CMD_RCGR			0x26020
+#define GCC_BLSP2_QUP2_I2C_APPS_CMD_RCGR			0x28020
+#define GCC_BLSP2_QUP3_I2C_APPS_CMD_RCGR			0x2A020
+#define GCC_BLSP2_QUP4_I2C_APPS_CMD_RCGR			0x2C020
+#define GCC_BLSP2_QUP5_I2C_APPS_CMD_RCGR			0x2E020
+#define GCC_BLSP2_QUP6_I2C_APPS_CMD_RCGR			0x30020
+#define GCC_BLSP2_QUP1_SPI_APPS_CMD_RCGR			0x2600C
+#define GCC_BLSP2_QUP2_SPI_APPS_CMD_RCGR			0x2800C
+#define GCC_BLSP2_QUP3_SPI_APPS_CMD_RCGR			0x2A00C
+#define GCC_BLSP2_QUP4_SPI_APPS_CMD_RCGR			0x2C00C
+#define GCC_BLSP2_QUP5_SPI_APPS_CMD_RCGR			0x2E00C
+#define GCC_BLSP2_QUP6_SPI_APPS_CMD_RCGR			0x3000C
+#define GCC_BLSP2_UART1_APPS_CMD_RCGR				0x2700C
+#define GCC_BLSP2_UART2_APPS_CMD_RCGR				0x2900C
+#define GCC_BLSP2_UART3_APPS_CMD_RCGR				0x2B00C
+#define GCC_GP1_CMD_RCGR					0x64004
+#define GCC_GP2_CMD_RCGR					0x65004
+#define GCC_GP3_CMD_RCGR					0x66004
+#define GCC_HMSS_RBCPR_CMD_RCGR					0x48044
+#define GCC_PDM2_CMD_RCGR					0x33010
+#define GCC_SDCC2_APPS_CMD_RCGR					0x14010
+#define GCC_SDCC4_APPS_CMD_RCGR					0x16010
+#define GCC_TSIF_REF_CMD_RCGR					0x36010
+#define GCC_UFS_ICE_CORE_CMD_RCGR				0x76010
+#define GCC_UFS_PHY_AUX_CMD_RCGR				0x76044
+#define GCC_UFS_UNIPRO_CORE_CMD_RCGR				0x76028
+#define GCC_USB30_MOCK_UTMI_CMD_RCGR				0x0F028
+#define GCC_USB3_PHY_AUX_CMD_RCGR				0x5000C
+#define GCC_QSPI_REF_CMD_RCGR					0x9000C
+#define GCC_PCIE_0_PHY_BCR					0x6C01C
+#define GCC_HDMI_CLKREF_EN					0x88000
+#define GCC_UFS_CLKREF_EN					0x88004
+#define GCC_USB3_CLKREF_EN					0x88008
+#define GCC_PCIE_CLKREF_EN					0x8800C
+#define GCC_RX1_USB2_CLKREF_EN					0x88014
+#define GCC_USB3_PHY_BCR					0x50020
+#define GCC_AGGRE1_NOC_XO_CBCR					0x8202C
+#define GCC_AGGRE1_UFS_AXI_CBCR					0x82028
+#define GCC_AGGRE1_USB3_AXI_CBCR				0x82024
+#define GCC_BIMC_MSS_Q6_AXI_CBCR				0x4401C
+#define GCC_BLSP1_AHB_CBCR					0x17004
+#define GCC_BLSP1_BCR						0x17000
+#define GCC_BLSP1_QUP1_SPI_APPS_CBCR				0x19004
+#define GCC_BLSP1_QUP1_I2C_APPS_CBCR				0x19008
+#define GCC_BLSP1_QUP2_SPI_APPS_CBCR				0x1B004
+#define GCC_BLSP1_QUP2_I2C_APPS_CBCR				0x1B008
+#define GCC_BLSP1_QUP3_SPI_APPS_CBCR				0x1D004
+#define GCC_BLSP1_QUP3_I2C_APPS_CBCR				0x1D008
+#define GCC_BLSP1_QUP4_SPI_APPS_CBCR				0x1F004
+#define GCC_BLSP1_QUP4_I2C_APPS_CBCR				0x1F008
+#define GCC_BLSP1_QUP5_SPI_APPS_CBCR				0x21004
+#define GCC_BLSP1_QUP5_I2C_APPS_CBCR				0x21008
+#define GCC_BLSP1_QUP6_SPI_APPS_CBCR				0x23004
+#define GCC_BLSP1_QUP6_I2C_APPS_CBCR				0x23008
+#define GCC_BLSP1_UART1_APPS_CBCR				0x1A004
+#define GCC_BLSP1_UART2_APPS_CBCR				0x1C004
+#define GCC_BLSP1_UART3_APPS_CBCR				0x1E004
+#define GCC_BLSP2_AHB_CBCR					0x25004
+#define GCC_BLSP2_BCR						0x25000
+#define GCC_BLSP2_QUP1_SPI_APPS_CBCR				0x26004
+#define GCC_BLSP2_QUP1_I2C_APPS_CBCR				0x26008
+#define GCC_BLSP2_QUP2_I2C_APPS_CBCR				0x28008
+#define GCC_BLSP2_QUP2_SPI_APPS_CBCR				0x28004
+#define GCC_BLSP2_QUP3_SPI_APPS_CBCR				0x2A004
+#define GCC_BLSP2_QUP3_I2C_APPS_CBCR				0x2A008
+#define GCC_BLSP2_QUP4_SPI_APPS_CBCR				0x2C004
+#define GCC_BLSP2_QUP4_I2C_APPS_CBCR				0x2C008
+#define GCC_BLSP2_QUP5_SPI_APPS_CBCR				0x2E004
+#define GCC_BLSP2_QUP5_I2C_APPS_CBCR				0x2E008
+#define GCC_BLSP2_QUP6_SPI_APPS_CBCR				0x30004
+#define GCC_BLSP2_QUP6_I2C_APPS_CBCR				0x30008
+#define GCC_BLSP2_UART1_APPS_CBCR				0x27004
+#define GCC_BLSP2_UART2_APPS_CBCR				0x29004
+#define GCC_BLSP2_UART3_APPS_CBCR				0x2B004
+#define GCC_BOOT_ROM_AHB_CBCR					0x38004
+#define GCC_BOOT_ROM_BCR					0x38000
+#define GCC_CFG_NOC_USB3_AXI_CBCR				0x05018
+#define GCC_BIMC_GFX_CBCR					0x46040
+#define GCC_GP1_CBCR						0x64000
+#define GCC_GP2_CBCR						0x65000
+#define GCC_GP3_CBCR						0x66000
+#define GCC_GPU_BIMC_GFX_CBCR					0x71010
+#define GCC_GPU_CFG_AHB_CBCR					0x71004
+#define GCC_GPU_IREF_EN						0x88010
+#define GCC_HMSS_DVM_BUS_CBCR					0x4808C
+#define GCC_HMSS_RBCPR_CBCR					0x48008
+#define GCC_MMSS_SYS_NOC_AXI_CBCR				0x09000
+#define GCC_MMSS_NOC_CFG_AHB_CBCR				0x09004
+#define GCC_PCIE_0_SLV_AXI_CBCR					0x6B008
+#define GCC_PCIE_0_MSTR_AXI_CBCR				0x6B00C
+#define GCC_PCIE_0_CFG_AHB_CBCR					0x6B010
+#define GCC_PCIE_0_AUX_CBCR					0x6B014
+#define GCC_PCIE_0_PIPE_CBCR					0x6B018
+#define GCC_PCIE_PHY_AUX_CBCR					0x6F004
+#define GCC_PCIE_PHY_BCR					0x6F000
+#define GCC_PCIE_PHY_COM_BCR					0x6F014
+#define GCC_PCIE_PHY_NOCSR_COM_PHY_BCR				0x6F00C
+#define GCC_QUSB2PHY_PRIM_BCR					0x12000
+#define GCC_QUSB2PHY_SEC_BCR					0x12004
+#define GCC_PDM2_CBCR						0x3300C
+#define GCC_PDM_AHB_CBCR					0x33004
+#define GCC_PRNG_AHB_CBCR					0x34004
+#define GCC_PRNG_BCR						0x34000
+#define GCC_SDCC2_APPS_CBCR					0x14004
+#define GCC_SDCC2_AHB_CBCR					0x14008
+#define GCC_SDCC4_APPS_CBCR					0x16004
+#define GCC_SDCC4_AHB_CBCR					0x16008
+#define GCC_TSIF_AHB_CBCR					0x36004
+#define GCC_TSIF_REF_CBCR					0x36008
+#define GCC_UFS_AXI_CBCR					0x75008
+#define GCC_UFS_BCR						0x75000
+#define GCC_UFS_AHB_CBCR					0x7500C
+#define GCC_UFS_TX_SYMBOL_0_CBCR				0x75010
+#define GCC_UFS_RX_SYMBOL_0_CBCR				0x75014
+#define GCC_UFS_RX_SYMBOL_1_CBCR				0x7605C
+#define GCC_UFS_UNIPRO_CORE_CBCR				0x76008
+#define GCC_UFS_ICE_CORE_CBCR					0x7600C
+#define GCC_UFS_PHY_AUX_CBCR					0x76040
+#define GCC_USB30_MASTER_CBCR					0x0F008
+#define GCC_USB30_SLEEP_CBCR					0x0F00C
+#define GCC_USB30_MOCK_UTMI_CBCR				0x0F010
+#define GCC_USB_30_BCR						0x0F000
+#define GCC_USB3_PHY_AUX_CBCR					0x50000
+#define GCC_USB3_PHY_PIPE_CBCR					0x50004
+#define GCC_USB3PHY_PHY_BCR					0x50024
+#define GCC_APCS_CLOCK_SLEEP_ENA_VOTE				0x52008
+#define GCC_MSS_CFG_AHB_CBCR					0x8A000
+#define GCC_MSS_Q6_BIMC_AXI_CBCR				0x8A040
+#define GCC_MSS_MNOC_BIMC_AXI_CBCR				0x8A004
+#define GCC_MSS_SNOC_AXI_CBCR					0x8A03C
+#define GCC_HMSS_GPLL0_CMD_RCGR					0x4805C
+#define GCC_DCC_AHB_CBCR					0x84004
+#define GCC_HLOS1_VOTE_LPASS_CORE_SMMU_CBCR			0x7D010
+#define GCC_HLOS1_VOTE_LPASS_ADSP_SMMU_CBCR			0x7D014
+#define GCC_QSPI_AHB_CBCR					0x90004
+#define GCC_QSPI_REF_CBCR					0x90008
+#define GCC_MMSS_MISC						0x0902C
+#define GCC_GPU_MISC						0x71028
+
+#define GPUCC_GPU_PLL0_PLL_MODE					0x00000
+#define GPUCC_GPU_PLL0_USER_CTL_MODE				0x0000C
+#define GPUCC_GFX3D_CMD_RCGR					0x01070
+#define GPUCC_RBBMTIMER_CMD_RCGR				0x010B0
+#define GPUCC_GFX3D_ISENSE_CMD_RCGR				0x01100
+#define GPUCC_RBCPR_CMD_RCGR					0x01030
+#define GPUCC_GFX3D_CBCR					0x01098
+#define GPUCC_RBBMTIMER_CBCR					0x010D0
+#define GPUCC_GFX3D_ISENSE_CBCR					0x01124
+#define GPUCC_CXO_CBCR						0x01020
+#define GPUCC_RBCPR_CBCR					0x01054
+#define GPU_GX_BCR						0x01090
+#define GPUCC_GX_DOMAIN_MISC					0x00130
+#define GPUCC_GPU_DD_WRAP_CTRL					0x00430
+#define GPUCC_DEBUG_CLK_CTL					0x00120
+
+#define MMSS_PLL_VOTE_APCS					0x001E0
+#define MMSS_MMPLL0_PLL_MODE					0x0C000
+#define MMSS_MMPLL1_PLL_MODE					0x0C050
+#define MMSS_MMPLL3_PLL_MODE					0x00000
+#define MMSS_MMPLL4_PLL_MODE					0x00050
+#define MMSS_MMPLL5_PLL_MODE					0x000A0
+#define MMSS_MMPLL6_PLL_MODE					0x000F0
+#define MMSS_MMPLL7_PLL_MODE					0x00140
+#define MMSS_MMPLL10_PLL_MODE					0x00190
+#define MMSS_AHB_CMD_RCGR					0x05000
+#define MMSS_CSI0_CMD_RCGR					0x03090
+#define MMSS_VFE0_CMD_RCGR					0x03600
+#define MMSS_VFE1_CMD_RCGR					0x03620
+#define MMSS_MDP_CMD_RCGR					0x02040
+#define MMSS_MAXI_CMD_RCGR					0x0F020
+#define MMSS_CPP_CMD_RCGR					0x03640
+#define MMSS_JPEG0_CMD_RCGR					0x03500
+#define MMSS_ROT_CMD_RCGR					0x021A0
+#define MMSS_VIDEO_CORE_CMD_RCGR				0x01000
+#define MMSS_CSI1_CMD_RCGR					0x03100
+#define MMSS_CSI2_CMD_RCGR					0x03160
+#define MMSS_CSI3_CMD_RCGR					0x031C0
+#define MMSS_FD_CORE_CMD_RCGR					0x03B00
+#define MMSS_BYTE0_CMD_RCGR					0x02120
+#define MMSS_BYTE1_CMD_RCGR					0x02140
+#define MMSS_PCLK0_CMD_RCGR					0x02000
+#define MMSS_PCLK1_CMD_RCGR					0x02020
+#define MMSS_VIDEO_SUBCORE0_CMD_RCGR				0x01060
+#define MMSS_VIDEO_SUBCORE1_CMD_RCGR				0x01080
+#define MMSS_CSIPHY_CMD_RCGR					0x03800
+#define MMSS_CCI_CMD_RCGR					0x03300
+#define MMSS_CAMSS_GP0_CMD_RCGR					0x03420
+#define MMSS_CAMSS_GP1_CMD_RCGR					0x03450
+#define MMSS_MCLK0_CMD_RCGR					0x03360
+#define MMSS_MCLK1_CMD_RCGR					0x03390
+#define MMSS_MCLK2_CMD_RCGR					0x033C0
+#define MMSS_MCLK3_CMD_RCGR					0x033F0
+#define MMSS_CAMSS_CSI2PHYTIMER_CBCR				0x03084
+#define MMSS_CSI0PHYTIMER_CMD_RCGR				0x03000
+#define MMSS_CSI1PHYTIMER_CMD_RCGR				0x03030
+#define MMSS_CSI2PHYTIMER_CMD_RCGR				0x03060
+#define MMSS_DP_GTC_CMD_RCGR					0x02280
+#define MMSS_ESC0_CMD_RCGR					0x02160
+#define MMSS_ESC1_CMD_RCGR					0x02180
+#define MMSS_EXTPCLK_CMD_RCGR					0x02060
+#define MMSS_HDMI_CMD_RCGR					0x02100
+#define MMSS_VSYNC_CMD_RCGR					0x02080
+#define MMSS_BIMC_SMMU_AHB_CBCR					0x0E004
+#define MMSS_BIMC_SMMU_AXI_CBCR					0x0E008
+#define MMSS_SNOC_DVM_AXI_CBCR					0x0E040
+#define MMSS_CAMSS_AHB_CBCR					0x0348C
+#define MMSS_CAMSS_CCI_AHB_CBCR					0x03348
+#define MMSS_CAMSS_CCI_CBCR					0x03344
+#define MMSS_CAMSS_CPP_AHB_CBCR					0x036B4
+#define MMSS_CAMSS_CPP_CBCR					0x036B0
+#define MMSS_CAMSS_CPP_AXI_CBCR					0x036C4
+#define MMSS_CAMSS_CPP_VBIF_AHB_CBCR				0x036C8
+#define MMSS_CAMSS_CPHY_CSID0_CBCR				0x03730
+#define MMSS_CAMSS_CSI0_AHB_CBCR				0x030BC
+#define MMSS_CAMSS_CSI0_CBCR					0x030B4
+#define MMSS_CAMSS_CSI0PIX_CBCR					0x030E4
+#define MMSS_CAMSS_CSI0RDI_CBCR					0x030D4
+#define MMSS_CAMSS_CPHY_CSID1_CBCR				0x03734
+#define MMSS_CAMSS_CSI1_AHB_CBCR				0x03128
+#define MMSS_CAMSS_CSI1_CBCR					0x03124
+#define MMSS_CAMSS_CSI1PIX_CBCR					0x03154
+#define MMSS_CAMSS_CSI1RDI_CBCR					0x03144
+#define MMSS_CAMSS_CPHY_CSID2_CBCR				0x03738
+#define MMSS_CAMSS_CSI2_AHB_CBCR				0x03188
+#define MMSS_CAMSS_CSI2_CBCR					0x03184
+#define MMSS_CAMSS_CSI2PIX_CBCR					0x031B4
+#define MMSS_CAMSS_CSI2RDI_CBCR					0x031A4
+#define MMSS_CAMSS_CPHY_CSID3_CBCR				0x0373C
+#define MMSS_CAMSS_CSI3_AHB_CBCR				0x031E8
+#define MMSS_CAMSS_CSI3_CBCR					0x031E4
+#define MMSS_CAMSS_CSI3PIX_CBCR					0x03214
+#define MMSS_CAMSS_CSI3RDI_CBCR					0x03204
+#define MMSS_CAMSS_CSI_VFE0_CBCR				0x03704
+#define MMSS_CAMSS_CSI_VFE1_CBCR				0x03714
+#define MMSS_CAMSS_CSIPHY0_CBCR					0x03740
+#define MMSS_CAMSS_CSIPHY1_CBCR					0x03744
+#define MMSS_CAMSS_CSIPHY2_CBCR					0x03748
+#define MMSS_FD_AHB_CBCR					0x03B74
+#define MMSS_FD_CORE_CBCR					0x03B68
+#define MMSS_FD_CORE_UAR_CBCR					0x03B6C
+#define MMSS_CAMSS_GP0_CBCR					0x03444
+#define MMSS_CAMSS_GP1_CBCR					0x03474
+#define MMSS_CAMSS_ISPIF_AHB_CBCR				0x03224
+#define MMSS_CAMSS_JPEG0_CBCR					0x035A8
+#define MMSS_CAMSS_JPEG_AHB_CBCR				0x035B4
+#define MMSS_CAMSS_JPEG_AXI_CBCR				0x035B8
+#define MMSS_CAMSS_MCLK0_CBCR					0x03384
+#define MMSS_CAMSS_MCLK1_CBCR					0x033B4
+#define MMSS_CAMSS_MCLK2_CBCR					0x033E4
+#define MMSS_CAMSS_MCLK3_CBCR					0x03414
+#define MMSS_CAMSS_MICRO_AHB_CBCR				0x03494
+#define MMSS_CAMSS_CSI0PHYTIMER_CBCR				0x03024
+#define MMSS_CAMSS_CSI1PHYTIMER_CBCR				0x03054
+#define MMSS_CAMSS_TOP_AHB_CBCR					0x03484
+#define MMSS_CAMSS_VFE0_AHB_CBCR				0x03668
+#define MMSS_CAMSS_VFE0_CBCR					0x036A8
+#define MMSS_CAMSS_VFE0_STREAM_CBCR				0x03720
+#define MMSS_CAMSS_VFE1_AHB_CBCR				0x03678
+#define MMSS_CAMSS_VFE1_CBCR					0x036AC
+#define MMSS_CAMSS_VFE1_STREAM_CBCR				0x03724
+#define MMSS_CAMSS_VFE_VBIF_AHB_CBCR				0x036B8
+#define MMSS_CAMSS_VFE_VBIF_AXI_CBCR				0x036BC
+#define MMSS_MDSS_AHB_CBCR					0x02308
+#define MMSS_MDSS_AXI_CBCR					0x02310
+#define MMSS_MDSS_BYTE0_CBCR					0x0233C
+#define MMSS_MDSS_BYTE0_INTF_CBCR				0x02374
+#define MMSS_MDSS_BYTE0_INTF_DIV				0x0237C
+#define MMSS_MDSS_BYTE1_CBCR					0x02340
+#define MMSS_MDSS_BYTE1_INTF_CBCR				0x02378
+#define MMSS_MDSS_BYTE1_INTF_DIV				0x02380
+#define MMSS_MDSS_DP_AUX_CBCR					0x02364
+#define MMSS_MDSS_DP_CRYPTO_CBCR				0x0235C
+#define MMSS_MDSS_DP_GTC_CBCR					0x02368
+#define MMSS_MDSS_DP_LINK_CBCR					0x02354
+#define MMSS_MDSS_DP_LINK_INTF_CBCR				0x02358
+#define MMSS_MDSS_DP_PIXEL_CBCR					0x02360
+#define MMSS_MDSS_ESC0_CBCR					0x02344
+#define MMSS_MDSS_ESC1_CBCR					0x02348
+#define MMSS_MDSS_EXTPCLK_CBCR					0x02324
+#define MMSS_MDSS_HDMI_CBCR					0x02338
+#define MMSS_MDSS_HDMI_DP_AHB_CBCR				0x0230C
+#define MMSS_MDSS_MDP_CBCR					0x0231C
+#define MMSS_MDSS_MDP_LUT_CBCR					0x02320
+#define MMSS_MDSS_PCLK0_CBCR					0x02314
+#define MMSS_MDSS_PCLK1_CBCR					0x02318
+#define MMSS_MDSS_ROT_CBCR					0x02350
+#define MMSS_MDSS_VSYNC_CBCR					0x02328
+#define MMSS_MISC_AHB_CBCR					0x00328
+#define MMSS_MISC_CXO_CBCR					0x00324
+#define MMSS_MNOC_AHB_CBCR					0x05024
+#define MMSS_MNOC_MAXI_CBCR					0x0F004
+#define MMSS_VIDEO_SUBCORE0_CBCR				0x01048
+#define MMSS_VIDEO_SUBCORE1_CBCR				0x0104C
+#define MMSS_VIDEO_AHB_CBCR					0x01030
+#define MMSS_VIDEO_AXI_CBCR					0x01034
+#define MMSS_VIDEO_CORE_CBCR					0x01028
+#define MMSS_VIDEO_MAXI_CBCR					0x01038
+#define MMSS_VMEM_AHB_CBCR					0x0F068
+#define MMSS_VMEM_MAXI_CBCR					0x0F064
+#define MMSS_DP_AUX_CMD_RCGR					0x02260
+#define MMSS_DP_CRYPTO_CMD_RCGR					0x02220
+#define MMSS_DP_LINK_CMD_RCGR					0x02200
+#define MMSS_DP_PIXEL_CMD_RCGR					0x02240
+#define MMSS_DEBUG_CLK_CTL					0x00900
diff -Nruw linux-4.4.115-fbx/arch/mips/boot/dts/include/dt-bindings/msm./msm-bus-ids.h linux-4.4.115-fbx/arch/mips/boot/dts/include/dt-bindings/msm/msm-bus-ids.h
--- linux-4.4.115-fbx/arch/mips/boot/dts/include/dt-bindings/msm./msm-bus-ids.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/arch/mips/boot/dts/include/dt-bindings/msm/msm-bus-ids.h	2019-01-22 16:16:28.179288750 +0100
@@ -0,0 +1,887 @@
+/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MSM_BUS_IDS_H
+#define __MSM_BUS_IDS_H
+
+/* Aggregation types */
+#define AGG_SCHEME_NONE	0
+#define AGG_SCHEME_LEG	1
+#define AGG_SCHEME_1	2
+
+/* Topology related enums */
+#define	MSM_BUS_FAB_DEFAULT 0
+#define	MSM_BUS_FAB_APPSS 0
+#define	MSM_BUS_FAB_SYSTEM 1024
+#define	MSM_BUS_FAB_MMSS 2048
+#define	MSM_BUS_FAB_SYSTEM_FPB 3072
+#define	MSM_BUS_FAB_CPSS_FPB 4096
+
+#define	MSM_BUS_FAB_BIMC 0
+#define	MSM_BUS_FAB_SYS_NOC 1024
+#define	MSM_BUS_FAB_MMSS_NOC 2048
+#define	MSM_BUS_FAB_OCMEM_NOC 3072
+#define	MSM_BUS_FAB_PERIPH_NOC 4096
+#define	MSM_BUS_FAB_CONFIG_NOC 5120
+#define	MSM_BUS_FAB_OCMEM_VNOC 6144
+#define	MSM_BUS_FAB_MMSS_AHB 2049
+#define	MSM_BUS_FAB_A0_NOC 6145
+#define	MSM_BUS_FAB_A1_NOC 6146
+#define	MSM_BUS_FAB_A2_NOC 6147
+#define	MSM_BUS_FAB_GNOC 6148
+#define	MSM_BUS_FAB_CR_VIRT 6149
+
+#define	MSM_BUS_MASTER_FIRST 1
+#define	MSM_BUS_MASTER_AMPSS_M0 1
+#define	MSM_BUS_MASTER_AMPSS_M1 2
+#define	MSM_BUS_APPSS_MASTER_FAB_MMSS 3
+#define	MSM_BUS_APPSS_MASTER_FAB_SYSTEM 4
+#define	MSM_BUS_SYSTEM_MASTER_FAB_APPSS 5
+#define	MSM_BUS_MASTER_SPS 6
+#define	MSM_BUS_MASTER_ADM_PORT0 7
+#define	MSM_BUS_MASTER_ADM_PORT1 8
+#define	MSM_BUS_SYSTEM_MASTER_ADM1_PORT0 9
+#define	MSM_BUS_MASTER_ADM1_PORT1 10
+#define	MSM_BUS_MASTER_LPASS_PROC 11
+#define	MSM_BUS_MASTER_MSS_PROCI 12
+#define	MSM_BUS_MASTER_MSS_PROCD 13
+#define	MSM_BUS_MASTER_MSS_MDM_PORT0 14
+#define	MSM_BUS_MASTER_LPASS 15
+#define	MSM_BUS_SYSTEM_MASTER_CPSS_FPB 16
+#define	MSM_BUS_SYSTEM_MASTER_SYSTEM_FPB 17
+#define	MSM_BUS_SYSTEM_MASTER_MMSS_FPB 18
+#define	MSM_BUS_MASTER_ADM1_CI 19
+#define	MSM_BUS_MASTER_ADM0_CI 20
+#define	MSM_BUS_MASTER_MSS_MDM_PORT1 21
+#define	MSM_BUS_MASTER_MDP_PORT0 22
+#define	MSM_BUS_MASTER_MDP_PORT1 23
+#define	MSM_BUS_MMSS_MASTER_ADM1_PORT0 24
+#define	MSM_BUS_MASTER_ROTATOR 25
+#define	MSM_BUS_MASTER_GRAPHICS_3D 26
+#define	MSM_BUS_MASTER_JPEG_DEC 27
+#define	MSM_BUS_MASTER_GRAPHICS_2D_CORE0 28
+#define	MSM_BUS_MASTER_VFE 29
+#define	MSM_BUS_MASTER_VFE0 MSM_BUS_MASTER_VFE
+#define	MSM_BUS_MASTER_VPE 30
+#define	MSM_BUS_MASTER_JPEG_ENC 31
+#define	MSM_BUS_MASTER_GRAPHICS_2D_CORE1 32
+#define	MSM_BUS_MMSS_MASTER_APPS_FAB 33
+#define	MSM_BUS_MASTER_HD_CODEC_PORT0 34
+#define	MSM_BUS_MASTER_HD_CODEC_PORT1 35
+#define	MSM_BUS_MASTER_SPDM 36
+#define	MSM_BUS_MASTER_RPM 37
+#define	MSM_BUS_MASTER_MSS 38
+#define	MSM_BUS_MASTER_RIVA 39
+#define	MSM_BUS_MASTER_SNOC_VMEM 40
+#define	MSM_BUS_MASTER_MSS_SW_PROC 41
+#define	MSM_BUS_MASTER_MSS_FW_PROC 42
+#define	MSM_BUS_MASTER_HMSS 43
+#define	MSM_BUS_MASTER_GSS_NAV 44
+#define	MSM_BUS_MASTER_PCIE 45
+#define	MSM_BUS_MASTER_SATA 46
+#define	MSM_BUS_MASTER_CRYPTO 47
+#define	MSM_BUS_MASTER_VIDEO_CAP 48
+#define	MSM_BUS_MASTER_GRAPHICS_3D_PORT1 49
+#define	MSM_BUS_MASTER_VIDEO_ENC 50
+#define	MSM_BUS_MASTER_VIDEO_DEC 51
+#define	MSM_BUS_MASTER_LPASS_AHB 52
+#define	MSM_BUS_MASTER_QDSS_BAM 53
+#define	MSM_BUS_MASTER_SNOC_CFG 54
+#define	MSM_BUS_MASTER_CRYPTO_CORE0 55
+#define	MSM_BUS_MASTER_CRYPTO_CORE1 56
+#define	MSM_BUS_MASTER_MSS_NAV 57
+#define	MSM_BUS_MASTER_OCMEM_DMA 58
+#define	MSM_BUS_MASTER_WCSS 59
+#define	MSM_BUS_MASTER_QDSS_ETR 60
+#define	MSM_BUS_MASTER_USB3 61
+#define	MSM_BUS_MASTER_JPEG 62
+#define	MSM_BUS_MASTER_VIDEO_P0 63
+#define	MSM_BUS_MASTER_VIDEO_P1 64
+#define	MSM_BUS_MASTER_MSS_PROC 65
+#define	MSM_BUS_MASTER_JPEG_OCMEM 66
+#define	MSM_BUS_MASTER_MDP_OCMEM 67
+#define	MSM_BUS_MASTER_VIDEO_P0_OCMEM 68
+#define	MSM_BUS_MASTER_VIDEO_P1_OCMEM 69
+#define	MSM_BUS_MASTER_VFE_OCMEM 70
+#define	MSM_BUS_MASTER_CNOC_ONOC_CFG 71
+#define	MSM_BUS_MASTER_RPM_INST 72
+#define	MSM_BUS_MASTER_RPM_DATA 73
+#define	MSM_BUS_MASTER_RPM_SYS 74
+#define	MSM_BUS_MASTER_DEHR 75
+#define	MSM_BUS_MASTER_QDSS_DAP 76
+#define	MSM_BUS_MASTER_TIC 77
+#define	MSM_BUS_MASTER_SDCC_1 78
+#define	MSM_BUS_MASTER_SDCC_3 79
+#define	MSM_BUS_MASTER_SDCC_4 80
+#define	MSM_BUS_MASTER_SDCC_2 81
+#define	MSM_BUS_MASTER_TSIF 82
+#define	MSM_BUS_MASTER_BAM_DMA 83
+#define	MSM_BUS_MASTER_BLSP_2 84
+#define	MSM_BUS_MASTER_USB_HSIC 85
+#define	MSM_BUS_MASTER_BLSP_1 86
+#define	MSM_BUS_MASTER_USB_HS 87
+#define	MSM_BUS_MASTER_PNOC_CFG 88
+#define	MSM_BUS_MASTER_V_OCMEM_GFX3D 89
+#define	MSM_BUS_MASTER_IPA 90
+#define	MSM_BUS_MASTER_QPIC 91
+#define	MSM_BUS_MASTER_MDPE 92
+#define	MSM_BUS_MASTER_USB_HS2 93
+#define	MSM_BUS_MASTER_VPU 94
+#define	MSM_BUS_MASTER_UFS 95
+#define	MSM_BUS_MASTER_BCAST 96
+#define	MSM_BUS_MASTER_CRYPTO_CORE2 97
+#define	MSM_BUS_MASTER_EMAC 98
+#define	MSM_BUS_MASTER_VPU_1 99
+#define	MSM_BUS_MASTER_PCIE_1 100
+#define	MSM_BUS_MASTER_USB3_1 101
+#define	MSM_BUS_MASTER_CNOC_MNOC_MMSS_CFG 102
+#define	MSM_BUS_MASTER_CNOC_MNOC_CFG 103
+#define	MSM_BUS_MASTER_TCU_0 104
+#define	MSM_BUS_MASTER_TCU_1 105
+#define	MSM_BUS_MASTER_CPP 106
+#define	MSM_BUS_MASTER_AUDIO 107
+#define	MSM_BUS_MASTER_PCIE_2 108
+#define	MSM_BUS_MASTER_VFE1 109
+#define	MSM_BUS_MASTER_XM_USB_HS1 110
+#define	MSM_BUS_MASTER_PCNOC_BIMC_1 111
+#define	MSM_BUS_MASTER_BIMC_PCNOC   112
+#define	MSM_BUS_MASTER_XI_USB_HSIC  113
+#define	MSM_BUS_MASTER_SGMII	    114
+#define	MSM_BUS_SPMI_FETCHER 115
+#define	MSM_BUS_MASTER_GNOC_BIMC 116
+#define	MSM_BUS_MASTER_CRVIRT_A2NOC 117
+#define	MSM_BUS_MASTER_CNOC_A2NOC 118
+#define	MSM_BUS_MASTER_WLAN 119
+#define	MSM_BUS_MASTER_MSS_CE 120
+#define	MSM_BUS_MASTER_CDSP_PROC 121
+#define	MSM_BUS_MASTER_GNOC_SNOC 122
+#define	MSM_BUS_MASTER_PIMEM 123
+#define	MSM_BUS_MASTER_MASTER_LAST 124
+
+#define	MSM_BUS_SYSTEM_FPB_MASTER_SYSTEM MSM_BUS_SYSTEM_MASTER_SYSTEM_FPB
+#define	MSM_BUS_CPSS_FPB_MASTER_SYSTEM MSM_BUS_SYSTEM_MASTER_CPSS_FPB
+
+#define	MSM_BUS_SNOC_MM_INT_0 10000
+#define	MSM_BUS_SNOC_MM_INT_1 10001
+#define	MSM_BUS_SNOC_MM_INT_2 10002
+#define	MSM_BUS_SNOC_MM_INT_BIMC 10003
+#define	MSM_BUS_SNOC_INT_0 10004
+#define	MSM_BUS_SNOC_INT_1 10005
+#define	MSM_BUS_SNOC_INT_BIMC 10006
+#define	MSM_BUS_SNOC_BIMC_0_MAS 10007
+#define	MSM_BUS_SNOC_BIMC_1_MAS 10008
+#define	MSM_BUS_SNOC_QDSS_INT 10009
+#define	MSM_BUS_PNOC_SNOC_MAS 10010
+#define	MSM_BUS_PNOC_SNOC_SLV 10011
+#define	MSM_BUS_PNOC_INT_0 10012
+#define	MSM_BUS_PNOC_INT_1 10013
+#define	MSM_BUS_PNOC_M_0 10014
+#define	MSM_BUS_PNOC_M_1 10015
+#define	MSM_BUS_BIMC_SNOC_MAS 10016
+#define	MSM_BUS_BIMC_SNOC_SLV 10017
+#define	MSM_BUS_PNOC_SLV_0 10018
+#define	MSM_BUS_PNOC_SLV_1 10019
+#define	MSM_BUS_PNOC_SLV_2 10020
+#define	MSM_BUS_PNOC_SLV_3 10021
+#define	MSM_BUS_PNOC_SLV_4 10022
+#define	MSM_BUS_PNOC_SLV_8 10023
+#define	MSM_BUS_PNOC_SLV_9 10024
+#define	MSM_BUS_SNOC_BIMC_0_SLV 10025
+#define	MSM_BUS_SNOC_BIMC_1_SLV 10026
+#define	MSM_BUS_MNOC_BIMC_MAS 10027
+#define	MSM_BUS_MNOC_BIMC_SLV 10028
+#define	MSM_BUS_BIMC_MNOC_MAS 10029
+#define	MSM_BUS_BIMC_MNOC_SLV 10030
+#define	MSM_BUS_SNOC_BIMC_MAS 10031
+#define	MSM_BUS_SNOC_BIMC_SLV 10032
+#define	MSM_BUS_CNOC_SNOC_MAS 10033
+#define	MSM_BUS_CNOC_SNOC_SLV 10034
+#define	MSM_BUS_SNOC_CNOC_MAS 10035
+#define	MSM_BUS_SNOC_CNOC_SLV 10036
+#define	MSM_BUS_OVNOC_SNOC_MAS 10037
+#define	MSM_BUS_OVNOC_SNOC_SLV 10038
+#define	MSM_BUS_SNOC_OVNOC_MAS 10039
+#define	MSM_BUS_SNOC_OVNOC_SLV 10040
+#define	MSM_BUS_SNOC_PNOC_MAS 10041
+#define	MSM_BUS_SNOC_PNOC_SLV 10042
+#define	MSM_BUS_BIMC_INT_APPS_EBI 10043
+#define	MSM_BUS_BIMC_INT_APPS_SNOC 10044
+#define	MSM_BUS_SNOC_BIMC_2_MAS 10045
+#define	MSM_BUS_SNOC_BIMC_2_SLV 10046
+#define	MSM_BUS_PNOC_SLV_5	10047
+#define	MSM_BUS_PNOC_SLV_7	10048
+#define	MSM_BUS_PNOC_INT_2 10049
+#define	MSM_BUS_PNOC_INT_3 10050
+#define	MSM_BUS_PNOC_INT_4 10051
+#define	MSM_BUS_PNOC_INT_5 10052
+#define	MSM_BUS_PNOC_INT_6 10053
+#define	MSM_BUS_PNOC_INT_7 10054
+#define	MSM_BUS_BIMC_SNOC_1_MAS 10055
+#define	MSM_BUS_BIMC_SNOC_1_SLV 10056
+#define	MSM_BUS_PNOC_A1NOC_MAS 10057
+#define	MSM_BUS_PNOC_A1NOC_SLV 10058
+#define	MSM_BUS_CNOC_A1NOC_MAS 10059
+#define	MSM_BUS_A0NOC_SNOC_MAS 10060
+#define	MSM_BUS_A0NOC_SNOC_SLV 10061
+#define	MSM_BUS_A1NOC_SNOC_SLV 10062
+#define	MSM_BUS_A1NOC_SNOC_MAS 10063
+#define	MSM_BUS_A2NOC_SNOC_MAS 10064
+#define	MSM_BUS_A2NOC_SNOC_SLV 10065
+#define	MSM_BUS_SNOC_INT_2 10066
+#define	MSM_BUS_A0NOC_QDSS_INT	10067
+#define	MSM_BUS_INT_LAST 10068
+
+#define	MSM_BUS_INT_TEST_ID	20000
+#define	MSM_BUS_INT_TEST_LAST	20050
+
+#define	MSM_BUS_SLAVE_FIRST 512
+#define	MSM_BUS_SLAVE_EBI_CH0 512
+#define	MSM_BUS_SLAVE_EBI_CH1 513
+#define	MSM_BUS_SLAVE_AMPSS_L2 514
+#define	MSM_BUS_APPSS_SLAVE_FAB_MMSS 515
+#define	MSM_BUS_APPSS_SLAVE_FAB_SYSTEM 516
+#define	MSM_BUS_SYSTEM_SLAVE_FAB_APPS 517
+#define	MSM_BUS_SLAVE_SPS 518
+#define	MSM_BUS_SLAVE_SYSTEM_IMEM 519
+#define	MSM_BUS_SLAVE_AMPSS 520
+#define	MSM_BUS_SLAVE_MSS 521
+#define	MSM_BUS_SLAVE_LPASS 522
+#define	MSM_BUS_SYSTEM_SLAVE_CPSS_FPB 523
+#define	MSM_BUS_SYSTEM_SLAVE_SYSTEM_FPB 524
+#define	MSM_BUS_SYSTEM_SLAVE_MMSS_FPB 525
+#define	MSM_BUS_SLAVE_CORESIGHT 526
+#define	MSM_BUS_SLAVE_RIVA 527
+#define	MSM_BUS_SLAVE_SMI 528
+#define	MSM_BUS_MMSS_SLAVE_FAB_APPS 529
+#define	MSM_BUS_MMSS_SLAVE_FAB_APPS_1 530
+#define	MSM_BUS_SLAVE_MM_IMEM 531
+#define	MSM_BUS_SLAVE_CRYPTO 532
+#define	MSM_BUS_SLAVE_SPDM 533
+#define	MSM_BUS_SLAVE_RPM 534
+#define	MSM_BUS_SLAVE_RPM_MSG_RAM 535
+#define	MSM_BUS_SLAVE_MPM 536
+#define	MSM_BUS_SLAVE_PMIC1_SSBI1_A 537
+#define	MSM_BUS_SLAVE_PMIC1_SSBI1_B 538
+#define	MSM_BUS_SLAVE_PMIC1_SSBI1_C 539
+#define	MSM_BUS_SLAVE_PMIC2_SSBI2_A 540
+#define	MSM_BUS_SLAVE_PMIC2_SSBI2_B 541
+#define	MSM_BUS_SLAVE_GSBI1_UART 542
+#define	MSM_BUS_SLAVE_GSBI2_UART 543
+#define	MSM_BUS_SLAVE_GSBI3_UART 544
+#define	MSM_BUS_SLAVE_GSBI4_UART 545
+#define	MSM_BUS_SLAVE_GSBI5_UART 546
+#define	MSM_BUS_SLAVE_GSBI6_UART 547
+#define	MSM_BUS_SLAVE_GSBI7_UART 548
+#define	MSM_BUS_SLAVE_GSBI8_UART 549
+#define	MSM_BUS_SLAVE_GSBI9_UART 550
+#define	MSM_BUS_SLAVE_GSBI10_UART 551
+#define	MSM_BUS_SLAVE_GSBI11_UART 552
+#define	MSM_BUS_SLAVE_GSBI12_UART 553
+#define	MSM_BUS_SLAVE_GSBI1_QUP 554
+#define	MSM_BUS_SLAVE_GSBI2_QUP 555
+#define	MSM_BUS_SLAVE_GSBI3_QUP 556
+#define	MSM_BUS_SLAVE_GSBI4_QUP 557
+#define	MSM_BUS_SLAVE_GSBI5_QUP 558
+#define	MSM_BUS_SLAVE_GSBI6_QUP 559
+#define	MSM_BUS_SLAVE_GSBI7_QUP 560
+#define	MSM_BUS_SLAVE_GSBI8_QUP 561
+#define	MSM_BUS_SLAVE_GSBI9_QUP 562
+#define	MSM_BUS_SLAVE_GSBI10_QUP 563
+#define	MSM_BUS_SLAVE_GSBI11_QUP 564
+#define	MSM_BUS_SLAVE_GSBI12_QUP 565
+#define	MSM_BUS_SLAVE_EBI2_NAND 566
+#define	MSM_BUS_SLAVE_EBI2_CS0 567
+#define	MSM_BUS_SLAVE_EBI2_CS1 568
+#define	MSM_BUS_SLAVE_EBI2_CS2 569
+#define	MSM_BUS_SLAVE_EBI2_CS3 570
+#define	MSM_BUS_SLAVE_EBI2_CS4 571
+#define	MSM_BUS_SLAVE_EBI2_CS5 572
+#define	MSM_BUS_SLAVE_USB_FS1 573
+#define	MSM_BUS_SLAVE_USB_FS2 574
+#define	MSM_BUS_SLAVE_TSIF 575
+#define	MSM_BUS_SLAVE_MSM_TSSC 576
+#define	MSM_BUS_SLAVE_MSM_PDM 577
+#define	MSM_BUS_SLAVE_MSM_DIMEM 578
+#define	MSM_BUS_SLAVE_MSM_TCSR 579
+#define	MSM_BUS_SLAVE_MSM_PRNG 580
+#define	MSM_BUS_SLAVE_GSS 581
+#define	MSM_BUS_SLAVE_SATA 582
+#define	MSM_BUS_SLAVE_USB3 583
+#define	MSM_BUS_SLAVE_WCSS 584
+#define	MSM_BUS_SLAVE_OCIMEM 585
+#define	MSM_BUS_SLAVE_SNOC_OCMEM 586
+#define	MSM_BUS_SLAVE_SERVICE_SNOC 587
+#define	MSM_BUS_SLAVE_QDSS_STM 588
+#define	MSM_BUS_SLAVE_CAMERA_CFG 589
+#define	MSM_BUS_SLAVE_DISPLAY_CFG 590
+#define	MSM_BUS_SLAVE_OCMEM_CFG 591
+#define	MSM_BUS_SLAVE_CPR_CFG 592
+#define	MSM_BUS_SLAVE_CPR_XPU_CFG 593
+#define	MSM_BUS_SLAVE_MISC_CFG 594
+#define	MSM_BUS_SLAVE_MISC_XPU_CFG 595
+#define	MSM_BUS_SLAVE_VENUS_CFG 596
+#define	MSM_BUS_SLAVE_MISC_VENUS_CFG 597
+#define	MSM_BUS_SLAVE_GRAPHICS_3D_CFG 598
+#define	MSM_BUS_SLAVE_MMSS_CLK_CFG 599
+#define	MSM_BUS_SLAVE_MMSS_CLK_XPU_CFG 600
+#define	MSM_BUS_SLAVE_MNOC_MPU_CFG 601
+#define	MSM_BUS_SLAVE_ONOC_MPU_CFG 602
+#define	MSM_BUS_SLAVE_SERVICE_MNOC 603
+#define	MSM_BUS_SLAVE_OCMEM 604
+#define	MSM_BUS_SLAVE_SERVICE_ONOC 605
+#define	MSM_BUS_SLAVE_SDCC_1 606
+#define	MSM_BUS_SLAVE_SDCC_3 607
+#define	MSM_BUS_SLAVE_SDCC_2 608
+#define	MSM_BUS_SLAVE_SDCC_4 609
+#define	MSM_BUS_SLAVE_BAM_DMA 610
+#define	MSM_BUS_SLAVE_BLSP_2 611
+#define	MSM_BUS_SLAVE_USB_HSIC 612
+#define	MSM_BUS_SLAVE_BLSP_1 613
+#define	MSM_BUS_SLAVE_USB_HS 614
+#define	MSM_BUS_SLAVE_PDM 615
+#define	MSM_BUS_SLAVE_PERIPH_APU_CFG 616
+#define	MSM_BUS_SLAVE_PNOC_MPU_CFG 617
+#define	MSM_BUS_SLAVE_PRNG 618
+#define	MSM_BUS_SLAVE_SERVICE_PNOC 619
+#define	MSM_BUS_SLAVE_CLK_CTL 620
+#define	MSM_BUS_SLAVE_CNOC_MSS 621
+#define	MSM_BUS_SLAVE_SECURITY 622
+#define	MSM_BUS_SLAVE_TCSR 623
+#define	MSM_BUS_SLAVE_TLMM 624
+#define	MSM_BUS_SLAVE_CRYPTO_0_CFG 625
+#define	MSM_BUS_SLAVE_CRYPTO_1_CFG 626
+#define	MSM_BUS_SLAVE_IMEM_CFG 627
+#define	MSM_BUS_SLAVE_MESSAGE_RAM 628
+#define	MSM_BUS_SLAVE_BIMC_CFG 629
+#define	MSM_BUS_SLAVE_BOOT_ROM 630
+#define	MSM_BUS_SLAVE_CNOC_MNOC_MMSS_CFG 631
+#define	MSM_BUS_SLAVE_PMIC_ARB 632
+#define	MSM_BUS_SLAVE_SPDM_WRAPPER 633
+#define	MSM_BUS_SLAVE_DEHR_CFG 634
+#define	MSM_BUS_SLAVE_QDSS_CFG 635
+#define	MSM_BUS_SLAVE_RBCPR_CFG 636
+#define	MSM_BUS_SLAVE_RBCPR_QDSS_APU_CFG 637
+#define	MSM_BUS_SLAVE_SNOC_MPU_CFG 638
+#define	MSM_BUS_SLAVE_CNOC_ONOC_CFG 639
+#define	MSM_BUS_SLAVE_CNOC_MNOC_CFG 640
+#define	MSM_BUS_SLAVE_PNOC_CFG 641
+#define	MSM_BUS_SLAVE_SNOC_CFG 642
+#define	MSM_BUS_SLAVE_EBI1_DLL_CFG 643
+#define	MSM_BUS_SLAVE_PHY_APU_CFG 644
+#define	MSM_BUS_SLAVE_EBI1_PHY_CFG 645
+#define	MSM_BUS_SLAVE_SERVICE_CNOC 646
+#define	MSM_BUS_SLAVE_IPS_CFG 647
+#define	MSM_BUS_SLAVE_QPIC 648
+#define	MSM_BUS_SLAVE_DSI_CFG 649
+#define	MSM_BUS_SLAVE_UFS_CFG 650
+#define	MSM_BUS_SLAVE_RBCPR_CX_CFG 651
+#define	MSM_BUS_SLAVE_RBCPR_MX_CFG 652
+#define	MSM_BUS_SLAVE_PCIE_CFG 653
+#define	MSM_BUS_SLAVE_USB_PHYS_CFG 654
+#define	MSM_BUS_SLAVE_VIDEO_CAP_CFG 655
+#define	MSM_BUS_SLAVE_AVSYNC_CFG 656
+#define	MSM_BUS_SLAVE_CRYPTO_2_CFG 657
+#define	MSM_BUS_SLAVE_VPU_CFG 658
+#define	MSM_BUS_SLAVE_BCAST_CFG 659
+#define	MSM_BUS_SLAVE_KLM_CFG 660
+#define	MSM_BUS_SLAVE_GENI_IR_CFG 661
+#define	MSM_BUS_SLAVE_OCMEM_GFX 662
+#define	MSM_BUS_SLAVE_CATS_128 663
+#define	MSM_BUS_SLAVE_OCMEM_64 664
+#define	MSM_BUS_SLAVE_PCIE_0 665
+#define	MSM_BUS_SLAVE_PCIE_1 666
+#define	MSM_BUS_SLAVE_PCIE_0_CFG 667
+#define	MSM_BUS_SLAVE_PCIE_1_CFG 668
+#define	MSM_BUS_SLAVE_SRVC_MNOC 669
+#define	MSM_BUS_SLAVE_USB_HS2 670
+#define	MSM_BUS_SLAVE_AUDIO 671
+#define	MSM_BUS_SLAVE_TCU 672
+#define	MSM_BUS_SLAVE_APPSS 673
+#define	MSM_BUS_SLAVE_PCIE_PARF 674
+#define	MSM_BUS_SLAVE_USB3_PHY_CFG 675
+#define	MSM_BUS_SLAVE_IPA_CFG 676
+#define	MSM_BUS_SLAVE_A0NOC_SNOC 677
+#define	MSM_BUS_SLAVE_A1NOC_SNOC 678
+#define	MSM_BUS_SLAVE_A2NOC_SNOC 679
+#define	MSM_BUS_SLAVE_HMSS_L3 680
+#define	MSM_BUS_SLAVE_PIMEM_CFG 681
+#define	MSM_BUS_SLAVE_DCC_CFG 682
+#define	MSM_BUS_SLAVE_QDSS_RBCPR_APU_CFG 683
+#define	MSM_BUS_SLAVE_PCIE_2_CFG 684
+#define	MSM_BUS_SLAVE_PCIE20_AHB2PHY 685
+#define	MSM_BUS_SLAVE_A0NOC_CFG 686
+#define	MSM_BUS_SLAVE_A1NOC_CFG 687
+#define	MSM_BUS_SLAVE_A2NOC_CFG 688
+#define	MSM_BUS_SLAVE_A1NOC_MPU_CFG 689
+#define	MSM_BUS_SLAVE_A2NOC_MPU_CFG 690
+#define	MSM_BUS_SLAVE_A0NOC_SMMU_CFG 691
+#define	MSM_BUS_SLAVE_A1NOC_SMMU_CFG 692
+#define	MSM_BUS_SLAVE_A2NOC_SMMU_CFG 693
+#define	MSM_BUS_SLAVE_LPASS_SMMU_CFG 694
+#define	MSM_BUS_SLAVE_MMAGIC_CFG 695
+#define	MSM_BUS_SLAVE_VENUS_THROTTLE_CFG 696
+#define	MSM_BUS_SLAVE_SSC_CFG 697
+#define	MSM_BUS_SLAVE_DSA_CFG 698
+#define	MSM_BUS_SLAVE_DSA_MPU_CFG 699
+#define	MSM_BUS_SLAVE_DISPLAY_THROTTLE_CFG 700
+#define	MSM_BUS_SLAVE_SMMU_CPP_CFG 701
+#define	MSM_BUS_SLAVE_SMMU_JPEG_CFG 702
+#define	MSM_BUS_SLAVE_SMMU_MDP_CFG 703
+#define	MSM_BUS_SLAVE_SMMU_ROTATOR_CFG 704
+#define	MSM_BUS_SLAVE_SMMU_VENUS_CFG 705
+#define	MSM_BUS_SLAVE_SMMU_VFE_CFG 706
+#define	MSM_BUS_SLAVE_A0NOC_MPU_CFG 707
+#define	MSM_BUS_SLAVE_VMEM_CFG 708
+#define	MSM_BUS_SLAVE_CAMERA_THROTTLE_CFG 709
+#define	MSM_BUS_SLAVE_VMEM 710
+#define	MSM_BUS_SLAVE_AHB2PHY 711
+#define	MSM_BUS_SLAVE_PIMEM 712
+#define	MSM_BUS_SLAVE_SNOC_VMEM 713
+#define	MSM_BUS_SLAVE_PCIE_2 714
+#define	MSM_BUS_SLAVE_RBCPR_MX 715
+#define	MSM_BUS_SLAVE_RBCPR_CX 716
+#define	MSM_BUS_SLAVE_BIMC_PCNOC 717
+#define	MSM_BUS_SLAVE_PCNOC_BIMC_1 718
+#define	MSM_BUS_SLAVE_SGMII 719
+#define	MSM_BUS_SLAVE_SPMI_FETCHER 720
+#define	MSM_BUS_PNOC_SLV_6 721
+#define	MSM_BUS_SLAVE_MMSS_SMMU_CFG 722
+#define	MSM_BUS_SLAVE_WLAN 723
+#define	MSM_BUS_SLAVE_CRVIRT_A2NOC 724
+#define	MSM_BUS_SLAVE_CNOC_A2NOC 725
+#define	MSM_BUS_SLAVE_GLM 726
+#define	MSM_BUS_SLAVE_GNOC_BIMC 727
+#define	MSM_BUS_SLAVE_GNOC_SNOC 728
+#define	MSM_BUS_SLAVE_QM_CFG 729
+#define	MSM_BUS_SLAVE_TLMM_EAST 730
+#define	MSM_BUS_SLAVE_TLMM_NORTH 731
+#define	MSM_BUS_SLAVE_TLMM_WEST 732
+#define	MSM_BUS_SLAVE_SKL 733
+#define	MSM_BUS_SLAVE_LPASS_TCM	734
+#define	MSM_BUS_SLAVE_TLMM_SOUTH 735
+#define	MSM_BUS_SLAVE_TLMM_CENTER 736
+#define	MSM_BUS_MSS_NAV_CE_MPU_CFG 737
+#define	MSM_BUS_SLAVE_A2NOC_THROTTLE_CFG 738
+#define	MSM_BUS_SLAVE_CDSP 739
+#define	MSM_BUS_SLAVE_CDSP_SMMU_CFG 740
+#define	MSM_BUS_SLAVE_LPASS_MPU_CFG 741
+#define	MSM_BUS_SLAVE_CSI_PHY_CFG 742
+#define	MSM_BUS_SLAVE_LAST 743
+
+#define	MSM_BUS_SYSTEM_FPB_SLAVE_SYSTEM  MSM_BUS_SYSTEM_SLAVE_SYSTEM_FPB
+#define	MSM_BUS_CPSS_FPB_SLAVE_SYSTEM MSM_BUS_SYSTEM_SLAVE_CPSS_FPB
+
+/*
+ * IDs used in RPM messages
+ */
+#define	ICBID_MASTER_APPSS_PROC 0
+#define	ICBID_MASTER_MSS_PROC 1
+#define	ICBID_MASTER_MNOC_BIMC 2
+#define	ICBID_MASTER_SNOC_BIMC 3
+#define	ICBID_MASTER_SNOC_BIMC_0 ICBID_MASTER_SNOC_BIMC
+#define	ICBID_MASTER_CNOC_MNOC_MMSS_CFG 4
+#define	ICBID_MASTER_CNOC_MNOC_CFG 5
+#define	ICBID_MASTER_GFX3D 6
+#define	ICBID_MASTER_JPEG 7
+#define	ICBID_MASTER_MDP 8
+#define	ICBID_MASTER_MDP0 ICBID_MASTER_MDP
+#define	ICBID_MASTER_MDPS ICBID_MASTER_MDP
+#define	ICBID_MASTER_VIDEO 9
+#define	ICBID_MASTER_VIDEO_P0 ICBID_MASTER_VIDEO
+#define	ICBID_MASTER_VIDEO_P1 10
+#define	ICBID_MASTER_VFE 11
+#define	ICBID_MASTER_VFE0 ICBID_MASTER_VFE
+#define	ICBID_MASTER_CNOC_ONOC_CFG 12
+#define	ICBID_MASTER_JPEG_OCMEM 13
+#define	ICBID_MASTER_MDP_OCMEM 14
+#define	ICBID_MASTER_VIDEO_P0_OCMEM 15
+#define	ICBID_MASTER_VIDEO_P1_OCMEM 16
+#define	ICBID_MASTER_VFE_OCMEM 17
+#define	ICBID_MASTER_LPASS_AHB 18
+#define	ICBID_MASTER_QDSS_BAM 19
+#define	ICBID_MASTER_SNOC_CFG 20
+#define	ICBID_MASTER_BIMC_SNOC 21
+#define	ICBID_MASTER_BIMC_SNOC_0 ICBID_MASTER_BIMC_SNOC
+#define	ICBID_MASTER_CNOC_SNOC 22
+#define	ICBID_MASTER_CRYPTO 23
+#define	ICBID_MASTER_CRYPTO_CORE0 ICBID_MASTER_CRYPTO
+#define	ICBID_MASTER_CRYPTO_CORE1 24
+#define	ICBID_MASTER_LPASS_PROC 25
+#define	ICBID_MASTER_MSS 26
+#define	ICBID_MASTER_MSS_NAV 27
+#define	ICBID_MASTER_OCMEM_DMA 28
+#define	ICBID_MASTER_PNOC_SNOC 29
+#define	ICBID_MASTER_WCSS 30
+#define	ICBID_MASTER_QDSS_ETR 31
+#define	ICBID_MASTER_USB3 32
+#define	ICBID_MASTER_USB3_0 ICBID_MASTER_USB3
+#define	ICBID_MASTER_SDCC_1 33
+#define	ICBID_MASTER_SDCC_3 34
+#define	ICBID_MASTER_SDCC_2 35
+#define	ICBID_MASTER_SDCC_4 36
+#define	ICBID_MASTER_TSIF 37
+#define	ICBID_MASTER_BAM_DMA 38
+#define	ICBID_MASTER_BLSP_2 39
+#define	ICBID_MASTER_USB_HSIC 40
+#define	ICBID_MASTER_BLSP_1 41
+#define	ICBID_MASTER_USB_HS 42
+#define	ICBID_MASTER_USB_HS1 ICBID_MASTER_USB_HS
+#define	ICBID_MASTER_PNOC_CFG 43
+#define	ICBID_MASTER_SNOC_PNOC 44
+#define	ICBID_MASTER_RPM_INST 45
+#define	ICBID_MASTER_RPM_DATA 46
+#define	ICBID_MASTER_RPM_SYS 47
+#define	ICBID_MASTER_DEHR 48
+#define	ICBID_MASTER_QDSS_DAP 49
+#define	ICBID_MASTER_SPDM 50
+#define	ICBID_MASTER_TIC 51
+#define	ICBID_MASTER_SNOC_CNOC 52
+#define	ICBID_MASTER_GFX3D_OCMEM 53
+#define	ICBID_MASTER_GFX3D_GMEM ICBID_MASTER_GFX3D_OCMEM
+#define	ICBID_MASTER_OVIRT_SNOC 54
+#define	ICBID_MASTER_SNOC_OVIRT 55
+#define	ICBID_MASTER_SNOC_GVIRT ICBID_MASTER_SNOC_OVIRT
+#define	ICBID_MASTER_ONOC_OVIRT 56
+#define	ICBID_MASTER_USB_HS2 57
+#define	ICBID_MASTER_QPIC 58
+#define	ICBID_MASTER_IPA 59
+#define	ICBID_MASTER_DSI 60
+#define	ICBID_MASTER_MDP1 61
+#define	ICBID_MASTER_MDPE ICBID_MASTER_MDP1
+#define	ICBID_MASTER_VPU_PROC 62
+#define	ICBID_MASTER_VPU 63
+#define	ICBID_MASTER_VPU0 ICBID_MASTER_VPU
+#define	ICBID_MASTER_CRYPTO_CORE2 64
+#define	ICBID_MASTER_PCIE_0 65
+#define	ICBID_MASTER_PCIE_1 66
+#define	ICBID_MASTER_SATA 67
+#define	ICBID_MASTER_UFS 68
+#define	ICBID_MASTER_USB3_1 69
+#define	ICBID_MASTER_VIDEO_OCMEM 70
+#define	ICBID_MASTER_VPU1 71
+#define	ICBID_MASTER_VCAP 72
+#define	ICBID_MASTER_EMAC 73
+#define	ICBID_MASTER_BCAST 74
+#define	ICBID_MASTER_MMSS_PROC 75
+#define	ICBID_MASTER_SNOC_BIMC_1 76
+#define	ICBID_MASTER_SNOC_PCNOC 77
+#define	ICBID_MASTER_AUDIO 78
+#define	ICBID_MASTER_MM_INT_0 79
+#define	ICBID_MASTER_MM_INT_1 80
+#define	ICBID_MASTER_MM_INT_2 81
+#define	ICBID_MASTER_MM_INT_BIMC 82
+#define	ICBID_MASTER_MSS_INT 83
+#define	ICBID_MASTER_PCNOC_CFG 84
+#define	ICBID_MASTER_PCNOC_INT_0 85
+#define	ICBID_MASTER_PCNOC_INT_1 86
+#define	ICBID_MASTER_PCNOC_M_0 87
+#define	ICBID_MASTER_PCNOC_M_1 88
+#define	ICBID_MASTER_PCNOC_S_0 89
+#define	ICBID_MASTER_PCNOC_S_1 90
+#define	ICBID_MASTER_PCNOC_S_2 91
+#define	ICBID_MASTER_PCNOC_S_3 92
+#define	ICBID_MASTER_PCNOC_S_4 93
+#define	ICBID_MASTER_PCNOC_S_6 94
+#define	ICBID_MASTER_PCNOC_S_7 95
+#define	ICBID_MASTER_PCNOC_S_8 96
+#define	ICBID_MASTER_PCNOC_S_9 97
+#define	ICBID_MASTER_QDSS_INT 98
+#define	ICBID_MASTER_SNOC_INT_0	99
+#define	ICBID_MASTER_SNOC_INT_1 100
+#define	ICBID_MASTER_SNOC_INT_BIMC 101
+#define	ICBID_MASTER_TCU_0 102
+#define	ICBID_MASTER_TCU_1 103
+#define	ICBID_MASTER_BIMC_INT_0 104
+#define	ICBID_MASTER_BIMC_INT_1 105
+#define	ICBID_MASTER_CAMERA 106
+#define	ICBID_MASTER_RICA 107
+#define	ICBID_MASTER_SNOC_BIMC_2 108
+#define	ICBID_MASTER_BIMC_SNOC_1 109
+#define	ICBID_MASTER_A0NOC_SNOC 110
+#define	ICBID_MASTER_A1NOC_SNOC 111
+#define	ICBID_MASTER_A2NOC_SNOC 112
+#define	ICBID_MASTER_PIMEM 113
+#define	ICBID_MASTER_SNOC_VMEM 114
+#define	ICBID_MASTER_CPP 115
+#define	ICBID_MASTER_CNOC_A1NOC 116
+#define	ICBID_MASTER_PNOC_A1NOC 117
+#define	ICBID_MASTER_HMSS 118
+#define	ICBID_MASTER_PCIE_2 119
+#define	ICBID_MASTER_ROTATOR 120
+#define	ICBID_MASTER_VENUS_VMEM 121
+#define	ICBID_MASTER_DCC 122
+#define	ICBID_MASTER_MCDMA 123
+#define	ICBID_MASTER_PCNOC_INT_2 124
+#define	ICBID_MASTER_PCNOC_INT_3 125
+#define	ICBID_MASTER_PCNOC_INT_4 126
+#define	ICBID_MASTER_PCNOC_INT_5 127
+#define	ICBID_MASTER_PCNOC_INT_6 128
+#define	ICBID_MASTER_PCNOC_S_5 129
+#define	ICBID_MASTER_SENSORS_AHB 130
+#define	ICBID_MASTER_SENSORS_PROC 131
+#define	ICBID_MASTER_QSPI 132
+#define	ICBID_MASTER_VFE1 133
+#define	ICBID_MASTER_SNOC_INT_2 134
+#define	ICBID_MASTER_SMMNOC_BIMC 135
+#define	ICBID_MASTER_CRVIRT_A1NOC 136
+#define	ICBID_MASTER_XM_USB_HS1 137
+#define	ICBID_MASTER_XI_USB_HS1 138
+#define	ICBID_MASTER_PCNOC_BIMC_1 139
+#define	ICBID_MASTER_BIMC_PCNOC 140
+#define	ICBID_MASTER_XI_HSIC 141
+#define	ICBID_MASTER_SGMII  142
+#define	ICBID_MASTER_SPMI_FETCHER 143
+#define	ICBID_MASTER_GNOC_BIMC 144
+#define	ICBID_MASTER_CRVIRT_A2NOC 145
+#define	ICBID_MASTER_CNOC_A2NOC 146
+#define	ICBID_MASTER_WLAN 147
+#define	ICBID_MASTER_MSS_CE 148
+#define	ICBID_MASTER_CDSP_PROC 149
+#define	ICBID_MASTER_GNOC_SNOC 150
+
+#define	ICBID_SLAVE_EBI1 0
+#define	ICBID_SLAVE_APPSS_L2 1
+#define	ICBID_SLAVE_BIMC_SNOC 2
+#define	ICBID_SLAVE_BIMC_SNOC_0 ICBID_SLAVE_BIMC_SNOC
+#define	ICBID_SLAVE_CAMERA_CFG 3
+#define	ICBID_SLAVE_DISPLAY_CFG 4
+#define	ICBID_SLAVE_OCMEM_CFG 5
+#define	ICBID_SLAVE_CPR_CFG 6
+#define	ICBID_SLAVE_CPR_XPU_CFG 7
+#define	ICBID_SLAVE_MISC_CFG 8
+#define	ICBID_SLAVE_MISC_XPU_CFG 9
+#define	ICBID_SLAVE_VENUS_CFG 10
+#define	ICBID_SLAVE_GFX3D_CFG 11
+#define	ICBID_SLAVE_MMSS_CLK_CFG 12
+#define	ICBID_SLAVE_MMSS_CLK_XPU_CFG 13
+#define	ICBID_SLAVE_MNOC_MPU_CFG 14
+#define	ICBID_SLAVE_ONOC_MPU_CFG 15
+#define	ICBID_SLAVE_MNOC_BIMC 16
+#define	ICBID_SLAVE_SERVICE_MNOC 17
+#define	ICBID_SLAVE_OCMEM 18
+#define	ICBID_SLAVE_GMEM ICBID_SLAVE_OCMEM
+#define	ICBID_SLAVE_SERVICE_ONOC 19
+#define	ICBID_SLAVE_APPSS 20
+#define	ICBID_SLAVE_LPASS 21
+#define	ICBID_SLAVE_USB3 22
+#define	ICBID_SLAVE_USB3_0 ICBID_SLAVE_USB3
+#define	ICBID_SLAVE_WCSS 23
+#define	ICBID_SLAVE_SNOC_BIMC 24
+#define	ICBID_SLAVE_SNOC_BIMC_0 ICBID_SLAVE_SNOC_BIMC
+#define	ICBID_SLAVE_SNOC_CNOC 25
+#define	ICBID_SLAVE_IMEM 26
+#define	ICBID_SLAVE_OCIMEM ICBID_SLAVE_IMEM
+#define	ICBID_SLAVE_SNOC_OVIRT 27
+#define	ICBID_SLAVE_SNOC_GVIRT ICBID_SLAVE_SNOC_OVIRT
+#define	ICBID_SLAVE_SNOC_PNOC 28
+#define	ICBID_SLAVE_SNOC_PCNOC ICBID_SLAVE_SNOC_PNOC
+#define	ICBID_SLAVE_SERVICE_SNOC 29
+#define	ICBID_SLAVE_QDSS_STM 30
+#define	ICBID_SLAVE_SDCC_1 31
+#define	ICBID_SLAVE_SDCC_3 32
+#define	ICBID_SLAVE_SDCC_2 33
+#define	ICBID_SLAVE_SDCC_4 34
+#define	ICBID_SLAVE_TSIF 35
+#define	ICBID_SLAVE_BAM_DMA 36
+#define	ICBID_SLAVE_BLSP_2 37
+#define	ICBID_SLAVE_USB_HSIC 38
+#define	ICBID_SLAVE_BLSP_1 39
+#define	ICBID_SLAVE_USB_HS 40
+#define	ICBID_SLAVE_USB_HS1 ICBID_SLAVE_USB_HS
+#define	ICBID_SLAVE_PDM 41
+#define	ICBID_SLAVE_PERIPH_APU_CFG 42
+#define	ICBID_SLAVE_PNOC_MPU_CFG 43
+#define	ICBID_SLAVE_PRNG 44
+#define	ICBID_SLAVE_PNOC_SNOC 45
+#define	ICBID_SLAVE_PCNOC_SNOC ICBID_SLAVE_PNOC_SNOC
+#define	ICBID_SLAVE_SERVICE_PNOC 46
+#define	ICBID_SLAVE_CLK_CTL 47
+#define	ICBID_SLAVE_CNOC_MSS 48
+#define	ICBID_SLAVE_PCNOC_MSS ICBID_SLAVE_CNOC_MSS
+#define	ICBID_SLAVE_SECURITY 49
+#define	ICBID_SLAVE_TCSR 50
+#define	ICBID_SLAVE_TLMM 51
+#define	ICBID_SLAVE_CRYPTO_0_CFG 52
+#define	ICBID_SLAVE_CRYPTO_1_CFG 53
+#define	ICBID_SLAVE_IMEM_CFG 54
+#define	ICBID_SLAVE_MESSAGE_RAM 55
+#define	ICBID_SLAVE_BIMC_CFG 56
+#define	ICBID_SLAVE_BOOT_ROM 57
+#define	ICBID_SLAVE_CNOC_MNOC_MMSS_CFG 58
+#define	ICBID_SLAVE_PMIC_ARB 59
+#define	ICBID_SLAVE_SPDM_WRAPPER 60
+#define	ICBID_SLAVE_DEHR_CFG 61
+#define	ICBID_SLAVE_MPM 62
+#define	ICBID_SLAVE_QDSS_CFG 63
+#define	ICBID_SLAVE_RBCPR_CFG 64
+#define	ICBID_SLAVE_RBCPR_CX_CFG ICBID_SLAVE_RBCPR_CFG
+#define	ICBID_SLAVE_RBCPR_QDSS_APU_CFG 65
+#define	ICBID_SLAVE_CNOC_MNOC_CFG 66
+#define	ICBID_SLAVE_SNOC_MPU_CFG 67
+#define	ICBID_SLAVE_CNOC_ONOC_CFG 68
+#define	ICBID_SLAVE_PNOC_CFG 69
+#define	ICBID_SLAVE_SNOC_CFG 70
+#define	ICBID_SLAVE_EBI1_DLL_CFG 71
+#define	ICBID_SLAVE_PHY_APU_CFG 72
+#define	ICBID_SLAVE_EBI1_PHY_CFG 73
+#define	ICBID_SLAVE_RPM 74
+#define	ICBID_SLAVE_CNOC_SNOC 75
+#define	ICBID_SLAVE_SERVICE_CNOC 76
+#define	ICBID_SLAVE_OVIRT_SNOC 77
+#define	ICBID_SLAVE_OVIRT_OCMEM 78
+#define	ICBID_SLAVE_USB_HS2 79
+#define	ICBID_SLAVE_QPIC 80
+#define	ICBID_SLAVE_IPS_CFG 81
+#define	ICBID_SLAVE_DSI_CFG 82
+#define	ICBID_SLAVE_USB3_1 83
+#define	ICBID_SLAVE_PCIE_0 84
+#define	ICBID_SLAVE_PCIE_1 85
+#define	ICBID_SLAVE_PSS_SMMU_CFG 86
+#define	ICBID_SLAVE_CRYPTO_2_CFG 87
+#define	ICBID_SLAVE_PCIE_0_CFG 88
+#define	ICBID_SLAVE_PCIE_1_CFG 89
+#define	ICBID_SLAVE_SATA_CFG 90
+#define	ICBID_SLAVE_SPSS_GENI_IR 91
+#define	ICBID_SLAVE_UFS_CFG 92
+#define	ICBID_SLAVE_AVSYNC_CFG 93
+#define	ICBID_SLAVE_VPU_CFG 94
+#define	ICBID_SLAVE_USB_PHY_CFG 95
+#define	ICBID_SLAVE_RBCPR_MX_CFG 96
+#define	ICBID_SLAVE_PCIE_PARF 97
+#define	ICBID_SLAVE_VCAP_CFG 98
+#define	ICBID_SLAVE_EMAC_CFG 99
+#define	ICBID_SLAVE_BCAST_CFG 100
+#define	ICBID_SLAVE_KLM_CFG 101
+#define	ICBID_SLAVE_DISPLAY_PWM 102
+#define	ICBID_SLAVE_GENI 103
+#define	ICBID_SLAVE_SNOC_BIMC_1 104
+#define	ICBID_SLAVE_AUDIO 105
+#define	ICBID_SLAVE_CATS_0 106
+#define	ICBID_SLAVE_CATS_1 107
+#define	ICBID_SLAVE_MM_INT_0 108
+#define	ICBID_SLAVE_MM_INT_1 109
+#define	ICBID_SLAVE_MM_INT_2 110
+#define	ICBID_SLAVE_MM_INT_BIMC 111
+#define	ICBID_SLAVE_MMU_MODEM_XPU_CFG 112
+#define	ICBID_SLAVE_MSS_INT 113
+#define	ICBID_SLAVE_PCNOC_INT_0 114
+#define	ICBID_SLAVE_PCNOC_INT_1 115
+#define	ICBID_SLAVE_PCNOC_M_0 116
+#define	ICBID_SLAVE_PCNOC_M_1 117
+#define	ICBID_SLAVE_PCNOC_S_0 118
+#define	ICBID_SLAVE_PCNOC_S_1 119
+#define	ICBID_SLAVE_PCNOC_S_2 120
+#define	ICBID_SLAVE_PCNOC_S_3 121
+#define	ICBID_SLAVE_PCNOC_S_4 122
+#define	ICBID_SLAVE_PCNOC_S_6 123
+#define	ICBID_SLAVE_PCNOC_S_7 124
+#define	ICBID_SLAVE_PCNOC_S_8 125
+#define	ICBID_SLAVE_PCNOC_S_9 126
+#define	ICBID_SLAVE_PRNG_XPU_CFG 127
+#define	ICBID_SLAVE_QDSS_INT 128
+#define	ICBID_SLAVE_RPM_XPU_CFG 129
+#define	ICBID_SLAVE_SNOC_INT_0 130
+#define	ICBID_SLAVE_SNOC_INT_1 131
+#define	ICBID_SLAVE_SNOC_INT_BIMC 132
+#define	ICBID_SLAVE_TCU 133
+#define	ICBID_SLAVE_BIMC_INT_0 134
+#define	ICBID_SLAVE_BIMC_INT_1 135
+#define	ICBID_SLAVE_RICA_CFG 136
+#define	ICBID_SLAVE_SNOC_BIMC_2 137
+#define	ICBID_SLAVE_BIMC_SNOC_1 138
+#define	ICBID_SLAVE_PNOC_A1NOC 139
+#define	ICBID_SLAVE_SNOC_VMEM 140
+#define	ICBID_SLAVE_A0NOC_SNOC 141
+#define	ICBID_SLAVE_A1NOC_SNOC 142
+#define	ICBID_SLAVE_A2NOC_SNOC 143
+#define	ICBID_SLAVE_A0NOC_CFG 144
+#define	ICBID_SLAVE_A0NOC_MPU_CFG 145
+#define	ICBID_SLAVE_A0NOC_SMMU_CFG 146
+#define	ICBID_SLAVE_A1NOC_CFG 147
+#define	ICBID_SLAVE_A1NOC_MPU_CFG 148
+#define	ICBID_SLAVE_A1NOC_SMMU_CFG 149
+#define	ICBID_SLAVE_A2NOC_CFG 150
+#define	ICBID_SLAVE_A2NOC_MPU_CFG 151
+#define	ICBID_SLAVE_A2NOC_SMMU_CFG 152
+#define	ICBID_SLAVE_AHB2PHY 153
+#define	ICBID_SLAVE_CAMERA_THROTTLE_CFG 154
+#define	ICBID_SLAVE_DCC_CFG 155
+#define	ICBID_SLAVE_DISPLAY_THROTTLE_CFG 156
+#define	ICBID_SLAVE_DSA_CFG 157
+#define	ICBID_SLAVE_DSA_MPU_CFG 158
+#define	ICBID_SLAVE_SSC_MPU_CFG 159
+#define	ICBID_SLAVE_HMSS_L3 160
+#define	ICBID_SLAVE_LPASS_SMMU_CFG 161
+#define	ICBID_SLAVE_MMAGIC_CFG 162
+#define	ICBID_SLAVE_PCIE20_AHB2PHY 163
+#define	ICBID_SLAVE_PCIE_2 164
+#define	ICBID_SLAVE_PCIE_2_CFG 165
+#define	ICBID_SLAVE_PIMEM 166
+#define	ICBID_SLAVE_PIMEM_CFG 167
+#define	ICBID_SLAVE_QDSS_RBCPR_APU_CFG 168
+#define	ICBID_SLAVE_RBCPR_CX 169
+#define	ICBID_SLAVE_RBCPR_MX 170
+#define	ICBID_SLAVE_SMMU_CPP_CFG 171
+#define	ICBID_SLAVE_SMMU_JPEG_CFG 172
+#define	ICBID_SLAVE_SMMU_MDP_CFG 173
+#define	ICBID_SLAVE_SMMU_ROTATOR_CFG 174
+#define	ICBID_SLAVE_SMMU_VENUS_CFG 175
+#define	ICBID_SLAVE_SMMU_VFE_CFG 176
+#define	ICBID_SLAVE_SSC_CFG 177
+#define	ICBID_SLAVE_VENUS_THROTTLE_CFG 178
+#define	ICBID_SLAVE_VMEM 179
+#define	ICBID_SLAVE_VMEM_CFG 180
+#define	ICBID_SLAVE_QDSS_MPU_CFG 181
+#define	ICBID_SLAVE_USB3_PHY_CFG 182
+#define	ICBID_SLAVE_IPA_CFG 183
+#define	ICBID_SLAVE_PCNOC_INT_2 184
+#define	ICBID_SLAVE_PCNOC_INT_3 185
+#define	ICBID_SLAVE_PCNOC_INT_4 186
+#define	ICBID_SLAVE_PCNOC_INT_5 187
+#define	ICBID_SLAVE_PCNOC_INT_6 188
+#define	ICBID_SLAVE_PCNOC_S_5 189
+#define	ICBID_SLAVE_QSPI 190
+#define	ICBID_SLAVE_A1NOC_MS_MPU_CFG 191
+#define	ICBID_SLAVE_A2NOC_MS_MPU_CFG 192
+#define	ICBID_SLAVE_MODEM_Q6_SMMU_CFG 193
+#define	ICBID_SLAVE_MSS_MPU_CFG 194
+#define	ICBID_SLAVE_MSS_PROC_MS_MPU_CFG 195
+#define	ICBID_SLAVE_SKL 196
+#define	ICBID_SLAVE_SNOC_INT_2 197
+#define	ICBID_SLAVE_SMMNOC_BIMC 198
+#define	ICBID_SLAVE_CRVIRT_A1NOC 199
+#define	ICBID_SLAVE_SGMII	 200
+#define	ICBID_SLAVE_QHS4_APPS	 201
+#define	ICBID_SLAVE_BIMC_PCNOC   202
+#define	ICBID_SLAVE_PCNOC_BIMC_1 203
+#define	ICBID_SLAVE_SPMI_FETCHER 204
+#define	ICBID_SLAVE_MMSS_SMMU_CFG 205
+#define	ICBID_SLAVE_WLAN 206
+#define	ICBID_SLAVE_CRVIRT_A2NOC 207
+#define	ICBID_SLAVE_CNOC_A2NOC 208
+#define	ICBID_SLAVE_GLM 209
+#define	ICBID_SLAVE_GNOC_BIMC 210
+#define	ICBID_SLAVE_GNOC_SNOC 211
+#define	ICBID_SLAVE_QM_CFG 212
+#define	ICBID_SLAVE_TLMM_EAST 213
+#define	ICBID_SLAVE_TLMM_NORTH 214
+#define	ICBID_SLAVE_TLMM_WEST 215
+#define	ICBID_SLAVE_LPASS_TCM	216
+#define	ICBID_SLAVE_TLMM_SOUTH	217
+#define	ICBID_SLAVE_TLMM_CENTER	218
+#define	ICBID_SLAVE_MSS_NAV_CE_MPU_CFG	219
+#define	ICBID_SLAVE_A2NOC_THROTTLE_CFG	220
+#define	ICBID_SLAVE_CDSP	221
+#define	ICBID_SLAVE_CDSP_SMMU_CFG	222
+#define	ICBID_SLAVE_LPASS_MPU_CFG	223
+#define	ICBID_SLAVE_CSI_PHY_CFG	224
+#endif
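
Usage sketch for the IDs above: DTS device nodes vote bandwidth on a
master/slave pair through the qcom,msm-bus,* client properties, where each
vector is <master slave ab ib> in KBps. The node name, case count and
bandwidth figures below are illustrative only:

	usb3: ssusb@6a00000 {
		qcom,msm-bus,name = "usb3";
		qcom,msm-bus,num-cases = <2>;	/* idle, active */
		qcom,msm-bus,num-paths = <1>;
		qcom,msm-bus,vectors-KBps =
			<MSM_BUS_MASTER_USB3 MSM_BUS_SLAVE_EBI_CH0 0 0>,
			<MSM_BUS_MASTER_USB3 MSM_BUS_SLAVE_EBI_CH0 0 240000>;
	};
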
diff -Nruw linux-4.4.115-fbx/arch/mips/boot/dts/include/dt-bindings/msm./msm-bus-rule-ops.h linux-4.4.115-fbx/arch/mips/boot/dts/include/dt-bindings/msm/msm-bus-rule-ops.h
--- linux-4.4.115-fbx/arch/mips/boot/dts/include/dt-bindings/msm./msm-bus-rule-ops.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/arch/mips/boot/dts/include/dt-bindings/msm/msm-bus-rule-ops.h	2019-01-22 16:16:28.179288750 +0100
@@ -0,0 +1,34 @@
+/* Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MSM_BUS_RULE_OPS_H
+#define __MSM_BUS_RULE_OPS_H
+
+#define FLD_IB	0
+#define FLD_AB	1
+#define FLD_CLK	2
+
+#define OP_LE	0
+#define OP_LT	1
+#define OP_GE	2
+#define OP_GT	3
+#define OP_NOOP	4
+
+#define RULE_STATE_NOT_APPLIED	0
+#define RULE_STATE_APPLIED	1
+
+#define THROTTLE_ON	0
+#define THROTTLE_OFF	1
+#define THROTTLE_REG	2
+
+
+#endif
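
Usage sketch: a bus rule compares one field of a source node (FLD_IB, FLD_AB
or FLD_CLK) against a threshold with an OP_* comparator and, while the rule
is in RULE_STATE_APPLIED, drives a THROTTLE_* mode on a destination node.
Property names follow the msm bus-rules binding shipped with these drivers;
the node references and threshold are illustrative:

	qcom,node-rules {
		rule@0 {
			qcom,src-nodes = <&mas_apps_proc>;
			qcom,src-field = <FLD_AB>;
			qcom,src-op = <OP_GT>;
			qcom,thresh = <1600000>;
			qcom,mode = <THROTTLE_ON>;
			qcom,dest-node = <&slv_ebi>;
		};
	};
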
diff -Nruw linux-4.4.115-fbx/arch/mips/boot/dts/include/dt-bindings/msm./pm.h linux-4.4.115-fbx/arch/mips/boot/dts/include/dt-bindings/msm/pm.h
--- linux-4.4.115-fbx/arch/mips/boot/dts/include/dt-bindings/msm./pm.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/arch/mips/boot/dts/include/dt-bindings/msm/pm.h	2019-01-22 16:16:28.179288750 +0100
@@ -0,0 +1,25 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __DT_MSM_PM_H__
+#define __DT_MSM_PM_H__
+
+#define LPM_RESET_LVL_NONE	0
+#define LPM_RESET_LVL_RET	1
+#define LPM_RESET_LVL_GDHS	2
+#define LPM_RESET_LVL_PC	3
+
+#define LPM_AFF_LVL_CPU		0
+#define LPM_AFF_LVL_L2		1
+#define LPM_AFF_LVL_CCI		2
+
+#endif
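
Usage sketch: each lpm-levels node declares how much state a low-power mode
loses (LPM_RESET_LVL_*) at a given affinity scope (LPM_AFF_LVL_*). An
abridged cluster-level node, with the label illustrative:

	qcom,pm-cluster-level@1 {
		reg = <1>;
		label = "l2-gdhs";
		qcom,reset-level = <LPM_RESET_LVL_GDHS>;
	};
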
diff -Nruw linux-4.4.115-fbx/arch/mips/boot/dts/include/dt-bindings/msm./power-on.h linux-4.4.115-fbx/arch/mips/boot/dts/include/dt-bindings/msm/power-on.h
--- linux-4.4.115-fbx/arch/mips/boot/dts/include/dt-bindings/msm./power-on.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/arch/mips/boot/dts/include/dt-bindings/msm/power-on.h	2019-01-22 16:16:28.179288750 +0100
@@ -0,0 +1,24 @@
+/* Copyright (c) 2015, 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MSM_POWER_ON_H__
+#define __MSM_POWER_ON_H__
+
+#define PON_POWER_OFF_RESERVED		0x00
+#define PON_POWER_OFF_WARM_RESET	0x01
+#define PON_POWER_OFF_SHUTDOWN		0x04
+#define PON_POWER_OFF_DVDD_SHUTDOWN	0x05
+#define PON_POWER_OFF_HARD_RESET	0x07
+#define PON_POWER_OFF_DVDD_HARD_RESET	0x08
+#define PON_POWER_OFF_MAX_TYPE		0x10
+
+#endif
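
Usage sketch: the PON_POWER_OFF_* codes select the PMIC power-off flavour
for each reset path. A sketch assuming the qpnp-power-on binding's
poweroff-type properties (property names indicative of that binding, values
board policy):

	qcom,power-on@800 {
		qcom,warm-reset-poweroff-type = <PON_POWER_OFF_WARM_RESET>;
		qcom,hard-reset-poweroff-type = <PON_POWER_OFF_HARD_RESET>;
		qcom,shutdown-poweroff-type = <PON_POWER_OFF_SHUTDOWN>;
	};
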
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/arch/mips/boot/dts/include/dt-bindings/regulator/qcom,rpm-smd-regulator.h	2019-01-22 16:16:28.183288787 +0100
@@ -0,0 +1,28 @@
+/* Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __QCOM_RPM_SMD_REGULATOR_H
+#define __QCOM_RPM_SMD_REGULATOR_H
+
+#define RPM_SMD_REGULATOR_LEVEL_NONE		0
+#define RPM_SMD_REGULATOR_LEVEL_RETENTION	16
+#define RPM_SMD_REGULATOR_LEVEL_RETENTION_PLUS	32
+#define RPM_SMD_REGULATOR_LEVEL_MIN_SVS		48
+#define RPM_SMD_REGULATOR_LEVEL_LOW_SVS		64
+#define RPM_SMD_REGULATOR_LEVEL_SVS		128
+#define RPM_SMD_REGULATOR_LEVEL_SVS_PLUS	192
+#define RPM_SMD_REGULATOR_LEVEL_NOM		256
+#define RPM_SMD_REGULATOR_LEVEL_NOM_PLUS	320
+#define RPM_SMD_REGULATOR_LEVEL_TURBO		384
+#define RPM_SMD_REGULATOR_LEVEL_BINNING		512
+
+#endif
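
Usage sketch: these RPM_SMD_REGULATOR_LEVEL_* values are voltage corners,
not microvolts; corner-managed supplies pass them through the standard
regulator range properties, e.g.:

	regulator-min-microvolt = <RPM_SMD_REGULATOR_LEVEL_RETENTION>;
	regulator-max-microvolt = <RPM_SMD_REGULATOR_LEVEL_BINNING>;
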
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/arch/powerpc/boot/dts/include/dt-bindings/clock/audio-ext-clk.h	2019-01-22 16:16:28.167288642 +0100
@@ -0,0 +1,30 @@
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __AUDIO_EXT_CLK_H
+#define __AUDIO_EXT_CLK_H
+
+/* Audio External Clocks */
+#define AUDIO_PMI_CLK		0
+#define AUDIO_PMIC_LNBB_CLK	0
+#define AUDIO_AP_CLK		1
+#define AUDIO_AP_CLK2		2
+#define AUDIO_LPASS_MCLK	3
+#define AUDIO_LPASS_MCLK2	4
+
+#define clk_audio_ap_clk        0x9b5727cb
+#define clk_audio_pmi_clk       0xcbfe416d
+#define clk_audio_ap_clk2       0x454d1e91
+#define clk_audio_lpass_mclk    0xf0f2a284
+#define clk_audio_pmi_lnbb_clk   0x57312343
+
+#endif
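
Usage sketch: consumers reference the audio external clocks with ordinary
clock specifiers; the provider label and clock-names strings below are
illustrative:

	clocks = <&clock_audio AUDIO_AP_CLK>, <&clock_audio AUDIO_LPASS_MCLK>;
	clock-names = "ap_clk", "lpass_mclk";
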
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/arch/powerpc/boot/dts/include/dt-bindings/clock/msm-clocks-8996.h	2019-01-22 16:16:28.171288678 +0100
@@ -0,0 +1,574 @@
+/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MSM_CLOCKS_8996_H
+#define __MSM_CLOCKS_8996_H
+
+#include "audio-ext-clk.h"
+
+/* clock_gcc controlled clocks */
+#define clk_cxo_clk_src			0x79e95308
+#define clk_pnoc_clk			0x4325d220
+#define clk_pnoc_a_clk			0x2808c12b
+#define clk_bimc_clk			0x4b80bf00
+#define clk_bimc_a_clk			0x4b25668a
+#define clk_cnoc_clk			0xd5ccb7f4
+#define clk_cnoc_a_clk			0xd8fe2ccc
+#define clk_snoc_clk			0x2c341aa0
+#define clk_snoc_a_clk			0x8fcef2af
+#define clk_bb_clk1			0xf5304268
+#define clk_bb_clk1_ao			0xfa113810
+#define clk_bb_clk1_pin			0x6dd0a779
+#define clk_bb_clk1_pin_ao		0x9b637772
+#define clk_bb_clk2			0xfe15cb87
+#define clk_bb_clk2_ao			0x59682706
+#define clk_bb_clk2_pin			0x498938e5
+#define clk_bb_clk2_pin_ao		0x52513787
+#define clk_bimc_msmbus_clk		0xd212feea
+#define clk_bimc_msmbus_a_clk		0x71d1a499
+#define clk_ce1_a_clk			0x44a833fe
+#define clk_cnoc_msmbus_clk		0x62228b5d
+#define clk_cnoc_msmbus_a_clk		0x67442955
+#define clk_cxo_clk_src_ao		0x64eb6004
+#define clk_cxo_dwc3_clk		0xf79c19f6
+#define clk_cxo_lpm_clk			0x94adbf3d
+#define clk_cxo_otg_clk			0x4eec0bb9
+#define clk_cxo_pil_lpass_clk		0xe17f0ff6
+#define clk_cxo_pil_ssc_clk		0x81832015
+#define clk_div_clk1			0xaa1157a6
+#define clk_div_clk1_ao			0x6b943d68
+#define clk_div_clk2			0xd454019f
+#define clk_div_clk2_ao			0x53f9e788
+#define clk_div_clk3			0xa9a55a68
+#define clk_div_clk3_ao			0x3d6725a8
+#define clk_ipa_a_clk			0xeeec2919
+#define clk_ipa_clk			0xfa685cda
+#define clk_ln_bb_clk			0x3ab0b36d
+#define clk_ln_bb_a_clk			0xc7257ea8
+#define clk_ln_bb_clk_pin		0x1b1c476a
+#define clk_ln_bb_a_clk_pin		0x9cbb5411
+#define clk_mcd_ce1_clk			0xbb615d26
+#define clk_pnoc_keepalive_a_clk	0xf8f91f0b
+#define clk_pnoc_msmbus_clk		0x38b95c77
+#define clk_pnoc_msmbus_a_clk		0x8c9b4e93
+#define clk_pnoc_pm_clk			0xd6f7dfb9
+#define clk_pnoc_sps_clk		0xd482ecc7
+#define clk_qdss_a_clk			0xdd121669
+#define clk_qdss_clk			0x1492202a
+#define clk_rf_clk1			0xaabeea5a
+#define clk_rf_clk1_ao			0x72a10cb8
+#define clk_rf_clk1_pin			0x8f463562
+#define clk_rf_clk1_pin_ao		0x62549ff6
+#define clk_rf_clk2			0x24a30992
+#define clk_rf_clk2_ao			0x944d8bbd
+#define clk_rf_clk2_pin			0xa7c5602a
+#define clk_rf_clk2_pin_ao		0x2d75eb4d
+#define clk_snoc_msmbus_clk		0xe6900bb6
+#define clk_snoc_msmbus_a_clk		0x5d4683bd
+#define clk_mcd_ce1_clk			0xbb615d26
+#define clk_qcedev_ce1_clk		0x293f97b0
+#define clk_qcrypto_ce1_clk		0xa6ac14df
+#define clk_qseecom_ce1_clk		0xaa858373
+#define clk_scm_ce1_clk			0xd8ebcc62
+#define clk_ce1_clk			0x42229c55
+#define clk_gcc_ce1_ahb_m_clk		0x2eb28c01
+#define clk_gcc_ce1_axi_m_clk		0xc174dfba
+#define clk_measure_only_bimc_hmss_axi_clk	0xc1cc4f11
+#define clk_aggre1_noc_clk		0x049abba8
+#define clk_aggre1_noc_a_clk		0xc12e4220
+#define clk_aggre2_noc_clk		0xaa681404
+#define clk_aggre2_noc_a_clk		0xcab67089
+#define clk_mmssnoc_axi_rpm_clk		0x4d7f8cdc
+#define clk_mmssnoc_axi_rpm_a_clk	0xfbea899b
+#define clk_mmssnoc_axi_clk		0xdb4b31e6
+#define clk_mmssnoc_axi_a_clk		0xd4970614
+#define clk_mmssnoc_gds_clk		0x06a22afa
+
+#define clk_gpll0			0x1ebe3bc4
+#define clk_gpll0_ao			0xa1368304
+#define clk_gpll0_out_main		0xe9374de7
+#define clk_gpll4			0xb3b5d85b
+#define clk_gpll4_out_main		0xa9a0ab9d
+#define clk_ufs_axi_clk_src		0x297ca380
+#define clk_pcie_aux_clk_src		0xebc50566
+#define clk_usb30_master_clk_src	0xc6262f89
+#define clk_usb20_master_clk_src	0x5680ac83
+#define clk_ufs_ice_core_clk_src	0xda8e7119
+#define clk_blsp1_qup1_i2c_apps_clk_src 0x17f78f5e
+#define clk_blsp1_qup1_spi_apps_clk_src 0xf534c4fa
+#define clk_blsp1_qup2_i2c_apps_clk_src 0x8de71c79
+#define clk_blsp1_qup2_spi_apps_clk_src 0x33cf809a
+#define clk_blsp1_qup3_i2c_apps_clk_src 0xf161b902
+#define clk_blsp1_qup3_spi_apps_clk_src 0x5e95683f
+#define clk_blsp1_qup4_i2c_apps_clk_src 0xb2ecce68
+#define clk_blsp1_qup4_spi_apps_clk_src 0xddb5bbdb
+#define clk_blsp1_qup5_i2c_apps_clk_src 0x71ea7804
+#define clk_blsp1_qup5_spi_apps_clk_src 0x9752f35f
+#define clk_blsp1_qup6_i2c_apps_clk_src 0x28806803
+#define clk_blsp1_qup6_spi_apps_clk_src 0x44a1edc4
+#define clk_blsp1_uart1_apps_clk_src	0xf8146114
+#define clk_blsp1_uart2_apps_clk_src	0xfc9c2f73
+#define clk_blsp1_uart3_apps_clk_src	0x600497f2
+#define clk_blsp1_uart4_apps_clk_src	0x56bff15c
+#define clk_blsp1_uart5_apps_clk_src	0x218ef697
+#define clk_blsp1_uart6_apps_clk_src	0x8fbdbe4c
+#define clk_blsp2_qup1_i2c_apps_clk_src 0xd6d1e95d
+#define clk_blsp2_qup1_spi_apps_clk_src 0xcc1b8365
+#define clk_blsp2_qup2_i2c_apps_clk_src 0x603b5c51
+#define clk_blsp2_qup2_spi_apps_clk_src 0xd577dc44
+#define clk_blsp2_qup3_i2c_apps_clk_src 0xea82959c
+#define clk_blsp2_qup3_spi_apps_clk_src 0xd04b1e92
+#define clk_blsp2_qup4_i2c_apps_clk_src 0x73dc968c
+#define clk_blsp2_qup4_spi_apps_clk_src 0x25d4a2b1
+#define clk_blsp2_qup5_i2c_apps_clk_src 0xcc3698bd
+#define clk_blsp2_qup5_spi_apps_clk_src 0xfa0cf45e
+#define clk_blsp2_qup6_i2c_apps_clk_src 0x2fa53151
+#define clk_blsp2_qup6_spi_apps_clk_src 0x5ca86755
+#define clk_blsp2_uart1_apps_clk_src	0x562c66dc
+#define clk_blsp2_uart2_apps_clk_src	0xdd448080
+#define clk_blsp2_uart3_apps_clk_src	0x46b2e90f
+#define clk_blsp2_uart4_apps_clk_src	0x23a093d2
+#define clk_blsp2_uart5_apps_clk_src	0xe067616a
+#define clk_blsp2_uart6_apps_clk_src	0xe02d2829
+#define clk_gp1_clk_src			0xad85b97a
+#define clk_gp2_clk_src			0xfb1f0065
+#define clk_gp3_clk_src			0x63b693d6
+#define clk_hmss_rbcpr_clk_src		0xedd9a474
+#define clk_pdm2_clk_src		0x31e494fd
+#define clk_sdcc1_apps_clk_src		0xd4975db2
+#define clk_sdcc2_apps_clk_src		0xfc46c821
+#define clk_sdcc3_apps_clk_src		0xea34c7f4
+#define clk_sdcc4_apps_clk_src		0x7aaaaa0c
+#define clk_tsif_ref_clk_src		0x4e9042d1
+#define clk_usb20_mock_utmi_clk_src	0xc3aaeecb
+#define clk_usb30_mock_utmi_clk_src	0xa024a976
+#define clk_usb3_phy_aux_clk_src	0x15eec63c
+#define clk_gcc_qusb2phy_prim_reset	0x07550fa1
+#define clk_gcc_qusb2phy_sec_reset	0x3f3a87d0
+#define clk_gcc_periph_noc_usb20_ahb_clk	0xfb9f26e9
+#define clk_gcc_mmss_gcc_dbg_clk	0xe89d461c
+#define clk_cpu_dbg_clk			0x6550dfa9
+#define clk_gcc_blsp1_ahb_clk		0x8caa5b4f
+#define clk_gcc_blsp1_qup1_i2c_apps_clk 0xc303fae9
+#define clk_gcc_blsp1_qup1_spi_apps_clk 0x759a76b0
+#define clk_gcc_blsp1_qup2_i2c_apps_clk 0x1076f220
+#define clk_gcc_blsp1_qup2_spi_apps_clk 0x3e77d48f
+#define clk_gcc_blsp1_qup3_i2c_apps_clk 0x9e25ac82
+#define clk_gcc_blsp1_qup3_spi_apps_clk 0xfb978880
+#define clk_gcc_blsp1_qup4_i2c_apps_clk 0xd7f40f6f
+#define clk_gcc_blsp1_qup4_spi_apps_clk 0x80f8722f
+#define clk_gcc_blsp1_qup5_i2c_apps_clk 0xacae5604
+#define clk_gcc_blsp1_qup5_spi_apps_clk 0xbf3e15d7
+#define clk_gcc_blsp1_qup6_i2c_apps_clk 0x5c6ad820
+#define clk_gcc_blsp1_qup6_spi_apps_clk 0x780d9f85
+#define clk_gcc_blsp1_uart1_apps_clk	0xc7c62f90
+#define clk_gcc_blsp1_uart2_apps_clk	0xf8a61c96
+#define clk_gcc_blsp1_uart3_apps_clk	0xc3298bd7
+#define clk_gcc_blsp1_uart4_apps_clk	0x26be16c0
+#define clk_gcc_blsp1_uart5_apps_clk	0x28a6bc74
+#define clk_gcc_blsp1_uart6_apps_clk	0x28fd3466
+#define clk_gcc_blsp2_ahb_clk		0x8f283c1d
+#define clk_gcc_blsp2_qup1_i2c_apps_clk 0x9ace11dd
+#define clk_gcc_blsp2_qup1_spi_apps_clk 0xa32604cc
+#define clk_gcc_blsp2_qup2_i2c_apps_clk 0x1bf9a57e
+#define clk_gcc_blsp2_qup2_spi_apps_clk 0xbf54ca6d
+#define clk_gcc_blsp2_qup3_i2c_apps_clk 0x336d4170
+#define clk_gcc_blsp2_qup3_spi_apps_clk 0xc68509d6
+#define clk_gcc_blsp2_qup4_i2c_apps_clk 0xbd22539d
+#define clk_gcc_blsp2_qup4_spi_apps_clk 0x01a72b93
+#define clk_gcc_blsp2_qup5_i2c_apps_clk 0xe2b2ce1d
+#define clk_gcc_blsp2_qup5_spi_apps_clk 0xf40999cd
+#define clk_gcc_blsp2_qup6_i2c_apps_clk 0x894bcea4
+#define clk_gcc_blsp2_qup6_spi_apps_clk 0xfe1bd34a
+#define clk_gcc_blsp2_uart1_apps_clk	0x8c3512ff
+#define clk_gcc_blsp2_uart2_apps_clk	0x1e1965a3
+#define clk_gcc_blsp2_uart3_apps_clk	0x382415ab
+#define clk_gcc_blsp2_uart4_apps_clk	0x87a44b42
+#define clk_gcc_blsp2_uart5_apps_clk	0x5cd30649
+#define clk_gcc_blsp2_uart6_apps_clk	0x8feee5ab
+#define clk_gcc_boot_rom_ahb_clk	0xde2adeb1
+#define clk_gcc_gp1_clk			0x057f7b69
+#define clk_gcc_gp2_clk			0x9bf83ffd
+#define clk_gcc_gp3_clk			0xec6539ee
+#define clk_gcc_hmss_rbcpr_clk		0x699183be
+#define clk_gcc_mmss_noc_cfg_ahb_clk	0xb41a9d99
+#define clk_gcc_pcie_0_aux_clk		0x3d2e3ece
+#define clk_gcc_pcie_0_cfg_ahb_clk	0x4dd325c3
+#define clk_gcc_pcie_0_mstr_axi_clk	0x3f85285b
+#define clk_gcc_pcie_0_slv_axi_clk	0xd69638a1
+#define clk_gcc_pcie_0_pipe_clk		0x4f37621e
+#define clk_gcc_pcie_0_phy_reset	0xdc3201c1
+#define clk_gcc_pcie_1_aux_clk		0xc9bb962c
+#define clk_gcc_pcie_1_cfg_ahb_clk	0xb6338658
+#define clk_gcc_pcie_1_mstr_axi_clk	0xc20f6269
+#define clk_gcc_pcie_1_slv_axi_clk	0xd54e40d6
+#define clk_gcc_pcie_1_pipe_clk		0xc1627422
+#define clk_gcc_pcie_1_phy_reset	0x674481bb
+#define clk_gcc_pcie_2_aux_clk		0xa4dc7ae8
+#define clk_gcc_pcie_2_cfg_ahb_clk	0x4f1d3121
+#define clk_gcc_pcie_2_mstr_axi_clk	0x9e81724a
+#define clk_gcc_pcie_2_slv_axi_clk	0x7990d8b2
+#define clk_gcc_pcie_2_pipe_clk		0xa757a834
+#define clk_gcc_pcie_2_phy_reset	0x82634880
+#define clk_gcc_pcie_phy_reset		0x9bc3c959
+#define clk_gcc_pcie_phy_com_reset	0x8bf513e6
+#define clk_gcc_pcie_phy_nocsr_com_phy_reset	0x0c16a2da
+#define clk_gcc_pcie_phy_aux_clk	0x4746e74f
+#define clk_gcc_pcie_phy_cfg_ahb_clk	0x8533671a
+#define clk_gcc_pdm2_clk		0x99d55711
+#define clk_gcc_pdm_ahb_clk		0x365664f6
+#define clk_gcc_prng_ahb_clk		0x397e7eaa
+#define clk_gcc_sdcc1_ahb_clk		0x691e0caa
+#define clk_gcc_sdcc1_apps_clk		0x9ad6fb96
+#define clk_gcc_sdcc2_ahb_clk		0x23d5727f
+#define clk_gcc_sdcc2_apps_clk		0x861b20ac
+#define clk_gcc_sdcc3_ahb_clk		0x565b2c03
+#define clk_gcc_sdcc3_apps_clk		0x0b27aeac
+#define clk_gcc_sdcc4_ahb_clk		0x64f3e6a8
+#define clk_gcc_sdcc4_apps_clk		0xbf7c4dc8
+#define clk_gcc_tsif_ahb_clk		0x88d2822c
+#define clk_gcc_tsif_ref_clk		0x8f1ed2c2
+#define clk_gcc_ufs_ahb_clk		0x1914bb84
+#define clk_gcc_ufs_axi_clk		0x47c743a7
+#define clk_gcc_ufs_ice_core_clk	0x310b0710
+#define clk_gcc_ufs_rx_cfg_clk		0xa6747786
+#define clk_gcc_ufs_rx_symbol_0_clk	0x7f43251c
+#define clk_gcc_ufs_rx_symbol_1_clk	0x03182fde
+#define clk_gcc_ufs_tx_cfg_clk		0xba2cf8b5
+#define clk_gcc_ufs_tx_symbol_0_clk	0x6a9f747a
+#define clk_gcc_ufs_unipro_core_clk	0x2daf7fd2
+#define clk_gcc_ufs_sys_clk_core_clk	0x360e5ac8
+#define clk_gcc_ufs_tx_symbol_clk_core_clk	0xf6fb0df7
+#define clk_gcc_usb20_master_clk	0x24c3b66a
+#define clk_gcc_usb20_mock_utmi_clk	0xe8db8203
+#define clk_gcc_usb20_sleep_clk		0x6e8cb4b2
+#define clk_gcc_usb30_master_clk	0xb3b4e2cb
+#define clk_gcc_usb30_mock_utmi_clk	0xa800b65a
+#define clk_gcc_usb30_sleep_clk		0xd0b65c92
+#define clk_gcc_usb3_phy_aux_clk	0x0d9a36e0
+#define clk_gcc_usb3_phy_pipe_clk	0xf279aff2
+#define clk_gcc_usb_phy_cfg_ahb2phy_clk	0xd1231a0e
+#define clk_gcc_aggre0_cnoc_ahb_clk	0x53a35559
+#define clk_gcc_aggre0_snoc_axi_clk	0x3c446400
+#define clk_gcc_aggre0_noc_qosgen_extref_clk	0x8c4356ba
+#define clk_hlos1_vote_lpass_core_smmu_clk	0x3aaa1743
+#define clk_hlos1_vote_lpass_adsp_smmu_clk	0xc76f702f
+#define clk_gcc_usb3_phy_reset		0x03d559f1
+#define clk_gcc_usb3phy_phy_reset	0xb1a4f885
+#define clk_gcc_usb3_clkref_clk		0xb6cc8f01
+#define clk_gcc_hdmi_clkref_clk		0x4d4eec04
+#define clk_gcc_edp_clkref_clk		0xa8685c3f
+#define clk_gcc_ufs_clkref_clk		0x92aa126f
+#define clk_gcc_pcie_clkref_clk		0xa2e247fa
+#define clk_gcc_rx2_usb2_clkref_clk	0x27ec24ba
+#define clk_gcc_rx1_usb2_clkref_clk	0x53351d25
+#define clk_gcc_smmu_aggre0_ahb_clk	0x47a06ce4
+#define clk_gcc_smmu_aggre0_axi_clk	0x3cac4a6c
+#define clk_gcc_sys_noc_usb3_axi_clk	0x94d26800
+#define clk_gcc_sys_noc_ufs_axi_clk	0x19d38312
+#define clk_gcc_aggre2_usb3_axi_clk	0xd5822a8e
+#define clk_gcc_aggre2_ufs_axi_clk	0xb31e5191
+#define clk_gcc_mmss_gpll0_div_clk	0xdd06848d
+#define clk_gcc_mmss_bimc_gfx_clk	0xe4f28754
+#define clk_gcc_bimc_gfx_clk		0x3edd69ad
+#define clk_gcc_qspi_ahb_clk		0x96969dc8
+#define clk_gcc_qspi_ser_clk		0xfaf1e266
+#define clk_qspi_ser_clk_src		0x426676ee
+#define clk_sdcc1_ice_core_clk_src	0xfd6a4301
+#define clk_gcc_sdcc1_ice_core_clk	0x0fd5680a
+#define clk_gcc_mss_cfg_ahb_clk		0x111cde81
+#define clk_gcc_mss_snoc_axi_clk	0x0e71de85
+#define clk_gcc_mss_q6_bimc_axi_clk	0x67544d62
+#define clk_gcc_mss_mnoc_bimc_axi_clk	0xf665d03f
+#define clk_gpll0_out_msscc		0x7d794829
+#define clk_gcc_debug_mux_v2		0xf7e749f0
+#define clk_gcc_dcc_ahb_clk		0xfa14a88c
+#define clk_gcc_aggre0_noc_mpu_cfg_ahb_clk	0x5c1bb8e2
+
+/* clock_mmss controlled clocks */
+#define clk_mmsscc_xo			0x05e63704
+#define clk_mmsscc_gpll0		0xe900c515
+#define clk_mmsscc_gpll0_div		0x73892e05
+#define clk_mmsscc_mmssnoc_ahb		0x7b4bd6f7
+#define clk_mmpll0			0xdd83b751
+#define clk_mmpll0_out_main		0x2f996a31
+#define clk_mmpll1			0x6da7fb90
+#define clk_mmpll1_out_main		0xa0d3a7da
+#define clk_mmpll4			0x22c063c1
+#define clk_mmpll4_out_main		0xfb21c2fd
+#define clk_mmpll3			0x18c76899
+#define clk_mmpll3_out_main		0x6eb6328f
+#define clk_ahb_clk_src			0x86f49203
+#define clk_mmpll2			0x1190e4d8
+#define clk_mmpll2_out_main		0x1e9e24a8
+#define clk_mmpll8			0xd06ad45e
+#define clk_mmpll8_out_main		0x75b1f386
+#define clk_mmpll9			0x1c50684c
+#define clk_mmpll9_out_main		0x16b74937
+#define clk_mmpll5			0xa41e1936
+#define clk_mmpll5_out_main		0xcc1897bf
+#define clk_csi0_clk_src		0x227e65bc
+#define clk_vfe0_clk_src		0xa0c2bd8f
+#define clk_vfe1_clk_src		0x4e357366
+#define clk_csi1_clk_src		0x6a2a6c36
+#define clk_csi2_clk_src		0x4113589f
+#define clk_csi3_clk_src		0xfd934012
+#define clk_maxi_clk_src		0x52c09777
+#define clk_cpp_clk_src			0x8382f56d
+#define clk_jpeg0_clk_src		0x9a0a0ac3
+#define clk_jpeg2_clk_src		0x5ad927f3
+#define clk_jpeg_dma_clk_src		0xb68afcea
+#define clk_mdp_clk_src			0x6dc1f8f1
+#define clk_video_core_clk_src		0x8be4c944
+#define clk_fd_core_clk_src		0xe4799ab7
+#define clk_cci_clk_src			0x822f3d97
+#define clk_csiphy0_3p_clk_src		0xd2474b12
+#define clk_csiphy1_3p_clk_src		0x46a02aff
+#define clk_csiphy2_3p_clk_src		0x1447813f
+#define clk_camss_gp0_clk_src		0x6b57cfe6
+#define clk_camss_gp1_clk_src		0xf735368a
+#define clk_jpeg_dma_clk_src		0xb68afcea
+#define clk_mclk0_clk_src		0x266b3853
+#define clk_mclk1_clk_src		0xa73cad0c
+#define clk_mclk2_clk_src		0x42545468
+#define clk_mclk3_clk_src		0x2bfbb714
+#define clk_csi0phytimer_clk_src	0xc8a309be
+#define clk_csi1phytimer_clk_src	0x7c0fe23a
+#define clk_csi2phytimer_clk_src	0x62ffea9c
+#define clk_rbbmtimer_clk_src		0x17649ecc
+#define clk_esc0_clk_src		0xb41d7c38
+#define clk_esc1_clk_src		0x3b0afa42
+#define clk_hdmi_clk_src		0xb40aeea9
+#define clk_vsync_clk_src		0xecb43940
+#define clk_rbcpr_clk_src		0x2c2e9af2
+#define clk_video_subcore0_clk_src	0x88d79636
+#define clk_video_subcore1_clk_src	0x4966930c
+#define clk_mmss_bto_ahb_clk		0xfdf8c361
+#define clk_camss_ahb_clk		0xc4ff91d4
+#define clk_camss_cci_ahb_clk		0x04c4441a
+#define clk_camss_cci_clk		0xd6cb5eb9
+#define clk_camss_cpp_ahb_clk		0x12e9a87b
+#define clk_camss_cpp_clk		0xb82f366b
+#define clk_camss_cpp_axi_clk		0x5598c804
+#define clk_camss_cpp_vbif_ahb_clk	0xb5f31be4
+#define clk_camss_csi0_ahb_clk		0x6e29c972
+#define clk_camss_csi0_clk		0x30862ddb
+#define clk_camss_csi0phy_clk		0x2cecfb84
+#define clk_camss_csi0pix_clk		0x6946f77b
+#define clk_camss_csi0rdi_clk		0x83645ef5
+#define clk_camss_csi1_ahb_clk		0xccc15f06
+#define clk_camss_csi1_clk		0xb150f052
+#define clk_camss_csi1phy_clk		0xb989f06d
+#define clk_camss_csi1pix_clk		0x58d19bf3
+#define clk_camss_csi1rdi_clk		0x4d2f3352
+#define clk_camss_csi2_ahb_clk		0x92d02d75
+#define clk_camss_csi2_clk		0x74fc92e8
+#define clk_camss_csi2phy_clk		0xda05d9d8
+#define clk_camss_csi2pix_clk		0xf8ed0731
+#define clk_camss_csi2rdi_clk		0xdc1b2081
+#define clk_camss_csi3_ahb_clk		0xee5e459c
+#define clk_camss_csi3_clk		0x39488fdd
+#define clk_camss_csi3phy_clk		0x8b6063b9
+#define clk_camss_csi3pix_clk		0xd82bd467
+#define clk_camss_csi3rdi_clk		0xb6750046
+#define clk_camss_csi_vfe0_clk		0x3023937a
+#define clk_camss_csi_vfe1_clk		0xe66fa522
+#define clk_camss_csiphy0_3p_clk	0xf2a54f5a
+#define clk_camss_csiphy1_3p_clk	0x8bf70cb2
+#define clk_camss_csiphy2_3p_clk	0x1c14c939
+#define clk_camss_gp0_clk		0xcee7e51d
+#define clk_camss_gp1_clk		0x41f1c2e3
+#define clk_camss_ispif_ahb_clk		0x9a212c6d
+#define clk_camss_jpeg0_clk		0x0b0e2db7
+#define clk_camss_jpeg2_clk		0xd7291c8d
+#define clk_camss_jpeg_ahb_clk		0x1f47fd28
+#define clk_camss_jpeg_axi_clk		0x9e5545c8
+#define clk_camss_jpeg_dma_clk		0x2336e65d
+#define clk_camss_mclk0_clk		0xcf0c61e0
+#define clk_camss_mclk1_clk		0xd1410ed4
+#define clk_camss_mclk2_clk		0x851286f2
+#define clk_camss_mclk3_clk		0x4db11c45
+#define clk_camss_micro_ahb_clk		0x33a23277
+#define clk_camss_csi0phytimer_clk	0xff93b3c8
+#define clk_camss_csi1phytimer_clk	0x6c399ab6
+#define clk_camss_csi2phytimer_clk	0x24f47f49
+#define clk_camss_top_ahb_clk		0x8f8b2d33
+#define clk_camss_vfe_ahb_clk		0x595197bc
+#define clk_camss_vfe_axi_clk		0x273d4c31
+#define clk_camss_vfe0_ahb_clk		0x4652833c
+#define clk_camss_vfe0_clk		0x1e9bb8c4
+#define clk_camss_vfe0_stream_clk	0x22835fa4
+#define clk_camss_vfe1_ahb_clk		0x6a56abd3
+#define clk_camss_vfe1_clk		0x5bffa69b
+#define clk_camss_vfe1_stream_clk	0x92f849b9
+#define clk_fd_ahb_clk			0x868a2c5c
+#define clk_fd_core_clk			0x3badcae4
+#define clk_fd_core_uar_clk		0x7e624e15
+#define clk_gpu_ahb_clk			0xf97f1d43
+#define clk_gpu_aon_isense_clk		0xa9e9b297
+#define clk_gpu_gx_gfx3d_clk		0xb7ece823
+#define clk_gpu_mx_clk			0xb80ccedf
+#define clk_gpu_gx_rbbmtimer_clk	0xdeba634e
+#define clk_mdss_ahb_clk		0x684ccb41
+#define clk_mdss_axi_clk		0xcc07d687
+#define clk_mdss_esc0_clk		0x28cafbe6
+#define clk_mdss_esc1_clk		0xc22c6883
+#define clk_mdss_hdmi_ahb_clk		0x01cef516
+#define clk_mdss_hdmi_clk		0x097a6de9
+#define clk_mdss_mdp_clk		0x618336ac
+#define clk_mdss_vsync_clk		0x42a022d3
+#define clk_mmss_misc_ahb_clk		0xea30b0e7
+#define clk_mmss_misc_cxo_clk		0xe620cd80
+#define clk_mmagic_bimc_noc_cfg_ahb_clk 0x12d5ba72
+#define clk_mmagic_camss_axi_clk	0xa8b1c16b
+#define clk_mmagic_camss_noc_cfg_ahb_clk 0x5182c819
+#define clk_mmss_mmagic_cfg_ahb_clk	0x5e94a822
+#define clk_mmagic_mdss_axi_clk		0xa0359d10
+#define clk_mmagic_mdss_noc_cfg_ahb_clk 0x9c6d5482
+#define clk_mmagic_video_axi_clk	0x7b9219c3
+#define clk_mmagic_video_noc_cfg_ahb_clk 0x5124d256
+#define clk_mmss_mmagic_ahb_clk		0x3d15f2b0
+#define clk_mmss_mmagic_maxi_clk	0xbdaf5af7
+#define clk_mmss_rbcpr_ahb_clk		0x623ba55f
+#define clk_mmss_rbcpr_clk		0x69a23a6f
+#define clk_mmss_spdm_cpp_clk		0xefe35cd2
+#define clk_mmss_spdm_jpeg_dma_clk	0xcb7bd5a0
+#define clk_smmu_cpp_ahb_clk		0x3ad82d84
+#define clk_smmu_cpp_axi_clk		0xa6bb2f4a
+#define clk_smmu_jpeg_ahb_clk		0x10c436ec
+#define clk_smmu_jpeg_axi_clk		0x41112f37
+#define clk_smmu_mdp_ahb_clk		0x04994cb2
+#define clk_smmu_mdp_axi_clk		0x7fd71687
+#define clk_smmu_rot_ahb_clk		0xa30772c9
+#define clk_smmu_rot_axi_clk		0xfed7c078
+#define clk_smmu_vfe_ahb_clk		0x4dabebe7
+#define clk_smmu_vfe_axi_clk		0xde483725
+#define clk_smmu_video_ahb_clk		0x2d738e2c
+#define clk_smmu_video_axi_clk		0xe2b5b887
+#define clk_video_ahb_clk		0x90775cfb
+#define clk_video_axi_clk		0xe6c16dba
+#define clk_video_core_clk		0x7e876ec3
+#define clk_video_maxi_clk		0x97749db6
+#define clk_video_subcore0_clk		0xb6f63e6c
+#define clk_video_subcore1_clk		0x26c29cb4
+#define clk_vmem_ahb_clk		0xab6223ff
+#define clk_vmem_maxi_clk		0x15ef32db
+#define clk_mmss_debug_mux		0xe646ffda
+#define clk_mmss_gcc_dbg_clk		0xafa4d48a
+#define clk_gfx3d_clk_src		0x917f76ef
+#define clk_extpclk_clk_src		0xb2c31abd
+#define clk_mdss_byte0_clk		0xf5a03f64
+#define clk_mdss_byte1_clk		0xb8c7067d
+#define clk_mdss_extpclk_clk		0xfa5aadb0
+#define clk_mdss_pclk0_clk		0x3487234a
+#define clk_mdss_pclk1_clk		0xd5804246
+#define clk_gpu_gcc_dbg_clk		0x0ccc42cd
+#define clk_mdss_mdp_vote_clk		0x588460a4
+#define clk_mdss_rotator_vote_clk	0x5b1f675e
+#define clk_mmpll2_postdiv_clk		0x4fdeaaba
+#define clk_mmpll8_postdiv_clk		0xedf57882
+#define clk_mmpll9_postdiv_clk		0x3064b618
+#define clk_gfx3d_clk_src_v2		0x4210acb7
+#define clk_byte0_clk_src		0x75cc885b
+#define clk_byte1_clk_src		0x63c2c955
+#define clk_pclk0_clk_src		0xccac1f35
+#define clk_pclk1_clk_src		0x090f68ac
+#define clk_ext_byte0_clk_src		0xfb32f31e
+#define clk_ext_byte1_clk_src		0x585ef6d4
+#define clk_ext_pclk0_clk_src		0x087c1612
+#define clk_ext_pclk1_clk_src		0x8067c5a3
+
+/* clock_debug controlled clocks */
+#define clk_gcc_debug_mux		0x8121ac15
+
+/* external multimedia clocks */
+#define clk_dsi0pll_pixel_clk_mux	0x792379e1
+#define clk_dsi0pll_byte_clk_mux	0x60e83f06
+#define clk_dsi0pll_byte_clk_src	0xbbaa30be
+#define clk_dsi0pll_pixel_clk_src	0x45b3260f
+#define clk_dsi0pll_n2_div_clk		0x1474c213
+#define clk_dsi0pll_post_n1_div_clk	0xdab8c389
+#define clk_dsi0pll_vco_clk		0x15940d40
+#define clk_dsi1pll_pixel_clk_mux	0x36458019
+#define clk_dsi1pll_byte_clk_mux	0xb5a42b7b
+#define clk_dsi1pll_byte_clk_src	0x63930a8f
+#define clk_dsi1pll_pixel_clk_src	0x0e4c9b56
+#define clk_dsi1pll_n2_div_clk		0x2c9d4007
+#define clk_dsi1pll_post_n1_div_clk	0x03020041
+#define clk_dsi1pll_vco_clk		0x99797b50
+#define clk_mdss_dsi1_vco_clk_src	0xfcd15658
+#define clk_hdmi_vco_clk		0x66003284
+
+#define clk_dsi0pll_shadow_byte_clk_src	0x177c029c
+#define clk_dsi0pll_shadow_pixel_clk_src	0x98ae3c92
+#define clk_dsi0pll_shadow_n2_div_clk	0xd5f0dad9
+#define clk_dsi0pll_shadow_post_n1_div_clk	0x1f7c8cf8
+#define clk_dsi0pll_shadow_vco_clk	0xb100ca83
+#define clk_dsi1pll_shadow_byte_clk_src	0xfc021ce5
+#define clk_dsi1pll_shadow_pixel_clk_src	0xdcca3ffc
+#define clk_dsi1pll_shadow_n2_div_clk		0x189541bf
+#define clk_dsi1pll_shadow_post_n1_div_clk	0x1637020e
+#define clk_dsi1pll_shadow_vco_clk		0x68d8b6f7
+
+/* CPU clocks */
+#define clk_pwrcl_clk 0xc554130e
+#define clk_pwrcl_pll 0x25454ca1
+#define clk_pwrcl_alt_pll 0xc445471b
+#define clk_pwrcl_pll_main 0x28948e22
+#define clk_pwrcl_alt_pll_main 0x25c8270e
+#define clk_pwrcl_hf_mux 0x77706ae6
+#define clk_pwrcl_lf_mux 0xd99e334d
+#define clk_perfcl_clk 0x58869997
+#define clk_perfcl_pll 0x97dcec1c
+#define clk_perfcl_alt_pll 0xfe2eaea1
+#define clk_perfcl_pll_main 0x0dbf0c0b
+#define clk_perfcl_alt_pll_main 0x0b892aab
+#define clk_perfcl_hf_mux 0x9e8bbe59
+#define clk_perfcl_lf_mux 0x2f9c278d
+#define clk_cbf_pll 0xfe2e96a3
+#define clk_cbf_pll_main 0x2b05cf95
+#define clk_cbf_hf_mux 0x71244f73
+#define clk_cbf_clk 0x48e9e16b
+#define clk_xo_ao 0x428c856d
+#define clk_sys_apcsaux_clk 0x0b0dd513
+#define clk_cpu_debug_mux 0xc7acaa31
+
+/* GCC block resets */
+#define QUSB2PHY_PRIM_BCR		0
+#define QUSB2PHY_SEC_BCR		1
+#define BLSP1_BCR			2
+#define BLSP2_BCR			3
+#define BOOT_ROM_BCR			4
+#define PRNG_BCR			5
+#define UFS_BCR				6
+#define USB_20_BCR			7
+#define USB_30_BCR			8
+#define USB3_PHY_BCR			9
+#define USB3PHY_PHY_BCR			10
+#define PCIE_0_PHY_BCR			11
+#define PCIE_1_PHY_BCR			12
+#define PCIE_2_PHY_BCR			13
+#define PCIE_PHY_BCR			14
+#define PCIE_PHY_COM_BCR		15
+#define PCIE_PHY_NOCSR_COM_PHY_BCR	16
+
+/* MMSS block resets */
+#define VIDEO_BCR			0
+#define MDSS_BCR			1
+#define CAMSS_MICRO_BCR			2
+#define CAMSS_JPEG_BCR			3
+#define CAMSS_VFE0_BCR			4
+#define CAMSS_VFE1_BCR			5
+#define FD_BCR				6
+#define GPU_GX_BCR			7
+
+#endif
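These dt-bindings IDs are consumed from device tree rather than from driver C code: a consumer node references the clock controller by phandle and passes the ID as the specifier cell. A minimal consumer sketch, assuming hypothetical node labels clock_mmss and venus and illustrative clock-names (none of these labels appear in this patch):

	&venus {
		/* hashed clock IDs from the header above */
		clocks = <&clock_mmss clk_video_core_clk>,
			 <&clock_mmss clk_video_ahb_clk>,
			 <&clock_mmss clk_video_axi_clk>;
		clock-names = "core_clk", "iface_clk", "bus_clk";
	};

The 32-bit values appear to be hashes of the clock names, which lets the downstream provider match a specifier directly instead of maintaining a dense index table.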
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/arch/powerpc/boot/dts/include/dt-bindings/clock/msm-clocks-8998.h	2019-01-22 16:16:28.171288678 +0100
@@ -0,0 +1,534 @@
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MSM_CLOCKS_8998_H
+#define __MSM_CLOCKS_8998_H
+
+#include "audio-ext-clk.h"
+
+/* clock_rpm controlled clocks */
+#define clk_ce1_clk				0x42229c55
+#define clk_ce1_a_clk				0x44a833fe
+#define clk_cxo_clk_src				0x79e95308
+#define clk_bimc_clk				0x4b80bf00
+#define clk_bimc_a_clk				0x4b25668a
+#define clk_cnoc_clk				0xd5ccb7f4
+#define clk_cnoc_a_clk				0xd8fe2ccc
+#define clk_snoc_clk				0x2c341aa0
+#define clk_snoc_a_clk				0x8fcef2af
+#define clk_cnoc_periph_clk			0xb11e9cf9
+#define clk_cnoc_periph_a_clk			0x1d7faa2e
+#define clk_cnoc_periph_keepalive_a_clk		0x7287aef2
+#define clk_ln_bb_clk1				0xb867b147
+#define clk_ln_bb_clk1_ao			0x7f63a93a
+#define clk_ln_bb_clk1_pin			0x6fc5653c
+#define clk_ln_bb_clk1_pin_ao			0x25d625bf
+#define clk_ln_bb_clk2				0xf83e6387
+#define clk_ln_bb_clk2_ao			0x96f09628
+#define clk_ln_bb_clk2_pin			0xa9ebe8d5
+#define clk_ln_bb_clk2_pin_ao			0x89a1226f
+#define clk_ln_bb_clk3				0x4f52a39e
+#define clk_ln_bb_clk3_ao			0xb15eba76
+#define clk_ln_bb_clk3_pin			0xc4de7dad
+#define clk_ln_bb_clk3_pin_ao			0xc01022e8
+#define clk_bimc_msmbus_clk			0xd212feea
+#define clk_bimc_msmbus_a_clk			0x71d1a499
+#define clk_cnoc_msmbus_clk			0x62228b5d
+#define clk_cnoc_msmbus_a_clk			0x67442955
+#define clk_cxo_clk_src_ao			0x64eb6004
+#define clk_cxo_dwc3_clk			0xf79c19f6
+#define clk_cxo_lpm_clk				0x94adbf3d
+#define clk_cxo_otg_clk				0x4eec0bb9
+#define clk_cxo_pil_lpass_clk			0xe17f0ff6
+#define clk_cxo_pil_ssc_clk			0x81832015
+#define clk_cxo_pil_spss_clk			0x5cd71a61
+#define clk_div_clk1				0xaa1157a6
+#define clk_div_clk1_ao				0x6b943d68
+#define clk_div_clk2				0xd454019f
+#define clk_div_clk2_ao				0x53f9e788
+#define clk_div_clk3				0xa9a55a68
+#define clk_div_clk3_ao				0x3d6725a8
+#define clk_ipa_clk				0xfa685cda
+#define clk_ipa_a_clk				0xeeec2919
+#define clk_mcd_ce1_clk				0xbb615d26
+#define clk_mmssnoc_axi_clk			0xdb4b31e6
+#define clk_mmssnoc_axi_a_clk			0xd4970614
+#define clk_qcedev_ce1_clk			0x293f97b0
+#define clk_qcrypto_ce1_clk			0xa6ac14df
+#define clk_qdss_clk				0x1492202a
+#define clk_qdss_a_clk				0xdd121669
+#define clk_qseecom_ce1_clk			0xaa858373
+#define clk_rf_clk1				0xaabeea5a
+#define clk_rf_clk1_ao				0x72a10cb8
+#define clk_rf_clk1_pin				0x8f463562
+#define clk_rf_clk1_pin_ao			0x62549ff6
+#define clk_rf_clk2				0x24a30992
+#define clk_rf_clk2_ao				0x944d8bbd
+#define clk_rf_clk2_pin				0xa7c5602a
+#define clk_rf_clk2_pin_ao			0x2d75eb4d
+#define clk_rf_clk3				0xb673936b
+#define clk_rf_clk3_ao				0x038bb968
+#define clk_rf_clk3_pin				0x726f53f5
+#define clk_rf_clk3_pin_ao			0x76f9240f
+#define clk_scm_ce1_clk				0xd8ebcc62
+#define clk_snoc_msmbus_clk			0xe6900bb6
+#define clk_snoc_msmbus_a_clk			0x5d4683bd
+#define clk_gcc_ce1_ahb_m_clk			0x2eb28c01
+#define clk_gcc_ce1_axi_m_clk			0xc174dfba
+#define clk_aggre1_noc_clk			0x049abba8
+#define clk_aggre1_noc_a_clk			0xc12e4220
+#define clk_aggre2_noc_clk			0xaa681404
+#define clk_aggre2_noc_a_clk			0xcab67089
+#define clk_measure_only_bimc_hmss_axi_clk	0xc1cc4f11
+
+/* clock_gcc controlled clocks */
+#define clk_debug_mmss_clk			0x977c99b6
+#define clk_debug_rpm_clk			0x8e2b07ca
+#define clk_debug_cpu_clk			0x0e696b2b
+#define clk_gpu_gcc_debug_clk			0x3eb88190
+#define clk_gfx_gcc_debug_clk			0xa3a64fec
+#define clk_gpll0				0x1ebe3bc4
+#define clk_gpll0_out_main			0xe9374de7
+#define clk_gpll0_ao				0xa1368304
+#define clk_gcc_mmss_gpll0_clk			0x8050f008
+#define clk_gcc_mmss_gpll0_div_clk		0xdd06848d
+#define clk_gcc_gpu_gpll0_clk			0xdad7a7a4
+#define clk_gcc_gpu_gpll0_div_clk		0x07d16c6a
+#define clk_gpll4				0xb3b5d85b
+#define clk_gpll4_out_main			0xa9a0ab9d
+#define clk_usb30_master_clk_src		0xc6262f89
+#define clk_pcie_aux_clk_src			0xebc50566
+#define clk_ufs_axi_clk_src			0x297ca380
+#define clk_blsp1_qup1_i2c_apps_clk_src		0x17f78f5e
+#define clk_blsp1_qup1_spi_apps_clk_src		0xf534c4fa
+#define clk_blsp1_qup2_i2c_apps_clk_src		0x8de71c79
+#define clk_blsp1_qup2_spi_apps_clk_src		0x33cf809a
+#define clk_blsp1_qup3_i2c_apps_clk_src		0xf161b902
+#define clk_blsp1_qup3_spi_apps_clk_src		0x5e95683f
+#define clk_blsp1_qup4_i2c_apps_clk_src		0xb2ecce68
+#define clk_blsp1_qup4_spi_apps_clk_src		0xddb5bbdb
+#define clk_blsp1_qup5_i2c_apps_clk_src		0x71ea7804
+#define clk_blsp1_qup5_spi_apps_clk_src		0x9752f35f
+#define clk_blsp1_qup6_i2c_apps_clk_src		0x28806803
+#define clk_blsp1_qup6_spi_apps_clk_src		0x44a1edc4
+#define clk_blsp1_uart1_apps_clk_src		0xf8146114
+#define clk_blsp1_uart2_apps_clk_src		0xfc9c2f73
+#define clk_blsp1_uart3_apps_clk_src		0x600497f2
+#define clk_blsp2_qup1_i2c_apps_clk_src		0xd6d1e95d
+#define clk_blsp2_qup1_spi_apps_clk_src		0xcc1b8365
+#define clk_blsp2_qup2_i2c_apps_clk_src		0x603b5c51
+#define clk_blsp2_qup2_spi_apps_clk_src		0xd577dc44
+#define clk_blsp2_qup3_i2c_apps_clk_src		0xea82959c
+#define clk_blsp2_qup3_spi_apps_clk_src		0xd04b1e92
+#define clk_blsp2_qup4_i2c_apps_clk_src		0x73dc968c
+#define clk_blsp2_qup4_spi_apps_clk_src		0x25d4a2b1
+#define clk_blsp2_qup5_i2c_apps_clk_src		0xcc3698bd
+#define clk_blsp2_qup5_spi_apps_clk_src		0xfa0cf45e
+#define clk_blsp2_qup6_i2c_apps_clk_src		0x2fa53151
+#define clk_blsp2_qup6_spi_apps_clk_src		0x5ca86755
+#define clk_blsp2_uart1_apps_clk_src		0x562c66dc
+#define clk_blsp2_uart2_apps_clk_src		0xdd448080
+#define clk_blsp2_uart3_apps_clk_src		0x46b2e90f
+#define clk_gp1_clk_src				0xad85b97a
+#define clk_gp2_clk_src				0xfb1f0065
+#define clk_gp3_clk_src				0x63b693d6
+#define clk_hmss_rbcpr_clk_src			0xedd9a474
+#define clk_pdm2_clk_src			0x31e494fd
+#define clk_sdcc2_apps_clk_src			0xfc46c821
+#define clk_sdcc4_apps_clk_src			0x7aaaaa0c
+#define clk_tsif_ref_clk_src			0x4e9042d1
+#define clk_ufs_ice_core_clk_src		0xda8e7119
+#define clk_ufs_phy_aux_clk_src			0xc6bca085
+#define clk_ufs_unipro_core_clk_src		0x179e80a9
+#define clk_usb30_mock_utmi_clk_src		0xa024a976
+#define clk_usb3_phy_aux_clk_src		0x15eec63c
+#define clk_qspi_ref_clk_src			0xfe6b8e11
+#define clk_gcc_pcie_phy_0_reset		0x6bb4df33
+#define clk_gcc_usb3_phy_reset			0x03d559f1
+#define clk_gcc_usb3phy_phy_reset		0xb1a4f885
+#define clk_gcc_aggre1_ufs_axi_clk		0x873459d8
+#define clk_gcc_aggre1_ufs_axi_hw_ctl_clk	0x117a6f39
+#define clk_gcc_aggre1_usb3_axi_clk		0xc5c3fbe8
+#define clk_gcc_bimc_mss_q6_axi_clk		0x7437988f
+#define clk_gcc_blsp1_ahb_clk			0x8caa5b4f
+#define clk_gcc_blsp1_qup1_i2c_apps_clk		0xc303fae9
+#define clk_gcc_blsp1_qup1_spi_apps_clk		0x759a76b0
+#define clk_gcc_blsp1_qup2_i2c_apps_clk		0x1076f220
+#define clk_gcc_blsp1_qup2_spi_apps_clk		0x3e77d48f
+#define clk_gcc_blsp1_qup3_i2c_apps_clk		0x9e25ac82
+#define clk_gcc_blsp1_qup3_spi_apps_clk		0xfb978880
+#define clk_gcc_blsp1_qup4_i2c_apps_clk		0xd7f40f6f
+#define clk_gcc_blsp1_qup4_spi_apps_clk		0x80f8722f
+#define clk_gcc_blsp1_qup5_i2c_apps_clk		0xacae5604
+#define clk_gcc_blsp1_qup5_spi_apps_clk		0xbf3e15d7
+#define clk_gcc_blsp1_qup6_i2c_apps_clk		0x5c6ad820
+#define clk_gcc_blsp1_qup6_spi_apps_clk		0x780d9f85
+#define clk_gcc_blsp1_uart1_apps_clk		0xc7c62f90
+#define clk_gcc_blsp1_uart2_apps_clk		0xf8a61c96
+#define clk_gcc_blsp1_uart3_apps_clk		0xc3298bd7
+#define clk_gcc_blsp2_ahb_clk			0x8f283c1d
+#define clk_gcc_blsp2_qup1_i2c_apps_clk		0x9ace11dd
+#define clk_gcc_blsp2_qup1_spi_apps_clk		0xa32604cc
+#define clk_gcc_blsp2_qup2_i2c_apps_clk		0x1bf9a57e
+#define clk_gcc_blsp2_qup2_spi_apps_clk		0xbf54ca6d
+#define clk_gcc_blsp2_qup3_i2c_apps_clk		0x336d4170
+#define clk_gcc_blsp2_qup3_spi_apps_clk		0xc68509d6
+#define clk_gcc_blsp2_qup4_i2c_apps_clk		0xbd22539d
+#define clk_gcc_blsp2_qup4_spi_apps_clk		0x01a72b93
+#define clk_gcc_blsp2_qup5_i2c_apps_clk		0xe2b2ce1d
+#define clk_gcc_blsp2_qup5_spi_apps_clk		0xf40999cd
+#define clk_gcc_blsp2_qup6_i2c_apps_clk		0x894bcea4
+#define clk_gcc_blsp2_qup6_spi_apps_clk		0xfe1bd34a
+#define clk_gcc_blsp2_uart1_apps_clk		0x8c3512ff
+#define clk_gcc_blsp2_uart2_apps_clk		0x1e1965a3
+#define clk_gcc_blsp2_uart3_apps_clk		0x382415ab
+#define clk_gcc_boot_rom_ahb_clk		0xde2adeb1
+#define clk_gcc_bimc_gfx_clk			0x3edd69ad
+#define clk_gcc_cfg_noc_usb3_axi_clk		0x9ea4c2d9
+#define clk_gcc_gp1_clk				0x057f7b69
+#define clk_gcc_gp2_clk				0x9bf83ffd
+#define clk_gcc_gp3_clk				0xec6539ee
+#define clk_gcc_gpu_bimc_gfx_clk		0x3909459b
+#define clk_gcc_gpu_cfg_ahb_clk			0x72f20a57
+#define clk_gcc_gpu_iref_clk			0xfd82abad
+#define clk_gcc_hmss_dvm_bus_clk		0x17cc8b53
+#define clk_gcc_hmss_rbcpr_clk			0x699183be
+#define clk_hmss_gpll0_clk_src			0x17eb05d0
+#define clk_hmss_gpll4_clk_src			0x20456cae
+#define clk_gcc_mmss_sys_noc_axi_clk		0x4467b15b
+#define clk_gcc_mss_at_clk			0x1692c5aa
+#define clk_nav_gcc_dbg_clk			0x2221c544
+#define clk_gcc_pcie_0_aux_clk			0x3d2e3ece
+#define clk_gcc_pcie_0_cfg_ahb_clk		0x4dd325c3
+#define clk_gcc_pcie_0_mstr_axi_clk		0x3f85285b
+#define clk_gcc_pcie_0_pipe_clk			0x4f37621e
+#define clk_gcc_pcie_0_slv_axi_clk		0xd69638a1
+#define clk_gcc_pcie_phy_aux_clk		0x4746e74f
+#define clk_gcc_pdm2_clk			0x99d55711
+#define clk_gcc_pdm_ahb_clk			0x365664f6
+#define clk_gcc_prng_ahb_clk			0x397e7eaa
+#define clk_gcc_sdcc2_ahb_clk			0x23d5727f
+#define clk_gcc_sdcc2_apps_clk			0x861b20ac
+#define clk_gcc_sdcc4_ahb_clk			0x64f3e6a8
+#define clk_gcc_sdcc4_apps_clk			0xbf7c4dc8
+#define clk_gcc_tsif_ahb_clk			0x88d2822c
+#define clk_gcc_tsif_ref_clk			0x8f1ed2c2
+#define clk_gcc_ufs_ahb_clk			0x1914bb84
+#define clk_gcc_ufs_axi_clk			0x47c743a7
+#define clk_gcc_ufs_axi_hw_ctl_clk		0x69385b45
+#define clk_gcc_ufs_ice_core_clk		0x310b0710
+#define clk_gcc_ufs_ice_core_hw_ctl_clk		0x84e15a5b
+#define clk_gcc_ufs_phy_aux_clk			0x17acc8fb
+#define clk_gcc_ufs_phy_aux_hw_ctl_clk		0x7dbdb2e2
+#define clk_gcc_ufs_rx_symbol_0_clk		0x7f43251c
+#define clk_gcc_ufs_rx_symbol_1_clk		0x03182fde
+#define clk_gcc_ufs_tx_symbol_0_clk		0x6a9f747a
+#define clk_ufs_tx_symbol_0_clk			0xb3fcd0f7
+#define clk_ufs_rx_symbol_0_clk			0x17a0f1cd
+#define clk_gcc_ufs_unipro_core_clk		0x2daf7fd2
+#define clk_gcc_ufs_unipro_core_hw_ctl_clk	0x4a4e0f3d
+#define clk_gcc_usb30_master_clk		0xb3b4e2cb
+#define clk_gcc_usb30_mock_utmi_clk		0xa800b65a
+#define clk_gcc_usb30_sleep_clk			0xd0b65c92
+#define clk_gcc_usb3_phy_aux_clk		0x0d9a36e0
+#define clk_gcc_usb3_phy_pipe_clk		0xf279aff2
+#define clk_gcc_usb3_clkref_clk			0xb6cc8f00
+#define clk_gcc_hdmi_clkref_clk			0x4d4eec04
+#define clk_gcc_edp_clkref_clk			0xa8685c3f
+#define clk_gcc_ufs_clkref_clk			0x92aa126f
+#define clk_gcc_pcie_clkref_clk			0xa2e247fa
+#define clk_gcc_rx2_qlink_clkref_clk		0xd0ba986d
+#define clk_gcc_rx1_usb2_clkref_clk		0x53351d25
+#define clk_gcc_pcie_phy_reset			0x9bc3c959
+#define clk_gcc_pcie_phy_com_reset		0x8bf513e6
+#define clk_gcc_pcie_phy_nocsr_com_phy_reset	0x0c16a2da
+#define clk_gcc_qusb2phy_prim_reset		0x07550fa1
+#define clk_gcc_qusb2phy_sec_reset		0x3f3a87d0
+#define clk_gcc_mmss_noc_cfg_ahb_clk		0xb41a9d99
+#define clk_gcc_dcc_ahb_clk			0xfa14a88c
+#define clk_hlos1_vote_lpass_core_smmu_clk	0x3aaa1743
+#define clk_hlos1_vote_lpass_adsp_smmu_clk	0xc76f702f
+#define clk_gcc_mss_cfg_ahb_clk			0x111cde81
+#define clk_gcc_mss_q6_bimc_axi_clk		0x67544d62
+#define clk_gcc_mss_mnoc_bimc_axi_clk		0xf665d03f
+#define clk_gpll0_out_msscc			0x7d794829
+#define clk_gcc_mss_snoc_axi_clk		0x0e71de85
+#define clk_gcc_qspi_ref_clk			0x766a0f7c
+#define clk_gcc_qspi_ahb_clk			0x96969dc8
+#define clk_gcc_debug_mux			0x8121ac15
+
+/* clock_mmss controlled clocks */
+#define clk_mmsscc_xo				0x05e63704
+#define clk_mmsscc_gpll0			0xe900c515
+#define clk_mmsscc_gpll0_div			0x73892e05
+#define clk_mmpll0_pll				0x361e3cfd
+#define clk_mmpll1_pll				0x198e426b
+#define clk_mmpll3_pll				0x18c76899
+#define clk_mmpll4_pll				0x22c063c1
+#define clk_mmpll5_pll				0xa41e1936
+#define clk_mmpll6_pll				0xc56fb440
+#define clk_mmpll7_pll				0x3ac216af
+#define clk_mmpll10_pll				0x2561263b
+#define clk_mmpll0_pll_out			0x1e9e24a8
+#define clk_mmpll1_pll_out			0x5fa32257
+#define clk_mmpll3_pll_out			0x6eb6328f
+#define clk_mmpll4_pll_out			0xfb21c2fd
+#define clk_mmpll5_pll_out			0xcc1897bf
+#define clk_mmpll6_pll_out			0xfb1060bd
+#define clk_mmpll7_pll_out			0x767758ed
+#define clk_mmpll10_pll_out			0x3c5668f3
+#define clk_ahb_clk_src				0x86f49203
+#define clk_csi0_clk_src			0x227e65bc
+#define clk_vfe0_clk_src			0xa0c2bd8f
+#define clk_vfe1_clk_src			0x4e357366
+#define clk_mdp_clk_src				0x6dc1f8f1
+#define clk_maxi_clk_src			0x52c09777
+#define clk_cpp_clk_src				0x8382f56d
+#define clk_jpeg0_clk_src			0x9a0a0ac3
+#define clk_rot_clk_src				0xce49b56c
+#define clk_video_core_clk_src			0x8be4c944
+#define clk_csi1_clk_src			0x6a2a6c36
+#define clk_csi2_clk_src			0x4113589f
+#define clk_csi3_clk_src			0xfd934012
+#define clk_fd_core_clk_src			0xe4799ab7
+#define clk_ext_dp_phy_pll_vco			0x441b576b
+#define clk_ext_dp_phy_pll_link			0xea12644c
+#define clk_dp_link_clk_src			0x370d0626
+#define clk_dp_crypto_clk_src			0xf8faa811
+#define clk_dp_pixel_clk_src			0xf5dfbabf
+#define clk_ext_extpclk_clk_src			0xe5b273af
+#define clk_ext_pclk0_clk_src			0x087c1612
+#define clk_ext_pclk1_clk_src			0x8067c5a3
+#define clk_pclk0_clk_src			0xccac1f35
+#define clk_pclk1_clk_src			0x090f68ac
+#define clk_video_subcore0_clk_src		0x88d79636
+#define clk_video_subcore1_clk_src		0x4966930c
+#define clk_cci_clk_src				0x822f3d97
+#define clk_camss_gp0_clk_src			0x43b063e9
+#define clk_camss_gp1_clk_src			0xa3315f1b
+#define clk_mclk0_clk_src			0x266b3853
+#define clk_mclk1_clk_src			0xa73cad0c
+#define clk_mclk2_clk_src			0x42545468
+#define clk_mclk3_clk_src			0x2bfbb714
+#define clk_csiphy_clk_src			0x8cceb70a
+#define clk_csi0phytimer_clk_src		0xc8a309be
+#define clk_csi1phytimer_clk_src		0x7c0fe23a
+#define clk_csi2phytimer_clk_src		0x62ffea9c
+#define clk_ext_byte0_clk_src			0xfb32f31e
+#define clk_ext_byte1_clk_src			0x585ef6d4
+#define clk_byte0_clk_src			0x75cc885b
+#define clk_byte1_clk_src			0x63c2c955
+#define clk_dp_aux_clk_src			0x2b6e972b
+#define clk_dp_gtc_clk_src			0xc5a86a42
+#define clk_esc0_clk_src			0xb41d7c38
+#define clk_esc1_clk_src			0x3b0afa42
+#define clk_extpclk_clk_src			0xb2c31abd
+#define clk_hdmi_clk_src			0xb40aeea9
+#define clk_vsync_clk_src			0xecb43940
+#define clk_mmss_bimc_smmu_ahb_clk		0x4825baf4
+#define clk_mmss_bimc_smmu_axi_clk		0xc365ac39
+#define clk_mmss_snoc_dvm_axi_clk		0x2c159a11
+#define clk_mmss_camss_ahb_clk			0xa51f2c1d
+#define clk_mmss_camss_cci_ahb_clk		0xfda8bb6a
+#define clk_mmss_camss_cci_clk			0x71bb5c97
+#define clk_mmss_camss_cpp_ahb_clk		0xd5554f15
+#define clk_mmss_camss_cpp_clk			0x8e99ef57
+#define clk_mmss_camss_cpp_axi_clk		0xd84e390b
+#define clk_mmss_camss_cpp_vbif_ahb_clk		0x1b33a88e
+#define clk_mmss_camss_cphy_csid0_clk		0x56114361
+#define clk_mmss_camss_csi0_ahb_clk		0x2b58d241
+#define clk_mmss_camss_csi0_clk			0xccfe39ef
+#define clk_mmss_camss_csi0pix_clk		0x9e26509d
+#define clk_mmss_camss_csi0rdi_clk		0x01d5bf83
+#define clk_mmss_camss_cphy_csid1_clk		0x79fbcd8a
+#define clk_mmss_camss_csi1_ahb_clk		0x7073244b
+#define clk_mmss_camss_csi1_clk			0x3eeeaac0
+#define clk_mmss_camss_csi1pix_clk		0xf1375139
+#define clk_mmss_camss_csi1rdi_clk		0x43185024
+#define clk_mmss_camss_cphy_csid2_clk		0xf295e3ef
+#define clk_mmss_camss_csi2_ahb_clk		0x681c1479
+#define clk_mmss_camss_csi2_clk			0x94524569
+#define clk_mmss_camss_csi2pix_clk		0xf4de617d
+#define clk_mmss_camss_csi2rdi_clk		0x4bf01dc5
+#define clk_mmss_camss_cphy_csid3_clk		0x100188e9
+#define clk_mmss_camss_csi3_ahb_clk		0xfae7c29b
+#define clk_mmss_camss_csi3_clk			0x55e4bbae
+#define clk_mmss_camss_csi3pix_clk		0xc166a015
+#define clk_mmss_camss_csi3rdi_clk		0x6983a4cd
+#define clk_mmss_camss_csi_vfe0_clk		0x3b30b798
+#define clk_mmss_camss_csi_vfe1_clk		0xfe729af7
+#define clk_mmss_camss_csiphy0_clk		0x96c81af8
+#define clk_mmss_camss_csiphy1_clk		0xee9ac2bb
+#define clk_mmss_camss_csiphy2_clk		0x3365e70e
+#define clk_mmss_fd_ahb_clk			0x4ff1da4d
+#define clk_mmss_fd_core_clk			0x749e7eb0
+#define clk_mmss_fd_core_uar_clk		0x8ea480c5
+#define clk_mmss_camss_gp0_clk			0x3f7f6c87
+#define clk_mmss_camss_gp1_clk			0xdccdd730
+#define clk_mmss_camss_ispif_ahb_clk		0xbda4f0e3
+#define clk_mmss_camss_jpeg0_clk		0x4cc73b07
+#define clk_mmss_camss_jpeg0_vote_clk		0xc9efa6ac
+#define clk_mmss_camss_jpeg0_dma_vote_clk	0x371ec109
+#define clk_mmss_camss_jpeg_ahb_clk		0xde1fece3
+#define clk_mmss_camss_jpeg_axi_clk		0x7534616b
+#define clk_mmss_camss_mclk0_clk		0x056293a7
+#define clk_mmss_camss_mclk1_clk		0x96c7b69b
+#define clk_mmss_camss_mclk2_clk		0x8820556e
+#define clk_mmss_camss_mclk3_clk		0xf90ffb67
+#define clk_mmss_camss_micro_ahb_clk		0x6c6fd3c7
+#define clk_mmss_camss_csi0phytimer_clk		0x7a78864e
+#define clk_mmss_camss_csi1phytimer_clk		0x6e6c1de5
+#define clk_mmss_camss_csi2phytimer_clk		0x0235e2de
+#define clk_mmss_camss_top_ahb_clk		0x120618d6
+#define clk_mmss_camss_vfe0_ahb_clk		0x137bd0bd
+#define clk_mmss_camss_vfe0_clk			0xead28288
+#define clk_mmss_camss_vfe0_stream_clk		0xa0428287
+#define clk_mmss_camss_vfe1_ahb_clk		0xac0154c0
+#define clk_mmss_camss_vfe1_clk			0xc216b14d
+#define clk_mmss_camss_vfe1_stream_clk		0x745af3b6
+#define clk_mmss_camss_vfe_vbif_ahb_clk		0x0109a9c6
+#define clk_mmss_camss_vfe_vbif_axi_clk		0xe626d8a1
+#define clk_mmss_mdss_ahb_clk			0x85d37ab5
+#define clk_mmss_mdss_axi_clk			0xdf04fc1d
+#define clk_mmss_mdss_byte0_clk			0x38105d25
+#define clk_mmss_mdss_byte0_intf_clk		0x38e5aa79
+#define clk_mmss_mdss_byte0_intf_div_clk	0x8604f181
+#define clk_mmss_mdss_byte1_clk			0xe0c21354
+#define clk_mmss_mdss_byte1_intf_clk		0xcf654d8e
+#define clk_mmss_mdss_byte1_intf_div_clk	0xcdf334c5
+#define clk_mmss_mdss_dp_aux_clk		0x23125eb6
+#define clk_mmss_mdss_dp_crypto_clk		0x9a072d4e
+#define clk_mmss_mdss_dp_link_clk		0x8dd302d1
+#define clk_mmss_mdss_dp_link_intf_clk		0x70e386e6
+#define clk_mmss_mdss_dp_pixel_clk		0xb707b765
+#define clk_mmss_mdss_dp_gtc_clk		0xb59c151a
+#define clk_mmss_mdss_esc0_clk			0x5721ff83
+#define clk_mmss_mdss_esc1_clk			0xc3d0376b
+#define clk_mmss_mdss_extpclk_clk		0x74d5a954
+#define clk_mmss_mdss_hdmi_clk			0x28460a6d
+#define clk_mmss_mdss_hdmi_dp_ahb_clk		0x5448519f
+#define clk_mmss_mdss_mdp_clk			0x43539b0e
+#define clk_mmss_mdss_mdp_lut_clk		0x00627b2b
+#define clk_mmss_mdss_pclk0_clk			0xcc0e909d
+#define clk_mmss_mdss_pclk1_clk			0x850d9146
+#define clk_mmss_mdss_rot_clk			0xbb7e71c4
+#define clk_mmss_mdss_vsync_clk			0x629b36dc
+#define clk_mmss_misc_ahb_clk			0xea30b0e7
+#define clk_mmss_misc_cxo_clk			0xe620cd80
+#define clk_mmss_mnoc_ahb_clk			0x49a394f4
+#define clk_mmss_mnoc_maxi_clk			0xd8b7278f
+#define clk_mmss_video_subcore0_clk		0x23fae359
+#define clk_mmss_video_subcore1_clk		0x5213a0c7
+#define clk_mmss_video_ahb_clk			0x94334ae9
+#define clk_mmss_video_axi_clk			0xf3178ba5
+#define clk_mmss_video_core_clk			0x78f14c85
+#define clk_mmss_video_maxi_clk			0x1785ef88
+#define clk_mmss_vmem_ahb_clk			0x4b18955b
+#define clk_mmss_vmem_maxi_clk			0xb6067889
+#define clk_mmss_debug_mux			0xe646ffda
+
+/* external multimedia clocks */
+#define clk_dsi0pll_byteclk_mux			0xecf2c434
+#define clk_dsi0pll_byteclk_src			0x6f6f740f
+#define clk_dsi0pll_pclk_mux			0x6c9da335
+#define clk_dsi0pll_pclk_src			0x5efd85d4
+#define clk_dsi0pll_pclk_src_mux		0x84b14663
+#define clk_dsi0pll_post_bit_div		0xf46dcf27
+#define clk_dsi0pll_pll_out_div1		0xeda5b7fe
+#define clk_dsi0pll_pll_out_div2		0x97fa476d
+#define clk_dsi0pll_pll_out_div4		0x90a98ce0
+#define clk_dsi0pll_pll_out_div8		0x9d9d85cf
+#define clk_dsi0pll_pll_out_mux			0x179c27ca
+#define clk_dsi0pll_post_vco_mux		0xfaf9bd1f
+#define clk_dsi0pll_post_vco_div1		0xabb50b2a
+#define clk_dsi0pll_post_vco_div4		0xbe51c091
+#define clk_dsi0pll_bitclk_src			0x36c3c437
+#define clk_dsi0pll_vco_clk			0x15940d40
+
+#define clk_dsi1pll_byteclk_mux			0x14e2f38f
+#define clk_dsi1pll_byteclk_src			0x4b65c298
+#define clk_dsi1pll_pclk_mux			0x4c0518b5
+#define clk_dsi1pll_pclk_src			0xeddcd80e
+#define clk_dsi1pll_pclk_src_mux		0x3651feb3
+#define clk_dsi1pll_post_bit_div		0x712f0260
+#define clk_dsi1pll_pll_out_div8		0x87628ddb
+#define clk_dsi1pll_pll_out_div4		0x0d9a384b
+#define clk_dsi1pll_pll_out_div2		0x0c9b5748
+#define clk_dsi1pll_pll_out_div1		0x3193164e
+#define clk_dsi1pll_pll_out_mux			0x171bf8fd
+#define clk_dsi1pll_post_vco_mux		0xc6a90d20
+#define clk_dsi1pll_post_vco_div1		0x6f47ca7d
+#define clk_dsi1pll_post_vco_div4		0x90628974
+#define clk_dsi1pll_bitclk_src			0x13ab045b
+#define clk_dsi1pll_vco_clk			0x99797b50
+
+#define clk_dp_vco_clk				0xfcaaeec7
+#define clk_dp_link_2x_clk_divsel_five		0xcfe3f5dd
+#define clk_vco_divsel_four_clk_src		0xe0da19c0
+#define clk_vco_divsel_two_clk_src		0xb5cfc6a8
+#define clk_vco_divided_clk_src_mux		0x3f8197c2
+#define clk_hdmi_vco_clk			0xbb7dc20d
+
+/* clock_gpu controlled clocks */
+#define clk_gpucc_xo				0xc4e1a890
+#define clk_gpucc_gpll0				0x0db0e37f
+#define clk_gfx3d_clk_src			0x917f76ef
+#define clk_rbbmtimer_clk_src			0x17649ecc
+#define clk_gfx3d_isense_clk_src		0xecc3eafa
+#define clk_rbcpr_clk_src			0x2c2e9af2
+#define clk_gpu_debug_div_clk			0x75d6f53f
+#define clk_gpucc_gfx3d_clk			0x95f01bd5
+#define clk_gpucc_rbbmtimer_clk			0x58a0a7ca
+#define clk_gpucc_gfx3d_isense_clk		0xb2678e80
+#define clk_gpucc_cxo_clk			0x6532dcae
+#define clk_gpucc_rbcpr_clk			0x7bd750e8
+#define clk_gpu_pll0_pll			0x0e61ab4d
+#define clk_gpu_pll0_pll_out_even		0xb0ed5009
+#define clk_gpu_pll0_pll_out_odd		0x08c5a8a5
+#define clk_gpu_pll0_postdiv_clk		0x76c19f3c
+#define clk_gpucc_mx_clk			0x1edbb879
+#define clk_gpucc_gcc_dbg_clk			0x9ae8cd3c
+#define clk_gfxcc_dbg_clk			0x3ed47625
+
+/* CPU clocks */
+#define clk_pwrcl_clk				0xc554130e
+#define clk_perfcl_clk				0x58869997
+#define clk_sys_apcsaux_clk_gcc			0xf905e862
+#define clk_xo_ao				0x428c856d
+#define clk_osm_clk_src				0xaabe68c3
+#define clk_cpu_debug_mux			0x3ae8bcb2
+
+/* Audio External Clocks */
+#define clk_audio_ap_clk			0x9b5727cb
+#define clk_audio_pmi_clk			0xcbfe416d
+#define clk_audio_ap_clk2			0x454d1e91
+
+/* GCC block resets */
+#define QUSB2PHY_PRIM_BCR			0
+#define QUSB2PHY_SEC_BCR			1
+#define BLSP1_BCR				2
+#define BLSP2_BCR				3
+#define BOOT_ROM_BCR				4
+#define PRNG_BCR				5
+#define UFS_BCR					6
+#define USB_30_BCR				7
+#define USB3_PHY_BCR				8
+#define USB3PHY_PHY_BCR				9
+#define PCIE_0_PHY_BCR				10
+#define PCIE_PHY_BCR				11
+#define PCIE_PHY_COM_BCR			12
+#define PCIE_PHY_NOCSR_COM_PHY_BCR		13
+
+/* MMSS block resets */
+#define CAMSS_MICRO_BCR				0
+
+#endif
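The reset indices at the end of the header travel the same way, through a resets specifier whose cell is the BCR index. A minimal sketch, assuming a hypothetical qusb_phy0 node label and a clock_gcc provider label (both illustrative):

	&qusb_phy0 {
		resets = <&clock_gcc QUSB2PHY_PRIM_BCR>;	/* index from the GCC reset table */
		reset-names = "phy";
	};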
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/arch/powerpc/boot/dts/include/dt-bindings/clock/msm-clocks-hwio-8998.h	2019-01-22 16:16:28.171288678 +0100
@@ -0,0 +1,390 @@
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define CLKFLAG_NO_RATE_CACHE					0x00004000
+
+#define FMAX_LOWER						0
+#define FMAX_LOW						1
+#define FMAX_NOM						2
+#define FMAX_TURBO						3
+
+#define HALT_CHECK_DELAY					5
+
+#define RPM_MISC_CLK_TYPE					0x306b6c63
+#define RPM_BUS_CLK_TYPE					0x316b6c63
+#define RPM_MEM_CLK_TYPE					0x326b6c63
+#define RPM_IPA_CLK_TYPE					0x617069
+#define RPM_CE_CLK_TYPE						0x6563
+#define RPM_AGGR_CLK_TYPE					0x72676761
+#define RPM_SMD_KEY_ENABLE					0x62616E45
+#define RPM_MMAXI_CLK_TYPE					0x69786d6d
+
+#define CXO_CLK_SRC_ID						0x0
+#define QDSS_CLK_ID						0x1
+#define SNOC_CLK_ID						0x1
+#define CNOC_CLK_ID						0x2
+#define CNOC_PERIPH_CLK_ID					0x0
+#define BIMC_CLK_ID						0x0
+#define IPA_CLK_ID						0x0
+#define CE1_CLK_ID						0x0
+#define RF_CLK1_ID						0x4
+#define RF_CLK2_ID						0x5
+#define RF_CLK3_ID						0x6
+#define LN_BB_CLK1_ID						0x1
+#define LN_BB_CLK2_ID						0x2
+#define LN_BB_CLK3_ID						0x3
+#define DIV_CLK1_ID						0xb
+#define DIV_CLK2_ID						0xc
+#define DIV_CLK3_ID						0xd
+#define RF_CLK1_PIN_ID						0x4
+#define RF_CLK2_PIN_ID						0x5
+#define RF_CLK3_PIN_ID						0x6
+#define LN_BB_CLK1_PIN_ID					0x1
+#define LN_BB_CLK2_PIN_ID					0x2
+#define LN_BB_CLK3_PIN_ID					0x3
+#define AGGR1_NOC_ID						0x1
+#define AGGR2_NOC_ID						0x2
+#define MMSSNOC_AXI_CLK_ID					0x0
+
+#define APCS_COMMON_LMH_CMD_RCGR				0x0012C
+
+#define GCC_APCS_GPLL_ENA_VOTE					0x52000
+#define GCC_APCS_CLOCK_BRANCH_ENA_VOTE				0x52004
+#define GCC_APCS_CLOCK_BRANCH_ENA_VOTE_1			0x5200C
+#define PLLTEST_PAD_CFG						0x6200C
+#define GCC_XO_DIV4_CBCR					0x43008
+#define CLOCK_FRQ_MEASURE_CTL					0x62004
+#define CLOCK_FRQ_MEASURE_STATUS				0x62008
+#define GCC_GPLL0_MODE						0x00000
+#define GCC_GPLL4_MODE						0x77000
+#define GCC_USB30_MASTER_CMD_RCGR				0x0F014
+#define GCC_PCIE_AUX_CMD_RCGR					0x6C000
+#define GCC_UFS_AXI_CMD_RCGR					0x75018
+#define GCC_BLSP1_QUP1_I2C_APPS_CMD_RCGR			0x19020
+#define GCC_BLSP1_QUP1_SPI_APPS_CMD_RCGR			0x1900C
+#define GCC_BLSP1_QUP2_I2C_APPS_CMD_RCGR			0x1B020
+#define GCC_BLSP1_QUP2_SPI_APPS_CMD_RCGR			0x1B00C
+#define GCC_BLSP1_QUP3_I2C_APPS_CMD_RCGR			0x1D020
+#define GCC_BLSP1_QUP3_SPI_APPS_CMD_RCGR			0x1D00C
+#define GCC_BLSP1_QUP4_I2C_APPS_CMD_RCGR			0x1F020
+#define GCC_BLSP1_QUP4_SPI_APPS_CMD_RCGR			0x1F00C
+#define GCC_BLSP1_QUP5_I2C_APPS_CMD_RCGR			0x21020
+#define GCC_BLSP1_QUP5_SPI_APPS_CMD_RCGR			0x2100C
+#define GCC_BLSP1_QUP6_I2C_APPS_CMD_RCGR			0x23020
+#define GCC_BLSP1_QUP6_SPI_APPS_CMD_RCGR			0x2300C
+#define GCC_BLSP1_UART1_APPS_CMD_RCGR				0x1A00C
+#define GCC_BLSP1_UART2_APPS_CMD_RCGR				0x1C00C
+#define GCC_BLSP1_UART3_APPS_CMD_RCGR				0x1E00C
+#define GCC_BLSP2_QUP1_I2C_APPS_CMD_RCGR			0x26020
+#define GCC_BLSP2_QUP2_I2C_APPS_CMD_RCGR			0x28020
+#define GCC_BLSP2_QUP3_I2C_APPS_CMD_RCGR			0x2A020
+#define GCC_BLSP2_QUP4_I2C_APPS_CMD_RCGR			0x2C020
+#define GCC_BLSP2_QUP5_I2C_APPS_CMD_RCGR			0x2E020
+#define GCC_BLSP2_QUP6_I2C_APPS_CMD_RCGR			0x30020
+#define GCC_BLSP2_QUP1_SPI_APPS_CMD_RCGR			0x2600C
+#define GCC_BLSP2_QUP2_SPI_APPS_CMD_RCGR			0x2800C
+#define GCC_BLSP2_QUP3_SPI_APPS_CMD_RCGR			0x2A00C
+#define GCC_BLSP2_QUP4_SPI_APPS_CMD_RCGR			0x2C00C
+#define GCC_BLSP2_QUP5_SPI_APPS_CMD_RCGR			0x2E00C
+#define GCC_BLSP2_QUP6_SPI_APPS_CMD_RCGR			0x3000C
+#define GCC_BLSP2_UART1_APPS_CMD_RCGR				0x2700C
+#define GCC_BLSP2_UART2_APPS_CMD_RCGR				0x2900C
+#define GCC_BLSP2_UART3_APPS_CMD_RCGR				0x2B00C
+#define GCC_GP1_CMD_RCGR					0x64004
+#define GCC_GP2_CMD_RCGR					0x65004
+#define GCC_GP3_CMD_RCGR					0x66004
+#define GCC_HMSS_RBCPR_CMD_RCGR					0x48044
+#define GCC_PDM2_CMD_RCGR					0x33010
+#define GCC_SDCC2_APPS_CMD_RCGR					0x14010
+#define GCC_SDCC4_APPS_CMD_RCGR					0x16010
+#define GCC_TSIF_REF_CMD_RCGR					0x36010
+#define GCC_UFS_ICE_CORE_CMD_RCGR				0x76010
+#define GCC_UFS_PHY_AUX_CMD_RCGR				0x76044
+#define GCC_UFS_UNIPRO_CORE_CMD_RCGR				0x76028
+#define GCC_USB30_MOCK_UTMI_CMD_RCGR				0x0F028
+#define GCC_USB3_PHY_AUX_CMD_RCGR				0x5000C
+#define GCC_QSPI_REF_CMD_RCGR					0x9000C
+#define GCC_PCIE_0_PHY_BCR					0x6C01C
+#define GCC_HDMI_CLKREF_EN					0x88000
+#define GCC_UFS_CLKREF_EN					0x88004
+#define GCC_USB3_CLKREF_EN					0x88008
+#define GCC_PCIE_CLKREF_EN					0x8800C
+#define GCC_RX1_USB2_CLKREF_EN					0x88014
+#define GCC_USB3_PHY_BCR					0x50020
+#define GCC_AGGRE1_NOC_XO_CBCR					0x8202C
+#define GCC_AGGRE1_UFS_AXI_CBCR					0x82028
+#define GCC_AGGRE1_USB3_AXI_CBCR				0x82024
+#define GCC_BIMC_MSS_Q6_AXI_CBCR				0x4401C
+#define GCC_BLSP1_AHB_CBCR					0x17004
+#define GCC_BLSP1_BCR						0x17000
+#define GCC_BLSP1_QUP1_SPI_APPS_CBCR				0x19004
+#define GCC_BLSP1_QUP1_I2C_APPS_CBCR				0x19008
+#define GCC_BLSP1_QUP2_SPI_APPS_CBCR				0x1B004
+#define GCC_BLSP1_QUP2_I2C_APPS_CBCR				0x1B008
+#define GCC_BLSP1_QUP3_SPI_APPS_CBCR				0x1D004
+#define GCC_BLSP1_QUP3_I2C_APPS_CBCR				0x1D008
+#define GCC_BLSP1_QUP4_SPI_APPS_CBCR				0x1F004
+#define GCC_BLSP1_QUP4_I2C_APPS_CBCR				0x1F008
+#define GCC_BLSP1_QUP5_SPI_APPS_CBCR				0x21004
+#define GCC_BLSP1_QUP5_I2C_APPS_CBCR				0x21008
+#define GCC_BLSP1_QUP6_SPI_APPS_CBCR				0x23004
+#define GCC_BLSP1_QUP6_I2C_APPS_CBCR				0x23008
+#define GCC_BLSP1_UART1_APPS_CBCR				0x1A004
+#define GCC_BLSP1_UART2_APPS_CBCR				0x1C004
+#define GCC_BLSP1_UART3_APPS_CBCR				0x1E004
+#define GCC_BLSP2_AHB_CBCR					0x25004
+#define GCC_BLSP2_BCR						0x25000
+#define GCC_BLSP2_QUP1_SPI_APPS_CBCR				0x26004
+#define GCC_BLSP2_QUP1_I2C_APPS_CBCR				0x26008
+#define GCC_BLSP2_QUP2_I2C_APPS_CBCR				0x28008
+#define GCC_BLSP2_QUP2_SPI_APPS_CBCR				0x28004
+#define GCC_BLSP2_QUP3_SPI_APPS_CBCR				0x2A004
+#define GCC_BLSP2_QUP3_I2C_APPS_CBCR				0x2A008
+#define GCC_BLSP2_QUP4_SPI_APPS_CBCR				0x2C004
+#define GCC_BLSP2_QUP4_I2C_APPS_CBCR				0x2C008
+#define GCC_BLSP2_QUP5_SPI_APPS_CBCR				0x2E004
+#define GCC_BLSP2_QUP5_I2C_APPS_CBCR				0x2E008
+#define GCC_BLSP2_QUP6_SPI_APPS_CBCR				0x30004
+#define GCC_BLSP2_QUP6_I2C_APPS_CBCR				0x30008
+#define GCC_BLSP2_UART1_APPS_CBCR				0x27004
+#define GCC_BLSP2_UART2_APPS_CBCR				0x29004
+#define GCC_BLSP2_UART3_APPS_CBCR				0x2B004
+#define GCC_BOOT_ROM_AHB_CBCR					0x38004
+#define GCC_BOOT_ROM_BCR					0x38000
+#define GCC_CFG_NOC_USB3_AXI_CBCR				0x05018
+#define GCC_BIMC_GFX_CBCR					0x46040
+#define GCC_GP1_CBCR						0x64000
+#define GCC_GP2_CBCR						0x65000
+#define GCC_GP3_CBCR						0x66000
+#define GCC_GPU_BIMC_GFX_CBCR					0x71010
+#define GCC_GPU_CFG_AHB_CBCR					0x71004
+#define GCC_GPU_IREF_EN						0x88010
+#define GCC_HMSS_DVM_BUS_CBCR					0x4808C
+#define GCC_HMSS_RBCPR_CBCR					0x48008
+#define GCC_MMSS_SYS_NOC_AXI_CBCR				0x09000
+#define GCC_MMSS_NOC_CFG_AHB_CBCR				0x09004
+#define GCC_PCIE_0_SLV_AXI_CBCR					0x6B008
+#define GCC_PCIE_0_MSTR_AXI_CBCR				0x6B00C
+#define GCC_PCIE_0_CFG_AHB_CBCR					0x6B010
+#define GCC_PCIE_0_AUX_CBCR					0x6B014
+#define GCC_PCIE_0_PIPE_CBCR					0x6B018
+#define GCC_PCIE_PHY_AUX_CBCR					0x6F004
+#define GCC_PCIE_PHY_BCR					0x6F000
+#define GCC_PCIE_PHY_COM_BCR					0x6F014
+#define GCC_PCIE_PHY_NOCSR_COM_PHY_BCR				0x6F00C
+#define GCC_QUSB2PHY_PRIM_BCR					0x12000
+#define GCC_QUSB2PHY_SEC_BCR					0x12004
+#define GCC_PDM2_CBCR						0x3300C
+#define GCC_PDM_AHB_CBCR					0x33004
+#define GCC_PRNG_AHB_CBCR					0x34004
+#define GCC_PRNG_BCR						0x34000
+#define GCC_SDCC2_APPS_CBCR					0x14004
+#define GCC_SDCC2_AHB_CBCR					0x14008
+#define GCC_SDCC4_APPS_CBCR					0x16004
+#define GCC_SDCC4_AHB_CBCR					0x16008
+#define GCC_TSIF_AHB_CBCR					0x36004
+#define GCC_TSIF_REF_CBCR					0x36008
+#define GCC_UFS_AXI_CBCR					0x75008
+#define GCC_UFS_BCR						0x75000
+#define GCC_UFS_AHB_CBCR					0x7500C
+#define GCC_UFS_TX_SYMBOL_0_CBCR				0x75010
+#define GCC_UFS_RX_SYMBOL_0_CBCR				0x75014
+#define GCC_UFS_RX_SYMBOL_1_CBCR				0x7605C
+#define GCC_UFS_UNIPRO_CORE_CBCR				0x76008
+#define GCC_UFS_ICE_CORE_CBCR					0x7600C
+#define GCC_UFS_PHY_AUX_CBCR					0x76040
+#define GCC_USB30_MASTER_CBCR					0x0F008
+#define GCC_USB30_SLEEP_CBCR					0x0F00C
+#define GCC_USB30_MOCK_UTMI_CBCR				0x0F010
+#define GCC_USB_30_BCR						0x0F000
+#define GCC_USB3_PHY_AUX_CBCR					0x50000
+#define GCC_USB3_PHY_PIPE_CBCR					0x50004
+#define GCC_USB3PHY_PHY_BCR					0x50024
+#define GCC_APCS_CLOCK_SLEEP_ENA_VOTE				0x52008
+#define GCC_MSS_CFG_AHB_CBCR					0x8A000
+#define GCC_MSS_Q6_BIMC_AXI_CBCR				0x8A040
+#define GCC_MSS_MNOC_BIMC_AXI_CBCR				0x8A004
+#define GCC_MSS_SNOC_AXI_CBCR					0x8A03C
+#define GCC_HMSS_GPLL0_CMD_RCGR					0x4805C
+#define GCC_DCC_AHB_CBCR					0x84004
+#define GCC_HLOS1_VOTE_LPASS_CORE_SMMU_CBCR			0x7D010
+#define GCC_HLOS1_VOTE_LPASS_ADSP_SMMU_CBCR			0x7D014
+#define GCC_QSPI_AHB_CBCR					0x90004
+#define GCC_QSPI_REF_CBCR					0x90008
+#define GCC_MMSS_MISC						0x0902C
+#define GCC_GPU_MISC						0x71028
+
+#define GPUCC_GPU_PLL0_PLL_MODE					0x00000
+#define GPUCC_GPU_PLL0_USER_CTL_MODE				0x0000C
+#define GPUCC_GFX3D_CMD_RCGR					0x01070
+#define GPUCC_RBBMTIMER_CMD_RCGR				0x010B0
+#define GPUCC_GFX3D_ISENSE_CMD_RCGR				0x01100
+#define GPUCC_RBCPR_CMD_RCGR					0x01030
+#define GPUCC_GFX3D_CBCR					0x01098
+#define GPUCC_RBBMTIMER_CBCR					0x010D0
+#define GPUCC_GFX3D_ISENSE_CBCR					0x01124
+#define GPUCC_CXO_CBCR						0x01020
+#define GPUCC_RBCPR_CBCR					0x01054
+#define GPU_GX_BCR						0x01090
+#define GPUCC_GX_DOMAIN_MISC					0x00130
+#define GPUCC_GPU_DD_WRAP_CTRL					0x00430
+#define GPUCC_DEBUG_CLK_CTL					0x00120
+
+#define MMSS_PLL_VOTE_APCS					0x001E0
+#define MMSS_MMPLL0_PLL_MODE					0x0C000
+#define MMSS_MMPLL1_PLL_MODE					0x0C050
+#define MMSS_MMPLL3_PLL_MODE					0x00000
+#define MMSS_MMPLL4_PLL_MODE					0x00050
+#define MMSS_MMPLL5_PLL_MODE					0x000A0
+#define MMSS_MMPLL6_PLL_MODE					0x000F0
+#define MMSS_MMPLL7_PLL_MODE					0x00140
+#define MMSS_MMPLL10_PLL_MODE					0x00190
+#define MMSS_AHB_CMD_RCGR					0x05000
+#define MMSS_CSI0_CMD_RCGR					0x03090
+#define MMSS_VFE0_CMD_RCGR					0x03600
+#define MMSS_VFE1_CMD_RCGR					0x03620
+#define MMSS_MDP_CMD_RCGR					0x02040
+#define MMSS_MAXI_CMD_RCGR					0x0F020
+#define MMSS_CPP_CMD_RCGR					0x03640
+#define MMSS_JPEG0_CMD_RCGR					0x03500
+#define MMSS_ROT_CMD_RCGR					0x021A0
+#define MMSS_VIDEO_CORE_CMD_RCGR				0x01000
+#define MMSS_CSI1_CMD_RCGR					0x03100
+#define MMSS_CSI2_CMD_RCGR					0x03160
+#define MMSS_CSI3_CMD_RCGR					0x031C0
+#define MMSS_FD_CORE_CMD_RCGR					0x03B00
+#define MMSS_BYTE0_CMD_RCGR					0x02120
+#define MMSS_BYTE1_CMD_RCGR					0x02140
+#define MMSS_PCLK0_CMD_RCGR					0x02000
+#define MMSS_PCLK1_CMD_RCGR					0x02020
+#define MMSS_VIDEO_SUBCORE0_CMD_RCGR				0x01060
+#define MMSS_VIDEO_SUBCORE1_CMD_RCGR				0x01080
+#define MMSS_CSIPHY_CMD_RCGR					0x03800
+#define MMSS_CCI_CMD_RCGR					0x03300
+#define MMSS_CAMSS_GP0_CMD_RCGR					0x03420
+#define MMSS_CAMSS_GP1_CMD_RCGR					0x03450
+#define MMSS_MCLK0_CMD_RCGR					0x03360
+#define MMSS_MCLK1_CMD_RCGR					0x03390
+#define MMSS_MCLK2_CMD_RCGR					0x033C0
+#define MMSS_MCLK3_CMD_RCGR					0x033F0
+#define MMSS_CAMSS_CSI2PHYTIMER_CBCR				0x03084
+#define MMSS_CSI0PHYTIMER_CMD_RCGR				0x03000
+#define MMSS_CSI1PHYTIMER_CMD_RCGR				0x03030
+#define MMSS_CSI2PHYTIMER_CMD_RCGR				0x03060
+#define MMSS_DP_GTC_CMD_RCGR					0x02280
+#define MMSS_ESC0_CMD_RCGR					0x02160
+#define MMSS_ESC1_CMD_RCGR					0x02180
+#define MMSS_EXTPCLK_CMD_RCGR					0x02060
+#define MMSS_HDMI_CMD_RCGR					0x02100
+#define MMSS_VSYNC_CMD_RCGR					0x02080
+#define MMSS_BIMC_SMMU_AHB_CBCR					0x0E004
+#define MMSS_BIMC_SMMU_AXI_CBCR					0x0E008
+#define MMSS_SNOC_DVM_AXI_CBCR					0x0E040
+#define MMSS_CAMSS_AHB_CBCR					0x0348C
+#define MMSS_CAMSS_CCI_AHB_CBCR					0x03348
+#define MMSS_CAMSS_CCI_CBCR					0x03344
+#define MMSS_CAMSS_CPP_AHB_CBCR					0x036B4
+#define MMSS_CAMSS_CPP_CBCR					0x036B0
+#define MMSS_CAMSS_CPP_AXI_CBCR					0x036C4
+#define MMSS_CAMSS_CPP_VBIF_AHB_CBCR				0x036C8
+#define MMSS_CAMSS_CPHY_CSID0_CBCR				0x03730
+#define MMSS_CAMSS_CSI0_AHB_CBCR				0x030BC
+#define MMSS_CAMSS_CSI0_CBCR					0x030B4
+#define MMSS_CAMSS_CSI0PIX_CBCR					0x030E4
+#define MMSS_CAMSS_CSI0RDI_CBCR					0x030D4
+#define MMSS_CAMSS_CPHY_CSID1_CBCR				0x03734
+#define MMSS_CAMSS_CSI1_AHB_CBCR				0x03128
+#define MMSS_CAMSS_CSI1_CBCR					0x03124
+#define MMSS_CAMSS_CSI1PIX_CBCR					0x03154
+#define MMSS_CAMSS_CSI1RDI_CBCR					0x03144
+#define MMSS_CAMSS_CPHY_CSID2_CBCR				0x03738
+#define MMSS_CAMSS_CSI2_AHB_CBCR				0x03188
+#define MMSS_CAMSS_CSI2_CBCR					0x03184
+#define MMSS_CAMSS_CSI2PIX_CBCR					0x031B4
+#define MMSS_CAMSS_CSI2RDI_CBCR					0x031A4
+#define MMSS_CAMSS_CPHY_CSID3_CBCR				0x0373C
+#define MMSS_CAMSS_CSI3_AHB_CBCR				0x031E8
+#define MMSS_CAMSS_CSI3_CBCR					0x031E4
+#define MMSS_CAMSS_CSI3PIX_CBCR					0x03214
+#define MMSS_CAMSS_CSI3RDI_CBCR					0x03204
+#define MMSS_CAMSS_CSI_VFE0_CBCR				0x03704
+#define MMSS_CAMSS_CSI_VFE1_CBCR				0x03714
+#define MMSS_CAMSS_CSIPHY0_CBCR					0x03740
+#define MMSS_CAMSS_CSIPHY1_CBCR					0x03744
+#define MMSS_CAMSS_CSIPHY2_CBCR					0x03748
+#define MMSS_FD_AHB_CBCR					0x03B74
+#define MMSS_FD_CORE_CBCR					0x03B68
+#define MMSS_FD_CORE_UAR_CBCR					0x03B6C
+#define MMSS_CAMSS_GP0_CBCR					0x03444
+#define MMSS_CAMSS_GP1_CBCR					0x03474
+#define MMSS_CAMSS_ISPIF_AHB_CBCR				0x03224
+#define MMSS_CAMSS_JPEG0_CBCR					0x035A8
+#define MMSS_CAMSS_JPEG_AHB_CBCR				0x035B4
+#define MMSS_CAMSS_JPEG_AXI_CBCR				0x035B8
+#define MMSS_CAMSS_MCLK0_CBCR					0x03384
+#define MMSS_CAMSS_MCLK1_CBCR					0x033B4
+#define MMSS_CAMSS_MCLK2_CBCR					0x033E4
+#define MMSS_CAMSS_MCLK3_CBCR					0x03414
+#define MMSS_CAMSS_MICRO_AHB_CBCR				0x03494
+#define MMSS_CAMSS_CSI0PHYTIMER_CBCR				0x03024
+#define MMSS_CAMSS_CSI1PHYTIMER_CBCR				0x03054
+#define MMSS_CAMSS_TOP_AHB_CBCR					0x03484
+#define MMSS_CAMSS_VFE0_AHB_CBCR				0x03668
+#define MMSS_CAMSS_VFE0_CBCR					0x036A8
+#define MMSS_CAMSS_VFE0_STREAM_CBCR				0x03720
+#define MMSS_CAMSS_VFE1_AHB_CBCR				0x03678
+#define MMSS_CAMSS_VFE1_CBCR					0x036AC
+#define MMSS_CAMSS_VFE1_STREAM_CBCR				0x03724
+#define MMSS_CAMSS_VFE_VBIF_AHB_CBCR				0x036B8
+#define MMSS_CAMSS_VFE_VBIF_AXI_CBCR				0x036BC
+#define MMSS_MDSS_AHB_CBCR					0x02308
+#define MMSS_MDSS_AXI_CBCR					0x02310
+#define MMSS_MDSS_BYTE0_CBCR					0x0233C
+#define MMSS_MDSS_BYTE0_INTF_CBCR				0x02374
+#define MMSS_MDSS_BYTE0_INTF_DIV				0x0237C
+#define MMSS_MDSS_BYTE1_CBCR					0x02340
+#define MMSS_MDSS_BYTE1_INTF_CBCR				0x02378
+#define MMSS_MDSS_BYTE1_INTF_DIV				0x02380
+#define MMSS_MDSS_DP_AUX_CBCR					0x02364
+#define MMSS_MDSS_DP_CRYPTO_CBCR				0x0235C
+#define MMSS_MDSS_DP_GTC_CBCR					0x02368
+#define MMSS_MDSS_DP_LINK_CBCR					0x02354
+#define MMSS_MDSS_DP_LINK_INTF_CBCR				0x02358
+#define MMSS_MDSS_DP_PIXEL_CBCR					0x02360
+#define MMSS_MDSS_ESC0_CBCR					0x02344
+#define MMSS_MDSS_ESC1_CBCR					0x02348
+#define MMSS_MDSS_EXTPCLK_CBCR					0x02324
+#define MMSS_MDSS_HDMI_CBCR					0x02338
+#define MMSS_MDSS_HDMI_DP_AHB_CBCR				0x0230C
+#define MMSS_MDSS_MDP_CBCR					0x0231C
+#define MMSS_MDSS_MDP_LUT_CBCR					0x02320
+#define MMSS_MDSS_PCLK0_CBCR					0x02314
+#define MMSS_MDSS_PCLK1_CBCR					0x02318
+#define MMSS_MDSS_ROT_CBCR					0x02350
+#define MMSS_MDSS_VSYNC_CBCR					0x02328
+#define MMSS_MISC_AHB_CBCR					0x00328
+#define MMSS_MISC_CXO_CBCR					0x00324
+#define MMSS_MNOC_AHB_CBCR					0x05024
+#define MMSS_MNOC_MAXI_CBCR					0x0F004
+#define MMSS_VIDEO_SUBCORE0_CBCR				0x01048
+#define MMSS_VIDEO_SUBCORE1_CBCR				0x0104C
+#define MMSS_VIDEO_AHB_CBCR					0x01030
+#define MMSS_VIDEO_AXI_CBCR					0x01034
+#define MMSS_VIDEO_CORE_CBCR					0x01028
+#define MMSS_VIDEO_MAXI_CBCR					0x01038
+#define MMSS_VMEM_AHB_CBCR					0x0F068
+#define MMSS_VMEM_MAXI_CBCR					0x0F064
+#define MMSS_DP_AUX_CMD_RCGR					0x02260
+#define MMSS_DP_CRYPTO_CMD_RCGR					0x02220
+#define MMSS_DP_LINK_CMD_RCGR					0x02200
+#define MMSS_DP_PIXEL_CMD_RCGR					0x02240
+#define MMSS_DEBUG_CLK_CTL					0x00900
diff -Nruw linux-4.4.115-fbx/arch/powerpc/boot/dts/include/dt-bindings/msm/msm-bus-ids.h linux-4.4.115-fbx/arch/powerpc/boot/dts/include/dt-bindings/msm/msm-bus-ids.h
--- linux-4.4.115-fbx/arch/powerpc/boot/dts/include/dt-bindings/msm/msm-bus-ids.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/arch/powerpc/boot/dts/include/dt-bindings/msm/msm-bus-ids.h	2019-01-22 16:16:28.179288750 +0100
@@ -0,0 +1,887 @@
+/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MSM_BUS_IDS_H
+#define __MSM_BUS_IDS_H
+
+/* Aggregation types */
+#define AGG_SCHEME_NONE	0
+#define AGG_SCHEME_LEG	1
+#define AGG_SCHEME_1	2
+
+/* Topology related enums */
+#define	MSM_BUS_FAB_DEFAULT 0
+#define	MSM_BUS_FAB_APPSS 0
+#define	MSM_BUS_FAB_SYSTEM 1024
+#define	MSM_BUS_FAB_MMSS 2048
+#define	MSM_BUS_FAB_SYSTEM_FPB 3072
+#define	MSM_BUS_FAB_CPSS_FPB 4096
+
+#define	MSM_BUS_FAB_BIMC 0
+#define	MSM_BUS_FAB_SYS_NOC 1024
+#define	MSM_BUS_FAB_MMSS_NOC 2048
+#define	MSM_BUS_FAB_OCMEM_NOC 3072
+#define	MSM_BUS_FAB_PERIPH_NOC 4096
+#define	MSM_BUS_FAB_CONFIG_NOC 5120
+#define	MSM_BUS_FAB_OCMEM_VNOC 6144
+#define	MSM_BUS_FAB_MMSS_AHB 2049
+#define	MSM_BUS_FAB_A0_NOC 6145
+#define	MSM_BUS_FAB_A1_NOC 6146
+#define	MSM_BUS_FAB_A2_NOC 6147
+#define	MSM_BUS_FAB_GNOC 6148
+#define	MSM_BUS_FAB_CR_VIRT 6149
+
+#define	MSM_BUS_MASTER_FIRST 1
+#define	MSM_BUS_MASTER_AMPSS_M0 1
+#define	MSM_BUS_MASTER_AMPSS_M1 2
+#define	MSM_BUS_APPSS_MASTER_FAB_MMSS 3
+#define	MSM_BUS_APPSS_MASTER_FAB_SYSTEM 4
+#define	MSM_BUS_SYSTEM_MASTER_FAB_APPSS 5
+#define	MSM_BUS_MASTER_SPS 6
+#define	MSM_BUS_MASTER_ADM_PORT0 7
+#define	MSM_BUS_MASTER_ADM_PORT1 8
+#define	MSM_BUS_SYSTEM_MASTER_ADM1_PORT0 9
+#define	MSM_BUS_MASTER_ADM1_PORT1 10
+#define	MSM_BUS_MASTER_LPASS_PROC 11
+#define	MSM_BUS_MASTER_MSS_PROCI 12
+#define	MSM_BUS_MASTER_MSS_PROCD 13
+#define	MSM_BUS_MASTER_MSS_MDM_PORT0 14
+#define	MSM_BUS_MASTER_LPASS 15
+#define	MSM_BUS_SYSTEM_MASTER_CPSS_FPB 16
+#define	MSM_BUS_SYSTEM_MASTER_SYSTEM_FPB 17
+#define	MSM_BUS_SYSTEM_MASTER_MMSS_FPB 18
+#define	MSM_BUS_MASTER_ADM1_CI 19
+#define	MSM_BUS_MASTER_ADM0_CI 20
+#define	MSM_BUS_MASTER_MSS_MDM_PORT1 21
+#define	MSM_BUS_MASTER_MDP_PORT0 22
+#define	MSM_BUS_MASTER_MDP_PORT1 23
+#define	MSM_BUS_MMSS_MASTER_ADM1_PORT0 24
+#define	MSM_BUS_MASTER_ROTATOR 25
+#define	MSM_BUS_MASTER_GRAPHICS_3D 26
+#define	MSM_BUS_MASTER_JPEG_DEC 27
+#define	MSM_BUS_MASTER_GRAPHICS_2D_CORE0 28
+#define	MSM_BUS_MASTER_VFE 29
+#define	MSM_BUS_MASTER_VFE0 MSM_BUS_MASTER_VFE
+#define	MSM_BUS_MASTER_VPE 30
+#define	MSM_BUS_MASTER_JPEG_ENC 31
+#define	MSM_BUS_MASTER_GRAPHICS_2D_CORE1 32
+#define	MSM_BUS_MMSS_MASTER_APPS_FAB 33
+#define	MSM_BUS_MASTER_HD_CODEC_PORT0 34
+#define	MSM_BUS_MASTER_HD_CODEC_PORT1 35
+#define	MSM_BUS_MASTER_SPDM 36
+#define	MSM_BUS_MASTER_RPM 37
+#define	MSM_BUS_MASTER_MSS 38
+#define	MSM_BUS_MASTER_RIVA 39
+#define	MSM_BUS_MASTER_SNOC_VMEM 40
+#define	MSM_BUS_MASTER_MSS_SW_PROC 41
+#define	MSM_BUS_MASTER_MSS_FW_PROC 42
+#define	MSM_BUS_MASTER_HMSS 43
+#define	MSM_BUS_MASTER_GSS_NAV 44
+#define	MSM_BUS_MASTER_PCIE 45
+#define	MSM_BUS_MASTER_SATA 46
+#define	MSM_BUS_MASTER_CRYPTO 47
+#define	MSM_BUS_MASTER_VIDEO_CAP 48
+#define	MSM_BUS_MASTER_GRAPHICS_3D_PORT1 49
+#define	MSM_BUS_MASTER_VIDEO_ENC 50
+#define	MSM_BUS_MASTER_VIDEO_DEC 51
+#define	MSM_BUS_MASTER_LPASS_AHB 52
+#define	MSM_BUS_MASTER_QDSS_BAM 53
+#define	MSM_BUS_MASTER_SNOC_CFG 54
+#define	MSM_BUS_MASTER_CRYPTO_CORE0 55
+#define	MSM_BUS_MASTER_CRYPTO_CORE1 56
+#define	MSM_BUS_MASTER_MSS_NAV 57
+#define	MSM_BUS_MASTER_OCMEM_DMA 58
+#define	MSM_BUS_MASTER_WCSS 59
+#define	MSM_BUS_MASTER_QDSS_ETR 60
+#define	MSM_BUS_MASTER_USB3 61
+#define	MSM_BUS_MASTER_JPEG 62
+#define	MSM_BUS_MASTER_VIDEO_P0 63
+#define	MSM_BUS_MASTER_VIDEO_P1 64
+#define	MSM_BUS_MASTER_MSS_PROC 65
+#define	MSM_BUS_MASTER_JPEG_OCMEM 66
+#define	MSM_BUS_MASTER_MDP_OCMEM 67
+#define	MSM_BUS_MASTER_VIDEO_P0_OCMEM 68
+#define	MSM_BUS_MASTER_VIDEO_P1_OCMEM 69
+#define	MSM_BUS_MASTER_VFE_OCMEM 70
+#define	MSM_BUS_MASTER_CNOC_ONOC_CFG 71
+#define	MSM_BUS_MASTER_RPM_INST 72
+#define	MSM_BUS_MASTER_RPM_DATA 73
+#define	MSM_BUS_MASTER_RPM_SYS 74
+#define	MSM_BUS_MASTER_DEHR 75
+#define	MSM_BUS_MASTER_QDSS_DAP 76
+#define	MSM_BUS_MASTER_TIC 77
+#define	MSM_BUS_MASTER_SDCC_1 78
+#define	MSM_BUS_MASTER_SDCC_3 79
+#define	MSM_BUS_MASTER_SDCC_4 80
+#define	MSM_BUS_MASTER_SDCC_2 81
+#define	MSM_BUS_MASTER_TSIF 82
+#define	MSM_BUS_MASTER_BAM_DMA 83
+#define	MSM_BUS_MASTER_BLSP_2 84
+#define	MSM_BUS_MASTER_USB_HSIC 85
+#define	MSM_BUS_MASTER_BLSP_1 86
+#define	MSM_BUS_MASTER_USB_HS 87
+#define	MSM_BUS_MASTER_PNOC_CFG 88
+#define	MSM_BUS_MASTER_V_OCMEM_GFX3D 89
+#define	MSM_BUS_MASTER_IPA 90
+#define	MSM_BUS_MASTER_QPIC 91
+#define	MSM_BUS_MASTER_MDPE 92
+#define	MSM_BUS_MASTER_USB_HS2 93
+#define	MSM_BUS_MASTER_VPU 94
+#define	MSM_BUS_MASTER_UFS 95
+#define	MSM_BUS_MASTER_BCAST 96
+#define	MSM_BUS_MASTER_CRYPTO_CORE2 97
+#define	MSM_BUS_MASTER_EMAC 98
+#define	MSM_BUS_MASTER_VPU_1 99
+#define	MSM_BUS_MASTER_PCIE_1 100
+#define	MSM_BUS_MASTER_USB3_1 101
+#define	MSM_BUS_MASTER_CNOC_MNOC_MMSS_CFG 102
+#define	MSM_BUS_MASTER_CNOC_MNOC_CFG 103
+#define	MSM_BUS_MASTER_TCU_0 104
+#define	MSM_BUS_MASTER_TCU_1 105
+#define	MSM_BUS_MASTER_CPP 106
+#define	MSM_BUS_MASTER_AUDIO 107
+#define	MSM_BUS_MASTER_PCIE_2 108
+#define	MSM_BUS_MASTER_VFE1 109
+#define	MSM_BUS_MASTER_XM_USB_HS1 110
+#define	MSM_BUS_MASTER_PCNOC_BIMC_1 111
+#define	MSM_BUS_MASTER_BIMC_PCNOC   112
+#define	MSM_BUS_MASTER_XI_USB_HSIC  113
+#define	MSM_BUS_MASTER_SGMII	    114
+#define	MSM_BUS_SPMI_FETCHER 115
+#define	MSM_BUS_MASTER_GNOC_BIMC 116
+#define	MSM_BUS_MASTER_CRVIRT_A2NOC 117
+#define	MSM_BUS_MASTER_CNOC_A2NOC 118
+#define	MSM_BUS_MASTER_WLAN 119
+#define	MSM_BUS_MASTER_MSS_CE 120
+#define	MSM_BUS_MASTER_CDSP_PROC 121
+#define	MSM_BUS_MASTER_GNOC_SNOC 122
+#define	MSM_BUS_MASTER_PIMEM 123
+#define	MSM_BUS_MASTER_MASTER_LAST 124
+
+#define	MSM_BUS_SYSTEM_FPB_MASTER_SYSTEM MSM_BUS_SYSTEM_MASTER_SYSTEM_FPB
+#define	MSM_BUS_CPSS_FPB_MASTER_SYSTEM MSM_BUS_SYSTEM_MASTER_CPSS_FPB
+
+#define	MSM_BUS_SNOC_MM_INT_0 10000
+#define	MSM_BUS_SNOC_MM_INT_1 10001
+#define	MSM_BUS_SNOC_MM_INT_2 10002
+#define	MSM_BUS_SNOC_MM_INT_BIMC 10003
+#define	MSM_BUS_SNOC_INT_0 10004
+#define	MSM_BUS_SNOC_INT_1 10005
+#define	MSM_BUS_SNOC_INT_BIMC 10006
+#define	MSM_BUS_SNOC_BIMC_0_MAS 10007
+#define	MSM_BUS_SNOC_BIMC_1_MAS 10008
+#define	MSM_BUS_SNOC_QDSS_INT 10009
+#define	MSM_BUS_PNOC_SNOC_MAS 10010
+#define	MSM_BUS_PNOC_SNOC_SLV 10011
+#define	MSM_BUS_PNOC_INT_0 10012
+#define	MSM_BUS_PNOC_INT_1 10013
+#define	MSM_BUS_PNOC_M_0 10014
+#define	MSM_BUS_PNOC_M_1 10015
+#define	MSM_BUS_BIMC_SNOC_MAS 10016
+#define	MSM_BUS_BIMC_SNOC_SLV 10017
+#define	MSM_BUS_PNOC_SLV_0 10018
+#define	MSM_BUS_PNOC_SLV_1 10019
+#define	MSM_BUS_PNOC_SLV_2 10020
+#define	MSM_BUS_PNOC_SLV_3 10021
+#define	MSM_BUS_PNOC_SLV_4 10022
+#define	MSM_BUS_PNOC_SLV_8 10023
+#define	MSM_BUS_PNOC_SLV_9 10024
+#define	MSM_BUS_SNOC_BIMC_0_SLV 10025
+#define	MSM_BUS_SNOC_BIMC_1_SLV 10026
+#define	MSM_BUS_MNOC_BIMC_MAS 10027
+#define	MSM_BUS_MNOC_BIMC_SLV 10028
+#define	MSM_BUS_BIMC_MNOC_MAS 10029
+#define	MSM_BUS_BIMC_MNOC_SLV 10030
+#define	MSM_BUS_SNOC_BIMC_MAS 10031
+#define	MSM_BUS_SNOC_BIMC_SLV 10032
+#define	MSM_BUS_CNOC_SNOC_MAS 10033
+#define	MSM_BUS_CNOC_SNOC_SLV 10034
+#define	MSM_BUS_SNOC_CNOC_MAS 10035
+#define	MSM_BUS_SNOC_CNOC_SLV 10036
+#define	MSM_BUS_OVNOC_SNOC_MAS 10037
+#define	MSM_BUS_OVNOC_SNOC_SLV 10038
+#define	MSM_BUS_SNOC_OVNOC_MAS 10039
+#define	MSM_BUS_SNOC_OVNOC_SLV 10040
+#define	MSM_BUS_SNOC_PNOC_MAS 10041
+#define	MSM_BUS_SNOC_PNOC_SLV 10042
+#define	MSM_BUS_BIMC_INT_APPS_EBI 10043
+#define	MSM_BUS_BIMC_INT_APPS_SNOC 10044
+#define	MSM_BUS_SNOC_BIMC_2_MAS 10045
+#define	MSM_BUS_SNOC_BIMC_2_SLV 10046
+#define	MSM_BUS_PNOC_SLV_5	10047
+#define	MSM_BUS_PNOC_SLV_7	10048
+#define	MSM_BUS_PNOC_INT_2 10049
+#define	MSM_BUS_PNOC_INT_3 10050
+#define	MSM_BUS_PNOC_INT_4 10051
+#define	MSM_BUS_PNOC_INT_5 10052
+#define	MSM_BUS_PNOC_INT_6 10053
+#define	MSM_BUS_PNOC_INT_7 10054
+#define	MSM_BUS_BIMC_SNOC_1_MAS 10055
+#define	MSM_BUS_BIMC_SNOC_1_SLV 10056
+#define	MSM_BUS_PNOC_A1NOC_MAS 10057
+#define	MSM_BUS_PNOC_A1NOC_SLV 10058
+#define	MSM_BUS_CNOC_A1NOC_MAS 10059
+#define	MSM_BUS_A0NOC_SNOC_MAS 10060
+#define	MSM_BUS_A0NOC_SNOC_SLV 10061
+#define	MSM_BUS_A1NOC_SNOC_SLV 10062
+#define	MSM_BUS_A1NOC_SNOC_MAS 10063
+#define	MSM_BUS_A2NOC_SNOC_MAS 10064
+#define	MSM_BUS_A2NOC_SNOC_SLV 10065
+#define	MSM_BUS_SNOC_INT_2 10066
+#define	MSM_BUS_A0NOC_QDSS_INT	10067
+#define	MSM_BUS_INT_LAST 10068
+
+#define	MSM_BUS_INT_TEST_ID	20000
+#define	MSM_BUS_INT_TEST_LAST	20050
+
+#define	MSM_BUS_SLAVE_FIRST 512
+#define	MSM_BUS_SLAVE_EBI_CH0 512
+#define	MSM_BUS_SLAVE_EBI_CH1 513
+#define	MSM_BUS_SLAVE_AMPSS_L2 514
+#define	MSM_BUS_APPSS_SLAVE_FAB_MMSS 515
+#define	MSM_BUS_APPSS_SLAVE_FAB_SYSTEM 516
+#define	MSM_BUS_SYSTEM_SLAVE_FAB_APPS 517
+#define	MSM_BUS_SLAVE_SPS 518
+#define	MSM_BUS_SLAVE_SYSTEM_IMEM 519
+#define	MSM_BUS_SLAVE_AMPSS 520
+#define	MSM_BUS_SLAVE_MSS 521
+#define	MSM_BUS_SLAVE_LPASS 522
+#define	MSM_BUS_SYSTEM_SLAVE_CPSS_FPB 523
+#define	MSM_BUS_SYSTEM_SLAVE_SYSTEM_FPB 524
+#define	MSM_BUS_SYSTEM_SLAVE_MMSS_FPB 525
+#define	MSM_BUS_SLAVE_CORESIGHT 526
+#define	MSM_BUS_SLAVE_RIVA 527
+#define	MSM_BUS_SLAVE_SMI 528
+#define	MSM_BUS_MMSS_SLAVE_FAB_APPS 529
+#define	MSM_BUS_MMSS_SLAVE_FAB_APPS_1 530
+#define	MSM_BUS_SLAVE_MM_IMEM 531
+#define	MSM_BUS_SLAVE_CRYPTO 532
+#define	MSM_BUS_SLAVE_SPDM 533
+#define	MSM_BUS_SLAVE_RPM 534
+#define	MSM_BUS_SLAVE_RPM_MSG_RAM 535
+#define	MSM_BUS_SLAVE_MPM 536
+#define	MSM_BUS_SLAVE_PMIC1_SSBI1_A 537
+#define	MSM_BUS_SLAVE_PMIC1_SSBI1_B 538
+#define	MSM_BUS_SLAVE_PMIC1_SSBI1_C 539
+#define	MSM_BUS_SLAVE_PMIC2_SSBI2_A 540
+#define	MSM_BUS_SLAVE_PMIC2_SSBI2_B 541
+#define	MSM_BUS_SLAVE_GSBI1_UART 542
+#define	MSM_BUS_SLAVE_GSBI2_UART 543
+#define	MSM_BUS_SLAVE_GSBI3_UART 544
+#define	MSM_BUS_SLAVE_GSBI4_UART 545
+#define	MSM_BUS_SLAVE_GSBI5_UART 546
+#define	MSM_BUS_SLAVE_GSBI6_UART 547
+#define	MSM_BUS_SLAVE_GSBI7_UART 548
+#define	MSM_BUS_SLAVE_GSBI8_UART 549
+#define	MSM_BUS_SLAVE_GSBI9_UART 550
+#define	MSM_BUS_SLAVE_GSBI10_UART 551
+#define	MSM_BUS_SLAVE_GSBI11_UART 552
+#define	MSM_BUS_SLAVE_GSBI12_UART 553
+#define	MSM_BUS_SLAVE_GSBI1_QUP 554
+#define	MSM_BUS_SLAVE_GSBI2_QUP 555
+#define	MSM_BUS_SLAVE_GSBI3_QUP 556
+#define	MSM_BUS_SLAVE_GSBI4_QUP 557
+#define	MSM_BUS_SLAVE_GSBI5_QUP 558
+#define	MSM_BUS_SLAVE_GSBI6_QUP 559
+#define	MSM_BUS_SLAVE_GSBI7_QUP 560
+#define	MSM_BUS_SLAVE_GSBI8_QUP 561
+#define	MSM_BUS_SLAVE_GSBI9_QUP 562
+#define	MSM_BUS_SLAVE_GSBI10_QUP 563
+#define	MSM_BUS_SLAVE_GSBI11_QUP 564
+#define	MSM_BUS_SLAVE_GSBI12_QUP 565
+#define	MSM_BUS_SLAVE_EBI2_NAND 566
+#define	MSM_BUS_SLAVE_EBI2_CS0 567
+#define	MSM_BUS_SLAVE_EBI2_CS1 568
+#define	MSM_BUS_SLAVE_EBI2_CS2 569
+#define	MSM_BUS_SLAVE_EBI2_CS3 570
+#define	MSM_BUS_SLAVE_EBI2_CS4 571
+#define	MSM_BUS_SLAVE_EBI2_CS5 572
+#define	MSM_BUS_SLAVE_USB_FS1 573
+#define	MSM_BUS_SLAVE_USB_FS2 574
+#define	MSM_BUS_SLAVE_TSIF 575
+#define	MSM_BUS_SLAVE_MSM_TSSC 576
+#define	MSM_BUS_SLAVE_MSM_PDM 577
+#define	MSM_BUS_SLAVE_MSM_DIMEM 578
+#define	MSM_BUS_SLAVE_MSM_TCSR 579
+#define	MSM_BUS_SLAVE_MSM_PRNG 580
+#define	MSM_BUS_SLAVE_GSS 581
+#define	MSM_BUS_SLAVE_SATA 582
+#define	MSM_BUS_SLAVE_USB3 583
+#define	MSM_BUS_SLAVE_WCSS 584
+#define	MSM_BUS_SLAVE_OCIMEM 585
+#define	MSM_BUS_SLAVE_SNOC_OCMEM 586
+#define	MSM_BUS_SLAVE_SERVICE_SNOC 587
+#define	MSM_BUS_SLAVE_QDSS_STM 588
+#define	MSM_BUS_SLAVE_CAMERA_CFG 589
+#define	MSM_BUS_SLAVE_DISPLAY_CFG 590
+#define	MSM_BUS_SLAVE_OCMEM_CFG 591
+#define	MSM_BUS_SLAVE_CPR_CFG 592
+#define	MSM_BUS_SLAVE_CPR_XPU_CFG 593
+#define	MSM_BUS_SLAVE_MISC_CFG 594
+#define	MSM_BUS_SLAVE_MISC_XPU_CFG 595
+#define	MSM_BUS_SLAVE_VENUS_CFG 596
+#define	MSM_BUS_SLAVE_MISC_VENUS_CFG 597
+#define	MSM_BUS_SLAVE_GRAPHICS_3D_CFG 598
+#define	MSM_BUS_SLAVE_MMSS_CLK_CFG 599
+#define	MSM_BUS_SLAVE_MMSS_CLK_XPU_CFG 600
+#define	MSM_BUS_SLAVE_MNOC_MPU_CFG 601
+#define	MSM_BUS_SLAVE_ONOC_MPU_CFG 602
+#define	MSM_BUS_SLAVE_SERVICE_MNOC 603
+#define	MSM_BUS_SLAVE_OCMEM 604
+#define	MSM_BUS_SLAVE_SERVICE_ONOC 605
+#define	MSM_BUS_SLAVE_SDCC_1 606
+#define	MSM_BUS_SLAVE_SDCC_3 607
+#define	MSM_BUS_SLAVE_SDCC_2 608
+#define	MSM_BUS_SLAVE_SDCC_4 609
+#define	MSM_BUS_SLAVE_BAM_DMA 610
+#define	MSM_BUS_SLAVE_BLSP_2 611
+#define	MSM_BUS_SLAVE_USB_HSIC 612
+#define	MSM_BUS_SLAVE_BLSP_1 613
+#define	MSM_BUS_SLAVE_USB_HS 614
+#define	MSM_BUS_SLAVE_PDM 615
+#define	MSM_BUS_SLAVE_PERIPH_APU_CFG 616
+#define	MSM_BUS_SLAVE_PNOC_MPU_CFG 617
+#define	MSM_BUS_SLAVE_PRNG 618
+#define	MSM_BUS_SLAVE_SERVICE_PNOC 619
+#define	MSM_BUS_SLAVE_CLK_CTL 620
+#define	MSM_BUS_SLAVE_CNOC_MSS 621
+#define	MSM_BUS_SLAVE_SECURITY 622
+#define	MSM_BUS_SLAVE_TCSR 623
+#define	MSM_BUS_SLAVE_TLMM 624
+#define	MSM_BUS_SLAVE_CRYPTO_0_CFG 625
+#define	MSM_BUS_SLAVE_CRYPTO_1_CFG 626
+#define	MSM_BUS_SLAVE_IMEM_CFG 627
+#define	MSM_BUS_SLAVE_MESSAGE_RAM 628
+#define	MSM_BUS_SLAVE_BIMC_CFG 629
+#define	MSM_BUS_SLAVE_BOOT_ROM 630
+#define	MSM_BUS_SLAVE_CNOC_MNOC_MMSS_CFG 631
+#define	MSM_BUS_SLAVE_PMIC_ARB 632
+#define	MSM_BUS_SLAVE_SPDM_WRAPPER 633
+#define	MSM_BUS_SLAVE_DEHR_CFG 634
+#define	MSM_BUS_SLAVE_QDSS_CFG 635
+#define	MSM_BUS_SLAVE_RBCPR_CFG 636
+#define	MSM_BUS_SLAVE_RBCPR_QDSS_APU_CFG 637
+#define	MSM_BUS_SLAVE_SNOC_MPU_CFG 638
+#define	MSM_BUS_SLAVE_CNOC_ONOC_CFG 639
+#define	MSM_BUS_SLAVE_CNOC_MNOC_CFG 640
+#define	MSM_BUS_SLAVE_PNOC_CFG 641
+#define	MSM_BUS_SLAVE_SNOC_CFG 642
+#define	MSM_BUS_SLAVE_EBI1_DLL_CFG 643
+#define	MSM_BUS_SLAVE_PHY_APU_CFG 644
+#define	MSM_BUS_SLAVE_EBI1_PHY_CFG 645
+#define	MSM_BUS_SLAVE_SERVICE_CNOC 646
+#define	MSM_BUS_SLAVE_IPS_CFG 647
+#define	MSM_BUS_SLAVE_QPIC 648
+#define	MSM_BUS_SLAVE_DSI_CFG 649
+#define	MSM_BUS_SLAVE_UFS_CFG 650
+#define	MSM_BUS_SLAVE_RBCPR_CX_CFG 651
+#define	MSM_BUS_SLAVE_RBCPR_MX_CFG 652
+#define	MSM_BUS_SLAVE_PCIE_CFG 653
+#define	MSM_BUS_SLAVE_USB_PHYS_CFG 654
+#define	MSM_BUS_SLAVE_VIDEO_CAP_CFG 655
+#define	MSM_BUS_SLAVE_AVSYNC_CFG 656
+#define	MSM_BUS_SLAVE_CRYPTO_2_CFG 657
+#define	MSM_BUS_SLAVE_VPU_CFG 658
+#define	MSM_BUS_SLAVE_BCAST_CFG 659
+#define	MSM_BUS_SLAVE_KLM_CFG 660
+#define	MSM_BUS_SLAVE_GENI_IR_CFG 661
+#define	MSM_BUS_SLAVE_OCMEM_GFX 662
+#define	MSM_BUS_SLAVE_CATS_128 663
+#define	MSM_BUS_SLAVE_OCMEM_64 664
+#define	MSM_BUS_SLAVE_PCIE_0 665
+#define	MSM_BUS_SLAVE_PCIE_1 666
+#define	MSM_BUS_SLAVE_PCIE_0_CFG 667
+#define	MSM_BUS_SLAVE_PCIE_1_CFG 668
+#define	MSM_BUS_SLAVE_SRVC_MNOC 669
+#define	MSM_BUS_SLAVE_USB_HS2 670
+#define	MSM_BUS_SLAVE_AUDIO 671
+#define	MSM_BUS_SLAVE_TCU 672
+#define	MSM_BUS_SLAVE_APPSS 673
+#define	MSM_BUS_SLAVE_PCIE_PARF 674
+#define	MSM_BUS_SLAVE_USB3_PHY_CFG 675
+#define	MSM_BUS_SLAVE_IPA_CFG 676
+#define	MSM_BUS_SLAVE_A0NOC_SNOC 677
+#define	MSM_BUS_SLAVE_A1NOC_SNOC 678
+#define	MSM_BUS_SLAVE_A2NOC_SNOC 679
+#define	MSM_BUS_SLAVE_HMSS_L3 680
+#define	MSM_BUS_SLAVE_PIMEM_CFG 681
+#define	MSM_BUS_SLAVE_DCC_CFG 682
+#define	MSM_BUS_SLAVE_QDSS_RBCPR_APU_CFG 683
+#define	MSM_BUS_SLAVE_PCIE_2_CFG 684
+#define	MSM_BUS_SLAVE_PCIE20_AHB2PHY 685
+#define	MSM_BUS_SLAVE_A0NOC_CFG 686
+#define	MSM_BUS_SLAVE_A1NOC_CFG 687
+#define	MSM_BUS_SLAVE_A2NOC_CFG 688
+#define	MSM_BUS_SLAVE_A1NOC_MPU_CFG 689
+#define	MSM_BUS_SLAVE_A2NOC_MPU_CFG 690
+#define	MSM_BUS_SLAVE_A0NOC_SMMU_CFG 691
+#define	MSM_BUS_SLAVE_A1NOC_SMMU_CFG 692
+#define	MSM_BUS_SLAVE_A2NOC_SMMU_CFG 693
+#define	MSM_BUS_SLAVE_LPASS_SMMU_CFG 694
+#define	MSM_BUS_SLAVE_MMAGIC_CFG 695
+#define	MSM_BUS_SLAVE_VENUS_THROTTLE_CFG 696
+#define	MSM_BUS_SLAVE_SSC_CFG 697
+#define	MSM_BUS_SLAVE_DSA_CFG 698
+#define	MSM_BUS_SLAVE_DSA_MPU_CFG 699
+#define	MSM_BUS_SLAVE_DISPLAY_THROTTLE_CFG 700
+#define	MSM_BUS_SLAVE_SMMU_CPP_CFG 701
+#define	MSM_BUS_SLAVE_SMMU_JPEG_CFG 702
+#define	MSM_BUS_SLAVE_SMMU_MDP_CFG 703
+#define	MSM_BUS_SLAVE_SMMU_ROTATOR_CFG 704
+#define	MSM_BUS_SLAVE_SMMU_VENUS_CFG 705
+#define	MSM_BUS_SLAVE_SMMU_VFE_CFG 706
+#define	MSM_BUS_SLAVE_A0NOC_MPU_CFG 707
+#define	MSM_BUS_SLAVE_VMEM_CFG 708
+#define	MSM_BUS_SLAVE_CAMERA_THROTTLE_CFG 709
+#define	MSM_BUS_SLAVE_VMEM 710
+#define	MSM_BUS_SLAVE_AHB2PHY 711
+#define	MSM_BUS_SLAVE_PIMEM 712
+#define	MSM_BUS_SLAVE_SNOC_VMEM 713
+#define	MSM_BUS_SLAVE_PCIE_2 714
+#define	MSM_BUS_SLAVE_RBCPR_MX 715
+#define	MSM_BUS_SLAVE_RBCPR_CX 716
+#define	MSM_BUS_SLAVE_BIMC_PCNOC 717
+#define	MSM_BUS_SLAVE_PCNOC_BIMC_1 718
+#define	MSM_BUS_SLAVE_SGMII 719
+#define	MSM_BUS_SLAVE_SPMI_FETCHER 720
+#define	MSM_BUS_PNOC_SLV_6 721
+#define	MSM_BUS_SLAVE_MMSS_SMMU_CFG 722
+#define	MSM_BUS_SLAVE_WLAN 723
+#define	MSM_BUS_SLAVE_CRVIRT_A2NOC 724
+#define	MSM_BUS_SLAVE_CNOC_A2NOC 725
+#define	MSM_BUS_SLAVE_GLM 726
+#define	MSM_BUS_SLAVE_GNOC_BIMC 727
+#define	MSM_BUS_SLAVE_GNOC_SNOC 728
+#define	MSM_BUS_SLAVE_QM_CFG 729
+#define	MSM_BUS_SLAVE_TLMM_EAST 730
+#define	MSM_BUS_SLAVE_TLMM_NORTH 731
+#define	MSM_BUS_SLAVE_TLMM_WEST 732
+#define	MSM_BUS_SLAVE_SKL 733
+#define	MSM_BUS_SLAVE_LPASS_TCM	734
+#define	MSM_BUS_SLAVE_TLMM_SOUTH 735
+#define	MSM_BUS_SLAVE_TLMM_CENTER 736
+#define	MSM_BUS_MSS_NAV_CE_MPU_CFG 737
+#define	MSM_BUS_SLAVE_A2NOC_THROTTLE_CFG 738
+#define	MSM_BUS_SLAVE_CDSP 739
+#define	MSM_BUS_SLAVE_CDSP_SMMU_CFG 740
+#define	MSM_BUS_SLAVE_LPASS_MPU_CFG 741
+#define	MSM_BUS_SLAVE_CSI_PHY_CFG 742
+#define	MSM_BUS_SLAVE_LAST 743
+
+#define	MSM_BUS_SYSTEM_FPB_SLAVE_SYSTEM  MSM_BUS_SYSTEM_SLAVE_SYSTEM_FPB
+#define	MSM_BUS_CPSS_FPB_SLAVE_SYSTEM MSM_BUS_SYSTEM_SLAVE_CPSS_FPB
+
+/*
+ * IDs used in RPM messages
+ */
+#define	ICBID_MASTER_APPSS_PROC 0
+#define	ICBID_MASTER_MSS_PROC 1
+#define	ICBID_MASTER_MNOC_BIMC 2
+#define	ICBID_MASTER_SNOC_BIMC 3
+#define	ICBID_MASTER_SNOC_BIMC_0 ICBID_MASTER_SNOC_BIMC
+#define	ICBID_MASTER_CNOC_MNOC_MMSS_CFG 4
+#define	ICBID_MASTER_CNOC_MNOC_CFG 5
+#define	ICBID_MASTER_GFX3D 6
+#define	ICBID_MASTER_JPEG 7
+#define	ICBID_MASTER_MDP 8
+#define	ICBID_MASTER_MDP0 ICBID_MASTER_MDP
+#define	ICBID_MASTER_MDPS ICBID_MASTER_MDP
+#define	ICBID_MASTER_VIDEO 9
+#define	ICBID_MASTER_VIDEO_P0 ICBID_MASTER_VIDEO
+#define	ICBID_MASTER_VIDEO_P1 10
+#define	ICBID_MASTER_VFE 11
+#define	ICBID_MASTER_VFE0 ICBID_MASTER_VFE
+#define	ICBID_MASTER_CNOC_ONOC_CFG 12
+#define	ICBID_MASTER_JPEG_OCMEM 13
+#define	ICBID_MASTER_MDP_OCMEM 14
+#define	ICBID_MASTER_VIDEO_P0_OCMEM 15
+#define	ICBID_MASTER_VIDEO_P1_OCMEM 16
+#define	ICBID_MASTER_VFE_OCMEM 17
+#define	ICBID_MASTER_LPASS_AHB 18
+#define	ICBID_MASTER_QDSS_BAM 19
+#define	ICBID_MASTER_SNOC_CFG 20
+#define	ICBID_MASTER_BIMC_SNOC 21
+#define	ICBID_MASTER_BIMC_SNOC_0 ICBID_MASTER_BIMC_SNOC
+#define	ICBID_MASTER_CNOC_SNOC 22
+#define	ICBID_MASTER_CRYPTO 23
+#define	ICBID_MASTER_CRYPTO_CORE0 ICBID_MASTER_CRYPTO
+#define	ICBID_MASTER_CRYPTO_CORE1 24
+#define	ICBID_MASTER_LPASS_PROC 25
+#define	ICBID_MASTER_MSS 26
+#define	ICBID_MASTER_MSS_NAV 27
+#define	ICBID_MASTER_OCMEM_DMA 28
+#define	ICBID_MASTER_PNOC_SNOC 29
+#define	ICBID_MASTER_WCSS 30
+#define	ICBID_MASTER_QDSS_ETR 31
+#define	ICBID_MASTER_USB3 32
+#define	ICBID_MASTER_USB3_0 ICBID_MASTER_USB3
+#define	ICBID_MASTER_SDCC_1 33
+#define	ICBID_MASTER_SDCC_3 34
+#define	ICBID_MASTER_SDCC_2 35
+#define	ICBID_MASTER_SDCC_4 36
+#define	ICBID_MASTER_TSIF 37
+#define	ICBID_MASTER_BAM_DMA 38
+#define	ICBID_MASTER_BLSP_2 39
+#define	ICBID_MASTER_USB_HSIC 40
+#define	ICBID_MASTER_BLSP_1 41
+#define	ICBID_MASTER_USB_HS 42
+#define	ICBID_MASTER_USB_HS1 ICBID_MASTER_USB_HS
+#define	ICBID_MASTER_PNOC_CFG 43
+#define	ICBID_MASTER_SNOC_PNOC 44
+#define	ICBID_MASTER_RPM_INST 45
+#define	ICBID_MASTER_RPM_DATA 46
+#define	ICBID_MASTER_RPM_SYS 47
+#define	ICBID_MASTER_DEHR 48
+#define	ICBID_MASTER_QDSS_DAP 49
+#define	ICBID_MASTER_SPDM 50
+#define	ICBID_MASTER_TIC 51
+#define	ICBID_MASTER_SNOC_CNOC 52
+#define	ICBID_MASTER_GFX3D_OCMEM 53
+#define	ICBID_MASTER_GFX3D_GMEM ICBID_MASTER_GFX3D_OCMEM
+#define	ICBID_MASTER_OVIRT_SNOC 54
+#define	ICBID_MASTER_SNOC_OVIRT 55
+#define	ICBID_MASTER_SNOC_GVIRT ICBID_MASTER_SNOC_OVIRT
+#define	ICBID_MASTER_ONOC_OVIRT 56
+#define	ICBID_MASTER_USB_HS2 57
+#define	ICBID_MASTER_QPIC 58
+#define	ICBID_MASTER_IPA 59
+#define	ICBID_MASTER_DSI 60
+#define	ICBID_MASTER_MDP1 61
+#define	ICBID_MASTER_MDPE ICBID_MASTER_MDP1
+#define	ICBID_MASTER_VPU_PROC 62
+#define	ICBID_MASTER_VPU 63
+#define	ICBID_MASTER_VPU0 ICBID_MASTER_VPU
+#define	ICBID_MASTER_CRYPTO_CORE2 64
+#define	ICBID_MASTER_PCIE_0 65
+#define	ICBID_MASTER_PCIE_1 66
+#define	ICBID_MASTER_SATA 67
+#define	ICBID_MASTER_UFS 68
+#define	ICBID_MASTER_USB3_1 69
+#define	ICBID_MASTER_VIDEO_OCMEM 70
+#define	ICBID_MASTER_VPU1 71
+#define	ICBID_MASTER_VCAP 72
+#define	ICBID_MASTER_EMAC 73
+#define	ICBID_MASTER_BCAST 74
+#define	ICBID_MASTER_MMSS_PROC 75
+#define	ICBID_MASTER_SNOC_BIMC_1 76
+#define	ICBID_MASTER_SNOC_PCNOC 77
+#define	ICBID_MASTER_AUDIO 78
+#define	ICBID_MASTER_MM_INT_0 79
+#define	ICBID_MASTER_MM_INT_1 80
+#define	ICBID_MASTER_MM_INT_2 81
+#define	ICBID_MASTER_MM_INT_BIMC 82
+#define	ICBID_MASTER_MSS_INT 83
+#define	ICBID_MASTER_PCNOC_CFG 84
+#define	ICBID_MASTER_PCNOC_INT_0 85
+#define	ICBID_MASTER_PCNOC_INT_1 86
+#define	ICBID_MASTER_PCNOC_M_0 87
+#define	ICBID_MASTER_PCNOC_M_1 88
+#define	ICBID_MASTER_PCNOC_S_0 89
+#define	ICBID_MASTER_PCNOC_S_1 90
+#define	ICBID_MASTER_PCNOC_S_2 91
+#define	ICBID_MASTER_PCNOC_S_3 92
+#define	ICBID_MASTER_PCNOC_S_4 93
+#define	ICBID_MASTER_PCNOC_S_6 94
+#define	ICBID_MASTER_PCNOC_S_7 95
+#define	ICBID_MASTER_PCNOC_S_8 96
+#define	ICBID_MASTER_PCNOC_S_9 97
+#define	ICBID_MASTER_QDSS_INT 98
+#define	ICBID_MASTER_SNOC_INT_0	99
+#define	ICBID_MASTER_SNOC_INT_1 100
+#define	ICBID_MASTER_SNOC_INT_BIMC 101
+#define	ICBID_MASTER_TCU_0 102
+#define	ICBID_MASTER_TCU_1 103
+#define	ICBID_MASTER_BIMC_INT_0 104
+#define	ICBID_MASTER_BIMC_INT_1 105
+#define	ICBID_MASTER_CAMERA 106
+#define	ICBID_MASTER_RICA 107
+#define	ICBID_MASTER_SNOC_BIMC_2 108
+#define	ICBID_MASTER_BIMC_SNOC_1 109
+#define	ICBID_MASTER_A0NOC_SNOC 110
+#define	ICBID_MASTER_A1NOC_SNOC 111
+#define	ICBID_MASTER_A2NOC_SNOC 112
+#define	ICBID_MASTER_PIMEM 113
+#define	ICBID_MASTER_SNOC_VMEM 114
+#define	ICBID_MASTER_CPP 115
+#define	ICBID_MASTER_CNOC_A1NOC 116
+#define	ICBID_MASTER_PNOC_A1NOC 117
+#define	ICBID_MASTER_HMSS 118
+#define	ICBID_MASTER_PCIE_2 119
+#define	ICBID_MASTER_ROTATOR 120
+#define	ICBID_MASTER_VENUS_VMEM 121
+#define	ICBID_MASTER_DCC 122
+#define	ICBID_MASTER_MCDMA 123
+#define	ICBID_MASTER_PCNOC_INT_2 124
+#define	ICBID_MASTER_PCNOC_INT_3 125
+#define	ICBID_MASTER_PCNOC_INT_4 126
+#define	ICBID_MASTER_PCNOC_INT_5 127
+#define	ICBID_MASTER_PCNOC_INT_6 128
+#define	ICBID_MASTER_PCNOC_S_5 129
+#define	ICBID_MASTER_SENSORS_AHB 130
+#define	ICBID_MASTER_SENSORS_PROC 131
+#define	ICBID_MASTER_QSPI 132
+#define	ICBID_MASTER_VFE1 133
+#define	ICBID_MASTER_SNOC_INT_2 134
+#define	ICBID_MASTER_SMMNOC_BIMC 135
+#define	ICBID_MASTER_CRVIRT_A1NOC 136
+#define	ICBID_MASTER_XM_USB_HS1 137
+#define	ICBID_MASTER_XI_USB_HS1 138
+#define	ICBID_MASTER_PCNOC_BIMC_1 139
+#define	ICBID_MASTER_BIMC_PCNOC 140
+#define	ICBID_MASTER_XI_HSIC 141
+#define	ICBID_MASTER_SGMII  142
+#define	ICBID_MASTER_SPMI_FETCHER 143
+#define	ICBID_MASTER_GNOC_BIMC 144
+#define	ICBID_MASTER_CRVIRT_A2NOC 145
+#define	ICBID_MASTER_CNOC_A2NOC 146
+#define	ICBID_MASTER_WLAN 147
+#define	ICBID_MASTER_MSS_CE 148
+#define	ICBID_MASTER_CDSP_PROC 149
+#define	ICBID_MASTER_GNOC_SNOC 150
+
+#define	ICBID_SLAVE_EBI1 0
+#define	ICBID_SLAVE_APPSS_L2 1
+#define	ICBID_SLAVE_BIMC_SNOC 2
+#define	ICBID_SLAVE_BIMC_SNOC_0 ICBID_SLAVE_BIMC_SNOC
+#define	ICBID_SLAVE_CAMERA_CFG 3
+#define	ICBID_SLAVE_DISPLAY_CFG 4
+#define	ICBID_SLAVE_OCMEM_CFG 5
+#define	ICBID_SLAVE_CPR_CFG 6
+#define	ICBID_SLAVE_CPR_XPU_CFG 7
+#define	ICBID_SLAVE_MISC_CFG 8
+#define	ICBID_SLAVE_MISC_XPU_CFG 9
+#define	ICBID_SLAVE_VENUS_CFG 10
+#define	ICBID_SLAVE_GFX3D_CFG 11
+#define	ICBID_SLAVE_MMSS_CLK_CFG 12
+#define	ICBID_SLAVE_MMSS_CLK_XPU_CFG 13
+#define	ICBID_SLAVE_MNOC_MPU_CFG 14
+#define	ICBID_SLAVE_ONOC_MPU_CFG 15
+#define	ICBID_SLAVE_MNOC_BIMC 16
+#define	ICBID_SLAVE_SERVICE_MNOC 17
+#define	ICBID_SLAVE_OCMEM 18
+#define	ICBID_SLAVE_GMEM ICBID_SLAVE_OCMEM
+#define	ICBID_SLAVE_SERVICE_ONOC 19
+#define	ICBID_SLAVE_APPSS 20
+#define	ICBID_SLAVE_LPASS 21
+#define	ICBID_SLAVE_USB3 22
+#define	ICBID_SLAVE_USB3_0 ICBID_SLAVE_USB3
+#define	ICBID_SLAVE_WCSS 23
+#define	ICBID_SLAVE_SNOC_BIMC 24
+#define	ICBID_SLAVE_SNOC_BIMC_0 ICBID_SLAVE_SNOC_BIMC
+#define	ICBID_SLAVE_SNOC_CNOC 25
+#define	ICBID_SLAVE_IMEM 26
+#define	ICBID_SLAVE_OCIMEM ICBID_SLAVE_IMEM
+#define	ICBID_SLAVE_SNOC_OVIRT 27
+#define	ICBID_SLAVE_SNOC_GVIRT ICBID_SLAVE_SNOC_OVIRT
+#define	ICBID_SLAVE_SNOC_PNOC 28
+#define	ICBID_SLAVE_SNOC_PCNOC ICBID_SLAVE_SNOC_PNOC
+#define	ICBID_SLAVE_SERVICE_SNOC 29
+#define	ICBID_SLAVE_QDSS_STM 30
+#define	ICBID_SLAVE_SDCC_1 31
+#define	ICBID_SLAVE_SDCC_3 32
+#define	ICBID_SLAVE_SDCC_2 33
+#define	ICBID_SLAVE_SDCC_4 34
+#define	ICBID_SLAVE_TSIF 35
+#define	ICBID_SLAVE_BAM_DMA 36
+#define	ICBID_SLAVE_BLSP_2 37
+#define	ICBID_SLAVE_USB_HSIC 38
+#define	ICBID_SLAVE_BLSP_1 39
+#define	ICBID_SLAVE_USB_HS 40
+#define	ICBID_SLAVE_USB_HS1 ICBID_SLAVE_USB_HS
+#define	ICBID_SLAVE_PDM 41
+#define	ICBID_SLAVE_PERIPH_APU_CFG 42
+#define	ICBID_SLAVE_PNOC_MPU_CFG 43
+#define	ICBID_SLAVE_PRNG 44
+#define	ICBID_SLAVE_PNOC_SNOC 45
+#define	ICBID_SLAVE_PCNOC_SNOC ICBID_SLAVE_PNOC_SNOC
+#define	ICBID_SLAVE_SERVICE_PNOC 46
+#define	ICBID_SLAVE_CLK_CTL 47
+#define	ICBID_SLAVE_CNOC_MSS 48
+#define	ICBID_SLAVE_PCNOC_MSS ICBID_SLAVE_CNOC_MSS
+#define	ICBID_SLAVE_SECURITY 49
+#define	ICBID_SLAVE_TCSR 50
+#define	ICBID_SLAVE_TLMM 51
+#define	ICBID_SLAVE_CRYPTO_0_CFG 52
+#define	ICBID_SLAVE_CRYPTO_1_CFG 53
+#define	ICBID_SLAVE_IMEM_CFG 54
+#define	ICBID_SLAVE_MESSAGE_RAM 55
+#define	ICBID_SLAVE_BIMC_CFG 56
+#define	ICBID_SLAVE_BOOT_ROM 57
+#define	ICBID_SLAVE_CNOC_MNOC_MMSS_CFG 58
+#define	ICBID_SLAVE_PMIC_ARB 59
+#define	ICBID_SLAVE_SPDM_WRAPPER 60
+#define	ICBID_SLAVE_DEHR_CFG 61
+#define	ICBID_SLAVE_MPM 62
+#define	ICBID_SLAVE_QDSS_CFG 63
+#define	ICBID_SLAVE_RBCPR_CFG 64
+#define	ICBID_SLAVE_RBCPR_CX_CFG ICBID_SLAVE_RBCPR_CFG
+#define	ICBID_SLAVE_RBCPR_QDSS_APU_CFG 65
+#define	ICBID_SLAVE_CNOC_MNOC_CFG 66
+#define	ICBID_SLAVE_SNOC_MPU_CFG 67
+#define	ICBID_SLAVE_CNOC_ONOC_CFG 68
+#define	ICBID_SLAVE_PNOC_CFG 69
+#define	ICBID_SLAVE_SNOC_CFG 70
+#define	ICBID_SLAVE_EBI1_DLL_CFG 71
+#define	ICBID_SLAVE_PHY_APU_CFG 72
+#define	ICBID_SLAVE_EBI1_PHY_CFG 73
+#define	ICBID_SLAVE_RPM 74
+#define	ICBID_SLAVE_CNOC_SNOC 75
+#define	ICBID_SLAVE_SERVICE_CNOC 76
+#define	ICBID_SLAVE_OVIRT_SNOC 77
+#define	ICBID_SLAVE_OVIRT_OCMEM 78
+#define	ICBID_SLAVE_USB_HS2 79
+#define	ICBID_SLAVE_QPIC 80
+#define	ICBID_SLAVE_IPS_CFG 81
+#define	ICBID_SLAVE_DSI_CFG 82
+#define	ICBID_SLAVE_USB3_1 83
+#define	ICBID_SLAVE_PCIE_0 84
+#define	ICBID_SLAVE_PCIE_1 85
+#define	ICBID_SLAVE_PSS_SMMU_CFG 86
+#define	ICBID_SLAVE_CRYPTO_2_CFG 87
+#define	ICBID_SLAVE_PCIE_0_CFG 88
+#define	ICBID_SLAVE_PCIE_1_CFG 89
+#define	ICBID_SLAVE_SATA_CFG 90
+#define	ICBID_SLAVE_SPSS_GENI_IR 91
+#define	ICBID_SLAVE_UFS_CFG 92
+#define	ICBID_SLAVE_AVSYNC_CFG 93
+#define	ICBID_SLAVE_VPU_CFG 94
+#define	ICBID_SLAVE_USB_PHY_CFG 95
+#define	ICBID_SLAVE_RBCPR_MX_CFG 96
+#define	ICBID_SLAVE_PCIE_PARF 97
+#define	ICBID_SLAVE_VCAP_CFG 98
+#define	ICBID_SLAVE_EMAC_CFG 99
+#define	ICBID_SLAVE_BCAST_CFG 100
+#define	ICBID_SLAVE_KLM_CFG 101
+#define	ICBID_SLAVE_DISPLAY_PWM 102
+#define	ICBID_SLAVE_GENI 103
+#define	ICBID_SLAVE_SNOC_BIMC_1 104
+#define	ICBID_SLAVE_AUDIO 105
+#define	ICBID_SLAVE_CATS_0 106
+#define	ICBID_SLAVE_CATS_1 107
+#define	ICBID_SLAVE_MM_INT_0 108
+#define	ICBID_SLAVE_MM_INT_1 109
+#define	ICBID_SLAVE_MM_INT_2 110
+#define	ICBID_SLAVE_MM_INT_BIMC 111
+#define	ICBID_SLAVE_MMU_MODEM_XPU_CFG 112
+#define	ICBID_SLAVE_MSS_INT 113
+#define	ICBID_SLAVE_PCNOC_INT_0 114
+#define	ICBID_SLAVE_PCNOC_INT_1 115
+#define	ICBID_SLAVE_PCNOC_M_0 116
+#define	ICBID_SLAVE_PCNOC_M_1 117
+#define	ICBID_SLAVE_PCNOC_S_0 118
+#define	ICBID_SLAVE_PCNOC_S_1 119
+#define	ICBID_SLAVE_PCNOC_S_2 120
+#define	ICBID_SLAVE_PCNOC_S_3 121
+#define	ICBID_SLAVE_PCNOC_S_4 122
+#define	ICBID_SLAVE_PCNOC_S_6 123
+#define	ICBID_SLAVE_PCNOC_S_7 124
+#define	ICBID_SLAVE_PCNOC_S_8 125
+#define	ICBID_SLAVE_PCNOC_S_9 126
+#define	ICBID_SLAVE_PRNG_XPU_CFG 127
+#define	ICBID_SLAVE_QDSS_INT 128
+#define	ICBID_SLAVE_RPM_XPU_CFG 129
+#define	ICBID_SLAVE_SNOC_INT_0 130
+#define	ICBID_SLAVE_SNOC_INT_1 131
+#define	ICBID_SLAVE_SNOC_INT_BIMC 132
+#define	ICBID_SLAVE_TCU 133
+#define	ICBID_SLAVE_BIMC_INT_0 134
+#define	ICBID_SLAVE_BIMC_INT_1 135
+#define	ICBID_SLAVE_RICA_CFG 136
+#define	ICBID_SLAVE_SNOC_BIMC_2 137
+#define	ICBID_SLAVE_BIMC_SNOC_1 138
+#define	ICBID_SLAVE_PNOC_A1NOC 139
+#define	ICBID_SLAVE_SNOC_VMEM 140
+#define	ICBID_SLAVE_A0NOC_SNOC 141
+#define	ICBID_SLAVE_A1NOC_SNOC 142
+#define	ICBID_SLAVE_A2NOC_SNOC 143
+#define	ICBID_SLAVE_A0NOC_CFG 144
+#define	ICBID_SLAVE_A0NOC_MPU_CFG 145
+#define	ICBID_SLAVE_A0NOC_SMMU_CFG 146
+#define	ICBID_SLAVE_A1NOC_CFG 147
+#define	ICBID_SLAVE_A1NOC_MPU_CFG 148
+#define	ICBID_SLAVE_A1NOC_SMMU_CFG 149
+#define	ICBID_SLAVE_A2NOC_CFG 150
+#define	ICBID_SLAVE_A2NOC_MPU_CFG 151
+#define	ICBID_SLAVE_A2NOC_SMMU_CFG 152
+#define	ICBID_SLAVE_AHB2PHY 153
+#define	ICBID_SLAVE_CAMERA_THROTTLE_CFG 154
+#define	ICBID_SLAVE_DCC_CFG 155
+#define	ICBID_SLAVE_DISPLAY_THROTTLE_CFG 156
+#define	ICBID_SLAVE_DSA_CFG 157
+#define	ICBID_SLAVE_DSA_MPU_CFG 158
+#define	ICBID_SLAVE_SSC_MPU_CFG 159
+#define	ICBID_SLAVE_HMSS_L3 160
+#define	ICBID_SLAVE_LPASS_SMMU_CFG 161
+#define	ICBID_SLAVE_MMAGIC_CFG 162
+#define	ICBID_SLAVE_PCIE20_AHB2PHY 163
+#define	ICBID_SLAVE_PCIE_2 164
+#define	ICBID_SLAVE_PCIE_2_CFG 165
+#define	ICBID_SLAVE_PIMEM 166
+#define	ICBID_SLAVE_PIMEM_CFG 167
+#define	ICBID_SLAVE_QDSS_RBCPR_APU_CFG 168
+#define	ICBID_SLAVE_RBCPR_CX 169
+#define	ICBID_SLAVE_RBCPR_MX 170
+#define	ICBID_SLAVE_SMMU_CPP_CFG 171
+#define	ICBID_SLAVE_SMMU_JPEG_CFG 172
+#define	ICBID_SLAVE_SMMU_MDP_CFG 173
+#define	ICBID_SLAVE_SMMU_ROTATOR_CFG 174
+#define	ICBID_SLAVE_SMMU_VENUS_CFG 175
+#define	ICBID_SLAVE_SMMU_VFE_CFG 176
+#define	ICBID_SLAVE_SSC_CFG 177
+#define	ICBID_SLAVE_VENUS_THROTTLE_CFG 178
+#define	ICBID_SLAVE_VMEM 179
+#define	ICBID_SLAVE_VMEM_CFG 180
+#define	ICBID_SLAVE_QDSS_MPU_CFG 181
+#define	ICBID_SLAVE_USB3_PHY_CFG 182
+#define	ICBID_SLAVE_IPA_CFG 183
+#define	ICBID_SLAVE_PCNOC_INT_2 184
+#define	ICBID_SLAVE_PCNOC_INT_3 185
+#define	ICBID_SLAVE_PCNOC_INT_4 186
+#define	ICBID_SLAVE_PCNOC_INT_5 187
+#define	ICBID_SLAVE_PCNOC_INT_6 188
+#define	ICBID_SLAVE_PCNOC_S_5 189
+#define	ICBID_SLAVE_QSPI 190
+#define	ICBID_SLAVE_A1NOC_MS_MPU_CFG 191
+#define	ICBID_SLAVE_A2NOC_MS_MPU_CFG 192
+#define	ICBID_SLAVE_MODEM_Q6_SMMU_CFG 193
+#define	ICBID_SLAVE_MSS_MPU_CFG 194
+#define	ICBID_SLAVE_MSS_PROC_MS_MPU_CFG 195
+#define	ICBID_SLAVE_SKL 196
+#define	ICBID_SLAVE_SNOC_INT_2 197
+#define	ICBID_SLAVE_SMMNOC_BIMC 198
+#define	ICBID_SLAVE_CRVIRT_A1NOC 199
+#define	ICBID_SLAVE_SGMII	 200
+#define	ICBID_SLAVE_QHS4_APPS	 201
+#define	ICBID_SLAVE_BIMC_PCNOC   202
+#define	ICBID_SLAVE_PCNOC_BIMC_1 203
+#define	ICBID_SLAVE_SPMI_FETCHER 204
+#define	ICBID_SLAVE_MMSS_SMMU_CFG 205
+#define	ICBID_SLAVE_WLAN 206
+#define	ICBID_SLAVE_CRVIRT_A2NOC 207
+#define	ICBID_SLAVE_CNOC_A2NOC 208
+#define	ICBID_SLAVE_GLM 209
+#define	ICBID_SLAVE_GNOC_BIMC 210
+#define	ICBID_SLAVE_GNOC_SNOC 211
+#define	ICBID_SLAVE_QM_CFG 212
+#define	ICBID_SLAVE_TLMM_EAST 213
+#define	ICBID_SLAVE_TLMM_NORTH 214
+#define	ICBID_SLAVE_TLMM_WEST 215
+#define	ICBID_SLAVE_LPASS_TCM	216
+#define	ICBID_SLAVE_TLMM_SOUTH	217
+#define	ICBID_SLAVE_TLMM_CENTER	218
+#define	ICBID_SLAVE_MSS_NAV_CE_MPU_CFG	219
+#define	ICBID_SLAVE_A2NOC_THROTTLE_CFG	220
+#define	ICBID_SLAVE_CDSP	221
+#define	ICBID_SLAVE_CDSP_SMMU_CFG	222
+#define	ICBID_SLAVE_LPASS_MPU_CFG	223
+#define	ICBID_SLAVE_CSI_PHY_CFG	224
+#endif
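
The MSM_BUS_MASTER_*/MSM_BUS_SLAVE_* and ICBID_* constants above exist so that device trees can name bus-scaling endpoints symbolically. A minimal sketch of a client node using them, assuming the msm_bus_scale client binding this kernel family ships (MSM_BUS_MASTER_AMPSS_M0 is defined earlier in this header, outside the hunk shown; the node name and bandwidth figures are purely illustrative):

	example-client {
		qcom,msm-bus,name = "example-client";
		qcom,msm-bus,num-cases = <2>;
		qcom,msm-bus,num-paths = <1>;
		/* each vector is <master slave ab-KBps ib-KBps> */
		qcom,msm-bus,vectors-KBps =
			<MSM_BUS_MASTER_AMPSS_M0 MSM_BUS_SLAVE_OCMEM 0 0>,
			<MSM_BUS_MASTER_AMPSS_M0 MSM_BUS_SLAVE_OCMEM 0 800000>;
	};
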
diff -Nruw linux-4.4.115-fbx/arch/powerpc/boot/dts/include/dt-bindings/msm/msm-bus-rule-ops.h linux-4.4.115-fbx/arch/powerpc/boot/dts/include/dt-bindings/msm/msm-bus-rule-ops.h
--- linux-4.4.115-fbx/arch/powerpc/boot/dts/include/dt-bindings/msm/msm-bus-rule-ops.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/arch/powerpc/boot/dts/include/dt-bindings/msm/msm-bus-rule-ops.h	2019-01-22 16:16:28.179288750 +0100
@@ -0,0 +1,34 @@
+/* Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MSM_BUS_RULE_OPS_H
+#define __MSM_BUS_RULE_OPS_H
+
+#define FLD_IB	0
+#define FLD_AB	1
+#define FLD_CLK	2
+
+#define OP_LE	0
+#define OP_LT	1
+#define OP_GE	2
+#define OP_GT	3
+#define OP_NOOP	4
+
+#define RULE_STATE_NOT_APPLIED	0
+#define RULE_STATE_APPLIED	1
+
+#define THROTTLE_ON	0
+#define THROTTLE_OFF	1
+#define THROTTLE_REG	2
+
+
+#endif
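
The FLD_*, OP_* and THROTTLE_* values above parameterize static bus rules of the form "if <field> <op> <threshold>, apply <throttle mode>". A hedged sketch of one rule node; the qcom,* property names and the &mas_apps_proc phandle are recalled from the msm bus-rules binding and should be treated as assumptions:

	qcom,node-rule {
		/* throttle when the APPS master's instantaneous BW crosses the
		 * threshold (threshold assumed to be in the field's units, KBps) */
		qcom,src-nodes = <&mas_apps_proc>;
		qcom,src-field = <FLD_IB>;
		qcom,src-op = <OP_GE>;
		qcom,thresh = <1000000>;
		qcom,mode = <THROTTLE_ON>;
	};
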
diff -Nruw linux-4.4.115-fbx/arch/powerpc/boot/dts/include/dt-bindings/msm/pm.h linux-4.4.115-fbx/arch/powerpc/boot/dts/include/dt-bindings/msm/pm.h
--- linux-4.4.115-fbx/arch/powerpc/boot/dts/include/dt-bindings/msm/pm.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/arch/powerpc/boot/dts/include/dt-bindings/msm/pm.h	2019-01-22 16:16:28.179288750 +0100
@@ -0,0 +1,25 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __DT_MSM_PM_H__
+#define __DT_MSM_PM_H__
+
+#define LPM_RESET_LVL_NONE	0
+#define LPM_RESET_LVL_RET	1
+#define LPM_RESET_LVL_GDHS	2
+#define LPM_RESET_LVL_PC	3
+
+#define LPM_AFF_LVL_CPU		0
+#define LPM_AFF_LVL_L2		1
+#define LPM_AFF_LVL_CCI		2
+
+#endif
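
LPM_RESET_LVL_* and LPM_AFF_LVL_* feed the lpm-levels low-power-mode tables: the reset level says how much state a domain loses at that idle level. A sketch of one cluster-level node, assuming the lpm-levels binding used by this kernel family (node name, label and reg value are illustrative):

	qcom,pm-cluster-level@1 {
		reg = <1>;
		label = "l2-gdhs";
		/* L2 logic is gated but memory is retained: GDHS level */
		qcom,reset-level = <LPM_RESET_LVL_GDHS>;
	};
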
diff -Nruw linux-4.4.115-fbx/arch/powerpc/boot/dts/include/dt-bindings/msm/power-on.h linux-4.4.115-fbx/arch/powerpc/boot/dts/include/dt-bindings/msm/power-on.h
--- linux-4.4.115-fbx/arch/powerpc/boot/dts/include/dt-bindings/msm/power-on.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/arch/powerpc/boot/dts/include/dt-bindings/msm/power-on.h	2019-01-22 16:16:28.179288750 +0100
@@ -0,0 +1,24 @@
+/* Copyright (c) 2015, 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MSM_POWER_ON_H__
+#define __MSM_POWER_ON_H__
+
+#define PON_POWER_OFF_RESERVED		0x00
+#define PON_POWER_OFF_WARM_RESET	0x01
+#define PON_POWER_OFF_SHUTDOWN		0x04
+#define PON_POWER_OFF_DVDD_SHUTDOWN	0x05
+#define PON_POWER_OFF_HARD_RESET	0x07
+#define PON_POWER_OFF_DVDD_HARD_RESET	0x08
+#define PON_POWER_OFF_MAX_TYPE		0x10
+
+#endif
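
The PON_POWER_OFF_* codes select what the PMIC power-on (PON) block does when a stage-2 reset timer expires. A sketch of a power-key PON child node, assuming the qpnp-power-on binding (the node name and timer values are illustrative):

	qcom,pon_1 {
		qcom,pon-type = <0>;	/* KPDPWR, i.e. the power key */
		qcom,s1-timer = <10256>;
		qcom,s2-timer = <2000>;
		/* on S2 expiry, perform a full hard reset */
		qcom,s2-type = <PON_POWER_OFF_HARD_RESET>;
	};
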
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/arch/powerpc/boot/dts/include/dt-bindings/regulator/qcom,rpm-smd-regulator.h	2019-01-22 16:16:28.183288787 +0100
@@ -0,0 +1,28 @@
+/* Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __QCOM_RPM_SMD_REGULATOR_H
+#define __QCOM_RPM_SMD_REGULATOR_H
+
+#define RPM_SMD_REGULATOR_LEVEL_NONE		0
+#define RPM_SMD_REGULATOR_LEVEL_RETENTION	16
+#define RPM_SMD_REGULATOR_LEVEL_RETENTION_PLUS	32
+#define RPM_SMD_REGULATOR_LEVEL_MIN_SVS		48
+#define RPM_SMD_REGULATOR_LEVEL_LOW_SVS		64
+#define RPM_SMD_REGULATOR_LEVEL_SVS		128
+#define RPM_SMD_REGULATOR_LEVEL_SVS_PLUS	192
+#define RPM_SMD_REGULATOR_LEVEL_NOM		256
+#define RPM_SMD_REGULATOR_LEVEL_NOM_PLUS	320
+#define RPM_SMD_REGULATOR_LEVEL_TURBO		384
+#define RPM_SMD_REGULATOR_LEVEL_BINNING		512
+
+#endif
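
RPM_SMD_REGULATOR_LEVEL_* are corner levels, not microvolts: a level-controlled supply reuses the standard regulator microvolt properties to carry these opaque level codes, and the RPM maps them to real voltages. A minimal sketch (the &pm8998_s9_level label is an assumption about the board DTS):

	&pm8998_s9_level {
		/* values are RPM corner levels, not literal microvolts */
		regulator-min-microvolt = <RPM_SMD_REGULATOR_LEVEL_RETENTION>;
		regulator-max-microvolt = <RPM_SMD_REGULATOR_LEVEL_TURBO>;
	};
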
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/config	2019-10-29 09:38:15.536162274 +0100
@@ -0,0 +1,4992 @@
+#
+# Automatically generated file; DO NOT EDIT.
+# Linux/arm64 4.4.115 Kernel Configuration
+#
+CONFIG_ARM64=y
+CONFIG_64BIT=y
+CONFIG_ARCH_PHYS_ADDR_T_64BIT=y
+CONFIG_MMU=y
+CONFIG_ARCH_MMAP_RND_BITS_MIN=18
+CONFIG_ARCH_MMAP_RND_BITS_MAX=24
+CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MIN=11
+CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MAX=16
+CONFIG_ILLEGAL_POINTER_VALUE=0xdead000000000000
+CONFIG_STACKTRACE_SUPPORT=y
+CONFIG_LOCKDEP_SUPPORT=y
+CONFIG_TRACE_IRQFLAGS_SUPPORT=y
+CONFIG_RWSEM_XCHGADD_ALGORITHM=y
+CONFIG_GENERIC_BUG=y
+CONFIG_GENERIC_BUG_RELATIVE_POINTERS=y
+CONFIG_GENERIC_HWEIGHT=y
+CONFIG_GENERIC_CSUM=y
+CONFIG_GENERIC_CALIBRATE_DELAY=y
+CONFIG_ZONE_DMA=y
+CONFIG_HAVE_GENERIC_RCU_GUP=y
+CONFIG_ARCH_DMA_ADDR_T_64BIT=y
+CONFIG_NEED_DMA_MAP_STATE=y
+CONFIG_NEED_SG_DMA_LENGTH=y
+CONFIG_SMP=y
+CONFIG_ARM64_DMA_USE_IOMMU=y
+CONFIG_ARM64_DMA_IOMMU_ALIGNMENT=9
+CONFIG_SWIOTLB=y
+CONFIG_IOMMU_HELPER=y
+CONFIG_KERNEL_MODE_NEON=y
+CONFIG_FIX_EARLYCON_MEM=y
+CONFIG_PGTABLE_LEVELS=3
+# CONFIG_MSM_GVM_QUIN is not set
+CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
+CONFIG_IRQ_WORK=y
+CONFIG_BUILDTIME_EXTABLE_SORT=y
+CONFIG_THREAD_INFO_IN_TASK=y
+
+#
+# General setup
+#
+CONFIG_INIT_ENV_ARG_LIMIT=32
+CONFIG_CROSS_COMPILE="/opt/toolchains/aarch64-glibc-2.25-gcc-6.3.0-binutils-2.28-gdb-7.12.1/bin/aarch64-linux-gnu-"
+# CONFIG_COMPILE_TEST is not set
+CONFIG_LOCALVERSION=""
+# CONFIG_LOCALVERSION_AUTO is not set
+CONFIG_DEFAULT_HOSTNAME="fbx7hd"
+# CONFIG_SWAP is not set
+CONFIG_SYSVIPC=y
+CONFIG_SYSVIPC_SYSCTL=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_POSIX_MQUEUE_SYSCTL=y
+# CONFIG_CROSS_MEMORY_ATTACH is not set
+# CONFIG_FHANDLE is not set
+# CONFIG_USELIB is not set
+CONFIG_AUDIT=y
+CONFIG_HAVE_ARCH_AUDITSYSCALL=y
+CONFIG_AUDITSYSCALL=y
+CONFIG_AUDIT_WATCH=y
+CONFIG_AUDIT_TREE=y
+
+#
+# IRQ subsystem
+#
+CONFIG_GENERIC_IRQ_PROBE=y
+CONFIG_GENERIC_IRQ_SHOW=y
+CONFIG_GENERIC_IRQ_SHOW_LEVEL=y
+CONFIG_GENERIC_IRQ_MIGRATION=y
+CONFIG_HARDIRQS_SW_RESEND=y
+CONFIG_IRQ_DOMAIN=y
+CONFIG_IRQ_DOMAIN_HIERARCHY=y
+CONFIG_GENERIC_MSI_IRQ=y
+CONFIG_GENERIC_MSI_IRQ_DOMAIN=y
+CONFIG_HANDLE_DOMAIN_IRQ=y
+# CONFIG_IRQ_DOMAIN_DEBUG is not set
+CONFIG_IRQ_FORCED_THREADING=y
+CONFIG_SPARSE_IRQ=y
+CONFIG_GENERIC_TIME_VSYSCALL=y
+CONFIG_GENERIC_CLOCKEVENTS=y
+CONFIG_ARCH_HAS_TICK_BROADCAST=y
+CONFIG_GENERIC_CLOCKEVENTS_BROADCAST=y
+
+#
+# Timers subsystem
+#
+CONFIG_TICK_ONESHOT=y
+CONFIG_NO_HZ_COMMON=y
+# CONFIG_HZ_PERIODIC is not set
+CONFIG_NO_HZ_IDLE=y
+# CONFIG_NO_HZ_FULL is not set
+# CONFIG_NO_HZ is not set
+CONFIG_HIGH_RES_TIMERS=y
+
+#
+# CPU/Task time and stats accounting
+#
+# CONFIG_TICK_CPU_ACCOUNTING is not set
+# CONFIG_VIRT_CPU_ACCOUNTING_GEN is not set
+CONFIG_IRQ_TIME_ACCOUNTING=y
+# CONFIG_SCHED_WALT is not set
+# CONFIG_BSD_PROCESS_ACCT is not set
+CONFIG_TASKSTATS=y
+CONFIG_TASK_DELAY_ACCT=y
+CONFIG_TASK_XACCT=y
+CONFIG_TASK_IO_ACCOUNTING=y
+
+#
+# RCU Subsystem
+#
+CONFIG_PREEMPT_RCU=y
+CONFIG_RCU_EXPERT=y
+CONFIG_SRCU=y
+# CONFIG_TASKS_RCU is not set
+CONFIG_RCU_STALL_COMMON=y
+CONFIG_RCU_FANOUT=64
+CONFIG_RCU_FANOUT_LEAF=16
+CONFIG_RCU_FAST_NO_HZ=y
+# CONFIG_TREE_RCU_TRACE is not set
+# CONFIG_RCU_BOOST is not set
+CONFIG_RCU_KTHREAD_PRIO=0
+CONFIG_RCU_NOCB_CPU=y
+# CONFIG_RCU_NOCB_CPU_NONE is not set
+# CONFIG_RCU_NOCB_CPU_ZERO is not set
+CONFIG_RCU_NOCB_CPU_ALL=y
+# CONFIG_RCU_EXPEDITE_BOOT is not set
+CONFIG_BUILD_BIN2C=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_LOG_BUF_SHIFT=17
+CONFIG_LOG_CPU_MAX_BUF_SHIFT=17
+# CONFIG_FBX_DECRYPT_INITRD is not set
+CONFIG_GENERIC_SCHED_CLOCK=y
+CONFIG_CGROUPS=y
+# CONFIG_CGROUP_DEBUG is not set
+CONFIG_CGROUP_FREEZER=y
+# CONFIG_CGROUP_PIDS is not set
+CONFIG_CGROUP_DEVICE=y
+CONFIG_CPUSETS=y
+CONFIG_PROC_PID_CPUSET=y
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_CGROUP_SCHEDTUNE=y
+# CONFIG_MEMCG is not set
+# CONFIG_CGROUP_PERF is not set
+CONFIG_CGROUP_SCHED=y
+CONFIG_FAIR_GROUP_SCHED=y
+# CONFIG_CFS_BANDWIDTH is not set
+CONFIG_RT_GROUP_SCHED=y
+# CONFIG_BLK_CGROUP is not set
+CONFIG_SCHED_HMP=y
+CONFIG_SCHED_HMP_CSTATE_AWARE=y
+CONFIG_SCHED_CORE_CTL=y
+# CONFIG_CHECKPOINT_RESTORE is not set
+CONFIG_NAMESPACES=y
+CONFIG_UTS_NS=y
+CONFIG_IPC_NS=y
+# CONFIG_USER_NS is not set
+CONFIG_PID_NS=y
+CONFIG_NET_NS=y
+CONFIG_SCHED_AUTOGROUP=y
+CONFIG_SCHED_TUNE=y
+# CONFIG_DEFAULT_USE_ENERGY_AWARE is not set
+# CONFIG_SYSFS_DEPRECATED is not set
+CONFIG_RELAY=y
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_INITRAMFS_SOURCE=""
+# CONFIG_RD_GZIP is not set
+# CONFIG_RD_BZIP2 is not set
+# CONFIG_RD_LZMA is not set
+# CONFIG_RD_XZ is not set
+# CONFIG_RD_LZO is not set
+# CONFIG_RD_LZ4 is not set
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
+CONFIG_SYSCTL=y
+CONFIG_ANON_INODES=y
+CONFIG_HAVE_UID16=y
+CONFIG_SYSCTL_EXCEPTION_TRACE=y
+CONFIG_BPF=y
+CONFIG_EXPERT=y
+CONFIG_UID16=y
+CONFIG_MULTIUSER=y
+# CONFIG_SGETMASK_SYSCALL is not set
+# CONFIG_SYSFS_SYSCALL is not set
+# CONFIG_SYSCTL_SYSCALL is not set
+CONFIG_KALLSYMS=y
+# CONFIG_KALLSYMS_ALL is not set
+CONFIG_PRINTK=y
+CONFIG_BUG=y
+CONFIG_ELF_CORE=y
+CONFIG_BASE_FULL=y
+CONFIG_FUTEX=y
+CONFIG_EPOLL=y
+CONFIG_SIGNALFD=y
+CONFIG_TIMERFD=y
+CONFIG_EVENTFD=y
+# CONFIG_BPF_SYSCALL is not set
+CONFIG_SHMEM=y
+# CONFIG_AIO is not set
+CONFIG_ADVISE_SYSCALLS=y
+# CONFIG_USERFAULTFD is not set
+CONFIG_PCI_QUIRKS=y
+# CONFIG_MEMBARRIER is not set
+CONFIG_EMBEDDED=y
+CONFIG_HAVE_PERF_EVENTS=y
+CONFIG_PERF_USE_VMALLOC=y
+
+#
+# Kernel Performance Events And Counters
+#
+CONFIG_PERF_EVENTS=y
+# CONFIG_DEBUG_PERF_USE_VMALLOC is not set
+CONFIG_VM_EVENT_COUNTERS=y
+CONFIG_SLUB_DEBUG=y
+# CONFIG_COMPAT_BRK is not set
+# CONFIG_SLAB is not set
+CONFIG_SLUB=y
+# CONFIG_SLOB is not set
+CONFIG_SLUB_CPU_PARTIAL=y
+# CONFIG_SYSTEM_DATA_VERIFICATION is not set
+# CONFIG_PROFILING is not set
+CONFIG_TRACEPOINTS=y
+# CONFIG_KPROBES is not set
+# CONFIG_JUMP_LABEL is not set
+# CONFIG_UPROBES is not set
+# CONFIG_HAVE_64BIT_ALIGNED_ACCESS is not set
+CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS=y
+CONFIG_HAVE_KPROBES=y
+CONFIG_HAVE_KRETPROBES=y
+CONFIG_HAVE_ARCH_TRACEHOOK=y
+CONFIG_HAVE_DMA_ATTRS=y
+CONFIG_HAVE_DMA_CONTIGUOUS=y
+CONFIG_GENERIC_SMP_IDLE_THREAD=y
+CONFIG_GENERIC_IDLE_POLL_SETUP=y
+CONFIG_HAVE_REGS_AND_STACK_ACCESS_API=y
+CONFIG_HAVE_CLK=y
+CONFIG_HAVE_DMA_API_DEBUG=y
+CONFIG_HAVE_PERF_REGS=y
+CONFIG_HAVE_PERF_USER_STACK_DUMP=y
+CONFIG_HAVE_ARCH_JUMP_LABEL=y
+CONFIG_HAVE_RCU_TABLE_FREE=y
+CONFIG_HAVE_ALIGNED_STRUCT_PAGE=y
+CONFIG_HAVE_CMPXCHG_LOCAL=y
+CONFIG_HAVE_CMPXCHG_DOUBLE=y
+CONFIG_ARCH_WANT_COMPAT_IPC_PARSE_VERSION=y
+CONFIG_HAVE_ARCH_SECCOMP_FILTER=y
+CONFIG_SECCOMP_FILTER=y
+CONFIG_HAVE_CC_STACKPROTECTOR=y
+CONFIG_CC_STACKPROTECTOR=y
+# CONFIG_CC_STACKPROTECTOR_NONE is not set
+CONFIG_CC_STACKPROTECTOR_REGULAR=y
+# CONFIG_CC_STACKPROTECTOR_STRONG is not set
+CONFIG_HAVE_CONTEXT_TRACKING=y
+CONFIG_HAVE_VIRT_CPU_ACCOUNTING_GEN=y
+CONFIG_HAVE_IRQ_TIME_ACCOUNTING=y
+CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE=y
+CONFIG_HAVE_ARCH_HUGE_VMAP=y
+CONFIG_MODULES_USE_ELF_RELA=y
+CONFIG_ARCH_HAS_ELF_RANDOMIZE=y
+CONFIG_HAVE_ARCH_MMAP_RND_BITS=y
+CONFIG_ARCH_MMAP_RND_BITS=18
+CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS=y
+CONFIG_ARCH_MMAP_RND_COMPAT_BITS=16
+CONFIG_CLONE_BACKWARDS=y
+CONFIG_OLD_SIGSUSPEND3=y
+CONFIG_COMPAT_OLD_SIGACTION=y
+
+#
+# GCOV-based kernel profiling
+#
+# CONFIG_GCOV_KERNEL is not set
+CONFIG_ARCH_HAS_GCOV_PROFILE_ALL=y
+CONFIG_HAVE_GENERIC_DMA_COHERENT=y
+CONFIG_SLABINFO=y
+CONFIG_RT_MUTEXES=y
+CONFIG_BASE_SMALL=0
+CONFIG_MODULES=y
+# CONFIG_MODULE_FORCE_LOAD is not set
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODULE_FORCE_UNLOAD=y
+CONFIG_MODVERSIONS=y
+# CONFIG_MODULE_SRCVERSION_ALL is not set
+# CONFIG_MODULE_SIG is not set
+# CONFIG_MODULE_COMPRESS is not set
+CONFIG_MODULES_TREE_LOOKUP=y
+CONFIG_BLOCK=y
+# CONFIG_BLK_DEV_BSG is not set
+# CONFIG_BLK_DEV_BSGLIB is not set
+# CONFIG_BLK_DEV_INTEGRITY is not set
+# CONFIG_BLK_CMDLINE_PARSER is not set
+# CONFIG_BLOCK_PERF_FRAMEWORK is not set
+
+#
+# Partition Types
+#
+CONFIG_PARTITION_ADVANCED=y
+# CONFIG_ACORN_PARTITION is not set
+# CONFIG_AIX_PARTITION is not set
+# CONFIG_OSF_PARTITION is not set
+# CONFIG_AMIGA_PARTITION is not set
+# CONFIG_ATARI_PARTITION is not set
+# CONFIG_MAC_PARTITION is not set
+CONFIG_MSDOS_PARTITION=y
+# CONFIG_BSD_DISKLABEL is not set
+# CONFIG_MINIX_SUBPARTITION is not set
+# CONFIG_SOLARIS_X86_PARTITION is not set
+# CONFIG_UNIXWARE_DISKLABEL is not set
+# CONFIG_LDM_PARTITION is not set
+# CONFIG_SGI_PARTITION is not set
+# CONFIG_ULTRIX_PARTITION is not set
+# CONFIG_SUN_PARTITION is not set
+# CONFIG_KARMA_PARTITION is not set
+CONFIG_EFI_PARTITION=y
+# CONFIG_SYSV68_PARTITION is not set
+# CONFIG_CMDLINE_PARTITION is not set
+CONFIG_BLOCK_COMPAT=y
+
+#
+# IO Schedulers
+#
+CONFIG_IOSCHED_NOOP=y
+# CONFIG_IOSCHED_TEST is not set
+CONFIG_IOSCHED_DEADLINE=y
+CONFIG_IOSCHED_CFQ=y
+# CONFIG_DEFAULT_DEADLINE is not set
+CONFIG_DEFAULT_CFQ=y
+# CONFIG_DEFAULT_NOOP is not set
+CONFIG_DEFAULT_IOSCHED="cfq"
+CONFIG_ASN1=y
+CONFIG_UNINLINE_SPIN_UNLOCK=y
+CONFIG_ARCH_SUPPORTS_ATOMIC_RMW=y
+CONFIG_MUTEX_SPIN_ON_OWNER=y
+CONFIG_RWSEM_SPIN_ON_OWNER=y
+CONFIG_LOCK_SPIN_ON_OWNER=y
+CONFIG_FREEZER=y
+
+#
+# Platform selection
+#
+# CONFIG_ARCH_BCM_IPROC is not set
+# CONFIG_ARCH_BERLIN is not set
+# CONFIG_ARCH_EXYNOS7 is not set
+# CONFIG_ARCH_LAYERSCAPE is not set
+# CONFIG_ARCH_HISI is not set
+# CONFIG_ARCH_MEDIATEK is not set
+CONFIG_ARCH_QCOM=y
+# CONFIG_ARCH_MSM8996 is not set
+CONFIG_ARCH_MSM8998=y
+# CONFIG_ARCH_MSMHAMSTER is not set
+# CONFIG_ARCH_SDM660 is not set
+# CONFIG_ARCH_SDM630 is not set
+# CONFIG_ARCH_ROCKCHIP is not set
+# CONFIG_ARCH_SEATTLE is not set
+# CONFIG_ARCH_STRATIX10 is not set
+# CONFIG_ARCH_TEGRA is not set
+# CONFIG_ARCH_SPRD is not set
+# CONFIG_ARCH_THUNDER is not set
+# CONFIG_ARCH_VEXPRESS is not set
+# CONFIG_ARCH_XGENE is not set
+# CONFIG_ARCH_ZYNQMP is not set
+
+#
+# Bus support
+#
+CONFIG_PCI=y
+CONFIG_PCI_DOMAINS=y
+CONFIG_PCI_DOMAINS_GENERIC=y
+CONFIG_PCI_SYSCALL=y
+CONFIG_PCI_BUS_ADDR_T_64BIT=y
+CONFIG_PCI_MSI=y
+CONFIG_PCI_MSI_IRQ_DOMAIN=y
+# CONFIG_PCI_DEBUG is not set
+# CONFIG_PCI_REALLOC_ENABLE_AUTO is not set
+# CONFIG_PCI_STUB is not set
+# CONFIG_PCI_IOV is not set
+# CONFIG_PCI_PRI is not set
+# CONFIG_PCI_PASID is not set
+CONFIG_PCI_MSM=y
+CONFIG_PCI_LABEL=y
+
+#
+# PCI host controller drivers
+#
+# CONFIG_PCI_HOST_GENERIC is not set
+# CONFIG_PCIE_IPROC is not set
+# CONFIG_PCI_HISI is not set
+# CONFIG_PCIEPORTBUS is not set
+# CONFIG_HOTPLUG_PCI is not set
+
+#
+# Kernel Features
+#
+
+#
+# ARM errata workarounds via the alternatives framework
+#
+CONFIG_ARM64_ERRATUM_826319=y
+CONFIG_ARM64_ERRATUM_827319=y
+CONFIG_ARM64_ERRATUM_824069=y
+CONFIG_ARM64_ERRATUM_819472=y
+CONFIG_ARM64_ERRATUM_832075=y
+CONFIG_ARM64_ERRATUM_845719=y
+CONFIG_ARM64_ERRATUM_843419=y
+# CONFIG_CAVIUM_ERRATUM_22375 is not set
+# CONFIG_CAVIUM_ERRATUM_23154 is not set
+# CONFIG_CAVIUM_ERRATUM_27456 is not set
+CONFIG_ARM64_4K_PAGES=y
+# CONFIG_ARM64_16K_PAGES is not set
+# CONFIG_ARM64_DCACHE_DISABLE is not set
+# CONFIG_ARM64_ICACHE_DISABLE is not set
+# CONFIG_ARM64_64K_PAGES is not set
+CONFIG_ARM64_VA_BITS_39=y
+# CONFIG_ARM64_VA_BITS_48 is not set
+CONFIG_ARM64_VA_BITS=39
+# CONFIG_CPU_BIG_ENDIAN is not set
+CONFIG_SCHED_MC=y
+# CONFIG_SCHED_SMT is not set
+CONFIG_NR_CPUS=8
+CONFIG_HOTPLUG_CPU=y
+CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y
+CONFIG_ARCH_ENABLE_MEMORY_HOTREMOVE=y
+CONFIG_ARCH_NR_GPIO=1024
+CONFIG_QCOM_TLB_EL2_HANDLER=y
+# CONFIG_PREEMPT_NONE is not set
+# CONFIG_PREEMPT_VOLUNTARY is not set
+CONFIG_PREEMPT=y
+CONFIG_PREEMPT_COUNT=y
+CONFIG_HZ_100=y
+# CONFIG_HZ_250 is not set
+# CONFIG_HZ_300 is not set
+# CONFIG_HZ_1000 is not set
+CONFIG_HZ=100
+CONFIG_SCHED_HRTICK=y
+CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC=y
+CONFIG_ARCH_HAS_HOLES_MEMORYMODEL=y
+CONFIG_ARCH_SPARSEMEM_ENABLE=y
+CONFIG_ARCH_SPARSEMEM_DEFAULT=y
+CONFIG_ARCH_SELECT_MEMORY_MODEL=y
+CONFIG_HAVE_ARCH_PFN_VALID=y
+CONFIG_HW_PERF_EVENTS=y
+CONFIG_ARM64_REG_REBALANCE_ON_CTX_SW=y
+# CONFIG_PERF_EVENTS_USERMODE is not set
+# CONFIG_PERF_EVENTS_RESET_PMU_DEBUGFS is not set
+CONFIG_SYS_SUPPORTS_HUGETLBFS=y
+CONFIG_ARCH_WANT_HUGE_PMD_SHARE=y
+CONFIG_ARCH_HAS_CACHE_LINE_SIZE=y
+CONFIG_SELECT_MEMORY_MODEL=y
+CONFIG_SPARSEMEM_MANUAL=y
+CONFIG_SPARSEMEM=y
+CONFIG_HAVE_MEMORY_PRESENT=y
+CONFIG_SPARSEMEM_EXTREME=y
+CONFIG_SPARSEMEM_VMEMMAP_ENABLE=y
+CONFIG_SPARSEMEM_VMEMMAP=y
+CONFIG_HAVE_MEMBLOCK=y
+CONFIG_NO_BOOTMEM=y
+CONFIG_MEMORY_ISOLATION=y
+# CONFIG_HAVE_BOOTMEM_INFO_NODE is not set
+# CONFIG_MEMORY_HOTPLUG is not set
+CONFIG_SPLIT_PTLOCK_CPUS=4
+CONFIG_COMPACTION=y
+CONFIG_MIGRATION=y
+CONFIG_PHYS_ADDR_T_64BIT=y
+CONFIG_ZONE_DMA_FLAG=1
+CONFIG_BOUNCE=y
+CONFIG_MMU_NOTIFIER=y
+# CONFIG_KSM is not set
+CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
+# CONFIG_TRANSPARENT_HUGEPAGE is not set
+# CONFIG_CLEANCACHE is not set
+CONFIG_CMA=y
+# CONFIG_CMA_DEBUG is not set
+CONFIG_CMA_DEBUGFS=y
+CONFIG_CMA_AREAS=7
+# CONFIG_ZPOOL is not set
+# CONFIG_ZBUD is not set
+CONFIG_ZSMALLOC=y
+# CONFIG_PGTABLE_MAPPING is not set
+# CONFIG_ZSMALLOC_STAT is not set
+CONFIG_GENERIC_EARLY_IOREMAP=y
+CONFIG_KSWAPD_CPU_AFFINITY_MASK=""
+# CONFIG_IDLE_PAGE_TRACKING is not set
+# CONFIG_FORCE_ALLOC_FROM_DMA_ZONE is not set
+# CONFIG_PROCESS_RECLAIM is not set
+CONFIG_SECCOMP=y
+# CONFIG_XEN is not set
+CONFIG_FORCE_MAX_ZONEORDER=11
+CONFIG_UNMAP_KERNEL_AT_EL0=y
+CONFIG_HARDEN_BRANCH_PREDICTOR=y
+CONFIG_ARMV8_DEPRECATED=y
+CONFIG_SWP_EMULATION=y
+CONFIG_CP15_BARRIER_EMULATION=y
+CONFIG_SETEND_EMULATION=y
+# CONFIG_ARM64_SW_TTBR0_PAN is not set
+
+#
+# ARMv8.1 architectural features
+#
+# CONFIG_ARM64_HW_AFDBM is not set
+# CONFIG_ARM64_PAN is not set
+# CONFIG_ARM64_LSE_ATOMICS is not set
+# CONFIG_ARM64_UAO is not set
+CONFIG_ARM64_MODULE_CMODEL_LARGE=y
+# CONFIG_RANDOMIZE_BASE is not set
+
+#
+# Boot options
+#
+# CONFIG_ARM64_ACPI_PARKING_PROTOCOL is not set
+CONFIG_CMDLINE="ro earlyprintk earlycon=msm_serial_dm,0xc1b0000 console=ttyMSM0,115200,n8 androidboot.bootdevice=1da4000.ufshc rcupdate.rcu_expedited=1 root=/dev/nfs ip=:::::eth0.41:dhcp dhcpclass=linux-fbx7hd rootwait"
+# CONFIG_CMDLINE_FROM_BOOTLOADER is not set
+# CONFIG_CMDLINE_EXTEND is not set
+CONFIG_CMDLINE_FORCE=y
+# CONFIG_EFI is not set
+# CONFIG_BUILD_ARM64_APPENDED_DTB_IMAGE is not set
+# CONFIG_BUILD_ARM64_DT_OVERLAY is not set
+
+#
+# Userspace binary formats
+#
+CONFIG_BINFMT_ELF=y
+CONFIG_COMPAT_BINFMT_ELF=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_BINFMT_SCRIPT=y
+# CONFIG_HAVE_AOUT is not set
+# CONFIG_BINFMT_MISC is not set
+CONFIG_COREDUMP=y
+CONFIG_COMPAT=y
+CONFIG_SYSVIPC_COMPAT=y
+
+#
+# Power management options
+#
+CONFIG_SUSPEND=y
+CONFIG_SUSPEND_FREEZER=y
+# CONFIG_SUSPEND_SKIP_SYNC is not set
+CONFIG_WAKELOCK=y
+CONFIG_PM_SLEEP=y
+CONFIG_PM_SLEEP_SMP=y
+CONFIG_PM_AUTOSLEEP=y
+CONFIG_PM_WAKELOCKS=y
+CONFIG_PM_WAKELOCKS_LIMIT=0
+# CONFIG_PM_WAKELOCKS_GC is not set
+CONFIG_PM=y
+# CONFIG_PM_DEBUG is not set
+CONFIG_PM_OPP=y
+CONFIG_PM_CLK=y
+# CONFIG_WQ_POWER_EFFICIENT_DEFAULT is not set
+CONFIG_CPU_PM=y
+CONFIG_ARCH_HIBERNATION_POSSIBLE=y
+CONFIG_ARCH_SUSPEND_POSSIBLE=y
+
+#
+# CPU Power Management
+#
+
+#
+# CPU Idle
+#
+CONFIG_CPU_IDLE=y
+CONFIG_CPU_IDLE_MULTIPLE_DRIVERS=y
+CONFIG_CPU_IDLE_GOV_LADDER=y
+CONFIG_CPU_IDLE_GOV_MENU=y
+
+#
+# ARM CPU Idle Drivers
+#
+# CONFIG_ARM_CPUIDLE is not set
+# CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED is not set
+
+#
+# CPU Frequency scaling
+#
+CONFIG_CPU_FREQ=y
+CONFIG_CPU_FREQ_GOV_COMMON=y
+# CONFIG_CPU_FREQ_STAT is not set
+CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE=y
+# CONFIG_CPU_FREQ_DEFAULT_GOV_POWERSAVE is not set
+# CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE is not set
+# CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND is not set
+# CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE is not set
+# CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE is not set
+# CONFIG_CPU_FREQ_DEFAULT_GOV_SCHED is not set
+# CONFIG_CPU_FREQ_DEFAULT_GOV_SCHEDUTIL is not set
+CONFIG_CPU_FREQ_GOV_PERFORMANCE=y
+CONFIG_CPU_FREQ_GOV_POWERSAVE=y
+CONFIG_CPU_FREQ_GOV_USERSPACE=y
+CONFIG_CPU_FREQ_GOV_ONDEMAND=y
+CONFIG_CPU_FREQ_GOV_INTERACTIVE=y
+CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y
+# CONFIG_CPU_BOOST is not set
+# CONFIG_CPU_FREQ_GOV_SCHEDUTIL is not set
+
+#
+# CPU frequency scaling drivers
+#
+# CONFIG_CPUFREQ_DT is not set
+# CONFIG_ARM_BIG_LITTLE_CPUFREQ is not set
+# CONFIG_ARM_KIRKWOOD_CPUFREQ is not set
+# CONFIG_ACPI_CPPC_CPUFREQ is not set
+CONFIG_CPU_FREQ_MSM=y
+CONFIG_NET=y
+CONFIG_COMPAT_NETLINK_MESSAGES=y
+# CONFIG_DISABLE_NET_SKB_FRAG_CACHE is not set
+
+#
+# Networking options
+#
+CONFIG_PACKET=y
+# CONFIG_PACKET_DIAG is not set
+CONFIG_UNIX=y
+# CONFIG_UNIX_DIAG is not set
+CONFIG_XFRM=y
+CONFIG_XFRM_ALGO=y
+CONFIG_XFRM_USER=y
+# CONFIG_XFRM_SUB_POLICY is not set
+# CONFIG_XFRM_MIGRATE is not set
+# CONFIG_XFRM_STATISTICS is not set
+CONFIG_NET_KEY=y
+# CONFIG_NET_KEY_MIGRATE is not set
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+# CONFIG_IP_ADVANCED_ROUTER is not set
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+# CONFIG_IP_PNP_BOOTP is not set
+# CONFIG_IP_PNP_RARP is not set
+# CONFIG_NET_IPIP is not set
+# CONFIG_NET_IPGRE_DEMUX is not set
+# CONFIG_NET_IP_TUNNEL is not set
+# CONFIG_IP_MROUTE is not set
+# CONFIG_SYN_COOKIES is not set
+# CONFIG_NET_IPVTI is not set
+# CONFIG_NET_UDP_TUNNEL is not set
+# CONFIG_NET_FOU is not set
+CONFIG_INET_AH=y
+CONFIG_INET_ESP=y
+# CONFIG_INET_IPCOMP is not set
+# CONFIG_INET_XFRM_TUNNEL is not set
+# CONFIG_INET_TUNNEL is not set
+CONFIG_INET_XFRM_MODE_TRANSPORT=y
+CONFIG_INET_XFRM_MODE_TUNNEL=y
+# CONFIG_INET_XFRM_MODE_BEET is not set
+CONFIG_INET_LRO=y
+# CONFIG_INET_DIAG is not set
+# CONFIG_TCP_CONG_ADVANCED is not set
+CONFIG_TCP_CONG_CUBIC=y
+CONFIG_DEFAULT_TCP_CONG="cubic"
+# CONFIG_TCP_MD5SIG is not set
+CONFIG_IPV6=y
+# CONFIG_IPV6_ROUTER_PREF is not set
+# CONFIG_IPV6_OPTIMISTIC_DAD is not set
+# CONFIG_INET6_AH is not set
+# CONFIG_INET6_ESP is not set
+# CONFIG_INET6_IPCOMP is not set
+# CONFIG_IPV6_MIP6 is not set
+# CONFIG_IPV6_ILA is not set
+# CONFIG_INET6_XFRM_TUNNEL is not set
+# CONFIG_INET6_TUNNEL is not set
+# CONFIG_INET6_XFRM_MODE_TRANSPORT is not set
+# CONFIG_INET6_XFRM_MODE_TUNNEL is not set
+# CONFIG_INET6_XFRM_MODE_BEET is not set
+# CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION is not set
+# CONFIG_IPV6_SIT is not set
+# CONFIG_IPV6_TUNNEL is not set
+# CONFIG_IPV6_GRE is not set
+# CONFIG_IPV6_MULTIPLE_TABLES is not set
+# CONFIG_IPV6_MROUTE is not set
+# CONFIG_ANDROID_PARANOID_NETWORK is not set
+# CONFIG_NETWORK_SECMARK is not set
+# CONFIG_NET_PTP_CLASSIFY is not set
+# CONFIG_NETWORK_PHY_TIMESTAMPING is not set
+CONFIG_NETFILTER=y
+# CONFIG_NETFILTER_DEBUG is not set
+CONFIG_NETFILTER_ADVANCED=y
+# CONFIG_BRIDGE_NETFILTER is not set
+
+#
+# Core Netfilter Configuration
+#
+# CONFIG_NETFILTER_INGRESS is not set
+# CONFIG_NETFILTER_NETLINK_ACCT is not set
+# CONFIG_NETFILTER_NETLINK_QUEUE is not set
+# CONFIG_NETFILTER_NETLINK_LOG is not set
+CONFIG_NF_CONNTRACK=y
+CONFIG_NF_LOG_COMMON=y
+# CONFIG_NF_CONNTRACK_MARK is not set
+CONFIG_NF_CONNTRACK_PROCFS=y
+# CONFIG_NF_CONNTRACK_EVENTS is not set
+# CONFIG_NF_CONNTRACK_TIMEOUT is not set
+# CONFIG_NF_CONNTRACK_TIMESTAMP is not set
+# CONFIG_NF_CT_PROTO_DCCP is not set
+# CONFIG_NF_CT_PROTO_SCTP is not set
+# CONFIG_NF_CT_PROTO_UDPLITE is not set
+# CONFIG_NF_CONNTRACK_AMANDA is not set
+# CONFIG_NF_CONNTRACK_FTP is not set
+# CONFIG_NF_CONNTRACK_H323 is not set
+# CONFIG_NF_CONNTRACK_IRC is not set
+# CONFIG_NF_CONNTRACK_NETBIOS_NS is not set
+# CONFIG_NF_CONNTRACK_SNMP is not set
+# CONFIG_NF_CONNTRACK_PPTP is not set
+# CONFIG_NF_CONNTRACK_SANE is not set
+# CONFIG_NF_CONNTRACK_SIP is not set
+# CONFIG_NF_CONNTRACK_TFTP is not set
+# CONFIG_NF_CT_NETLINK is not set
+# CONFIG_NF_CT_NETLINK_TIMEOUT is not set
+CONFIG_NF_NAT=y
+CONFIG_NF_NAT_NEEDED=y
+# CONFIG_NF_NAT_AMANDA is not set
+# CONFIG_NF_NAT_FTP is not set
+# CONFIG_NF_NAT_IRC is not set
+# CONFIG_NF_NAT_SIP is not set
+# CONFIG_NF_NAT_TFTP is not set
+CONFIG_NF_NAT_REDIRECT=y
+# CONFIG_NF_TABLES is not set
+CONFIG_NETFILTER_XTABLES=y
+
+#
+# Xtables combined modules
+#
+# CONFIG_NETFILTER_XT_MARK is not set
+# CONFIG_NETFILTER_XT_CONNMARK is not set
+
+#
+# Xtables targets
+#
+# CONFIG_NETFILTER_XT_TARGET_AUDIT is not set
+# CONFIG_NETFILTER_XT_TARGET_CLASSIFY is not set
+# CONFIG_NETFILTER_XT_TARGET_CONNMARK is not set
+# CONFIG_NETFILTER_XT_TARGET_HMARK is not set
+# CONFIG_NETFILTER_XT_TARGET_IDLETIMER is not set
+# CONFIG_NETFILTER_XT_TARGET_HARDIDLETIMER is not set
+# CONFIG_NETFILTER_XT_TARGET_LED is not set
+CONFIG_NETFILTER_XT_TARGET_LOG=y
+# CONFIG_NETFILTER_XT_TARGET_MARK is not set
+CONFIG_NETFILTER_XT_NAT=y
+# CONFIG_NETFILTER_XT_TARGET_NETMAP is not set
+# CONFIG_NETFILTER_XT_TARGET_NFLOG is not set
+# CONFIG_NETFILTER_XT_TARGET_NFQUEUE is not set
+# CONFIG_NETFILTER_XT_TARGET_RATEEST is not set
+CONFIG_NETFILTER_XT_TARGET_REDIRECT=y
+# CONFIG_NETFILTER_XT_TARGET_TEE is not set
+# CONFIG_NETFILTER_XT_TARGET_TCPMSS is not set
+
+#
+# Xtables matches
+#
+# CONFIG_NETFILTER_XT_MATCH_ADDRTYPE is not set
+# CONFIG_NETFILTER_XT_MATCH_BPF is not set
+# CONFIG_NETFILTER_XT_MATCH_CGROUP is not set
+# CONFIG_NETFILTER_XT_MATCH_CLUSTER is not set
+# CONFIG_NETFILTER_XT_MATCH_COMMENT is not set
+# CONFIG_NETFILTER_XT_MATCH_CONNBYTES is not set
+# CONFIG_NETFILTER_XT_MATCH_CONNLABEL is not set
+# CONFIG_NETFILTER_XT_MATCH_CONNLIMIT is not set
+# CONFIG_NETFILTER_XT_MATCH_CONNMARK is not set
+# CONFIG_NETFILTER_XT_MATCH_CONNTRACK is not set
+# CONFIG_NETFILTER_XT_MATCH_CPU is not set
+# CONFIG_NETFILTER_XT_MATCH_DCCP is not set
+# CONFIG_NETFILTER_XT_MATCH_DEVGROUP is not set
+# CONFIG_NETFILTER_XT_MATCH_DSCP is not set
+# CONFIG_NETFILTER_XT_MATCH_ECN is not set
+# CONFIG_NETFILTER_XT_MATCH_ESP is not set
+# CONFIG_NETFILTER_XT_MATCH_HASHLIMIT is not set
+# CONFIG_NETFILTER_XT_MATCH_HELPER is not set
+# CONFIG_NETFILTER_XT_MATCH_HL is not set
+# CONFIG_NETFILTER_XT_MATCH_IPCOMP is not set
+# CONFIG_NETFILTER_XT_MATCH_IPRANGE is not set
+# CONFIG_NETFILTER_XT_MATCH_L2TP is not set
+# CONFIG_NETFILTER_XT_MATCH_LENGTH is not set
+# CONFIG_NETFILTER_XT_MATCH_LIMIT is not set
+# CONFIG_NETFILTER_XT_MATCH_MAC is not set
+# CONFIG_NETFILTER_XT_MATCH_MARK is not set
+# CONFIG_NETFILTER_XT_MATCH_MULTIPORT is not set
+# CONFIG_NETFILTER_XT_MATCH_NFACCT is not set
+CONFIG_NETFILTER_XT_MATCH_OWNER=y
+# CONFIG_NETFILTER_XT_MATCH_POLICY is not set
+CONFIG_NETFILTER_XT_MATCH_PKTTYPE=y
+# CONFIG_NETFILTER_XT_MATCH_QUOTA is not set
+# CONFIG_NETFILTER_XT_MATCH_QUOTA2 is not set
+# CONFIG_NETFILTER_XT_MATCH_RATEEST is not set
+# CONFIG_NETFILTER_XT_MATCH_REALM is not set
+# CONFIG_NETFILTER_XT_MATCH_RECENT is not set
+# CONFIG_NETFILTER_XT_MATCH_SCTP is not set
+# CONFIG_NETFILTER_XT_MATCH_SOCKET is not set
+CONFIG_NETFILTER_XT_MATCH_STATE=y
+# CONFIG_NETFILTER_XT_MATCH_STATISTIC is not set
+# CONFIG_NETFILTER_XT_MATCH_STRING is not set
+# CONFIG_NETFILTER_XT_MATCH_TCPMSS is not set
+# CONFIG_NETFILTER_XT_MATCH_TIME is not set
+# CONFIG_NETFILTER_XT_MATCH_U32 is not set
+# CONFIG_IP_SET is not set
+# CONFIG_IP_VS is not set
+
+#
+# IP: Netfilter Configuration
+#
+CONFIG_NF_DEFRAG_IPV4=y
+CONFIG_NF_CONNTRACK_IPV4=y
+CONFIG_NF_CONNTRACK_PROC_COMPAT=y
+# CONFIG_NF_DUP_IPV4 is not set
+# CONFIG_NF_LOG_ARP is not set
+CONFIG_NF_LOG_IPV4=y
+CONFIG_NF_REJECT_IPV4=y
+CONFIG_NF_NAT_IPV4=y
+# CONFIG_NF_NAT_MASQUERADE_IPV4 is not set
+# CONFIG_NF_NAT_PPTP is not set
+# CONFIG_NF_NAT_H323 is not set
+CONFIG_IP_NF_IPTABLES=y
+# CONFIG_IP_NF_MATCH_AH is not set
+# CONFIG_IP_NF_MATCH_ECN is not set
+# CONFIG_IP_NF_MATCH_TTL is not set
+CONFIG_IP_NF_FILTER=y
+CONFIG_IP_NF_TARGET_REJECT=y
+# CONFIG_IP_NF_TARGET_SYNPROXY is not set
+CONFIG_IP_NF_NAT=y
+# CONFIG_IP_NF_TARGET_MASQUERADE is not set
+# CONFIG_IP_NF_TARGET_NETMAP is not set
+CONFIG_IP_NF_TARGET_REDIRECT=y
+# CONFIG_IP_NF_MANGLE is not set
+# CONFIG_IP_NF_RAW is not set
+# CONFIG_IP_NF_ARPTABLES is not set
+
+#
+# IPv6: Netfilter Configuration
+#
+CONFIG_NF_DEFRAG_IPV6=y
+CONFIG_NF_CONNTRACK_IPV6=y
+# CONFIG_NF_DUP_IPV6 is not set
+CONFIG_NF_REJECT_IPV6=y
+CONFIG_NF_LOG_IPV6=y
+# CONFIG_NF_NAT_IPV6 is not set
+CONFIG_IP6_NF_IPTABLES=y
+# CONFIG_IP6_NF_IPTABLES_128 is not set
+# CONFIG_IP6_NF_MATCH_AH is not set
+# CONFIG_IP6_NF_MATCH_EUI64 is not set
+# CONFIG_IP6_NF_MATCH_FRAG is not set
+# CONFIG_IP6_NF_MATCH_OPTS is not set
+# CONFIG_IP6_NF_MATCH_HL is not set
+# CONFIG_IP6_NF_MATCH_IPV6HEADER is not set
+# CONFIG_IP6_NF_MATCH_MH is not set
+# CONFIG_IP6_NF_MATCH_RT is not set
+CONFIG_IP6_NF_FILTER=y
+CONFIG_IP6_NF_TARGET_REJECT=y
+# CONFIG_IP6_NF_TARGET_SYNPROXY is not set
+# CONFIG_IP6_NF_MANGLE is not set
+# CONFIG_IP6_NF_RAW is not set
+# CONFIG_IP6_NF_NAT is not set
+# CONFIG_BRIDGE_NF_EBTABLES is not set
+# CONFIG_IP_DCCP is not set
+# CONFIG_IP_SCTP is not set
+# CONFIG_RDS is not set
+# CONFIG_TIPC is not set
+# CONFIG_ATM is not set
+# CONFIG_L2TP is not set
+CONFIG_STP=y
+CONFIG_BRIDGE=y
+# CONFIG_BRIDGE_IGMP_SNOOPING is not set
+# CONFIG_BRIDGE_VLAN_FILTERING is not set
+CONFIG_HAVE_NET_DSA=y
+# CONFIG_NET_DSA is not set
+CONFIG_VLAN_8021Q=y
+# CONFIG_VLAN_8021Q_GVRP is not set
+# CONFIG_VLAN_8021Q_MVRP is not set
+# CONFIG_DECNET is not set
+CONFIG_LLC=y
+# CONFIG_LLC2 is not set
+# CONFIG_IPX is not set
+# CONFIG_ATALK is not set
+# CONFIG_X25 is not set
+# CONFIG_LAPB is not set
+# CONFIG_PHONET is not set
+# CONFIG_6LOWPAN is not set
+# CONFIG_IEEE802154 is not set
+# CONFIG_NET_SCHED is not set
+# CONFIG_DCB is not set
+CONFIG_DNS_RESOLVER=y
+# CONFIG_BATMAN_ADV is not set
+# CONFIG_OPENVSWITCH is not set
+# CONFIG_VSOCKETS is not set
+# CONFIG_NETLINK_DIAG is not set
+# CONFIG_MPLS is not set
+# CONFIG_HSR is not set
+# CONFIG_NET_SWITCHDEV is not set
+# CONFIG_NET_L3_MASTER_DEV is not set
+# CONFIG_RMNET_DATA is not set
+CONFIG_RPS=y
+CONFIG_RFS_ACCEL=y
+CONFIG_XPS=y
+# CONFIG_CGROUP_NET_PRIO is not set
+# CONFIG_CGROUP_NET_CLASSID is not set
+CONFIG_NET_RX_BUSY_POLL=y
+CONFIG_BQL=y
+CONFIG_BPF_JIT=y
+CONFIG_NET_FLOW_LIMIT=y
+# CONFIG_SOCKEV_NLMCAST is not set
+
+#
+# Network testing
+#
+# CONFIG_NET_PKTGEN is not set
+# CONFIG_NET_DROP_MONITOR is not set
+# CONFIG_HAMRADIO is not set
+# CONFIG_CAN is not set
+# CONFIG_IRDA is not set
+CONFIG_BT=y
+CONFIG_BT_BREDR=y
+# CONFIG_BT_RFCOMM is not set
+# CONFIG_BT_BNEP is not set
+# CONFIG_BT_HIDP is not set
+CONFIG_BT_HS=y
+CONFIG_BT_LE=y
+# CONFIG_BT_SELFTEST is not set
+CONFIG_BT_DEBUGFS=y
+
+#
+# Bluetooth device drivers
+#
+# CONFIG_BT_HCIBTUSB is not set
+# CONFIG_BT_HCIBTSDIO is not set
+CONFIG_BT_HCIUART=y
+CONFIG_BT_HCIUART_H4=y
+# CONFIG_BT_HCIUART_BCSP is not set
+# CONFIG_BT_HCIUART_ATH3K is not set
+# CONFIG_BT_HCIUART_LL is not set
+# CONFIG_BT_HCIUART_3WIRE is not set
+# CONFIG_BT_HCIUART_INTEL is not set
+# CONFIG_BT_HCIUART_BCM is not set
+# CONFIG_BT_HCIUART_QCA is not set
+# CONFIG_BT_HCIBCM203X is not set
+# CONFIG_BT_HCIBPA10X is not set
+# CONFIG_BT_HCIBFUSB is not set
+# CONFIG_BT_HCIVHCI is not set
+# CONFIG_BT_MRVL is not set
+CONFIG_MSM_BT_POWER=y
+CONFIG_BTFM_SLIM=y
+CONFIG_BTFM_SLIM_WCN3990=y
+# CONFIG_AF_RXRPC is not set
+CONFIG_WIRELESS=y
+CONFIG_WIRELESS_EXT=y
+CONFIG_WEXT_CORE=y
+CONFIG_WEXT_PROC=y
+CONFIG_WEXT_SPY=y
+CONFIG_WEXT_PRIV=y
+CONFIG_CFG80211=m
+CONFIG_NL80211_TESTMODE=y
+# CONFIG_CFG80211_DEVELOPER_WARNINGS is not set
+# CONFIG_CFG80211_REG_DEBUG is not set
+CONFIG_CFG80211_CERTIFICATION_ONUS=y
+# CONFIG_CFG80211_REG_CELLULAR_HINTS is not set
+CONFIG_CFG80211_REG_RELAX_NO_IR=y
+CONFIG_CFG80211_DEFAULT_PS=y
+# CONFIG_CFG80211_DEBUGFS is not set
+CONFIG_CFG80211_INTERNAL_REGDB=y
+# CONFIG_CFG80211_CRDA_SUPPORT is not set
+# CONFIG_CFG80211_WEXT is not set
+# CONFIG_LIB80211 is not set
+CONFIG_MAC80211=m
+CONFIG_MAC80211_HAS_RC=y
+CONFIG_MAC80211_RC_MINSTREL=y
+CONFIG_MAC80211_RC_MINSTREL_HT=y
+CONFIG_MAC80211_RC_MINSTREL_VHT=y
+CONFIG_MAC80211_RC_DEFAULT_MINSTREL=y
+CONFIG_MAC80211_RC_DEFAULT="minstrel_ht"
+CONFIG_MAC80211_MESH=y
+CONFIG_MAC80211_LEDS=y
+# CONFIG_MAC80211_DEBUGFS is not set
+# CONFIG_MAC80211_MESSAGE_TRACING is not set
+# CONFIG_MAC80211_DEBUG_MENU is not set
+CONFIG_MAC80211_STA_HASH_MAX_SIZE=0
+# CONFIG_WIMAX is not set
+CONFIG_RFKILL=y
+CONFIG_RFKILL_PM=y
+CONFIG_RFKILL_LEDS=y
+# CONFIG_RFKILL_INPUT is not set
+# CONFIG_RFKILL_REGULATOR is not set
+# CONFIG_RFKILL_GPIO is not set
+# CONFIG_NET_9P is not set
+# CONFIG_CAIF is not set
+# CONFIG_CEPH_LIB is not set
+# CONFIG_NFC is not set
+CONFIG_NFC_NQ=y
+# CONFIG_LWTUNNEL is not set
+CONFIG_IPC_ROUTER=y
+CONFIG_IPC_ROUTER_SECURITY=y
+CONFIG_HAVE_BPF_JIT=y
+CONFIG_HAVE_EBPF_JIT=y
+
+#
+# Device Drivers
+#
+CONFIG_ARM_AMBA=y
+# CONFIG_TEGRA_AHB is not set
+
+#
+# Generic Driver Options
+#
+# CONFIG_UEVENT_HELPER is not set
+CONFIG_DEVTMPFS=y
+# CONFIG_DEVTMPFS_MOUNT is not set
+CONFIG_STANDALONE=y
+CONFIG_PREVENT_FIRMWARE_BUILD=y
+CONFIG_FW_LOADER=y
+CONFIG_FIRMWARE_IN_KERNEL=y
+CONFIG_EXTRA_FIRMWARE=""
+# CONFIG_FW_LOADER_USER_HELPER_FALLBACK is not set
+CONFIG_ALLOW_DEV_COREDUMP=y
+# CONFIG_DEBUG_DRIVER is not set
+# CONFIG_DEBUG_DEVRES is not set
+# CONFIG_SYS_HYPERVISOR is not set
+# CONFIG_GENERIC_CPU_DEVICES is not set
+CONFIG_GENERIC_CPU_AUTOPROBE=y
+CONFIG_SOC_BUS=y
+CONFIG_REGMAP=y
+CONFIG_REGMAP_I2C=y
+CONFIG_REGMAP_SPI=y
+CONFIG_REGMAP_SPMI=y
+CONFIG_REGMAP_MMIO=y
+CONFIG_REGMAP_SWR=y
+# CONFIG_REGMAP_ALLOW_WRITE_DEBUGFS is not set
+CONFIG_DMA_SHARED_BUFFER=y
+# CONFIG_FENCE_TRACE is not set
+CONFIG_DMA_CMA=y
+
+#
+# Default contiguous memory area size:
+#
+CONFIG_CMA_SIZE_MBYTES=16
+CONFIG_CMA_SIZE_SEL_MBYTES=y
+# CONFIG_CMA_SIZE_SEL_PERCENTAGE is not set
+# CONFIG_CMA_SIZE_SEL_MIN is not set
+# CONFIG_CMA_SIZE_SEL_MAX is not set
+CONFIG_CMA_ALIGNMENT=8
+
+#
+# Bus devices
+#
+# CONFIG_ARM_CCI400_PMU is not set
+# CONFIG_ARM_CCI500_PMU is not set
+# CONFIG_ARM_CCN is not set
+# CONFIG_VEXPRESS_CONFIG is not set
+# CONFIG_CONNECTOR is not set
+CONFIG_FREEBOX_PROCFS=y
+# CONFIG_MTD is not set
+CONFIG_DTC=y
+CONFIG_OF=y
+# CONFIG_OF_UNITTEST is not set
+CONFIG_OF_FLATTREE=y
+CONFIG_OF_EARLY_FLATTREE=y
+CONFIG_OF_ADDRESS=y
+CONFIG_OF_ADDRESS_PCI=y
+CONFIG_OF_IRQ=y
+CONFIG_OF_NET=y
+CONFIG_OF_MDIO=y
+CONFIG_OF_PCI=y
+CONFIG_OF_PCI_IRQ=y
+CONFIG_OF_RESERVED_MEM=y
+CONFIG_OF_SLIMBUS=y
+# CONFIG_OF_OVERLAY is not set
+CONFIG_OF_BATTERYDATA=y
+# CONFIG_PARPORT is not set
+CONFIG_PNP=y
+# CONFIG_PNP_DEBUG_MESSAGES is not set
+
+#
+# Protocols
+#
+CONFIG_PNPACPI=y
+CONFIG_BLK_DEV=y
+# CONFIG_BLK_DEV_NULL_BLK is not set
+# CONFIG_BLK_DEV_PCIESSD_MTIP32XX is not set
+# CONFIG_ZRAM is not set
+# CONFIG_BLK_CPQ_CISS_DA is not set
+# CONFIG_BLK_DEV_DAC960 is not set
+# CONFIG_BLK_DEV_UMEM is not set
+# CONFIG_BLK_DEV_COW_COMMON is not set
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_LOOP_MIN_COUNT=8
+# CONFIG_BLK_DEV_CRYPTOLOOP is not set
+# CONFIG_BLK_DEV_DRBD is not set
+# CONFIG_BLK_DEV_NBD is not set
+# CONFIG_BLK_DEV_SKD is not set
+# CONFIG_BLK_DEV_SX8 is not set
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_COUNT=1
+CONFIG_BLK_DEV_RAM_SIZE=196608
+# CONFIG_CDROM_PKTCDVD is not set
+# CONFIG_ATA_OVER_ETH is not set
+# CONFIG_BLK_DEV_RBD is not set
+# CONFIG_BLK_DEV_RSXX is not set
+# CONFIG_BLK_DEV_NVME is not set
+
+#
+# Misc devices
+#
+# CONFIG_SENSORS_LIS3LV02D is not set
+# CONFIG_AD525X_DPOT is not set
+# CONFIG_DUMMY_IRQ is not set
+# CONFIG_PHANTOM is not set
+# CONFIG_SGI_IOC4 is not set
+# CONFIG_TIFM_CORE is not set
+# CONFIG_ICS932S401 is not set
+# CONFIG_ENCLOSURE_SERVICES is not set
+# CONFIG_HP_ILO is not set
+# CONFIG_QCOM_COINCELL is not set
+# CONFIG_APDS9802ALS is not set
+# CONFIG_ISL29003 is not set
+# CONFIG_ISL29020 is not set
+# CONFIG_SENSORS_TSL2550 is not set
+# CONFIG_SENSORS_BH1780 is not set
+# CONFIG_SENSORS_BH1770 is not set
+# CONFIG_SENSORS_APDS990X is not set
+# CONFIG_HMC6352 is not set
+# CONFIG_DS1682 is not set
+# CONFIG_TI_DAC7512 is not set
+# CONFIG_BMP085_I2C is not set
+# CONFIG_BMP085_SPI is not set
+CONFIG_FBXSERIAL_OF=y
+# CONFIG_USB_SWITCH_FSA9480 is not set
+# CONFIG_LATTICE_ECP3_CONFIG is not set
+# CONFIG_SRAM is not set
+CONFIG_QSEECOM=y
+CONFIG_HDCP_QSEECOM=y
+# CONFIG_PROFILER is not set
+CONFIG_QPNP_MISC=y
+# CONFIG_C2PORT is not set
+
+#
+# EEPROM support
+#
+CONFIG_EEPROM_AT24=y
+# CONFIG_EEPROM_AT25 is not set
+# CONFIG_EEPROM_LEGACY is not set
+# CONFIG_EEPROM_MAX6875 is not set
+# CONFIG_EEPROM_93CX6 is not set
+# CONFIG_EEPROM_93XX46 is not set
+# CONFIG_CB710_CORE is not set
+
+#
+# Texas Instruments shared transport line discipline
+#
+# CONFIG_TI_ST is not set
+# CONFIG_SENSORS_LIS3_I2C is not set
+
+#
+# Altera FPGA firmware download module
+#
+# CONFIG_ALTERA_STAPL is not set
+CONFIG_MSM_QDSP6V2_CODECS=y
+CONFIG_MSM_ULTRASOUND=y
+
+#
+# Intel MIC Bus Driver
+#
+
+#
+# SCIF Bus Driver
+#
+
+#
+# Intel MIC Host Driver
+#
+
+#
+# Intel MIC Card Driver
+#
+
+#
+# SCIF Driver
+#
+
+#
+# Intel MIC Coprocessor State Management (COSM) Drivers
+#
+# CONFIG_GENWQE is not set
+# CONFIG_ECHO is not set
+# CONFIG_CXL_BASE is not set
+# CONFIG_CXL_KERNEL_API is not set
+# CONFIG_CXL_EEH is not set
+CONFIG_NET_RTL8367C_SPI=y
+CONFIG_NET_RTL8367C_SPI_CONFIG=y
+
+#
+# HDMI CEC support
+#
+CONFIG_HDMI_CEC=y
+
+#
+# SCSI device support
+#
+CONFIG_SCSI_MOD=y
+# CONFIG_RAID_ATTRS is not set
+CONFIG_SCSI=y
+CONFIG_SCSI_DMA=y
+# CONFIG_SCSI_NETLINK is not set
+# CONFIG_SCSI_MQ_DEFAULT is not set
+CONFIG_SCSI_PROC_FS=y
+
+#
+# SCSI support type (disk, tape, CD-ROM)
+#
+CONFIG_BLK_DEV_SD=y
+# CONFIG_CHR_DEV_ST is not set
+# CONFIG_CHR_DEV_OSST is not set
+CONFIG_BLK_DEV_SR=y
+# CONFIG_BLK_DEV_SR_VENDOR is not set
+CONFIG_CHR_DEV_SG=y
+CONFIG_CHR_DEV_SCH=y
+CONFIG_SCSI_CONSTANTS=y
+CONFIG_SCSI_LOGGING=y
+CONFIG_SCSI_SCAN_ASYNC=y
+
+#
+# SCSI Transports
+#
+# CONFIG_SCSI_SPI_ATTRS is not set
+# CONFIG_SCSI_FC_ATTRS is not set
+# CONFIG_SCSI_ISCSI_ATTRS is not set
+# CONFIG_SCSI_SAS_ATTRS is not set
+# CONFIG_SCSI_SAS_LIBSAS is not set
+# CONFIG_SCSI_SRP_ATTRS is not set
+CONFIG_SCSI_LOWLEVEL=y
+# CONFIG_ISCSI_TCP is not set
+# CONFIG_ISCSI_BOOT_SYSFS is not set
+# CONFIG_SCSI_CXGB3_ISCSI is not set
+# CONFIG_SCSI_CXGB4_ISCSI is not set
+# CONFIG_SCSI_BNX2_ISCSI is not set
+# CONFIG_BE2ISCSI is not set
+# CONFIG_BLK_DEV_3W_XXXX_RAID is not set
+# CONFIG_SCSI_HPSA is not set
+# CONFIG_SCSI_3W_9XXX is not set
+# CONFIG_SCSI_3W_SAS is not set
+# CONFIG_SCSI_ACARD is not set
+# CONFIG_SCSI_AACRAID is not set
+# CONFIG_SCSI_AIC7XXX is not set
+# CONFIG_SCSI_AIC79XX is not set
+# CONFIG_SCSI_AIC94XX is not set
+# CONFIG_SCSI_MVSAS is not set
+# CONFIG_SCSI_MVUMI is not set
+# CONFIG_SCSI_ADVANSYS is not set
+# CONFIG_SCSI_ARCMSR is not set
+# CONFIG_SCSI_ESAS2R is not set
+# CONFIG_MEGARAID_NEWGEN is not set
+# CONFIG_MEGARAID_LEGACY is not set
+# CONFIG_MEGARAID_SAS is not set
+# CONFIG_SCSI_MPT3SAS is not set
+# CONFIG_SCSI_MPT2SAS is not set
+CONFIG_SCSI_UFSHCD=y
+# CONFIG_SCSI_UFSHCD_PCI is not set
+CONFIG_SCSI_UFSHCD_PLATFORM=y
+CONFIG_SCSI_UFS_QCOM=y
+# CONFIG_SCSI_UFSHCD_CMD_LOGGING is not set
+# CONFIG_SCSI_HPTIOP is not set
+# CONFIG_SCSI_SNIC is not set
+# CONFIG_SCSI_DMX3191D is not set
+# CONFIG_SCSI_FUTURE_DOMAIN is not set
+# CONFIG_SCSI_IPS is not set
+# CONFIG_SCSI_INITIO is not set
+# CONFIG_SCSI_INIA100 is not set
+# CONFIG_SCSI_STEX is not set
+# CONFIG_SCSI_SYM53C8XX_2 is not set
+# CONFIG_SCSI_QLOGIC_1280 is not set
+# CONFIG_SCSI_QLA_ISCSI is not set
+# CONFIG_SCSI_DC395x is not set
+# CONFIG_SCSI_AM53C974 is not set
+# CONFIG_SCSI_WD719X is not set
+# CONFIG_SCSI_DEBUG is not set
+# CONFIG_SCSI_PMCRAID is not set
+# CONFIG_SCSI_PM8001 is not set
+# CONFIG_SCSI_LOWLEVEL_PCMCIA is not set
+# CONFIG_SCSI_DH is not set
+# CONFIG_SCSI_OSD_INITIATOR is not set
+CONFIG_HAVE_PATA_PLATFORM=y
+# CONFIG_ATA is not set
+CONFIG_MD=y
+# CONFIG_BLK_DEV_MD is not set
+# CONFIG_BCACHE is not set
+CONFIG_BLK_DEV_DM_BUILTIN=y
+CONFIG_BLK_DEV_DM=y
+# CONFIG_DM_MQ_DEFAULT is not set
+# CONFIG_DM_DEBUG is not set
+CONFIG_DM_BUFIO=y
+CONFIG_DM_CRYPT=y
+CONFIG_DM_REQ_CRYPT=y
+# CONFIG_DM_SNAPSHOT is not set
+# CONFIG_DM_THIN_PROVISIONING is not set
+# CONFIG_DM_CACHE is not set
+# CONFIG_DM_ERA is not set
+# CONFIG_DM_MIRROR is not set
+# CONFIG_DM_RAID is not set
+# CONFIG_DM_ZERO is not set
+# CONFIG_DM_MULTIPATH is not set
+# CONFIG_DM_DELAY is not set
+CONFIG_DM_UEVENT=y
+# CONFIG_DM_FLAKEY is not set
+CONFIG_DM_VERITY=y
+# CONFIG_DM_VERITY_HASH_PREFETCH_MIN_SIZE_128 is not set
+CONFIG_DM_VERITY_HASH_PREFETCH_MIN_SIZE=1
+CONFIG_DM_VERITY_FEC=y
+# CONFIG_DM_SWITCH is not set
+# CONFIG_DM_LOG_WRITES is not set
+# CONFIG_DM_VERITY_AVB is not set
+# CONFIG_TARGET_CORE is not set
+# CONFIG_FUSION is not set
+
+#
+# IEEE 1394 (FireWire) support
+#
+# CONFIG_FIREWIRE is not set
+# CONFIG_FIREWIRE_NOSY is not set
+CONFIG_NETDEVICES=y
+CONFIG_NET_CORE=y
+# CONFIG_BONDING is not set
+# CONFIG_DUMMY is not set
+# CONFIG_EQUALIZER is not set
+# CONFIG_NET_FC is not set
+# CONFIG_NET_TEAM is not set
+# CONFIG_MACVLAN is not set
+# CONFIG_IPVLAN is not set
+# CONFIG_VXLAN is not set
+# CONFIG_NETCONSOLE is not set
+# CONFIG_NETPOLL is not set
+# CONFIG_NET_POLL_CONTROLLER is not set
+CONFIG_TUN=y
+# CONFIG_TUN_VNET_CROSS_LE is not set
+# CONFIG_VETH is not set
+# CONFIG_NLMON is not set
+# CONFIG_ARCNET is not set
+
+#
+# CAIF transport drivers
+#
+
+#
+# Distributed Switch Architecture drivers
+#
+# CONFIG_NET_DSA_MV88E6XXX is not set
+# CONFIG_NET_DSA_MV88E6XXX_NEED_PPU is not set
+CONFIG_ETHERNET=y
+CONFIG_MDIO=y
+# CONFIG_NET_VENDOR_3COM is not set
+# CONFIG_NET_VENDOR_ADAPTEC is not set
+# CONFIG_NET_VENDOR_AGERE is not set
+# CONFIG_NET_VENDOR_ALTEON is not set
+# CONFIG_ALTERA_TSE is not set
+# CONFIG_NET_VENDOR_AMD is not set
+# CONFIG_NET_VENDOR_ARC is not set
+CONFIG_NET_VENDOR_ATHEROS=y
+# CONFIG_ATL2 is not set
+# CONFIG_ATL1 is not set
+# CONFIG_ATL1E is not set
+# CONFIG_ATL1C is not set
+# CONFIG_ALX is not set
+CONFIG_ALX_PROP=y
+# CONFIG_NET_VENDOR_AURORA is not set
+# CONFIG_NET_CADENCE is not set
+# CONFIG_NET_VENDOR_BROADCOM is not set
+# CONFIG_NET_VENDOR_BROCADE is not set
+# CONFIG_NET_VENDOR_CAVIUM is not set
+# CONFIG_NET_VENDOR_CHELSIO is not set
+# CONFIG_NET_VENDOR_CISCO is not set
+# CONFIG_DNET is not set
+# CONFIG_NET_VENDOR_DEC is not set
+# CONFIG_NET_VENDOR_DLINK is not set
+# CONFIG_NET_VENDOR_EMULEX is not set
+# CONFIG_NET_VENDOR_EZCHIP is not set
+# CONFIG_NET_VENDOR_EXAR is not set
+# CONFIG_NET_VENDOR_HISILICON is not set
+# CONFIG_NET_VENDOR_HP is not set
+# CONFIG_NET_VENDOR_INTEL is not set
+# CONFIG_JME is not set
+# CONFIG_NET_VENDOR_MARVELL is not set
+# CONFIG_NET_VENDOR_MELLANOX is not set
+# CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_MICROCHIP is not set
+# CONFIG_NET_VENDOR_MYRI is not set
+# CONFIG_FEALNX is not set
+# CONFIG_NET_VENDOR_NATSEMI is not set
+# CONFIG_NET_VENDOR_NVIDIA is not set
+# CONFIG_NET_VENDOR_OKI is not set
+# CONFIG_ETHOC is not set
+# CONFIG_NET_PACKET_ENGINE is not set
+# CONFIG_NET_VENDOR_QLOGIC is not set
+# CONFIG_NET_VENDOR_QUALCOMM is not set
+# CONFIG_NET_VENDOR_REALTEK is not set
+# CONFIG_NET_VENDOR_RENESAS is not set
+# CONFIG_NET_VENDOR_RDC is not set
+# CONFIG_NET_VENDOR_ROCKER is not set
+# CONFIG_NET_VENDOR_SAMSUNG is not set
+# CONFIG_NET_VENDOR_SEEQ is not set
+# CONFIG_NET_VENDOR_SILAN is not set
+# CONFIG_NET_VENDOR_SIS is not set
+# CONFIG_SFC is not set
+# CONFIG_NET_VENDOR_SMSC is not set
+# CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_SUN is not set
+# CONFIG_NET_VENDOR_SYNOPSYS is not set
+# CONFIG_NET_VENDOR_TEHUTI is not set
+# CONFIG_NET_VENDOR_TI is not set
+# CONFIG_NET_VENDOR_VIA is not set
+# CONFIG_NET_VENDOR_WIZNET is not set
+# CONFIG_FDDI is not set
+# CONFIG_HIPPI is not set
+# CONFIG_NET_SB1000 is not set
+CONFIG_PHYLIB=y
+
+#
+# MII PHY device drivers
+#
+# CONFIG_AQUANTIA_PHY is not set
+# CONFIG_AT803X_PHY is not set
+# CONFIG_AMD_PHY is not set
+# CONFIG_MARVELL_PHY is not set
+# CONFIG_DAVICOM_PHY is not set
+# CONFIG_QSEMI_PHY is not set
+# CONFIG_LXT_PHY is not set
+# CONFIG_CICADA_PHY is not set
+# CONFIG_VITESSE_PHY is not set
+# CONFIG_TERANETICS_PHY is not set
+# CONFIG_SMSC_PHY is not set
+# CONFIG_BROADCOM_PHY is not set
+# CONFIG_BCM7XXX_PHY is not set
+# CONFIG_BCM87XX_PHY is not set
+# CONFIG_ICPLUS_PHY is not set
+# CONFIG_REALTEK_PHY is not set
+# CONFIG_NATIONAL_PHY is not set
+# CONFIG_STE10XP is not set
+# CONFIG_LSI_ET1011C_PHY is not set
+# CONFIG_MICREL_PHY is not set
+# CONFIG_DP83848_PHY is not set
+# CONFIG_DP83867_PHY is not set
+# CONFIG_MICROCHIP_PHY is not set
+# CONFIG_FIXED_PHY is not set
+# CONFIG_MDIO_BITBANG is not set
+# CONFIG_MDIO_OCTEON is not set
+# CONFIG_MDIO_BUS_MUX_GPIO is not set
+# CONFIG_MDIO_BUS_MUX_MMIOREG is not set
+# CONFIG_MDIO_BCM_UNIMAC is not set
+# CONFIG_MICREL_KS8995MA is not set
+CONFIG_PPP=y
+CONFIG_PPP_BSDCOMP=y
+CONFIG_PPP_DEFLATE=y
+# CONFIG_PPP_FILTER is not set
+CONFIG_PPP_MPPE=y
+# CONFIG_PPP_MULTILINK is not set
+# CONFIG_PPPOE is not set
+CONFIG_PPPOLAC=y
+CONFIG_PPPOPNS=y
+# CONFIG_PPP_ASYNC is not set
+# CONFIG_PPP_SYNC_TTY is not set
+# CONFIG_SLIP is not set
+CONFIG_SLHC=y
+# CONFIG_USB_NET_DRIVERS is not set
+CONFIG_WLAN=y
+# CONFIG_LIBERTAS_THINFIRM is not set
+# CONFIG_ATMEL is not set
+# CONFIG_AT76C50X_USB is not set
+# CONFIG_PRISM54 is not set
+# CONFIG_USB_ZD1201 is not set
+# CONFIG_USB_NET_RNDIS_WLAN is not set
+# CONFIG_ADM8211 is not set
+# CONFIG_RTL8180 is not set
+# CONFIG_RTL8187 is not set
+# CONFIG_MAC80211_HWSIM is not set
+# CONFIG_MWL8K is not set
+# CONFIG_WIFI_CONTROL_FUNC is not set
+# CONFIG_WCNSS_CORE is not set
+CONFIG_WCNSS_MEM_PRE_ALLOC=y
+# CONFIG_CNSS_CRYPTO is not set
+# CONFIG_CNSS_QCA6290 is not set
+CONFIG_ATH_COMMON=m
+CONFIG_ATH_CARDS=m
+# CONFIG_ATH_DEBUG is not set
+# CONFIG_ATH_REG_DYNAMIC_USER_REG_HINTS is not set
+# CONFIG_ATH5K is not set
+# CONFIG_ATH5K_PCI is not set
+# CONFIG_ATH9K is not set
+# CONFIG_ATH9K_HTC is not set
+# CONFIG_CARL9170 is not set
+# CONFIG_ATH6KL is not set
+# CONFIG_AR5523 is not set
+# CONFIG_WIL6210 is not set
+CONFIG_ATH10K=m
+# CONFIG_ATH10K_PCI is not set
+CONFIG_ATH10K_TARGET_SNOC=m
+CONFIG_ATH10K_SNOC=y
+CONFIG_ATH10K_DEBUG=y
+# CONFIG_ATH10K_DEBUGFS is not set
+# CONFIG_ATH10K_TRACING is not set
+CONFIG_ATH10K_DFS_CERTIFIED=y
+# CONFIG_WCN36XX is not set
+# CONFIG_B43 is not set
+# CONFIG_B43LEGACY is not set
+# CONFIG_BRCMSMAC is not set
+# CONFIG_BRCMFMAC is not set
+# CONFIG_HOSTAP is not set
+# CONFIG_IPW2100 is not set
+# CONFIG_IPW2200 is not set
+# CONFIG_IWLWIFI is not set
+# CONFIG_IWL4965 is not set
+# CONFIG_IWL3945 is not set
+# CONFIG_LIBERTAS is not set
+# CONFIG_HERMES is not set
+# CONFIG_P54_COMMON is not set
+# CONFIG_RT2X00 is not set
+# CONFIG_WL_MEDIATEK is not set
+# CONFIG_RTL_CARDS is not set
+# CONFIG_RTL8XXXU is not set
+# CONFIG_WL_TI is not set
+# CONFIG_ZD1211RW is not set
+# CONFIG_MWIFIEX is not set
+# CONFIG_CW1200 is not set
+# CONFIG_RSI_91X is not set
+# CONFIG_CNSS is not set
+# CONFIG_CLD_DEBUG is not set
+# CONFIG_CLD_HL_SDIO_CORE is not set
+CONFIG_CLD_LL_CORE=y
+# CONFIG_CNSS2 is not set
+# CONFIG_CNSS_GENL is not set
+CONFIG_CNSS_UTILS=y
+
+#
+# Enable WiMAX (Networking options) to see the WiMAX drivers
+#
+# CONFIG_WAN is not set
+# CONFIG_VMXNET3 is not set
+# CONFIG_FUJITSU_ES is not set
+# CONFIG_RMNET is not set
+# CONFIG_ISDN is not set
+# CONFIG_NVM is not set
+
+#
+# Input device support
+#
+CONFIG_INPUT=y
+CONFIG_INPUT_LEDS=y
+# CONFIG_INPUT_FF_MEMLESS is not set
+# CONFIG_INPUT_POLLDEV is not set
+# CONFIG_INPUT_SPARSEKMAP is not set
+# CONFIG_INPUT_MATRIXKMAP is not set
+
+#
+# Userland interfaces
+#
+# CONFIG_INPUT_MOUSEDEV is not set
+# CONFIG_INPUT_JOYDEV is not set
+CONFIG_INPUT_EVDEV=y
+# CONFIG_INPUT_EVBUG is not set
+CONFIG_INPUT_KEYRESET=y
+CONFIG_INPUT_KEYCOMBO=y
+
+#
+# Input Device Drivers
+#
+CONFIG_INPUT_KEYBOARD=y
+# CONFIG_KEYBOARD_ADP5588 is not set
+# CONFIG_KEYBOARD_ADP5589 is not set
+# CONFIG_KEYBOARD_ATKBD is not set
+# CONFIG_KEYBOARD_QT1070 is not set
+# CONFIG_KEYBOARD_QT2160 is not set
+# CONFIG_KEYBOARD_LKKBD is not set
+CONFIG_KEYBOARD_GPIO=y
+# CONFIG_KEYBOARD_GPIO_POLLED is not set
+# CONFIG_KEYBOARD_TCA6416 is not set
+# CONFIG_KEYBOARD_TCA8418 is not set
+# CONFIG_KEYBOARD_MATRIX is not set
+# CONFIG_KEYBOARD_LM8323 is not set
+# CONFIG_KEYBOARD_LM8333 is not set
+# CONFIG_KEYBOARD_MAX7359 is not set
+# CONFIG_KEYBOARD_MCS is not set
+# CONFIG_KEYBOARD_MPR121 is not set
+# CONFIG_KEYBOARD_NEWTON is not set
+# CONFIG_KEYBOARD_OPENCORES is not set
+# CONFIG_KEYBOARD_SAMSUNG is not set
+# CONFIG_KEYBOARD_STOWAWAY is not set
+# CONFIG_KEYBOARD_SUNKBD is not set
+# CONFIG_KEYBOARD_OMAP4 is not set
+# CONFIG_KEYBOARD_XTKBD is not set
+# CONFIG_KEYBOARD_CAP11XX is not set
+# CONFIG_KEYBOARD_BCM is not set
+# CONFIG_INPUT_MOUSE is not set
+# CONFIG_INPUT_JOYSTICK is not set
+# CONFIG_INPUT_TABLET is not set
+# CONFIG_INPUT_TOUCHSCREEN is not set
+CONFIG_INPUT_MISC=y
+# CONFIG_INPUT_AD714X is not set
+# CONFIG_INPUT_BMA150 is not set
+# CONFIG_INPUT_E3X0_BUTTON is not set
+# CONFIG_INPUT_HBTP_INPUT is not set
+# CONFIG_INPUT_PM8941_PWRKEY is not set
+CONFIG_INPUT_QPNP_POWER_ON=y
+# CONFIG_INPUT_MMA8450 is not set
+# CONFIG_INPUT_MPU3050 is not set
+# CONFIG_INPUT_GP2A is not set
+# CONFIG_INPUT_GPIO_BEEPER is not set
+# CONFIG_INPUT_GPIO_TILT_POLLED is not set
+# CONFIG_INPUT_ATI_REMOTE2 is not set
+# CONFIG_INPUT_KEYCHORD is not set
+# CONFIG_INPUT_KEYSPAN_REMOTE is not set
+# CONFIG_INPUT_KXTJ9 is not set
+# CONFIG_INPUT_POWERMATE is not set
+# CONFIG_INPUT_YEALINK is not set
+# CONFIG_INPUT_CM109 is not set
+# CONFIG_INPUT_REGULATOR_HAPTIC is not set
+CONFIG_INPUT_UINPUT=y
+CONFIG_INPUT_GPIO=y
+# CONFIG_INPUT_PCF8574 is not set
+# CONFIG_INPUT_PWM_BEEPER is not set
+# CONFIG_INPUT_GPIO_ROTARY_ENCODER is not set
+# CONFIG_INPUT_ADXL34X is not set
+# CONFIG_INPUT_IMS_PCU is not set
+# CONFIG_INPUT_CMA3000 is not set
+# CONFIG_INPUT_SOC_BUTTON_ARRAY is not set
+# CONFIG_INPUT_DRV260X_HAPTICS is not set
+# CONFIG_INPUT_DRV2665_HAPTICS is not set
+# CONFIG_INPUT_DRV2667_HAPTICS is not set
+# CONFIG_INPUT_PIXART_OTS_PAT9125_SWITCH is not set
+# CONFIG_INPUT_STMVL53L0 is not set
+
+#
+# Hardware I/O ports
+#
+# CONFIG_SERIO is not set
+# CONFIG_GAMEPORT is not set
+
+#
+# Character devices
+#
+CONFIG_TTY=y
+CONFIG_VT=y
+# CONFIG_CONSOLE_TRANSLATIONS is not set
+# CONFIG_VT_CONSOLE is not set
+CONFIG_HW_CONSOLE=y
+# CONFIG_VT_HW_CONSOLE_BINDING is not set
+CONFIG_UNIX98_PTYS=y
+# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
+CONFIG_LEGACY_PTYS=y
+CONFIG_LEGACY_PTY_COUNT=16
+# CONFIG_SERIAL_NONSTANDARD is not set
+# CONFIG_NOZOMI is not set
+# CONFIG_N_GSM is not set
+# CONFIG_TRACE_SINK is not set
+CONFIG_DEVMEM=y
+# CONFIG_DEVKMEM is not set
+
+#
+# Serial drivers
+#
+CONFIG_SERIAL_EARLYCON=y
+# CONFIG_SERIAL_8250 is not set
+
+#
+# Non-8250 serial port support
+#
+# CONFIG_SERIAL_AMBA_PL010 is not set
+# CONFIG_SERIAL_AMBA_PL011 is not set
+# CONFIG_SERIAL_EARLYCON_ARM_SEMIHOST is not set
+# CONFIG_SERIAL_MAX3100 is not set
+# CONFIG_SERIAL_MAX310X is not set
+# CONFIG_SERIAL_UARTLITE is not set
+CONFIG_SERIAL_CORE=y
+CONFIG_SERIAL_CORE_CONSOLE=y
+# CONFIG_SERIAL_JSM is not set
+CONFIG_SERIAL_MSM=y
+CONFIG_SERIAL_MSM_CONSOLE=y
+CONFIG_SERIAL_MSM_HS=y
+# CONFIG_SERIAL_SCCNXP is not set
+# CONFIG_SERIAL_SC16IS7XX is not set
+# CONFIG_SERIAL_ALTERA_JTAGUART is not set
+# CONFIG_SERIAL_ALTERA_UART is not set
+# CONFIG_SERIAL_IFX6X60 is not set
+CONFIG_SERIAL_MSM_SMD=y
+# CONFIG_SERIAL_XILINX_PS_UART is not set
+# CONFIG_SERIAL_ARC is not set
+# CONFIG_SERIAL_RP2 is not set
+# CONFIG_SERIAL_FSL_LPUART is not set
+# CONFIG_SERIAL_CONEXANT_DIGICOLOR is not set
+
+#
+# Diag Support
+#
+CONFIG_DIAG_CHAR=y
+
+#
+# DIAG traffic over USB
+#
+
+#
+# HSIC/SMUX support for DIAG
+#
+# CONFIG_TTY_PRINTK is not set
+# CONFIG_HVC_DCC is not set
+# CONFIG_IPMI_HANDLER is not set
+CONFIG_HW_RANDOM=y
+# CONFIG_HW_RANDOM_TIMERIOMEM is not set
+CONFIG_HW_RANDOM_MSM_LEGACY=y
+# CONFIG_HW_RANDOM_MSM is not set
+# CONFIG_R3964 is not set
+# CONFIG_APPLICOM is not set
+
+#
+# PCMCIA character devices
+#
+# CONFIG_RAW_DRIVER is not set
+# CONFIG_HPET is not set
+# CONFIG_TCG_TPM is not set
+# CONFIG_DEVPORT is not set
+# CONFIG_MSM_SMD_PKT is not set
+# CONFIG_XILLYBUS is not set
+CONFIG_MSM_ADSPRPC=y
+CONFIG_MSM_RDBG=m
+
+#
+# I2C support
+#
+CONFIG_I2C=y
+CONFIG_ACPI_I2C_OPREGION=y
+CONFIG_I2C_BOARDINFO=y
+CONFIG_I2C_COMPAT=y
+CONFIG_I2C_CHARDEV=y
+CONFIG_I2C_MUX=y
+
+#
+# Multiplexer I2C Chip support
+#
+# CONFIG_I2C_ARB_GPIO_CHALLENGE is not set
+CONFIG_I2C_MUX_GPIO=y
+# CONFIG_I2C_MUX_PCA9541 is not set
+# CONFIG_I2C_MUX_PCA954x is not set
+# CONFIG_I2C_MUX_PINCTRL is not set
+# CONFIG_I2C_MUX_REG is not set
+CONFIG_I2C_HELPER_AUTO=y
+CONFIG_I2C_ALGOBIT=y
+
+#
+# I2C Hardware Bus support
+#
+
+#
+# PC SMBus host controller drivers
+#
+# CONFIG_I2C_ALI1535 is not set
+# CONFIG_I2C_ALI1563 is not set
+# CONFIG_I2C_ALI15X3 is not set
+# CONFIG_I2C_AMD756 is not set
+# CONFIG_I2C_AMD8111 is not set
+# CONFIG_I2C_I801 is not set
+# CONFIG_I2C_ISCH is not set
+# CONFIG_I2C_PIIX4 is not set
+# CONFIG_I2C_NFORCE2 is not set
+# CONFIG_I2C_SIS5595 is not set
+# CONFIG_I2C_SIS630 is not set
+# CONFIG_I2C_SIS96X is not set
+# CONFIG_I2C_VIA is not set
+# CONFIG_I2C_VIAPRO is not set
+
+#
+# ACPI drivers
+#
+# CONFIG_I2C_SCMI is not set
+
+#
+# I2C system bus drivers (mostly embedded / system-on-chip)
+#
+# CONFIG_I2C_CADENCE is not set
+# CONFIG_I2C_CBUS_GPIO is not set
+# CONFIG_I2C_DESIGNWARE_PCI is not set
+# CONFIG_I2C_EMEV2 is not set
+CONFIG_I2C_GPIO=y
+# CONFIG_I2C_NOMADIK is not set
+# CONFIG_I2C_OCORES is not set
+# CONFIG_I2C_PCA_PLATFORM is not set
+# CONFIG_I2C_PXA_PCI is not set
+# CONFIG_I2C_QUP is not set
+# CONFIG_I2C_SIMTEC is not set
+# CONFIG_I2C_XILINX is not set
+
+#
+# External I2C/SMBus adapter drivers
+#
+# CONFIG_I2C_DIOLAN_U2C is not set
+# CONFIG_I2C_PARPORT_LIGHT is not set
+# CONFIG_I2C_ROBOTFUZZ_OSIF is not set
+# CONFIG_I2C_TAOS_EVM is not set
+# CONFIG_I2C_TINY_USB is not set
+
+#
+# Other I2C/SMBus bus drivers
+#
+CONFIG_I2C_MSM_V2=y
+# CONFIG_I2C_STUB is not set
+# CONFIG_I2C_SLAVE is not set
+# CONFIG_I2C_DEBUG_CORE is not set
+# CONFIG_I2C_DEBUG_ALGO is not set
+# CONFIG_I2C_DEBUG_BUS is not set
+CONFIG_SLIMBUS=y
+# CONFIG_SLIMBUS_MSM_CTRL is not set
+CONFIG_SLIMBUS_MSM_NGD=y
+CONFIG_SOUNDWIRE=y
+CONFIG_SOUNDWIRE_WCD_CTRL=y
+CONFIG_SPI=y
+# CONFIG_SPI_DEBUG is not set
+CONFIG_SPI_MASTER=y
+
+#
+# SPI Master Controller Drivers
+#
+# CONFIG_SPI_ALTERA is not set
+# CONFIG_SPI_BITBANG is not set
+# CONFIG_SPI_CADENCE is not set
+# CONFIG_SPI_GPIO is not set
+# CONFIG_SPI_FSL_SPI is not set
+# CONFIG_SPI_OC_TINY is not set
+# CONFIG_SPI_PL022 is not set
+# CONFIG_SPI_PXA2XX is not set
+# CONFIG_SPI_PXA2XX_PCI is not set
+# CONFIG_SPI_ROCKCHIP is not set
+CONFIG_SPI_QUP=y
+# CONFIG_SPI_SC18IS602 is not set
+# CONFIG_SPI_XCOMM is not set
+# CONFIG_SPI_XILINX is not set
+# CONFIG_SPI_ZYNQMP_GQSPI is not set
+# CONFIG_SPI_DESIGNWARE is not set
+
+#
+# SPI Protocol Masters
+#
+CONFIG_SPI_SPIDEV=y
+# CONFIG_SPI_TLE62X0 is not set
+# CONFIG_SPI_SLAVE is not set
+CONFIG_SPMI=y
+CONFIG_SPMI_MSM_PMIC_ARB=y
+# CONFIG_HSI is not set
+
+#
+# PPS support
+#
+# CONFIG_PPS is not set
+
+#
+# PPS generators support
+#
+
+#
+# PTP clock support
+#
+# CONFIG_PTP_1588_CLOCK is not set
+
+#
+# Enable PHYLIB and NETWORK_PHY_TIMESTAMPING to see the additional clocks.
+#
+CONFIG_PINCTRL=y
+
+#
+# Pin controllers
+#
+CONFIG_PINMUX=y
+CONFIG_PINCONF=y
+CONFIG_GENERIC_PINCONF=y
+# CONFIG_DEBUG_PINCTRL is not set
+# CONFIG_PINCTRL_AMD is not set
+# CONFIG_PINCTRL_SINGLE is not set
+# CONFIG_PINCTRL_BAYTRAIL is not set
+# CONFIG_PINCTRL_CHERRYVIEW is not set
+# CONFIG_PINCTRL_BROXTON is not set
+# CONFIG_PINCTRL_SUNRISEPOINT is not set
+CONFIG_PINCTRL_MSM=y
+# CONFIG_PINCTRL_APQ8064 is not set
+# CONFIG_PINCTRL_APQ8084 is not set
+# CONFIG_PINCTRL_IPQ8064 is not set
+# CONFIG_PINCTRL_MSM8660 is not set
+# CONFIG_PINCTRL_MSM8960 is not set
+# CONFIG_PINCTRL_MSM8X74 is not set
+# CONFIG_PINCTRL_MSM8916 is not set
+# CONFIG_PINCTRL_QDF2XXX is not set
+# CONFIG_PINCTRL_QCOM_SPMI_PMIC is not set
+# CONFIG_PINCTRL_QCOM_SSBI_PMIC is not set
+CONFIG_PINCTRL_MSM8998=y
+# CONFIG_PINCTRL_MSM8996 is not set
+# CONFIG_PINCTRL_SDM660 is not set
+CONFIG_PINCTRL_WCD=y
+# CONFIG_PINCTRL_LPI is not set
+CONFIG_ARCH_HAVE_CUSTOM_GPIO_H=y
+CONFIG_ARCH_WANT_OPTIONAL_GPIOLIB=y
+CONFIG_GPIOLIB=y
+CONFIG_GPIO_DEVRES=y
+CONFIG_OF_GPIO=y
+CONFIG_GPIO_ACPI=y
+CONFIG_GPIOLIB_IRQCHIP=y
+# CONFIG_DEBUG_GPIO is not set
+CONFIG_GPIO_SYSFS=y
+
+#
+# Memory mapped GPIO drivers
+#
+# CONFIG_GPIO_74XX_MMIO is not set
+# CONFIG_GPIO_ALTERA is not set
+# CONFIG_GPIO_AMDPT is not set
+# CONFIG_GPIO_DWAPB is not set
+# CONFIG_GPIO_GENERIC_PLATFORM is not set
+# CONFIG_GPIO_GRGPIO is not set
+# CONFIG_GPIO_PL061 is not set
+CONFIG_GPIO_QPNP_PIN=y
+# CONFIG_GPIO_QPNP_PIN_DEBUG is not set
+# CONFIG_GPIO_SYSCON is not set
+# CONFIG_GPIO_VX855 is not set
+# CONFIG_GPIO_XGENE is not set
+# CONFIG_GPIO_XILINX is not set
+# CONFIG_GPIO_ZX is not set
+
+#
+# I2C GPIO expanders
+#
+# CONFIG_GPIO_ADP5588 is not set
+# CONFIG_GPIO_ADNP is not set
+# CONFIG_GPIO_MAX7300 is not set
+# CONFIG_GPIO_MAX732X is not set
+CONFIG_GPIO_PCA953X=y
+CONFIG_GPIO_PCA953X_IRQ=y
+# CONFIG_GPIO_PCF857X is not set
+# CONFIG_GPIO_SX150X is not set
+
+#
+# MFD GPIO expanders
+#
+
+#
+# PCI GPIO expanders
+#
+# CONFIG_GPIO_AMD8111 is not set
+# CONFIG_GPIO_BT8XX is not set
+# CONFIG_GPIO_ML_IOH is not set
+# CONFIG_GPIO_RDC321X is not set
+
+#
+# SPI GPIO expanders
+#
+# CONFIG_GPIO_74X164 is not set
+# CONFIG_GPIO_MAX7301 is not set
+# CONFIG_GPIO_MC33880 is not set
+
+#
+# SPI or I2C GPIO expanders
+#
+# CONFIG_GPIO_MCP23S08 is not set
+
+#
+# USB GPIO expanders
+#
+CONFIG_FREEBOX_GPIO=y
+CONFIG_FREEBOX_GPIO_DT=y
+# CONFIG_W1 is not set
+CONFIG_POWER_SUPPLY=y
+# CONFIG_POWER_SUPPLY_DEBUG is not set
+# CONFIG_PDA_POWER is not set
+# CONFIG_GENERIC_ADC_BATTERY is not set
+# CONFIG_TEST_POWER is not set
+# CONFIG_BATTERY_DS2780 is not set
+# CONFIG_BATTERY_DS2781 is not set
+# CONFIG_BATTERY_DS2782 is not set
+# CONFIG_BATTERY_SBS is not set
+# CONFIG_BATTERY_BQ27XXX is not set
+# CONFIG_BATTERY_MAX17040 is not set
+# CONFIG_BATTERY_MAX17042 is not set
+# CONFIG_CHARGER_ISP1704 is not set
+# CONFIG_CHARGER_MAX8903 is not set
+# CONFIG_CHARGER_LP8727 is not set
+# CONFIG_CHARGER_GPIO is not set
+# CONFIG_CHARGER_MANAGER is not set
+# CONFIG_CHARGER_QCOM_SMBB is not set
+# CONFIG_CHARGER_BQ2415X is not set
+# CONFIG_CHARGER_BQ24190 is not set
+# CONFIG_CHARGER_BQ24257 is not set
+# CONFIG_CHARGER_BQ24735 is not set
+# CONFIG_CHARGER_BQ25890 is not set
+# CONFIG_CHARGER_SMB347 is not set
+# CONFIG_BATTERY_BQ28400 is not set
+# CONFIG_BATTERY_GAUGE_LTC2941 is not set
+# CONFIG_CHARGER_RT9455 is not set
+CONFIG_POWER_RESET=y
+# CONFIG_POWER_RESET_GPIO is not set
+# CONFIG_POWER_RESET_GPIO_RESTART is not set
+# CONFIG_POWER_RESET_LTC2952 is not set
+CONFIG_POWER_RESET_QCOM=y
+# CONFIG_QCOM_DLOAD_MODE is not set
+# CONFIG_POWER_RESET_RESTART is not set
+CONFIG_POWER_RESET_XGENE=y
+CONFIG_POWER_RESET_SYSCON=y
+# CONFIG_POWER_RESET_SYSCON_POWEROFF is not set
+# CONFIG_SYSCON_REBOOT_MODE is not set
+
+#
+# Qualcomm Technologies Inc Charger and Fuel Gauge support
+#
+# CONFIG_QPNP_SMBCHARGER is not set
+# CONFIG_QPNP_FG is not set
+CONFIG_QPNP_FG_GEN3=y
+# CONFIG_SMB135X_CHARGER is not set
+# CONFIG_SMB1351_USB_CHARGER is not set
+CONFIG_MSM_BCL_CTL=y
+CONFIG_MSM_BCL_PERIPHERAL_CTL=y
+CONFIG_BATTERY_BCL=y
+CONFIG_QPNP_SMB2=y
+CONFIG_SMB138X_CHARGER=y
+CONFIG_QPNP_QNOVO=y
+# CONFIG_POWER_AVS is not set
+CONFIG_MSM_PM=y
+CONFIG_APSS_CORE_EA=y
+CONFIG_MSM_APM=y
+CONFIG_MSM_IDLE_STATS=y
+CONFIG_MSM_IDLE_STATS_FIRST_BUCKET=62500
+CONFIG_MSM_IDLE_STATS_BUCKET_SHIFT=2
+CONFIG_MSM_IDLE_STATS_BUCKET_COUNT=10
+CONFIG_MSM_SUSPEND_STATS_FIRST_BUCKET=1000000000
+CONFIG_HWMON=y
+# CONFIG_HWMON_VID is not set
+# CONFIG_HWMON_DEBUG_CHIP is not set
+
+#
+# Native drivers
+#
+# CONFIG_SENSORS_AD7314 is not set
+# CONFIG_SENSORS_AD7414 is not set
+# CONFIG_SENSORS_AD7418 is not set
+# CONFIG_SENSORS_ADM1021 is not set
+# CONFIG_SENSORS_ADM1025 is not set
+# CONFIG_SENSORS_ADM1026 is not set
+# CONFIG_SENSORS_ADM1029 is not set
+# CONFIG_SENSORS_ADM1031 is not set
+# CONFIG_SENSORS_ADM9240 is not set
+# CONFIG_SENSORS_ADT7310 is not set
+# CONFIG_SENSORS_ADT7410 is not set
+# CONFIG_SENSORS_ADT7411 is not set
+# CONFIG_SENSORS_ADT7462 is not set
+# CONFIG_SENSORS_ADT7470 is not set
+# CONFIG_SENSORS_ADT7475 is not set
+# CONFIG_SENSORS_ASC7621 is not set
+# CONFIG_SENSORS_ATXP1 is not set
+# CONFIG_SENSORS_DS620 is not set
+# CONFIG_SENSORS_DS1621 is not set
+# CONFIG_SENSORS_I5K_AMB is not set
+# CONFIG_SENSORS_F71805F is not set
+# CONFIG_SENSORS_F71882FG is not set
+# CONFIG_SENSORS_F75375S is not set
+# CONFIG_SENSORS_GL518SM is not set
+# CONFIG_SENSORS_GL520SM is not set
+# CONFIG_SENSORS_G760A is not set
+# CONFIG_SENSORS_G762 is not set
+# CONFIG_SENSORS_GPIO_FAN is not set
+# CONFIG_SENSORS_HIH6130 is not set
+# CONFIG_SENSORS_IIO_HWMON is not set
+# CONFIG_SENSORS_IT87 is not set
+# CONFIG_SENSORS_JC42 is not set
+# CONFIG_SENSORS_POWR1220 is not set
+# CONFIG_SENSORS_LINEAGE is not set
+# CONFIG_SENSORS_LTC2945 is not set
+# CONFIG_SENSORS_LTC4151 is not set
+# CONFIG_SENSORS_LTC4215 is not set
+# CONFIG_SENSORS_LTC4222 is not set
+# CONFIG_SENSORS_LTC4245 is not set
+# CONFIG_SENSORS_LTC4260 is not set
+# CONFIG_SENSORS_LTC4261 is not set
+# CONFIG_SENSORS_MAX1111 is not set
+# CONFIG_SENSORS_MAX16065 is not set
+# CONFIG_SENSORS_MAX1619 is not set
+# CONFIG_SENSORS_MAX1668 is not set
+# CONFIG_SENSORS_MAX197 is not set
+# CONFIG_SENSORS_MAX6639 is not set
+# CONFIG_SENSORS_MAX6642 is not set
+# CONFIG_SENSORS_MAX6650 is not set
+# CONFIG_SENSORS_MAX6697 is not set
+# CONFIG_SENSORS_MAX31790 is not set
+# CONFIG_SENSORS_HTU21 is not set
+# CONFIG_SENSORS_MCP3021 is not set
+# CONFIG_SENSORS_ADCXX is not set
+# CONFIG_SENSORS_LM63 is not set
+# CONFIG_SENSORS_LM70 is not set
+# CONFIG_SENSORS_LM73 is not set
+CONFIG_SENSORS_LM75=y
+# CONFIG_SENSORS_LM77 is not set
+# CONFIG_SENSORS_LM78 is not set
+# CONFIG_SENSORS_LM80 is not set
+# CONFIG_SENSORS_LM83 is not set
+# CONFIG_SENSORS_LM85 is not set
+# CONFIG_SENSORS_LM87 is not set
+# CONFIG_SENSORS_LM90 is not set
+# CONFIG_SENSORS_LM92 is not set
+# CONFIG_SENSORS_LM93 is not set
+# CONFIG_SENSORS_LM95234 is not set
+# CONFIG_SENSORS_LM95241 is not set
+# CONFIG_SENSORS_LM95245 is not set
+# CONFIG_SENSORS_PC87360 is not set
+# CONFIG_SENSORS_PC87427 is not set
+# CONFIG_SENSORS_NTC_THERMISTOR is not set
+# CONFIG_SENSORS_NCT6683 is not set
+# CONFIG_SENSORS_NCT6775 is not set
+# CONFIG_SENSORS_NCT7802 is not set
+# CONFIG_SENSORS_NCT7904 is not set
+# CONFIG_SENSORS_PCF8591 is not set
+# CONFIG_SENSORS_EPM_ADC is not set
+CONFIG_SENSORS_QPNP_ADC_VOLTAGE=y
+# CONFIG_SENSORS_QPNP_ADC_CURRENT is not set
+# CONFIG_PMBUS is not set
+# CONFIG_SENSORS_PWM_FAN is not set
+# CONFIG_SENSORS_SHT15 is not set
+# CONFIG_SENSORS_SHT21 is not set
+# CONFIG_SENSORS_SHTC1 is not set
+# CONFIG_SENSORS_SIS5595 is not set
+# CONFIG_SENSORS_DME1737 is not set
+# CONFIG_SENSORS_EMC1403 is not set
+# CONFIG_SENSORS_EMC2103 is not set
+# CONFIG_SENSORS_EMC6W201 is not set
+# CONFIG_SENSORS_SMSC47M1 is not set
+# CONFIG_SENSORS_SMSC47M192 is not set
+# CONFIG_SENSORS_SMSC47B397 is not set
+# CONFIG_SENSORS_SCH56XX_COMMON is not set
+# CONFIG_SENSORS_SMM665 is not set
+# CONFIG_SENSORS_ADC128D818 is not set
+# CONFIG_SENSORS_ADS1015 is not set
+# CONFIG_SENSORS_ADS7828 is not set
+# CONFIG_SENSORS_ADS7871 is not set
+# CONFIG_SENSORS_AMC6821 is not set
+# CONFIG_SENSORS_INA209 is not set
+# CONFIG_SENSORS_INA2XX is not set
+# CONFIG_SENSORS_TC74 is not set
+# CONFIG_SENSORS_THMC50 is not set
+# CONFIG_SENSORS_TMP102 is not set
+# CONFIG_SENSORS_TMP103 is not set
+# CONFIG_SENSORS_TMP401 is not set
+# CONFIG_SENSORS_TMP421 is not set
+# CONFIG_SENSORS_VIA686A is not set
+# CONFIG_SENSORS_VT1211 is not set
+# CONFIG_SENSORS_VT8231 is not set
+# CONFIG_SENSORS_W83781D is not set
+# CONFIG_SENSORS_W83791D is not set
+# CONFIG_SENSORS_W83792D is not set
+# CONFIG_SENSORS_W83793 is not set
+# CONFIG_SENSORS_W83795 is not set
+# CONFIG_SENSORS_W83L785TS is not set
+# CONFIG_SENSORS_W83L786NG is not set
+# CONFIG_SENSORS_W83627HF is not set
+# CONFIG_SENSORS_W83627EHF is not set
+
+#
+# ACPI drivers
+#
+# CONFIG_SENSORS_ACPI_POWER is not set
+CONFIG_THERMAL=y
+CONFIG_THERMAL_HWMON=y
+CONFIG_THERMAL_OF=y
+CONFIG_THERMAL_WRITABLE_TRIPS=y
+CONFIG_THERMAL_DEFAULT_GOV_STEP_WISE=y
+# CONFIG_THERMAL_DEFAULT_GOV_FAIR_SHARE is not set
+# CONFIG_THERMAL_DEFAULT_GOV_USER_SPACE is not set
+# CONFIG_THERMAL_DEFAULT_GOV_POWER_ALLOCATOR is not set
+# CONFIG_THERMAL_GOV_FAIR_SHARE is not set
+CONFIG_THERMAL_GOV_STEP_WISE=y
+# CONFIG_THERMAL_GOV_BANG_BANG is not set
+# CONFIG_THERMAL_GOV_USER_SPACE is not set
+# CONFIG_THERMAL_GOV_POWER_ALLOCATOR is not set
+CONFIG_CPU_THERMAL=y
+CONFIG_DEVFREQ_THERMAL=y
+# CONFIG_THERMAL_EMULATION is not set
+CONFIG_LIMITS_MONITOR=y
+CONFIG_LIMITS_LITE_HW=y
+CONFIG_THERMAL_MONITOR=y
+CONFIG_THERMAL_TSENS8974=y
+# CONFIG_IMX_THERMAL is not set
+CONFIG_THERMAL_QPNP=y
+CONFIG_THERMAL_QPNP_ADC_TM=y
+CONFIG_QCOM_THERMAL_LIMITS_DCVS=y
+# CONFIG_QCOM_SPMI_TEMP_ALARM is not set
+# CONFIG_WATCHDOG is not set
+CONFIG_SSB_POSSIBLE=y
+
+#
+# Sonics Silicon Backplane
+#
+# CONFIG_SSB is not set
+CONFIG_BCMA_POSSIBLE=y
+
+#
+# Broadcom specific AMBA
+#
+# CONFIG_BCMA is not set
+
+#
+# Multifunction device drivers
+#
+CONFIG_MFD_CORE=y
+# CONFIG_MFD_AS3711 is not set
+# CONFIG_MFD_AS3722 is not set
+# CONFIG_PMIC_ADP5520 is not set
+# CONFIG_MFD_AAT2870_CORE is not set
+# CONFIG_MFD_ATMEL_FLEXCOM is not set
+# CONFIG_MFD_ATMEL_HLCDC is not set
+# CONFIG_MFD_BCM590XX is not set
+# CONFIG_MFD_AXP20X is not set
+# CONFIG_PMIC_DA903X is not set
+# CONFIG_MFD_DA9052_SPI is not set
+# CONFIG_MFD_DA9052_I2C is not set
+# CONFIG_MFD_DA9055 is not set
+# CONFIG_MFD_DA9062 is not set
+# CONFIG_MFD_DA9063 is not set
+# CONFIG_MFD_DA9150 is not set
+# CONFIG_MFD_DLN2 is not set
+# CONFIG_MFD_MC13XXX_SPI is not set
+# CONFIG_MFD_MC13XXX_I2C is not set
+# CONFIG_MFD_HI6421_PMIC is not set
+# CONFIG_HTC_PASIC3 is not set
+# CONFIG_HTC_I2CPLD is not set
+# CONFIG_LPC_ICH is not set
+# CONFIG_LPC_SCH is not set
+# CONFIG_INTEL_SOC_PMIC is not set
+# CONFIG_MFD_JANZ_CMODIO is not set
+# CONFIG_MFD_KEMPLD is not set
+# CONFIG_MFD_88PM800 is not set
+# CONFIG_MFD_88PM805 is not set
+# CONFIG_MFD_88PM860X is not set
+# CONFIG_MFD_MAX14577 is not set
+# CONFIG_MFD_MAX77686 is not set
+# CONFIG_MFD_MAX77693 is not set
+# CONFIG_MFD_MAX77843 is not set
+# CONFIG_MFD_MAX8907 is not set
+# CONFIG_MFD_MAX8925 is not set
+# CONFIG_MFD_MAX8997 is not set
+# CONFIG_MFD_MAX8998 is not set
+# CONFIG_MFD_MT6397 is not set
+# CONFIG_MFD_MENF21BMC is not set
+# CONFIG_EZX_PCAP is not set
+# CONFIG_MFD_VIPERBOARD is not set
+# CONFIG_MFD_RETU is not set
+# CONFIG_MFD_PCF50633 is not set
+# CONFIG_MFD_QCOM_RPM is not set
+CONFIG_MFD_SPMI_PMIC=y
+CONFIG_MFD_I2C_PMIC=y
+# CONFIG_MFD_RDC321X is not set
+# CONFIG_MFD_RTSX_PCI is not set
+# CONFIG_MFD_RT5033 is not set
+# CONFIG_MFD_RTSX_USB is not set
+# CONFIG_MFD_RC5T583 is not set
+# CONFIG_MFD_RK808 is not set
+# CONFIG_MFD_RN5T618 is not set
+# CONFIG_MFD_SEC_CORE is not set
+# CONFIG_MFD_SI476X_CORE is not set
+# CONFIG_MFD_SM501 is not set
+# CONFIG_MFD_SKY81452 is not set
+# CONFIG_MFD_SMSC is not set
+# CONFIG_ABX500_CORE is not set
+# CONFIG_MFD_STMPE is not set
+CONFIG_MFD_SYSCON=y
+# CONFIG_MFD_TI_AM335X_TSCADC is not set
+# CONFIG_MFD_LP3943 is not set
+# CONFIG_MFD_LP8788 is not set
+# CONFIG_MFD_PALMAS is not set
+# CONFIG_TPS6105X is not set
+# CONFIG_TPS65010 is not set
+# CONFIG_TPS6507X is not set
+# CONFIG_MFD_TPS65090 is not set
+# CONFIG_MFD_TPS65217 is not set
+# CONFIG_MFD_TPS65218 is not set
+# CONFIG_MFD_TPS6586X is not set
+# CONFIG_MFD_TPS65910 is not set
+# CONFIG_MFD_TPS65912 is not set
+# CONFIG_MFD_TPS65912_I2C is not set
+# CONFIG_MFD_TPS65912_SPI is not set
+# CONFIG_MFD_TPS80031 is not set
+# CONFIG_TWL4030_CORE is not set
+# CONFIG_TWL6040_CORE is not set
+# CONFIG_MFD_WL1273_CORE is not set
+# CONFIG_MFD_LM3533 is not set
+# CONFIG_MFD_TC3589X is not set
+CONFIG_MFD_FBX7HD_TOP_PSOC=y
+# CONFIG_MFD_TMIO is not set
+# CONFIG_MFD_VX855 is not set
+# CONFIG_MFD_ARIZONA_I2C is not set
+# CONFIG_MFD_ARIZONA_SPI is not set
+# CONFIG_MFD_WM8400 is not set
+# CONFIG_MFD_WM831X_I2C is not set
+# CONFIG_MFD_WM831X_SPI is not set
+# CONFIG_MFD_WM8350_I2C is not set
+# CONFIG_MFD_WM8994 is not set
+CONFIG_MSM_CDC_PINCTRL=y
+CONFIG_MSM_CDC_SUPPLY=y
+CONFIG_WCD9XXX_CODEC_UTIL=y
+# CONFIG_WCD9330_CODEC is not set
+CONFIG_WCD9335_CODEC=y
+CONFIG_WCD934X_CODEC=y
+CONFIG_REGULATOR=y
+# CONFIG_REGULATOR_DEBUG is not set
+CONFIG_REGULATOR_FIXED_VOLTAGE=y
+# CONFIG_REGULATOR_VIRTUAL_CONSUMER is not set
+# CONFIG_REGULATOR_USERSPACE_CONSUMER is not set
+# CONFIG_REGULATOR_ACT8865 is not set
+# CONFIG_REGULATOR_AD5398 is not set
+# CONFIG_REGULATOR_ANATOP is not set
+# CONFIG_REGULATOR_DA9210 is not set
+# CONFIG_REGULATOR_DA9211 is not set
+# CONFIG_REGULATOR_FAN53555 is not set
+# CONFIG_REGULATOR_MSM_GFX_LDO is not set
+# CONFIG_REGULATOR_GPIO is not set
+# CONFIG_REGULATOR_ISL9305 is not set
+# CONFIG_REGULATOR_ISL6271A is not set
+# CONFIG_REGULATOR_LP3971 is not set
+# CONFIG_REGULATOR_LP3972 is not set
+# CONFIG_REGULATOR_LP872X is not set
+# CONFIG_REGULATOR_LP8755 is not set
+# CONFIG_REGULATOR_LTC3589 is not set
+# CONFIG_REGULATOR_MAX1586 is not set
+# CONFIG_REGULATOR_MAX20010 is not set
+# CONFIG_REGULATOR_MAX8649 is not set
+# CONFIG_REGULATOR_MAX8660 is not set
+# CONFIG_REGULATOR_MAX8952 is not set
+# CONFIG_REGULATOR_MAX8973 is not set
+# CONFIG_REGULATOR_MT6311 is not set
+# CONFIG_REGULATOR_ONSEMI_NCP6335D is not set
+# CONFIG_REGULATOR_PFUZE100 is not set
+# CONFIG_REGULATOR_PWM is not set
+# CONFIG_REGULATOR_QCOM_SPMI is not set
+# CONFIG_REGULATOR_TPS51632 is not set
+# CONFIG_REGULATOR_TPS62360 is not set
+# CONFIG_REGULATOR_TPS65023 is not set
+# CONFIG_REGULATOR_TPS6507X is not set
+# CONFIG_REGULATOR_TPS6524X is not set
+CONFIG_REGULATOR_RPM_SMD=y
+CONFIG_REGULATOR_QPNP=y
+CONFIG_REGULATOR_QPNP_LABIBB=y
+CONFIG_REGULATOR_QPNP_LCDB=y
+# CONFIG_REGULATOR_QPNP_OLEDB is not set
+CONFIG_REGULATOR_SPM=y
+# CONFIG_REGULATOR_CPR is not set
+# CONFIG_REGULATOR_CPR2_GFX is not set
+CONFIG_REGULATOR_CPR3=y
+CONFIG_REGULATOR_CPR3_HMSS=y
+CONFIG_REGULATOR_CPR3_MMSS=y
+# CONFIG_REGULATOR_CPR4_APSS is not set
+CONFIG_REGULATOR_CPRH_KBSS=y
+# CONFIG_REGULATOR_CPR4_MMSS_LDO is not set
+# CONFIG_REGULATOR_KRYO is not set
+CONFIG_REGULATOR_MEM_ACC=y
+CONFIG_REGULATOR_PROXY_CONSUMER=y
+CONFIG_REGULATOR_STUB=y
+CONFIG_MEDIA_SUPPORT=y
+
+#
+# Multimedia core support
+#
+CONFIG_MEDIA_CAMERA_SUPPORT=y
+# CONFIG_MEDIA_ANALOG_TV_SUPPORT is not set
+CONFIG_MEDIA_DIGITAL_TV_SUPPORT=y
+CONFIG_MEDIA_RADIO_SUPPORT=y
+# CONFIG_MEDIA_SDR_SUPPORT is not set
+CONFIG_MEDIA_RC_SUPPORT=y
+CONFIG_MEDIA_CEC_SUPPORT=y
+CONFIG_MEDIA_CONTROLLER=y
+CONFIG_VIDEO_DEV=y
+CONFIG_VIDEO_V4L2_SUBDEV_API=y
+CONFIG_VIDEO_V4L2=y
+CONFIG_VIDEO_ADV_DEBUG=y
+# CONFIG_VIDEO_FIXED_MINOR_RANGES is not set
+CONFIG_VIDEOBUF2_CORE=y
+CONFIG_DVB_CORE=y
+# CONFIG_DVB_NET is not set
+# CONFIG_TTPCI_EEPROM is not set
+CONFIG_DVB_MAX_ADAPTERS=8
+# CONFIG_DVB_DYNAMIC_MINORS is not set
+
+#
+# Media drivers
+#
+CONFIG_RC_CORE=y
+CONFIG_RC_MAP=y
+CONFIG_RC_DECODERS=y
+# CONFIG_LIRC is not set
+# CONFIG_IR_NEC_DECODER is not set
+# CONFIG_IR_RC5_DECODER is not set
+CONFIG_IR_RC6_DECODER=y
+# CONFIG_IR_JVC_DECODER is not set
+# CONFIG_IR_SONY_DECODER is not set
+# CONFIG_IR_SANYO_DECODER is not set
+# CONFIG_IR_SHARP_DECODER is not set
+CONFIG_IR_MCE_KBD_DECODER=y
+# CONFIG_IR_XMP_DECODER is not set
+CONFIG_RC_DEVICES=y
+# CONFIG_RC_ATI_REMOTE is not set
+# CONFIG_IR_ENE is not set
+# CONFIG_IR_HIX5HD2 is not set
+# CONFIG_IR_IMON is not set
+CONFIG_IR_MCEUSB=y
+# CONFIG_IR_ITE_CIR is not set
+# CONFIG_IR_FINTEK is not set
+# CONFIG_IR_NUVOTON is not set
+# CONFIG_IR_REDRAT3 is not set
+# CONFIG_IR_STREAMZAP is not set
+# CONFIG_IR_IGORPLUGUSB is not set
+# CONFIG_IR_IGUANA is not set
+# CONFIG_IR_TTUSBIR is not set
+# CONFIG_RC_LOOPBACK is not set
+CONFIG_IR_GPIO_CIR=y
+CONFIG_MEDIA_USB_SUPPORT=y
+
+#
+# Webcam devices
+#
+# CONFIG_USB_VIDEO_CLASS is not set
+# CONFIG_USB_GSPCA is not set
+# CONFIG_USB_PWC is not set
+# CONFIG_VIDEO_CPIA2 is not set
+# CONFIG_USB_ZR364XX is not set
+# CONFIG_USB_STKWEBCAM is not set
+# CONFIG_USB_S2255 is not set
+# CONFIG_VIDEO_USBTV is not set
+
+#
+# Analog/digital TV USB devices
+#
+# CONFIG_VIDEO_AU0828 is not set
+# CONFIG_VIDEO_CX231XX is not set
+# CONFIG_VIDEO_TM6000 is not set
+
+#
+# Digital TV USB devices
+#
+# CONFIG_DVB_USB is not set
+# CONFIG_DVB_USB_V2 is not set
+# CONFIG_DVB_TTUSB_BUDGET is not set
+# CONFIG_DVB_TTUSB_DEC is not set
+# CONFIG_SMS_USB_DRV is not set
+# CONFIG_DVB_B2C2_FLEXCOP_USB is not set
+# CONFIG_DVB_AS102 is not set
+
+#
+# Webcam, TV (analog/digital) USB devices
+#
+# CONFIG_VIDEO_EM28XX is not set
+# CONFIG_MEDIA_PCI_SUPPORT is not set
+CONFIG_V4L_PLATFORM_DRIVERS=y
+# CONFIG_VIDEO_CAFE_CCIC is not set
+# CONFIG_SOC_CAMERA is not set
+# CONFIG_VIDEO_XILINX is not set
+# CONFIG_V4L_MEM2MEM_DRIVERS is not set
+# CONFIG_V4L_TEST_DRIVERS is not set
+# CONFIG_DVB_PLATFORM_DRIVERS is not set
+
+#
+# QTI MSM Camera And Video & AIS
+#
+# CONFIG_MSM_CAMERA is not set
+# CONFIG_MSMB_CAMERA is not set
+CONFIG_MSM_VIDC_V4L2=y
+CONFIG_MSM_VIDC_VMEM=y
+CONFIG_MSM_VIDC_GOVERNORS=y
+# CONFIG_MSM_SDE_ROTATOR is not set
+CONFIG_MSM_SDE_HDMI_CEC=y
+# CONFIG_MSM_AIS is not set
+CONFIG_DVB_MSM_TSPP=m
+
+#
+# Supported MMC/SDIO adapters
+#
+# CONFIG_SMS_SDIO_DRV is not set
+CONFIG_RADIO_ADAPTERS=y
+# CONFIG_RADIO_SI470X is not set
+# CONFIG_RADIO_SI4713 is not set
+# CONFIG_USB_MR800 is not set
+# CONFIG_USB_DSBR is not set
+# CONFIG_RADIO_MAXIRADIO is not set
+# CONFIG_RADIO_SHARK is not set
+# CONFIG_RADIO_SHARK2 is not set
+# CONFIG_USB_KEENE is not set
+# CONFIG_USB_RAREMONO is not set
+# CONFIG_USB_MA901 is not set
+# CONFIG_RADIO_TEA5764 is not set
+# CONFIG_RADIO_SAA7706H is not set
+# CONFIG_RADIO_TEF6862 is not set
+# CONFIG_RADIO_WL1273 is not set
+
+#
+# Texas Instruments WL128x FM driver (ST based)
+#
+# CONFIG_CYPRESS_FIRMWARE is not set
+
+#
+# Media ancillary drivers (tuners, sensors, i2c, frontends)
+#
+# CONFIG_MEDIA_SUBDRV_AUTOSELECT is not set
+CONFIG_MEDIA_ATTACH=y
+# CONFIG_VIDEO_IR_I2C is not set
+
+#
+# Encoders, decoders, sensors and other helper chips
+#
+
+#
+# Audio decoders, processors and mixers
+#
+# CONFIG_VIDEO_TVAUDIO is not set
+# CONFIG_VIDEO_TDA7432 is not set
+# CONFIG_VIDEO_TDA9840 is not set
+# CONFIG_VIDEO_TEA6415C is not set
+# CONFIG_VIDEO_TEA6420 is not set
+# CONFIG_VIDEO_MSP3400 is not set
+# CONFIG_VIDEO_CS5345 is not set
+# CONFIG_VIDEO_CS53L32A is not set
+# CONFIG_VIDEO_TLV320AIC23B is not set
+# CONFIG_VIDEO_UDA1342 is not set
+# CONFIG_VIDEO_WM8775 is not set
+# CONFIG_VIDEO_WM8739 is not set
+# CONFIG_VIDEO_VP27SMPX is not set
+# CONFIG_VIDEO_SONY_BTF_MPX is not set
+
+#
+# RDS decoders
+#
+# CONFIG_VIDEO_SAA6588 is not set
+
+#
+# Video decoders
+#
+# CONFIG_VIDEO_ADV7180 is not set
+# CONFIG_VIDEO_ADV7183 is not set
+# CONFIG_VIDEO_ADV7604 is not set
+# CONFIG_VIDEO_ADV7842 is not set
+# CONFIG_VIDEO_ADV7481 is not set
+# CONFIG_VIDEO_TVTUNER is not set
+# CONFIG_VIDEO_BT819 is not set
+# CONFIG_VIDEO_BT856 is not set
+# CONFIG_VIDEO_BT866 is not set
+# CONFIG_VIDEO_KS0127 is not set
+# CONFIG_VIDEO_ML86V7667 is not set
+# CONFIG_VIDEO_SAA7110 is not set
+# CONFIG_VIDEO_SAA711X is not set
+# CONFIG_VIDEO_TC358743 is not set
+# CONFIG_VIDEO_TVP514X is not set
+# CONFIG_VIDEO_TVP5150 is not set
+# CONFIG_VIDEO_TVP7002 is not set
+# CONFIG_VIDEO_TW2804 is not set
+# CONFIG_VIDEO_TW9903 is not set
+# CONFIG_VIDEO_TW9906 is not set
+# CONFIG_VIDEO_VPX3220 is not set
+
+#
+# Video and audio decoders
+#
+# CONFIG_VIDEO_SAA717X is not set
+# CONFIG_VIDEO_CX25840 is not set
+
+#
+# Video encoders
+#
+# CONFIG_VIDEO_SAA7127 is not set
+# CONFIG_VIDEO_SAA7185 is not set
+# CONFIG_VIDEO_ADV7170 is not set
+# CONFIG_VIDEO_ADV7175 is not set
+# CONFIG_VIDEO_ADV7343 is not set
+# CONFIG_VIDEO_ADV7393 is not set
+# CONFIG_VIDEO_ADV7511 is not set
+# CONFIG_VIDEO_AD9389B is not set
+# CONFIG_VIDEO_AK881X is not set
+# CONFIG_VIDEO_THS8200 is not set
+
+#
+# Camera sensor devices
+#
+# CONFIG_VIDEO_OV2659 is not set
+# CONFIG_VIDEO_OV7640 is not set
+# CONFIG_VIDEO_OV7670 is not set
+# CONFIG_VIDEO_OV9650 is not set
+# CONFIG_VIDEO_VS6624 is not set
+# CONFIG_VIDEO_MT9M032 is not set
+# CONFIG_VIDEO_MT9P031 is not set
+# CONFIG_VIDEO_MT9T001 is not set
+# CONFIG_VIDEO_MT9V011 is not set
+# CONFIG_VIDEO_MT9V032 is not set
+# CONFIG_VIDEO_SR030PC30 is not set
+# CONFIG_VIDEO_NOON010PC30 is not set
+# CONFIG_VIDEO_M5MOLS is not set
+# CONFIG_VIDEO_S5K6AA is not set
+# CONFIG_VIDEO_S5K6A3 is not set
+# CONFIG_VIDEO_S5K4ECGX is not set
+# CONFIG_VIDEO_S5K5BAF is not set
+# CONFIG_VIDEO_SMIAPP is not set
+# CONFIG_VIDEO_S5C73M3 is not set
+
+#
+# Flash devices
+#
+# CONFIG_VIDEO_ADP1653 is not set
+# CONFIG_VIDEO_AS3645A is not set
+# CONFIG_VIDEO_LM3560 is not set
+# CONFIG_VIDEO_LM3646 is not set
+
+#
+# Video improvement chips
+#
+# CONFIG_VIDEO_UPD64031A is not set
+# CONFIG_VIDEO_UPD64083 is not set
+
+#
+# Audio/Video compression chips
+#
+# CONFIG_VIDEO_SAA6752HS is not set
+
+#
+# Miscellaneous helper chips
+#
+# CONFIG_VIDEO_THS7303 is not set
+# CONFIG_VIDEO_M52790 is not set
+
+#
+# Sensors used on soc_camera driver
+#
+CONFIG_MEDIA_TUNER=y
+
+#
+# Customize TV tuners
+#
+# CONFIG_MEDIA_TUNER_SIMPLE is not set
+# CONFIG_MEDIA_TUNER_TDA8290 is not set
+# CONFIG_MEDIA_TUNER_TDA827X is not set
+# CONFIG_MEDIA_TUNER_TDA18271 is not set
+# CONFIG_MEDIA_TUNER_TDA9887 is not set
+# CONFIG_MEDIA_TUNER_TEA5761 is not set
+# CONFIG_MEDIA_TUNER_TEA5767 is not set
+# CONFIG_MEDIA_TUNER_MSI001 is not set
+# CONFIG_MEDIA_TUNER_MT20XX is not set
+# CONFIG_MEDIA_TUNER_MT2060 is not set
+# CONFIG_MEDIA_TUNER_MT2063 is not set
+# CONFIG_MEDIA_TUNER_MT2266 is not set
+# CONFIG_MEDIA_TUNER_MT2131 is not set
+# CONFIG_MEDIA_TUNER_QT1010 is not set
+# CONFIG_MEDIA_TUNER_XC2028 is not set
+# CONFIG_MEDIA_TUNER_XC5000 is not set
+# CONFIG_MEDIA_TUNER_XC4000 is not set
+# CONFIG_MEDIA_TUNER_MXL5005S is not set
+# CONFIG_MEDIA_TUNER_MXL5007T is not set
+# CONFIG_MEDIA_TUNER_MC44S803 is not set
+# CONFIG_MEDIA_TUNER_MAX2165 is not set
+# CONFIG_MEDIA_TUNER_TDA18218 is not set
+# CONFIG_MEDIA_TUNER_FC0011 is not set
+# CONFIG_MEDIA_TUNER_FC0012 is not set
+# CONFIG_MEDIA_TUNER_FC0013 is not set
+# CONFIG_MEDIA_TUNER_TDA18212 is not set
+# CONFIG_MEDIA_TUNER_E4000 is not set
+# CONFIG_MEDIA_TUNER_FC2580 is not set
+# CONFIG_MEDIA_TUNER_M88RS6000T is not set
+# CONFIG_MEDIA_TUNER_TUA9001 is not set
+CONFIG_MEDIA_TUNER_SI2157=m
+# CONFIG_MEDIA_TUNER_IT913X is not set
+# CONFIG_MEDIA_TUNER_R820T is not set
+# CONFIG_MEDIA_TUNER_MXL301RF is not set
+# CONFIG_MEDIA_TUNER_QM1D1C0042 is not set
+
+#
+# Customise DVB Frontends
+#
+
+#
+# Multistandard (satellite) frontends
+#
+# CONFIG_DVB_STB0899 is not set
+# CONFIG_DVB_STB6100 is not set
+# CONFIG_DVB_STV090x is not set
+# CONFIG_DVB_STV6110x is not set
+# CONFIG_DVB_M88DS3103 is not set
+
+#
+# Multistandard (cable + terrestrial) frontends
+#
+# CONFIG_DVB_DRXK is not set
+# CONFIG_DVB_TDA18271C2DD is not set
+# CONFIG_DVB_SI2165 is not set
+
+#
+# DVB-S (satellite) frontends
+#
+# CONFIG_DVB_CX24110 is not set
+# CONFIG_DVB_CX24123 is not set
+# CONFIG_DVB_MT312 is not set
+# CONFIG_DVB_ZL10036 is not set
+# CONFIG_DVB_ZL10039 is not set
+# CONFIG_DVB_S5H1420 is not set
+# CONFIG_DVB_STV0288 is not set
+# CONFIG_DVB_STB6000 is not set
+# CONFIG_DVB_STV0299 is not set
+# CONFIG_DVB_STV6110 is not set
+# CONFIG_DVB_STV0900 is not set
+# CONFIG_DVB_TDA8083 is not set
+# CONFIG_DVB_TDA10086 is not set
+# CONFIG_DVB_TDA8261 is not set
+# CONFIG_DVB_VES1X93 is not set
+# CONFIG_DVB_TUNER_ITD1000 is not set
+# CONFIG_DVB_TUNER_CX24113 is not set
+# CONFIG_DVB_TDA826X is not set
+# CONFIG_DVB_TUA6100 is not set
+# CONFIG_DVB_CX24116 is not set
+# CONFIG_DVB_CX24117 is not set
+# CONFIG_DVB_CX24120 is not set
+# CONFIG_DVB_SI21XX is not set
+# CONFIG_DVB_TS2020 is not set
+# CONFIG_DVB_DS3000 is not set
+# CONFIG_DVB_MB86A16 is not set
+# CONFIG_DVB_TDA10071 is not set
+
+#
+# DVB-T (terrestrial) frontends
+#
+# CONFIG_DVB_SP8870 is not set
+# CONFIG_DVB_SP887X is not set
+# CONFIG_DVB_CX22700 is not set
+# CONFIG_DVB_CX22702 is not set
+# CONFIG_DVB_S5H1432 is not set
+# CONFIG_DVB_DRXD is not set
+# CONFIG_DVB_L64781 is not set
+# CONFIG_DVB_TDA1004X is not set
+# CONFIG_DVB_NXT6000 is not set
+# CONFIG_DVB_MT352 is not set
+# CONFIG_DVB_ZL10353 is not set
+# CONFIG_DVB_DIB3000MB is not set
+# CONFIG_DVB_DIB3000MC is not set
+# CONFIG_DVB_DIB7000M is not set
+# CONFIG_DVB_DIB7000P is not set
+# CONFIG_DVB_DIB9000 is not set
+# CONFIG_DVB_TDA10048 is not set
+# CONFIG_DVB_AF9013 is not set
+# CONFIG_DVB_EC100 is not set
+# CONFIG_DVB_HD29L2 is not set
+# CONFIG_DVB_STV0367 is not set
+# CONFIG_DVB_CXD2820R is not set
+# CONFIG_DVB_CXD2841ER is not set
+# CONFIG_DVB_RTL2830 is not set
+# CONFIG_DVB_RTL2832 is not set
+CONFIG_DVB_SI2168=m
+# CONFIG_DVB_AS102_FE is not set
+
+#
+# DVB-C (cable) frontends
+#
+# CONFIG_DVB_VES1820 is not set
+# CONFIG_DVB_TDA10021 is not set
+# CONFIG_DVB_TDA10023 is not set
+# CONFIG_DVB_STV0297 is not set
+
+#
+# ATSC (North American/Korean Terrestrial/Cable DTV) frontends
+#
+# CONFIG_DVB_NXT200X is not set
+# CONFIG_DVB_OR51211 is not set
+# CONFIG_DVB_OR51132 is not set
+# CONFIG_DVB_BCM3510 is not set
+# CONFIG_DVB_LGDT330X is not set
+# CONFIG_DVB_LGDT3305 is not set
+# CONFIG_DVB_LGDT3306A is not set
+# CONFIG_DVB_LG2160 is not set
+# CONFIG_DVB_S5H1409 is not set
+# CONFIG_DVB_AU8522_DTV is not set
+# CONFIG_DVB_AU8522_V4L is not set
+# CONFIG_DVB_S5H1411 is not set
+
+#
+# ISDB-T (terrestrial) frontends
+#
+# CONFIG_DVB_S921 is not set
+# CONFIG_DVB_DIB8000 is not set
+# CONFIG_DVB_MB86A20S is not set
+
+#
+# ISDB-S (satellite) & ISDB-T (terrestrial) frontends
+#
+# CONFIG_DVB_TC90522 is not set
+
+#
+# Digital terrestrial only tuners/PLL
+#
+# CONFIG_DVB_PLL is not set
+# CONFIG_DVB_TUNER_DIB0070 is not set
+# CONFIG_DVB_TUNER_DIB0090 is not set
+
+#
+# SEC control devices for DVB-S
+#
+# CONFIG_DVB_DRX39XYJ is not set
+# CONFIG_DVB_LNBH25 is not set
+# CONFIG_DVB_LNBP21 is not set
+# CONFIG_DVB_LNBP22 is not set
+# CONFIG_DVB_ISL6405 is not set
+# CONFIG_DVB_ISL6421 is not set
+# CONFIG_DVB_ISL6423 is not set
+# CONFIG_DVB_A8293 is not set
+# CONFIG_DVB_SP2 is not set
+# CONFIG_DVB_LGS8GL5 is not set
+# CONFIG_DVB_LGS8GXX is not set
+# CONFIG_DVB_ATBM8830 is not set
+# CONFIG_DVB_TDA665x is not set
+# CONFIG_DVB_IX2505V is not set
+# CONFIG_DVB_M88RS2000 is not set
+# CONFIG_DVB_AF9033 is not set
+# CONFIG_DVB_HORUS3A is not set
+# CONFIG_DVB_ASCOT2E is not set
+
+#
+# Tools to develop new frontends
+#
+# CONFIG_DVB_DUMMY_FE is not set
+
+#
+# Graphics support
+#
+# CONFIG_VGA_ARB is not set
+# CONFIG_QCOM_KGSL is not set
+CONFIG_DRM=y
+CONFIG_DRM_MIPI_DSI=y
+CONFIG_DRM_KMS_HELPER=y
+# CONFIG_DRM_FBDEV_EMULATION is not set
+# CONFIG_DRM_LOAD_EDID_FIRMWARE is not set
+
+#
+# I2C encoder or helper chips
+#
+# CONFIG_DRM_I2C_ADV7511 is not set
+# CONFIG_DRM_I2C_CH7006 is not set
+# CONFIG_DRM_I2C_SIL164 is not set
+# CONFIG_DRM_I2C_NXP_TDA998X is not set
+# CONFIG_DRM_TDFX is not set
+# CONFIG_DRM_R128 is not set
+# CONFIG_DRM_RADEON is not set
+# CONFIG_DRM_AMDGPU is not set
+# CONFIG_DRM_NOUVEAU is not set
+# CONFIG_DRM_MGA is not set
+# CONFIG_DRM_VIA is not set
+# CONFIG_DRM_SAVAGE is not set
+# CONFIG_DRM_VGEM is not set
+# CONFIG_DRM_UDL is not set
+# CONFIG_DRM_AST is not set
+# CONFIG_DRM_MGAG200 is not set
+# CONFIG_DRM_CIRRUS_QEMU is not set
+# CONFIG_DRM_QXL is not set
+# CONFIG_DRM_BOCHS is not set
+CONFIG_DRM_MSM=y
+# CONFIG_DRM_MSM_REGISTER_LOGGING is not set
+CONFIG_DRM_MSM_DSI=y
+CONFIG_DRM_MSM_DSI_STAGING=y
+CONFIG_DRM_MSM_DSI_28NM_PHY=y
+CONFIG_DRM_MSM_DSI_20NM_PHY=y
+# CONFIG_DRM_MSM_MDP4 is not set
+# CONFIG_DRM_MSM_HDCP is not set
+CONFIG_DRM_SDE_WB=y
+CONFIG_DRM_SDE_HDMI=y
+# CONFIG_DRM_SDE_EVTLOG_DEBUG is not set
+CONFIG_DRM_PANEL=y
+
+#
+# Display Panels
+#
+# CONFIG_DRM_PANEL_SIMPLE is not set
+# CONFIG_DRM_PANEL_SAMSUNG_LD9040 is not set
+# CONFIG_DRM_PANEL_LG_LG4573 is not set
+# CONFIG_DRM_PANEL_SAMSUNG_S6E8AA0 is not set
+# CONFIG_DRM_PANEL_SHARP_LQ101R1SX01 is not set
+CONFIG_DRM_BRIDGE=y
+
+#
+# Display Interface Bridges
+#
+# CONFIG_DRM_NXP_PTN3460 is not set
+# CONFIG_DRM_PARADE_PS8622 is not set
+# CONFIG_MSM_BA_V4L2 is not set
+
+#
+# Frame buffer Devices
+#
+CONFIG_FB=y
+# CONFIG_FIRMWARE_EDID is not set
+CONFIG_FB_CMDLINE=y
+# CONFIG_FB_DDC is not set
+# CONFIG_FB_BOOT_VESA_SUPPORT is not set
+# CONFIG_FB_CFB_FILLRECT is not set
+# CONFIG_FB_CFB_COPYAREA is not set
+# CONFIG_FB_CFB_IMAGEBLIT is not set
+# CONFIG_FB_CFB_REV_PIXELS_IN_BYTE is not set
+# CONFIG_FB_SYS_FILLRECT is not set
+# CONFIG_FB_SYS_COPYAREA is not set
+# CONFIG_FB_SYS_IMAGEBLIT is not set
+# CONFIG_FB_FOREIGN_ENDIAN is not set
+# CONFIG_FB_SYS_FOPS is not set
+# CONFIG_FB_SVGALIB is not set
+# CONFIG_FB_MACMODES is not set
+# CONFIG_FB_BACKLIGHT is not set
+# CONFIG_FB_MODE_HELPERS is not set
+# CONFIG_FB_TILEBLITTING is not set
+
+#
+# Frame buffer hardware drivers
+#
+# CONFIG_FB_CIRRUS is not set
+# CONFIG_FB_PM2 is not set
+# CONFIG_FB_ARMCLCD is not set
+# CONFIG_FB_CYBER2000 is not set
+# CONFIG_FB_ASILIANT is not set
+# CONFIG_FB_IMSTT is not set
+# CONFIG_FB_OPENCORES is not set
+# CONFIG_FB_S1D13XXX is not set
+# CONFIG_FB_NVIDIA is not set
+# CONFIG_FB_RIVA is not set
+# CONFIG_FB_I740 is not set
+# CONFIG_FB_MATROX is not set
+# CONFIG_FB_RADEON is not set
+# CONFIG_FB_ATY128 is not set
+# CONFIG_FB_ATY is not set
+# CONFIG_FB_S3 is not set
+# CONFIG_FB_SAVAGE is not set
+# CONFIG_FB_SIS is not set
+# CONFIG_FB_NEOMAGIC is not set
+# CONFIG_FB_KYRO is not set
+# CONFIG_FB_3DFX is not set
+# CONFIG_FB_VOODOO1 is not set
+# CONFIG_FB_VT8623 is not set
+# CONFIG_FB_TRIDENT is not set
+# CONFIG_FB_ARK is not set
+# CONFIG_FB_PM3 is not set
+# CONFIG_FB_CARMINE is not set
+# CONFIG_FB_SMSCUFX is not set
+# CONFIG_FB_UDL is not set
+# CONFIG_FB_IBM_GXT4500 is not set
+# CONFIG_FB_VIRTUAL is not set
+# CONFIG_FB_METRONOME is not set
+# CONFIG_FB_MB862XX is not set
+# CONFIG_FB_MSM is not set
+# CONFIG_FB_BROADSHEET is not set
+# CONFIG_FB_AUO_K190X is not set
+# CONFIG_FB_SIMPLE is not set
+# CONFIG_MSM_DBA is not set
+# CONFIG_FB_SSD1307 is not set
+# CONFIG_FB_SM712 is not set
+CONFIG_BACKLIGHT_LCD_SUPPORT=y
+# CONFIG_LCD_CLASS_DEVICE is not set
+CONFIG_BACKLIGHT_CLASS_DEVICE=y
+# CONFIG_BACKLIGHT_GENERIC is not set
+# CONFIG_BACKLIGHT_PWM is not set
+# CONFIG_BACKLIGHT_PM8941_WLED is not set
+# CONFIG_BACKLIGHT_ADP8860 is not set
+# CONFIG_BACKLIGHT_ADP8870 is not set
+# CONFIG_BACKLIGHT_LM3630A is not set
+# CONFIG_BACKLIGHT_LM3639 is not set
+# CONFIG_BACKLIGHT_LP855X is not set
+# CONFIG_BACKLIGHT_GPIO is not set
+# CONFIG_BACKLIGHT_LV5207LP is not set
+# CONFIG_BACKLIGHT_BD6107 is not set
+# CONFIG_ADF is not set
+# CONFIG_VGASTATE is not set
+CONFIG_HDMI=y
+
+#
+# Console display driver support
+#
+CONFIG_DUMMY_CONSOLE=y
+CONFIG_DUMMY_CONSOLE_COLUMNS=80
+CONFIG_DUMMY_CONSOLE_ROWS=25
+# CONFIG_FRAMEBUFFER_CONSOLE is not set
+# CONFIG_LOGO is not set
+CONFIG_SOUND=y
+# CONFIG_SOUND_OSS_CORE is not set
+CONFIG_SND=y
+CONFIG_SND_TIMER=y
+CONFIG_SND_PCM=y
+CONFIG_SND_HWDEP=y
+CONFIG_SND_RAWMIDI=y
+CONFIG_SND_COMPRESS_OFFLOAD=y
+CONFIG_SND_JACK=y
+# CONFIG_SND_SEQUENCER is not set
+# CONFIG_SND_MIXER_OSS is not set
+# CONFIG_SND_PCM_OSS is not set
+CONFIG_SND_PCM_TIMER=y
+# CONFIG_SND_HRTIMER is not set
+CONFIG_SND_DYNAMIC_MINORS=y
+CONFIG_SND_MAX_CARDS=32
+CONFIG_SND_SUPPORT_OLD_API=y
+CONFIG_SND_PROC_FS=y
+CONFIG_SND_VERBOSE_PROCFS=y
+# CONFIG_SND_VERBOSE_PRINTK is not set
+# CONFIG_SND_DEBUG is not set
+# CONFIG_SND_RAWMIDI_SEQ is not set
+# CONFIG_SND_OPL3_LIB_SEQ is not set
+# CONFIG_SND_OPL4_LIB_SEQ is not set
+# CONFIG_SND_SBAWE_SEQ is not set
+# CONFIG_SND_EMU10K1_SEQ is not set
+CONFIG_SND_DRIVERS=y
+# CONFIG_SND_DUMMY is not set
+# CONFIG_SND_ALOOP is not set
+# CONFIG_SND_MTPAV is not set
+# CONFIG_SND_SERIAL_U16550 is not set
+# CONFIG_SND_MPU401 is not set
+CONFIG_SND_PCI=y
+# CONFIG_SND_AD1889 is not set
+# CONFIG_SND_ALS300 is not set
+# CONFIG_SND_ALI5451 is not set
+# CONFIG_SND_ATIIXP is not set
+# CONFIG_SND_ATIIXP_MODEM is not set
+# CONFIG_SND_AU8810 is not set
+# CONFIG_SND_AU8820 is not set
+# CONFIG_SND_AU8830 is not set
+# CONFIG_SND_AW2 is not set
+# CONFIG_SND_AZT3328 is not set
+# CONFIG_SND_BT87X is not set
+# CONFIG_SND_CA0106 is not set
+# CONFIG_SND_CMIPCI is not set
+# CONFIG_SND_OXYGEN is not set
+# CONFIG_SND_CS4281 is not set
+# CONFIG_SND_CS46XX is not set
+# CONFIG_SND_CTXFI is not set
+# CONFIG_SND_DARLA20 is not set
+# CONFIG_SND_GINA20 is not set
+# CONFIG_SND_LAYLA20 is not set
+# CONFIG_SND_DARLA24 is not set
+# CONFIG_SND_GINA24 is not set
+# CONFIG_SND_LAYLA24 is not set
+# CONFIG_SND_MONA is not set
+# CONFIG_SND_MIA is not set
+# CONFIG_SND_ECHO3G is not set
+# CONFIG_SND_INDIGO is not set
+# CONFIG_SND_INDIGOIO is not set
+# CONFIG_SND_INDIGODJ is not set
+# CONFIG_SND_INDIGOIOX is not set
+# CONFIG_SND_INDIGODJX is not set
+# CONFIG_SND_EMU10K1 is not set
+# CONFIG_SND_EMU10K1X is not set
+# CONFIG_SND_ENS1370 is not set
+# CONFIG_SND_ENS1371 is not set
+# CONFIG_SND_ES1938 is not set
+# CONFIG_SND_ES1968 is not set
+# CONFIG_SND_FM801 is not set
+# CONFIG_SND_HDSP is not set
+# CONFIG_SND_HDSPM is not set
+# CONFIG_SND_ICE1712 is not set
+# CONFIG_SND_ICE1724 is not set
+# CONFIG_SND_INTEL8X0 is not set
+# CONFIG_SND_INTEL8X0M is not set
+# CONFIG_SND_KORG1212 is not set
+# CONFIG_SND_LOLA is not set
+# CONFIG_SND_LX6464ES is not set
+# CONFIG_SND_MAESTRO3 is not set
+# CONFIG_SND_MIXART is not set
+# CONFIG_SND_NM256 is not set
+# CONFIG_SND_PCXHR is not set
+# CONFIG_SND_RIPTIDE is not set
+# CONFIG_SND_RME32 is not set
+# CONFIG_SND_RME96 is not set
+# CONFIG_SND_RME9652 is not set
+# CONFIG_SND_SE6X is not set
+# CONFIG_SND_SONICVIBES is not set
+# CONFIG_SND_TRIDENT is not set
+# CONFIG_SND_VIA82XX is not set
+# CONFIG_SND_VIA82XX_MODEM is not set
+# CONFIG_SND_VIRTUOSO is not set
+# CONFIG_SND_VX222 is not set
+# CONFIG_SND_YMFPCI is not set
+
+#
+# HD-Audio
+#
+# CONFIG_SND_HDA_INTEL is not set
+CONFIG_SND_HDA_PREALLOC_SIZE=64
+CONFIG_SND_SPI=y
+CONFIG_SND_USB=y
+CONFIG_SND_USB_AUDIO=y
+# CONFIG_SND_USB_UA101 is not set
+# CONFIG_SND_USB_CAIAQ is not set
+# CONFIG_SND_USB_6FIRE is not set
+# CONFIG_SND_USB_HIFACE is not set
+# CONFIG_SND_BCD2000 is not set
+# CONFIG_SND_USB_POD is not set
+# CONFIG_SND_USB_PODHD is not set
+# CONFIG_SND_USB_TONEPORT is not set
+# CONFIG_SND_USB_VARIAX is not set
+CONFIG_SND_USB_AUDIO_QMI=y
+CONFIG_SND_SOC=y
+CONFIG_SND_SOC_COMPRESS=y
+# CONFIG_SND_ATMEL_SOC is not set
+# CONFIG_SND_DESIGNWARE_I2S is not set
+
+#
+# SoC Audio for Freescale CPUs
+#
+
+#
+# Common SoC Audio options for Freescale CPUs:
+#
+# CONFIG_SND_SOC_FSL_ASRC is not set
+# CONFIG_SND_SOC_FSL_SAI is not set
+# CONFIG_SND_SOC_FSL_SSI is not set
+# CONFIG_SND_SOC_FSL_SPDIF is not set
+# CONFIG_SND_SOC_FSL_ESAI is not set
+# CONFIG_SND_SOC_IMX_AUDMUX is not set
+
+#
+# MSM SoC Audio support
+#
+CONFIG_SND_SOC_MSM_HOSTLESS_PCM=y
+CONFIG_SND_SOC_MSM_QDSP6V2_INTF=y
+CONFIG_SND_SOC_QDSP6V2=y
+# CONFIG_SND_SOC_QDSP_DEBUG is not set
+# CONFIG_DOLBY_DS2 is not set
+CONFIG_DOLBY_LICENSE=y
+CONFIG_DTS_EAGLE=y
+CONFIG_DTS_SRS_TM=y
+CONFIG_QTI_PP=y
+# CONFIG_QTI_PP_AUDIOSPHERE is not set
+CONFIG_SND_SOC_CPE=y
+# CONFIG_SND_SOC_INT_CODEC is not set
+# CONFIG_SND_SOC_EXT_CODEC is not set
+CONFIG_SND_SOC_MSM8998=y
+# CONFIG_SND_SOC_QCOM is not set
+
+#
+# Allwinner SoC Audio support
+#
+# CONFIG_SND_SUN4I_CODEC is not set
+# CONFIG_SND_SOC_XTFPGA_I2S is not set
+CONFIG_SND_SOC_I2C_AND_SPI=y
+
+#
+# CODEC drivers
+#
+# CONFIG_SND_SOC_AC97_CODEC is not set
+# CONFIG_SND_SOC_ADAU1701 is not set
+# CONFIG_SND_SOC_AK4104 is not set
+# CONFIG_SND_SOC_AK4554 is not set
+# CONFIG_SND_SOC_AK4613 is not set
+# CONFIG_SND_SOC_AK4642 is not set
+# CONFIG_SND_SOC_AK5386 is not set
+# CONFIG_SND_SOC_ALC5623 is not set
+# CONFIG_SND_SOC_CS35L32 is not set
+# CONFIG_SND_SOC_CS42L51_I2C is not set
+# CONFIG_SND_SOC_CS42L52 is not set
+# CONFIG_SND_SOC_CS42L56 is not set
+# CONFIG_SND_SOC_CS42L73 is not set
+# CONFIG_SND_SOC_CS4265 is not set
+# CONFIG_SND_SOC_CS4270 is not set
+# CONFIG_SND_SOC_CS4271_I2C is not set
+# CONFIG_SND_SOC_CS4271_SPI is not set
+# CONFIG_SND_SOC_CS42XX8_I2C is not set
+# CONFIG_SND_SOC_CS4349 is not set
+# CONFIG_SND_SOC_HDMI_CODEC is not set
+# CONFIG_SND_SOC_ES8328 is not set
+# CONFIG_SND_SOC_GTM601 is not set
+# CONFIG_SND_SOC_PCM1681 is not set
+# CONFIG_SND_SOC_PCM1792A is not set
+# CONFIG_SND_SOC_PCM512x_I2C is not set
+# CONFIG_SND_SOC_PCM512x_SPI is not set
+# CONFIG_SND_SOC_RT5631 is not set
+# CONFIG_SND_SOC_RT5677_SPI is not set
+# CONFIG_SND_SOC_SGTL5000 is not set
+CONFIG_SND_SOC_SIL9437=y
+# CONFIG_SND_SOC_SIRF_AUDIO_CODEC is not set
+# CONFIG_SND_SOC_SPDIF is not set
+# CONFIG_SND_SOC_SSM2602_SPI is not set
+# CONFIG_SND_SOC_SSM2602_I2C is not set
+# CONFIG_SND_SOC_SSM4567 is not set
+# CONFIG_SND_SOC_STA32X is not set
+# CONFIG_SND_SOC_STA350 is not set
+# CONFIG_SND_SOC_STI_SAS is not set
+# CONFIG_SND_SOC_TAS2552 is not set
+# CONFIG_SND_SOC_TAS5086 is not set
+# CONFIG_SND_SOC_TAS571X is not set
+CONFIG_SND_SOC_TAS5766=y
+# CONFIG_SND_SOC_TFA9879 is not set
+# CONFIG_SND_SOC_TLV320AIC23_I2C is not set
+# CONFIG_SND_SOC_TLV320AIC23_SPI is not set
+# CONFIG_SND_SOC_TLV320AIC31XX is not set
+# CONFIG_SND_SOC_TLV320AIC3X is not set
+# CONFIG_SND_SOC_TS3A227E is not set
+CONFIG_SND_SOC_WCD934X_DSD=y
+CONFIG_SND_SOC_WCD9335=y
+CONFIG_SND_SOC_WCD934X=y
+CONFIG_SND_SOC_WCD934X_MBHC=y
+CONFIG_SND_SOC_WSA881X=y
+CONFIG_SND_SOC_WCD9XXX=y
+CONFIG_SND_SOC_WCD9XXX_V2=y
+CONFIG_SND_SOC_WCD_CPE=y
+CONFIG_AUDIO_EXT_CLK=y
+CONFIG_SND_SOC_WCD_MBHC=y
+CONFIG_SND_SOC_WCD_DSP_MGR=y
+CONFIG_SND_SOC_WCD_SPI=y
+# CONFIG_SND_SOC_WM8510 is not set
+# CONFIG_SND_SOC_WM8523 is not set
+# CONFIG_SND_SOC_WM8580 is not set
+# CONFIG_SND_SOC_WM8711 is not set
+# CONFIG_SND_SOC_WM8728 is not set
+# CONFIG_SND_SOC_WM8731 is not set
+# CONFIG_SND_SOC_WM8737 is not set
+# CONFIG_SND_SOC_WM8741 is not set
+# CONFIG_SND_SOC_WM8750 is not set
+# CONFIG_SND_SOC_WM8753 is not set
+# CONFIG_SND_SOC_WM8770 is not set
+# CONFIG_SND_SOC_WM8776 is not set
+CONFIG_SND_SOC_WM8804=y
+CONFIG_SND_SOC_WM8804_I2C=y
+# CONFIG_SND_SOC_WM8804_SPI is not set
+# CONFIG_SND_SOC_WM8903 is not set
+# CONFIG_SND_SOC_WM8962 is not set
+# CONFIG_SND_SOC_WM8978 is not set
+# CONFIG_SND_SOC_TPA6130A2 is not set
+CONFIG_SND_SOC_MSM_STUB=y
+CONFIG_SND_SOC_MSM_HDMI_CODEC_RX=y
+# CONFIG_SND_SOC_SDM660_CDC is not set
+# CONFIG_SND_SOC_MSM_SDW is not set
+# CONFIG_SND_SIMPLE_CARD is not set
+# CONFIG_SOUND_PRIME is not set
+
+#
+# HID support
+#
+CONFIG_HID=y
+# CONFIG_HID_BATTERY_STRENGTH is not set
+CONFIG_HIDRAW=y
+CONFIG_UHID=y
+CONFIG_HID_GENERIC=y
+
+#
+# Special HID drivers
+#
+# CONFIG_HID_A4TECH is not set
+# CONFIG_HID_ACRUX is not set
+CONFIG_HID_APPLE=y
+# CONFIG_HID_APPLEIR is not set
+# CONFIG_HID_AUREAL is not set
+# CONFIG_HID_BELKIN is not set
+# CONFIG_HID_BETOP_FF is not set
+# CONFIG_HID_CHERRY is not set
+# CONFIG_HID_CHICONY is not set
+# CONFIG_HID_CORSAIR is not set
+# CONFIG_HID_PRODIKEYS is not set
+# CONFIG_HID_CP2112 is not set
+# CONFIG_HID_CYPRESS is not set
+# CONFIG_HID_DRAGONRISE is not set
+# CONFIG_HID_EMS_FF is not set
+# CONFIG_HID_ELECOM is not set
+# CONFIG_HID_ELO is not set
+# CONFIG_HID_EZKEY is not set
+# CONFIG_HID_GEMBIRD is not set
+# CONFIG_HID_GFRM is not set
+# CONFIG_HID_HOLTEK is not set
+# CONFIG_HID_GT683R is not set
+# CONFIG_HID_KEYTOUCH is not set
+# CONFIG_HID_KYE is not set
+# CONFIG_HID_UCLOGIC is not set
+# CONFIG_HID_WALTOP is not set
+CONFIG_HID_FBX_REMOTE_AUDIO=y
+# CONFIG_HID_GYRATION is not set
+# CONFIG_HID_ICADE is not set
+# CONFIG_HID_TWINHAN is not set
+# CONFIG_HID_KENSINGTON is not set
+# CONFIG_HID_LCPOWER is not set
+# CONFIG_HID_LENOVO is not set
+# CONFIG_HID_LOGITECH is not set
+# CONFIG_HID_MAGICMOUSE is not set
+CONFIG_HID_MICROSOFT=y
+# CONFIG_HID_MONTEREY is not set
+# CONFIG_HID_MULTITOUCH is not set
+# CONFIG_HID_NTRIG is not set
+# CONFIG_HID_ORTEK is not set
+# CONFIG_HID_PANTHERLORD is not set
+# CONFIG_HID_PENMOUNT is not set
+# CONFIG_HID_PETALYNX is not set
+# CONFIG_HID_PICOLCD is not set
+# CONFIG_HID_PLANTRONICS is not set
+# CONFIG_HID_PRIMAX is not set
+# CONFIG_HID_ROCCAT is not set
+# CONFIG_HID_SAITEK is not set
+# CONFIG_HID_SAMSUNG is not set
+# CONFIG_HID_SONY is not set
+# CONFIG_HID_SPEEDLINK is not set
+# CONFIG_HID_STEELSERIES is not set
+# CONFIG_HID_SUNPLUS is not set
+# CONFIG_HID_RMI is not set
+# CONFIG_HID_GREENASIA is not set
+# CONFIG_HID_SMARTJOYPLUS is not set
+# CONFIG_HID_TIVO is not set
+# CONFIG_HID_TOPSEED is not set
+# CONFIG_HID_THINGM is not set
+# CONFIG_HID_THRUSTMASTER is not set
+# CONFIG_HID_WACOM is not set
+# CONFIG_HID_WIIMOTE is not set
+# CONFIG_HID_XINMO is not set
+# CONFIG_HID_ZEROPLUS is not set
+# CONFIG_HID_ZYDACRON is not set
+# CONFIG_HID_SENSOR_HUB is not set
+
+#
+# USB HID support
+#
+CONFIG_USB_HID=y
+# CONFIG_HID_PID is not set
+CONFIG_USB_HIDDEV=y
+
+#
+# I2C HID support
+#
+# CONFIG_I2C_HID is not set
+CONFIG_USB_OHCI_LITTLE_ENDIAN=y
+CONFIG_USB_SUPPORT=y
+CONFIG_USB_COMMON=y
+CONFIG_USB_ARCH_HAS_HCD=y
+CONFIG_USB=y
+CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
+
+#
+# Miscellaneous USB options
+#
+CONFIG_USB_DEFAULT_PERSIST=y
+# CONFIG_USB_DYNAMIC_MINORS is not set
+# CONFIG_USB_OTG is not set
+# CONFIG_USB_OTG_WHITELIST is not set
+# CONFIG_USB_OTG_BLACKLIST_HUB is not set
+# CONFIG_USB_ULPI_BUS is not set
+# CONFIG_USB_MON is not set
+# CONFIG_USB_WUSB_CBAF is not set
+
+#
+# USB Host Controller Drivers
+#
+# CONFIG_USB_C67X00_HCD is not set
+CONFIG_USB_XHCI_HCD=y
+CONFIG_USB_XHCI_PCI=y
+CONFIG_USB_XHCI_PLATFORM=y
+# CONFIG_USB_EHCI_HCD is not set
+# CONFIG_USB_OXU210HP_HCD is not set
+# CONFIG_USB_ISP116X_HCD is not set
+# CONFIG_USB_ISP1362_HCD is not set
+# CONFIG_USB_FOTG210_HCD is not set
+# CONFIG_USB_MAX3421_HCD is not set
+# CONFIG_USB_OHCI_HCD is not set
+# CONFIG_USB_UHCI_HCD is not set
+# CONFIG_USB_SL811_HCD is not set
+# CONFIG_USB_R8A66597_HCD is not set
+# CONFIG_USB_HCD_TEST_MODE is not set
+
+#
+# USB Device Class drivers
+#
+# CONFIG_USB_ACM is not set
+# CONFIG_USB_PRINTER is not set
+# CONFIG_USB_WDM is not set
+# CONFIG_USB_TMC is not set
+
+#
+# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may
+#
+
+#
+# also be needed; see USB_STORAGE Help for more info
+#
+CONFIG_USB_STORAGE=y
+# CONFIG_USB_STORAGE_DEBUG is not set
+# CONFIG_USB_STORAGE_REALTEK is not set
+# CONFIG_USB_STORAGE_DATAFAB is not set
+# CONFIG_USB_STORAGE_FREECOM is not set
+# CONFIG_USB_STORAGE_ISD200 is not set
+# CONFIG_USB_STORAGE_USBAT is not set
+# CONFIG_USB_STORAGE_SDDR09 is not set
+# CONFIG_USB_STORAGE_SDDR55 is not set
+# CONFIG_USB_STORAGE_JUMPSHOT is not set
+# CONFIG_USB_STORAGE_ALAUDA is not set
+# CONFIG_USB_STORAGE_ONETOUCH is not set
+# CONFIG_USB_STORAGE_KARMA is not set
+# CONFIG_USB_STORAGE_CYPRESS_ATACB is not set
+# CONFIG_USB_STORAGE_ENE_UB6250 is not set
+CONFIG_USB_UAS=y
+
+#
+# USB Imaging devices
+#
+# CONFIG_USB_MDC800 is not set
+# CONFIG_USB_MICROTEK is not set
+# CONFIG_USBIP_CORE is not set
+# CONFIG_USB_MUSB_HDRC is not set
+CONFIG_USB_DWC3=y
+CONFIG_USB_DWC3_HOST=y
+
+#
+# Platform Glue Driver Support
+#
+CONFIG_USB_DWC3_PCI=y
+CONFIG_USB_DWC3_QCOM=y
+# CONFIG_USB_DWC2 is not set
+# CONFIG_USB_ISP1760 is not set
+
+#
+# USB Power Delivery
+#
+# CONFIG_USB_PD is not set
+CONFIG_QPNP_USB_PDPHY=y
+
+#
+# USB port drivers
+#
+# CONFIG_USB_SERIAL is not set
+
+#
+# USB Miscellaneous drivers
+#
+# CONFIG_USB_EMI62 is not set
+# CONFIG_USB_EMI26 is not set
+# CONFIG_USB_ADUTUX is not set
+# CONFIG_USB_SEVSEG is not set
+# CONFIG_USB_RIO500 is not set
+# CONFIG_USB_LEGOTOWER is not set
+# CONFIG_USB_LCD is not set
+# CONFIG_USB_LED is not set
+# CONFIG_USB_CYPRESS_CY7C63 is not set
+# CONFIG_USB_CYTHERM is not set
+# CONFIG_USB_IDMOUSE is not set
+# CONFIG_USB_FTDI_ELAN is not set
+# CONFIG_USB_APPLEDISPLAY is not set
+# CONFIG_USB_LD is not set
+# CONFIG_USB_TRANCEVIBRATOR is not set
+# CONFIG_USB_IOWARRIOR is not set
+# CONFIG_USB_TEST is not set
+# CONFIG_USB_EHSET_TEST_FIXTURE is not set
+# CONFIG_USB_ISIGHTFW is not set
+# CONFIG_USB_YUREX is not set
+# CONFIG_USB_EZUSB_FX2 is not set
+# CONFIG_USB_HSIC_USB3503 is not set
+# CONFIG_USB_LINK_LAYER_TEST is not set
+# CONFIG_USB_CHAOSKEY is not set
+# CONFIG_USB_QTI_KS_BRIDGE is not set
+
+#
+# USB Physical Layer drivers
+#
+CONFIG_USB_PHY=y
+CONFIG_USB_OTG_WAKELOCK=y
+CONFIG_NOP_USB_XCEIV=y
+# CONFIG_USB_GPIO_VBUS is not set
+# CONFIG_USB_ISP1301 is not set
+# CONFIG_USB_MSM_OTG is not set
+# CONFIG_USB_QCOM_8X16_PHY is not set
+# CONFIG_USB_MSM_HSPHY is not set
+# CONFIG_USB_MSM_SSPHY is not set
+CONFIG_USB_MSM_SSPHY_QMP=y
+CONFIG_MSM_QUSB_PHY=y
+# CONFIG_USB_ULPI is not set
+# CONFIG_DUAL_ROLE_USB_INTF is not set
+# CONFIG_USB_GADGET is not set
+# CONFIG_USB_LED_TRIG is not set
+# CONFIG_UWB is not set
+CONFIG_MMC=y
+# CONFIG_MMC_DEBUG is not set
+CONFIG_MMC_PERF_PROFILING=y
+# CONFIG_MMC_RING_BUFFER is not set
+# CONFIG_MMC_EMBEDDED_SDIO is not set
+# CONFIG_MMC_PARANOID_SD_INIT is not set
+CONFIG_MMC_CLKGATE=y
+
+#
+# MMC/SD/SDIO Card Drivers
+#
+CONFIG_MMC_BLOCK=y
+CONFIG_MMC_BLOCK_MINORS=32
+CONFIG_MMC_BLOCK_BOUNCE=y
+# CONFIG_MMC_BLOCK_DEFERRED_RESUME is not set
+# CONFIG_SDIO_UART is not set
+# CONFIG_MMC_TEST is not set
+# CONFIG_MMC_SIMULATE_MAX_SPEED is not set
+
+#
+# MMC/SD/SDIO Host Controller Drivers
+#
+# CONFIG_MMC_ARMMMCI is not set
+CONFIG_MMC_SDHCI=y
+# CONFIG_MMC_SDHCI_PCI is not set
+# CONFIG_MMC_SDHCI_ACPI is not set
+CONFIG_MMC_SDHCI_PLTFM=y
+# CONFIG_MMC_SDHCI_OF_ARASAN is not set
+# CONFIG_MMC_SDHCI_OF_AT91 is not set
+# CONFIG_MMC_SDHCI_F_SDH30 is not set
+CONFIG_MMC_SDHCI_MSM=y
+# CONFIG_MMC_TIFM_SD is not set
+# CONFIG_MMC_SPI is not set
+# CONFIG_MMC_CB710 is not set
+# CONFIG_MMC_VIA_SDMMC is not set
+# CONFIG_MMC_DW is not set
+# CONFIG_MMC_VUB300 is not set
+# CONFIG_MMC_USHC is not set
+# CONFIG_MMC_USDHI6ROL0 is not set
+# CONFIG_MMC_CQ_HCI is not set
+# CONFIG_MMC_TOSHIBA_PCI is not set
+# CONFIG_MMC_MTK is not set
+# CONFIG_MEMSTICK is not set
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=y
+# CONFIG_LEDS_CLASS_FLASH is not set
+
+#
+# LED drivers
+#
+# CONFIG_LEDS_BCM6328 is not set
+# CONFIG_LEDS_BCM6358 is not set
+# CONFIG_LEDS_LM3530 is not set
+# CONFIG_LEDS_LM3642 is not set
+# CONFIG_LEDS_PCA9532 is not set
+# CONFIG_LEDS_GPIO is not set
+# CONFIG_LEDS_LP3944 is not set
+# CONFIG_LEDS_LP5521 is not set
+# CONFIG_LEDS_LP5523 is not set
+# CONFIG_LEDS_LP5562 is not set
+# CONFIG_LEDS_LP8501 is not set
+# CONFIG_LEDS_LP8860 is not set
+# CONFIG_LEDS_PCA955X is not set
+# CONFIG_LEDS_PCA963X is not set
+# CONFIG_LEDS_DAC124S085 is not set
+# CONFIG_LEDS_PWM is not set
+# CONFIG_LEDS_REGULATOR is not set
+# CONFIG_LEDS_BD2802 is not set
+# CONFIG_LEDS_LT3593 is not set
+# CONFIG_LEDS_TCA6507 is not set
+# CONFIG_LEDS_TLC591XX is not set
+# CONFIG_LEDS_LM355x is not set
+
+#
+# LED driver for blink(1) USB RGB LED is under Special HID drivers (HID_THINGM)
+#
+# CONFIG_LEDS_BLINKM is not set
+CONFIG_LEDS_QPNP=y
+# CONFIG_LEDS_QPNP_FLASH is not set
+CONFIG_LEDS_QPNP_FLASH_V2=y
+CONFIG_LEDS_QPNP_WLED=y
+CONFIG_LEDS_SYSCON=y
+
+#
+# LED Triggers
+#
+CONFIG_LEDS_TRIGGERS=y
+# CONFIG_LEDS_TRIGGER_TIMER is not set
+# CONFIG_LEDS_TRIGGER_ONESHOT is not set
+# CONFIG_LEDS_TRIGGER_HEARTBEAT is not set
+# CONFIG_LEDS_TRIGGER_BACKLIGHT is not set
+# CONFIG_LEDS_TRIGGER_CPU is not set
+# CONFIG_LEDS_TRIGGER_GPIO is not set
+# CONFIG_LEDS_TRIGGER_DEFAULT_ON is not set
+
+#
+# iptables trigger is under Netfilter config (LED target)
+#
+# CONFIG_LEDS_TRIGGER_TRANSIENT is not set
+# CONFIG_LEDS_TRIGGER_CAMERA is not set
+CONFIG_SWITCH=y
+# CONFIG_SWITCH_GPIO is not set
+# CONFIG_ACCESSIBILITY is not set
+# CONFIG_INFINIBAND is not set
+CONFIG_EDAC_SUPPORT=y
+CONFIG_EDAC=y
+# CONFIG_EDAC_LEGACY_SYSFS is not set
+# CONFIG_EDAC_DEBUG is not set
+CONFIG_EDAC_MM_EDAC=y
+# CONFIG_EDAC_XGENE is not set
+CONFIG_EDAC_CORTEX_ARM64=y
+# CONFIG_EDAC_CORTEX_ARM64_PANIC_ON_CE is not set
+# CONFIG_EDAC_CORTEX_ARM64_DBE_IRQ_ONLY is not set
+CONFIG_EDAC_CORTEX_ARM64_PANIC_ON_UE=y
+CONFIG_RTC_LIB=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_HCTOSYS=y
+CONFIG_RTC_HCTOSYS_DEVICE="rtc0"
+CONFIG_RTC_SYSTOHC=y
+CONFIG_RTC_SYSTOHC_DEVICE="rtc0"
+# CONFIG_RTC_DEBUG is not set
+
+#
+# RTC interfaces
+#
+CONFIG_RTC_INTF_SYSFS=y
+CONFIG_RTC_INTF_PROC=y
+CONFIG_RTC_INTF_DEV=y
+# CONFIG_RTC_INTF_DEV_UIE_EMUL is not set
+# CONFIG_RTC_DRV_TEST is not set
+
+#
+# I2C RTC drivers
+#
+# CONFIG_RTC_DRV_ABB5ZES3 is not set
+# CONFIG_RTC_DRV_ABX80X is not set
+# CONFIG_RTC_DRV_DS1307 is not set
+# CONFIG_RTC_DRV_DS1374 is not set
+# CONFIG_RTC_DRV_DS1672 is not set
+# CONFIG_RTC_DRV_DS3232 is not set
+# CONFIG_RTC_DRV_HYM8563 is not set
+# CONFIG_RTC_DRV_MAX6900 is not set
+# CONFIG_RTC_DRV_RS5C372 is not set
+# CONFIG_RTC_DRV_ISL1208 is not set
+# CONFIG_RTC_DRV_ISL12022 is not set
+# CONFIG_RTC_DRV_ISL12057 is not set
+# CONFIG_RTC_DRV_X1205 is not set
+# CONFIG_RTC_DRV_PCF2127 is not set
+# CONFIG_RTC_DRV_PCF8523 is not set
+# CONFIG_RTC_DRV_PCF8563 is not set
+# CONFIG_RTC_DRV_PCF85063 is not set
+# CONFIG_RTC_DRV_PCF8583 is not set
+# CONFIG_RTC_DRV_M41T80 is not set
+# CONFIG_RTC_DRV_BQ32K is not set
+# CONFIG_RTC_DRV_S35390A is not set
+# CONFIG_RTC_DRV_FM3130 is not set
+# CONFIG_RTC_DRV_RX8581 is not set
+# CONFIG_RTC_DRV_RX8025 is not set
+# CONFIG_RTC_DRV_EM3027 is not set
+# CONFIG_RTC_DRV_RV3029C2 is not set
+# CONFIG_RTC_DRV_RV8803 is not set
+
+#
+# SPI RTC drivers
+#
+# CONFIG_RTC_DRV_M41T93 is not set
+# CONFIG_RTC_DRV_M41T94 is not set
+# CONFIG_RTC_DRV_DS1305 is not set
+# CONFIG_RTC_DRV_DS1343 is not set
+# CONFIG_RTC_DRV_DS1347 is not set
+# CONFIG_RTC_DRV_DS1390 is not set
+# CONFIG_RTC_DRV_MAX6902 is not set
+# CONFIG_RTC_DRV_R9701 is not set
+# CONFIG_RTC_DRV_RS5C348 is not set
+# CONFIG_RTC_DRV_DS3234 is not set
+# CONFIG_RTC_DRV_PCF2123 is not set
+# CONFIG_RTC_DRV_RX4581 is not set
+# CONFIG_RTC_DRV_MCP795 is not set
+
+#
+# Platform RTC drivers
+#
+# CONFIG_RTC_DRV_DS1286 is not set
+# CONFIG_RTC_DRV_DS1511 is not set
+# CONFIG_RTC_DRV_DS1553 is not set
+# CONFIG_RTC_DRV_DS1685_FAMILY is not set
+# CONFIG_RTC_DRV_DS1742 is not set
+# CONFIG_RTC_DRV_DS2404 is not set
+# CONFIG_RTC_DRV_STK17TA8 is not set
+# CONFIG_RTC_DRV_M48T86 is not set
+# CONFIG_RTC_DRV_M48T35 is not set
+# CONFIG_RTC_DRV_M48T59 is not set
+# CONFIG_RTC_DRV_MSM6242 is not set
+# CONFIG_RTC_DRV_BQ4802 is not set
+# CONFIG_RTC_DRV_RP5C01 is not set
+# CONFIG_RTC_DRV_V3020 is not set
+# CONFIG_RTC_DRV_ZYNQMP is not set
+
+#
+# on-CPU RTC drivers
+#
+# CONFIG_RTC_DRV_PL030 is not set
+# CONFIG_RTC_DRV_PL031 is not set
+# CONFIG_RTC_DRV_PM8XXX is not set
+# CONFIG_RTC_DRV_SNVS is not set
+CONFIG_RTC_DRV_QPNP=y
+
+#
+# HID Sensor RTC drivers
+#
+# CONFIG_RTC_DRV_HID_SENSOR_TIME is not set
+# CONFIG_ESOC is not set
+CONFIG_DMADEVICES=y
+# CONFIG_DMADEVICES_DEBUG is not set
+
+#
+# DMA Devices
+#
+CONFIG_DMA_ENGINE=y
+CONFIG_DMA_ACPI=y
+CONFIG_DMA_OF=y
+# CONFIG_AMBA_PL08X is not set
+# CONFIG_FSL_EDMA is not set
+# CONFIG_INTEL_IDMA64 is not set
+# CONFIG_PL330_DMA is not set
+# CONFIG_QCOM_BAM_DMA is not set
+CONFIG_QCOM_SPS_DMA=y
+# CONFIG_DW_DMAC is not set
+# CONFIG_DW_DMAC_PCI is not set
+
+#
+# DMA Clients
+#
+# CONFIG_ASYNC_TX_DMA is not set
+# CONFIG_DMATEST is not set
+# CONFIG_AUXDISPLAY is not set
+CONFIG_UIO=y
+# CONFIG_UIO_CIF is not set
+# CONFIG_UIO_PDRV_GENIRQ is not set
+# CONFIG_UIO_DMEM_GENIRQ is not set
+# CONFIG_UIO_AEC is not set
+# CONFIG_UIO_SERCOS3 is not set
+# CONFIG_UIO_PCI_GENERIC is not set
+# CONFIG_UIO_NETX is not set
+# CONFIG_UIO_PRUSS is not set
+# CONFIG_UIO_MF624 is not set
+CONFIG_UIO_MSM_SHAREDMEM=y
+# CONFIG_VFIO is not set
+# CONFIG_VIRT_DRIVERS is not set
+
+#
+# Virtio drivers
+#
+# CONFIG_VIRTIO_PCI is not set
+# CONFIG_VIRTIO_MMIO is not set
+
+#
+# Microsoft Hyper-V guest support
+#
+CONFIG_STAGING=y
+# CONFIG_PRISM2_USB is not set
+# CONFIG_COMEDI is not set
+# CONFIG_RTL8192U is not set
+# CONFIG_RTLLIB is not set
+# CONFIG_R8712U is not set
+# CONFIG_R8188EU is not set
+# CONFIG_R8723AU is not set
+# CONFIG_RTS5208 is not set
+# CONFIG_VT6655 is not set
+# CONFIG_VT6656 is not set
+
+#
+# IIO staging drivers
+#
+
+#
+# Accelerometers
+#
+# CONFIG_ADIS16201 is not set
+# CONFIG_ADIS16203 is not set
+# CONFIG_ADIS16204 is not set
+# CONFIG_ADIS16209 is not set
+# CONFIG_ADIS16220 is not set
+# CONFIG_ADIS16240 is not set
+# CONFIG_LIS3L02DQ is not set
+
+#
+# Analog to digital converters
+#
+# CONFIG_AD7606 is not set
+# CONFIG_AD7780 is not set
+# CONFIG_AD7816 is not set
+# CONFIG_AD7192 is not set
+# CONFIG_AD7280 is not set
+
+#
+# Analog digital bi-direction converters
+#
+# CONFIG_ADT7316 is not set
+
+#
+# Capacitance to digital converters
+#
+# CONFIG_AD7150 is not set
+# CONFIG_AD7152 is not set
+# CONFIG_AD7746 is not set
+
+#
+# Direct Digital Synthesis
+#
+# CONFIG_AD9832 is not set
+# CONFIG_AD9834 is not set
+
+#
+# Digital gyroscope sensors
+#
+# CONFIG_ADIS16060 is not set
+
+#
+# Network Analyzer, Impedance Converters
+#
+# CONFIG_AD5933 is not set
+
+#
+# Light sensors
+#
+# CONFIG_SENSORS_ISL29018 is not set
+# CONFIG_SENSORS_ISL29028 is not set
+# CONFIG_TSL2583 is not set
+# CONFIG_TSL2x7x is not set
+
+#
+# Magnetometer sensors
+#
+# CONFIG_SENSORS_HMC5843_I2C is not set
+# CONFIG_SENSORS_HMC5843_SPI is not set
+
+#
+# Active energy metering IC
+#
+# CONFIG_ADE7753 is not set
+# CONFIG_ADE7754 is not set
+# CONFIG_ADE7758 is not set
+# CONFIG_ADE7759 is not set
+# CONFIG_ADE7854 is not set
+
+#
+# Resolver to digital converters
+#
+# CONFIG_AD2S90 is not set
+# CONFIG_AD2S1200 is not set
+# CONFIG_AD2S1210 is not set
+
+#
+# Triggers - standalone
+#
+# CONFIG_IIO_SIMPLE_DUMMY is not set
+# CONFIG_FB_SM750 is not set
+# CONFIG_FB_XGI is not set
+
+#
+# Speakup console speech
+#
+# CONFIG_SPEAKUP is not set
+# CONFIG_TOUCHSCREEN_SYNAPTICS_I2C_RMI4 is not set
+# CONFIG_STAGING_MEDIA is not set
+
+#
+# Android
+#
+CONFIG_ASHMEM=y
+CONFIG_ANDROID_TIMED_OUTPUT=y
+CONFIG_ANDROID_TIMED_GPIO=y
+CONFIG_ANDROID_LOW_MEMORY_KILLER=y
+CONFIG_ANDROID_LOW_MEMORY_KILLER_AUTODETECT_OOM_ADJ_VALUES=y
+CONFIG_SYNC=y
+CONFIG_SW_SYNC=y
+# CONFIG_SW_SYNC_USER is not set
+# CONFIG_ONESHOT_SYNC is not set
+# CONFIG_ONESHOT_SYNC_USER is not set
+CONFIG_ION=y
+# CONFIG_ION_TEST is not set
+# CONFIG_ION_DUMMY is not set
+CONFIG_ION_MSM=y
+# CONFIG_ALLOC_BUFFERS_IN_4K_CHUNKS is not set
+# CONFIG_FIQ_DEBUGGER is not set
+# CONFIG_FIQ_WATCHDOG is not set
+# CONFIG_STAGING_BOARD is not set
+# CONFIG_WIMAX_GDM72XX is not set
+# CONFIG_LTE_GDM724X is not set
+# CONFIG_LUSTRE_FS is not set
+# CONFIG_DGNC is not set
+# CONFIG_DGAP is not set
+# CONFIG_GS_FPGABOOT is not set
+# CONFIG_FB_TFT is not set
+# CONFIG_FSL_MC_BUS is not set
+# CONFIG_WILC1000_DRIVER is not set
+# CONFIG_MOST is not set
+# CONFIG_GOLDFISH is not set
+# CONFIG_CHROME_PLATFORMS is not set
+
+#
+# Qualcomm MSM specific device drivers
+#
+CONFIG_QPNP_REVID=y
+CONFIG_QPNP_COINCELL=y
+CONFIG_SPS=y
+# CONFIG_SPS_SUPPORT_BAMDMA is not set
+CONFIG_SPS_SUPPORT_NDP_BAM=y
+# CONFIG_EP_PCIE is not set
+# CONFIG_IPA is not set
+# CONFIG_GSI is not set
+CONFIG_GPIO_USB_DETECT=y
+# CONFIG_MSM_MHI is not set
+# CONFIG_MSM_11AD is not set
+CONFIG_SEEMP_CORE=y
+CONFIG_MSM_EXT_DISPLAY=y
+CONFIG_CLKDEV_LOOKUP=y
+CONFIG_HAVE_CLK_PREPARE=y
+CONFIG_COMMON_CLK_MSM=y
+# CONFIG_MSM_CLK_CONTROLLER_V2 is not set
+CONFIG_MSM_MDSS_PLL=y
+CONFIG_HWSPINLOCK=y
+
+#
+# Hardware Spinlock drivers
+#
+# CONFIG_HWSPINLOCK_QCOM is not set
+CONFIG_REMOTE_SPINLOCK_MSM=y
+
+#
+# Clock Source drivers
+#
+CONFIG_CLKSRC_OF=y
+CONFIG_CLKSRC_ACPI=y
+CONFIG_CLKSRC_PROBE=y
+CONFIG_ARM_ARCH_TIMER=y
+CONFIG_ARM_ARCH_TIMER_EVTSTREAM=y
+CONFIG_MSM_TIMER_LEAP=y
+# CONFIG_ARM_ARCH_TIMER_VCT_ACCESS is not set
+# CONFIG_ARM_TIMER_SP804 is not set
+# CONFIG_ATMEL_PIT is not set
+# CONFIG_SH_TIMER_CMT is not set
+# CONFIG_SH_TIMER_MTU2 is not set
+# CONFIG_SH_TIMER_TMU is not set
+# CONFIG_EM_TIMER_STI is not set
+# CONFIG_MAILBOX is not set
+CONFIG_IOMMU_API=y
+CONFIG_IOMMU_SUPPORT=y
+
+#
+# Generic IOMMU Pagetable Support
+#
+CONFIG_IOMMU_IO_PGTABLE=y
+CONFIG_IOMMU_IO_PGTABLE_LPAE=y
+# CONFIG_IOMMU_IO_PGTABLE_LPAE_SELFTEST is not set
+CONFIG_IOMMU_IO_PGTABLE_FAST=y
+# CONFIG_IOMMU_IO_PGTABLE_FAST_SELFTEST is not set
+# CONFIG_IOMMU_IO_PGTABLE_FAST_PROVE_TLB is not set
+CONFIG_OF_IOMMU=y
+CONFIG_ARM_SMMU=y
+# CONFIG_ARM_SMMU_V3 is not set
+CONFIG_IOMMU_DEBUG=y
+CONFIG_IOMMU_DEBUG_TRACKING=y
+# CONFIG_IOMMU_TESTS is not set
+
+#
+# Remoteproc drivers
+#
+# CONFIG_STE_MODEM_RPROC is not set
+
+#
+# Rpmsg drivers
+#
+
+#
+# SOC (System On Chip) specific Drivers
+#
+# CONFIG_MSM_HAB is not set
+# CONFIG_MSM_INRUSH_CURRENT_MITIGATION is not set
+# CONFIG_MSM_PFE_WA is not set
+# CONFIG_QCOM_COMMON_LOG is not set
+CONFIG_MSM_SMEM=y
+CONFIG_QPNP_HAPTIC=y
+# CONFIG_QPNP_PBS is not set
+CONFIG_MSM_SMD=y
+# CONFIG_MSM_SMD_DEBUG is not set
+CONFIG_MSM_GLINK=y
+CONFIG_MSM_GLINK_LOOPBACK_SERVER=y
+CONFIG_MSM_GLINK_SMD_XPRT=y
+CONFIG_MSM_GLINK_SMEM_NATIVE_XPRT=y
+CONFIG_MSM_GLINK_SPI_XPRT=y
+CONFIG_MSM_SPCOM=y
+CONFIG_MSM_SPSS_UTILS=y
+CONFIG_MSM_SMEM_LOGGING=y
+CONFIG_MSM_SMP2P=y
+CONFIG_MSM_SMP2P_TEST=y
+CONFIG_MSM_QMI_INTERFACE=y
+# CONFIG_MSM_L2_IA_DEBUG is not set
+CONFIG_MSM_RPM_SMD=y
+CONFIG_QCOM_BUS_SCALING=y
+# CONFIG_QCOM_GSBI is not set
+# CONFIG_QCOM_SMEM is not set
+CONFIG_MSM_SERVICE_LOCATOR=y
+# CONFIG_MSM_HVC is not set
+CONFIG_QCOM_DCC=y
+CONFIG_MSM_IPC_ROUTER_SMD_XPRT=y
+CONFIG_MSM_SYSMON_GLINK_COMM=y
+CONFIG_MSM_IPC_ROUTER_GLINK_XPRT=y
+# CONFIG_MSM_SYSTEM_HEALTH_MONITOR is not set
+CONFIG_MSM_GLINK_PKT=y
+CONFIG_MSM_SPM=y
+# CONFIG_MSM_L2_SPM is not set
+CONFIG_QCOM_SCM=y
+# CONFIG_QCOM_SCM_QCPE is not set
+# CONFIG_QCOM_SCM_XPU is not set
+# CONFIG_QCOM_SCM_ERRATA is not set
+CONFIG_QCOM_WATCHDOG_V2=y
+CONFIG_QCOM_IRQ_HELPER=y
+# CONFIG_QCOM_MEMORY_DUMP is not set
+# CONFIG_QCOM_MEMORY_DUMP_V2 is not set
+CONFIG_ICNSS=y
+# CONFIG_ICNSS_DEBUG is not set
+CONFIG_MSM_SECURE_BUFFER=y
+# CONFIG_MSM_GLADIATOR_ERP is not set
+CONFIG_MSM_GLADIATOR_ERP_V2=y
+CONFIG_PANIC_ON_GLADIATOR_ERROR_V2=y
+# CONFIG_MSM_GLADIATOR_ERROR_V2_MAIN_LOGGER_ONLY is not set
+CONFIG_MSM_GLADIATOR_HANG_DETECT=y
+CONFIG_MSM_CORE_HANG_DETECT=y
+CONFIG_MSM_RUN_QUEUE_STATS=y
+CONFIG_MSM_JTAGV8=y
+CONFIG_MSM_BOOT_STATS=y
+# CONFIG_MSM_BOOT_TIME_MARKER is not set
+# CONFIG_QCOM_CPUSS_DUMP is not set
+# CONFIG_MSM_QDSP6_APRV2 is not set
+# CONFIG_MSM_QDSP6_APRV3 is not set
+CONFIG_MSM_QDSP6_APRV2_GLINK=y
+# CONFIG_MSM_QDSP6_APRV3_GLINK is not set
+CONFIG_MSM_QDSP6_SSR=y
+CONFIG_MSM_QDSP6_PDR=y
+CONFIG_MSM_QDSP6_NOTIFIER=y
+CONFIG_MSM_ADSP_LOADER=y
+# CONFIG_MSM_CDSP_LOADER is not set
+CONFIG_MSM_PERFORMANCE=y
+# CONFIG_MSM_PERFORMANCE_HOTPLUG_ON is not set
+CONFIG_MSM_SUBSYSTEM_RESTART=y
+# CONFIG_MSM_SYSMON_COMM is not set
+CONFIG_MSM_PIL=y
+CONFIG_MSM_PIL_SSR_GENERIC=y
+CONFIG_MSM_PIL_MSS_QDSP6V5=y
+CONFIG_TRACER_PKT=y
+# CONFIG_QCOM_FORCE_WDOG_BITE_ON_PANIC is not set
+CONFIG_MSM_MPM_OF=y
+CONFIG_MSM_EVENT_TIMER=y
+CONFIG_MSM_AVTIMER=y
+# CONFIG_MSM_KERNEL_PROTECT is not set
+CONFIG_QCOM_REMOTEQDSS=y
+CONFIG_MSM_SERVICE_NOTIFIER=y
+# CONFIG_MSM_QBT1000 is not set
+CONFIG_MSM_RPM_RBCPR_STATS_V2_LOG=y
+CONFIG_MSM_RPM_LOG=y
+CONFIG_MSM_RPM_STATS_LOG=y
+CONFIG_QSEE_IPC_IRQ_BRIDGE=y
+CONFIG_WCD_DSP_GLINK=y
+CONFIG_QCOM_SMCINVOKE=y
+CONFIG_QCOM_EARLY_RANDOM=y
+# CONFIG_QCOM_CX_IPEAK is not set
+# CONFIG_MSM_CACHE_M4M_ERP64_PANIC_ON_CE is not set
+# CONFIG_MSM_CACHE_M4M_ERP64_PANIC_ON_UE is not set
+CONFIG_MEM_SHARE_QMI_SERVICE=y
+# CONFIG_SUNXI_SRAM is not set
+# CONFIG_SOC_TI is not set
+CONFIG_PM_DEVFREQ=y
+
+#
+# DEVFREQ Governors
+#
+CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND=y
+CONFIG_DEVFREQ_GOV_PERFORMANCE=y
+CONFIG_DEVFREQ_GOV_POWERSAVE=y
+CONFIG_DEVFREQ_GOV_USERSPACE=y
+CONFIG_DEVFREQ_GOV_CPUFREQ=y
+CONFIG_QCOM_BIMC_BWMON=y
+# CONFIG_ARMBW_HWMON is not set
+CONFIG_ARM_MEMLAT_MON=y
+# CONFIG_QCOMCCI_HWMON is not set
+# CONFIG_QCOM_M4M_HWMON is not set
+CONFIG_DEVFREQ_GOV_QCOM_BW_HWMON=y
+# CONFIG_DEVFREQ_GOV_QCOM_CACHE_HWMON is not set
+CONFIG_DEVFREQ_GOV_SPDM_HYP=y
+CONFIG_DEVFREQ_GOV_MEMLAT=y
+
+#
+# DEVFREQ Drivers
+#
+# CONFIG_DEVFREQ_SIMPLE_DEV is not set
+CONFIG_QCOM_DEVFREQ_DEVBW=y
+CONFIG_SPDM_SCM=y
+CONFIG_DEVFREQ_SPDM=y
+# CONFIG_PM_DEVFREQ_EVENT is not set
+CONFIG_EXTCON=y
+
+#
+# Extcon Device Drivers
+#
+# CONFIG_EXTCON_ADC_JACK is not set
+# CONFIG_EXTCON_GPIO is not set
+# CONFIG_EXTCON_RT8973A is not set
+# CONFIG_EXTCON_SM5502 is not set
+# CONFIG_EXTCON_USB_GPIO is not set
+# CONFIG_MEMORY is not set
+CONFIG_IIO=y
+# CONFIG_IIO_BUFFER is not set
+# CONFIG_IIO_TRIGGER is not set
+
+#
+# Accelerometers
+#
+# CONFIG_BMA180 is not set
+# CONFIG_BMC150_ACCEL is not set
+# CONFIG_IIO_ST_ACCEL_3AXIS is not set
+# CONFIG_KXSD9 is not set
+# CONFIG_KXCJK1013 is not set
+# CONFIG_MMA8452 is not set
+# CONFIG_MMA9551 is not set
+# CONFIG_MMA9553 is not set
+# CONFIG_MXC4005 is not set
+# CONFIG_STK8312 is not set
+
+#
+# Analog to digital converters
+#
+# CONFIG_AD7266 is not set
+# CONFIG_AD7291 is not set
+# CONFIG_AD7298 is not set
+# CONFIG_AD7476 is not set
+# CONFIG_AD7791 is not set
+# CONFIG_AD7793 is not set
+# CONFIG_AD7887 is not set
+# CONFIG_AD7923 is not set
+# CONFIG_AD799X is not set
+# CONFIG_CC10001_ADC is not set
+# CONFIG_HI8435 is not set
+# CONFIG_MAX1027 is not set
+# CONFIG_MAX1363 is not set
+# CONFIG_MCP320X is not set
+# CONFIG_MCP3422 is not set
+# CONFIG_NAU7802 is not set
+# CONFIG_QCOM_SPMI_IADC is not set
+# CONFIG_QCOM_SPMI_VADC is not set
+CONFIG_QCOM_RRADC=y
+CONFIG_QCOM_TADC=y
+# CONFIG_TI_ADC081C is not set
+# CONFIG_TI_ADC128S052 is not set
+# CONFIG_VF610_ADC is not set
+
+#
+# Amplifiers
+#
+# CONFIG_AD8366 is not set
+
+#
+# Chemical Sensors
+#
+# CONFIG_VZ89X is not set
+
+#
+# Hid Sensor IIO Common
+#
+
+#
+# SSP Sensor Common
+#
+# CONFIG_IIO_SSP_SENSORHUB is not set
+
+#
+# Digital to analog converters
+#
+# CONFIG_AD5064 is not set
+# CONFIG_AD5360 is not set
+# CONFIG_AD5380 is not set
+# CONFIG_AD5421 is not set
+# CONFIG_AD5446 is not set
+# CONFIG_AD5449 is not set
+# CONFIG_AD5504 is not set
+# CONFIG_AD5624R_SPI is not set
+# CONFIG_AD5686 is not set
+# CONFIG_AD5755 is not set
+# CONFIG_AD5764 is not set
+# CONFIG_AD5791 is not set
+# CONFIG_AD7303 is not set
+# CONFIG_M62332 is not set
+# CONFIG_MAX517 is not set
+# CONFIG_MAX5821 is not set
+# CONFIG_MCP4725 is not set
+# CONFIG_MCP4922 is not set
+
+#
+# Frequency Synthesizers DDS/PLL
+#
+
+#
+# Clock Generator/Distribution
+#
+# CONFIG_AD9523 is not set
+
+#
+# Phase-Locked Loop (PLL) frequency synthesizers
+#
+# CONFIG_ADF4350 is not set
+
+#
+# Digital gyroscope sensors
+#
+# CONFIG_ADIS16080 is not set
+# CONFIG_ADIS16130 is not set
+# CONFIG_ADIS16136 is not set
+# CONFIG_ADIS16260 is not set
+# CONFIG_ADXRS450 is not set
+# CONFIG_BMG160 is not set
+# CONFIG_IIO_ST_GYRO_3AXIS is not set
+# CONFIG_ITG3200 is not set
+
+#
+# Humidity sensors
+#
+# CONFIG_DHT11 is not set
+# CONFIG_HDC100X is not set
+# CONFIG_HTU21 is not set
+# CONFIG_SI7005 is not set
+# CONFIG_SI7020 is not set
+
+#
+# Inertial measurement units
+#
+# CONFIG_ADIS16400 is not set
+# CONFIG_ADIS16480 is not set
+# CONFIG_KMX61 is not set
+# CONFIG_INV_MPU6050_IIO is not set
+
+#
+# Light sensors
+#
+# CONFIG_ACPI_ALS is not set
+# CONFIG_ADJD_S311 is not set
+# CONFIG_AL3320A is not set
+# CONFIG_APDS9300 is not set
+# CONFIG_APDS9960 is not set
+# CONFIG_BH1750 is not set
+# CONFIG_CM32181 is not set
+# CONFIG_CM3232 is not set
+# CONFIG_CM3323 is not set
+# CONFIG_CM36651 is not set
+# CONFIG_GP2AP020A00F is not set
+# CONFIG_ISL29125 is not set
+# CONFIG_JSA1212 is not set
+# CONFIG_RPR0521 is not set
+# CONFIG_LTR501 is not set
+# CONFIG_OPT3001 is not set
+# CONFIG_PA12203001 is not set
+# CONFIG_STK3310 is not set
+# CONFIG_TCS3414 is not set
+# CONFIG_TCS3472 is not set
+# CONFIG_SENSORS_TSL2563 is not set
+# CONFIG_TSL4531 is not set
+# CONFIG_US5182D is not set
+# CONFIG_VCNL4000 is not set
+
+#
+# Magnetometer sensors
+#
+# CONFIG_AK8975 is not set
+# CONFIG_AK09911 is not set
+# CONFIG_BMC150_MAGN is not set
+# CONFIG_MAG3110 is not set
+# CONFIG_MMC35240 is not set
+# CONFIG_IIO_ST_MAGN_3AXIS is not set
+
+#
+# Inclinometer sensors
+#
+
+#
+# Digital potentiometers
+#
+# CONFIG_MCP4531 is not set
+
+#
+# Pressure sensors
+#
+# CONFIG_BMP280 is not set
+# CONFIG_MPL115 is not set
+# CONFIG_MPL3115 is not set
+# CONFIG_MS5611 is not set
+# CONFIG_MS5637 is not set
+# CONFIG_IIO_ST_PRESS is not set
+# CONFIG_T5403 is not set
+
+#
+# Lightning sensors
+#
+# CONFIG_AS3935 is not set
+
+#
+# Proximity sensors
+#
+# CONFIG_LIDAR_LITE_V2 is not set
+# CONFIG_SX9500 is not set
+
+#
+# Temperature sensors
+#
+# CONFIG_MLX90614 is not set
+# CONFIG_TMP006 is not set
+# CONFIG_TSYS01 is not set
+# CONFIG_TSYS02D is not set
+# CONFIG_NTB is not set
+# CONFIG_VME_BUS is not set
+CONFIG_PWM=y
+CONFIG_PWM_SYSFS=y
+# CONFIG_PWM_FSL_FTM is not set
+# CONFIG_PWM_PCA9685 is not set
+CONFIG_PWM_QPNP=y
+CONFIG_IRQCHIP=y
+CONFIG_ARM_GIC=y
+CONFIG_ARM_GIC_V2M=y
+CONFIG_ARM_GIC_V3=y
+CONFIG_ARM_GIC_V3_ITS=y
+CONFIG_ARM_GIC_V3_ACL=y
+# CONFIG_ARM_GIC_V3_NO_ACCESS_CONTROL is not set
+CONFIG_QCOM_SHOW_RESUME_IRQ=y
+CONFIG_MSM_IRQ=y
+# CONFIG_IPACK_BUS is not set
+CONFIG_ARCH_HAS_RESET_CONTROLLER=y
+CONFIG_RESET_CONTROLLER=y
+# CONFIG_FMC is not set
+
+#
+# PHY Subsystem
+#
+CONFIG_GENERIC_PHY=y
+# CONFIG_PHY_PXA_28NM_HSIC is not set
+# CONFIG_PHY_PXA_28NM_USB2 is not set
+# CONFIG_BCM_KONA_USB2_PHY is not set
+# CONFIG_PHY_QCOM_APQ8064_SATA is not set
+# CONFIG_PHY_QCOM_IPQ806X_SATA is not set
+# CONFIG_PHY_XGENE is not set
+CONFIG_PHY_QCOM_UFS=y
+# CONFIG_POWERCAP is not set
+# CONFIG_MCB is not set
+
+#
+# Performance monitor support
+#
+CONFIG_ARM_PMU=y
+CONFIG_RAS=y
+# CONFIG_THUNDERBOLT is not set
+
+#
+# Android
+#
+CONFIG_ANDROID=y
+# CONFIG_ANDROID_BINDER_IPC is not set
+# CONFIG_LIBNVDIMM is not set
+# CONFIG_NVMEM is not set
+# CONFIG_STM is not set
+# CONFIG_INTEL_TH is not set
+
+#
+# FPGA Configuration Support
+#
+# CONFIG_FPGA is not set
+
+#
+# Firmware Drivers
+#
+CONFIG_ARM_PSCI_FW=y
+# CONFIG_FIRMWARE_MEMMAP is not set
+CONFIG_QCOM_SCM_64=y
+CONFIG_HAVE_ARM_SMCCC=y
+CONFIG_MSM_TZ_LOG=y
+# CONFIG_BIF is not set
+CONFIG_SENSORS_SSC=y
+# CONFIG_TEE is not set
+
+#
+# Firmware Drivers
+#
+CONFIG_ACPI=y
+CONFIG_ACPI_GENERIC_GSI=y
+CONFIG_ACPI_CCA_REQUIRED=y
+# CONFIG_ACPI_DEBUGGER is not set
+# CONFIG_ACPI_EC_DEBUGFS is not set
+CONFIG_ACPI_BUTTON=y
+CONFIG_ACPI_FAN=y
+# CONFIG_ACPI_DOCK is not set
+CONFIG_ACPI_PROCESSOR=y
+CONFIG_ACPI_HOTPLUG_CPU=y
+CONFIG_ACPI_THERMAL=y
+# CONFIG_ACPI_CUSTOM_DSDT is not set
+# CONFIG_ACPI_DEBUG is not set
+# CONFIG_ACPI_PCI_SLOT is not set
+CONFIG_ACPI_CONTAINER=y
+# CONFIG_ACPI_HED is not set
+# CONFIG_ACPI_CUSTOM_METHOD is not set
+CONFIG_ACPI_REDUCED_HARDWARE_ONLY=y
+# CONFIG_PMIC_OPREGION is not set
+
+#
+# File systems
+#
+CONFIG_DCACHE_WORD_ACCESS=y
+# CONFIG_EXT2_FS is not set
+# CONFIG_EXT3_FS is not set
+CONFIG_EXT4_FS=y
+CONFIG_EXT4_USE_FOR_EXT2=y
+# CONFIG_EXT4_FS_POSIX_ACL is not set
+# CONFIG_EXT4_FS_SECURITY is not set
+# CONFIG_EXT4_ENCRYPTION is not set
+# CONFIG_EXT4_DEBUG is not set
+CONFIG_JBD2=y
+# CONFIG_JBD2_DEBUG is not set
+CONFIG_FS_MBCACHE=y
+# CONFIG_REISERFS_FS is not set
+# CONFIG_JFS_FS is not set
+CONFIG_XFS_FS=y
+# CONFIG_XFS_QUOTA is not set
+# CONFIG_XFS_POSIX_ACL is not set
+# CONFIG_XFS_RT is not set
+# CONFIG_XFS_WARN is not set
+# CONFIG_XFS_DEBUG is not set
+# CONFIG_GFS2_FS is not set
+# CONFIG_OCFS2_FS is not set
+# CONFIG_BTRFS_FS is not set
+# CONFIG_NILFS2_FS is not set
+# CONFIG_F2FS_FS is not set
+# CONFIG_FS_DAX is not set
+# CONFIG_FS_POSIX_ACL is not set
+CONFIG_EXPORTFS=y
+CONFIG_FILE_LOCKING=y
+# CONFIG_FS_ENCRYPTION is not set
+CONFIG_FSNOTIFY=y
+CONFIG_DNOTIFY=y
+CONFIG_INOTIFY_USER=y
+CONFIG_FANOTIFY=y
+# CONFIG_QUOTA is not set
+# CONFIG_QUOTACTL is not set
+# CONFIG_AUTOFS4_FS is not set
+CONFIG_FUSE_FS=y
+# CONFIG_CUSE is not set
+# CONFIG_OVERLAY_FS is not set
+
+#
+# Caches
+#
+# CONFIG_FSCACHE is not set
+
+#
+# CD-ROM/DVD Filesystems
+#
+CONFIG_ISO9660_FS=y
+CONFIG_JOLIET=y
+CONFIG_ZISOFS=y
+CONFIG_UDF_FS=y
+CONFIG_UDF_NLS=y
+
+#
+# DOS/FAT/NT Filesystems
+#
+CONFIG_FAT_FS=y
+CONFIG_MSDOS_FS=y
+CONFIG_VFAT_FS=y
+CONFIG_FAT_DEFAULT_CODEPAGE=850
+CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1"
+# CONFIG_NTFS_FS is not set
+CONFIG_EXFAT_FS=y
+
+#
+# Pseudo filesystems
+#
+CONFIG_PROC_FS=y
+# CONFIG_PROC_KCORE is not set
+CONFIG_PROC_SYSCTL=y
+CONFIG_PROC_PAGE_MONITOR=y
+# CONFIG_PROC_CHILDREN is not set
+CONFIG_KERNFS=y
+CONFIG_SYSFS=y
+CONFIG_TMPFS=y
+# CONFIG_TMPFS_POSIX_ACL is not set
+# CONFIG_TMPFS_XATTR is not set
+# CONFIG_HUGETLBFS is not set
+# CONFIG_HUGETLB_PAGE is not set
+CONFIG_CONFIGFS_FS=y
+CONFIG_MISC_FILESYSTEMS=y
+# CONFIG_ADFS_FS is not set
+# CONFIG_AFFS_FS is not set
+# CONFIG_ECRYPT_FS is not set
+# CONFIG_SDCARD_FS is not set
+CONFIG_HFS_FS=y
+CONFIG_HFSPLUS_FS=y
+# CONFIG_HFSPLUS_FS_POSIX_ACL is not set
+# CONFIG_BEFS_FS is not set
+# CONFIG_BFS_FS is not set
+# CONFIG_EFS_FS is not set
+# CONFIG_LOGFS is not set
+CONFIG_CRAMFS=y
+CONFIG_SQUASHFS=y
+# CONFIG_SQUASHFS_DECOMP_SINGLE is not set
+# CONFIG_SQUASHFS_DECOMP_MULTI is not set
+CONFIG_SQUASHFS_DECOMP_MULTI_PERCPU=y
+# CONFIG_SQUASHFS_XATTR is not set
+# CONFIG_SQUASHFS_ZLIB is not set
+# CONFIG_SQUASHFS_LZ4 is not set
+# CONFIG_SQUASHFS_LZO is not set
+CONFIG_SQUASHFS_XZ=y
+# CONFIG_SQUASHFS_ZSTD is not set
+# CONFIG_SQUASHFS_4K_DEVBLK_SIZE is not set
+# CONFIG_SQUASHFS_EMBEDDED is not set
+CONFIG_SQUASHFS_FRAGMENT_CACHE_SIZE=3
+# CONFIG_VXFS_FS is not set
+# CONFIG_MINIX_FS is not set
+# CONFIG_OMFS_FS is not set
+# CONFIG_HPFS_FS is not set
+# CONFIG_QNX4FS_FS is not set
+# CONFIG_QNX6FS_FS is not set
+# CONFIG_ROMFS_FS is not set
+CONFIG_PSTORE=y
+# CONFIG_PSTORE_CONSOLE is not set
+# CONFIG_PSTORE_PMSG is not set
+CONFIG_PSTORE_RAM=y
+# CONFIG_SYSV_FS is not set
+# CONFIG_UFS_FS is not set
+CONFIG_NETWORK_FILESYSTEMS=y
+CONFIG_NFS_FS=y
+CONFIG_NFS_V2=y
+CONFIG_NFS_V3=y
+# CONFIG_NFS_V3_ACL is not set
+CONFIG_NFS_V4=y
+# CONFIG_NFS_SWAP is not set
+# CONFIG_NFS_V4_1 is not set
+CONFIG_ROOT_NFS=y
+# CONFIG_NFS_USE_LEGACY_DNS is not set
+CONFIG_NFS_USE_KERNEL_DNS=y
+# CONFIG_NFSD is not set
+CONFIG_GRACE_PERIOD=y
+CONFIG_LOCKD=y
+CONFIG_LOCKD_V4=y
+CONFIG_NFS_COMMON=y
+CONFIG_SUNRPC=y
+CONFIG_SUNRPC_GSS=y
+# CONFIG_SUNRPC_DEBUG is not set
+# CONFIG_CEPH_FS is not set
+# CONFIG_CIFS is not set
+# CONFIG_NCP_FS is not set
+# CONFIG_CODA_FS is not set
+# CONFIG_AFS_FS is not set
+CONFIG_NLS=y
+CONFIG_NLS_DEFAULT="iso8859-1"
+# CONFIG_NLS_CODEPAGE_437 is not set
+# CONFIG_NLS_CODEPAGE_737 is not set
+# CONFIG_NLS_CODEPAGE_775 is not set
+CONFIG_NLS_CODEPAGE_850=y
+# CONFIG_NLS_CODEPAGE_852 is not set
+# CONFIG_NLS_CODEPAGE_855 is not set
+# CONFIG_NLS_CODEPAGE_857 is not set
+# CONFIG_NLS_CODEPAGE_860 is not set
+# CONFIG_NLS_CODEPAGE_861 is not set
+# CONFIG_NLS_CODEPAGE_862 is not set
+# CONFIG_NLS_CODEPAGE_863 is not set
+# CONFIG_NLS_CODEPAGE_864 is not set
+# CONFIG_NLS_CODEPAGE_865 is not set
+# CONFIG_NLS_CODEPAGE_866 is not set
+# CONFIG_NLS_CODEPAGE_869 is not set
+# CONFIG_NLS_CODEPAGE_936 is not set
+# CONFIG_NLS_CODEPAGE_950 is not set
+# CONFIG_NLS_CODEPAGE_932 is not set
+# CONFIG_NLS_CODEPAGE_949 is not set
+# CONFIG_NLS_CODEPAGE_874 is not set
+# CONFIG_NLS_ISO8859_8 is not set
+# CONFIG_NLS_CODEPAGE_1250 is not set
+# CONFIG_NLS_CODEPAGE_1251 is not set
+# CONFIG_NLS_ASCII is not set
+CONFIG_NLS_ISO8859_1=y
+# CONFIG_NLS_ISO8859_2 is not set
+# CONFIG_NLS_ISO8859_3 is not set
+# CONFIG_NLS_ISO8859_4 is not set
+# CONFIG_NLS_ISO8859_5 is not set
+# CONFIG_NLS_ISO8859_6 is not set
+# CONFIG_NLS_ISO8859_7 is not set
+# CONFIG_NLS_ISO8859_9 is not set
+# CONFIG_NLS_ISO8859_13 is not set
+# CONFIG_NLS_ISO8859_14 is not set
+# CONFIG_NLS_ISO8859_15 is not set
+# CONFIG_NLS_KOI8_R is not set
+# CONFIG_NLS_KOI8_U is not set
+# CONFIG_NLS_MAC_ROMAN is not set
+# CONFIG_NLS_MAC_CELTIC is not set
+# CONFIG_NLS_MAC_CENTEURO is not set
+# CONFIG_NLS_MAC_CROATIAN is not set
+# CONFIG_NLS_MAC_CYRILLIC is not set
+# CONFIG_NLS_MAC_GAELIC is not set
+# CONFIG_NLS_MAC_GREEK is not set
+# CONFIG_NLS_MAC_ICELAND is not set
+# CONFIG_NLS_MAC_INUIT is not set
+# CONFIG_NLS_MAC_ROMANIAN is not set
+# CONFIG_NLS_MAC_TURKISH is not set
+CONFIG_NLS_UTF8=y
+# CONFIG_DLM is not set
+# CONFIG_FILE_TABLE_DEBUG is not set
+# CONFIG_VIRTUALIZATION is not set
+
+#
+# Kernel hacking
+#
+
+#
+# printk and dmesg options
+#
+CONFIG_PRINTK_TIME=y
+CONFIG_MESSAGE_LOGLEVEL_DEFAULT=4
+# CONFIG_BOOT_PRINTK_DELAY is not set
+CONFIG_DYNAMIC_DEBUG=y
+
+#
+# Compile-time checks and compiler options
+#
+# CONFIG_DEBUG_INFO is not set
+CONFIG_ENABLE_WARN_DEPRECATED=y
+CONFIG_ENABLE_MUST_CHECK=y
+CONFIG_FRAME_WARN=2048
+# CONFIG_STRIP_ASM_SYMS is not set
+# CONFIG_READABLE_ASM is not set
+# CONFIG_UNUSED_SYMBOLS is not set
+CONFIG_PAGE_OWNER=y
+CONFIG_PAGE_OWNER_ENABLE_DEFAULT=y
+CONFIG_DEBUG_FS=y
+# CONFIG_HEADERS_CHECK is not set
+# CONFIG_DEBUG_SECTION_MISMATCH is not set
+CONFIG_SECTION_MISMATCH_WARN_ONLY=y
+CONFIG_ARCH_WANT_FRAME_POINTERS=y
+CONFIG_FRAME_POINTER=y
+# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_MAGIC_SYSRQ_DEFAULT_ENABLE=0x1
+CONFIG_DEBUG_KERNEL=y
+
+#
+# Memory Debugging
+#
+CONFIG_PAGE_EXTENSION=y
+# CONFIG_DEBUG_PAGEALLOC is not set
+# CONFIG_SLUB_DEBUG_PANIC_ON is not set
+# CONFIG_PAGE_POISONING is not set
+# CONFIG_DEBUG_OBJECTS is not set
+CONFIG_SLUB_DEBUG_ON=y
+# CONFIG_SLUB_STATS is not set
+CONFIG_HAVE_DEBUG_KMEMLEAK=y
+# CONFIG_DEBUG_KMEMLEAK is not set
+# CONFIG_DEBUG_STACK_USAGE is not set
+# CONFIG_DEBUG_VM is not set
+# CONFIG_DEBUG_MEMORY_INIT is not set
+# CONFIG_DEBUG_PER_CPU_MAPS is not set
+CONFIG_HAVE_ARCH_KASAN=y
+# CONFIG_KASAN is not set
+# CONFIG_DEBUG_REFCOUNT is not set
+# CONFIG_DEBUG_SHIRQ is not set
+
+#
+# Debug Lockups and Hangs
+#
+CONFIG_LOCKUP_DETECTOR=y
+CONFIG_HARDLOCKUP_DETECTOR_OTHER_CPU=y
+CONFIG_HARDLOCKUP_DETECTOR=y
+# CONFIG_BOOTPARAM_HARDLOCKUP_PANIC is not set
+CONFIG_BOOTPARAM_HARDLOCKUP_PANIC_VALUE=0
+CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC=y
+CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=1
+CONFIG_DETECT_HUNG_TASK=y
+CONFIG_DEFAULT_HUNG_TASK_TIMEOUT=120
+# CONFIG_BOOTPARAM_HUNG_TASK_PANIC is not set
+CONFIG_BOOTPARAM_HUNG_TASK_PANIC_VALUE=0
+CONFIG_WQ_WATCHDOG=y
+# CONFIG_PANIC_ON_OOPS is not set
+CONFIG_PANIC_ON_OOPS_VALUE=0
+CONFIG_PANIC_TIMEOUT=5
+CONFIG_SCHED_DEBUG=y
+CONFIG_SCHED_INFO=y
+CONFIG_PANIC_ON_SCHED_BUG=y
+CONFIG_PANIC_ON_RT_THROTTLING=y
+CONFIG_SYSRQ_SCHED_DEBUG=y
+CONFIG_SCHEDSTATS=y
+# CONFIG_SCHED_STACK_END_CHECK is not set
+# CONFIG_DEBUG_TIMEKEEPING is not set
+# CONFIG_DEBUG_PREEMPT is not set
+
+#
+# Lock Debugging (spinlocks, mutexes, etc...)
+#
+# CONFIG_DEBUG_RT_MUTEXES is not set
+# CONFIG_DEBUG_SPINLOCK is not set
+# CONFIG_DEBUG_MUTEXES is not set
+# CONFIG_DEBUG_WW_MUTEX_SLOWPATH is not set
+# CONFIG_DEBUG_LOCK_ALLOC is not set
+# CONFIG_PROVE_LOCKING is not set
+# CONFIG_LOCK_STAT is not set
+# CONFIG_DEBUG_ATOMIC_SLEEP is not set
+# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
+# CONFIG_LOCK_TORTURE_TEST is not set
+CONFIG_STACKTRACE=y
+# CONFIG_DEBUG_KOBJECT is not set
+CONFIG_HAVE_DEBUG_BUGVERBOSE=y
+CONFIG_DEBUG_BUGVERBOSE=y
+# CONFIG_DEBUG_LIST is not set
+# CONFIG_DEBUG_PI_LIST is not set
+# CONFIG_DEBUG_SG is not set
+# CONFIG_DEBUG_NOTIFIERS is not set
+# CONFIG_DEBUG_CREDENTIALS is not set
+
+#
+# RCU Debugging
+#
+# CONFIG_PROVE_RCU is not set
+# CONFIG_SPARSE_RCU_POINTER is not set
+# CONFIG_TORTURE_TEST is not set
+# CONFIG_RCU_TORTURE_TEST is not set
+CONFIG_RCU_CPU_STALL_TIMEOUT=21
+# CONFIG_RCU_STALL_WATCHDOG_BITE is not set
+# CONFIG_RCU_TRACE is not set
+# CONFIG_RCU_EQS_DEBUG is not set
+# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set
+# CONFIG_NOTIFIER_ERROR_INJECTION is not set
+# CONFIG_FAULT_INJECTION is not set
+CONFIG_NOP_TRACER=y
+CONFIG_HAVE_FUNCTION_TRACER=y
+CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y
+CONFIG_HAVE_DYNAMIC_FTRACE=y
+CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y
+CONFIG_HAVE_SYSCALL_TRACEPOINTS=y
+CONFIG_HAVE_C_RECORDMCOUNT=y
+CONFIG_TRACE_CLOCK=y
+CONFIG_RING_BUFFER=y
+CONFIG_EVENT_TRACING=y
+CONFIG_CONTEXT_SWITCH_TRACER=y
+CONFIG_IPC_LOGGING=y
+# CONFIG_QCOM_RTB is not set
+CONFIG_TRACING=y
+CONFIG_GENERIC_TRACER=y
+CONFIG_TRACING_SUPPORT=y
+# CONFIG_FTRACE is not set
+
+#
+# Runtime Testing
+#
+# CONFIG_LKDTM is not set
+# CONFIG_TEST_LIST_SORT is not set
+# CONFIG_BACKTRACE_SELF_TEST is not set
+# CONFIG_RBTREE_TEST is not set
+# CONFIG_INTERVAL_TREE_TEST is not set
+# CONFIG_PERCPU_TEST is not set
+# CONFIG_ATOMIC64_SELFTEST is not set
+# CONFIG_TEST_HEXDUMP is not set
+# CONFIG_TEST_STRING_HELPERS is not set
+# CONFIG_TEST_KSTRTOX is not set
+# CONFIG_TEST_PRINTF is not set
+# CONFIG_TEST_RHASHTABLE is not set
+# CONFIG_DMA_API_DEBUG is not set
+# CONFIG_TEST_LKM is not set
+# CONFIG_TEST_USER_COPY is not set
+# CONFIG_TEST_BPF is not set
+# CONFIG_TEST_FIRMWARE is not set
+# CONFIG_TEST_UDELAY is not set
+# CONFIG_MEMTEST is not set
+# CONFIG_TEST_STATIC_KEYS is not set
+CONFIG_PANIC_ON_DATA_CORRUPTION=y
+# CONFIG_SAMPLES is not set
+CONFIG_HAVE_ARCH_KGDB=y
+# CONFIG_KGDB is not set
+CONFIG_ARCH_HAS_UBSAN_SANITIZE_ALL=y
+# CONFIG_UBSAN is not set
+# CONFIG_ARM64_PTDUMP is not set
+# CONFIG_STRICT_DEVMEM is not set
+# CONFIG_PID_IN_CONTEXTIDR is not set
+# CONFIG_ARM64_RANDOMIZE_TEXT_OFFSET is not set
+# CONFIG_DEBUG_SET_MODULE_RONX is not set
+CONFIG_DEBUG_RODATA=y
+CONFIG_DEBUG_ALIGN_RODATA=y
+# CONFIG_FORCE_PAGES is not set
+# CONFIG_FREE_PAGES_RDONLY is not set
+# CONFIG_ARM64_STRICT_BREAK_BEFORE_MAKE is not set
+# CONFIG_CORESIGHT is not set
+
+#
+# Security options
+#
+CONFIG_KEYS=y
+CONFIG_KEYS_COMPAT=y
+# CONFIG_PERSISTENT_KEYRINGS is not set
+# CONFIG_BIG_KEYS is not set
+# CONFIG_ENCRYPTED_KEYS is not set
+
+#
+# Qualcomm Technologies, Inc Per File Encryption security device drivers
+#
+# CONFIG_SECURITY_DMESG_RESTRICT is not set
+CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y
+# CONFIG_SECURITY is not set
+# CONFIG_SECURITYFS is not set
+CONFIG_HAVE_HARDENED_USERCOPY_ALLOCATOR=y
+CONFIG_HAVE_ARCH_HARDENED_USERCOPY=y
+# CONFIG_HARDENED_USERCOPY is not set
+CONFIG_DEFAULT_SECURITY_DAC=y
+CONFIG_DEFAULT_SECURITY=""
+CONFIG_CRYPTO=y
+
+#
+# Crypto core or helper
+#
+CONFIG_CRYPTO_ALGAPI=y
+CONFIG_CRYPTO_ALGAPI2=y
+CONFIG_CRYPTO_AEAD=y
+CONFIG_CRYPTO_AEAD2=y
+CONFIG_CRYPTO_BLKCIPHER=y
+CONFIG_CRYPTO_BLKCIPHER2=y
+CONFIG_CRYPTO_HASH=y
+CONFIG_CRYPTO_HASH2=y
+CONFIG_CRYPTO_RNG=y
+CONFIG_CRYPTO_RNG2=y
+CONFIG_CRYPTO_RNG_DEFAULT=y
+CONFIG_CRYPTO_PCOMP2=y
+CONFIG_CRYPTO_AKCIPHER2=y
+CONFIG_CRYPTO_AKCIPHER=y
+# CONFIG_CRYPTO_RSA is not set
+CONFIG_CRYPTO_MANAGER=y
+CONFIG_CRYPTO_MANAGER2=y
+# CONFIG_CRYPTO_USER is not set
+CONFIG_CRYPTO_MANAGER_DISABLE_TESTS=y
+CONFIG_CRYPTO_GF128MUL=y
+CONFIG_CRYPTO_NULL=y
+CONFIG_CRYPTO_NULL2=y
+# CONFIG_CRYPTO_PCRYPT is not set
+CONFIG_CRYPTO_WORKQUEUE=y
+CONFIG_CRYPTO_CRYPTD=y
+# CONFIG_CRYPTO_MCRYPTD is not set
+CONFIG_CRYPTO_AUTHENC=y
+# CONFIG_CRYPTO_TEST is not set
+CONFIG_CRYPTO_ABLK_HELPER=y
+
+#
+# Authenticated Encryption with Associated Data
+#
+CONFIG_CRYPTO_CCM=y
+CONFIG_CRYPTO_GCM=y
+# CONFIG_CRYPTO_CHACHA20POLY1305 is not set
+CONFIG_CRYPTO_SEQIV=y
+CONFIG_CRYPTO_ECHAINIV=y
+
+#
+# Block modes
+#
+CONFIG_CRYPTO_CBC=y
+# CONFIG_CRYPTO_HEH is not set
+CONFIG_CRYPTO_CTR=y
+# CONFIG_CRYPTO_CTS is not set
+CONFIG_CRYPTO_ECB=y
+# CONFIG_CRYPTO_LRW is not set
+# CONFIG_CRYPTO_PCBC is not set
+CONFIG_CRYPTO_XTS=y
+# CONFIG_CRYPTO_KEYWRAP is not set
+
+#
+# Hash modes
+#
+CONFIG_CRYPTO_CMAC=y
+CONFIG_CRYPTO_HMAC=y
+# CONFIG_CRYPTO_XCBC is not set
+# CONFIG_CRYPTO_VMAC is not set
+
+#
+# Digest
+#
+CONFIG_CRYPTO_CRC32C=y
+# CONFIG_CRYPTO_CRC32 is not set
+# CONFIG_CRYPTO_CRCT10DIF is not set
+CONFIG_CRYPTO_GHASH=y
+# CONFIG_CRYPTO_POLY1305 is not set
+# CONFIG_CRYPTO_MD4 is not set
+CONFIG_CRYPTO_MD5=y
+# CONFIG_CRYPTO_MICHAEL_MIC is not set
+# CONFIG_CRYPTO_RMD128 is not set
+# CONFIG_CRYPTO_RMD160 is not set
+# CONFIG_CRYPTO_RMD256 is not set
+# CONFIG_CRYPTO_RMD320 is not set
+CONFIG_CRYPTO_SHA1=y
+CONFIG_CRYPTO_SHA256=y
+CONFIG_CRYPTO_SHA512=y
+# CONFIG_CRYPTO_TGR192 is not set
+# CONFIG_CRYPTO_WP512 is not set
+
+#
+# Ciphers
+#
+CONFIG_CRYPTO_AES=y
+# CONFIG_CRYPTO_ANUBIS is not set
+CONFIG_CRYPTO_ARC4=y
+# CONFIG_CRYPTO_BLOWFISH is not set
+# CONFIG_CRYPTO_CAMELLIA is not set
+# CONFIG_CRYPTO_CAST5 is not set
+# CONFIG_CRYPTO_CAST6 is not set
+CONFIG_CRYPTO_DES=y
+# CONFIG_CRYPTO_FCRYPT is not set
+# CONFIG_CRYPTO_KHAZAD is not set
+# CONFIG_CRYPTO_SALSA20 is not set
+# CONFIG_CRYPTO_CHACHA20 is not set
+# CONFIG_CRYPTO_SEED is not set
+# CONFIG_CRYPTO_SERPENT is not set
+# CONFIG_CRYPTO_TEA is not set
+# CONFIG_CRYPTO_TWOFISH is not set
+
+#
+# Compression
+#
+CONFIG_CRYPTO_DEFLATE=y
+# CONFIG_CRYPTO_ZLIB is not set
+# CONFIG_CRYPTO_LZO is not set
+# CONFIG_CRYPTO_842 is not set
+# CONFIG_CRYPTO_LZ4 is not set
+# CONFIG_CRYPTO_LZ4HC is not set
+
+#
+# Random Number Generation
+#
+CONFIG_CRYPTO_ANSI_CPRNG=y
+CONFIG_CRYPTO_DRBG_MENU=y
+CONFIG_CRYPTO_DRBG_HMAC=y
+# CONFIG_CRYPTO_DRBG_HASH is not set
+# CONFIG_CRYPTO_DRBG_CTR is not set
+CONFIG_CRYPTO_DRBG=y
+CONFIG_CRYPTO_JITTERENTROPY=y
+# CONFIG_CRYPTO_USER_API_HASH is not set
+# CONFIG_CRYPTO_USER_API_SKCIPHER is not set
+# CONFIG_CRYPTO_USER_API_RNG is not set
+# CONFIG_CRYPTO_USER_API_AEAD is not set
+CONFIG_CRYPTO_HASH_INFO=y
+CONFIG_CRYPTO_HW=y
+CONFIG_CRYPTO_DEV_QCE50=y
+# CONFIG_FIPS_ENABLE is not set
+CONFIG_CRYPTO_DEV_QCRYPTO=y
+CONFIG_CRYPTO_DEV_QCOM_MSM_QCE=y
+CONFIG_CRYPTO_DEV_QCEDEV=y
+CONFIG_CRYPTO_DEV_OTA_CRYPTO=y
+# CONFIG_CRYPTO_DEV_CCP is not set
+# CONFIG_CRYPTO_DEV_QCE is not set
+CONFIG_ASYMMETRIC_KEY_TYPE=y
+CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=y
+CONFIG_PUBLIC_KEY_ALGO_RSA=y
+CONFIG_X509_CERTIFICATE_PARSER=y
+CONFIG_PKCS7_MESSAGE_PARSER=y
+# CONFIG_PKCS7_TEST_KEY is not set
+# CONFIG_SIGNED_PE_FILE_VERIFICATION is not set
+
+#
+# Certificates for signature checking
+#
+CONFIG_SYSTEM_TRUSTED_KEYRING=y
+CONFIG_SYSTEM_TRUSTED_KEYS=""
+CONFIG_ARM64_CRYPTO=y
+CONFIG_CRYPTO_SHA1_ARM64_CE=y
+CONFIG_CRYPTO_SHA2_ARM64_CE=y
+CONFIG_CRYPTO_GHASH_ARM64_CE=y
+CONFIG_CRYPTO_POLY_HASH_ARM64_CE=y
+CONFIG_CRYPTO_AES_ARM64_CE=y
+CONFIG_CRYPTO_AES_ARM64_CE_CCM=y
+CONFIG_CRYPTO_AES_ARM64_CE_BLK=y
+CONFIG_CRYPTO_AES_ARM64_NEON_BLK=y
+CONFIG_CRYPTO_CRC32_ARM64=y
+CONFIG_BINARY_PRINTF=y
+
+#
+# Library routines
+#
+CONFIG_BITREVERSE=y
+CONFIG_HAVE_ARCH_BITREVERSE=y
+CONFIG_RATIONAL=y
+CONFIG_GENERIC_STRNCPY_FROM_USER=y
+CONFIG_GENERIC_STRNLEN_USER=y
+CONFIG_GENERIC_NET_UTILS=y
+CONFIG_GENERIC_PCI_IOMAP=y
+CONFIG_GENERIC_IO=y
+CONFIG_ARCH_USE_CMPXCHG_LOCKREF=y
+CONFIG_CRC_CCITT=y
+CONFIG_CRC16=y
+# CONFIG_CRC_T10DIF is not set
+CONFIG_CRC_ITU_T=y
+CONFIG_CRC32=y
+# CONFIG_CRC32_SELFTEST is not set
+CONFIG_CRC32_SLICEBY8=y
+# CONFIG_CRC32_SLICEBY4 is not set
+# CONFIG_CRC32_SARWATE is not set
+# CONFIG_CRC32_BIT is not set
+# CONFIG_CRC7 is not set
+CONFIG_LIBCRC32C=y
+# CONFIG_CRC8 is not set
+CONFIG_AUDIT_GENERIC=y
+CONFIG_AUDIT_ARCH_COMPAT_GENERIC=y
+CONFIG_AUDIT_COMPAT_GENERIC=y
+# CONFIG_RANDOM32_SELFTEST is not set
+CONFIG_ZLIB_INFLATE=y
+CONFIG_ZLIB_DEFLATE=y
+CONFIG_LZO_COMPRESS=y
+CONFIG_LZO_DECOMPRESS=y
+CONFIG_XZ_DEC=y
+# CONFIG_XZ_DEC_X86 is not set
+# CONFIG_XZ_DEC_POWERPC is not set
+# CONFIG_XZ_DEC_IA64 is not set
+# CONFIG_XZ_DEC_ARM is not set
+# CONFIG_XZ_DEC_ARMTHUMB is not set
+# CONFIG_XZ_DEC_SPARC is not set
+# CONFIG_XZ_DEC_BCJ is not set
+# CONFIG_XZ_DEC_TEST is not set
+CONFIG_GENERIC_ALLOCATOR=y
+CONFIG_REED_SOLOMON=y
+CONFIG_REED_SOLOMON_ENC8=y
+CONFIG_REED_SOLOMON_DEC8=y
+CONFIG_INTERVAL_TREE=y
+CONFIG_ASSOCIATIVE_ARRAY=y
+CONFIG_HAS_IOMEM=y
+CONFIG_HAS_IOPORT_MAP=y
+CONFIG_HAS_DMA=y
+CONFIG_CPU_RMAP=y
+CONFIG_DQL=y
+CONFIG_NLATTR=y
+CONFIG_ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE=y
+CONFIG_CLZ_TAB=y
+# CONFIG_CORDIC is not set
+# CONFIG_DDR is not set
+CONFIG_MPILIB=y
+CONFIG_LIBFDT=y
+CONFIG_OID_REGISTRY=y
+# CONFIG_SG_SPLIT is not set
+CONFIG_ARCH_HAS_SG_CHAIN=y
+CONFIG_QMI_ENCDEC=y
+# CONFIG_QMI_ENCDEC_DEBUG is not set
+CONFIG_STACKDEPOT=y
+CONFIG_ARCH_HAS_FBXSERIAL=y
+CONFIG_FBXSERIAL=y
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/base/dma-removed.c	2019-10-29 09:26:23.409200888 +0100
@@ -0,0 +1,452 @@
+/*
+ *
+ *  Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
+ *  Copyright (C) 2000-2004 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/bootmem.h>
+#include <linux/module.h>
+#include <linux/mm.h>
+#include <linux/gfp.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/list.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/dma-contiguous.h>
+#include <linux/highmem.h>
+#include <linux/memblock.h>
+#include <linux/slab.h>
+#include <linux/iommu.h>
+#include <linux/io.h>
+#include <linux/vmalloc.h>
+#include <linux/sizes.h>
+#include <linux/spinlock.h>
+#include <asm/dma-contiguous.h>
+#include <asm/tlbflush.h>
+
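+/*
+ * Book-keeping for one "removed" (carved-out) memory region.
+ * @base:     physical base address of the region
+ * @nr_pages: size of the region in pages
+ * @bitmap:   allocation bitmap, one bit per page
+ * @fixup:    non-zero while a deferred "no-map-fixup" pass is still pending
+ * @lock:     protects @bitmap against concurrent alloc/free
+ */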
+struct removed_region {
+	phys_addr_t	base;
+	int		nr_pages;
+	unsigned long	*bitmap;
+	int		fixup;
+	struct mutex	lock;
+};
+
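+/*
+ * Dummy CPU "address" handed back when the caller asked for no kernel
+ * mapping; it must never be dereferenced.
+ */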
+#define NO_KERNEL_MAPPING_DUMMY	0x2222
+
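+/* Allocate the region descriptor and its per-page allocation bitmap. */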
+static int dma_init_removed_memory(phys_addr_t phys_addr, size_t size,
+				struct removed_region **mem)
+{
+	struct removed_region *dma_mem = NULL;
+	int pages = size >> PAGE_SHIFT;
+	int bitmap_size = BITS_TO_LONGS(pages) * sizeof(long);
+
+	dma_mem = kzalloc(sizeof(struct removed_region), GFP_KERNEL);
+	if (!dma_mem)
+		goto out;
+	dma_mem->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
+	if (!dma_mem->bitmap)
+		goto free1_out;
+
+	dma_mem->base = phys_addr;
+	dma_mem->nr_pages = pages;
+	mutex_init(&dma_mem->lock);
+
+	*mem = dma_mem;
+
+	return 0;
+
+free1_out:
+	kfree(dma_mem);
+out:
+	return -ENOMEM;
+}
+
+static int dma_assign_removed_region(struct device *dev,
+					struct removed_region *mem)
+{
+	if (dev->removed_mem)
+		return -EBUSY;
+
+	dev->removed_mem = mem;
+	return 0;
+}
+
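+/*
+ * Drop the carved-out range from the "System RAM" entry of the iomem
+ * resource tree, releasing, trimming or splitting the existing resource
+ * depending on how the range overlaps it.
+ */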
+static void adapt_iomem_resource(unsigned long base_pfn, unsigned long end_pfn)
+{
+	struct resource *res, *conflict;
+	resource_size_t cstart, cend;
+
+	res = kzalloc(sizeof(*res), GFP_KERNEL);
+	if (!res)
+		return;
+
+	res->name  = "System RAM";
+	res->start = __pfn_to_phys(base_pfn);
+	res->end = __pfn_to_phys(end_pfn) - 1;
+	res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
+
+	conflict = request_resource_conflict(&iomem_resource, res);
+	if (!conflict) {
+		/*
+		 * No overlap means the request actually inserted res into
+		 * the tree; unlink it again before freeing it.
+		 */
+		pr_err("Removed memory: no conflict resource found\n");
+		release_resource(res);
+		kfree(res);
+		goto done;
+	}
+
+	cstart = conflict->start;
+	cend = conflict->end;
+	if ((cstart == res->start) && (cend == res->end)) {
+		release_resource(conflict);
+	} else if ((res->start >= cstart) && (res->start <= cend)) {
+		if (res->start == cstart) {
+			adjust_resource(conflict, res->end + 1,
+					cend - res->end);
+		} else if (res->end == cend) {
+			adjust_resource(conflict, cstart,
+					res->start - cstart);
+		} else {
+			adjust_resource(conflict, cstart,
+					res->start - cstart);
+			res->start = res->end + 1;
+			res->end = cend;
+			request_resource(&iomem_resource, res);
+			goto done;
+		}
+	} else {
+		pr_err("Removed memory: incorrect resource conflict start=%llx end=%llx\n",
+				(unsigned long long) conflict->start,
+				(unsigned long long) conflict->end);
+	}
+
+	kfree(res);
+done:
+	return;
+}
+
+#ifdef CONFIG_FLATMEM
+static void free_memmap(unsigned long start_pfn, unsigned long end_pfn)
+{
+	struct page *start_pg, *end_pg;
+	unsigned long pg, pgend;
+
+	start_pfn = ALIGN(start_pfn, pageblock_nr_pages);
+	end_pfn = round_down(end_pfn, pageblock_nr_pages);
+	/*
+	 * Convert start_pfn/end_pfn to a struct page pointer.
+	 */
+	start_pg = pfn_to_page(start_pfn - 1) + 1;
+	end_pg = pfn_to_page(end_pfn - 1) + 1;
+
+	/*
+	 * Convert to physical addresses, and round start upwards and end
+	 * downwards.
+	 */
+	pg = (unsigned long)PAGE_ALIGN(__pa(start_pg));
+	pgend = (unsigned long)__pa(end_pg) & PAGE_MASK;
+
+	/*
+	 * If there are free pages between these, free the section of the
+	 * memmap array.
+	 */
+	if (pg < pgend)
+		free_bootmem_late(pg, pgend - pg);
+}
+#else
+static void free_memmap(unsigned long start_pfn, unsigned long end_pfn)
+{
+}
+#endif
+
+static int _clear_pte(pte_t *pte, pgtable_t token, unsigned long addr,
+			    void *data)
+{
+	pte_clear(&init_mm, addr, pte);
+	return 0;
+}
+
+static void clear_mapping(unsigned long addr, unsigned long size)
+{
+	apply_to_page_range(&init_mm, addr, size, _clear_pte, NULL);
+	/* ensure ptes are updated */
+	mb();
+	flush_tlb_kernel_range(addr, addr + size);
+}
+
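+/*
+ * One-shot "no-map-fixup" pass, run on the first allocation: keep the
+ * first @index pages removed (carved out of memblock, unmapped, memmap
+ * freed) and give the unused tail of the region back to the system.
+ */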
+static void removed_region_fixup(struct removed_region *dma_mem, int index)
+{
+	unsigned long fixup_size;
+	unsigned long base_pfn;
+	unsigned long flags;
+
+	if (index > dma_mem->nr_pages)
+		return;
+
+	/* carve-out */
+	flags = memblock_region_resize_late_begin();
+	memblock_free(dma_mem->base, dma_mem->nr_pages * PAGE_SIZE);
+	memblock_remove(dma_mem->base, index * PAGE_SIZE);
+	memblock_region_resize_late_end(flags);
+
+	/* clear page-mappings */
+	base_pfn = dma_mem->base >> PAGE_SHIFT;
+	if (!PageHighMem(pfn_to_page(base_pfn))) {
+		clear_mapping((unsigned long) phys_to_virt(dma_mem->base),
+				index * PAGE_SIZE);
+	}
+
+	/* free page objects */
+	free_memmap(base_pfn, base_pfn + index);
+
+	/* return remaining area to system */
+	fixup_size = (dma_mem->nr_pages - index) * PAGE_SIZE;
+	free_bootmem_late(dma_mem->base + index * PAGE_SIZE, fixup_size);
+
+	/*
+	 * release freed resource region so as to show up under iomem resource
+	 * list
+	 */
+	adapt_iomem_resource(base_pfn, base_pfn + index);
+
+	/* limit the fixup region */
+	dma_mem->nr_pages = index;
+}
+
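+/*
+ * First-fit bitmap allocator over the removed region; natural alignment
+ * is capped at 1MiB order. Unless the caller asks for no kernel mapping,
+ * the buffer is ioremap()ed (and zeroed unless zeroing is skipped).
+ */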
+void *removed_alloc(struct device *dev, size_t size, dma_addr_t *handle,
+		    gfp_t gfp, struct dma_attrs *attrs)
+{
+	bool no_kernel_mapping = dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING,
+					attrs);
+	bool skip_zeroing = dma_get_attr(DMA_ATTR_SKIP_ZEROING, attrs);
+	int pageno;
+	unsigned long order;
+	void *addr = NULL;
+	struct removed_region *dma_mem = dev->removed_mem;
+	int nbits;
+	unsigned int align;
+
+	if (!gfpflags_allow_blocking(gfp))
+		return NULL;
+
+	size = PAGE_ALIGN(size);
+	nbits = size >> PAGE_SHIFT;
+	order = get_order(size);
+
+	if (order > get_order(SZ_1M))
+		order = get_order(SZ_1M);
+
+	align = (1 << order) - 1;
+
+	mutex_lock(&dma_mem->lock);
+	pageno = bitmap_find_next_zero_area(dma_mem->bitmap, dma_mem->nr_pages,
+						0, nbits, align);
+
+	if (pageno < dma_mem->nr_pages) {
+		phys_addr_t base = dma_mem->base + pageno * PAGE_SIZE;
+		*handle = base;
+
+		bitmap_set(dma_mem->bitmap, pageno, nbits);
+
+		if (dma_mem->fixup) {
+			removed_region_fixup(dma_mem, pageno + nbits);
+			dma_mem->fixup = 0;
+		}
+
+		if (no_kernel_mapping && skip_zeroing) {
+			addr = (void *)NO_KERNEL_MAPPING_DUMMY;
+			goto out;
+		}
+
+		addr = ioremap(base, size);
+		if (WARN_ON(!addr)) {
+			bitmap_clear(dma_mem->bitmap, pageno, nbits);
+		} else {
+			if (!skip_zeroing)
+				memset_io(addr, 0, size);
+			if (no_kernel_mapping) {
+				iounmap(addr);
+				addr = (void *)NO_KERNEL_MAPPING_DUMMY;
+			}
+			*handle = base;
+		}
+	}
+
+out:
+	mutex_unlock(&dma_mem->lock);
+	return addr;
+}
+
+int removed_mmap(struct device *dev, struct vm_area_struct *vma,
+		 void *cpu_addr, dma_addr_t dma_addr, size_t size,
+		 struct dma_attrs *attrs)
+{
+	return -ENXIO;
+}
+
+void removed_free(struct device *dev, size_t size, void *cpu_addr,
+		  dma_addr_t handle, struct dma_attrs *attrs)
+{
+	bool no_kernel_mapping = dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING,
+					attrs);
+	struct removed_region *dma_mem = dev->removed_mem;
+
+	size = PAGE_ALIGN(size);
+	if (!no_kernel_mapping)
+		iounmap(cpu_addr);
+	mutex_lock(&dma_mem->lock);
+	bitmap_clear(dma_mem->bitmap, (handle - dma_mem->base) >> PAGE_SHIFT,
+				size >> PAGE_SHIFT);
+	mutex_unlock(&dma_mem->lock);
+}
+
+static dma_addr_t removed_map_page(struct device *dev, struct page *page,
+			unsigned long offset, size_t size,
+			enum dma_data_direction dir,
+			struct dma_attrs *attrs)
+{
+	return ~(dma_addr_t)0;
+}
+
+static void removed_unmap_page(struct device *dev, dma_addr_t dma_handle,
+		size_t size, enum dma_data_direction dir,
+		struct dma_attrs *attrs)
+{
+}
+
+static int removed_map_sg(struct device *dev, struct scatterlist *sg,
+			int nents, enum dma_data_direction dir,
+			struct dma_attrs *attrs)
+{
+	return 0;
+}
+
+static void removed_unmap_sg(struct device *dev,
+			struct scatterlist *sg, int nents,
+			enum dma_data_direction dir,
+			struct dma_attrs *attrs)
+{
+}
+
+static void removed_sync_single_for_cpu(struct device *dev,
+			dma_addr_t dma_handle, size_t size,
+			enum dma_data_direction dir)
+{
+}
+
+void removed_sync_single_for_device(struct device *dev,
+			dma_addr_t dma_handle, size_t size,
+			enum dma_data_direction dir)
+{
+}
+
+void removed_sync_sg_for_cpu(struct device *dev,
+			struct scatterlist *sg, int nents,
+			enum dma_data_direction dir)
+{
+}
+
+void removed_sync_sg_for_device(struct device *dev,
+			struct scatterlist *sg, int nents,
+			enum dma_data_direction dir)
+{
+}
+
+void *removed_remap(struct device *dev, void *cpu_addr, dma_addr_t handle,
+			size_t size, struct dma_attrs *attrs)
+{
+	return ioremap(handle, size);
+}
+
+void removed_unremap(struct device *dev, void *remapped_address, size_t size)
+{
+	iounmap(remapped_address);
+}
+
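+/*
+ * Streaming DMA is deliberately unsupported: map_page() returns an error
+ * cookie and map_sg() maps nothing. Only the coherent alloc/free/remap
+ * paths are meaningful for removed regions.
+ */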
+struct dma_map_ops removed_dma_ops = {
+	.alloc			= removed_alloc,
+	.free			= removed_free,
+	.mmap			= removed_mmap,
+	.map_page		= removed_map_page,
+	.unmap_page		= removed_unmap_page,
+	.map_sg			= removed_map_sg,
+	.unmap_sg		= removed_unmap_sg,
+	.sync_single_for_cpu	= removed_sync_single_for_cpu,
+	.sync_single_for_device	= removed_sync_single_for_device,
+	.sync_sg_for_cpu	= removed_sync_sg_for_cpu,
+	.sync_sg_for_device	= removed_sync_sg_for_device,
+	.remap			= removed_remap,
+	.unremap		= removed_unremap,
+};
+EXPORT_SYMBOL(removed_dma_ops);
+
+#ifdef CONFIG_OF_RESERVED_MEM
+#include <linux/of.h>
+#include <linux/of_fdt.h>
+#include <linux/of_reserved_mem.h>
+
+static int rmem_dma_device_init(struct reserved_mem *rmem, struct device *dev)
+{
+	struct removed_region *mem = rmem->priv;
+
+	if (!mem && dma_init_removed_memory(rmem->base, rmem->size, &mem)) {
+		pr_info("Reserved memory: failed to init DMA memory pool at %pa, size %ld MiB\n",
+			&rmem->base, (unsigned long)rmem->size / SZ_1M);
+		return -EINVAL;
+	}
+	mem->fixup = rmem->fixup;
+	set_dma_ops(dev, &removed_dma_ops);
+	rmem->priv = mem;
+	dma_assign_removed_region(dev, mem);
+	return 0;
+}
+
+static void rmem_dma_device_release(struct reserved_mem *rmem,
+				    struct device *dev)
+{
+	/* Drop the reference taken by dma_assign_removed_region(). */
+	dev->removed_mem = NULL;
+}
+
+static const struct reserved_mem_ops removed_mem_ops = {
+	.device_init	= rmem_dma_device_init,
+	.device_release	= rmem_dma_device_release,
+};
+
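+/*
+ * Early parser for "removed-dma-pool" reserved-memory nodes: "no-map"
+ * and "no-map-fixup" are mutually exclusive, and "no-map-fixup" defers
+ * returning the unused tail of the pool until the first allocation.
+ */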
+static int __init removed_dma_setup(struct reserved_mem *rmem)
+{
+	unsigned long node = rmem->fdt_node;
+	int nomap, fixup;
+
+	nomap = of_get_flat_dt_prop(node, "no-map", NULL) != NULL;
+	fixup = of_get_flat_dt_prop(node, "no-map-fixup", NULL) != NULL;
+
+	if (nomap && fixup) {
+		pr_err("Removed memory: nomap & nomap-fixup can't co-exist\n");
+		return -EINVAL;
+	}
+
+	rmem->fixup = fixup;
+	if (rmem->fixup) {
+		/*
+		 * Architecture specific contiguous memory fixup, only for
+		 * no-map-fixup, to split the mappings.
+		 */
+		dma_contiguous_early_fixup(rmem->base, rmem->size);
+	}
+
+	rmem->ops = &removed_mem_ops;
+	pr_info("Removed memory: created DMA memory pool at %pa, size %ld MiB\n",
+		&rmem->base, (unsigned long)rmem->size / SZ_1M);
+	return 0;
+}
+RESERVEDMEM_OF_DECLARE(dma, "removed-dma-pool", removed_dma_setup);
+#endif
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/base/power/opp/debugfs.c	2019-01-22 16:16:22.879240756 +0100
@@ -0,0 +1,218 @@
+/*
+ * Generic OPP debugfs interface
+ *
+ * Copyright (C) 2015-2016 Viresh Kumar <viresh.kumar@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/debugfs.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/limits.h>
+
+#include "opp.h"
+
+static struct dentry *rootdir;
+
+static void opp_set_dev_name(const struct device *dev, char *name)
+{
+	if (dev->parent)
+		snprintf(name, NAME_MAX, "%s-%s", dev_name(dev->parent),
+			 dev_name(dev));
+	else
+		snprintf(name, NAME_MAX, "%s", dev_name(dev));
+}
+
+void opp_debug_remove_one(struct dev_pm_opp *opp)
+{
+	debugfs_remove_recursive(opp->dentry);
+}
+
+int opp_debug_create_one(struct dev_pm_opp *opp, struct opp_table *opp_table)
+{
+	struct dentry *pdentry = opp_table->dentry;
+	struct dentry *d;
+	char name[25];	/* 20 chars for 64 bit value + 5 (opp:\0) */
+
+	/* Rate is unique to each OPP, use it to give opp-name */
+	snprintf(name, sizeof(name), "opp:%lu", opp->rate);
+
+	/* Create per-opp directory */
+	d = debugfs_create_dir(name, pdentry);
+	if (!d)
+		return -ENOMEM;
+
+	if (!debugfs_create_bool("available", S_IRUGO, d, &opp->available))
+		return -ENOMEM;
+
+	if (!debugfs_create_bool("dynamic", S_IRUGO, d, &opp->dynamic))
+		return -ENOMEM;
+
+	if (!debugfs_create_bool("turbo", S_IRUGO, d, &opp->turbo))
+		return -ENOMEM;
+
+	if (!debugfs_create_bool("suspend", S_IRUGO, d, &opp->suspend))
+		return -ENOMEM;
+
+	if (!debugfs_create_ulong("rate_hz", S_IRUGO, d, &opp->rate))
+		return -ENOMEM;
+
+	if (!debugfs_create_ulong("u_volt_target", S_IRUGO, d, &opp->u_volt))
+		return -ENOMEM;
+
+	if (!debugfs_create_ulong("u_volt_min", S_IRUGO, d, &opp->u_volt_min))
+		return -ENOMEM;
+
+	if (!debugfs_create_ulong("u_volt_max", S_IRUGO, d, &opp->u_volt_max))
+		return -ENOMEM;
+
+	if (!debugfs_create_ulong("u_amp", S_IRUGO, d, &opp->u_amp))
+		return -ENOMEM;
+
+	if (!debugfs_create_ulong("clock_latency_ns", S_IRUGO, d,
+				  &opp->clock_latency_ns))
+		return -ENOMEM;
+
+	opp->dentry = d;
+	return 0;
+}
+
+static int opp_list_debug_create_dir(struct opp_device *opp_dev,
+				     struct opp_table *opp_table)
+{
+	const struct device *dev = opp_dev->dev;
+	struct dentry *d;
+
+	opp_set_dev_name(dev, opp_table->dentry_name);
+
+	/* Create device specific directory */
+	d = debugfs_create_dir(opp_table->dentry_name, rootdir);
+	if (!d) {
+		dev_err(dev, "%s: Failed to create debugfs dir\n", __func__);
+		return -ENOMEM;
+	}
+
+	opp_dev->dentry = d;
+	opp_table->dentry = d;
+
+	return 0;
+}
+
+static int opp_list_debug_create_link(struct opp_device *opp_dev,
+				      struct opp_table *opp_table)
+{
+	const struct device *dev = opp_dev->dev;
+	char name[NAME_MAX];
+	struct dentry *d;
+
+	opp_set_dev_name(opp_dev->dev, name);
+
+	/* Create device specific directory link */
+	d = debugfs_create_symlink(name, rootdir, opp_table->dentry_name);
+	if (!d) {
+		dev_err(dev, "%s: Failed to create link\n", __func__);
+		return -ENOMEM;
+	}
+
+	opp_dev->dentry = d;
+
+	return 0;
+}
+
+/**
+ * opp_debug_register - add a device opp node to the debugfs 'opp' directory
+ * @opp_dev: opp-dev pointer for device
+ * @opp_table: the device-opp being added
+ *
+ * Dynamically adds a device-specific directory to the debugfs 'opp'
+ * directory. If the device-opp is shared with other devices, then links will
+ * be created for all devices except the first.
+ *
+ * Return: 0 on success, otherwise negative error.
+ */
+int opp_debug_register(struct opp_device *opp_dev, struct opp_table *opp_table)
+{
+	if (!rootdir) {
+		pr_debug("%s: Uninitialized rootdir\n", __func__);
+		return -EINVAL;
+	}
+
+	if (opp_table->dentry)
+		return opp_list_debug_create_link(opp_dev, opp_table);
+
+	return opp_list_debug_create_dir(opp_dev, opp_table);
+}
+
+static void opp_migrate_dentry(struct opp_device *opp_dev,
+			       struct opp_table *opp_table)
+{
+	struct opp_device *new_dev;
+	const struct device *dev;
+	struct dentry *dentry;
+
+	/* Look for next opp-dev */
+	list_for_each_entry(new_dev, &opp_table->dev_list, node)
+		if (new_dev != opp_dev)
+			break;
+
+	/* new_dev is valid: the caller only migrates when the list is not singular */
+	dev = new_dev->dev;
+	debugfs_remove_recursive(new_dev->dentry);
+
+	opp_set_dev_name(dev, opp_table->dentry_name);
+
+	dentry = debugfs_rename(rootdir, opp_dev->dentry, rootdir,
+				opp_table->dentry_name);
+	if (!dentry) {
+		dev_err(dev, "%s: Failed to rename link from: %s to %s\n",
+			__func__, dev_name(opp_dev->dev), dev_name(dev));
+		return;
+	}
+
+	new_dev->dentry = dentry;
+	opp_table->dentry = dentry;
+}
+
+/**
+ * opp_debug_unregister - remove a device opp node from debugfs opp directory
+ * @opp_dev: opp-dev pointer for device
+ * @opp_table: the device-opp being removed
+ *
+ * Dynamically removes device specific directory from debugfs 'opp' directory.
+ */
+void opp_debug_unregister(struct opp_device *opp_dev,
+			  struct opp_table *opp_table)
+{
+	if (opp_dev->dentry == opp_table->dentry) {
+		/* Move the real dentry object under another device */
+		if (!list_is_singular(&opp_table->dev_list)) {
+			opp_migrate_dentry(opp_dev, opp_table);
+			goto out;
+		}
+		opp_table->dentry = NULL;
+	}
+
+	debugfs_remove_recursive(opp_dev->dentry);
+
+out:
+	opp_dev->dentry = NULL;
+}
+
+static int __init opp_debug_init(void)
+{
+	/* Create /sys/kernel/debug/opp directory */
+	rootdir = debugfs_create_dir("opp", NULL);
+	if (!rootdir) {
+		pr_err("%s: Failed to create root directory\n", __func__);
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+core_initcall(opp_debug_init);
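+
+/*
+ * Resulting debugfs layout sketch (device names are illustrative):
+ *
+ *	/sys/kernel/debug/opp/
+ *	    cpu0/
+ *	        opp:300000000/
+ *	            available dynamic turbo suspend rate_hz
+ *	            u_volt_target u_volt_min u_volt_max u_amp
+ *	            clock_latency_ns
+ *	    cpu1 -> cpu0	(symlink when the opp_table is shared)
+ */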
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/base/regmap/regmap-swr.c	2019-01-22 16:16:22.883240792 +0100
@@ -0,0 +1,216 @@
+/*
+ * Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <linux/mutex.h>
+#include <linux/regmap.h>
+#include <linux/soundwire/soundwire.h>
+#include <linux/module.h>
+#include <linux/init.h>
+
+#include "internal.h"
+
+static int regmap_swr_gather_write(void *context,
+				const void *reg, size_t reg_size,
+				const void *val, size_t val_len)
+{
+	struct device *dev = context;
+	struct swr_device *swr = to_swr_device(dev);
+	struct regmap *map = dev_get_regmap(dev, NULL);
+	size_t addr_bytes;
+	size_t val_bytes;
+	int i, ret = 0;
+	u16 reg_addr = 0;
+
+	if (map == NULL) {
+		dev_err(dev, "%s: regmap is NULL\n", __func__);
+		return -EINVAL;
+	}
+	addr_bytes = map->format.reg_bytes;
+	if (swr == NULL) {
+		dev_err(dev, "%s: swr device is NULL\n", __func__);
+		return -EINVAL;
+	}
+	if (reg_size != addr_bytes) {
+		dev_err(dev, "%s: reg size %zd bytes not supported\n",
+			__func__, reg_size);
+		return -EINVAL;
+	}
+	reg_addr = *(u16 *)reg;
+	val_bytes = map->format.val_bytes;
+	/* val_len = val_bytes * val_count */
+	for (i = 0; i < (val_len / val_bytes); i++) {
+		/* Offset address and value from the base on each iteration */
+		ret = swr_write(swr, swr->dev_num, reg_addr + i,
+				(u8 *)val + (val_bytes * i));
+		if (ret < 0) {
+			dev_err(dev, "%s: write reg 0x%x failed, err %d\n",
+				__func__, reg_addr + i, ret);
+			break;
+		}
+	}
+	return ret;
+}
+
+static int regmap_swr_raw_multi_reg_write(void *context, const void *data,
+					  size_t count)
+{
+	struct device *dev = context;
+	struct swr_device *swr = to_swr_device(dev);
+	struct regmap *map = dev_get_regmap(dev, NULL);
+	size_t addr_bytes;
+	size_t val_bytes;
+	size_t pad_bytes;
+	size_t num_regs;
+	int i = 0;
+	int ret = 0;
+	u16 *reg;
+	u8 *val;
+	u8 *buf;
+
+	if (swr == NULL) {
+		dev_err(dev, "%s: swr device is NULL\n", __func__);
+		return -EINVAL;
+	}
+
+	if (map == NULL) {
+		dev_err(dev, "%s: regmap is NULL\n", __func__);
+		return -EINVAL;
+	}
+
+	addr_bytes = map->format.reg_bytes;
+	val_bytes = map->format.val_bytes;
+	pad_bytes = map->format.pad_bytes;
+
+	if (addr_bytes + val_bytes + pad_bytes == 0) {
+		dev_err(dev, "%s: sum of addr, value and pad is 0\n", __func__);
+		return -EINVAL;
+	}
+	num_regs = count / (addr_bytes + val_bytes + pad_bytes);
+
+	reg = kcalloc(num_regs, sizeof(u16), GFP_KERNEL);
+	if (!reg)
+		return -ENOMEM;
+
+	val = kcalloc(num_regs, sizeof(u8), GFP_KERNEL);
+	if (!val) {
+		ret = -ENOMEM;
+		goto mem_fail;
+	}
+
+	buf = (u8 *)data;
+	for (i = 0; i < num_regs; i++) {
+		reg[i] = *(u16 *)buf;
+		buf += (map->format.reg_bytes + map->format.pad_bytes);
+		val[i] = *buf;
+		buf += map->format.val_bytes;
+	}
+	ret = swr_bulk_write(swr, swr->dev_num, reg, val, num_regs);
+	if (ret)
+		dev_err(dev, "%s: multi reg write failed\n", __func__);
+
+	kfree(val);
+mem_fail:
+	kfree(reg);
+	return ret;
+}
+
+static int regmap_swr_write(void *context, const void *data, size_t count)
+{
+	struct device *dev = context;
+	struct regmap *map = dev_get_regmap(dev, NULL);
+	size_t addr_bytes;
+	size_t val_bytes;
+	size_t pad_bytes;
+
+	if (map == NULL) {
+		dev_err(dev, "%s: regmap is NULL\n", __func__);
+		return -EINVAL;
+	}
+	addr_bytes = map->format.reg_bytes;
+	val_bytes = map->format.val_bytes;
+	pad_bytes = map->format.pad_bytes;
+
+	WARN_ON(count < addr_bytes);
+
+	if (count > (addr_bytes + val_bytes + pad_bytes))
+		return regmap_swr_raw_multi_reg_write(context, data, count);
+	else
+		return regmap_swr_gather_write(context, data, addr_bytes,
+					       (data + addr_bytes),
+					       (count - addr_bytes));
+}
+
+static int regmap_swr_read(void *context,
+			const void *reg, size_t reg_size,
+			void *val, size_t val_size)
+{
+	struct device *dev = context;
+	struct swr_device *swr = to_swr_device(dev);
+	struct regmap *map = dev_get_regmap(dev, NULL);
+	size_t addr_bytes;
+	int ret = 0;
+	u16 reg_addr = 0;
+
+	if (map == NULL) {
+		dev_err(dev, "%s: regmap is NULL\n", __func__);
+		return -EINVAL;
+	}
+	addr_bytes = map->format.reg_bytes;
+	if (swr == NULL) {
+		dev_err(dev, "%s: swr is NULL\n", __func__);
+		return -EINVAL;
+	}
+	if (reg_size != addr_bytes) {
+		dev_err(dev, "%s: register size %zd bytes not supported\n",
+			__func__, reg_size);
+		return -EINVAL;
+	}
+	reg_addr = *(u16 *)reg;
+	ret = swr_read(swr, swr->dev_num, reg_addr, val, val_size);
+	if (ret < 0)
+		dev_err(dev, "%s: codec reg 0x%x read failed %d\n",
+			__func__, reg_addr, ret);
+	return ret;
+}
+
+static struct regmap_bus regmap_swr = {
+	.write = regmap_swr_write,
+	.gather_write = regmap_swr_gather_write,
+	.read = regmap_swr_read,
+	.reg_format_endian_default = REGMAP_ENDIAN_NATIVE,
+	.val_format_endian_default = REGMAP_ENDIAN_NATIVE,
+};
+
+struct regmap *__regmap_init_swr(struct swr_device *swr,
+				 const struct regmap_config *config,
+				 struct lock_class_key *lock_key,
+				 const char *lock_name)
+{
+	return __regmap_init(&swr->dev, &regmap_swr, &swr->dev, config,
+			   lock_key, lock_name);
+}
+EXPORT_SYMBOL(__regmap_init_swr);
+
+struct regmap *__devm_regmap_init_swr(struct swr_device *swr,
+				      const struct regmap_config *config,
+				      struct lock_class_key *lock_key,
+				      const char *lock_name)
+{
+	return __devm_regmap_init(&swr->dev, &regmap_swr, &swr->dev, config,
+				lock_key, lock_name);
+}
+EXPORT_SYMBOL(__devm_regmap_init_swr);
+
+MODULE_LICENSE("GPL v2");
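+
+/*
+ * Consumer sketch (illustrative; assumes the usual devm_regmap_init_swr()
+ * wrapper macro is provided by <linux/regmap.h> in this tree):
+ *
+ *	static const struct regmap_config cfg = {
+ *		.reg_bits = 16,
+ *		.val_bits = 8,
+ *	};
+ *
+ *	map = devm_regmap_init_swr(swr, &cfg);
+ *	if (IS_ERR(map))
+ *		return PTR_ERR(map);
+ */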
diff -Nruw linux-4.4.115/drivers/bif/Kconfig linux-4.4.115-fbx/drivers/bif/Kconfig
--- linux-4.4.115/drivers/bif/Kconfig	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/bif/Kconfig	2019-01-22 16:16:22.887240828 +0100
@@ -0,0 +1,25 @@
+#
+# BIF framework and drivers
+#
+menuconfig BIF
+	bool "MIPI-BIF support"
+	select CRC_CCITT
+	select BITREVERSE
+	help
+	  MIPI-BIF (battery interface) is a one-wire serial interface between a
+	  host master device and one or more slave devices which are located in
+	  a battery pack or also on the host.  Enabling this option allows for
+	  BIF consumer drivers to issue transactions via BIF controller drivers.
+
+if BIF
+config BIF_QPNP
+	depends on SPMI
+	tristate "Qualcomm QPNP BIF support"
+	help
+	  This driver supports the QPNP BSI peripheral found inside of Qualcomm
+	  QPNP PMIC devices.  The BSI peripheral is able to communicate using
+	  the BIF protocol.  The QPNP BSI driver hooks into the BIF framework.
+	  Enable this option in order to provide support for BIF communication
+	  on targets which have BSI PMIC peripherals.
+
+endif
diff -Nruw linux-4.4.115/drivers/bif/Makefile linux-4.4.115-fbx/drivers/bif/Makefile
--- linux-4.4.115/drivers/bif/Makefile	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/bif/Makefile	2019-01-22 16:16:22.887240828 +0100
@@ -0,0 +1,5 @@
+#
+# Makefile for kernel BIF framework.
+#
+obj-$(CONFIG_BIF)			+= bif-core.o
+obj-$(CONFIG_BIF_QPNP)			+= qpnp-bsi.o
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/bluetooth/bluetooth-power.c	2019-01-22 16:16:22.927241190 +0100
@@ -0,0 +1,773 @@
+/* Copyright (c) 2009-2010, 2013-2017 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+/*
+ * Bluetooth Power Switch Module
+ * controls power to external Bluetooth device
+ * with interface to power management device
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/rfkill.h>
+#include <linux/gpio.h>
+#include <linux/of_gpio.h>
+#include <linux/delay.h>
+#include <linux/bluetooth-power.h>
+#include <linux/slab.h>
+#include <linux/regulator/consumer.h>
+#include <linux/clk.h>
+#include <net/cnss.h>
+#include "btfm_slim.h"
+#include <linux/fs.h>
+
+#define BT_PWR_DBG(fmt, arg...)  pr_debug("%s: " fmt "\n", __func__, ## arg)
+#define BT_PWR_INFO(fmt, arg...) pr_info("%s: " fmt "\n", __func__, ## arg)
+#define BT_PWR_ERR(fmt, arg...)  pr_err("%s: " fmt "\n", __func__, ## arg)
+
+
+static const struct of_device_id bt_power_match_table[] = {
+	{	.compatible = "qca,ar3002" },
+	{	.compatible = "qca,qca6174" },
+	{	.compatible = "qca,wcn3990" },
+	{}
+};
+
+static struct bluetooth_power_platform_data *bt_power_pdata;
+static struct platform_device *btpdev;
+static bool previous;
+static int pwr_state;
+struct class *bt_class;
+static int bt_major;
+
+static int bt_vreg_init(struct bt_power_vreg_data *vreg)
+{
+	int rc = 0;
+	struct device *dev = &btpdev->dev;
+
+	BT_PWR_DBG("vreg_get for : %s", vreg->name);
+
+	/* Get the regulator handle */
+	vreg->reg = regulator_get(dev, vreg->name);
+	if (IS_ERR(vreg->reg)) {
+		rc = PTR_ERR(vreg->reg);
+		pr_err("%s: regulator_get(%s) failed. rc=%d\n",
+			__func__, vreg->name, rc);
+		goto out;
+	}
+
+	if ((regulator_count_voltages(vreg->reg) > 0)
+			&& (vreg->low_vol_level) && (vreg->high_vol_level))
+		vreg->set_voltage_sup = 1;
+
+out:
+	return rc;
+}
+
+static int bt_vreg_enable(struct bt_power_vreg_data *vreg)
+{
+	int rc = 0;
+
+	BT_PWR_DBG("vreg_en for : %s", vreg->name);
+
+	if (!vreg->is_enabled) {
+		if (vreg->set_voltage_sup) {
+			rc = regulator_set_voltage(vreg->reg,
+						vreg->low_vol_level,
+						vreg->high_vol_level);
+			if (rc < 0) {
+				BT_PWR_ERR("vreg_set_vol(%s) failed rc=%d\n",
+						vreg->name, rc);
+				goto out;
+			}
+		}
+
+		if (vreg->load_uA >= 0) {
+			rc = regulator_set_load(vreg->reg,
+					vreg->load_uA);
+			if (rc < 0) {
+				BT_PWR_ERR("vreg_set_mode(%s) failed rc=%d\n",
+						vreg->name, rc);
+				goto out;
+			}
+		}
+
+		rc = regulator_enable(vreg->reg);
+		if (rc < 0) {
+			BT_PWR_ERR("regulator_enable(%s) failed. rc=%d\n",
+					vreg->name, rc);
+			goto out;
+		}
+		vreg->is_enabled = true;
+	}
+out:
+	return rc;
+}
+
+static int bt_vreg_disable(struct bt_power_vreg_data *vreg)
+{
+	int rc = 0;
+
+	if (!vreg)
+		return rc;
+
+	BT_PWR_DBG("vreg_disable for : %s", vreg->name);
+
+	if (vreg->is_enabled) {
+		rc = regulator_disable(vreg->reg);
+		if (rc < 0) {
+			BT_PWR_ERR("regulator_disable(%s) failed. rc=%d\n",
+					vreg->name, rc);
+			goto out;
+		}
+		vreg->is_enabled = false;
+
+		if (vreg->set_voltage_sup) {
+			/* Set the min voltage to 0 */
+			rc = regulator_set_voltage(vreg->reg, 0,
+					vreg->high_vol_level);
+			if (rc < 0) {
+				BT_PWR_ERR("vreg_set_vol(%s) failed rc=%d\n",
+						vreg->name, rc);
+				goto out;
+			}
+		}
+		if (vreg->load_uA >= 0) {
+			rc = regulator_set_load(vreg->reg, 0);
+			if (rc < 0) {
+				BT_PWR_ERR("vreg_set_mode(%s) failed rc=%d\n",
+						vreg->name, rc);
+			}
+		}
+	}
+out:
+	return rc;
+}
+
+static int bt_configure_vreg(struct bt_power_vreg_data *vreg)
+{
+	int rc = 0;
+
+	BT_PWR_DBG("config %s", vreg->name);
+
+	/* Get the regulator handle for vreg */
+	if (!(vreg->reg)) {
+		rc = bt_vreg_init(vreg);
+		if (rc < 0)
+			return rc;
+	}
+	rc = bt_vreg_enable(vreg);
+
+	return rc;
+}
+
+static int bt_clk_enable(struct bt_power_clk_data *clk)
+{
+	int rc = 0;
+
+	BT_PWR_DBG("%s", clk->name);
+
+	/* Get the clock handle for vreg */
+	if (!clk->clk || clk->is_enabled) {
+		BT_PWR_ERR("error - node: %p, clk->is_enabled:%d",
+			clk->clk, clk->is_enabled);
+		return -EINVAL;
+	}
+
+	rc = clk_prepare_enable(clk->clk);
+	if (rc) {
+		BT_PWR_ERR("failed to enable %s, rc(%d)\n", clk->name, rc);
+		return rc;
+	}
+
+	clk->is_enabled = true;
+	return rc;
+}
+
+static int bt_clk_disable(struct bt_power_clk_data *clk)
+{
+	int rc = 0;
+
+	BT_PWR_DBG("%s", clk->name);
+
+	/* Get the clock handle for vreg */
+	if (!clk->clk || !clk->is_enabled) {
+		BT_PWR_ERR("error - node: %p, clk->is_enabled:%d",
+			clk->clk, clk->is_enabled);
+		return -EINVAL;
+	}
+	clk_disable_unprepare(clk->clk);
+
+	clk->is_enabled = false;
+	return rc;
+}
+
+static int bt_configure_gpios(int on)
+{
+	int rc = 0;
+	int bt_reset_gpio = bt_power_pdata->bt_gpio_sys_rst;
+
+	BT_PWR_DBG("bt_gpio= %d on: %d", bt_reset_gpio, on);
+
+	if (on) {
+		rc = gpio_request(bt_reset_gpio, "bt_sys_rst_n");
+		if (rc) {
+			BT_PWR_ERR("unable to request gpio %d (%d)\n",
+					bt_reset_gpio, rc);
+			return rc;
+		}
+
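+		/* Pulse reset: hold the line low ~50 ms, then drive it high */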
+		rc = gpio_direction_output(bt_reset_gpio, 0);
+		if (rc) {
+			BT_PWR_ERR("Unable to set direction\n");
+			return rc;
+		}
+		msleep(50);
+		rc = gpio_direction_output(bt_reset_gpio, 1);
+		if (rc) {
+			BT_PWR_ERR("Unable to set direction\n");
+			return rc;
+		}
+		msleep(50);
+	} else {
+		gpio_set_value(bt_reset_gpio, 0);
+		msleep(100);
+	}
+	return rc;
+}
+
+static int bluetooth_power(int on)
+{
+	int rc = 0;
+
+	BT_PWR_DBG("on: %d", on);
+
+	if (on) {
+		if (bt_power_pdata->bt_vdd_io) {
+			rc = bt_configure_vreg(bt_power_pdata->bt_vdd_io);
+			if (rc < 0) {
+				BT_PWR_ERR("bt_power vddio config failed");
+				goto out;
+			}
+		}
+		if (bt_power_pdata->bt_vdd_xtal) {
+			rc = bt_configure_vreg(bt_power_pdata->bt_vdd_xtal);
+			if (rc < 0) {
+				BT_PWR_ERR("bt_power vddxtal config failed");
+				goto vdd_xtal_fail;
+			}
+		}
+		if (bt_power_pdata->bt_vdd_core) {
+			rc = bt_configure_vreg(bt_power_pdata->bt_vdd_core);
+			if (rc < 0) {
+				BT_PWR_ERR("bt_power vddcore config failed");
+				goto vdd_core_fail;
+			}
+		}
+		if (bt_power_pdata->bt_vdd_pa) {
+			rc = bt_configure_vreg(bt_power_pdata->bt_vdd_pa);
+			if (rc < 0) {
+				BT_PWR_ERR("bt_power vddpa config failed");
+				goto vdd_pa_fail;
+			}
+		}
+		if (bt_power_pdata->bt_vdd_ldo) {
+			rc = bt_configure_vreg(bt_power_pdata->bt_vdd_ldo);
+			if (rc < 0) {
+				BT_PWR_ERR("bt_power vddldo config failed");
+				goto vdd_ldo_fail;
+			}
+		}
+		if (bt_power_pdata->bt_chip_pwd) {
+			rc = bt_configure_vreg(bt_power_pdata->bt_chip_pwd);
+			if (rc < 0) {
+				BT_PWR_ERR("bt_power chippwd config failed");
+				goto chip_pwd_fail;
+			}
+		}
+		/* Parse dt_info and check if a target requires clock voting.
+		 * Enable BT clock when BT is on and disable it when BT is off
+		 */
+		if (bt_power_pdata->bt_chip_clk) {
+			rc = bt_clk_enable(bt_power_pdata->bt_chip_clk);
+			if (rc < 0) {
+				BT_PWR_ERR("bt_power gpio config failed");
+				goto clk_fail;
+			}
+		}
+		if (bt_power_pdata->bt_gpio_sys_rst > 0) {
+			rc = bt_configure_gpios(on);
+			if (rc < 0) {
+				BT_PWR_ERR("bt_power gpio config failed");
+				goto gpio_fail;
+			}
+		}
+	} else {
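+		/*
+		 * Power-off path: fall through the same labels used to
+		 * unwind a failed power-on, so supplies are released in
+		 * reverse order of enabling.
+		 */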
+		if (bt_power_pdata->bt_gpio_sys_rst > 0)
+			bt_configure_gpios(on);
+gpio_fail:
+		if (bt_power_pdata->bt_gpio_sys_rst > 0)
+			gpio_free(bt_power_pdata->bt_gpio_sys_rst);
+		if (bt_power_pdata->bt_chip_clk)
+			bt_clk_disable(bt_power_pdata->bt_chip_clk);
+clk_fail:
+		if (bt_power_pdata->bt_chip_pwd)
+			bt_vreg_disable(bt_power_pdata->bt_chip_pwd);
+chip_pwd_fail:
+		if (bt_power_pdata->bt_vdd_ldo)
+			bt_vreg_disable(bt_power_pdata->bt_vdd_ldo);
+vdd_ldo_fail:
+		if (bt_power_pdata->bt_vdd_pa)
+			bt_vreg_disable(bt_power_pdata->bt_vdd_pa);
+vdd_pa_fail:
+		if (bt_power_pdata->bt_vdd_core)
+			bt_vreg_disable(bt_power_pdata->bt_vdd_core);
+vdd_core_fail:
+		if (bt_power_pdata->bt_vdd_xtal)
+			bt_vreg_disable(bt_power_pdata->bt_vdd_xtal);
+vdd_xtal_fail:
+		if (bt_power_pdata->bt_vdd_io)
+			bt_vreg_disable(bt_power_pdata->bt_vdd_io);
+	}
+out:
+	return rc;
+}
+
+static int bluetooth_toggle_radio(void *data, bool blocked)
+{
+	int ret = 0;
+	int (*power_control)(int enable);
+
+	power_control =
+		((struct bluetooth_power_platform_data *)data)->bt_power_setup;
+
+	if (previous != blocked)
+		ret = (*power_control)(!blocked);
+	if (!ret)
+		previous = blocked;
+	return ret;
+}
+
+static const struct rfkill_ops bluetooth_power_rfkill_ops = {
+	.set_block = bluetooth_toggle_radio,
+};
+
+#if defined(CONFIG_CNSS) && defined(CONFIG_CLD_LL_CORE)
+static ssize_t enable_extldo(struct device *dev, struct device_attribute *attr,
+			char *buf)
+{
+	int ret;
+	bool enable = false;
+	struct cnss_platform_cap cap;
+
+	ret = cnss_get_platform_cap(&cap);
+	if (ret) {
+		BT_PWR_ERR("Platform capability info from CNSS not available!");
+		enable = false;
+	} else if (cap.cap_flag & CNSS_HAS_EXTERNAL_SWREG) {
+		enable = true;
+	}
+	return snprintf(buf, 6, "%s", (enable ? "true" : "false"));
+}
+#else
+static ssize_t enable_extldo(struct device *dev, struct device_attribute *attr,
+			char *buf)
+{
+	return snprintf(buf, 6, "%s", "false");
+}
+#endif
+
+static DEVICE_ATTR(extldo, S_IRUGO, enable_extldo, NULL);
+
+static int bluetooth_power_rfkill_probe(struct platform_device *pdev)
+{
+	struct rfkill *rfkill;
+	int ret;
+
+	rfkill = rfkill_alloc("bt_power", &pdev->dev, RFKILL_TYPE_BLUETOOTH,
+			      &bluetooth_power_rfkill_ops,
+			      pdev->dev.platform_data);
+
+	if (!rfkill) {
+		dev_err(&pdev->dev, "rfkill allocate failed\n");
+		return -ENOMEM;
+	}
+
+	/* add file into rfkill0 to handle LDO27 */
+	ret = device_create_file(&pdev->dev, &dev_attr_extldo);
+	if (ret < 0)
+		BT_PWR_ERR("device create file error!");
+
+	/* force Bluetooth off during init to allow for user control */
+	rfkill_init_sw_state(rfkill, 1);
+	previous = 1;
+
+	ret = rfkill_register(rfkill);
+	if (ret) {
+		dev_err(&pdev->dev, "rfkill register failed=%d\n", ret);
+		rfkill_destroy(rfkill);
+		return ret;
+	}
+
+	platform_set_drvdata(pdev, rfkill);
+
+	return 0;
+}
+
+static void bluetooth_power_rfkill_remove(struct platform_device *pdev)
+{
+	struct rfkill *rfkill;
+
+	dev_dbg(&pdev->dev, "%s\n", __func__);
+
+	rfkill = platform_get_drvdata(pdev);
+	if (rfkill)
+		rfkill_unregister(rfkill);
+	rfkill_destroy(rfkill);
+	platform_set_drvdata(pdev, NULL);
+}
+
+#define MAX_PROP_SIZE 32
+static int bt_dt_parse_vreg_info(struct device *dev,
+		struct bt_power_vreg_data **vreg_data, const char *vreg_name)
+{
+	int len, ret = 0;
+	const __be32 *prop;
+	char prop_name[MAX_PROP_SIZE];
+	struct bt_power_vreg_data *vreg;
+	struct device_node *np = dev->of_node;
+
+	BT_PWR_DBG("vreg dev tree parse for %s", vreg_name);
+
+	*vreg_data = NULL;
+	snprintf(prop_name, MAX_PROP_SIZE, "%s-supply", vreg_name);
+	if (of_parse_phandle(np, prop_name, 0)) {
+		vreg = devm_kzalloc(dev, sizeof(*vreg), GFP_KERNEL);
+		if (!vreg) {
+			dev_err(dev, "No memory for vreg: %s\n", vreg_name);
+			ret = -ENOMEM;
+			goto err;
+		}
+
+		vreg->name = vreg_name;
+
+		/* Parse voltage-level from each node */
+		snprintf(prop_name, MAX_PROP_SIZE,
+				"%s-voltage-level", vreg_name);
+		prop = of_get_property(np, prop_name, &len);
+		if (!prop || (len != (2 * sizeof(__be32)))) {
+			dev_warn(dev, "%s %s property\n",
+				prop ? "invalid format" : "no", prop_name);
+		} else {
+			vreg->low_vol_level = be32_to_cpup(&prop[0]);
+			vreg->high_vol_level = be32_to_cpup(&prop[1]);
+		}
+
+		/* Parse current-level from each node */
+		snprintf(prop_name, MAX_PROP_SIZE,
+				"%s-current-level", vreg_name);
+		ret = of_property_read_u32(np, prop_name, &vreg->load_uA);
+		if (ret < 0) {
+			BT_PWR_DBG("%s property is not valid\n", prop_name);
+			vreg->load_uA = -1;
+			ret = 0;
+		}
+
+		*vreg_data = vreg;
+		BT_PWR_DBG("%s: vol=[%d %d]uV, current=[%d]uA\n",
+			vreg->name, vreg->low_vol_level,
+			vreg->high_vol_level,
+			vreg->load_uA);
+	} else
+		BT_PWR_INFO("%s: is not provided in device tree", vreg_name);
+
+err:
+	return ret;
+}
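+
+/*
+ * Illustrative DT fragment consumed by bt_dt_parse_vreg_info(); the
+ * regulator phandle and the values are made up:
+ *
+ *	qca,bt-vdd-io-supply = <&pm8998_l6>;
+ *	qca,bt-vdd-io-voltage-level = <1800000 1800000>;
+ *	qca,bt-vdd-io-current-level = <10000>;
+ */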
+
+static int bt_dt_parse_clk_info(struct device *dev,
+		struct bt_power_clk_data **clk_data)
+{
+	int ret = -EINVAL;
+	struct bt_power_clk_data *clk = NULL;
+	struct device_node *np = dev->of_node;
+
+	BT_PWR_DBG("");
+
+	*clk_data = NULL;
+	if (of_parse_phandle(np, "clocks", 0)) {
+		clk = devm_kzalloc(dev, sizeof(*clk), GFP_KERNEL);
+		if (!clk) {
+			BT_PWR_ERR("No memory for clocks");
+			ret = -ENOMEM;
+			goto err;
+		}
+
+		/* Parse clock name from node; the returned string is owned
+		 * by the device tree, so no local buffer is needed.
+		 */
+		ret = of_property_read_string_index(np, "clock-names", 0,
+				&(clk->name));
+		if (ret < 0) {
+			BT_PWR_ERR("reading \"clock-names\" failed");
+			return ret;
+		}
+
+		clk->clk = devm_clk_get(dev, clk->name);
+		if (IS_ERR(clk->clk)) {
+			ret = PTR_ERR(clk->clk);
+			BT_PWR_ERR("failed to get %s, ret (%d)",
+				clk->name, ret);
+			clk->clk = NULL;
+			return ret;
+		}
+
+		*clk_data = clk;
+	} else {
+		BT_PWR_ERR("clocks is not provided in device tree");
+	}
+
+err:
+	return ret;
+}
+
+static int bt_power_populate_dt_pinfo(struct platform_device *pdev)
+{
+	int rc;
+
+	BT_PWR_DBG("");
+
+	if (!bt_power_pdata)
+		return -ENOMEM;
+
+	if (pdev->dev.of_node) {
+		bt_power_pdata->bt_gpio_sys_rst =
+			of_get_named_gpio(pdev->dev.of_node,
+						"qca,bt-reset-gpio", 0);
+		if (bt_power_pdata->bt_gpio_sys_rst < 0)
+			BT_PWR_ERR("bt-reset-gpio not provided in device tree");
+
+		rc = bt_dt_parse_vreg_info(&pdev->dev,
+					&bt_power_pdata->bt_vdd_core,
+					"qca,bt-vdd-core");
+		if (rc < 0)
+			BT_PWR_ERR("bt-vdd-core not provided in device tree");
+
+		rc = bt_dt_parse_vreg_info(&pdev->dev,
+					&bt_power_pdata->bt_vdd_io,
+					"qca,bt-vdd-io");
+		if (rc < 0)
+			BT_PWR_ERR("bt-vdd-io not provided in device tree");
+
+		rc = bt_dt_parse_vreg_info(&pdev->dev,
+					&bt_power_pdata->bt_vdd_xtal,
+					"qca,bt-vdd-xtal");
+		if (rc < 0)
+			BT_PWR_ERR("bt-vdd-xtal not provided in device tree");
+
+		rc = bt_dt_parse_vreg_info(&pdev->dev,
+					&bt_power_pdata->bt_vdd_pa,
+					"qca,bt-vdd-pa");
+		if (rc < 0)
+			BT_PWR_ERR("bt-vdd-pa not provided in device tree");
+
+		rc = bt_dt_parse_vreg_info(&pdev->dev,
+					&bt_power_pdata->bt_vdd_ldo,
+					"qca,bt-vdd-ldo");
+		if (rc < 0)
+			BT_PWR_ERR("bt-vdd-ldo not provided in device tree");
+
+		rc = bt_dt_parse_vreg_info(&pdev->dev,
+					&bt_power_pdata->bt_chip_pwd,
+					"qca,bt-chip-pwd");
+		if (rc < 0)
+			BT_PWR_ERR("bt-chip-pwd not provided in device tree");
+
+		rc = bt_dt_parse_clk_info(&pdev->dev,
+					&bt_power_pdata->bt_chip_clk);
+		if (rc < 0)
+			BT_PWR_ERR("clock not provided in device tree");
+	}
+
+	bt_power_pdata->bt_power_setup = bluetooth_power;
+
+	return 0;
+}
+
+static int bt_power_probe(struct platform_device *pdev)
+{
+	int ret = 0;
+
+	dev_dbg(&pdev->dev, "%s\n", __func__);
+
+	bt_power_pdata =
+		kzalloc(sizeof(struct bluetooth_power_platform_data),
+			GFP_KERNEL);
+
+	if (!bt_power_pdata) {
+		BT_PWR_ERR("Failed to allocate memory");
+		return -ENOMEM;
+	}
+
+	if (pdev->dev.of_node) {
+		ret = bt_power_populate_dt_pinfo(pdev);
+		if (ret < 0) {
+			BT_PWR_ERR("Failed to populate device tree info");
+			goto free_pdata;
+		}
+		pdev->dev.platform_data = bt_power_pdata;
+	} else if (pdev->dev.platform_data) {
+		/* Optional data set to default if not provided */
+		if (!((struct bluetooth_power_platform_data *)
+			(pdev->dev.platform_data))->bt_power_setup)
+			((struct bluetooth_power_platform_data *)
+				(pdev->dev.platform_data))->bt_power_setup =
+						bluetooth_power;
+
+		memcpy(bt_power_pdata, pdev->dev.platform_data,
+			sizeof(struct bluetooth_power_platform_data));
+		pwr_state = 0;
+	} else {
+		BT_PWR_ERR("Failed to get platform data");
+		ret = -ENODEV;
+		goto free_pdata;
+	}
+
+	ret = bluetooth_power_rfkill_probe(pdev);
+	if (ret < 0)
+		goto free_pdata;
+
+	btpdev = pdev;
+
+	return 0;
+
+free_pdata:
+	kfree(bt_power_pdata);
+	return ret;
+}
+
+static int bt_power_remove(struct platform_device *pdev)
+{
+	dev_dbg(&pdev->dev, "%s\n", __func__);
+
+	bluetooth_power_rfkill_remove(pdev);
+
+	if (bt_power_pdata->bt_chip_pwd && bt_power_pdata->bt_chip_pwd->reg)
+		regulator_put(bt_power_pdata->bt_chip_pwd->reg);
+
+	kfree(bt_power_pdata);
+
+	return 0;
+}
+
+int bt_register_slimdev(struct device *dev)
+{
+	BT_PWR_DBG("");
+	if (!bt_power_pdata || (dev == NULL)) {
+		BT_PWR_ERR("Failed to allocate memory");
+		return -EINVAL;
+	}
+	bt_power_pdata->slim_dev = dev;
+	return 0;
+}
+
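+/*
+ * Userspace sketch (illustrative): a power HAL opens /dev/btpower and calls
+ * ioctl(fd, BT_CMD_PWR_CTRL, 1) to power the chip on (0 to power it off);
+ * BT_CMD_SLIM_TEST re-runs the SLIMBUS slave bring-up.
+ */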
+static long bt_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+	int ret = 0, pwr_cntrl = 0;
+
+	switch (cmd) {
+	case BT_CMD_SLIM_TEST:
+		if (!bt_power_pdata->slim_dev) {
+			BT_PWR_ERR("slim_dev is null\n");
+			return -EINVAL;
+		}
+		ret = btfm_slim_hw_init(
+			bt_power_pdata->slim_dev->platform_data
+		);
+		break;
+	case BT_CMD_PWR_CTRL:
+		pwr_cntrl = (int)arg;
+		BT_PWR_ERR("BT_CMD_PWR_CTRL pwr_cntrl:%d", pwr_cntrl);
+		if (pwr_state != pwr_cntrl) {
+			ret = bluetooth_power(pwr_cntrl);
+			if (!ret)
+				pwr_state = pwr_cntrl;
+		} else {
+			BT_PWR_ERR("BT chip state is already :%d no change d\n"
+				, pwr_state);
+			ret = 0;
+		}
+		break;
+	default:
+		return -EINVAL;
+	}
+	return ret;
+}
+
+static struct platform_driver bt_power_driver = {
+	.probe = bt_power_probe,
+	.remove = bt_power_remove,
+	.driver = {
+		.name = "bt_power",
+		.owner = THIS_MODULE,
+		.of_match_table = bt_power_match_table,
+	},
+};
+
+static const struct file_operations bt_dev_fops = {
+	.owner		= THIS_MODULE,
+	.unlocked_ioctl = bt_ioctl,
+	.compat_ioctl = bt_ioctl,
+};
+
+static int __init bluetooth_power_init(void)
+{
+	int ret;
+
+	ret = platform_driver_register(&bt_power_driver);
+	if (ret)
+		return ret;
+
+	bt_major = register_chrdev(0, "bt", &bt_dev_fops);
+	if (bt_major < 0) {
+		BT_PWR_ERR("failed to allocate char dev");
+		ret = bt_major;
+		goto driver_unreg;
+	}
+
+	bt_class = class_create(THIS_MODULE, "bt-dev");
+	if (IS_ERR(bt_class)) {
+		BT_PWR_ERR("couldn't create class");
+		ret = PTR_ERR(bt_class);
+		goto chrdev_unreg;
+	}
+
+	if (device_create(bt_class, NULL, MKDEV(bt_major, 0),
+		NULL, "btpower") == NULL) {
+		BT_PWR_ERR("failed to create btpower device");
+		ret = -ENODEV;
+		goto class_unreg;
+	}
+	return 0;
+
+class_unreg:
+	class_destroy(bt_class);
+chrdev_unreg:
+	unregister_chrdev(bt_major, "bt");
+driver_unreg:
+	platform_driver_unregister(&bt_power_driver);
+	return ret;
+}
+
+static void __exit bluetooth_power_exit(void)
+{
+	device_destroy(bt_class, MKDEV(bt_major, 0));
+	class_destroy(bt_class);
+	unregister_chrdev(bt_major, "bt");
+	platform_driver_unregister(&bt_power_driver);
+}
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("MSM Bluetooth power control driver");
+
+module_init(bluetooth_power_init);
+module_exit(bluetooth_power_exit);
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/bluetooth/btfm_slim.c	2019-01-22 16:16:22.931241227 +0100
@@ -0,0 +1,565 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_gpio.h>
+#include <linux/delay.h>
+#include <linux/gpio.h>
+#include <linux/debugfs.h>
+#include <linux/ratelimit.h>
+#include <linux/slab.h>
+#include <linux/bluetooth-power.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+#include <sound/soc-dapm.h>
+#include <sound/tlv.h>
+#include "btfm_slim.h"
+#include "btfm_slim_wcn3990.h"
+
+int btfm_slim_write(struct btfmslim *btfmslim,
+		uint16_t reg, int bytes, void *src, uint8_t pgd)
+{
+	int ret, i;
+	struct slim_ele_access msg;
+	int slim_write_tries = SLIM_SLAVE_RW_MAX_TRIES;
+
+	BTFMSLIM_DBG("Write to %s", pgd?"PGD":"IFD");
+	msg.start_offset = SLIM_SLAVE_REG_OFFSET + reg;
+	msg.num_bytes = bytes;
+	msg.comp = NULL;
+
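+	/* Retry a few times; the slave may not have enumerated yet */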
+	for ( ; slim_write_tries != 0; slim_write_tries--) {
+		mutex_lock(&btfmslim->xfer_lock);
+		ret = slim_change_val_element(pgd ? btfmslim->slim_pgd :
+			&btfmslim->slim_ifd, &msg, src, bytes);
+		mutex_unlock(&btfmslim->xfer_lock);
+		if (ret == 0)
+			break;
+		usleep_range(5000, 5100);
+	}
+
+	if (ret) {
+		BTFMSLIM_ERR("failed (%d)", ret);
+		return ret;
+	}
+
+	for (i = 0; i < bytes; i++)
+		BTFMSLIM_DBG("Write 0x%02x to reg 0x%x", ((uint8_t *)src)[i],
+			reg + i);
+	return 0;
+}
+
+int btfm_slim_write_pgd(struct btfmslim *btfmslim,
+		uint16_t reg, int bytes, void *src)
+{
+	return btfm_slim_write(btfmslim, reg, bytes, src, PGD);
+}
+
+int btfm_slim_write_inf(struct btfmslim *btfmslim,
+		uint16_t reg, int bytes, void *src)
+{
+	return btfm_slim_write(btfmslim, reg, bytes, src, IFD);
+}
+
+int btfm_slim_read(struct btfmslim *btfmslim, unsigned short reg,
+				int bytes, void *dest, uint8_t pgd)
+{
+	int ret, i;
+	struct slim_ele_access msg;
+	int slim_read_tries = SLIM_SLAVE_RW_MAX_TRIES;
+
+	BTFMSLIM_DBG("Read from %s", pgd?"PGD":"IFD");
+	msg.start_offset = SLIM_SLAVE_REG_OFFSET + reg;
+	msg.num_bytes = bytes;
+	msg.comp = NULL;
+
+	for ( ; slim_read_tries != 0; slim_read_tries--) {
+		mutex_lock(&btfmslim->xfer_lock);
+		ret = slim_request_val_element(pgd ? btfmslim->slim_pgd :
+			&btfmslim->slim_ifd, &msg, dest, bytes);
+		mutex_unlock(&btfmslim->xfer_lock);
+		if (ret == 0)
+			break;
+		usleep_range(5000, 5100);
+	}
+
+	if (ret) {
+		BTFMSLIM_ERR("failed (%d)", ret);
+		return ret;
+	}
+
+	for (i = 0; i < bytes; i++)
+		BTFMSLIM_DBG("Read 0x%02x from reg 0x%x", ((uint8_t *)dest)[i],
+			reg + i);
+
+	return 0;
+}
+
+int btfm_slim_read_pgd(struct btfmslim *btfmslim,
+		uint16_t reg, int bytes, void *dest)
+{
+	return btfm_slim_read(btfmslim, reg, bytes, dest, PGD);
+}
+
+int btfm_slim_read_inf(struct btfmslim *btfmslim,
+		uint16_t reg, int bytes, void *dest)
+{
+	return btfm_slim_read(btfmslim, reg, bytes, dest, IFD);
+}
+
+int btfm_slim_enable_ch(struct btfmslim *btfmslim, struct btfmslim_ch *ch,
+	uint8_t rxport, uint32_t rates, uint8_t grp, uint8_t nchan)
+{
+	int ret, i;
+	struct slim_ch prop;
+	struct btfmslim_ch *chan = ch;
+	uint16_t ch_h[2];
+
+	if (!btfmslim || !ch)
+		return -EINVAL;
+
+	BTFMSLIM_DBG("port: %d ch: %d", ch->port, ch->ch);
+
+	/* Define the channel with below parameters */
+	prop.prot = SLIM_AUTO_ISO;
+	prop.baser = SLIM_RATE_4000HZ;
+	prop.dataf = (rates == 48000) ? SLIM_CH_DATAF_NOT_DEFINED
+			: SLIM_CH_DATAF_LPCM_AUDIO;
+	prop.auxf = SLIM_CH_AUXF_NOT_APPLICABLE;
+	prop.ratem = (rates/4000);
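+	/* e.g. 48000 Hz on the SLIMBUS 4 kHz base rate gives ratem = 12 */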
+	prop.sampleszbits = 16;
+
+	ch_h[0] = ch->ch_hdl;
+	ch_h[1] = (grp) ? (ch+1)->ch_hdl : 0;
+
+	ret = slim_define_ch(btfmslim->slim_pgd, &prop, ch_h, nchan, grp,
+			&ch->grph);
+	if (ret < 0) {
+		BTFMSLIM_ERR("slim_define_ch failed ret[%d]", ret);
+		goto error;
+	}
+
+	for (i = 0; i < nchan; i++, ch++) {
+		/* Enable port through registration setting */
+		if (btfmslim->vendor_port_en) {
+			ret = btfmslim->vendor_port_en(btfmslim, ch->port,
+					rxport, 1);
+			if (ret < 0) {
+				BTFMSLIM_ERR("vendor_port_en failed ret[%d]",
+						ret);
+				goto error;
+			}
+		}
+
+		if (rxport) {
+			BTFMSLIM_INFO("slim_connect_sink(port: %d, ch: %d)",
+							ch->port, ch->ch);
+			/* Connect Port with channel given by Machine driver*/
+			ret = slim_connect_sink(btfmslim->slim_pgd,
+				&ch->port_hdl, 1, ch->ch_hdl);
+			if (ret < 0) {
+				BTFMSLIM_ERR("slim_connect_sink failed ret[%d]",
+						ret);
+				goto remove_channel;
+			}
+		} else {
+			BTFMSLIM_INFO("slim_connect_src(port: %d, ch: %d)",
+				ch->port, ch->ch);
+			/* Connect Port with channel given by Machine driver*/
+			ret = slim_connect_src(btfmslim->slim_pgd, ch->port_hdl,
+				ch->ch_hdl);
+			if (ret < 0) {
+				BTFMSLIM_ERR("slim_connect_src failed ret[%d]",
+						ret);
+				goto remove_channel;
+			}
+		}
+	}
+
+	/* Activate the channel immediately */
+	BTFMSLIM_INFO(
+		"port: %d, ch: %d, grp: %d, ch->grph: 0x%x, ch_hdl: 0x%x",
+		chan->port, chan->ch, grp, chan->grph, chan->ch_hdl);
+
+	ret = slim_control_ch(btfmslim->slim_pgd, (grp ? chan->grph :
+		chan->ch_hdl), SLIM_CH_ACTIVATE, true);
+	if (ret < 0) {
+		BTFMSLIM_ERR("slim_control_ch failed ret[%d]", ret);
+		goto remove_channel;
+	}
+
+error:
+	return ret;
+
+remove_channel:
+	/* Remove the channel immediately*/
+	ret = slim_control_ch(btfmslim->slim_pgd, (grp ? ch->grph : ch->ch_hdl),
+			SLIM_CH_REMOVE, true);
+	if (ret < 0)
+		BTFMSLIM_ERR("slim_control_ch failed ret[%d]", ret);
+
+	return ret;
+}
+
+int btfm_slim_disable_ch(struct btfmslim *btfmslim, struct btfmslim_ch *ch,
+	uint8_t rxport, uint8_t grp, uint8_t nchan)
+{
+	int ret, i;
+
+	if (!btfmslim || !ch)
+		return -EINVAL;
+
+	BTFMSLIM_INFO("port:%d, grp: %d, ch->grph:0x%x, ch->ch_hdl:0x%x ",
+		ch->port, grp, ch->grph, ch->ch_hdl);
+
+	/* Remove the channel immediately*/
+	ret = slim_control_ch(btfmslim->slim_pgd, (grp ? ch->grph : ch->ch_hdl),
+			SLIM_CH_REMOVE, true);
+	if (ret < 0) {
+		BTFMSLIM_ERR("slim_control_ch failed ret[%d]", ret);
+		ret = slim_disconnect_ports(btfmslim->slim_pgd,
+			&ch->port_hdl, 1);
+		if (ret < 0) {
+			BTFMSLIM_ERR("slim_disconnect_ports failed ret[%d]",
+				ret);
+			goto error;
+		}
+	}
+	/* Disable port through registration setting */
+	for (i = 0; i < nchan; i++, ch++) {
+		if (btfmslim->vendor_port_en) {
+			ret = btfmslim->vendor_port_en(btfmslim, ch->port,
+				rxport, 0);
+			if (ret < 0) {
+				BTFMSLIM_ERR("vendor_port_en failed ret[%d]",
+					ret);
+				break;
+			}
+		}
+	}
+
+error:
+	return ret;
+}
+
+static int btfm_slim_get_logical_addr(struct slim_device *slim)
+{
+	int ret = 0;
+	const unsigned long timeout = jiffies +
+			      msecs_to_jiffies(SLIM_SLAVE_PRESENT_TIMEOUT);
+
+	do {
+		ret = slim_get_logical_addr(slim, slim->e_addr,
+			ARRAY_SIZE(slim->e_addr), &slim->laddr);
+		if (!ret)  {
+			BTFMSLIM_DBG("Assigned l-addr: 0x%x", slim->laddr);
+			break;
+		}
+		/* Give SLIMBUS time to report present and be ready. */
+		usleep_range(1000, 1100);
+		BTFMSLIM_DBG("retyring get logical addr");
+	} while (time_before(jiffies, timeout));
+
+	return ret;
+}
+
+static int btfm_slim_alloc_port(struct btfmslim *btfmslim)
+{
+	int ret = -EINVAL, i;
+	struct btfmslim_ch *rx_chs;
+	struct btfmslim_ch *tx_chs;
+
+	if (!btfmslim)
+		return ret;
+
+	rx_chs = btfmslim->rx_chs;
+	tx_chs = btfmslim->tx_chs;
+
+	if (!rx_chs || !tx_chs)
+		return ret;
+
+	BTFMSLIM_DBG("Rx: id\tname\tport\thdl\tch\tch_hdl");
+	for (i = 0 ; (rx_chs->port != BTFM_SLIM_PGD_PORT_LAST) &&
+		(i < BTFM_SLIM_NUM_CODEC_DAIS); i++, rx_chs++) {
+
+		/* Get the Rx port handle from the slimbus driver based
+		 * on the port number
+		 */
+		ret = slim_get_slaveport(btfmslim->slim_pgd->laddr,
+			rx_chs->port, &rx_chs->port_hdl, SLIM_SINK);
+		if (ret < 0) {
+			BTFMSLIM_ERR("slave port failure port#%d - ret[%d]",
+				rx_chs->port, SLIM_SINK);
+			return ret;
+		}
+		BTFMSLIM_DBG("    %d\t%s\t%d\t%x\t%d\t%x", rx_chs->id,
+			rx_chs->name, rx_chs->port, rx_chs->port_hdl,
+			rx_chs->ch, rx_chs->ch_hdl);
+	}
+
+	BTFMSLIM_DBG("Tx: id\tname\tport\thdl\tch\tch_hdl");
+	for (i = 0; (tx_chs->port != BTFM_SLIM_PGD_PORT_LAST) &&
+		(i < BTFM_SLIM_NUM_CODEC_DAIS); i++, tx_chs++) {
+
+		/* Get the Tx port handle from the slimbus driver based
+		 * on the port number
+		 */
+		ret = slim_get_slaveport(btfmslim->slim_pgd->laddr,
+			tx_chs->port, &tx_chs->port_hdl, SLIM_SRC);
+		if (ret < 0) {
+			BTFMSLIM_ERR("slave port failure port#%d - ret[%d]",
+				tx_chs->port, SLIM_SRC);
+			return ret;
+		}
+		BTFMSLIM_DBG("    %d\t%s\t%d\t%x\t%d\t%x", tx_chs->id,
+			tx_chs->name, tx_chs->port, tx_chs->port_hdl,
+			tx_chs->ch, tx_chs->ch_hdl);
+	}
+	return ret;
+}
+
+int btfm_slim_hw_init(struct btfmslim *btfmslim)
+{
+	int ret;
+
+	BTFMSLIM_DBG("");
+	if (!btfmslim)
+		return -EINVAL;
+
+	if (btfmslim->enabled) {
+		BTFMSLIM_DBG("Already enabled");
+		return 0;
+	}
+	mutex_lock(&btfmslim->io_lock);
+
+	/* Assign a logical address for the PGD (Ported Generic Device)
+	 * enumeration address
+	 */
+	ret = btfm_slim_get_logical_addr(btfmslim->slim_pgd);
+	if (ret) {
+		BTFMSLIM_ERR("failed to get slimbus %s logical address: %d",
+		       btfmslim->slim_pgd->name, ret);
+		goto error;
+	}
+
+	/* Assign a logical address for the Interface Device (IFD)
+	 * enumeration address
+	 */
+	ret = btfm_slim_get_logical_addr(&btfmslim->slim_ifd);
+	if (ret) {
+		BTFMSLIM_ERR("failed to get slimbus %s logical address: %d",
+		       btfmslim->slim_ifd.name, ret);
+		goto error;
+	}
+
+	/* Allocate ports against the logical address to get port handles
+	 * from the slimbus driver
+	 */
+	ret = btfm_slim_alloc_port(btfmslim);
+	if (ret)
+		goto error;
+
+	/* Start vendor specific initialization and get port information */
+	if (btfmslim->vendor_init)
+		ret = btfmslim->vendor_init(btfmslim);
+
+	/* Mark enabled only when all register reads/writes succeeded */
+	if (!ret)
+		btfmslim->enabled = 1;
+error:
+	mutex_unlock(&btfmslim->io_lock);
+	return ret;
+}
+
+
+int btfm_slim_hw_deinit(struct btfmslim *btfmslim)
+{
+	int ret = 0;
+
+	if (!btfmslim)
+		return -EINVAL;
+
+	if (!btfmslim->enabled) {
+		BTFMSLIM_DBG("Already disabled");
+		return 0;
+	}
+	mutex_lock(&btfmslim->io_lock);
+	btfmslim->enabled = 0;
+	mutex_unlock(&btfmslim->io_lock);
+	return ret;
+}
+
+static int btfm_slim_get_dt_info(struct btfmslim *btfmslim)
+{
+	int ret = 0;
+	struct slim_device *slim = btfmslim->slim_pgd;
+	struct slim_device *slim_ifd = &btfmslim->slim_ifd;
+	struct property *prop;
+
+	if (!slim || !slim_ifd)
+		return -EINVAL;
+
+	if (slim->dev.of_node) {
+		BTFMSLIM_DBG("Platform data from device tree (%s)",
+			slim->name);
+		ret = of_property_read_string(slim->dev.of_node,
+			"qcom,btfm-slim-ifd", &slim_ifd->name);
+		if (ret) {
+			BTFMSLIM_ERR("Looking up %s property in node %s failed",
+				"qcom,btfm-slim-ifd",
+				 slim->dev.of_node->full_name);
+			return -ENODEV;
+		}
+		BTFMSLIM_DBG("qcom,btfm-slim-ifd (%s)", slim_ifd->name);
+
+		prop = of_find_property(slim->dev.of_node,
+				"qcom,btfm-slim-ifd-elemental-addr", NULL);
+		if (!prop) {
+			BTFMSLIM_ERR("Looking up %s property in node %s failed",
+				"qcom,btfm-slim-ifd-elemental-addr",
+				slim->dev.of_node->full_name);
+			return -ENODEV;
+		} else if (prop->length != 6) {
+			BTFMSLIM_ERR(
+				"invalid codec slim ifd addr. addr length= %d",
+				prop->length);
+			return -ENODEV;
+		}
+		memcpy(slim_ifd->e_addr, prop->value, 6);
+		BTFMSLIM_DBG(
+			"PGD Enum Addr: %.02x:%.02x:%.02x:%.02x:%.02x: %.02x",
+			slim->e_addr[0], slim->e_addr[1], slim->e_addr[2],
+			slim->e_addr[3], slim->e_addr[4], slim->e_addr[5]);
+		BTFMSLIM_DBG(
+			"IFD Enum Addr: %.02x:%.02x:%.02x:%.02x:%.02x: %.02x",
+			slim_ifd->e_addr[0], slim_ifd->e_addr[1],
+			slim_ifd->e_addr[2], slim_ifd->e_addr[3],
+			slim_ifd->e_addr[4], slim_ifd->e_addr[5]);
+	} else {
+		BTFMSLIM_ERR("Platform data is not valid");
+	}
+
+	return ret;
+}
+
+static int btfm_slim_probe(struct slim_device *slim)
+{
+	int ret = 0;
+	struct btfmslim *btfm_slim;
+
+	BTFMSLIM_DBG("");
+	if (!slim->ctrl)
+		return -EINVAL;
+
+	/* Allocation btfmslim data pointer */
+	btfm_slim = kzalloc(sizeof(struct btfmslim), GFP_KERNEL);
+	if (btfm_slim == NULL) {
+		BTFMSLIM_ERR("error, allocation failed");
+		return -ENOMEM;
+	}
+	/* BTFM Slimbus driver control data configuration */
+	btfm_slim->slim_pgd = slim;
+
+	/* Assign vendor specific function */
+	btfm_slim->rx_chs = SLIM_SLAVE_RXPORT;
+	btfm_slim->tx_chs = SLIM_SLAVE_TXPORT;
+	btfm_slim->vendor_init = SLIM_SLAVE_INIT;
+	btfm_slim->vendor_port_en = SLIM_SLAVE_PORT_EN;
+
+	/* Created Mutex for slimbus data transfer */
+	mutex_init(&btfm_slim->io_lock);
+	mutex_init(&btfm_slim->xfer_lock);
+
+	/* Get Device tree node for Interface Device enumeration address */
+	ret = btfm_slim_get_dt_info(btfm_slim);
+	if (ret)
+		goto dealloc;
+
+	/* Add Interface Device for slimbus driver */
+	ret = slim_add_device(btfm_slim->slim_pgd->ctrl, &btfm_slim->slim_ifd);
+	if (ret) {
+		BTFMSLIM_ERR("error, adding SLIMBUS device failed");
+		goto dealloc;
+	}
+
+	/* Platform driver data allocation */
+	slim->dev.platform_data = btfm_slim;
+
+	/* Driver specific data allocation */
+	btfm_slim->dev = &slim->dev;
+	ret = btfm_slim_register_codec(&slim->dev);
+	if (ret) {
+		BTFMSLIM_ERR("registering codec failed (%d)", ret);
+		slim->dev.platform_data = NULL;
+		slim_remove_device(&btfm_slim->slim_ifd);
+		goto dealloc;
+	}
+	ret = bt_register_slimdev(&slim->dev);
+	return ret;
+
+dealloc:
+	mutex_destroy(&btfm_slim->io_lock);
+	mutex_destroy(&btfm_slim->xfer_lock);
+	kfree(btfm_slim);
+	return ret;
+}
+
+static int btfm_slim_remove(struct slim_device *slim)
+{
+	struct btfmslim *btfm_slim = slim->dev.platform_data;
+
+	BTFMSLIM_DBG("");
+	mutex_destroy(&btfm_slim->io_lock);
+	mutex_destroy(&btfm_slim->xfer_lock);
+	snd_soc_unregister_codec(&slim->dev);
+
+	BTFMSLIM_DBG("slim_remove_device() - btfm_slim->slim_ifd");
+	slim_remove_device(&btfm_slim->slim_ifd);
+
+	BTFMSLIM_DBG("slim_remove_device() - btfm_slim->slim_pgd");
+	slim_remove_device(slim);
+
+	kfree(btfm_slim);
+	return 0;
+}
+
+static const struct slim_device_id btfm_slim_id[] = {
+	{SLIM_SLAVE_COMPATIBLE_STR, 0},
+	{}
+};
+
+static struct slim_driver btfm_slim_driver = {
+	.driver = {
+		.name = "btfmslim-driver",
+		.owner = THIS_MODULE,
+	},
+	.probe = btfm_slim_probe,
+	.remove = btfm_slim_remove,
+	.id_table = btfm_slim_id
+};
+
+static int __init btfm_slim_init(void)
+{
+	int ret;
+
+	BTFMSLIM_DBG("");
+	ret = slim_driver_register(&btfm_slim_driver);
+	if (ret)
+		BTFMSLIM_ERR("Failed to register slimbus driver: %d", ret);
+	return ret;
+}
+
+static void __exit btfm_slim_exit(void)
+{
+	BTFMSLIM_DBG("");
+	slim_driver_unregister(&btfm_slim_driver);
+}
+
+module_init(btfm_slim_init);
+module_exit(btfm_slim_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("BTFM Slimbus Slave driver");
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/bluetooth/btfm_slim_codec.c	2019-01-22 16:16:22.931241227 +0100
@@ -0,0 +1,434 @@
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_gpio.h>
+#include <linux/delay.h>
+#include <linux/gpio.h>
+#include <linux/debugfs.h>
+#include <linux/slimbus/slimbus.h>
+#include <linux/ratelimit.h>
+#include <linux/slab.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+#include <sound/soc-dapm.h>
+#include <sound/tlv.h>
+#include "btfm_slim.h"
+
+static int bt_soc_enable_status;
+
+
+static int btfm_slim_codec_write(struct snd_soc_codec *codec, unsigned int reg,
+	unsigned int value)
+{
+	return 0;
+}
+
+static unsigned int btfm_slim_codec_read(struct snd_soc_codec *codec,
+				unsigned int reg)
+{
+	return 0;
+}
+
+static int bt_soc_status_get(struct snd_kcontrol *kcontrol,
+					struct snd_ctl_elem_value *ucontrol)
+{
+	ucontrol->value.integer.value[0] = bt_soc_enable_status;
+	/* ALSA "get" callbacks return 0 on success */
+	return 0;
+}
+
+static int bt_soc_status_put(struct snd_kcontrol *kcontrol,
+					struct snd_ctl_elem_value *ucontrol)
+{
+	return 1;
+}
+
+static const struct snd_kcontrol_new status_controls[] = {
+	SOC_SINGLE_EXT("BT SOC status", 0, 0, 1, 0,
+			bt_soc_status_get,
+			bt_soc_status_put)
+
+};
+
+
+static int btfm_slim_codec_probe(struct snd_soc_codec *codec)
+{
+	snd_soc_add_codec_controls(codec, status_controls,
+				   ARRAY_SIZE(status_controls));
+	return 0;
+}
+
+static int btfm_slim_codec_remove(struct snd_soc_codec *codec)
+{
+	return 0;
+}
+
+static int btfm_slim_dai_startup(struct snd_pcm_substream *substream,
+		struct snd_soc_dai *dai)
+{
+	int ret;
+	struct btfmslim *btfmslim = dai->dev->platform_data;
+
+	BTFMSLIM_DBG("substream = %s  stream = %d dai->name = %s",
+		 substream->name, substream->stream, dai->name);
+	ret = btfm_slim_hw_init(btfmslim);
+	return ret;
+}
+
+static void btfm_slim_dai_shutdown(struct snd_pcm_substream *substream,
+		struct snd_soc_dai *dai)
+{
+	int i;
+	struct btfmslim *btfmslim = dai->dev->platform_data;
+	struct btfmslim_ch *ch;
+	uint8_t rxport, grp = false, nchan = 1;
+
+	BTFMSLIM_DBG("dai->name: %s, dai->id: %d, dai->rate: %d", dai->name,
+		dai->id, dai->rate);
+
+	switch (dai->id) {
+	case BTFM_FM_SLIM_TX:
+		grp = true; nchan = 2;
+		ch = btfmslim->tx_chs;
+		rxport = 0;
+		break;
+	case BTFM_BT_SCO_SLIM_TX:
+		ch = btfmslim->tx_chs;
+		rxport = 0;
+		break;
+	case BTFM_BT_SCO_A2DP_SLIM_RX:
+	case BTFM_BT_SPLIT_A2DP_SLIM_RX:
+		ch = btfmslim->rx_chs;
+		rxport = 1;
+		break;
+	case BTFM_SLIM_NUM_CODEC_DAIS:
+	default:
+		BTFMSLIM_ERR("dai->id is invalid:%d", dai->id);
+		return;
+	}
+
+	/* Search for dai->id matched port handler */
+	for (i = 0; (i < BTFM_SLIM_NUM_CODEC_DAIS) &&
+		(ch->id != BTFM_SLIM_NUM_CODEC_DAIS) &&
+		(ch->id != dai->id); ch++, i++)
+		;
+
+	if ((ch->port == BTFM_SLIM_PGD_PORT_LAST) ||
+		(ch->id == BTFM_SLIM_NUM_CODEC_DAIS)) {
+		BTFMSLIM_ERR("ch is invalid!!");
+		return;
+	}
+
+	btfm_slim_disable_ch(btfmslim, ch, rxport, grp, nchan);
+	btfm_slim_hw_deinit(btfmslim);
+}
+
+static int btfm_slim_dai_hw_params(struct snd_pcm_substream *substream,
+			    struct snd_pcm_hw_params *params,
+			    struct snd_soc_dai *dai)
+{
+	BTFMSLIM_DBG("dai->name = %s DAI-ID %x rate %d num_ch %d",
+		dai->name, dai->id, params_rate(params),
+		params_channels(params));
+
+	return 0;
+}
+
+int btfm_slim_dai_prepare(struct snd_pcm_substream *substream,
+	struct snd_soc_dai *dai)
+{
+	int i, ret = -EINVAL;
+	struct btfmslim *btfmslim = dai->dev->platform_data;
+	struct btfmslim_ch *ch;
+	uint8_t rxport, grp = false, nchan = 1;
+
+	bt_soc_enable_status = 0;
+
+	BTFMSLIM_DBG("dai->name: %s, dai->id: %d, dai->rate: %d", dai->name,
+		dai->id, dai->rate);
+
+	/* save sample rate */
+	btfmslim->sample_rate = dai->rate;
+
+	switch (dai->id) {
+	case BTFM_FM_SLIM_TX:
+		grp = true; nchan = 2;
+		ch = btfmslim->tx_chs;
+		rxport = 0;
+		break;
+	case BTFM_BT_SCO_SLIM_TX:
+		ch = btfmslim->tx_chs;
+		rxport = 0;
+		break;
+	case BTFM_BT_SCO_A2DP_SLIM_RX:
+	case BTFM_BT_SPLIT_A2DP_SLIM_RX:
+		ch = btfmslim->rx_chs;
+		rxport = 1;
+		break;
+	case BTFM_SLIM_NUM_CODEC_DAIS:
+	default:
+		BTFMSLIM_ERR("dai->id is invalid:%d", dai->id);
+		return ret;
+	}
+
+	/* Search for dai->id matched port handler */
+	for (i = 0; (i < BTFM_SLIM_NUM_CODEC_DAIS) &&
+		(ch->id != BTFM_SLIM_NUM_CODEC_DAIS) &&
+		(ch->id != dai->id); ch++, i++)
+		;
+
+	if ((ch->port == BTFM_SLIM_PGD_PORT_LAST) ||
+		(ch->id == BTFM_SLIM_NUM_CODEC_DAIS)) {
+		BTFMSLIM_ERR("ch is invalid!!");
+		return ret;
+	}
+
+	ret = btfm_slim_enable_ch(btfmslim, ch, rxport, dai->rate, grp, nchan);
+
+	/* save the enable channel status */
+	if (ret == 0)
+		bt_soc_enable_status = 1;
+	return ret;
+}
+
+/* This function will be called once during boot up */
+static int btfm_slim_dai_set_channel_map(struct snd_soc_dai *dai,
+				unsigned int tx_num, unsigned int *tx_slot,
+				unsigned int rx_num, unsigned int *rx_slot)
+{
+	int ret = -EINVAL, i;
+	struct btfmslim *btfmslim = dai->dev->platform_data;
+	struct btfmslim_ch *rx_chs;
+	struct btfmslim_ch *tx_chs;
+
+	BTFMSLIM_DBG("");
+
+	if (!btfmslim)
+		return ret;
+
+	rx_chs = btfmslim->rx_chs;
+	tx_chs = btfmslim->tx_chs;
+
+	if (!rx_chs || !tx_chs)
+		return ret;
+
+	BTFMSLIM_DBG("Rx: id\tname\tport\thdl\tch\tch_hdl");
+	for (i = 0; (rx_chs->port != BTFM_SLIM_PGD_PORT_LAST) && (i < rx_num);
+		i++, rx_chs++) {
+		/* Set Rx Channel number from machine driver and
+		 * get channel handler from slimbus driver
+		 */
+		rx_chs->ch = *(uint8_t *)(rx_slot + i);
+		ret = slim_query_ch(btfmslim->slim_pgd, rx_chs->ch,
+			&rx_chs->ch_hdl);
+		if (ret < 0) {
+			BTFMSLIM_ERR("slim_query_ch failure ch#%d - ret[%d]",
+				rx_chs->ch, ret);
+			goto error;
+		}
+		BTFMSLIM_DBG("    %d\t%s\t%d\t%x\t%d\t%x", rx_chs->id,
+			rx_chs->name, rx_chs->port, rx_chs->port_hdl,
+			rx_chs->ch, rx_chs->ch_hdl);
+	}
+
+	BTFMSLIM_DBG("Tx: id\tname\tport\thdl\tch\tch_hdl");
+	for (i = 0; (tx_chs->port != BTFM_SLIM_PGD_PORT_LAST) && (i < tx_num);
+		i++, tx_chs++) {
+		/* Set Tx Channel number from machine driver and
+		 * get channel handler from slimbus driver
+		 */
+		tx_chs->ch = *(uint8_t *)(tx_slot + i);
+		ret = slim_query_ch(btfmslim->slim_pgd, tx_chs->ch,
+			&tx_chs->ch_hdl);
+		if (ret < 0) {
+			BTFMSLIM_ERR("slim_query_ch failure ch#%d - ret[%d]",
+				tx_chs->ch, ret);
+			goto error;
+		}
+		BTFMSLIM_DBG("    %d\t%s\t%d\t%x\t%d\t%x", tx_chs->id,
+			tx_chs->name, tx_chs->port, tx_chs->port_hdl,
+			tx_chs->ch, tx_chs->ch_hdl);
+	}
+
+error:
+	return ret;
+}
+
+static int btfm_slim_dai_get_channel_map(struct snd_soc_dai *dai,
+				 unsigned int *tx_num, unsigned int *tx_slot,
+				 unsigned int *rx_num, unsigned int *rx_slot)
+{
+	int i, ret = -EINVAL, j = 0, num = 1;
+	unsigned int *slot = NULL;
+	struct btfmslim *btfmslim = dai->dev->platform_data;
+	struct btfmslim_ch *ch = NULL;
+
+	if (!btfmslim)
+		return ret;
+
+	switch (dai->id) {
+	case BTFM_FM_SLIM_TX:
+		num = 2;
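+		/* fall through - FM uses two Tx channels, SCO uses one */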
+	case BTFM_BT_SCO_SLIM_TX:
+		if (!tx_slot || !tx_num) {
+			BTFMSLIM_ERR("Invalid tx_slot %p or tx_num %p",
+				tx_slot, tx_num);
+			return -EINVAL;
+		}
+		ch = btfmslim->tx_chs;
+		if (!ch)
+			return -EINVAL;
+		slot = tx_slot;
+		*rx_slot = 0;
+		*tx_num = num;
+		*rx_num = 0;
+		break;
+	case BTFM_BT_SCO_A2DP_SLIM_RX:
+	case BTFM_BT_SPLIT_A2DP_SLIM_RX:
+		if (!rx_slot || !rx_num) {
+			BTFMSLIM_ERR("Invalid rx_slot %p or rx_num %p",
+				 rx_slot, rx_num);
+			return -EINVAL;
+		}
+		ch = btfmslim->rx_chs;
+		if (!ch)
+			return -EINVAL;
+		slot = rx_slot;
+		*tx_slot = 0;
+		*tx_num = 0;
+		*rx_num = num;
+		break;
+	default:
+		BTFMSLIM_ERR("Unsupported DAI %d", dai->id);
+		return -EINVAL;
+	}
+
+	do {
+		for (i = 0; (i < BTFM_SLIM_NUM_CODEC_DAIS) && (ch->id !=
+			BTFM_SLIM_NUM_CODEC_DAIS) && (ch->id != dai->id);
+			ch++, i++)
+			;
+
+		if (ch->id == BTFM_SLIM_NUM_CODEC_DAIS ||
+			i == BTFM_SLIM_NUM_CODEC_DAIS) {
+			BTFMSLIM_ERR(
+				"No channel has been allocated for dai (%d)",
+				dai->id);
+			return -EINVAL;
+		}
+
+		*(slot + j) = ch->ch;
+		BTFMSLIM_DBG("id:%d, port:%d, ch:%d, slot: %d", ch->id,
+			ch->port, ch->ch, *(slot + j));
+
+		/* In case it has multiple channels */
+		if (++j < num)
+			ch++;
+	} while (j < num);
+
+	return 0;
+}
+
+static struct snd_soc_dai_ops btfmslim_dai_ops = {
+	.startup = btfm_slim_dai_startup,
+	.shutdown = btfm_slim_dai_shutdown,
+	.hw_params = btfm_slim_dai_hw_params,
+	.prepare = btfm_slim_dai_prepare,
+	.set_channel_map = btfm_slim_dai_set_channel_map,
+	.get_channel_map = btfm_slim_dai_get_channel_map,
+};
+
+static struct snd_soc_dai_driver btfmslim_dai[] = {
+	{	/* FM Audio data multiple channel  : FM -> qdsp */
+		.name = "btfm_fm_slim_tx",
+		.id = BTFM_FM_SLIM_TX,
+		.capture = {
+			.stream_name = "FM TX Capture",
+			.rates = SNDRV_PCM_RATE_48000, /* 48 KHz */
+			.formats = SNDRV_PCM_FMTBIT_S16_LE, /* 16 bits */
+			.rate_max = 48000,
+			.rate_min = 48000,
+			.channels_min = 1,
+			.channels_max = 2,
+		},
+		.ops = &btfmslim_dai_ops,
+	},
+	{	/* Bluetooth SCO voice uplink: bt -> modem */
+		.name = "btfm_bt_sco_slim_tx",
+		.id = BTFM_BT_SCO_SLIM_TX,
+		.capture = {
+			.stream_name = "SCO TX Capture",
+			/* 8 kHz or 16 kHz */
+			.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000,
+			.formats = SNDRV_PCM_FMTBIT_S16_LE, /* 16 bits */
+			.rate_max = 16000,
+			.rate_min = 8000,
+			.channels_min = 1,
+			.channels_max = 1,
+		},
+		.ops = &btfmslim_dai_ops,
+	},
+	{	/* Bluetooth SCO voice downlink: modem -> bt or A2DP Playback */
+		.name = "btfm_bt_sco_a2dp_slim_rx",
+		.id = BTFM_BT_SCO_A2DP_SLIM_RX,
+		.playback = {
+			.stream_name = "SCO A2DP RX Playback",
+			.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000
+				| SNDRV_PCM_RATE_48000, /* 8, 16, or 48 kHz */
+			.formats = SNDRV_PCM_FMTBIT_S16_LE, /* 16 bits */
+			.rate_max = 48000,
+			.rate_min = 8000,
+			.channels_min = 1,
+			.channels_max = 1,
+		},
+		.ops = &btfmslim_dai_ops,
+	},
+	{	/* Bluetooth Split A2DP data: qdsp -> bt */
+		.name = "btfm_bt_split_a2dp_slim_rx",
+		.id = BTFM_BT_SPLIT_A2DP_SLIM_RX,
+		.playback = {
+			.stream_name = "SPLIT A2DP Playback",
+			.rates = SNDRV_PCM_RATE_48000, /* 48 kHz */
+			.formats = SNDRV_PCM_FMTBIT_S16_LE, /* 16 bits */
+			.rate_max = 48000,
+			.rate_min = 48000,
+			.channels_min = 1,
+			.channels_max = 1,
+		},
+		.ops = &btfmslim_dai_ops,
+	},
+};
+
+static struct snd_soc_codec_driver btfmslim_codec = {
+	.probe	= btfm_slim_codec_probe,
+	.remove	= btfm_slim_codec_remove,
+	.read	= btfm_slim_codec_read,
+	.write	= btfm_slim_codec_write,
+};
+
+int btfm_slim_register_codec(struct device *dev)
+{
+	int ret = 0;
+
+	BTFMSLIM_DBG("");
+	/* Register Codec driver */
+	ret = snd_soc_register_codec(dev, &btfmslim_codec,
+		btfmslim_dai, ARRAY_SIZE(btfmslim_dai));
+
+	if (ret)
+		BTFMSLIM_ERR("failed to register codec (%d)", ret);
+
+	return ret;
+}
+
+MODULE_DESCRIPTION("BTFM Slimbus Codec driver");
+MODULE_LICENSE("GPL v2");
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/bluetooth/btfm_slim.h	2019-01-22 16:16:22.931241227 +0100
@@ -0,0 +1,165 @@
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#ifndef BTFM_SLIM_H
+#define BTFM_SLIM_H
+#include <linux/slimbus/slimbus.h>
+
+#define BTFMSLIM_DBG(fmt, arg...)  pr_debug("%s: " fmt "\n", __func__, ## arg)
+#define BTFMSLIM_INFO(fmt, arg...) pr_info("%s: " fmt "\n", __func__, ## arg)
+#define BTFMSLIM_ERR(fmt, arg...)  pr_err("%s: " fmt "\n", __func__, ## arg)
+
+/* Vendor specific defines
+ * These should be redefined in the slimbus slave specific header.
+ */
+#define SLIM_SLAVE_COMPATIBLE_STR	"btfmslim_slave"
+#define SLIM_SLAVE_REG_OFFSET		0x0000
+#define SLIM_SLAVE_RXPORT		NULL
+#define SLIM_SLAVE_TXPORT		NULL
+#define SLIM_SLAVE_INIT			NULL
+#define SLIM_SLAVE_PORT_EN		NULL
+
+/* Misc defines */
+#define SLIM_SLAVE_RW_MAX_TRIES		3
+#define SLIM_SLAVE_PRESENT_TIMEOUT	100
+
+#define PGD	1
+#define IFD	0
+
+/* Codec driver defines */
+enum {
+	BTFM_FM_SLIM_TX = 0,
+	BTFM_BT_SCO_SLIM_TX,
+	BTFM_BT_SCO_A2DP_SLIM_RX,
+	BTFM_BT_SPLIT_A2DP_SLIM_RX,
+	BTFM_SLIM_NUM_CODEC_DAIS
+};
+
+/* Slimbus Port defines - This should be redefined in specific device file */
+#define BTFM_SLIM_PGD_PORT_LAST				0xFF
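+/* Also used as the sentinel port terminating the rx_chs/tx_chs tables. */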
+
+struct btfmslim_ch {
+	int id;
+	char *name;
+	uint32_t port_hdl;	/* slimbus port handle */
+	uint16_t port;		/* slimbus port number */
+
+	uint8_t ch;		/* slimbus channel number */
+	uint16_t ch_hdl;	/* slimbus channel handle */
+	uint16_t grph;	/* slimbus group channel handle */
+};
+
+struct btfmslim {
+	struct device *dev;
+	struct slim_device *slim_pgd;
+	struct slim_device slim_ifd;
+	struct mutex io_lock;
+	struct mutex xfer_lock;
+	uint8_t enabled;
+
+	uint32_t num_rx_port;
+	uint32_t num_tx_port;
+	uint32_t sample_rate;
+
+	struct btfmslim_ch *rx_chs;
+	struct btfmslim_ch *tx_chs;
+
+	int (*vendor_init)(struct btfmslim *btfmslim);
+	int (*vendor_port_en)(struct btfmslim *btfmslim, uint8_t port_num,
+		uint8_t rxport, uint8_t enable);
+};
+
+/**
+ * btfm_slim_hw_init: Initialize slimbus slave device
+ * Returns:
+ * 0: Success
+ * else: Fail
+ */
+int btfm_slim_hw_init(struct btfmslim *btfmslim);
+
+/**
+ * btfm_slim_hw_deinit: Deinitialize slimbus slave device
+ * Returns:
+ * 0: Success
+ * else: Fail
+ */
+int btfm_slim_hw_deinit(struct btfmslim *btfmslim);
+
+/**
+ * btfm_slim_write: write value to pgd or ifd device
+ * @btfmslim: slimbus slave device data pointer.
+ * @reg: slimbus slave register address
+ * @bytes: length of data
+ * @src: data pointer to write
+ * @pgd: selection for device: either PGD or IFD
+ * Returns:
+ * -EINVAL
+ * -ETIMEDOUT
+ * -ENOMEM
+ */
+int btfm_slim_write(struct btfmslim *btfmslim,
+	uint16_t reg, int bytes, void *src, uint8_t pgd);
+
+/**
+ * btfm_slim_read: read value from pgd or ifd device
+ * @btfmslim: slimbus slave device data pointer.
+ * @reg: slimbus slave register address
+ * @bytes: length of data
+ * @dest: data pointer to read
+ * @pgd: selection for device: either PGD or IFD
+ * Returns:
+ * -EINVAL
+ * -ETIMEDOUT
+ * -ENOMEM
+ */
+int btfm_slim_read(struct btfmslim *btfmslim,
+	uint16_t reg, int bytes, void *dest, uint8_t pgd);
+
+/**
+ * btfm_slim_enable_ch: enable channel for slimbus slave port
+ * @btfmslim: slimbus slave device data pointer.
+ * @ch: slimbus slave channel pointer
+ * @rxport: rxport or txport
+ * Returns:
+ * -EINVAL
+ * -ETIMEDOUT
+ * -ENOMEM
+ */
+int btfm_slim_enable_ch(struct btfmslim *btfmslim,
+	struct btfmslim_ch *ch, uint8_t rxport, uint32_t rates,
+	uint8_t grp, uint8_t nchan);
+
+/**
+ * btfm_slim_disable_ch: disable channel for slimbus slave port
+ * @btfmslim: slimbus slave device data pointer.
+ * @ch: slimbus slave channel pointer
+ * @rxport: rxport or txport
+ * Returns:
+ * -EINVAL
+ * -ETIMEDOUT
+ * -ENOMEM
+ */
+int btfm_slim_disable_ch(struct btfmslim *btfmslim,
+	struct btfmslim_ch *ch, uint8_t rxport, uint8_t grp, uint8_t nchan);
+
+/**
+ * btfm_slim_register_codec: Register codec driver in slimbus device node
+ * @dev: device node
+ * Returns:
+ * 0: Success
+ * -ENOMEM: Fail
+ */
+int btfm_slim_register_codec(struct device *dev);
+#endif /* BTFM_SLIM_H */
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/bluetooth/btfm_slim_wcn3990.c	2019-01-22 16:16:22.931241227 +0100
@@ -0,0 +1,176 @@
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#include <linux/slimbus/slimbus.h>
+#include "btfm_slim.h"
+#include "btfm_slim_wcn3990.h"
+
+/* WCN3990 Port assignment */
+struct btfmslim_ch wcn3990_rxport[] = {
+	{.id = BTFM_BT_SCO_A2DP_SLIM_RX, .name = "SCO_A2P_Rx",
+	.port = CHRK_SB_PGD_PORT_RX_SCO},
+	{.id = BTFM_BT_SPLIT_A2DP_SLIM_RX, .name = "A2P_Rx",
+	.port = CHRK_SB_PGD_PORT_RX_A2P},
+	{.id = BTFM_SLIM_NUM_CODEC_DAIS, .name = "",
+	.port = BTFM_SLIM_PGD_PORT_LAST},
+};
+
+struct btfmslim_ch wcn3990_txport[] = {
+	{.id = BTFM_FM_SLIM_TX, .name = "FM_Tx1",
+	.port = CHRK_SB_PGD_PORT_TX1_FM},
+	{.id = BTFM_FM_SLIM_TX, .name = "FM_Tx2",
+	.port = CHRK_SB_PGD_PORT_TX2_FM},
+	{.id = BTFM_BT_SCO_SLIM_TX, .name = "SCO_Tx",
+	.port = CHRK_SB_PGD_PORT_TX_SCO},
+	{.id = BTFM_SLIM_NUM_CODEC_DAIS, .name = "",
+	.port = BTFM_SLIM_PGD_PORT_LAST},
+};
+
+/* Function definitions */
+int btfm_slim_chrk_hw_init(struct btfmslim *btfmslim)
+{
+	int ret = 0;
+	uint8_t reg_val;
+	uint16_t reg;
+
+	BTFMSLIM_DBG("");
+
+	if (!btfmslim)
+		return -EINVAL;
+
+	/* Get SB_SLAVE_HW_REV_MSB value */
+	reg = CHRK_SB_SLAVE_HW_REV_MSB;
+	ret = btfm_slim_read(btfmslim, reg, 1, &reg_val, IFD);
+	if (ret) {
+		BTFMSLIM_ERR("failed to read (%d) reg 0x%x", ret, reg);
+		goto error;
+	}
+	BTFMSLIM_DBG("Major Rev: 0x%x, Minor Rev: 0x%x",
+		(reg_val & 0xF0) >> 4, (reg_val & 0x0F));
+
+	/* Get SB_SLAVE_HW_REV_LSB value */
+	reg = CHRK_SB_SLAVE_HW_REV_LSB;
+	ret = btfm_slim_read(btfmslim, reg, 1, &reg_val, IFD);
+	if (ret) {
+		BTFMSLIM_ERR("failed to read (%d) reg 0x%x", ret, reg);
+		goto error;
+	}
+	BTFMSLIM_DBG("Step Rev: 0x%x", reg_val);
+
+error:
+	return ret;
+}
+
+static inline int is_fm_port(uint8_t port_num)
+{
+	if (port_num == CHRK_SB_PGD_PORT_TX1_FM ||
+		port_num == CHRK_SB_PGD_PORT_TX2_FM)
+		return 1;
+	else
+		return 0;
+}
+
+int btfm_slim_chrk_enable_port(struct btfmslim *btfmslim, uint8_t port_num,
+	uint8_t rxport, uint8_t enable)
+{
+	int ret = 0;
+	uint8_t reg_val = 0, en;
+	uint8_t port_bit = 0;
+	uint16_t reg;
+
+	BTFMSLIM_DBG("port(%d) enable(%d)", port_num, enable);
+
+	if (rxport) {
+		if (enable) {
+			/* For SCO Rx, A2DP Rx */
+			reg_val = 0x1;
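+			/*
+			 * Rx port numbering starts at 16
+			 * (CHRK_SB_PGD_PORT_RX_SCO), so subtract 0x10 to
+			 * index the Rx register bank.
+			 */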
+			port_bit = port_num - 0x10;
+			reg = CHRK_SB_PGD_RX_PORTn_MULTI_CHNL_0(port_bit);
+			BTFMSLIM_DBG("writing reg_val (%d) to reg(%x)",
+					reg_val, reg);
+			ret = btfm_slim_write(btfmslim, reg, 1, &reg_val, IFD);
+			if (ret) {
+				BTFMSLIM_ERR("failed to write (%d) reg 0x%x",
+						ret, reg);
+				goto error;
+			}
+		}
+		/* Port enable */
+		reg = CHRK_SB_PGD_PORT_RX_CFGN(port_num - 0x10);
+		goto enable_disable_rxport;
+	}
+	if (!enable)
+		goto enable_disable_txport;
+
+	/* txport */
+	/* Multiple Channel Setting */
+	if (is_fm_port(port_num)) {
+		reg_val = (0x1 << CHRK_SB_PGD_PORT_TX1_FM) |
+				(0x1 << CHRK_SB_PGD_PORT_TX2_FM);
+		reg = CHRK_SB_PGD_TX_PORTn_MULTI_CHNL_0(port_num);
+		ret = btfm_slim_write(btfmslim, reg, 1, &reg_val, IFD);
+		if (ret) {
+			BTFMSLIM_ERR("failed to write (%d) reg 0x%x",
+					ret, reg);
+			goto error;
+		}
+	} else if (port_num == CHRK_SB_PGD_PORT_TX_SCO) {
+		/* SCO Tx */
+		reg_val = 0x1 << CHRK_SB_PGD_PORT_TX_SCO;
+		reg = CHRK_SB_PGD_TX_PORTn_MULTI_CHNL_0(port_num);
+		BTFMSLIM_DBG("writing reg_val (%d) to reg(%x)",
+				reg_val, reg);
+		ret = btfm_slim_write(btfmslim, reg, 1, &reg_val, IFD);
+		if (ret) {
+			BTFMSLIM_ERR("failed to write (%d) reg 0x%x",
+					ret, reg);
+			goto error;
+		}
+	}
+
+	/* Enable Tx port hw auto recovery for underrun or overrun error */
+	reg_val = (CHRK_ENABLE_OVERRUN_AUTO_RECOVERY |
+				CHRK_ENABLE_UNDERRUN_AUTO_RECOVERY);
+	reg = CHRK_SB_PGD_PORT_TX_OR_UR_CFGN(port_num);
+	ret = btfm_slim_write(btfmslim, reg, 1, &reg_val, IFD);
+	if (ret) {
+		BTFMSLIM_ERR("failed to write (%d) reg 0x%x", ret, reg);
+		goto error;
+	}
+
+enable_disable_txport:
+	/* Port enable */
+	reg = CHRK_SB_PGD_PORT_TX_CFGN(port_num);
+
+enable_disable_rxport:
+	if (enable)
+		en = CHRK_SB_PGD_PORT_ENABLE;
+	else
+		en = CHRK_SB_PGD_PORT_DISABLE;
+
+	if (is_fm_port(port_num))
+		reg_val = en | CHRK_SB_PGD_PORT_WM_L8;
+	else if (port_num == CHRK_SB_PGD_PORT_TX_SCO)
+		reg_val = enable ? en | CHRK_SB_PGD_PORT_WM_L1 : en;
+	else
+		reg_val = enable ? en | CHRK_SB_PGD_PORT_WM_LB : en;
+
+	if (enable && port_num == CHRK_SB_PGD_PORT_TX_SCO)
+		BTFMSLIM_INFO("programming SCO Tx with reg_val %d to reg 0x%x",
+				reg_val, reg);
+
+	ret = btfm_slim_write(btfmslim, reg, 1, &reg_val, IFD);
+	if (ret)
+		BTFMSLIM_ERR("failed to write (%d) reg 0x%x", ret, reg);
+
+error:
+	return ret;
+}
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/bluetooth/btfm_slim_wcn3990.h	2019-01-22 16:16:22.931241227 +0100
@@ -0,0 +1,141 @@
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#ifndef BTFM_SLIM_WCN3990_H
+#define BTFM_SLIM_WCN3990_H
+#ifdef CONFIG_BTFM_SLIM_WCN3990
+#include "btfm_slim.h"
+
+/* Register addresses */
+#define CHRK_SB_COMP_TEST			0x00000000
+#define CHRK_SB_SLAVE_HW_REV_MSB		0x00000001
+#define CHRK_SB_SLAVE_HW_REV_LSB		0x00000002
+#define CHRK_SB_DEBUG_FEATURES			0x00000005
+#define CHRK_SB_INTF_INT_EN			0x00000010
+#define CHRK_SB_INTF_INT_STATUS			0x00000011
+#define CHRK_SB_INTF_INT_CLR			0x00000012
+#define CHRK_SB_FRM_CFG				0x00000013
+#define CHRK_SB_FRM_STATUS			0x00000014
+#define CHRK_SB_FRM_INT_EN			0x00000015
+#define CHRK_SB_FRM_INT_STATUS			0x00000016
+#define CHRK_SB_FRM_INT_CLR			0x00000017
+#define CHRK_SB_FRM_WAKEUP			0x00000018
+#define CHRK_SB_FRM_CLKCTL_DONE			0x00000019
+#define CHRK_SB_FRM_IE_STATUS			0x0000001A
+#define CHRK_SB_FRM_VE_STATUS			0x0000001B
+#define CHRK_SB_PGD_TX_CFG_STATUS		0x00000020
+#define CHRK_SB_PGD_RX_CFG_STATUS		0x00000021
+#define CHRK_SB_PGD_DEV_INT_EN			0x00000022
+#define CHRK_SB_PGD_DEV_INT_STATUS		0x00000023
+#define CHRK_SB_PGD_DEV_INT_CLR			0x00000024
+#define CHRK_SB_PGD_PORT_INT_EN_RX_0		0x00000030
+#define CHRK_SB_PGD_PORT_INT_EN_RX_1		0x00000031
+#define CHRK_SB_PGD_PORT_INT_EN_TX_0		0x00000032
+#define CHRK_SB_PGD_PORT_INT_EN_TX_1		0x00000033
+#define CHRK_SB_PGD_PORT_INT_STATUS_RX_0	0x00000034
+#define CHRK_SB_PGD_PORT_INT_STATUS_RX_1	0x00000035
+#define CHRK_SB_PGD_PORT_INT_STATUS_TX_0	0x00000036
+#define CHRK_SB_PGD_PORT_INT_STATUS_TX_1	0x00000037
+#define CHRK_SB_PGD_PORT_INT_CLR_RX_0		0x00000038
+#define CHRK_SB_PGD_PORT_INT_CLR_RX_1		0x00000039
+#define CHRK_SB_PGD_PORT_INT_CLR_TX_0		0x0000003A
+#define CHRK_SB_PGD_PORT_INT_CLR_TX_1		0x0000003B
+#define CHRK_SB_PGD_PORT_RX_CFGN(n)		(0x00000040 + (n))
+#define CHRK_SB_PGD_PORT_TX_CFGN(n)		(0x00000050 + (n))
+#define CHRK_SB_PGD_PORT_INT_RX_SOURCEN(n)	(0x00000060 + (n))
+#define CHRK_SB_PGD_PORT_INT_TX_SOURCEN(n)	(0x00000070 + (n))
+#define CHRK_SB_PGD_PORT_RX_STATUSN(n)		(0x00000080 + (n))
+#define CHRK_SB_PGD_PORT_TX_STATUSN(n)		(0x00000090 + (n))
+#define CHRK_SB_PGD_TX_PORTn_MULTI_CHNL_0(n)	(0x00000100 + 0x4 * (n))
+#define CHRK_SB_PGD_TX_PORTn_MULTI_CHNL_1(n)	(0x00000101 + 0x4 * (n))
+#define CHRK_SB_PGD_RX_PORTn_MULTI_CHNL_0(n)	(0x00000180 + 0x4 * (n))
+#define CHRK_SB_PGD_RX_PORTn_MULTI_CHNL_1(n)	(0x00000181 + 0x4 * (n))
+#define CHRK_SB_PGD_PORT_TX_OR_UR_CFGN(n)	(0x000001F0 + (n))
+
+/* Register bit settings */
+#define CHRK_ENABLE_OVERRUN_AUTO_RECOVERY	(0x1 << 1)
+#define CHRK_ENABLE_UNDERRUN_AUTO_RECOVERY	(0x1 << 0)
+#define CHRK_SB_PGD_PORT_ENABLE			(0x1 << 0)
+#define CHRK_SB_PGD_PORT_DISABLE		(0x0 << 0)
+#define CHRK_SB_PGD_PORT_WM_L1			(0x1 << 1)
+#define CHRK_SB_PGD_PORT_WM_L2			(0x2 << 1)
+#define CHRK_SB_PGD_PORT_WM_L3			(0x3 << 1)
+#define CHRK_SB_PGD_PORT_WM_L8			(0x8 << 1)
+#define CHRK_SB_PGD_PORT_WM_LB			(0xB << 1)
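+/*
+ * Judging by the shifts above, bit 0 of the port CFG register is the
+ * enable bit and bits [4:1] hold the watermark level.
+ */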
+
+#define CHRK_SB_PGD_PORT_RX_NUM			16
+#define CHRK_SB_PGD_PORT_TX_NUM			16
+
+/* PGD Port Map */
+#define CHRK_SB_PGD_PORT_TX_SCO			0
+#define CHRK_SB_PGD_PORT_TX1_FM			1
+#define CHRK_SB_PGD_PORT_TX2_FM			2
+#define CHRK_SB_PGD_PORT_RX_SCO			16
+#define CHRK_SB_PGD_PORT_RX_A2P			17
+
+/* Function prototypes */
+
+/*
+ * btfm_slim_chrk_hw_init: Initialize wcn3990 specific slimbus slave device
+ * @btfmslim: slimbus slave device data pointer.
+ * Returns:
+ * 0: Success
+ * else: Fail
+ */
+int btfm_slim_chrk_hw_init(struct btfmslim *btfmslim);
+
+/*
+ * btfm_slim_chrk_enable_port: Enable or disable a wcn3990 port by port number
+ * @btfmslim: slimbus slave device data pointer.
+ * @port_num: slimbus slave port number to enable or disable
+ * @rxport: rxport or txport
+ * @enable: enable port or disable port
+ * Returns:
+ * 0: Success
+ * else: Fail
+ */
+int btfm_slim_chrk_enable_port(struct btfmslim *btfmslim, uint8_t port_num,
+	uint8_t rxport, uint8_t enable);
+
+/* Specific defines for wcn3990 slimbus device */
+#define WCN3990_SLIM_REG_OFFSET		0x0800
+
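+/*
+ * Override the stub defaults declared in btfm_slim.h with the
+ * wcn3990-specific values and functions below.
+ */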
+#ifdef SLIM_SLAVE_REG_OFFSET
+#undef SLIM_SLAVE_REG_OFFSET
+#define SLIM_SLAVE_REG_OFFSET		WCN3990_SLIM_REG_OFFSET
+#endif
+
+/* Assign vendor specific function */
+extern struct btfmslim_ch wcn3990_txport[];
+extern struct btfmslim_ch wcn3990_rxport[];
+
+#ifdef SLIM_SLAVE_RXPORT
+#undef SLIM_SLAVE_RXPORT
+#define SLIM_SLAVE_RXPORT (&wcn3990_rxport[0])
+#endif
+
+#ifdef SLIM_SLAVE_TXPORT
+#undef SLIM_SLAVE_TXPORT
+#define SLIM_SLAVE_TXPORT (&wcn3990_txport[0])
+#endif
+
+#ifdef SLIM_SLAVE_INIT
+#undef SLIM_SLAVE_INIT
+#define SLIM_SLAVE_INIT btfm_slim_chrk_hw_init
+#endif
+
+#ifdef SLIM_SLAVE_PORT_EN
+#undef SLIM_SLAVE_PORT_EN
+#define SLIM_SLAVE_PORT_EN btfm_slim_chrk_enable_port
+#endif
+#endif /* CONFIG_BTFM_SLIM_WCN3990 */
+#endif /* BTFM_SLIM_WCN3990_H */
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/char/adsprpc.c	2019-10-29 09:26:23.445201240 +0100
@@ -0,0 +1,3263 @@
+/*
+ * Copyright (c) 2012-2019, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/dma-buf.h>
+#include <linux/dma-mapping.h>
+#include <linux/slab.h>
+#include <linux/completion.h>
+#include <linux/pagemap.h>
+#include <linux/mm.h>
+#include <linux/fs.h>
+#include <linux/sched.h>
+#include <linux/module.h>
+#include <linux/cdev.h>
+#include <linux/list.h>
+#include <linux/hash.h>
+#include <linux/msm_ion.h>
+#include <soc/qcom/secure_buffer.h>
+#include <soc/qcom/smd.h>
+#include <soc/qcom/glink.h>
+#include <soc/qcom/subsystem_notif.h>
+#include <soc/qcom/subsystem_restart.h>
+#include <linux/scatterlist.h>
+#include <linux/fs.h>
+#include <linux/uaccess.h>
+#include <linux/device.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/dma-contiguous.h>
+#include <linux/cma.h>
+#include <linux/iommu.h>
+#include <linux/qcom_iommu.h>
+#include <linux/kref.h>
+#include <linux/sort.h>
+#include <linux/msm_dma_iommu_mapping.h>
+#include <asm/dma-iommu.h>
+#include <soc/qcom/scm.h>
+#include "adsprpc_compat.h"
+#include "adsprpc_shared.h"
+#include <soc/qcom/ramdump.h>
+#include <linux/debugfs.h>
+
+#define TZ_PIL_PROTECT_MEM_SUBSYS_ID 0x0C
+#define TZ_PIL_CLEAR_PROTECT_MEM_SUBSYS_ID 0x0D
+#define TZ_PIL_AUTH_QDSP6_PROC 1
+#define ADSP_MMAP_HEAP_ADDR 4
+#define ADSP_MMAP_REMOTE_HEAP_ADDR 8
+#define FASTRPC_ENOSUCH 39
+#define VMID_SSC_Q6     38
+#define VMID_ADSP_Q6    6
+#define AC_VM_ADSP_HEAP_SHARED 33
+#define DEBUGFS_SIZE 1024
+
+#define RPC_TIMEOUT	(5 * HZ)
+#define BALIGN		128
+#define NUM_CHANNELS	4		/* adsp, sdsp, mdsp, cdsp */
+#define NUM_SESSIONS	9		/* 8 compute, 1 cpz */
+#define FASTRPC_CTX_MAGIC (0xbeeddeed)
+#define FASTRPC_CTX_MAX (256)
+#define FASTRPC_CTXID_MASK (0xFF0)
+
+#define IS_CACHE_ALIGNED(x) (((x) & ((L1_CACHE_BYTES)-1)) == 0)
+
+#define FASTRPC_LINK_STATE_DOWN   (0x0)
+#define FASTRPC_LINK_STATE_UP     (0x1)
+#define FASTRPC_LINK_DISCONNECTED (0x0)
+#define FASTRPC_LINK_CONNECTING   (0x1)
+#define FASTRPC_LINK_CONNECTED    (0x3)
+#define FASTRPC_LINK_DISCONNECTING (0x7)
+
+#define PERF_KEYS "count:flush:map:copy:glink:getargs:putargs:invalidate:invoke"
+#define FASTRPC_STATIC_HANDLE_KERNEL (1)
+#define FASTRPC_STATIC_HANDLE_LISTENER (3)
+#define FASTRPC_STATIC_HANDLE_MAX (20)
+
+#define PERF_END (void)0
+
+#define PERF(enb, cnt, ff) \
+	{\
+		struct timespec startT = {0};\
+		if (enb) {\
+			getnstimeofday(&startT);\
+		} \
+		ff ;\
+		if (enb) {\
+			cnt += getnstimediff(&startT);\
+		} \
+	}
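+/*
+ * Usage sketch (see fastrpc_internal_invoke below):
+ *
+ *	PERF(fl->profile, fl->perf.getargs,
+ *	VERIFY(err, 0 == get_args(kernel, ctx));
+ *	PERF_END);
+ *
+ * The wrapped statement always runs; its wall-clock time is accumulated
+ * into the counter only when profiling is enabled.
+ */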
+
+static int fastrpc_glink_open(int cid);
+static void fastrpc_glink_close(void *chan, int cid);
+static struct dentry *debugfs_root;
+static struct dentry *debugfs_global_file;
+
+static inline uint64_t buf_page_start(uint64_t buf)
+{
+	uint64_t start = (uint64_t) buf & PAGE_MASK;
+	return start;
+}
+
+static inline uint64_t buf_page_offset(uint64_t buf)
+{
+	uint64_t offset = (uint64_t) buf & (PAGE_SIZE - 1);
+	return offset;
+}
+
+static inline uint64_t buf_num_pages(uint64_t buf, size_t len)
+{
+	uint64_t start = buf_page_start(buf) >> PAGE_SHIFT;
+	uint64_t end = (((uint64_t) buf + len - 1) & PAGE_MASK) >> PAGE_SHIFT;
+	uint64_t nPages = end - start + 1;
+	return nPages;
+}
+
+static inline uint64_t buf_page_size(uint32_t size)
+{
+	uint64_t sz = (size + (PAGE_SIZE - 1)) & PAGE_MASK;
+	return sz > PAGE_SIZE ? sz : PAGE_SIZE;
+}
+
+static inline void *uint64_to_ptr(uint64_t addr)
+{
+	void *ptr = (void *)((uintptr_t)addr);
+	return ptr;
+}
+
+static inline uint64_t ptr_to_uint64(void *ptr)
+{
+	uint64_t addr = (uint64_t)((uintptr_t)ptr);
+	return addr;
+}
+
+struct fastrpc_file;
+
+struct fastrpc_buf {
+	struct hlist_node hn;
+	struct fastrpc_file *fl;
+	void *virt;
+	uint64_t phys;
+	size_t size;
+};
+
+struct fastrpc_ctx_lst;
+
+struct overlap {
+	uintptr_t start;
+	uintptr_t end;
+	int raix;
+	uintptr_t mstart;
+	uintptr_t mend;
+	uintptr_t offset;
+};
+
+struct smq_invoke_ctx {
+	struct hlist_node hn;
+	struct completion work;
+	int retval;
+	int pid;
+	int tgid;
+	remote_arg_t *lpra;
+	remote_arg64_t *rpra;
+	int *fds;
+	unsigned *attrs;
+	struct fastrpc_mmap **maps;
+	struct fastrpc_buf *buf;
+	size_t used;
+	struct fastrpc_file *fl;
+	uint32_t sc;
+	struct overlap *overs;
+	struct overlap **overps;
+	struct smq_msg msg;
+	unsigned int magic;
+	uint64_t ctxid;
+};
+
+struct fastrpc_ctx_lst {
+	struct hlist_head pending;
+	struct hlist_head interrupted;
+};
+
+struct fastrpc_smmu {
+	struct device *dev;
+	struct dma_iommu_mapping *mapping;
+	int cb;
+	int enabled;
+	int faults;
+	int secure;
+	int coherent;
+};
+
+struct fastrpc_session_ctx {
+	struct device *dev;
+	struct fastrpc_smmu smmu;
+	int used;
+};
+
+struct fastrpc_glink_info {
+	int link_state;
+	int port_state;
+	struct glink_open_config cfg;
+	struct glink_link_info link_info;
+	void *link_notify_handle;
+};
+
+struct fastrpc_channel_ctx {
+	char *name;
+	char *subsys;
+	void *chan;
+	struct device *dev;
+	struct fastrpc_session_ctx session[NUM_SESSIONS];
+	struct completion work;
+	struct completion workport;
+	struct notifier_block nb;
+	struct kref kref;
+	int channel;
+	int sesscount;
+	int ssrcount;
+	void *handle;
+	int prevssrcount;
+	int issubsystemup;
+	int vmid;
+	int heap_vmid;
+	int ramdumpenabled;
+	void *remoteheap_ramdump_dev;
+	struct fastrpc_glink_info link;
+};
+
+struct fastrpc_apps {
+	struct fastrpc_channel_ctx *channel;
+	struct cdev cdev;
+	struct class *class;
+	struct mutex smd_mutex;
+	struct smq_phy_page range;
+	struct hlist_head maps;
+	uint32_t staticpd_flags;
+	dev_t dev_no;
+	int compat;
+	struct hlist_head drivers;
+	spinlock_t hlock;
+	struct ion_client *client;
+	struct device *dev;
+	bool glink;
+	spinlock_t ctxlock;
+	struct smq_invoke_ctx *ctxtable[FASTRPC_CTX_MAX];
+};
+
+struct fastrpc_mmap {
+	struct hlist_node hn;
+	struct fastrpc_file *fl;
+	struct fastrpc_apps *apps;
+	int fd;
+	uint32_t flags;
+	struct dma_buf *buf;
+	struct sg_table *table;
+	struct dma_buf_attachment *attach;
+	struct ion_handle *handle;
+	uint64_t phys;
+	size_t size;
+	uintptr_t va;
+	size_t len;
+	int refs;
+	uintptr_t raddr;
+	int uncached;
+	int secure;
+	uintptr_t attr;
+};
+
+struct fastrpc_perf {
+	int64_t count;
+	int64_t flush;
+	int64_t map;
+	int64_t copy;
+	int64_t link;
+	int64_t getargs;
+	int64_t putargs;
+	int64_t invargs;
+	int64_t invoke;
+};
+
+struct fastrpc_file {
+	struct hlist_node hn;
+	spinlock_t hlock;
+	struct hlist_head maps;
+	struct hlist_head bufs;
+	struct fastrpc_ctx_lst clst;
+	struct fastrpc_session_ctx *sctx;
+	struct fastrpc_session_ctx *secsctx;
+	uint32_t mode;
+	uint32_t profile;
+	int tgid;
+	int cid;
+	int ssrcount;
+	int pd;
+	int file_close;
+	struct fastrpc_apps *apps;
+	struct fastrpc_perf perf;
+	struct dentry *debugfs_file;
+	struct mutex map_mutex;
+};
+
+static struct fastrpc_apps gfa;
+
+static struct fastrpc_channel_ctx gcinfo[NUM_CHANNELS] = {
+	{
+		.name = "adsprpc-smd",
+		.subsys = "adsp",
+		.channel = SMD_APPS_QDSP,
+		.link.link_info.edge = "lpass",
+		.link.link_info.transport = "smem",
+	},
+	{
+		.name = "mdsprpc-smd",
+		.subsys = "modem",
+		.channel = SMD_APPS_MODEM,
+		.link.link_info.edge = "mpss",
+		.link.link_info.transport = "smem",
+	},
+	{
+		.name = "sdsprpc-smd",
+		.subsys = "slpi",
+		.channel = SMD_APPS_DSPS,
+		.link.link_info.edge = "dsps",
+		.link.link_info.transport = "smem",
+		.vmid = VMID_SSC_Q6,
+	},
+	{
+		.name = "cdsprpc-smd",
+		.subsys = "cdsp",
+		.link.link_info.edge = "cdsp",
+		.link.link_info.transport = "smem",
+	},
+};
+
+static inline int64_t getnstimediff(struct timespec *start)
+{
+	int64_t ns;
+	struct timespec ts, b;
+
+	getnstimeofday(&ts);
+	b = timespec_sub(ts, *start);
+	ns = timespec_to_ns(&b);
+	return ns;
+}
+
+static void fastrpc_buf_free(struct fastrpc_buf *buf, int cache)
+{
+	struct fastrpc_file *fl = buf == NULL ? NULL : buf->fl;
+	int vmid;
+
+	if (!fl)
+		return;
+	if (cache) {
+		spin_lock(&fl->hlock);
+		hlist_add_head(&buf->hn, &fl->bufs);
+		spin_unlock(&fl->hlock);
+		return;
+	}
+	if (!IS_ERR_OR_NULL(buf->virt)) {
+		int destVM[1] = {VMID_HLOS};
+		int destVMperm[1] = {PERM_READ | PERM_WRITE | PERM_EXEC};
+
+		if (fl->sctx->smmu.cb)
+			buf->phys &= ~((uint64_t)fl->sctx->smmu.cb << 32);
+		vmid = fl->apps->channel[fl->cid].vmid;
+		if (vmid) {
+			int srcVM[2] = {VMID_HLOS, vmid};
+
+			hyp_assign_phys(buf->phys, buf_page_size(buf->size),
+				srcVM, 2, destVM, destVMperm, 1);
+		}
+		dma_free_coherent(fl->sctx->smmu.dev, buf->size, buf->virt,
+					buf->phys);
+	}
+	kfree(buf);
+}
+
+static void fastrpc_buf_list_free(struct fastrpc_file *fl)
+{
+	struct fastrpc_buf *buf, *free;
+	do {
+		struct hlist_node *n;
+
+		free = NULL;
+		spin_lock(&fl->hlock);
+		hlist_for_each_entry_safe(buf, n, &fl->bufs, hn) {
+			hlist_del_init(&buf->hn);
+			free = buf;
+			break;
+		}
+		spin_unlock(&fl->hlock);
+		if (free)
+			fastrpc_buf_free(free, 0);
+	} while (free);
+}
+
+static void fastrpc_mmap_add(struct fastrpc_mmap *map)
+{
+	if (map->flags == ADSP_MMAP_HEAP_ADDR ||
+				map->flags == ADSP_MMAP_REMOTE_HEAP_ADDR) {
+		struct fastrpc_apps *me = &gfa;
+
+		spin_lock(&me->hlock);
+		hlist_add_head(&map->hn, &me->maps);
+		spin_unlock(&me->hlock);
+	} else {
+		struct fastrpc_file *fl = map->fl;
+
+		spin_lock(&fl->hlock);
+		hlist_add_head(&map->hn, &fl->maps);
+		spin_unlock(&fl->hlock);
+	}
+}
+
+static int fastrpc_mmap_find(struct fastrpc_file *fl, int fd, uintptr_t va,
+			size_t len, int mflags, struct fastrpc_mmap **ppmap)
+{
+	struct fastrpc_apps *me = &gfa;
+	struct fastrpc_mmap *match = NULL, *map = NULL;
+	struct hlist_node *n;
+
+	if ((va + len) < va)
+		return -EOVERFLOW;
+	if (mflags == ADSP_MMAP_HEAP_ADDR ||
+				 mflags == ADSP_MMAP_REMOTE_HEAP_ADDR) {
+		spin_lock(&me->hlock);
+		hlist_for_each_entry_safe(map, n, &me->maps, hn) {
+			if (va >= map->va &&
+				va + len <= map->va + map->len &&
+				map->fd == fd) {
+				map->refs++;
+				match = map;
+				break;
+			}
+		}
+		spin_unlock(&me->hlock);
+	} else {
+		spin_lock(&fl->hlock);
+		hlist_for_each_entry_safe(map, n, &fl->maps, hn) {
+			if (va >= map->va &&
+				va + len <= map->va + map->len &&
+				map->fd == fd) {
+				map->refs++;
+				match = map;
+				break;
+			}
+		}
+		spin_unlock(&fl->hlock);
+	}
+	if (match) {
+		*ppmap = match;
+		return 0;
+	}
+	return -ENOTTY;
+}
+
+static int dma_alloc_memory(phys_addr_t *region_start, size_t size)
+{
+	struct fastrpc_apps *me = &gfa;
+	void *vaddr = NULL;
+	DEFINE_DMA_ATTRS(attrs);
+
+	if (me->dev == NULL) {
+		pr_err("device adsprpc-mem is not initialized\n");
+		return -ENODEV;
+	}
+	dma_set_attr(DMA_ATTR_SKIP_ZEROING, &attrs);
+	dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &attrs);
+	vaddr = dma_alloc_attrs(me->dev, size, region_start, GFP_KERNEL,
+						&attrs);
+	if (!vaddr) {
+		pr_err("ADSPRPC: Failed to allocate %x remote heap memory\n",
+						(unsigned int)size);
+		return -ENOMEM;
+	}
+	return 0;
+}
+
+static int fastrpc_mmap_remove(struct fastrpc_file *fl, uintptr_t va,
+			       size_t len, struct fastrpc_mmap **ppmap)
+{
+	struct fastrpc_mmap *match = NULL, *map;
+	struct hlist_node *n;
+	struct fastrpc_apps *me = &gfa;
+
+	spin_lock(&me->hlock);
+	hlist_for_each_entry_safe(map, n, &me->maps, hn) {
+		if (map->raddr == va &&
+			map->raddr + map->len == va + len &&
+			map->refs == 1) {
+			match = map;
+			hlist_del_init(&map->hn);
+			break;
+		}
+	}
+	spin_unlock(&me->hlock);
+	if (match) {
+		*ppmap = match;
+		return 0;
+	}
+	spin_lock(&fl->hlock);
+	hlist_for_each_entry_safe(map, n, &fl->maps, hn) {
+		if (map->raddr == va &&
+			map->raddr + map->len == va + len &&
+			map->refs == 1) {
+			match = map;
+			hlist_del_init(&map->hn);
+			break;
+		}
+	}
+	spin_unlock(&fl->hlock);
+	if (match) {
+		*ppmap = match;
+		return 0;
+	}
+	return -ENOTTY;
+}
+
+static void fastrpc_mmap_free(struct fastrpc_mmap *map)
+{
+	struct fastrpc_apps *me = &gfa;
+	struct fastrpc_file *fl;
+	int vmid;
+	struct fastrpc_session_ctx *sess;
+
+	if (!map)
+		return;
+	fl = map->fl;
+	if (map->flags == ADSP_MMAP_HEAP_ADDR ||
+				map->flags == ADSP_MMAP_REMOTE_HEAP_ADDR) {
+		spin_lock(&me->hlock);
+		map->refs--;
+		if (!map->refs)
+			hlist_del_init(&map->hn);
+		spin_unlock(&me->hlock);
+	} else {
+		spin_lock(&fl->hlock);
+		map->refs--;
+		if (!map->refs)
+			hlist_del_init(&map->hn);
+		spin_unlock(&fl->hlock);
+	}
+	if (map->refs > 0)
+		return;
+	if (map->flags == ADSP_MMAP_HEAP_ADDR ||
+				map->flags == ADSP_MMAP_REMOTE_HEAP_ADDR) {
+		DEFINE_DMA_ATTRS(attrs);
+
+		if (me->dev == NULL) {
+			pr_err("failed to free remote heap allocation\n");
+			return;
+		}
+		if (map->phys) {
+			dma_set_attr(DMA_ATTR_SKIP_ZEROING, &attrs);
+			dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &attrs);
+			dma_free_attrs(me->dev, map->size,
+					(void *)map->va, map->phys, &attrs);
+		}
+	} else {
+		int destVM[1] = {VMID_HLOS};
+		int destVMperm[1] = {PERM_READ | PERM_WRITE | PERM_EXEC};
+
+		if (map->secure)
+			sess = fl->secsctx;
+		else
+			sess = fl->sctx;
+
+		if (!IS_ERR_OR_NULL(map->handle))
+			ion_free(fl->apps->client, map->handle);
+		if (sess && sess->smmu.enabled) {
+			if (map->size || map->phys)
+				msm_dma_unmap_sg(sess->smmu.dev,
+					map->table->sgl,
+					map->table->nents, DMA_BIDIRECTIONAL,
+					map->buf);
+		}
+		vmid = fl->apps->channel[fl->cid].vmid;
+		if (vmid && map->phys) {
+			int srcVM[2] = {VMID_HLOS, vmid};
+
+			hyp_assign_phys(map->phys, buf_page_size(map->size),
+				srcVM, 2, destVM, destVMperm, 1);
+		}
+
+		if (!IS_ERR_OR_NULL(map->table))
+			dma_buf_unmap_attachment(map->attach, map->table,
+					DMA_BIDIRECTIONAL);
+		if (!IS_ERR_OR_NULL(map->attach))
+			dma_buf_detach(map->buf, map->attach);
+		if (!IS_ERR_OR_NULL(map->buf))
+			dma_buf_put(map->buf);
+	}
+	kfree(map);
+}
+
+static int fastrpc_session_alloc(struct fastrpc_channel_ctx *chan, int secure,
+					struct fastrpc_session_ctx **session);
+
+static int fastrpc_mmap_create(struct fastrpc_file *fl, int fd, unsigned attr,
+	uintptr_t va, size_t len, int mflags, struct fastrpc_mmap **ppmap)
+{
+	struct fastrpc_apps *me = &gfa;
+	struct fastrpc_session_ctx *sess;
+	struct fastrpc_apps *apps = fl->apps;
+	int cid = fl->cid;
+	struct fastrpc_channel_ctx *chan = &apps->channel[cid];
+	struct fastrpc_mmap *map = NULL;
+	struct dma_attrs attrs;
+	phys_addr_t region_start = 0;
+	unsigned long flags;
+	int err = 0, vmid;
+
+	if (!fastrpc_mmap_find(fl, fd, va, len, mflags, ppmap))
+		return 0;
+	map = kzalloc(sizeof(*map), GFP_KERNEL);
+	VERIFY(err, !IS_ERR_OR_NULL(map));
+	if (err)
+		goto bail;
+	INIT_HLIST_NODE(&map->hn);
+	map->flags = mflags;
+	map->refs = 1;
+	map->fl = fl;
+	map->fd = fd;
+	map->attr = attr;
+	if (mflags == ADSP_MMAP_HEAP_ADDR ||
+				mflags == ADSP_MMAP_REMOTE_HEAP_ADDR) {
+		map->apps = me;
+		map->fl = NULL;
+		VERIFY(err, !dma_alloc_memory(&region_start, len));
+		if (err)
+			goto bail;
+		map->phys = (uintptr_t)region_start;
+		map->size = len;
+		map->va = (uintptr_t)map->phys;
+	} else {
+		VERIFY(err, !IS_ERR_OR_NULL(map->handle =
+				ion_import_dma_buf(fl->apps->client, fd)));
+		if (err)
+			goto bail;
+		VERIFY(err, !ion_handle_get_flags(fl->apps->client, map->handle,
+						&flags));
+		if (err)
+			goto bail;
+
+		map->uncached = !ION_IS_CACHED(flags);
+		if (map->attr & FASTRPC_ATTR_NOVA)
+			map->uncached = 1;
+
+		map->secure = flags & ION_FLAG_SECURE;
+		if (map->secure) {
+			if (!fl->secsctx)
+				err = fastrpc_session_alloc(chan, 1,
+							&fl->secsctx);
+			if (err)
+				goto bail;
+		}
+		if (map->secure)
+			sess = fl->secsctx;
+		else
+			sess = fl->sctx;
+
+		VERIFY(err, !IS_ERR_OR_NULL(sess));
+		if (err)
+			goto bail;
+		VERIFY(err, !IS_ERR_OR_NULL(map->buf = dma_buf_get(fd)));
+		if (err)
+			goto bail;
+		VERIFY(err, !IS_ERR_OR_NULL(map->attach =
+				dma_buf_attach(map->buf, sess->smmu.dev)));
+		if (err)
+			goto bail;
+		VERIFY(err, !IS_ERR_OR_NULL(map->table =
+			dma_buf_map_attachment(map->attach,
+				DMA_BIDIRECTIONAL)));
+		if (err)
+			goto bail;
+		if (sess->smmu.enabled) {
+			init_dma_attrs(&attrs);
+			dma_set_attr(DMA_ATTR_EXEC_MAPPING, &attrs);
+
+			if ((map->attr & FASTRPC_ATTR_NON_COHERENT) ||
+				(sess->smmu.coherent && map->uncached))
+				dma_set_attr(DMA_ATTR_FORCE_NON_COHERENT,
+								 &attrs);
+			else if (map->attr & FASTRPC_ATTR_COHERENT)
+				dma_set_attr(DMA_ATTR_FORCE_COHERENT, &attrs);
+
+			VERIFY(err, map->table->nents ==
+					msm_dma_map_sg_attrs(sess->smmu.dev,
+					map->table->sgl, map->table->nents,
+					DMA_BIDIRECTIONAL, map->buf, &attrs));
+			if (err)
+				goto bail;
+		} else {
+			VERIFY(err, map->table->nents == 1);
+			if (err)
+				goto bail;
+		}
+		map->phys = sg_dma_address(map->table->sgl);
+		if (sess->smmu.cb) {
+			map->phys += ((uint64_t)sess->smmu.cb << 32);
+			map->size = sg_dma_len(map->table->sgl);
+		} else {
+			map->size = buf_page_size(len);
+		}
+		vmid = fl->apps->channel[fl->cid].vmid;
+		if (vmid) {
+			int srcVM[1] = {VMID_HLOS};
+			int destVM[2] = {VMID_HLOS, vmid};
+			int destVMperm[2] = {PERM_READ | PERM_WRITE | PERM_EXEC,
+					PERM_READ | PERM_WRITE | PERM_EXEC};
+
+			VERIFY(err, !hyp_assign_phys(map->phys,
+					buf_page_size(map->size),
+					srcVM, 1, destVM, destVMperm, 2));
+			if (err)
+				goto bail;
+		}
+		map->va = va;
+	}
+	map->len = len;
+
+	fastrpc_mmap_add(map);
+	*ppmap = map;
+
+bail:
+	if (err && map)
+		fastrpc_mmap_free(map);
+	return err;
+}
+
+static int fastrpc_buf_alloc(struct fastrpc_file *fl, size_t size,
+			     struct fastrpc_buf **obuf)
+{
+	int err = 0, vmid;
+	struct fastrpc_buf *buf = NULL, *fr = NULL;
+	struct hlist_node *n;
+
+	VERIFY(err, size > 0);
+	if (err)
+		goto bail;
+
+	/* find the smallest buffer that fits in the cache */
+	spin_lock(&fl->hlock);
+	hlist_for_each_entry_safe(buf, n, &fl->bufs, hn) {
+		if (buf->size >= size && (!fr || fr->size > buf->size))
+			fr = buf;
+	}
+	if (fr)
+		hlist_del_init(&fr->hn);
+	spin_unlock(&fl->hlock);
+	if (fr) {
+		*obuf = fr;
+		return 0;
+	}
+	buf = NULL;
+	VERIFY(err, NULL != (buf = kzalloc(sizeof(*buf), GFP_KERNEL)));
+	if (err)
+		goto bail;
+	INIT_HLIST_NODE(&buf->hn);
+	buf->fl = fl;
+	buf->virt = NULL;
+	buf->phys = 0;
+	buf->size = size;
+	buf->virt = dma_alloc_coherent(fl->sctx->smmu.dev, buf->size,
+				       (void *)&buf->phys, GFP_KERNEL);
+	if (IS_ERR_OR_NULL(buf->virt)) {
+		/* free cache and retry */
+		fastrpc_buf_list_free(fl);
+		buf->virt = dma_alloc_coherent(fl->sctx->smmu.dev, buf->size,
+					       (void *)&buf->phys, GFP_KERNEL);
+		VERIFY(err, !IS_ERR_OR_NULL(buf->virt));
+	}
+	if (err)
+		goto bail;
+	if (fl->sctx->smmu.cb)
+		buf->phys += ((uint64_t)fl->sctx->smmu.cb << 32);
+	vmid = fl->apps->channel[fl->cid].vmid;
+	if (vmid) {
+		int srcVM[1] = {VMID_HLOS};
+		int destVM[2] = {VMID_HLOS, vmid};
+		int destVMperm[2] = {PERM_READ | PERM_WRITE | PERM_EXEC,
+					PERM_READ | PERM_WRITE | PERM_EXEC};
+
+		VERIFY(err, !hyp_assign_phys(buf->phys, buf_page_size(size),
+			srcVM, 1, destVM, destVMperm, 2));
+		if (err)
+			goto bail;
+	}
+
+	*obuf = buf;
+ bail:
+	if (err && buf)
+		fastrpc_buf_free(buf, 0);
+	return err;
+}
+
+static int context_restore_interrupted(struct fastrpc_file *fl,
+				       struct fastrpc_ioctl_invoke_attrs *inv,
+				       struct smq_invoke_ctx **po)
+{
+	int err = 0;
+	struct smq_invoke_ctx *ctx = NULL, *ictx = NULL;
+	struct hlist_node *n;
+	struct fastrpc_ioctl_invoke *invoke = &inv->inv;
+	spin_lock(&fl->hlock);
+	hlist_for_each_entry_safe(ictx, n, &fl->clst.interrupted, hn) {
+		if (ictx->pid == current->pid) {
+			if (invoke->sc != ictx->sc || ictx->fl != fl)
+				err = -1;
+			else {
+				ctx = ictx;
+				hlist_del_init(&ctx->hn);
+				hlist_add_head(&ctx->hn, &fl->clst.pending);
+			}
+			break;
+		}
+	}
+	spin_unlock(&fl->hlock);
+	if (ctx)
+		*po = ctx;
+	return err;
+}
+
+#define CMP(aa, bb) ((aa) == (bb) ? 0 : (aa) < (bb) ? -1 : 1)
+static int overlap_ptr_cmp(const void *a, const void *b)
+{
+	struct overlap *pa = *((struct overlap **)a);
+	struct overlap *pb = *((struct overlap **)b);
+	/* sort with lowest starting buffer first */
+	int st = CMP(pa->start, pb->start);
+	/* sort with highest ending buffer first */
+	int ed = CMP(pb->end, pa->end);
+	return st == 0 ? ed : st;
+}
+
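+/*
+ * Sort the buffers by start address (ties broken by largest end first)
+ * and trim each buffer's copy window (mstart/mend) so that overlapping
+ * user buffers are copied into the shared region only once; a buffer
+ * fully contained in an earlier one ends up with an empty window.
+ */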
+static int context_build_overlap(struct smq_invoke_ctx *ctx)
+{
+	int i, err = 0;
+	remote_arg_t *lpra = ctx->lpra;
+	int inbufs = REMOTE_SCALARS_INBUFS(ctx->sc);
+	int outbufs = REMOTE_SCALARS_OUTBUFS(ctx->sc);
+	int nbufs = inbufs + outbufs;
+	struct overlap max;
+	for (i = 0; i < nbufs; ++i) {
+		ctx->overs[i].start = (uintptr_t)lpra[i].buf.pv;
+		ctx->overs[i].end = ctx->overs[i].start + lpra[i].buf.len;
+		if (lpra[i].buf.len) {
+			VERIFY(err, ctx->overs[i].end > ctx->overs[i].start);
+			if (err)
+				goto bail;
+		}
+		ctx->overs[i].raix = i;
+		ctx->overps[i] = &ctx->overs[i];
+	}
+	sort(ctx->overps, nbufs, sizeof(*ctx->overps), overlap_ptr_cmp, NULL);
+	max.start = 0;
+	max.end = 0;
+	for (i = 0; i < nbufs; ++i) {
+		if (ctx->overps[i]->start < max.end) {
+			ctx->overps[i]->mstart = max.end;
+			ctx->overps[i]->mend = ctx->overps[i]->end;
+			ctx->overps[i]->offset = max.end -
+				ctx->overps[i]->start;
+			if (ctx->overps[i]->end > max.end) {
+				max.end = ctx->overps[i]->end;
+			} else {
+				ctx->overps[i]->mend = 0;
+				ctx->overps[i]->mstart = 0;
+			}
+		} else  {
+			ctx->overps[i]->mend = ctx->overps[i]->end;
+			ctx->overps[i]->mstart = ctx->overps[i]->start;
+			ctx->overps[i]->offset = 0;
+			max = *ctx->overps[i];
+		}
+	}
+bail:
+	return err;
+}
+
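+/*
+ * Copy helpers: user-space invocations go through copy_from_user /
+ * copy_to_user, while in-kernel invocations (kernel != 0) already hold
+ * kernel pointers, so a plain memmove suffices.
+ */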
+#define K_COPY_FROM_USER(err, kernel, dst, src, size) \
+	do {\
+		if (!(kernel))\
+			VERIFY(err, 0 == copy_from_user((dst),\
+			(void const __user *)(src),\
+							(size)));\
+		else\
+			memmove((dst), (src), (size));\
+	} while (0)
+
+#define K_COPY_TO_USER(err, kernel, dst, src, size) \
+	do {\
+		if (!(kernel))\
+			VERIFY(err, 0 == copy_to_user((void __user *)(dst), \
+						(src), (size)));\
+		else\
+			memmove((dst), (src), (size));\
+	} while (0)
+
+static void context_free(struct smq_invoke_ctx *ctx);
+
+static int context_alloc(struct fastrpc_file *fl, uint32_t kernel,
+			 struct fastrpc_ioctl_invoke_attrs *invokefd,
+			 struct smq_invoke_ctx **po)
+{
+	int err = 0, bufs, ii, size = 0;
+	struct fastrpc_apps *me = &gfa;
+	struct smq_invoke_ctx *ctx = NULL;
+	struct fastrpc_ctx_lst *clst = &fl->clst;
+	struct fastrpc_ioctl_invoke *invoke = &invokefd->inv;
+
+	bufs = REMOTE_SCALARS_LENGTH(invoke->sc);
+	size = bufs * sizeof(*ctx->lpra) + bufs * sizeof(*ctx->maps) +
+		sizeof(*ctx->fds) * (bufs) +
+		sizeof(*ctx->attrs) * (bufs) +
+		sizeof(*ctx->overs) * (bufs) +
+		sizeof(*ctx->overps) * (bufs);
+
+	VERIFY(err, NULL != (ctx = kzalloc(sizeof(*ctx) + size, GFP_KERNEL)));
+	if (err)
+		goto bail;
+
+	INIT_HLIST_NODE(&ctx->hn);
+	hlist_add_fake(&ctx->hn);
+	ctx->fl = fl;
+	ctx->maps = (struct fastrpc_mmap **)(&ctx[1]);
+	ctx->lpra = (remote_arg_t *)(&ctx->maps[bufs]);
+	ctx->fds = (int *)(&ctx->lpra[bufs]);
+	ctx->attrs = (unsigned *)(&ctx->fds[bufs]);
+	ctx->overs = (struct overlap *)(&ctx->attrs[bufs]);
+	ctx->overps = (struct overlap **)(&ctx->overs[bufs]);
+
+	K_COPY_FROM_USER(err, kernel, (void *)ctx->lpra, invoke->pra,
+					bufs * sizeof(*ctx->lpra));
+	if (err)
+		goto bail;
+
+	if (invokefd->fds) {
+		K_COPY_FROM_USER(err, kernel, ctx->fds, invokefd->fds,
+						bufs * sizeof(*ctx->fds));
+		if (err)
+			goto bail;
+	}
+	if (invokefd->attrs) {
+		K_COPY_FROM_USER(err, kernel, ctx->attrs, invokefd->attrs,
+						bufs * sizeof(*ctx->attrs));
+		if (err)
+			goto bail;
+	}
+
+	ctx->sc = invoke->sc;
+	if (bufs) {
+		VERIFY(err, 0 == context_build_overlap(ctx));
+		if (err)
+			goto bail;
+	}
+	ctx->retval = -1;
+	ctx->pid = current->pid;
+	ctx->tgid = current->tgid;
+	init_completion(&ctx->work);
+	ctx->magic = FASTRPC_CTX_MAGIC;
+
+	spin_lock(&fl->hlock);
+	hlist_add_head(&ctx->hn, &clst->pending);
+	spin_unlock(&fl->hlock);
+
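+	/*
+	 * Publish the context in the global table.  The context id packs
+	 * the table index into bits [11:4] (FASTRPC_CTXID_MASK) of the
+	 * context pointer with its low 12 bits cleared; the response
+	 * handler uses rsp.ctx to recover the index and find this context.
+	 */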
+	spin_lock(&me->ctxlock);
+	for (ii = 0; ii < FASTRPC_CTX_MAX; ii++) {
+		if (!me->ctxtable[ii]) {
+			me->ctxtable[ii] = ctx;
+			ctx->ctxid = (ptr_to_uint64(ctx) & ~0xFFF) | (ii << 4);
+			break;
+		}
+	}
+	spin_unlock(&me->ctxlock);
+	VERIFY(err, ii < FASTRPC_CTX_MAX);
+	if (err) {
+		pr_err("adsprpc: out of context memory\n");
+		goto bail;
+	}
+
+	*po = ctx;
+bail:
+	if (ctx && err)
+		context_free(ctx);
+	return err;
+}
+
+static void context_save_interrupted(struct smq_invoke_ctx *ctx)
+{
+	struct fastrpc_ctx_lst *clst = &ctx->fl->clst;
+	spin_lock(&ctx->fl->hlock);
+	hlist_del_init(&ctx->hn);
+	hlist_add_head(&ctx->hn, &clst->interrupted);
+	spin_unlock(&ctx->fl->hlock);
+	/* free the cache on power collapse */
+	fastrpc_buf_list_free(ctx->fl);
+}
+
+static void context_free(struct smq_invoke_ctx *ctx)
+{
+	int i;
+	struct fastrpc_apps *me = &gfa;
+	int nbufs = REMOTE_SCALARS_INBUFS(ctx->sc) +
+		    REMOTE_SCALARS_OUTBUFS(ctx->sc);
+	spin_lock(&ctx->fl->hlock);
+	hlist_del_init(&ctx->hn);
+	spin_unlock(&ctx->fl->hlock);
+	for (i = 0; i < nbufs; ++i)
+		fastrpc_mmap_free(ctx->maps[i]);
+	fastrpc_buf_free(ctx->buf, 1);
+	ctx->magic = 0;
+	ctx->ctxid = 0;
+
+	spin_lock(&me->ctxlock);
+	for (i = 0; i < FASTRPC_CTX_MAX; i++) {
+		if (me->ctxtable[i] == ctx) {
+			me->ctxtable[i] = NULL;
+			break;
+		}
+	}
+	spin_unlock(&me->ctxlock);
+
+	kfree(ctx);
+}
+
+static void context_notify_user(struct smq_invoke_ctx *ctx, int retval)
+{
+	ctx->retval = retval;
+	complete(&ctx->work);
+}
+
+static void fastrpc_notify_users(struct fastrpc_file *me)
+{
+	struct smq_invoke_ctx *ictx;
+	struct hlist_node *n;
+	spin_lock(&me->hlock);
+	hlist_for_each_entry_safe(ictx, n, &me->clst.pending, hn) {
+		complete(&ictx->work);
+	}
+	hlist_for_each_entry_safe(ictx, n, &me->clst.interrupted, hn) {
+		complete(&ictx->work);
+	}
+	spin_unlock(&me->hlock);
+}
+
+static void fastrpc_notify_drivers(struct fastrpc_apps *me, int cid)
+{
+	struct fastrpc_file *fl;
+	struct hlist_node *n;
+	spin_lock(&me->hlock);
+	hlist_for_each_entry_safe(fl, n, &me->drivers, hn) {
+		if (fl->cid == cid)
+			fastrpc_notify_users(fl);
+	}
+	spin_unlock(&me->hlock);
+}
+
+static void context_list_ctor(struct fastrpc_ctx_lst *me)
+{
+	INIT_HLIST_HEAD(&me->interrupted);
+	INIT_HLIST_HEAD(&me->pending);
+}
+
+static void fastrpc_context_list_dtor(struct fastrpc_file *fl)
+{
+	struct fastrpc_ctx_lst *clst = &fl->clst;
+	struct smq_invoke_ctx *ictx = NULL, *ctxfree;
+	struct hlist_node *n;
+	do {
+		ctxfree = NULL;
+		spin_lock(&fl->hlock);
+		hlist_for_each_entry_safe(ictx, n, &clst->interrupted, hn) {
+			hlist_del_init(&ictx->hn);
+			ctxfree = ictx;
+			break;
+		}
+		spin_unlock(&fl->hlock);
+		if (ctxfree)
+			context_free(ctxfree);
+	} while (ctxfree);
+	do {
+		ctxfree = NULL;
+		spin_lock(&fl->hlock);
+		hlist_for_each_entry_safe(ictx, n, &clst->pending, hn) {
+			hlist_del_init(&ictx->hn);
+			ctxfree = ictx;
+			break;
+		}
+		spin_unlock(&fl->hlock);
+		if (ctxfree)
+			context_free(ctxfree);
+	} while (ctxfree);
+}
+
+static int fastrpc_file_free(struct fastrpc_file *fl);
+static void fastrpc_file_list_dtor(struct fastrpc_apps *me)
+{
+	struct fastrpc_file *fl, *free;
+	struct hlist_node *n;
+	do {
+		free = NULL;
+		spin_lock(&me->hlock);
+		hlist_for_each_entry_safe(fl, n, &me->drivers, hn) {
+			hlist_del_init(&fl->hn);
+			free = fl;
+			break;
+		}
+		spin_unlock(&me->hlock);
+		if (free)
+			fastrpc_file_free(free);
+	} while (free);
+}
+
+static int get_args(uint32_t kernel, struct smq_invoke_ctx *ctx)
+{
+	remote_arg64_t *rpra;
+	remote_arg_t *lpra = ctx->lpra;
+	struct smq_invoke_buf *list;
+	struct smq_phy_page *pages, *ipage;
+	uint32_t sc = ctx->sc;
+	int inbufs = REMOTE_SCALARS_INBUFS(sc);
+	int outbufs = REMOTE_SCALARS_OUTBUFS(sc);
+	int bufs = inbufs + outbufs;
+	uintptr_t args;
+	size_t rlen = 0, copylen = 0, metalen = 0;
+	int i, inh, oix;
+	int err = 0;
+	int mflags = 0;
+
+	/* calculate size of the metadata */
+	rpra = NULL;
+	list = smq_invoke_buf_start(rpra, sc);
+	pages = smq_phy_page_start(sc, list);
+	ipage = pages;
+
+	for (i = 0; i < bufs; ++i) {
+		uintptr_t buf = (uintptr_t)lpra[i].buf.pv;
+		size_t len = lpra[i].buf.len;
+
+		if (ctx->fds[i] && (ctx->fds[i] != -1))
+			fastrpc_mmap_create(ctx->fl, ctx->fds[i],
+					ctx->attrs[i], buf, len,
+					mflags, &ctx->maps[i]);
+		ipage += 1;
+	}
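+	/*
+	 * With rpra == NULL above, list/pages are offsets from address
+	 * zero, so &ipage[0] is the total metadata size in bytes.
+	 */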
+	metalen = copylen = (size_t)&ipage[0];
+	/* calculate the length required for copying */
+	for (oix = 0; oix < inbufs + outbufs; ++oix) {
+		int i = ctx->overps[oix]->raix;
+		uintptr_t mstart, mend;
+		size_t len = lpra[i].buf.len;
+
+		if (!len)
+			continue;
+		if (ctx->maps[i])
+			continue;
+		if (ctx->overps[oix]->offset == 0)
+			copylen = ALIGN(copylen, BALIGN);
+		mstart = ctx->overps[oix]->mstart;
+		mend = ctx->overps[oix]->mend;
+		VERIFY(err, (mend - mstart) <= LONG_MAX);
+		if (err)
+			goto bail;
+		copylen += mend - mstart;
+		VERIFY(err, copylen >= 0);
+		if (err)
+			goto bail;
+	}
+	ctx->used = copylen;
+
+	/* allocate new buffer */
+	if (copylen) {
+		VERIFY(err, !fastrpc_buf_alloc(ctx->fl, copylen, &ctx->buf));
+		if (err)
+			goto bail;
+	}
+	VERIFY(err, ctx->buf->virt != NULL);
+	if (err)
+		goto bail;
+	if (metalen <= copylen)
+		memset(ctx->buf->virt, 0, metalen);
+
+	/* copy metadata */
+	rpra = ctx->buf->virt;
+	ctx->rpra = rpra;
+	list = smq_invoke_buf_start(rpra, sc);
+	pages = smq_phy_page_start(sc, list);
+	ipage = pages;
+	args = (uintptr_t)ctx->buf->virt + metalen;
+	for (i = 0; i < bufs; ++i) {
+		size_t len = lpra[i].buf.len;
+		list[i].num = 0;
+		list[i].pgidx = 0;
+		if (!len)
+			continue;
+		list[i].num = 1;
+		list[i].pgidx = ipage - pages;
+		ipage++;
+	}
+	/* map ion buffers */
+	PERF(ctx->fl->profile, ctx->fl->perf.map,
+	for (i = 0; i < inbufs + outbufs; ++i) {
+		struct fastrpc_mmap *map = ctx->maps[i];
+		uint64_t buf = ptr_to_uint64(lpra[i].buf.pv);
+		size_t len = lpra[i].buf.len;
+		rpra[i].buf.pv = 0;
+		rpra[i].buf.len = len;
+		if (!len)
+			continue;
+		if (map) {
+			struct vm_area_struct *vma;
+			uintptr_t offset;
+			uint64_t num = buf_num_pages(buf, len);
+			int idx = list[i].pgidx;
+
+			if (map->attr & FASTRPC_ATTR_NOVA) {
+				offset = 0;
+			} else {
+				down_read(&current->mm->mmap_sem);
+				VERIFY(err, NULL != (vma = find_vma(current->mm,
+								map->va)));
+				if (err) {
+					up_read(&current->mm->mmap_sem);
+					goto bail;
+				}
+				offset = buf_page_start(buf) - vma->vm_start;
+				up_read(&current->mm->mmap_sem);
+				VERIFY(err, offset < (uintptr_t)map->size);
+				if (err)
+					goto bail;
+			}
+			pages[idx].addr = map->phys + offset;
+			pages[idx].size = num << PAGE_SHIFT;
+		}
+		rpra[i].buf.pv = buf;
+	}
+	PERF_END);
+
+	/* copy non ion buffers */
+	PERF(ctx->fl->profile, ctx->fl->perf.copy,
+	rlen = copylen - metalen;
+	for (oix = 0; rpra && oix < inbufs + outbufs; ++oix) {
+		int i = ctx->overps[oix]->raix;
+		struct fastrpc_mmap *map = ctx->maps[i];
+		size_t mlen;
+		uint64_t buf;
+		size_t len = lpra[i].buf.len;
+
+		if (!len)
+			continue;
+		if (map)
+			continue;
+		if (ctx->overps[oix]->offset == 0) {
+			rlen -= ALIGN(args, BALIGN) - args;
+			args = ALIGN(args, BALIGN);
+		}
+		mlen = ctx->overps[oix]->mend - ctx->overps[oix]->mstart;
+		VERIFY(err, rlen >= mlen);
+		if (err)
+			goto bail;
+		rpra[i].buf.pv = (args - ctx->overps[oix]->offset);
+		pages[list[i].pgidx].addr = ctx->buf->phys -
+					    ctx->overps[oix]->offset +
+					    (copylen - rlen);
+		pages[list[i].pgidx].addr =
+			buf_page_start(pages[list[i].pgidx].addr);
+		buf = rpra[i].buf.pv;
+		pages[list[i].pgidx].size = buf_num_pages(buf, len) * PAGE_SIZE;
+		if (i < inbufs) {
+			K_COPY_FROM_USER(err, kernel, uint64_to_ptr(buf),
+					lpra[i].buf.pv, len);
+			if (err)
+				goto bail;
+		}
+		args = args + mlen;
+		rlen -= mlen;
+	}
+	PERF_END);
+
+	PERF(ctx->fl->profile, ctx->fl->perf.flush,
+	for (oix = 0; oix < inbufs + outbufs; ++oix) {
+		int i = ctx->overps[oix]->raix;
+		struct fastrpc_mmap *map = ctx->maps[i];
+
+		if (map && map->uncached)
+			continue;
+		if (ctx->fl->sctx->smmu.coherent &&
+			!(map && (map->attr & FASTRPC_ATTR_NON_COHERENT)))
+			continue;
+		if (map && (map->attr & FASTRPC_ATTR_COHERENT))
+			continue;
+
+		if (rpra && rpra[i].buf.len && ctx->overps[oix]->mstart) {
+			if (map && map->handle)
+				msm_ion_do_cache_op(ctx->fl->apps->client,
+					map->handle,
+					uint64_to_ptr(rpra[i].buf.pv),
+					rpra[i].buf.len,
+					ION_IOC_CLEAN_INV_CACHES);
+			else
+				dmac_flush_range(uint64_to_ptr(rpra[i].buf.pv),
+					uint64_to_ptr(rpra[i].buf.pv
+						+ rpra[i].buf.len));
+		}
+	}
+	PERF_END);
+
+	inh = inbufs + outbufs;
+	for (i = 0; rpra && i < REMOTE_SCALARS_INHANDLES(sc); i++) {
+		rpra[inh + i].buf.pv = ptr_to_uint64(ctx->lpra[inh + i].buf.pv);
+		rpra[inh + i].buf.len = ctx->lpra[inh + i].buf.len;
+		rpra[inh + i].h = ctx->lpra[inh + i].h;
+	}
+
+ bail:
+	return err;
+}
+
+static int put_args(uint32_t kernel, struct smq_invoke_ctx *ctx,
+		    remote_arg_t *upra)
+{
+	uint32_t sc = ctx->sc;
+	remote_arg64_t *rpra = ctx->rpra;
+	int i, inbufs, outbufs, outh, size;
+	int err = 0;
+
+	inbufs = REMOTE_SCALARS_INBUFS(sc);
+	outbufs = REMOTE_SCALARS_OUTBUFS(sc);
+	for (i = inbufs; i < inbufs + outbufs; ++i) {
+		if (!ctx->maps[i]) {
+			K_COPY_TO_USER(err, kernel,
+				ctx->lpra[i].buf.pv,
+				uint64_to_ptr(rpra[i].buf.pv),
+				rpra[i].buf.len);
+			if (err)
+				goto bail;
+		} else {
+			fastrpc_mmap_free(ctx->maps[i]);
+			ctx->maps[i] = NULL;
+		}
+	}
+	size = sizeof(*rpra) * REMOTE_SCALARS_OUTHANDLES(sc);
+	if (size) {
+		outh = inbufs + outbufs + REMOTE_SCALARS_INHANDLES(sc);
+		K_COPY_TO_USER(err, kernel, &upra[outh], &rpra[outh], size);
+		if (err)
+			goto bail;
+	}
+ bail:
+	return err;
+}
+
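+/*
+ * Flush only the cache lines at the unaligned head and tail of each
+ * output buffer before invoke; whole-buffer maintenance is done in
+ * inv_args().  This appears to guard against dirty lines that straddle
+ * the buffer boundaries.
+ */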
+static void inv_args_pre(struct smq_invoke_ctx *ctx)
+{
+	int i, inbufs, outbufs;
+	uint32_t sc = ctx->sc;
+	remote_arg64_t *rpra = ctx->rpra;
+	uintptr_t end;
+
+	inbufs = REMOTE_SCALARS_INBUFS(sc);
+	outbufs = REMOTE_SCALARS_OUTBUFS(sc);
+	for (i = inbufs; i < inbufs + outbufs; ++i) {
+		struct fastrpc_mmap *map = ctx->maps[i];
+
+		if (map && map->uncached)
+			continue;
+		if (!rpra[i].buf.len)
+			continue;
+		if (ctx->fl->sctx->smmu.coherent &&
+			!(map && (map->attr & FASTRPC_ATTR_NON_COHERENT)))
+			continue;
+		if (map && (map->attr & FASTRPC_ATTR_COHERENT))
+			continue;
+
+		if (buf_page_start(ptr_to_uint64((void *)rpra)) ==
+				buf_page_start(rpra[i].buf.pv))
+			continue;
+		if (!IS_CACHE_ALIGNED((uintptr_t)
+				uint64_to_ptr(rpra[i].buf.pv))) {
+			if (map && map->handle)
+				msm_ion_do_cache_op(ctx->fl->apps->client,
+					map->handle,
+					uint64_to_ptr(rpra[i].buf.pv),
+					sizeof(uintptr_t),
+					ION_IOC_CLEAN_INV_CACHES);
+			else
+				dmac_flush_range(
+					uint64_to_ptr(rpra[i].buf.pv), (char *)
+					uint64_to_ptr(rpra[i].buf.pv + 1));
+		}
+
+		end = (uintptr_t)uint64_to_ptr(rpra[i].buf.pv +
+							rpra[i].buf.len);
+		if (!IS_CACHE_ALIGNED(end)) {
+			if (map && map->handle)
+				msm_ion_do_cache_op(ctx->fl->apps->client,
+						map->handle,
+						uint64_to_ptr(end),
+						sizeof(uintptr_t),
+						ION_IOC_CLEAN_INV_CACHES);
+			else
+				dmac_flush_range((char *)end,
+					(char *)end + 1);
+		}
+	}
+}
+
+static void inv_args(struct smq_invoke_ctx *ctx)
+{
+	int i, inbufs, outbufs;
+	uint32_t sc = ctx->sc;
+	remote_arg64_t *rpra = ctx->rpra;
+	int inv = 0;
+
+	inbufs = REMOTE_SCALARS_INBUFS(sc);
+	outbufs = REMOTE_SCALARS_OUTBUFS(sc);
+	for (i = inbufs; i < inbufs + outbufs; ++i) {
+		struct fastrpc_mmap *map = ctx->maps[i];
+
+		if (map && map->uncached)
+			continue;
+		if (!rpra[i].buf.len)
+			continue;
+		if (ctx->fl->sctx->smmu.coherent &&
+			!(map && (map->attr & FASTRPC_ATTR_NON_COHERENT)))
+			continue;
+		if (map && (map->attr & FASTRPC_ATTR_COHERENT))
+			continue;
+
+		if (buf_page_start(ptr_to_uint64((void *)rpra)) ==
+				buf_page_start(rpra[i].buf.pv)) {
+			inv = 1;
+			continue;
+		}
+		if (map && map->handle)
+			msm_ion_do_cache_op(ctx->fl->apps->client, map->handle,
+				(char *)uint64_to_ptr(rpra[i].buf.pv),
+				rpra[i].buf.len, ION_IOC_INV_CACHES);
+		else
+			dmac_inv_range((char *)uint64_to_ptr(rpra[i].buf.pv),
+				(char *)uint64_to_ptr(rpra[i].buf.pv
+						 + rpra[i].buf.len));
+	}
+
+}
+static int fastrpc_invoke_send(struct smq_invoke_ctx *ctx,
+			       uint32_t kernel, uint32_t handle)
+{
+	struct smq_msg *msg = &ctx->msg;
+	struct fastrpc_file *fl = ctx->fl;
+	struct fastrpc_channel_ctx *channel_ctx = &fl->apps->channel[fl->cid];
+	int err = 0, len;
+
+	VERIFY(err, NULL != channel_ctx->chan);
+	if (err)
+		goto bail;
+	msg->pid = current->tgid;
+	msg->tid = current->pid;
+	if (kernel)
+		msg->pid = 0;
+	msg->invoke.header.ctx = ctx->ctxid | fl->pd;
+	msg->invoke.header.handle = handle;
+	msg->invoke.header.sc = ctx->sc;
+	msg->invoke.page.addr = ctx->buf ? ctx->buf->phys : 0;
+	msg->invoke.page.size = buf_page_size(ctx->used);
+
+	if (fl->apps->glink) {
+		if (fl->ssrcount != channel_ctx->ssrcount) {
+			err = -ECONNRESET;
+			goto bail;
+		}
+		VERIFY(err, channel_ctx->link.port_state ==
+				FASTRPC_LINK_CONNECTED);
+		if (err)
+			goto bail;
+		err = glink_tx(channel_ctx->chan,
+			(void *)&fl->apps->channel[fl->cid], msg, sizeof(*msg),
+			GLINK_TX_REQ_INTENT);
+	} else {
+		spin_lock(&fl->apps->hlock);
+		len = smd_write((smd_channel_t *)
+				channel_ctx->chan,
+				msg, sizeof(*msg));
+		spin_unlock(&fl->apps->hlock);
+		VERIFY(err, len == sizeof(*msg));
+	}
+ bail:
+	return err;
+}
+
+static void fastrpc_smd_read_handler(int cid)
+{
+	struct fastrpc_apps *me = &gfa;
+	struct smq_invoke_rsp rsp = {0};
+	int ret = 0, err = 0;
+	uint32_t index;
+
+	do {
+		ret = smd_read_from_cb(me->channel[cid].chan, &rsp,
+					sizeof(rsp));
+		if (ret != sizeof(rsp))
+			break;
+		index = (uint32_t)((rsp.ctx & FASTRPC_CTXID_MASK) >> 4);
+		VERIFY(err, index < FASTRPC_CTX_MAX);
+		if (err)
+			goto bail;
+
+		VERIFY(err, !IS_ERR_OR_NULL(me->ctxtable[index]));
+		if (err)
+			goto bail;
+
+		VERIFY(err, ((me->ctxtable[index]->ctxid == (rsp.ctx & ~1)) &&
+			me->ctxtable[index]->magic == FASTRPC_CTX_MAGIC));
+		if (err)
+			goto bail;
+
+		context_notify_user(me->ctxtable[index], rsp.retval);
+	} while (ret == sizeof(rsp));
+
+bail:
+	if (err)
+		pr_err("adsprpc: invalid response or context\n");
+}
+
+static void smd_event_handler(void *priv, unsigned event)
+{
+	struct fastrpc_apps *me = &gfa;
+	int cid = (int)(uintptr_t)priv;
+
+	switch (event) {
+	case SMD_EVENT_OPEN:
+		complete(&me->channel[cid].workport);
+		break;
+	case SMD_EVENT_CLOSE:
+		fastrpc_notify_drivers(me, cid);
+		break;
+	case SMD_EVENT_DATA:
+		fastrpc_smd_read_handler(cid);
+		break;
+	}
+}
+
+static void fastrpc_init(struct fastrpc_apps *me)
+{
+	int i;
+	INIT_HLIST_HEAD(&me->drivers);
+	INIT_HLIST_HEAD(&me->maps);
+	spin_lock_init(&me->hlock);
+	spin_lock_init(&me->ctxlock);
+	mutex_init(&me->smd_mutex);
+	me->channel = &gcinfo[0];
+	for (i = 0; i < NUM_CHANNELS; i++) {
+		init_completion(&me->channel[i].work);
+		init_completion(&me->channel[i].workport);
+		me->channel[i].sesscount = 0;
+	}
+}
+
+static int fastrpc_release_current_dsp_process(struct fastrpc_file *fl);
+
+static int fastrpc_internal_invoke(struct fastrpc_file *fl, uint32_t mode,
+				   uint32_t kernel,
+				   struct fastrpc_ioctl_invoke_attrs *inv)
+{
+	struct smq_invoke_ctx *ctx = NULL;
+	struct fastrpc_ioctl_invoke *invoke = &inv->inv;
+	int cid = fl->cid;
+	int interrupted = 0;
+	int err = 0;
+	struct timespec invoket = {0};
+
+	VERIFY(err, fl->sctx);
+	if (err)
+		goto bail;
+	VERIFY(err, fl->cid >= 0 && fl->cid < NUM_CHANNELS);
+	if (err)
+		goto bail;
+	if (fl->profile)
+		getnstimeofday(&invoket);
+
+	if (!kernel) {
+		VERIFY(err, invoke->handle != FASTRPC_STATIC_HANDLE_KERNEL);
+		if (err) {
+			pr_err("adsprpc: ERROR: %s: user application %s trying to send a kernel RPC message to channel %d",
+				__func__, current->comm, cid);
+			goto bail;
+		}
+	}
+
+	if (!kernel) {
+		VERIFY(err, 0 == context_restore_interrupted(fl, inv,
+								&ctx));
+		if (err)
+			goto bail;
+		if (fl->sctx->smmu.faults)
+			err = FASTRPC_ENOSUCH;
+		if (err)
+			goto bail;
+		if (ctx)
+			goto wait;
+	}
+
+	VERIFY(err, 0 == context_alloc(fl, kernel, inv, &ctx));
+	if (err)
+		goto bail;
+
+	if (REMOTE_SCALARS_LENGTH(ctx->sc)) {
+		PERF(fl->profile, fl->perf.getargs,
+		VERIFY(err, 0 == get_args(kernel, ctx));
+		PERF_END);
+		if (err)
+			goto bail;
+	}
+
+	PERF(fl->profile, fl->perf.invargs,
+	inv_args_pre(ctx);
+	if (mode == FASTRPC_MODE_SERIAL)
+		inv_args(ctx);
+	PERF_END);
+
+	PERF(fl->profile, fl->perf.link,
+	VERIFY(err, 0 == fastrpc_invoke_send(ctx, kernel, invoke->handle));
+	PERF_END);
+
+	if (err)
+		goto bail;
+
+	PERF(fl->profile, fl->perf.invargs,
+	if (mode == FASTRPC_MODE_PARALLEL)
+		inv_args(ctx);
+	PERF_END);
+ wait:
+	if (kernel)
+		wait_for_completion(&ctx->work);
+	else {
+		interrupted = wait_for_completion_interruptible(&ctx->work);
+		VERIFY(err, 0 == (err = interrupted));
+		if (err)
+			goto bail;
+	}
+	VERIFY(err, 0 == (err = ctx->retval));
+	if (err)
+		goto bail;
+
+	PERF(fl->profile, fl->perf.putargs,
+	VERIFY(err, 0 == put_args(kernel, ctx, invoke->pra));
+	PERF_END);
+	if (err)
+		goto bail;
+ bail:
+	if (ctx && interrupted == -ERESTARTSYS)
+		context_save_interrupted(ctx);
+	else if (ctx)
+		context_free(ctx);
+	if (cid >= 0 && cid < NUM_CHANNELS &&
+			fl->ssrcount != fl->apps->channel[cid].ssrcount)
+		err = -ECONNRESET;
+
+	if (fl->profile && !interrupted) {
+		if (invoke->handle != FASTRPC_STATIC_HANDLE_LISTENER)
+			fl->perf.invoke += getnstimediff(&invoket);
+		if (!(invoke->handle >= 0 &&
+			invoke->handle <= FASTRPC_STATIC_HANDLE_MAX))
+			fl->perf.count++;
+	}
+	return err;
+}
+
+static int fastrpc_channel_open(struct fastrpc_file *fl);
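+
+/*
+ * Create or attach the process domain on the DSP.  ATTACH joins the
+ * guest OS PD with just the group id, CREATE loads a user ELF into a
+ * freshly mapped dynamic PD, and CREATE_STATIC spawns a named static PD,
+ * handing the remote heap pages over to the DSP VM via hyp_assign_phys()
+ * first if they have not been donated yet.
+ */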
+static int fastrpc_init_process(struct fastrpc_file *fl,
+				struct fastrpc_ioctl_init_attrs *uproc)
+{
+	int err = 0;
+	struct fastrpc_apps *me = &gfa;
+	struct fastrpc_ioctl_invoke_attrs ioctl;
+	struct fastrpc_ioctl_init *init = &uproc->init;
+	struct smq_phy_page pages[1];
+	struct fastrpc_mmap *file = NULL, *mem = NULL;
+	char *proc_name = NULL;
+	int srcVM[1] = {VMID_HLOS};
+	int destVM[1] = {gcinfo[0].heap_vmid};
+	int destVMperm[1] = {PERM_READ | PERM_WRITE | PERM_EXEC};
+	int hlosVMperm[1] = {PERM_READ | PERM_WRITE | PERM_EXEC};
+
+	VERIFY(err, 0 == (err = fastrpc_channel_open(fl)));
+	if (err)
+		goto bail;
+	if (init->flags == FASTRPC_INIT_ATTACH) {
+		remote_arg_t ra[1];
+		int tgid = current->tgid;
+		ra[0].buf.pv = (void *)&tgid;
+		ra[0].buf.len = sizeof(tgid);
+		ioctl.inv.handle = FASTRPC_STATIC_HANDLE_KERNEL;
+		ioctl.inv.sc = REMOTE_SCALARS_MAKE(0, 1, 0);
+		ioctl.inv.pra = ra;
+		ioctl.fds = NULL;
+		ioctl.attrs = NULL;
+		fl->pd = 0;
+		VERIFY(err, !(err = fastrpc_internal_invoke(fl,
+			FASTRPC_MODE_PARALLEL, 1, &ioctl)));
+		if (err)
+			goto bail;
+	} else if (init->flags == FASTRPC_INIT_CREATE) {
+		remote_arg_t ra[6];
+		int fds[6];
+		int mflags = 0;
+		struct {
+			int pgid;
+			unsigned int namelen;
+			unsigned int filelen;
+			unsigned int pageslen;
+			int attrs;
+			int siglen;
+		} inbuf;
+		inbuf.pgid = current->tgid;
+		inbuf.namelen = strlen(current->comm) + 1;
+		inbuf.filelen = init->filelen;
+		fl->pd = 1;
+
+		VERIFY(err, access_ok(0, (void const __user *)init->file,
+				init->filelen));
+		if (err)
+			goto bail;
+		if (init->filelen) {
+			VERIFY(err, !fastrpc_mmap_create(fl, init->filefd, 0,
+				init->file, init->filelen, mflags, &file));
+			if (err)
+				goto bail;
+		}
+		VERIFY(err, access_ok(1, (void const __user *)init->mem,
+				init->memlen));
+		if (err)
+			goto bail;
+		inbuf.pageslen = 1;
+		VERIFY(err, !fastrpc_mmap_create(fl, init->memfd, 0,
+				init->mem, init->memlen, mflags, &mem));
+		if (err)
+			goto bail;
+		ra[0].buf.pv = (void *)&inbuf;
+		ra[0].buf.len = sizeof(inbuf);
+		fds[0] = 0;
+
+		ra[1].buf.pv = (void *)current->comm;
+		ra[1].buf.len = inbuf.namelen;
+		fds[1] = 0;
+
+		ra[2].buf.pv = (void *)init->file;
+		ra[2].buf.len = inbuf.filelen;
+		fds[2] = init->filefd;
+
+		pages[0].addr = mem->phys;
+		pages[0].size = mem->size;
+		ra[3].buf.pv = (void *)pages;
+		ra[3].buf.len = 1 * sizeof(*pages);
+		fds[3] = 0;
+
+		inbuf.attrs = uproc->attrs;
+		ra[4].buf.pv = (void *)&(inbuf.attrs);
+		ra[4].buf.len = sizeof(inbuf.attrs);
+		fds[4] = 0;
+
+		inbuf.siglen = uproc->siglen;
+		ra[5].buf.pv = (void *)&(inbuf.siglen);
+		ra[5].buf.len = sizeof(inbuf.siglen);
+		fds[5] = 0;
+
+		ioctl.inv.handle = FASTRPC_STATIC_HANDLE_KERNEL;
+		ioctl.inv.sc = REMOTE_SCALARS_MAKE(6, 4, 0);
+		if (uproc->attrs)
+			ioctl.inv.sc = REMOTE_SCALARS_MAKE(7, 6, 0);
+		ioctl.inv.pra = ra;
+		ioctl.fds = fds;
+		ioctl.attrs = NULL;
+		VERIFY(err, !(err = fastrpc_internal_invoke(fl,
+			FASTRPC_MODE_PARALLEL, 1, &ioctl)));
+		if (err)
+			goto bail;
+	} else if (init->flags == FASTRPC_INIT_CREATE_STATIC) {
+		remote_arg_t ra[3];
+		uint64_t phys = 0;
+		size_t size = 0;
+		int fds[3];
+		struct {
+			int pgid;
+			unsigned int namelen;
+			unsigned int pageslen;
+		} inbuf;
+
+		VERIFY(err, init->filelen);
+		if (err)
+			goto bail;
+		VERIFY(err, proc_name = kzalloc(init->filelen, GFP_KERNEL));
+		if (err)
+			goto bail;
+		VERIFY(err, 0 == copy_from_user(proc_name,
+			(unsigned char *)init->file, init->filelen));
+		if (err)
+			goto bail;
+		inbuf.pgid = current->tgid;
+		inbuf.namelen = init->filelen;
+		inbuf.pageslen = 0;
+		if (!me->staticpd_flags) {
+			inbuf.pageslen = 1;
+			VERIFY(err, !fastrpc_mmap_create(fl, -1, 0, init->mem,
+				 init->memlen, ADSP_MMAP_REMOTE_HEAP_ADDR,
+				 &mem));
+			if (err)
+				goto bail;
+			phys = mem->phys;
+			size = mem->size;
+			VERIFY(err, !hyp_assign_phys(phys, (uint64_t)size,
+					srcVM, 1, destVM, destVMperm, 1));
+			if (err) {
+				pr_err("ADSPRPC: hyp_assign_phys fail err %d",
+							 err);
+				pr_err("map->phys %llx, map->size %d\n",
+							 phys, (int)size);
+				goto bail;
+			}
+			me->staticpd_flags = 1;
+		}
+
+		ra[0].buf.pv = (void *)&inbuf;
+		ra[0].buf.len = sizeof(inbuf);
+		fds[0] = 0;
+
+		ra[1].buf.pv = (void *)proc_name;
+		ra[1].buf.len = inbuf.namelen;
+		fds[1] = 0;
+
+		pages[0].addr = phys;
+		pages[0].size = size;
+
+		ra[2].buf.pv = (void *)pages;
+		ra[2].buf.len = sizeof(*pages);
+		fds[2] = 0;
+		ioctl.inv.handle = FASTRPC_STATIC_HANDLE_KERNEL;
+
+		ioctl.inv.sc = REMOTE_SCALARS_MAKE(8, 3, 0);
+		ioctl.inv.pra = ra;
+		ioctl.fds = NULL;
+		ioctl.attrs = NULL;
+		VERIFY(err, !(err = fastrpc_internal_invoke(fl,
+			FASTRPC_MODE_PARALLEL, 1, &ioctl)));
+		if (err)
+			goto bail;
+	} else {
+		err = -ENOTTY;
+	}
+bail:
+	kfree(proc_name);
+	if (err && (init->flags == FASTRPC_INIT_CREATE_STATIC))
+		me->staticpd_flags = 0;
+	if (mem && err) {
+		if (mem->flags == ADSP_MMAP_REMOTE_HEAP_ADDR)
+			hyp_assign_phys(mem->phys, (uint64_t)mem->size,
+					destVM, 1, srcVM, hlosVMperm, 1);
+		fastrpc_mmap_free(mem);
+	}
+	if (file)
+		fastrpc_mmap_free(file);
+	return err;
+}
+
+static int fastrpc_release_current_dsp_process(struct fastrpc_file *fl)
+{
+	int err = 0;
+	struct fastrpc_ioctl_invoke_attrs ioctl;
+	remote_arg_t ra[1];
+	int tgid = 0;
+
+	VERIFY(err, fl->cid >= 0 && fl->cid < NUM_CHANNELS);
+	if (err)
+		goto bail;
+	VERIFY(err, fl->apps->channel[fl->cid].chan != NULL);
+	if (err)
+		goto bail;
+	tgid = fl->tgid;
+	ra[0].buf.pv = (void *)&tgid;
+	ra[0].buf.len = sizeof(tgid);
+	ioctl.inv.handle = FASTRPC_STATIC_HANDLE_KERNEL;
+	ioctl.inv.sc = REMOTE_SCALARS_MAKE(1, 1, 0);
+	ioctl.inv.pra = ra;
+	ioctl.fds = NULL;
+	ioctl.attrs = NULL;
+	VERIFY(err, 0 == (err = fastrpc_internal_invoke(fl,
+		FASTRPC_MODE_PARALLEL, 1, &ioctl)));
+bail:
+	return err;
+}
+
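+/*
+ * Tell the DSP about a new mapping.  The remote side returns the
+ * DSP-side virtual address in routargs.  For ADSP_MMAP_HEAP_ADDR the
+ * pages are additionally protected for the subsystem through an SCM
+ * call, and for ADSP_MMAP_REMOTE_HEAP_ADDR ownership is moved from HLOS
+ * to the DSP VM with hyp_assign_phys().
+ */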
+static int fastrpc_mmap_on_dsp(struct fastrpc_file *fl, uint32_t flags,
+			       struct fastrpc_mmap *map)
+{
+	struct fastrpc_ioctl_invoke_attrs ioctl;
+	struct smq_phy_page page;
+	int num = 1;
+	remote_arg_t ra[3];
+	int err = 0;
+	struct {
+		int pid;
+		uint32_t flags;
+		uintptr_t vaddrin;
+		int num;
+	} inargs;
+
+	struct {
+		uintptr_t vaddrout;
+	} routargs;
+	inargs.pid = current->tgid;
+	inargs.vaddrin = (uintptr_t)map->va;
+	inargs.flags = flags;
+	inargs.num = fl->apps->compat ? num * sizeof(page) : num;
+	ra[0].buf.pv = (void *)&inargs;
+	ra[0].buf.len = sizeof(inargs);
+	page.addr = map->phys;
+	page.size = map->size;
+	ra[1].buf.pv = (void *)&page;
+	ra[1].buf.len = num * sizeof(page);
+
+	ra[2].buf.pv = (void *)&routargs;
+	ra[2].buf.len = sizeof(routargs);
+
+	ioctl.inv.handle = FASTRPC_STATIC_HANDLE_KERNEL;
+	if (fl->apps->compat)
+		ioctl.inv.sc = REMOTE_SCALARS_MAKE(4, 2, 1);
+	else
+		ioctl.inv.sc = REMOTE_SCALARS_MAKE(2, 2, 1);
+	ioctl.inv.pra = ra;
+	ioctl.fds = NULL;
+	ioctl.attrs = NULL;
+	VERIFY(err, 0 == (err = fastrpc_internal_invoke(fl,
+		FASTRPC_MODE_PARALLEL, 1, &ioctl)));
+	map->raddr = (uintptr_t)routargs.vaddrout;
+	if (err)
+		goto bail;
+	if (flags == ADSP_MMAP_HEAP_ADDR) {
+		struct scm_desc desc = {0};
+
+		desc.args[0] = TZ_PIL_AUTH_QDSP6_PROC;
+		desc.args[1] = map->phys;
+		desc.args[2] = map->size;
+		desc.arginfo = SCM_ARGS(3);
+		err = scm_call2(SCM_SIP_FNID(SCM_SVC_PIL,
+			TZ_PIL_PROTECT_MEM_SUBSYS_ID), &desc);
+	} else if (flags == ADSP_MMAP_REMOTE_HEAP_ADDR) {
+
+		int srcVM[1] = {VMID_HLOS};
+		int destVM[1] = {gcinfo[0].heap_vmid};
+		int destVMperm[1] = {PERM_READ | PERM_WRITE | PERM_EXEC};
+
+		VERIFY(err, !hyp_assign_phys(map->phys, (uint64_t)map->size,
+				srcVM, 1, destVM, destVMperm, 1));
+		if (err)
+			goto bail;
+	}
+bail:
+	return err;
+}
+
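+/*
+ * Undo the protection set up in fastrpc_mmap_on_dsp() for heap mappings:
+ * fetch a secure key from the DSP and ask TZ to clear the subsystem
+ * protection, or hand remote-heap pages back from the DSP VM to HLOS.
+ */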
+static int fastrpc_munmap_on_dsp_rh(struct fastrpc_file *fl,
+				 struct fastrpc_mmap *map)
+{
+	int err = 0;
+	int srcVM[1] = {gcinfo[0].heap_vmid};
+	int destVM[1] = {VMID_HLOS};
+	int destVMperm[1] = {PERM_READ | PERM_WRITE | PERM_EXEC};
+
+	if (map->flags == ADSP_MMAP_HEAP_ADDR) {
+		struct fastrpc_ioctl_invoke_attrs ioctl;
+		struct scm_desc desc = {0};
+		remote_arg_t ra[1];
+		struct {
+			uint8_t skey;
+		} routargs;
+
+		ra[0].buf.pv = (void *)&routargs;
+		ra[0].buf.len = sizeof(routargs);
+
+		ioctl.inv.handle = FASTRPC_STATIC_HANDLE_KERNEL;
+		ioctl.inv.sc = REMOTE_SCALARS_MAKE(7, 0, 1);
+		ioctl.inv.pra = ra;
+		ioctl.fds = NULL;
+		ioctl.attrs = NULL;
+		if (fl == NULL)
+			goto bail;
+
+		VERIFY(err, 0 == (err = fastrpc_internal_invoke(fl,
+				FASTRPC_MODE_PARALLEL, 1, &ioctl)));
+		if (err)
+			goto bail;
+		desc.args[0] = TZ_PIL_AUTH_QDSP6_PROC;
+		desc.args[1] = map->phys;
+		desc.args[2] = map->size;
+		desc.args[3] = routargs.skey;
+		desc.arginfo = SCM_ARGS(4);
+		err = scm_call2(SCM_SIP_FNID(SCM_SVC_PIL,
+			TZ_PIL_CLEAR_PROTECT_MEM_SUBSYS_ID), &desc);
+	} else if (map->flags == ADSP_MMAP_REMOTE_HEAP_ADDR) {
+		VERIFY(err, !hyp_assign_phys(map->phys, (uint64_t)map->size,
+					srcVM, 1, destVM, destVMperm, 1));
+		if (err)
+			goto bail;
+	}
+
+bail:
+	return err;
+}
+
+static int fastrpc_munmap_on_dsp(struct fastrpc_file *fl,
+				 struct fastrpc_mmap *map)
+{
+	struct fastrpc_ioctl_invoke_attrs ioctl;
+	remote_arg_t ra[1];
+	int err = 0;
+	struct {
+		int pid;
+		uintptr_t vaddrout;
+		size_t size;
+	} inargs;
+
+	inargs.pid = current->tgid;
+	inargs.size = map->size;
+	inargs.vaddrout = map->raddr;
+	ra[0].buf.pv = (void *)&inargs;
+	ra[0].buf.len = sizeof(inargs);
+
+	ioctl.inv.handle = FASTRPC_STATIC_HANDLE_KERNEL;
+	if (fl->apps->compat)
+		ioctl.inv.sc = REMOTE_SCALARS_MAKE(5, 1, 0);
+	else
+		ioctl.inv.sc = REMOTE_SCALARS_MAKE(3, 1, 0);
+	ioctl.inv.pra = ra;
+	ioctl.fds = NULL;
+	ioctl.attrs = NULL;
+	VERIFY(err, 0 == (err = fastrpc_internal_invoke(fl,
+		FASTRPC_MODE_PARALLEL, 1, &ioctl)));
+	if (err)
+		goto bail;
+	if (map->flags == ADSP_MMAP_HEAP_ADDR ||
+				map->flags == ADSP_MMAP_REMOTE_HEAP_ADDR) {
+		VERIFY(err, !fastrpc_munmap_on_dsp_rh(fl, map));
+		if (err)
+			goto bail;
+	}
+bail:
+	return err;
+}
+
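+/*
+ * After a subsystem restart, walk the global map list and release every
+ * remote-heap mapping: return the pages to HLOS, optionally capture them
+ * in an ELF ramdump for postmortem, then free the map.  The list is
+ * re-scanned from the top each iteration since it is modified under the
+ * lock.
+ */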
+static int fastrpc_mmap_remove_ssr(struct fastrpc_file *fl)
+{
+	struct fastrpc_mmap *match = NULL, *map = NULL;
+	struct hlist_node *n = NULL;
+	int err = 0, ret = 0;
+	struct fastrpc_apps *me = &gfa;
+	struct ramdump_segment *ramdump_segments_rh = NULL;
+
+	do {
+		match = NULL;
+		spin_lock(&me->hlock);
+		hlist_for_each_entry_safe(map, n, &me->maps, hn) {
+			match = map;
+			hlist_del_init(&map->hn);
+			break;
+		}
+		spin_unlock(&me->hlock);
+
+		if (match) {
+			VERIFY(err, !fastrpc_munmap_on_dsp_rh(fl, match));
+			if (err)
+				goto bail;
+			if (me->channel[0].ramdumpenabled) {
+				ramdump_segments_rh = kcalloc(1,
+				sizeof(struct ramdump_segment), GFP_KERNEL);
+				if (ramdump_segments_rh) {
+					ramdump_segments_rh->address =
+					match->phys;
+					ramdump_segments_rh->size = match->size;
+					ret = do_elf_ramdump(
+					 me->channel[0].remoteheap_ramdump_dev,
+					 ramdump_segments_rh, 1);
+					if (ret < 0)
+						pr_err("ADSPRPC: unable to dump heap");
+					kfree(ramdump_segments_rh);
+				}
+			}
+			fastrpc_mmap_free(match);
+		}
+	} while (match);
+bail:
+	if (err && match)
+		fastrpc_mmap_add(match);
+	return err;
+}
+
+static int fastrpc_mmap_remove(struct fastrpc_file *fl, uintptr_t va,
+			     size_t len, struct fastrpc_mmap **ppmap);
+
+static void fastrpc_mmap_add(struct fastrpc_mmap *map);
+
+static int fastrpc_internal_munmap(struct fastrpc_file *fl,
+				   struct fastrpc_ioctl_munmap *ud)
+{
+	int err = 0;
+	struct fastrpc_mmap *map = NULL;
+
+	mutex_lock(&fl->map_mutex);
+	VERIFY(err, !fastrpc_mmap_remove(fl, ud->vaddrout, ud->size, &map));
+	if (err)
+		goto bail;
+	VERIFY(err, !fastrpc_munmap_on_dsp(fl, map));
+	if (err)
+		goto bail;
+	fastrpc_mmap_free(map);
+bail:
+	if (err && map)
+		fastrpc_mmap_add(map);
+	mutex_unlock(&fl->map_mutex);
+	return err;
+}
+
+static int fastrpc_internal_mmap(struct fastrpc_file *fl,
+				 struct fastrpc_ioctl_mmap *ud)
+{
+	struct fastrpc_mmap *map = NULL;
+	int err = 0;
+
+	mutex_lock(&fl->map_mutex);
+	if (!fastrpc_mmap_find(fl, ud->fd, (uintptr_t)ud->vaddrin, ud->size,
+			       ud->flags, &map)) {
+		mutex_unlock(&fl->map_mutex);
+		return 0;
+	}
+	VERIFY(err, !fastrpc_mmap_create(fl, ud->fd, 0,
+			(uintptr_t)ud->vaddrin, ud->size, ud->flags, &map));
+	if (err)
+		goto bail;
+	VERIFY(err, 0 == fastrpc_mmap_on_dsp(fl, ud->flags, map));
+	if (err)
+		goto bail;
+	ud->vaddrout = map->raddr;
+ bail:
+	if (err && map)
+		fastrpc_mmap_free(map);
+	mutex_unlock(&fl->map_mutex);
+	return err;
+}
+
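+/*
+ * kref release callback for a channel.  Called by kref_put_mutex() with
+ * smd_mutex already held, which is why the mutex is unlocked here rather
+ * than by the caller.
+ */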
+static void fastrpc_channel_close(struct kref *kref)
+{
+	struct fastrpc_apps *me = &gfa;
+	struct fastrpc_channel_ctx *ctx;
+	int cid;
+
+	ctx = container_of(kref, struct fastrpc_channel_ctx, kref);
+	cid = ctx - &gcinfo[0];
+	if (!me->glink)
+		smd_close(ctx->chan);
+	else
+		fastrpc_glink_close(ctx->chan, cid);
+
+	ctx->chan = NULL;
+	mutex_unlock(&me->smd_mutex);
+	pr_info("'closed /dev/%s c %d %d'\n", gcinfo[cid].name,
+						MAJOR(me->dev_no), cid);
+}
+
+static void fastrpc_context_list_dtor(struct fastrpc_file *fl);
+
+static int fastrpc_session_alloc_locked(struct fastrpc_channel_ctx *chan,
+			int secure, struct fastrpc_session_ctx **session)
+{
+	struct fastrpc_apps *me = &gfa;
+	int idx = 0, err = 0;
+
+	if (chan->sesscount) {
+		for (idx = 0; idx < chan->sesscount; ++idx) {
+			if (!chan->session[idx].used &&
+				chan->session[idx].smmu.secure == secure) {
+				chan->session[idx].used = 1;
+				break;
+			}
+		}
+		VERIFY(err, idx < chan->sesscount);
+		if (err)
+			goto bail;
+		chan->session[idx].smmu.faults = 0;
+	} else {
+		VERIFY(err, me->dev != NULL);
+		if (err)
+			goto bail;
+		chan->session[0].dev = me->dev;
+		chan->session[0].smmu.dev = me->dev;
+	}
+
+	*session = &chan->session[idx];
+ bail:
+	return err;
+}
+
+static bool fastrpc_glink_notify_rx_intent_req(void *h, const void *priv,
+						size_t size)
+{
+	return !glink_queue_rx_intent(h, NULL, size);
+}
+
+static void fastrpc_glink_notify_tx_done(void *handle, const void *priv,
+		const void *pkt_priv, const void *ptr)
+{
+}
+
+static void fastrpc_glink_notify_rx(void *handle, const void *priv,
+	const void *pkt_priv, const void *ptr, size_t size)
+{
+	struct smq_invoke_rsp *rsp = (struct smq_invoke_rsp *)ptr;
+	struct fastrpc_apps *me = &gfa;
+	uint32_t index;
+	int err = 0;
+
+	VERIFY(err, (rsp && size >= sizeof(*rsp)));
+	if (err)
+		goto bail;
+
+	index = (uint32_t)((rsp->ctx & FASTRPC_CTXID_MASK) >> 4);
+	VERIFY(err, index < FASTRPC_CTX_MAX);
+	if (err)
+		goto bail;
+
+	VERIFY(err, !IS_ERR_OR_NULL(me->ctxtable[index]));
+	if (err)
+		goto bail;
+
+	VERIFY(err, ((me->ctxtable[index]->ctxid == (rsp->ctx & ~1)) &&
+		me->ctxtable[index]->magic == FASTRPC_CTX_MAGIC));
+	if (err)
+		goto bail;
+
+	context_notify_user(me->ctxtable[index], rsp->retval);
+bail:
+	if (err)
+		pr_err("adsprpc: invalid response or context\n");
+	glink_rx_done(handle, ptr, true);
+}
+
+static void fastrpc_glink_notify_state(void *handle, const void *priv,
+				unsigned int event)
+{
+	struct fastrpc_apps *me = &gfa;
+	int cid = (int)(uintptr_t)priv;
+	struct fastrpc_glink_info *link;
+
+	if (cid < 0 || cid >= NUM_CHANNELS)
+		return;
+	link = &me->channel[cid].link;
+	switch (event) {
+	case GLINK_CONNECTED:
+		link->port_state = FASTRPC_LINK_CONNECTED;
+		complete(&me->channel[cid].workport);
+		break;
+	case GLINK_LOCAL_DISCONNECTED:
+		link->port_state = FASTRPC_LINK_DISCONNECTED;
+		break;
+	case GLINK_REMOTE_DISCONNECTED:
+		if (me->channel[cid].chan) {
+			fastrpc_glink_close(me->channel[cid].chan, cid);
+			me->channel[cid].chan = NULL;
+		}
+		break;
+	default:
+		break;
+	}
+}
+
+static int fastrpc_session_alloc(struct fastrpc_channel_ctx *chan, int secure,
+					struct fastrpc_session_ctx **session)
+{
+	int err = 0;
+	struct fastrpc_apps *me = &gfa;
+
+	mutex_lock(&me->smd_mutex);
+	if (!*session)
+		err = fastrpc_session_alloc_locked(chan, secure, session);
+	mutex_unlock(&me->smd_mutex);
+	return err;
+}
+
+static void fastrpc_session_free(struct fastrpc_channel_ctx *chan,
+				struct fastrpc_session_ctx *session)
+{
+	struct fastrpc_apps *me = &gfa;
+
+	mutex_lock(&me->smd_mutex);
+	session->used = 0;
+	mutex_unlock(&me->smd_mutex);
+}
+
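+/*
+ * Tear down all per-fd state: detach from the drivers list, destroy
+ * pending contexts, buffers and maps, then drop the channel reference,
+ * but only if no subsystem restart happened since this fd acquired it
+ * (an SSR already invalidated that reference).
+ */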
+static int fastrpc_file_free(struct fastrpc_file *fl)
+{
+	struct hlist_node *n;
+	struct fastrpc_mmap *map = NULL;
+	int cid;
+
+	if (!fl)
+		return 0;
+	cid = fl->cid;
+
+	(void)fastrpc_release_current_dsp_process(fl);
+
+	spin_lock(&fl->apps->hlock);
+	hlist_del_init(&fl->hn);
+	spin_unlock(&fl->apps->hlock);
+
+	if (!fl->sctx) {
+		goto bail;
+	}
+
+	spin_lock(&fl->hlock);
+	fl->file_close = 1;
+	spin_unlock(&fl->hlock);
+	fastrpc_context_list_dtor(fl);
+	fastrpc_buf_list_free(fl);
+	hlist_for_each_entry_safe(map, n, &fl->maps, hn) {
+		fastrpc_mmap_free(map);
+	}
+	if (fl->ssrcount == fl->apps->channel[cid].ssrcount)
+		kref_put_mutex(&fl->apps->channel[cid].kref,
+				fastrpc_channel_close, &fl->apps->smd_mutex);
+	if (fl->sctx)
+		fastrpc_session_free(&fl->apps->channel[cid], fl->sctx);
+	if (fl->secsctx)
+		fastrpc_session_free(&fl->apps->channel[cid], fl->secsctx);
+bail:
+	mutex_destroy(&fl->map_mutex);
+	kfree(fl);
+	return 0;
+}
+
+static int fastrpc_device_release(struct inode *inode, struct file *file)
+{
+	struct fastrpc_file *fl = (struct fastrpc_file *)file->private_data;
+
+	if (fl) {
+		if (fl->debugfs_file != NULL)
+			debugfs_remove(fl->debugfs_file);
+
+		fastrpc_file_free(fl);
+		file->private_data = NULL;
+	}
+	return 0;
+}
+
+static void fastrpc_link_state_handler(struct glink_link_state_cb_info *cb_info,
+					 void *priv)
+{
+	struct fastrpc_apps *me = &gfa;
+	int cid = (int)((uintptr_t)priv);
+	struct fastrpc_glink_info *link;
+
+	if (cid < 0 || cid >= NUM_CHANNELS)
+		return;
+
+	link = &me->channel[cid].link;
+	switch (cb_info->link_state) {
+	case GLINK_LINK_STATE_UP:
+		link->link_state = FASTRPC_LINK_STATE_UP;
+		complete(&me->channel[cid].work);
+		break;
+	case GLINK_LINK_STATE_DOWN:
+		link->link_state = FASTRPC_LINK_STATE_DOWN;
+		break;
+	default:
+		pr_err("adsprpc: unknown link state %d\n", cb_info->link_state);
+		break;
+	}
+}
+
+static int fastrpc_glink_register(int cid, struct fastrpc_apps *me)
+{
+	int err = 0;
+	struct fastrpc_glink_info *link;
+
+	VERIFY(err, (cid >= 0 && cid < NUM_CHANNELS));
+	if (err)
+		goto bail;
+
+	link = &me->channel[cid].link;
+	if (link->link_notify_handle != NULL)
+		goto bail;
+
+	link->link_info.glink_link_state_notif_cb = fastrpc_link_state_handler;
+	link->link_notify_handle = glink_register_link_state_cb(
+					&link->link_info,
+					(void *)((uintptr_t)cid));
+	VERIFY(err, !IS_ERR_OR_NULL(me->channel[cid].link.link_notify_handle));
+	if (err) {
+		link->link_notify_handle = NULL;
+		goto bail;
+	}
+	VERIFY(err, wait_for_completion_timeout(&me->channel[cid].work,
+			RPC_TIMEOUT));
+bail:
+	return err;
+}
+
+static void fastrpc_glink_close(void *chan, int cid)
+{
+	int err = 0;
+	struct fastrpc_glink_info *link;
+
+	VERIFY(err, (cid >= 0 && cid < NUM_CHANNELS));
+	if (err)
+		return;
+	link = &gfa.channel[cid].link;
+
+	if (link->port_state == FASTRPC_LINK_CONNECTED) {
+		link->port_state = FASTRPC_LINK_DISCONNECTING;
+		glink_close(chan);
+	}
+}
+
+static int fastrpc_glink_open(int cid)
+{
+	int err = 0;
+	void *handle = NULL;
+	struct fastrpc_apps *me = &gfa;
+	struct glink_open_config *cfg;
+	struct fastrpc_glink_info *link;
+
+	VERIFY(err, (cid >= 0 && cid < NUM_CHANNELS));
+	if (err)
+		goto bail;
+	link = &me->channel[cid].link;
+	cfg = &me->channel[cid].link.cfg;
+	VERIFY(err, (link->link_state == FASTRPC_LINK_STATE_UP));
+	if (err)
+		goto bail;
+
+	VERIFY(err, (link->port_state == FASTRPC_LINK_DISCONNECTED));
+	if (err)
+		goto bail;
+
+	link->port_state = FASTRPC_LINK_CONNECTING;
+	cfg->priv = (void *)(uintptr_t)cid;
+	cfg->edge = gcinfo[cid].link.link_info.edge;
+	cfg->transport = gcinfo[cid].link.link_info.transport;
+	cfg->name = FASTRPC_GLINK_GUID;
+	cfg->notify_rx = fastrpc_glink_notify_rx;
+	cfg->notify_tx_done = fastrpc_glink_notify_tx_done;
+	cfg->notify_state = fastrpc_glink_notify_state;
+	cfg->notify_rx_intent_req = fastrpc_glink_notify_rx_intent_req;
+	handle = glink_open(cfg);
+	VERIFY(err, !IS_ERR_OR_NULL(handle));
+	if (err)
+		goto bail;
+	me->channel[cid].chan = handle;
+bail:
+	return err;
+}
+
+static int fastrpc_debugfs_open(struct inode *inode, struct file *filp)
+{
+	filp->private_data = inode->i_private;
+	return 0;
+}
+
+static ssize_t fastrpc_debugfs_read(struct file *filp, char __user *buffer,
+					 size_t count, loff_t *position)
+{
+	struct fastrpc_file *fl = filp->private_data;
+	struct hlist_node *n;
+	struct fastrpc_buf *buf = NULL;
+	struct fastrpc_mmap *map = NULL;
+	struct smq_invoke_ctx *ictx = NULL;
+	struct fastrpc_channel_ctx *chan;
+	struct fastrpc_session_ctx *sess;
+	unsigned int len = 0;
+	int i, j, ret = 0;
+	char *fileinfo = NULL;
+
+	fileinfo = kzalloc(DEBUGFS_SIZE, GFP_KERNEL);
+	if (!fileinfo) {
+		ret = -ENOMEM;
+		goto bail;
+	}
+	if (fl == NULL) {
+		for (i = 0; i < NUM_CHANNELS; i++) {
+			chan = &gcinfo[i];
+			len += scnprintf(fileinfo + len,
+					DEBUGFS_SIZE - len, "%s\n\n",
+					chan->name);
+			len += scnprintf(fileinfo + len,
+					DEBUGFS_SIZE - len, "%s %d\n",
+					"sesscount:", chan->sesscount);
+			for (j = 0; j < chan->sesscount; j++) {
+				sess = &chan->session[j];
+				len += scnprintf(fileinfo + len,
+						DEBUGFS_SIZE - len,
+						"%s%d\n\n", "SESSION", j);
+				len += scnprintf(fileinfo + len,
+						DEBUGFS_SIZE - len,
+						"%s %d\n", "sid:",
+						sess->smmu.cb);
+				len += scnprintf(fileinfo + len,
+						DEBUGFS_SIZE - len,
+						"%s %d\n", "SECURE:",
+						sess->smmu.secure);
+			}
+		}
+	} else {
+		len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
+				"%s %d\n\n",
+				"PROCESS_ID:", fl->tgid);
+		len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
+				"%s %d\n\n",
+				"CHANNEL_ID:", fl->cid);
+		len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
+				"%s %d\n\n",
+				"SSRCOUNT:", fl->ssrcount);
+		len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
+				"%s\n",
+				"LIST OF BUFS:");
+		spin_lock(&fl->hlock);
+		hlist_for_each_entry_safe(buf, n, &fl->bufs, hn) {
+			len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
+					"%s %pK %s %pK %s %llx\n", "buf:",
+					buf, "buf->virt:", buf->virt,
+					"buf->phys:", buf->phys);
+		}
+		len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
+					"\n%s\n",
+					"LIST OF MAPS:");
+		hlist_for_each_entry_safe(map, n, &fl->maps, hn) {
+			len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
+						"%s %pK %s %lx %s %llx\n",
+						"map:", map,
+						"map->va:", map->va,
+						"map->phys:", map->phys);
+		}
+		len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
+					"\n%s\n",
+					"LIST OF PENDING SMQCONTEXTS:");
+		hlist_for_each_entry_safe(ictx, n, &fl->clst.pending, hn) {
+			len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
+						"%s %pK %s %u %s %u %s %u\n",
+						"smqcontext:", ictx,
+						"sc:", ictx->sc,
+						"tid:", ictx->pid,
+						"handle:", ictx->rpra->h);
+		}
+		len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
+					"\n%s\n",
+					"LIST OF INTERRUPTED SMQCONTEXTS:");
+		hlist_for_each_entry_safe(ictx, n, &fl->clst.interrupted, hn) {
+			len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
+						"%s %pK %s %u %s %u %s %u\n",
+						"smqcontext:", ictx,
+						"sc:", ictx->sc,
+						"tid:", ictx->pid,
+						"handle:", ictx->rpra->h);
+		}
+		spin_unlock(&fl->hlock);
+	}
+	if (len > DEBUGFS_SIZE)
+		len = DEBUGFS_SIZE;
+	ret = simple_read_from_buffer(buffer, count, position, fileinfo, len);
+	kfree(fileinfo);
+bail:
+	return ret;
+}
+
+static const struct file_operations debugfs_fops = {
+	.open = fastrpc_debugfs_open,
+	.read = fastrpc_debugfs_read,
+};
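+
+/*
+ * Open (or reuse) the transport for the file's channel.  The channel is
+ * reference counted: the first opener brings up glink or SMD and waits
+ * for the port to connect; later openers only take a reference.  On the
+ * first open after an ADSP restart the leaked remote heap is reclaimed.
+ */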
+static int fastrpc_channel_open(struct fastrpc_file *fl)
+{
+	struct fastrpc_apps *me = &gfa;
+	int cid, err = 0;
+
+	mutex_lock(&me->smd_mutex);
+
+	VERIFY(err, fl && fl->sctx);
+	if (err)
+		goto bail;
+	cid = fl->cid;
+	VERIFY(err, cid >= 0 && cid < NUM_CHANNELS);
+	if (err)
+		goto bail;
+	if (me->channel[cid].ssrcount !=
+				 me->channel[cid].prevssrcount) {
+		if (!me->channel[cid].issubsystemup) {
+			VERIFY(err, 0);
+			if (err)
+				goto bail;
+		}
+	}
+	fl->ssrcount = me->channel[cid].ssrcount;
+	if ((kref_get_unless_zero(&me->channel[cid].kref) == 0) ||
+	    (me->channel[cid].chan == NULL)) {
+		if (me->glink) {
+			VERIFY(err, 0 == fastrpc_glink_register(cid, me));
+			if (err)
+				goto bail;
+			VERIFY(err, 0 == fastrpc_glink_open(cid));
+		} else {
+			VERIFY(err, !smd_named_open_on_edge(FASTRPC_SMD_GUID,
+				    gcinfo[cid].channel,
+				    (smd_channel_t **)&me->channel[cid].chan,
+				    (void *)(uintptr_t)cid,
+				    smd_event_handler));
+		}
+		if (err)
+			goto bail;
+
+		VERIFY(err,
+			wait_for_completion_timeout(&me->channel[cid].workport,
+							RPC_TIMEOUT));
+		if (err) {
+			me->channel[cid].chan = NULL;
+			goto bail;
+		}
+		kref_init(&me->channel[cid].kref);
+		pr_info("'opened /dev/%s c %d %d'\n", gcinfo[cid].name,
+						MAJOR(me->dev_no), cid);
+
+		if (me->glink) {
+			err = glink_queue_rx_intent(me->channel[cid].chan,
+							NULL, 16);
+			err |= glink_queue_rx_intent(me->channel[cid].chan,
+							 NULL, 64);
+			if (err)
+				pr_warn("adsprpc: intent fail for %d err %d\n",
+						cid, err);
+		}
+		if (cid == 0 && me->channel[cid].ssrcount !=
+				 me->channel[cid].prevssrcount) {
+			if (fastrpc_mmap_remove_ssr(fl))
+				pr_err("ADSPRPC: SSR: Failed to unmap remote heap\n");
+			me->channel[cid].prevssrcount =
+						me->channel[cid].ssrcount;
+		}
+	}
+
+bail:
+	mutex_unlock(&me->smd_mutex);
+	return err;
+}
+
+static int fastrpc_device_open(struct inode *inode, struct file *filp)
+{
+	int err = 0;
+	struct dentry *debugfs_file;
+	struct fastrpc_file *fl = NULL;
+	struct fastrpc_apps *me = &gfa;
+
+	VERIFY(err, NULL != (fl = kzalloc(sizeof(*fl), GFP_KERNEL)));
+	if (err)
+		return err;
+	debugfs_file = debugfs_create_file(current->comm, 0644, debugfs_root,
+						fl, &debugfs_fops);
+	context_list_ctor(&fl->clst);
+	spin_lock_init(&fl->hlock);
+	INIT_HLIST_HEAD(&fl->maps);
+	INIT_HLIST_HEAD(&fl->bufs);
+	INIT_HLIST_NODE(&fl->hn);
+	fl->tgid = current->tgid;
+	fl->apps = me;
+	fl->mode = FASTRPC_MODE_SERIAL;
+	fl->cid = -1;
+	if (debugfs_file != NULL)
+		fl->debugfs_file = debugfs_file;
+	memset(&fl->perf, 0, sizeof(fl->perf));
+	filp->private_data = fl;
+	mutex_init(&fl->map_mutex);
+	spin_lock(&me->hlock);
+	hlist_add_head(&fl->hn, &me->drivers);
+	spin_unlock(&me->hlock);
+	return 0;
+}
+
+static int fastrpc_get_info(struct fastrpc_file *fl, uint32_t *info)
+{
+	int err = 0;
+	uint32_t cid;
+
+	VERIFY(err, fl != NULL);
+	if (err)
+		goto bail;
+	if (fl->cid == -1) {
+		cid = *info;
+		VERIFY(err, cid < NUM_CHANNELS);
+		if (err)
+			goto bail;
+		fl->cid = cid;
+		fl->ssrcount = fl->apps->channel[cid].ssrcount;
+		VERIFY(err, !fastrpc_session_alloc_locked(
+				&fl->apps->channel[cid], 0, &fl->sctx));
+		if (err)
+			goto bail;
+	}
+	if (fl->sctx)
+		*info = (fl->sctx->smmu.enabled ? 1 : 0);
+bail:
+	return err;
+}
+
+static long fastrpc_device_ioctl(struct file *file, unsigned int ioctl_num,
+				 unsigned long ioctl_param)
+{
+	union {
+		struct fastrpc_ioctl_invoke_attrs inv;
+		struct fastrpc_ioctl_mmap mmap;
+		struct fastrpc_ioctl_munmap munmap;
+		struct fastrpc_ioctl_init_attrs init;
+		struct fastrpc_ioctl_perf perf;
+	} p;
+	void *param = (char *)ioctl_param;
+	struct fastrpc_file *fl = (struct fastrpc_file *)file->private_data;
+	int size = 0, err = 0;
+	uint32_t info;
+
+	p.inv.fds = NULL;
+	p.inv.attrs = NULL;
+	spin_lock(&fl->hlock);
+	if (fl->file_close == 1) {
+		err = -EBADF;
+		pr_warn("ADSPRPC: device is being released, not sending new requests to DSP\n");
+		spin_unlock(&fl->hlock);
+		goto bail;
+	}
+	spin_unlock(&fl->hlock);
+
+	switch (ioctl_num) {
+	case FASTRPC_IOCTL_INVOKE:
+		size = sizeof(struct fastrpc_ioctl_invoke);
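+		/* fall through */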
+	case FASTRPC_IOCTL_INVOKE_FD:
+		if (!size)
+			size = sizeof(struct fastrpc_ioctl_invoke_fd);
+		/* fall through */
+	case FASTRPC_IOCTL_INVOKE_ATTRS:
+		if (!size)
+			size = sizeof(struct fastrpc_ioctl_invoke_attrs);
+		K_COPY_FROM_USER(err, 0, &p.inv, param, size);
+		if (err)
+			goto bail;
+		VERIFY(err, 0 == (err = fastrpc_internal_invoke(fl, fl->mode,
+						0, &p.inv)));
+		if (err)
+			goto bail;
+		break;
+	case FASTRPC_IOCTL_MMAP:
+		K_COPY_FROM_USER(err, 0, &p.mmap, param,
+						sizeof(p.mmap));
+		if (err)
+			goto bail;
+		VERIFY(err, 0 == (err = fastrpc_internal_mmap(fl, &p.mmap)));
+		if (err)
+			goto bail;
+		K_COPY_TO_USER(err, 0, param, &p.mmap, sizeof(p.mmap));
+		if (err)
+			goto bail;
+		break;
+	case FASTRPC_IOCTL_MUNMAP:
+		K_COPY_FROM_USER(err, 0, &p.munmap, param,
+						sizeof(p.munmap));
+		if (err)
+			goto bail;
+		VERIFY(err, 0 == (err = fastrpc_internal_munmap(fl,
+							&p.munmap)));
+		if (err)
+			goto bail;
+		break;
+	case FASTRPC_IOCTL_SETMODE:
+		switch ((uint32_t)ioctl_param) {
+		case FASTRPC_MODE_PARALLEL:
+		case FASTRPC_MODE_SERIAL:
+			fl->mode = (uint32_t)ioctl_param;
+			break;
+		case FASTRPC_MODE_PROFILE:
+			fl->profile = (uint32_t)ioctl_param;
+			break;
+		default:
+			err = -ENOTTY;
+			break;
+		}
+		break;
+	case FASTRPC_IOCTL_GETPERF:
+		K_COPY_FROM_USER(err, 0, &p.perf,
+					param, sizeof(p.perf));
+		if (err)
+			goto bail;
+		p.perf.numkeys = sizeof(struct fastrpc_perf)/sizeof(int64_t);
+		if (p.perf.keys) {
+			char *keys = PERF_KEYS;
+
+			K_COPY_TO_USER(err, 0, (void *)p.perf.keys,
+						 keys, strlen(keys)+1);
+			if (err)
+				goto bail;
+		}
+		if (p.perf.data) {
+			K_COPY_TO_USER(err, 0, (void *)p.perf.data,
+						 &fl->perf, sizeof(fl->perf));
+		}
+		K_COPY_TO_USER(err, 0, param, &p.perf, sizeof(p.perf));
+		if (err)
+			goto bail;
+		break;
+	case FASTRPC_IOCTL_GETINFO:
+		K_COPY_FROM_USER(err, 0, &info, param, sizeof(info));
+		if (err)
+			goto bail;
+		VERIFY(err, 0 == (err = fastrpc_get_info(fl, &info)));
+		if (err)
+			goto bail;
+		K_COPY_TO_USER(err, 0, param, &info, sizeof(info));
+		if (err)
+			goto bail;
+		break;
+	case FASTRPC_IOCTL_INIT:
+		p.init.attrs = 0;
+		p.init.siglen = 0;
+		size = sizeof(struct fastrpc_ioctl_init);
+		/* fall through */
+	case FASTRPC_IOCTL_INIT_ATTRS:
+		if (!size)
+			size = sizeof(struct fastrpc_ioctl_init_attrs);
+		K_COPY_FROM_USER(err, 0, &p.init, param, size);
+		if (err)
+			goto bail;
+		VERIFY(err, p.init.init.filelen >= 0 &&
+			p.init.init.memlen >= 0);
+		if (err)
+			goto bail;
+		VERIFY(err, 0 == fastrpc_init_process(fl, &p.init));
+		if (err)
+			goto bail;
+		break;
+
+	default:
+		err = -ENOTTY;
+		pr_info("bad ioctl: %u\n", ioctl_num);
+		break;
+	}
+ bail:
+	return err;
+}
+
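+/*
+ * Subsystem-restart notifier.  Before shutdown the channel is closed and
+ * its ssrcount bumped; invokes belonging to the old generation then fail
+ * with -ECONNRESET until the channel is reopened, and waiting threads
+ * are kicked via fastrpc_notify_drivers().
+ */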
+static int fastrpc_restart_notifier_cb(struct notifier_block *nb,
+					unsigned long code,
+					void *data)
+{
+	struct fastrpc_apps *me = &gfa;
+	struct fastrpc_channel_ctx *ctx;
+	struct notif_data *notifdata = data;
+	int cid;
+
+	ctx = container_of(nb, struct fastrpc_channel_ctx, nb);
+	cid = ctx - &me->channel[0];
+	if (code == SUBSYS_BEFORE_SHUTDOWN) {
+		mutex_lock(&me->smd_mutex);
+		ctx->ssrcount++;
+		ctx->issubsystemup = 0;
+		if (ctx->chan) {
+			if (me->glink)
+				fastrpc_glink_close(ctx->chan, cid);
+			else
+				smd_close(ctx->chan);
+
+			ctx->chan = NULL;
+			pr_info("'restart notifier: closed /dev/%s c %d %d'\n",
+				 gcinfo[cid].name, MAJOR(me->dev_no), cid);
+		}
+		mutex_unlock(&me->smd_mutex);
+		if (cid == 0)
+			me->staticpd_flags = 0;
+		fastrpc_notify_drivers(me, cid);
+	} else if (code == SUBSYS_RAMDUMP_NOTIFICATION) {
+		if (me->channel[0].remoteheap_ramdump_dev &&
+				notifdata->enable_ramdump) {
+			me->channel[0].ramdumpenabled = 1;
+		}
+	} else if (code == SUBSYS_AFTER_POWERUP) {
+		ctx->issubsystemup = 1;
+	}
+
+	return NOTIFY_DONE;
+}
+
+static int fastrpc_smmu_fault_handler(struct iommu_domain *domain,
+	struct device *dev, unsigned long iova, int flags, void *token)
+{
+	struct fastrpc_session_ctx *sess = (struct fastrpc_session_ctx *)token;
+	int err = 0;
+
+	VERIFY(err, sess != NULL);
+	if (err)
+		return err;
+	sess->smmu.faults++;
+	dev_err(dev, "ADSPRPC context fault: iova=0x%08lx, cb = %d, faults=%d",
+					iova, sess->smmu.cb, sess->smmu.faults);
+	return 0;
+}
+
+static const struct file_operations fops = {
+	.open = fastrpc_device_open,
+	.release = fastrpc_device_release,
+	.unlocked_ioctl = fastrpc_device_ioctl,
+	.compat_ioctl = compat_fastrpc_device_ioctl,
+};
+
+static struct of_device_id fastrpc_match_table[] = {
+	{ .compatible = "qcom,msm-fastrpc-adsp", },
+	{ .compatible = "qcom,msm-fastrpc-compute-cb", },
+	{ .compatible = "qcom,msm-fastrpc-legacy-compute-cb", },
+	{ .compatible = "qcom,msm-adsprpc-mem-region", },
+	{}
+};
+
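+/*
+ * Probe one SMMU context bank: match the "label" property against a
+ * channel, create an IOMMU mapping for the session (a 0x70000000-byte VA
+ * window, based lower for secure banks), install a fault handler that
+ * counts faults so fastrpc_internal_invoke() can fail fast, and attach
+ * the device.
+ */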
+static int fastrpc_cb_probe(struct device *dev)
+{
+	struct fastrpc_channel_ctx *chan;
+	struct fastrpc_session_ctx *sess;
+	struct of_phandle_args iommuspec;
+	const char *name;
+	unsigned int start = 0x80000000;
+	int err = 0, i;
+	int secure_vmid = VMID_CP_PIXEL;
+
+	VERIFY(err, NULL != (name = of_get_property(dev->of_node,
+					 "label", NULL)));
+	if (err)
+		goto bail;
+	for (i = 0; i < NUM_CHANNELS; i++) {
+		if (!gcinfo[i].name)
+			continue;
+		if (!strcmp(name, gcinfo[i].name))
+			break;
+	}
+	VERIFY(err, i < NUM_CHANNELS);
+	if (err)
+		goto bail;
+	chan = &gcinfo[i];
+	VERIFY(err, chan->sesscount < NUM_SESSIONS);
+	if (err)
+		goto bail;
+
+	VERIFY(err, !of_parse_phandle_with_args(dev->of_node, "iommus",
+						"#iommu-cells", 0, &iommuspec));
+	if (err)
+		goto bail;
+	sess = &chan->session[chan->sesscount];
+	sess->smmu.cb = iommuspec.args[0];
+	sess->used = 0;
+	sess->smmu.coherent = of_property_read_bool(dev->of_node,
+						"dma-coherent");
+	sess->smmu.secure = of_property_read_bool(dev->of_node,
+						"qcom,secure-context-bank");
+	if (sess->smmu.secure)
+		start = 0x60000000;
+	VERIFY(err, !IS_ERR_OR_NULL(sess->smmu.mapping =
+				arm_iommu_create_mapping(&platform_bus_type,
+						start, 0x70000000)));
+	if (err)
+		goto bail;
+	iommu_set_fault_handler(sess->smmu.mapping->domain,
+				fastrpc_smmu_fault_handler, sess);
+	if (sess->smmu.secure)
+		iommu_domain_set_attr(sess->smmu.mapping->domain,
+				DOMAIN_ATTR_SECURE_VMID,
+				&secure_vmid);
+
+	VERIFY(err, !arm_iommu_attach_device(dev, sess->smmu.mapping));
+	if (err)
+		goto bail;
+	sess->smmu.dev = dev;
+	sess->smmu.enabled = 1;
+	chan->sesscount++;
+	debugfs_global_file = debugfs_create_file("global", 0644, debugfs_root,
+							NULL, &debugfs_fops);
+
+bail:
+	return err;
+}
+
+static int fastrpc_cb_legacy_probe(struct device *dev)
+{
+	struct device_node *domains_child_node = NULL;
+	struct device_node *ctx_node = NULL;
+	struct fastrpc_channel_ctx *chan;
+	struct fastrpc_session_ctx *first_sess, *sess;
+	const char *name;
+	unsigned int *range = NULL, range_size = 0;
+	unsigned int *sids = NULL, sids_size = 0;
+	int err = 0, ret = 0, i;
+
+	VERIFY(err, 0 != (domains_child_node = of_get_child_by_name(
+			dev->of_node,
+			"qcom,msm_fastrpc_compute_cb")));
+	if (err)
+		goto bail;
+	VERIFY(err, 0 != (ctx_node = of_parse_phandle(
+			domains_child_node,
+			"qcom,adsp-shared-phandle", 0)));
+	if (err)
+		goto bail;
+	VERIFY(err, 0 != of_get_property(domains_child_node,
+				"qcom,adsp-shared-sids", &sids_size));
+	if (err)
+		goto bail;
+	VERIFY(err, sids = kzalloc(sids_size, GFP_KERNEL));
+	if (err)
+		goto bail;
+	ret = of_property_read_u32_array(domains_child_node,
+					"qcom,adsp-shared-sids",
+					sids,
+					sids_size/sizeof(unsigned int));
+	if (ret)
+		goto bail;
+	VERIFY(err, 0 != (name = of_get_property(ctx_node, "label", NULL)));
+	if (err)
+		goto bail;
+	VERIFY(err, 0 != of_get_property(domains_child_node,
+					"qcom,virtual-addr-pool", &range_size));
+	if (err)
+		goto bail;
+	VERIFY(err, range = kzalloc(range_size, GFP_KERNEL));
+	if (err)
+		goto bail;
+	ret = of_property_read_u32_array(domains_child_node,
+					"qcom,virtual-addr-pool",
+					range,
+					range_size/sizeof(unsigned int));
+	if (ret)
+		goto bail;
+
+	chan = &gcinfo[0];
+	VERIFY(err, chan->sesscount < NUM_SESSIONS);
+	if (err)
+		goto bail;
+	first_sess = &chan->session[chan->sesscount];
+	first_sess->smmu.dev = msm_iommu_get_ctx(name);
+	VERIFY(err, !IS_ERR_OR_NULL(first_sess->smmu.mapping =
+			arm_iommu_create_mapping(
+				msm_iommu_get_bus(first_sess->smmu.dev),
+				range[0], range[1])));
+	if (err)
+		goto bail;
+	VERIFY(err, !arm_iommu_attach_device(first_sess->smmu.dev,
+					first_sess->smmu.mapping));
+	if (err)
+		goto bail;
+	for (i = 0; i < sids_size/sizeof(unsigned int); i++) {
+		VERIFY(err, chan->sesscount < NUM_SESSIONS);
+		if (err)
+			goto bail;
+		sess = &chan->session[chan->sesscount];
+		sess->smmu.cb = sids[i];
+		sess->smmu.dev = first_sess->smmu.dev;
+		sess->smmu.enabled = 1;
+		sess->smmu.mapping = first_sess->smmu.mapping;
+		chan->sesscount++;
+	}
+bail:
+	kfree(sids);
+	kfree(range);
+	return err;
+}
+
+static int fastrpc_probe(struct platform_device *pdev)
+{
+	int err = 0;
+	struct fastrpc_apps *me = &gfa;
+	struct device *dev = &pdev->dev;
+
+	if (of_device_is_compatible(dev->of_node,
+					"qcom,msm-fastrpc-compute-cb"))
+		return fastrpc_cb_probe(dev);
+
+	if (of_device_is_compatible(dev->of_node,
+					"qcom,msm-fastrpc-legacy-compute-cb"))
+		return fastrpc_cb_legacy_probe(dev);
+
+	if (of_device_is_compatible(dev->of_node,
+					"qcom,msm-adsprpc-mem-region")) {
+		me->dev = dev;
+		me->channel[0].remoteheap_ramdump_dev =
+				create_ramdump_device("adsp_rh", dev);
+		if (IS_ERR_OR_NULL(me->channel[0].remoteheap_ramdump_dev)) {
+			pr_err("ADSPRPC: Unable to create adsp-remoteheap ramdump device.\n");
+			me->channel[0].remoteheap_ramdump_dev = NULL;
+		}
+		return 0;
+	}
+	if (of_property_read_bool(dev->of_node,
+					"qcom,fastrpc-vmid-heap-shared"))
+		gcinfo[0].heap_vmid = AC_VM_ADSP_HEAP_SHARED;
+	else
+		gcinfo[0].heap_vmid = VMID_ADSP_Q6;
+	pr_info("ADSPRPC: gcinfo[0].heap_vmid %d\n", gcinfo[0].heap_vmid);
+	me->glink = of_property_read_bool(dev->of_node, "qcom,fastrpc-glink");
+	VERIFY(err, !of_platform_populate(pdev->dev.of_node,
+					  fastrpc_match_table,
+					  NULL, &pdev->dev));
+	return err;
+}
+
+static void fastrpc_deinit(void)
+{
+	struct fastrpc_apps *me = &gfa;
+	struct fastrpc_channel_ctx *chan = gcinfo;
+	int i, j;
+
+	for (i = 0; i < NUM_CHANNELS; i++, chan++) {
+		if (chan->chan) {
+			kref_put_mutex(&chan->kref,
+				fastrpc_channel_close, &me->smd_mutex);
+			chan->chan = NULL;
+		}
+		for (j = 0; j < NUM_SESSIONS; j++) {
+			struct fastrpc_session_ctx *sess = &chan->session[j];
+			if (sess->smmu.dev) {
+				arm_iommu_detach_device(sess->smmu.dev);
+				sess->smmu.dev = NULL;
+			}
+			if (sess->smmu.mapping) {
+				arm_iommu_release_mapping(sess->smmu.mapping);
+				sess->smmu.mapping = NULL;
+			}
+		}
+	}
+}
+
+static struct platform_driver fastrpc_driver = {
+	.probe = fastrpc_probe,
+	.driver = {
+		.name = "fastrpc",
+		.owner = THIS_MODULE,
+		.of_match_table = fastrpc_match_table,
+	},
+};
+
+static int __init fastrpc_device_init(void)
+{
+	struct fastrpc_apps *me = &gfa;
+	struct device *dev = NULL;
+	int err = 0, i;
+
+	memset(me, 0, sizeof(*me));
+
+	fastrpc_init(me);
+	me->dev = NULL;
+	VERIFY(err, 0 == platform_driver_register(&fastrpc_driver));
+	if (err)
+		goto register_bail;
+	VERIFY(err, 0 == alloc_chrdev_region(&me->dev_no, 0, NUM_CHANNELS,
+					DEVICE_NAME));
+	if (err)
+		goto alloc_chrdev_bail;
+	cdev_init(&me->cdev, &fops);
+	me->cdev.owner = THIS_MODULE;
+	VERIFY(err, 0 == cdev_add(&me->cdev, MKDEV(MAJOR(me->dev_no), 0),
+				1));
+	if (err)
+		goto cdev_init_bail;
+	me->class = class_create(THIS_MODULE, "fastrpc");
+	VERIFY(err, !IS_ERR(me->class));
+	if (err)
+		goto class_create_bail;
+	me->compat = (NULL == fops.compat_ioctl) ? 0 : 1;
+	dev = device_create(me->class, NULL,
+				MKDEV(MAJOR(me->dev_no), 0),
+				NULL, gcinfo[0].name);
+	VERIFY(err, !IS_ERR_OR_NULL(dev));
+	if (err)
+		goto device_create_bail;
+	for (i = 0; i < NUM_CHANNELS; i++) {
+		me->channel[i].dev = dev;
+		me->channel[i].ssrcount = 0;
+		me->channel[i].prevssrcount = 0;
+		me->channel[i].issubsystemup = 1;
+		me->channel[i].ramdumpenabled = 0;
+		me->channel[i].remoteheap_ramdump_dev = NULL;
+		me->channel[i].nb.notifier_call = fastrpc_restart_notifier_cb;
+		me->channel[i].handle = subsys_notif_register_notifier(
+							gcinfo[i].subsys,
+							&me->channel[i].nb);
+	}
+
+	me->client = msm_ion_client_create(DEVICE_NAME);
+	VERIFY(err, !IS_ERR_OR_NULL(me->client));
+	if (err)
+		goto device_create_bail;
+	debugfs_root = debugfs_create_dir("adsprpc", NULL);
+	return 0;
+device_create_bail:
+	for (i = 0; i < NUM_CHANNELS; i++) {
+		if (me->channel[i].handle)
+			subsys_notif_unregister_notifier(me->channel[i].handle,
+							&me->channel[i].nb);
+	}
+	if (!IS_ERR_OR_NULL(dev))
+		device_destroy(me->class, MKDEV(MAJOR(me->dev_no), 0));
+	class_destroy(me->class);
+class_create_bail:
+	cdev_del(&me->cdev);
+cdev_init_bail:
+	unregister_chrdev_region(me->dev_no, NUM_CHANNELS);
+alloc_chrdev_bail:
+register_bail:
+	fastrpc_deinit();
+	return err;
+}
+
+static void __exit fastrpc_device_exit(void)
+{
+	struct fastrpc_apps *me = &gfa;
+	int i;
+
+	fastrpc_file_list_dtor(me);
+	fastrpc_deinit();
+	for (i = 0; i < NUM_CHANNELS; i++) {
+		if (!gcinfo[i].name)
+			continue;
+		device_destroy(me->class, MKDEV(MAJOR(me->dev_no), i));
+		subsys_notif_unregister_notifier(me->channel[i].handle,
+						&me->channel[i].nb);
+	}
+	class_destroy(me->class);
+	cdev_del(&me->cdev);
+	unregister_chrdev_region(me->dev_no, NUM_CHANNELS);
+	ion_client_destroy(me->client);
+	debugfs_remove_recursive(debugfs_root);
+}
+
+late_initcall(fastrpc_device_init);
+module_exit(fastrpc_device_exit);
+
+MODULE_LICENSE("GPL v2");
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/char/adsprpc_compat.c	2019-01-22 16:16:22.943241335 +0100
@@ -0,0 +1,436 @@
+/*
+ * Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/compat.h>
+#include <linux/fs.h>
+#include <linux/uaccess.h>
+#include <linux/msm_ion.h>
+
+#include "adsprpc_compat.h"
+#include "adsprpc_shared.h"
+
+#define COMPAT_FASTRPC_IOCTL_INVOKE \
+		_IOWR('R', 1, struct compat_fastrpc_ioctl_invoke)
+#define COMPAT_FASTRPC_IOCTL_MMAP \
+		_IOWR('R', 2, struct compat_fastrpc_ioctl_mmap)
+#define COMPAT_FASTRPC_IOCTL_MUNMAP \
+		_IOWR('R', 3, struct compat_fastrpc_ioctl_munmap)
+#define COMPAT_FASTRPC_IOCTL_INVOKE_FD \
+		_IOWR('R', 4, struct compat_fastrpc_ioctl_invoke_fd)
+#define COMPAT_FASTRPC_IOCTL_INIT \
+		_IOWR('R', 6, struct compat_fastrpc_ioctl_init)
+#define COMPAT_FASTRPC_IOCTL_INVOKE_ATTRS \
+		_IOWR('R', 7, struct compat_fastrpc_ioctl_invoke_attrs)
+#define COMPAT_FASTRPC_IOCTL_GETPERF \
+		_IOWR('R', 9, struct compat_fastrpc_ioctl_perf)
+#define COMPAT_FASTRPC_IOCTL_INIT_ATTRS \
+		_IOWR('R', 10, struct compat_fastrpc_ioctl_init_attrs)
+
+struct compat_remote_buf {
+	compat_uptr_t pv;	/* buffer pointer */
+	compat_size_t len;	/* length of buffer */
+};
+
+union compat_remote_arg {
+	struct compat_remote_buf buf;
+	compat_uint_t h;
+};
+
+struct compat_fastrpc_ioctl_invoke {
+	compat_uint_t handle;	/* remote handle */
+	compat_uint_t sc;	/* scalars describing the data */
+	compat_uptr_t pra;	/* remote arguments list */
+};
+
+struct compat_fastrpc_ioctl_invoke_fd {
+	struct compat_fastrpc_ioctl_invoke inv;
+	compat_uptr_t fds;	/* fd list */
+};
+
+struct compat_fastrpc_ioctl_invoke_attrs {
+	struct compat_fastrpc_ioctl_invoke inv;
+	compat_uptr_t fds;	/* fd list */
+	compat_uptr_t attrs;	/* attribute list */
+};
+
+struct compat_fastrpc_ioctl_mmap {
+	compat_int_t fd;	/* ion fd */
+	compat_uint_t flags;	/* flags for dsp to map with */
+	compat_uptr_t vaddrin;	/* optional virtual address */
+	compat_size_t size;	/* size */
+	compat_uptr_t vaddrout;	/* DSP's virtual address */
+};
+
+struct compat_fastrpc_ioctl_munmap {
+	compat_uptr_t vaddrout;	/* address to unmap */
+	compat_size_t size;	/* size */
+};
+
+struct compat_fastrpc_ioctl_init {
+	compat_uint_t flags;	/* one of FASTRPC_INIT_* macros */
+	compat_uptr_t file;	/* pointer to elf file */
+	compat_int_t filelen;	/* elf file length */
+	compat_int_t filefd;	/* ION fd for the file */
+	compat_uptr_t mem;	/* mem for the PD */
+	compat_int_t memlen;	/* mem length */
+	compat_int_t memfd;	/* ION fd for the mem */
+};
+
+struct compat_fastrpc_ioctl_init_attrs {
+	struct compat_fastrpc_ioctl_init init;
+	compat_int_t attrs;	/* attributes to init process */
+	compat_int_t siglen;	/* test signature file length */
+};
+
+struct compat_fastrpc_ioctl_perf {	/* kernel performance data */
+	compat_uptr_t  data;
+	compat_int_t numkeys;
+	compat_uptr_t keys;
+};
+
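+/*
+ * The compat shims below rebuild each 64-bit ioctl structure on the user
+ * stack with compat_alloc_user_space(), copy the 32-bit fields over one
+ * by one with get_user()/put_user(), re-enter the native unlocked_ioctl,
+ * and copy any outputs back into the 32-bit layout.
+ */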
+static int compat_get_fastrpc_ioctl_invoke(
+			struct compat_fastrpc_ioctl_invoke_attrs __user *inv32,
+			struct fastrpc_ioctl_invoke_attrs __user **inva,
+			unsigned int cmd)
+{
+	compat_uint_t u, sc;
+	compat_size_t s;
+	compat_uptr_t p;
+	struct fastrpc_ioctl_invoke_attrs *inv;
+	union compat_remote_arg *pra32;
+	union remote_arg *pra;
+	int err, len, num, j;
+
+	err = get_user(sc, &inv32->inv.sc);
+	if (err)
+		return err;
+
+	len = REMOTE_SCALARS_LENGTH(sc);
+	VERIFY(err, NULL != (inv = compat_alloc_user_space(
+				sizeof(*inv) + len * sizeof(*pra))));
+	if (err)
+		return -EFAULT;
+
+	pra = (union remote_arg *)(inv + 1);
+	err = put_user(pra, &inv->inv.pra);
+	err |= put_user(sc, &inv->inv.sc);
+	err |= get_user(u, &inv32->inv.handle);
+	err |= put_user(u, &inv->inv.handle);
+	err |= get_user(p, &inv32->inv.pra);
+	if (err)
+		return err;
+
+	pra32 = compat_ptr(p);
+	num = REMOTE_SCALARS_INBUFS(sc) + REMOTE_SCALARS_OUTBUFS(sc);
+	for (j = 0; j < num; j++) {
+		err |= get_user(p, &pra32[j].buf.pv);
+		err |= put_user(p, (uintptr_t *)&pra[j].buf.pv);
+		err |= get_user(s, &pra32[j].buf.len);
+		err |= put_user(s, &pra[j].buf.len);
+	}
+	for (j = 0; j < REMOTE_SCALARS_INHANDLES(sc); j++) {
+		err |= get_user(u, &pra32[num + j].h);
+		err |= put_user(u, &pra[num + j].h);
+	}
+
+	err |= put_user(NULL, &inv->fds);
+	if (cmd != COMPAT_FASTRPC_IOCTL_INVOKE) {
+		err |= get_user(p, &inv32->fds);
+		err |= put_user(p, (compat_uptr_t *)&inv->fds);
+	}
+	err |= put_user(NULL, &inv->attrs);
+	if (cmd == COMPAT_FASTRPC_IOCTL_INVOKE_ATTRS) {
+		err |= get_user(p, &inv32->attrs);
+		err |= put_user(p, (compat_uptr_t *)&inv->attrs);
+	}
+
+	*inva = inv;
+	return err;
+}
+
+static int compat_put_fastrpc_ioctl_invoke(
+			struct compat_fastrpc_ioctl_invoke_attrs __user *inv32,
+			struct fastrpc_ioctl_invoke_attrs __user *inv)
+{
+	compat_uptr_t p;
+	compat_uint_t u, h;
+	union compat_remote_arg *pra32;
+	union remote_arg *pra;
+	int err, i, num;
+
+	err = get_user(u, &inv32->inv.sc);
+	err |= get_user(p, &inv32->inv.pra);
+	if (err)
+		return err;
+
+	pra32 = compat_ptr(p);
+	pra = (union remote_arg *)(inv + 1);
+	num = REMOTE_SCALARS_INBUFS(u) + REMOTE_SCALARS_OUTBUFS(u)
+		+ REMOTE_SCALARS_INHANDLES(u);
+	for (i = 0;  i < REMOTE_SCALARS_OUTHANDLES(u); i++) {
+		err |= get_user(h, &pra[num + i].h);
+		err |= put_user(h, &pra32[num + i].h);
+	}
+
+	return err;
+}
+
+static int compat_get_fastrpc_ioctl_mmap(
+			struct compat_fastrpc_ioctl_mmap __user *map32,
+			struct fastrpc_ioctl_mmap __user *map)
+{
+	compat_uint_t u;
+	compat_int_t i;
+	compat_size_t s;
+	compat_uptr_t p;
+	int err;
+
+	err = get_user(i, &map32->fd);
+	err |= put_user(i, &map->fd);
+	err |= get_user(u, &map32->flags);
+	err |= put_user(u, &map->flags);
+	err |= get_user(p, &map32->vaddrin);
+	err |= put_user(p, (uintptr_t *)&map->vaddrin);
+	err |= get_user(s, &map32->size);
+	err |= put_user(s, &map->size);
+
+	return err;
+}
+
+static int compat_put_fastrpc_ioctl_mmap(
+			struct compat_fastrpc_ioctl_mmap __user *map32,
+			struct fastrpc_ioctl_mmap __user *map)
+{
+	compat_uptr_t p;
+	int err;
+
+	err = get_user(p, &map->vaddrout);
+	err |= put_user(p, &map32->vaddrout);
+
+	return err;
+}
+
+static int compat_get_fastrpc_ioctl_munmap(
+			struct compat_fastrpc_ioctl_munmap __user *unmap32,
+			struct fastrpc_ioctl_munmap __user *unmap)
+{
+	compat_uptr_t p;
+	compat_size_t s;
+	int err;
+
+	err = get_user(p, &unmap32->vaddrout);
+	err |= put_user(p, &unmap->vaddrout);
+	err |= get_user(s, &unmap32->size);
+	err |= put_user(s, &unmap->size);
+
+	return err;
+}
+
+static int compat_get_fastrpc_ioctl_perf(
+			struct compat_fastrpc_ioctl_perf __user *perf32,
+			struct fastrpc_ioctl_perf __user *perf)
+{
+	compat_uptr_t p;
+	int err;
+
+	err = get_user(p, &perf32->data);
+	err |= put_user(p, &perf->data);
+	err |= get_user(p, &perf32->keys);
+	err |= put_user(p, &perf->keys);
+
+	return err;
+}
+
+static int compat_get_fastrpc_ioctl_init(
+			struct compat_fastrpc_ioctl_init_attrs __user *init32,
+			struct fastrpc_ioctl_init_attrs __user *init,
+			unsigned int cmd)
+{
+	compat_uint_t u;
+	compat_uptr_t p;
+	compat_int_t i;
+	int err;
+
+	err = get_user(u, &init32->init.flags);
+	err |= put_user(u, &init->init.flags);
+	err |= get_user(p, &init32->init.file);
+	err |= put_user(p, &init->init.file);
+	err |= get_user(i, &init32->init.filelen);
+	err |= put_user(i, &init->init.filelen);
+	err |= get_user(i, &init32->init.filefd);
+	err |= put_user(i, &init->init.filefd);
+	err |= get_user(p, &init32->init.mem);
+	err |= put_user(p, &init->init.mem);
+	err |= get_user(i, &init32->init.memlen);
+	err |= put_user(i, &init->init.memlen);
+	err |= get_user(i, &init32->init.memfd);
+	err |= put_user(i, &init->init.memfd);
+
+	err |= put_user(0, &init->attrs);
+	if (cmd == COMPAT_FASTRPC_IOCTL_INIT_ATTRS) {
+		err |= get_user(i, &init32->attrs);
+		err |= put_user(i, (compat_uptr_t *)&init->attrs);
+	}
+
+	err |= put_user(0, &init->siglen);
+	if (cmd == COMPAT_FASTRPC_IOCTL_INIT_ATTRS) {
+		err |= get_user(i, &init32->siglen);
+		err |= put_user(i, (compat_uptr_t *)&init->siglen);
+	}
+
+	return err;
+}
+
+long compat_fastrpc_device_ioctl(struct file *filp, unsigned int cmd,
+				unsigned long arg)
+{
+	int err = 0;
+
+	if (!filp->f_op || !filp->f_op->unlocked_ioctl)
+		return -ENOTTY;
+
+	switch (cmd) {
+	case COMPAT_FASTRPC_IOCTL_INVOKE:
+	case COMPAT_FASTRPC_IOCTL_INVOKE_FD:
+	case COMPAT_FASTRPC_IOCTL_INVOKE_ATTRS:
+	{
+		struct compat_fastrpc_ioctl_invoke_attrs __user *inv32;
+		struct fastrpc_ioctl_invoke_attrs __user *inv;
+		long ret;
+
+		inv32 = compat_ptr(arg);
+		VERIFY(err, 0 == compat_get_fastrpc_ioctl_invoke(inv32,
+							&inv, cmd));
+		if (err)
+			return err;
+		ret = filp->f_op->unlocked_ioctl(filp,
+				FASTRPC_IOCTL_INVOKE_ATTRS, (unsigned long)inv);
+		if (ret)
+			return ret;
+		VERIFY(err, 0 == compat_put_fastrpc_ioctl_invoke(inv32, inv));
+		return err;
+	}
+	case COMPAT_FASTRPC_IOCTL_MMAP:
+	{
+		struct compat_fastrpc_ioctl_mmap __user *map32;
+		struct fastrpc_ioctl_mmap __user *map;
+		long ret;
+
+		map32 = compat_ptr(arg);
+		VERIFY(err, NULL != (map = compat_alloc_user_space(
+							sizeof(*map))));
+		if (err)
+			return -EFAULT;
+		VERIFY(err, 0 == compat_get_fastrpc_ioctl_mmap(map32, map));
+		if (err)
+			return err;
+		ret = filp->f_op->unlocked_ioctl(filp, FASTRPC_IOCTL_MMAP,
+							(unsigned long)map);
+		if (ret)
+			return ret;
+		VERIFY(err, 0 == compat_put_fastrpc_ioctl_mmap(map32, map));
+		return err;
+	}
+	case COMPAT_FASTRPC_IOCTL_MUNMAP:
+	{
+		struct compat_fastrpc_ioctl_munmap __user *unmap32;
+		struct fastrpc_ioctl_munmap __user *unmap;
+
+		unmap32 = compat_ptr(arg);
+		VERIFY(err, NULL != (unmap = compat_alloc_user_space(
+							sizeof(*unmap))));
+		if (err)
+			return -EFAULT;
+		VERIFY(err, 0 == compat_get_fastrpc_ioctl_munmap(unmap32,
+							unmap));
+		if (err)
+			return err;
+		return filp->f_op->unlocked_ioctl(filp, FASTRPC_IOCTL_MUNMAP,
+							(unsigned long)unmap);
+	}
+	case COMPAT_FASTRPC_IOCTL_INIT:
+		/* fall through */
+	case COMPAT_FASTRPC_IOCTL_INIT_ATTRS:
+	{
+		struct compat_fastrpc_ioctl_init_attrs __user *init32;
+		struct fastrpc_ioctl_init_attrs __user *init;
+
+		init32 = compat_ptr(arg);
+		VERIFY(err, NULL != (init = compat_alloc_user_space(
+							sizeof(*init))));
+		if (err)
+			return -EFAULT;
+		VERIFY(err, 0 == compat_get_fastrpc_ioctl_init(init32,
+							init, cmd));
+		if (err)
+			return err;
+		return filp->f_op->unlocked_ioctl(filp,
+			 FASTRPC_IOCTL_INIT_ATTRS, (unsigned long)init);
+	}
+	case FASTRPC_IOCTL_GETINFO:
+	{
+		compat_uptr_t __user *info32;
+		uint32_t __user *info;
+		compat_uint_t u;
+		long ret;
+
+		info32 = compat_ptr(arg);
+		VERIFY(err, NULL != (info = compat_alloc_user_space(
+							sizeof(*info))));
+		if (err)
+			return -EFAULT;
+		err = get_user(u, info32);
+		err |= put_user(u, info);
+		if (err)
+			return err;
+		ret = filp->f_op->unlocked_ioctl(filp, FASTRPC_IOCTL_GETINFO,
+							(unsigned long)info);
+		if (ret)
+			return ret;
+		err = get_user(u, info);
+		err |= put_user(u, info32);
+		return err;
+	}
+	case FASTRPC_IOCTL_SETMODE:
+		return filp->f_op->unlocked_ioctl(filp, cmd,
+						(unsigned long)compat_ptr(arg));
+	case COMPAT_FASTRPC_IOCTL_GETPERF:
+	{
+		struct compat_fastrpc_ioctl_perf __user *perf32;
+		struct fastrpc_ioctl_perf *perf;
+		compat_uint_t u;
+		long ret;
+
+		perf32 = compat_ptr(arg);
+		VERIFY(err, NULL != (perf = compat_alloc_user_space(
+							sizeof(*perf))));
+		if (err)
+			return -EFAULT;
+		VERIFY(err, 0 == compat_get_fastrpc_ioctl_perf(perf32,
+							perf));
+		if (err)
+			return err;
+		ret = filp->f_op->unlocked_ioctl(filp, FASTRPC_IOCTL_GETPERF,
+							(unsigned long)perf);
+		if (ret)
+			return ret;
+		err = get_user(u, &perf->numkeys);
+		err |= put_user(u, &perf32->numkeys);
+		return err;
+	}
+	default:
+		return -ENOIOCTLCMD;
+	}
+}
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/char/adsprpc_compat.h	2019-01-22 16:16:22.943241335 +0100
@@ -0,0 +1,26 @@
+/*
+ * Copyright (c) 2014 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef ADSPRPC_COMPAT_H
+#define ADSPRPC_COMPAT_H
+
+#ifdef CONFIG_COMPAT
+
+long compat_fastrpc_device_ioctl(struct file *filp, unsigned int cmd,
+				unsigned long arg);
+#else
+
+#define compat_fastrpc_device_ioctl	NULL
+
+#endif /* CONFIG_COMPAT */
+#endif /* ADSPRPC_COMPAT_H */
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/char/adsprpc_shared.h	2019-01-22 16:16:22.943241335 +0100
@@ -0,0 +1,235 @@
+/*
+ * Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef ADSPRPC_SHARED_H
+#define ADSPRPC_SHARED_H
+
+#include <linux/types.h>
+
+#define FASTRPC_IOCTL_INVOKE	_IOWR('R', 1, struct fastrpc_ioctl_invoke)
+#define FASTRPC_IOCTL_MMAP	_IOWR('R', 2, struct fastrpc_ioctl_mmap)
+#define FASTRPC_IOCTL_MUNMAP	_IOWR('R', 3, struct fastrpc_ioctl_munmap)
+#define FASTRPC_IOCTL_INVOKE_FD	_IOWR('R', 4, struct fastrpc_ioctl_invoke_fd)
+#define FASTRPC_IOCTL_SETMODE	_IOWR('R', 5, uint32_t)
+#define FASTRPC_IOCTL_INIT	_IOWR('R', 6, struct fastrpc_ioctl_init)
+#define FASTRPC_IOCTL_INVOKE_ATTRS \
+				_IOWR('R', 7, struct fastrpc_ioctl_invoke_attrs)
+#define FASTRPC_IOCTL_GETINFO	_IOWR('R', 8, uint32_t)
+#define FASTRPC_IOCTL_GETPERF	_IOWR('R', 9, struct fastrpc_ioctl_perf)
+#define FASTRPC_IOCTL_INIT_ATTRS _IOWR('R', 10, struct fastrpc_ioctl_init_attrs)
+
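+/*
+ * Illustrative usage sketch (not part of this driver): a client might
+ * issue the simplest of these ioctls roughly as below. The /dev node name
+ * is an assumption derived from DEVICE_NAME below; the driver reads and
+ * updates the 32-bit argument in place.
+ *
+ *	uint32_t info = 0;
+ *	int fd = open("/dev/adsprpc-smd", O_RDWR);
+ *
+ *	if (fd >= 0 && ioctl(fd, FASTRPC_IOCTL_GETINFO, &info) == 0)
+ *		printf("info: %u\n", info);
+ */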
+#define FASTRPC_GLINK_GUID "fastrpcglink-apps-dsp"
+#define FASTRPC_SMD_GUID "fastrpcsmd-apps-dsp"
+#define DEVICE_NAME      "adsprpc-smd"
+
+/* Set for buffers that have no virtual mapping in userspace */
+#define FASTRPC_ATTR_NOVA 0x1
+
+/* Set for buffers that are NOT dma coherent */
+#define FASTRPC_ATTR_NON_COHERENT 0x2
+
+/* Set for buffers that are dma coherent */
+#define FASTRPC_ATTR_COHERENT 0x4
+
+/* Driver should operate in parallel with the co-processor */
+#define FASTRPC_MODE_PARALLEL    0
+
+/* Driver should operate in serial mode with the co-processor */
+#define FASTRPC_MODE_SERIAL      1
+
+/* Driver should operate in profile mode with the co-processor */
+#define FASTRPC_MODE_PROFILE     2
+
+/* INIT a new process or attach to guestos */
+#define FASTRPC_INIT_ATTACH      0
+#define FASTRPC_INIT_CREATE      1
+#define FASTRPC_INIT_CREATE_STATIC  2
+
+/* Retrieves the number of input buffers from the scalars parameter */
+#define REMOTE_SCALARS_INBUFS(sc)        (((sc) >> 16) & 0x0ff)
+
+/* Retrieves the number of output buffers from the scalars parameter */
+#define REMOTE_SCALARS_OUTBUFS(sc)       (((sc) >> 8) & 0x0ff)
+
+/* Retrieves the number of input handles from the scalars parameter */
+#define REMOTE_SCALARS_INHANDLES(sc)     (((sc) >> 4) & 0x0f)
+
+/* Retrieves the number of output handles from the scalars parameter */
+#define REMOTE_SCALARS_OUTHANDLES(sc)    ((sc) & 0x0f)
+
+#define REMOTE_SCALARS_LENGTH(sc)	(REMOTE_SCALARS_INBUFS(sc) +\
+					REMOTE_SCALARS_OUTBUFS(sc) +\
+					REMOTE_SCALARS_INHANDLES(sc) +\
+					REMOTE_SCALARS_OUTHANDLES(sc))
+
+#define REMOTE_SCALARS_MAKEX(attr, method, in, out, oin, oout) \
+		((((uint32_t)   (attr) & 0x7) << 29) | \
+		(((uint32_t) (method) & 0x1f) << 24) | \
+		(((uint32_t)     (in) & 0xff) << 16) | \
+		(((uint32_t)    (out) & 0xff) <<  8) | \
+		(((uint32_t)    (oin) & 0x0f) <<  4) | \
+		((uint32_t)   (oout) & 0x0f))
+
+#define REMOTE_SCALARS_MAKE(method, in, out) \
+		REMOTE_SCALARS_MAKEX(0, method, in, out, 0, 0)
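+
+/*
+ * Worked example (illustration only): REMOTE_SCALARS_MAKE(2, 1, 1) expands
+ * to REMOTE_SCALARS_MAKEX(0, 2, 1, 1, 0, 0), i.e.
+ *
+ *	(2 << 24) | (1 << 16) | (1 << 8) == 0x02010100,
+ *
+ * from which REMOTE_SCALARS_INBUFS() and REMOTE_SCALARS_OUTBUFS() each
+ * recover 1, and REMOTE_SCALARS_LENGTH() recovers 2.
+ */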
+
+#ifndef VERIFY_PRINT_ERROR
+#define VERIFY_EPRINTF(format, args) (void)0
+#endif
+
+#ifndef VERIFY_PRINT_INFO
+#define VERIFY_IPRINTF(args) (void)0
+#endif
+
+#ifndef VERIFY
+#define __STR__(x) #x ":"
+#define __TOSTR__(x) __STR__(x)
+#define __FILE_LINE__ __FILE__ ":" __TOSTR__(__LINE__)
+
+#define VERIFY(err, val) \
+do {\
+	VERIFY_IPRINTF(__FILE_LINE__"info: calling: " #val "\n");\
+	if (0 == (val)) {\
+		(err) = (err) == 0 ? -1 : (err);\
+		VERIFY_EPRINTF(__FILE_LINE__"error: %d: " #val "\n", (err));\
+	} else {\
+		VERIFY_IPRINTF(__FILE_LINE__"info: passed: " #val "\n");\
+	} \
+} while (0)
+#endif
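+
+/*
+ * Usage sketch (illustration only): VERIFY() evaluates its second argument
+ * and, when the result is zero, latches -1 into err unless err already
+ * holds an error code:
+ *
+ *	int err = 0;
+ *
+ *	VERIFY(err, 0 == copy_from_user(dst, src, len));
+ *	if (err)
+ *		return err;
+ */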
+
+#define remote_arg64_t    union remote_arg64
+
+struct remote_buf64 {
+	uint64_t pv;
+	uint64_t len;
+};
+
+union remote_arg64 {
+	struct remote_buf64	buf;
+	uint32_t h;
+};
+
+#define remote_arg_t    union remote_arg
+
+struct remote_buf {
+	void *pv;		/* buffer pointer */
+	size_t len;		/* length of buffer */
+};
+
+union remote_arg {
+	struct remote_buf buf;	/* buffer info */
+	uint32_t h;		/* remote handle */
+};
+
+struct fastrpc_ioctl_invoke {
+	uint32_t handle;	/* remote handle */
+	uint32_t sc;		/* scalars describing the data */
+	remote_arg_t *pra;	/* remote arguments list */
+};
+
+struct fastrpc_ioctl_invoke_fd {
+	struct fastrpc_ioctl_invoke inv;
+	int *fds;		/* fd list */
+};
+
+struct fastrpc_ioctl_invoke_attrs {
+	struct fastrpc_ioctl_invoke inv;
+	int *fds;		/* fd list */
+	unsigned *attrs;	/* attribute list */
+};
+
+struct fastrpc_ioctl_init {
+	uint32_t flags;		/* one of FASTRPC_INIT_* macros */
+	uintptr_t file;		/* pointer to elf file */
+	uint32_t filelen;	/* elf file length */
+	int32_t filefd;		/* ION fd for the file */
+	uintptr_t mem;		/* mem for the PD */
+	uint32_t memlen;	/* mem length */
+	int32_t memfd;		/* ION fd for the mem */
+};
+
+struct fastrpc_ioctl_init_attrs {
+	struct fastrpc_ioctl_init init;
+	int attrs;
+	unsigned int siglen;
+};
+
+struct fastrpc_ioctl_munmap {
+	uintptr_t vaddrout;	/* address to unmap */
+	size_t size;		/* size */
+};
+
+struct fastrpc_ioctl_mmap {
+	int fd;				/* ion fd */
+	uint32_t flags;			/* flags for dsp to map with */
+	uintptr_t vaddrin;		/* optional virtual address */
+	size_t size;			/* size */
+	uintptr_t vaddrout;		/* DSP's virtual address */
+};
+
+struct fastrpc_ioctl_perf {			/* kernel performance data */
+	uintptr_t data;
+	uint32_t numkeys;
+	uintptr_t keys;
+};
+
+struct smq_null_invoke {
+	uint64_t ctx;		/* invoke caller context */
+	uint32_t handle;	/* handle to invoke */
+	uint32_t sc;		/* scalars structure describing the data */
+};
+
+struct smq_phy_page {
+	uint64_t addr;		/* physical address */
+	uint64_t size;		/* size of contiguous region */
+};
+
+struct smq_invoke_buf {
+	int num;		/* number of contiguous regions */
+	int pgidx;		/* index to start of contiguous region */
+};
+
+struct smq_invoke {
+	struct smq_null_invoke header;
+	struct smq_phy_page page;   /* remote arg and list of pages address */
+};
+
+struct smq_msg {
+	uint32_t pid;           /* process group id */
+	uint32_t tid;           /* thread id */
+	struct smq_invoke invoke;
+};
+
+struct smq_invoke_rsp {
+	uint64_t ctx;		/* invoke caller context */
+	int retval;		/* invoke return value */
+};
+
+static inline struct smq_invoke_buf *smq_invoke_buf_start(remote_arg64_t *pra,
+							uint32_t sc)
+{
+	unsigned int len = REMOTE_SCALARS_LENGTH(sc);
+
+	return (struct smq_invoke_buf *)(&pra[len]);
+}
+
+static inline struct smq_phy_page *smq_phy_page_start(uint32_t sc,
+						struct smq_invoke_buf *buf)
+{
+	uint64_t nTotal = REMOTE_SCALARS_INBUFS(sc) +
+			  REMOTE_SCALARS_OUTBUFS(sc);
+
+	return (struct smq_phy_page *)(&buf[nTotal]);
+}
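+
+/*
+ * Shared-memory layout implied by the two helpers above (illustration):
+ *
+ *	[ remote_arg64_t x N | smq_invoke_buf ... | smq_phy_page ... ]
+ *
+ * where N is REMOTE_SCALARS_LENGTH(sc): the invoke-buf entries start right
+ * after the N remote args, and the page entries follow the invoke-buf
+ * entries of the in/out buffers.
+ */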
+
+#endif
diff -Nruw linux-4.4.115-fbx/drivers/char/diag./diagchar_core.c linux-4.4.115-fbx/drivers/char/diag/diagchar_core.c
--- linux-4.4.115-fbx/drivers/char/diag./diagchar_core.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/char/diag/diagchar_core.c	2019-10-29 09:26:23.449201280 +0100
@@ -0,0 +1,3890 @@
+/* Copyright (c) 2008-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/cdev.h>
+#include <linux/fs.h>
+#include <linux/device.h>
+#include <linux/delay.h>
+#include <linux/uaccess.h>
+#include <linux/diagchar.h>
+#include <linux/sched.h>
+#include <linux/ratelimit.h>
+#include <linux/timer.h>
+#include <linux/platform_device.h>
+#include <linux/msm_mhi.h>
+#ifdef CONFIG_DIAG_OVER_USB
+#include <linux/usb/usbdiag.h>
+#endif
+#include <asm/current.h>
+#include "diagchar_hdlc.h"
+#include "diagmem.h"
+#include "diagchar.h"
+#include "diagfwd.h"
+#include "diagfwd_cntl.h"
+#include "diag_dci.h"
+#include "diag_debugfs.h"
+#include "diag_masks.h"
+#include "diagfwd_bridge.h"
+#include "diag_usb.h"
+#include "diag_memorydevice.h"
+#include "diag_mux.h"
+#include "diag_ipc_logging.h"
+#include "diagfwd_peripheral.h"
+
+#include <linux/coresight-stm.h>
+#include <linux/kernel.h>
+#ifdef CONFIG_COMPAT
+#include <linux/compat.h>
+#endif
+
+MODULE_DESCRIPTION("Diag Char Driver");
+MODULE_LICENSE("GPL v2");
+MODULE_VERSION("1.0");
+
+#define MIN_SIZ_ALLOW 4
+#define INIT	1
+#define EXIT	-1
+struct diagchar_dev *driver;
+struct diagchar_priv {
+	int pid;
+};
+
+#define USER_SPACE_RAW_DATA	0
+#define USER_SPACE_HDLC_DATA	1
+
+/* Memory pool variables */
+/* Used for copying any incoming packet from user space clients. */
+static unsigned int poolsize = 12;
+module_param(poolsize, uint, 0);
+
+/*
+ * Used for HDLC encoding packets coming from the user
+ * space.
+ */
+static unsigned int poolsize_hdlc = 10;
+module_param(poolsize_hdlc, uint, 0);
+
+/*
+ * This is used for incoming DCI requests from the user space clients.
+ * Don't expose itemsize as it is internal.
+ */
+static unsigned int poolsize_user = 8;
+module_param(poolsize_user, uint, 0);
+
+/*
+ * USB structures allocated for writing Diag data generated on the apps
+ * processor out to USB. Don't expose itemsize as it is constant.
+ */
+static unsigned int itemsize_usb_apps = sizeof(struct diag_request);
+static unsigned int poolsize_usb_apps = 10;
+module_param(poolsize_usb_apps, uint, 0);
+
+/* Used for DCI client buffers. Don't expose itemsize as it is constant. */
+static unsigned int poolsize_dci = 10;
+module_param(poolsize_dci, uint, 0);
+
+#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
+/* Used for reading data from the remote device. */
+static unsigned int itemsize_mdm = DIAG_MDM_BUF_SIZE;
+static unsigned int poolsize_mdm = 18;
+module_param(itemsize_mdm, uint, 0);
+module_param(poolsize_mdm, uint, 0);
+
+/*
+ * Used for reading DCI data from the remote device.
+ * Don't expose poolsize for DCI data; there is only one read buffer.
+ */
+static unsigned int itemsize_mdm_dci = DIAG_MDM_BUF_SIZE;
+static unsigned int poolsize_mdm_dci = 1;
+module_param(itemsize_mdm_dci, uint, 0);
+
+/*
+ * Used for USB structures associated with a remote device.
+ * Don't expose the itemsize since it is constant.
+ */
+static unsigned int itemsize_mdm_usb = sizeof(struct diag_request);
+static unsigned int poolsize_mdm_usb = 18;
+module_param(poolsize_mdm_usb, uint, 0);
+
+/*
+ * Used for writing read DCI data to remote peripherals. Don't
+ * expose poolsize for DCI data. There is only one read
+ * buffer. Add 6 bytes for DCI header information: Start (1),
+ * Version (1), Length (2), Tag (2)
+ */
+static unsigned int itemsize_mdm_dci_write = DIAG_MDM_DCI_BUF_SIZE;
+static unsigned int poolsize_mdm_dci_write = 1;
+module_param(itemsize_mdm_dci_write, uint, 0);
+
+/*
+ * Used for USB structures associated with a remote SMUX device.
+ * Don't expose the itemsize since it is constant.
+ */
+static unsigned int itemsize_qsc_usb = sizeof(struct diag_request);
+static unsigned int poolsize_qsc_usb = 8;
+module_param(poolsize_qsc_usb, uint, 0);
+#endif
+
+/* This is the max number of user-space clients supported at initialization */
+static unsigned int max_clients = 15;
+module_param(max_clients, uint, 0);
+
+/* Timer variables */
+static struct timer_list drain_timer;
+static int timer_in_progress;
+
+/*
+ * Diag mask clear variable, used for clearing the masks upon
+ * USB disconnection and when stopping ODL.
+ */
+static int diag_mask_clear_param = 1;
+module_param(diag_mask_clear_param, int, 0644);
+
+struct diag_apps_data_t {
+	void *buf;
+	uint32_t len;
+	int ctxt;
+};
+
+static struct diag_apps_data_t hdlc_data;
+static struct diag_apps_data_t non_hdlc_data;
+static struct mutex apps_data_mutex;
+
+#define DIAGPKT_MAX_DELAYED_RSP 0xFFFF
+
+#ifdef DIAG_DEBUG
+uint16_t diag_debug_mask;
+void *diag_ipc_log;
+#endif
+
+static void diag_md_session_close(int pid);
+
+/*
+ * Returns the next delayed rsp id. If wrapping is enabled, the id wraps
+ * back to 1 after reaching DIAGPKT_MAX_DELAYED_RSP; otherwise it
+ * saturates at DIAGPKT_MAX_DELAYED_RSP.
+ */
+static uint16_t diag_get_next_delayed_rsp_id(void)
+{
+	uint16_t rsp_id = 0;
+
+	mutex_lock(&driver->delayed_rsp_mutex);
+	rsp_id = driver->delayed_rsp_id;
+	if (rsp_id < DIAGPKT_MAX_DELAYED_RSP)
+		rsp_id++;
+	else {
+		if (wrap_enabled) {
+			rsp_id = 1;
+			wrap_count++;
+		} else
+			rsp_id = DIAGPKT_MAX_DELAYED_RSP;
+	}
+	driver->delayed_rsp_id = rsp_id;
+	mutex_unlock(&driver->delayed_rsp_mutex);
+
+	return rsp_id;
+}
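+
+/*
+ * Example (illustration only): if the current id is 0xFFFE, the next call
+ * returns 0xFFFF; the call after that returns 1 and bumps wrap_count when
+ * wrap_enabled is set, or keeps returning DIAGPKT_MAX_DELAYED_RSP
+ * otherwise.
+ */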
+
+static int diag_switch_logging(struct diag_logging_mode_param_t *param);
+
+#define COPY_USER_SPACE_OR_EXIT(buf, data, length)		\
+do {								\
+	if ((count < ret+length) || (copy_to_user(buf,		\
+			(void *)&data, length))) {		\
+		ret = -EFAULT;					\
+		goto exit;					\
+	}							\
+	ret += length;						\
+} while (0)
+
+#define COPY_USER_SPACE_OR_ERR(buf, data, length)		\
+do {								\
+	if ((count < ret+length) || (copy_to_user(buf,		\
+			(void *)&data, length))) {		\
+		ret = -EFAULT;					\
+		break;						\
+	}							\
+	ret += length;						\
+} while (0)
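+
+/*
+ * Usage sketch (illustration only): both helpers expect the caller to
+ * provide local variables ret and count, and COPY_USER_SPACE_OR_EXIT
+ * additionally expects an exit label, as in diag_copy_dci() below:
+ *
+ *	int ret = 0;
+ *
+ *	COPY_USER_SPACE_OR_EXIT(buf + 8, total_data_len, 4);
+ *	...
+ * exit:
+ *	return ret;
+ */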
+
+static void drain_timer_func(unsigned long data)
+{
+	queue_work(driver->diag_wq, &driver->diag_drain_work);
+}
+
+static void diag_drain_apps_data(struct diag_apps_data_t *data)
+{
+	int err = 0;
+
+	if (!data || !data->buf)
+		return;
+
+	err = diag_mux_write(DIAG_LOCAL_PROC, data->buf, data->len,
+			     data->ctxt);
+	if (err)
+		diagmem_free(driver, data->buf, POOL_TYPE_HDLC);
+
+	data->buf = NULL;
+	data->len = 0;
+}
+
+void diag_update_user_client_work_fn(struct work_struct *work)
+{
+	diag_update_userspace_clients(HDLC_SUPPORT_TYPE);
+}
+
+static void diag_update_md_client_work_fn(struct work_struct *work)
+{
+	diag_update_md_clients(HDLC_SUPPORT_TYPE);
+}
+
+void diag_drain_work_fn(struct work_struct *work)
+{
+	struct diag_md_session_t *session_info = NULL;
+	uint8_t hdlc_disabled = 0;
+
+	timer_in_progress = 0;
+	mutex_lock(&apps_data_mutex);
+	mutex_lock(&driver->md_session_lock);
+	session_info = diag_md_session_get_peripheral(APPS_DATA);
+	if (session_info)
+		hdlc_disabled = session_info->hdlc_disabled;
+	else
+		hdlc_disabled = driver->hdlc_disabled;
+	mutex_unlock(&driver->md_session_lock);
+	if (!hdlc_disabled)
+		diag_drain_apps_data(&hdlc_data);
+	else
+		diag_drain_apps_data(&non_hdlc_data);
+	mutex_unlock(&apps_data_mutex);
+}
+
+void check_drain_timer(void)
+{
+	int ret = 0;
+
+	if (!timer_in_progress) {
+		timer_in_progress = 1;
+		ret = mod_timer(&drain_timer, jiffies + msecs_to_jiffies(200));
+	}
+}
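+
+/*
+ * Usage note (illustration): callers invoke check_drain_timer() after
+ * buffering apps data; the one-shot 200 ms timer coalesces bursts of
+ * packets into a single diag_drain_work run instead of draining on
+ * every write.
+ */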
+
+void diag_add_client(int i, struct file *file)
+{
+	struct diagchar_priv *diagpriv_data;
+
+	driver->client_map[i].pid = current->tgid;
+	diagpriv_data = kmalloc(sizeof(struct diagchar_priv),
+							GFP_KERNEL);
+	if (diagpriv_data)
+		diagpriv_data->pid = current->tgid;
+	file->private_data = diagpriv_data;
+	strlcpy(driver->client_map[i].name, current->comm, 20);
+	driver->client_map[i].name[19] = '\0';
+}
+
+static void diag_mempool_init(void)
+{
+	uint32_t itemsize = DIAG_MAX_REQ_SIZE;
+	uint32_t itemsize_hdlc = DIAG_MAX_HDLC_BUF_SIZE + APF_DIAG_PADDING;
+	uint32_t itemsize_dci = IN_BUF_SIZE;
+	uint32_t itemsize_user = DCI_REQ_BUF_SIZE;
+
+	itemsize += ((DCI_HDR_SIZE > CALLBACK_HDR_SIZE) ? DCI_HDR_SIZE :
+		     CALLBACK_HDR_SIZE);
+	diagmem_setsize(POOL_TYPE_COPY, itemsize, poolsize);
+	diagmem_setsize(POOL_TYPE_HDLC, itemsize_hdlc, poolsize_hdlc);
+	diagmem_setsize(POOL_TYPE_DCI, itemsize_dci, poolsize_dci);
+	diagmem_setsize(POOL_TYPE_USER, itemsize_user, poolsize_user);
+
+	diagmem_init(driver, POOL_TYPE_COPY);
+	diagmem_init(driver, POOL_TYPE_HDLC);
+	diagmem_init(driver, POOL_TYPE_USER);
+	diagmem_init(driver, POOL_TYPE_DCI);
+}
+
+static void diag_mempool_exit(void)
+{
+	diagmem_exit(driver, POOL_TYPE_COPY);
+	diagmem_exit(driver, POOL_TYPE_HDLC);
+	diagmem_exit(driver, POOL_TYPE_USER);
+	diagmem_exit(driver, POOL_TYPE_DCI);
+}
+
+static int diagchar_open(struct inode *inode, struct file *file)
+{
+	int i = 0;
+	void *temp;
+
+	if (driver) {
+		mutex_lock(&driver->diagchar_mutex);
+
+		for (i = 0; i < driver->num_clients; i++)
+			if (driver->client_map[i].pid == 0)
+				break;
+
+		if (i < driver->num_clients) {
+			diag_add_client(i, file);
+		} else {
+			if (i < THRESHOLD_CLIENT_LIMIT) {
+				driver->num_clients++;
+				temp = krealloc(driver->client_map,
+					driver->num_clients *
+					sizeof(struct diag_client_map),
+					GFP_KERNEL);
+				if (!temp)
+					goto fail;
+				driver->client_map = temp;
+				temp = krealloc(driver->data_ready,
+					driver->num_clients * sizeof(int),
+					GFP_KERNEL);
+				if (!temp)
+					goto fail;
+				driver->data_ready = temp;
+				diag_add_client(i, file);
+			} else {
+				mutex_unlock(&driver->diagchar_mutex);
+				pr_err_ratelimited("diag: Max client limit for DIAG reached\n");
+				pr_err_ratelimited("diag: Cannot open handle %s"
+					   " %d", current->comm, current->tgid);
+				for (i = 0; i < driver->num_clients; i++)
+					pr_debug("%d) %s PID=%d", i, driver->
+						client_map[i].name,
+						driver->client_map[i].pid);
+				return -ENOMEM;
+			}
+		}
+		driver->data_ready[i] = 0x0;
+		atomic_set(&driver->data_ready_notif[i], 0);
+		driver->data_ready[i] |= MSG_MASKS_TYPE;
+		atomic_inc(&driver->data_ready_notif[i]);
+		driver->data_ready[i] |= EVENT_MASKS_TYPE;
+		atomic_inc(&driver->data_ready_notif[i]);
+		driver->data_ready[i] |= LOG_MASKS_TYPE;
+		atomic_inc(&driver->data_ready_notif[i]);
+		driver->data_ready[i] |= DCI_LOG_MASKS_TYPE;
+		atomic_inc(&driver->data_ready_notif[i]);
+		driver->data_ready[i] |= DCI_EVENT_MASKS_TYPE;
+		atomic_inc(&driver->data_ready_notif[i]);
+
+		if (driver->ref_count == 0)
+			diag_mempool_init();
+		driver->ref_count++;
+		mutex_unlock(&driver->diagchar_mutex);
+		return 0;
+	}
+	return -ENOMEM;
+
+fail:
+	driver->num_clients--;
+	mutex_unlock(&driver->diagchar_mutex);
+	pr_err_ratelimited("diag: Insufficient memory for new client");
+	return -ENOMEM;
+}
+
+static uint32_t diag_translate_kernel_to_user_mask(uint32_t peripheral_mask)
+{
+	uint32_t ret = 0;
+
+	if (peripheral_mask & MD_PERIPHERAL_MASK(APPS_DATA))
+		ret |= DIAG_CON_APSS;
+	if (peripheral_mask & MD_PERIPHERAL_MASK(PERIPHERAL_MODEM))
+		ret |= DIAG_CON_MPSS;
+	if (peripheral_mask & MD_PERIPHERAL_MASK(PERIPHERAL_LPASS))
+		ret |= DIAG_CON_LPASS;
+	if (peripheral_mask & MD_PERIPHERAL_MASK(PERIPHERAL_WCNSS))
+		ret |= DIAG_CON_WCNSS;
+	if (peripheral_mask & MD_PERIPHERAL_MASK(PERIPHERAL_SENSORS))
+		ret |= DIAG_CON_SENSORS;
+	if (peripheral_mask & MD_PERIPHERAL_MASK(PERIPHERAL_WDSP))
+		ret |= DIAG_CON_WDSP;
+	if (peripheral_mask & MD_PERIPHERAL_MASK(PERIPHERAL_CDSP))
+		ret |= DIAG_CON_CDSP;
+	if (peripheral_mask & MD_PERIPHERAL_MASK(UPD_WLAN))
+		ret |= DIAG_CON_UPD_WLAN;
+	if (peripheral_mask & MD_PERIPHERAL_MASK(UPD_AUDIO))
+		ret |= DIAG_CON_UPD_AUDIO;
+	if (peripheral_mask & MD_PERIPHERAL_MASK(UPD_SENSORS))
+		ret |= DIAG_CON_UPD_SENSORS;
+	return ret;
+}
+
+uint8_t diag_mask_to_pd_value(uint32_t peripheral_mask)
+{
+	uint8_t upd = 0;
+	uint32_t pd_mask = 0;
+
+	pd_mask = diag_translate_kernel_to_user_mask(peripheral_mask);
+	switch (pd_mask) {
+	case DIAG_CON_UPD_WLAN:
+		upd = UPD_WLAN;
+		break;
+	case DIAG_CON_UPD_AUDIO:
+		upd = UPD_AUDIO;
+		break;
+	case DIAG_CON_UPD_SENSORS:
+		upd = UPD_SENSORS;
+		break;
+	default:
+		DIAG_LOG(DIAG_DEBUG_MASKS,
+		"asking for mask update with no pd mask set\n");
+	}
+	return upd;
+}
+
+int diag_mask_param(void)
+{
+	return diag_mask_clear_param;
+}
+
+void diag_clear_masks(int pid)
+{
+	int ret;
+	char cmd_disable_log_mask[] = { 0x73, 0, 0, 0, 0, 0, 0, 0};
+	char cmd_disable_msg_mask[] = { 0x7D, 0x05, 0, 0, 0, 0, 0, 0};
+	char cmd_disable_event_mask[] = { 0x60, 0};
+
+	DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+	"diag: %s: masks clear request upon %s\n", __func__,
+	((pid) ? "ODL exit" : "USB Disconnection"));
+
+	ret = diag_process_apps_masks(cmd_disable_log_mask,
+			sizeof(cmd_disable_log_mask), pid);
+	ret = diag_process_apps_masks(cmd_disable_msg_mask,
+			sizeof(cmd_disable_msg_mask), pid);
+	ret = diag_process_apps_masks(cmd_disable_event_mask,
+			sizeof(cmd_disable_event_mask), pid);
+	DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+	"diag:%s: masks cleared successfully\n", __func__);
+}
+
+static void diag_close_logging_process(const int pid)
+{
+	int i, j;
+	int session_mask;
+	uint32_t p_mask;
+	struct diag_md_session_t *session_info = NULL;
+	struct diag_logging_mode_param_t params;
+
+	mutex_lock(&driver->md_session_lock);
+	session_info = diag_md_session_get_pid(pid);
+	if (!session_info) {
+		mutex_unlock(&driver->md_session_lock);
+		return;
+	}
+	session_mask = session_info->peripheral_mask;
+	mutex_unlock(&driver->md_session_lock);
+
+	if (diag_mask_clear_param)
+		diag_clear_masks(pid);
+
+	mutex_lock(&driver->diag_maskclear_mutex);
+	driver->mask_clear = 1;
+	mutex_unlock(&driver->diag_maskclear_mutex);
+
+	mutex_lock(&driver->diagchar_mutex);
+
+	p_mask = diag_translate_kernel_to_user_mask(session_mask);
+
+	for (i = 0; i < NUM_MD_SESSIONS; i++)
+		if (MD_PERIPHERAL_MASK(i) & session_mask)
+			diag_mux_close_peripheral(DIAG_LOCAL_PROC, i);
+
+	params.req_mode = USB_MODE;
+	params.mode_param = 0;
+	params.pd_mask = 0;
+	params.peripheral_mask = p_mask;
+
+	if (driver->num_pd_session > 0) {
+		for (i = UPD_WLAN; (i < NUM_MD_SESSIONS); i++) {
+			if (session_mask & MD_PERIPHERAL_MASK(i)) {
+				j = i - UPD_WLAN;
+				driver->pd_session_clear[j] = 1;
+				driver->pd_logging_mode[j] = 0;
+				driver->num_pd_session -= 1;
+				params.pd_mask = p_mask;
+			}
+		}
+	}
+	mutex_lock(&driver->md_session_lock);
+	diag_md_session_close(pid);
+	mutex_unlock(&driver->md_session_lock);
+	diag_switch_logging(&params);
+
+	mutex_unlock(&driver->diagchar_mutex);
+}
+
+static int diag_remove_client_entry(struct file *file)
+{
+	int i = -1;
+	struct diagchar_priv *diagpriv_data = NULL;
+	struct diag_dci_client_tbl *dci_entry = NULL;
+
+	if (!driver)
+		return -ENOMEM;
+
+	mutex_lock(&driver->diag_file_mutex);
+	if (!file) {
+		DIAG_LOG(DIAG_DEBUG_USERSPACE, "Invalid file pointer\n");
+		mutex_unlock(&driver->diag_file_mutex);
+		return -ENOENT;
+	}
+	if (!(file->private_data)) {
+		DIAG_LOG(DIAG_DEBUG_USERSPACE, "Invalid private data\n");
+		mutex_unlock(&driver->diag_file_mutex);
+		return -EINVAL;
+	}
+
+	diagpriv_data = file->private_data;
+
+	/*
+	 * Clean up any DCI registrations if this is a DCI client.
+	 * This especially helps in the case of an ungraceful exit of a DCI
+	 * client, as it removes any pending registrations of that client.
+	 */
+	mutex_lock(&driver->dci_mutex);
+	dci_entry = dci_lookup_client_entry_pid(current->tgid);
+	if (dci_entry)
+		diag_dci_deinit_client(dci_entry);
+	mutex_unlock(&driver->dci_mutex);
+
+	diag_close_logging_process(current->tgid);
+
+	/* Delete the pkt response table entry for the exiting process */
+	diag_cmd_remove_reg_by_pid(current->tgid);
+
+	mutex_lock(&driver->diagchar_mutex);
+	driver->ref_count--;
+	if (driver->ref_count == 0)
+		diag_mempool_exit();
+
+	for (i = 0; i < driver->num_clients; i++) {
+		if (NULL != diagpriv_data && diagpriv_data->pid ==
+						driver->client_map[i].pid) {
+			driver->client_map[i].pid = 0;
+			kfree(diagpriv_data);
+			diagpriv_data = NULL;
+			file->private_data = 0;
+			break;
+		}
+	}
+	mutex_unlock(&driver->diagchar_mutex);
+	mutex_unlock(&driver->diag_file_mutex);
+	return 0;
+}
+
+static int diagchar_close(struct inode *inode, struct file *file)
+{
+	int ret;
+
+	DIAG_LOG(DIAG_DEBUG_USERSPACE, "diag: process exit %s\n",
+		current->comm);
+	ret = diag_remove_client_entry(file);
+	mutex_lock(&driver->diag_maskclear_mutex);
+	driver->mask_clear = 0;
+	mutex_unlock(&driver->diag_maskclear_mutex);
+	return ret;
+}
+
+void diag_record_stats(int type, int flag)
+{
+	struct diag_pkt_stats_t *pkt_stats = NULL;
+
+	switch (type) {
+	case DATA_TYPE_EVENT:
+		pkt_stats = &driver->event_stats;
+		break;
+	case DATA_TYPE_F3:
+		pkt_stats = &driver->msg_stats;
+		break;
+	case DATA_TYPE_LOG:
+		pkt_stats = &driver->log_stats;
+		break;
+	case DATA_TYPE_RESPONSE:
+		if (flag != PKT_DROP)
+			return;
+		pr_err_ratelimited("diag: In %s, dropping response. This shouldn't happen\n",
+				   __func__);
+		return;
+	case DATA_TYPE_DELAYED_RESPONSE:
+		/* No counters to increase for Delayed responses */
+		return;
+	default:
+		pr_err_ratelimited("diag: In %s, invalid pkt_type: %d\n",
+				   __func__, type);
+		return;
+	}
+
+	switch (flag) {
+	case PKT_ALLOC:
+		atomic_add(1, (atomic_t *)&pkt_stats->alloc_count);
+		break;
+	case PKT_DROP:
+		atomic_add(1, (atomic_t *)&pkt_stats->drop_count);
+		break;
+	case PKT_RESET:
+		atomic_set((atomic_t *)&pkt_stats->alloc_count, 0);
+		atomic_set((atomic_t *)&pkt_stats->drop_count, 0);
+		break;
+	default:
+		pr_err_ratelimited("diag: In %s, invalid flag: %d\n",
+				   __func__, flag);
+		return;
+	}
+}
+
+void diag_get_timestamp(char *time_str)
+{
+	struct timeval t;
+	struct tm broken_tm;
+
+	if (!time_str)
+		return;
+	do_gettimeofday(&t);
+	time_to_tm(t.tv_sec, 0, &broken_tm);
+	scnprintf(time_str, DIAG_TS_SIZE, "%d:%d:%d:%ld", broken_tm.tm_hour,
+				broken_tm.tm_min, broken_tm.tm_sec, t.tv_usec);
+}
+
+int diag_get_remote(int remote_info)
+{
+	int val = (remote_info < 0) ? -remote_info : remote_info;
+	int remote_val;
+
+	switch (val) {
+	case MDM:
+	case MDM2:
+	case QSC:
+		remote_val = -remote_info;
+		break;
+	default:
+		remote_val = 0;
+		break;
+	}
+
+	return remote_val;
+}
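+
+/*
+ * Example (illustration only): diag_get_remote(MDM) yields -MDM and
+ * diag_get_remote(-MDM) yields MDM, while ids other than MDM, MDM2 and
+ * QSC yield 0 (local processor).
+ */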
+
+int diag_cmd_chk_polling(struct diag_cmd_reg_entry_t *entry)
+{
+	int polling = DIAG_CMD_NOT_POLLING;
+
+	if (!entry)
+		return -EIO;
+
+	if (entry->cmd_code == DIAG_CMD_NO_SUBSYS) {
+		if (entry->subsys_id == DIAG_CMD_NO_SUBSYS &&
+		    entry->cmd_code_hi >= DIAG_CMD_STATUS &&
+		    entry->cmd_code_lo <= DIAG_CMD_STATUS)
+			polling = DIAG_CMD_POLLING;
+		else if (entry->subsys_id == DIAG_SS_WCDMA &&
+			 entry->cmd_code_hi >= DIAG_CMD_QUERY_CALL &&
+			 entry->cmd_code_lo <= DIAG_CMD_QUERY_CALL)
+			polling = DIAG_CMD_POLLING;
+		else if (entry->subsys_id == DIAG_SS_GSM &&
+			 entry->cmd_code_hi >= DIAG_CMD_QUERY_TMC &&
+			 entry->cmd_code_lo <= DIAG_CMD_QUERY_TMC)
+			polling = DIAG_CMD_POLLING;
+		else if (entry->subsys_id == DIAG_SS_PARAMS &&
+			 entry->cmd_code_hi >= DIAG_DIAG_POLL  &&
+			 entry->cmd_code_lo <= DIAG_DIAG_POLL)
+			polling = DIAG_CMD_POLLING;
+		else if (entry->subsys_id == DIAG_SS_TDSCDMA &&
+			 entry->cmd_code_hi >= DIAG_CMD_TDSCDMA_STATUS &&
+			 entry->cmd_code_lo <= DIAG_CMD_TDSCDMA_STATUS)
+			polling = DIAG_CMD_POLLING;
+	}
+
+	return polling;
+}
+
+static void diag_cmd_invalidate_polling(int change_flag)
+{
+	int polling = DIAG_CMD_NOT_POLLING;
+	struct list_head *start;
+	struct list_head *temp;
+	struct diag_cmd_reg_t *item = NULL;
+
+	if (change_flag == DIAG_CMD_ADD) {
+		if (driver->polling_reg_flag)
+			return;
+	}
+
+	driver->polling_reg_flag = 0;
+	list_for_each_safe(start, temp, &driver->cmd_reg_list) {
+		item = list_entry(start, struct diag_cmd_reg_t, link);
+		if (&item->entry == NULL) {
+			pr_err("diag: In %s, unable to search command\n",
+			       __func__);
+			return;
+		}
+		polling = diag_cmd_chk_polling(&item->entry);
+		if (polling == DIAG_CMD_POLLING) {
+			driver->polling_reg_flag = 1;
+			break;
+		}
+	}
+}
+
+int diag_cmd_add_reg(struct diag_cmd_reg_entry_t *new_entry, uint8_t proc,
+		     int pid)
+{
+	struct diag_cmd_reg_t *new_item = NULL;
+
+	if (!new_entry) {
+		pr_err("diag: In %s, invalid new entry\n", __func__);
+		return -EINVAL;
+	}
+
+	if (proc > APPS_DATA) {
+		pr_err("diag: In %s, invalid peripheral %d\n", __func__, proc);
+		return -EINVAL;
+	}
+
+	if (proc != APPS_DATA)
+		pid = INVALID_PID;
+
+	new_item = kzalloc(sizeof(struct diag_cmd_reg_t), GFP_KERNEL);
+	if (!new_item) {
+		pr_err("diag: In %s, unable to create memory for new command registration\n",
+		       __func__);
+		return -ENOMEM;
+	}
+	kmemleak_not_leak(new_item);
+
+	new_item->pid = pid;
+	new_item->proc = proc;
+	memcpy(&new_item->entry, new_entry,
+	       sizeof(struct diag_cmd_reg_entry_t));
+	INIT_LIST_HEAD(&new_item->link);
+
+	mutex_lock(&driver->cmd_reg_mutex);
+	list_add_tail(&new_item->link, &driver->cmd_reg_list);
+	driver->cmd_reg_count++;
+	diag_cmd_invalidate_polling(DIAG_CMD_ADD);
+	mutex_unlock(&driver->cmd_reg_mutex);
+
+	return 0;
+}
+
+struct diag_cmd_reg_entry_t *diag_cmd_search(
+			struct diag_cmd_reg_entry_t *entry, int proc)
+{
+	struct list_head *start;
+	struct list_head *temp;
+	struct diag_cmd_reg_t *item = NULL;
+	struct diag_cmd_reg_entry_t *temp_entry = NULL;
+
+	if (!entry) {
+		pr_err("diag: In %s, invalid entry\n", __func__);
+		return NULL;
+	}
+
+	list_for_each_safe(start, temp, &driver->cmd_reg_list) {
+		item = list_entry(start, struct diag_cmd_reg_t, link);
+		if (&item->entry == NULL) {
+			pr_err("diag: In %s, unable to search command\n",
+			       __func__);
+			return NULL;
+		}
+		temp_entry = &item->entry;
+		if (temp_entry->cmd_code == entry->cmd_code &&
+		    temp_entry->subsys_id == entry->subsys_id &&
+		    temp_entry->cmd_code_hi >= entry->cmd_code_hi &&
+		    temp_entry->cmd_code_lo <= entry->cmd_code_lo &&
+		    (proc == item->proc || proc == ALL_PROC)) {
+			return &item->entry;
+		} else if (temp_entry->cmd_code == DIAG_CMD_NO_SUBSYS &&
+			   entry->cmd_code == DIAG_CMD_DIAG_SUBSYS) {
+			if (temp_entry->subsys_id == entry->subsys_id &&
+			    temp_entry->cmd_code_hi >= entry->cmd_code_hi &&
+			    temp_entry->cmd_code_lo <= entry->cmd_code_lo &&
+			    (proc == item->proc || proc == ALL_PROC)) {
+				return &item->entry;
+			}
+		} else if (temp_entry->cmd_code == DIAG_CMD_NO_SUBSYS &&
+			   temp_entry->subsys_id == DIAG_CMD_NO_SUBSYS) {
+			if ((temp_entry->cmd_code_hi >= entry->cmd_code) &&
+			    (temp_entry->cmd_code_lo <= entry->cmd_code) &&
+			    (proc == item->proc || proc == ALL_PROC)) {
+				if (entry->cmd_code == MODE_CMD) {
+					if (entry->subsys_id == RESET_ID &&
+						item->proc != APPS_DATA) {
+						continue;
+					}
+					if (entry->subsys_id != RESET_ID &&
+						item->proc == APPS_DATA) {
+						continue;
+					}
+				}
+				return &item->entry;
+			}
+		}
+	}
+
+	return NULL;
+}
+
+void diag_cmd_remove_reg(struct diag_cmd_reg_entry_t *entry, uint8_t proc)
+{
+	struct diag_cmd_reg_t *item = NULL;
+	struct diag_cmd_reg_entry_t *temp_entry;
+
+	if (!entry) {
+		pr_err("diag: In %s, invalid entry\n", __func__);
+		return;
+	}
+
+	mutex_lock(&driver->cmd_reg_mutex);
+	temp_entry = diag_cmd_search(entry, proc);
+	if (temp_entry) {
+		item = container_of(temp_entry, struct diag_cmd_reg_t, entry);
+		if (!item) {
+			mutex_unlock(&driver->cmd_reg_mutex);
+			return;
+		}
+		list_del(&item->link);
+		kfree(item);
+		driver->cmd_reg_count--;
+	}
+	diag_cmd_invalidate_polling(DIAG_CMD_REMOVE);
+	mutex_unlock(&driver->cmd_reg_mutex);
+}
+
+void diag_cmd_remove_reg_by_pid(int pid)
+{
+	struct list_head *start;
+	struct list_head *temp;
+	struct diag_cmd_reg_t *item = NULL;
+
+	mutex_lock(&driver->cmd_reg_mutex);
+	list_for_each_safe(start, temp, &driver->cmd_reg_list) {
+		item = list_entry(start, struct diag_cmd_reg_t, link);
+		if (&item->entry == NULL) {
+			pr_err("diag: In %s, unable to search command\n",
+			       __func__);
+			mutex_unlock(&driver->cmd_reg_mutex);
+			return;
+		}
+		if (item->pid == pid) {
+			list_del(&item->link);
+			kfree(item);
+			driver->cmd_reg_count--;
+		}
+	}
+	mutex_unlock(&driver->cmd_reg_mutex);
+}
+
+void diag_cmd_remove_reg_by_proc(int proc)
+{
+	struct list_head *start;
+	struct list_head *temp;
+	struct diag_cmd_reg_t *item = NULL;
+
+	mutex_lock(&driver->cmd_reg_mutex);
+	list_for_each_safe(start, temp, &driver->cmd_reg_list) {
+		item = list_entry(start, struct diag_cmd_reg_t, link);
+		if (&item->entry == NULL) {
+			pr_err("diag: In %s, unable to search command\n",
+			       __func__);
+			mutex_unlock(&driver->cmd_reg_mutex);
+			return;
+		}
+		if (item->proc == proc) {
+			list_del(&item->link);
+			kfree(item);
+			driver->cmd_reg_count--;
+		}
+	}
+	diag_cmd_invalidate_polling(DIAG_CMD_REMOVE);
+	mutex_unlock(&driver->cmd_reg_mutex);
+}
+
+static int diag_copy_dci(char __user *buf, size_t count,
+			struct diag_dci_client_tbl *entry, int *pret)
+{
+	int total_data_len = 0;
+	int ret = 0;
+	int exit_stat = 1;
+	uint8_t drain_again = 0;
+	struct diag_dci_buffer_t *buf_entry, *temp;
+
+	if (!buf || !entry || !pret)
+		return exit_stat;
+
+	ret = *pret;
+
+	ret += sizeof(int);
+	if (ret >= count) {
+		pr_err("diag: In %s, invalid value for ret: %d, count: %zu\n",
+		       __func__, ret, count);
+		return -EINVAL;
+	}
+
+	mutex_lock(&entry->write_buf_mutex);
+	list_for_each_entry_safe(buf_entry, temp, &entry->list_write_buf,
+								buf_track) {
+
+		if ((ret + buf_entry->data_len) > count) {
+			drain_again = 1;
+			break;
+		}
+
+		list_del(&buf_entry->buf_track);
+		mutex_lock(&buf_entry->data_mutex);
+		if ((buf_entry->data_len > 0) &&
+		    (buf_entry->in_busy) &&
+		    (buf_entry->data)) {
+			if (copy_to_user(buf+ret, (void *)buf_entry->data,
+					 buf_entry->data_len))
+				goto drop;
+			ret += buf_entry->data_len;
+			total_data_len += buf_entry->data_len;
+			diag_ws_on_copy(DIAG_WS_DCI);
+drop:
+			buf_entry->in_busy = 0;
+			buf_entry->data_len = 0;
+			buf_entry->in_list = 0;
+			if (buf_entry->buf_type == DCI_BUF_CMD) {
+				mutex_unlock(&buf_entry->data_mutex);
+				continue;
+			} else if (buf_entry->buf_type == DCI_BUF_SECONDARY) {
+				diagmem_free(driver, buf_entry->data,
+					     POOL_TYPE_DCI);
+				buf_entry->data = NULL;
+				mutex_unlock(&buf_entry->data_mutex);
+				kfree(buf_entry);
+				continue;
+			}
+
+		}
+		mutex_unlock(&buf_entry->data_mutex);
+	}
+
+	if (total_data_len > 0) {
+		/* Copy the total data length */
+		COPY_USER_SPACE_OR_EXIT(buf+8, total_data_len, 4);
+		ret -= 4;
+	} else {
+		pr_debug("diag: In %s, Trying to copy ZERO bytes, total_data_len: %d\n",
+			__func__, total_data_len);
+	}
+
+	exit_stat = 0;
+exit:
+	entry->in_service = 0;
+	mutex_unlock(&entry->write_buf_mutex);
+	*pret = ret;
+	if (drain_again)
+		dci_drain_data(0);
+
+	return exit_stat;
+}
+
+#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
+static int diag_remote_init(void)
+{
+	diagmem_setsize(POOL_TYPE_MDM, itemsize_mdm, poolsize_mdm);
+	diagmem_setsize(POOL_TYPE_MDM2, itemsize_mdm, poolsize_mdm);
+	diagmem_setsize(POOL_TYPE_MDM_DCI, itemsize_mdm_dci, poolsize_mdm_dci);
+	diagmem_setsize(POOL_TYPE_MDM2_DCI, itemsize_mdm_dci,
+			poolsize_mdm_dci);
+	diagmem_setsize(POOL_TYPE_MDM_MUX, itemsize_mdm_usb, poolsize_mdm_usb);
+	diagmem_setsize(POOL_TYPE_MDM2_MUX, itemsize_mdm_usb, poolsize_mdm_usb);
+	diagmem_setsize(POOL_TYPE_MDM_DCI_WRITE, itemsize_mdm_dci_write,
+			poolsize_mdm_dci_write);
+	diagmem_setsize(POOL_TYPE_MDM2_DCI_WRITE, itemsize_mdm_dci_write,
+			poolsize_mdm_dci_write);
+	diagmem_setsize(POOL_TYPE_QSC_MUX, itemsize_qsc_usb,
+			poolsize_qsc_usb);
+	driver->hdlc_encode_buf = kzalloc(DIAG_MAX_HDLC_BUF_SIZE, GFP_KERNEL);
+	if (!driver->hdlc_encode_buf)
+		return -ENOMEM;
+	driver->hdlc_encode_buf_len = 0;
+	return 0;
+}
+
+static void diag_remote_exit(void)
+{
+	kfree(driver->hdlc_encode_buf);
+}
+
+static int diag_send_raw_data_remote(int proc, void *buf, int len,
+				    uint8_t hdlc_flag)
+{
+	int err = 0;
+	int max_len = 0;
+	uint8_t retry_count = 0;
+	uint8_t max_retries = 3;
+	uint16_t payload = 0;
+	struct diag_send_desc_type send = { NULL, NULL, DIAG_STATE_START, 0 };
+	struct diag_hdlc_dest_type enc = { NULL, NULL, 0 };
+	int bridge_index = proc - 1;
+	struct diag_md_session_t *session_info = NULL;
+	uint8_t hdlc_disabled = 0;
+
+	if (!buf)
+		return -EINVAL;
+
+	if (len <= 0) {
+		pr_err("diag: In %s, invalid len: %d", __func__, len);
+		return -EBADMSG;
+	}
+
+	if (bridge_index < 0 || bridge_index >= NUM_REMOTE_DEV) {
+		pr_err("diag: In %s, invalid bridge index: %d\n", __func__,
+			bridge_index);
+		return -EINVAL;
+	}
+
+	do {
+		if (driver->hdlc_encode_buf_len == 0)
+			break;
+		usleep_range(10000, 10100);
+		retry_count++;
+	} while (retry_count < max_retries);
+
+	if (driver->hdlc_encode_buf_len != 0)
+		return -EAGAIN;
+	mutex_lock(&driver->md_session_lock);
+	session_info = diag_md_session_get_peripheral(APPS_DATA);
+	if (session_info)
+		hdlc_disabled = session_info->hdlc_disabled;
+	else
+		hdlc_disabled = driver->hdlc_disabled;
+	mutex_unlock(&driver->md_session_lock);
+	if (hdlc_disabled) {
+		if (len < 4) {
+			pr_err("diag: In %s, invalid len: %d of non_hdlc pkt",
+			__func__, len);
+			return -EBADMSG;
+		}
+		payload = *(uint16_t *)(buf + 2);
+		if (payload > DIAG_MAX_HDLC_BUF_SIZE) {
+			pr_err("diag: Dropping packet, payload size is %d\n",
+				payload);
+			return -EBADMSG;
+		}
+		driver->hdlc_encode_buf_len = payload;
+		/*
+		 * Adding 5 bytes for start (1 byte), version (1 byte),
+		 * payload (2 bytes) and end (1 byte)
+		 */
+		if (len == (payload + 5)) {
+			/*
+			 * Adding 4 bytes for start (1 byte), version (1 byte)
+			 * and payload (2 bytes)
+			 */
+			memcpy(driver->hdlc_encode_buf, buf + 4, payload);
+			goto send_data;
+		} else {
+			pr_err("diag: In %s, invalid len: %d of non_hdlc pkt",
+			__func__, len);
+			return -EBADMSG;
+		}
+	}
+
+	if (hdlc_flag) {
+		if (DIAG_MAX_HDLC_BUF_SIZE < len) {
+			pr_err("diag: Dropping packet, HDLC encoded packet payload size crosses buffer limit. Current payload size %d\n",
+			       len);
+			return -EBADMSG;
+		}
+		driver->hdlc_encode_buf_len = len;
+		memcpy(driver->hdlc_encode_buf, buf, len);
+		goto send_data;
+	}
+
+	/*
+	 * The worst case length will be twice as the incoming packet length.
+	 * Add 3 bytes for CRC bytes (2 bytes) and delimiter (1 byte)
+	 */
+	max_len = (2 * len) + 3;
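+	/*
+	 * e.g. a 100-byte packet may grow to at most 2 * 100 + 3 = 203
+	 * bytes once every byte is escaped and the CRC and trailing
+	 * delimiter are appended.
+	 */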
+	if (DIAG_MAX_HDLC_BUF_SIZE < max_len) {
+		pr_err("diag: Dropping packet, HDLC encoded packet payload size crosses buffer limit. Current payload size %d\n",
+		       max_len);
+		return -EBADMSG;
+	}
+
+	/* Perform HDLC encoding on incoming data */
+	send.state = DIAG_STATE_START;
+	send.pkt = (void *)(buf);
+	send.last = (void *)(buf + len - 1);
+	send.terminate = 1;
+
+	enc.dest = driver->hdlc_encode_buf;
+	enc.dest_last = (void *)(driver->hdlc_encode_buf + max_len - 1);
+	diag_hdlc_encode(&send, &enc);
+	driver->hdlc_encode_buf_len = (int)(enc.dest -
+					(void *)driver->hdlc_encode_buf);
+
+send_data:
+	err = diagfwd_bridge_write(bridge_index, driver->hdlc_encode_buf,
+				   driver->hdlc_encode_buf_len);
+	if (err) {
+		pr_err_ratelimited("diag: Error writing Callback packet to proc: %d, err: %d\n",
+				   proc, err);
+		driver->hdlc_encode_buf_len = 0;
+	}
+
+	return err;
+}
+
+static int diag_process_userspace_remote(int proc, void *buf, int len)
+{
+	int bridge_index = proc - 1;
+
+	if (!buf || len < 0) {
+		pr_err("diag: Invalid input in %s, buf: %pK, len: %d\n",
+		       __func__, buf, len);
+		return -EINVAL;
+	}
+
+	if (bridge_index < 0 || bridge_index >= NUM_REMOTE_DEV) {
+		pr_err("diag: In %s, invalid bridge index: %d\n", __func__,
+		       bridge_index);
+		return -EINVAL;
+	}
+
+	driver->user_space_data_busy = 1;
+	return diagfwd_bridge_write(bridge_index, buf, len);
+}
+#else
+static int diag_remote_init(void)
+{
+	return 0;
+}
+
+static void diag_remote_exit(void)
+{
+	return;
+}
+
+int diagfwd_bridge_init(void)
+{
+	return 0;
+}
+
+void diagfwd_bridge_exit(void)
+{
+	return;
+}
+
+uint16_t diag_get_remote_device_mask(void)
+{
+	return 0;
+}
+
+static int diag_send_raw_data_remote(int proc, void *buf, int len,
+				    uint8_t hdlc_flag)
+{
+	return -EINVAL;
+}
+
+static int diag_process_userspace_remote(int proc, void *buf, int len)
+{
+	return 0;
+}
+#endif
+
+static int mask_request_validate(unsigned char mask_buf[])
+{
+	uint8_t packet_id;
+	uint8_t subsys_id;
+	uint16_t ss_cmd;
+
+	packet_id = mask_buf[0];
+
+	if (packet_id == DIAG_CMD_DIAG_SUBSYS_DELAY) {
+		subsys_id = mask_buf[1];
+		ss_cmd = *(uint16_t *)(mask_buf + 2);
+		switch (subsys_id) {
+		case DIAG_SS_DIAG:
+			if ((ss_cmd == DIAG_SS_FILE_READ_MODEM) ||
+				(ss_cmd == DIAG_SS_FILE_READ_ADSP) ||
+				(ss_cmd == DIAG_SS_FILE_READ_WCNSS) ||
+				(ss_cmd == DIAG_SS_FILE_READ_SLPI) ||
+				(ss_cmd == DIAG_SS_FILE_READ_APPS))
+				return 1;
+			break;
+		default:
+			return 0;
+		}
+	} else if (packet_id == 0x4B) {
+		subsys_id = mask_buf[1];
+		ss_cmd = *(uint16_t *)(mask_buf + 2);
+		/* Packets with SSID which are allowed */
+		switch (subsys_id) {
+		case 0x04: /* DIAG_SUBSYS_WCDMA */
+			if ((ss_cmd == 0) || (ss_cmd == 0xF))
+				return 1;
+			break;
+		case 0x08: /* DIAG_SUBSYS_GSM */
+			if ((ss_cmd == 0) || (ss_cmd == 0x1))
+				return 1;
+			break;
+		case 0x09: /* DIAG_SUBSYS_UMTS */
+		case 0x0F: /* DIAG_SUBSYS_CM */
+			if (ss_cmd == 0)
+				return 1;
+			break;
+		case 0x0C: /* DIAG_SUBSYS_OS */
+			if ((ss_cmd == 2) || (ss_cmd == 0x100))
+				return 1; /* MPU and APU */
+			break;
+		case 0x12: /* DIAG_SUBSYS_DIAG_SERV */
+			if ((ss_cmd == 0) || (ss_cmd == 0x6) || (ss_cmd == 0x7))
+				return 1;
+			else if (ss_cmd == 0x218) /* HDLC Disabled Command */
+				return 0;
+			else if (ss_cmd == DIAG_GET_TIME_API)
+				return 1;
+			else if (ss_cmd == DIAG_SET_TIME_API)
+				return 1;
+			else if (ss_cmd == DIAG_SWITCH_COMMAND)
+				return 1;
+			else if (ss_cmd == DIAG_BUFFERING_MODE)
+				return 1;
+			break;
+		case 0x13: /* DIAG_SUBSYS_FS */
+			if ((ss_cmd == 0) || (ss_cmd == 0x1))
+				return 1;
+			break;
+		default:
+			return 0;
+		}
+	} else {
+		switch (packet_id) {
+		case 0x00:    /* Version Number */
+		case 0x0C:    /* CDMA status packet */
+		case 0x1C:    /* Diag Version */
+		case 0x1D:    /* Time Stamp */
+		case 0x60:    /* Event Report Control */
+		case 0x63:    /* Status snapshot */
+		case 0x73:    /* Logging Configuration */
+		case 0x7C:    /* Extended build ID */
+		case 0x7D:    /* Extended Message configuration */
+		case 0x81:    /* Event get mask */
+		case 0x82:    /* Set the event mask */
+			return 1;
+		default:
+			return 0;
+		}
+	}
+	return 0;
+}
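+
+/*
+ * Example (illustration only): the { 0x73, ... } log-mask command sent by
+ * diag_clear_masks() above is accepted via the 0x73 (Logging
+ * Configuration) case, whereas an unlisted packet id is rejected with 0.
+ */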
+
+static void diag_md_session_init(void)
+{
+	int i;
+
+	mutex_init(&driver->md_session_lock);
+	driver->md_session_mask = 0;
+	driver->md_session_mode = DIAG_MD_NONE;
+	for (i = 0; i < NUM_MD_SESSIONS; i++)
+		driver->md_session_map[i] = NULL;
+}
+
+static void diag_md_session_exit(void)
+{
+	int i;
+	struct diag_md_session_t *session_info = NULL;
+
+	for (i = 0; i < NUM_MD_SESSIONS; i++) {
+		if (driver->md_session_map[i]) {
+			session_info = driver->md_session_map[i];
+			diag_log_mask_free(session_info->log_mask);
+			kfree(session_info->log_mask);
+			session_info->log_mask = NULL;
+			diag_msg_mask_free(session_info->msg_mask,
+				session_info);
+			kfree(session_info->msg_mask);
+			session_info->msg_mask = NULL;
+			diag_event_mask_free(session_info->event_mask);
+			kfree(session_info->event_mask);
+			session_info->event_mask = NULL;
+			kfree(session_info);
+			session_info = NULL;
+			driver->md_session_map[i] = NULL;
+		}
+	}
+	mutex_destroy(&driver->md_session_lock);
+	driver->md_session_mask = 0;
+	driver->md_session_mode = DIAG_MD_NONE;
+}
+
+int diag_md_session_create(int mode, int peripheral_mask, int proc)
+{
+	int i;
+	int err = 0;
+	struct diag_md_session_t *new_session = NULL;
+
+	/*
+	 * If a session is running with a peripheral mask and a new session
+	 * request comes in with same peripheral mask value then return
+	 * invalid param
+	 */
+	if (driver->md_session_mode == DIAG_MD_PERIPHERAL &&
+	    (driver->md_session_mask & peripheral_mask) != 0)
+		return -EINVAL;
+
+	mutex_lock(&driver->md_session_lock);
+	new_session = kzalloc(sizeof(struct diag_md_session_t), GFP_KERNEL);
+	if (!new_session) {
+		mutex_unlock(&driver->md_session_lock);
+		return -ENOMEM;
+	}
+	new_session->peripheral_mask = 0;
+	new_session->pid = current->tgid;
+	new_session->task = current;
+	new_session->md_client_thread_info = current_thread_info();
+	new_session->log_mask = kzalloc(sizeof(struct diag_mask_info),
+					GFP_KERNEL);
+	if (!new_session->log_mask) {
+		err = -ENOMEM;
+		goto fail_peripheral;
+	}
+	new_session->event_mask = kzalloc(sizeof(struct diag_mask_info),
+					  GFP_KERNEL);
+	if (!new_session->event_mask) {
+		err = -ENOMEM;
+		goto fail_peripheral;
+	}
+	new_session->msg_mask = kzalloc(sizeof(struct diag_mask_info),
+					GFP_KERNEL);
+	if (!new_session->msg_mask) {
+		err = -ENOMEM;
+		goto fail_peripheral;
+	}
+
+	err = diag_log_mask_copy(new_session->log_mask, &log_mask);
+	if (err) {
+		DIAG_LOG(DIAG_DEBUG_USERSPACE,
+			 "return value of log copy. err %d\n", err);
+		goto fail_peripheral;
+	}
+	err = diag_event_mask_copy(new_session->event_mask, &event_mask);
+	if (err) {
+		DIAG_LOG(DIAG_DEBUG_USERSPACE,
+			 "return value of event copy. err %d\n", err);
+		goto fail_peripheral;
+	}
+	new_session->msg_mask_tbl_count = 0;
+	err = diag_msg_mask_copy(new_session, new_session->msg_mask,
+		&msg_mask);
+	if (err) {
+		DIAG_LOG(DIAG_DEBUG_USERSPACE,
+			 "return value of msg copy. err %d\n", err);
+		goto fail_peripheral;
+	}
+	for (i = 0; i < NUM_MD_SESSIONS; i++) {
+		if ((MD_PERIPHERAL_MASK(i) & peripheral_mask) == 0)
+			continue;
+		if (driver->md_session_map[i] != NULL) {
+			DIAG_LOG(DIAG_DEBUG_USERSPACE,
+				 "another instance present for %d\n", i);
+			err = -EEXIST;
+			goto fail_peripheral;
+		}
+		new_session->peripheral_mask |= MD_PERIPHERAL_MASK(i);
+		driver->md_session_map[i] = new_session;
+		driver->md_session_mask |= MD_PERIPHERAL_MASK(i);
+	}
+	setup_timer(&new_session->hdlc_reset_timer,
+		diag_md_hdlc_reset_timer_func,
+		new_session->pid);
+
+	driver->md_session_mode = DIAG_MD_PERIPHERAL;
+	mutex_unlock(&driver->md_session_lock);
+	DIAG_LOG(DIAG_DEBUG_USERSPACE,
+		 "created session in peripheral mode\n");
+	return 0;
+
+fail_peripheral:
+	diag_log_mask_free(new_session->log_mask);
+	kfree(new_session->log_mask);
+	new_session->log_mask = NULL;
+	diag_event_mask_free(new_session->event_mask);
+	kfree(new_session->event_mask);
+	new_session->event_mask = NULL;
+	diag_msg_mask_free(new_session->msg_mask,
+		new_session);
+	kfree(new_session->msg_mask);
+	new_session->msg_mask = NULL;
+	kfree(new_session);
+	new_session = NULL;
+	mutex_unlock(&driver->md_session_lock);
+	return err;
+}
+
+static void diag_md_session_close(int pid)
+{
+	int i;
+	uint8_t found = 0;
+	struct diag_md_session_t *session_info = NULL;
+
+	session_info = diag_md_session_get_pid(pid);
+	if (!session_info)
+		return;
+
+	for (i = 0; i < NUM_MD_SESSIONS; i++) {
+		if (driver->md_session_map[i] != session_info)
+			continue;
+		driver->md_session_map[i] = NULL;
+		driver->md_session_mask &= ~session_info->peripheral_mask;
+	}
+	diag_log_mask_free(session_info->log_mask);
+	kfree(session_info->log_mask);
+	session_info->log_mask = NULL;
+	diag_msg_mask_free(session_info->msg_mask,
+		session_info);
+	kfree(session_info->msg_mask);
+	session_info->msg_mask = NULL;
+	diag_event_mask_free(session_info->event_mask);
+	kfree(session_info->event_mask);
+	session_info->event_mask = NULL;
+	del_timer(&session_info->hdlc_reset_timer);
+
+	for (i = 0; i < NUM_MD_SESSIONS && !found; i++) {
+		if (driver->md_session_map[i] != NULL)
+			found = 1;
+	}
+
+	driver->md_session_mode = (found) ? DIAG_MD_PERIPHERAL : DIAG_MD_NONE;
+	kfree(session_info);
+	session_info = NULL;
+	DIAG_LOG(DIAG_DEBUG_USERSPACE, "cleared up session\n");
+}
+
+struct diag_md_session_t *diag_md_session_get_pid(int pid)
+{
+	int i;
+
+	if (pid <= 0)
+		return NULL;
+	for (i = 0; i < NUM_MD_SESSIONS; i++) {
+		if (driver->md_session_map[i] &&
+		    driver->md_session_map[i]->pid == pid)
+			return driver->md_session_map[i];
+	}
+	return NULL;
+}
+
+struct diag_md_session_t *diag_md_session_get_peripheral(uint8_t peripheral)
+{
+	if (peripheral >= NUM_MD_SESSIONS)
+		return NULL;
+	return driver->md_session_map[peripheral];
+}
+
+static int diag_md_peripheral_switch(int pid,
+				int peripheral_mask, int req_mode)
+{
+	int i, bit = 0;
+	struct diag_md_session_t *session_info = NULL;
+
+	session_info = diag_md_session_get_pid(pid);
+	if (!session_info)
+		return -EINVAL;
+	if (req_mode != DIAG_USB_MODE && req_mode != DIAG_MEMORY_DEVICE_MODE)
+		return -EINVAL;
+
+	/*
+	 * check that md_session_map for i == session_info,
+	 * if not then race condition occurred and bail
+	 */
+	for (i = 0; i < NUM_MD_SESSIONS; i++) {
+		bit = MD_PERIPHERAL_MASK(i) & peripheral_mask;
+		if (!bit)
+			continue;
+		if (req_mode == DIAG_USB_MODE) {
+			if (driver->md_session_map[i] != session_info)
+				return -EINVAL;
+			driver->md_session_map[i] = NULL;
+			driver->md_session_mask &= ~bit;
+			session_info->peripheral_mask &= ~bit;
+
+		} else {
+			if (driver->md_session_map[i] != NULL)
+				return -EINVAL;
+			driver->md_session_map[i] = session_info;
+			driver->md_session_mask |= bit;
+			session_info->peripheral_mask |= bit;
+
+		}
+	}
+
+	driver->md_session_mode = DIAG_MD_PERIPHERAL;
+	DIAG_LOG(DIAG_DEBUG_USERSPACE, "Changed Peripherals:0x%x to mode:%d\n",
+		peripheral_mask, req_mode);
+	return 0;
+}
+
+static int diag_md_session_check(int curr_mode, int req_mode,
+				 const struct diag_logging_mode_param_t *param,
+				 uint8_t *change_mode)
+{
+	int i, bit = 0, err = 0, peripheral_mask = 0;
+	int change_mask = 0;
+	struct diag_md_session_t *session_info = NULL;
+
+	if (!param || !change_mode)
+		return -EIO;
+
+	*change_mode = 0;
+
+	switch (curr_mode) {
+	case DIAG_USB_MODE:
+	case DIAG_MEMORY_DEVICE_MODE:
+	case DIAG_MULTI_MODE:
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	if (req_mode != DIAG_USB_MODE && req_mode != DIAG_MEMORY_DEVICE_MODE)
+		return -EINVAL;
+
+	if (req_mode == DIAG_USB_MODE) {
+		if (curr_mode == DIAG_USB_MODE)
+			return 0;
+		mutex_lock(&driver->md_session_lock);
+		if (driver->md_session_mode == DIAG_MD_NONE &&
+		    driver->md_session_mask == 0 && driver->logging_mask) {
+			*change_mode = 1;
+			mutex_unlock(&driver->md_session_lock);
+			return 0;
+		}
+		/*
+		 * curr_mode is either DIAG_MULTI_MODE or DIAG_MD_MODE
+		 * Check if requested peripherals are already in usb mode
+		 */
+		for (i = 0; i < NUM_MD_SESSIONS; i++) {
+			bit = MD_PERIPHERAL_MASK(i) & param->peripheral_mask;
+			if (!bit)
+				continue;
+			if (bit & driver->logging_mask)
+				change_mask |= bit;
+		}
+		if (!change_mask) {
+			mutex_unlock(&driver->md_session_lock);
+			return 0;
+		}
+
+		/*
+		 * Change is needed. Check if this md_session has set all the
+		 * requested peripherals. If another md session set a requested
+		 * peripheral then we cannot switch that peripheral to USB.
+		 * If this session owns all the requested peripherals, then
+		 * call function to switch the modes/masks for the md_session
+		 */
+		session_info = diag_md_session_get_pid(current->tgid);
+		if (!session_info) {
+			*change_mode = 1;
+			mutex_unlock(&driver->md_session_lock);
+			return 0;
+		}
+		peripheral_mask = session_info->peripheral_mask;
+		if ((change_mask & peripheral_mask) != change_mask) {
+			DIAG_LOG(DIAG_DEBUG_USERSPACE,
+			    "Another MD Session owns a requested peripheral\n");
+			mutex_unlock(&driver->md_session_lock);
+			return -EINVAL;
+		}
+		*change_mode = 1;
+
+		/*
+		 * If only some of this session's peripherals move to USB
+		 * mode, switch just those; if all of them do, close the
+		 * whole session.
+		 */
+		if (~change_mask & peripheral_mask) {
+			err = diag_md_peripheral_switch(current->tgid,
+					change_mask, DIAG_USB_MODE);
+		} else {
+			diag_md_session_close(current->tgid);
+		}
+		mutex_unlock(&driver->md_session_lock);
+		return err;
+
+	} else if (req_mode == DIAG_MEMORY_DEVICE_MODE) {
+		/*
+		 * Get bit mask that represents what peripherals already have
+		 * been set. Check that requested peripherals already set are
+		 * owned by this md session
+		 */
+		mutex_lock(&driver->md_session_lock);
+		change_mask = driver->md_session_mask & param->peripheral_mask;
+		session_info = diag_md_session_get_pid(current->tgid);
+
+		if (session_info) {
+			if ((session_info->peripheral_mask & change_mask)
+							!= change_mask) {
+				DIAG_LOG(DIAG_DEBUG_USERSPACE,
+				    "Another MD Session owns a requested peripheral\n");
+				mutex_unlock(&driver->md_session_lock);
+				return -EINVAL;
+			}
+			err = diag_md_peripheral_switch(current->tgid,
+					change_mask, DIAG_USB_MODE);
+			mutex_unlock(&driver->md_session_lock);
+		} else {
+			mutex_unlock(&driver->md_session_lock);
+			if (change_mask) {
+				DIAG_LOG(DIAG_DEBUG_USERSPACE,
+				    "Another MD Session owns a requested peripheral\n");
+				return -EINVAL;
+			}
+			err = diag_md_session_create(DIAG_MD_PERIPHERAL,
+				param->peripheral_mask, DIAG_LOCAL_PROC);
+		}
+		*change_mode = 1;
+		return err;
+	}
+	return -EINVAL;
+}
+
+static uint32_t diag_translate_mask(uint32_t peripheral_mask)
+{
+	uint32_t ret = 0;
+
+	if (peripheral_mask & DIAG_CON_APSS)
+		ret |= (1 << APPS_DATA);
+	if (peripheral_mask & DIAG_CON_MPSS)
+		ret |= (1 << PERIPHERAL_MODEM);
+	if (peripheral_mask & DIAG_CON_LPASS)
+		ret |= (1 << PERIPHERAL_LPASS);
+	if (peripheral_mask & DIAG_CON_WCNSS)
+		ret |= (1 << PERIPHERAL_WCNSS);
+	if (peripheral_mask & DIAG_CON_SENSORS)
+		ret |= (1 << PERIPHERAL_SENSORS);
+	if (peripheral_mask & DIAG_CON_WDSP)
+		ret |= (1 << PERIPHERAL_WDSP);
+	if (peripheral_mask & DIAG_CON_CDSP)
+		ret |= (1 << PERIPHERAL_CDSP);
+	if (peripheral_mask & DIAG_CON_UPD_WLAN)
+		ret |= (1 << UPD_WLAN);
+	if (peripheral_mask & DIAG_CON_UPD_AUDIO)
+		ret |= (1 << UPD_AUDIO);
+	if (peripheral_mask & DIAG_CON_UPD_SENSORS)
+		ret |= (1 << UPD_SENSORS);
+
+	return ret;
+}
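+
+/*
+ * Example (illustration only): DIAG_CON_APSS | DIAG_CON_MPSS translates
+ * to (1 << APPS_DATA) | (1 << PERIPHERAL_MODEM); this mapping is the
+ * inverse of diag_translate_kernel_to_user_mask() above.
+ */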
+
+static int diag_switch_logging(struct diag_logging_mode_param_t *param)
+{
+	int new_mode, i = 0;
+	int curr_mode;
+	int err = 0;
+	uint8_t do_switch = 1;
+	uint32_t peripheral_mask = 0;
+	uint8_t peripheral, upd;
+
+	if (!param)
+		return -EINVAL;
+
+	if (!param->peripheral_mask) {
+		DIAG_LOG(DIAG_DEBUG_USERSPACE,
+			"asking for mode switch with no peripheral mask set\n");
+		return -EINVAL;
+	}
+
+	if (param->pd_mask) {
+		switch (param->pd_mask) {
+		case DIAG_CON_UPD_WLAN:
+			peripheral = PERIPHERAL_MODEM;
+			upd = UPD_WLAN;
+			break;
+		case DIAG_CON_UPD_AUDIO:
+			peripheral = PERIPHERAL_LPASS;
+			upd = UPD_AUDIO;
+			break;
+		case DIAG_CON_UPD_SENSORS:
+			peripheral = PERIPHERAL_LPASS;
+			upd = UPD_SENSORS;
+			break;
+		default:
+			DIAG_LOG(DIAG_DEBUG_USERSPACE,
+			"asking for mode switch with no pd mask set\n");
+			return -EINVAL;
+		}
+
+		mutex_lock(&driver->md_session_lock);
+		if (driver->md_session_map[peripheral] &&
+			(MD_PERIPHERAL_MASK(peripheral) &
+			diag_mux->mux_mask)) {
+			DIAG_LOG(DIAG_DEBUG_USERSPACE,
+			"diag_fr: User PD is already logging onto active peripheral logging\n");
+			i = upd - UPD_WLAN;
+			mutex_unlock(&driver->md_session_lock);
+			driver->pd_session_clear[i] = 0;
+			return -EINVAL;
+		}
+		mutex_unlock(&driver->md_session_lock);
+		peripheral_mask = diag_translate_mask(param->pd_mask);
+		param->peripheral_mask = peripheral_mask;
+		i = upd - UPD_WLAN;
+		if (!driver->pd_session_clear[i]) {
+			driver->pd_logging_mode[i] = 1;
+			driver->num_pd_session += 1;
+		}
+		driver->pd_session_clear[i] = 0;
+	} else {
+		peripheral_mask = diag_translate_mask(param->peripheral_mask);
+		param->peripheral_mask = peripheral_mask;
+	}
+
+	switch (param->req_mode) {
+	case CALLBACK_MODE:
+	case UART_MODE:
+	case SOCKET_MODE:
+	case MEMORY_DEVICE_MODE:
+		new_mode = DIAG_MEMORY_DEVICE_MODE;
+		break;
+	case USB_MODE:
+		new_mode = DIAG_USB_MODE;
+		break;
+	default:
+		pr_err("diag: In %s, request to switch to invalid mode: %d\n",
+		       __func__, param->req_mode);
+		return -EINVAL;
+	}
+
+	curr_mode = driver->logging_mode;
+	DIAG_LOG(DIAG_DEBUG_USERSPACE,
+		"request to switch logging from %d mask:%x to new_mode %d mask:%x\n",
+		curr_mode, driver->md_session_mask, new_mode, peripheral_mask);
+
+	err = diag_md_session_check(curr_mode, new_mode, param, &do_switch);
+	if (err) {
+		DIAG_LOG(DIAG_DEBUG_USERSPACE,
+			 "err from diag_md_session_check, err: %d\n", err);
+		return err;
+	}
+
+	if (do_switch == 0) {
+		DIAG_LOG(DIAG_DEBUG_USERSPACE,
+			 "not switching modes c: %d n: %d\n",
+			 curr_mode, new_mode);
+		return 0;
+	}
+
+	diag_ws_reset(DIAG_WS_MUX);
+	err = diag_mux_switch_logging(&new_mode, &peripheral_mask);
+	if (err) {
+		pr_err("diag: In %s, unable to switch mode from %d to %d, err: %d\n",
+		       __func__, curr_mode, new_mode, err);
+		driver->logging_mode = curr_mode;
+		goto fail;
+	}
+	driver->logging_mode = new_mode;
+	driver->logging_mask = peripheral_mask;
+	DIAG_LOG(DIAG_DEBUG_USERSPACE,
+		"Switch logging to %d mask:%x\n", new_mode, peripheral_mask);
+
+	/* Update the real time votes to match the new logging mode */
+	if (new_mode != DIAG_MEMORY_DEVICE_MODE) {
+		diag_update_real_time_vote(DIAG_PROC_MEMORY_DEVICE,
+					   MODE_REALTIME, ALL_PROC);
+	} else {
+		diag_update_proc_vote(DIAG_PROC_MEMORY_DEVICE, VOTE_UP,
+				      ALL_PROC);
+	}
+
+	if (!(new_mode == DIAG_MEMORY_DEVICE_MODE &&
+	      curr_mode == DIAG_USB_MODE)) {
+		queue_work(driver->diag_real_time_wq,
+			   &driver->diag_real_time_work);
+	}
+
+	return 0;
+fail:
+	return err;
+}
+
+static int diag_ioctl_dci_reg(unsigned long ioarg)
+{
+	int result = -EINVAL;
+	struct diag_dci_reg_tbl_t dci_reg_params;
+
+	if (copy_from_user(&dci_reg_params, (void __user *)ioarg,
+				sizeof(struct diag_dci_reg_tbl_t)))
+		return -EFAULT;
+
+	result = diag_dci_register_client(&dci_reg_params);
+
+	return result;
+}
+
+static int diag_ioctl_dci_health_stats(unsigned long ioarg)
+{
+	int result = -EINVAL;
+	struct diag_dci_health_stats_proc stats;
+
+	if (copy_from_user(&stats, (void __user *)ioarg,
+				sizeof(struct diag_dci_health_stats_proc)))
+		return -EFAULT;
+
+	result = diag_dci_copy_health_stats(&stats);
+	if (result == DIAG_DCI_NO_ERROR) {
+		if (copy_to_user((void __user *)ioarg, &stats,
+			sizeof(struct diag_dci_health_stats_proc)))
+			return -EFAULT;
+	}
+
+	return result;
+}
+
+static int diag_ioctl_dci_log_status(unsigned long ioarg)
+{
+	struct diag_log_event_stats le_stats;
+	struct diag_dci_client_tbl *dci_client = NULL;
+
+	if (copy_from_user(&le_stats, (void __user *)ioarg,
+				sizeof(struct diag_log_event_stats)))
+		return -EFAULT;
+
+	dci_client = diag_dci_get_client_entry(le_stats.client_id);
+	if (!dci_client)
+		return DIAG_DCI_NOT_SUPPORTED;
+	le_stats.is_set = diag_dci_query_log_mask(dci_client, le_stats.code);
+	if (copy_to_user((void __user *)ioarg, &le_stats,
+				sizeof(struct diag_log_event_stats)))
+		return -EFAULT;
+
+	return DIAG_DCI_NO_ERROR;
+}
+
+static int diag_ioctl_dci_event_status(unsigned long ioarg)
+{
+	struct diag_log_event_stats le_stats;
+	struct diag_dci_client_tbl *dci_client = NULL;
+
+	if (copy_from_user(&le_stats, (void __user *)ioarg,
+				sizeof(struct diag_log_event_stats)))
+		return -EFAULT;
+
+	dci_client = diag_dci_get_client_entry(le_stats.client_id);
+	if (!dci_client)
+		return DIAG_DCI_NOT_SUPPORTED;
+
+	le_stats.is_set = diag_dci_query_event_mask(dci_client, le_stats.code);
+	if (copy_to_user((void __user *)ioarg, &le_stats,
+				sizeof(struct diag_log_event_stats)))
+		return -EFAULT;
+
+	return DIAG_DCI_NO_ERROR;
+}
+
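+/*
+ * Flag DEINIT_TYPE for the calling client and wake up any waiting
+ * readers; the read path completes the client teardown when it sees
+ * this bit.
+ */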
+static int diag_ioctl_lsm_deinit(void)
+{
+	int i;
+
+	mutex_lock(&driver->diagchar_mutex);
+	for (i = 0; i < driver->num_clients; i++)
+		if (driver->client_map[i].pid == current->tgid)
+			break;
+
+	if (i == driver->num_clients) {
+		mutex_unlock(&driver->diagchar_mutex);
+		return -EINVAL;
+	}
+	if (!(driver->data_ready[i] & DEINIT_TYPE)) {
+		driver->data_ready[i] |= DEINIT_TYPE;
+		atomic_inc(&driver->data_ready_notif[i]);
+	}
+	mutex_unlock(&driver->diagchar_mutex);
+	wake_up_interruptible(&driver->wait_q);
+
+	return 1;
+}
+
+static int diag_ioctl_vote_real_time(unsigned long ioarg)
+{
+	int real_time = 0;
+	int temp_proc = ALL_PROC;
+	struct real_time_vote_t vote;
+	struct diag_dci_client_tbl *dci_client = NULL;
+
+	if (copy_from_user(&vote, (void __user *)ioarg,
+			sizeof(struct real_time_vote_t)))
+		return -EFAULT;
+
+	if (vote.proc > DIAG_PROC_MEMORY_DEVICE ||
+		vote.real_time_vote > MODE_UNKNOWN ||
+		vote.client_id < 0) {
+		pr_err("diag: %s, invalid params, proc: %d, vote: %d, client_id: %d\n",
+			__func__, vote.proc, vote.real_time_vote,
+			vote.client_id);
+		return -EINVAL;
+	}
+
+	driver->real_time_update_busy++;
+	if (vote.proc == DIAG_PROC_DCI) {
+		dci_client = diag_dci_get_client_entry(vote.client_id);
+		if (!dci_client) {
+			driver->real_time_update_busy--;
+			return DIAG_DCI_NOT_SUPPORTED;
+		}
+		diag_dci_set_real_time(dci_client, vote.real_time_vote);
+		real_time = diag_dci_get_cumulative_real_time(
+					dci_client->client_info.token);
+		diag_update_real_time_vote(vote.proc, real_time,
+					dci_client->client_info.token);
+	} else {
+		real_time = vote.real_time_vote;
+		temp_proc = vote.client_id;
+		diag_update_real_time_vote(vote.proc, real_time,
+					   temp_proc);
+	}
+	queue_work(driver->diag_real_time_wq, &driver->diag_real_time_work);
+	return 0;
+}
+
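+/*
+ * Return the current real time mode for the queried processor, retrying
+ * briefly while a real time vote update is still in progress.
+ */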
+static int diag_ioctl_get_real_time(unsigned long ioarg)
+{
+	int i;
+	int retry_count = 0;
+	int timer = 0;
+	struct real_time_query_t rt_query;
+
+	if (copy_from_user(&rt_query, (void __user *)ioarg,
+					sizeof(struct real_time_query_t)))
+		return -EFAULT;
+	while (retry_count < 3) {
+		if (driver->real_time_update_busy > 0) {
+			retry_count++;
+			/*
+			 * The value 10000 was chosen empirically as an
+			 * optimal delay to allow the work queued on
+			 * diag_real_time_wq to complete processing.
+			 */
+			for (timer = 0; timer < 5; timer++)
+				usleep_range(10000, 10100);
+		} else {
+			break;
+		}
+	}
+
+	if (driver->real_time_update_busy > 0)
+		return -EAGAIN;
+
+	if (rt_query.proc < 0 || rt_query.proc >= DIAG_NUM_PROC) {
+		pr_err("diag: Invalid proc %d in %s\n", rt_query.proc,
+		       __func__);
+		return -EINVAL;
+	}
+	rt_query.real_time = driver->real_time_mode[rt_query.proc];
+	/*
+	 * For the local processor, if any of the peripherals is in buffering
+	 * mode, overwrite the value of real time with UNKNOWN_MODE
+	 */
+	if (rt_query.proc == DIAG_LOCAL_PROC) {
+		for (i = 0; i < NUM_PERIPHERALS; i++) {
+			if (!driver->feature[i].peripheral_buffering)
+				continue;
+			switch (driver->buffering_mode[i].mode) {
+			case DIAG_BUFFERING_MODE_CIRCULAR:
+			case DIAG_BUFFERING_MODE_THRESHOLD:
+				rt_query.real_time = MODE_UNKNOWN;
+				break;
+			}
+		}
+	}
+
+	if (copy_to_user((void __user *)ioarg, &rt_query,
+			 sizeof(struct real_time_query_t)))
+		return -EFAULT;
+
+	return 0;
+}
+
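+/*
+ * Configure the buffering mode for the peripheral or user PD named in
+ * the request, after checking that the target actually supports
+ * peripheral (or PD) buffering.
+ */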
+static int diag_ioctl_set_buffering_mode(unsigned long ioarg)
+{
+	struct diag_buffering_mode_t params;
+	int peripheral = 0;
+	uint8_t diag_id = 0;
+
+	if (copy_from_user(&params, (void __user *)ioarg, sizeof(params)))
+		return -EFAULT;
+
+	diag_map_pd_to_diagid(params.peripheral, &diag_id, &peripheral);
+
+	if ((peripheral < 0) ||
+		peripheral >= NUM_PERIPHERALS) {
+		pr_err("diag: In %s, invalid peripheral %d\n", __func__,
+		       peripheral);
+		return -EIO;
+	}
+
+	if (params.peripheral > NUM_PERIPHERALS &&
+		!driver->feature[peripheral].pd_buffering) {
+		pr_err("diag: In %s, pd buffering not supported for peripheral:%d\n",
+			__func__, peripheral);
+		return -EIO;
+	}
+
+	if (!driver->feature[peripheral].peripheral_buffering) {
+		pr_err("diag: In %s, peripheral %d doesn't support buffering\n",
+		       __func__, peripheral);
+		return -EIO;
+	}
+
+	mutex_lock(&driver->mode_lock);
+	driver->buffering_flag[params.peripheral] = 1;
+	mutex_unlock(&driver->mode_lock);
+
+	return diag_send_peripheral_buffering_mode(&params);
+}
+
+static int diag_ioctl_peripheral_drain_immediate(unsigned long ioarg)
+{
+	uint8_t pd, diag_id = 0;
+	int peripheral = 0;
+
+	if (copy_from_user(&pd, (void __user *)ioarg, sizeof(uint8_t)))
+		return -EFAULT;
+
+	diag_map_pd_to_diagid(pd, &diag_id, &peripheral);
+
+	if ((peripheral < 0) ||
+		peripheral >= NUM_PERIPHERALS) {
+		pr_err("diag: In %s, invalid peripheral %d\n", __func__,
+		       peripheral);
+		return -EINVAL;
+	}
+
+	if (pd > NUM_PERIPHERALS &&
+		!driver->feature[peripheral].pd_buffering) {
+		pr_err("diag: In %s, pd buffering not supported for peripheral:%d\n",
+			__func__, peripheral);
+		return -EIO;
+	}
+
+	return diag_send_peripheral_drain_immediate(pd, diag_id, peripheral);
+}
+
+static int diag_ioctl_dci_support(unsigned long ioarg)
+{
+	struct diag_dci_peripherals_t dci_support;
+	int result = -EINVAL;
+
+	if (copy_from_user(&dci_support, (void __user *)ioarg,
+				sizeof(struct diag_dci_peripherals_t)))
+		return -EFAULT;
+
+	result = diag_dci_get_support_list(&dci_support);
+	if (result == DIAG_DCI_NO_ERROR)
+		if (copy_to_user((void __user *)ioarg, &dci_support,
+				sizeof(struct diag_dci_peripherals_t)))
+			return -EFAULT;
+
+	return result;
+}
+
+static int diag_ioctl_hdlc_toggle(unsigned long ioarg)
+{
+	uint8_t hdlc_support;
+	struct diag_md_session_t *session_info = NULL;
+
+	if (copy_from_user(&hdlc_support, (void __user *)ioarg,
+				sizeof(uint8_t)))
+		return -EFAULT;
+	mutex_lock(&driver->hdlc_disable_mutex);
+	mutex_lock(&driver->md_session_lock);
+	session_info = diag_md_session_get_pid(current->tgid);
+	if (session_info)
+		session_info->hdlc_disabled = hdlc_support;
+	else
+		driver->hdlc_disabled = hdlc_support;
+	mutex_unlock(&driver->md_session_lock);
+	mutex_unlock(&driver->hdlc_disable_mutex);
+	diag_update_md_clients(HDLC_SUPPORT_TYPE);
+
+	return 0;
+}
+
+static int diag_ioctl_query_pd_logging(struct diag_logging_mode_param_t *param)
+{
+	int ret = -EINVAL;
+	int peripheral;
+	char *p_str = NULL;
+
+	if (!param)
+		return -EINVAL;
+
+	if (!param->pd_mask) {
+		DIAG_LOG(DIAG_DEBUG_USERSPACE,
+			"query with no pd mask set, returning error\n");
+		return -EINVAL;
+	}
+
+	switch (param->pd_mask) {
+	case DIAG_CON_UPD_WLAN:
+		peripheral = PERIPHERAL_MODEM;
+		p_str = "MODEM";
+		break;
+	case DIAG_CON_UPD_AUDIO:
+	case DIAG_CON_UPD_SENSORS:
+		peripheral = PERIPHERAL_LPASS;
+		p_str = "LPASS";
+		break;
+	default:
+		DIAG_LOG(DIAG_DEBUG_USERSPACE,
+		"Invalid pd mask, returning EINVAL\n");
+		return -EINVAL;
+	}
+
+	DIAG_LOG(DIAG_DEBUG_USERSPACE,
+	"diag: %s: Untagging support on APPS is %s\n", __func__,
+	((driver->supports_apps_header_untagging) ?
+	"present" : "absent"));
+
+	DIAG_LOG(DIAG_DEBUG_USERSPACE,
+	"diag: %s: Tagging support on %s is %s\n",
+	__func__, p_str,
+	(driver->feature[peripheral].untag_header ?
+	"present" : "absent"));
+
+	if (driver->supports_apps_header_untagging &&
+		driver->feature[peripheral].untag_header)
+		ret = 0;
+
+	return ret;
+}
+
+static int diag_ioctl_register_callback(unsigned long ioarg)
+{
+	int err = 0;
+	struct diag_callback_reg_t reg;
+
+	if (copy_from_user(&reg, (void __user *)ioarg,
+			   sizeof(struct diag_callback_reg_t))) {
+		return -EFAULT;
+	}
+
+	if (reg.proc < 0 || reg.proc >= DIAG_NUM_PROC) {
+		pr_err("diag: In %s, invalid proc %d for callback registration\n",
+		       __func__, reg.proc);
+		return -EINVAL;
+	}
+
+	if (driver->md_session_mode == DIAG_MD_PERIPHERAL)
+		return -EIO;
+
+	return err;
+}
+
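+/*
+ * Copy a command registration table from userspace and register each
+ * entry against the calling process on behalf of APPS_DATA.
+ */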
+static int diag_cmd_register_tbl(struct diag_cmd_reg_tbl_t *reg_tbl)
+{
+	int i;
+	int err = 0;
+	uint32_t count = 0;
+	struct diag_cmd_reg_entry_t *entries = NULL;
+	const uint16_t entry_len = sizeof(struct diag_cmd_reg_entry_t);
+
+	if (!reg_tbl) {
+		pr_err("diag: In %s, invalid registration table\n", __func__);
+		return -EINVAL;
+	}
+
+	count = reg_tbl->count;
+	if ((UINT_MAX / entry_len) < count) {
+		pr_warn("diag: In %s, possible integer overflow.\n", __func__);
+		return -EFAULT;
+	}
+
+	entries = kzalloc(count * entry_len, GFP_KERNEL);
+	if (!entries) {
+		pr_err("diag: In %s, unable to create memory for registration table entries\n",
+		       __func__);
+		return -ENOMEM;
+	}
+
+	err = copy_from_user(entries, reg_tbl->entries, count * entry_len);
+	if (err) {
+		pr_err("diag: In %s, error copying data from userspace, err: %d\n",
+		       __func__, err);
+		kfree(entries);
+		return -EFAULT;
+	}
+
+	for (i = 0; i < count; i++) {
+		err = diag_cmd_add_reg(&entries[i], APPS_DATA, current->tgid);
+		if (err) {
+			pr_err("diag: In %s, unable to register command, err: %d\n",
+			       __func__, err);
+			break;
+		}
+	}
+
+	kfree(entries);
+	return err;
+}
+
+static int diag_ioctl_cmd_reg(unsigned long ioarg)
+{
+	struct diag_cmd_reg_tbl_t reg_tbl;
+
+	if (copy_from_user(&reg_tbl, (void __user *)ioarg,
+			   sizeof(struct diag_cmd_reg_tbl_t))) {
+		return -EFAULT;
+	}
+
+	return diag_cmd_register_tbl(&reg_tbl);
+}
+
+static int diag_ioctl_cmd_dereg(void)
+{
+	diag_cmd_remove_reg_by_pid(current->tgid);
+	return 0;
+}
+
+#ifdef CONFIG_COMPAT
+/*
+ * @sync_obj_name: name of the synchronization object associated with this proc
+ * @count: number of entries in the bind
+ * @params: the actual packet registrations
+ */
+struct diag_cmd_reg_tbl_compat_t {
+	char sync_obj_name[MAX_SYNC_OBJ_NAME_SIZE];
+	uint32_t count;
+	compat_uptr_t entries;
+};
+
+static int diag_ioctl_cmd_reg_compat(unsigned long ioarg)
+{
+	struct diag_cmd_reg_tbl_compat_t reg_tbl_compat;
+	struct diag_cmd_reg_tbl_t reg_tbl;
+
+	if (copy_from_user(&reg_tbl_compat, (void __user *)ioarg,
+			   sizeof(struct diag_cmd_reg_tbl_compat_t))) {
+		return -EFAULT;
+	}
+
+	strlcpy(reg_tbl.sync_obj_name, reg_tbl_compat.sync_obj_name,
+		MAX_SYNC_OBJ_NAME_SIZE);
+	reg_tbl.count = reg_tbl_compat.count;
+	reg_tbl.entries = (struct diag_cmd_reg_entry_t *)
+			  (uintptr_t)reg_tbl_compat.entries;
+
+	return diag_cmd_register_tbl(&reg_tbl);
+}
+
+long diagchar_compat_ioctl(struct file *filp,
+			   unsigned int iocmd, unsigned long ioarg)
+{
+	int result = -EINVAL;
+	int client_id = 0;
+	uint16_t delayed_rsp_id = 0;
+	uint16_t remote_dev;
+	struct diag_dci_client_tbl *dci_client = NULL;
+	struct diag_logging_mode_param_t mode_param;
+
+	switch (iocmd) {
+	case DIAG_IOCTL_COMMAND_REG:
+		result = diag_ioctl_cmd_reg_compat(ioarg);
+		break;
+	case DIAG_IOCTL_COMMAND_DEREG:
+		result = diag_ioctl_cmd_dereg();
+		break;
+	case DIAG_IOCTL_GET_DELAYED_RSP_ID:
+		delayed_rsp_id = diag_get_next_delayed_rsp_id();
+		if (copy_to_user((void __user *)ioarg, &delayed_rsp_id,
+				 sizeof(uint16_t)))
+			result = -EFAULT;
+		else
+			result = 0;
+		break;
+	case DIAG_IOCTL_DCI_REG:
+		result = diag_ioctl_dci_reg(ioarg);
+		break;
+	case DIAG_IOCTL_DCI_DEINIT:
+		mutex_lock(&driver->dci_mutex);
+		if (copy_from_user((void *)&client_id, (void __user *)ioarg,
+			sizeof(int))) {
+			mutex_unlock(&driver->dci_mutex);
+			return -EFAULT;
+		}
+		dci_client = diag_dci_get_client_entry(client_id);
+		if (!dci_client) {
+			mutex_unlock(&driver->dci_mutex);
+			return DIAG_DCI_NOT_SUPPORTED;
+		}
+		result = diag_dci_deinit_client(dci_client);
+		mutex_unlock(&driver->dci_mutex);
+		break;
+	case DIAG_IOCTL_DCI_SUPPORT:
+		result = diag_ioctl_dci_support(ioarg);
+		break;
+	case DIAG_IOCTL_DCI_HEALTH_STATS:
+		mutex_lock(&driver->dci_mutex);
+		result = diag_ioctl_dci_health_stats(ioarg);
+		mutex_unlock(&driver->dci_mutex);
+		break;
+	case DIAG_IOCTL_DCI_LOG_STATUS:
+		mutex_lock(&driver->dci_mutex);
+		result = diag_ioctl_dci_log_status(ioarg);
+		mutex_unlock(&driver->dci_mutex);
+		break;
+	case DIAG_IOCTL_DCI_EVENT_STATUS:
+		mutex_lock(&driver->dci_mutex);
+		result = diag_ioctl_dci_event_status(ioarg);
+		mutex_unlock(&driver->dci_mutex);
+		break;
+	case DIAG_IOCTL_DCI_CLEAR_LOGS:
+		mutex_lock(&driver->dci_mutex);
+		if (copy_from_user((void *)&client_id, (void __user *)ioarg,
+			sizeof(int))) {
+			mutex_unlock(&driver->dci_mutex);
+			return -EFAULT;
+		}
+		result = diag_dci_clear_log_mask(client_id);
+		mutex_unlock(&driver->dci_mutex);
+		break;
+	case DIAG_IOCTL_DCI_CLEAR_EVENTS:
+		mutex_lock(&driver->dci_mutex);
+		if (copy_from_user(&client_id, (void __user *)ioarg,
+			sizeof(int))) {
+			mutex_unlock(&driver->dci_mutex);
+			return -EFAULT;
+		}
+		result = diag_dci_clear_event_mask(client_id);
+		mutex_unlock(&driver->dci_mutex);
+		break;
+	case DIAG_IOCTL_LSM_DEINIT:
+		result = diag_ioctl_lsm_deinit();
+		break;
+	case DIAG_IOCTL_SWITCH_LOGGING:
+		if (copy_from_user((void *)&mode_param, (void __user *)ioarg,
+				   sizeof(mode_param)))
+			return -EFAULT;
+		mutex_lock(&driver->diagchar_mutex);
+		result = diag_switch_logging(&mode_param);
+		mutex_unlock(&driver->diagchar_mutex);
+		break;
+	case DIAG_IOCTL_REMOTE_DEV:
+		remote_dev = diag_get_remote_device_mask();
+		if (copy_to_user((void __user *)ioarg, &remote_dev,
+			sizeof(uint16_t)))
+			result = -EFAULT;
+		else
+			result = 1;
+		break;
+	case DIAG_IOCTL_VOTE_REAL_TIME:
+		mutex_lock(&driver->dci_mutex);
+		result = diag_ioctl_vote_real_time(ioarg);
+		mutex_unlock(&driver->dci_mutex);
+		break;
+	case DIAG_IOCTL_GET_REAL_TIME:
+		result = diag_ioctl_get_real_time(ioarg);
+		break;
+	case DIAG_IOCTL_PERIPHERAL_BUF_CONFIG:
+		result = diag_ioctl_set_buffering_mode(ioarg);
+		break;
+	case DIAG_IOCTL_PERIPHERAL_BUF_DRAIN:
+		result = diag_ioctl_peripheral_drain_immediate(ioarg);
+		break;
+	case DIAG_IOCTL_REGISTER_CALLBACK:
+		result = diag_ioctl_register_callback(ioarg);
+		break;
+	case DIAG_IOCTL_HDLC_TOGGLE:
+		result = diag_ioctl_hdlc_toggle(ioarg);
+		break;
+	case DIAG_IOCTL_QUERY_PD_LOGGING:
+		if (copy_from_user((void *)&mode_param, (void __user *)ioarg,
+				   sizeof(mode_param)))
+			return -EFAULT;
+		result = diag_ioctl_query_pd_logging(&mode_param);
+		break;
+	}
+	return result;
+}
+#endif
+
+long diagchar_ioctl(struct file *filp,
+			   unsigned int iocmd, unsigned long ioarg)
+{
+	int result = -EINVAL;
+	int client_id = 0;
+	uint16_t delayed_rsp_id;
+	uint16_t remote_dev;
+	struct diag_dci_client_tbl *dci_client = NULL;
+	struct diag_logging_mode_param_t mode_param;
+
+	switch (iocmd) {
+	case DIAG_IOCTL_COMMAND_REG:
+		result = diag_ioctl_cmd_reg(ioarg);
+		break;
+	case DIAG_IOCTL_COMMAND_DEREG:
+		result = diag_ioctl_cmd_dereg();
+		break;
+	case DIAG_IOCTL_GET_DELAYED_RSP_ID:
+		delayed_rsp_id = diag_get_next_delayed_rsp_id();
+		if (copy_to_user((void __user *)ioarg, &delayed_rsp_id,
+				 sizeof(uint16_t)))
+			result = -EFAULT;
+		else
+			result = 0;
+		break;
+	case DIAG_IOCTL_DCI_REG:
+		result = diag_ioctl_dci_reg(ioarg);
+		break;
+	case DIAG_IOCTL_DCI_DEINIT:
+		mutex_lock(&driver->dci_mutex);
+		if (copy_from_user((void *)&client_id, (void __user *)ioarg,
+			sizeof(int))) {
+			mutex_unlock(&driver->dci_mutex);
+			return -EFAULT;
+		}
+		dci_client = diag_dci_get_client_entry(client_id);
+		if (!dci_client) {
+			mutex_unlock(&driver->dci_mutex);
+			return DIAG_DCI_NOT_SUPPORTED;
+		}
+		result = diag_dci_deinit_client(dci_client);
+		mutex_unlock(&driver->dci_mutex);
+		break;
+	case DIAG_IOCTL_DCI_SUPPORT:
+		result = diag_ioctl_dci_support(ioarg);
+		break;
+	case DIAG_IOCTL_DCI_HEALTH_STATS:
+		mutex_lock(&driver->dci_mutex);
+		result = diag_ioctl_dci_health_stats(ioarg);
+		mutex_unlock(&driver->dci_mutex);
+		break;
+	case DIAG_IOCTL_DCI_LOG_STATUS:
+		mutex_lock(&driver->dci_mutex);
+		result = diag_ioctl_dci_log_status(ioarg);
+		mutex_unlock(&driver->dci_mutex);
+		break;
+	case DIAG_IOCTL_DCI_EVENT_STATUS:
+		mutex_lock(&driver->dci_mutex);
+		result = diag_ioctl_dci_event_status(ioarg);
+		mutex_unlock(&driver->dci_mutex);
+		break;
+	case DIAG_IOCTL_DCI_CLEAR_LOGS:
+		mutex_lock(&driver->dci_mutex);
+		if (copy_from_user((void *)&client_id, (void __user *)ioarg,
+			sizeof(int))) {
+			mutex_unlock(&driver->dci_mutex);
+			return -EFAULT;
+		}
+		result = diag_dci_clear_log_mask(client_id);
+		mutex_unlock(&driver->dci_mutex);
+		break;
+	case DIAG_IOCTL_DCI_CLEAR_EVENTS:
+		mutex_lock(&driver->dci_mutex);
+		if (copy_from_user(&client_id, (void __user *)ioarg,
+			sizeof(int))) {
+			mutex_unlock(&driver->dci_mutex);
+			return -EFAULT;
+		}
+		result = diag_dci_clear_event_mask(client_id);
+		mutex_unlock(&driver->dci_mutex);
+		break;
+	case DIAG_IOCTL_LSM_DEINIT:
+		result = diag_ioctl_lsm_deinit();
+		break;
+	case DIAG_IOCTL_SWITCH_LOGGING:
+		if (copy_from_user((void *)&mode_param, (void __user *)ioarg,
+				   sizeof(mode_param)))
+			return -EFAULT;
+		mutex_lock(&driver->diagchar_mutex);
+		result = diag_switch_logging(&mode_param);
+		mutex_unlock(&driver->diagchar_mutex);
+		break;
+	case DIAG_IOCTL_REMOTE_DEV:
+		remote_dev = diag_get_remote_device_mask();
+		if (copy_to_user((void __user *)ioarg, &remote_dev,
+			sizeof(uint16_t)))
+			result = -EFAULT;
+		else
+			result = 1;
+		break;
+	case DIAG_IOCTL_VOTE_REAL_TIME:
+		mutex_lock(&driver->dci_mutex);
+		result = diag_ioctl_vote_real_time(ioarg);
+		mutex_unlock(&driver->dci_mutex);
+		break;
+	case DIAG_IOCTL_GET_REAL_TIME:
+		result = diag_ioctl_get_real_time(ioarg);
+		break;
+	case DIAG_IOCTL_PERIPHERAL_BUF_CONFIG:
+		result = diag_ioctl_set_buffering_mode(ioarg);
+		break;
+	case DIAG_IOCTL_PERIPHERAL_BUF_DRAIN:
+		result = diag_ioctl_peripheral_drain_immediate(ioarg);
+		break;
+	case DIAG_IOCTL_REGISTER_CALLBACK:
+		result = diag_ioctl_register_callback(ioarg);
+		break;
+	case DIAG_IOCTL_HDLC_TOGGLE:
+		result = diag_ioctl_hdlc_toggle(ioarg);
+		break;
+	case DIAG_IOCTL_QUERY_PD_LOGGING:
+		if (copy_from_user((void *)&mode_param, (void __user *)ioarg,
+				   sizeof(mode_param)))
+			return -EFAULT;
+		result = diag_ioctl_query_pd_logging(&mode_param);
+		break;
+	}
+	return result;
+}
+
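+/*
+ * HDLC encode an apps packet into the shared aggregation buffer. The
+ * buffer is flushed to the mux layer whenever the encoded packet would
+ * no longer fit; responses are flushed immediately.
+ */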
+static int diag_process_apps_data_hdlc(unsigned char *buf, int len,
+				       int pkt_type)
+{
+	int err = 0;
+	int ret = PKT_DROP;
+	struct diag_apps_data_t *data = &hdlc_data;
+	struct diag_send_desc_type send = { NULL, NULL, DIAG_STATE_START, 0 };
+	struct diag_hdlc_dest_type enc = { NULL, NULL, 0 };
+	/*
+	 * The maximum encoded size of the buffer can be at most twice the
+	 * length of the packet. Add three bytes for the footer - a 16-bit
+	 * CRC (2 bytes) plus a delimiter (1 byte).
+	 */
+	const uint32_t max_encoded_size = ((2 * len) + 3);
+
+	if (!buf || len <= 0) {
+		pr_err("diag: In %s, invalid buf: %pK len: %d\n",
+		       __func__, buf, len);
+		return -EIO;
+	}
+
+	if (DIAG_MAX_HDLC_BUF_SIZE < max_encoded_size) {
+		pr_err_ratelimited("diag: In %s, encoded data size %d is larger than the buffer size %d\n",
+		       __func__, max_encoded_size, DIAG_MAX_HDLC_BUF_SIZE);
+		return -EBADMSG;
+	}
+
+	send.state = DIAG_STATE_START;
+	send.pkt = buf;
+	send.last = (void *)(buf + len - 1);
+	send.terminate = 1;
+
+	if (!data->buf)
+		data->buf = diagmem_alloc(driver, DIAG_MAX_HDLC_BUF_SIZE +
+					APF_DIAG_PADDING,
+					  POOL_TYPE_HDLC);
+	if (!data->buf) {
+		ret = PKT_DROP;
+		goto fail_ret;
+	}
+
+	if ((DIAG_MAX_HDLC_BUF_SIZE - data->len) <= max_encoded_size) {
+		err = diag_mux_write(DIAG_LOCAL_PROC, data->buf, data->len,
+				     data->ctxt);
+		if (err) {
+			ret = -EIO;
+			goto fail_free_buf;
+		}
+		data->buf = NULL;
+		data->len = 0;
+		data->buf = diagmem_alloc(driver, DIAG_MAX_HDLC_BUF_SIZE +
+					APF_DIAG_PADDING,
+					  POOL_TYPE_HDLC);
+		if (!data->buf) {
+			ret = PKT_DROP;
+			goto fail_ret;
+		}
+	}
+
+	enc.dest = data->buf + data->len;
+	enc.dest_last = (void *)(data->buf + data->len + max_encoded_size);
+	diag_hdlc_encode(&send, &enc);
+
+	/*
+	 * This is to check if after HDLC encoding, we are still within
+	 * the limits of aggregation buffer. If not, we write out the
+	 * current buffer and start aggregation in a newly allocated
+	 * buffer.
+	 */
+	if ((uintptr_t)enc.dest >= (uintptr_t)(data->buf +
+					       DIAG_MAX_HDLC_BUF_SIZE)) {
+		err = diag_mux_write(DIAG_LOCAL_PROC, data->buf, data->len,
+				     data->ctxt);
+		if (err) {
+			ret = -EIO;
+			goto fail_free_buf;
+		}
+		data->buf = NULL;
+		data->len = 0;
+		data->buf = diagmem_alloc(driver, DIAG_MAX_HDLC_BUF_SIZE +
+					APF_DIAG_PADDING,
+					 POOL_TYPE_HDLC);
+		if (!data->buf) {
+			ret = PKT_DROP;
+			goto fail_ret;
+		}
+
+		enc.dest = data->buf + data->len;
+		enc.dest_last = (void *)(data->buf + data->len +
+					 max_encoded_size);
+		diag_hdlc_encode(&send, &enc);
+	}
+
+	data->len = (((uintptr_t)enc.dest - (uintptr_t)data->buf) <
+			DIAG_MAX_HDLC_BUF_SIZE) ?
+			((uintptr_t)enc.dest - (uintptr_t)data->buf) :
+			DIAG_MAX_HDLC_BUF_SIZE;
+
+	if (pkt_type == DATA_TYPE_RESPONSE) {
+		err = diag_mux_write(DIAG_LOCAL_PROC, data->buf, data->len,
+				     data->ctxt);
+		if (err) {
+			ret = -EIO;
+			goto fail_free_buf;
+		}
+		data->buf = NULL;
+		data->len = 0;
+	}
+
+	return PKT_ALLOC;
+
+fail_free_buf:
+	diagmem_free(driver, data->buf, POOL_TYPE_HDLC);
+	data->buf = NULL;
+	data->len = 0;
+
+fail_ret:
+	return ret;
+}
+
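+/*
+ * Frame an apps packet in the non-HDLC format (packet frame header,
+ * payload, trailing control character) and aggregate it, flushing the
+ * buffer on overflow or when the packet is a response.
+ */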
+static int diag_process_apps_data_non_hdlc(unsigned char *buf, int len,
+					   int pkt_type)
+{
+	int err = 0;
+	int ret = PKT_DROP;
+	struct diag_pkt_frame_t header;
+	struct diag_apps_data_t *data = &non_hdlc_data;
+	/*
+	 * When the data is not HDLC encoded, the maximum packet size equals
+	 * the size of the packet frame header plus the payload length. Add 1
+	 * byte for the trailing delimiter 0x7E.
+	 */
+	const uint32_t max_pkt_size = sizeof(header) + len + 1;
+
+	if (!buf || len <= 0) {
+		pr_err("diag: In %s, invalid buf: %pK len: %d\n",
+		       __func__, buf, len);
+		return -EIO;
+	}
+
+	if (!data->buf) {
+		data->buf = diagmem_alloc(driver, DIAG_MAX_HDLC_BUF_SIZE +
+					APF_DIAG_PADDING,
+					  POOL_TYPE_HDLC);
+		if (!data->buf) {
+			ret = PKT_DROP;
+			goto fail_ret;
+		}
+	}
+
+	if ((DIAG_MAX_HDLC_BUF_SIZE - data->len) <= max_pkt_size) {
+		err = diag_mux_write(DIAG_LOCAL_PROC, data->buf, data->len,
+				     data->ctxt);
+		if (err) {
+			ret = -EIO;
+			goto fail_free_buf;
+		}
+		data->buf = NULL;
+		data->len = 0;
+		data->buf = diagmem_alloc(driver, DIAG_MAX_HDLC_BUF_SIZE +
+					APF_DIAG_PADDING,
+					  POOL_TYPE_HDLC);
+		if (!data->buf) {
+			ret = PKT_DROP;
+			goto fail_ret;
+		}
+	}
+
+	header.start = CONTROL_CHAR;
+	header.version = 1;
+	header.length = len;
+	memcpy(data->buf + data->len, &header, sizeof(header));
+	data->len += sizeof(header);
+	memcpy(data->buf + data->len, buf, len);
+	data->len += len;
+	*(uint8_t *)(data->buf + data->len) = CONTROL_CHAR;
+	data->len += sizeof(uint8_t);
+	if (pkt_type == DATA_TYPE_RESPONSE) {
+		err = diag_mux_write(DIAG_LOCAL_PROC, data->buf, data->len,
+				     data->ctxt);
+		if (err) {
+			ret = -EIO;
+			goto fail_free_buf;
+		}
+		data->buf = NULL;
+		data->len = 0;
+	}
+
+	return PKT_ALLOC;
+
+fail_free_buf:
+	diagmem_free(driver, data->buf, POOL_TYPE_HDLC);
+	data->buf = NULL;
+	data->len = 0;
+
+fail_ret:
+	return ret;
+}
+
+static int diag_user_process_dci_data(const char __user *buf, int len)
+{
+	int err = 0;
+	const int mempool = POOL_TYPE_USER;
+	unsigned char *user_space_data = NULL;
+
+	if (!buf || len <= 0 || len > diag_mempools[mempool].itemsize) {
+		pr_err_ratelimited("diag: In %s, invalid buf %pK len: %d\n",
+				   __func__, buf, len);
+		return -EBADMSG;
+	}
+
+	user_space_data = diagmem_alloc(driver, len, mempool);
+	if (!user_space_data)
+		return -ENOMEM;
+
+	err = copy_from_user(user_space_data, buf, len);
+	if (err) {
+		pr_err_ratelimited("diag: In %s, unable to copy data from userspace, err: %d\n",
+				   __func__, err);
+		err = DIAG_DCI_SEND_DATA_FAIL;
+		goto fail;
+	}
+
+	err = diag_process_dci_transaction(user_space_data, len);
+fail:
+	diagmem_free(driver, user_space_data, mempool);
+	user_space_data = NULL;
+	return err;
+}
+
+static int diag_user_process_dci_apps_data(const char __user *buf, int len,
+					   int pkt_type)
+{
+	int err = 0;
+	const int mempool = POOL_TYPE_COPY;
+	unsigned char *user_space_data = NULL;
+
+	if (!buf || len <= 0 || len > diag_mempools[mempool].itemsize) {
+		pr_err_ratelimited("diag: In %s, invalid buf %pK len: %d\n",
+				   __func__, buf, len);
+		return -EBADMSG;
+	}
+
+	pkt_type &= (DCI_PKT_TYPE | DATA_TYPE_DCI_LOG | DATA_TYPE_DCI_EVENT);
+	if (!pkt_type) {
+		pr_err_ratelimited("diag: In %s, invalid pkt_type: %d\n",
+				   __func__, pkt_type);
+		return -EBADMSG;
+	}
+
+	user_space_data = diagmem_alloc(driver, len, mempool);
+	if (!user_space_data)
+		return -ENOMEM;
+
+	err = copy_from_user(user_space_data, buf, len);
+	if (err) {
+		pr_alert("diag: In %s, unable to copy data from userspace, err: %d\n",
+			 __func__, err);
+		goto fail;
+	}
+
+	diag_process_apps_dci_read_data(pkt_type, user_space_data, len);
+fail:
+	diagmem_free(driver, user_space_data, mempool);
+	user_space_data = NULL;
+	return err;
+}
+
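+/*
+ * Process a raw command from userspace. Commands carrying a remote
+ * processor token are forwarded to the bridge; local commands are
+ * optionally validated against the mask check and then handled as apps
+ * packets.
+ */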
+static int diag_user_process_raw_data(const char __user *buf, int len)
+{
+	int err = 0;
+	int ret = 0;
+	int token_offset = 0;
+	int remote_proc = 0;
+	const int mempool = POOL_TYPE_COPY;
+	unsigned char *user_space_data = NULL;
+
+	if (!buf || len <= 0 || len > CALLBACK_BUF_SIZE) {
+		pr_err_ratelimited("diag: In %s, invalid buf %pK len: %d\n",
+				   __func__, buf, len);
+		return -EBADMSG;
+	}
+
+	user_space_data = diagmem_alloc(driver, len, mempool);
+	if (!user_space_data)
+		return -ENOMEM;
+
+	err = copy_from_user(user_space_data, buf, len);
+	if (err) {
+		pr_err("diag: copy failed for user space data\n");
+		ret = -EFAULT;
+		goto fail;
+	}
+
+	/* Check for proc_type */
+	remote_proc = diag_get_remote(*(int *)user_space_data);
+	if (remote_proc) {
+		token_offset = sizeof(int);
+		if (len <= MIN_SIZ_ALLOW) {
+			pr_err("diag: In %s, possible integer underflow, payload size: %d\n",
+			       __func__, len);
+			diagmem_free(driver, user_space_data, mempool);
+			user_space_data = NULL;
+			return -EBADMSG;
+		}
+		len -= sizeof(int);
+	}
+	if (driver->mask_check) {
+		if (!mask_request_validate(user_space_data +
+						token_offset)) {
+			pr_alert("diag: mask request Invalid\n");
+			diagmem_free(driver, user_space_data, mempool);
+			user_space_data = NULL;
+			return -EFAULT;
+		}
+	}
+	if (remote_proc) {
+		ret = diag_send_raw_data_remote(remote_proc,
+				(void *)(user_space_data + token_offset),
+				len, USER_SPACE_RAW_DATA);
+		if (ret) {
+			pr_err("diag: Error sending data to remote proc %d, err: %d\n",
+				remote_proc, ret);
+		}
+	} else {
+		wait_event_interruptible(driver->wait_q,
+					 (driver->in_busy_pktdata == 0));
+		ret = diag_process_apps_pkt(user_space_data, len,
+			current->tgid);
+		if (ret == 1)
+			diag_send_error_rsp((void *)(user_space_data), len,
+						current->tgid);
+	}
+fail:
+	diagmem_free(driver, user_space_data, mempool);
+	user_space_data = NULL;
+	return ret;
+}
+
+static int diag_user_process_userspace_data(const char __user *buf, int len)
+{
+	int err = 0;
+	int max_retries = 3;
+	int retry_count = 0;
+	int remote_proc = 0;
+	int token_offset = 0;
+	struct diag_md_session_t *session_info = NULL;
+	uint8_t hdlc_disabled;
+
+	if (!buf || len <= 0 || len > USER_SPACE_DATA) {
+		pr_err_ratelimited("diag: In %s, invalid buf %pK len: %d\n",
+				   __func__, buf, len);
+		return -EBADMSG;
+	}
+
+	do {
+		if (!driver->user_space_data_busy)
+			break;
+		retry_count++;
+		usleep_range(10000, 10100);
+	} while (retry_count < max_retries);
+
+	if (driver->user_space_data_busy)
+		return -EAGAIN;
+
+	err = copy_from_user(driver->user_space_data_buf, buf, len);
+	if (err) {
+		pr_err("diag: In %s, failed to copy data from userspace, err: %d\n",
+		       __func__, err);
+		return -EIO;
+	}
+
+	/* Check for proc_type */
+	remote_proc = diag_get_remote(*(int *)driver->user_space_data_buf);
+	if (remote_proc) {
+		if (len <= MIN_SIZ_ALLOW) {
+			pr_err("diag: Integer underflow in %s, payload size: %d",
+			       __func__, len);
+			return -EBADMSG;
+		}
+		token_offset = sizeof(int);
+		len -= sizeof(int);
+	}
+
+	/* Check masks for On-Device logging */
+	if (driver->mask_check) {
+		if (!mask_request_validate(driver->user_space_data_buf +
+					   token_offset)) {
+			pr_alert("diag: mask request Invalid\n");
+			return -EFAULT;
+		}
+	}
+
+	/* send masks to local processor now */
+	if (!remote_proc) {
+		mutex_lock(&driver->md_session_lock);
+		session_info = diag_md_session_get_pid(current->tgid);
+		if (!session_info) {
+			pr_err("diag: In %s, request came from invalid md session, pid: %d\n",
+				__func__, current->tgid);
+			mutex_unlock(&driver->md_session_lock);
+			return -EINVAL;
+		}
+		hdlc_disabled = session_info->hdlc_disabled;
+		mutex_unlock(&driver->md_session_lock);
+		if (!hdlc_disabled)
+			diag_process_hdlc_pkt((void *)
+				(driver->user_space_data_buf),
+				len, current->tgid);
+		else
+			diag_process_non_hdlc_pkt((char *)
+						(driver->user_space_data_buf),
+						len, current->tgid);
+		return 0;
+	}
+
+	err = diag_process_userspace_remote(remote_proc,
+					    driver->user_space_data_buf +
+					    token_offset, len);
+	if (err) {
+		driver->user_space_data_busy = 0;
+		pr_err("diag: Error sending mask to remote proc %d, err: %d\n",
+		       remote_proc, err);
+	}
+
+	return err;
+}
+
+static int diag_user_process_apps_data(const char __user *buf, int len,
+				       int pkt_type)
+{
+	int ret = 0;
+	int stm_size = 0;
+	const int mempool = POOL_TYPE_COPY;
+	unsigned char *user_space_data = NULL;
+	struct diag_md_session_t *session_info = NULL;
+	uint8_t hdlc_disabled;
+
+	if (!buf || len <= 0 || len > DIAG_MAX_RSP_SIZE) {
+		pr_err_ratelimited("diag: In %s, invalid buf %pK len: %d\n",
+				   __func__, buf, len);
+		return -EBADMSG;
+	}
+
+	switch (pkt_type) {
+	case DATA_TYPE_EVENT:
+	case DATA_TYPE_F3:
+	case DATA_TYPE_LOG:
+	case DATA_TYPE_RESPONSE:
+	case DATA_TYPE_DELAYED_RESPONSE:
+		break;
+	default:
+		pr_err_ratelimited("diag: In %s, invalid pkt_type: %d\n",
+				   __func__, pkt_type);
+		return -EBADMSG;
+	}
+
+	user_space_data = diagmem_alloc(driver, len, mempool);
+	if (!user_space_data) {
+		diag_record_stats(pkt_type, PKT_DROP);
+		return -ENOMEM;
+	}
+
+	ret = copy_from_user(user_space_data, buf, len);
+	if (ret) {
+		pr_alert("diag: In %s, unable to copy data from userspace, err: %d\n",
+			 __func__, ret);
+		diagmem_free(driver, user_space_data, mempool);
+		user_space_data = NULL;
+		diag_record_stats(pkt_type, PKT_DROP);
+		return -EBADMSG;
+	}
+
+	if (driver->stm_state[APPS_DATA] &&
+	    (pkt_type >= DATA_TYPE_EVENT) && (pkt_type <= DATA_TYPE_LOG)) {
+		stm_size = stm_log_inv_ts(OST_ENTITY_DIAG, 0, user_space_data,
+					  len);
+		if (stm_size == 0) {
+			pr_debug("diag: In %s, stm_log_inv_ts returned size of 0\n",
+				 __func__);
+		}
+		diagmem_free(driver, user_space_data, mempool);
+		user_space_data = NULL;
+
+		return 0;
+	}
+
+	mutex_lock(&apps_data_mutex);
+	mutex_lock(&driver->hdlc_disable_mutex);
+	mutex_lock(&driver->md_session_lock);
+	session_info = diag_md_session_get_peripheral(APPS_DATA);
+	if (session_info)
+		hdlc_disabled = session_info->hdlc_disabled;
+	else
+		hdlc_disabled = driver->hdlc_disabled;
+	mutex_unlock(&driver->md_session_lock);
+	if (hdlc_disabled)
+		ret = diag_process_apps_data_non_hdlc(user_space_data, len,
+						      pkt_type);
+	else
+		ret = diag_process_apps_data_hdlc(user_space_data, len,
+						  pkt_type);
+	mutex_unlock(&driver->hdlc_disable_mutex);
+	mutex_unlock(&apps_data_mutex);
+
+	diagmem_free(driver, user_space_data, mempool);
+	user_space_data = NULL;
+
+	check_drain_timer();
+
+	if (ret == PKT_DROP)
+		diag_record_stats(pkt_type, PKT_DROP);
+	else if (ret == PKT_ALLOC)
+		diag_record_stats(pkt_type, PKT_ALLOC);
+	else
+		return ret;
+
+	return 0;
+}
+
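+/*
+ * Read handler for the diag char device. Waits until data is marked
+ * ready for the calling client, then copies out one data type per call,
+ * prefixed with an integer identifying that type.
+ */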
+static ssize_t diagchar_read(struct file *file, char __user *buf, size_t count,
+			  loff_t *ppos)
+{
+	struct diag_dci_client_tbl *entry;
+	struct list_head *start, *temp;
+	int index = -1, i = 0, ret = 0;
+	int data_type;
+	int copy_dci_data = 0;
+	int exit_stat = 0;
+	int write_len = 0;
+	struct diag_md_session_t *session_info = NULL;
+
+	mutex_lock(&driver->diagchar_mutex);
+	for (i = 0; i < driver->num_clients; i++)
+		if (driver->client_map[i].pid == current->tgid)
+			index = i;
+	mutex_unlock(&driver->diagchar_mutex);
+
+	if (index == -1) {
+		pr_err("diag: Client PID not found in table\n");
+		return -EINVAL;
+	}
+	if (!buf) {
+		pr_err("diag: bad address from user side\n");
+		return -EFAULT;
+	}
+	wait_event_interruptible(driver->wait_q,
+			atomic_read(&driver->data_ready_notif[index]) > 0);
+
+	mutex_lock(&driver->diagchar_mutex);
+
+	if ((driver->data_ready[index] & USER_SPACE_DATA_TYPE) &&
+	    (driver->logging_mode == DIAG_MEMORY_DEVICE_MODE ||
+	     driver->logging_mode == DIAG_MULTI_MODE)) {
+		pr_debug("diag: process woken up\n");
+		/* Copy the type of data being passed */
+		data_type = driver->data_ready[index] & USER_SPACE_DATA_TYPE;
+		driver->data_ready[index] ^= USER_SPACE_DATA_TYPE;
+		atomic_dec(&driver->data_ready_notif[index]);
+		COPY_USER_SPACE_OR_EXIT(buf, data_type, sizeof(int));
+		/* placeholder for the number of data field */
+		ret += sizeof(int);
+		mutex_lock(&driver->md_session_lock);
+		session_info = diag_md_session_get_pid(current->tgid);
+		exit_stat = diag_md_copy_to_user(buf, &ret, count,
+						 session_info);
+		mutex_unlock(&driver->md_session_lock);
+		goto exit;
+	} else if (driver->data_ready[index] & USER_SPACE_DATA_TYPE) {
+		/*
+		 * In case the thread wakes up and the logging mode is no
+		 * longer memory device, the condition needs to be cleared.
+		 */
+		driver->data_ready[index] ^= USER_SPACE_DATA_TYPE;
+		atomic_dec(&driver->data_ready_notif[index]);
+	}
+
+	if (driver->data_ready[index] & HDLC_SUPPORT_TYPE) {
+		data_type = driver->data_ready[index] & HDLC_SUPPORT_TYPE;
+		driver->data_ready[index] ^= HDLC_SUPPORT_TYPE;
+		atomic_dec(&driver->data_ready_notif[index]);
+		COPY_USER_SPACE_OR_EXIT(buf, data_type, sizeof(int));
+		mutex_lock(&driver->md_session_lock);
+		session_info = diag_md_session_get_pid(current->tgid);
+		if (session_info) {
+			COPY_USER_SPACE_OR_ERR(buf+4,
+					session_info->hdlc_disabled,
+					sizeof(uint8_t));
+			if (ret == -EFAULT) {
+				mutex_unlock(&driver->md_session_lock);
+				goto exit;
+			}
+		}
+		mutex_unlock(&driver->md_session_lock);
+		goto exit;
+	}
+
+	if (driver->data_ready[index] & DEINIT_TYPE) {
+		/* Copy the type of data being passed */
+		data_type = driver->data_ready[index] & DEINIT_TYPE;
+		COPY_USER_SPACE_OR_EXIT(buf, data_type, 4);
+		driver->data_ready[index] ^= DEINIT_TYPE;
+		atomic_dec(&driver->data_ready_notif[index]);
+		mutex_unlock(&driver->diagchar_mutex);
+		diag_remove_client_entry(file);
+		return ret;
+	}
+
+	if (driver->data_ready[index] & MSG_MASKS_TYPE) {
+		/* Copy the type of data being passed */
+		data_type = driver->data_ready[index] & MSG_MASKS_TYPE;
+		mutex_lock(&driver->md_session_lock);
+		session_info = diag_md_session_get_peripheral(APPS_DATA);
+		COPY_USER_SPACE_OR_ERR(buf, data_type, sizeof(int));
+		if (ret == -EFAULT) {
+			mutex_unlock(&driver->md_session_lock);
+			goto exit;
+		}
+		write_len = diag_copy_to_user_msg_mask(buf + ret, count,
+						       session_info);
+		mutex_unlock(&driver->md_session_lock);
+		if (write_len > 0)
+			ret += write_len;
+		driver->data_ready[index] ^= MSG_MASKS_TYPE;
+		atomic_dec(&driver->data_ready_notif[index]);
+		goto exit;
+	}
+
+	if (driver->data_ready[index] & EVENT_MASKS_TYPE) {
+		/* Copy the type of data being passed */
+		data_type = driver->data_ready[index] & EVENT_MASKS_TYPE;
+		mutex_lock(&driver->md_session_lock);
+		session_info = diag_md_session_get_peripheral(APPS_DATA);
+		COPY_USER_SPACE_OR_ERR(buf, data_type, 4);
+		if (ret == -EFAULT) {
+			mutex_unlock(&driver->md_session_lock);
+			goto exit;
+		}
+		if (session_info && session_info->event_mask &&
+		    session_info->event_mask->ptr) {
+			COPY_USER_SPACE_OR_ERR(buf + sizeof(int),
+					*(session_info->event_mask->ptr),
+					session_info->event_mask->mask_len);
+			if (ret == -EFAULT) {
+				mutex_unlock(&driver->md_session_lock);
+				goto exit;
+			}
+		} else {
+			COPY_USER_SPACE_OR_ERR(buf + sizeof(int),
+						*(event_mask.ptr),
+						event_mask.mask_len);
+			if (ret == -EFAULT) {
+				mutex_unlock(&driver->md_session_lock);
+				goto exit;
+			}
+		}
+		mutex_unlock(&driver->md_session_lock);
+		driver->data_ready[index] ^= EVENT_MASKS_TYPE;
+		atomic_dec(&driver->data_ready_notif[index]);
+		goto exit;
+	}
+
+	if (driver->data_ready[index] & LOG_MASKS_TYPE) {
+		/* Copy the type of data being passed */
+		data_type = driver->data_ready[index] & LOG_MASKS_TYPE;
+		mutex_lock(&driver->md_session_lock);
+		session_info = diag_md_session_get_peripheral(APPS_DATA);
+		COPY_USER_SPACE_OR_ERR(buf, data_type, sizeof(int));
+		if (ret == -EFAULT) {
+			mutex_unlock(&driver->md_session_lock);
+			goto exit;
+		}
+		write_len = diag_copy_to_user_log_mask(buf + ret, count,
+						       session_info);
+		mutex_unlock(&driver->md_session_lock);
+		if (write_len > 0)
+			ret += write_len;
+		driver->data_ready[index] ^= LOG_MASKS_TYPE;
+		atomic_dec(&driver->data_ready_notif[index]);
+		goto exit;
+	}
+
+	if (driver->data_ready[index] & PKT_TYPE) {
+		/* Copy the type of data being passed */
+		data_type = driver->data_ready[index] & PKT_TYPE;
+		COPY_USER_SPACE_OR_EXIT(buf, data_type, sizeof(data_type));
+		COPY_USER_SPACE_OR_EXIT(buf + sizeof(data_type),
+					*(driver->apps_req_buf),
+					driver->apps_req_buf_len);
+		driver->data_ready[index] ^= PKT_TYPE;
+		atomic_dec(&driver->data_ready_notif[index]);
+		driver->in_busy_pktdata = 0;
+		goto exit;
+	}
+
+	if (driver->data_ready[index] & DCI_PKT_TYPE) {
+		/* Copy the type of data being passed */
+		data_type = driver->data_ready[index] & DCI_PKT_TYPE;
+		COPY_USER_SPACE_OR_EXIT(buf, data_type, 4);
+		COPY_USER_SPACE_OR_EXIT(buf+4, *(driver->dci_pkt_buf),
+					driver->dci_pkt_length);
+		driver->data_ready[index] ^= DCI_PKT_TYPE;
+		atomic_dec(&driver->data_ready_notif[index]);
+		driver->in_busy_dcipktdata = 0;
+		goto exit;
+	}
+
+	if (driver->data_ready[index] & DCI_EVENT_MASKS_TYPE) {
+		/* Copy the type of data being passed */
+		data_type = driver->data_ready[index] & DCI_EVENT_MASKS_TYPE;
+		COPY_USER_SPACE_OR_EXIT(buf, data_type, 4);
+		COPY_USER_SPACE_OR_EXIT(buf+4, driver->num_dci_client, 4);
+		COPY_USER_SPACE_OR_EXIT(buf + 8, (dci_ops_tbl[DCI_LOCAL_PROC].
+				event_mask_composite), DCI_EVENT_MASK_SIZE);
+		driver->data_ready[index] ^= DCI_EVENT_MASKS_TYPE;
+		atomic_dec(&driver->data_ready_notif[index]);
+		goto exit;
+	}
+
+	if (driver->data_ready[index] & DCI_LOG_MASKS_TYPE) {
+		/* Copy the type of data being passed */
+		data_type = driver->data_ready[index] & DCI_LOG_MASKS_TYPE;
+		COPY_USER_SPACE_OR_EXIT(buf, data_type, 4);
+		COPY_USER_SPACE_OR_EXIT(buf+4, driver->num_dci_client, 4);
+		COPY_USER_SPACE_OR_EXIT(buf+8, (dci_ops_tbl[DCI_LOCAL_PROC].
+				log_mask_composite), DCI_LOG_MASK_SIZE);
+		driver->data_ready[index] ^= DCI_LOG_MASKS_TYPE;
+		atomic_dec(&driver->data_ready_notif[index]);
+		goto exit;
+	}
+
+exit:
+	if (driver->data_ready[index] & DCI_DATA_TYPE) {
+		data_type = driver->data_ready[index] & DCI_DATA_TYPE;
+		mutex_unlock(&driver->diagchar_mutex);
+		/* Copy the type of data being passed */
+		mutex_lock(&driver->dci_mutex);
+		list_for_each_safe(start, temp, &driver->dci_client_list) {
+			entry = list_entry(start, struct diag_dci_client_tbl,
+									track);
+			if (entry->client->tgid != current->tgid)
+				continue;
+			if (!entry->in_service)
+				continue;
+			if (copy_to_user(buf + ret, &data_type, sizeof(int))) {
+				mutex_unlock(&driver->dci_mutex);
+				goto end;
+			}
+			ret += sizeof(int);
+			if (copy_to_user(buf + ret, &entry->client_info.token,
+				sizeof(int))) {
+				mutex_unlock(&driver->dci_mutex);
+				goto end;
+			}
+			ret += sizeof(int);
+			copy_dci_data = 1;
+			exit_stat = diag_copy_dci(buf, count, entry, &ret);
+			mutex_lock(&driver->diagchar_mutex);
+			driver->data_ready[index] ^= DCI_DATA_TYPE;
+			atomic_dec(&driver->data_ready_notif[index]);
+			mutex_unlock(&driver->diagchar_mutex);
+			if (exit_stat == 1) {
+				mutex_unlock(&driver->dci_mutex);
+				goto end;
+			}
+		}
+		mutex_unlock(&driver->dci_mutex);
+		goto end;
+	}
+	mutex_unlock(&driver->diagchar_mutex);
+end:
+	/*
+	 * Flush any read that is currently pending on DCI data and
+	 * command channels. This will ensure that the next read is not
+	 * missed.
+	 */
+	if (copy_dci_data) {
+		diag_ws_on_copy_complete(DIAG_WS_DCI);
+		flush_workqueue(driver->diag_dci_wq);
+	}
+	return ret;
+}
+
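+/*
+ * Write handler for the diag char device. The first integer of the
+ * payload identifies the packet type; the remainder is dispatched to
+ * the matching DCI, raw, userspace or apps data handler.
+ */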
+static ssize_t diagchar_write(struct file *file, const char __user *buf,
+			      size_t count, loff_t *ppos)
+{
+	int err = 0;
+	int pkt_type = 0;
+	int payload_len = 0;
+	const char __user *payload_buf = NULL;
+
+	/*
+	 * The data coming from user space should at least contain the
+	 * packet type header.
+	 */
+	if (count < sizeof(int)) {
+		pr_err("diag: In %s, client is sending short data, len: %d\n",
+		       __func__, (int)count);
+		return -EBADMSG;
+	}
+
+	err = copy_from_user(&pkt_type, buf, sizeof(int));
+	if (err) {
+		pr_err_ratelimited("diag: In %s, unable to copy pkt_type from userspace, err: %d\n",
+				   __func__, err);
+		return -EIO;
+	}
+
+#ifdef CONFIG_DIAG_OVER_USB
+	if (driver->logging_mode == DIAG_USB_MODE && !driver->usb_connected) {
+		if (!((pkt_type == DCI_DATA_TYPE) ||
+		    (pkt_type == DCI_PKT_TYPE) ||
+		    (pkt_type & DATA_TYPE_DCI_LOG) ||
+		    (pkt_type & DATA_TYPE_DCI_EVENT))) {
+			pr_debug("diag: In %s, Dropping non DCI packet type\n",
+				 __func__);
+			return -EIO;
+		}
+	}
+#endif
+
+	payload_buf = buf + sizeof(int);
+	payload_len = count - sizeof(int);
+
+	if (pkt_type == DCI_PKT_TYPE)
+		return diag_user_process_dci_apps_data(payload_buf,
+						       payload_len,
+						       pkt_type);
+	else if (pkt_type == DCI_DATA_TYPE)
+		return diag_user_process_dci_data(payload_buf, payload_len);
+	else if (pkt_type == USER_SPACE_RAW_DATA_TYPE)
+		return diag_user_process_raw_data(payload_buf,
+						  payload_len);
+	else if (pkt_type == USER_SPACE_DATA_TYPE)
+		return diag_user_process_userspace_data(payload_buf,
+							payload_len);
+	if (pkt_type & (DATA_TYPE_DCI_LOG | DATA_TYPE_DCI_EVENT)) {
+		err = diag_user_process_dci_apps_data(payload_buf, payload_len,
+						      pkt_type);
+		if (pkt_type & DATA_TYPE_DCI_LOG)
+			pkt_type ^= DATA_TYPE_DCI_LOG;
+		if (pkt_type & DATA_TYPE_DCI_EVENT)
+			pkt_type ^= DATA_TYPE_DCI_EVENT;
+#ifdef CONFIG_DIAG_OVER_USB
+		/*
+		 * Check if the log or event is selected even on the regular
+		 * stream. If USB is not connected and we are not in memory
+		 * device mode, we should not process these logs/events.
+		 */
+		if (pkt_type && driver->logging_mode == DIAG_USB_MODE &&
+		    !driver->usb_connected)
+			return err;
+#endif
+	}
+
+	switch (pkt_type) {
+	case DATA_TYPE_EVENT:
+	case DATA_TYPE_F3:
+	case DATA_TYPE_LOG:
+	case DATA_TYPE_DELAYED_RESPONSE:
+	case DATA_TYPE_RESPONSE:
+		return diag_user_process_apps_data(payload_buf, payload_len,
+						   pkt_type);
+	default:
+		pr_err_ratelimited("diag: In %s, invalid pkt_type: %d\n",
+				   __func__, pkt_type);
+		return -EINVAL;
+	}
+
+	return err;
+}
+
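+/*
+ * Wakeup source accounting: the dci_ws and md_ws reference counts track
+ * packets that have been read from a channel but not yet copied to
+ * userspace, keeping the device awake until the copies complete.
+ */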
+void diag_ws_init(void)
+{
+	driver->dci_ws.ref_count = 0;
+	driver->dci_ws.copy_count = 0;
+	spin_lock_init(&driver->dci_ws.lock);
+
+	driver->md_ws.ref_count = 0;
+	driver->md_ws.copy_count = 0;
+	spin_lock_init(&driver->md_ws.lock);
+}
+
+static void diag_stats_init(void)
+{
+	if (!driver)
+		return;
+
+	driver->msg_stats.alloc_count = 0;
+	driver->msg_stats.drop_count = 0;
+
+	driver->log_stats.alloc_count = 0;
+	driver->log_stats.drop_count = 0;
+
+	driver->event_stats.alloc_count = 0;
+	driver->event_stats.drop_count = 0;
+}
+
+void diag_ws_on_notify(void)
+{
+	/*
+	 * Do not deal with reference count here as there can be spurious
+	 * interrupts.
+	 */
+	pm_stay_awake(driver->diag_dev);
+}
+
+void diag_ws_on_read(int type, int pkt_len)
+{
+	unsigned long flags;
+	struct diag_ws_ref_t *ws_ref = NULL;
+
+	switch (type) {
+	case DIAG_WS_DCI:
+		ws_ref = &driver->dci_ws;
+		break;
+	case DIAG_WS_MUX:
+		ws_ref = &driver->md_ws;
+		break;
+	default:
+		pr_err_ratelimited("diag: In %s, invalid type: %d\n",
+				   __func__, type);
+		return;
+	}
+
+	spin_lock_irqsave(&ws_ref->lock, flags);
+	if (pkt_len > 0) {
+		ws_ref->ref_count++;
+	} else {
+		if (ws_ref->ref_count < 1) {
+			ws_ref->ref_count = 0;
+			ws_ref->copy_count = 0;
+		}
+		diag_ws_release();
+	}
+	spin_unlock_irqrestore(&ws_ref->lock, flags);
+}
+
+void diag_ws_on_copy(int type)
+{
+	unsigned long flags;
+	struct diag_ws_ref_t *ws_ref = NULL;
+
+	switch (type) {
+	case DIAG_WS_DCI:
+		ws_ref = &driver->dci_ws;
+		break;
+	case DIAG_WS_MUX:
+		ws_ref = &driver->md_ws;
+		break;
+	default:
+		pr_err_ratelimited("diag: In %s, invalid type: %d\n",
+				   __func__, type);
+		return;
+	}
+
+	spin_lock_irqsave(&ws_ref->lock, flags);
+	ws_ref->copy_count++;
+	spin_unlock_irqrestore(&ws_ref->lock, flags);
+}
+
+void diag_ws_on_copy_fail(int type)
+{
+	unsigned long flags;
+	struct diag_ws_ref_t *ws_ref = NULL;
+
+	switch (type) {
+	case DIAG_WS_DCI:
+		ws_ref = &driver->dci_ws;
+		break;
+	case DIAG_WS_MUX:
+		ws_ref = &driver->md_ws;
+		break;
+	default:
+		pr_err_ratelimited("diag: In %s, invalid type: %d\n",
+				   __func__, type);
+		return;
+	}
+
+	spin_lock_irqsave(&ws_ref->lock, flags);
+	ws_ref->ref_count--;
+	spin_unlock_irqrestore(&ws_ref->lock, flags);
+
+	diag_ws_release();
+}
+
+void diag_ws_on_copy_complete(int type)
+{
+	unsigned long flags;
+	struct diag_ws_ref_t *ws_ref = NULL;
+
+	switch (type) {
+	case DIAG_WS_DCI:
+		ws_ref = &driver->dci_ws;
+		break;
+	case DIAG_WS_MUX:
+		ws_ref = &driver->md_ws;
+		break;
+	default:
+		pr_err_ratelimited("diag: In %s, invalid type: %d\n",
+				   __func__, type);
+		return;
+	}
+
+	spin_lock_irqsave(&ws_ref->lock, flags);
+	ws_ref->ref_count -= ws_ref->copy_count;
+	if (ws_ref->ref_count < 1)
+		ws_ref->ref_count = 0;
+	ws_ref->copy_count = 0;
+	spin_unlock_irqrestore(&ws_ref->lock, flags);
+
+	diag_ws_release();
+}
+
+void diag_ws_reset(int type)
+{
+	unsigned long flags;
+	struct diag_ws_ref_t *ws_ref = NULL;
+
+	switch (type) {
+	case DIAG_WS_DCI:
+		ws_ref = &driver->dci_ws;
+		break;
+	case DIAG_WS_MUX:
+		ws_ref = &driver->md_ws;
+		break;
+	default:
+		pr_err_ratelimited("diag: In %s, invalid type: %d\n",
+				   __func__, type);
+		return;
+	}
+
+	spin_lock_irqsave(&ws_ref->lock, flags);
+	ws_ref->ref_count = 0;
+	ws_ref->copy_count = 0;
+	spin_unlock_irqrestore(&ws_ref->lock, flags);
+
+	diag_ws_release();
+}
+
+void diag_ws_release(void)
+{
+	if (driver->dci_ws.ref_count == 0 && driver->md_ws.ref_count == 0)
+		pm_relax(driver->diag_dev);
+}
+
+#ifdef DIAG_DEBUG
+static void diag_debug_init(void)
+{
+	diag_ipc_log = ipc_log_context_create(DIAG_IPC_LOG_PAGES, "diag", 0);
+	if (!diag_ipc_log)
+		pr_err("diag: Failed to create IPC logging context\n");
+	/*
+	 * Set the bit mask here as per diag_ipc_logging.h to enable debug logs
+	 * to be logged to IPC
+	 */
+	diag_debug_mask = DIAG_DEBUG_PERIPHERALS | DIAG_DEBUG_DCI |
+				DIAG_DEBUG_USERSPACE | DIAG_DEBUG_BRIDGE;
+}
+#else
+static void diag_debug_init(void)
+{
+}
+#endif
+
+static int diag_real_time_info_init(void)
+{
+	int i;
+	if (!driver)
+		return -EIO;
+	for (i = 0; i < DIAG_NUM_PROC; i++) {
+		driver->real_time_mode[i] = 1;
+		driver->proc_rt_vote_mask[i] |= DIAG_PROC_DCI;
+		driver->proc_rt_vote_mask[i] |= DIAG_PROC_MEMORY_DEVICE;
+	}
+	driver->real_time_update_busy = 0;
+	driver->proc_active_mask = 0;
+	driver->diag_real_time_wq = create_singlethread_workqueue(
+							"diag_real_time_wq");
+	if (!driver->diag_real_time_wq)
+		return -ENOMEM;
+	INIT_WORK(&(driver->diag_real_time_work), diag_real_time_work_fn);
+	mutex_init(&driver->real_time_mutex);
+	return 0;
+}
+
+static const struct file_operations diagcharfops = {
+	.owner = THIS_MODULE,
+	.read = diagchar_read,
+	.write = diagchar_write,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl = diagchar_compat_ioctl,
+#endif
+	.unlocked_ioctl = diagchar_ioctl,
+	.open = diagchar_open,
+	.release = diagchar_close
+};
+
+static int diagchar_setup_cdev(dev_t devno)
+{
+	int err;
+
+	cdev_init(driver->cdev, &diagcharfops);
+
+	driver->cdev->owner = THIS_MODULE;
+	driver->cdev->ops = &diagcharfops;
+
+	err = cdev_add(driver->cdev, devno, 1);
+
+	if (err) {
+		pr_err("diagchar cdev registration failed\n");
+		return -1;
+	}
+
+	driver->diagchar_class = class_create(THIS_MODULE, "diag");
+
+	if (IS_ERR(driver->diagchar_class)) {
+		printk(KERN_ERR "Error creating diagchar class.\n");
+		return -1;
+	}
+
+	driver->diag_dev = device_create(driver->diagchar_class, NULL, devno,
+					 (void *)driver, "diag");
+
+	if (IS_ERR(driver->diag_dev))
+		return PTR_ERR(driver->diag_dev);
+
+	driver->diag_dev->power.wakeup = wakeup_source_register("DIAG_WS");
+	return 0;
+}
+
+static int diagchar_cleanup(void)
+{
+	if (driver) {
+		if (driver->cdev) {
+			/* TODO - Check if device exists before deleting */
+			device_destroy(driver->diagchar_class,
+				       MKDEV(driver->major,
+					     driver->minor_start));
+			cdev_del(driver->cdev);
+		}
+		if (!IS_ERR(driver->diagchar_class))
+			class_destroy(driver->diagchar_class);
+		kfree(driver);
+	}
+	return 0;
+}
+
+static int diag_mhi_probe(struct platform_device *pdev)
+{
+	int ret;
+
+	if (!mhi_is_device_ready(&pdev->dev, "qcom,mhi"))
+		return -EPROBE_DEFER;
+	driver->pdev = pdev;
+	ret = diag_remote_init();
+	if (ret) {
+		diag_remote_exit();
+		return ret;
+	}
+	ret = diagfwd_bridge_init();
+	if (ret) {
+		diagfwd_bridge_exit();
+		return ret;
+	}
+	pr_debug("diag: mhi device is ready\n");
+	return 0;
+}
+
+static const struct of_device_id diag_mhi_table[] = {
+	{.compatible = "qcom,diag-mhi"},
+	{},
+};
+
+static struct platform_driver diag_mhi_driver = {
+	.probe = diag_mhi_probe,
+	.driver = {
+		.name = "DIAG MHI Platform",
+		.owner = THIS_MODULE,
+		.of_match_table = diag_mhi_table,
+	},
+};
+
+static int __init diagchar_init(void)
+{
+	dev_t dev;
+	int error, ret, i;
+
+	pr_debug("diagfwd initializing ..\n");
+	ret = 0;
+	driver = kzalloc(sizeof(struct diagchar_dev) + 5, GFP_KERNEL);
+	if (!driver)
+		return -ENOMEM;
+	kmemleak_not_leak(driver);
+
+	timer_in_progress = 0;
+	driver->delayed_rsp_id = 0;
+	driver->hdlc_disabled = 0;
+	driver->dci_state = DIAG_DCI_NO_ERROR;
+	setup_timer(&drain_timer, drain_timer_func, 1234);
+	driver->supports_sockets = 1;
+	driver->time_sync_enabled = 0;
+	driver->uses_time_api = 0;
+	driver->poolsize = poolsize;
+	driver->poolsize_hdlc = poolsize_hdlc;
+	driver->poolsize_dci = poolsize_dci;
+	driver->poolsize_user = poolsize_user;
+	/*
+	 * POOL_TYPE_MUX_APPS is for the buffers in the Diag MUX layer.
+	 * The number of buffers encompasses Diag data generated on
+	 * the Apss processor + 1 for the responses generated exclusively on
+	 * the Apps processor + data from data channels (4 channels per
+	 * peripheral) + data from command channels (2)
+	 */
+	diagmem_setsize(POOL_TYPE_MUX_APPS, itemsize_usb_apps,
+			poolsize_usb_apps + 1 + (NUM_PERIPHERALS * 6));
+	driver->num_clients = max_clients;
+	driver->logging_mode = DIAG_USB_MODE;
+	for (i = 0; i < NUM_UPD; i++) {
+		driver->pd_logging_mode[i] = 0;
+		driver->pd_session_clear[i] = 0;
+	}
+	driver->num_pd_session = 0;
+	driver->mask_check = 0;
+	driver->in_busy_pktdata = 0;
+	driver->in_busy_dcipktdata = 0;
+	driver->rsp_buf_ctxt = SET_BUF_CTXT(APPS_DATA, TYPE_CMD, 1);
+	hdlc_data.ctxt = SET_BUF_CTXT(APPS_DATA, TYPE_DATA, 1);
+	hdlc_data.len = 0;
+	non_hdlc_data.ctxt = SET_BUF_CTXT(APPS_DATA, TYPE_DATA, 1);
+	non_hdlc_data.len = 0;
+	mutex_init(&driver->hdlc_disable_mutex);
+	mutex_init(&driver->diagchar_mutex);
+	mutex_init(&driver->diag_maskclear_mutex);
+	mutex_init(&driver->diag_notifier_mutex);
+	mutex_init(&driver->diag_file_mutex);
+	mutex_init(&driver->delayed_rsp_mutex);
+	mutex_init(&apps_data_mutex);
+	mutex_init(&driver->msg_mask_lock);
+	mutex_init(&driver->hdlc_recovery_mutex);
+	for (i = 0; i < NUM_PERIPHERALS; i++)
+		mutex_init(&driver->diagfwd_channel_mutex[i]);
+	init_waitqueue_head(&driver->wait_q);
+	INIT_WORK(&(driver->diag_drain_work), diag_drain_work_fn);
+	INIT_WORK(&(driver->update_user_clients),
+			diag_update_user_client_work_fn);
+	INIT_WORK(&(driver->update_md_clients),
+			diag_update_md_client_work_fn);
+	diag_ws_init();
+	diag_stats_init();
+	diag_debug_init();
+	diag_md_session_init();
+
+	driver->incoming_pkt.capacity = DIAG_MAX_REQ_SIZE;
+	driver->incoming_pkt.data = kzalloc(DIAG_MAX_REQ_SIZE, GFP_KERNEL);
+	if (!driver->incoming_pkt.data)
+		goto fail;
+	kmemleak_not_leak(driver->incoming_pkt.data);
+	driver->incoming_pkt.processing = 0;
+	driver->incoming_pkt.read_len = 0;
+	driver->incoming_pkt.remaining = 0;
+	driver->incoming_pkt.total_len = 0;
+
+	ret = diag_real_time_info_init();
+	if (ret)
+		goto fail;
+	ret = diag_debugfs_init();
+	if (ret)
+		goto fail;
+	ret = diag_masks_init();
+	if (ret)
+		goto fail;
+	ret = diag_mux_init();
+	if (ret)
+		goto fail;
+	ret = diagfwd_init();
+	if (ret)
+		goto fail;
+	ret = diagfwd_cntl_init();
+	if (ret)
+		goto fail;
+	driver->dci_state = diag_dci_init();
+	ret = diagfwd_peripheral_init();
+	if (ret)
+		goto fail;
+	diagfwd_cntl_channel_init();
+	if (driver->dci_state == DIAG_DCI_NO_ERROR)
+		diag_dci_channel_init();
+	pr_debug("diagchar initializing ..\n");
+	driver->num = 1;
+	driver->name = ((void *)driver) + sizeof(struct diagchar_dev);
+	strlcpy(driver->name, "diag", 4);
+	/* Get major number from kernel and initialize */
+	error = alloc_chrdev_region(&dev, driver->minor_start,
+				    driver->num, driver->name);
+	if (!error) {
+		driver->major = MAJOR(dev);
+		driver->minor_start = MINOR(dev);
+	} else {
+		pr_err("diag: Major number not allocated\n");
+		goto fail;
+	}
+	driver->cdev = cdev_alloc();
+	error = diagchar_setup_cdev(dev);
+	if (error)
+		goto fail;
+
+	pr_debug("diagchar initialized now");
+	platform_driver_register(&diag_mhi_driver);
+	return 0;
+
+fail:
+	pr_err("diagchar is not initialized, ret: %d\n", ret);
+	diag_debugfs_cleanup();
+	diagchar_cleanup();
+	diag_mux_exit();
+	diagfwd_peripheral_exit();
+	diagfwd_bridge_exit();
+	diagfwd_exit();
+	diagfwd_cntl_exit();
+	diag_dci_exit();
+	diag_masks_exit();
+	return -1;
+}
+
+static void diagchar_exit(void)
+{
+	printk(KERN_INFO "diagchar exiting ..\n");
+	diag_mempool_exit();
+	diag_mux_exit();
+	diagfwd_peripheral_exit();
+	diagfwd_exit();
+	diagfwd_cntl_exit();
+	diag_dci_exit();
+	diag_masks_exit();
+	diag_md_session_exit();
+	diag_remote_exit();
+	diag_debugfs_cleanup();
+	diagchar_cleanup();
+	printk(KERN_INFO "done diagchar exit\n");
+}
+
+module_init(diagchar_init);
+module_exit(diagchar_exit);
diff -Nruw linux-4.4.115-fbx/drivers/char/diag./diagchar.h linux-4.4.115-fbx/drivers/char/diag/diagchar.h
--- linux-4.4.115-fbx/drivers/char/diag./diagchar.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/char/diag/diagchar.h	2019-10-29 09:26:23.449201280 +0100
@@ -0,0 +1,688 @@
+/* Copyright (c) 2008-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef DIAGCHAR_H
+#define DIAGCHAR_H
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/mempool.h>
+#include <linux/mutex.h>
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <linux/workqueue.h>
+#include <linux/sched.h>
+#include <linux/wakelock.h>
+#include <soc/qcom/smd.h>
+#include <asm/atomic.h>
+#include "diagfwd_bridge.h"
+
+#define THRESHOLD_CLIENT_LIMIT	50
+
+/* Size of the USB buffers used for read and write*/
+#define USB_MAX_OUT_BUF 4096
+#define APPS_BUF_SIZE	4096
+#define IN_BUF_SIZE		16384
+#define MAX_SYNC_OBJ_NAME_SIZE	32
+
+#define DIAG_MAX_REQ_SIZE	(16 * 1024)
+#define DIAG_MAX_RSP_SIZE	(16 * 1024)
+#define APF_DIAG_PADDING	0
+/*
+ * In the worst case, the HDLC buffer can be at most twice the size of the
+ * original packet. Add 3 bytes for 16 bit CRC (2 bytes) and a delimiter
+ * (1 byte)
+ */
+#define DIAG_MAX_HDLC_BUF_SIZE	((DIAG_MAX_REQ_SIZE * 2) + 3)
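+/*
+ * Worked example (illustrative): with DIAG_MAX_REQ_SIZE = 16 * 1024, a
+ * pathological packet made entirely of 0x7D/0x7E bytes escapes to
+ * 2 * 16384 = 32768 bytes; the 2 CRC bytes and the trailing 0x7E
+ * delimiter then give the 32771-byte bound above.
+ */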
+
+/* The header of callback data type has remote processor token (of type int) */
+#define CALLBACK_HDR_SIZE	(sizeof(int))
+#define CALLBACK_BUF_SIZE	(DIAG_MAX_REQ_SIZE + CALLBACK_HDR_SIZE)
+
+#define MAX_SSID_PER_RANGE	200
+
+#define ALL_PROC		-1
+
+#define REMOTE_DATA		4
+
+#define USER_SPACE_DATA		16384
+
+#define DIAG_CTRL_MSG_LOG_MASK	9
+#define DIAG_CTRL_MSG_EVENT_MASK	10
+#define DIAG_CTRL_MSG_F3_MASK	11
+#define CONTROL_CHAR	0x7E
+
+#define DIAG_CON_APSS		(0x0001)	/* Bit mask for APSS */
+#define DIAG_CON_MPSS		(0x0002)	/* Bit mask for MPSS */
+#define DIAG_CON_LPASS		(0x0004)	/* Bit mask for LPASS */
+#define DIAG_CON_WCNSS		(0x0008)	/* Bit mask for WCNSS */
+#define DIAG_CON_SENSORS	(0x0010)	/* Bit mask for Sensors */
+#define DIAG_CON_WDSP		(0x0020)	/* Bit mask for WDSP */
+#define DIAG_CON_CDSP		(0x0040)	/* Bit mask for CDSP */
+
+#define DIAG_CON_UPD_WLAN		(0x1000) /* Bit mask for WLAN PD */
+#define DIAG_CON_UPD_AUDIO		(0x2000) /* Bit mask for AUDIO PD */
+#define DIAG_CON_UPD_SENSORS	(0x4000) /* Bit mask for SENSORS PD */
+
+#define DIAG_CON_NONE		(0x0000)	/* Bit mask for no subsystem */
+#define DIAG_CON_ALL		(DIAG_CON_APSS | DIAG_CON_MPSS \
+				| DIAG_CON_LPASS | DIAG_CON_WCNSS \
+				| DIAG_CON_SENSORS | DIAG_CON_WDSP \
+				| DIAG_CON_CDSP)
+#define DIAG_CON_UPD_ALL	(DIAG_CON_UPD_WLAN \
+				| DIAG_CON_UPD_AUDIO \
+				| DIAG_CON_UPD_SENSORS)
+
+#define DIAG_STM_MODEM	0x01
+#define DIAG_STM_LPASS	0x02
+#define DIAG_STM_WCNSS	0x04
+#define DIAG_STM_APPS	0x08
+#define DIAG_STM_SENSORS 0x10
+#define DIAG_STM_WDSP 0x20
+#define DIAG_STM_CDSP 0x40
+
+#define INVALID_PID		-1
+#define DIAG_CMD_FOUND		1
+#define DIAG_CMD_NOT_FOUND	0
+#define DIAG_CMD_POLLING	1
+#define DIAG_CMD_NOT_POLLING	0
+#define DIAG_CMD_ADD		1
+#define DIAG_CMD_REMOVE		0
+
+#define DIAG_CMD_VERSION	0
+#define DIAG_CMD_ERROR		0x13
+#define DIAG_CMD_DOWNLOAD	0x3A
+#define DIAG_CMD_DIAG_SUBSYS	0x4B
+#define DIAG_CMD_LOG_CONFIG	0x73
+#define DIAG_CMD_LOG_ON_DMND	0x78
+#define DIAG_CMD_EXT_BUILD	0x7c
+#define DIAG_CMD_MSG_CONFIG	0x7D
+#define DIAG_CMD_GET_EVENT_MASK	0x81
+#define DIAG_CMD_SET_EVENT_MASK	0x82
+#define DIAG_CMD_EVENT_TOGGLE	0x60
+#define DIAG_CMD_NO_SUBSYS	0xFF
+#define DIAG_CMD_STATUS	0x0C
+#define DIAG_SS_WCDMA	0x04
+#define DIAG_CMD_QUERY_CALL	0x0E
+#define DIAG_SS_GSM	0x08
+#define DIAG_CMD_QUERY_TMC	0x02
+#define DIAG_SS_TDSCDMA	0x57
+#define DIAG_CMD_TDSCDMA_STATUS	0x0E
+#define DIAG_CMD_DIAG_SUBSYS_DELAY 0x80
+
+#define DIAG_SS_DIAG		0x12
+#define DIAG_SS_PARAMS		0x32
+#define DIAG_SS_FILE_READ_MODEM 0x0816
+#define DIAG_SS_FILE_READ_ADSP  0x0E10
+#define DIAG_SS_FILE_READ_WCNSS 0x141F
+#define DIAG_SS_FILE_READ_SLPI 0x1A18
+#define DIAG_SS_FILE_READ_APPS 0x020F
+
+#define DIAG_DIAG_MAX_PKT_SZ	0x55
+#define DIAG_DIAG_STM		0x214
+#define DIAG_DIAG_POLL		0x03
+#define DIAG_DEL_RSP_WRAP	0x04
+#define DIAG_DEL_RSP_WRAP_CNT	0x05
+#define DIAG_EXT_MOBILE_ID	0x06
+#define DIAG_GET_TIME_API	0x21B
+#define DIAG_SET_TIME_API	0x21C
+#define DIAG_SWITCH_COMMAND	0x081B
+#define DIAG_BUFFERING_MODE	0x080C
+
+#define DIAG_CMD_OP_LOG_DISABLE		0
+#define DIAG_CMD_OP_GET_LOG_RANGE	1
+#define DIAG_CMD_OP_SET_LOG_MASK	3
+#define DIAG_CMD_OP_GET_LOG_MASK	4
+
+#define DIAG_CMD_OP_GET_SSID_RANGE	1
+#define DIAG_CMD_OP_GET_BUILD_MASK	2
+#define DIAG_CMD_OP_GET_MSG_MASK	3
+#define DIAG_CMD_OP_SET_MSG_MASK	4
+#define DIAG_CMD_OP_SET_ALL_MSG_MASK	5
+
+#define DIAG_CMD_OP_GET_MSG_ALLOC       0x33
+#define DIAG_CMD_OP_GET_MSG_DROP	0x30
+#define DIAG_CMD_OP_RESET_MSG_STATS	0x2F
+#define DIAG_CMD_OP_GET_LOG_ALLOC	0x31
+#define DIAG_CMD_OP_GET_LOG_DROP	0x2C
+#define DIAG_CMD_OP_RESET_LOG_STATS	0x2B
+#define DIAG_CMD_OP_GET_EVENT_ALLOC	0x32
+#define DIAG_CMD_OP_GET_EVENT_DROP	0x2E
+#define DIAG_CMD_OP_RESET_EVENT_STATS	0x2D
+
+#define DIAG_CMD_OP_HDLC_DISABLE	0x218
+
+#define BAD_PARAM_RESPONSE_MESSAGE 20
+
+#define PERSIST_TIME_SUCCESS 0
+#define PERSIST_TIME_FAILURE 1
+#define PERSIST_TIME_NOT_SUPPORTED 2
+
+#define MODE_CMD	41
+#define RESET_ID	2
+
+#define PKT_DROP	0
+#define PKT_ALLOC	1
+#define PKT_RESET	2
+
+#define FEATURE_MASK_LEN	4
+
+#define DIAG_MD_NONE			0
+#define DIAG_MD_PERIPHERAL		1
+
+/*
+ * The status bit masks when received in a signal handler are to be
+ * used in conjunction with the peripheral list bit mask to determine the
+ * status for a peripheral. For instance, 0x00010002 would denote an open
+ * status on the MPSS
+ */
+#define DIAG_STATUS_OPEN (0x00010000)	/* DCI channel open status mask   */
+#define DIAG_STATUS_CLOSED (0x00020000)	/* DCI channel closed status mask */
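+/*
+ * Example (illustrative): for a status word of 0x00010002 received in
+ * a signal handler,
+ *
+ *	status & DIAG_STATUS_OPEN	is non-zero, and
+ *	status & 0xFFFF			equals DIAG_CON_MPSS,
+ *
+ * i.e. the DCI channel to the MPSS is open.
+ */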
+
+#define MODE_NONREALTIME	0
+#define MODE_REALTIME		1
+#define MODE_UNKNOWN		2
+
+#define DIAG_BUFFERING_MODE_STREAMING	0
+#define DIAG_BUFFERING_MODE_THRESHOLD	1
+#define DIAG_BUFFERING_MODE_CIRCULAR	2
+
+#define DIAG_MIN_WM_VAL		0
+#define DIAG_MAX_WM_VAL		100
+
+#define DEFAULT_LOW_WM_VAL	15
+#define DEFAULT_HIGH_WM_VAL	85
+
+#define TYPE_DATA		0
+#define TYPE_CNTL		1
+#define TYPE_DCI		2
+#define TYPE_CMD		3
+#define TYPE_DCI_CMD		4
+#define NUM_TYPES		5
+
+#define PERIPHERAL_MODEM	0
+#define PERIPHERAL_LPASS	1
+#define PERIPHERAL_WCNSS	2
+#define PERIPHERAL_SENSORS	3
+#define PERIPHERAL_WDSP		4
+#define PERIPHERAL_CDSP		5
+#define NUM_PERIPHERALS		6
+#define APPS_DATA		(NUM_PERIPHERALS)
+
+#define UPD_WLAN		7
+#define UPD_AUDIO		8
+#define UPD_SENSORS		9
+#define NUM_UPD			3
+
+#define DIAG_ID_APPS		1
+#define DIAG_ID_MPSS		2
+#define DIAG_ID_WLAN		3
+#define DIAG_ID_LPASS		4
+#define DIAG_ID_CDSP		5
+#define DIAG_ID_AUDIO		6
+#define DIAG_ID_SENSORS		7
+
+/* Number of sessions possible in Memory Device Mode. +1 for Apps data */
+#define NUM_MD_SESSIONS		(NUM_PERIPHERALS \
+					+ NUM_UPD + 1)
+
+#define MD_PERIPHERAL_MASK(x)	(1 << (x))
+
+#define MD_PERIPHERAL_PD_MASK(x)					\
+	(((x) == PERIPHERAL_MODEM) ? (1 << UPD_WLAN) :			\
+	(((x) == PERIPHERAL_LPASS) ? (1 << UPD_AUDIO | 1 << UPD_SENSORS) : 0))
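+/*
+ * Example (illustrative): MD_PERIPHERAL_PD_MASK(PERIPHERAL_LPASS)
+ * expands to (1 << UPD_AUDIO | 1 << UPD_SENSORS), i.e.
+ * (1 << 8) | (1 << 9) = 0x300; peripherals other than MODEM and LPASS
+ * yield 0.
+ */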
+
+/*
+ * The number of STM processors includes all the peripherals and the
+ * apps processor. Add 1 below for the apps processor.
+ */
+#define NUM_STM_PROCESSORS	(NUM_PERIPHERALS + 1)
+/*
+ * Number of processors that can support DCI: all the peripherals
+ * plus the apps processor. This does not mean that every peripheral
+ * actually has the feature.
+ */
+#define NUM_DCI_PERIPHERALS	(NUM_PERIPHERALS + 1)
+
+#define DIAG_PROC_DCI			1
+#define DIAG_PROC_MEMORY_DEVICE		2
+
+/*
+ * Flags to vote the DCI or Memory device process up or down
+ * when it becomes active or inactive.
+ */
+#define VOTE_DOWN			0
+#define VOTE_UP				1
+
+#define DIAG_TS_SIZE	50
+
+#define DIAG_MDM_BUF_SIZE	2048
+/* The Maximum request size is 2k + DCI header + footer (6 bytes) */
+#define DIAG_MDM_DCI_BUF_SIZE	(2048 + 6)
+
+#define DIAG_LOCAL_PROC	0
+
+#ifndef CONFIG_DIAGFWD_BRIDGE_CODE
+/* Local Processor only */
+#define DIAG_NUM_PROC	1
+#else
+/* Local Processor + Remote Devices */
+#define DIAG_NUM_PROC	(1 + NUM_REMOTE_DEV)
+#endif
+
+#define DIAG_WS_DCI		0
+#define DIAG_WS_MUX		1
+
+#define DIAG_DATA_TYPE		1
+#define DIAG_CNTL_TYPE		2
+#define DIAG_DCI_TYPE		3
+
+/* List of remote processor supported */
+enum remote_procs {
+	MDM = 1,
+	MDM2 = 2,
+	QSC = 5,
+};
+
+struct diag_pkt_header_t {
+	uint8_t cmd_code;
+	uint8_t subsys_id;
+	uint16_t subsys_cmd_code;
+} __packed;
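+
+/*
+ * Example (illustrative): the HDLC-disable request from the command
+ * table above is carried in this header as
+ *
+ *	cmd_code        = DIAG_CMD_DIAG_SUBSYS      (0x4B)
+ *	subsys_id       = DIAG_SS_DIAG              (0x12)
+ *	subsys_cmd_code = DIAG_CMD_OP_HDLC_DISABLE  (0x218)
+ */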
+
+struct diag_cmd_ext_mobile_rsp_t {
+	struct diag_pkt_header_t header;
+	uint8_t version;
+	uint8_t padding[3];
+	uint32_t family;
+	uint32_t chip_id;
+} __packed;
+
+struct diag_cmd_time_sync_query_req_t {
+	struct diag_pkt_header_t header;
+	uint8_t version;
+};
+
+struct diag_cmd_time_sync_query_rsp_t {
+	struct diag_pkt_header_t header;
+	uint8_t version;
+	uint8_t time_api;
+};
+
+struct diag_cmd_time_sync_switch_req_t {
+	struct diag_pkt_header_t header;
+	uint8_t version;
+	uint8_t time_api;
+	uint8_t persist_time;
+};
+
+struct diag_cmd_time_sync_switch_rsp_t {
+	struct diag_pkt_header_t header;
+	uint8_t version;
+	uint8_t time_api;
+	uint8_t time_api_status;
+	uint8_t persist_time_status;
+};
+
+struct diag_cmd_reg_entry_t {
+	uint16_t cmd_code;
+	uint16_t subsys_id;
+	uint16_t cmd_code_lo;
+	uint16_t cmd_code_hi;
+} __packed;
+
+struct diag_cmd_reg_t {
+	struct list_head link;
+	struct diag_cmd_reg_entry_t entry;
+	uint8_t proc;
+	int pid;
+};
+
+/*
+ * @sync_obj_name: name of the synchronization object associated with this proc
+ * @count: number of entries in the entries array
+ * @entries: the actual packet registrations
+ */
+struct diag_cmd_reg_tbl_t {
+	char sync_obj_name[MAX_SYNC_OBJ_NAME_SIZE];
+	uint32_t count;
+	struct diag_cmd_reg_entry_t *entries;
+};
+
+struct diag_client_map {
+	char name[20];
+	int pid;
+};
+
+struct real_time_vote_t {
+	int client_id;
+	uint16_t proc;
+	uint8_t real_time_vote;
+} __packed;
+
+struct real_time_query_t {
+	int real_time;
+	int proc;
+} __packed;
+
+struct diag_buffering_mode_t {
+	uint8_t peripheral;
+	uint8_t mode;
+	uint8_t high_wm_val;
+	uint8_t low_wm_val;
+} __packed;
+
+struct diag_callback_reg_t {
+	int proc;
+} __packed;
+
+struct diag_ws_ref_t {
+	int ref_count;
+	int copy_count;
+	spinlock_t lock;
+};
+
+/* This structure is defined in the USB header file */
+#ifndef CONFIG_DIAG_OVER_USB
+struct diag_request {
+	char *buf;
+	int length;
+	int actual;
+	int status;
+	void *context;
+};
+#endif
+
+struct diag_pkt_stats_t {
+	uint32_t alloc_count;
+	uint32_t drop_count;
+};
+
+struct diag_cmd_stats_rsp_t {
+	struct diag_pkt_header_t header;
+	uint32_t payload;
+};
+
+struct diag_cmd_hdlc_disable_rsp_t {
+	struct diag_pkt_header_t header;
+	uint8_t framing_version;
+	uint8_t result;
+};
+
+struct diag_pkt_frame_t {
+	uint8_t start;
+	uint8_t version;
+	uint16_t length;
+};
+
+struct diag_partial_pkt_t {
+	uint32_t total_len;
+	uint32_t read_len;
+	uint32_t remaining;
+	uint32_t capacity;
+	uint8_t processing;
+	unsigned char *data;
+} __packed;
+
+struct diag_logging_mode_param_t {
+	uint32_t req_mode;
+	uint32_t peripheral_mask;
+	uint32_t pd_mask;
+	uint8_t mode_param;
+} __packed;
+
+struct diag_md_session_t {
+	int pid;
+	int peripheral_mask;
+	uint8_t hdlc_disabled;
+	uint8_t msg_mask_tbl_count;
+	struct timer_list hdlc_reset_timer;
+	struct diag_mask_info *msg_mask;
+	struct diag_mask_info *log_mask;
+	struct diag_mask_info *event_mask;
+	struct thread_info *md_client_thread_info;
+	struct task_struct *task;
+};
+
+/*
+ * High level structure for storing Diag masks.
+ *
+ * @ptr: Pointer to the buffer that stores the masks
+ * @mask_len: Length of the buffer pointed to by ptr
+ * @update_buf: Buffer for performing mask updates to peripherals
+ * @update_buf_len: Length of the buffer pointed to by update_buf
+ * @status: Status of the mask: all enabled, all disabled, or valid
+ * @lock: To protect access to the mask variables
+ */
+struct diag_mask_info {
+	uint8_t *ptr;
+	int mask_len;
+	uint8_t *update_buf;
+	int update_buf_len;
+	uint8_t status;
+	struct mutex lock;
+};
+
+struct diag_md_proc_info {
+	int pid;
+	struct task_struct *socket_process;
+	struct task_struct *callback_process;
+	struct task_struct *mdlog_process;
+};
+
+struct diag_feature_t {
+	uint8_t feature_mask[FEATURE_MASK_LEN];
+	uint8_t rcvd_feature_mask;
+	uint8_t log_on_demand;
+	uint8_t separate_cmd_rsp;
+	uint8_t encode_hdlc;
+	uint8_t untag_header;
+	uint8_t peripheral_buffering;
+	uint8_t pd_buffering;
+	uint8_t mask_centralization;
+	uint8_t stm_support;
+	uint8_t sockets_enabled;
+	uint8_t sent_feature_mask;
+};
+
+struct diagchar_dev {
+
+	/* State for the char driver */
+	unsigned int major;
+	unsigned int minor_start;
+	int num;
+	struct cdev *cdev;
+	char *name;
+	struct class *diagchar_class;
+	struct device *diag_dev;
+	int ref_count;
+	int mask_clear;
+	struct mutex diag_maskclear_mutex;
+	struct mutex diag_notifier_mutex;
+	struct mutex diagchar_mutex;
+	struct mutex diag_file_mutex;
+	wait_queue_head_t wait_q;
+	struct diag_client_map *client_map;
+	int *data_ready;
+	atomic_t data_ready_notif[THRESHOLD_CLIENT_LIMIT];
+	int num_clients;
+	int polling_reg_flag;
+	int use_device_tree;
+	int supports_separate_cmdrsp;
+	int supports_apps_hdlc_encoding;
+	int supports_apps_header_untagging;
+	int supports_pd_buffering;
+	int peripheral_untag[NUM_PERIPHERALS];
+	int supports_sockets;
+	/* The state requested in the STM command */
+	int stm_state_requested[NUM_STM_PROCESSORS];
+	/* The current STM state */
+	int stm_state[NUM_STM_PROCESSORS];
+	uint16_t stm_peripheral;
+	struct work_struct stm_update_work;
+	uint16_t mask_update;
+	struct work_struct mask_update_work;
+	uint16_t close_transport;
+	struct work_struct close_transport_work;
+	struct workqueue_struct *cntl_wq;
+	struct mutex cntl_lock;
+	/* Delayed response Variables */
+	uint16_t delayed_rsp_id;
+	struct mutex delayed_rsp_mutex;
+	/* DCI related variables */
+	struct list_head dci_req_list;
+	struct list_head dci_client_list;
+	int dci_tag;
+	int dci_client_id;
+	struct mutex dci_mutex;
+	int num_dci_client;
+	unsigned char *apps_dci_buf;
+	int dci_state;
+	struct workqueue_struct *diag_dci_wq;
+	struct list_head cmd_reg_list;
+	struct mutex cmd_reg_mutex;
+	uint32_t cmd_reg_count;
+	struct mutex diagfwd_channel_mutex[NUM_PERIPHERALS];
+	/* Sizes that reflect memory pool sizes */
+	unsigned int poolsize;
+	unsigned int poolsize_hdlc;
+	unsigned int poolsize_dci;
+	unsigned int poolsize_user;
+	/* Buffers for masks */
+	struct mutex diag_cntl_mutex;
+	/* Members for Sending response */
+	unsigned char *encoded_rsp_buf;
+	int encoded_rsp_len;
+	uint8_t rsp_buf_busy;
+	spinlock_t rsp_buf_busy_lock;
+	int rsp_buf_ctxt;
+	struct diagfwd_info *diagfwd_data[NUM_PERIPHERALS];
+	struct diagfwd_info *diagfwd_cntl[NUM_PERIPHERALS];
+	struct diagfwd_info *diagfwd_dci[NUM_PERIPHERALS];
+	struct diagfwd_info *diagfwd_cmd[NUM_PERIPHERALS];
+	struct diagfwd_info *diagfwd_dci_cmd[NUM_PERIPHERALS];
+	struct diag_feature_t feature[NUM_PERIPHERALS];
+	struct diag_buffering_mode_t buffering_mode[NUM_MD_SESSIONS];
+	uint8_t buffering_flag[NUM_MD_SESSIONS];
+	struct mutex mode_lock;
+	unsigned char *user_space_data_buf;
+	uint8_t user_space_data_busy;
+	struct diag_pkt_stats_t msg_stats;
+	struct diag_pkt_stats_t log_stats;
+	struct diag_pkt_stats_t event_stats;
+	/* buffer for updating mask to peripherals */
+	unsigned char *buf_feature_mask_update;
+	uint8_t hdlc_disabled;
+	struct mutex hdlc_disable_mutex;
+	struct mutex hdlc_recovery_mutex;
+	struct timer_list hdlc_reset_timer;
+	struct mutex diag_hdlc_mutex;
+	unsigned char *hdlc_buf;
+	uint32_t hdlc_buf_len;
+	unsigned char *apps_rsp_buf;
+	struct diag_partial_pkt_t incoming_pkt;
+	int in_busy_pktdata;
+	/* Variables for non real time mode */
+	int real_time_mode[DIAG_NUM_PROC];
+	int real_time_update_busy;
+	uint16_t proc_active_mask;
+	uint16_t proc_rt_vote_mask[DIAG_NUM_PROC];
+	struct mutex real_time_mutex;
+	struct work_struct diag_real_time_work;
+	struct workqueue_struct *diag_real_time_wq;
+#ifdef CONFIG_DIAG_OVER_USB
+	int usb_connected;
+#endif
+	struct workqueue_struct *diag_wq;
+	struct work_struct diag_drain_work;
+	struct work_struct update_user_clients;
+	struct work_struct update_md_clients;
+	struct workqueue_struct *diag_cntl_wq;
+	uint8_t log_on_demand_support;
+	uint8_t *apps_req_buf;
+	uint32_t apps_req_buf_len;
+	uint8_t *dci_pkt_buf; /* For Apps DCI packets */
+	uint32_t dci_pkt_length;
+	int in_busy_dcipktdata;
+	int logging_mode;
+	int logging_mask;
+	int pd_logging_mode[NUM_UPD];
+	int pd_session_clear[NUM_UPD];
+	int num_pd_session;
+	int mask_check;
+	uint32_t md_session_mask;
+	uint8_t md_session_mode;
+	struct diag_md_session_t *md_session_map[NUM_MD_SESSIONS];
+	struct mutex md_session_lock;
+	/* Power related variables */
+	struct diag_ws_ref_t dci_ws;
+	struct diag_ws_ref_t md_ws;
+	/* Pointers to Diag Masks */
+	struct diag_mask_info *msg_mask;
+	struct diag_mask_info *log_mask;
+	struct diag_mask_info *event_mask;
+	struct diag_mask_info *build_time_mask;
+	uint8_t msg_mask_tbl_count;
+	uint8_t bt_msg_mask_tbl_count;
+	uint16_t event_mask_size;
+	uint16_t last_event_id;
+	struct mutex msg_mask_lock;
+	/* Variables for Mask Centralization */
+	uint16_t num_event_id[NUM_PERIPHERALS];
+	uint32_t num_equip_id[NUM_PERIPHERALS];
+	uint32_t max_ssid_count[NUM_PERIPHERALS];
+#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
+	/* For sending command requests in callback mode */
+	unsigned char *hdlc_encode_buf;
+	int hdlc_encode_buf_len;
+#endif
+	int time_sync_enabled;
+	uint8_t uses_time_api;
+	struct platform_device *pdev;
+};
+
+extern struct diagchar_dev *driver;
+
+extern int wrap_enabled;
+extern uint16_t wrap_count;
+
+void diag_get_timestamp(char *time_str);
+void check_drain_timer(void);
+int diag_get_remote(int remote_info);
+
+void diag_ws_init(void);
+void diag_ws_on_notify(void);
+void diag_ws_on_read(int type, int pkt_len);
+void diag_ws_on_copy(int type);
+void diag_ws_on_copy_fail(int type);
+void diag_ws_on_copy_complete(int type);
+void diag_ws_reset(int type);
+void diag_ws_release(void);
+void chk_logging_wakeup(void);
+int diag_cmd_add_reg(struct diag_cmd_reg_entry_t *new_entry, uint8_t proc,
+		     int pid);
+struct diag_cmd_reg_entry_t *diag_cmd_search(
+			struct diag_cmd_reg_entry_t *entry,
+			int proc);
+void diag_cmd_remove_reg(struct diag_cmd_reg_entry_t *entry, uint8_t proc);
+void diag_cmd_remove_reg_by_pid(int pid);
+void diag_cmd_remove_reg_by_proc(int proc);
+int diag_cmd_chk_polling(struct diag_cmd_reg_entry_t *entry);
+int diag_mask_param(void);
+void diag_clear_masks(int pid);
+uint8_t diag_mask_to_pd_value(uint32_t peripheral_mask);
+
+void diag_record_stats(int type, int flag);
+
+struct diag_md_session_t *diag_md_session_get_pid(int pid);
+struct diag_md_session_t *diag_md_session_get_peripheral(uint8_t peripheral);
+
+#endif
diff -Nruw linux-4.4.115-fbx/drivers/char/diag./diagchar_hdlc.c linux-4.4.115-fbx/drivers/char/diag/diagchar_hdlc.c
--- linux-4.4.115-fbx/drivers/char/diag./diagchar_hdlc.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/char/diag/diagchar_hdlc.c	2019-01-22 16:16:22.959241480 +0100
@@ -0,0 +1,267 @@
+/* Copyright (c) 2008-2009, 2012-2014, The Linux Foundation.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/cdev.h>
+#include <linux/fs.h>
+#include <linux/device.h>
+#include <linux/uaccess.h>
+#include <linux/ratelimit.h>
+#include <linux/crc-ccitt.h>
+#include "diagchar_hdlc.h"
+#include "diagchar.h"
+
+
+MODULE_LICENSE("GPL v2");
+
+#define CRC_16_L_SEED           0xFFFF
+
+#define CRC_16_L_STEP(xx_crc, xx_c) \
+	crc_ccitt_byte(xx_crc, xx_c)
+
+void diag_hdlc_encode(struct diag_send_desc_type *src_desc,
+		      struct diag_hdlc_dest_type *enc)
+{
+	uint8_t *dest;
+	uint8_t *dest_last;
+	const uint8_t *src;
+	const uint8_t *src_last;
+	uint16_t crc;
+	unsigned char src_byte = 0;
+	enum diag_send_state_enum_type state;
+	unsigned int used = 0;
+
+	if (src_desc && enc) {
+
+		/* Copy parts to local variables. */
+		src = src_desc->pkt;
+		src_last = src_desc->last;
+		state = src_desc->state;
+		dest = enc->dest;
+		dest_last = enc->dest_last;
+
+		if (state == DIAG_STATE_START) {
+			crc = CRC_16_L_SEED;
+			state++;
+		} else {
+			/* Get a local copy of the CRC */
+			crc = enc->crc;
+		}
+
+		/* dest or dest_last may be NULL to trigger a
+		   state transition only */
+		if (dest && dest_last) {
+			/* This condition needs to include the possibility
+			   of 2 dest bytes for an escaped byte */
+			while (src <= src_last && dest <= dest_last) {
+
+				src_byte = *src++;
+
+				if ((src_byte == CONTROL_CHAR) ||
+				    (src_byte == ESC_CHAR)) {
+
+					/* If the escape character is not the
+					   last byte */
+					if (dest != dest_last) {
+						crc = CRC_16_L_STEP(crc,
+								    src_byte);
+
+						*dest++ = ESC_CHAR;
+						used++;
+
+						*dest++ = src_byte
+							  ^ ESC_MASK;
+						used++;
+					} else {
+
+						src--;
+						break;
+					}
+
+				} else {
+					crc = CRC_16_L_STEP(crc, src_byte);
+					*dest++ = src_byte;
+					used++;
+				}
+			}
+
+			if (src > src_last) {
+
+				if (state == DIAG_STATE_BUSY) {
+					if (src_desc->terminate) {
+						crc = ~crc;
+						state++;
+					} else {
+						/* Done with fragment */
+						state = DIAG_STATE_COMPLETE;
+					}
+				}
+
+				while (dest <= dest_last &&
+				       state >= DIAG_STATE_CRC1 &&
+				       state < DIAG_STATE_TERM) {
+					/* Encode a byte of the CRC next */
+					src_byte = crc & 0xFF;
+
+					if ((src_byte == CONTROL_CHAR)
+					    || (src_byte == ESC_CHAR)) {
+
+						if (dest != dest_last) {
+
+							*dest++ = ESC_CHAR;
+							used++;
+							*dest++ = src_byte ^
+								  ESC_MASK;
+							used++;
+
+							crc >>= 8;
+						} else {
+
+							break;
+						}
+					} else {
+
+						crc >>= 8;
+						*dest++ = src_byte;
+						used++;
+					}
+
+					state++;
+				}
+
+				if (state == DIAG_STATE_TERM) {
+					if (dest_last >= dest) {
+						*dest++ = CONTROL_CHAR;
+						used++;
+						state++;	/* Complete */
+					}
+				}
+			}
+		}
+		/* Copy local variables back into the encode structure. */
+
+		enc->dest = dest;
+		enc->dest_last = dest_last;
+		enc->crc = crc;
+		src_desc->pkt = src;
+		src_desc->last = src_last;
+		src_desc->state = state;
+	}
+
+	return;
+}
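+
+/*
+ * Usage sketch (illustrative; buf, len, dest_buf and dest_len are
+ * assumed caller-owned buffers): encoding one complete packet in a
+ * single pass, with dest_buf sized per DIAG_MAX_HDLC_BUF_SIZE:
+ *
+ *	struct diag_send_desc_type send = {
+ *		.pkt = buf,
+ *		.last = buf + len - 1,
+ *		.state = DIAG_STATE_START,
+ *		.terminate = 1,
+ *	};
+ *	struct diag_hdlc_dest_type enc = {
+ *		.dest = dest_buf,
+ *		.dest_last = dest_buf + dest_len - 1,
+ *	};
+ *	diag_hdlc_encode(&send, &enc);
+ *
+ * On return send.state is DIAG_STATE_COMPLETE and enc.dest points one
+ * past the trailing CONTROL_CHAR.
+ */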
+
+
+int diag_hdlc_decode(struct diag_hdlc_decode_type *hdlc)
+{
+	uint8_t *src_ptr = NULL, *dest_ptr = NULL;
+	unsigned int src_length = 0, dest_length = 0;
+
+	unsigned int len = 0;
+	unsigned int i;
+	uint8_t src_byte;
+
+	int pkt_bnd = HDLC_INCOMPLETE;
+	int msg_start;
+
+	if (hdlc && hdlc->src_ptr && hdlc->dest_ptr &&
+	    (hdlc->src_size > hdlc->src_idx) &&
+	    (hdlc->dest_size > hdlc->dest_idx)) {
+
+		msg_start = (hdlc->src_idx == 0) ? 1 : 0;
+
+		src_ptr = hdlc->src_ptr;
+		src_ptr = &src_ptr[hdlc->src_idx];
+		src_length = hdlc->src_size - hdlc->src_idx;
+
+		dest_ptr = hdlc->dest_ptr;
+		dest_ptr = &dest_ptr[hdlc->dest_idx];
+		dest_length = hdlc->dest_size - hdlc->dest_idx;
+
+		for (i = 0; i < src_length; i++) {
+
+			src_byte = src_ptr[i];
+
+			if (hdlc->escaping) {
+				dest_ptr[len++] = src_byte ^ ESC_MASK;
+				hdlc->escaping = 0;
+			} else if (src_byte == ESC_CHAR) {
+				if (i == (src_length - 1)) {
+					hdlc->escaping = 1;
+					i++;
+					break;
+				} else {
+					dest_ptr[len++] = src_ptr[++i]
+							  ^ ESC_MASK;
+				}
+			} else if (src_byte == CONTROL_CHAR) {
+				if (msg_start && i == 0 && src_length > 1)
+					continue;
+				/* Byte 0x7E will be considered
+					as end of packet */
+				dest_ptr[len++] = src_byte;
+				i++;
+				pkt_bnd = HDLC_COMPLETE;
+				break;
+			} else {
+				dest_ptr[len++] = src_byte;
+			}
+
+			if (len >= dest_length) {
+				i++;
+				break;
+			}
+		}
+
+		hdlc->src_idx += i;
+		hdlc->dest_idx += len;
+	}
+
+	return pkt_bnd;
+}
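+
+/*
+ * Usage sketch (illustrative; raw_buf, raw_len, decoded_buf,
+ * decoded_len and handle_frame are placeholders):
+ *
+ *	struct diag_hdlc_decode_type hdlc = {
+ *		.src_ptr = raw_buf,
+ *		.src_size = raw_len,
+ *		.dest_ptr = decoded_buf,
+ *		.dest_size = decoded_len,
+ *	};
+ *	if (diag_hdlc_decode(&hdlc) == HDLC_COMPLETE)
+ *		handle_frame(decoded_buf, hdlc.dest_idx);
+ *
+ * HDLC_INCOMPLETE means more source bytes are needed; src_idx and
+ * dest_idx record how far each buffer has been consumed so the next
+ * call can resume where this one stopped.
+ */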
+
+int crc_check(uint8_t *buf, uint16_t len)
+{
+	uint16_t crc = CRC_16_L_SEED;
+	uint8_t sent_crc[2] = {0, 0};
+
+	/*
+	 * The minimum length of a valid incoming packet is 4: 1 byte
+	 * of data, 2 CRC bytes and 1 delimiter byte
+	 */
+	if (!buf || len < 4) {
+		pr_err_ratelimited("diag: In %s, invalid packet or length, buf: 0x%p, len: %d",
+				   __func__, buf, len);
+		return -EIO;
+	}
+
+	/*
+	 * Run the CRC over the original input, skipping the trailing
+	 * 2 CRC bytes and the delimiter byte
+	 */
+	crc = crc_ccitt(crc, buf, len-3);
+	crc ^= CRC_16_L_SEED;
+
+	/* Check the computed CRC against the original CRC bytes. */
+	sent_crc[0] = buf[len-3];
+	sent_crc[1] = buf[len-2];
+	if (crc != *((uint16_t *)sent_crc)) {
+		pr_debug("diag: In %s, crc mismatch. expected: %x, sent %x.\n",
+				__func__, crc, *((uint16_t *)sent_crc));
+		return -EIO;
+	}
+
+	return 0;
+}
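+
+/*
+ * Frame layout assumed by crc_check() above (for reference): for a
+ * decoded frame of length len,
+ *
+ *	buf[0] .. buf[len-4]	payload
+ *	buf[len-3]		CRC low byte
+ *	buf[len-2]		CRC high byte
+ *	buf[len-1]		0x7E delimiter (CONTROL_CHAR)
+ *
+ * so the function returns 0 only when the CRC-CCITT over the payload
+ * matches the two transmitted CRC bytes.
+ */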
diff -Nruw linux-4.4.115-fbx/drivers/char/diag./diagchar_hdlc.h linux-4.4.115-fbx/drivers/char/diag/diagchar_hdlc.h
--- linux-4.4.115-fbx/drivers/char/diag./diagchar_hdlc.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/char/diag/diagchar_hdlc.h	2019-01-22 16:16:22.959241480 +0100
@@ -0,0 +1,66 @@
+/* Copyright (c) 2008-2009, 2012-2014, The Linux Foundation.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef DIAGCHAR_HDLC
+#define DIAGCHAR_HDLC
+
+enum diag_send_state_enum_type {
+	DIAG_STATE_START,
+	DIAG_STATE_BUSY,
+	DIAG_STATE_CRC1,
+	DIAG_STATE_CRC2,
+	DIAG_STATE_TERM,
+	DIAG_STATE_COMPLETE
+};
+
+struct diag_send_desc_type {
+	const void *pkt;
+	const void *last;	/* Address of last byte to send. */
+	enum diag_send_state_enum_type state;
+	unsigned char terminate;	/* True if this fragment
+					   terminates the packet */
+};
+
+struct diag_hdlc_dest_type {
+	void *dest;
+	void *dest_last;
+	/* Below: internal use only */
+	uint16_t crc;
+};
+
+struct diag_hdlc_decode_type {
+	uint8_t *src_ptr;
+	unsigned int src_idx;
+	unsigned int src_size;
+	uint8_t *dest_ptr;
+	unsigned int dest_idx;
+	unsigned int dest_size;
+	int escaping;
+
+};
+
+void diag_hdlc_encode(struct diag_send_desc_type *src_desc,
+		      struct diag_hdlc_dest_type *enc);
+
+int diag_hdlc_decode(struct diag_hdlc_decode_type *hdlc);
+
+int crc_check(uint8_t *buf, uint16_t len);
+
+#define ESC_CHAR     0x7D
+#define ESC_MASK     0x20
+
+#define HDLC_INCOMPLETE		0
+#define HDLC_COMPLETE		1
+
+#define HDLC_FOOTER_LEN		3
+#endif
diff -Nruw linux-4.4.115-fbx/drivers/char/diag./diag_dci.c linux-4.4.115-fbx/drivers/char/diag/diag_dci.c
--- linux-4.4.115-fbx/drivers/char/diag./diag_dci.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/char/diag/diag_dci.c	2019-10-29 09:26:23.445201240 +0100
@@ -0,0 +1,3312 @@
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/uaccess.h>
+#include <linux/diagchar.h>
+#include <linux/sched.h>
+#include <linux/err.h>
+#include <linux/delay.h>
+#include <linux/workqueue.h>
+#include <linux/pm_runtime.h>
+#include <linux/platform_device.h>
+#include <linux/pm_wakeup.h>
+#include <linux/spinlock.h>
+#include <linux/ratelimit.h>
+#include <linux/reboot.h>
+#include <asm/current.h>
+#include <soc/qcom/restart.h>
+#include <linux/vmalloc.h>
+#ifdef CONFIG_DIAG_OVER_USB
+#include <linux/usb/usbdiag.h>
+#endif
+#include "diagchar_hdlc.h"
+#include "diagmem.h"
+#include "diagchar.h"
+#include "diagfwd.h"
+#include "diagfwd_cntl.h"
+#include "diag_dci.h"
+#include "diag_masks.h"
+#include "diagfwd_bridge.h"
+#include "diagfwd_peripheral.h"
+#include "diag_ipc_logging.h"
+
+static struct timer_list dci_drain_timer;
+static int dci_timer_in_progress;
+static struct work_struct dci_data_drain_work;
+
+struct diag_dci_partial_pkt_t partial_pkt;
+
+unsigned int dci_max_reg = 100;
+unsigned int dci_max_clients = 10;
+struct mutex dci_log_mask_mutex;
+struct mutex dci_event_mask_mutex;
+
+/*
+ * DCI_HANDSHAKE_RETRY_TIME: Time to wait (in microseconds) before checking the
+ * connection status again.
+ *
+ * DCI_HANDSHAKE_WAIT_TIME: Timeout (in milliseconds) to check for dci
+ * connection status
+ */
+#define DCI_HANDSHAKE_RETRY_TIME	500000
+#define DCI_HANDSHAKE_WAIT_TIME		200
+
+spinlock_t ws_lock;
+unsigned long ws_lock_flags;
+
+struct dci_ops_tbl_t dci_ops_tbl[NUM_DCI_PROC] = {
+	{
+		.ctx = 0,
+		.send_log_mask = diag_send_dci_log_mask,
+		.send_event_mask = diag_send_dci_event_mask,
+		.peripheral_status = 0,
+		.mempool = 0,
+	},
+#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
+	{
+		.ctx = DIAGFWD_MDM_DCI,
+		.send_log_mask = diag_send_dci_log_mask_remote,
+		.send_event_mask = diag_send_dci_event_mask_remote,
+		.peripheral_status = 0,
+		.mempool = POOL_TYPE_MDM_DCI_WRITE,
+	}
+#endif
+};
+
+struct dci_channel_status_t dci_channel_status[NUM_DCI_PROC] = {
+	{
+		.id = 0,
+		.open = 0,
+		.retry_count = 0
+	},
+#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
+	{
+		.id = DIAGFWD_MDM_DCI,
+		.open = 0,
+		.retry_count = 0
+	}
+#endif
+};
+
+/* Number of milliseconds anticipated to process the DCI data */
+#define DCI_WAKEUP_TIMEOUT 1
+
+#define DCI_CAN_ADD_BUF_TO_LIST(buf)					\
+	(buf && buf->data && !buf->in_busy && buf->data_len > 0)	\
+
+#ifdef CONFIG_DEBUG_FS
+struct diag_dci_data_info *dci_traffic;
+struct mutex dci_stat_mutex;
+void diag_dci_record_traffic(int read_bytes, uint8_t ch_type,
+			     uint8_t peripheral, uint8_t proc)
+{
+	static int curr_dci_data;
+	static unsigned long iteration;
+	struct diag_dci_data_info *temp_data = dci_traffic;
+	if (!temp_data)
+		return;
+	mutex_lock(&dci_stat_mutex);
+	if (curr_dci_data == DIAG_DCI_DEBUG_CNT)
+		curr_dci_data = 0;
+	temp_data += curr_dci_data;
+	temp_data->iteration = iteration + 1;
+	temp_data->data_size = read_bytes;
+	temp_data->peripheral = peripheral;
+	temp_data->ch_type = ch_type;
+	temp_data->proc = proc;
+	diag_get_timestamp(temp_data->time_stamp);
+	curr_dci_data++;
+	iteration++;
+	mutex_unlock(&dci_stat_mutex);
+}
+#else
+void diag_dci_record_traffic(int read_bytes, uint8_t ch_type,
+			     uint8_t peripheral, uint8_t proc) { }
+#endif
+
+static int check_peripheral_dci_support(int peripheral_id, int dci_proc_id)
+{
+	int dci_peripheral_list = 0;
+
+	if (dci_proc_id < 0 || dci_proc_id >= NUM_DCI_PROC) {
+		pr_err("diag:In %s,not a supported DCI proc id\n", __func__);
+		return 0;
+	}
+	if (peripheral_id < 0 || peripheral_id >= NUM_PERIPHERALS) {
+		pr_err("diag:In %s,not a valid peripheral id\n", __func__);
+		return 0;
+	}
+	dci_peripheral_list = dci_ops_tbl[dci_proc_id].peripheral_status;
+
+	if (dci_peripheral_list <= 0 || dci_peripheral_list > DIAG_CON_ALL) {
+		pr_err("diag:In %s,not a valid dci peripheral mask\n",
+			 __func__);
+		return 0;
+	}
+	/* Remove APSS bit mask information */
+	dci_peripheral_list = dci_peripheral_list >> 1;
+
+	if ((1 << peripheral_id) & (dci_peripheral_list))
+		return 1;
+	else
+		return 0;
+}
+
+static void create_dci_log_mask_tbl(unsigned char *mask, uint8_t dirty)
+{
+	unsigned char *temp = mask;
+	uint8_t i;
+
+	if (!mask)
+		return;
+
+	/* create a hard-coded log mask table with 16 categories */
+	for (i = 0; i < DCI_MAX_LOG_CODES; i++) {
+		*temp = i;
+		temp++;
+		*temp = dirty ? 1 : 0;
+		temp++;
+		memset(temp, 0, DCI_MAX_ITEMS_PER_LOG_CODE);
+		temp += DCI_MAX_ITEMS_PER_LOG_CODE;
+	}
+}
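+
+/*
+ * Resulting table layout (illustrative; assumes
+ * DCI_MAX_ITEMS_PER_LOG_CODE is 512, consistent with the 514-byte
+ * stride used by diag_dci_query_log_mask() below): one record per
+ * equipment ID,
+ *
+ *	byte 0		equip_id
+ *	byte 1		dirty flag
+ *	bytes 2..513	per-item log mask bits
+ */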
+
+static void create_dci_event_mask_tbl(unsigned char *tbl_buf)
+{
+	if (tbl_buf)
+		memset(tbl_buf, 0, DCI_EVENT_MASK_SIZE);
+}
+
+void dci_drain_data(unsigned long data)
+{
+	queue_work(driver->diag_dci_wq, &dci_data_drain_work);
+}
+
+static void dci_check_drain_timer(void)
+{
+	if (!dci_timer_in_progress) {
+		dci_timer_in_progress = 1;
+		mod_timer(&dci_drain_timer, jiffies + msecs_to_jiffies(200));
+	}
+}
+
+#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
+static void dci_handshake_work_fn(struct work_struct *work)
+{
+	int err = 0;
+	int max_retries = 5;
+
+	struct dci_channel_status_t *status = container_of(work,
+						struct dci_channel_status_t,
+						handshake_work);
+
+	if (status->open) {
+		pr_debug("diag: In %s, remote dci channel is open, index: %d\n",
+			 __func__, status->id);
+		return;
+	}
+
+	if (status->retry_count == max_retries) {
+		status->retry_count = 0;
+		pr_info("diag: dci channel connection handshake timed out, id: %d\n",
+			status->id);
+		err = diagfwd_bridge_close(TOKEN_TO_BRIDGE(status->id));
+		if (err) {
+			pr_err("diag: In %s, unable to close dci channel id: %d, err: %d\n",
+			       __func__, status->id, err);
+		}
+		return;
+	}
+	status->retry_count++;
+	/*
+	 * Sleep for some time before checking the connection status again.
+	 * The value should be long enough to cover the round trip of a
+	 * small packet to the remote processor.
+	 */
+	usleep_range(DCI_HANDSHAKE_RETRY_TIME, DCI_HANDSHAKE_RETRY_TIME + 100);
+	mod_timer(&status->wait_time,
+		  jiffies + msecs_to_jiffies(DCI_HANDSHAKE_WAIT_TIME));
+}
+
+static void dci_chk_handshake(unsigned long data)
+{
+	int index = (int)data;
+
+	if (index < 0 || index >= NUM_DCI_PROC)
+		return;
+
+	queue_work(driver->diag_dci_wq,
+		   &dci_channel_status[index].handshake_work);
+}
+#endif
+
+static int diag_dci_init_buffer(struct diag_dci_buffer_t *buffer, int type)
+{
+	if (!buffer || buffer->data)
+		return -EINVAL;
+
+	switch (type) {
+	case DCI_BUF_PRIMARY:
+		buffer->capacity = IN_BUF_SIZE;
+		buffer->data = vzalloc(buffer->capacity);
+		if (!buffer->data)
+			return -ENOMEM;
+		break;
+	case DCI_BUF_SECONDARY:
+		buffer->data = NULL;
+		buffer->capacity = IN_BUF_SIZE;
+		break;
+	case DCI_BUF_CMD:
+		buffer->capacity = DIAG_MAX_REQ_SIZE + DCI_BUF_SIZE;
+		buffer->data = vzalloc(buffer->capacity);
+		if (!buffer->data)
+			return -ENOMEM;
+		break;
+	default:
+		pr_err("diag: In %s, unknown type %d", __func__, type);
+		return -EINVAL;
+	}
+
+	buffer->data_len = 0;
+	buffer->in_busy = 0;
+	buffer->buf_type = type;
+	mutex_init(&buffer->data_mutex);
+
+	return 0;
+}
+
+static inline int diag_dci_check_buffer(struct diag_dci_buffer_t *buf, int len)
+{
+	if (!buf)
+		return -EINVAL;
+
+	/* Return 1 if the buffer is not busy and can hold new data */
+	if ((buf->data_len + len < buf->capacity) && !buf->in_busy)
+		return 1;
+
+	return 0;
+}
+
+static void dci_add_buffer_to_list(struct diag_dci_client_tbl *client,
+				   struct diag_dci_buffer_t *buf)
+{
+	if (!buf || !client || !buf->data)
+		return;
+
+	if (buf->in_list || buf->data_len == 0)
+		return;
+
+	mutex_lock(&client->write_buf_mutex);
+	list_add_tail(&buf->buf_track, &client->list_write_buf);
+	/*
+	 * In the case of DCI, there can be multiple packets in one read. To
+	 * calculate the wakeup source reference count, we must account for each
+	 * packet in a single read.
+	 */
+	diag_ws_on_read(DIAG_WS_DCI, buf->data_len);
+	mutex_lock(&buf->data_mutex);
+	buf->in_busy = 1;
+	buf->in_list = 1;
+	mutex_unlock(&buf->data_mutex);
+	mutex_unlock(&client->write_buf_mutex);
+}
+
+static int diag_dci_get_buffer(struct diag_dci_client_tbl *client,
+			       int data_source, int len)
+{
+	struct diag_dci_buffer_t *buf_primary = NULL;
+	struct diag_dci_buffer_t *buf_temp = NULL;
+	struct diag_dci_buffer_t *curr = NULL;
+
+	if (!client)
+		return -EINVAL;
+	if (len < 0 || len > IN_BUF_SIZE)
+		return -EINVAL;
+
+	curr = client->buffers[data_source].buf_curr;
+	buf_primary = client->buffers[data_source].buf_primary;
+
+	if (curr && diag_dci_check_buffer(curr, len) == 1)
+		return 0;
+
+	dci_add_buffer_to_list(client, curr);
+	client->buffers[data_source].buf_curr = NULL;
+
+	if (diag_dci_check_buffer(buf_primary, len) == 1) {
+		client->buffers[data_source].buf_curr = buf_primary;
+		return 0;
+	}
+
+	buf_temp = kzalloc(sizeof(struct diag_dci_buffer_t), GFP_KERNEL);
+	if (!buf_temp)
+		return -EIO;
+
+	if (!diag_dci_init_buffer(buf_temp, DCI_BUF_SECONDARY)) {
+		buf_temp->data = diagmem_alloc(driver, IN_BUF_SIZE,
+					       POOL_TYPE_DCI);
+		if (!buf_temp->data) {
+			kfree(buf_temp);
+			buf_temp = NULL;
+			return -ENOMEM;
+		}
+		client->buffers[data_source].buf_curr = buf_temp;
+		return 0;
+	}
+
+	kfree(buf_temp);
+	buf_temp = NULL;
+	return -EIO;
+}
+
+void diag_dci_wakeup_clients(void)
+{
+	struct list_head *start, *temp;
+	struct diag_dci_client_tbl *entry = NULL;
+
+	mutex_lock(&driver->dci_mutex);
+	list_for_each_safe(start, temp, &driver->dci_client_list) {
+		entry = list_entry(start, struct diag_dci_client_tbl, track);
+
+		/*
+		 * Don't wake up the client when there is no pending buffer to
+		 * write or when it is writing to user space
+		 */
+		if (!list_empty(&entry->list_write_buf) && !entry->in_service) {
+			mutex_lock(&entry->write_buf_mutex);
+			entry->in_service = 1;
+			mutex_unlock(&entry->write_buf_mutex);
+			diag_update_sleeping_process(entry->client->tgid,
+						     DCI_DATA_TYPE);
+		}
+	}
+	mutex_unlock(&driver->dci_mutex);
+}
+
+void dci_data_drain_work_fn(struct work_struct *work)
+{
+	int i;
+	struct list_head *start, *temp;
+	struct diag_dci_client_tbl *entry = NULL;
+	struct diag_dci_buf_peripheral_t *proc_buf = NULL;
+	struct diag_dci_buffer_t *buf_temp = NULL;
+
+	mutex_lock(&driver->dci_mutex);
+	list_for_each_safe(start, temp, &driver->dci_client_list) {
+		entry = list_entry(start, struct diag_dci_client_tbl, track);
+		for (i = 0; i < entry->num_buffers; i++) {
+			proc_buf = &entry->buffers[i];
+
+			mutex_lock(&proc_buf->buf_mutex);
+			buf_temp = proc_buf->buf_primary;
+			if (DCI_CAN_ADD_BUF_TO_LIST(buf_temp))
+				dci_add_buffer_to_list(entry, buf_temp);
+
+			buf_temp = proc_buf->buf_cmd;
+			if (DCI_CAN_ADD_BUF_TO_LIST(buf_temp))
+				dci_add_buffer_to_list(entry, buf_temp);
+
+			buf_temp = proc_buf->buf_curr;
+			if (DCI_CAN_ADD_BUF_TO_LIST(buf_temp)) {
+				dci_add_buffer_to_list(entry, buf_temp);
+				proc_buf->buf_curr = NULL;
+			}
+			mutex_unlock(&proc_buf->buf_mutex);
+		}
+		if (!list_empty(&entry->list_write_buf) && !entry->in_service) {
+			mutex_lock(&entry->write_buf_mutex);
+			entry->in_service = 1;
+			mutex_unlock(&entry->write_buf_mutex);
+			diag_update_sleeping_process(entry->client->tgid,
+						     DCI_DATA_TYPE);
+		}
+	}
+	mutex_unlock(&driver->dci_mutex);
+	dci_timer_in_progress = 0;
+}
+
+static int diag_process_single_dci_pkt(unsigned char *buf, int len,
+				       int data_source, int token)
+{
+	uint8_t cmd_code = 0;
+
+	if (!buf || len < 0) {
+		pr_err("diag: Invalid input in %s, buf: %pK, len: %d\n",
+			__func__, buf, len);
+		return -EIO;
+	}
+
+	cmd_code = *(uint8_t *)buf;
+
+	switch (cmd_code) {
+	case LOG_CMD_CODE:
+		extract_dci_log(buf, len, data_source, token, NULL);
+		break;
+	case EVENT_CMD_CODE:
+		extract_dci_events(buf, len, data_source, token, NULL);
+		break;
+	case EXT_HDR_CMD_CODE:
+		extract_dci_ext_pkt(buf, len, data_source, token);
+		break;
+	case DCI_PKT_RSP_CODE:
+	case DCI_DELAYED_RSP_CODE:
+		extract_dci_pkt_rsp(buf, len, data_source, token);
+		break;
+	case DCI_CONTROL_PKT_CODE:
+		extract_dci_ctrl_pkt(buf, len, token);
+		break;
+	default:
+		pr_err("diag: Unable to process single DCI packet, cmd_code: %d, data_source: %d",
+			cmd_code, data_source);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/* Process the data read from apps userspace client */
+void diag_process_apps_dci_read_data(int data_type, void *buf, int recd_bytes)
+{
+	int err = 0;
+
+	if (!buf) {
+		pr_err_ratelimited("diag: In %s, Null buf pointer\n", __func__);
+		return;
+	}
+
+	if (data_type != DATA_TYPE_DCI_LOG && data_type != DATA_TYPE_DCI_EVENT
+						&& data_type != DCI_PKT_TYPE) {
+		pr_err("diag: In %s, unsupported data_type: 0x%x\n",
+				__func__, (unsigned int)data_type);
+		return;
+	}
+
+	err = diag_process_single_dci_pkt(buf, recd_bytes, APPS_DATA,
+					  DCI_LOCAL_PROC);
+	if (err)
+		return;
+
+	/* wake up all sleeping DCI clients which have some data */
+	diag_dci_wakeup_clients();
+	dci_check_drain_timer();
+}
+
+void diag_process_remote_dci_read_data(int index, void *buf, int recd_bytes)
+{
+	int read_bytes = 0, err = 0;
+	uint16_t dci_pkt_len;
+	struct diag_dci_header_t *header = NULL;
+	int header_len = sizeof(struct diag_dci_header_t);
+	int token = BRIDGE_TO_TOKEN(index);
+
+	if (!buf)
+		return;
+
+	diag_dci_record_traffic(recd_bytes, 0, 0, token);
+
+	if (!partial_pkt.processing)
+		goto start;
+
+	if (partial_pkt.remaining > recd_bytes) {
+		if ((partial_pkt.read_len + recd_bytes) >
+							(MAX_DCI_PACKET_SZ)) {
+			pr_err("diag: Invalid length %d, %d received in %s\n",
+			       partial_pkt.read_len, recd_bytes, __func__);
+			goto end;
+		}
+		memcpy(partial_pkt.data + partial_pkt.read_len, buf,
+								recd_bytes);
+		read_bytes += recd_bytes;
+		buf += read_bytes;
+		partial_pkt.read_len += recd_bytes;
+		partial_pkt.remaining -= recd_bytes;
+	} else {
+		if ((partial_pkt.read_len + partial_pkt.remaining) >
+							(MAX_DCI_PACKET_SZ)) {
+			pr_err("diag: Invalid length during partial read %d, %d received in %s\n",
+			       partial_pkt.read_len,
+			       partial_pkt.remaining, __func__);
+			goto end;
+		}
+		memcpy(partial_pkt.data + partial_pkt.read_len, buf,
+						partial_pkt.remaining);
+		read_bytes += partial_pkt.remaining;
+		buf += read_bytes;
+		partial_pkt.read_len += partial_pkt.remaining;
+		partial_pkt.remaining = 0;
+	}
+
+	if (partial_pkt.remaining == 0) {
+		/*
+		 * Process the packet payload, skipping the header: start
+		 * (1 byte) + version (1 byte) + length (2 bytes)
+		 */
+		diag_process_single_dci_pkt(partial_pkt.data + 4,
+				partial_pkt.read_len - header_len,
+				DCI_REMOTE_DATA, token);
+		partial_pkt.read_len = 0;
+		partial_pkt.total_len = 0;
+		partial_pkt.processing = 0;
+		goto start;
+	}
+	goto end;
+
+start:
+	while (read_bytes < recd_bytes) {
+		header = (struct diag_dci_header_t *)buf;
+		dci_pkt_len = header->length;
+
+		if (header->cmd_code != DCI_CONTROL_PKT_CODE &&
+			driver->num_dci_client == 0) {
+			read_bytes += header_len + dci_pkt_len;
+			buf += header_len + dci_pkt_len;
+			continue;
+		}
+
+		if (dci_pkt_len + header_len > MAX_DCI_PACKET_SZ) {
+			pr_err("diag: Invalid length in the dci packet field %d\n",
+								dci_pkt_len);
+			break;
+		}
+
+		if ((dci_pkt_len + header_len) > (recd_bytes - read_bytes)) {
+			partial_pkt.read_len = recd_bytes - read_bytes;
+			partial_pkt.total_len = dci_pkt_len + header_len;
+			partial_pkt.remaining = partial_pkt.total_len -
+						partial_pkt.read_len;
+			partial_pkt.processing = 1;
+			memcpy(partial_pkt.data, buf, partial_pkt.read_len);
+			break;
+		}
+		/*
+		 * Process the packet payload, skipping the header: start
+		 * (1 byte) + version (1 byte) + length (2 bytes)
+		 */
+		err = diag_process_single_dci_pkt(buf + 4, dci_pkt_len,
+						 DCI_REMOTE_DATA, DCI_MDM_PROC);
+		if (err)
+			break;
+		read_bytes += header_len + dci_pkt_len;
+		buf += header_len + dci_pkt_len; /* advance to next DCI pkt */
+	}
+end:
+	if (err)
+		return;
+	/* wake up all sleeping DCI clients which have some data */
+	diag_dci_wakeup_clients();
+	dci_check_drain_timer();
+	return;
+}
+
+/* Process the data read from the peripheral dci channels */
+void diag_dci_process_peripheral_data(struct diagfwd_info *p_info, void *buf,
+				      int recd_bytes)
+{
+	int read_bytes = 0, err = 0;
+	uint16_t dci_pkt_len;
+	struct diag_dci_pkt_header_t *header = NULL;
+	uint8_t recv_pkt_cmd_code;
+
+	if (!buf || !p_info)
+		return;
+
+	/*
+	 * Release wakeup source when there are no more clients to
+	 * process DCI data
+	 */
+	if (driver->num_dci_client == 0) {
+		diag_ws_reset(DIAG_WS_DCI);
+		return;
+	}
+
+	diag_dci_record_traffic(recd_bytes, p_info->type, p_info->peripheral,
+				DCI_LOCAL_PROC);
+	while (read_bytes < recd_bytes) {
+		header = (struct diag_dci_pkt_header_t *)buf;
+		recv_pkt_cmd_code = header->pkt_code;
+		dci_pkt_len = header->len;
+
+		/*
+		 * Check that the length of the current packet, including
+		 * space for the start byte (1), version byte (1), length
+		 * bytes (2) and end byte (1), does not exceed the remaining
+		 * bytes in the received buffer
+		 */
+		if ((dci_pkt_len + 5) > (recd_bytes - read_bytes)) {
+			pr_err("diag: Invalid length in %s, len: %d, dci_pkt_len: %d",
+				__func__, recd_bytes, dci_pkt_len);
+			diag_ws_release();
+			return;
+		}
+		/*
+		 * Process the packet payload, skipping the header: start
+		 * (1 byte) + version (1 byte) + length (2 bytes)
+		 */
+		err = diag_process_single_dci_pkt(buf + 4, dci_pkt_len,
+						  (int)p_info->peripheral,
+						  DCI_LOCAL_PROC);
+		if (err) {
+			diag_ws_release();
+			break;
+		}
+		read_bytes += 5 + dci_pkt_len;
+		buf += 5 + dci_pkt_len; /* advance to next DCI pkt */
+	}
+
+	if (err)
+		return;
+	/* wake up all sleeping DCI clients which have some data */
+	diag_dci_wakeup_clients();
+	dci_check_drain_timer();
+	return;
+}
+
+int diag_dci_query_log_mask(struct diag_dci_client_tbl *entry,
+			    uint16_t log_code)
+{
+	uint16_t item_num;
+	uint8_t equip_id, *log_mask_ptr, byte_mask;
+	int byte_index, offset;
+
+	if (!entry) {
+		pr_err("diag: In %s, invalid client entry\n", __func__);
+		return 0;
+	}
+
+	equip_id = LOG_GET_EQUIP_ID(log_code);
+	item_num = LOG_GET_ITEM_NUM(log_code);
+	byte_index = item_num/8 + 2;
+	byte_mask = 0x01 << (item_num % 8);
+	offset = equip_id * 514;
+
+	if (offset + byte_index >= DCI_LOG_MASK_SIZE) {
+		pr_err("diag: In %s, invalid offset: %d, log_code: %d, byte_index: %d\n",
+				__func__, offset, log_code, byte_index);
+		return 0;
+	}
+
+	log_mask_ptr = entry->dci_log_mask;
+	log_mask_ptr = log_mask_ptr + offset + byte_index;
+	return ((*log_mask_ptr & byte_mask) == byte_mask) ? 1 : 0;
+
+}
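+
+/*
+ * Worked example (illustrative; assumes LOG_GET_EQUIP_ID extracts the
+ * equipment ID nibble and LOG_GET_ITEM_NUM the low 12 bits, as
+ * elsewhere in diag): for log_code 0x1004, equip_id = 1 and
+ * item_num = 4, giving byte_index = 4/8 + 2 = 2, byte_mask = 0x10 and
+ * offset = 514; the lookup therefore tests bit 4 of byte 516 of the
+ * client's dci_log_mask.
+ */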
+
+int diag_dci_query_event_mask(struct diag_dci_client_tbl *entry,
+			      uint16_t event_id)
+{
+	uint8_t *event_mask_ptr, byte_mask;
+	int byte_index, bit_index;
+
+	if (!entry) {
+		pr_err("diag: In %s, invalid client entry\n", __func__);
+		return 0;
+	}
+
+	byte_index = event_id/8;
+	bit_index = event_id % 8;
+	byte_mask = 0x1 << bit_index;
+
+	if (byte_index >= DCI_EVENT_MASK_SIZE) {
+		pr_err("diag: In %s, invalid, event_id: %d, byte_index: %d\n",
+				__func__, event_id, byte_index);
+		return 0;
+	}
+
+	event_mask_ptr = entry->dci_event_mask;
+	event_mask_ptr = event_mask_ptr + byte_index;
+	return ((*event_mask_ptr & byte_mask) == byte_mask) ? 1 : 0;
+}
+
+static int diag_dci_filter_commands(struct diag_pkt_header_t *header)
+{
+	if (!header)
+		return -ENOMEM;
+
+	switch (header->cmd_code) {
+	case 0x7d: /* Msg Mask Configuration */
+	case 0x73: /* Log Mask Configuration */
+	case 0x81: /* Event Mask Configuration */
+	case 0x82: /* Event Mask Change */
+	case 0x60: /* Event Mask Toggle */
+		return 1;
+	}
+
+	if (header->cmd_code == 0x4b && header->subsys_id == 0x12) {
+		switch (header->subsys_cmd_code) {
+		case 0x60: /* Extended Event Mask Config */
+		case 0x61: /* Extended Msg Mask Config */
+		case 0x62: /* Extended Log Mask Config */
+		case 0x20C: /* Set current Preset ID */
+		case 0x20D: /* Get current Preset ID */
+		case 0x218: /* HDLC Disabled Command */
+			return 1;
+		}
+	}
+
+	return 0;
+}
+
+static struct dci_pkt_req_entry_t *diag_register_dci_transaction(int uid,
+								 int client_id)
+{
+	struct dci_pkt_req_entry_t *entry = NULL;
+	entry = kzalloc(sizeof(struct dci_pkt_req_entry_t), GFP_KERNEL);
+	if (!entry)
+		return NULL;
+
+	driver->dci_tag++;
+	entry->client_id = client_id;
+	entry->uid = uid;
+	entry->tag = driver->dci_tag;
+	pr_debug("diag: Registering DCI cmd req, client_id: %d, uid: %d, tag:%d\n",
+				entry->client_id, entry->uid, entry->tag);
+	list_add_tail(&entry->track, &driver->dci_req_list);
+
+	return entry;
+}
+
+static struct dci_pkt_req_entry_t *diag_dci_get_request_entry(int tag)
+{
+	struct list_head *start, *temp;
+	struct dci_pkt_req_entry_t *entry = NULL;
+	list_for_each_safe(start, temp, &driver->dci_req_list) {
+		entry = list_entry(start, struct dci_pkt_req_entry_t, track);
+		if (entry->tag == tag)
+			return entry;
+	}
+	return NULL;
+}
+
+static int diag_dci_remove_req_entry(unsigned char *buf, int len,
+				     struct dci_pkt_req_entry_t *entry)
+{
+	uint16_t rsp_count = 0, delayed_rsp_id = 0;
+	if (!buf || len <= 0 || !entry) {
+		pr_err("diag: In %s, invalid input buf: %pK, len: %d, entry: %pK\n",
+			__func__, buf, len, entry);
+		return -EIO;
+	}
+
+	/* It is an immediate response, delete it from the table */
+	if (*buf != 0x80) {
+		list_del(&entry->track);
+		kfree(entry);
+		entry = NULL;
+		return 1;
+	}
+
+	/* It is a delayed response. Check if the length is valid */
+	if (len < MIN_DELAYED_RSP_LEN) {
+		pr_err("diag: Invalid delayed rsp packet length %d\n", len);
+		return -EINVAL;
+	}
+
+	/*
+	 * If the delayed response id field (uint16_t at byte 8) is 0 then
+	 * there is only one response and we can remove the request entry.
+	 */
+	delayed_rsp_id = *(uint16_t *)(buf + 8);
+	if (delayed_rsp_id == 0) {
+		list_del(&entry->track);
+		kfree(entry);
+		entry = NULL;
+		return 1;
+	}
+
+	/*
+	 * Check the response count field (uint16_t at byte 10). The request
+	 * entry can be deleted if it is the last response in the sequence.
+	 * It is the last response in the sequence if the response count
+	 * is 1 or if the sign bit gets dropped.
+	 */
+	rsp_count = *(uint16_t *)(buf + 10);
+	if (rsp_count > 0 && rsp_count < 0x1000) {
+		list_del(&entry->track);
+		kfree(entry);
+		entry = NULL;
+		return 1;
+	}
+
+	return 0;
+}
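+
+/*
+ * Delayed response layout relied on above (for reference): buf[0] is
+ * 0x80 for delayed responses, the uint16_t at buf + 8 is the delayed
+ * response id (0 means there is only a single response) and the
+ * uint16_t at buf + 10 is the response count; the entry is removed
+ * once the final response in the sequence (count in (0, 0x1000)) has
+ * been seen.
+ */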
+
+static void dci_process_ctrl_status(unsigned char *buf, int len, int token)
+{
+	struct diag_ctrl_dci_status *header = NULL;
+	unsigned char *temp = buf;
+	uint32_t read_len = 0;
+	uint8_t i;
+	int peripheral_mask, status;
+
+	if (!buf || (len < sizeof(struct diag_ctrl_dci_status))) {
+		pr_err("diag: In %s, invalid buf %pK or length: %d\n",
+		       __func__, buf, len);
+		return;
+	}
+
+	if (!VALID_DCI_TOKEN(token)) {
+		pr_err("diag: In %s, invalid DCI token %d\n", __func__, token);
+		return;
+	}
+
+	header = (struct diag_ctrl_dci_status *)temp;
+	temp += sizeof(struct diag_ctrl_dci_status);
+	read_len += sizeof(struct diag_ctrl_dci_status);
+
+	for (i = 0; i < header->count; i++) {
+		if (read_len > (len - 2)) {
+			pr_err("diag: In %s, Invalid length len: %d\n",
+			       __func__, len);
+			return;
+		}
+
+		switch (*(uint8_t *)temp) {
+		case PERIPHERAL_MODEM:
+			peripheral_mask = DIAG_CON_MPSS;
+			break;
+		case PERIPHERAL_LPASS:
+			peripheral_mask = DIAG_CON_LPASS;
+			break;
+		case PERIPHERAL_WCNSS:
+			peripheral_mask = DIAG_CON_WCNSS;
+			break;
+		case PERIPHERAL_SENSORS:
+			peripheral_mask = DIAG_CON_SENSORS;
+			break;
+		default:
+			pr_err("diag: In %s, unknown peripheral, peripheral: %d\n",
+				__func__, *(uint8_t *)temp);
+			return;
+		}
+		temp += sizeof(uint8_t);
+		read_len += sizeof(uint8_t);
+
+		status = (*(uint8_t *)temp) ? DIAG_STATUS_OPEN :
+							DIAG_STATUS_CLOSED;
+		temp += sizeof(uint8_t);
+		read_len += sizeof(uint8_t);
+		diag_dci_notify_client(peripheral_mask, status, token);
+	}
+}
+
+static void dci_process_ctrl_handshake_pkt(unsigned char *buf, int len,
+					   int token)
+{
+	struct diag_ctrl_dci_handshake_pkt *header = NULL;
+	unsigned char *temp = buf;
+	int err = 0;
+
+	if (!buf || (len < sizeof(struct diag_ctrl_dci_handshake_pkt)))
+		return;
+
+	if (!VALID_DCI_TOKEN(token))
+		return;
+
+	header = (struct diag_ctrl_dci_handshake_pkt *)temp;
+	if (header->magic == DCI_MAGIC) {
+		dci_channel_status[token].open = 1;
+		err = dci_ops_tbl[token].send_log_mask(token);
+		if (err) {
+			pr_err("diag: In %s, unable to send log mask to token: %d, err: %d\n",
+			       __func__, token, err);
+		}
+		err = dci_ops_tbl[token].send_event_mask(token);
+		if (err) {
+			pr_err("diag: In %s, unable to send event mask to token: %d, err: %d\n",
+			       __func__, token, err);
+		}
+	}
+}
+
+void extract_dci_ctrl_pkt(unsigned char *buf, int len, int token)
+{
+	unsigned char *temp = buf;
+	uint32_t ctrl_pkt_id;
+
+	diag_ws_on_read(DIAG_WS_DCI, len);
+	if (!buf) {
+		pr_err("diag: Invalid buffer in %s\n", __func__);
+		goto err;
+	}
+
+	if (len < (sizeof(uint8_t) + sizeof(uint32_t))) {
+		pr_err("diag: In %s, invalid length %d\n", __func__, len);
+		goto err;
+	}
+
+	/* Skip the Control packet command code */
+	temp += sizeof(uint8_t);
+	len -= sizeof(uint8_t);
+	ctrl_pkt_id = *(uint32_t *)temp;
+	switch (ctrl_pkt_id) {
+	case DIAG_CTRL_MSG_DCI_CONNECTION_STATUS:
+		dci_process_ctrl_status(temp, len, token);
+		break;
+	case DIAG_CTRL_MSG_DCI_HANDSHAKE_PKT:
+		dci_process_ctrl_handshake_pkt(temp, len, token);
+		break;
+	default:
+		pr_debug("diag: In %s, unknown control pkt %d\n",
+			 __func__, ctrl_pkt_id);
+		break;
+	}
+
+err:
+	/*
+	 * DCI control packets are not consumed by the clients. Mimic client
+	 * consumption by setting and clearing the wakeup source copy_count
+	 * explicitly.
+	 */
+	diag_ws_on_copy_fail(DIAG_WS_DCI);
+}
+
+void extract_dci_pkt_rsp(unsigned char *buf, int len, int data_source,
+			 int token)
+{
+	int tag;
+	struct diag_dci_client_tbl *entry = NULL;
+	void *temp_buf = NULL;
+	uint8_t dci_cmd_code, cmd_code_len, delete_flag = 0;
+	uint32_t rsp_len = 0;
+	struct diag_dci_buffer_t *rsp_buf = NULL;
+	struct dci_pkt_req_entry_t *req_entry = NULL;
+	unsigned char *temp = buf;
+	int save_req_uid = 0;
+	struct diag_dci_pkt_rsp_header_t pkt_rsp_header;
+
+	if (!buf) {
+		pr_err("diag: Invalid pointer in %s\n", __func__);
+		return;
+	}
+	dci_cmd_code = *(uint8_t *)(temp);
+	if (dci_cmd_code == DCI_PKT_RSP_CODE) {
+		cmd_code_len = sizeof(uint8_t);
+	} else if (dci_cmd_code == DCI_DELAYED_RSP_CODE) {
+		cmd_code_len = sizeof(uint32_t);
+	} else {
+		pr_err("diag: In %s, invalid command code %d\n", __func__,
+								dci_cmd_code);
+		return;
+	}
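+	/*
+	 * Packet layout from here: cmd code (1 byte, or 4 bytes for a
+	 * delayed response) | tag (int) | response payload.
+	 */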
+	temp += cmd_code_len;
+	tag = *(int *)temp;
+	temp += sizeof(int);
+
+	/*
+	 * The size of the response is (total length) - (length of the
+	 * command code + the tag (int))
+	 */
+	rsp_len = len - (cmd_code_len + sizeof(int));
+	if ((rsp_len == 0) || (rsp_len > (len - 5))) {
+		pr_err("diag: Invalid length in %s, len: %d, rsp_len: %d",
+						__func__, len, rsp_len);
+		return;
+	}
+
+	mutex_lock(&driver->dci_mutex);
+	req_entry = diag_dci_get_request_entry(tag);
+	if (!req_entry) {
+		pr_err_ratelimited("diag: No matching client for DCI data\n");
+		mutex_unlock(&driver->dci_mutex);
+		return;
+	}
+
+	entry = diag_dci_get_client_entry(req_entry->client_id);
+	if (!entry) {
+		pr_err("diag: In %s, couldn't find client entry, id:%d\n",
+						__func__, req_entry->client_id);
+		mutex_unlock(&driver->dci_mutex);
+		return;
+	}
+
+	save_req_uid = req_entry->uid;
+	/* Remove the headers and send only the response to this function */
+	delete_flag = diag_dci_remove_req_entry(temp, rsp_len, req_entry);
+	if (delete_flag < 0) {
+		mutex_unlock(&driver->dci_mutex);
+		return;
+	}
+
+	mutex_lock(&entry->buffers[data_source].buf_mutex);
+	rsp_buf = entry->buffers[data_source].buf_cmd;
+
+	mutex_lock(&rsp_buf->data_mutex);
+	/*
+	 * Check if we can fit the data in the rsp buffer. The total length of
+	 * the rsp is the rsp length (rsp_len) + DCI_PKT_RSP_TYPE header (int)
+	 * + field for length (int) + delete_flag (uint8_t) + uid (int), i.e.
+	 * the full packet response header followed by the response itself.
+	 */
+	if ((rsp_buf->data_len + sizeof(pkt_rsp_header) + rsp_len) >
+						rsp_buf->capacity) {
+		pr_alert("diag: create capacity for pkt rsp\n");
+		rsp_buf->capacity += sizeof(pkt_rsp_header) + rsp_len;
+		temp_buf = krealloc(rsp_buf->data, rsp_buf->capacity,
+				    GFP_KERNEL);
+		if (!temp_buf) {
+			pr_err("diag: DCI realloc failed\n");
+			mutex_unlock(&rsp_buf->data_mutex);
+			mutex_unlock(&entry->buffers[data_source].buf_mutex);
+			mutex_unlock(&driver->dci_mutex);
+			return;
+		}
+		rsp_buf->data = temp_buf;
+	}
+
+	/* Fill in packet response header information */
+	pkt_rsp_header.type = DCI_PKT_RSP_TYPE;
+	/* Packet Length = Response Length + Length of uid field (int) */
+	pkt_rsp_header.length = rsp_len + sizeof(int);
+	pkt_rsp_header.delete_flag = delete_flag;
+	pkt_rsp_header.uid = save_req_uid;
+	memcpy(rsp_buf->data + rsp_buf->data_len, &pkt_rsp_header,
+		sizeof(struct diag_dci_pkt_rsp_header_t));
+	rsp_buf->data_len += sizeof(struct diag_dci_pkt_rsp_header_t);
+	memcpy(rsp_buf->data + rsp_buf->data_len, temp, rsp_len);
+	rsp_buf->data_len += rsp_len;
+	rsp_buf->data_source = data_source;
+
+	mutex_unlock(&rsp_buf->data_mutex);
+
+	/*
+	 * Add directly to the list for writing responses to the
+	 * userspace as these shouldn't be buffered and shouldn't wait
+	 * for log and event buffers to be full
+	 */
+	dci_add_buffer_to_list(entry, rsp_buf);
+	mutex_unlock(&entry->buffers[data_source].buf_mutex);
+	mutex_unlock(&driver->dci_mutex);
+}
+
+static void copy_ext_hdr(struct diag_dci_buffer_t *data_buffer, void *ext_hdr)
+{
+	if (!data_buffer) {
+		pr_err("diag: In %s, data buffer is NULL", __func__);
+		return;
+	}
+
+	*(int *)(data_buffer->data + data_buffer->data_len) =
+			DCI_EXT_HDR_TYPE;
+	data_buffer->data_len += sizeof(int);
+	memcpy(data_buffer->data + data_buffer->data_len, ext_hdr,
+			EXT_HDR_LEN);
+	data_buffer->data_len += EXT_HDR_LEN;
+}
+
+static void copy_dci_event(unsigned char *buf, int len,
+			struct diag_dci_client_tbl *client, int data_source,
+			void *ext_hdr)
+{
+	struct diag_dci_buffer_t *data_buffer = NULL;
+	struct diag_dci_buf_peripheral_t *proc_buf = NULL;
+	int err = 0, total_len = 0;
+
+	if (!buf || !client) {
+		pr_err("diag: Invalid pointers in %s", __func__);
+		return;
+	}
+
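+	/*
+	 * Each record written to the client buffer is a DCI_EVENT_TYPE
+	 * marker (int) followed by the raw event payload, optionally
+	 * preceded by an extended header record.
+	 */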
+	total_len = sizeof(int) + len;
+	if (ext_hdr)
+		total_len += sizeof(int) + EXT_HDR_LEN;
+
+	proc_buf = &client->buffers[data_source];
+	mutex_lock(&proc_buf->buf_mutex);
+	mutex_lock(&proc_buf->health_mutex);
+	err = diag_dci_get_buffer(client, data_source, total_len);
+	if (err) {
+		if (err == -ENOMEM)
+			proc_buf->health.dropped_events++;
+		else
+			pr_err("diag: In %s, invalid packet\n", __func__);
+		mutex_unlock(&proc_buf->health_mutex);
+		mutex_unlock(&proc_buf->buf_mutex);
+		return;
+	}
+
+	data_buffer = proc_buf->buf_curr;
+
+	proc_buf->health.received_events++;
+	mutex_unlock(&proc_buf->health_mutex);
+	mutex_unlock(&proc_buf->buf_mutex);
+
+	mutex_lock(&data_buffer->data_mutex);
+	if (ext_hdr)
+		copy_ext_hdr(data_buffer, ext_hdr);
+
+	*(int *)(data_buffer->data + data_buffer->data_len) = DCI_EVENT_TYPE;
+	data_buffer->data_len += sizeof(int);
+	memcpy(data_buffer->data + data_buffer->data_len, buf, len);
+	data_buffer->data_len += len;
+	data_buffer->data_source = data_source;
+	mutex_unlock(&data_buffer->data_mutex);
+}
+
+void extract_dci_events(unsigned char *buf, int len, int data_source,
+		int token, void *ext_hdr)
+{
+	uint16_t event_id, event_id_packet, length, temp_len;
+	uint8_t payload_len, payload_len_field;
+	uint8_t timestamp[8] = {0}, timestamp_len;
+	unsigned char event_data[MAX_EVENT_SIZE];
+	unsigned int total_event_len;
+	struct list_head *start, *temp;
+	struct diag_dci_client_tbl *entry = NULL;
+
+	if (!buf) {
+		pr_err("diag: In %s buffer is NULL\n", __func__);
+		return;
+	}
+	/*
+	 * 1 byte for event code and 2 bytes for the length field.
+	 * The length field indicates the total length excluding the
+	 * cmd_code and the length field itself. Event parsing should
+	 * then continue till the end of the packet.
+	 */
+	if (len < 3) {
+		pr_err("diag: In %s invalid len: %d\n", __func__, len);
+		return;
+	}
+	length = *(uint16_t *)(buf + 1); /* total length of event series */
+	if ((length == 0) || (len != (length + 3))) {
+		pr_err("diag: Incoming dci event length: %d is invalid\n",
+			length);
+		return;
+	}
+	/*
+	 * Move directly to the start of the event series.
+	 * The event parsing should happen from start of event
+	 * series till the end.
+	 */
+	temp_len = 3;
+	while (temp_len < length) {
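+		/*
+		 * Each event starts with a 16-bit event ID word:
+		 *   bit  15     - 1 = only the two least significant
+		 *                 timestamp bytes follow
+		 *   bits 14..13 - payload length selector (3 means an
+		 *                 explicit length byte precedes the payload)
+		 *   bits 11..0  - event ID
+		 */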
+		event_id_packet = *(uint16_t *)(buf + temp_len);
+		event_id = event_id_packet & 0x0FFF; /* extract 12 bits */
+		if (event_id_packet & 0x8000) {
+			/* The packet has only the two least significant
+			 * bytes of the timestamp
+			 */
+			timestamp_len = 2;
+		} else {
+			/* The packet has the full timestamp. The first event
+			 * will always have full timestamp. Save it in the
+			 * timestamp buffer and use it for subsequent events if
+			 * necessary.
+			 */
+			timestamp_len = 8;
+			if ((temp_len + timestamp_len + 2) <= len) {
+				memcpy(timestamp, buf + temp_len + 2,
+					timestamp_len);
+			} else {
+				pr_err("diag: Invalid length in %s, len: %d, temp_len: %d",
+						__func__, len, temp_len);
+				return;
+			}
+		}
+		/* 13th and 14th bit represent the payload length */
+		if (((event_id_packet & 0x6000) >> 13) == 3) {
+			payload_len_field = 1;
+			if ((temp_len + timestamp_len + 3) <= len) {
+				payload_len = *(uint8_t *)
+					(buf + temp_len + 2 + timestamp_len);
+			} else {
+				pr_err("diag: Invalid length in %s, len: %d, temp_len: %d",
+						__func__, len, temp_len);
+				return;
+			}
+			if ((payload_len < (MAX_EVENT_SIZE - 13)) &&
+			((temp_len + timestamp_len + payload_len + 3) <= len)) {
+				/*
+				 * Copy the payload length and the payload
+				 * after skipping temp_len bytes for already
+				 * parsed packet, timestamp_len for timestamp
+				 * buffer, 2 bytes for event_id_packet.
+				 */
+				memcpy(event_data + 12, buf + temp_len + 2 +
+							timestamp_len, 1);
+				memcpy(event_data + 13, buf + temp_len + 2 +
+					timestamp_len + 1, payload_len);
+			} else {
+				pr_err("diag: event > %d, payload_len = %d, temp_len = %d\n",
+				(MAX_EVENT_SIZE - 13), payload_len, temp_len);
+				return;
+			}
+		} else {
+			payload_len_field = 0;
+			payload_len = (event_id_packet & 0x6000) >> 13;
+			/*
+			 * Copy the payload after skipping temp_len bytes
+			 * for already parsed packet, timestamp_len for
+			 * timestamp buffer, 2 bytes for event_id_packet.
+			 */
+			if ((payload_len < (MAX_EVENT_SIZE - 12)) &&
+			((temp_len + timestamp_len + payload_len + 2) <= len)) {
+				memcpy(event_data + 12, buf + temp_len + 2 +
+						timestamp_len, payload_len);
+			} else {
+				pr_err("diag: event > %d, payload_len = %d, temp_len = %d\n",
+				(MAX_EVENT_SIZE - 12), payload_len, temp_len);
+				return;
+			}
+		}
+
+		/* Before copying the data to userspace, check if we are still
+		 * within the buffer limit. This is an error case; don't count
+		 * it towards the health statistics.
+		 *
+		 * Here, the offset of 2 bytes (uint16_t) is for the
+		 * event_id_packet length
+		 */
+		temp_len += sizeof(uint16_t) + timestamp_len +
+						payload_len_field + payload_len;
+		if (temp_len > len) {
+			pr_err("diag: Invalid length in %s, len: %d, read: %d",
+						__func__, len, temp_len);
+			return;
+		}
+
+		/* 2 bytes for the event id & timestamp len is hardcoded to 8,
+		   as events sent to clients always carry a full timestamp */
+		*(uint16_t *)(event_data) = 10 +
+					payload_len_field + payload_len;
+		*(uint16_t *)(event_data + 2) = event_id_packet & 0x7FFF;
+		memcpy(event_data + 4, timestamp, 8);
+		/* 2 bytes for the event length field which is added to
+		   the event data */
+		total_event_len = 2 + 10 + payload_len_field + payload_len;
+		/* parse through event mask tbl of each client and check mask */
+		mutex_lock(&driver->dci_mutex);
+		list_for_each_safe(start, temp, &driver->dci_client_list) {
+			entry = list_entry(start, struct diag_dci_client_tbl,
+									track);
+			if (entry->client_info.token != token)
+				continue;
+			if (diag_dci_query_event_mask(entry, event_id)) {
+				/* copy to client buffer */
+				copy_dci_event(event_data, total_event_len,
+					       entry, data_source, ext_hdr);
+			}
+		}
+		mutex_unlock(&driver->dci_mutex);
+	}
+}
+
+static void copy_dci_log(unsigned char *buf, int len,
+			 struct diag_dci_client_tbl *client, int data_source,
+			 void *ext_hdr)
+{
+	uint16_t log_length = 0;
+	struct diag_dci_buffer_t *data_buffer = NULL;
+	struct diag_dci_buf_peripheral_t *proc_buf = NULL;
+	int err = 0, total_len = 0;
+
+	if (!buf || !client) {
+		pr_err("diag: Invalid pointers in %s", __func__);
+		return;
+	}
+
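+	/*
+	 * buf holds the full log packet: cmd code (2 bytes) and packet
+	 * length (2 bytes), followed by the log record itself. Only the
+	 * record (buf + 4 onwards) is copied out, framed with a
+	 * DCI_LOG_TYPE (int) marker.
+	 */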
+	log_length = *(uint16_t *)(buf + 2);
+	if (log_length > USHRT_MAX - 4) {
+		pr_err("diag: Integer overflow in %s, log_len: %d",
+				__func__, log_length);
+		return;
+	}
+	total_len = sizeof(int) + log_length;
+	if (ext_hdr)
+		total_len += sizeof(int) + EXT_HDR_LEN;
+
+	/* Check if we are within the len. The check should include the
+	 * first 4 bytes for the Log code(2) and the length bytes (2)
+	 */
+	if ((log_length + sizeof(uint16_t) + 2) > len) {
+		pr_err("diag: Invalid length in %s, log_len: %d, len: %d",
+						__func__, log_length, len);
+		return;
+	}
+
+	proc_buf = &client->buffers[data_source];
+	mutex_lock(&proc_buf->buf_mutex);
+	mutex_lock(&proc_buf->health_mutex);
+	err = diag_dci_get_buffer(client, data_source, total_len);
+	if (err) {
+		if (err == -ENOMEM)
+			proc_buf->health.dropped_logs++;
+		else
+			pr_err("diag: In %s, invalid packet\n", __func__);
+		mutex_unlock(&proc_buf->health_mutex);
+		mutex_unlock(&proc_buf->buf_mutex);
+		return;
+	}
+
+	data_buffer = proc_buf->buf_curr;
+	proc_buf->health.received_logs++;
+	mutex_unlock(&proc_buf->health_mutex);
+	mutex_unlock(&proc_buf->buf_mutex);
+
+	mutex_lock(&data_buffer->data_mutex);
+	if (!data_buffer->data) {
+		mutex_unlock(&data_buffer->data_mutex);
+		return;
+	}
+	if (ext_hdr)
+		copy_ext_hdr(data_buffer, ext_hdr);
+
+	*(int *)(data_buffer->data + data_buffer->data_len) = DCI_LOG_TYPE;
+	data_buffer->data_len += sizeof(int);
+	memcpy(data_buffer->data + data_buffer->data_len, buf + sizeof(int),
+	       log_length);
+	data_buffer->data_len += log_length;
+	data_buffer->data_source = data_source;
+	mutex_unlock(&data_buffer->data_mutex);
+}
+
+void extract_dci_log(unsigned char *buf, int len, int data_source, int token,
+			void *ext_hdr)
+{
+	uint16_t log_code, read_bytes = 0;
+	struct list_head *start, *temp;
+	struct diag_dci_client_tbl *entry = NULL;
+
+	if (!buf) {
+		pr_err("diag: In %s buffer is NULL\n", __func__);
+		return;
+	}
+	/*
+	 * The first eight bytes for the incoming log packet contains
+	 * Command code (2), the length of the packet (2), the length
+	 * of the log (2) and log code (2)
+	 */
+	if (len < 8) {
+		pr_err("diag: In %s invalid len: %d\n", __func__, len);
+		return;
+	}
+
+	log_code = *(uint16_t *)(buf + 6);
+	read_bytes += sizeof(uint16_t) + 6;
+
+	/* parse through log mask table of each client and check mask */
+	mutex_lock(&driver->dci_mutex);
+	list_for_each_safe(start, temp, &driver->dci_client_list) {
+		entry = list_entry(start, struct diag_dci_client_tbl, track);
+		if (entry->client_info.token != token)
+			continue;
+		if (diag_dci_query_log_mask(entry, log_code)) {
+			pr_debug("\t log code %x needed by client %d",
+				 log_code, entry->client->tgid);
+			/* copy to client buffer */
+			copy_dci_log(buf, len, entry, data_source, ext_hdr);
+		}
+	}
+	mutex_unlock(&driver->dci_mutex);
+}
+
+void extract_dci_ext_pkt(unsigned char *buf, int len, int data_source,
+		int token)
+{
+	uint8_t version, pkt_cmd_code = 0;
+	unsigned char *pkt = NULL;
+
+	if (!buf) {
+		pr_err("diag: In %s buffer is NULL\n", __func__);
+		return;
+	}
+	if (len < (EXT_HDR_LEN + sizeof(uint8_t))) {
+		pr_err("diag: In %s invalid len: %d\n", __func__, len);
+		return;
+	}
+
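+	/*
+	 * An extended header of EXT_HDR_LEN bytes precedes a regular log
+	 * or event packet. Strip it, dispatch on the inner cmd code, and
+	 * pass the original header through so it can be prepended to the
+	 * data copied to the client.
+	 */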
+	version = *(uint8_t *)(buf + 1);
+	if (version < EXT_HDR_VERSION)  {
+		pr_err("diag: %s, Extended header with invalid version: %d\n",
+			__func__, version);
+		return;
+	}
+
+	pkt = buf + EXT_HDR_LEN;
+	pkt_cmd_code = *(uint8_t *)pkt;
+	len -= EXT_HDR_LEN;
+
+	switch (pkt_cmd_code) {
+	case LOG_CMD_CODE:
+		extract_dci_log(pkt, len, data_source, token, buf);
+		break;
+	case EVENT_CMD_CODE:
+		extract_dci_events(pkt, len, data_source, token, buf);
+		break;
+	default:
+		pr_err("diag: %s unsupported cmd_code: %d, data_source: %d\n",
+			__func__, pkt_cmd_code, data_source);
+		return;
+	}
+}
+
+void diag_dci_channel_open_work(struct work_struct *work)
+{
+	int i, j;
+	char dirty_bits[16];
+	uint8_t *client_log_mask_ptr;
+	uint8_t *log_mask_ptr;
+	int ret;
+	struct list_head *start, *temp;
+	struct diag_dci_client_tbl *entry = NULL;
+
+	/* Update apps and peripheral(s) with the dci log and event masks */
+	memset(dirty_bits, 0, 16 * sizeof(uint8_t));
+
+	/*
+	 * From each log entry used by each client, determine
+	 * which log entries in the cumulative logs that need
+	 * to be updated on the peripheral.
+	 */
+	mutex_lock(&driver->dci_mutex);
+	list_for_each_safe(start, temp, &driver->dci_client_list) {
+		entry = list_entry(start, struct diag_dci_client_tbl, track);
+		if (entry->client_info.token != DCI_LOCAL_PROC)
+			continue;
+		client_log_mask_ptr = entry->dci_log_mask;
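+		/* one 514 byte record (equip ID, dirty byte, 512 mask
+		 * bytes) per equipment ID; mark an equipment ID dirty if
+		 * any client has its dirty byte set
+		 */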
+		for (j = 0; j < 16; j++) {
+			if (*(client_log_mask_ptr+1))
+				dirty_bits[j] = 1;
+			client_log_mask_ptr += 514;
+		}
+	}
+	mutex_unlock(&driver->dci_mutex);
+
+	mutex_lock(&dci_log_mask_mutex);
+	/* Update the appropriate dirty bits in the cumulative mask */
+	log_mask_ptr = dci_ops_tbl[DCI_LOCAL_PROC].log_mask_composite;
+	for (i = 0; i < 16; i++) {
+		if (dirty_bits[i])
+			*(log_mask_ptr+1) = dirty_bits[i];
+
+		log_mask_ptr += 514;
+	}
+	mutex_unlock(&dci_log_mask_mutex);
+
+	/* Send updated mask to userspace clients */
+	diag_update_userspace_clients(DCI_LOG_MASKS_TYPE);
+	/* Send updated log mask to peripherals */
+	ret = dci_ops_tbl[DCI_LOCAL_PROC].send_log_mask(DCI_LOCAL_PROC);
+
+	/* Send updated event mask to userspace clients */
+	diag_update_userspace_clients(DCI_EVENT_MASKS_TYPE);
+	/* Send updated event mask to peripheral */
+	ret = dci_ops_tbl[DCI_LOCAL_PROC].send_event_mask(DCI_LOCAL_PROC);
+}
+
+void diag_dci_notify_client(int peripheral_mask, int data, int proc)
+{
+	int stat = 0;
+	struct siginfo info;
+	struct list_head *start, *temp;
+	struct diag_dci_client_tbl *entry = NULL;
+	struct pid *pid_struct = NULL;
+	struct task_struct *dci_task = NULL;
+
+	memset(&info, 0, sizeof(struct siginfo));
+	info.si_code = SI_QUEUE;
+	info.si_int = (peripheral_mask | data);
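+	/*
+	 * Clients are notified via the signal number they registered
+	 * (signal_type); si_int carries the peripheral bitmask OR'd with
+	 * the open/closed status so a single value describes the change.
+	 */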
+	if (data == DIAG_STATUS_OPEN)
+		dci_ops_tbl[proc].peripheral_status |= peripheral_mask;
+	else
+		dci_ops_tbl[proc].peripheral_status &= ~peripheral_mask;
+
+	/* Notify the DCI process that the peripheral DCI Channel is up */
+	mutex_lock(&driver->dci_mutex);
+	list_for_each_safe(start, temp, &driver->dci_client_list) {
+		entry = list_entry(start, struct diag_dci_client_tbl, track);
+		if (entry->client_info.token != proc)
+			continue;
+		if (entry->client_info.notification_list & peripheral_mask) {
+			info.si_signo = entry->client_info.signal_type;
+			pid_struct = find_get_pid(entry->tgid);
+			if (pid_struct) {
+				dci_task = get_pid_task(pid_struct,
+						PIDTYPE_PID);
+				if (!dci_task) {
+					DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+						"diag: dci client with pid = %d Exited..\n",
+						entry->tgid);
+					mutex_unlock(&driver->dci_mutex);
+					return;
+				}
+				if (entry->client &&
+					entry->tgid == dci_task->tgid) {
+					DIAG_LOG(DIAG_DEBUG_DCI,
+						"entry tgid = %d, dci client tgid = %d\n",
+						entry->tgid, dci_task->tgid);
+					stat = send_sig_info(
+						entry->client_info.signal_type,
+						&info, dci_task);
+					if (stat)
+						pr_err("diag: Err sending dci signal to client, signal data: 0x%x, stat: %d\n",
+							info.si_int, stat);
+				} else {
+					pr_err("diag: client data is corrupted, signal data: 0x%x, stat: %d\n",
+						info.si_int, stat);
+				}
+			}
+		}
+	}
+	mutex_unlock(&driver->dci_mutex);
+}
+
+static int diag_send_dci_pkt(struct diag_cmd_reg_t *entry,
+			     unsigned char *buf, int len, int tag)
+{
+	int i, status = DIAG_DCI_NO_ERROR;
+	uint32_t write_len = 0;
+	struct diag_dci_pkt_header_t header;
+
+	if (!entry)
+		return -EIO;
+
+	if (len < 1 || len > DIAG_MAX_REQ_SIZE) {
+		pr_err("diag: dci: In %s, invalid length %d, max_length: %d\n",
+		       __func__, len, DIAG_MAX_REQ_SIZE);
+		return -EIO;
+	}
+
+	if ((len + sizeof(header) + sizeof(uint8_t)) > DCI_BUF_SIZE) {
+		pr_err("diag: dci: In %s, invalid length %d for apps_dci_buf, max_length: %d\n",
+		       __func__, len,
+		       (int)(DCI_BUF_SIZE - sizeof(header) - sizeof(uint8_t)));
+		return -EIO;
+	}
+
+	mutex_lock(&driver->dci_mutex);
+	/* prepare DCI packet */
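+	/*
+	 * apps_dci_buf framing: dci pkt header (start, version, len,
+	 * pkt code, tag) | request payload | CONTROL_CHAR terminator.
+	 * header.len counts the pkt code (uint8_t), the tag (int) and
+	 * the payload, matching the remote framing below.
+	 */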
+	header.start = CONTROL_CHAR;
+	header.version = 1;
+	header.len = len + sizeof(int) + sizeof(uint8_t);
+	header.pkt_code = DCI_PKT_RSP_CODE;
+	header.tag = tag;
+	memcpy(driver->apps_dci_buf, &header, sizeof(header));
+	write_len += sizeof(header);
+	memcpy(driver->apps_dci_buf + write_len, buf, len);
+	write_len += len;
+	*(uint8_t *)(driver->apps_dci_buf + write_len) = CONTROL_CHAR;
+	write_len += sizeof(uint8_t);
+
+	/* This command is registered locally on the Apps */
+	if (entry->proc == APPS_DATA) {
+		diag_update_pkt_buffer(driver->apps_dci_buf, write_len,
+				       DCI_PKT_TYPE);
+		diag_update_sleeping_process(entry->pid, DCI_PKT_TYPE);
+		mutex_unlock(&driver->dci_mutex);
+		return DIAG_DCI_NO_ERROR;
+	}
+
+	for (i = 0; i < NUM_PERIPHERALS; i++) {
+		if (entry->proc == i) {
+			status = 1;
+			break;
+		}
+	}
+
+	if (status) {
+		status = diag_dci_write_proc(entry->proc,
+					     DIAG_DATA_TYPE,
+					     driver->apps_dci_buf,
+					     write_len);
+	} else {
+		pr_err("diag: Cannot send packet to peripheral %d",
+		       entry->proc);
+		status = DIAG_DCI_SEND_DATA_FAIL;
+	}
+	mutex_unlock(&driver->dci_mutex);
+	return status;
+}
+
+#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
+unsigned char *dci_get_buffer_from_bridge(int token)
+{
+	uint8_t retries = 0, max_retries = 3;
+	unsigned char *buf = NULL;
+
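+	/*
+	 * The bridge mempool may be transiently exhausted while earlier
+	 * writes drain, so retry a few times with a short sleep before
+	 * giving up and returning NULL.
+	 */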
+	do {
+		buf = diagmem_alloc(driver, DIAG_MDM_BUF_SIZE,
+				    dci_ops_tbl[token].mempool);
+		if (!buf) {
+			usleep_range(5000, 5100);
+			retries++;
+		} else {
+			break;
+		}
+	} while (retries < max_retries);
+
+	return buf;
+}
+
+int diag_dci_write_bridge(int token, unsigned char *buf, int len)
+{
+	return diagfwd_bridge_write(TOKEN_TO_BRIDGE(token), buf, len);
+}
+
+int diag_dci_write_done_bridge(int index, unsigned char *buf, int len)
+{
+	int token = BRIDGE_TO_TOKEN(index);
+
+	if (!VALID_DCI_TOKEN(token)) {
+		pr_err("diag: Invalid DCI token %d in %s\n", token, __func__);
+		return -EINVAL;
+	}
+	diagmem_free(driver, buf, dci_ops_tbl[token].mempool);
+	return 0;
+}
+#endif
+
+#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
+static int diag_send_dci_pkt_remote(unsigned char *data, int len, int tag,
+				    int token)
+{
+	unsigned char *buf = NULL;
+	struct diag_dci_header_t dci_header;
+	int dci_header_size = sizeof(struct diag_dci_header_t);
+	int ret = DIAG_DCI_NO_ERROR;
+	uint32_t write_len = 0;
+
+	if (!data)
+		return -EIO;
+
+	buf = dci_get_buffer_from_bridge(token);
+	if (!buf) {
+		pr_err("diag: In %s, unable to get dci buffers to write data\n",
+			__func__);
+		return -EAGAIN;
+	}
+
+	dci_header.start = CONTROL_CHAR;
+	dci_header.version = 1;
+	/*
+	 * The Length of the DCI packet = length of the command + tag (int) +
+	 * the command code size (uint8_t)
+	 */
+	dci_header.length = len + sizeof(int) + sizeof(uint8_t);
+	dci_header.cmd_code = DCI_PKT_RSP_CODE;
+
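+	/*
+	 * On-the-wire framing: dci header (start, version, length,
+	 * cmd code) | tag (int) | request payload | CONTROL_CHAR end
+	 * terminator.
+	 */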
+	memcpy(buf + write_len, &dci_header, dci_header_size);
+	write_len += dci_header_size;
+	*(int *)(buf + write_len) = tag;
+	write_len += sizeof(int);
+	memcpy(buf + write_len, data, len);
+	write_len += len;
+	*(buf + write_len) = CONTROL_CHAR; /* End Terminator */
+	write_len += sizeof(uint8_t);
+
+	ret = diag_dci_write_bridge(token, buf, write_len);
+	if (ret) {
+		pr_err("diag: error writing dci pkt to remote proc, token: %d, err: %d\n",
+			token, ret);
+		diagmem_free(driver, buf, dci_ops_tbl[token].mempool);
+	} else {
+		ret = DIAG_DCI_NO_ERROR;
+	}
+
+	return ret;
+}
+#else
+static int diag_send_dci_pkt_remote(unsigned char *data, int len, int tag,
+				    int token)
+{
+	return DIAG_DCI_NO_ERROR;
+}
+#endif
+
+#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
+int diag_dci_send_handshake_pkt(int index)
+{
+	int err = 0;
+	int token = BRIDGE_TO_TOKEN(index);
+	int write_len = 0;
+	struct diag_ctrl_dci_handshake_pkt ctrl_pkt;
+	unsigned char *buf = NULL;
+	struct diag_dci_header_t dci_header;
+
+	if (!VALID_DCI_TOKEN(token)) {
+		pr_err("diag: In %s, invalid DCI token %d\n", __func__, token);
+		return -EINVAL;
+	}
+
+	buf = dci_get_buffer_from_bridge(token);
+	if (!buf) {
+		pr_err("diag: In %s, unable to get dci buffers to write data\n",
+			__func__);
+		return -EAGAIN;
+	}
+
+	dci_header.start = CONTROL_CHAR;
+	dci_header.version = 1;
+	/* Include the cmd code (uint8_t) in the length */
+	dci_header.length = sizeof(ctrl_pkt) + sizeof(uint8_t);
+	dci_header.cmd_code = DCI_CONTROL_PKT_CODE;
+	memcpy(buf, &dci_header, sizeof(dci_header));
+	write_len += sizeof(dci_header);
+
+	ctrl_pkt.ctrl_pkt_id = DIAG_CTRL_MSG_DCI_HANDSHAKE_PKT;
+	/*
+	 *  The control packet data length accounts for the version (uint32_t)
+	 *  of the packet and the magic number (uint32_t).
+	 */
+	ctrl_pkt.ctrl_pkt_data_len = 2 * sizeof(uint32_t);
+	ctrl_pkt.version = 1;
+	ctrl_pkt.magic = DCI_MAGIC;
+	memcpy(buf + write_len, &ctrl_pkt, sizeof(ctrl_pkt));
+	write_len += sizeof(ctrl_pkt);
+
+	*(uint8_t *)(buf + write_len) = CONTROL_CHAR;
+	write_len += sizeof(uint8_t);
+
+	err = diag_dci_write_bridge(token, buf, write_len);
+	if (err) {
+		pr_err("diag: error writing ack packet to remote proc, token: %d, err: %d\n",
+		       token, err);
+		diagmem_free(driver, buf, dci_ops_tbl[token].mempool);
+		return err;
+	}
+
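+	/*
+	 * Arm the handshake timeout; dci_chk_handshake() runs if the
+	 * remote processor does not answer within DCI_HANDSHAKE_WAIT_TIME.
+	 */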
+	mod_timer(&(dci_channel_status[token].wait_time),
+		  jiffies + msecs_to_jiffies(DCI_HANDSHAKE_WAIT_TIME));
+
+	return 0;
+}
+#else
+int diag_dci_send_handshake_pkt(int index)
+{
+	return 0;
+}
+#endif
+
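+/*
+ * Handle DCI requests that the apps processor can answer directly
+ * (download mode, polling, version/build ID queries, log on demand and
+ * a few DIAG subsystem commands) without forwarding to a peripheral.
+ * Returns DIAG_DCI_NO_ERROR when a response was generated and
+ * DIAG_DCI_TABLE_ERR when the command is not handled here.
+ */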
+static int diag_dci_process_apps_pkt(struct diag_pkt_header_t *pkt_header,
+				     unsigned char *req_buf, int req_len,
+				     int tag)
+{
+	uint8_t cmd_code, subsys_id, i, goto_download = 0;
+	uint8_t header_len = sizeof(struct diag_dci_pkt_header_t);
+	uint16_t ss_cmd_code;
+	uint32_t write_len = 0;
+	unsigned char *dest_buf = driver->apps_dci_buf;
+	unsigned char *payload_ptr = driver->apps_dci_buf + header_len;
+	struct diag_dci_pkt_header_t dci_header;
+
+	if (!pkt_header || !req_buf || req_len <= 0 || tag < 0)
+		return -EIO;
+
+	cmd_code = pkt_header->cmd_code;
+	subsys_id = pkt_header->subsys_id;
+	ss_cmd_code = pkt_header->subsys_cmd_code;
+
+	if (cmd_code == DIAG_CMD_DOWNLOAD) {
+		*payload_ptr = DIAG_CMD_DOWNLOAD;
+		write_len = sizeof(uint8_t);
+		goto_download = 1;
+		goto fill_buffer;
+	} else if (cmd_code == DIAG_CMD_VERSION) {
+		if (chk_polling_response()) {
+			for (i = 0; i < 55; i++, write_len++, payload_ptr++)
+				*(payload_ptr) = 0;
+			goto fill_buffer;
+		}
+	} else if (cmd_code == DIAG_CMD_EXT_BUILD) {
+		if (chk_polling_response()) {
+			*payload_ptr = DIAG_CMD_EXT_BUILD;
+			write_len = sizeof(uint8_t);
+			payload_ptr += sizeof(uint8_t);
+			for (i = 0; i < 8; i++, write_len++, payload_ptr++)
+				*(payload_ptr) = 0;
+			*(int *)(payload_ptr) = chk_config_get_id();
+			write_len += sizeof(int);
+			goto fill_buffer;
+		}
+	} else if (cmd_code == DIAG_CMD_LOG_ON_DMND) {
+		write_len = diag_cmd_log_on_demand(req_buf, req_len,
+						   payload_ptr,
+						   APPS_BUF_SIZE - header_len);
+		goto fill_buffer;
+	} else if (cmd_code != DIAG_CMD_DIAG_SUBSYS) {
+		return DIAG_DCI_TABLE_ERR;
+	}
+
+	if (subsys_id == DIAG_SS_DIAG) {
+		if (ss_cmd_code == DIAG_DIAG_MAX_PKT_SZ) {
+			memcpy(payload_ptr, pkt_header,
+					sizeof(struct diag_pkt_header_t));
+			write_len = sizeof(struct diag_pkt_header_t);
+			*(uint32_t *)(payload_ptr + write_len) =
+							DIAG_MAX_REQ_SIZE;
+			write_len += sizeof(uint32_t);
+		} else if (ss_cmd_code == DIAG_DIAG_STM) {
+			write_len = diag_process_stm_cmd(req_buf, payload_ptr);
+		}
+	} else if (subsys_id == DIAG_SS_PARAMS) {
+		if (ss_cmd_code == DIAG_DIAG_POLL) {
+			if (chk_polling_response()) {
+				memcpy(payload_ptr, pkt_header,
+					sizeof(struct diag_pkt_header_t));
+				write_len = sizeof(struct diag_pkt_header_t);
+				payload_ptr += write_len;
+				for (i = 0; i < 12; i++, write_len++) {
+					*(payload_ptr) = 0;
+					payload_ptr++;
+				}
+			}
+		} else if (ss_cmd_code == DIAG_DEL_RSP_WRAP) {
+			memcpy(payload_ptr, pkt_header,
+					sizeof(struct diag_pkt_header_t));
+			write_len = sizeof(struct diag_pkt_header_t);
+			*(int *)(payload_ptr + write_len) = wrap_enabled;
+			write_len += sizeof(int);
+		} else if (ss_cmd_code == DIAG_DEL_RSP_WRAP_CNT) {
+			wrap_enabled = true;
+			memcpy(payload_ptr, pkt_header,
+					sizeof(struct diag_pkt_header_t));
+			write_len = sizeof(struct diag_pkt_header_t);
+			*(uint16_t *)(payload_ptr + write_len) = wrap_count;
+			write_len += sizeof(uint16_t);
+		} else if (ss_cmd_code == DIAG_EXT_MOBILE_ID) {
+			write_len = diag_cmd_get_mobile_id(req_buf, req_len,
+						   payload_ptr,
+						   APPS_BUF_SIZE - header_len);
+		}
+	}
+
+fill_buffer:
+	if (write_len > 0) {
+		/* Check if we are within the range of the buffer*/
+		if (write_len + header_len > DIAG_MAX_REQ_SIZE) {
+			pr_err("diag: In %s, invalid length %d\n", __func__,
+						write_len + header_len);
+			return -ENOMEM;
+		}
+		dci_header.start = CONTROL_CHAR;
+		dci_header.version = 1;
+		/*
+		 * Length of the rsp pkt = actual data len + pkt rsp code
+		 * (uint8_t) + tag (int)
+		 */
+		dci_header.len = write_len + sizeof(uint8_t) + sizeof(int);
+		dci_header.pkt_code = DCI_PKT_RSP_CODE;
+		dci_header.tag = tag;
+		driver->in_busy_dcipktdata = 1;
+		memcpy(dest_buf, &dci_header, header_len);
+		diag_process_apps_dci_read_data(DCI_PKT_TYPE, dest_buf + 4,
+						dci_header.len);
+		driver->in_busy_dcipktdata = 0;
+
+		if (goto_download) {
+			/*
+			 * Sleep for some time so that the response reaches
+			 * the client. The value 5000 was chosen empirically
+			 * as an optimum time for the response to reach the
+			 * client.
+			 */
+			usleep_range(5000, 5100);
+			/* call download API */
+			msm_set_restart_mode(RESTART_DLOAD);
+			pr_alert("diag: download mode set, Rebooting SoC..\n");
+			kernel_restart(NULL);
+		}
+		return DIAG_DCI_NO_ERROR;
+	}
+
+	return DIAG_DCI_TABLE_ERR;
+}
+
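+/*
+ * Entry point for a client's packet request/response transaction:
+ * validate the request header, filter commands not allowed over DCI,
+ * register a transaction tag, then route the request to the remote
+ * processor, the apps handler above, or a registered peripheral.
+ */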
+static int diag_process_dci_pkt_rsp(unsigned char *buf, int len)
+{
+	int ret = DIAG_DCI_TABLE_ERR;
+	int common_cmd = 0;
+	struct diag_pkt_header_t *header = NULL;
+	unsigned char *temp = buf;
+	unsigned char *req_buf = NULL;
+	uint8_t retry_count = 0, max_retries = 3;
+	uint32_t read_len = 0, req_len = len;
+	struct dci_pkt_req_entry_t *req_entry = NULL;
+	struct diag_dci_client_tbl *dci_entry = NULL;
+	struct dci_pkt_req_t req_hdr;
+	struct diag_cmd_reg_t *reg_item;
+	struct diag_cmd_reg_entry_t reg_entry;
+	struct diag_cmd_reg_entry_t *temp_entry;
+
+	if (!buf)
+		return -EIO;
+
+	if (len <= sizeof(struct dci_pkt_req_t) || len > DCI_REQ_BUF_SIZE) {
+		pr_err("diag: dci: Invalid length %d in %s", len, __func__);
+		return -EIO;
+	}
+
+	req_hdr = *(struct dci_pkt_req_t *)temp;
+	temp += sizeof(struct dci_pkt_req_t);
+	read_len += sizeof(struct dci_pkt_req_t);
+	req_len -= sizeof(struct dci_pkt_req_t);
+	req_buf = temp; /* Start of the Request */
+	header = (struct diag_pkt_header_t *)temp;
+	temp += sizeof(struct diag_pkt_header_t);
+	read_len += sizeof(struct diag_pkt_header_t);
+	if (read_len >= DCI_REQ_BUF_SIZE) {
+		pr_err("diag: dci: In %s, invalid read_len: %d\n", __func__,
+		       read_len);
+		return -EIO;
+	}
+
+	mutex_lock(&driver->dci_mutex);
+	dci_entry = diag_dci_get_client_entry(req_hdr.client_id);
+	if (!dci_entry) {
+		pr_err("diag: Invalid client %d in %s\n",
+		       req_hdr.client_id, __func__);
+		mutex_unlock(&driver->dci_mutex);
+		return DIAG_DCI_NO_REG;
+	}
+
+	/* Check if the command is allowed on DCI */
+	if (diag_dci_filter_commands(header)) {
+		pr_debug("diag: command not supported %d %d %d",
+			 header->cmd_code, header->subsys_id,
+			 header->subsys_cmd_code);
+		mutex_unlock(&driver->dci_mutex);
+		return DIAG_DCI_SEND_DATA_FAIL;
+	}
+
+	common_cmd = diag_check_common_cmd(header);
+	if (common_cmd < 0) {
+		pr_debug("diag: error in checking common command, %d\n",
+			 common_cmd);
+		mutex_unlock(&driver->dci_mutex);
+		return DIAG_DCI_SEND_DATA_FAIL;
+	}
+
+	/*
+	 * Previous packet is yet to be consumed by the client. Wait
+	 * till the buffer is free.
+	 */
+	while (retry_count < max_retries) {
+		retry_count++;
+		if (driver->in_busy_dcipktdata)
+			usleep_range(10000, 10100);
+		else
+			break;
+	}
+	/* The buffer is still busy */
+	if (driver->in_busy_dcipktdata) {
+		pr_err("diag: In %s, apps dci buffer is still busy. Dropping packet\n",
+								__func__);
+		mutex_unlock(&driver->dci_mutex);
+		return -EAGAIN;
+	}
+
+	/* Register this new DCI packet */
+	req_entry = diag_register_dci_transaction(req_hdr.uid,
+						  req_hdr.client_id);
+	if (!req_entry) {
+		pr_alert("diag: registering new DCI transaction failed\n");
+		mutex_unlock(&driver->dci_mutex);
+		return DIAG_DCI_NO_REG;
+	}
+	mutex_unlock(&driver->dci_mutex);
+
+	/*
+	 * If the client has registered for remote data, route the packet to the
+	 * remote processor
+	 */
+	if (dci_entry->client_info.token > 0) {
+		ret = diag_send_dci_pkt_remote(req_buf, req_len, req_entry->tag,
+					       dci_entry->client_info.token);
+		return ret;
+	}
+
+	/* Check if it is a dedicated Apps command */
+	ret = diag_dci_process_apps_pkt(header, req_buf, req_len,
+					req_entry->tag);
+	if ((ret == DIAG_DCI_NO_ERROR && !common_cmd) || ret < 0)
+		return ret;
+
+	reg_entry.cmd_code = header->cmd_code;
+	reg_entry.subsys_id = header->subsys_id;
+	reg_entry.cmd_code_hi = header->subsys_cmd_code;
+	reg_entry.cmd_code_lo = header->subsys_cmd_code;
+
+	mutex_lock(&driver->cmd_reg_mutex);
+	temp_entry = diag_cmd_search(&reg_entry, ALL_PROC);
+	if (temp_entry) {
+		reg_item = container_of(temp_entry, struct diag_cmd_reg_t,
+								entry);
+		ret = diag_send_dci_pkt(reg_item, req_buf, req_len,
+					req_entry->tag);
+	} else {
+		DIAG_LOG(DIAG_DEBUG_DCI, "Command not found: %02x %02x %02x\n",
+				reg_entry.cmd_code, reg_entry.subsys_id,
+				reg_entry.cmd_code_hi);
+	}
+	mutex_unlock(&driver->cmd_reg_mutex);
+
+	return ret;
+}
+
+int diag_process_dci_transaction(unsigned char *buf, int len)
+{
+	unsigned char *temp = buf;
+	uint16_t log_code, item_num;
+	int ret = -1, found = 0, client_id = 0, client_token = 0;
+	int count, set_mask, num_codes, bit_index, event_id, offset = 0;
+	unsigned int byte_index, read_len = 0;
+	uint8_t equip_id, *log_mask_ptr, *head_log_mask_ptr, byte_mask;
+	uint8_t *event_mask_ptr;
+	struct diag_dci_client_tbl *dci_entry = NULL;
+
+	if (!temp) {
+		pr_err("diag: Invalid buffer in %s\n", __func__);
+		return -ENOMEM;
+	}
+
+	/* This is Pkt request/response transaction */
+	if (*(int *)temp > 0) {
+		return diag_process_dci_pkt_rsp(buf, len);
+	} else if (*(int *)temp == DCI_LOG_TYPE) {
+		/* Minimum length of a log mask config is 12 + 2 bytes for
+		   at least one log code to be set or reset */
+		if (len < DCI_LOG_CON_MIN_LEN || len > USER_SPACE_DATA) {
+			pr_err("diag: dci: Invalid length in %s\n", __func__);
+			return -EIO;
+		}
+
+		/* Extract each log code and put in client table */
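+		/*
+		 * Layout: type (int) | client_id (int) | set_mask (int) |
+		 * num_codes (int) | num_codes 16-bit log codes.
+		 */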
+		temp += sizeof(int);
+		read_len += sizeof(int);
+		client_id = *(int *)temp;
+		temp += sizeof(int);
+		read_len += sizeof(int);
+		set_mask = *(int *)temp;
+		temp += sizeof(int);
+		read_len += sizeof(int);
+		num_codes = *(int *)temp;
+		temp += sizeof(int);
+		read_len += sizeof(int);
+
+		/* find client table entry */
+		mutex_lock(&driver->dci_mutex);
+		dci_entry = diag_dci_get_client_entry(client_id);
+		if (!dci_entry) {
+			pr_err("diag: In %s, invalid client\n", __func__);
+			mutex_unlock(&driver->dci_mutex);
+			return ret;
+		}
+		client_token = dci_entry->client_info.token;
+
+		if (num_codes == 0 || (num_codes >= (USER_SPACE_DATA - 8)/2)) {
+			pr_err("diag: dci: Invalid number of log codes %d\n",
+								num_codes);
+			mutex_unlock(&driver->dci_mutex);
+			return -EIO;
+		}
+
+		head_log_mask_ptr = dci_entry->dci_log_mask;
+		if (!head_log_mask_ptr) {
+			pr_err("diag: dci: Invalid Log mask pointer in %s\n",
+								__func__);
+			mutex_unlock(&driver->dci_mutex);
+			return -ENOMEM;
+		}
+		pr_debug("diag: head of dci log mask %pK\n", head_log_mask_ptr);
+		count = 0; /* iterator for extracting log codes */
+
+		while (count < num_codes) {
+			if (read_len >= USER_SPACE_DATA) {
+				pr_err("diag: dci: Invalid length for log type in %s",
+								__func__);
+				mutex_unlock(&driver->dci_mutex);
+				return -EIO;
+			}
+			log_code = *(uint16_t *)temp;
+			equip_id = LOG_GET_EQUIP_ID(log_code);
+			item_num = LOG_GET_ITEM_NUM(log_code);
+			byte_index = item_num/8 + 2;
+			if (byte_index >= (DCI_MAX_ITEMS_PER_LOG_CODE+2)) {
+				pr_err("diag: dci: Log type, invalid byte index\n");
+				mutex_unlock(&driver->dci_mutex);
+				return ret;
+			}
+			byte_mask = 0x01 << (item_num % 8);
+			/*
+			 * Parse through log mask table and find
+			 * relevant range
+			 */
+			log_mask_ptr = head_log_mask_ptr;
+			found = 0;
+			offset = 0;
+			while (log_mask_ptr && (offset < DCI_LOG_MASK_SIZE)) {
+				if (*log_mask_ptr == equip_id) {
+					found = 1;
+					pr_debug("diag: find equip id = %x at %pK\n",
+						 equip_id, log_mask_ptr);
+					break;
+				} else {
+					pr_debug("diag: did not find equip id = %x at %d\n",
+						 equip_id, *log_mask_ptr);
+					log_mask_ptr += 514;
+					offset += 514;
+				}
+			}
+			if (!found) {
+				pr_err("diag: dci equip id not found\n");
+				mutex_unlock(&driver->dci_mutex);
+				return ret;
+			}
+			*(log_mask_ptr+1) = 1; /* set the dirty byte */
+			log_mask_ptr = log_mask_ptr + byte_index;
+			if (set_mask)
+				*log_mask_ptr |= byte_mask;
+			else
+				*log_mask_ptr &= ~byte_mask;
+			/* add to cumulative mask */
+			update_dci_cumulative_log_mask(
+				offset, byte_index,
+				byte_mask, client_token);
+			temp += 2;
+			read_len += 2;
+			count++;
+			ret = DIAG_DCI_NO_ERROR;
+		}
+		/* send updated mask to userspace clients */
+		if (client_token == DCI_LOCAL_PROC)
+			diag_update_userspace_clients(DCI_LOG_MASKS_TYPE);
+		/* send updated mask to peripherals */
+		ret = dci_ops_tbl[client_token].send_log_mask(client_token);
+		mutex_unlock(&driver->dci_mutex);
+	} else if (*(int *)temp == DCI_EVENT_TYPE) {
+		/* Minimum length of an event mask config is 12 + 4 bytes for
+		   at least one event id to be set or reset. */
+		if (len < DCI_EVENT_CON_MIN_LEN || len > USER_SPACE_DATA) {
+			pr_err("diag: dci: Invalid length in %s\n", __func__);
+			return -EIO;
+		}
+
+		/* Extract each event id and put in client table */
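+		/*
+		 * Layout: type (int) | client_id (int) | set_mask (int) |
+		 * num_codes (int) | num_codes 32-bit event ids.
+		 */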
+		temp += sizeof(int);
+		read_len += sizeof(int);
+		client_id = *(int *)temp;
+		temp += sizeof(int);
+		read_len += sizeof(int);
+		set_mask = *(int *)temp;
+		temp += sizeof(int);
+		read_len += sizeof(int);
+		num_codes = *(int *)temp;
+		temp += sizeof(int);
+		read_len += sizeof(int);
+
+		/* find client table entry */
+		mutex_lock(&driver->dci_mutex);
+		dci_entry = diag_dci_get_client_entry(client_id);
+		if (!dci_entry) {
+			pr_err("diag: In %s, invalid client\n", __func__);
+			mutex_unlock(&driver->dci_mutex);
+			return ret;
+		}
+		client_token = dci_entry->client_info.token;
+
+		/* Check for a positive number of event ids. Also, the number
+		   of event ids should fit in the buffer along with set_mask
+		   and num_codes which are 4 bytes each */
+		if (num_codes == 0 || (num_codes >= (USER_SPACE_DATA - 8)/2)) {
+			pr_err("diag: dci: Invalid number of event ids %d\n",
+								num_codes);
+			mutex_unlock(&driver->dci_mutex);
+			return -EIO;
+		}
+
+		event_mask_ptr = dci_entry->dci_event_mask;
+		if (!event_mask_ptr) {
+			pr_err("diag: dci: Invalid event mask pointer in %s\n",
+								__func__);
+			mutex_unlock(&driver->dci_mutex);
+			return -ENOMEM;
+		}
+		pr_debug("diag: head of dci event mask %pK\n", event_mask_ptr);
+		count = 0; /* iterator for extracting event ids */
+		while (count < num_codes) {
+			if (read_len >= USER_SPACE_DATA) {
+				pr_err("diag: dci: Invalid length for event type in %s",
+								__func__);
+				mutex_unlock(&driver->dci_mutex);
+				return -EIO;
+			}
+			event_id = *(int *)temp;
+			byte_index = event_id/8;
+			if (byte_index >= DCI_EVENT_MASK_SIZE) {
+				pr_err("diag: dci: Event type, invalid byte index\n");
+				mutex_unlock(&driver->dci_mutex);
+				return ret;
+			}
+			bit_index = event_id % 8;
+			byte_mask = 0x1 << bit_index;
+			/*
+			 * Parse through event mask table and set
+			 * relevant byte & bit combination
+			 */
+			if (set_mask)
+				*(event_mask_ptr + byte_index) |= byte_mask;
+			else
+				*(event_mask_ptr + byte_index) &= ~byte_mask;
+			/* add to cumulative mask */
+			update_dci_cumulative_event_mask(byte_index, byte_mask,
+							 client_token);
+			temp += sizeof(int);
+			read_len += sizeof(int);
+			count++;
+			ret = DIAG_DCI_NO_ERROR;
+		}
+		/* send updated mask to userspace clients */
+		if (dci_entry->client_info.token == DCI_LOCAL_PROC)
+			diag_update_userspace_clients(DCI_EVENT_MASKS_TYPE);
+		/* send updated mask to peripherals */
+		ret = dci_ops_tbl[client_token].send_event_mask(client_token);
+		mutex_unlock(&driver->dci_mutex);
+	} else {
+		pr_alert("diag: Incorrect DCI transaction\n");
+	}
+	return ret;
+}
+
+struct diag_dci_client_tbl *diag_dci_get_client_entry(int client_id)
+{
+	struct list_head *start, *temp;
+	struct diag_dci_client_tbl *entry = NULL;
+	list_for_each_safe(start, temp, &driver->dci_client_list) {
+		entry = list_entry(start, struct diag_dci_client_tbl, track);
+		if (entry->client_info.client_id == client_id)
+			return entry;
+	}
+	return NULL;
+}
+
+struct diag_dci_client_tbl *dci_lookup_client_entry_pid(int tgid)
+{
+	struct list_head *start, *temp;
+	struct diag_dci_client_tbl *entry = NULL;
+	struct pid *pid_struct = NULL;
+	struct task_struct *task_s = NULL;
+
+	list_for_each_safe(start, temp, &driver->dci_client_list) {
+		entry = list_entry(start, struct diag_dci_client_tbl, track);
+		pid_struct = find_get_pid(entry->tgid);
+		if (!pid_struct) {
+			DIAG_LOG(DIAG_DEBUG_DCI,
+				"diag: valid pid doesn't exist for pid = %d\n",
+				entry->tgid);
+			continue;
+		}
+		task_s = get_pid_task(pid_struct, PIDTYPE_PID);
+		if (!task_s) {
+			DIAG_LOG(DIAG_DEBUG_DCI,
+				"diag: valid task doesn't exist for pid = %d\n",
+				entry->tgid);
+			continue;
+		}
+		if (task_s == entry->client)
+			if (entry->client->tgid == tgid)
+				return entry;
+	}
+	return NULL;
+}
+
+void update_dci_cumulative_event_mask(int offset, uint8_t byte_mask, int token)
+{
+	uint8_t *event_mask_ptr, *update_ptr = NULL;
+	struct list_head *start, *temp;
+	struct diag_dci_client_tbl *entry = NULL;
+	bool is_set = false;
+
+	mutex_lock(&dci_event_mask_mutex);
+	update_ptr = dci_ops_tbl[token].event_mask_composite;
+	if (!update_ptr) {
+		mutex_unlock(&dci_event_mask_mutex);
+		return;
+	}
+	update_ptr += offset;
+	list_for_each_safe(start, temp, &driver->dci_client_list) {
+		entry = list_entry(start, struct diag_dci_client_tbl, track);
+		if (entry->client_info.token != token)
+			continue;
+		event_mask_ptr = entry->dci_event_mask;
+		event_mask_ptr += offset;
+		if ((*event_mask_ptr & byte_mask) == byte_mask) {
+			is_set = true;
+			/* break even if one client has the event mask set */
+			break;
+		}
+	}
+	if (is_set == false)
+		*update_ptr &= ~byte_mask;
+	else
+		*update_ptr |= byte_mask;
+	mutex_unlock(&dci_event_mask_mutex);
+}
+
+void diag_dci_invalidate_cumulative_event_mask(int token)
+{
+	int i = 0;
+	struct list_head *start, *temp;
+	struct diag_dci_client_tbl *entry = NULL;
+	uint8_t *event_mask_ptr, *update_ptr = NULL;
+
+	mutex_lock(&dci_event_mask_mutex);
+	update_ptr = dci_ops_tbl[token].event_mask_composite;
+	if (!update_ptr) {
+		mutex_unlock(&dci_event_mask_mutex);
+		return;
+	}
+
+	create_dci_event_mask_tbl(update_ptr);
+	list_for_each_safe(start, temp, &driver->dci_client_list) {
+		entry = list_entry(start, struct diag_dci_client_tbl, track);
+		if (entry->client_info.token != token)
+			continue;
+		event_mask_ptr = entry->dci_event_mask;
+		for (i = 0; i < DCI_EVENT_MASK_SIZE; i++)
+			*(update_ptr+i) |= *(event_mask_ptr+i);
+	}
+	mutex_unlock(&dci_event_mask_mutex);
+}
+
+#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
+int diag_send_dci_event_mask_remote(int token)
+{
+	unsigned char *buf = NULL;
+	struct diag_dci_header_t dci_header;
+	struct diag_ctrl_event_mask event_mask;
+	int dci_header_size = sizeof(struct diag_dci_header_t);
+	int event_header_size = sizeof(struct diag_ctrl_event_mask);
+	int i, ret = DIAG_DCI_NO_ERROR, err = DIAG_DCI_NO_ERROR;
+	unsigned char *event_mask_ptr = NULL;
+	uint32_t write_len = 0;
+
+	mutex_lock(&dci_event_mask_mutex);
+	event_mask_ptr = dci_ops_tbl[token].event_mask_composite;
+	if (!event_mask_ptr) {
+		mutex_unlock(&dci_event_mask_mutex);
+		return -EINVAL;
+	}
+	buf = dci_get_buffer_from_bridge(token);
+	if (!buf) {
+		pr_err("diag: In %s, unable to get dci buffers to write data\n",
+			__func__);
+		mutex_unlock(&dci_event_mask_mutex);
+		return -EAGAIN;
+	}
+
+	/* Frame the DCI header */
+	dci_header.start = CONTROL_CHAR;
+	dci_header.version = 1;
+	dci_header.length = event_header_size + DCI_EVENT_MASK_SIZE + 1;
+	dci_header.cmd_code = DCI_CONTROL_PKT_CODE;
+
+	event_mask.cmd_type = DIAG_CTRL_MSG_EVENT_MASK;
+	event_mask.data_len = EVENT_MASK_CTRL_HEADER_LEN + DCI_EVENT_MASK_SIZE;
+	event_mask.stream_id = DCI_MASK_STREAM;
+	event_mask.status = DIAG_CTRL_MASK_VALID;
+	event_mask.event_config = 0; /* event config */
+	event_mask.event_mask_size = DCI_EVENT_MASK_SIZE;
+	for (i = 0; i < DCI_EVENT_MASK_SIZE; i++) {
+		if (event_mask_ptr[i] != 0) {
+			event_mask.event_config = 1;
+			break;
+		}
+	}
+	memcpy(buf + write_len, &dci_header, dci_header_size);
+	write_len += dci_header_size;
+	memcpy(buf + write_len, &event_mask, event_header_size);
+	write_len += event_header_size;
+	memcpy(buf + write_len, event_mask_ptr, DCI_EVENT_MASK_SIZE);
+	write_len += DCI_EVENT_MASK_SIZE;
+	*(buf + write_len) = CONTROL_CHAR; /* End Terminator */
+	write_len += sizeof(uint8_t);
+	err = diag_dci_write_bridge(token, buf, write_len);
+	if (err) {
+		pr_err("diag: error writing event mask to remote proc, token: %d, err: %d\n",
+		       token, err);
+		diagmem_free(driver, buf, dci_ops_tbl[token].mempool);
+		ret = err;
+	} else {
+		ret = DIAG_DCI_NO_ERROR;
+	}
+	mutex_unlock(&dci_event_mask_mutex);
+	return ret;
+}
+#endif
+
+int diag_send_dci_event_mask(int token)
+{
+	void *buf = event_mask.update_buf;
+	struct diag_ctrl_event_mask header;
+	int header_size = sizeof(struct diag_ctrl_event_mask);
+	int ret = DIAG_DCI_NO_ERROR, err = DIAG_DCI_NO_ERROR, i;
+	unsigned char *event_mask_ptr = NULL;
+
+	mutex_lock(&dci_event_mask_mutex);
+	event_mask_ptr = dci_ops_tbl[DCI_LOCAL_PROC].event_mask_composite;
+	if (!event_mask_ptr) {
+		mutex_unlock(&dci_event_mask_mutex);
+		return -EINVAL;
+	}
+
+	mutex_lock(&event_mask.lock);
+	/* send event mask update */
+	header.cmd_type = DIAG_CTRL_MSG_EVENT_MASK;
+	header.data_len = EVENT_MASK_CTRL_HEADER_LEN + DCI_EVENT_MASK_SIZE;
+	header.stream_id = DCI_MASK_STREAM;
+	header.status = DIAG_CTRL_MASK_VALID;
+	header.event_config = 0; /* event config */
+	header.event_mask_size = DCI_EVENT_MASK_SIZE;
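+	/*
+	 * event_config is a master enable: report 1 only if at least one
+	 * bit is set anywhere in the composite event mask.
+	 */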
+	for (i = 0; i < DCI_EVENT_MASK_SIZE; i++) {
+		if (event_mask_ptr[i] != 0) {
+			header.event_config = 1;
+			break;
+		}
+	}
+	memcpy(buf, &header, header_size);
+	memcpy(buf+header_size, event_mask_ptr, DCI_EVENT_MASK_SIZE);
+	for (i = 0; i < NUM_PERIPHERALS; i++) {
+		/*
+		 * Don't send to peripheral if its regular channel
+		 * is down. It may also mean that the peripheral doesn't
+		 * support DCI.
+		 */
+		if (check_peripheral_dci_support(i, DCI_LOCAL_PROC)) {
+			err = diag_dci_write_proc(i, DIAG_CNTL_TYPE, buf,
+				  header_size + DCI_EVENT_MASK_SIZE);
+			if (err != DIAG_DCI_NO_ERROR)
+				ret = DIAG_DCI_SEND_DATA_FAIL;
+		}
+	}
+
+	mutex_unlock(&event_mask.lock);
+	mutex_unlock(&dci_event_mask_mutex);
+
+	return ret;
+}
+
+void update_dci_cumulative_log_mask(int offset, unsigned int byte_index,
+						uint8_t byte_mask, int token)
+{
+	uint8_t *log_mask_ptr, *update_ptr = NULL;
+	bool is_set = false;
+	struct list_head *start, *temp;
+	struct diag_dci_client_tbl *entry = NULL;
+
+	mutex_lock(&dci_log_mask_mutex);
+	update_ptr = dci_ops_tbl[token].log_mask_composite;
+	if (!update_ptr) {
+		mutex_unlock(&dci_log_mask_mutex);
+		return;
+	}
+
+	update_ptr += offset;
+	/* update the dirty bit */
+	*(update_ptr+1) = 1;
+	update_ptr = update_ptr + byte_index;
+	list_for_each_safe(start, temp, &driver->dci_client_list) {
+		entry = list_entry(start, struct diag_dci_client_tbl, track);
+		if (entry->client_info.token != token)
+			continue;
+		log_mask_ptr = entry->dci_log_mask;
+		log_mask_ptr = log_mask_ptr + offset + byte_index;
+		if ((*log_mask_ptr & byte_mask) == byte_mask) {
+			is_set = true;
+			/* break even if one client has the log mask set */
+			break;
+		}
+	}
+
+	if (is_set == false)
+		*update_ptr &= ~byte_mask;
+	else
+		*update_ptr |= byte_mask;
+	mutex_unlock(&dci_log_mask_mutex);
+}
+
+void diag_dci_invalidate_cumulative_log_mask(int token)
+{
+	int i = 0;
+	struct list_head *start, *temp;
+	struct diag_dci_client_tbl *entry = NULL;
+	uint8_t *log_mask_ptr, *update_ptr = NULL;
+
+	/* Clear the composite mask and redo all the masks */
+	mutex_lock(&dci_log_mask_mutex);
+	update_ptr = dci_ops_tbl[token].log_mask_composite;
+	if (!update_ptr) {
+		mutex_unlock(&dci_log_mask_mutex);
+		return;
+	}
+
+	create_dci_log_mask_tbl(update_ptr, DCI_LOG_MASK_DIRTY);
+	list_for_each_safe(start, temp, &driver->dci_client_list) {
+		entry = list_entry(start, struct diag_dci_client_tbl, track);
+		if (entry->client_info.token != token)
+			continue;
+		log_mask_ptr = entry->dci_log_mask;
+		for (i = 0; i < DCI_LOG_MASK_SIZE; i++)
+			*(update_ptr+i) |= *(log_mask_ptr+i);
+	}
+	mutex_unlock(&dci_log_mask_mutex);
+}
+
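+/*
+ * Each equipment ID occupies a 514 byte record in a DCI log mask table:
+ * equip ID (1 byte), dirty byte (1 byte), then the per-item mask bytes
+ * (DCI_MAX_ITEMS_PER_LOG_CODE, 512 here). This is the origin of the
+ * 514 byte stride used when walking these tables.
+ */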
+static int dci_fill_log_mask(unsigned char *dest_ptr, unsigned char *src_ptr)
+{
+	struct diag_ctrl_log_mask header;
+	int header_len = sizeof(struct diag_ctrl_log_mask);
+
+	header.cmd_type = DIAG_CTRL_MSG_LOG_MASK;
+	header.num_items = DCI_MAX_ITEMS_PER_LOG_CODE;
+	header.data_len = 11 + DCI_MAX_ITEMS_PER_LOG_CODE;
+	header.stream_id = DCI_MASK_STREAM;
+	header.status = 3;
+	header.equip_id = *src_ptr;
+	header.log_mask_size = DCI_MAX_ITEMS_PER_LOG_CODE;
+	memcpy(dest_ptr, &header, header_len);
+	memcpy(dest_ptr + header_len, src_ptr + 2, DCI_MAX_ITEMS_PER_LOG_CODE);
+
+	return header_len + DCI_MAX_ITEMS_PER_LOG_CODE;
+}
+
+#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
+int diag_send_dci_log_mask_remote(int token)
+{
+	unsigned char *buf = NULL;
+	struct diag_dci_header_t dci_header;
+	int dci_header_size = sizeof(struct diag_dci_header_t);
+	int log_header_size = sizeof(struct diag_ctrl_log_mask);
+	uint8_t *log_mask_ptr = NULL;
+	int i, ret = DIAG_DCI_NO_ERROR, err = DIAG_DCI_NO_ERROR;
+	int updated;
+	uint32_t write_len = 0;
+
+	mutex_lock(&dci_log_mask_mutex);
+	log_mask_ptr = dci_ops_tbl[token].log_mask_composite;
+	if (!log_mask_ptr) {
+		mutex_unlock(&dci_log_mask_mutex);
+		return -EINVAL;
+	}
+
+	/* DCI header is common to all equipment IDs */
+	dci_header.start = CONTROL_CHAR;
+	dci_header.version = 1;
+	dci_header.length = log_header_size + DCI_MAX_ITEMS_PER_LOG_CODE + 1;
+	dci_header.cmd_code = DCI_CONTROL_PKT_CODE;
+
+	for (i = 0; i < DCI_MAX_LOG_CODES; i++) {
+		updated = 1;
+		write_len = 0;
+		if (!*(log_mask_ptr + 1)) {
+			log_mask_ptr += 514;
+			continue;
+		}
+
+		buf = dci_get_buffer_from_bridge(token);
+		if (!buf) {
+			pr_err("diag: In %s, unable to get dci buffers to write data\n",
+				__func__);
+			mutex_unlock(&dci_log_mask_mutex);
+			return -EAGAIN;
+		}
+
+		memcpy(buf + write_len, &dci_header, dci_header_size);
+		write_len += dci_header_size;
+		write_len += dci_fill_log_mask(buf + write_len, log_mask_ptr);
+		*(buf + write_len) = CONTROL_CHAR; /* End Terminator */
+		write_len += sizeof(uint8_t);
+		err = diag_dci_write_bridge(token, buf, write_len);
+		if (err) {
+			pr_err("diag: error writing log mask to remote processor, equip_id: %d, token: %d, err: %d\n",
+			       i, token, err);
+			diagmem_free(driver, buf, dci_ops_tbl[token].mempool);
+			updated = 0;
+		}
+		if (updated)
+			*(log_mask_ptr + 1) = 0; /* clear dirty byte */
+		log_mask_ptr += 514;
+	}
+	mutex_unlock(&dci_log_mask_mutex);
+	return ret;
+}
+#endif
+
+int diag_send_dci_log_mask(int token)
+{
+	void *buf = log_mask.update_buf;
+	int write_len = 0;
+	uint8_t *log_mask_ptr = NULL;
+	int i, j, ret = DIAG_DCI_NO_ERROR, err = DIAG_DCI_NO_ERROR;
+	int updated;
+
+	mutex_lock(&dci_log_mask_mutex);
+	log_mask_ptr = dci_ops_tbl[DCI_LOCAL_PROC].log_mask_composite;
+	if (!log_mask_ptr) {
+		mutex_unlock(&dci_log_mask_mutex);
+		return -EINVAL;
+	}
+
+	mutex_lock(&log_mask.lock);
+	for (i = 0; i < 16; i++) {
+		updated = 1;
+		/* If the dirty byte is not set, don't update this equip id */
+		if (!(*(log_mask_ptr + 1))) {
+			log_mask_ptr += 514;
+			continue;
+		}
+		write_len = dci_fill_log_mask(buf, log_mask_ptr);
+		for (j = 0; j < NUM_PERIPHERALS && write_len; j++) {
+			if (check_peripheral_dci_support(j, DCI_LOCAL_PROC)) {
+				err = diag_dci_write_proc(j, DIAG_CNTL_TYPE,
+					buf, write_len);
+				if (err != DIAG_DCI_NO_ERROR) {
+					updated = 0;
+					ret = DIAG_DCI_SEND_DATA_FAIL;
+				}
+			}
+		}
+		if (updated)
+			*(log_mask_ptr+1) = 0; /* clear dirty byte */
+		log_mask_ptr += 514;
+	}
+	mutex_unlock(&log_mask.lock);
+	mutex_unlock(&dci_log_mask_mutex);
+	return ret;
+}
+
+static int diag_dci_init_local(void)
+{
+	struct dci_ops_tbl_t *temp = &dci_ops_tbl[DCI_LOCAL_PROC];
+
+	create_dci_log_mask_tbl(temp->log_mask_composite, DCI_LOG_MASK_CLEAN);
+	create_dci_event_mask_tbl(temp->event_mask_composite);
+	temp->peripheral_status |= DIAG_CON_APSS;
+
+	return 0;
+}
+
+#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
+static void diag_dci_init_handshake_remote(void)
+{
+	int i;
+	struct dci_channel_status_t *temp = NULL;
+
+	for (i = DCI_REMOTE_BASE; i < NUM_DCI_PROC; i++) {
+		temp = &dci_channel_status[i];
+		temp->id = i;
+		setup_timer(&temp->wait_time, dci_chk_handshake, i);
+		INIT_WORK(&temp->handshake_work, dci_handshake_work_fn);
+	}
+}
+
+static int diag_dci_init_remote(void)
+{
+	int i;
+	struct dci_ops_tbl_t *temp = NULL;
+
+	diagmem_init(driver, POOL_TYPE_MDM_DCI_WRITE);
+
+	for (i = DCI_REMOTE_BASE; i < DCI_REMOTE_LAST; i++) {
+		temp = &dci_ops_tbl[i];
+		create_dci_log_mask_tbl(temp->log_mask_composite,
+					DCI_LOG_MASK_CLEAN);
+		create_dci_event_mask_tbl(temp->event_mask_composite);
+	}
+
+	partial_pkt.data = vzalloc(MAX_DCI_PACKET_SZ);
+	if (!partial_pkt.data) {
+		pr_err("diag: Unable to create partial pkt data\n");
+		return -ENOMEM;
+	}
+
+	partial_pkt.total_len = 0;
+	partial_pkt.read_len = 0;
+	partial_pkt.remaining = 0;
+	partial_pkt.processing = 0;
+
+	diag_dci_init_handshake_remote();
+
+	return 0;
+}
+#else
+static int diag_dci_init_remote(void)
+{
+	return 0;
+}
+#endif
+
+static int diag_dci_init_ops_tbl(void)
+{
+	int err = 0;
+
+	err = diag_dci_init_local();
+	if (err)
+		goto err;
+	err = diag_dci_init_remote();
+	if (err)
+		goto err;
+
+	return 0;
+
+err:
+	return -ENOMEM;
+}
+
+int diag_dci_init(void)
+{
+	int ret = 0;
+
+	driver->dci_tag = 0;
+	driver->dci_client_id = 0;
+	driver->num_dci_client = 0;
+	mutex_init(&driver->dci_mutex);
+	mutex_init(&dci_log_mask_mutex);
+	mutex_init(&dci_event_mask_mutex);
+	spin_lock_init(&ws_lock);
+
+	ret = diag_dci_init_ops_tbl();
+	if (ret)
+		goto err;
+
+	if (driver->apps_dci_buf == NULL) {
+		driver->apps_dci_buf = vzalloc(DCI_BUF_SIZE);
+		if (driver->apps_dci_buf == NULL)
+			goto err;
+	}
+	INIT_LIST_HEAD(&driver->dci_client_list);
+	INIT_LIST_HEAD(&driver->dci_req_list);
+
+	driver->diag_dci_wq = create_singlethread_workqueue("diag_dci_wq");
+	if (!driver->diag_dci_wq)
+		goto err;
+
+	INIT_WORK(&dci_data_drain_work, dci_data_drain_work_fn);
+
+	setup_timer(&dci_drain_timer, dci_drain_data, 0);
+	return DIAG_DCI_NO_ERROR;
+err:
+	pr_err("diag: Could not initialize diag DCI buffers");
+	vfree(driver->apps_dci_buf);
+	driver->apps_dci_buf = NULL;
+
+	if (driver->diag_dci_wq)
+		destroy_workqueue(driver->diag_dci_wq);
+	vfree(partial_pkt.data);
+	partial_pkt.data = NULL;
+	mutex_destroy(&driver->dci_mutex);
+	mutex_destroy(&dci_log_mask_mutex);
+	mutex_destroy(&dci_event_mask_mutex);
+	return DIAG_DCI_NO_REG;
+}
+
+void diag_dci_channel_init(void)
+{
+	uint8_t peripheral;
+
+	for (peripheral = 0; peripheral < NUM_PERIPHERALS; peripheral++) {
+		diagfwd_open(peripheral, TYPE_DCI);
+		diagfwd_open(peripheral, TYPE_DCI_CMD);
+	}
+}
+
+void diag_dci_exit(void)
+{
+	vfree(partial_pkt.data);
+	partial_pkt.data = NULL;
+	vfree(driver->apps_dci_buf);
+	driver->apps_dci_buf = NULL;
+	mutex_destroy(&driver->dci_mutex);
+	mutex_destroy(&dci_log_mask_mutex);
+	mutex_destroy(&dci_event_mask_mutex);
+	destroy_workqueue(driver->diag_dci_wq);
+}
+
+int diag_dci_clear_log_mask(int client_id)
+{
+	int err = DIAG_DCI_NO_ERROR, token = DCI_LOCAL_PROC;
+	uint8_t *update_ptr;
+	struct diag_dci_client_tbl *entry = NULL;
+
+	entry = diag_dci_get_client_entry(client_id);
+	if (!entry) {
+		pr_err("diag: In %s, invalid client entry\n", __func__);
+		return DIAG_DCI_TABLE_ERR;
+	}
+	token = entry->client_info.token;
+	update_ptr = dci_ops_tbl[token].log_mask_composite;
+
+	create_dci_log_mask_tbl(entry->dci_log_mask, DCI_LOG_MASK_CLEAN);
+	diag_dci_invalidate_cumulative_log_mask(token);
+
+	/*
+	 * Send updated mask to userspace clients only if the client
+	 * is registered on the local processor
+	 */
+	if (token == DCI_LOCAL_PROC)
+		diag_update_userspace_clients(DCI_LOG_MASKS_TYPE);
+	/* Send updated mask to peripherals */
+	err = dci_ops_tbl[token].send_log_mask(token);
+	return err;
+}
+
+int diag_dci_clear_event_mask(int client_id)
+{
+	int err = DIAG_DCI_NO_ERROR, token = DCI_LOCAL_PROC;
+	uint8_t *update_ptr;
+	struct diag_dci_client_tbl *entry = NULL;
+
+	entry = diag_dci_get_client_entry(client_id);
+	if (!entry) {
+		pr_err("diag: In %s, invalid client entry\n", __func__);
+		return DIAG_DCI_TABLE_ERR;
+	}
+	token = entry->client_info.token;
+	update_ptr = dci_ops_tbl[token].event_mask_composite;
+
+	create_dci_event_mask_tbl(entry->dci_event_mask);
+	diag_dci_invalidate_cumulative_event_mask(token);
+
+	/*
+	 * Send updated mask to userspace clients only if the client is
+	 * registered on the local processor
+	 */
+	if (token == DCI_LOCAL_PROC)
+		diag_update_userspace_clients(DCI_EVENT_MASKS_TYPE);
+	/* Send updated mask to peripherals */
+	err = dci_ops_tbl[token].send_event_mask(token);
+	return err;
+}
+
+uint8_t diag_dci_get_cumulative_real_time(int token)
+{
+	uint8_t real_time = MODE_NONREALTIME;
+	struct list_head *start, *temp;
+	struct diag_dci_client_tbl *entry = NULL;
+
+	list_for_each_safe(start, temp, &driver->dci_client_list) {
+		entry = list_entry(start, struct diag_dci_client_tbl, track);
+		if (entry->real_time == MODE_REALTIME &&
+					entry->client_info.token == token) {
+			real_time = MODE_REALTIME;
+			break;
+		}
+	}
+	return real_time;
+}
+
+int diag_dci_set_real_time(struct diag_dci_client_tbl *entry, uint8_t real_time)
+{
+	if (!entry) {
+		pr_err("diag: In %s, invalid client entry\n", __func__);
+		return 0;
+	}
+	entry->real_time = real_time;
+	return 1;
+}
+
+int diag_dci_register_client(struct diag_dci_reg_tbl_t *reg_entry)
+{
+	int i, err = 0;
+	struct diag_dci_client_tbl *new_entry = NULL;
+	struct diag_dci_buf_peripheral_t *proc_buf = NULL;
+
+	if (!reg_entry)
+		return DIAG_DCI_NO_REG;
+	if (!VALID_DCI_TOKEN(reg_entry->token)) {
+		pr_alert("diag: Invalid DCI client token, %d\n",
+						reg_entry->token);
+		return DIAG_DCI_NO_REG;
+	}
+
+	if (driver->dci_state == DIAG_DCI_NO_REG)
+		return DIAG_DCI_NO_REG;
+
+	if (driver->num_dci_client >= MAX_DCI_CLIENTS)
+		return DIAG_DCI_NO_REG;
+
+	new_entry = kzalloc(sizeof(struct diag_dci_client_tbl), GFP_KERNEL);
+	if (new_entry == NULL) {
+		pr_err("diag: unable to alloc memory\n");
+		return DIAG_DCI_NO_REG;
+	}
+
+	mutex_lock(&driver->dci_mutex);
+
+	new_entry->client = current;
+	new_entry->tgid = current->tgid;
+	new_entry->client_info.notification_list =
+				reg_entry->notification_list;
+	new_entry->client_info.signal_type =
+				reg_entry->signal_type;
+	new_entry->client_info.token = reg_entry->token;
+	switch (reg_entry->token) {
+	case DCI_LOCAL_PROC:
+		new_entry->num_buffers = NUM_DCI_PERIPHERALS;
+		break;
+	case DCI_MDM_PROC:
+		new_entry->num_buffers = 1;
+		break;
+	}
+
+	new_entry->buffers = NULL;
+	new_entry->real_time = MODE_REALTIME;
+	new_entry->in_service = 0;
+	INIT_LIST_HEAD(&new_entry->list_write_buf);
+	mutex_init(&new_entry->write_buf_mutex);
+	new_entry->dci_log_mask =  vzalloc(DCI_LOG_MASK_SIZE);
+	if (!new_entry->dci_log_mask) {
+		pr_err("diag: Unable to create log mask for client, %d",
+							driver->dci_client_id);
+		goto fail_alloc;
+	}
+	create_dci_log_mask_tbl(new_entry->dci_log_mask, DCI_LOG_MASK_CLEAN);
+
+	new_entry->dci_event_mask =  vzalloc(DCI_EVENT_MASK_SIZE);
+	if (!new_entry->dci_event_mask) {
+		pr_err("diag: Unable to create event mask for client, %d",
+							driver->dci_client_id);
+		goto fail_alloc;
+	}
+	create_dci_event_mask_tbl(new_entry->dci_event_mask);
+
+	new_entry->buffers = kzalloc(new_entry->num_buffers *
+				     sizeof(struct diag_dci_buf_peripheral_t),
+					GFP_KERNEL);
+	if (!new_entry->buffers) {
+		pr_err("diag: Unable to allocate buffers for peripherals in %s\n",
+								__func__);
+		goto fail_alloc;
+	}
+
+	for (i = 0; i < new_entry->num_buffers; i++) {
+		proc_buf = &new_entry->buffers[i];
+		if (!proc_buf)
+			goto fail_alloc;
+
+		mutex_init(&proc_buf->health_mutex);
+		mutex_init(&proc_buf->buf_mutex);
+		proc_buf->health.dropped_events = 0;
+		proc_buf->health.dropped_logs = 0;
+		proc_buf->health.received_events = 0;
+		proc_buf->health.received_logs = 0;
+		proc_buf->buf_primary = kzalloc(
+					sizeof(struct diag_dci_buffer_t),
+					GFP_KERNEL);
+		if (!proc_buf->buf_primary)
+			goto fail_alloc;
+		proc_buf->buf_cmd = kzalloc(sizeof(struct diag_dci_buffer_t),
+					GFP_KERNEL);
+		if (!proc_buf->buf_cmd)
+			goto fail_alloc;
+		err = diag_dci_init_buffer(proc_buf->buf_primary,
+					   DCI_BUF_PRIMARY);
+		if (err)
+			goto fail_alloc;
+		err = diag_dci_init_buffer(proc_buf->buf_cmd, DCI_BUF_CMD);
+		if (err)
+			goto fail_alloc;
+		proc_buf->buf_curr = proc_buf->buf_primary;
+	}
+
+	list_add_tail(&new_entry->track, &driver->dci_client_list);
+	driver->dci_client_id++;
+	new_entry->client_info.client_id = driver->dci_client_id;
+	reg_entry->client_id = driver->dci_client_id;
+	driver->num_dci_client++;
+	if (driver->num_dci_client == 1)
+		diag_update_proc_vote(DIAG_PROC_DCI, VOTE_UP, reg_entry->token);
+	queue_work(driver->diag_real_time_wq, &driver->diag_real_time_work);
+	mutex_unlock(&driver->dci_mutex);
+
+	return driver->dci_client_id;
+
+fail_alloc:
+	if (new_entry) {
+		for (i = 0; ((i < new_entry->num_buffers) &&
+			new_entry->buffers); i++) {
+			proc_buf = &new_entry->buffers[i];
+			if (proc_buf) {
+				mutex_destroy(&proc_buf->health_mutex);
+				if (proc_buf->buf_primary) {
+					vfree(proc_buf->buf_primary->data);
+					proc_buf->buf_primary->data = NULL;
+					mutex_destroy(
+					   &proc_buf->buf_primary->data_mutex);
+				}
+				kfree(proc_buf->buf_primary);
+				proc_buf->buf_primary = NULL;
+				if (proc_buf->buf_cmd) {
+					vfree(proc_buf->buf_cmd->data);
+					proc_buf->buf_cmd->data = NULL;
+					mutex_destroy(
+					   &proc_buf->buf_cmd->data_mutex);
+				}
+				kfree(proc_buf->buf_cmd);
+				proc_buf->buf_cmd = NULL;
+			}
+		}
+		vfree(new_entry->dci_event_mask);
+		new_entry->dci_event_mask = NULL;
+		vfree(new_entry->dci_log_mask);
+		new_entry->dci_log_mask = NULL;
+		kfree(new_entry->buffers);
+		new_entry->buffers = NULL;
+		kfree(new_entry);
+		new_entry = NULL;
+	}
+	mutex_unlock(&driver->dci_mutex);
+	return DIAG_DCI_NO_REG;
+}
+
+int diag_dci_deinit_client(struct diag_dci_client_tbl *entry)
+{
+	int ret = DIAG_DCI_NO_ERROR, real_time = MODE_REALTIME, i, peripheral;
+	struct diag_dci_buf_peripheral_t *proc_buf = NULL;
+	struct diag_dci_buffer_t *buf_entry, *temp;
+	struct list_head *start, *req_temp;
+	struct dci_pkt_req_entry_t *req_entry = NULL;
+	int token = DCI_LOCAL_PROC;
+
+	if (!entry)
+		return DIAG_DCI_NOT_SUPPORTED;
+
+	token = entry->client_info.token;
+	/*
+	 * Remove the entry from the list before freeing the buffers
+	 * to ensure that we don't have any invalid access.
+	 */
+	if (!list_empty(&entry->track))
+		list_del(&entry->track);
+	driver->num_dci_client--;
+	/*
+	 * Clear the client's log and event masks, update the cumulative
+	 * masks and send the masks to peripherals
+	 */
+	vfree(entry->dci_log_mask);
+	entry->dci_log_mask = NULL;
+	diag_dci_invalidate_cumulative_log_mask(token);
+	if (token == DCI_LOCAL_PROC)
+		diag_update_userspace_clients(DCI_LOG_MASKS_TYPE);
+	ret = dci_ops_tbl[token].send_log_mask(token);
+	if (ret != DIAG_DCI_NO_ERROR)
+		return ret;
+	vfree(entry->dci_event_mask);
+	entry->dci_event_mask = NULL;
+	diag_dci_invalidate_cumulative_event_mask(token);
+	if (token == DCI_LOCAL_PROC)
+		diag_update_userspace_clients(DCI_EVENT_MASKS_TYPE);
+	ret = dci_ops_tbl[token].send_event_mask(token);
+	if (ret != DIAG_DCI_NO_ERROR)
+		return ret;
+
+	list_for_each_safe(start, req_temp, &driver->dci_req_list) {
+		req_entry = list_entry(start, struct dci_pkt_req_entry_t,
+				       track);
+		if (req_entry->client_id == entry->client_info.client_id) {
+			if (!list_empty(&req_entry->track))
+				list_del(&req_entry->track);
+			kfree(req_entry);
+			req_entry = NULL;
+		}
+	}
+
+	/* Clean up any buffer that is pending write */
+	mutex_lock(&entry->write_buf_mutex);
+	list_for_each_entry_safe(buf_entry, temp, &entry->list_write_buf,
+							buf_track) {
+		if (!list_empty(&buf_entry->buf_track))
+			list_del(&buf_entry->buf_track);
+		if (buf_entry->buf_type == DCI_BUF_SECONDARY) {
+			mutex_lock(&buf_entry->data_mutex);
+			diagmem_free(driver, buf_entry->data, POOL_TYPE_DCI);
+			buf_entry->data = NULL;
+			mutex_unlock(&buf_entry->data_mutex);
+			kfree(buf_entry);
+			buf_entry = NULL;
+		} else if (buf_entry->buf_type == DCI_BUF_CMD) {
+			peripheral = buf_entry->data_source;
+			if (peripheral == APPS_DATA)
+				continue;
+		}
+		/*
+		 * These are buffers that can't be written to the client,
+		 * which means the copy cannot be completed. Make sure we
+		 * drop their references in the DCI wakeup source.
+		 */
+		diag_ws_on_copy_fail(DIAG_WS_DCI);
+	}
+	mutex_unlock(&entry->write_buf_mutex);
+
+	for (i = 0; i < entry->num_buffers; i++) {
+		proc_buf = &entry->buffers[i];
+		buf_entry = proc_buf->buf_curr;
+		mutex_lock(&proc_buf->buf_mutex);
+		/* Clean up secondary buffer from mempool that is active */
+		if (buf_entry && buf_entry->buf_type == DCI_BUF_SECONDARY) {
+			mutex_lock(&buf_entry->data_mutex);
+			diagmem_free(driver, buf_entry->data, POOL_TYPE_DCI);
+			buf_entry->data = NULL;
+			mutex_unlock(&buf_entry->data_mutex);
+			mutex_destroy(&buf_entry->data_mutex);
+			kfree(buf_entry);
+			buf_entry = NULL;
+		}
+
+		mutex_lock(&proc_buf->buf_primary->data_mutex);
+		vfree(proc_buf->buf_primary->data);
+		proc_buf->buf_primary->data = NULL;
+		mutex_unlock(&proc_buf->buf_primary->data_mutex);
+
+		mutex_lock(&proc_buf->buf_cmd->data_mutex);
+		vfree(proc_buf->buf_cmd->data);
+		proc_buf->buf_cmd->data = NULL;
+		mutex_unlock(&proc_buf->buf_cmd->data_mutex);
+
+		mutex_destroy(&proc_buf->health_mutex);
+		mutex_destroy(&proc_buf->buf_primary->data_mutex);
+		mutex_destroy(&proc_buf->buf_cmd->data_mutex);
+
+		kfree(proc_buf->buf_primary);
+		proc_buf->buf_primary = NULL;
+		kfree(proc_buf->buf_cmd);
+		proc_buf->buf_cmd = NULL;
+		mutex_unlock(&proc_buf->buf_mutex);
+	}
+	mutex_destroy(&entry->write_buf_mutex);
+
+	kfree(entry->buffers);
+	entry->buffers = NULL;
+	kfree(entry);
+	entry = NULL;
+
+	if (driver->num_dci_client == 0) {
+		diag_update_proc_vote(DIAG_PROC_DCI, VOTE_DOWN, token);
+	} else {
+		real_time = diag_dci_get_cumulative_real_time(token);
+		diag_update_real_time_vote(DIAG_PROC_DCI, real_time, token);
+	}
+	queue_work(driver->diag_real_time_wq, &driver->diag_real_time_work);
+
+	return DIAG_DCI_NO_ERROR;
+}
+
+int diag_dci_write_proc(uint8_t peripheral, int pkt_type, char *buf, int len)
+{
+	uint8_t dest_channel = TYPE_DATA;
+	int err = 0;
+
+	if (!buf || peripheral >= NUM_PERIPHERALS || len < 0 ||
+	    !(driver->feature[PERIPHERAL_MODEM].rcvd_feature_mask)) {
+		DIAG_LOG(DIAG_DEBUG_DCI,
+			"buf: 0x%pK, p: %d, len: %d, f_mask: %d\n",
+			buf, peripheral, len,
+			driver->feature[PERIPHERAL_MODEM].rcvd_feature_mask);
+		return -EINVAL;
+	}
+
+	if (pkt_type == DIAG_DATA_TYPE) {
+		dest_channel = TYPE_DCI_CMD;
+	} else if (pkt_type == DIAG_CNTL_TYPE) {
+		dest_channel = TYPE_CNTL;
+	} else {
+		pr_err("diag: Invalid DCI pkt type in %s", __func__);
+		return -EINVAL;
+	}
+
+	err = diagfwd_write(peripheral, dest_channel, buf, len);
+	if (err && err != -ENODEV) {
+		pr_err("diag: In %s, unable to write to peripheral: %d, type: %d, len: %d, err: %d\n",
+		       __func__, peripheral, dest_channel, len, err);
+	} else {
+		err = DIAG_DCI_NO_ERROR;
+	}
+
+	return err;
+}
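+
+/*
+ * Editor's sketch (illustrative only, not part of this patch): a caller
+ * forwarding an already framed DCI command to the modem's DCI command
+ * channel would do something like
+ *
+ *	err = diag_dci_write_proc(PERIPHERAL_MODEM, DIAG_DATA_TYPE,
+ *				  (char *)dci_pkt, dci_pkt_len);
+ *	if (err != DIAG_DCI_NO_ERROR)
+ *		pr_debug("diag: DCI write failed, err: %d\n", err);
+ *
+ * where dci_pkt and dci_pkt_len are hypothetical names for the request
+ * buffer and its length.
+ */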
+
+int diag_dci_copy_health_stats(struct diag_dci_health_stats_proc *stats_proc)
+{
+	struct diag_dci_client_tbl *entry = NULL;
+	struct diag_dci_health_t *health = NULL;
+	struct diag_dci_health_stats *stats = NULL;
+	int i, proc;
+
+	if (!stats_proc)
+		return -EINVAL;
+
+	stats = &stats_proc->health;
+	proc = stats_proc->proc;
+	if (proc < ALL_PROC || proc > APPS_DATA)
+		return -EINVAL;
+
+	entry = diag_dci_get_client_entry(stats_proc->client_id);
+	if (!entry)
+		return DIAG_DCI_NOT_SUPPORTED;
+
+	/*
+	 * If the client has registered for a remote processor, the
+	 * proc field has no effect, since such clients have only one
+	 * buffer.
+	 */
+	if (entry->client_info.token)
+		proc = 0;
+
+	stats->stats.dropped_logs = 0;
+	stats->stats.dropped_events = 0;
+	stats->stats.received_logs = 0;
+	stats->stats.received_events = 0;
+
+	if (proc != ALL_PROC) {
+		health = &entry->buffers[proc].health;
+		stats->stats.dropped_logs = health->dropped_logs;
+		stats->stats.dropped_events = health->dropped_events;
+		stats->stats.received_logs = health->received_logs;
+		stats->stats.received_events = health->received_events;
+		if (stats->reset_status) {
+			mutex_lock(&entry->buffers[proc].health_mutex);
+			health->dropped_logs = 0;
+			health->dropped_events = 0;
+			health->received_logs = 0;
+			health->received_events = 0;
+			mutex_unlock(&entry->buffers[proc].health_mutex);
+		}
+		return DIAG_DCI_NO_ERROR;
+	}
+
+	for (i = 0; i < entry->num_buffers; i++) {
+		health = &entry->buffers[i].health;
+		stats->stats.dropped_logs += health->dropped_logs;
+		stats->stats.dropped_events += health->dropped_events;
+		stats->stats.received_logs += health->received_logs;
+		stats->stats.received_events += health->received_events;
+		if (stats->reset_status) {
+			mutex_lock(&entry->buffers[i].health_mutex);
+			health->dropped_logs = 0;
+			health->dropped_events = 0;
+			health->received_logs = 0;
+			health->received_events = 0;
+			mutex_unlock(&entry->buffers[i].health_mutex);
+		}
+	}
+	return DIAG_DCI_NO_ERROR;
+}
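+
+/*
+ * Editor's sketch (illustrative only): pulling and resetting the health
+ * counters across all of a client's peripheral buffers:
+ *
+ *	struct diag_dci_health_stats_proc proc_stats = {
+ *		.client_id = client_id,
+ *		.proc = ALL_PROC,
+ *	};
+ *
+ *	proc_stats.health.reset_status = 1;
+ *	diag_dci_copy_health_stats(&proc_stats);
+ */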
+
+int diag_dci_get_support_list(struct diag_dci_peripherals_t *support_list)
+{
+	if (!support_list)
+		return -ENOMEM;
+
+	if (!VALID_DCI_TOKEN(support_list->proc))
+		return -EIO;
+
+	support_list->list = dci_ops_tbl[support_list->proc].peripheral_status;
+	return DIAG_DCI_NO_ERROR;
+}
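+
+/*
+ * Editor's sketch (illustrative only): querying which peripherals
+ * currently support DCI on the local processor:
+ *
+ *	struct diag_dci_peripherals_t support = { .proc = DCI_LOCAL_PROC };
+ *
+ *	if (diag_dci_get_support_list(&support) == DIAG_DCI_NO_ERROR)
+ *		pr_debug("diag: DCI peripheral mask: 0x%x\n", support.list);
+ */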
diff -Nruw linux-4.4.115-fbx/drivers/char/diag./diag_dci.h linux-4.4.115-fbx/drivers/char/diag/diag_dci.h
--- linux-4.4.115-fbx/drivers/char/diag./diag_dci.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/char/diag/diag_dci.h	2019-10-29 09:26:23.449201280 +0100
@@ -0,0 +1,329 @@
+/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#ifndef DIAG_DCI_H
+#define DIAG_DCI_H
+
+#define MAX_DCI_CLIENTS		10
+#define DCI_PKT_RSP_CODE	0x93
+#define DCI_DELAYED_RSP_CODE	0x94
+#define DCI_CONTROL_PKT_CODE	0x9A
+#define EXT_HDR_CMD_CODE	0x98
+#define LOG_CMD_CODE		0x10
+#define EVENT_CMD_CODE		0x60
+#define DCI_PKT_RSP_TYPE	0
+#define DCI_LOG_TYPE		-1
+#define DCI_EVENT_TYPE		-2
+#define DCI_EXT_HDR_TYPE	-3
+#define SET_LOG_MASK		1
+#define DISABLE_LOG_MASK	0
+#define MAX_EVENT_SIZE		512
+#define DCI_CLIENT_INDEX_INVALID -1
+#define DCI_LOG_CON_MIN_LEN		14
+#define DCI_EVENT_CON_MIN_LEN		16
+
+#define EXT_HDR_LEN		8
+#define EXT_HDR_VERSION		1
+
+#define DCI_BUF_PRIMARY		1
+#define DCI_BUF_SECONDARY	2
+#define DCI_BUF_CMD		3
+
+#ifdef CONFIG_DEBUG_FS
+#define DIAG_DCI_DEBUG_CNT	100
+#define DIAG_DCI_DEBUG_LEN	100
+#endif
+
+/*
+ * 16 log code categories, each has:
+ * 1 byte equip id + 1 dirty byte + 512-byte max log mask
+ */
+#define DCI_LOG_MASK_SIZE		(16*514)
+#define DCI_EVENT_MASK_SIZE		512
+#define DCI_MASK_STREAM			2
+#define DCI_MAX_LOG_CODES		16
+#define DCI_MAX_ITEMS_PER_LOG_CODE	512
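+
+/*
+ * Editor's sketch of the resulting mask layout (inferred from the sizes
+ * above, not taken from this patch): each of the 16 log code categories
+ * occupies 514 bytes,
+ *
+ *	[equip id: 1 byte][dirty: 1 byte][item bitmask: 512 bytes]
+ *
+ * so the byte holding a given item within its category would sit at an
+ * offset of the form 2 + (item / 8), with bit (item % 8) set.
+ */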
+
+#define DCI_LOG_MASK_CLEAN		0
+#define DCI_LOG_MASK_DIRTY		1
+
+#define MIN_DELAYED_RSP_LEN		12
+/*
+ * Maximum data size that peripherals send = 8.5K log +
+ * DCI header + footer (6 bytes)
+ */
+#define MAX_DCI_PACKET_SZ		8710
+
+extern unsigned int dci_max_reg;
+extern unsigned int dci_max_clients;
+
+#define DCI_LOCAL_PROC		0
+#define DCI_REMOTE_BASE		1
+#define DCI_MDM_PROC		DCI_REMOTE_BASE
+#define DCI_REMOTE_LAST		(DCI_REMOTE_BASE + 1)
+
+#ifndef CONFIG_DIAGFWD_BRIDGE_CODE
+#define NUM_DCI_PROC		1
+#else
+#define NUM_DCI_PROC		DCI_REMOTE_LAST
+#endif
+
+#define DCI_REMOTE_DATA	0
+
+#define VALID_DCI_TOKEN(x)	((x >= 0 && x < NUM_DCI_PROC) ? 1 : 0)
+#define BRIDGE_TO_TOKEN(x)	(x - DIAGFWD_MDM_DCI + DCI_REMOTE_BASE)
+#define TOKEN_TO_BRIDGE(x)	(dci_ops_tbl[x].ctx)
+
+#define DCI_MAGIC		(0xAABB1122)
+
+struct dci_pkt_req_t {
+	int uid;
+	int client_id;
+} __packed;
+
+struct dci_stream_req_t {
+	int type;
+	int client_id;
+	int set_flag;
+	int count;
+} __packed;
+
+struct dci_pkt_req_entry_t {
+	int client_id;
+	int uid;
+	int tag;
+	struct list_head track;
+} __packed;
+
+struct diag_dci_reg_tbl_t {
+	int client_id;
+	uint16_t notification_list;
+	int signal_type;
+	int token;
+} __packed;
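+
+/*
+ * Editor's sketch (illustrative only) of filling this table for a local
+ * registration; DIAG_CON_MPSS and SIGCONT stand in for whatever
+ * notification mask and signal type the client actually wants:
+ *
+ *	struct diag_dci_reg_tbl_t reg = {
+ *		.client_id = 0,
+ *		.notification_list = DIAG_CON_MPSS,
+ *		.signal_type = SIGCONT,
+ *		.token = DCI_LOCAL_PROC,
+ *	};
+ *
+ *	client_id = diag_dci_register_client(&reg);
+ */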
+
+struct diag_dci_health_t {
+	int dropped_logs;
+	int dropped_events;
+	int received_logs;
+	int received_events;
+};
+
+struct diag_dci_partial_pkt_t {
+	unsigned char *data;
+	uint32_t total_len;
+	uint32_t read_len;
+	uint32_t remaining;
+	uint8_t processing;
+} __packed;
+
+struct diag_dci_buffer_t {
+	unsigned char *data;
+	unsigned int data_len;
+	struct mutex data_mutex;
+	uint8_t in_busy;
+	uint8_t buf_type;
+	int data_source;
+	int capacity;
+	uint8_t in_list;
+	struct list_head buf_track;
+};
+
+struct diag_dci_buf_peripheral_t {
+	struct diag_dci_buffer_t *buf_curr;
+	struct diag_dci_buffer_t *buf_primary;
+	struct diag_dci_buffer_t *buf_cmd;
+	struct diag_dci_health_t health;
+	struct mutex health_mutex;
+	struct mutex buf_mutex;
+};
+
+struct diag_dci_client_tbl {
+	int tgid;
+	struct diag_dci_reg_tbl_t client_info;
+	struct task_struct *client;
+	unsigned char *dci_log_mask;
+	unsigned char *dci_event_mask;
+	uint8_t real_time;
+	struct list_head track;
+	struct diag_dci_buf_peripheral_t *buffers;
+	uint8_t num_buffers;
+	uint8_t in_service;
+	struct list_head list_write_buf;
+	struct mutex write_buf_mutex;
+};
+
+struct diag_dci_health_stats {
+	struct diag_dci_health_t stats;
+	int reset_status;
+};
+
+struct diag_dci_health_stats_proc {
+	int client_id;
+	struct diag_dci_health_stats health;
+	int proc;
+} __packed;
+
+struct diag_dci_peripherals_t {
+	int proc;
+	uint16_t list;
+} __packed;
+
+/* This is used for querying DCI log or event masks */
+struct diag_log_event_stats {
+	int client_id;
+	uint16_t code;
+	int is_set;
+} __packed;
+
+struct diag_dci_pkt_rsp_header_t {
+	int type;
+	int length;
+	uint8_t delete_flag;
+	int uid;
+} __packed;
+
+struct diag_dci_pkt_header_t {
+	uint8_t start;
+	uint8_t version;
+	uint16_t len;
+	uint8_t pkt_code;
+	int tag;
+} __packed;
+
+struct diag_dci_header_t {
+	uint8_t start;
+	uint8_t version;
+	uint16_t length;
+	uint8_t cmd_code;
+} __packed;
+
+struct dci_ops_tbl_t {
+	int ctx;
+	int mempool;
+	unsigned char log_mask_composite[DCI_LOG_MASK_SIZE];
+	unsigned char event_mask_composite[DCI_EVENT_MASK_SIZE];
+	int (*send_log_mask)(int token);
+	int (*send_event_mask)(int token);
+	uint16_t peripheral_status;
+} __packed;
+
+struct dci_channel_status_t {
+	int id;
+	int open;
+	int retry_count;
+	struct timer_list wait_time;
+	struct work_struct handshake_work;
+} __packed;
+
+extern struct dci_ops_tbl_t dci_ops_tbl[NUM_DCI_PROC];
+
+enum {
+	DIAG_DCI_NO_ERROR = 1001,	/* No error */
+	DIAG_DCI_NO_REG,		/* Could not register */
+	DIAG_DCI_NO_MEM,		/* Failed memory allocation */
+	DIAG_DCI_NOT_SUPPORTED,	/* This particular client is not supported */
+	DIAG_DCI_HUGE_PACKET,	/* Request/Response Packet too huge */
+	DIAG_DCI_SEND_DATA_FAIL,/* writing to kernel or peripheral fails */
+	DIAG_DCI_TABLE_ERR	/* Error dealing with registration tables */
+};
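+
+/*
+ * Editor's note: success here is DIAG_DCI_NO_ERROR (1001) rather than 0,
+ * so DCI callers (e.g. diag_dci_write_proc() in diag_dci.c) compare
+ * against DIAG_DCI_NO_ERROR instead of testing for zero.
+ */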
+
+#define DCI_HDR_SIZE					\
+	((sizeof(struct diag_dci_pkt_header_t) >	\
+	  sizeof(struct diag_dci_header_t)) ?		\
+	(sizeof(struct diag_dci_pkt_header_t) + 1) :	\
+	(sizeof(struct diag_dci_header_t) + 1))		\
+
+#define DCI_BUF_SIZE (uint32_t)(DIAG_MAX_REQ_SIZE + DCI_HDR_SIZE)
+
+#define DCI_REQ_HDR_SIZE				\
+	((sizeof(struct dci_pkt_req_t) >		\
+	  sizeof(struct dci_stream_req_t)) ?		\
+	(sizeof(struct dci_pkt_req_t)) :		\
+	(sizeof(struct dci_stream_req_t)))		\
+
+#define DCI_REQ_BUF_SIZE (uint32_t)(DIAG_MAX_REQ_SIZE + DCI_REQ_HDR_SIZE)
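+
+/*
+ * Editor's note: both _HDR_SIZE macros above simply pick the larger of
+ * the two candidate header structs (plus one spare byte in the
+ * DCI_HDR_SIZE case), so each _BUF_SIZE reserves DIAG_MAX_REQ_SIZE plus
+ * the worst-case header.
+ */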
+
+#ifdef CONFIG_DEBUG_FS
+/* To collect debug information during each smd read */
+struct diag_dci_data_info {
+	unsigned long iteration;
+	int data_size;
+	char time_stamp[DIAG_TS_SIZE];
+	uint8_t peripheral;
+	uint8_t ch_type;
+	uint8_t proc;
+};
+
+extern struct diag_dci_data_info *dci_traffic;
+extern struct mutex dci_stat_mutex;
+#endif
+
+int diag_dci_init(void);
+void diag_dci_channel_init(void);
+void diag_dci_exit(void);
+int diag_dci_register_client(struct diag_dci_reg_tbl_t *reg_entry);
+int diag_dci_deinit_client(struct diag_dci_client_tbl *entry);
+void diag_dci_channel_open_work(struct work_struct *);
+void diag_dci_notify_client(int peripheral_mask, int data, int proc);
+void diag_dci_wakeup_clients(void);
+void diag_process_apps_dci_read_data(int data_type, void *buf, int recd_bytes);
+void diag_dci_process_peripheral_data(struct diagfwd_info *p_info, void *buf,
+				      int recd_bytes);
+int diag_process_dci_transaction(unsigned char *buf, int len);
+void extract_dci_pkt_rsp(unsigned char *buf, int len, int data_source,
+			 int token);
+void extract_dci_ctrl_pkt(unsigned char *buf, int len, int token);
+struct diag_dci_client_tbl *diag_dci_get_client_entry(int client_id);
+struct diag_dci_client_tbl *dci_lookup_client_entry_pid(int tgid);
+void diag_process_remote_dci_read_data(int index, void *buf, int recd_bytes);
+int diag_dci_get_support_list(struct diag_dci_peripherals_t *support_list);
+/* DCI Log streaming functions */
+void update_dci_cumulative_log_mask(int offset, unsigned int byte_index,
+						uint8_t byte_mask, int token);
+void diag_dci_invalidate_cumulative_log_mask(int token);
+int diag_send_dci_log_mask(int token);
+void extract_dci_log(unsigned char *buf, int len, int data_source, int token,
+	void *ext_hdr);
+int diag_dci_clear_log_mask(int client_id);
+int diag_dci_query_log_mask(struct diag_dci_client_tbl *entry,
+			    uint16_t log_code);
+/* DCI event streaming functions */
+void update_dci_cumulative_event_mask(int offset, uint8_t byte_mask, int token);
+void diag_dci_invalidate_cumulative_event_mask(int token);
+int diag_send_dci_event_mask(int token);
+void extract_dci_events(unsigned char *buf, int len, int data_source,
+			int token, void *ext_hdr);
+/* DCI extended header handling functions */
+void extract_dci_ext_pkt(unsigned char *buf, int len, int data_source,
+		int token);
+int diag_dci_clear_event_mask(int client_id);
+int diag_dci_query_event_mask(struct diag_dci_client_tbl *entry,
+			      uint16_t event_id);
+void diag_dci_record_traffic(int read_bytes, uint8_t ch_type,
+			     uint8_t peripheral, uint8_t proc);
+uint8_t diag_dci_get_cumulative_real_time(int token);
+int diag_dci_set_real_time(struct diag_dci_client_tbl *entry,
+			   uint8_t real_time);
+int diag_dci_copy_health_stats(struct diag_dci_health_stats_proc *stats_proc);
+int diag_dci_write_proc(uint8_t peripheral, int pkt_type, char *buf, int len);
+void dci_drain_data(unsigned long data);
+
+#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
+int diag_send_dci_log_mask_remote(int token);
+int diag_send_dci_event_mask_remote(int token);
+unsigned char *dci_get_buffer_from_bridge(int token);
+int diag_dci_write_bridge(int token, unsigned char *buf, int len);
+int diag_dci_write_done_bridge(int index, unsigned char *buf, int len);
+int diag_dci_send_handshake_pkt(int index);
+#endif
+
+#endif
diff -Nruw linux-4.4.115-fbx/drivers/char/diag./diag_debugfs.c linux-4.4.115-fbx/drivers/char/diag/diag_debugfs.c
--- linux-4.4.115-fbx/drivers/char/diag./diag_debugfs.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/char/diag/diag_debugfs.c	2019-10-29 09:26:23.449201280 +0100
@@ -0,0 +1,1216 @@
+/* Copyright (c) 2011-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifdef CONFIG_DEBUG_FS
+
+#include <linux/slab.h>
+#include <linux/debugfs.h>
+#include <linux/atomic.h>
+#include <linux/uaccess.h>
+#include "diagchar.h"
+#include "diagfwd.h"
+#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
+#include "diagfwd_bridge.h"
+#endif
+#ifdef CONFIG_USB_QCOM_DIAG_BRIDGE
+#include "diagfwd_hsic.h"
+#include "diagfwd_smux.h"
+#endif
+#ifdef CONFIG_MSM_MHI
+#include "diagfwd_mhi.h"
+#endif
+#include "diagmem.h"
+#include "diag_dci.h"
+#include "diag_usb.h"
+#include "diagfwd_peripheral.h"
+#include "diagfwd_smd.h"
+#include "diagfwd_socket.h"
+#include "diagfwd_glink.h"
+#include "diag_debugfs.h"
+#include "diag_ipc_logging.h"
+
+#define DEBUG_BUF_SIZE	4096
+static struct dentry *diag_dbgfs_dent;
+static int diag_dbgfs_table_index;
+static int diag_dbgfs_mempool_index;
+static int diag_dbgfs_usbinfo_index;
+static int diag_dbgfs_smdinfo_index;
+static int diag_dbgfs_socketinfo_index;
+static int diag_dbgfs_glinkinfo_index;
+static int diag_dbgfs_hsicinfo_index;
+static int diag_dbgfs_mhiinfo_index;
+static int diag_dbgfs_bridgeinfo_index;
+static int diag_dbgfs_finished;
+static int diag_dbgfs_dci_data_index;
+static int diag_dbgfs_dci_finished;
+static struct mutex diag_dci_dbgfs_mutex;
+
+static ssize_t diag_dbgfs_read_status(struct file *file, char __user *ubuf,
+				      size_t count, loff_t *ppos)
+{
+	char *buf;
+	int ret, i;
+	unsigned int buf_size;
+
+	buf = kzalloc(sizeof(char) * DEBUG_BUF_SIZE, GFP_KERNEL);
+	if (!buf) {
+		pr_err("diag: %s, Error allocating memory\n", __func__);
+		return -ENOMEM;
+	}
+	buf_size = ksize(buf);
+	ret = scnprintf(buf, buf_size,
+		"CPU Tools ID: %d\n"
+		"Check Polling Response: %d\n"
+		"Polling Registered: %d\n"
+		"Uses Device Tree: %d\n"
+		"Apps Supports Separate CMDRSP: %d\n"
+		"Apps Supports HDLC Encoding: %d\n"
+		"Apps Supports Header Untagging: %d\n"
+		"Apps Supports Sockets: %d\n"
+		"Logging Mode: %d\n"
+		"RSP Buffer is Busy: %d\n"
+		"HDLC Disabled: %d\n"
+		"Time Sync Enabled: %d\n"
+		"MD session mode: %d\n"
+		"MD session mask: %d\n"
+		"Uses Time API: %d\n"
+		"Supports PD buffering: %d\n",
+		chk_config_get_id(),
+		chk_polling_response(),
+		driver->polling_reg_flag,
+		driver->use_device_tree,
+		driver->supports_separate_cmdrsp,
+		driver->supports_apps_hdlc_encoding,
+		driver->supports_apps_header_untagging,
+		driver->supports_sockets,
+		driver->logging_mode,
+		driver->rsp_buf_busy,
+		driver->hdlc_disabled,
+		driver->time_sync_enabled,
+		driver->md_session_mode,
+		driver->md_session_mask,
+		driver->uses_time_api,
+		driver->supports_pd_buffering);
+
+	for (i = 0; i < NUM_PERIPHERALS; i++) {
+		ret += scnprintf(buf+ret, buf_size-ret,
+			"p: %s Feature: %02x %02x |%c%c%c%c%c%c%c%c%c%c|\n",
+			PERIPHERAL_STRING(i),
+			driver->feature[i].feature_mask[0],
+			driver->feature[i].feature_mask[1],
+			driver->feature[i].rcvd_feature_mask ? 'F':'f',
+			driver->feature[i].peripheral_buffering ? 'B':'b',
+			driver->feature[i].separate_cmd_rsp ? 'C':'c',
+			driver->feature[i].encode_hdlc ? 'H':'h',
+			driver->feature[i].mask_centralization ? 'M':'m',
+			driver->feature[i].pd_buffering ? 'P':'p',
+			driver->feature[i].stm_support ? 'Q':'q',
+			driver->feature[i].sockets_enabled ? 'S':'s',
+			driver->feature[i].sent_feature_mask ? 'T':'t',
+			driver->feature[i].untag_header ? 'U':'u');
+	}
+
+#ifdef CONFIG_DIAG_OVER_USB
+	ret += scnprintf(buf+ret, buf_size-ret,
+		"USB Connected: %d\n",
+		driver->usb_connected);
+#endif
+
+	for (i = 0; i < DIAG_NUM_PROC; i++) {
+		ret += scnprintf(buf+ret, buf_size-ret,
+				 "Real Time Mode: %d: %d\n", i,
+				 driver->real_time_mode[i]);
+	}
+
+	ret = simple_read_from_buffer(ubuf, count, ppos, buf, ret);
+
+	kfree(buf);
+	return ret;
+}
+
+static ssize_t diag_dbgfs_read_dcistats(struct file *file,
+				char __user *ubuf, size_t count, loff_t *ppos)
+{
+	char *buf = NULL;
+	unsigned int bytes_remaining, bytes_written = 0;
+	unsigned int bytes_in_buf = 0, i = 0;
+	struct diag_dci_data_info *temp_data = dci_traffic;
+	unsigned int buf_size;
+
+	buf_size = (DEBUG_BUF_SIZE < count) ? DEBUG_BUF_SIZE : count;
+
+	if (diag_dbgfs_dci_finished) {
+		diag_dbgfs_dci_finished = 0;
+		return 0;
+	}
+
+	buf = kzalloc(sizeof(char) * buf_size, GFP_KERNEL);
+	if (ZERO_OR_NULL_PTR(buf)) {
+		pr_err("diag: %s, Error allocating memory\n", __func__);
+		return -ENOMEM;
+	}
+
+	buf_size = ksize(buf);
+	bytes_remaining = buf_size;
+
+	mutex_lock(&diag_dci_dbgfs_mutex);
+	if (diag_dbgfs_dci_data_index == 0) {
+		bytes_written =
+			scnprintf(buf, buf_size,
+			"number of clients: %d\n"
+			"dci proc active: %d\n"
+			"dci real time vote: %d\n",
+			driver->num_dci_client,
+			(driver->proc_active_mask & DIAG_PROC_DCI) ? 1 : 0,
+			(driver->proc_rt_vote_mask[DIAG_LOCAL_PROC] &
+							DIAG_PROC_DCI) ? 1 : 0);
+		bytes_in_buf += bytes_written;
+		bytes_remaining -= bytes_written;
+#ifdef CONFIG_DIAG_OVER_USB
+		bytes_written = scnprintf(buf+bytes_in_buf, bytes_remaining,
+			"usb_connected: %d\n",
+			driver->usb_connected);
+		bytes_in_buf += bytes_written;
+		bytes_remaining -= bytes_written;
+#endif
+		bytes_written = scnprintf(buf+bytes_in_buf,
+					  bytes_remaining,
+					  "dci power: active, relax: %lu, %lu\n",
+					  driver->diag_dev->power.wakeup->
+						active_count,
+					  driver->diag_dev->
+						power.wakeup->relax_count);
+		bytes_in_buf += bytes_written;
+		bytes_remaining -= bytes_written;
+
+	}
+	temp_data += diag_dbgfs_dci_data_index;
+	for (i = diag_dbgfs_dci_data_index; i < DIAG_DCI_DEBUG_CNT; i++) {
+		if (temp_data->iteration != 0) {
+			bytes_written = scnprintf(
+				buf + bytes_in_buf, bytes_remaining,
+				"i %-5ld\t"
+				"s %-5d\t"
+				"p %-5d\t"
+				"r %-5d\t"
+				"c %-5d\t"
+				"t %-15s\n",
+				temp_data->iteration,
+				temp_data->data_size,
+				temp_data->peripheral,
+				temp_data->proc,
+				temp_data->ch_type,
+				temp_data->time_stamp);
+			bytes_in_buf += bytes_written;
+			bytes_remaining -= bytes_written;
+			/* Check if there is room for another entry */
+			if (bytes_remaining < bytes_written)
+				break;
+		}
+		temp_data++;
+	}
+	diag_dbgfs_dci_data_index = (i >= DIAG_DCI_DEBUG_CNT) ? 0 : i + 1;
+	mutex_unlock(&diag_dci_dbgfs_mutex);
+	bytes_written = simple_read_from_buffer(ubuf, count, ppos, buf,
+								bytes_in_buf);
+	kfree(buf);
+	diag_dbgfs_dci_finished = 1;
+	return bytes_written;
+}
+
+static ssize_t diag_dbgfs_read_power(struct file *file, char __user *ubuf,
+				     size_t count, loff_t *ppos)
+{
+	char *buf;
+	int ret;
+	unsigned int buf_size;
+
+	buf = kzalloc(sizeof(char) * DEBUG_BUF_SIZE, GFP_KERNEL);
+	if (!buf) {
+		pr_err("diag: %s, Error allocating memory\n", __func__);
+		return -ENOMEM;
+	}
+
+	buf_size = ksize(buf);
+	ret = scnprintf(buf, buf_size,
+		"DCI reference count: %d\n"
+		"DCI copy count: %d\n"
+		"DCI Client Count: %d\n\n"
+		"Memory Device reference count: %d\n"
+		"Memory Device copy count: %d\n"
+		"Logging mode: %d\n\n"
+		"Wakeup source active count: %lu\n"
+		"Wakeup source relax count: %lu\n\n",
+		driver->dci_ws.ref_count,
+		driver->dci_ws.copy_count,
+		driver->num_dci_client,
+		driver->md_ws.ref_count,
+		driver->md_ws.copy_count,
+		driver->logging_mode,
+		driver->diag_dev->power.wakeup->active_count,
+		driver->diag_dev->power.wakeup->relax_count);
+
+	ret = simple_read_from_buffer(ubuf, count, ppos, buf, ret);
+
+	kfree(buf);
+	return ret;
+}
+
+static ssize_t diag_dbgfs_read_table(struct file *file, char __user *ubuf,
+				     size_t count, loff_t *ppos)
+{
+	char *buf;
+	int ret = 0;
+	int i = 0;
+	int is_polling = 0;
+	unsigned int bytes_remaining;
+	unsigned int bytes_in_buffer = 0;
+	unsigned int bytes_written;
+	unsigned int buf_size;
+	struct list_head *start;
+	struct list_head *temp;
+	struct diag_cmd_reg_t *item = NULL;
+
+	mutex_lock(&driver->cmd_reg_mutex);
+	if (diag_dbgfs_table_index == driver->cmd_reg_count) {
+		diag_dbgfs_table_index = 0;
+		mutex_unlock(&driver->cmd_reg_mutex);
+		return 0;
+	}
+
+	buf_size = (DEBUG_BUF_SIZE < count) ? DEBUG_BUF_SIZE : count;
+
+	buf = kzalloc(sizeof(char) * buf_size, GFP_KERNEL);
+	if (ZERO_OR_NULL_PTR(buf)) {
+		pr_err("diag: %s, Error allocating memory\n", __func__);
+		mutex_unlock(&driver->cmd_reg_mutex);
+		return -ENOMEM;
+	}
+	buf_size = ksize(buf);
+	bytes_remaining = buf_size;
+
+	if (diag_dbgfs_table_index == 0) {
+		bytes_written = scnprintf(buf+bytes_in_buffer, bytes_remaining,
+					  "Client ids: Modem: %d, LPASS: %d, WCNSS: %d, SLPI: %d, APPS: %d\n",
+					  PERIPHERAL_MODEM, PERIPHERAL_LPASS,
+					  PERIPHERAL_WCNSS, PERIPHERAL_SENSORS,
+					  APPS_DATA);
+		bytes_in_buffer += bytes_written;
+		bytes_remaining -= bytes_written;
+	}
+
+	list_for_each_safe(start, temp, &driver->cmd_reg_list) {
+		item = list_entry(start, struct diag_cmd_reg_t, link);
+		if (i < diag_dbgfs_table_index) {
+			i++;
+			continue;
+		}
+
+		is_polling = diag_cmd_chk_polling(&item->entry);
+		bytes_written = scnprintf(buf+bytes_in_buffer, bytes_remaining,
+					  "i: %3d, cmd_code: %4x, subsys_id: %4x, cmd_code_lo: %4x, cmd_code_hi: %4x, proc: %d, process_id: %5d %s\n",
+					  i++,
+					  item->entry.cmd_code,
+					  item->entry.subsys_id,
+					  item->entry.cmd_code_lo,
+					  item->entry.cmd_code_hi,
+					  item->proc,
+					  item->pid,
+					  (is_polling == DIAG_CMD_POLLING) ?
+					  "<-- Polling Cmd" : "");
+
+		bytes_in_buffer += bytes_written;
+
+		/* Check if there is room to add another table entry */
+		bytes_remaining = buf_size - bytes_in_buffer;
+
+		if (bytes_remaining < bytes_written)
+			break;
+	}
+	diag_dbgfs_table_index = i;
+	mutex_unlock(&driver->cmd_reg_mutex);
+
+	*ppos = 0;
+	ret = simple_read_from_buffer(ubuf, count, ppos, buf, bytes_in_buffer);
+
+	kfree(buf);
+	return ret;
+}
+
+static ssize_t diag_dbgfs_read_mempool(struct file *file, char __user *ubuf,
+						size_t count, loff_t *ppos)
+{
+	char *buf = NULL;
+	int ret = 0;
+	int i = 0;
+	unsigned int buf_size;
+	unsigned int bytes_remaining = 0;
+	unsigned int bytes_written = 0;
+	unsigned int bytes_in_buffer = 0;
+	struct diag_mempool_t *mempool = NULL;
+
+	if (diag_dbgfs_mempool_index >= NUM_MEMORY_POOLS) {
+		/* Done. Reset to prepare for future requests */
+		diag_dbgfs_mempool_index = 0;
+		return 0;
+	}
+
+	buf = kzalloc(sizeof(char) * DEBUG_BUF_SIZE, GFP_KERNEL);
+	if (ZERO_OR_NULL_PTR(buf)) {
+		pr_err("diag: %s, Error allocating memory\n", __func__);
+		return -ENOMEM;
+	}
+
+	buf_size = ksize(buf);
+	bytes_remaining = buf_size;
+	bytes_written = scnprintf(buf+bytes_in_buffer, bytes_remaining,
+			"%-24s\t"
+			"%-10s\t"
+			"%-5s\t"
+			"%-5s\t"
+			"%-5s\n",
+			"POOL", "HANDLE", "COUNT", "SIZE", "ITEMSIZE");
+	bytes_in_buffer += bytes_written;
+	bytes_remaining = buf_size - bytes_in_buffer;
+
+	for (i = diag_dbgfs_mempool_index; i < NUM_MEMORY_POOLS; i++) {
+		mempool = &diag_mempools[i];
+		bytes_written = scnprintf(buf+bytes_in_buffer, bytes_remaining,
+			"%-24s\t"
+			"%-10p\t"
+			"%-5d\t"
+			"%-5d\t"
+			"%-5d\n",
+			mempool->name,
+			mempool->pool,
+			mempool->count,
+			mempool->poolsize,
+			mempool->itemsize);
+		bytes_in_buffer += bytes_written;
+
+		/* Check if there is room to add another table entry */
+		bytes_remaining = buf_size - bytes_in_buffer;
+
+		if (bytes_remaining < bytes_written)
+			break;
+	}
+	diag_dbgfs_mempool_index = i+1;
+	*ppos = 0;
+	ret = simple_read_from_buffer(ubuf, count, ppos, buf, bytes_in_buffer);
+
+	kfree(buf);
+	return ret;
+}
+
+#ifdef CONFIG_DIAG_OVER_USB
+static ssize_t diag_dbgfs_read_usbinfo(struct file *file, char __user *ubuf,
+				       size_t count, loff_t *ppos)
+{
+	char *buf = NULL;
+	int ret = 0;
+	int i = 0;
+	unsigned int buf_size;
+	unsigned int bytes_remaining = 0;
+	unsigned int bytes_written = 0;
+	unsigned int bytes_in_buffer = 0;
+	struct diag_usb_info *usb_info = NULL;
+
+	if (diag_dbgfs_usbinfo_index >= NUM_DIAG_USB_DEV) {
+		/* Done. Reset to prepare for future requests */
+		diag_dbgfs_usbinfo_index = 0;
+		return 0;
+	}
+
+	buf = kzalloc(sizeof(char) * DEBUG_BUF_SIZE, GFP_KERNEL);
+	if (ZERO_OR_NULL_PTR(buf)) {
+		pr_err("diag: %s, Error allocating memory\n", __func__);
+		return -ENOMEM;
+	}
+
+	buf_size = ksize(buf);
+	bytes_remaining = buf_size;
+	for (i = diag_dbgfs_usbinfo_index; i < NUM_DIAG_USB_DEV; i++) {
+		usb_info = &diag_usb[i];
+		if (!usb_info->enabled)
+			continue;
+		bytes_written = scnprintf(buf+bytes_in_buffer, bytes_remaining,
+			"id: %d\n"
+			"name: %s\n"
+			"hdl: %pK\n"
+			"connected: %d\n"
+			"diag state: %d\n"
+			"enabled: %d\n"
+			"mempool: %s\n"
+			"read pending: %d\n"
+			"read count: %lu\n"
+			"write count: %lu\n"
+			"read work pending: %d\n"
+			"read done work pending: %d\n"
+			"connect work pending: %d\n"
+			"disconnect work pending: %d\n"
+			"max size supported: %d\n\n",
+			usb_info->id,
+			usb_info->name,
+			usb_info->hdl,
+			atomic_read(&usb_info->connected),
+			atomic_read(&usb_info->diag_state),
+			usb_info->enabled,
+			DIAG_MEMPOOL_GET_NAME(usb_info->mempool),
+			atomic_read(&usb_info->read_pending),
+			usb_info->read_cnt,
+			usb_info->write_cnt,
+			work_pending(&usb_info->read_work),
+			work_pending(&usb_info->read_done_work),
+			work_pending(&usb_info->connect_work),
+			work_pending(&usb_info->disconnect_work),
+			usb_info->max_size);
+		bytes_in_buffer += bytes_written;
+
+		/* Check if there is room to add another table entry */
+		bytes_remaining = buf_size - bytes_in_buffer;
+
+		if (bytes_remaining < bytes_written)
+			break;
+	}
+	diag_dbgfs_usbinfo_index = i+1;
+	*ppos = 0;
+	ret = simple_read_from_buffer(ubuf, count, ppos, buf, bytes_in_buffer);
+
+	kfree(buf);
+	return ret;
+}
+
+const struct file_operations diag_dbgfs_usbinfo_ops = {
+	.read = diag_dbgfs_read_usbinfo,
+};
+#endif
+
+static ssize_t diag_dbgfs_read_smdinfo(struct file *file, char __user *ubuf,
+				       size_t count, loff_t *ppos)
+{
+	char *buf = NULL;
+	int ret = 0;
+	int i = 0;
+	int j = 0;
+	unsigned int buf_size;
+	unsigned int bytes_remaining = 0;
+	unsigned int bytes_written = 0;
+	unsigned int bytes_in_buffer = 0;
+	struct diag_smd_info *smd_info = NULL;
+	struct diagfwd_info *fwd_ctxt = NULL;
+
+	if (diag_dbgfs_smdinfo_index >= NUM_PERIPHERALS) {
+		/* Done. Reset to prepare for future requests */
+		diag_dbgfs_smdinfo_index = 0;
+		return 0;
+	}
+
+	buf = kzalloc(sizeof(char) * DEBUG_BUF_SIZE, GFP_KERNEL);
+	if (ZERO_OR_NULL_PTR(buf)) {
+		pr_err("diag: %s, Error allocating memory\n", __func__);
+		return -ENOMEM;
+	}
+
+	buf_size = ksize(buf);
+	bytes_remaining = buf_size;
+	for (i = 0; i < NUM_TYPES; i++) {
+		for (j = 0; j < NUM_PERIPHERALS; j++) {
+			switch (i) {
+			case TYPE_DATA:
+				smd_info = &smd_data[j];
+				break;
+			case TYPE_CNTL:
+				smd_info = &smd_cntl[j];
+				break;
+			case TYPE_DCI:
+				smd_info = &smd_dci[j];
+				break;
+			case TYPE_CMD:
+				smd_info = &smd_cmd[j];
+				break;
+			case TYPE_DCI_CMD:
+				smd_info = &smd_dci_cmd[j];
+				break;
+			default:
+				kfree(buf);
+				return -EINVAL;
+			}
+
+			fwd_ctxt = (struct diagfwd_info *)(smd_info->fwd_ctxt);
+
+			bytes_written = scnprintf(buf+bytes_in_buffer,
+				bytes_remaining,
+				"name\t\t:\t%s\n"
+				"hdl\t\t:\t%pK\n"
+				"inited\t\t:\t%d\n"
+				"opened\t\t:\t%d\n"
+				"diag_state\t:\t%d\n"
+				"fifo size\t:\t%d\n"
+				"open pending\t:\t%d\n"
+				"close pending\t:\t%d\n"
+				"read pending\t:\t%d\n"
+				"buf_1 busy\t:\t%d\n"
+				"buf_2 busy\t:\t%d\n"
+				"bytes read\t:\t%lu\n"
+				"bytes written\t:\t%lu\n"
+				"fwd inited\t:\t%d\n"
+				"fwd opened\t:\t%d\n"
+				"fwd ch_open\t:\t%d\n\n",
+				smd_info->name,
+				smd_info->hdl,
+				smd_info->inited,
+				atomic_read(&smd_info->opened),
+				atomic_read(&smd_info->diag_state),
+				smd_info->fifo_size,
+				work_pending(&smd_info->open_work),
+				work_pending(&smd_info->close_work),
+				work_pending(&smd_info->read_work),
+				(fwd_ctxt && fwd_ctxt->buf_1) ?
+				atomic_read(&fwd_ctxt->buf_1->in_busy) : -1,
+				(fwd_ctxt && fwd_ctxt->buf_2) ?
+				atomic_read(&fwd_ctxt->buf_2->in_busy) : -1,
+				(fwd_ctxt) ? fwd_ctxt->read_bytes : 0,
+				(fwd_ctxt) ? fwd_ctxt->write_bytes : 0,
+				(fwd_ctxt) ? fwd_ctxt->inited : -1,
+				(fwd_ctxt) ?
+				atomic_read(&fwd_ctxt->opened) : -1,
+				(fwd_ctxt) ? fwd_ctxt->ch_open : -1);
+			bytes_in_buffer += bytes_written;
+
+			/* Check if there is room to add another table entry */
+			bytes_remaining = buf_size - bytes_in_buffer;
+
+			if (bytes_remaining < bytes_written)
+				break;
+		}
+	}
+	diag_dbgfs_smdinfo_index = i+1;
+	*ppos = 0;
+	ret = simple_read_from_buffer(ubuf, count, ppos, buf, bytes_in_buffer);
+
+	kfree(buf);
+	return ret;
+}
+
+static ssize_t diag_dbgfs_read_socketinfo(struct file *file, char __user *ubuf,
+					  size_t count, loff_t *ppos)
+{
+	char *buf = NULL;
+	int ret = 0;
+	int i = 0;
+	int j = 0;
+	unsigned int buf_size;
+	unsigned int bytes_remaining = 0;
+	unsigned int bytes_written = 0;
+	unsigned int bytes_in_buffer = 0;
+	struct diag_socket_info *info = NULL;
+	struct diagfwd_info *fwd_ctxt = NULL;
+
+	if (diag_dbgfs_socketinfo_index >= NUM_PERIPHERALS) {
+		/* Done. Reset to prepare for future requests */
+		diag_dbgfs_socketinfo_index = 0;
+		return 0;
+	}
+
+	buf = kzalloc(sizeof(char) * DEBUG_BUF_SIZE, GFP_KERNEL);
+	if (ZERO_OR_NULL_PTR(buf)) {
+		pr_err("diag: %s, Error allocating memory\n", __func__);
+		return -ENOMEM;
+	}
+
+	buf_size = ksize(buf);
+	bytes_remaining = buf_size;
+	for (i = 0; i < NUM_TYPES; i++) {
+		for (j = 0; j < NUM_PERIPHERALS; j++) {
+			switch (i) {
+			case TYPE_DATA:
+				info = &socket_data[j];
+				break;
+			case TYPE_CNTL:
+				info = &socket_cntl[j];
+				break;
+			case TYPE_DCI:
+				info = &socket_dci[j];
+				break;
+			case TYPE_CMD:
+				info = &socket_cmd[j];
+				break;
+			case TYPE_DCI_CMD:
+				info = &socket_dci_cmd[j];
+				break;
+			default:
+				kfree(buf);
+				return -EINVAL;
+			}
+
+			fwd_ctxt = (struct diagfwd_info *)(info->fwd_ctxt);
+
+			bytes_written = scnprintf(buf+bytes_in_buffer,
+				bytes_remaining,
+				"name\t\t:\t%s\n"
+				"hdl\t\t:\t%pK\n"
+				"inited\t\t:\t%d\n"
+				"opened\t\t:\t%d\n"
+				"diag_state\t:\t%d\n"
+				"buf_1 busy\t:\t%d\n"
+				"buf_2 busy\t:\t%d\n"
+				"flow ctrl count\t:\t%d\n"
+				"data_ready\t:\t%d\n"
+				"init pending\t:\t%d\n"
+				"read pending\t:\t%d\n"
+				"bytes read\t:\t%lu\n"
+				"bytes written\t:\t%lu\n"
+				"fwd inited\t:\t%d\n"
+				"fwd opened\t:\t%d\n"
+				"fwd ch_open\t:\t%d\n\n",
+				info->name,
+				info->hdl,
+				info->inited,
+				atomic_read(&info->opened),
+				atomic_read(&info->diag_state),
+				(fwd_ctxt && fwd_ctxt->buf_1) ?
+				atomic_read(&fwd_ctxt->buf_1->in_busy) : -1,
+				(fwd_ctxt && fwd_ctxt->buf_2) ?
+				atomic_read(&fwd_ctxt->buf_2->in_busy) : -1,
+				atomic_read(&info->flow_cnt),
+				info->data_ready,
+				work_pending(&info->init_work),
+				work_pending(&info->read_work),
+				(fwd_ctxt) ? fwd_ctxt->read_bytes : 0,
+				(fwd_ctxt) ? fwd_ctxt->write_bytes : 0,
+				(fwd_ctxt) ? fwd_ctxt->inited : -1,
+				(fwd_ctxt) ?
+				atomic_read(&fwd_ctxt->opened) : -1,
+				(fwd_ctxt) ? fwd_ctxt->ch_open : -1);
+			bytes_in_buffer += bytes_written;
+
+			/* Check if there is room to add another table entry */
+			bytes_remaining = buf_size - bytes_in_buffer;
+
+			if (bytes_remaining < bytes_written)
+				break;
+		}
+	}
+	diag_dbgfs_socketinfo_index = i+1;
+	*ppos = 0;
+	ret = simple_read_from_buffer(ubuf, count, ppos, buf, bytes_in_buffer);
+
+	kfree(buf);
+	return ret;
+}
+
+static ssize_t diag_dbgfs_read_glinkinfo(struct file *file, char __user *ubuf,
+					  size_t count, loff_t *ppos)
+{
+	char *buf = NULL;
+	int ret = 0;
+	int i = 0;
+	int j = 0;
+	unsigned int buf_size;
+	unsigned int bytes_remaining = 0;
+	unsigned int bytes_written = 0;
+	unsigned int bytes_in_buffer = 0;
+	struct diag_glink_info *info = NULL;
+	struct diagfwd_info *fwd_ctxt = NULL;
+
+	if (diag_dbgfs_glinkinfo_index >= NUM_PERIPHERALS) {
+		/* Done. Reset to prepare for future requests */
+		diag_dbgfs_glinkinfo_index = 0;
+		return 0;
+	}
+
+	buf = kzalloc(sizeof(char) * DEBUG_BUF_SIZE, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	buf_size = ksize(buf);
+	bytes_remaining = buf_size;
+	for (i = 0; i < NUM_TYPES; i++) {
+		for (j = 0; j < NUM_PERIPHERALS; j++) {
+			switch (i) {
+			case TYPE_DATA:
+				info = &glink_data[j];
+				break;
+			case TYPE_CNTL:
+				info = &glink_cntl[j];
+				break;
+			case TYPE_DCI:
+				info = &glink_dci[j];
+				break;
+			case TYPE_CMD:
+				info = &glink_cmd[j];
+				break;
+			case TYPE_DCI_CMD:
+				info = &glink_dci_cmd[j];
+				break;
+			default:
+				kfree(buf);
+				return -EINVAL;
+			}
+
+			fwd_ctxt = (struct diagfwd_info *)(info->fwd_ctxt);
+
+			bytes_written = scnprintf(buf+bytes_in_buffer,
+				bytes_remaining,
+				"name\t\t:\t%s\n"
+				"hdl\t\t:\t%pK\n"
+				"inited\t\t:\t%d\n"
+				"opened\t\t:\t%d\n"
+				"diag_state\t:\t%d\n"
+				"buf_1 busy\t:\t%d\n"
+				"buf_2 busy\t:\t%d\n"
+				"tx_intent_ready\t:\t%d\n"
+				"open pending\t:\t%d\n"
+				"close pending\t:\t%d\n"
+				"read pending\t:\t%d\n"
+				"bytes read\t:\t%lu\n"
+				"bytes written\t:\t%lu\n"
+				"fwd inited\t:\t%d\n"
+				"fwd opened\t:\t%d\n"
+				"fwd ch_open\t:\t%d\n\n",
+				info->name,
+				info->hdl,
+				info->inited,
+				atomic_read(&info->opened),
+				atomic_read(&info->diag_state),
+				(fwd_ctxt && fwd_ctxt->buf_1) ?
+				atomic_read(&fwd_ctxt->buf_1->in_busy) : -1,
+				(fwd_ctxt && fwd_ctxt->buf_2) ?
+				atomic_read(&fwd_ctxt->buf_2->in_busy) : -1,
+				atomic_read(&info->tx_intent_ready),
+				work_pending(&info->open_work),
+				work_pending(&info->close_work),
+				work_pending(&info->read_work),
+				(fwd_ctxt) ? fwd_ctxt->read_bytes : 0,
+				(fwd_ctxt) ? fwd_ctxt->write_bytes : 0,
+				(fwd_ctxt) ? fwd_ctxt->inited : -1,
+				(fwd_ctxt) ?
+				atomic_read(&fwd_ctxt->opened) : -1,
+				(fwd_ctxt) ? fwd_ctxt->ch_open : -1);
+			bytes_in_buffer += bytes_written;
+
+			/* Check if there is room to add another table entry */
+			bytes_remaining = buf_size - bytes_in_buffer;
+
+			if (bytes_remaining < bytes_written)
+				break;
+		}
+	}
+	diag_dbgfs_glinkinfo_index = i+1;
+	*ppos = 0;
+	ret = simple_read_from_buffer(ubuf, count, ppos, buf, bytes_in_buffer);
+
+	kfree(buf);
+	return ret;
+}
+
+static ssize_t diag_dbgfs_write_debug(struct file *fp, const char __user *buf,
+				      size_t count, loff_t *ppos)
+{
+	char cmd[10];
+	long value = 0;
+	int len = 0;
+
+	if (count < 1)
+		return -EINVAL;
+
+	len = (count < (sizeof(cmd) - 1)) ? count : sizeof(cmd) - 1;
+	if (copy_from_user(cmd, buf, len))
+		return -EFAULT;
+
+	cmd[len] = 0;
+	if (cmd[len-1] == '\n') {
+		cmd[len-1] = 0;
+		len--;
+	}
+
+	if (kstrtol(cmd, 10, &value))
+		return -EINVAL;
+
+	if (value < 0)
+		return -EINVAL;
+
+	diag_debug_mask = (uint16_t)value;
+	return count;
+}
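+
+/*
+ * Editor's sketch: with debugfs mounted at the usual location, the mask
+ * can be set from the shell, e.g.
+ *
+ *	echo 255 > /sys/kernel/debug/diag/debug
+ *
+ * which lands in diag_debug_mask via the kstrtol() call above.
+ */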
+
+#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
+#ifdef CONFIG_USB_QCOM_DIAG_BRIDGE
+static ssize_t diag_dbgfs_read_hsicinfo(struct file *file, char __user *ubuf,
+					size_t count, loff_t *ppos)
+{
+	char *buf = NULL;
+	int ret = 0;
+	int i = 0;
+	unsigned int buf_size;
+	unsigned int bytes_remaining = 0;
+	unsigned int bytes_written = 0;
+	unsigned int bytes_in_buffer = 0;
+	struct diag_hsic_info *hsic_info = NULL;
+
+	if (diag_dbgfs_hsicinfo_index >= NUM_HSIC_DEV) {
+		/* Done. Reset to prepare for future requests */
+		diag_dbgfs_hsicinfo_index = 0;
+		return 0;
+	}
+
+	buf = kzalloc(sizeof(char) * DEBUG_BUF_SIZE, GFP_KERNEL);
+	if (ZERO_OR_NULL_PTR(buf)) {
+		pr_err("diag: %s, Error allocating memory\n", __func__);
+		return -ENOMEM;
+	}
+
+	buf_size = ksize(buf);
+	bytes_remaining = buf_size;
+	for (i = diag_dbgfs_hsicinfo_index; i < NUM_HSIC_DEV; i++) {
+		hsic_info = &diag_hsic[i];
+		if (!hsic_info->enabled)
+			continue;
+		bytes_written = scnprintf(buf+bytes_in_buffer, bytes_remaining,
+			"id: %d\n"
+			"name: %s\n"
+			"bridge index: %s\n"
+			"opened: %d\n"
+			"enabled: %d\n"
+			"suspended: %d\n"
+			"mempool: %s\n"
+			"read work pending: %d\n"
+			"open work pending: %d\n"
+			"close work pending: %d\n\n",
+			hsic_info->id,
+			hsic_info->name,
+			DIAG_BRIDGE_GET_NAME(hsic_info->dev_id),
+			hsic_info->opened,
+			hsic_info->enabled,
+			hsic_info->suspended,
+			DIAG_MEMPOOL_GET_NAME(hsic_info->mempool),
+			work_pending(&hsic_info->read_work),
+			work_pending(&hsic_info->open_work),
+			work_pending(&hsic_info->close_work));
+		bytes_in_buffer += bytes_written;
+
+		/* Check if there is room to add another table entry */
+		bytes_remaining = buf_size - bytes_in_buffer;
+
+		if (bytes_remaining < bytes_written)
+			break;
+	}
+	diag_dbgfs_hsicinfo_index = i+1;
+	*ppos = 0;
+	ret = simple_read_from_buffer(ubuf, count, ppos, buf, bytes_in_buffer);
+
+	kfree(buf);
+	return ret;
+}
+
+const struct file_operations diag_dbgfs_hsicinfo_ops = {
+	.read = diag_dbgfs_read_hsicinfo,
+};
+#endif
+#ifdef CONFIG_MSM_MHI
+static ssize_t diag_dbgfs_read_mhiinfo(struct file *file, char __user *ubuf,
+				       size_t count, loff_t *ppos)
+{
+	char *buf = NULL;
+	int ret = 0;
+	int i = 0;
+	unsigned int buf_size;
+	unsigned int bytes_remaining = 0;
+	unsigned int bytes_written = 0;
+	unsigned int bytes_in_buffer = 0;
+	struct diag_mhi_info *mhi_info = NULL;
+
+	if (diag_dbgfs_mhiinfo_index >= NUM_MHI_DEV) {
+		/* Done. Reset to prepare for future requests */
+		diag_dbgfs_mhiinfo_index = 0;
+		return 0;
+	}
+
+	buf = kzalloc(sizeof(char) * DEBUG_BUF_SIZE, GFP_KERNEL);
+	if (ZERO_OR_NULL_PTR(buf)) {
+		pr_err("diag: %s, Error allocating memory\n", __func__);
+		return -ENOMEM;
+	}
+
+	buf_size = ksize(buf);
+	bytes_remaining = buf_size;
+	for (i = diag_dbgfs_mhiinfo_index; i < NUM_MHI_DEV; i++) {
+		mhi_info = &diag_mhi[i];
+		bytes_written = scnprintf(buf+bytes_in_buffer, bytes_remaining,
+			"id: %d\n"
+			"name: %s\n"
+			"bridge index: %s\n"
+			"mempool: %s\n"
+			"read ch opened: %d\n"
+			"read ch hdl: %pK\n"
+			"write ch opened: %d\n"
+			"write ch hdl: %pK\n"
+			"read work pending: %d\n"
+			"read done work pending: %d\n"
+			"open work pending: %d\n"
+			"close work pending: %d\n\n",
+			mhi_info->id,
+			mhi_info->name,
+			DIAG_BRIDGE_GET_NAME(mhi_info->dev_id),
+			DIAG_MEMPOOL_GET_NAME(mhi_info->mempool),
+			atomic_read(&mhi_info->read_ch.opened),
+			mhi_info->read_ch.hdl,
+			atomic_read(&mhi_info->write_ch.opened),
+			mhi_info->write_ch.hdl,
+			work_pending(&mhi_info->read_work),
+			work_pending(&mhi_info->read_done_work),
+			work_pending(&mhi_info->open_work),
+			work_pending(&mhi_info->close_work));
+		bytes_in_buffer += bytes_written;
+
+		/* Check if there is room to add another table entry */
+		bytes_remaining = buf_size - bytes_in_buffer;
+
+		if (bytes_remaining < bytes_written)
+			break;
+	}
+	diag_dbgfs_mhiinfo_index = i+1;
+	*ppos = 0;
+	ret = simple_read_from_buffer(ubuf, count, ppos, buf, bytes_in_buffer);
+
+	kfree(buf);
+	return ret;
+}
+
+const struct file_operations diag_dbgfs_mhiinfo_ops = {
+	.read = diag_dbgfs_read_mhiinfo,
+};
+
+#endif
+static ssize_t diag_dbgfs_read_bridge(struct file *file, char __user *ubuf,
+				      size_t count, loff_t *ppos)
+{
+	char *buf = NULL;
+	int ret = 0;
+	int i = 0;
+	unsigned int buf_size;
+	unsigned int bytes_remaining = 0;
+	unsigned int bytes_written = 0;
+	unsigned int bytes_in_buffer = 0;
+	struct diagfwd_bridge_info *info = NULL;
+
+	if (diag_dbgfs_bridgeinfo_index >= NUM_REMOTE_DEV) {
+		/* Done. Reset to prepare for future requests */
+		diag_dbgfs_bridgeinfo_index = 0;
+		return 0;
+	}
+
+	buf = kzalloc(sizeof(char) * DEBUG_BUF_SIZE, GFP_KERNEL);
+	if (ZERO_OR_NULL_PTR(buf)) {
+		pr_err("diag: %s, Error allocating memory\n", __func__);
+		return -ENOMEM;
+	}
+
+	buf_size = ksize(buf);
+	bytes_remaining = buf_size;
+	for (i = diag_dbgfs_bridgeinfo_index; i < NUM_REMOTE_DEV; i++) {
+		info = &bridge_info[i];
+		if (!info->inited)
+			continue;
+		bytes_written = scnprintf(buf+bytes_in_buffer, bytes_remaining,
+			"id: %d\n"
+			"name: %s\n"
+			"type: %d\n"
+			"inited: %d\n"
+			"ctxt: %d\n"
+			"dev_ops: %pK\n"
+			"dci_read_buf: %pK\n"
+			"dci_read_ptr: %pK\n"
+			"dci_read_len: %d\n\n",
+			info->id,
+			info->name,
+			info->type,
+			info->inited,
+			info->ctxt,
+			info->dev_ops,
+			info->dci_read_buf,
+			info->dci_read_ptr,
+			info->dci_read_len);
+		bytes_in_buffer += bytes_written;
+
+		/* Check if there is room to add another table entry */
+		bytes_remaining = buf_size - bytes_in_buffer;
+
+		if (bytes_remaining < bytes_written)
+			break;
+	}
+	diag_dbgfs_bridgeinfo_index = i+1;
+	*ppos = 0;
+	ret = simple_read_from_buffer(ubuf, count, ppos, buf, bytes_in_buffer);
+
+	kfree(buf);
+	return ret;
+}
+
+const struct file_operations diag_dbgfs_bridge_ops = {
+	.read = diag_dbgfs_read_bridge,
+};
+
+#endif
+
+const struct file_operations diag_dbgfs_status_ops = {
+	.read = diag_dbgfs_read_status,
+};
+
+const struct file_operations diag_dbgfs_smdinfo_ops = {
+	.read = diag_dbgfs_read_smdinfo,
+};
+
+const struct file_operations diag_dbgfs_socketinfo_ops = {
+	.read = diag_dbgfs_read_socketinfo,
+};
+
+const struct file_operations diag_dbgfs_glinkinfo_ops = {
+	.read = diag_dbgfs_read_glinkinfo,
+};
+
+const struct file_operations diag_dbgfs_table_ops = {
+	.read = diag_dbgfs_read_table,
+};
+
+const struct file_operations diag_dbgfs_mempool_ops = {
+	.read = diag_dbgfs_read_mempool,
+};
+
+const struct file_operations diag_dbgfs_dcistats_ops = {
+	.read = diag_dbgfs_read_dcistats,
+};
+
+const struct file_operations diag_dbgfs_power_ops = {
+	.read = diag_dbgfs_read_power,
+};
+
+const struct file_operations diag_dbgfs_debug_ops = {
+	.write = diag_dbgfs_write_debug
+};
+
+int diag_debugfs_init(void)
+{
+	struct dentry *entry = NULL;
+
+	diag_dbgfs_dent = debugfs_create_dir("diag", 0);
+	if (IS_ERR_OR_NULL(diag_dbgfs_dent))
+		return -ENOMEM;
+
+	entry = debugfs_create_file("status", 0444, diag_dbgfs_dent, 0,
+				    &diag_dbgfs_status_ops);
+	if (!entry)
+		goto err;
+
+	entry = debugfs_create_file("smdinfo", 0444, diag_dbgfs_dent, 0,
+				    &diag_dbgfs_smdinfo_ops);
+	if (!entry)
+		goto err;
+
+	entry = debugfs_create_file("socketinfo", 0444, diag_dbgfs_dent, 0,
+				    &diag_dbgfs_socketinfo_ops);
+	if (!entry)
+		goto err;
+
+	entry = debugfs_create_file("glinkinfo", 0444, diag_dbgfs_dent, 0,
+				    &diag_dbgfs_glinkinfo_ops);
+	if (!entry)
+		goto err;
+
+	entry = debugfs_create_file("table", 0444, diag_dbgfs_dent, 0,
+				    &diag_dbgfs_table_ops);
+	if (!entry)
+		goto err;
+
+	entry = debugfs_create_file("mempool", 0444, diag_dbgfs_dent, 0,
+				    &diag_dbgfs_mempool_ops);
+	if (!entry)
+		goto err;
+
+#ifdef CONFIG_DIAG_OVER_USB
+	entry = debugfs_create_file("usbinfo", 0444, diag_dbgfs_dent, 0,
+				    &diag_dbgfs_usbinfo_ops);
+	if (!entry)
+		goto err;
+#endif
+
+	entry = debugfs_create_file("dci_stats", 0444, diag_dbgfs_dent, 0,
+				    &diag_dbgfs_dcistats_ops);
+	if (!entry)
+		goto err;
+
+	entry = debugfs_create_file("power", 0444, diag_dbgfs_dent, 0,
+				    &diag_dbgfs_power_ops);
+	if (!entry)
+		goto err;
+
+	entry = debugfs_create_file("debug", 0444, diag_dbgfs_dent, 0,
+				    &diag_dbgfs_debug_ops);
+	if (!entry)
+		goto err;
+
+#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
+	entry = debugfs_create_file("bridge", 0444, diag_dbgfs_dent, 0,
+				    &diag_dbgfs_bridge_ops);
+	if (!entry)
+		goto err;
+#ifdef CONFIG_USB_QCOM_DIAG_BRIDGE
+	entry = debugfs_create_file("hsicinfo", 0444, diag_dbgfs_dent, 0,
+				    &diag_dbgfs_hsicinfo_ops);
+	if (!entry)
+		goto err;
+#endif
+#ifdef CONFIG_MSM_MHI
+	entry = debugfs_create_file("mhiinfo", 0444, diag_dbgfs_dent, 0,
+				    &diag_dbgfs_mhiinfo_ops);
+	if (!entry)
+		goto err;
+#endif
+#endif
+	diag_dbgfs_table_index = 0;
+	diag_dbgfs_mempool_index = 0;
+	diag_dbgfs_usbinfo_index = 0;
+	diag_dbgfs_smdinfo_index = 0;
+	diag_dbgfs_socketinfo_index = 0;
+	diag_dbgfs_hsicinfo_index = 0;
+	diag_dbgfs_bridgeinfo_index = 0;
+	diag_dbgfs_mhiinfo_index = 0;
+	diag_dbgfs_finished = 0;
+	diag_dbgfs_dci_data_index = 0;
+	diag_dbgfs_dci_finished = 0;
+
+	/* DCI related structures */
+	dci_traffic = kcalloc(DIAG_DCI_DEBUG_CNT,
+			      sizeof(struct diag_dci_data_info), GFP_KERNEL);
+	if (ZERO_OR_NULL_PTR(dci_traffic))
+		pr_warn("diag: could not allocate memory for dci debug info\n");
+
+	mutex_init(&dci_stat_mutex);
+	mutex_init(&diag_dci_dbgfs_mutex);
+	return 0;
+err:
+	kfree(dci_traffic);
+	debugfs_remove_recursive(diag_dbgfs_dent);
+	return -ENOMEM;
+}
+
+void diag_debugfs_cleanup(void)
+{
+	if (diag_dbgfs_dent) {
+		debugfs_remove_recursive(diag_dbgfs_dent);
+		diag_dbgfs_dent = NULL;
+	}
+
+	kfree(dci_traffic);
+	mutex_destroy(&dci_stat_mutex);
+	mutex_destroy(&diag_dci_dbgfs_mutex);
+}
+#else
+int diag_debugfs_init(void) { return 0; }
+void diag_debugfs_cleanup(void) { }
+#endif
diff -Nruw linux-4.4.115-fbx/drivers/char/diag./diag_debugfs.h linux-4.4.115-fbx/drivers/char/diag/diag_debugfs.h
--- linux-4.4.115-fbx/drivers/char/diag./diag_debugfs.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/char/diag/diag_debugfs.h	2019-01-22 16:16:22.955241444 +0100
@@ -0,0 +1,19 @@
+/* Copyright (c) 2012, 2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef DIAG_DEBUGFS_H
+#define DIAG_DEBUGFS_H
+
+int diag_debugfs_init(void);
+void diag_debugfs_cleanup(void);
+
+#endif
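+
+/*
+ * Illustrative call sites (not part of this patch), assuming the usual
+ * diagchar layout where module init/exit own the driver lifetime:
+ *
+ *	if (diag_debugfs_init())
+ *		pr_warn("diag: debugfs nodes unavailable\n");
+ *	...
+ *	diag_debugfs_cleanup();
+ */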
diff -Nruw linux-4.4.115-fbx/drivers/char/diag./diagfwd_bridge.h linux-4.4.115-fbx/drivers/char/diag/diagfwd_bridge.h
--- linux-4.4.115-fbx/drivers/char/diag./diagfwd_bridge.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/char/diag/diagfwd_bridge.h	2019-10-29 09:26:23.453201319 +0100
@@ -0,0 +1,67 @@
+/* Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef DIAGFWD_BRIDGE_H
+#define DIAGFWD_BRIDGE_H
+
+/*
+ * Add Data channels at the top half and the DCI channels at the
+ * bottom half of this list.
+ */
+#define DIAGFWD_MDM		0
+#define DIAGFWD_SMUX		1
+#define NUM_REMOTE_DATA_DEV	2
+#define DIAGFWD_MDM_DCI		NUM_REMOTE_DATA_DEV
+#define NUM_REMOTE_DCI_DEV	(DIAGFWD_MDM_DCI - NUM_REMOTE_DATA_DEV + 1)
+#define NUM_REMOTE_DEV		(NUM_REMOTE_DATA_DEV + NUM_REMOTE_DCI_DEV)
+
+#define DIAG_BRIDGE_NAME_SZ	24
+#define DIAG_BRIDGE_GET_NAME(x)	(bridge_info[x].name)
+
+struct diag_remote_dev_ops {
+	int (*open)(int id);
+	int (*close)(int id);
+	int (*queue_read)(int id);
+	int (*write)(int id, unsigned char *buf, int len, int ctxt);
+	int (*fwd_complete)(int id, unsigned char *buf, int len, int ctxt);
+};
+
+struct diagfwd_bridge_info {
+	int id;
+	int type;
+	int inited;
+	int ctxt;
+	char name[DIAG_BRIDGE_NAME_SZ];
+	struct diag_remote_dev_ops *dev_ops;
+	/* DCI related variables. These would be NULL for data channels */
+	void *dci_read_ptr;
+	unsigned char *dci_read_buf;
+	int dci_read_len;
+	struct workqueue_struct *dci_wq;
+	struct work_struct dci_read_work;
+};
+
+extern struct diagfwd_bridge_info bridge_info[NUM_REMOTE_DEV];
+int diagfwd_bridge_init(void);
+void diagfwd_bridge_exit(void);
+int diagfwd_bridge_close(int id);
+int diagfwd_bridge_write(int id, unsigned char *buf, int len);
+uint16_t diag_get_remote_device_mask(void);
+
+/* The following functions must be called by Diag remote devices only. */
+int diagfwd_bridge_register(int id, int ctxt, struct diag_remote_dev_ops *ops);
+int diag_remote_dev_open(int id);
+void diag_remote_dev_close(int id);
+int diag_remote_dev_read_done(int id, unsigned char *buf, int len);
+int diag_remote_dev_write_done(int id, unsigned char *buf, int len, int ctxt);
+
+#endif
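+
+/*
+ * Illustrative registration sketch (not part of this patch) for a remote
+ * device backend such as an HSIC transport. The hsic_* callbacks are
+ * hypothetical; only the ops wiring and the calls reflect the API above:
+ *
+ *	static struct diag_remote_dev_ops hsic_ops = {
+ *		.open		= hsic_open,
+ *		.close		= hsic_close,
+ *		.queue_read	= hsic_queue_read,
+ *		.write		= hsic_write,
+ *		.fwd_complete	= hsic_fwd_complete,
+ *	};
+ *
+ *	err = diagfwd_bridge_register(DIAGFWD_MDM, ctxt, &hsic_ops);
+ *
+ * The backend then reports inbound data with diag_remote_dev_read_done()
+ * and acknowledges writes with diag_remote_dev_write_done().
+ */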
diff -Nruw linux-4.4.115-fbx/drivers/char/diag./diagfwd.c linux-4.4.115-fbx/drivers/char/diag/diagfwd.c
--- linux-4.4.115-fbx/drivers/char/diag./diagfwd.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/char/diag/diagfwd.c	2019-10-29 09:26:23.453201319 +0100
@@ -0,0 +1,1816 @@
+/* Copyright (c) 2008-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/sched.h>
+#include <linux/ratelimit.h>
+#include <linux/workqueue.h>
+#include <linux/pm_runtime.h>
+#include <linux/diagchar.h>
+#include <linux/delay.h>
+#include <linux/reboot.h>
+#include <linux/of.h>
+#include <linux/kmemleak.h>
+#ifdef CONFIG_DIAG_OVER_USB
+#include <linux/usb/usbdiag.h>
+#endif
+#include <soc/qcom/socinfo.h>
+#include <soc/qcom/restart.h>
+#include "diagmem.h"
+#include "diagchar.h"
+#include "diagfwd.h"
+#include "diagfwd_peripheral.h"
+#include "diagfwd_cntl.h"
+#include "diagchar_hdlc.h"
+#include "diag_dci.h"
+#include "diag_masks.h"
+#include "diag_usb.h"
+#include "diag_mux.h"
+#include "diag_ipc_logging.h"
+
+#define STM_CMD_VERSION_OFFSET	4
+#define STM_CMD_MASK_OFFSET	5
+#define STM_CMD_DATA_OFFSET	6
+#define STM_CMD_NUM_BYTES	7
+
+#define STM_RSP_SUPPORTED_INDEX		7
+#define STM_RSP_STATUS_INDEX		8
+#define STM_RSP_NUM_BYTES		9
+#define RETRY_MAX_COUNT		1000
+
+static int timestamp_switch;
+module_param(timestamp_switch, int, 0644);
+
+int wrap_enabled;
+uint16_t wrap_count;
+static struct diag_hdlc_decode_type *hdlc_decode;
+
+#define DIAG_NUM_COMMON_CMD	1
+static uint8_t common_cmds[DIAG_NUM_COMMON_CMD] = {
+	DIAG_CMD_LOG_ON_DMND
+};
+
+static uint8_t hdlc_timer_in_progress;
+
+/* Determine if this device uses a device tree */
+#ifdef CONFIG_OF
+static int has_device_tree(void)
+{
+	struct device_node *node;
+
+	node = of_find_node_by_path("/");
+	if (node) {
+		of_node_put(node);
+		return 1;
+	}
+	return 0;
+}
+#else
+static int has_device_tree(void)
+{
+	return 0;
+}
+#endif
+
+int chk_config_get_id(void)
+{
+	switch (socinfo_get_msm_cpu()) {
+	case MSM_CPU_8X60:
+		return APQ8060_TOOLS_ID;
+	case MSM_CPU_8960:
+	case MSM_CPU_8960AB:
+		return AO8960_TOOLS_ID;
+	case MSM_CPU_8064:
+	case MSM_CPU_8064AB:
+	case MSM_CPU_8064AA:
+		return APQ8064_TOOLS_ID;
+	case MSM_CPU_8930:
+	case MSM_CPU_8930AA:
+	case MSM_CPU_8930AB:
+		return MSM8930_TOOLS_ID;
+	case MSM_CPU_8974:
+		return MSM8974_TOOLS_ID;
+	case MSM_CPU_8625:
+		return MSM8625_TOOLS_ID;
+	case MSM_CPU_8084:
+		return APQ8084_TOOLS_ID;
+	case MSM_CPU_8916:
+		return MSM8916_TOOLS_ID;
+	case MSM_CPU_8939:
+		return MSM8939_TOOLS_ID;
+	case MSM_CPU_8994:
+		return MSM8994_TOOLS_ID;
+	case MSM_CPU_8226:
+		return APQ8026_TOOLS_ID;
+	case MSM_CPU_8909:
+		return MSM8909_TOOLS_ID;
+	case MSM_CPU_8992:
+		return MSM8992_TOOLS_ID;
+	case MSM_CPU_8996:
+		return MSM_8996_TOOLS_ID;
+	default:
+		if (driver->use_device_tree) {
+			if (machine_is_msm8974())
+				return MSM8974_TOOLS_ID;
+			else if (machine_is_apq8074())
+				return APQ8074_TOOLS_ID;
+			else
+				return 0;
+		} else {
+			return 0;
+		}
+	}
+}
+
+/*
+ * This will return TRUE for targets which support apps only mode and hence SSR.
+ * This applies to 8960 and newer targets.
+ */
+int chk_apps_only(void)
+{
+	if (driver->use_device_tree)
+		return 1;
+
+	switch (socinfo_get_msm_cpu()) {
+	case MSM_CPU_8960:
+	case MSM_CPU_8960AB:
+	case MSM_CPU_8064:
+	case MSM_CPU_8064AB:
+	case MSM_CPU_8064AA:
+	case MSM_CPU_8930:
+	case MSM_CPU_8930AA:
+	case MSM_CPU_8930AB:
+	case MSM_CPU_8627:
+	case MSM_CPU_9615:
+	case MSM_CPU_8974:
+		return 1;
+	default:
+		return 0;
+	}
+}
+
+/*
+ * This will return TRUE for targets which support apps as master.
+ * Thus, SW DLOAD and Mode Reset are supported on apps processor.
+ * This applies to 8960 and newer targets.
+ */
+int chk_apps_master(void)
+{
+	if (driver->use_device_tree)
+		return 1;
+	else
+		return 0;
+}
+
+int chk_polling_response(void)
+{
+	if (!(driver->polling_reg_flag) && chk_apps_master())
+		/*
+		 * If the apps processor is master and no other processor
+		 * has registered to respond for polling
+		 */
+		return 1;
+	else if (!(driver->diagfwd_cntl[PERIPHERAL_MODEM] &&
+		   driver->diagfwd_cntl[PERIPHERAL_MODEM]->ch_open) &&
+		 (driver->feature[PERIPHERAL_MODEM].rcvd_feature_mask))
+		/*
+		 * If the apps processor is not the master and the modem
+		 * is not up or we did not receive the feature masks from Modem
+		 */
+		return 1;
+	else
+		return 0;
+}
+
+/*
+ * This function should be called if you feel that the logging process may
+ * need to be woken up. For instance, if the logging mode is MEMORY_DEVICE
+ * mode and, while trying to read data from a data channel, there are no
+ * buffers available to read the data into, then this function should be
+ * called to determine if the logging process needs to be woken up.
+ */
+void chk_logging_wakeup(void)
+{
+	int i;
+	int j;
+	int pid = 0;
+
+	for (j = 0; j < NUM_MD_SESSIONS; j++) {
+		if (!driver->md_session_map[j])
+			continue;
+		pid = driver->md_session_map[j]->pid;
+
+		/* Find the index of the logging process */
+		for (i = 0; i < driver->num_clients; i++) {
+			if (driver->client_map[i].pid != pid)
+				continue;
+			if (driver->data_ready[i] & USER_SPACE_DATA_TYPE)
+				continue;
+			/*
+			 * At very high logging rates a race condition can
+			 * occur where the buffers containing the data from
+			 * a channel are all in use, but the data_ready flag
+			 * is cleared. In this case, the buffers never have
+			 * their data read/logged. Detect and remedy this
+			 * situation.
+			 */
+			driver->data_ready[i] |= USER_SPACE_DATA_TYPE;
+			atomic_inc(&driver->data_ready_notif[i]);
+			pr_debug("diag: Force wakeup of logging process\n");
+			wake_up_interruptible(&driver->wait_q);
+			break;
+		}
+		/*
+		 * Diag Memory Device is in normal mode. Check only the first
+		 * index, as all the indices point to the same session
+		 * structure.
+		 */
+		if ((driver->md_session_mask == DIAG_CON_ALL) && (j == 0))
+			break;
+	}
+}
+
+static void pack_rsp_and_send(unsigned char *buf, int len,
+				int pid)
+{
+	int err;
+	int retry_count = 0, i, rsp_ctxt;
+	uint32_t write_len = 0;
+	unsigned long flags;
+	unsigned char *rsp_ptr = driver->encoded_rsp_buf;
+	struct diag_pkt_frame_t header;
+	struct diag_md_session_t *session_info = NULL, *info = NULL;
+
+	if (!rsp_ptr || !buf)
+		return;
+
+	if (len > DIAG_MAX_RSP_SIZE || len < 0) {
+		pr_err("diag: In %s, invalid len %d, permissible len %d\n",
+		       __func__, len, DIAG_MAX_RSP_SIZE);
+		return;
+	}
+
+	mutex_lock(&driver->md_session_lock);
+	session_info = diag_md_session_get_pid(pid);
+	info = (session_info) ? session_info :
+				diag_md_session_get_peripheral(APPS_DATA);
+
+	if (info && info->peripheral_mask) {
+		for (i = 0; i < NUM_MD_SESSIONS; i++) {
+			if (info->peripheral_mask & (1 << i))
+				break;
+		}
+		rsp_ctxt = SET_BUF_CTXT(i, TYPE_CMD, TYPE_CMD);
+	} else
+		rsp_ctxt = driver->rsp_buf_ctxt;
+	mutex_unlock(&driver->md_session_lock);
+
+	/*
+	 * Keep trying till we get the buffer back. It should usually take
+	 * one or two iterations. If this loops up to RETRY_MAX_COUNT, it
+	 * means we did not get a write complete for the previous
+	 * response.
+	 */
+	while (retry_count < RETRY_MAX_COUNT) {
+		if (!driver->rsp_buf_busy)
+			break;
+		/*
+		 * Wait for some time and try again. The value 10000 us was
+		 * chosen empirically as an optimal time for USB to complete
+		 * a write.
+		 */
+		usleep_range(10000, 10100);
+		retry_count++;
+
+		/*
+		 * There can be a race condition that clears the data ready flag
+		 * for responses. Make sure we don't miss previous wakeups for
+		 * draining responses when we are in Memory Device Mode.
+		 */
+		if (driver->logging_mode == DIAG_MEMORY_DEVICE_MODE ||
+				driver->logging_mode == DIAG_MULTI_MODE) {
+			mutex_lock(&driver->md_session_lock);
+			chk_logging_wakeup();
+			mutex_unlock(&driver->md_session_lock);
+		}
+	}
+	if (driver->rsp_buf_busy) {
+		pr_err("diag: unable to get hold of response buffer\n");
+		return;
+	}
+
+	spin_lock_irqsave(&driver->rsp_buf_busy_lock, flags);
+	driver->rsp_buf_busy = 1;
+	spin_unlock_irqrestore(&driver->rsp_buf_busy_lock, flags);
+	header.start = CONTROL_CHAR;
+	header.version = 1;
+	header.length = len;
+	memcpy(rsp_ptr, &header, sizeof(header));
+	write_len += sizeof(header);
+	memcpy(rsp_ptr + write_len, buf, len);
+	write_len += len;
+	*(uint8_t *)(rsp_ptr + write_len) = CONTROL_CHAR;
+	write_len += sizeof(uint8_t);
+
+	err = diag_mux_write(DIAG_LOCAL_PROC, rsp_ptr, write_len, rsp_ctxt);
+	if (err) {
+		pr_err("diag: In %s, unable to write to mux, err: %d\n",
+		       __func__, err);
+		spin_lock_irqsave(&driver->rsp_buf_busy_lock, flags);
+		driver->rsp_buf_busy = 0;
+		spin_unlock_irqrestore(&driver->rsp_buf_busy_lock, flags);
+	}
+}
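+
+/*
+ * For reference, the frame built above has the following layout (a sketch
+ * inferred from the code, not a normative definition):
+ *
+ *	| CONTROL_CHAR | version | length | payload | CONTROL_CHAR |
+ *
+ * where the first three fields form struct diag_pkt_frame_t and length is
+ * the payload length in bytes.
+ */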
+
+static void encode_rsp_and_send(unsigned char *buf, int len,
+				int pid)
+{
+	struct diag_send_desc_type send = { NULL, NULL, DIAG_STATE_START, 0 };
+	struct diag_hdlc_dest_type enc = { NULL, NULL, 0 };
+	unsigned char *rsp_ptr = driver->encoded_rsp_buf;
+	int err, i, rsp_ctxt, retry_count = 0;
+	unsigned long flags;
+	struct diag_md_session_t *session_info = NULL, *info = NULL;
+
+	if (!rsp_ptr || !buf)
+		return;
+
+	if (len > DIAG_MAX_RSP_SIZE || len < 0) {
+		pr_err("diag: In %s, invalid len %d, permissible len %d\n",
+		       __func__, len, DIAG_MAX_RSP_SIZE);
+		return;
+	}
+
+	mutex_lock(&driver->md_session_lock);
+	session_info = diag_md_session_get_pid(pid);
+	info = (session_info) ? session_info :
+				diag_md_session_get_peripheral(APPS_DATA);
+
+	if (info && info->peripheral_mask) {
+		for (i = 0; i < NUM_MD_SESSIONS; i++) {
+			if (info->peripheral_mask & (1 << i))
+				break;
+		}
+		rsp_ctxt = SET_BUF_CTXT(i, TYPE_CMD, TYPE_CMD);
+	} else
+		rsp_ctxt = driver->rsp_buf_ctxt;
+	mutex_unlock(&driver->md_session_lock);
+	/*
+	 * Keep trying till we get the buffer back. It should usually take
+	 * one or two iterations. If this loops up to RETRY_MAX_COUNT, it
+	 * means we did not get a write complete for the previous
+	 * response.
+	 */
+	while (retry_count < RETRY_MAX_COUNT) {
+		if (!driver->rsp_buf_busy)
+			break;
+		/*
+		 * Wait for some time and try again. The value 10000 us was
+		 * chosen empirically as an optimal time for USB to complete
+		 * a write.
+		 */
+		usleep_range(10000, 10100);
+		retry_count++;
+
+		/*
+		 * There can be a race condition that clears the data ready flag
+		 * for responses. Make sure we don't miss previous wakeups for
+		 * draining responses when we are in Memory Device Mode.
+		 */
+		if (driver->logging_mode == DIAG_MEMORY_DEVICE_MODE ||
+				driver->logging_mode == DIAG_MULTI_MODE) {
+			mutex_lock(&driver->md_session_lock);
+			chk_logging_wakeup();
+			mutex_unlock(&driver->md_session_lock);
+		}
+	}
+
+	if (driver->rsp_buf_busy) {
+		pr_err("diag: unable to get hold of response buffer\n");
+		return;
+	}
+
+	spin_lock_irqsave(&driver->rsp_buf_busy_lock, flags);
+	driver->rsp_buf_busy = 1;
+	spin_unlock_irqrestore(&driver->rsp_buf_busy_lock, flags);
+	send.state = DIAG_STATE_START;
+	send.pkt = buf;
+	send.last = (void *)(buf + len - 1);
+	send.terminate = 1;
+	enc.dest = rsp_ptr;
+	enc.dest_last = (void *)(rsp_ptr + DIAG_MAX_HDLC_BUF_SIZE - 1);
+	diag_hdlc_encode(&send, &enc);
+	driver->encoded_rsp_len = (int)(enc.dest - (void *)rsp_ptr);
+	err = diag_mux_write(DIAG_LOCAL_PROC, rsp_ptr, driver->encoded_rsp_len,
+			     rsp_ctxt);
+	if (err) {
+		pr_err("diag: In %s, Unable to write to device, err: %d\n",
+			__func__, err);
+		spin_lock_irqsave(&driver->rsp_buf_busy_lock, flags);
+		driver->rsp_buf_busy = 0;
+		spin_unlock_irqrestore(&driver->rsp_buf_busy_lock, flags);
+	}
+	memset(buf, '\0', DIAG_MAX_RSP_SIZE);
+}
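+
+/*
+ * A note on the HDLC path above (a sketch of the usual diag HDLC framing;
+ * the exact escaping rules live in diagchar_hdlc.c): the payload is
+ * byte-stuffed so the frame terminator cannot occur inside it, a CRC
+ * footer is appended, and the frame ends with CONTROL_CHAR. This is why
+ * the encoded length can exceed the raw packet length and is bounded by
+ * DIAG_MAX_HDLC_BUF_SIZE rather than DIAG_MAX_RSP_SIZE.
+ */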
+
+static void diag_send_rsp(unsigned char *buf, int len, int pid)
+{
+	struct diag_md_session_t *session_info = NULL, *info = NULL;
+	uint8_t hdlc_disabled;
+
+	mutex_lock(&driver->md_session_lock);
+	info = diag_md_session_get_pid(pid);
+	session_info = (info) ? info :
+				diag_md_session_get_peripheral(APPS_DATA);
+	if (session_info)
+		hdlc_disabled = session_info->hdlc_disabled;
+	else
+		hdlc_disabled = driver->hdlc_disabled;
+	mutex_unlock(&driver->md_session_lock);
+	if (hdlc_disabled)
+		pack_rsp_and_send(buf, len, pid);
+	else
+		encode_rsp_and_send(buf, len, pid);
+}
+
+void diag_update_pkt_buffer(unsigned char *buf, uint32_t len, int type)
+{
+	unsigned char *ptr = NULL;
+	unsigned char *temp = buf;
+	int *in_busy = NULL;
+	uint32_t *length = NULL;
+	uint32_t max_len = 0;
+
+	if (!buf || len == 0) {
+		pr_err("diag: In %s, Invalid ptr %pK and length %d\n",
+		       __func__, buf, len);
+		return;
+	}
+
+	switch (type) {
+	case PKT_TYPE:
+		ptr = driver->apps_req_buf;
+		length = &driver->apps_req_buf_len;
+		max_len = DIAG_MAX_REQ_SIZE;
+		in_busy = &driver->in_busy_pktdata;
+		break;
+	case DCI_PKT_TYPE:
+		ptr = driver->dci_pkt_buf;
+		length = &driver->dci_pkt_length;
+		max_len = DCI_BUF_SIZE;
+		in_busy = &driver->in_busy_dcipktdata;
+		break;
+	default:
+		pr_err("diag: Invalid type %d in %s\n", type, __func__);
+		return;
+	}
+
+	mutex_lock(&driver->diagchar_mutex);
+	if (CHK_OVERFLOW(ptr, ptr, ptr + max_len, len)) {
+		memcpy(ptr, temp, len);
+		*length = len;
+		*in_busy = 1;
+	} else {
+		pr_alert("diag: In %s, no space for response packet, len: %d, type: %d\n",
+			 __func__, len, type);
+	}
+	mutex_unlock(&driver->diagchar_mutex);
+}
+
+void diag_update_userspace_clients(unsigned int type)
+{
+	int i;
+
+	mutex_lock(&driver->diagchar_mutex);
+	for (i = 0; i < driver->num_clients; i++)
+		if (driver->client_map[i].pid != 0 &&
+			!(driver->data_ready[i] & type)) {
+			driver->data_ready[i] |= type;
+			atomic_inc(&driver->data_ready_notif[i]);
+		}
+	wake_up_interruptible(&driver->wait_q);
+	mutex_unlock(&driver->diagchar_mutex);
+}
+
+void diag_update_md_clients(unsigned int type)
+{
+	int i, j;
+
+	mutex_lock(&driver->diagchar_mutex);
+	mutex_lock(&driver->md_session_lock);
+	for (i = 0; i < NUM_MD_SESSIONS; i++) {
+		if (driver->md_session_map[i] != NULL)
+			for (j = 0; j < driver->num_clients; j++) {
+				if (driver->client_map[j].pid != 0 &&
+					driver->client_map[j].pid ==
+					driver->md_session_map[i]->pid) {
+					if (!(driver->data_ready[j] & type)) {
+						driver->data_ready[j] |= type;
+						atomic_inc(
+						&driver->data_ready_notif[j]);
+					}
+					break;
+				}
+			}
+	}
+	mutex_unlock(&driver->md_session_lock);
+	wake_up_interruptible(&driver->wait_q);
+	mutex_unlock(&driver->diagchar_mutex);
+}
+
+void diag_update_sleeping_process(int process_id, int data_type)
+{
+	int i;
+
+	mutex_lock(&driver->diagchar_mutex);
+	for (i = 0; i < driver->num_clients; i++)
+		if (driver->client_map[i].pid == process_id) {
+			if (!(driver->data_ready[i] & data_type)) {
+				driver->data_ready[i] |= data_type;
+				atomic_inc(&driver->data_ready_notif[i]);
+			}
+			break;
+		}
+	wake_up_interruptible(&driver->wait_q);
+	mutex_unlock(&driver->diagchar_mutex);
+}
+
+static int diag_send_data(struct diag_cmd_reg_t *entry, unsigned char *buf,
+			  int len)
+{
+	if (!entry)
+		return -EIO;
+
+	if (entry->proc == APPS_DATA) {
+		diag_update_pkt_buffer(buf, len, PKT_TYPE);
+		diag_update_sleeping_process(entry->pid, PKT_TYPE);
+		return 0;
+	}
+
+	return diagfwd_write(entry->proc, TYPE_CMD, buf, len);
+}
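+
+/*
+ * Note: for APPS_DATA registrations the request is parked in the apps
+ * request buffer and the owning userspace client is woken through its
+ * data_ready flags; requests owned by peripheral processors are pushed
+ * out over that peripheral's command channel via diagfwd_write().
+ */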
+
+void diag_process_stm_mask(uint8_t cmd, uint8_t data_mask, int data_type)
+{
+	int status = 0;
+	if (data_type >= PERIPHERAL_MODEM && data_type <= PERIPHERAL_SENSORS) {
+		if (driver->feature[data_type].stm_support) {
+			status = diag_send_stm_state(data_type, cmd);
+			if (status == 0)
+				driver->stm_state[data_type] = cmd;
+		}
+		driver->stm_state_requested[data_type] = cmd;
+	} else if (data_type == APPS_DATA) {
+		driver->stm_state[data_type] = cmd;
+		driver->stm_state_requested[data_type] = cmd;
+	}
+}
+
+int diag_process_stm_cmd(unsigned char *buf, unsigned char *dest_buf)
+{
+	uint8_t version, mask, cmd;
+	uint8_t rsp_supported = 0;
+	uint8_t rsp_status = 0;
+	int i;
+
+	if (!buf || !dest_buf) {
+		pr_err("diag: Invalid pointers buf: %pK, dest_buf %pK in %s\n",
+		       buf, dest_buf, __func__);
+		return -EIO;
+	}
+
+	version = *(buf + STM_CMD_VERSION_OFFSET);
+	mask = *(buf + STM_CMD_MASK_OFFSET);
+	cmd = *(buf + STM_CMD_DATA_OFFSET);
+
+	/*
+	 * Check if command is valid. If the command is asking for
+	 * status, then the processor mask field is to be ignored.
+	 */
+	if ((version != 2) || (cmd > STATUS_STM) ||
+		((cmd != STATUS_STM) && ((mask == 0) || (0 != (mask >> 4))))) {
+		/* Command is invalid. Send bad param message response */
+		dest_buf[0] = BAD_PARAM_RESPONSE_MESSAGE;
+		for (i = 0; i < STM_CMD_NUM_BYTES; i++)
+			dest_buf[i+1] = *(buf + i);
+		return STM_CMD_NUM_BYTES+1;
+	} else if (cmd != STATUS_STM) {
+		if (mask & DIAG_STM_MODEM)
+			diag_process_stm_mask(cmd, DIAG_STM_MODEM,
+					      PERIPHERAL_MODEM);
+
+		if (mask & DIAG_STM_LPASS)
+			diag_process_stm_mask(cmd, DIAG_STM_LPASS,
+					      PERIPHERAL_LPASS);
+
+		if (mask & DIAG_STM_WCNSS)
+			diag_process_stm_mask(cmd, DIAG_STM_WCNSS,
+					      PERIPHERAL_WCNSS);
+
+		if (mask & DIAG_STM_SENSORS)
+			diag_process_stm_mask(cmd, DIAG_STM_SENSORS,
+						PERIPHERAL_SENSORS);
+
+		if (mask & DIAG_STM_WDSP)
+			diag_process_stm_mask(cmd, DIAG_STM_WDSP,
+						PERIPHERAL_WDSP);
+
+		if (mask & DIAG_STM_APPS)
+			diag_process_stm_mask(cmd, DIAG_STM_APPS, APPS_DATA);
+	}
+
+	for (i = 0; i < STM_CMD_NUM_BYTES; i++)
+		dest_buf[i] = *(buf + i);
+
+	/* Set mask denoting which peripherals support STM */
+	if (driver->feature[PERIPHERAL_MODEM].stm_support)
+		rsp_supported |= DIAG_STM_MODEM;
+
+	if (driver->feature[PERIPHERAL_LPASS].stm_support)
+		rsp_supported |= DIAG_STM_LPASS;
+
+	if (driver->feature[PERIPHERAL_WCNSS].stm_support)
+		rsp_supported |= DIAG_STM_WCNSS;
+
+	if (driver->feature[PERIPHERAL_SENSORS].stm_support)
+		rsp_supported |= DIAG_STM_SENSORS;
+
+	if (driver->feature[PERIPHERAL_WDSP].stm_support)
+		rsp_supported |= DIAG_STM_WDSP;
+
+	rsp_supported |= DIAG_STM_APPS;
+
+	/* Set mask denoting STM state/status for each peripheral/APSS */
+	if (driver->stm_state[PERIPHERAL_MODEM])
+		rsp_status |= DIAG_STM_MODEM;
+
+	if (driver->stm_state[PERIPHERAL_LPASS])
+		rsp_status |= DIAG_STM_LPASS;
+
+	if (driver->stm_state[PERIPHERAL_WCNSS])
+		rsp_status |= DIAG_STM_WCNSS;
+
+	if (driver->stm_state[PERIPHERAL_SENSORS])
+		rsp_status |= DIAG_STM_SENSORS;
+
+	if (driver->stm_state[PERIPHERAL_WDSP])
+		rsp_status |= DIAG_STM_WDSP;
+
+	if (driver->stm_state[APPS_DATA])
+		rsp_status |= DIAG_STM_APPS;
+
+	dest_buf[STM_RSP_SUPPORTED_INDEX] = rsp_supported;
+	dest_buf[STM_RSP_STATUS_INDEX] = rsp_status;
+
+	return STM_RSP_NUM_BYTES;
+}
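+
+/*
+ * STM request/response layout implied by the offsets above (illustrative;
+ * actual byte values depend on the tool):
+ *
+ *	request:  [0..3] header, [4] version, [5] processor mask, [6] cmd
+ *	response: request bytes [0..6] echoed back, then
+ *	          [7] supported-peripherals mask, [8] current STM status
+ *
+ * for a total of STM_RSP_NUM_BYTES (9) bytes on success.
+ */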
+
+int diag_process_time_sync_query_cmd(unsigned char *src_buf, int src_len,
+				      unsigned char *dest_buf, int dest_len)
+{
+	int write_len = 0;
+	struct diag_cmd_time_sync_query_req_t *req = NULL;
+	struct diag_cmd_time_sync_query_rsp_t rsp;
+
+	if (!src_buf || !dest_buf || src_len <= 0 || dest_len <= 0) {
+		pr_err("diag: Invalid input in %s, src_buf: %pK, src_len: %d, dest_buf: %pK, dest_len: %d",
+			__func__, src_buf, src_len, dest_buf, dest_len);
+		return -EINVAL;
+	}
+
+	req = (struct diag_cmd_time_sync_query_req_t *)src_buf;
+	rsp.header.cmd_code = req->header.cmd_code;
+	rsp.header.subsys_id = req->header.subsys_id;
+	rsp.header.subsys_cmd_code = req->header.subsys_cmd_code;
+	rsp.version = req->version;
+	rsp.time_api = driver->uses_time_api;
+	memcpy(dest_buf, &rsp, sizeof(rsp));
+	write_len = sizeof(rsp);
+	return write_len;
+}
+
+int diag_process_time_sync_switch_cmd(unsigned char *src_buf, int src_len,
+				      unsigned char *dest_buf, int dest_len)
+{
+	uint8_t peripheral, status = 0;
+	struct diag_cmd_time_sync_switch_req_t *req = NULL;
+	struct diag_cmd_time_sync_switch_rsp_t rsp;
+	struct diag_ctrl_msg_time_sync time_sync_msg;
+	int msg_size = sizeof(struct diag_ctrl_msg_time_sync);
+	int err = 0, write_len = 0;
+
+	if (!src_buf || !dest_buf || src_len <= 0 || dest_len <= 0) {
+		pr_err("diag: Invalid input in %s, src_buf: %pK, src_len: %d, dest_buf: %pK, dest_len: %d",
+			__func__, src_buf, src_len, dest_buf, dest_len);
+		return -EINVAL;
+	}
+
+	req = (struct diag_cmd_time_sync_switch_req_t *)src_buf;
+	rsp.header.cmd_code = req->header.cmd_code;
+	rsp.header.subsys_id = req->header.subsys_id;
+	rsp.header.subsys_cmd_code = req->header.subsys_cmd_code;
+	rsp.version = req->version;
+	rsp.time_api = req->time_api;
+	if ((req->version > 1) || (req->time_api > 1) ||
+					(req->persist_time > 0)) {
+		dest_buf[0] = BAD_PARAM_RESPONSE_MESSAGE;
+		rsp.time_api_status = 0;
+		rsp.persist_time_status = PERSIST_TIME_NOT_SUPPORTED;
+		memcpy(dest_buf + 1, &rsp, sizeof(rsp));
+		write_len = sizeof(rsp) + 1;
+		timestamp_switch = 0;
+		return write_len;
+	}
+
+	time_sync_msg.ctrl_pkt_id = DIAG_CTRL_MSG_TIME_SYNC_PKT;
+	time_sync_msg.ctrl_pkt_data_len = 5;
+	time_sync_msg.version = 1;
+	time_sync_msg.time_api = req->time_api;
+
+	for (peripheral = 0; peripheral < NUM_PERIPHERALS; peripheral++) {
+		err = diagfwd_write(peripheral, TYPE_CNTL, &time_sync_msg,
+					msg_size);
+		if (err && err != -ENODEV) {
+			pr_err("diag: In %s, unable to write to peripheral: %d, type: %d, len: %d, err: %d\n",
+				__func__, peripheral, TYPE_CNTL,
+				msg_size, err);
+			status |= (1 << peripheral);
+		}
+	}
+
+	driver->time_sync_enabled = 1;
+	driver->uses_time_api = req->time_api;
+
+	switch (req->time_api) {
+	case 0:
+		timestamp_switch = 0;
+		break;
+	case 1:
+		timestamp_switch = 1;
+		break;
+	default:
+		timestamp_switch = 0;
+		break;
+	}
+
+	rsp.time_api_status = status;
+	rsp.persist_time_status = PERSIST_TIME_NOT_SUPPORTED;
+	memcpy(dest_buf, &rsp, sizeof(rsp));
+	write_len = sizeof(rsp);
+	return write_len;
+}
+
+int diag_cmd_log_on_demand(unsigned char *src_buf, int src_len,
+			   unsigned char *dest_buf, int dest_len)
+{
+	int write_len = 0;
+	struct diag_log_on_demand_rsp_t header;
+
+	if (!driver->diagfwd_cntl[PERIPHERAL_MODEM] ||
+	    !driver->diagfwd_cntl[PERIPHERAL_MODEM]->ch_open ||
+	    !driver->log_on_demand_support)
+		return 0;
+
+	if (!src_buf || !dest_buf || src_len <= 0 || dest_len <= 0) {
+		pr_err("diag: Invalid input in %s, src_buf: %pK, src_len: %d, dest_buf: %pK, dest_len: %d",
+		       __func__, src_buf, src_len, dest_buf, dest_len);
+		return -EINVAL;
+	}
+
+	header.cmd_code = DIAG_CMD_LOG_ON_DMND;
+	header.log_code = *(uint16_t *)(src_buf + 1);
+	header.status = 1;
+	memcpy(dest_buf, &header, sizeof(struct diag_log_on_demand_rsp_t));
+	write_len += sizeof(struct diag_log_on_demand_rsp_t);
+
+	return write_len;
+}
+
+int diag_cmd_get_mobile_id(unsigned char *src_buf, int src_len,
+			   unsigned char *dest_buf, int dest_len)
+{
+	int write_len = 0;
+	struct diag_pkt_header_t *header = NULL;
+	struct diag_cmd_ext_mobile_rsp_t rsp;
+
+	if (!src_buf || src_len != sizeof(*header) || !dest_buf ||
+	    dest_len < sizeof(rsp))
+		return -EIO;
+
+	header = (struct diag_pkt_header_t *)src_buf;
+	rsp.header.cmd_code = header->cmd_code;
+	rsp.header.subsys_id = header->subsys_id;
+	rsp.header.subsys_cmd_code = header->subsys_cmd_code;
+	rsp.version = 2;
+	rsp.padding[0] = 0;
+	rsp.padding[1] = 0;
+	rsp.padding[2] = 0;
+	rsp.family = 0;
+	rsp.chip_id = (uint32_t)socinfo_get_id();
+
+	memcpy(dest_buf, &rsp, sizeof(rsp));
+	write_len += sizeof(rsp);
+
+	return write_len;
+}
+
+int diag_check_common_cmd(struct diag_pkt_header_t *header)
+{
+	int i;
+
+	if (!header)
+		return -EIO;
+
+	for (i = 0; i < DIAG_NUM_COMMON_CMD; i++) {
+		if (header->cmd_code == common_cmds[i])
+			return 1;
+	}
+
+	return 0;
+}
+
+#ifdef CONFIG_DIAG_OVER_USB
+static int diag_cmd_chk_stats(unsigned char *src_buf, int src_len,
+			      unsigned char *dest_buf, int dest_len)
+{
+	int payload = 0;
+	int write_len = 0;
+	struct diag_pkt_header_t *header = NULL;
+	struct diag_cmd_stats_rsp_t rsp;
+
+	if (!src_buf || src_len < sizeof(struct diag_pkt_header_t) ||
+	    !dest_buf || dest_len < sizeof(rsp))
+		return -EINVAL;
+
+	header = (struct diag_pkt_header_t *)src_buf;
+
+	if (header->cmd_code != DIAG_CMD_DIAG_SUBSYS ||
+	    header->subsys_id != DIAG_SS_DIAG)
+		return -EINVAL;
+
+	switch (header->subsys_cmd_code) {
+	case DIAG_CMD_OP_GET_MSG_ALLOC:
+		payload = driver->msg_stats.alloc_count;
+		break;
+	case DIAG_CMD_OP_GET_MSG_DROP:
+		payload = driver->msg_stats.drop_count;
+		break;
+	case DIAG_CMD_OP_RESET_MSG_STATS:
+		diag_record_stats(DATA_TYPE_F3, PKT_RESET);
+		break;
+	case DIAG_CMD_OP_GET_LOG_ALLOC:
+		payload = driver->log_stats.alloc_count;
+		break;
+	case DIAG_CMD_OP_GET_LOG_DROP:
+		payload = driver->log_stats.drop_count;
+		break;
+	case DIAG_CMD_OP_RESET_LOG_STATS:
+		diag_record_stats(DATA_TYPE_LOG, PKT_RESET);
+		break;
+	case DIAG_CMD_OP_GET_EVENT_ALLOC:
+		payload = driver->event_stats.alloc_count;
+		break;
+	case DIAG_CMD_OP_GET_EVENT_DROP:
+		payload = driver->event_stats.drop_count;
+		break;
+	case DIAG_CMD_OP_RESET_EVENT_STATS:
+		diag_record_stats(DATA_TYPE_EVENT, PKT_RESET);
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	memcpy(&rsp.header, header, sizeof(struct diag_pkt_header_t));
+	rsp.payload = payload;
+	write_len = sizeof(rsp);
+	memcpy(dest_buf, &rsp, sizeof(rsp));
+
+	return write_len;
+}
+
+static int diag_cmd_disable_hdlc(unsigned char *src_buf, int src_len,
+				 unsigned char *dest_buf, int dest_len)
+{
+	struct diag_pkt_header_t *header = NULL;
+	struct diag_cmd_hdlc_disable_rsp_t rsp;
+	int write_len = 0;
+
+	if (!src_buf || src_len < sizeof(*header) ||
+	    !dest_buf || dest_len < sizeof(rsp)) {
+		return -EIO;
+	}
+
+	header = (struct diag_pkt_header_t *)src_buf;
+	if (header->cmd_code != DIAG_CMD_DIAG_SUBSYS ||
+	    header->subsys_id != DIAG_SS_DIAG ||
+	    header->subsys_cmd_code != DIAG_CMD_OP_HDLC_DISABLE) {
+		return -EINVAL;
+	}
+
+	memcpy(&rsp.header, header, sizeof(struct diag_pkt_header_t));
+	rsp.framing_version = 1;
+	rsp.result = 0;
+	write_len = sizeof(rsp);
+	memcpy(dest_buf, &rsp, sizeof(rsp));
+
+	return write_len;
+}
+#endif
+
+void diag_send_error_rsp(unsigned char *buf, int len,
+			int pid)
+{
+	/* -1 to accommodate the first byte 0x13 */
+	if (len > (DIAG_MAX_RSP_SIZE - 1)) {
+		pr_err("diag: cannot send err rsp, huge length: %d\n", len);
+		return;
+	}
+
+	*(uint8_t *)driver->apps_rsp_buf = DIAG_CMD_ERROR;
+	memcpy((driver->apps_rsp_buf + sizeof(uint8_t)), buf, len);
+	diag_send_rsp(driver->apps_rsp_buf, len + 1, pid);
+}
+
+int diag_process_apps_pkt(unsigned char *buf, int len, int pid)
+{
+	int p_mask = 0;
+	int mask_ret;
+	int write_len = 0;
+	unsigned char *temp = NULL;
+	struct diag_cmd_reg_entry_t entry;
+	struct diag_cmd_reg_entry_t *temp_entry = NULL;
+	struct diag_cmd_reg_t *reg_item = NULL;
+	struct diag_md_session_t *info = NULL;
+
+	if (!buf)
+		return -EIO;
+
+	/* Check if the command is a supported mask command */
+	mask_ret = diag_process_apps_masks(buf, len, pid);
+	if (mask_ret > 0) {
+		diag_send_rsp(driver->apps_rsp_buf, mask_ret, pid);
+		return 0;
+	}
+
+	temp = buf;
+	entry.cmd_code = (uint16_t)(*(uint8_t *)temp);
+	temp += sizeof(uint8_t);
+	entry.subsys_id = (uint16_t)(*(uint8_t *)temp);
+	temp += sizeof(uint8_t);
+	entry.cmd_code_hi = (uint16_t)(*(uint16_t *)temp);
+	entry.cmd_code_lo = (uint16_t)(*(uint16_t *)temp);
+	temp += sizeof(uint16_t);
+
+	pr_debug("diag: In %s, received cmd %02x %02x %02x\n",
+		 __func__, entry.cmd_code, entry.subsys_id, entry.cmd_code_hi);
+
+	if (*buf == DIAG_CMD_LOG_ON_DMND && driver->log_on_demand_support &&
+	    driver->feature[PERIPHERAL_MODEM].rcvd_feature_mask) {
+		write_len = diag_cmd_log_on_demand(buf, len,
+						   driver->apps_rsp_buf,
+						   DIAG_MAX_RSP_SIZE);
+		if (write_len > 0)
+			diag_send_rsp(driver->apps_rsp_buf, write_len, pid);
+		return 0;
+	}
+
+	mutex_lock(&driver->cmd_reg_mutex);
+	temp_entry = diag_cmd_search(&entry, ALL_PROC);
+	if (temp_entry) {
+		reg_item = container_of(temp_entry, struct diag_cmd_reg_t,
+								entry);
+		mutex_lock(&driver->md_session_lock);
+		info = diag_md_session_get_pid(pid);
+		if (info) {
+			p_mask = info->peripheral_mask;
+			mutex_unlock(&driver->md_session_lock);
+			if (MD_PERIPHERAL_MASK(reg_item->proc) & p_mask)
+				write_len = diag_send_data(reg_item, buf, len);
+		} else {
+			mutex_unlock(&driver->md_session_lock);
+			if (MD_PERIPHERAL_MASK(reg_item->proc) &
+				driver->logging_mask)
+				diag_send_error_rsp(buf, len, pid);
+			else
+				write_len = diag_send_data(reg_item, buf, len);
+		}
+		mutex_unlock(&driver->cmd_reg_mutex);
+		return write_len;
+	}
+	mutex_unlock(&driver->cmd_reg_mutex);
+
+#if defined(CONFIG_DIAG_OVER_USB)
+	/* Check for the command/response msg for the maximum packet length */
+	if ((*buf == 0x4b) && (*(buf+1) == 0x12) &&
+		(*(uint16_t *)(buf+2) == 0x0055)) {
+		int i;
+		for (i = 0; i < 4; i++)
+			*(driver->apps_rsp_buf+i) = *(buf+i);
+		*(uint32_t *)(driver->apps_rsp_buf+4) = DIAG_MAX_REQ_SIZE;
+		diag_send_rsp(driver->apps_rsp_buf, 8, pid);
+		return 0;
+	} else if ((*buf == 0x4b) && (*(buf+1) == 0x12) &&
+		(*(uint16_t *)(buf+2) == DIAG_DIAG_STM)) {
+		len = diag_process_stm_cmd(buf, driver->apps_rsp_buf);
+		if (len > 0) {
+			diag_send_rsp(driver->apps_rsp_buf, len, pid);
+			return 0;
+		}
+		return len;
+	}
+	/* Check for time sync query command */
+	else if ((*buf == DIAG_CMD_DIAG_SUBSYS) &&
+		(*(buf+1) == DIAG_SS_DIAG) &&
+		(*(uint16_t *)(buf+2) == DIAG_GET_TIME_API)) {
+		write_len = diag_process_time_sync_query_cmd(buf, len,
+							driver->apps_rsp_buf,
+							DIAG_MAX_RSP_SIZE);
+		if (write_len > 0)
+			diag_send_rsp(driver->apps_rsp_buf, write_len, pid);
+		return 0;
+	}
+	/* Check for time sync switch command */
+	else if ((*buf == DIAG_CMD_DIAG_SUBSYS) &&
+		(*(buf+1) == DIAG_SS_DIAG) &&
+		(*(uint16_t *)(buf+2) == DIAG_SET_TIME_API)) {
+		write_len = diag_process_time_sync_switch_cmd(buf, len,
+							driver->apps_rsp_buf,
+							DIAG_MAX_RSP_SIZE);
+		if (write_len > 0)
+			diag_send_rsp(driver->apps_rsp_buf, write_len, pid);
+		return 0;
+	}
+	/* Check for download command */
+	else if ((chk_apps_master()) && (*buf == 0x3A)) {
+		/* send response back */
+		driver->apps_rsp_buf[0] = *buf;
+		diag_send_rsp(driver->apps_rsp_buf, 1, pid);
+		msleep(5000);
+		/* call download API */
+		msm_set_restart_mode(RESTART_DLOAD);
+		printk(KERN_CRIT "diag: download mode set, rebooting SoC...\n");
+		kernel_restart(NULL);
+		/* Not required; indicates the command isn't sent to the modem */
+		return 0;
+	}
+	/* Check for polling for Apps only DIAG */
+	else if ((*buf == 0x4b) && (*(buf+1) == 0x32) &&
+		(*(buf+2) == 0x03)) {
+		/* If no one has registered for polling */
+		if (chk_polling_response()) {
+			int i;
+			/* Respond to polling for Apps only DIAG */
+			for (i = 0; i < 3; i++)
+				driver->apps_rsp_buf[i] = *(buf+i);
+			for (i = 0; i < 13; i++)
+				driver->apps_rsp_buf[i+3] = 0;
+
+			diag_send_rsp(driver->apps_rsp_buf, 16, pid);
+			return 0;
+		}
+	}
+	/* Return the Delayed Response Wrap Status */
+	else if ((*buf == 0x4b) && (*(buf+1) == 0x32) &&
+		(*(buf+2) == 0x04) && (*(buf+3) == 0x0)) {
+		memcpy(driver->apps_rsp_buf, buf, 4);
+		driver->apps_rsp_buf[4] = wrap_enabled;
+		diag_send_rsp(driver->apps_rsp_buf, 5, pid);
+		return 0;
+	}
+	/* Wrap the Delayed Rsp ID */
+	else if ((*buf == 0x4b) && (*(buf+1) == 0x32) &&
+		(*(buf+2) == 0x05) && (*(buf+3) == 0x0)) {
+		wrap_enabled = true;
+		memcpy(driver->apps_rsp_buf, buf, 4);
+		driver->apps_rsp_buf[4] = wrap_count;
+		diag_send_rsp(driver->apps_rsp_buf, 6, pid);
+		return 0;
+	}
+	/* Mobile ID Rsp */
+	else if ((*buf == DIAG_CMD_DIAG_SUBSYS) &&
+		(*(buf+1) == DIAG_SS_PARAMS) &&
+		(*(buf+2) == DIAG_EXT_MOBILE_ID) && (*(buf+3) == 0x0)) {
+		write_len = diag_cmd_get_mobile_id(buf, len,
+						   driver->apps_rsp_buf,
+						   DIAG_MAX_RSP_SIZE);
+		if (write_len > 0) {
+			diag_send_rsp(driver->apps_rsp_buf, write_len, pid);
+			return 0;
+		}
+	}
+	/*
+	 * If the apps processor is the master, no other processor has
+	 * registered for the polling command, the modem is not up, and
+	 * we have not received the modem's feature mask, then the apps
+	 * processor should respond to the 0x00 and 0x7C commands.
+	 */
+	else if (chk_apps_master() &&
+		 !(driver->polling_reg_flag) &&
+		 !(driver->diagfwd_cntl[PERIPHERAL_MODEM]->ch_open) &&
+		 !(driver->feature[PERIPHERAL_MODEM].rcvd_feature_mask)) {
+		int i;
+		/* respond to 0x0 command */
+		if (*buf == 0x00) {
+			for (i = 0; i < 55; i++)
+				driver->apps_rsp_buf[i] = 0;
+
+			diag_send_rsp(driver->apps_rsp_buf, 55, pid);
+			return 0;
+		}
+		/* respond to 0x7c command */
+		else if (*buf == 0x7c) {
+			driver->apps_rsp_buf[0] = 0x7c;
+			for (i = 1; i < 8; i++)
+				driver->apps_rsp_buf[i] = 0;
+			/* Tools ID for APQ 8060 */
+			*(int *)(driver->apps_rsp_buf + 8) =
+							 chk_config_get_id();
+			*(unsigned char *)(driver->apps_rsp_buf + 12) = '\0';
+			*(unsigned char *)(driver->apps_rsp_buf + 13) = '\0';
+			diag_send_rsp(driver->apps_rsp_buf, 14, pid);
+			return 0;
+		}
+	}
+	write_len = diag_cmd_chk_stats(buf, len, driver->apps_rsp_buf,
+				       DIAG_MAX_RSP_SIZE);
+	if (write_len > 0) {
+		diag_send_rsp(driver->apps_rsp_buf, write_len, pid);
+		return 0;
+	}
+	write_len = diag_cmd_disable_hdlc(buf, len, driver->apps_rsp_buf,
+					  DIAG_MAX_RSP_SIZE);
+	if (write_len > 0) {
+		/*
+		 * This mutex lock is necessary since we need to drain all the
+		 * pending buffers from peripherals which may be HDLC encoded
+		 * before disabling HDLC encoding on Apps processor.
+		 */
+		mutex_lock(&driver->hdlc_disable_mutex);
+		diag_send_rsp(driver->apps_rsp_buf, write_len, pid);
+		/*
+		 * Set the value of hdlc_disabled after sending the response
+		 * to the tool. This is required since the tool expects an
+		 * HDLC encoded response for this request.
+		 */
+		pr_debug("diag: In %s, disabling HDLC encoding\n",
+		       __func__);
+		mutex_lock(&driver->md_session_lock);
+		info = diag_md_session_get_pid(pid);
+		if (info)
+			info->hdlc_disabled = 1;
+		else
+			driver->hdlc_disabled = 1;
+		mutex_unlock(&driver->md_session_lock);
+		diag_update_md_clients(HDLC_SUPPORT_TYPE);
+		mutex_unlock(&driver->hdlc_disable_mutex);
+		return 0;
+	}
+#endif
+
+	/* No handler consumed the packet; send an error rsp on apps-only targets */
+	if (chk_apps_only())
+		diag_send_error_rsp(buf, len, pid);
+
+	return 0;
+}
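+
+/*
+ * Dispatch order implemented above, for reference: mask commands first,
+ * then log-on-demand, then commands registered by peripherals or clients,
+ * then the apps-internal handlers (max packet size query, STM, time sync
+ * query/switch, download, polling, wrap status/count, mobile ID, stats,
+ * HDLC disable), and finally an error response on apps-only targets when
+ * nothing matched.
+ */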
+
+void diag_process_hdlc_pkt(void *data, unsigned int len, int pid)
+{
+	int err = 0;
+	int ret = 0;
+
+	if (len > DIAG_MAX_HDLC_BUF_SIZE) {
+		pr_err("diag: In %s, invalid length: %d\n", __func__, len);
+		return;
+	}
+
+	mutex_lock(&driver->diag_hdlc_mutex);
+	pr_debug("diag: In %s, received packet of length: %d, req_buf_len: %d\n",
+		 __func__, len, driver->hdlc_buf_len);
+
+	if (driver->hdlc_buf_len >= DIAG_MAX_REQ_SIZE) {
+		pr_err("diag: In %s, request length is more than supported len. Dropping packet.\n",
+		       __func__);
+		goto fail;
+	}
+
+	hdlc_decode->dest_ptr = driver->hdlc_buf + driver->hdlc_buf_len;
+	hdlc_decode->dest_size = DIAG_MAX_HDLC_BUF_SIZE - driver->hdlc_buf_len;
+	hdlc_decode->src_ptr = data;
+	hdlc_decode->src_size = len;
+	hdlc_decode->src_idx = 0;
+	hdlc_decode->dest_idx = 0;
+
+	ret = diag_hdlc_decode(hdlc_decode);
+	/*
+	 * driver->hdlc_buf is of size DIAG_MAX_HDLC_BUF_SIZE. But the decoded
+	 * packet should be within DIAG_MAX_REQ_SIZE.
+	 */
+	if (driver->hdlc_buf_len + hdlc_decode->dest_idx <= DIAG_MAX_REQ_SIZE) {
+		driver->hdlc_buf_len += hdlc_decode->dest_idx;
+	} else {
+		pr_err_ratelimited("diag: In %s, Dropping packet. pkt_size: %d, max: %d\n",
+				   __func__,
+				   driver->hdlc_buf_len + hdlc_decode->dest_idx,
+				   DIAG_MAX_REQ_SIZE);
+		goto fail;
+	}
+
+	if (ret == HDLC_COMPLETE) {
+		err = crc_check(driver->hdlc_buf, driver->hdlc_buf_len);
+		if (err) {
+			/* CRC check failed. */
+			pr_err_ratelimited("diag: In %s, bad CRC. Dropping packet\n",
+					   __func__);
+			goto fail;
+		}
+		driver->hdlc_buf_len -= HDLC_FOOTER_LEN;
+
+		if (driver->hdlc_buf_len < 1) {
+			pr_err_ratelimited("diag: In %s, message is too short, len: %d, dest len: %d\n",
+					   __func__, driver->hdlc_buf_len,
+					   hdlc_decode->dest_idx);
+			goto fail;
+		}
+
+		err = diag_process_apps_pkt(driver->hdlc_buf,
+					    driver->hdlc_buf_len, pid);
+		if (err < 0)
+			goto fail;
+	} else {
+		goto end;
+	}
+
+	driver->hdlc_buf_len = 0;
+	mutex_unlock(&driver->diag_hdlc_mutex);
+	return;
+
+fail:
+	/*
+	 * Tools needs to get a response in order to start its
+	 * recovery algorithm. Send an error response if the
+	 * packet is not in expected format.
+	 */
+	diag_send_error_rsp(driver->hdlc_buf, driver->hdlc_buf_len, pid);
+	driver->hdlc_buf_len = 0;
+end:
+	mutex_unlock(&driver->diag_hdlc_mutex);
+}
+
+static int diagfwd_mux_open(int id, int mode)
+{
+	uint8_t i;
+	unsigned long flags;
+
+	switch (mode) {
+#ifdef CONFIG_DIAG_OVER_USB
+	case DIAG_USB_MODE:
+		driver->usb_connected = 1;
+		break;
+#endif
+	case DIAG_MEMORY_DEVICE_MODE:
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	if (driver->rsp_buf_busy) {
+		/*
+		 * When a client switches from callback mode to USB mode
+		 * explicitly, there can be a situation when the last response
+		 * is not drained to the user space application. Reset the
+		 * in_busy flag in this case.
+		 */
+		spin_lock_irqsave(&driver->rsp_buf_busy_lock, flags);
+		driver->rsp_buf_busy = 0;
+		spin_unlock_irqrestore(&driver->rsp_buf_busy_lock, flags);
+	}
+	for (i = 0; i < NUM_PERIPHERALS; i++) {
+		diagfwd_open(i, TYPE_DATA);
+		diagfwd_open(i, TYPE_CMD);
+	}
+	queue_work(driver->diag_real_time_wq, &driver->diag_real_time_work);
+	return 0;
+}
+
+static int diagfwd_mux_close(int id, int mode)
+{
+	uint8_t i;
+
+	switch (mode) {
+#ifdef CONFIG_DIAG_OVER_USB
+	case DIAG_USB_MODE:
+		driver->usb_connected = 0;
+		break;
+#endif
+	case DIAG_MEMORY_DEVICE_MODE:
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	if ((driver->logging_mode == DIAG_MULTI_MODE &&
+		driver->md_session_mode == DIAG_MD_NONE) ||
+		(driver->md_session_mode == DIAG_MD_PERIPHERAL)) {
+		/*
+		 * This case indicates that the USB is removed but a client
+		 * is still running in the background in Memory Device mode.
+		 */
+	} else {
+		/*
+		 * With the sysfs parameter to clear masks set, peripheral
+		 * masks are cleared on ODL exit and USB disconnection, and
+		 * buffers are not marked busy. This enables reading and
+		 * dropping stale packets.
+		 *
+		 * With the sysfs parameter to clear masks cleared, masks
+		 * are left intact and buffers are marked busy to ensure
+		 * traffic generated by the peripherals is not read.
+		 */
+		if (!(diag_mask_param())) {
+			for (i = 0; i < NUM_PERIPHERALS; i++) {
+				diagfwd_close(i, TYPE_DATA);
+				diagfwd_close(i, TYPE_CMD);
+			}
+		}
+		/* Re-enable HDLC encoding */
+		pr_debug("diag: In %s, re-enabling HDLC encoding\n",
+		       __func__);
+		mutex_lock(&driver->hdlc_disable_mutex);
+		if (driver->md_session_mode == DIAG_MD_NONE)
+			driver->hdlc_disabled = 0;
+		mutex_unlock(&driver->hdlc_disable_mutex);
+		queue_work(driver->diag_wq,
+			&(driver->update_user_clients));
+	}
+	queue_work(driver->diag_real_time_wq,
+		   &driver->diag_real_time_work);
+	return 0;
+}
+
+static uint8_t hdlc_reset;
+
+static void hdlc_reset_timer_start(int pid)
+{
+	struct diag_md_session_t *info = NULL;
+
+	mutex_lock(&driver->md_session_lock);
+	info = diag_md_session_get_pid(pid);
+	if (!hdlc_timer_in_progress) {
+		hdlc_timer_in_progress = 1;
+		if (info)
+			mod_timer(&info->hdlc_reset_timer,
+			  jiffies + msecs_to_jiffies(200));
+		else
+			mod_timer(&driver->hdlc_reset_timer,
+			  jiffies + msecs_to_jiffies(200));
+	}
+	mutex_unlock(&driver->md_session_lock);
+}
+
+static void hdlc_reset_timer_func(unsigned long data)
+{
+	pr_debug("diag: In %s, re-enabling HDLC encoding\n",
+		       __func__);
+	if (hdlc_reset) {
+		driver->hdlc_disabled = 0;
+		queue_work(driver->diag_wq,
+			&(driver->update_user_clients));
+	}
+	hdlc_timer_in_progress = 0;
+}
+
+void diag_md_hdlc_reset_timer_func(unsigned long pid)
+{
+	struct diag_md_session_t *session_info = NULL;
+
+	pr_debug("diag: In %s, re-enabling HDLC encoding\n",
+		       __func__);
+	if (hdlc_reset) {
+		session_info = diag_md_session_get_pid(pid);
+		if (session_info)
+			session_info->hdlc_disabled = 0;
+		queue_work(driver->diag_wq,
+			&(driver->update_md_clients));
+	}
+	hdlc_timer_in_progress = 0;
+}
+
+static void diag_hdlc_start_recovery(unsigned char *buf, int len,
+				     int pid)
+{
+	int i;
+	static uint32_t bad_byte_counter;
+	unsigned char *start_ptr = NULL;
+	struct diag_pkt_frame_t *actual_pkt = NULL;
+	struct diag_md_session_t *info = NULL;
+
+	hdlc_reset = 1;
+	hdlc_reset_timer_start(pid);
+
+	for (i = 0; i < len; i++) {
+		/* Re-check the candidate frame at each scan offset */
+		actual_pkt = (struct diag_pkt_frame_t *)&buf[i];
+		if (actual_pkt->start == CONTROL_CHAR &&
+			actual_pkt->version == 1 &&
+			actual_pkt->length < len &&
+			(*(uint8_t *)(buf + i +
+			sizeof(struct diag_pkt_frame_t) +
+			actual_pkt->length) == CONTROL_CHAR)) {
+			start_ptr = &buf[i];
+			break;
+		}
+		bad_byte_counter++;
+		if (bad_byte_counter > (DIAG_MAX_REQ_SIZE +
+				sizeof(struct diag_pkt_frame_t) + 1)) {
+			bad_byte_counter = 0;
+			pr_err("diag: In %s, re-enabling HDLC encoding\n",
+					__func__);
+			mutex_lock(&driver->hdlc_disable_mutex);
+			mutex_lock(&driver->md_session_lock);
+			info = diag_md_session_get_pid(pid);
+			if (info)
+				info->hdlc_disabled = 0;
+			else
+				driver->hdlc_disabled = 0;
+			mutex_unlock(&driver->md_session_lock);
+			mutex_unlock(&driver->hdlc_disable_mutex);
+			diag_update_md_clients(HDLC_SUPPORT_TYPE);
+
+			return;
+		}
+	}
+
+	if (start_ptr) {
+		/* Discard any partial packet reads */
+		mutex_lock(&driver->hdlc_recovery_mutex);
+		driver->incoming_pkt.processing = 0;
+		mutex_unlock(&driver->hdlc_recovery_mutex);
+		diag_process_non_hdlc_pkt(start_ptr, len - i, pid);
+	}
+}
+
+void diag_process_non_hdlc_pkt(unsigned char *buf, int len, int pid)
+{
+	int err = 0;
+	uint16_t pkt_len = 0;
+	uint32_t read_bytes = 0;
+	const uint32_t header_len = sizeof(struct diag_pkt_frame_t);
+	struct diag_pkt_frame_t *actual_pkt = NULL;
+	unsigned char *data_ptr = NULL;
+	struct diag_partial_pkt_t *partial_pkt = NULL;
+
+	mutex_lock(&driver->hdlc_recovery_mutex);
+	if (!buf || len <= 0) {
+		mutex_unlock(&driver->hdlc_recovery_mutex);
+		return;
+	}
+	partial_pkt = &driver->incoming_pkt;
+	if (!partial_pkt->processing) {
+		mutex_unlock(&driver->hdlc_recovery_mutex);
+		goto start;
+	}
+
+	if (partial_pkt->remaining > len) {
+		if ((partial_pkt->read_len + len) > partial_pkt->capacity) {
+			pr_err("diag: Invalid length %d, %d received in %s\n",
+			       partial_pkt->read_len, len, __func__);
+			mutex_unlock(&driver->hdlc_recovery_mutex);
+			goto end;
+		}
+		memcpy(partial_pkt->data + partial_pkt->read_len, buf, len);
+		read_bytes += len;
+		buf += read_bytes;
+		partial_pkt->read_len += len;
+		partial_pkt->remaining -= len;
+	} else {
+		if ((partial_pkt->read_len + partial_pkt->remaining) >
+						partial_pkt->capacity) {
+			pr_err("diag: Invalid length during partial read %d, %d received in %s\n",
+			       partial_pkt->read_len,
+			       partial_pkt->remaining, __func__);
+			mutex_unlock(&driver->hdlc_recovery_mutex);
+			goto end;
+		}
+		memcpy(partial_pkt->data + partial_pkt->read_len, buf,
+						partial_pkt->remaining);
+		read_bytes += partial_pkt->remaining;
+		buf += read_bytes;
+		partial_pkt->read_len += partial_pkt->remaining;
+		partial_pkt->remaining = 0;
+	}
+
+	if (partial_pkt->remaining == 0) {
+		actual_pkt = (struct diag_pkt_frame_t *)(partial_pkt->data);
+		data_ptr = partial_pkt->data + header_len;
+		if (*(uint8_t *)(data_ptr + actual_pkt->length) !=
+						CONTROL_CHAR) {
+			mutex_unlock(&driver->hdlc_recovery_mutex);
+			diag_hdlc_start_recovery(buf, len, pid);
+			mutex_lock(&driver->hdlc_recovery_mutex);
+		}
+		err = diag_process_apps_pkt(data_ptr,
+					    actual_pkt->length, pid);
+		if (err) {
+			pr_err("diag: In %s, unable to process incoming data packet, err: %d\n",
+			       __func__, err);
+			mutex_unlock(&driver->hdlc_recovery_mutex);
+			goto end;
+		}
+		partial_pkt->read_len = 0;
+		partial_pkt->total_len = 0;
+		partial_pkt->processing = 0;
+		mutex_unlock(&driver->hdlc_recovery_mutex);
+		goto start;
+	}
+	mutex_unlock(&driver->hdlc_recovery_mutex);
+	goto end;
+
+start:
+	while (read_bytes < len) {
+		actual_pkt = (struct diag_pkt_frame_t *)buf;
+		pkt_len = actual_pkt->length;
+
+		if (actual_pkt->start != CONTROL_CHAR) {
+			diag_hdlc_start_recovery(buf, len, pid);
+			diag_send_error_rsp(buf, len, pid);
+			goto end;
+		}
+		mutex_lock(&driver->hdlc_recovery_mutex);
+		if (pkt_len + header_len > partial_pkt->capacity) {
+			pr_err("diag: In %s, incoming data is too large for the request buffer %d\n",
+			       __func__, pkt_len);
+			mutex_unlock(&driver->hdlc_recovery_mutex);
+			diag_hdlc_start_recovery(buf, len, pid);
+			break;
+		}
+		if ((pkt_len + header_len) > (len - read_bytes)) {
+			partial_pkt->read_len = len - read_bytes;
+			partial_pkt->total_len = pkt_len + header_len;
+			partial_pkt->remaining = partial_pkt->total_len -
+						 partial_pkt->read_len;
+			partial_pkt->processing = 1;
+			memcpy(partial_pkt->data, buf, partial_pkt->read_len);
+			mutex_unlock(&driver->hdlc_recovery_mutex);
+			break;
+		}
+		data_ptr = buf + header_len;
+		if (*(uint8_t *)(data_ptr + actual_pkt->length) !=
+						CONTROL_CHAR) {
+			mutex_unlock(&driver->hdlc_recovery_mutex);
+			diag_hdlc_start_recovery(buf, len, pid);
+			mutex_lock(&driver->hdlc_recovery_mutex);
+		} else
+			hdlc_reset = 0;
+		err = diag_process_apps_pkt(data_ptr,
+					    actual_pkt->length, pid);
+		if (err) {
+			mutex_unlock(&driver->hdlc_recovery_mutex);
+			break;
+		}
+		read_bytes += header_len + pkt_len + 1;
+		buf += header_len + pkt_len + 1; /* advance to next pkt */
+		mutex_unlock(&driver->hdlc_recovery_mutex);
+	}
+end:
+	return;
+}
+
+static int diagfwd_mux_read_done(unsigned char *buf, int len, int ctxt)
+{
+	if (!buf || len <= 0)
+		return -EINVAL;
+
+	if (!driver->hdlc_disabled)
+		diag_process_hdlc_pkt(buf, len, 0);
+	else
+		diag_process_non_hdlc_pkt(buf, len, 0);
+
+	diag_mux_queue_read(ctxt);
+	return 0;
+}
+
+static int diagfwd_mux_write_done(unsigned char *buf, int len, int buf_ctxt,
+				  int ctxt)
+{
+	unsigned long flags;
+	int peripheral = -1;
+	int type = -1;
+	int num = -1;
+
+	if (!buf || len < 0)
+		return -EINVAL;
+
+	peripheral = GET_BUF_PERIPHERAL(buf_ctxt);
+	type = GET_BUF_TYPE(buf_ctxt);
+	num = GET_BUF_NUM(buf_ctxt);
+
+	switch (type) {
+	case TYPE_DATA:
+		if (peripheral >= 0 && peripheral < NUM_PERIPHERALS) {
+			DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+			"Marking buffer as free after write done p: %d, t: %d, buf_num: %d\n",
+				peripheral, type, num);
+			diagfwd_write_done(peripheral, type, num);
+			diag_ws_on_copy(DIAG_WS_MUX);
+		} else if (peripheral == APPS_DATA) {
+			diagmem_free(driver, (unsigned char *)buf,
+				     POOL_TYPE_HDLC);
+			buf = NULL;
+		} else {
+			pr_err_ratelimited("diag: Invalid peripheral %d in %s, type: %d\n",
+					   peripheral, __func__, type);
+		}
+		break;
+	case TYPE_CMD:
+		if (peripheral >= 0 && peripheral < NUM_PERIPHERALS &&
+			num != TYPE_CMD) {
+			DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+			"Marking buffer as free after write done p: %d, t: %d, buf_num: %d\n",
+			peripheral, type, num);
+			diagfwd_write_done(peripheral, type, num);
+		} else if (peripheral == APPS_DATA ||
+			(peripheral >= 0 && peripheral < NUM_PERIPHERALS &&
+			num == TYPE_CMD)) {
+			DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+			"Marking APPS response buffer free after write done for p: %d, t: %d, buf_num: %d\n",
+			peripheral, type, num);
+			spin_lock_irqsave(&driver->rsp_buf_busy_lock, flags);
+			driver->rsp_buf_busy = 0;
+			driver->encoded_rsp_len = 0;
+			spin_unlock_irqrestore(&driver->rsp_buf_busy_lock,
+					       flags);
+		}
+		break;
+	default:
+		pr_err_ratelimited("diag: Incorrect data type %d, buf_ctxt: %d in %s\n",
+				   type, buf_ctxt, __func__);
+		break;
+	}
+
+	return 0;
+}
+
+static struct diag_mux_ops diagfwd_mux_ops = {
+	.open = diagfwd_mux_open,
+	.close = diagfwd_mux_close,
+	.read_done = diagfwd_mux_read_done,
+	.write_done = diagfwd_mux_write_done
+};
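+
+/*
+ * These ops are handed to the mux layer in diagfwd_init() below via
+ * diag_mux_register(DIAG_LOCAL_PROC, ...). The mux invokes open/close on
+ * USB connect/disconnect or memory-device session changes, read_done for
+ * inbound request buffers, and write_done to recycle outbound buffers.
+ */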
+
+int diagfwd_init(void)
+{
+	int ret;
+	int i;
+
+	wrap_enabled = 0;
+	wrap_count = 0;
+	driver->use_device_tree = has_device_tree();
+	for (i = 0; i < DIAG_NUM_PROC; i++)
+		driver->real_time_mode[i] = 1;
+	driver->supports_separate_cmdrsp = 1;
+	driver->supports_apps_hdlc_encoding = 1;
+	driver->supports_apps_header_untagging = 1;
+	driver->supports_pd_buffering = 1;
+	for (i = 0; i < NUM_PERIPHERALS; i++)
+		driver->peripheral_untag[i] = 0;
+	mutex_init(&driver->diag_hdlc_mutex);
+	mutex_init(&driver->diag_cntl_mutex);
+	mutex_init(&driver->mode_lock);
+	driver->encoded_rsp_buf = kzalloc(DIAG_MAX_HDLC_BUF_SIZE +
+				APF_DIAG_PADDING, GFP_KERNEL);
+	if (!driver->encoded_rsp_buf)
+		goto err;
+	kmemleak_not_leak(driver->encoded_rsp_buf);
+	hdlc_decode = kzalloc(sizeof(struct diag_hdlc_decode_type),
+			      GFP_KERNEL);
+	if (!hdlc_decode)
+		goto err;
+	setup_timer(&driver->hdlc_reset_timer, hdlc_reset_timer_func, 0);
+	kmemleak_not_leak(hdlc_decode);
+	driver->encoded_rsp_len = 0;
+	driver->rsp_buf_busy = 0;
+	spin_lock_init(&driver->rsp_buf_busy_lock);
+	driver->user_space_data_busy = 0;
+	driver->hdlc_buf_len = 0;
+	INIT_LIST_HEAD(&driver->cmd_reg_list);
+	driver->cmd_reg_count = 0;
+	mutex_init(&driver->cmd_reg_mutex);
+
+	for (i = 0; i < NUM_PERIPHERALS; i++) {
+		driver->feature[i].separate_cmd_rsp = 0;
+		driver->feature[i].stm_support = DISABLE_STM;
+		driver->feature[i].rcvd_feature_mask = 0;
+		driver->feature[i].peripheral_buffering = 0;
+		driver->feature[i].pd_buffering = 0;
+		driver->feature[i].encode_hdlc = 0;
+		driver->feature[i].untag_header =
+			DISABLE_PKT_HEADER_UNTAGGING;
+		driver->feature[i].mask_centralization = 0;
+		driver->feature[i].log_on_demand = 0;
+		driver->feature[i].sent_feature_mask = 0;
+	}
+
+	for (i = 0; i < NUM_MD_SESSIONS; i++) {
+		driver->buffering_mode[i].peripheral = i;
+		driver->buffering_mode[i].mode = DIAG_BUFFERING_MODE_STREAMING;
+		driver->buffering_mode[i].high_wm_val = DEFAULT_HIGH_WM_VAL;
+		driver->buffering_mode[i].low_wm_val = DEFAULT_LOW_WM_VAL;
+	}
+
+	for (i = 0; i < NUM_STM_PROCESSORS; i++) {
+		driver->stm_state_requested[i] = DISABLE_STM;
+		driver->stm_state[i] = DISABLE_STM;
+	}
+
+	if (driver->hdlc_buf == NULL) {
+		driver->hdlc_buf = kzalloc(DIAG_MAX_HDLC_BUF_SIZE, GFP_KERNEL);
+		if (!driver->hdlc_buf)
+			goto err;
+		kmemleak_not_leak(driver->hdlc_buf);
+	}
+	if (driver->user_space_data_buf == NULL)
+		driver->user_space_data_buf = kzalloc(USER_SPACE_DATA,
+							GFP_KERNEL);
+	if (driver->user_space_data_buf == NULL)
+		goto err;
+	kmemleak_not_leak(driver->user_space_data_buf);
+	if (driver->client_map == NULL &&
+	    (driver->client_map = kzalloc
+	     ((driver->num_clients) * sizeof(struct diag_client_map),
+		   GFP_KERNEL)) == NULL)
+		goto err;
+	kmemleak_not_leak(driver->client_map);
+	if (driver->data_ready == NULL &&
+	     (driver->data_ready = kzalloc(driver->num_clients * sizeof(int)
+							, GFP_KERNEL)) == NULL)
+		goto err;
+	kmemleak_not_leak(driver->data_ready);
+	for (i = 0; i < THRESHOLD_CLIENT_LIMIT; i++)
+		atomic_set(&driver->data_ready_notif[i], 0);
+	if (driver->apps_req_buf == NULL) {
+		driver->apps_req_buf = kzalloc(DIAG_MAX_REQ_SIZE, GFP_KERNEL);
+		if (!driver->apps_req_buf)
+			goto err;
+		kmemleak_not_leak(driver->apps_req_buf);
+	}
+	if (driver->dci_pkt_buf == NULL) {
+		driver->dci_pkt_buf = kzalloc(DCI_BUF_SIZE, GFP_KERNEL);
+		if (!driver->dci_pkt_buf)
+			goto err;
+		kmemleak_not_leak(driver->dci_pkt_buf);
+	}
+	if (driver->apps_rsp_buf == NULL) {
+		driver->apps_rsp_buf = kzalloc(DIAG_MAX_RSP_SIZE, GFP_KERNEL);
+		if (driver->apps_rsp_buf == NULL)
+			goto err;
+		kmemleak_not_leak(driver->apps_rsp_buf);
+	}
+	driver->diag_wq = create_singlethread_workqueue("diag_wq");
+	if (!driver->diag_wq)
+		goto err;
+	ret = diag_mux_register(DIAG_LOCAL_PROC, DIAG_LOCAL_PROC,
+				&diagfwd_mux_ops);
+	if (ret) {
+		pr_err("diag: Unable to register with USB, err: %d\n", ret);
+		goto err;
+	}
+
+	return 0;
+err:
+	pr_err("diag: In %s, couldn't initialize diag\n", __func__);
+
+	diag_usb_exit(DIAG_USB_LOCAL);
+	kfree(driver->encoded_rsp_buf);
+	kfree(driver->hdlc_buf);
+	kfree(driver->client_map);
+	kfree(driver->data_ready);
+	kfree(driver->apps_req_buf);
+	kfree(driver->dci_pkt_buf);
+	kfree(driver->apps_rsp_buf);
+	kfree(hdlc_decode);
+	kfree(driver->user_space_data_buf);
+	if (driver->diag_wq)
+		destroy_workqueue(driver->diag_wq);
+	return -ENOMEM;
+}
+
+void diagfwd_exit(void)
+{
+	kfree(driver->encoded_rsp_buf);
+	kfree(driver->hdlc_buf);
+	kfree(hdlc_decode);
+	kfree(driver->client_map);
+	kfree(driver->data_ready);
+	kfree(driver->apps_req_buf);
+	kfree(driver->dci_pkt_buf);
+	kfree(driver->apps_rsp_buf);
+	kfree(driver->user_space_data_buf);
+	destroy_workqueue(driver->diag_wq);
+}
diff -Nruw linux-4.4.115-fbx/drivers/char/diag./diagfwd_cntl.c linux-4.4.115-fbx/drivers/char/diag/diagfwd_cntl.c
--- linux-4.4.115-fbx/drivers/char/diag./diagfwd_cntl.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/char/diag/diagfwd_cntl.c	2019-10-29 09:26:23.453201319 +0100
@@ -0,0 +1,1566 @@
+/* Copyright (c) 2011-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/slab.h>
+#include <linux/diagchar.h>
+#include <linux/kmemleak.h>
+#include <linux/delay.h>
+#include "diagchar.h"
+#include "diagfwd.h"
+#include "diagfwd_cntl.h"
+#include "diagfwd_peripheral.h"
+#include "diagfwd_bridge.h"
+#include "diag_dci.h"
+#include "diagmem.h"
+#include "diag_masks.h"
+#include "diag_ipc_logging.h"
+#include "diag_mux.h"
+
+#define FEATURE_SUPPORTED(x)	((feature_mask << (i * 8)) & (1 << x))
+
+/* tracks which peripheral is undergoing SSR */
+static uint16_t reg_dirty;
+static void diag_notify_md_client(uint8_t peripheral, int data);
+
+static void diag_mask_update_work_fn(struct work_struct *work)
+{
+	uint8_t peripheral;
+
+	for (peripheral = 0; peripheral < NUM_PERIPHERALS; peripheral++) {
+		if (!(driver->mask_update & PERIPHERAL_MASK(peripheral)))
+			continue;
+		mutex_lock(&driver->cntl_lock);
+		driver->mask_update ^= PERIPHERAL_MASK(peripheral);
+		mutex_unlock(&driver->cntl_lock);
+		diag_send_updates_peripheral(peripheral);
+	}
+}
+
+void diag_cntl_channel_open(struct diagfwd_info *p_info)
+{
+	if (!p_info)
+		return;
+	driver->mask_update |= PERIPHERAL_MASK(p_info->peripheral);
+	queue_work(driver->cntl_wq, &driver->mask_update_work);
+	diag_notify_md_client(p_info->peripheral, DIAG_STATUS_OPEN);
+}
+
+void diag_cntl_channel_close(struct diagfwd_info *p_info)
+{
+	uint8_t peripheral;
+
+	if (!p_info)
+		return;
+
+	peripheral = p_info->peripheral;
+	if (peripheral >= NUM_PERIPHERALS)
+		return;
+
+	driver->feature[peripheral].sent_feature_mask = 0;
+	driver->feature[peripheral].rcvd_feature_mask = 0;
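+	/*
+	 * Mark this peripheral dirty so diag_cntl_process_read_data()
+	 * drops registration packets that race with the purge below;
+	 * the bit is cleared again once the table has been cleaned.
+	 */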
+	reg_dirty |= PERIPHERAL_MASK(peripheral);
+	diag_cmd_remove_reg_by_proc(peripheral);
+	driver->feature[peripheral].stm_support = DISABLE_STM;
+	driver->feature[peripheral].log_on_demand = 0;
+	driver->stm_state[peripheral] = DISABLE_STM;
+	driver->stm_state_requested[peripheral] = DISABLE_STM;
+	reg_dirty ^= PERIPHERAL_MASK(peripheral);
+	diag_notify_md_client(peripheral, DIAG_STATUS_CLOSED);
+}
+
+static void diag_stm_update_work_fn(struct work_struct *work)
+{
+	uint8_t i;
+	uint16_t peripheral_mask = 0;
+	int err = 0;
+
+	mutex_lock(&driver->cntl_lock);
+	peripheral_mask = driver->stm_peripheral;
+	driver->stm_peripheral = 0;
+	mutex_unlock(&driver->cntl_lock);
+
+	if (peripheral_mask == 0)
+		return;
+
+	for (i = 0; i < NUM_PERIPHERALS; i++) {
+		if (!driver->feature[i].stm_support)
+			continue;
+		if (peripheral_mask & PERIPHERAL_MASK(i)) {
+			err = diag_send_stm_state(i,
+				(uint8_t)(driver->stm_state_requested[i]));
+			if (!err) {
+				driver->stm_state[i] =
+					driver->stm_state_requested[i];
+			}
+		}
+	}
+}
+
+void diag_notify_md_client(uint8_t peripheral, int data)
+{
+	int stat = 0;
+	struct siginfo info;
+	struct pid *pid_struct;
+	struct task_struct *result;
+
+	if (peripheral >= NUM_PERIPHERALS)
+		return;
+
+	if (driver->logging_mode != DIAG_MEMORY_DEVICE_MODE)
+		return;
+
+	mutex_lock(&driver->md_session_lock);
+	memset(&info, 0, sizeof(struct siginfo));
+	info.si_code = SI_QUEUE;
+	info.si_int = (PERIPHERAL_MASK(peripheral) | data);
+	info.si_signo = SIGCONT;
+
+	if (!driver->md_session_map[peripheral] ||
+		driver->md_session_map[peripheral]->pid <= 0) {
+		pr_err("diag: md_session_map[%d] is invalid\n", peripheral);
+		mutex_unlock(&driver->md_session_lock);
+		return;
+	}
+
+	pid_struct = find_get_pid(
+			driver->md_session_map[peripheral]->pid);
+	DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+		"md_session_map[%d] pid = %d task = %pK\n",
+		peripheral,
+		driver->md_session_map[peripheral]->pid,
+		driver->md_session_map[peripheral]->task);
+
+	if (pid_struct) {
+		result = get_pid_task(pid_struct, PIDTYPE_PID);
+
+		if (!result) {
+			DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+				"diag: md_session_map[%d] with pid = %d Exited..\n",
+				peripheral,
+				driver->md_session_map[peripheral]->pid);
+			mutex_unlock(&driver->md_session_lock);
+			return;
+		}
+
+		if (driver->md_session_map[peripheral] &&
+			driver->md_session_map[peripheral]->task == result) {
+			stat = send_sig_info(info.si_signo,
+					&info, result);
+			if (stat)
+				pr_err("diag: Err sending signal to memory device client, signal data: 0x%x, stat: %d\n",
+					info.si_int, stat);
+		} else
+			pr_err("diag: md_session_map[%d] data is corrupted, signal data: 0x%x, stat: %d\n",
+				peripheral, info.si_int, stat);
+	}
+	mutex_unlock(&driver->md_session_lock);
+}
+
+static void process_pd_status(uint8_t *buf, uint32_t len,
+			      uint8_t peripheral)
+{
+	struct diag_ctrl_msg_pd_status *pd_msg = NULL;
+	int status = DIAG_STATUS_CLOSED;
+
+	if (!buf || peripheral >= NUM_PERIPHERALS || len < sizeof(*pd_msg))
+		return;
+
+	pd_msg = (struct diag_ctrl_msg_pd_status *)buf;
+	status = (pd_msg->status == 0) ? DIAG_STATUS_OPEN : DIAG_STATUS_CLOSED;
+	diag_notify_md_client(peripheral, status);
+}
+
+static void enable_stm_feature(uint8_t peripheral)
+{
+	if (peripheral >= NUM_PERIPHERALS)
+		return;
+
+	mutex_lock(&driver->cntl_lock);
+	driver->feature[peripheral].stm_support = ENABLE_STM;
+	driver->stm_peripheral |= PERIPHERAL_MASK(peripheral);
+	mutex_unlock(&driver->cntl_lock);
+
+	queue_work(driver->cntl_wq, &(driver->stm_update_work));
+}
+
+static void enable_socket_feature(uint8_t peripheral)
+{
+	if (peripheral >= NUM_PERIPHERALS)
+		return;
+
+	if (driver->supports_sockets)
+		driver->feature[peripheral].sockets_enabled = 1;
+	else
+		driver->feature[peripheral].sockets_enabled = 0;
+}
+
+static void process_hdlc_encoding_feature(uint8_t peripheral)
+{
+	if (peripheral >= NUM_PERIPHERALS)
+		return;
+
+	if (driver->supports_apps_hdlc_encoding) {
+		driver->feature[peripheral].encode_hdlc =
+					ENABLE_APPS_HDLC_ENCODING;
+	} else {
+		driver->feature[peripheral].encode_hdlc =
+					DISABLE_APPS_HDLC_ENCODING;
+	}
+}
+
+static void process_upd_header_untagging_feature(uint8_t peripheral)
+{
+	if (peripheral >= NUM_PERIPHERALS)
+		return;
+
+	if (driver->supports_apps_header_untagging) {
+		driver->feature[peripheral].untag_header =
+					ENABLE_PKT_HEADER_UNTAGGING;
+	} else {
+		driver->feature[peripheral].untag_header =
+					DISABLE_PKT_HEADER_UNTAGGING;
+	}
+}
+
+static void process_command_deregistration(uint8_t *buf, uint32_t len,
+					   uint8_t peripheral)
+{
+	uint8_t *ptr = buf;
+	int i;
+	int header_len = sizeof(struct diag_ctrl_cmd_dereg);
+	int read_len = 0;
+	struct diag_ctrl_cmd_dereg *dereg = NULL;
+	struct cmd_code_range *range = NULL;
+	struct diag_cmd_reg_entry_t del_entry;
+
+	/*
+	 * Perform Basic sanity. The len field is the size of the data payload.
+	 * This doesn't include the header size.
+	 */
+	if (!buf || peripheral >= NUM_PERIPHERALS || len == 0)
+		return;
+
+	dereg = (struct diag_ctrl_cmd_dereg *)ptr;
+	ptr += header_len;
+	/* Don't account for pkt_id and length */
+	read_len += header_len - (2 * sizeof(uint32_t));
+
+	if (dereg->count_entries == 0) {
+		pr_debug("diag: In %s, received reg tbl with no entries\n",
+			 __func__);
+		return;
+	}
+
+	for (i = 0; i < dereg->count_entries && read_len < len; i++) {
+		range = (struct cmd_code_range *)ptr;
+		ptr += sizeof(struct cmd_code_range) - sizeof(uint32_t);
+		read_len += sizeof(struct cmd_code_range) - sizeof(uint32_t);
+		del_entry.cmd_code = dereg->cmd_code;
+		del_entry.subsys_id = dereg->subsysid;
+		del_entry.cmd_code_hi = range->cmd_code_hi;
+		del_entry.cmd_code_lo = range->cmd_code_lo;
+		diag_cmd_remove_reg(&del_entry, peripheral);
+	}
+
+	if (i != dereg->count_entries) {
+		pr_err("diag: In %s, reading less than available, read_len: %d, len: %d count: %d\n",
+		       __func__, read_len, len, dereg->count_entries);
+	}
+}
+
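A note on the stride above: the deregistration loop advances by
sizeof(struct cmd_code_range) - sizeof(uint32_t), which suggests that
on-wire deregistration entries carry only the cmd_code_lo/cmd_code_hi
pair and omit the struct's trailing data word, while the registration
parser below consumes full entries. A stand-alone sketch of the two
strides (hypothetical names; layout copied from diagfwd_cntl.h):

    #include <stdint.h>

    struct cmd_code_range_wire {
            uint16_t cmd_code_lo;
            uint16_t cmd_code_hi;
            uint32_t data;  /* sent for registration, omitted for deregistration */
    };

    enum {
            REG_ENTRY_STRIDE   = sizeof(struct cmd_code_range_wire),      /* 8 */
            DEREG_ENTRY_STRIDE = sizeof(struct cmd_code_range_wire)
                                 - sizeof(uint32_t),                      /* 4 */
    };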
+static void process_command_registration(uint8_t *buf, uint32_t len,
+					 uint8_t peripheral)
+{
+	uint8_t *ptr = buf;
+	int i;
+	int header_len = sizeof(struct diag_ctrl_cmd_reg);
+	int read_len = 0;
+	struct diag_ctrl_cmd_reg *reg = NULL;
+	struct cmd_code_range *range = NULL;
+	struct diag_cmd_reg_entry_t new_entry;
+
+	/*
+	 * Perform Basic sanity. The len field is the size of the data payload.
+	 * This doesn't include the header size.
+	 */
+	if (!buf || peripheral >= NUM_PERIPHERALS || len == 0)
+		return;
+
+	reg = (struct diag_ctrl_cmd_reg *)ptr;
+	ptr += header_len;
+	/* Don't account for pkt_id and length */
+	read_len += header_len - (2 * sizeof(uint32_t));
+
+	if (reg->count_entries == 0) {
+		pr_debug("diag: In %s, received reg tbl with no entries\n",
+			 __func__);
+		return;
+	}
+
+	for (i = 0; i < reg->count_entries && read_len < len; i++) {
+		range = (struct cmd_code_range *)ptr;
+		ptr += sizeof(struct cmd_code_range);
+		read_len += sizeof(struct cmd_code_range);
+		new_entry.cmd_code = reg->cmd_code;
+		new_entry.subsys_id = reg->subsysid;
+		new_entry.cmd_code_hi = range->cmd_code_hi;
+		new_entry.cmd_code_lo = range->cmd_code_lo;
+		diag_cmd_add_reg(&new_entry, peripheral, INVALID_PID);
+	}
+
+	if (i != reg->count_entries) {
+		pr_err("diag: In %s, reading less than available, read_len: %d, len: %d count: %d\n",
+		       __func__, read_len, len, reg->count_entries);
+	}
+}
+
+static void diag_close_transport_work_fn(struct work_struct *work)
+{
+	uint8_t transport;
+	uint8_t peripheral;
+
+	mutex_lock(&driver->cntl_lock);
+	for (peripheral = 0; peripheral < NUM_PERIPHERALS; peripheral++) {
+		if (!(driver->close_transport & PERIPHERAL_MASK(peripheral)))
+			continue;
+		driver->close_transport ^= PERIPHERAL_MASK(peripheral);
+		transport = driver->feature[peripheral].sockets_enabled ?
+					TRANSPORT_SMD : TRANSPORT_SOCKET;
+		diagfwd_close_transport(transport, peripheral);
+	}
+	mutex_unlock(&driver->cntl_lock);
+}
+
+static void process_socket_feature(uint8_t peripheral)
+{
+	if (peripheral >= NUM_PERIPHERALS)
+		return;
+
+	mutex_lock(&driver->cntl_lock);
+	driver->close_transport |= PERIPHERAL_MASK(peripheral);
+	queue_work(driver->cntl_wq, &driver->close_transport_work);
+	mutex_unlock(&driver->cntl_lock);
+}
+
+static void process_log_on_demand_feature(uint8_t peripheral)
+{
+	/* Log On Demand command is registered only on Modem */
+	if (peripheral != PERIPHERAL_MODEM)
+		return;
+
+	if (driver->feature[PERIPHERAL_MODEM].log_on_demand)
+		driver->log_on_demand_support = 1;
+	else
+		driver->log_on_demand_support = 0;
+}
+
+static void process_incoming_feature_mask(uint8_t *buf, uint32_t len,
+					  uint8_t peripheral)
+{
+	int i;
+	int header_len = sizeof(struct diag_ctrl_feature_mask);
+	int read_len = 0;
+	struct diag_ctrl_feature_mask *header = NULL;
+	uint32_t feature_mask_len = 0;
+	uint32_t feature_mask = 0;
+	uint8_t *ptr = buf;
+
+	if (!buf || peripheral >= NUM_PERIPHERALS || len == 0)
+		return;
+
+	header = (struct diag_ctrl_feature_mask *)ptr;
+	ptr += header_len;
+	feature_mask_len = header->feature_mask_len;
+
+	if (feature_mask_len == 0) {
+		pr_debug("diag: In %s, received invalid feature mask from peripheral %d\n",
+			 __func__, peripheral);
+		return;
+	}
+
+	if (feature_mask_len > FEATURE_MASK_LEN) {
+		pr_alert("diag: Receiving feature mask length more than Apps support\n");
+		feature_mask_len = FEATURE_MASK_LEN;
+	}
+
+	diag_cmd_remove_reg_by_proc(peripheral);
+
+	driver->feature[peripheral].rcvd_feature_mask = 1;
+
+	for (i = 0; i < feature_mask_len && read_len < len; i++) {
+		feature_mask = *(uint8_t *)ptr;
+		driver->feature[peripheral].feature_mask[i] = feature_mask;
+		ptr += sizeof(uint8_t);
+		read_len += sizeof(uint8_t);
+
+		if (FEATURE_SUPPORTED(F_DIAG_LOG_ON_DEMAND_APPS))
+			driver->feature[peripheral].log_on_demand = 1;
+		if (FEATURE_SUPPORTED(F_DIAG_REQ_RSP_SUPPORT))
+			driver->feature[peripheral].separate_cmd_rsp = 1;
+		if (FEATURE_SUPPORTED(F_DIAG_APPS_HDLC_ENCODE))
+			process_hdlc_encoding_feature(peripheral);
+		if (FEATURE_SUPPORTED(F_DIAG_PKT_HEADER_UNTAG))
+			process_upd_header_untagging_feature(peripheral);
+		if (FEATURE_SUPPORTED(F_DIAG_STM))
+			enable_stm_feature(peripheral);
+		if (FEATURE_SUPPORTED(F_DIAG_MASK_CENTRALIZATION))
+			driver->feature[peripheral].mask_centralization = 1;
+		if (FEATURE_SUPPORTED(F_DIAG_PERIPHERAL_BUFFERING))
+			driver->feature[peripheral].peripheral_buffering = 1;
+		if (FEATURE_SUPPORTED(F_DIAG_PD_BUFFERING))
+			driver->feature[peripheral].pd_buffering = 1;
+		if (FEATURE_SUPPORTED(F_DIAG_SOCKETS_ENABLED))
+			enable_socket_feature(peripheral);
+	}
+
+	process_socket_feature(peripheral);
+	process_log_on_demand_feature(peripheral);
+}
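FEATURE_SUPPORTED() above quietly depends on the loop variable i: on
iteration i, feature_mask holds byte i of the peripheral's mask, so
(feature_mask << (i * 8)) & (1 << x) is non-zero exactly once, on the
pass where i == x / 8. The same test as a stand-alone helper (a sketch;
the name and signature are illustrative, not part of the driver):

    #include <stdint.h>

    /* Equivalent of FEATURE_SUPPORTED(bit) over the whole mask buffer. */
    static int feature_bit_set(const uint8_t *mask, uint32_t mask_len,
                               uint32_t bit)
    {
            uint32_t byte = bit / 8;

            if (byte >= mask_len)
                    return 0;
            return (mask[byte] >> (bit % 8)) & 1;
    }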
+
+static void process_last_event_report(uint8_t *buf, uint32_t len,
+				      uint8_t peripheral)
+{
+	struct diag_ctrl_last_event_report *header = NULL;
+	uint8_t *ptr = buf;
+	uint8_t *temp = NULL;
+	uint32_t pkt_len = sizeof(uint32_t) + sizeof(uint16_t);
+	uint16_t event_size = 0;
+
+	if (!buf || peripheral >= NUM_PERIPHERALS || len != pkt_len)
+		return;
+
+	mutex_lock(&event_mask.lock);
+	header = (struct diag_ctrl_last_event_report *)ptr;
+	event_size = ((header->event_last_id / 8) + 1);
+	if (event_size >= driver->event_mask_size) {
+		DIAG_LOG(DIAG_DEBUG_MASKS,
+		"diag: receiving event mask size more that Apps can handle\n");
+		temp = krealloc(driver->event_mask->ptr, event_size,
+				GFP_KERNEL);
+		if (!temp) {
+			pr_err("diag: In %s, unable to reallocate event mask to support events from %d\n",
+			       __func__, peripheral);
+			goto err;
+		}
+		driver->event_mask->ptr = temp;
+		driver->event_mask_size = event_size;
+	}
+
+	driver->num_event_id[peripheral] = header->event_last_id;
+	if (header->event_last_id > driver->last_event_id)
+		driver->last_event_id = header->event_last_id;
+err:
+	mutex_unlock(&event_mask.lock);
+}
+
+static void process_log_range_report(uint8_t *buf, uint32_t len,
+				     uint8_t peripheral)
+{
+	int i;
+	int read_len = 0;
+	int header_len = sizeof(struct diag_ctrl_log_range_report);
+	uint8_t *ptr = buf;
+	struct diag_ctrl_log_range_report *header = NULL;
+	struct diag_ctrl_log_range *log_range = NULL;
+	struct diag_log_mask_t *mask_ptr = NULL;
+
+	if (!buf || peripheral >= NUM_PERIPHERALS || len == 0)
+		return;
+
+	header = (struct diag_ctrl_log_range_report *)ptr;
+	ptr += header_len;
+	/* Don't account for pkt_id and length */
+	read_len += header_len - (2 * sizeof(uint32_t));
+
+	driver->num_equip_id[peripheral] = header->num_ranges;
+	for (i = 0; i < header->num_ranges && read_len < len; i++) {
+		log_range = (struct diag_ctrl_log_range *)ptr;
+		ptr += sizeof(struct diag_ctrl_log_range);
+		read_len += sizeof(struct diag_ctrl_log_range);
+
+		if (log_range->equip_id >= MAX_EQUIP_ID) {
+			pr_err("diag: receiving log equip id %d more than supported equip id: %d from peripheral: %d\n",
+			       log_range->equip_id, MAX_EQUIP_ID, peripheral);
+			continue;
+		}
+		mask_ptr = (struct diag_log_mask_t *)log_mask.ptr;
+		mask_ptr = &mask_ptr[log_range->equip_id];
+
+		mutex_lock(&(mask_ptr->lock));
+		mask_ptr->num_items = log_range->num_items;
+		mask_ptr->range = LOG_ITEMS_TO_SIZE(log_range->num_items);
+		mutex_unlock(&(mask_ptr->lock));
+	}
+}
+
+static int update_msg_mask_tbl_entry(struct diag_msg_mask_t *mask,
+				     struct diag_ssid_range_t *range)
+{
+	uint32_t temp_range;
+
+	if (!mask || !range)
+		return -EIO;
+	if (range->ssid_last < range->ssid_first) {
+		pr_err("diag: In %s, invalid ssid range, first: %d, last: %d\n",
+		       __func__, range->ssid_first, range->ssid_last);
+		return -EINVAL;
+	}
+	if (range->ssid_last >= mask->ssid_last) {
+		temp_range = range->ssid_last - mask->ssid_first + 1;
+		if (temp_range > MAX_SSID_PER_RANGE) {
+			temp_range = MAX_SSID_PER_RANGE;
+			mask->ssid_last = mask->ssid_first + temp_range - 1;
+		} else
+			mask->ssid_last = range->ssid_last;
+		mask->ssid_last_tools = mask->ssid_last;
+		mask->range = temp_range;
+	}
+
+	return 0;
+}
+
+static void process_ssid_range_report(uint8_t *buf, uint32_t len,
+				      uint8_t peripheral)
+{
+	int i;
+	int j;
+	int read_len = 0;
+	int found = 0;
+	int new_size = 0;
+	int err = 0;
+	struct diag_ctrl_ssid_range_report *header = NULL;
+	struct diag_ssid_range_t *ssid_range = NULL;
+	int header_len = sizeof(struct diag_ctrl_ssid_range_report);
+	struct diag_msg_mask_t *mask_ptr = NULL;
+	uint8_t *ptr = buf;
+	uint8_t *temp = NULL;
+	uint32_t min_len = header_len - sizeof(struct diag_ctrl_pkt_header_t);
+
+	if (!buf || peripheral >= NUM_PERIPHERALS || len < min_len)
+		return;
+
+	header = (struct diag_ctrl_ssid_range_report *)ptr;
+	ptr += header_len;
+	/* Don't account for pkt_id and length */
+	read_len += header_len - (2 * sizeof(uint32_t));
+
+	mutex_lock(&driver->msg_mask_lock);
+	driver->max_ssid_count[peripheral] = header->count;
+	for (i = 0; i < header->count && read_len < len; i++) {
+		ssid_range = (struct diag_ssid_range_t *)ptr;
+		ptr += sizeof(struct diag_ssid_range_t);
+		read_len += sizeof(struct diag_ssid_range_t);
+		mask_ptr = (struct diag_msg_mask_t *)msg_mask.ptr;
+		found = 0;
+		for (j = 0; j < driver->msg_mask_tbl_count; j++, mask_ptr++) {
+			if (!mask_ptr->ptr || !ssid_range) {
+				found = 1;
+				break;
+			}
+			if (mask_ptr->ssid_first != ssid_range->ssid_first)
+				continue;
+			mutex_lock(&mask_ptr->lock);
+			err = update_msg_mask_tbl_entry(mask_ptr, ssid_range);
+			mutex_unlock(&mask_ptr->lock);
+			if (err == -ENOMEM) {
+				pr_err("diag: In %s, unable to increase the msg mask table range\n",
+				       __func__);
+			}
+			found = 1;
+			break;
+		}
+
+		if (found)
+			continue;
+
+		new_size = (driver->msg_mask_tbl_count + 1) *
+			   sizeof(struct diag_msg_mask_t);
+		DIAG_LOG(DIAG_DEBUG_MASKS,
+			"diag: receiving msg mask size more that Apps can handle\n");
+		temp = krealloc(msg_mask.ptr, new_size, GFP_KERNEL);
+		if (!temp) {
+			pr_err("diag: In %s, Unable to add new ssid table to msg mask, ssid first: %d, last: %d\n",
+			       __func__, ssid_range->ssid_first,
+			       ssid_range->ssid_last);
+			continue;
+		}
+		msg_mask.ptr = temp;
+		mask_ptr = (struct diag_msg_mask_t *)msg_mask.ptr;
+		err = diag_create_msg_mask_table_entry(mask_ptr, ssid_range);
+		if (err) {
+			pr_err("diag: In %s, Unable to create a new msg mask table entry, first: %d last: %d err: %d\n",
+			       __func__, ssid_range->ssid_first,
+			       ssid_range->ssid_last, err);
+			continue;
+		}
+		driver->msg_mask_tbl_count += 1;
+	}
+	mutex_unlock(&driver->msg_mask_lock);
+}
+
+static void diag_build_time_mask_update(uint8_t *buf,
+					struct diag_ssid_range_t *range)
+{
+	int i;
+	int j;
+	int num_items = 0;
+	int err = 0;
+	int found = 0;
+	int new_size = 0;
+	uint8_t *temp = NULL;
+	uint32_t *mask_ptr = (uint32_t *)buf;
+	uint32_t *dest_ptr = NULL;
+	struct diag_msg_mask_t *build_mask = NULL;
+
+	if (!range || !buf)
+		return;
+
+	if (range->ssid_last < range->ssid_first) {
+		pr_err("diag: In %s, invalid ssid range, first: %d, last: %d\n",
+		       __func__, range->ssid_first, range->ssid_last);
+		return;
+	}
+	mutex_lock(&driver->msg_mask_lock);
+	build_mask = (struct diag_msg_mask_t *)(driver->build_time_mask->ptr);
+	num_items = range->ssid_last - range->ssid_first + 1;
+
+	for (i = 0; i < driver->bt_msg_mask_tbl_count; i++, build_mask++) {
+		if (!build_mask->ptr) {
+			found = 1;
+			break;
+		}
+		if (build_mask->ssid_first != range->ssid_first)
+			continue;
+		found = 1;
+		mutex_lock(&build_mask->lock);
+		err = update_msg_mask_tbl_entry(build_mask, range);
+		if (err == -ENOMEM) {
+			pr_err("diag: In %s, unable to increase the msg build mask table range\n",
+			       __func__);
+		}
+		dest_ptr = build_mask->ptr;
+		for (j = 0; (j < build_mask->range) && mask_ptr && dest_ptr;
+			j++, mask_ptr++, dest_ptr++)
+			*(uint32_t *)dest_ptr |= *mask_ptr;
+		mutex_unlock(&build_mask->lock);
+		break;
+	}
+
+	if (found)
+		goto end;
+
+	new_size = (driver->bt_msg_mask_tbl_count + 1) *
+		   sizeof(struct diag_msg_mask_t);
+	DIAG_LOG(DIAG_DEBUG_MASKS,
+		"diag: receiving build time mask size more that Apps can handle\n");
+
+	temp = krealloc(driver->build_time_mask->ptr, new_size, GFP_KERNEL);
+	if (!temp) {
+		pr_err("diag: In %s, unable to create a new entry for build time mask\n",
+		       __func__);
+		goto end;
+	}
+	driver->build_time_mask->ptr = temp;
+	build_mask = (struct diag_msg_mask_t *)driver->build_time_mask->ptr;
+	err = diag_create_msg_mask_table_entry(build_mask, range);
+	if (err) {
+		pr_err("diag: In %s, Unable to create a new msg mask table entry, err: %d\n",
+		       __func__, err);
+		goto end;
+	}
+	driver->bt_msg_mask_tbl_count += 1;
+end:
+	mutex_unlock(&driver->msg_mask_lock);
+}
+
+static void process_build_mask_report(uint8_t *buf, uint32_t len,
+				      uint8_t peripheral)
+{
+	int i;
+	int read_len = 0;
+	int num_items = 0;
+	int header_len = sizeof(struct diag_ctrl_build_mask_report);
+	uint8_t *ptr = buf;
+	struct diag_ctrl_build_mask_report *header = NULL;
+	struct diag_ssid_range_t *range = NULL;
+
+	if (!buf || peripheral >= NUM_PERIPHERALS || len < header_len)
+		return;
+
+	header = (struct diag_ctrl_build_mask_report *)ptr;
+	ptr += header_len;
+	/* Don't account for pkt_id and length */
+	read_len += header_len - (2 * sizeof(uint32_t));
+
+	for (i = 0; i < header->count && read_len < len; i++) {
+		range = (struct diag_ssid_range_t *)ptr;
+		ptr += sizeof(struct diag_ssid_range_t);
+		read_len += sizeof(struct diag_ssid_range_t);
+		num_items = range->ssid_last - range->ssid_first + 1;
+		diag_build_time_mask_update(ptr, range);
+		ptr += num_items * sizeof(uint32_t);
+		read_len += num_items * sizeof(uint32_t);
+	}
+}
+
+void diag_cntl_process_read_data(struct diagfwd_info *p_info, void *buf,
+				 int len)
+{
+	uint32_t read_len = 0;
+	uint32_t header_len = sizeof(struct diag_ctrl_pkt_header_t);
+	uint8_t *ptr = buf;
+	struct diag_ctrl_pkt_header_t *ctrl_pkt = NULL;
+
+	if (!buf || len <= 0 || !p_info)
+		return;
+
+	if (reg_dirty & PERIPHERAL_MASK(p_info->peripheral)) {
+		pr_err_ratelimited("diag: dropping command registration from peripheral %d\n",
+		       p_info->peripheral);
+		return;
+	}
+
+	while (read_len + header_len < len) {
+		ctrl_pkt = (struct diag_ctrl_pkt_header_t *)ptr;
+		switch (ctrl_pkt->pkt_id) {
+		case DIAG_CTRL_MSG_REG:
+			process_command_registration(ptr, ctrl_pkt->len,
+						     p_info->peripheral);
+			break;
+		case DIAG_CTRL_MSG_DEREG:
+			process_command_deregistration(ptr, ctrl_pkt->len,
+						       p_info->peripheral);
+			break;
+		case DIAG_CTRL_MSG_FEATURE:
+			process_incoming_feature_mask(ptr, ctrl_pkt->len,
+						      p_info->peripheral);
+			break;
+		case DIAG_CTRL_MSG_LAST_EVENT_REPORT:
+			process_last_event_report(ptr, ctrl_pkt->len,
+						  p_info->peripheral);
+			break;
+		case DIAG_CTRL_MSG_LOG_RANGE_REPORT:
+			process_log_range_report(ptr, ctrl_pkt->len,
+						 p_info->peripheral);
+			break;
+		case DIAG_CTRL_MSG_SSID_RANGE_REPORT:
+			process_ssid_range_report(ptr, ctrl_pkt->len,
+						  p_info->peripheral);
+			break;
+		case DIAG_CTRL_MSG_BUILD_MASK_REPORT:
+			process_build_mask_report(ptr, ctrl_pkt->len,
+						  p_info->peripheral);
+			break;
+		case DIAG_CTRL_MSG_PD_STATUS:
+			process_pd_status(ptr, ctrl_pkt->len,
+						p_info->peripheral);
+			break;
+		default:
+			pr_debug("diag: Control packet %d not supported\n",
+				 ctrl_pkt->pkt_id);
+		}
+		ptr += header_len + ctrl_pkt->len;
+		read_len += header_len + ctrl_pkt->len;
+	}
+}
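The loop above treats the control channel as a simple TLV stream: each
packet is a diag_ctrl_pkt_header_t (pkt_id, len) followed by len payload
bytes, and the cursor advances by header plus payload until less than a
full header remains. A user-space sketch of the same framing
(illustrative names; it trusts the embedded lengths exactly as the
driver does):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    struct ctrl_hdr {
            uint32_t pkt_id;
            uint32_t len;
    };

    static void walk_ctrl_stream(const uint8_t *buf, size_t len)
    {
            size_t off = 0;
            struct ctrl_hdr hdr;

            while (off + sizeof(hdr) < len) {
                    memcpy(&hdr, buf + off, sizeof(hdr));
                    printf("pkt_id=%u payload=%u bytes\n",
                           (unsigned int)hdr.pkt_id, (unsigned int)hdr.len);
                    off += sizeof(hdr) + hdr.len;
            }
    }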
+
+#ifdef CONFIG_DIAG_OVER_USB
+static int diag_compute_real_time(int idx)
+{
+	int real_time = MODE_REALTIME;
+
+	if (driver->proc_active_mask == 0) {
+		/*
+		 * There are no DCI or Memory Device processes. Diag should
+		 * be in Real Time mode irrespective of USB connection
+		 */
+		real_time = MODE_REALTIME;
+	} else if (driver->proc_rt_vote_mask[idx] & driver->proc_active_mask) {
+		/*
+		 * At least one process is alive and is voting for Real Time
+		 * data - Diag should be in real time mode irrespective of USB
+		 * connection.
+		 */
+		real_time = MODE_REALTIME;
+	} else if (driver->usb_connected) {
+		/*
+		 * If USB is connected, check individual process. If Memory
+		 * Device Mode is active, set the mode requested by Memory
+		 * Device process. Set to realtime mode otherwise.
+		 */
+		if ((driver->proc_rt_vote_mask[idx] &
+						DIAG_PROC_MEMORY_DEVICE) == 0)
+			real_time = MODE_NONREALTIME;
+		else
+			real_time = MODE_REALTIME;
+	} else {
+		/*
+		 * We come here if USB is not connected and the active
+		 * processes are voting for Non realtime mode.
+		 */
+		real_time = MODE_NONREALTIME;
+	}
+	return real_time;
+}
+#endif
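diag_compute_real_time() boils down to: real time unless some process is
active, none of the active processes votes for real time, and either USB
is down or the memory-device process voted non-real-time. The same
decision as a pure function (a sketch; parameter names are illustrative):

    #include <stdint.h>

    /* Returns 1 for MODE_REALTIME, 0 for MODE_NONREALTIME. */
    static int compute_real_time(uint32_t active_mask, uint32_t rt_vote_mask,
                                 int usb_connected, int md_voted_rt)
    {
            if (!active_mask || (rt_vote_mask & active_mask))
                    return 1;
            if (usb_connected)
                    return md_voted_rt ? 1 : 0;
            return 0;
    }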
+
+static void diag_create_diag_mode_ctrl_pkt(unsigned char *dest_buf,
+					   uint8_t diag_id, int real_time)
+{
+	struct diag_ctrl_msg_diagmode diagmode;
+	struct diag_ctrl_msg_diagmode_v2 diagmode_v2;
+	int msg_size = sizeof(struct diag_ctrl_msg_diagmode);
+	int msg_size_2 = sizeof(struct diag_ctrl_msg_diagmode_v2);
+
+	if (!dest_buf)
+		return;
+
+	if (diag_id) {
+		diagmode_v2.ctrl_pkt_id = DIAG_CTRL_MSG_DIAGMODE;
+		diagmode_v2.ctrl_pkt_data_len = DIAG_MODE_PKT_LEN_V2;
+		diagmode_v2.version = 2;
+		diagmode_v2.sleep_vote = real_time ? 1 : 0;
+		/*
+		 * 0 - Disables real-time logging (to prevent
+		 *     frequent APPS wake-ups, etc.).
+		 * 1 - Enables real-time logging
+		 */
+		diagmode_v2.real_time = real_time;
+		diagmode_v2.use_nrt_values = 0;
+		diagmode_v2.commit_threshold = 0;
+		diagmode_v2.sleep_threshold = 0;
+		diagmode_v2.sleep_time = 0;
+		diagmode_v2.drain_timer_val = 0;
+		diagmode_v2.event_stale_timer_val = 0;
+		diagmode_v2.diag_id = diag_id;
+		memcpy(dest_buf, &diagmode_v2, msg_size_2);
+	} else {
+		diagmode.ctrl_pkt_id = DIAG_CTRL_MSG_DIAGMODE;
+		diagmode.ctrl_pkt_data_len = DIAG_MODE_PKT_LEN;
+		diagmode.version = 1;
+		diagmode.sleep_vote = real_time ? 1 : 0;
+		/*
+		 * 0 - Disables real-time logging (to prevent
+		 *     frequent APPS wake-ups, etc.).
+		 * 1 - Enables real-time logging
+		 */
+		diagmode.real_time = real_time;
+		diagmode.use_nrt_values = 0;
+		diagmode.commit_threshold = 0;
+		diagmode.sleep_threshold = 0;
+		diagmode.sleep_time = 0;
+		diagmode.drain_timer_val = 0;
+		diagmode.event_stale_timer_val = 0;
+		memcpy(dest_buf, &diagmode, msg_size);
+	}
+}
+
+void diag_update_proc_vote(uint16_t proc, uint8_t vote, int index)
+{
+	int i;
+
+	mutex_lock(&driver->real_time_mutex);
+	if (vote)
+		driver->proc_active_mask |= proc;
+	else {
+		driver->proc_active_mask &= ~proc;
+		if (index == ALL_PROC) {
+			for (i = 0; i < DIAG_NUM_PROC; i++)
+				driver->proc_rt_vote_mask[i] |= proc;
+		} else {
+			driver->proc_rt_vote_mask[index] |= proc;
+		}
+	}
+	mutex_unlock(&driver->real_time_mutex);
+}
+
+void diag_update_real_time_vote(uint16_t proc, uint8_t real_time, int index)
+{
+	int i;
+
+	if (index >= DIAG_NUM_PROC) {
+		pr_err("diag: In %s, invalid index %d\n", __func__, index);
+		return;
+	}
+
+	mutex_lock(&driver->real_time_mutex);
+	if (index == ALL_PROC) {
+		for (i = 0; i < DIAG_NUM_PROC; i++) {
+			if (real_time)
+				driver->proc_rt_vote_mask[i] |= proc;
+			else
+				driver->proc_rt_vote_mask[i] &= ~proc;
+		}
+	} else {
+		if (real_time)
+			driver->proc_rt_vote_mask[index] |= proc;
+		else
+			driver->proc_rt_vote_mask[index] &= ~proc;
+	}
+	mutex_unlock(&driver->real_time_mutex);
+}
+
+#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
+static void diag_send_diag_mode_update_remote(int token, int real_time)
+{
+	unsigned char *buf = NULL;
+	int err = 0;
+	struct diag_dci_header_t dci_header;
+	int dci_header_size = sizeof(struct diag_dci_header_t);
+	int msg_size = sizeof(struct diag_ctrl_msg_diagmode);
+	uint32_t write_len = 0;
+
+	if (token < 0 || token >= NUM_DCI_PROC) {
+		pr_err("diag: Invalid remote device channel in %s, token: %d\n",
+							__func__, token);
+		return;
+	}
+
+	if (real_time != MODE_REALTIME && real_time != MODE_NONREALTIME) {
+		pr_err("diag: Invalid real time value in %s, type: %d\n",
+							__func__, real_time);
+		return;
+	}
+
+	buf = dci_get_buffer_from_bridge(token);
+	if (!buf) {
+		pr_err("diag: In %s, unable to get dci buffers to write data\n",
+			__func__);
+		return;
+	}
+	/* Frame the DCI header */
+	dci_header.start = CONTROL_CHAR;
+	dci_header.version = 1;
+	dci_header.length = msg_size + 1;
+	dci_header.cmd_code = DCI_CONTROL_PKT_CODE;
+
+	memcpy(buf + write_len, &dci_header, dci_header_size);
+	write_len += dci_header_size;
+	diag_create_diag_mode_ctrl_pkt(buf + write_len, 0, real_time);
+	write_len += msg_size;
+	*(buf + write_len) = CONTROL_CHAR; /* End Terminator */
+	write_len += sizeof(uint8_t);
+	err = diagfwd_bridge_write(TOKEN_TO_BRIDGE(token), buf, write_len);
+	if (err != write_len) {
+		pr_err("diag: cannot send nrt mode ctrl pkt, err: %d\n", err);
+		diagmem_free(driver, buf, dci_ops_tbl[token].mempool);
+	} else {
+		driver->real_time_mode[token + 1] = real_time;
+	}
+}
+#else
+static inline void diag_send_diag_mode_update_remote(int token, int real_time)
+{
+}
+#endif
+
+#ifdef CONFIG_DIAG_OVER_USB
+void diag_real_time_work_fn(struct work_struct *work)
+{
+	int temp_real_time = MODE_REALTIME, i, j;
+	uint8_t send_update = 1;
+
+	/*
+	 * If any peripheral in the local processor is in either threshold or
+	 * circular buffering mode, don't send the real time mode control
+	 * packet.
+	 */
+	for (i = 0; i < NUM_PERIPHERALS; i++) {
+		if (!driver->feature[i].peripheral_buffering)
+			continue;
+		switch (driver->buffering_mode[i].mode) {
+		case DIAG_BUFFERING_MODE_THRESHOLD:
+		case DIAG_BUFFERING_MODE_CIRCULAR:
+			send_update = 0;
+			break;
+		}
+	}
+
+	mutex_lock(&driver->mode_lock);
+	for (i = 0; i < DIAG_NUM_PROC; i++) {
+		temp_real_time = diag_compute_real_time(i);
+		if (temp_real_time == driver->real_time_mode[i]) {
+			pr_debug("diag: did not update real time mode on proc %d, already in the req mode %d",
+				i, temp_real_time);
+			continue;
+		}
+
+		if (i == DIAG_LOCAL_PROC) {
+			if (!send_update) {
+				pr_debug("diag: In %s, cannot send real time mode pkt since one of the periperhal is in buffering mode\n",
+					 __func__);
+				break;
+			}
+			for (j = 0; j < NUM_PERIPHERALS; j++)
+				diag_send_real_time_update(j,
+						temp_real_time);
+		} else {
+			diag_send_diag_mode_update_remote(i - 1,
+							   temp_real_time);
+		}
+	}
+	mutex_unlock(&driver->mode_lock);
+
+	if (driver->real_time_update_busy > 0)
+		driver->real_time_update_busy--;
+}
+#else
+void diag_real_time_work_fn(struct work_struct *work)
+{
+	int temp_real_time = MODE_REALTIME, i, j;
+
+	for (i = 0; i < DIAG_NUM_PROC; i++) {
+		if (driver->proc_active_mask == 0) {
+			/*
+			 * There are no DCI or Memory Device processes.
+			 * Diag should be in Real Time mode.
+			 */
+			temp_real_time = MODE_REALTIME;
+		} else if (!(driver->proc_rt_vote_mask[i] &
+						driver->proc_active_mask)) {
+			/* No active process is voting for real time mode */
+			temp_real_time = MODE_NONREALTIME;
+		}
+		if (temp_real_time == driver->real_time_mode[i]) {
+			pr_debug("diag: did not update real time mode on proc %d, already in the req mode %d",
+				i, temp_real_time);
+			continue;
+		}
+
+		if (i == DIAG_LOCAL_PROC) {
+			for (j = 0; j < NUM_PERIPHERALS; j++)
+				diag_send_real_time_update(
+					j, temp_real_time);
+		} else {
+			diag_send_diag_mode_update_remote(i - 1,
+							  temp_real_time);
+		}
+	}
+
+	if (driver->real_time_update_busy > 0)
+		driver->real_time_update_busy--;
+}
+#endif
+
+static int __diag_send_real_time_update(uint8_t peripheral, int real_time,
+						uint8_t diag_id)
+{
+	char buf[sizeof(struct diag_ctrl_msg_diagmode_v2)];
+	int msg_size = 0;
+	int err = 0;
+
+	if (peripheral >= NUM_PERIPHERALS) {
+		pr_err("diag: In %s, invalid peripheral %d\n", __func__,
+		       peripheral);
+		return -EINVAL;
+	}
+
+	if (!driver->diagfwd_cntl[peripheral] ||
+	    !driver->diagfwd_cntl[peripheral]->ch_open) {
+		pr_debug("diag: In %s, control channel is not open, p: %d\n",
+			 __func__, peripheral);
+		return err;
+	}
+
+	if (real_time != MODE_NONREALTIME && real_time != MODE_REALTIME) {
+		pr_err("diag: In %s, invalid real time mode %d, peripheral: %d\n",
+		       __func__, real_time, peripheral);
+		return -EINVAL;
+	}
+
+	msg_size = (diag_id ? sizeof(struct diag_ctrl_msg_diagmode_v2) :
+		sizeof(struct diag_ctrl_msg_diagmode));
+
+	diag_create_diag_mode_ctrl_pkt(buf, diag_id, real_time);
+
+	mutex_lock(&driver->diag_cntl_mutex);
+
+	err = diagfwd_write(peripheral, TYPE_CNTL, buf, msg_size);
+
+	if (err && err != -ENODEV) {
+		pr_err("diag: In %s, unable to write, peripheral: %d, type: %d, len: %d, err: %d\n",
+		       __func__, peripheral, TYPE_CNTL,
+		       msg_size, err);
+	} else {
+		driver->real_time_mode[DIAG_LOCAL_PROC] = real_time;
+	}
+
+	mutex_unlock(&driver->diag_cntl_mutex);
+
+	return err;
+}
+
+int diag_send_real_time_update(uint8_t peripheral, int real_time)
+{
+	int i;
+
+	for (i = 0; i < NUM_PERIPHERALS; i++) {
+		if (!driver->buffering_flag[i])
+			continue;
+		/*
+		 * One of the peripherals is in buffering mode. Don't set
+		 * the RT value.
+		 */
+		return -EINVAL;
+	}
+
+	return __diag_send_real_time_update(peripheral, real_time, 0);
+}
+
+void diag_map_pd_to_diagid(uint8_t pd, uint8_t *diag_id, int *peripheral)
+{
+	switch (pd) {
+	case UPD_WLAN:
+		*diag_id = DIAG_ID_WLAN;
+		*peripheral = PERIPHERAL_MODEM;
+		break;
+	case UPD_AUDIO:
+		*diag_id = DIAG_ID_AUDIO;
+		*peripheral = PERIPHERAL_LPASS;
+		break;
+	case UPD_SENSORS:
+		*diag_id = DIAG_ID_SENSORS;
+		*peripheral = PERIPHERAL_LPASS;
+		break;
+	case PERIPHERAL_MODEM:
+		*diag_id = DIAG_ID_MPSS;
+		*peripheral = PERIPHERAL_MODEM;
+		break;
+	case PERIPHERAL_LPASS:
+		*diag_id = DIAG_ID_LPASS;
+		*peripheral = PERIPHERAL_LPASS;
+		break;
+	case PERIPHERAL_WCNSS:
+		*diag_id = 0;
+		*peripheral = PERIPHERAL_WCNSS;
+		break;
+	case PERIPHERAL_SENSORS:
+		*diag_id = 0;
+		*peripheral = PERIPHERAL_SENSORS;
+		break;
+	case PERIPHERAL_WDSP:
+		*diag_id = 0;
+		*peripheral = PERIPHERAL_WDSP;
+		break;
+	case PERIPHERAL_CDSP:
+		*diag_id = DIAG_ID_CDSP;
+		*peripheral = PERIPHERAL_CDSP;
+		break;
+	default:
+		pr_err("diag: In %s, invalid peripheral %d\n", __func__,
+			pd);
+		*peripheral = -EINVAL;
+		break;
+	}
+
+	if (*peripheral >= 0 &&
+	    !driver->feature[*peripheral].pd_buffering)
+		*diag_id = 0;
+}
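Callers use this mapping to choose between the v1 and v2 control
packets: a diag_id of 0 addresses the whole peripheral through the v1
layouts, a non-zero diag_id selects a specific PD through the _v2
variants, and a negative *peripheral flags an unknown pd. A sketch of
the calling pattern (mirroring diag_send_peripheral_buffering_mode()
below; the wrapper name is illustrative):

    static int resolve_pd(uint8_t pd, uint8_t *diag_id, int *peripheral)
    {
            diag_map_pd_to_diagid(pd, diag_id, peripheral);
            if (*peripheral < 0 || *peripheral >= NUM_PERIPHERALS)
                    return -EINVAL; /* unknown pd */
            /*
             * *diag_id has been cleared if the peripheral did not advertise
             * F_DIAG_PD_BUFFERING, which steers callers back to v1 packets.
             */
            return 0;
    }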
+
+int diag_send_peripheral_buffering_mode(struct diag_buffering_mode_t *params)
+{
+	int err = 0;
+	int mode = MODE_REALTIME;
+	int peripheral = 0;
+	uint8_t diag_id = 0;
+
+	if (!params)
+		return -EIO;
+
+	diag_map_pd_to_diagid(params->peripheral,
+		&diag_id, &peripheral);
+
+	if ((peripheral < 0) ||
+		peripheral >= NUM_PERIPHERALS) {
+		pr_err("diag: In %s, invalid peripheral %d\n", __func__,
+		       peripheral);
+		return -EINVAL;
+	}
+
+	if (!driver->buffering_flag[params->peripheral]) {
+		pr_err("diag: In %s, buffering flag not set for %d\n", __func__,
+		       params->peripheral);
+		return -EINVAL;
+	}
+
+	if (!driver->feature[peripheral].peripheral_buffering) {
+		pr_err("diag: In %s, peripheral %d doesn't support buffering\n",
+		       __func__, peripheral);
+		driver->buffering_flag[params->peripheral] = 0;
+		return -EIO;
+	}
+
+	switch (params->mode) {
+	case DIAG_BUFFERING_MODE_STREAMING:
+		mode = MODE_REALTIME;
+		break;
+	case DIAG_BUFFERING_MODE_THRESHOLD:
+	case DIAG_BUFFERING_MODE_CIRCULAR:
+		mode = MODE_NONREALTIME;
+		break;
+	default:
+		pr_err("diag: In %s, invalid tx mode %d\n", __func__,
+		       params->mode);
+		return -EINVAL;
+	}
+
+	/*
+	 * Perform sanity on watermark values. These values must be
+	 * checked irrespective of the buffering mode.
+	 */
+	if (((params->high_wm_val > DIAG_MAX_WM_VAL) ||
+	     (params->low_wm_val > DIAG_MAX_WM_VAL)) ||
+	    (params->low_wm_val > params->high_wm_val) ||
+	    ((params->low_wm_val == params->high_wm_val) &&
+	     (params->low_wm_val != DIAG_MIN_WM_VAL))) {
+		pr_err("diag: In %s, invalid watermark values, high: %d, low: %d, peripheral: %d\n",
+		       __func__, params->high_wm_val, params->low_wm_val,
+		       params->peripheral);
+		return -EINVAL;
+	}
+
+	mutex_lock(&driver->mode_lock);
+	err = diag_send_buffering_tx_mode_pkt(peripheral, diag_id, params);
+	if (err) {
+		pr_err("diag: In %s, unable to send buffering mode packet to peripheral %d, err: %d\n",
+		       __func__, peripheral, err);
+		goto fail;
+	}
+	err = diag_send_buffering_wm_values(peripheral, diag_id, params);
+	if (err) {
+		pr_err("diag: In %s, unable to send buffering wm value packet to peripheral %d, err: %d\n",
+		       __func__, peripheral, err);
+		goto fail;
+	}
+	err = __diag_send_real_time_update(peripheral, mode, diag_id);
+	if (err) {
+		pr_err("diag: In %s, unable to send mode update to peripheral %d, mode: %d, err: %d\n",
+		       __func__, peripheral, mode, err);
+		goto fail;
+	}
+	driver->buffering_mode[params->peripheral].peripheral =
+		params->peripheral;
+	driver->buffering_mode[params->peripheral].mode =
+		params->mode;
+	driver->buffering_mode[params->peripheral].low_wm_val =
+		params->low_wm_val;
+	driver->buffering_mode[params->peripheral].high_wm_val =
+		params->high_wm_val;
+	if (params->mode == DIAG_BUFFERING_MODE_STREAMING)
+		driver->buffering_flag[params->peripheral] = 0;
+fail:
+	mutex_unlock(&driver->mode_lock);
+	return err;
+}
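The watermark sanity check above accepts a pair when both values are at
most DIAG_MAX_WM_VAL, low does not exceed high, and equality is only
allowed with both at DIAG_MIN_WM_VAL (effectively disabling the
watermarks). The rule as a stand-alone predicate (a sketch using the
driver's constants; the function name is illustrative):

    #include <stdbool.h>
    #include <stdint.h>

    static bool wm_values_valid(uint8_t high, uint8_t low)
    {
            if (high > DIAG_MAX_WM_VAL || low > DIAG_MAX_WM_VAL)
                    return false;
            if (low > high)
                    return false;
            /* equal values are only meaningful at the minimum */
            return (low != high) || (low == DIAG_MIN_WM_VAL);
    }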
+
+int diag_send_stm_state(uint8_t peripheral, uint8_t stm_control_data)
+{
+	struct diag_ctrl_msg_stm stm_msg;
+	int msg_size = sizeof(struct diag_ctrl_msg_stm);
+	int err = 0;
+
+	if (peripheral >= NUM_PERIPHERALS)
+		return -EIO;
+
+	if (!driver->diagfwd_cntl[peripheral] ||
+	    !driver->diagfwd_cntl[peripheral]->ch_open) {
+		pr_debug("diag: In %s, control channel is not open, p: %d\n",
+			 __func__, peripheral);
+		return -ENODEV;
+	}
+
+	if (driver->feature[peripheral].stm_support == DISABLE_STM)
+		return -EINVAL;
+
+	stm_msg.ctrl_pkt_id = 21;
+	stm_msg.ctrl_pkt_data_len = 5;
+	stm_msg.version = 1;
+	stm_msg.control_data = stm_control_data;
+	err = diagfwd_write(peripheral, TYPE_CNTL, &stm_msg, msg_size);
+	if (err && err != -ENODEV) {
+		pr_err("diag: In %s, unable to write to smd, peripheral: %d, type: %d, len: %d, err: %d\n",
+		       __func__, peripheral, TYPE_CNTL,
+		       msg_size, err);
+	}
+
+	return err;
+}
+
+int diag_send_peripheral_drain_immediate(uint8_t pd,
+		uint8_t diag_id, int peripheral)
+{
+	int err = 0;
+	struct diag_ctrl_drain_immediate ctrl_pkt;
+	struct diag_ctrl_drain_immediate_v2 ctrl_pkt_v2;
+
+	if (!driver->feature[peripheral].peripheral_buffering) {
+		pr_debug("diag: In %s, peripheral  %d doesn't support buffering\n",
+			 __func__, peripheral);
+		return -EINVAL;
+	}
+
+	if (!driver->diagfwd_cntl[peripheral] ||
+	    !driver->diagfwd_cntl[peripheral]->ch_open) {
+		pr_debug("diag: In %s, control channel is not open, p: %d\n",
+			 __func__, peripheral);
+		return -ENODEV;
+	}
+
+	if (diag_id && driver->feature[peripheral].pd_buffering) {
+		ctrl_pkt_v2.pkt_id = DIAG_CTRL_MSG_PERIPHERAL_BUF_DRAIN_IMM;
+		/*
+		 * The length of the ctrl pkt is size of version,
+		 * diag_id and stream id
+		 */
+		ctrl_pkt_v2.len = sizeof(uint32_t) + (2 * sizeof(uint8_t));
+		ctrl_pkt_v2.version = 2;
+		ctrl_pkt_v2.diag_id = diag_id;
+		ctrl_pkt_v2.stream_id = 1;
+		err = diagfwd_write(peripheral, TYPE_CNTL, &ctrl_pkt_v2,
+				sizeof(ctrl_pkt_v2));
+		if (err && err != -ENODEV) {
+			pr_err("diag: Unable to send drain immediate ctrl packet to peripheral %d, err: %d\n",
+			peripheral, err);
+		}
+	} else {
+		ctrl_pkt.pkt_id = DIAG_CTRL_MSG_PERIPHERAL_BUF_DRAIN_IMM;
+		/*
+		 * The length of the ctrl pkt is
+		 * size of version and stream id
+		 */
+		ctrl_pkt.len = sizeof(uint32_t) + sizeof(uint8_t);
+		ctrl_pkt.version = 1;
+		ctrl_pkt.stream_id = 1;
+		err = diagfwd_write(peripheral, TYPE_CNTL, &ctrl_pkt,
+				sizeof(ctrl_pkt));
+		if (err && err != -ENODEV) {
+			pr_err("diag: Unable to send drain immediate ctrl packet to peripheral %d, err: %d\n",
+			peripheral, err);
+		}
+	}
+
+	return err;
+}
+
+int diag_send_buffering_tx_mode_pkt(uint8_t peripheral,
+		    uint8_t diag_id, struct diag_buffering_mode_t *params)
+{
+	int err = 0;
+	struct diag_ctrl_peripheral_tx_mode ctrl_pkt;
+	struct diag_ctrl_peripheral_tx_mode_v2 ctrl_pkt_v2;
+
+	if (!params)
+		return -EIO;
+
+	if (peripheral >= NUM_PERIPHERALS) {
+		pr_err("diag: In %s, invalid peripheral %d\n", __func__,
+		       peripheral);
+		return -EINVAL;
+	}
+
+	if (!driver->feature[peripheral].peripheral_buffering) {
+		pr_debug("diag: In %s, peripheral  %d doesn't support buffering\n",
+			 __func__, peripheral);
+		return -EINVAL;
+	}
+
+	switch (params->mode) {
+	case DIAG_BUFFERING_MODE_STREAMING:
+	case DIAG_BUFFERING_MODE_THRESHOLD:
+	case DIAG_BUFFERING_MODE_CIRCULAR:
+		break;
+	default:
+		pr_err("diag: In %s, invalid tx mode: %d\n", __func__,
+		       params->mode);
+		return -EINVAL;
+	}
+
+	if (diag_id &&
+		driver->feature[peripheral].pd_buffering) {
+
+		ctrl_pkt_v2.pkt_id = DIAG_CTRL_MSG_CONFIG_PERIPHERAL_TX_MODE;
+		/*
+		 * Control packet length is size of version, diag_id,
+		 * stream_id and tx_mode
+		 */
+		ctrl_pkt_v2.len = sizeof(uint32_t) + (3 * sizeof(uint8_t));
+		ctrl_pkt_v2.version = 2;
+		ctrl_pkt_v2.diag_id = diag_id;
+		ctrl_pkt_v2.stream_id = 1;
+		ctrl_pkt_v2.tx_mode = params->mode;
+
+		err = diagfwd_write(peripheral, TYPE_CNTL, &ctrl_pkt_v2,
+			sizeof(ctrl_pkt_v2));
+		if (err && err != -ENODEV) {
+			pr_err("diag: Unable to send tx_mode ctrl packet to peripheral %d, err: %d\n",
+				   peripheral, err);
+			goto fail;
+		}
+	} else {
+		ctrl_pkt.pkt_id = DIAG_CTRL_MSG_CONFIG_PERIPHERAL_TX_MODE;
+		/*
+		 * Control packet length is size of version,
+		 * stream_id and tx_mode
+		 */
+		ctrl_pkt.len = sizeof(uint32_t) + (2 * sizeof(uint8_t));
+		ctrl_pkt.version = 1;
+		ctrl_pkt.stream_id = 1;
+		ctrl_pkt.tx_mode = params->mode;
+
+		err = diagfwd_write(peripheral, TYPE_CNTL, &ctrl_pkt,
+			sizeof(ctrl_pkt));
+		if (err && err != -ENODEV) {
+			pr_err("diag: Unable to send tx_mode ctrl packet to peripheral %d, err: %d\n",
+			       peripheral, err);
+			goto fail;
+		}
+	}
+	driver->buffering_mode[params->peripheral].mode = params->mode;
+
+fail:
+	return err;
+}
+
+int diag_send_buffering_wm_values(uint8_t peripheral,
+		uint8_t diag_id, struct diag_buffering_mode_t *params)
+{
+	int err = 0;
+	struct diag_ctrl_set_wq_val ctrl_pkt;
+	struct diag_ctrl_set_wq_val_v2 ctrl_pkt_v2;
+
+	if (!params)
+		return -EIO;
+
+	if (peripheral >= NUM_PERIPHERALS) {
+		pr_err("diag: In %s, invalid peripheral %d\n", __func__,
+		       peripheral);
+		return -EINVAL;
+	}
+
+	if (!driver->feature[peripheral].peripheral_buffering) {
+		pr_debug("diag: In %s, peripheral  %d doesn't support buffering\n",
+			 __func__, peripheral);
+		return -EINVAL;
+	}
+
+	if (!driver->diagfwd_cntl[peripheral] ||
+	    !driver->diagfwd_cntl[peripheral]->ch_open) {
+		pr_debug("diag: In %s, control channel is not open, p: %d\n",
+			 __func__, peripheral);
+		return -ENODEV;
+	}
+
+	switch (params->mode) {
+	case DIAG_BUFFERING_MODE_STREAMING:
+	case DIAG_BUFFERING_MODE_THRESHOLD:
+	case DIAG_BUFFERING_MODE_CIRCULAR:
+		break;
+	default:
+		pr_err("diag: In %s, invalid tx mode: %d\n", __func__,
+		       params->mode);
+		return -EINVAL;
+	}
+
+	if (diag_id &&
+		driver->feature[peripheral].pd_buffering) {
+		ctrl_pkt_v2.pkt_id = DIAG_CTRL_MSG_CONFIG_PERIPHERAL_WMQ_VAL;
+		/*
+		 * Control packet length is size of version, diag_id,
+		 * stream_id and wmq values
+		 */
+		ctrl_pkt_v2.len = sizeof(uint32_t) + (4 * sizeof(uint8_t));
+		ctrl_pkt_v2.version = 2;
+		ctrl_pkt_v2.diag_id = diag_id;
+		ctrl_pkt_v2.stream_id = 1;
+		ctrl_pkt_v2.high_wm_val = params->high_wm_val;
+		ctrl_pkt_v2.low_wm_val = params->low_wm_val;
+
+		err = diagfwd_write(peripheral, TYPE_CNTL, &ctrl_pkt_v2,
+					sizeof(ctrl_pkt_v2));
+		if (err && err != -ENODEV) {
+			pr_err("diag: Unable to send watermark values to peripheral %d, err: %d\n",
+				   peripheral, err);
+		}
+	} else {
+		ctrl_pkt.pkt_id = DIAG_CTRL_MSG_CONFIG_PERIPHERAL_WMQ_VAL;
+		/*
+		 * Control packet length is size of version,
+		 * stream_id and wmq values
+		 */
+		ctrl_pkt.len = sizeof(uint32_t) + (3 * sizeof(uint8_t));
+		ctrl_pkt.version = 1;
+		ctrl_pkt.stream_id = 1;
+		ctrl_pkt.high_wm_val = params->high_wm_val;
+		ctrl_pkt.low_wm_val = params->low_wm_val;
+
+		err = diagfwd_write(peripheral, TYPE_CNTL, &ctrl_pkt,
+				    sizeof(ctrl_pkt));
+		if (err && err != -ENODEV) {
+			pr_err("diag: Unable to send watermark values to peripheral %d, err: %d\n",
+			       peripheral, err);
+		}
+	}
+	return err;
+}
+
+int diagfwd_cntl_init(void)
+{
+	uint8_t peripheral = 0;
+
+	reg_dirty = 0;
+	driver->polling_reg_flag = 0;
+	driver->log_on_demand_support = 1;
+	driver->stm_peripheral = 0;
+	driver->close_transport = 0;
+	for (peripheral = 0; peripheral < NUM_PERIPHERALS; peripheral++)
+		driver->buffering_flag[peripheral] = 0;
+
+	mutex_init(&driver->cntl_lock);
+	INIT_WORK(&(driver->stm_update_work), diag_stm_update_work_fn);
+	INIT_WORK(&(driver->mask_update_work), diag_mask_update_work_fn);
+	INIT_WORK(&(driver->close_transport_work),
+		  diag_close_transport_work_fn);
+
+	driver->cntl_wq = create_singlethread_workqueue("diag_cntl_wq");
+	if (!driver->cntl_wq)
+		return -ENOMEM;
+
+	return 0;
+}
+
+void diagfwd_cntl_channel_init(void)
+{
+	uint8_t peripheral;
+
+	for (peripheral = 0; peripheral < NUM_PERIPHERALS; peripheral++) {
+		diagfwd_early_open(peripheral);
+		diagfwd_open(peripheral, TYPE_CNTL);
+	}
+}
+
+void diagfwd_cntl_exit(void)
+{
+	if (driver->cntl_wq)
+		destroy_workqueue(driver->cntl_wq);
+}
diff -Nruw linux-4.4.115-fbx/drivers/char/diag./diagfwd_cntl.h linux-4.4.115-fbx/drivers/char/diag/diagfwd_cntl.h
--- linux-4.4.115-fbx/drivers/char/diag./diagfwd_cntl.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/char/diag/diagfwd_cntl.h	2019-01-22 16:16:22.959241480 +0100
@@ -0,0 +1,332 @@
+/* Copyright (c) 2011-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef DIAGFWD_CNTL_H
+#define DIAGFWD_CNTL_H
+
+/* Message registration commands */
+#define DIAG_CTRL_MSG_REG		1
+/* Message passing for DTR events */
+#define DIAG_CTRL_MSG_DTR		2
+/* Control Diag sleep vote, buffering etc */
+#define DIAG_CTRL_MSG_DIAGMODE		3
+/* Diag data based on "light" diag mask */
+#define DIAG_CTRL_MSG_DIAGDATA		4
+/* Send diag internal feature mask 'diag_int_feature_mask' */
+#define DIAG_CTRL_MSG_FEATURE		8
+/* Send Diag log mask for a particular equip id */
+#define DIAG_CTRL_MSG_EQUIP_LOG_MASK	9
+/* Send Diag event mask */
+#define DIAG_CTRL_MSG_EVENT_MASK_V2	10
+/* Send Diag F3 mask */
+#define DIAG_CTRL_MSG_F3_MASK_V2	11
+#define DIAG_CTRL_MSG_NUM_PRESETS	12
+#define DIAG_CTRL_MSG_SET_PRESET_ID	13
+#define DIAG_CTRL_MSG_LOG_MASK_WITH_PRESET_ID	14
+#define DIAG_CTRL_MSG_EVENT_MASK_WITH_PRESET_ID	15
+#define DIAG_CTRL_MSG_F3_MASK_WITH_PRESET_ID	16
+#define DIAG_CTRL_MSG_CONFIG_PERIPHERAL_TX_MODE	17
+#define DIAG_CTRL_MSG_PERIPHERAL_BUF_DRAIN_IMM	18
+#define DIAG_CTRL_MSG_CONFIG_PERIPHERAL_WMQ_VAL	19
+#define DIAG_CTRL_MSG_DCI_CONNECTION_STATUS	20
+#define DIAG_CTRL_MSG_LAST_EVENT_REPORT		22
+#define DIAG_CTRL_MSG_LOG_RANGE_REPORT		23
+#define DIAG_CTRL_MSG_SSID_RANGE_REPORT		24
+#define DIAG_CTRL_MSG_BUILD_MASK_REPORT		25
+#define DIAG_CTRL_MSG_DEREG		27
+#define DIAG_CTRL_MSG_DCI_HANDSHAKE_PKT		29
+#define DIAG_CTRL_MSG_PD_STATUS			30
+#define DIAG_CTRL_MSG_TIME_SYNC_PKT		31
+
+/*
+ * Feature Mask Definitions: Feature mask is used to specify Diag features
+ * supported by the Apps processor
+ *
+ * F_DIAG_FEATURE_MASK_SUPPORT - Denotes we support sending and receiving
+ *                               feature masks
+ * F_DIAG_LOG_ON_DEMAND_APPS - Apps responds to Log on Demand request
+ * F_DIAG_REQ_RSP_SUPPORT - Apps supports a dedicated request/response channel
+ * F_DIAG_APPS_HDLC_ENCODE - HDLC encoding is done on the forward channel
+ * F_DIAG_STM - Denotes Apps supports Diag over STM
+ */
+#define F_DIAG_FEATURE_MASK_SUPPORT		0
+#define F_DIAG_LOG_ON_DEMAND_APPS		2
+#define F_DIAG_REQ_RSP_SUPPORT			4
+#define F_DIAG_APPS_HDLC_ENCODE			6
+#define F_DIAG_STM				9
+#define F_DIAG_PERIPHERAL_BUFFERING		10
+#define F_DIAG_MASK_CENTRALIZATION		11
+#define F_DIAG_SOCKETS_ENABLED			13
+#define F_DIAG_DCI_EXTENDED_HEADER_SUPPORT	14
+#define F_DIAG_PKT_HEADER_UNTAG			16
+#define F_DIAG_PD_BUFFERING		17
+
+#define ENABLE_SEPARATE_CMDRSP	1
+#define DISABLE_SEPARATE_CMDRSP	0
+
+#define DISABLE_STM	0
+#define ENABLE_STM	1
+#define STATUS_STM	2
+
+#define UPDATE_PERIPHERAL_STM_STATE	1
+#define CLEAR_PERIPHERAL_STM_STATE	2
+
+#define ENABLE_APPS_HDLC_ENCODING	1
+#define DISABLE_APPS_HDLC_ENCODING	0
+
+#define ENABLE_PKT_HEADER_UNTAGGING		1
+#define DISABLE_PKT_HEADER_UNTAGGING	0
+
+#define DIAG_MODE_PKT_LEN		36
+#define DIAG_MODE_PKT_LEN_V2	37
+
+struct diag_ctrl_pkt_header_t {
+	uint32_t pkt_id;
+	uint32_t len;
+};
+
+struct cmd_code_range {
+	uint16_t cmd_code_lo;
+	uint16_t cmd_code_hi;
+	uint32_t data;
+};
+
+struct diag_ctrl_cmd_reg {
+	uint32_t pkt_id;
+	uint32_t len;
+	uint32_t version;
+	uint16_t cmd_code;
+	uint16_t subsysid;
+	uint16_t count_entries;
+	uint16_t port;
+} __packed;
+
+struct diag_ctrl_cmd_dereg {
+	uint32_t pkt_id;
+	uint32_t len;
+	uint32_t version;
+	uint16_t cmd_code;
+	uint16_t subsysid;
+	uint16_t count_entries;
+} __packed;
+
+struct diag_ctrl_event_mask {
+	uint32_t cmd_type;
+	uint32_t data_len;
+	uint8_t stream_id;
+	uint8_t status;
+	uint8_t event_config;
+	uint32_t event_mask_size;
+	/* Copy event mask here */
+} __packed;
+
+struct diag_ctrl_log_mask {
+	uint32_t cmd_type;
+	uint32_t data_len;
+	uint8_t stream_id;
+	uint8_t status;
+	uint8_t equip_id;
+	uint32_t num_items; /* Last log code for this equip_id */
+	uint32_t log_mask_size; /* Size of log mask stored in log_mask[] */
+	/* Copy log mask here */
+} __packed;
+
+struct diag_ctrl_msg_mask {
+	uint32_t cmd_type;
+	uint32_t data_len;
+	uint8_t stream_id;
+	uint8_t status;
+	uint8_t msg_mode;
+	uint16_t ssid_first; /* Start of range of supported SSIDs */
+	uint16_t ssid_last; /* Last SSID in range */
+	uint32_t msg_mask_size; /* ssid_last - ssid_first + 1 */
+	/* Copy msg mask here */
+} __packed;
+
+struct diag_ctrl_feature_mask {
+	uint32_t ctrl_pkt_id;
+	uint32_t ctrl_pkt_data_len;
+	uint32_t feature_mask_len;
+	/* Copy feature mask here */
+} __packed;
+
+struct diag_ctrl_msg_diagmode {
+	uint32_t ctrl_pkt_id;
+	uint32_t ctrl_pkt_data_len;
+	uint32_t version;
+	uint32_t sleep_vote;
+	uint32_t real_time;
+	uint32_t use_nrt_values;
+	uint32_t commit_threshold;
+	uint32_t sleep_threshold;
+	uint32_t sleep_time;
+	uint32_t drain_timer_val;
+	uint32_t event_stale_timer_val;
+} __packed;
+
+struct diag_ctrl_msg_diagmode_v2 {
+	uint32_t ctrl_pkt_id;
+	uint32_t ctrl_pkt_data_len;
+	uint32_t version;
+	uint32_t sleep_vote;
+	uint32_t real_time;
+	uint32_t use_nrt_values;
+	uint32_t commit_threshold;
+	uint32_t sleep_threshold;
+	uint32_t sleep_time;
+	uint32_t drain_timer_val;
+	uint32_t event_stale_timer_val;
+	uint8_t diag_id;
+} __packed;
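These two layouts account for the DIAG_MODE_PKT_LEN constants defined
above: the v1 payload is the nine uint32_t fields that follow the 8-byte
(ctrl_pkt_id, ctrl_pkt_data_len) header, i.e. 36 bytes, and v2 appends
the single diag_id byte for 37. A compile-time cross-check (a sketch
using C11 _Static_assert, which the original header does not use):

    _Static_assert(sizeof(struct diag_ctrl_msg_diagmode) ==
                   2 * sizeof(uint32_t) + DIAG_MODE_PKT_LEN,
                   "v1 payload is DIAG_MODE_PKT_LEN bytes");
    _Static_assert(sizeof(struct diag_ctrl_msg_diagmode_v2) ==
                   2 * sizeof(uint32_t) + DIAG_MODE_PKT_LEN_V2,
                   "v2 payload is DIAG_MODE_PKT_LEN_V2 bytes");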
+
+struct diag_ctrl_msg_stm {
+	uint32_t ctrl_pkt_id;
+	uint32_t ctrl_pkt_data_len;
+	uint32_t version;
+	uint8_t  control_data;
+} __packed;
+
+struct diag_ctrl_msg_time_sync {
+	uint32_t ctrl_pkt_id;
+	uint32_t ctrl_pkt_data_len;
+	uint32_t version;
+	uint8_t  time_api;
+} __packed;
+
+struct diag_ctrl_dci_status {
+	uint32_t ctrl_pkt_id;
+	uint32_t ctrl_pkt_data_len;
+	uint32_t version;
+	uint8_t count;
+} __packed;
+
+struct diag_ctrl_dci_handshake_pkt {
+	uint32_t ctrl_pkt_id;
+	uint32_t ctrl_pkt_data_len;
+	uint32_t version;
+	uint32_t magic;
+} __packed;
+
+struct diag_ctrl_msg_pd_status {
+	uint32_t ctrl_pkt_id;
+	uint32_t ctrl_pkt_data_len;
+	uint32_t version;
+	uint32_t pd_id;
+	uint8_t status;
+} __packed;
+
+struct diag_ctrl_last_event_report {
+	uint32_t pkt_id;
+	uint32_t len;
+	uint32_t version;
+	uint16_t event_last_id;
+} __packed;
+
+struct diag_ctrl_log_range_report {
+	uint32_t pkt_id;
+	uint32_t len;
+	uint32_t version;
+	uint32_t last_equip_id;
+	uint32_t num_ranges;
+} __packed;
+
+struct diag_ctrl_log_range {
+	uint32_t equip_id;
+	uint32_t num_items;
+} __packed;
+
+struct diag_ctrl_ssid_range_report {
+	uint32_t pkt_id;
+	uint32_t len;
+	uint32_t version;
+	uint32_t count;
+} __packed;
+
+struct diag_ctrl_build_mask_report {
+	uint32_t pkt_id;
+	uint32_t len;
+	uint32_t version;
+	uint32_t count;
+} __packed;
+
+struct diag_ctrl_peripheral_tx_mode {
+	uint32_t pkt_id;
+	uint32_t len;
+	uint32_t version;
+	uint8_t stream_id;
+	uint8_t tx_mode;
+} __packed;
+
+struct diag_ctrl_peripheral_tx_mode_v2 {
+	uint32_t pkt_id;
+	uint32_t len;
+	uint32_t version;
+	uint8_t diag_id;
+	uint8_t stream_id;
+	uint8_t tx_mode;
+} __packed;
+
+struct diag_ctrl_drain_immediate {
+	uint32_t pkt_id;
+	uint32_t len;
+	uint32_t version;
+	uint8_t stream_id;
+} __packed;
+
+struct diag_ctrl_drain_immediate_v2 {
+	uint32_t pkt_id;
+	uint32_t len;
+	uint32_t version;
+	uint8_t diag_id;
+	uint8_t stream_id;
+} __packed;
+
+struct diag_ctrl_set_wq_val {
+	uint32_t pkt_id;
+	uint32_t len;
+	uint32_t version;
+	uint8_t stream_id;
+	uint8_t high_wm_val;
+	uint8_t low_wm_val;
+} __packed;
+
+struct diag_ctrl_set_wq_val_v2 {
+	uint32_t pkt_id;
+	uint32_t len;
+	uint32_t version;
+	uint8_t diag_id;
+	uint8_t stream_id;
+	uint8_t high_wm_val;
+	uint8_t low_wm_val;
+} __packed;
+
+int diagfwd_cntl_init(void);
+void diagfwd_cntl_channel_init(void);
+void diagfwd_cntl_exit(void);
+void diag_cntl_channel_open(struct diagfwd_info *p_info);
+void diag_cntl_channel_close(struct diagfwd_info *p_info);
+void diag_cntl_process_read_data(struct diagfwd_info *p_info, void *buf,
+				 int len);
+int diag_send_real_time_update(uint8_t peripheral, int real_time);
+void diag_map_pd_to_diagid(uint8_t pd, uint8_t *diag_id, int *peripheral);
+int diag_send_peripheral_buffering_mode(struct diag_buffering_mode_t *params);
+void diag_update_proc_vote(uint16_t proc, uint8_t vote, int index);
+void diag_update_real_time_vote(uint16_t proc, uint8_t real_time, int index);
+void diag_real_time_work_fn(struct work_struct *work);
+int diag_send_stm_state(uint8_t peripheral, uint8_t stm_control_data);
+int diag_send_peripheral_drain_immediate(uint8_t pd,
+			uint8_t diag_id, int peripheral);
+int diag_send_buffering_tx_mode_pkt(uint8_t peripheral,
+		    uint8_t diag_id, struct diag_buffering_mode_t *params);
+int diag_send_buffering_wm_values(uint8_t peripheral,
+		    uint8_t diag_id, struct diag_buffering_mode_t *params);
+#endif
diff -Nruw linux-4.4.115-fbx/drivers/char/diag./diagfwd_glink.c linux-4.4.115-fbx/drivers/char/diag/diagfwd_glink.c
--- linux-4.4.115-fbx/drivers/char/diag./diagfwd_glink.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/char/diag/diagfwd_glink.c	2019-01-22 16:16:22.959241480 +0100
@@ -0,0 +1,830 @@
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/slab.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/sched.h>
+#include <linux/ratelimit.h>
+#include <linux/workqueue.h>
+#include <linux/pm_runtime.h>
+#include <linux/delay.h>
+#include <linux/atomic.h>
+#include <linux/diagchar.h>
+#include <linux/of.h>
+#include <linux/kmemleak.h>
+#include <soc/qcom/glink.h>
+#include "diagchar.h"
+#include "diagfwd.h"
+#include "diagfwd_peripheral.h"
+#include "diagfwd_glink.h"
+#include "diag_ipc_logging.h"
+
+struct diag_glink_info glink_data[NUM_PERIPHERALS] = {
+	{
+		.peripheral = PERIPHERAL_MODEM,
+		.type = TYPE_DATA,
+		.edge = "mpss",
+		.name = "DIAG_DATA",
+		.hdl = NULL
+	},
+	{
+		.peripheral = PERIPHERAL_LPASS,
+		.type = TYPE_DATA,
+		.edge = "lpass",
+		.name = "DIAG_DATA",
+		.hdl = NULL
+	},
+	{
+		.peripheral = PERIPHERAL_WCNSS,
+		.type = TYPE_DATA,
+		.edge = "wcnss",
+		.name = "DIAG_DATA",
+		.hdl = NULL
+	},
+	{
+		.peripheral = PERIPHERAL_SENSORS,
+		.type = TYPE_DATA,
+		.edge = "dsps",
+		.name = "DIAG_DATA",
+		.hdl = NULL
+	},
+	{
+		.peripheral = PERIPHERAL_WDSP,
+		.type = TYPE_DATA,
+		.edge = "wdsp",
+		.name = "DIAG_DATA",
+		.hdl = NULL
+	},
+	{
+		.peripheral = PERIPHERAL_CDSP,
+		.type = TYPE_DATA,
+		.edge = "cdsp",
+		.name = "DIAG_DATA",
+		.hdl = NULL
+	}
+};
+
+struct diag_glink_info glink_cntl[NUM_PERIPHERALS] = {
+	{
+		.peripheral = PERIPHERAL_MODEM,
+		.type = TYPE_CNTL,
+		.edge = "mpss",
+		.name = "DIAG_CTRL",
+		.hdl = NULL
+	},
+	{
+		.peripheral = PERIPHERAL_LPASS,
+		.type = TYPE_CNTL,
+		.edge = "lpass",
+		.name = "DIAG_CTRL",
+		.hdl = NULL
+	},
+	{
+		.peripheral = PERIPHERAL_WCNSS,
+		.type = TYPE_CNTL,
+		.edge = "wcnss",
+		.name = "DIAG_CTRL",
+		.hdl = NULL
+	},
+	{
+		.peripheral = PERIPHERAL_SENSORS,
+		.type = TYPE_CNTL,
+		.edge = "dsps",
+		.name = "DIAG_CTRL",
+		.hdl = NULL
+	},
+	{
+		.peripheral = PERIPHERAL_WDSP,
+		.type = TYPE_CNTL,
+		.edge = "wdsp",
+		.name = "DIAG_CTRL",
+		.hdl = NULL
+	},
+	{
+		.peripheral = PERIPHERAL_CDSP,
+		.type = TYPE_CNTL,
+		.edge = "cdsp",
+		.name = "DIAG_CTRL",
+		.hdl = NULL
+	}
+};
+
+struct diag_glink_info glink_dci[NUM_PERIPHERALS] = {
+	{
+		.peripheral = PERIPHERAL_MODEM,
+		.type = TYPE_DCI,
+		.edge = "mpss",
+		.name = "DIAG_DCI_DATA",
+		.hdl = NULL
+	},
+	{
+		.peripheral = PERIPHERAL_LPASS,
+		.type = TYPE_DCI,
+		.edge = "lpass",
+		.name = "DIAG_DCI_DATA",
+		.hdl = NULL
+	},
+	{
+		.peripheral = PERIPHERAL_WCNSS,
+		.type = TYPE_DCI,
+		.edge = "wcnss",
+		.name = "DIAG_DCI_DATA",
+		.hdl = NULL
+	},
+	{
+		.peripheral = PERIPHERAL_SENSORS,
+		.type = TYPE_DCI,
+		.edge = "dsps",
+		.name = "DIAG_DCI_DATA",
+		.hdl = NULL
+	},
+	{
+		.peripheral = PERIPHERAL_WDSP,
+		.type = TYPE_DCI,
+		.edge = "wdsp",
+		.name = "DIAG_DCI_DATA",
+		.hdl = NULL
+	},
+	{
+		.peripheral = PERIPHERAL_CDSP,
+		.type = TYPE_DCI,
+		.edge = "cdsp",
+		.name = "DIAG_DCI_DATA",
+		.hdl = NULL
+	}
+};
+
+struct diag_glink_info glink_cmd[NUM_PERIPHERALS] = {
+	{
+		.peripheral = PERIPHERAL_MODEM,
+		.type = TYPE_CMD,
+		.edge = "mpss",
+		.name = "DIAG_CMD",
+		.hdl = NULL
+	},
+	{
+		.peripheral = PERIPHERAL_LPASS,
+		.type = TYPE_CMD,
+		.edge = "lpass",
+		.name = "DIAG_CMD",
+		.hdl = NULL
+	},
+	{
+		.peripheral = PERIPHERAL_WCNSS,
+		.type = TYPE_CMD,
+		.edge = "wcnss",
+		.name = "DIAG_CMD",
+		.hdl = NULL
+	},
+	{
+		.peripheral = PERIPHERAL_SENSORS,
+		.type = TYPE_CMD,
+		.edge = "dsps",
+		.name = "DIAG_CMD",
+		.hdl = NULL
+	},
+	{
+		.peripheral = PERIPHERAL_WDSP,
+		.type = TYPE_CMD,
+		.edge = "wdsp",
+		.name = "DIAG_CMD",
+		.hdl = NULL
+	},
+	{
+		.peripheral = PERIPHERAL_CDSP,
+		.type = TYPE_CMD,
+		.edge = "cdsp",
+		.name = "DIAG_CMD",
+		.hdl = NULL
+	}
+};
+
+struct diag_glink_info glink_dci_cmd[NUM_PERIPHERALS] = {
+	{
+		.peripheral = PERIPHERAL_MODEM,
+		.type = TYPE_DCI_CMD,
+		.edge = "mpss",
+		.name = "DIAG_DCI_CMD",
+		.hdl = NULL
+	},
+	{
+		.peripheral = PERIPHERAL_LPASS,
+		.type = TYPE_DCI_CMD,
+		.edge = "lpass",
+		.name = "DIAG_DCI_CMD",
+		.hdl = NULL
+	},
+	{
+		.peripheral = PERIPHERAL_WCNSS,
+		.type = TYPE_DCI_CMD,
+		.edge = "wcnss",
+		.name = "DIAG_DCI_CMD",
+		.hdl = NULL
+	},
+	{
+		.peripheral = PERIPHERAL_SENSORS,
+		.type = TYPE_DCI_CMD,
+		.edge = "dsps",
+		.name = "DIAG_DCI_CMD",
+		.hdl = NULL
+	},
+	{
+		.peripheral = PERIPHERAL_WDSP,
+		.type = TYPE_DCI_CMD,
+		.edge = "wdsp",
+		.name = "DIAG_DCI_CMD",
+		.hdl = NULL
+	},
+	{
+		.peripheral = PERIPHERAL_CDSP,
+		.type = TYPE_DCI_CMD,
+		.edge = "cdsp",
+		.name = "DIAG_DCI_CMD",
+		.hdl = NULL
+	}
+};
+
+static void diag_state_open_glink(void *ctxt);
+static void diag_state_close_glink(void *ctxt);
+static int diag_glink_write(void *ctxt, unsigned char *buf, int len);
+static int diag_glink_read(void *ctxt, unsigned char *buf, int buf_len);
+static void diag_glink_queue_read(void *ctxt);
+
+static struct diag_peripheral_ops glink_ops = {
+	.open = diag_state_open_glink,
+	.close = diag_state_close_glink,
+	.write = diag_glink_write,
+	.read = diag_glink_read,
+	.queue_read = diag_glink_queue_read
+};
+
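+/*
+ * diag_state tracks whether the logging front end wants data from this
+ * channel. Closing also wakes any blocked reader and flushes the channel
+ * workqueue so no deferred work runs with the state cleared.
+ */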
+static void diag_state_open_glink(void *ctxt)
+{
+	struct diag_glink_info *glink_info = NULL;
+
+	if (!ctxt)
+		return;
+
+	glink_info = (struct diag_glink_info *)(ctxt);
+	atomic_set(&glink_info->diag_state, 1);
+	DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+			 "%s setting diag state to 1", glink_info->name);
+}
+
+static void diag_glink_queue_read(void *ctxt)
+{
+	struct diag_glink_info *glink_info = NULL;
+
+	if (!ctxt)
+		return;
+
+	glink_info = (struct diag_glink_info *)ctxt;
+	if (glink_info->hdl && glink_info->wq &&
+		atomic_read(&glink_info->opened))
+		queue_work(glink_info->wq, &(glink_info->read_work));
+}
+
+static void diag_state_close_glink(void *ctxt)
+{
+	struct diag_glink_info *glink_info = NULL;
+
+	if (!ctxt)
+		return;
+
+	glink_info = (struct diag_glink_info *)(ctxt);
+	atomic_set(&glink_info->diag_state, 0);
+	DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+			 "%s setting diag state to 0", glink_info->name);
+	wake_up_interruptible(&glink_info->read_wait_q);
+	flush_workqueue(glink_info->wq);
+}
+
+int diag_glink_check_state(void *ctxt)
+{
+	struct diag_glink_info *info = NULL;
+
+	if (!ctxt)
+		return 0;
+
+	info = (struct diag_glink_info *)ctxt;
+	return (int)(atomic_read(&info->diag_state));
+}
+
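+/*
+ * A "read" on G-Link only queues an rx intent of buf_len bytes on the
+ * channel; the payload is handed back asynchronously through the
+ * notify_rx callback.
+ */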
+static int diag_glink_read(void *ctxt, unsigned char *buf, int buf_len)
+{
+	struct diag_glink_info *glink_info = NULL;
+	int ret_val = 0;
+
+	if (!ctxt || !buf || buf_len <= 0)
+		return -EIO;
+
+	glink_info = (struct diag_glink_info *)ctxt;
+	if (!glink_info || !atomic_read(&glink_info->opened) ||
+		!glink_info->hdl || !glink_info->inited) {
+		DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+			"diag: Glink channel not opened");
+		return -EIO;
+	}
+
+	ret_val = glink_queue_rx_intent(glink_info->hdl, buf, buf_len);
+	if (ret_val == 0)
+		DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+		"diag: queued an rx intent ch:%s perip:%d buf:%pK of len:%d\n",
+		glink_info->name, glink_info->peripheral, buf, buf_len);
+
+	return ret_val;
+}
+
+static void diag_glink_read_work_fn(struct work_struct *work)
+{
+	struct diag_glink_info *glink_info = container_of(work,
+							struct diag_glink_info,
+							read_work);
+
+	if (!glink_info || !atomic_read(&glink_info->opened))
+		return;
+
+	if (!glink_info->inited) {
+		diag_ws_release();
+		return;
+	}
+
+	diagfwd_channel_read(glink_info->fwd_ctxt);
+}
+
+struct diag_glink_read_work {
+	struct diag_glink_info *glink_info;
+	const void *ptr_read_done;
+	const void *ptr_rx_done;
+	size_t ptr_read_size;
+	struct work_struct work;
+};
+
+static void diag_glink_notify_rx_work_fn(struct work_struct *work)
+{
+	struct diag_glink_read_work *read_work = container_of(work,
+			struct diag_glink_read_work, work);
+	struct diag_glink_info *glink_info = read_work->glink_info;
+
+	if (!glink_info || !glink_info->hdl) {
+		kfree(read_work);
+		return;
+	}
+
+	diagfwd_channel_read_done(glink_info->fwd_ctxt,
+			(unsigned char *)(read_work->ptr_read_done),
+			read_work->ptr_read_size);
+
+	glink_rx_done(glink_info->hdl, read_work->ptr_rx_done, false);
+
+	DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+		"diag: Rx done for packet %pK of len: %d periph: %d ch: %d\n",
+		read_work->ptr_rx_done, (int)read_work->ptr_read_size,
+		glink_info->peripheral, glink_info->type);
+	kfree(read_work);
+}
+
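+/*
+ * notify_rx may run in atomic context, so the payload is copied back into
+ * the rx-intent buffer (pkt_priv), and both diagfwd_channel_read_done()
+ * and glink_rx_done() are deferred to the channel workqueue through a
+ * dynamically allocated diag_glink_read_work item.
+ */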
+static void diag_glink_notify_rx(void *hdl, const void *priv,
+				const void *pkt_priv, const void *ptr,
+				size_t size)
+{
+	struct diag_glink_info *glink_info = (struct diag_glink_info *)priv;
+	struct diag_glink_read_work *read_work;
+
+	if (!glink_info || !glink_info->hdl || !ptr || !pkt_priv || !hdl)
+		return;
+
+	if (size == 0)
+		return;
+
+	DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+		"diag: received a packet %pK of len:%d from periph:%d ch:%d\n",
+		ptr, (int)size, glink_info->peripheral, glink_info->type);
+
+	read_work = kmalloc(sizeof(*read_work), GFP_ATOMIC);
+	if (!read_work) {
+		DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+			"diag: Could not allocate read_work\n");
+		glink_rx_done(glink_info->hdl, ptr, true);
+		return;
+	}
+
+	memcpy((void *)pkt_priv, ptr, size);
+
+	read_work->glink_info = glink_info;
+	read_work->ptr_read_done = pkt_priv;
+	read_work->ptr_rx_done = ptr;
+	read_work->ptr_read_size = size;
+	INIT_WORK(&read_work->work, diag_glink_notify_rx_work_fn);
+	queue_work(glink_info->wq, &read_work->work);
+
+	DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+		"diag: Rx queued for packet %pK of len: %d periph: %d ch: %d\n",
+		ptr, (int)size, glink_info->peripheral, glink_info->type);
+}
+
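+/*
+ * Called when the remote side queues an rx intent: account for it in
+ * tx_intent_ready and wake any waiter blocked on wait_q so a pending
+ * transmit can proceed.
+ */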
+static void diag_glink_notify_remote_rx_intent(void *hdl, const void *priv,
+						size_t size)
+{
+	struct diag_glink_info *glink_info = (struct diag_glink_info *)priv;
+
+	if (!glink_info)
+		return;
+
+	atomic_inc(&glink_info->tx_intent_ready);
+	wake_up_interruptible(&glink_info->wait_q);
+	DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+		"diag: received remote rx intent for %d type %d\n",
+		glink_info->peripheral, glink_info->type);
+}
+
+static void diag_glink_notify_tx_done(void *hdl, const void *priv,
+					const void *pkt_priv,
+					const void *ptr)
+{
+	struct diag_glink_info *glink_info = NULL;
+	struct diagfwd_info *fwd_info = NULL;
+	int found = 0;
+
+	glink_info = (struct diag_glink_info *)priv;
+	if (!glink_info)
+		return;
+
+	fwd_info = glink_info->fwd_ctxt;
+	DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+		"diag: Received glink tx done notify for ptr%pK pkt_priv %pK\n",
+		ptr, pkt_priv);
+	found = diagfwd_write_buffer_done(fwd_info, ptr);
+	if (!found)
+		DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+			"Received Tx done on invalid buffer ptr %pK\n", ptr);
+}
+
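+/*
+ * Writes are gated on tx_intent_ready, which counts rx intents advertised
+ * by the remote side; without a pending intent the write fails fast with
+ * -ENOMEM instead of calling into glink_tx().
+ */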
+static int diag_glink_write(void *ctxt, unsigned char *buf, int len)
+{
+	struct diag_glink_info *glink_info = NULL;
+	int err = 0;
+	uint32_t tx_flags = GLINK_TX_REQ_INTENT;
+
+	if (!ctxt || !buf)
+		return -EIO;
+
+	glink_info = (struct diag_glink_info *)ctxt;
+	if (!glink_info || len <= 0) {
+		pr_err_ratelimited("diag: In %s, invalid params, glink_info: %pK, buf: %pK, len: %d\n",
+				__func__, glink_info, buf, len);
+		return -EINVAL;
+	}
+
+	if (!glink_info->inited || !glink_info->hdl ||
+		!atomic_read(&glink_info->opened)) {
+		pr_err_ratelimited("diag: In %s, glink not inited, glink_info: %pK, buf: %pK, len: %d\n",
+				 __func__, glink_info, buf, len);
+		return -ENODEV;
+	}
+
+	if (atomic_read(&glink_info->tx_intent_ready)) {
+		atomic_dec(&glink_info->tx_intent_ready);
+		err = glink_tx(glink_info->hdl, glink_info, buf, len, tx_flags);
+		if (!err) {
+			DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+				"%s wrote to glink, len: %d\n",
+				glink_info->name, len);
+		}
+	} else {
+		err = -ENOMEM;
+	}
+
+	return err;
+}
+
+static void diag_glink_connect_work_fn(struct work_struct *work)
+{
+	struct diag_glink_info *glink_info = container_of(work,
+							struct diag_glink_info,
+							connect_work);
+	if (!glink_info || !glink_info->hdl)
+		return;
+	atomic_set(&glink_info->opened, 1);
+	diagfwd_channel_open(glink_info->fwd_ctxt);
+	diagfwd_late_open(glink_info->fwd_ctxt);
+	DIAG_LOG(DIAG_DEBUG_PERIPHERALS, "glink channel open: p: %d t: %d\n",
+			glink_info->peripheral, glink_info->type);
+}
+
+static void diag_glink_remote_disconnect_work_fn(struct work_struct *work)
+{
+	struct diag_glink_info *glink_info = container_of(work,
+							struct diag_glink_info,
+							remote_disconnect_work);
+	if (!glink_info || !glink_info->hdl)
+		return;
+	atomic_set(&glink_info->opened, 0);
+	diagfwd_channel_close(glink_info->fwd_ctxt);
+	atomic_set(&glink_info->tx_intent_ready, 0);
+}
+
+static void diag_glink_late_init_work_fn(struct work_struct *work)
+{
+	struct diag_glink_info *glink_info = container_of(work,
+							struct diag_glink_info,
+							late_init_work);
+	if (!glink_info || !glink_info->hdl)
+		return;
+	diagfwd_channel_open(glink_info->fwd_ctxt);
+	DIAG_LOG(DIAG_DEBUG_PERIPHERALS, "glink late init p: %d t: %d\n",
+			glink_info->peripheral, glink_info->type);
+}
+
+static void diag_glink_transport_notify_state(void *handle, const void *priv,
+					  unsigned event)
+{
+	struct diag_glink_info *glink_info = (struct diag_glink_info *)priv;
+
+	if (!glink_info)
+		return;
+
+	switch (event) {
+	case GLINK_CONNECTED:
+		DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+			"%s received channel connect for periph:%d\n",
+			 glink_info->name, glink_info->peripheral);
+		queue_work(glink_info->wq, &glink_info->connect_work);
+		break;
+	case GLINK_LOCAL_DISCONNECTED:
+		DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+			"%s received channel disconnect for periph:%d\n",
+			glink_info->name, glink_info->peripheral);
+
+		break;
+	case GLINK_REMOTE_DISCONNECTED:
+		DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+			"%s received channel remote disconnect for periph:%d\n",
+			 glink_info->name, glink_info->peripheral);
+		queue_work(glink_info->wq, &glink_info->remote_disconnect_work);
+		break;
+	default:
+		DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+			"%s received invalid notification\n",
+			glink_info->name);
+		break;
+	}
+}
+
+static void diag_glink_open_work_fn(struct work_struct *work)
+{
+	struct diag_glink_info *glink_info = container_of(work,
+							struct diag_glink_info,
+							open_work);
+	struct glink_open_config open_cfg;
+	void *handle = NULL;
+
+	if (!glink_info || glink_info->hdl)
+		return;
+
+	memset(&open_cfg, 0, sizeof(struct glink_open_config));
+	open_cfg.priv = glink_info;
+	open_cfg.edge = glink_info->edge;
+	open_cfg.name = glink_info->name;
+	open_cfg.notify_rx = diag_glink_notify_rx;
+	open_cfg.notify_tx_done = diag_glink_notify_tx_done;
+	open_cfg.notify_state = diag_glink_transport_notify_state;
+	open_cfg.notify_remote_rx_intent = diag_glink_notify_remote_rx_intent;
+	handle = glink_open(&open_cfg);
+	if (IS_ERR_OR_NULL(handle))
+		DIAG_LOG(DIAG_DEBUG_PERIPHERALS, "error opening channel %s",
+			glink_info->name);
+	else
+		glink_info->hdl = handle;
+}
+
+static void diag_glink_close_work_fn(struct work_struct *work)
+{
+	struct diag_glink_info *glink_info = container_of(work,
+							struct diag_glink_info,
+							close_work);
+	if (!glink_info || !glink_info->inited || !glink_info->hdl)
+		return;
+
+	glink_close(glink_info->hdl);
+	atomic_set(&glink_info->opened, 0);
+	atomic_set(&glink_info->tx_intent_ready, 0);
+	glink_info->hdl = NULL;
+	diagfwd_channel_close(glink_info->fwd_ctxt);
+}
+
+static void diag_glink_notify_cb(struct glink_link_state_cb_info *cb_info,
+				void *priv)
+{
+	struct diag_glink_info *glink_info = NULL;
+
+	glink_info = (struct diag_glink_info *)priv;
+	if (!glink_info)
+		return;
+	if (!cb_info)
+		return;
+
+	switch (cb_info->link_state) {
+	case GLINK_LINK_STATE_UP:
+		DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+			"%s channel opened for periph:%d\n",
+			glink_info->name, glink_info->peripheral);
+		queue_work(glink_info->wq, &glink_info->open_work);
+		break;
+	case GLINK_LINK_STATE_DOWN:
+		DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+			"%s channel closed for periph:%d\n",
+			glink_info->name, glink_info->peripheral);
+		queue_work(glink_info->wq, &glink_info->close_work);
+		break;
+	default:
+		DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+			"Invalid link state notification for ch:%s\n",
+			glink_info->name);
+		break;
+
+	}
+}
+
+static void glink_late_init(struct diag_glink_info *glink_info)
+{
+	struct diagfwd_info *fwd_info = NULL;
+
+	if (!glink_info)
+		return;
+
+	DIAG_LOG(DIAG_DEBUG_PERIPHERALS, "%s entering\n",
+		 glink_info->name);
+
+	diagfwd_register(TRANSPORT_GLINK, glink_info->peripheral,
+			glink_info->type, (void *)glink_info,
+			&glink_ops, &glink_info->fwd_ctxt);
+	fwd_info = glink_info->fwd_ctxt;
+	if (!fwd_info)
+		return;
+
+	glink_info->inited = 1;
+
+	if (atomic_read(&glink_info->opened))
+		queue_work(glink_info->wq, &(glink_info->late_init_work));
+
+	DIAG_LOG(DIAG_DEBUG_PERIPHERALS, "%s exiting\n",
+		 glink_info->name);
+}
+
+int diag_glink_init_peripheral(uint8_t peripheral)
+{
+	if (peripheral >= NUM_PERIPHERALS) {
+		pr_err("diag: In %s, invalid peripheral %d\n",
+		       __func__, peripheral);
+		return -EINVAL;
+	}
+
+	glink_late_init(&glink_data[peripheral]);
+	glink_late_init(&glink_dci[peripheral]);
+	glink_late_init(&glink_cmd[peripheral]);
+	glink_late_init(&glink_dci_cmd[peripheral]);
+
+	return 0;
+}
+
+static void __diag_glink_init(struct diag_glink_info *glink_info)
+{
+	char wq_name[DIAG_GLINK_NAME_SZ + 12];
+	struct glink_link_info link_info;
+	void *link_state_handle = NULL;
+
+	if (!glink_info)
+		return;
+
+	init_waitqueue_head(&glink_info->wait_q);
+	init_waitqueue_head(&glink_info->read_wait_q);
+	mutex_init(&glink_info->lock);
+	strlcpy(wq_name, "DIAG_GLINK_", sizeof(wq_name));
+	strlcat(wq_name, glink_info->name, sizeof(wq_name));
+	glink_info->wq = create_singlethread_workqueue(wq_name);
+	if (!glink_info->wq) {
+		pr_err("diag: In %s, unable to create workqueue for glink ch:%s\n",
+			   __func__, glink_info->name);
+		return;
+	}
+	INIT_WORK(&(glink_info->open_work), diag_glink_open_work_fn);
+	INIT_WORK(&(glink_info->close_work), diag_glink_close_work_fn);
+	INIT_WORK(&(glink_info->read_work), diag_glink_read_work_fn);
+	INIT_WORK(&(glink_info->connect_work), diag_glink_connect_work_fn);
+	INIT_WORK(&(glink_info->remote_disconnect_work),
+		diag_glink_remote_disconnect_work_fn);
+	INIT_WORK(&(glink_info->late_init_work), diag_glink_late_init_work_fn);
+	link_info.glink_link_state_notif_cb = diag_glink_notify_cb;
+	link_info.transport = NULL;
+	link_info.edge = glink_info->edge;
+	glink_info->link_state_handle = NULL;
+	link_state_handle = glink_register_link_state_cb(&link_info,
+							(void *)glink_info);
+	if (IS_ERR_OR_NULL(link_state_handle)) {
+		pr_err("diag: In %s, unable to register for glink channel %s\n",
+			   __func__, glink_info->name);
+		destroy_workqueue(glink_info->wq);
+		return;
+	}
+	glink_info->link_state_handle = link_state_handle;
+	glink_info->fwd_ctxt = NULL;
+	atomic_set(&glink_info->tx_intent_ready, 0);
+	atomic_set(&glink_info->opened, 0);
+	atomic_set(&glink_info->diag_state, 0);
+	DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+		"%s initialized fwd_ctxt: %pK hdl: %pK\n",
+		glink_info->name, glink_info->fwd_ctxt,
+		glink_info->link_state_handle);
+}
+
+void diag_glink_invalidate(void *ctxt, struct diagfwd_info *fwd_ctxt)
+{
+	struct diag_glink_info *info = NULL;
+
+	if (!ctxt || !fwd_ctxt)
+		return;
+
+	info = (struct diag_glink_info *)ctxt;
+	info->fwd_ctxt = fwd_ctxt;
+}
+
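+/*
+ * Only the WDSP peripheral is brought up over G-Link. Its control channel
+ * is registered with diagfwd immediately; the remaining channels are only
+ * initialized here and complete registration later through
+ * diag_glink_init_peripheral(), once the control channel is up.
+ */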
+int diag_glink_init(void)
+{
+	uint8_t peripheral;
+	struct diag_glink_info *glink_info = NULL;
+
+	for (peripheral = 0; peripheral < NUM_PERIPHERALS; peripheral++) {
+		if (peripheral != PERIPHERAL_WDSP)
+			continue;
+		glink_info = &glink_cntl[peripheral];
+		__diag_glink_init(glink_info);
+		diagfwd_cntl_register(TRANSPORT_GLINK, glink_info->peripheral,
+					(void *)glink_info, &glink_ops,
+					&(glink_info->fwd_ctxt));
+		glink_info->inited = 1;
+		__diag_glink_init(&glink_data[peripheral]);
+		__diag_glink_init(&glink_cmd[peripheral]);
+		__diag_glink_init(&glink_dci[peripheral]);
+		__diag_glink_init(&glink_dci_cmd[peripheral]);
+	}
+	return 0;
+}
+
+static void __diag_glink_exit(struct diag_glink_info *glink_info)
+{
+	if (!glink_info)
+		return;
+
+	DIAG_LOG(DIAG_DEBUG_PERIPHERALS, "%s entering\n",
+			 glink_info->name);
+
+	diagfwd_deregister(glink_info->peripheral, glink_info->type,
+					   (void *)glink_info);
+	glink_info->fwd_ctxt = NULL;
+	glink_info->hdl = NULL;
+	if (glink_info->wq)
+		destroy_workqueue(glink_info->wq);
+
+	DIAG_LOG(DIAG_DEBUG_PERIPHERALS, "%s exiting\n",
+			 glink_info->name);
+}
+
+void diag_glink_early_exit(void)
+{
+	int peripheral = 0;
+
+	for (peripheral = 0; peripheral < NUM_PERIPHERALS; peripheral++) {
+		if (peripheral != PERIPHERAL_WDSP)
+			continue;
+		__diag_glink_exit(&glink_cntl[peripheral]);
+		/* unregister with the handle the registration returned */
+		glink_unregister_link_state_cb(
+			glink_cntl[peripheral].link_state_handle);
+	}
+}
+
+void diag_glink_exit(void)
+{
+	int peripheral = 0;
+
+	for (peripheral = 0; peripheral < NUM_PERIPHERALS; peripheral++) {
+		if (peripheral != PERIPHERAL_WDSP)
+			continue;
+		__diag_glink_exit(&glink_data[peripheral]);
+		__diag_glink_exit(&glink_cmd[peripheral]);
+		__diag_glink_exit(&glink_dci[peripheral]);
+		__diag_glink_exit(&glink_dci_cmd[peripheral]);
+		glink_unregister_link_state_cb(
+			glink_data[peripheral].link_state_handle);
+		glink_unregister_link_state_cb(
+			glink_cmd[peripheral].link_state_handle);
+		glink_unregister_link_state_cb(
+			glink_dci[peripheral].link_state_handle);
+		glink_unregister_link_state_cb(
+			glink_dci_cmd[peripheral].link_state_handle);
+	}
+}
diff -Nruw linux-4.4.115-fbx/drivers/char/diag./diagfwd_glink.h linux-4.4.115-fbx/drivers/char/diag/diagfwd_glink.h
--- linux-4.4.115-fbx/drivers/char/diag./diagfwd_glink.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/char/diag/diagfwd_glink.h	2019-01-22 16:16:22.963241516 +0100
@@ -0,0 +1,57 @@
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef DIAGFWD_GLINK_H
+#define DIAGFWD_GLINK_H
+
+#define DIAG_GLINK_NAME_SZ	24
+#define GLINK_DRAIN_BUF_SIZE	4096
+
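+/*
+ * State for one G-Link diag channel: identity (peripheral, type, edge and
+ * channel name), open/diag-state flags, the channel and link-state
+ * handles, a dedicated workqueue with its work items, and the diagfwd
+ * context the channel feeds.
+ */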
+struct diag_glink_info {
+	uint8_t peripheral;
+	uint8_t type;
+	uint8_t inited;
+	atomic_t opened;
+	atomic_t diag_state;
+	uint32_t fifo_size;
+	atomic_t tx_intent_ready;
+	void *hdl;
+	void *link_state_handle;
+	char edge[DIAG_GLINK_NAME_SZ];
+	char name[DIAG_GLINK_NAME_SZ];
+	struct mutex lock;
+	wait_queue_head_t read_wait_q;
+	wait_queue_head_t wait_q;
+	struct workqueue_struct *wq;
+	struct work_struct open_work;
+	struct work_struct close_work;
+	struct work_struct read_work;
+	struct work_struct connect_work;
+	struct work_struct remote_disconnect_work;
+	struct work_struct late_init_work;
+	struct diagfwd_info *fwd_ctxt;
+};
+
+extern struct diag_glink_info glink_data[NUM_PERIPHERALS];
+extern struct diag_glink_info glink_cntl[NUM_PERIPHERALS];
+extern struct diag_glink_info glink_cmd[NUM_PERIPHERALS];
+extern struct diag_glink_info glink_dci_cmd[NUM_PERIPHERALS];
+extern struct diag_glink_info glink_dci[NUM_PERIPHERALS];
+
+int diag_glink_init_peripheral(uint8_t peripheral);
+void diag_glink_exit(void);
+int diag_glink_init(void);
+void diag_glink_early_exit(void);
+void diag_glink_invalidate(void *ctxt, struct diagfwd_info *fwd_ctxt);
+int diag_glink_check_state(void *ctxt);
+
+#endif
diff -Nruw linux-4.4.115-fbx/drivers/char/diag./diagfwd.h linux-4.4.115-fbx/drivers/char/diag/diagfwd.h
--- linux-4.4.115-fbx/drivers/char/diag./diagfwd.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/char/diag/diagfwd.h	2019-01-22 16:16:22.959241480 +0100
@@ -0,0 +1,52 @@
+/* Copyright (c) 2008-2015, 2017 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef DIAGFWD_H
+#define DIAGFWD_H
+
+/*
+ * The context applies to Diag SMD data buffers. It is used to identify the
+ * buffer once these buffers are written to USB.
+ */
+#define SET_BUF_CTXT(p, d, n) \
+	((((p) & 0xFF) << 16) | (((d) & 0xFF) << 8) | ((n) & 0xFF))
+#define SET_PD_CTXT(u)		(((u) & 0xFF) << 24)
+#define GET_BUF_PERIPHERAL(p)	(((p) & 0xFF0000) >> 16)
+#define GET_BUF_TYPE(d)		(((d) & 0x00FF00) >> 8)
+#define GET_BUF_NUM(n)		((n) & 0x0000FF)
+#define GET_PD_CTXT(u)		(((u) & 0xFF000000) >> 24)
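+/*
+ * Example: SET_BUF_CTXT(PERIPHERAL_MODEM, TYPE_DATA, 1) packs the
+ * peripheral into bits 16-23, the buffer type into bits 8-15 and the
+ * buffer number into bits 0-7; SET_PD_CTXT() tags a PD in bits 24-31.
+ */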
+
+#define CHK_OVERFLOW(bufStart, start, end, length) \
+	((((bufStart) <= (start)) && ((end) - (start) >= (length))) ? 1 : 0)
+
+int diagfwd_init(void);
+void diagfwd_exit(void);
+void diag_process_hdlc_pkt(void *data, unsigned int len, int pid);
+void diag_process_non_hdlc_pkt(unsigned char *data, int len, int pid);
+int chk_config_get_id(void);
+int chk_apps_only(void);
+int chk_apps_master(void);
+int chk_polling_response(void);
+int diag_cmd_log_on_demand(unsigned char *src_buf, int src_len,
+			   unsigned char *dest_buf, int dest_len);
+int diag_cmd_get_mobile_id(unsigned char *src_buf, int src_len,
+			   unsigned char *dest_buf, int dest_len);
+int diag_check_common_cmd(struct diag_pkt_header_t *header);
+void diag_update_userspace_clients(unsigned int type);
+void diag_update_sleeping_process(int process_id, int data_type);
+int diag_process_apps_pkt(unsigned char *buf, int len, int pid);
+void diag_send_error_rsp(unsigned char *buf, int len, int pid);
+void diag_update_pkt_buffer(unsigned char *buf, uint32_t len, int type);
+int diag_process_stm_cmd(unsigned char *buf, unsigned char *dest_buf);
+void diag_md_hdlc_reset_timer_func(unsigned long pid);
+void diag_update_md_clients(unsigned int type);
+#endif
diff -Nruw linux-4.4.115-fbx/drivers/char/diag./diagfwd_peripheral.c linux-4.4.115-fbx/drivers/char/diag/diagfwd_peripheral.c
--- linux-4.4.115-fbx/drivers/char/diag./diagfwd_peripheral.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/char/diag/diagfwd_peripheral.c	2019-10-29 09:26:23.453201319 +0100
@@ -0,0 +1,2074 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/sched.h>
+#include <linux/ratelimit.h>
+#include <linux/workqueue.h>
+#include <linux/diagchar.h>
+#include <linux/of.h>
+#include <linux/kmemleak.h>
+#include <linux/delay.h>
+#include <linux/atomic.h>
+#include "diagchar.h"
+#include "diagchar_hdlc.h"
+#include "diagfwd_peripheral.h"
+#include "diagfwd_cntl.h"
+#include "diag_masks.h"
+#include "diag_dci.h"
+#include "diagfwd.h"
+#include "diagfwd_smd.h"
+#include "diagfwd_socket.h"
+#include "diag_mux.h"
+#include "diag_ipc_logging.h"
+#include "diagfwd_glink.h"
+#include "diag_memorydevice.h"
+
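+/*
+ * Header prefixed to each packet that diag_add_hdlc_encoding() consumes:
+ * a control character, a version byte and the 16-bit payload length.
+ */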
+struct data_header {
+	uint8_t control_char;
+	uint8_t version;
+	uint16_t length;
+};
+
+static struct diagfwd_info *early_init_info[NUM_TRANSPORT];
+
+static void diagfwd_queue_read(struct diagfwd_info *fwd_info);
+static void diagfwd_buffers_exit(struct diagfwd_info *fwd_info);
+static void diagfwd_cntl_open(struct diagfwd_info *fwd_info);
+static void diagfwd_cntl_close(struct diagfwd_info *fwd_info);
+static void diagfwd_dci_open(struct diagfwd_info *fwd_info);
+static void diagfwd_dci_close(struct diagfwd_info *fwd_info);
+static void diagfwd_data_read_untag_done(struct diagfwd_info *fwd_info,
+				   unsigned char *buf, int len);
+static void diagfwd_data_read_done(struct diagfwd_info *fwd_info,
+				   unsigned char *buf, int len);
+static void diagfwd_cntl_read_done(struct diagfwd_info *fwd_info,
+				   unsigned char *buf, int len);
+static void diagfwd_dci_read_done(struct diagfwd_info *fwd_info,
+				  unsigned char *buf, int len);
+static void diagfwd_write_buffers_init(struct diagfwd_info *fwd_info);
+static void diagfwd_write_buffers_exit(struct diagfwd_info *fwd_info);
+struct diagfwd_info peripheral_info[NUM_TYPES][NUM_PERIPHERALS];
+
+static struct diag_channel_ops data_ch_ops = {
+	.open = NULL,
+	.close = NULL,
+	.read_done = diagfwd_data_read_untag_done
+};
+
+static struct diag_channel_ops cntl_ch_ops = {
+	.open = diagfwd_cntl_open,
+	.close = diagfwd_cntl_close,
+	.read_done = diagfwd_cntl_read_done
+};
+
+static struct diag_channel_ops dci_ch_ops = {
+	.open = diagfwd_dci_open,
+	.close = diagfwd_dci_close,
+	.read_done = diagfwd_dci_read_done
+};
+
+static void diagfwd_cntl_open(struct diagfwd_info *fwd_info)
+{
+	if (!fwd_info)
+		return;
+	diag_cntl_channel_open(fwd_info);
+}
+
+static void diagfwd_cntl_close(struct diagfwd_info *fwd_info)
+{
+	if (!fwd_info)
+		return;
+	diag_cntl_channel_close(fwd_info);
+}
+
+static void diagfwd_dci_open(struct diagfwd_info *fwd_info)
+{
+	if (!fwd_info)
+		return;
+
+	diag_dci_notify_client(PERIPHERAL_MASK(fwd_info->peripheral),
+			       DIAG_STATUS_OPEN, DCI_LOCAL_PROC);
+}
+
+static void diagfwd_dci_close(struct diagfwd_info *fwd_info)
+{
+	if (!fwd_info)
+		return;
+
+	diag_dci_notify_client(PERIPHERAL_MASK(fwd_info->peripheral),
+			       DIAG_STATUS_CLOSED, DCI_LOCAL_PROC);
+}
+
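+/*
+ * Walk the source buffer packet by packet: each packet carries a
+ * data_header (control char, version, 16-bit payload length), the payload
+ * and a trailing control character. Every payload is HDLC-encoded into
+ * dest_buf; worst-case growth is 2 * length + 3 bytes per packet.
+ */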
+static int diag_add_hdlc_encoding(unsigned char *dest_buf, int *dest_len,
+				  unsigned char *buf, int len)
+{
+	struct diag_send_desc_type send = { NULL, NULL, DIAG_STATE_START, 0 };
+	struct diag_hdlc_dest_type enc = { NULL, NULL, 0 };
+	struct data_header *header;
+	int header_size = sizeof(struct data_header);
+	uint8_t *end_control_char = NULL;
+	uint8_t *payload = NULL;
+	uint8_t *temp_buf = NULL;
+	uint8_t *temp_encode_buf = NULL;
+	int src_pkt_len;
+	int encoded_pkt_length;
+	int max_size;
+	int total_processed = 0;
+	int bytes_remaining;
+	int err = 0;
+	uint8_t loop_count = 0;
+
+	if (!dest_buf || !dest_len || !buf)
+		return -EIO;
+
+	temp_buf = buf;
+	temp_encode_buf = dest_buf;
+	bytes_remaining = *dest_len;
+
+	while (total_processed < len) {
+		loop_count++;
+		header = (struct data_header *)temp_buf;
+		/* Perform initial error checking */
+		if (header->control_char != CONTROL_CHAR ||
+		    header->version != 1) {
+			err = -EINVAL;
+			break;
+		}
+
+		if (header->length >= bytes_remaining)
+			break;
+
+		payload = temp_buf + header_size;
+		end_control_char = payload + header->length;
+		if (*end_control_char != CONTROL_CHAR) {
+			err = -EINVAL;
+			break;
+		}
+
+		max_size = 2 * header->length + 3;
+		if (bytes_remaining < max_size) {
+			err = -EINVAL;
+			break;
+		}
+
+		/* Prepare for encoding the data */
+		send.state = DIAG_STATE_START;
+		send.pkt = payload;
+		send.last = (void *)(payload + header->length - 1);
+		send.terminate = 1;
+
+		enc.dest = temp_encode_buf;
+		enc.dest_last = (void *)(temp_encode_buf + max_size);
+		enc.crc = 0;
+		diag_hdlc_encode(&send, &enc);
+
+		/* Prepare for next packet */
+		src_pkt_len = (header_size + header->length + 1);
+		total_processed += src_pkt_len;
+		temp_buf += src_pkt_len;
+
+		encoded_pkt_length = (uint8_t *)enc.dest - temp_encode_buf;
+		bytes_remaining -= encoded_pkt_length;
+		temp_encode_buf = enc.dest;
+	}
+
+	*dest_len = (int)(temp_encode_buf - dest_buf);
+
+	return err;
+}
+
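+/*
+ * Make sure buf->data can hold the worst-case HDLC expansion of len
+ * bytes, growing it with krealloc() up to MAX_PERIPHERAL_HDLC_BUF_SZ. In
+ * memory device mode, any mdlog table entry still pointing at the old
+ * allocation is flushed first. Returns the usable length or an error.
+ */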
+static int check_bufsize_for_encoding(struct diagfwd_buf_t *buf, uint32_t len)
+{
+	int i, ctx = 0;
+	uint32_t max_size = 0;
+	unsigned char *temp_buf = NULL;
+	struct diag_md_info *ch = NULL;
+
+	if (!buf || len == 0)
+		return -EINVAL;
+
+	max_size = (2 * len) + 3;
+	if (max_size > PERIPHERAL_BUF_SZ) {
+		if (max_size > MAX_PERIPHERAL_HDLC_BUF_SZ) {
+			pr_err("diag: In %s, max_size is going beyond limit %d\n",
+			       __func__, max_size);
+			max_size = MAX_PERIPHERAL_HDLC_BUF_SZ;
+		}
+
+		if (buf->len < max_size) {
+			if (driver->logging_mode == DIAG_MEMORY_DEVICE_MODE) {
+				ch = &diag_md[DIAG_LOCAL_PROC];
+				for (i = 0; ch != NULL &&
+						i < ch->num_tbl_entries; i++) {
+					if (ch->tbl[i].buf == buf->data) {
+						ctx = ch->tbl[i].ctx;
+						ch->tbl[i].buf = NULL;
+						ch->tbl[i].len = 0;
+						ch->tbl[i].ctx = 0;
+						DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+						"Flushed mdlog table entries before reallocating data buffer, p:%d, t:%d\n",
+						GET_BUF_PERIPHERAL(ctx),
+						GET_BUF_TYPE(ctx));
+						break;
+					}
+				}
+			}
+			temp_buf = krealloc(buf->data, max_size +
+						APF_DIAG_PADDING,
+					    GFP_KERNEL);
+			if (!temp_buf)
+				return -ENOMEM;
+			DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+			"Reallocated data buffer: %pK with size: %d\n",
+			temp_buf, max_size);
+			buf->data = temp_buf;
+			buf->len = max_size;
+		}
+	}
+
+	return buf->len;
+}
+
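+/*
+ * Map a buffer context back to the peripheral that owns it. User-PD
+ * contexts (WLAN/audio/sensors) fall back to their host peripheral when
+ * PD logging is not enabled for that session.
+ */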
+int diag_md_get_peripheral(int ctxt)
+{
+	int peripheral;
+
+	if (driver->num_pd_session) {
+		peripheral = GET_PD_CTXT(ctxt);
+		switch (peripheral) {
+		case UPD_WLAN:
+			if (!driver->pd_logging_mode[0])
+				peripheral = PERIPHERAL_MODEM;
+			break;
+		case UPD_AUDIO:
+			if (!driver->pd_logging_mode[1])
+				peripheral = PERIPHERAL_LPASS;
+			break;
+		case UPD_SENSORS:
+			if (!driver->pd_logging_mode[2])
+				peripheral = PERIPHERAL_LPASS;
+			break;
+		case DIAG_ID_MPSS:
+		case DIAG_ID_LPASS:
+		case DIAG_ID_CDSP:
+		default:
+			peripheral =
+				GET_BUF_PERIPHERAL(ctxt);
+			if (peripheral > NUM_PERIPHERALS)
+				peripheral = -EINVAL;
+			break;
+		}
+	} else {
+		/* Account for Apps data as well */
+		peripheral = GET_BUF_PERIPHERAL(ctxt);
+		if (peripheral > NUM_PERIPHERALS)
+			peripheral = -EINVAL;
+	}
+
+	return peripheral;
+}
+
+static void diagfwd_data_process_done(struct diagfwd_info *fwd_info,
+				   struct diagfwd_buf_t *buf, int len)
+{
+	int err = 0;
+	int write_len = 0, peripheral = 0;
+	unsigned char *write_buf = NULL;
+	struct diag_md_session_t *session_info = NULL;
+	uint8_t hdlc_disabled = 0;
+
+	if (!fwd_info || !buf || len <= 0) {
+		diag_ws_release();
+		return;
+	}
+
+	switch (fwd_info->type) {
+	case TYPE_DATA:
+	case TYPE_CMD:
+		break;
+	default:
+		pr_err_ratelimited("diag: In %s, invalid type %d for peripheral %d\n",
+			__func__, fwd_info->type,
+			fwd_info->peripheral);
+		diag_ws_release();
+		return;
+	}
+
+	mutex_lock(&driver->hdlc_disable_mutex);
+	mutex_lock(&fwd_info->data_mutex);
+
+	peripheral = diag_md_get_peripheral(buf->ctxt);
+	if (peripheral < 0) {
+		pr_err("diag: %s:%d invalid peripheral = %d\n",
+			__func__, __LINE__, peripheral);
+		mutex_unlock(&fwd_info->data_mutex);
+		mutex_unlock(&driver->hdlc_disable_mutex);
+		diag_ws_release();
+		return;
+	}
+	mutex_lock(&driver->md_session_lock);
+	session_info = diag_md_session_get_peripheral(peripheral);
+	if (session_info)
+		hdlc_disabled = session_info->hdlc_disabled;
+	else
+		hdlc_disabled = driver->hdlc_disabled;
+	mutex_unlock(&driver->md_session_lock);
+	if (hdlc_disabled) {
+		/* The data is raw and HDLC is disabled on the APPS side */
+		if (!buf) {
+			pr_err("diag: In %s, no match for non encode buffer %pK, peripheral %d, type: %d\n",
+			       __func__, buf, fwd_info->peripheral,
+			       fwd_info->type);
+			goto end;
+		}
+		if (len > PERIPHERAL_BUF_SZ) {
+			pr_err("diag: In %s, Incoming buffer too large %d, peripheral %d, type: %d\n",
+			       __func__, len, fwd_info->peripheral,
+			       fwd_info->type);
+			goto end;
+		}
+		write_len = len;
+		if (write_len <= 0)
+			goto end;
+		write_buf = buf->data_raw;
+	} else {
+		if (!buf) {
+			pr_err("diag: In %s, no match for non encode buffer %pK, peripheral %d, type: %d\n",
+				__func__, buf, fwd_info->peripheral,
+				fwd_info->type);
+			goto end;
+		}
+
+		write_len = check_bufsize_for_encoding(buf, len);
+		if (write_len <= 0) {
+			pr_err("diag: error in checking buf for encoding\n");
+			goto end;
+		}
+		write_buf = buf->data;
+		err = diag_add_hdlc_encoding(write_buf, &write_len,
+			buf->data_raw, len);
+		if (err) {
+			pr_err("diag: error in adding hdlc encoding\n");
+			goto end;
+		}
+	}
+	mutex_unlock(&fwd_info->data_mutex);
+	mutex_unlock(&driver->hdlc_disable_mutex);
+
+	if (write_len > 0) {
+		err = diag_mux_write(DIAG_LOCAL_PROC, write_buf, write_len,
+				     buf->ctxt);
+		if (err) {
+			pr_err_ratelimited("diag: In %s, unable to write to mux error: %d\n",
+					   __func__, err);
+			goto end_write;
+		}
+	}
+
+	diagfwd_queue_read(fwd_info);
+	return;
+
+end:
+	mutex_unlock(&fwd_info->data_mutex);
+	mutex_unlock(&driver->hdlc_disable_mutex);
+end_write:
+	diag_ws_release();
+	if (buf) {
+		DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+		"Marking buffer as free p: %d, t: %d, buf_num: %d\n",
+			fwd_info->peripheral, fwd_info->type,
+			GET_BUF_NUM(buf->ctxt));
+		diagfwd_write_done(fwd_info->peripheral, fwd_info->type,
+				   GET_BUF_NUM(buf->ctxt));
+	}
+	diagfwd_queue_read(fwd_info);
+}
+
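+/*
+ * Untagging: peripherals that support the untag feature prefix every
+ * chunk with a 4-byte tag (a diag_id byte, one byte this code skips and a
+ * 16-bit length), so one read can interleave core-PD and user-PD data.
+ * The loop below demultiplexes the chunks into the matching cpd/upd
+ * buffers before handing each stream to diagfwd_data_process_done().
+ */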
+static void diagfwd_data_read_untag_done(struct diagfwd_info *fwd_info,
+				   unsigned char *buf, int len)
+{
+	int len_cpd = 0;
+	int len_upd_1 = 0, len_upd_2 = 0;
+	int ctxt_cpd = 0;
+	int ctxt_upd_1 = 0, ctxt_upd_2 = 0;
+	int buf_len = 0, processed = 0;
+	unsigned char *temp_buf_main = NULL;
+	unsigned char *temp_buf_cpd = NULL;
+	unsigned char *temp_buf_upd_1 = NULL;
+	unsigned char *temp_buf_upd_2 = NULL;
+	struct diagfwd_buf_t *temp_ptr_upd = NULL;
+	struct diagfwd_buf_t *temp_ptr_cpd = NULL;
+	int flag_buf_1 = 0, flag_buf_2 = 0;
+	uint8_t peripheral;
+
+	if (!fwd_info || !buf || len <= 0) {
+		diag_ws_release();
+		return;
+	}
+
+	switch (fwd_info->type) {
+	case TYPE_DATA:
+	case TYPE_CMD:
+		break;
+	default:
+		pr_err_ratelimited("diag: In %s, invalid type %d for peripheral %d\n",
+				   __func__, fwd_info->type,
+				   fwd_info->peripheral);
+		diag_ws_release();
+		return;
+	}
+	peripheral = fwd_info->peripheral;
+
+	if (driver->feature[peripheral].encode_hdlc &&
+		driver->feature[peripheral].untag_header &&
+		driver->peripheral_untag[peripheral]) {
+		temp_buf_cpd = buf;
+		temp_buf_main = buf;
+		if (fwd_info->buf_1 &&
+			fwd_info->buf_1->data_raw == buf) {
+			flag_buf_1 = 1;
+			temp_ptr_cpd = fwd_info->buf_1;
+			if (fwd_info->type == TYPE_DATA) {
+				temp_buf_upd_1 =
+					fwd_info->buf_upd_1_a->data_raw;
+				if (peripheral == PERIPHERAL_LPASS)
+					temp_buf_upd_2 =
+						fwd_info->buf_upd_2_a->data_raw;
+			}
+		} else if (fwd_info->buf_2 &&
+			   fwd_info->buf_2->data_raw == buf) {
+			flag_buf_2 = 1;
+			temp_ptr_cpd = fwd_info->buf_2;
+			if (fwd_info->type == TYPE_DATA) {
+				temp_buf_upd_1 =
+					fwd_info->buf_upd_1_b->data_raw;
+				if (peripheral == PERIPHERAL_LPASS)
+					temp_buf_upd_2 =
+						fwd_info->buf_upd_2_b->data_raw;
+			}
+		} else {
+			pr_err("diag: In %s, no match for buffer %pK, peripheral %d, type: %d\n",
+			       __func__, buf, peripheral,
+			       fwd_info->type);
+			goto end;
+		}
+		while (processed < len) {
+			buf_len = *(uint16_t *)(temp_buf_main + 2);
+			switch (*temp_buf_main) {
+			case DIAG_ID_MPSS:
+				ctxt_cpd = DIAG_ID_MPSS;
+				len_cpd += buf_len;
+				if (temp_buf_cpd) {
+					memcpy(temp_buf_cpd,
+					(temp_buf_main + 4), buf_len);
+					temp_buf_cpd += buf_len;
+				}
+				break;
+			case DIAG_ID_WLAN:
+				ctxt_upd_1 = UPD_WLAN;
+				len_upd_1 += buf_len;
+				if (temp_buf_upd_1) {
+					memcpy(temp_buf_upd_1,
+					(temp_buf_main + 4), buf_len);
+					temp_buf_upd_1 += buf_len;
+				}
+				break;
+			case DIAG_ID_LPASS:
+				ctxt_cpd = DIAG_ID_LPASS;
+				len_cpd += buf_len;
+				if (temp_buf_cpd) {
+					memcpy(temp_buf_cpd,
+					(temp_buf_main + 4), buf_len);
+					temp_buf_cpd += buf_len;
+				}
+				break;
+			case DIAG_ID_AUDIO:
+				ctxt_upd_1 = UPD_AUDIO;
+				len_upd_1 += buf_len;
+				if (temp_buf_upd_1) {
+					memcpy(temp_buf_upd_1,
+					(temp_buf_main + 4), buf_len);
+					temp_buf_upd_1 += buf_len;
+				}
+				break;
+			case DIAG_ID_SENSORS:
+				ctxt_upd_2 = UPD_SENSORS;
+				len_upd_2 += buf_len;
+				if (temp_buf_upd_2) {
+					memcpy(temp_buf_upd_2,
+					(temp_buf_main + 4), buf_len);
+					temp_buf_upd_2 += buf_len;
+				}
+				break;
+			case DIAG_ID_CDSP:
+				ctxt_cpd = DIAG_ID_CDSP;
+				len_cpd += buf_len;
+				if (temp_buf_cpd) {
+					memcpy(temp_buf_cpd,
+					(temp_buf_main + 4), buf_len);
+					temp_buf_cpd += buf_len;
+				}
+				break;
+			default:
+				goto end;
+			}
+			len = len - 4;
+			temp_buf_main += (buf_len + 4);
+			processed += buf_len;
+		}
+
+		if (flag_buf_1) {
+			fwd_info->cpd_len_1 = len_cpd;
+			if (fwd_info->type == TYPE_DATA)
+				fwd_info->upd_len_1_a = len_upd_1;
+			if (peripheral == PERIPHERAL_LPASS &&
+				fwd_info->type == TYPE_DATA)
+				fwd_info->upd_len_2_a = len_upd_2;
+		} else if (flag_buf_2) {
+			fwd_info->cpd_len_2 = len_cpd;
+			if (fwd_info->type == TYPE_DATA)
+				fwd_info->upd_len_1_b = len_upd_1;
+			if (peripheral == PERIPHERAL_LPASS &&
+				fwd_info->type == TYPE_DATA)
+				fwd_info->upd_len_2_b = len_upd_2;
+		}
+
+		if (peripheral == PERIPHERAL_LPASS &&
+			fwd_info->type == TYPE_DATA && len_upd_2) {
+			if (flag_buf_1)
+				temp_ptr_upd = fwd_info->buf_upd_2_a;
+			else
+				temp_ptr_upd = fwd_info->buf_upd_2_b;
+			temp_ptr_upd->ctxt &= 0x00FFFFFF;
+			temp_ptr_upd->ctxt |=
+				(SET_PD_CTXT(ctxt_upd_2));
+			atomic_set(&temp_ptr_upd->in_busy, 1);
+			diagfwd_data_process_done(fwd_info,
+				temp_ptr_upd, len_upd_2);
+		} else {
+			if (flag_buf_1)
+				fwd_info->upd_len_2_a = 0;
+			if (flag_buf_2)
+				fwd_info->upd_len_2_b = 0;
+		}
+		if (fwd_info->type == TYPE_DATA && len_upd_1) {
+			if (flag_buf_1)
+				temp_ptr_upd = fwd_info->buf_upd_1_a;
+			else
+				temp_ptr_upd = fwd_info->buf_upd_1_b;
+			temp_ptr_upd->ctxt &= 0x00FFFFFF;
+			temp_ptr_upd->ctxt |=
+				(SET_PD_CTXT(ctxt_upd_1));
+			atomic_set(&temp_ptr_upd->in_busy, 1);
+			diagfwd_data_process_done(fwd_info,
+				temp_ptr_upd, len_upd_1);
+		} else {
+			if (flag_buf_1)
+				fwd_info->upd_len_1_a = 0;
+			if (flag_buf_2)
+				fwd_info->upd_len_1_b = 0;
+		}
+		if (len_cpd) {
+			temp_ptr_cpd->ctxt &= 0x00FFFFFF;
+			temp_ptr_cpd->ctxt |=
+				(SET_PD_CTXT(ctxt_cpd));
+			diagfwd_data_process_done(fwd_info,
+				temp_ptr_cpd, len_cpd);
+		} else {
+			if (flag_buf_1)
+				fwd_info->cpd_len_1 = 0;
+			if (flag_buf_2)
+				fwd_info->cpd_len_2 = 0;
+		}
+		return;
+	} else {
+		diagfwd_data_read_done(fwd_info, buf, len);
+		return;
+	}
+end:
+	diag_ws_release();
+	if (temp_ptr_cpd) {
+		DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+		"Marking buffer as free p: %d, t: %d, buf_num: %d\n",
+			fwd_info->peripheral, fwd_info->type,
+			GET_BUF_NUM(temp_ptr_cpd->ctxt));
+		diagfwd_write_done(fwd_info->peripheral, fwd_info->type,
+				   GET_BUF_NUM(temp_ptr_cpd->ctxt));
+	}
+	diagfwd_queue_read(fwd_info);
+}
+
+static void diagfwd_data_read_done(struct diagfwd_info *fwd_info,
+				   unsigned char *buf, int len)
+{
+	int err = 0;
+	int write_len = 0;
+	unsigned char *write_buf = NULL;
+	struct diagfwd_buf_t *temp_buf = NULL;
+	struct diag_md_session_t *session_info = NULL;
+	uint8_t hdlc_disabled = 0;
+
+	if (!fwd_info || !buf || len <= 0) {
+		diag_ws_release();
+		return;
+	}
+
+	switch (fwd_info->type) {
+	case TYPE_DATA:
+	case TYPE_CMD:
+		break;
+	default:
+		pr_err_ratelimited("diag: In %s, invalid type %d for peripheral %d\n",
+			__func__, fwd_info->type,
+			fwd_info->peripheral);
+		diag_ws_release();
+		return;
+	}
+
+	mutex_lock(&driver->hdlc_disable_mutex);
+	mutex_lock(&fwd_info->data_mutex);
+	mutex_lock(&driver->md_session_lock);
+	session_info = diag_md_session_get_peripheral(fwd_info->peripheral);
+	if (session_info)
+		hdlc_disabled = session_info->hdlc_disabled;
+	else
+		hdlc_disabled = driver->hdlc_disabled;
+	mutex_unlock(&driver->md_session_lock);
+	if (!driver->feature[fwd_info->peripheral].encode_hdlc) {
+		if (fwd_info->buf_1 && fwd_info->buf_1->data == buf) {
+			temp_buf = fwd_info->buf_1;
+			write_buf = fwd_info->buf_1->data;
+		} else if (fwd_info->buf_2 && fwd_info->buf_2->data == buf) {
+			temp_buf = fwd_info->buf_2;
+			write_buf = fwd_info->buf_2->data;
+		} else {
+			pr_err("diag: In %s, no match for buffer %pK, peripheral %d, type: %d\n",
+			       __func__, buf, fwd_info->peripheral,
+			       fwd_info->type);
+			goto end;
+		}
+		write_len = len;
+	} else if (hdlc_disabled) {
+		/* The data is raw and HDLC is disabled on the APPS side */
+		if (fwd_info->buf_1 && fwd_info->buf_1->data_raw == buf) {
+			temp_buf = fwd_info->buf_1;
+		} else if (fwd_info->buf_2 &&
+			   fwd_info->buf_2->data_raw == buf) {
+			temp_buf = fwd_info->buf_2;
+		} else {
+			pr_err("diag: In %s, no match for non encode buffer %pK, peripheral %d, type: %d\n",
+			       __func__, buf, fwd_info->peripheral,
+			       fwd_info->type);
+			goto end;
+		}
+		if (len > PERIPHERAL_BUF_SZ) {
+			pr_err("diag: In %s, Incoming buffer too large %d, peripheral %d, type: %d\n",
+			       __func__, len, fwd_info->peripheral,
+			       fwd_info->type);
+			goto end;
+		}
+		write_len = len;
+		write_buf = buf;
+	} else {
+		if (fwd_info->buf_1 && fwd_info->buf_1->data_raw == buf) {
+			temp_buf = fwd_info->buf_1;
+		} else if (fwd_info->buf_2 &&
+			   fwd_info->buf_2->data_raw == buf) {
+			temp_buf = fwd_info->buf_2;
+		} else {
+			pr_err("diag: In %s, no match for non encode buffer %pK, peripheral %d, type: %d\n",
+				__func__, buf, fwd_info->peripheral,
+				fwd_info->type);
+			goto end;
+		}
+		write_len = check_bufsize_for_encoding(temp_buf, len);
+		if (write_len <= 0) {
+			pr_err("diag: error in checking buf for encoding\n");
+			goto end;
+		}
+		write_buf = temp_buf->data;
+		err = diag_add_hdlc_encoding(write_buf, &write_len, buf, len);
+		if (err) {
+			pr_err("diag: error in adding hdlc encoding\n");
+			goto end;
+		}
+	}
+
+	mutex_unlock(&fwd_info->data_mutex);
+	mutex_unlock(&driver->hdlc_disable_mutex);
+
+	if (write_len > 0) {
+		err = diag_mux_write(DIAG_LOCAL_PROC, write_buf, write_len,
+				     temp_buf->ctxt);
+		if (err) {
+			pr_err_ratelimited("diag: In %s, unable to write to mux error: %d\n",
+					   __func__, err);
+			goto end_write;
+		}
+	}
+	diagfwd_queue_read(fwd_info);
+	return;
+
+end:
+	mutex_unlock(&fwd_info->data_mutex);
+	mutex_unlock(&driver->hdlc_disable_mutex);
+end_write:
+	diag_ws_release();
+	if (temp_buf) {
+		DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+		"Marking buffer as free p: %d, t: %d, buf_num: %d\n",
+			fwd_info->peripheral, fwd_info->type,
+			GET_BUF_NUM(temp_buf->ctxt));
+		diagfwd_write_done(fwd_info->peripheral, fwd_info->type,
+				   GET_BUF_NUM(temp_buf->ctxt));
+	}
+	diagfwd_queue_read(fwd_info);
+	return;
+}
+
+static void diagfwd_cntl_read_done(struct diagfwd_info *fwd_info,
+				   unsigned char *buf, int len)
+{
+	if (!fwd_info) {
+		diag_ws_release();
+		return;
+	}
+
+	if (fwd_info->type != TYPE_CNTL) {
+		pr_err("diag: In %s, invalid type %d for peripheral %d\n",
+		       __func__, fwd_info->type, fwd_info->peripheral);
+		diag_ws_release();
+		return;
+	}
+
+	diag_ws_on_read(DIAG_WS_MUX, len);
+	diag_cntl_process_read_data(fwd_info, buf, len);
+	/*
+	 * Control packets are not consumed by the clients. Mimic
+	 * consumption by setting and clearing the wakeup source copy_count
+	 * explicitly.
+	 */
+	diag_ws_on_copy_fail(DIAG_WS_MUX);
+	/* Reset the buffer in_busy value after processing the data */
+	if (fwd_info->buf_1)
+		atomic_set(&fwd_info->buf_1->in_busy, 0);
+
+	diagfwd_queue_read(fwd_info);
+	diagfwd_queue_read(&peripheral_info[TYPE_DATA][fwd_info->peripheral]);
+	diagfwd_queue_read(&peripheral_info[TYPE_CMD][fwd_info->peripheral]);
+}
+
+static void diagfwd_dci_read_done(struct diagfwd_info *fwd_info,
+				  unsigned char *buf, int len)
+{
+	if (!fwd_info)
+		return;
+
+	switch (fwd_info->type) {
+	case TYPE_DCI:
+	case TYPE_DCI_CMD:
+		break;
+	default:
+		pr_err("diag: In %s, invalid type %d for peripheral %d\n",
+		       __func__, fwd_info->type, fwd_info->peripheral);
+		return;
+	}
+
+	diag_dci_process_peripheral_data(fwd_info, (void *)buf, len);
+	/* Reset the buffer in_busy value after processing the data */
+	if (fwd_info->buf_1)
+		atomic_set(&fwd_info->buf_1->in_busy, 0);
+
+	diagfwd_queue_read(fwd_info);
+}
+
+static void diagfwd_reset_buffers(struct diagfwd_info *fwd_info,
+				  unsigned char *buf)
+{
+	if (!fwd_info || !buf)
+		return;
+
+	if (!driver->feature[fwd_info->peripheral].encode_hdlc) {
+		if (fwd_info->buf_1 && fwd_info->buf_1->data == buf)
+			atomic_set(&fwd_info->buf_1->in_busy, 0);
+		else if (fwd_info->buf_2 && fwd_info->buf_2->data == buf)
+			atomic_set(&fwd_info->buf_2->in_busy, 0);
+	} else {
+		if (fwd_info->buf_1 && fwd_info->buf_1->data_raw == buf)
+			atomic_set(&fwd_info->buf_1->in_busy, 0);
+		else if (fwd_info->buf_2 && fwd_info->buf_2->data_raw == buf)
+			atomic_set(&fwd_info->buf_2->in_busy, 0);
+	}
+	if (fwd_info->buf_1 && !atomic_read(&(fwd_info->buf_1->in_busy))) {
+		DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+		"Buffer 1 for core PD is marked free, p: %d, t: %d\n",
+			fwd_info->peripheral, fwd_info->type);
+	}
+	if (fwd_info->buf_2 && !atomic_read(&(fwd_info->buf_2->in_busy))) {
+		DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+		"Buffer 2 for core PD is marked free, p: %d, t: %d\n",
+			fwd_info->peripheral, fwd_info->type);
+	}
+}
+
+int diagfwd_peripheral_init(void)
+{
+	uint8_t peripheral;
+	uint8_t transport;
+	uint8_t type;
+	struct diagfwd_info *fwd_info = NULL;
+
+	for (transport = 0; transport < NUM_TRANSPORT; transport++) {
+		early_init_info[transport] = kzalloc(
+				sizeof(struct diagfwd_info) * NUM_PERIPHERALS,
+				GFP_KERNEL);
+		if (!early_init_info[transport])
+			return -ENOMEM;
+		kmemleak_not_leak(early_init_info[transport]);
+	}
+
+	for (peripheral = 0; peripheral < NUM_PERIPHERALS; peripheral++) {
+		for (transport = 0; transport < NUM_TRANSPORT; transport++) {
+			fwd_info = &early_init_info[transport][peripheral];
+			fwd_info->peripheral = peripheral;
+			fwd_info->type = TYPE_CNTL;
+			fwd_info->transport = transport;
+			fwd_info->ctxt = NULL;
+			fwd_info->p_ops = NULL;
+			fwd_info->ch_open = 0;
+			fwd_info->inited = 1;
+			fwd_info->read_bytes = 0;
+			fwd_info->write_bytes = 0;
+			fwd_info->cpd_len_1 = 0;
+			fwd_info->cpd_len_2 = 0;
+			fwd_info->upd_len_1_a = 0;
+			fwd_info->upd_len_1_b = 0;
+			fwd_info->upd_len_2_a = 0;
+			fwd_info->upd_len_2_b = 0;
+			mutex_init(&fwd_info->buf_mutex);
+			mutex_init(&fwd_info->data_mutex);
+			spin_lock_init(&fwd_info->write_buf_lock);
+		}
+	}
+
+	for (peripheral = 0; peripheral < NUM_PERIPHERALS; peripheral++) {
+		for (type = 0; type < NUM_TYPES; type++) {
+			fwd_info = &peripheral_info[type][peripheral];
+			fwd_info->peripheral = peripheral;
+			fwd_info->type = type;
+			fwd_info->ctxt = NULL;
+			fwd_info->p_ops = NULL;
+			fwd_info->ch_open = 0;
+			fwd_info->read_bytes = 0;
+			fwd_info->write_bytes = 0;
+			fwd_info->cpd_len_1 = 0;
+			fwd_info->cpd_len_2 = 0;
+			fwd_info->upd_len_1_a = 0;
+			fwd_info->upd_len_1_b = 0;
+			fwd_info->upd_len_2_a = 0;
+			fwd_info->upd_len_2_b = 0;
+			spin_lock_init(&fwd_info->write_buf_lock);
+			mutex_init(&fwd_info->buf_mutex);
+			mutex_init(&fwd_info->data_mutex);
+			/*
+			 * This state shouldn't be set for Control channels
+			 * during initialization. This is set when the feature
+			 * mask is received for the first time.
+			 */
+			if (type != TYPE_CNTL)
+				fwd_info->inited = 1;
+		}
+		driver->diagfwd_data[peripheral] =
+			&peripheral_info[TYPE_DATA][peripheral];
+		driver->diagfwd_cntl[peripheral] =
+			&peripheral_info[TYPE_CNTL][peripheral];
+		driver->diagfwd_dci[peripheral] =
+			&peripheral_info[TYPE_DCI][peripheral];
+		driver->diagfwd_cmd[peripheral] =
+			&peripheral_info[TYPE_CMD][peripheral];
+		driver->diagfwd_dci_cmd[peripheral] =
+			&peripheral_info[TYPE_DCI_CMD][peripheral];
+	}
+
+	diag_smd_init();
+	if (driver->supports_sockets)
+		diag_socket_init();
+	diag_glink_init();
+
+	return 0;
+}
+
+void diagfwd_peripheral_exit(void)
+{
+	uint8_t peripheral;
+	uint8_t type;
+	struct diagfwd_info *fwd_info = NULL;
+	int transport = 0;
+
+	diag_smd_exit();
+	diag_socket_exit();
+
+	for (peripheral = 0; peripheral < NUM_PERIPHERALS; peripheral++) {
+		for (type = 0; type < NUM_TYPES; type++) {
+			fwd_info = &peripheral_info[type][peripheral];
+			fwd_info->ctxt = NULL;
+			fwd_info->p_ops = NULL;
+			fwd_info->ch_open = 0;
+			diagfwd_buffers_exit(fwd_info);
+		}
+	}
+
+	for (peripheral = 0; peripheral < NUM_PERIPHERALS; peripheral++) {
+		driver->diagfwd_data[peripheral] = NULL;
+		driver->diagfwd_cntl[peripheral] = NULL;
+		driver->diagfwd_dci[peripheral] = NULL;
+		driver->diagfwd_cmd[peripheral] = NULL;
+		driver->diagfwd_dci_cmd[peripheral] = NULL;
+	}
+
+	for (transport = 0; transport < NUM_TRANSPORT; transport++) {
+		kfree(early_init_info[transport]);
+		early_init_info[transport] = NULL;
+	}
+}
+
+int diagfwd_cntl_register(uint8_t transport, uint8_t peripheral, void *ctxt,
+			  struct diag_peripheral_ops *ops,
+			  struct diagfwd_info **fwd_ctxt)
+{
+	struct diagfwd_info *fwd_info = NULL;
+
+	if (!ctxt || !ops)
+		return -EIO;
+
+	if (transport >= NUM_TRANSPORT || peripheral >= NUM_PERIPHERALS)
+		return -EINVAL;
+
+	fwd_info = &early_init_info[transport][peripheral];
+	*fwd_ctxt = &early_init_info[transport][peripheral];
+	fwd_info->ctxt = ctxt;
+	fwd_info->p_ops = ops;
+	fwd_info->c_ops = &cntl_ch_ops;
+
+	return 0;
+}
+
+int diagfwd_register(uint8_t transport, uint8_t peripheral, uint8_t type,
+		     void *ctxt, struct diag_peripheral_ops *ops,
+		     struct diagfwd_info **fwd_ctxt)
+{
+	struct diagfwd_info *fwd_info = NULL;
+
+	if (peripheral >= NUM_PERIPHERALS || type >= NUM_TYPES ||
+	    !ctxt || !ops || transport >= NUM_TRANSPORT) {
+		pr_err("diag: In %s, returning error\n", __func__);
+		return -EIO;
+	}
+
+	fwd_info = &peripheral_info[type][peripheral];
+	*fwd_ctxt = &peripheral_info[type][peripheral];
+	fwd_info->ctxt = ctxt;
+	fwd_info->p_ops = ops;
+	fwd_info->transport = transport;
+	fwd_info->ch_open = 0;
+
+	switch (type) {
+	case TYPE_DATA:
+	case TYPE_CMD:
+		fwd_info->c_ops = &data_ch_ops;
+		break;
+	case TYPE_DCI:
+	case TYPE_DCI_CMD:
+		fwd_info->c_ops = &dci_ch_ops;
+		break;
+	default:
+		pr_err("diag: In %s, invalid type: %d\n", __func__, type);
+		return -EINVAL;
+	}
+
+	if (atomic_read(&fwd_info->opened) &&
+	    fwd_info->p_ops && fwd_info->p_ops->open) {
+		/*
+		 * The registration can happen late, like in the case of
+		 * sockets. fwd_info->opened reflects diag_state. Propagate the
+		 * state to the peripherals.
+		 */
+		fwd_info->p_ops->open(fwd_info->ctxt);
+	}
+
+	return 0;
+}
+
+void diagfwd_deregister(uint8_t peripheral, uint8_t type, void *ctxt)
+{
+	struct diagfwd_info *fwd_info = NULL;
+
+	if (peripheral >= NUM_PERIPHERALS || type >= NUM_TYPES || !ctxt)
+		return;
+
+	fwd_info = &peripheral_info[type][peripheral];
+	if (fwd_info->ctxt != ctxt) {
+		pr_err("diag: In %s, unable to find a match for p: %d t: %d\n",
+		       __func__, peripheral, type);
+		return;
+	}
+	fwd_info->ctxt = NULL;
+	fwd_info->p_ops = NULL;
+	fwd_info->ch_open = 0;
+	diagfwd_buffers_exit(fwd_info);
+
+	switch (type) {
+	case TYPE_DATA:
+		driver->diagfwd_data[peripheral] = NULL;
+		break;
+	case TYPE_CNTL:
+		driver->diagfwd_cntl[peripheral] = NULL;
+		break;
+	case TYPE_DCI:
+		driver->diagfwd_dci[peripheral] = NULL;
+		break;
+	case TYPE_CMD:
+		driver->diagfwd_cmd[peripheral] = NULL;
+		break;
+	case TYPE_DCI_CMD:
+		driver->diagfwd_dci_cmd[peripheral] = NULL;
+		break;
+	}
+}
+
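+/*
+ * Switch a peripheral away from the given bootstrap transport: close its
+ * channel, copy the early-init control state into the winning transport's
+ * peripheral_info entry and kick off that transport's initialization
+ * (WDSP moves between sockets and G-Link, the others between SMD and
+ * sockets).
+ */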
+void diagfwd_close_transport(uint8_t transport, uint8_t peripheral)
+{
+	struct diagfwd_info *fwd_info = NULL;
+	struct diagfwd_info *dest_info = NULL;
+	int (*init_fn)(uint8_t) = NULL;
+	void (*invalidate_fn)(void *, struct diagfwd_info *) = NULL;
+	int (*check_channel_state)(void *) = NULL;
+	uint8_t transport_open = 0;
+	int i = 0;
+
+	if (peripheral >= NUM_PERIPHERALS)
+		return;
+
+	switch (transport) {
+	case TRANSPORT_SMD:
+		transport_open = TRANSPORT_SOCKET;
+		init_fn = diag_socket_init_peripheral;
+		invalidate_fn = diag_socket_invalidate;
+		check_channel_state = diag_socket_check_state;
+		break;
+	case TRANSPORT_SOCKET:
+		if (peripheral == PERIPHERAL_WDSP) {
+			transport_open = TRANSPORT_GLINK;
+			init_fn = diag_glink_init_peripheral;
+			invalidate_fn = diag_glink_invalidate;
+			check_channel_state = diag_glink_check_state;
+		} else {
+			transport_open = TRANSPORT_SMD;
+			init_fn = diag_smd_init_peripheral;
+			invalidate_fn = diag_smd_invalidate;
+			check_channel_state = diag_smd_check_state;
+		}
+		break;
+	default:
+		return;
+	}
+
+	mutex_lock(&driver->diagfwd_channel_mutex[peripheral]);
+	fwd_info = &early_init_info[transport][peripheral];
+	if (fwd_info->p_ops && fwd_info->p_ops->close)
+		fwd_info->p_ops->close(fwd_info->ctxt);
+	fwd_info = &early_init_info[transport_open][peripheral];
+	dest_info = &peripheral_info[TYPE_CNTL][peripheral];
+	dest_info->inited = 1;
+	dest_info->ctxt = fwd_info->ctxt;
+	dest_info->p_ops = fwd_info->p_ops;
+	dest_info->c_ops = fwd_info->c_ops;
+	dest_info->ch_open = fwd_info->ch_open;
+	dest_info->read_bytes = fwd_info->read_bytes;
+	dest_info->write_bytes = fwd_info->write_bytes;
+	dest_info->inited = fwd_info->inited;
+	dest_info->buf_1 = fwd_info->buf_1;
+	dest_info->buf_2 = fwd_info->buf_2;
+	dest_info->transport = fwd_info->transport;
+	invalidate_fn(dest_info->ctxt, dest_info);
+	for (i = 0; i < NUM_WRITE_BUFFERS; i++)
+		dest_info->buf_ptr[i] = fwd_info->buf_ptr[i];
+	if (!check_channel_state(dest_info->ctxt))
+		diagfwd_late_open(dest_info);
+	diagfwd_cntl_open(dest_info);
+	init_fn(peripheral);
+	mutex_unlock(&driver->diagfwd_channel_mutex[peripheral]);
+	diagfwd_queue_read(&peripheral_info[TYPE_DATA][peripheral]);
+	diagfwd_queue_read(&peripheral_info[TYPE_CMD][peripheral]);
+}
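+
+/*
+ * Handover summary for diagfwd_close_transport() above (descriptive only,
+ * no new behavior): the early-init control channel of the transport being
+ * closed is shut down, the surviving transport's early-init channel is
+ * promoted into peripheral_info[TYPE_CNTL], the remaining channels of the
+ * surviving transport are initialized via init_fn(), and reads are
+ * re-queued on the data and command channels.
+ */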
+
+void *diagfwd_request_write_buf(struct diagfwd_info *fwd_info)
+{
+	void *buf = NULL;
+	int index;
+	unsigned long flags;
+
+	spin_lock_irqsave(&fwd_info->write_buf_lock, flags);
+	for (index = 0; index < NUM_WRITE_BUFFERS; index++) {
+		if (!atomic_read(&(fwd_info->buf_ptr[index]->in_busy))) {
+			atomic_set(&(fwd_info->buf_ptr[index]->in_busy), 1);
+			buf = fwd_info->buf_ptr[index]->data;
+			if (!buf) {
+				/*
+				 * Don't return while holding the spinlock;
+				 * release the buffer and unlock first.
+				 */
+				atomic_set(
+					&(fwd_info->buf_ptr[index]->in_busy),
+					0);
+				spin_unlock_irqrestore(
+					&fwd_info->write_buf_lock, flags);
+				return NULL;
+			}
+			break;
+		}
+	}
+	spin_unlock_irqrestore(&fwd_info->write_buf_lock, flags);
+	return buf;
+}
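+
+/*
+ * Usage sketch (illustrative only): on G-Link, callers pair
+ * diagfwd_request_write_buf() with diagfwd_write_buffer_done(), e.g.:
+ *
+ *	void *p = diagfwd_request_write_buf(fwd_info);
+ *	if (p) {
+ *		memcpy(p, payload, len);
+ *		if (fwd_info->p_ops->write(fwd_info->ctxt, p, len))
+ *			diagfwd_write_buffer_done(fwd_info, p);
+ *	}
+ *
+ * Here payload and len are hypothetical; diagfwd_write() below does
+ * exactly this internally.
+ */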
+
+int diagfwd_write(uint8_t peripheral, uint8_t type, void *buf, int len)
+{
+	struct diagfwd_info *fwd_info = NULL;
+	int err = 0;
+	uint8_t retry_count = 0;
+	uint8_t max_retries = 3;
+	void *buf_ptr = NULL;
+
+	if (peripheral >= NUM_PERIPHERALS || type >= NUM_TYPES)
+		return -EINVAL;
+
+	if (type == TYPE_CMD || type == TYPE_DCI_CMD) {
+		if (!driver->feature[peripheral].rcvd_feature_mask ||
+			!driver->feature[peripheral].sent_feature_mask) {
+			pr_debug_ratelimited("diag: In %s, feature mask for peripheral: %d not received or sent yet\n",
+					     __func__, peripheral);
+			return 0;
+		}
+		if (!driver->feature[peripheral].separate_cmd_rsp)
+			type = (type == TYPE_CMD) ? TYPE_DATA : TYPE_DCI;
+	}
+
+	fwd_info = &peripheral_info[type][peripheral];
+	if (!fwd_info->inited || !atomic_read(&fwd_info->opened))
+		return -ENODEV;
+
+	if (!(fwd_info->p_ops && fwd_info->p_ops->write && fwd_info->ctxt))
+		return -EIO;
+
+	if (fwd_info->transport == TRANSPORT_GLINK) {
+		buf_ptr = diagfwd_request_write_buf(fwd_info);
+		if (buf_ptr) {
+			memcpy(buf_ptr, buf, len);
+		} else {
+			DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+				 "diag: buffer not found for writing\n");
+			return -EIO;
+		}
+	} else {
+		buf_ptr = buf;
+	}
+
+	while (retry_count < max_retries) {
+		err = 0;
+		err = fwd_info->p_ops->write(fwd_info->ctxt, buf_ptr, len);
+		if (err && err != -ENODEV) {
+			usleep_range(100000, 101000);
+			retry_count++;
+			continue;
+		}
+		break;
+	}
+
+	if (!err)
+		fwd_info->write_bytes += len;
+	else if (fwd_info->transport == TRANSPORT_GLINK)
+		diagfwd_write_buffer_done(fwd_info, buf_ptr);
+	return err;
+}
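+
+/*
+ * Caller sketch (illustrative only; cmd_buf and cmd_len are hypothetical):
+ * forwarding an apps-side command to the modem command channel. The retry
+ * loop above sleeps roughly 100ms between up to three attempts:
+ *
+ *	err = diagfwd_write(PERIPHERAL_MODEM, TYPE_CMD, cmd_buf, cmd_len);
+ *	if (err && err != -ENODEV)
+ *		pr_debug("diag: unable to write cmd, err: %d\n", err);
+ */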
+
+static void __diag_fwd_open(struct diagfwd_info *fwd_info)
+{
+	bool set_busy = true;
+
+	if (!fwd_info)
+		return;
+
+	atomic_set(&fwd_info->opened, 1);
+	if (!fwd_info->inited)
+		return;
+
+	/*
+	 * The logging mode here still reflects the previous mode; it is
+	 * updated to the new mode later.
+	 *
+	 * Keep the buffers busy for Memory Device mode.
+	 */
+
+#ifdef CONFIG_DIAG_OVER_USB
+	set_busy = (driver->logging_mode != DIAG_USB_MODE) ||
+		driver->usb_connected;
+#endif
+	if (set_busy) {
+		if (fwd_info->buf_1) {
+			atomic_set(&fwd_info->buf_1->in_busy, 0);
+			DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+			"Buffer 1 for core PD is marked free, p: %d, t: %d\n",
+				fwd_info->peripheral, fwd_info->type);
+		}
+		if (fwd_info->buf_2) {
+			atomic_set(&fwd_info->buf_2->in_busy, 0);
+			DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+			"Buffer 2 for core PD is marked free, p: %d, t: %d\n",
+				fwd_info->peripheral, fwd_info->type);
+		}
+	}
+
+	if (fwd_info->p_ops && fwd_info->p_ops->open)
+		fwd_info->p_ops->open(fwd_info->ctxt);
+
+	diagfwd_queue_read(fwd_info);
+}
+
+void diagfwd_early_open(uint8_t peripheral)
+{
+	uint8_t transport = 0;
+	struct diagfwd_info *fwd_info = NULL;
+
+	if (peripheral >= NUM_PERIPHERALS)
+		return;
+
+	for (transport = 0; transport < NUM_TRANSPORT; transport++) {
+		fwd_info = &early_init_info[transport][peripheral];
+		__diag_fwd_open(fwd_info);
+	}
+}
+
+void diagfwd_open(uint8_t peripheral, uint8_t type)
+{
+	struct diagfwd_info *fwd_info = NULL;
+
+	if (peripheral >= NUM_PERIPHERALS || type >= NUM_TYPES)
+		return;
+
+	fwd_info = &peripheral_info[type][peripheral];
+	__diag_fwd_open(fwd_info);
+}
+
+void diagfwd_late_open(struct diagfwd_info *fwd_info)
+{
+	__diag_fwd_open(fwd_info);
+}
+
+void diagfwd_close(uint8_t peripheral, uint8_t type)
+{
+	struct diagfwd_info *fwd_info = NULL;
+
+	if (peripheral >= NUM_PERIPHERALS || type >= NUM_TYPES)
+		return;
+
+	fwd_info = &peripheral_info[type][peripheral];
+	atomic_set(&fwd_info->opened, 0);
+	if (!fwd_info->inited)
+		return;
+
+	if (fwd_info->p_ops && fwd_info->p_ops->close)
+		fwd_info->p_ops->close(fwd_info->ctxt);
+
+	if (fwd_info->buf_1)
+		atomic_set(&fwd_info->buf_1->in_busy, 1);
+	/*
+	 * Only Data channels have two buffers. Set both the buffers
+	 * to busy on close.
+	 */
+	if (fwd_info->buf_2)
+		atomic_set(&fwd_info->buf_2->in_busy, 1);
+}
+
+int diagfwd_channel_open(struct diagfwd_info *fwd_info)
+{
+	int i;
+
+	if (!fwd_info)
+		return -EIO;
+
+	if (!fwd_info->inited) {
+		pr_debug("diag: In %s, channel is not inited, p: %d, t: %d\n",
+			 __func__, fwd_info->peripheral, fwd_info->type);
+		return -EINVAL;
+	}
+
+	if (fwd_info->ch_open) {
+		pr_debug("diag: In %s, channel is already open, p: %d, t: %d\n",
+			 __func__, fwd_info->peripheral, fwd_info->type);
+		return 0;
+	}
+	mutex_lock(&driver->diagfwd_channel_mutex[fwd_info->peripheral]);
+	fwd_info->ch_open = 1;
+	diagfwd_buffers_init(fwd_info);
+
+	/*
+	 * Initialize buffers for glink supported
+	 * peripherals only.
+	 */
+	if (fwd_info->transport == TRANSPORT_GLINK)
+		diagfwd_write_buffers_init(fwd_info);
+
+	if (fwd_info->c_ops && fwd_info->c_ops->open)
+		fwd_info->c_ops->open(fwd_info);
+	for (i = 0; i < NUM_WRITE_BUFFERS; i++) {
+		if (fwd_info->buf_ptr[i])
+			atomic_set(&fwd_info->buf_ptr[i]->in_busy, 0);
+	}
+	diagfwd_queue_read(fwd_info);
+	DIAG_LOG(DIAG_DEBUG_PERIPHERALS, "p: %d t: %d considered opened\n",
+		 fwd_info->peripheral, fwd_info->type);
+
+	if (atomic_read(&fwd_info->opened)) {
+		if (fwd_info->p_ops && fwd_info->p_ops->open)
+			fwd_info->p_ops->open(fwd_info->ctxt);
+	}
+	mutex_unlock(&driver->diagfwd_channel_mutex[fwd_info->peripheral]);
+	return 0;
+}
+
+int diagfwd_channel_close(struct diagfwd_info *fwd_info)
+{
+	int i;
+
+	if (!fwd_info)
+		return -EIO;
+
+	if (fwd_info->type == TYPE_CNTL)
+		flush_workqueue(driver->cntl_wq);
+
+	mutex_lock(&driver->diagfwd_channel_mutex[fwd_info->peripheral]);
+	fwd_info->ch_open = 0;
+	if (fwd_info->c_ops && fwd_info->c_ops->close)
+		fwd_info->c_ops->close(fwd_info);
+
+	if (fwd_info->buf_1 && fwd_info->buf_1->data) {
+		atomic_set(&fwd_info->buf_1->in_busy, 0);
+		DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+		"Buffer 1 for core PD is marked free, p: %d, t: %d\n",
+			fwd_info->peripheral, fwd_info->type);
+	}
+	if (fwd_info->buf_2 && fwd_info->buf_2->data) {
+		atomic_set(&fwd_info->buf_2->in_busy, 0);
+		DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+		"Buffer 2 for core PD is marked free, p: %d, t: %d\n",
+				fwd_info->peripheral, fwd_info->type);
+	}
+
+	for (i = 0; i < NUM_WRITE_BUFFERS; i++) {
+		if (fwd_info->buf_ptr[i])
+			atomic_set(&fwd_info->buf_ptr[i]->in_busy, 1);
+	}
+	DIAG_LOG(DIAG_DEBUG_PERIPHERALS, "p: %d t: %d considered closed\n",
+		 fwd_info->peripheral, fwd_info->type);
+	mutex_unlock(&driver->diagfwd_channel_mutex[fwd_info->peripheral]);
+	return 0;
+}
+
+int diagfwd_channel_read_done(struct diagfwd_info *fwd_info,
+			      unsigned char *buf, uint32_t len)
+{
+	if (!fwd_info) {
+		diag_ws_release();
+		return -EIO;
+	}
+
+	/*
+	 * Diag peripheral layers should send len as 0 if there is any error
+	 * in reading data from the transport. Use this information to reset the
+	 * in_busy flags. No need to queue read in this case.
+	 */
+	if (len == 0) {
+		DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+		"Read Length is 0, resetting the diag buffers p: %d, t: %d\n",
+			fwd_info->peripheral, fwd_info->type);
+		diagfwd_reset_buffers(fwd_info, buf);
+		diag_ws_release();
+		return 0;
+	}
+
+	if (fwd_info->c_ops && fwd_info->c_ops->read_done)
+		fwd_info->c_ops->read_done(fwd_info, buf, len);
+	fwd_info->read_bytes += len;
+
+	return 0;
+}
+
+void diagfwd_write_done(uint8_t peripheral, uint8_t type, int buf_num)
+{
+	struct diagfwd_info *fwd_info = NULL;
+
+	if (peripheral >= NUM_PERIPHERALS || type >= NUM_TYPES)
+		return;
+
+	fwd_info = &peripheral_info[type][peripheral];
+	if (!fwd_info)
+		return;
+
+	if (buf_num == 1 && fwd_info->buf_1) {
+		/* Buffer 1 for core PD is freed */
+		fwd_info->cpd_len_1 = 0;
+
+		if (peripheral == PERIPHERAL_LPASS) {
+			if (!fwd_info->upd_len_1_a &&
+				!fwd_info->upd_len_2_a)
+				atomic_set(&fwd_info->buf_1->in_busy, 0);
+		} else if (peripheral == PERIPHERAL_MODEM) {
+			if (!fwd_info->upd_len_1_a)
+				atomic_set(&fwd_info->buf_1->in_busy, 0);
+		} else {
+			atomic_set(&fwd_info->buf_1->in_busy, 0);
+		}
+		if (!atomic_read(&(fwd_info->buf_1->in_busy))) {
+			DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+			"Buffer 1 for core PD is marked free, p: %d, t: %d, buf_num: %d\n",
+				fwd_info->peripheral, fwd_info->type, buf_num);
+		}
+	} else if (buf_num == 2 && fwd_info->buf_2) {
+		/* Buffer 2 for core PD is freed */
+		fwd_info->cpd_len_2 = 0;
+
+		if (peripheral == PERIPHERAL_LPASS) {
+			if (!fwd_info->upd_len_1_b &&
+				!fwd_info->upd_len_2_b)
+				atomic_set(&fwd_info->buf_2->in_busy, 0);
+		} else if (peripheral == PERIPHERAL_MODEM) {
+			if (!fwd_info->upd_len_1_b)
+				atomic_set(&fwd_info->buf_2->in_busy, 0);
+		} else {
+			atomic_set(&fwd_info->buf_2->in_busy, 0);
+		}
+		if (!atomic_read(&(fwd_info->buf_2->in_busy))) {
+			DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+			"Buffer 2 for core PD is marked free, p: %d, t: %d, buf_num: %d\n",
+				fwd_info->peripheral, fwd_info->type, buf_num);
+		}
+	} else if (buf_num == 3 && fwd_info->buf_upd_1_a && fwd_info->buf_1) {
+		/* Buffer 1 for user PD 1 is freed */
+		atomic_set(&fwd_info->buf_upd_1_a->in_busy, 0);
+
+		if (peripheral == PERIPHERAL_LPASS) {
+			/*
+			 * If there is no data in the core PD and the other
+			 * user PD, free the core PD buffer for LPASS.
+			 */
+			if (!fwd_info->cpd_len_1 &&
+				!fwd_info->upd_len_2_a)
+				atomic_set(&fwd_info->buf_1->in_busy, 0);
+		} else {
+			/*
+			 * If there is no data in the core PD, free the core
+			 * PD buffer for MPSS.
+			 */
+			if (!fwd_info->cpd_len_1)
+				atomic_set(&fwd_info->buf_1->in_busy, 0);
+		}
+		if (!atomic_read(&(fwd_info->buf_1->in_busy))) {
+			DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+			"Buffer 1 for core PD is marked free, p: %d, t: %d, buf_num: %d\n",
+				fwd_info->peripheral, fwd_info->type, buf_num);
+		}
+		fwd_info->upd_len_1_a = 0;
+
+	} else if (buf_num == 4 && fwd_info->buf_upd_1_b && fwd_info->buf_2) {
+		/* Buffer 2 for user PD 1 is freed */
+		atomic_set(&fwd_info->buf_upd_1_b->in_busy, 0);
+		if (peripheral == PERIPHERAL_LPASS) {
+			/*
+			 * If there is no data in the core PD and the other
+			 * user PD, free the core PD buffer for LPASS.
+			 */
+			if (!fwd_info->cpd_len_2 &&
+				!fwd_info->upd_len_2_b)
+				atomic_set(&fwd_info->buf_2->in_busy, 0);
+		} else {
+			/*
+			 * If there is no data in the core PD, free the core
+			 * PD buffer for MPSS.
+			 */
+			if (!fwd_info->cpd_len_2)
+				atomic_set(&fwd_info->buf_2->in_busy, 0);
+		}
+		if (!atomic_read(&(fwd_info->buf_2->in_busy))) {
+			DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+			"Buffer 2 for core PD is marked free, p: %d, t: %d, buf_num: %d\n",
+				fwd_info->peripheral, fwd_info->type, buf_num);
+		}
+		fwd_info->upd_len_1_b = 0;
+
+	} else if (buf_num == 5 && fwd_info->buf_upd_2_a && fwd_info->buf_1) {
+		/* Buffer 1 for user PD 2 is freed */
+		atomic_set(&fwd_info->buf_upd_2_a->in_busy, 0);
+		/*
+		 * If there is no data in the core PD and the other user PD,
+		 * free the core PD buffer for LPASS.
+		 */
+		if (!fwd_info->cpd_len_1 &&
+			!fwd_info->upd_len_1_a) {
+			atomic_set(&fwd_info->buf_1->in_busy, 0);
+			DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+			"Buffer 1 for core PD is marked free, p: %d, t: %d, buf_num: %d\n",
+				fwd_info->peripheral, fwd_info->type, buf_num);
+		}
+
+		fwd_info->upd_len_2_a = 0;
+
+	} else if (buf_num == 6 && fwd_info->buf_upd_2_b && fwd_info->buf_2) {
+		/* Buffer 2 for user PD 2 is freed */
+		atomic_set(&fwd_info->buf_upd_2_b->in_busy, 0);
+		/*
+		 * If there is no data in the core PD and the other user PD,
+		 * free the core PD buffer for LPASS.
+		 */
+		if (!fwd_info->cpd_len_2 &&
+			!fwd_info->upd_len_1_b) {
+			atomic_set(&fwd_info->buf_2->in_busy, 0);
+			DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+			"Buffer 2 for core PD is marked free, p: %d, t: %d, buf_num: %d\n",
+				fwd_info->peripheral, fwd_info->type, buf_num);
+		}
+		fwd_info->upd_len_2_b = 0;
+
+	} else {
+		pr_err("diag: In %s, invalid buf_num: %d\n", __func__, buf_num);
+	}
+
+	diagfwd_queue_read(fwd_info);
+}
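+
+/*
+ * buf_num to buffer map used above (derived from the SET_BUF_CTXT() calls
+ * in diagfwd_buffers_init() below):
+ *
+ *	1 -> buf_1        core PD, buffer 1
+ *	2 -> buf_2        core PD, buffer 2
+ *	3 -> buf_upd_1_a  user PD 1, buffer 1
+ *	4 -> buf_upd_1_b  user PD 1, buffer 2
+ *	5 -> buf_upd_2_a  user PD 2, buffer 1 (LPASS only)
+ *	6 -> buf_upd_2_b  user PD 2, buffer 2 (LPASS only)
+ */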
+
+int diagfwd_write_buffer_done(struct diagfwd_info *fwd_info, const void *ptr)
+{
+	int found = 0;
+	int index = 0;
+	unsigned long flags;
+
+	if (!fwd_info || !ptr)
+		return found;
+	spin_lock_irqsave(&fwd_info->write_buf_lock, flags);
+	for (index = 0; index < NUM_WRITE_BUFFERS; index++) {
+		if (fwd_info->buf_ptr[index]->data == ptr) {
+			atomic_set(&fwd_info->buf_ptr[index]->in_busy, 0);
+			found = 1;
+			break;
+		}
+	}
+	spin_unlock_irqrestore(&fwd_info->write_buf_lock, flags);
+	return found;
+}
+
+void diagfwd_channel_read(struct diagfwd_info *fwd_info)
+{
+	int err = 0;
+	uint32_t read_len = 0;
+	unsigned char *read_buf = NULL;
+	struct diagfwd_buf_t *temp_buf = NULL;
+
+	if (!fwd_info) {
+		diag_ws_release();
+		return;
+	}
+
+	if (!fwd_info->inited || !atomic_read(&fwd_info->opened)) {
+		pr_debug("diag: In %s, p: %d, t: %d, inited: %d, opened: %d  ch_open: %d\n",
+			 __func__, fwd_info->peripheral, fwd_info->type,
+			 fwd_info->inited, atomic_read(&fwd_info->opened),
+			 fwd_info->ch_open);
+		diag_ws_release();
+		return;
+	}
+
+	if (fwd_info->buf_1 && !atomic_read(&fwd_info->buf_1->in_busy)) {
+		if (driver->feature[fwd_info->peripheral].encode_hdlc &&
+		    (fwd_info->type == TYPE_DATA ||
+		     fwd_info->type == TYPE_CMD)) {
+			read_buf = fwd_info->buf_1->data_raw;
+			read_len = fwd_info->buf_1->len_raw;
+		} else {
+			read_buf = fwd_info->buf_1->data;
+			read_len = fwd_info->buf_1->len;
+		}
+		if (read_buf) {
+			temp_buf = fwd_info->buf_1;
+			atomic_set(&temp_buf->in_busy, 1);
+		}
+	} else if (fwd_info->buf_2 && !atomic_read(&fwd_info->buf_2->in_busy)) {
+		if (driver->feature[fwd_info->peripheral].encode_hdlc &&
+		    (fwd_info->type == TYPE_DATA ||
+		     fwd_info->type == TYPE_CMD)) {
+			read_buf = fwd_info->buf_2->data_raw;
+			read_len = fwd_info->buf_2->len_raw;
+		} else {
+			read_buf = fwd_info->buf_2->data;
+			read_len = fwd_info->buf_2->len;
+		}
+		if (read_buf) {
+			temp_buf = fwd_info->buf_2;
+			atomic_set(&temp_buf->in_busy, 1);
+		}
+	} else {
+		pr_debug("diag: In %s, both buffers are empty for p: %d, t: %d\n",
+			 __func__, fwd_info->peripheral, fwd_info->type);
+	}
+
+	if (!read_buf) {
+		diag_ws_release();
+		return;
+	}
+
+	if (!(fwd_info->p_ops && fwd_info->p_ops->read && fwd_info->ctxt))
+		goto fail_return;
+
+	DIAG_LOG(DIAG_DEBUG_PERIPHERALS, "issued a read p: %d t: %d buf: %pK\n",
+		 fwd_info->peripheral, fwd_info->type, read_buf);
+	err = fwd_info->p_ops->read(fwd_info->ctxt, read_buf, read_len);
+	if (err)
+		goto fail_return;
+
+	return;
+
+fail_return:
+	diag_ws_release();
+	atomic_set(&temp_buf->in_busy, 0);
+	return;
+}
+
+static void diagfwd_queue_read(struct diagfwd_info *fwd_info)
+{
+	if (!fwd_info)
+		return;
+
+	if (!fwd_info->inited || !atomic_read(&fwd_info->opened)) {
+		pr_debug("diag: In %s, p: %d, t: %d, inited: %d, opened: %d  ch_open: %d\n",
+			 __func__, fwd_info->peripheral, fwd_info->type,
+			 fwd_info->inited, atomic_read(&fwd_info->opened),
+			 fwd_info->ch_open);
+		return;
+	}
+
+	/*
+	 * Don't queue a read on the data and command channels before receiving
+	 * the feature mask from the peripheral. We won't know which buffer to
+	 * use for reading - the HDLC or the non-HDLC buffer.
+	 */
+	if ((!driver->feature[fwd_info->peripheral].rcvd_feature_mask) &&
+	    (fwd_info->type != TYPE_CNTL)) {
+		return;
+	}
+
+	if (fwd_info->p_ops && fwd_info->p_ops->queue_read && fwd_info->ctxt)
+		fwd_info->p_ops->queue_read(fwd_info->ctxt);
+}
+
+void diagfwd_buffers_init(struct diagfwd_info *fwd_info)
+{
+	struct diagfwd_buf_t *temp_fwd_buf;
+	unsigned char *temp_char_buf;
+
+	if (!fwd_info)
+		return;
+
+	if (!fwd_info->inited) {
+		pr_err("diag: In %s, channel not inited, p: %d, t: %d\n",
+		       __func__, fwd_info->peripheral, fwd_info->type);
+		return;
+	}
+
+	mutex_lock(&fwd_info->buf_mutex);
+
+	if (!fwd_info->buf_1) {
+		fwd_info->buf_1 = kzalloc(sizeof(struct diagfwd_buf_t),
+					  GFP_KERNEL);
+		if (ZERO_OR_NULL_PTR(fwd_info->buf_1))
+			goto err;
+		kmemleak_not_leak(fwd_info->buf_1);
+	}
+
+	if (!fwd_info->buf_1->data) {
+		fwd_info->buf_1->data = kzalloc(PERIPHERAL_BUF_SZ +
+					APF_DIAG_PADDING,
+					GFP_KERNEL);
+		if (ZERO_OR_NULL_PTR(fwd_info->buf_1->data))
+			goto err;
+		fwd_info->buf_1->len = PERIPHERAL_BUF_SZ;
+		kmemleak_not_leak(fwd_info->buf_1->data);
+		fwd_info->buf_1->ctxt = SET_BUF_CTXT(fwd_info->peripheral,
+						     fwd_info->type, 1);
+	}
+
+	if (fwd_info->type == TYPE_DATA) {
+		if (!fwd_info->buf_2) {
+			fwd_info->buf_2 = kzalloc(sizeof(struct diagfwd_buf_t),
+					      GFP_KERNEL);
+			if (ZERO_OR_NULL_PTR(fwd_info->buf_2))
+				goto err;
+			kmemleak_not_leak(fwd_info->buf_2);
+		}
+
+		if (!fwd_info->buf_2->data) {
+			fwd_info->buf_2->data = kzalloc(PERIPHERAL_BUF_SZ +
+							APF_DIAG_PADDING,
+						    GFP_KERNEL);
+			if (ZERO_OR_NULL_PTR(fwd_info->buf_2->data))
+				goto err;
+			fwd_info->buf_2->len = PERIPHERAL_BUF_SZ;
+			kmemleak_not_leak(fwd_info->buf_2->data);
+			fwd_info->buf_2->ctxt = SET_BUF_CTXT(
+							fwd_info->peripheral,
+							fwd_info->type, 2);
+		}
+
+		if (driver->feature[fwd_info->peripheral].untag_header) {
+			if (!fwd_info->buf_upd_1_a) {
+				fwd_info->buf_upd_1_a =
+					kzalloc(sizeof(struct diagfwd_buf_t),
+						      GFP_KERNEL);
+				if (ZERO_OR_NULL_PTR(fwd_info->buf_upd_1_a))
+					goto err;
+				kmemleak_not_leak(fwd_info->buf_upd_1_a);
+			}
+
+			if (fwd_info->buf_upd_1_a &&
+				!fwd_info->buf_upd_1_a->data) {
+				fwd_info->buf_upd_1_a->data =
+					kzalloc(PERIPHERAL_BUF_SZ +
+						APF_DIAG_PADDING,
+					    GFP_KERNEL);
+				temp_char_buf = fwd_info->buf_upd_1_a->data;
+				if (ZERO_OR_NULL_PTR(temp_char_buf))
+					goto err;
+				fwd_info->buf_upd_1_a->len = PERIPHERAL_BUF_SZ;
+				kmemleak_not_leak(temp_char_buf);
+				fwd_info->buf_upd_1_a->ctxt = SET_BUF_CTXT(
+					fwd_info->peripheral,
+					fwd_info->type, 3);
+			}
+
+			if (!fwd_info->buf_upd_1_b) {
+				fwd_info->buf_upd_1_b =
+				kzalloc(sizeof(struct diagfwd_buf_t),
+					      GFP_KERNEL);
+				if (ZERO_OR_NULL_PTR(fwd_info->buf_upd_1_b))
+					goto err;
+				kmemleak_not_leak(fwd_info->buf_upd_1_b);
+			}
+
+			if (fwd_info->buf_upd_1_b &&
+				!fwd_info->buf_upd_1_b->data) {
+				fwd_info->buf_upd_1_b->data =
+					kzalloc(PERIPHERAL_BUF_SZ +
+						APF_DIAG_PADDING,
+						GFP_KERNEL);
+				temp_char_buf =
+					fwd_info->buf_upd_1_b->data;
+				if (ZERO_OR_NULL_PTR(temp_char_buf))
+					goto err;
+				fwd_info->buf_upd_1_b->len =
+					PERIPHERAL_BUF_SZ;
+				kmemleak_not_leak(temp_char_buf);
+				fwd_info->buf_upd_1_b->ctxt = SET_BUF_CTXT(
+					fwd_info->peripheral,
+					fwd_info->type, 4);
+			}
+			if (fwd_info->peripheral ==
+				PERIPHERAL_LPASS) {
+				if (!fwd_info->buf_upd_2_a) {
+					fwd_info->buf_upd_2_a =
+					kzalloc(sizeof(struct diagfwd_buf_t),
+						      GFP_KERNEL);
+					temp_fwd_buf =
+						fwd_info->buf_upd_2_a;
+					if (ZERO_OR_NULL_PTR(temp_fwd_buf))
+						goto err;
+					kmemleak_not_leak(temp_fwd_buf);
+				}
+
+				if (!fwd_info->buf_upd_2_a->data) {
+					fwd_info->buf_upd_2_a->data =
+						kzalloc(PERIPHERAL_BUF_SZ +
+							APF_DIAG_PADDING,
+						    GFP_KERNEL);
+					temp_char_buf =
+						fwd_info->buf_upd_2_a->data;
+					if (ZERO_OR_NULL_PTR(temp_char_buf))
+						goto err;
+					fwd_info->buf_upd_2_a->len =
+						PERIPHERAL_BUF_SZ;
+					kmemleak_not_leak(temp_char_buf);
+					fwd_info->buf_upd_2_a->ctxt =
+						SET_BUF_CTXT(
+						fwd_info->peripheral,
+						fwd_info->type, 5);
+				}
+				if (!fwd_info->buf_upd_2_b) {
+					fwd_info->buf_upd_2_b =
+					kzalloc(sizeof(struct diagfwd_buf_t),
+							      GFP_KERNEL);
+					temp_fwd_buf =
+						fwd_info->buf_upd_2_b;
+					if (ZERO_OR_NULL_PTR(temp_fwd_buf))
+						goto err;
+					kmemleak_not_leak(temp_fwd_buf);
+				}
+
+				if (!fwd_info->buf_upd_2_b->data) {
+					fwd_info->buf_upd_2_b->data =
+						kzalloc(PERIPHERAL_BUF_SZ +
+							APF_DIAG_PADDING,
+							GFP_KERNEL);
+					temp_char_buf =
+						fwd_info->buf_upd_2_b->data;
+					if (ZERO_OR_NULL_PTR(temp_char_buf))
+						goto err;
+					fwd_info->buf_upd_2_b->len =
+						PERIPHERAL_BUF_SZ;
+					kmemleak_not_leak(temp_char_buf);
+					fwd_info->buf_upd_2_b->ctxt =
+						SET_BUF_CTXT(
+						fwd_info->peripheral,
+						fwd_info->type, 6);
+				}
+			}
+		}
+
+		if (driver->supports_apps_hdlc_encoding) {
+			/* In support of hdlc encoding */
+			if (!fwd_info->buf_1->data_raw) {
+				fwd_info->buf_1->data_raw =
+					kzalloc(PERIPHERAL_BUF_SZ +
+						APF_DIAG_PADDING,
+						GFP_KERNEL);
+				temp_char_buf =
+					fwd_info->buf_1->data_raw;
+				if (ZERO_OR_NULL_PTR(temp_char_buf))
+					goto err;
+				fwd_info->buf_1->len_raw =
+					PERIPHERAL_BUF_SZ;
+				kmemleak_not_leak(temp_char_buf);
+			}
+
+			if (!fwd_info->buf_2->data_raw) {
+				fwd_info->buf_2->data_raw =
+					kzalloc(PERIPHERAL_BUF_SZ +
+						APF_DIAG_PADDING,
+						GFP_KERNEL);
+				temp_char_buf =
+					fwd_info->buf_2->data_raw;
+				if (ZERO_OR_NULL_PTR(temp_char_buf))
+					goto err;
+				fwd_info->buf_2->len_raw =
+					PERIPHERAL_BUF_SZ;
+				kmemleak_not_leak(temp_char_buf);
+			}
+
+			if (driver->feature[fwd_info->peripheral].
+				untag_header) {
+				if (fwd_info->buf_upd_1_a &&
+					!fwd_info->buf_upd_1_a->data_raw) {
+					fwd_info->buf_upd_1_a->data_raw =
+						kzalloc(PERIPHERAL_BUF_SZ +
+							APF_DIAG_PADDING,
+							GFP_KERNEL);
+					temp_char_buf =
+						fwd_info->buf_upd_1_a->data_raw;
+					if (ZERO_OR_NULL_PTR(temp_char_buf))
+						goto err;
+					fwd_info->buf_upd_1_a->len_raw =
+						PERIPHERAL_BUF_SZ;
+					kmemleak_not_leak(temp_char_buf);
+				}
+
+				if (fwd_info->buf_upd_1_b &&
+					!fwd_info->buf_upd_1_b->data_raw) {
+					fwd_info->buf_upd_1_b->data_raw =
+						kzalloc(PERIPHERAL_BUF_SZ +
+							APF_DIAG_PADDING,
+							GFP_KERNEL);
+					temp_char_buf =
+						fwd_info->buf_upd_1_b->data_raw;
+					if (ZERO_OR_NULL_PTR(temp_char_buf))
+						goto err;
+					fwd_info->buf_upd_1_b->len_raw =
+						PERIPHERAL_BUF_SZ;
+					kmemleak_not_leak(temp_char_buf);
+				}
+				if (fwd_info->peripheral == PERIPHERAL_LPASS
+					&& !fwd_info->buf_upd_2_a->data_raw) {
+					fwd_info->buf_upd_2_a->data_raw =
+						kzalloc(PERIPHERAL_BUF_SZ +
+							APF_DIAG_PADDING,
+							GFP_KERNEL);
+					temp_char_buf =
+						fwd_info->buf_upd_2_a->data_raw;
+					if (ZERO_OR_NULL_PTR(temp_char_buf))
+						goto err;
+					fwd_info->buf_upd_2_a->len_raw =
+						PERIPHERAL_BUF_SZ;
+					kmemleak_not_leak(temp_char_buf);
+				}
+				if (fwd_info->peripheral == PERIPHERAL_LPASS
+					&& !fwd_info->buf_upd_2_b->data_raw) {
+					fwd_info->buf_upd_2_b->data_raw =
+						kzalloc(PERIPHERAL_BUF_SZ +
+							APF_DIAG_PADDING,
+							GFP_KERNEL);
+					temp_char_buf =
+						fwd_info->buf_upd_2_b->data_raw;
+					if (ZERO_OR_NULL_PTR(temp_char_buf))
+						goto err;
+					fwd_info->buf_upd_2_b->len_raw =
+						PERIPHERAL_BUF_SZ;
+					kmemleak_not_leak(temp_char_buf);
+				}
+			}
+		}
+	}
+
+	if (fwd_info->type == TYPE_CMD &&
+		driver->supports_apps_hdlc_encoding) {
+		/* In support of hdlc encoding */
+		if (!fwd_info->buf_1->data_raw) {
+			fwd_info->buf_1->data_raw = kzalloc(PERIPHERAL_BUF_SZ +
+						APF_DIAG_PADDING,
+							GFP_KERNEL);
+			temp_char_buf =
+				fwd_info->buf_1->data_raw;
+			if (ZERO_OR_NULL_PTR(temp_char_buf))
+				goto err;
+			fwd_info->buf_1->len_raw = PERIPHERAL_BUF_SZ;
+			kmemleak_not_leak(temp_char_buf);
+		}
+	}
+
+	mutex_unlock(&fwd_info->buf_mutex);
+	return;
+
+err:
+	mutex_unlock(&fwd_info->buf_mutex);
+	diagfwd_buffers_exit(fwd_info);
+
+	return;
+}
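+
+/*
+ * Allocation summary for diagfwd_buffers_init() above (descriptive only):
+ * every channel gets buf_1; TYPE_DATA channels also get buf_2, plus the
+ * user-PD buffers when the peripheral advertises untag_header (and the
+ * second user-PD pair for LPASS), plus data_raw mirrors of each buffer
+ * when the apps processor does the HDLC encoding. TYPE_CMD channels get
+ * only the buf_1 data_raw mirror in that case.
+ */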
+
+static void diagfwd_buffers_exit(struct diagfwd_info *fwd_info)
+{
+	if (!fwd_info)
+		return;
+
+	mutex_lock(&fwd_info->buf_mutex);
+	if (fwd_info->buf_1) {
+		kfree(fwd_info->buf_1->data);
+		fwd_info->buf_1->data = NULL;
+		kfree(fwd_info->buf_1->data_raw);
+		fwd_info->buf_1->data_raw = NULL;
+		kfree(fwd_info->buf_1);
+		fwd_info->buf_1 = NULL;
+	}
+	if (fwd_info->buf_2) {
+		kfree(fwd_info->buf_2->data);
+		fwd_info->buf_2->data = NULL;
+		kfree(fwd_info->buf_2->data_raw);
+		fwd_info->buf_2->data_raw = NULL;
+		kfree(fwd_info->buf_2);
+		fwd_info->buf_2 = NULL;
+	}
+	if (fwd_info->buf_upd_1_a) {
+		kfree(fwd_info->buf_upd_1_a->data);
+		fwd_info->buf_upd_1_a->data = NULL;
+		kfree(fwd_info->buf_upd_1_a->data_raw);
+		fwd_info->buf_upd_1_a->data_raw = NULL;
+		kfree(fwd_info->buf_upd_1_a);
+		fwd_info->buf_upd_1_a = NULL;
+	}
+	if (fwd_info->buf_upd_1_b) {
+		kfree(fwd_info->buf_upd_1_b->data);
+		fwd_info->buf_upd_1_b->data = NULL;
+		kfree(fwd_info->buf_upd_1_b->data_raw);
+		fwd_info->buf_upd_1_b->data_raw = NULL;
+		kfree(fwd_info->buf_upd_1_b);
+		fwd_info->buf_upd_1_b = NULL;
+	}
+	if (fwd_info->buf_upd_2_a) {
+		kfree(fwd_info->buf_upd_2_a->data);
+		fwd_info->buf_upd_2_a->data = NULL;
+		kfree(fwd_info->buf_upd_2_a->data_raw);
+		fwd_info->buf_upd_2_a->data_raw = NULL;
+		kfree(fwd_info->buf_upd_2_a);
+		fwd_info->buf_upd_2_a = NULL;
+	}
+	if (fwd_info->buf_upd_2_b) {
+		kfree(fwd_info->buf_upd_2_b->data);
+		fwd_info->buf_upd_2_b->data = NULL;
+		kfree(fwd_info->buf_upd_2_b->data_raw);
+		fwd_info->buf_upd_2_b->data_raw = NULL;
+		kfree(fwd_info->buf_upd_2_b);
+		fwd_info->buf_upd_2_b = NULL;
+	}
+	mutex_unlock(&fwd_info->buf_mutex);
+}
+
+void diagfwd_write_buffers_init(struct diagfwd_info *fwd_info)
+{
+	unsigned long flags;
+	int i;
+
+	if (!fwd_info)
+		return;
+
+	if (!fwd_info->inited) {
+		pr_err("diag: In %s, channel not inited, p: %d, t: %d\n",
+		       __func__, fwd_info->peripheral, fwd_info->type);
+		return;
+	}
+
+	spin_lock_irqsave(&fwd_info->write_buf_lock, flags);
+	for (i = 0; i < NUM_WRITE_BUFFERS; i++) {
+		if (!fwd_info->buf_ptr[i])
+			fwd_info->buf_ptr[i] =
+					kzalloc(sizeof(struct diagfwd_buf_t),
+						GFP_ATOMIC);
+		if (!fwd_info->buf_ptr[i])
+			goto err;
+		kmemleak_not_leak(fwd_info->buf_ptr[i]);
+		if (!fwd_info->buf_ptr[i]->data) {
+			fwd_info->buf_ptr[i]->data = kzalloc(PERIPHERAL_BUF_SZ,
+								GFP_ATOMIC);
+			if (!fwd_info->buf_ptr[i]->data)
+				goto err;
+			fwd_info->buf_ptr[i]->len = PERIPHERAL_BUF_SZ;
+			kmemleak_not_leak(fwd_info->buf_ptr[i]->data);
+		}
+	}
+	spin_unlock_irqrestore(&fwd_info->write_buf_lock, flags);
+	return;
+
+err:
+	spin_unlock_irqrestore(&fwd_info->write_buf_lock, flags);
+	pr_err("diag: unable to allocate write buffers\n");
+	diagfwd_write_buffers_exit(fwd_info);
+}
+
+static void diagfwd_write_buffers_exit(struct diagfwd_info *fwd_info)
+{
+	unsigned long flags;
+	int i;
+
+	if (!fwd_info)
+		return;
+
+	spin_lock_irqsave(&fwd_info->write_buf_lock, flags);
+	for (i = 0; i < NUM_WRITE_BUFFERS; i++) {
+		if (fwd_info->buf_ptr[i]) {
+			kfree(fwd_info->buf_ptr[i]->data);
+			fwd_info->buf_ptr[i]->data = NULL;
+			kfree(fwd_info->buf_ptr[i]);
+			fwd_info->buf_ptr[i] = NULL;
+		}
+	}
+	spin_unlock_irqrestore(&fwd_info->write_buf_lock, flags);
+}
diff -Nruw linux-4.4.115-fbx/drivers/char/diag./diagfwd_peripheral.h linux-4.4.115-fbx/drivers/char/diag/diagfwd_peripheral.h
--- linux-4.4.115-fbx/drivers/char/diag./diagfwd_peripheral.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/char/diag/diagfwd_peripheral.h	2019-01-22 16:16:22.963241516 +0100
@@ -0,0 +1,133 @@
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef DIAGFWD_PERIPHERAL_H
+#define DIAGFWD_PERIPHERAL_H
+
+#define PERIPHERAL_BUF_SZ		16384
+#define MAX_PERIPHERAL_BUF_SZ		32768
+#define MAX_PERIPHERAL_HDLC_BUF_SZ	65539
+
+#define TRANSPORT_UNKNOWN		-1
+#define TRANSPORT_SMD			0
+#define TRANSPORT_SOCKET		1
+#define TRANSPORT_GLINK			2
+#define NUM_TRANSPORT			3
+#define NUM_WRITE_BUFFERS		2
+#define PERIPHERAL_MASK(x)					\
+	((x == PERIPHERAL_MODEM) ? DIAG_CON_MPSS :		\
+	((x == PERIPHERAL_LPASS) ? DIAG_CON_LPASS :		\
+	((x == PERIPHERAL_WCNSS) ? DIAG_CON_WCNSS :		\
+	((x == PERIPHERAL_SENSORS) ? DIAG_CON_SENSORS : \
+	((x == PERIPHERAL_WDSP) ? DIAG_CON_WDSP : \
+	((x == PERIPHERAL_CDSP) ? DIAG_CON_CDSP : 0))))))
+
+#define PERIPHERAL_STRING(x)					\
+	((x == PERIPHERAL_MODEM) ? "MODEM" :			\
+	((x == PERIPHERAL_LPASS) ? "LPASS" :			\
+	((x == PERIPHERAL_WCNSS) ? "WCNSS" :			\
+	((x == PERIPHERAL_SENSORS) ? "SENSORS" :		\
+	((x == PERIPHERAL_WDSP) ? "WDSP" :			\
+	((x == PERIPHERAL_CDSP) ? "CDSP" : "UNKNOWN"))))))
+
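+/*
+ * Usage sketch (illustrative only): these helpers map a peripheral index
+ * to its connection-mask bit and to a display name, e.g.
+ * PERIPHERAL_MASK(PERIPHERAL_MODEM) evaluates to DIAG_CON_MPSS and
+ * PERIPHERAL_STRING(PERIPHERAL_LPASS) to "LPASS".
+ */
+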
+struct diagfwd_buf_t {
+	unsigned char *data;
+	unsigned char *data_raw;
+	uint32_t len;
+	uint32_t len_raw;
+	atomic_t in_busy;
+	int ctxt;
+};
+
+struct diagfwd_info;
+
+struct diag_channel_ops {
+	void (*open)(struct diagfwd_info *fwd_info);
+	void (*close)(struct diagfwd_info *fwd_info);
+	void (*read_done)(struct diagfwd_info *fwd_info,
+			  unsigned char *buf, int len);
+};
+
+struct diag_peripheral_ops {
+	void (*open)(void *ctxt);
+	void (*close)(void *ctxt);
+	int (*write)(void *ctxt, unsigned char *buf, int len);
+	int (*read)(void *ctxt, unsigned char *buf, int len);
+	void (*queue_read)(void *ctxt);
+};
+
+struct diagfwd_info {
+	uint8_t peripheral;
+	uint8_t type;
+	uint8_t transport;
+	uint8_t inited;
+	uint8_t ch_open;
+	atomic_t opened;
+	unsigned long read_bytes;
+	unsigned long write_bytes;
+	spinlock_t write_buf_lock;
+	struct mutex buf_mutex;
+	struct mutex data_mutex;
+	void *ctxt;
+	struct diagfwd_buf_t *buf_1;
+	struct diagfwd_buf_t *buf_2;
+	struct diagfwd_buf_t *buf_upd_1_a;
+	struct diagfwd_buf_t *buf_upd_1_b;
+	struct diagfwd_buf_t *buf_upd_2_a;
+	struct diagfwd_buf_t *buf_upd_2_b;
+	struct diagfwd_buf_t *buf_ptr[NUM_WRITE_BUFFERS];
+	int cpd_len_1;
+	int cpd_len_2;
+	int upd_len_1_a;
+	int upd_len_1_b;
+	int upd_len_2_a;
+	int upd_len_2_b;
+	struct diag_peripheral_ops *p_ops;
+	struct diag_channel_ops *c_ops;
+};
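+
+/*
+ * Lookup sketch (illustrative only): forwarding state is indexed by
+ * channel type first, then peripheral, e.g. the modem data channel is
+ * peripheral_info[TYPE_DATA][PERIPHERAL_MODEM].
+ */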
+
+extern struct diagfwd_info peripheral_info[NUM_TYPES][NUM_PERIPHERALS];
+
+int diagfwd_peripheral_init(void);
+void diagfwd_peripheral_exit(void);
+
+void diagfwd_close_transport(uint8_t transport, uint8_t peripheral);
+
+void diagfwd_open(uint8_t peripheral, uint8_t type);
+void diagfwd_early_open(uint8_t peripheral);
+
+void diagfwd_late_open(struct diagfwd_info *fwd_info);
+void diagfwd_close(uint8_t peripheral, uint8_t type);
+
+int diag_md_get_peripheral(int ctxt);
+
+int diagfwd_register(uint8_t transport, uint8_t peripheral, uint8_t type,
+		     void *ctxt, struct diag_peripheral_ops *ops,
+		     struct diagfwd_info **fwd_ctxt);
+int diagfwd_cntl_register(uint8_t transport, uint8_t peripheral, void *ctxt,
+			  struct diag_peripheral_ops *ops,
+			  struct diagfwd_info **fwd_ctxt);
+void diagfwd_deregister(uint8_t peripheral, uint8_t type, void *ctxt);
+
+int diagfwd_write(uint8_t peripheral, uint8_t type, void *buf, int len);
+void diagfwd_write_done(uint8_t peripheral, uint8_t type, int buf_num);
+void diagfwd_buffers_init(struct diagfwd_info *fwd_info);
+
+/*
+ * The following functions are called by the channels
+ */
+int diagfwd_channel_open(struct diagfwd_info *fwd_info);
+int diagfwd_channel_close(struct diagfwd_info *fwd_info);
+void diagfwd_channel_read(struct diagfwd_info *fwd_info);
+int diagfwd_channel_read_done(struct diagfwd_info *fwd_info,
+			      unsigned char *buf, uint32_t len);
+int diagfwd_write_buffer_done(struct diagfwd_info *fwd_info, const void *ptr);
+
+#endif
diff -Nruw linux-4.4.115-fbx/drivers/char/diag./diagfwd_smd.c linux-4.4.115-fbx/drivers/char/diag/diagfwd_smd.c
--- linux-4.4.115-fbx/drivers/char/diag./diagfwd_smd.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/char/diag/diagfwd_smd.c	2019-01-22 16:16:22.963241516 +0100
@@ -0,0 +1,898 @@
+/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/sched.h>
+#include <linux/ratelimit.h>
+#include <linux/workqueue.h>
+#include <linux/pm_runtime.h>
+#include <linux/delay.h>
+#include <linux/atomic.h>
+#include <linux/diagchar.h>
+#include <linux/of.h>
+#include <linux/kmemleak.h>
+#include "diagchar.h"
+#include "diagfwd.h"
+#include "diagfwd_peripheral.h"
+#include "diagfwd_smd.h"
+#include "diag_ipc_logging.h"
+
+struct diag_smd_info smd_data[NUM_PERIPHERALS] = {
+	{
+		.peripheral = PERIPHERAL_MODEM,
+		.type = TYPE_DATA,
+		.name = "MODEM_DATA"
+	},
+	{
+		.peripheral = PERIPHERAL_LPASS,
+		.type = TYPE_DATA,
+		.name = "LPASS_DATA"
+	},
+	{
+		.peripheral = PERIPHERAL_WCNSS,
+		.type = TYPE_DATA,
+		.name = "WCNSS_DATA"
+	},
+	{
+		.peripheral = PERIPHERAL_SENSORS,
+		.type = TYPE_DATA,
+		.name = "SENSORS_DATA"
+	},
+	{
+		.peripheral = PERIPHERAL_WDSP,
+		.type = TYPE_DATA,
+		.name = "DIAG_DATA"
+	},
+	{
+		.peripheral = PERIPHERAL_CDSP,
+		.type = TYPE_DATA,
+		.name = "CDSP_DATA"
+	}
+};
+
+struct diag_smd_info smd_cntl[NUM_PERIPHERALS] = {
+	{
+		.peripheral = PERIPHERAL_MODEM,
+		.type = TYPE_CNTL,
+		.name = "MODEM_CNTL"
+	},
+	{
+		.peripheral = PERIPHERAL_LPASS,
+		.type = TYPE_CNTL,
+		.name = "LPASS_CNTL"
+	},
+	{
+		.peripheral = PERIPHERAL_WCNSS,
+		.type = TYPE_CNTL,
+		.name = "WCNSS_CNTL"
+	},
+	{
+		.peripheral = PERIPHERAL_SENSORS,
+		.type = TYPE_CNTL,
+		.name = "SENSORS_CNTL"
+	},
+	{
+		.peripheral = PERIPHERAL_WDSP,
+		.type = TYPE_CNTL,
+		.name = "DIAG_CTRL"
+	},
+	{
+		.peripheral = PERIPHERAL_CDSP,
+		.type = TYPE_CNTL,
+		.name = "CDSP_CNTL"
+	}
+};
+
+struct diag_smd_info smd_dci[NUM_PERIPHERALS] = {
+	{
+		.peripheral = PERIPHERAL_MODEM,
+		.type = TYPE_DCI,
+		.name = "MODEM_DCI"
+	},
+	{
+		.peripheral = PERIPHERAL_LPASS,
+		.type = TYPE_DCI,
+		.name = "LPASS_DCI"
+	},
+	{
+		.peripheral = PERIPHERAL_WCNSS,
+		.type = TYPE_DCI,
+		.name = "WCNSS_DCI"
+	},
+	{
+		.peripheral = PERIPHERAL_SENSORS,
+		.type = TYPE_DCI,
+		.name = "SENSORS_DCI"
+	},
+	{
+		.peripheral = PERIPHERAL_WDSP,
+		.type = TYPE_DCI,
+		.name = "DIAG_DCI_DATA"
+	},
+	{
+		.peripheral = PERIPHERAL_CDSP,
+		.type = TYPE_DCI,
+		.name = "CDSP_DCI"
+	}
+};
+
+struct diag_smd_info smd_cmd[NUM_PERIPHERALS] = {
+	{
+		.peripheral = PERIPHERAL_MODEM,
+		.type = TYPE_CMD,
+		.name = "MODEM_CMD"
+	},
+	{
+		.peripheral = PERIPHERAL_LPASS,
+		.type = TYPE_CMD,
+		.name = "LPASS_CMD"
+	},
+	{
+		.peripheral = PERIPHERAL_WCNSS,
+		.type = TYPE_CMD,
+		.name = "WCNSS_CMD"
+	},
+	{
+		.peripheral = PERIPHERAL_SENSORS,
+		.type = TYPE_CMD,
+		.name = "SENSORS_CMD"
+	},
+	{
+		.peripheral = PERIPHERAL_WDSP,
+		.type = TYPE_CMD,
+		.name = "DIAG_CMD"
+	},
+	{
+		.peripheral = PERIPHERAL_CDSP,
+		.type = TYPE_CMD,
+		.name = "CDSP_CMD"
+	}
+};
+
+struct diag_smd_info smd_dci_cmd[NUM_PERIPHERALS] = {
+	{
+		.peripheral = PERIPHERAL_MODEM,
+		.type = TYPE_DCI_CMD,
+		.name = "MODEM_DCI_CMD"
+	},
+	{
+		.peripheral = PERIPHERAL_LPASS,
+		.type = TYPE_DCI_CMD,
+		.name = "LPASS_DCI_CMD"
+	},
+	{
+		.peripheral = PERIPHERAL_WCNSS,
+		.type = TYPE_DCI_CMD,
+		.name = "WCNSS_DCI_CMD"
+	},
+	{
+		.peripheral = PERIPHERAL_SENSORS,
+		.type = TYPE_DCI_CMD,
+		.name = "SENSORS_DCI_CMD"
+	},
+	{
+		.peripheral = PERIPHERAL_WDSP,
+		.type = TYPE_DCI_CMD,
+		.name = "DIAG_DCI_CMD"
+	},
+	{
+		.peripheral = PERIPHERAL_CDSP,
+		.type = TYPE_DCI_CMD,
+		.name = "CDSP_DCI_CMD"
+	}
+};
+
+static void diag_state_open_smd(void *ctxt);
+static void diag_state_close_smd(void *ctxt);
+static void smd_notify(void *ctxt, unsigned int event);
+static int diag_smd_write(void *ctxt, unsigned char *buf, int len);
+static int diag_smd_read(void *ctxt, unsigned char *buf, int buf_len);
+static void diag_smd_queue_read(void *ctxt);
+
+static struct diag_peripheral_ops smd_ops = {
+	.open = diag_state_open_smd,
+	.close = diag_state_close_smd,
+	.write = diag_smd_write,
+	.read = diag_smd_read,
+	.queue_read = diag_smd_queue_read
+};
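+
+/*
+ * Note (descriptive only): smd_ops is the SMD implementation of
+ * struct diag_peripheral_ops; diag_smd_init() registers it for the
+ * control channels and smd_late_init() registers it for the data,
+ * command and DCI channels.
+ */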
+
+static void diag_state_open_smd(void *ctxt)
+{
+	struct diag_smd_info *smd_info = NULL;
+
+	if (!ctxt)
+		return;
+
+	smd_info = (struct diag_smd_info *)(ctxt);
+	atomic_set(&smd_info->diag_state, 1);
+	DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+		 "%s setting diag state to 1", smd_info->name);
+}
+
+static void diag_state_close_smd(void *ctxt)
+{
+	struct diag_smd_info *smd_info = NULL;
+
+	if (!ctxt)
+		return;
+
+	smd_info = (struct diag_smd_info *)(ctxt);
+	atomic_set(&smd_info->diag_state, 0);
+	DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+		 "%s setting diag state to 0", smd_info->name);
+	wake_up_interruptible(&smd_info->read_wait_q);
+	flush_workqueue(smd_info->wq);
+}
+
+static int smd_channel_probe(struct platform_device *pdev, uint8_t type)
+{
+	int r = 0;
+	int index = -1;
+	const char *channel_name = NULL;
+	struct diag_smd_info *smd_info = NULL;
+
+	switch (pdev->id) {
+	case SMD_APPS_MODEM:
+		index = PERIPHERAL_MODEM;
+		break;
+	case SMD_APPS_QDSP:
+		index = PERIPHERAL_LPASS;
+		break;
+	case SMD_APPS_WCNSS:
+		index = PERIPHERAL_WCNSS;
+		break;
+	case SMD_APPS_DSPS:
+		index = PERIPHERAL_SENSORS;
+		break;
+	default:
+		pr_debug("diag: In %s, received probe for invalid id: %d\n",
+			__func__, pdev->id);
+		return -EINVAL;
+	}
+
+	switch (type) {
+	case TYPE_DATA:
+		smd_info = &smd_data[index];
+		channel_name = "DIAG";
+		break;
+	case TYPE_CNTL:
+		smd_info = &smd_cntl[index];
+		channel_name = "DIAG_CNTL";
+		break;
+	case TYPE_CMD:
+		smd_info = &smd_cmd[index];
+		channel_name = "DIAG_CMD";
+		break;
+	case TYPE_DCI:
+		smd_info = &smd_dci[index];
+		channel_name = "DIAG_2";
+		break;
+	case TYPE_DCI_CMD:
+		smd_info = &smd_dci_cmd[index];
+		channel_name = "DIAG_2_CMD";
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	if (index == PERIPHERAL_WCNSS && type == TYPE_DATA)
+		channel_name = "APPS_RIVA_DATA";
+	else if (index == PERIPHERAL_WCNSS && type == TYPE_CNTL)
+		channel_name = "APPS_RIVA_CTRL";
+
+	if (!channel_name || !smd_info)
+		return -EIO;
+
+	r = smd_named_open_on_edge(channel_name, pdev->id, &smd_info->hdl,
+				   smd_info, smd_notify);
+
+	pm_runtime_set_active(&pdev->dev);
+	pm_runtime_enable(&pdev->dev);
+	pr_debug("diag: In %s, SMD port probed %s, id = %d, r = %d\n",
+		 __func__, smd_info->name, pdev->id, r);
+
+	return 0;
+}
+
+static int smd_data_probe(struct platform_device *pdev)
+{
+	return smd_channel_probe(pdev, TYPE_DATA);
+}
+
+static int smd_cntl_probe(struct platform_device *pdev)
+{
+	return smd_channel_probe(pdev, TYPE_CNTL);
+}
+
+static int smd_cmd_probe(struct platform_device *pdev)
+{
+	return smd_channel_probe(pdev, TYPE_CMD);
+}
+
+static int smd_dci_probe(struct platform_device *pdev)
+{
+	return smd_channel_probe(pdev, TYPE_DCI);
+}
+
+static int smd_dci_cmd_probe(struct platform_device *pdev)
+{
+	return smd_channel_probe(pdev, TYPE_DCI_CMD);
+}
+
+static int smd_runtime_suspend(struct device *dev)
+{
+	dev_dbg(dev, "pm_runtime: suspending...\n");
+	return 0;
+}
+
+static int smd_runtime_resume(struct device *dev)
+{
+	dev_dbg(dev, "pm_runtime: resuming...\n");
+	return 0;
+}
+
+static const struct dev_pm_ops smd_dev_pm_ops = {
+	.runtime_suspend = smd_runtime_suspend,
+	.runtime_resume = smd_runtime_resume,
+};
+
+static struct platform_driver diag_smd_ch_driver = {
+	.probe = smd_data_probe,
+	.driver = {
+		.name = "DIAG",
+		.owner = THIS_MODULE,
+		.pm   = &smd_dev_pm_ops,
+	},
+};
+
+static struct platform_driver diag_smd_lite_driver = {
+	.probe = smd_data_probe,
+	.driver = {
+		.name = "APPS_RIVA_DATA",
+		.owner = THIS_MODULE,
+		.pm   = &smd_dev_pm_ops,
+	},
+};
+
+static struct platform_driver diag_smd_cntl_driver = {
+	.probe = smd_cntl_probe,
+	.driver = {
+		.name = "DIAG_CNTL",
+		.owner = THIS_MODULE,
+		.pm   = &smd_dev_pm_ops,
+	},
+};
+
+static struct platform_driver diag_smd_lite_cntl_driver = {
+	.probe = smd_cntl_probe,
+	.driver = {
+		.name = "APPS_RIVA_CTRL",
+		.owner = THIS_MODULE,
+		.pm   = &smd_dev_pm_ops,
+	},
+};
+
+static struct platform_driver diag_smd_lite_cmd_driver = {
+	.probe = smd_cmd_probe,
+	.driver = {
+		.name = "DIAG_CMD",
+		.owner = THIS_MODULE,
+		.pm   = &smd_dev_pm_ops,
+	}
+};
+
+static struct platform_driver diag_smd_dci_driver = {
+	.probe = smd_dci_probe,
+	.driver = {
+		.name = "DIAG_2",
+		.owner = THIS_MODULE,
+		.pm   = &smd_dev_pm_ops,
+	},
+};
+
+static struct platform_driver diag_smd_dci_cmd_driver = {
+	.probe = smd_dci_cmd_probe,
+	.driver = {
+		.name = "DIAG_2_CMD",
+		.owner = THIS_MODULE,
+		.pm   = &smd_dev_pm_ops,
+	},
+};
+
+static void smd_open_work_fn(struct work_struct *work)
+{
+	struct diag_smd_info *smd_info = container_of(work,
+						      struct diag_smd_info,
+						      open_work);
+	if (!smd_info->inited)
+		return;
+
+	diagfwd_channel_open(smd_info->fwd_ctxt);
+	diagfwd_late_open(smd_info->fwd_ctxt);
+	DIAG_LOG(DIAG_DEBUG_PERIPHERALS, "%s exiting\n",
+		 smd_info->name);
+}
+
+static void smd_close_work_fn(struct work_struct *work)
+{
+	struct diag_smd_info *smd_info = container_of(work,
+						      struct diag_smd_info,
+						      close_work);
+	if (!smd_info->inited)
+		return;
+
+	diagfwd_channel_close(smd_info->fwd_ctxt);
+	wake_up_interruptible(&smd_info->read_wait_q);
+	DIAG_LOG(DIAG_DEBUG_PERIPHERALS, "%s exiting\n",
+		 smd_info->name);
+}
+
+static void smd_read_work_fn(struct work_struct *work)
+{
+	struct diag_smd_info *smd_info = container_of(work,
+						      struct diag_smd_info,
+						      read_work);
+	if (!smd_info->inited) {
+		diag_ws_release();
+		return;
+	}
+
+	diagfwd_channel_read(smd_info->fwd_ctxt);
+}
+
+static void diag_smd_queue_read(void *ctxt)
+{
+	struct diag_smd_info *smd_info = NULL;
+
+	if (!ctxt)
+		return;
+
+	smd_info = (struct diag_smd_info *)ctxt;
+	if (smd_info->inited && atomic_read(&smd_info->opened) &&
+	    smd_info->hdl) {
+		wake_up_interruptible(&smd_info->read_wait_q);
+		queue_work(smd_info->wq, &(smd_info->read_work));
+	}
+}
+
+int diag_smd_check_state(void *ctxt)
+{
+	struct diag_smd_info *info = NULL;
+
+	if (!ctxt)
+		return 0;
+
+	info = (struct diag_smd_info *)ctxt;
+	return (int)(atomic_read(&info->diag_state));
+}
+
+void diag_smd_invalidate(void *ctxt, struct diagfwd_info *fwd_ctxt)
+{
+	struct diag_smd_info *smd_info = NULL;
+	void *prev = NULL;
+
+	if (!ctxt || !fwd_ctxt)
+		return;
+
+	smd_info = (struct diag_smd_info *)ctxt;
+	prev = smd_info->fwd_ctxt;
+	smd_info->fwd_ctxt = fwd_ctxt;
+	DIAG_LOG(DIAG_DEBUG_PERIPHERALS, "%s prev: %pK fwd_ctxt: %pK\n",
+		 smd_info->name, prev, smd_info->fwd_ctxt);
+}
+
+static void __diag_smd_init(struct diag_smd_info *smd_info)
+{
+	char wq_name[DIAG_SMD_NAME_SZ + 10];
+
+	if (!smd_info)
+		return;
+
+	init_waitqueue_head(&smd_info->read_wait_q);
+	mutex_init(&smd_info->lock);
+	strlcpy(wq_name, "DIAG_SMD_", 10);
+	/* bound by the destination, not the source, to avoid truncation */
+	strlcat(wq_name, smd_info->name, sizeof(wq_name));
+	smd_info->wq = create_singlethread_workqueue(wq_name);
+	if (!smd_info->wq) {
+		pr_err("diag: In %s, unable to create workqueue for smd channel %s\n",
+		       __func__, smd_info->name);
+		return;
+	}
+	INIT_WORK(&(smd_info->open_work), smd_open_work_fn);
+	INIT_WORK(&(smd_info->close_work), smd_close_work_fn);
+	INIT_WORK(&(smd_info->read_work), smd_read_work_fn);
+	smd_info->fifo_size = 0;
+	smd_info->hdl = NULL;
+	smd_info->fwd_ctxt = NULL;
+	atomic_set(&smd_info->opened, 0);
+	atomic_set(&smd_info->diag_state, 0);
+
+	DIAG_LOG(DIAG_DEBUG_PERIPHERALS, "%s initialized fwd_ctxt: %pK\n",
+		 smd_info->name, smd_info->fwd_ctxt);
+}
+
+int diag_smd_init(void)
+{
+	uint8_t peripheral;
+	struct diag_smd_info *smd_info = NULL;
+
+	for (peripheral = 0; peripheral < NUM_PERIPHERALS; peripheral++) {
+		smd_info = &smd_cntl[peripheral];
+		__diag_smd_init(smd_info);
+		diagfwd_cntl_register(TRANSPORT_SMD, smd_info->peripheral,
+				      (void *)smd_info, &smd_ops,
+				      &smd_info->fwd_ctxt);
+		smd_info->inited = 1;
+		__diag_smd_init(&smd_data[peripheral]);
+		__diag_smd_init(&smd_cmd[peripheral]);
+		__diag_smd_init(&smd_dci[peripheral]);
+		__diag_smd_init(&smd_dci_cmd[peripheral]);
+	}
+
+	platform_driver_register(&diag_smd_cntl_driver);
+	platform_driver_register(&diag_smd_lite_cntl_driver);
+	platform_driver_register(&diag_smd_ch_driver);
+	platform_driver_register(&diag_smd_lite_driver);
+	platform_driver_register(&diag_smd_lite_cmd_driver);
+	platform_driver_register(&diag_smd_dci_driver);
+	platform_driver_register(&diag_smd_dci_cmd_driver);
+
+	return 0;
+}
+
+static void smd_late_init(struct diag_smd_info *smd_info)
+{
+	if (!smd_info)
+		return;
+
+	DIAG_LOG(DIAG_DEBUG_PERIPHERALS, "%s entering\n",
+		 smd_info->name);
+
+	diagfwd_register(TRANSPORT_SMD, smd_info->peripheral, smd_info->type,
+			 (void *)smd_info, &smd_ops, &smd_info->fwd_ctxt);
+	smd_info->inited = 1;
+	/*
+	 * The channel is already open by the probe call as a result of other
+	 * peripheral. Inform the diag fwd layer that the channel is open.
+	 */
+	if (atomic_read(&smd_info->opened))
+		diagfwd_channel_open(smd_info->fwd_ctxt);
+
+	DIAG_LOG(DIAG_DEBUG_PERIPHERALS, "%s exiting\n",
+		 smd_info->name);
+}
+
+int diag_smd_init_peripheral(uint8_t peripheral)
+{
+	if (peripheral >= NUM_PERIPHERALS) {
+		pr_err("diag: In %s, invalid peripheral %d\n",
+		       __func__, peripheral);
+		return -EINVAL;
+	}
+
+	smd_late_init(&smd_data[peripheral]);
+	smd_late_init(&smd_dci[peripheral]);
+	smd_late_init(&smd_cmd[peripheral]);
+	smd_late_init(&smd_dci_cmd[peripheral]);
+
+	return 0;
+}
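+
+/*
+ * Note (descriptive only): this is the init_fn invoked from
+ * diagfwd_close_transport() when the socket transport is closed and SMD
+ * survives; it late-initializes the non-control SMD channels.
+ */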
+
+static void __diag_smd_exit(struct diag_smd_info *smd_info)
+{
+	if (!smd_info)
+		return;
+
+	DIAG_LOG(DIAG_DEBUG_PERIPHERALS, "%s entering\n",
+		 smd_info->name);
+
+	diagfwd_deregister(smd_info->peripheral, smd_info->type,
+			   (void *)smd_info);
+	smd_info->fwd_ctxt = NULL;
+	smd_info->hdl = NULL;
+	if (smd_info->wq)
+		destroy_workqueue(smd_info->wq);
+
+	DIAG_LOG(DIAG_DEBUG_PERIPHERALS, "%s exiting\n",
+		 smd_info->name);
+}
+
+void diag_smd_early_exit(void)
+{
+	int i = 0;
+
+	for (i = 0; i < NUM_PERIPHERALS; i++)
+		__diag_smd_exit(&smd_cntl[i]);
+
+	platform_driver_unregister(&diag_smd_cntl_driver);
+	platform_driver_unregister(&diag_smd_lite_cntl_driver);
+}
+
+void diag_smd_exit(void)
+{
+	int i = 0;
+
+	for (i = 0; i < NUM_PERIPHERALS; i++) {
+		__diag_smd_exit(&smd_data[i]);
+		__diag_smd_exit(&smd_cmd[i]);
+		__diag_smd_exit(&smd_dci[i]);
+		__diag_smd_exit(&smd_dci_cmd[i]);
+	}
+
+	platform_driver_unregister(&diag_smd_ch_driver);
+	platform_driver_unregister(&diag_smd_lite_driver);
+	platform_driver_unregister(&diag_smd_lite_cmd_driver);
+	platform_driver_unregister(&diag_smd_dci_driver);
+	platform_driver_unregister(&diag_smd_dci_cmd_driver);
+}
+
+static int diag_smd_write_ext(struct diag_smd_info *smd_info,
+			      unsigned char *buf, int len)
+{
+	int err = 0;
+	int offset = 0;
+	int write_len = 0;
+	int retry_count = 0;
+	int max_retries = 3;
+	uint8_t avail = 0;
+
+	if (!smd_info || !buf || len <= 0) {
+		pr_err_ratelimited("diag: In %s, invalid params, smd_info: %pK, buf: %pK, len: %d\n",
+				   __func__, smd_info, buf, len);
+		return -EINVAL;
+	}
+
+	if (!smd_info->inited || !smd_info->hdl ||
+	    !atomic_read(&smd_info->opened))
+		return -ENODEV;
+
+	mutex_lock(&smd_info->lock);
+	err = smd_write_start(smd_info->hdl, len);
+	if (err) {
+		pr_err_ratelimited("diag: In %s, error calling smd_write_start, peripheral: %d, err: %d\n",
+				   __func__, smd_info->peripheral, err);
+		goto fail;
+	}
+
+	while (offset < len) {
+		retry_count = 0;
+		do {
+			if (smd_write_segment_avail(smd_info->hdl)) {
+				avail = 1;
+				break;
+			}
+			/*
+			 * The channel may be busy - the FIFO can be full.
+			 * Retry after some time. The value of 10000 was chosen
+			 * empirically as the optimal value for the peripherals
+			 * to read data from the SMD channel.
+			 */
+			usleep_range(10000, 10100);
+			retry_count++;
+		} while (retry_count < max_retries);
+
+		if (!avail) {
+			err = -EAGAIN;
+			goto fail;
+		}
+
+		write_len = smd_write_segment(smd_info->hdl, buf + offset,
+					      (len - offset));
+		offset += write_len;
+		write_len = 0;
+	}
+
+	err = smd_write_end(smd_info->hdl);
+	if (err) {
+		pr_err_ratelimited("diag: In %s, error calling smd_write_end, peripheral: %d, err: %d\n",
+				   __func__, smd_info->peripheral, err);
+		goto fail;
+	}
+
+fail:
+	mutex_unlock(&smd_info->lock);
+	DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+		 "%s wrote to channel, write_len: %d, err: %d\n",
+		 smd_info->name, offset, err);
+	return err;
+}
+
+static int diag_smd_write(void *ctxt, unsigned char *buf, int len)
+{
+	int write_len = 0;
+	int retry_count = 0;
+	int max_retries = 3;
+	struct diag_smd_info *smd_info = NULL;
+
+	if (!ctxt || !buf)
+		return -EIO;
+
+	smd_info = (struct diag_smd_info *)ctxt;
+	if (!smd_info || !buf || len <= 0) {
+		pr_err_ratelimited("diag: In %s, invalid params, smd_info: %pK, buf: %pK, len: %d\n",
+				   __func__, smd_info, buf, len);
+		return -EINVAL;
+	}
+
+	if (!smd_info->inited || !smd_info->hdl ||
+	    !atomic_read(&smd_info->opened))
+		return -ENODEV;
+
+	if (len > smd_info->fifo_size)
+		return diag_smd_write_ext(smd_info, buf, len);
+
+	do {
+		mutex_lock(&smd_info->lock);
+		write_len = smd_write(smd_info->hdl, buf, len);
+		mutex_unlock(&smd_info->lock);
+		if (write_len == len)
+			break;
+		/*
+		 * The channel may be busy - the FIFO can be full. Retry after
+		 * some time. The value of 10000 was chosen empirically as the
+		 * optimal value for the peripherals to read data from the SMD
+		 * channel.
+		 */
+		usleep_range(10000, 10100);
+		retry_count++;
+	} while (retry_count < max_retries);
+
+	DIAG_LOG(DIAG_DEBUG_PERIPHERALS, "%s wrote to channel, write_len: %d\n",
+		 smd_info->name, write_len);
+
+	if (write_len != len)
+		return -ENOMEM;
+
+	return 0;
+}
+
+static int diag_smd_read(void *ctxt, unsigned char *buf, int buf_len)
+{
+	int pkt_len = 0;
+	int err = 0;
+	int total_recd_partial = 0;
+	int total_recd = 0;
+	uint8_t buf_full = 0;
+	unsigned char *temp_buf = NULL;
+	uint32_t read_len = 0;
+	struct diag_smd_info *smd_info = NULL;
+
+	if (!ctxt || !buf || buf_len <= 0)
+		return -EIO;
+
+	smd_info = (struct diag_smd_info *)ctxt;
+	if (!smd_info->hdl || !smd_info->inited ||
+	    !atomic_read(&smd_info->opened))
+		return -EIO;
+
+	/*
+	 * Always try to read the data when a notification is received from
+	 * SMD. If the packet size is 0, release the wake source held earlier.
+	 */
+	err = wait_event_interruptible(smd_info->read_wait_q,
+				       (smd_info->hdl != NULL) &&
+				       (atomic_read(&smd_info->opened) == 1));
+	if (err) {
+		diagfwd_channel_read_done(smd_info->fwd_ctxt, buf, 0);
+		return -ERESTARTSYS;
+	}
+
+	/*
+	 * Reset the buffers and release the wake source held earlier.
+	 */
+	if (atomic_read(&smd_info->diag_state) == 0) {
+		DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+			 "%s closing read thread. diag state is closed\n",
+			 smd_info->name);
+		diagfwd_channel_read_done(smd_info->fwd_ctxt, buf, 0);
+		return 0;
+	}
+
+	if (!smd_info->hdl || !atomic_read(&smd_info->opened)) {
+		DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+			 "%s stopping read, hdl: %pK, opened: %d\n",
+			 smd_info->name, smd_info->hdl,
+			 atomic_read(&smd_info->opened));
+		goto fail_return;
+	}
+
+	do {
+		total_recd_partial = 0;
+		temp_buf = buf + total_recd;
+		pkt_len = smd_cur_packet_size(smd_info->hdl);
+		if (pkt_len <= 0)
+			break;
+
+		if (total_recd + pkt_len > buf_len) {
+			buf_full = 1;
+			break;
+		}
+
+		while (total_recd_partial < pkt_len) {
+			read_len = smd_read_avail(smd_info->hdl);
+			if (!read_len) {
+				wait_event_interruptible(smd_info->read_wait_q,
+					   ((atomic_read(&smd_info->opened)) &&
+					    smd_read_avail(smd_info->hdl)));
+
+				if (!smd_info->hdl ||
+				    !atomic_read(&smd_info->opened)) {
+					DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+						"%s exiting from wait\n",
+						smd_info->name);
+					goto fail_return;
+				}
+			}
+
+			if (pkt_len < read_len)
+				goto fail_return;
+
+			smd_read(smd_info->hdl, temp_buf, read_len);
+			total_recd_partial += read_len;
+			total_recd += read_len;
+			temp_buf += read_len;
+		}
+	} while (pkt_len > 0);
+
+	if ((smd_info->type == TYPE_DATA && pkt_len) || buf_full)
+		err = queue_work(smd_info->wq, &(smd_info->read_work));
+
+	if (total_recd > 0) {
+		DIAG_LOG(DIAG_DEBUG_PERIPHERALS, "%s read total bytes: %d\n",
+			 smd_info->name, total_recd);
+		diagfwd_channel_read_done(smd_info->fwd_ctxt, buf, total_recd);
+	} else {
+		DIAG_LOG(DIAG_DEBUG_PERIPHERALS, "%s error in read, err: %d\n",
+			 smd_info->name, total_recd);
+		goto fail_return;
+	}
+	return 0;
+
+fail_return:
+	diagfwd_channel_read_done(smd_info->fwd_ctxt, buf, 0);
+	return -EINVAL;
+}
+
+static void smd_notify(void *ctxt, unsigned int event)
+{
+	struct diag_smd_info *smd_info = NULL;
+
+	smd_info = (struct diag_smd_info *)ctxt;
+	if (!smd_info)
+		return;
+
+	switch (event) {
+	case SMD_EVENT_OPEN:
+		atomic_set(&smd_info->opened, 1);
+		smd_info->fifo_size = smd_write_avail(smd_info->hdl);
+		DIAG_LOG(DIAG_DEBUG_PERIPHERALS, "%s channel opened\n",
+			 smd_info->name);
+		queue_work(smd_info->wq, &(smd_info->open_work));
+		break;
+	case SMD_EVENT_CLOSE:
+		atomic_set(&smd_info->opened, 0);
+		DIAG_LOG(DIAG_DEBUG_PERIPHERALS, "%s channel closed\n",
+			 smd_info->name);
+		queue_work(smd_info->wq, &(smd_info->close_work));
+		break;
+	case SMD_EVENT_DATA:
+		diag_ws_on_notify();
+		queue_work(smd_info->wq, &(smd_info->read_work));
+		break;
+	}
+
+	wake_up_interruptible(&smd_info->read_wait_q);
+}
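+
+/*
+ * Event flow summary for smd_notify() above (descriptive only): OPEN
+ * caches the FIFO size and schedules open_work, CLOSE schedules
+ * close_work, and DATA takes a wakeup-source notification and schedules
+ * read_work; all three paths also wake any reader blocked in
+ * diag_smd_read().
+ */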
+
diff -Nruw linux-4.4.115-fbx/drivers/char/diag./diagfwd_smd.h linux-4.4.115-fbx/drivers/char/diag/diagfwd_smd.h
--- linux-4.4.115-fbx/drivers/char/diag./diagfwd_smd.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/char/diag/diagfwd_smd.h	2019-01-22 16:16:22.963241516 +0100
@@ -0,0 +1,50 @@
+/* Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef DIAGFWD_SMD_H
+#define DIAGFWD_SMD_H
+
+#define DIAG_SMD_NAME_SZ	24
+#define SMD_DRAIN_BUF_SIZE	4096
+
+struct diag_smd_info {
+	uint8_t peripheral;
+	uint8_t type;
+	uint8_t inited;
+	atomic_t opened;
+	atomic_t diag_state;
+	uint32_t fifo_size;
+	smd_channel_t *hdl;
+	char name[DIAG_SMD_NAME_SZ];
+	struct mutex lock;
+	wait_queue_head_t read_wait_q;
+	struct workqueue_struct *wq;
+	struct work_struct open_work;
+	struct work_struct close_work;
+	struct work_struct read_work;
+	struct diagfwd_info *fwd_ctxt;
+};
+
+extern struct diag_smd_info smd_data[NUM_PERIPHERALS];
+extern struct diag_smd_info smd_cntl[NUM_PERIPHERALS];
+extern struct diag_smd_info smd_dci[NUM_PERIPHERALS];
+extern struct diag_smd_info smd_cmd[NUM_PERIPHERALS];
+extern struct diag_smd_info smd_dci_cmd[NUM_PERIPHERALS];
+
+int diag_smd_init_peripheral(uint8_t peripheral);
+void diag_smd_exit(void);
+int diag_smd_init(void);
+void diag_smd_early_exit(void);
+void diag_smd_invalidate(void *ctxt, struct diagfwd_info *fwd_ctxt);
+int diag_smd_check_state(void *ctxt);
+
+#endif
diff -Nruw linux-4.4.115-fbx/drivers/char/diag./diagfwd_socket.c linux-4.4.115-fbx/drivers/char/diag/diagfwd_socket.c
--- linux-4.4.115-fbx/drivers/char/diag./diagfwd_socket.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/char/diag/diagfwd_socket.c	2019-01-22 16:16:22.963241516 +0100
@@ -0,0 +1,1238 @@
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/sched.h>
+#include <linux/ratelimit.h>
+#include <linux/workqueue.h>
+#include <linux/msm_ipc.h>
+#include <linux/socket.h>
+#include <linux/pm_runtime.h>
+#include <linux/delay.h>
+#include <linux/diagchar.h>
+#include <linux/of.h>
+#include <linux/kmemleak.h>
+#include <asm/current.h>
+#include <net/sock.h>
+#include <linux/ipc_router.h>
+#include <linux/notifier.h>
+#include "diagchar.h"
+#include "diagfwd.h"
+#include "diagfwd_peripheral.h"
+#include "diagfwd_socket.h"
+#include "diag_ipc_logging.h"
+
+#include <soc/qcom/subsystem_notif.h>
+#include <soc/qcom/subsystem_restart.h>
+
+#define DIAG_SVC_ID		0x1001
+
+#define MODEM_INST_BASE		0
+#define LPASS_INST_BASE		64
+#define WCNSS_INST_BASE		128
+#define SENSORS_INST_BASE	192
+#define CDSP_INST_BASE		256
+#define WDSP_INST_BASE		320
+
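+/* Channel-type offsets; ins_id = <peripheral>_INST_BASE + INST_ID_<type>. */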
+#define INST_ID_CNTL		0
+#define INST_ID_CMD		1
+#define INST_ID_DATA		2
+#define INST_ID_DCI_CMD		3
+#define INST_ID_DCI		4
+
+struct diag_cntl_socket_info *cntl_socket;
+static uint64_t bootup_req[NUM_SOCKET_SUBSYSTEMS];
+
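+/*
+ * One socket per (peripheral, channel type) pair. The DATA, CNTL and DCI
+ * channels are servers that the peripherals connect to; the CMD and
+ * DCI_CMD channels are clients that look up the peripheral's server
+ * (see __diag_socket_init()).
+ */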
+struct diag_socket_info socket_data[NUM_PERIPHERALS] = {
+	{
+		.peripheral = PERIPHERAL_MODEM,
+		.type = TYPE_DATA,
+		.name = "MODEM_DATA"
+	},
+	{
+		.peripheral = PERIPHERAL_LPASS,
+		.type = TYPE_DATA,
+		.name = "LPASS_DATA"
+	},
+	{
+		.peripheral = PERIPHERAL_WCNSS,
+		.type = TYPE_DATA,
+		.name = "WCNSS_DATA"
+	},
+	{
+		.peripheral = PERIPHERAL_SENSORS,
+		.type = TYPE_DATA,
+		.name = "SENSORS_DATA"
+	},
+	{
+		.peripheral = PERIPHERAL_WDSP,
+		.type = TYPE_DATA,
+		.name = "DIAG_DATA"
+	},
+	{
+		.peripheral = PERIPHERAL_CDSP,
+		.type = TYPE_DATA,
+		.name = "CDSP_DATA"
+	}
+};
+
+struct diag_socket_info socket_cntl[NUM_PERIPHERALS] = {
+	{
+		.peripheral = PERIPHERAL_MODEM,
+		.type = TYPE_CNTL,
+		.name = "MODEM_CNTL"
+	},
+	{
+		.peripheral = PERIPHERAL_LPASS,
+		.type = TYPE_CNTL,
+		.name = "LPASS_CNTL"
+	},
+	{
+		.peripheral = PERIPHERAL_WCNSS,
+		.type = TYPE_CNTL,
+		.name = "WCNSS_CNTL"
+	},
+	{
+		.peripheral = PERIPHERAL_SENSORS,
+		.type = TYPE_CNTL,
+		.name = "SENSORS_CNTL"
+	},
+	{
+		.peripheral = PERIPHERAL_WDSP,
+		.type = TYPE_CNTL,
+		.name = "DIAG_CTRL"
+	},
+	{
+		.peripheral = PERIPHERAL_CDSP,
+		.type = TYPE_CNTL,
+		.name = "CDSP_CNTL"
+	}
+};
+
+struct diag_socket_info socket_dci[NUM_PERIPHERALS] = {
+	{
+		.peripheral = PERIPHERAL_MODEM,
+		.type = TYPE_DCI,
+		.name = "MODEM_DCI"
+	},
+	{
+		.peripheral = PERIPHERAL_LPASS,
+		.type = TYPE_DCI,
+		.name = "LPASS_DCI"
+	},
+	{
+		.peripheral = PERIPHERAL_WCNSS,
+		.type = TYPE_DCI,
+		.name = "WCNSS_DCI"
+	},
+	{
+		.peripheral = PERIPHERAL_SENSORS,
+		.type = TYPE_DCI,
+		.name = "SENSORS_DCI"
+	},
+	{
+		.peripheral = PERIPHERAL_WDSP,
+		.type = TYPE_DCI,
+		.name = "DIAG_DCI_DATA"
+	},
+	{
+		.peripheral = PERIPHERAL_CDSP,
+		.type = TYPE_DCI,
+		.name = "CDSP_DCI"
+	}
+};
+
+struct diag_socket_info socket_cmd[NUM_PERIPHERALS] = {
+	{
+		.peripheral = PERIPHERAL_MODEM,
+		.type = TYPE_CMD,
+		.name = "MODEM_CMD"
+	},
+	{
+		.peripheral = PERIPHERAL_LPASS,
+		.type = TYPE_CMD,
+		.name = "LPASS_CMD"
+	},
+	{
+		.peripheral = PERIPHERAL_WCNSS,
+		.type = TYPE_CMD,
+		.name = "WCNSS_CMD"
+	},
+	{
+		.peripheral = PERIPHERAL_SENSORS,
+		.type = TYPE_CMD,
+		.name = "SENSORS_CMD"
+	},
+	{
+		.peripheral = PERIPHERAL_WDSP,
+		.type = TYPE_CMD,
+		.name = "DIAG_CMD"
+	},
+	{
+		.peripheral = PERIPHERAL_CDSP,
+		.type = TYPE_CMD,
+		.name = "CDSP_CMD"
+	}
+};
+
+struct diag_socket_info socket_dci_cmd[NUM_PERIPHERALS] = {
+	{
+		.peripheral = PERIPHERAL_MODEM,
+		.type = TYPE_DCI_CMD,
+		.name = "MODEM_DCI_CMD"
+	},
+	{
+		.peripheral = PERIPHERAL_LPASS,
+		.type = TYPE_DCI_CMD,
+		.name = "LPASS_DCI_CMD"
+	},
+	{
+		.peripheral = PERIPHERAL_WCNSS,
+		.type = TYPE_DCI_CMD,
+		.name = "WCNSS_DCI_CMD"
+	},
+	{
+		.peripheral = PERIPHERAL_SENSORS,
+		.type = TYPE_DCI_CMD,
+		.name = "SENSORS_DCI_CMD"
+	},
+	{
+		.peripheral = PERIPHERAL_WDSP,
+		.type = TYPE_DCI_CMD,
+		.name = "DIAG_DCI_CMD"
+	},
+	{
+		.peripheral = PERIPHERAL_CDSP,
+		.type = TYPE_DCI_CMD,
+		.name = "CDSP_DCI_CMD"
+	},
+};
+
+static void diag_state_open_socket(void *ctxt);
+static void diag_state_close_socket(void *ctxt);
+static int diag_socket_write(void *ctxt, unsigned char *buf, int len);
+static int diag_socket_read(void *ctxt, unsigned char *buf, int buf_len);
+static void diag_socket_queue_read(void *ctxt);
+static void socket_init_work_fn(struct work_struct *work);
+static int socket_ready_notify(struct notifier_block *nb,
+			       unsigned long action, void *data);
+
+static struct diag_peripheral_ops socket_ops = {
+	.open = diag_state_open_socket,
+	.close = diag_state_close_socket,
+	.write = diag_socket_write,
+	.read = diag_socket_read,
+	.queue_read = diag_socket_queue_read
+};
+
+static struct notifier_block socket_notify = {
+	.notifier_call = socket_ready_notify,
+};
+
+static void diag_state_open_socket(void *ctxt)
+{
+	struct diag_socket_info *info = NULL;
+
+	if (!ctxt)
+		return;
+
+	info = (struct diag_socket_info *)(ctxt);
+	atomic_set(&info->diag_state, 1);
+	DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+		 "%s setting diag state to 1", info->name);
+}
+
+static void diag_state_close_socket(void *ctxt)
+{
+	struct diag_socket_info *info = NULL;
+
+	if (!ctxt)
+		return;
+
+	info = (struct diag_socket_info *)(ctxt);
+	atomic_set(&info->diag_state, 0);
+	DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+		 "%s setting diag state to 0", info->name);
+	wake_up_interruptible(&info->read_wait_q);
+	flush_workqueue(info->wq);
+}
+
+static void socket_data_ready(struct sock *sk_ptr)
+{
+	unsigned long flags;
+	struct diag_socket_info *info = NULL;
+
+	if (!sk_ptr) {
+		pr_err_ratelimited("diag: In %s, invalid sk_ptr\n", __func__);
+		return;
+	}
+
+	info = (struct diag_socket_info *)(sk_ptr->sk_user_data);
+	if (!info) {
+		pr_err_ratelimited("diag: In %s, invalid info\n", __func__);
+		return;
+	}
+
+	spin_lock_irqsave(&info->lock, flags);
+	info->data_ready++;
+	spin_unlock_irqrestore(&info->lock, flags);
+	diag_ws_on_notify();
+
+	queue_work(info->wq, &(info->read_work));
+	wake_up_interruptible(&info->read_wait_q);
+}
+
+static void cntl_socket_data_ready(struct sock *sk_ptr)
+{
+	if (!sk_ptr || !cntl_socket) {
+		pr_err_ratelimited("diag: In %s, invalid ptrs. sk_ptr: %pK cntl_socket: %pK\n",
+				   __func__, sk_ptr, cntl_socket);
+		return;
+	}
+
+	atomic_inc(&cntl_socket->data_ready);
+	wake_up_interruptible(&cntl_socket->read_wait_q);
+	queue_work(cntl_socket->wq, &(cntl_socket->read_work));
+}
+
+static void socket_flow_cntl(struct sock *sk_ptr)
+{
+	struct diag_socket_info *info = NULL;
+
+	if (!sk_ptr)
+		return;
+
+	info = (struct diag_socket_info *)(sk_ptr->sk_user_data);
+	if (!info) {
+		pr_err_ratelimited("diag: In %s, invalid info\n", __func__);
+		return;
+	}
+
+	atomic_inc(&info->flow_cnt);
+	DIAG_LOG(DIAG_DEBUG_PERIPHERALS, "%s flow controlled\n", info->name);
+	pr_debug("diag: In %s, channel %s flow controlled\n",
+		 __func__, info->name);
+}
+
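+/*
+ * Ask the IPC router for the server publishing our (service, instance)
+ * pair and cache the returned node/port address in info->remote_addr so
+ * that later sendmsg() calls can address the remote end directly.
+ */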
+static int lookup_server(struct diag_socket_info *info)
+{
+	int ret = 0;
+	struct server_lookup_args *args = NULL;
+	struct sockaddr_msm_ipc *srv_addr = NULL;
+
+	if (!info)
+		return -EINVAL;
+
+	args = kzalloc((sizeof(struct server_lookup_args) +
+			sizeof(struct msm_ipc_server_info)), GFP_KERNEL);
+	if (!args)
+		return -ENOMEM;
+	kmemleak_not_leak(args);
+
+	args->lookup_mask = 0xFFFFFFFF;
+	args->port_name.service = info->svc_id;
+	args->port_name.instance = info->ins_id;
+	args->num_entries_in_array = 1;
+	args->num_entries_found = 0;
+
+	ret = kernel_sock_ioctl(info->hdl, IPC_ROUTER_IOCTL_LOOKUP_SERVER,
+				(unsigned long)args);
+	if (ret < 0) {
+		pr_err("diag: In %s, cannot find service for %s\n", __func__,
+		       info->name);
+		kfree(args);
+		return -EFAULT;
+	}
+
+	srv_addr = &info->remote_addr;
+	srv_addr->family = AF_MSM_IPC;
+	srv_addr->address.addrtype = MSM_IPC_ADDR_ID;
+	srv_addr->address.addr.port_addr.node_id = args->srv_info[0].node_id;
+	srv_addr->address.addr.port_addr.port_id = args->srv_info[0].port_id;
+	ret = args->num_entries_found;
+	kfree(args);
+	if (ret < 1)
+		return -EIO;
+	DIAG_LOG(DIAG_DEBUG_PERIPHERALS, "%s found server node: %d port: %d",
+		 info->name, srv_addr->address.addr.port_addr.node_id,
+		 srv_addr->address.addr.port_addr.port_id);
+	return 0;
+}
+
+static void __socket_open_channel(struct diag_socket_info *info)
+{
+	if (!info)
+		return;
+
+	if (!info->inited) {
+		pr_debug("diag: In %s, socket %s is not initialized\n",
+			 __func__, info->name);
+		return;
+	}
+
+	if (atomic_read(&info->opened)) {
+		pr_debug("diag: In %s, socket %s already opened\n",
+			 __func__, info->name);
+		return;
+	}
+
+	atomic_set(&info->opened, 1);
+	diagfwd_channel_open(info->fwd_ctxt);
+}
+
+static void socket_open_client(struct diag_socket_info *info)
+{
+	int ret = 0;
+
+	if (!info || info->port_type != PORT_TYPE_CLIENT)
+		return;
+
+	ret = sock_create(AF_MSM_IPC, SOCK_DGRAM, 0, &info->hdl);
+	if (ret < 0 || !info->hdl) {
+		pr_err("diag: In %s, socket not initialized for %s\n", __func__,
+		       info->name);
+		return;
+	}
+
+	write_lock_bh(&info->hdl->sk->sk_callback_lock);
+	info->hdl->sk->sk_user_data = (void *)(info);
+	info->hdl->sk->sk_data_ready = socket_data_ready;
+	info->hdl->sk->sk_write_space = socket_flow_cntl;
+	write_unlock_bh(&info->hdl->sk->sk_callback_lock);
+	ret = lookup_server(info);
+	if (ret) {
+		pr_err("diag: In %s, failed to lookup server, ret: %d\n",
+		       __func__, ret);
+		return;
+	}
+	__socket_open_channel(info);
+	DIAG_LOG(DIAG_DEBUG_PERIPHERALS, "%s opened client\n", info->name);
+}
+
+static void socket_open_server(struct diag_socket_info *info)
+{
+	int ret = 0;
+	struct sockaddr_msm_ipc srv_addr = { 0 };
+
+	if (!info)
+		return;
+
+	ret = sock_create(AF_MSM_IPC, SOCK_DGRAM, 0, &info->hdl);
+	if (ret < 0 || !info->hdl) {
+		pr_err("diag: In %s, socket not initialized for %s\n", __func__,
+		       info->name);
+		return;
+	}
+
+	write_lock_bh(&info->hdl->sk->sk_callback_lock);
+	info->hdl->sk->sk_user_data = (void *)(info);
+	info->hdl->sk->sk_data_ready = socket_data_ready;
+	info->hdl->sk->sk_write_space = socket_flow_cntl;
+	write_unlock_bh(&info->hdl->sk->sk_callback_lock);
+
+	srv_addr.family = AF_MSM_IPC;
+	srv_addr.address.addrtype = MSM_IPC_ADDR_NAME;
+	srv_addr.address.addr.port_name.service = info->svc_id;
+	srv_addr.address.addr.port_name.instance = info->ins_id;
+
+	ret = kernel_bind(info->hdl, (struct sockaddr *)&srv_addr,
+			  sizeof(srv_addr));
+	if (ret) {
+		pr_err("diag: In %s, failed to bind, ch: %s, svc_id: %d ins_id: %d, err: %d\n",
+		       __func__, info->name, info->svc_id, info->ins_id, ret);
+		return;
+	}
+	DIAG_LOG(DIAG_DEBUG_PERIPHERALS, "%s opened server svc: %d ins: %d",
+		 info->name, info->svc_id, info->ins_id);
+}
+
+static void socket_init_work_fn(struct work_struct *work)
+{
+	struct diag_socket_info *info = container_of(work,
+						     struct diag_socket_info,
+						     init_work);
+	if (!info)
+		return;
+
+	if (!info->inited) {
+		pr_debug("diag: In %s, socket %s is not initialized\n",
+			 __func__, info->name);
+		return;
+	}
+
+	switch (info->port_type) {
+	case PORT_TYPE_SERVER:
+		socket_open_server(info);
+		break;
+	case PORT_TYPE_CLIENT:
+		socket_open_client(info);
+		break;
+	default:
+		pr_err("diag: In %s, unknown type %d\n", __func__,
+		       info->port_type);
+		break;
+	}
+}
+
+static void __socket_close_channel(struct diag_socket_info *info)
+{
+	if (!info || !info->hdl)
+		return;
+
+	if (!atomic_read(&info->opened))
+		return;
+
+	if (bootup_req[info->peripheral] == PEPIPHERAL_SSR_UP) {
+		DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+		"diag: %s is up, stopping cleanup: bootup_req = %d\n",
+		info->name, (int)bootup_req[info->peripheral]);
+		return;
+	}
+
+	memset(&info->remote_addr, 0, sizeof(struct sockaddr_msm_ipc));
+	diagfwd_channel_close(info->fwd_ctxt);
+
+	atomic_set(&info->opened, 0);
+
+	/* Don't close the server. Server should always remain open */
+	if (info->port_type != PORT_TYPE_SERVER) {
+		write_lock_bh(&info->hdl->sk->sk_callback_lock);
+		info->hdl->sk->sk_user_data = NULL;
+		info->hdl->sk->sk_data_ready = NULL;
+		write_unlock_bh(&info->hdl->sk->sk_callback_lock);
+		sock_release(info->hdl);
+		info->hdl = NULL;
+		wake_up_interruptible(&info->read_wait_q);
+	}
+	DIAG_LOG(DIAG_DEBUG_PERIPHERALS, "%s exiting\n", info->name);
+}
+
+static void socket_close_channel(struct diag_socket_info *info)
+{
+	if (!info)
+		return;
+
+	__socket_close_channel(info);
+
+	DIAG_LOG(DIAG_DEBUG_PERIPHERALS, "%s exiting\n", info->name);
+}
+
+static int cntl_socket_process_msg_server(uint32_t cmd, uint32_t svc_id,
+					  uint32_t ins_id)
+{
+	uint8_t peripheral;
+	uint8_t found = 0;
+	struct diag_socket_info *info = NULL;
+
+	for (peripheral = 0; peripheral < NUM_PERIPHERALS; peripheral++) {
+		info = &socket_cmd[peripheral];
+		if ((svc_id == info->svc_id) &&
+		    (ins_id == info->ins_id)) {
+			found = 1;
+			break;
+		}
+
+		info = &socket_dci_cmd[peripheral];
+		if ((svc_id == info->svc_id) &&
+		    (ins_id == info->ins_id)) {
+			found = 1;
+			break;
+		}
+	}
+
+	if (!found)
+		return -EIO;
+
+	switch (cmd) {
+	case CNTL_CMD_NEW_SERVER:
+		DIAG_LOG(DIAG_DEBUG_PERIPHERALS, "%s received new server\n",
+			 info->name);
+		diagfwd_register(TRANSPORT_SOCKET, info->peripheral,
+				 info->type, (void *)info, &socket_ops,
+				 &info->fwd_ctxt);
+		queue_work(info->wq, &(info->init_work));
+		break;
+	case CNTL_CMD_REMOVE_SERVER:
+		DIAG_LOG(DIAG_DEBUG_PERIPHERALS, "%s received remove server\n",
+			 info->name);
+		socket_close_channel(info);
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int cntl_socket_process_msg_client(uint32_t cmd, uint32_t node_id,
+					  uint32_t port_id)
+{
+	uint8_t peripheral;
+	uint8_t found = 0;
+	struct diag_socket_info *info = NULL;
+	struct msm_ipc_port_addr remote_port = {0};
+
+	for (peripheral = 0; peripheral < NUM_PERIPHERALS; peripheral++) {
+		info = &socket_data[peripheral];
+		remote_port = info->remote_addr.address.addr.port_addr;
+		if ((remote_port.node_id == node_id) &&
+		    (remote_port.port_id == port_id)) {
+			found = 1;
+			break;
+		}
+
+		info = &socket_cntl[peripheral];
+		remote_port = info->remote_addr.address.addr.port_addr;
+		if ((remote_port.node_id == node_id) &&
+		    (remote_port.port_id == port_id)) {
+			found = 1;
+			break;
+		}
+
+		info = &socket_dci[peripheral];
+		remote_port = info->remote_addr.address.addr.port_addr;
+		if ((remote_port.node_id == node_id) &&
+		    (remote_port.port_id == port_id)) {
+			found = 1;
+			break;
+		}
+	}
+
+	if (!found)
+		return -EIO;
+
+	switch (cmd) {
+	case CNTL_CMD_REMOVE_CLIENT:
+		DIAG_LOG(DIAG_DEBUG_PERIPHERALS, "%s received remove client\n",
+			 info->name);
+		mutex_lock(&driver->diag_notifier_mutex);
+		socket_close_channel(info);
+		mutex_unlock(&driver->diag_notifier_mutex);
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int restart_notifier_cb(struct notifier_block *this,
+				  unsigned long code,
+				  void *data);
+
+struct restart_notifier_block {
+	unsigned processor;
+	char *name;
+	struct notifier_block nb;
+};
+
+static struct restart_notifier_block restart_notifiers[] = {
+	{SOCKET_MODEM, "modem", .nb.notifier_call = restart_notifier_cb},
+	{SOCKET_ADSP, "adsp", .nb.notifier_call = restart_notifier_cb},
+	{SOCKET_WCNSS, "wcnss", .nb.notifier_call = restart_notifier_cb},
+	{SOCKET_SLPI, "slpi", .nb.notifier_call = restart_notifier_cb},
+	{SOCKET_CDSP, "cdsp", .nb.notifier_call = restart_notifier_cb},
+};
+
+static void cntl_socket_read_work_fn(struct work_struct *work)
+{
+	union cntl_port_msg msg;
+	int ret = 0;
+	struct kvec iov = { 0 };
+	struct msghdr read_msg = { 0 };
+
+	if (!cntl_socket)
+		return;
+
+	ret = wait_event_interruptible(cntl_socket->read_wait_q,
+				(atomic_read(&cntl_socket->data_ready) > 0));
+	if (ret)
+		return;
+
+	do {
+		iov.iov_base = &msg;
+		iov.iov_len = sizeof(msg);
+		read_msg.msg_name = NULL;
+		read_msg.msg_namelen = 0;
+		ret = kernel_recvmsg(cntl_socket->hdl, &read_msg, &iov, 1,
+				     sizeof(msg), MSG_DONTWAIT);
+		if (ret < 0) {
+			pr_debug("diag: In %s, error receiving data %d\n",
+				 __func__, ret);
+			break;
+		}
+
+		atomic_dec(&cntl_socket->data_ready);
+
+		switch (msg.srv.cmd) {
+		case CNTL_CMD_NEW_SERVER:
+		case CNTL_CMD_REMOVE_SERVER:
+			cntl_socket_process_msg_server(msg.srv.cmd,
+						       msg.srv.service,
+						       msg.srv.instance);
+			break;
+		case CNTL_CMD_REMOVE_CLIENT:
+			cntl_socket_process_msg_client(msg.cli.cmd,
+						       msg.cli.node_id,
+						       msg.cli.port_id);
+			break;
+		}
+	} while (atomic_read(&cntl_socket->data_ready) > 0);
+}
+
+static void socket_read_work_fn(struct work_struct *work)
+{
+	struct diag_socket_info *info = container_of(work,
+						     struct diag_socket_info,
+						     read_work);
+
+	if (!info)
+		return;
+
+	if (!atomic_read(&info->opened) && info->port_type == PORT_TYPE_SERVER)
+		diagfwd_buffers_init(info->fwd_ctxt);
+
+	diagfwd_channel_read(info->fwd_ctxt);
+}
+
+static void diag_socket_queue_read(void *ctxt)
+{
+	struct diag_socket_info *info = NULL;
+
+	if (!ctxt)
+		return;
+
+	info = (struct diag_socket_info *)ctxt;
+	if (info->hdl && info->wq)
+		queue_work(info->wq, &(info->read_work));
+}
+
+void diag_socket_invalidate(void *ctxt, struct diagfwd_info *fwd_ctxt)
+{
+	struct diag_socket_info *info = NULL;
+
+	if (!ctxt || !fwd_ctxt)
+		return;
+
+	info = (struct diag_socket_info *)ctxt;
+	info->fwd_ctxt = fwd_ctxt;
+}
+
+int diag_socket_check_state(void *ctxt)
+{
+	struct diag_socket_info *info = NULL;
+
+	if (!ctxt)
+		return 0;
+
+	info = (struct diag_socket_info *)ctxt;
+	return (int)(atomic_read(&info->diag_state));
+}
+
+static void __diag_socket_init(struct diag_socket_info *info)
+{
+	uint16_t ins_base = 0;
+	uint16_t ins_offset = 0;
+	char wq_name[DIAG_SOCKET_NAME_SZ + sizeof("DIAG_SOCKET_")];
+
+	if (!info)
+		return;
+
+	init_waitqueue_head(&info->wait_q);
+	info->inited = 0;
+	atomic_set(&info->opened, 0);
+	atomic_set(&info->diag_state, 0);
+	info->pkt_len = 0;
+	info->pkt_read = 0;
+	info->hdl = NULL;
+	info->fwd_ctxt = NULL;
+	info->data_ready = 0;
+	atomic_set(&info->flow_cnt, 0);
+	spin_lock_init(&info->lock);
+	strlcpy(wq_name, "DIAG_SOCKET_", sizeof(wq_name));
+	strlcat(wq_name, info->name, sizeof(wq_name));
+	init_waitqueue_head(&info->read_wait_q);
+	info->wq = create_singlethread_workqueue(wq_name);
+	if (!info->wq) {
+		pr_err("diag: In %s, unable to create workqueue for socket channel %s\n",
+		       __func__, info->name);
+		return;
+	}
+	INIT_WORK(&(info->init_work), socket_init_work_fn);
+	INIT_WORK(&(info->read_work), socket_read_work_fn);
+
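+	/*
+	 * The IPC router instance ID is the per-peripheral base plus the
+	 * channel-type offset, e.g. the LPASS DATA channel uses
+	 * LPASS_INST_BASE + INST_ID_DATA = 64 + 2 = 66.
+	 */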
+	switch (info->peripheral) {
+	case PERIPHERAL_MODEM:
+		ins_base = MODEM_INST_BASE;
+		break;
+	case PERIPHERAL_LPASS:
+		ins_base = LPASS_INST_BASE;
+		break;
+	case PERIPHERAL_WCNSS:
+		ins_base = WCNSS_INST_BASE;
+		break;
+	case PERIPHERAL_SENSORS:
+		ins_base = SENSORS_INST_BASE;
+		break;
+	case PERIPHERAL_WDSP:
+		ins_base = WDSP_INST_BASE;
+		break;
+	case PERIPHERAL_CDSP:
+		ins_base = CDSP_INST_BASE;
+		break;
+	}
+
+	switch (info->type) {
+	case TYPE_DATA:
+		ins_offset = INST_ID_DATA;
+		info->port_type = PORT_TYPE_SERVER;
+		break;
+	case TYPE_CNTL:
+		ins_offset = INST_ID_CNTL;
+		info->port_type = PORT_TYPE_SERVER;
+		break;
+	case TYPE_DCI:
+		ins_offset = INST_ID_DCI;
+		info->port_type = PORT_TYPE_SERVER;
+		break;
+	case TYPE_CMD:
+		ins_offset = INST_ID_CMD;
+		info->port_type = PORT_TYPE_CLIENT;
+		break;
+	case TYPE_DCI_CMD:
+		ins_offset = INST_ID_DCI_CMD;
+		info->port_type = PORT_TYPE_CLIENT;
+		break;
+	}
+
+	info->svc_id = DIAG_SVC_ID;
+	info->ins_id = ins_base + ins_offset;
+	info->inited = 1;
+}
+
+static void cntl_socket_init_work_fn(struct work_struct *work)
+{
+	int ret = 0;
+
+	if (!cntl_socket)
+		return;
+
+	ret = sock_create(AF_MSM_IPC, SOCK_DGRAM, 0, &cntl_socket->hdl);
+	if (ret < 0 || !cntl_socket->hdl) {
+		pr_err("diag: In %s, cntl socket is not initialized, ret: %d\n",
+		       __func__, ret);
+		return;
+	}
+
+	write_lock_bh(&cntl_socket->hdl->sk->sk_callback_lock);
+	cntl_socket->hdl->sk->sk_user_data = (void *)cntl_socket;
+	cntl_socket->hdl->sk->sk_data_ready = cntl_socket_data_ready;
+	write_unlock_bh(&cntl_socket->hdl->sk->sk_callback_lock);
+
+	ret = kernel_sock_ioctl(cntl_socket->hdl,
+				IPC_ROUTER_IOCTL_BIND_CONTROL_PORT, 0);
+	if (ret < 0) {
+		pr_err("diag: In %s Could not bind as control port, ret: %d\n",
+		       __func__, ret);
+	}
+
+	DIAG_LOG(DIAG_DEBUG_PERIPHERALS, "Initialized control socket");
+}
+
+static int __diag_cntl_socket_init(void)
+{
+	cntl_socket = kzalloc(sizeof(struct diag_cntl_socket_info), GFP_KERNEL);
+	if (!cntl_socket)
+		return -ENOMEM;
+
+	cntl_socket->svc_id = DIAG_SVC_ID;
+	cntl_socket->ins_id = 1;
+	atomic_set(&cntl_socket->data_ready, 0);
+	init_waitqueue_head(&cntl_socket->read_wait_q);
+	cntl_socket->wq = create_singlethread_workqueue("DIAG_CNTL_SOCKET");
+	if (!cntl_socket->wq) {
+		kfree(cntl_socket);
+		cntl_socket = NULL;
+		return -ENOMEM;
+	}
+	INIT_WORK(&(cntl_socket->read_work), cntl_socket_read_work_fn);
+	INIT_WORK(&(cntl_socket->init_work), cntl_socket_init_work_fn);
+
+	return 0;
+}
+
+int diag_socket_init(void)
+{
+	int err = 0;
+	int i;
+	int peripheral = 0;
+	void *handle;
+	struct diag_socket_info *info = NULL;
+	struct restart_notifier_block *nb;
+
+	for (peripheral = 0; peripheral < NUM_PERIPHERALS; peripheral++) {
+		info = &socket_cntl[peripheral];
+		__diag_socket_init(&socket_cntl[peripheral]);
+
+		diagfwd_cntl_register(TRANSPORT_SOCKET, peripheral,
+			(void *)info, &socket_ops, &(info->fwd_ctxt));
+
+		__diag_socket_init(&socket_data[peripheral]);
+		__diag_socket_init(&socket_cmd[peripheral]);
+		__diag_socket_init(&socket_dci[peripheral]);
+		__diag_socket_init(&socket_dci_cmd[peripheral]);
+	}
+
+	err = __diag_cntl_socket_init();
+	if (err) {
+		pr_err("diag: Unable to open control sockets, err: %d\n", err);
+		goto fail;
+	}
+
+	for (i = 0; i < ARRAY_SIZE(restart_notifiers); i++) {
+		nb = &restart_notifiers[i];
+		if (nb) {
+			handle = subsys_notif_register_notifier(nb->name,
+				&nb->nb);
+			DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+			"%s: registering notifier for '%s', handle=%p\n",
+			__func__, nb->name, handle);
+		}
+	}
+
+	register_ipcrtr_af_init_notifier(&socket_notify);
+fail:
+	return err;
+}
+
+static int socket_ready_notify(struct notifier_block *nb,
+			       unsigned long action, void *data)
+{
+	uint8_t peripheral;
+	struct diag_socket_info *info = NULL;
+
+	DIAG_LOG(DIAG_DEBUG_PERIPHERALS, "received notification from IPCR");
+
+	if (action != IPCRTR_AF_INIT) {
+		DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+			 "action not recognized by diag %lu\n", action);
+		return 0;
+	}
+
+	/* Initialize only the servers */
+	for (peripheral = 0; peripheral < NUM_PERIPHERALS; peripheral++) {
+		info = &socket_cntl[peripheral];
+		queue_work(info->wq, &(info->init_work));
+		info = &socket_data[peripheral];
+		queue_work(info->wq, &(info->init_work));
+		info = &socket_dci[peripheral];
+		queue_work(info->wq, &(info->init_work));
+	}
+	DIAG_LOG(DIAG_DEBUG_PERIPHERALS, "Initialized all servers");
+
+	queue_work(cntl_socket->wq, &(cntl_socket->init_work));
+
+	return 0;
+}
+
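+/*
+ * Track subsystem restart (SSR) state per processor: bootup_req moves from
+ * PEPIPHERAL_AFTER_BOOT to PEPIPHERAL_SSR_DOWN on the first shutdown (or
+ * the first power-up after boot) and to PEPIPHERAL_SSR_UP once a later
+ * power-up completes. __socket_close_channel() uses this to skip channel
+ * cleanup while the peripheral is reported up.
+ */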
+static int restart_notifier_cb(struct notifier_block *this, unsigned long code,
+	void *_cmd)
+{
+	struct restart_notifier_block *notifier;
+
+	notifier = container_of(this,
+			struct restart_notifier_block, nb);
+	if (!notifier) {
+		DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+		"diag: %s: invalid notifier block\n", __func__);
+		return NOTIFY_DONE;
+	}
+
+	mutex_lock(&driver->diag_notifier_mutex);
+	DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+	"%s: ssr for processor %d ('%s')\n",
+	__func__, notifier->processor, notifier->name);
+
+	switch (code) {
+
+	case SUBSYS_BEFORE_SHUTDOWN:
+		DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+		"diag: %s: SUBSYS_BEFORE_SHUTDOWN\n", __func__);
+		bootup_req[notifier->processor] = PEPIPHERAL_SSR_DOWN;
+		break;
+
+	case SUBSYS_AFTER_SHUTDOWN:
+		DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+		"diag: %s: SUBSYS_AFTER_SHUTDOWN\n", __func__);
+		break;
+
+	case SUBSYS_BEFORE_POWERUP:
+		DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+		"diag: %s: SUBSYS_BEFORE_POWERUP\n", __func__);
+		break;
+
+	case SUBSYS_AFTER_POWERUP:
+		DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+		"diag: %s: SUBSYS_AFTER_POWERUP\n", __func__);
+		if (!bootup_req[notifier->processor]) {
+			bootup_req[notifier->processor] = PEPIPHERAL_SSR_DOWN;
+			break;
+		}
+		bootup_req[notifier->processor] = PEPIPHERAL_SSR_UP;
+		break;
+
+	default:
+		DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+		"diag: code: %lu\n", code);
+		break;
+	}
+	mutex_unlock(&driver->diag_notifier_mutex);
+	DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+	"diag: bootup_req[%s] = %d\n",
+	notifier->name, (int)bootup_req[notifier->processor]);
+
+	return NOTIFY_DONE;
+}
+
+int diag_socket_init_peripheral(uint8_t peripheral)
+{
+	struct diag_socket_info *info = NULL;
+
+	if (peripheral >= NUM_PERIPHERALS)
+		return -EINVAL;
+
+	info = &socket_data[peripheral];
+	diagfwd_register(TRANSPORT_SOCKET, info->peripheral,
+			 info->type, (void *)info, &socket_ops,
+			 &info->fwd_ctxt);
+
+	info = &socket_dci[peripheral];
+	diagfwd_register(TRANSPORT_SOCKET, info->peripheral,
+			 info->type, (void *)info, &socket_ops,
+			 &info->fwd_ctxt);
+	return 0;
+}
+
+static void __diag_socket_exit(struct diag_socket_info *info)
+{
+	if (!info)
+		return;
+
+	diagfwd_deregister(info->peripheral, info->type, (void *)info);
+	info->fwd_ctxt = NULL;
+	info->hdl = NULL;
+	if (info->wq)
+		destroy_workqueue(info->wq);
+}
+
+void diag_socket_early_exit(void)
+{
+	int i = 0;
+
+	for (i = 0; i < NUM_PERIPHERALS; i++)
+		__diag_socket_exit(&socket_cntl[i]);
+}
+
+void diag_socket_exit(void)
+{
+	int i = 0;
+
+	for (i = 0; i < NUM_PERIPHERALS; i++) {
+		__diag_socket_exit(&socket_data[i]);
+		__diag_socket_exit(&socket_cmd[i]);
+		__diag_socket_exit(&socket_dci[i]);
+		__diag_socket_exit(&socket_dci_cmd[i]);
+	}
+}
+
+static int diag_socket_read(void *ctxt, unsigned char *buf, int buf_len)
+{
+	int err = 0;
+	int pkt_len = 0;
+	int read_len = 0;
+	int bytes_remaining = 0;
+	int total_recd = 0;
+	int loop_count = 0;
+	uint8_t buf_full = 0;
+	unsigned char *temp = NULL;
+	struct kvec iov = {0};
+	struct msghdr read_msg = {0};
+	struct sockaddr_msm_ipc src_addr = {0};
+	struct diag_socket_info *info = NULL;
+	unsigned long flags;
+
+	info = (struct diag_socket_info *)(ctxt);
+	if (!info)
+		return -ENODEV;
+
+	if (!buf || buf_len <= 0)
+		return -EINVAL;
+
+	temp = buf;
+	bytes_remaining = buf_len;
+
+	err = wait_event_interruptible(info->read_wait_q,
+				      (info->data_ready > 0) || (!info->hdl) ||
+				      (atomic_read(&info->diag_state) == 0));
+	if (err) {
+		mutex_lock(&driver->diagfwd_channel_mutex[info->peripheral]);
+		diagfwd_channel_read_done(info->fwd_ctxt, buf, 0);
+		mutex_unlock(&driver->diagfwd_channel_mutex[info->peripheral]);
+		return -ERESTARTSYS;
+	}
+
+	/*
+	 * There is no need to continue reading over peripheral in this case.
+	 * Release the wake source hold earlier.
+	 */
+	if (atomic_read(&info->diag_state) == 0) {
+		DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+			 "%s closing read thread. diag state is closed\n",
+			 info->name);
+		mutex_lock(&driver->diagfwd_channel_mutex[info->peripheral]);
+		diagfwd_channel_read_done(info->fwd_ctxt, buf, 0);
+		mutex_unlock(&driver->diagfwd_channel_mutex[info->peripheral]);
+		return 0;
+	}
+
+	if (!info->hdl) {
+		DIAG_LOG(DIAG_DEBUG_PERIPHERALS, "%s closing read thread\n",
+			 info->name);
+		goto fail;
+	}
+
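+	/*
+	 * Peek at the head of the queue first: if the next packet does not
+	 * fit in the remaining buffer space, leave it queued, flag the
+	 * buffer as full and let the requeued work pick it up later;
+	 * otherwise consume it for real.
+	 */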
+	do {
+		loop_count++;
+		iov.iov_base = temp;
+		iov.iov_len = bytes_remaining;
+		read_msg.msg_name = &src_addr;
+		read_msg.msg_namelen = sizeof(src_addr);
+
+		pkt_len = kernel_recvmsg(info->hdl, &read_msg, &iov, 1, 0,
+					 MSG_PEEK);
+		if (pkt_len <= 0)
+			break;
+
+		if (pkt_len > bytes_remaining) {
+			buf_full = 1;
+			break;
+		}
+
+		spin_lock_irqsave(&info->lock, flags);
+		info->data_ready--;
+		spin_unlock_irqrestore(&info->lock, flags);
+
+		read_len = kernel_recvmsg(info->hdl, &read_msg, &iov, 1,
+					  pkt_len, 0);
+		if (read_len <= 0) {
+			pr_err_ratelimited("diag: In %s, error receiving data, err: %d\n",
+					   __func__, read_len);
+			goto fail;
+		}
+
+		if (!atomic_read(&info->opened) &&
+		    info->port_type == PORT_TYPE_SERVER) {
+			/*
+			 * This is the first packet from the client. Copy its
+			 * address to the connection object. Consider this
+			 * channel open for communication.
+			 */
+			memcpy(&info->remote_addr, &src_addr, sizeof(src_addr));
+			if (info->ins_id == INST_ID_DCI)
+				atomic_set(&info->opened, 1);
+			else
+				__socket_open_channel(info);
+		}
+
+		temp += read_len;
+		total_recd += read_len;
+		bytes_remaining -= read_len;
+	} while (info->data_ready > 0);
+
+	if (buf_full || (info->type == TYPE_DATA && pkt_len))
+		err = queue_work(info->wq, &(info->read_work));
+
+	if (total_recd > 0) {
+		DIAG_LOG(DIAG_DEBUG_PERIPHERALS, "%s read total bytes: %d\n",
+			 info->name, total_recd);
+		mutex_lock(&driver->diagfwd_channel_mutex[info->peripheral]);
+		err = diagfwd_channel_read_done(info->fwd_ctxt,
+						buf, total_recd);
+		mutex_unlock(&driver->diagfwd_channel_mutex[info->peripheral]);
+		if (err)
+			goto fail;
+	} else {
+		DIAG_LOG(DIAG_DEBUG_PERIPHERALS, "%s error in read, err: %d\n",
+			 info->name, total_recd);
+		goto fail;
+	}
+
+	diag_socket_queue_read(info);
+	return 0;
+
+fail:
+	mutex_lock(&driver->diagfwd_channel_mutex[info->peripheral]);
+	diagfwd_channel_read_done(info->fwd_ctxt, buf, 0);
+	mutex_unlock(&driver->diagfwd_channel_mutex[info->peripheral]);
+	return -EIO;
+}
+
+static int diag_socket_write(void *ctxt, unsigned char *buf, int len)
+{
+	int err = 0;
+	int write_len = 0;
+	struct kvec iov = {0};
+	struct msghdr write_msg = {0};
+	struct diag_socket_info *info = NULL;
+
+	if (!ctxt || !buf || len <= 0)
+		return -EIO;
+
+	info = (struct diag_socket_info *)(ctxt);
+	if (!atomic_read(&info->opened) || !info->hdl)
+		return -ENODEV;
+
+	iov.iov_base = buf;
+	iov.iov_len = len;
+	write_msg.msg_name = &info->remote_addr;
+	write_msg.msg_namelen = sizeof(info->remote_addr);
+	write_msg.msg_flags |= MSG_DONTWAIT;
+	write_len = kernel_sendmsg(info->hdl, &write_msg, &iov, 1, len);
+	if (write_len < 0) {
+		err = write_len;
+		/*
+		 * -EAGAIN means that the number of packets in flight is at
+		 * max capacity and the peripheral hasn't read the data.
+		 */
+		if (err != -EAGAIN) {
+			pr_err_ratelimited("diag: In %s, error sending data, err: %d, ch: %s\n",
+					   __func__, err, info->name);
+		}
+	} else if (write_len != len) {
+		err = write_len;
+		pr_err_ratelimited("diag: In %s, wrote partial packet to %s, len: %d, wrote: %d\n",
+				   __func__, info->name, len, write_len);
+	}
+
+	DIAG_LOG(DIAG_DEBUG_PERIPHERALS, "%s wrote to socket, len: %d\n",
+		 info->name, write_len);
+
+	return err;
+}
+
diff -Nruw linux-4.4.115-fbx/drivers/char/diag./diagfwd_socket.h linux-4.4.115-fbx/drivers/char/diag/diagfwd_socket.h
--- linux-4.4.115-fbx/drivers/char/diag./diagfwd_socket.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/char/diag/diagfwd_socket.h	2019-01-22 16:16:22.963241516 +0100
@@ -0,0 +1,110 @@
+/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef DIAGFWD_SOCKET_H
+#define DIAGFWD_SOCKET_H
+
+#include <linux/socket.h>
+#include <linux/msm_ipc.h>
+
+#define DIAG_SOCKET_NAME_SZ		24
+
+#define DIAG_SOCK_MODEM_SVC_ID		64
+#define DIAG_SOCK_MODEM_INS_ID		3
+
+#define PORT_TYPE_SERVER		0
+#define PORT_TYPE_CLIENT		1
+
+#define PEPIPHERAL_AFTER_BOOT		0
+#define PEPIPHERAL_SSR_DOWN		1
+#define PEPIPHERAL_SSR_UP		2
+
+#define CNTL_CMD_NEW_SERVER		4
+#define CNTL_CMD_REMOVE_SERVER		5
+#define CNTL_CMD_REMOVE_CLIENT		6
+
+enum {
+	SOCKET_MODEM,
+	SOCKET_ADSP,
+	SOCKET_WCNSS,
+	SOCKET_SLPI,
+	SOCKET_CDSP,
+	SOCKET_APPS,
+	NUM_SOCKET_SUBSYSTEMS,
+};
+
+struct diag_socket_info {
+	uint8_t peripheral;
+	uint8_t type;
+	uint8_t port_type;
+	uint8_t inited;
+	atomic_t opened;
+	atomic_t diag_state;
+	uint32_t pkt_len;
+	uint32_t pkt_read;
+	uint32_t svc_id;
+	uint32_t ins_id;
+	uint32_t data_ready;
+	atomic_t flow_cnt;
+	char name[DIAG_SOCKET_NAME_SZ];
+	spinlock_t lock;
+	wait_queue_head_t wait_q;
+	struct sockaddr_msm_ipc remote_addr;
+	struct socket *hdl;
+	struct workqueue_struct *wq;
+	struct work_struct init_work;
+	struct work_struct read_work;
+	struct diagfwd_info *fwd_ctxt;
+	wait_queue_head_t read_wait_q;
+};
+
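+/*
+ * Control-port notifications from the IPC router. NEW_SERVER and
+ * REMOVE_SERVER use the srv layout, REMOVE_CLIENT the shorter cli layout.
+ * Both start with cmd, so reading msg.srv.cmd before picking a view is
+ * safe.
+ */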
+union cntl_port_msg {
+	struct {
+		uint32_t cmd;
+		uint32_t service;
+		uint32_t instance;
+		uint32_t node_id;
+		uint32_t port_id;
+	} srv;
+	struct {
+		uint32_t cmd;
+		uint32_t node_id;
+		uint32_t port_id;
+	} cli;
+};
+
+struct diag_cntl_socket_info {
+	uint32_t svc_id;
+	uint32_t ins_id;
+	atomic_t data_ready;
+	struct workqueue_struct *wq;
+	struct work_struct read_work;
+	struct work_struct init_work;
+	wait_queue_head_t read_wait_q;
+	struct socket *hdl;
+};
+
+extern struct diag_socket_info socket_data[NUM_PERIPHERALS];
+extern struct diag_socket_info socket_cntl[NUM_PERIPHERALS];
+extern struct diag_socket_info socket_dci[NUM_PERIPHERALS];
+extern struct diag_socket_info socket_cmd[NUM_PERIPHERALS];
+extern struct diag_socket_info socket_dci_cmd[NUM_PERIPHERALS];
+
+extern struct diag_cntl_socket_info *cntl_socket;
+
+int diag_socket_init(void);
+int diag_socket_init_peripheral(uint8_t peripheral);
+void diag_socket_exit(void);
+void diag_socket_early_exit(void);
+void diag_socket_invalidate(void *ctxt, struct diagfwd_info *fwd_ctxt);
+int diag_socket_check_state(void *ctxt);
+#endif
diff -Nruw linux-4.4.115-fbx/drivers/char/diag./diag_ipc_logging.h linux-4.4.115-fbx/drivers/char/diag/diag_ipc_logging.h
--- linux-4.4.115-fbx/drivers/char/diag./diag_ipc_logging.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/char/diag/diag_ipc_logging.h	2019-10-29 09:26:23.449201280 +0100
@@ -0,0 +1,45 @@
+/* Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef DIAGIPCLOG_H
+#define DIAGIPCLOG_H
+
+#include <linux/ipc_logging.h>
+
+#define DIAG_IPC_LOG_PAGES	50
+
+#define DIAG_DEBUG_USERSPACE	0x0001
+#define DIAG_DEBUG_MUX		0x0002
+#define DIAG_DEBUG_DCI		0x0004
+#define DIAG_DEBUG_PERIPHERALS	0x0008
+#define DIAG_DEBUG_MASKS	0x0010
+#define DIAG_DEBUG_POWER	0x0020
+#define DIAG_DEBUG_BRIDGE	0x0040
+
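+/* Defined unconditionally here; remove to compile out all DIAG_LOG calls. */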
+#define DIAG_DEBUG
+
+#ifdef DIAG_DEBUG
+extern uint16_t diag_debug_mask;
+extern void *diag_ipc_log;
+
+#define DIAG_LOG(log_lvl, msg, ...)					\
+	do {								\
+		if (diag_ipc_log && (log_lvl & diag_debug_mask)) {	\
+			ipc_log_string(diag_ipc_log,			\
+				"[%s] " msg, __func__, ##__VA_ARGS__);	\
+		}							\
+	} while (0)
+#else
+#define DIAG_LOG(log_lvl, msg, ...)
+#endif
+
+#endif
diff -Nruw linux-4.4.115-fbx/drivers/char/diag./diag_masks.c linux-4.4.115-fbx/drivers/char/diag/diag_masks.c
--- linux-4.4.115-fbx/drivers/char/diag./diag_masks.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/char/diag/diag_masks.c	2019-10-29 09:26:23.449201280 +0100
@@ -0,0 +1,2351 @@
+/* Copyright (c) 2008-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/diagchar.h>
+#include <linux/kmemleak.h>
+#include <linux/workqueue.h>
+#include <linux/uaccess.h>
+#include "diagchar.h"
+#include "diagfwd_cntl.h"
+#include "diag_masks.h"
+#include "diagfwd_peripheral.h"
+#include "diag_ipc_logging.h"
+
+#define ALL_EQUIP_ID		100
+#define ALL_SSID		-1
+
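+/* Set bit x of the feature_bytes bitmap: byte x / 8, bit x % 8. */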
+#define DIAG_SET_FEATURE_MASK(x) (feature_bytes[(x)/8] |= (1 << (x & 0x7)))
+
+#define diag_check_update(x)	\
+	(!info || (info->peripheral_mask & MD_PERIPHERAL_MASK(x)) || \
+	 (info->peripheral_mask & MD_PERIPHERAL_PD_MASK(x)))
+
+struct diag_mask_info msg_mask;
+struct diag_mask_info msg_bt_mask;
+struct diag_mask_info log_mask;
+struct diag_mask_info event_mask;
+
+static const struct diag_ssid_range_t msg_mask_tbl[] = {
+	{ .ssid_first = MSG_SSID_0, .ssid_last = MSG_SSID_0_LAST },
+	{ .ssid_first = MSG_SSID_1, .ssid_last = MSG_SSID_1_LAST },
+	{ .ssid_first = MSG_SSID_2, .ssid_last = MSG_SSID_2_LAST },
+	{ .ssid_first = MSG_SSID_3, .ssid_last = MSG_SSID_3_LAST },
+	{ .ssid_first = MSG_SSID_4, .ssid_last = MSG_SSID_4_LAST },
+	{ .ssid_first = MSG_SSID_5, .ssid_last = MSG_SSID_5_LAST },
+	{ .ssid_first = MSG_SSID_6, .ssid_last = MSG_SSID_6_LAST },
+	{ .ssid_first = MSG_SSID_7, .ssid_last = MSG_SSID_7_LAST },
+	{ .ssid_first = MSG_SSID_8, .ssid_last = MSG_SSID_8_LAST },
+	{ .ssid_first = MSG_SSID_9, .ssid_last = MSG_SSID_9_LAST },
+	{ .ssid_first = MSG_SSID_10, .ssid_last = MSG_SSID_10_LAST },
+	{ .ssid_first = MSG_SSID_11, .ssid_last = MSG_SSID_11_LAST },
+	{ .ssid_first = MSG_SSID_12, .ssid_last = MSG_SSID_12_LAST },
+	{ .ssid_first = MSG_SSID_13, .ssid_last = MSG_SSID_13_LAST },
+	{ .ssid_first = MSG_SSID_14, .ssid_last = MSG_SSID_14_LAST },
+	{ .ssid_first = MSG_SSID_15, .ssid_last = MSG_SSID_15_LAST },
+	{ .ssid_first = MSG_SSID_16, .ssid_last = MSG_SSID_16_LAST },
+	{ .ssid_first = MSG_SSID_17, .ssid_last = MSG_SSID_17_LAST },
+	{ .ssid_first = MSG_SSID_18, .ssid_last = MSG_SSID_18_LAST },
+	{ .ssid_first = MSG_SSID_19, .ssid_last = MSG_SSID_19_LAST },
+	{ .ssid_first = MSG_SSID_20, .ssid_last = MSG_SSID_20_LAST },
+	{ .ssid_first = MSG_SSID_21, .ssid_last = MSG_SSID_21_LAST },
+	{ .ssid_first = MSG_SSID_22, .ssid_last = MSG_SSID_22_LAST },
+	{ .ssid_first = MSG_SSID_23, .ssid_last = MSG_SSID_23_LAST },
+	{ .ssid_first = MSG_SSID_24, .ssid_last = MSG_SSID_24_LAST },
+	{ .ssid_first = MSG_SSID_25, .ssid_last = MSG_SSID_25_LAST }
+};
+
+static int diag_apps_responds(void)
+{
+	/*
+	 * Apps processor should respond to mask commands only if the
+	 * Modem channel is up, the feature mask is received from Modem
+	 * and if Modem supports Mask Centralization.
+	 */
+	if (!chk_apps_only())
+		return 0;
+
+	if (driver->diagfwd_cntl[PERIPHERAL_MODEM] &&
+	    driver->diagfwd_cntl[PERIPHERAL_MODEM]->ch_open &&
+	    driver->feature[PERIPHERAL_MODEM].rcvd_feature_mask) {
+		if (driver->feature[PERIPHERAL_MODEM].mask_centralization)
+			return 1;
+		return 0;
+	}
+	return 1;
+}
+
+static void diag_send_log_mask_update(uint8_t peripheral, int equip_id)
+{
+	int i;
+	int err = 0;
+	int send_once = 0;
+	int header_len = sizeof(struct diag_ctrl_log_mask);
+	uint8_t *buf = NULL, upd = 0;
+	uint8_t *temp = NULL;
+	uint32_t mask_size = 0;
+	struct diag_ctrl_log_mask ctrl_pkt;
+	struct diag_mask_info *mask_info = NULL;
+	struct diag_log_mask_t *mask = NULL;
+
+	if (peripheral >= NUM_PERIPHERALS)
+		return;
+
+	if (!driver->diagfwd_cntl[peripheral] ||
+	    !driver->diagfwd_cntl[peripheral]->ch_open) {
+		pr_debug("diag: In %s, control channel is not open, p: %d\n",
+			 __func__, peripheral);
+		return;
+	}
+
+	if (driver->md_session_mask != 0) {
+		if (driver->md_session_mask & MD_PERIPHERAL_MASK(peripheral)) {
+			if (driver->md_session_map[peripheral])
+				mask_info =
+				driver->md_session_map[peripheral]->log_mask;
+		} else if (driver->md_session_mask &
+				MD_PERIPHERAL_PD_MASK(peripheral)) {
+			upd = diag_mask_to_pd_value(driver->md_session_mask);
+			if (upd && driver->md_session_map[upd])
+				mask_info =
+				driver->md_session_map[upd]->log_mask;
+		} else {
+			DIAG_LOG(DIAG_DEBUG_MASKS,
+			"asking for mask update with unknown session mask\n");
+			return;
+		}
+	} else {
+		mask_info = &log_mask;
+	}
+
+	if (!mask_info || !mask_info->ptr || !mask_info->update_buf)
+		return;
+
+	mask = (struct diag_log_mask_t *)mask_info->ptr;
+	if (!mask->ptr)
+		return;
+	buf = mask_info->update_buf;
+
+	switch (mask_info->status) {
+	case DIAG_CTRL_MASK_ALL_DISABLED:
+	case DIAG_CTRL_MASK_ALL_ENABLED:
+		ctrl_pkt.equip_id = 0;
+		ctrl_pkt.num_items = 0;
+		ctrl_pkt.log_mask_size = 0;
+		send_once = 1;
+		break;
+	case DIAG_CTRL_MASK_VALID:
+		send_once = 0;
+		break;
+	default:
+		pr_debug("diag: In %s, invalid log_mask status\n", __func__);
+		return;
+	}
+
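+	/*
+	 * Send one DIAG_CTRL_MSG_LOG_MASK packet per equipment ID. The
+	 * ALL_DISABLED/ALL_ENABLED states need a single packet with an
+	 * empty payload; a VALID mask carries one bitmap per equip ID.
+	 */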
+	mutex_lock(&mask_info->lock);
+	for (i = 0; i < MAX_EQUIP_ID; i++, mask++) {
+		if (!mask->ptr)
+			continue;
+
+		if (equip_id != i && equip_id != ALL_EQUIP_ID)
+			continue;
+
+		mutex_lock(&mask->lock);
+		ctrl_pkt.cmd_type = DIAG_CTRL_MSG_LOG_MASK;
+		ctrl_pkt.stream_id = 1;
+		ctrl_pkt.status = mask_info->status;
+		if (mask_info->status == DIAG_CTRL_MASK_VALID) {
+			mask_size = LOG_ITEMS_TO_SIZE(mask->num_items_tools);
+			ctrl_pkt.equip_id = i;
+			ctrl_pkt.num_items = mask->num_items_tools;
+			ctrl_pkt.log_mask_size = mask_size;
+		}
+		ctrl_pkt.data_len = LOG_MASK_CTRL_HEADER_LEN + mask_size;
+
+		if (header_len + mask_size > mask_info->update_buf_len) {
+			temp = krealloc(buf, header_len + mask_size,
+					GFP_KERNEL);
+			if (!temp) {
+				pr_err_ratelimited("diag: Unable to realloc log update buffer, new size: %d, equip_id: %d\n",
+				       header_len + mask_size, equip_id);
+				mutex_unlock(&mask->lock);
+				break;
+			}
+			mask_info->update_buf = temp;
+			mask_info->update_buf_len = header_len + mask_size;
+			buf = temp;
+		}
+
+		memcpy(buf, &ctrl_pkt, header_len);
+		if (mask_size > 0 && mask_size <= LOG_MASK_SIZE)
+			memcpy(buf + header_len, mask->ptr, mask_size);
+		mutex_unlock(&mask->lock);
+
+		DIAG_LOG(DIAG_DEBUG_MASKS,
+			 "sending ctrl pkt to %d, e %d num_items %d size %d\n",
+			 peripheral, i, ctrl_pkt.num_items,
+			 ctrl_pkt.log_mask_size);
+
+		err = diagfwd_write(peripheral, TYPE_CNTL,
+				    buf, header_len + mask_size);
+		if (err && err != -ENODEV)
+			pr_err_ratelimited("diag: Unable to send log masks to peripheral %d, equip_id: %d, err: %d\n",
+			       peripheral, i, err);
+		if (send_once || equip_id != ALL_EQUIP_ID)
+			break;
+
+	}
+	mutex_unlock(&mask_info->lock);
+}
+
+static void diag_send_event_mask_update(uint8_t peripheral)
+{
+	uint8_t *buf = NULL, upd = 0;
+	uint8_t *temp = NULL;
+	struct diag_ctrl_event_mask header;
+	struct diag_mask_info *mask_info = NULL;
+	int num_bytes = EVENT_COUNT_TO_BYTES(driver->last_event_id);
+	int write_len = 0;
+	int err = 0;
+	int temp_len = 0;
+
+	if (num_bytes <= 0 || num_bytes > driver->event_mask_size) {
+		pr_debug("diag: In %s, invalid event mask length %d\n",
+			 __func__, num_bytes);
+		return;
+	}
+
+	if (peripheral >= NUM_PERIPHERALS)
+		return;
+
+	if (!driver->diagfwd_cntl[peripheral] ||
+	    !driver->diagfwd_cntl[peripheral]->ch_open) {
+		pr_debug("diag: In %s, control channel is not open, p: %d\n",
+			 __func__, peripheral);
+		return;
+	}
+
+	if (driver->md_session_mask != 0) {
+		if (driver->md_session_mask & MD_PERIPHERAL_MASK(peripheral)) {
+			if (driver->md_session_map[peripheral])
+				mask_info =
+				driver->md_session_map[peripheral]->event_mask;
+		} else if (driver->md_session_mask &
+				MD_PERIPHERAL_PD_MASK(peripheral)) {
+			upd = diag_mask_to_pd_value(driver->md_session_mask);
+			if (upd && driver->md_session_map[upd])
+				mask_info =
+				driver->md_session_map[upd]->event_mask;
+		} else {
+			DIAG_LOG(DIAG_DEBUG_MASKS,
+			"asking for mask update with unknown session mask\n");
+			return;
+		}
+	} else {
+		mask_info = &event_mask;
+	}
+
+	if (!mask_info || !mask_info->ptr || !mask_info->update_buf)
+		return;
+
+	buf = mask_info->update_buf;
+	mutex_lock(&mask_info->lock);
+	header.cmd_type = DIAG_CTRL_MSG_EVENT_MASK;
+	header.stream_id = 1;
+	header.status = mask_info->status;
+
+	switch (mask_info->status) {
+	case DIAG_CTRL_MASK_ALL_DISABLED:
+		header.event_config = 0;
+		header.event_mask_size = 0;
+		break;
+	case DIAG_CTRL_MASK_ALL_ENABLED:
+		header.event_config = 1;
+		header.event_mask_size = 0;
+		break;
+	case DIAG_CTRL_MASK_VALID:
+		header.event_config = 1;
+		header.event_mask_size = num_bytes;
+		if (num_bytes + sizeof(header) > mask_info->update_buf_len) {
+			temp_len = num_bytes + sizeof(header);
+			temp = krealloc(buf, temp_len, GFP_KERNEL);
+			if (!temp) {
+				pr_err("diag: Unable to realloc event mask update buffer\n");
+				goto err;
+			}
+			mask_info->update_buf = temp;
+			mask_info->update_buf_len = temp_len;
+			buf = temp;
+		}
+		if (num_bytes > 0 && num_bytes < mask_info->mask_len) {
+			memcpy(buf + sizeof(header), mask_info->ptr, num_bytes);
+		} else {
+			pr_err("diag: num_bytes(%d) is not satisfying length condition\n",
+				num_bytes);
+			goto err;
+		}
+		write_len += num_bytes;
+		break;
+	default:
+		pr_debug("diag: In %s, invalid status %d\n", __func__,
+			 mask_info->status);
+		goto err;
+	}
+	header.data_len = EVENT_MASK_CTRL_HEADER_LEN + header.event_mask_size;
+	memcpy(buf, &header, sizeof(header));
+	write_len += sizeof(header);
+
+	err = diagfwd_write(peripheral, TYPE_CNTL, buf, write_len);
+	if (err && err != -ENODEV)
+		pr_err_ratelimited("diag: Unable to send event masks to peripheral %d\n",
+		       peripheral);
+err:
+	mutex_unlock(&mask_info->lock);
+}
+
+static void diag_send_msg_mask_update(uint8_t peripheral, int first, int last)
+{
+	int i;
+	int err = 0;
+	int header_len = sizeof(struct diag_ctrl_msg_mask);
+	int temp_len = 0;
+	uint8_t *buf = NULL, *temp = NULL;
+	uint8_t upd = 0;
+	uint8_t msg_mask_tbl_count_local = 0;
+	uint32_t mask_size = 0;
+	struct diag_mask_info *mask_info = NULL;
+	struct diag_msg_mask_t *mask = NULL;
+	struct diag_ctrl_msg_mask header;
+	struct diag_md_session_t *md_session_info = NULL;
+
+	if (peripheral >= NUM_PERIPHERALS)
+		return;
+
+	if (!driver->diagfwd_cntl[peripheral] ||
+	    !driver->diagfwd_cntl[peripheral]->ch_open) {
+		pr_debug("diag: In %s, control channel is not open, p: %d\n",
+			 __func__, peripheral);
+		return;
+	}
+
+	if (driver->md_session_mask != 0) {
+		if (driver->md_session_mask & MD_PERIPHERAL_MASK(peripheral)) {
+			if (driver->md_session_map[peripheral]) {
+				mask_info =
+				driver->md_session_map[peripheral]->msg_mask;
+				md_session_info =
+				driver->md_session_map[peripheral];
+			}
+		} else if (driver->md_session_mask &
+				MD_PERIPHERAL_PD_MASK(peripheral)) {
+			upd = diag_mask_to_pd_value(driver->md_session_mask);
+			if (upd && driver->md_session_map[upd]) {
+				mask_info =
+				driver->md_session_map[upd]->msg_mask;
+				md_session_info =
+				driver->md_session_map[upd];
+			}
+		} else {
+			DIAG_LOG(DIAG_DEBUG_MASKS,
+			"asking for mask update with unknown session mask\n");
+			return;
+		}
+	} else {
+		mask_info = &msg_mask;
+	}
+
+	if (!mask_info || !mask_info->ptr || !mask_info->update_buf)
+		return;
+	mutex_lock(&driver->msg_mask_lock);
+	mask = (struct diag_msg_mask_t *)mask_info->ptr;
+	if (!mask->ptr) {
+		mutex_unlock(&driver->msg_mask_lock);
+		return;
+	}
+	buf = mask_info->update_buf;
+	if (md_session_info)
+		msg_mask_tbl_count_local = md_session_info->msg_mask_tbl_count;
+	else
+		msg_mask_tbl_count_local = driver->msg_mask_tbl_count;
+	mutex_unlock(&driver->msg_mask_lock);
+	mutex_lock(&mask_info->lock);
+	switch (mask_info->status) {
+	case DIAG_CTRL_MASK_ALL_DISABLED:
+		mask_size = 0;
+		break;
+	case DIAG_CTRL_MASK_ALL_ENABLED:
+		mask_size = 1;
+		break;
+	case DIAG_CTRL_MASK_VALID:
+		break;
+	default:
+		pr_debug("diag: In %s, invalid status: %d\n", __func__,
+			 mask_info->status);
+		goto err;
+	}
+
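+	/*
+	 * Send one DIAG_CTRL_MSG_F3_MASK packet per SSID range. Only the
+	 * matching range is sent unless the caller passed ALL_SSID.
+	 */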
+	for (i = 0; i < msg_mask_tbl_count_local; i++, mask++) {
+		if (!mask->ptr)
+			continue;
+		mutex_lock(&driver->msg_mask_lock);
+		if (((mask->ssid_first > first) ||
+			(mask->ssid_last_tools < last)) && first != ALL_SSID) {
+			mutex_unlock(&driver->msg_mask_lock);
+			continue;
+		}
+
+		mutex_lock(&mask->lock);
+		if (mask_info->status == DIAG_CTRL_MASK_VALID) {
+			mask_size =
+				mask->ssid_last_tools - mask->ssid_first + 1;
+			temp_len = mask_size * sizeof(uint32_t);
+			if (temp_len + header_len <= mask_info->update_buf_len)
+				goto proceed;
+			temp = krealloc(mask_info->update_buf, temp_len,
+					GFP_KERNEL);
+			if (!temp) {
+				pr_err("diag: In %s, unable to realloc msg_mask update buffer\n",
+				       __func__);
+				mask_size = (mask_info->update_buf_len -
+					    header_len) / sizeof(uint32_t);
+			} else {
+				mask_info->update_buf = temp;
+				mask_info->update_buf_len = temp_len;
+				buf = temp;
+				pr_debug("diag: In %s, successfully reallocated msg_mask update buffer to len: %d\n",
+					 __func__, mask_info->update_buf_len);
+			}
+		} else if (mask_info->status == DIAG_CTRL_MASK_ALL_ENABLED) {
+			mask_size = 1;
+		}
+proceed:
+		header.cmd_type = DIAG_CTRL_MSG_F3_MASK;
+		header.status = mask_info->status;
+		header.stream_id = 1;
+		header.msg_mode = 0;
+		header.ssid_first = mask->ssid_first;
+		header.ssid_last = mask->ssid_last_tools;
+		header.msg_mask_size = mask_size;
+		mask_size *= sizeof(uint32_t);
+		header.data_len = MSG_MASK_CTRL_HEADER_LEN + mask_size;
+		memcpy(buf, &header, header_len);
+		if (mask_size > 0)
+			memcpy(buf + header_len, mask->ptr, mask_size);
+		mutex_unlock(&mask->lock);
+		mutex_unlock(&driver->msg_mask_lock);
+
+		err = diagfwd_write(peripheral, TYPE_CNTL, buf,
+				    header_len + mask_size);
+		if (err && err != -ENODEV)
+			pr_err_ratelimited("diag: Unable to send msg masks to peripheral %d, error = %d\n",
+			       peripheral, err);
+
+		if (first != ALL_SSID)
+			break;
+	}
+err:
+	mutex_unlock(&mask_info->lock);
+}
+
+static void diag_send_time_sync_update(uint8_t peripheral)
+{
+	struct diag_ctrl_msg_time_sync time_sync_msg;
+	int msg_size = sizeof(struct diag_ctrl_msg_time_sync);
+	int err = 0;
+
+	if (peripheral >= NUM_PERIPHERALS) {
+		pr_err("diag: In %s, Invalid peripheral, %d\n",
+				__func__, peripheral);
+		return;
+	}
+
+	if (!driver->diagfwd_cntl[peripheral] ||
+		!driver->diagfwd_cntl[peripheral]->ch_open) {
+		pr_err("diag: In %s, control channel is not open, p: %d, %pK\n",
+			__func__, peripheral, driver->diagfwd_cntl[peripheral]);
+		return;
+	}
+
+	mutex_lock(&driver->diag_cntl_mutex);
+	time_sync_msg.ctrl_pkt_id = DIAG_CTRL_MSG_TIME_SYNC_PKT;
+	time_sync_msg.ctrl_pkt_data_len = 5;
+	time_sync_msg.version = 1;
+	time_sync_msg.time_api = driver->uses_time_api;
+
+	err = diagfwd_write(peripheral, TYPE_CNTL, &time_sync_msg, msg_size);
+	if (err)
+		pr_err("diag: In %s, unable to write to peripheral: %d, type: %d, len: %d, err: %d\n",
+				__func__, peripheral, TYPE_CNTL,
+				msg_size, err);
+	mutex_unlock(&driver->diag_cntl_mutex);
+}
+
+static void diag_send_feature_mask_update(uint8_t peripheral)
+{
+	void *buf = driver->buf_feature_mask_update;
+	int header_size = sizeof(struct diag_ctrl_feature_mask);
+	uint8_t feature_bytes[FEATURE_MASK_LEN] = {0, 0};
+	struct diag_ctrl_feature_mask feature_mask;
+	int total_len = 0;
+	int err = 0;
+
+	if (peripheral >= NUM_PERIPHERALS) {
+		pr_err("diag: In %s, Invalid peripheral, %d\n",
+			__func__, peripheral);
+		return;
+	}
+
+	if (!driver->diagfwd_cntl[peripheral] ||
+	    !driver->diagfwd_cntl[peripheral]->ch_open) {
+		pr_err("diag: In %s, control channel is not open, p: %d, %pK\n",
+		       __func__, peripheral, driver->diagfwd_cntl[peripheral]);
+		return;
+	}
+
+	mutex_lock(&driver->diag_cntl_mutex);
+	/* send feature mask update */
+	feature_mask.ctrl_pkt_id = DIAG_CTRL_MSG_FEATURE;
+	feature_mask.ctrl_pkt_data_len = sizeof(uint32_t) + FEATURE_MASK_LEN;
+	feature_mask.feature_mask_len = FEATURE_MASK_LEN;
+	memcpy(buf, &feature_mask, header_size);
+	DIAG_SET_FEATURE_MASK(F_DIAG_FEATURE_MASK_SUPPORT);
+	DIAG_SET_FEATURE_MASK(F_DIAG_LOG_ON_DEMAND_APPS);
+	DIAG_SET_FEATURE_MASK(F_DIAG_STM);
+	DIAG_SET_FEATURE_MASK(F_DIAG_DCI_EXTENDED_HEADER_SUPPORT);
+	if (driver->supports_separate_cmdrsp)
+		DIAG_SET_FEATURE_MASK(F_DIAG_REQ_RSP_SUPPORT);
+	if (driver->supports_apps_hdlc_encoding)
+		DIAG_SET_FEATURE_MASK(F_DIAG_APPS_HDLC_ENCODE);
+	if (driver->supports_apps_header_untagging) {
+		if (peripheral == PERIPHERAL_MODEM ||
+			peripheral == PERIPHERAL_LPASS ||
+			peripheral == PERIPHERAL_CDSP) {
+			DIAG_SET_FEATURE_MASK(F_DIAG_PKT_HEADER_UNTAG);
+			driver->peripheral_untag[peripheral] =
+				ENABLE_PKT_HEADER_UNTAGGING;
+		}
+	}
+	DIAG_SET_FEATURE_MASK(F_DIAG_MASK_CENTRALIZATION);
+	if (driver->supports_sockets)
+		DIAG_SET_FEATURE_MASK(F_DIAG_SOCKETS_ENABLED);
+
+	memcpy(buf + header_size, &feature_bytes, FEATURE_MASK_LEN);
+	total_len = header_size + FEATURE_MASK_LEN;
+
+	err = diagfwd_write(peripheral, TYPE_CNTL, buf, total_len);
+	if (err) {
+		pr_err_ratelimited("diag: In %s, unable to write feature mask to peripheral: %d, type: %d, len: %d, err: %d\n",
+		       __func__, peripheral, TYPE_CNTL,
+		       total_len, err);
+		mutex_unlock(&driver->diag_cntl_mutex);
+		return;
+	}
+	driver->feature[peripheral].sent_feature_mask = 1;
+	mutex_unlock(&driver->diag_cntl_mutex);
+}
+
+static int diag_cmd_get_ssid_range(unsigned char *src_buf, int src_len,
+			unsigned char *dest_buf, int dest_len, int pid)
+{
+	int i;
+	int write_len = 0;
+	uint8_t msg_mask_tbl_count = 0;
+	struct diag_msg_mask_t *mask_ptr = NULL;
+	struct diag_msg_ssid_query_t rsp;
+	struct diag_ssid_range_t ssid_range;
+	struct diag_mask_info *mask_info = NULL;
+	struct diag_md_session_t *info = NULL;
+
+	mutex_lock(&driver->md_session_lock);
+	info = diag_md_session_get_pid(pid);
+	mask_info = (!info) ? &msg_mask : info->msg_mask;
+	if (!src_buf || !dest_buf || src_len <= 0 || dest_len <= 0 ||
+	    !mask_info) {
+		pr_err("diag: Invalid input in %s, src_buf: %pK, src_len: %d, dest_buf: %pK, dest_len: %d, mask_info: %pK\n",
+		       __func__, src_buf, src_len, dest_buf, dest_len,
+		       mask_info);
+		mutex_unlock(&driver->md_session_lock);
+		return -EINVAL;
+	}
+	if (!mask_info->ptr) {
+		pr_err("diag: In %s, invalid input mask_info->ptr: %pK\n",
+			__func__, mask_info->ptr);
+		mutex_unlock(&driver->md_session_lock);
+		return -EINVAL;
+	}
+
+	if (!diag_apps_responds()) {
+		mutex_unlock(&driver->md_session_lock);
+		return 0;
+	}
+	mutex_lock(&driver->msg_mask_lock);
+	msg_mask_tbl_count = (info) ? info->msg_mask_tbl_count :
+		driver->msg_mask_tbl_count;
+	rsp.cmd_code = DIAG_CMD_MSG_CONFIG;
+	rsp.sub_cmd = DIAG_CMD_OP_GET_SSID_RANGE;
+	rsp.status = MSG_STATUS_SUCCESS;
+	rsp.padding = 0;
+	rsp.count = msg_mask_tbl_count;
+	memcpy(dest_buf, &rsp, sizeof(rsp));
+	write_len += sizeof(rsp);
+	mask_ptr = (struct diag_msg_mask_t *)mask_info->ptr;
+	for (i = 0; i < msg_mask_tbl_count; i++, mask_ptr++) {
+		if (write_len + sizeof(ssid_range) > dest_len) {
+			pr_err("diag: In %s, Truncating response due to size limitations of rsp buffer\n",
+			       __func__);
+			break;
+		}
+		ssid_range.ssid_first = mask_ptr->ssid_first;
+		ssid_range.ssid_last = mask_ptr->ssid_last_tools;
+		memcpy(dest_buf + write_len, &ssid_range, sizeof(ssid_range));
+		write_len += sizeof(ssid_range);
+	}
+	mutex_unlock(&driver->msg_mask_lock);
+	mutex_unlock(&driver->md_session_lock);
+	return write_len;
+}
+
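+/*
+ * Handle DIAG_CMD_OP_GET_BUILD_MASK: look up the build-time msg mask
+ * whose ssid_first matches the request and copy its per-SSID uint32_t
+ * masks after the diag_msg_build_mask_t header. The status stays
+ * MSG_STATUS_FAIL when no matching table entry exists.
+ */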
+static int diag_cmd_get_build_mask(unsigned char *src_buf, int src_len,
+			unsigned char *dest_buf, int dest_len, int pid)
+{
+	int i = 0;
+	int write_len = 0;
+	int num_entries = 0;
+	int copy_len = 0;
+	struct diag_msg_mask_t *build_mask = NULL;
+	struct diag_build_mask_req_t *req = NULL;
+	struct diag_msg_build_mask_t rsp;
+
+	if (!src_buf || !dest_buf || src_len <= 0 || dest_len <= 0) {
+		pr_err("diag: Invalid input in %s, src_buf: %pK, src_len: %d, dest_buf: %pK, dest_len: %d\n",
+		       __func__, src_buf, src_len, dest_buf, dest_len);
+		return -EINVAL;
+	}
+
+	if (!diag_apps_responds())
+		return 0;
+	mutex_lock(&driver->msg_mask_lock);
+	req = (struct diag_build_mask_req_t *)src_buf;
+	rsp.cmd_code = DIAG_CMD_MSG_CONFIG;
+	rsp.sub_cmd = DIAG_CMD_OP_GET_BUILD_MASK;
+	rsp.ssid_first = req->ssid_first;
+	rsp.ssid_last = req->ssid_last;
+	rsp.status = MSG_STATUS_FAIL;
+	rsp.padding = 0;
+	build_mask = (struct diag_msg_mask_t *)msg_bt_mask.ptr;
+	for (i = 0; i < driver->bt_msg_mask_tbl_count; i++, build_mask++) {
+		if (!build_mask->ptr)
+			continue;
+		if (build_mask->ssid_first != req->ssid_first)
+			continue;
+		num_entries = req->ssid_last - req->ssid_first + 1;
+		if (num_entries > build_mask->range) {
+			pr_warn("diag: In %s, truncating ssid range for ssid_first: %d ssid_last %d\n",
+				__func__, req->ssid_first, req->ssid_last);
+			num_entries = build_mask->range;
+			req->ssid_last = req->ssid_first +
+					build_mask->range - 1;
+		}
+		copy_len = num_entries * sizeof(uint32_t);
+		if (copy_len + sizeof(rsp) > dest_len)
+			copy_len = dest_len - sizeof(rsp);
+		memcpy(dest_buf + sizeof(rsp), build_mask->ptr, copy_len);
+		write_len += copy_len;
+		rsp.ssid_last = build_mask->ssid_last;
+		rsp.status = MSG_STATUS_SUCCESS;
+		break;
+	}
+	memcpy(dest_buf, &rsp, sizeof(rsp));
+	write_len += sizeof(rsp);
+	mutex_unlock(&driver->msg_mask_lock);
+	return write_len;
+}
+
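+/*
+ * Handle DIAG_CMD_OP_GET_MSG_MASK: return the runtime msg mask of the
+ * table entry whose SSID range contains req->ssid_first, truncated if
+ * necessary to fit the response buffer.
+ */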
+static int diag_cmd_get_msg_mask(unsigned char *src_buf, int src_len,
+			unsigned char *dest_buf, int dest_len, int pid)
+{
+	int i;
+	int write_len = 0;
+	uint32_t mask_size = 0;
+	uint8_t msg_mask_tbl_count = 0;
+	struct diag_msg_mask_t *mask = NULL;
+	struct diag_build_mask_req_t *req = NULL;
+	struct diag_msg_build_mask_t rsp;
+	struct diag_mask_info *mask_info = NULL;
+	struct diag_md_session_t *info = NULL;
+
+	mutex_lock(&driver->md_session_lock);
+	info = diag_md_session_get_pid(pid);
+
+	mask_info = (!info) ? &msg_mask : info->msg_mask;
+	if (!src_buf || !dest_buf || src_len <= 0 || dest_len <= 0 ||
+	    !mask_info) {
+		pr_err("diag: Invalid input in %s, src_buf: %pK, src_len: %d, dest_buf: %pK, dest_len: %d, mask_info: %pK\n",
+		       __func__, src_buf, src_len, dest_buf, dest_len,
+		       mask_info);
+		mutex_unlock(&driver->md_session_lock);
+		return -EINVAL;
+	}
+	if (!mask_info->ptr) {
+		pr_err("diag: In %s, invalid input mask_info->ptr: %pK\n",
+			__func__, mask_info->ptr);
+		mutex_unlock(&driver->md_session_lock);
+		return -EINVAL;
+	}
+	if (!diag_apps_responds()) {
+		mutex_unlock(&driver->md_session_lock);
+		return 0;
+	}
+
+	mutex_lock(&driver->msg_mask_lock);
+	msg_mask_tbl_count = (info) ? info->msg_mask_tbl_count :
+			driver->msg_mask_tbl_count;
+	req = (struct diag_build_mask_req_t *)src_buf;
+	rsp.cmd_code = DIAG_CMD_MSG_CONFIG;
+	rsp.sub_cmd = DIAG_CMD_OP_GET_MSG_MASK;
+	rsp.ssid_first = req->ssid_first;
+	rsp.ssid_last = req->ssid_last;
+	rsp.status = MSG_STATUS_FAIL;
+	rsp.padding = 0;
+	mask = (struct diag_msg_mask_t *)mask_info->ptr;
+	if (!mask->ptr) {
+		pr_err("diag: Invalid input in %s, mask->ptr: %pK\n",
+			__func__, mask->ptr);
+		mutex_unlock(&driver->msg_mask_lock);
+		mutex_unlock(&driver->md_session_lock);
+		return -EINVAL;
+	}
+	for (i = 0; i < msg_mask_tbl_count; i++, mask++) {
+		if (!mask->ptr)
+			continue;
+		if ((req->ssid_first < mask->ssid_first) ||
+		    (req->ssid_first > mask->ssid_last_tools)) {
+			continue;
+		}
+		mask_size = mask->range * sizeof(uint32_t);
+		/* Copy the msg mask only up to the end of the rsp buffer */
+		if (mask_size + sizeof(rsp) > dest_len)
+			mask_size = dest_len - sizeof(rsp);
+		memcpy(dest_buf + sizeof(rsp), mask->ptr, mask_size);
+		write_len += mask_size;
+		rsp.status = MSG_STATUS_SUCCESS;
+		break;
+	}
+	memcpy(dest_buf, &rsp, sizeof(rsp));
+	write_len += sizeof(rsp);
+	mutex_unlock(&driver->msg_mask_lock);
+	mutex_unlock(&driver->md_session_lock);
+	return write_len;
+}
+
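+/*
+ * Handle DIAG_CMD_OP_SET_MSG_MASK: update the runtime mask for the
+ * requested SSID range, growing the entry with krealloc() when the
+ * tool writes beyond ssid_last_tools. The lock ordering used here and
+ * throughout this file is md_session_lock -> mask_info->lock ->
+ * msg_mask_lock -> mask->lock.
+ */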
+static int diag_cmd_set_msg_mask(unsigned char *src_buf, int src_len,
+			unsigned char *dest_buf, int dest_len, int pid)
+{
+	int i;
+	int write_len = 0;
+	int header_len = sizeof(struct diag_msg_build_mask_t);
+	int found = 0;
+	uint32_t mask_size = 0;
+	uint32_t offset = 0;
+	struct diag_msg_mask_t *mask = NULL;
+	struct diag_msg_build_mask_t *req = NULL;
+	struct diag_msg_build_mask_t rsp;
+	struct diag_mask_info *mask_info = NULL;
+	struct diag_msg_mask_t *mask_next = NULL;
+	uint32_t *temp = NULL;
+	struct diag_md_session_t *info = NULL;
+	uint8_t msg_mask_tbl_count = 0;
+
+	mutex_lock(&driver->md_session_lock);
+	info = diag_md_session_get_pid(pid);
+
+	mask_info = (!info) ? &msg_mask : info->msg_mask;
+	if (!src_buf || !dest_buf || src_len <= 0 || dest_len <= 0 ||
+	    !mask_info) {
+		pr_err("diag: Invalid input in %s, src_buf: %pK, src_len: %d, dest_buf: %pK, dest_len: %d, mask_info: %pK\n",
+		       __func__, src_buf, src_len, dest_buf, dest_len,
+		       mask_info);
+		mutex_unlock(&driver->md_session_lock);
+		return -EINVAL;
+	}
+	if (!mask_info->ptr) {
+		pr_err("diag: In %s, invalid input mask_info->ptr: %pK\n",
+			__func__, mask_info->ptr);
+		mutex_unlock(&driver->md_session_lock);
+		return -EINVAL;
+	}
+
+	req = (struct diag_msg_build_mask_t *)src_buf;
+	mutex_lock(&mask_info->lock);
+	mutex_lock(&driver->msg_mask_lock);
+	mask = (struct diag_msg_mask_t *)mask_info->ptr;
+	if (!mask->ptr) {
+		pr_err("diag: Invalid input in %s, mask->ptr: %pK\n",
+			__func__, mask->ptr);
+		mutex_unlock(&driver->msg_mask_lock);
+		mutex_unlock(&mask_info->lock);
+		mutex_unlock(&driver->md_session_lock);
+		return -EINVAL;
+	}
+	msg_mask_tbl_count = (info) ? info->msg_mask_tbl_count :
+			driver->msg_mask_tbl_count;
+	for (i = 0; i < msg_mask_tbl_count; i++, mask++) {
+		if (!mask->ptr)
+			continue;
+		if (i < (msg_mask_tbl_count - 1)) {
+			mask_next = mask;
+			mask_next++;
+		} else {
+			mask_next = NULL;
+		}
+
+		if ((req->ssid_first < mask->ssid_first) ||
+		    (req->ssid_first > mask->ssid_first + MAX_SSID_PER_RANGE) ||
+		    (mask_next && (req->ssid_first >= mask_next->ssid_first))) {
+			continue;
+		}
+		mask_next = NULL;
+		found = 1;
+		mutex_lock(&mask->lock);
+		mask_size = req->ssid_last - req->ssid_first + 1;
+		if (mask_size > MAX_SSID_PER_RANGE) {
+			pr_warn("diag: In %s, truncating ssid range, %d-%d to max allowed: %d\n",
+				__func__, mask->ssid_first, mask->ssid_last,
+				MAX_SSID_PER_RANGE);
+			mask_size = MAX_SSID_PER_RANGE;
+			mask->range_tools = MAX_SSID_PER_RANGE;
+			mask->ssid_last_tools =
+				mask->ssid_first + mask->range_tools - 1;
+		}
+		if (req->ssid_last > mask->ssid_last_tools) {
+			pr_debug("diag: Msg SSID range mismatch\n");
+			if (mask_size != MAX_SSID_PER_RANGE)
+				mask->ssid_last_tools = req->ssid_last;
+			mask->range_tools =
+				mask->ssid_last_tools - mask->ssid_first + 1;
+			temp = krealloc(mask->ptr,
+					mask->range_tools * sizeof(uint32_t),
+					GFP_KERNEL);
+			if (!temp) {
+				pr_err_ratelimited("diag: In %s, unable to allocate memory for msg mask ptr, mask_size: %d\n",
+						   __func__, mask_size);
+				mutex_unlock(&mask->lock);
+				mutex_unlock(&driver->msg_mask_lock);
+				mutex_unlock(&mask_info->lock);
+				mutex_unlock(&driver->md_session_lock);
+				return -ENOMEM;
+			}
+			mask->ptr = temp;
+		}
+
+		offset = req->ssid_first - mask->ssid_first;
+		if (offset + mask_size > mask->range_tools) {
+			pr_err("diag: In %s, Not in msg mask range, mask_size: %d, offset: %d\n",
+			       __func__, mask_size, offset);
+			mutex_unlock(&mask->lock);
+			break;
+		}
+		mask_size = mask_size * sizeof(uint32_t);
+		memcpy(mask->ptr + offset, src_buf + header_len, mask_size);
+		mutex_unlock(&mask->lock);
+		mask_info->status = DIAG_CTRL_MASK_VALID;
+		break;
+	}
+	mutex_unlock(&driver->msg_mask_lock);
+	mutex_unlock(&mask_info->lock);
+	mutex_unlock(&driver->md_session_lock);
+	if (diag_check_update(APPS_DATA))
+		diag_update_userspace_clients(MSG_MASKS_TYPE);
+
+	/*
+	 * Apps processor must send the response to this command. Frame the
+	 * response.
+	 */
+	rsp.cmd_code = DIAG_CMD_MSG_CONFIG;
+	rsp.sub_cmd = DIAG_CMD_OP_SET_MSG_MASK;
+	rsp.ssid_first = req->ssid_first;
+	rsp.ssid_last = req->ssid_last;
+	rsp.status = found;
+	rsp.padding = 0;
+	memcpy(dest_buf, &rsp, header_len);
+	write_len += header_len;
+	if (!found)
+		goto end;
+	if (mask_size + write_len > dest_len)
+		mask_size = dest_len - write_len;
+	memcpy(dest_buf + write_len, src_buf + header_len, mask_size);
+	write_len += mask_size;
+	for (i = 0; i < NUM_PERIPHERALS; i++) {
+		if (!diag_check_update(i))
+			continue;
+		mutex_lock(&driver->md_session_lock);
+		diag_send_msg_mask_update(i, req->ssid_first, req->ssid_last);
+		mutex_unlock(&driver->md_session_lock);
+	}
+end:
+	return write_len;
+}
+
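+/*
+ * Handle DIAG_CMD_OP_SET_ALL_MSG_MASK: enable or disable every msg
+ * mask at once. Note that memset() propagates only the low byte of
+ * req->rt_mask across the mask words, so the meaningful requests are
+ * all-ones or all-zeroes.
+ */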
+static int diag_cmd_set_all_msg_mask(unsigned char *src_buf, int src_len,
+			unsigned char *dest_buf, int dest_len, int pid)
+{
+	int i;
+	int write_len = 0;
+	int header_len = sizeof(struct diag_msg_config_rsp_t);
+	struct diag_msg_config_rsp_t rsp;
+	struct diag_msg_config_rsp_t *req = NULL;
+	struct diag_msg_mask_t *mask = NULL;
+	struct diag_mask_info *mask_info = NULL;
+	struct diag_md_session_t *info = NULL;
+	uint8_t msg_mask_tbl_count = 0;
+
+	mutex_lock(&driver->md_session_lock);
+	info = diag_md_session_get_pid(pid);
+
+	mask_info = (!info) ? &msg_mask : info->msg_mask;
+	if (!src_buf || !dest_buf || src_len <= 0 || dest_len <= 0 ||
+	    !mask_info) {
+		pr_err("diag: Invalid input in %s, src_buf: %pK, src_len: %d, dest_buf: %pK, dest_len: %d, mask_info: %pK\n",
+		       __func__, src_buf, src_len, dest_buf, dest_len,
+		       mask_info);
+		mutex_unlock(&driver->md_session_lock);
+		return -EINVAL;
+	}
+	if (!mask_info->ptr) {
+		pr_err("diag: In %s, invalid input mask_info->ptr: %pK\n",
+			__func__, mask_info->ptr);
+		mutex_unlock(&driver->md_session_lock);
+		return -EINVAL;
+	}
+
+	req = (struct diag_msg_config_rsp_t *)src_buf;
+
+	mutex_lock(&mask_info->lock);
+	mutex_lock(&driver->msg_mask_lock);
+
+	mask = (struct diag_msg_mask_t *)mask_info->ptr;
+	if (!mask->ptr) {
+		pr_err("diag: Invalid input in %s, mask->ptr: %pK\n",
+			__func__, mask->ptr);
+		mutex_unlock(&driver->msg_mask_lock);
+		mutex_unlock(&mask_info->lock);
+		mutex_unlock(&driver->md_session_lock);
+		return -EINVAL;
+	}
+	msg_mask_tbl_count = (info) ? info->msg_mask_tbl_count :
+			driver->msg_mask_tbl_count;
+	mask_info->status = (req->rt_mask) ? DIAG_CTRL_MASK_ALL_ENABLED :
+					   DIAG_CTRL_MASK_ALL_DISABLED;
+	for (i = 0; i < msg_mask_tbl_count; i++, mask++) {
+		if (mask && mask->ptr) {
+			mutex_lock(&mask->lock);
+			memset(mask->ptr, req->rt_mask,
+			       mask->range * sizeof(uint32_t));
+			mutex_unlock(&mask->lock);
+		}
+	}
+	mutex_unlock(&driver->msg_mask_lock);
+	mutex_unlock(&mask_info->lock);
+	mutex_unlock(&driver->md_session_lock);
+	if (diag_check_update(APPS_DATA))
+		diag_update_userspace_clients(MSG_MASKS_TYPE);
+
+	/*
+	 * Apps processor must send the response to this command. Frame the
+	 * response.
+	 */
+	rsp.cmd_code = DIAG_CMD_MSG_CONFIG;
+	rsp.sub_cmd = DIAG_CMD_OP_SET_ALL_MSG_MASK;
+	rsp.status = MSG_STATUS_SUCCESS;
+	rsp.padding = 0;
+	rsp.rt_mask = req->rt_mask;
+	memcpy(dest_buf, &rsp, header_len);
+	write_len += header_len;
+
+	for (i = 0; i < NUM_PERIPHERALS; i++) {
+		if (!diag_check_update(i))
+			continue;
+		mutex_lock(&driver->md_session_lock);
+		diag_send_msg_mask_update(i, ALL_SSID, ALL_SSID);
+		mutex_unlock(&driver->md_session_lock);
+	}
+
+	return write_len;
+}
+
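+/*
+ * Handle DIAG_CMD_GET_EVENT_MASK: return the apps event mask. The
+ * header reports num_bits = last_event_id + 1 and is followed by the
+ * mask bytes.
+ */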
+static int diag_cmd_get_event_mask(unsigned char *src_buf, int src_len,
+			unsigned char *dest_buf, int dest_len, int pid)
+{
+	int write_len = 0;
+	uint32_t mask_size;
+	struct diag_event_mask_config_t rsp;
+
+	if (!src_buf || !dest_buf || src_len <= 0 || dest_len <= 0) {
+		pr_err("diag: Invalid input in %s, src_buf: %pK, src_len: %d, dest_buf: %pK, dest_len: %d\n",
+		       __func__, src_buf, src_len, dest_buf, dest_len);
+		return -EINVAL;
+	}
+
+	if (!diag_apps_responds())
+		return 0;
+
+	mask_size = EVENT_COUNT_TO_BYTES(driver->last_event_id);
+	if (mask_size + sizeof(rsp) > dest_len) {
+		pr_err("diag: In %s, invalid mask size: %d\n", __func__,
+		       mask_size);
+		return -ENOMEM;
+	}
+
+	rsp.cmd_code = DIAG_CMD_GET_EVENT_MASK;
+	rsp.status = EVENT_STATUS_SUCCESS;
+	rsp.padding = 0;
+	rsp.num_bits = driver->last_event_id + 1;
+	memcpy(dest_buf, &rsp, sizeof(rsp));
+	write_len += sizeof(rsp);
+	memcpy(dest_buf + write_len, event_mask.ptr, mask_size);
+	write_len += mask_size;
+
+	return write_len;
+}
+
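+/*
+ * Handle DIAG_CMD_SET_EVENT_MASK: copy the event mask supplied by the
+ * client into the session's mask, echo it back in the response and
+ * propagate the update to all peripherals being updated.
+ */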
+static int diag_cmd_update_event_mask(unsigned char *src_buf, int src_len,
+			unsigned char *dest_buf, int dest_len, int pid)
+{
+	int i;
+	int write_len = 0;
+	int mask_len = 0;
+	int header_len = sizeof(struct diag_event_mask_config_t);
+	struct diag_event_mask_config_t rsp;
+	struct diag_event_mask_config_t *req;
+	struct diag_mask_info *mask_info = NULL;
+	struct diag_md_session_t *info = NULL;
+
+	mutex_lock(&driver->md_session_lock);
+	info = diag_md_session_get_pid(pid);
+	mask_info = (!info) ? &event_mask : info->event_mask;
+	if (!src_buf || !dest_buf || src_len <= 0 || dest_len <= 0 ||
+	    !mask_info) {
+		pr_err("diag: Invalid input in %s, src_buf: %pK, src_len: %d, dest_buf: %pK, dest_len: %d, mask_info: %pK\n",
+		       __func__, src_buf, src_len, dest_buf, dest_len,
+		       mask_info);
+		mutex_unlock(&driver->md_session_lock);
+		return -EINVAL;
+	}
+	if (!mask_info->ptr) {
+		pr_err("diag: In %s, invalid input mask_info->ptr: %pK\n",
+			__func__, mask_info->ptr);
+		mutex_unlock(&driver->md_session_lock);
+		return -EINVAL;
+	}
+	req = (struct diag_event_mask_config_t *)src_buf;
+	mask_len = EVENT_COUNT_TO_BYTES(req->num_bits);
+	if (mask_len <= 0 || mask_len > event_mask.mask_len) {
+		pr_err("diag: In %s, invalid event mask len: %d\n", __func__,
+		       mask_len);
+		mutex_unlock(&driver->md_session_lock);
+		return -EIO;
+	}
+
+	mutex_lock(&mask_info->lock);
+	memcpy(mask_info->ptr, src_buf + header_len, mask_len);
+	mask_info->status = DIAG_CTRL_MASK_VALID;
+	mutex_unlock(&mask_info->lock);
+	mutex_unlock(&driver->md_session_lock);
+	if (diag_check_update(APPS_DATA))
+		diag_update_userspace_clients(EVENT_MASKS_TYPE);
+
+	/*
+	 * Apps processor must send the response to this command. Frame the
+	 * response.
+	 */
+	rsp.cmd_code = DIAG_CMD_SET_EVENT_MASK;
+	rsp.status = EVENT_STATUS_SUCCESS;
+	rsp.padding = 0;
+	rsp.num_bits = driver->last_event_id + 1;
+	memcpy(dest_buf, &rsp, header_len);
+	write_len += header_len;
+	memcpy(dest_buf + write_len, mask_info->ptr, mask_len);
+	write_len += mask_len;
+
+	for (i = 0; i < NUM_PERIPHERALS; i++) {
+		if (!diag_check_update(i))
+			continue;
+		mutex_lock(&driver->md_session_lock);
+		diag_send_event_mask_update(i);
+		mutex_unlock(&driver->md_session_lock);
+	}
+
+	return write_len;
+}
+
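+/*
+ * Handle DIAG_CMD_EVENT_TOGGLE: the byte following the command code
+ * turns event reporting on (fill the mask with 0xFF) or off (clear
+ * it) in one shot.
+ */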
+static int diag_cmd_toggle_events(unsigned char *src_buf, int src_len,
+			unsigned char *dest_buf, int dest_len, int pid)
+{
+	int i;
+	int write_len = 0;
+	uint8_t toggle = 0;
+	struct diag_event_report_t header;
+	struct diag_mask_info *mask_info = NULL;
+	struct diag_md_session_t *info = NULL;
+
+	mutex_lock(&driver->md_session_lock);
+	info = diag_md_session_get_pid(pid);
+	mask_info = (!info) ? &event_mask : info->event_mask;
+	if (!src_buf || !dest_buf || src_len <= 0 || dest_len <= 0 ||
+	    !mask_info) {
+		pr_err("diag: Invalid input in %s, src_buf: %pK, src_len: %d, dest_buf: %pK, dest_len: %d, mask_info: %pK\n",
+		       __func__, src_buf, src_len, dest_buf, dest_len,
+		       mask_info);
+		mutex_unlock(&driver->md_session_lock);
+		return -EINVAL;
+	}
+	if (!mask_info->ptr) {
+		pr_err("diag: In %s, invalid input mask_info->ptr: %pK\n",
+			__func__, mask_info->ptr);
+		mutex_unlock(&driver->md_session_lock);
+		return -EINVAL;
+	}
+
+	toggle = *(src_buf + 1);
+	mutex_lock(&mask_info->lock);
+	if (toggle) {
+		mask_info->status = DIAG_CTRL_MASK_ALL_ENABLED;
+		memset(mask_info->ptr, 0xFF, mask_info->mask_len);
+	} else {
+		mask_info->status = DIAG_CTRL_MASK_ALL_DISABLED;
+		memset(mask_info->ptr, 0, mask_info->mask_len);
+	}
+	mutex_unlock(&mask_info->lock);
+	mutex_unlock(&driver->md_session_lock);
+	if (diag_check_update(APPS_DATA))
+		diag_update_userspace_clients(EVENT_MASKS_TYPE);
+
+	/*
+	 * Apps processor must send the response to this command. Frame the
+	 * response.
+	 */
+	header.cmd_code = DIAG_CMD_EVENT_TOGGLE;
+	header.padding = 0;
+	for (i = 0; i < NUM_PERIPHERALS; i++) {
+		if (!diag_check_update(i))
+			continue;
+		mutex_lock(&driver->md_session_lock);
+		diag_send_event_mask_update(i);
+		mutex_unlock(&driver->md_session_lock);
+	}
+	memcpy(dest_buf, &header, sizeof(header));
+	write_len += sizeof(header);
+
+	return write_len;
+}
+
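+/*
+ * Handle DIAG_CMD_OP_GET_LOG_MASK. The response layout on success is:
+ *   diag_log_config_rsp_t | equip_id (u32) | num_items (u32) | mask
+ * The header is written last, once the status is known.
+ */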
+static int diag_cmd_get_log_mask(unsigned char *src_buf, int src_len,
+			unsigned char *dest_buf, int dest_len, int pid)
+{
+	int i;
+	int status = LOG_STATUS_INVALID;
+	int write_len = 0;
+	int read_len = 0;
+	int req_header_len = sizeof(struct diag_log_config_req_t);
+	int rsp_header_len = sizeof(struct diag_log_config_rsp_t);
+	uint32_t mask_size = 0;
+	struct diag_log_mask_t *log_item = NULL;
+	struct diag_log_config_req_t *req;
+	struct diag_log_config_rsp_t rsp;
+	struct diag_mask_info *mask_info = NULL;
+	struct diag_md_session_t *info = NULL;
+
+	mutex_lock(&driver->md_session_lock);
+	info = diag_md_session_get_pid(pid);
+
+	mask_info = (!info) ? &log_mask : info->log_mask;
+	if (!src_buf || !dest_buf || src_len <= 0 || dest_len <= 0 ||
+	    !mask_info) {
+		pr_err("diag: Invalid input in %s, src_buf: %pK, src_len: %d, dest_buf: %pK, dest_len: %d, mask_info: %pK\n",
+		       __func__, src_buf, src_len, dest_buf, dest_len,
+		       mask_info);
+		mutex_unlock(&driver->md_session_lock);
+		return -EINVAL;
+	}
+	if (!mask_info->ptr) {
+		pr_err("diag: In %s, invalid input mask_info->ptr: %pK\n",
+			__func__, mask_info->ptr);
+		mutex_unlock(&driver->md_session_lock);
+		return -EINVAL;
+	}
+
+	if (!diag_apps_responds()) {
+		mutex_unlock(&driver->md_session_lock);
+		return 0;
+	}
+
+	req = (struct diag_log_config_req_t *)src_buf;
+	read_len += req_header_len;
+
+	rsp.cmd_code = DIAG_CMD_LOG_CONFIG;
+	rsp.padding[0] = 0;
+	rsp.padding[1] = 0;
+	rsp.padding[2] = 0;
+	rsp.sub_cmd = DIAG_CMD_OP_GET_LOG_MASK;
+	/*
+	 * Don't copy the response header yet; copy it at the end, once
+	 * the status field has been computed.
+	 */
+	write_len += rsp_header_len;
+
+	log_item = (struct diag_log_mask_t *)mask_info->ptr;
+	if (!log_item->ptr) {
+		pr_err("diag: Invalid input in %s, mask: %pK\n",
+			__func__, log_item);
+		mutex_unlock(&driver->md_session_lock);
+		return -EINVAL;
+	}
+	for (i = 0; i < MAX_EQUIP_ID; i++, log_item++) {
+		if (log_item->equip_id != req->equip_id)
+			continue;
+		mutex_lock(&log_item->lock);
+		mask_size = LOG_ITEMS_TO_SIZE(log_item->num_items_tools);
+		/*
+		 * Make sure there is room for the response in the buffer.
+		 * The destination buffer must at least be able to hold
+		 * equip_id (uint32_t), num_items (uint32_t), the mask
+		 * (mask_size bytes) and the response header.
+		 */
+		if ((mask_size + (2 * sizeof(uint32_t)) + rsp_header_len) >
+								dest_len) {
+			pr_err("diag: In %s, invalid length: %d, max rsp_len: %d\n",
+				__func__, mask_size, dest_len);
+			status = LOG_STATUS_FAIL;
+			mutex_unlock(&log_item->lock);
+			break;
+		}
+		*(uint32_t *)(dest_buf + write_len) = log_item->equip_id;
+		write_len += sizeof(uint32_t);
+		*(uint32_t *)(dest_buf + write_len) = log_item->num_items_tools;
+		write_len += sizeof(uint32_t);
+		if (mask_size > 0) {
+			memcpy(dest_buf + write_len, log_item->ptr, mask_size);
+			write_len += mask_size;
+		}
+		DIAG_LOG(DIAG_DEBUG_MASKS,
+			 "sending log e %d num_items %d size %d\n",
+			 log_item->equip_id, log_item->num_items_tools,
+			 log_item->range_tools);
+		mutex_unlock(&log_item->lock);
+		status = LOG_STATUS_SUCCESS;
+		break;
+	}
+
+	rsp.status = status;
+	memcpy(dest_buf, &rsp, rsp_header_len);
+
+	mutex_unlock(&driver->md_session_lock);
+	return write_len;
+}
+
+static int diag_cmd_get_log_range(unsigned char *src_buf, int src_len,
+			unsigned char *dest_buf, int dest_len, int pid)
+{
+	int i;
+	int write_len = 0;
+	struct diag_log_config_rsp_t rsp;
+	struct diag_log_mask_t *mask = (struct diag_log_mask_t *)log_mask.ptr;
+
+	if (!mask)
+		return -EINVAL;
+
+	if (!diag_apps_responds())
+		return 0;
+
+	if (!src_buf || !dest_buf || src_len <= 0 || dest_len <= 0) {
+		pr_err("diag: Invalid input in %s, src_buf: %pK, src_len: %d, dest_buf: %pK, dest_len: %d\n",
+		       __func__, src_buf, src_len, dest_buf, dest_len);
+		return -EINVAL;
+	}
+
+	rsp.cmd_code = DIAG_CMD_LOG_CONFIG;
+	rsp.padding[0] = 0;
+	rsp.padding[1] = 0;
+	rsp.padding[2] = 0;
+	rsp.sub_cmd = DIAG_CMD_OP_GET_LOG_RANGE;
+	rsp.status = LOG_STATUS_SUCCESS;
+	memcpy(dest_buf, &rsp, sizeof(rsp));
+	write_len += sizeof(rsp);
+
+	for (i = 0; i < MAX_EQUIP_ID && write_len < dest_len; i++, mask++) {
+		*(uint32_t *)(dest_buf + write_len) = mask->num_items_tools;
+		write_len += sizeof(uint32_t);
+	}
+
+	return write_len;
+}
+
+static int diag_cmd_set_log_mask(unsigned char *src_buf, int src_len,
+				 unsigned char *dest_buf, int dest_len,
+				 int pid)
+{
+	int i;
+	int write_len = 0;
+	int status = LOG_STATUS_SUCCESS;
+	int read_len = 0;
+	int payload_len = 0;
+	int req_header_len = sizeof(struct diag_log_config_req_t);
+	int rsp_header_len = sizeof(struct diag_log_config_set_rsp_t);
+	uint32_t mask_size = 0;
+	struct diag_log_config_req_t *req;
+	struct diag_log_config_set_rsp_t rsp;
+	struct diag_log_mask_t *mask = NULL;
+	unsigned char *temp_buf = NULL;
+	struct diag_mask_info *mask_info = NULL;
+	struct diag_md_session_t *info = NULL;
+
+	mutex_lock(&driver->md_session_lock);
+	info = diag_md_session_get_pid(pid);
+
+	mask_info = (!info) ? &log_mask : info->log_mask;
+	if (!src_buf || !dest_buf || src_len <= 0 || dest_len <= 0 ||
+	    !mask_info) {
+		pr_err("diag: Invalid input in %s, src_buf: %pK, src_len: %d, dest_buf: %pK, dest_len: %d, mask_info: %pK\n",
+		       __func__, src_buf, src_len, dest_buf, dest_len,
+		       mask_info);
+		mutex_unlock(&driver->md_session_lock);
+		return -EINVAL;
+	}
+	if (!mask_info->ptr) {
+		pr_err("diag: In %s, invalid input mask_info->ptr: %pK\n",
+			__func__, mask_info->ptr);
+		mutex_unlock(&driver->md_session_lock);
+		return -EINVAL;
+	}
+
+	req = (struct diag_log_config_req_t *)src_buf;
+	read_len += req_header_len;
+	mask = (struct diag_log_mask_t *)mask_info->ptr;
+	if (!mask->ptr) {
+		pr_err("diag: Invalid input in %s, mask->ptr: %pK\n",
+			__func__, mask->ptr);
+		mutex_unlock(&driver->md_session_lock);
+		return -EINVAL;
+	}
+	if (req->equip_id >= MAX_EQUIP_ID) {
+		pr_err("diag: In %s, Invalid logging mask request, equip_id: %d\n",
+		       __func__, req->equip_id);
+		status = LOG_STATUS_INVALID;
+	}
+
+	if (req->num_items == 0) {
+		pr_err("diag: In %s, Invalid number of items in log mask request, equip_id: %d\n",
+		       __func__, req->equip_id);
+		status = LOG_STATUS_INVALID;
+	}
+
+	mutex_lock(&mask_info->lock);
+	for (i = 0; i < MAX_EQUIP_ID && !status; i++, mask++) {
+		if (!mask || !mask->ptr)
+			continue;
+		if (mask->equip_id != req->equip_id)
+			continue;
+		mutex_lock(&mask->lock);
+
+		DIAG_LOG(DIAG_DEBUG_MASKS, "e: %d current: %d %d new: %d %d",
+			 mask->equip_id, mask->num_items_tools,
+			 mask->range_tools, req->num_items,
+			 LOG_ITEMS_TO_SIZE(req->num_items));
+		/*
+		 * If the requested log mask does not fit into our
+		 * buffer, trim it until it does. num_items must then
+		 * reflect only the items actually held in the buffer.
+		 */
+		mask->num_items_tools = (req->num_items > MAX_ITEMS_ALLOWED) ?
+					MAX_ITEMS_ALLOWED : req->num_items;
+		mask_size = LOG_ITEMS_TO_SIZE(mask->num_items_tools);
+		memset(mask->ptr, 0, mask->range_tools);
+		if (mask_size > mask->range_tools) {
+			DIAG_LOG(DIAG_DEBUG_MASKS,
+				 "log range mismatch, e: %d old: %d new: %d\n",
+				 req->equip_id, mask->range_tools,
+				 LOG_ITEMS_TO_SIZE(mask->num_items_tools));
+			/* The tool reported a larger mask; grow ours to fit */
+			temp_buf = krealloc(mask->ptr, mask_size, GFP_KERNEL);
+			if (!temp_buf) {
+				mask_info->status = DIAG_CTRL_MASK_INVALID;
+				mutex_unlock(&mask->lock);
+				break;
+			}
+			mask->ptr = temp_buf;
+			memset(mask->ptr, 0, mask_size);
+			mask->range_tools = mask_size;
+		}
+		req->num_items = mask->num_items_tools;
+		if (mask_size > 0)
+			memcpy(mask->ptr, src_buf + read_len, mask_size);
+		DIAG_LOG(DIAG_DEBUG_MASKS,
+			 "copying log mask, e %d num %d range %d size %d\n",
+			 req->equip_id, mask->num_items_tools,
+			 mask->range_tools, mask_size);
+		mutex_unlock(&mask->lock);
+		mask_info->status = DIAG_CTRL_MASK_VALID;
+		break;
+	}
+	mutex_unlock(&mask_info->lock);
+	mutex_unlock(&driver->md_session_lock);
+	if (diag_check_update(APPS_DATA))
+		diag_update_userspace_clients(LOG_MASKS_TYPE);
+
+	/*
+	 * Apps processor must send the response to this command. Frame the
+	 * response.
+	 */
+	payload_len = LOG_ITEMS_TO_SIZE(req->num_items);
+	if ((payload_len + rsp_header_len > dest_len) || (payload_len == 0)) {
+		pr_err("diag: In %s, invalid length, payload_len: %d, header_len: %d, dest_len: %d\n",
+		       __func__, payload_len, rsp_header_len, dest_len);
+		status = LOG_STATUS_FAIL;
+	}
+	rsp.cmd_code = DIAG_CMD_LOG_CONFIG;
+	rsp.padding[0] = 0;
+	rsp.padding[1] = 0;
+	rsp.padding[2] = 0;
+	rsp.sub_cmd = DIAG_CMD_OP_SET_LOG_MASK;
+	rsp.status = status;
+	rsp.equip_id = req->equip_id;
+	rsp.num_items = req->num_items;
+	memcpy(dest_buf, &rsp, rsp_header_len);
+	write_len += rsp_header_len;
+	if (status != LOG_STATUS_SUCCESS)
+		goto end;
+	memcpy(dest_buf + write_len, src_buf + read_len, payload_len);
+	write_len += payload_len;
+
+	for (i = 0; i < NUM_PERIPHERALS; i++) {
+		if (!diag_check_update(i))
+			continue;
+		mutex_lock(&driver->md_session_lock);
+		diag_send_log_mask_update(i, req->equip_id);
+		mutex_unlock(&driver->md_session_lock);
+	}
+end:
+	return write_len;
+}
+
+static int diag_cmd_disable_log_mask(unsigned char *src_buf, int src_len,
+			unsigned char *dest_buf, int dest_len, int pid)
+{
+	struct diag_mask_info *mask_info = NULL;
+	struct diag_log_mask_t *mask = NULL;
+	struct diag_log_config_rsp_t header;
+	int write_len = 0, i;
+	struct diag_md_session_t *info = NULL;
+
+	mutex_lock(&driver->md_session_lock);
+	info = diag_md_session_get_pid(pid);
+
+	mask_info = (!info) ? &log_mask : info->log_mask;
+	if (!src_buf || !dest_buf || src_len <= 0 || dest_len <= 0 ||
+	    !mask_info) {
+		pr_err("diag: Invalid input in %s, src_buf: %pK, src_len: %d, dest_buf: %pK, dest_len: %d, mask_info: %pK\n",
+		       __func__, src_buf, src_len, dest_buf, dest_len,
+		       mask_info);
+		mutex_unlock(&driver->md_session_lock);
+		return -EINVAL;
+	}
+	if (!mask_info->ptr) {
+		pr_err("diag: In %s, invalid input mask_info->ptr: %pK\n",
+			__func__, mask_info->ptr);
+		mutex_unlock(&driver->md_session_lock);
+		return -EINVAL;
+	}
+	mask = (struct diag_log_mask_t *)mask_info->ptr;
+	if (!mask->ptr) {
+		pr_err("diag: Invalid input in %s, mask->ptr: %pK\n",
+			__func__, mask->ptr);
+		mutex_unlock(&driver->md_session_lock);
+		return -EINVAL;
+	}
+	for (i = 0; i < MAX_EQUIP_ID; i++, mask++) {
+		if (mask && mask->ptr) {
+			mutex_lock(&mask->lock);
+			memset(mask->ptr, 0, mask->range);
+			mutex_unlock(&mask->lock);
+		}
+	}
+	mask_info->status = DIAG_CTRL_MASK_ALL_DISABLED;
+	mutex_unlock(&driver->md_session_lock);
+	if (diag_check_update(APPS_DATA))
+		diag_update_userspace_clients(LOG_MASKS_TYPE);
+
+	/*
+	 * Apps processor must send the response to this command. Frame the
+	 * response.
+	 */
+	header.cmd_code = DIAG_CMD_LOG_CONFIG;
+	header.padding[0] = 0;
+	header.padding[1] = 0;
+	header.padding[2] = 0;
+	header.sub_cmd = DIAG_CMD_OP_LOG_DISABLE;
+	header.status = LOG_STATUS_SUCCESS;
+	memcpy(dest_buf, &header, sizeof(struct diag_log_config_rsp_t));
+	write_len += sizeof(struct diag_log_config_rsp_t);
+	for (i = 0; i < NUM_PERIPHERALS; i++) {
+		if (!diag_check_update(i))
+			continue;
+		mutex_lock(&driver->md_session_lock);
+		diag_send_log_mask_update(i, ALL_EQUIP_ID);
+		mutex_unlock(&driver->md_session_lock);
+	}
+
+	return write_len;
+}
+
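+/*
+ * Initialize one msg mask table entry from an SSID range. The range is
+ * padded up to at least MAX_SSID_PER_RANGE words and the per-SSID
+ * masks are allocated zeroed, i.e. everything starts disabled.
+ */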
+int diag_create_msg_mask_table_entry(struct diag_msg_mask_t *msg_mask,
+				     struct diag_ssid_range_t *range)
+{
+	if (!msg_mask || !range)
+		return -EIO;
+	if (range->ssid_last < range->ssid_first)
+		return -EINVAL;
+	msg_mask->ssid_first = range->ssid_first;
+	msg_mask->ssid_last = range->ssid_last;
+	msg_mask->ssid_last_tools = range->ssid_last;
+	msg_mask->range = msg_mask->ssid_last - msg_mask->ssid_first + 1;
+	if (msg_mask->range < MAX_SSID_PER_RANGE)
+		msg_mask->range = MAX_SSID_PER_RANGE;
+	msg_mask->range_tools = msg_mask->range;
+	mutex_init(&msg_mask->lock);
+	if (msg_mask->range > 0) {
+		msg_mask->ptr = kzalloc(msg_mask->range * sizeof(uint32_t),
+					GFP_KERNEL);
+		if (!msg_mask->ptr)
+			return -ENOMEM;
+		kmemleak_not_leak(msg_mask->ptr);
+	}
+	return 0;
+}
+
+static int diag_create_msg_mask_table(void)
+{
+	int i;
+	int err = 0;
+	struct diag_msg_mask_t *mask = (struct diag_msg_mask_t *)msg_mask.ptr;
+	struct diag_ssid_range_t range;
+
+	mutex_lock(&msg_mask.lock);
+	mutex_lock(&driver->msg_mask_lock);
+	driver->msg_mask_tbl_count = MSG_MASK_TBL_CNT;
+	for (i = 0; (i < driver->msg_mask_tbl_count) && mask;
+			i++, mask++) {
+		range.ssid_first = msg_mask_tbl[i].ssid_first;
+		range.ssid_last = msg_mask_tbl[i].ssid_last;
+		err = diag_create_msg_mask_table_entry(mask, &range);
+		if (err)
+			break;
+	}
+	mutex_unlock(&driver->msg_mask_lock);
+	mutex_unlock(&msg_mask.lock);
+	return err;
+}
+
+static int diag_create_build_time_mask(void)
+{
+	int i;
+	int err = 0;
+	const uint32_t *tbl = NULL;
+	uint32_t tbl_size = 0;
+	struct diag_msg_mask_t *build_mask = NULL;
+	struct diag_ssid_range_t range;
+
+	mutex_lock(&msg_bt_mask.lock);
+	mutex_lock(&driver->msg_mask_lock);
+	driver->bt_msg_mask_tbl_count = MSG_MASK_TBL_CNT;
+	build_mask = (struct diag_msg_mask_t *)msg_bt_mask.ptr;
+	for (i = 0; (i < driver->bt_msg_mask_tbl_count) && build_mask;
+			i++, build_mask++) {
+		range.ssid_first = msg_mask_tbl[i].ssid_first;
+		range.ssid_last = msg_mask_tbl[i].ssid_last;
+		err = diag_create_msg_mask_table_entry(build_mask, &range);
+		if (err)
+			break;
+		switch (build_mask->ssid_first) {
+		case MSG_SSID_0:
+			tbl = msg_bld_masks_0;
+			tbl_size = sizeof(msg_bld_masks_0);
+			break;
+		case MSG_SSID_1:
+			tbl = msg_bld_masks_1;
+			tbl_size = sizeof(msg_bld_masks_1);
+			break;
+		case MSG_SSID_2:
+			tbl = msg_bld_masks_2;
+			tbl_size = sizeof(msg_bld_masks_2);
+			break;
+		case MSG_SSID_3:
+			tbl = msg_bld_masks_3;
+			tbl_size = sizeof(msg_bld_masks_3);
+			break;
+		case MSG_SSID_4:
+			tbl = msg_bld_masks_4;
+			tbl_size = sizeof(msg_bld_masks_4);
+			break;
+		case MSG_SSID_5:
+			tbl = msg_bld_masks_5;
+			tbl_size = sizeof(msg_bld_masks_5);
+			break;
+		case MSG_SSID_6:
+			tbl = msg_bld_masks_6;
+			tbl_size = sizeof(msg_bld_masks_6);
+			break;
+		case MSG_SSID_7:
+			tbl = msg_bld_masks_7;
+			tbl_size = sizeof(msg_bld_masks_7);
+			break;
+		case MSG_SSID_8:
+			tbl = msg_bld_masks_8;
+			tbl_size = sizeof(msg_bld_masks_8);
+			break;
+		case MSG_SSID_9:
+			tbl = msg_bld_masks_9;
+			tbl_size = sizeof(msg_bld_masks_9);
+			break;
+		case MSG_SSID_10:
+			tbl = msg_bld_masks_10;
+			tbl_size = sizeof(msg_bld_masks_10);
+			break;
+		case MSG_SSID_11:
+			tbl = msg_bld_masks_11;
+			tbl_size = sizeof(msg_bld_masks_11);
+			break;
+		case MSG_SSID_12:
+			tbl = msg_bld_masks_12;
+			tbl_size = sizeof(msg_bld_masks_12);
+			break;
+		case MSG_SSID_13:
+			tbl = msg_bld_masks_13;
+			tbl_size = sizeof(msg_bld_masks_13);
+			break;
+		case MSG_SSID_14:
+			tbl = msg_bld_masks_14;
+			tbl_size = sizeof(msg_bld_masks_14);
+			break;
+		case MSG_SSID_15:
+			tbl = msg_bld_masks_15;
+			tbl_size = sizeof(msg_bld_masks_15);
+			break;
+		case MSG_SSID_16:
+			tbl = msg_bld_masks_16;
+			tbl_size = sizeof(msg_bld_masks_16);
+			break;
+		case MSG_SSID_17:
+			tbl = msg_bld_masks_17;
+			tbl_size = sizeof(msg_bld_masks_17);
+			break;
+		case MSG_SSID_18:
+			tbl = msg_bld_masks_18;
+			tbl_size = sizeof(msg_bld_masks_18);
+			break;
+		case MSG_SSID_19:
+			tbl = msg_bld_masks_19;
+			tbl_size = sizeof(msg_bld_masks_19);
+			break;
+		case MSG_SSID_20:
+			tbl = msg_bld_masks_20;
+			tbl_size = sizeof(msg_bld_masks_20);
+			break;
+		case MSG_SSID_21:
+			tbl = msg_bld_masks_21;
+			tbl_size = sizeof(msg_bld_masks_21);
+			break;
+		case MSG_SSID_22:
+			tbl = msg_bld_masks_22;
+			tbl_size = sizeof(msg_bld_masks_22);
+			break;
+		}
+		if (!tbl)
+			continue;
+		if (tbl_size > build_mask->range * sizeof(uint32_t)) {
+			pr_warn("diag: In %s, table %d has more ssid than max, ssid_first: %d, ssid_last: %d\n",
+				__func__, i, build_mask->ssid_first,
+				build_mask->ssid_last);
+			tbl_size = build_mask->range * sizeof(uint32_t);
+		}
+		memcpy(build_mask->ptr, tbl, tbl_size);
+	}
+	mutex_unlock(&driver->msg_mask_lock);
+	mutex_unlock(&msg_bt_mask.lock);
+	return err;
+}
+
+static int diag_create_log_mask_table(void)
+{
+	struct diag_log_mask_t *mask = NULL;
+	uint8_t i;
+	int err = 0;
+
+	mutex_lock(&log_mask.lock);
+	mask = (struct diag_log_mask_t *)(log_mask.ptr);
+	for (i = 0; (i < MAX_EQUIP_ID) && mask; i++, mask++) {
+		mask->equip_id = i;
+		mask->num_items = LOG_GET_ITEM_NUM(log_code_last_tbl[i]);
+		mask->num_items_tools = mask->num_items;
+		mutex_init(&mask->lock);
+		if (LOG_ITEMS_TO_SIZE(mask->num_items) > MAX_ITEMS_PER_EQUIP_ID)
+			mask->range = LOG_ITEMS_TO_SIZE(mask->num_items);
+		else
+			mask->range = MAX_ITEMS_PER_EQUIP_ID;
+		mask->range_tools = mask->range;
+		mask->ptr = kzalloc(mask->range, GFP_KERNEL);
+		if (!mask->ptr) {
+			err = -ENOMEM;
+			break;
+		}
+		kmemleak_not_leak(mask->ptr);
+	}
+	mutex_unlock(&log_mask.lock);
+	return err;
+}
+
+static int __diag_mask_init(struct diag_mask_info *mask_info, int mask_len,
+			    int update_buf_len)
+{
+	if (!mask_info || mask_len < 0 || update_buf_len < 0)
+		return -EINVAL;
+
+	mask_info->status = DIAG_CTRL_MASK_INVALID;
+	mask_info->mask_len = mask_len;
+	mask_info->update_buf_len = update_buf_len;
+	if (mask_len > 0) {
+		mask_info->ptr = kzalloc(mask_len, GFP_KERNEL);
+		if (!mask_info->ptr)
+			return -ENOMEM;
+		kmemleak_not_leak(mask_info->ptr);
+	}
+	if (update_buf_len > 0) {
+		mask_info->update_buf = kzalloc(update_buf_len, GFP_KERNEL);
+		if (!mask_info->update_buf) {
+			kfree(mask_info->ptr);
+			return -ENOMEM;
+		}
+		kmemleak_not_leak(mask_info->update_buf);
+	}
+	return 0;
+}
+
+static void __diag_mask_exit(struct diag_mask_info *mask_info)
+{
+	if (!mask_info || !mask_info->ptr)
+		return;
+
+	mutex_lock(&mask_info->lock);
+	kfree(mask_info->ptr);
+	mask_info->ptr = NULL;
+	kfree(mask_info->update_buf);
+	mask_info->update_buf = NULL;
+	mutex_unlock(&mask_info->lock);
+}
+
+int diag_log_mask_copy(struct diag_mask_info *dest, struct diag_mask_info *src)
+{
+	int i;
+	int err = 0;
+	struct diag_log_mask_t *src_mask = NULL;
+	struct diag_log_mask_t *dest_mask = NULL;
+
+	if (!src || !dest)
+		return -EINVAL;
+
+	mutex_init(&dest->lock);
+	err = __diag_mask_init(dest, LOG_MASK_SIZE, APPS_BUF_SIZE);
+	if (err)
+		return err;
+
+	mutex_lock(&dest->lock);
+	src_mask = (struct diag_log_mask_t *)(src->ptr);
+	dest_mask = (struct diag_log_mask_t *)(dest->ptr);
+
+	dest->mask_len = src->mask_len;
+	dest->status = src->status;
+
+	for (i = 0; i < MAX_EQUIP_ID; i++, src_mask++, dest_mask++) {
+		dest_mask->equip_id = src_mask->equip_id;
+		dest_mask->num_items = src_mask->num_items;
+		dest_mask->num_items_tools = src_mask->num_items_tools;
+		mutex_init(&dest_mask->lock);
+		dest_mask->range = src_mask->range;
+		dest_mask->range_tools = src_mask->range_tools;
+		dest_mask->ptr = kzalloc(dest_mask->range_tools, GFP_KERNEL);
+		if (!dest_mask->ptr) {
+			err = -ENOMEM;
+			break;
+		}
+		kmemleak_not_leak(dest_mask->ptr);
+		memcpy(dest_mask->ptr, src_mask->ptr, dest_mask->range_tools);
+	}
+	mutex_unlock(&dest->lock);
+
+	return err;
+}
+
+void diag_log_mask_free(struct diag_mask_info *mask_info)
+{
+	int i;
+	struct diag_log_mask_t *mask = NULL;
+
+	if (!mask_info || !mask_info->ptr)
+		return;
+
+	mutex_lock(&mask_info->lock);
+	mask = (struct diag_log_mask_t *)mask_info->ptr;
+	if (!mask->ptr) {
+		pr_err("diag: Invalid input in %s, mask->ptr: %pK\n",
+			__func__, mask->ptr);
+		mutex_unlock(&mask_info->lock);
+		return;
+	}
+	for (i = 0; i < MAX_EQUIP_ID; i++, mask++) {
+		kfree(mask->ptr);
+		mask->ptr = NULL;
+	}
+	mutex_unlock(&mask_info->lock);
+
+	__diag_mask_exit(mask_info);
+}
+
+static int diag_msg_mask_init(void)
+{
+	int err = 0;
+	int i;
+
+	mutex_init(&msg_mask.lock);
+	err = __diag_mask_init(&msg_mask, MSG_MASK_SIZE, APPS_BUF_SIZE);
+	if (err)
+		return err;
+
+	err = diag_create_msg_mask_table();
+	if (err) {
+		pr_err("diag: Unable to create msg masks, err: %d\n", err);
+		return err;
+	}
+	mutex_lock(&driver->msg_mask_lock);
+	driver->msg_mask = &msg_mask;
+	for (i = 0; i < NUM_PERIPHERALS; i++)
+		driver->max_ssid_count[i] = 0;
+	mutex_unlock(&driver->msg_mask_lock);
+
+	return 0;
+}
+
+int diag_msg_mask_copy(struct diag_md_session_t *new_session,
+	struct diag_mask_info *dest, struct diag_mask_info *src)
+{
+	int i;
+	int err = 0;
+	struct diag_msg_mask_t *src_mask = NULL;
+	struct diag_msg_mask_t *dest_mask = NULL;
+	struct diag_ssid_range_t range;
+
+	if (!src || !dest)
+		return -EINVAL;
+
+	mutex_init(&dest->lock);
+	mutex_lock(&dest->lock);
+	mutex_lock(&driver->msg_mask_lock);
+	new_session->msg_mask_tbl_count =
+		driver->msg_mask_tbl_count;
+	err = __diag_mask_init(dest,
+		(new_session->msg_mask_tbl_count *
+		sizeof(struct diag_msg_mask_t)), APPS_BUF_SIZE);
+	if (err) {
+		mutex_unlock(&driver->msg_mask_lock);
+		mutex_unlock(&dest->lock);
+		return err;
+	}
+	src_mask = (struct diag_msg_mask_t *)src->ptr;
+	dest_mask = (struct diag_msg_mask_t *)dest->ptr;
+
+	dest->mask_len = src->mask_len;
+	dest->status = src->status;
+	for (i = 0; i < new_session->msg_mask_tbl_count; i++) {
+		range.ssid_first = src_mask->ssid_first;
+		range.ssid_last = src_mask->ssid_last;
+		err = diag_create_msg_mask_table_entry(dest_mask, &range);
+		if (err)
+			break;
+		memcpy(dest_mask->ptr, src_mask->ptr,
+		       dest_mask->range * sizeof(uint32_t));
+		src_mask++;
+		dest_mask++;
+	}
+	mutex_unlock(&driver->msg_mask_lock);
+	mutex_unlock(&dest->lock);
+	return err;
+}
+
+void diag_msg_mask_free(struct diag_mask_info *mask_info,
+	struct diag_md_session_t *session_info)
+{
+	int i;
+	struct diag_msg_mask_t *mask = NULL;
+	uint8_t msg_mask_tbl_count = 0;
+
+	if (!mask_info || !mask_info->ptr)
+		return;
+	mutex_lock(&mask_info->lock);
+	mutex_lock(&driver->msg_mask_lock);
+	mask = (struct diag_msg_mask_t *)mask_info->ptr;
+	if (!mask->ptr) {
+		pr_err("diag: Invalid input in %s, mask->ptr: %pK\n",
+			__func__, mask->ptr);
+		mutex_unlock(&driver->msg_mask_lock);
+		mutex_unlock(&mask_info->lock);
+		return;
+	}
+	msg_mask_tbl_count = (session_info) ?
+		session_info->msg_mask_tbl_count :
+		driver->msg_mask_tbl_count;
+	for (i = 0; i < msg_mask_tbl_count; i++, mask++) {
+		kfree(mask->ptr);
+		mask->ptr = NULL;
+	}
+	mutex_unlock(&driver->msg_mask_lock);
+	mutex_unlock(&mask_info->lock);
+	__diag_mask_exit(mask_info);
+}
+
+static void diag_msg_mask_exit(void)
+{
+	int i;
+	struct diag_msg_mask_t *mask = NULL;
+
+	mutex_lock(&driver->msg_mask_lock);
+	mask = (struct diag_msg_mask_t *)(msg_mask.ptr);
+	if (mask) {
+		for (i = 0; i < driver->msg_mask_tbl_count; i++, mask++)
+			kfree(mask->ptr);
+		kfree(msg_mask.ptr);
+		msg_mask.ptr = NULL;
+	}
+	kfree(msg_mask.update_buf);
+	msg_mask.update_buf = NULL;
+	mutex_unlock(&driver->msg_mask_lock);
+}
+
+static int diag_build_time_mask_init(void)
+{
+	int err = 0;
+
+	/* There is no need for an update buffer for build-time masks */
+	mutex_init(&msg_bt_mask.lock);
+	err = __diag_mask_init(&msg_bt_mask, MSG_MASK_SIZE, 0);
+	if (err)
+		return err;
+	err = diag_create_build_time_mask();
+	if (err) {
+		pr_err("diag: Unable to create msg build time masks, err: %d\n",
+		       err);
+		return err;
+	}
+	driver->build_time_mask = &msg_bt_mask;
+	return 0;
+}
+
+static void diag_build_time_mask_exit(void)
+{
+	int i;
+	struct diag_msg_mask_t *mask = NULL;
+
+	mutex_lock(&driver->msg_mask_lock);
+	mask = (struct diag_msg_mask_t *)(msg_bt_mask.ptr);
+	if (mask) {
+		for (i = 0; i < driver->bt_msg_mask_tbl_count; i++, mask++)
+			kfree(mask->ptr);
+		kfree(msg_bt_mask.ptr);
+		msg_bt_mask.ptr = NULL;
+	}
+	mutex_unlock(&driver->msg_mask_lock);
+}
+
+static int diag_log_mask_init(void)
+{
+	int err = 0;
+	int i;
+
+	mutex_init(&log_mask.lock);
+	err = __diag_mask_init(&log_mask, LOG_MASK_SIZE, APPS_BUF_SIZE);
+	if (err)
+		return err;
+	err = diag_create_log_mask_table();
+	if (err)
+		return err;
+	driver->log_mask = &log_mask;
+
+	for (i = 0; i < NUM_PERIPHERALS; i++)
+		driver->num_equip_id[i] = 0;
+
+	return 0;
+}
+
+static void diag_log_mask_exit(void)
+{
+	int i;
+	struct diag_log_mask_t *mask = NULL;
+
+	mask = (struct diag_log_mask_t *)(log_mask.ptr);
+	if (mask) {
+		for (i = 0; i < MAX_EQUIP_ID; i++, mask++)
+			kfree(mask->ptr);
+		kfree(log_mask.ptr);
+	}
+
+	kfree(log_mask.update_buf);
+}
+
+static int diag_event_mask_init(void)
+{
+	int err = 0;
+	int i;
+
+	mutex_init(&event_mask.lock);
+	err = __diag_mask_init(&event_mask, EVENT_MASK_SIZE, APPS_BUF_SIZE);
+	if (err)
+		return err;
+	driver->event_mask_size = EVENT_MASK_SIZE;
+	driver->last_event_id = APPS_EVENT_LAST_ID;
+	driver->event_mask = &event_mask;
+
+	for (i = 0; i < NUM_PERIPHERALS; i++)
+		driver->num_event_id[i] = 0;
+
+	return 0;
+}
+
+int diag_event_mask_copy(struct diag_mask_info *dest,
+			 struct diag_mask_info *src)
+{
+	int err = 0;
+
+	if (!src || !dest)
+		return -EINVAL;
+
+	mutex_init(&dest->lock);
+	err = __diag_mask_init(dest, EVENT_MASK_SIZE, APPS_BUF_SIZE);
+	if (err)
+		return err;
+
+	mutex_lock(&dest->lock);
+	dest->mask_len = src->mask_len;
+	dest->status = src->status;
+	memcpy(dest->ptr, src->ptr, dest->mask_len);
+	mutex_unlock(&dest->lock);
+
+	return err;
+}
+
+void diag_event_mask_free(struct diag_mask_info *mask_info)
+{
+	if (!mask_info)
+		return;
+
+	__diag_mask_exit(mask_info);
+}
+
+static void diag_event_mask_exit(void)
+{
+	kfree(event_mask.ptr);
+	kfree(event_mask.update_buf);
+}
+
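+/*
+ * Copy the msg masks to a userspace client. Each table entry is staged
+ * in update_buf as a diag_msg_mask_userspace_t header followed by
+ * range_tools uint32_t words, then pushed out with copy_to_user().
+ */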
+int diag_copy_to_user_msg_mask(char __user *buf, size_t count,
+			       struct diag_md_session_t *info)
+{
+	int i;
+	int err = 0;
+	int len = 0;
+	int copy_len = 0;
+	int total_len = 0;
+	struct diag_msg_mask_userspace_t header;
+	struct diag_mask_info *mask_info = NULL;
+	struct diag_msg_mask_t *mask = NULL;
+	unsigned char *ptr = NULL;
+	uint8_t msg_mask_tbl_count = 0;
+
+	if (!buf || count == 0)
+		return -EINVAL;
+
+	mask_info = (!info) ? &msg_mask : info->msg_mask;
+	if (!mask_info)
+		return -EIO;
+
+	if (!mask_info->ptr || !mask_info->update_buf) {
+		pr_err("diag: In %s, invalid input mask_info->ptr: %pK, mask_info->update_buf: %pK\n",
+			__func__, mask_info->ptr, mask_info->update_buf);
+		return -EINVAL;
+	}
+	mutex_lock(&driver->diag_maskclear_mutex);
+	if (driver->mask_clear) {
+		DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+		"diag:%s: count = %zu\n", __func__, count);
+		mutex_unlock(&driver->diag_maskclear_mutex);
+		return -EIO;
+	}
+	mutex_unlock(&driver->diag_maskclear_mutex);
+	mutex_lock(&mask_info->lock);
+	mutex_lock(&driver->msg_mask_lock);
+
+	mask = (struct diag_msg_mask_t *)(mask_info->ptr);
+	if (!mask->ptr) {
+		pr_err("diag: Invalid input in %s, mask->ptr: %pK\n",
+			__func__, mask->ptr);
+		mutex_unlock(&driver->msg_mask_lock);
+		mutex_unlock(&mask_info->lock);
+		return -EINVAL;
+	}
+	msg_mask_tbl_count = (info) ? info->msg_mask_tbl_count :
+			driver->msg_mask_tbl_count;
+	for (i = 0; i < msg_mask_tbl_count; i++, mask++) {
+		if (!mask->ptr)
+			continue;
+		ptr = mask_info->update_buf;
+		len = 0;
+		mutex_lock(&mask->lock);
+		header.ssid_first = mask->ssid_first;
+		header.ssid_last = mask->ssid_last_tools;
+		header.range = mask->range_tools;
+		memcpy(ptr, &header, sizeof(header));
+		len += sizeof(header);
+		copy_len = (sizeof(uint32_t) * mask->range_tools);
+		if ((len + copy_len) > mask_info->update_buf_len) {
+			pr_err("diag: In %s, no space to update msg mask, first: %d, last: %d\n",
+			       __func__, mask->ssid_first,
+			       mask->ssid_last_tools);
+			mutex_unlock(&mask->lock);
+			continue;
+		}
+		memcpy(ptr + len, mask->ptr, copy_len);
+		len += copy_len;
+		mutex_unlock(&mask->lock);
+		/* + sizeof(int) to account for data_type already in buf */
+		if (total_len + sizeof(int) + len > count) {
+			pr_err("diag: In %s, unable to send msg masks to user space, total_len: %d, count: %zu\n",
+			       __func__, total_len, count);
+			err = -ENOMEM;
+			break;
+		}
+		err = copy_to_user(buf + total_len, (void *)ptr, len);
+		if (err) {
+			pr_err("diag: In %s Unable to send msg masks to user space clients, err: %d\n",
+			       __func__, err);
+			break;
+		}
+		total_len += len;
+	}
+	mutex_unlock(&driver->msg_mask_lock);
+	mutex_unlock(&mask_info->lock);
+	return err ? err : total_len;
+}
+
+int diag_copy_to_user_log_mask(char __user *buf, size_t count,
+			       struct diag_md_session_t *info)
+{
+	int i;
+	int err = 0;
+	int len = 0;
+	int copy_len = 0;
+	int total_len = 0;
+	struct diag_log_mask_userspace_t header;
+	struct diag_log_mask_t *mask = NULL;
+	struct diag_mask_info *mask_info = NULL;
+	unsigned char *ptr = NULL;
+
+	if (!buf || count == 0)
+		return -EINVAL;
+
+	mask_info = (!info) ? &log_mask : info->log_mask;
+	if (!mask_info)
+		return -EIO;
+
+	if (!mask_info->ptr || !mask_info->update_buf) {
+		pr_err("diag: In %s, invalid input mask_info->ptr: %pK, mask_info->update_buf: %pK\n",
+			__func__, mask_info->ptr, mask_info->update_buf);
+		return -EINVAL;
+	}
+
+	mutex_lock(&mask_info->lock);
+	mask = (struct diag_log_mask_t *)(mask_info->ptr);
+	if (!mask->ptr) {
+		pr_err("diag: Invalid input in %s, mask->ptr: %pK\n",
+			__func__, mask->ptr);
+		mutex_unlock(&mask_info->lock);
+		return -EINVAL;
+	}
+	for (i = 0; i < MAX_EQUIP_ID; i++, mask++) {
+		if (!mask->ptr)
+			continue;
+		ptr = mask_info->update_buf;
+		len = 0;
+		mutex_lock(&mask->lock);
+		header.equip_id = mask->equip_id;
+		header.num_items = mask->num_items_tools;
+		memcpy(ptr, &header, sizeof(header));
+		len += sizeof(header);
+		copy_len = LOG_ITEMS_TO_SIZE(header.num_items);
+		if ((len + copy_len) > mask_info->update_buf_len) {
+			pr_err("diag: In %s, no space to update log mask, equip_id: %d\n",
+			       __func__, mask->equip_id);
+			mutex_unlock(&mask->lock);
+			continue;
+		}
+		memcpy(ptr + len, mask->ptr, copy_len);
+		len += copy_len;
+		mutex_unlock(&mask->lock);
+		/* + sizeof(int) to account for data_type already in buf */
+		if (total_len + sizeof(int) + len > count) {
+			pr_err("diag: In %s, unable to send log masks to user space, total_len: %d, count: %zu\n",
+			       __func__, total_len, count);
+			err = -ENOMEM;
+			break;
+		}
+		err = copy_to_user(buf + total_len, (void *)ptr, len);
+		if (err) {
+			pr_err("diag: In %s Unable to send log masks to user space clients, err: %d\n",
+			       __func__, err);
+			break;
+		}
+		total_len += len;
+	}
+	mutex_unlock(&mask_info->lock);
+
+	return err ? err : total_len;
+}
+
+void diag_send_updates_peripheral(uint8_t peripheral)
+{
+	diag_send_feature_mask_update(peripheral);
+	if (driver->time_sync_enabled)
+		diag_send_time_sync_update(peripheral);
+	mutex_lock(&driver->md_session_lock);
+	diag_send_msg_mask_update(peripheral, ALL_SSID, ALL_SSID);
+	diag_send_log_mask_update(peripheral, ALL_EQUIP_ID);
+	diag_send_event_mask_update(peripheral);
+	mutex_unlock(&driver->md_session_lock);
+	diag_send_real_time_update(peripheral,
+				driver->real_time_mode[DIAG_LOCAL_PROC]);
+	diag_send_peripheral_buffering_mode(
+				&driver->buffering_mode[peripheral]);
+}
+
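+/*
+ * Dispatch an apps-processed mask command to its handler. Note the
+ * sub-command is decoded differently per family: DIAG_CMD_LOG_CONFIG
+ * carries a 4-byte sub_cmd at offset 4, while DIAG_CMD_MSG_CONFIG
+ * carries a single byte at offset 1. The handlers write their
+ * response into driver->apps_rsp_buf.
+ */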
+int diag_process_apps_masks(unsigned char *buf, int len, int pid)
+{
+	int size = 0;
+	int sub_cmd = 0;
+	int (*hdlr)(unsigned char *src_buf, int src_len,
+		    unsigned char *dest_buf, int dest_len, int pid) = NULL;
+
+	if (!buf || len <= 0)
+		return -EINVAL;
+
+	if (*buf == DIAG_CMD_LOG_CONFIG) {
+		sub_cmd = *(int *)(buf + sizeof(int));
+		switch (sub_cmd) {
+		case DIAG_CMD_OP_LOG_DISABLE:
+			hdlr = diag_cmd_disable_log_mask;
+			break;
+		case DIAG_CMD_OP_GET_LOG_RANGE:
+			hdlr = diag_cmd_get_log_range;
+			break;
+		case DIAG_CMD_OP_SET_LOG_MASK:
+			hdlr = diag_cmd_set_log_mask;
+			break;
+		case DIAG_CMD_OP_GET_LOG_MASK:
+			hdlr = diag_cmd_get_log_mask;
+			break;
+		}
+	} else if (*buf == DIAG_CMD_MSG_CONFIG) {
+		sub_cmd = *(uint8_t *)(buf + sizeof(uint8_t));
+		switch (sub_cmd) {
+		case DIAG_CMD_OP_GET_SSID_RANGE:
+			hdlr = diag_cmd_get_ssid_range;
+			break;
+		case DIAG_CMD_OP_GET_BUILD_MASK:
+			hdlr = diag_cmd_get_build_mask;
+			break;
+		case DIAG_CMD_OP_GET_MSG_MASK:
+			hdlr = diag_cmd_get_msg_mask;
+			break;
+		case DIAG_CMD_OP_SET_MSG_MASK:
+			hdlr = diag_cmd_set_msg_mask;
+			break;
+		case DIAG_CMD_OP_SET_ALL_MSG_MASK:
+			hdlr = diag_cmd_set_all_msg_mask;
+			break;
+		}
+	} else if (*buf == DIAG_CMD_GET_EVENT_MASK) {
+		hdlr = diag_cmd_get_event_mask;
+	} else if (*buf == DIAG_CMD_SET_EVENT_MASK) {
+		hdlr = diag_cmd_update_event_mask;
+	} else if (*buf == DIAG_CMD_EVENT_TOGGLE) {
+		hdlr = diag_cmd_toggle_events;
+	}
+
+	if (hdlr)
+		size = hdlr(buf, len, driver->apps_rsp_buf,
+			    DIAG_MAX_RSP_SIZE, pid);
+
+	return (size > 0) ? size : 0;
+}
+
+int diag_masks_init(void)
+{
+	int err = 0;
+	err = diag_msg_mask_init();
+	if (err)
+		goto fail;
+
+	err = diag_build_time_mask_init();
+	if (err)
+		goto fail;
+
+	err = diag_log_mask_init();
+	if (err)
+		goto fail;
+
+	err = diag_event_mask_init();
+	if (err)
+		goto fail;
+
+	if (driver->buf_feature_mask_update == NULL) {
+		driver->buf_feature_mask_update = kzalloc(sizeof(
+					struct diag_ctrl_feature_mask) +
+					FEATURE_MASK_LEN, GFP_KERNEL);
+		if (driver->buf_feature_mask_update == NULL)
+			goto fail;
+		kmemleak_not_leak(driver->buf_feature_mask_update);
+	}
+
+	return 0;
+fail:
+	pr_err("diag: Could not initialize diag mask buffers\n");
+	diag_masks_exit();
+	return -ENOMEM;
+}
+
+void diag_masks_exit(void)
+{
+	diag_msg_mask_exit();
+	diag_build_time_mask_exit();
+	diag_log_mask_exit();
+	diag_event_mask_exit();
+	kfree(driver->buf_feature_mask_update);
+}
diff -Nruw linux-4.4.115-fbx/drivers/char/diag./diag_masks.h linux-4.4.115-fbx/drivers/char/diag/diag_masks.h
--- linux-4.4.115-fbx/drivers/char/diag./diag_masks.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/char/diag/diag_masks.h	2019-10-29 09:26:23.449201280 +0100
@@ -0,0 +1,180 @@
+/* Copyright (c) 2013-2015, 2018 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef DIAG_MASKS_H
+#define DIAG_MASKS_H
+
+#include "diagfwd.h"
+
+struct diag_log_mask_t {
+	uint8_t equip_id;
+	uint32_t num_items;
+	uint32_t num_items_tools;
+	uint32_t range;
+	uint32_t range_tools;
+	struct mutex lock;
+	uint8_t *ptr;
+};
+
+struct diag_ssid_range_t {
+	uint16_t ssid_first;
+	uint16_t ssid_last;
+} __packed;
+
+struct diag_msg_mask_t {
+	uint32_t ssid_first;
+	uint32_t ssid_last;
+	uint32_t ssid_last_tools;
+	uint32_t range;
+	uint32_t range_tools;
+	struct mutex lock;
+	uint32_t *ptr;
+};
+
+struct diag_log_config_req_t {
+	uint8_t cmd_code;
+	uint8_t padding[3];
+	uint32_t sub_cmd;
+	uint32_t equip_id;
+	uint32_t num_items;
+} __packed;
+
+struct diag_log_config_rsp_t {
+	uint8_t cmd_code;
+	uint8_t padding[3];
+	uint32_t sub_cmd;
+	uint32_t status;
+} __packed;
+
+struct diag_log_config_set_rsp_t {
+	uint8_t cmd_code;
+	uint8_t padding[3];
+	uint32_t sub_cmd;
+	uint32_t status;
+	uint32_t equip_id;
+	uint32_t num_items;
+} __packed;
+
+struct diag_log_on_demand_rsp_t {
+	uint8_t cmd_code;
+	uint16_t log_code;
+	uint8_t status;
+} __packed;
+
+struct diag_event_report_t {
+	uint8_t cmd_code;
+	uint16_t padding;
+} __packed;
+
+struct diag_event_mask_config_t {
+	uint8_t cmd_code;
+	uint8_t status;
+	uint16_t padding;
+	uint16_t num_bits;
+} __packed;
+
+struct diag_msg_config_rsp_t {
+	uint8_t cmd_code;
+	uint8_t sub_cmd;
+	uint8_t status;
+	uint8_t padding;
+	uint32_t rt_mask;
+} __packed;
+
+struct diag_msg_ssid_query_t {
+	uint8_t cmd_code;
+	uint8_t sub_cmd;
+	uint8_t status;
+	uint8_t padding;
+	uint32_t count;
+} __packed;
+
+struct diag_build_mask_req_t {
+	uint8_t cmd_code;
+	uint8_t sub_cmd;
+	uint16_t ssid_first;
+	uint16_t ssid_last;
+} __packed;
+
+struct diag_msg_build_mask_t {
+	uint8_t cmd_code;
+	uint8_t sub_cmd;
+	uint16_t ssid_first;
+	uint16_t ssid_last;
+	uint8_t status;
+	uint8_t padding;
+} __packed;
+
+struct diag_msg_mask_userspace_t {
+	uint32_t ssid_first;
+	uint32_t ssid_last;
+	uint32_t range;
+} __packed;
+
+struct diag_log_mask_userspace_t {
+	uint8_t equip_id;
+	uint32_t num_items;
+} __packed;
+
+#define MAX_EQUIP_ID	16
+#define MSG_MASK_SIZE	(MSG_MASK_TBL_CNT * sizeof(struct diag_msg_mask_t))
+#define LOG_MASK_SIZE	(MAX_EQUIP_ID * sizeof(struct diag_log_mask_t))
+#define EVENT_MASK_SIZE 513
+#define MAX_ITEMS_PER_EQUIP_ID	512
+#define MAX_ITEMS_ALLOWED	0xFFF
+
+#define LOG_MASK_CTRL_HEADER_LEN	11
+#define MSG_MASK_CTRL_HEADER_LEN	11
+#define EVENT_MASK_CTRL_HEADER_LEN	7
+
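+/*
+ * Note that the status encodings are not uniform: the log and event
+ * commands report success as 0, while the msg commands report success
+ * as 1.
+ */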
+#define LOG_STATUS_SUCCESS	0
+#define LOG_STATUS_INVALID	1
+#define LOG_STATUS_FAIL		2
+
+#define MSG_STATUS_FAIL		0
+#define MSG_STATUS_SUCCESS	1
+
+#define EVENT_STATUS_SUCCESS	0
+#define EVENT_STATUS_FAIL	1
+
+#define DIAG_CTRL_MASK_INVALID		0
+#define DIAG_CTRL_MASK_ALL_DISABLED	1
+#define DIAG_CTRL_MASK_ALL_ENABLED	2
+#define DIAG_CTRL_MASK_VALID		3
+
+extern struct diag_mask_info msg_mask;
+extern struct diag_mask_info msg_bt_mask;
+extern struct diag_mask_info log_mask;
+extern struct diag_mask_info event_mask;
+
+int diag_masks_init(void);
+void diag_masks_exit(void);
+int diag_log_mask_copy(struct diag_mask_info *dest,
+		       struct diag_mask_info *src);
+int diag_msg_mask_copy(struct diag_md_session_t *new_session,
+	struct diag_mask_info *dest, struct diag_mask_info *src);
+int diag_event_mask_copy(struct diag_mask_info *dest,
+			 struct diag_mask_info *src);
+void diag_log_mask_free(struct diag_mask_info *mask_info);
+void diag_msg_mask_free(struct diag_mask_info *mask_info,
+	struct diag_md_session_t *session_info);
+void diag_event_mask_free(struct diag_mask_info *mask_info);
+int diag_process_apps_masks(unsigned char *buf, int len, int pid);
+void diag_send_updates_peripheral(uint8_t peripheral);
+
+extern int diag_create_msg_mask_table_entry(struct diag_msg_mask_t *msg_mask,
+					    struct diag_ssid_range_t *range);
+extern int diag_copy_to_user_msg_mask(char __user *buf, size_t count,
+				      struct diag_md_session_t *info);
+extern int diag_copy_to_user_log_mask(char __user *buf, size_t count,
+				      struct diag_md_session_t *info);
+#endif
diff -Nruw linux-4.4.115-fbx/drivers/char/diag./diagmem.c linux-4.4.115-fbx/drivers/char/diag/diagmem.c
--- linux-4.4.115-fbx/drivers/char/diag./diagmem.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/char/diag/diagmem.c	2019-01-22 16:16:22.963241516 +0100
@@ -0,0 +1,294 @@
+/* Copyright (c) 2008-2014, 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/mempool.h>
+#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/kmemleak.h>
+#include <linux/ratelimit.h>
+#include <linux/atomic.h>
+
+#include "diagchar.h"
+#include "diagmem.h"
+
+struct diag_mempool_t diag_mempools[NUM_MEMORY_POOLS] = {
+	{
+		.id = POOL_TYPE_COPY,
+		.name = "POOL_COPY",
+		.pool = NULL,
+		.itemsize = 0,
+		.poolsize = 0,
+		.count = 0
+	},
+	{
+		.id = POOL_TYPE_HDLC,
+		.name = "POOL_HDLC",
+		.pool = NULL,
+		.itemsize = 0,
+		.poolsize = 0,
+		.count = 0
+	},
+	{
+		.id = POOL_TYPE_USER,
+		.name = "POOL_USER",
+		.pool = NULL,
+		.itemsize = 0,
+		.poolsize = 0,
+		.count = 0
+	},
+	{
+		.id = POOL_TYPE_MUX_APPS,
+		.name = "POOL_MUX_APPS",
+		.pool = NULL,
+		.itemsize = 0,
+		.poolsize = 0,
+		.count = 0
+	},
+	{
+		.id = POOL_TYPE_DCI,
+		.name = "POOL_DCI",
+		.pool = NULL,
+		.itemsize = 0,
+		.poolsize = 0,
+		.count = 0
+	},
+#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
+	{
+		.id = POOL_TYPE_MDM,
+		.name = "POOL_MDM",
+		.pool = NULL,
+		.itemsize = 0,
+		.poolsize = 0,
+		.count = 0
+	},
+	{
+		.id = POOL_TYPE_MDM2,
+		.name = "POOL_MDM2",
+		.pool = NULL,
+		.itemsize = 0,
+		.poolsize = 0,
+		.count = 0
+	},
+	{
+		.id = POOL_TYPE_MDM_DCI,
+		.name = "POOL_MDM_DCI",
+		.pool = NULL,
+		.itemsize = 0,
+		.poolsize = 0,
+		.count = 0
+	},
+	{
+		.id = POOL_TYPE_MDM2_DCI,
+		.name = "POOL_MDM2_DCI",
+		.pool = NULL,
+		.itemsize = 0,
+		.poolsize = 0,
+		.count = 0
+	},
+	{
+		.id = POOL_TYPE_MDM_MUX,
+		.name = "POOL_MDM_MUX",
+		.pool = NULL,
+		.itemsize = 0,
+		.poolsize = 0,
+		.count = 0
+	},
+	{
+		.id = POOL_TYPE_MDM2_MUX,
+		.name = "POOL_MDM2_MUX",
+		.pool = NULL,
+		.itemsize = 0,
+		.poolsize = 0,
+		.count = 0
+	},
+	{
+		.id = POOL_TYPE_MDM_DCI_WRITE,
+		.name = "POOL_MDM_DCI_WRITE",
+		.pool = NULL,
+		.itemsize = 0,
+		.poolsize = 0,
+		.count = 0
+	},
+	{
+		.id = POOL_TYPE_MDM2_DCI_WRITE,
+		.name = "POOL_MDM2_DCI_WRITE",
+		.pool = NULL,
+		.itemsize = 0,
+		.poolsize = 0,
+		.count = 0
+	},
+	{
+		.id = POOL_TYPE_QSC_MUX,
+		.name = "POOL_QSC_MUX",
+		.pool = NULL,
+		.itemsize = 0,
+		.poolsize = 0,
+		.count = 0
+	}
+#endif
+};
+
+void diagmem_setsize(int pool_idx, int itemsize, int poolsize)
+{
+	if (pool_idx < 0 || pool_idx >= NUM_MEMORY_POOLS) {
+		pr_err("diag: Invalid pool index %d in %s\n", pool_idx,
+		       __func__);
+		return;
+	}
+
+	diag_mempools[pool_idx].itemsize = itemsize;
+	diag_mempools[pool_idx].poolsize = poolsize;
+	pr_debug("diag: Mempool %s sizes: itemsize %d poolsize %d\n",
+		 diag_mempools[pool_idx].name, diag_mempools[pool_idx].itemsize,
+		 diag_mempools[pool_idx].poolsize);
+}
+
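+/*
+ * Allocate one item from the pool identified by pool_type. The count is
+ * checked under the pool lock so the pool can never be overdrawn; NULL is
+ * returned when the pool is uninitialized, the request exceeds itemsize,
+ * or the pool is exhausted.
+ */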
+void *diagmem_alloc(struct diagchar_dev *driver, int size, int pool_type)
+{
+	void *buf = NULL;
+	int i = 0;
+	unsigned long flags;
+	struct diag_mempool_t *mempool = NULL;
+
+	if (!driver)
+		return NULL;
+
+	for (i = 0; i < NUM_MEMORY_POOLS; i++) {
+		mempool = &diag_mempools[i];
+		if (pool_type != mempool->id)
+			continue;
+		if (!mempool->pool) {
+			pr_err_ratelimited("diag: %s mempool is not initialized yet\n",
+					   mempool->name);
+			break;
+		}
+		if (size == 0 || size > mempool->itemsize) {
+			pr_err_ratelimited("diag: cannot alloc from mempool %s, invalid size: %d\n",
+					   mempool->name, size);
+			break;
+		}
+		spin_lock_irqsave(&mempool->lock, flags);
+		if (mempool->count < mempool->poolsize) {
+			mempool->count++; /* protected by mempool->lock */
+			buf = mempool_alloc(mempool->pool, GFP_ATOMIC);
+			kmemleak_not_leak(buf);
+		}
+		spin_unlock_irqrestore(&mempool->lock, flags);
+		if (!buf) {
+			pr_debug_ratelimited("diag: Unable to allocate buffer from memory pool %s, size: %d/%d count: %d/%d\n",
+					     mempool->name,
+					     size, mempool->itemsize,
+					     mempool->count,
+					     mempool->poolsize);
+		}
+		break;
+	}
+
+	return buf;
+}
+
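+/*
+ * Return an item to the pool it was allocated from. A free is refused and
+ * logged when no allocations are outstanding, since that would indicate a
+ * double free.
+ */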
+void diagmem_free(struct diagchar_dev *driver, void *buf, int pool_type)
+{
+	int i = 0;
+	unsigned long flags;
+	struct diag_mempool_t *mempool = NULL;
+
+	if (!driver || !buf)
+		return;
+
+	for (i = 0; i < NUM_MEMORY_POOLS; i++) {
+		mempool = &diag_mempools[i];
+		if (pool_type != mempool->id)
+			continue;
+		if (!mempool->pool) {
+			pr_err_ratelimited("diag: %s mempool is not initialized yet\n",
+					   mempool->name);
+			break;
+		}
+		spin_lock_irqsave(&mempool->lock, flags);
+		if (mempool->count > 0) {
+			mempool_free(buf, mempool->pool);
+			mempool->count--; /* protected by mempool->lock */
+		} else {
+			pr_err_ratelimited("diag: Attempting to free items from %s mempool which is already empty\n",
+					   mempool->name);
+		}
+		spin_unlock_irqrestore(&mempool->lock, flags);
+		break;
+	}
+}
+
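+/*
+ * Create the kmalloc-backed mempool for the given index. The item and pool
+ * sizes must have been configured beforehand via diagmem_setsize().
+ */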
+void diagmem_init(struct diagchar_dev *driver, int index)
+{
+	struct diag_mempool_t *mempool = NULL;
+
+	if (!driver)
+		return;
+
+	if (index < 0 || index >= NUM_MEMORY_POOLS) {
+		pr_err("diag: In %s, Invalid index %d\n", __func__, index);
+		return;
+	}
+
+	mempool = &diag_mempools[index];
+	if (mempool->pool) {
+		pr_debug("diag: mempool %s is already initialized\n",
+			 mempool->name);
+		return;
+	}
+	if (mempool->itemsize <= 0 || mempool->poolsize <= 0) {
+		pr_err("diag: Unable to initialize %s mempool, itemsize: %d poolsize: %d\n",
+		       mempool->name, mempool->itemsize,
+		       mempool->poolsize);
+		return;
+	}
+
+	mempool->pool = mempool_create_kmalloc_pool(mempool->poolsize,
+						    mempool->itemsize);
+	if (!mempool->pool)
+		pr_err("diag: cannot allocate %s mempool\n", mempool->name);
+	else
+		kmemleak_not_leak(mempool->pool);
+
+	spin_lock_init(&mempool->lock);
+}
+
+void diagmem_exit(struct diagchar_dev *driver, int index)
+{
+	unsigned long flags;
+	struct diag_mempool_t *mempool = NULL;
+
+	if (!driver)
+		return;
+
+	if (index < 0 || index >= NUM_MEMORY_POOLS) {
+		pr_err("diag: In %s, Invalid index %d\n", __func__, index);
+		return;
+	}
+
+	mempool = &diag_mempools[index];
+	spin_lock_irqsave(&mempool->lock, flags);
+	if (mempool->count == 0 && mempool->pool != NULL) {
+		mempool_destroy(mempool->pool);
+		mempool->pool = NULL;
+	} else {
+		pr_err("diag: Unable to destroy %s pool, count: %d\n",
+		       mempool->name, mempool->count);
+	}
+	spin_unlock_irqrestore(&mempool->lock, flags);
+}
+
diff -Nruw linux-4.4.115-fbx/drivers/char/diag./diagmem.h linux-4.4.115-fbx/drivers/char/diag/diagmem.h
--- linux-4.4.115-fbx/drivers/char/diag./diagmem.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/char/diag/diagmem.h	2019-01-22 16:16:22.963241516 +0100
@@ -0,0 +1,63 @@
+/* Copyright (c) 2008-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef DIAGMEM_H
+#define DIAGMEM_H
+#include "diagchar.h"
+
+#define POOL_TYPE_COPY			0
+#define POOL_TYPE_HDLC			1
+#define POOL_TYPE_USER			2
+#define POOL_TYPE_MUX_APPS		3
+#define POOL_TYPE_DCI			4
+#define POOL_TYPE_LOCAL_LAST		5
+
+#define POOL_TYPE_REMOTE_BASE		POOL_TYPE_LOCAL_LAST
+#define POOL_TYPE_MDM			POOL_TYPE_REMOTE_BASE
+#define POOL_TYPE_MDM2			(POOL_TYPE_REMOTE_BASE + 1)
+#define POOL_TYPE_MDM_DCI		(POOL_TYPE_REMOTE_BASE + 2)
+#define POOL_TYPE_MDM2_DCI		(POOL_TYPE_REMOTE_BASE + 3)
+#define POOL_TYPE_MDM_MUX		(POOL_TYPE_REMOTE_BASE + 4)
+#define POOL_TYPE_MDM2_MUX		(POOL_TYPE_REMOTE_BASE + 5)
+#define POOL_TYPE_MDM_DCI_WRITE		(POOL_TYPE_REMOTE_BASE + 6)
+#define POOL_TYPE_MDM2_DCI_WRITE	(POOL_TYPE_REMOTE_BASE + 7)
+#define POOL_TYPE_QSC_MUX		(POOL_TYPE_REMOTE_BASE + 8)
+#define POOL_TYPE_REMOTE_LAST		(POOL_TYPE_REMOTE_BASE + 9)
+
+#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
+#define NUM_MEMORY_POOLS		POOL_TYPE_REMOTE_LAST
+#else
+#define NUM_MEMORY_POOLS		POOL_TYPE_LOCAL_LAST
+#endif
+
+#define DIAG_MEMPOOL_NAME_SZ		24
+#define DIAG_MEMPOOL_GET_NAME(x)	(diag_mempools[x].name)
+
+struct diag_mempool_t {
+	int id;
+	char name[DIAG_MEMPOOL_NAME_SZ];
+	mempool_t *pool;
+	unsigned int itemsize;
+	unsigned int poolsize;
+	int count;
+	spinlock_t lock;
+} __packed;
+
+extern struct diag_mempool_t diag_mempools[NUM_MEMORY_POOLS];
+
+void diagmem_setsize(int pool_idx, int itemsize, int poolsize);
+void *diagmem_alloc(struct diagchar_dev *driver, int size, int pool_type);
+void diagmem_free(struct diagchar_dev *driver, void *buf, int pool_type);
+void diagmem_init(struct diagchar_dev *driver, int type);
+void diagmem_exit(struct diagchar_dev *driver, int type);
+
+#endif
diff -Nruw linux-4.4.115-fbx/drivers/char/diag./diag_memorydevice.c linux-4.4.115-fbx/drivers/char/diag/diag_memorydevice.c
--- linux-4.4.115-fbx/drivers/char/diag./diag_memorydevice.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/char/diag/diag_memorydevice.c	2019-10-29 09:26:23.449201280 +0100
@@ -0,0 +1,430 @@
+/* Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/sched.h>
+#include <linux/ratelimit.h>
+#include <linux/workqueue.h>
+#include <linux/diagchar.h>
+#include <linux/delay.h>
+#include <linux/kmemleak.h>
+#include <linux/uaccess.h>
+#include "diagchar.h"
+#include "diag_memorydevice.h"
+#include "diagfwd_bridge.h"
+#include "diag_mux.h"
+#include "diagmem.h"
+#include "diagfwd.h"
+#include "diagfwd_peripheral.h"
+#include "diag_ipc_logging.h"
+
+struct diag_md_info diag_md[NUM_DIAG_MD_DEV] = {
+	{
+		.id = DIAG_MD_LOCAL,
+		.ctx = 0,
+		.mempool = POOL_TYPE_MUX_APPS,
+		.num_tbl_entries = 0,
+		.tbl = NULL,
+		.ops = NULL,
+	},
+#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
+	{
+		.id = DIAG_MD_MDM,
+		.ctx = 0,
+		.mempool = POOL_TYPE_MDM_MUX,
+		.num_tbl_entries = 0,
+		.tbl = NULL,
+		.ops = NULL,
+	},
+	{
+		.id = DIAG_MD_MDM2,
+		.ctx = 0,
+		.mempool = POOL_TYPE_MDM2_MUX,
+		.num_tbl_entries = 0,
+		.tbl = NULL,
+		.ops = NULL,
+	},
+	{
+		.id = DIAG_MD_SMUX,
+		.ctx = 0,
+		.mempool = POOL_TYPE_QSC_MUX,
+		.num_tbl_entries = 0,
+		.tbl = NULL,
+		.ops = NULL,
+	}
+#endif
+};
+
+int diag_md_register(int id, int ctx, struct diag_mux_ops *ops)
+{
+	if (id < 0 || id >= NUM_DIAG_MD_DEV || !ops)
+		return -EINVAL;
+
+	diag_md[id].ops = ops;
+	diag_md[id].ctx = ctx;
+	return 0;
+}
+
+void diag_md_open_all(void)
+{
+	int i;
+	struct diag_md_info *ch = NULL;
+
+	for (i = 0; i < NUM_DIAG_MD_DEV; i++) {
+		ch = &diag_md[i];
+		if (ch->ops && ch->ops->open)
+			ch->ops->open(ch->ctx, DIAG_MEMORY_DEVICE_MODE);
+	}
+}
+
+void diag_md_close_all(void)
+{
+	int i, j;
+	unsigned long flags;
+	struct diag_md_info *ch = NULL;
+	struct diag_buf_tbl_t *entry = NULL;
+
+	for (i = 0; i < NUM_DIAG_MD_DEV; i++) {
+		ch = &diag_md[i];
+
+		if (ch->ops && ch->ops->close)
+			ch->ops->close(ch->ctx, DIAG_MEMORY_DEVICE_MODE);
+
+		/*
+		 * When we close the Memory device mode, make sure we flush the
+		 * internal buffers in the table so that there are no stale
+		 * entries.
+		 */
+		spin_lock_irqsave(&ch->lock, flags);
+		for (j = 0; j < ch->num_tbl_entries; j++) {
+			entry = &ch->tbl[j];
+			if (entry->len <= 0)
+				continue;
+			if (ch->ops && ch->ops->write_done)
+				ch->ops->write_done(entry->buf, entry->len,
+						    entry->ctx,
+						    DIAG_MEMORY_DEVICE_MODE);
+			entry->buf = NULL;
+			entry->len = 0;
+			entry->ctx = 0;
+		}
+		spin_unlock_irqrestore(&ch->lock, flags);
+	}
+
+	diag_ws_reset(DIAG_WS_MUX);
+}
+
+int diag_md_write(int id, unsigned char *buf, int len, int ctx)
+{
+	int i, pid = 0;
+	uint8_t found = 0;
+	unsigned long flags;
+	struct diag_md_info *ch = NULL;
+	int peripheral;
+	struct diag_md_session_t *session_info = NULL;
+
+	if (id < 0 || id >= NUM_DIAG_MD_DEV || id >= DIAG_NUM_PROC)
+		return -EINVAL;
+
+	if (!buf || len < 0)
+		return -EINVAL;
+
+	peripheral = diag_md_get_peripheral(ctx);
+	if (peripheral < 0)
+		return -EINVAL;
+
+	mutex_lock(&driver->md_session_lock);
+	session_info = diag_md_session_get_peripheral(peripheral);
+	if (!session_info) {
+		mutex_unlock(&driver->md_session_lock);
+		return -EIO;
+	}
+	pid = session_info->pid;
+	mutex_unlock(&driver->md_session_lock);
+
+	ch = &diag_md[id];
+
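+	/*
+	 * If this buffer is already queued in the table, drop the stale
+	 * entry and fail the write instead of queueing it twice.
+	 */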
+	spin_lock_irqsave(&ch->lock, flags);
+	for (i = 0; i < ch->num_tbl_entries && !found; i++) {
+		if (ch->tbl[i].buf != buf)
+			continue;
+		found = 1;
+		pr_err_ratelimited("diag: trying to write the same buffer buf: %pK, len: %d, back to the table for p: %d, t: %d, buf_num: %d, proc: %d, i: %d\n",
+				   buf, ch->tbl[i].len, GET_BUF_PERIPHERAL(ctx),
+				   GET_BUF_TYPE(ctx), GET_BUF_NUM(ctx), id, i);
+		ch->tbl[i].buf = NULL;
+		ch->tbl[i].len = 0;
+		ch->tbl[i].ctx = 0;
+	}
+	spin_unlock_irqrestore(&ch->lock, flags);
+
+	if (found)
+		return -ENOMEM;
+
+	spin_lock_irqsave(&ch->lock, flags);
+	for (i = 0; i < ch->num_tbl_entries && !found; i++) {
+		if (ch->tbl[i].len == 0) {
+			ch->tbl[i].buf = buf;
+			ch->tbl[i].len = len;
+			ch->tbl[i].ctx = ctx;
+			found = 1;
+			diag_ws_on_read(DIAG_WS_MUX, len);
+		}
+	}
+	spin_unlock_irqrestore(&ch->lock, flags);
+
+	if (!found) {
+		pr_err_ratelimited("diag: Unable to find an empty space in table, please reduce logging rate, proc: %d\n",
+				   id);
+		return -ENOMEM;
+	}
+
+	found = 0;
+	mutex_lock(&driver->diagchar_mutex);
+	for (i = 0; i < driver->num_clients && !found; i++) {
+		if ((driver->client_map[i].pid != pid) ||
+		    (driver->client_map[i].pid == 0))
+			continue;
+
+		found = 1;
+		if (!(driver->data_ready[i] & USER_SPACE_DATA_TYPE)) {
+			driver->data_ready[i] |= USER_SPACE_DATA_TYPE;
+			atomic_inc(&driver->data_ready_notif[i]);
+		}
+		pr_debug("diag: wake up logging process\n");
+		wake_up_interruptible(&driver->wait_q);
+	}
+	mutex_unlock(&driver->diagchar_mutex);
+
+	if (!found)
+		return -EINVAL;
+
+	return 0;
+}
+
+int diag_md_copy_to_user(char __user *buf, int *pret, size_t buf_size,
+			struct diag_md_session_t *info)
+{
+	int i, j;
+	int err = 0;
+	int ret = *pret;
+	int num_data = 0;
+	int remote_token;
+	unsigned long flags;
+	struct diag_md_info *ch = NULL;
+	struct diag_buf_tbl_t *entry = NULL;
+	uint8_t drain_again = 0;
+	int peripheral = 0;
+	struct diag_md_session_t *session_info = NULL;
+	struct pid *pid_struct = NULL;
+
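+	/*
+	 * Each entry drained to the client is framed as
+	 * [remote token (bridged devices only)][length][payload]; the number
+	 * of entries copied is written at offset sizeof(int) once the scan
+	 * completes.
+	 */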
+	for (i = 0; i < NUM_DIAG_MD_DEV && !err; i++) {
+		ch = &diag_md[i];
+		for (j = 0; j < ch->num_tbl_entries && !err; j++) {
+			entry = &ch->tbl[j];
+			if (entry->len <= 0 || entry->buf == NULL)
+				continue;
+
+			peripheral = diag_md_get_peripheral(entry->ctx);
+			if (peripheral < 0)
+				goto drop_data;
+
+			session_info =
+				diag_md_session_get_peripheral(peripheral);
+			if (!session_info)
+				goto drop_data;
+
+			if (info && session_info->pid != info->pid)
+				continue;
+			if (info && (info->peripheral_mask &
+				     MD_PERIPHERAL_MASK(peripheral)) == 0)
+				goto drop_data;
+			pid_struct = find_get_pid(session_info->pid);
+			if (!pid_struct) {
+				err = -ESRCH;
+				DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+					"diag: No such md_session_map[%d] with pid = %d err=%d exists..\n",
+					peripheral, session_info->pid, err);
+				goto drop_data;
+			}
+			/*
+			 * If the data is from remote processor, copy the remote
+			 * token first
+			 */
+			if (i > 0) {
+				if ((ret + (3 * sizeof(int)) + entry->len) >=
+							buf_size) {
+					drain_again = 1;
+					break;
+				}
+			} else {
+				if ((ret + (2 * sizeof(int)) + entry->len) >=
+						buf_size) {
+					drain_again = 1;
+					break;
+				}
+			}
+			if (i > 0) {
+				remote_token = diag_get_remote(i);
+				if (get_pid_task(pid_struct, PIDTYPE_PID)) {
+					err = copy_to_user(buf + ret,
+							&remote_token,
+							sizeof(int));
+					if (err)
+						goto drop_data;
+					ret += sizeof(int);
+				}
+			}
+
+			/* Copy the length of data being passed */
+			if (get_pid_task(pid_struct, PIDTYPE_PID)) {
+				err = copy_to_user(buf + ret,
+						(void *)&(entry->len),
+						sizeof(int));
+				if (err)
+					goto drop_data;
+				ret += sizeof(int);
+			}
+
+			/* Copy the actual data being passed */
+			if (get_pid_task(pid_struct, PIDTYPE_PID)) {
+				err = copy_to_user(buf + ret,
+						(void *)entry->buf,
+						entry->len);
+				if (err)
+					goto drop_data;
+				ret += entry->len;
+			}
+			/*
+			 * The data is now copied to the user space client,
+			 * Notify that the write is complete and delete its
+			 * entry from the table
+			 */
+			num_data++;
+drop_data:
+			spin_lock_irqsave(&ch->lock, flags);
+			if (ch->ops && ch->ops->write_done)
+				ch->ops->write_done(entry->buf, entry->len,
+						    entry->ctx,
+						    DIAG_MEMORY_DEVICE_MODE);
+			diag_ws_on_copy(DIAG_WS_MUX);
+			entry->buf = NULL;
+			entry->len = 0;
+			entry->ctx = 0;
+			spin_unlock_irqrestore(&ch->lock, flags);
+		}
+	}
+
+	*pret = ret;
+	if (pid_struct && get_pid_task(pid_struct, PIDTYPE_PID)) {
+		err = copy_to_user(buf + sizeof(int),
+				(void *)&num_data,
+				sizeof(int));
+	}
+	diag_ws_on_copy_complete(DIAG_WS_MUX);
+	if (drain_again)
+		chk_logging_wakeup();
+
+	return err;
+}
+
+int diag_md_close_peripheral(int id, uint8_t peripheral)
+{
+	int i;
+	uint8_t found = 0;
+	unsigned long flags;
+	struct diag_md_info *ch = NULL;
+	struct diag_buf_tbl_t *entry = NULL;
+
+	if (id < 0 || id >= NUM_DIAG_MD_DEV || id >= DIAG_NUM_PROC)
+		return -EINVAL;
+
+	ch = &diag_md[id];
+
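+	/*
+	 * IDs above NUM_PERIPHERALS denote user-PD sessions and are matched
+	 * on the PD context; regular peripherals are matched on the buffer's
+	 * peripheral field.
+	 */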
+	spin_lock_irqsave(&ch->lock, flags);
+	for (i = 0; i < ch->num_tbl_entries && !found; i++) {
+		entry = &ch->tbl[i];
+
+		if (peripheral > NUM_PERIPHERALS) {
+			if (GET_PD_CTXT(entry->ctx) != peripheral)
+				continue;
+		} else {
+			if (GET_BUF_PERIPHERAL(entry->ctx) !=
+					peripheral)
+				continue;
+		}
+		found = 1;
+		if (ch->ops && ch->ops->write_done) {
+			ch->ops->write_done(entry->buf, entry->len,
+					    entry->ctx,
+					    DIAG_MEMORY_DEVICE_MODE);
+			entry->buf = NULL;
+			entry->len = 0;
+			entry->ctx = 0;
+		}
+	}
+	spin_unlock_irqrestore(&ch->lock, flags);
+	return 0;
+}
+
+int diag_md_init(void)
+{
+	int i, j;
+	struct diag_md_info *ch = NULL;
+
+	for (i = 0; i < NUM_DIAG_MD_DEV; i++) {
+		ch = &diag_md[i];
+		ch->num_tbl_entries = diag_mempools[ch->mempool].poolsize;
+		ch->tbl = kcalloc(ch->num_tbl_entries,
+				  sizeof(struct diag_buf_tbl_t),
+				  GFP_KERNEL);
+		if (!ch->tbl)
+			goto fail;
+
+		spin_lock_init(&(ch->lock));
+	}
+
+	return 0;
+
+fail:
+	diag_md_exit();
+	return -ENOMEM;
+}
+
+void diag_md_exit(void)
+{
+	int i;
+	struct diag_md_info *ch = NULL;
+
+	for (i = 0; i < NUM_DIAG_MD_DEV; i++) {
+		ch = &diag_md[i];
+		kfree(ch->tbl);
+		ch->num_tbl_entries = 0;
+		ch->ops = NULL;
+	}
+}
diff -Nruw linux-4.4.115-fbx/drivers/char/diag./diag_memorydevice.h linux-4.4.115-fbx/drivers/char/diag/diag_memorydevice.h
--- linux-4.4.115-fbx/drivers/char/diag./diag_memorydevice.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/char/diag/diag_memorydevice.h	2019-10-29 09:26:23.449201280 +0100
@@ -0,0 +1,57 @@
+/* Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef DIAG_MEMORYDEVICE_H
+#define DIAG_MEMORYDEVICE_H
+
+#define DIAG_MD_LOCAL		0
+#define DIAG_MD_LOCAL_LAST	1
+#define DIAG_MD_BRIDGE_BASE	DIAG_MD_LOCAL_LAST
+#define DIAG_MD_MDM		(DIAG_MD_BRIDGE_BASE)
+#define DIAG_MD_MDM2		(DIAG_MD_BRIDGE_BASE + 1)
+#define DIAG_MD_SMUX		(DIAG_MD_BRIDGE_BASE + 2)
+#define DIAG_MD_BRIDGE_LAST	(DIAG_MD_BRIDGE_BASE + 3)
+
+#ifndef CONFIG_DIAGFWD_BRIDGE_CODE
+#define NUM_DIAG_MD_DEV		DIAG_MD_LOCAL_LAST
+#else
+#define NUM_DIAG_MD_DEV		DIAG_MD_BRIDGE_LAST
+#endif
+
+struct diag_buf_tbl_t {
+	unsigned char *buf;
+	int len;
+	int ctx;
+};
+
+struct diag_md_info {
+	int id;
+	int ctx;
+	int mempool;
+	int num_tbl_entries;
+	spinlock_t lock;
+	struct diag_buf_tbl_t *tbl;
+	struct diag_mux_ops *ops;
+};
+
+extern struct diag_md_info diag_md[NUM_DIAG_MD_DEV];
+
+int diag_md_init(void);
+void diag_md_exit(void);
+void diag_md_open_all(void);
+void diag_md_close_all(void);
+int diag_md_register(int id, int ctx, struct diag_mux_ops *ops);
+int diag_md_close_peripheral(int id, uint8_t peripheral);
+int diag_md_write(int id, unsigned char *buf, int len, int ctx);
+int diag_md_copy_to_user(char __user *buf, int *pret, size_t buf_size,
+			 struct diag_md_session_t *info);
+#endif
diff -Nruw linux-4.4.115-fbx/drivers/char/diag./diag_mux.c linux-4.4.115-fbx/drivers/char/diag/diag_mux.c
--- linux-4.4.115-fbx/drivers/char/diag./diag_mux.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/char/diag/diag_mux.c	2019-10-29 09:26:23.449201280 +0100
@@ -0,0 +1,288 @@
+/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/uaccess.h>
+#include <linux/diagchar.h>
+#include <linux/sched.h>
+#include <linux/err.h>
+#include <linux/delay.h>
+#include <linux/workqueue.h>
+#include <linux/pm_runtime.h>
+#include <linux/platform_device.h>
+#include <linux/spinlock.h>
+#include <linux/ratelimit.h>
+#include <linux/kmemleak.h>
+#include "diagchar.h"
+#include "diagfwd.h"
+#include "diag_mux.h"
+#include "diag_usb.h"
+#include "diag_memorydevice.h"
+#include "diag_ipc_logging.h"
+
+struct diag_mux_state_t *diag_mux;
+static struct diag_logger_t usb_logger;
+static struct diag_logger_t md_logger;
+
+static struct diag_logger_ops usb_log_ops = {
+	.open = diag_usb_connect_all,
+	.close = diag_usb_disconnect_all,
+	.queue_read = diag_usb_queue_read,
+	.write = diag_usb_write,
+	.close_peripheral = NULL
+};
+
+static struct diag_logger_ops md_log_ops = {
+	.open = diag_md_open_all,
+	.close = diag_md_close_all,
+	.queue_read = NULL,
+	.write = diag_md_write,
+	.close_peripheral = diag_md_close_peripheral,
+};
+
+int diag_mux_init(void)
+{
+	diag_mux = kzalloc(sizeof(struct diag_mux_state_t),
+			 GFP_KERNEL);
+	if (!diag_mux)
+		return -ENOMEM;
+	kmemleak_not_leak(diag_mux);
+
+	usb_logger.mode = DIAG_USB_MODE;
+	usb_logger.log_ops = &usb_log_ops;
+
+	md_logger.mode = DIAG_MEMORY_DEVICE_MODE;
+	md_logger.log_ops = &md_log_ops;
+	diag_md_init();
+
+	/*
+	 * Set USB logging as the default logger. This is the mode
+	 * Diag should be in when it initializes.
+	 */
+	diag_mux->usb_ptr = &usb_logger;
+	diag_mux->md_ptr = &md_logger;
+	diag_mux->logger = &usb_logger;
+	diag_mux->mux_mask = 0;
+	diag_mux->mode = DIAG_USB_MODE;
+	return 0;
+}
+
+void diag_mux_exit(void)
+{
+	kfree(diag_mux);
+}
+
+int diag_mux_register(int proc, int ctx, struct diag_mux_ops *ops)
+{
+	int err = 0;
+
+	if (!ops)
+		return -EINVAL;
+
+	if (proc < 0 || proc >= NUM_MUX_PROC)
+		return 0;
+
+	/* Register with USB logger */
+	usb_logger.ops[proc] = ops;
+	err = diag_usb_register(proc, ctx, ops);
+	if (err) {
+		pr_err("diag: MUX: unable to register usb operations for proc: %d, err: %d\n",
+		       proc, err);
+		return err;
+	}
+
+	md_logger.ops[proc] = ops;
+	err = diag_md_register(proc, ctx, ops);
+	if (err) {
+		pr_err("diag: MUX: unable to register md operations for proc: %d, err: %d\n",
+		       proc, err);
+		return err;
+	}
+
+	return 0;
+}
+
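+/*
+ * In DIAG_MULTI_MODE reads are always queued on the USB logger; the
+ * memory-device path has no queue_read and is drained by the client.
+ */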
+int diag_mux_queue_read(int proc)
+{
+	struct diag_logger_t *logger = NULL;
+
+	if (proc < 0 || proc >= NUM_MUX_PROC)
+		return -EINVAL;
+	if (!diag_mux)
+		return -EIO;
+
+	if (diag_mux->mode == DIAG_MULTI_MODE)
+		logger = diag_mux->usb_ptr;
+	else
+		logger = diag_mux->logger;
+
+	if (logger && logger->log_ops && logger->log_ops->queue_read)
+		return logger->log_ops->queue_read(proc);
+
+	return 0;
+}
+
+int diag_mux_write(int proc, unsigned char *buf, int len, int ctx)
+{
+	struct diag_logger_t *logger = NULL;
+	int peripheral, upd;
+
+	if (proc < 0 || proc >= NUM_MUX_PROC)
+		return -EINVAL;
+	if (!diag_mux)
+		return -EIO;
+
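+	/*
+	 * Traffic tagged with a user-PD context is routed on behalf of its
+	 * host peripheral unless per-PD logging has been enabled for that
+	 * PD.
+	 */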
+	upd = GET_PD_CTXT(ctx);
+	if (upd) {
+		switch (upd) {
+		case DIAG_ID_MPSS:
+			upd = PERIPHERAL_MODEM;
+			break;
+		case DIAG_ID_LPASS:
+			upd = PERIPHERAL_LPASS;
+			break;
+		case DIAG_ID_CDSP:
+			upd = PERIPHERAL_CDSP;
+			break;
+		case UPD_WLAN:
+			if (!driver->pd_logging_mode[0])
+				upd = PERIPHERAL_MODEM;
+			break;
+		case UPD_AUDIO:
+			if (!driver->pd_logging_mode[1])
+				upd = PERIPHERAL_LPASS;
+			break;
+		case UPD_SENSORS:
+			if (!driver->pd_logging_mode[2])
+				upd = PERIPHERAL_LPASS;
+			break;
+		default:
+			pr_err("diag: invalid pd ctxt= %d\n", upd);
+			return -EINVAL;
+		}
+		if (((MD_PERIPHERAL_MASK(upd)) &
+			(diag_mux->mux_mask)) &&
+			driver->md_session_map[upd])
+			logger = diag_mux->md_ptr;
+		else
+			logger = diag_mux->usb_ptr;
+	} else {
+		peripheral = GET_BUF_PERIPHERAL(ctx);
+		if (peripheral > NUM_PERIPHERALS)
+			return -EINVAL;
+
+		if (MD_PERIPHERAL_MASK(peripheral) &
+			diag_mux->mux_mask)
+			logger = diag_mux->md_ptr;
+		else
+			logger = diag_mux->usb_ptr;
+	}
+
+	if (logger && logger->log_ops && logger->log_ops->write)
+		return logger->log_ops->write(proc, buf, len, ctx);
+	return 0;
+}
+
+int diag_mux_close_peripheral(int proc, uint8_t peripheral)
+{
+	struct diag_logger_t *logger = NULL;
+
+	if (proc < 0 || proc >= NUM_MUX_PROC)
+		return -EINVAL;
+
+	/* Peripheral should account for Apps data as well */
+	if (peripheral > NUM_PERIPHERALS) {
+		if (driver->num_pd_session) {
+			if (peripheral > NUM_MD_SESSIONS)
+				return -EINVAL;
+		} else {
+			return -EINVAL;
+		}
+	}
+
+	if (!diag_mux)
+		return -EIO;
+
+	if (MD_PERIPHERAL_MASK(peripheral) & diag_mux->mux_mask)
+		logger = diag_mux->md_ptr;
+	else
+		logger = diag_mux->logger;
+
+	if (logger && logger->log_ops && logger->log_ops->close_peripheral)
+		return logger->log_ops->close_peripheral(proc, peripheral);
+	return 0;
+}
+
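+/*
+ * Recompute the peripheral routing mask for a logging mode switch. A request
+ * that moves only some peripherals to USB (mask still non-empty) or only
+ * some to memory-device logging (mask not yet covering everything) degrades
+ * to DIAG_MULTI_MODE, where both loggers stay active.
+ */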
+int diag_mux_switch_logging(int *req_mode, int *peripheral_mask)
+{
+	unsigned int new_mask = 0;
+
+	if (!req_mode)
+		return -EINVAL;
+
+	if (*peripheral_mask <= 0 ||
+		(*peripheral_mask > (DIAG_CON_ALL | DIAG_CON_UPD_ALL))) {
+		pr_err("diag: Invalid mask %d in %s\n",
+		       *peripheral_mask, __func__);
+		return -EINVAL;
+	}
+
+	switch (*req_mode) {
+	case DIAG_USB_MODE:
+		new_mask = ~(*peripheral_mask) & diag_mux->mux_mask;
+		if (new_mask != DIAG_CON_NONE)
+			*req_mode = DIAG_MULTI_MODE;
+		break;
+	case DIAG_MEMORY_DEVICE_MODE:
+		new_mask = (*peripheral_mask) | diag_mux->mux_mask;
+		if (new_mask != DIAG_CON_ALL)
+			*req_mode = DIAG_MULTI_MODE;
+		break;
+	default:
+		pr_err("diag: Invalid mode %d in %s\n", *req_mode, __func__);
+		return -EINVAL;
+	}
+
+	switch (diag_mux->mode) {
+	case DIAG_USB_MODE:
+		if (*req_mode == DIAG_MEMORY_DEVICE_MODE) {
+			diag_mux->usb_ptr->log_ops->close();
+			diag_mux->logger = diag_mux->md_ptr;
+			diag_mux->md_ptr->log_ops->open();
+		} else if (*req_mode == DIAG_MULTI_MODE) {
+			diag_mux->md_ptr->log_ops->open();
+			diag_mux->logger = NULL;
+		}
+		break;
+	case DIAG_MEMORY_DEVICE_MODE:
+		if (*req_mode == DIAG_USB_MODE) {
+			diag_mux->md_ptr->log_ops->close();
+			diag_mux->logger = diag_mux->usb_ptr;
+			diag_mux->usb_ptr->log_ops->open();
+		} else if (*req_mode == DIAG_MULTI_MODE) {
+			diag_mux->usb_ptr->log_ops->open();
+			diag_mux->logger = NULL;
+		}
+		break;
+	case DIAG_MULTI_MODE:
+		if (*req_mode == DIAG_USB_MODE) {
+			diag_mux->md_ptr->log_ops->close();
+			diag_mux->logger = diag_mux->usb_ptr;
+		} else if (*req_mode == DIAG_MEMORY_DEVICE_MODE) {
+			diag_mux->usb_ptr->log_ops->close();
+			diag_mux->logger = diag_mux->md_ptr;
+		}
+		break;
+	}
+	diag_mux->mode = *req_mode;
+	diag_mux->mux_mask = new_mask;
+	*peripheral_mask = new_mask;
+	return 0;
+}
diff -Nruw linux-4.4.115-fbx/drivers/char/diag./diag_mux.h linux-4.4.115-fbx/drivers/char/diag/diag_mux.h
--- linux-4.4.115-fbx/drivers/char/diag./diag_mux.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/char/diag/diag_mux.h	2019-01-22 16:16:22.955241444 +0100
@@ -0,0 +1,76 @@
+/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#ifndef DIAG_MUX_H
+#define DIAG_MUX_H
+#include "diagchar.h"
+
+struct diag_mux_state_t {
+	struct diag_logger_t *logger;
+	struct diag_logger_t *usb_ptr;
+	struct diag_logger_t *md_ptr;
+	unsigned int mux_mask;
+	unsigned int mode;
+};
+
+struct diag_mux_ops {
+	int (*open)(int id, int mode);
+	int (*close)(int id, int mode);
+	int (*read_done)(unsigned char *buf, int len, int id);
+	int (*write_done)(unsigned char *buf, int len, int buf_ctx,
+			      int id);
+};
+
+#define DIAG_USB_MODE			0
+#define DIAG_MEMORY_DEVICE_MODE		1
+#define DIAG_NO_LOGGING_MODE		2
+#define DIAG_MULTI_MODE			3
+
+#define DIAG_MUX_LOCAL		0
+#define DIAG_MUX_LOCAL_LAST	1
+#define DIAG_MUX_BRIDGE_BASE	DIAG_MUX_LOCAL_LAST
+#define DIAG_MUX_MDM		(DIAG_MUX_BRIDGE_BASE)
+#define DIAG_MUX_MDM2		(DIAG_MUX_BRIDGE_BASE + 1)
+#define DIAG_MUX_SMUX		(DIAG_MUX_BRIDGE_BASE + 2)
+#define DIAG_MUX_BRIDGE_LAST	(DIAG_MUX_BRIDGE_BASE + 3)
+
+#ifndef CONFIG_DIAGFWD_BRIDGE_CODE
+#define NUM_MUX_PROC		DIAG_MUX_LOCAL_LAST
+#else
+#define NUM_MUX_PROC		DIAG_MUX_BRIDGE_LAST
+#endif
+
+struct diag_logger_ops {
+	void (*open)(void);
+	void (*close)(void);
+	int (*queue_read)(int id);
+	int (*write)(int id, unsigned char *buf, int len, int ctx);
+	int (*close_peripheral)(int id, uint8_t peripheral);
+};
+
+struct diag_logger_t {
+	int mode;
+	struct diag_mux_ops *ops[NUM_MUX_PROC];
+	struct diag_logger_ops *log_ops;
+};
+
+extern struct diag_mux_state_t *diag_mux;
+
+int diag_mux_init(void);
+void diag_mux_exit(void);
+int diag_mux_register(int proc, int ctx, struct diag_mux_ops *ops);
+int diag_mux_queue_read(int proc);
+int diag_mux_write(int proc, unsigned char *buf, int len, int ctx);
+int diag_mux_close_peripheral(int proc, uint8_t peripheral);
+int diag_mux_open_all(struct diag_logger_t *logger);
+int diag_mux_close_all(void);
+int diag_mux_switch_logging(int *new_mode, int *peripheral_mask);
+#endif
diff -Nruw linux-4.4.115-fbx/drivers/char/diag./diag_usb.h linux-4.4.115-fbx/drivers/char/diag/diag_usb.h
--- linux-4.4.115-fbx/drivers/char/diag./diag_usb.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/char/diag/diag_usb.h	2019-01-22 16:16:22.955241444 +0100
@@ -0,0 +1,110 @@
+/* Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef DIAGUSB_H
+#define DIAGUSB_H
+
+#ifdef CONFIG_DIAG_OVER_USB
+#include <linux/usb/usbdiag.h>
+#endif
+#include "diagchar.h"
+#include "diag_mux.h"
+
+#define DIAG_USB_LOCAL		0
+#define DIAG_USB_LOCAL_LAST	1
+#define DIAG_USB_BRIDGE_BASE	DIAG_USB_LOCAL_LAST
+#define DIAG_USB_MDM		(DIAG_USB_BRIDGE_BASE)
+#define DIAG_USB_MDM2		(DIAG_USB_BRIDGE_BASE + 1)
+#define DIAG_USB_QSC		(DIAG_USB_BRIDGE_BASE + 2)
+#define DIAG_USB_BRIDGE_LAST	(DIAG_USB_BRIDGE_BASE + 3)
+
+#ifndef CONFIG_DIAGFWD_BRIDGE_CODE
+#define NUM_DIAG_USB_DEV	DIAG_USB_LOCAL_LAST
+#else
+#define NUM_DIAG_USB_DEV	DIAG_USB_BRIDGE_LAST
+#endif
+
+#define DIAG_USB_NAME_SZ	24
+#define DIAG_USB_GET_NAME(x)	(diag_usb[x].name)
+
+#define DIAG_USB_MODE		0
+
+struct diag_usb_buf_tbl_t {
+	struct list_head track;
+	unsigned char *buf;
+	uint32_t len;
+	atomic_t ref_count;
+	int ctxt;
+};
+
+struct diag_usb_info {
+	int id;
+	int ctxt;
+	char name[DIAG_USB_NAME_SZ];
+	atomic_t connected;
+	atomic_t diag_state;
+	atomic_t read_pending;
+	int enabled;
+	int mempool;
+	int max_size;
+	struct list_head buf_tbl;
+	unsigned long read_cnt;
+	unsigned long write_cnt;
+	spinlock_t lock;
+	spinlock_t write_lock;
+	struct usb_diag_ch *hdl;
+	struct diag_mux_ops *ops;
+	unsigned char *read_buf;
+	struct diag_request *read_ptr;
+	struct work_struct read_work;
+	struct work_struct read_done_work;
+	struct work_struct connect_work;
+	struct work_struct disconnect_work;
+	struct workqueue_struct *usb_wq;
+};
+
+#ifdef CONFIG_DIAG_OVER_USB
+extern struct diag_usb_info diag_usb[NUM_DIAG_USB_DEV];
+int diag_usb_register(int id, int ctxt, struct diag_mux_ops *ops);
+int diag_usb_queue_read(int id);
+int diag_usb_write(int id, unsigned char *buf, int len, int ctxt);
+void diag_usb_connect_all(void);
+void diag_usb_disconnect_all(void);
+void diag_usb_exit(int id);
+#else
+static inline int diag_usb_register(int id, int ctxt, struct diag_mux_ops *ops)
+{
+	return 0;
+}
+static inline int diag_usb_queue_read(int id)
+{
+	return 0;
+}
+static inline int diag_usb_write(int id, unsigned char *buf, int len, int ctxt)
+{
+	return 0;
+}
+static inline void diag_usb_connect_all(void)
+{
+}
+static inline void diag_usb_disconnect_all(void)
+{
+}
+static inline void diag_usb_exit(int id)
+{
+}
+#endif
+
+#endif
diff -Nruw linux-4.4.115-fbx/drivers/char/diag./Kconfig linux-4.4.115-fbx/drivers/char/diag/Kconfig
--- linux-4.4.115-fbx/drivers/char/diag./Kconfig	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/char/diag/Kconfig	2019-10-29 09:26:23.445201240 +0100
@@ -0,0 +1,33 @@
+menu "Diag Support"
+
+config DIAG_CHAR
+	tristate "char driver interface and diag forwarding to/from modem"
+	default m
+	depends on ARCH_QCOM
+	depends on POWER_RESET_QCOM
+	select CRC_CCITT
+	help
+	 Char driver interface for diag userspace and diag forwarding to the
+	 modem ARM and back. Enables diagchar for the maemo or android USB
+	 gadget, depending on the selected config.
+endmenu
+
+menu "DIAG traffic over USB"
+
+config DIAG_OVER_USB
+	bool "Enable DIAG traffic to go over USB"
+	depends on USB_CONFIGFS_F_DIAG || USB_FUNCTION_DIAG || USB_QCOM_MAEMO
+	depends on ARCH_QCOM
+	default y
+	help
+	 Segregates the code required to route DIAG traffic over USB.
+endmenu
+
+menu "HSIC/SMUX support for DIAG"
+
+config DIAGFWD_BRIDGE_CODE
+	bool "Enable QSC/9K DIAG traffic over SMUX/HSIC"
+	depends on USB_QCOM_DIAG_BRIDGE || MSM_MHI
+	default y
+	help
+	 SMUX/HSIC Transport Layer for DIAG Router
+endmenu
diff -Nruw linux-4.4.115-fbx/drivers/char/diag./Makefile linux-4.4.115-fbx/drivers/char/diag/Makefile
--- linux-4.4.115-fbx/drivers/char/diag./Makefile	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/char/diag/Makefile	2019-10-29 09:26:23.445201240 +0100
@@ -0,0 +1,7 @@
+obj-$(CONFIG_DIAG_CHAR) := diagchar.o
+obj-$(CONFIG_DIAGFWD_BRIDGE_CODE) += diagfwd_bridge.o
+obj-$(CONFIG_USB_QCOM_DIAG_BRIDGE) += diagfwd_hsic.o
+obj-$(CONFIG_USB_QCOM_DIAG_BRIDGE) += diagfwd_smux.o
+obj-$(CONFIG_MSM_MHI) += diagfwd_mhi.o
+obj-$(CONFIG_DIAG_OVER_USB) += diag_usb.o
+diagchar-objs := diagchar_core.o diagchar_hdlc.o diagfwd.o diagfwd_glink.o diagfwd_peripheral.o diagfwd_smd.o diagfwd_socket.o diag_mux.o diag_memorydevice.o diagmem.o diagfwd_cntl.o diag_dci.o diag_masks.o diag_debugfs.o
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/char/hw_random/msm_rng.c	2019-01-22 16:16:22.967241553 +0100
@@ -0,0 +1,482 @@
+/*
+ * Copyright (c) 2011-2013, 2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/hw_random.h>
+#include <linux/clk.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/err.h>
+#include <linux/types.h>
+#include <soc/qcom/socinfo.h>
+#include <linux/msm-bus.h>
+#include <linux/qrng.h>
+#include <linux/fs.h>
+#include <linux/cdev.h>
+#include <linux/delay.h>
+#include <linux/crypto.h>
+#include <crypto/internal/rng.h>
+
+#include <linux/platform_data/qcom_crypto_device.h>
+
+#define DRIVER_NAME "msm_rng"
+
+/* Device specific register offsets */
+#define PRNG_DATA_OUT_OFFSET    0x0000
+#define PRNG_STATUS_OFFSET	0x0004
+#define PRNG_LFSR_CFG_OFFSET	0x0100
+#define PRNG_CONFIG_OFFSET	0x0104
+
+/* Device specific register masks and config values */
+#define PRNG_LFSR_CFG_MASK	0xFFFF0000
+#define PRNG_LFSR_CFG_CLOCKS	0x0000DDDD
+#define PRNG_CONFIG_MASK	0xFFFFFFFD
+#define PRNG_HW_ENABLE		0x00000002
+
+#define MAX_HW_FIFO_DEPTH 16                     /* FIFO is 16 words deep */
+#define MAX_HW_FIFO_SIZE (MAX_HW_FIFO_DEPTH * 4) /* FIFO is 32 bits wide  */
+
+struct msm_rng_device {
+	struct platform_device *pdev;
+	void __iomem *base;
+	struct clk *prng_clk;
+	uint32_t qrng_perf_client;
+	struct mutex rng_lock;
+};
+
+struct msm_rng_device msm_rng_device_info;
+static struct msm_rng_device *msm_rng_dev_cached;
+struct mutex cached_rng_lock;
+
+static long msm_rng_ioctl(struct file *filp, unsigned int cmd,
+				unsigned long arg)
+{
+	long ret = 0;
+
+	switch (cmd) {
+	case QRNG_IOCTL_RESET_BUS_BANDWIDTH:
+		pr_info("calling msm_rng_bus_scale(LOW)\n");
+		ret = msm_bus_scale_client_update_request(
+				msm_rng_device_info.qrng_perf_client, 0);
+		if (ret)
+			pr_err("failed qrng_reset_bus_bw, ret = %ld\n", ret);
+		break;
+	default:
+		pr_err("Unsupported IOCTL call\n");
+		break;
+	}
+	return ret;
+}
+
+/*
+ * This function reads from the hardware random bit generator directly and
+ * returns the data to the caller.
+ */
+static int msm_rng_direct_read(struct msm_rng_device *msm_rng_dev,
+					void *data, size_t max)
+{
+	struct platform_device *pdev;
+	void __iomem *base;
+	size_t currsize = 0;
+	u32 val;
+	u32 *retdata = data;
+	int ret;
+	int failed = 0;
+
+	pdev = msm_rng_dev->pdev;
+	base = msm_rng_dev->base;
+
+	/* no room for word data */
+	if (max < 4)
+		return 0;
+
+	mutex_lock(&msm_rng_dev->rng_lock);
+
+	if (msm_rng_dev->qrng_perf_client) {
+		ret = msm_bus_scale_client_update_request(
+				msm_rng_dev->qrng_perf_client, 1);
+		if (ret)
+			pr_err("bus_scale_client_update_req failed!\n");
+	}
+	/* enable PRNG clock */
+	ret = clk_prepare_enable(msm_rng_dev->prng_clk);
+	if (ret) {
+		dev_err(&pdev->dev, "failed to enable clock in callback\n");
+		goto err;
+	}
+	/* read random data from h/w */
+	do {
+		/* check status bit if data is available */
+		while (!(readl_relaxed(base + PRNG_STATUS_OFFSET)
+				& 0x00000001)) {
+			if (failed == 10) {
+				pr_err("Data not available after retry\n");
+				break;
+			}
+			pr_err("msm_rng:Data not available!\n");
+			msleep_interruptible(10);
+			failed++;
+		}
+
+		/* read FIFO */
+		val = readl_relaxed(base + PRNG_DATA_OUT_OFFSET);
+		if (!val)
+			break;	/* no data to read so just bail */
+
+		/* write data back to callers pointer */
+		*(retdata++) = val;
+		currsize += 4;
+		/* make sure we stay on 32bit boundary */
+		if ((max - currsize) < 4)
+			break;
+
+	} while (currsize < max);
+
+	/* vote to turn off clock */
+	clk_disable_unprepare(msm_rng_dev->prng_clk);
+err:
+	if (msm_rng_dev->qrng_perf_client) {
+		ret = msm_bus_scale_client_update_request(
+				msm_rng_dev->qrng_perf_client, 0);
+		if (ret)
+			pr_err("bus_scale_client_update_req failed!\n");
+	}
+	mutex_unlock(&msm_rng_dev->rng_lock);
+
+	val = 0L;
+	return currsize;
+}
+
+static int msm_rng_read(struct hwrng *rng, void *data, size_t max, bool wait)
+{
+	struct msm_rng_device *msm_rng_dev;
+	int rv = 0;
+
+	msm_rng_dev = (struct msm_rng_device *)rng->priv;
+	rv = msm_rng_direct_read(msm_rng_dev, data, max);
+
+	return rv;
+}
+
+static struct hwrng msm_rng = {
+	.name = DRIVER_NAME,
+	.read = msm_rng_read,
+	.quality = 700,
+};
+
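+/*
+ * One-time hardware enable: program the LFSR clock configuration, then set
+ * the enable bit in the CONFIG register, with barriers so the two writes
+ * reach the device in order. The PRNG clock is held only across the
+ * register accesses.
+ */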
+static int msm_rng_enable_hw(struct msm_rng_device *msm_rng_dev)
+{
+	unsigned long val = 0;
+	unsigned long reg_val = 0;
+	int ret = 0;
+
+	if (msm_rng_dev->qrng_perf_client) {
+		ret = msm_bus_scale_client_update_request(
+				msm_rng_dev->qrng_perf_client, 1);
+		if (ret)
+			pr_err("bus_scale_client_update_req failed!\n");
+	}
+	/* Enable the PRNG CLK */
+	ret = clk_prepare_enable(msm_rng_dev->prng_clk);
+	if (ret) {
+		dev_err(&(msm_rng_dev->pdev)->dev,
+				"failed to enable clock in probe\n");
+		return -EPERM;
+	}
+
+	/* Enable PRNG h/w only if it is NOT ON */
+	val = readl_relaxed(msm_rng_dev->base + PRNG_CONFIG_OFFSET) &
+					PRNG_HW_ENABLE;
+	/* PRNG H/W is not ON */
+	if (val != PRNG_HW_ENABLE) {
+		val = readl_relaxed(msm_rng_dev->base + PRNG_LFSR_CFG_OFFSET);
+		val &= PRNG_LFSR_CFG_MASK;
+		val |= PRNG_LFSR_CFG_CLOCKS;
+		writel_relaxed(val, msm_rng_dev->base + PRNG_LFSR_CFG_OFFSET);
+
+		/* The LFSR config must be written before the CONFIG register */
+		mb();
+
+		reg_val = readl_relaxed(msm_rng_dev->base + PRNG_CONFIG_OFFSET)
+						& PRNG_CONFIG_MASK;
+		reg_val |= PRNG_HW_ENABLE;
+		writel_relaxed(reg_val, msm_rng_dev->base + PRNG_CONFIG_OFFSET);
+
+		/*
+		 * The PRNG clk should be disabled only after we enable the
+		 * PRNG h/w by writing to the PRNG CONFIG register.
+		 */
+		mb();
+	}
+	clk_disable_unprepare(msm_rng_dev->prng_clk);
+
+	if (msm_rng_dev->qrng_perf_client) {
+		ret = msm_bus_scale_client_update_request(
+				msm_rng_dev->qrng_perf_client, 0);
+		if (ret)
+			pr_err("bus_scale_client_update_req failed!\n");
+	}
+
+	return 0;
+}
+
+static const struct file_operations msm_rng_fops = {
+	.unlocked_ioctl = msm_rng_ioctl,
+};
+
+static struct class *msm_rng_class;
+static struct cdev msm_rng_cdev;
+
+static int msm_rng_probe(struct platform_device *pdev)
+{
+	struct resource *res;
+	struct msm_rng_device *msm_rng_dev = NULL;
+	void __iomem *base = NULL;
+	bool configure_qrng = true;
+	int error = 0;
+	int ret = 0;
+	struct device *dev;
+
+	struct msm_bus_scale_pdata *qrng_platform_support = NULL;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (res == NULL) {
+		dev_err(&pdev->dev, "invalid address\n");
+		error = -EFAULT;
+		goto err_exit;
+	}
+
+	msm_rng_dev = kzalloc(sizeof(struct msm_rng_device), GFP_KERNEL);
+	if (!msm_rng_dev) {
+		dev_err(&pdev->dev, "cannot allocate memory\n");
+		error = -ENOMEM;
+		goto err_exit;
+	}
+
+	base = ioremap(res->start, resource_size(res));
+	if (!base) {
+		dev_err(&pdev->dev, "ioremap failed\n");
+		error = -ENOMEM;
+		goto err_iomap;
+	}
+	msm_rng_dev->base = base;
+
+	/* create a handle for clock control */
+	if ((pdev->dev.of_node) && (of_property_read_bool(pdev->dev.of_node,
+					"qcom,msm-rng-iface-clk")))
+		msm_rng_dev->prng_clk = clk_get(&pdev->dev,
+							"iface_clk");
+	else
+		msm_rng_dev->prng_clk = clk_get(&pdev->dev, "core_clk");
+	if (IS_ERR(msm_rng_dev->prng_clk)) {
+		dev_err(&pdev->dev, "failed to register clock source\n");
+		error = -EPERM;
+		goto err_clk_get;
+	}
+
+	/* save away pdev and register driver data */
+	msm_rng_dev->pdev = pdev;
+	platform_set_drvdata(pdev, msm_rng_dev);
+
+	if (pdev->dev.of_node) {
+		/* Register bus client */
+		qrng_platform_support = msm_bus_cl_get_pdata(pdev);
+		msm_rng_dev->qrng_perf_client = msm_bus_scale_register_client(
+						qrng_platform_support);
+		msm_rng_device_info.qrng_perf_client =
+					msm_rng_dev->qrng_perf_client;
+		if (!msm_rng_dev->qrng_perf_client)
+			pr_err("Unable to register bus client\n");
+	}
+
+	/* Enable rng h/w for the targets which can access the entire
+	 * address space of PRNG.
+	 */
+	if ((pdev->dev.of_node) && (of_property_read_bool(pdev->dev.of_node,
+					"qcom,no-qrng-config")))
+		configure_qrng = false;
+	if (configure_qrng) {
+		error = msm_rng_enable_hw(msm_rng_dev);
+		if (error)
+			goto rollback_clk;
+	}
+
+	mutex_init(&msm_rng_dev->rng_lock);
+	mutex_init(&cached_rng_lock);
+
+	/* register with hwrng framework */
+	msm_rng.priv = (unsigned long) msm_rng_dev;
+	error = hwrng_register(&msm_rng);
+	if (error) {
+		dev_err(&pdev->dev, "failed to register hwrng\n");
+		error = -EPERM;
+		goto rollback_clk;
+	}
+	ret = register_chrdev(QRNG_IOC_MAGIC, DRIVER_NAME, &msm_rng_fops);
+
+	msm_rng_class = class_create(THIS_MODULE, "msm-rng");
+	if (IS_ERR(msm_rng_class)) {
+		pr_err("class_create failed\n");
+		error = PTR_ERR(msm_rng_class);
+		goto unregister_chrdev;
+	}
+
+	dev = device_create(msm_rng_class, NULL, MKDEV(QRNG_IOC_MAGIC, 0),
+				NULL, "msm-rng");
+	if (IS_ERR(dev)) {
+		pr_err("Device create failed\n");
+		error = PTR_ERR(dev);
+		goto unregister_chrdev;
+	}
+	cdev_init(&msm_rng_cdev, &msm_rng_fops);
+	msm_rng_dev_cached = msm_rng_dev;
+	return error;
+
+unregister_chrdev:
+	unregister_chrdev(QRNG_IOC_MAGIC, DRIVER_NAME);
+	hwrng_unregister(&msm_rng);
+rollback_clk:
+	clk_put(msm_rng_dev->prng_clk);
+err_clk_get:
+	iounmap(msm_rng_dev->base);
+err_iomap:
+	kzfree(msm_rng_dev);
+err_exit:
+	return error;
+}
+
+static int msm_rng_remove(struct platform_device *pdev)
+{
+	struct msm_rng_device *msm_rng_dev = platform_get_drvdata(pdev);
+
+	unregister_chrdev(QRNG_IOC_MAGIC, DRIVER_NAME);
+	hwrng_unregister(&msm_rng);
+	clk_put(msm_rng_dev->prng_clk);
+	iounmap(msm_rng_dev->base);
+	platform_set_drvdata(pdev, NULL);
+	if (msm_rng_dev->qrng_perf_client)
+		msm_bus_scale_unregister_client(msm_rng_dev->qrng_perf_client);
+
+	kzfree(msm_rng_dev);
+	msm_rng_dev_cached = NULL;
+	return 0;
+}
+
+static int qrng_get_random(struct crypto_rng *tfm, const u8 *src,
+				unsigned int slen, u8 *rdata,
+				unsigned int dlen)
+{
+	int sizeread = 0;
+	int rv = -EFAULT;
+
+	if (!msm_rng_dev_cached) {
+		pr_err("%s: msm_rng_dev is not initialized.\n", __func__);
+		rv = -ENODEV;
+		goto err_exit;
+	}
+
+	if (!rdata) {
+		pr_err("%s: data buffer is null!\n", __func__);
+		rv = -EINVAL;
+		goto err_exit;
+	}
+
+	if (signal_pending(current) ||
+		mutex_lock_interruptible(&cached_rng_lock)) {
+		pr_err("%s: mutex lock interrupted!\n", __func__);
+		rv = -ERESTARTSYS;
+		goto err_exit;
+	}
+	sizeread = msm_rng_direct_read(msm_rng_dev_cached, rdata, dlen);
+
+	if (sizeread == dlen)
+		rv = 0;
+
+	mutex_unlock(&cached_rng_lock);
+err_exit:
+	return rv;
+
+}
+
+static int qrng_reset(struct crypto_rng *tfm, const u8 *seed, unsigned int slen)
+{
+	return 0;
+}
+
+static struct rng_alg rng_algs[] = { {
+	.generate	= qrng_get_random,
+	.seed		= qrng_reset,
+	.seedsize	= 0,
+	.base		= {
+		.cra_name		= "qrng",
+		.cra_driver_name	= "fips_hw_qrng",
+		.cra_priority		= 300,
+		.cra_ctxsize		= 0,
+		.cra_module		= THIS_MODULE,
+	}
+} };
+
+static const struct of_device_id qrng_match[] = {
+	{ .compatible = "qcom,msm-rng" },
+	{}
+};
+
+static struct platform_driver rng_driver = {
+	.probe      = msm_rng_probe,
+	.remove     = msm_rng_remove,
+	.driver     = {
+		.name   = DRIVER_NAME,
+		.owner  = THIS_MODULE,
+		.of_match_table = qrng_match,
+	}
+};
+
+static int __init msm_rng_init(void)
+{
+	int ret;
+
+	msm_rng_dev_cached = NULL;
+	ret = platform_driver_register(&rng_driver);
+	if (ret) {
+		pr_err("%s: platform_driver_register error:%d\n",
+			__func__, ret);
+		goto err_exit;
+	}
+	ret = crypto_register_rngs(rng_algs, ARRAY_SIZE(rng_algs));
+	if (ret) {
+		pr_err("%s: crypto_register_rngs error:%d\n",
+			__func__, ret);
+		platform_driver_unregister(&rng_driver);
+	}
+
+err_exit:
+	return ret;
+}
+
+module_init(msm_rng_init);
+
+static void __exit msm_rng_exit(void)
+{
+	crypto_unregister_rngs(rng_algs, ARRAY_SIZE(rng_algs));
+	platform_driver_unregister(&rng_driver);
+}
+
+module_exit(msm_rng_exit);
+
+MODULE_AUTHOR("The Linux Foundation");
+MODULE_DESCRIPTION("Qualcomm MSM Random Number Driver");
+MODULE_LICENSE("GPL v2");
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/char/rdbg.c	2019-01-22 16:16:22.983241698 +0100
@@ -0,0 +1,1173 @@
+/*
+ * Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/cdev.h>
+#include <linux/device.h>
+#include <linux/fs.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/of_gpio.h>
+#include <soc/qcom/smsm.h>
+#include <linux/uaccess.h>
+#include <linux/interrupt.h>
+
+#define SMP2P_NUM_PROCS				16
+#define MAX_RETRIES				20
+
+#define SM_VERSION				1
+#define SM_BLOCKSIZE				128
+
+#define SMQ_MAGIC_INIT				0xFF00FF00
+#define SMQ_MAGIC_PRODUCER			(SMQ_MAGIC_INIT | 0x1)
+#define SMQ_MAGIC_CONSUMER			(SMQ_MAGIC_INIT | 0x2)
+
+enum SMQ_STATUS {
+	SMQ_SUCCESS    =  0,
+	SMQ_ENOMEMORY  = -1,
+	SMQ_EBADPARM   = -2,
+	SMQ_UNDERFLOW  = -3,
+	SMQ_OVERFLOW   = -4
+};
+
+enum smq_type {
+	PRODUCER = 1,
+	CONSUMER = 2,
+	INVALID  = 3
+};
+
+struct smq_block_map {
+	uint32_t index_read;
+	uint32_t num_blocks;
+	uint8_t *map;
+};
+
+struct smq_node {
+	uint16_t index_block;
+	uint16_t num_blocks;
+} __attribute__ ((__packed__));
+
+struct smq_hdr {
+	uint8_t producer_version;
+	uint8_t consumer_version;
+} __attribute__ ((__packed__));
+
+struct smq_out_state {
+	uint32_t init;
+	uint32_t index_check_queue_for_reset;
+	uint32_t index_sent_write;
+	uint32_t index_free_read;
+} __attribute__ ((__packed__));
+
+struct smq_out {
+	struct smq_out_state s;
+	struct smq_node sent[1];
+};
+
+struct smq_in_state {
+	uint32_t init;
+	uint32_t index_check_queue_for_reset_ack;
+	uint32_t index_sent_read;
+	uint32_t index_free_write;
+} __attribute__ ((__packed__));
+
+struct smq_in {
+	struct smq_in_state s;
+	struct smq_node free[1];
+};
+
+struct smq {
+	struct smq_hdr *hdr;
+	struct smq_out *out;
+	struct smq_in *in;
+	uint8_t *blocks;
+	uint32_t num_blocks;
+	struct mutex *lock;
+	uint32_t initialized;
+	struct smq_block_map block_map;
+	enum smq_type type;
+};
+
+struct gpio_info {
+	int gpio_base_id;
+	int irq_base_id;
+};
+
+struct rdbg_data {
+	struct device *device;
+	struct completion work;
+	struct gpio_info in;
+	struct gpio_info out;
+	bool   device_initialized;
+	int    gpio_out_offset;
+	bool   device_opened;
+	void   *smem_addr;
+	size_t smem_size;
+	struct smq    producer_smrb;
+	struct smq    consumer_smrb;
+	struct mutex  write_mutex;
+};
+
+struct rdbg_device {
+	struct cdev cdev;
+	struct class *class;
+	dev_t dev_no;
+	int num_devices;
+	struct rdbg_data *rdbg_data;
+};
+
+static struct rdbg_device g_rdbg_instance = {
+	{ {0} },
+	NULL,
+	0,
+	SMP2P_NUM_PROCS,
+	NULL
+};
+
+struct processor_specific_info {
+	char *name;
+	unsigned int smem_buffer_addr;
+	size_t smem_buffer_size;
+};
+
+static struct processor_specific_info proc_info[SMP2P_NUM_PROCS] = {
+		{0},	/*APPS*/
+		{"rdbg_modem", 0, 0},	/*MODEM*/
+		{"rdbg_adsp", SMEM_LC_DEBUGGER, 16*1024},	/*ADSP*/
+		{0},	/*SMP2P_RESERVED_PROC_1*/
+		{"rdbg_wcnss", 0, 0},		/*WCNSS*/
+		{"rdbg_cdsp", SMEM_LC_DEBUGGER, 16*1024},		/*CDSP*/
+		{NULL},	/*SMP2P_POWER_PROC*/
+		{NULL},	/*SMP2P_TZ_PROC*/
+		{NULL},	/*EMPTY*/
+		{NULL},	/*EMPTY*/
+		{NULL},	/*EMPTY*/
+		{NULL},	/*EMPTY*/
+		{NULL},	/*EMPTY*/
+		{NULL},	/*EMPTY*/
+		{NULL},	/*EMPTY*/
+		{NULL}		/*SMP2P_REMOTE_MOCK_PROC*/
+};
+
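+/*
+ * Find n free contiguous blocks in the shared-memory block map, scanning
+ * circularly from index_read. For n > 1 the run length is recorded in the
+ * map as a countdown (n, n-1, ..., 1) so that smq_blockmap_put() can free
+ * an entire run given its first index.
+ */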
+static int smq_blockmap_get(struct smq_block_map *block_map,
+	uint32_t *block_index, uint32_t n)
+{
+	uint32_t start;
+	uint32_t mark = 0;
+	uint32_t found = 0;
+	uint32_t i = 0;
+
+	start = block_map->index_read;
+
+	if (n == 1) {
+		do {
+			if (!block_map->map[block_map->index_read]) {
+				*block_index = block_map->index_read;
+				block_map->map[block_map->index_read] = 1;
+				block_map->index_read++;
+				block_map->index_read %= block_map->num_blocks;
+				return SMQ_SUCCESS;
+			}
+			block_map->index_read++;
+		} while (start != (block_map->index_read %=
+			block_map->num_blocks));
+	} else {
+		mark = block_map->num_blocks;
+
+		do {
+			if (!block_map->map[block_map->index_read]) {
+				if (mark > block_map->index_read) {
+					mark = block_map->index_read;
+					start = block_map->index_read;
+					found = 0;
+				}
+
+				found++;
+				if (found == n) {
+					*block_index = mark;
+					for (i = 0; i < n; i++)
+						block_map->map[mark + i] =
+							(uint8_t)(n - i);
+					block_map->index_read += block_map->map
+						[block_map->index_read] - 1;
+					return SMQ_SUCCESS;
+				}
+			} else {
+				found = 0;
+				block_map->index_read += block_map->map
+					[block_map->index_read] - 1;
+				mark = block_map->num_blocks;
+			}
+			block_map->index_read++;
+		} while (start != (block_map->index_read %=
+			block_map->num_blocks));
+	}
+
+	return SMQ_ENOMEMORY;
+}
+
+static void smq_blockmap_put(struct smq_block_map *block_map, uint32_t i)
+{
+	uint32_t num_blocks = block_map->map[i];
+
+	while (num_blocks--) {
+		block_map->map[i] = 0;
+		i++;
+	}
+}
+
+static int smq_blockmap_reset(struct smq_block_map *block_map)
+{
+	if (!block_map->map)
+		return SMQ_ENOMEMORY;
+	memset(block_map->map, 0, block_map->num_blocks + 1);
+	block_map->index_read = 0;
+
+	return SMQ_SUCCESS;
+}
+
+static int smq_blockmap_ctor(struct smq_block_map *block_map,
+	uint32_t num_blocks)
+{
+	if (num_blocks <= 1)
+		return SMQ_ENOMEMORY;
+
+	block_map->map = kcalloc(num_blocks, sizeof(uint8_t), GFP_KERNEL);
+	if (!block_map->map)
+		return SMQ_ENOMEMORY;
+
+	block_map->num_blocks = num_blocks - 1;
+	smq_blockmap_reset(block_map);
+
+	return SMQ_SUCCESS;
+}
+
+static void smq_blockmap_dtor(struct smq_block_map *block_map)
+{
+	kfree(block_map->map);
+	block_map->map = NULL;
+}
+
+static int smq_free(struct smq *smq, void *data)
+{
+	struct smq_node node;
+	uint32_t index_block;
+	int err = SMQ_SUCCESS;
+
+	if (smq->lock)
+		mutex_lock(smq->lock);
+
+	if ((smq->hdr->producer_version != SM_VERSION) &&
+		(smq->out->s.init != SMQ_MAGIC_PRODUCER)) {
+		err = SMQ_UNDERFLOW;
+		goto bail;
+	}
+
+	index_block = ((uint8_t *)data - smq->blocks) / SM_BLOCKSIZE;
+	if (index_block >= smq->num_blocks) {
+		err = SMQ_EBADPARM;
+		goto bail;
+	}
+
+	node.index_block = (uint16_t)index_block;
+	node.num_blocks = 0;
+	*((struct smq_node *)(smq->in->free + smq->in->
+		s.index_free_write)) = node;
+
+	smq->in->s.index_free_write = (smq->in->s.index_free_write + 1)
+		% smq->num_blocks;
+
+bail:
+	if (smq->lock)
+		mutex_unlock(smq->lock);
+	return err;
+}
+
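+/*
+ * Pop one message from the sent ring: translate the node's block index
+ * into a pointer/length pair for the caller and report through *pbmore
+ * whether further messages are already queued.
+ */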
+static int smq_receive(struct smq *smq, void **pp, int *pnsize, int *pbmore)
+{
+	struct smq_node *node;
+	int err = SMQ_SUCCESS;
+	int more = 0;
+
+	if ((smq->hdr->producer_version != SM_VERSION) &&
+		(smq->out->s.init != SMQ_MAGIC_PRODUCER))
+		return SMQ_UNDERFLOW;
+
+	if (smq->in->s.index_sent_read == smq->out->s.index_sent_write) {
+		err = SMQ_UNDERFLOW;
+		goto bail;
+	}
+
+	node = (struct smq_node *)(smq->out->sent + smq->in->s.index_sent_read);
+	if (node->index_block >= smq->num_blocks) {
+		err = SMQ_EBADPARM;
+		goto bail;
+	}
+
+	smq->in->s.index_sent_read = (smq->in->s.index_sent_read + 1)
+		% smq->num_blocks;
+
+	*pp = smq->blocks + (node->index_block * SM_BLOCKSIZE);
+	*pnsize = SM_BLOCKSIZE * node->num_blocks;
+
+	/*
+	 * Ensure that the reads and writes are updated in the memory
+	 * when they are done and not cached. Also, ensure that the reads
+	 * and writes are not reordered as they are shared between two cores.
+	 */
+	rmb();
+	if (smq->in->s.index_sent_read != smq->out->s.index_sent_write)
+		more = 1;
+
+bail:
+	*pbmore = more;
+	return err;
+}
+
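+/*
+ * Producer-side enqueue (a summary of the logic below): first reclaim any
+ * blocks the consumer has returned on the free ring, then grab enough
+ * contiguous blocks from the block map, copy the payload in from user
+ * space, and finally publish a node on the sent ring for the consumer to
+ * pick up.
+ */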
+static int smq_alloc_send(struct smq *smq, const uint8_t *pcb, int nsize)
+{
+	void *pv = NULL;
+	int num_blocks;
+	uint32_t index_block = 0;
+	int err = SMQ_SUCCESS;
+	struct smq_node *node = NULL;
+
+	mutex_lock(smq->lock);
+
+	if ((smq->in->s.init == SMQ_MAGIC_CONSUMER) &&
+	 (smq->hdr->consumer_version == SM_VERSION)) {
+		if (smq->out->s.index_check_queue_for_reset ==
+			smq->in->s.index_check_queue_for_reset_ack) {
+			while (smq->out->s.index_free_read !=
+				smq->in->s.index_free_write) {
+				node = (struct smq_node *)(
+					smq->in->free +
+					smq->out->s.index_free_read);
+				if (node->index_block >= smq->num_blocks) {
+					err = SMQ_EBADPARM;
+					goto bail;
+				}
+
+				smq->out->s.index_free_read =
+					(smq->out->s.index_free_read + 1)
+						% smq->num_blocks;
+
+				smq_blockmap_put(&smq->block_map,
+					node->index_block);
+				/*
+				 * Ensure that the reads and writes are
+				 * updated in the memory when they are done
+				 * and not cached. Also, ensure that the reads
+				 * and writes are not reordered as they are
+				 * shared between two cores.
+				 */
+				rmb();
+			}
+		}
+	}
+
+	num_blocks = ALIGN(nsize, SM_BLOCKSIZE)/SM_BLOCKSIZE;
+	err = smq_blockmap_get(&smq->block_map, &index_block, num_blocks);
+	if (err != SMQ_SUCCESS)
+		goto bail;
+
+	pv = smq->blocks + (SM_BLOCKSIZE * index_block);
+
+	err = copy_from_user((void *)pv, (void *)pcb, nsize);
+	if (err != 0)
+		goto bail;
+
+	((struct smq_node *)(smq->out->sent +
+		smq->out->s.index_sent_write))->index_block
+			= (uint16_t)index_block;
+	((struct smq_node *)(smq->out->sent +
+		smq->out->s.index_sent_write))->num_blocks
+			= (uint16_t)num_blocks;
+
+	smq->out->s.index_sent_write = (smq->out->s.index_sent_write + 1)
+		% smq->num_blocks;
+
+bail:
+	if (err != SMQ_SUCCESS) {
+		if (pv)
+			smq_blockmap_put(&smq->block_map, index_block);
+	}
+	mutex_unlock(smq->lock);
+	return err;
+}
+
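+/*
+ * Queue-reset handshake (as implemented below and in smq_ctor): when the
+ * producer re-attaches it bumps index_check_queue_for_reset; the consumer
+ * notices that it differs from its _ack copy, rewinds its own indices,
+ * acknowledges the new value, and asks the producer side to rewind as well.
+ * The reset number makes the sequence idempotent per reset.
+ */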
+static int smq_reset_producer_queue_internal(struct smq *smq,
+	uint32_t reset_num)
+{
+	int retval = 0;
+	uint32_t i;
+
+	if (smq->type != PRODUCER)
+		goto bail;
+
+	mutex_lock(smq->lock);
+	if (smq->out->s.index_check_queue_for_reset != reset_num) {
+		smq->out->s.index_check_queue_for_reset = reset_num;
+		for (i = 0; i < smq->num_blocks; i++)
+			(smq->out->sent + i)->index_block = 0xFFFF;
+
+		smq_blockmap_reset(&smq->block_map);
+		smq->out->s.index_sent_write = 0;
+		smq->out->s.index_free_read = 0;
+		retval = 1;
+	}
+	mutex_unlock(smq->lock);
+
+bail:
+	return retval;
+}
+
+static int smq_check_queue_reset(struct smq *p_cons, struct smq *p_prod)
+{
+	int retval = 0;
+	uint32_t reset_num, i;
+
+	if ((p_cons->type != CONSUMER) ||
+		(p_cons->out->s.init != SMQ_MAGIC_PRODUCER) ||
+		(p_cons->hdr->producer_version != SM_VERSION))
+		goto bail;
+
+	reset_num = p_cons->out->s.index_check_queue_for_reset;
+	if (p_cons->in->s.index_check_queue_for_reset_ack != reset_num) {
+		p_cons->in->s.index_check_queue_for_reset_ack = reset_num;
+		for (i = 0; i < p_cons->num_blocks; i++)
+			(p_cons->in->free + i)->index_block = 0xFFFF;
+
+		p_cons->in->s.index_sent_read = 0;
+		p_cons->in->s.index_free_write = 0;
+
+		retval = smq_reset_producer_queue_internal(p_prod, reset_num);
+	}
+
+bail:
+	return retval;
+}
+
+static int check_subsystem_debug_enabled(void *base_addr, int size)
+{
+	int num_blocks;
+	uint8_t *pb_orig;
+	uint8_t *pb;
+	struct smq smq;
+	int err = 0;
+
+	pb = pb_orig = (uint8_t *)base_addr;
+	pb += sizeof(struct smq_hdr);
+	pb = PTR_ALIGN(pb, 8);
+	size -= pb - (uint8_t *)pb_orig;
+	num_blocks = (int)((size - sizeof(struct smq_out_state) -
+		sizeof(struct smq_in_state))/(SM_BLOCKSIZE +
+		sizeof(struct smq_node) * 2));
+	if (num_blocks <= 0) {
+		err = SMQ_EBADPARM;
+		goto bail;
+	}
+
+	pb += num_blocks * SM_BLOCKSIZE;
+	smq.out = (struct smq_out *)pb;
+	pb += sizeof(struct smq_out_state) + (num_blocks *
+		sizeof(struct smq_node));
+	smq.in = (struct smq_in *)pb;
+
+	if (smq.in->s.init != SMQ_MAGIC_CONSUMER) {
+		pr_err("%s, smq in consumer not initialized", __func__);
+		err = -ECOMM;
+	}
+
+bail:
+	return err;
+}
+
+static void smq_dtor(struct smq *smq)
+{
+	if (smq->initialized == SMQ_MAGIC_INIT) {
+		switch (smq->type) {
+		case PRODUCER:
+			smq->out->s.init = 0;
+			smq_blockmap_dtor(&smq->block_map);
+			break;
+		case CONSUMER:
+			smq->in->s.init = 0;
+			break;
+		default:
+		case INVALID:
+			break;
+		}
+
+		smq->initialized = 0;
+	}
+}
+
+/*
+ * The shared memory is used as a circular ring buffer in each direction.
+ * Thus we have a bi-directional shared memory channel between the AP
+ * and a subsystem. We call this SMQ. Each memory channel contains a header,
+ * data and a control mechanism that is used to synchronize read and write
+ * of data between the AP and the remote subsystem.
+ *
+ * Overall SMQ memory view:
+ *
+ *    +------------------------------------------------+
+ *    | SMEM buffer                                    |
+ *    |-----------------------+------------------------|
+ *    |Producer: LA           | Producer: Remote       |
+ *    |Consumer: Remote       |           subsystem    |
+ *    |          subsystem    | Consumer: LA           |
+ *    |                       |                        |
+ *    |               Producer|                Consumer|
+ *    +-----------------------+------------------------+
+ *    |                       |
+ *    |                       |
+ *    |                       +--------------------------------------+
+ *    |                                                              |
+ *    |                                                              |
+ *    v                                                              v
+ *    +--------------------------------------------------------------+
+ *    |   Header  |       Data      |            Control             |
+ *    +-----------+---+---+---+-----+----+--+--+-----+---+--+--+-----+
+ *    |           | b | b | b |     | S  |n |n |     | S |n |n |     |
+ *    |  Producer | l | l | l |     | M  |o |o |     | M |o |o |     |
+ *    |    Ver    | o | o | o |     | Q  |d |d |     | Q |d |d |     |
+ *    |-----------| c | c | c | ... |    |e |e | ... |   |e |e | ... |
+ *    |           | k | k | k |     | O  |  |  |     | I |  |  |     |
+ *    |  Consumer |   |   |   |     | u  |0 |1 |     | n |0 |1 |     |
+ *    |    Ver    | 0 | 1 | 2 |     | t  |  |  |     |   |  |  |     |
+ *    +-----------+---+---+---+-----+----+--+--+-----+---+--+--+-----+
+ *                                       |           |
+ *                                       +           |
+ *                                                   |
+ *                          +------------------------+
+ *                          |
+ *                          v
+ *                        +----+----+----+----+
+ *                        | SMQ Nodes         |
+ *                        |----|----|----|----|
+ *                 Node # |  0 |  1 |  2 | ...|
+ *                        |----|----|----|----|
+ * Starting Block Index # |  0 |  3 |  8 | ...|
+ *                        |----|----|----|----|
+ *            # of blocks |  3 |  5 |  1 | ...|
+ *                        +----+----+----+----+
+ *
+ * Header: Contains version numbers for software compatibility to ensure
+ * that both producers and consumers on the AP and subsystems know how to
+ * read from and write to the queue.
+ * Both the producer and consumer versions are 1.
+ *     +---------+-------------------+
+ *     | Size    | Field             |
+ *     +---------+-------------------+
+ *     | 1 byte  | Producer Version  |
+ *     +---------+-------------------+
+ *     | 1 byte  | Consumer Version  |
+ *     +---------+-------------------+
+ *
+ * Data: The data portion contains multiple blocks [0..N] of a fixed size.
+ * The block size SM_BLOCKSIZE is fixed to 128 bytes for header version #1.
+ * Payload sent from the debug agent app is split (if necessary) and placed
+ * in these blocks. The first data block is placed at the next 8 byte aligned
+ * address after the header.
+ *
+ * The number of blocks for a given SMEM allocation is derived as follows
+ * (each block also needs one node in the sent list and one in the free list):
+ *   Number of Blocks = (Total Size - Alignment - Size of Header
+ *		- Size of SMQIn - Size of SMQOut)
+ *		/ (SM_BLOCKSIZE + 2 * Size of SMQ Node)
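+ *
+ * For example, with a hypothetical 64 KiB half-buffer: the 2-byte header is
+ * padded to 8 bytes, SMQOut and SMQIn each take 16 bytes (four 4-byte
+ * fields) and a node takes 4 bytes, giving
+ *   (65536 - 8 - 16 - 16) / (128 + 2 * 4) = 481 blocks.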
+ *
+ * The producer maintains a private block map over these blocks to
+ * track which blocks in the queue are in use and which are free.
+ *
+ * Control:
+ * The control portion contains a list of nodes [0..N] where N is number
+ * of available data blocks. Each node identifies the data
+ * block indexes that contain a particular debug message to be transferred,
+ * and the number of blocks it took to hold the contents of the message.
+ *
+ * Each node has the following structure:
+ *     +---------+-------------------+
+ *     | Size    | Field             |
+ *     +---------+-------------------+
+ *     | 2 bytes |Start Block Index  |
+ *     +---------+-------------------+
+ *     | 2 bytes |Number of Blocks   |
+ *     +---------+-------------------+
+ *
+ * The producer and the consumer update different parts of the control channel
+ * (SMQOut / SMQIn) respectively. Each of these control data structures contains
+ * information about the last node that was written / read, and the actual nodes
+ * that were written/read.
+ *
+ * SMQOut Structure (R/W by producer, R by consumer):
+ *     +---------+-------------------+
+ *     | Size    | Field             |
+ *     +---------+-------------------+
+ *     | 4 bytes | Magic Init Number |
+ *     +---------+-------------------+
+ *     | 4 bytes | Reset             |
+ *     +---------+-------------------+
+ *     | 4 bytes | Last Sent Index   |
+ *     +---------+-------------------+
+ *     | 4 bytes | Index Free Read   |
+ *     +---------+-------------------+
+ *
+ * SMQIn Structure (R/W by consumer, R by producer):
+ *     +---------+-------------------+
+ *     | Size    | Field             |
+ *     +---------+-------------------+
+ *     | 4 bytes | Magic Init Number |
+ *     +---------+-------------------+
+ *     | 4 bytes | Reset ACK         |
+ *     +---------+-------------------+
+ *     | 4 bytes | Last Read Index   |
+ *     +---------+-------------------+
+ *     | 4 bytes | Index Free Write  |
+ *     +---------+-------------------+
+ *
+ * Magic Init Number:
+ * Both SMQ Out and SMQ In initialize this field with a predefined magic
+ * number so as to make sure that both the consumer and producer blocks
+ * have fully initialized and have valid data in the shared memory control area.
+ *	Producer Magic #: 0xFF00FF01
+ *	Consumer Magic #: 0xFF00FF02
+ */
+static int smq_ctor(struct smq *smq, void *base_addr, int size,
+	enum smq_type type, struct mutex *lock_ptr)
+{
+	int num_blocks;
+	uint8_t *pb_orig;
+	uint8_t *pb;
+	uint32_t i;
+	int err;
+
+	if (smq->initialized == SMQ_MAGIC_INIT) {
+		err = SMQ_EBADPARM;
+		goto bail;
+	}
+
+	if (!base_addr || !size) {
+		err = SMQ_EBADPARM;
+		goto bail;
+	}
+
+	if (type == PRODUCER)
+		smq->lock = lock_ptr;
+
+	pb_orig = (uint8_t *)base_addr;
+	smq->hdr = (struct smq_hdr *)pb_orig;
+	pb = pb_orig;
+	pb += sizeof(struct smq_hdr);
+	pb = PTR_ALIGN(pb, 8);
+	size -= pb - (uint8_t *)pb_orig;
+	num_blocks = (int)((size - sizeof(struct smq_out_state) -
+		sizeof(struct smq_in_state))/(SM_BLOCKSIZE +
+		sizeof(struct smq_node) * 2));
+	if (num_blocks <= 0) {
+		err = SMQ_ENOMEMORY;
+		goto bail;
+	}
+
+	smq->blocks = pb;
+	smq->num_blocks = num_blocks;
+	pb += num_blocks * SM_BLOCKSIZE;
+	smq->out = (struct smq_out *)pb;
+	pb += sizeof(struct smq_out_state) + (num_blocks *
+		sizeof(struct smq_node));
+	smq->in = (struct smq_in *)pb;
+	smq->type = type;
+	if (type == PRODUCER) {
+		smq->hdr->producer_version = SM_VERSION;
+		for (i = 0; i < smq->num_blocks; i++)
+			(smq->out->sent + i)->index_block = 0xFFFF;
+
+		err = smq_blockmap_ctor(&smq->block_map, smq->num_blocks);
+		if (err != SMQ_SUCCESS)
+			goto bail;
+
+		smq->out->s.index_sent_write = 0;
+		smq->out->s.index_free_read = 0;
+		if (smq->out->s.init == SMQ_MAGIC_PRODUCER) {
+			smq->out->s.index_check_queue_for_reset += 1;
+		} else {
+			smq->out->s.index_check_queue_for_reset = 1;
+			smq->out->s.init = SMQ_MAGIC_PRODUCER;
+		}
+	} else {
+		smq->hdr->consumer_version = SM_VERSION;
+		for (i = 0; i < smq->num_blocks; i++)
+			(smq->in->free + i)->index_block = 0xFFFF;
+
+		smq->in->s.index_sent_read = 0;
+		smq->in->s.index_free_write = 0;
+		if (smq->out->s.init == SMQ_MAGIC_PRODUCER) {
+			smq->in->s.index_check_queue_for_reset_ack =
+				smq->out->s.index_check_queue_for_reset;
+		} else {
+			smq->in->s.index_check_queue_for_reset_ack = 0;
+		}
+
+		smq->in->s.init = SMQ_MAGIC_CONSUMER;
+	}
+	smq->initialized = SMQ_MAGIC_INIT;
+	err = SMQ_SUCCESS;
+
+bail:
+	return err;
+}
+
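+/*
+ * Interrupting the subsystem: the outgoing smp2p entry gives us a bank of
+ * 32 GPIO bits, and the code below toggles a different bit on each send
+ * (rotating through the offsets mod 32), so every call produces an edge
+ * the remote side can latch even if sends arrive back to back.
+ */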
+static void send_interrupt_to_subsystem(struct rdbg_data *rdbgdata)
+{
+	int offset = rdbgdata->gpio_out_offset;
+	int val = 1 ^ gpio_get_value(rdbgdata->out.gpio_base_id + offset);
+
+	gpio_set_value(rdbgdata->out.gpio_base_id + offset, val);
+	rdbgdata->gpio_out_offset = (offset + 1) % 32;
+
+	dev_dbg(rdbgdata->device, "%s: sent interrupt %d to subsystem",
+		__func__, val);
+}
+
+static irqreturn_t on_interrupt_from(int irq, void *ptr)
+{
+	struct rdbg_data *rdbgdata = (struct rdbg_data *) ptr;
+
+	dev_dbg(rdbgdata->device, "%s: Received interrupt %d from subsystem",
+		__func__, irq);
+
+	complete(&(rdbgdata->work));
+	return IRQ_HANDLED;
+}
+
+static int initialize_smq(struct rdbg_data *rdbgdata)
+{
+	int err = 0;
+	unsigned char *smem_consumer_buffer = rdbgdata->smem_addr;
+
+	smem_consumer_buffer += (rdbgdata->smem_size/2);
+
+	if (smq_ctor(&(rdbgdata->producer_smrb), (void *)(rdbgdata->smem_addr),
+		((rdbgdata->smem_size)/2), PRODUCER, &rdbgdata->write_mutex)) {
+		dev_err(rdbgdata->device, "%s: smq producer allocation failed",
+			__func__);
+		err = -ENOMEM;
+		goto bail;
+	}
+
+	if (smq_ctor(&(rdbgdata->consumer_smrb), (void *)smem_consumer_buffer,
+		((rdbgdata->smem_size)/2), CONSUMER, NULL)) {
+		dev_err(rdbgdata->device, "%s: smq conmsumer allocation failed",
+			__func__);
+		err = -ENOMEM;
+	}
+
+bail:
+	return err;
+
+}
+
+static int rdbg_open(struct inode *inode, struct file *filp)
+{
+	int device_id = -1;
+	struct rdbg_device *device = &g_rdbg_instance;
+	struct rdbg_data *rdbgdata = NULL;
+	int err = 0;
+
+	if (!inode || !device->rdbg_data) {
+		pr_err("Memory not allocated yet");
+		err = -ENODEV;
+		goto bail;
+	}
+
+	device_id = MINOR(inode->i_rdev);
+	rdbgdata = &device->rdbg_data[device_id];
+
+	if (rdbgdata->device_opened) {
+		dev_err(rdbgdata->device, "%s: Device already opened",
+			__func__);
+		err = -EEXIST;
+		goto bail;
+	}
+
+	rdbgdata->smem_size = proc_info[device_id].smem_buffer_size;
+	if (!rdbgdata->smem_size) {
+		dev_err(rdbgdata->device, "%s: smem not initialized", __func__);
+		err = -ENOMEM;
+		goto bail;
+	}
+
+	rdbgdata->smem_addr = smem_find(proc_info[device_id].smem_buffer_addr,
+		rdbgdata->smem_size, 0, SMEM_ANY_HOST_FLAG);
+	if (!rdbgdata->smem_addr) {
+		dev_err(rdbgdata->device, "%s: Could not allocate smem memory",
+			__func__);
+		err = -ENOMEM;
+		goto bail;
+	}
+	dev_dbg(rdbgdata->device, "%s: SMEM address=0x%lx smem_size=%d",
+		__func__, (unsigned long)rdbgdata->smem_addr,
+		(unsigned int)rdbgdata->smem_size);
+
+	if (check_subsystem_debug_enabled(rdbgdata->smem_addr,
+		rdbgdata->smem_size/2)) {
+		dev_err(rdbgdata->device, "%s: Subsystem %s is not debug enabled",
+			__func__, proc_info[device_id].name);
+		err = -ECOMM;
+		goto bail;
+	}
+
+	init_completion(&rdbgdata->work);
+
+	err = request_irq(rdbgdata->in.irq_base_id, on_interrupt_from,
+			IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
+			proc_info[device_id].name,
+			(void *)&device->rdbg_data[device_id]);
+	if (err) {
+		dev_err(rdbgdata->device,
+			"%s: Failed to register interrupt.Err=%d,irqid=%d.",
+			__func__, err, rdbgdata->in.irq_base_id);
+		goto irq_bail;
+	}
+
+	err = enable_irq_wake(rdbgdata->in.irq_base_id);
+	if (err < 0) {
+		dev_dbg(rdbgdata->device, "enable_irq_wake() failed with err=%d",
+			err);
+		err = 0;
+	}
+
+	mutex_init(&rdbgdata->write_mutex);
+
+	err = initialize_smq(rdbgdata);
+	if (err) {
+		dev_err(rdbgdata->device, "Error initializing smq. Err=%d",
+			err);
+		goto smq_bail;
+	}
+
+	rdbgdata->device_opened = 1;
+
+	filp->private_data = (void *)rdbgdata;
+
+	return 0;
+
+smq_bail:
+	smq_dtor(&(rdbgdata->producer_smrb));
+	smq_dtor(&(rdbgdata->consumer_smrb));
+	mutex_destroy(&rdbgdata->write_mutex);
+irq_bail:
+	free_irq(rdbgdata->in.irq_base_id, (void *)
+		&device->rdbg_data[device_id]);
+bail:
+	return err;
+}
+
+static int rdbg_release(struct inode *inode, struct file *filp)
+{
+	int device_id = -1;
+	struct rdbg_device *rdbgdevice = &g_rdbg_instance;
+	struct rdbg_data *rdbgdata = NULL;
+	int err = 0;
+
+	if (!inode || !rdbgdevice->rdbg_data) {
+		pr_err("Memory not allocated yet");
+		err = -ENODEV;
+		goto bail;
+	}
+
+	device_id = MINOR(inode->i_rdev);
+	rdbgdata = &rdbgdevice->rdbg_data[device_id];
+
+	if (rdbgdata->device_opened == 1) {
+		dev_dbg(rdbgdata->device, "%s: Destroying %s.", __func__,
+			proc_info[device_id].name);
+		rdbgdata->device_opened = 0;
+		complete(&(rdbgdata->work));
+		free_irq(rdbgdata->in.irq_base_id, (void *)
+			&rdbgdevice->rdbg_data[device_id]);
+		if (rdbgdevice->rdbg_data[device_id].producer_smrb.initialized)
+			smq_dtor(&(rdbgdevice->rdbg_data[device_id].
+				producer_smrb));
+		if (rdbgdevice->rdbg_data[device_id].consumer_smrb.initialized)
+			smq_dtor(&(rdbgdevice->rdbg_data[device_id].
+				consumer_smrb));
+		mutex_destroy(&rdbgdata->write_mutex);
+	}
+
+	filp->private_data = NULL;
+
+bail:
+	return err;
+}
+
+static ssize_t rdbg_read(struct file *filp, char __user *buf, size_t size,
+	loff_t *offset)
+{
+	int err = 0;
+	struct rdbg_data *rdbgdata = filp->private_data;
+	void *p_sent_buffer = NULL;
+	int nsize = 0;
+	int more = 0;
+
+	if (!rdbgdata) {
+		pr_err("Invalid argument");
+		err = -EINVAL;
+		goto bail;
+	}
+
+	dev_dbg(rdbgdata->device, "%s: In receive", __func__);
+	err = wait_for_completion_interruptible(&(rdbgdata->work));
+	if (err) {
+		dev_err(rdbgdata->device, "%s: Error in wait", __func__);
+		goto bail;
+	}
+
+	smq_check_queue_reset(&(rdbgdata->consumer_smrb),
+		&(rdbgdata->producer_smrb));
+	err = smq_receive(&(rdbgdata->consumer_smrb), &p_sent_buffer,
+			&nsize, &more);
+	if (err != SMQ_SUCCESS) {
+		dev_err(rdbgdata->device, "%s: Error in smq_receive(). Err code = %d",
+			__func__, err);
+		err = -ENODATA;
+		goto bail;
+	}
+
+	size = ((size < nsize) ? size : nsize);
+	err = copy_to_user(buf, p_sent_buffer, size);
+	if (err != 0) {
+		dev_err(rdbgdata->device, "%s: Error in copy_to_user(). Err code = %d",
+			__func__, err);
+		err = -ENODATA;
+		goto bail;
+	}
+
+	smq_free(&(rdbgdata->consumer_smrb), p_sent_buffer);
+	err = size;
+	dev_dbg(rdbgdata->device, "%s: Read data to buffer with address 0x%lx",
+		__func__, (unsigned long) buf);
+
+bail:
+	return err;
+}
+
+static ssize_t rdbg_write(struct file *filp, const char __user *buf,
+	size_t size, loff_t *offset)
+{
+	int err = 0;
+	int num_retries = 0;
+	struct rdbg_data *rdbgdata = filp->private_data;
+
+	if (!rdbgdata) {
+		pr_err("Invalid argument");
+		err = -EINVAL;
+		goto bail;
+	}
+
+	do {
+		err = smq_alloc_send(&(rdbgdata->producer_smrb), buf, size);
+		dev_dbg(rdbgdata->device, "%s, smq_alloc_send returned %d.",
+			__func__, err);
+	} while (err != 0 && num_retries++ < MAX_RETRIES);
+
+	if (err != 0) {
+		err = -ECOMM;
+		goto bail;
+	}
+
+	send_interrupt_to_subsystem(rdbgdata);
+
+	err = size;
+
+bail:
+	return err;
+}
+
+
+static const struct file_operations rdbg_fops = {
+	.open = rdbg_open,
+	.read =  rdbg_read,
+	.write =  rdbg_write,
+	.release = rdbg_release,
+};
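+
+/*
+ * Userspace view (illustrative sketch, not part of this driver): each
+ * debug-enabled subsystem shows up as a character device created in
+ * rdbg_init() below, e.g. a hypothetical /dev/adsp. A debug agent would
+ * drive it like this:
+ *
+ *	int fd = open("/dev/adsp", O_RDWR);	// rdbg_open()
+ *	write(fd, cmd, cmd_len);		// smq_alloc_send() + IRQ
+ *	n = read(fd, resp, sizeof(resp));	// blocks until the
+ *						// subsystem interrupts us
+ *	close(fd);				// rdbg_release()
+ */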
+
+static int register_smp2p(char *node_name, struct gpio_info *gpio_info_ptr)
+{
+	struct device_node *node = NULL;
+	int cnt = 0;
+	int id = 0;
+
+	node = of_find_compatible_node(NULL, NULL, node_name);
+	if (node) {
+		cnt = of_gpio_count(node);
+		if (cnt && gpio_info_ptr) {
+			id = of_get_gpio(node, 0);
+			gpio_info_ptr->gpio_base_id = id;
+			gpio_info_ptr->irq_base_id = gpio_to_irq(id);
+			return 0;
+		}
+	}
+	return -EINVAL;
+}
+
+static int __init rdbg_init(void)
+{
+	int err = 0;
+	struct rdbg_device *rdbgdevice = &g_rdbg_instance;
+	int minor = 0;
+	int major = 0;
+	int minor_nodes_created = 0;
+
+	char *rdbg_compatible_string = "qcom,smp2pgpio_client_rdbg_";
+	int max_len = strlen(rdbg_compatible_string) + strlen("xx_out") + 1;
+
+	char *node_name = kcalloc(max_len, sizeof(char), GFP_KERNEL);
+
+	if (!node_name) {
+		err = -ENOMEM;
+		goto bail;
+	}
+
+	if (rdbgdevice->num_devices < 1 ||
+		rdbgdevice->num_devices > SMP2P_NUM_PROCS) {
+		pr_err("rgdb: invalid num_devices");
+		err = -EDOM;
+		goto name_bail;
+	}
+
+	rdbgdevice->rdbg_data = kcalloc(rdbgdevice->num_devices,
+		sizeof(struct rdbg_data), GFP_KERNEL);
+	if (!rdbgdevice->rdbg_data) {
+		err = -ENOMEM;
+		goto name_bail;
+	}
+
+	err = alloc_chrdev_region(&rdbgdevice->dev_no, 0,
+		rdbgdevice->num_devices, "rdbgctl");
+	if (err) {
+		pr_err("Error in alloc_chrdev_region.");
+		goto data_bail;
+	}
+	major = MAJOR(rdbgdevice->dev_no);
+
+	cdev_init(&rdbgdevice->cdev, &rdbg_fops);
+	rdbgdevice->cdev.owner = THIS_MODULE;
+	err = cdev_add(&rdbgdevice->cdev, MKDEV(major, 0),
+		rdbgdevice->num_devices);
+	if (err) {
+		pr_err("Error in cdev_add");
+		goto chrdev_bail;
+	}
+
+	rdbgdevice->class = class_create(THIS_MODULE, "rdbg");
+	if (IS_ERR(rdbgdevice->class)) {
+		err = PTR_ERR(rdbgdevice->class);
+		pr_err("Error in class_create");
+		goto cdev_bail;
+	}
+
+	for (minor = 0; minor < rdbgdevice->num_devices; minor++) {
+		if (!proc_info[minor].name)
+			continue;
+
+		if (snprintf(node_name, max_len, "%s%d_in",
+			rdbg_compatible_string, minor) <= 0) {
+			pr_err("Error in snprintf");
+			err = -ENOMEM;
+			goto device_bail;
+		}
+
+		if (register_smp2p(node_name,
+			&rdbgdevice->rdbg_data[minor].in)) {
+			pr_debug("No incoming device tree entry found for %s",
+				proc_info[minor].name);
+			continue;
+		}
+
+		if (snprintf(node_name, max_len, "%s%d_out",
+			rdbg_compatible_string, minor) <= 0) {
+			pr_err("Error in snprintf");
+			err = -ENOMEM;
+			goto device_bail;
+		}
+
+		if (register_smp2p(node_name,
+			&rdbgdevice->rdbg_data[minor].out)) {
+			pr_err("No outgoing device tree entry found for %s",
+				proc_info[minor].name);
+			err = -EINVAL;
+			goto device_bail;
+		}
+
+		rdbgdevice->rdbg_data[minor].device = device_create(
+			rdbgdevice->class, NULL, MKDEV(major, minor),
+			NULL, "%s", proc_info[minor].name);
+		if (IS_ERR(rdbgdevice->rdbg_data[minor].device)) {
+			err = PTR_ERR(rdbgdevice->rdbg_data[minor].device);
+			pr_err("Error in device_create");
+			goto device_bail;
+		}
+		rdbgdevice->rdbg_data[minor].device_initialized = 1;
+		minor_nodes_created++;
+		dev_dbg(rdbgdevice->rdbg_data[minor].device,
+			"%s: created /dev/%s c %d %d'", __func__,
+			proc_info[minor].name, major, minor);
+	}
+
+	if (!minor_nodes_created) {
+		pr_err("No device tree entries found");
+		err = -EINVAL;
+		goto class_bail;
+	}
+
+	goto name_bail;
+
+device_bail:
+	for (--minor; minor >= 0; minor--) {
+		if (rdbgdevice->rdbg_data[minor].device_initialized)
+			device_destroy(rdbgdevice->class,
+				MKDEV(MAJOR(rdbgdevice->dev_no), minor));
+	}
+class_bail:
+	class_destroy(rdbgdevice->class);
+cdev_bail:
+	cdev_del(&rdbgdevice->cdev);
+chrdev_bail:
+	unregister_chrdev_region(rdbgdevice->dev_no, rdbgdevice->num_devices);
+data_bail:
+	kfree(rdbgdevice->rdbg_data);
+name_bail:
+	kfree(node_name);
+bail:
+	return err;
+}
+
+static void __exit rdbg_exit(void)
+{
+	struct rdbg_device *rdbgdevice = &g_rdbg_instance;
+	int minor;
+
+	for (minor = 0; minor < rdbgdevice->num_devices; minor++) {
+		if (rdbgdevice->rdbg_data[minor].device_initialized) {
+			device_destroy(rdbgdevice->class,
+				MKDEV(MAJOR(rdbgdevice->dev_no), minor));
+		}
+	}
+	class_destroy(rdbgdevice->class);
+	cdev_del(&rdbgdevice->cdev);
+	unregister_chrdev_region(rdbgdevice->dev_no, rdbgdevice->num_devices);
+	kfree(rdbgdevice->rdbg_data);
+}
+
+module_init(rdbg_init);
+module_exit(rdbg_exit);
+
+MODULE_DESCRIPTION("rdbg module");
+MODULE_LICENSE("GPL v2");
diff -Nruw linux-4.4.115-fbx/drivers/clk/msm./clock-alpha-pll.c linux-4.4.115-fbx/drivers/clk/msm/clock-alpha-pll.c
--- linux-4.4.115-fbx/drivers/clk/msm./clock-alpha-pll.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/clk/msm/clock-alpha-pll.c	2019-01-22 16:16:23.015241987 +0100
@@ -0,0 +1,1255 @@
+/*
+ * Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <soc/qcom/clock-alpha-pll.h>
+#include <soc/qcom/msm-clock-controller.h>
+
+#include "clock.h"
+
+#define WAIT_MAX_LOOPS 100
+
+#define MODE_REG(pll)		(*pll->base + pll->offset + 0x0)
+#define LOCK_REG(pll)		(*pll->base + pll->offset + 0x0)
+#define ACTIVE_REG(pll)		(*pll->base + pll->offset + 0x0)
+#define UPDATE_REG(pll)		(*pll->base + pll->offset + 0x0)
+#define L_REG(pll)		(*pll->base + pll->offset + 0x4)
+#define A_REG(pll)		(*pll->base + pll->offset + 0x8)
+#define VCO_REG(pll)		(*pll->base + pll->offset + 0x10)
+#define ALPHA_EN_REG(pll)	(*pll->base + pll->offset + 0x10)
+#define OUTPUT_REG(pll)		(*pll->base + pll->offset + 0x10)
+#define VOTE_REG(pll)		(*pll->base + pll->fsm_reg_offset)
+#define USER_CTL_LO_REG(pll)	(*pll->base + pll->offset + 0x10)
+#define USER_CTL_HI_REG(pll)	(*pll->base + pll->offset + 0x14)
+#define CONFIG_CTL_REG(pll)	(*pll->base + pll->offset + 0x18)
+#define TEST_CTL_LO_REG(pll)	(*pll->base + pll->offset + 0x1c)
+#define TEST_CTL_HI_REG(pll)	(*pll->base + pll->offset + 0x20)
+
+#define PLL_BYPASSNL 0x2
+#define PLL_RESET_N  0x4
+#define PLL_OUTCTRL  0x1
+#define PLL_LATCH_INTERFACE	BIT(11)
+
+#define FABIA_CONFIG_CTL_REG(pll)	(*pll->base + pll->offset + 0x14)
+#define FABIA_USER_CTL_LO_REG(pll)	(*pll->base + pll->offset + 0xc)
+#define FABIA_USER_CTL_HI_REG(pll)	(*pll->base + pll->offset + 0x10)
+#define FABIA_TEST_CTL_LO_REG(pll)	(*pll->base + pll->offset + 0x1c)
+#define FABIA_TEST_CTL_HI_REG(pll)	(*pll->base + pll->offset + 0x20)
+#define FABIA_L_REG(pll)		(*pll->base + pll->offset + 0x4)
+#define FABIA_CAL_L_VAL(pll)		(*pll->base + pll->offset + 0x8)
+#define FABIA_FRAC_REG(pll)		(*pll->base + pll->offset + 0x38)
+#define FABIA_PLL_OPMODE(pll)		(*pll->base + pll->offset + 0x2c)
+
+#define FABIA_PLL_STANDBY	0x0
+#define FABIA_PLL_RUN		0x1
+#define FABIA_PLL_OUT_MAIN	0x7
+#define FABIA_RATE_MARGIN	500
+#define ALPHA_PLL_ACK_LATCH	BIT(29)
+#define ALPHA_PLL_HW_UPDATE_LOGIC_BYPASS	BIT(23)
+
+/*
+ * Even though 40 bits are present, use only 32 for ease of calculation.
+ */
+#define ALPHA_REG_BITWIDTH 40
+#define ALPHA_BITWIDTH 32
+#define FABIA_ALPHA_BITWIDTH 16
+
+/*
+ * Enable/disable registers could be shared among PLLs when FSM voting
+ * is used. This lock protects against potential race when multiple
+ * PLLs are being enabled/disabled together.
+ */
+static DEFINE_SPINLOCK(alpha_pll_reg_lock);
+
+static unsigned long compute_rate(struct alpha_pll_clk *pll,
+				u32 l_val, u32 a_val)
+{
+	u64 rate, parent_rate;
+	int alpha_bw = ALPHA_BITWIDTH;
+
+	if (pll->is_fabia)
+		alpha_bw = FABIA_ALPHA_BITWIDTH;
+
+	parent_rate = clk_get_rate(pll->c.parent);
+	rate = parent_rate * l_val;
+	rate += (parent_rate * a_val) >> alpha_bw;
+	return rate;
+}
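+
+/*
+ * Worked example (illustrative): with a 19.2 MHz parent, l_val = 62 and a
+ * 32-bit alpha of 0x80000000 (i.e. 0.5), the rate comes out as
+ *   19200000 * 62 + ((19200000 * 0x80000000) >> 32)
+ *   = 1190400000 + 9600000 = 1.2 GHz.
+ */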
+
+static bool is_locked(struct alpha_pll_clk *pll)
+{
+	u32 reg = readl_relaxed(LOCK_REG(pll));
+	u32 mask = pll->masks->lock_mask;
+	return (reg & mask) == mask;
+}
+
+static bool is_active(struct alpha_pll_clk *pll)
+{
+	u32 reg = readl_relaxed(ACTIVE_REG(pll));
+	u32 mask = pll->masks->active_mask;
+	return (reg & mask) == mask;
+}
+
+/*
+ * Check active_flag if PLL is in FSM mode, otherwise check lock_det
+ * bit. This function assumes PLLs are already configured to the
+ * right mode.
+ */
+static bool update_finish(struct alpha_pll_clk *pll)
+{
+	if (pll->fsm_en_mask)
+		return is_active(pll);
+	else
+		return is_locked(pll);
+}
+
+static int wait_for_update(struct alpha_pll_clk *pll)
+{
+	int count;
+
+	for (count = WAIT_MAX_LOOPS; count > 0; count--) {
+		if (update_finish(pll))
+			break;
+		udelay(1);
+	}
+
+	if (!count) {
+		pr_err("%s didn't lock after enabling it!\n", pll->c.dbg_name);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int __alpha_pll_vote_enable(struct alpha_pll_clk *pll)
+{
+	u32 ena;
+
+	ena = readl_relaxed(VOTE_REG(pll));
+	ena |= pll->fsm_en_mask;
+	writel_relaxed(ena, VOTE_REG(pll));
+	mb();
+
+	return wait_for_update(pll);
+}
+
+static int __alpha_pll_enable(struct alpha_pll_clk *pll, int enable_output)
+{
+	int rc;
+	u32 mode;
+
+	mode  = readl_relaxed(MODE_REG(pll));
+	mode |= PLL_BYPASSNL;
+	writel_relaxed(mode, MODE_REG(pll));
+
+	/*
+	 * H/W requires a 5us delay between disabling the bypass and
+	 * de-asserting the reset.
+	 */
+	mb();
+	udelay(5);
+
+	mode |= PLL_RESET_N;
+	writel_relaxed(mode, MODE_REG(pll));
+
+	rc = wait_for_update(pll);
+	if (rc < 0)
+		return rc;
+
+	/* Enable PLL output. */
+	if (enable_output) {
+		mode |= PLL_OUTCTRL;
+		writel_relaxed(mode, MODE_REG(pll));
+	}
+
+	/* Ensure that the write above goes through before returning. */
+	mb();
+	return 0;
+}
+
+static void setup_alpha_pll_values(u64 a_val, u32 l_val, u32 vco_val,
+				struct alpha_pll_clk *pll)
+{
+	struct alpha_pll_masks *masks = pll->masks;
+	u32 regval;
+
+	a_val = a_val << (ALPHA_REG_BITWIDTH - ALPHA_BITWIDTH);
+
+	writel_relaxed(l_val, L_REG(pll));
+	__iowrite32_copy(A_REG(pll), &a_val, 2);
+
+	if (vco_val != UINT_MAX) {
+		regval = readl_relaxed(VCO_REG(pll));
+		regval &= ~(masks->vco_mask << masks->vco_shift);
+		regval |= vco_val << masks->vco_shift;
+		writel_relaxed(regval, VCO_REG(pll));
+	}
+
+	regval = readl_relaxed(ALPHA_EN_REG(pll));
+	regval |= masks->alpha_en_mask;
+	writel_relaxed(regval, ALPHA_EN_REG(pll));
+}
+
+static int alpha_pll_enable(struct clk *c)
+{
+	struct alpha_pll_clk *pll = to_alpha_pll_clk(c);
+	unsigned long flags;
+	int rc;
+
+	if (unlikely(!pll->inited))
+		__init_alpha_pll(c);
+
+	spin_lock_irqsave(&alpha_pll_reg_lock, flags);
+	if (pll->fsm_en_mask)
+		rc = __alpha_pll_vote_enable(pll);
+	else
+		rc = __alpha_pll_enable(pll, true);
+	spin_unlock_irqrestore(&alpha_pll_reg_lock, flags);
+
+	return rc;
+}
+
+static int __calibrate_alpha_pll(struct alpha_pll_clk *pll);
+static int dyna_alpha_pll_enable(struct clk *c)
+{
+	struct alpha_pll_clk *pll = to_alpha_pll_clk(c);
+	unsigned long flags;
+	int rc;
+
+	if (unlikely(!pll->inited))
+		__init_alpha_pll(c);
+
+	spin_lock_irqsave(&alpha_pll_reg_lock, flags);
+
+	if (pll->slew)
+		__calibrate_alpha_pll(pll);
+
+	if (pll->fsm_en_mask)
+		rc = __alpha_pll_vote_enable(pll);
+	else
+		rc = __alpha_pll_enable(pll, true);
+	spin_unlock_irqrestore(&alpha_pll_reg_lock, flags);
+
+	return rc;
+}
+
+#define PLL_OFFLINE_REQ_BIT BIT(7)
+#define PLL_FSM_ENA_BIT BIT(20)
+#define PLL_OFFLINE_ACK_BIT BIT(28)
+#define PLL_ACTIVE_FLAG BIT(30)
+
+static int alpha_pll_enable_hwfsm(struct clk *c)
+{
+	u32 mode;
+	struct alpha_pll_clk *pll = to_alpha_pll_clk(c);
+
+	/* Re-enable HW FSM mode, clear OFFLINE request */
+	mode = readl_relaxed(MODE_REG(pll));
+	mode |= PLL_FSM_ENA_BIT;
+	mode &= ~PLL_OFFLINE_REQ_BIT;
+	writel_relaxed(mode, MODE_REG(pll));
+
+	/* Make sure enable request goes through before waiting for update */
+	mb();
+
+	if (wait_for_update(pll) < 0)
+		panic("PLL %s failed to lock", c->dbg_name);
+
+	return 0;
+}
+
+static void alpha_pll_disable_hwfsm(struct clk *c)
+{
+	u32 mode;
+	struct alpha_pll_clk *pll = to_alpha_pll_clk(c);
+
+	/* Request PLL_OFFLINE and wait for ack */
+	mode = readl_relaxed(MODE_REG(pll));
+	writel_relaxed(mode | PLL_OFFLINE_REQ_BIT, MODE_REG(pll));
+	while (!(readl_relaxed(MODE_REG(pll)) & PLL_OFFLINE_ACK_BIT))
+		;
+
+	/* Disable HW FSM */
+	mode = readl_relaxed(MODE_REG(pll));
+	mode &= ~PLL_FSM_ENA_BIT;
+	if (pll->offline_bit_workaround)
+		mode &= ~PLL_OFFLINE_REQ_BIT;
+	writel_relaxed(mode, MODE_REG(pll));
+
+	while (readl_relaxed(MODE_REG(pll)) & PLL_ACTIVE_FLAG)
+		;
+}
+
+static void __alpha_pll_vote_disable(struct alpha_pll_clk *pll)
+{
+	u32 ena;
+
+	ena = readl_relaxed(VOTE_REG(pll));
+	ena &= ~pll->fsm_en_mask;
+	writel_relaxed(ena, VOTE_REG(pll));
+}
+
+static void __alpha_pll_disable(struct alpha_pll_clk *pll)
+{
+	u32 mode;
+
+	mode = readl_relaxed(MODE_REG(pll));
+	mode &= ~PLL_OUTCTRL;
+	writel_relaxed(mode, MODE_REG(pll));
+
+	/* Delay of 2 output clock ticks required until output is disabled */
+	mb();
+	udelay(1);
+
+	mode &= ~(PLL_BYPASSNL | PLL_RESET_N);
+	writel_relaxed(mode, MODE_REG(pll));
+}
+
+static void alpha_pll_disable(struct clk *c)
+{
+	struct alpha_pll_clk *pll = to_alpha_pll_clk(c);
+	unsigned long flags;
+
+	spin_lock_irqsave(&alpha_pll_reg_lock, flags);
+	if (pll->fsm_en_mask)
+		__alpha_pll_vote_disable(pll);
+	else
+		__alpha_pll_disable(pll);
+	spin_unlock_irqrestore(&alpha_pll_reg_lock, flags);
+}
+
+static void dyna_alpha_pll_disable(struct clk *c)
+{
+	struct alpha_pll_clk *pll = to_alpha_pll_clk(c);
+	unsigned long flags;
+
+	spin_lock_irqsave(&alpha_pll_reg_lock, flags);
+	if (pll->fsm_en_mask)
+		__alpha_pll_vote_disable(pll);
+	else
+		__alpha_pll_disable(pll);
+
+	spin_unlock_irqrestore(&alpha_pll_reg_lock, flags);
+}
+
+static u32 find_vco(struct alpha_pll_clk *pll, unsigned long rate)
+{
+	unsigned long i;
+	struct alpha_pll_vco_tbl *v = pll->vco_tbl;
+
+	for (i = 0; i < pll->num_vco; i++) {
+		if (rate >= v[i].min_freq && rate <= v[i].max_freq)
+			return v[i].vco_val;
+	}
+
+	return -EINVAL;
+}
+
+static unsigned long __calc_values(struct alpha_pll_clk *pll,
+		unsigned long rate, int *l_val, u64 *a_val, bool round_up)
+{
+	u32 parent_rate;
+	u64 remainder;
+	u64 quotient;
+	unsigned long freq_hz;
+	int alpha_bw = ALPHA_BITWIDTH;
+
+	parent_rate = clk_get_rate(pll->c.parent);
+	quotient = rate;
+	remainder = do_div(quotient, parent_rate);
+	*l_val = quotient;
+
+	if (!remainder) {
+		*a_val = 0;
+		return rate;
+	}
+
+	if (pll->is_fabia)
+		alpha_bw = FABIA_ALPHA_BITWIDTH;
+
+	/* Upper ALPHA_BITWIDTH bits of Alpha */
+	quotient = remainder << alpha_bw;
+	remainder = do_div(quotient, parent_rate);
+
+	if (remainder && round_up)
+		quotient++;
+
+	*a_val = quotient;
+	freq_hz = compute_rate(pll, *l_val, *a_val);
+	return freq_hz;
+}
+
+static unsigned long round_rate_down(struct alpha_pll_clk *pll,
+		unsigned long rate, int *l_val, u64 *a_val)
+{
+	return __calc_values(pll, rate, l_val, a_val, false);
+}
+
+static unsigned long round_rate_up(struct alpha_pll_clk *pll,
+		unsigned long rate, int *l_val, u64 *a_val)
+{
+	return __calc_values(pll, rate, l_val, a_val, true);
+}
+
+static bool dynamic_update_finish(struct alpha_pll_clk *pll)
+{
+	u32 reg = readl_relaxed(UPDATE_REG(pll));
+	u32 mask = pll->masks->update_mask;
+
+	return (reg & mask) == 0;
+}
+
+static int wait_for_dynamic_update(struct alpha_pll_clk *pll)
+{
+	int count;
+
+	for (count = WAIT_MAX_LOOPS; count > 0; count--) {
+		if (dynamic_update_finish(pll))
+			break;
+		udelay(1);
+	}
+
+	if (!count) {
+		pr_err("%s didn't latch after updating it!\n", pll->c.dbg_name);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int dyna_alpha_pll_dynamic_update(struct alpha_pll_clk *pll)
+{
+	struct alpha_pll_masks *masks = pll->masks;
+	u32 regval;
+	int rc;
+
+	regval = readl_relaxed(UPDATE_REG(pll));
+	regval |= masks->update_mask;
+	writel_relaxed(regval, UPDATE_REG(pll));
+
+	rc = wait_for_dynamic_update(pll);
+	if (rc < 0)
+		return rc;
+
+	/*
+	 * HPG mandates a wait of at least 570ns before polling the LOCK
+	 * detect bit. Have a delay of 1us just to be safe.
+	 */
+	mb();
+	udelay(1);
+
+	rc = wait_for_update(pll);
+	if (rc < 0)
+		return rc;
+
+	return 0;
+}
+
+static int alpha_pll_set_rate(struct clk *c, unsigned long rate);
+static int dyna_alpha_pll_set_rate(struct clk *c, unsigned long rate)
+{
+	struct alpha_pll_clk *pll = to_alpha_pll_clk(c);
+	unsigned long freq_hz, flags;
+	u32 l_val, vco_val;
+	u64 a_val;
+	int ret;
+
+	freq_hz = round_rate_up(pll, rate, &l_val, &a_val);
+	if (freq_hz != rate) {
+		pr_err("alpha_pll: Call clk_set_rate with rounded rates!\n");
+		return -EINVAL;
+	}
+
+	vco_val = find_vco(pll, freq_hz);
+
+	/*
+	 * Dynamic pll update will not support switching frequencies across
+	 * vco ranges. In those cases fall back to normal alpha set rate.
+	 */
+	if (pll->current_vco_val != vco_val) {
+		ret = alpha_pll_set_rate(c, rate);
+		if (!ret)
+			pll->current_vco_val = vco_val;
+		else
+			return ret;
+		return 0;
+	}
+
+	spin_lock_irqsave(&c->lock, flags);
+
+	a_val = a_val << (ALPHA_REG_BITWIDTH - ALPHA_BITWIDTH);
+
+	writel_relaxed(l_val, L_REG(pll));
+	__iowrite32_copy(A_REG(pll), &a_val, 2);
+
+	/* Ensure that the write above goes through before proceeding. */
+	mb();
+
+	if (c->count)
+		dyna_alpha_pll_dynamic_update(pll);
+
+	spin_unlock_irqrestore(&c->lock, flags);
+	return 0;
+}
+
+/*
+ * Slewing PLLs should be brought up at a frequency in the middle of the
+ * desired VCO range. So after bringing up the PLL at the calibration
+ * frequency, set it back to the desired frequency (the one set by the
+ * previous clk_set_rate).
+ */
+static int __calibrate_alpha_pll(struct alpha_pll_clk *pll)
+{
+	unsigned long calibration_freq, freq_hz;
+	struct alpha_pll_vco_tbl *vco_tbl = pll->vco_tbl;
+	u64 a_val;
+	u32 l_val, vco_val;
+	int rc;
+
+	vco_val = find_vco(pll, pll->c.rate);
+	if (IS_ERR_VALUE(vco_val)) {
+		pr_err("alpha pll: not in a valid vco range\n");
+		return -EINVAL;
+	}
+	/*
+	 * vco_sel is not allowed to change while the PLL is slewing, so the
+	 * VCO table should contain only one entry (index 0); use it to find
+	 * the calibration frequency.
+	 */
+	calibration_freq = (vco_tbl[0].min_freq +
+					vco_tbl[0].max_freq)/2;
+
+	freq_hz = round_rate_up(pll, calibration_freq, &l_val, &a_val);
+	if (freq_hz != calibration_freq) {
+		pr_err("alpha_pll: call clk_set_rate with rounded rates!\n");
+		return -EINVAL;
+	}
+
+	setup_alpha_pll_values(a_val, l_val, vco_tbl->vco_val, pll);
+
+	/* Bringup the pll at calibration frequency */
+	rc = __alpha_pll_enable(pll, false);
+	if (rc) {
+		pr_err("alpha pll calibration failed\n");
+		return rc;
+	}
+
+	/*
+	 * PLL is already running at calibration frequency.
+	 * So slew pll to the previously set frequency.
+	 */
+	pr_debug("pll %s: setting back to required rate %lu\n", pll->c.dbg_name,
+					pll->c.rate);
+	freq_hz = round_rate_up(pll, pll->c.rate, &l_val, &a_val);
+	setup_alpha_pll_values(a_val, l_val, UINT_MAX, pll);
+	dyna_alpha_pll_dynamic_update(pll);
+
+	return 0;
+}
+
+static int alpha_pll_dynamic_update(struct alpha_pll_clk *pll)
+{
+	u32 regval;
+
+	/* Latch the input to the PLL */
+	regval = readl_relaxed(MODE_REG(pll));
+	regval |= pll->masks->update_mask;
+	writel_relaxed(regval, MODE_REG(pll));
+
+	/* Wait for 2 reference cycle before checking ACK bit */
+	udelay(1);
+	if (!(readl_relaxed(MODE_REG(pll)) & ALPHA_PLL_ACK_LATCH)) {
+		WARN(1, "%s: PLL latch failed. Output may be unstable!\n",
+						pll->c.dbg_name);
+		return -EINVAL;
+	}
+
+	/* Return latch input to 0 */
+	regval = readl_relaxed(MODE_REG(pll));
+	regval &= ~pll->masks->update_mask;
+	writel_relaxed(regval, MODE_REG(pll));
+
+	/* Wait for PLL output to stabilize */
+	udelay(100);
+
+	return 0;
+}
+
+static int alpha_pll_set_rate(struct clk *c, unsigned long rate)
+{
+	struct alpha_pll_clk *pll = to_alpha_pll_clk(c);
+	struct alpha_pll_masks *masks = pll->masks;
+	unsigned long flags, freq_hz;
+	u32 regval, l_val;
+	int vco_val;
+	u64 a_val;
+
+	freq_hz = round_rate_up(pll, rate, &l_val, &a_val);
+	if (freq_hz != rate) {
+		pr_err("alpha_pll: Call clk_set_rate with rounded rates!\n");
+		return -EINVAL;
+	}
+
+	vco_val = find_vco(pll, freq_hz);
+	if (IS_ERR_VALUE(vco_val)) {
+		pr_err("alpha pll: not in a valid vco range\n");
+		return -EINVAL;
+	}
+
+	if (pll->no_irq_dis)
+		spin_lock(&c->lock);
+	else
+		spin_lock_irqsave(&c->lock, flags);
+
+	/*
+	 * For PLLs that do not support dynamic programming (dynamic_update
+	 * is not set), ensure PLL is off before changing rate. For
+	 * optimization reasons, assume no downstream clock is actively
+	 * using it.
+	 */
+	if (c->count && !pll->dynamic_update)
+		c->ops->disable(c);
+
+	a_val = a_val << (ALPHA_REG_BITWIDTH - ALPHA_BITWIDTH);
+
+	writel_relaxed(l_val, L_REG(pll));
+	__iowrite32_copy(A_REG(pll), &a_val, 2);
+
+	if (masks->vco_mask) {
+		regval = readl_relaxed(VCO_REG(pll));
+		regval &= ~(masks->vco_mask << masks->vco_shift);
+		regval |= vco_val << masks->vco_shift;
+		writel_relaxed(regval, VCO_REG(pll));
+	}
+
+	regval = readl_relaxed(ALPHA_EN_REG(pll));
+	regval |= masks->alpha_en_mask;
+	writel_relaxed(regval, ALPHA_EN_REG(pll));
+
+	if (c->count && pll->dynamic_update)
+		alpha_pll_dynamic_update(pll);
+
+	if (c->count && !pll->dynamic_update)
+		c->ops->enable(c);
+
+	if (pll->no_irq_dis)
+		spin_unlock(&c->lock);
+	else
+		spin_unlock_irqrestore(&c->lock, flags);
+	return 0;
+}
+
+static long alpha_pll_round_rate(struct clk *c, unsigned long rate)
+{
+	struct alpha_pll_clk *pll = to_alpha_pll_clk(c);
+	struct alpha_pll_vco_tbl *v = pll->vco_tbl;
+	int ret;
+	u32 l_val;
+	unsigned long freq_hz;
+	u64 a_val;
+	int i;
+
+	if (pll->no_prepared_reconfig && c->prepare_count)
+		return -EINVAL;
+
+	freq_hz = round_rate_up(pll, rate, &l_val, &a_val);
+	if (rate < pll->min_supported_freq)
+		return pll->min_supported_freq;
+	if (pll->is_fabia)
+		return freq_hz;
+
+	ret = find_vco(pll, freq_hz);
+	if (!IS_ERR_VALUE(ret))
+		return freq_hz;
+
+	freq_hz = 0;
+	for (i = 0; i < pll->num_vco; i++) {
+		if (is_better_rate(rate, freq_hz, v[i].min_freq))
+			freq_hz = v[i].min_freq;
+		if (is_better_rate(rate, freq_hz, v[i].max_freq))
+			freq_hz = v[i].max_freq;
+	}
+	if (!freq_hz)
+		return -EINVAL;
+	return freq_hz;
+}
+
+static void update_vco_tbl(struct alpha_pll_clk *pll)
+{
+	int i, l_val;
+	u64 a_val;
+	unsigned long hz;
+
+	/* Round vco limits to valid rates */
+	for (i = 0; i < pll->num_vco; i++) {
+		hz = round_rate_up(pll, pll->vco_tbl[i].min_freq, &l_val,
+					&a_val);
+		pll->vco_tbl[i].min_freq = hz;
+
+		hz = round_rate_down(pll, pll->vco_tbl[i].max_freq, &l_val,
+					&a_val);
+		pll->vco_tbl[i].max_freq = hz;
+	}
+}
+
+/*
+ * Program bias count to be 0x6 (corresponds to 5us), and lock count
+ * bits to 0 (check lock_det for locking).
+ */
+static void __set_fsm_mode(void __iomem *mode_reg)
+{
+	u32 regval = readl_relaxed(mode_reg);
+
+	/* De-assert reset to FSM */
+	regval &= ~BIT(21);
+	writel_relaxed(regval, mode_reg);
+
+	/* Program bias count */
+	regval &= ~BM(19, 14);
+	regval |= BVAL(19, 14, 0x6);
+	writel_relaxed(regval, mode_reg);
+
+	/* Program lock count */
+	regval &= ~BM(13, 8);
+	regval |= BVAL(13, 8, 0x0);
+	writel_relaxed(regval, mode_reg);
+
+	/* Enable PLL FSM voting */
+	regval |= BIT(20);
+	writel_relaxed(regval, mode_reg);
+}
+
+static bool is_fsm_mode(void __iomem *mode_reg)
+{
+	return !!(readl_relaxed(mode_reg) & BIT(20));
+}
+
+void __init_alpha_pll(struct clk *c)
+{
+	struct alpha_pll_clk *pll = to_alpha_pll_clk(c);
+	struct alpha_pll_masks *masks = pll->masks;
+	u32 regval;
+
+	if (pll->config_ctl_val)
+		writel_relaxed(pll->config_ctl_val, CONFIG_CTL_REG(pll));
+
+	if (masks->output_mask && pll->enable_config) {
+		regval = readl_relaxed(OUTPUT_REG(pll));
+		regval &= ~masks->output_mask;
+		regval |= pll->enable_config;
+		writel_relaxed(regval, OUTPUT_REG(pll));
+	}
+
+	if (masks->post_div_mask) {
+		regval = readl_relaxed(USER_CTL_LO_REG(pll));
+		regval &= ~masks->post_div_mask;
+		regval |= pll->post_div_config;
+		writel_relaxed(regval, USER_CTL_LO_REG(pll));
+	}
+
+	if (pll->slew) {
+		regval = readl_relaxed(USER_CTL_HI_REG(pll));
+		regval &= ~PLL_LATCH_INTERFACE;
+		writel_relaxed(regval, USER_CTL_HI_REG(pll));
+	}
+
+	if (masks->test_ctl_lo_mask) {
+		regval = readl_relaxed(TEST_CTL_LO_REG(pll));
+		regval &= ~masks->test_ctl_lo_mask;
+		regval |= pll->test_ctl_lo_val;
+		writel_relaxed(regval, TEST_CTL_LO_REG(pll));
+	}
+
+	if (masks->test_ctl_hi_mask) {
+		regval = readl_relaxed(TEST_CTL_HI_REG(pll));
+		regval &= ~masks->test_ctl_hi_mask;
+		regval |= pll->test_ctl_hi_val;
+		writel_relaxed(regval, TEST_CTL_HI_REG(pll));
+	}
+
+	if (pll->fsm_en_mask)
+		__set_fsm_mode(MODE_REG(pll));
+
+	pll->inited = true;
+}
+
+static enum handoff alpha_pll_handoff(struct clk *c)
+{
+	struct alpha_pll_clk *pll = to_alpha_pll_clk(c);
+	struct alpha_pll_masks *masks = pll->masks;
+	u64 a_val;
+	u32 alpha_en, l_val, regval;
+
+	/* Set the PLL_HW_UPDATE_LOGIC_BYPASS bit before continuing */
+	if (pll->dynamic_update) {
+		regval = readl_relaxed(MODE_REG(pll));
+		regval |= ALPHA_PLL_HW_UPDATE_LOGIC_BYPASS;
+		writel_relaxed(regval, MODE_REG(pll));
+	}
+
+	update_vco_tbl(pll);
+
+	if (!is_locked(pll)) {
+		if (c->rate && alpha_pll_set_rate(c, c->rate))
+			WARN(1, "%s: Failed to configure rate\n", c->dbg_name);
+		__init_alpha_pll(c);
+		return HANDOFF_DISABLED_CLK;
+	} else if (pll->fsm_en_mask && !is_fsm_mode(MODE_REG(pll))) {
+		WARN(1, "%s should be in FSM mode but is not\n", c->dbg_name);
+	}
+
+	l_val = readl_relaxed(L_REG(pll));
+	/* read u64 in two steps to satisfy alignment constraint */
+	a_val = readl_relaxed(A_REG(pll) + 0x4);
+	a_val = a_val << 32 | readl_relaxed(A_REG(pll));
+	/* get upper 32 bits */
+	a_val = a_val >> (ALPHA_REG_BITWIDTH - ALPHA_BITWIDTH);
+
+	alpha_en = readl_relaxed(ALPHA_EN_REG(pll));
+	alpha_en &= masks->alpha_en_mask;
+	if (!alpha_en)
+		a_val = 0;
+
+	c->rate = compute_rate(pll, l_val, a_val);
+
+	/*
+	 * Unconditionally vote for the PLL; it might be on because of
+	 * another master's vote.
+	 */
+	if (pll->fsm_en_mask)
+		__alpha_pll_vote_enable(pll);
+
+	return HANDOFF_ENABLED_CLK;
+}
+
+static void __iomem *alpha_pll_list_registers(struct clk *clk, int n,
+				struct clk_register_data **regs, u32 *size)
+{
+	struct alpha_pll_clk *pll = to_alpha_pll_clk(clk);
+	static struct clk_register_data data[] = {
+		{"PLL_MODE", 0x0},
+		{"PLL_L_VAL", 0x4},
+		{"PLL_ALPHA_VAL", 0x8},
+		{"PLL_ALPHA_VAL_U", 0xC},
+		{"PLL_USER_CTL", 0x10},
+		{"PLL_CONFIG_CTL", 0x18},
+	};
+
+	if (n)
+		return ERR_PTR(-EINVAL);
+
+	*regs = data;
+	*size = ARRAY_SIZE(data);
+	return MODE_REG(pll);
+}
+
+static int __fabia_alpha_pll_enable(struct alpha_pll_clk *pll)
+{
+	int rc;
+	u32 mode;
+
+	/* Disable PLL output */
+	mode  = readl_relaxed(MODE_REG(pll));
+	mode &= ~PLL_OUTCTRL;
+	writel_relaxed(mode, MODE_REG(pll));
+
+	/* Set operation mode to STANDBY */
+	writel_relaxed(FABIA_PLL_STANDBY, FABIA_PLL_OPMODE(pll));
+
+	/* PLL should be in STANDBY mode before continuing */
+	mb();
+
+	/* Bring PLL out of reset */
+	mode  = readl_relaxed(MODE_REG(pll));
+	mode |= PLL_RESET_N;
+	writel_relaxed(mode, MODE_REG(pll));
+
+	/* Set operation mode to RUN */
+	writel_relaxed(FABIA_PLL_RUN, FABIA_PLL_OPMODE(pll));
+
+	rc = wait_for_update(pll);
+	if (rc < 0)
+		return rc;
+
+	/* Enable the main PLL output */
+	mode  = readl_relaxed(FABIA_USER_CTL_LO_REG(pll));
+	mode |= FABIA_PLL_OUT_MAIN;
+	writel_relaxed(mode, FABIA_USER_CTL_LO_REG(pll));
+
+	/* Enable PLL outputs */
+	mode  = readl_relaxed(MODE_REG(pll));
+	mode |= PLL_OUTCTRL;
+	writel_relaxed(mode, MODE_REG(pll));
+
+	/* Ensure that the write above goes through before returning. */
+	mb();
+	return 0;
+}
+
+static int fabia_alpha_pll_enable(struct clk *c)
+{
+	struct alpha_pll_clk *pll = to_alpha_pll_clk(c);
+	unsigned long flags;
+	int rc;
+
+	spin_lock_irqsave(&alpha_pll_reg_lock, flags);
+	if (pll->fsm_en_mask)
+		rc = __alpha_pll_vote_enable(pll);
+	else
+		rc = __fabia_alpha_pll_enable(pll);
+	spin_unlock_irqrestore(&alpha_pll_reg_lock, flags);
+
+	return rc;
+}
+
+static void __fabia_alpha_pll_disable(struct alpha_pll_clk *pll)
+{
+	u32 mode;
+
+	/* Disable PLL outputs */
+	mode  = readl_relaxed(MODE_REG(pll));
+	mode &= ~PLL_OUTCTRL;
+	writel_relaxed(mode, MODE_REG(pll));
+
+	/* Disable the main PLL output */
+	mode  = readl_relaxed(FABIA_USER_CTL_LO_REG(pll));
+	mode &= ~FABIA_PLL_OUT_MAIN;
+	writel_relaxed(mode, FABIA_USER_CTL_LO_REG(pll));
+
+	/* Place the PLL mode in STANDBY */
+	writel_relaxed(FABIA_PLL_STANDBY, FABIA_PLL_OPMODE(pll));
+}
+
+static void fabia_alpha_pll_disable(struct clk *c)
+{
+	struct alpha_pll_clk *pll = to_alpha_pll_clk(c);
+	unsigned long flags;
+
+	spin_lock_irqsave(&alpha_pll_reg_lock, flags);
+	if (pll->fsm_en_mask)
+		__alpha_pll_vote_disable(pll);
+	else
+		__fabia_alpha_pll_disable(pll);
+	spin_unlock_irqrestore(&alpha_pll_reg_lock, flags);
+}
+
+static int fabia_alpha_pll_set_rate(struct clk *c, unsigned long rate)
+{
+	struct alpha_pll_clk *pll = to_alpha_pll_clk(c);
+	unsigned long flags, freq_hz;
+	u32 l_val;
+	u64 a_val;
+
+	freq_hz = round_rate_up(pll, rate, &l_val, &a_val);
+	if (freq_hz > rate + FABIA_RATE_MARGIN || freq_hz < rate) {
+		pr_err("%s: Call clk_set_rate with rounded rates!\n",
+						c->dbg_name);
+		return -EINVAL;
+	}
+
+	spin_lock_irqsave(&c->lock, flags);
+	/* Set the new L value */
+	writel_relaxed(l_val, FABIA_L_REG(pll));
+	/*
+	 * pll_cal_l_val is set to pll_l_val on most targets. Set it
+	 * explicitly here so that PLL out-of-reset calibration works
+	 * without a glitch on all of them.
+	 */
+	writel_relaxed(l_val, FABIA_CAL_L_VAL(pll));
+	writel_relaxed(a_val, FABIA_FRAC_REG(pll));
+
+	alpha_pll_dynamic_update(pll);
+
+	spin_unlock_irqrestore(&c->lock, flags);
+	return 0;
+}
+
+void __init_fabia_alpha_pll(struct clk *c)
+{
+	struct alpha_pll_clk *pll = to_alpha_pll_clk(c);
+	struct alpha_pll_masks *masks = pll->masks;
+	u32 regval;
+
+	if (pll->config_ctl_val)
+		writel_relaxed(pll->config_ctl_val, FABIA_CONFIG_CTL_REG(pll));
+
+	if (masks->output_mask && pll->enable_config) {
+		regval = readl_relaxed(FABIA_USER_CTL_LO_REG(pll));
+		regval &= ~masks->output_mask;
+		regval |= pll->enable_config;
+		writel_relaxed(regval, FABIA_USER_CTL_LO_REG(pll));
+	}
+
+	if (masks->post_div_mask) {
+		regval = readl_relaxed(FABIA_USER_CTL_LO_REG(pll));
+		regval &= ~masks->post_div_mask;
+		regval |= pll->post_div_config;
+		writel_relaxed(regval, FABIA_USER_CTL_LO_REG(pll));
+	}
+
+	if (pll->slew) {
+		regval = readl_relaxed(FABIA_USER_CTL_HI_REG(pll));
+		regval &= ~PLL_LATCH_INTERFACE;
+		writel_relaxed(regval, FABIA_USER_CTL_HI_REG(pll));
+	}
+
+	if (masks->test_ctl_lo_mask) {
+		regval = readl_relaxed(FABIA_TEST_CTL_LO_REG(pll));
+		regval &= ~masks->test_ctl_lo_mask;
+		regval |= pll->test_ctl_lo_val;
+		writel_relaxed(regval, FABIA_TEST_CTL_LO_REG(pll));
+	}
+
+	if (masks->test_ctl_hi_mask) {
+		regval = readl_relaxed(FABIA_TEST_CTL_HI_REG(pll));
+		regval &= ~masks->test_ctl_hi_mask;
+		regval |= pll->test_ctl_hi_val;
+		writel_relaxed(regval, FABIA_TEST_CTL_HI_REG(pll));
+	}
+
+	if (pll->fsm_en_mask)
+		__set_fsm_mode(MODE_REG(pll));
+
+	pll->inited = true;
+}
+
+static enum handoff fabia_alpha_pll_handoff(struct clk *c)
+{
+	struct alpha_pll_clk *pll = to_alpha_pll_clk(c);
+	u64 a_val;
+	u32 l_val, regval;
+
+	/* Set the PLL_HW_UPDATE_LOGIC_BYPASS bit before continuing */
+	regval = readl_relaxed(MODE_REG(pll));
+	regval |= ALPHA_PLL_HW_UPDATE_LOGIC_BYPASS;
+	writel_relaxed(regval, MODE_REG(pll));
+
+	/* Set the PLL_RESET_N bit to place the PLL in STANDBY from OFF */
+	regval |= PLL_RESET_N;
+	writel_relaxed(regval, MODE_REG(pll));
+
+	if (!is_locked(pll)) {
+		if (c->rate && fabia_alpha_pll_set_rate(c, c->rate))
+			WARN(1, "%s: Failed to configure rate\n", c->dbg_name);
+		__init_fabia_alpha_pll(c);
+		return HANDOFF_DISABLED_CLK;
+	} else if (pll->fsm_en_mask && !is_fsm_mode(MODE_REG(pll))) {
+		WARN(1, "%s should be in FSM mode but is not\n", c->dbg_name);
+	}
+
+	l_val = readl_relaxed(FABIA_L_REG(pll));
+	a_val = readl_relaxed(FABIA_FRAC_REG(pll));
+
+	c->rate = compute_rate(pll, l_val, a_val);
+
+	/*
+	 * Unconditionally vote for the PLL; it might be on because of
+	 * another master's vote.
+	 */
+	if (pll->fsm_en_mask)
+		__alpha_pll_vote_enable(pll);
+
+	return HANDOFF_ENABLED_CLK;
+}
+
+struct clk_ops clk_ops_alpha_pll = {
+	.enable = alpha_pll_enable,
+	.disable = alpha_pll_disable,
+	.round_rate = alpha_pll_round_rate,
+	.set_rate = alpha_pll_set_rate,
+	.handoff = alpha_pll_handoff,
+	.list_registers = alpha_pll_list_registers,
+};
+
+struct clk_ops clk_ops_alpha_pll_hwfsm = {
+	.enable = alpha_pll_enable_hwfsm,
+	.disable = alpha_pll_disable_hwfsm,
+	.round_rate = alpha_pll_round_rate,
+	.set_rate = alpha_pll_set_rate,
+	.handoff = alpha_pll_handoff,
+	.list_registers = alpha_pll_list_registers,
+};
+
+struct clk_ops clk_ops_fixed_alpha_pll = {
+	.enable = alpha_pll_enable,
+	.disable = alpha_pll_disable,
+	.handoff = alpha_pll_handoff,
+	.list_registers = alpha_pll_list_registers,
+};
+
+struct clk_ops clk_ops_fixed_fabia_alpha_pll = {
+	.enable = fabia_alpha_pll_enable,
+	.disable = fabia_alpha_pll_disable,
+	.handoff = fabia_alpha_pll_handoff,
+};
+
+struct clk_ops clk_ops_fabia_alpha_pll = {
+	.enable = fabia_alpha_pll_enable,
+	.disable = fabia_alpha_pll_disable,
+	.round_rate = alpha_pll_round_rate,
+	.set_rate = fabia_alpha_pll_set_rate,
+	.handoff = fabia_alpha_pll_handoff,
+};
+
+struct clk_ops clk_ops_dyna_alpha_pll = {
+	.enable = dyna_alpha_pll_enable,
+	.disable = dyna_alpha_pll_disable,
+	.round_rate = alpha_pll_round_rate,
+	.set_rate = dyna_alpha_pll_set_rate,
+	.handoff = alpha_pll_handoff,
+	.list_registers = alpha_pll_list_registers,
+};
+
+static struct alpha_pll_masks masks_20nm_p = {
+	.lock_mask = BIT(31),
+	.active_mask = BIT(30),
+	.vco_mask = BM(21, 20) >> 20,
+	.vco_shift = 20,
+	.alpha_en_mask = BIT(24),
+	.output_mask = 0xF,
+	.post_div_mask = 0xF00,
+};
+
+static struct alpha_pll_vco_tbl vco_20nm_p[] = {
+	VCO(3,  250000000,  500000000),
+	VCO(2,  500000000, 1000000000),
+	VCO(1, 1000000000, 1500000000),
+	VCO(0, 1500000000, 2000000000),
+};
+
+static struct alpha_pll_masks masks_20nm_t = {
+	.lock_mask = BIT(31),
+	.alpha_en_mask = BIT(24),
+	.output_mask = 0xf,
+};
+
+static struct alpha_pll_vco_tbl vco_20nm_t[] = {
+	VCO(0, 500000000, 1250000000),
+};
+
+static struct alpha_pll_clk *alpha_pll_dt_parser(struct device *dev,
+						struct device_node *np)
+{
+	struct alpha_pll_clk *pll;
+	struct msmclk_data *drv;
+
+	pll = devm_kzalloc(dev, sizeof(*pll), GFP_KERNEL);
+	if (!pll) {
+		dt_err(np, "memory alloc failure\n");
+		return ERR_PTR(-ENOMEM);
+	}
+
+	if (of_property_read_u32(np, "qcom,base-offset", &pll->offset)) {
+		dt_err(np, "missing qcom,base-offset\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	/* Optional property */
+	of_property_read_u32(np, "qcom,post-div-config",
+					&pll->post_div_config);
+
+	pll->masks = devm_kzalloc(dev, sizeof(*pll->masks), GFP_KERNEL);
+	if (!pll->masks) {
+		dt_err(np, "memory alloc failure\n");
+		return ERR_PTR(-ENOMEM);
+	}
+
+	if (of_device_is_compatible(np, "qcom,fixed-alpha-pll-20p") ||
+		of_device_is_compatible(np, "qcom,alpha-pll-20p")) {
+		*pll->masks = masks_20nm_p;
+		pll->vco_tbl = vco_20nm_p;
+		pll->num_vco = ARRAY_SIZE(vco_20nm_p);
+	} else if (of_device_is_compatible(np, "qcom,fixed-alpha-pll-20t") ||
+		of_device_is_compatible(np, "qcom,alpha-pll-20t")) {
+		*pll->masks = masks_20nm_t;
+		pll->vco_tbl = vco_20nm_t;
+		pll->num_vco = ARRAY_SIZE(vco_20nm_t);
+	} else {
+		dt_err(np, "unexpected compatible string\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	drv = msmclk_parse_phandle(dev, np->parent->phandle);
+	if (IS_ERR_OR_NULL(drv))
+		return ERR_CAST(drv);
+	pll->base = &drv->base;
+	return pll;
+}
+
+static void *variable_rate_alpha_pll_dt_parser(struct device *dev,
+					struct device_node *np)
+{
+	struct alpha_pll_clk *pll;
+
+	pll = alpha_pll_dt_parser(dev, np);
+	if (IS_ERR(pll))
+		return pll;
+
+	/* Optional Property */
+	of_property_read_u32(np, "qcom,output-enable", &pll->enable_config);
+
+	pll->c.ops = &clk_ops_alpha_pll;
+	return msmclk_generic_clk_init(dev, np, &pll->c);
+}
+
+static void *fixed_rate_alpha_pll_dt_parser(struct device *dev,
+						struct device_node *np)
+{
+	struct alpha_pll_clk *pll;
+	int rc;
+	u32 val;
+
+	pll = alpha_pll_dt_parser(dev, np);
+	if (IS_ERR(pll))
+		return pll;
+
+	rc = of_property_read_u32(np, "qcom,pll-config-rate", &val);
+	if (rc) {
+		dt_err(np, "missing qcom,pll-config-rate\n");
+		return ERR_PTR(-EINVAL);
+	}
+	pll->c.rate = val;
+
+	rc = of_property_read_u32(np, "qcom,output-enable",
+						&pll->enable_config);
+	if (rc) {
+		dt_err(np, "missing qcom,output-enable\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	/* Optional Property */
+	rc = of_property_read_u32(np, "qcom,fsm-en-bit", &val);
+	if (!rc) {
+		rc = of_property_read_u32(np, "qcom,fsm-en-offset",
+						&pll->fsm_reg_offset);
+		if (rc) {
+			dt_err(np, "missing qcom,fsm-en-offset\n");
+			return ERR_PTR(-EINVAL);
+		}
+		pll->fsm_en_mask = BIT(val);
+	}
+
+	pll->c.ops = &clk_ops_fixed_alpha_pll;
+	return msmclk_generic_clk_init(dev, np, &pll->c);
+}
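+
+/*
+ * Illustrative DT node for the fixed-rate parser above (a sketch, not
+ * part of this patch; the property names are the ones read above, the
+ * values are made up):
+ *
+ *	pll@0 {
+ *		compatible = "qcom,fixed-alpha-pll-20p";
+ *		qcom,base-offset = <0x0>;
+ *		qcom,pll-config-rate = <1000000000>;
+ *		qcom,output-enable = <0x1>;
+ *	};
+ */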
+
+MSMCLK_PARSER(fixed_rate_alpha_pll_dt_parser, "qcom,fixed-alpha-pll-20p", 0);
+MSMCLK_PARSER(fixed_rate_alpha_pll_dt_parser, "qcom,fixed-alpha-pll-20t", 1);
+MSMCLK_PARSER(variable_rate_alpha_pll_dt_parser, "qcom,alpha-pll-20p", 0);
+MSMCLK_PARSER(variable_rate_alpha_pll_dt_parser, "qcom,alpha-pll-20t", 1);
diff -Nruw linux-4.4.115-fbx/drivers/clk/msm./clock.c linux-4.4.115-fbx/drivers/clk/msm/clock.c
--- linux-4.4.115-fbx/drivers/clk/msm./clock.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/clk/msm/clock.c	2019-01-22 16:16:23.019242024 +0100
@@ -0,0 +1,1389 @@
+/* arch/arm/mach-msm/clock.c
+ *
+ * Copyright (C) 2007 Google, Inc.
+ * Copyright (c) 2007-2015, The Linux Foundation. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.	 See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+#include <linux/module.h>
+#include <linux/clk.h>
+#include <linux/clkdev.h>
+#include <linux/list.h>
+#include <linux/regulator/consumer.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/clk/msm-clk-provider.h>
+#include <linux/of_platform.h>
+#include <linux/pm_opp.h>
+
+#include <trace/events/power.h>
+#include "clock.h"
+
+struct handoff_clk {
+	struct list_head list;
+	struct clk *clk;
+};
+static LIST_HEAD(handoff_list);
+
+struct handoff_vdd {
+	struct list_head list;
+	struct clk_vdd_class *vdd_class;
+};
+static LIST_HEAD(handoff_vdd_list);
+
+static DEFINE_MUTEX(msm_clock_init_lock);
+LIST_HEAD(orphan_clk_list);
+static LIST_HEAD(clk_notifier_list);
+
+/* Find the voltage level required for a given rate. */
+int find_vdd_level(struct clk *clk, unsigned long rate)
+{
+	int level;
+
+	for (level = 0; level < clk->num_fmax; level++)
+		if (rate <= clk->fmax[level])
+			break;
+
+	if (level == clk->num_fmax) {
+		pr_err("Rate %lu for %s is greater than highest Fmax\n", rate,
+			clk->dbg_name);
+		return -EINVAL;
+	}
+
+	return level;
+}
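+
+/*
+ * Worked example (illustrative numbers, not from this driver): with
+ * fmax = { 100 MHz, 200 MHz, 400 MHz }, a request for 150 MHz maps to
+ * level 1, 400 MHz maps to level 2, and 500 MHz fails with -EINVAL
+ * since it exceeds the highest Fmax.
+ */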
+
+/* Update voltage level given the current votes. */
+static int update_vdd(struct clk_vdd_class *vdd_class)
+{
+	int level, rc = 0, i, ignore;
+	struct regulator **r = vdd_class->regulator;
+	int *uv = vdd_class->vdd_uv;
+	int *ua = vdd_class->vdd_ua;
+	int n_reg = vdd_class->num_regulators;
+	int cur_lvl = vdd_class->cur_level;
+	int max_lvl = vdd_class->num_levels - 1;
+	int cur_base = cur_lvl * n_reg;
+	int new_base;
+
+	/* aggregate votes */
+	for (level = max_lvl; level > 0; level--)
+		if (vdd_class->level_votes[level])
+			break;
+
+	if (level == cur_lvl)
+		return 0;
+
+	max_lvl = max_lvl * n_reg;
+	new_base = level * n_reg;
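+	/*
+	 * vdd_uv (and vdd_ua) are flattened [level][regulator] tables, so
+	 * the entry for regulator i at the new level is uv[new_base + i]
+	 * and its previous setting is uv[cur_base + i].
+	 */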
+	for (i = 0; i < vdd_class->num_regulators; i++) {
+		rc = regulator_set_voltage(r[i], uv[new_base + i],
+			vdd_class->use_max_uV ? INT_MAX : uv[max_lvl + i]);
+		if (rc)
+			goto set_voltage_fail;
+
+		if (ua) {
+			rc = regulator_set_load(r[i], ua[new_base + i]);
+			rc = rc > 0 ? 0 : rc;
+			if (rc)
+				goto set_mode_fail;
+		}
+		if (cur_lvl == 0 || cur_lvl == vdd_class->num_levels)
+			rc = regulator_enable(r[i]);
+		else if (level == 0)
+			rc = regulator_disable(r[i]);
+		if (rc)
+			goto enable_disable_fail;
+	}
+	if (vdd_class->set_vdd && !vdd_class->num_regulators)
+		rc = vdd_class->set_vdd(vdd_class, level);
+
+	if (!rc)
+		vdd_class->cur_level = level;
+
+	return rc;
+
+enable_disable_fail:
+	/*
+	 * regulator_set_load() could use the voltage to derive the mode.
+	 * Restore the previous voltage setting for r[i] first.
+	 */
+	if (ua) {
+		regulator_set_voltage(r[i], uv[cur_base + i],
+			vdd_class->use_max_uV ? INT_MAX : uv[max_lvl + i]);
+		regulator_set_load(r[i], ua[cur_base + i]);
+	}
+
+set_mode_fail:
+	regulator_set_voltage(r[i], uv[cur_base + i],
+			vdd_class->use_max_uV ? INT_MAX : uv[max_lvl + i]);
+
+set_voltage_fail:
+	for (i--; i >= 0; i--) {
+		regulator_set_voltage(r[i], uv[cur_base + i],
+			vdd_class->use_max_uV ? INT_MAX : uv[max_lvl + i]);
+		if (ua)
+			regulator_set_load(r[i], ua[cur_base + i]);
+		if (cur_lvl == 0 || cur_lvl == vdd_class->num_levels)
+			regulator_disable(r[i]);
+		else if (level == 0)
+			ignore = regulator_enable(r[i]);
+	}
+	return rc;
+}
+
+/* Vote for a voltage level. */
+int vote_vdd_level(struct clk_vdd_class *vdd_class, int level)
+{
+	int rc;
+
+	if (level >= vdd_class->num_levels)
+		return -EINVAL;
+
+	mutex_lock(&vdd_class->lock);
+	vdd_class->level_votes[level]++;
+	rc = update_vdd(vdd_class);
+	if (rc)
+		vdd_class->level_votes[level]--;
+	mutex_unlock(&vdd_class->lock);
+
+	return rc;
+}
+
+/* Remove vote for a voltage level. */
+int unvote_vdd_level(struct clk_vdd_class *vdd_class, int level)
+{
+	int rc = 0;
+
+	if (level >= vdd_class->num_levels)
+		return -EINVAL;
+
+	mutex_lock(&vdd_class->lock);
+	if (WARN(!vdd_class->level_votes[level],
+			"Reference counts are incorrect for %s level %d\n",
+			vdd_class->class_name, level))
+		goto out;
+	vdd_class->level_votes[level]--;
+	rc = update_vdd(vdd_class);
+	if (rc)
+		vdd_class->level_votes[level]++;
+out:
+	mutex_unlock(&vdd_class->lock);
+	return rc;
+}
+
+/* Vote for a voltage level corresponding to a clock's rate. */
+static int vote_rate_vdd(struct clk *clk, unsigned long rate)
+{
+	int level;
+
+	if (!clk->vdd_class)
+		return 0;
+
+	level = find_vdd_level(clk, rate);
+	if (level < 0)
+		return level;
+
+	return vote_vdd_level(clk->vdd_class, level);
+}
+
+/* Remove vote for a voltage level corresponding to a clock's rate. */
+static void unvote_rate_vdd(struct clk *clk, unsigned long rate)
+{
+	int level;
+
+	if (!clk->vdd_class)
+		return;
+
+	level = find_vdd_level(clk, rate);
+	if (level < 0)
+		return;
+
+	unvote_vdd_level(clk->vdd_class, level);
+}
+
+/* Check if the rate is within the voltage limits of the clock. */
+bool is_rate_valid(struct clk *clk, unsigned long rate)
+{
+	int level;
+
+	if (!clk->vdd_class)
+		return true;
+
+	level = find_vdd_level(clk, rate);
+	return level >= 0;
+}
+
+/**
+ * __clk_pre_reparent() - Set up the new parent before switching to it and
+ * prevent the enable state of the child clock from changing.
+ * @c: The child clock that's going to switch parents
+ * @new: The new parent that the child clock is going to switch to
+ * @flags: Pointer to scratch space to save spinlock flags
+ *
+ * Cannot be called from atomic context.
+ *
+ * Use this API to set up the @new parent clock to be able to support the
+ * current prepare and enable state of the child clock @c. Once the parent is
+ * set up, the child clock can safely switch to it.
+ *
+ * The caller shall grab the prepare_lock of clock @c before calling this API
+ * and only release it after calling __clk_post_reparent() for clock @c (or
+ * if this API fails). This is necessary to prevent the prepare state of the
+ * child clock @c from changing while the reparenting is in progress. Since
+ * this API takes care of grabbing the enable lock of @c, only atomic
+ * operations are allowed between calls to __clk_pre_reparent() and
+ * __clk_post_reparent().
+ *
+ * The scratch space pointed to by @flags should not be altered before
+ * calling __clk_post_reparent() for clock @c.
+ *
+ * See also: __clk_post_reparent()
+ */
+int __clk_pre_reparent(struct clk *c, struct clk *new, unsigned long *flags)
+{
+	int rc;
+
+	if (c->prepare_count) {
+		rc = clk_prepare(new);
+		if (rc)
+			return rc;
+	}
+
+	spin_lock_irqsave(&c->lock, *flags);
+	if (c->count) {
+		rc = clk_enable(new);
+		if (rc) {
+			spin_unlock_irqrestore(&c->lock, *flags);
+			clk_unprepare(new);
+			return rc;
+		}
+	}
+	return 0;
+}
+
+/**
+ * __clk_post_reparent() - Release requirements on old parent after switching
+ * away from it and allow changes to the child clock's enable state.
+ * @c:   The child clock that switched parents
+ * @old: The old parent that the child clock switched away from or the new
+ *	 parent of a failed reparent attempt.
+ * @flags: Pointer to scratch space where spinlock flags were saved
+ *
+ * Cannot be called from atomic context.
+ *
+ * This API works in tandem with __clk_pre_reparent. Use this API to
+ * - Remove prepare and enable requirements from the @old parent after
+ *   switching away from it
+ * - Or, undo the effects of __clk_pre_reparent() after a failed attempt to
+ *   change parents
+ *
+ * The caller shall release the prepare_lock of @c that was grabbed before
+ * calling __clk_pre_reparent() only after this API is called (or if
+ * __clk_pre_reparent() fails). This is necessary to prevent the prepare
+ * state of the child clock @c from changing while the reparenting is in
+ * progress. Since this API releases the enable lock of @c, the limit to
+ * atomic operations set by __clk_pre_reparent() is no longer present.
+ *
+ * The scratch space pointed to by @flags shall not be altered since the call
+ * to __clk_pre_reparent() for clock @c.
+ *
+ * See also: __clk_pre_reparent()
+ */
+void __clk_post_reparent(struct clk *c, struct clk *old, unsigned long *flags)
+{
+	if (c->count)
+		clk_disable(old);
+	spin_unlock_irqrestore(&c->lock, *flags);
+
+	if (c->prepare_count)
+		clk_unprepare(old);
+}
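+
+/*
+ * Typical calling sequence (a sketch of the protocol documented above,
+ * not code from this patch; new_parent and old_parent are
+ * placeholders):
+ *
+ *	mutex_lock(&c->prepare_lock);
+ *	rc = __clk_pre_reparent(c, new_parent, &flags);
+ *	if (!rc) {
+ *		... atomic-context work only: program the parent mux ...
+ *		c->parent = new_parent;
+ *		__clk_post_reparent(c, old_parent, &flags);
+ *	}
+ *	mutex_unlock(&c->prepare_lock);
+ */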
+
+int clk_prepare(struct clk *clk)
+{
+	int ret = 0;
+	struct clk *parent;
+
+	if (!clk)
+		return 0;
+	if (IS_ERR(clk))
+		return -EINVAL;
+
+	mutex_lock(&clk->prepare_lock);
+	if (clk->prepare_count == 0) {
+		parent = clk->parent;
+
+		ret = clk_prepare(parent);
+		if (ret)
+			goto out;
+		ret = clk_prepare(clk->depends);
+		if (ret)
+			goto err_prepare_depends;
+
+		ret = vote_rate_vdd(clk, clk->rate);
+		if (ret)
+			goto err_vote_vdd;
+		if (clk->ops->prepare)
+			ret = clk->ops->prepare(clk);
+		if (ret)
+			goto err_prepare_clock;
+	}
+	clk->prepare_count++;
+out:
+	mutex_unlock(&clk->prepare_lock);
+	return ret;
+err_prepare_clock:
+	unvote_rate_vdd(clk, clk->rate);
+err_vote_vdd:
+	clk_unprepare(clk->depends);
+err_prepare_depends:
+	clk_unprepare(parent);
+	goto out;
+}
+EXPORT_SYMBOL(clk_prepare);
+
+/*
+ * Standard clock functions defined in include/linux/clk.h
+ */
+int clk_enable(struct clk *clk)
+{
+	int ret = 0;
+	unsigned long flags;
+	struct clk *parent;
+	const char *name;
+
+	if (!clk)
+		return 0;
+	if (IS_ERR(clk))
+		return -EINVAL;
+	name = clk->dbg_name;
+
+	spin_lock_irqsave(&clk->lock, flags);
+	WARN(!clk->prepare_count,
+			"%s: Don't call enable on unprepared clocks\n", name);
+	if (clk->count == 0) {
+		parent = clk->parent;
+
+		ret = clk_enable(parent);
+		if (ret)
+			goto err_enable_parent;
+		ret = clk_enable(clk->depends);
+		if (ret)
+			goto err_enable_depends;
+
+		trace_clock_enable(name, 1, smp_processor_id());
+		if (clk->ops->enable)
+			ret = clk->ops->enable(clk);
+		if (ret)
+			goto err_enable_clock;
+	}
+	clk->count++;
+	spin_unlock_irqrestore(&clk->lock, flags);
+
+	return 0;
+
+err_enable_clock:
+	clk_disable(clk->depends);
+err_enable_depends:
+	clk_disable(parent);
+err_enable_parent:
+	spin_unlock_irqrestore(&clk->lock, flags);
+	return ret;
+}
+EXPORT_SYMBOL(clk_enable);
+
+void clk_disable(struct clk *clk)
+{
+	const char *name;
+	unsigned long flags;
+
+	if (IS_ERR_OR_NULL(clk))
+		return;
+	name = clk->dbg_name;
+
+	spin_lock_irqsave(&clk->lock, flags);
+	WARN(!clk->prepare_count,
+			"%s: Never called prepare or calling disable after unprepare\n",
+			name);
+	if (WARN(clk->count == 0, "%s is unbalanced", name))
+		goto out;
+	if (clk->count == 1) {
+		struct clk *parent = clk->parent;
+
+		trace_clock_disable(name, 0, smp_processor_id());
+		if (clk->ops->disable)
+			clk->ops->disable(clk);
+		clk_disable(clk->depends);
+		clk_disable(parent);
+	}
+	clk->count--;
+out:
+	spin_unlock_irqrestore(&clk->lock, flags);
+}
+EXPORT_SYMBOL(clk_disable);
+
+void clk_unprepare(struct clk *clk)
+{
+	const char *name;
+
+	if (IS_ERR_OR_NULL(clk))
+		return;
+	name = clk->dbg_name;
+
+	mutex_lock(&clk->prepare_lock);
+	if (WARN(!clk->prepare_count, "%s is unbalanced (prepare)", name))
+		goto out;
+	if (clk->prepare_count == 1) {
+		struct clk *parent = clk->parent;
+
+		WARN(clk->count,
+			"%s: Don't call unprepare when the clock is enabled\n",
+			name);
+
+		if (clk->ops->unprepare)
+			clk->ops->unprepare(clk);
+		unvote_rate_vdd(clk, clk->rate);
+		clk_unprepare(clk->depends);
+		clk_unprepare(parent);
+	}
+	clk->prepare_count--;
+out:
+	mutex_unlock(&clk->prepare_lock);
+}
+EXPORT_SYMBOL(clk_unprepare);
+
+int clk_reset(struct clk *clk, enum clk_reset_action action)
+{
+	if (IS_ERR_OR_NULL(clk))
+		return -EINVAL;
+
+	if (!clk->ops->reset)
+		return -ENOSYS;
+
+	return clk->ops->reset(clk, action);
+}
+EXPORT_SYMBOL(clk_reset);
+
+/**
+ * __clk_notify - call clk notifier chain
+ * @clk: struct clk * that is changing rate
+ * @msg: clk notifier type (see include/linux/clk.h)
+ * @old_rate: old clk rate
+ * @new_rate: new clk rate
+ *
+ * Triggers a notifier call chain on the clk rate-change notification
+ * for 'clk'.  Passes a pointer to the struct clk and the previous
+ * and current rates to the notifier callback.  Intended to be called by
+ * internal clock code only.  Returns NOTIFY_DONE from the last driver
+ * called if all went well, or NOTIFY_STOP or NOTIFY_BAD immediately if
+ * a driver returns that.
+ */
+static int __clk_notify(struct clk *clk, unsigned long msg,
+		unsigned long old_rate, unsigned long new_rate)
+{
+	struct msm_clk_notifier *cn;
+	struct msm_clk_notifier_data cnd;
+	int ret = NOTIFY_DONE;
+
+	cnd.clk = clk;
+	cnd.old_rate = old_rate;
+	cnd.new_rate = new_rate;
+
+	list_for_each_entry(cn, &clk_notifier_list, node) {
+		if (cn->clk == clk) {
+			ret = srcu_notifier_call_chain(&cn->notifier_head, msg,
+					&cnd);
+			break;
+		}
+	}
+
+	return ret;
+}
+
+/*
+ * clk rate change notifiers
+ *
+ * Note - The following notifier functionality is a verbatim copy
+ * of the implementation in the common clock framework, copied here
+ * until MSM switches to the common clock framework.
+ */
+
+/**
+ * msm_clk_notif_register - add a clk rate change notifier
+ * @clk: struct clk * to watch
+ * @nb: struct notifier_block * with callback info
+ *
+ * Request notification when clk's rate changes.  This uses an SRCU
+ * notifier because we want it to block and notifier unregistrations are
+ * uncommon.  The callbacks associated with the notifier must not
+ * re-enter into the clk framework by calling any top-level clk APIs;
+ * this will cause a nested prepare_lock mutex.
+ *
+ * Pre-change notifier callbacks will be passed the current, pre-change
+ * rate of the clk via struct msm_clk_notifier_data.old_rate.  The new,
+ * post-change rate of the clk is passed via struct
+ * msm_clk_notifier_data.new_rate.
+ *
+ * Post-change notifiers will pass the now-current, post-change rate of
+ * the clk in both struct msm_clk_notifier_data.old_rate and struct
+ * msm_clk_notifier_data.new_rate.
+ *
+ * Abort-change notifiers are effectively the opposite of pre-change
+ * notifiers: the original pre-change clk rate is passed in via struct
+ * msm_clk_notifier_data.new_rate and the failed post-change rate is passed
+ * in via struct msm_clk_notifier_data.old_rate.
+ *
+ * msm_clk_notif_register() must be called from non-atomic context.
+ * Returns -EINVAL if called with null arguments, -ENOMEM upon
+ * allocation failure; otherwise, passes along the return value of
+ * srcu_notifier_chain_register().
+ */
+int msm_clk_notif_register(struct clk *clk, struct notifier_block *nb)
+{
+	struct msm_clk_notifier *cn;
+	int ret = -ENOMEM;
+
+	if (!clk || !nb)
+		return -EINVAL;
+
+	mutex_lock(&clk->prepare_lock);
+
+	/* search the list of notifiers for this clk */
+	list_for_each_entry(cn, &clk_notifier_list, node)
+		if (cn->clk == clk)
+			break;
+
+	/* if clk wasn't in the notifier list, allocate new clk_notifier */
+	if (cn->clk != clk) {
+		cn = kzalloc(sizeof(struct msm_clk_notifier), GFP_KERNEL);
+		if (!cn)
+			goto out;
+
+		cn->clk = clk;
+		srcu_init_notifier_head(&cn->notifier_head);
+
+		list_add(&cn->node, &clk_notifier_list);
+	}
+
+	ret = srcu_notifier_chain_register(&cn->notifier_head, nb);
+
+	clk->notifier_count++;
+
+out:
+	mutex_unlock(&clk->prepare_lock);
+
+	return ret;
+}
+
+/**
+ * msm_clk_notif_unregister - remove a clk rate change notifier
+ * @clk: struct clk *
+ * @nb: struct notifier_block * with callback info
+ *
+ * Requests no further notification of changes to 'clk' and frees the
+ * memory allocated by msm_clk_notif_register().
+ *
+ * Returns -EINVAL if called with null arguments; otherwise, passes
+ * along the return value of srcu_notifier_chain_unregister().
+ */
+int msm_clk_notif_unregister(struct clk *clk, struct notifier_block *nb)
+{
+	struct msm_clk_notifier *cn = NULL;
+	int ret = -EINVAL;
+
+	if (!clk || !nb)
+		return -EINVAL;
+
+	mutex_lock(&clk->prepare_lock);
+
+	list_for_each_entry(cn, &clk_notifier_list, node)
+		if (cn->clk == clk)
+			break;
+
+	if (cn->clk == clk) {
+		ret = srcu_notifier_chain_unregister(&cn->notifier_head, nb);
+
+		clk->notifier_count--;
+
+		/* XXX the notifier code should handle this better */
+		if (!cn->notifier_head.head) {
+			srcu_cleanup_notifier_head(&cn->notifier_head);
+			list_del(&cn->node);
+			kfree(cn);
+		}
+
+	} else {
+		ret = -ENOENT;
+	}
+
+	mutex_unlock(&clk->prepare_lock);
+
+	return ret;
+}
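+
+/*
+ * Minimal consumer sketch (hypothetical driver code, not part of this
+ * patch). The callback receives a struct msm_clk_notifier_data and, per
+ * the rules documented above, must not re-enter the clk framework:
+ *
+ *	static int my_rate_cb(struct notifier_block *nb,
+ *			      unsigned long msg, void *data)
+ *	{
+ *		struct msm_clk_notifier_data *cnd = data;
+ *
+ *		if (msg == PRE_RATE_CHANGE)
+ *			pr_debug("rate: %lu -> %lu\n",
+ *				 cnd->old_rate, cnd->new_rate);
+ *		return NOTIFY_OK;
+ *	}
+ */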
+
+unsigned long clk_get_rate(struct clk *clk)
+{
+	if (IS_ERR_OR_NULL(clk))
+		return 0;
+
+	if (!clk->ops->get_rate)
+		return clk->rate;
+
+	return clk->ops->get_rate(clk);
+}
+EXPORT_SYMBOL(clk_get_rate);
+
+int clk_set_rate(struct clk *clk, unsigned long rate)
+{
+	unsigned long start_rate;
+	int rc = 0;
+	const char *name;
+
+	if (IS_ERR_OR_NULL(clk))
+		return -EINVAL;
+	name = clk->dbg_name;
+
+	if (!is_rate_valid(clk, rate))
+		return -EINVAL;
+
+	mutex_lock(&clk->prepare_lock);
+
+	/* Return early if the rate isn't going to change */
+	if (clk->rate == rate && !(clk->flags & CLKFLAG_NO_RATE_CACHE))
+		goto out;
+
+	if (!clk->ops->set_rate) {
+		rc = -ENOSYS;
+		goto out;
+	}
+
+	trace_clock_set_rate(name, rate, raw_smp_processor_id());
+
+	start_rate = clk->rate;
+
+	if (clk->notifier_count)
+		__clk_notify(clk, PRE_RATE_CHANGE, clk->rate, rate);
+
+	if (clk->ops->pre_set_rate) {
+		rc = clk->ops->pre_set_rate(clk, rate);
+		if (rc)
+			goto abort_set_rate;
+	}
+
+	/* Enforce vdd requirements for target frequency. */
+	if (clk->prepare_count) {
+		rc = vote_rate_vdd(clk, rate);
+		if (rc)
+			goto err_vote_vdd;
+	}
+
+	rc = clk->ops->set_rate(clk, rate);
+	if (rc)
+		goto err_set_rate;
+	clk->rate = rate;
+
+	/* Release vdd requirements for starting frequency. */
+	if (clk->prepare_count)
+		unvote_rate_vdd(clk, start_rate);
+
+	if (clk->ops->post_set_rate)
+		clk->ops->post_set_rate(clk, start_rate);
+
+	if (clk->notifier_count)
+		__clk_notify(clk, POST_RATE_CHANGE, start_rate, clk->rate);
+
+	trace_clock_set_rate_complete(name, clk->rate, raw_smp_processor_id());
+out:
+	mutex_unlock(&clk->prepare_lock);
+	return rc;
+
+abort_set_rate:
+	__clk_notify(clk, ABORT_RATE_CHANGE, clk->rate, rate);
+err_set_rate:
+	if (clk->prepare_count)
+		unvote_rate_vdd(clk, rate);
+err_vote_vdd:
+	/* clk->rate is still the old rate. So, pass the new rate instead. */
+	if (clk->ops->post_set_rate)
+		clk->ops->post_set_rate(clk, rate);
+	goto out;
+}
+EXPORT_SYMBOL(clk_set_rate);
+
+long clk_round_rate(struct clk *clk, unsigned long rate)
+{
+	long rrate;
+	unsigned long fmax = 0, i;
+
+	if (IS_ERR_OR_NULL(clk))
+		return -EINVAL;
+
+	for (i = 0; i < clk->num_fmax; i++)
+		fmax = max(fmax, clk->fmax[i]);
+	if (!fmax)
+		fmax = ULONG_MAX;
+	rate = min(rate, fmax);
+
+	if (clk->ops->round_rate)
+		rrate = clk->ops->round_rate(clk, rate);
+	else if (clk->rate)
+		rrate = clk->rate;
+	else
+		return -ENOSYS;
+
+	if (rrate > fmax)
+		return -EINVAL;
+	return rrate;
+}
+EXPORT_SYMBOL(clk_round_rate);
+
+int clk_set_max_rate(struct clk *clk, unsigned long rate)
+{
+	if (IS_ERR_OR_NULL(clk))
+		return -EINVAL;
+
+	if (!clk->ops->set_max_rate)
+		return -ENOSYS;
+
+	return clk->ops->set_max_rate(clk, rate);
+}
+EXPORT_SYMBOL(clk_set_max_rate);
+
+int parent_to_src_sel(struct clk_src *parents, int num_parents, struct clk *p)
+{
+	int i;
+
+	for (i = 0; i < num_parents; i++) {
+		if (parents[i].src == p)
+			return parents[i].sel;
+	}
+
+	return -EINVAL;
+}
+EXPORT_SYMBOL(parent_to_src_sel);
+
+int clk_get_parent_sel(struct clk *c, struct clk *parent)
+{
+	return parent_to_src_sel(c->parents, c->num_parents, parent);
+}
+EXPORT_SYMBOL(clk_get_parent_sel);
+
+int clk_set_parent(struct clk *clk, struct clk *parent)
+{
+	int rc = 0;
+
+	if (IS_ERR_OR_NULL(clk))
+		return -EINVAL;
+
+	if (!clk->ops->set_parent && clk->parent == parent)
+		return 0;
+
+	if (!clk->ops->set_parent)
+		return -ENOSYS;
+
+	mutex_lock(&clk->prepare_lock);
+	if (clk->parent == parent && !(clk->flags & CLKFLAG_NO_RATE_CACHE))
+		goto out;
+	rc = clk->ops->set_parent(clk, parent);
+out:
+	mutex_unlock(&clk->prepare_lock);
+
+	return rc;
+}
+EXPORT_SYMBOL(clk_set_parent);
+
+struct clk *clk_get_parent(struct clk *clk)
+{
+	if (IS_ERR_OR_NULL(clk))
+		return NULL;
+
+	return clk->parent;
+}
+EXPORT_SYMBOL(clk_get_parent);
+
+int clk_set_flags(struct clk *clk, unsigned long flags)
+{
+	if (IS_ERR_OR_NULL(clk))
+		return -EINVAL;
+	if (!clk->ops->set_flags)
+		return -ENOSYS;
+
+	return clk->ops->set_flags(clk, flags);
+}
+EXPORT_SYMBOL(clk_set_flags);
+
+static LIST_HEAD(initdata_list);
+
+static void init_sibling_lists(struct clk_lookup *clock_tbl, size_t num_clocks)
+{
+	struct clk *clk, *parent;
+	unsigned n;
+
+	for (n = 0; n < num_clocks; n++) {
+		clk = clock_tbl[n].clk;
+		parent = clk->parent;
+		if (parent && list_empty(&clk->siblings))
+			list_add(&clk->siblings, &parent->children);
+	}
+}
+
+static void vdd_class_init(struct clk_vdd_class *vdd)
+{
+	struct handoff_vdd *v;
+
+	if (!vdd)
+		return;
+
+	if (vdd->skip_handoff)
+		return;
+
+	list_for_each_entry(v, &handoff_vdd_list, list) {
+		if (v->vdd_class == vdd)
+			return;
+	}
+
+	pr_debug("voting for vdd_class %s\n", vdd->class_name);
+	if (vote_vdd_level(vdd, vdd->num_levels - 1))
+		pr_err("failed to vote for %s\n", vdd->class_name);
+
+	v = kmalloc(sizeof(*v), GFP_KERNEL);
+	if (!v) {
+		pr_err("Unable to kmalloc. %s will be stuck at max.\n",
+			vdd->class_name);
+		return;
+	}
+
+	v->vdd_class = vdd;
+	list_add_tail(&v->list, &handoff_vdd_list);
+}
+
+static int __handoff_clk(struct clk *clk)
+{
+	enum handoff state = HANDOFF_DISABLED_CLK;
+	struct handoff_clk *h = NULL;
+	int rc, i;
+
+	if (clk == NULL || clk->flags & CLKFLAG_INIT_DONE ||
+	    clk->flags & CLKFLAG_SKIP_HANDOFF)
+		return 0;
+
+	if (clk->flags & CLKFLAG_INIT_ERR)
+		return -ENXIO;
+
+	if (clk->flags & CLKFLAG_EPROBE_DEFER)
+		return -EPROBE_DEFER;
+
+	/* Handoff any 'depends' clock first. */
+	rc = __handoff_clk(clk->depends);
+	if (rc)
+		goto err;
+
+	/*
+	 * Handoff functions for the parent must be called before the
+	 * children can be handed off. Without handing off the parents and
+	 * knowing their rate and state (on/off), it's impossible to figure
+	 * out the rate and state of the children.
+	 */
+	if (clk->ops->get_parent)
+		clk->parent = clk->ops->get_parent(clk);
+
+	if (IS_ERR(clk->parent)) {
+		rc = PTR_ERR(clk->parent);
+		goto err;
+	}
+
+	rc = __handoff_clk(clk->parent);
+	if (rc)
+		goto err;
+
+	for (i = 0; i < clk->num_parents; i++) {
+		rc = __handoff_clk(clk->parents[i].src);
+		if (rc)
+			goto err;
+	}
+
+	if (clk->ops->handoff)
+		state = clk->ops->handoff(clk);
+
+	if (state == HANDOFF_ENABLED_CLK) {
+
+		h = kmalloc(sizeof(*h), GFP_KERNEL);
+		if (!h) {
+			rc = -ENOMEM;
+			goto err;
+		}
+
+		rc = clk_prepare_enable(clk->parent);
+		if (rc)
+			goto err;
+
+		rc = clk_prepare_enable(clk->depends);
+		if (rc)
+			goto err_depends;
+
+		rc = vote_rate_vdd(clk, clk->rate);
+		WARN(rc, "%s unable to vote for voltage!\n", clk->dbg_name);
+
+		clk->count = 1;
+		clk->prepare_count = 1;
+		h->clk = clk;
+		list_add_tail(&h->list, &handoff_list);
+
+		pr_debug("Handed off %s rate=%lu\n", clk->dbg_name, clk->rate);
+	}
+
+	if (clk->init_rate && clk_set_rate(clk, clk->init_rate))
+		pr_err("failed to set an init rate of %lu on %s\n",
+			clk->init_rate, clk->dbg_name);
+	if (clk->always_on && clk_prepare_enable(clk))
+		pr_err("failed to enable always-on clock %s\n",
+			clk->dbg_name);
+
+	clk->flags |= CLKFLAG_INIT_DONE;
+	/* if the clk is on orphan list, remove it */
+	list_del_init(&clk->list);
+	clock_debug_register(clk);
+
+	return 0;
+
+err_depends:
+	clk_disable_unprepare(clk->parent);
+err:
+	kfree(h);
+	if (rc == -EPROBE_DEFER) {
+		clk->flags |= CLKFLAG_EPROBE_DEFER;
+		if (list_empty(&clk->list))
+			list_add_tail(&clk->list, &orphan_clk_list);
+	} else {
+		pr_err("%s handoff failed (%d)\n", clk->dbg_name, rc);
+		clk->flags |= CLKFLAG_INIT_ERR;
+	}
+	return rc;
+}
+
+/**
+ * msm_clock_register() - Register additional clock tables
+ * @table: Table of clocks
+ * @size: Size of @table
+ *
+ * Upon return, clock APIs may be used to control clocks registered using this
+ * function.
+ */
+int msm_clock_register(struct clk_lookup *table, size_t size)
+{
+	int n = 0, rc;
+	struct clk *c, *safe;
+	bool found_more_clks;
+
+	mutex_lock(&msm_clock_init_lock);
+
+	init_sibling_lists(table, size);
+
+	/*
+	 * Enable regulators and temporarily set them up at maximum voltage.
+	 * Once all the clocks have made their respective votes, remove this
+	 * temporary vote. The temporary vote is removed at late_init, by
+	 * which time all the clocks are assumed to have been handed off.
+	 */
+	for (n = 0; n < size; n++)
+		vdd_class_init(table[n].clk->vdd_class);
+
+	/*
+	 * Detect and preserve initial clock state until clock_late_init() or
+	 * a driver explicitly changes it, whichever is first.
+	 */
+
+	for (n = 0; n < size; n++)
+		__handoff_clk(table[n].clk);
+
+	/* maintain backwards compatibility */
+	if (table[0].con_id || table[0].dev_id)
+		clkdev_add_table(table, size);
+
+	do {
+		found_more_clks = false;
+		/* clear cached __handoff_clk return values */
+		list_for_each_entry_safe(c, safe, &orphan_clk_list, list)
+			c->flags &= ~CLKFLAG_EPROBE_DEFER;
+
+		list_for_each_entry_safe(c, safe, &orphan_clk_list, list) {
+			rc = __handoff_clk(c);
+			if (!rc)
+				found_more_clks = true;
+		}
+	} while (found_more_clks);
+
+	mutex_unlock(&msm_clock_init_lock);
+
+	return 0;
+}
+EXPORT_SYMBOL(msm_clock_register);
+
+struct of_msm_provider_data {
+	struct clk_lookup *table;
+	size_t size;
+};
+
+static struct clk *of_clk_src_get(struct of_phandle_args *clkspec,
+				  void *data)
+{
+	struct of_msm_provider_data *ofdata = data;
+	int n;
+
+	for (n = 0; n < ofdata->size; n++) {
+		if (clkspec->args[0] == ofdata->table[n].of_idx)
+			return ofdata->table[n].clk;
+	}
+	return ERR_PTR(-ENOENT);
+}
+
+#define MAX_LEN_OPP_HANDLE	50
+#define LEN_OPP_HANDLE		16
+#define LEN_OPP_VCORNER_HANDLE	22
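+
+/*
+ * The LEN_* values cover the fixed parts of the generated
+ * "qcom,<clk>-opp-handle" and "qcom,<clk>-opp-store-vcorner" property
+ * names; populate_clock_opp_table() uses them to reject clock names
+ * that would overflow the MAX_LEN_OPP_HANDLE-sized buffers.
+ */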
+
+static struct device **derive_device_list(struct clk *clk,
+					struct device_node *np,
+					char *clk_handle_name, int len)
+{
+	int j, count, cpu;
+	struct platform_device *pdev;
+	struct device_node *dev_node;
+	struct device **device_list;
+
+	count = len/sizeof(u32);
+	device_list = kmalloc_array(count, sizeof(struct device *),
+							GFP_KERNEL);
+	if (!device_list)
+		return ERR_PTR(-ENOMEM);
+
+	for (j = 0; j < count; j++) {
+		device_list[j] = NULL;
+		dev_node = of_parse_phandle(np, clk_handle_name, j);
+		if (!dev_node) {
+			pr_err("Unable to get device_node pointer for %s opp-handle (%s)\n",
+					clk->dbg_name, clk_handle_name);
+			goto err_parse_phandle;
+		}
+
+		for_each_possible_cpu(cpu) {
+			if (of_get_cpu_node(cpu, NULL) == dev_node) {
+				device_list[j] = get_cpu_device(cpu);
+			}
+		}
+
+		if (device_list[j])
+			continue;
+
+		pdev = of_find_device_by_node(dev_node);
+		if (!pdev) {
+			pr_err("Unable to find platform_device node for %s opp-handle\n",
+						clk->dbg_name);
+			goto err_parse_phandle;
+		}
+		device_list[j] = &pdev->dev;
+	}
+	return device_list;
+err_parse_phandle:
+	kfree(device_list);
+	return ERR_PTR(-EINVAL);
+}
+
+static int get_voltage(struct clk *clk, unsigned long rate,
+				int store_vcorner, int n)
+{
+	struct clk_vdd_class *vdd;
+	int uv, level, corner;
+
+	/*
+	 * Use the first regulator in the vdd class
+	 * for the OPP table.
+	 */
+	vdd = clk->vdd_class;
+	if (vdd->num_regulators > 1) {
+		corner = vdd->vdd_uv[vdd->num_regulators * n];
+	} else {
+		level = find_vdd_level(clk, rate);
+		if (level < 0) {
+			pr_err("Could not find vdd level\n");
+			return -EINVAL;
+		}
+		corner = vdd->vdd_uv[level];
+	}
+
+	if (!corner) {
+		pr_err("%s: Unable to find vdd level for rate %lu\n",
+					clk->dbg_name, rate);
+		return -EINVAL;
+	}
+
+	if (store_vcorner) {
+		uv = corner;
+		return uv;
+	}
+
+	uv = regulator_list_corner_voltage(vdd->regulator[0], corner);
+	if (uv < 0) {
+		pr_err("%s: no uv for corner %d - err: %d\n",
+				clk->dbg_name, corner, uv);
+		return uv;
+	}
+	return uv;
+}
+
+static int add_and_print_opp(struct clk *clk, struct device **device_list,
+				int count, unsigned long rate, int uv, int n)
+{
+	int j, ret = 0;
+
+	for (j = 0; j < count; j++) {
+		ret = dev_pm_opp_add(device_list[j], rate, uv);
+		if (ret) {
+			pr_err("%s: couldn't add OPP for %lu - err: %d\n",
+						clk->dbg_name, rate, ret);
+			return ret;
+		}
+		if (n == 1 || n == clk->num_fmax - 1 ||
+					rate == clk_round_rate(clk, INT_MAX))
+			pr_info("%s: set OPP pair(%lu Hz: %u uV) on %s\n",
+						clk->dbg_name, rate, uv,
+						dev_name(device_list[j]));
+	}
+	return ret;
+}
+
+static void populate_clock_opp_table(struct device_node *np,
+			struct clk_lookup *table, size_t size)
+{
+	struct device **device_list;
+	struct clk *clk;
+	char clk_handle_name[MAX_LEN_OPP_HANDLE];
+	char clk_store_volt_corner[MAX_LEN_OPP_HANDLE];
+	size_t i;
+	int n, len, count, uv = 0;
+	unsigned long rate;
+	long ret = 0;	/* signed: clk_round_rate() can return a negative errno */
+	bool store_vcorner;
+
+	/* Iterate across all clocks in the clock controller */
+	for (i = 0; i < size; i++) {
+		n = 1;
+		rate = 0;
+
+		store_vcorner = false;
+		clk = table[i].clk;
+		if (!clk || !clk->num_fmax || clk->opp_table_populated)
+			continue;
+
+		if (strlen(clk->dbg_name) + LEN_OPP_HANDLE
+					< MAX_LEN_OPP_HANDLE) {
+			ret = snprintf(clk_handle_name,
+					ARRAY_SIZE(clk_handle_name),
+					"qcom,%s-opp-handle", clk->dbg_name);
+			if (ret < strlen(clk->dbg_name) + LEN_OPP_HANDLE) {
+				pr_err("Failed to hold clk_handle_name\n");
+				continue;
+			}
+		} else {
+			pr_err("clk name (%s) too large to fit in clk_handle_name\n",
+							clk->dbg_name);
+			continue;
+		}
+
+		if (strlen(clk->dbg_name) + LEN_OPP_VCORNER_HANDLE
+					< MAX_LEN_OPP_HANDLE) {
+			ret = snprintf(clk_store_volt_corner,
+				ARRAY_SIZE(clk_store_volt_corner),
+				"qcom,%s-opp-store-vcorner", clk->dbg_name);
+			if (ret < strlen(clk->dbg_name) +
+						LEN_OPP_VCORNER_HANDLE) {
+				pr_err("Failed to hold clk_store_volt_corner\n");
+				continue;
+			}
+		} else {
+			pr_err("clk name (%s) too large to fit in clk_store_volt_corner\n",
+							clk->dbg_name);
+			continue;
+		}
+
+		if (!of_find_property(np, clk_handle_name, &len)) {
+			pr_debug("Unable to find %s\n", clk_handle_name);
+			if (!of_find_property(np, clk_store_volt_corner,
+								&len)) {
+				pr_debug("Unable to find %s\n",
+						clk_store_volt_corner);
+				continue;
+			} else {
+				store_vcorner = true;
+				device_list = derive_device_list(clk, np,
+						clk_store_volt_corner, len);
+			}
+		} else
+			device_list = derive_device_list(clk, np,
+						clk_handle_name, len);
+		if (IS_ERR_OR_NULL(device_list)) {
+			pr_err("Failed to fill device_list\n");
+			continue;
+		}
+
+		count = len/sizeof(u32);
+		while (1) {
+			/*
+			 * Calling clk_round_rate will not work for all clocks
+			 * (e.g. mux_div). Use their fmax values instead to
+			 * get the list of all available frequencies.
+			 */
+			if (clk->ops->list_rate) {
+				ret = clk_round_rate(clk, rate + 1);
+				if (ret < 0) {
+					pr_err("clk_round_rate failed for %s\n",
+							clk->dbg_name);
+					goto err_round_rate;
+				}
+				/*
+				 * If clk_round_rate gives the same value on
+				 * consecutive iterations, exit the loop since
+				 * we're at the maximum clock frequency.
+				 */
+				if (rate == ret)
+					break;
+				rate = ret;
+			} else {
+				if (n < clk->num_fmax)
+					rate = clk->fmax[n];
+				else
+					break;
+			}
+
+			uv = get_voltage(clk, rate, store_vcorner, n);
+			if (uv < 0)
+				goto err_round_rate;
+
+			ret = add_and_print_opp(clk, device_list, count,
+							rate, uv, n);
+			if (ret)
+				goto err_round_rate;
+
+			n++;
+		}
+err_round_rate:
+		/* If OPP table population was successful, set the flag */
+		if (uv >= 0 && ret >= 0)
+			clk->opp_table_populated = true;
+		kfree(device_list);
+	}
+}
+
+/**
+ * of_msm_clock_register() - Register clock tables with clkdev and with the
+ *			     clock DT framework
+ * @table: Table of clocks
+ * @size: Size of @table
+ * @np: Device node of the clock-provider device
+ *
+ * Upon return, clock APIs may be used to control clocks registered using this
+ * function.
+ */
+int of_msm_clock_register(struct device_node *np, struct clk_lookup *table,
+				size_t size)
+{
+	int ret = 0;
+	struct of_msm_provider_data *data;
+
+	data = kzalloc(sizeof(*data), GFP_KERNEL);
+	if (!data)
+		return -ENOMEM;
+
+	data->table = table;
+	data->size = size;
+
+	ret = of_clk_add_provider(np, of_clk_src_get, data);
+	if (ret) {
+		kfree(data);
+		return -ENOMEM;
+	}
+
+	populate_clock_opp_table(np, table, size);
+	return msm_clock_register(table, size);
+}
+EXPORT_SYMBOL(of_msm_clock_register);
+
+/**
+ * msm_clock_init() - Register and initialize a clock driver
+ * @data: Driver-specific clock initialization data
+ *
+ * Upon return from this call, clock APIs may be used to control
+ * clocks registered with this API.
+ */
+int __init msm_clock_init(struct clock_init_data *data)
+{
+	if (!data)
+		return -EINVAL;
+
+	if (data->pre_init)
+		data->pre_init();
+
+	mutex_lock(&msm_clock_init_lock);
+	if (data->late_init)
+		list_add(&data->list, &initdata_list);
+	mutex_unlock(&msm_clock_init_lock);
+
+	msm_clock_register(data->table, data->size);
+
+	if (data->post_init)
+		data->post_init();
+
+	return 0;
+}
+
+static int __init clock_late_init(void)
+{
+	struct handoff_clk *h, *h_temp;
+	struct handoff_vdd *v, *v_temp;
+	struct clock_init_data *initdata, *initdata_temp;
+	int ret = 0;
+
+	pr_info("%s: Removing enables held for handed-off clocks\n", __func__);
+
+	mutex_lock(&msm_clock_init_lock);
+
+	list_for_each_entry_safe(initdata, initdata_temp,
+					&initdata_list, list) {
+		ret = initdata->late_init();
+		if (ret)
+			pr_err("%s: %pS failed late_init.\n", __func__,
+				initdata);
+	}
+
+	list_for_each_entry_safe(h, h_temp, &handoff_list, list) {
+		clk_disable_unprepare(h->clk);
+		list_del(&h->list);
+		kfree(h);
+	}
+
+	list_for_each_entry_safe(v, v_temp, &handoff_vdd_list, list) {
+		unvote_vdd_level(v->vdd_class, v->vdd_class->num_levels - 1);
+		list_del(&v->list);
+		kfree(v);
+	}
+
+	mutex_unlock(&msm_clock_init_lock);
+
+	return ret;
+}
+/*
+ * clock_late_init should run only after all deferred probing
+ * (excluding DLKM probes) has completed.
+ */
+late_initcall_sync(clock_late_init);
diff -Nruw linux-4.4.115-fbx/drivers/clk/msm./clock-debug.c linux-4.4.115-fbx/drivers/clk/msm/clock-debug.c
--- linux-4.4.115-fbx/drivers/clk/msm./clock-debug.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/clk/msm/clock-debug.c	2019-01-22 16:16:23.015241987 +0100
@@ -0,0 +1,683 @@
+/*
+ * Copyright (C) 2007 Google, Inc.
+ * Copyright (c) 2007-2014, 2016-2017, The Linux Foundation. All rights
+ * reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.	 See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/ctype.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/clk.h>
+#include <linux/list.h>
+#include <linux/clkdev.h>
+#include <linux/uaccess.h>
+#include <linux/mutex.h>
+#include <linux/io.h>
+#include <linux/clk/msm-clk-provider.h>
+#include <trace/events/power.h>
+
+#include "clock.h"
+
+static LIST_HEAD(clk_list);
+static DEFINE_MUTEX(clk_list_lock);
+
+static struct dentry *debugfs_base;
+static u32 debug_suspend;
+
+static int clock_debug_rate_set(void *data, u64 val)
+{
+	struct clk *clock = data;
+	int ret;
+
+	/*
+	 * Only increases up to the max rate will succeed, but that's
+	 * actually good for debugging purposes, so don't check for errors.
+	 */
+	if (clock->flags & CLKFLAG_MAX)
+		clk_set_max_rate(clock, val);
+	ret = clk_set_rate(clock, val);
+	if (ret)
+		pr_err("clk_set_rate(%s, %lu) failed (%d)\n", clock->dbg_name,
+				(unsigned long)val, ret);
+
+	return ret;
+}
+
+static int clock_debug_rate_get(void *data, u64 *val)
+{
+	struct clk *clock = data;
+
+	*val = clk_get_rate(clock);
+	return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(clock_rate_fops, clock_debug_rate_get,
+			clock_debug_rate_set, "%llu\n");
+
+static struct clk *measure;
+
+static int clock_debug_measure_get(void *data, u64 *val)
+{
+	struct clk *clock = data, *par;
+	int ret, is_hw_gated;
+	unsigned long meas_rate, sw_rate;
+
+	/* Check to see if the clock is in hardware gating mode */
+	if (clock->ops->in_hwcg_mode)
+		is_hw_gated = clock->ops->in_hwcg_mode(clock);
+	else
+		is_hw_gated = 0;
+
+	mutex_lock(&clock->prepare_lock);
+	ret = clk_set_parent(measure, clock);
+	if (!ret) {
+		/*
+		 * Disable hw gating to get accurate rate measurements. Only do
+		 * this if the clock is explicitly enabled by software. This
+		 * allows us to detect errors where clocks are on even though
+		 * software is not requesting them to be on due to broken
+		 * hardware gating signals.
+		 */
+		if (is_hw_gated && clock->count)
+			clock->ops->disable_hwcg(clock);
+		par = measure;
+		while (par && par != clock) {
+			if (par->ops->enable)
+				par->ops->enable(par);
+			par = par->parent;
+		}
+		*val = clk_get_rate(measure);
+		/* Re-enable hw gating if it was disabled */
+		if (is_hw_gated && clock->count)
+			clock->ops->enable_hwcg(clock);
+	}
+
+	/*
+	 * If there's a divider on the path from the clock output to the
+	 * measurement circuitry, account for it by dividing the original clock
+	 * rate with the rate set on the parent of the measure clock.
+	 */
+	meas_rate = clk_get_rate(clock);
+	sw_rate = clk_get_rate(measure->parent);
+	mutex_unlock(&clock->prepare_lock);
+	if (sw_rate && meas_rate >= (sw_rate * 2))
+		*val *= DIV_ROUND_CLOSEST(meas_rate, sw_rate);
+
+	return ret;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(clock_measure_fops, clock_debug_measure_get,
+			NULL, "%lld\n");
+
+static int clock_debug_enable_set(void *data, u64 val)
+{
+	struct clk *clock = data;
+	int rc = 0;
+
+	if (val)
+		rc = clk_prepare_enable(clock);
+	else
+		clk_disable_unprepare(clock);
+
+	return rc;
+}
+
+static int clock_debug_enable_get(void *data, u64 *val)
+{
+	struct clk *clock = data;
+	int enabled;
+
+	if (clock->ops->is_enabled)
+		enabled = clock->ops->is_enabled(clock);
+	else
+		enabled = !!(clock->count);
+
+	*val = enabled;
+	return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(clock_enable_fops, clock_debug_enable_get,
+			clock_debug_enable_set, "%lld\n");
+
+static int clock_debug_local_get(void *data, u64 *val)
+{
+	struct clk *clock = data;
+
+	if (!clock->ops->is_local)
+		*val = true;
+	else
+		*val = clock->ops->is_local(clock);
+
+	return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(clock_local_fops, clock_debug_local_get,
+			NULL, "%llu\n");
+
+static int clock_debug_hwcg_get(void *data, u64 *val)
+{
+	struct clk *clock = data;
+
+	if (clock->ops->in_hwcg_mode)
+		*val = !!clock->ops->in_hwcg_mode(clock);
+	else
+		*val = 0;
+	return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(clock_hwcg_fops, clock_debug_hwcg_get,
+			NULL, "%llu\n");
+
+static void clock_print_fmax_by_level(struct seq_file *m, int level)
+{
+	struct clk *clock = m->private;
+	struct clk_vdd_class *vdd_class = clock->vdd_class;
+	int off, i, vdd_level, nregs = vdd_class->num_regulators;
+
+	vdd_level = find_vdd_level(clock, clock->rate);
+
+	seq_printf(m, "%2s%10lu", vdd_level == level ? "[" : "",
+		clock->fmax[level]);
+	for (i = 0; i < nregs; i++) {
+		off = nregs*level + i;
+		if (vdd_class->vdd_uv)
+			seq_printf(m, "%10u", vdd_class->vdd_uv[off]);
+		if (vdd_class->vdd_ua)
+			seq_printf(m, "%10u", vdd_class->vdd_ua[off]);
+	}
+
+	if (vdd_level == level)
+		seq_puts(m, "]");
+	seq_puts(m, "\n");
+}
+
+static int fmax_rates_show(struct seq_file *m, void *unused)
+{
+	struct clk *clock = m->private;
+	struct clk_vdd_class *vdd_class = clock->vdd_class;
+	int level = 0, i, nregs = vdd_class->num_regulators;
+	char reg_name[10];
+
+	int vdd_level = find_vdd_level(clock, clock->rate);
+	if (vdd_level < 0) {
+		seq_printf(m, "find_vdd_level failed for %s, %ld\n",
+			clock->dbg_name, clock->rate);
+		return 0;
+	}
+
+	seq_printf(m, "%12s", "");
+	for (i = 0; i < nregs; i++) {
+		snprintf(reg_name, ARRAY_SIZE(reg_name), "reg %d", i);
+		seq_printf(m, "%10s", reg_name);
+		if (vdd_class->vdd_ua)
+			seq_printf(m, "%10s", "");
+	}
+
+	seq_printf(m, "\n%12s", "freq");
+	for (i = 0; i < nregs; i++) {
+		seq_printf(m, "%10s", "uV");
+		if (vdd_class->vdd_ua)
+			seq_printf(m, "%10s", "uA");
+	}
+	seq_printf(m, "\n");
+
+	for (level = 0; level < clock->num_fmax; level++)
+		clock_print_fmax_by_level(m, level);
+
+	return 0;
+}
+
+static int fmax_rates_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, fmax_rates_show, inode->i_private);
+}
+
+static const struct file_operations fmax_rates_fops = {
+	.open		= fmax_rates_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= seq_release,
+};
+
+static int orphan_list_show(struct seq_file *m, void *unused)
+{
+	struct clk *c, *safe;
+
+	list_for_each_entry_safe(c, safe, &orphan_clk_list, list)
+		seq_printf(m, "%s\n", c->dbg_name);
+
+	return 0;
+}
+
+static int orphan_list_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, orphan_list_show, inode->i_private);
+}
+
+static const struct file_operations orphan_list_fops = {
+	.open		= orphan_list_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= seq_release,
+};
+
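+/*
+ * Route debug output to a seq_file when one is available (debugfs
+ * reads), to pr_cont when continuing a partially printed line, and to
+ * pr_info otherwise.
+ */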
+#define clock_debug_output(m, c, fmt, ...)		\
+do {							\
+	if (m)						\
+		seq_printf(m, fmt, ##__VA_ARGS__);	\
+	else if (c)					\
+		pr_cont(fmt, ##__VA_ARGS__);		\
+	else						\
+		pr_info(fmt, ##__VA_ARGS__);		\
+} while (0)
+
+static int clock_debug_print_clock(struct clk *c, struct seq_file *m)
+{
+	char *start = "";
+
+	if (!c || !c->prepare_count)
+		return 0;
+
+	clock_debug_output(m, 0, "\t");
+	do {
+		if (c->vdd_class)
+			clock_debug_output(m, 1, "%s%s:%u:%u [%ld, %d]", start,
+				c->dbg_name, c->prepare_count, c->count,
+				c->rate, find_vdd_level(c, c->rate));
+		else
+			clock_debug_output(m, 1, "%s%s:%u:%u [%ld]", start,
+				c->dbg_name, c->prepare_count, c->count,
+				c->rate);
+		start = " -> ";
+	} while ((c = clk_get_parent(c)));
+
+	clock_debug_output(m, 1, "\n");
+
+	return 1;
+}
+
+/**
+ * clock_debug_print_enabled_clocks() - Print names of enabled clocks
+ */
+static void clock_debug_print_enabled_clocks(struct seq_file *m)
+{
+	struct clk *c;
+	int cnt = 0;
+
+	if (!mutex_trylock(&clk_list_lock)) {
+		pr_err("clock-debug: Clocks are being registered. Cannot print clock state now.\n");
+		return;
+	}
+	clock_debug_output(m, 0, "Enabled clocks:\n");
+	list_for_each_entry(c, &clk_list, list) {
+		cnt += clock_debug_print_clock(c, m);
+	}
+	mutex_unlock(&clk_list_lock);
+
+	if (cnt)
+		clock_debug_output(m, 0, "Enabled clock count: %d\n", cnt);
+	else
+		clock_debug_output(m, 0, "No clocks enabled.\n");
+}
+
+static int enabled_clocks_show(struct seq_file *m, void *unused)
+{
+	clock_debug_print_enabled_clocks(m);
+	return 0;
+}
+
+static int enabled_clocks_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, enabled_clocks_show, inode->i_private);
+}
+
+static const struct file_operations enabled_clocks_fops = {
+	.open		= enabled_clocks_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= seq_release,
+};
+
+static int trace_clocks_show(struct seq_file *m, void *unused)
+{
+	struct clk *c;
+	int total_cnt = 0;
+
+	if (!mutex_trylock(&clk_list_lock)) {
+		pr_err("trace_clocks: Clocks are being registered. Cannot trace clock state now.\n");
+		return 1;
+	}
+	list_for_each_entry(c, &clk_list, list) {
+		int vlevel = 0;
+
+		if (c->num_fmax)
+			vlevel = find_vdd_level(c, c->rate);
+		trace_clock_state(c->dbg_name, c->prepare_count, c->count,
+					c->rate, vlevel);
+		total_cnt++;
+	}
+	mutex_unlock(&clk_list_lock);
+	clock_debug_output(m, 0, "Total clock count: %d\n", total_cnt);
+
+	return 0;
+}
+
+static int trace_clocks_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, trace_clocks_show, inode->i_private);
+}
+
+static const struct file_operations trace_clocks_fops = {
+	.open		= trace_clocks_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= seq_release,
+};
+
+static int list_rates_show(struct seq_file *m, void *unused)
+{
+	struct clk *clock = m->private;
+	int level, i = 0;
+	unsigned long rate, fmax = 0;
+
+	/* Find max frequency supported within voltage constraints. */
+	if (!clock->vdd_class) {
+		fmax = ULONG_MAX;
+	} else {
+		for (level = 0; level < clock->num_fmax; level++)
+			if (clock->fmax[level])
+				fmax = clock->fmax[level];
+	}
+
+	/*
+	 * List supported frequencies <= fmax. Higher frequencies may appear in
+	 * the frequency table, but are not valid and should not be listed.
+	 */
+	while (!IS_ERR_VALUE(rate = clock->ops->list_rate(clock, i++))) {
+		if (rate <= fmax)
+			seq_printf(m, "%lu\n", rate);
+	}
+
+	return 0;
+}
+
+static int list_rates_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, list_rates_show, inode->i_private);
+}
+
+static const struct file_operations list_rates_fops = {
+	.open		= list_rates_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= seq_release,
+};
+
+static ssize_t clock_parent_read(struct file *filp, char __user *ubuf,
+		size_t cnt, loff_t *ppos)
+{
+	struct clk *clock = filp->private_data;
+	struct clk *p = clock->parent;
+	char name[256] = {0};
+
+	snprintf(name, sizeof(name), "%s\n", p ? p->dbg_name : "None");
+
+	return simple_read_from_buffer(ubuf, cnt, ppos, name, strlen(name));
+}
+
+static ssize_t clock_parent_write(struct file *filp,
+		const char __user *ubuf, size_t cnt, loff_t *ppos)
+{
+	struct clk *clock = filp->private_data;
+	char buf[256];
+	char *cmp;
+	int ret;
+	struct clk *parent = NULL;
+
+	cnt = min(cnt, sizeof(buf) - 1);
+	if (copy_from_user(&buf, ubuf, cnt))
+		return -EFAULT;
+	buf[cnt] = '\0';
+	cmp = strstrip(buf);
+
+	mutex_lock(&clk_list_lock);
+	list_for_each_entry(parent, &clk_list, list) {
+		if (!strcmp(cmp, parent->dbg_name))
+			break;
+	}
+
+	if (&parent->list == &clk_list) {
+		ret = -EINVAL;
+		goto err;
+	}
+
+	mutex_unlock(&clk_list_lock);
+	ret = clk_set_parent(clock, parent);
+	if (ret)
+		return ret;
+
+	return cnt;
+err:
+	mutex_unlock(&clk_list_lock);
+	return ret;
+}
+
+static const struct file_operations clock_parent_fops = {
+	.open		= simple_open,
+	.read		= clock_parent_read,
+	.write		= clock_parent_write,
+};
+
+void clk_debug_print_hw(struct clk *clk, struct seq_file *f)
+{
+	void __iomem *base;
+	struct clk_register_data *regs;
+	u32 i, j, size;
+
+	if (IS_ERR_OR_NULL(clk))
+		return;
+
+	clk_debug_print_hw(clk->parent, f);
+
+	clock_debug_output(f, false, "%s\n", clk->dbg_name);
+
+	if (!clk->ops->list_registers)
+		return;
+
+	j = 0;
+	base = clk->ops->list_registers(clk, j, &regs, &size);
+	while (!IS_ERR(base)) {
+		for (i = 0; i < size; i++) {
+			u32 val = readl_relaxed(base + regs[i].offset);
+			clock_debug_output(f, false, "%20s: 0x%.8x\n",
+						regs[i].name, val);
+		}
+		j++;
+		base = clk->ops->list_registers(clk, j, &regs, &size);
+	}
+}
+
+static int print_hw_show(struct seq_file *m, void *unused)
+{
+	struct clk *c = m->private;
+
+	clk_debug_print_hw(c, m);
+
+	return 0;
+}
+
+static int print_hw_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, print_hw_show, inode->i_private);
+}
+
+static const struct file_operations clock_print_hw_fops = {
+	.open		= print_hw_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= seq_release,
+};
+
+static void clock_measure_add(struct clk *clock)
+{
+	if (IS_ERR_OR_NULL(measure))
+		return;
+
+	if (clk_set_parent(measure, clock))
+		return;
+
+	debugfs_create_file("measure", S_IRUGO, clock->clk_dir, clock,
+				&clock_measure_fops);
+}
+
+static int clock_debug_add(struct clk *clock)
+{
+	char temp[50], *ptr;
+	struct dentry *clk_dir;
+
+	if (!debugfs_base)
+		return -ENOMEM;
+
+	strlcpy(temp, clock->dbg_name, ARRAY_SIZE(temp));
+	for (ptr = temp; *ptr; ptr++)
+		*ptr = tolower(*ptr);
+
+	clk_dir = debugfs_create_dir(temp, debugfs_base);
+	if (!clk_dir)
+		return -ENOMEM;
+
+	clock->clk_dir = clk_dir;
+
+	if (!debugfs_create_file("rate", S_IRUGO | S_IWUSR, clk_dir,
+				clock, &clock_rate_fops))
+		goto error;
+
+	if (!debugfs_create_file("enable", S_IRUGO | S_IWUSR, clk_dir,
+				clock, &clock_enable_fops))
+		goto error;
+
+	if (!debugfs_create_file("is_local", S_IRUGO, clk_dir, clock,
+				&clock_local_fops))
+		goto error;
+
+	if (!debugfs_create_file("has_hw_gating", S_IRUGO, clk_dir, clock,
+				&clock_hwcg_fops))
+		goto error;
+
+	if (clock->ops->list_rate)
+		if (!debugfs_create_file("list_rates",
+				S_IRUGO, clk_dir, clock, &list_rates_fops))
+			goto error;
+
+	if (clock->vdd_class && !debugfs_create_file("fmax_rates",
+				S_IRUGO, clk_dir, clock, &fmax_rates_fops))
+			goto error;
+
+	if (!debugfs_create_file("parent", S_IRUGO, clk_dir, clock,
+				&clock_parent_fops))
+			goto error;
+
+	if (!debugfs_create_file("print", S_IRUGO, clk_dir, clock,
+				&clock_print_hw_fops))
+			goto error;
+
+	clock_measure_add(clock);
+
+	return 0;
+error:
+	debugfs_remove_recursive(clk_dir);
+	return -ENOMEM;
+}
+
+static DEFINE_MUTEX(clk_debug_lock);
+static int clk_debug_init_once;
+
+/**
+ * clock_debug_init() - Initialize clock debugfs
+ * Lock clk_debug_lock before invoking this function.
+ */
+static int clock_debug_init(void)
+{
+	if (clk_debug_init_once)
+		return 0;
+
+	clk_debug_init_once = 1;
+
+	debugfs_base = debugfs_create_dir("clk", NULL);
+	if (!debugfs_base)
+		return -ENOMEM;
+
+	if (!debugfs_create_u32("debug_suspend", S_IRUGO | S_IWUSR,
+				debugfs_base, &debug_suspend)) {
+		debugfs_remove_recursive(debugfs_base);
+		return -ENOMEM;
+	}
+
+	if (!debugfs_create_file("enabled_clocks", S_IRUGO, debugfs_base, NULL,
+				&enabled_clocks_fops))
+		return -ENOMEM;
+
+	if (!debugfs_create_file("orphan_list", S_IRUGO, debugfs_base, NULL,
+				&orphan_list_fops))
+		return -ENOMEM;
+
+	if (!debugfs_create_file("trace_clocks", S_IRUGO, debugfs_base, NULL,
+				&trace_clocks_fops))
+		return -ENOMEM;
+
+	return 0;
+}
+
+/**
+ * clock_debug_register() - Add a clock to the clock debugfs hierarchy
+ * @clk: Clock to create debugfs nodes for
+ */
+int clock_debug_register(struct clk *clk)
+{
+	int ret = 0;
+	struct clk *c;
+
+	mutex_lock(&clk_list_lock);
+	if (!list_empty(&clk->list))
+		goto out;
+
+	ret = clock_debug_init();
+	if (ret)
+		goto out;
+
+	if (IS_ERR_OR_NULL(measure)) {
+		if (clk->flags & CLKFLAG_MEASURE)
+			measure = clk;
+		if (!IS_ERR_OR_NULL(measure)) {
+			list_for_each_entry(c, &clk_list, list)
+				clock_measure_add(c);
+		}
+	}
+
+	list_add_tail(&clk->list, &clk_list);
+	clock_debug_add(clk);
+out:
+	mutex_unlock(&clk_list_lock);
+	return ret;
+}
+
+/*
+ * Print the names of enabled clocks and their parents if debug_suspend is set
+ */
+void clock_debug_print_enabled(void)
+{
+	if (likely(!debug_suspend))
+		return;
+
+	clock_debug_print_enabled_clocks(NULL);
+}
diff -Nruw linux-4.4.115-fbx/drivers/clk/msm./clock-dummy.c linux-4.4.115-fbx/drivers/clk/msm/clock-dummy.c
--- linux-4.4.115-fbx/drivers/clk/msm./clock-dummy.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/clk/msm/clock-dummy.c	2019-10-29 09:26:23.473201514 +0100
@@ -0,0 +1,121 @@
+/* Copyright (c) 2011,2013-2014 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk/msm-clk-provider.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <soc/qcom/msm-clock-controller.h>
+
+static int dummy_clk_reset(struct clk *clk, enum clk_reset_action action)
+{
+	return 0;
+}
+
+static int dummy_clk_set_rate(struct clk *clk, unsigned long rate)
+{
+	clk->rate = rate;
+	return 0;
+}
+
+static int dummy_clk_set_max_rate(struct clk *clk, unsigned long rate)
+{
+	return 0;
+}
+
+static int dummy_clk_set_flags(struct clk *clk, unsigned flags)
+{
+	return 0;
+}
+
+static unsigned long dummy_clk_get_rate(struct clk *clk)
+{
+	return clk->rate;
+}
+
+static long dummy_clk_round_rate(struct clk *clk, unsigned long rate)
+{
+	return rate;
+}
+
+struct clk_ops clk_ops_dummy = {
+	.reset = dummy_clk_reset,
+	.set_rate = dummy_clk_set_rate,
+	.set_max_rate = dummy_clk_set_max_rate,
+	.set_flags = dummy_clk_set_flags,
+	.get_rate = dummy_clk_get_rate,
+	.round_rate = dummy_clk_round_rate,
+};
+
+struct clk dummy_clk = {
+	.dbg_name = "dummy_clk",
+	.ops = &clk_ops_dummy,
+	CLK_INIT(dummy_clk),
+};
+
+static void *dummy_clk_dt_parser(struct device *dev, struct device_node *np)
+{
+	struct clk *c;
+
+	c = devm_kzalloc(dev, sizeof(*c), GFP_KERNEL);
+	if (!c) {
+		dev_err(dev, "failed to allocate memory for %s\n", np->name);
+		return ERR_PTR(-ENOMEM);
+	}
+	c->ops = &clk_ops_dummy;
+
+	return msmclk_generic_clk_init(dev, np, c);
+}
+MSMCLK_PARSER(dummy_clk_dt_parser, "qcom,dummy-clk", 0);
+
+static struct clk *of_dummy_get(struct of_phandle_args *clkspec,
+				  void *data)
+{
+	u32 rate;
+
+	if (!of_property_read_u32(clkspec->np, "clock-frequency", &rate))
+		dummy_clk.rate = rate;
+
+	return &dummy_clk;
+}
+
+static struct of_device_id msm_clock_dummy_match_table[] = {
+	{ .compatible = "qcom,dummycc" },
+	{ .compatible = "fixed-clock" },
+	{}
+};
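+
+/*
+ * Illustrative DT node (a sketch, not part of this patch): a board can
+ * stub out a clock controller with the dummy provider, optionally
+ * setting the rate reported by of_dummy_get() via clock-frequency:
+ *
+ *	dummycc {
+ *		compatible = "qcom,dummycc";
+ *		clock-frequency = <19200000>;
+ *	};
+ */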
+
+static int msm_clock_dummy_probe(struct platform_device *pdev)
+{
+	int ret;
+
+	ret = of_clk_add_provider(pdev->dev.of_node, of_dummy_get, NULL);
+	if (ret)
+		return -ENOMEM;
+
+	dev_info(&pdev->dev, "Registered DUMMY provider.\n");
+	return ret;
+}
+
+static struct platform_driver msm_clock_dummy_driver = {
+	.probe = msm_clock_dummy_probe,
+	.driver = {
+		.name = "clock-dummy",
+		.of_match_table = msm_clock_dummy_match_table,
+		.owner = THIS_MODULE,
+	},
+};
+
+int __init msm_dummy_clk_init(void)
+{
+	return platform_driver_register(&msm_clock_dummy_driver);
+}
+arch_initcall(msm_dummy_clk_init);
diff -Nruw linux-4.4.115-fbx/drivers/clk/msm/clock-gcc-8998.c linux-4.4.115-fbx/drivers/clk/msm/clock-gcc-8998.c
--- linux-4.4.115-fbx/drivers/clk/msm/clock-gcc-8998.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/clk/msm/clock-gcc-8998.c	2019-01-22 16:16:23.015241987 +0100
@@ -0,0 +1,2877 @@
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/ctype.h>
+#include <linux/io.h>
+#include <linux/clk.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/clk/msm-clock-generic.h>
+
+#include <soc/qcom/clock-local2.h>
+#include <soc/qcom/clock-voter.h>
+#include <soc/qcom/clock-pll.h>
+#include <soc/qcom/clock-alpha-pll.h>
+#include <soc/qcom/rpm-smd.h>
+#include <soc/qcom/clock-rpm.h>
+
+#include <dt-bindings/clock/msm-clocks-8998.h>
+#include <dt-bindings/clock/msm-clocks-hwio-8998.h>
+
+#include "vdd-level-8998.h"
+#include "reset.h"
+
+static void __iomem *virt_base;
+static void __iomem *virt_dbgbase;
+
+#define cxo_clk_src_source_val 0
+#define cxo_clk_src_ao_source_val 0
+#define gpll0_out_main_source_val 1
+#define gpll0_ao_source_val 1
+#define gpll4_out_main_source_val 5
+#define gpll0_early_div_source_val 6
+
+#define FIXDIV(div) ((div) ? (2 * (div) - 1) : (0))
+
+#define F(f, s, div, m, n) \
+	{ \
+		.freq_hz = (f), \
+		.src_clk = &s.c, \
+		.m_val = (m), \
+		.n_val = ~((n)-(m)) * !!(n), \
+		.d_val = ~(n),\
+		.div_src_val = BVAL(4, 0, (int)FIXDIV(div)) \
+			| BVAL(10, 8, s##_source_val), \
+	}
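+
+/*
+ * Worked example: F(50000000, gpll0_out_main, 12, 0, 0) encodes 50 MHz from
+ * the 600 MHz gpll0_out_main. FIXDIV(12) = 23 because the RCG divider field
+ * stores 2 * div - 1; n == 0 makes n_val 0 (M/N counter bypassed); and
+ * BVAL(10, 8, ...) places gpll0_out_main_source_val (1) in the mux field.
+ * With the M/N counter in use, e.g. F(960000, cxo_clk_src, 10, 1, 2):
+ * 19.2 MHz / 10 * 1/2 = 960 kHz, with n_val = ~(2 - 1) and d_val = ~2.
+ */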
+
+static DEFINE_VDD_REGULATORS(vdd_dig, VDD_DIG_NUM, 1, vdd_corner, NULL);
+static DEFINE_VDD_REGULATORS(vdd_dig_ao, VDD_DIG_NUM, 1, vdd_corner, NULL);
+
+DEFINE_CLK_RPM_SMD_BRANCH(cxo_clk_src, cxo_clk_src_ao, RPM_MISC_CLK_TYPE,
+			  CXO_CLK_SRC_ID, 19200000);
+DEFINE_CLK_RPM_SMD(bimc_clk, bimc_a_clk, RPM_MEM_CLK_TYPE, BIMC_CLK_ID, NULL);
+DEFINE_CLK_RPM_SMD(cnoc_clk, cnoc_a_clk, RPM_BUS_CLK_TYPE, CNOC_CLK_ID, NULL);
+DEFINE_CLK_RPM_SMD(snoc_clk, snoc_a_clk, RPM_BUS_CLK_TYPE, SNOC_CLK_ID, NULL);
+DEFINE_CLK_RPM_SMD(cnoc_periph_clk, cnoc_periph_a_clk, RPM_BUS_CLK_TYPE,
+			CNOC_PERIPH_CLK_ID, NULL);
+static DEFINE_CLK_VOTER(cnoc_periph_keepalive_a_clk, &cnoc_periph_a_clk.c,
+			LONG_MAX);
+static DEFINE_CLK_VOTER(bimc_msmbus_clk, &bimc_clk.c, LONG_MAX);
+static DEFINE_CLK_VOTER(bimc_msmbus_a_clk, &bimc_a_clk.c, LONG_MAX);
+static DEFINE_CLK_VOTER(cnoc_msmbus_clk, &cnoc_clk.c, LONG_MAX);
+static DEFINE_CLK_VOTER(cnoc_msmbus_a_clk, &cnoc_a_clk.c, LONG_MAX);
+static DEFINE_CLK_BRANCH_VOTER(cxo_dwc3_clk, &cxo_clk_src.c);
+static DEFINE_CLK_BRANCH_VOTER(cxo_lpm_clk, &cxo_clk_src.c);
+static DEFINE_CLK_BRANCH_VOTER(cxo_otg_clk, &cxo_clk_src.c);
+static DEFINE_CLK_BRANCH_VOTER(cxo_pil_lpass_clk, &cxo_clk_src.c);
+static DEFINE_CLK_BRANCH_VOTER(cxo_pil_ssc_clk, &cxo_clk_src.c);
+static DEFINE_CLK_BRANCH_VOTER(cxo_pil_spss_clk, &cxo_clk_src.c);
+DEFINE_CLK_RPM_SMD_XO_BUFFER(div_clk1, div_clk1_ao, DIV_CLK1_ID);
+DEFINE_CLK_RPM_SMD_XO_BUFFER(div_clk2, div_clk2_ao, DIV_CLK2_ID);
+DEFINE_CLK_RPM_SMD_XO_BUFFER(div_clk3, div_clk3_ao, DIV_CLK3_ID);
+DEFINE_CLK_RPM_SMD(ipa_clk, ipa_a_clk, RPM_IPA_CLK_TYPE,
+		   IPA_CLK_ID, NULL);
+DEFINE_CLK_RPM_SMD(ce1_clk, ce1_a_clk, RPM_CE_CLK_TYPE,
+		   CE1_CLK_ID, NULL);
+DEFINE_CLK_RPM_SMD_XO_BUFFER(ln_bb_clk1, ln_bb_clk1_ao, LN_BB_CLK1_ID);
+DEFINE_CLK_RPM_SMD_XO_BUFFER_PINCTRL(ln_bb_clk1_pin, ln_bb_clk1_pin_ao,
+				     LN_BB_CLK1_PIN_ID);
+DEFINE_CLK_RPM_SMD_XO_BUFFER(ln_bb_clk2, ln_bb_clk2_ao, LN_BB_CLK2_ID);
+DEFINE_CLK_RPM_SMD_XO_BUFFER_PINCTRL(ln_bb_clk2_pin, ln_bb_clk2_pin_ao,
+				     LN_BB_CLK2_PIN_ID);
+DEFINE_CLK_RPM_SMD_XO_BUFFER(ln_bb_clk3, ln_bb_clk3_ao, LN_BB_CLK3_ID);
+DEFINE_CLK_RPM_SMD_XO_BUFFER_PINCTRL(ln_bb_clk3_pin, ln_bb_clk3_pin_ao,
+				     LN_BB_CLK3_PIN_ID);
+static DEFINE_CLK_VOTER(mcd_ce1_clk, &ce1_clk.c, 85710000);
+DEFINE_CLK_DUMMY(measure_only_bimc_hmss_axi_clk, 0);
+DEFINE_CLK_RPM_SMD(mmssnoc_axi_clk, mmssnoc_axi_a_clk,
+			RPM_MMAXI_CLK_TYPE, MMSSNOC_AXI_CLK_ID, NULL);
+DEFINE_CLK_RPM_SMD(aggre1_noc_clk, aggre1_noc_a_clk, RPM_AGGR_CLK_TYPE,
+				AGGR1_NOC_ID, NULL);
+DEFINE_CLK_RPM_SMD(aggre2_noc_clk, aggre2_noc_a_clk, RPM_AGGR_CLK_TYPE,
+				AGGR2_NOC_ID, NULL);
+static DEFINE_CLK_VOTER(qcedev_ce1_clk, &ce1_clk.c, 85710000);
+static DEFINE_CLK_VOTER(qcrypto_ce1_clk, &ce1_clk.c, 85710000);
+DEFINE_CLK_RPM_SMD_QDSS(qdss_clk, qdss_a_clk, RPM_MISC_CLK_TYPE,
+			QDSS_CLK_ID);
+static DEFINE_CLK_VOTER(qseecom_ce1_clk, &ce1_clk.c, 85710000);
+DEFINE_CLK_RPM_SMD_XO_BUFFER(rf_clk1, rf_clk1_ao, RF_CLK1_ID);
+DEFINE_CLK_RPM_SMD_XO_BUFFER_PINCTRL(rf_clk1_pin, rf_clk1_pin_ao,
+				     RF_CLK1_PIN_ID);
+DEFINE_CLK_RPM_SMD_XO_BUFFER(rf_clk2, rf_clk2_ao, RF_CLK2_ID);
+DEFINE_CLK_RPM_SMD_XO_BUFFER_PINCTRL(rf_clk2_pin, rf_clk2_pin_ao,
+				     RF_CLK2_PIN_ID);
+DEFINE_CLK_RPM_SMD_XO_BUFFER(rf_clk3, rf_clk3_ao, RF_CLK3_ID);
+DEFINE_CLK_RPM_SMD_XO_BUFFER_PINCTRL(rf_clk3_pin, rf_clk3_pin_ao,
+				     RF_CLK3_PIN_ID);
+static DEFINE_CLK_VOTER(scm_ce1_clk, &ce1_clk.c, 85710000);
+static DEFINE_CLK_VOTER(snoc_msmbus_clk, &snoc_clk.c, LONG_MAX);
+static DEFINE_CLK_VOTER(snoc_msmbus_a_clk, &snoc_a_clk.c, LONG_MAX);
+DEFINE_CLK_DUMMY(gcc_ce1_ahb_m_clk, 0);
+DEFINE_CLK_DUMMY(gcc_ce1_axi_m_clk, 0);
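+
+/*
+ * The *_VOTER clocks above own no hardware: each registers a rate request
+ * against its parent, which then runs at the maximum of the active votes
+ * (see clock-voter). The three 85.71 MHz crypto votes on ce1_clk aggregate
+ * that way, while the LONG_MAX bus votes simply pin their parents at fmax
+ * whenever enabled.
+ */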
+
+DEFINE_EXT_CLK(debug_mmss_clk, NULL);
+DEFINE_EXT_CLK(gpu_gcc_debug_clk, NULL);
+DEFINE_EXT_CLK(gfx_gcc_debug_clk, NULL);
+DEFINE_EXT_CLK(debug_cpu_clk, NULL);
+
+static unsigned int soft_vote_gpll0;
+
+static struct pll_vote_clk gpll0 = {
+	.en_reg = (void __iomem *)GCC_APCS_GPLL_ENA_VOTE,
+	.en_mask = BIT(0),
+	.status_reg = (void __iomem *)GCC_GPLL0_MODE,
+	.status_mask = BIT(31),
+	.soft_vote = &soft_vote_gpll0,
+	.soft_vote_mask = PLL_SOFT_VOTE_PRIMARY,
+	.base = &virt_base,
+	.c = {
+		.rate = 600000000,
+		.parent = &cxo_clk_src.c,
+		.dbg_name = "gpll0",
+		.ops = &clk_ops_pll_acpu_vote,
+		CLK_INIT(gpll0.c),
+	},
+};
+
+static struct pll_vote_clk gpll0_ao = {
+	.en_reg = (void __iomem *)GCC_APCS_GPLL_ENA_VOTE,
+	.en_mask = BIT(0),
+	.status_reg = (void __iomem *)GCC_GPLL0_MODE,
+	.status_mask = BIT(31),
+	.soft_vote = &soft_vote_gpll0,
+	.soft_vote_mask = PLL_SOFT_VOTE_ACPU,
+	.base = &virt_base,
+	.c = {
+		.rate = 600000000,
+		.parent = &cxo_clk_src_ao.c,
+		.dbg_name = "gpll0_ao",
+		.ops = &clk_ops_pll_acpu_vote,
+		CLK_INIT(gpll0_ao.c),
+	},
+};
+
+DEFINE_EXT_CLK(gpll0_out_main, &gpll0.c);
+DEFINE_EXT_CLK(gpll0_early_div, &gpll0.c);
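+
+/*
+ * gpll0_out_main is the full-rate 600 MHz PLL output; gpll0_early_div is
+ * taken here to be the PLL's divide-by-2 early output, i.e. 300 MHz, which
+ * is what makes table entries such as F(15000000, gpll0_early_div, 5, 1, 4)
+ * below work out: 300 MHz / 5 * 1/4 = 15 MHz.
+ */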
+
+static struct local_vote_clk gcc_mmss_gpll0_clk = {
+	.cbcr_reg = GCC_APCS_CLOCK_BRANCH_ENA_VOTE_1,
+	.vote_reg = GCC_APCS_CLOCK_BRANCH_ENA_VOTE_1,
+	.en_mask = BIT(1),
+	.base = &virt_base,
+	.halt_check = DELAY,
+	.c = {
+		.dbg_name = "gcc_mmss_gpll0_clk",
+		.parent = &gpll0.c,
+		.ops = &clk_ops_vote,
+		CLK_INIT(gcc_mmss_gpll0_clk.c),
+	},
+};
+
+static struct local_vote_clk gcc_mmss_gpll0_div_clk = {
+	.cbcr_reg = GCC_APCS_CLOCK_BRANCH_ENA_VOTE_1,
+	.vote_reg = GCC_APCS_CLOCK_BRANCH_ENA_VOTE_1,
+	.en_mask = BIT(0),
+	.base = &virt_base,
+	.halt_check = DELAY,
+	.c = {
+		.dbg_name = "gcc_mmss_gpll0_div_clk",
+		.parent = &gpll0.c,
+		.ops = &clk_ops_vote,
+		CLK_INIT(gcc_mmss_gpll0_div_clk.c),
+	},
+};
+
+static struct local_vote_clk gcc_gpu_gpll0_clk = {
+	.cbcr_reg = GCC_APCS_CLOCK_BRANCH_ENA_VOTE_1,
+	.vote_reg = GCC_APCS_CLOCK_BRANCH_ENA_VOTE_1,
+	.en_mask = BIT(4),
+	.base = &virt_base,
+	.halt_check = DELAY,
+	.c = {
+		.dbg_name = "gcc_gpu_gpll0_clk",
+		.parent = &gpll0.c,
+		.ops = &clk_ops_vote,
+		CLK_INIT(gcc_gpu_gpll0_clk.c),
+	},
+};
+
+static struct local_vote_clk gcc_gpu_gpll0_div_clk = {
+	.cbcr_reg = GCC_APCS_CLOCK_BRANCH_ENA_VOTE_1,
+	.vote_reg = GCC_APCS_CLOCK_BRANCH_ENA_VOTE_1,
+	.en_mask = BIT(3),
+	.base = &virt_base,
+	.halt_check = DELAY,
+	.c = {
+		.dbg_name = "gcc_gpu_gpll0_div_clk",
+		.parent = &gpll0.c,
+		.ops = &clk_ops_vote,
+		CLK_INIT(gcc_gpu_gpll0_div_clk.c),
+	},
+};
+
+static struct pll_vote_clk gpll4 = {
+	.en_reg = (void __iomem *)GCC_APCS_GPLL_ENA_VOTE,
+	.en_mask = BIT(4),
+	.status_reg = (void __iomem *)GCC_GPLL4_MODE,
+	.status_mask = BIT(31),
+	.base = &virt_base,
+	.c = {
+		.rate = 384000000,
+		.parent = &cxo_clk_src.c,
+		.dbg_name = "gpll4",
+		.ops = &clk_ops_pll_vote,
+		VDD_DIG_FMAX_MAP3(LOWER, 400000000, LOW, 800000000,
+					NOMINAL, 1600000000),
+		CLK_INIT(gpll4.c),
+	},
+};
+DEFINE_EXT_CLK(gpll4_out_main, &gpll4.c);
+
+static struct clk_freq_tbl ftbl_usb30_master_clk_src[] = {
+	F(  19200000,    cxo_clk_src,    1,    0,     0),
+	F( 60000000, gpll0_out_main,    10,    0,     0),
+	F( 120000000, gpll0_out_main,    5,    0,     0),
+	F( 150000000, gpll0_out_main,    4,    0,     0),
+	F_END
+};
+
+static struct rcg_clk usb30_master_clk_src = {
+	.cmd_rcgr_reg = GCC_USB30_MASTER_CMD_RCGR,
+	.set_rate = set_rate_mnd,
+	.freq_tbl = ftbl_usb30_master_clk_src,
+	.current_freq = &rcg_dummy_freq,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "usb30_master_clk_src",
+		.ops = &clk_ops_rcg_mnd,
+		VDD_DIG_FMAX_MAP4(LOWER, 66670000, LOW, 133330000,
+				NOMINAL, 200000000, HIGH, 240000000),
+		CLK_INIT(usb30_master_clk_src.c),
+	},
+};
+
+static struct clk_freq_tbl ftbl_pcie_aux_clk_src[] = {
+	F(  19200000,    cxo_clk_src,    1,    0,     0),
+	F_END
+};
+
+static struct rcg_clk pcie_aux_clk_src = {
+	.cmd_rcgr_reg = GCC_PCIE_AUX_CMD_RCGR,
+	.set_rate = set_rate_mnd,
+	.freq_tbl = ftbl_pcie_aux_clk_src,
+	.current_freq = &rcg_dummy_freq,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "pcie_aux_clk_src",
+		.ops = &clk_ops_rcg_mnd,
+		VDD_DIG_FMAX_MAP2(LOWER, 9600000, LOW, 19200000),
+		CLK_INIT(pcie_aux_clk_src.c),
+	},
+};
+
+static struct clk_freq_tbl ftbl_ufs_axi_clk_src[] = {
+	F(  50000000, gpll0_out_main,   12,    0,     0),
+	F( 100000000, gpll0_out_main,    6,    0,     0),
+	F( 200000000, gpll0_out_main,    3,    0,     0),
+	F( 240000000, gpll0_out_main,  2.5,    0,     0),
+	F_END
+};
+
+static struct rcg_clk ufs_axi_clk_src = {
+	.cmd_rcgr_reg = GCC_UFS_AXI_CMD_RCGR,
+	.set_rate = set_rate_mnd,
+	.freq_tbl = ftbl_ufs_axi_clk_src,
+	.current_freq = &rcg_dummy_freq,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "ufs_axi_clk_src",
+		.ops = &clk_ops_rcg_mnd,
+		VDD_DIG_FMAX_MAP4(LOWER, 50000000, LOW, 100000000,
+				NOMINAL, 200000000, HIGH, 240000000),
+		CLK_INIT(ufs_axi_clk_src.c),
+	},
+};
+
+static struct clk_freq_tbl ftbl_blsp_i2c_apps_clk_src[] = {
+	F(  19200000,    cxo_clk_src,    1,    0,     0),
+	F(  50000000, gpll0_out_main,   12,    0,     0),
+	F_END
+};
+
+static struct rcg_clk blsp1_qup1_i2c_apps_clk_src = {
+	.cmd_rcgr_reg = GCC_BLSP1_QUP1_I2C_APPS_CMD_RCGR,
+	.set_rate = set_rate_hid,
+	.freq_tbl = ftbl_blsp_i2c_apps_clk_src,
+	.current_freq = &rcg_dummy_freq,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "blsp1_qup1_i2c_apps_clk_src",
+		.ops = &clk_ops_rcg,
+		VDD_DIG_FMAX_MAP2(LOWER, 19200000, LOW, 50000000),
+		CLK_INIT(blsp1_qup1_i2c_apps_clk_src.c),
+	},
+};
+
+static struct clk_freq_tbl ftbl_blsp_qup_spi_apps_clk_src[] = {
+	F(    960000,    cxo_clk_src,   10,    1,     2),
+	F(   4800000,    cxo_clk_src,    4,    0,     0),
+	F(   9600000,    cxo_clk_src,    2,    0,     0),
+	F(  15000000, gpll0_early_div,   5,    1,     4),
+	F(  19200000,    cxo_clk_src,    1,    0,     0),
+	F(  25000000, gpll0_out_main,   12,    1,     2),
+	F(  50000000, gpll0_out_main,   12,    0,     0),
+	F_END
+};
+
+static struct rcg_clk blsp1_qup1_spi_apps_clk_src = {
+	.cmd_rcgr_reg = GCC_BLSP1_QUP1_SPI_APPS_CMD_RCGR,
+	.set_rate = set_rate_mnd,
+	.freq_tbl = ftbl_blsp_qup_spi_apps_clk_src,
+	.current_freq = &rcg_dummy_freq,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "blsp1_qup1_spi_apps_clk_src",
+		.ops = &clk_ops_rcg_mnd,
+		VDD_DIG_FMAX_MAP3(LOWER, 19200000, LOW, 25000000,
+					NOMINAL, 50000000),
+		CLK_INIT(blsp1_qup1_spi_apps_clk_src.c),
+	},
+};
+
+static struct rcg_clk blsp1_qup2_i2c_apps_clk_src = {
+	.cmd_rcgr_reg = GCC_BLSP1_QUP2_I2C_APPS_CMD_RCGR,
+	.set_rate = set_rate_hid,
+	.freq_tbl = ftbl_blsp_i2c_apps_clk_src,
+	.current_freq = &rcg_dummy_freq,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "blsp1_qup2_i2c_apps_clk_src",
+		.ops = &clk_ops_rcg,
+		VDD_DIG_FMAX_MAP2(LOWER, 19200000, LOW, 50000000),
+		CLK_INIT(blsp1_qup2_i2c_apps_clk_src.c),
+	},
+};
+
+static struct rcg_clk blsp1_qup2_spi_apps_clk_src = {
+	.cmd_rcgr_reg = GCC_BLSP1_QUP2_SPI_APPS_CMD_RCGR,
+	.set_rate = set_rate_mnd,
+	.freq_tbl = ftbl_blsp_qup_spi_apps_clk_src,
+	.current_freq = &rcg_dummy_freq,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "blsp1_qup2_spi_apps_clk_src",
+		.ops = &clk_ops_rcg_mnd,
+		VDD_DIG_FMAX_MAP3(LOWER, 19200000, LOW, 25000000,
+					NOMINAL, 50000000),
+		CLK_INIT(blsp1_qup2_spi_apps_clk_src.c),
+	},
+};
+
+static struct rcg_clk blsp1_qup3_i2c_apps_clk_src = {
+	.cmd_rcgr_reg = GCC_BLSP1_QUP3_I2C_APPS_CMD_RCGR,
+	.set_rate = set_rate_hid,
+	.freq_tbl = ftbl_blsp_i2c_apps_clk_src,
+	.current_freq = &rcg_dummy_freq,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "blsp1_qup3_i2c_apps_clk_src",
+		.ops = &clk_ops_rcg,
+		VDD_DIG_FMAX_MAP2(LOWER, 19200000, LOW, 50000000),
+		CLK_INIT(blsp1_qup3_i2c_apps_clk_src.c),
+	},
+};
+
+static struct rcg_clk blsp1_qup3_spi_apps_clk_src = {
+	.cmd_rcgr_reg = GCC_BLSP1_QUP3_SPI_APPS_CMD_RCGR,
+	.set_rate = set_rate_mnd,
+	.freq_tbl = ftbl_blsp_qup_spi_apps_clk_src,
+	.current_freq = &rcg_dummy_freq,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "blsp1_qup3_spi_apps_clk_src",
+		.ops = &clk_ops_rcg_mnd,
+		VDD_DIG_FMAX_MAP3(LOWER, 19200000, LOW, 25000000,
+					NOMINAL, 50000000),
+		CLK_INIT(blsp1_qup3_spi_apps_clk_src.c),
+	},
+};
+
+static struct rcg_clk blsp1_qup4_i2c_apps_clk_src = {
+	.cmd_rcgr_reg = GCC_BLSP1_QUP4_I2C_APPS_CMD_RCGR,
+	.set_rate = set_rate_hid,
+	.freq_tbl = ftbl_blsp_i2c_apps_clk_src,
+	.current_freq = &rcg_dummy_freq,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "blsp1_qup4_i2c_apps_clk_src",
+		.ops = &clk_ops_rcg,
+		VDD_DIG_FMAX_MAP2(LOWER, 19200000, LOW, 50000000),
+		CLK_INIT(blsp1_qup4_i2c_apps_clk_src.c),
+	},
+};
+
+static struct rcg_clk blsp1_qup4_spi_apps_clk_src = {
+	.cmd_rcgr_reg = GCC_BLSP1_QUP4_SPI_APPS_CMD_RCGR,
+	.set_rate = set_rate_mnd,
+	.freq_tbl = ftbl_blsp_qup_spi_apps_clk_src,
+	.current_freq = &rcg_dummy_freq,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "blsp1_qup4_spi_apps_clk_src",
+		.ops = &clk_ops_rcg_mnd,
+		VDD_DIG_FMAX_MAP3(LOWER, 19200000, LOW, 25000000,
+					NOMINAL, 50000000),
+		CLK_INIT(blsp1_qup4_spi_apps_clk_src.c),
+	},
+};
+
+static struct rcg_clk blsp1_qup5_i2c_apps_clk_src = {
+	.cmd_rcgr_reg = GCC_BLSP1_QUP5_I2C_APPS_CMD_RCGR,
+	.set_rate = set_rate_hid,
+	.freq_tbl = ftbl_blsp_i2c_apps_clk_src,
+	.current_freq = &rcg_dummy_freq,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "blsp1_qup5_i2c_apps_clk_src",
+		.ops = &clk_ops_rcg,
+		VDD_DIG_FMAX_MAP2(LOWER, 19200000, LOW, 50000000),
+		CLK_INIT(blsp1_qup5_i2c_apps_clk_src.c),
+	},
+};
+
+static struct rcg_clk blsp1_qup5_spi_apps_clk_src = {
+	.cmd_rcgr_reg = GCC_BLSP1_QUP5_SPI_APPS_CMD_RCGR,
+	.set_rate = set_rate_mnd,
+	.freq_tbl = ftbl_blsp_qup_spi_apps_clk_src,
+	.current_freq = &rcg_dummy_freq,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "blsp1_qup5_spi_apps_clk_src",
+		.ops = &clk_ops_rcg_mnd,
+		VDD_DIG_FMAX_MAP3(LOWER, 19200000, LOW, 25000000,
+					NOMINAL, 50000000),
+		CLK_INIT(blsp1_qup5_spi_apps_clk_src.c),
+	},
+};
+
+static struct rcg_clk blsp1_qup6_i2c_apps_clk_src = {
+	.cmd_rcgr_reg = GCC_BLSP1_QUP6_I2C_APPS_CMD_RCGR,
+	.set_rate = set_rate_hid,
+	.freq_tbl = ftbl_blsp_i2c_apps_clk_src,
+	.current_freq = &rcg_dummy_freq,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "blsp1_qup6_i2c_apps_clk_src",
+		.ops = &clk_ops_rcg,
+		VDD_DIG_FMAX_MAP2(LOWER, 19200000, LOW, 50000000),
+		CLK_INIT(blsp1_qup6_i2c_apps_clk_src.c),
+	},
+};
+
+static struct rcg_clk blsp1_qup6_spi_apps_clk_src = {
+	.cmd_rcgr_reg = GCC_BLSP1_QUP6_SPI_APPS_CMD_RCGR,
+	.set_rate = set_rate_mnd,
+	.freq_tbl = ftbl_blsp_qup_spi_apps_clk_src,
+	.current_freq = &rcg_dummy_freq,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "blsp1_qup6_spi_apps_clk_src",
+		.ops = &clk_ops_rcg_mnd,
+		VDD_DIG_FMAX_MAP3(LOWER, 19200000, LOW, 25000000,
+					NOMINAL, 50000000),
+		CLK_INIT(blsp1_qup6_spi_apps_clk_src.c),
+	},
+};
+
+static struct clk_freq_tbl ftbl_blsp_uart_apps_clk_src[] = {
+	F(   3686400, gpll0_early_div,   1,  192, 15625),
+	F(   7372800, gpll0_early_div,   1,  384, 15625),
+	F(  14745600, gpll0_early_div,   1,  768, 15625),
+	F(  16000000, gpll0_early_div,   1,    4,    75),
+	F(  19200000,    cxo_clk_src,    1,    0,     0),
+	F(  24000000, gpll0_out_main,    5,    1,     5),
+	F(  32000000, gpll0_out_main,    1,    4,    75),
+	F(  40000000, gpll0_out_main,   15,    0,     0),
+	F(  46400000, gpll0_out_main,    1,   29,   375),
+	F(  48000000, gpll0_out_main, 12.5,    0,     0),
+	F(  51200000, gpll0_out_main,    1,   32,   375),
+	F(  56000000, gpll0_out_main,    1,    7,    75),
+	F(  58982400, gpll0_out_main,    1, 1536, 15625),
+	F(  60000000, gpll0_out_main,   10,    0,     0),
+	F(  63157895, gpll0_out_main,  9.5,    0,     0),
+	F_END
+};
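+
+/*
+ * These UART rates are standard baud multiples, e.g. 3.6864 MHz =
+ * 300 MHz * 192/15625, i.e. 16x oversampling for 230400 baud, and
+ * 63157895 Hz is 600 MHz / 9.5 via the half-integer divider encoding
+ * in FIXDIV().
+ */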
+
+static struct rcg_clk blsp1_uart1_apps_clk_src = {
+	.cmd_rcgr_reg = GCC_BLSP1_UART1_APPS_CMD_RCGR,
+	.set_rate = set_rate_mnd,
+	.freq_tbl = ftbl_blsp_uart_apps_clk_src,
+	.current_freq = &rcg_dummy_freq,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "blsp1_uart1_apps_clk_src",
+		.ops = &clk_ops_rcg_mnd,
+		VDD_DIG_FMAX_MAP3(LOWER, 19200000, LOW, 31580000,
+					NOMINAL, 63160000),
+		CLK_INIT(blsp1_uart1_apps_clk_src.c),
+	},
+};
+
+static struct rcg_clk blsp1_uart2_apps_clk_src = {
+	.cmd_rcgr_reg = GCC_BLSP1_UART2_APPS_CMD_RCGR,
+	.set_rate = set_rate_mnd,
+	.freq_tbl = ftbl_blsp_uart_apps_clk_src,
+	.current_freq = &rcg_dummy_freq,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "blsp1_uart2_apps_clk_src",
+		.ops = &clk_ops_rcg_mnd,
+		VDD_DIG_FMAX_MAP3(LOWER, 19200000, LOW, 31580000,
+					NOMINAL, 63160000),
+		CLK_INIT(blsp1_uart2_apps_clk_src.c),
+	},
+};
+
+static struct rcg_clk blsp1_uart3_apps_clk_src = {
+	.cmd_rcgr_reg = GCC_BLSP1_UART3_APPS_CMD_RCGR,
+	.set_rate = set_rate_mnd,
+	.freq_tbl = ftbl_blsp_uart_apps_clk_src,
+	.current_freq = &rcg_dummy_freq,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "blsp1_uart3_apps_clk_src",
+		.ops = &clk_ops_rcg_mnd,
+		VDD_DIG_FMAX_MAP3(LOWER, 19200000, LOW, 31580000,
+					NOMINAL, 63160000),
+		CLK_INIT(blsp1_uart3_apps_clk_src.c),
+	},
+};
+
+static struct rcg_clk blsp2_qup1_i2c_apps_clk_src = {
+	.cmd_rcgr_reg = GCC_BLSP2_QUP1_I2C_APPS_CMD_RCGR,
+	.set_rate = set_rate_hid,
+	.freq_tbl = ftbl_blsp_i2c_apps_clk_src,
+	.current_freq = &rcg_dummy_freq,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "blsp2_qup1_i2c_apps_clk_src",
+		.ops = &clk_ops_rcg,
+		VDD_DIG_FMAX_MAP2(LOWER, 19200000, LOW, 50000000),
+		CLK_INIT(blsp2_qup1_i2c_apps_clk_src.c),
+	},
+};
+
+static struct rcg_clk blsp2_qup1_spi_apps_clk_src = {
+	.cmd_rcgr_reg = GCC_BLSP2_QUP1_SPI_APPS_CMD_RCGR,
+	.set_rate = set_rate_mnd,
+	.freq_tbl = ftbl_blsp_qup_spi_apps_clk_src,
+	.current_freq = &rcg_dummy_freq,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "blsp2_qup1_spi_apps_clk_src",
+		.ops = &clk_ops_rcg_mnd,
+		VDD_DIG_FMAX_MAP3(LOWER, 19200000, LOW, 25000000,
+					NOMINAL, 50000000),
+		CLK_INIT(blsp2_qup1_spi_apps_clk_src.c),
+	},
+};
+
+static struct rcg_clk blsp2_qup2_i2c_apps_clk_src = {
+	.cmd_rcgr_reg = GCC_BLSP2_QUP2_I2C_APPS_CMD_RCGR,
+	.set_rate = set_rate_hid,
+	.freq_tbl = ftbl_blsp_i2c_apps_clk_src,
+	.current_freq = &rcg_dummy_freq,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "blsp2_qup2_i2c_apps_clk_src",
+		.ops = &clk_ops_rcg,
+		VDD_DIG_FMAX_MAP2(LOWER, 19200000, LOW, 50000000),
+		CLK_INIT(blsp2_qup2_i2c_apps_clk_src.c),
+	},
+};
+
+static struct rcg_clk blsp2_qup2_spi_apps_clk_src = {
+	.cmd_rcgr_reg = GCC_BLSP2_QUP2_SPI_APPS_CMD_RCGR,
+	.set_rate = set_rate_mnd,
+	.freq_tbl = ftbl_blsp_qup_spi_apps_clk_src,
+	.current_freq = &rcg_dummy_freq,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "blsp2_qup2_spi_apps_clk_src",
+		.ops = &clk_ops_rcg_mnd,
+		VDD_DIG_FMAX_MAP3(LOWER, 19200000, LOW, 25000000,
+					NOMINAL, 50000000),
+		CLK_INIT(blsp2_qup2_spi_apps_clk_src.c),
+	},
+};
+
+static struct rcg_clk blsp2_qup3_i2c_apps_clk_src = {
+	.cmd_rcgr_reg = GCC_BLSP2_QUP3_I2C_APPS_CMD_RCGR,
+	.set_rate = set_rate_hid,
+	.freq_tbl = ftbl_blsp_i2c_apps_clk_src,
+	.current_freq = &rcg_dummy_freq,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "blsp2_qup3_i2c_apps_clk_src",
+		.ops = &clk_ops_rcg,
+		VDD_DIG_FMAX_MAP2(LOWER, 19200000, LOW, 50000000),
+		CLK_INIT(blsp2_qup3_i2c_apps_clk_src.c),
+	},
+};
+
+static struct rcg_clk blsp2_qup3_spi_apps_clk_src = {
+	.cmd_rcgr_reg = GCC_BLSP2_QUP3_SPI_APPS_CMD_RCGR,
+	.set_rate = set_rate_mnd,
+	.freq_tbl = ftbl_blsp_qup_spi_apps_clk_src,
+	.current_freq = &rcg_dummy_freq,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "blsp2_qup3_spi_apps_clk_src",
+		.ops = &clk_ops_rcg_mnd,
+		VDD_DIG_FMAX_MAP3(LOWER, 19200000, LOW, 25000000,
+					NOMINAL, 50000000),
+		CLK_INIT(blsp2_qup3_spi_apps_clk_src.c),
+	},
+};
+
+static struct rcg_clk blsp2_qup4_i2c_apps_clk_src = {
+	.cmd_rcgr_reg = GCC_BLSP2_QUP4_I2C_APPS_CMD_RCGR,
+	.set_rate = set_rate_hid,
+	.freq_tbl = ftbl_blsp_i2c_apps_clk_src,
+	.current_freq = &rcg_dummy_freq,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "blsp2_qup4_i2c_apps_clk_src",
+		.ops = &clk_ops_rcg,
+		VDD_DIG_FMAX_MAP2(LOWER, 19200000, LOW, 50000000),
+		CLK_INIT(blsp2_qup4_i2c_apps_clk_src.c),
+	},
+};
+
+static struct rcg_clk blsp2_qup4_spi_apps_clk_src = {
+	.cmd_rcgr_reg = GCC_BLSP2_QUP4_SPI_APPS_CMD_RCGR,
+	.set_rate = set_rate_mnd,
+	.freq_tbl = ftbl_blsp_qup_spi_apps_clk_src,
+	.current_freq = &rcg_dummy_freq,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "blsp2_qup4_spi_apps_clk_src",
+		.ops = &clk_ops_rcg_mnd,
+		VDD_DIG_FMAX_MAP3(LOWER, 19200000, LOW, 25000000,
+					NOMINAL, 50000000),
+		CLK_INIT(blsp2_qup4_spi_apps_clk_src.c),
+	},
+};
+
+static struct rcg_clk blsp2_qup5_i2c_apps_clk_src = {
+	.cmd_rcgr_reg = GCC_BLSP2_QUP5_I2C_APPS_CMD_RCGR,
+	.set_rate = set_rate_hid,
+	.freq_tbl = ftbl_blsp_i2c_apps_clk_src,
+	.current_freq = &rcg_dummy_freq,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "blsp2_qup5_i2c_apps_clk_src",
+		.ops = &clk_ops_rcg,
+		VDD_DIG_FMAX_MAP2(LOWER, 19200000, LOW, 50000000),
+		CLK_INIT(blsp2_qup5_i2c_apps_clk_src.c),
+	},
+};
+
+static struct rcg_clk blsp2_qup5_spi_apps_clk_src = {
+	.cmd_rcgr_reg = GCC_BLSP2_QUP5_SPI_APPS_CMD_RCGR,
+	.set_rate = set_rate_mnd,
+	.freq_tbl = ftbl_blsp_qup_spi_apps_clk_src,
+	.current_freq = &rcg_dummy_freq,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "blsp2_qup5_spi_apps_clk_src",
+		.ops = &clk_ops_rcg_mnd,
+		VDD_DIG_FMAX_MAP3(LOWER, 19200000, LOW, 25000000,
+					NOMINAL, 50000000),
+		CLK_INIT(blsp2_qup5_spi_apps_clk_src.c),
+	},
+};
+
+static struct rcg_clk blsp2_qup6_i2c_apps_clk_src = {
+	.cmd_rcgr_reg = GCC_BLSP2_QUP6_I2C_APPS_CMD_RCGR,
+	.set_rate = set_rate_hid,
+	.freq_tbl = ftbl_blsp_i2c_apps_clk_src,
+	.current_freq = &rcg_dummy_freq,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "blsp2_qup6_i2c_apps_clk_src",
+		.ops = &clk_ops_rcg,
+		VDD_DIG_FMAX_MAP2(LOWER, 19200000, LOW, 50000000),
+		CLK_INIT(blsp2_qup6_i2c_apps_clk_src.c),
+	},
+};
+
+static struct rcg_clk blsp2_qup6_spi_apps_clk_src = {
+	.cmd_rcgr_reg = GCC_BLSP2_QUP6_SPI_APPS_CMD_RCGR,
+	.set_rate = set_rate_mnd,
+	.freq_tbl = ftbl_blsp_qup_spi_apps_clk_src,
+	.current_freq = &rcg_dummy_freq,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "blsp2_qup6_spi_apps_clk_src",
+		.ops = &clk_ops_rcg_mnd,
+		VDD_DIG_FMAX_MAP3(LOWER, 19200000, LOW, 25000000,
+					NOMINAL, 50000000),
+		CLK_INIT(blsp2_qup6_spi_apps_clk_src.c),
+	},
+};
+
+static struct rcg_clk blsp2_uart1_apps_clk_src = {
+	.cmd_rcgr_reg = GCC_BLSP2_UART1_APPS_CMD_RCGR,
+	.set_rate = set_rate_mnd,
+	.freq_tbl = ftbl_blsp_uart_apps_clk_src,
+	.current_freq = &rcg_dummy_freq,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "blsp2_uart1_apps_clk_src",
+		.ops = &clk_ops_rcg_mnd,
+		VDD_DIG_FMAX_MAP3(LOWER, 19200000, LOW, 31580000,
+					NOMINAL, 63160000),
+		CLK_INIT(blsp2_uart1_apps_clk_src.c),
+	},
+};
+
+static struct rcg_clk blsp2_uart2_apps_clk_src = {
+	.cmd_rcgr_reg = GCC_BLSP2_UART2_APPS_CMD_RCGR,
+	.set_rate = set_rate_mnd,
+	.freq_tbl = ftbl_blsp_uart_apps_clk_src,
+	.current_freq = &rcg_dummy_freq,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "blsp2_uart2_apps_clk_src",
+		.ops = &clk_ops_rcg_mnd,
+		VDD_DIG_FMAX_MAP3(LOWER, 19200000, LOW, 31580000,
+					NOMINAL, 63160000),
+		CLK_INIT(blsp2_uart2_apps_clk_src.c),
+	},
+};
+
+static struct rcg_clk blsp2_uart3_apps_clk_src = {
+	.cmd_rcgr_reg = GCC_BLSP2_UART3_APPS_CMD_RCGR,
+	.set_rate = set_rate_mnd,
+	.freq_tbl = ftbl_blsp_uart_apps_clk_src,
+	.current_freq = &rcg_dummy_freq,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "blsp2_uart3_apps_clk_src",
+		.ops = &clk_ops_rcg_mnd,
+		VDD_DIG_FMAX_MAP3(LOWER, 19200000, LOW, 31580000,
+					NOMINAL, 63160000),
+		CLK_INIT(blsp2_uart3_apps_clk_src.c),
+	},
+};
+
+static struct clk_freq_tbl ftbl_gp_clk_src[] = {
+	F(  19200000,    cxo_clk_src,    1,    0,     0),
+	F( 100000000, gpll0_out_main,    6,    0,     0),
+	F( 200000000, gpll0_out_main,    3,    0,     0),
+	F_END
+};
+
+static struct rcg_clk gp1_clk_src = {
+	.cmd_rcgr_reg = GCC_GP1_CMD_RCGR,
+	.set_rate = set_rate_mnd,
+	.freq_tbl = ftbl_gp_clk_src,
+	.current_freq = &rcg_dummy_freq,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "gp1_clk_src",
+		.ops = &clk_ops_rcg_mnd,
+		VDD_DIG_FMAX_MAP3(LOWER, 50000000, LOW, 100000000,
+					NOMINAL, 200000000),
+		CLK_INIT(gp1_clk_src.c),
+	},
+};
+
+static struct rcg_clk gp2_clk_src = {
+	.cmd_rcgr_reg = GCC_GP2_CMD_RCGR,
+	.set_rate = set_rate_mnd,
+	.freq_tbl = ftbl_gp_clk_src,
+	.current_freq = &rcg_dummy_freq,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "gp2_clk_src",
+		.ops = &clk_ops_rcg_mnd,
+		VDD_DIG_FMAX_MAP3(LOWER, 50000000, LOW, 100000000,
+					NOMINAL, 200000000),
+		CLK_INIT(gp2_clk_src.c),
+	},
+};
+
+static struct rcg_clk gp3_clk_src = {
+	.cmd_rcgr_reg = GCC_GP3_CMD_RCGR,
+	.set_rate = set_rate_mnd,
+	.freq_tbl = ftbl_gp_clk_src,
+	.current_freq = &rcg_dummy_freq,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "gp3_clk_src",
+		.ops = &clk_ops_rcg_mnd,
+		VDD_DIG_FMAX_MAP3(LOWER, 50000000, LOW, 100000000,
+					NOMINAL, 200000000),
+		CLK_INIT(gp3_clk_src.c),
+	},
+};
+
+static struct clk_freq_tbl ftbl_hmss_rbcpr_clk_src[] = {
+	F(  19200000,    cxo_clk_src_ao,    1,    0,     0),
+	F_END
+};
+
+static struct rcg_clk hmss_rbcpr_clk_src = {
+	.cmd_rcgr_reg = GCC_HMSS_RBCPR_CMD_RCGR,
+	.set_rate = set_rate_hid,
+	.freq_tbl = ftbl_hmss_rbcpr_clk_src,
+	.current_freq = &rcg_dummy_freq,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "hmss_rbcpr_clk_src",
+		.ops = &clk_ops_rcg,
+		CLK_INIT(hmss_rbcpr_clk_src.c),
+	},
+};
+
+static struct clk_freq_tbl ftbl_pdm2_clk_src[] = {
+	F(  60000000, gpll0_out_main,   10,    0,     0),
+	F_END
+};
+
+static struct rcg_clk pdm2_clk_src = {
+	.cmd_rcgr_reg = GCC_PDM2_CMD_RCGR,
+	.set_rate = set_rate_hid,
+	.freq_tbl = ftbl_pdm2_clk_src,
+	.current_freq = &rcg_dummy_freq,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "pdm2_clk_src",
+		.ops = &clk_ops_rcg,
+		VDD_DIG_FMAX_MAP2(LOWER, 19200000, LOW, 60000000),
+		CLK_INIT(pdm2_clk_src.c),
+	},
+};
+
+static struct clk_freq_tbl ftbl_sdcc2_apps_clk_src[] = {
+	F(    144000,    cxo_clk_src,   16,    3,    25),
+	F(    400000,    cxo_clk_src,   12,    1,     4),
+	F(  20000000, gpll0_out_main,   15,    1,     2),
+	F(  25000000, gpll0_out_main,   12,    1,     2),
+	F(  50000000, gpll0_out_main,   12,    0,     0),
+	F( 100000000, gpll0_out_main,    6,    0,     0),
+	F( 200000000, gpll0_out_main,    3,    0,     0),
+	F_END
+};
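+
+/*
+ * The low entries serve SD/MMC identification mode, e.g. 400 kHz =
+ * 19.2 MHz / 12 * 1/4, while 200 MHz (600 MHz / 3) covers the fastest
+ * bus speed modes.
+ */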
+
+static struct rcg_clk sdcc2_apps_clk_src = {
+	.cmd_rcgr_reg = GCC_SDCC2_APPS_CMD_RCGR,
+	.set_rate = set_rate_mnd,
+	.freq_tbl = ftbl_sdcc2_apps_clk_src,
+	.current_freq = &rcg_dummy_freq,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "sdcc2_apps_clk_src",
+		.ops = &clk_ops_rcg_mnd,
+		VDD_DIG_FMAX_MAP3(LOWER, 19200000, LOW, 100000000,
+					NOMINAL, 200000000),
+		CLK_INIT(sdcc2_apps_clk_src.c),
+	},
+};
+
+static struct clk_freq_tbl ftbl_sdcc4_apps_clk_src[] = {
+	F(    144000,    cxo_clk_src,   16,    3,    25),
+	F(    400000,    cxo_clk_src,   12,    1,     4),
+	F(  20000000, gpll0_out_main,   15,    1,     2),
+	F(  25000000, gpll0_out_main,   12,    1,     2),
+	F(  50000000, gpll0_out_main,   12,    0,     0),
+	F( 100000000, gpll0_out_main,    6,    0,     0),
+	F_END
+};
+
+static struct rcg_clk sdcc4_apps_clk_src = {
+	.cmd_rcgr_reg = GCC_SDCC4_APPS_CMD_RCGR,
+	.set_rate = set_rate_mnd,
+	.freq_tbl = ftbl_sdcc4_apps_clk_src,
+	.current_freq = &rcg_dummy_freq,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "sdcc4_apps_clk_src",
+		.ops = &clk_ops_rcg_mnd,
+		VDD_DIG_FMAX_MAP3(LOWER, 19200000, LOW, 50000000,
+					NOMINAL, 100000000),
+		CLK_INIT(sdcc4_apps_clk_src.c),
+	},
+};
+
+static struct clk_freq_tbl ftbl_tsif_ref_clk_src[] = {
+	F(    105495,    cxo_clk_src,    1,    1,   182),
+	F_END
+};
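+
+/*
+ * 105495 Hz is 19.2 MHz * 1/182 (105494.5 Hz, rounded), matching the
+ * 105500 Hz fmax cap below.
+ */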
+
+static struct rcg_clk tsif_ref_clk_src = {
+	.cmd_rcgr_reg = GCC_TSIF_REF_CMD_RCGR,
+	.set_rate = set_rate_mnd,
+	.freq_tbl = ftbl_tsif_ref_clk_src,
+	.current_freq = &rcg_dummy_freq,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "tsif_ref_clk_src",
+		.ops = &clk_ops_rcg_mnd,
+		VDD_DIG_FMAX_MAP1(LOWER, 105500),
+		CLK_INIT(tsif_ref_clk_src.c),
+	},
+};
+
+static struct clk_freq_tbl ftbl_ufs_ice_core_clk_src[] = {
+	F(     75000000,  gpll0_out_main,    8,    0,   0),
+	F(    150000000,  gpll0_out_main,    4,    0,   0),
+	F(    300000000,  gpll0_out_main,    2,    0,   0),
+	F_END
+};
+
+static struct rcg_clk ufs_ice_core_clk_src = {
+	.cmd_rcgr_reg = GCC_UFS_ICE_CORE_CMD_RCGR,
+	.set_rate = set_rate_hid,
+	.freq_tbl = ftbl_ufs_ice_core_clk_src,
+	.current_freq = &rcg_dummy_freq,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "ufs_ice_core_clk_src",
+		.ops = &clk_ops_rcg,
+		VDD_DIG_FMAX_MAP3(LOWER, 75000000, LOW, 150000000,
+					NOMINAL, 300000000),
+		CLK_INIT(ufs_ice_core_clk_src.c),
+	},
+};
+
+static struct clk_freq_tbl ftbl_ufs_phy_aux_clk_src[] = {
+	F(  19200000,    cxo_clk_src,    1,    0,     0),
+	F_END
+};
+
+static struct rcg_clk ufs_phy_aux_clk_src = {
+	.cmd_rcgr_reg = GCC_UFS_PHY_AUX_CMD_RCGR,
+	.set_rate = set_rate_hid,
+	.freq_tbl = ftbl_ufs_phy_aux_clk_src,
+	.current_freq = &rcg_dummy_freq,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "ufs_phy_aux_clk_src",
+		.ops = &clk_ops_rcg,
+		VDD_DIG_FMAX_MAP1(LOWER, 19200000),
+		CLK_INIT(ufs_phy_aux_clk_src.c),
+	},
+};
+
+static struct clk_freq_tbl ftbl_ufs_unipro_core_clk_src[] = {
+	F(  37500000, gpll0_out_main,   16,    0,     0),
+	F(  75000000, gpll0_out_main,    8,    0,     0),
+	F( 150000000, gpll0_out_main,    4,    0,     0),
+	F_END
+};
+
+static struct rcg_clk ufs_unipro_core_clk_src = {
+	.cmd_rcgr_reg = GCC_UFS_UNIPRO_CORE_CMD_RCGR,
+	.set_rate = set_rate_hid,
+	.freq_tbl = ftbl_ufs_unipro_core_clk_src,
+	.current_freq = &rcg_dummy_freq,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "ufs_unipro_core_clk_src",
+		.ops = &clk_ops_rcg,
+		VDD_DIG_FMAX_MAP3(LOWER, 37500000, LOW, 75000000,
+					NOMINAL, 150000000),
+		CLK_INIT(ufs_unipro_core_clk_src.c),
+	},
+};
+
+static struct clk_freq_tbl ftbl_usb30_mock_utmi_clk_src[] = {
+	F(  19200000,    cxo_clk_src,    1,    0,     0),
+	F_END
+};
+
+static struct rcg_clk usb30_mock_utmi_clk_src = {
+	.cmd_rcgr_reg = GCC_USB30_MOCK_UTMI_CMD_RCGR,
+	.set_rate = set_rate_hid,
+	.freq_tbl = ftbl_usb30_mock_utmi_clk_src,
+	.current_freq = &rcg_dummy_freq,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "usb30_mock_utmi_clk_src",
+		.ops = &clk_ops_rcg,
+		VDD_DIG_FMAX_MAP2(LOWER, 40000000, LOW, 60000000),
+		CLK_INIT(usb30_mock_utmi_clk_src.c),
+	},
+};
+
+static struct clk_freq_tbl ftbl_usb3_phy_aux_clk_src[] = {
+	F(   1200000,    cxo_clk_src,   16,    0,     0),
+	F_END
+};
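+
+/* 1.2 MHz here is simply the 19.2 MHz CXO divided by 16. */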
+
+static struct rcg_clk usb3_phy_aux_clk_src = {
+	.cmd_rcgr_reg = GCC_USB3_PHY_AUX_CMD_RCGR,
+	.set_rate = set_rate_hid,
+	.freq_tbl = ftbl_usb3_phy_aux_clk_src,
+	.current_freq = &rcg_dummy_freq,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "usb3_phy_aux_clk_src",
+		.ops = &clk_ops_rcg,
+		VDD_DIG_FMAX_MAP1(LOWER, 19200000),
+		CLK_INIT(usb3_phy_aux_clk_src.c),
+	},
+};
+
+static struct clk_freq_tbl ftbl_hmss_gpll0_clk_src[] = {
+	F( 300000000,  gpll0_ao,    2,    0,     0),
+	F_END
+};
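+
+/* 300 MHz is the active-set-only gpll0_ao (600 MHz) divided by 2. */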
+
+static struct rcg_clk hmss_gpll0_clk_src = {
+	.cmd_rcgr_reg = GCC_HMSS_GPLL0_CMD_RCGR,
+	.set_rate = set_rate_hid,
+	.freq_tbl = ftbl_hmss_gpll0_clk_src,
+	.current_freq = &rcg_dummy_freq,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "hmss_gpll0_clk_src",
+		.ops = &clk_ops_rcg,
+		VDD_DIG_FMAX_MAP1_AO(LOWER, 600000000),
+		CLK_INIT(hmss_gpll0_clk_src.c),
+	},
+};
+
+static struct clk_freq_tbl ftbl_qspi_ref_clk_src[] = {
+	F(  75000000,  gpll0_out_main,    8,    0,     0),
+	F( 150000000,  gpll0_out_main,    4,    0,     0),
+	F( 256000000,  gpll4_out_main,  1.5,    0,     0),
+	F( 300000000,  gpll0_out_main,    2,    0,     0),
+	F_END
+};
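+
+/*
+ * 256 MHz is gpll4_out_main (384 MHz) / 1.5, another half-integer divider;
+ * the remaining rows divide the 600 MHz gpll0 by 8, 4 and 2.
+ */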
+
+static struct rcg_clk qspi_ref_clk_src = {
+	.cmd_rcgr_reg = GCC_QSPI_REF_CMD_RCGR,
+	.set_rate = set_rate_hid,
+	.freq_tbl = ftbl_qspi_ref_clk_src,
+	.current_freq = &rcg_dummy_freq,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "qspi_ref_clk_src",
+		.ops = &clk_ops_rcg,
+		VDD_DIG_FMAX_MAP3(LOWER, 40000000, LOW, 160400000,
+							NOMINAL, 320800000),
+		CLK_INIT(qspi_ref_clk_src.c),
+	},
+};
+
+static struct branch_clk gcc_hdmi_clkref_clk = {
+	.cbcr_reg = GCC_HDMI_CLKREF_EN,
+	.has_sibling = 1,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "gcc_hdmi_clkref_clk",
+		.ops = &clk_ops_branch,
+		CLK_INIT(gcc_hdmi_clkref_clk.c),
+	},
+};
+
+static struct branch_clk gcc_pcie_clkref_clk = {
+	.cbcr_reg = GCC_PCIE_CLKREF_EN,
+	.has_sibling = 1,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "gcc_pcie_clkref_clk",
+		.ops = &clk_ops_branch,
+		CLK_INIT(gcc_pcie_clkref_clk.c),
+	},
+};
+
+static struct branch_clk gcc_rx1_usb2_clkref_clk = {
+	.cbcr_reg = GCC_RX1_USB2_CLKREF_EN,
+	.has_sibling = 1,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "gcc_rx1_usb2_clkref_clk",
+		.ops = &clk_ops_branch,
+		CLK_INIT(gcc_rx1_usb2_clkref_clk.c),
+	},
+};
+
+static struct branch_clk gcc_ufs_clkref_clk = {
+	.cbcr_reg = GCC_UFS_CLKREF_EN,
+	.has_sibling = 1,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "gcc_ufs_clkref_clk",
+		.ops = &clk_ops_branch,
+		CLK_INIT(gcc_ufs_clkref_clk.c),
+	},
+};
+
+static struct branch_clk gcc_usb3_clkref_clk = {
+	.cbcr_reg = GCC_USB3_CLKREF_EN,
+	.has_sibling = 1,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "gcc_usb3_clkref_clk",
+		.ops = &clk_ops_branch,
+		CLK_INIT(gcc_usb3_clkref_clk.c),
+	},
+};
+
+static struct reset_clk gcc_usb3_phy_reset = {
+	.reset_reg = GCC_USB3_PHY_BCR,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "gcc_usb3_phy_reset",
+		.ops = &clk_ops_rst,
+		CLK_INIT(gcc_usb3_phy_reset.c),
+	},
+};
+
+static struct reset_clk gcc_usb3phy_phy_reset = {
+	.reset_reg = GCC_USB3PHY_PHY_BCR,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "gcc_usb3phy_phy_reset",
+		.ops = &clk_ops_rst,
+		CLK_INIT(gcc_usb3phy_phy_reset.c),
+	},
+};
+
+static struct gate_clk gpll0_out_msscc = {
+	.en_reg = GCC_APCS_CLOCK_BRANCH_ENA_VOTE_1,
+	.en_mask = BIT(2),
+	.delay_us = 1,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "gpll0_out_msscc",
+		.ops = &clk_ops_gate,
+		CLK_INIT(gpll0_out_msscc.c),
+	},
+};
+
+static struct branch_clk gcc_aggre1_ufs_axi_clk = {
+	.cbcr_reg = GCC_AGGRE1_UFS_AXI_CBCR,
+	.has_sibling = 1,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "gcc_aggre1_ufs_axi_clk",
+		.parent = &ufs_axi_clk_src.c,
+		.ops = &clk_ops_branch,
+		CLK_INIT(gcc_aggre1_ufs_axi_clk.c),
+	},
+};
+
+static struct hw_ctl_clk gcc_aggre1_ufs_axi_hw_ctl_clk = {
+	.cbcr_reg = GCC_AGGRE1_UFS_AXI_CBCR,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "gcc_aggre1_ufs_axi_hw_ctl_clk",
+		.parent = &gcc_aggre1_ufs_axi_clk.c,
+		.ops = &clk_ops_branch_hw_ctl,
+		CLK_INIT(gcc_aggre1_ufs_axi_hw_ctl_clk.c),
+	},
+};
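+
+/*
+ * The *_hw_ctl_clk wrappers reuse their parent branch's CBCR but switch it
+ * into hardware-control mode, so the controller (UFS here) can gate the
+ * clock autonomously while software retains a handle to vote with.
+ */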
+
+static struct branch_clk gcc_aggre1_usb3_axi_clk = {
+	.cbcr_reg = GCC_AGGRE1_USB3_AXI_CBCR,
+	.has_sibling = 1,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "gcc_aggre1_usb3_axi_clk",
+		.parent = &usb30_master_clk_src.c,
+		.ops = &clk_ops_branch,
+		CLK_INIT(gcc_aggre1_usb3_axi_clk.c),
+	},
+};
+
+static struct branch_clk gcc_bimc_mss_q6_axi_clk = {
+	.cbcr_reg = GCC_BIMC_MSS_Q6_AXI_CBCR,
+	.has_sibling = 1,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "gcc_bimc_mss_q6_axi_clk",
+		.ops = &clk_ops_branch,
+		CLK_INIT(gcc_bimc_mss_q6_axi_clk.c),
+	},
+};
+
+static struct local_vote_clk gcc_blsp1_ahb_clk = {
+	.cbcr_reg = GCC_BLSP1_AHB_CBCR,
+	.bcr_reg = GCC_BLSP1_BCR,
+	.vote_reg = GCC_APCS_CLOCK_BRANCH_ENA_VOTE,
+	.en_mask = BIT(17),
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "gcc_blsp1_ahb_clk",
+		.ops = &clk_ops_vote,
+		CLK_INIT(gcc_blsp1_ahb_clk.c),
+	},
+};
+
+static struct branch_clk gcc_blsp1_qup1_i2c_apps_clk = {
+	.cbcr_reg = GCC_BLSP1_QUP1_I2C_APPS_CBCR,
+	.has_sibling = 0,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "gcc_blsp1_qup1_i2c_apps_clk",
+		.parent = &blsp1_qup1_i2c_apps_clk_src.c,
+		.ops = &clk_ops_branch,
+		CLK_INIT(gcc_blsp1_qup1_i2c_apps_clk.c),
+	},
+};
+
+static struct branch_clk gcc_blsp1_qup1_spi_apps_clk = {
+	.cbcr_reg = GCC_BLSP1_QUP1_SPI_APPS_CBCR,
+	.has_sibling = 0,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "gcc_blsp1_qup1_spi_apps_clk",
+		.parent = &blsp1_qup1_spi_apps_clk_src.c,
+		.ops = &clk_ops_branch,
+		CLK_INIT(gcc_blsp1_qup1_spi_apps_clk.c),
+	},
+};
+
+static struct branch_clk gcc_blsp1_qup2_i2c_apps_clk = {
+	.cbcr_reg = GCC_BLSP1_QUP2_I2C_APPS_CBCR,
+	.has_sibling = 0,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "gcc_blsp1_qup2_i2c_apps_clk",
+		.parent = &blsp1_qup2_i2c_apps_clk_src.c,
+		.ops = &clk_ops_branch,
+		CLK_INIT(gcc_blsp1_qup2_i2c_apps_clk.c),
+	},
+};
+
+static struct branch_clk gcc_blsp1_qup2_spi_apps_clk = {
+	.cbcr_reg = GCC_BLSP1_QUP2_SPI_APPS_CBCR,
+	.has_sibling = 0,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "gcc_blsp1_qup2_spi_apps_clk",
+		.parent = &blsp1_qup2_spi_apps_clk_src.c,
+		.ops = &clk_ops_branch,
+		CLK_INIT(gcc_blsp1_qup2_spi_apps_clk.c),
+	},
+};
+
+static struct branch_clk gcc_blsp1_qup3_i2c_apps_clk = {
+	.cbcr_reg = GCC_BLSP1_QUP3_I2C_APPS_CBCR,
+	.has_sibling = 0,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "gcc_blsp1_qup3_i2c_apps_clk",
+		.parent = &blsp1_qup3_i2c_apps_clk_src.c,
+		.ops = &clk_ops_branch,
+		CLK_INIT(gcc_blsp1_qup3_i2c_apps_clk.c),
+	},
+};
+
+static struct branch_clk gcc_blsp1_qup3_spi_apps_clk = {
+	.cbcr_reg = GCC_BLSP1_QUP3_SPI_APPS_CBCR,
+	.has_sibling = 0,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "gcc_blsp1_qup3_spi_apps_clk",
+		.parent = &blsp1_qup3_spi_apps_clk_src.c,
+		.ops = &clk_ops_branch,
+		CLK_INIT(gcc_blsp1_qup3_spi_apps_clk.c),
+	},
+};
+
+static struct branch_clk gcc_blsp1_qup4_i2c_apps_clk = {
+	.cbcr_reg = GCC_BLSP1_QUP4_I2C_APPS_CBCR,
+	.has_sibling = 0,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "gcc_blsp1_qup4_i2c_apps_clk",
+		.parent = &blsp1_qup4_i2c_apps_clk_src.c,
+		.ops = &clk_ops_branch,
+		CLK_INIT(gcc_blsp1_qup4_i2c_apps_clk.c),
+	},
+};
+
+static struct branch_clk gcc_blsp1_qup4_spi_apps_clk = {
+	.cbcr_reg = GCC_BLSP1_QUP4_SPI_APPS_CBCR,
+	.has_sibling = 0,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "gcc_blsp1_qup4_spi_apps_clk",
+		.parent = &blsp1_qup4_spi_apps_clk_src.c,
+		.ops = &clk_ops_branch,
+		CLK_INIT(gcc_blsp1_qup4_spi_apps_clk.c),
+	},
+};
+
+static struct branch_clk gcc_blsp1_qup5_i2c_apps_clk = {
+	.cbcr_reg = GCC_BLSP1_QUP5_I2C_APPS_CBCR,
+	.has_sibling = 0,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "gcc_blsp1_qup5_i2c_apps_clk",
+		.parent = &blsp1_qup5_i2c_apps_clk_src.c,
+		.ops = &clk_ops_branch,
+		CLK_INIT(gcc_blsp1_qup5_i2c_apps_clk.c),
+	},
+};
+
+static struct branch_clk gcc_blsp1_qup5_spi_apps_clk = {
+	.cbcr_reg = GCC_BLSP1_QUP5_SPI_APPS_CBCR,
+	.has_sibling = 0,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "gcc_blsp1_qup5_spi_apps_clk",
+		.parent = &blsp1_qup5_spi_apps_clk_src.c,
+		.ops = &clk_ops_branch,
+		CLK_INIT(gcc_blsp1_qup5_spi_apps_clk.c),
+	},
+};
+
+static struct branch_clk gcc_blsp1_qup6_i2c_apps_clk = {
+	.cbcr_reg = GCC_BLSP1_QUP6_I2C_APPS_CBCR,
+	.has_sibling = 0,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "gcc_blsp1_qup6_i2c_apps_clk",
+		.parent = &blsp1_qup6_i2c_apps_clk_src.c,
+		.ops = &clk_ops_branch,
+		CLK_INIT(gcc_blsp1_qup6_i2c_apps_clk.c),
+	},
+};
+
+static struct branch_clk gcc_blsp1_qup6_spi_apps_clk = {
+	.cbcr_reg = GCC_BLSP1_QUP6_SPI_APPS_CBCR,
+	.has_sibling = 0,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "gcc_blsp1_qup6_spi_apps_clk",
+		.parent = &blsp1_qup6_spi_apps_clk_src.c,
+		.ops = &clk_ops_branch,
+		CLK_INIT(gcc_blsp1_qup6_spi_apps_clk.c),
+	},
+};
+
+static struct branch_clk gcc_blsp1_uart1_apps_clk = {
+	.cbcr_reg = GCC_BLSP1_UART1_APPS_CBCR,
+	.has_sibling = 0,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "gcc_blsp1_uart1_apps_clk",
+		.parent = &blsp1_uart1_apps_clk_src.c,
+		.ops = &clk_ops_branch,
+		CLK_INIT(gcc_blsp1_uart1_apps_clk.c),
+	},
+};
+
+static struct branch_clk gcc_blsp1_uart2_apps_clk = {
+	.cbcr_reg = GCC_BLSP1_UART2_APPS_CBCR,
+	.has_sibling = 0,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "gcc_blsp1_uart2_apps_clk",
+		.parent = &blsp1_uart2_apps_clk_src.c,
+		.ops = &clk_ops_branch,
+		CLK_INIT(gcc_blsp1_uart2_apps_clk.c),
+	},
+};
+
+static struct branch_clk gcc_blsp1_uart3_apps_clk = {
+	.cbcr_reg = GCC_BLSP1_UART3_APPS_CBCR,
+	.has_sibling = 0,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "gcc_blsp1_uart3_apps_clk",
+		.parent = &blsp1_uart3_apps_clk_src.c,
+		.ops = &clk_ops_branch,
+		CLK_INIT(gcc_blsp1_uart3_apps_clk.c),
+	},
+};
+
+static struct local_vote_clk gcc_blsp2_ahb_clk = {
+	.cbcr_reg = GCC_BLSP2_AHB_CBCR,
+	.bcr_reg = GCC_BLSP2_BCR,
+	.vote_reg = GCC_APCS_CLOCK_BRANCH_ENA_VOTE,
+	.en_mask = BIT(15),
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "gcc_blsp2_ahb_clk",
+		.ops = &clk_ops_vote,
+		CLK_INIT(gcc_blsp2_ahb_clk.c),
+	},
+};
+
+static struct branch_clk gcc_blsp2_qup1_i2c_apps_clk = {
+	.cbcr_reg = GCC_BLSP2_QUP1_I2C_APPS_CBCR,
+	.has_sibling = 0,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "gcc_blsp2_qup1_i2c_apps_clk",
+		.parent = &blsp2_qup1_i2c_apps_clk_src.c,
+		.ops = &clk_ops_branch,
+		CLK_INIT(gcc_blsp2_qup1_i2c_apps_clk.c),
+	},
+};
+
+static struct branch_clk gcc_blsp2_qup1_spi_apps_clk = {
+	.cbcr_reg = GCC_BLSP2_QUP1_SPI_APPS_CBCR,
+	.has_sibling = 0,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "gcc_blsp2_qup1_spi_apps_clk",
+		.parent = &blsp2_qup1_spi_apps_clk_src.c,
+		.ops = &clk_ops_branch,
+		CLK_INIT(gcc_blsp2_qup1_spi_apps_clk.c),
+	},
+};
+
+static struct branch_clk gcc_blsp2_qup2_i2c_apps_clk = {
+	.cbcr_reg = GCC_BLSP2_QUP2_I2C_APPS_CBCR,
+	.has_sibling = 0,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "gcc_blsp2_qup2_i2c_apps_clk",
+		.parent = &blsp2_qup2_i2c_apps_clk_src.c,
+		.ops = &clk_ops_branch,
+		CLK_INIT(gcc_blsp2_qup2_i2c_apps_clk.c),
+	},
+};
+
+static struct branch_clk gcc_blsp2_qup2_spi_apps_clk = {
+	.cbcr_reg = GCC_BLSP2_QUP2_SPI_APPS_CBCR,
+	.has_sibling = 0,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "gcc_blsp2_qup2_spi_apps_clk",
+		.parent = &blsp2_qup2_spi_apps_clk_src.c,
+		.ops = &clk_ops_branch,
+		CLK_INIT(gcc_blsp2_qup2_spi_apps_clk.c),
+	},
+};
+
+static struct branch_clk gcc_blsp2_qup3_i2c_apps_clk = {
+	.cbcr_reg = GCC_BLSP2_QUP3_I2C_APPS_CBCR,
+	.has_sibling = 0,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "gcc_blsp2_qup3_i2c_apps_clk",
+		.parent = &blsp2_qup3_i2c_apps_clk_src.c,
+		.ops = &clk_ops_branch,
+		CLK_INIT(gcc_blsp2_qup3_i2c_apps_clk.c),
+	},
+};
+
+static struct branch_clk gcc_blsp2_qup3_spi_apps_clk = {
+	.cbcr_reg = GCC_BLSP2_QUP3_SPI_APPS_CBCR,
+	.has_sibling = 0,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "gcc_blsp2_qup3_spi_apps_clk",
+		.parent = &blsp2_qup3_spi_apps_clk_src.c,
+		.ops = &clk_ops_branch,
+		CLK_INIT(gcc_blsp2_qup3_spi_apps_clk.c),
+	},
+};
+
+static struct branch_clk gcc_blsp2_qup4_i2c_apps_clk = {
+	.cbcr_reg = GCC_BLSP2_QUP4_I2C_APPS_CBCR,
+	.has_sibling = 0,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "gcc_blsp2_qup4_i2c_apps_clk",
+		.parent = &blsp2_qup4_i2c_apps_clk_src.c,
+		.ops = &clk_ops_branch,
+		CLK_INIT(gcc_blsp2_qup4_i2c_apps_clk.c),
+	},
+};
+
+static struct branch_clk gcc_blsp2_qup4_spi_apps_clk = {
+	.cbcr_reg = GCC_BLSP2_QUP4_SPI_APPS_CBCR,
+	.has_sibling = 0,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "gcc_blsp2_qup4_spi_apps_clk",
+		.parent = &blsp2_qup4_spi_apps_clk_src.c,
+		.ops = &clk_ops_branch,
+		CLK_INIT(gcc_blsp2_qup4_spi_apps_clk.c),
+	},
+};
+
+static struct branch_clk gcc_blsp2_qup5_i2c_apps_clk = {
+	.cbcr_reg = GCC_BLSP2_QUP5_I2C_APPS_CBCR,
+	.has_sibling = 0,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "gcc_blsp2_qup5_i2c_apps_clk",
+		.parent = &blsp2_qup5_i2c_apps_clk_src.c,
+		.ops = &clk_ops_branch,
+		CLK_INIT(gcc_blsp2_qup5_i2c_apps_clk.c),
+	},
+};
+
+static struct branch_clk gcc_blsp2_qup5_spi_apps_clk = {
+	.cbcr_reg = GCC_BLSP2_QUP5_SPI_APPS_CBCR,
+	.has_sibling = 0,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "gcc_blsp2_qup5_spi_apps_clk",
+		.parent = &blsp2_qup5_spi_apps_clk_src.c,
+		.ops = &clk_ops_branch,
+		CLK_INIT(gcc_blsp2_qup5_spi_apps_clk.c),
+	},
+};
+
+static struct branch_clk gcc_blsp2_qup6_i2c_apps_clk = {
+	.cbcr_reg = GCC_BLSP2_QUP6_I2C_APPS_CBCR,
+	.has_sibling = 0,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "gcc_blsp2_qup6_i2c_apps_clk",
+		.parent = &blsp2_qup6_i2c_apps_clk_src.c,
+		.ops = &clk_ops_branch,
+		CLK_INIT(gcc_blsp2_qup6_i2c_apps_clk.c),
+	},
+};
+
+static struct branch_clk gcc_blsp2_qup6_spi_apps_clk = {
+	.cbcr_reg = GCC_BLSP2_QUP6_SPI_APPS_CBCR,
+	.has_sibling = 0,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "gcc_blsp2_qup6_spi_apps_clk",
+		.parent = &blsp2_qup6_spi_apps_clk_src.c,
+		.ops = &clk_ops_branch,
+		CLK_INIT(gcc_blsp2_qup6_spi_apps_clk.c),
+	},
+};
+
+static struct branch_clk gcc_blsp2_uart1_apps_clk = {
+	.cbcr_reg = GCC_BLSP2_UART1_APPS_CBCR,
+	.has_sibling = 0,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "gcc_blsp2_uart1_apps_clk",
+		.parent = &blsp2_uart1_apps_clk_src.c,
+		.ops = &clk_ops_branch,
+		CLK_INIT(gcc_blsp2_uart1_apps_clk.c),
+	},
+};
+
+static struct branch_clk gcc_blsp2_uart2_apps_clk = {
+	.cbcr_reg = GCC_BLSP2_UART2_APPS_CBCR,
+	.has_sibling = 0,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "gcc_blsp2_uart2_apps_clk",
+		.parent = &blsp2_uart2_apps_clk_src.c,
+		.ops = &clk_ops_branch,
+		CLK_INIT(gcc_blsp2_uart2_apps_clk.c),
+	},
+};
+
+static struct branch_clk gcc_blsp2_uart3_apps_clk = {
+	.cbcr_reg = GCC_BLSP2_UART3_APPS_CBCR,
+	.has_sibling = 0,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "gcc_blsp2_uart3_apps_clk",
+		.parent = &blsp2_uart3_apps_clk_src.c,
+		.ops = &clk_ops_branch,
+		CLK_INIT(gcc_blsp2_uart3_apps_clk.c),
+	},
+};
+
+static struct local_vote_clk gcc_boot_rom_ahb_clk = {
+	.cbcr_reg = GCC_BOOT_ROM_AHB_CBCR,
+	.bcr_reg = GCC_BOOT_ROM_BCR,
+	.vote_reg = GCC_APCS_CLOCK_BRANCH_ENA_VOTE,
+	.en_mask = BIT(10),
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "gcc_boot_rom_ahb_clk",
+		.ops = &clk_ops_vote,
+		CLK_INIT(gcc_boot_rom_ahb_clk.c),
+	},
+};
+
+static struct branch_clk gcc_cfg_noc_usb3_axi_clk = {
+	.cbcr_reg = GCC_CFG_NOC_USB3_AXI_CBCR,
+	.has_sibling = 1,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "gcc_cfg_noc_usb3_axi_clk",
+		.parent = &usb30_master_clk_src.c,
+		.ops = &clk_ops_branch,
+		CLK_INIT(gcc_cfg_noc_usb3_axi_clk.c),
+	},
+};
+
+static struct branch_clk gcc_bimc_gfx_clk = {
+	.cbcr_reg = GCC_BIMC_GFX_CBCR,
+	.has_sibling = 1,
+	.check_enable_bit = true,
+	.no_halt_check_on_disable = true,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "gcc_bimc_gfx_clk",
+		.ops = &clk_ops_branch,
+		CLK_INIT(gcc_bimc_gfx_clk.c),
+	},
+};
+
+static struct branch_clk gcc_gp1_clk = {
+	.cbcr_reg = GCC_GP1_CBCR,
+	.has_sibling = 0,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "gcc_gp1_clk",
+		.parent = &gp1_clk_src.c,
+		.ops = &clk_ops_branch,
+		CLK_INIT(gcc_gp1_clk.c),
+	},
+};
+
+static struct branch_clk gcc_gp2_clk = {
+	.cbcr_reg = GCC_GP2_CBCR,
+	.has_sibling = 0,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "gcc_gp2_clk",
+		.parent = &gp2_clk_src.c,
+		.ops = &clk_ops_branch,
+		CLK_INIT(gcc_gp2_clk.c),
+	},
+};
+
+static struct branch_clk gcc_gp3_clk = {
+	.cbcr_reg = GCC_GP3_CBCR,
+	.has_sibling = 0,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "gcc_gp3_clk",
+		.parent = &gp3_clk_src.c,
+		.ops = &clk_ops_branch,
+		CLK_INIT(gcc_gp3_clk.c),
+	},
+};
+
+static struct branch_clk gcc_gpu_bimc_gfx_clk = {
+	.cbcr_reg = GCC_GPU_BIMC_GFX_CBCR,
+	.has_sibling = 1,
+	.check_enable_bit = true,
+	.no_halt_check_on_disable = true,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "gcc_gpu_bimc_gfx_clk",
+		.ops = &clk_ops_branch,
+		CLK_INIT(gcc_gpu_bimc_gfx_clk.c),
+	},
+};
+
+static struct branch_clk gcc_gpu_cfg_ahb_clk = {
+	.cbcr_reg = GCC_GPU_CFG_AHB_CBCR,
+	.has_sibling = 1,
+	.check_enable_bit = true,
+	.no_halt_check_on_disable = true,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "gcc_gpu_cfg_ahb_clk",
+		.ops = &clk_ops_branch,
+		CLK_INIT(gcc_gpu_cfg_ahb_clk.c),
+	},
+};
+
+static struct branch_clk gcc_gpu_iref_clk = {
+	.cbcr_reg = GCC_GPU_IREF_EN,
+	.has_sibling = 1,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "gcc_gpu_iref_clk",
+		.ops = &clk_ops_branch,
+		CLK_INIT(gcc_gpu_iref_clk.c),
+	},
+};
+
+static struct branch_clk gcc_hmss_dvm_bus_clk = {
+	.cbcr_reg = GCC_HMSS_DVM_BUS_CBCR,
+	.has_sibling = 1,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "gcc_hmss_dvm_bus_clk",
+		.always_on = true,
+		.ops = &clk_ops_branch,
+		CLK_INIT(gcc_hmss_dvm_bus_clk.c),
+	},
+};
+
+static struct branch_clk gcc_hmss_rbcpr_clk = {
+	.cbcr_reg = GCC_HMSS_RBCPR_CBCR,
+	.has_sibling = 0,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "gcc_hmss_rbcpr_clk",
+		.parent = &hmss_rbcpr_clk_src.c,
+		.ops = &clk_ops_branch,
+		CLK_INIT(gcc_hmss_rbcpr_clk.c),
+	},
+};
+
+static struct branch_clk gcc_mmss_noc_cfg_ahb_clk = {
+	.cbcr_reg = GCC_MMSS_NOC_CFG_AHB_CBCR,
+	.has_sibling = 1,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "gcc_mmss_noc_cfg_ahb_clk",
+		.ops = &clk_ops_branch,
+		CLK_INIT(gcc_mmss_noc_cfg_ahb_clk.c),
+	},
+};
+
+static struct branch_clk gcc_mmss_sys_noc_axi_clk = {
+	.cbcr_reg = GCC_MMSS_SYS_NOC_AXI_CBCR,
+	.has_sibling = 1,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "gcc_mmss_sys_noc_axi_clk",
+		.ops = &clk_ops_branch,
+		CLK_INIT(gcc_mmss_sys_noc_axi_clk.c),
+	},
+};
+
+static struct branch_clk gcc_pcie_0_aux_clk = {
+	.cbcr_reg = GCC_PCIE_0_AUX_CBCR,
+	.has_sibling = 0,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "gcc_pcie_0_aux_clk",
+		.parent = &pcie_aux_clk_src.c,
+		.ops = &clk_ops_branch,
+		CLK_INIT(gcc_pcie_0_aux_clk.c),
+	},
+};
+
+static struct branch_clk gcc_pcie_0_cfg_ahb_clk = {
+	.cbcr_reg = GCC_PCIE_0_CFG_AHB_CBCR,
+	.has_sibling = 1,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "gcc_pcie_0_cfg_ahb_clk",
+		.ops = &clk_ops_branch,
+		CLK_INIT(gcc_pcie_0_cfg_ahb_clk.c),
+	},
+};
+
+static struct branch_clk gcc_pcie_0_mstr_axi_clk = {
+	.cbcr_reg = GCC_PCIE_0_MSTR_AXI_CBCR,
+	.has_sibling = 1,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "gcc_pcie_0_mstr_axi_clk",
+		.ops = &clk_ops_branch,
+		CLK_INIT(gcc_pcie_0_mstr_axi_clk.c),
+	},
+};
+
+static struct branch_clk gcc_pcie_0_pipe_clk = {
+	.cbcr_reg = GCC_PCIE_0_PIPE_CBCR,
+	.bcr_reg = GCC_PCIE_0_PHY_BCR,
+	.has_sibling = 1,
+	.halt_check = DELAY,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "gcc_pcie_0_pipe_clk",
+		.ops = &clk_ops_branch,
+		CLK_INIT(gcc_pcie_0_pipe_clk.c),
+	},
+};
+
+static struct branch_clk gcc_pcie_0_slv_axi_clk = {
+	.cbcr_reg = GCC_PCIE_0_SLV_AXI_CBCR,
+	.has_sibling = 1,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "gcc_pcie_0_slv_axi_clk",
+		.ops = &clk_ops_branch,
+		CLK_INIT(gcc_pcie_0_slv_axi_clk.c),
+	},
+};
+
+static struct branch_clk gcc_pcie_phy_aux_clk = {
+	.cbcr_reg = GCC_PCIE_PHY_AUX_CBCR,
+	.has_sibling = 1,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "gcc_pcie_phy_aux_clk",
+		.parent = &pcie_aux_clk_src.c,
+		.ops = &clk_ops_branch,
+		CLK_INIT(gcc_pcie_phy_aux_clk.c),
+	},
+};
+
+static struct reset_clk gcc_pcie_phy_reset = {
+	.reset_reg = GCC_PCIE_PHY_BCR,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "gcc_pcie_phy_reset",
+		.ops = &clk_ops_rst,
+		CLK_INIT(gcc_pcie_phy_reset.c),
+	},
+};
+
+static struct reset_clk gcc_pcie_phy_com_reset = {
+	.reset_reg = GCC_PCIE_PHY_COM_BCR,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "gcc_pcie_phy_com_reset",
+		.ops = &clk_ops_rst,
+		CLK_INIT(gcc_pcie_phy_com_reset.c),
+	},
+};
+
+static struct reset_clk gcc_pcie_phy_nocsr_com_phy_reset = {
+	.reset_reg = GCC_PCIE_PHY_NOCSR_COM_PHY_BCR,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "gcc_pcie_phy_nocsr_com_phy_reset",
+		.ops = &clk_ops_rst,
+		CLK_INIT(gcc_pcie_phy_nocsr_com_phy_reset.c),
+	},
+};
+
+static struct branch_clk gcc_pdm2_clk = {
+	.cbcr_reg = GCC_PDM2_CBCR,
+	.has_sibling = 0,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "gcc_pdm2_clk",
+		.parent = &pdm2_clk_src.c,
+		.ops = &clk_ops_branch,
+		CLK_INIT(gcc_pdm2_clk.c),
+	},
+};
+
+static struct branch_clk gcc_pdm_ahb_clk = {
+	.cbcr_reg = GCC_PDM_AHB_CBCR,
+	.has_sibling = 1,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "gcc_pdm_ahb_clk",
+		.ops = &clk_ops_branch,
+		CLK_INIT(gcc_pdm_ahb_clk.c),
+	},
+};
+
+static struct local_vote_clk gcc_prng_ahb_clk = {
+	.cbcr_reg = GCC_PRNG_AHB_CBCR,
+	.bcr_reg = GCC_PRNG_BCR,
+	.vote_reg = GCC_APCS_CLOCK_BRANCH_ENA_VOTE,
+	.en_mask = BIT(13),
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "gcc_prng_ahb_clk",
+		.ops = &clk_ops_vote,
+		CLK_INIT(gcc_prng_ahb_clk.c),
+	},
+};
+
+static struct branch_clk gcc_sdcc2_ahb_clk = {
+	.cbcr_reg = GCC_SDCC2_AHB_CBCR,
+	.has_sibling = 1,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "gcc_sdcc2_ahb_clk",
+		.ops = &clk_ops_branch,
+		CLK_INIT(gcc_sdcc2_ahb_clk.c),
+	},
+};
+
+static struct branch_clk gcc_sdcc2_apps_clk = {
+	.cbcr_reg = GCC_SDCC2_APPS_CBCR,
+	.has_sibling = 0,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "gcc_sdcc2_apps_clk",
+		.parent = &sdcc2_apps_clk_src.c,
+		.ops = &clk_ops_branch,
+		CLK_INIT(gcc_sdcc2_apps_clk.c),
+	},
+};
+
+static struct branch_clk gcc_sdcc4_ahb_clk = {
+	.cbcr_reg = GCC_SDCC4_AHB_CBCR,
+	.has_sibling = 1,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "gcc_sdcc4_ahb_clk",
+		.ops = &clk_ops_branch,
+		CLK_INIT(gcc_sdcc4_ahb_clk.c),
+	},
+};
+
+static struct branch_clk gcc_sdcc4_apps_clk = {
+	.cbcr_reg = GCC_SDCC4_APPS_CBCR,
+	.has_sibling = 0,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "gcc_sdcc4_apps_clk",
+		.parent = &sdcc4_apps_clk_src.c,
+		.ops = &clk_ops_branch,
+		CLK_INIT(gcc_sdcc4_apps_clk.c),
+	},
+};
+
+static struct branch_clk gcc_tsif_ahb_clk = {
+	.cbcr_reg = GCC_TSIF_AHB_CBCR,
+	.has_sibling = 1,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "gcc_tsif_ahb_clk",
+		.ops = &clk_ops_branch,
+		CLK_INIT(gcc_tsif_ahb_clk.c),
+	},
+};
+
+static struct branch_clk gcc_tsif_ref_clk = {
+	.cbcr_reg = GCC_TSIF_REF_CBCR,
+	.has_sibling = 0,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "gcc_tsif_ref_clk",
+		.parent = &tsif_ref_clk_src.c,
+		.ops = &clk_ops_branch,
+		CLK_INIT(gcc_tsif_ref_clk.c),
+	},
+};
+
+static struct branch_clk gcc_ufs_ahb_clk = {
+	.cbcr_reg = GCC_UFS_AHB_CBCR,
+	.has_sibling = 1,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "gcc_ufs_ahb_clk",
+		.ops = &clk_ops_branch,
+		CLK_INIT(gcc_ufs_ahb_clk.c),
+	},
+};
+
+static struct branch_clk gcc_ufs_axi_clk = {
+	.cbcr_reg = GCC_UFS_AXI_CBCR,
+	.bcr_reg = GCC_UFS_BCR,
+	.has_sibling = 0,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "gcc_ufs_axi_clk",
+		.parent = &ufs_axi_clk_src.c,
+		.ops = &clk_ops_branch,
+		CLK_INIT(gcc_ufs_axi_clk.c),
+	},
+};
+
+static struct hw_ctl_clk gcc_ufs_axi_hw_ctl_clk = {
+	.cbcr_reg = GCC_UFS_AXI_CBCR,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "gcc_ufs_axi_hw_ctl_clk",
+		.parent = &gcc_ufs_axi_clk.c,
+		.ops = &clk_ops_branch_hw_ctl,
+		CLK_INIT(gcc_ufs_axi_hw_ctl_clk.c),
+	},
+};
+
+static struct branch_clk gcc_ufs_ice_core_clk = {
+	.cbcr_reg = GCC_UFS_ICE_CORE_CBCR,
+	.has_sibling = 0,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "gcc_ufs_ice_core_clk",
+		.parent = &ufs_ice_core_clk_src.c,
+		.ops = &clk_ops_branch,
+		CLK_INIT(gcc_ufs_ice_core_clk.c),
+	},
+};
+
+static struct hw_ctl_clk gcc_ufs_ice_core_hw_ctl_clk = {
+	.cbcr_reg = GCC_UFS_ICE_CORE_CBCR,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "gcc_ufs_ice_core_hw_ctl_clk",
+		.parent = &gcc_ufs_ice_core_clk.c,
+		.ops = &clk_ops_branch_hw_ctl,
+		CLK_INIT(gcc_ufs_ice_core_hw_ctl_clk.c),
+	},
+};
+
+static struct branch_clk gcc_ufs_phy_aux_clk = {
+	.cbcr_reg = GCC_UFS_PHY_AUX_CBCR,
+	.has_sibling = 0,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "gcc_ufs_phy_aux_clk",
+		.parent = &ufs_phy_aux_clk_src.c,
+		.ops = &clk_ops_branch,
+		CLK_INIT(gcc_ufs_phy_aux_clk.c),
+	},
+};
+
+static struct hw_ctl_clk gcc_ufs_phy_aux_hw_ctl_clk = {
+	.cbcr_reg = GCC_UFS_PHY_AUX_CBCR,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "gcc_ufs_phy_aux_hw_ctl_clk",
+		.parent = &gcc_ufs_phy_aux_clk.c,
+		.ops = &clk_ops_branch_hw_ctl,
+		CLK_INIT(gcc_ufs_phy_aux_hw_ctl_clk.c),
+	},
+};
+
+static struct gate_clk gcc_ufs_rx_symbol_0_clk = {
+	.en_reg = GCC_UFS_RX_SYMBOL_0_CBCR,
+	.en_mask = BIT(0),
+	.delay_us = 500,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "gcc_ufs_rx_symbol_0_clk",
+		.ops = &clk_ops_gate,
+		CLK_INIT(gcc_ufs_rx_symbol_0_clk.c),
+	},
+};
+
+static struct gate_clk gcc_ufs_rx_symbol_1_clk = {
+	.en_reg = GCC_UFS_RX_SYMBOL_1_CBCR,
+	.en_mask = BIT(0),
+	.delay_us = 500,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "gcc_ufs_rx_symbol_1_clk",
+		.ops = &clk_ops_gate,
+		CLK_INIT(gcc_ufs_rx_symbol_1_clk.c),
+	},
+};
+
+static struct gate_clk gcc_ufs_tx_symbol_0_clk = {
+	.en_reg = GCC_UFS_TX_SYMBOL_0_CBCR,
+	.en_mask = BIT(0),
+	.delay_us = 500,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "gcc_ufs_tx_symbol_0_clk",
+		.ops = &clk_ops_gate,
+		CLK_INIT(gcc_ufs_tx_symbol_0_clk.c),
+	},
+};
+
+static struct branch_clk gcc_ufs_unipro_core_clk = {
+	.cbcr_reg = GCC_UFS_UNIPRO_CORE_CBCR,
+	.has_sibling = 0,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "gcc_ufs_unipro_core_clk",
+		.parent = &ufs_unipro_core_clk_src.c,
+		.ops = &clk_ops_branch,
+		CLK_INIT(gcc_ufs_unipro_core_clk.c),
+	},
+};
+
+static struct hw_ctl_clk gcc_ufs_unipro_core_hw_ctl_clk = {
+	.cbcr_reg = GCC_UFS_UNIPRO_CORE_CBCR,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "gcc_ufs_unipro_core_hw_ctl_clk",
+		.parent = &gcc_ufs_unipro_core_clk.c,
+		.ops = &clk_ops_branch_hw_ctl,
+		CLK_INIT(gcc_ufs_unipro_core_hw_ctl_clk.c),
+	},
+};
+
+static struct branch_clk gcc_usb30_master_clk = {
+	.cbcr_reg = GCC_USB30_MASTER_CBCR,
+	.bcr_reg = GCC_USB_30_BCR,
+	.has_sibling = 0,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "gcc_usb30_master_clk",
+		.parent = &usb30_master_clk_src.c,
+		.ops = &clk_ops_branch,
+		CLK_INIT(gcc_usb30_master_clk.c),
+		.depends = &gcc_cfg_noc_usb3_axi_clk.c,
+	},
+};
+
+static struct branch_clk gcc_usb30_mock_utmi_clk = {
+	.cbcr_reg = GCC_USB30_MOCK_UTMI_CBCR,
+	.has_sibling = 0,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "gcc_usb30_mock_utmi_clk",
+		.parent = &usb30_mock_utmi_clk_src.c,
+		.ops = &clk_ops_branch,
+		CLK_INIT(gcc_usb30_mock_utmi_clk.c),
+	},
+};
+
+static struct branch_clk gcc_usb30_sleep_clk = {
+	.cbcr_reg = GCC_USB30_SLEEP_CBCR,
+	.has_sibling = 1,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "gcc_usb30_sleep_clk",
+		.ops = &clk_ops_branch,
+		CLK_INIT(gcc_usb30_sleep_clk.c),
+	},
+};
+
+static struct branch_clk gcc_usb3_phy_aux_clk = {
+	.cbcr_reg = GCC_USB3_PHY_AUX_CBCR,
+	.has_sibling = 0,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "gcc_usb3_phy_aux_clk",
+		.parent = &usb3_phy_aux_clk_src.c,
+		.ops = &clk_ops_branch,
+		CLK_INIT(gcc_usb3_phy_aux_clk.c),
+	},
+};
+
+static struct gate_clk gcc_usb3_phy_pipe_clk = {
+	.en_reg = GCC_USB3_PHY_PIPE_CBCR,
+	.en_mask = BIT(0),
+	.delay_us = 50,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "gcc_usb3_phy_pipe_clk",
+		.ops = &clk_ops_gate,
+		CLK_INIT(gcc_usb3_phy_pipe_clk.c),
+	},
+};
+
+static struct reset_clk gcc_qusb2phy_prim_reset = {
+	.reset_reg = GCC_QUSB2PHY_PRIM_BCR,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "gcc_qusb2phy_prim_reset",
+		.ops = &clk_ops_rst,
+		CLK_INIT(gcc_qusb2phy_prim_reset.c),
+	},
+};
+
+static struct reset_clk gcc_qusb2phy_sec_reset = {
+	.reset_reg = GCC_QUSB2PHY_SEC_BCR,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "gcc_qusb2phy_sec_reset",
+		.ops = &clk_ops_rst,
+		CLK_INIT(gcc_qusb2phy_sec_reset.c),
+	},
+};
+
+static struct branch_clk gcc_mss_cfg_ahb_clk = {
+	.cbcr_reg = GCC_MSS_CFG_AHB_CBCR,
+	.has_sibling = 1,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "gcc_mss_cfg_ahb_clk",
+		.ops = &clk_ops_branch,
+		CLK_INIT(gcc_mss_cfg_ahb_clk.c),
+	},
+};
+
+static struct branch_clk gcc_mss_q6_bimc_axi_clk = {
+	.cbcr_reg = GCC_MSS_Q6_BIMC_AXI_CBCR,
+	.has_sibling = 1,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "gcc_mss_q6_bimc_axi_clk",
+		.always_on = true,
+		.ops = &clk_ops_branch,
+		CLK_INIT(gcc_mss_q6_bimc_axi_clk.c),
+	},
+};
+
+static struct branch_clk gcc_mss_mnoc_bimc_axi_clk = {
+	.cbcr_reg = GCC_MSS_MNOC_BIMC_AXI_CBCR,
+	.has_sibling = 1,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "gcc_mss_mnoc_bimc_axi_clk",
+		.ops = &clk_ops_branch,
+		CLK_INIT(gcc_mss_mnoc_bimc_axi_clk.c),
+	},
+};
+
+static struct branch_clk gcc_mss_snoc_axi_clk = {
+	.cbcr_reg = GCC_MSS_SNOC_AXI_CBCR,
+	.has_sibling = 1,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "gcc_mss_snoc_axi_clk",
+		.ops = &clk_ops_branch,
+		CLK_INIT(gcc_mss_snoc_axi_clk.c),
+	},
+};
+
+static struct branch_clk gcc_dcc_ahb_clk = {
+	.cbcr_reg = GCC_DCC_AHB_CBCR,
+	.has_sibling = 1,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "gcc_dcc_ahb_clk",
+		.ops = &clk_ops_branch,
+		CLK_INIT(gcc_dcc_ahb_clk.c),
+	},
+};
+
+static struct branch_clk hlos1_vote_lpass_core_smmu_clk = {
+	.cbcr_reg = GCC_HLOS1_VOTE_LPASS_CORE_SMMU_CBCR,
+	.has_sibling = 0,
+	.base = &virt_base,
+	.no_halt_check_on_disable = true,
+	.c = {
+		.dbg_name = "hlos1_vote_lpass_core_smmu_clk",
+		.ops = &clk_ops_branch,
+		CLK_INIT(hlos1_vote_lpass_core_smmu_clk.c),
+	},
+};
+
+static struct branch_clk hlos1_vote_lpass_adsp_smmu_clk = {
+	.cbcr_reg = GCC_HLOS1_VOTE_LPASS_ADSP_SMMU_CBCR,
+	.has_sibling = 0,
+	.base = &virt_base,
+	.no_halt_check_on_disable = true,
+	.c = {
+		.dbg_name = "hlos1_vote_lpass_adsp_smmu_clk",
+		.ops = &clk_ops_branch,
+		CLK_INIT(hlos1_vote_lpass_adsp_smmu_clk.c),
+	},
+};
+
+static struct branch_clk gcc_qspi_ahb_clk = {
+	.cbcr_reg = GCC_QSPI_AHB_CBCR,
+	.has_sibling = 1,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "gcc_qspi_ahb_clk",
+		.ops = &clk_ops_branch,
+		CLK_INIT(gcc_qspi_ahb_clk.c),
+	},
+};
+
+static struct branch_clk gcc_qspi_ref_clk = {
+	.cbcr_reg = GCC_QSPI_REF_CBCR,
+	.has_sibling = 0,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "gcc_qspi_ref_clk",
+		.parent = &qspi_ref_clk_src.c,
+		.ops = &clk_ops_branch,
+		CLK_INIT(gcc_qspi_ref_clk.c),
+	},
+};
+
+static struct mux_clk gcc_debug_mux;
+static struct clk_ops clk_ops_debug_mux;
+
+static struct measure_clk_data debug_mux_priv = {
+	.cxo = &cxo_clk_src.c,
+	.plltest_reg = PLLTEST_PAD_CFG,
+	.plltest_val = 0x51A00,
+	.xo_div4_cbcr = GCC_XO_DIV4_CBCR,
+	.ctl_reg = CLOCK_FRQ_MEASURE_CTL,
+	.status_reg = CLOCK_FRQ_MEASURE_STATUS,
+	.base = &virt_base,
+};
+
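+/*
+ * Debug mux: routes the clock selected from MUX_SRC_LIST below to the GCC
+ * frequency measurement counter so its rate can be read back at runtime
+ * (see the debug controller probe at the end of this file).
+ */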
+static struct mux_clk gcc_debug_mux = {
+	.priv = &debug_mux_priv,
+	.ops = &mux_reg_ops,
+	.en_mask = BIT(16),
+	.mask = 0x3FF,
+	.base = &virt_dbgbase,
+	MUX_REC_SRC_LIST(
+		&gpu_gcc_debug_clk.c,
+		&gfx_gcc_debug_clk.c,
+		&debug_mmss_clk.c,
+		&debug_cpu_clk.c,
+	),
+	MUX_SRC_LIST(
+		{ &gpu_gcc_debug_clk.c, 0x013d },
+		{ &gfx_gcc_debug_clk.c, 0x013d },
+		{ &debug_mmss_clk.c, 0x0022 },
+		{ &debug_cpu_clk.c, 0x00c0 },
+		{ &snoc_clk.c, 0x0000 },
+		{ &cnoc_clk.c, 0x000e },
+		{ &bimc_clk.c, 0x014e },
+		{ &gcc_mmss_sys_noc_axi_clk.c, 0x001f },
+		{ &gcc_mmss_noc_cfg_ahb_clk.c, 0x0020 },
+		{ &gcc_usb30_master_clk.c, 0x003e },
+		{ &gcc_usb30_sleep_clk.c, 0x003f },
+		{ &gcc_usb30_mock_utmi_clk.c, 0x0040 },
+		{ &gcc_usb3_phy_aux_clk.c, 0x0041 },
+		{ &gcc_usb3_phy_pipe_clk.c, 0x0042 },
+		{ &gcc_sdcc2_apps_clk.c, 0x0046 },
+		{ &gcc_sdcc2_ahb_clk.c, 0x0047 },
+		{ &gcc_sdcc4_apps_clk.c, 0x0048 },
+		{ &gcc_sdcc4_ahb_clk.c, 0x0049 },
+		{ &gcc_blsp1_ahb_clk.c, 0x004a },
+		{ &gcc_blsp1_qup1_spi_apps_clk.c, 0x004c },
+		{ &gcc_blsp1_qup1_i2c_apps_clk.c, 0x004d },
+		{ &gcc_blsp1_uart1_apps_clk.c, 0x004e },
+		{ &gcc_blsp1_qup2_spi_apps_clk.c, 0x0050 },
+		{ &gcc_blsp1_qup2_i2c_apps_clk.c, 0x0051 },
+		{ &gcc_blsp1_uart2_apps_clk.c, 0x0052 },
+		{ &gcc_blsp1_qup3_spi_apps_clk.c, 0x0054 },
+		{ &gcc_blsp1_qup3_i2c_apps_clk.c, 0x0055 },
+		{ &gcc_blsp1_uart3_apps_clk.c, 0x0056 },
+		{ &gcc_blsp1_qup4_spi_apps_clk.c, 0x0058 },
+		{ &gcc_blsp1_qup4_i2c_apps_clk.c, 0x0059 },
+		{ &gcc_blsp1_qup5_spi_apps_clk.c, 0x005a },
+		{ &gcc_blsp1_qup5_i2c_apps_clk.c, 0x005b },
+		{ &gcc_blsp1_qup6_spi_apps_clk.c, 0x005c },
+		{ &gcc_blsp1_qup6_i2c_apps_clk.c, 0x005d },
+		{ &gcc_blsp2_ahb_clk.c, 0x005e },
+		{ &gcc_blsp2_qup1_spi_apps_clk.c, 0x0060 },
+		{ &gcc_blsp2_qup1_i2c_apps_clk.c, 0x0061 },
+		{ &gcc_blsp2_uart1_apps_clk.c, 0x0062 },
+		{ &gcc_blsp2_qup2_spi_apps_clk.c, 0x0064 },
+		{ &gcc_blsp2_qup2_i2c_apps_clk.c, 0x0065 },
+		{ &gcc_blsp2_uart2_apps_clk.c, 0x0066 },
+		{ &gcc_blsp2_qup3_spi_apps_clk.c, 0x0068 },
+		{ &gcc_blsp2_qup3_i2c_apps_clk.c, 0x0069 },
+		{ &gcc_blsp2_uart3_apps_clk.c, 0x006a },
+		{ &gcc_blsp2_qup4_spi_apps_clk.c, 0x006c },
+		{ &gcc_blsp2_qup4_i2c_apps_clk.c, 0x006d },
+		{ &gcc_blsp2_qup5_spi_apps_clk.c, 0x006e },
+		{ &gcc_blsp2_qup5_i2c_apps_clk.c, 0x006f },
+		{ &gcc_blsp2_qup6_spi_apps_clk.c, 0x0070 },
+		{ &gcc_blsp2_qup6_i2c_apps_clk.c, 0x0071 },
+		{ &gcc_pdm_ahb_clk.c, 0x0072 },
+		{ &gcc_pdm2_clk.c, 0x0074 },
+		{ &gcc_prng_ahb_clk.c, 0x0075 },
+		{ &gcc_tsif_ahb_clk.c, 0x0076 },
+		{ &gcc_tsif_ref_clk.c, 0x0077 },
+		{ &gcc_boot_rom_ahb_clk.c, 0x007a },
+		{ &ce1_clk.c, 0x0097 },
+		{ &gcc_ce1_axi_m_clk.c, 0x0098 },
+		{ &gcc_ce1_ahb_m_clk.c, 0x0099 },
+		{ &measure_only_bimc_hmss_axi_clk.c, 0x00bb },
+		{ &gcc_bimc_gfx_clk.c, 0x00ac },
+		{ &gcc_hmss_rbcpr_clk.c, 0x00bc },
+		{ &gcc_gp1_clk.c, 0x00df },
+		{ &gcc_gp2_clk.c, 0x00e0 },
+		{ &gcc_gp3_clk.c, 0x00e1 },
+		{ &gcc_pcie_0_slv_axi_clk.c, 0x00e2 },
+		{ &gcc_pcie_0_mstr_axi_clk.c, 0x00e3 },
+		{ &gcc_pcie_0_cfg_ahb_clk.c, 0x00e4 },
+		{ &gcc_pcie_0_aux_clk.c, 0x00e5 },
+		{ &gcc_pcie_0_pipe_clk.c, 0x00e6 },
+		{ &gcc_pcie_phy_aux_clk.c, 0x00e8 },
+		{ &gcc_ufs_axi_clk.c, 0x00ea },
+		{ &gcc_ufs_ahb_clk.c, 0x00eb },
+		{ &gcc_ufs_tx_symbol_0_clk.c, 0x00ec },
+		{ &gcc_ufs_rx_symbol_0_clk.c, 0x00ed },
+		{ &gcc_ufs_rx_symbol_1_clk.c, 0x0162 },
+		{ &gcc_ufs_unipro_core_clk.c, 0x00f0 },
+		{ &gcc_ufs_ice_core_clk.c, 0x00f1 },
+		{ &gcc_dcc_ahb_clk.c, 0x0119 },
+		{ &ipa_clk.c, 0x011b },
+		{ &gcc_mss_cfg_ahb_clk.c, 0x011f },
+		{ &gcc_mss_q6_bimc_axi_clk.c, 0x0124 },
+		{ &gcc_mss_mnoc_bimc_axi_clk.c, 0x0120 },
+		{ &gcc_mss_snoc_axi_clk.c, 0x0123 },
+		{ &gcc_gpu_cfg_ahb_clk.c, 0x013b },
+		{ &gcc_gpu_bimc_gfx_clk.c, 0x013f },
+		{ &gcc_qspi_ahb_clk.c, 0x0156 },
+		{ &gcc_qspi_ref_clk.c, 0x0157 },
+	),
+	.c = {
+		.dbg_name = "gcc_debug_mux",
+		.ops = &clk_ops_debug_mux,
+		.flags = CLKFLAG_NO_RATE_CACHE | CLKFLAG_MEASURE,
+		CLK_INIT(gcc_debug_mux.c),
+	},
+};
+
+static struct clk_lookup msm_clocks_rpm_8998[] = {
+	CLK_LIST(cxo_clk_src),
+	CLK_LIST(bimc_clk),
+	CLK_LIST(bimc_a_clk),
+	CLK_LIST(cnoc_clk),
+	CLK_LIST(cnoc_a_clk),
+	CLK_LIST(snoc_clk),
+	CLK_LIST(snoc_a_clk),
+	CLK_LIST(cnoc_periph_clk),
+	CLK_LIST(cnoc_periph_a_clk),
+	CLK_LIST(cnoc_periph_keepalive_a_clk),
+	CLK_LIST(bimc_msmbus_clk),
+	CLK_LIST(bimc_msmbus_a_clk),
+	CLK_LIST(ce1_clk),
+	CLK_LIST(ce1_a_clk),
+	CLK_LIST(cnoc_msmbus_clk),
+	CLK_LIST(cnoc_msmbus_a_clk),
+	CLK_LIST(cxo_clk_src_ao),
+	CLK_LIST(cxo_dwc3_clk),
+	CLK_LIST(cxo_lpm_clk),
+	CLK_LIST(cxo_otg_clk),
+	CLK_LIST(cxo_pil_lpass_clk),
+	CLK_LIST(cxo_pil_ssc_clk),
+	CLK_LIST(cxo_pil_spss_clk),
+	CLK_LIST(div_clk1),
+	CLK_LIST(div_clk1_ao),
+	CLK_LIST(div_clk2),
+	CLK_LIST(div_clk2_ao),
+	CLK_LIST(div_clk3),
+	CLK_LIST(div_clk3_ao),
+	CLK_LIST(ipa_clk),
+	CLK_LIST(ipa_a_clk),
+	CLK_LIST(ln_bb_clk1),
+	CLK_LIST(ln_bb_clk1_ao),
+	CLK_LIST(ln_bb_clk1_pin),
+	CLK_LIST(ln_bb_clk1_pin_ao),
+	CLK_LIST(ln_bb_clk2),
+	CLK_LIST(ln_bb_clk2_ao),
+	CLK_LIST(ln_bb_clk2_pin),
+	CLK_LIST(ln_bb_clk2_pin_ao),
+	CLK_LIST(ln_bb_clk3),
+	CLK_LIST(ln_bb_clk3_ao),
+	CLK_LIST(ln_bb_clk3_pin),
+	CLK_LIST(ln_bb_clk3_pin_ao),
+	CLK_LIST(mcd_ce1_clk),
+	CLK_LIST(measure_only_bimc_hmss_axi_clk),
+	CLK_LIST(mmssnoc_axi_clk),
+	CLK_LIST(mmssnoc_axi_a_clk),
+	CLK_LIST(aggre1_noc_clk),
+	CLK_LIST(aggre1_noc_a_clk),
+	CLK_LIST(aggre2_noc_clk),
+	CLK_LIST(aggre2_noc_a_clk),
+	CLK_LIST(qcedev_ce1_clk),
+	CLK_LIST(qcrypto_ce1_clk),
+	CLK_LIST(qdss_clk),
+	CLK_LIST(qdss_a_clk),
+	CLK_LIST(qseecom_ce1_clk),
+	CLK_LIST(rf_clk1),
+	CLK_LIST(rf_clk1_ao),
+	CLK_LIST(rf_clk1_pin),
+	CLK_LIST(rf_clk1_pin_ao),
+	CLK_LIST(rf_clk2),
+	CLK_LIST(rf_clk2_ao),
+	CLK_LIST(rf_clk2_pin),
+	CLK_LIST(rf_clk2_pin_ao),
+	CLK_LIST(rf_clk3),
+	CLK_LIST(rf_clk3_ao),
+	CLK_LIST(rf_clk3_pin),
+	CLK_LIST(rf_clk3_pin_ao),
+	CLK_LIST(scm_ce1_clk),
+	CLK_LIST(snoc_msmbus_clk),
+	CLK_LIST(snoc_msmbus_a_clk),
+	CLK_LIST(gcc_ce1_ahb_m_clk),
+	CLK_LIST(gcc_ce1_axi_m_clk),
+};
+
+static struct clk_lookup msm_clocks_gcc_8998[] = {
+	CLK_LIST(gpll0),
+	CLK_LIST(gpll0_ao),
+	CLK_LIST(gpll0_out_main),
+	CLK_LIST(gcc_mmss_gpll0_clk),
+	CLK_LIST(gcc_mmss_gpll0_div_clk),
+	CLK_LIST(gcc_gpu_gpll0_clk),
+	CLK_LIST(gcc_gpu_gpll0_div_clk),
+	CLK_LIST(gpll4),
+	CLK_LIST(gpll4_out_main),
+	CLK_LIST(usb30_master_clk_src),
+	CLK_LIST(pcie_aux_clk_src),
+	CLK_LIST(ufs_axi_clk_src),
+	CLK_LIST(blsp1_qup1_i2c_apps_clk_src),
+	CLK_LIST(blsp1_qup1_spi_apps_clk_src),
+	CLK_LIST(blsp1_qup2_i2c_apps_clk_src),
+	CLK_LIST(blsp1_qup2_spi_apps_clk_src),
+	CLK_LIST(blsp1_qup3_i2c_apps_clk_src),
+	CLK_LIST(blsp1_qup3_spi_apps_clk_src),
+	CLK_LIST(blsp1_qup4_i2c_apps_clk_src),
+	CLK_LIST(blsp1_qup4_spi_apps_clk_src),
+	CLK_LIST(blsp1_qup5_i2c_apps_clk_src),
+	CLK_LIST(blsp1_qup5_spi_apps_clk_src),
+	CLK_LIST(blsp1_qup6_i2c_apps_clk_src),
+	CLK_LIST(blsp1_qup6_spi_apps_clk_src),
+	CLK_LIST(blsp1_uart1_apps_clk_src),
+	CLK_LIST(blsp1_uart2_apps_clk_src),
+	CLK_LIST(blsp1_uart3_apps_clk_src),
+	CLK_LIST(blsp2_qup1_i2c_apps_clk_src),
+	CLK_LIST(blsp2_qup1_spi_apps_clk_src),
+	CLK_LIST(blsp2_qup2_i2c_apps_clk_src),
+	CLK_LIST(blsp2_qup2_spi_apps_clk_src),
+	CLK_LIST(blsp2_qup3_i2c_apps_clk_src),
+	CLK_LIST(blsp2_qup3_spi_apps_clk_src),
+	CLK_LIST(blsp2_qup4_i2c_apps_clk_src),
+	CLK_LIST(blsp2_qup4_spi_apps_clk_src),
+	CLK_LIST(blsp2_qup5_i2c_apps_clk_src),
+	CLK_LIST(blsp2_qup5_spi_apps_clk_src),
+	CLK_LIST(blsp2_qup6_i2c_apps_clk_src),
+	CLK_LIST(blsp2_qup6_spi_apps_clk_src),
+	CLK_LIST(blsp2_uart1_apps_clk_src),
+	CLK_LIST(blsp2_uart2_apps_clk_src),
+	CLK_LIST(blsp2_uart3_apps_clk_src),
+	CLK_LIST(gp1_clk_src),
+	CLK_LIST(gp2_clk_src),
+	CLK_LIST(gp3_clk_src),
+	CLK_LIST(hmss_rbcpr_clk_src),
+	CLK_LIST(pdm2_clk_src),
+	CLK_LIST(sdcc2_apps_clk_src),
+	CLK_LIST(sdcc4_apps_clk_src),
+	CLK_LIST(tsif_ref_clk_src),
+	CLK_LIST(ufs_ice_core_clk_src),
+	CLK_LIST(ufs_phy_aux_clk_src),
+	CLK_LIST(ufs_unipro_core_clk_src),
+	CLK_LIST(usb30_mock_utmi_clk_src),
+	CLK_LIST(usb3_phy_aux_clk_src),
+	CLK_LIST(hmss_gpll0_clk_src),
+	CLK_LIST(qspi_ref_clk_src),
+	CLK_LIST(gcc_usb3_phy_reset),
+	CLK_LIST(gcc_usb3phy_phy_reset),
+	CLK_LIST(gcc_qusb2phy_prim_reset),
+	CLK_LIST(gcc_qusb2phy_sec_reset),
+	CLK_LIST(gpll0_out_msscc),
+	CLK_LIST(gcc_aggre1_ufs_axi_clk),
+	CLK_LIST(gcc_aggre1_ufs_axi_hw_ctl_clk),
+	CLK_LIST(gcc_aggre1_usb3_axi_clk),
+	CLK_LIST(gcc_bimc_mss_q6_axi_clk),
+	CLK_LIST(gcc_blsp1_ahb_clk),
+	CLK_LIST(gcc_blsp1_qup1_i2c_apps_clk),
+	CLK_LIST(gcc_blsp1_qup1_spi_apps_clk),
+	CLK_LIST(gcc_blsp1_qup2_i2c_apps_clk),
+	CLK_LIST(gcc_blsp1_qup2_spi_apps_clk),
+	CLK_LIST(gcc_blsp1_qup3_i2c_apps_clk),
+	CLK_LIST(gcc_blsp1_qup3_spi_apps_clk),
+	CLK_LIST(gcc_blsp1_qup4_i2c_apps_clk),
+	CLK_LIST(gcc_blsp1_qup4_spi_apps_clk),
+	CLK_LIST(gcc_blsp1_qup5_i2c_apps_clk),
+	CLK_LIST(gcc_blsp1_qup5_spi_apps_clk),
+	CLK_LIST(gcc_blsp1_qup6_i2c_apps_clk),
+	CLK_LIST(gcc_blsp1_qup6_spi_apps_clk),
+	CLK_LIST(gcc_blsp1_uart1_apps_clk),
+	CLK_LIST(gcc_blsp1_uart2_apps_clk),
+	CLK_LIST(gcc_blsp1_uart3_apps_clk),
+	CLK_LIST(gcc_blsp2_ahb_clk),
+	CLK_LIST(gcc_blsp2_qup1_i2c_apps_clk),
+	CLK_LIST(gcc_blsp2_qup1_spi_apps_clk),
+	CLK_LIST(gcc_blsp2_qup2_i2c_apps_clk),
+	CLK_LIST(gcc_blsp2_qup2_spi_apps_clk),
+	CLK_LIST(gcc_blsp2_qup3_i2c_apps_clk),
+	CLK_LIST(gcc_blsp2_qup3_spi_apps_clk),
+	CLK_LIST(gcc_blsp2_qup4_i2c_apps_clk),
+	CLK_LIST(gcc_blsp2_qup4_spi_apps_clk),
+	CLK_LIST(gcc_blsp2_qup5_i2c_apps_clk),
+	CLK_LIST(gcc_blsp2_qup5_spi_apps_clk),
+	CLK_LIST(gcc_blsp2_qup6_i2c_apps_clk),
+	CLK_LIST(gcc_blsp2_qup6_spi_apps_clk),
+	CLK_LIST(gcc_blsp2_uart1_apps_clk),
+	CLK_LIST(gcc_blsp2_uart2_apps_clk),
+	CLK_LIST(gcc_blsp2_uart3_apps_clk),
+	CLK_LIST(gcc_cfg_noc_usb3_axi_clk),
+	CLK_LIST(gcc_bimc_gfx_clk),
+	CLK_LIST(gcc_gp1_clk),
+	CLK_LIST(gcc_gp2_clk),
+	CLK_LIST(gcc_gp3_clk),
+	CLK_LIST(gcc_gpu_bimc_gfx_clk),
+	CLK_LIST(gcc_gpu_cfg_ahb_clk),
+	CLK_LIST(gcc_gpu_iref_clk),
+	CLK_LIST(gcc_hmss_dvm_bus_clk),
+	CLK_LIST(gcc_hmss_rbcpr_clk),
+	CLK_LIST(gcc_mmss_noc_cfg_ahb_clk),
+	CLK_LIST(gcc_mmss_sys_noc_axi_clk),
+	CLK_LIST(gcc_pcie_0_aux_clk),
+	CLK_LIST(gcc_pcie_0_cfg_ahb_clk),
+	CLK_LIST(gcc_pcie_0_mstr_axi_clk),
+	CLK_LIST(gcc_pcie_0_pipe_clk),
+	CLK_LIST(gcc_pcie_0_slv_axi_clk),
+	CLK_LIST(gcc_pcie_phy_aux_clk),
+	CLK_LIST(gcc_pcie_phy_reset),
+	CLK_LIST(gcc_pcie_phy_com_reset),
+	CLK_LIST(gcc_pcie_phy_nocsr_com_phy_reset),
+	CLK_LIST(gcc_pdm2_clk),
+	CLK_LIST(gcc_pdm_ahb_clk),
+	CLK_LIST(gcc_sdcc2_ahb_clk),
+	CLK_LIST(gcc_sdcc2_apps_clk),
+	CLK_LIST(gcc_sdcc4_ahb_clk),
+	CLK_LIST(gcc_sdcc4_apps_clk),
+	CLK_LIST(gcc_tsif_ahb_clk),
+	CLK_LIST(gcc_tsif_ref_clk),
+	CLK_LIST(gcc_ufs_ahb_clk),
+	CLK_LIST(gcc_ufs_axi_clk),
+	CLK_LIST(gcc_ufs_axi_hw_ctl_clk),
+	CLK_LIST(gcc_ufs_ice_core_clk),
+	CLK_LIST(gcc_ufs_ice_core_hw_ctl_clk),
+	CLK_LIST(gcc_ufs_phy_aux_clk),
+	CLK_LIST(gcc_ufs_phy_aux_hw_ctl_clk),
+	CLK_LIST(gcc_ufs_rx_symbol_0_clk),
+	CLK_LIST(gcc_ufs_rx_symbol_1_clk),
+	CLK_LIST(gcc_ufs_tx_symbol_0_clk),
+	CLK_LIST(gcc_ufs_unipro_core_clk),
+	CLK_LIST(gcc_ufs_unipro_core_hw_ctl_clk),
+	CLK_LIST(gcc_usb30_master_clk),
+	CLK_LIST(gcc_usb30_mock_utmi_clk),
+	CLK_LIST(gcc_usb30_sleep_clk),
+	CLK_LIST(gcc_usb3_phy_aux_clk),
+	CLK_LIST(gcc_usb3_phy_pipe_clk),
+	CLK_LIST(gcc_prng_ahb_clk),
+	CLK_LIST(gcc_boot_rom_ahb_clk),
+	CLK_LIST(gcc_mss_cfg_ahb_clk),
+	CLK_LIST(gcc_mss_q6_bimc_axi_clk),
+	CLK_LIST(gcc_mss_mnoc_bimc_axi_clk),
+	CLK_LIST(gcc_mss_snoc_axi_clk),
+	CLK_LIST(gcc_hdmi_clkref_clk),
+	CLK_LIST(gcc_pcie_clkref_clk),
+	CLK_LIST(gcc_rx1_usb2_clkref_clk),
+	CLK_LIST(gcc_ufs_clkref_clk),
+	CLK_LIST(gcc_usb3_clkref_clk),
+	CLK_LIST(gcc_dcc_ahb_clk),
+	CLK_LIST(hlos1_vote_lpass_core_smmu_clk),
+	CLK_LIST(hlos1_vote_lpass_adsp_smmu_clk),
+	CLK_LIST(gcc_qspi_ahb_clk),
+	CLK_LIST(gcc_qspi_ref_clk),
+};
+
+static const struct msm_reset_map gcc_8998_resets[] = {
+	[QUSB2PHY_PRIM_BCR] = { 0x12000 },
+	[QUSB2PHY_SEC_BCR] = { 0x12004 },
+	[BLSP1_BCR] = { 0x17000 },
+	[BLSP2_BCR] = { 0x25000 },
+	[BOOT_ROM_BCR] = { 0x38000 },
+	[PRNG_BCR] = { 0x34000 },
+	[UFS_BCR] = { 0x75000 },
+	[USB_30_BCR] = { 0x0f000 },
+	[USB3_PHY_BCR] = { 0x50020 },
+	[USB3PHY_PHY_BCR] = { 0x50024 },
+	[PCIE_0_PHY_BCR] = { 0x6c01c },
+	[PCIE_PHY_BCR] = { 0x6f000 },
+	[PCIE_PHY_NOCSR_COM_PHY_BCR] = { 0x6f00c },
+	[PCIE_PHY_COM_BCR] = { 0x6f014 },
+};
+
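+/*
+ * Revision fixups: clocks that are not supported on a given chip revision
+ * get their ops pointed at clk_ops_dummy, turning all consumer operations
+ * on them into no-ops.
+ */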
+static void msm_gcc_8998_v1_fixup(void)
+{
+	gcc_ufs_rx_symbol_1_clk.c.ops = &clk_ops_dummy;
+	qspi_ref_clk_src.c.ops = &clk_ops_dummy;
+	gcc_qspi_ref_clk.c.ops = &clk_ops_dummy;
+	gcc_qspi_ahb_clk.c.ops = &clk_ops_dummy;
+}
+
+static void msm_gcc_8998_v2_fixup(void)
+{
+	qspi_ref_clk_src.c.ops = &clk_ops_dummy;
+	gcc_qspi_ref_clk.c.ops = &clk_ops_dummy;
+	gcc_qspi_ahb_clk.c.ops = &clk_ops_dummy;
+}
+
+static int msm_gcc_8998_probe(struct platform_device *pdev)
+{
+	struct resource *res;
+	u32 regval;
+	int ret;
+	bool is_v1 = false, is_v2 = false;
+
+	ret = vote_bimc(&bimc_clk, INT_MAX);
+	if (ret < 0)
+		return ret;
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cc_base");
+	if (!res) {
+		dev_err(&pdev->dev, "Failed to get CC base\n");
+		return -EINVAL;
+	}
+
+	virt_base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
+	if (!virt_base) {
+		dev_err(&pdev->dev, "Failed to map in CC registers\n");
+		return -ENOMEM;
+	}
+
+	/*
+	 * Clear the HMSS_AHB_CLK_ENA bit to allow the gcc_hmss_ahb_clk clock
+	 * to be gated by RPM during VDD_MIN.
+	 */
+	regval = readl_relaxed(virt_base + GCC_APCS_CLOCK_BRANCH_ENA_VOTE);
+	regval &= ~BIT(21);
+	writel_relaxed(regval, virt_base + GCC_APCS_CLOCK_BRANCH_ENA_VOTE);
+
+	vdd_dig.regulator[0] = devm_regulator_get(&pdev->dev, "vdd_dig");
+	if (IS_ERR(vdd_dig.regulator[0])) {
+		if (PTR_ERR(vdd_dig.regulator[0]) != -EPROBE_DEFER)
+			dev_err(&pdev->dev,
+					"Unable to get vdd_dig regulator\n");
+		return PTR_ERR(vdd_dig.regulator[0]);
+	}
+
+	vdd_dig_ao.regulator[0] = devm_regulator_get(&pdev->dev, "vdd_dig_ao");
+	if (IS_ERR(vdd_dig_ao.regulator[0])) {
+		if (PTR_ERR(vdd_dig_ao.regulator[0]) != -EPROBE_DEFER)
+			dev_err(&pdev->dev,
+					"Unable to get vdd_dig_ao regulator\n");
+		return PTR_ERR(vdd_dig_ao.regulator[0]);
+	}
+
+	bimc_clk.c.parent = &cxo_clk_src.c;
+	ret = of_msm_clock_register(pdev->dev.of_node, msm_clocks_rpm_8998,
+				    ARRAY_SIZE(msm_clocks_rpm_8998));
+	if (ret)
+		return ret;
+
+	gpll0_early_div.c.rate = 300000000;
+
+	ret = enable_rpm_scaling();
+	if (ret < 0)
+		return ret;
+
+	is_v1 = of_device_is_compatible(pdev->dev.of_node, "qcom,gcc-8998");
+	if (is_v1)
+		msm_gcc_8998_v1_fixup();
+
+	is_v2 = of_device_is_compatible(pdev->dev.of_node,
+						"qcom,gcc-8998-v2");
+	if (is_v2)
+		msm_gcc_8998_v2_fixup();
+
+	ret = of_msm_clock_register(pdev->dev.of_node, msm_clocks_gcc_8998,
+				    ARRAY_SIZE(msm_clocks_gcc_8998));
+	if (ret)
+		return ret;
+
+	/* Disable the GPLL0 active input to MMSS and GPU via MISC registers */
+	writel_relaxed(0x10003, virt_base + GCC_MMSS_MISC);
+	writel_relaxed(0x10003, virt_base + GCC_GPU_MISC);
+
+	/* Hold an active set vote for the cnoc_periph resource */
+	clk_set_rate(&cnoc_periph_keepalive_a_clk.c, 19200000);
+	clk_prepare_enable(&cnoc_periph_keepalive_a_clk.c);
+
+	/* This clock is used for all MMSSCC register access */
+	clk_prepare_enable(&gcc_mmss_noc_cfg_ahb_clk.c);
+
+	/* Keep bimc gfx clock port on all the time */
+	clk_prepare_enable(&gcc_bimc_gfx_clk.c);
+
+	/* This clock is used for all GPUCC register access */
+	clk_prepare_enable(&gcc_gpu_cfg_ahb_clk.c);
+
+	/* Keep an active vote on CXO in case no other driver votes for it */
+	clk_prepare_enable(&cxo_clk_src_ao.c);
+
+	clk_set_flags(&gcc_gpu_bimc_gfx_clk.c, CLKFLAG_RETAIN_MEM);
+
+	/* Register block resets */
+	msm_reset_controller_register(pdev, gcc_8998_resets,
+			ARRAY_SIZE(gcc_8998_resets), virt_base);
+
+	dev_info(&pdev->dev, "Registered GCC clocks\n");
+	return 0;
+}
+
+static const struct of_device_id msm_clock_gcc_match_table[] = {
+	{ .compatible = "qcom,gcc-8998" },
+	{ .compatible = "qcom,gcc-8998-v2" },
+	{ .compatible = "qcom,gcc-hamster" },
+	{}
+};
+
+static struct platform_driver msm_clock_gcc_driver = {
+	.probe = msm_gcc_8998_probe,
+	.driver = {
+		.name = "qcom,gcc-8998",
+		.of_match_table = msm_clock_gcc_match_table,
+		.owner = THIS_MODULE,
+	},
+};
+
+int __init msm_gcc_8998_init(void)
+{
+	return platform_driver_register(&msm_clock_gcc_driver);
+}
+arch_initcall(msm_gcc_8998_init);
+
+/* ======== Clock Debug Controller ======== */
+static struct clk_lookup msm_clocks_measure_8998[] = {
+	CLK_LIST(gpu_gcc_debug_clk),
+	CLK_LIST(gfx_gcc_debug_clk),
+	CLK_LIST(debug_mmss_clk),
+	CLK_LIST(debug_cpu_clk),
+	CLK_LOOKUP_OF("measure", gcc_debug_mux, "debug"),
+};
+
+static const struct of_device_id msm_clock_debug_match_table[] = {
+	{ .compatible = "qcom,cc-debug-8998" },
+	{}
+};
+
+static int msm_clock_debug_8998_probe(struct platform_device *pdev)
+{
+	struct resource *res;
+	int ret;
+
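+	/*
+	 * Start from the generic mux ops and override only get_rate, so that
+	 * reading the debug mux rate triggers a HW frequency measurement.
+	 */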
+	clk_ops_debug_mux = clk_ops_gen_mux;
+	clk_ops_debug_mux.get_rate = measure_get_rate;
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cc_base");
+	if (!res) {
+		dev_err(&pdev->dev, "Failed to get CC base\n");
+		return -EINVAL;
+	}
+	virt_dbgbase = devm_ioremap(&pdev->dev, res->start, resource_size(res));
+	if (!virt_dbgbase) {
+		dev_err(&pdev->dev, "Failed to map in CC registers\n");
+		return -ENOMEM;
+	}
+
+	gpu_gcc_debug_clk.dev = &pdev->dev;
+	gpu_gcc_debug_clk.clk_id = "debug_gpu_clk";
+
+	gfx_gcc_debug_clk.dev = &pdev->dev;
+	gfx_gcc_debug_clk.clk_id = "debug_gfx_clk";
+
+	debug_mmss_clk.dev = &pdev->dev;
+	debug_mmss_clk.clk_id = "debug_mmss_clk";
+
+	debug_cpu_clk.dev = &pdev->dev;
+	debug_cpu_clk.clk_id = "debug_cpu_clk";
+
+	ret = of_msm_clock_register(pdev->dev.of_node,
+				    msm_clocks_measure_8998,
+				    ARRAY_SIZE(msm_clocks_measure_8998));
+	if (ret)
+		return ret;
+
+	dev_info(&pdev->dev, "Registered debug mux\n");
+	return 0;
+}
+
+static struct platform_driver msm_clock_debug_driver = {
+	.probe = msm_clock_debug_8998_probe,
+	.driver = {
+		.name = "qcom,cc-debug-8998",
+		.of_match_table = msm_clock_debug_match_table,
+		.owner = THIS_MODULE,
+	},
+};
+
+int __init msm_clock_debug_8998_init(void)
+{
+	return platform_driver_register(&msm_clock_debug_driver);
+}
+late_initcall(msm_clock_debug_8998_init);
diff -Nruw linux-4.4.115-fbx/drivers/clk/msm./clock-generic.c linux-4.4.115-fbx/drivers/clk/msm/clock-generic.c
--- linux-4.4.115-fbx/drivers/clk/msm./clock-generic.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/clk/msm/clock-generic.c	2019-01-22 16:16:23.015241987 +0100
@@ -0,0 +1,927 @@
+/*
+ * Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/clk/msm-clk-provider.h>
+#include <linux/clk/msm-clock-generic.h>
+#include <soc/qcom/msm-clock-controller.h>
+
+/* ==================== Mux clock ==================== */
+
+static int mux_parent_to_src_sel(struct mux_clk *mux, struct clk *p)
+{
+	return parent_to_src_sel(mux->parents, mux->num_parents, p);
+}
+
+static int mux_set_parent(struct clk *c, struct clk *p)
+{
+	struct mux_clk *mux = to_mux_clk(c);
+	int sel = mux_parent_to_src_sel(mux, p);
+	struct clk *old_parent;
+	int rc = 0, i;
+	unsigned long flags;
+
+	if (sel < 0 && mux->rec_parents) {
+		for (i = 0; i < mux->num_rec_parents; i++) {
+			rc = clk_set_parent(mux->rec_parents[i], p);
+			if (!rc) {
+				/*
+				 * This is necessary to ensure prepare/enable
+				 * counts get propagated correctly.
+				 */
+				p = mux->rec_parents[i];
+				sel = mux_parent_to_src_sel(mux, p);
+				break;
+			}
+		}
+	}
+
+	if (sel < 0)
+		return sel;
+
+	rc = __clk_pre_reparent(c, p, &flags);
+	if (rc)
+		goto out;
+
+	rc = mux->ops->set_mux_sel(mux, sel);
+	if (rc)
+		goto set_fail;
+
+	old_parent = c->parent;
+	c->parent = p;
+	c->rate = clk_get_rate(p);
+	__clk_post_reparent(c, old_parent, &flags);
+
+	return 0;
+
+set_fail:
+	__clk_post_reparent(c, p, &flags);
+out:
+	return rc;
+}
+
+static long mux_round_rate(struct clk *c, unsigned long rate)
+{
+	struct mux_clk *mux = to_mux_clk(c);
+	int i;
+	unsigned long prate, rrate = 0;
+
+	for (i = 0; i < mux->num_parents; i++) {
+		prate = clk_round_rate(mux->parents[i].src, rate);
+		if (is_better_rate(rate, rrate, prate))
+			rrate = prate;
+	}
+	if (!rrate)
+		return -EINVAL;
+
+	return rrate;
+}
+
+static int mux_set_rate(struct clk *c, unsigned long rate)
+{
+	struct mux_clk *mux = to_mux_clk(c);
+	struct clk *new_parent = NULL;
+	int rc = 0, i;
+	unsigned long new_par_curr_rate;
+	unsigned long flags;
+
+	/*
+	 * Check if one of the possible parents is already at the requested
+	 * rate.
+	 */
+	for (i = 0; i < mux->num_parents && mux->try_get_rate; i++) {
+		struct clk *p = mux->parents[i].src;
+		if (p->rate == rate && clk_round_rate(p, rate) == rate) {
+			new_parent = mux->parents[i].src;
+			break;
+		}
+	}
+
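+	/*
+	 * Otherwise pick a parent that can run at exactly the requested rate;
+	 * the "!(!i && new_parent)" guard skips this loop entirely when the
+	 * loop above already found one.
+	 */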
+	for (i = 0; i < mux->num_parents && !(!i && new_parent); i++) {
+		if (clk_round_rate(mux->parents[i].src, rate) == rate) {
+			new_parent = mux->parents[i].src;
+			if (!mux->try_new_parent)
+				break;
+			if (mux->try_new_parent && new_parent != c->parent)
+				break;
+		}
+	}
+
+	if (new_parent == NULL)
+		return -EINVAL;
+
+	/*
+	 * Switch to safe parent since the old and new parent might be the
+	 * same and the parent might temporarily turn off while switching
+	 * rates. If the mux can switch between distinct sources safely
+	 * (indicated by try_new_parent), and the new source is not the current
+	 * parent, do not switch to the safe parent.
+	 */
+	if (mux->safe_sel >= 0 &&
+		!(mux->try_new_parent && (new_parent != c->parent))) {
+		/*
+		 * The safe parent might be a clock with multiple sources;
+		 * to select the "safe" source, set a safe frequency.
+		 */
+		if (mux->safe_freq) {
+			rc = clk_set_rate(mux->safe_parent, mux->safe_freq);
+			if (rc) {
+				pr_err("Failed to set safe rate on %s\n",
+					clk_name(mux->safe_parent));
+				return rc;
+			}
+		}
+
+		/*
+		 * Some mux implementations might switch to/from a low power
+		 * parent as part of their disable/enable ops. Grab the
+		 * enable lock to avoid racing with these implementations.
+		 */
+		spin_lock_irqsave(&c->lock, flags);
+		rc = mux->ops->set_mux_sel(mux, mux->safe_sel);
+		spin_unlock_irqrestore(&c->lock, flags);
+		if (rc)
+			return rc;
+
+	}
+
+	new_par_curr_rate = clk_get_rate(new_parent);
+	rc = clk_set_rate(new_parent, rate);
+	if (rc)
+		goto set_rate_fail;
+
+	rc = mux_set_parent(c, new_parent);
+	if (rc)
+		goto set_par_fail;
+
+	return 0;
+
+set_par_fail:
+	clk_set_rate(new_parent, new_par_curr_rate);
+set_rate_fail:
+	WARN(mux->ops->set_mux_sel(mux,
+		mux_parent_to_src_sel(mux, c->parent)),
+		"Set rate failed for %s. Also in bad state!\n", c->dbg_name);
+	return rc;
+}
+
+static int mux_enable(struct clk *c)
+{
+	struct mux_clk *mux = to_mux_clk(c);
+	if (mux->ops->enable)
+		return mux->ops->enable(mux);
+	return 0;
+}
+
+static void mux_disable(struct clk *c)
+{
+	struct mux_clk *mux = to_mux_clk(c);
+	if (mux->ops->disable)
+		return mux->ops->disable(mux);
+}
+
+static struct clk *mux_get_parent(struct clk *c)
+{
+	struct mux_clk *mux = to_mux_clk(c);
+	int sel = mux->ops->get_mux_sel(mux);
+	int i;
+
+	for (i = 0; i < mux->num_parents; i++) {
+		if (mux->parents[i].sel == sel)
+			return mux->parents[i].src;
+	}
+
+	/* Unfamiliar parent. */
+	return NULL;
+}
+
+static enum handoff mux_handoff(struct clk *c)
+{
+	struct mux_clk *mux = to_mux_clk(c);
+
+	c->rate = clk_get_rate(c->parent);
+	mux->safe_sel = mux_parent_to_src_sel(mux, mux->safe_parent);
+
+	if (mux->en_mask && mux->ops && mux->ops->is_enabled)
+		return mux->ops->is_enabled(mux)
+			? HANDOFF_ENABLED_CLK
+			: HANDOFF_DISABLED_CLK;
+
+	/*
+	 * If this function returns 'enabled' even when the clock downstream
+	 * of this clock is disabled, then handoff code will unnecessarily
+	 * enable the current parent of this clock. If this function always
+	 * returns 'disabled' and a clock downstream is on, the clock handoff
+	 * code will bump up the ref count for this clock and its current
+	 * parent as necessary. So, clocks without an actual HW gate can
+	 * always return disabled.
+	 */
+	return HANDOFF_DISABLED_CLK;
+}
+
+static void __iomem *mux_clk_list_registers(struct clk *c, int n,
+			struct clk_register_data **regs, u32 *size)
+{
+	struct mux_clk *mux = to_mux_clk(c);
+
+	if (mux->ops && mux->ops->list_registers)
+		return mux->ops->list_registers(mux, n, regs, size);
+
+	return ERR_PTR(-EINVAL);
+}
+
+struct clk_ops clk_ops_gen_mux = {
+	.enable = mux_enable,
+	.disable = mux_disable,
+	.set_parent = mux_set_parent,
+	.round_rate = mux_round_rate,
+	.set_rate = mux_set_rate,
+	.handoff = mux_handoff,
+	.get_parent = mux_get_parent,
+	.list_registers = mux_clk_list_registers,
+};
+
+/* ==================== Divider clock ==================== */
+
+static long __div_round_rate(struct div_data *data, unsigned long rate,
+	struct clk *parent, unsigned int *best_div, unsigned long *best_prate)
+{
+	unsigned int div, min_div, max_div, _best_div = 1;
+	unsigned long prate, _best_prate = 0, rrate = 0, req_prate, actual_rate;
+	unsigned int numer;
+
+	rate = max(rate, 1UL);
+
+	min_div = max(data->min_div, 1U);
+	max_div = min(data->max_div, (unsigned int) (ULONG_MAX));
+
+	/*
+	 * div values are doubled for half dividers.
+	 * Adjust for that by picking a numer of 2.
+	 */
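+	/*
+	 * E.g. an effective divide-by-1.5 is stored as div = 3, so the
+	 * achievable rate works out to prate * numer / div = prate * 2 / 3.
+	 */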
+	numer = data->is_half_divider ? 2 : 1;
+
+	for (div = min_div; div <= max_div; div++) {
+		if (data->skip_odd_div && (div & 1))
+			if (!(data->allow_div_one && (div == 1)))
+				continue;
+		if (data->skip_even_div && !(div & 1))
+			continue;
+		req_prate = mult_frac(rate, div, numer);
+		prate = clk_round_rate(parent, req_prate);
+		if (IS_ERR_VALUE(prate))
+			break;
+
+		actual_rate = mult_frac(prate, numer, div);
+		if (is_better_rate(rate, rrate, actual_rate)) {
+			rrate = actual_rate;
+			_best_div = div;
+			_best_prate = prate;
+		}
+
+		/*
+		 * Trying higher dividers is only going to ask the parent for
+		 * a higher rate. If it can't even output a rate higher than
+		 * the one we request for this divider, the parent is not
+		 * going to be able to output an even higher rate required
+		 * for a higher divider. So, stop trying higher dividers.
+		 */
+		if (actual_rate < rate)
+			break;
+
+		if (rrate <= rate + data->rate_margin)
+			break;
+	}
+
+	if (!rrate)
+		return -EINVAL;
+	if (best_div)
+		*best_div = _best_div;
+	if (best_prate)
+		*best_prate = _best_prate;
+
+	return rrate;
+}
+
+static long div_round_rate(struct clk *c, unsigned long rate)
+{
+	struct div_clk *d = to_div_clk(c);
+
+	return __div_round_rate(&d->data, rate, c->parent, NULL, NULL);
+}
+
+static int _find_safe_div(struct clk *c, unsigned long rate)
+{
+	struct div_clk *d = to_div_clk(c);
+	struct div_data *data = &d->data;
+	unsigned long fast = max(rate, c->rate);
+	unsigned int numer = data->is_half_divider ? 2 : 1;
+	int i, safe_div = 0;
+
+	if (!d->safe_freq)
+		return 0;
+
+	/* Find the max safe frequency that does not exceed fast */
+	for (i = data->max_div; i >= data->min_div; i--)
+		if (mult_frac(d->safe_freq, numer, i) <= fast)
+			safe_div = i;
+
+	return safe_div ?: -EINVAL;
+}
+
+static int div_set_rate(struct clk *c, unsigned long rate)
+{
+	struct div_clk *d = to_div_clk(c);
+	int safe_div, div, rc = 0;
+	long rrate, old_prate, new_prate;
+	struct div_data *data = &d->data;
+
+	rrate = __div_round_rate(data, rate, c->parent, &div, &new_prate);
+	if (rrate < rate || rrate > rate + data->rate_margin)
+		return -EINVAL;
+
+	/*
+	 * For a fixed-divider clock we don't want to return an error if the
+	 * requested rate matches the achievable rate, so don't check for
+	 * !d->ops and bail out with an error. __div_round_rate() ensures
+	 * div == data->div if !d->ops.
+	 */
+
+	safe_div = _find_safe_div(c, rate);
+	if (d->safe_freq && safe_div < 0) {
+		pr_err("No safe div on %s for transitioning from %lu to %lu\n",
+			c->dbg_name, c->rate, rate);
+		return -EINVAL;
+	}
+
+	safe_div = max(safe_div, div);
+
+	if (safe_div > data->div) {
+		rc = d->ops->set_div(d, safe_div);
+		if (rc) {
+			pr_err("Failed to set div %d on %s\n", safe_div,
+				c->dbg_name);
+			return rc;
+		}
+	}
+
+	old_prate = clk_get_rate(c->parent);
+	rc = clk_set_rate(c->parent, new_prate);
+	if (rc)
+		goto set_rate_fail;
+
+	if (div < data->div)
+		rc = d->ops->set_div(d, div);
+	else if (div < safe_div)
+		rc = d->ops->set_div(d, div);
+	if (rc)
+		goto div_dec_fail;
+
+	data->div = div;
+
+	return 0;
+
+div_dec_fail:
+	WARN(clk_set_rate(c->parent, old_prate),
+		"Set rate failed for %s. Also in bad state!\n", c->dbg_name);
+set_rate_fail:
+	if (safe_div > data->div)
+		WARN(d->ops->set_div(d, data->div),
+			"Set rate failed for %s. Also in bad state!\n",
+			c->dbg_name);
+	return rc;
+}
+
+static int div_enable(struct clk *c)
+{
+	struct div_clk *d = to_div_clk(c);
+	if (d->ops && d->ops->enable)
+		return d->ops->enable(d);
+	return 0;
+}
+
+static void div_disable(struct clk *c)
+{
+	struct div_clk *d = to_div_clk(c);
+	if (d->ops && d->ops->disable)
+		return d->ops->disable(d);
+}
+
+static enum handoff div_handoff(struct clk *c)
+{
+	struct div_clk *d = to_div_clk(c);
+	unsigned int div = d->data.div;
+
+	if (d->ops && d->ops->get_div)
+		div = max(d->ops->get_div(d), 1);
+	div = max(div, 1U);
+	c->rate = clk_get_rate(c->parent) / div;
+
+	if (!d->ops || !d->ops->set_div)
+		d->data.min_div = d->data.max_div = div;
+	d->data.div = div;
+
+	if (d->en_mask && d->ops && d->ops->is_enabled)
+		return d->ops->is_enabled(d)
+			? HANDOFF_ENABLED_CLK
+			: HANDOFF_DISABLED_CLK;
+
+	/*
+	 * If this function returns 'enabled' even when the clock downstream
+	 * of this clock is disabled, then handoff code will unnecessarily
+	 * enable the current parent of this clock. If this function always
+	 * returns 'disabled' and a clock downstream is on, the clock handoff
+	 * code will bump up the ref count for this clock and its current
+	 * parent as necessary. So, clocks without an actual HW gate can
+	 * always return disabled.
+	 */
+	return HANDOFF_DISABLED_CLK;
+}
+
+static void __iomem *div_clk_list_registers(struct clk *c, int n,
+			struct clk_register_data **regs, u32 *size)
+{
+	struct div_clk *d = to_div_clk(c);
+
+	if (d->ops && d->ops->list_registers)
+		return d->ops->list_registers(d, n, regs, size);
+
+	return ERR_PTR(-EINVAL);
+}
+
+struct clk_ops clk_ops_div = {
+	.enable = div_enable,
+	.disable = div_disable,
+	.round_rate = div_round_rate,
+	.set_rate = div_set_rate,
+	.handoff = div_handoff,
+	.list_registers = div_clk_list_registers,
+};
+
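+/*
+ * Slave dividers divide whatever rate the parent currently runs at; unlike
+ * clk_ops_div above they never propagate a rate request to the parent, so
+ * rounding simply picks the divider value closest to the requested rate.
+ */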
+static long __slave_div_round_rate(struct clk *c, unsigned long rate,
+					int *best_div)
+{
+	struct div_clk *d = to_div_clk(c);
+	unsigned int div, min_div, max_div;
+	long p_rate;
+
+	rate = max(rate, 1UL);
+
+	min_div = d->data.min_div;
+	max_div = d->data.max_div;
+
+	p_rate = clk_get_rate(c->parent);
+	div = DIV_ROUND_CLOSEST(p_rate, rate);
+	div = max(div, min_div);
+	div = min(div, max_div);
+	if (best_div)
+		*best_div = div;
+
+	if (d->data.is_half_divider)
+		p_rate *= 2;
+
+	return p_rate / div;
+}
+
+static long slave_div_round_rate(struct clk *c, unsigned long rate)
+{
+	return __slave_div_round_rate(c, rate, NULL);
+}
+
+static int slave_div_set_rate(struct clk *c, unsigned long rate)
+{
+	struct div_clk *d = to_div_clk(c);
+	int div, rc = 0;
+	long rrate;
+
+	rrate = __slave_div_round_rate(c, rate, &div);
+	if (rrate != rate)
+		return -EINVAL;
+
+	if (div == d->data.div)
+		return 0;
+
+	/*
+	 * For a fixed-divider clock we don't want to return an error if the
+	 * requested rate matches the achievable rate, so don't check for
+	 * !d->ops and bail out with an error. __slave_div_round_rate()
+	 * ensures div == d->data.div if !d->ops.
+	 */
+	rc = d->ops->set_div(d, div);
+	if (rc)
+		return rc;
+
+	d->data.div = div;
+
+	return 0;
+}
+
+static unsigned long slave_div_get_rate(struct clk *c)
+{
+	struct div_clk *d = to_div_clk(c);
+	unsigned long rate;
+
+	if (!d->data.div)
+		return 0;
+
+	rate = clk_get_rate(c->parent) / d->data.div;
+	if (d->data.is_half_divider)
+		rate *= 2;
+
+	return rate;
+}
+
+struct clk_ops clk_ops_slave_div = {
+	.enable = div_enable,
+	.disable = div_disable,
+	.round_rate = slave_div_round_rate,
+	.set_rate = slave_div_set_rate,
+	.get_rate = slave_div_get_rate,
+	.handoff = div_handoff,
+	.list_registers = div_clk_list_registers,
+};
+
+
+/**
+ * External clock
+ * Some clock controllers have input clock signals that come from outside the
+ * clock controller. Such an input clock signal might then be used as a source
+ * for several clocks inside the clock controller. This external clock
+ * implementation models the input clock signal by simply passing requests on
+ * to the clock's parent, the original external clock source. The driver for
+ * the clock controller should clk_get() the original external clock in its
+ * probe function and set it as the parent of this external clock.
+ */
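+/*
+ * A hypothetical probe-time wiring (clock and name are illustrative only):
+ *
+ *	my_ext_clk.c.parent = clk_get(&pdev->dev, "ref_clk");
+ *
+ * After this, round_rate/set_rate/get_rate calls on my_ext_clk are simply
+ * forwarded to "ref_clk" by the ops below.
+ */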
+
+long parent_round_rate(struct clk *c, unsigned long rate)
+{
+	return clk_round_rate(c->parent, rate);
+}
+
+int parent_set_rate(struct clk *c, unsigned long rate)
+{
+	return clk_set_rate(c->parent, rate);
+}
+
+unsigned long parent_get_rate(struct clk *c)
+{
+	return clk_get_rate(c->parent);
+}
+
+static int ext_set_parent(struct clk *c, struct clk *p)
+{
+	return clk_set_parent(c->parent, p);
+}
+
+static struct clk *ext_get_parent(struct clk *c)
+{
+	struct ext_clk *ext = to_ext_clk(c);
+
+	if (!IS_ERR_OR_NULL(c->parent))
+		return c->parent;
+	return clk_get(ext->dev, ext->clk_id);
+}
+
+static enum handoff ext_handoff(struct clk *c)
+{
+	c->rate = clk_get_rate(c->parent);
+	/* Same reasoning as in div_handoff; see the comment there. */
+	return HANDOFF_DISABLED_CLK;
+}
+
+struct clk_ops clk_ops_ext = {
+	.handoff = ext_handoff,
+	.round_rate = parent_round_rate,
+	.set_rate = parent_set_rate,
+	.get_rate = parent_get_rate,
+	.set_parent = ext_set_parent,
+	.get_parent = ext_get_parent,
+};
+
+static void *ext_clk_dt_parser(struct device *dev, struct device_node *np)
+{
+	struct ext_clk *ext;
+	const char *str;
+	int rc;
+
+	ext = devm_kzalloc(dev, sizeof(*ext), GFP_KERNEL);
+	if (!ext) {
+		dev_err(dev, "memory allocation failure\n");
+		return ERR_PTR(-ENOMEM);
+	}
+
+	ext->dev = dev;
+	rc = of_property_read_string(np, "qcom,clock-names", &str);
+	if (!rc)
+		ext->clk_id = (void *)str;
+
+	ext->c.ops = &clk_ops_ext;
+	return msmclk_generic_clk_init(dev, np, &ext->c);
+}
+MSMCLK_PARSER(ext_clk_dt_parser, "qcom,ext-clk", 0);
+
+/* ==================== Mux_div clock ==================== */
+
+static int mux_div_clk_enable(struct clk *c)
+{
+	struct mux_div_clk *md = to_mux_div_clk(c);
+
+	if (md->ops->enable)
+		return md->ops->enable(md);
+	return 0;
+}
+
+static void mux_div_clk_disable(struct clk *c)
+{
+	struct mux_div_clk *md = to_mux_div_clk(c);
+
+	if (md->ops->disable)
+		return md->ops->disable(md);
+}
+
+static long __mux_div_round_rate(struct clk *c, unsigned long rate,
+	struct clk **best_parent, int *best_div, unsigned long *best_prate)
+{
+	struct mux_div_clk *md = to_mux_div_clk(c);
+	unsigned int i;
+	unsigned long rrate, best = 0, _best_div = 0, _best_prate = 0;
+	struct clk *_best_parent = NULL;
+
+	if (md->try_get_rate) {
+		for (i = 0; i < md->num_parents; i++) {
+			int divider;
+			unsigned long p_rate;
+
+			rrate = __div_round_rate(&md->data, rate,
+						md->parents[i].src,
+						&divider, &p_rate);
+			/*
+			 * Check if one of the possible parents is already at
+			 * the requested rate.
+			 */
+			if (p_rate == clk_get_rate(md->parents[i].src)
+					&& rrate == rate) {
+				best = rrate;
+				_best_div = divider;
+				_best_prate = p_rate;
+				_best_parent = md->parents[i].src;
+				goto end;
+			}
+		}
+	}
+
+	for (i = 0; i < md->num_parents; i++) {
+		int div;
+		unsigned long prate;
+
+		rrate = __div_round_rate(&md->data, rate, md->parents[i].src,
+				&div, &prate);
+
+		if (is_better_rate(rate, best, rrate)) {
+			best = rrate;
+			_best_div = div;
+			_best_prate = prate;
+			_best_parent = md->parents[i].src;
+		}
+
+		if (rate <= rrate && rrate <= rate + md->data.rate_margin)
+			break;
+	}
+end:
+	if (best_div)
+		*best_div = _best_div;
+	if (best_prate)
+		*best_prate = _best_prate;
+	if (best_parent)
+		*best_parent = _best_parent;
+
+	if (best)
+		return best;
+	return -EINVAL;
+}
+
+static long mux_div_clk_round_rate(struct clk *c, unsigned long rate)
+{
+	return __mux_div_round_rate(c, rate, NULL, NULL, NULL);
+}
+
+/* requires enable lock to be held */
+static int __set_src_div(struct mux_div_clk *md, struct clk *parent, u32 div)
+{
+	int rc = 0;
+	u32 src_sel;
+
+	src_sel = parent_to_src_sel(md->parents, md->num_parents, parent);
+	/*
+	 * If the clock is disabled, don't change to the new settings until
+	 * the clock is re-enabled.
+	 */
+	if (md->c.count)
+		rc = md->ops->set_src_div(md, src_sel, div);
+	if (!rc) {
+		md->data.div = div;
+		md->src_sel = src_sel;
+	}
+
+	return rc;
+}
+
+static int set_src_div(struct mux_div_clk *md, struct clk *parent, u32 div)
+{
+	unsigned long flags;
+	int rc;
+
+	spin_lock_irqsave(&md->c.lock, flags);
+	rc = __set_src_div(md, parent, div);
+	spin_unlock_irqrestore(&md->c.lock, flags);
+
+	return rc;
+}
+
+/* Must be called after handoff to ensure parent clock rates are initialized */
+static int safe_parent_init_once(struct clk *c)
+{
+	unsigned long rrate;
+	u32 best_div;
+	struct clk *best_parent;
+	struct mux_div_clk *md = to_mux_div_clk(c);
+
+	if (IS_ERR(md->safe_parent))
+		return -EINVAL;
+	if (!md->safe_freq || md->safe_parent)
+		return 0;
+
+	rrate = __mux_div_round_rate(c, md->safe_freq, &best_parent,
+			&best_div, NULL);
+
+	if (rrate == md->safe_freq) {
+		md->safe_div = best_div;
+		md->safe_parent = best_parent;
+	} else {
+		md->safe_parent = ERR_PTR(-EINVAL);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static int mux_div_clk_set_rate(struct clk *c, unsigned long rate)
+{
+	struct mux_div_clk *md = to_mux_div_clk(c);
+	unsigned long flags, rrate;
+	unsigned long new_prate, new_parent_orig_rate;
+	struct clk *old_parent, *new_parent;
+	u32 new_div, old_div;
+	int rc;
+
+	rc = safe_parent_init_once(c);
+	if (rc)
+		return rc;
+
+	rrate = __mux_div_round_rate(c, rate, &new_parent, &new_div,
+							&new_prate);
+	if (rrate < rate || rrate > rate + md->data.rate_margin)
+		return -EINVAL;
+
+	old_parent = c->parent;
+	old_div = md->data.div;
+
+	/* Refer to the description of safe_freq in clock-generic.h */
+	if (md->safe_freq)
+		rc = set_src_div(md, md->safe_parent, md->safe_div);
+
+	else if (new_parent == old_parent && new_div >= old_div) {
+		/*
+		 * If both the parent_rate and divider changes, there may be an
+		 * intermediate frequency generated. Ensure this intermediate
+		 * frequency is less than both the new rate and previous rate.
+		 */
+		rc = set_src_div(md, old_parent, new_div);
+	}
+	if (rc)
+		return rc;
+
+	new_parent_orig_rate = clk_get_rate(new_parent);
+	rc = clk_set_rate(new_parent, new_prate);
+	if (rc) {
+		pr_err("failed to set %s to %lu\n",
+			clk_name(new_parent), new_prate);
+		goto err_set_rate;
+	}
+
+	rc = __clk_pre_reparent(c, new_parent, &flags);
+	if (rc)
+		goto err_pre_reparent;
+
+	/* Set divider and mux src atomically */
+	rc = __set_src_div(md, new_parent, new_div);
+	if (rc)
+		goto err_set_src_div;
+
+	c->parent = new_parent;
+
+	__clk_post_reparent(c, old_parent, &flags);
+	return 0;
+
+err_set_src_div:
+	/* Not switching to new_parent, so disable it */
+	__clk_post_reparent(c, new_parent, &flags);
+err_pre_reparent:
+	rc = clk_set_rate(new_parent, new_parent_orig_rate);
+	WARN(rc, "%s: error changing new_parent (%s) rate back to %lu\n",
+		clk_name(c), clk_name(new_parent), new_parent_orig_rate);
+err_set_rate:
+	rc = set_src_div(md, old_parent, old_div);
+	WARN(rc, "%s: error changing back to original div (%d) and parent (%s)\n",
+		clk_name(c), old_div, clk_name(old_parent));
+
+	return rc;
+}
+
+static struct clk *mux_div_clk_get_parent(struct clk *c)
+{
+	struct mux_div_clk *md = to_mux_div_clk(c);
+	u32 i, div, src_sel;
+
+	md->ops->get_src_div(md, &src_sel, &div);
+
+	md->data.div = div;
+	md->src_sel = src_sel;
+
+	for (i = 0; i < md->num_parents; i++) {
+		if (md->parents[i].sel == src_sel)
+			return md->parents[i].src;
+	}
+
+	return NULL;
+}
+
+static enum handoff mux_div_clk_handoff(struct clk *c)
+{
+	struct mux_div_clk *md = to_mux_div_clk(c);
+	unsigned long parent_rate;
+	unsigned int numer;
+
+	parent_rate = clk_get_rate(c->parent);
+	/*
+	 * div values are doubled for half dividers.
+	 * Adjust for that by picking a numer of 2.
+	 */
+	numer = md->data.is_half_divider ? 2 : 1;
+
+	if (md->data.div) {
+		c->rate = mult_frac(parent_rate, numer, md->data.div);
+	} else {
+		c->rate = 0;
+		return HANDOFF_DISABLED_CLK;
+	}
+
+	if (md->en_mask && md->ops && md->ops->is_enabled)
+		return md->ops->is_enabled(md)
+			? HANDOFF_ENABLED_CLK
+			: HANDOFF_DISABLED_CLK;
+
+	/*
+	 * If this function returns 'enabled' even when the clock downstream
+	 * of this clock is disabled, then handoff code will unnecessarily
+	 * enable the current parent of this clock. If this function always
+	 * returns 'disabled' and a clock downstream is on, the clock handoff
+	 * code will bump up the ref count for this clock and its current
+	 * parent as necessary. So, clocks without an actual HW gate can
+	 * always return disabled.
+	 */
+	return HANDOFF_DISABLED_CLK;
+}
+
+static void __iomem *mux_div_clk_list_registers(struct clk *c, int n,
+				struct clk_register_data **regs, u32 *size)
+{
+	struct mux_div_clk *md = to_mux_div_clk(c);
+
+	if (md->ops && md->ops->list_registers)
+		return md->ops->list_registers(md, n, regs, size);
+
+	return ERR_PTR(-EINVAL);
+}
+
+struct clk_ops clk_ops_mux_div_clk = {
+	.enable = mux_div_clk_enable,
+	.disable = mux_div_clk_disable,
+	.set_rate = mux_div_clk_set_rate,
+	.round_rate = mux_div_clk_round_rate,
+	.get_parent = mux_div_clk_get_parent,
+	.handoff = mux_div_clk_handoff,
+	.list_registers = mux_div_clk_list_registers,
+};
diff -Nruw linux-4.4.115-fbx/drivers/clk/msm./clock-gpu-8998.c linux-4.4.115-fbx/drivers/clk/msm/clock-gpu-8998.c
--- linux-4.4.115-fbx/drivers/clk/msm./clock-gpu-8998.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/clk/msm/clock-gpu-8998.c	2019-01-22 16:16:23.015241987 +0100
@@ -0,0 +1,734 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/ctype.h>
+#include <linux/io.h>
+#include <linux/clk.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/clk/msm-clock-generic.h>
+
+#include <soc/qcom/clock-local2.h>
+#include <soc/qcom/clock-voter.h>
+#include <soc/qcom/clock-pll.h>
+#include <soc/qcom/clock-alpha-pll.h>
+
+#include <dt-bindings/clock/msm-clocks-8998.h>
+#include <dt-bindings/clock/msm-clocks-hwio-8998.h>
+
+#include "vdd-level-8998.h"
+
+static void __iomem *virt_base;
+static void __iomem *virt_base_gfx;
+
+#define gpucc_cxo_clk_source_val		0
+#define gpucc_gpll0_source_val			5
+#define gpu_pll0_pll_out_even_source_val	1
+#define gpu_pll0_pll_out_odd_source_val		2
+
+#define SW_COLLAPSE_MASK			BIT(0)
+#define GPU_CX_GDSCR_OFFSET			0x1004
+#define GPU_GX_GDSCR_OFFSET			0x1094
+#define CRC_SID_FSM_OFFSET			0x10A0
+#define CRC_MND_CFG_OFFSET			0x10A4
+
+#define F(f, s, div, m, n) \
+	{ \
+		.freq_hz = (f), \
+		.src_clk = &s.c, \
+		.m_val = (m), \
+		.n_val = ~((n)-(m)) * !!(n), \
+		.d_val = ~(n),\
+		.div_src_val = BVAL(4, 0, (int)(2*(div) - 1)) \
+			| BVAL(10, 8, s##_source_val), \
+	}
+
+#define F_SLEW(f, s_f, s, div, m, n) \
+	{ \
+		.freq_hz = (f), \
+		.src_freq = (s_f), \
+		.src_clk = &s.c, \
+		.m_val = (m), \
+		.n_val = ~((n)-(m)) * !!(n), \
+		.d_val = ~(n),\
+		.div_src_val = BVAL(4, 0, (int)(2*(div) - 1)) \
+			| BVAL(10, 8, s##_source_val), \
+	}
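+/*
+ * Both macros encode the MND values in the one's-complement form the RCG
+ * registers expect: n_val holds ~(N - M) (forced to 0 when N is 0, i.e. the
+ * MND counter is unused) and d_val holds ~N, while the div field packs
+ * 2*div - 1 into bits 4:0. F_SLEW() additionally records src_freq; in the
+ * GFX3D tables below src_freq is twice freq_hz, i.e. the GPU PLL runs at
+ * double the GFX3D clock rate.
+ */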
+
+static struct alpha_pll_masks pll_masks_p = {
+	.lock_mask = BIT(31),
+	.active_mask = BIT(30),
+	.update_mask = BIT(22),
+	.output_mask = 0xf,
+	.post_div_mask = BM(15, 8),
+};
+
+static DEFINE_VDD_REGULATORS(vdd_dig, VDD_DIG_NUM, 1, vdd_corner, NULL);
+DEFINE_VDD_REGS_INIT(vdd_gpucc, 2);
+DEFINE_VDD_REGS_INIT(vdd_gpucc_mx, 1);
+
+DEFINE_EXT_CLK(gpucc_xo, NULL);
+DEFINE_EXT_CLK(gpucc_gpll0, NULL);
+
+static struct branch_clk gpucc_cxo_clk = {
+	.cbcr_reg = GPUCC_CXO_CBCR,
+	.has_sibling = 1,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "gpucc_cxo_clk",
+		.parent = &gpucc_xo.c,
+		.ops = &clk_ops_branch,
+		CLK_INIT(gpucc_cxo_clk.c),
+	},
+};
+
+static struct alpha_pll_clk gpu_pll0_pll = {
+	.masks = &pll_masks_p,
+	.base = &virt_base_gfx,
+	.offset = GPUCC_GPU_PLL0_PLL_MODE,
+	.enable_config = 0x1,
+	.is_fabia = true,
+	.c = {
+		.rate = 0,
+		.parent = &gpucc_xo.c,
+		.dbg_name = "gpu_pll0_pll",
+		.ops = &clk_ops_fabia_alpha_pll,
+		VDD_GPU_PLL_FMAX_MAP1(MIN, 1300000500),
+		CLK_INIT(gpu_pll0_pll.c),
+	},
+};
+
+static struct div_clk gpu_pll0_pll_out_even = {
+	.base = &virt_base_gfx,
+	.offset = GPUCC_GPU_PLL0_USER_CTL_MODE,
+	.mask = 0xf,
+	.shift = 8,
+	.data = {
+		.max_div = 8,
+		.min_div = 1,
+		.skip_odd_div = true,
+		.allow_div_one = true,
+		.rate_margin = 500,
+	},
+	.ops = &postdiv_reg_ops,
+	.c = {
+		.parent = &gpu_pll0_pll.c,
+		.dbg_name = "gpu_pll0_pll_out_even",
+		.ops = &clk_ops_div,
+		.flags = CLKFLAG_NO_RATE_CACHE,
+		CLK_INIT(gpu_pll0_pll_out_even.c),
+	},
+};
+
+static struct div_clk gpu_pll0_pll_out_odd = {
+	.base = &virt_base_gfx,
+	.offset = GPUCC_GPU_PLL0_USER_CTL_MODE,
+	.mask = 0xf,
+	.shift = 12,
+	.data = {
+		.max_div = 7,
+		.min_div = 3,
+		.skip_even_div = true,
+		.rate_margin = 500,
+	},
+	.ops = &postdiv_reg_ops,
+	.c = {
+		.parent = &gpu_pll0_pll.c,
+		.dbg_name = "gpu_pll0_pll_out_odd",
+		.ops = &clk_ops_div,
+		.flags = CLKFLAG_NO_RATE_CACHE,
+		CLK_INIT(gpu_pll0_pll_out_odd.c),
+	},
+};
+
+static struct clk_freq_tbl ftbl_gfx3d_clk_src[] = {
+	F_SLEW( 171000000,  342000000, gpu_pll0_pll_out_even,    1, 0, 0),
+	F_SLEW( 251000000,  502000000, gpu_pll0_pll_out_even,    1, 0, 0),
+	F_SLEW( 332000000,  664000000, gpu_pll0_pll_out_even,    1, 0, 0),
+	F_SLEW( 403000000,  806000000, gpu_pll0_pll_out_even,    1, 0, 0),
+	F_SLEW( 504000000, 1008000000, gpu_pll0_pll_out_even,    1, 0, 0),
+	F_SLEW( 650000000, 1300000000, gpu_pll0_pll_out_even,    1, 0, 0),
+	F_END
+};
+
+static struct clk_freq_tbl ftbl_gfx3d_clk_src_v2[] = {
+	F_SLEW( 180000000,  360000000, gpu_pll0_pll_out_even,    1, 0, 0),
+	F_SLEW( 257000000,  514000000, gpu_pll0_pll_out_even,    1, 0, 0),
+	F_SLEW( 342000000,  684000000, gpu_pll0_pll_out_even,    1, 0, 0),
+	F_SLEW( 414000000,  828000000, gpu_pll0_pll_out_even,    1, 0, 0),
+	F_SLEW( 515000000, 1030000000, gpu_pll0_pll_out_even,    1, 0, 0),
+	F_SLEW( 596000000, 1192000000, gpu_pll0_pll_out_even,    1, 0, 0),
+	F_SLEW( 670000000, 1340000000, gpu_pll0_pll_out_even,    1, 0, 0),
+	F_SLEW( 710000000, 1420000000, gpu_pll0_pll_out_even,    1, 0, 0),
+	F_END
+};
+
+static struct clk_freq_tbl ftbl_gfx3d_clk_src_vq[] = {
+	F_SLEW( 180000000,  360000000, gpu_pll0_pll_out_even,    1, 0, 0),
+	F_SLEW( 265000000,  530000000, gpu_pll0_pll_out_even,    1, 0, 0),
+	F_SLEW( 358000000,  716000000, gpu_pll0_pll_out_even,    1, 0, 0),
+	F_SLEW( 434000000,  868000000, gpu_pll0_pll_out_even,    1, 0, 0),
+	F_SLEW( 542000000, 1084000000, gpu_pll0_pll_out_even,    1, 0, 0),
+	F_SLEW( 630000000, 1260000000, gpu_pll0_pll_out_even,    1, 0, 0),
+	F_SLEW( 700000000, 1400000000, gpu_pll0_pll_out_even,    1, 0, 0),
+	F_SLEW( 750000000, 1500000000, gpu_pll0_pll_out_even,    1, 0, 0),
+	F_END
+};
+
+static struct rcg_clk gfx3d_clk_src = {
+	.cmd_rcgr_reg = GPUCC_GFX3D_CMD_RCGR,
+	.set_rate = set_rate_hid,
+	.freq_tbl = ftbl_gfx3d_clk_src,
+	.current_freq = &rcg_dummy_freq,
+	.force_enable_rcgr = true,
+	.base = &virt_base_gfx,
+	.c = {
+		.dbg_name = "gfx3d_clk_src",
+		.ops = &clk_ops_rcg,
+		.vdd_class = &vdd_gpucc,
+		CLK_INIT(gfx3d_clk_src.c),
+	},
+};
+
+static struct clk_freq_tbl ftbl_rbbmtimer_clk_src[] = {
+	F( 19200000, gpucc_cxo_clk,    1,    0,     0),
+	F_END
+};
+
+static struct rcg_clk rbbmtimer_clk_src = {
+	.cmd_rcgr_reg = GPUCC_RBBMTIMER_CMD_RCGR,
+	.set_rate = set_rate_hid,
+	.freq_tbl = ftbl_rbbmtimer_clk_src,
+	.current_freq = &rcg_dummy_freq,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "rbbmtimer_clk_src",
+		.ops = &clk_ops_rcg,
+		VDD_DIG_FMAX_MAP1(MIN, 19200000),
+		CLK_INIT(rbbmtimer_clk_src.c),
+	},
+};
+
+static struct clk_freq_tbl ftbl_gfx3d_isense_clk_src[] = {
+	F(  19200000, gpucc_cxo_clk,    1,    0,     0),
+	F(  40000000,   gpucc_gpll0,   15,    0,     0),
+	F( 200000000,   gpucc_gpll0,    3,    0,     0),
+	F( 300000000,   gpucc_gpll0,    2,    0,     0),
+	F_END
+};
+
+static struct rcg_clk gfx3d_isense_clk_src = {
+	.cmd_rcgr_reg = GPUCC_GFX3D_ISENSE_CMD_RCGR,
+	.set_rate = set_rate_hid,
+	.freq_tbl = ftbl_gfx3d_isense_clk_src,
+	.current_freq = &rcg_dummy_freq,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "gfx3d_isense_clk_src",
+		.ops = &clk_ops_rcg,
+		VDD_DIG_FMAX_MAP4(MIN, 19200000, LOWER, 40000000,
+				LOW, 200000000, HIGH, 300000000),
+		CLK_INIT(gfx3d_isense_clk_src.c),
+	},
+};
+
+static struct clk_freq_tbl ftbl_rbcpr_clk_src[] = {
+	F( 19200000, gpucc_cxo_clk,    1,    0,     0),
+	F( 50000000,   gpucc_gpll0,   12,    0,     0),
+	F_END
+};
+
+static struct rcg_clk rbcpr_clk_src = {
+	.cmd_rcgr_reg = GPUCC_RBCPR_CMD_RCGR,
+	.set_rate = set_rate_hid,
+	.freq_tbl = ftbl_rbcpr_clk_src,
+	.current_freq = &rcg_dummy_freq,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "rbcpr_clk_src",
+		.ops = &clk_ops_rcg,
+		VDD_DIG_FMAX_MAP2(MIN, 19200000, NOMINAL, 50000000),
+		CLK_INIT(rbcpr_clk_src.c),
+	},
+};
+
+static struct branch_clk gpucc_gfx3d_clk = {
+	.cbcr_reg = GPUCC_GFX3D_CBCR,
+	.has_sibling = 0,
+	.base = &virt_base_gfx,
+	.c = {
+		.dbg_name = "gpucc_gfx3d_clk",
+		.parent = &gfx3d_clk_src.c,
+		.ops = &clk_ops_branch,
+		CLK_INIT(gpucc_gfx3d_clk.c),
+	},
+};
+
+static struct branch_clk gpucc_rbbmtimer_clk = {
+	.cbcr_reg = GPUCC_RBBMTIMER_CBCR,
+	.has_sibling = 0,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "gpucc_rbbmtimer_clk",
+		.parent = &rbbmtimer_clk_src.c,
+		.ops = &clk_ops_branch,
+		CLK_INIT(gpucc_rbbmtimer_clk.c),
+	},
+};
+
+static struct branch_clk gpucc_gfx3d_isense_clk = {
+	.cbcr_reg = GPUCC_GFX3D_ISENSE_CBCR,
+	.has_sibling = 0,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "gpucc_gfx3d_isense_clk",
+		.parent = &gfx3d_isense_clk_src.c,
+		.ops = &clk_ops_branch,
+		CLK_INIT(gpucc_gfx3d_isense_clk.c),
+	},
+};
+
+static struct branch_clk gpucc_rbcpr_clk = {
+	.cbcr_reg = GPUCC_RBCPR_CBCR,
+	.has_sibling = 0,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "gpucc_rbcpr_clk",
+		.parent = &rbcpr_clk_src.c,
+		.ops = &clk_ops_branch,
+		CLK_INIT(gpucc_rbcpr_clk.c),
+	},
+};
+
+static struct fixed_clk gpucc_mx_clk = {
+	.c = {
+		.dbg_name = "gpucc_mx_clk",
+		.vdd_class = &vdd_gpucc_mx,
+		.ops = &clk_ops_dummy,
+		CLK_INIT(gpucc_mx_clk.c),
+	},
+};
+
+static int of_get_fmax_vdd_class(struct platform_device *pdev, struct clk *c,
+								char *prop_name)
+{
+	struct device_node *of = pdev->dev.of_node;
+	int prop_len, i, j;
+	struct clk_vdd_class *vdd = c->vdd_class;
+	int num = vdd->num_regulators + 1;
+	u32 *array;
+
+	if (!of_find_property(of, prop_name, &prop_len)) {
+		dev_err(&pdev->dev, "missing %s\n", prop_name);
+		return -EINVAL;
+	}
+
+	prop_len /= sizeof(u32);
+	if (prop_len % num) {
+		dev_err(&pdev->dev, "bad length %d\n", prop_len);
+		return -EINVAL;
+	}
+
+	prop_len /= num;
+	vdd->level_votes = devm_kzalloc(&pdev->dev, prop_len * sizeof(int),
+					GFP_KERNEL);
+	if (!vdd->level_votes)
+		return -ENOMEM;
+
+	vdd->vdd_uv = devm_kzalloc(&pdev->dev,
+			prop_len * sizeof(int) * (num - 1), GFP_KERNEL);
+	if (!vdd->vdd_uv)
+		return -ENOMEM;
+
+	c->fmax = devm_kzalloc(&pdev->dev, prop_len * sizeof(unsigned long),
+					GFP_KERNEL);
+	if (!c->fmax)
+		return -ENOMEM;
+
+	array = devm_kzalloc(&pdev->dev,
+			prop_len * sizeof(u32) * num, GFP_KERNEL);
+	if (!array)
+		return -ENOMEM;
+
+	of_property_read_u32_array(of, prop_name, array, prop_len * num);
+	for (i = 0; i < prop_len; i++) {
+		c->fmax[i] = array[num * i];
+		for (j = 1; j < num; j++) {
+			vdd->vdd_uv[(num - 1) * i + (j - 1)] =
+						array[num * i + j];
+		}
+	}
+
+	devm_kfree(&pdev->dev, array);
+	vdd->num_levels = prop_len;
+	vdd->cur_level = prop_len;
+	c->num_fmax = prop_len;
+	return 0;
+}
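+
+/*
+ * Sketch of the devicetree layout the parser above assumes (property name
+ * from this driver, values hypothetical): each tuple carries one frequency
+ * followed by one voltage corner per regulator, so with two regulators
+ * (num == 3):
+ *
+ *	qcom,gfxfreq-speedbin0 =
+ *		< 171000000 3 3 >,
+ *		< 342000000 5 5 >;
+ *
+ * c->fmax[i] takes the first cell of tuple i; vdd->vdd_uv[] gets the rest.
+ */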
+
+static struct mux_clk gpucc_gcc_dbg_clk = {
+	.ops = &mux_reg_ops,
+	.en_mask = BIT(16),
+	.mask = 0x3FF,
+	.offset = GPUCC_DEBUG_CLK_CTL,
+	.en_offset = GPUCC_DEBUG_CLK_CTL,
+	.base = &virt_base,
+	MUX_SRC_LIST(
+		{ &gpucc_rbcpr_clk.c, 0x0003 },
+		{ &gpucc_rbbmtimer_clk.c, 0x0005 },
+		{ &gpucc_gfx3d_isense_clk.c, 0x000a },
+	),
+	.c = {
+		.dbg_name = "gpucc_gcc_dbg_clk",
+		.ops = &clk_ops_gen_mux,
+		.flags = CLKFLAG_NO_RATE_CACHE,
+		CLK_INIT(gpucc_gcc_dbg_clk.c),
+	},
+};
+
+static void enable_gfx_crc(void)
+{
+	u32 regval;
+
+	/* Set graphics clock at a safe frequency */
+	clk_set_rate(&gpucc_gfx3d_clk.c, gfx3d_clk_src.c.fmax[2]);
+	/* Turn on the GPU_CX GDSC */
+	regval = readl_relaxed(virt_base_gfx + GPU_CX_GDSCR_OFFSET);
+	regval &= ~SW_COLLAPSE_MASK;
+	writel_relaxed(regval, virt_base_gfx + GPU_CX_GDSCR_OFFSET);
+	/* Wait for 10usecs to let the GDSC turn ON */
+	mb();
+	udelay(10);
+	/* Turn on the Graphics rail */
+	if (regulator_enable(vdd_gpucc.regulator[0]))
+		pr_warn("Enabling the graphics rail during CRC sequence failed!\n");
+	/* Turn on the GPU_GX GDSC */
+	writel_relaxed(0x1, virt_base_gfx + GPU_GX_BCR);
+	/*
+	 * BLK_ARES should be kept asserted for 1us before being de-asserted.
+	 */
+	wmb();
+	udelay(1);
+	writel_relaxed(0x0, virt_base_gfx + GPU_GX_BCR);
+	regval = readl_relaxed(virt_base_gfx + GPUCC_GX_DOMAIN_MISC);
+	regval |= BIT(4);
+	writel_relaxed(regval, virt_base_gfx + GPUCC_GX_DOMAIN_MISC);
+	/* Keep reset asserted for at-least 1us before continuing. */
+	wmb();
+	udelay(1);
+	regval &= ~BIT(4);
+	writel_relaxed(regval, virt_base_gfx + GPUCC_GX_DOMAIN_MISC);
+	/* Make sure GMEM_RESET is de-asserted before continuing. */
+	wmb();
+	regval &= ~BIT(0);
+	writel_relaxed(regval, virt_base_gfx + GPUCC_GX_DOMAIN_MISC);
+	/* All previous writes should be done at this point */
+	wmb();
+	regval = readl_relaxed(virt_base_gfx + GPU_GX_GDSCR_OFFSET);
+	regval &= ~SW_COLLAPSE_MASK;
+	writel_relaxed(regval, virt_base_gfx + GPU_GX_GDSCR_OFFSET);
+	/* Wait for 10usecs to let the GDSC turn ON */
+	mb();
+	udelay(10);
+	/* Enable the graphics clock */
+	clk_prepare_enable(&gpucc_gfx3d_clk.c);
+	/* Enabling MND RC in Bypass mode */
+	writel_relaxed(0x00015010, virt_base_gfx + CRC_MND_CFG_OFFSET);
+	writel_relaxed(0x00800000, virt_base_gfx + CRC_SID_FSM_OFFSET);
+	/* Wait for 16 cycles before continuing */
+	udelay(1);
+	clk_set_rate(&gpucc_gfx3d_clk.c,
+			gfx3d_clk_src.c.fmax[gfx3d_clk_src.c.num_fmax - 1]);
+	/* Disable the graphics clock */
+	clk_disable_unprepare(&gpucc_gfx3d_clk.c);
+	/* Turn off the gpu_cx and gpu_gx GDSCs */
+	regval = readl_relaxed(virt_base_gfx + GPU_GX_GDSCR_OFFSET);
+	regval |= SW_COLLAPSE_MASK;
+	writel_relaxed(regval, virt_base_gfx + GPU_GX_GDSCR_OFFSET);
+	/* Write to disable GX GDSC should go through before continuing */
+	wmb();
+	regval = readl_relaxed(virt_base_gfx + GPUCC_GX_DOMAIN_MISC);
+	regval |= BIT(0);
+	writel_relaxed(regval, virt_base_gfx + GPUCC_GX_DOMAIN_MISC);
+	/* Make sure GMEM_CLAMP_IO is asserted before continuing. */
+	wmb();
+	regulator_disable(vdd_gpucc.regulator[0]);
+	regval = readl_relaxed(virt_base_gfx + GPU_CX_GDSCR_OFFSET);
+	regval |= SW_COLLAPSE_MASK;
+	writel_relaxed(regval, virt_base_gfx + GPU_CX_GDSCR_OFFSET);
+}
+
+static struct mux_clk gfxcc_dbg_clk = {
+	.ops = &mux_reg_ops,
+	.en_mask = BIT(16),
+	.mask = 0x3FF,
+	.offset = GPUCC_DEBUG_CLK_CTL,
+	.en_offset = GPUCC_DEBUG_CLK_CTL,
+	.base = &virt_base_gfx,
+	MUX_SRC_LIST(
+		{ &gpucc_gfx3d_clk.c, 0x0008 },
+	),
+	.c = {
+		.dbg_name = "gfxcc_dbg_clk",
+		.ops = &clk_ops_gen_mux,
+		.flags = CLKFLAG_NO_RATE_CACHE,
+		CLK_INIT(gfxcc_dbg_clk.c),
+	},
+};
+
+static struct clk_lookup msm_clocks_gpucc_8998[] = {
+	CLK_LIST(gpucc_xo),
+	CLK_LIST(gpucc_gpll0),
+	CLK_LIST(gpucc_cxo_clk),
+	CLK_LIST(rbbmtimer_clk_src),
+	CLK_LIST(gfx3d_isense_clk_src),
+	CLK_LIST(rbcpr_clk_src),
+	CLK_LIST(gpucc_rbbmtimer_clk),
+	CLK_LIST(gpucc_gfx3d_isense_clk),
+	CLK_LIST(gpucc_rbcpr_clk),
+	CLK_LIST(gpucc_gcc_dbg_clk),
+};
+
+static void msm_gpucc_hamster_fixup(void)
+{
+	gfx3d_isense_clk_src.c.ops = &clk_ops_dummy;
+	gpucc_gfx3d_isense_clk.c.ops = &clk_ops_dummy;
+}
+
+static struct platform_driver msm_clock_gfxcc_driver;
+int msm_gpucc_8998_probe(struct platform_device *pdev)
+{
+	struct resource *res;
+	struct device_node *of_node = pdev->dev.of_node;
+	int rc;
+	struct regulator *reg;
+	u32 regval;
+	struct clk *tmp;
+	bool is_vq = false;
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cc_base");
+	if (!res) {
+		dev_err(&pdev->dev, "Unable to retrieve register base\n");
+		return -ENOMEM;
+	}
+
+	virt_base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
+	if (!virt_base) {
+		dev_err(&pdev->dev, "Failed to map CC registers\n");
+		return -ENOMEM;
+	}
+
+	reg = vdd_dig.regulator[0] = devm_regulator_get(&pdev->dev, "vdd_dig");
+	if (IS_ERR(reg)) {
+		if (PTR_ERR(reg) != -EPROBE_DEFER)
+			dev_err(&pdev->dev, "Unable to get vdd_dig regulator\n");
+		return PTR_ERR(reg);
+	}
+
+	tmp = gpucc_xo.c.parent = devm_clk_get(&pdev->dev, "xo_ao");
+	if (IS_ERR(tmp)) {
+		if (PTR_ERR(tmp) != -EPROBE_DEFER)
+			dev_err(&pdev->dev, "Unable to get xo_ao clock\n");
+		return PTR_ERR(tmp);
+	}
+
+	tmp = gpucc_gpll0.c.parent = devm_clk_get(&pdev->dev, "gpll0");
+	if (IS_ERR(tmp)) {
+		if (PTR_ERR(tmp) != -EPROBE_DEFER)
+			dev_err(&pdev->dev, "Unable to get gpll0 clock\n");
+		return PTR_ERR(tmp);
+	}
+
+	/* Clear the DBG_CLK_DIV bits of the GPU debug register */
+	regval = readl_relaxed(virt_base + gpucc_gcc_dbg_clk.offset);
+	regval &= ~BM(18, 17);
+	writel_relaxed(regval, virt_base + gpucc_gcc_dbg_clk.offset);
+
+	is_vq = of_device_is_compatible(pdev->dev.of_node,
+						"qcom,gpucc-hamster");
+	if (is_vq)
+		msm_gpucc_hamster_fixup();
+
+	rc = of_msm_clock_register(of_node, msm_clocks_gpucc_8998,
+					ARRAY_SIZE(msm_clocks_gpucc_8998));
+	if (rc)
+		return rc;
+
+	/*
+	 * gpucc_cxo_clk works as the root clock for all GPUCC RCGs and GDSCs.
+	 * Keep it always enabled.
+	 */
+	clk_prepare_enable(&gpucc_cxo_clk.c);
+
+	dev_info(&pdev->dev, "Registered GPU clocks (barring gfx3d clocks)\n");
+	return platform_driver_register(&msm_clock_gfxcc_driver);
+}
+
+static const struct of_device_id msm_clock_gpucc_match_table[] = {
+	{ .compatible = "qcom,gpucc-8998" },
+	{ .compatible = "qcom,gpucc-8998-v2" },
+	{ .compatible = "qcom,gpucc-hamster" },
+	{},
+};
+
+static struct platform_driver msm_clock_gpucc_driver = {
+	.probe = msm_gpucc_8998_probe,
+	.driver = {
+		.name = "qcom,gpucc-8998",
+		.of_match_table = msm_clock_gpucc_match_table,
+		.owner = THIS_MODULE,
+	},
+};
+
+static struct clk_lookup msm_clocks_gfxcc_8998[] = {
+	CLK_LIST(gpu_pll0_pll),
+	CLK_LIST(gpu_pll0_pll_out_even),
+	CLK_LIST(gpu_pll0_pll_out_odd),
+	CLK_LIST(gfx3d_clk_src),
+	CLK_LIST(gpucc_gfx3d_clk),
+	CLK_LIST(gpucc_mx_clk),
+	CLK_LIST(gfxcc_dbg_clk),
+};
+
+static void msm_gfxcc_hamster_fixup(void)
+{
+	gpu_pll0_pll.c.fmax[VDD_DIG_MIN] = 1420000500;
+	gfx3d_clk_src.freq_tbl = ftbl_gfx3d_clk_src_vq;
+}
+
+static void msm_gfxcc_8998_v2_fixup(void)
+{
+	gpu_pll0_pll.c.fmax[VDD_DIG_MIN] = 1420000500;
+	gfx3d_clk_src.freq_tbl = ftbl_gfx3d_clk_src_v2;
+}
+
+int msm_gfxcc_8998_probe(struct platform_device *pdev)
+{
+	struct resource *res;
+	struct device_node *of_node = pdev->dev.of_node;
+	int rc;
+	struct regulator *reg;
+	u32 regval;
+	bool is_v2 = false, is_vq = false;
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cc_base");
+	if (!res) {
+		dev_err(&pdev->dev, "Unable to retrieve register base\n");
+		return -ENOMEM;
+	}
+
+	virt_base_gfx = devm_ioremap(&pdev->dev, res->start,
+						resource_size(res));
+	if (!virt_base_gfx) {
+		dev_err(&pdev->dev, "Failed to map CC registers\n");
+		return -ENOMEM;
+	}
+
+	reg = vdd_gpucc.regulator[0] = devm_regulator_get(&pdev->dev,
+								"vdd_gpucc");
+	if (IS_ERR(reg)) {
+		if (PTR_ERR(reg) != -EPROBE_DEFER)
+			dev_err(&pdev->dev, "Unable to get vdd_gpucc regulator\n");
+		return PTR_ERR(reg);
+	}
+
+	reg = vdd_gpucc.regulator[1] = devm_regulator_get(&pdev->dev, "vdd_mx");
+	if (IS_ERR(reg)) {
+		if (PTR_ERR(reg) != -EPROBE_DEFER)
+			dev_err(&pdev->dev, "Unable to get vdd_mx regulator\n");
+		return PTR_ERR(reg);
+	}
+
+	reg = vdd_gpucc_mx.regulator[0] = devm_regulator_get(&pdev->dev,
+								"vdd_gpu_mx");
+	if (IS_ERR(reg)) {
+		if (PTR_ERR(reg) != -EPROBE_DEFER)
+			dev_err(&pdev->dev, "Unable to get vdd_gpu_mx regulator\n");
+		return PTR_ERR(reg);
+	}
+
+	rc = of_get_fmax_vdd_class(pdev, &gfx3d_clk_src.c,
+						"qcom,gfxfreq-speedbin0");
+	if (rc) {
+		dev_err(&pdev->dev, "Can't get freq-corner mapping for gfx3d_clk_src\n");
+		return rc;
+	}
+
+	rc = of_get_fmax_vdd_class(pdev, &gpucc_mx_clk.c,
+						"qcom,gfxfreq-mx-speedbin0");
+	if (rc) {
+		dev_err(&pdev->dev, "Can't get freq-corner mapping for gpucc_mx_clk\n");
+		return rc;
+	}
+
+	is_v2 = of_device_is_compatible(pdev->dev.of_node,
+						"qcom,gfxcc-8998-v2");
+	if (is_v2)
+		msm_gfxcc_8998_v2_fixup();
+
+	is_vq = of_device_is_compatible(pdev->dev.of_node,
+						"qcom,gfxcc-hamster");
+	if (is_vq)
+		msm_gfxcc_hamster_fixup();
+
+	rc = of_msm_clock_register(of_node, msm_clocks_gfxcc_8998,
+					ARRAY_SIZE(msm_clocks_gfxcc_8998));
+	if (rc)
+		return rc;
+
+	enable_gfx_crc();
+
+	/*
+	 * Force the periph logic to stay ON, since the perf counter value is
+	 * frequently corrupted after NAP otherwise.
+	 */
+	clk_set_flags(&gpucc_gfx3d_clk.c, CLKFLAG_RETAIN_PERIPH);
+
+	/*
+	 * Program the droop detector's gfx_pdn to 1'b1 in order to reduce
+	 * leakage between the graphics and CX rails.
+	 */
+	regval = readl_relaxed(virt_base_gfx + GPUCC_GPU_DD_WRAP_CTRL);
+	regval |= BIT(0);
+	writel_relaxed(regval, virt_base_gfx + GPUCC_GPU_DD_WRAP_CTRL);
+
+	dev_info(&pdev->dev, "Completed registering all GPU clocks\n");
+
+	return 0;
+}
+
+static const struct of_device_id msm_clock_gfxcc_match_table[] = {
+	{ .compatible = "qcom,gfxcc-8998" },
+	{ .compatible = "qcom,gfxcc-8998-v2" },
+	{ .compatible = "qcom,gfxcc-hamster" },
+	{},
+};
+
+static struct platform_driver msm_clock_gfxcc_driver = {
+	.probe = msm_gfxcc_8998_probe,
+	.driver = {
+		.name = "qcom,gfxcc-8998",
+		.of_match_table = msm_clock_gfxcc_match_table,
+		.owner = THIS_MODULE,
+	},
+};
+
+int __init msm_gpucc_8998_init(void)
+{
+	return platform_driver_register(&msm_clock_gpucc_driver);
+}
+arch_initcall(msm_gpucc_8998_init);
diff -Nruw linux-4.4.115-fbx/drivers/clk/msm./clock.h linux-4.4.115-fbx/drivers/clk/msm/clock.h
--- linux-4.4.115-fbx/drivers/clk/msm./clock.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/clk/msm/clock.h	2019-01-22 16:16:23.019242024 +0100
@@ -0,0 +1,54 @@
+/*
+ * Copyright (c) 2013-2014, 2016, The Linux Foundation. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __DRIVERS_CLK_MSM_CLOCK_H
+#define __DRIVERS_CLK_MSM_CLOCK_H
+
+#include <linux/clkdev.h>
+
+/**
+ * struct clock_init_data - SoC specific clock initialization data
+ * @table: table of lookups to add
+ * @size: size of @table
+ * @pre_init: called before initializing the clock driver.
+ * @post_init: called after registering @table; clock APIs may be called here.
+ * @late_init: called during late init
+ */
+struct clock_init_data {
+	struct list_head list;
+	struct clk_lookup *table;
+	size_t size;
+	void (*pre_init)(void);
+	void (*post_init)(void);
+	int (*late_init)(void);
+};
+
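+/*
+ * Usage sketch (hypothetical SoC table names): a clock driver fills one of
+ * these and hands it to msm_clock_init():
+ *
+ *	static struct clock_init_data my_soc_clock_init_data = {
+ *		.table = msm_clocks_my_soc,
+ *		.size = ARRAY_SIZE(msm_clocks_my_soc),
+ *	};
+ *
+ *	msm_clock_init(&my_soc_clock_init_data);
+ */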
+int msm_clock_init(struct clock_init_data *data);
+int find_vdd_level(struct clk *clk, unsigned long rate);
+extern struct list_head orphan_clk_list;
+
+#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_COMMON_CLK_MSM)
+int clock_debug_register(struct clk *clk);
+void clock_debug_print_enabled(void);
+#elif defined(CONFIG_DEBUG_FS) && defined(CONFIG_COMMON_CLK_QCOM)
+void clock_debug_print_enabled(void);
+#else
+static inline int clock_debug_register(struct clk *unused)
+{
+	return 0;
+}
+static inline void clock_debug_print_enabled(void) { return; }
+#endif
+
+#endif
diff -Nruw linux-4.4.115-fbx/drivers/clk/msm./clock-local2.c linux-4.4.115-fbx/drivers/clk/msm/clock-local2.c
--- linux-4.4.115-fbx/drivers/clk/msm./clock-local2.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/clk/msm/clock-local2.c	2019-01-22 16:16:23.015241987 +0100
@@ -0,0 +1,3010 @@
+/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/ctype.h>
+#include <linux/bitops.h>
+#include <linux/io.h>
+#include <linux/spinlock.h>
+#include <linux/delay.h>
+#include <linux/rational.h>
+#include <linux/clk.h>
+#include <linux/clk/msm-clk-provider.h>
+#include <linux/clk/msm-clk.h>
+#include <linux/clk/msm-clock-generic.h>
+#include <soc/qcom/clock-local2.h>
+#include <soc/qcom/msm-clock-controller.h>
+
+/*
+ * When enabling/disabling a clock, check the halt bit up to this number
+ * of times (with a 1 us delay in between) before continuing.
+ */
+#define HALT_CHECK_MAX_LOOPS	500
+/* For clocks without halt checking, wait this long after enables/disables. */
+#define HALT_CHECK_DELAY_US	500
+
+#define RCG_FORCE_DISABLE_DELAY_US	100
+
+/*
+ * When updating an RCG configuration, check the update bit up to this
+ * number of times (with a 1 us delay in between) before continuing.
+ */
+#define UPDATE_CHECK_MAX_LOOPS	500
+
+DEFINE_SPINLOCK(local_clock_reg_lock);
+struct clk_freq_tbl rcg_dummy_freq = F_END;
+
+#define CMD_RCGR_REG(x) (*(x)->base + (x)->cmd_rcgr_reg)
+#define CFG_RCGR_REG(x) (*(x)->base + (x)->cmd_rcgr_reg + 0x4)
+#define M_REG(x)	(*(x)->base + (x)->cmd_rcgr_reg + 0x8)
+#define N_REG(x)	(*(x)->base + (x)->cmd_rcgr_reg + 0xC)
+#define D_REG(x)	(*(x)->base + (x)->cmd_rcgr_reg + 0x10)
+#define CBCR_REG(x)	(*(x)->base + (x)->cbcr_reg)
+#define BCR_REG(x)	(*(x)->base + (x)->bcr_reg)
+#define RST_REG(x)	(*(x)->base + (x)->reset_reg)
+#define VOTE_REG(x)	(*(x)->base + (x)->vote_reg)
+#define GATE_EN_REG(x)	(*(x)->base + (x)->en_reg)
+#define DIV_REG(x)	(*(x)->base + (x)->offset)
+#define MUX_REG(x)	(*(x)->base + (x)->offset)
+
+/*
+ * Important clock bit positions and masks
+ */
+#define CMD_RCGR_ROOT_ENABLE_BIT	BIT(1)
+#define CBCR_BRANCH_ENABLE_BIT		BIT(0)
+#define CBCR_BRANCH_OFF_BIT		BIT(31)
+#define CMD_RCGR_CONFIG_UPDATE_BIT	BIT(0)
+#define CMD_RCGR_ROOT_STATUS_BIT	BIT(31)
+#define BCR_BLK_ARES_BIT		BIT(0)
+#define CBCR_HW_CTL_BIT			BIT(1)
+#define CFG_RCGR_DIV_MASK		BM(4, 0)
+#define CFG_RCGR_SRC_SEL_MASK		BM(10, 8)
+#define MND_MODE_MASK			BM(13, 12)
+#define MND_DUAL_EDGE_MODE_BVAL		BVAL(13, 12, 0x2)
+#define CMD_RCGR_CONFIG_DIRTY_MASK	BM(7, 4)
+#define CBCR_CDIV_LSB			16
+#define CBCR_CDIV_MSB			19
+
+enum branch_state {
+	BRANCH_ON,
+	BRANCH_OFF,
+};
+
+static struct clk_freq_tbl cxo_f = {
+	.freq_hz = 19200000,
+	.m_val = 0,
+	.n_val = 0,
+	.d_val = 0,
+	.div_src_val = 0,
+};
+
+struct div_map {
+	u32 mask;
+	int div;
+};
+
+/*
+ * RCG functions
+ */
+
+/*
+ * Update an RCG with a new configuration. This may include a new M, N, or D
+ * value, source selection, or pre-divider value.
+ */
+static void rcg_update_config(struct rcg_clk *rcg)
+{
+	u32 cmd_rcgr_regval;
+	int count = UPDATE_CHECK_MAX_LOOPS;
+
+	if (rcg->non_local_control_timeout)
+		count = rcg->non_local_control_timeout;
+
+	cmd_rcgr_regval = readl_relaxed(CMD_RCGR_REG(rcg));
+	cmd_rcgr_regval |= CMD_RCGR_CONFIG_UPDATE_BIT;
+	writel_relaxed(cmd_rcgr_regval, CMD_RCGR_REG(rcg));
+
+	/* Wait for update to take effect */
+	for (; count > 0; count--) {
+		if (!(readl_relaxed(CMD_RCGR_REG(rcg)) &
+				CMD_RCGR_CONFIG_UPDATE_BIT))
+			return;
+		udelay(1);
+	}
+
+	CLK_WARN(&rcg->c, count == 0, "rcg didn't update its configuration.");
+}
+
+static void rcg_on_check(struct rcg_clk *rcg)
+{
+	int count = UPDATE_CHECK_MAX_LOOPS;
+
+	if (rcg->non_local_control_timeout)
+		count = rcg->non_local_control_timeout;
+
+	/* Wait for RCG to turn on */
+	for (; count > 0; count--) {
+		if (!(readl_relaxed(CMD_RCGR_REG(rcg)) &
+				CMD_RCGR_ROOT_STATUS_BIT))
+			return;
+		udelay(1);
+	}
+	CLK_WARN(&rcg->c, count == 0, "rcg didn't turn on.");
+}
+
+/* RCG set rate function for clocks with Half Integer Dividers. */
+static void __set_rate_hid(struct rcg_clk *rcg, struct clk_freq_tbl *nf)
+{
+	u32 cfg_regval;
+
+	cfg_regval = readl_relaxed(CFG_RCGR_REG(rcg));
+	cfg_regval &= ~(CFG_RCGR_DIV_MASK | CFG_RCGR_SRC_SEL_MASK);
+	cfg_regval |= nf->div_src_val;
+	writel_relaxed(cfg_regval, CFG_RCGR_REG(rcg));
+
+	rcg_update_config(rcg);
+}
+
+void set_rate_hid(struct rcg_clk *rcg, struct clk_freq_tbl *nf)
+{
+	unsigned long flags;
+	spin_lock_irqsave(&local_clock_reg_lock, flags);
+	__set_rate_hid(rcg, nf);
+	spin_unlock_irqrestore(&local_clock_reg_lock, flags);
+}
+
+/* RCG set rate function for clocks with MND & Half Integer Dividers. */
+static void __set_rate_mnd(struct rcg_clk *rcg, struct clk_freq_tbl *nf)
+{
+	u32 cfg_regval;
+
+	cfg_regval = readl_relaxed(CFG_RCGR_REG(rcg));
+	writel_relaxed(nf->m_val, M_REG(rcg));
+	writel_relaxed(nf->n_val, N_REG(rcg));
+	writel_relaxed(nf->d_val, D_REG(rcg));
+
+	cfg_regval = readl_relaxed(CFG_RCGR_REG(rcg));
+	cfg_regval &= ~(CFG_RCGR_DIV_MASK | CFG_RCGR_SRC_SEL_MASK);
+	cfg_regval |= nf->div_src_val;
+
+	/* Activate or disable the M/N:D divider as necessary */
+	cfg_regval &= ~MND_MODE_MASK;
+	if (nf->n_val != 0)
+		cfg_regval |= MND_DUAL_EDGE_MODE_BVAL;
+	writel_relaxed(cfg_regval, CFG_RCGR_REG(rcg));
+
+	rcg_update_config(rcg);
+}
+
+void set_rate_mnd(struct rcg_clk *rcg, struct clk_freq_tbl *nf)
+{
+	unsigned long flags;
+	spin_lock_irqsave(&local_clock_reg_lock, flags);
+	__set_rate_mnd(rcg, nf);
+	spin_unlock_irqrestore(&local_clock_reg_lock, flags);
+}
+
+static void rcg_set_force_enable(struct rcg_clk *rcg)
+{
+	u32 cmd_rcgr_regval;
+	unsigned long flags;
+
+	spin_lock_irqsave(&local_clock_reg_lock, flags);
+	cmd_rcgr_regval = readl_relaxed(CMD_RCGR_REG(rcg));
+	cmd_rcgr_regval |= CMD_RCGR_ROOT_ENABLE_BIT;
+	writel_relaxed(cmd_rcgr_regval, CMD_RCGR_REG(rcg));
+	rcg_on_check(rcg);
+	spin_unlock_irqrestore(&local_clock_reg_lock, flags);
+}
+
+static void rcg_clear_force_enable(struct rcg_clk *rcg)
+{
+	u32 cmd_rcgr_regval;
+	unsigned long flags;
+
+	spin_lock_irqsave(&local_clock_reg_lock, flags);
+	cmd_rcgr_regval = readl_relaxed(CMD_RCGR_REG(rcg));
+	cmd_rcgr_regval &= ~CMD_RCGR_ROOT_ENABLE_BIT;
+	writel_relaxed(cmd_rcgr_regval, CMD_RCGR_REG(rcg));
+	spin_unlock_irqrestore(&local_clock_reg_lock, flags);
+	/* Add a delay of 100usecs to let the RCG disable */
+	udelay(RCG_FORCE_DISABLE_DELAY_US);
+}
+
+static int rcg_clk_enable(struct clk *c)
+{
+	struct rcg_clk *rcg = to_rcg_clk(c);
+
+	WARN(rcg->current_freq == &rcg_dummy_freq,
+		"Attempting to prepare %s before setting its rate. "
+		"Set the rate first!\n", rcg->c.dbg_name);
+
+	if (rcg->force_enable_rcgr) {
+		rcg_set_force_enable(rcg);
+		return 0;
+	}
+
+	if (!rcg->non_local_children || rcg->current_freq == &rcg_dummy_freq)
+		return 0;
+	/*
+	 * Switch from CXO to saved mux value. Force enable/disable while
+	 * switching. The current parent is already prepared and enabled
+	 * at this point, and the CXO source is always-on. Therefore the
+	 * RCG can safely execute a dynamic switch.
+	 */
+	rcg_set_force_enable(rcg);
+	rcg->set_rate(rcg, rcg->current_freq);
+	rcg_clear_force_enable(rcg);
+
+	return 0;
+}
+
+static void rcg_clk_disable(struct clk *c)
+{
+	struct rcg_clk *rcg = to_rcg_clk(c);
+
+	if (rcg->force_enable_rcgr) {
+		rcg_clear_force_enable(rcg);
+		return;
+	}
+
+	if (!rcg->non_local_children)
+		return;
+
+	/*
+	 * Save mux select and switch to CXO. Force enable/disable while
+	 * switching. The current parent is still prepared and enabled at this
+	 * point, and the CXO source is always-on. Therefore the RCG can safely
+	 * execute a dynamic switch.
+	 */
+	rcg_set_force_enable(rcg);
+	rcg->set_rate(rcg, &cxo_f);
+	rcg_clear_force_enable(rcg);
+}
+
+static int prepare_enable_rcg_srcs(struct clk *c, struct clk *curr,
+					struct clk *new, unsigned long *flags)
+{
+	int rc;
+
+	rc = clk_prepare(curr);
+	if (rc)
+		return rc;
+
+	if (c->prepare_count) {
+		rc = clk_prepare(new);
+		if (rc)
+			goto err_new_src_prepare;
+	}
+
+	rc = clk_prepare(new);
+	if (rc)
+		goto err_new_src_prepare2;
+
+	spin_lock_irqsave(&c->lock, *flags);
+	rc = clk_enable(curr);
+	if (rc) {
+		spin_unlock_irqrestore(&c->lock, *flags);
+		goto err_curr_src_enable;
+	}
+
+	if (c->count) {
+		rc = clk_enable(new);
+		if (rc) {
+			spin_unlock_irqrestore(&c->lock, *flags);
+			goto err_new_src_enable;
+		}
+	}
+
+	rc = clk_enable(new);
+	if (rc) {
+		spin_unlock_irqrestore(&c->lock, *flags);
+		goto err_new_src_enable2;
+	}
+	return 0;
+
+err_new_src_enable2:
+	if (c->count)
+		clk_disable(new);
+err_new_src_enable:
+	clk_disable(curr);
+err_curr_src_enable:
+	clk_unprepare(new);
+err_new_src_prepare2:
+	if (c->prepare_count)
+		clk_unprepare(new);
+err_new_src_prepare:
+	clk_unprepare(curr);
+	return rc;
+}
+
+static void disable_unprepare_rcg_srcs(struct clk *c, struct clk *curr,
+					struct clk *new, unsigned long *flags)
+{
+	clk_disable(new);
+	clk_disable(curr);
+	if (c->count)
+		clk_disable(curr);
+	spin_unlock_irqrestore(&c->lock, *flags);
+
+	clk_unprepare(new);
+	clk_unprepare(curr);
+	if (c->prepare_count)
+		clk_unprepare(curr);
+}
+
+static int rcg_clk_set_rate(struct clk *c, unsigned long rate)
+{
+	struct clk_freq_tbl *cf, *nf;
+	struct rcg_clk *rcg = to_rcg_clk(c);
+	int rc;
+	unsigned long flags;
+
+	for (nf = rcg->freq_tbl; nf->freq_hz != FREQ_END
+			&& nf->freq_hz != rate; nf++)
+		;
+
+	if (nf->freq_hz == FREQ_END)
+		return -EINVAL;
+
+	cf = rcg->current_freq;
+	if (nf->src_freq != FIXED_CLK_SRC) {
+		rc = clk_set_rate(nf->src_clk, nf->src_freq);
+		if (rc)
+			return rc;
+	}
+
+	if (rcg->non_local_control_timeout) {
+		/*
+		 * __clk_pre_reparent only enables the RCG source if the SW
+		 * count for the RCG is non-zero. We need to make sure that
+		 * both PLL sources are ON before force turning on the RCG.
+		 */
+		rc = prepare_enable_rcg_srcs(c, cf->src_clk, nf->src_clk,
+								&flags);
+	} else
+		rc = __clk_pre_reparent(c, nf->src_clk, &flags);
+
+	if (rc)
+		return rc;
+
+	BUG_ON(!rcg->set_rate);
+
+	/* Perform clock-specific frequency switch operations. */
+	if ((rcg->non_local_children && c->count) ||
+			rcg->non_local_control_timeout) {
+		/*
+		 * Force enable the RCG before updating the RCG configuration
+		 * since the downstream clock/s can be disabled at around the
+		 * same time causing the feedback from the CBCR to turn off
+		 * the RCG.
+		 */
+		rcg_set_force_enable(rcg);
+		rcg->set_rate(rcg, nf);
+		rcg_clear_force_enable(rcg);
+	} else if (!rcg->non_local_children) {
+		rcg->set_rate(rcg, nf);
+	}
+
+	/*
+	 * If non_local_children is set and the RCG is not enabled,
+	 * the following operations switch parent in software and cache
+	 * the frequency. The mux switch will occur when the RCG is enabled.
+	 */
+	rcg->current_freq = nf;
+	c->parent = nf->src_clk;
+
+	if (rcg->non_local_control_timeout)
+		disable_unprepare_rcg_srcs(c, cf->src_clk, nf->src_clk,
+								&flags);
+	else
+		__clk_post_reparent(c, cf->src_clk, &flags);
+
+	return 0;
+}
+
+/*
+ * Return a supported rate that's at least the specified rate or
+ * the max supported rate if the specified rate is larger than the
+ * max supported rate.
+ */
+static long rcg_clk_round_rate(struct clk *c, unsigned long rate)
+{
+	struct rcg_clk *rcg = to_rcg_clk(c);
+	struct clk_freq_tbl *f;
+
+	for (f = rcg->freq_tbl; f->freq_hz != FREQ_END; f++)
+		if (f->freq_hz >= rate)
+			return f->freq_hz;
+
+	f--;
+	return f->freq_hz;
+}
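+
+/*
+ * Worked example (hypothetical table): against 171/251/332 MHz entries, a
+ * 200 MHz request rounds up to 251 MHz, and any request above 332 MHz
+ * returns the table maximum of 332 MHz.
+ */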
+
+/* Return the nth supported frequency for a given clock. */
+static long rcg_clk_list_rate(struct clk *c, unsigned n)
+{
+	struct rcg_clk *rcg = to_rcg_clk(c);
+
+	if (!rcg->freq_tbl || rcg->freq_tbl->freq_hz == FREQ_END)
+		return -ENXIO;
+
+	return (rcg->freq_tbl + n)->freq_hz;
+}
+
+static struct clk *_rcg_clk_get_parent(struct rcg_clk *rcg, bool has_mnd,
+								bool match_rate)
+{
+	u32 n_regval = 0, m_regval = 0, d_regval = 0;
+	u32 cfg_regval, div, div_regval;
+	struct clk_freq_tbl *freq;
+	u32 cmd_rcgr_regval;
+
+	if (!rcg->freq_tbl) {
+		WARN(1, "No frequency table present for rcg %s\n",
+							rcg->c.dbg_name);
+		return NULL;
+	}
+
+	/* Is there a pending configuration? */
+	cmd_rcgr_regval = readl_relaxed(CMD_RCGR_REG(rcg));
+	if (cmd_rcgr_regval & CMD_RCGR_CONFIG_DIRTY_MASK) {
+		WARN(1, "Pending transaction for rcg %s\n", rcg->c.dbg_name);
+		return NULL;
+	}
+
+	/* Get values of m, n, d, div and src_sel registers. */
+	if (has_mnd) {
+		m_regval = readl_relaxed(M_REG(rcg));
+		n_regval = readl_relaxed(N_REG(rcg));
+		d_regval = readl_relaxed(D_REG(rcg));
+
+		/*
+		 * The n and d values stored in the frequency tables are sign
+		 * extended to 32 bits. The n and d values in the registers are
+		 * sign extended to 8 or 16 bits. Sign extend the values read
+		 * from the registers so that they can be compared to the
+		 * values in the frequency tables.
+		 */
+		n_regval |= (n_regval >> 8) ? BM(31, 16) : BM(31, 8);
+		d_regval |= (d_regval >> 8) ? BM(31, 16) : BM(31, 8);
+	}
+
+	cfg_regval = readl_relaxed(CFG_RCGR_REG(rcg));
+	cfg_regval &= CFG_RCGR_SRC_SEL_MASK | CFG_RCGR_DIV_MASK
+				| MND_MODE_MASK;
+
+	/* If mnd counter is present, check if it's in use. */
+	has_mnd = (has_mnd) &&
+		((cfg_regval & MND_MODE_MASK) == MND_DUAL_EDGE_MODE_BVAL);
+
+	/*
+	 * Clear out the mn counter mode bits since we now want to compare only
+	 * the source mux selection and pre-divider values in the registers.
+	 */
+	cfg_regval &= ~MND_MODE_MASK;
+
+	/* Figure out what rate the rcg is running at */
+	for (freq = rcg->freq_tbl; freq->freq_hz != FREQ_END; freq++) {
+		/* source select does not match */
+		if ((freq->div_src_val & CFG_RCGR_SRC_SEL_MASK)
+		    != (cfg_regval & CFG_RCGR_SRC_SEL_MASK))
+			continue;
+		/*
+		 * Stop if we found the required parent in the frequency
+		 * table; here we only care that the source matches and
+		 * don't care whether the frequency matches.
+		 */
+		if (!match_rate)
+			break;
+		/* divider does not match */
+		div = freq->div_src_val & CFG_RCGR_DIV_MASK;
+		div_regval = cfg_regval & CFG_RCGR_DIV_MASK;
+		if (div != div_regval && (div > 1 || div_regval > 1))
+			continue;
+
+		if (has_mnd) {
+			if (freq->m_val != m_regval)
+				continue;
+			if (freq->n_val != n_regval)
+				continue;
+			if (freq->d_val != d_regval)
+				continue;
+		} else if (freq->n_val) {
+			continue;
+		}
+		break;
+	}
+
+	/* No known frequency found */
+	if (freq->freq_hz == FREQ_END) {
+		/*
+		 * If we can't recognize the frequency and non_local_children
+		 * is set, switch to a safe frequency. It is assumed that the
+		 * current parent has been turned on by the boot chain if the
+		 * RCG is on.
+		 */
+		if (rcg->non_local_children) {
+			rcg->set_rate(rcg, &cxo_f);
+			WARN(1, "don't recognize rcg frequency for %s\n",
+				rcg->c.dbg_name);
+		}
+		return NULL;
+	}
+
+	rcg->current_freq = freq;
+	return freq->src_clk;
+}
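+
+/*
+ * Worked MND example (illustrative): a divide-by-1.5 entry (m = 2, n = 3)
+ * is stored in the table as n_val = ~(n - m) = 0xFFFFFFFE. An 8-bit N
+ * register reads back as 0xFE, and the sign extension above widens it to
+ * 0xFFFFFFFE so the register and table values compare equal.
+ */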
+
+static enum handoff _rcg_clk_handoff(struct rcg_clk *rcg)
+{
+	u32 cmd_rcgr_regval;
+
+	if (rcg->current_freq && rcg->current_freq->freq_hz != FREQ_END)
+		rcg->c.rate = rcg->current_freq->freq_hz;
+
+	/* Is the root enabled? */
+	cmd_rcgr_regval = readl_relaxed(CMD_RCGR_REG(rcg));
+	if ((cmd_rcgr_regval & CMD_RCGR_ROOT_STATUS_BIT))
+		return HANDOFF_DISABLED_CLK;
+
+	return HANDOFF_ENABLED_CLK;
+}
+
+static struct clk *display_clk_get_parent(struct clk *c)
+{
+	return _rcg_clk_get_parent(to_rcg_clk(c), false, false);
+}
+
+static struct clk *rcg_mnd_clk_get_parent(struct clk *c)
+{
+	return _rcg_clk_get_parent(to_rcg_clk(c), true, true);
+}
+
+static struct clk *rcg_clk_get_parent(struct clk *c)
+{
+	return _rcg_clk_get_parent(to_rcg_clk(c), false, true);
+}
+
+static enum handoff rcg_mnd_clk_handoff(struct clk *c)
+{
+	return _rcg_clk_handoff(to_rcg_clk(c));
+}
+
+static enum handoff rcg_clk_handoff(struct clk *c)
+{
+	return _rcg_clk_handoff(to_rcg_clk(c));
+}
+
+static void __iomem *rcg_hid_clk_list_registers(struct clk *c, int n,
+				struct clk_register_data **regs, u32 *size)
+{
+	struct rcg_clk *rcg = to_rcg_clk(c);
+	static struct clk_register_data data[] = {
+		{"CMD_RCGR", 0x0},
+		{"CFG_RCGR", 0x4},
+	};
+	if (n)
+		return ERR_PTR(-EINVAL);
+
+	*regs = data;
+	*size = ARRAY_SIZE(data);
+	return CMD_RCGR_REG(rcg);
+}
+
+static void __iomem *rcg_mnd_clk_list_registers(struct clk *c, int n,
+				struct clk_register_data **regs, u32 *size)
+{
+	struct rcg_clk *rcg = to_rcg_clk(c);
+	static struct clk_register_data data[] = {
+		{"CMD_RCGR", 0x0},
+		{"CFG_RCGR", 0x4},
+		{"M_VAL", 0x8},
+		{"N_VAL", 0xC},
+		{"D_VAL", 0x10},
+	};
+	if (n)
+		return ERR_PTR(-EINVAL);
+
+	*regs = data;
+	*size = ARRAY_SIZE(data);
+	return CMD_RCGR_REG(rcg);
+}
+
+#define BRANCH_CHECK_MASK	BM(31, 28)
+#define BRANCH_ON_VAL		BVAL(31, 28, 0x0)
+#define BRANCH_OFF_VAL		BVAL(31, 28, 0x8)
+#define BRANCH_NOC_FSM_ON_VAL	BVAL(31, 28, 0x2)
+
+/*
+ * Branch clock functions
+ */
+static int branch_clk_halt_check(struct clk *c, u32 halt_check,
+			void __iomem *cbcr_reg, enum branch_state br_status)
+{
+	char *status_str = (br_status == BRANCH_ON) ? "off" : "on";
+
+	/*
+	 * Use a memory barrier since some halt status registers are
+	 * not within the same 1K segment as the branch/root enable
+	 * registers.  It's also needed in the udelay() case to ensure
+	 * the delay starts after the branch disable.
+	 */
+	mb();
+
+	if (halt_check == DELAY || halt_check == HALT_VOTED) {
+		udelay(HALT_CHECK_DELAY_US);
+	} else if (halt_check == HALT) {
+		int count;
+		u32 val;
+		for (count = HALT_CHECK_MAX_LOOPS; count > 0; count--) {
+			val = readl_relaxed(cbcr_reg);
+			val &= BRANCH_CHECK_MASK;
+			switch (br_status) {
+			case BRANCH_ON:
+				if (val == BRANCH_ON_VAL
+					|| val == BRANCH_NOC_FSM_ON_VAL)
+					return 0;
+				break;
+
+			case BRANCH_OFF:
+				if (val == BRANCH_OFF_VAL)
+					return 0;
+				break;
+			}
+			udelay(1);
+		}
+		CLK_WARN(c, count == 0, "status stuck %s", status_str);
+		if (!count)
+			return -ETIMEDOUT;
+	} else {
+		pr_err("Invalid halt_check flag - %u\n", halt_check);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static unsigned long branch_clk_aggregate_rate(const struct clk *parent)
+{
+	struct clk *clk;
+	unsigned long rate = 0;
+
+	list_for_each_entry(clk, &parent->children, siblings) {
+		struct branch_clk *v = to_branch_clk(clk);
+
+		if (v->is_prepared)
+			rate = max(clk->rate, rate);
+	}
+	return rate;
+}
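+
+/*
+ * Example (hypothetical votes): two prepared siblings voting 100 MHz and
+ * 200 MHz aggregate to 200 MHz on the shared parent; when the 200 MHz
+ * branch is unprepared, the unprepare path below recomputes the aggregate
+ * and lowers the parent back to 100 MHz.
+ */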
+
+static int cbcr_set_flags(void __iomem *regaddr, unsigned flags)
+{
+	u32 cbcr_val;
+	unsigned long irq_flags;
+	int delay_us = 0, ret = 0;
+
+	spin_lock_irqsave(&local_clock_reg_lock, irq_flags);
+	cbcr_val = readl_relaxed(regaddr);
+	switch (flags) {
+	case CLKFLAG_PERIPH_OFF_SET:
+		cbcr_val |= BIT(12);
+		delay_us = 1;
+		break;
+	case CLKFLAG_PERIPH_OFF_CLEAR:
+		cbcr_val &= ~BIT(12);
+		break;
+	case CLKFLAG_RETAIN_PERIPH:
+		cbcr_val |= BIT(13);
+		delay_us = 1;
+		break;
+	case CLKFLAG_NORETAIN_PERIPH:
+		cbcr_val &= ~BIT(13);
+		break;
+	case CLKFLAG_RETAIN_MEM:
+		cbcr_val |= BIT(14);
+		delay_us = 1;
+		break;
+	case CLKFLAG_NORETAIN_MEM:
+		cbcr_val &= ~BIT(14);
+		break;
+	default:
+		ret = -EINVAL;
+	}
+	writel_relaxed(cbcr_val, regaddr);
+	/* Make sure power is enabled before returning. */
+	mb();
+	udelay(delay_us);
+
+	spin_unlock_irqrestore(&local_clock_reg_lock, irq_flags);
+
+	return ret;
+}
+
+static int branch_clk_set_flags(struct clk *c, unsigned flags)
+{
+	return cbcr_set_flags(CBCR_REG(to_branch_clk(c)), flags);
+}
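+
+/*
+ * Usage sketch: external callers reach this through clk_set_flags(); e.g.
+ * the gfxcc probe earlier in this patch does
+ *
+ *	clk_set_flags(&gpucc_gfx3d_clk.c, CLKFLAG_RETAIN_PERIPH);
+ *
+ * which sets BIT(13) in the branch's CBCR so the periph logic is retained
+ * while the clock is gated.
+ */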
+
+static DEFINE_MUTEX(branch_clk_lock);
+
+static void branch_clk_unprepare(struct clk *c)
+{
+	struct branch_clk *branch = to_branch_clk(c);
+	unsigned long curr_rate, new_rate;
+
+	if (!branch->aggr_sibling_rates)
+		return;
+
+	mutex_lock(&branch_clk_lock);
+	branch->is_prepared = false;
+	new_rate = branch_clk_aggregate_rate(c->parent);
+	curr_rate = max(new_rate, c->rate);
+	if (new_rate < curr_rate)
+		clk_set_rate(c->parent, new_rate);
+	mutex_unlock(&branch_clk_lock);
+}
+
+static int branch_clk_prepare(struct clk *c)
+{
+	struct branch_clk *branch = to_branch_clk(c);
+	unsigned long curr_rate;
+	int ret = 0;
+
+	if (!branch->aggr_sibling_rates)
+		return ret;
+
+	mutex_lock(&branch_clk_lock);
+	branch->is_prepared = false;
+	curr_rate = branch_clk_aggregate_rate(c->parent);
+	if (c->rate > curr_rate) {
+		ret = clk_set_rate(c->parent, c->rate);
+		if (ret)
+			goto exit;
+	}
+	branch->is_prepared = true;
+exit:
+	mutex_unlock(&branch_clk_lock);
+	return ret;
+}
+
+static void branch_clk_disable(struct clk *c)
+{
+	unsigned long flags;
+	struct branch_clk *branch = to_branch_clk(c);
+	u32 reg_val;
+
+	spin_lock_irqsave(&local_clock_reg_lock, flags);
+	reg_val = readl_relaxed(CBCR_REG(branch));
+	reg_val &= ~CBCR_BRANCH_ENABLE_BIT;
+	writel_relaxed(reg_val, CBCR_REG(branch));
+	spin_unlock_irqrestore(&local_clock_reg_lock, flags);
+
+	/*
+	 * Wait for the clock to disable before continuing. A disable timeout
+	 * is not handled explicitly, since it is considered non-fatal.
+	 */
+	if (!branch->no_halt_check_on_disable)
+		branch_clk_halt_check(c, branch->halt_check, CBCR_REG(branch),
+					BRANCH_OFF);
+
+	if (branch->toggle_memory) {
+		branch_clk_set_flags(c, CLKFLAG_NORETAIN_MEM);
+		branch_clk_set_flags(c, CLKFLAG_NORETAIN_PERIPH);
+	}
+}
+
+static int branch_clk_enable(struct clk *c)
+{
+	unsigned long flags;
+	u32 cbcr_val;
+	struct branch_clk *branch = to_branch_clk(c);
+	int ret = 0;
+
+	if (branch->toggle_memory) {
+		branch_clk_set_flags(c, CLKFLAG_RETAIN_MEM);
+		branch_clk_set_flags(c, CLKFLAG_RETAIN_PERIPH);
+	}
+	spin_lock_irqsave(&local_clock_reg_lock, flags);
+	cbcr_val = readl_relaxed(CBCR_REG(branch));
+	cbcr_val |= CBCR_BRANCH_ENABLE_BIT;
+	writel_relaxed(cbcr_val, CBCR_REG(branch));
+	spin_unlock_irqrestore(&local_clock_reg_lock, flags);
+
+	/*
+	 * For clocks controlled by other masters via voting registers,
+	 * delay polling for the status bit to allow previous clk_disable
+	 * by the GDS controller to go through.
+	 */
+	if (branch->no_halt_check_on_disable)
+		udelay(5);
+
+	/* Wait for clock to enable before continuing. */
+	ret = branch_clk_halt_check(c, branch->halt_check, CBCR_REG(branch),
+				BRANCH_ON);
+	if (ret)
+		branch_clk_disable(c);
+
+	return ret;
+}
+
+static int branch_cdiv_set_rate(struct branch_clk *branch, unsigned long rate)
+{
+	unsigned long flags;
+	u32 regval;
+
+	if (rate > branch->max_div)
+		return -EINVAL;
+
+	spin_lock_irqsave(&local_clock_reg_lock, flags);
+	regval = readl_relaxed(CBCR_REG(branch));
+	regval &= ~BM(CBCR_CDIV_MSB, CBCR_CDIV_LSB);
+	regval |= BVAL(CBCR_CDIV_MSB, CBCR_CDIV_LSB, rate);
+	writel_relaxed(regval, CBCR_REG(branch));
+	spin_unlock_irqrestore(&local_clock_reg_lock, flags);
+
+	return 0;
+}
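+
+/*
+ * Note (illustrative): for these divider branches the "rate" argument is
+ * the raw divider selection (bounded by max_div) written into CBCR bits
+ * 19:16, not a rate in Hz.
+ */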
+
+static int branch_clk_set_rate(struct clk *c, unsigned long rate)
+{
+	struct branch_clk *clkh, *branch = to_branch_clk(c);
+	struct clk *clkp, *parent = c->parent;
+	unsigned long flags, curr_rate, new_rate, other_rate = 0;
+	int ret = 0;
+
+	spin_lock_irqsave(&local_clock_reg_lock, flags);
+	if (readl_relaxed(CBCR_REG(branch)) & CBCR_HW_CTL_BIT) {
+		pr_err("Cannot scale %s clock while HW gating is enabled. Use corresponding hw_ctl_clk to scale it\n",
+				c->dbg_name);
+		spin_unlock_irqrestore(&local_clock_reg_lock, flags);
+		return -EINVAL;
+	}
+	spin_unlock_irqrestore(&local_clock_reg_lock, flags);
+
+	if (branch->max_div)
+		return branch_cdiv_set_rate(branch, rate);
+
+	if (branch->has_sibling)
+		return -EPERM;
+
+	if (!branch->aggr_sibling_rates)
+		return clk_set_rate(c->parent, rate);
+
+	mutex_lock(&branch_clk_lock);
+	if (!branch->is_prepared) {
+		c->rate = rate;
+		goto exit;
+	}
+	/*
+	 * Get the aggregate rate without this clock's vote and update
+	 * if the new rate is different than the current rate.
+	 */
+	list_for_each_entry(clkp, &parent->children, siblings) {
+		clkh = to_branch_clk(clkp);
+		if (clkh->is_prepared && clkh != branch)
+			other_rate = max(clkp->rate, other_rate);
+	}
+	curr_rate = max(other_rate, c->rate);
+	new_rate = max(other_rate, rate);
+	if (new_rate != curr_rate) {
+		ret = clk_set_rate(parent, new_rate);
+		if (!ret)
+			c->rate = rate;
+	}
+exit:
+	mutex_unlock(&branch_clk_lock);
+	return ret;
+}
+
+static long branch_clk_round_rate(struct clk *c, unsigned long rate)
+{
+	struct branch_clk *branch = to_branch_clk(c);
+
+	if (branch->max_div)
+		return rate <= (branch->max_div) ? rate : -EPERM;
+
+	if (!branch->has_sibling)
+		return clk_round_rate(c->parent, rate);
+
+	return -EPERM;
+}
+
+static unsigned long branch_clk_get_rate(struct clk *c)
+{
+	struct branch_clk *branch = to_branch_clk(c);
+
+	if (branch->max_div ||
+			(branch->aggr_sibling_rates && !branch->is_prepared))
+		return branch->c.rate;
+
+	return clk_get_rate(c->parent);
+}
+
+static long branch_clk_list_rate(struct clk *c, unsigned n)
+{
+	int level;
+	unsigned long fmax = 0, rate;
+	struct branch_clk *branch = to_branch_clk(c);
+	struct clk *parent = c->parent;
+
+	if (branch->has_sibling == 1)
+		return -ENXIO;
+
+	if (!parent || !parent->ops->list_rate)
+		return -ENXIO;
+
+	/* Find max frequency supported within voltage constraints. */
+	if (!parent->vdd_class) {
+		fmax = ULONG_MAX;
+	} else {
+		for (level = 0; level < parent->num_fmax; level++)
+			if (parent->fmax[level])
+				fmax = parent->fmax[level];
+	}
+
+	rate = parent->ops->list_rate(parent, n);
+	if (rate <= fmax)
+		return rate;
+	else
+		return -ENXIO;
+}
+
+static enum handoff branch_clk_handoff(struct clk *c)
+{
+	struct branch_clk *branch = to_branch_clk(c);
+	u32 cbcr_regval;
+
+	cbcr_regval = readl_relaxed(CBCR_REG(branch));
+
+	/* Set the cdiv to c->rate for fixed divider branch clock */
+	if (c->rate && (c->rate < branch->max_div)) {
+		cbcr_regval &= ~BM(CBCR_CDIV_MSB, CBCR_CDIV_LSB);
+		cbcr_regval |= BVAL(CBCR_CDIV_MSB, CBCR_CDIV_LSB, c->rate);
+		writel_relaxed(cbcr_regval, CBCR_REG(branch));
+	}
+
+	if ((cbcr_regval & CBCR_BRANCH_OFF_BIT))
+		return HANDOFF_DISABLED_CLK;
+
+	if (!(cbcr_regval & CBCR_BRANCH_ENABLE_BIT)) {
+		if (!branch->check_enable_bit)
+			pr_warn("%s clock is enabled in HW even though ENABLE_BIT is not set\n",
+			c->dbg_name);
+		return HANDOFF_DISABLED_CLK;
+	}
+
+	if (branch->max_div) {
+		cbcr_regval &= BM(CBCR_CDIV_MSB, CBCR_CDIV_LSB);
+		cbcr_regval >>= CBCR_CDIV_LSB;
+		c->rate = cbcr_regval;
+	} else if (!branch->has_sibling) {
+		c->rate = clk_get_rate(c->parent);
+	}
+
+	return HANDOFF_ENABLED_CLK;
+}
+
+static int __branch_clk_reset(void __iomem *bcr_reg,
+				enum clk_reset_action action)
+{
+	int ret = 0;
+	unsigned long flags;
+	u32 reg_val;
+
+	spin_lock_irqsave(&local_clock_reg_lock, flags);
+	reg_val = readl_relaxed(bcr_reg);
+	switch (action) {
+	case CLK_RESET_ASSERT:
+		reg_val |= BCR_BLK_ARES_BIT;
+		break;
+	case CLK_RESET_DEASSERT:
+		reg_val &= ~BCR_BLK_ARES_BIT;
+		break;
+	default:
+		ret = -EINVAL;
+	}
+	writel_relaxed(reg_val, bcr_reg);
+	spin_unlock_irqrestore(&local_clock_reg_lock, flags);
+
+	/* Make sure write is issued before returning. */
+	mb();
+
+	return ret;
+}
+
+static int branch_clk_reset(struct clk *c, enum clk_reset_action action)
+{
+	struct branch_clk *branch = to_branch_clk(c);
+
+	if (!branch->bcr_reg)
+		return -EPERM;
+	return __branch_clk_reset(BCR_REG(branch), action);
+}
+
+static void __iomem *branch_clk_list_registers(struct clk *c, int n,
+				struct clk_register_data **regs, u32 *size)
+{
+	struct branch_clk *branch = to_branch_clk(c);
+	static struct clk_register_data data[] = {
+		{"CBCR", 0x0},
+	};
+	if (n)
+		return ERR_PTR(-EINVAL);
+
+	*regs = data;
+	*size = ARRAY_SIZE(data);
+	return CBCR_REG(branch);
+}
+
+static void _hw_ctl_clk_enable(struct hw_ctl_clk *hwctl_clk)
+{
+	unsigned long flags;
+	u32 cbcr_val;
+
+	spin_lock_irqsave(&local_clock_reg_lock, flags);
+	cbcr_val = readl_relaxed(CBCR_REG(hwctl_clk));
+	cbcr_val |= CBCR_HW_CTL_BIT;
+	writel_relaxed(cbcr_val, CBCR_REG(hwctl_clk));
+	spin_unlock_irqrestore(&local_clock_reg_lock, flags);
+}
+
+static int hw_ctl_clk_enable(struct clk *c)
+{
+	struct hw_ctl_clk *hwctl_clk = to_hw_ctl_clk(c);
+	struct clk *parent = c->parent;
+
+	/* The parent branch clock should have been prepared prior to this. */
+	if (!parent || !parent->prepare_count)
+		return -EINVAL;
+
+	_hw_ctl_clk_enable(hwctl_clk);
+	return 0;
+}
+
+static void _hw_ctl_clk_disable(struct hw_ctl_clk *hwctl_clk)
+{
+	unsigned long flags;
+	u32 cbcr_val;
+
+	spin_lock_irqsave(&local_clock_reg_lock, flags);
+	cbcr_val = readl_relaxed(CBCR_REG(hwctl_clk));
+	cbcr_val &= ~CBCR_HW_CTL_BIT;
+	writel_relaxed(cbcr_val, CBCR_REG(hwctl_clk));
+	spin_unlock_irqrestore(&local_clock_reg_lock, flags);
+}
+
+static void hw_ctl_clk_disable(struct clk *c)
+{
+	struct hw_ctl_clk *hwctl_clk = to_hw_ctl_clk(c);
+
+	if (!c->parent)
+		return;
+
+	_hw_ctl_clk_disable(hwctl_clk);
+}
+
+static int hw_ctl_clk_set_rate(struct clk *c, unsigned long rate)
+{
+	struct hw_ctl_clk *hwctl_clk = to_hw_ctl_clk(c);
+	struct clk *parent = c->parent;
+	int ret = 0;
+
+	if (!parent)
+		return -EINVAL;
+	/*
+	 * Switch back to SW control while doing a frequency change to avoid
+	 * having the downstream clock being gated at the same time that the
+	 * RCG rate switch happens.
+	 */
+	_hw_ctl_clk_disable(hwctl_clk);
+	ret = clk_set_rate(parent, rate);
+	if (ret)
+		return ret;
+	_hw_ctl_clk_enable(hwctl_clk);
+
+	return 0;
+}
+
+static long hw_ctl_clk_round_rate(struct clk *c, unsigned long rate)
+{
+	return clk_round_rate(c->parent, rate);
+}
+
+static unsigned long hw_ctl_clk_get_rate(struct clk *c)
+{
+	return clk_get_rate(c->parent);
+}
+
+/*
+ * Voteable clock functions
+ */
+static int local_vote_clk_reset(struct clk *c, enum clk_reset_action action)
+{
+	struct local_vote_clk *vclk = to_local_vote_clk(c);
+
+	if (!vclk->bcr_reg) {
+		WARN(1, "clk_reset called on an unsupported clock (%s)\n",
+			c->dbg_name);
+		return -EPERM;
+	}
+	return __branch_clk_reset(BCR_REG(vclk), action);
+}
+
+static void local_vote_clk_disable(struct clk *c)
+{
+	unsigned long flags;
+	u32 ena;
+	struct local_vote_clk *vclk = to_local_vote_clk(c);
+
+	spin_lock_irqsave(&local_clock_reg_lock, flags);
+	ena = readl_relaxed(VOTE_REG(vclk));
+	ena &= ~vclk->en_mask;
+	writel_relaxed(ena, VOTE_REG(vclk));
+	spin_unlock_irqrestore(&local_clock_reg_lock, flags);
+}
+
+static int local_vote_clk_enable(struct clk *c)
+{
+	unsigned long flags;
+	u32 ena;
+	struct local_vote_clk *vclk = to_local_vote_clk(c);
+	int ret = 0;
+
+	spin_lock_irqsave(&local_clock_reg_lock, flags);
+	ena = readl_relaxed(VOTE_REG(vclk));
+	ena |= vclk->en_mask;
+	writel_relaxed(ena, VOTE_REG(vclk));
+	spin_unlock_irqrestore(&local_clock_reg_lock, flags);
+
+	ret = branch_clk_halt_check(c, vclk->halt_check, CBCR_REG(vclk),
+						BRANCH_ON);
+	if (ret)
+		local_vote_clk_disable(c);
+
+	return ret;
+}
+
+static enum handoff local_vote_clk_handoff(struct clk *c)
+{
+	struct local_vote_clk *vclk = to_local_vote_clk(c);
+	u32 vote_regval;
+
+	/* Is the branch voted on by apps? */
+	vote_regval = readl_relaxed(VOTE_REG(vclk));
+	if (!(vote_regval & vclk->en_mask))
+		return HANDOFF_DISABLED_CLK;
+
+	return HANDOFF_ENABLED_CLK;
+}
+
+/* Sample clock for 'ticks' reference clock ticks. */
+static u32 run_measurement(unsigned ticks, void __iomem *ctl_reg,
+				void __iomem *status_reg)
+{
+	/* Stop counters and set the XO4 counter start value. */
+	writel_relaxed(ticks, ctl_reg);
+
+	/* Wait for timer to become ready. */
+	while ((readl_relaxed(status_reg) & BIT(25)) != 0)
+		cpu_relax();
+
+	/* Run measurement and wait for completion. */
+	writel_relaxed(BIT(20)|ticks, ctl_reg);
+	while ((readl_relaxed(status_reg) & BIT(25)) == 0)
+		cpu_relax();
+
+	/* Return measured ticks. */
+	return readl_relaxed(status_reg) & BM(24, 0);
+}
+
+/*
+ * Perform a hardware rate measurement for a given clock.
+ * FOR DEBUG USE ONLY: Measurements take ~15 ms!
+ */
+unsigned long measure_get_rate(struct clk *c)
+{
+	unsigned long flags;
+	u32 gcc_xo4_reg, regval;
+	u64 raw_count_short, raw_count_full;
+	unsigned ret;
+	u32 sample_ticks = 0x10000;
+	u32 multiplier = to_mux_clk(c)->post_div + 1;
+	struct measure_clk_data *data = to_mux_clk(c)->priv;
+
+	regval = readl_relaxed(MUX_REG(to_mux_clk(c)));
+	/* clear and set post divider bits */
+	regval &= ~BM(15, 12);
+	regval |= BVAL(15, 12, to_mux_clk(c)->post_div);
+	writel_relaxed(regval, MUX_REG(to_mux_clk(c)));
+
+	ret = clk_prepare_enable(data->cxo);
+	if (ret) {
+		pr_warn("CXO clock failed to enable. Can't measure\n");
+		ret = 0;
+		goto fail;
+	}
+
+	spin_lock_irqsave(&local_clock_reg_lock, flags);
+
+	/* Enable CXO/4 and RINGOSC branch. */
+	gcc_xo4_reg = readl_relaxed(*data->base + data->xo_div4_cbcr);
+	gcc_xo4_reg |= CBCR_BRANCH_ENABLE_BIT;
+	writel_relaxed(gcc_xo4_reg, *data->base + data->xo_div4_cbcr);
+
+	/*
+	 * The ring oscillator counter will not reset if the measured clock
+	 * is not running.  To detect this, run a short measurement before
+	 * the full measurement.  If the raw results of the two are the same
+	 * then the clock must be off.
+	 */
+
+	/* Run a short measurement. (~1 ms) */
+	raw_count_short = run_measurement(0x1000, *data->base + data->ctl_reg,
+					  *data->base + data->status_reg);
+	/* Run a full measurement. (~14 ms) */
+	raw_count_full = run_measurement(sample_ticks,
+					 *data->base + data->ctl_reg,
+					 *data->base + data->status_reg);
+
+	gcc_xo4_reg &= ~CBCR_BRANCH_ENABLE_BIT;
+	writel_relaxed(gcc_xo4_reg, *data->base + data->xo_div4_cbcr);
+
+	/* Return 0 if the clock is off. */
+	if (raw_count_full == raw_count_short) {
+		ret = 0;
+	} else {
+		/* Compute rate in Hz. */
+		raw_count_full = ((raw_count_full * 10) + 15) * 4800000;
+		do_div(raw_count_full, ((sample_ticks * 10) + 35));
+		ret = (raw_count_full * multiplier);
+	}
+	writel_relaxed(data->plltest_val, *data->base + data->plltest_reg);
+	spin_unlock_irqrestore(&local_clock_reg_lock, flags);
+
+	clk_disable_unprepare(data->cxo);
+
+fail:
+	regval = readl_relaxed(MUX_REG(to_mux_clk(c)));
+	/* clear post divider bits */
+	regval &= ~BM(15, 12);
+	writel_relaxed(regval, MUX_REG(to_mux_clk(c)));
+
+	return ret;
+}
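+
+/*
+ * Worked example (hypothetical readings): the counter counts cycles of the
+ * measured clock while the XO/4 (4.8 MHz) reference runs for sample_ticks
+ * cycles, so rate ~= raw_count * 4.8 MHz / sample_ticks. With sample_ticks
+ * = 0x10000 and raw_count_full = 0x40000, the formula above yields roughly
+ * 19.2 MHz (times the post-divider multiplier).
+ */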
+
+struct frac_entry {
+	int num;
+	int den;
+};
+
+static void __iomem *local_vote_clk_list_registers(struct clk *c, int n,
+				struct clk_register_data **regs, u32 *size)
+{
+	struct local_vote_clk *vclk = to_local_vote_clk(c);
+	static struct clk_register_data data1[] = {
+		{"CBCR", 0x0},
+	};
+	static struct clk_register_data data2[] = {
+		{"APPS_VOTE", 0x0},
+		{"APPS_SLEEP_VOTE", 0x4},
+	};
+	switch (n) {
+	case 0:
+		*regs = data1;
+		*size = ARRAY_SIZE(data1);
+		return CBCR_REG(vclk);
+	case 1:
+		*regs = data2;
+		*size = ARRAY_SIZE(data2);
+		return VOTE_REG(vclk);
+	default:
+		return ERR_PTR(-EINVAL);
+	}
+}
+
+static struct frac_entry frac_table_675m[] = {	/* link rate of 270M */
+	{52, 295},	/* 119 M */
+	{11, 57},	/* 130.25 M */
+	{63, 307},	/* 138.50 M */
+	{11, 50},	/* 148.50 M */
+	{47, 206},	/* 154 M */
+	{31, 100},	/* 205.25 M */
+	{107, 269},	/* 268.50 M */
+	{0, 0},
+};
+
+static struct frac_entry frac_table_810m[] = { /* Link rate of 162M */
+	{31, 211},	/* 119 M */
+	{32, 199},	/* 130.25 M */
+	{63, 307},	/* 138.50 M */
+	{11, 60},	/* 148.50 M */
+	{50, 263},	/* 154 M */
+	{31, 120},	/* 205.25 M */
+	{119, 359},	/* 268.50 M */
+	{0, 0},
+};
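+
+/*
+ * Sanity check of one entry (illustrative): 675 MHz * 52 / 295 ~= 119 MHz,
+ * matching the first row of frac_table_675m, and 810 MHz * 31 / 211
+ * ~= 119 MHz likewise for the 162M link-rate table.
+ */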
+
+static int set_rate_edp_pixel(struct clk *clk, unsigned long rate)
+{
+	struct rcg_clk *rcg = to_rcg_clk(clk);
+	struct clk_freq_tbl *pixel_freq = rcg->current_freq;
+	struct frac_entry *frac;
+	int delta = 100000;
+	s64 request;
+	s64 src_rate;
+	unsigned long flags;
+
+	src_rate = clk_get_rate(clk->parent);
+
+	if (src_rate == 810000000)
+		frac = frac_table_810m;
+	else
+		frac = frac_table_675m;
+
+	while (frac->num) {
+		request = rate;
+		request *= frac->den;
+		request = div_s64(request, frac->num);
+		if ((src_rate < (request - delta)) ||
+			(src_rate > (request + delta))) {
+			frac++;
+			continue;
+		}
+
+		pixel_freq->div_src_val &= ~BM(4, 0);
+		if (frac->den == frac->num) {
+			pixel_freq->m_val = 0;
+			pixel_freq->n_val = 0;
+		} else {
+			pixel_freq->m_val = frac->num;
+			pixel_freq->n_val = ~(frac->den - frac->num);
+			pixel_freq->d_val = ~frac->den;
+		}
+		spin_lock_irqsave(&local_clock_reg_lock, flags);
+		__set_rate_mnd(rcg, pixel_freq);
+		spin_unlock_irqrestore(&local_clock_reg_lock, flags);
+		return 0;
+	}
+	return -EINVAL;
+}
+
+enum handoff byte_rcg_handoff(struct clk *clk)
+{
+	struct rcg_clk *rcg = to_rcg_clk(clk);
+	u32 div_val;
+	unsigned long pre_div_rate, parent_rate = clk_get_rate(clk->parent);
+
+	/* If the pre-divider is used, find the rate after the division */
+	div_val = readl_relaxed(CFG_RCGR_REG(rcg)) & CFG_RCGR_DIV_MASK;
+	if (div_val > 1)
+		pre_div_rate = parent_rate / ((div_val + 1) >> 1);
+	else
+		pre_div_rate = parent_rate;
+
+	clk->rate = pre_div_rate;
+
+	if (readl_relaxed(CMD_RCGR_REG(rcg)) & CMD_RCGR_ROOT_STATUS_BIT)
+		return HANDOFF_DISABLED_CLK;
+
+	return HANDOFF_ENABLED_CLK;
+}
+
+static int set_rate_byte(struct clk *clk, unsigned long rate)
+{
+	struct rcg_clk *rcg = to_rcg_clk(clk);
+	struct clk *pll = clk->parent;
+	unsigned long source_rate, div, flags;
+	struct clk_freq_tbl *byte_freq = rcg->current_freq;
+	int rc;
+
+	if (rate == 0)
+		return -EINVAL;
+
+	rc = clk_set_rate(pll, rate);
+	if (rc)
+		return rc;
+
+	source_rate = clk_round_rate(pll, rate);
+	if ((2 * source_rate) % rate)
+		return -EINVAL;
+
+	div = ((2 * source_rate)/rate) - 1;
+	if (div > CFG_RCGR_DIV_MASK)
+		return -EINVAL;
+
+	/*
+	 * Both 0 and 1 represent the same divider value in HW.
+	 * Always use 0 to simplify comparison.
+	 */
+	div = (div == 1) ? 0 : div;
+
+	byte_freq->div_src_val &= ~CFG_RCGR_DIV_MASK;
+	byte_freq->div_src_val |= BVAL(4, 0, div);
+
+	spin_lock_irqsave(&local_clock_reg_lock, flags);
+	__set_rate_hid(rcg, byte_freq);
+	spin_unlock_irqrestore(&local_clock_reg_lock, flags);
+
+	return 0;
+}
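+
+/*
+ * Worked example (hypothetical rates): if the PLL rounds a 600 MHz request
+ * to 1500 MHz, then div = (2 * 1500 / 600) - 1 = 4. The CFG field encodes
+ * 2*divider - 1, so this selects an effective divide-by-2.5, giving
+ * 1500 MHz / 2.5 = 600 MHz.
+ */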
+
+enum handoff pixel_rcg_handoff(struct clk *clk)
+{
+	struct rcg_clk *rcg = to_rcg_clk(clk);
+	u32 div_val = 0, mval = 0, nval = 0, cfg_regval;
+	unsigned long pre_div_rate, parent_rate = clk_get_rate(clk->parent);
+
+	cfg_regval = readl_relaxed(CFG_RCGR_REG(rcg));
+
+	/* If the pre-divider is used, find the rate after the division */
+	div_val = cfg_regval & CFG_RCGR_DIV_MASK;
+	if (div_val > 1)
+		pre_div_rate = parent_rate / ((div_val + 1) >> 1);
+	else
+		pre_div_rate = parent_rate;
+
+	clk->rate = pre_div_rate;
+
+	/*
+	 * Pixel clocks have one frequency entry in their frequency table.
+	 * Update that entry.
+	 */
+	if (rcg->current_freq) {
+		rcg->current_freq->div_src_val &= ~CFG_RCGR_DIV_MASK;
+		rcg->current_freq->div_src_val |= div_val;
+	}
+
+	/* If MND is used, find the rate after the MND division */
+	if ((cfg_regval & MND_MODE_MASK) == MND_DUAL_EDGE_MODE_BVAL) {
+		mval = readl_relaxed(M_REG(rcg));
+		nval = readl_relaxed(N_REG(rcg));
+		if (!nval)
+			return HANDOFF_DISABLED_CLK;
+		nval = (~nval) + mval;
+		if (rcg->current_freq) {
+			rcg->current_freq->n_val = ~(nval - mval);
+			rcg->current_freq->m_val = mval;
+			rcg->current_freq->d_val = ~nval;
+		}
+		clk->rate = (pre_div_rate * mval) / nval;
+	}
+
+	if (readl_relaxed(CMD_RCGR_REG(rcg)) & CMD_RCGR_ROOT_STATUS_BIT)
+		return HANDOFF_DISABLED_CLK;
+
+	return HANDOFF_ENABLED_CLK;
+}
+
+static long round_rate_pixel(struct clk *clk, unsigned long rate)
+{
+	int frac_num[] = {3, 2, 4, 1};
+	int frac_den[] = {8, 9, 9, 1};
+	int delta = 100000;
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(frac_num); i++) {
+		unsigned long request = (rate * frac_den[i]) / frac_num[i];
+		unsigned long src_rate;
+
+		src_rate = clk_round_rate(clk->parent, request);
+		if ((src_rate < (request - delta)) ||
+			(src_rate > (request + delta)))
+			continue;
+
+		return (src_rate * frac_num[i]) / frac_den[i];
+	}
+
+	return -EINVAL;
+}
+
+static int set_rate_pixel(struct clk *clk, unsigned long rate)
+{
+	struct rcg_clk *rcg = to_rcg_clk(clk);
+	struct clk_freq_tbl *pixel_freq = rcg->current_freq;
+	int frac_num[] = {1, 2, 4, 3, 2};
+	int frac_den[] = {1, 3, 9, 8, 9};
+	int delta = 100000;
+	int i, rc;
+
+	for (i = 0; i < ARRAY_SIZE(frac_num); i++) {
+		unsigned long request = (rate * frac_den[i]) / frac_num[i];
+		unsigned long src_rate;
+
+		src_rate = clk_round_rate(clk->parent, request);
+		if ((src_rate < (request - delta)) ||
+			(src_rate > (request + delta)))
+			continue;
+
+		rc = clk_set_rate(clk->parent, src_rate);
+		if (rc)
+			return rc;
+
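+		/* The M/N/D registers are programmed as M, ~(N - M) and ~N */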
+		pixel_freq->div_src_val &= ~BM(4, 0);
+		if (frac_den[i] == frac_num[i]) {
+			pixel_freq->m_val = 0;
+			pixel_freq->n_val = 0;
+		} else {
+			pixel_freq->m_val = frac_num[i];
+			pixel_freq->n_val = ~(frac_den[i] - frac_num[i]);
+			pixel_freq->d_val = ~frac_den[i];
+		}
+		set_rate_mnd(rcg, pixel_freq);
+		return 0;
+	}
+	return -EINVAL;
+}
+
+static int rcg_clk_set_parent(struct clk *clk, struct clk *parent_clk)
+{
+	struct rcg_clk *rcg = to_rcg_clk(clk);
+	struct clk *old_parent = clk->parent;
+	struct clk_freq_tbl *nf;
+	unsigned long flags;
+	int rc = 0;
+	unsigned int parent_rate, rate;
+	u32 m_val, n_val, d_val, div_val;
+	u32 cfg_regval;
+
+	/* Find the source clock freq tbl for the requested parent */
+	if (!rcg->freq_tbl)
+		return -ENXIO;
+
+	for (nf = rcg->freq_tbl; parent_clk != nf->src_clk; nf++) {
+		if (nf->freq_hz == FREQ_END)
+			return -ENXIO;
+	}
+
+	/*
+	 * The RCG should be unprepared when switching its source, since
+	 * the divider configuration is carried over unchanged.
+	 */
+	WARN(clk->prepare_count,
+		"Trying to switch RCG source while it is prepared!\n");
+
+	parent_rate = clk_get_rate(parent_clk);
+
+	div_val = (rcg->current_freq->div_src_val & CFG_RCGR_DIV_MASK);
+	if (div_val)
+		parent_rate /= ((div_val + 1) >> 1);
+
+	/* Update divisor. Source select bits should already be as expected */
+	nf->div_src_val &= ~CFG_RCGR_DIV_MASK;
+	nf->div_src_val |= div_val;
+
+	cfg_regval = readl_relaxed(CFG_RCGR_REG(rcg));
+
+	if ((cfg_regval & MND_MODE_MASK) == MND_DUAL_EDGE_MODE_BVAL) {
+		nf->m_val = m_val = readl_relaxed(M_REG(rcg));
+		n_val = readl_relaxed(N_REG(rcg));
+		d_val = readl_relaxed(D_REG(rcg));
+
+		/*
+		 * Sign extend the n and d values, since the values read
+		 * back from the registers are not sign extended.
+		 */
+		n_val |= (n_val >> 8) ? BM(31, 16) : BM(31, 8);
+		d_val |= (d_val >> 8) ? BM(31, 16) : BM(31, 8);
+
+		nf->n_val = n_val;
+		nf->d_val = d_val;
+
+		n_val = ~(n_val) + m_val;
+		rate = parent_rate * m_val;
+		if (n_val)
+			rate /= n_val;
+		else
+			WARN(1, "n_val was 0!!");
+	} else
+		rate = parent_rate;
+
+	/*
+	 * Warn if switching to the new parent with the current m, n, d
+	 * values violates the voltage constraints for the RCG.
+	 */
+	WARN(!is_rate_valid(clk, rate) && clk->prepare_count,
+		"Switch to new RCG parent violates voltage requirement!\n");
+
+	rc = __clk_pre_reparent(clk, nf->src_clk, &flags);
+	if (rc)
+		return rc;
+
+	/* Switch RCG source */
+	rcg->set_rate(rcg, nf);
+
+	rcg->current_freq = nf;
+	clk->parent = parent_clk;
+	clk->rate = rate;
+
+	__clk_post_reparent(clk, old_parent, &flags);
+
+	return 0;
+}
+
+/*
+ * Unlike other clocks, the HDMI rate is adjusted through PLL
+ * re-programming. It is also routed through an HID divider.
+ */
+static int rcg_clk_set_rate_hdmi(struct clk *c, unsigned long rate)
+{
+	struct rcg_clk *rcg = to_rcg_clk(c);
+	struct clk_freq_tbl *nf = rcg->freq_tbl;
+	int rc;
+
+	rc = clk_set_rate(nf->src_clk, rate);
+	if (rc < 0)
+		goto out;
+	set_rate_hid(rcg, nf);
+
+	rcg->current_freq = nf;
+out:
+	return rc;
+}
+
+static struct clk *rcg_hdmi_clk_get_parent(struct clk *c)
+{
+	struct rcg_clk *rcg = to_rcg_clk(c);
+	struct clk_freq_tbl *freq = rcg->freq_tbl;
+	u32 cmd_rcgr_regval;
+
+	/* Is there a pending configuration? */
+	cmd_rcgr_regval = readl_relaxed(CMD_RCGR_REG(rcg));
+	if (cmd_rcgr_regval & CMD_RCGR_CONFIG_DIRTY_MASK)
+		return NULL;
+
+	rcg->current_freq->freq_hz = clk_get_rate(c->parent);
+
+	return freq->src_clk;
+}
+
+static int rcg_clk_set_rate_edp(struct clk *c, unsigned long rate)
+{
+	struct clk_freq_tbl *nf;
+	struct rcg_clk *rcg = to_rcg_clk(c);
+	int rc;
+
+	for (nf = rcg->freq_tbl; nf->freq_hz != rate; nf++)
+		if (nf->freq_hz == FREQ_END) {
+			rc = -EINVAL;
+			goto out;
+		}
+
+	rc = clk_set_rate(nf->src_clk, rate);
+	if (rc < 0)
+		goto out;
+	set_rate_hid(rcg, nf);
+
+	rcg->current_freq = nf;
+	c->parent = nf->src_clk;
+out:
+	return rc;
+}
+
+static struct clk *edp_clk_get_parent(struct clk *c)
+{
+	struct rcg_clk *rcg = to_rcg_clk(c);
+	struct clk *clk;
+	struct clk_freq_tbl *freq;
+	unsigned long rate;
+	u32 cmd_rcgr_regval;
+
+	/* Is there a pending configuration? */
+	cmd_rcgr_regval = readl_relaxed(CMD_RCGR_REG(rcg));
+	if (cmd_rcgr_regval & CMD_RCGR_CONFIG_DIRTY_MASK)
+		return NULL;
+
+	/* Figure out what rate the rcg is running at */
+	for (freq = rcg->freq_tbl; freq->freq_hz != FREQ_END; freq++) {
+		clk = freq->src_clk;
+		if (clk && clk->ops->get_rate) {
+			rate = clk->ops->get_rate(clk);
+			if (rate == freq->freq_hz)
+				break;
+		}
+	}
+
+	/* No known frequency found */
+	if (freq->freq_hz == FREQ_END)
+		return NULL;
+
+	rcg->current_freq = freq;
+	return freq->src_clk;
+}
+
+static int rcg_clk_set_rate_dp(struct clk *clk, unsigned long rate)
+{
+	struct rcg_clk *rcg = to_rcg_clk(clk);
+	struct clk_freq_tbl *freq_tbl = rcg->current_freq;
+	unsigned long src_rate;
+	unsigned long num, den, flags;
+
+	src_rate = clk_get_rate(clk->parent);
+	if (!src_rate) {
+		pr_err("Invalid RCG parent rate\n");
+		return -EINVAL;
+	}
+
+	rational_best_approximation(src_rate, rate,
+			(unsigned long)(1 << 16) - 1,
+			(unsigned long)(1 << 16) - 1, &den, &num);
+
+	if (!num || !den) {
+		pr_err("Invalid MN values derived for requested rate %lu\n",
+							rate);
+		return -EINVAL;
+	}
+
+	freq_tbl->div_src_val &= ~BM(4, 0);
+	if (num == den) {
+		freq_tbl->m_val = 0;
+		freq_tbl->n_val = 0;
+	} else {
+		freq_tbl->m_val = num;
+		freq_tbl->n_val = ~(den - num);
+		freq_tbl->d_val = ~den;
+	}
+
+	spin_lock_irqsave(&local_clock_reg_lock, flags);
+	__set_rate_mnd(rcg, freq_tbl);
+	spin_unlock_irqrestore(&local_clock_reg_lock, flags);
+	return 0;
+}
+
+static int gate_clk_enable(struct clk *c)
+{
+	unsigned long flags;
+	u32 regval;
+	struct gate_clk *g = to_gate_clk(c);
+
+	spin_lock_irqsave(&local_clock_reg_lock, flags);
+	regval = readl_relaxed(GATE_EN_REG(g));
+	regval |= g->en_mask;
+	writel_relaxed(regval, GATE_EN_REG(g));
+	spin_unlock_irqrestore(&local_clock_reg_lock, flags);
+	if (g->delay_us)
+		udelay(g->delay_us);
+
+	return 0;
+}
+
+static void gate_clk_disable(struct clk *c)
+{
+	unsigned long flags;
+	u32 regval;
+	struct gate_clk *g = to_gate_clk(c);
+
+	spin_lock_irqsave(&local_clock_reg_lock, flags);
+	regval = readl_relaxed(GATE_EN_REG(g));
+	regval &= ~(g->en_mask);
+	writel_relaxed(regval, GATE_EN_REG(g));
+	spin_unlock_irqrestore(&local_clock_reg_lock, flags);
+	if (g->delay_us)
+		udelay(g->delay_us);
+}
+
+static void __iomem *gate_clk_list_registers(struct clk *c, int n,
+				struct clk_register_data **regs, u32 *size)
+{
+	struct gate_clk *g = to_gate_clk(c);
+	static struct clk_register_data data[] = {
+		{"EN_REG", 0x0},
+	};
+	if (n)
+		return ERR_PTR(-EINVAL);
+
+	*regs = data;
+	*size = ARRAY_SIZE(data);
+	return GATE_EN_REG(g);
+}
+
+static enum handoff gate_clk_handoff(struct clk *c)
+{
+	struct gate_clk *g = to_gate_clk(c);
+	u32 regval;
+
+	regval = readl_relaxed(GATE_EN_REG(g));
+	if (regval & g->en_mask)
+		return HANDOFF_ENABLED_CLK;
+
+	return HANDOFF_DISABLED_CLK;
+}
+
+static int gate_clk_set_flags(struct clk *c, unsigned flags)
+{
+	return cbcr_set_flags(GATE_EN_REG(to_gate_clk(c)), flags);
+}
+
+
+static int reset_clk_rst(struct clk *c, enum clk_reset_action action)
+{
+	struct reset_clk *rst = to_reset_clk(c);
+
+	if (!rst->reset_reg)
+		return -EPERM;
+
+	return __branch_clk_reset(RST_REG(rst), action);
+}
+
+static void __iomem *reset_clk_list_registers(struct clk *clk, int n,
+				struct clk_register_data **regs, u32 *size)
+{
+	struct reset_clk *rst = to_reset_clk(clk);
+	static struct clk_register_data data[] = {
+		{"BCR", 0x0},
+	};
+
+	if (n)
+		return ERR_PTR(-EINVAL);
+
+	*regs = data;
+	*size = ARRAY_SIZE(data);
+	return RST_REG(rst);
+}
+
+static DEFINE_SPINLOCK(mux_reg_lock);
+
+static int mux_reg_enable(struct mux_clk *clk)
+{
+	u32 regval;
+	unsigned long flags;
+
+	if (!clk->en_mask)
+		return 0;
+
+	spin_lock_irqsave(&mux_reg_lock, flags);
+	regval = readl_relaxed(*clk->base + clk->en_offset);
+	regval |= clk->en_mask;
+	writel_relaxed(regval, *clk->base + clk->en_offset);
+	/* Ensure enable request goes through before returning */
+	mb();
+	spin_unlock_irqrestore(&mux_reg_lock, flags);
+
+	return 0;
+}
+
+static void mux_reg_disable(struct mux_clk *clk)
+{
+	u32 regval;
+	unsigned long flags;
+
+	if (!clk->en_mask)
+		return;
+
+	spin_lock_irqsave(&mux_reg_lock, flags);
+	regval = readl_relaxed(*clk->base + clk->en_offset);
+	regval &= ~clk->en_mask;
+	writel_relaxed(regval, *clk->base + clk->en_offset);
+	spin_unlock_irqrestore(&mux_reg_lock, flags);
+}
+
+static int mux_reg_set_mux_sel(struct mux_clk *clk, int sel)
+{
+	u32 regval;
+	unsigned long flags;
+
+	spin_lock_irqsave(&mux_reg_lock, flags);
+	regval = readl_relaxed(MUX_REG(clk));
+	regval &= ~(clk->mask << clk->shift);
+	regval |= (sel & clk->mask) << clk->shift;
+	writel_relaxed(regval, MUX_REG(clk));
+	/* Ensure switch request goes through before returning */
+	mb();
+	spin_unlock_irqrestore(&mux_reg_lock, flags);
+
+	return 0;
+}
+
+static int mux_reg_get_mux_sel(struct mux_clk *clk)
+{
+	u32 regval = readl_relaxed(MUX_REG(clk));
+	return (regval >> clk->shift) & clk->mask;
+}
+
+static bool mux_reg_is_enabled(struct mux_clk *clk)
+{
+	u32 regval = readl_relaxed(MUX_REG(clk));
+	return !!(regval & clk->en_mask);
+}
+
+static void __iomem *mux_clk_list_registers(struct mux_clk *clk, int n,
+				struct clk_register_data **regs, u32 *size)
+{
+	static struct clk_register_data data[] = {
+		{"DEBUG_CLK_CTL", 0x0},
+	};
+
+	if (n)
+		return ERR_PTR(-EINVAL);
+
+	*regs = data;
+	*size = ARRAY_SIZE(data);
+	return *clk->base + clk->offset;
+}
+
+/* PLL post-divider setting for each divider value */
+static struct div_map postdiv_map[] = {
+	{  0x0, 1  },
+	{  0x1, 2  },
+	{  0x3, 3  },
+	{  0x3, 4  },
+	{  0x5, 5  },
+	{  0x7, 7  },
+	{  0x7, 8  },
+	{  0xF, 16 },
+};
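+/*
+ * Some settings are shared (e.g. 0x3 encodes both div 3 and div 4), so
+ * postdiv_reg_get_div() reports the first matching divider.
+ */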
+
+static int postdiv_reg_set_div(struct div_clk *clk, int div)
+{
+	struct clk *parent = NULL;
+	u32 regval;
+	unsigned long flags;
+	int mask = -1;
+	int i, ret = 0;
+
+	/* Divider is not configurable */
+	if (!clk->mask)
+		return 0;
+
+	for (i = 0; i < ARRAY_SIZE(postdiv_map); i++) {
+		if (postdiv_map[i].div == div) {
+			mask = postdiv_map[i].mask;
+			break;
+		}
+	}
+
+	if (mask < 0)
+		return -EINVAL;
+
+	spin_lock_irqsave(&clk->c.lock, flags);
+	parent = clk->c.parent;
+	if (parent->count && parent->ops->disable)
+		parent->ops->disable(parent);
+
+	regval = readl_relaxed(DIV_REG(clk));
+	regval &= ~(clk->mask << clk->shift);
+	regval |= (mask & clk->mask) << clk->shift;
+	writel_relaxed(regval, DIV_REG(clk));
+	/* Ensure switch request goes through before returning */
+	mb();
+
+	if (parent->count && parent->ops->enable) {
+		ret = parent->ops->enable(parent);
+		if (ret)
+			pr_err("Failed to force enable div parent!\n");
+	}
+
+	spin_unlock_irqrestore(&clk->c.lock, flags);
+	return ret;
+}
+
+static int postdiv_reg_get_div(struct div_clk *clk)
+{
+	u32 regval;
+	int i, div = 0;
+
+	/* Divider is not configurable */
+	if (!clk->mask)
+		return clk->data.div;
+
+	regval = readl_relaxed(DIV_REG(clk));
+	regval = (regval >> clk->shift) & clk->mask;
+	for (i = 0; i < ARRAY_SIZE(postdiv_map); i++) {
+		if (postdiv_map[i].mask == regval) {
+			div = postdiv_map[i].div;
+			break;
+		}
+	}
+	if (!div)
+		return -EINVAL;
+
+	return div;
+}
+
+static int div_reg_set_div(struct div_clk *clk, int div)
+{
+	u32 regval;
+	unsigned long flags;
+
+	/* Divider is not configurable */
+	if (!clk->mask)
+		return 0;
+
+	spin_lock_irqsave(&local_clock_reg_lock, flags);
+	regval = readl_relaxed(*clk->base + clk->offset);
+	regval &= ~(clk->mask << clk->shift);
+	regval |= (div & clk->mask) << clk->shift;
+	writel_relaxed(regval, *clk->base + clk->offset);
+	/* Ensure switch request goes through before returning */
+	mb();
+	spin_unlock_irqrestore(&local_clock_reg_lock, flags);
+
+	return 0;
+}
+
+static int div_reg_get_div(struct div_clk *clk)
+{
+	u32 regval;
+	/* Divider is not configurable */
+	if (!clk->mask)
+		return clk->data.div;
+
+	regval = readl_relaxed(*clk->base + clk->offset);
+	return (regval >> clk->shift) & clk->mask;
+}
+
+/* =================Half-integer RCG without MN counter================= */
+#define RCGR_CMD_REG(x) ((x)->base + (x)->div_offset)
+#define RCGR_DIV_REG(x) ((x)->base + (x)->div_offset + 4)
+#define RCGR_SRC_REG(x) ((x)->base + (x)->div_offset + 4)
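+/* The divider and source fields share the CFG register at offset +0x4 */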
+
+static int rcg_mux_div_update_config(struct mux_div_clk *md)
+{
+	u32 regval, count;
+
+	regval = readl_relaxed(RCGR_CMD_REG(md));
+	regval |= CMD_RCGR_CONFIG_UPDATE_BIT;
+	writel_relaxed(regval, RCGR_CMD_REG(md));
+
+	/* Wait for update to take effect */
+	for (count = UPDATE_CHECK_MAX_LOOPS; count > 0; count--) {
+		if (!(readl_relaxed(RCGR_CMD_REG(md)) &
+			    CMD_RCGR_CONFIG_UPDATE_BIT))
+			return 0;
+		udelay(1);
+	}
+
+	CLK_WARN(&md->c, true, "didn't update its configuration.");
+
+	return -EBUSY;
+}
+
+static void rcg_get_src_div(struct mux_div_clk *md, u32 *src_sel, u32 *div)
+{
+	u32 regval;
+	unsigned long flags;
+
+	spin_lock_irqsave(&local_clock_reg_lock, flags);
+	/* Is there a pending configuration? */
+	regval = readl_relaxed(RCGR_CMD_REG(md));
+	if (regval & CMD_RCGR_CONFIG_DIRTY_MASK) {
+		CLK_WARN(&md->c, true, "there is a pending configuration.");
+		spin_unlock_irqrestore(&local_clock_reg_lock, flags);
+		return;
+	}
+
+	regval = readl_relaxed(RCGR_DIV_REG(md));
+	regval &= (md->div_mask << md->div_shift);
+	*div = regval >> md->div_shift;
+
+	/* A field value of 0 means bypass (divide-by-1) */
+	if (*div == 0)
+		*div = 1;
+	/* Return the divider doubled, to allow for half-integer values */
+	*div += 1;
+
+	regval = readl_relaxed(RCGR_SRC_REG(md));
+	regval &= (md->src_mask << md->src_shift);
+	*src_sel = regval >> md->src_shift;
+	spin_unlock_irqrestore(&local_clock_reg_lock, flags);
+}
+
+static void mux_div_set_force_enable(struct mux_div_clk *md)
+{
+	u32 regval;
+	unsigned long flags;
+	int count;
+
+	spin_lock_irqsave(&local_clock_reg_lock, flags);
+	regval = readl_relaxed(RCGR_CMD_REG(md));
+	regval |= CMD_RCGR_ROOT_ENABLE_BIT;
+	writel_relaxed(regval, RCGR_CMD_REG(md));
+
+	/* Wait for RCG to turn ON */
+	for (count = UPDATE_CHECK_MAX_LOOPS; count > 0; count--) {
+		if (!(readl_relaxed(RCGR_CMD_REG(md)) &
+				CMD_RCGR_CONFIG_UPDATE_BIT))
+			goto exit;
+		udelay(1);
+	}
+	CLK_WARN(&md->c, count == 0, "rcg didn't turn on.");
+exit:
+	spin_unlock_irqrestore(&local_clock_reg_lock, flags);
+}
+
+static void mux_div_clear_force_enable(struct mux_div_clk *md)
+{
+	u32 regval;
+	unsigned long flags;
+
+	spin_lock_irqsave(&local_clock_reg_lock, flags);
+	regval = readl_relaxed(RCGR_CMD_REG(md));
+	regval &= ~CMD_RCGR_ROOT_ENABLE_BIT;
+	writel_relaxed(regval, RCGR_CMD_REG(md));
+	spin_unlock_irqrestore(&local_clock_reg_lock, flags);
+}
+
+static int rcg_set_src_div(struct mux_div_clk *md, u32 src_sel, u32 div)
+{
+	u32 regval;
+	unsigned long flags;
+	int ret;
+
+	/*
+	 * div is passed doubled to encode half-integer dividers; the
+	 * register field stores the doubled value minus one.
+	 */
+	if (div)
+		div -= 1;
+
+	spin_lock_irqsave(&local_clock_reg_lock, flags);
+	regval = readl_relaxed(RCGR_DIV_REG(md));
+	regval &= ~(md->div_mask << md->div_shift);
+	regval |= div << md->div_shift;
+	writel_relaxed(regval, RCGR_DIV_REG(md));
+
+	regval = readl_relaxed(RCGR_SRC_REG(md));
+	regval &= ~(md->src_mask << md->src_shift);
+	regval |= src_sel << md->src_shift;
+	writel_relaxed(regval, RCGR_SRC_REG(md));
+
+	ret = rcg_mux_div_update_config(md);
+	spin_unlock_irqrestore(&local_clock_reg_lock, flags);
+	return ret;
+}
+
+static int rcg_enable(struct mux_div_clk *md)
+{
+	if (md->force_enable_md)
+		mux_div_set_force_enable(md);
+
+	return rcg_set_src_div(md, md->src_sel, md->data.div);
+}
+
+static void rcg_disable(struct mux_div_clk *md)
+{
+	u32 src_sel;
+
+	if (md->force_enable_md)
+		mux_div_clear_force_enable(md);
+
+	if (!md->safe_freq)
+		return;
+
+	src_sel = parent_to_src_sel(md->parents, md->num_parents,
+				md->safe_parent);
+
+	rcg_set_src_div(md, src_sel, md->safe_div);
+}
+
+static bool rcg_is_enabled(struct mux_div_clk *md)
+{
+	u32 regval;
+
+	regval = readl_relaxed(RCGR_CMD_REG(md));
+	if (regval & CMD_RCGR_ROOT_STATUS_BIT)
+		return false;
+	else
+		return true;
+}
+
+static void __iomem *rcg_list_registers(struct mux_div_clk *md, int n,
+			struct clk_register_data **regs, u32 *size)
+{
+	static struct clk_register_data data[] = {
+		{"CMD_RCGR", 0x0},
+		{"CFG_RCGR", 0x4},
+	};
+
+	if (n)
+		return ERR_PTR(-EINVAL);
+
+	*regs = data;
+	*size = ARRAY_SIZE(data);
+	return RCGR_CMD_REG(md);
+}
+
+struct clk_ops clk_ops_empty;
+
+struct clk_ops clk_ops_rst = {
+	.reset = reset_clk_rst,
+	.list_registers = reset_clk_list_registers,
+};
+
+struct clk_ops clk_ops_rcg = {
+	.enable = rcg_clk_enable,
+	.disable = rcg_clk_disable,
+	.set_rate = rcg_clk_set_rate,
+	.list_rate = rcg_clk_list_rate,
+	.round_rate = rcg_clk_round_rate,
+	.handoff = rcg_clk_handoff,
+	.get_parent = rcg_clk_get_parent,
+	.set_parent = rcg_clk_set_parent,
+	.list_registers = rcg_hid_clk_list_registers,
+};
+
+struct clk_ops clk_ops_rcg_mnd = {
+	.enable = rcg_clk_enable,
+	.disable = rcg_clk_disable,
+	.set_rate = rcg_clk_set_rate,
+	.list_rate = rcg_clk_list_rate,
+	.round_rate = rcg_clk_round_rate,
+	.handoff = rcg_mnd_clk_handoff,
+	.get_parent = rcg_mnd_clk_get_parent,
+	.set_parent = rcg_clk_set_parent,
+	.list_registers = rcg_mnd_clk_list_registers,
+};
+
+struct clk_ops clk_ops_pixel = {
+	.enable = rcg_clk_enable,
+	.disable = rcg_clk_disable,
+	.set_rate = set_rate_pixel,
+	.list_rate = rcg_clk_list_rate,
+	.round_rate = round_rate_pixel,
+	.handoff = pixel_rcg_handoff,
+	.list_registers = rcg_mnd_clk_list_registers,
+};
+
+struct clk_ops clk_ops_pixel_multiparent = {
+	.enable = rcg_clk_enable,
+	.disable = rcg_clk_disable,
+	.set_rate = set_rate_pixel,
+	.list_rate = rcg_clk_list_rate,
+	.round_rate = round_rate_pixel,
+	.handoff = pixel_rcg_handoff,
+	.list_registers = rcg_mnd_clk_list_registers,
+	.get_parent = display_clk_get_parent,
+	.set_parent = rcg_clk_set_parent,
+};
+
+struct clk_ops clk_ops_edppixel = {
+	.enable = rcg_clk_enable,
+	.disable = rcg_clk_disable,
+	.set_rate = set_rate_edp_pixel,
+	.list_rate = rcg_clk_list_rate,
+	.round_rate = rcg_clk_round_rate,
+	.handoff = pixel_rcg_handoff,
+	.list_registers = rcg_mnd_clk_list_registers,
+};
+
+struct clk_ops clk_ops_byte = {
+	.enable = rcg_clk_enable,
+	.disable = rcg_clk_disable,
+	.set_rate = set_rate_byte,
+	.list_rate = rcg_clk_list_rate,
+	.round_rate = rcg_clk_round_rate,
+	.handoff = byte_rcg_handoff,
+	.list_registers = rcg_hid_clk_list_registers,
+};
+
+struct clk_ops clk_ops_byte_multiparent = {
+	.enable = rcg_clk_enable,
+	.disable = rcg_clk_disable,
+	.set_rate = set_rate_byte,
+	.list_rate = rcg_clk_list_rate,
+	.round_rate = rcg_clk_round_rate,
+	.handoff = byte_rcg_handoff,
+	.list_registers = rcg_hid_clk_list_registers,
+	.get_parent = display_clk_get_parent,
+	.set_parent = rcg_clk_set_parent,
+};
+
+struct clk_ops clk_ops_rcg_hdmi = {
+	.enable = rcg_clk_enable,
+	.disable = rcg_clk_disable,
+	.set_rate = rcg_clk_set_rate_hdmi,
+	.list_rate = rcg_clk_list_rate,
+	.round_rate = rcg_clk_round_rate,
+	.handoff = rcg_clk_handoff,
+	.get_parent = rcg_hdmi_clk_get_parent,
+	.list_registers = rcg_hid_clk_list_registers,
+};
+
+struct clk_ops clk_ops_rcg_edp = {
+	.enable = rcg_clk_enable,
+	.disable = rcg_clk_disable,
+	.set_rate = rcg_clk_set_rate_edp,
+	.list_rate = rcg_clk_list_rate,
+	.round_rate = rcg_clk_round_rate,
+	.handoff = rcg_clk_handoff,
+	.get_parent = edp_clk_get_parent,
+	.list_registers = rcg_hid_clk_list_registers,
+};
+
+struct clk_ops clk_ops_rcg_dp = {
+	.enable = rcg_clk_enable,
+	.disable = rcg_clk_disable,
+	.set_rate = rcg_clk_set_rate_dp,
+	.list_rate = rcg_clk_list_rate,
+	.handoff = pixel_rcg_handoff,
+	.list_registers = rcg_mnd_clk_list_registers,
+};
+
+struct clk_ops clk_ops_branch = {
+	.enable = branch_clk_enable,
+	.prepare = branch_clk_prepare,
+	.disable = branch_clk_disable,
+	.unprepare = branch_clk_unprepare,
+	.set_rate = branch_clk_set_rate,
+	.get_rate = branch_clk_get_rate,
+	.list_rate = branch_clk_list_rate,
+	.round_rate = branch_clk_round_rate,
+	.reset = branch_clk_reset,
+	.set_flags = branch_clk_set_flags,
+	.handoff = branch_clk_handoff,
+	.list_registers = branch_clk_list_registers,
+};
+
+struct clk_ops clk_ops_branch_hw_ctl = {
+	.enable = hw_ctl_clk_enable,
+	.disable = hw_ctl_clk_disable,
+	.set_rate = hw_ctl_clk_set_rate,
+	.get_rate = hw_ctl_clk_get_rate,
+	.round_rate = hw_ctl_clk_round_rate,
+};
+
+struct clk_ops clk_ops_vote = {
+	.enable = local_vote_clk_enable,
+	.disable = local_vote_clk_disable,
+	.reset = local_vote_clk_reset,
+	.handoff = local_vote_clk_handoff,
+	.list_registers = local_vote_clk_list_registers,
+};
+
+struct clk_ops clk_ops_gate = {
+	.enable = gate_clk_enable,
+	.disable = gate_clk_disable,
+	.set_rate = parent_set_rate,
+	.get_rate = parent_get_rate,
+	.round_rate = parent_round_rate,
+	.set_flags = gate_clk_set_flags,
+	.handoff = gate_clk_handoff,
+	.list_registers = gate_clk_list_registers,
+};
+
+struct clk_mux_ops mux_reg_ops = {
+	.enable = mux_reg_enable,
+	.disable = mux_reg_disable,
+	.set_mux_sel = mux_reg_set_mux_sel,
+	.get_mux_sel = mux_reg_get_mux_sel,
+	.is_enabled = mux_reg_is_enabled,
+	.list_registers = mux_clk_list_registers,
+};
+
+struct clk_div_ops div_reg_ops = {
+	.set_div = div_reg_set_div,
+	.get_div = div_reg_get_div,
+};
+
+struct clk_div_ops postdiv_reg_ops = {
+	.set_div = postdiv_reg_set_div,
+	.get_div = postdiv_reg_get_div,
+};
+
+struct mux_div_ops rcg_mux_div_ops = {
+	.enable = rcg_enable,
+	.disable = rcg_disable,
+	.set_src_div = rcg_set_src_div,
+	.get_src_div = rcg_get_src_div,
+	.is_enabled = rcg_is_enabled,
+	.list_registers = rcg_list_registers,
+};
+
+static void *cbc_dt_parser(struct device *dev, struct device_node *np)
+{
+	struct msmclk_data *drv;
+	struct branch_clk *branch_clk;
+	int rc;
+
+	branch_clk = devm_kzalloc(dev, sizeof(*branch_clk), GFP_KERNEL);
+	if (!branch_clk) {
+		dt_err(np, "memory alloc failure\n");
+		return ERR_PTR(-ENOMEM);
+	}
+
+	drv = msmclk_parse_phandle(dev, np->parent->phandle);
+	if (IS_ERR_OR_NULL(drv))
+		return ERR_CAST(drv);
+	branch_clk->base = &drv->base;
+
+	rc = of_property_read_u32(np, "qcom,base-offset",
+						&branch_clk->cbcr_reg);
+	if (rc) {
+		dt_err(np, "missing/incorrect qcom,base-offset dt property\n");
+		return ERR_PTR(rc);
+	}
+
+	/* Optional property */
+	of_property_read_u32(np, "qcom,bcr-offset", &branch_clk->bcr_reg);
+
+	branch_clk->has_sibling = of_property_read_bool(np,
+							"qcom,has-sibling");
+
+	branch_clk->c.ops = &clk_ops_branch;
+
+	return msmclk_generic_clk_init(dev, np, &branch_clk->c);
+}
+MSMCLK_PARSER(cbc_dt_parser, "qcom,cbc", 0);
+
+static void *local_vote_clk_dt_parser(struct device *dev,
+						struct device_node *np)
+{
+	struct local_vote_clk *vote_clk;
+	struct msmclk_data *drv;
+	int rc, val;
+
+	vote_clk = devm_kzalloc(dev, sizeof(*vote_clk), GFP_KERNEL);
+	if (!vote_clk) {
+		dt_err(np, "failed to alloc memory\n");
+		return ERR_PTR(-ENOMEM);
+	}
+
+	drv = msmclk_parse_phandle(dev, np->parent->phandle);
+	if (IS_ERR_OR_NULL(drv))
+		return ERR_CAST(drv);
+	vote_clk->base = &drv->base;
+
+	rc = of_property_read_u32(np, "qcom,base-offset",
+						&vote_clk->cbcr_reg);
+	if (rc) {
+		dt_err(np, "missing/incorrect qcom,base-offset dt property\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	rc = of_property_read_u32(np, "qcom,en-offset", &vote_clk->vote_reg);
+	if (rc) {
+		dt_err(np, "missing/incorrect qcom,en-offset dt property\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	rc = of_property_read_u32(np, "qcom,en-bit", &val);
+	if (rc) {
+		dt_err(np, "missing/incorrect qcom,en-bit dt property\n");
+		return ERR_PTR(-EINVAL);
+	}
+	vote_clk->en_mask = BIT(val);
+
+	vote_clk->c.ops = &clk_ops_vote;
+
+	/* Optional property */
+	of_property_read_u32(np, "qcom,bcr-offset", &vote_clk->bcr_reg);
+
+	return msmclk_generic_clk_init(dev, np, &vote_clk->c);
+}
+MSMCLK_PARSER(local_vote_clk_dt_parser, "qcom,local-vote-clk", 0);
+
+static void *gate_clk_dt_parser(struct device *dev, struct device_node *np)
+{
+	struct gate_clk *gate_clk;
+	struct msmclk_data *drv;
+	u32 en_bit;
+	int rc;
+
+	gate_clk = devm_kzalloc(dev, sizeof(*gate_clk), GFP_KERNEL);
+	if (!gate_clk) {
+		dt_err(np, "memory alloc failure\n");
+		return ERR_PTR(-ENOMEM);
+	}
+
+	drv = msmclk_parse_phandle(dev, np->parent->phandle);
+	if (IS_ERR_OR_NULL(drv))
+		return ERR_CAST(drv);
+	gate_clk->base = &drv->base;
+
+	rc = of_property_read_u32(np, "qcom,en-offset", &gate_clk->en_reg);
+	if (rc) {
+		dt_err(np, "missing qcom,en-offset dt property\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	rc = of_property_read_u32(np, "qcom,en-bit", &en_bit);
+	if (rc) {
+		dt_err(np, "missing qcom,en-bit dt property\n");
+		return ERR_PTR(-EINVAL);
+	}
+	gate_clk->en_mask = BIT(en_bit);
+
+	/* Optional Property */
+	rc = of_property_read_u32(np, "qcom,delay", &gate_clk->delay_us);
+	if (rc)
+		gate_clk->delay_us = 0;
+
+	gate_clk->c.ops = &clk_ops_gate;
+	return msmclk_generic_clk_init(dev, np, &gate_clk->c);
+}
+MSMCLK_PARSER(gate_clk_dt_parser, "qcom,gate-clk", 0);
+
+
+static inline u32 rcg_calc_m(u32 m, u32 n)
+{
+	return m;
+}
+
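+/* The N register value is ~(N - M); n <= 1 disables the MN counter */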
+static inline u32 rcg_calc_n(u32 m, u32 n)
+{
+	n = n > 1 ? n : 0;
+	return ~((n)-(m)) * !!(n);
+}
+
+static inline u32 rcg_calc_duty_cycle(u32 m, u32 n)
+{
+	return ~n;
+}
+
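+/* Encode an (integer + optional .5) divider as 2*div - 1, e.g. 3.5 -> 6 */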
+static inline u32 rcg_calc_div_src(u32 div_int, u32 div_frac, u32 src_sel)
+{
+	int div = 2 * div_int + (div_frac ? 1 : 0) - 1;
+	/* set bypass mode instead of a divider of 1 */
+	div = (div != 1) ? div : 0;
+	return BVAL(4, 0, max(div, 0))
+			| BVAL(10, 8, src_sel);
+}
+
+struct clk_src *msmclk_parse_clk_src(struct device *dev,
+				struct device_node *np, int *array_size)
+{
+	struct clk_src *clks;
+	const void *prop;
+	int num_parents, len, i, prop_len, rc;
+	char *name = "qcom,parents";
+
+	if (!array_size) {
+		dt_err(np, "array_size must be a valid pointer\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	prop = of_get_property(np, name, &prop_len);
+	if (!prop) {
+		dt_prop_err(np, name, "missing dt property\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	len = sizeof(phandle) + sizeof(u32);
+	if (prop_len % len) {
+		dt_prop_err(np, name, "invalid property length\n");
+		return ERR_PTR(-EINVAL);
+	}
+	num_parents = prop_len / len;
+
+	clks = devm_kzalloc(dev, sizeof(*clks) * num_parents, GFP_KERNEL);
+	if (!clks) {
+		dt_err(np, "memory alloc failure\n");
+		return ERR_PTR(-ENOMEM);
+	}
+
+	/* Assume that u32 and phandle have the same size */
+	for (i = 0; i < num_parents; i++) {
+		phandle p;
+		struct clk_src *a = &clks[i];
+
+		rc = of_property_read_u32_index(np, name, 2 * i, &a->sel);
+		rc |= of_property_read_phandle_index(np, name, 2 * i + 1, &p);
+
+		if (rc) {
+			dt_prop_err(np, name,
+				"unable to read parent clock or mux index\n");
+			return ERR_PTR(-EINVAL);
+		}
+
+		a->src = msmclk_parse_phandle(dev, p);
+		if (IS_ERR(a->src)) {
+			dt_prop_err(np, name, "hashtable lookup failed\n");
+			return ERR_CAST(a->src);
+		}
+	}
+
+	*array_size = num_parents;
+
+	return clks;
+}
+
+static int rcg_parse_freq_tbl(struct device *dev,
+			struct device_node *np, struct rcg_clk *rcg)
+{
+	const void *prop;
+	u32 prop_len, num_rows, i, j = 0;
+	struct clk_freq_tbl *tbl;
+	int rc;
+	char *name = "qcom,freq-tbl";
+
+	prop = of_get_property(np, name, &prop_len);
+	if (!prop) {
+		dt_prop_err(np, name, "missing dt property\n");
+		return -EINVAL;
+	}
+
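+	/* Each 6-u32 row is <freq_hz div_int div_frac m n src_phandle> */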
+	prop_len /= sizeof(u32);
+	if (prop_len % 6) {
+		dt_prop_err(np, name, "bad length\n");
+		return -EINVAL;
+	}
+
+	num_rows = prop_len / 6;
+	/* The table is terminated by a FREQ_END sentinel entry. */
+	rcg->freq_tbl = devm_kzalloc(dev,
+				sizeof(*rcg->freq_tbl) * (num_rows + 1),
+				GFP_KERNEL);
+
+	if (!rcg->freq_tbl) {
+		dt_err(np, "memory alloc failure\n");
+		return -ENOMEM;
+	}
+
+	tbl = rcg->freq_tbl;
+	for (i = 0; i < num_rows; i++, tbl++) {
+		phandle p;
+		u32 div_int, div_frac, m, n, src_sel, freq_hz;
+
+		rc = of_property_read_u32_index(np, name, j++, &freq_hz);
+		rc |= of_property_read_u32_index(np, name, j++, &div_int);
+		rc |= of_property_read_u32_index(np, name, j++, &div_frac);
+		rc |= of_property_read_u32_index(np, name, j++, &m);
+		rc |= of_property_read_u32_index(np, name, j++, &n);
+		rc |= of_property_read_u32_index(np, name, j++, &p);
+
+		if (rc) {
+			dt_prop_err(np, name, "unable to read u32\n");
+			return -EINVAL;
+		}
+
+		tbl->freq_hz = (unsigned long)freq_hz;
+		tbl->src_clk = msmclk_parse_phandle(dev, p);
+		if (IS_ERR_OR_NULL(tbl->src_clk)) {
+			dt_prop_err(np, name, "hashtable lookup failure\n");
+			return PTR_ERR(tbl->src_clk);
+		}
+
+		tbl->m_val = rcg_calc_m(m, n);
+		tbl->n_val = rcg_calc_n(m, n);
+		tbl->d_val = rcg_calc_duty_cycle(m, n);
+
+		src_sel = parent_to_src_sel(rcg->c.parents,
+					rcg->c.num_parents, tbl->src_clk);
+		tbl->div_src_val = rcg_calc_div_src(div_int, div_frac,
+								src_sel);
+	}
+	/* End table with special value */
+	tbl->freq_hz = FREQ_END;
+	return 0;
+}
+
+static void *rcg_clk_dt_parser(struct device *dev, struct device_node *np)
+{
+	struct rcg_clk *rcg;
+	struct msmclk_data *drv;
+	int rc;
+
+	rcg = devm_kzalloc(dev, sizeof(*rcg), GFP_KERNEL);
+	if (!rcg) {
+		dt_err(np, "memory alloc failure\n");
+		return ERR_PTR(-ENOMEM);
+	}
+
+	drv = msmclk_parse_phandle(dev, np->parent->phandle);
+	if (IS_ERR_OR_NULL(drv))
+		return drv;
+	rcg->base = &drv->base;
+
+	rcg->c.parents = msmclk_parse_clk_src(dev, np, &rcg->c.num_parents);
+	if (IS_ERR(rcg->c.parents)) {
+		dt_err(np, "unable to read parents\n");
+		return ERR_CAST(rcg->c.parents);
+	}
+
+	rc = of_property_read_u32(np, "qcom,base-offset", &rcg->cmd_rcgr_reg);
+	if (rc) {
+		dt_err(np, "missing qcom,base-offset dt property\n");
+		return ERR_PTR(rc);
+	}
+
+	rc = rcg_parse_freq_tbl(dev, np, rcg);
+	if (rc) {
+		dt_err(np, "unable to read freq_tbl\n");
+		return ERR_PTR(rc);
+	}
+	rcg->current_freq = &rcg_dummy_freq;
+
+	if (of_device_is_compatible(np, "qcom,rcg-hid")) {
+		rcg->c.ops = &clk_ops_rcg;
+		rcg->set_rate = set_rate_hid;
+	} else if (of_device_is_compatible(np, "qcom,rcg-mn")) {
+		rcg->c.ops = &clk_ops_rcg_mnd;
+		rcg->set_rate = set_rate_mnd;
+	} else {
+		dt_err(np, "unexpected compatible string\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	return msmclk_generic_clk_init(dev, np, &rcg->c);
+}
+MSMCLK_PARSER(rcg_clk_dt_parser, "qcom,rcg-hid", 0);
+MSMCLK_PARSER(rcg_clk_dt_parser, "qcom,rcg-mn", 1);
+
+static int parse_rec_parents(struct device *dev,
+			struct device_node *np, struct mux_clk *mux)
+{
+	int i, rc;
+	char *name = "qcom,recursive-parents";
+	phandle p;
+
+	mux->num_rec_parents = of_property_count_phandles(np, name);
+	if (mux->num_rec_parents <= 0)
+		return 0;
+
+	mux->rec_parents = devm_kzalloc(dev,
+			sizeof(*mux->rec_parents) * mux->num_rec_parents,
+			GFP_KERNEL);
+
+	if (!mux->rec_parents) {
+		dt_err(np, "memory alloc failure\n");
+		return -ENOMEM;
+	}
+
+	for (i = 0; i < mux->num_rec_parents; i++) {
+		rc = of_property_read_phandle_index(np, name, i, &p);
+		if (rc) {
+			dt_prop_err(np, name, "unable to read phandle\n");
+			return rc;
+		}
+
+		mux->rec_parents[i] = msmclk_parse_phandle(dev, p);
+		if (IS_ERR(mux->rec_parents[i])) {
+			dt_prop_err(np, name, "hashtable lookup failure\n");
+			return PTR_ERR(mux->rec_parents[i]);
+		}
+	}
+
+	return 0;
+}
+
+static void *mux_reg_clk_dt_parser(struct device *dev, struct device_node *np)
+{
+	struct mux_clk *mux;
+	struct msmclk_data *drv;
+	int rc;
+
+	mux = devm_kzalloc(dev, sizeof(*mux), GFP_KERNEL);
+	if (!mux) {
+		dt_err(np, "memory alloc failure\n");
+		return ERR_PTR(-ENOMEM);
+	}
+
+	mux->parents = msmclk_parse_clk_src(dev, np, &mux->num_parents);
+	if (IS_ERR(mux->parents))
+		return mux->parents;
+
+	mux->c.parents = mux->parents;
+	mux->c.num_parents = mux->num_parents;
+
+	drv = msmclk_parse_phandle(dev, np->parent->phandle);
+	if (IS_ERR_OR_NULL(drv))
+		return drv;
+	mux->base = &drv->base;
+
+	rc = parse_rec_parents(dev, np, mux);
+	if (rc) {
+		dt_err(np, "Incorrect qcom,recursive-parents dt property\n");
+		return ERR_PTR(rc);
+	}
+
+	rc = of_property_read_u32(np, "qcom,offset", &mux->offset);
+	if (rc) {
+		dt_err(np, "missing qcom,offset dt property\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	rc = of_property_read_u32(np, "qcom,mask", &mux->mask);
+	if (rc) {
+		dt_err(np, "missing qcom,mask dt property\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	rc = of_property_read_u32(np, "qcom,shift", &mux->shift);
+	if (rc) {
+		dt_err(np, "missing qcom,shift dt property\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	mux->c.ops = &clk_ops_gen_mux;
+	mux->ops = &mux_reg_ops;
+
+	/* Optional Properties */
+	of_property_read_u32(np, "qcom,en-offset", &mux->en_offset);
+	of_property_read_u32(np, "qcom,en-mask", &mux->en_mask);
+
+	return msmclk_generic_clk_init(dev, np, &mux->c);
+}
+MSMCLK_PARSER(mux_reg_clk_dt_parser, "qcom,mux-reg", 0);
+
+static void *measure_clk_dt_parser(struct device *dev,
+					struct device_node *np)
+{
+	struct mux_clk *mux;
+	struct clk *c;
+	struct measure_clk_data *p;
+	struct clk_ops *clk_ops_measure_mux;
+	phandle cxo;
+	int rc;
+
+	c = mux_reg_clk_dt_parser(dev, np);
+	if (IS_ERR(c))
+		return c;
+
+	mux = to_mux_clk(c);
+
+	p = devm_kzalloc(dev, sizeof(*p), GFP_KERNEL);
+	if (!p) {
+		dt_err(np, "memory alloc failure\n");
+		return ERR_PTR(-ENOMEM);
+	}
+
+	rc = of_property_read_phandle_index(np, "qcom,cxo", 0, &cxo);
+	if (rc) {
+		dt_err(np, "missing qcom,cxo\n");
+		return ERR_PTR(-EINVAL);
+	}
+	p->cxo = msmclk_parse_phandle(dev, cxo);
+	if (IS_ERR_OR_NULL(p->cxo)) {
+		dt_prop_err(np, "qcom,cxo", "hashtable lookup failure\n");
+		return p->cxo;
+	}
+
+	rc = of_property_read_u32(np, "qcom,xo-div4-cbcr", &p->xo_div4_cbcr);
+	if (rc) {
+		dt_err(np, "missing qcom,xo-div4-cbcr dt property\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	rc = of_property_read_u32(np, "qcom,test-pad-config", &p->plltest_val);
+	if (rc) {
+		dt_err(np, "missing qcom,test-pad-config dt property\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	p->base = mux->base;
+	p->ctl_reg = mux->offset + 0x4;
+	p->status_reg = mux->offset + 0x8;
+	p->plltest_reg = mux->offset + 0xC;
+	mux->priv = p;
+
+	clk_ops_measure_mux = devm_kzalloc(dev, sizeof(*clk_ops_measure_mux),
+								GFP_KERNEL);
+	if (!clk_ops_measure_mux) {
+		dt_err(np, "memory alloc failure\n");
+		return ERR_PTR(-ENOMEM);
+	}
+
+	*clk_ops_measure_mux = clk_ops_gen_mux;
+	clk_ops_measure_mux->get_rate = measure_get_rate;
+
+	mux->c.ops = clk_ops_measure_mux;
+
+	/* Already did generic clk init */
+	return &mux->c;
+}
+MSMCLK_PARSER(measure_clk_dt_parser, "qcom,measure-mux", 0);
+
+static void *div_clk_dt_parser(struct device *dev,
+					struct device_node *np)
+{
+	struct div_clk *div_clk;
+	struct msmclk_data *drv;
+	int rc;
+
+	div_clk = devm_kzalloc(dev, sizeof(*div_clk), GFP_KERNEL);
+	if (!div_clk) {
+		dt_err(np, "memory alloc failed\n");
+		return ERR_PTR(-ENOMEM);
+	}
+
+	rc = of_property_read_u32(np, "qcom,max-div", &div_clk->data.max_div);
+	if (rc) {
+		dt_err(np, "missing qcom,max-div\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	rc = of_property_read_u32(np, "qcom,min-div", &div_clk->data.min_div);
+	if (rc) {
+		dt_err(np, "missing qcom,min-div\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	rc = of_property_read_u32(np, "qcom,base-offset", &div_clk->offset);
+	if (rc) {
+		dt_err(np, "missing qcom,base-offset\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	rc = of_property_read_u32(np, "qcom,mask", &div_clk->mask);
+	if (rc) {
+		dt_err(np, "missing qcom,mask\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	rc = of_property_read_u32(np, "qcom,shift", &div_clk->shift);
+	if (rc) {
+		dt_err(np, "missing qcom,shift\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	if (of_property_read_bool(np, "qcom,slave-div"))
+		div_clk->c.ops = &clk_ops_slave_div;
+	else
+		div_clk->c.ops = &clk_ops_div;
+	div_clk->ops = &div_reg_ops;
+
+	drv = msmclk_parse_phandle(dev, np->parent->phandle);
+	if (IS_ERR_OR_NULL(drv))
+		return ERR_CAST(drv);
+	div_clk->base = &drv->base;
+
+	return msmclk_generic_clk_init(dev, np, &div_clk->c);
+}
+MSMCLK_PARSER(div_clk_dt_parser, "qcom,div-clk", 0);
+
+static void *fixed_div_clk_dt_parser(struct device *dev,
+						struct device_node *np)
+{
+	struct div_clk *div_clk;
+	int rc;
+
+	div_clk = devm_kzalloc(dev, sizeof(*div_clk), GFP_KERNEL);
+	if (!div_clk) {
+		dt_err(np, "memory alloc failed\n");
+		return ERR_PTR(-ENOMEM);
+	}
+
+	rc = of_property_read_u32(np, "qcom,div", &div_clk->data.div);
+	if (rc) {
+		dt_err(np, "missing qcom,div\n");
+		return ERR_PTR(-EINVAL);
+	}
+	div_clk->data.min_div = div_clk->data.div;
+	div_clk->data.max_div = div_clk->data.div;
+
+	if (of_property_read_bool(np, "qcom,slave-div"))
+		div_clk->c.ops = &clk_ops_slave_div;
+	else
+		div_clk->c.ops = &clk_ops_div;
+	div_clk->ops = &div_reg_ops;
+
+	return msmclk_generic_clk_init(dev, np, &div_clk->c);
+}
+MSMCLK_PARSER(fixed_div_clk_dt_parser, "qcom,fixed-div-clk", 0);
+
+static void *reset_clk_dt_parser(struct device *dev,
+					struct device_node *np)
+{
+	struct reset_clk *reset_clk;
+	struct msmclk_data *drv;
+	int rc;
+
+	reset_clk = devm_kzalloc(dev, sizeof(*reset_clk), GFP_KERNEL);
+	if (!reset_clk) {
+		dt_err(np, "memory alloc failed\n");
+		return ERR_PTR(-ENOMEM);
+	}
+
+	rc = of_property_read_u32(np, "qcom,base-offset",
+						&reset_clk->reset_reg);
+	if (rc) {
+		dt_err(np, "missing qcom,base-offset\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	drv = msmclk_parse_phandle(dev, np->parent->phandle);
+	if (IS_ERR_OR_NULL(drv))
+		return ERR_CAST(drv);
+	reset_clk->base = &drv->base;
+
+	reset_clk->c.ops = &clk_ops_rst;
+	return msmclk_generic_clk_init(dev, np, &reset_clk->c);
+}
+MSMCLK_PARSER(reset_clk_dt_parser, "qcom,reset-clk", 0);
diff -Nruw linux-4.4.115-fbx/drivers/clk/msm./clock-mmss-8998.c linux-4.4.115-fbx/drivers/clk/msm/clock-mmss-8998.c
--- linux-4.4.115-fbx/drivers/clk/msm./clock-mmss-8998.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/clk/msm/clock-mmss-8998.c	2019-01-22 16:16:23.019242024 +0100
@@ -0,0 +1,2954 @@
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/ctype.h>
+#include <linux/io.h>
+#include <linux/clk.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/clk/msm-clock-generic.h>
+
+#include <soc/qcom/clock-local2.h>
+#include <soc/qcom/clock-voter.h>
+#include <soc/qcom/clock-pll.h>
+#include <soc/qcom/clock-alpha-pll.h>
+
+#include <dt-bindings/clock/msm-clocks-8998.h>
+#include <dt-bindings/clock/msm-clocks-hwio-8998.h>
+
+#include "vdd-level-8998.h"
+#include "reset.h"
+
+static void __iomem *virt_base;
+
+#define mmsscc_xo_mm_source_val			0
+#define mmsscc_gpll0_mm_source_val		5
+#define mmsscc_gpll0_div_mm_source_val		6
+#define mmpll0_pll_out_mm_source_val		1
+#define mmpll1_pll_out_mm_source_val		2
+#define mmpll3_pll_out_mm_source_val		3
+#define mmpll4_pll_out_mm_source_val		2
+#define mmpll5_pll_out_mm_source_val		2
+#define mmpll6_pll_out_mm_source_val		4
+#define mmpll7_pll_out_mm_source_val		3
+#define mmpll10_pll_out_mm_source_val		4
+#define dsi0phypll_mm_source_val		1
+#define dsi1phypll_mm_source_val		2
+#define hdmiphypll_mm_source_val		1
+#define ext_dp_phy_pll_link_mm_source_val	1
+#define ext_dp_phy_pll_vco_mm_source_val	2
+
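+/* Convert a divider to its 2*div - 1 field encoding; 0 selects bypass */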
+#define FIXDIV(div) (div ? (2 * (div) - 1) : (0))
+
+#define F_MM(f, s, div, m, n) \
+	{ \
+		.freq_hz = (f), \
+		.src_clk = &s.c, \
+		.m_val = (m), \
+		.n_val = ~((n)-(m)) * !!(n), \
+		.d_val = ~(n),\
+		.div_src_val = BVAL(4, 0, (int)FIXDIV(div)) \
+			| BVAL(10, 8, s##_mm_source_val), \
+	}
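+/*
+ * F_MM/F_SLEW rows pack one frequency entry: the M/N counter values are
+ * stored as M, ~(N - M) and ~N, and the divider/source pair as
+ * 2*div - 1 in bits 4:0 plus the source select in bits 10:8.
+ */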
+
+#define F_SLEW(f, s_f, s, div, m, n) \
+	{ \
+		.freq_hz = (f), \
+		.src_freq = (s_f), \
+		.src_clk = &s.c, \
+		.m_val = (m), \
+		.n_val = ~((n)-(m)) * !!(n), \
+		.d_val = ~(n),\
+		.div_src_val = BVAL(4, 0, (int)(2*(div) - 1)) \
+			| BVAL(10, 8, s##_mm_source_val), \
+	}
+
+DEFINE_EXT_CLK(mmsscc_xo, NULL);
+DEFINE_EXT_CLK(mmsscc_gpll0, NULL);
+DEFINE_EXT_CLK(mmsscc_gpll0_div, NULL);
+DEFINE_EXT_CLK(ext_dp_phy_pll_vco, NULL);
+DEFINE_EXT_CLK(ext_dp_phy_pll_link, NULL);
+
+static DEFINE_VDD_REGULATORS(vdd_dig, VDD_DIG_NUM, 1, vdd_corner, NULL);
+static DEFINE_VDD_REGULATORS(vdd_mmsscc_mx, VDD_DIG_NUM, 1, vdd_corner, NULL);
+
+static struct alpha_pll_masks pll_masks_p = {
+	.lock_mask = BIT(31),
+	.active_mask = BIT(30),
+	.update_mask = BIT(22),
+	.output_mask = 0xf,
+};
+
+static struct pll_vote_clk mmpll0_pll = {
+	.en_reg = (void __iomem *)MMSS_PLL_VOTE_APCS,
+	.en_mask = BIT(0),
+	.status_reg = (void __iomem *)MMSS_MMPLL0_PLL_MODE,
+	.status_mask = BIT(31),
+	.base = &virt_base,
+	.c = {
+		.rate = 808000000,
+		.parent = &mmsscc_xo.c,
+		.dbg_name = "mmpll0",
+		.ops = &clk_ops_pll_vote,
+		VDD_MM_PLL_FMAX_MAP2(LOWER, 404000000, NOMINAL, 808000000),
+		CLK_INIT(mmpll0_pll.c),
+	},
+};
+DEFINE_EXT_CLK(mmpll0_pll_out, &mmpll0_pll.c);
+
+static struct pll_vote_clk mmpll1_pll = {
+	.en_reg = (void __iomem *)MMSS_PLL_VOTE_APCS,
+	.en_mask = BIT(1),
+	.status_reg = (void __iomem *)MMSS_MMPLL1_PLL_MODE,
+	.status_mask = BIT(31),
+	.base = &virt_base,
+	.c = {
+		.rate = 812000000,
+		.parent = &mmsscc_xo.c,
+		.dbg_name = "mmpll1_pll",
+		.ops = &clk_ops_pll_vote,
+		VDD_MM_PLL_FMAX_MAP2(LOWER, 406000000, NOMINAL, 812000000),
+		CLK_INIT(mmpll1_pll.c),
+	},
+};
+DEFINE_EXT_CLK(mmpll1_pll_out, &mmpll1_pll.c);
+
+static struct alpha_pll_clk mmpll3_pll = {
+	.offset = MMSS_MMPLL3_PLL_MODE,
+	.masks = &pll_masks_p,
+	.enable_config = 0x1,
+	.base = &virt_base,
+	.is_fabia = true,
+	.c = {
+		.rate = 930000000,
+		.parent = &mmsscc_xo.c,
+		.dbg_name = "mmpll3_pll",
+		.ops = &clk_ops_fixed_fabia_alpha_pll,
+		VDD_MM_PLL_FMAX_MAP2(LOWER, 465000000, LOW, 930000000),
+		CLK_INIT(mmpll3_pll.c),
+	},
+};
+DEFINE_EXT_CLK(mmpll3_pll_out, &mmpll3_pll.c);
+
+static struct alpha_pll_clk mmpll4_pll = {
+	.offset = MMSS_MMPLL4_PLL_MODE,
+	.masks = &pll_masks_p,
+	.enable_config = 0x1,
+	.base = &virt_base,
+	.is_fabia = true,
+	.c = {
+		.rate = 768000000,
+		.parent = &mmsscc_xo.c,
+		.dbg_name = "mmpll4_pll",
+		.ops = &clk_ops_fixed_fabia_alpha_pll,
+		VDD_MM_PLL_FMAX_MAP2(LOWER, 384000000, LOW, 768000000),
+		CLK_INIT(mmpll4_pll.c),
+	},
+};
+DEFINE_EXT_CLK(mmpll4_pll_out, &mmpll4_pll.c);
+
+static struct alpha_pll_clk mmpll5_pll = {
+	.offset = MMSS_MMPLL5_PLL_MODE,
+	.masks = &pll_masks_p,
+	.enable_config = 0x1,
+	.base = &virt_base,
+	.is_fabia = true,
+	.c = {
+		.rate = 825000000,
+		.parent = &mmsscc_xo.c,
+		.dbg_name = "mmpll5_pll",
+		.ops = &clk_ops_fixed_fabia_alpha_pll,
+		VDD_MM_PLL_FMAX_MAP2(LOWER, 412500000, LOW, 825000000),
+		CLK_INIT(mmpll5_pll.c),
+	},
+};
+DEFINE_EXT_CLK(mmpll5_pll_out, &mmpll5_pll.c);
+
+static struct alpha_pll_clk mmpll6_pll = {
+	.offset = MMSS_MMPLL6_PLL_MODE,
+	.masks = &pll_masks_p,
+	.enable_config = 0x1,
+	.base = &virt_base,
+	.is_fabia = true,
+	.c = {
+		.rate = 720000000,
+		.parent = &mmsscc_xo.c,
+		.dbg_name = "mmpll6_pll",
+		.ops = &clk_ops_fixed_fabia_alpha_pll,
+		VDD_MM_PLL_FMAX_MAP2(LOWER, 360000000, NOMINAL, 720000000),
+		CLK_INIT(mmpll6_pll.c),
+	},
+};
+DEFINE_EXT_CLK(mmpll6_pll_out, &mmpll6_pll.c);
+
+static struct alpha_pll_clk mmpll7_pll = {
+	.offset = MMSS_MMPLL7_PLL_MODE,
+	.masks = &pll_masks_p,
+	.enable_config = 0x1,
+	.base = &virt_base,
+	.is_fabia = true,
+	.c = {
+		.rate = 960000000,
+		.parent = &mmsscc_xo.c,
+		.dbg_name = "mmpll7_pll",
+		.ops = &clk_ops_fixed_fabia_alpha_pll,
+		VDD_MM_PLL_FMAX_MAP1(LOW, 960000000),
+		CLK_INIT(mmpll7_pll.c),
+	},
+};
+DEFINE_EXT_CLK(mmpll7_pll_out, &mmpll7_pll.c);
+
+static struct alpha_pll_clk mmpll10_pll = {
+	.offset = MMSS_MMPLL10_PLL_MODE,
+	.masks = &pll_masks_p,
+	.enable_config = 0x1,
+	.base = &virt_base,
+	.is_fabia = true,
+	.c = {
+		.rate = 576000000,
+		.parent = &mmsscc_xo.c,
+		.dbg_name = "mmpll10_pll",
+		.ops = &clk_ops_fixed_fabia_alpha_pll,
+		VDD_MM_PLL_FMAX_MAP2(LOWER, 288000000, NOMINAL, 576000000),
+		CLK_INIT(mmpll10_pll.c),
+	},
+};
+DEFINE_EXT_CLK(mmpll10_pll_out, &mmpll10_pll.c);
+
+static struct clk_freq_tbl ftbl_ahb_clk_src[] = {
+	F_MM(  19200000,      mmsscc_xo,    1,    0,     0),
+	F_MM(  40000000,   mmsscc_gpll0,   15,    0,     0),
+	F_MM(  80800000, mmpll0_pll_out,   10,    0,     0),
+	F_END
+};
+
+static struct rcg_clk ahb_clk_src = {
+	.cmd_rcgr_reg = MMSS_AHB_CMD_RCGR,
+	.set_rate = set_rate_hid,
+	.freq_tbl = ftbl_ahb_clk_src,
+	.current_freq = &rcg_dummy_freq,
+	.non_local_children = true,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "ahb_clk_src",
+		.ops = &clk_ops_rcg,
+		.flags = CLKFLAG_NO_RATE_CACHE,
+		VDD_DIG_FMAX_MAP3(LOWER, 19200000, LOW, 40000000,
+					NOMINAL, 80800000),
+		CLK_INIT(ahb_clk_src.c),
+	},
+};
+
+static struct clk_freq_tbl ftbl_csi_clk_src[] = {
+	F_MM( 164571429, mmpll10_pll_out,  3.5,    0,     0),
+	F_MM( 256000000,  mmpll4_pll_out,    3,    0,     0),
+	F_MM( 384000000,  mmpll4_pll_out,    2,    0,     0),
+	F_MM( 576000000, mmpll10_pll_out,    1,    0,     0),
+	F_END
+};
+
+static struct clk_freq_tbl ftbl_csi_clk_src_vq[] = {
+	F_MM( 164571429, mmpll10_pll_out,  3.5,    0,     0),
+	F_MM( 256000000,  mmpll4_pll_out,    3,    0,     0),
+	F_MM( 274290000,  mmpll7_pll_out,  3.5,    0,     0),
+	F_MM( 300000000,    mmsscc_gpll0,    2,    0,     0),
+	F_MM( 384000000,  mmpll4_pll_out,    2,    0,     0),
+	F_MM( 576000000, mmpll10_pll_out,    1,    0,     0),
+	F_END
+};
+
+static struct rcg_clk csi0_clk_src = {
+	.cmd_rcgr_reg = MMSS_CSI0_CMD_RCGR,
+	.set_rate = set_rate_hid,
+	.freq_tbl = ftbl_csi_clk_src,
+	.current_freq = &rcg_dummy_freq,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "csi0_clk_src",
+		.ops = &clk_ops_rcg,
+		.flags = CLKFLAG_NO_RATE_CACHE,
+		VDD_DIG_FMAX_MAP4(LOWER, 164571429, LOW, 256000000,
+					NOMINAL, 384000000, HIGH, 576000000),
+		CLK_INIT(csi0_clk_src.c),
+	},
+};
+
+static struct clk_freq_tbl ftbl_vfe_clk_src[] = {
+	F_MM( 200000000,    mmsscc_gpll0,    3,    0,     0),
+	F_MM( 300000000,    mmsscc_gpll0,    2,    0,     0),
+	F_MM( 320000000,  mmpll7_pll_out,    3,    0,     0),
+	F_MM( 384000000,  mmpll4_pll_out,    2,    0,     0),
+	F_MM( 576000000, mmpll10_pll_out,    1,    0,     0),
+	F_MM( 600000000,    mmsscc_gpll0,    1,    0,     0),
+	F_END
+};
+
+static struct clk_freq_tbl ftbl_vfe_clk_src_vq[] = {
+	F_MM( 200000000,    mmsscc_gpll0,    3,    0,     0),
+	F_MM( 300000000,    mmsscc_gpll0,    2,    0,     0),
+	F_MM( 320000000,  mmpll7_pll_out,    3,    0,     0),
+	F_MM( 384000000,  mmpll4_pll_out,    2,    0,     0),
+	F_MM( 404000000,  mmpll0_pll_out,    2,    0,     0),
+	F_MM( 480000000,  mmpll7_pll_out,    2,    0,     0),
+	F_MM( 576000000, mmpll10_pll_out,    1,    0,     0),
+	F_MM( 600000000,    mmsscc_gpll0,    1,    0,     0),
+	F_END
+};
+
+
+static struct rcg_clk vfe0_clk_src = {
+	.cmd_rcgr_reg = MMSS_VFE0_CMD_RCGR,
+	.set_rate = set_rate_hid,
+	.freq_tbl = ftbl_vfe_clk_src,
+	.current_freq = &rcg_dummy_freq,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "vfe0_clk_src",
+		.ops = &clk_ops_rcg,
+		.flags = CLKFLAG_NO_RATE_CACHE,
+		VDD_DIG_FMAX_MAP4(LOWER, 200000000, LOW, 384000000,
+					NOMINAL, 576000000, HIGH, 600000000),
+		CLK_INIT(vfe0_clk_src.c),
+	},
+};
+
+static struct rcg_clk vfe1_clk_src = {
+	.cmd_rcgr_reg = MMSS_VFE1_CMD_RCGR,
+	.set_rate = set_rate_hid,
+	.freq_tbl = ftbl_vfe_clk_src,
+	.current_freq = &rcg_dummy_freq,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "vfe1_clk_src",
+		.ops = &clk_ops_rcg,
+		.flags = CLKFLAG_NO_RATE_CACHE,
+		VDD_DIG_FMAX_MAP4(LOWER, 200000000, LOW, 384000000,
+					NOMINAL, 576000000, HIGH, 600000000),
+		CLK_INIT(vfe1_clk_src.c),
+	},
+};
+
+static struct clk_freq_tbl ftbl_mdp_clk_src[] = {
+	F_MM(  85714286,   mmsscc_gpll0,    7,    0,     0),
+	F_MM( 100000000,   mmsscc_gpll0,    6,    0,     0),
+	F_MM( 150000000,   mmsscc_gpll0,    4,    0,     0),
+	F_MM( 171428571,   mmsscc_gpll0,  3.5,    0,     0),
+	F_MM( 200000000,   mmsscc_gpll0,    3,    0,     0),
+	F_MM( 275000000, mmpll5_pll_out,    3,    0,     0),
+	F_MM( 300000000,   mmsscc_gpll0,    2,    0,     0),
+	F_MM( 330000000, mmpll5_pll_out,  2.5,    0,     0),
+	F_MM( 412500000, mmpll5_pll_out,    2,    0,     0),
+	F_END
+};
+
+static struct rcg_clk mdp_clk_src = {
+	.cmd_rcgr_reg = MMSS_MDP_CMD_RCGR,
+	.set_rate = set_rate_hid,
+	.freq_tbl = ftbl_mdp_clk_src,
+	.current_freq = &rcg_dummy_freq,
+	.non_local_children = true,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "mdp_clk_src",
+		.ops = &clk_ops_rcg,
+		.flags = CLKFLAG_NO_RATE_CACHE,
+		VDD_DIG_FMAX_MAP4(LOWER, 171430000, LOW, 275000000,
+					NOMINAL, 330000000, HIGH, 412500000),
+		CLK_INIT(mdp_clk_src.c),
+	},
+};
+
+static struct clk_freq_tbl ftbl_maxi_clk_src[] = {
+	F_MM(  19200000,        mmsscc_xo,    1,    0,     0),
+	F_MM(  75000000, mmsscc_gpll0_div,    4,    0,     0),
+	F_MM( 171428571,     mmsscc_gpll0,  3.5,    0,     0),
+	F_MM( 323200000,   mmpll0_pll_out,  2.5,    0,     0),
+	F_MM( 406000000,   mmpll1_pll_out,    2,    0,     0),
+	F_END
+};
+
+static struct rcg_clk maxi_clk_src = {
+	.cmd_rcgr_reg = MMSS_MAXI_CMD_RCGR,
+	.set_rate = set_rate_hid,
+	.freq_tbl = ftbl_maxi_clk_src,
+	.current_freq = &rcg_dummy_freq,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "maxi_clk_src",
+		.ops = &clk_ops_rcg,
+		.flags = CLKFLAG_NO_RATE_CACHE,
+		VDD_DIG_FMAX_MAP4(LOWER, 75000000, LOW, 171428571,
+					NOMINAL, 323200000, HIGH, 406000000),
+		CLK_INIT(maxi_clk_src.c),
+	},
+};
+
+static struct clk_freq_tbl ftbl_cpp_clk_src[] = {
+	F_MM( 100000000,    mmsscc_gpll0,    6,    0,     0),
+	F_MM( 200000000,    mmsscc_gpll0,    3,    0,     0),
+	F_MM( 576000000, mmpll10_pll_out,    1,    0,     0),
+	F_MM( 600000000,    mmsscc_gpll0,    1,    0,     0),
+	F_END
+};
+
+static struct clk_freq_tbl ftbl_cpp_clk_src_vq[] = {
+	F_MM( 100000000,    mmsscc_gpll0,    6,    0,     0),
+	F_MM( 200000000,    mmsscc_gpll0,    3,    0,     0),
+	F_MM( 384000000,  mmpll4_pll_out,    2,    0,     0),
+	F_MM( 404000000,  mmpll0_pll_out,    2,    0,     0),
+	F_MM( 480000000,  mmpll7_pll_out,    2,    0,     0),
+	F_MM( 576000000, mmpll10_pll_out,    1,    0,     0),
+	F_MM( 600000000,    mmsscc_gpll0,    1,    0,     0),
+	F_END
+};
+
+static struct rcg_clk cpp_clk_src = {
+	.cmd_rcgr_reg = MMSS_CPP_CMD_RCGR,
+	.set_rate = set_rate_hid,
+	.freq_tbl = ftbl_cpp_clk_src,
+	.current_freq = &rcg_dummy_freq,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "cpp_clk_src",
+		.ops = &clk_ops_rcg,
+		.flags = CLKFLAG_NO_RATE_CACHE,
+		VDD_DIG_FMAX_MAP4(LOWER, 100000000, LOW, 200000000,
+					NOMINAL, 576000000, HIGH, 600000000),
+		CLK_INIT(cpp_clk_src.c),
+	},
+};
+
+static struct clk_freq_tbl ftbl_jpeg0_clk_src[] = {
+	F_MM(  75000000,   mmsscc_gpll0,    8,    0,     0),
+	F_MM( 150000000,   mmsscc_gpll0,    4,    0,     0),
+	F_MM( 480000000, mmpll7_pll_out,    2,    0,     0),
+	F_END
+};
+
+static struct clk_freq_tbl ftbl_jpeg0_clk_src_vq[] = {
+	F_MM(  75000000,   mmsscc_gpll0,    8,    0,     0),
+	F_MM( 150000000,   mmsscc_gpll0,    4,    0,     0),
+	F_MM( 320000000, mmpll7_pll_out,    3,    0,     0),
+	F_MM( 480000000, mmpll7_pll_out,    2,    0,     0),
+	F_END
+};
+
+static struct rcg_clk jpeg0_clk_src = {
+	.cmd_rcgr_reg = MMSS_JPEG0_CMD_RCGR,
+	.set_rate = set_rate_hid,
+	.freq_tbl = ftbl_jpeg0_clk_src,
+	.current_freq = &rcg_dummy_freq,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "jpeg0_clk_src",
+		.ops = &clk_ops_rcg,
+		.flags = CLKFLAG_NO_RATE_CACHE,
+		VDD_DIG_FMAX_MAP3(LOWER, 75000000, LOW, 150000000,
+					NOMINAL, 480000000),
+		CLK_INIT(jpeg0_clk_src.c),
+	},
+};
+
+static struct clk_freq_tbl ftbl_rot_clk_src[] = {
+	F_MM( 171428571,   mmsscc_gpll0,  3.5,    0,     0),
+	F_MM( 275000000, mmpll5_pll_out,    3,    0,     0),
+	F_MM( 330000000, mmpll5_pll_out,  2.5,    0,     0),
+	F_MM( 412500000, mmpll5_pll_out,    2,    0,     0),
+	F_END
+};
+
+static struct rcg_clk rot_clk_src = {
+	.cmd_rcgr_reg = MMSS_ROT_CMD_RCGR,
+	.set_rate = set_rate_hid,
+	.freq_tbl = ftbl_rot_clk_src,
+	.current_freq = &rcg_dummy_freq,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "rot_clk_src",
+		.ops = &clk_ops_rcg,
+		.flags = CLKFLAG_NO_RATE_CACHE,
+		VDD_DIG_FMAX_MAP4(LOWER, 171430000, LOW, 275000000,
+					NOMINAL, 330000000, HIGH, 412500000),
+		CLK_INIT(rot_clk_src.c),
+	},
+};
+
+static struct clk_freq_tbl ftbl_video_core_clk_src[] = {
+	F_MM( 100000000,   mmsscc_gpll0,    6,    0,     0),
+	F_MM( 186000000, mmpll3_pll_out,    5,    0,     0),
+	F_MM( 360000000, mmpll6_pll_out,    2,    0,     0),
+	F_MM( 465000000, mmpll3_pll_out,    2,    0,     0),
+	F_END
+};
+
+static struct clk_freq_tbl ftbl_video_core_clk_src_vq[] = {
+	F_MM( 200000000,   mmsscc_gpll0,    3,    0,     0),
+	F_MM( 269330000, mmpll0_pll_out,    3,    0,     0),
+	F_MM( 355200000, mmpll6_pll_out,  2.5,    0,     0),
+	F_MM( 444000000, mmpll6_pll_out,    2,    0,     0),
+	F_MM( 533000000, mmpll3_pll_out,    2,    0,     0),
+	F_END
+};
+
+static struct rcg_clk video_core_clk_src = {
+	.cmd_rcgr_reg = MMSS_VIDEO_CORE_CMD_RCGR,
+	.set_rate = set_rate_hid,
+	.freq_tbl = ftbl_video_core_clk_src,
+	.current_freq = &rcg_dummy_freq,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "video_core_clk_src",
+		.ops = &clk_ops_rcg,
+		.flags = CLKFLAG_NO_RATE_CACHE,
+		VDD_DIG_FMAX_MAP4(LOWER, 100000000, LOW, 186000000,
+					NOMINAL, 360000000, HIGH, 465000000),
+		CLK_INIT(video_core_clk_src.c),
+	},
+};
+
+static struct clk_freq_tbl ftbl_csiphy_clk_src[] = {
+	F_MM(  164570000,  mmpll10_pll_out, 3.5,    0,     0),
+	F_MM(  256000000,   mmpll4_pll_out,   3,    0,     0),
+	F_MM(  384000000,   mmpll4_pll_out,   2,    0,     0),
+	F_END
+};
+
+static struct clk_freq_tbl ftbl_csiphy_clk_src_vq[] = {
+	F_MM(  164570000,  mmpll10_pll_out, 3.5,    0,     0),
+	F_MM(  256000000,   mmpll4_pll_out,   3,    0,     0),
+	F_MM(  274290000,   mmpll7_pll_out, 3.5,    0,     0),
+	F_MM(  300000000,     mmsscc_gpll0,   2,    0,     0),
+	F_MM(  384000000,   mmpll4_pll_out,   2,    0,     0),
+	F_END
+};
+
+static struct rcg_clk csiphy_clk_src = {
+	.cmd_rcgr_reg = MMSS_CSIPHY_CMD_RCGR,
+	.set_rate = set_rate_hid,
+	.freq_tbl = ftbl_csiphy_clk_src,
+	.current_freq = &rcg_dummy_freq,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "csiphy_clk_src",
+		.ops = &clk_ops_rcg,
+		.flags = CLKFLAG_NO_RATE_CACHE,
+		VDD_DIG_FMAX_MAP3(LOWER, 164570000, LOW, 256000000,
+					NOMINAL, 384000000),
+		CLK_INIT(csiphy_clk_src.c),
+	},
+};
+
+static struct rcg_clk csi1_clk_src = {
+	.cmd_rcgr_reg = MMSS_CSI1_CMD_RCGR,
+	.set_rate = set_rate_hid,
+	.freq_tbl = ftbl_csi_clk_src,
+	.current_freq = &rcg_dummy_freq,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "csi1_clk_src",
+		.ops = &clk_ops_rcg,
+		.flags = CLKFLAG_NO_RATE_CACHE,
+		VDD_DIG_FMAX_MAP4(LOWER, 164570000, LOW, 256000000,
+					NOMINAL, 384000000, HIGH, 576000000),
+		CLK_INIT(csi1_clk_src.c),
+	},
+};
+
+static struct rcg_clk csi2_clk_src = {
+	.cmd_rcgr_reg = MMSS_CSI2_CMD_RCGR,
+	.set_rate = set_rate_hid,
+	.freq_tbl = ftbl_csi_clk_src,
+	.current_freq = &rcg_dummy_freq,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "csi2_clk_src",
+		.ops = &clk_ops_rcg,
+		.flags = CLKFLAG_NO_RATE_CACHE,
+		VDD_DIG_FMAX_MAP4(LOWER, 164570000, LOW, 256000000,
+					NOMINAL, 384000000, HIGH, 576000000),
+		CLK_INIT(csi2_clk_src.c),
+	},
+};
+
+static struct rcg_clk csi3_clk_src = {
+	.cmd_rcgr_reg = MMSS_CSI3_CMD_RCGR,
+	.set_rate = set_rate_hid,
+	.freq_tbl = ftbl_csi_clk_src,
+	.current_freq = &rcg_dummy_freq,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "csi3_clk_src",
+		.ops = &clk_ops_rcg,
+		.flags = CLKFLAG_NO_RATE_CACHE,
+		VDD_DIG_FMAX_MAP4(LOWER, 164570000, LOW, 256000000,
+					NOMINAL, 384000000, HIGH, 576000000),
+		CLK_INIT(csi3_clk_src.c),
+	},
+};
+
+static struct clk_freq_tbl ftbl_fd_core_clk_src[] = {
+	F_MM( 100000000,    mmsscc_gpll0,    6,    0,     0),
+	F_MM( 200000000,    mmsscc_gpll0,    3,    0,     0),
+	F_MM( 576000000, mmpll10_pll_out,    1,    0,     0),
+	F_END
+};
+
+static struct clk_freq_tbl ftbl_fd_core_clk_src_vq[] = {
+	F_MM( 100000000,    mmsscc_gpll0,    6,    0,     0),
+	F_MM( 200000000,    mmsscc_gpll0,    3,    0,     0),
+	F_MM( 404000000,  mmpll0_pll_out,    2,    0,     0),
+	F_MM( 480000000,  mmpll7_pll_out,    2,    0,     0),
+	F_MM( 576000000, mmpll10_pll_out,    1,    0,     0),
+	F_END
+};
+
+static struct rcg_clk fd_core_clk_src = {
+	.cmd_rcgr_reg = MMSS_FD_CORE_CMD_RCGR,
+	.set_rate = set_rate_hid,
+	.freq_tbl = ftbl_fd_core_clk_src,
+	.current_freq = &rcg_dummy_freq,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "fd_core_clk_src",
+		.ops = &clk_ops_rcg,
+		.flags = CLKFLAG_NO_RATE_CACHE,
+		VDD_DIG_FMAX_MAP3(LOWER, 100000000, LOW, 200000000,
+					NOMINAL, 576000000),
+		CLK_INIT(fd_core_clk_src.c),
+	},
+};
+
+DEFINE_EXT_CLK(ext_byte0_clk_src, NULL);
+DEFINE_EXT_CLK(ext_byte1_clk_src, NULL);
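+/*
+ * The DSI byte clocks can be parented to the on-chip XO or to either DSI PHY
+ * PLL. freq_hz stays 0 in the entries below because the achievable rate comes
+ * from the external PHY PLL; the RCG only selects the source.
+ */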
+static struct clk_freq_tbl ftbl_byte_clk_src[] = {
+	{
+		.div_src_val = BVAL(10, 8, mmsscc_xo_mm_source_val)
+				| BVAL(4, 0, 0),
+		.src_clk = &mmsscc_xo.c,
+		.freq_hz = 0,
+	},
+	{
+		.div_src_val = BVAL(10, 8, dsi0phypll_mm_source_val),
+		.src_clk = &ext_byte0_clk_src.c,
+		.freq_hz = 0,
+	},
+	{
+		.div_src_val = BVAL(10, 8, dsi1phypll_mm_source_val)
+				| BVAL(4, 0, 0),
+		.src_clk = &ext_byte1_clk_src.c,
+		.freq_hz = 0,
+	},
+	F_END
+};
+
+static struct rcg_clk byte0_clk_src = {
+	.cmd_rcgr_reg = MMSS_BYTE0_CMD_RCGR,
+	.set_rate = set_rate_hid,
+	.current_freq = ftbl_byte_clk_src,
+	.freq_tbl = ftbl_byte_clk_src,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "byte0_clk_src",
+		.parent = &ext_byte0_clk_src.c,
+		.ops = &clk_ops_byte_multiparent,
+		.flags = CLKFLAG_NO_RATE_CACHE,
+		VDD_DIG_FMAX_MAP3(LOWER, 131250000, LOW, 210000000,
+						NOMINAL, 312500000),
+		CLK_INIT(byte0_clk_src.c),
+	},
+};
+
+static struct rcg_clk byte1_clk_src = {
+	.cmd_rcgr_reg = MMSS_BYTE1_CMD_RCGR,
+	.set_rate = set_rate_hid,
+	.current_freq = ftbl_byte_clk_src,
+	.freq_tbl = ftbl_byte_clk_src,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "byte1_clk_src",
+		.parent = &ext_byte1_clk_src.c,
+		.ops = &clk_ops_byte_multiparent,
+		.flags = CLKFLAG_NO_RATE_CACHE,
+		VDD_DIG_FMAX_MAP3(LOWER, 131250000, LOW, 210000000,
+						NOMINAL, 312500000),
+		CLK_INIT(byte1_clk_src.c),
+	},
+};
+
+DEFINE_EXT_CLK(ext_pclk0_clk_src, NULL);
+DEFINE_EXT_CLK(ext_pclk1_clk_src, NULL);
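+/*
+ * The DSI pixel clocks mirror the byte clock arrangement above, but use MND
+ * rate setting (set_rate_mnd) so fractional pixel rates can be derived from
+ * the PHY PLL outputs.
+ */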
+static struct clk_freq_tbl ftbl_pclk_clk_src[] = {
+	{
+		.div_src_val = BVAL(10, 8, mmsscc_xo_mm_source_val)
+				| BVAL(4, 0, 0),
+		.src_clk = &mmsscc_xo.c,
+		.freq_hz = 0,
+	},
+	{
+		.div_src_val = BVAL(10, 8, dsi0phypll_mm_source_val)
+				| BVAL(4, 0, 0),
+		.src_clk = &ext_pclk0_clk_src.c,
+		.freq_hz = 0,
+	},
+	{
+		.div_src_val = BVAL(10, 8, dsi1phypll_mm_source_val)
+				| BVAL(4, 0, 0),
+		.src_clk = &ext_pclk1_clk_src.c,
+		.freq_hz = 0,
+	},
+	F_END
+};
+
+static struct rcg_clk pclk0_clk_src = {
+	.cmd_rcgr_reg = MMSS_PCLK0_CMD_RCGR,
+	.set_rate = set_rate_mnd,
+	.current_freq = ftbl_pclk_clk_src,
+	.freq_tbl = ftbl_pclk_clk_src,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "pclk0_clk_src",
+		.parent = &ext_pclk0_clk_src.c,
+		.ops = &clk_ops_pixel_multiparent,
+		.flags = CLKFLAG_NO_RATE_CACHE,
+		VDD_DIG_FMAX_MAP3(LOWER, 175000000, LOW, 280000000,
+						NOMINAL, 416670000),
+		CLK_INIT(pclk0_clk_src.c),
+	},
+};
+
+static struct rcg_clk pclk1_clk_src = {
+	.cmd_rcgr_reg = MMSS_PCLK1_CMD_RCGR,
+	.set_rate = set_rate_mnd,
+	.current_freq = ftbl_pclk_clk_src,
+	.freq_tbl = ftbl_pclk_clk_src,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "pclk1_clk_src",
+		.parent = &ext_pclk1_clk_src.c,
+		.ops = &clk_ops_pixel_multiparent,
+		.flags = CLKFLAG_NO_RATE_CACHE,
+		VDD_DIG_FMAX_MAP3(LOWER, 175000000, LOW, 280000000,
+						NOMINAL, 416670000),
+		CLK_INIT(pclk1_clk_src.c),
+	},
+};
+
+static struct clk_freq_tbl ftbl_video_subcore_clk_src[] = {
+	F_MM( 100000000,   mmsscc_gpll0,    6,    0,     0),
+	F_MM( 186000000, mmpll3_pll_out,    5,    0,     0),
+	F_MM( 360000000, mmpll6_pll_out,    2,    0,     0),
+	F_MM( 465000000, mmpll3_pll_out,    2,    0,     0),
+	F_END
+};
+
+static struct clk_freq_tbl ftbl_video_subcore_clk_src_vq[] = {
+	F_MM( 200000000,   mmsscc_gpll0,    3,    0,     0),
+	F_MM( 269330000, mmpll0_pll_out,    3,    0,     0),
+	F_MM( 355200000, mmpll6_pll_out,  2.5,    0,     0),
+	F_MM( 444000000, mmpll6_pll_out,    2,    0,     0),
+	F_MM( 533000000, mmpll3_pll_out,    2,    0,     0),
+	F_END
+};
+
+static struct rcg_clk video_subcore0_clk_src = {
+	.cmd_rcgr_reg = MMSS_VIDEO_SUBCORE0_CMD_RCGR,
+	.set_rate = set_rate_hid,
+	.freq_tbl = ftbl_video_subcore_clk_src,
+	.current_freq = &rcg_dummy_freq,
+	.non_local_control_timeout = 1000,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "video_subcore0_clk_src",
+		.ops = &clk_ops_rcg,
+		.flags = CLKFLAG_NO_RATE_CACHE,
+		VDD_DIG_FMAX_MAP4(LOWER, 100000000, LOW, 186000000,
+					NOMINAL, 360000000, HIGH, 465000000),
+		CLK_INIT(video_subcore0_clk_src.c),
+	},
+};
+
+static struct rcg_clk video_subcore1_clk_src = {
+	.cmd_rcgr_reg = MMSS_VIDEO_SUBCORE1_CMD_RCGR,
+	.set_rate = set_rate_hid,
+	.freq_tbl = ftbl_video_subcore_clk_src,
+	.current_freq = &rcg_dummy_freq,
+	.non_local_control_timeout = 1000,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "video_subcore1_clk_src",
+		.ops = &clk_ops_rcg,
+		.flags = CLKFLAG_NO_RATE_CACHE,
+		VDD_DIG_FMAX_MAP4(LOWER, 100000000, LOW, 186000000,
+					NOMINAL, 360000000, HIGH, 465000000),
+		CLK_INIT(video_subcore1_clk_src.c),
+	},
+};
+
+static struct clk_freq_tbl ftbl_cci_clk_src[] = {
+	F_MM(  37500000,     mmsscc_gpll0,   16,    0,     0),
+	F_MM(  50000000,     mmsscc_gpll0,   12,    0,     0),
+	F_MM( 100000000,     mmsscc_gpll0,    6,    0,     0),
+	F_END
+};
+
+static struct rcg_clk cci_clk_src = {
+	.cmd_rcgr_reg = MMSS_CCI_CMD_RCGR,
+	.set_rate = set_rate_mnd,
+	.freq_tbl = ftbl_cci_clk_src,
+	.current_freq = &rcg_dummy_freq,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "cci_clk_src",
+		.ops = &clk_ops_rcg_mnd,
+		.flags = CLKFLAG_NO_RATE_CACHE,
+		VDD_DIG_FMAX_MAP3(LOWER, 37500000, LOW, 50000000,
+					NOMINAL, 100000000),
+		CLK_INIT(cci_clk_src.c),
+	},
+};
+
+static struct clk_freq_tbl ftbl_camss_gp_clk_src[] = {
+	F_MM(     10000,	mmsscc_xo,   16,    1,   120),
+	F_MM(     24000,	mmsscc_xo,   16,    1,    50),
+	F_MM(   6000000,     mmsscc_gpll0,   10,    1,    10),
+	F_MM(  12000000,     mmsscc_gpll0,   10,    1,     5),
+	F_MM(  13000000,     mmsscc_gpll0,    4,   13,   150),
+	F_MM(  24000000,     mmsscc_gpll0,    5,    1,     5),
+	F_END
+};
+
+static struct rcg_clk camss_gp0_clk_src = {
+	.cmd_rcgr_reg = MMSS_CAMSS_GP0_CMD_RCGR,
+	.set_rate = set_rate_mnd,
+	.freq_tbl = ftbl_camss_gp_clk_src,
+	.current_freq = &rcg_dummy_freq,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "camss_gp0_clk_src",
+		.ops = &clk_ops_rcg_mnd,
+		.flags = CLKFLAG_NO_RATE_CACHE,
+		VDD_DIG_FMAX_MAP3(LOWER, 50000000, LOW, 100000000,
+					NOMINAL, 200000000),
+		CLK_INIT(camss_gp0_clk_src.c),
+	},
+};
+
+static struct rcg_clk camss_gp1_clk_src = {
+	.cmd_rcgr_reg = MMSS_CAMSS_GP1_CMD_RCGR,
+	.set_rate = set_rate_mnd,
+	.freq_tbl = ftbl_camss_gp_clk_src,
+	.current_freq = &rcg_dummy_freq,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "camss_gp1_clk_src",
+		.ops = &clk_ops_rcg_mnd,
+		.flags = CLKFLAG_NO_RATE_CACHE,
+		VDD_DIG_FMAX_MAP3(LOWER, 50000000, LOW, 100000000,
+					NOMINAL, 200000000),
+		CLK_INIT(camss_gp1_clk_src.c),
+	},
+};
+
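+/*
+ * Sensor master clocks (MCLKs) rely on M/N fractional division. Assuming the
+ * usual 600 MHz GPLL0 (300 MHz on gpll0_div), e.g. 24 MHz below is
+ * 300 MHz * 2/25 and 48 MHz is 600 MHz * 2/25.
+ */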
+static struct clk_freq_tbl ftbl_mclk_clk_src[] = {
+	F_MM(   4800000,	mmsscc_xo,    4,    0,     0),
+	F_MM(   6000000, mmsscc_gpll0_div,   10,    1,     5),
+	F_MM(   8000000, mmsscc_gpll0_div,    1,    2,    75),
+	F_MM(   9600000,	mmsscc_xo,    2,    0,     0),
+	F_MM(  16666667, mmsscc_gpll0_div,    2,    1,     9),
+	F_MM(  19200000,	mmsscc_xo,    1,    0,     0),
+	F_MM(  24000000, mmsscc_gpll0_div,    1,    2,    25),
+	F_MM(  33333333, mmsscc_gpll0_div,    1,    1,     9),
+	F_MM(  48000000,     mmsscc_gpll0,    1,    2,    25),
+	F_MM(  66666667,     mmsscc_gpll0,    1,    1,     9),
+	F_END
+};
+
+static struct rcg_clk mclk0_clk_src = {
+	.cmd_rcgr_reg = MMSS_MCLK0_CMD_RCGR,
+	.set_rate = set_rate_mnd,
+	.freq_tbl = ftbl_mclk_clk_src,
+	.current_freq = &rcg_dummy_freq,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "mclk0_clk_src",
+		.ops = &clk_ops_rcg_mnd,
+		.flags = CLKFLAG_NO_RATE_CACHE,
+		VDD_DIG_FMAX_MAP3(LOWER, 33333333, LOW, 66666667,
+					NOMINAL, 68571429),
+		CLK_INIT(mclk0_clk_src.c),
+	},
+};
+
+static struct rcg_clk mclk1_clk_src = {
+	.cmd_rcgr_reg = MMSS_MCLK1_CMD_RCGR,
+	.set_rate = set_rate_mnd,
+	.freq_tbl = ftbl_mclk_clk_src,
+	.current_freq = &rcg_dummy_freq,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "mclk1_clk_src",
+		.ops = &clk_ops_rcg_mnd,
+		.flags = CLKFLAG_NO_RATE_CACHE,
+		VDD_DIG_FMAX_MAP3(LOWER, 33333333, LOW, 66666667,
+					NOMINAL, 68571429),
+		CLK_INIT(mclk1_clk_src.c),
+	},
+};
+
+static struct rcg_clk mclk2_clk_src = {
+	.cmd_rcgr_reg = MMSS_MCLK2_CMD_RCGR,
+	.set_rate = set_rate_mnd,
+	.freq_tbl = ftbl_mclk_clk_src,
+	.current_freq = &rcg_dummy_freq,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "mclk2_clk_src",
+		.ops = &clk_ops_rcg_mnd,
+		.flags = CLKFLAG_NO_RATE_CACHE,
+		VDD_DIG_FMAX_MAP3(LOWER, 33333333, LOW, 66666667,
+					NOMINAL, 68571429),
+		CLK_INIT(mclk2_clk_src.c),
+	},
+};
+
+static struct rcg_clk mclk3_clk_src = {
+	.cmd_rcgr_reg = MMSS_MCLK3_CMD_RCGR,
+	.set_rate = set_rate_mnd,
+	.freq_tbl = ftbl_mclk_clk_src,
+	.current_freq = &rcg_dummy_freq,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "mclk3_clk_src",
+		.ops = &clk_ops_rcg_mnd,
+		.flags = CLKFLAG_NO_RATE_CACHE,
+		VDD_DIG_FMAX_MAP3(LOWER, 33333333, LOW, 66666667,
+					NOMINAL, 68571429),
+		CLK_INIT(mclk3_clk_src.c),
+	},
+};
+
+static struct clk_freq_tbl ftbl_csiphytimer_clk_src[] = {
+	F_MM( 200000000,   mmsscc_gpll0,    3,    0,     0),
+	F_MM( 269333333, mmpll0_pll_out,    3,    0,     0),
+	F_END
+};
+
+static struct rcg_clk csi0phytimer_clk_src = {
+	.cmd_rcgr_reg = MMSS_CSI0PHYTIMER_CMD_RCGR,
+	.set_rate = set_rate_hid,
+	.freq_tbl = ftbl_csiphytimer_clk_src,
+	.current_freq = &rcg_dummy_freq,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "csi0phytimer_clk_src",
+		.ops = &clk_ops_rcg,
+		.flags = CLKFLAG_NO_RATE_CACHE,
+		VDD_DIG_FMAX_MAP3(LOWER, 100000000, LOW, 200000000,
+					NOMINAL, 269333333),
+		CLK_INIT(csi0phytimer_clk_src.c),
+	},
+};
+
+static struct rcg_clk csi1phytimer_clk_src = {
+	.cmd_rcgr_reg = MMSS_CSI1PHYTIMER_CMD_RCGR,
+	.set_rate = set_rate_hid,
+	.freq_tbl = ftbl_csiphytimer_clk_src,
+	.current_freq = &rcg_dummy_freq,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "csi1phytimer_clk_src",
+		.ops = &clk_ops_rcg,
+		.flags = CLKFLAG_NO_RATE_CACHE,
+		VDD_DIG_FMAX_MAP3(LOWER, 100000000, LOW, 200000000,
+					NOMINAL, 269333333),
+		CLK_INIT(csi1phytimer_clk_src.c),
+	},
+};
+
+static struct rcg_clk csi2phytimer_clk_src = {
+	.cmd_rcgr_reg = MMSS_CSI2PHYTIMER_CMD_RCGR,
+	.set_rate = set_rate_hid,
+	.freq_tbl = ftbl_csiphytimer_clk_src,
+	.current_freq = &rcg_dummy_freq,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "csi2phytimer_clk_src",
+		.ops = &clk_ops_rcg,
+		.flags = CLKFLAG_NO_RATE_CACHE,
+		VDD_DIG_FMAX_MAP3(LOWER, 100000000, LOW, 200000000,
+					NOMINAL, 269333333),
+		CLK_INIT(csi2phytimer_clk_src.c),
+	},
+};
+
+static struct clk_freq_tbl ftbl_dp_gtc_clk_src[] = {
+	F_MM( 300000000,   mmsscc_gpll0,    2,    0,     0),
+	F_END
+};
+
+static struct rcg_clk dp_gtc_clk_src = {
+	.cmd_rcgr_reg = MMSS_DP_GTC_CMD_RCGR,
+	.set_rate = set_rate_hid,
+	.freq_tbl = ftbl_dp_gtc_clk_src,
+	.current_freq = &rcg_dummy_freq,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "dp_gtc_clk_src",
+		.ops = &clk_ops_rcg,
+		.flags = CLKFLAG_NO_RATE_CACHE,
+		VDD_DIG_FMAX_MAP2(LOWER, 40000000, LOW, 300000000),
+		CLK_INIT(dp_gtc_clk_src.c),
+	},
+};
+
+static struct clk_freq_tbl ftbl_esc_clk_src[] = {
+	F_MM(  19200000,      mmsscc_xo,    1,    0,     0),
+	F_END
+};
+
+static struct rcg_clk esc0_clk_src = {
+	.cmd_rcgr_reg = MMSS_ESC0_CMD_RCGR,
+	.set_rate = set_rate_hid,
+	.freq_tbl = ftbl_esc_clk_src,
+	.current_freq = &rcg_dummy_freq,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "esc0_clk_src",
+		.ops = &clk_ops_rcg,
+		.flags = CLKFLAG_NO_RATE_CACHE,
+		VDD_DIG_FMAX_MAP2(LOWER, 19200000, NOMINAL, 19200000),
+		CLK_INIT(esc0_clk_src.c),
+	},
+};
+
+static struct rcg_clk esc1_clk_src = {
+	.cmd_rcgr_reg = MMSS_ESC1_CMD_RCGR,
+	.set_rate = set_rate_hid,
+	.freq_tbl = ftbl_esc_clk_src,
+	.current_freq = &rcg_dummy_freq,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "esc1_clk_src",
+		.ops = &clk_ops_rcg,
+		.flags = CLKFLAG_NO_RATE_CACHE,
+		VDD_DIG_FMAX_MAP2(LOWER, 19200000, NOMINAL, 19200000),
+		CLK_INIT(esc1_clk_src.c),
+	},
+};
+
+DEFINE_EXT_CLK(ext_extpclk_clk_src, NULL);
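+/*
+ * extpclk is sourced entirely from the HDMI PHY PLL, so the RCG carries no
+ * frequency table or set_rate op of its own; its rate follows the external
+ * parent.
+ */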
+static struct clk_freq_tbl ftbl_extpclk_clk_src[] = {
+	{
+		.div_src_val = BVAL(10, 8, hdmiphypll_mm_source_val),
+		.src_clk = &ext_extpclk_clk_src.c,
+	},
+	F_END
+};
+
+static struct rcg_clk extpclk_clk_src = {
+	.cmd_rcgr_reg = MMSS_EXTPCLK_CMD_RCGR,
+	.current_freq = ftbl_extpclk_clk_src,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "extpclk_clk_src",
+		.parent = &ext_extpclk_clk_src.c,
+		.ops = &clk_ops_byte,
+		.flags = CLKFLAG_NO_RATE_CACHE,
+		VDD_DIG_FMAX_MAP3(LOWER, 150000000, LOW, 300000000,
+					NOMINAL, 600000000),
+		CLK_INIT(extpclk_clk_src.c),
+	},
+};
+
+static struct clk_freq_tbl ftbl_hdmi_clk_src[] = {
+	F_MM(  19200000,      mmsscc_xo,    1,    0,     0),
+	F_END
+};
+
+static struct rcg_clk hdmi_clk_src = {
+	.cmd_rcgr_reg = MMSS_HDMI_CMD_RCGR,
+	.set_rate = set_rate_hid,
+	.freq_tbl = ftbl_hdmi_clk_src,
+	.current_freq = &rcg_dummy_freq,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "hdmi_clk_src",
+		.ops = &clk_ops_rcg,
+		.flags = CLKFLAG_NO_RATE_CACHE,
+		VDD_DIG_FMAX_MAP2(LOWER, 19200000, NOMINAL, 19200000),
+		CLK_INIT(hdmi_clk_src.c),
+	},
+};
+
+static struct clk_freq_tbl ftbl_vsync_clk_src[] = {
+	F_MM(  19200000,      mmsscc_xo,    1,    0,     0),
+	F_END
+};
+
+static struct rcg_clk vsync_clk_src = {
+	.cmd_rcgr_reg = MMSS_VSYNC_CMD_RCGR,
+	.set_rate = set_rate_hid,
+	.freq_tbl = ftbl_vsync_clk_src,
+	.current_freq = &rcg_dummy_freq,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "vsync_clk_src",
+		.ops = &clk_ops_rcg,
+		.flags = CLKFLAG_NO_RATE_CACHE,
+		VDD_DIG_FMAX_MAP2(LOWER, 19200000, NOMINAL, 19200000),
+		CLK_INIT(vsync_clk_src.c),
+	},
+};
+
+static struct clk_freq_tbl ftbl_dp_aux_clk_src[] = {
+	F_MM(  19200000,      mmsscc_xo,    1,    0,     0),
+	F_END
+};
+
+static struct rcg_clk dp_aux_clk_src = {
+	.cmd_rcgr_reg = MMSS_DP_AUX_CMD_RCGR,
+	.set_rate = set_rate_hid,
+	.freq_tbl = ftbl_dp_aux_clk_src,
+	.current_freq = &rcg_dummy_freq,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "dp_aux_clk_src",
+		.ops = &clk_ops_rcg,
+		.flags = CLKFLAG_NO_RATE_CACHE,
+		VDD_DIG_FMAX_MAP2(LOWER, 19200000, NOMINAL, 19200000),
+		CLK_INIT(dp_aux_clk_src.c),
+	},
+};
+
+static struct clk_freq_tbl ftbl_dp_pixel_clk_src[] = {
+	{
+		.div_src_val = BVAL(10, 8, ext_dp_phy_pll_vco_mm_source_val),
+		.src_clk = &ext_dp_phy_pll_vco.c,
+	},
+	F_END
+};
+
+static struct rcg_clk dp_pixel_clk_src = {
+	.cmd_rcgr_reg = MMSS_DP_PIXEL_CMD_RCGR,
+	.set_rate = set_rate_mnd,
+	.current_freq = ftbl_dp_pixel_clk_src,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "dp_pixel_clk_src",
+		.parent = &ext_dp_phy_pll_vco.c,
+		.ops = &clk_ops_rcg_dp,
+		.flags = CLKFLAG_NO_RATE_CACHE,
+		VDD_DIG_FMAX_MAP3(LOWER, 154000000, LOW, 337500000,
+					NOMINAL, 675000000),
+		CLK_INIT(dp_pixel_clk_src.c),
+	},
+};
+
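+/*
+ * The three entries correspond to the standard DP link rates (RBR/HBR/HBR2).
+ * Rates here appear to be expressed in kHz, with the PHY PLL output running
+ * at twice the link clock.
+ */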
+static struct clk_freq_tbl ftbl_dp_link_clk_src[] = {
+	F_SLEW( 162000,  324000, ext_dp_phy_pll_link,   2,   0,   0),
+	F_SLEW( 270000,  540000, ext_dp_phy_pll_link,   2,   0,   0),
+	F_SLEW( 540000, 1080000, ext_dp_phy_pll_link,   2,   0,   0),
+	F_END
+};
+
+static struct rcg_clk dp_link_clk_src = {
+	.cmd_rcgr_reg = MMSS_DP_LINK_CMD_RCGR,
+	.set_rate = set_rate_hid,
+	.freq_tbl = ftbl_dp_link_clk_src,
+	.current_freq = ftbl_dp_link_clk_src,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "dp_link_clk_src",
+		.ops = &clk_ops_rcg,
+		.flags = CLKFLAG_NO_RATE_CACHE,
+		VDD_DIG_FMAX_MAP3(LOWER, 162000, LOW, 270000,
+					NOMINAL, 540000),
+		CLK_INIT(dp_link_clk_src.c),
+	},
+};
+
+/*
+ * Current understanding is that the DP PLL is configured through the set_rate
+ * ops of dp_link_clk_src and dp_pixel_clk_src. A set_rate call on this RCG
+ * therefore never reaches the external DP clocks.
+ */
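+/*
+ * Each rate below is the DP link PLL output scaled by M/N = 5/16
+ * (e.g. 324000 * 5/16 = 101250).
+ */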
+static struct clk_freq_tbl ftbl_dp_crypto_clk_src[] = {
+	F_MM( 101250, ext_dp_phy_pll_link,   1,   5,   16),
+	F_MM( 168750, ext_dp_phy_pll_link,   1,   5,   16),
+	F_MM( 337500, ext_dp_phy_pll_link,   1,   5,   16),
+	F_END
+};
+
+static struct rcg_clk dp_crypto_clk_src = {
+	.cmd_rcgr_reg = MMSS_DP_CRYPTO_CMD_RCGR,
+	.set_rate = set_rate_mnd,
+	.freq_tbl = ftbl_dp_crypto_clk_src,
+	.current_freq = ftbl_dp_crypto_clk_src,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "dp_crypto_clk_src",
+		.ops = &clk_ops_rcg_mnd,
+		.flags = CLKFLAG_NO_RATE_CACHE,
+		VDD_DIG_FMAX_MAP3(LOWER, 101250, LOW, 168750,
+					NOMINAL, 337500),
+		CLK_INIT(dp_crypto_clk_src.c),
+	},
+};
+
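+/*
+ * Branch (CBCR) gate clocks. A branch with has_sibling = 1 shares its source
+ * with other branches, so set_rate is refused and rate control stays with the
+ * RCG; has_sibling = 0 branches forward set_rate to their parent.
+ */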
+static struct branch_clk mmss_bimc_smmu_ahb_clk = {
+	.cbcr_reg = MMSS_BIMC_SMMU_AHB_CBCR,
+	.has_sibling = 1,
+	.check_enable_bit = true,
+	.no_halt_check_on_disable = true,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "mmss_bimc_smmu_ahb_clk",
+		.ops = &clk_ops_branch,
+		CLK_INIT(mmss_bimc_smmu_ahb_clk.c),
+	},
+};
+
+static struct branch_clk mmss_bimc_smmu_axi_clk = {
+	.cbcr_reg = MMSS_BIMC_SMMU_AXI_CBCR,
+	.has_sibling = 1,
+	.check_enable_bit = true,
+	.no_halt_check_on_disable = true,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "mmss_bimc_smmu_axi_clk",
+		.ops = &clk_ops_branch,
+		CLK_INIT(mmss_bimc_smmu_axi_clk.c),
+	},
+};
+
+static struct branch_clk mmss_snoc_dvm_axi_clk = {
+	.cbcr_reg = MMSS_SNOC_DVM_AXI_CBCR,
+	.has_sibling = 1,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "mmss_snoc_dvm_axi_clk",
+		.ops = &clk_ops_branch,
+		CLK_INIT(mmss_snoc_dvm_axi_clk.c),
+	},
+};
+
+static struct branch_clk mmss_camss_ahb_clk = {
+	.cbcr_reg = MMSS_CAMSS_AHB_CBCR,
+	.has_sibling = 1,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "mmss_camss_ahb_clk",
+		.ops = &clk_ops_branch,
+		CLK_INIT(mmss_camss_ahb_clk.c),
+	},
+};
+
+static struct branch_clk mmss_camss_cci_ahb_clk = {
+	.cbcr_reg = MMSS_CAMSS_CCI_AHB_CBCR,
+	.has_sibling = 1,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "mmss_camss_cci_ahb_clk",
+		.ops = &clk_ops_branch,
+		CLK_INIT(mmss_camss_cci_ahb_clk.c),
+	},
+};
+
+static struct branch_clk mmss_camss_cci_clk = {
+	.cbcr_reg = MMSS_CAMSS_CCI_CBCR,
+	.has_sibling = 0,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "mmss_camss_cci_clk",
+		.parent = &cci_clk_src.c,
+		.flags = CLKFLAG_NO_RATE_CACHE,
+		.ops = &clk_ops_branch,
+		CLK_INIT(mmss_camss_cci_clk.c),
+	},
+};
+
+static struct branch_clk mmss_camss_cpp_ahb_clk = {
+	.cbcr_reg = MMSS_CAMSS_CPP_AHB_CBCR,
+	.has_sibling = 1,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "mmss_camss_cpp_ahb_clk",
+		.ops = &clk_ops_branch,
+		CLK_INIT(mmss_camss_cpp_ahb_clk.c),
+	},
+};
+
+static struct branch_clk mmss_camss_cpp_clk = {
+	.cbcr_reg = MMSS_CAMSS_CPP_CBCR,
+	.has_sibling = 0,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "mmss_camss_cpp_clk",
+		.parent = &cpp_clk_src.c,
+		.flags = CLKFLAG_NO_RATE_CACHE,
+		.ops = &clk_ops_branch,
+		CLK_INIT(mmss_camss_cpp_clk.c),
+	},
+};
+
+static struct branch_clk mmss_camss_cpp_axi_clk = {
+	.cbcr_reg = MMSS_CAMSS_CPP_AXI_CBCR,
+	.has_sibling = 1,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "mmss_camss_cpp_axi_clk",
+		.ops = &clk_ops_branch,
+		CLK_INIT(mmss_camss_cpp_axi_clk.c),
+	},
+};
+
+static struct branch_clk mmss_camss_cpp_vbif_ahb_clk = {
+	.cbcr_reg = MMSS_CAMSS_CPP_VBIF_AHB_CBCR,
+	.has_sibling = 1,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "mmss_camss_cpp_vbif_ahb_clk",
+		.ops = &clk_ops_branch,
+		CLK_INIT(mmss_camss_cpp_vbif_ahb_clk.c),
+	},
+};
+
+static struct branch_clk mmss_camss_cphy_csid0_clk = {
+	.cbcr_reg = MMSS_CAMSS_CPHY_CSID0_CBCR,
+	.has_sibling = 1,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "mmss_camss_cphy_csid0_clk",
+		.parent = &csiphy_clk_src.c,
+		.ops = &clk_ops_branch,
+		CLK_INIT(mmss_camss_cphy_csid0_clk.c),
+	},
+};
+
+static struct branch_clk mmss_camss_csi0_ahb_clk = {
+	.cbcr_reg = MMSS_CAMSS_CSI0_AHB_CBCR,
+	.has_sibling = 1,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "mmss_camss_csi0_ahb_clk",
+		.ops = &clk_ops_branch,
+		CLK_INIT(mmss_camss_csi0_ahb_clk.c),
+	},
+};
+
+static struct branch_clk mmss_camss_csi0_clk = {
+	.cbcr_reg = MMSS_CAMSS_CSI0_CBCR,
+	.has_sibling = 0,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "mmss_camss_csi0_clk",
+		.parent = &csi0_clk_src.c,
+		.flags = CLKFLAG_NO_RATE_CACHE,
+		.ops = &clk_ops_branch,
+		CLK_INIT(mmss_camss_csi0_clk.c),
+	},
+};
+
+static struct branch_clk mmss_camss_csi0pix_clk = {
+	.cbcr_reg = MMSS_CAMSS_CSI0PIX_CBCR,
+	.has_sibling = 1,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "mmss_camss_csi0pix_clk",
+		.parent = &csi0_clk_src.c,
+		.ops = &clk_ops_branch,
+		CLK_INIT(mmss_camss_csi0pix_clk.c),
+	},
+};
+
+static struct branch_clk mmss_camss_csi0rdi_clk = {
+	.cbcr_reg = MMSS_CAMSS_CSI0RDI_CBCR,
+	.has_sibling = 1,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "mmss_camss_csi0rdi_clk",
+		.parent = &csi0_clk_src.c,
+		.ops = &clk_ops_branch,
+		CLK_INIT(mmss_camss_csi0rdi_clk.c),
+	},
+};
+
+static struct branch_clk mmss_camss_cphy_csid1_clk = {
+	.cbcr_reg = MMSS_CAMSS_CPHY_CSID1_CBCR,
+	.has_sibling = 1,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "mmss_camss_cphy_csid1_clk",
+		.parent = &csiphy_clk_src.c,
+		.ops = &clk_ops_branch,
+		CLK_INIT(mmss_camss_cphy_csid1_clk.c),
+	},
+};
+
+static struct branch_clk mmss_camss_csi1_ahb_clk = {
+	.cbcr_reg = MMSS_CAMSS_CSI1_AHB_CBCR,
+	.has_sibling = 1,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "mmss_camss_csi1_ahb_clk",
+		.ops = &clk_ops_branch,
+		CLK_INIT(mmss_camss_csi1_ahb_clk.c),
+	},
+};
+
+static struct branch_clk mmss_camss_csi1_clk = {
+	.cbcr_reg = MMSS_CAMSS_CSI1_CBCR,
+	.has_sibling = 0,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "mmss_camss_csi1_clk",
+		.parent = &csi1_clk_src.c,
+		.flags = CLKFLAG_NO_RATE_CACHE,
+		.ops = &clk_ops_branch,
+		CLK_INIT(mmss_camss_csi1_clk.c),
+	},
+};
+
+static struct branch_clk mmss_camss_csi1pix_clk = {
+	.cbcr_reg = MMSS_CAMSS_CSI1PIX_CBCR,
+	.has_sibling = 1,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "mmss_camss_csi1pix_clk",
+		.parent = &csi1_clk_src.c,
+		.ops = &clk_ops_branch,
+		CLK_INIT(mmss_camss_csi1pix_clk.c),
+	},
+};
+
+static struct branch_clk mmss_camss_csi1rdi_clk = {
+	.cbcr_reg = MMSS_CAMSS_CSI1RDI_CBCR,
+	.has_sibling = 1,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "mmss_camss_csi1rdi_clk",
+		.parent = &csi1_clk_src.c,
+		.ops = &clk_ops_branch,
+		CLK_INIT(mmss_camss_csi1rdi_clk.c),
+	},
+};
+
+static struct branch_clk mmss_camss_cphy_csid2_clk = {
+	.cbcr_reg = MMSS_CAMSS_CPHY_CSID2_CBCR,
+	.has_sibling = 1,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "mmss_camss_cphy_csid2_clk",
+		.parent = &csiphy_clk_src.c,
+		.ops = &clk_ops_branch,
+		CLK_INIT(mmss_camss_cphy_csid2_clk.c),
+	},
+};
+
+static struct branch_clk mmss_camss_csi2_ahb_clk = {
+	.cbcr_reg = MMSS_CAMSS_CSI2_AHB_CBCR,
+	.has_sibling = 1,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "mmss_camss_csi2_ahb_clk",
+		.ops = &clk_ops_branch,
+		CLK_INIT(mmss_camss_csi2_ahb_clk.c),
+	},
+};
+
+static struct branch_clk mmss_camss_csi2_clk = {
+	.cbcr_reg = MMSS_CAMSS_CSI2_CBCR,
+	.has_sibling = 0,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "mmss_camss_csi2_clk",
+		.parent = &csi2_clk_src.c,
+		.flags = CLKFLAG_NO_RATE_CACHE,
+		.ops = &clk_ops_branch,
+		CLK_INIT(mmss_camss_csi2_clk.c),
+	},
+};
+
+static struct branch_clk mmss_camss_csi2pix_clk = {
+	.cbcr_reg = MMSS_CAMSS_CSI2PIX_CBCR,
+	.has_sibling = 1,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "mmss_camss_csi2pix_clk",
+		.parent = &csi2_clk_src.c,
+		.ops = &clk_ops_branch,
+		CLK_INIT(mmss_camss_csi2pix_clk.c),
+	},
+};
+
+static struct branch_clk mmss_camss_csi2rdi_clk = {
+	.cbcr_reg = MMSS_CAMSS_CSI2RDI_CBCR,
+	.has_sibling = 1,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "mmss_camss_csi2rdi_clk",
+		.parent = &csi2_clk_src.c,
+		.ops = &clk_ops_branch,
+		CLK_INIT(mmss_camss_csi2rdi_clk.c),
+	},
+};
+
+static struct branch_clk mmss_camss_cphy_csid3_clk = {
+	.cbcr_reg = MMSS_CAMSS_CPHY_CSID3_CBCR,
+	.has_sibling = 1,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "mmss_camss_cphy_csid3_clk",
+		.parent = &csiphy_clk_src.c,
+		.ops = &clk_ops_branch,
+		CLK_INIT(mmss_camss_cphy_csid3_clk.c),
+	},
+};
+
+static struct branch_clk mmss_camss_csi3_ahb_clk = {
+	.cbcr_reg = MMSS_CAMSS_CSI3_AHB_CBCR,
+	.has_sibling = 1,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "mmss_camss_csi3_ahb_clk",
+		.ops = &clk_ops_branch,
+		CLK_INIT(mmss_camss_csi3_ahb_clk.c),
+	},
+};
+
+static struct branch_clk mmss_camss_csi3_clk = {
+	.cbcr_reg = MMSS_CAMSS_CSI3_CBCR,
+	.has_sibling = 0,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "mmss_camss_csi3_clk",
+		.parent = &csi3_clk_src.c,
+		.flags = CLKFLAG_NO_RATE_CACHE,
+		.ops = &clk_ops_branch,
+		CLK_INIT(mmss_camss_csi3_clk.c),
+	},
+};
+
+static struct branch_clk mmss_camss_csi3pix_clk = {
+	.cbcr_reg = MMSS_CAMSS_CSI3PIX_CBCR,
+	.has_sibling = 1,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "mmss_camss_csi3pix_clk",
+		.parent = &csi3_clk_src.c,
+		.ops = &clk_ops_branch,
+		CLK_INIT(mmss_camss_csi3pix_clk.c),
+	},
+};
+
+static struct branch_clk mmss_camss_csi3rdi_clk = {
+	.cbcr_reg = MMSS_CAMSS_CSI3RDI_CBCR,
+	.has_sibling = 1,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "mmss_camss_csi3rdi_clk",
+		.parent = &csi3_clk_src.c,
+		.ops = &clk_ops_branch,
+		CLK_INIT(mmss_camss_csi3rdi_clk.c),
+	},
+};
+
+static struct branch_clk mmss_camss_csi_vfe0_clk = {
+	.cbcr_reg = MMSS_CAMSS_CSI_VFE0_CBCR,
+	.has_sibling = 1,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "mmss_camss_csi_vfe0_clk",
+		.parent = &vfe0_clk_src.c,
+		.ops = &clk_ops_branch,
+		CLK_INIT(mmss_camss_csi_vfe0_clk.c),
+	},
+};
+
+static struct branch_clk mmss_camss_csi_vfe1_clk = {
+	.cbcr_reg = MMSS_CAMSS_CSI_VFE1_CBCR,
+	.has_sibling = 1,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "mmss_camss_csi_vfe1_clk",
+		.parent = &vfe1_clk_src.c,
+		.ops = &clk_ops_branch,
+		CLK_INIT(mmss_camss_csi_vfe1_clk.c),
+	},
+};
+
+static struct branch_clk mmss_camss_csiphy0_clk = {
+	.cbcr_reg = MMSS_CAMSS_CSIPHY0_CBCR,
+	.has_sibling = 0,
+	.aggr_sibling_rates = true,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "mmss_camss_csiphy0_clk",
+		.parent = &csiphy_clk_src.c,
+		.flags = CLKFLAG_NO_RATE_CACHE,
+		.ops = &clk_ops_branch,
+		CLK_INIT(mmss_camss_csiphy0_clk.c),
+	},
+};
+
+static struct branch_clk mmss_camss_csiphy1_clk = {
+	.cbcr_reg = MMSS_CAMSS_CSIPHY1_CBCR,
+	.has_sibling = 0,
+	.aggr_sibling_rates = true,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "mmss_camss_csiphy1_clk",
+		.parent = &csiphy_clk_src.c,
+		.flags = CLKFLAG_NO_RATE_CACHE,
+		.ops = &clk_ops_branch,
+		CLK_INIT(mmss_camss_csiphy1_clk.c),
+	},
+};
+
+static struct branch_clk mmss_camss_csiphy2_clk = {
+	.cbcr_reg = MMSS_CAMSS_CSIPHY2_CBCR,
+	.has_sibling = 0,
+	.aggr_sibling_rates = true,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "mmss_camss_csiphy2_clk",
+		.parent = &csiphy_clk_src.c,
+		.flags = CLKFLAG_NO_RATE_CACHE,
+		.ops = &clk_ops_branch,
+		CLK_INIT(mmss_camss_csiphy2_clk.c),
+	},
+};
+
+static struct branch_clk mmss_fd_ahb_clk = {
+	.cbcr_reg = MMSS_FD_AHB_CBCR,
+	.has_sibling = 1,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "mmss_fd_ahb_clk",
+		.ops = &clk_ops_branch,
+		CLK_INIT(mmss_fd_ahb_clk.c),
+	},
+};
+
+static struct branch_clk mmss_fd_core_clk = {
+	.cbcr_reg = MMSS_FD_CORE_CBCR,
+	.has_sibling = 0,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "mmss_fd_core_clk",
+		.parent = &fd_core_clk_src.c,
+		.flags = CLKFLAG_NO_RATE_CACHE,
+		.ops = &clk_ops_branch,
+		CLK_INIT(mmss_fd_core_clk.c),
+	},
+};
+
+static struct branch_clk mmss_fd_core_uar_clk = {
+	.cbcr_reg = MMSS_FD_CORE_UAR_CBCR,
+	.has_sibling = 1,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "mmss_fd_core_uar_clk",
+		.parent = &fd_core_clk_src.c,
+		.ops = &clk_ops_branch,
+		CLK_INIT(mmss_fd_core_uar_clk.c),
+	},
+};
+
+static struct branch_clk mmss_camss_gp0_clk = {
+	.cbcr_reg = MMSS_CAMSS_GP0_CBCR,
+	.has_sibling = 0,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "mmss_camss_gp0_clk",
+		.parent = &camss_gp0_clk_src.c,
+		.flags = CLKFLAG_NO_RATE_CACHE,
+		.ops = &clk_ops_branch,
+		CLK_INIT(mmss_camss_gp0_clk.c),
+	},
+};
+
+static struct branch_clk mmss_camss_gp1_clk = {
+	.cbcr_reg = MMSS_CAMSS_GP1_CBCR,
+	.has_sibling = 0,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "mmss_camss_gp1_clk",
+		.parent = &camss_gp1_clk_src.c,
+		.flags = CLKFLAG_NO_RATE_CACHE,
+		.ops = &clk_ops_branch,
+		CLK_INIT(mmss_camss_gp1_clk.c),
+	},
+};
+
+static struct branch_clk mmss_camss_ispif_ahb_clk = {
+	.cbcr_reg = MMSS_CAMSS_ISPIF_AHB_CBCR,
+	.has_sibling = 1,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "mmss_camss_ispif_ahb_clk",
+		.ops = &clk_ops_branch,
+		CLK_INIT(mmss_camss_ispif_ahb_clk.c),
+	},
+};
+
+static struct branch_clk mmss_camss_jpeg0_clk = {
+	.cbcr_reg = MMSS_CAMSS_JPEG0_CBCR,
+	.has_sibling = 0,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "mmss_camss_jpeg0_clk",
+		.parent = &jpeg0_clk_src.c,
+		.flags = CLKFLAG_NO_RATE_CACHE,
+		.ops = &clk_ops_branch,
+		CLK_INIT(mmss_camss_jpeg0_clk.c),
+	},
+};
+
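+/*
+ * Two software voters on top of the JPEG0 branch let the JPEG core and its
+ * DMA client enable the clock independently.
+ */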
+static DEFINE_CLK_VOTER(mmss_camss_jpeg0_vote_clk, &mmss_camss_jpeg0_clk.c, 0);
+static DEFINE_CLK_VOTER(mmss_camss_jpeg0_dma_vote_clk,
+					&mmss_camss_jpeg0_clk.c, 0);
+
+static struct branch_clk mmss_camss_jpeg_ahb_clk = {
+	.cbcr_reg = MMSS_CAMSS_JPEG_AHB_CBCR,
+	.has_sibling = 1,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "mmss_camss_jpeg_ahb_clk",
+		.ops = &clk_ops_branch,
+		CLK_INIT(mmss_camss_jpeg_ahb_clk.c),
+	},
+};
+
+static struct branch_clk mmss_camss_jpeg_axi_clk = {
+	.cbcr_reg = MMSS_CAMSS_JPEG_AXI_CBCR,
+	.has_sibling = 1,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "mmss_camss_jpeg_axi_clk",
+		.ops = &clk_ops_branch,
+		CLK_INIT(mmss_camss_jpeg_axi_clk.c),
+	},
+};
+
+static struct branch_clk mmss_camss_mclk0_clk = {
+	.cbcr_reg = MMSS_CAMSS_MCLK0_CBCR,
+	.has_sibling = 0,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "mmss_camss_mclk0_clk",
+		.parent = &mclk0_clk_src.c,
+		.flags = CLKFLAG_NO_RATE_CACHE,
+		.ops = &clk_ops_branch,
+		CLK_INIT(mmss_camss_mclk0_clk.c),
+	},
+};
+
+static struct branch_clk mmss_camss_mclk1_clk = {
+	.cbcr_reg = MMSS_CAMSS_MCLK1_CBCR,
+	.has_sibling = 0,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "mmss_camss_mclk1_clk",
+		.parent = &mclk1_clk_src.c,
+		.flags = CLKFLAG_NO_RATE_CACHE,
+		.ops = &clk_ops_branch,
+		CLK_INIT(mmss_camss_mclk1_clk.c),
+	},
+};
+
+static struct branch_clk mmss_camss_mclk2_clk = {
+	.cbcr_reg = MMSS_CAMSS_MCLK2_CBCR,
+	.has_sibling = 0,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "mmss_camss_mclk2_clk",
+		.parent = &mclk2_clk_src.c,
+		.flags = CLKFLAG_NO_RATE_CACHE,
+		.ops = &clk_ops_branch,
+		CLK_INIT(mmss_camss_mclk2_clk.c),
+	},
+};
+
+static struct branch_clk mmss_camss_mclk3_clk = {
+	.cbcr_reg = MMSS_CAMSS_MCLK3_CBCR,
+	.has_sibling = 0,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "mmss_camss_mclk3_clk",
+		.parent = &mclk3_clk_src.c,
+		.flags = CLKFLAG_NO_RATE_CACHE,
+		.ops = &clk_ops_branch,
+		CLK_INIT(mmss_camss_mclk3_clk.c),
+	},
+};
+
+static struct branch_clk mmss_camss_micro_ahb_clk = {
+	.cbcr_reg = MMSS_CAMSS_MICRO_AHB_CBCR,
+	.has_sibling = 1,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "mmss_camss_micro_ahb_clk",
+		.ops = &clk_ops_branch,
+		CLK_INIT(mmss_camss_micro_ahb_clk.c),
+	},
+};
+
+static struct branch_clk mmss_camss_csi0phytimer_clk = {
+	.cbcr_reg = MMSS_CAMSS_CSI0PHYTIMER_CBCR,
+	.has_sibling = 0,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "mmss_camss_csi0phytimer_clk",
+		.parent = &csi0phytimer_clk_src.c,
+		.flags = CLKFLAG_NO_RATE_CACHE,
+		.ops = &clk_ops_branch,
+		CLK_INIT(mmss_camss_csi0phytimer_clk.c),
+	},
+};
+
+static struct branch_clk mmss_camss_csi1phytimer_clk = {
+	.cbcr_reg = MMSS_CAMSS_CSI1PHYTIMER_CBCR,
+	.has_sibling = 0,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "mmss_camss_csi1phytimer_clk",
+		.parent = &csi1phytimer_clk_src.c,
+		.flags = CLKFLAG_NO_RATE_CACHE,
+		.ops = &clk_ops_branch,
+		CLK_INIT(mmss_camss_csi1phytimer_clk.c),
+	},
+};
+
+static struct branch_clk mmss_camss_csi2phytimer_clk = {
+	.cbcr_reg = MMSS_CAMSS_CSI2PHYTIMER_CBCR,
+	.has_sibling = 0,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "mmss_camss_csi2phytimer_clk",
+		.parent = &csi2phytimer_clk_src.c,
+		.flags = CLKFLAG_NO_RATE_CACHE,
+		.ops = &clk_ops_branch,
+		CLK_INIT(mmss_camss_csi2phytimer_clk.c),
+	},
+};
+
+static struct branch_clk mmss_camss_top_ahb_clk = {
+	.cbcr_reg = MMSS_CAMSS_TOP_AHB_CBCR,
+	.has_sibling = 1,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "mmss_camss_top_ahb_clk",
+		.ops = &clk_ops_branch,
+		CLK_INIT(mmss_camss_top_ahb_clk.c),
+	},
+};
+
+static struct branch_clk mmss_camss_vfe0_ahb_clk = {
+	.cbcr_reg = MMSS_CAMSS_VFE0_AHB_CBCR,
+	.has_sibling = 1,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "mmss_camss_vfe0_ahb_clk",
+		.ops = &clk_ops_branch,
+		CLK_INIT(mmss_camss_vfe0_ahb_clk.c),
+	},
+};
+
+static struct branch_clk mmss_camss_vfe0_clk = {
+	.cbcr_reg = MMSS_CAMSS_VFE0_CBCR,
+	.has_sibling = 0,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "mmss_camss_vfe0_clk",
+		.parent = &vfe0_clk_src.c,
+		.flags = CLKFLAG_NO_RATE_CACHE,
+		.ops = &clk_ops_branch,
+		CLK_INIT(mmss_camss_vfe0_clk.c),
+	},
+};
+
+static struct branch_clk mmss_camss_vfe0_stream_clk = {
+	.cbcr_reg = MMSS_CAMSS_VFE0_STREAM_CBCR,
+	.has_sibling = 1,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "mmss_camss_vfe0_stream_clk",
+		.parent = &vfe0_clk_src.c,
+		.ops = &clk_ops_branch,
+		CLK_INIT(mmss_camss_vfe0_stream_clk.c),
+	},
+};
+
+static struct branch_clk mmss_camss_vfe1_ahb_clk = {
+	.cbcr_reg = MMSS_CAMSS_VFE1_AHB_CBCR,
+	.has_sibling = 1,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "mmss_camss_vfe1_ahb_clk",
+		.ops = &clk_ops_branch,
+		CLK_INIT(mmss_camss_vfe1_ahb_clk.c),
+	},
+};
+
+static struct branch_clk mmss_camss_vfe1_clk = {
+	.cbcr_reg = MMSS_CAMSS_VFE1_CBCR,
+	.has_sibling = 0,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "mmss_camss_vfe1_clk",
+		.parent = &vfe1_clk_src.c,
+		.flags = CLKFLAG_NO_RATE_CACHE,
+		.ops = &clk_ops_branch,
+		CLK_INIT(mmss_camss_vfe1_clk.c),
+	},
+};
+
+static struct branch_clk mmss_camss_vfe1_stream_clk = {
+	.cbcr_reg = MMSS_CAMSS_VFE1_STREAM_CBCR,
+	.has_sibling = 1,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "mmss_camss_vfe1_stream_clk",
+		.parent = &vfe1_clk_src.c,
+		.ops = &clk_ops_branch,
+		CLK_INIT(mmss_camss_vfe1_stream_clk.c),
+	},
+};
+
+static struct branch_clk mmss_camss_vfe_vbif_ahb_clk = {
+	.cbcr_reg = MMSS_CAMSS_VFE_VBIF_AHB_CBCR,
+	.has_sibling = 1,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "mmss_camss_vfe_vbif_ahb_clk",
+		.ops = &clk_ops_branch,
+		CLK_INIT(mmss_camss_vfe_vbif_ahb_clk.c),
+	},
+};
+
+static struct branch_clk mmss_camss_vfe_vbif_axi_clk = {
+	.cbcr_reg = MMSS_CAMSS_VFE_VBIF_AXI_CBCR,
+	.has_sibling = 1,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "mmss_camss_vfe_vbif_axi_clk",
+		.ops = &clk_ops_branch,
+		CLK_INIT(mmss_camss_vfe_vbif_axi_clk.c),
+	},
+};
+
+static struct branch_clk mmss_mdss_ahb_clk = {
+	.cbcr_reg = MMSS_MDSS_AHB_CBCR,
+	.has_sibling = 1,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "mmss_mdss_ahb_clk",
+		.ops = &clk_ops_branch,
+		CLK_INIT(mmss_mdss_ahb_clk.c),
+	},
+};
+
+static struct branch_clk mmss_mdss_axi_clk = {
+	.cbcr_reg = MMSS_MDSS_AXI_CBCR,
+	.has_sibling = 1,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "mmss_mdss_axi_clk",
+		.ops = &clk_ops_branch,
+		CLK_INIT(mmss_mdss_axi_clk.c),
+	},
+};
+
+static struct branch_clk mmss_mdss_byte0_clk = {
+	.cbcr_reg = MMSS_MDSS_BYTE0_CBCR,
+	.has_sibling = 0,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "mmss_mdss_byte0_clk",
+		.parent = &byte0_clk_src.c,
+		.flags = CLKFLAG_NO_RATE_CACHE,
+		.ops = &clk_ops_branch,
+		CLK_INIT(mmss_mdss_byte0_clk.c),
+	},
+};
+
+static struct div_clk mmss_mdss_byte0_intf_div_clk = {
+	.offset = MMSS_MDSS_BYTE0_INTF_DIV,
+	.mask = 0x3,
+	.shift = 0,
+	.data = {
+		.min_div = 1,
+		.max_div = 4,
+	},
+	.base = &virt_base,
+	/*
+	 * NOTE: The ops do not support div-3. The current assumption is that
+	 * div-3 is not a recommended setting for this divider.
+	 */
+	.ops = &postdiv_reg_ops,
+	.c = {
+		.dbg_name = "mmss_mdss_byte0_intf_div_clk",
+		.parent = &byte0_clk_src.c,
+		.ops = &clk_ops_slave_div,
+		.flags = CLKFLAG_NO_RATE_CACHE,
+		CLK_INIT(mmss_mdss_byte0_intf_div_clk.c),
+	},
+};
+
+static struct branch_clk mmss_mdss_byte0_intf_clk = {
+	.cbcr_reg = MMSS_MDSS_BYTE0_INTF_CBCR,
+	.has_sibling = 0,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "mmss_mdss_byte0_intf_clk",
+		.parent = &mmss_mdss_byte0_intf_div_clk.c,
+		.flags = CLKFLAG_NO_RATE_CACHE,
+		.ops = &clk_ops_branch,
+		CLK_INIT(mmss_mdss_byte0_intf_clk.c),
+	},
+};
+
+static struct branch_clk mmss_mdss_byte1_clk = {
+	.cbcr_reg = MMSS_MDSS_BYTE1_CBCR,
+	.has_sibling = 0,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "mmss_mdss_byte1_clk",
+		.parent = &byte1_clk_src.c,
+		.flags = CLKFLAG_NO_RATE_CACHE,
+		.ops = &clk_ops_branch,
+		CLK_INIT(mmss_mdss_byte1_clk.c),
+	},
+};
+
+static struct div_clk mmss_mdss_byte1_intf_div_clk = {
+	.offset = MMSS_MDSS_BYTE1_INTF_DIV,
+	.mask = 0x3,
+	.shift = 0,
+	.data = {
+		.min_div = 1,
+		.max_div = 4,
+	},
+	.base = &virt_base,
+	/*
+	 * NOTE: The ops do not support div-3. The current assumption is that
+	 * div-3 is not a recommended setting for this divider.
+	 */
+	.ops = &postdiv_reg_ops,
+	.c = {
+		.dbg_name = "mmss_mdss_byte1_intf_div_clk",
+		.parent = &byte1_clk_src.c,
+		.ops = &clk_ops_slave_div,
+		.flags = CLKFLAG_NO_RATE_CACHE,
+		CLK_INIT(mmss_mdss_byte1_intf_div_clk.c),
+	},
+};
+
+static struct branch_clk mmss_mdss_byte1_intf_clk = {
+	.cbcr_reg = MMSS_MDSS_BYTE1_INTF_CBCR,
+	.has_sibling = 0,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "mmss_mdss_byte1_intf_clk",
+		.parent = &mmss_mdss_byte1_intf_div_clk.c,
+		.flags = CLKFLAG_NO_RATE_CACHE,
+		.ops = &clk_ops_branch,
+		CLK_INIT(mmss_mdss_byte1_intf_clk.c),
+	},
+};
+
+static struct branch_clk mmss_mdss_dp_aux_clk = {
+	.cbcr_reg = MMSS_MDSS_DP_AUX_CBCR,
+	.has_sibling = 0,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "mmss_mdss_dp_aux_clk",
+		.parent = &dp_aux_clk_src.c,
+		.flags = CLKFLAG_NO_RATE_CACHE,
+		.ops = &clk_ops_branch,
+		CLK_INIT(mmss_mdss_dp_aux_clk.c),
+	},
+};
+
+static struct branch_clk mmss_mdss_dp_pixel_clk = {
+	.cbcr_reg = MMSS_MDSS_DP_PIXEL_CBCR,
+	.has_sibling = 0,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "mmss_mdss_dp_pixel_clk",
+		.parent = &dp_pixel_clk_src.c,
+		.flags = CLKFLAG_NO_RATE_CACHE,
+		.ops = &clk_ops_branch,
+		CLK_INIT(mmss_mdss_dp_pixel_clk.c),
+	},
+};
+
+static struct branch_clk mmss_mdss_dp_link_clk = {
+	.cbcr_reg = MMSS_MDSS_DP_LINK_CBCR,
+	.has_sibling = 0,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "mmss_mdss_dp_link_clk",
+		.parent = &dp_link_clk_src.c,
+		.flags = CLKFLAG_NO_RATE_CACHE,
+		.ops = &clk_ops_branch,
+		CLK_INIT(mmss_mdss_dp_link_clk.c),
+	},
+};
+
+/* Reset state of MMSS_MDSS_DP_LINK_INTF_DIV is 0x3 (div-4) */
+static struct branch_clk mmss_mdss_dp_link_intf_clk = {
+	.cbcr_reg = MMSS_MDSS_DP_LINK_INTF_CBCR,
+	.has_sibling = 1,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "mmss_mdss_dp_link_intf_clk",
+		.parent = &dp_link_clk_src.c,
+		.ops = &clk_ops_branch,
+		CLK_INIT(mmss_mdss_dp_link_intf_clk.c),
+	},
+};
+
+static struct branch_clk mmss_mdss_dp_crypto_clk = {
+	.cbcr_reg = MMSS_MDSS_DP_CRYPTO_CBCR,
+	.has_sibling = 0,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "mmss_mdss_dp_crypto_clk",
+		.parent = &dp_crypto_clk_src.c,
+		.flags = CLKFLAG_NO_RATE_CACHE,
+		.ops = &clk_ops_branch,
+		CLK_INIT(mmss_mdss_dp_crypto_clk.c),
+	},
+};
+
+static struct branch_clk mmss_mdss_dp_gtc_clk = {
+	.cbcr_reg = MMSS_MDSS_DP_GTC_CBCR,
+	.has_sibling = 0,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "mmss_mdss_dp_gtc_clk",
+		.parent = &dp_gtc_clk_src.c,
+		.flags = CLKFLAG_NO_RATE_CACHE,
+		.ops = &clk_ops_branch,
+		CLK_INIT(mmss_mdss_dp_gtc_clk.c),
+	},
+};
+
+static struct branch_clk mmss_mdss_esc0_clk = {
+	.cbcr_reg = MMSS_MDSS_ESC0_CBCR,
+	.has_sibling = 0,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "mmss_mdss_esc0_clk",
+		.parent = &esc0_clk_src.c,
+		.flags = CLKFLAG_NO_RATE_CACHE,
+		.ops = &clk_ops_branch,
+		CLK_INIT(mmss_mdss_esc0_clk.c),
+	},
+};
+
+static struct branch_clk mmss_mdss_esc1_clk = {
+	.cbcr_reg = MMSS_MDSS_ESC1_CBCR,
+	.has_sibling = 0,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "mmss_mdss_esc1_clk",
+		.parent = &esc1_clk_src.c,
+		.flags = CLKFLAG_NO_RATE_CACHE,
+		.ops = &clk_ops_branch,
+		CLK_INIT(mmss_mdss_esc1_clk.c),
+	},
+};
+
+static struct branch_clk mmss_mdss_extpclk_clk = {
+	.cbcr_reg = MMSS_MDSS_EXTPCLK_CBCR,
+	.has_sibling = 0,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "mmss_mdss_extpclk_clk",
+		.parent = &extpclk_clk_src.c,
+		.flags = CLKFLAG_NO_RATE_CACHE,
+		.ops = &clk_ops_branch,
+		CLK_INIT(mmss_mdss_extpclk_clk.c),
+	},
+};
+
+static struct branch_clk mmss_mdss_hdmi_clk = {
+	.cbcr_reg = MMSS_MDSS_HDMI_CBCR,
+	.has_sibling = 0,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "mmss_mdss_hdmi_clk",
+		.parent = &hdmi_clk_src.c,
+		.flags = CLKFLAG_NO_RATE_CACHE,
+		.ops = &clk_ops_branch,
+		CLK_INIT(mmss_mdss_hdmi_clk.c),
+	},
+};
+
+static struct branch_clk mmss_mdss_hdmi_dp_ahb_clk = {
+	.cbcr_reg = MMSS_MDSS_HDMI_DP_AHB_CBCR,
+	.has_sibling = 1,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "mmss_mdss_hdmi_dp_ahb_clk",
+		.ops = &clk_ops_branch,
+		CLK_INIT(mmss_mdss_hdmi_dp_ahb_clk.c),
+	},
+};
+
+static struct branch_clk mmss_mdss_mdp_clk = {
+	.cbcr_reg = MMSS_MDSS_MDP_CBCR,
+	.has_sibling = 0,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "mmss_mdss_mdp_clk",
+		.parent = &mdp_clk_src.c,
+		.flags = CLKFLAG_NO_RATE_CACHE,
+		.ops = &clk_ops_branch,
+		CLK_INIT(mmss_mdss_mdp_clk.c),
+	},
+};
+
+static struct branch_clk mmss_mdss_mdp_lut_clk = {
+	.cbcr_reg = MMSS_MDSS_MDP_LUT_CBCR,
+	.has_sibling = 1,
+	.check_enable_bit = true,
+	.halt_check = DELAY,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "mmss_mdss_mdp_lut_clk",
+		.parent = &mdp_clk_src.c,
+		.ops = &clk_ops_branch,
+		CLK_INIT(mmss_mdss_mdp_lut_clk.c),
+	},
+};
+
+static struct branch_clk mmss_mdss_pclk0_clk = {
+	.cbcr_reg = MMSS_MDSS_PCLK0_CBCR,
+	.has_sibling = 0,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "mmss_mdss_pclk0_clk",
+		.parent = &pclk0_clk_src.c,
+		.flags = CLKFLAG_NO_RATE_CACHE,
+		.ops = &clk_ops_branch,
+		CLK_INIT(mmss_mdss_pclk0_clk.c),
+	},
+};
+
+static struct branch_clk mmss_mdss_pclk1_clk = {
+	.cbcr_reg = MMSS_MDSS_PCLK1_CBCR,
+	.has_sibling = 0,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "mmss_mdss_pclk1_clk",
+		.parent = &pclk1_clk_src.c,
+		.flags = CLKFLAG_NO_RATE_CACHE,
+		.ops = &clk_ops_branch,
+		CLK_INIT(mmss_mdss_pclk1_clk.c),
+	},
+};
+
+static struct branch_clk mmss_mdss_rot_clk = {
+	.cbcr_reg = MMSS_MDSS_ROT_CBCR,
+	.has_sibling = 0,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "mmss_mdss_rot_clk",
+		.parent = &rot_clk_src.c,
+		.flags = CLKFLAG_NO_RATE_CACHE,
+		.ops = &clk_ops_branch,
+		CLK_INIT(mmss_mdss_rot_clk.c),
+	},
+};
+
+static struct branch_clk mmss_mdss_vsync_clk = {
+	.cbcr_reg = MMSS_MDSS_VSYNC_CBCR,
+	.has_sibling = 0,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "mmss_mdss_vsync_clk",
+		.parent = &vsync_clk_src.c,
+		.flags = CLKFLAG_NO_RATE_CACHE,
+		.ops = &clk_ops_branch,
+		CLK_INIT(mmss_mdss_vsync_clk.c),
+	},
+};
+
+static struct branch_clk mmss_mnoc_ahb_clk = {
+	.cbcr_reg = MMSS_MNOC_AHB_CBCR,
+	.has_sibling = 0,
+	.check_enable_bit = true,
+	.no_halt_check_on_disable = true,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "mmss_mnoc_ahb_clk",
+		.parent = &ahb_clk_src.c,
+		.flags = CLKFLAG_NO_RATE_CACHE,
+		.ops = &clk_ops_branch,
+		CLK_INIT(mmss_mnoc_ahb_clk.c),
+	},
+};
+
+static struct branch_clk mmss_misc_ahb_clk = {
+	.cbcr_reg = MMSS_MISC_AHB_CBCR,
+	.has_sibling = 1,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "mmss_misc_ahb_clk",
+		.ops = &clk_ops_branch,
+		.depends = &mmss_mnoc_ahb_clk.c,
+		CLK_INIT(mmss_misc_ahb_clk.c),
+	},
+};
+
+static struct branch_clk mmss_misc_cxo_clk = {
+	.cbcr_reg = MMSS_MISC_CXO_CBCR,
+	.has_sibling = 1,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "mmss_misc_cxo_clk",
+		.ops = &clk_ops_branch,
+		CLK_INIT(mmss_misc_cxo_clk.c),
+	},
+};
+
+static struct branch_clk mmss_mnoc_maxi_clk = {
+	.cbcr_reg = MMSS_MNOC_MAXI_CBCR,
+	.has_sibling = 0,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "mmss_mnoc_maxi_clk",
+		.parent = &maxi_clk_src.c,
+		.flags = CLKFLAG_NO_RATE_CACHE,
+		.ops = &clk_ops_branch,
+		CLK_INIT(mmss_mnoc_maxi_clk.c),
+	},
+};
+
+static struct branch_clk mmss_video_subcore0_clk = {
+	.cbcr_reg = MMSS_VIDEO_SUBCORE0_CBCR,
+	.has_sibling = 0,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "mmss_video_subcore0_clk",
+		.parent = &video_subcore0_clk_src.c,
+		.flags = CLKFLAG_NO_RATE_CACHE,
+		.ops = &clk_ops_branch,
+		CLK_INIT(mmss_video_subcore0_clk.c),
+	},
+};
+
+static struct branch_clk mmss_video_subcore1_clk = {
+	.cbcr_reg = MMSS_VIDEO_SUBCORE1_CBCR,
+	.has_sibling = 0,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "mmss_video_subcore1_clk",
+		.parent = &video_subcore1_clk_src.c,
+		.flags = CLKFLAG_NO_RATE_CACHE,
+		.ops = &clk_ops_branch,
+		CLK_INIT(mmss_video_subcore1_clk.c),
+	},
+};
+
+static struct branch_clk mmss_video_ahb_clk = {
+	.cbcr_reg = MMSS_VIDEO_AHB_CBCR,
+	.has_sibling = 1,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "mmss_video_ahb_clk",
+		.ops = &clk_ops_branch,
+		CLK_INIT(mmss_video_ahb_clk.c),
+	},
+};
+
+static struct branch_clk mmss_video_axi_clk = {
+	.cbcr_reg = MMSS_VIDEO_AXI_CBCR,
+	.has_sibling = 1,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "mmss_video_axi_clk",
+		.ops = &clk_ops_branch,
+		CLK_INIT(mmss_video_axi_clk.c),
+	},
+};
+
+static struct branch_clk mmss_video_core_clk = {
+	.cbcr_reg = MMSS_VIDEO_CORE_CBCR,
+	.has_sibling = 0,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "mmss_video_core_clk",
+		.parent = &video_core_clk_src.c,
+		.flags = CLKFLAG_NO_RATE_CACHE,
+		.ops = &clk_ops_branch,
+		CLK_INIT(mmss_video_core_clk.c),
+	},
+};
+
+static struct branch_clk mmss_video_maxi_clk = {
+	.cbcr_reg = MMSS_VIDEO_MAXI_CBCR,
+	.has_sibling = 1,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "mmss_video_maxi_clk",
+		.parent = &maxi_clk_src.c,
+		.ops = &clk_ops_branch,
+		CLK_INIT(mmss_video_maxi_clk.c),
+	},
+};
+
+static struct branch_clk mmss_vmem_ahb_clk = {
+	.cbcr_reg = MMSS_VMEM_AHB_CBCR,
+	.has_sibling = 1,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "mmss_vmem_ahb_clk",
+		.ops = &clk_ops_branch,
+		CLK_INIT(mmss_vmem_ahb_clk.c),
+	},
+};
+
+static struct branch_clk mmss_vmem_maxi_clk = {
+	.cbcr_reg = MMSS_VMEM_MAXI_CBCR,
+	.has_sibling = 1,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "mmss_vmem_maxi_clk",
+		.parent = &maxi_clk_src.c,
+		.ops = &clk_ops_branch,
+		CLK_INIT(mmss_vmem_maxi_clk.c),
+	},
+};
+
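+/*
+ * Debug mux: each entry maps a clock onto the selector value programmed into
+ * bits 9:0 of MMSS_DEBUG_CLK_CTL (enable bit 16), allowing any listed clock
+ * to be routed out for rate measurement.
+ */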
+static struct mux_clk mmss_debug_mux = {
+	.ops = &mux_reg_ops,
+	.en_mask = BIT(16),
+	.mask = 0x3FF,
+	.offset = MMSS_DEBUG_CLK_CTL,
+	.en_offset = MMSS_DEBUG_CLK_CTL,
+	.base = &virt_base,
+	MUX_SRC_LIST(
+		{ &mmss_mnoc_ahb_clk.c, 0x0001 },
+		{ &mmss_misc_ahb_clk.c, 0x0003 },
+		{ &mmss_vmem_maxi_clk.c, 0x0009 },
+		{ &mmss_vmem_ahb_clk.c, 0x000a },
+		{ &mmss_bimc_smmu_ahb_clk.c, 0x000c },
+		{ &mmss_bimc_smmu_axi_clk.c, 0x000d },
+		{ &mmss_video_core_clk.c, 0x000e },
+		{ &mmss_video_axi_clk.c, 0x000f },
+		{ &mmss_video_maxi_clk.c, 0x0010 },
+		{ &mmss_video_ahb_clk.c, 0x0011 },
+		{ &mmss_mdss_rot_clk.c, 0x0012 },
+		{ &mmss_snoc_dvm_axi_clk.c, 0x0013 },
+		{ &mmss_mdss_mdp_clk.c, 0x0014 },
+		{ &mmss_mdss_mdp_lut_clk.c, 0x0015 },
+		{ &mmss_mdss_pclk0_clk.c, 0x0016 },
+		{ &mmss_mdss_pclk1_clk.c, 0x0017 },
+		{ &mmss_mdss_extpclk_clk.c, 0x0018 },
+		{ &mmss_video_subcore0_clk.c, 0x001a },
+		{ &mmss_video_subcore1_clk.c, 0x001b },
+		{ &mmss_mdss_vsync_clk.c, 0x001c },
+		{ &mmss_mdss_hdmi_clk.c, 0x001d },
+		{ &mmss_mdss_byte0_clk.c, 0x001e },
+		{ &mmss_mdss_byte1_clk.c, 0x001f },
+		{ &mmss_mdss_esc0_clk.c, 0x0020 },
+		{ &mmss_mdss_esc1_clk.c, 0x0021 },
+		{ &mmss_mdss_ahb_clk.c, 0x0022 },
+		{ &mmss_mdss_hdmi_dp_ahb_clk.c, 0x0023 },
+		{ &mmss_mdss_axi_clk.c, 0x0024 },
+		{ &mmss_camss_top_ahb_clk.c, 0x0025 },
+		{ &mmss_camss_micro_ahb_clk.c, 0x0026 },
+		{ &mmss_camss_gp0_clk.c, 0x0027 },
+		{ &mmss_camss_gp1_clk.c, 0x0028 },
+		{ &mmss_camss_mclk0_clk.c, 0x0029 },
+		{ &mmss_camss_mclk1_clk.c, 0x002a },
+		{ &mmss_camss_mclk2_clk.c, 0x002b },
+		{ &mmss_camss_mclk3_clk.c, 0x002c },
+		{ &mmss_camss_cci_clk.c, 0x002d },
+		{ &mmss_camss_cci_ahb_clk.c, 0x002e },
+		{ &mmss_camss_csi0phytimer_clk.c, 0x002f },
+		{ &mmss_camss_csi1phytimer_clk.c, 0x0030 },
+		{ &mmss_camss_csi2phytimer_clk.c, 0x0031 },
+		{ &mmss_camss_jpeg0_clk.c, 0x0032 },
+		{ &mmss_camss_ispif_ahb_clk.c, 0x0033 },
+		{ &mmss_camss_jpeg_ahb_clk.c, 0x0035 },
+		{ &mmss_camss_jpeg_axi_clk.c, 0x0036 },
+		{ &mmss_camss_ahb_clk.c, 0x0037 },
+		{ &mmss_camss_vfe0_clk.c, 0x0038 },
+		{ &mmss_camss_vfe1_clk.c, 0x0039 },
+		{ &mmss_camss_cpp_clk.c, 0x003a },
+		{ &mmss_camss_cpp_ahb_clk.c, 0x003b },
+		{ &mmss_camss_csi_vfe0_clk.c, 0x003f },
+		{ &mmss_camss_csi_vfe1_clk.c, 0x0040 },
+		{ &mmss_camss_csi0_clk.c, 0x0041 },
+		{ &mmss_camss_csi0_ahb_clk.c, 0x0042 },
+		{ &mmss_camss_csiphy0_clk.c, 0x0043 },
+		{ &mmss_camss_csi0rdi_clk.c, 0x0044 },
+		{ &mmss_camss_csi0pix_clk.c, 0x0045 },
+		{ &mmss_camss_csi1_clk.c, 0x0046 },
+		{ &mmss_camss_csi1_ahb_clk.c, 0x0047 },
+		{ &mmss_camss_csi1rdi_clk.c, 0x0049 },
+		{ &mmss_camss_csi1pix_clk.c, 0x004a },
+		{ &mmss_camss_csi2_clk.c, 0x004b },
+		{ &mmss_camss_csi2_ahb_clk.c, 0x004c },
+		{ &mmss_camss_csi2rdi_clk.c, 0x004e },
+		{ &mmss_camss_csi2pix_clk.c, 0x004f },
+		{ &mmss_camss_csi3_clk.c, 0x0050 },
+		{ &mmss_camss_csi3_ahb_clk.c, 0x0051 },
+		{ &mmss_camss_csi3rdi_clk.c, 0x0053 },
+		{ &mmss_camss_csi3pix_clk.c, 0x0054 },
+		{ &mmss_mnoc_maxi_clk.c, 0x0070 },
+		{ &mmss_camss_vfe0_stream_clk.c, 0x0071 },
+		{ &mmss_camss_vfe1_stream_clk.c, 0x0072 },
+		{ &mmss_camss_cpp_vbif_ahb_clk.c, 0x0073 },
+		{ &mmss_misc_cxo_clk.c, 0x0077 },
+		{ &mmss_camss_cpp_axi_clk.c, 0x007a },
+		{ &mmss_camss_csiphy1_clk.c, 0x0085 },
+		{ &mmss_camss_vfe0_ahb_clk.c, 0x0086 },
+		{ &mmss_camss_vfe1_ahb_clk.c, 0x0087 },
+		{ &mmss_camss_csiphy2_clk.c, 0x0088 },
+		{ &mmss_fd_core_clk.c, 0x0089 },
+		{ &mmss_fd_core_uar_clk.c, 0x008a },
+		{ &mmss_fd_ahb_clk.c, 0x008c },
+		{ &mmss_camss_cphy_csid0_clk.c, 0x008d },
+		{ &mmss_camss_cphy_csid1_clk.c, 0x008e },
+		{ &mmss_camss_cphy_csid2_clk.c, 0x008f },
+		{ &mmss_camss_cphy_csid3_clk.c, 0x0090 },
+		{ &mmss_mdss_dp_link_clk.c, 0x0098 },
+		{ &mmss_mdss_dp_link_intf_clk.c, 0x0099 },
+		{ &mmss_mdss_dp_crypto_clk.c, 0x009a },
+		{ &mmss_mdss_dp_pixel_clk.c, 0x009b },
+		{ &mmss_mdss_dp_aux_clk.c, 0x009c },
+		{ &mmss_mdss_dp_gtc_clk.c, 0x009d },
+		{ &mmss_mdss_byte0_intf_clk.c, 0x00ad },
+		{ &mmss_mdss_byte1_intf_clk.c, 0x00ae },
+	),
+	.c = {
+		.dbg_name = "mmss_debug_mux",
+		.ops = &clk_ops_gen_mux,
+		.flags = CLKFLAG_NO_RATE_CACHE,
+		CLK_INIT(mmss_debug_mux.c),
+	},
+};
+
+static struct clk_lookup msm_clocks_mmss_8998[] = {
+	CLK_LIST(mmsscc_xo),
+	CLK_LIST(mmsscc_gpll0),
+	CLK_LIST(mmsscc_gpll0_div),
+	CLK_LIST(mmpll0_pll),
+	CLK_LIST(mmpll0_pll_out),
+	CLK_LIST(mmpll1_pll),
+	CLK_LIST(mmpll1_pll_out),
+	CLK_LIST(mmpll3_pll),
+	CLK_LIST(mmpll3_pll_out),
+	CLK_LIST(mmpll4_pll),
+	CLK_LIST(mmpll4_pll_out),
+	CLK_LIST(mmpll5_pll),
+	CLK_LIST(mmpll5_pll_out),
+	CLK_LIST(mmpll6_pll),
+	CLK_LIST(mmpll6_pll_out),
+	CLK_LIST(mmpll7_pll),
+	CLK_LIST(mmpll7_pll_out),
+	CLK_LIST(mmpll10_pll),
+	CLK_LIST(mmpll10_pll_out),
+	CLK_LIST(ahb_clk_src),
+	CLK_LIST(csi0_clk_src),
+	CLK_LIST(vfe0_clk_src),
+	CLK_LIST(vfe1_clk_src),
+	CLK_LIST(mdp_clk_src),
+	CLK_LIST(maxi_clk_src),
+	CLK_LIST(cpp_clk_src),
+	CLK_LIST(jpeg0_clk_src),
+	CLK_LIST(rot_clk_src),
+	CLK_LIST(video_core_clk_src),
+	CLK_LIST(csi1_clk_src),
+	CLK_LIST(csi2_clk_src),
+	CLK_LIST(csi3_clk_src),
+	CLK_LIST(fd_core_clk_src),
+	CLK_LIST(video_subcore0_clk_src),
+	CLK_LIST(video_subcore1_clk_src),
+	CLK_LIST(cci_clk_src),
+	CLK_LIST(csiphy_clk_src),
+	CLK_LIST(camss_gp0_clk_src),
+	CLK_LIST(camss_gp1_clk_src),
+	CLK_LIST(mclk0_clk_src),
+	CLK_LIST(mclk1_clk_src),
+	CLK_LIST(mclk2_clk_src),
+	CLK_LIST(mclk3_clk_src),
+	CLK_LIST(ext_byte0_clk_src),
+	CLK_LIST(ext_byte1_clk_src),
+	CLK_LIST(byte0_clk_src),
+	CLK_LIST(byte1_clk_src),
+	CLK_LIST(ext_pclk0_clk_src),
+	CLK_LIST(ext_pclk1_clk_src),
+	CLK_LIST(pclk0_clk_src),
+	CLK_LIST(pclk1_clk_src),
+	CLK_LIST(ext_extpclk_clk_src),
+	CLK_LIST(extpclk_clk_src),
+	CLK_LIST(ext_dp_phy_pll_vco),
+	CLK_LIST(ext_dp_phy_pll_link),
+	CLK_LIST(dp_pixel_clk_src),
+	CLK_LIST(dp_link_clk_src),
+	CLK_LIST(dp_crypto_clk_src),
+	CLK_LIST(csi0phytimer_clk_src),
+	CLK_LIST(csi1phytimer_clk_src),
+	CLK_LIST(csi2phytimer_clk_src),
+	CLK_LIST(dp_aux_clk_src),
+	CLK_LIST(dp_gtc_clk_src),
+	CLK_LIST(esc0_clk_src),
+	CLK_LIST(esc1_clk_src),
+	CLK_LIST(hdmi_clk_src),
+	CLK_LIST(vsync_clk_src),
+	CLK_LIST(mmss_bimc_smmu_ahb_clk),
+	CLK_LIST(mmss_bimc_smmu_axi_clk),
+	CLK_LIST(mmss_snoc_dvm_axi_clk),
+	CLK_LIST(mmss_camss_ahb_clk),
+	CLK_LIST(mmss_camss_cci_ahb_clk),
+	CLK_LIST(mmss_camss_cci_clk),
+	CLK_LIST(mmss_camss_cpp_ahb_clk),
+	CLK_LIST(mmss_camss_cpp_clk),
+	CLK_LIST(mmss_camss_cpp_axi_clk),
+	CLK_LIST(mmss_camss_cpp_vbif_ahb_clk),
+	CLK_LIST(mmss_camss_cphy_csid0_clk),
+	CLK_LIST(mmss_camss_csi0_ahb_clk),
+	CLK_LIST(mmss_camss_csi0_clk),
+	CLK_LIST(mmss_camss_csi0pix_clk),
+	CLK_LIST(mmss_camss_csi0rdi_clk),
+	CLK_LIST(mmss_camss_cphy_csid1_clk),
+	CLK_LIST(mmss_camss_csi1_ahb_clk),
+	CLK_LIST(mmss_camss_csi1_clk),
+	CLK_LIST(mmss_camss_csi1pix_clk),
+	CLK_LIST(mmss_camss_csi1rdi_clk),
+	CLK_LIST(mmss_camss_cphy_csid2_clk),
+	CLK_LIST(mmss_camss_csi2_ahb_clk),
+	CLK_LIST(mmss_camss_csi2_clk),
+	CLK_LIST(mmss_camss_csi2pix_clk),
+	CLK_LIST(mmss_camss_csi2rdi_clk),
+	CLK_LIST(mmss_camss_cphy_csid3_clk),
+	CLK_LIST(mmss_camss_csi3_ahb_clk),
+	CLK_LIST(mmss_camss_csi3_clk),
+	CLK_LIST(mmss_camss_csi3pix_clk),
+	CLK_LIST(mmss_camss_csi3rdi_clk),
+	CLK_LIST(mmss_camss_csi_vfe0_clk),
+	CLK_LIST(mmss_camss_csi_vfe1_clk),
+	CLK_LIST(mmss_camss_csiphy0_clk),
+	CLK_LIST(mmss_camss_csiphy1_clk),
+	CLK_LIST(mmss_camss_csiphy2_clk),
+	CLK_LIST(mmss_fd_ahb_clk),
+	CLK_LIST(mmss_fd_core_clk),
+	CLK_LIST(mmss_fd_core_uar_clk),
+	CLK_LIST(mmss_camss_gp0_clk),
+	CLK_LIST(mmss_camss_gp1_clk),
+	CLK_LIST(mmss_camss_ispif_ahb_clk),
+	CLK_LIST(mmss_camss_jpeg0_clk),
+	CLK_LIST(mmss_camss_jpeg0_vote_clk),
+	CLK_LIST(mmss_camss_jpeg0_dma_vote_clk),
+	CLK_LIST(mmss_camss_jpeg_ahb_clk),
+	CLK_LIST(mmss_camss_jpeg_axi_clk),
+	CLK_LIST(mmss_camss_mclk0_clk),
+	CLK_LIST(mmss_camss_mclk1_clk),
+	CLK_LIST(mmss_camss_mclk2_clk),
+	CLK_LIST(mmss_camss_mclk3_clk),
+	CLK_LIST(mmss_camss_micro_ahb_clk),
+	CLK_LIST(mmss_camss_csi0phytimer_clk),
+	CLK_LIST(mmss_camss_csi1phytimer_clk),
+	CLK_LIST(mmss_camss_csi2phytimer_clk),
+	CLK_LIST(mmss_camss_top_ahb_clk),
+	CLK_LIST(mmss_camss_vfe0_ahb_clk),
+	CLK_LIST(mmss_camss_vfe0_clk),
+	CLK_LIST(mmss_camss_vfe0_stream_clk),
+	CLK_LIST(mmss_camss_vfe1_ahb_clk),
+	CLK_LIST(mmss_camss_vfe1_clk),
+	CLK_LIST(mmss_camss_vfe1_stream_clk),
+	CLK_LIST(mmss_camss_vfe_vbif_ahb_clk),
+	CLK_LIST(mmss_camss_vfe_vbif_axi_clk),
+	CLK_LIST(mmss_mdss_ahb_clk),
+	CLK_LIST(mmss_mdss_axi_clk),
+	CLK_LIST(mmss_mdss_byte0_clk),
+	CLK_LIST(mmss_mdss_byte0_intf_div_clk),
+	CLK_LIST(mmss_mdss_byte0_intf_clk),
+	CLK_LIST(mmss_mdss_byte1_clk),
+	CLK_LIST(mmss_mdss_byte1_intf_div_clk),
+	CLK_LIST(mmss_mdss_byte1_intf_clk),
+	CLK_LIST(mmss_mdss_dp_aux_clk),
+	CLK_LIST(mmss_mdss_dp_crypto_clk),
+	CLK_LIST(mmss_mdss_dp_pixel_clk),
+	CLK_LIST(mmss_mdss_dp_link_clk),
+	CLK_LIST(mmss_mdss_dp_link_intf_clk),
+	CLK_LIST(mmss_mdss_dp_gtc_clk),
+	CLK_LIST(mmss_mdss_esc0_clk),
+	CLK_LIST(mmss_mdss_esc1_clk),
+	CLK_LIST(mmss_mdss_extpclk_clk),
+	CLK_LIST(mmss_mdss_hdmi_clk),
+	CLK_LIST(mmss_mdss_hdmi_dp_ahb_clk),
+	CLK_LIST(mmss_mdss_mdp_clk),
+	CLK_LIST(mmss_mdss_mdp_lut_clk),
+	CLK_LIST(mmss_mdss_pclk0_clk),
+	CLK_LIST(mmss_mdss_pclk1_clk),
+	CLK_LIST(mmss_mdss_rot_clk),
+	CLK_LIST(mmss_mdss_vsync_clk),
+	CLK_LIST(mmss_misc_ahb_clk),
+	CLK_LIST(mmss_misc_cxo_clk),
+	CLK_LIST(mmss_mnoc_ahb_clk),
+	CLK_LIST(mmss_video_subcore0_clk),
+	CLK_LIST(mmss_video_subcore1_clk),
+	CLK_LIST(mmss_video_ahb_clk),
+	CLK_LIST(mmss_video_axi_clk),
+	CLK_LIST(mmss_video_core_clk),
+	CLK_LIST(mmss_video_maxi_clk),
+	CLK_LIST(mmss_vmem_ahb_clk),
+	CLK_LIST(mmss_vmem_maxi_clk),
+	CLK_LIST(mmss_mnoc_maxi_clk),
+	CLK_LIST(mmss_debug_mux),
+};
+
+static const struct msm_reset_map mmss_8998_resets[] = {
+	[CAMSS_MICRO_BCR] = { 0x3490 },
+};
+
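+/*
+ * Re-parameterize PLL rates, voltage-corner fmax tables and RCG frequency
+ * tables for the "hamster" (VQ) variant of the clock controller.
+ */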
+static void msm_mmsscc_hamster_fixup(void)
+{
+	mmpll3_pll.c.rate = 1066000000;
+	mmpll3_pll.c.fmax[VDD_DIG_LOWER] = 533000000;
+	mmpll3_pll.c.fmax[VDD_DIG_LOW] = 533000000;
+	mmpll3_pll.c.fmax[VDD_DIG_LOW_L1] = 533000000;
+	mmpll3_pll.c.fmax[VDD_DIG_NOMINAL] = 1066000000;
+	mmpll3_pll.c.fmax[VDD_DIG_HIGH] = 1066000000;
+
+	mmpll4_pll.c.fmax[VDD_DIG_LOW] = 384000000;
+	mmpll4_pll.c.fmax[VDD_DIG_LOW_L1] = 384000000;
+	mmpll4_pll.c.fmax[VDD_DIG_NOMINAL] = 768000000;
+
+	mmpll5_pll.c.fmax[VDD_DIG_LOW] = 412500000;
+	mmpll5_pll.c.fmax[VDD_DIG_LOW_L1] = 412500000;
+	mmpll5_pll.c.fmax[VDD_DIG_NOMINAL] = 825000000;
+
+	mmpll6_pll.c.rate = 888000000;
+	mmpll6_pll.c.fmax[VDD_DIG_LOWER] = 444000000;
+	mmpll6_pll.c.fmax[VDD_DIG_LOW] = 444000000;
+	mmpll6_pll.c.fmax[VDD_DIG_LOW_L1] = 444000000;
+	mmpll6_pll.c.fmax[VDD_DIG_NOMINAL] = 888000000;
+	mmpll6_pll.c.fmax[VDD_DIG_HIGH] = 888000000;
+
+	vfe0_clk_src.freq_tbl = ftbl_vfe_clk_src_vq;
+	vfe0_clk_src.c.fmax[VDD_DIG_LOW] = 404000000;
+	vfe0_clk_src.c.fmax[VDD_DIG_LOW_L1] = 480000000;
+	vfe1_clk_src.freq_tbl = ftbl_vfe_clk_src_vq;
+	vfe1_clk_src.c.fmax[VDD_DIG_LOW] = 404000000;
+	vfe1_clk_src.c.fmax[VDD_DIG_LOW_L1] = 480000000;
+
+	csi0_clk_src.freq_tbl = ftbl_csi_clk_src_vq;
+	csi0_clk_src.c.fmax[VDD_DIG_LOW] = 274290000;
+	csi0_clk_src.c.fmax[VDD_DIG_LOW_L1] = 320000000;
+	csi1_clk_src.freq_tbl = ftbl_csi_clk_src_vq;
+	csi1_clk_src.c.fmax[VDD_DIG_LOW] = 274290000;
+	csi1_clk_src.c.fmax[VDD_DIG_LOW_L1] = 320000000;
+	csi2_clk_src.freq_tbl = ftbl_csi_clk_src_vq;
+	csi2_clk_src.c.fmax[VDD_DIG_LOW] = 274290000;
+	csi2_clk_src.c.fmax[VDD_DIG_LOW_L1] = 320000000;
+	csi3_clk_src.freq_tbl = ftbl_csi_clk_src_vq;
+	csi3_clk_src.c.fmax[VDD_DIG_LOW] = 274290000;
+	csi3_clk_src.c.fmax[VDD_DIG_LOW_L1] = 320000000;
+
+	cpp_clk_src.freq_tbl = ftbl_cpp_clk_src_vq;
+	cpp_clk_src.c.fmax[VDD_DIG_LOW] = 384000000;
+	cpp_clk_src.c.fmax[VDD_DIG_LOW_L1] = 404000000;
+	jpeg0_clk_src.freq_tbl = ftbl_jpeg0_clk_src_vq;
+	jpeg0_clk_src.c.fmax[VDD_DIG_LOW_L1] = 320000000;
+	csiphy_clk_src.freq_tbl = ftbl_csiphy_clk_src_vq;
+	csiphy_clk_src.c.fmax[VDD_DIG_LOW] = 274290000;
+	csiphy_clk_src.c.fmax[VDD_DIG_LOW_L1] = 300000000;
+	fd_core_clk_src.freq_tbl = ftbl_fd_core_clk_src_vq;
+	fd_core_clk_src.c.fmax[VDD_DIG_LOW] = 404000000;
+	fd_core_clk_src.c.fmax[VDD_DIG_LOW_L1] = 480000000;
+
+	csi0phytimer_clk_src.c.fmax[VDD_DIG_LOW_L1] = 269333333;
+	csi1phytimer_clk_src.c.fmax[VDD_DIG_LOW_L1] = 269333333;
+	csi2phytimer_clk_src.c.fmax[VDD_DIG_LOW_L1] = 269333333;
+
+	mdp_clk_src.c.fmax[VDD_DIG_LOW_L1] = 330000000;
+	extpclk_clk_src.c.fmax[VDD_DIG_LOW] = 312500000;
+	extpclk_clk_src.c.fmax[VDD_DIG_LOW_L1] = 375000000;
+	rot_clk_src.c.fmax[VDD_DIG_LOW_L1] = 330000000;
+
+	video_core_clk_src.freq_tbl = ftbl_video_core_clk_src_vq;
+	video_core_clk_src.c.fmax[VDD_DIG_LOWER] = 200000000;
+	video_core_clk_src.c.fmax[VDD_DIG_LOW] = 269330000;
+	video_core_clk_src.c.fmax[VDD_DIG_LOW_L1] = 355200000;
+	video_core_clk_src.c.fmax[VDD_DIG_NOMINAL] = 444000000;
+	video_core_clk_src.c.fmax[VDD_DIG_HIGH] = 533000000;
+
+	video_subcore0_clk_src.freq_tbl = ftbl_video_subcore_clk_src_vq;
+	video_subcore0_clk_src.c.fmax[VDD_DIG_LOWER] = 200000000;
+	video_subcore0_clk_src.c.fmax[VDD_DIG_LOW] = 269330000;
+	video_subcore0_clk_src.c.fmax[VDD_DIG_LOW_L1] = 355200000;
+	video_subcore0_clk_src.c.fmax[VDD_DIG_NOMINAL] = 444000000;
+	video_subcore0_clk_src.c.fmax[VDD_DIG_HIGH] = 533000000;
+
+	video_subcore1_clk_src.freq_tbl = ftbl_video_subcore_clk_src_vq;
+	video_subcore1_clk_src.c.fmax[VDD_DIG_LOWER] = 200000000;
+	video_subcore1_clk_src.c.fmax[VDD_DIG_LOW] = 269330000;
+	video_subcore1_clk_src.c.fmax[VDD_DIG_LOW_L1] = 355200000;
+	video_subcore1_clk_src.c.fmax[VDD_DIG_NOMINAL] = 444000000;
+	video_subcore1_clk_src.c.fmax[VDD_DIG_HIGH] = 533000000;
+}
+
+static void msm_mmsscc_v2_fixup(void)
+{
+	csi0_clk_src.c.fmax[VDD_DIG_NOMINAL] = 480000000;
+	csi1_clk_src.c.fmax[VDD_DIG_NOMINAL] = 480000000;
+	csi2_clk_src.c.fmax[VDD_DIG_NOMINAL] = 480000000;
+	csi3_clk_src.c.fmax[VDD_DIG_NOMINAL] = 480000000;
+}
+
+int msm_mmsscc_8998_probe(struct platform_device *pdev)
+{
+	struct resource *res;
+	int rc;
+	struct clk *tmp;
+	struct regulator *reg;
+	u32 regval;
+	bool is_v2 = false, is_vq = false;
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cc_base");
+	if (!res) {
+		dev_err(&pdev->dev, "Unable to retrieve register base.\n");
+		return -ENOMEM;
+	}
+	virt_base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
+	if (!virt_base) {
+		dev_err(&pdev->dev, "Failed to map CC registers\n");
+		return -ENOMEM;
+	}
+
+	/* Clear the DBG_CLK_DIV bits of the MMSS debug register */
+	regval = readl_relaxed(virt_base + mmss_debug_mux.offset);
+	regval &= ~BM(18, 17);
+	writel_relaxed(regval, virt_base + mmss_debug_mux.offset);
+
+	reg = vdd_dig.regulator[0] = devm_regulator_get(&pdev->dev, "vdd_dig");
+	if (IS_ERR(reg)) {
+		if (PTR_ERR(reg) != -EPROBE_DEFER)
+			dev_err(&pdev->dev, "Unable to get vdd_dig regulator!");
+		return PTR_ERR(reg);
+	}
+
+	reg = vdd_mmsscc_mx.regulator[0] = devm_regulator_get(&pdev->dev,
+							"vdd_mmsscc_mx");
+	if (IS_ERR(reg)) {
+		if (PTR_ERR(reg) != -EPROBE_DEFER)
+			dev_err(&pdev->dev, "Unable to get vdd_mmsscc_mx regulator!");
+		return PTR_ERR(reg);
+	}
+
+	tmp = mmsscc_xo.c.parent = devm_clk_get(&pdev->dev, "xo");
+	if (IS_ERR(tmp)) {
+		if (PTR_ERR(tmp) != -EPROBE_DEFER)
+			dev_err(&pdev->dev, "Unable to get xo clock!\n");
+		return PTR_ERR(tmp);
+	}
+
+	tmp = mmsscc_gpll0.c.parent = devm_clk_get(&pdev->dev, "gpll0");
+	if (IS_ERR(tmp)) {
+		if (PTR_ERR(tmp) != -EPROBE_DEFER)
+			dev_err(&pdev->dev, "Unable to get gpll0 clock!\n");
+		return PTR_ERR(tmp);
+	}
+
+	tmp = mmsscc_gpll0_div.c.parent = devm_clk_get(&pdev->dev, "gpll0_div");
+	if (IS_ERR(tmp)) {
+		if (PTR_ERR(tmp) != -EPROBE_DEFER)
+			dev_err(&pdev->dev, "Unable to get gpll0_div clock!\n");
+		return PTR_ERR(tmp);
+	}
+
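+	/*
+	 * The pclk/byte sources are supplied by the DSI PHY and the DP
+	 * link/VCO sources by the DP PHY; record the device and clock IDs
+	 * these external clocks will be looked up by.
+	 */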
+	ext_pclk0_clk_src.dev = &pdev->dev;
+	ext_pclk0_clk_src.clk_id = "pclk0_src";
+	ext_pclk0_clk_src.c.flags = CLKFLAG_NO_RATE_CACHE;
+	ext_pclk1_clk_src.dev = &pdev->dev;
+	ext_pclk1_clk_src.clk_id = "pclk1_src";
+	ext_pclk1_clk_src.c.flags = CLKFLAG_NO_RATE_CACHE;
+	ext_byte0_clk_src.dev = &pdev->dev;
+	ext_byte0_clk_src.clk_id = "byte0_src";
+	ext_byte0_clk_src.c.flags = CLKFLAG_NO_RATE_CACHE;
+	ext_byte1_clk_src.dev = &pdev->dev;
+	ext_byte1_clk_src.clk_id = "byte1_src";
+	ext_byte1_clk_src.c.flags = CLKFLAG_NO_RATE_CACHE;
+	ext_extpclk_clk_src.dev = &pdev->dev;
+	ext_extpclk_clk_src.clk_id = "extpclk_src";
+
+	ext_dp_phy_pll_link.dev = &pdev->dev;
+	ext_dp_phy_pll_link.clk_id = "dp_link_src";
+	ext_dp_phy_pll_link.c.flags = CLKFLAG_NO_RATE_CACHE;
+	ext_dp_phy_pll_vco.dev = &pdev->dev;
+	ext_dp_phy_pll_vco.clk_id = "dp_vco_div";
+	ext_dp_phy_pll_vco.c.flags = CLKFLAG_NO_RATE_CACHE;
+
+	mmss_camss_jpeg0_vote_clk.c.flags = CLKFLAG_NO_RATE_CACHE;
+	mmss_camss_jpeg0_dma_vote_clk.c.flags = CLKFLAG_NO_RATE_CACHE;
+
+	is_vq = of_device_is_compatible(pdev->dev.of_node,
+					"qcom,mmsscc-hamster");
+	if (is_vq)
+		msm_mmsscc_hamster_fixup();
+
+	is_v2 = of_device_is_compatible(pdev->dev.of_node,
+					"qcom,mmsscc-8998-v2");
+	if (is_v2) {
+		msm_mmsscc_hamster_fixup();
+		msm_mmsscc_v2_fixup();
+	}
+
+	rc = of_msm_clock_register(pdev->dev.of_node, msm_clocks_mmss_8998,
+				   ARRAY_SIZE(msm_clocks_mmss_8998));
+	if (rc)
+		return rc;
+
+	/* Register block resets */
+	msm_reset_controller_register(pdev, mmss_8998_resets,
+			ARRAY_SIZE(mmss_8998_resets), virt_base);
+
+	dev_info(&pdev->dev, "Registered MMSS clocks.\n");
+	return 0;
+}
+
+static const struct of_device_id msm_clock_mmss_match_table[] = {
+	{ .compatible = "qcom,mmsscc-8998" },
+	{ .compatible = "qcom,mmsscc-8998-v2" },
+	{ .compatible = "qcom,mmsscc-hamster" },
+	{},
+};
+
+static struct platform_driver msm_clock_mmss_driver = {
+	.probe = msm_mmsscc_8998_probe,
+	.driver = {
+		.name = "qcom,mmsscc-8998",
+		.of_match_table = msm_clock_mmss_match_table,
+		.owner = THIS_MODULE,
+	},
+};
+
+int __init msm_mmsscc_8998_init(void)
+{
+	return platform_driver_register(&msm_clock_mmss_driver);
+}
+arch_initcall(msm_mmsscc_8998_init);
diff -Nruw linux-4.4.115/drivers/clk/msm/clock-osm.c linux-4.4.115-fbx/drivers/clk/msm/clock-osm.c
--- linux-4.4.115/drivers/clk/msm/clock-osm.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/clk/msm/clock-osm.c	2019-01-22 16:16:23.019242024 +0100
@@ -0,0 +1,3535 @@
+/*
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/debugfs.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/clk.h>
+#include <linux/clk/msm-clk-provider.h>
+#include <linux/clk/msm-clk.h>
+#include <linux/clk/msm-clock-generic.h>
+#include <linux/cpu.h>
+#include <linux/platform_device.h>
+#include <linux/of_platform.h>
+#include <linux/pm_opp.h>
+#include <linux/pm_qos.h>
+#include <linux/interrupt.h>
+#include <linux/regulator/driver.h>
+#include <linux/uaccess.h>
+#include <linux/sched.h>
+
+#include <soc/qcom/scm.h>
+#include <soc/qcom/clock-pll.h>
+#include <soc/qcom/clock-local2.h>
+#include <soc/qcom/clock-alpha-pll.h>
+
+#include <dt-bindings/clock/msm-clocks-hwio-8998.h>
+#include <dt-bindings/clock/msm-clocks-8998.h>
+
+#include "clock.h"
+
+enum clk_osm_bases {
+	OSM_BASE,
+	PLL_BASE,
+	EFUSE_BASE,
+	ACD_BASE,
+	NUM_BASES,
+};
+
+enum clk_osm_lut_data {
+	FREQ,
+	FREQ_DATA,
+	PLL_OVERRIDES,
+	SPARE_DATA,
+	VIRTUAL_CORNER,
+	NUM_FIELDS,
+};
+
+enum clk_osm_trace_method {
+	XOR_PACKET,
+	PERIODIC_PACKET,
+};
+
+enum clk_osm_trace_packet_id {
+	TRACE_PACKET0,
+	TRACE_PACKET1,
+	TRACE_PACKET2,
+	TRACE_PACKET3,
+};
+
+#define SEQ_REG(n) (0x300 + (n) * 4)
+#define MEM_ACC_SEQ_REG_CFG_START(n) (SEQ_REG(12 + (n)))
+#define MEM_ACC_SEQ_CONST(n) (n)
+#define MEM_ACC_INSTR_COMP(n) (0x67 + ((n) * 0x40))
+#define MEM_ACC_SEQ_REG_VAL_START(n) (SEQ_REG(60 + (n)))
+#define SEQ_REG1_MSM8998_V2 0x1048
+#define VERSION_REG 0x0
+
+#define OSM_TABLE_SIZE 40
+#define MAX_VIRTUAL_CORNER (OSM_TABLE_SIZE - 1)
+#define MAX_CLUSTER_CNT 2
+#define CORE_COUNT_VAL(val) (((val) & GENMASK(18, 16)) >> 16)
+#define SINGLE_CORE 1
+#define MAX_CORE_COUNT 4
+#define LLM_SW_OVERRIDE_CNT 3
+#define OSM_SEQ_MINUS_ONE 0xff
+
+#define ENABLE_REG 0x1004
+#define INDEX_REG 0x1150
+#define FREQ_REG 0x1154
+#define VOLT_REG 0x1158
+#define OVERRIDE_REG 0x115C
+#define SPARE_REG 0x1164
+
+#define OSM_CYCLE_COUNTER_CTRL_REG 0x1F00
+#define OSM_CYCLE_COUNTER_STATUS_REG 0x1F04
+#define DCVS_PERF_STATE_DESIRED_REG 0x1F10
+#define DCVS_PERF_STATE_DEVIATION_INTR_STAT 0x1F14
+#define DCVS_PERF_STATE_DEVIATION_INTR_EN 0x1F18
+#define DCVS_PERF_STATE_DEVIATION_INTR_CLEAR 0x1F1C
+#define DCVS_PERF_STATE_DEVIATION_CORRECTED_INTR_STAT 0x1F20
+#define DCVS_PERF_STATE_DEVIATION_CORRECTED_INTR_EN 0x1F24
+#define DCVS_PERF_STATE_DEVIATION_CORRECTED_INTR_CLEAR 0x1F28
+#define DCVS_PERF_STATE_MET_INTR_STAT 0x1F2C
+#define DCVS_PERF_STATE_MET_INTR_EN 0x1F30
+#define DCVS_PERF_STATE_MET_INTR_CLR 0x1F34
+#define OSM_CORE_TABLE_SIZE 8192
+#define OSM_REG_SIZE 32
+
+#define WDOG_DOMAIN_PSTATE_STATUS	0x1c00
+#define WDOG_PROGRAM_COUNTER		0x1c74
+
+#define OSM_CYCLE_COUNTER_USE_XO_EDGE_EN BIT(8)
+#define PLL_MODE		0x0
+#define PLL_L_VAL		0x4
+#define PLL_USER_CTRL		0xC
+#define PLL_CONFIG_CTL_LO	0x10
+#define PLL_TEST_CTL_HI		0x1C
+#define PLL_STATUS		0x2C
+#define PLL_LOCK_DET_MASK	BIT(16)
+#define PLL_WAIT_LOCK_TIME_US	10
+#define PLL_WAIT_LOCK_TIME_NS	(PLL_WAIT_LOCK_TIME_US * 1000)
+#define PLL_MIN_LVAL 43
+#define L_VAL(freq_data)	((freq_data) & GENMASK(7, 0))
+
+#define CC_ZERO_BEHAV_CTRL 0x100C
+#define SPM_CC_DCVS_DISABLE 0x1020
+#define SPM_CC_CTRL 0x1028
+#define SPM_CC_HYSTERESIS 0x101C
+#define SPM_CORE_RET_MAPPING 0x1024
+#define CFG_DELAY_VAL_3 0x12C
+
+#define LLM_FREQ_VOTE_HYSTERESIS 0x102C
+#define LLM_VOLT_VOTE_HYSTERESIS 0x1030
+#define LLM_INTF_DCVS_DISABLE 0x1034
+
+#define ENABLE_OVERRIDE BIT(0)
+
+#define ITM_CL0_DISABLE_CL1_ENABLED 0x2
+#define ITM_CL0_ENABLED_CL1_DISABLE 0x1
+
+#define APM_MX_MODE 0
+#define APM_APC_MODE BIT(1)
+#define APM_MODE_SWITCH_MASK (BVAL(4, 2, 7) | BVAL(1, 0, 3))
+#define APM_MX_MODE_VAL 0
+#define APM_APC_MODE_VAL 0x3
+
+#define GPLL_SEL 0x400
+#define PLL_EARLY_SEL 0x500
+#define PLL_MAIN_SEL 0x300
+#define RCG_UPDATE 0x3
+#define RCG_UPDATE_SUCCESS 0x2
+#define PLL_POST_DIV1 0x1F
+#define PLL_POST_DIV2 0x11F
+
+#define LLM_SW_OVERRIDE_REG 0x1038
+#define VMIN_REDUC_ENABLE_REG 0x103C
+#define VMIN_REDUC_TIMER_REG 0x1040
+#define PDN_FSM_CTRL_REG 0x1070
+#define CC_BOOST_TIMER_REG0 0x1074
+#define CC_BOOST_TIMER_REG1 0x1078
+#define CC_BOOST_TIMER_REG2 0x107C
+#define CC_BOOST_EN_MASK BIT(0)
+#define PS_BOOST_EN_MASK BIT(1)
+#define DCVS_BOOST_EN_MASK BIT(2)
+#define PC_RET_EXIT_DROOP_EN_MASK BIT(3)
+#define WFX_DROOP_EN_MASK BIT(4)
+#define DCVS_DROOP_EN_MASK BIT(5)
+#define LMH_PS_EN_MASK BIT(6)
+#define IGNORE_PLL_LOCK_MASK BIT(15)
+#define SAFE_FREQ_WAIT_NS 5000
+#define DEXT_DECREMENT_WAIT_NS 1000
+#define DCVS_BOOST_TIMER_REG0 0x1084
+#define DCVS_BOOST_TIMER_REG1 0x1088
+#define DCVS_BOOST_TIMER_REG2 0x108C
+#define PS_BOOST_TIMER_REG0 0x1094
+#define PS_BOOST_TIMER_REG1 0x1098
+#define PS_BOOST_TIMER_REG2 0x109C
+#define BOOST_PROG_SYNC_DELAY_REG 0x10A0
+#define DROOP_CTRL_REG 0x10A4
+#define DROOP_RELEASE_TIMER_CTRL 0x10A8
+#define DROOP_PROG_SYNC_DELAY_REG 0x10BC
+#define DROOP_UNSTALL_TIMER_CTRL_REG 0x10AC
+#define DROOP_WAIT_TO_RELEASE_TIMER_CTRL0_REG 0x10B0
+#define DROOP_WAIT_TO_RELEASE_TIMER_CTRL1_REG 0x10B4
+#define OSM_PLL_SW_OVERRIDE_EN 0x10C0
+
+#define PLL_SW_OVERRIDE_DROOP_EN BIT(0)
+#define DCVS_DROOP_TIMER_CTRL 0x10B8
+#define SEQ_MEM_ADDR 0x500
+#define SEQ_CFG_BR_ADDR 0x170
+#define MAX_INSTRUCTIONS 256
+#define MAX_BR_INSTRUCTIONS 49
+
+#define MAX_MEM_ACC_LEVELS 3
+#define MAX_MEM_ACC_VAL_PER_LEVEL 3
+#define MAX_MEM_ACC_VALUES (MAX_MEM_ACC_LEVELS * \
+			    MAX_MEM_ACC_VAL_PER_LEVEL)
+#define MEM_ACC_APM_READ_MASK 0xff
+
+#define TRACE_CTRL 0x1F38
+#define TRACE_CTRL_EN_MASK BIT(0)
+#define TRACE_CTRL_ENABLE 1
+#define TRACE_CTRL_DISABLE 0
+#define TRACE_CTRL_ENABLE_WDOG_STATUS	BIT(30)
+#define TRACE_CTRL_PACKET_TYPE_MASK BVAL(2, 1, 3)
+#define TRACE_CTRL_PACKET_TYPE_SHIFT 1
+#define TRACE_CTRL_PERIODIC_TRACE_EN_MASK BIT(3)
+#define TRACE_CTRL_PERIODIC_TRACE_ENABLE BIT(3)
+#define PERIODIC_TRACE_TIMER_CTRL 0x1F3C
+#define PERIODIC_TRACE_MIN_NS 1000
+#define PERIODIC_TRACE_MAX_NS 21474836475
+#define PERIODIC_TRACE_DEFAULT_NS 1000000
+
+#define PLL_DD_USER_CTL_LO_ENABLE	0x0f04c408
+#define PLL_DD_USER_CTL_LO_DISABLE	0x1f04c41f
+#define PLL_DD_D0_USER_CTL_LO		0x17916208
+#define PLL_DD_D1_USER_CTL_LO		0x17816208
+
+#define PWRCL_EFUSE_SHIFT	0
+#define PWRCL_EFUSE_MASK	0
+#define PERFCL_EFUSE_SHIFT	29
+#define PERFCL_EFUSE_MASK	0x7
+
+#define MSM8998V1_PWRCL_BOOT_RATE	1478400000
+#define MSM8998V1_PERFCL_BOOT_RATE	1536000000
+#define MSM8998V2_PWRCL_BOOT_RATE	1555200000
+#define MSM8998V2_PERFCL_BOOT_RATE	1728000000
+
+#define DEBUG_REG_NUM		3
+
+/* ACD registers */
+#define ACD_HW_VERSION		0x0
+#define ACDCR			0x4
+#define ACDTD			0x8
+#define ACDSSCR			0x28
+#define ACD_EXTINT_CFG		0x30
+#define ACD_DCVS_SW		0x34
+#define ACD_GFMUX_CFG		0x3c
+#define ACD_READOUT_CFG		0x48
+#define ACD_AUTOXFER_CFG	0x80
+#define ACD_AUTOXFER		0x84
+#define ACD_AUTOXFER_CTL	0x88
+#define ACD_AUTOXFER_STATUS	0x8c
+#define ACD_WRITE_CTL		0x90
+#define ACD_WRITE_STATUS	0x94
+#define ACD_READOUT		0x98
+
+#define ACD_MASTER_ONLY_REG_ADDR	0x80
+#define ACD_WRITE_CTL_UPDATE_EN		BIT(0)
+#define ACD_WRITE_CTL_SELECT_SHIFT	1
+#define ACD_GFMUX_CFG_SELECT		BIT(0)
+#define ACD_AUTOXFER_START_CLEAR	0
+#define ACD_AUTOXFER_START_SET		BIT(0)
+#define AUTO_XFER_DONE_MASK		BIT(0)
+#define ACD_DCVS_SW_DCVS_IN_PRGR_SET	BIT(0)
+#define ACD_DCVS_SW_DCVS_IN_PRGR_CLEAR	0
+#define ACD_LOCAL_TRANSFER_TIMEOUT_NS   500
+
+static void __iomem *virt_base;
+static void __iomem *debug_base;
+
+#define lmh_lite_clk_src_source_val 1
+
+#define ACD_REG_RELATIVE_ADDR(addr) ((addr) / 4)
+#define ACD_REG_RELATIVE_ADDR_BITMASK(addr) \
+			(1 << (ACD_REG_RELATIVE_ADDR(addr)))
+
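+/*
+ * RCG CFG registers encode a (possibly half-integer) divider as
+ * (2 * div - 1), e.g. div = 1.5 is programmed as 2; div = 0 means bypass.
+ */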
+#define FIXDIV(div) ((div) ? (2 * (div) - 1) : (0))
+
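+/*
+ * Build a frequency table entry: M/N counter values plus the source and
+ * divider select fields of the RCG CFG register.
+ */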
+#define F(f, s, div, m, n) \
+	{ \
+		.freq_hz = (f), \
+		.src_clk = &s.c, \
+		.m_val = (m), \
+		.n_val = ~((n)-(m)) * !!(n), \
+		.d_val = ~(n),\
+		.div_src_val = BVAL(4, 0, (int)FIXDIV(div)) \
+			| BVAL(10, 8, s##_source_val), \
+	}
+
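+/*
+ * Opaque OSM sequencer program, presumably produced by the hardware team,
+ * intended for the sequencer memory region (SEQ_MEM_ADDR).
+ */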
+static u32 seq_instr[] = {
+	0xc2005000, 0x2c9e3b21, 0xc0ab2cdc, 0xc2882525, 0x359dc491,
+	0x700a500b, 0x5001aefc, 0xaefd7000, 0x390938c8, 0xcb44c833,
+	0xce56cd54, 0x341336e0, 0xa4baadba, 0xb480a493, 0x10004000,
+	0x70005001, 0x1000500c, 0xc792c5a1, 0x501625e1, 0x3da335a2,
+	0x50170006, 0x50150006, 0x1000c633, 0x1000acb3, 0xc422acb4,
+	0xaefc1000, 0x700a500b, 0x70005001, 0x5010aefd, 0x5012700b,
+	0xad41700c, 0x84e5adb9, 0xb3808566, 0x239b0003, 0x856484e3,
+	0xb9800007, 0x2bad0003, 0xac3aa20b, 0x0003181b, 0x0003bb40,
+	0xa30d239b, 0x500c181b, 0x5011500f, 0x181b3413, 0x853984b9,
+	0x0003bd80, 0xa0012ba4, 0x72050803, 0x500e1000, 0x500c1000,
+	0x1c011c0a, 0x3b181c06, 0x1c073b43, 0x1c061000, 0x1c073983,
+	0x1c02500c, 0x10001c0a, 0x70015002, 0x81031000, 0x70025003,
+	0x70035004, 0x3b441000, 0x81553985, 0x70025003, 0x50054003,
+	0xa1467009, 0x0003b1c0, 0x4005238b, 0x835a1000, 0x855c84db,
+	0x1000a51f, 0x84de835d, 0xa52c855c, 0x50061000, 0x39cd3a4c,
+	0x3ad03a8f, 0x10004006, 0x70065007, 0xa00f2c12, 0x08034007,
+	0xaefc7205, 0xaefd700d, 0xa9641000, 0x40071c1a, 0x700daefc,
+	0x1000aefd, 0x70065007, 0x50101c16, 0x40075012, 0x700daefc,
+	0x2411aefd, 0xa8211000, 0x0803a00f, 0x500c7005, 0x1c1591e0,
+	0x500f5014, 0x10005011, 0x500c2bd4, 0x0803a00f, 0x10007205,
+	0xa00fa9d1, 0x0803a821, 0xa9d07005, 0x91e0500c, 0x500f1c15,
+	0x10005011, 0x1c162bce, 0x50125010, 0xa022a82a, 0x70050803,
+	0x1c1591df, 0x5011500f, 0x5014500c, 0x0803a00f, 0x10007205,
+	0x501391a4, 0x22172217, 0x70075008, 0xa9634008, 0x1c1a0006,
+	0x70085009, 0x10004009, 0x00008ed9, 0x3e05c8dd, 0x1c033604,
+	0xabaf1000, 0x856284e1, 0x0003bb80, 0x1000239f, 0x0803a037,
+	0x10007205, 0x8dc61000, 0x38a71c2a, 0x1c2a8dc4, 0x100038a6,
+	0x1c2a8dc5, 0x8dc73867, 0x38681c2a, 0x8c491000, 0x8d4b8cca,
+	0x10001c00, 0x8ccd8c4c, 0x1c008d4e, 0x8c4f1000, 0x8d518cd0,
+	0x10001c00, 0xa759a79a, 0x1000a718, 0xbf80af9b, 0x00001000,
+};
+
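+/* Branch-target offsets for the sequencer program above */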
+static u32 seq_br_instr[] = {
+	0x248, 0x20e, 0x21c, 0xf6, 0x112,
+	0x11c, 0xe4, 0xea, 0xc6, 0xd6,
+	0x126, 0x108, 0x184, 0x1a8, 0x1b0,
+	0x134, 0x158, 0x16e, 0x14a, 0xc2,
+	0x190, 0x1d2, 0x1cc, 0x1d4, 0x1e8,
+	0x0, 0x1f6, 0x32, 0x66, 0xb0,
+	0xa6, 0x1fc, 0x3c, 0x44, 0x5c,
+	0x60, 0x204, 0x30, 0x22a, 0x234,
+	0x23e, 0x0, 0x250, 0x0, 0x0, 0x9a,
+	0x20c,
+};
+
+DEFINE_EXT_CLK(xo_ao, NULL);
+DEFINE_EXT_CLK(sys_apcsaux_clk_gcc, NULL);
+DEFINE_EXT_CLK(lmh_lite_clk_src, NULL);
+
+struct osm_entry {
+	u16 virtual_corner;
+	u16 open_loop_volt;
+	u32 freq_data;
+	u32 override_data;
+	u32 spare_data;
+	long frequency;
+};
+
+static const char *clk_panic_reg_names[] = {"WDOG_DOMAIN_PSTATE_STATUS",
+					     "WDOG_PROGRAM_COUNTER",
+					     "APM_STATUS"};
+static const int clk_panic_reg_offsets[] = {WDOG_DOMAIN_PSTATE_STATUS,
+					     WDOG_PROGRAM_COUNTER};
+
+static struct dentry *osm_debugfs_base;
+
+struct clk_osm {
+	struct clk c;
+	struct osm_entry osm_table[OSM_TABLE_SIZE];
+	struct dentry *debugfs;
+	struct regulator *vdd_reg;
+	struct platform_device *vdd_dev;
+	void *vbases[NUM_BASES];
+	unsigned long pbases[NUM_BASES];
+	void __iomem *debug_regs[DEBUG_REG_NUM];
+	spinlock_t lock;
+
+	u32 cpu_reg_mask;
+	u32 num_entries;
+	u32 cluster_num;
+	u32 irq;
+	u32 apm_crossover_vc;
+	u32 apm_threshold_pre_vc;
+	u32 apm_threshold_vc;
+	u32 mem_acc_crossover_vc;
+	u32 mem_acc_threshold_pre_vc;
+	u32 mem_acc_threshold_vc;
+	u32 cycle_counter_reads;
+	u32 cycle_counter_delay;
+	u32 cycle_counter_factor;
+	u64 total_cycle_counter;
+	u32 prev_cycle_counter;
+	u32 l_val_base;
+	u32 apcs_itm_present;
+	u32 apcs_cfg_rcgr;
+	u32 apcs_cmd_rcgr;
+	u32 apcs_pll_user_ctl;
+	u32 apcs_mem_acc_cfg[MAX_MEM_ACC_VAL_PER_LEVEL];
+	u32 apcs_mem_acc_val[MAX_MEM_ACC_VALUES];
+	u32 llm_sw_overr[LLM_SW_OVERRIDE_CNT];
+	u32 apm_mode_ctl;
+	u32 apm_ctrl_status;
+	u32 osm_clk_rate;
+	u32 xo_clk_rate;
+	u32 acd_td;
+	u32 acd_cr;
+	u32 acd_sscr;
+	u32 acd_extint0_cfg;
+	u32 acd_extint1_cfg;
+	u32 acd_autoxfer_ctl;
+	u32 acd_debugfs_addr;
+	u32 acd_debugfs_addr_size;
+	bool acd_init;
+	bool secure_init;
+	bool red_fsm_en;
+	bool boost_fsm_en;
+	bool safe_fsm_en;
+	bool ps_fsm_en;
+	bool droop_fsm_en;
+	bool wfx_fsm_en;
+	bool pc_fsm_en;
+
+	enum clk_osm_trace_method trace_method;
+	enum clk_osm_trace_packet_id trace_id;
+	struct notifier_block panic_notifier;
+	u32 trace_periodic_timer;
+	bool trace_en;
+	bool wdog_trace_en;
+};
+
+static bool msm8998_v1;
+static bool msm8998_v2;
+
+static inline void clk_osm_masked_write_reg(struct clk_osm *c, u32 val,
+					    u32 offset, u32 mask)
+{
+	u32 val2, orig_val;
+
+	val2 = orig_val = readl_relaxed((char *)c->vbases[OSM_BASE] + offset);
+	val2 &= ~mask;
+	val2 |= val & mask;
+
+	if (val2 != orig_val)
+		writel_relaxed(val2, (char *)c->vbases[OSM_BASE] + offset);
+}
+
+static inline void clk_osm_write_reg(struct clk_osm *c, u32 val, u32 offset)
+{
+	writel_relaxed(val, (char *)c->vbases[OSM_BASE] + offset);
+}
+
+static inline int clk_osm_read_reg(struct clk_osm *c, u32 offset)
+{
+	return readl_relaxed((char *)c->vbases[OSM_BASE] + offset);
+}
+
+static inline int clk_osm_read_reg_no_log(struct clk_osm *c, u32 offset)
+{
+	return readl_relaxed_no_log((char *)c->vbases[OSM_BASE] + offset);
+}
+
+static inline int clk_osm_mb(struct clk_osm *c, int base)
+{
+	return readl_relaxed_no_log((char *)c->vbases[base] + VERSION_REG);
+}
+
+static inline int clk_osm_acd_mb(struct clk_osm *c)
+{
+	return readl_relaxed_no_log((char *)c->vbases[ACD_BASE] +
+				    ACD_HW_VERSION);
+}
+
+static inline void clk_osm_acd_master_write_reg(struct clk_osm *c,
+						u32 val, u32 offset)
+{
+	writel_relaxed(val, (char *)c->vbases[ACD_BASE] + offset);
+}
+
+static int clk_osm_acd_local_read_reg(struct clk_osm *c, u32 offset)
+{
+	u32 reg = 0;
+	int timeout;
+
+	if (offset >= ACD_MASTER_ONLY_REG_ADDR) {
+		pr_err("ACD register at offset=0x%x not locally readable\n",
+		       offset);
+		return -EINVAL;
+	}
+
+	/* Set select field in read control register */
+	writel_relaxed(ACD_REG_RELATIVE_ADDR(offset),
+		       (char *)c->vbases[ACD_BASE] + ACD_READOUT_CFG);
+
+	/* Clear write control register */
+	writel_relaxed(reg, (char *)c->vbases[ACD_BASE] + ACD_WRITE_CTL);
+
+	/* Set select and update_en fields in write control register */
+	reg = (ACD_REG_RELATIVE_ADDR(ACD_READOUT_CFG)
+	       << ACD_WRITE_CTL_SELECT_SHIFT)
+		| ACD_WRITE_CTL_UPDATE_EN;
+	writel_relaxed(reg, (char *)c->vbases[ACD_BASE] + ACD_WRITE_CTL);
+
+	/* Ensure writes complete before polling */
+	clk_osm_acd_mb(c);
+
+	/* Poll write status register */
+	for (timeout = ACD_LOCAL_TRANSFER_TIMEOUT_NS; timeout > 0;
+	     timeout -= 100) {
+		reg = readl_relaxed((char *)c->vbases[ACD_BASE]
+				    + ACD_WRITE_STATUS);
+		if ((reg & (ACD_REG_RELATIVE_ADDR_BITMASK(ACD_READOUT_CFG))))
+			break;
+		ndelay(100);
+	}
+
+	if (!timeout) {
+		pr_err("local read timed out, offset=0x%x status=0x%x\n",
+		       offset, reg);
+		return -ETIMEDOUT;
+	}
+
+	reg = readl_relaxed((char *)c->vbases[ACD_BASE]
+			    + ACD_READOUT);
+	return reg;
+}
+
+static int clk_osm_acd_local_write_reg(struct clk_osm *c, u32 val, u32 offset)
+{
+	u32 reg = 0;
+	int timeout;
+
+	if (offset >= ACD_MASTER_ONLY_REG_ADDR) {
+		pr_err("ACD register at offset=0x%x not transferrable\n",
+		       offset);
+		return -EINVAL;
+	}
+
+	/* Clear write control register */
+	writel_relaxed(reg, (char *)c->vbases[ACD_BASE] + ACD_WRITE_CTL);
+
+	/* Set select and update_en fields in write control register */
+	reg = (ACD_REG_RELATIVE_ADDR(offset) << ACD_WRITE_CTL_SELECT_SHIFT)
+		| ACD_WRITE_CTL_UPDATE_EN;
+	writel_relaxed(reg, (char *)c->vbases[ACD_BASE] + ACD_WRITE_CTL);
+
+	/* Ensure writes complete before polling */
+	clk_osm_acd_mb(c);
+
+	/* Poll write status register */
+	for (timeout = ACD_LOCAL_TRANSFER_TIMEOUT_NS; timeout > 0;
+	     timeout -= 100) {
+		reg = readl_relaxed((char *)c->vbases[ACD_BASE]
+				    + ACD_WRITE_STATUS);
+		if ((reg & (ACD_REG_RELATIVE_ADDR_BITMASK(offset))))
+			break;
+		ndelay(100);
+	}
+
+	if (!timeout) {
+		pr_err("local write timed out, offset=0x%x val=0x%x status=0x%x\n",
+		       offset, val, reg);
+		return -ETIMEDOUT;
+	}
+
+	return 0;
+}
+
+static int clk_osm_acd_master_write_through_reg(struct clk_osm *c,
+						u32 val, u32 offset)
+{
+	writel_relaxed(val, (char *)c->vbases[ACD_BASE] + offset);
+
+	/* Ensure writes complete before transfer to local copy */
+	clk_osm_acd_mb(c);
+
+	return clk_osm_acd_local_write_reg(c, val, offset);
+}
+
+static int clk_osm_acd_auto_local_write_reg(struct clk_osm *c, u32 mask)
+{
+	u32 numregs, bitmask = mask;
+	u32 reg = 0;
+	int timeout;
+
+	/* count number of bits set in register mask */
+	for (numregs = 0; bitmask; numregs++)
+		bitmask &= bitmask - 1;
+
+	/* Program auto-transfer mask */
+	writel_relaxed(mask, (char *)c->vbases[ACD_BASE] + ACD_AUTOXFER_CFG);
+
+	/* Clear start field in auto-transfer register */
+	writel_relaxed(ACD_AUTOXFER_START_CLEAR,
+		       (char *)c->vbases[ACD_BASE] + ACD_AUTOXFER);
+
+	/* Set start field in auto-transfer register */
+	writel_relaxed(ACD_AUTOXFER_START_SET,
+		       (char *)c->vbases[ACD_BASE] + ACD_AUTOXFER);
+
+	/* Ensure writes complete before polling */
+	clk_osm_acd_mb(c);
+
+	/* Poll auto-transfer status register */
+	for (timeout = ACD_LOCAL_TRANSFER_TIMEOUT_NS * numregs;
+	     timeout > 0; timeout -= 100) {
+		reg = readl_relaxed((char *)c->vbases[ACD_BASE]
+				    + ACD_AUTOXFER_STATUS);
+		if (reg & AUTO_XFER_DONE_MASK)
+			break;
+		ndelay(100);
+	}
+
+	if (!timeout) {
+		pr_err("local register auto-transfer timed out, mask=0x%x registers=%d status=0x%x\n",
+		       mask, numregs, reg);
+		return -ETIMEDOUT;
+	}
+
+	return 0;
+}
+
+static int clk_osm_acd_init(struct clk_osm *c)
+{
+	int rc = 0;
+	u32 auto_xfer_mask = 0;
+
+	if (!c->acd_init)
+		return 0;
+
+	c->acd_debugfs_addr = ACD_HW_VERSION;
+
+	/* Program ACD tunable-length delay register */
+	clk_osm_acd_master_write_reg(c, c->acd_td, ACDTD);
+	auto_xfer_mask |= ACD_REG_RELATIVE_ADDR_BITMASK(ACDTD);
+
+	/* Program ACD control register */
+	clk_osm_acd_master_write_reg(c, c->acd_cr, ACDCR);
+	auto_xfer_mask |= ACD_REG_RELATIVE_ADDR_BITMASK(ACDCR);
+
+	/* Program ACD soft start control register */
+	clk_osm_acd_master_write_reg(c, c->acd_sscr, ACDSSCR);
+	auto_xfer_mask |= ACD_REG_RELATIVE_ADDR_BITMASK(ACDSSCR);
+
+	/* Program initial ACD external interface configuration register */
+	clk_osm_acd_master_write_reg(c, c->acd_extint0_cfg, ACD_EXTINT_CFG);
+	auto_xfer_mask |= ACD_REG_RELATIVE_ADDR_BITMASK(ACD_EXTINT_CFG);
+
+	/* Program ACD auto-register transfer control register */
+	clk_osm_acd_master_write_reg(c, c->acd_autoxfer_ctl, ACD_AUTOXFER_CTL);
+
+	/* Ensure writes complete before transfers to local copy */
+	clk_osm_acd_mb(c);
+
+	/* Transfer master copies */
+	rc = clk_osm_acd_auto_local_write_reg(c, auto_xfer_mask);
+	if (rc)
+		return rc;
+
+	/* Switch CPUSS clock source to ACD clock */
+	rc = clk_osm_acd_master_write_through_reg(c, ACD_GFMUX_CFG_SELECT,
+						  ACD_GFMUX_CFG);
+	if (rc)
+		return rc;
+
+	/* Program ACD_DCVS_SW */
+	rc = clk_osm_acd_master_write_through_reg(c,
+				  ACD_DCVS_SW_DCVS_IN_PRGR_SET,
+				  ACD_DCVS_SW);
+	if (rc)
+		return rc;
+
+	rc = clk_osm_acd_master_write_through_reg(c,
+				  ACD_DCVS_SW_DCVS_IN_PRGR_CLEAR,
+				  ACD_DCVS_SW);
+	if (rc)
+		return rc;
+
+	udelay(1);
+
+	/* Program final ACD external interface configuration register */
+	rc = clk_osm_acd_master_write_through_reg(c, c->acd_extint1_cfg,
+						  ACD_EXTINT_CFG);
+	if (rc)
+		return rc;
+
+	/*
+	 * ACDCR, ACDTD, ACDSSCR, ACD_EXTINT_CFG, ACD_GFMUX_CFG
+	 * must be copied from master to local copy on PC exit.
+	 */
+	auto_xfer_mask |= ACD_REG_RELATIVE_ADDR_BITMASK(ACD_GFMUX_CFG);
+	clk_osm_acd_master_write_reg(c, auto_xfer_mask, ACD_AUTOXFER_CFG);
+
+	/* ACD has been initialized and enabled for this cluster */
+	c->acd_init = false;
+	return 0;
+}
+
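+/* Convert a duration in nanoseconds to a count of OSM clock cycles. */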
+static inline int clk_osm_count_ns(struct clk_osm *c, u64 nsec)
+{
+	u64 temp;
+
+	temp = (u64)c->osm_clk_rate * nsec;
+	do_div(temp, 1000000000);
+
+	return temp;
+}
+
+static inline struct clk_osm *to_clk_osm(struct clk *c)
+{
+	return container_of(c, struct clk_osm, c);
+}
+
+static enum handoff clk_osm_handoff(struct clk *c)
+{
+	return HANDOFF_DISABLED_CLK;
+}
+
+static long clk_osm_list_rate(struct clk *c, unsigned int n)
+{
+	if (n >= c->num_fmax)
+		return -ENXIO;
+	return c->fmax[n];
+}
+
+static long clk_osm_round_rate(struct clk *c, unsigned long rate)
+{
+	int i;
+	unsigned long rrate = 0;
+
+	/*
+	 * If the rate passed in is 0, return the first frequency in
+	 * the FMAX table.
+	 */
+	if (!rate)
+		return c->fmax[0];
+
+	for (i = 0; i < c->num_fmax; i++) {
+		if (is_better_rate(rate, rrate, c->fmax[i])) {
+			rrate = c->fmax[i];
+			if (rrate == rate)
+				break;
+		}
+	}
+
+	return rrate;
+}
+
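+/*
+ * Look up the LUT index for @rate, preferring the entry programmed for
+ * MAX_CORE_COUNT cores and falling back to a single-core entry when that
+ * is the only match.
+ */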
+static int clk_osm_search_table(struct osm_entry *table, int entries, long rate)
+{
+	int quad_core_index, single_core_index = 0;
+	int core_count;
+
+	for (quad_core_index = 0; quad_core_index < entries;
+	     quad_core_index++) {
+		core_count =
+			CORE_COUNT_VAL(table[quad_core_index].freq_data);
+		if (rate == table[quad_core_index].frequency &&
+		    core_count == SINGLE_CORE) {
+			single_core_index = quad_core_index;
+			continue;
+		}
+		if (rate == table[quad_core_index].frequency &&
+		    core_count == MAX_CORE_COUNT)
+			return quad_core_index;
+	}
+	if (single_core_index)
+		return single_core_index;
+
+	return -EINVAL;
+}
+
+static int clk_osm_set_rate(struct clk *c, unsigned long rate)
+{
+	struct clk_osm *cpuclk = to_clk_osm(c);
+	int index = 0;
+	unsigned long r_rate;
+
+	r_rate = clk_osm_round_rate(c, rate);
+
+	if (rate != r_rate) {
+		pr_err("invalid rate requested rate=%ld\n", rate);
+		return -EINVAL;
+	}
+
+	/* Convert rate to table index */
+	index = clk_osm_search_table(cpuclk->osm_table,
+				     cpuclk->num_entries, r_rate);
+	if (index < 0) {
+		pr_err("cannot set cluster %u to %lu\n",
+		       cpuclk->cluster_num, rate);
+		return -EINVAL;
+	}
+	pr_debug("rate: %lu --> index %d\n", rate, index);
+
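+	/* Apply the DT-supplied LLM software override sequence, if any */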
+	if (cpuclk->llm_sw_overr[0]) {
+		clk_osm_write_reg(cpuclk, cpuclk->llm_sw_overr[0],
+				  LLM_SW_OVERRIDE_REG);
+		clk_osm_write_reg(cpuclk, cpuclk->llm_sw_overr[1],
+				  LLM_SW_OVERRIDE_REG);
+		udelay(1);
+	}
+
+	/* Choose index and send request to OSM hardware */
+	clk_osm_write_reg(cpuclk, index, DCVS_PERF_STATE_DESIRED_REG);
+
+	if (cpuclk->llm_sw_overr[0]) {
+		udelay(1);
+		clk_osm_write_reg(cpuclk, cpuclk->llm_sw_overr[2],
+				  LLM_SW_OVERRIDE_REG);
+	}
+
+	/* Make sure the write goes through before proceeding */
+	clk_osm_mb(cpuclk, OSM_BASE);
+
+	return 0;
+}
+
+static int clk_osm_enable(struct clk *c)
+{
+	struct clk_osm *cpuclk = to_clk_osm(c);
+	int rc;
+
+	rc = clk_osm_acd_init(cpuclk);
+	if (rc) {
+		pr_err("Failed to initialize ACD for cluster %d, rc=%d\n",
+						cpuclk->cluster_num, rc);
+		return rc;
+	}
+
+	/* Wait for 5 usecs before enabling OSM */
+	udelay(5);
+
+	clk_osm_write_reg(cpuclk, 1, ENABLE_REG);
+
+	/* Make sure the write goes through before proceeding */
+	clk_osm_mb(cpuclk, OSM_BASE);
+
+	/* Wait for 5us for OSM hardware to enable */
+	udelay(5);
+
+	pr_debug("OSM clk enabled for cluster=%d\n", cpuclk->cluster_num);
+
+	return 0;
+}
+
+static struct clk_ops clk_ops_cpu_osm = {
+	.enable = clk_osm_enable,
+	.set_rate = clk_osm_set_rate,
+	.round_rate = clk_osm_round_rate,
+	.list_rate = clk_osm_list_rate,
+	.handoff = clk_osm_handoff,
+};
+
+static struct regulator *vdd_pwrcl;
+static struct regulator *vdd_perfcl;
+
+static struct clk_freq_tbl ftbl_osm_clk_src[] = {
+	F(  200000000,    lmh_lite_clk_src,    1.5,    0,     0),
+	F_END
+};
+
+static struct rcg_clk osm_clk_src = {
+	.cmd_rcgr_reg = APCS_COMMON_LMH_CMD_RCGR,
+	.set_rate = set_rate_hid,
+	.freq_tbl = ftbl_osm_clk_src,
+	.current_freq = &rcg_dummy_freq,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "osm_clk_src",
+		.ops = &clk_ops_rcg,
+		CLK_INIT(osm_clk_src.c),
+	},
+};
+
+static struct clk_osm pwrcl_clk = {
+	.cluster_num = 0,
+	.cpu_reg_mask = 0x3,
+	.c = {
+		.dbg_name = "pwrcl_clk",
+		.ops = &clk_ops_cpu_osm,
+		.parent = &xo_ao.c,
+		CLK_INIT(pwrcl_clk.c),
+	},
+};
+
+static struct clk_osm perfcl_clk = {
+	.cluster_num = 1,
+	.cpu_reg_mask = 0x103,
+	.c = {
+		.dbg_name = "perfcl_clk",
+		.ops = &clk_ops_cpu_osm,
+		.parent = &xo_ao.c,
+		CLK_INIT(perfcl_clk.c),
+	},
+};
+
+static struct clk_ops clk_ops_cpu_dbg_mux;
+
+static struct mux_clk cpu_debug_mux = {
+	.offset = 0x0,
+	.mask = 0x3,
+	.shift = 8,
+	.ops = &mux_reg_ops,
+	MUX_SRC_LIST(
+		{ &pwrcl_clk.c, 0x00 },
+		{ &perfcl_clk.c, 0x01 },
+	),
+	.base = &debug_base,
+	.c = {
+		.dbg_name = "cpu_debug_mux",
+		.ops = &clk_ops_cpu_dbg_mux,
+		.flags = CLKFLAG_NO_RATE_CACHE,
+		CLK_INIT(cpu_debug_mux.c),
+	},
+};
+
+static struct clk_lookup cpu_clocks_osm[] = {
+	CLK_LIST(pwrcl_clk),
+	CLK_LIST(perfcl_clk),
+	CLK_LIST(sys_apcsaux_clk_gcc),
+	CLK_LIST(xo_ao),
+	CLK_LIST(osm_clk_src),
+	CLK_LIST(cpu_debug_mux),
+};
+
+static unsigned long cpu_dbg_mux_get_rate(struct clk *clk)
+{
+	/* Account for the divider between the clock and the debug mux */
+	if (!strcmp(clk->parent->dbg_name, "pwrcl_clk"))
+		return clk->rate / 4;
+	else if (!strcmp(clk->parent->dbg_name, "perfcl_clk"))
+		return clk->rate / 8;
+	return clk->rate;
+}
+
+static void clk_osm_print_osm_table(struct clk_osm *c)
+{
+	int i;
+	struct osm_entry *table = c->osm_table;
+	u32 pll_src, pll_div, lval, core_count;
+
+	pr_debug("Index, Frequency, VC, OLV (mv), Core Count, PLL Src, PLL Div, L-Val, ACC Level\n");
+	for (i = 0; i < c->num_entries; i++) {
+		pll_src = (table[i].freq_data & GENMASK(27, 26)) >> 26;
+		pll_div = (table[i].freq_data & GENMASK(25, 24)) >> 24;
+		lval = L_VAL(table[i].freq_data);
+		core_count = (table[i].freq_data & GENMASK(18, 16)) >> 16;
+
+		pr_debug("%3d, %11lu, %2u, %5u, %2u, %6u, %8u, %7u, %5u\n",
+			i,
+			table[i].frequency,
+			table[i].virtual_corner,
+			table[i].open_loop_volt,
+			core_count,
+			pll_src,
+			pll_div,
+			lval,
+			table[i].spare_data);
+	}
+	pr_debug("APM threshold corner=%d, crossover corner=%d\n",
+		 c->apm_threshold_vc, c->apm_crossover_vc);
+	pr_debug("MEM-ACC threshold corner=%d, crossover corner=%d\n",
+		 c->mem_acc_threshold_vc, c->mem_acc_crossover_vc);
+}
+
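+/*
+ * Parse the per-cluster OSM lookup table from DT: each row holds
+ * NUM_FIELDS u32 cells (frequency, frequency data, PLL overrides, spare
+ * data, virtual corner). Rows whose core count equals MAX_CORE_COUNT
+ * populate the clock's fmax table.
+ */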
+static int clk_osm_get_lut(struct platform_device *pdev,
+			   struct clk_osm *c, char *prop_name)
+{
+	struct clk *clk = &c->c;
+	struct device_node *of = pdev->dev.of_node;
+	int prop_len, total_elems, num_rows, i, j, k;
+	int rc = 0;
+	u32 *array;
+	u32 *fmax_temp;
+	u32 data;
+	bool last_entry = false;
+	unsigned long abs_fmax = 0;
+
+	if (!of_find_property(of, prop_name, &prop_len)) {
+		dev_err(&pdev->dev, "missing %s\n", prop_name);
+		return -EINVAL;
+	}
+
+	total_elems = prop_len / sizeof(u32);
+	if (total_elems % NUM_FIELDS) {
+		dev_err(&pdev->dev, "bad length %d\n", prop_len);
+		return -EINVAL;
+	}
+
+	num_rows = total_elems / NUM_FIELDS;
+
+	fmax_temp = devm_kzalloc(&pdev->dev, num_rows * sizeof(*fmax_temp),
+				 GFP_KERNEL);
+	if (!fmax_temp)
+		return -ENOMEM;
+
+	array = devm_kzalloc(&pdev->dev, prop_len, GFP_KERNEL);
+	if (!array)
+		return -ENOMEM;
+
+	rc = of_property_read_u32_array(of, prop_name, array, total_elems);
+	if (rc) {
+		dev_err(&pdev->dev, "Unable to parse OSM table, rc=%d\n", rc);
+		goto exit;
+	}
+
+	pr_debug("%s: Entries in Table: %d\n", __func__, num_rows);
+	c->num_entries = num_rows;
+	if (c->num_entries > OSM_TABLE_SIZE) {
+		pr_err("LUT entries %d exceed maximum size %d\n",
+		       c->num_entries, OSM_TABLE_SIZE);
+		rc = -EINVAL;
+		goto exit;
+	}
+
+	for (i = 0, j = 0, k = 0; j < OSM_TABLE_SIZE; j++) {
+		c->osm_table[j].frequency = array[i + FREQ];
+		c->osm_table[j].freq_data = array[i + FREQ_DATA];
+		c->osm_table[j].override_data = array[i + PLL_OVERRIDES];
+		c->osm_table[j].spare_data = array[i + SPARE_DATA];
+		/* Voltage corners are 0 based in the OSM LUT */
+		c->osm_table[j].virtual_corner = array[i + VIRTUAL_CORNER] - 1;
+		pr_debug("index=%d freq=%ld virtual_corner=%d freq_data=0x%x override_data=0x%x spare_data=0x%x\n",
+			 j, c->osm_table[j].frequency,
+			 c->osm_table[j].virtual_corner,
+			 c->osm_table[j].freq_data,
+			 c->osm_table[j].override_data,
+			 c->osm_table[j].spare_data);
+
+		data = (array[i + FREQ_DATA] & GENMASK(18, 16)) >> 16;
+		if (!last_entry && data == MAX_CORE_COUNT) {
+			fmax_temp[k] = array[i];
+			k++;
+		}
+
+		if (i < total_elems - NUM_FIELDS)
+			i += NUM_FIELDS;
+		else {
+			abs_fmax = array[i];
+			last_entry = true;
+		}
+	}
+
+	fmax_temp[k++] = abs_fmax;
+	clk->fmax = devm_kzalloc(&pdev->dev, k * sizeof(unsigned long),
+				 GFP_KERNEL);
+	if (!clk->fmax) {
+		rc = -ENOMEM;
+		goto exit;
+	}
+
+	for (i = 0; i < k; i++)
+		clk->fmax[i] = fmax_temp[i];
+
+	clk->num_fmax = k;
+exit:
+	devm_kfree(&pdev->dev, fmax_temp);
+	devm_kfree(&pdev->dev, array);
+	return rc;
+}
+
+static int clk_osm_parse_dt_configs(struct platform_device *pdev)
+{
+	struct device_node *of = pdev->dev.of_node;
+	u32 *array;
+	int i, rc = 0;
+
+	array = devm_kzalloc(&pdev->dev, MAX_CLUSTER_CNT * sizeof(u32),
+			     GFP_KERNEL);
+	if (!array)
+		return -ENOMEM;
+
+	rc = of_property_read_u32_array(of, "qcom,l-val-base",
+					array, MAX_CLUSTER_CNT);
+	if (rc) {
+		dev_err(&pdev->dev, "unable to find qcom,l-val-base property, rc=%d\n",
+			rc);
+		return -EINVAL;
+	}
+
+	pwrcl_clk.l_val_base = array[pwrcl_clk.cluster_num];
+	perfcl_clk.l_val_base = array[perfcl_clk.cluster_num];
+
+	rc = of_property_read_u32_array(of, "qcom,apcs-itm-present",
+				  array, MAX_CLUSTER_CNT);
+	if (rc) {
+		dev_err(&pdev->dev, "unable to find qcom,apcs-itm-present property, rc=%d\n",
+			rc);
+		return -EINVAL;
+	}
+
+	pwrcl_clk.apcs_itm_present = array[pwrcl_clk.cluster_num];
+	perfcl_clk.apcs_itm_present = array[perfcl_clk.cluster_num];
+
+	rc = of_property_read_u32_array(of, "qcom,apcs-cfg-rcgr",
+					array, MAX_CLUSTER_CNT);
+	if (rc) {
+		dev_err(&pdev->dev, "unable to find qcom,apcs-cfg-rcgr property, rc=%d\n",
+			rc);
+		return -EINVAL;
+	}
+
+	pwrcl_clk.apcs_cfg_rcgr = array[pwrcl_clk.cluster_num];
+	perfcl_clk.apcs_cfg_rcgr = array[perfcl_clk.cluster_num];
+
+	rc = of_property_read_u32_array(of, "qcom,apcs-cmd-rcgr",
+					array, MAX_CLUSTER_CNT);
+	if (rc) {
+		dev_err(&pdev->dev, "unable to find qcom,apcs-cmd-rcgr property, rc=%d\n",
+			rc);
+		return -EINVAL;
+	}
+
+	pwrcl_clk.apcs_cmd_rcgr = array[pwrcl_clk.cluster_num];
+	perfcl_clk.apcs_cmd_rcgr = array[perfcl_clk.cluster_num];
+
+	rc = of_property_read_u32_array(of, "qcom,apcs-pll-user-ctl",
+					array, MAX_CLUSTER_CNT);
+	if (rc) {
+		dev_err(&pdev->dev, "unable to find qcom,apcs-pll-user-ctl property, rc=%d\n",
+			rc);
+		return -EINVAL;
+	}
+
+	pwrcl_clk.apcs_pll_user_ctl = array[pwrcl_clk.cluster_num];
+	perfcl_clk.apcs_pll_user_ctl = array[perfcl_clk.cluster_num];
+
+	rc = of_property_read_u32_array(of, "qcom,apm-mode-ctl",
+				  array, MAX_CLUSTER_CNT);
+	if (rc) {
+		dev_err(&pdev->dev, "unable to find qcom,apm-mode-ctl property, rc=%d\n",
+			rc);
+		return -EINVAL;
+	}
+
+	pwrcl_clk.apm_mode_ctl = array[pwrcl_clk.cluster_num];
+	perfcl_clk.apm_mode_ctl = array[perfcl_clk.cluster_num];
+
+	rc = of_property_read_u32_array(of, "qcom,apm-ctrl-status",
+				  array, MAX_CLUSTER_CNT);
+	if (rc) {
+		dev_err(&pdev->dev, "unable to find qcom,apm-ctrl-status property, rc=%d\n",
+			rc);
+		return -EINVAL;
+	}
+
+	pwrcl_clk.apm_ctrl_status = array[pwrcl_clk.cluster_num];
+	perfcl_clk.apm_ctrl_status = array[perfcl_clk.cluster_num];
+
+	for (i = 0; i < LLM_SW_OVERRIDE_CNT; i++)
+		of_property_read_u32_index(of, "qcom,llm-sw-overr",
+					   pwrcl_clk.cluster_num *
+					   LLM_SW_OVERRIDE_CNT + i,
+					   &pwrcl_clk.llm_sw_overr[i]);
+
+	for (i = 0; i < LLM_SW_OVERRIDE_CNT; i++)
+		of_property_read_u32_index(of, "qcom,llm-sw-overr",
+					   perfcl_clk.cluster_num *
+					   LLM_SW_OVERRIDE_CNT + i,
+					   &perfcl_clk.llm_sw_overr[i]);
+
+	if (pwrcl_clk.acd_init || perfcl_clk.acd_init) {
+		rc = of_property_read_u32_array(of, "qcom,acdtd-val",
+						array, MAX_CLUSTER_CNT);
+		if (rc) {
+			dev_err(&pdev->dev, "unable to find qcom,acdtd-val property, rc=%d\n",
+				rc);
+			return -EINVAL;
+		}
+
+		pwrcl_clk.acd_td = array[pwrcl_clk.cluster_num];
+		perfcl_clk.acd_td = array[perfcl_clk.cluster_num];
+
+		rc = of_property_read_u32_array(of, "qcom,acdcr-val",
+						array, MAX_CLUSTER_CNT);
+		if (rc) {
+			dev_err(&pdev->dev, "unable to find qcom,acdcr-val property, rc=%d\n",
+				rc);
+			return -EINVAL;
+		}
+
+		pwrcl_clk.acd_cr = array[pwrcl_clk.cluster_num];
+		perfcl_clk.acd_cr = array[perfcl_clk.cluster_num];
+
+		rc = of_property_read_u32_array(of, "qcom,acdsscr-val",
+						array, MAX_CLUSTER_CNT);
+		if (rc) {
+			dev_err(&pdev->dev, "unable to find qcom,acdsscr-val property, rc=%d\n",
+				rc);
+			return -EINVAL;
+		}
+
+		pwrcl_clk.acd_sscr = array[pwrcl_clk.cluster_num];
+		perfcl_clk.acd_sscr = array[perfcl_clk.cluster_num];
+
+		rc = of_property_read_u32_array(of, "qcom,acdextint0-val",
+						array, MAX_CLUSTER_CNT);
+		if (rc) {
+			dev_err(&pdev->dev, "unable to find qcom,acdextint0-val property, rc=%d\n",
+				rc);
+			return -EINVAL;
+		}
+
+		pwrcl_clk.acd_extint0_cfg = array[pwrcl_clk.cluster_num];
+		perfcl_clk.acd_extint0_cfg = array[perfcl_clk.cluster_num];
+
+		rc = of_property_read_u32_array(of, "qcom,acdextint1-val",
+						array, MAX_CLUSTER_CNT);
+		if (rc) {
+			dev_err(&pdev->dev, "unable to find qcom,acdextint1-val property, rc=%d\n",
+				rc);
+			return -EINVAL;
+		}
+
+		pwrcl_clk.acd_extint1_cfg = array[pwrcl_clk.cluster_num];
+		perfcl_clk.acd_extint1_cfg = array[perfcl_clk.cluster_num];
+
+		rc = of_property_read_u32_array(of, "qcom,acdautoxfer-val",
+						array, MAX_CLUSTER_CNT);
+		if (rc) {
+			dev_err(&pdev->dev, "unable to find qcom,acdautoxfer-val property, rc=%d\n",
+				rc);
+			return -EINVAL;
+		}
+
+		pwrcl_clk.acd_autoxfer_ctl = array[pwrcl_clk.cluster_num];
+		perfcl_clk.acd_autoxfer_ctl = array[perfcl_clk.cluster_num];
+	}
+
+	rc = of_property_read_u32(of, "qcom,xo-clk-rate",
+				  &pwrcl_clk.xo_clk_rate);
+	if (rc) {
+		dev_err(&pdev->dev, "unable to find qcom,xo-clk-rate property, rc=%d\n",
+			rc);
+		return -EINVAL;
+	}
+
+	perfcl_clk.xo_clk_rate = pwrcl_clk.xo_clk_rate;
+
+	rc = of_property_read_u32(of, "qcom,osm-clk-rate",
+				  &pwrcl_clk.osm_clk_rate);
+	if (rc) {
+		dev_err(&pdev->dev, "unable to find qcom,osm-clk-rate property, rc=%d\n",
+			rc);
+		return -EINVAL;
+	}
+	perfcl_clk.osm_clk_rate = pwrcl_clk.osm_clk_rate;
+
+	rc = of_property_read_u32(of, "qcom,cc-reads",
+				  &pwrcl_clk.cycle_counter_reads);
+	if (rc) {
+		dev_err(&pdev->dev, "unable to find qcom,cc-reads property, rc=%d\n",
+			rc);
+		return -EINVAL;
+	}
+	perfcl_clk.cycle_counter_reads = pwrcl_clk.cycle_counter_reads;
+
+	rc = of_property_read_u32(of, "qcom,cc-delay",
+				  &pwrcl_clk.cycle_counter_delay);
+	if (rc)
+		dev_dbg(&pdev->dev, "no delays between cycle counter reads\n");
+	else
+		perfcl_clk.cycle_counter_delay = pwrcl_clk.cycle_counter_delay;
+
+	rc = of_property_read_u32(of, "qcom,cc-factor",
+				  &pwrcl_clk.cycle_counter_factor);
+	if (rc)
+		dev_dbg(&pdev->dev, "no factor specified for cycle counter estimation\n");
+	else
+		perfcl_clk.cycle_counter_factor =
+			pwrcl_clk.cycle_counter_factor;
+
+	perfcl_clk.red_fsm_en = pwrcl_clk.red_fsm_en =
+		of_property_read_bool(of, "qcom,red-fsm-en");
+
+	perfcl_clk.boost_fsm_en = pwrcl_clk.boost_fsm_en =
+		of_property_read_bool(of, "qcom,boost-fsm-en");
+
+	perfcl_clk.safe_fsm_en = pwrcl_clk.safe_fsm_en =
+		of_property_read_bool(of, "qcom,safe-fsm-en");
+
+	perfcl_clk.ps_fsm_en = pwrcl_clk.ps_fsm_en =
+		of_property_read_bool(of, "qcom,ps-fsm-en");
+
+	perfcl_clk.droop_fsm_en = pwrcl_clk.droop_fsm_en =
+		of_property_read_bool(of, "qcom,droop-fsm-en");
+
+	perfcl_clk.wfx_fsm_en = pwrcl_clk.wfx_fsm_en =
+		of_property_read_bool(of, "qcom,wfx-fsm-en");
+
+	perfcl_clk.pc_fsm_en = pwrcl_clk.pc_fsm_en =
+		of_property_read_bool(of, "qcom,pc-fsm-en");
+
+	devm_kfree(&pdev->dev, array);
+
+	perfcl_clk.secure_init = pwrcl_clk.secure_init =
+		of_property_read_bool(of, "qcom,osm-no-tz");
+
+	if (!pwrcl_clk.secure_init)
+		return rc;
+
+	rc = of_property_read_u32_array(of, "qcom,pwrcl-apcs-mem-acc-cfg",
+					pwrcl_clk.apcs_mem_acc_cfg,
+					MAX_MEM_ACC_VAL_PER_LEVEL);
+	if (rc) {
+		dev_err(&pdev->dev, "unable to find qcom,pwrcl-apcs-mem-acc-cfg property, rc=%d\n",
+			rc);
+		return -EINVAL;
+	}
+
+	of_property_read_u32_array(of, "qcom,perfcl-apcs-mem-acc-cfg",
+				   perfcl_clk.apcs_mem_acc_cfg,
+				   MAX_MEM_ACC_VAL_PER_LEVEL);
+	if (rc) {
+		dev_err(&pdev->dev, "unable to find qcom,perfcl-apcs-mem-acc-cfg property, rc=%d\n",
+			rc);
+		return -EINVAL;
+	}
+
+	rc = of_property_read_u32_array(of, "qcom,pwrcl-apcs-mem-acc-val",
+					pwrcl_clk.apcs_mem_acc_val,
+					MAX_MEM_ACC_VALUES);
+	if (rc) {
+		dev_err(&pdev->dev, "unable to find qcom,pwrcl-apcs-mem-acc-val property, rc=%d\n",
+			rc);
+		return -EINVAL;
+	}
+
+	rc = of_property_read_u32_array(of, "qcom,perfcl-apcs-mem-acc-val",
+					perfcl_clk.apcs_mem_acc_val,
+					MAX_MEM_ACC_VALUES);
+	if (rc) {
+		dev_err(&pdev->dev, "unable to find qcom,perfcl-apcs-mem-acc-val property, rc=%d\n",
+			rc);
+		return -EINVAL;
+	}
+
+	return rc;
+}
+
+static int clk_osm_resources_init(struct platform_device *pdev)
+{
+	struct device_node *node;
+	struct resource *res;
+	struct clk *c;
+	unsigned long pbase;
+	int i, rc = 0;
+	void *vbase;
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "osm");
+	if (!res) {
+		dev_err(&pdev->dev,
+			"Unable to get platform resource for osm");
+		return -ENOMEM;
+	}
+
+	pwrcl_clk.pbases[OSM_BASE] = (unsigned long)res->start;
+	pwrcl_clk.vbases[OSM_BASE] = devm_ioremap(&pdev->dev, res->start,
+						  resource_size(res));
+	if (!pwrcl_clk.vbases[OSM_BASE]) {
+		dev_err(&pdev->dev, "Unable to map in osm base\n");
+		return -ENOMEM;
+	}
+
+	perfcl_clk.pbases[OSM_BASE] = pwrcl_clk.pbases[OSM_BASE] +
+		perfcl_clk.cluster_num * OSM_CORE_TABLE_SIZE;
+	perfcl_clk.vbases[OSM_BASE] = pwrcl_clk.vbases[OSM_BASE] +
+		perfcl_clk.cluster_num * OSM_CORE_TABLE_SIZE;
+
+	for (i = 0; i < MAX_CLUSTER_CNT; i++) {
+		res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+						   i == pwrcl_clk.cluster_num ?
+						   "pwrcl_pll" : "perfcl_pll");
+		if (!res) {
+			dev_err(&pdev->dev,
+				"Unable to get platform resource\n");
+			return -ENOMEM;
+		}
+		pbase = (unsigned long)res->start;
+		vbase = devm_ioremap(&pdev->dev, res->start,
+				     resource_size(res));
+
+		if (!vbase) {
+			dev_err(&pdev->dev, "Unable to map in base\n");
+			return -ENOMEM;
+		}
+
+		if (i == pwrcl_clk.cluster_num) {
+			pwrcl_clk.pbases[PLL_BASE] = pbase;
+			pwrcl_clk.vbases[PLL_BASE] = vbase;
+		} else {
+			perfcl_clk.pbases[PLL_BASE] = pbase;
+			perfcl_clk.vbases[PLL_BASE] = vbase;
+		}
+	}
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "debug");
+	if (!res) {
+		dev_err(&pdev->dev, "Failed to get debug mux base\n");
+		return -EINVAL;
+	}
+
+	debug_base = devm_ioremap(&pdev->dev, res->start,
+				  resource_size(res));
+	if (!debug_base) {
+		dev_err(&pdev->dev, "Unable to map in debug mux base\n");
+		return -ENOMEM;
+	}
+
+	clk_ops_cpu_dbg_mux = clk_ops_gen_mux;
+	clk_ops_cpu_dbg_mux.get_rate = cpu_dbg_mux_get_rate;
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "apcs_common");
+	if (!res) {
+		dev_err(&pdev->dev, "Failed to get apcs common base\n");
+		return -EINVAL;
+	}
+
+	virt_base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
+	if (!virt_base) {
+		dev_err(&pdev->dev, "Failed to map apcs common registers\n");
+		return -ENOMEM;
+	}
+
+	/* efuse speed bin fuses are optional */
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+					   "pwrcl_efuse");
+	if (res) {
+		pbase = (unsigned long)res->start;
+		vbase = devm_ioremap(&pdev->dev, res->start,
+				     resource_size(res));
+		if (!vbase) {
+			dev_err(&pdev->dev, "Unable to map in pwrcl_efuse base\n");
+			return -ENOMEM;
+		}
+		pwrcl_clk.pbases[EFUSE_BASE] = pbase;
+		pwrcl_clk.vbases[EFUSE_BASE] = vbase;
+	}
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+					   "perfcl_efuse");
+	if (res) {
+		pbase = (unsigned long)res->start;
+		vbase = devm_ioremap(&pdev->dev, res->start,
+				     resource_size(res));
+		if (!vbase) {
+			dev_err(&pdev->dev, "Unable to map in perfcl_efuse base\n");
+			return -ENOMEM;
+		}
+		perfcl_clk.pbases[EFUSE_BASE] = pbase;
+		perfcl_clk.vbases[EFUSE_BASE] = vbase;
+	}
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+					   "pwrcl_acd");
+	if (res) {
+		pbase = (unsigned long)res->start;
+		vbase = devm_ioremap(&pdev->dev, res->start,
+				     resource_size(res));
+		if (!vbase) {
+			dev_err(&pdev->dev, "Unable to map in pwrcl_acd base\n");
+			return -ENOMEM;
+		}
+		pwrcl_clk.pbases[ACD_BASE] = pbase;
+		pwrcl_clk.acd_debugfs_addr_size = resource_size(res);
+		pwrcl_clk.vbases[ACD_BASE] = vbase;
+		pwrcl_clk.acd_init = true;
+	} else {
+		pwrcl_clk.acd_init = false;
+	}
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+					   "perfcl_acd");
+	if (res) {
+		pbase = (unsigned long)res->start;
+		vbase = devm_ioremap(&pdev->dev, res->start,
+				     resource_size(res));
+		if (!vbase) {
+			dev_err(&pdev->dev, "Unable to map in perfcl_acd base\n");
+			return -ENOMEM;
+		}
+		perfcl_clk.pbases[ACD_BASE] = pbase;
+		perfcl_clk.acd_debugfs_addr_size = resource_size(res);
+		perfcl_clk.vbases[ACD_BASE] = vbase;
+		perfcl_clk.acd_init = true;
+	} else {
+		perfcl_clk.acd_init = false;
+	}
+
+	pwrcl_clk.debug_regs[0] = devm_ioremap(&pdev->dev,
+					       pwrcl_clk.pbases[OSM_BASE] +
+					       clk_panic_reg_offsets[0],
+					       0x4);
+	if (!pwrcl_clk.debug_regs[0]) {
+		dev_err(&pdev->dev, "Failed to map %s debug register\n",
+			clk_panic_reg_names[0]);
+		return -ENOMEM;
+	}
+
+	pwrcl_clk.debug_regs[1] = devm_ioremap(&pdev->dev,
+					       pwrcl_clk.pbases[OSM_BASE] +
+					       clk_panic_reg_offsets[1],
+					       0x4);
+	if (!pwrcl_clk.debug_regs[1]) {
+		dev_err(&pdev->dev, "Failed to map %s debug register\n",
+			clk_panic_reg_names[1]);
+		return -ENOMEM;
+	}
+
+	pwrcl_clk.debug_regs[2] = devm_ioremap(&pdev->dev,
+					       pwrcl_clk.apm_ctrl_status,
+					       0x4);
+	if (!pwrcl_clk.debug_regs[2]) {
+		dev_err(&pdev->dev, "Failed to map %s debug register\n",
+			clk_panic_reg_names[2]);
+		return -ENOMEM;
+	}
+
+	perfcl_clk.debug_regs[0] = devm_ioremap(&pdev->dev,
+						perfcl_clk.pbases[OSM_BASE] +
+						clk_panic_reg_offsets[0],
+						0x4);
+	if (!perfcl_clk.debug_regs[0]) {
+		dev_err(&pdev->dev, "Failed to map %s debug register\n",
+			clk_panic_reg_names[0]);
+		return -ENOMEM;
+	}
+
+	perfcl_clk.debug_regs[1] = devm_ioremap(&pdev->dev,
+						perfcl_clk.pbases[OSM_BASE] +
+						clk_panic_reg_offsets[1],
+						0x4);
+	if (!perfcl_clk.debug_regs[1]) {
+		dev_err(&pdev->dev, "Failed to map %s debug register\n",
+			clk_panic_reg_names[1]);
+		return -ENOMEM;
+	}
+
+	perfcl_clk.debug_regs[2] = devm_ioremap(&pdev->dev,
+						perfcl_clk.apm_ctrl_status,
+						0x4);
+	if (!perfcl_clk.debug_regs[2]) {
+		dev_err(&pdev->dev, "Failed to map %s debug register\n",
+			clk_panic_reg_names[2]);
+		return -ENOMEM;
+	}
+
+	vdd_pwrcl = devm_regulator_get(&pdev->dev, "vdd-pwrcl");
+	if (IS_ERR(vdd_pwrcl)) {
+		rc = PTR_ERR(vdd_pwrcl);
+		if (rc != -EPROBE_DEFER)
+			dev_err(&pdev->dev, "Unable to get the pwrcl vreg, rc=%d\n",
+				rc);
+		return rc;
+	}
+
+	vdd_perfcl = devm_regulator_get(&pdev->dev, "vdd-perfcl");
+	if (IS_ERR(vdd_perfcl)) {
+		rc = PTR_ERR(vdd_perfcl);
+		if (rc != -EPROBE_DEFER)
+			dev_err(&pdev->dev, "Unable to get the perfcl vreg, rc=%d\n",
+				rc);
+		return rc;
+	}
+
+	pwrcl_clk.vdd_reg = vdd_pwrcl;
+	perfcl_clk.vdd_reg = vdd_perfcl;
+
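+	/*
+	 * Resolve the platform device backing each cluster supply by
+	 * walking two levels up from the regulator node referenced in DT.
+	 */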
+	node = of_parse_phandle(pdev->dev.of_node, "vdd-pwrcl-supply", 0);
+	if (!node) {
+		pr_err("Unable to find vdd-pwrcl-supply\n");
+		return -EINVAL;
+	}
+
+	pwrcl_clk.vdd_dev = of_find_device_by_node(node->parent->parent);
+	if (!pwrcl_clk.vdd_dev) {
+		pr_err("Unable to find device for vdd-pwrcl-supply node\n");
+		return -EINVAL;
+	}
+
+	node = of_parse_phandle(pdev->dev.of_node,
+				"vdd-perfcl-supply", 0);
+	if (!node) {
+		pr_err("Unable to find vdd-perfcl-supply\n");
+		return -EINVAL;
+	}
+
+	perfcl_clk.vdd_dev = of_find_device_by_node(node->parent->parent);
+	if (!perfcl_clk.vdd_dev) {
+		pr_err("Unable to find device for vdd-perfcl-supply\n");
+		return -EINVAL;
+	}
+
+	c = devm_clk_get(&pdev->dev, "aux_clk");
+	if (IS_ERR(c)) {
+		rc = PTR_ERR(c);
+		if (rc != -EPROBE_DEFER)
+			dev_err(&pdev->dev, "Unable to get aux_clk, rc=%d\n",
+				rc);
+		return rc;
+	}
+	sys_apcsaux_clk_gcc.c.parent = c;
+
+	c = devm_clk_get(&pdev->dev, "xo_ao");
+	if (IS_ERR(c)) {
+		rc = PTR_ERR(c);
+		if (rc != -EPROBE_DEFER)
+			dev_err(&pdev->dev, "Unable to get xo_ao clk, rc=%d\n",
+				rc);
+		return rc;
+	}
+	xo_ao.c.parent = c;
+
+	return 0;
+}
+
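+/*
+ * Bring up a cluster PLL at a safe L-value following the usual Qualcomm
+ * sequence: assert BYPASSNL, release reset, wait for lock, then enable
+ * the output.
+ */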
+static void clk_osm_setup_cluster_pll(struct clk_osm *c)
+{
+	writel_relaxed(0x0, c->vbases[PLL_BASE] + PLL_MODE);
+	writel_relaxed(0x20, c->vbases[PLL_BASE] + PLL_L_VAL);
+	writel_relaxed(0x01000008, c->vbases[PLL_BASE] +
+		       PLL_USER_CTRL);
+	writel_relaxed(0x20004AA8, c->vbases[PLL_BASE] +
+		       PLL_CONFIG_CTL_LO);
+	writel_relaxed(0x2, c->vbases[PLL_BASE] +
+		       PLL_MODE);
+
+	/* Ensure writes complete before delaying */
+	clk_osm_mb(c, PLL_BASE);
+
+	udelay(PLL_WAIT_LOCK_TIME_US);
+
+	writel_relaxed(0x6, c->vbases[PLL_BASE] + PLL_MODE);
+
+	/* Ensure write completes before delaying */
+	clk_osm_mb(c, PLL_BASE);
+
+	usleep_range(50, 75);
+
+	writel_relaxed(0x7, c->vbases[PLL_BASE] + PLL_MODE);
+}
+
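+/*
+ * Program the complete OSM lookup table into hardware, padding unused
+ * rows with the last valid entry. Tables that map one virtual corner to
+ * different spare (MEM-ACC) levels are rejected.
+ */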
+static int clk_osm_setup_hw_table(struct clk_osm *c)
+{
+	struct osm_entry *entry = c->osm_table;
+	int i;
+	u32 freq_val = 0, volt_val = 0, override_val = 0, spare_val = 0;
+	u32 table_entry_offset = 0, last_spare = 0, last_virtual_corner = 0;
+
+	for (i = 0; i < OSM_TABLE_SIZE; i++) {
+		if (i < c->num_entries) {
+			freq_val = entry[i].freq_data;
+			volt_val = BVAL(21, 16, entry[i].virtual_corner)
+				| BVAL(11, 0, entry[i].open_loop_volt);
+			override_val = entry[i].override_data;
+			spare_val = entry[i].spare_data;
+
+			if (last_virtual_corner && last_virtual_corner ==
+			    entry[i].virtual_corner && last_spare !=
+			    entry[i].spare_data) {
+				pr_err("invalid LUT entry at row=%d virtual_corner=%d, spare_data=%d\n",
+				       i, entry[i].virtual_corner,
+				       entry[i].spare_data);
+				return -EINVAL;
+			}
+			last_virtual_corner = entry[i].virtual_corner;
+			last_spare = entry[i].spare_data;
+		}
+
+		table_entry_offset = i * OSM_REG_SIZE;
+		clk_osm_write_reg(c, i, INDEX_REG + table_entry_offset);
+		clk_osm_write_reg(c, freq_val, FREQ_REG + table_entry_offset);
+		clk_osm_write_reg(c, volt_val, VOLT_REG + table_entry_offset);
+		clk_osm_write_reg(c, override_val, OVERRIDE_REG +
+				  table_entry_offset);
+		clk_osm_write_reg(c, spare_val, SPARE_REG +
+				  table_entry_offset);
+	}
+
+	/* Make sure all writes go through */
+	clk_osm_mb(c, OSM_BASE);
+
+	return 0;
+}
+
+static int clk_osm_resolve_open_loop_voltages(struct clk_osm *c)
+{
+	struct regulator *regulator = c->vdd_reg;
+	u32 vc, mv;
+	int i;
+
+	for (i = 0; i < OSM_TABLE_SIZE; i++) {
+		vc = c->osm_table[i].virtual_corner + 1;
+		/* Voltage is in uv. Convert to mv */
+		mv = regulator_list_corner_voltage(regulator, vc) / 1000;
+		c->osm_table[i].open_loop_volt = mv;
+	}
+
+	return 0;
+}
+
+static int clk_osm_resolve_crossover_corners(struct clk_osm *c,
+				     struct platform_device *pdev,
+				     const char *mem_acc_prop)
+{
+	struct regulator *regulator = c->vdd_reg;
+	int count, vc, i, apm_threshold;
+	int mem_acc_threshold = 0;
+	int rc = 0;
+	u32 corner_volt;
+
+	rc = of_property_read_u32(pdev->dev.of_node,
+				  "qcom,apm-threshold-voltage",
+				  &apm_threshold);
+	if (rc) {
+		pr_info("qcom,apm-threshold-voltage property not specified\n");
+		return rc;
+	}
+
+	if (mem_acc_prop)
+		of_property_read_u32(pdev->dev.of_node, mem_acc_prop,
+					  &mem_acc_threshold);
+
+	/* Determine crossover virtual corner */
+	count = regulator_count_voltages(regulator);
+	if (count < 0) {
+		pr_err("Failed to get the number of virtual corners supported\n");
+		return count;
+	}
+
+	/*
+	 * CPRh corners (in hardware) are ordered:
+	 * 0 - n-1		- for n functional corners
+	 * APM crossover	- required for OSM
+	 * [MEM ACC crossover]	- optional
+	 *
+	 * 'count' corresponds to the total number of corners including n
+	 * functional corners, the APM crossover corner, and potentially the
+	 * MEM ACC cross over corner.
+	 */
+	if (mem_acc_threshold) {
+		c->apm_crossover_vc = count - 2;
+		c->mem_acc_crossover_vc = count - 1;
+	} else {
+		c->apm_crossover_vc = count - 1;
+	}
+
+	/* Determine APM threshold virtual corner */
+	for (i = 0; i < OSM_TABLE_SIZE; i++) {
+		vc = c->osm_table[i].virtual_corner + 1;
+		corner_volt = regulator_list_corner_voltage(regulator, vc);
+
+		if (corner_volt >= apm_threshold) {
+			c->apm_threshold_vc = c->osm_table[i].virtual_corner;
+			/*
+			 * Handle case where VC 0 has open-loop
+			 * greater than or equal to APM threshold voltage.
+			 */
+			c->apm_threshold_pre_vc = c->apm_threshold_vc ?
+				c->apm_threshold_vc - 1 : OSM_SEQ_MINUS_ONE;
+			break;
+		}
+	}
+
+	/*
+	 * This assumes the OSM table uses corners
+	 * 0 to MAX_VIRTUAL_CORNER - 1.
+	 */
+	if (!c->apm_threshold_vc &&
+	    c->apm_threshold_pre_vc != OSM_SEQ_MINUS_ONE) {
+		c->apm_threshold_vc = MAX_VIRTUAL_CORNER;
+		c->apm_threshold_pre_vc = c->apm_threshold_vc - 1;
+	}
+
+	/* Determine MEM ACC threshold virtual corner */
+	if (mem_acc_threshold) {
+		for (i = 0; i < OSM_TABLE_SIZE; i++) {
+			vc = c->osm_table[i].virtual_corner + 1;
+			corner_volt
+				= regulator_list_corner_voltage(regulator, vc);
+
+			if (corner_volt >= mem_acc_threshold) {
+				c->mem_acc_threshold_vc
+					= c->osm_table[i].virtual_corner;
+				/*
+				 * Handle case where VC 0 has open-loop
+				 * greater than or equal to MEM-ACC threshold
+				 * voltage.
+				 */
+				c->mem_acc_threshold_pre_vc =
+					c->mem_acc_threshold_vc ?
+					c->mem_acc_threshold_vc - 1 :
+					OSM_SEQ_MINUS_ONE;
+				break;
+			}
+		}
+
+		/*
+		 * This assumes the OSM table uses corners
+		 * 0 to MAX_VIRTUAL_CORNER - 1.
+		 */
+		if (!c->mem_acc_threshold_vc && c->mem_acc_threshold_pre_vc
+		    != OSM_SEQ_MINUS_ONE) {
+			c->mem_acc_threshold_vc =
+				MAX_VIRTUAL_CORNER;
+			c->mem_acc_threshold_pre_vc =
+				c->mem_acc_threshold_vc - 1;
+		}
+	}
+
+	return 0;
+}
+
+static int clk_osm_set_cc_policy(struct platform_device *pdev)
+{
+	int rc = 0, val;
+	u32 *array;
+	struct device_node *of = pdev->dev.of_node;
+
+	array = devm_kzalloc(&pdev->dev, MAX_CLUSTER_CNT * sizeof(u32),
+			     GFP_KERNEL);
+	if (!array)
+		return -ENOMEM;
+
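+	/*
+	 * SPM_CC_HYSTERESIS packs the core-count up timer in bits [31:16]
+	 * and the down timer in bits [15:0]; both DT values are converted
+	 * to OSM clock cycles via clk_osm_count_ns().
+	 */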
+	rc = of_property_read_u32_array(of, "qcom,up-timer", array,
+					MAX_CLUSTER_CNT);
+	if (rc) {
+		dev_dbg(&pdev->dev, "No up timer value, rc=%d\n",
+			 rc);
+	} else {
+		val = clk_osm_read_reg(&pwrcl_clk, SPM_CC_HYSTERESIS)
+			| BVAL(31, 16, clk_osm_count_ns(&pwrcl_clk,
+					array[pwrcl_clk.cluster_num]));
+		clk_osm_write_reg(&pwrcl_clk, val, SPM_CC_HYSTERESIS);
+		val = clk_osm_read_reg(&perfcl_clk, SPM_CC_HYSTERESIS)
+			| BVAL(31, 16, clk_osm_count_ns(&perfcl_clk,
+					array[perfcl_clk.cluster_num]));
+		clk_osm_write_reg(&perfcl_clk, val, SPM_CC_HYSTERESIS);
+	}
+
+	rc = of_property_read_u32_array(of, "qcom,down-timer",
+					array, MAX_CLUSTER_CNT);
+	if (rc) {
+		dev_dbg(&pdev->dev, "No down timer value, rc=%d\n", rc);
+	} else {
+		val = clk_osm_read_reg(&pwrcl_clk, SPM_CC_HYSTERESIS)
+			| BVAL(15, 0, clk_osm_count_ns(&pwrcl_clk,
+				       array[pwrcl_clk.cluster_num]));
+		clk_osm_write_reg(&pwrcl_clk, val, SPM_CC_HYSTERESIS);
+		val = clk_osm_read_reg(&perfcl_clk, SPM_CC_HYSTERESIS)
+			| BVAL(15, 0, clk_osm_count_ns(&perfcl_clk,
+				       array[perfcl_clk.cluster_num]));
+		clk_osm_write_reg(&perfcl_clk, val, SPM_CC_HYSTERESIS);
+	}
+
+	/* OSM index override for cluster PC */
+	rc = of_property_read_u32_array(of, "qcom,pc-override-index",
+					array, MAX_CLUSTER_CNT);
+	if (rc) {
+		dev_dbg(&pdev->dev, "No PC override index value, rc=%d\n",
+			rc);
+		clk_osm_write_reg(&pwrcl_clk, 0, CC_ZERO_BEHAV_CTRL);
+		clk_osm_write_reg(&perfcl_clk, 0, CC_ZERO_BEHAV_CTRL);
+	} else {
+		val = BVAL(6, 1, array[pwrcl_clk.cluster_num])
+			| ENABLE_OVERRIDE;
+		clk_osm_write_reg(&pwrcl_clk, val, CC_ZERO_BEHAV_CTRL);
+		val = BVAL(6, 1, array[perfcl_clk.cluster_num])
+			| ENABLE_OVERRIDE;
+		clk_osm_write_reg(&perfcl_clk, val, CC_ZERO_BEHAV_CTRL);
+	}
+
+	/* Wait for the writes to complete */
+	clk_osm_mb(&perfcl_clk, OSM_BASE);
+
+	rc = of_property_read_bool(pdev->dev.of_node, "qcom,set-ret-inactive");
+	if (rc) {
+		dev_dbg(&pdev->dev, "Treat cores in retention as active\n");
+		val = 0;
+	} else {
+		dev_dbg(&pdev->dev, "Treat cores in retention as inactive\n");
+		val = 1;
+	}
+
+	clk_osm_write_reg(&pwrcl_clk, val, SPM_CORE_RET_MAPPING);
+	clk_osm_write_reg(&perfcl_clk, val, SPM_CORE_RET_MAPPING);
+
+	rc = of_property_read_bool(pdev->dev.of_node, "qcom,disable-cc-dvcs");
+	if (rc) {
+		dev_dbg(&pdev->dev, "Disabling CC based DCVS\n");
+		val = 1;
+	} else {
+		val = 0;
+	}
+
+	clk_osm_write_reg(&pwrcl_clk, val, SPM_CC_DCVS_DISABLE);
+	clk_osm_write_reg(&perfcl_clk, val, SPM_CC_DCVS_DISABLE);
+
+	/* Wait for the writes to complete */
+	clk_osm_mb(&perfcl_clk, OSM_BASE);
+
+	devm_kfree(&pdev->dev, array);
+	return 0;
+}
+
+static void clk_osm_setup_itm_to_osm_handoff(void)
+{
+	/* Program address of ITM_PRESENT of CPUSS */
+	clk_osm_write_reg(&pwrcl_clk, pwrcl_clk.apcs_itm_present,
+			  SEQ_REG(37));
+	clk_osm_write_reg(&pwrcl_clk, 0, SEQ_REG(38));
+	clk_osm_write_reg(&perfcl_clk, perfcl_clk.apcs_itm_present,
+			  SEQ_REG(37));
+	clk_osm_write_reg(&perfcl_clk, 0, SEQ_REG(38));
+
+	/*
+	 * Program the data to write to ITM_PRESENT, assuming the ITM for the
+	 * other domain is enabled and the ITM for this domain is to be
+	 * disabled.
+	 */
+	clk_osm_write_reg(&pwrcl_clk, ITM_CL0_DISABLE_CL1_ENABLED,
+			  SEQ_REG(39));
+	clk_osm_write_reg(&perfcl_clk, ITM_CL0_ENABLED_CL1_DISABLE,
+			  SEQ_REG(39));
+}
+
+static int clk_osm_set_llm_freq_policy(struct platform_device *pdev)
+{
+	struct device_node *of = pdev->dev.of_node;
+	u32 *array;
+	int rc = 0, val, regval;
+
+	array = devm_kzalloc(&pdev->dev, MAX_CLUSTER_CNT * sizeof(u32),
+			     GFP_KERNEL);
+	if (!array)
+		return -ENOMEM;
+
+	/*
+	 * Set up the timer that controls how long the OSM waits before
+	 * performing DCVS when an LLM frequency increase request is received.
+	 * Time is specified in us.
+	 */
+	rc = of_property_read_u32_array(of, "qcom,llm-freq-up-timer", array,
+					MAX_CLUSTER_CNT);
+	if (rc) {
+		dev_dbg(&pdev->dev, "Unable to get CC up timer value: %d\n",
+			rc);
+	} else {
+		val = clk_osm_read_reg(&pwrcl_clk, LLM_FREQ_VOTE_HYSTERESIS)
+			| BVAL(31, 16, clk_osm_count_ns(&pwrcl_clk,
+						array[pwrcl_clk.cluster_num]));
+		clk_osm_write_reg(&pwrcl_clk, val, LLM_FREQ_VOTE_HYSTERESIS);
+		val = clk_osm_read_reg(&perfcl_clk, LLM_FREQ_VOTE_HYSTERESIS)
+			| BVAL(31, 16, clk_osm_count_ns(&perfcl_clk,
+						array[perfcl_clk.cluster_num]));
+		clk_osm_write_reg(&perfcl_clk, val, LLM_FREQ_VOTE_HYSTERESIS);
+	}
+
+	/*
+	 * Set up the timer that controls how long the OSM waits before
+	 * performing DCVS when an LLM frequency decrease request is received.
+	 * Time is specified in us.
+	 */
+	rc = of_property_read_u32_array(of, "qcom,llm-freq-down-timer",
+					array, MAX_CLUSTER_CNT);
+	if (rc) {
+		dev_dbg(&pdev->dev, "No LLM Frequency down timer value: %d\n",
+			rc);
+	} else {
+		val = clk_osm_read_reg(&pwrcl_clk, LLM_FREQ_VOTE_HYSTERESIS)
+			| BVAL(15, 0, clk_osm_count_ns(&pwrcl_clk,
+					       array[pwrcl_clk.cluster_num]));
+		clk_osm_write_reg(&pwrcl_clk, val, LLM_FREQ_VOTE_HYSTERESIS);
+		val = clk_osm_read_reg(&perfcl_clk, LLM_FREQ_VOTE_HYSTERESIS)
+			| BVAL(15, 0, clk_osm_count_ns(&perfcl_clk,
+					       array[perfcl_clk.cluster_num]));
+		clk_osm_write_reg(&perfcl_clk, val, LLM_FREQ_VOTE_HYSTERESIS);
+	}
+
+	/* Enable or disable honoring of LLM frequency requests */
+	rc = of_property_read_bool(pdev->dev.of_node,
+					"qcom,enable-llm-freq-vote");
+	if (rc) {
+		dev_dbg(&pdev->dev, "Honoring LLM Frequency requests\n");
+		val = 0;
+	} else {
+		val = 1;
+	}
+
+	/* Enable or disable LLM FREQ DCVS */
+	regval = val | clk_osm_read_reg(&pwrcl_clk, LLM_INTF_DCVS_DISABLE);
+	clk_osm_write_reg(&pwrcl_clk, regval, LLM_INTF_DCVS_DISABLE);
+	regval = val | clk_osm_read_reg(&perfcl_clk, LLM_INTF_DCVS_DISABLE);
+	clk_osm_write_reg(&perfcl_clk, regval, LLM_INTF_DCVS_DISABLE);
+
+	/* Wait for the write to complete */
+	clk_osm_mb(&perfcl_clk, OSM_BASE);
+
+	devm_kfree(&pdev->dev, array);
+	return 0;
+}
+
+static int clk_osm_set_llm_volt_policy(struct platform_device *pdev)
+{
+	struct device_node *of = pdev->dev.of_node;
+	u32 *array;
+	int rc = 0, val, regval;
+
+	array = devm_kzalloc(&pdev->dev, MAX_CLUSTER_CNT * sizeof(u32),
+			     GFP_KERNEL);
+	if (!array)
+		return -ENOMEM;
+
+	/*
+	 * Set up the timer that controls how long the OSM waits before
+	 * performing DCVS when an LLM voltage increase request is received.
+	 * Time is specified in us.
+	 */
+	rc = of_property_read_u32_array(of, "qcom,llm-volt-up-timer",
+					array, MAX_CLUSTER_CNT);
+	if (rc) {
+		dev_dbg(&pdev->dev, "No LLM voltage up timer value, rc=%d\n",
+			rc);
+	} else {
+		val = clk_osm_read_reg(&pwrcl_clk, LLM_VOLT_VOTE_HYSTERESIS)
+			| BVAL(31, 16, clk_osm_count_ns(&pwrcl_clk,
+						array[pwrcl_clk.cluster_num]));
+		clk_osm_write_reg(&pwrcl_clk, val, LLM_VOLT_VOTE_HYSTERESIS);
+		val = clk_osm_read_reg(&perfcl_clk, LLM_VOLT_VOTE_HYSTERESIS)
+			| BVAL(31, 16, clk_osm_count_ns(&perfcl_clk,
+						array[perfcl_clk.cluster_num]));
+		clk_osm_write_reg(&perfcl_clk, val, LLM_VOLT_VOTE_HYSTERESIS);
+	}
+
+	/*
+	 * Set up the timer that controls how long the OSM waits before
+	 * performing DCVS when an LLM voltage decrease request is received.
+	 * Time is specified in us.
+	 */
+	rc = of_property_read_u32_array(of, "qcom,llm-volt-down-timer",
+					array, MAX_CLUSTER_CNT);
+	if (rc) {
+		dev_dbg(&pdev->dev, "No LLM Voltage down timer value: %d\n",
+			rc);
+	} else {
+		val = clk_osm_read_reg(&pwrcl_clk, LLM_VOLT_VOTE_HYSTERESIS)
+			| BVAL(15, 0, clk_osm_count_ns(&pwrcl_clk,
+					       array[pwrcl_clk.cluster_num]));
+		clk_osm_write_reg(&pwrcl_clk, val, LLM_VOLT_VOTE_HYSTERESIS);
+		val = clk_osm_read_reg(&perfcl_clk, LLM_VOLT_VOTE_HYSTERESIS)
+			| BVAL(15, 0, clk_osm_count_ns(&perfcl_clk,
+					       array[perfcl_clk.cluster_num]));
+		clk_osm_write_reg(&perfcl_clk, val, LLM_VOLT_VOTE_HYSTERESIS);
+	}
+
+	/* Enable or disable honoring of LLM Voltage requests */
+	rc = of_property_read_bool(pdev->dev.of_node,
+					"qcom,enable-llm-volt-vote");
+	if (rc) {
+		dev_dbg(&pdev->dev, "Honoring LLM Voltage requests\n");
+		val = 0;
+	} else {
+		val = BIT(1);
+	}
+
+	/* Enable or disable LLM VOLT DCVS */
+	regval = val | clk_osm_read_reg(&pwrcl_clk, LLM_INTF_DCVS_DISABLE);
+	clk_osm_write_reg(&pwrcl_clk, regval, LLM_INTF_DCVS_DISABLE);
+	regval = val | clk_osm_read_reg(&perfcl_clk, LLM_INTF_DCVS_DISABLE);
+	clk_osm_write_reg(&perfcl_clk, regval, LLM_INTF_DCVS_DISABLE);
+
+	/* Wait for the writes to complete */
+	clk_osm_mb(&perfcl_clk, OSM_BASE);
+
+	devm_kfree(&pdev->dev, array);
+	return 0;
+}
+
+static void clk_osm_program_apm_regs(struct clk_osm *c)
+{
+	/*
+	 * Program address of the control register used to configure
+	 * the Array Power Mux controller
+	 */
+	clk_osm_write_reg(c, c->apm_mode_ctl, SEQ_REG(2));
+
+	/* Program address of controller status register */
+	clk_osm_write_reg(c, c->apm_ctrl_status, SEQ_REG(3));
+
+	/* Program mode value to switch APM from VDD_APCC to VDD_MX */
+	clk_osm_write_reg(c, APM_MX_MODE, SEQ_REG(77));
+
+	/* Program value used to determine current APM power supply is VDD_MX */
+	clk_osm_write_reg(c, APM_MX_MODE_VAL, SEQ_REG(78));
+
+	/* Program mask used to determine status of APM power supply switch */
+	clk_osm_write_reg(c, APM_MODE_SWITCH_MASK, SEQ_REG(79));
+
+	/* Program mode value to switch APM from VDD_MX to VDD_APCC */
+	clk_osm_write_reg(c, APM_APC_MODE, SEQ_REG(80));
+
+	/*
+	 * Program value used to determine current APM power supply
+	 * is VDD_APCC
+	 */
+	clk_osm_write_reg(c, APM_APC_MODE_VAL, SEQ_REG(81));
+}
+
+static void clk_osm_program_mem_acc_regs(struct clk_osm *c)
+{
+	struct osm_entry *table = c->osm_table;
+	int i, curr_level, j = 0;
+	int mem_acc_level_map[MAX_MEM_ACC_LEVELS] = {0, 0, 0};
+	int threshold_vc[4];
+
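+	/*
+	 * Walk the LUT and record the virtual corner just below each MEM ACC
+	 * level boundary; a change in spare_data marks the first entry of
+	 * the next level.
+	 */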
+	curr_level = c->osm_table[0].spare_data;
+	for (i = 0; i < c->num_entries; i++) {
+		if (curr_level == MAX_MEM_ACC_LEVELS)
+			break;
+
+		if (c->osm_table[i].spare_data != curr_level) {
+			mem_acc_level_map[j++]
+					= c->osm_table[i].virtual_corner - 1;
+			curr_level = c->osm_table[i].spare_data;
+		}
+	}
+
+	if (c->secure_init) {
+		clk_osm_write_reg(c, MEM_ACC_SEQ_CONST(1), SEQ_REG(51));
+		clk_osm_write_reg(c, MEM_ACC_SEQ_CONST(2), SEQ_REG(52));
+		clk_osm_write_reg(c, MEM_ACC_SEQ_CONST(3), SEQ_REG(53));
+		clk_osm_write_reg(c, MEM_ACC_SEQ_CONST(4), SEQ_REG(54));
+		clk_osm_write_reg(c, MEM_ACC_APM_READ_MASK, SEQ_REG(59));
+		clk_osm_write_reg(c, mem_acc_level_map[0], SEQ_REG(55));
+		clk_osm_write_reg(c, mem_acc_level_map[0] + 1, SEQ_REG(56));
+		clk_osm_write_reg(c, mem_acc_level_map[1], SEQ_REG(57));
+		clk_osm_write_reg(c, mem_acc_level_map[1] + 1, SEQ_REG(58));
+		clk_osm_write_reg(c, c->pbases[OSM_BASE] + SEQ_REG(28),
+				  SEQ_REG(49));
+
+		for (i = 0; i < MAX_MEM_ACC_VALUES; i++)
+			clk_osm_write_reg(c, c->apcs_mem_acc_val[i],
+					  MEM_ACC_SEQ_REG_VAL_START(i));
+
+		for (i = 0; i < MAX_MEM_ACC_VAL_PER_LEVEL; i++)
+			clk_osm_write_reg(c, c->apcs_mem_acc_cfg[i],
+					  MEM_ACC_SEQ_REG_CFG_START(i));
+	} else {
+		if (c->mem_acc_crossover_vc)
+			scm_io_write(c->pbases[OSM_BASE] + SEQ_REG(88),
+					c->mem_acc_crossover_vc);
+
+		threshold_vc[0] = mem_acc_level_map[0];
+		threshold_vc[1] = mem_acc_level_map[0] + 1;
+		threshold_vc[2] = mem_acc_level_map[1];
+		threshold_vc[3] = mem_acc_level_map[1] + 1;
+
+		/*
+		 * Use dynamic MEM ACC threshold voltage based value for the
+		 * highest MEM ACC threshold if it is specified instead of the
+		 * fixed mapping in the LUT.
+		 */
+		if (c->mem_acc_threshold_vc || c->mem_acc_threshold_pre_vc
+		    == OSM_SEQ_MINUS_ONE) {
+			threshold_vc[2] = c->mem_acc_threshold_pre_vc;
+			threshold_vc[3] = c->mem_acc_threshold_vc;
+
+			if (c->mem_acc_threshold_pre_vc == OSM_SEQ_MINUS_ONE) {
+				threshold_vc[1] = threshold_vc[0] =
+					c->mem_acc_threshold_pre_vc;
+			} else {
+				if (threshold_vc[1] >= threshold_vc[2])
+					threshold_vc[1] = threshold_vc[2] - 1;
+				if (threshold_vc[0] >= threshold_vc[1])
+					threshold_vc[0] = threshold_vc[1] - 1;
+			}
+		}
+
+		scm_io_write(c->pbases[OSM_BASE] + SEQ_REG(55),
+				threshold_vc[0]);
+		scm_io_write(c->pbases[OSM_BASE] + SEQ_REG(56),
+				threshold_vc[1]);
+		scm_io_write(c->pbases[OSM_BASE] + SEQ_REG(57),
+				threshold_vc[2]);
+		scm_io_write(c->pbases[OSM_BASE] + SEQ_REG(58),
+				threshold_vc[3]);
+		/* SEQ_REG(49) = SEQ_REG(28) init by TZ */
+	}
+
+	/*
+	 * Program L_VAL corresponding to the first virtual
+	 * corner with MEM ACC level 3.
+	 */
+	if (c->mem_acc_threshold_vc ||
+	    c->mem_acc_threshold_pre_vc == OSM_SEQ_MINUS_ONE)
+		for (i = 0; i < c->num_entries; i++)
+			if (c->mem_acc_threshold_vc == table[i].virtual_corner)
+				scm_io_write(c->pbases[OSM_BASE] + SEQ_REG(32),
+					     L_VAL(table[i].freq_data));
+}
+
+void clk_osm_setup_sequencer(struct clk_osm *c)
+{
+	u32 i;
+
+	pr_debug("Setting up sequencer for cluster=%d\n", c->cluster_num);
+	for (i = 0; i < ARRAY_SIZE(seq_instr); i++) {
+		clk_osm_write_reg(c, seq_instr[i],
+				  (long)(SEQ_MEM_ADDR + i * 4));
+	}
+
+	pr_debug("Setting up sequencer branch instructions for cluster=%d\n",
+		c->cluster_num);
+	for (i = 0; i < ARRAY_SIZE(seq_br_instr); i++) {
+		clk_osm_write_reg(c, seq_br_instr[i],
+				  (long)(SEQ_CFG_BR_ADDR + i * 4));
+	}
+}
+
+static void clk_osm_setup_cycle_counters(struct clk_osm *c)
+{
+	u32 ratio = c->osm_clk_rate;
+	u32 val = 0;
+
+	/* Enable cycle counter */
+	val |= BIT(0);
+	/* Setup OSM clock to XO ratio */
+	do_div(ratio, c->xo_clk_rate);
+	val |= BVAL(5, 1, ratio - 1) | OSM_CYCLE_COUNTER_USE_XO_EDGE_EN;
+	clk_osm_write_reg(c, val, OSM_CYCLE_COUNTER_CTRL_REG);
+	c->total_cycle_counter = 0;
+	c->prev_cycle_counter = 0;
+	pr_debug("OSM to XO clock ratio: %d\n", ratio);
+}
+
+static void clk_osm_setup_osm_was(struct clk_osm *c)
+{
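+	/* Apply OSM hardware workarounds ("WAs"); skipped on msm8998 v2 */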
+	u32 cc_hyst;
+	u32 val;
+
+	if (msm8998_v2)
+		return;
+
+	val = clk_osm_read_reg(c, PDN_FSM_CTRL_REG);
+	val |= IGNORE_PLL_LOCK_MASK;
+	cc_hyst = clk_osm_read_reg(c, SPM_CC_HYSTERESIS);
+
+	if (c->secure_init) {
+		clk_osm_write_reg(c, val, SEQ_REG(47));
+		val &= ~IGNORE_PLL_LOCK_MASK;
+		clk_osm_write_reg(c, val, SEQ_REG(48));
+
+		clk_osm_write_reg(c, c->pbases[OSM_BASE] + SEQ_REG(42),
+				  SEQ_REG(40));
+		clk_osm_write_reg(c, c->pbases[OSM_BASE] + SEQ_REG(43),
+				  SEQ_REG(41));
+		clk_osm_write_reg(c, 0x1, SEQ_REG(44));
+		clk_osm_write_reg(c, 0x0, SEQ_REG(45));
+		clk_osm_write_reg(c, c->pbases[OSM_BASE] + PDN_FSM_CTRL_REG,
+				  SEQ_REG(46));
+
+		/* C2D/C3 + D2D workaround */
+		clk_osm_write_reg(c, c->pbases[OSM_BASE] + SPM_CC_HYSTERESIS,
+				  SEQ_REG(6));
+		clk_osm_write_reg(c, cc_hyst, SEQ_REG(7));
+
+		/* Droop detector PLL lock detect workaround */
+		clk_osm_write_reg(c, PLL_DD_USER_CTL_LO_ENABLE, SEQ_REG(4));
+		clk_osm_write_reg(c, PLL_DD_USER_CTL_LO_DISABLE, SEQ_REG(5));
+		clk_osm_write_reg(c, c->cluster_num == 0 ? PLL_DD_D0_USER_CTL_LO
+				  : PLL_DD_D1_USER_CTL_LO, SEQ_REG(21));
+
+		/* PLL lock detect and HMSS AHB clock workaround */
+		clk_osm_write_reg(c, 0x640, CFG_DELAY_VAL_3);
+
+		/* DxFSM workaround */
+		clk_osm_write_reg(c, c->cluster_num == 0 ? 0x17911200 :
+				  0x17811200, SEQ_REG(22));
+		clk_osm_write_reg(c, 0x80800, SEQ_REG(23));
+		clk_osm_write_reg(c, 0x179D1100, SEQ_REG(24));
+		clk_osm_write_reg(c, 0x11f, SEQ_REG(25));
+		clk_osm_write_reg(c, c->cluster_num == 0 ? 0x17912000 :
+				  0x17811290, SEQ_REG(26));
+		clk_osm_write_reg(c, c->cluster_num == 0 ? 0x17911290 :
+				  0x17811290, SEQ_REG(20));
+		clk_osm_write_reg(c, c->cluster_num == 0 ? 0x17811290 :
+				  0x17911290, SEQ_REG(32));
+		clk_osm_write_reg(c, 0x179D4020, SEQ_REG(35));
+		clk_osm_write_reg(c, 0x11f, SEQ_REG(25));
+		clk_osm_write_reg(c, 0xa, SEQ_REG(86));
+		clk_osm_write_reg(c, 0xe, SEQ_REG(87));
+		clk_osm_write_reg(c, 0x00400000, SEQ_REG(88));
+		clk_osm_write_reg(c, 0x00700000, SEQ_REG(89));
+	} else {
+		scm_io_write(c->pbases[OSM_BASE] + SEQ_REG(47), val);
+		val &= ~IGNORE_PLL_LOCK_MASK;
+		scm_io_write(c->pbases[OSM_BASE] + SEQ_REG(48), val);
+
+		/* C2D/C3 + D2D workaround */
+		scm_io_write(c->pbases[OSM_BASE] + SEQ_REG(7),
+			     cc_hyst);
+
+		/* Droop detector PLL lock detect workaround */
+		scm_io_write(c->pbases[OSM_BASE] + SEQ_REG(4),
+			     PLL_DD_USER_CTL_LO_ENABLE);
+	}
+
+	if (c->cluster_num == 0) {
+		val = readl_relaxed(c->vbases[PLL_BASE] + PLL_TEST_CTL_HI)
+			| BIT(13);
+		writel_relaxed(val, c->vbases[PLL_BASE] +
+			       PLL_TEST_CTL_HI);
+	}
+
+	/* Ensure writes complete before returning */
+	clk_osm_mb(c, OSM_BASE);
+}
+
+static void clk_osm_setup_fsms(struct clk_osm *c)
+{
+	u32 val;
+
+	/* Reduction FSM */
+	if (c->red_fsm_en) {
+		val = clk_osm_read_reg(c, VMIN_REDUC_ENABLE_REG) | BIT(0);
+		clk_osm_write_reg(c, val, VMIN_REDUC_ENABLE_REG);
+		clk_osm_write_reg(c, BVAL(15, 0, clk_osm_count_ns(c, 10000)),
+				  VMIN_REDUC_TIMER_REG);
+	}
+
+	/* Boost FSM */
+	if (c->boost_fsm_en) {
+		val = clk_osm_read_reg(c, PDN_FSM_CTRL_REG);
+		clk_osm_write_reg(c, val | CC_BOOST_EN_MASK, PDN_FSM_CTRL_REG);
+
+		val = clk_osm_read_reg(c, CC_BOOST_TIMER_REG0);
+		val |= BVAL(15, 0, clk_osm_count_ns(c, PLL_WAIT_LOCK_TIME_NS));
+		val |= BVAL(31, 16, clk_osm_count_ns(c, SAFE_FREQ_WAIT_NS));
+		clk_osm_write_reg(c, val, CC_BOOST_TIMER_REG0);
+
+		val = clk_osm_read_reg(c, CC_BOOST_TIMER_REG1);
+		val |= BVAL(15, 0, clk_osm_count_ns(c, PLL_WAIT_LOCK_TIME_NS));
+		val |= BVAL(31, 16, clk_osm_count_ns(c, PLL_WAIT_LOCK_TIME_NS));
+		clk_osm_write_reg(c, val, CC_BOOST_TIMER_REG1);
+
+		val = clk_osm_read_reg(c, CC_BOOST_TIMER_REG2);
+		val |= BVAL(15, 0, clk_osm_count_ns(c, DEXT_DECREMENT_WAIT_NS));
+		clk_osm_write_reg(c, val, CC_BOOST_TIMER_REG2);
+	}
+
+	/* Safe Freq FSM */
+	if (c->safe_fsm_en) {
+		val = clk_osm_read_reg(c, PDN_FSM_CTRL_REG);
+		clk_osm_write_reg(c, val | DCVS_BOOST_EN_MASK,
+				  PDN_FSM_CTRL_REG);
+
+		val = clk_osm_read_reg(c, DCVS_BOOST_TIMER_REG0);
+		val |= BVAL(15, 0, clk_osm_count_ns(c, PLL_WAIT_LOCK_TIME_NS));
+		val |= BVAL(31, 16, clk_osm_count_ns(c, SAFE_FREQ_WAIT_NS));
+		clk_osm_write_reg(c, val, DCVS_BOOST_TIMER_REG0);
+
+		val = clk_osm_read_reg(c, DCVS_BOOST_TIMER_REG1);
+		val |= BVAL(15, 0, clk_osm_count_ns(c, PLL_WAIT_LOCK_TIME_NS));
+		val |= BVAL(31, 16, clk_osm_count_ns(c, PLL_WAIT_LOCK_TIME_NS));
+		clk_osm_write_reg(c, val, DCVS_BOOST_TIMER_REG1);
+
+		val = clk_osm_read_reg(c, DCVS_BOOST_TIMER_REG2);
+		val |= BVAL(15, 0, clk_osm_count_ns(c, DEXT_DECREMENT_WAIT_NS));
+		clk_osm_write_reg(c, val, DCVS_BOOST_TIMER_REG2);
+
+	}
+
+	/* PS FSM */
+	if (c->ps_fsm_en) {
+		val = clk_osm_read_reg(c, PDN_FSM_CTRL_REG);
+		clk_osm_write_reg(c, val | PS_BOOST_EN_MASK, PDN_FSM_CTRL_REG);
+
+		val = clk_osm_read_reg(c, PS_BOOST_TIMER_REG0);
+		val |= BVAL(15, 0, clk_osm_count_ns(c, PLL_WAIT_LOCK_TIME_NS));
+		val |= BVAL(31, 16, clk_osm_count_ns(c, SAFE_FREQ_WAIT_NS));
+		clk_osm_write_reg(c, val, PS_BOOST_TIMER_REG0);
+
+		val = clk_osm_read_reg(c, PS_BOOST_TIMER_REG1);
+		val |= BVAL(15, 0, clk_osm_count_ns(c, PLL_WAIT_LOCK_TIME_NS));
+		val |= BVAL(31, 16, clk_osm_count_ns(c, PLL_WAIT_LOCK_TIME_NS));
+		clk_osm_write_reg(c, val, PS_BOOST_TIMER_REG1);
+
+		val = clk_osm_read_reg(c, PS_BOOST_TIMER_REG2);
+		val |= BVAL(15, 0, clk_osm_count_ns(c, DEXT_DECREMENT_WAIT_NS));
+		clk_osm_write_reg(c, val, PS_BOOST_TIMER_REG2);
+	}
+
+	/* PLL signal timing control */
+	if (c->boost_fsm_en || c->safe_fsm_en || c->ps_fsm_en)
+		clk_osm_write_reg(c, 0x5, BOOST_PROG_SYNC_DELAY_REG);
+
+	/* Droop FSM */
+	if (c->wfx_fsm_en) {
+		/* WFx FSM */
+		val = clk_osm_read_reg(c, PDN_FSM_CTRL_REG);
+		clk_osm_write_reg(c, val | WFX_DROOP_EN_MASK, PDN_FSM_CTRL_REG);
+
+		val = clk_osm_read_reg(c, DROOP_UNSTALL_TIMER_CTRL_REG);
+		val |= BVAL(31, 16, clk_osm_count_ns(c, 500));
+		clk_osm_write_reg(c, val, DROOP_UNSTALL_TIMER_CTRL_REG);
+
+		val = clk_osm_read_reg(c,
+			       DROOP_WAIT_TO_RELEASE_TIMER_CTRL0_REG);
+		val |= BVAL(31, 16, clk_osm_count_ns(c, 250));
+		clk_osm_write_reg(c, val,
+				DROOP_WAIT_TO_RELEASE_TIMER_CTRL0_REG);
+	}
+
+	/* PC/RET FSM */
+	if (c->pc_fsm_en) {
+		val = clk_osm_read_reg(c, PDN_FSM_CTRL_REG);
+		clk_osm_write_reg(c, val | PC_RET_EXIT_DROOP_EN_MASK,
+				  PDN_FSM_CTRL_REG);
+
+		val = clk_osm_read_reg(c, DROOP_UNSTALL_TIMER_CTRL_REG);
+		val |= BVAL(15, 0, clk_osm_count_ns(c, 500));
+		clk_osm_write_reg(c, val, DROOP_UNSTALL_TIMER_CTRL_REG);
+
+		val = clk_osm_read_reg(c,
+				DROOP_WAIT_TO_RELEASE_TIMER_CTRL0_REG);
+		val |= BVAL(15, 0, clk_osm_count_ns(c, 250));
+		clk_osm_write_reg(c, val,
+				DROOP_WAIT_TO_RELEASE_TIMER_CTRL0_REG);
+	}
+
+	/* DCVS droop FSM - only if RCGwRC is not used for di/dt control */
+	if (c->droop_fsm_en) {
+		val = clk_osm_read_reg(c, PDN_FSM_CTRL_REG);
+		clk_osm_write_reg(c, val | DCVS_DROOP_EN_MASK,
+				  PDN_FSM_CTRL_REG);
+	}
+
+	if (c->wfx_fsm_en || c->ps_fsm_en || c->droop_fsm_en) {
+		clk_osm_write_reg(c, 0x1, DROOP_PROG_SYNC_DELAY_REG);
+		clk_osm_write_reg(c, clk_osm_count_ns(c, 5),
+				  DROOP_RELEASE_TIMER_CTRL);
+		clk_osm_write_reg(c, clk_osm_count_ns(c, 500),
+				  DCVS_DROOP_TIMER_CTRL);
+		val = clk_osm_read_reg(c, DROOP_CTRL_REG);
+		val |= BIT(31) | BVAL(22, 16, 0x2) |
+			BVAL(6, 0, 0x8);
+		clk_osm_write_reg(c, val, DROOP_CTRL_REG);
+	}
+
+	/* Enable the PLL Droop Override */
+	val = clk_osm_read_reg(c, OSM_PLL_SW_OVERRIDE_EN);
+	val |= PLL_SW_OVERRIDE_DROOP_EN;
+	clk_osm_write_reg(c, val, OSM_PLL_SW_OVERRIDE_EN);
+}
+
+static void clk_osm_do_additional_setup(struct clk_osm *c,
+					struct platform_device *pdev)
+{
+	if (!c->secure_init)
+		return;
+
+	dev_info(&pdev->dev, "Performing additional OSM setup due to lack of TZ for cluster=%d\n",
+		 c->cluster_num);
+
+	clk_osm_write_reg(c, BVAL(23, 16, 0xF), SPM_CC_CTRL);
+
+	/* PLL LVAL programming */
+	clk_osm_write_reg(c, c->l_val_base, SEQ_REG(0));
+	clk_osm_write_reg(c, PLL_MIN_LVAL, SEQ_REG(21));
+
+	/* PLL post-div programming */
+	clk_osm_write_reg(c, c->apcs_pll_user_ctl, SEQ_REG(18));
+	clk_osm_write_reg(c, PLL_POST_DIV2, SEQ_REG(19));
+	clk_osm_write_reg(c, PLL_POST_DIV1, SEQ_REG(29));
+
+	/* APM Programming */
+	clk_osm_program_apm_regs(c);
+
+	/* GFMUX Programming */
+	clk_osm_write_reg(c, c->apcs_cfg_rcgr, SEQ_REG(16));
+	clk_osm_write_reg(c, c->apcs_cmd_rcgr, SEQ_REG(33));
+	clk_osm_write_reg(c, RCG_UPDATE, SEQ_REG(34));
+	clk_osm_write_reg(c, GPLL_SEL, SEQ_REG(17));
+	clk_osm_write_reg(c, PLL_EARLY_SEL, SEQ_REG(82));
+	clk_osm_write_reg(c, PLL_MAIN_SEL, SEQ_REG(83));
+	clk_osm_write_reg(c, RCG_UPDATE_SUCCESS, SEQ_REG(84));
+	clk_osm_write_reg(c, RCG_UPDATE, SEQ_REG(85));
+
+	/* ITM to OSM handoff */
+	clk_osm_setup_itm_to_osm_handoff();
+
+	pr_debug("seq_size: %lu, seqbr_size: %lu\n", ARRAY_SIZE(seq_instr),
+						ARRAY_SIZE(seq_br_instr));
+	clk_osm_setup_sequencer(&pwrcl_clk);
+	clk_osm_setup_sequencer(&perfcl_clk);
+}
+
+static void clk_osm_apm_vc_setup(struct clk_osm *c)
+{
+	/*
+	 * The APM crossover virtual corner corresponds to the switching
+	 * voltage used during the APM transition. The APM threshold virtual
+	 * corner is the first corner that requires the APM switch sequence
+	 * from MX to APC.
+	 */
+	if (c->secure_init) {
+		clk_osm_write_reg(c, c->apm_threshold_vc, SEQ_REG(1));
+		clk_osm_write_reg(c, c->apm_crossover_vc, SEQ_REG(72));
+		clk_osm_write_reg(c, c->pbases[OSM_BASE] + SEQ_REG(1),
+				  SEQ_REG(8));
+		clk_osm_write_reg(c, c->apm_threshold_vc,
+				  SEQ_REG(15));
+		clk_osm_write_reg(c, c->apm_threshold_pre_vc,
+				  SEQ_REG(31));
+		clk_osm_write_reg(c, 0x3b | c->apm_threshold_vc << 6,
+				  SEQ_REG(73));
+		clk_osm_write_reg(c, 0x39 | c->apm_threshold_vc << 6,
+				  SEQ_REG(76));
+
+		/* Ensure writes complete before returning */
+		clk_osm_mb(c, OSM_BASE);
+	} else {
+		if (msm8998_v1) {
+			scm_io_write(c->pbases[OSM_BASE] + SEQ_REG(1),
+				     c->apm_threshold_vc);
+			scm_io_write(c->pbases[OSM_BASE] + SEQ_REG(73),
+				     0x3b | c->apm_threshold_vc << 6);
+		} else if (msm8998_v2) {
+			clk_osm_write_reg(c, c->apm_threshold_vc,
+					  SEQ_REG1_MSM8998_V2);
+		}
+		scm_io_write(c->pbases[OSM_BASE] + SEQ_REG(72),
+			     c->apm_crossover_vc);
+		scm_io_write(c->pbases[OSM_BASE] + SEQ_REG(15),
+			     c->apm_threshold_vc);
+		scm_io_write(c->pbases[OSM_BASE] + SEQ_REG(31),
+			     c->apm_threshold_pre_vc);
+		scm_io_write(c->pbases[OSM_BASE] + SEQ_REG(76),
+			     0x39 | c->apm_threshold_vc << 6);
+	}
+}
+
+static irqreturn_t clk_osm_debug_irq_cb(int irq, void *data)
+{
+	struct clk_osm *c = data;
+	unsigned long first, second, total_delta = 0;
+	u32 val, factor;
+	int i;
+
+	val = clk_osm_read_reg(c, DCVS_PERF_STATE_DEVIATION_INTR_STAT);
+	if (val & BIT(0)) {
+		pr_info("OS DCVS performance state deviated\n");
+		clk_osm_write_reg(c, BIT(0),
+				  DCVS_PERF_STATE_DEVIATION_INTR_CLEAR);
+	}
+
+	val = clk_osm_read_reg(c,
+			       DCVS_PERF_STATE_DEVIATION_CORRECTED_INTR_STAT);
+	if (val & BIT(0)) {
+		pr_info("OS DCVS performance state corrected\n");
+		clk_osm_write_reg(c, BIT(0),
+			  DCVS_PERF_STATE_DEVIATION_CORRECTED_INTR_CLEAR);
+	}
+
+	val = clk_osm_read_reg(c, DCVS_PERF_STATE_MET_INTR_STAT);
+	if (val & BIT(0)) {
+		pr_info("OS DCVS desired performance state reached\n");
+		clk_osm_write_reg(c, BIT(0), DCVS_PERF_STATE_MET_INTR_CLR);
+	}
+
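+	/* Guard against an unprogrammed factor to avoid dividing by zero */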
+	factor = c->cycle_counter_factor ? c->cycle_counter_factor : 1;
+
+	for (i = 0; i < c->cycle_counter_reads; i++) {
+		first = clk_osm_read_reg(c, OSM_CYCLE_COUNTER_STATUS_REG);
+
+		if (c->cycle_counter_delay)
+			udelay(c->cycle_counter_delay);
+
+		second = clk_osm_read_reg(c, OSM_CYCLE_COUNTER_STATUS_REG);
+		total_delta = total_delta + ((second - first) / factor);
+	}
+
+	pr_info("cluster=%d, L_VAL (estimated)=%lu\n",
+		c->cluster_num, total_delta / factor);
+
+	return IRQ_HANDLED;
+}
+
+static int clk_osm_setup_irq(struct platform_device *pdev, struct clk_osm *c,
+			 char *irq_name)
+{
+	int rc = 0;
+
+	rc = c->irq = platform_get_irq_byname(pdev, irq_name);
+	if (rc < 0) {
+		dev_err(&pdev->dev, "%s irq not specified\n", irq_name);
+		return rc;
+	}
+
+	rc = devm_request_irq(&pdev->dev, c->irq,
+			      clk_osm_debug_irq_cb,
+			      IRQF_TRIGGER_RISING | IRQF_SHARED,
+			      "OSM IRQ", c);
+	if (rc)
+		dev_err(&pdev->dev, "Request IRQ failed for OSM IRQ\n");
+
+	return rc;
+}
+
+static int find_voltage(struct clk_osm *c, unsigned long rate)
+{
+	struct osm_entry *table = c->osm_table;
+	int entries = c->num_entries, i;
+
+	for (i = 0; i < entries; i++) {
+		if (rate == table[i].frequency) {
+			/* Table voltages are in mV; convert to uV for OPP */
+			return table[i].open_loop_volt * 1000;
+		}
+	}
+
+	return -EINVAL;
+}
+
+static int add_opp(struct clk_osm *c, struct device *dev)
+{
+	unsigned long rate = 0;
+	int uv;
+	long rc;
+	int j = 0;
+	unsigned long min_rate = c->c.fmax[0];
+	unsigned long max_rate = c->c.fmax[c->c.num_fmax - 1];
+
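+	/*
+	 * Register one OPP per fmax entry, from the lowest to the highest
+	 * frequency; the loop exits once the maximum rate has been added
+	 * (immediately when min_rate == max_rate).
+	 */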
+	while (1) {
+		rate = c->c.fmax[j++];
+		uv = find_voltage(c, rate);
+		if (uv <= 0) {
+			pr_warn("No voltage for %lu.\n", rate);
+			return -EINVAL;
+		}
+
+		rc = dev_pm_opp_add(dev, rate, uv);
+		if (rc) {
+			pr_warn("failed to add OPP for %lu\n", rate);
+			return rc;
+		}
+
+		/*
+		 * Print the OPP pair for the lowest and highest frequency for
+		 * each device that we're populating. This is important since
+		 * this information will be used by thermal mitigation and the
+		 * scheduler.
+		 */
+		if (rate == min_rate)
+			pr_info("Set OPP pair (%lu Hz, %d uv) on %s\n",
+				rate, uv, dev_name(dev));
+
+		if (rate == max_rate && max_rate != min_rate) {
+			pr_info("Set OPP pair (%lu Hz, %d uv) on %s\n",
+				rate, uv, dev_name(dev));
+			break;
+		}
+
+		if (min_rate == max_rate)
+			break;
+	}
+
+	return 0;
+}
+
+static struct clk *logical_cpu_to_clk(int cpu)
+{
+	struct device_node *cpu_node;
+	const u32 *cell;
+	u64 hwid;
+	static struct clk *cpu_clk_map[NR_CPUS];
+
+	if (cpu_clk_map[cpu])
+		return cpu_clk_map[cpu];
+
+	cpu_node = of_get_cpu_node(cpu, NULL);
+	if (!cpu_node)
+		goto fail;
+
+	cell = of_get_property(cpu_node, "reg", NULL);
+	if (!cell) {
+		pr_err("%s: missing reg property\n", cpu_node->full_name);
+		goto fail;
+	}
+
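+	/*
+	 * A CPU belongs to a cluster when its MPIDR-derived hwid sets no
+	 * bits outside that cluster's cpu_reg_mask.
+	 */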
+	hwid = of_read_number(cell, of_n_addr_cells(cpu_node));
+	if ((hwid | pwrcl_clk.cpu_reg_mask) == pwrcl_clk.cpu_reg_mask) {
+		cpu_clk_map[cpu] = &pwrcl_clk.c;
+		return &pwrcl_clk.c;
+	}
+	if ((hwid | perfcl_clk.cpu_reg_mask) == perfcl_clk.cpu_reg_mask) {
+		cpu_clk_map[cpu] = &perfcl_clk.c;
+		return &perfcl_clk.c;
+	}
+
+fail:
+	return NULL;
+}
+
+static u64 clk_osm_get_cpu_cycle_counter(int cpu)
+{
+	struct clk_osm *c;
+	u32 val;
+	unsigned long flags;
+
+	if (logical_cpu_to_clk(cpu) == &pwrcl_clk.c) {
+		c = &pwrcl_clk;
+	} else if (logical_cpu_to_clk(cpu) == &perfcl_clk.c) {
+		c = &perfcl_clk;
+	} else {
+		pr_err("no clock device for CPU=%d\n", cpu);
+		return 0;
+	}
+
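+	/*
+	 * The hardware cycle counter is 32 bits wide; accumulate it into a
+	 * 64-bit software counter and account for wrap-around.
+	 */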
+	spin_lock_irqsave(&c->lock, flags);
+	val = clk_osm_read_reg_no_log(c, OSM_CYCLE_COUNTER_STATUS_REG);
+
+	if (val < c->prev_cycle_counter) {
+		/* Handle counter overflow */
+		c->total_cycle_counter += UINT_MAX -
+			c->prev_cycle_counter + val;
+		c->prev_cycle_counter = val;
+	} else {
+		c->total_cycle_counter += val - c->prev_cycle_counter;
+		c->prev_cycle_counter = val;
+	}
+	spin_unlock_irqrestore(&c->lock, flags);
+
+	return c->total_cycle_counter;
+}
+
+static void populate_opp_table(struct platform_device *pdev)
+{
+	int cpu;
+
+	for_each_possible_cpu(cpu) {
+		if (logical_cpu_to_clk(cpu) == &pwrcl_clk.c) {
+			WARN(add_opp(&pwrcl_clk, get_cpu_device(cpu)),
+			     "Failed to add OPP levels for power cluster\n");
+		}
+		if (logical_cpu_to_clk(cpu) == &perfcl_clk.c) {
+			WARN(add_opp(&perfcl_clk, get_cpu_device(cpu)),
+			     "Failed to add OPP levels for perf cluster\n");
+		}
+	}
+}
+
+static int debugfs_get_trace_enable(void *data, u64 *val)
+{
+	struct clk_osm *c = data;
+
+	*val = c->trace_en;
+	return 0;
+}
+
+static int debugfs_set_trace_enable(void *data, u64 val)
+{
+	struct clk_osm *c = data;
+
+	clk_osm_masked_write_reg(c, val ? TRACE_CTRL_ENABLE :
+				 TRACE_CTRL_DISABLE,
+				 TRACE_CTRL, TRACE_CTRL_EN_MASK);
+	c->trace_en = val ? true : false;
+	return 0;
+}
+DEFINE_SIMPLE_ATTRIBUTE(debugfs_trace_enable_fops,
+			debugfs_get_trace_enable,
+			debugfs_set_trace_enable,
+			"%llu\n");
+
+static int debugfs_get_wdog_trace(void *data, u64 *val)
+{
+	struct clk_osm *c = data;
+
+	*val = c->wdog_trace_en;
+	return 0;
+}
+
+static int debugfs_set_wdog_trace(void *data, u64 val)
+{
+	struct clk_osm *c = data;
+	int regval;
+
+	if (msm8998_v2) {
+		regval = clk_osm_read_reg(c, TRACE_CTRL);
+		regval = val ? regval | TRACE_CTRL_ENABLE_WDOG_STATUS :
+			regval & ~TRACE_CTRL_ENABLE_WDOG_STATUS;
+		clk_osm_write_reg(c, regval, TRACE_CTRL);
+		c->wdog_trace_en = val ? true : false;
+	} else {
+		pr_info("wdog status registers enabled by default\n");
+	}
+
+	return 0;
+}
+DEFINE_SIMPLE_ATTRIBUTE(debugfs_trace_wdog_enable_fops,
+			debugfs_get_wdog_trace,
+			debugfs_set_wdog_trace,
+			"%llu\n");
+
+#define MAX_DEBUG_BUF_LEN 15
+
+static DEFINE_MUTEX(debug_buf_mutex);
+static char debug_buf[MAX_DEBUG_BUF_LEN];
+
+static ssize_t debugfs_trace_method_set(struct file *file,
+					const char __user *buf,
+					size_t count, loff_t *ppos)
+{
+	struct clk_osm *c = file->private_data;
+	u32 val;
+
+	if (IS_ERR(file) || file == NULL) {
+		pr_err("input error %ld\n", PTR_ERR(file));
+		return -EINVAL;
+	}
+
+	if (!c) {
+		pr_err("invalid clk_osm handle\n");
+		return -EINVAL;
+	}
+
+	if (count < MAX_DEBUG_BUF_LEN) {
+		mutex_lock(&debug_buf_mutex);
+
+		if (copy_from_user(debug_buf, (void __user *) buf, count)) {
+			mutex_unlock(&debug_buf_mutex);
+			return -EFAULT;
+		}
+		debug_buf[count] = '\0';
+		mutex_unlock(&debug_buf_mutex);
+
+		/* check that user entered a supported packet type */
+		if (strcmp(debug_buf, "periodic\n") == 0) {
+			clk_osm_write_reg(c, clk_osm_count_ns(c,
+					      PERIODIC_TRACE_DEFAULT_NS),
+					  PERIODIC_TRACE_TIMER_CTRL);
+			clk_osm_masked_write_reg(c,
+				 TRACE_CTRL_PERIODIC_TRACE_ENABLE,
+				 TRACE_CTRL, TRACE_CTRL_PERIODIC_TRACE_EN_MASK);
+			c->trace_method = PERIODIC_PACKET;
+			c->trace_periodic_timer = PERIODIC_TRACE_DEFAULT_NS;
+			return count;
+		} else if (strcmp(debug_buf, "xor\n") == 0) {
+			val = clk_osm_read_reg(c, TRACE_CTRL);
+			val &= ~TRACE_CTRL_PERIODIC_TRACE_ENABLE;
+			clk_osm_write_reg(c, val, TRACE_CTRL);
+			c->trace_method = XOR_PACKET;
+			return count;
+		}
+	}
+
+	pr_err("error, supported trace mode types: 'periodic' or 'xor'\n");
+	return -EINVAL;
+}
+
+static ssize_t debugfs_trace_method_get(struct file *file, char __user *buf,
+					size_t count, loff_t *ppos)
+{
+	struct clk_osm *c = file->private_data;
+	int len = 0, rc;
+
+	if (IS_ERR(file) || file == NULL) {
+		pr_err("input error %ld\n", PTR_ERR(file));
+		return -EINVAL;
+	}
+
+	if (!c) {
+		pr_err("invalid clk_osm handle\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&debug_buf_mutex);
+
+	if (c->trace_method == PERIODIC_PACKET)
+		len = snprintf(debug_buf, sizeof(debug_buf), "periodic\n");
+	else if (c->trace_method == XOR_PACKET)
+		len = snprintf(debug_buf, sizeof(debug_buf), "xor\n");
+
+	rc = simple_read_from_buffer((void __user *) buf, count, ppos,
+				     (void *) debug_buf, len);
+
+	mutex_unlock(&debug_buf_mutex);
+
+	return rc;
+}
+
+static int debugfs_trace_method_open(struct inode *inode, struct file *file)
+{
+	if (IS_ERR(file) || file == NULL) {
+		pr_err("input error %ld\n", PTR_ERR(file));
+		return -EINVAL;
+	}
+
+	file->private_data = inode->i_private;
+	return 0;
+}
+
+static const struct file_operations debugfs_trace_method_fops = {
+	.write	= debugfs_trace_method_set,
+	.open   = debugfs_trace_method_open,
+	.read	= debugfs_trace_method_get,
+};
+
+static int debugfs_get_trace_packet_id(void *data, u64 *val)
+{
+	struct clk_osm *c = data;
+
+	*val = c->trace_id;
+	return 0;
+}
+
+static int debugfs_set_trace_packet_id(void *data, u64 val)
+{
+	struct clk_osm *c = data;
+
+	if (val < TRACE_PACKET0 || val > TRACE_PACKET3) {
+		pr_err("supported trace IDs=%d-%d\n",
+		       TRACE_PACKET0, TRACE_PACKET3);
+		return 0;
+	}
+
+	clk_osm_masked_write_reg(c, val << TRACE_CTRL_PACKET_TYPE_SHIFT,
+				 TRACE_CTRL, TRACE_CTRL_PACKET_TYPE_MASK);
+	c->trace_id = val;
+	return 0;
+}
+DEFINE_SIMPLE_ATTRIBUTE(debugfs_trace_packet_id_fops,
+			debugfs_get_trace_packet_id,
+			debugfs_set_trace_packet_id,
+			"%llu\n");
+
+static int debugfs_get_trace_periodic_timer(void *data, u64 *val)
+{
+	struct clk_osm *c = data;
+
+	*val = c->trace_periodic_timer;
+	return 0;
+}
+
+static int debugfs_set_trace_periodic_timer(void *data, u64 val)
+{
+	struct clk_osm *c = data;
+
+	if (val < PERIODIC_TRACE_MIN_NS || val > PERIODIC_TRACE_MAX_NS) {
+		pr_err("supported periodic trace periods=%d-%ld ns\n",
+		       PERIODIC_TRACE_MIN_NS, PERIODIC_TRACE_MAX_NS);
+		return 0;
+	}
+
+	clk_osm_write_reg(c, clk_osm_count_ns(c, val),
+			  PERIODIC_TRACE_TIMER_CTRL);
+	c->trace_periodic_timer = val;
+	return 0;
+}
+DEFINE_SIMPLE_ATTRIBUTE(debugfs_trace_periodic_timer_fops,
+			debugfs_get_trace_periodic_timer,
+			debugfs_set_trace_periodic_timer,
+			"%llu\n");
+
+static int debugfs_get_perf_state_met_irq(void *data, u64 *val)
+{
+	struct clk_osm *c = data;
+
+	*val = clk_osm_read_reg(c, DCVS_PERF_STATE_MET_INTR_EN);
+	return 0;
+}
+
+static int debugfs_set_perf_state_met_irq(void *data, u64 val)
+{
+	struct clk_osm *c = data;
+
+	clk_osm_write_reg(c, val ? 1 : 0,
+			  DCVS_PERF_STATE_MET_INTR_EN);
+	return 0;
+}
+DEFINE_SIMPLE_ATTRIBUTE(debugfs_perf_state_met_irq_fops,
+			debugfs_get_perf_state_met_irq,
+			debugfs_set_perf_state_met_irq,
+			"%llu\n");
+
+static int debugfs_get_perf_state_deviation_irq(void *data, u64 *val)
+{
+	struct clk_osm *c = data;
+
+	*val = clk_osm_read_reg(c,
+				DCVS_PERF_STATE_DEVIATION_INTR_EN);
+	return 0;
+}
+
+static int debugfs_set_perf_state_deviation_irq(void *data, u64 val)
+{
+	struct clk_osm *c = data;
+
+	clk_osm_write_reg(c, val ? 1 : 0,
+			  DCVS_PERF_STATE_DEVIATION_INTR_EN);
+	return 0;
+}
+DEFINE_SIMPLE_ATTRIBUTE(debugfs_perf_state_deviation_irq_fops,
+			debugfs_get_perf_state_deviation_irq,
+			debugfs_set_perf_state_deviation_irq,
+			"%llu\n");
+
+static int debugfs_get_perf_state_deviation_corrected_irq(void *data, u64 *val)
+{
+	struct clk_osm *c = data;
+
+	*val = clk_osm_read_reg(c,
+			DCVS_PERF_STATE_DEVIATION_CORRECTED_INTR_EN);
+	return 0;
+}
+
+static int debugfs_set_perf_state_deviation_corrected_irq(void *data, u64 val)
+{
+	struct clk_osm *c = data;
+
+	clk_osm_write_reg(c, val ? 1 : 0,
+		      DCVS_PERF_STATE_DEVIATION_CORRECTED_INTR_EN);
+	return 0;
+}
+DEFINE_SIMPLE_ATTRIBUTE(debugfs_perf_state_deviation_corrected_irq_fops,
+			debugfs_get_perf_state_deviation_corrected_irq,
+			debugfs_set_perf_state_deviation_corrected_irq,
+			"%llu\n");
+
+static int debugfs_get_debug_reg(void *data, u64 *val)
+{
+	struct clk_osm *c = data;
+
+	if (!c->pbases[ACD_BASE]) {
+		pr_err("ACD base start not defined\n");
+		return -EINVAL;
+	}
+
+	if (c->acd_debugfs_addr >= ACD_MASTER_ONLY_REG_ADDR)
+		*val = readl_relaxed((char *)c->vbases[ACD_BASE] +
+				     c->acd_debugfs_addr);
+	else
+		*val = clk_osm_acd_local_read_reg(c, c->acd_debugfs_addr);
+	return 0;
+}
+
+static int debugfs_set_debug_reg(void *data, u64 val)
+{
+	struct clk_osm *c = data;
+
+	if (!c->pbases[ACD_BASE]) {
+		pr_err("ACD base start not defined\n");
+		return -EINVAL;
+	}
+
+	if (c->acd_debugfs_addr >= ACD_MASTER_ONLY_REG_ADDR)
+		clk_osm_acd_master_write_reg(c, val, c->acd_debugfs_addr);
+	else
+		clk_osm_acd_master_write_through_reg(c, val,
+						     c->acd_debugfs_addr);
+
+	return 0;
+}
+DEFINE_SIMPLE_ATTRIBUTE(debugfs_acd_debug_reg_fops,
+			debugfs_get_debug_reg,
+			debugfs_set_debug_reg,
+			"0x%llx\n");
+
+static int debugfs_get_debug_reg_addr(void *data, u64 *val)
+{
+	struct clk_osm *c = data;
+
+	if (!c->pbases[ACD_BASE]) {
+		pr_err("ACD base start not defined\n");
+		return -EINVAL;
+	}
+
+	*val = c->acd_debugfs_addr;
+
+	return 0;
+}
+
+static int debugfs_set_debug_reg_addr(void *data, u64 val)
+{
+	struct clk_osm *c = data;
+
+	if (!c->pbases[ACD_BASE]) {
+		pr_err("ACD base start not defined\n");
+		return -EINVAL;
+	}
+
+	if (val >= c->acd_debugfs_addr_size)
+		return -EINVAL;
+
+	c->acd_debugfs_addr = val;
+
+	return 0;
+}
+DEFINE_SIMPLE_ATTRIBUTE(debugfs_acd_debug_reg_addr_fops,
+			debugfs_get_debug_reg_addr,
+			debugfs_set_debug_reg_addr,
+			"%llu\n");
+
+static void populate_debugfs_dir(struct clk_osm *c)
+{
+	struct dentry *temp;
+
+	if (osm_debugfs_base == NULL) {
+		osm_debugfs_base = debugfs_create_dir("osm", NULL);
+		if (IS_ERR_OR_NULL(osm_debugfs_base)) {
+			pr_err("osm debugfs base directory creation failed\n");
+			osm_debugfs_base = NULL;
+			return;
+		}
+	}
+
+	c->debugfs = debugfs_create_dir(c->c.dbg_name, osm_debugfs_base);
+	if (IS_ERR_OR_NULL(c->debugfs)) {
+		pr_err("osm debugfs directory creation failed\n");
+		return;
+	}
+
+	temp = debugfs_create_file("perf_state_met_irq_enable",
+				   S_IRUGO | S_IWUSR,
+				   c->debugfs, c,
+				   &debugfs_perf_state_met_irq_fops);
+	if (IS_ERR_OR_NULL(temp)) {
+		pr_err("perf_state_met_irq_enable debugfs file creation failed\n");
+		goto exit;
+	}
+
+	temp = debugfs_create_file("perf_state_deviation_irq_enable",
+				   S_IRUGO | S_IWUSR,
+				   c->debugfs, c,
+				   &debugfs_perf_state_deviation_irq_fops);
+	if (IS_ERR_OR_NULL(temp)) {
+		pr_err("perf_state_deviation_irq_enable debugfs file creation failed\n");
+		goto exit;
+	}
+
+	temp = debugfs_create_file("perf_state_deviation_corrected_irq_enable",
+			   S_IRUGO | S_IWUSR,
+			   c->debugfs, c,
+			   &debugfs_perf_state_deviation_corrected_irq_fops);
+	if (IS_ERR_OR_NULL(temp)) {
+		pr_err("debugfs_perf_state_deviation_corrected_irq_fops debugfs file creation failed\n");
+		goto exit;
+	}
+
+	temp = debugfs_create_file("wdog_trace_enable",
+			   S_IRUGO | S_IWUSR,
+			   c->debugfs, c,
+			   &debugfs_trace_wdog_enable_fops);
+	if (IS_ERR_OR_NULL(temp)) {
+		pr_err("debugfs_trace_wdog_enable_fops debugfs file creation failed\n");
+		goto exit;
+	}
+
+	temp = debugfs_create_file("trace_enable",
+			   S_IRUGO | S_IWUSR,
+			   c->debugfs, c,
+			   &debugfs_trace_enable_fops);
+	if (IS_ERR_OR_NULL(temp)) {
+		pr_err("debugfs_trace_enable_fops debugfs file creation failed\n");
+		goto exit;
+	}
+
+	temp = debugfs_create_file("trace_method",
+			   S_IRUGO | S_IWUSR,
+			   c->debugfs, c,
+			   &debugfs_trace_method_fops);
+	if (IS_ERR_OR_NULL(temp)) {
+		pr_err("debugfs_trace_method_fops debugfs file creation failed\n");
+		goto exit;
+	}
+
+	temp = debugfs_create_file("trace_packet_id",
+			   S_IRUGO | S_IWUSR,
+			   c->debugfs, c,
+			   &debugfs_trace_packet_id_fops);
+	if (IS_ERR_OR_NULL(temp)) {
+		pr_err("debugfs_trace_packet_id_fops debugfs file creation failed\n");
+		goto exit;
+	}
+
+	temp = debugfs_create_file("trace_periodic_timer",
+			   S_IRUGO | S_IWUSR,
+			   c->debugfs, c,
+			   &debugfs_trace_periodic_timer_fops);
+	if (IS_ERR_OR_NULL(temp)) {
+		pr_err("debugfs_trace_periodic_timer_fops debugfs file creation failed\n");
+		goto exit;
+	}
+
+	temp = debugfs_create_file("acd_debug_reg",
+			   S_IRUGO | S_IWUSR,
+			   c->debugfs, c,
+			   &debugfs_acd_debug_reg_fops);
+	if (IS_ERR_OR_NULL(temp)) {
+		pr_err("debugfs_acd_debug_reg_fops debugfs file creation failed\n");
+		goto exit;
+	}
+
+	temp = debugfs_create_file("acd_debug_reg_addr",
+			   S_IRUGO | S_IWUSR,
+			   c->debugfs, c,
+			   &debugfs_acd_debug_reg_addr_fops);
+	if (IS_ERR_OR_NULL(temp)) {
+		pr_err("debugfs_acd_debug_reg_addr_fops debugfs file creation failed\n");
+		goto exit;
+	}
+
+exit:
+	if (IS_ERR_OR_NULL(temp))
+		debugfs_remove_recursive(c->debugfs);
+}
+
+static int clk_osm_panic_callback(struct notifier_block *nfb,
+				  unsigned long event,
+				  void *data)
+{
+	int i;
+	u32 value;
+	struct clk_osm *c = container_of(nfb,
+					 struct clk_osm,
+					 panic_notifier);
+
+	for (i = 0; i < DEBUG_REG_NUM; i++) {
+		value = readl_relaxed(c->debug_regs[i]);
+		pr_debug("%s_%d=0x%08x\n", clk_panic_reg_names[i],
+		       c->cluster_num, value);
+	}
+
+	return NOTIFY_OK;
+}
+
+static unsigned long init_rate = 300000000;
+static unsigned long osm_clk_init_rate = 200000000;
+
+static int cpu_clock_osm_driver_probe(struct platform_device *pdev)
+{
+	int rc, cpu;
+	int speedbin = 0, pvs_ver = 0;
+	u32 pte_efuse;
+	char pwrclspeedbinstr[] = "qcom,pwrcl-speedbin0-v0";
+	char perfclspeedbinstr[] = "qcom,perfcl-speedbin0-v0";
+	struct cpu_cycle_counter_cb cb = {
+		.get_cpu_cycle_counter = clk_osm_get_cpu_cycle_counter,
+	};
+
+	if (of_find_compatible_node(NULL, NULL,
+				    "qcom,cpu-clock-osm-msm8998-v1")) {
+		msm8998_v1 = true;
+	} else if (of_find_compatible_node(NULL, NULL,
+					   "qcom,cpu-clock-osm-msm8998-v2")) {
+		msm8998_v2 = true;
+	}
+
+	rc = clk_osm_parse_dt_configs(pdev);
+	if (rc) {
+		dev_err(&pdev->dev, "Unable to parse device tree configurations\n");
+		return rc;
+	}
+
+	rc = clk_osm_resources_init(pdev);
+	if (rc) {
+		if (rc != -EPROBE_DEFER)
+			dev_err(&pdev->dev, "resources init failed, rc=%d\n",
+				rc);
+		return rc;
+	}
+
+	if ((pwrcl_clk.secure_init || perfcl_clk.secure_init) &&
+	    msm8998_v2) {
+		pr_err("unsupported configuration for msm8998 v2\n");
+		return -EINVAL;
+	}
+
+	if (pwrcl_clk.vbases[EFUSE_BASE]) {
+		/* Multiple speed-bins are supported */
+		pte_efuse = readl_relaxed(pwrcl_clk.vbases[EFUSE_BASE]);
+		speedbin = ((pte_efuse >> PWRCL_EFUSE_SHIFT) &
+			    PWRCL_EFUSE_MASK);
+		snprintf(pwrclspeedbinstr, ARRAY_SIZE(pwrclspeedbinstr),
+			 "qcom,pwrcl-speedbin%d-v%d", speedbin, pvs_ver);
+	}
+
+	dev_info(&pdev->dev, "using pwrcl speed bin %u and pvs_ver %d\n",
+		 speedbin, pvs_ver);
+
+	rc = clk_osm_get_lut(pdev, &pwrcl_clk,
+			     pwrclspeedbinstr);
+	if (rc) {
+		dev_err(&pdev->dev, "Unable to get OSM LUT for power cluster, rc=%d\n",
+			rc);
+		return rc;
+	}
+
+	if (perfcl_clk.vbases[EFUSE_BASE]) {
+		/* Multiple speed-bins are supported */
+		pte_efuse = readl_relaxed(perfcl_clk.vbases[EFUSE_BASE]);
+		speedbin = ((pte_efuse >> PERFCL_EFUSE_SHIFT) &
+			    PERFCL_EFUSE_MASK);
+		snprintf(perfclspeedbinstr, ARRAY_SIZE(perfclspeedbinstr),
+			 "qcom,perfcl-speedbin%d-v%d", speedbin, pvs_ver);
+	}
+
+	dev_info(&pdev->dev, "using perfcl speed bin %u and pvs_ver %d\n",
+		 speedbin, pvs_ver);
+
+	rc = clk_osm_get_lut(pdev, &perfcl_clk, perfclspeedbinstr);
+	if (rc) {
+		dev_err(&pdev->dev, "Unable to get OSM LUT for perf cluster, rc=%d\n",
+			rc);
+		return rc;
+	}
+
+	rc = clk_osm_resolve_open_loop_voltages(&pwrcl_clk);
+	if (rc) {
+		if (rc == -EPROBE_DEFER)
+			return rc;
+		dev_err(&pdev->dev, "Unable to determine open-loop voltages for power cluster, rc=%d\n",
+			rc);
+		return rc;
+	}
+
+	rc = clk_osm_resolve_open_loop_voltages(&perfcl_clk);
+	if (rc) {
+		if (rc == -EPROBE_DEFER)
+			return rc;
+		dev_err(&pdev->dev, "Unable to determine open-loop voltages for perf cluster, rc=%d\n",
+			rc);
+		return rc;
+	}
+
+	rc = clk_osm_resolve_crossover_corners(&pwrcl_clk, pdev,
+			       "qcom,pwrcl-apcs-mem-acc-threshold-voltage");
+	if (rc)
+		dev_info(&pdev->dev, "No MEM-ACC crossover corner programmed\n");
+
+	rc = clk_osm_resolve_crossover_corners(&perfcl_clk, pdev,
+				"qcom,perfcl-apcs-mem-acc-threshold-voltage");
+	if (rc)
+		dev_info(&pdev->dev, "No MEM-ACC crossover corner programmed\n");
+
+	clk_osm_setup_cycle_counters(&pwrcl_clk);
+	clk_osm_setup_cycle_counters(&perfcl_clk);
+
+	clk_osm_print_osm_table(&pwrcl_clk);
+	clk_osm_print_osm_table(&perfcl_clk);
+
+	rc = clk_osm_setup_hw_table(&pwrcl_clk);
+	if (rc) {
+		dev_err(&pdev->dev, "failed to setup power cluster hardware table\n");
+		goto exit;
+	}
+	rc = clk_osm_setup_hw_table(&perfcl_clk);
+	if (rc) {
+		dev_err(&pdev->dev, "failed to setup perf cluster hardware table\n");
+		goto exit;
+	}
+
+	/* Policy tuning */
+	rc = clk_osm_set_cc_policy(pdev);
+	if (rc < 0) {
+		dev_err(&pdev->dev, "CC policy setup failed\n");
+		goto exit;
+	}
+
+	/* LLM Freq Policy Tuning */
+	rc = clk_osm_set_llm_freq_policy(pdev);
+	if (rc < 0) {
+		dev_err(&pdev->dev, "LLM frequency policy setup failed\n");
+		goto exit;
+	}
+
+	/* LLM Voltage Policy Tuning */
+	rc = clk_osm_set_llm_volt_policy(pdev);
+	if (rc < 0) {
+		dev_err(&pdev->dev, "Failed to set LLM voltage policy\n");
+		goto exit;
+	}
+
+	clk_osm_setup_fsms(&pwrcl_clk);
+	clk_osm_setup_fsms(&perfcl_clk);
+
+	/*
+	 * Perform typical secure-world HW initialization
+	 * as necessary.
+	 */
+	clk_osm_do_additional_setup(&pwrcl_clk, pdev);
+	clk_osm_do_additional_setup(&perfcl_clk, pdev);
+
+	/* MEM-ACC Programming */
+	clk_osm_program_mem_acc_regs(&pwrcl_clk);
+	clk_osm_program_mem_acc_regs(&perfcl_clk);
+
+	/* Program APM crossover corners */
+	clk_osm_apm_vc_setup(&pwrcl_clk);
+	clk_osm_apm_vc_setup(&perfcl_clk);
+
+	rc = clk_osm_setup_irq(pdev, &pwrcl_clk, "pwrcl-irq");
+	if (rc)
+		pr_err("Debug IRQ not set for pwrcl\n");
+
+	rc = clk_osm_setup_irq(pdev, &perfcl_clk, "perfcl-irq");
+	if (rc)
+		pr_err("Debug IRQ not set for perfcl\n");
+
+	clk_osm_setup_osm_was(&pwrcl_clk);
+	clk_osm_setup_osm_was(&perfcl_clk);
+
+	if (of_property_read_bool(pdev->dev.of_node,
+				  "qcom,osm-pll-setup")) {
+		clk_osm_setup_cluster_pll(&pwrcl_clk);
+		clk_osm_setup_cluster_pll(&perfcl_clk);
+	}
+
+	spin_lock_init(&pwrcl_clk.lock);
+	spin_lock_init(&perfcl_clk.lock);
+
+	pwrcl_clk.panic_notifier.notifier_call = clk_osm_panic_callback;
+	atomic_notifier_chain_register(&panic_notifier_list,
+				       &pwrcl_clk.panic_notifier);
+	perfcl_clk.panic_notifier.notifier_call = clk_osm_panic_callback;
+	atomic_notifier_chain_register(&panic_notifier_list,
+				       &perfcl_clk.panic_notifier);
+
+	rc = of_msm_clock_register(pdev->dev.of_node, cpu_clocks_osm,
+				   ARRAY_SIZE(cpu_clocks_osm));
+	if (rc) {
+		dev_err(&pdev->dev, "Unable to register CPU clocks, rc=%d\n",
+			rc);
+		return rc;
+	}
+
+	/*
+	 * The hmss_gpll0 clock runs at 300 MHz. Ensure it is at the correct
+	 * frequency before enabling OSM. LUT index 0 is always sourced from
+	 * this clock.
+	 */
+	rc = clk_set_rate(&sys_apcsaux_clk_gcc.c, init_rate);
+	if (rc) {
+		dev_err(&pdev->dev, "Unable to set init rate on hmss_gpll0, rc=%d\n",
+			rc);
+		return rc;
+	}
+	clk_prepare_enable(&sys_apcsaux_clk_gcc.c);
+
+	rc = clk_set_rate(&osm_clk_src.c, osm_clk_init_rate);
+	if (rc) {
+		dev_err(&pdev->dev, "Unable to set init rate on osm_clk, rc=%d\n",
+			rc);
+		goto exit2;
+	}
+
+	/* Make sure index zero is selected */
+	rc = clk_set_rate(&pwrcl_clk.c, init_rate);
+	if (rc) {
+		dev_err(&pdev->dev, "Unable to set init rate on pwr cluster, rc=%d\n",
+			rc);
+		goto exit2;
+	}
+
+	rc = clk_set_rate(&perfcl_clk.c, init_rate);
+	if (rc) {
+		dev_err(&pdev->dev, "Unable to set init rate on perf cluster, rc=%d\n",
+			rc);
+		goto exit2;
+	}
+
+	get_online_cpus();
+
+	/* Enable OSM */
+	for_each_online_cpu(cpu) {
+		WARN(clk_prepare_enable(logical_cpu_to_clk(cpu)),
+		     "Failed to enable clock for cpu %d\n", cpu);
+	}
+
+	/* Set final boot rate */
+	rc = clk_set_rate(&pwrcl_clk.c, msm8998_v1 ?
+			  MSM8998V1_PWRCL_BOOT_RATE :
+			  MSM8998V2_PWRCL_BOOT_RATE);
+	if (rc) {
+		dev_err(&pdev->dev, "Unable to set boot rate on pwr cluster, rc=%d\n",
+			rc);
+		goto exit2;
+	}
+
+	rc = clk_set_rate(&perfcl_clk.c, msm8998_v1 ?
+			  MSM8998V1_PERFCL_BOOT_RATE :
+			  MSM8998V2_PERFCL_BOOT_RATE);
+	if (rc) {
+		dev_err(&pdev->dev, "Unable to set boot rate on perf cluster, rc=%d\n",
+			rc);
+		goto exit2;
+	}
+
+	populate_opp_table(pdev);
+	populate_debugfs_dir(&pwrcl_clk);
+	populate_debugfs_dir(&perfcl_clk);
+
+	of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev);
+
+	register_cpu_cycle_counter_cb(&cb);
+
+	pr_info("OSM driver initialized\n");
+	put_online_cpus();
+
+	return 0;
+
+exit2:
+	clk_disable_unprepare(&sys_apcsaux_clk_gcc.c);
+exit:
+	dev_err(&pdev->dev, "OSM driver failed to initialize, rc=%d\n",
+		rc);
+	panic("Unable to set up OSM");
+}
+
+static struct of_device_id match_table[] = {
+	{ .compatible = "qcom,cpu-clock-osm-msm8998-v1" },
+	{ .compatible = "qcom,cpu-clock-osm-msm8998-v2" },
+	{}
+};
+
+static struct platform_driver cpu_clock_osm_driver = {
+	.probe = cpu_clock_osm_driver_probe,
+	.driver = {
+		.name = "cpu-clock-osm",
+		.of_match_table = match_table,
+		.owner = THIS_MODULE,
+	},
+};
+
+static int __init cpu_clock_osm_init(void)
+{
+	return platform_driver_register(&cpu_clock_osm_driver);
+}
+arch_initcall(cpu_clock_osm_init);
+
+static void __exit cpu_clock_osm_exit(void)
+{
+	platform_driver_unregister(&cpu_clock_osm_driver);
+}
+module_exit(cpu_clock_osm_exit);
+
+MODULE_DESCRIPTION("CPU clock driver for OSM");
+MODULE_LICENSE("GPL v2");
diff -Nruw linux-4.4.115-fbx/drivers/clk/msm./clock-pll.c linux-4.4.115-fbx/drivers/clk/msm/clock-pll.c
--- linux-4.4.115-fbx/drivers/clk/msm./clock-pll.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/clk/msm/clock-pll.c	2019-01-22 16:16:23.019242024 +0100
@@ -0,0 +1,1206 @@
+/*
+ * Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/sched.h>
+#include <soc/qcom/clock-pll.h>
+#include <soc/qcom/msm-clock-controller.h>
+
+#include "clock.h"
+
+#define PLL_OUTCTRL BIT(0)
+#define PLL_BYPASSNL BIT(1)
+#define PLL_RESET_N BIT(2)
+#define PLL_MODE_MASK BM(3, 0)
+
+#define PLL_EN_REG(x)		(*(x)->base + (unsigned long) (x)->en_reg)
+#define PLL_STATUS_REG(x)	(*(x)->base + (unsigned long) (x)->status_reg)
+#define PLL_ALT_STATUS_REG(x)	(*(x)->base + (unsigned long) \
+							(x)->alt_status_reg)
+#define PLL_MODE_REG(x)		(*(x)->base + (unsigned long) (x)->mode_reg)
+#define PLL_L_REG(x)		(*(x)->base + (unsigned long) (x)->l_reg)
+#define PLL_M_REG(x)		(*(x)->base + (unsigned long) (x)->m_reg)
+#define PLL_N_REG(x)		(*(x)->base + (unsigned long) (x)->n_reg)
+#define PLL_CONFIG_REG(x)	(*(x)->base + (unsigned long) (x)->config_reg)
+#define PLL_ALPHA_REG(x)	(*(x)->base + (unsigned long) (x)->alpha_reg)
+#define PLL_CFG_ALT_REG(x)	(*(x)->base + (unsigned long) \
+							(x)->config_alt_reg)
+#define PLL_CFG_CTL_REG(x)	(*(x)->base + (unsigned long) \
+							(x)->config_ctl_reg)
+#define PLL_CFG_CTL_HI_REG(x)	(*(x)->base + (unsigned long) \
+							(x)->config_ctl_hi_reg)
+#define PLL_TEST_CTL_LO_REG(x)	(*(x)->base + (unsigned long) \
+							(x)->test_ctl_lo_reg)
+#define PLL_TEST_CTL_HI_REG(x)	(*(x)->base + (unsigned long) \
+							(x)->test_ctl_hi_reg)
+static DEFINE_SPINLOCK(pll_reg_lock);
+
+#define ENABLE_WAIT_MAX_LOOPS 200
+#define PLL_LOCKED_BIT BIT(16)
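+/*
+ * pll_vote_clk_enable() polls the status register once per microsecond,
+ * so ENABLE_WAIT_MAX_LOOPS bounds the vote-to-lock wait at about 200 us.
+ */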
+
+#define SPM_FORCE_EVENT   0x4
+
+static int pll_vote_clk_enable(struct clk *c)
+{
+	u32 ena, count;
+	unsigned long flags;
+	struct pll_vote_clk *pllv = to_pll_vote_clk(c);
+
+	spin_lock_irqsave(&pll_reg_lock, flags);
+	ena = readl_relaxed(PLL_EN_REG(pllv));
+	ena |= pllv->en_mask;
+	writel_relaxed(ena, PLL_EN_REG(pllv));
+	spin_unlock_irqrestore(&pll_reg_lock, flags);
+
+	/*
+	 * Use a memory barrier since some PLL status registers are
+	 * not within the same 1K segment as the voting registers.
+	 */
+	mb();
+
+	/* Wait for pll to enable. */
+	for (count = ENABLE_WAIT_MAX_LOOPS; count > 0; count--) {
+		if (readl_relaxed(PLL_STATUS_REG(pllv)) & pllv->status_mask)
+			return 0;
+		udelay(1);
+	}
+
+	WARN(1, "PLL %s didn't enable after voting for it!\n", c->dbg_name);
+
+	return -ETIMEDOUT;
+}
+
+static void pll_vote_clk_disable(struct clk *c)
+{
+	u32 ena;
+	unsigned long flags;
+	struct pll_vote_clk *pllv = to_pll_vote_clk(c);
+
+	spin_lock_irqsave(&pll_reg_lock, flags);
+	ena = readl_relaxed(PLL_EN_REG(pllv));
+	ena &= ~(pllv->en_mask);
+	writel_relaxed(ena, PLL_EN_REG(pllv));
+	spin_unlock_irqrestore(&pll_reg_lock, flags);
+}
+
+static int pll_vote_clk_is_enabled(struct clk *c)
+{
+	struct pll_vote_clk *pllv = to_pll_vote_clk(c);
+	return !!(readl_relaxed(PLL_STATUS_REG(pllv)) & pllv->status_mask);
+}
+
+static enum handoff pll_vote_clk_handoff(struct clk *c)
+{
+	struct pll_vote_clk *pllv = to_pll_vote_clk(c);
+	if (readl_relaxed(PLL_EN_REG(pllv)) & pllv->en_mask)
+		return HANDOFF_ENABLED_CLK;
+
+	return HANDOFF_DISABLED_CLK;
+}
+
+static void __iomem *pll_vote_clk_list_registers(struct clk *c, int n,
+				struct clk_register_data **regs, u32 *size)
+{
+	struct pll_vote_clk *pllv = to_pll_vote_clk(c);
+	static struct clk_register_data data1[] = {
+		{"APPS_VOTE", 0x0},
+	};
+
+	if (n)
+		return ERR_PTR(-EINVAL);
+
+	*regs = data1;
+	*size = ARRAY_SIZE(data1);
+	return PLL_EN_REG(pllv);
+}
+
+struct clk_ops clk_ops_pll_vote = {
+	.enable = pll_vote_clk_enable,
+	.disable = pll_vote_clk_disable,
+	.is_enabled = pll_vote_clk_is_enabled,
+	.handoff = pll_vote_clk_handoff,
+	.list_registers = pll_vote_clk_list_registers,
+};
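+
+/*
+ * Illustrative sketch only (not part of this driver): a votable PLL using
+ * the ops above is typically declared in a clock table along these lines,
+ * where the register offsets and masks below are hypothetical:
+ *
+ *	static struct pll_vote_clk gpll0_example = {
+ *		.en_reg = (void __iomem *)0x1480,
+ *		.en_mask = BIT(0),
+ *		.status_reg = (void __iomem *)0x1484,
+ *		.status_mask = BIT(17),
+ *		.base = &virt_base,
+ *		.c = {
+ *			.ops = &clk_ops_pll_vote,
+ *			.dbg_name = "gpll0_example",
+ *		},
+ *	};
+ */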
+
+/*
+ *  spm_event() -- Set/Clear SPM events
+ *  PLL off sequence -- enable (1)
+ *    Set L2_SPM_FORCE_EVENT_EN[bit] register to 1
+ *    Set L2_SPM_FORCE_EVENT[bit] register to 1
+ *  PLL on sequence -- enable (0)
+ *   Clear L2_SPM_FORCE_EVENT[bit] register to 0
+ *   Clear L2_SPM_FORCE_EVENT_EN[bit] register to 0
+ */
+static void spm_event(void __iomem *base, u32 offset, u32 bit,
+							bool enable)
+{
+	uint32_t val;
+
+	if (!base)
+		return;
+
+	if (enable) {
+		/* L2_SPM_FORCE_EVENT_EN */
+		val = readl_relaxed(base + offset);
+		val |= BIT(bit);
+		writel_relaxed(val, (base + offset));
+		/* Ensure that the write above goes through. */
+		mb();
+
+		/* L2_SPM_FORCE_EVENT */
+		val = readl_relaxed(base + offset + SPM_FORCE_EVENT);
+		val |= BIT(bit);
+		writel_relaxed(val, (base + offset + SPM_FORCE_EVENT));
+		/* Ensure that the write above goes through. */
+		mb();
+	} else {
+		/* L2_SPM_FORCE_EVENT */
+		val = readl_relaxed(base + offset + SPM_FORCE_EVENT);
+		val &= ~BIT(bit);
+		writel_relaxed(val, (base + offset + SPM_FORCE_EVENT));
+		/* Ensure that the write above goes through. */
+		mb();
+
+		/* L2_SPM_FORCE_EVENT_EN */
+		val = readl_relaxed(base + offset);
+		val &= ~BIT(bit);
+		writel_relaxed(val, (base + offset));
+		/* Ensure that the write above goes through. */
+		mb();
+	}
+}
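+
+/*
+ * For illustration (hypothetical offset and bit): with offset 0x24 and
+ * event bit 4, the enable == true path above sets BIT(4) in
+ * L2_SPM_FORCE_EVENT_EN at (base + 0x24) and then in L2_SPM_FORCE_EVENT
+ * at (base + 0x24 + SPM_FORCE_EVENT) == (base + 0x28); the enable == false
+ * path clears the same bits in the reverse order.
+ */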
+
+static void __pll_config_reg(void __iomem *pll_config, struct pll_freq_tbl *f,
+			struct pll_config_masks *masks)
+{
+	u32 regval;
+
+	regval = readl_relaxed(pll_config);
+
+	/* Enable the MN counter if used */
+	if (f->m_val)
+		regval |= masks->mn_en_mask;
+
+	/* Set pre-divider and post-divider values */
+	regval &= ~masks->pre_div_mask;
+	regval |= f->pre_div_val;
+	regval &= ~masks->post_div_mask;
+	regval |= f->post_div_val;
+
+	/* Select VCO setting */
+	regval &= ~masks->vco_mask;
+	regval |= f->vco_val;
+
+	/* Enable main output if it has not been enabled */
+	if (masks->main_output_mask && !(regval & masks->main_output_mask))
+		regval |= masks->main_output_mask;
+
+	writel_relaxed(regval, pll_config);
+}
+
+static int sr2_pll_clk_enable(struct clk *c)
+{
+	unsigned long flags;
+	struct pll_clk *pll = to_pll_clk(c);
+	int ret = 0, count;
+	u32 mode = readl_relaxed(PLL_MODE_REG(pll));
+	u32 lockmask = pll->masks.lock_mask ?: PLL_LOCKED_BIT;
+
+	spin_lock_irqsave(&pll_reg_lock, flags);
+
+	spm_event(pll->spm_ctrl.spm_base, pll->spm_ctrl.offset,
+				pll->spm_ctrl.event_bit, false);
+
+	/* Disable PLL bypass mode. */
+	mode |= PLL_BYPASSNL;
+	writel_relaxed(mode, PLL_MODE_REG(pll));
+
+	/*
+	 * H/W requires a 5us delay between disabling the bypass and
+	 * de-asserting the reset. Delay 10us just to be safe.
+	 */
+	mb();
+	udelay(10);
+
+	/* De-assert active-low PLL reset. */
+	mode |= PLL_RESET_N;
+	writel_relaxed(mode, PLL_MODE_REG(pll));
+
+	/* Wait for pll to lock. */
+	for (count = ENABLE_WAIT_MAX_LOOPS; count > 0; count--) {
+		if (readl_relaxed(PLL_STATUS_REG(pll)) & lockmask)
+			break;
+		udelay(1);
+	}
+
+	if (!(readl_relaxed(PLL_STATUS_REG(pll)) & lockmask))
+		pr_err("PLL %s didn't lock after enabling it!\n", c->dbg_name);
+
+	/* Enable PLL output. */
+	mode |= PLL_OUTCTRL;
+	writel_relaxed(mode, PLL_MODE_REG(pll));
+
+	/* Ensure that the write above goes through before returning. */
+	mb();
+
+	spin_unlock_irqrestore(&pll_reg_lock, flags);
+	return ret;
+}
+
+void __variable_rate_pll_init(struct clk *c)
+{
+	struct pll_clk *pll = to_pll_clk(c);
+	u32 regval;
+
+	regval = readl_relaxed(PLL_CONFIG_REG(pll));
+
+	if (pll->masks.post_div_mask) {
+		regval &= ~pll->masks.post_div_mask;
+		regval |= pll->vals.post_div_masked;
+	}
+
+	if (pll->masks.pre_div_mask) {
+		regval &= ~pll->masks.pre_div_mask;
+		regval |= pll->vals.pre_div_masked;
+	}
+
+	if (pll->masks.main_output_mask)
+		regval |= pll->masks.main_output_mask;
+
+	if (pll->masks.early_output_mask)
+		regval |= pll->masks.early_output_mask;
+
+	if (pll->vals.enable_mn)
+		regval |= pll->masks.mn_en_mask;
+	else
+		regval &= ~pll->masks.mn_en_mask;
+
+	writel_relaxed(regval, PLL_CONFIG_REG(pll));
+
+	regval = readl_relaxed(PLL_MODE_REG(pll));
+	if (pll->masks.apc_pdn_mask)
+		regval &= ~pll->masks.apc_pdn_mask;
+	writel_relaxed(regval, PLL_MODE_REG(pll));
+
+	writel_relaxed(pll->vals.alpha_val, PLL_ALPHA_REG(pll));
+	writel_relaxed(pll->vals.config_ctl_val, PLL_CFG_CTL_REG(pll));
+	if (pll->vals.config_ctl_hi_val)
+		writel_relaxed(pll->vals.config_ctl_hi_val,
+				PLL_CFG_CTL_HI_REG(pll));
+	if (pll->init_test_ctl) {
+		writel_relaxed(pll->vals.test_ctl_lo_val,
+				PLL_TEST_CTL_LO_REG(pll));
+		writel_relaxed(pll->vals.test_ctl_hi_val,
+				PLL_TEST_CTL_HI_REG(pll));
+	}
+
+	pll->inited = true;
+}
+
+static int variable_rate_pll_clk_enable(struct clk *c)
+{
+	unsigned long flags;
+	struct pll_clk *pll = to_pll_clk(c);
+	int ret = 0, count;
+	u32 mode, testlo;
+	u32 lockmask = pll->masks.lock_mask ?: PLL_LOCKED_BIT;
+	u32 mode_lock;
+	u64 time;
+	bool early_lock = false;
+
+	spin_lock_irqsave(&pll_reg_lock, flags);
+
+	if (unlikely(!to_pll_clk(c)->inited))
+		__variable_rate_pll_init(c);
+
+	mode = readl_relaxed(PLL_MODE_REG(pll));
+
+	/* Set test control bits as required by HW doc */
+	if (pll->test_ctl_lo_reg && pll->vals.test_ctl_lo_val &&
+		pll->pgm_test_ctl_enable)
+		writel_relaxed(pll->vals.test_ctl_lo_val,
+				PLL_TEST_CTL_LO_REG(pll));
+
+	if (!pll->test_ctl_dbg) {
+		/* Enable test_ctl debug */
+		mode |= BIT(3);
+		writel_relaxed(mode, PLL_MODE_REG(pll));
+
+		testlo = readl_relaxed(PLL_TEST_CTL_LO_REG(pll));
+		testlo &= ~BM(7, 6);
+		testlo |= 0xC0;
+		writel_relaxed(testlo, PLL_TEST_CTL_LO_REG(pll));
+		/* Wait for the write to complete */
+		mb();
+	}
+
+	/* Disable PLL bypass mode. */
+	mode |= PLL_BYPASSNL;
+	writel_relaxed(mode, PLL_MODE_REG(pll));
+
+	/*
+	 * H/W requires a 5us delay between disabling the bypass and
+	 * de-asserting the reset. Use 10us to be sure.
+	 */
+	mb();
+	udelay(10);
+
+	/* De-assert active-low PLL reset. */
+	mode |= PLL_RESET_N;
+	writel_relaxed(mode, PLL_MODE_REG(pll));
+
+	/*
+	 * The HPG mandates only a 5us delay, but a 200us delay is used here
+	 * instead to address a locking issue where the PLL can exhibit early
+	 * "transient" locks about 16us from this point. The higher delay
+	 * avoids running into those transients.
+	 */
+	mb();
+	udelay(200);
+
+	/* Clear test control bits */
+	if (pll->test_ctl_lo_reg && pll->vals.test_ctl_lo_val &&
+		pll->pgm_test_ctl_enable)
+		writel_relaxed(0x0, PLL_TEST_CTL_LO_REG(pll));
+
+	time = sched_clock();
+	/* Wait for pll to lock. */
+	for (count = ENABLE_WAIT_MAX_LOOPS; count > 0; count--) {
+		if (readl_relaxed(PLL_STATUS_REG(pll)) & lockmask) {
+			udelay(1);
+			/*
+			 * Check again to be sure. This is to avoid
+			 * breaking too early if there is a "transient"
+			 * lock.
+			 */
+			if ((readl_relaxed(PLL_STATUS_REG(pll)) & lockmask))
+				break;
+			else
+				early_lock = true;
+		}
+		udelay(1);
+	}
+	time = sched_clock() - time;
+
+	mode_lock = readl_relaxed(PLL_STATUS_REG(pll));
+
+	if (!(mode_lock & lockmask)) {
+		pr_err("PLL lock bit detection total wait time: %lld ns", time);
+		pr_err("PLL %s didn't lock after enabling for L value 0x%x!\n",
+			c->dbg_name, readl_relaxed(PLL_L_REG(pll)));
+		pr_err("mode register is 0x%x\n",
+			readl_relaxed(PLL_MODE_REG(pll)));
+		pr_err("user control register is 0x%x\n",
+			readl_relaxed(PLL_CONFIG_REG(pll)));
+		pr_err("config control register is 0x%x\n",
+			readl_relaxed(PLL_CFG_CTL_REG(pll)));
+		pr_err("test control high register is 0x%x\n",
+			readl_relaxed(PLL_TEST_CTL_HI_REG(pll)));
+		pr_err("test control low register is 0x%x\n",
+			readl_relaxed(PLL_TEST_CTL_LO_REG(pll)));
+		pr_err("early lock? %s\n", early_lock ? "yes" : "no");
+
+		testlo = readl_relaxed(PLL_TEST_CTL_LO_REG(pll));
+		testlo &= ~BM(7, 6);
+		writel_relaxed(testlo, PLL_TEST_CTL_LO_REG(pll));
+		/* Wait for the write to complete */
+		mb();
+
+		pr_err("test_ctl_lo = 0x%x, pll status is: 0x%x\n",
+			readl_relaxed(PLL_TEST_CTL_LO_REG(pll)),
+			readl_relaxed(PLL_ALT_STATUS_REG(pll)));
+
+		testlo = readl_relaxed(PLL_TEST_CTL_LO_REG(pll));
+		testlo &= ~BM(7, 6);
+		testlo |= 0x40;
+		writel_relaxed(testlo, PLL_TEST_CTL_LO_REG(pll));
+		/* Wait for the write to complete */
+		mb();
+		pr_err("test_ctl_lo = 0x%x, pll status is: 0x%x\n",
+			readl_relaxed(PLL_TEST_CTL_LO_REG(pll)),
+			readl_relaxed(PLL_ALT_STATUS_REG(pll)));
+
+		testlo = readl_relaxed(PLL_TEST_CTL_LO_REG(pll));
+		testlo &= ~BM(7, 6);
+		testlo |= 0x80;
+		writel_relaxed(testlo, PLL_TEST_CTL_LO_REG(pll));
+		/* Wait for the write to complete */
+		mb();
+
+		pr_err("test_ctl_lo = 0x%x, pll status is: 0x%x\n",
+			readl_relaxed(PLL_TEST_CTL_LO_REG(pll)),
+			readl_relaxed(PLL_ALT_STATUS_REG(pll)));
+
+		testlo = readl_relaxed(PLL_TEST_CTL_LO_REG(pll));
+		testlo &= ~BM(7, 6);
+		testlo |= 0xC0;
+		writel_relaxed(testlo, PLL_TEST_CTL_LO_REG(pll));
+		/* Wait for the write to complete */
+		mb();
+
+		pr_err("test_ctl_lo = 0x%x, pll status is: 0x%x\n",
+			readl_relaxed(PLL_TEST_CTL_LO_REG(pll)),
+			readl_relaxed(PLL_ALT_STATUS_REG(pll)));
+		panic("failed to lock %s PLL\n", c->dbg_name);
+	}
+
+	/* Enable PLL output. */
+	mode |= PLL_OUTCTRL;
+	writel_relaxed(mode, PLL_MODE_REG(pll));
+
+	/* Ensure that the write above goes through before returning. */
+	mb();
+
+	spin_unlock_irqrestore(&pll_reg_lock, flags);
+
+	return ret;
+}
+
+static void variable_rate_pll_clk_disable_hwfsm(struct clk *c)
+{
+	struct pll_clk *pll = to_pll_clk(c);
+	u32 regval;
+
+	/* Set test control bit to stay-in-CFA if necessary */
+	if (pll->test_ctl_lo_reg && pll->pgm_test_ctl_enable) {
+		regval = readl_relaxed(PLL_TEST_CTL_LO_REG(pll));
+		writel_relaxed(regval | BIT(16),
+				PLL_TEST_CTL_LO_REG(pll));
+	}
+
+	/* 8 reference clock cycle delay mandated by the HPG */
+	udelay(1);
+}
+
+static int variable_rate_pll_clk_enable_hwfsm(struct clk *c)
+{
+	struct pll_clk *pll = to_pll_clk(c);
+	int count;
+	u32 lockmask = pll->masks.lock_mask ?: PLL_LOCKED_BIT;
+	unsigned long flags;
+	u32 regval;
+
+	spin_lock_irqsave(&pll_reg_lock, flags);
+
+	/* Clear test control bit if necessary */
+	if (pll->test_ctl_lo_reg && pll->pgm_test_ctl_enable) {
+		regval = readl_relaxed(PLL_TEST_CTL_LO_REG(pll));
+		regval &= ~BIT(16);
+		writel_relaxed(regval, PLL_TEST_CTL_LO_REG(pll));
+	}
+
+	/* Wait for 50us explicitly to avoid transient locks */
+	udelay(50);
+
+	for (count = ENABLE_WAIT_MAX_LOOPS; count > 0; count--) {
+		if (readl_relaxed(PLL_STATUS_REG(pll)) & lockmask)
+			break;
+		udelay(1);
+	}
+
+	if (!(readl_relaxed(PLL_STATUS_REG(pll)) & lockmask))
+		pr_err("PLL %s didn't lock after enabling it!\n", c->dbg_name);
+
+	spin_unlock_irqrestore(&pll_reg_lock, flags);
+
+	return 0;
+}
+
+static void __pll_clk_enable_reg(void __iomem *mode_reg)
+{
+	u32 mode = readl_relaxed(mode_reg);
+	/* Disable PLL bypass mode. */
+	mode |= PLL_BYPASSNL;
+	writel_relaxed(mode, mode_reg);
+
+	/*
+	 * H/W requires a 5us delay between disabling the bypass and
+	 * de-asserting the reset. Delay 10us just to be safe.
+	 */
+	mb();
+	udelay(10);
+
+	/* De-assert active-low PLL reset. */
+	mode |= PLL_RESET_N;
+	writel_relaxed(mode, mode_reg);
+
+	/* Wait until PLL is locked. */
+	mb();
+	udelay(50);
+
+	/* Enable PLL output. */
+	mode |= PLL_OUTCTRL;
+	writel_relaxed(mode, mode_reg);
+
+	/* Ensure that the write above goes through before returning. */
+	mb();
+}
+
+static int local_pll_clk_enable(struct clk *c)
+{
+	unsigned long flags;
+	struct pll_clk *pll = to_pll_clk(c);
+
+	spin_lock_irqsave(&pll_reg_lock, flags);
+	__pll_clk_enable_reg(PLL_MODE_REG(pll));
+	spin_unlock_irqrestore(&pll_reg_lock, flags);
+
+	return 0;
+}
+
+static void __pll_clk_disable_reg(void __iomem *mode_reg)
+{
+	u32 mode = readl_relaxed(mode_reg);
+	mode &= ~PLL_MODE_MASK;
+	writel_relaxed(mode, mode_reg);
+}
+
+static void local_pll_clk_disable(struct clk *c)
+{
+	unsigned long flags;
+	struct pll_clk *pll = to_pll_clk(c);
+
+	/*
+	 * Disable the PLL output, disable test mode, enable
+	 * the bypass mode, and assert the reset.
+	 */
+	spin_lock_irqsave(&pll_reg_lock, flags);
+	spm_event(pll->spm_ctrl.spm_base, pll->spm_ctrl.offset,
+				pll->spm_ctrl.event_bit, true);
+	__pll_clk_disable_reg(PLL_MODE_REG(pll));
+	spin_unlock_irqrestore(&pll_reg_lock, flags);
+}
+
+static enum handoff local_pll_clk_handoff(struct clk *c)
+{
+	struct pll_clk *pll = to_pll_clk(c);
+	u32 mode = readl_relaxed(PLL_MODE_REG(pll));
+	u32 mask = PLL_BYPASSNL | PLL_RESET_N | PLL_OUTCTRL;
+	unsigned long parent_rate;
+	u32 lval, mval, nval, userval;
+
+	if ((mode & mask) != mask)
+		return HANDOFF_DISABLED_CLK;
+
+	/* Assume bootloaders configure PLL to c->rate */
+	if (c->rate)
+		return HANDOFF_ENABLED_CLK;
+
+	parent_rate = clk_get_rate(c->parent);
+	lval = readl_relaxed(PLL_L_REG(pll));
+	mval = readl_relaxed(PLL_M_REG(pll));
+	nval = readl_relaxed(PLL_N_REG(pll));
+	userval = readl_relaxed(PLL_CONFIG_REG(pll));
+
+	c->rate = parent_rate * lval;
+
+	if (pll->masks.mn_en_mask && userval) {
+		if (!nval)
+			nval = 1;
+		c->rate += (parent_rate * mval) / nval;
+	}
+
+	return HANDOFF_ENABLED_CLK;
+}
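+
+/*
+ * Worked example for the rate recovery above (illustrative values): with a
+ * 19.2 MHz parent, L = 52, M = 1, N = 2 and the MN counter enabled, the
+ * recovered rate is 19.2 MHz * 52 + (19.2 MHz * 1) / 2 =
+ * 998.4 MHz + 9.6 MHz = 1008 MHz.
+ */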
+
+static long local_pll_clk_round_rate(struct clk *c, unsigned long rate)
+{
+	struct pll_freq_tbl *nf;
+	struct pll_clk *pll = to_pll_clk(c);
+
+	if (!pll->freq_tbl)
+		return -EINVAL;
+
+	for (nf = pll->freq_tbl; nf->freq_hz != PLL_FREQ_END; nf++)
+		if (nf->freq_hz >= rate)
+			return nf->freq_hz;
+
+	nf--;
+	return nf->freq_hz;
+}
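+
+/*
+ * Note: the frequency table is assumed to be sorted in ascending order, so
+ * the first entry at or above the requested rate wins. Illustratively, with
+ * a table of {400, 800, 1200} MHz, a request for 900 MHz rounds up to
+ * 1200 MHz, and a request above 1200 MHz falls back to the last entry.
+ */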
+
+static int local_pll_clk_set_rate(struct clk *c, unsigned long rate)
+{
+	struct pll_freq_tbl *nf;
+	struct pll_clk *pll = to_pll_clk(c);
+	unsigned long flags;
+
+	for (nf = pll->freq_tbl; nf->freq_hz != PLL_FREQ_END
+			&& nf->freq_hz != rate; nf++)
+		;
+
+	if (nf->freq_hz == PLL_FREQ_END)
+		return -EINVAL;
+
+	/*
+	 * Ensure PLL is off before changing rate. For optimization reasons,
+	 * assume no downstream clocks are actively using it.
+	 */
+	spin_lock_irqsave(&c->lock, flags);
+	if (c->count)
+		c->ops->disable(c);
+
+	writel_relaxed(nf->l_val, PLL_L_REG(pll));
+	writel_relaxed(nf->m_val, PLL_M_REG(pll));
+	writel_relaxed(nf->n_val, PLL_N_REG(pll));
+
+	__pll_config_reg(PLL_CONFIG_REG(pll), nf, &pll->masks);
+
+	if (c->count)
+		c->ops->enable(c);
+
+	spin_unlock_irqrestore(&c->lock, flags);
+	return 0;
+}
+
+static enum handoff variable_rate_pll_handoff(struct clk *c)
+{
+	struct pll_clk *pll = to_pll_clk(c);
+	u32 mode = readl_relaxed(PLL_MODE_REG(pll));
+	u32 mask = PLL_BYPASSNL | PLL_RESET_N | PLL_OUTCTRL;
+	u32 lval;
+
+	pll->src_rate = clk_get_rate(c->parent);
+
+	lval = readl_relaxed(PLL_L_REG(pll));
+	if (!lval)
+		return HANDOFF_DISABLED_CLK;
+
+	c->rate = pll->src_rate * lval;
+
+	if (c->rate > pll->max_rate || c->rate < pll->min_rate) {
+		WARN(1, "%s: Out of spec PLL", c->dbg_name);
+		return HANDOFF_DISABLED_CLK;
+	}
+
+	if ((mode & mask) != mask)
+		return HANDOFF_DISABLED_CLK;
+
+	return HANDOFF_ENABLED_CLK;
+}
+
+static long variable_rate_pll_round_rate(struct clk *c, unsigned long rate)
+{
+	struct pll_clk *pll = to_pll_clk(c);
+
+	if (!pll->src_rate)
+		return 0;
+
+	if (pll->no_prepared_reconfig && c->prepare_count && c->rate != rate)
+		return -EINVAL;
+
+	if (rate < pll->min_rate)
+		rate = pll->min_rate;
+	if (rate > pll->max_rate)
+		rate = pll->max_rate;
+
+	return min(pll->max_rate,
+			DIV_ROUND_UP(rate, pll->src_rate) * pll->src_rate);
+}
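+
+/*
+ * Worked example for the rounding above (illustrative values): with
+ * src_rate = 19.2 MHz, a request for 1400 MHz gives
+ * DIV_ROUND_UP(1400000000, 19200000) = 73, i.e. 73 * 19.2 MHz = 1401.6 MHz,
+ * which is then clamped to pll->max_rate if it exceeds it.
+ */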
+
+/*
+ * For optimization reasons, this assumes no downstream clocks are actively
+ * using the PLL.
+ */
+static int variable_rate_pll_set_rate(struct clk *c, unsigned long rate)
+{
+	struct pll_clk *pll = to_pll_clk(c);
+	unsigned long flags;
+	u32 l_val;
+
+	if (rate != variable_rate_pll_round_rate(c, rate))
+		return -EINVAL;
+
+	l_val = rate / pll->src_rate;
+
+	spin_lock_irqsave(&c->lock, flags);
+
+	if (c->count && c->ops->disable)
+		c->ops->disable(c);
+
+	writel_relaxed(l_val, PLL_L_REG(pll));
+
+	if (c->count && c->ops->enable)
+		c->ops->enable(c);
+
+	spin_unlock_irqrestore(&c->lock, flags);
+
+	return 0;
+}
+
+int sr_pll_clk_enable(struct clk *c)
+{
+	u32 mode;
+	unsigned long flags;
+	struct pll_clk *pll = to_pll_clk(c);
+
+	spin_lock_irqsave(&pll_reg_lock, flags);
+	mode = readl_relaxed(PLL_MODE_REG(pll));
+	/* De-assert active-low PLL reset. */
+	mode |= PLL_RESET_N;
+	writel_relaxed(mode, PLL_MODE_REG(pll));
+
+	/*
+	 * H/W requires a 5us delay between disabling the bypass and
+	 * de-asserting the reset. Delay 10us just to be safe.
+	 */
+	mb();
+	udelay(10);
+
+	/* Disable PLL bypass mode. */
+	mode |= PLL_BYPASSNL;
+	writel_relaxed(mode, PLL_MODE_REG(pll));
+
+	/* Wait until PLL is locked. */
+	mb();
+	udelay(60);
+
+	/* Enable PLL output. */
+	mode |= PLL_OUTCTRL;
+	writel_relaxed(mode, PLL_MODE_REG(pll));
+
+	/* Ensure that the write above goes through before returning. */
+	mb();
+
+	spin_unlock_irqrestore(&pll_reg_lock, flags);
+
+	return 0;
+}
+
+int sr_hpm_lp_pll_clk_enable(struct clk *c)
+{
+	unsigned long flags;
+	struct pll_clk *pll = to_pll_clk(c);
+	u32 count, mode;
+	int ret = 0;
+
+	spin_lock_irqsave(&pll_reg_lock, flags);
+
+	/* Disable PLL bypass mode and de-assert reset. */
+	mode = PLL_BYPASSNL | PLL_RESET_N;
+	writel_relaxed(mode, PLL_MODE_REG(pll));
+
+	/* Wait for pll to lock. */
+	for (count = ENABLE_WAIT_MAX_LOOPS; count > 0; count--) {
+		if (readl_relaxed(PLL_STATUS_REG(pll)) & PLL_LOCKED_BIT)
+			break;
+		udelay(1);
+	}
+
+	if (!(readl_relaxed(PLL_STATUS_REG(pll)) & PLL_LOCKED_BIT)) {
+		WARN("PLL %s didn't lock after enabling it!\n", c->dbg_name);
+		ret = -ETIMEDOUT;
+		goto out;
+	}
+
+	/* Enable PLL output. */
+	mode |= PLL_OUTCTRL;
+	writel_relaxed(mode, PLL_MODE_REG(pll));
+
+	/* Ensure the write above goes through before returning. */
+	mb();
+
+out:
+	spin_unlock_irqrestore(&pll_reg_lock, flags);
+	return ret;
+}
+
+static void __iomem *variable_rate_pll_list_registers(struct clk *c, int n,
+				struct clk_register_data **regs, u32 *size)
+{
+	struct pll_clk *pll = to_pll_clk(c);
+	static struct clk_register_data data[] = {
+		{"MODE", 0x0},
+		{"L", 0x4},
+		{"ALPHA", 0x8},
+		{"USER_CTL", 0x10},
+		{"CONFIG_CTL", 0x14},
+		{"STATUS", 0x1C},
+	};
+	if (n)
+		return ERR_PTR(-EINVAL);
+
+	*regs = data;
+	*size = ARRAY_SIZE(data);
+	return PLL_MODE_REG(pll);
+}
+
+static void __iomem *local_pll_clk_list_registers(struct clk *c, int n,
+				struct clk_register_data **regs, u32 *size)
+{
+	/* Not compatible with 8960 & friends */
+	struct pll_clk *pll = to_pll_clk(c);
+	static struct clk_register_data data[] = {
+		{"MODE", 0x0},
+		{"L", 0x4},
+		{"M", 0x8},
+		{"N", 0xC},
+		{"USER", 0x10},
+		{"CONFIG", 0x14},
+		{"STATUS", 0x1C},
+	};
+	if (n)
+		return ERR_PTR(-EINVAL);
+
+	*regs = data;
+	*size = ARRAY_SIZE(data);
+	return PLL_MODE_REG(pll);
+}
+
+struct clk_ops clk_ops_local_pll = {
+	.enable = local_pll_clk_enable,
+	.disable = local_pll_clk_disable,
+	.set_rate = local_pll_clk_set_rate,
+	.handoff = local_pll_clk_handoff,
+	.list_registers = local_pll_clk_list_registers,
+};
+
+struct clk_ops clk_ops_sr2_pll = {
+	.enable = sr2_pll_clk_enable,
+	.disable = local_pll_clk_disable,
+	.set_rate = local_pll_clk_set_rate,
+	.round_rate = local_pll_clk_round_rate,
+	.handoff = local_pll_clk_handoff,
+	.list_registers = local_pll_clk_list_registers,
+};
+
+struct clk_ops clk_ops_variable_rate_pll_hwfsm = {
+	.enable = variable_rate_pll_clk_enable_hwfsm,
+	.disable = variable_rate_pll_clk_disable_hwfsm,
+	.set_rate = variable_rate_pll_set_rate,
+	.round_rate = variable_rate_pll_round_rate,
+	.handoff = variable_rate_pll_handoff,
+};
+
+struct clk_ops clk_ops_variable_rate_pll = {
+	.enable = variable_rate_pll_clk_enable,
+	.disable = local_pll_clk_disable,
+	.set_rate = variable_rate_pll_set_rate,
+	.round_rate = variable_rate_pll_round_rate,
+	.handoff = variable_rate_pll_handoff,
+	.list_registers = variable_rate_pll_list_registers,
+};
+
+static DEFINE_SPINLOCK(soft_vote_lock);
+
+static int pll_acpu_vote_clk_enable(struct clk *c)
+{
+	int ret = 0;
+	unsigned long flags;
+	struct pll_vote_clk *pllv = to_pll_vote_clk(c);
+
+	spin_lock_irqsave(&soft_vote_lock, flags);
+
+	if (!*pllv->soft_vote)
+		ret = pll_vote_clk_enable(c);
+	if (ret == 0)
+		*pllv->soft_vote |= (pllv->soft_vote_mask);
+
+	spin_unlock_irqrestore(&soft_vote_lock, flags);
+	return ret;
+}
+
+static void pll_acpu_vote_clk_disable(struct clk *c)
+{
+	unsigned long flags;
+	struct pll_vote_clk *pllv = to_pll_vote_clk(c);
+
+	spin_lock_irqsave(&soft_vote_lock, flags);
+
+	*pllv->soft_vote &= ~(pllv->soft_vote_mask);
+	if (!*pllv->soft_vote)
+		pll_vote_clk_disable(c);
+
+	spin_unlock_irqrestore(&soft_vote_lock, flags);
+}
+
+static enum handoff pll_acpu_vote_clk_handoff(struct clk *c)
+{
+	if (pll_vote_clk_handoff(c) == HANDOFF_DISABLED_CLK)
+		return HANDOFF_DISABLED_CLK;
+
+	if (pll_acpu_vote_clk_enable(c))
+		return HANDOFF_DISABLED_CLK;
+
+	return HANDOFF_ENABLED_CLK;
+}
+
+struct clk_ops clk_ops_pll_acpu_vote = {
+	.enable = pll_acpu_vote_clk_enable,
+	.disable = pll_acpu_vote_clk_disable,
+	.is_enabled = pll_vote_clk_is_enabled,
+	.handoff = pll_acpu_vote_clk_handoff,
+	.list_registers = pll_vote_clk_list_registers,
+};
+
+static int pll_sleep_clk_enable(struct clk *c)
+{
+	u32 ena;
+	unsigned long flags;
+	struct pll_vote_clk *pllv = to_pll_vote_clk(c);
+
+	spin_lock_irqsave(&pll_reg_lock, flags);
+	ena = readl_relaxed(PLL_EN_REG(pllv));
+	ena &= ~(pllv->en_mask);
+	writel_relaxed(ena, PLL_EN_REG(pllv));
+	spin_unlock_irqrestore(&pll_reg_lock, flags);
+	return 0;
+}
+
+static void pll_sleep_clk_disable(struct clk *c)
+{
+	u32 ena;
+	unsigned long flags;
+	struct pll_vote_clk *pllv = to_pll_vote_clk(c);
+
+	spin_lock_irqsave(&pll_reg_lock, flags);
+	ena = readl_relaxed(PLL_EN_REG(pllv));
+	ena |= pllv->en_mask;
+	writel_relaxed(ena, PLL_EN_REG(pllv));
+	spin_unlock_irqrestore(&pll_reg_lock, flags);
+}
+
+static enum handoff pll_sleep_clk_handoff(struct clk *c)
+{
+	struct pll_vote_clk *pllv = to_pll_vote_clk(c);
+
+	if (!(readl_relaxed(PLL_EN_REG(pllv)) & pllv->en_mask))
+		return HANDOFF_ENABLED_CLK;
+
+	return HANDOFF_DISABLED_CLK;
+}
+
+/*
+ * These .ops are meant to be used by gpll0_sleep_clk_src. The aim is to use
+ * the h/w sleep-enable bit to denote whether the PLL may be turned OFF once
+ * APPS goes to power collapse. gpll0_sleep_clk_src is enabled only if there
+ * is a peripheral client using it and disabled if there is none. The enable
+ * .ops therefore clears the h/w sleep-enable bit while the disable .ops
+ * asserts it.
+ */
+
+struct clk_ops clk_ops_pll_sleep_vote = {
+	.enable = pll_sleep_clk_enable,
+	.disable = pll_sleep_clk_disable,
+	.handoff = pll_sleep_clk_handoff,
+	.list_registers = pll_vote_clk_list_registers,
+};
+
+static void __set_fsm_mode(void __iomem *mode_reg,
+					u32 bias_count, u32 lock_count)
+{
+	u32 regval = readl_relaxed(mode_reg);
+
+	/* De-assert reset to FSM */
+	regval &= ~BIT(21);
+	writel_relaxed(regval, mode_reg);
+
+	/* Program bias count */
+	regval &= ~BM(19, 14);
+	regval |= BVAL(19, 14, bias_count);
+	writel_relaxed(regval, mode_reg);
+
+	/* Program lock count */
+	regval &= ~BM(13, 8);
+	regval |= BVAL(13, 8, lock_count);
+	writel_relaxed(regval, mode_reg);
+
+	/* Enable PLL FSM voting */
+	regval |= BIT(20);
+	writel_relaxed(regval, mode_reg);
+}
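+
+/*
+ * For illustration: BVAL(19, 14, bias_count) packs the value into bits
+ * 19:14 of the mode register, so the bias count of 0x1 used by
+ * configure_sr_pll() contributes 0x1 << 14 = 0x4000, and its lock count of
+ * 0x8 contributes 0x8 << 8 = 0x800 in bits 13:8.
+ */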
+
+static void __configure_alt_config(struct pll_alt_config config,
+		struct pll_config_regs *regs)
+{
+	u32 regval;
+
+	regval = readl_relaxed(PLL_CFG_ALT_REG(regs));
+
+	if (config.mask) {
+		regval &= ~config.mask;
+		regval |= config.val;
+	}
+
+	writel_relaxed(regval, PLL_CFG_ALT_REG(regs));
+}
+
+void __configure_pll(struct pll_config *config,
+		struct pll_config_regs *regs, u32 ena_fsm_mode)
+{
+	u32 regval;
+
+	writel_relaxed(config->l, PLL_L_REG(regs));
+	writel_relaxed(config->m, PLL_M_REG(regs));
+	writel_relaxed(config->n, PLL_N_REG(regs));
+
+	regval = readl_relaxed(PLL_CONFIG_REG(regs));
+
+	/* Enable the MN accumulator  */
+	if (config->mn_ena_mask) {
+		regval &= ~config->mn_ena_mask;
+		regval |= config->mn_ena_val;
+	}
+
+	/* Enable the main output */
+	if (config->main_output_mask) {
+		regval &= ~config->main_output_mask;
+		regval |= config->main_output_val;
+	}
+
+	/* Enable the aux output */
+	if (config->aux_output_mask) {
+		regval &= ~config->aux_output_mask;
+		regval |= config->aux_output_val;
+	}
+
+	/* Set pre-divider and post-divider values */
+	regval &= ~config->pre_div_mask;
+	regval |= config->pre_div_val;
+	regval &= ~config->post_div_mask;
+	regval |= config->post_div_val;
+
+	/* Select VCO setting */
+	regval &= ~config->vco_mask;
+	regval |= config->vco_val;
+
+	if (config->add_factor_mask) {
+		regval &= ~config->add_factor_mask;
+		regval |= config->add_factor_val;
+	}
+
+	writel_relaxed(regval, PLL_CONFIG_REG(regs));
+
+	if (regs->config_alt_reg)
+		__configure_alt_config(config->alt_cfg, regs);
+
+	if (regs->config_ctl_reg)
+		writel_relaxed(config->cfg_ctl_val, PLL_CFG_CTL_REG(regs));
+}
+
+void configure_sr_pll(struct pll_config *config,
+		struct pll_config_regs *regs, u32 ena_fsm_mode)
+{
+	__configure_pll(config, regs, ena_fsm_mode);
+	if (ena_fsm_mode)
+		__set_fsm_mode(PLL_MODE_REG(regs), 0x1, 0x8);
+}
+
+void configure_sr_hpm_lp_pll(struct pll_config *config,
+		struct pll_config_regs *regs, u32 ena_fsm_mode)
+{
+	__configure_pll(config, regs, ena_fsm_mode);
+	if (ena_fsm_mode)
+		__set_fsm_mode(PLL_MODE_REG(regs), 0x1, 0x0);
+}
+
+static void *votable_pll_clk_dt_parser(struct device *dev,
+						struct device_node *np)
+{
+	struct pll_vote_clk *v, *peer;
+	struct clk *c;
+	u32 val, rc;
+	phandle p;
+	struct msmclk_data *drv;
+
+	v = devm_kzalloc(dev, sizeof(*v), GFP_KERNEL);
+	if (!v) {
+		dt_err(np, "memory alloc failure\n");
+		return ERR_PTR(-ENOMEM);
+	}
+
+	drv = msmclk_parse_phandle(dev, np->parent->phandle);
+	if (IS_ERR_OR_NULL(drv))
+		return ERR_CAST(drv);
+	v->base = &drv->base;
+
+	rc = of_property_read_u32(np, "qcom,en-offset", (u32 *)&v->en_reg);
+	if (rc) {
+		dt_err(np, "missing qcom,en-offset dt property\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	rc = of_property_read_u32(np, "qcom,en-bit", &val);
+	if (rc) {
+		dt_err(np, "missing qcom,en-bit dt property\n");
+		return ERR_PTR(-EINVAL);
+	}
+	v->en_mask = BIT(val);
+
+	rc = of_property_read_u32(np, "qcom,status-offset",
+						(u32 *)&v->status_reg);
+	if (rc) {
+		dt_err(np, "missing qcom,status-offset dt property\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	rc = of_property_read_u32(np, "qcom,status-bit", &val);
+	if (rc) {
+		dt_err(np, "missing qcom,status-bit dt property\n");
+		return ERR_PTR(-EINVAL);
+	}
+	v->status_mask = BIT(val);
+
+	rc = of_property_read_u32(np, "qcom,pll-config-rate", &val);
+	if (rc) {
+		dt_err(np, "missing qcom,pll-config-rate dt property\n");
+		return ERR_PTR(-EINVAL);
+	}
+	v->c.rate = val;
+
+	if (of_device_is_compatible(np, "qcom,active-only-pll"))
+		v->soft_vote_mask = PLL_SOFT_VOTE_ACPU;
+	else if (of_device_is_compatible(np, "qcom,sleep-active-pll"))
+		v->soft_vote_mask = PLL_SOFT_VOTE_PRIMARY;
+
+	if (of_device_is_compatible(np, "qcom,votable-pll")) {
+		v->c.ops = &clk_ops_pll_vote;
+		return msmclk_generic_clk_init(dev, np, &v->c);
+	}
+
+	rc = of_property_read_phandle_index(np, "qcom,peer", 0, &p);
+	if (rc) {
+		dt_err(np, "missing qcom,peer dt property\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	c = msmclk_lookup_phandle(dev, p);
+	if (!IS_ERR_OR_NULL(c)) {
+		v->soft_vote = devm_kzalloc(dev, sizeof(*v->soft_vote),
+						GFP_KERNEL);
+		if (!v->soft_vote) {
+			dt_err(np, "memory alloc failure\n");
+			return ERR_PTR(-ENOMEM);
+		}
+
+		peer = to_pll_vote_clk(c);
+		peer->soft_vote = v->soft_vote;
+	}
+
+	v->c.ops = &clk_ops_pll_acpu_vote;
+	return msmclk_generic_clk_init(dev, np, &v->c);
+}
+MSMCLK_PARSER(votable_pll_clk_dt_parser, "qcom,active-only-pll", 0);
+MSMCLK_PARSER(votable_pll_clk_dt_parser, "qcom,sleep-active-pll", 1);
+MSMCLK_PARSER(votable_pll_clk_dt_parser, "qcom,votable-pll", 2);
diff -Nruw linux-4.4.115-fbx/drivers/clk/msm./clock-rpm.c linux-4.4.115-fbx/drivers/clk/msm/clock-rpm.c
--- linux-4.4.115-fbx/drivers/clk/msm./clock-rpm.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/clk/msm/clock-rpm.c	2019-01-22 16:16:23.019242024 +0100
@@ -0,0 +1,472 @@
+/* Copyright (c) 2010-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/err.h>
+#include <linux/rtmutex.h>
+#include <linux/clk/msm-clk-provider.h>
+#include <soc/qcom/clock-rpm.h>
+#include <soc/qcom/msm-clock-controller.h>
+
+#define __clk_rpmrs_set_rate(r, value, ctx) \
+	((r)->rpmrs_data->set_rate_fn((r), (value), (ctx)))
+
+#define clk_rpmrs_set_rate_sleep(r, value) \
+	    __clk_rpmrs_set_rate((r), (value), (r)->rpmrs_data->ctx_sleep_id)
+
+#define clk_rpmrs_set_rate_active(r, value) \
+	   __clk_rpmrs_set_rate((r), (value), (r)->rpmrs_data->ctx_active_id)
+
+static int clk_rpmrs_set_rate_smd(struct rpm_clk *r, uint32_t value,
+				uint32_t context)
+{
+	int ret;
+
+	struct msm_rpm_kvp kvp = {
+		.key = r->rpm_key,
+		.data = (void *)&value,
+		.length = sizeof(value),
+	};
+
+	switch (context) {
+	case MSM_RPM_CTX_ACTIVE_SET:
+		if (*r->last_active_set_vote == value)
+			return 0;
+		break;
+	case MSM_RPM_CTX_SLEEP_SET:
+		if (*r->last_sleep_set_vote == value)
+			return 0;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	ret = msm_rpm_send_message(context, r->rpm_res_type, r->rpm_clk_id,
+			&kvp, 1);
+	if (ret)
+		return ret;
+
+	switch (context) {
+	case MSM_RPM_CTX_ACTIVE_SET:
+		*r->last_active_set_vote = value;
+		break;
+	case MSM_RPM_CTX_SLEEP_SET:
+		*r->last_sleep_set_vote = value;
+		break;
+	}
+
+	return 0;
+}
+
+static int clk_rpmrs_handoff_smd(struct rpm_clk *r)
+{
+	if (!r->branch)
+		r->c.rate = INT_MAX;
+
+	return 0;
+}
+
+static int clk_rpmrs_is_enabled_smd(struct rpm_clk *r)
+{
+	return !!r->c.prepare_count;
+}
+
+struct clk_rpmrs_data {
+	int (*set_rate_fn)(struct rpm_clk *r, uint32_t value, uint32_t context);
+	int (*get_rate_fn)(struct rpm_clk *r);
+	int (*handoff_fn)(struct rpm_clk *r);
+	int (*is_enabled)(struct rpm_clk *r);
+	int ctx_active_id;
+	int ctx_sleep_id;
+};
+
+struct clk_rpmrs_data clk_rpmrs_data_smd = {
+	.set_rate_fn = clk_rpmrs_set_rate_smd,
+	.handoff_fn = clk_rpmrs_handoff_smd,
+	.is_enabled = clk_rpmrs_is_enabled_smd,
+	.ctx_active_id = MSM_RPM_CTX_ACTIVE_SET,
+	.ctx_sleep_id = MSM_RPM_CTX_SLEEP_SET,
+};
+
+static DEFINE_RT_MUTEX(rpm_clock_lock);
+
+static void to_active_sleep_khz(struct rpm_clk *r, unsigned long rate,
+			unsigned long *active_khz, unsigned long *sleep_khz)
+{
+	/* Convert the rate (Hz) to kHz */
+	*active_khz = DIV_ROUND_UP(rate, 1000);
+
+	/*
+	 * Active-only clocks don't care what the rate is during sleep. So,
+	 * they vote for zero.
+	 */
+	if (r->active_only)
+		*sleep_khz = 0;
+	else
+		*sleep_khz = *active_khz;
+}
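+
+/*
+ * For illustration: a 200000000 Hz rate becomes a 200000 kHz active vote;
+ * the sleep vote is also 200000 kHz, or 0 if the clock is active-only.
+ */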
+
+static int rpm_clk_prepare(struct clk *clk)
+{
+	struct rpm_clk *r = to_rpm_clk(clk);
+	uint32_t value;
+	int rc = 0;
+	unsigned long this_khz, this_sleep_khz;
+	unsigned long peer_khz = 0, peer_sleep_khz = 0;
+	struct rpm_clk *peer = r->peer;
+
+	rt_mutex_lock(&rpm_clock_lock);
+
+	to_active_sleep_khz(r, r->c.rate, &this_khz, &this_sleep_khz);
+
+	/* Don't send requests to the RPM if the rate has not been set. */
+	if (this_khz == 0)
+		goto out;
+
+	/* Take peer clock's rate into account only if it's enabled. */
+	if (peer->enabled)
+		to_active_sleep_khz(peer, peer->c.rate,
+				&peer_khz, &peer_sleep_khz);
+
+	value = max(this_khz, peer_khz);
+	if (r->branch)
+		value = !!value;
+
+	rc = clk_rpmrs_set_rate_active(r, value);
+	if (rc)
+		goto out;
+
+	value = max(this_sleep_khz, peer_sleep_khz);
+	if (r->branch)
+		value = !!value;
+
+	rc = clk_rpmrs_set_rate_sleep(r, value);
+	if (rc) {
+		/* Undo the active set vote and restore it to peer_khz */
+		value = peer_khz;
+		rc = clk_rpmrs_set_rate_active(r, value);
+	}
+
+out:
+	if (!rc)
+		r->enabled = true;
+
+	rt_mutex_unlock(&rpm_clock_lock);
+
+	return rc;
+}
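+
+/*
+ * For illustration: if this clock wants 200 MHz and its enabled peer wants
+ * 300 MHz, the active-set vote sent above is max(200000, 300000) = 300000
+ * kHz; branch clocks collapse the vote to a simple 0/1 on-off request.
+ */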
+
+static void rpm_clk_unprepare(struct clk *clk)
+{
+	struct rpm_clk *r = to_rpm_clk(clk);
+
+	rt_mutex_lock(&rpm_clock_lock);
+
+	if (r->c.rate) {
+		uint32_t value;
+		struct rpm_clk *peer = r->peer;
+		unsigned long peer_khz = 0, peer_sleep_khz = 0;
+		int rc;
+
+		/* Take peer clock's rate into account only if it's enabled. */
+		if (peer->enabled)
+			to_active_sleep_khz(peer, peer->c.rate,
+				&peer_khz, &peer_sleep_khz);
+
+		value = r->branch ? !!peer_khz : peer_khz;
+		rc = clk_rpmrs_set_rate_active(r, value);
+		if (rc)
+			goto out;
+
+		value = r->branch ? !!peer_sleep_khz : peer_sleep_khz;
+		rc = clk_rpmrs_set_rate_sleep(r, value);
+	}
+	r->enabled = false;
+out:
+	rt_mutex_unlock(&rpm_clock_lock);
+
+	return;
+}
+
+static int rpm_clk_set_rate(struct clk *clk, unsigned long rate)
+{
+	struct rpm_clk *r = to_rpm_clk(clk);
+	unsigned long this_khz, this_sleep_khz;
+	int rc = 0;
+
+	rt_mutex_lock(&rpm_clock_lock);
+
+	if (r->enabled) {
+		uint32_t value;
+		struct rpm_clk *peer = r->peer;
+		unsigned long peer_khz = 0, peer_sleep_khz = 0;
+
+		to_active_sleep_khz(r, rate, &this_khz, &this_sleep_khz);
+
+		/* Take peer clock's rate into account only if it's enabled. */
+		if (peer->enabled)
+			to_active_sleep_khz(peer, peer->c.rate,
+					&peer_khz, &peer_sleep_khz);
+
+		value = max(this_khz, peer_khz);
+		rc = clk_rpmrs_set_rate_active(r, value);
+		if (rc)
+			goto out;
+
+		value = max(this_sleep_khz, peer_sleep_khz);
+		rc = clk_rpmrs_set_rate_sleep(r, value);
+	}
+
+out:
+	rt_mutex_unlock(&rpm_clock_lock);
+
+	return rc;
+}
+
+static unsigned long rpm_clk_get_rate(struct clk *clk)
+{
+	struct rpm_clk *r = to_rpm_clk(clk);
+	if (r->rpmrs_data->get_rate_fn)
+		return r->rpmrs_data->get_rate_fn(r);
+	else
+		return clk->rate;
+}
+
+static int rpm_clk_is_enabled(struct clk *clk)
+{
+	struct rpm_clk *r = to_rpm_clk(clk);
+	return r->rpmrs_data->is_enabled(r);
+}
+
+static long rpm_clk_round_rate(struct clk *clk, unsigned long rate)
+{
+	/* Not supported. */
+	return rate;
+}
+
+static bool rpm_clk_is_local(struct clk *clk)
+{
+	return false;
+}
+
+static enum handoff rpm_clk_handoff(struct clk *clk)
+{
+	struct rpm_clk *r = to_rpm_clk(clk);
+	int rc;
+
+	/*
+	 * Querying an RPM clock's status will return 0 unless the clock's
+	 * rate has previously been set through the RPM. When handing off,
+	 * assume these clocks are enabled (unless the RPM call fails) so
+	 * child clocks of these RPM clocks can still be handed off.
+	 */
+	rc  = r->rpmrs_data->handoff_fn(r);
+	if (rc < 0)
+		return HANDOFF_DISABLED_CLK;
+
+	/*
+	 * Since RPM handoff code may update the software rate of the clock by
+	 * querying the RPM, we need to make sure our request to RPM now
+	 * matches the software rate of the clock. When we send the request
+	 * to RPM, we also need to update any other state info we would
+	 * normally update. So, call the appropriate clock function instead
+	 * of directly using the RPM driver APIs.
+	 */
+	rc = rpm_clk_prepare(clk);
+	if (rc < 0)
+		return HANDOFF_DISABLED_CLK;
+
+	return HANDOFF_ENABLED_CLK;
+}
+
+#define RPM_MISC_CLK_TYPE	0x306b6c63
+#define RPM_SCALING_ENABLE_ID	0x2
+
+int enable_rpm_scaling(void)
+{
+	int rc, value = 0x1;
+	static int is_inited;
+
+	struct msm_rpm_kvp kvp = {
+		.key = RPM_SMD_KEY_ENABLE,
+		.data = (void *)&value,
+		.length = sizeof(value),
+	};
+
+	if (is_inited)
+		return 0;
+
+	rc = msm_rpm_send_message_noirq(MSM_RPM_CTX_SLEEP_SET,
+			RPM_MISC_CLK_TYPE, RPM_SCALING_ENABLE_ID, &kvp, 1);
+	if (rc < 0) {
+		if (rc != -EPROBE_DEFER)
+			WARN(1, "RPM clock scaling (sleep set) did not enable!\n");
+		return rc;
+	}
+
+	rc = msm_rpm_send_message_noirq(MSM_RPM_CTX_ACTIVE_SET,
+			RPM_MISC_CLK_TYPE, RPM_SCALING_ENABLE_ID, &kvp, 1);
+	if (rc < 0) {
+		if (rc != -EPROBE_DEFER)
+			WARN(1, "RPM clock scaling (active set) did not enable!\n");
+		return rc;
+	}
+
+	is_inited++;
+	return 0;
+}
+
+int vote_bimc(struct rpm_clk *r, uint32_t value)
+{
+	int rc;
+
+	struct msm_rpm_kvp kvp = {
+		.key = r->rpm_key,
+		.data = (void *)&value,
+		.length = sizeof(value),
+	};
+
+	rc = msm_rpm_send_message_noirq(MSM_RPM_CTX_ACTIVE_SET,
+			r->rpm_res_type, r->rpmrs_data->ctx_active_id,
+			&kvp, 1);
+	if (rc < 0) {
+		if (rc != -EPROBE_DEFER)
+			WARN(1, "BIMC vote not sent!\n");
+		return rc;
+	}
+
+	return rc;
+}
+
+struct clk_ops clk_ops_rpm = {
+	.prepare = rpm_clk_prepare,
+	.unprepare = rpm_clk_unprepare,
+	.set_rate = rpm_clk_set_rate,
+	.get_rate = rpm_clk_get_rate,
+	.is_enabled = rpm_clk_is_enabled,
+	.round_rate = rpm_clk_round_rate,
+	.is_local = rpm_clk_is_local,
+	.handoff = rpm_clk_handoff,
+};
+
+struct clk_ops clk_ops_rpm_branch = {
+	.prepare = rpm_clk_prepare,
+	.unprepare = rpm_clk_unprepare,
+	.is_local = rpm_clk_is_local,
+	.handoff = rpm_clk_handoff,
+};
+
+static struct rpm_clk *rpm_clk_dt_parser_common(struct device *dev,
+						struct device_node *np)
+{
+	struct rpm_clk *rpm, *peer;
+	struct clk *c;
+	int rc = 0;
+	phandle p;
+	const char *str;
+
+	rpm = devm_kzalloc(dev, sizeof(*rpm), GFP_KERNEL);
+	if (!rpm) {
+		dt_err(np, "memory alloc failure\n");
+		return ERR_PTR(-ENOMEM);
+	}
+
+	rc = of_property_read_phandle_index(np, "qcom,rpm-peer", 0, &p);
+	if (rc) {
+		dt_err(np, "missing qcom,rpm-peer dt property\n");
+		return ERR_PTR(rc);
+	}
+
+	/* Rely on whichever clock is parsed last to set up the circular ref */
+	c = msmclk_lookup_phandle(dev, p);
+	if (!IS_ERR(c)) {
+		uint32_t *sleep = devm_kzalloc(dev, sizeof(uint32_t),
+					       GFP_KERNEL);
+		uint32_t *active =
+			devm_kzalloc(dev, sizeof(uint32_t),
+				     GFP_KERNEL);
+
+		if (!sleep || !active)
+			return ERR_PTR(-ENOMEM);
+		peer = to_rpm_clk(c);
+		peer->peer = rpm;
+		rpm->peer = peer;
+		rpm->last_active_set_vote = active;
+		peer->last_active_set_vote = active;
+		rpm->last_sleep_set_vote = sleep;
+		peer->last_sleep_set_vote = sleep;
+	}
+
+	rpm->rpmrs_data = &clk_rpmrs_data_smd;
+	rpm->active_only = of_device_is_compatible(np, "qcom,rpm-a-clk") ||
+			of_device_is_compatible(np, "qcom,rpm-branch-a-clk");
+
+	rc = of_property_read_string(np, "qcom,res-type", &str);
+	if (rc) {
+		dt_err(np, "missing qcom,res-type dt property\n");
+		return ERR_PTR(rc);
+	}
+	if (sscanf(str, "%4c", (char *) &rpm->rpm_res_type) <= 0)
+		return ERR_PTR(-EINVAL);
+
+	rc = of_property_read_u32(np, "qcom,res-id", &rpm->rpm_clk_id);
+	if (rc) {
+		dt_err(np, "missing qcom,res-id dt property\n");
+		return ERR_PTR(rc);
+	}
+
+	rc = of_property_read_string(np, "qcom,key", &str);
+	if (rc) {
+		dt_err(np, "missing qcom,key dt property\n");
+		return ERR_PTR(rc);
+	}
+	if (sscanf(str, "%4c", (char *) &rpm->rpm_key) <= 0)
+		return ERR_PTR(-EINVAL);
+	return rpm;
+}
+
+static void *rpm_clk_dt_parser(struct device *dev, struct device_node *np)
+{
+	struct rpm_clk *rpm;
+	rpm = rpm_clk_dt_parser_common(dev, np);
+	if (IS_ERR(rpm))
+		return rpm;
+
+	rpm->c.ops = &clk_ops_rpm;
+	return msmclk_generic_clk_init(dev, np, &rpm->c);
+}
+
+static void *rpm_branch_clk_dt_parser(struct device *dev,
+					struct device_node *np)
+{
+	struct rpm_clk *rpm;
+	u32 rate;
+	int rc;
+	rpm = rpm_clk_dt_parser_common(dev, np);
+	if (IS_ERR(rpm))
+		return rpm;
+
+	rpm->c.ops = &clk_ops_rpm_branch;
+	rpm->branch = true;
+
+	rc = of_property_read_u32(np, "qcom,rcg-init-rate", &rate);
+	if (!rc)
+		rpm->c.rate = rate;
+
+	return msmclk_generic_clk_init(dev, np, &rpm->c);
+}
+MSMCLK_PARSER(rpm_clk_dt_parser, "qcom,rpm-clk", 0);
+MSMCLK_PARSER(rpm_clk_dt_parser, "qcom,rpm-a-clk", 1);
+MSMCLK_PARSER(rpm_branch_clk_dt_parser, "qcom,rpm-branch-clk", 0);
+MSMCLK_PARSER(rpm_branch_clk_dt_parser, "qcom,rpm-branch-a-clk", 1);
diff -Nruw linux-4.4.115-fbx/drivers/clk/msm./clock-voter.c linux-4.4.115-fbx/drivers/clk/msm/clock-voter.c
--- linux-4.4.115-fbx/drivers/clk/msm./clock-voter.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/clk/msm/clock-voter.c	2019-01-22 16:16:23.019242024 +0100
@@ -0,0 +1,202 @@
+/* Copyright (c) 2010-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/err.h>
+#include <linux/rtmutex.h>
+#include <linux/clk.h>
+#include <linux/clk/msm-clk-provider.h>
+#include <soc/qcom/clock-voter.h>
+#include <soc/qcom/msm-clock-controller.h>
+
+static DEFINE_RT_MUTEX(voter_clk_lock);
+
+/* Aggregate the rate of clocks that are currently on. */
+static unsigned long voter_clk_aggregate_rate(const struct clk *parent)
+{
+	struct clk *clk;
+	unsigned long rate = 0;
+
+	list_for_each_entry(clk, &parent->children, siblings) {
+		struct clk_voter *v = to_clk_voter(clk);
+		if (v->enabled)
+			rate = max(clk->rate, rate);
+	}
+	return rate;
+}
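+
+/*
+ * For illustration: with three voter children at 100 MHz (enabled),
+ * 200 MHz (enabled) and 300 MHz (disabled), the aggregate rate handed to
+ * the parent is 200 MHz; disabled voters do not contribute.
+ */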
+
+static int voter_clk_set_rate(struct clk *clk, unsigned long rate)
+{
+	int ret = 0;
+	struct clk *clkp;
+	struct clk_voter *clkh, *v = to_clk_voter(clk);
+	unsigned long cur_rate, new_rate, other_rate = 0;
+
+	if (v->is_branch)
+		return 0;
+
+	rt_mutex_lock(&voter_clk_lock);
+
+	if (v->enabled) {
+		struct clk *parent = clk->parent;
+
+		/*
+		 * Get the aggregate rate without this clock's vote and update
+		 * if the new rate is different than the current rate
+		 */
+		list_for_each_entry(clkp, &parent->children, siblings) {
+			clkh = to_clk_voter(clkp);
+			if (clkh->enabled && clkh != v)
+				other_rate = max(clkp->rate, other_rate);
+		}
+
+		cur_rate = max(other_rate, clk->rate);
+		new_rate = max(other_rate, rate);
+
+		if (new_rate != cur_rate) {
+			ret = clk_set_rate(parent, new_rate);
+			if (ret)
+				goto unlock;
+		}
+	}
+	clk->rate = rate;
+unlock:
+	rt_mutex_unlock(&voter_clk_lock);
+
+	return ret;
+}
+
+static int voter_clk_prepare(struct clk *clk)
+{
+	int ret = 0;
+	unsigned long cur_rate;
+	struct clk *parent;
+	struct clk_voter *v = to_clk_voter(clk);
+
+	rt_mutex_lock(&voter_clk_lock);
+	parent = clk->parent;
+
+	if (v->is_branch) {
+		v->enabled = true;
+		goto out;
+	}
+
+	/*
+	 * Increase the rate if this clock is voting for a higher rate
+	 * than the current rate.
+	 */
+	cur_rate = voter_clk_aggregate_rate(parent);
+	if (clk->rate > cur_rate) {
+		ret = clk_set_rate(parent, clk->rate);
+		if (ret)
+			goto out;
+	}
+	v->enabled = true;
+out:
+	rt_mutex_unlock(&voter_clk_lock);
+
+	return ret;
+}
+
+static void voter_clk_unprepare(struct clk *clk)
+{
+	unsigned long cur_rate, new_rate;
+	struct clk *parent;
+	struct clk_voter *v = to_clk_voter(clk);
+
+	rt_mutex_lock(&voter_clk_lock);
+	parent = clk->parent;
+
+	/*
+	 * Decrease the rate if this clock was the only one voting for
+	 * the highest rate.
+	 */
+	v->enabled = false;
+	if (v->is_branch)
+		goto out;
+
+	new_rate = voter_clk_aggregate_rate(parent);
+	cur_rate = max(new_rate, clk->rate);
+
+	if (new_rate < cur_rate)
+		clk_set_rate(parent, new_rate);
+
+out:
+	rt_mutex_unlock(&voter_clk_lock);
+}
+
+static int voter_clk_is_enabled(struct clk *clk)
+{
+	struct clk_voter *v = to_clk_voter(clk);
+	return v->enabled;
+}
+
+static long voter_clk_round_rate(struct clk *clk, unsigned long rate)
+{
+	return clk_round_rate(clk->parent, rate);
+}
+
+static bool voter_clk_is_local(struct clk *clk)
+{
+	return true;
+}
+
+static enum handoff voter_clk_handoff(struct clk *clk)
+{
+	if (!clk->rate)
+		return HANDOFF_DISABLED_CLK;
+
+	/*
+	 * Send the default rate to the parent if necessary and update the
+	 * software state of the voter clock.
+	 */
+	if (voter_clk_prepare(clk) < 0)
+		return HANDOFF_DISABLED_CLK;
+
+	return HANDOFF_ENABLED_CLK;
+}
+
+struct clk_ops clk_ops_voter = {
+	.prepare = voter_clk_prepare,
+	.unprepare = voter_clk_unprepare,
+	.set_rate = voter_clk_set_rate,
+	.is_enabled = voter_clk_is_enabled,
+	.round_rate = voter_clk_round_rate,
+	.is_local = voter_clk_is_local,
+	.handoff = voter_clk_handoff,
+};
+
+static void *sw_vote_clk_dt_parser(struct device *dev,
+					struct device_node *np)
+{
+	struct clk_voter *v;
+	int rc;
+	u32 temp;
+
+	v = devm_kzalloc(dev, sizeof(*v), GFP_KERNEL);
+	if (!v) {
+		dt_err(np, "failed to alloc memory\n");
+		return ERR_PTR(-ENOMEM);
+	}
+
+	rc = of_property_read_u32(np, "qcom,config-rate", &temp);
+	if (rc) {
+		dt_prop_err(np, "qcom,config-rate", "is missing");
+		return ERR_PTR(rc);
+	}
+
+	v->c.ops = &clk_ops_voter;
+	return msmclk_generic_clk_init(dev, np, &v->c);
+}
+MSMCLK_PARSER(sw_vote_clk_dt_parser, "qcom,sw-vote-clk", 0);
diff -Nruw linux-4.4.115-fbx/drivers/clk/msm./gdsc.c linux-4.4.115-fbx/drivers/clk/msm/gdsc.c
--- linux-4.4.115-fbx/drivers/clk/msm./gdsc.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/clk/msm/gdsc.c	2019-01-22 16:16:23.019242024 +0100
@@ -0,0 +1,718 @@
+/*
+ * Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+#include <linux/reset.h>
+#include <linux/regulator/of_regulator.h>
+#include <linux/slab.h>
+#include <linux/clk.h>
+#include <linux/clk/msm-clk.h>
+
+#define PWR_ON_MASK		BIT(31)
+#define EN_REST_WAIT_MASK	(0xF << 20)
+#define EN_FEW_WAIT_MASK	(0xF << 16)
+#define CLK_DIS_WAIT_MASK	(0xF << 12)
+#define SW_OVERRIDE_MASK	BIT(2)
+#define HW_CONTROL_MASK		BIT(1)
+#define SW_COLLAPSE_MASK	BIT(0)
+#define GMEM_CLAMP_IO_MASK	BIT(0)
+#define GMEM_RESET_MASK		BIT(4)
+#define BCR_BLK_ARES_BIT	BIT(0)
+
+/*
+ * Wait 2^n CXO cycles between all states. Here, n = 2 (4 cycles) for the
+ * EN_REST and CLK_DIS waits, while the EN_FEW wait field is set to 0x8.
+ */
+#define EN_REST_WAIT_VAL	(0x2 << 20)
+#define EN_FEW_WAIT_VAL		(0x8 << 16)
+#define CLK_DIS_WAIT_VAL	(0x2 << 12)
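+
+/*
+ * For illustration: each *_WAIT field above is 4 bits wide, so
+ * EN_REST_WAIT_VAL (0x2 << 20) places the value 0x2 in bits 23:20,
+ * matching EN_REST_WAIT_MASK (0xF << 20).
+ */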
+
+#define TIMEOUT_US		100
+
+struct gdsc {
+	struct regulator_dev	*rdev;
+	struct regulator_desc	rdesc;
+	void __iomem		*gdscr;
+	struct clk		**clocks;
+	struct reset_control	**reset_clocks;
+	int			clock_count;
+	int			reset_count;
+	bool			toggle_mem;
+	bool			toggle_periph;
+	bool			toggle_logic;
+	bool			resets_asserted;
+	bool			root_en;
+	bool			force_root_en;
+	int			root_clk_idx;
+	bool			no_status_check_on_disable;
+	bool			is_gdsc_enabled;
+	bool			allow_clear;
+	bool			reset_aon;
+	void __iomem		*domain_addr;
+	void __iomem		*hw_ctrl_addr;
+	void __iomem		*sw_reset_addr;
+	u32			gds_timeout;
+};
+
+enum gdscr_status {
+	ENABLED,
+	DISABLED,
+};
+
+static DEFINE_MUTEX(gdsc_seq_lock);
+
+void gdsc_allow_clear_retention(struct regulator *regulator)
+{
+	struct gdsc *sc = regulator_get_drvdata(regulator);
+
+	if (sc)
+		sc->allow_clear = true;
+}
+
+static int poll_gdsc_status(struct gdsc *sc, enum gdscr_status status)
+{
+	void __iomem *gdscr;
+	int count = sc->gds_timeout;
+	u32 val;
+
+	if (sc->hw_ctrl_addr)
+		gdscr = sc->hw_ctrl_addr;
+	else
+		gdscr = sc->gdscr;
+
+	for (; count > 0; count--) {
+		val = readl_relaxed(gdscr);
+		val &= PWR_ON_MASK;
+		switch (status) {
+		case ENABLED:
+			if (val)
+				return 0;
+			break;
+		case DISABLED:
+			if (!val)
+				return 0;
+			break;
+		}
+		/*
+		 * There is no guarantee about the delay needed for the enable
+		 * bit in the GDSCR to be set or reset after the GDSC state
+		 * changes. Hence, keep on checking for a reasonable number
+		 * of times until the bit is set with the least possible delay
+		 * between successive tries.
+		 */
+		udelay(1);
+	}
+	return -ETIMEDOUT;
+}
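+
+/*
+ * For illustration: each poll iteration above waits 1us, so a gds_timeout
+ * of 100 (TIMEOUT_US) bounds the wait at roughly 100us, plus register
+ * access time, before -ETIMEDOUT is returned.
+ */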
+
+static int gdsc_is_enabled(struct regulator_dev *rdev)
+{
+	struct gdsc *sc = rdev_get_drvdata(rdev);
+	uint32_t regval;
+
+	if (!sc->toggle_logic)
+		return !sc->resets_asserted;
+
+	regval = readl_relaxed(sc->gdscr);
+	if (regval & PWR_ON_MASK) {
+		/*
+		 * The GDSC might be turned on due to TZ/HYP vote on the
+		 * votable GDS registers. Check the SW_COLLAPSE_MASK to
+		 * determine if HLOS has voted for it.
+		 */
+		if (!(regval & SW_COLLAPSE_MASK))
+			return true;
+	}
+	return false;
+}
+
+static int gdsc_enable(struct regulator_dev *rdev)
+{
+	struct gdsc *sc = rdev_get_drvdata(rdev);
+	uint32_t regval, hw_ctrl_regval = 0x0;
+	int i, ret = 0;
+
+	mutex_lock(&gdsc_seq_lock);
+
+	if (sc->root_en || sc->force_root_en)
+		clk_prepare_enable(sc->clocks[sc->root_clk_idx]);
+
+	if (sc->toggle_logic) {
+		if (sc->sw_reset_addr) {
+			regval = readl_relaxed(sc->sw_reset_addr);
+			regval |= BCR_BLK_ARES_BIT;
+			writel_relaxed(regval, sc->sw_reset_addr);
+			/*
+			 * BLK_ARES should be kept asserted for 1us before
+			 * being de-asserted.
+			 */
+			wmb();
+			udelay(1);
+
+			regval &= ~BCR_BLK_ARES_BIT;
+			writel_relaxed(regval, sc->sw_reset_addr);
+
+			/* Make sure de-assert goes through before continuing */
+			wmb();
+		}
+
+		if (sc->domain_addr) {
+			if (sc->reset_aon) {
+				regval = readl_relaxed(sc->domain_addr);
+				regval |= GMEM_RESET_MASK;
+				writel_relaxed(regval, sc->domain_addr);
+				/*
+				 * Keep reset asserted for at-least 1us before
+				 * continuing.
+				 */
+				wmb();
+				udelay(1);
+
+				regval &= ~GMEM_RESET_MASK;
+				writel_relaxed(regval, sc->domain_addr);
+				/*
+				 * Make sure GMEM_RESET is de-asserted before
+				 * continuing.
+				 */
+				wmb();
+			}
+
+			regval = readl_relaxed(sc->domain_addr);
+			regval &= ~GMEM_CLAMP_IO_MASK;
+			writel_relaxed(regval, sc->domain_addr);
+			/*
+			 * Make sure CLAMP_IO is de-asserted before continuing.
+			 */
+			wmb();
+		}
+
+		regval = readl_relaxed(sc->gdscr);
+		if (regval & HW_CONTROL_MASK) {
+			dev_warn(&rdev->dev, "Invalid enable while %s is under HW control\n",
+				 sc->rdesc.name);
+			mutex_unlock(&gdsc_seq_lock);
+			return -EBUSY;
+		}
+
+		regval &= ~SW_COLLAPSE_MASK;
+		writel_relaxed(regval, sc->gdscr);
+
+		/* Wait for 8 XO cycles before polling the status bit. */
+		mb();
+		udelay(1);
+
+		ret = poll_gdsc_status(sc, ENABLED);
+		if (ret) {
+			regval = readl_relaxed(sc->gdscr);
+			if (sc->hw_ctrl_addr) {
+				hw_ctrl_regval =
+					readl_relaxed(sc->hw_ctrl_addr);
+				dev_warn(&rdev->dev, "%s state (after %d us timeout): 0x%x, GDS_HW_CTRL: 0x%x. Re-polling.\n",
+					sc->rdesc.name, sc->gds_timeout,
+					regval, hw_ctrl_regval);
+
+				ret = poll_gdsc_status(sc, ENABLED);
+				if (ret) {
+					dev_err(&rdev->dev, "%s final state (after additional %d us timeout): 0x%x, GDS_HW_CTRL: 0x%x\n",
+					sc->rdesc.name, sc->gds_timeout,
+					readl_relaxed(sc->gdscr),
+					readl_relaxed(sc->hw_ctrl_addr));
+
+					mutex_unlock(&gdsc_seq_lock);
+					return ret;
+				}
+			} else {
+				dev_err(&rdev->dev, "%s enable timed out: 0x%x\n",
+					sc->rdesc.name,
+					regval);
+				udelay(sc->gds_timeout);
+				regval = readl_relaxed(sc->gdscr);
+				dev_err(&rdev->dev, "%s final state: 0x%x (%d us after timeout)\n",
+					sc->rdesc.name, regval,
+					sc->gds_timeout);
+				mutex_unlock(&gdsc_seq_lock);
+				return ret;
+			}
+		}
+	} else {
+		for (i = 0; i < sc->reset_count; i++)
+			reset_control_deassert(sc->reset_clocks[i]);
+		sc->resets_asserted = false;
+	}
+
+	for (i = 0; i < sc->clock_count; i++) {
+		if (unlikely(i == sc->root_clk_idx))
+			continue;
+		if (sc->toggle_mem)
+			clk_set_flags(sc->clocks[i], CLKFLAG_RETAIN_MEM);
+		if (sc->toggle_periph)
+			clk_set_flags(sc->clocks[i], CLKFLAG_RETAIN_PERIPH);
+	}
+
+	/*
+	 * If clocks to this power domain were already on, they will take an
+	 * additional 4 clock cycles to re-enable after the rail is enabled.
+	 * Delay to account for this. A delay is also needed to ensure clocks
+	 * are not enabled within 400ns of enabling power to the memories.
+	 */
+	udelay(1);
+
+	/* Delay to account for staggered memory powerup. */
+	udelay(1);
+
+	if (sc->force_root_en)
+		clk_disable_unprepare(sc->clocks[sc->root_clk_idx]);
+	sc->is_gdsc_enabled = true;
+
+	mutex_unlock(&gdsc_seq_lock);
+
+	return ret;
+}
+
+static int gdsc_disable(struct regulator_dev *rdev)
+{
+	struct gdsc *sc = rdev_get_drvdata(rdev);
+	uint32_t regval;
+	int i, ret = 0;
+
+	mutex_lock(&gdsc_seq_lock);
+
+	if (sc->force_root_en)
+		clk_prepare_enable(sc->clocks[sc->root_clk_idx]);
+
+	for (i = sc->clock_count-1; i >= 0; i--) {
+		if (unlikely(i == sc->root_clk_idx))
+			continue;
+		if (sc->toggle_mem && sc->allow_clear)
+			clk_set_flags(sc->clocks[i], CLKFLAG_NORETAIN_MEM);
+		if (sc->toggle_periph && sc->allow_clear)
+			clk_set_flags(sc->clocks[i], CLKFLAG_NORETAIN_PERIPH);
+	}
+
+	/* Delay to account for staggered memory powerdown. */
+	udelay(1);
+
+	if (sc->toggle_logic) {
+		regval = readl_relaxed(sc->gdscr);
+		if (regval & HW_CONTROL_MASK) {
+			dev_warn(&rdev->dev, "Invalid disable while %s is under HW control\n",
+				 sc->rdesc.name);
+			mutex_unlock(&gdsc_seq_lock);
+			return -EBUSY;
+		}
+
+		regval |= SW_COLLAPSE_MASK;
+		writel_relaxed(regval, sc->gdscr);
+		/* Wait for 8 XO cycles before polling the status bit. */
+		mb();
+		udelay(1);
+
+		if (sc->no_status_check_on_disable) {
+			/*
+			 * Add a short delay here to ensure that enabling the
+			 * GDSC right after it was disabled does not put it in
+			 * a weird state.
+			 */
+			udelay(TIMEOUT_US);
+		} else {
+			ret = poll_gdsc_status(sc, DISABLED);
+			if (ret)
+				dev_err(&rdev->dev, "%s disable timed out: 0x%x\n",
+					sc->rdesc.name, regval);
+		}
+
+		if (sc->domain_addr) {
+			regval = readl_relaxed(sc->domain_addr);
+			regval |= GMEM_CLAMP_IO_MASK;
+			writel_relaxed(regval, sc->domain_addr);
+			/* Make sure CLAMP_IO is asserted before continuing. */
+			wmb();
+		}
+	} else {
+		for (i = sc->reset_count-1; i >= 0; i--)
+			reset_control_assert(sc->reset_clocks[i]);
+		sc->resets_asserted = true;
+	}
+
+	/*
+	 * Check if gdsc_enable was called for this GDSC. If not, the root
+	 * clock will not have been enabled prior to this.
+	 */
+	if ((sc->is_gdsc_enabled && sc->root_en) || sc->force_root_en)
+		clk_disable_unprepare(sc->clocks[sc->root_clk_idx]);
+	sc->is_gdsc_enabled = false;
+
+	mutex_unlock(&gdsc_seq_lock);
+
+	return ret;
+}
+
+static unsigned int gdsc_get_mode(struct regulator_dev *rdev)
+{
+	struct gdsc *sc = rdev_get_drvdata(rdev);
+	uint32_t regval;
+
+	mutex_lock(&gdsc_seq_lock);
+	regval = readl_relaxed(sc->gdscr);
+	mutex_unlock(&gdsc_seq_lock);
+	if (regval & HW_CONTROL_MASK)
+		return REGULATOR_MODE_FAST;
+	return REGULATOR_MODE_NORMAL;
+}
+
+static int gdsc_set_mode(struct regulator_dev *rdev, unsigned int mode)
+{
+	struct gdsc *sc = rdev_get_drvdata(rdev);
+	uint32_t regval;
+	int ret = 0;
+
+	mutex_lock(&gdsc_seq_lock);
+
+	regval = readl_relaxed(sc->gdscr);
+
+	/*
+	 * HW control can only be enabled/disabled while SW_COLLAPSE
+	 * indicates that the GDSC is on.
+	 */
+	if (regval & SW_COLLAPSE_MASK) {
+		dev_err(&rdev->dev, "can't enable hw collapse now\n");
+		mutex_unlock(&gdsc_seq_lock);
+		return -EBUSY;
+	}
+
+	switch (mode) {
+	case REGULATOR_MODE_FAST:
+		/* Turn on HW trigger mode */
+		regval |= HW_CONTROL_MASK;
+		writel_relaxed(regval, sc->gdscr);
+		/*
+		 * There may be a race with the internal HW trigger signal
+		 * that results in the GDSC going through a power down and
+		 * up cycle.  If the HW trigger signal is controlled by
+		 * firmware that also polls the same status bits we do, FW
+		 * might read an 'on' status before the GDSC can finish the
+		 * power cycle.  Wait 1us before returning to ensure FW
+		 * can't immediately poll the status bit.
+		 */
+		mb();
+		udelay(1);
+		break;
+
+	case REGULATOR_MODE_NORMAL:
+		/* Turn off HW trigger mode */
+		regval &= ~HW_CONTROL_MASK;
+		writel_relaxed(regval, sc->gdscr);
+		/*
+		 * There may be a race with the internal HW trigger signal
+		 * that results in the GDSC going through a power down and
+		 * up cycle.  If we poll too early, the status bit will
+		 * indicate 'on' before the GDSC can finish the power cycle.
+		 * Account for this by waiting 1us before polling.
+		 */
+		mb();
+		udelay(1);
+
+		ret = poll_gdsc_status(sc, ENABLED);
+		if (ret)
+			dev_err(&rdev->dev, "%s set_mode timed out: 0x%x\n",
+				sc->rdesc.name, regval);
+		break;
+	default:
+		ret = -EINVAL;
+		break;
+	}
+
+	mutex_unlock(&gdsc_seq_lock);
+
+	return ret;
+}
+
+static struct regulator_ops gdsc_ops = {
+	.is_enabled = gdsc_is_enabled,
+	.enable = gdsc_enable,
+	.disable = gdsc_disable,
+	.set_mode = gdsc_set_mode,
+	.get_mode = gdsc_get_mode,
+};
+
+static int gdsc_probe(struct platform_device *pdev)
+{
+	static atomic_t gdsc_count = ATOMIC_INIT(-1);
+	struct regulator_config reg_config = {};
+	struct regulator_init_data *init_data;
+	struct resource *res;
+	struct gdsc *sc;
+	uint32_t regval, clk_dis_wait_val = CLK_DIS_WAIT_VAL;
+	bool retain_mem, retain_periph, support_hw_trigger;
+	int i, ret;
+	u32 timeout;
+
+	sc = devm_kzalloc(&pdev->dev, sizeof(struct gdsc), GFP_KERNEL);
+	if (sc == NULL)
+		return -ENOMEM;
+
+	init_data = of_get_regulator_init_data(&pdev->dev, pdev->dev.of_node,
+			&sc->rdesc);
+	if (init_data == NULL)
+		return -ENOMEM;
+
+	if (of_get_property(pdev->dev.of_node, "parent-supply", NULL))
+		init_data->supply_regulator = "parent";
+
+	ret = of_property_read_string(pdev->dev.of_node, "regulator-name",
+				      &sc->rdesc.name);
+	if (ret)
+		return ret;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (res == NULL)
+		return -EINVAL;
+	sc->gdscr = devm_ioremap(&pdev->dev, res->start, resource_size(res));
+	if (sc->gdscr == NULL)
+		return -ENOMEM;
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+							"domain_addr");
+	if (res) {
+		sc->domain_addr = devm_ioremap(&pdev->dev, res->start,
+							resource_size(res));
+		if (sc->domain_addr == NULL)
+			return -ENOMEM;
+	}
+
+	sc->reset_aon = of_property_read_bool(pdev->dev.of_node,
+						"qcom,reset-aon-logic");
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+							"sw_reset");
+	if (res) {
+		sc->sw_reset_addr = devm_ioremap(&pdev->dev, res->start,
+							resource_size(res));
+		if (sc->sw_reset_addr == NULL)
+			return -ENOMEM;
+	}
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+							"hw_ctrl_addr");
+	if (res) {
+		sc->hw_ctrl_addr = devm_ioremap(&pdev->dev, res->start,
+							resource_size(res));
+		if (sc->hw_ctrl_addr == NULL)
+			return -ENOMEM;
+	}
+
+	sc->gds_timeout = TIMEOUT_US;
+	ret = of_property_read_u32(pdev->dev.of_node, "qcom,gds-timeout",
+							&timeout);
+	if (!ret)
+		sc->gds_timeout = timeout;
+
+	sc->clock_count = of_property_count_strings(pdev->dev.of_node,
+					    "clock-names");
+	if (sc->clock_count == -EINVAL) {
+		sc->clock_count = 0;
+	} else if (IS_ERR_VALUE(sc->clock_count)) {
+		dev_err(&pdev->dev, "Failed to get clock names\n");
+		return -EINVAL;
+	}
+
+	sc->clocks = devm_kzalloc(&pdev->dev,
+			sizeof(struct clk *) * sc->clock_count, GFP_KERNEL);
+	if (!sc->clocks)
+		return -ENOMEM;
+
+	sc->root_clk_idx = -1;
+
+	sc->root_en = of_property_read_bool(pdev->dev.of_node,
+						"qcom,enable-root-clk");
+	sc->force_root_en = of_property_read_bool(pdev->dev.of_node,
+						"qcom,force-enable-root-clk");
+	for (i = 0; i < sc->clock_count; i++) {
+		const char *clock_name;
+		of_property_read_string_index(pdev->dev.of_node, "clock-names",
+					      i, &clock_name);
+		sc->clocks[i] = devm_clk_get(&pdev->dev, clock_name);
+		if (IS_ERR(sc->clocks[i])) {
+			int rc = PTR_ERR(sc->clocks[i]);
+			if (rc != -EPROBE_DEFER)
+				dev_err(&pdev->dev, "Failed to get %s\n",
+					clock_name);
+			return rc;
+		}
+
+		if (!strcmp(clock_name, "core_root_clk"))
+			sc->root_clk_idx = i;
+	}
+
+	if ((sc->root_en || sc->force_root_en) && (sc->root_clk_idx == -1)) {
+		dev_err(&pdev->dev, "Failed to get root clock name\n");
+		return -EINVAL;
+	}
+
+	sc->rdesc.id = atomic_inc_return(&gdsc_count);
+	sc->rdesc.ops = &gdsc_ops;
+	sc->rdesc.type = REGULATOR_VOLTAGE;
+	sc->rdesc.owner = THIS_MODULE;
+	platform_set_drvdata(pdev, sc);
+
+	/*
+	 * Disable HW trigger: collapse/restore occur based on register writes.
+	 * Disable SW override: use the hardware state machine for sequencing.
+	 */
+	regval = readl_relaxed(sc->gdscr);
+	regval &= ~(HW_CONTROL_MASK | SW_OVERRIDE_MASK);
+
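+	/*
+	 * Optional DT override for the CLK_DIS_WAIT field; the raw value is
+	 * shifted into the field's bit position (bit 12 onward) in GDSCR.
+	 */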
+	if (!of_property_read_u32(pdev->dev.of_node, "qcom,clk-dis-wait-val",
+				  &clk_dis_wait_val))
+		clk_dis_wait_val = clk_dis_wait_val << 12;
+
+	/* Configure wait time between states. */
+	regval &= ~(EN_REST_WAIT_MASK | EN_FEW_WAIT_MASK | CLK_DIS_WAIT_MASK);
+	regval |= EN_REST_WAIT_VAL | EN_FEW_WAIT_VAL | clk_dis_wait_val;
+	writel_relaxed(regval, sc->gdscr);
+
+	sc->no_status_check_on_disable =
+			of_property_read_bool(pdev->dev.of_node,
+					"qcom,no-status-check-on-disable");
+	retain_mem = of_property_read_bool(pdev->dev.of_node,
+					    "qcom,retain-mem");
+	sc->toggle_mem = !retain_mem;
+	retain_periph = of_property_read_bool(pdev->dev.of_node,
+					    "qcom,retain-periph");
+	sc->toggle_periph = !retain_periph;
+	sc->toggle_logic = !of_property_read_bool(pdev->dev.of_node,
+						"qcom,skip-logic-collapse");
+	support_hw_trigger = of_property_read_bool(pdev->dev.of_node,
+						    "qcom,support-hw-trigger");
+	if (support_hw_trigger) {
+		init_data->constraints.valid_ops_mask |= REGULATOR_CHANGE_MODE;
+		init_data->constraints.valid_modes_mask |=
+				REGULATOR_MODE_NORMAL | REGULATOR_MODE_FAST;
+	}
+
+	if (!sc->toggle_logic) {
+		sc->reset_count = of_property_count_strings(pdev->dev.of_node,
+							"reset-names");
+		if (sc->reset_count == -EINVAL) {
+			sc->reset_count = 0;
+		} else if (IS_ERR_VALUE(sc->reset_count)) {
+			dev_err(&pdev->dev, "Failed to get reset reset names\n");
+			return -EINVAL;
+		}
+
+		sc->reset_clocks = devm_kzalloc(&pdev->dev,
+					sizeof(struct reset_control *) *
+					sc->reset_count,
+					GFP_KERNEL);
+		if (!sc->reset_clocks)
+			return -ENOMEM;
+
+		for (i = 0; i < sc->reset_count; i++) {
+			const char *reset_name;
+
+			of_property_read_string_index(pdev->dev.of_node,
+					"reset-names", i, &reset_name);
+			sc->reset_clocks[i] = devm_reset_control_get(&pdev->dev,
+								reset_name);
+			if (IS_ERR(sc->reset_clocks[i])) {
+				int rc = PTR_ERR(sc->reset_clocks[i]);
+
+				if (rc != -EPROBE_DEFER)
+					dev_err(&pdev->dev, "Failed to get %s\n",
+							reset_name);
+				return rc;
+			}
+		}
+
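+		/*
+		 * With logic collapse skipped, leave the GDSC enabled here;
+		 * runtime on/off is then handled via the block resets above.
+		 */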
+		regval &= ~SW_COLLAPSE_MASK;
+		writel_relaxed(regval, sc->gdscr);
+
+		ret = poll_gdsc_status(sc, ENABLED);
+		if (ret) {
+			dev_err(&pdev->dev, "%s enable timed out: 0x%x\n",
+				sc->rdesc.name, regval);
+			return ret;
+		}
+	}
+
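+	/*
+	 * "qcom,disallow-clear" means the retention flags on this GDSC's
+	 * clocks must never be cleared; allow_clear is simply its inverse.
+	 */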
+	sc->allow_clear = of_property_read_bool(pdev->dev.of_node,
+							"qcom,disallow-clear");
+	sc->allow_clear = !sc->allow_clear;
+
+	for (i = 0; i < sc->clock_count; i++) {
+		if (retain_mem || (regval & PWR_ON_MASK) || !sc->allow_clear)
+			clk_set_flags(sc->clocks[i], CLKFLAG_RETAIN_MEM);
+		else
+			clk_set_flags(sc->clocks[i], CLKFLAG_NORETAIN_MEM);
+
+		if (retain_periph || (regval & PWR_ON_MASK) || !sc->allow_clear)
+			clk_set_flags(sc->clocks[i], CLKFLAG_RETAIN_PERIPH);
+		else
+			clk_set_flags(sc->clocks[i], CLKFLAG_NORETAIN_PERIPH);
+	}
+
+	reg_config.dev = &pdev->dev;
+	reg_config.init_data = init_data;
+	reg_config.driver_data = sc;
+	reg_config.of_node = pdev->dev.of_node;
+	sc->rdev = regulator_register(&sc->rdesc, &reg_config);
+	if (IS_ERR(sc->rdev)) {
+		dev_err(&pdev->dev, "regulator_register(\"%s\") failed.\n",
+			sc->rdesc.name);
+		return PTR_ERR(sc->rdev);
+	}
+
+	return 0;
+}
+
+static int gdsc_remove(struct platform_device *pdev)
+{
+	struct gdsc *sc = platform_get_drvdata(pdev);
+	regulator_unregister(sc->rdev);
+	return 0;
+}
+
+static const struct of_device_id gdsc_match_table[] = {
+	{ .compatible = "qcom,gdsc" },
+	{}
+};
+
+static struct platform_driver gdsc_driver = {
+	.probe		= gdsc_probe,
+	.remove		= gdsc_remove,
+	.driver		= {
+		.name		= "gdsc",
+		.of_match_table = gdsc_match_table,
+		.owner		= THIS_MODULE,
+	},
+};
+
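+/*
+ * Register at subsys_initcall time so the GDSC regulators are available
+ * before the clock and multimedia consumers that depend on them probe.
+ */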
+static int __init gdsc_init(void)
+{
+	return platform_driver_register(&gdsc_driver);
+}
+subsys_initcall(gdsc_init);
+
+static void __exit gdsc_exit(void)
+{
+	platform_driver_unregister(&gdsc_driver);
+}
+module_exit(gdsc_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("MSM8974 GDSC power rail regulator driver");
diff -Nruw linux-4.4.115-fbx/drivers/clk/msm./Kconfig linux-4.4.115-fbx/drivers/clk/msm/Kconfig
--- linux-4.4.115-fbx/drivers/clk/msm./Kconfig	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/clk/msm/Kconfig	2019-01-22 16:16:23.015241987 +0100
@@ -0,0 +1,30 @@
+config COMMON_CLK_MSM
+	tristate "Support for MSM clock controllers"
+	depends on OF
+	depends on ARCH_QCOM
+	select RATIONAL
+	help
+	  This supports the clock controllers used by MSM devices, such as
+	  the global, mmss and gpu clock controllers.
+	  Say Y if you want to support the clocks exposed by the MSM on
+	  platforms such as msm8996, msm8998 etc.
+
+config MSM_CLK_CONTROLLER_V2
+	bool "QTI clock driver"
+	depends on COMMON_CLK_MSM
+	---help---
+	   Generate clock data structures from definitions found in
+	   device tree.
+
+config MSM_VIRTCLK_FRONTEND
+	bool
+
+config MSM_VIRTCLK_FRONTEND_8996
+	tristate "QTI msm8996 virtual clock frontend driver"
+	depends on COMMON_CLK_MSM && MSM_HAB
+	select MSM_VIRTCLK_FRONTEND
+	---help---
+	   This is the virtual clock frontend driver for the QTI msm8996
+	   virtual platform.
+
+source "drivers/clk/msm/mdss/Kconfig"
diff -Nruw linux-4.4.115-fbx/drivers/clk/msm./Makefile linux-4.4.115-fbx/drivers/clk/msm/Makefile
--- linux-4.4.115-fbx/drivers/clk/msm./Makefile	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/clk/msm/Makefile	2019-01-22 16:16:23.015241987 +0100
@@ -0,0 +1,35 @@
+obj-$(CONFIG_COMMON_CLK_MSM)	+= clock.o
+obj-$(CONFIG_COMMON_CLK_MSM)	+= clock-dummy.o
+obj-$(CONFIG_COMMON_CLK_MSM)	+= clock-generic.o
+obj-$(CONFIG_COMMON_CLK_MSM)	+= clock-local2.o
+obj-$(CONFIG_COMMON_CLK_MSM)	+= clock-pll.o
+obj-$(CONFIG_COMMON_CLK_MSM)	+= clock-alpha-pll.o
+obj-$(CONFIG_COMMON_CLK_MSM)	+= clock-rpm.o
+obj-$(CONFIG_COMMON_CLK_MSM)	+= clock-voter.o
+obj-$(CONFIG_COMMON_CLK_MSM)	+= reset.o
+
+obj-$(CONFIG_MSM_CLK_CONTROLLER_V2)	+= msm-clock-controller.o
+
+obj-$(CONFIG_COMMON_CLK_MSM)	+= clock-debug.o
+
+# MSM 8996
+ifeq ($(CONFIG_COMMON_CLK_MSM), y)
+	obj-$(CONFIG_ARCH_MSM8996) += clock-gcc-8996.o
+	obj-$(CONFIG_ARCH_MSM8996) += clock-mmss-8996.o
+	obj-$(CONFIG_ARCH_MSM8996) += clock-cpu-8996.o
+endif
+
+# MSM 8998
+ifeq ($(CONFIG_COMMON_CLK_MSM), y)
+	obj-$(CONFIG_ARCH_MSM8998)	+= clock-gcc-8998.o
+	obj-$(CONFIG_ARCH_MSM8998)	+= clock-gpu-8998.o
+	obj-$(CONFIG_ARCH_MSM8998)	+= clock-mmss-8998.o
+	obj-$(CONFIG_ARCH_MSM8998)	+= clock-osm.o
+endif
+
+obj-$(CONFIG_COMMON_CLK_MSM)	+= gdsc.o
+obj-$(CONFIG_COMMON_CLK_MSM)	+= mdss/
+
+obj-$(CONFIG_MSM_VIRTCLK_FRONTEND)	+= virtclk-front.o
+obj-$(CONFIG_MSM_VIRTCLK_FRONTEND)	+= virt-reset-front.o
+obj-$(CONFIG_MSM_VIRTCLK_FRONTEND_8996)	+= virtclk-front-8996.o
diff -Nruw linux-4.4.115-fbx/drivers/clk/msm./mdss/Kconfig linux-4.4.115-fbx/drivers/clk/msm/mdss/Kconfig
--- linux-4.4.115-fbx/drivers/clk/msm./mdss/Kconfig	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/clk/msm/mdss/Kconfig	2019-01-22 16:16:23.019242024 +0100
@@ -0,0 +1,7 @@
+config MSM_MDSS_PLL
+	bool "MDSS pll programming"
+	depends on COMMON_CLK_MSM
+	---help---
+	It provides support for DSI, eDP and HDMI interface pll programming on MDSS
+	hardware. It also handles the pll specific resources and turns them on/off
+	when the mdss pll client tries to enable/disable pll clocks.
diff -Nruw linux-4.4.115-fbx/drivers/clk/msm./mdss/Makefile linux-4.4.115-fbx/drivers/clk/msm/mdss/Makefile
--- linux-4.4.115-fbx/drivers/clk/msm./mdss/Makefile	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/clk/msm/mdss/Makefile	2019-01-22 16:16:23.019242024 +0100
@@ -0,0 +1,9 @@
+obj-$(CONFIG_MSM_MDSS_PLL) += mdss-pll-util.o
+obj-$(CONFIG_MSM_MDSS_PLL) += mdss-pll.o
+obj-$(CONFIG_MSM_MDSS_PLL) += mdss-dsi-pll-8996.o
+obj-$(CONFIG_MSM_MDSS_PLL) += mdss-dsi-pll-8996-util.o
+obj-$(CONFIG_MSM_MDSS_PLL) += mdss-dsi-pll-8998.o
+obj-$(CONFIG_MSM_MDSS_PLL) += mdss-dp-pll-8998.o
+obj-$(CONFIG_MSM_MDSS_PLL) += mdss-dp-pll-8998-util.o
+obj-$(CONFIG_MSM_MDSS_PLL) += mdss-hdmi-pll-8996.o
+obj-$(CONFIG_MSM_MDSS_PLL) += mdss-hdmi-pll-8998.o
diff -Nruw linux-4.4.115-fbx/drivers/clk/msm./mdss/mdss-dp-pll-8998.c linux-4.4.115-fbx/drivers/clk/msm/mdss/mdss-dp-pll-8998.c
--- linux-4.4.115-fbx/drivers/clk/msm./mdss/mdss-dp-pll-8998.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/clk/msm/mdss/mdss-dp-pll-8998.c	2019-01-22 16:16:23.019242024 +0100
@@ -0,0 +1,224 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+/*
+***************************************************************************
+******** Display Port PLL driver block diagram for branch clocks **********
+***************************************************************************
+
+			+--------------------------+
+			|       DP_VCO_CLK         |
+			|			   |
+			|  +-------------------+   |
+			|  |   (DP PLL/VCO)    |   |
+			|  +---------+---------+   |
+			|	     v		   |
+			| +----------+-----------+ |
+			| | hsclk_divsel_clk_src | |
+			| +----------+-----------+ |
+			+--------------------------+
+				     |
+				     v
+	   +------------<------------|------------>-------------+
+	   |                         |                          |
++----------v----------+	  +----------v----------+    +----------v----------+
+|   dp_link_2x_clk    |	  | vco_divided_clk_src	|    | vco_divided_clk_src |
+|     divsel_five     |	  |			|    |			   |
+v----------+----------v	  |	divsel_two	|    |	   divsel_four	   |
+	   |		  +----------+----------+    +----------+----------+
+	   |                         |                          |
+	   v			     v				v
+				     |	+---------------------+	|
+  Input to MMSSCC block		     |	|    (aux_clk_ops)    |	|
+  for link clk, crypto clk	     +-->   vco_divided_clk   <-+
+  and interface clock			|	_src_mux      |
+					+----------+----------+
+						   |
+						   v
+					 Input to MMSSCC block
+					 for DP pixel clock
+
+******************************************************************************
+*/
+
+#define pr_fmt(fmt)	"%s: " fmt, __func__
+
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/delay.h>
+#include <linux/clk/msm-clk-provider.h>
+#include <linux/clk/msm-clk.h>
+#include <linux/clk/msm-clock-generic.h>
+#include <dt-bindings/clock/msm-clocks-8998.h>
+
+#include "mdss-pll.h"
+#include "mdss-dp-pll.h"
+#include "mdss-dp-pll-8998.h"
+
+static struct clk_ops clk_ops_vco_divided_clk_src_c;
+static struct clk_ops clk_ops_link_2x_clk_div_c;
+static struct clk_ops clk_ops_gen_mux_dp;
+
+static struct clk_div_ops link2xclk_divsel_ops = {
+	.set_div = link2xclk_divsel_set_div,
+	.get_div = link2xclk_divsel_get_div,
+};
+
+static struct clk_div_ops vco_divided_clk_ops = {
+	.set_div = vco_divided_clk_set_div,
+	.get_div = vco_divided_clk_get_div,
+};
+
+static struct clk_ops dp_8998_vco_clk_ops = {
+	.set_rate = dp_vco_set_rate,
+	.round_rate = dp_vco_round_rate,
+	.prepare = dp_vco_prepare,
+	.unprepare = dp_vco_unprepare,
+	.handoff = dp_vco_handoff,
+};
+
+static struct clk_mux_ops mdss_mux_ops = {
+	.set_mux_sel = mdss_set_mux_sel,
+	.get_mux_sel = mdss_get_mux_sel,
+};
+
+static struct dp_pll_vco_clk dp_vco_clk = {
+	.min_rate = DP_VCO_HSCLK_RATE_1620MHZDIV1000,
+	.max_rate = DP_VCO_HSCLK_RATE_5400MHZDIV1000,
+	.c = {
+		.dbg_name = "dp_vco_clk",
+		.ops = &dp_8998_vco_clk_ops,
+		.flags = CLKFLAG_NO_RATE_CACHE,
+		CLK_INIT(dp_vco_clk.c),
+	},
+};
+
+static struct div_clk dp_link_2x_clk_divsel_five = {
+	.data = {
+		.div = 5,
+		.min_div = 5,
+		.max_div = 5,
+	},
+	.ops = &link2xclk_divsel_ops,
+	.c = {
+		.parent = &dp_vco_clk.c,
+		.dbg_name = "dp_link_2x_clk_divsel_five",
+		.ops = &clk_ops_link_2x_clk_div_c,
+		.flags = CLKFLAG_NO_RATE_CACHE,
+		CLK_INIT(dp_link_2x_clk_divsel_five.c),
+	},
+};
+
+static struct div_clk vco_divsel_four_clk_src = {
+	.data = {
+		.div = 4,
+		.min_div = 4,
+		.max_div = 4,
+	},
+	.ops = &vco_divided_clk_ops,
+	.c = {
+		.parent = &dp_vco_clk.c,
+		.dbg_name = "vco_divsel_four_clk_src",
+		.ops = &clk_ops_vco_divided_clk_src_c,
+		.flags = CLKFLAG_NO_RATE_CACHE,
+		CLK_INIT(vco_divsel_four_clk_src.c),
+	},
+};
+
+static struct div_clk vco_divsel_two_clk_src = {
+	.data = {
+		.div = 2,
+		.min_div = 2,
+		.max_div = 2,
+	},
+	.ops = &vco_divided_clk_ops,
+	.c = {
+		.parent = &dp_vco_clk.c,
+		.dbg_name = "vco_divsel_two_clk_src",
+		.ops = &clk_ops_vco_divided_clk_src_c,
+		.flags = CLKFLAG_NO_RATE_CACHE,
+		CLK_INIT(vco_divsel_two_clk_src.c),
+	},
+};
+
+static struct mux_clk vco_divided_clk_src_mux = {
+	.num_parents = 2,
+	.parents = (struct clk_src[]) {
+		{&vco_divsel_two_clk_src.c, 0},
+		{&vco_divsel_four_clk_src.c, 1},
+	},
+	.ops = &mdss_mux_ops,
+	.c = {
+		.parent = &vco_divsel_two_clk_src.c,
+		.dbg_name = "vco_divided_clk_src_mux",
+		.ops = &clk_ops_gen_mux_dp,
+		.flags = CLKFLAG_NO_RATE_CACHE,
+		CLK_INIT(vco_divided_clk_src_mux.c),
+	}
+};
+
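+/*
+ * Clock list handed to of_msm_clock_register(); these feed the MMSS clock
+ * controller inputs shown in the diagram above.
+ */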
+static struct clk_lookup dp_pllcc_8998[] = {
+	CLK_LIST(dp_vco_clk),
+	CLK_LIST(dp_link_2x_clk_divsel_five),
+	CLK_LIST(vco_divsel_four_clk_src),
+	CLK_LIST(vco_divsel_two_clk_src),
+	CLK_LIST(vco_divided_clk_src_mux),
+};
+
+int dp_pll_clock_register_8998(struct platform_device *pdev,
+				 struct mdss_pll_resources *pll_res)
+{
+	int rc = -ENOTSUPP;
+
+	if (!pll_res || !pll_res->pll_base || !pll_res->phy_base) {
+		DEV_ERR("%s: Invalid input parameters\n", __func__);
+		return -EINVAL;
+	}
+
+	/* Set client data for vco, mux and div clocks */
+	dp_vco_clk.priv = pll_res;
+	vco_divided_clk_src_mux.priv = pll_res;
+	vco_divsel_two_clk_src.priv = pll_res;
+	vco_divsel_four_clk_src.priv = pll_res;
+	dp_link_2x_clk_divsel_five.priv = pll_res;
+
+	clk_ops_link_2x_clk_div_c = clk_ops_div;
+	clk_ops_link_2x_clk_div_c.prepare = mdss_pll_div_prepare;
+
+	/*
+	 * Set the ops for the divider in the pixel clock tree to the
+	 * slave_div to ensure that a set rate on this divider clock will not
+	 * be propagated to its parent. This is needed to ensure that when we
+	 * set the rate for the pixel clock, the vco is not reconfigured.
+	 */
+	clk_ops_vco_divided_clk_src_c = clk_ops_slave_div;
+	clk_ops_vco_divided_clk_src_c.prepare = mdss_pll_div_prepare;
+	clk_ops_vco_divided_clk_src_c.handoff = vco_divided_clk_handoff;
+
+	clk_ops_gen_mux_dp = clk_ops_gen_mux;
+	clk_ops_gen_mux_dp.get_rate = parent_get_rate;
+
+	/* We can select different clock ops for future versions */
+	dp_vco_clk.c.ops = &dp_8998_vco_clk_ops;
+
+	rc = of_msm_clock_register(pdev->dev.of_node, dp_pllcc_8998,
+					ARRAY_SIZE(dp_pllcc_8998));
+	if (rc) {
+		DEV_ERR("%s: Clock register failed rc=%d\n", __func__, rc);
+		rc = -EPROBE_DEFER;
+	} else {
+		DEV_DBG("%s SUCCESS\n", __func__);
+	}
+
+	return rc;
+}
diff -Nruw linux-4.4.115-fbx/drivers/clk/msm./mdss/mdss-dp-pll-8998.h linux-4.4.115-fbx/drivers/clk/msm/mdss/mdss-dp-pll-8998.h
--- linux-4.4.115-fbx/drivers/clk/msm./mdss/mdss-dp-pll-8998.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/clk/msm/mdss/mdss-dp-pll-8998.h	2019-01-22 16:16:23.019242024 +0100
@@ -0,0 +1,180 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __MDSS_DP_PLL_8998_H
+#define __MDSS_DP_PLL_8998_H
+
+#define DP_PHY_REVISION_ID0			0x0000
+#define DP_PHY_REVISION_ID1			0x0004
+#define DP_PHY_REVISION_ID2			0x0008
+#define DP_PHY_REVISION_ID3			0x000C
+
+#define DP_PHY_CFG				0x0010
+#define DP_PHY_PD_CTL				0x0014
+#define DP_PHY_MODE				0x0018
+
+#define DP_PHY_AUX_CFG0				0x001C
+#define DP_PHY_AUX_CFG1				0x0020
+#define DP_PHY_AUX_CFG2				0x0024
+#define DP_PHY_AUX_CFG3				0x0028
+#define DP_PHY_AUX_CFG4				0x002C
+#define DP_PHY_AUX_CFG5				0x0030
+#define DP_PHY_AUX_CFG6				0x0034
+#define DP_PHY_AUX_CFG7				0x0038
+#define DP_PHY_AUX_CFG8				0x003C
+#define DP_PHY_AUX_CFG9				0x0040
+#define DP_PHY_AUX_INTERRUPT_MASK		0x0044
+#define DP_PHY_AUX_INTERRUPT_CLEAR		0x0048
+#define DP_PHY_AUX_BIST_CFG			0x004C
+
+#define DP_PHY_VCO_DIV				0x0064
+#define DP_PHY_TX0_TX1_LANE_CTL			0x0068
+
+#define DP_PHY_TX2_TX3_LANE_CTL			0x0084
+#define DP_PHY_SPARE0				0x00A8
+#define DP_PHY_STATUS				0x00BC
+
+/* Tx registers */
+#define QSERDES_TX0_OFFSET			0x0400
+#define QSERDES_TX1_OFFSET			0x0800
+
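+/* TXn_* offsets below are relative to the QSERDES_TX0/TX1_OFFSET lane bases. */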
+#define TXn_BIST_MODE_LANENO			0x0000
+#define TXn_CLKBUF_ENABLE			0x0008
+#define TXn_TX_EMP_POST1_LVL			0x000C
+
+#define TXn_TX_DRV_LVL				0x001C
+
+#define TXn_RESET_TSYNC_EN			0x0024
+#define TXn_PRE_STALL_LDO_BOOST_EN		0x0028
+#define TXn_TX_BAND				0x002C
+#define TXn_SLEW_CNTL				0x0030
+#define TXn_INTERFACE_SELECT			0x0034
+
+#define TXn_RES_CODE_LANE_TX			0x003C
+#define TXn_RES_CODE_LANE_RX			0x0040
+#define TXn_RES_CODE_LANE_OFFSET_TX		0x0044
+#define TXn_RES_CODE_LANE_OFFSET_RX		0x0048
+
+#define TXn_DEBUG_BUS_SEL			0x0058
+#define TXn_TRANSCEIVER_BIAS_EN			0x005C
+#define TXn_HIGHZ_DRVR_EN			0x0060
+#define TXn_TX_POL_INV				0x0064
+#define TXn_PARRATE_REC_DETECT_IDLE_EN		0x0068
+
+#define TXn_LANE_MODE_1				0x008C
+
+#define TXn_TRAN_DRVR_EMP_EN			0x00C0
+#define TXn_TX_INTERFACE_MODE			0x00C4
+
+#define TXn_VMODE_CTRL1				0x00F0
+
+
+/* PLL register offset */
+#define QSERDES_COM_ATB_SEL1			0x0000
+#define QSERDES_COM_ATB_SEL2			0x0004
+#define QSERDES_COM_FREQ_UPDATE			0x0008
+#define QSERDES_COM_BG_TIMER			0x000C
+#define QSERDES_COM_SSC_EN_CENTER		0x0010
+#define QSERDES_COM_SSC_ADJ_PER1		0x0014
+#define QSERDES_COM_SSC_ADJ_PER2		0x0018
+#define QSERDES_COM_SSC_PER1			0x001C
+#define QSERDES_COM_SSC_PER2			0x0020
+#define QSERDES_COM_SSC_STEP_SIZE1		0x0024
+#define QSERDES_COM_SSC_STEP_SIZE2		0x0028
+#define QSERDES_COM_POST_DIV			0x002C
+#define QSERDES_COM_POST_DIV_MUX		0x0030
+#define QSERDES_COM_BIAS_EN_CLKBUFLR_EN		0x0034
+#define QSERDES_COM_CLK_ENABLE1			0x0038
+#define QSERDES_COM_SYS_CLK_CTRL		0x003C
+#define QSERDES_COM_SYSCLK_BUF_ENABLE		0x0040
+#define QSERDES_COM_PLL_EN			0x0044
+#define QSERDES_COM_PLL_IVCO			0x0048
+#define QSERDES_COM_CMN_IETRIM			0x004C
+#define QSERDES_COM_CMN_IPTRIM			0x0050
+
+#define QSERDES_COM_CP_CTRL_MODE0		0x0060
+#define QSERDES_COM_CP_CTRL_MODE1		0x0064
+#define QSERDES_COM_PLL_RCTRL_MODE0		0x0068
+#define QSERDES_COM_PLL_RCTRL_MODE1		0x006C
+#define QSERDES_COM_PLL_CCTRL_MODE0		0x0070
+#define QSERDES_COM_PLL_CCTRL_MODE1		0x0074
+#define QSERDES_COM_PLL_CNTRL			0x0078
+#define QSERDES_COM_BIAS_EN_CTRL_BY_PSM		0x007C
+#define QSERDES_COM_SYSCLK_EN_SEL		0x0080
+#define QSERDES_COM_CML_SYSCLK_SEL		0x0084
+#define QSERDES_COM_RESETSM_CNTRL		0x0088
+#define QSERDES_COM_RESETSM_CNTRL2		0x008C
+#define QSERDES_COM_LOCK_CMP_EN			0x0090
+#define QSERDES_COM_LOCK_CMP_CFG		0x0094
+#define QSERDES_COM_LOCK_CMP1_MODE0		0x0098
+#define QSERDES_COM_LOCK_CMP2_MODE0		0x009C
+#define QSERDES_COM_LOCK_CMP3_MODE0		0x00A0
+
+#define QSERDES_COM_DEC_START_MODE0		0x00B0
+#define QSERDES_COM_DEC_START_MODE1		0x00B4
+#define QSERDES_COM_DIV_FRAC_START1_MODE0	0x00B8
+#define QSERDES_COM_DIV_FRAC_START2_MODE0	0x00BC
+#define QSERDES_COM_DIV_FRAC_START3_MODE0	0x00C0
+#define QSERDES_COM_DIV_FRAC_START1_MODE1	0x00C4
+#define QSERDES_COM_DIV_FRAC_START2_MODE1	0x00C8
+#define QSERDES_COM_DIV_FRAC_START3_MODE1	0x00CC
+#define QSERDES_COM_INTEGLOOP_INITVAL		0x00D0
+#define QSERDES_COM_INTEGLOOP_EN		0x00D4
+#define QSERDES_COM_INTEGLOOP_GAIN0_MODE0	0x00D8
+#define QSERDES_COM_INTEGLOOP_GAIN1_MODE0	0x00DC
+#define QSERDES_COM_INTEGLOOP_GAIN0_MODE1	0x00E0
+#define QSERDES_COM_INTEGLOOP_GAIN1_MODE1	0x00E4
+#define QSERDES_COM_VCOCAL_DEADMAN_CTRL		0x00E8
+#define QSERDES_COM_VCO_TUNE_CTRL		0x00EC
+#define QSERDES_COM_VCO_TUNE_MAP		0x00F0
+
+#define QSERDES_COM_CMN_STATUS			0x0124
+#define QSERDES_COM_RESET_SM_STATUS		0x0128
+
+#define QSERDES_COM_CLK_SEL			0x0138
+#define QSERDES_COM_HSCLK_SEL			0x013C
+
+#define QSERDES_COM_CORECLK_DIV_MODE0		0x0148
+
+#define QSERDES_COM_SW_RESET			0x0150
+#define QSERDES_COM_CORE_CLK_EN			0x0154
+#define QSERDES_COM_C_READY_STATUS		0x0158
+#define QSERDES_COM_CMN_CONFIG			0x015C
+
+#define QSERDES_COM_SVS_MODE_CLK_SEL		0x0164
+
+#define DP_PLL_POLL_SLEEP_US			500
+#define DP_PLL_POLL_TIMEOUT_US			10000
+
+#define DP_VCO_RATE_8100MHZDIV1000		8100000UL
+#define DP_VCO_RATE_10800MHZDIV1000		10800000UL
+
+#define DP_VCO_HSCLK_RATE_1620MHZDIV1000	1620000UL
+#define DP_VCO_HSCLK_RATE_2700MHZDIV1000	2700000UL
+#define DP_VCO_HSCLK_RATE_5400MHZDIV1000	5400000UL
+
+int dp_vco_set_rate(struct clk *c, unsigned long rate);
+unsigned long dp_vco_get_rate(struct clk *c);
+long dp_vco_round_rate(struct clk *c, unsigned long rate);
+enum handoff dp_vco_handoff(struct clk *c);
+enum handoff vco_divided_clk_handoff(struct clk *c);
+int dp_vco_prepare(struct clk *c);
+void dp_vco_unprepare(struct clk *c);
+int hsclk_divsel_set_div(struct div_clk *clk, int div);
+int hsclk_divsel_get_div(struct div_clk *clk);
+int link2xclk_divsel_set_div(struct div_clk *clk, int div);
+int link2xclk_divsel_get_div(struct div_clk *clk);
+int vco_divided_clk_set_div(struct div_clk *clk, int div);
+int vco_divided_clk_get_div(struct div_clk *clk);
+
+#endif /* __MDSS_DP_PLL_8998_H */
diff -Nruw linux-4.4.115-fbx/drivers/clk/msm./mdss/mdss-dp-pll-8998-util.c linux-4.4.115-fbx/drivers/clk/msm/mdss/mdss-dp-pll-8998-util.c
--- linux-4.4.115-fbx/drivers/clk/msm./mdss/mdss-dp-pll-8998-util.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/clk/msm/mdss/mdss-dp-pll-8998-util.c	2019-01-22 16:16:23.019242024 +0100
@@ -0,0 +1,846 @@
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt)	"%s: " fmt, __func__
+
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/iopoll.h>
+#include <linux/delay.h>
+#include <linux/clk/msm-clock-generic.h>
+#include <linux/usb/usbpd.h>
+
+#include "mdss-pll.h"
+#include "mdss-dp-pll.h"
+#include "mdss-dp-pll-8998.h"
+
+int link2xclk_divsel_set_div(struct div_clk *clk, int div)
+{
+	int rc;
+	u32 link2xclk_div_tx0, link2xclk_div_tx1;
+	u32 phy_mode;
+	u8 orientation;
+	u32 spare_value;
+	struct mdss_pll_resources *dp_res = clk->priv;
+
+	rc = mdss_pll_resource_enable(dp_res, true);
+	if (rc) {
+		pr_err("Failed to enable mdss DP PLL resources\n");
+		return rc;
+	}
+
+	spare_value = MDSS_PLL_REG_R(dp_res->phy_base, DP_PHY_SPARE0);
+	orientation = (spare_value & 0xF0) >> 4;
+	pr_debug("spare_value=0x%x, orientation=0x%x\n", spare_value,
+			orientation);
+
+	link2xclk_div_tx0 = MDSS_PLL_REG_R(dp_res->phy_base,
+				QSERDES_TX0_OFFSET + TXn_TX_BAND);
+	link2xclk_div_tx1 = MDSS_PLL_REG_R(dp_res->phy_base,
+				QSERDES_TX1_OFFSET + TXn_TX_BAND);
+
+	link2xclk_div_tx0 &= ~0x07;	/* bits 0 to 2 */
+	link2xclk_div_tx1 &= ~0x07;	/* bits 0 to 2 */
+
+	/* Configure TX band Mux */
+	link2xclk_div_tx0 |= 0x4;
+	link2xclk_div_tx1 |= 0x4;
+
+	/* Configure DP PHY MODE depending on the plug orientation */
+	if (orientation == ORIENTATION_CC2)
+		phy_mode = 0x48;
+	else
+		phy_mode = 0x58;
+
+
+	MDSS_PLL_REG_W(dp_res->phy_base,
+			QSERDES_TX0_OFFSET + TXn_TX_BAND,
+			link2xclk_div_tx0);
+	MDSS_PLL_REG_W(dp_res->phy_base,
+			QSERDES_TX1_OFFSET + TXn_TX_BAND,
+			link2xclk_div_tx1);
+	MDSS_PLL_REG_W(dp_res->phy_base,
+			DP_PHY_MODE, phy_mode);
+	/* Make sure the PHY register writes are done */
+	wmb();
+
+	pr_debug("%s: div=%d link2xclk_div_tx0=%x, link2xclk_div_tx1=%x\n",
+			__func__, div, link2xclk_div_tx0, link2xclk_div_tx1);
+
+	mdss_pll_resource_enable(dp_res, false);
+
+	return rc;
+}
+
+int link2xclk_divsel_get_div(struct div_clk *clk)
+{
+	int rc;
+	u32 div = 0, phy_mode;
+	struct mdss_pll_resources *dp_res = clk->priv;
+
+	rc = mdss_pll_resource_enable(dp_res, true);
+	if (rc) {
+		pr_err("Failed to enable dp_res resources\n");
+		return rc;
+	}
+
+	phy_mode = MDSS_PLL_REG_R(dp_res->phy_base, DP_PHY_MODE);
+
+	if (phy_mode & 0x48)
+		pr_err("%s: DP PAR Rate not correct\n", __func__);
+
+	if ((phy_mode & 0x3) == 1)
+		div = 10;
+	else if ((phy_mode & 0x3) == 0)
+		div = 5;
+	else
+		pr_err("%s: unsupported div: %d\n", __func__, phy_mode);
+
+	mdss_pll_resource_enable(dp_res, false);
+	pr_debug("%s: phy_mode=%d, div=%d\n", __func__,
+						phy_mode, div);
+
+	return div;
+}
+
+int vco_divided_clk_set_div(struct div_clk *clk, int div)
+{
+	int rc;
+	u32 auxclk_div;
+	struct mdss_pll_resources *dp_res = clk->priv;
+
+	rc = mdss_pll_resource_enable(dp_res, true);
+	if (rc) {
+		pr_err("Failed to enable mdss DP PLL resources\n");
+		return rc;
+	}
+
+	auxclk_div = MDSS_PLL_REG_R(dp_res->phy_base, DP_PHY_VCO_DIV);
+	auxclk_div &= ~0x03;	/* bits 0 to 1 */
+
+	if (div == 4)
+		auxclk_div |= 2;
+	else
+		auxclk_div |= 1; /* Default divider */
+
+	MDSS_PLL_REG_W(dp_res->phy_base,
+			DP_PHY_VCO_DIV, auxclk_div);
+	/* Make sure the PHY registers writes are done */
+	wmb();
+	pr_debug("%s: div=%d auxclk_div=%x\n", __func__, div, auxclk_div);
+
+	mdss_pll_resource_enable(dp_res, false);
+
+	return rc;
+}
+
+
+enum handoff vco_divided_clk_handoff(struct clk *c)
+{
+	/*
+	 * Since cont-splash is not enabled, disable handoff
+	 * for vco_divider_clk.
+	 */
+	return HANDOFF_DISABLED_CLK;
+}
+
+int vco_divided_clk_get_div(struct div_clk *clk)
+{
+	int rc;
+	u32 div, auxclk_div;
+	struct mdss_pll_resources *dp_res = clk->priv;
+
+	rc = mdss_pll_resource_enable(dp_res, true);
+	if (rc) {
+		pr_err("Failed to enable dp_res resources\n");
+		return rc;
+	}
+
+	auxclk_div = MDSS_PLL_REG_R(dp_res->phy_base, DP_PHY_VCO_DIV);
+	auxclk_div &= 0x03;
+
+	div = 2; /* Default divider */
+	if (auxclk_div == 2)
+		div = 4;
+
+	mdss_pll_resource_enable(dp_res, false);
+
+	pr_debug("%s: auxclk_div=%d, div=%d\n", __func__, auxclk_div, div);
+
+	return div;
+}
+
+int dp_config_vco_rate(struct dp_pll_vco_clk *vco, unsigned long rate)
+{
+	u32 res = 0;
+	struct mdss_pll_resources *dp_res = vco->priv;
+	u8 orientation, ln_cnt;
+	u32 spare_value;
+
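+	/*
+	 * DP_PHY_SPARE0 carries the lane count in the low nibble and the plug
+	 * orientation in the high nibble (layout assumed from the decoding
+	 * done below).
+	 */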
+	spare_value = MDSS_PLL_REG_R(dp_res->phy_base, DP_PHY_SPARE0);
+	ln_cnt = spare_value & 0x0F;
+	orientation = (spare_value & 0xF0) >> 4;
+	pr_debug("%s: spare_value=0x%x, ln_cnt=0x%x, orientation=0x%x\n",
+				__func__, spare_value, ln_cnt, orientation);
+
+	if (ln_cnt != 4) {
+		if (orientation == ORIENTATION_CC2)
+			MDSS_PLL_REG_W(dp_res->phy_base,
+				DP_PHY_PD_CTL, 0x2d);
+		else
+			MDSS_PLL_REG_W(dp_res->phy_base,
+				DP_PHY_PD_CTL, 0x35);
+	} else {
+		MDSS_PLL_REG_W(dp_res->phy_base,
+				DP_PHY_PD_CTL, 0x3d);
+	}
+
+	/* Make sure the PHY register writes are done */
+	wmb();
+	MDSS_PLL_REG_W(dp_res->pll_base,
+			QSERDES_COM_SVS_MODE_CLK_SEL, 0x01);
+	MDSS_PLL_REG_W(dp_res->pll_base,
+			QSERDES_COM_SYSCLK_EN_SEL, 0x37);
+
+	MDSS_PLL_REG_W(dp_res->pll_base,
+			QSERDES_COM_CLK_ENABLE1, 0x0e);
+	MDSS_PLL_REG_W(dp_res->pll_base,
+			QSERDES_COM_SYSCLK_BUF_ENABLE, 0x06);
+	MDSS_PLL_REG_W(dp_res->pll_base,
+			QSERDES_COM_CLK_SEL, 0x30);
+
+	/* Different for each clock rates */
+	if (rate == DP_VCO_HSCLK_RATE_1620MHZDIV1000) {
+		pr_debug("%s: VCO rate: %ld\n", __func__,
+				DP_VCO_RATE_8100MHZDIV1000);
+		MDSS_PLL_REG_W(dp_res->pll_base,
+			QSERDES_COM_SYS_CLK_CTRL, 0x02);
+		MDSS_PLL_REG_W(dp_res->pll_base,
+			QSERDES_COM_HSCLK_SEL, 0x2c);
+		MDSS_PLL_REG_W(dp_res->pll_base,
+			QSERDES_COM_LOCK_CMP_EN, 0x04);
+		MDSS_PLL_REG_W(dp_res->pll_base,
+			QSERDES_COM_DEC_START_MODE0, 0x69);
+		MDSS_PLL_REG_W(dp_res->pll_base,
+			QSERDES_COM_DIV_FRAC_START1_MODE0, 0x00);
+		MDSS_PLL_REG_W(dp_res->pll_base,
+			QSERDES_COM_DIV_FRAC_START2_MODE0, 0x80);
+		MDSS_PLL_REG_W(dp_res->pll_base,
+			QSERDES_COM_DIV_FRAC_START3_MODE0, 0x07);
+		MDSS_PLL_REG_W(dp_res->pll_base,
+			QSERDES_COM_CMN_CONFIG, 0x42);
+		MDSS_PLL_REG_W(dp_res->pll_base,
+			QSERDES_COM_LOCK_CMP1_MODE0, 0xbf);
+		MDSS_PLL_REG_W(dp_res->pll_base,
+			QSERDES_COM_LOCK_CMP2_MODE0, 0x21);
+		MDSS_PLL_REG_W(dp_res->pll_base,
+			QSERDES_COM_LOCK_CMP3_MODE0, 0x00);
+	} else if (rate == DP_VCO_HSCLK_RATE_2700MHZDIV1000) {
+		pr_debug("%s: VCO rate: %ld\n", __func__,
+				DP_VCO_RATE_8100MHZDIV1000);
+		MDSS_PLL_REG_W(dp_res->pll_base,
+			QSERDES_COM_SYS_CLK_CTRL, 0x06);
+		MDSS_PLL_REG_W(dp_res->pll_base,
+			QSERDES_COM_HSCLK_SEL, 0x84);
+		MDSS_PLL_REG_W(dp_res->pll_base,
+			QSERDES_COM_LOCK_CMP_EN, 0x08);
+		MDSS_PLL_REG_W(dp_res->pll_base,
+			QSERDES_COM_DEC_START_MODE0, 0x69);
+		MDSS_PLL_REG_W(dp_res->pll_base,
+			QSERDES_COM_DIV_FRAC_START1_MODE0, 0x00);
+		MDSS_PLL_REG_W(dp_res->pll_base,
+			QSERDES_COM_DIV_FRAC_START2_MODE0, 0x80);
+		MDSS_PLL_REG_W(dp_res->pll_base,
+			QSERDES_COM_DIV_FRAC_START3_MODE0, 0x07);
+		MDSS_PLL_REG_W(dp_res->pll_base,
+			QSERDES_COM_CMN_CONFIG, 0x02);
+		MDSS_PLL_REG_W(dp_res->pll_base,
+			QSERDES_COM_LOCK_CMP1_MODE0, 0x3f);
+		MDSS_PLL_REG_W(dp_res->pll_base,
+			QSERDES_COM_LOCK_CMP2_MODE0, 0x38);
+		MDSS_PLL_REG_W(dp_res->pll_base,
+			QSERDES_COM_LOCK_CMP3_MODE0, 0x00);
+	} else if (rate == DP_VCO_HSCLK_RATE_5400MHZDIV1000) {
+		pr_debug("%s: VCO rate: %ld\n", __func__,
+				DP_VCO_RATE_10800MHZDIV1000);
+		MDSS_PLL_REG_W(dp_res->pll_base,
+			QSERDES_COM_SYS_CLK_CTRL, 0x06);
+		MDSS_PLL_REG_W(dp_res->pll_base,
+			QSERDES_COM_HSCLK_SEL, 0x80);
+		MDSS_PLL_REG_W(dp_res->pll_base,
+			QSERDES_COM_LOCK_CMP_EN, 0x08);
+		MDSS_PLL_REG_W(dp_res->pll_base,
+			QSERDES_COM_DEC_START_MODE0, 0x8c);
+		MDSS_PLL_REG_W(dp_res->pll_base,
+			QSERDES_COM_DIV_FRAC_START1_MODE0, 0x00);
+		MDSS_PLL_REG_W(dp_res->pll_base,
+			QSERDES_COM_DIV_FRAC_START2_MODE0, 0x00);
+		MDSS_PLL_REG_W(dp_res->pll_base,
+			QSERDES_COM_DIV_FRAC_START3_MODE0, 0x0a);
+		MDSS_PLL_REG_W(dp_res->pll_base,
+			QSERDES_COM_CMN_CONFIG, 0x12);
+		MDSS_PLL_REG_W(dp_res->pll_base,
+			QSERDES_COM_LOCK_CMP1_MODE0, 0x7f);
+		MDSS_PLL_REG_W(dp_res->pll_base,
+			QSERDES_COM_LOCK_CMP2_MODE0, 0x70);
+		MDSS_PLL_REG_W(dp_res->pll_base,
+			QSERDES_COM_LOCK_CMP3_MODE0, 0x00);
+	} else {
+		pr_err("%s: unsupported rate: %ld\n", __func__, rate);
+		return -EINVAL;
+	}
+	/* Make sure the PLL register writes are done */
+	wmb();
+
+	if ((rate == DP_VCO_HSCLK_RATE_1620MHZDIV1000)
+	    || (rate == DP_VCO_HSCLK_RATE_2700MHZDIV1000)) {
+		MDSS_PLL_REG_W(dp_res->phy_base,
+				DP_PHY_VCO_DIV, 0x1);
+	} else {
+		MDSS_PLL_REG_W(dp_res->phy_base,
+				DP_PHY_VCO_DIV, 0x2);
+	}
+	/* Make sure the PHY register writes are done */
+	wmb();
+
+	MDSS_PLL_REG_W(dp_res->pll_base,
+			QSERDES_COM_INTEGLOOP_GAIN0_MODE0, 0x3f);
+	MDSS_PLL_REG_W(dp_res->pll_base,
+			QSERDES_COM_INTEGLOOP_GAIN1_MODE0, 0x00);
+	MDSS_PLL_REG_W(dp_res->pll_base,
+			QSERDES_COM_VCO_TUNE_MAP, 0x00);
+
+	MDSS_PLL_REG_W(dp_res->pll_base,
+			QSERDES_COM_BG_TIMER, 0x00);
+	MDSS_PLL_REG_W(dp_res->pll_base,
+			QSERDES_COM_BG_TIMER, 0x0a);
+	MDSS_PLL_REG_W(dp_res->pll_base,
+			QSERDES_COM_CORECLK_DIV_MODE0, 0x05);
+	MDSS_PLL_REG_W(dp_res->pll_base,
+			QSERDES_COM_VCO_TUNE_CTRL, 0x00);
+	MDSS_PLL_REG_W(dp_res->pll_base,
+			QSERDES_COM_CP_CTRL_MODE0, 0x06);
+	MDSS_PLL_REG_W(dp_res->pll_base,
+			QSERDES_COM_PLL_CCTRL_MODE0, 0x36);
+	MDSS_PLL_REG_W(dp_res->pll_base,
+			QSERDES_COM_PLL_RCTRL_MODE0, 0x16);
+	MDSS_PLL_REG_W(dp_res->pll_base,
+			QSERDES_COM_PLL_IVCO, 0x07);
+	MDSS_PLL_REG_W(dp_res->pll_base,
+			QSERDES_COM_BIAS_EN_CLKBUFLR_EN, 0x37);
+	MDSS_PLL_REG_W(dp_res->pll_base,
+			QSERDES_COM_CORE_CLK_EN, 0x0f);
+
+	/* Make sure the PLL register writes are done */
+	wmb();
+
+	if (orientation == ORIENTATION_CC2)
+		MDSS_PLL_REG_W(dp_res->phy_base, DP_PHY_MODE, 0x48);
+	else
+		MDSS_PLL_REG_W(dp_res->phy_base, DP_PHY_MODE, 0x58);
+
+	MDSS_PLL_REG_W(dp_res->phy_base,
+			DP_PHY_TX0_TX1_LANE_CTL, 0x05);
+	MDSS_PLL_REG_W(dp_res->phy_base,
+			DP_PHY_TX2_TX3_LANE_CTL, 0x05);
+
+	MDSS_PLL_REG_W(dp_res->phy_base,
+			QSERDES_TX0_OFFSET + TXn_TRANSCEIVER_BIAS_EN,
+			0x1a);
+	MDSS_PLL_REG_W(dp_res->phy_base,
+			QSERDES_TX1_OFFSET + TXn_TRANSCEIVER_BIAS_EN,
+			0x1a);
+
+	MDSS_PLL_REG_W(dp_res->phy_base,
+			QSERDES_TX0_OFFSET + TXn_VMODE_CTRL1,
+			0x40);
+	MDSS_PLL_REG_W(dp_res->phy_base,
+			QSERDES_TX1_OFFSET + TXn_VMODE_CTRL1,
+			0x40);
+
+	MDSS_PLL_REG_W(dp_res->phy_base,
+			QSERDES_TX0_OFFSET + TXn_PRE_STALL_LDO_BOOST_EN,
+			0x30);
+	MDSS_PLL_REG_W(dp_res->phy_base,
+			QSERDES_TX1_OFFSET + TXn_PRE_STALL_LDO_BOOST_EN,
+			0x30);
+	MDSS_PLL_REG_W(dp_res->phy_base,
+			QSERDES_TX0_OFFSET + TXn_INTERFACE_SELECT,
+			0x3d);
+	MDSS_PLL_REG_W(dp_res->phy_base,
+			QSERDES_TX1_OFFSET + TXn_INTERFACE_SELECT,
+			0x3d);
+	MDSS_PLL_REG_W(dp_res->phy_base,
+			QSERDES_TX0_OFFSET + TXn_CLKBUF_ENABLE,
+			0x0f);
+	MDSS_PLL_REG_W(dp_res->phy_base,
+			QSERDES_TX1_OFFSET + TXn_CLKBUF_ENABLE,
+			0x0f);
+	MDSS_PLL_REG_W(dp_res->phy_base,
+			QSERDES_TX0_OFFSET + TXn_RESET_TSYNC_EN,
+			0x03);
+	MDSS_PLL_REG_W(dp_res->phy_base,
+			QSERDES_TX1_OFFSET + TXn_RESET_TSYNC_EN,
+			0x03);
+	MDSS_PLL_REG_W(dp_res->phy_base,
+			QSERDES_TX0_OFFSET + TXn_TRAN_DRVR_EMP_EN,
+			0x03);
+	MDSS_PLL_REG_W(dp_res->phy_base,
+			QSERDES_TX1_OFFSET + TXn_TRAN_DRVR_EMP_EN,
+			0x03);
+	MDSS_PLL_REG_W(dp_res->phy_base,
+			QSERDES_TX0_OFFSET + TXn_PARRATE_REC_DETECT_IDLE_EN,
+			0x00);
+	MDSS_PLL_REG_W(dp_res->phy_base,
+			QSERDES_TX1_OFFSET + TXn_PARRATE_REC_DETECT_IDLE_EN,
+			0x00);
+	MDSS_PLL_REG_W(dp_res->phy_base,
+			QSERDES_TX0_OFFSET + TXn_TX_INTERFACE_MODE,
+			0x00);
+	MDSS_PLL_REG_W(dp_res->phy_base,
+			QSERDES_TX1_OFFSET + TXn_TX_INTERFACE_MODE,
+			0x00);
+
+	MDSS_PLL_REG_W(dp_res->phy_base,
+			QSERDES_TX0_OFFSET + TXn_TX_BAND,
+			0x4);
+	MDSS_PLL_REG_W(dp_res->phy_base,
+			QSERDES_TX1_OFFSET + TXn_TX_BAND,
+			0x4);
+	/* Make sure the PHY register writes are done */
+	wmb();
+	return res;
+}
+
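+/*
+ * Poll the PLL C_READY and PHY ready status bits; used by the handoff path
+ * to decide whether the bootloader left the DP PLL running.
+ */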
+static bool dp_pll_lock_status(struct mdss_pll_resources *dp_res)
+{
+	u32 status;
+	bool pll_locked;
+
+	/* poll for PLL ready status */
+	if (readl_poll_timeout_atomic((dp_res->pll_base +
+			QSERDES_COM_C_READY_STATUS),
+			status,
+			((status & BIT(0)) > 0),
+			DP_PLL_POLL_SLEEP_US,
+			DP_PLL_POLL_TIMEOUT_US)) {
+		pr_err("%s: C_READY status is not high. Status=%x\n",
+				__func__, status);
+		pll_locked = false;
+	} else if (readl_poll_timeout_atomic((dp_res->pll_base +
+			DP_PHY_STATUS),
+			status,
+			((status & BIT(1)) > 0),
+			DP_PLL_POLL_SLEEP_US,
+			DP_PLL_POLL_TIMEOUT_US)) {
+		pr_err("%s: Phy_ready is not high. Status=%x\n",
+				__func__, status);
+		pll_locked = false;
+	} else {
+		pll_locked = true;
+	}
+
+	return pll_locked;
+}
+
+
+static int dp_pll_enable(struct clk *c)
+{
+	int rc = 0;
+	u32 status;
+	struct dp_pll_vco_clk *vco = mdss_dp_to_vco_clk(c);
+	struct mdss_pll_resources *dp_res = vco->priv;
+	u8 orientation, ln_cnt;
+	u32 spare_value, bias_en, drvr_en, lane_mode;
+
+	spare_value = MDSS_PLL_REG_R(dp_res->phy_base, DP_PHY_SPARE0);
+	ln_cnt = spare_value & 0x0F;
+	orientation = (spare_value & 0xF0) >> 4;
+
+	MDSS_PLL_REG_W(dp_res->phy_base,
+			DP_PHY_CFG, 0x01);
+	MDSS_PLL_REG_W(dp_res->phy_base,
+			DP_PHY_CFG, 0x05);
+	MDSS_PLL_REG_W(dp_res->phy_base,
+			DP_PHY_CFG, 0x01);
+	MDSS_PLL_REG_W(dp_res->phy_base,
+			DP_PHY_CFG, 0x09);
+	/* Make sure the PHY register writes are done */
+	wmb();
+	MDSS_PLL_REG_W(dp_res->pll_base,
+			QSERDES_COM_RESETSM_CNTRL, 0x20);
+	/* Make sure the PLL register writes are done */
+	wmb();
+	/* poll for PLL ready status */
+	if (readl_poll_timeout_atomic((dp_res->pll_base +
+			QSERDES_COM_C_READY_STATUS),
+			status,
+			((status & BIT(0)) > 0),
+			DP_PLL_POLL_SLEEP_US,
+			DP_PLL_POLL_TIMEOUT_US)) {
+		pr_err("%s: C_READY status is not high. Status=%x\n",
+				__func__, status);
+		rc = -EINVAL;
+		goto lock_err;
+	}
+
+	MDSS_PLL_REG_W(dp_res->phy_base,
+			DP_PHY_CFG, 0x19);
+	/* Make sure the PHY register writes are done */
+	wmb();
+	/* poll for PHY ready status */
+	if (readl_poll_timeout_atomic((dp_res->phy_base +
+			DP_PHY_STATUS),
+			status,
+			((status & BIT(1)) > 0),
+			DP_PLL_POLL_SLEEP_US,
+			DP_PLL_POLL_TIMEOUT_US)) {
+		pr_err("%s: Phy_ready is not high. Status=%x\n",
+				__func__, status);
+		rc = -EINVAL;
+		goto lock_err;
+	}
+
+	pr_debug("%s: PLL is locked\n", __func__);
+
+	if (ln_cnt == 1) {
+		bias_en = 0x3e;
+		drvr_en = 0x13;
+	} else {
+		bias_en = 0x3f;
+		drvr_en = 0x10;
+	}
+
+	if (ln_cnt != 4) {
+		if (orientation == ORIENTATION_CC1) {
+			MDSS_PLL_REG_W(dp_res->phy_base,
+			QSERDES_TX1_OFFSET + TXn_TRANSCEIVER_BIAS_EN,
+			bias_en);
+			MDSS_PLL_REG_W(dp_res->phy_base,
+			QSERDES_TX1_OFFSET + TXn_HIGHZ_DRVR_EN,
+			drvr_en);
+		} else {
+			MDSS_PLL_REG_W(dp_res->phy_base,
+			QSERDES_TX0_OFFSET + TXn_TRANSCEIVER_BIAS_EN,
+			bias_en);
+			MDSS_PLL_REG_W(dp_res->phy_base,
+			QSERDES_TX0_OFFSET + TXn_HIGHZ_DRVR_EN,
+			drvr_en);
+		}
+	} else {
+		MDSS_PLL_REG_W(dp_res->phy_base,
+			QSERDES_TX0_OFFSET + TXn_TRANSCEIVER_BIAS_EN,
+			bias_en);
+		MDSS_PLL_REG_W(dp_res->phy_base,
+			QSERDES_TX0_OFFSET + TXn_HIGHZ_DRVR_EN,
+			drvr_en);
+		MDSS_PLL_REG_W(dp_res->phy_base,
+			QSERDES_TX1_OFFSET + TXn_TRANSCEIVER_BIAS_EN,
+			bias_en);
+		MDSS_PLL_REG_W(dp_res->phy_base,
+			QSERDES_TX1_OFFSET + TXn_HIGHZ_DRVR_EN,
+			drvr_en);
+	}
+
+	MDSS_PLL_REG_W(dp_res->phy_base,
+			QSERDES_TX0_OFFSET + TXn_TX_POL_INV,
+			0x0a);
+	MDSS_PLL_REG_W(dp_res->phy_base,
+			QSERDES_TX1_OFFSET + TXn_TX_POL_INV,
+			0x0a);
+	MDSS_PLL_REG_W(dp_res->phy_base,
+			DP_PHY_CFG, 0x18);
+	udelay(2000);
+
+	MDSS_PLL_REG_W(dp_res->phy_base,
+			DP_PHY_CFG, 0x19);
+
+	/*
+	 * Make sure all the register writes are completed before
+	 * doing any other operation
+	 */
+	wmb();
+
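+	/* TX lane mode differs for the HBR (2.7 GHz) rate vs. other rates. */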
+	if (vco->rate == DP_VCO_HSCLK_RATE_2700MHZDIV1000)
+		lane_mode = 0xc6;
+	else
+		lane_mode = 0xf6;
+
+	MDSS_PLL_REG_W(dp_res->phy_base,
+			QSERDES_TX0_OFFSET + TXn_LANE_MODE_1,
+			lane_mode);
+	MDSS_PLL_REG_W(dp_res->phy_base,
+			QSERDES_TX1_OFFSET + TXn_LANE_MODE_1,
+			lane_mode);
+
+	MDSS_PLL_REG_W(dp_res->phy_base,
+			QSERDES_TX0_OFFSET + TXn_CLKBUF_ENABLE,
+			0x1f);
+	MDSS_PLL_REG_W(dp_res->phy_base,
+			QSERDES_TX1_OFFSET + TXn_CLKBUF_ENABLE,
+			0x1f);
+	MDSS_PLL_REG_W(dp_res->phy_base,
+			QSERDES_TX0_OFFSET + TXn_CLKBUF_ENABLE,
+			0x0f);
+	MDSS_PLL_REG_W(dp_res->phy_base,
+			QSERDES_TX1_OFFSET + TXn_CLKBUF_ENABLE,
+			0x0f);
+	/*
+	 * Make sure all the register writes are completed before
+	 * doing any other operation
+	 */
+	wmb();
+
+	MDSS_PLL_REG_W(dp_res->phy_base,
+			DP_PHY_CFG, 0x09);
+	udelay(2000);
+
+	MDSS_PLL_REG_W(dp_res->phy_base,
+			DP_PHY_CFG, 0x19);
+	udelay(2000);
+	/* poll for PHY ready status */
+	if (readl_poll_timeout_atomic((dp_res->phy_base +
+			DP_PHY_STATUS),
+			status,
+			((status & BIT(1)) > 0),
+			DP_PLL_POLL_SLEEP_US,
+			DP_PLL_POLL_TIMEOUT_US)) {
+		pr_err("%s: Lane_mode: Phy_ready is not high. Status=%x\n",
+				__func__, status);
+		rc = -EINVAL;
+		goto lock_err;
+	}
+
+	MDSS_PLL_REG_W(dp_res->phy_base,
+			QSERDES_TX0_OFFSET + TXn_TX_DRV_LVL,
+			0x2a);
+	MDSS_PLL_REG_W(dp_res->phy_base,
+			QSERDES_TX1_OFFSET + TXn_TX_DRV_LVL,
+			0x2a);
+	MDSS_PLL_REG_W(dp_res->phy_base,
+			QSERDES_TX0_OFFSET + TXn_TX_EMP_POST1_LVL,
+			0x20);
+	MDSS_PLL_REG_W(dp_res->phy_base,
+			QSERDES_TX1_OFFSET + TXn_TX_EMP_POST1_LVL,
+			0x20);
+	MDSS_PLL_REG_W(dp_res->phy_base,
+			QSERDES_TX0_OFFSET + TXn_RES_CODE_LANE_OFFSET_TX,
+			0x11);
+	MDSS_PLL_REG_W(dp_res->phy_base,
+			QSERDES_TX1_OFFSET + TXn_RES_CODE_LANE_OFFSET_TX,
+			0x11);
+	MDSS_PLL_REG_W(dp_res->phy_base,
+			QSERDES_TX0_OFFSET + TXn_RES_CODE_LANE_OFFSET_RX,
+			0x11);
+	MDSS_PLL_REG_W(dp_res->phy_base,
+			QSERDES_TX1_OFFSET + TXn_RES_CODE_LANE_OFFSET_RX,
+			0x11);
+	/* Make sure the PHY register writes are done */
+	wmb();
+
+lock_err:
+	return rc;
+}
+
+static int dp_pll_disable(struct clk *c)
+{
+	int rc = 0;
+	struct dp_pll_vco_clk *vco = mdss_dp_to_vco_clk(c);
+	struct mdss_pll_resources *dp_res = vco->priv;
+
+	/* Assert DP PHY power down */
+	MDSS_PLL_REG_W(dp_res->phy_base,
+			DP_PHY_PD_CTL, 0x2);
+	/*
+	 * Make sure all the register writes to disable PLL are
+	 * completed before doing any other operation
+	 */
+	wmb();
+
+	return rc;
+}
+
+
+int dp_vco_prepare(struct clk *c)
+{
+	int rc = 0;
+	struct dp_pll_vco_clk *vco = mdss_dp_to_vco_clk(c);
+	struct mdss_pll_resources *dp_pll_res = vco->priv;
+
+	DEV_DBG("rate=%ld\n", vco->rate);
+	rc = mdss_pll_resource_enable(dp_pll_res, true);
+	if (rc) {
+		pr_err("Failed to enable mdss DP pll resources\n");
+		goto error;
+	}
+
+	rc = dp_pll_enable(c);
+	if (rc) {
+		mdss_pll_resource_enable(dp_pll_res, false);
+		pr_err("ndx=%d failed to enable dp pll\n",
+					dp_pll_res->index);
+		goto error;
+	}
+
+	mdss_pll_resource_enable(dp_pll_res, false);
+error:
+	return rc;
+}
+
+void dp_vco_unprepare(struct clk *c)
+{
+	struct dp_pll_vco_clk *vco = mdss_dp_to_vco_clk(c);
+	struct mdss_pll_resources *io = vco->priv;
+
+	if (!io) {
+		DEV_ERR("Invalid input parameter\n");
+		return;
+	}
+
+	if (!io->pll_on &&
+		mdss_pll_resource_enable(io, true)) {
+		DEV_ERR("pll resource can't be enabled\n");
+		return;
+	}
+	dp_pll_disable(c);
+
+	io->handoff_resources = false;
+	mdss_pll_resource_enable(io, false);
+	io->pll_on = false;
+}
+
+int dp_vco_set_rate(struct clk *c, unsigned long rate)
+{
+	struct dp_pll_vco_clk *vco = mdss_dp_to_vco_clk(c);
+	struct mdss_pll_resources *io = vco->priv;
+	int rc;
+
+	rc = mdss_pll_resource_enable(io, true);
+	if (rc) {
+		DEV_ERR("pll resource can't be enabled\n");
+		return rc;
+	}
+
+	DEV_DBG("DP lane CLK rate=%ld\n", rate);
+
+	rc = dp_config_vco_rate(vco, rate);
+	if (rc)
+		DEV_ERR("%s: Failed to set clk rate\n", __func__);
+
+	mdss_pll_resource_enable(io, false);
+
+	vco->rate = rate;
+
+	return rc;
+}
+
+unsigned long dp_vco_get_rate(struct clk *c)
+{
+	struct dp_pll_vco_clk *vco = mdss_dp_to_vco_clk(c);
+	int rc;
+	u32 div, hsclk_div, link2xclk_div = 0;
+	u64 vco_rate;
+	struct mdss_pll_resources *pll = vco->priv;
+
+	rc = mdss_pll_resource_enable(pll, true);
+	if (rc) {
+		pr_err("Failed to enable mdss DP pll=%d\n", pll->index);
+		return rc;
+	}
+
+	div = MDSS_PLL_REG_R(pll->pll_base, QSERDES_COM_HSCLK_SEL);
+	div &= 0x0f;
+
+	if (div == 12)
+		hsclk_div = 5; /* Default */
+	else if (div == 4)
+		hsclk_div = 3;
+	else if (div == 0)
+		hsclk_div = 2;
+	else {
+		pr_debug("unknown divider. forcing to default\n");
+		hsclk_div = 5;
+	}
+
+	div = MDSS_PLL_REG_R(pll->phy_base, DP_PHY_MODE);
+
+	if (div & 0x58)
+		pr_err("%s: DP PAR Rate not correct\n", __func__);
+
+	if ((div & 0x3) == 1)
+		link2xclk_div = 10;
+	else if ((div & 0x3) == 0)
+		link2xclk_div = 5;
+	else
+		pr_err("%s: unsupported div. Phy_mode: %d\n", __func__, div);
+
+	if (link2xclk_div == 10) {
+		vco_rate = DP_VCO_HSCLK_RATE_2700MHZDIV1000;
+	} else {
+		if (hsclk_div == 5)
+			vco_rate = DP_VCO_HSCLK_RATE_1620MHZDIV1000;
+		else if (hsclk_div == 3)
+			vco_rate = DP_VCO_HSCLK_RATE_2700MHZDIV1000;
+		else
+			vco_rate = DP_VCO_HSCLK_RATE_5400MHZDIV1000;
+	}
+
+	pr_debug("returning vco rate = %lu\n", (unsigned long)vco_rate);
+
+	mdss_pll_resource_enable(pll, false);
+
+	return (unsigned long)vco_rate;
+}
+
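+/*
+ * Round up to the nearest supported VCO setting; the three rates correspond
+ * to the DP RBR (1.62 GHz), HBR (2.7 GHz) and HBR2 (5.4 GHz) link rates.
+ */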
+long dp_vco_round_rate(struct clk *c, unsigned long rate)
+{
+	unsigned long rrate = rate;
+	struct dp_pll_vco_clk *vco = mdss_dp_to_vco_clk(c);
+
+	if (rate <= vco->min_rate)
+		rrate = vco->min_rate;
+	else if (rate <= DP_VCO_HSCLK_RATE_2700MHZDIV1000)
+		rrate = DP_VCO_HSCLK_RATE_2700MHZDIV1000;
+	else
+		rrate = vco->max_rate;
+
+	pr_debug("%s: rrate=%ld\n", __func__, rrate);
+
+	return rrate;
+}
+
+enum handoff dp_vco_handoff(struct clk *c)
+{
+	enum handoff ret = HANDOFF_DISABLED_CLK;
+	struct dp_pll_vco_clk *vco = mdss_dp_to_vco_clk(c);
+	struct mdss_pll_resources *io = vco->priv;
+
+	if (mdss_pll_resource_enable(io, true)) {
+		DEV_ERR("pll resource can't be enabled\n");
+		return ret;
+	}
+
+	if (dp_pll_lock_status(io)) {
+		io->pll_on = true;
+		c->rate = dp_vco_get_rate(c);
+		io->handoff_resources = true;
+		ret = HANDOFF_ENABLED_CLK;
+	} else {
+		mdss_pll_resource_enable(io, false);
+		ret = HANDOFF_DISABLED_CLK;
+		DEV_DBG("%s: PLL not locked\n", __func__);
+	}
+
+	DEV_DBG("done, ret=%d\n", ret);
+	return ret;
+}
diff -Nruw linux-4.4.115-fbx/drivers/clk/msm./mdss/mdss-dp-pll.h linux-4.4.115-fbx/drivers/clk/msm/mdss/mdss-dp-pll.h
--- linux-4.4.115-fbx/drivers/clk/msm./mdss/mdss-dp-pll.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/clk/msm/mdss/mdss-dp-pll.h	2019-01-22 16:16:23.019242024 +0100
@@ -0,0 +1,36 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __MDSS_DP_PLL_H
+#define __MDSS_DP_PLL_H
+
+struct dp_pll_vco_clk {
+	unsigned long	rate;		/* current vco rate */
+	u64		min_rate;	/* min vco rate */
+	u64		max_rate;	/* max vco rate */
+	void		*priv;
+
+	struct clk	c;
+};
+
+static inline struct dp_pll_vco_clk *mdss_dp_to_vco_clk(struct clk *clk)
+{
+	return container_of(clk, struct dp_pll_vco_clk, c);
+}
+
+int dp_pll_clock_register_8998(struct platform_device *pdev,
+				struct mdss_pll_resources *pll_res);
+
+
+#endif /* __MDSS_DP_PLL_H */
+
diff -Nruw linux-4.4.115-fbx/drivers/clk/msm./mdss/mdss-dsi-pll-8996.c linux-4.4.115-fbx/drivers/clk/msm/mdss/mdss-dsi-pll-8996.c
--- linux-4.4.115-fbx/drivers/clk/msm./mdss/mdss-dsi-pll-8996.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/clk/msm/mdss/mdss-dsi-pll-8996.c	2019-01-22 16:16:23.019242024 +0100
@@ -0,0 +1,566 @@
+/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt)	"%s: " fmt, __func__
+
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/delay.h>
+#include <linux/clk/msm-clk-provider.h>
+#include <linux/clk/msm-clk.h>
+#include <linux/workqueue.h>
+#include <linux/clk/msm-clock-generic.h>
+#include <dt-bindings/clock/msm-clocks-8996.h>
+
+#include "mdss-pll.h"
+#include "mdss-dsi-pll.h"
+#include "mdss-dsi-pll-8996.h"
+
+#define VCO_DELAY_USEC		1
+
+static struct dsi_pll_db pll_db[DSI_PLL_NUM];
+
+static struct clk_ops n2_clk_src_ops;
+static struct clk_ops shadow_n2_clk_src_ops;
+static struct clk_ops byte_clk_src_ops;
+static struct clk_ops post_n1_div_clk_src_ops;
+static struct clk_ops shadow_post_n1_div_clk_src_ops;
+
+static struct clk_ops clk_ops_gen_mux_dsi;
+
+/* Op structures */
+static struct clk_ops clk_ops_dsi_vco = {
+	.set_rate = pll_vco_set_rate_8996,
+	.round_rate = pll_vco_round_rate_8996,
+	.handoff = pll_vco_handoff_8996,
+	.prepare = pll_vco_prepare_8996,
+	.unprepare = pll_vco_unprepare_8996,
+};
+
+static struct clk_div_ops post_n1_div_ops = {
+	.set_div = post_n1_div_set_div,
+	.get_div = post_n1_div_get_div,
+};
+
+static struct clk_div_ops n2_div_ops = {	/* hr_oclk3 */
+	.set_div = n2_div_set_div,
+	.get_div = n2_div_get_div,
+};
+
+static struct clk_mux_ops mdss_byte_mux_ops = {
+	.set_mux_sel = set_mdss_byte_mux_sel_8996,
+	.get_mux_sel = get_mdss_byte_mux_sel_8996,
+};
+
+static struct clk_mux_ops mdss_pixel_mux_ops = {
+	.set_mux_sel = set_mdss_pixel_mux_sel_8996,
+	.get_mux_sel = get_mdss_pixel_mux_sel_8996,
+};
+
+/* Shadow ops for dynamic refresh */
+static struct clk_ops clk_ops_shadow_dsi_vco = {
+	.set_rate = shadow_pll_vco_set_rate_8996,
+	.round_rate = pll_vco_round_rate_8996,
+	.handoff = shadow_pll_vco_handoff_8996,
+};
+
+static struct clk_div_ops shadow_post_n1_div_ops = {
+	.set_div = post_n1_div_set_div,
+};
+
+static struct clk_div_ops shadow_n2_div_ops = {
+	.set_div = shadow_n2_div_set_div,
+};
+
+static struct dsi_pll_vco_clk dsi0pll_vco_clk = {
+	.ref_clk_rate = 19200000UL,
+	.min_rate = 1300000000UL,
+	.max_rate = 2600000000UL,
+	.pll_en_seq_cnt = 1,
+	.pll_enable_seqs[0] = dsi_pll_enable_seq_8996,
+	.c = {
+		.dbg_name = "dsi0pll_vco_clk_8996",
+		.ops = &clk_ops_dsi_vco,
+		CLK_INIT(dsi0pll_vco_clk.c),
+	},
+};
+
+static struct dsi_pll_vco_clk dsi0pll_shadow_vco_clk = {
+	.ref_clk_rate = 19200000u,
+	.min_rate = 1300000000u,
+	.max_rate = 2600000000u,
+	.c = {
+		.dbg_name = "dsi0pll_shadow_vco_clk",
+		.ops = &clk_ops_shadow_dsi_vco,
+		CLK_INIT(dsi0pll_shadow_vco_clk.c),
+	},
+};
+
+static struct dsi_pll_vco_clk dsi1pll_vco_clk = {
+	.ref_clk_rate = 19200000UL,
+	.min_rate = 1300000000UL,
+	.max_rate = 2600000000UL,
+	.pll_en_seq_cnt = 1,
+	.pll_enable_seqs[0] = dsi_pll_enable_seq_8996,
+	.c = {
+		.dbg_name = "dsi1pll_vco_clk_8996",
+		.ops = &clk_ops_dsi_vco,
+		CLK_INIT(dsi1pll_vco_clk.c),
+	},
+};
+
+static struct dsi_pll_vco_clk dsi1pll_shadow_vco_clk = {
+	.ref_clk_rate = 19200000u,
+	.min_rate = 1300000000u,
+	.max_rate = 2600000000u,
+	.pll_en_seq_cnt = 1,
+	.pll_enable_seqs[0] = dsi_pll_enable_seq_8996,
+	.c = {
+		.dbg_name = "dsi1pll_shadow_vco_clk",
+		.ops = &clk_ops_shadow_dsi_vco,
+		CLK_INIT(dsi1pll_shadow_vco_clk.c),
+	},
+};
+
+static struct div_clk dsi0pll_post_n1_div_clk = {
+	.data = {
+		.max_div = 15,
+		.min_div = 1,
+	},
+	.ops = &post_n1_div_ops,
+	.c = {
+		.parent = &dsi0pll_vco_clk.c,
+		.dbg_name = "dsi0pll_post_n1_div_clk",
+		.ops = &post_n1_div_clk_src_ops,
+		.flags = CLKFLAG_NO_RATE_CACHE,
+		CLK_INIT(dsi0pll_post_n1_div_clk.c),
+	},
+};
+
+static struct div_clk dsi0pll_shadow_post_n1_div_clk = {
+	.data = {
+		.max_div = 15,
+		.min_div = 1,
+	},
+	.ops = &shadow_post_n1_div_ops,
+	.c = {
+		.parent = &dsi0pll_shadow_vco_clk.c,
+		.dbg_name = "dsi0pll_shadow_post_n1_div_clk",
+		.ops = &shadow_post_n1_div_clk_src_ops,
+		.flags = CLKFLAG_NO_RATE_CACHE,
+		CLK_INIT(dsi0pll_shadow_post_n1_div_clk.c),
+	},
+};
+
+static struct div_clk dsi1pll_post_n1_div_clk = {
+	.data = {
+		.max_div = 15,
+		.min_div = 1,
+	},
+	.ops = &post_n1_div_ops,
+	.c = {
+		.parent = &dsi1pll_vco_clk.c,
+		.dbg_name = "dsi1pll_post_n1_div_clk",
+		.ops = &post_n1_div_clk_src_ops,
+		.flags = CLKFLAG_NO_RATE_CACHE,
+		CLK_INIT(dsi1pll_post_n1_div_clk.c),
+	},
+};
+
+static struct div_clk dsi1pll_shadow_post_n1_div_clk = {
+	.data = {
+		.max_div = 15,
+		.min_div = 1,
+	},
+	.ops = &shadow_post_n1_div_ops,
+	.c = {
+		.parent = &dsi1pll_shadow_vco_clk.c,
+		.dbg_name = "dsi1pll_shadow_post_n1_div_clk",
+		.ops = &shadow_post_n1_div_clk_src_ops,
+		.flags = CLKFLAG_NO_RATE_CACHE,
+		CLK_INIT(dsi1pll_shadow_post_n1_div_clk.c),
+	},
+};
+
+static struct div_clk dsi0pll_n2_div_clk = {
+	.data = {
+		.max_div = 15,
+		.min_div = 1,
+	},
+	.ops = &n2_div_ops,
+	.c = {
+		.parent = &dsi0pll_post_n1_div_clk.c,
+		.dbg_name = "dsi0pll_n2_div_clk",
+		.ops = &n2_clk_src_ops,
+		.flags = CLKFLAG_NO_RATE_CACHE,
+		CLK_INIT(dsi0pll_n2_div_clk.c),
+	},
+};
+
+static struct div_clk dsi0pll_shadow_n2_div_clk = {
+	.data = {
+		.max_div = 15,
+		.min_div = 1,
+	},
+	.ops = &shadow_n2_div_ops,
+	.c = {
+		.parent = &dsi0pll_shadow_post_n1_div_clk.c,
+		.dbg_name = "dsi0pll_shadow_n2_div_clk",
+		.ops = &shadow_n2_clk_src_ops,
+		.flags = CLKFLAG_NO_RATE_CACHE,
+		CLK_INIT(dsi0pll_shadow_n2_div_clk.c),
+	},
+};
+
+static struct div_clk dsi1pll_n2_div_clk = {
+	.data = {
+		.max_div = 15,
+		.min_div = 1,
+	},
+	.ops = &n2_div_ops,
+	.c = {
+		.parent = &dsi1pll_post_n1_div_clk.c,
+		.dbg_name = "dsi1pll_n2_div_clk",
+		.ops = &n2_clk_src_ops,
+		.flags = CLKFLAG_NO_RATE_CACHE,
+		CLK_INIT(dsi1pll_n2_div_clk.c),
+	},
+};
+
+static struct div_clk dsi1pll_shadow_n2_div_clk = {
+	.data = {
+		.max_div = 15,
+		.min_div = 1,
+	},
+	.ops = &shadow_n2_div_ops,
+	.c = {
+		.parent = &dsi1pll_shadow_post_n1_div_clk.c,
+		.dbg_name = "dsi1pll_shadow_n2_div_clk",
+		.ops = &shadow_n2_clk_src_ops,
+		.flags = CLKFLAG_NO_RATE_CACHE,
+		CLK_INIT(dsi1pll_shadow_n2_div_clk.c),
+	},
+};
+
+static struct div_clk dsi0pll_pixel_clk_src = {
+	.data = {
+		.div = 2,
+		.min_div = 2,
+		.max_div = 2,
+	},
+	.c = {
+		.parent = &dsi0pll_n2_div_clk.c,
+		.dbg_name = "dsi0pll_pixel_clk_src",
+		.ops = &clk_ops_div,
+		.flags = CLKFLAG_NO_RATE_CACHE,
+		CLK_INIT(dsi0pll_pixel_clk_src.c),
+	},
+};
+
+static struct div_clk dsi0pll_shadow_pixel_clk_src = {
+	.data = {
+		.div = 2,
+		.min_div = 2,
+		.max_div = 2,
+	},
+	.c = {
+		.parent = &dsi0pll_shadow_n2_div_clk.c,
+		.dbg_name = "dsi0pll_shadow_pixel_clk_src",
+		.ops = &clk_ops_div,
+		.flags = CLKFLAG_NO_RATE_CACHE,
+		CLK_INIT(dsi0pll_shadow_pixel_clk_src.c),
+	},
+};
+
+static struct div_clk dsi1pll_pixel_clk_src = {
+	.data = {
+		.div = 2,
+		.min_div = 2,
+		.max_div = 2,
+	},
+	.c = {
+		.parent = &dsi1pll_n2_div_clk.c,
+		.dbg_name = "dsi1pll_pixel_clk_src",
+		.ops = &clk_ops_div,
+		.flags = CLKFLAG_NO_RATE_CACHE,
+		CLK_INIT(dsi1pll_pixel_clk_src.c),
+	},
+};
+
+static struct div_clk dsi1pll_shadow_pixel_clk_src = {
+	.data = {
+		.div = 2,
+		.min_div = 2,
+		.max_div = 2,
+	},
+	.c = {
+		.parent = &dsi1pll_shadow_n2_div_clk.c,
+		.dbg_name = "dsi1pll_shadow_pixel_clk_src",
+		.ops = &clk_ops_div,
+		.flags = CLKFLAG_NO_RATE_CACHE,
+		CLK_INIT(dsi1pll_shadow_pixel_clk_src.c),
+	},
+};
+
+static struct mux_clk dsi0pll_pixel_clk_mux = {
+	.num_parents = 2,
+	.parents = (struct clk_src[]) {
+		{&dsi0pll_pixel_clk_src.c, 0},
+		{&dsi0pll_shadow_pixel_clk_src.c, 1},
+	},
+	.ops = &mdss_pixel_mux_ops,
+	.c = {
+		.parent = &dsi0pll_pixel_clk_src.c,
+		.dbg_name = "dsi0pll_pixel_clk_mux",
+		.ops = &clk_ops_gen_mux_dsi,
+		.flags = CLKFLAG_NO_RATE_CACHE,
+		CLK_INIT(dsi0pll_pixel_clk_mux.c),
+	}
+};
+
+static struct mux_clk dsi1pll_pixel_clk_mux = {
+	.num_parents = 2,
+	.parents = (struct clk_src[]) {
+		{&dsi1pll_pixel_clk_src.c, 0},
+		{&dsi1pll_shadow_pixel_clk_src.c, 1},
+	},
+	.ops = &mdss_pixel_mux_ops,
+	.c = {
+		.parent = &dsi1pll_pixel_clk_src.c,
+		.dbg_name = "dsi1pll_pixel_clk_mux",
+		.ops = &clk_ops_gen_mux_dsi,
+		.flags = CLKFLAG_NO_RATE_CACHE,
+		CLK_INIT(dsi1pll_pixel_clk_mux.c),
+	}
+};
+
+static struct div_clk dsi0pll_byte_clk_src = {
+	.data = {
+		.div = 8,
+		.min_div = 8,
+		.max_div = 8,
+	},
+	.c = {
+		.parent = &dsi0pll_post_n1_div_clk.c,
+		.dbg_name = "dsi0pll_byte_clk_src",
+		.ops = &clk_ops_div,
+		CLK_INIT(dsi0pll_byte_clk_src.c),
+	},
+};
+
+static struct div_clk dsi0pll_shadow_byte_clk_src = {
+	.data = {
+		.div = 8,
+		.min_div = 8,
+		.max_div = 8,
+	},
+	.c = {
+		.parent = &dsi0pll_shadow_post_n1_div_clk.c,
+		.dbg_name = "dsi0pll_shadow_byte_clk_src",
+		.ops = &clk_ops_div,
+		CLK_INIT(dsi0pll_shadow_byte_clk_src.c),
+	},
+};
+
+static struct div_clk dsi1pll_byte_clk_src = {
+	.data = {
+		.div = 8,
+		.min_div = 8,
+		.max_div = 8,
+	},
+	.c = {
+		.parent = &dsi1pll_post_n1_div_clk.c,
+		.dbg_name = "dsi1pll_byte_clk_src",
+		.ops = &clk_ops_div,
+		CLK_INIT(dsi1pll_byte_clk_src.c),
+	},
+};
+
+static struct div_clk dsi1pll_shadow_byte_clk_src = {
+	.data = {
+		.div = 8,
+		.min_div = 8,
+		.max_div = 8,
+	},
+	.c = {
+		.parent = &dsi1pll_shadow_post_n1_div_clk.c,
+		.dbg_name = "dsi1pll_shadow_byte_clk_src",
+		.ops = &clk_ops_div,
+		CLK_INIT(dsi1pll_shadow_byte_clk_src.c),
+	},
+};
+
+static struct mux_clk dsi0pll_byte_clk_mux = {
+	.num_parents = 2,
+	.parents = (struct clk_src[]) {
+		{&dsi0pll_byte_clk_src.c, 0},
+		{&dsi0pll_shadow_byte_clk_src.c, 1},
+	},
+	.ops = &mdss_byte_mux_ops,
+	.c = {
+		.parent = &dsi0pll_byte_clk_src.c,
+		.dbg_name = "dsi0pll_byte_clk_mux",
+		.ops = &clk_ops_gen_mux_dsi,
+		.flags = CLKFLAG_NO_RATE_CACHE,
+		CLK_INIT(dsi0pll_byte_clk_mux.c),
+	}
+};
+static struct mux_clk dsi1pll_byte_clk_mux = {
+	.num_parents = 2,
+	.parents = (struct clk_src[]) {
+		{&dsi1pll_byte_clk_src.c, 0},
+		{&dsi1pll_shadow_byte_clk_src.c, 1},
+	},
+	.ops = &mdss_byte_mux_ops,
+	.c = {
+		.parent = &dsi1pll_byte_clk_src.c,
+		.dbg_name = "dsi1pll_byte_clk_mux",
+		.ops = &clk_ops_gen_mux_dsi,
+		.flags = CLKFLAG_NO_RATE_CACHE,
+		CLK_INIT(dsi1pll_byte_clk_mux.c),
+	}
+};
+
+static struct clk_lookup mdss_dsi_pllcc_8996[] = {
+	CLK_LIST(dsi0pll_byte_clk_mux),
+	CLK_LIST(dsi0pll_byte_clk_src),
+	CLK_LIST(dsi0pll_pixel_clk_mux),
+	CLK_LIST(dsi0pll_pixel_clk_src),
+	CLK_LIST(dsi0pll_n2_div_clk),
+	CLK_LIST(dsi0pll_post_n1_div_clk),
+	CLK_LIST(dsi0pll_vco_clk),
+	CLK_LIST(dsi0pll_shadow_byte_clk_src),
+	CLK_LIST(dsi0pll_shadow_pixel_clk_src),
+	CLK_LIST(dsi0pll_shadow_n2_div_clk),
+	CLK_LIST(dsi0pll_shadow_post_n1_div_clk),
+	CLK_LIST(dsi0pll_shadow_vco_clk),
+};
+
+static struct clk_lookup mdss_dsi_pllcc_8996_1[] = {
+	CLK_LIST(dsi1pll_byte_clk_mux),
+	CLK_LIST(dsi1pll_byte_clk_src),
+	CLK_LIST(dsi1pll_pixel_clk_mux),
+	CLK_LIST(dsi1pll_pixel_clk_src),
+	CLK_LIST(dsi1pll_n2_div_clk),
+	CLK_LIST(dsi1pll_post_n1_div_clk),
+	CLK_LIST(dsi1pll_vco_clk),
+	CLK_LIST(dsi1pll_shadow_byte_clk_src),
+	CLK_LIST(dsi1pll_shadow_pixel_clk_src),
+	CLK_LIST(dsi1pll_shadow_n2_div_clk),
+	CLK_LIST(dsi1pll_shadow_post_n1_div_clk),
+	CLK_LIST(dsi1pll_shadow_vco_clk),
+};
+
+int dsi_pll_clock_register_8996(struct platform_device *pdev,
+				struct mdss_pll_resources *pll_res)
+{
+	int rc = 0, ndx;
+	int const ssc_freq_default = 31500; /* default h/w recommended value */
+	int const ssc_ppm_default = 5000; /* default h/w recommended value */
+	struct dsi_pll_db *pdb;
+
+	if (!pdev || !pdev->dev.of_node) {
+		pr_err("Invalid input parameters\n");
+		return -EINVAL;
+	}
+
+	if (!pll_res || !pll_res->pll_base) {
+		pr_err("Invalid PLL resources\n");
+		return -EPROBE_DEFER;
+	}
+
+	if (pll_res->index >= DSI_PLL_NUM) {
+		pr_err("pll ndx=%d is NOT supported\n", pll_res->index);
+		return -EINVAL;
+	}
+
+	ndx = pll_res->index;
+	pdb = &pll_db[ndx];
+	pll_res->priv = pdb;
+	pdb->pll = pll_res;
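+	/*
+	 * The pll_db entries are cross-linked below so that
+	 * pll_source_setup() can reach the companion PLL via
+	 * pdb->next->pll in split-display (master/slave) configurations.
+	 */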
+	ndx++;
+	ndx %= DSI_PLL_NUM;
+	pdb->next = &pll_db[ndx];
+
+	/* Set clock source operations */
+
+	/* hr_oclk3, pixel */
+	n2_clk_src_ops = clk_ops_slave_div;
+	n2_clk_src_ops.prepare = mdss_pll_div_prepare;
+
+	shadow_n2_clk_src_ops = clk_ops_slave_div;
+
+	/* hr_ockl2, byte, vco pll */
+	post_n1_div_clk_src_ops = clk_ops_div;
+	post_n1_div_clk_src_ops.prepare = mdss_pll_div_prepare;
+
+	shadow_post_n1_div_clk_src_ops = clk_ops_div;
+
+	byte_clk_src_ops = clk_ops_div;
+	byte_clk_src_ops.prepare = mdss_pll_div_prepare;
+
+	clk_ops_gen_mux_dsi = clk_ops_gen_mux;
+	clk_ops_gen_mux_dsi.round_rate = parent_round_rate;
+	clk_ops_gen_mux_dsi.set_rate = parent_set_rate;
+
+	if (pll_res->ssc_en) {
+		if (!pll_res->ssc_freq)
+			pll_res->ssc_freq = ssc_freq_default;
+		if (!pll_res->ssc_ppm)
+			pll_res->ssc_ppm = ssc_ppm_default;
+	}
+
+	/* Set client data to mux, div and vco clocks.  */
+	if (pll_res->index == DSI_PLL_1) {
+		dsi1pll_byte_clk_src.priv = pll_res;
+		dsi1pll_pixel_clk_src.priv = pll_res;
+		dsi1pll_post_n1_div_clk.priv = pll_res;
+		dsi1pll_n2_div_clk.priv = pll_res;
+		dsi1pll_vco_clk.priv = pll_res;
+
+		dsi1pll_shadow_byte_clk_src.priv = pll_res;
+		dsi1pll_shadow_pixel_clk_src.priv = pll_res;
+		dsi1pll_shadow_post_n1_div_clk.priv = pll_res;
+		dsi1pll_shadow_n2_div_clk.priv = pll_res;
+		dsi1pll_shadow_vco_clk.priv = pll_res;
+
+		pll_res->vco_delay = VCO_DELAY_USEC;
+		rc = of_msm_clock_register(pdev->dev.of_node,
+				mdss_dsi_pllcc_8996_1,
+				ARRAY_SIZE(mdss_dsi_pllcc_8996_1));
+	} else {
+		dsi0pll_byte_clk_src.priv = pll_res;
+		dsi0pll_pixel_clk_src.priv = pll_res;
+		dsi0pll_post_n1_div_clk.priv = pll_res;
+		dsi0pll_n2_div_clk.priv = pll_res;
+		dsi0pll_vco_clk.priv = pll_res;
+
+		dsi0pll_shadow_byte_clk_src.priv = pll_res;
+		dsi0pll_shadow_pixel_clk_src.priv = pll_res;
+		dsi0pll_shadow_post_n1_div_clk.priv = pll_res;
+		dsi0pll_shadow_n2_div_clk.priv = pll_res;
+		dsi0pll_shadow_vco_clk.priv = pll_res;
+
+		pll_res->vco_delay = VCO_DELAY_USEC;
+		rc = of_msm_clock_register(pdev->dev.of_node,
+				mdss_dsi_pllcc_8996,
+				ARRAY_SIZE(mdss_dsi_pllcc_8996));
+	}
+
+	if (!rc) {
+		pr_info("Registered DSI PLL ndx=%d clocks successfully\n",
+						pll_res->index);
+	}
+
+	return rc;
+}
diff -Nruw linux-4.4.115/drivers/clk/msm/mdss/mdss-dsi-pll-8996.h linux-4.4.115-fbx/drivers/clk/msm/mdss/mdss-dsi-pll-8996.h
--- linux-4.4.115/drivers/clk/msm/mdss/mdss-dsi-pll-8996.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/clk/msm/mdss/mdss-dsi-pll-8996.h	2019-01-22 16:16:23.019242024 +0100
@@ -0,0 +1,221 @@
+/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef MDSS_DSI_PLL_8996_H
+#define MDSS_DSI_PLL_8996_H
+
+#define DSIPHY_CMN_CLK_CFG0		0x0010
+#define DSIPHY_CMN_CLK_CFG1		0x0014
+#define DSIPHY_CMN_GLBL_TEST_CTRL	0x0018
+
+#define DSIPHY_CMN_PLL_CNTRL		0x0048
+#define DSIPHY_CMN_CTRL_0		0x001c
+#define DSIPHY_CMN_CTRL_1		0x0020
+
+#define DSIPHY_CMN_LDO_CNTRL		0x004c
+
+#define DSIPHY_PLL_IE_TRIM		0x0400
+#define DSIPHY_PLL_IP_TRIM		0x0404
+
+#define DSIPHY_PLL_IPTAT_TRIM		0x0410
+
+#define DSIPHY_PLL_CLKBUFLR_EN		0x041c
+
+#define DSIPHY_PLL_SYSCLK_EN_RESET	0x0428
+#define DSIPHY_PLL_RESETSM_CNTRL	0x042c
+#define DSIPHY_PLL_RESETSM_CNTRL2	0x0430
+#define DSIPHY_PLL_RESETSM_CNTRL3	0x0434
+#define DSIPHY_PLL_RESETSM_CNTRL4	0x0438
+#define DSIPHY_PLL_RESETSM_CNTRL5	0x043c
+#define DSIPHY_PLL_KVCO_DIV_REF1	0x0440
+#define DSIPHY_PLL_KVCO_DIV_REF2	0x0444
+#define DSIPHY_PLL_KVCO_COUNT1		0x0448
+#define DSIPHY_PLL_KVCO_COUNT2		0x044c
+#define DSIPHY_PLL_VREF_CFG1		0x045c
+
+#define DSIPHY_PLL_KVCO_CODE		0x0458
+
+#define DSIPHY_PLL_VCO_DIV_REF1		0x046c
+#define DSIPHY_PLL_VCO_DIV_REF2		0x0470
+#define DSIPHY_PLL_VCO_COUNT1		0x0474
+#define DSIPHY_PLL_VCO_COUNT2		0x0478
+#define DSIPHY_PLL_PLLLOCK_CMP1		0x047c
+#define DSIPHY_PLL_PLLLOCK_CMP2		0x0480
+#define DSIPHY_PLL_PLLLOCK_CMP3		0x0484
+#define DSIPHY_PLL_PLLLOCK_CMP_EN	0x0488
+#define DSIPHY_PLL_PLL_VCO_TUNE		0x048C
+#define DSIPHY_PLL_DEC_START		0x0490
+#define DSIPHY_PLL_SSC_EN_CENTER	0x0494
+#define DSIPHY_PLL_SSC_ADJ_PER1		0x0498
+#define DSIPHY_PLL_SSC_ADJ_PER2		0x049c
+#define DSIPHY_PLL_SSC_PER1		0x04a0
+#define DSIPHY_PLL_SSC_PER2		0x04a4
+#define DSIPHY_PLL_SSC_STEP_SIZE1	0x04a8
+#define DSIPHY_PLL_SSC_STEP_SIZE2	0x04ac
+#define DSIPHY_PLL_DIV_FRAC_START1	0x04b4
+#define DSIPHY_PLL_DIV_FRAC_START2	0x04b8
+#define DSIPHY_PLL_DIV_FRAC_START3	0x04bc
+#define DSIPHY_PLL_TXCLK_EN		0x04c0
+#define DSIPHY_PLL_PLL_CRCTRL		0x04c4
+
+#define DSIPHY_PLL_RESET_SM_READY_STATUS 0x04cc
+
+#define DSIPHY_PLL_PLL_MISC1		0x04e8
+
+#define DSIPHY_PLL_CP_SET_CUR		0x04f0
+#define DSIPHY_PLL_PLL_ICPMSET		0x04f4
+#define DSIPHY_PLL_PLL_ICPCSET		0x04f8
+#define DSIPHY_PLL_PLL_ICP_SET		0x04fc
+#define DSIPHY_PLL_PLL_LPF1		0x0500
+#define DSIPHY_PLL_PLL_LPF2_POSTDIV	0x0504
+#define DSIPHY_PLL_PLL_BANDGAP	0x0508
+
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL15		0x050
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL19		0x060
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL20		0x064
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL21		0x068
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL22		0x06C
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL23		0x070
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL24		0x074
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL25		0x078
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL26		0x07C
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL27		0x080
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL28		0x084
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL29		0x088
+#define DSI_DYNAMIC_REFRESH_PLL_UPPER_ADDR	0x094
+#define DSI_DYNAMIC_REFRESH_PLL_UPPER_ADDR2	0x098
+
+struct dsi_pll_input {
+	u32 fref;	/* 19.2 Mhz, reference clk */
+	u32 fdata;	/* bit clock rate */
+	u32 dsiclk_sel; /* 1, reg: 0x0014 */
+	u32 n2div;	/* 1, reg: 0x0010, bit 4-7 */
+	u32 ssc_en;	/* 1, reg: 0x0494, bit 0 */
+	u32 ldo_en;	/* 0,  reg: 0x004c, bit 0 */
+
+	/* fixed  */
+	u32 refclk_dbler_en;	/* 0, reg: 0x04c0, bit 1 */
+	u32 vco_measure_time;	/* 5, unknown */
+	u32 kvco_measure_time;	/* 5, unknown */
+	u32 bandgap_timer;	/* 4, reg: 0x0430, bit 3 - 5 */
+	u32 pll_wakeup_timer;	/* 5, reg: 0x043c, bit 0 - 2 */
+	u32 plllock_cnt;	/* 1, reg: 0x0488, bit 1 - 2 */
+	u32 plllock_rng;	/* 1, reg: 0x0488, bit 3 - 4 */
+	u32 ssc_center;		/* 0, reg: 0x0494, bit 1 */
+	u32 ssc_adj_period;	/* 37, reg: 0x498, bit 0 - 9 */
+	u32 ssc_spread;		/* 0.005  */
+	u32 ssc_freq;		/* unknown */
+	u32 pll_ie_trim;	/* 4, reg: 0x0400 */
+	u32 pll_ip_trim;	/* 4, reg: 0x0404 */
+	u32 pll_iptat_trim;	/* reg: 0x0410 */
+	u32 pll_cpcset_cur;	/* 1, reg: 0x04f0, bit 0 - 2 */
+	u32 pll_cpmset_cur;	/* 1, reg: 0x04f0, bit 3 - 5 */
+
+	u32 pll_icpmset;	/* 4, reg: 0x04fc, bit 3 - 5 */
+	u32 pll_icpcset;	/* 4, reg: 0x04fc, bit 0 - 2 */
+
+	u32 pll_icpmset_p;	/* 0, reg: 0x04f4, bit 0 - 2 */
+	u32 pll_icpmset_m;	/* 0, reg: 0x04f4, bit 3 - 5 */
+
+	u32 pll_icpcset_p;	/* 0, reg: 0x04f8, bit 0 - 2 */
+	u32 pll_icpcset_m;	/* 0, reg: 0x04f8, bit 3 - 5 */
+
+	u32 pll_lpf_res1;	/* 3, reg: 0x0504, bit 0 - 3 */
+	u32 pll_lpf_cap1;	/* 11, reg: 0x0500, bit 0 - 3 */
+	u32 pll_lpf_cap2;	/* 1, reg: 0x0500, bit 4 - 7 */
+	u32 pll_c3ctrl;		/* 2, reg: 0x04c4 */
+	u32 pll_r3ctrl;		/* 1, reg: 0x04c4 */
+};
+
+struct dsi_pll_output {
+	u32 pll_txclk_en;	/* reg: 0x04c0 */
+	u32 dec_start;		/* reg: 0x0490 */
+	u32 div_frac_start;	/* reg: 0x04b4, 0x4b8, 0x04bc */
+	u32 ssc_period;		/* reg: 0x04a0, 0x04a4 */
+	u32 ssc_step_size;	/* reg: 0x04a8, 0x04ac */
+	u32 plllock_cmp;	/* reg: 0x047c, 0x0480, 0x0484 */
+	u32 pll_vco_div_ref;	/* reg: 0x046c, 0x0470 */
+	u32 pll_vco_count;	/* reg: 0x0474, 0x0478 */
+	u32 pll_kvco_div_ref;	/* reg: 0x0440, 0x0444 */
+	u32 pll_kvco_count;	/* reg: 0x0448, 0x044c */
+	u32 pll_misc1;		/* reg: 0x04e8 */
+	u32 pll_lpf2_postdiv;	/* reg: 0x0504 */
+	u32 pll_resetsm_cntrl;	/* reg: 0x042c */
+	u32 pll_resetsm_cntrl2;	/* reg: 0x0430 */
+	u32 pll_resetsm_cntrl5;	/* reg: 0x043c */
+	u32 pll_kvco_code;		/* reg: 0x0458 */
+
+	u32 cmn_clk_cfg0;	/* reg: 0x0010 */
+	u32 cmn_clk_cfg1;	/* reg: 0x0014 */
+	u32 cmn_ldo_cntrl;	/* reg: 0x004c */
+
+	u32 pll_postdiv;	/* vco */
+	u32 pll_n1div;		/* vco */
+	u32 pll_n2div;		/* hr_oclk3, pixel */
+	u32 fcvo;
+};
+
+enum {
+	DSI_PLL_0,
+	DSI_PLL_1,
+	DSI_PLL_NUM
+};
+
+struct dsi_pll_db {
+	struct dsi_pll_db *next;
+	struct mdss_pll_resources *pll;
+	struct dsi_pll_input in;
+	struct dsi_pll_output out;
+	int source_setup_done;
+};
+
+enum {
+	PLL_OUTPUT_NONE,
+	PLL_OUTPUT_RIGHT,
+	PLL_OUTPUT_LEFT,
+	PLL_OUTPUT_BOTH
+};
+
+enum {
+	PLL_SOURCE_FROM_LEFT,
+	PLL_SOURCE_FROM_RIGHT
+};
+
+enum {
+	PLL_UNKNOWN,
+	PLL_STANDALONE,
+	PLL_SLAVE,
+	PLL_MASTER
+};
+
+int pll_vco_set_rate_8996(struct clk *c, unsigned long rate);
+long pll_vco_round_rate_8996(struct clk *c, unsigned long rate);
+enum handoff pll_vco_handoff_8996(struct clk *c);
+enum handoff shadow_pll_vco_handoff_8996(struct clk *c);
+int shadow_post_n1_div_set_div(struct div_clk *clk, int div);
+int shadow_post_n1_div_get_div(struct div_clk *clk);
+int shadow_n2_div_set_div(struct div_clk *clk, int div);
+int shadow_n2_div_get_div(struct div_clk *clk);
+int shadow_pll_vco_set_rate_8996(struct clk *c, unsigned long rate);
+int pll_vco_prepare_8996(struct clk *c);
+void pll_vco_unprepare_8996(struct clk *c);
+int set_mdss_byte_mux_sel_8996(struct mux_clk *clk, int sel);
+int get_mdss_byte_mux_sel_8996(struct mux_clk *clk);
+int set_mdss_pixel_mux_sel_8996(struct mux_clk *clk, int sel);
+int get_mdss_pixel_mux_sel_8996(struct mux_clk *clk);
+int post_n1_div_set_div(struct div_clk *clk, int div);
+int post_n1_div_get_div(struct div_clk *clk);
+int n2_div_set_div(struct div_clk *clk, int div);
+int n2_div_get_div(struct div_clk *clk);
+int dsi_pll_enable_seq_8996(struct mdss_pll_resources *pll);
+
+#endif  /* MDSS_DSI_PLL_8996_H */
diff -Nruw linux-4.4.115/drivers/clk/msm/mdss/mdss-dsi-pll-8996-util.c linux-4.4.115-fbx/drivers/clk/msm/mdss/mdss-dsi-pll-8996-util.c
--- linux-4.4.115/drivers/clk/msm/mdss/mdss-dsi-pll-8996-util.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/clk/msm/mdss/mdss-dsi-pll-8996-util.c	2019-10-29 09:26:23.473201514 +0100
@@ -0,0 +1,1142 @@
+/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt)	"%s: " fmt, __func__
+
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/iopoll.h>
+#include <linux/delay.h>
+#include <linux/clk/msm-clock-generic.h>
+
+#include "mdss-pll.h"
+#include "mdss-dsi-pll.h"
+#include "mdss-dsi-pll-8996.h"
+
+#define DSI_PLL_POLL_MAX_READS                  15
+#define DSI_PLL_POLL_TIMEOUT_US                 1000
+#define MSM8996_DSI_PLL_REVISION_2		2
+
+#define CEIL(x, y)		(((x) + ((y)-1)) / (y))
+
+int set_mdss_byte_mux_sel_8996(struct mux_clk *clk, int sel)
+{
+	return 0;
+}
+
+int get_mdss_byte_mux_sel_8996(struct mux_clk *clk)
+{
+	return 0;
+}
+
+int set_mdss_pixel_mux_sel_8996(struct mux_clk *clk, int sel)
+{
+	return 0;
+}
+
+int get_mdss_pixel_mux_sel_8996(struct mux_clk *clk)
+{
+	return 0;
+}
+
+static int mdss_pll_read_stored_trim_codes(
+		struct mdss_pll_resources *dsi_pll_res, s64 vco_clk_rate)
+{
+	int i;
+	int rc = 0;
+	bool found = false;
+
+	if (!dsi_pll_res->dfps) {
+		rc = -EINVAL;
+		goto end_read;
+	}
+
+	for (i = 0; i < dsi_pll_res->dfps->panel_dfps.frame_rate_cnt; i++) {
+		struct dfps_codes_info *codes_info =
+			&dsi_pll_res->dfps->codes_dfps[i];
+
+		pr_debug("valid=%d frame_rate=%d, vco_rate=%d, code %d %d\n",
+			codes_info->is_valid, codes_info->frame_rate,
+			codes_info->clk_rate, codes_info->pll_codes.pll_codes_1,
+			codes_info->pll_codes.pll_codes_2);
+
+		if (vco_clk_rate != codes_info->clk_rate &&
+				codes_info->is_valid)
+			continue;
+
+		dsi_pll_res->cache_pll_trim_codes[0] =
+			codes_info->pll_codes.pll_codes_1;
+		dsi_pll_res->cache_pll_trim_codes[1] =
+			codes_info->pll_codes.pll_codes_2;
+		found = true;
+		break;
+	}
+
+	if (!found) {
+		rc = -EINVAL;
+		goto end_read;
+	}
+
+	pr_debug("core_kvco_code=0x%x core_vco_tune=0x%x\n",
+			dsi_pll_res->cache_pll_trim_codes[0],
+			dsi_pll_res->cache_pll_trim_codes[1]);
+
+end_read:
+	return rc;
+}
+
+int post_n1_div_set_div(struct div_clk *clk, int div)
+{
+	struct mdss_pll_resources *pll = clk->priv;
+	struct dsi_pll_db *pdb;
+	struct dsi_pll_output *pout;
+	int rc;
+	u32 n1div = 0;
+
+	rc = mdss_pll_resource_enable(pll, true);
+	if (rc) {
+		pr_err("Failed to enable mdss dsi pll resources\n");
+		return rc;
+	}
+
+	pdb = (struct dsi_pll_db *)pll->priv;
+	pout = &pdb->out;
+
+	/*
+	 * vco rate = bit_clk * postdiv * n1div
+	 * vco range from 1300 to 2600 Mhz
+	 * postdiv = 1
+	 * n1div = 1 to 15
+	 * n1div = roundup(1300Mhz / bit_clk)
+	 * support bit_clk above 86.67Mhz
+	 */
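+	/*
+	 * Illustrative numbers (not taken from the source): for a
+	 * 1000 Mhz bit clock, n1div = roundup(1300 / 1000) = 2, so the
+	 * vco runs at 1000 * 1 * 2 = 2000 Mhz, inside the supported
+	 * 1300 - 2600 Mhz range.
+	 */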
+
+	/* this is for vco/bit clock */
+	pout->pll_postdiv = 1;	/* fixed, divided by 1 */
+	pout->pll_n1div  = div;
+
+	n1div = MDSS_PLL_REG_R(pll->pll_base, DSIPHY_CMN_CLK_CFG0);
+	n1div &= ~0xf;
+	n1div |= (div & 0xf);
+	MDSS_PLL_REG_W(pll->pll_base, DSIPHY_CMN_CLK_CFG0, n1div);
+	/* ensure n1 divider is programmed */
+	wmb();
+	pr_debug("ndx=%d div=%d postdiv=%x n1div=%x\n",
+			pll->index, div, pout->pll_postdiv, pout->pll_n1div);
+
+	mdss_pll_resource_enable(pll, false);
+
+	return 0;
+}
+
+int post_n1_div_get_div(struct div_clk *clk)
+{
+	u32  div;
+	int rc;
+	struct mdss_pll_resources *pll = clk->priv;
+
+	if (is_gdsc_disabled(pll))
+		return 0;
+
+	rc = mdss_pll_resource_enable(pll, true);
+	if (rc) {
+		pr_err("Failed to enable mdss dsi pll resources\n");
+		return rc;
+	}
+
+	/*
+	 * postdiv = 1/2/4/8
+	 * n1div = 1 - 15
+	 * for the time being, assume postdiv = 1
+	 */
+
+	div = MDSS_PLL_REG_R(pll->pll_base, DSIPHY_CMN_CLK_CFG0);
+	div &= 0xF;
+	pr_debug("n1 div = %d\n", div);
+
+	mdss_pll_resource_enable(pll, false);
+
+	return div;
+}
+
+int n2_div_set_div(struct div_clk *clk, int div)
+{
+	int rc;
+	u32 n2div;
+	struct mdss_pll_resources *pll = clk->priv;
+	struct dsi_pll_db *pdb;
+	struct dsi_pll_output *pout;
+	struct mdss_pll_resources *slave;
+
+	rc = mdss_pll_resource_enable(pll, true);
+	if (rc) {
+		pr_err("Failed to enable mdss dsi pll resources\n");
+		return rc;
+	}
+
+	pdb = (struct dsi_pll_db *)pll->priv;
+	pout = &pdb->out;
+
+	/* this is for pixel clock */
+	n2div = MDSS_PLL_REG_R(pll->pll_base, DSIPHY_CMN_CLK_CFG0);
+	n2div &= ~0xf0;	/* bits 4 to 7 */
+	n2div |= (div << 4);
+	MDSS_PLL_REG_W(pll->pll_base, DSIPHY_CMN_CLK_CFG0, n2div);
+
+	/* commit slave if split display is enabled */
+	slave = pll->slave;
+	if (slave)
+		MDSS_PLL_REG_W(slave->pll_base, DSIPHY_CMN_CLK_CFG0, n2div);
+
+	pout->pll_n2div = div;
+
+	/* set dsiclk_sel=1 so that n2div *= 2 */
+	MDSS_PLL_REG_W(pll->pll_base, DSIPHY_CMN_CLK_CFG1, 1);
+	pr_debug("ndx=%d div=%d n2div=%x\n", pll->index, div, n2div);
+
+	mdss_pll_resource_enable(pll, false);
+
+	return rc;
+}
+
+int shadow_n2_div_set_div(struct div_clk *clk, int div)
+{
+	struct mdss_pll_resources *pll = clk->priv;
+	struct dsi_pll_db *pdb;
+	struct dsi_pll_output *pout;
+	u32 data;
+
+	pdb = pll->priv;
+	pout = &pdb->out;
+
+	pout->pll_n2div = div;
+
+	data = (pout->pll_n1div | (pout->pll_n2div << 4));
+	MDSS_DYN_PLL_REG_W(pll->dyn_pll_base,
+			DSI_DYNAMIC_REFRESH_PLL_CTRL19,
+			DSIPHY_CMN_CLK_CFG0, DSIPHY_CMN_CLK_CFG1,
+			data, 1);
+	return 0;
+}
+
+int n2_div_get_div(struct div_clk *clk)
+{
+	int rc;
+	u32 n2div;
+	struct mdss_pll_resources *pll = clk->priv;
+
+	if (is_gdsc_disabled(pll))
+		return 0;
+
+	rc = mdss_pll_resource_enable(pll, true);
+	if (rc) {
+		pr_err("Failed to enable mdss dsi pll=%d resources\n",
+						pll->index);
+		return rc;
+	}
+
+	n2div = MDSS_PLL_REG_R(pll->pll_base, DSIPHY_CMN_CLK_CFG0);
+	n2div >>= 4;
+	n2div &= 0x0f;
+
+	mdss_pll_resource_enable(pll, false);
+
+	pr_debug("ndx=%d div=%d\n", pll->index, n2div);
+
+	return n2div;
+}
+
+static bool pll_is_pll_locked_8996(struct mdss_pll_resources *pll)
+{
+	u32 status;
+	bool pll_locked;
+
+	/* poll for PLL ready status */
+	if (readl_poll_timeout_atomic((pll->pll_base +
+			DSIPHY_PLL_RESET_SM_READY_STATUS),
+			status,
+			((status & BIT(5)) > 0),
+			DSI_PLL_POLL_MAX_READS,
+			DSI_PLL_POLL_TIMEOUT_US)) {
+			pr_err("DSI PLL ndx=%d status=%x failed to Lock\n",
+					pll->index, status);
+		pll_locked = false;
+	} else if (readl_poll_timeout_atomic((pll->pll_base +
+				DSIPHY_PLL_RESET_SM_READY_STATUS),
+				status,
+				((status & BIT(0)) > 0),
+				DSI_PLL_POLL_MAX_READS,
+				DSI_PLL_POLL_TIMEOUT_US)) {
+			pr_err("DSI PLL ndx=%d status=%x PLl not ready\n",
+					pll->index, status);
+		pll_locked = false;
+	} else {
+		pll_locked = true;
+	}
+
+	return pll_locked;
+}
+
+static void dsi_pll_start_8996(void __iomem *pll_base)
+{
+	pr_debug("start PLL at base=%p\n", pll_base);
+
+	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_VREF_CFG1, 0x10);
+	MDSS_PLL_REG_W(pll_base, DSIPHY_CMN_PLL_CNTRL, 1);
+}
+
+static void dsi_pll_stop_8996(void __iomem *pll_base)
+{
+	pr_debug("stop PLL at base=%p\n", pll_base);
+
+	MDSS_PLL_REG_W(pll_base, DSIPHY_CMN_PLL_CNTRL, 0);
+}
+
+int dsi_pll_enable_seq_8996(struct mdss_pll_resources *pll)
+{
+	int rc = 0;
+
+	if (!pll) {
+		pr_err("Invalid PLL resources\n");
+		return -EINVAL;
+	}
+
+	dsi_pll_start_8996(pll->pll_base);
+
+	/*
+	 * both DSIPHY_PLL_CLKBUFLR_EN and DSIPHY_CMN_GLBL_TEST_CTRL
+	 * enabled at mdss_dsi_8996_phy_config()
+	 */
+
+	if (!pll_is_pll_locked_8996(pll)) {
+		pr_err("DSI PLL ndx=%d lock failed\n", pll->index);
+		rc = -EINVAL;
+		goto init_lock_err;
+	}
+
+	pr_debug("DSI PLL ndx=%d Lock success\n", pll->index);
+
+init_lock_err:
+	return rc;
+}
+
+static int dsi_pll_enable(struct clk *c)
+{
+	int i, rc = 0;
+	struct dsi_pll_vco_clk *vco = to_vco_clk(c);
+	struct mdss_pll_resources *pll = vco->priv;
+
+	/* Try all enable sequences until one succeeds */
+	for (i = 0; i < vco->pll_en_seq_cnt; i++) {
+		rc = vco->pll_enable_seqs[i](pll);
+		pr_debug("DSI PLL %s after sequence #%d\n",
+			rc ? "unlocked" : "locked", i + 1);
+		if (!rc)
+			break;
+	}
+
+	if (rc)
+		pr_err("ndx=%d DSI PLL failed to lock\n", pll->index);
+	else
+		pll->pll_on = true;
+
+	return rc;
+}
+
+static void dsi_pll_disable(struct clk *c)
+{
+	struct dsi_pll_vco_clk *vco = to_vco_clk(c);
+	struct mdss_pll_resources *pll = vco->priv;
+	struct mdss_pll_resources *slave;
+
+	if (!pll->pll_on &&
+		mdss_pll_resource_enable(pll, true)) {
+		pr_err("Failed to enable mdss dsi pll=%d\n", pll->index);
+		return;
+	}
+
+	pll->handoff_resources = false;
+	slave = pll->slave;
+
+	dsi_pll_stop_8996(pll->pll_base);
+
+	mdss_pll_resource_enable(pll, false);
+
+	pll->pll_on = false;
+
+	pr_debug("DSI PLL ndx=%d Disabled\n", pll->index);
+	return;
+}
+
+static void mdss_dsi_pll_8996_input_init(struct mdss_pll_resources *pll,
+					struct dsi_pll_db *pdb)
+{
+	pdb->in.fref = 19200000;	/* 19.2 Mhz*/
+	pdb->in.fdata = 0;		/* bit clock rate */
+	pdb->in.dsiclk_sel = 1;		/* 1, reg: 0x0014 */
+	pdb->in.ssc_en = pll->ssc_en;		/* 1, reg: 0x0494, bit 0 */
+	pdb->in.ldo_en = 0;		/* 0,  reg: 0x004c, bit 0 */
+
+	/* fixed  input */
+	pdb->in.refclk_dbler_en = 0;	/* 0, reg: 0x04c0, bit 1 */
+	pdb->in.vco_measure_time = 5;	/* 5, unknown */
+	pdb->in.kvco_measure_time = 5;	/* 5, unknown */
+	pdb->in.bandgap_timer = 4;	/* 4, reg: 0x0430, bit 3 - 5 */
+	pdb->in.pll_wakeup_timer = 5;	/* 5, reg: 0x043c, bit 0 - 2 */
+	pdb->in.plllock_cnt = 1;	/* 1, reg: 0x0488, bit 1 - 2 */
+	pdb->in.plllock_rng = 0;	/* 0, reg: 0x0488, bit 3 - 4 */
+	pdb->in.ssc_center = pll->ssc_center;/* 0, reg: 0x0494, bit 1 */
+	pdb->in.ssc_adj_period = 37;	/* 37, reg: 0x498, bit 0 - 9 */
+	pdb->in.ssc_spread = pll->ssc_ppm / 1000;
+	pdb->in.ssc_freq = pll->ssc_freq;
+
+	pdb->in.pll_ie_trim = 4;	/* 4, reg: 0x0400 */
+	pdb->in.pll_ip_trim = 4;	/* 4, reg: 0x0404 */
+	pdb->in.pll_cpcset_cur = 1;	/* 1, reg: 0x04f0, bit 0 - 2 */
+	pdb->in.pll_cpmset_cur = 1;	/* 1, reg: 0x04f0, bit 3 - 5 */
+	pdb->in.pll_icpmset = 4;	/* 4, reg: 0x04fc, bit 3 - 5 */
+	pdb->in.pll_icpcset = 4;	/* 4, reg: 0x04fc, bit 0 - 2 */
+	pdb->in.pll_icpmset_p = 0;	/* 0, reg: 0x04f4, bit 0 - 2 */
+	pdb->in.pll_icpmset_m = 0;	/* 0, reg: 0x04f4, bit 3 - 5 */
+	pdb->in.pll_icpcset_p = 0;	/* 0, reg: 0x04f8, bit 0 - 2 */
+	pdb->in.pll_icpcset_m = 0;	/* 0, reg: 0x04f8, bit 3 - 5 */
+	pdb->in.pll_lpf_res1 = 3;	/* 3, reg: 0x0504, bit 0 - 3 */
+	pdb->in.pll_lpf_cap1 = 11;	/* 11, reg: 0x0500, bit 0 - 3 */
+	pdb->in.pll_lpf_cap2 = 1;	/* 1, reg: 0x0500, bit 4 - 7 */
+	pdb->in.pll_iptat_trim = 7;
+	pdb->in.pll_c3ctrl = 2;		/* 2 */
+	pdb->in.pll_r3ctrl = 1;		/* 1 */
+}
+
+static void pll_8996_ssc_calc(struct mdss_pll_resources *pll,
+				struct dsi_pll_db *pdb)
+{
+	u32 period, ssc_period;
+	u32 ref, rem;
+	s64 step_size;
+
+	pr_debug("%s: vco=%lld ref=%lld\n", __func__,
+		pll->vco_current_rate, pll->vco_ref_clk_rate);
+
+	ssc_period = pdb->in.ssc_freq / 500;
+	period = (unsigned long)pll->vco_ref_clk_rate / 1000;
+	ssc_period  = CEIL(period, ssc_period);
+	ssc_period -= 1;
+	pdb->out.ssc_period = ssc_period;
+
+	pr_debug("%s: ssc, freq=%d spread=%d period=%d\n", __func__,
+		pdb->in.ssc_freq, pdb->in.ssc_spread, pdb->out.ssc_period);
+
+	step_size = (u32)pll->vco_current_rate;
+	ref = pll->vco_ref_clk_rate;
+	ref /= 1000;
+	step_size = div_s64(step_size, ref);
+	step_size <<= 20;
+	step_size = div_s64(step_size, 1000);
+	step_size *= pdb->in.ssc_spread;
+	step_size = div_s64(step_size, 1000);
+	step_size *= (pdb->in.ssc_adj_period + 1);
+
+	rem = 0;
+	step_size = div_s64_rem(step_size, ssc_period + 1, &rem);
+	if (rem)
+		step_size++;
+
+	pr_debug("%s: step_size=%lld\n", __func__, step_size);
+
+	step_size &= 0x0ffff;	/* take lower 16 bits */
+
+	pdb->out.ssc_step_size = step_size;
+}
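+
+/*
+ * Worked example for the SSC math above (illustrative values, not from
+ * the source): with ssc_freq = 31500 Hz and a 19.2 Mhz reference,
+ * ssc_period = 31500 / 500 = 63, period = 19200000 / 1000 = 19200, and
+ * CEIL(19200, 63) - 1 = 305 - 1 = 304, the value that pll_db_commit_ssc()
+ * programs into DSIPHY_PLL_SSC_PER1/2.
+ */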
+
+static void pll_8996_dec_frac_calc(struct mdss_pll_resources *pll,
+				struct dsi_pll_db *pdb)
+{
+	struct dsi_pll_input *pin = &pdb->in;
+	struct dsi_pll_output *pout = &pdb->out;
+	s64 multiplier = BIT(20);
+	s64 dec_start_multiple, dec_start, pll_comp_val;
+	s32 duration, div_frac_start;
+	s64 vco_clk_rate = pll->vco_current_rate;
+	s64 fref = pll->vco_ref_clk_rate;
+
+	pr_debug("vco_clk_rate=%lld ref_clk_rate=%lld\n",
+				vco_clk_rate, fref);
+
+	dec_start_multiple = div_s64(vco_clk_rate * multiplier, fref);
+	div_s64_rem(dec_start_multiple, multiplier, &div_frac_start);
+
+	dec_start = div_s64(dec_start_multiple, multiplier);
+
+	pout->dec_start = (u32)dec_start;
+	pout->div_frac_start = div_frac_start;
+
+	if (pin->plllock_cnt == 0)
+		duration = 1024;
+	else if (pin->plllock_cnt == 1)
+		duration = 256;
+	else if (pin->plllock_cnt == 2)
+		duration = 128;
+	else
+		duration = 32;
+
+	pll_comp_val =  duration * dec_start_multiple;
+	pll_comp_val =  div_s64(pll_comp_val, multiplier);
+	do_div(pll_comp_val, 10);
+
+	pout->plllock_cmp = (u32)pll_comp_val;
+
+	pout->pll_txclk_en = 1;
+	if (pll->revision == MSM8996_DSI_PLL_REVISION_2)
+		pout->cmn_ldo_cntrl = 0x3c;
+	else
+		pout->cmn_ldo_cntrl = 0x1c;
+}
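+
+/*
+ * Worked example for the decimal/fractional split above (illustrative,
+ * not from the source): with vco_clk_rate = 1500 Mhz and fref = 19.2 Mhz,
+ * 1500000000 * 2^20 / 19200000 = 78.125 * 2^20, so dec_start = 78 and
+ * div_frac_start = 0.125 * 2^20 = 131072. With plllock_cnt = 1
+ * (duration = 256), plllock_cmp = 256 * 78.125 / 10 = 2000.
+ */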
+
+static u32 pll_8996_kvco_slop(u32 vrate)
+{
+	u32 slop = 0;
+
+	if (vrate > 1300000000UL && vrate <= 1800000000UL)
+		slop =  600;
+	else if (vrate > 1800000000UL && vrate < 2300000000UL)
+		slop = 400;
+	else if (vrate > 2300000000UL && vrate < 2600000000UL)
+		slop = 280;
+
+	return slop;
+}
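+
+/*
+ * The slope chosen above feeds the KVCO counter computed in
+ * pll_8996_calc_vco_count() below:
+ * pll_kvco_count = slop * 2 / 100 * kvco_measure_time.
+ */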
+
+static void pll_8996_calc_vco_count(struct dsi_pll_db *pdb,
+			 s64 vco_clk_rate, s64 fref)
+{
+	struct dsi_pll_input *pin = &pdb->in;
+	struct dsi_pll_output *pout = &pdb->out;
+	s64 data;
+	u32 cnt;
+
+	data = fref * pin->vco_measure_time;
+	do_div(data, 1000000);
+	data &= 0x03ff;	/* 10 bits */
+	data -= 2;
+	pout->pll_vco_div_ref = data;
+
+	data = (unsigned long)vco_clk_rate / 1000000;	/* unit is Mhz */
+	data *= pin->vco_measure_time;
+	do_div(data, 10);
+	pout->pll_vco_count = data; /* reg: 0x0474, 0x0478 */
+
+	data = fref * pin->kvco_measure_time;
+	do_div(data, 1000000);
+	data &= 0x03ff;	/* 10 bits */
+	data -= 1;
+	pout->pll_kvco_div_ref = data;
+
+	cnt = pll_8996_kvco_slop(vco_clk_rate);
+	cnt *= 2;
+	cnt /= 100;	/* cnt is u32; do_div() requires a u64 dividend */
+	cnt *= pin->kvco_measure_time;
+	pout->pll_kvco_count = cnt;
+
+	pout->pll_misc1 = 16;
+	pout->pll_resetsm_cntrl = 48;
+	pout->pll_resetsm_cntrl2 = pin->bandgap_timer << 3;
+	pout->pll_resetsm_cntrl5 = pin->pll_wakeup_timer;
+	pout->pll_kvco_code = 0;
+}
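+
+/*
+ * Worked example for the counters above (illustrative, not from the
+ * source): with fref = 19.2 Mhz and vco_measure_time = 5,
+ * pll_vco_div_ref = 19.2 * 5 - 2 = 94; for a 1500 Mhz VCO,
+ * pll_vco_count = 1500 * 5 / 10 = 750. With kvco_measure_time = 5 and
+ * the same VCO rate (slop = 600), pll_kvco_count = 600 * 2 / 100 * 5 = 60.
+ */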
+
+static void pll_db_commit_ssc(struct mdss_pll_resources *pll,
+					struct dsi_pll_db *pdb)
+{
+	void __iomem *pll_base = pll->pll_base;
+	struct dsi_pll_input *pin = &pdb->in;
+	struct dsi_pll_output *pout = &pdb->out;
+	char data;
+
+	data = pin->ssc_adj_period;
+	data &= 0x0ff;
+	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_SSC_ADJ_PER1, data);
+	data = (pin->ssc_adj_period >> 8);
+	data &= 0x03;
+	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_SSC_ADJ_PER2, data);
+
+	data = pout->ssc_period;
+	data &= 0x0ff;
+	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_SSC_PER1, data);
+	data = (pout->ssc_period >> 8);
+	data &= 0x0ff;
+	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_SSC_PER2, data);
+
+	data = pout->ssc_step_size;
+	data &= 0x0ff;
+	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_SSC_STEP_SIZE1, data);
+	data = (pout->ssc_step_size >> 8);
+	data &= 0x0ff;
+	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_SSC_STEP_SIZE2, data);
+
+	data = (pin->ssc_center & 0x01);
+	data <<= 1;
+	data |= 0x01; /* enable */
+	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_SSC_EN_CENTER, data);
+
+	wmb();	/* make sure register committed */
+}
+
+static void pll_db_commit_common(struct mdss_pll_resources *pll,
+					struct dsi_pll_db *pdb)
+{
+	void __iomem *pll_base = pll->pll_base;
+	struct dsi_pll_input *pin = &pdb->in;
+	struct dsi_pll_output *pout = &pdb->out;
+	char data;
+
+	/* configure the non-frequency-dependent pll registers */
+	data = 0;
+	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_SYSCLK_EN_RESET, data);
+
+	/* DSIPHY_PLL_CLKBUFLR_EN updated at dsi phy */
+
+	data = pout->pll_txclk_en;
+	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_TXCLK_EN, data);
+
+	data = pout->pll_resetsm_cntrl;
+	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_RESETSM_CNTRL, data);
+	data = pout->pll_resetsm_cntrl2;
+	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_RESETSM_CNTRL2, data);
+	data = pout->pll_resetsm_cntrl5;
+	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_RESETSM_CNTRL5, data);
+
+	data = pout->pll_vco_div_ref;
+	data &= 0x0ff;
+	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_VCO_DIV_REF1, data);
+	data = (pout->pll_vco_div_ref >> 8);
+	data &= 0x03;
+	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_VCO_DIV_REF2, data);
+
+	data = pout->pll_kvco_div_ref;
+	data &= 0x0ff;
+	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_KVCO_DIV_REF1, data);
+	data = (pout->pll_kvco_div_ref >> 8);
+	data &= 0x03;
+	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_KVCO_DIV_REF2, data);
+
+	data = pout->pll_misc1;
+	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_PLL_MISC1, data);
+
+	data = pin->pll_ie_trim;
+	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_IE_TRIM, data);
+
+	data = pin->pll_ip_trim;
+	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_IP_TRIM, data);
+
+	data = ((pin->pll_cpmset_cur << 3) | pin->pll_cpcset_cur);
+	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_CP_SET_CUR, data);
+
+	data = ((pin->pll_icpcset_p << 3) | pin->pll_icpcset_m);
+	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_PLL_ICPCSET, data);
+
+	data = ((pin->pll_icpmset_p << 3) | pin->pll_icpmset_m);
+	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_PLL_ICPMSET, data);
+
+	data = ((pin->pll_icpmset << 3) | pin->pll_icpcset);
+	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_PLL_ICP_SET, data);
+
+	data = ((pdb->in.pll_lpf_cap2 << 4) | pdb->in.pll_lpf_cap1);
+	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_PLL_LPF1, data);
+
+	data = pin->pll_iptat_trim;
+	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_IPTAT_TRIM, data);
+
+	data = (pdb->in.pll_c3ctrl | (pdb->in.pll_r3ctrl << 4));
+	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_PLL_CRCTRL, data);
+}
+
+static void pll_db_commit_8996(struct mdss_pll_resources *pll,
+					struct dsi_pll_db *pdb)
+{
+	void __iomem *pll_base = pll->pll_base;
+	struct dsi_pll_input *pin = &pdb->in;
+	struct dsi_pll_output *pout = &pdb->out;
+	char data;
+
+	data = pout->cmn_ldo_cntrl;
+	MDSS_PLL_REG_W(pll_base, DSIPHY_CMN_LDO_CNTRL, data);
+
+	pll_db_commit_common(pll, pdb);
+
+	/* de-assert pll start and apply pll sw reset */
+	/* stop pll */
+	MDSS_PLL_REG_W(pll_base, DSIPHY_CMN_PLL_CNTRL, 0);
+
+	/* pll sw reset */
+	MDSS_PLL_REG_W(pll_base, DSIPHY_CMN_CTRL_1, 0x20);
+	wmb();	/* make sure register committed */
+	udelay(10);
+
+	MDSS_PLL_REG_W(pll_base, DSIPHY_CMN_CTRL_1, 0);
+	wmb();	/* make sure register committed */
+
+	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_PLL_VCO_TUNE, 0);
+	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_KVCO_CODE, 0);
+	wmb(); /* make sure register committed */
+
+	data = pdb->in.dsiclk_sel; /* set dsiclk_sel = 1  */
+	MDSS_PLL_REG_W(pll_base, DSIPHY_CMN_CLK_CFG1, data);
+
+	data = 0xff; /* data, clk, pll normal operation */
+	MDSS_PLL_REG_W(pll_base, DSIPHY_CMN_CTRL_0, data);
+
+	/* configure the frequency-dependent pll registers */
+	data = pout->dec_start;
+	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_DEC_START, data);
+
+	data = pout->div_frac_start;
+	data &= 0x0ff;
+	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_DIV_FRAC_START1, data);
+	data = (pout->div_frac_start >> 8);
+	data &= 0x0ff;
+	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_DIV_FRAC_START2, data);
+	data = (pout->div_frac_start >> 16);
+	data &= 0x0f;
+	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_DIV_FRAC_START3, data);
+
+	data = pout->plllock_cmp;
+	data &= 0x0ff;
+	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_PLLLOCK_CMP1, data);
+	data = (pout->plllock_cmp >> 8);
+	data &= 0x0ff;
+	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_PLLLOCK_CMP2, data);
+	data = (pout->plllock_cmp >> 16);
+	data &= 0x03;
+	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_PLLLOCK_CMP3, data);
+
+	data = ((pin->plllock_cnt << 1) | (pin->plllock_rng << 3));
+	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_PLLLOCK_CMP_EN, data);
+
+	data = pout->pll_vco_count;
+	data &= 0x0ff;
+	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_VCO_COUNT1, data);
+	data = (pout->pll_vco_count >> 8);
+	data &= 0x0ff;
+	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_VCO_COUNT2, data);
+
+	data = pout->pll_kvco_count;
+	data &= 0x0ff;
+	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_KVCO_COUNT1, data);
+	data = (pout->pll_kvco_count >> 8);
+	data &= 0x03;
+	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_KVCO_COUNT2, data);
+
+	/*
+	 * tx_band = pll_postdiv
+	 * 0: divided by 1 <== for now
+	 * 1: divided by 2
+	 * 2: divided by 4
+	 * 3: divided by 8
+	 */
+	data = (((pout->pll_postdiv - 1) << 4) | pdb->in.pll_lpf_res1);
+	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_PLL_LPF2_POSTDIV, data);
+
+	data = (pout->pll_n1div | (pout->pll_n2div << 4));
+	MDSS_PLL_REG_W(pll_base, DSIPHY_CMN_CLK_CFG0, data);
+
+	if (pll->ssc_en)
+		pll_db_commit_ssc(pll, pdb);
+
+	wmb();	/* make sure register committed */
+}
+
+/*
+ * pll_source_finding:
+ * Both GLBL_TEST_CTRL and CLKBUFLR_EN are configured
+ * at mdss_dsi_8996_phy_config()
+ */
+static int pll_source_finding(struct mdss_pll_resources *pll)
+{
+	u32 clk_buf_en;
+	u32 glbl_test_ctrl;
+
+	glbl_test_ctrl = MDSS_PLL_REG_R(pll->pll_base,
+				DSIPHY_CMN_GLBL_TEST_CTRL);
+	clk_buf_en = MDSS_PLL_REG_R(pll->pll_base,
+				DSIPHY_PLL_CLKBUFLR_EN);
+
+	glbl_test_ctrl &= BIT(2);
+	glbl_test_ctrl >>= 2;
+
+	pr_debug("%s: pll=%d clk_buf_en=%x glbl_test_ctrl=%x\n",
+		__func__, pll->index, clk_buf_en, glbl_test_ctrl);
+
+	clk_buf_en &= (PLL_OUTPUT_RIGHT | PLL_OUTPUT_LEFT);
+
+	if ((glbl_test_ctrl == PLL_SOURCE_FROM_LEFT) &&
+			(clk_buf_en == PLL_OUTPUT_BOTH))
+		return PLL_MASTER;
+
+	if ((glbl_test_ctrl == PLL_SOURCE_FROM_RIGHT) &&
+			(clk_buf_en == PLL_OUTPUT_NONE))
+		return PLL_SLAVE;
+
+	if ((glbl_test_ctrl == PLL_SOURCE_FROM_LEFT) &&
+			(clk_buf_en == PLL_OUTPUT_RIGHT))
+		return PLL_STANDALONE;
+
+	pr_debug("%s: Error pll setup, clk_buf_en=%x glbl_test_ctrl=%x\n",
+			__func__, clk_buf_en, glbl_test_ctrl);
+
+	return PLL_UNKNOWN;
+}
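+
+/*
+ * Summary of the decode above, for reference:
+ *   source from left,  output both  -> PLL_MASTER
+ *   source from right, output none  -> PLL_SLAVE
+ *   source from left,  output right -> PLL_STANDALONE
+ * any other combination is reported as PLL_UNKNOWN.
+ */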
+
+static void pll_source_setup(struct mdss_pll_resources *pll)
+{
+	int status;
+	struct dsi_pll_db *pdb = (struct dsi_pll_db *)pll->priv;
+	struct mdss_pll_resources *other;
+
+	if (pdb->source_setup_done)
+		return;
+
+	pdb->source_setup_done++;
+
+	status = pll_source_finding(pll);
+
+	if (status == PLL_STANDALONE || status == PLL_UNKNOWN)
+		return;
+
+	other = pdb->next->pll;
+	if (!other)
+		return;
+
+	pr_debug("%s: status=%d pll=%d other=%d\n", __func__,
+			status, pll->index, other->index);
+
+	if (status == PLL_MASTER)
+		pll->slave = other;
+	else
+		other->slave = pll;
+}
+
+int pll_vco_set_rate_8996(struct clk *c, unsigned long rate)
+{
+	int rc;
+	struct dsi_pll_vco_clk *vco = to_vco_clk(c);
+	struct mdss_pll_resources *pll = vco->priv;
+	struct mdss_pll_resources *slave;
+	struct dsi_pll_db *pdb;
+
+	pdb = (struct dsi_pll_db *)pll->priv;
+	if (!pdb) {
+		pr_err("No prov found\n");
+		return -EINVAL;
+	}
+
+	rc = mdss_pll_resource_enable(pll, true);
+	if (rc) {
+		pr_err("Failed to enable mdss dsi plla=%d\n", pll->index);
+		return rc;
+	}
+
+	pll_source_setup(pll);
+
+	pr_debug("%s: ndx=%d base=%p rate=%lu slave=%p\n", __func__,
+				pll->index, pll->pll_base, rate, pll->slave);
+
+	pll->vco_current_rate = rate;
+	pll->vco_ref_clk_rate = vco->ref_clk_rate;
+
+	mdss_dsi_pll_8996_input_init(pll, pdb);
+
+	pll_8996_dec_frac_calc(pll, pdb);
+
+	if (pll->ssc_en)
+		pll_8996_ssc_calc(pll, pdb);
+
+	pll_8996_calc_vco_count(pdb, pll->vco_current_rate,
+					pll->vco_ref_clk_rate);
+
+	/* commit slave if split display is enabled */
+	slave = pll->slave;
+	if (slave)
+		pll_db_commit_8996(slave, pdb);
+
+	/* commit master itself */
+	pll_db_commit_8996(pll, pdb);
+
+	mdss_pll_resource_enable(pll, false);
+
+	return rc;
+}
+
+static void shadow_pll_dynamic_refresh_8996(struct mdss_pll_resources *pll,
+							struct dsi_pll_db *pdb)
+{
+	struct dsi_pll_output *pout = &pdb->out;
+
+	MDSS_DYN_PLL_REG_W(pll->dyn_pll_base,
+		DSI_DYNAMIC_REFRESH_PLL_CTRL20,
+		DSIPHY_CMN_CTRL_0, DSIPHY_PLL_SYSCLK_EN_RESET,
+		0xFF, 0x0);
+	MDSS_DYN_PLL_REG_W(pll->dyn_pll_base,
+		DSI_DYNAMIC_REFRESH_PLL_CTRL21,
+		DSIPHY_PLL_DEC_START, DSIPHY_PLL_DIV_FRAC_START1,
+		pout->dec_start, (pout->div_frac_start & 0x0FF));
+	MDSS_DYN_PLL_REG_W(pll->dyn_pll_base,
+		DSI_DYNAMIC_REFRESH_PLL_CTRL22,
+		DSIPHY_PLL_DIV_FRAC_START2, DSIPHY_PLL_DIV_FRAC_START3,
+		((pout->div_frac_start >> 8) & 0x0FF),
+		((pout->div_frac_start >> 16) & 0x0F));
+	MDSS_DYN_PLL_REG_W(pll->dyn_pll_base,
+		DSI_DYNAMIC_REFRESH_PLL_CTRL23,
+		DSIPHY_PLL_PLLLOCK_CMP1, DSIPHY_PLL_PLLLOCK_CMP2,
+		(pout->plllock_cmp & 0x0FF),
+		((pout->plllock_cmp >> 8) & 0x0FF));
+	MDSS_DYN_PLL_REG_W(pll->dyn_pll_base,
+		DSI_DYNAMIC_REFRESH_PLL_CTRL24,
+		DSIPHY_PLL_PLLLOCK_CMP3, DSIPHY_PLL_PLL_VCO_TUNE,
+		((pout->plllock_cmp >> 16) & 0x03),
+		(pll->cache_pll_trim_codes[1] | BIT(7))); /* VCO tune */
+	MDSS_DYN_PLL_REG_W(pll->dyn_pll_base,
+		DSI_DYNAMIC_REFRESH_PLL_CTRL25,
+		DSIPHY_PLL_KVCO_CODE, DSIPHY_PLL_RESETSM_CNTRL,
+		(pll->cache_pll_trim_codes[0] | BIT(5)), 0x38);
+	MDSS_DYN_PLL_REG_W(pll->dyn_pll_base,
+		DSI_DYNAMIC_REFRESH_PLL_CTRL26,
+		DSIPHY_PLL_PLL_LPF2_POSTDIV, DSIPHY_CMN_PLL_CNTRL,
+		(((pout->pll_postdiv - 1) << 4) | pdb->in.pll_lpf_res1), 0x01);
+	MDSS_DYN_PLL_REG_W(pll->dyn_pll_base,
+		DSI_DYNAMIC_REFRESH_PLL_CTRL27,
+		DSIPHY_CMN_PLL_CNTRL, DSIPHY_CMN_PLL_CNTRL,
+		0x01, 0x01);
+	MDSS_DYN_PLL_REG_W(pll->dyn_pll_base,
+		DSI_DYNAMIC_REFRESH_PLL_CTRL28,
+		DSIPHY_CMN_PLL_CNTRL, DSIPHY_CMN_PLL_CNTRL,
+		0x01, 0x01);
+	MDSS_DYN_PLL_REG_W(pll->dyn_pll_base,
+		DSI_DYNAMIC_REFRESH_PLL_CTRL29,
+		DSIPHY_CMN_PLL_CNTRL, DSIPHY_CMN_PLL_CNTRL,
+		0x01, 0x01);
+	MDSS_PLL_REG_W(pll->dyn_pll_base,
+		DSI_DYNAMIC_REFRESH_PLL_UPPER_ADDR, 0x0000001E);
+	MDSS_PLL_REG_W(pll->dyn_pll_base,
+		DSI_DYNAMIC_REFRESH_PLL_UPPER_ADDR2, 0x001FFE00);
+
+	/*
+	 * Ensure all the dynamic refresh registers are written before
+	 * dynamic refresh to change the fps is triggered
+	 */
+	wmb();
+}
+
+int shadow_pll_vco_set_rate_8996(struct clk *c, unsigned long rate)
+{
+	int rc;
+	struct dsi_pll_vco_clk *vco = to_vco_clk(c);
+	struct mdss_pll_resources *pll = vco->priv;
+	struct dsi_pll_db *pdb;
+	s64 vco_clk_rate = (s64)rate;
+
+	if (!pll) {
+		pr_err("PLL data not found\n");
+		return -EINVAL;
+	}
+
+	pdb = pll->priv;
+	if (!pdb) {
+		pr_err("No priv data found\n");
+		return -EINVAL;
+	}
+
+	rc = mdss_pll_read_stored_trim_codes(pll, vco_clk_rate);
+	if (rc) {
+		pr_err("cannot find pll codes rate=%lld\n", vco_clk_rate);
+		return -EINVAL;
+	}
+
+	rc = mdss_pll_resource_enable(pll, true);
+	if (rc) {
+		pr_err("Failed to enable mdss dsi plla=%d\n", pll->index);
+		return rc;
+	}
+
+	pr_debug("%s: ndx=%d base=%p rate=%lu\n", __func__,
+			pll->index, pll->pll_base, rate);
+
+	pll->vco_current_rate = rate;
+	pll->vco_ref_clk_rate = vco->ref_clk_rate;
+
+	mdss_dsi_pll_8996_input_init(pll, pdb);
+
+	pll_8996_dec_frac_calc(pll, pdb);
+
+	pll_8996_calc_vco_count(pdb, pll->vco_current_rate,
+			pll->vco_ref_clk_rate);
+
+	shadow_pll_dynamic_refresh_8996(pll, pdb);
+
+	rc = mdss_pll_resource_enable(pll, false);
+	if (rc) {
+		pr_err("Failed to enable mdss dsi plla=%d\n", pll->index);
+		return rc;
+	}
+
+	return rc;
+}
+
+unsigned long pll_vco_get_rate_8996(struct clk *c)
+{
+	u64 vco_rate, multiplier = BIT(20);
+	s32 div_frac_start;
+	u32 dec_start;
+	struct dsi_pll_vco_clk *vco = to_vco_clk(c);
+	u64 ref_clk = vco->ref_clk_rate;
+	int rc;
+	struct mdss_pll_resources *pll = vco->priv;
+
+	if (is_gdsc_disabled(pll))
+		return 0;
+
+	rc = mdss_pll_resource_enable(pll, true);
+	if (rc) {
+		pr_err("Failed to enable mdss dsi pll=%d\n", pll->index);
+		return rc;
+	}
+
+	dec_start = MDSS_PLL_REG_R(pll->pll_base,
+			DSIPHY_PLL_DEC_START);
+	dec_start &= 0x0ff;
+	pr_debug("dec_start = 0x%x\n", dec_start);
+
+	div_frac_start = (MDSS_PLL_REG_R(pll->pll_base,
+			DSIPHY_PLL_DIV_FRAC_START3) & 0x0f) << 16;
+	div_frac_start |= (MDSS_PLL_REG_R(pll->pll_base,
+			DSIPHY_PLL_DIV_FRAC_START2) & 0x0ff) << 8;
+	div_frac_start |= MDSS_PLL_REG_R(pll->pll_base,
+			DSIPHY_PLL_DIV_FRAC_START1) & 0x0ff;
+	pr_debug("div_frac_start = 0x%x\n", div_frac_start);
+
+	vco_rate = ref_clk * dec_start;
+	vco_rate += ((ref_clk * div_frac_start) / multiplier);
+
+	pr_debug("returning vco rate = %lu\n", (unsigned long)vco_rate);
+
+	mdss_pll_resource_enable(pll, false);
+
+	return (unsigned long)vco_rate;
+}
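+
+/*
+ * Reversing the example from pll_8996_dec_frac_calc() (illustrative):
+ * dec_start = 78 and div_frac_start = 131072 give
+ * 19.2 Mhz * 78 + 19.2 Mhz * 131072 / 2^20 = 1497.6 + 2.4 = 1500 Mhz.
+ */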
+
+long pll_vco_round_rate_8996(struct clk *c, unsigned long rate)
+{
+	unsigned long rrate = rate;
+	u32 div;
+	struct dsi_pll_vco_clk *vco = to_vco_clk(c);
+
+	div = vco->min_rate / rate;
+	if (div > 15) {
+		/* rate < 86.67 Mhz */
+		pr_err("rate=%lu NOT supportted\n", rate);
+		return -EINVAL;
+	}
+
+	if (rate < vco->min_rate)
+		rrate = vco->min_rate;
+	if (rate > vco->max_rate)
+		rrate = vco->max_rate;
+
+	return rrate;
+}
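+
+/*
+ * Illustrative bounds check: with min_rate = 1300 Mhz, a requested rate
+ * of 80 Mhz gives div = 16 > 15 and is rejected, while 100 Mhz gives
+ * div = 13 and is rounded up to min_rate.
+ */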
+
+enum handoff pll_vco_handoff_8996(struct clk *c)
+{
+	int rc;
+	enum handoff ret = HANDOFF_DISABLED_CLK;
+	struct dsi_pll_vco_clk *vco = to_vco_clk(c);
+	struct mdss_pll_resources *pll = vco->priv;
+
+	if (is_gdsc_disabled(pll))
+		return HANDOFF_DISABLED_CLK;
+
+	rc = mdss_pll_resource_enable(pll, true);
+	if (rc) {
+		pr_err("Failed to enable mdss dsi pll=%d\n", pll->index);
+		return ret;
+	}
+
+	if (pll_is_pll_locked_8996(pll)) {
+		pll->handoff_resources = true;
+		pll->pll_on = true;
+		c->rate = pll_vco_get_rate_8996(c);
+		ret = HANDOFF_ENABLED_CLK;
+	} else {
+		mdss_pll_resource_enable(pll, false);
+	}
+
+	return ret;
+}
+
+enum handoff shadow_pll_vco_handoff_8996(struct clk *c)
+{
+	return HANDOFF_DISABLED_CLK;
+}
+
+int pll_vco_prepare_8996(struct clk *c)
+{
+	int rc = 0;
+	struct dsi_pll_vco_clk *vco = to_vco_clk(c);
+	struct mdss_pll_resources *pll = vco->priv;
+
+	if (!pll) {
+		pr_err("Dsi pll resources are not available\n");
+		return -EINVAL;
+	}
+
+	rc = mdss_pll_resource_enable(pll, true);
+	if (rc) {
+		pr_err("ndx=%d Failed to enable mdss dsi pll resources\n",
+							pll->index);
+		return rc;
+	}
+
+	if ((pll->vco_cached_rate != 0)
+	    && (pll->vco_cached_rate == c->rate)) {
+		rc = c->ops->set_rate(c, pll->vco_cached_rate);
+		if (rc) {
+			pr_err("index=%d vco_set_rate failed. rc=%d\n",
+					rc, pll->index);
+			mdss_pll_resource_enable(pll, false);
+			goto error;
+		}
+	}
+
+	rc = dsi_pll_enable(c);
+
+	if (rc) {
+		mdss_pll_resource_enable(pll, false);
+		pr_err("ndx=%d failed to enable dsi pll\n", pll->index);
+	}
+
+error:
+	return rc;
+}
+
+void pll_vco_unprepare_8996(struct clk *c)
+{
+	struct dsi_pll_vco_clk *vco = to_vco_clk(c);
+	struct mdss_pll_resources *pll = vco->priv;
+
+	if (!pll) {
+		pr_err("Dsi pll resources are not available\n");
+		return;
+	}
+
+	pll->vco_cached_rate = c->rate;
+	dsi_pll_disable(c);
+}
diff -Nruw linux-4.4.115/drivers/clk/msm/mdss/mdss-dsi-pll-8998.c linux-4.4.115-fbx/drivers/clk/msm/mdss/mdss-dsi-pll-8998.c
--- linux-4.4.115/drivers/clk/msm/mdss/mdss-dsi-pll-8998.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/clk/msm/mdss/mdss-dsi-pll-8998.c	2019-01-22 16:16:23.019242024 +0100
@@ -0,0 +1,1814 @@
+/*
+ * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt)	"%s: " fmt, __func__
+
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/iopoll.h>
+#include <linux/delay.h>
+#include <linux/clk/msm-clk-provider.h>
+#include <linux/clk/msm-clk.h>
+#include <linux/clk/msm-clock-generic.h>
+#include <dt-bindings/clock/msm-clocks-8998.h>
+
+#include "mdss-pll.h"
+#include "mdss-dsi-pll.h"
+#include "mdss-pll.h"
+
+#define VCO_DELAY_USEC 1
+
+#define MHZ_250		250000000UL
+#define MHZ_500		500000000UL
+#define MHZ_1000	1000000000UL
+#define MHZ_1100	1100000000UL
+#define MHZ_1900	1900000000UL
+#define MHZ_3000	3000000000UL
+
+/* Register Offsets from PLL base address */
+#define PLL_ANALOG_CONTROLS_ONE			0x000
+#define PLL_ANALOG_CONTROLS_TWO			0x004
+#define PLL_INT_LOOP_SETTINGS			0x008
+#define PLL_INT_LOOP_SETTINGS_TWO		0x00c
+#define PLL_ANALOG_CONTROLS_THREE		0x010
+#define PLL_ANALOG_CONTROLS_FOUR		0x014
+#define PLL_INT_LOOP_CONTROLS			0x018
+#define PLL_DSM_DIVIDER					0x01c
+#define PLL_FEEDBACK_DIVIDER			0x020
+#define PLL_SYSTEM_MUXES			0x024
+#define PLL_FREQ_UPDATE_CONTROL_OVERRIDES			0x028
+#define PLL_CMODE				0x02c
+#define PLL_CALIBRATION_SETTINGS		0x030
+#define PLL_BAND_SEL_CAL_TIMER_LOW		0x034
+#define PLL_BAND_SEL_CAL_TIMER_HIGH		0x038
+#define PLL_BAND_SEL_CAL_SETTINGS		0x03c
+#define PLL_BAND_SEL_MIN		0x040
+#define PLL_BAND_SEL_MAX		0x044
+#define PLL_BAND_SEL_PFILT		0x048
+#define PLL_BAND_SEL_IFILT		0x04c
+#define PLL_BAND_SEL_CAL_SETTINGS_TWO		0x050
+#define PLL_BAND_SEL_CAL_SETTINGS_THREE		0x054
+#define PLL_BAND_SEL_CAL_SETTINGS_FOUR		0x058
+#define PLL_BAND_SEL_ICODE_HIGH				0x05c
+#define PLL_BAND_SEL_ICODE_LOW				0x060
+#define PLL_FREQ_DETECT_SETTINGS_ONE		0x064
+#define PLL_PFILT				0x07c
+#define PLL_IFILT				0x080
+#define PLL_GAIN				0x084
+#define PLL_ICODE_LOW			0x088
+#define PLL_ICODE_HIGH			0x08c
+#define PLL_LOCKDET				0x090
+#define PLL_OUTDIV				0x094
+#define PLL_FASTLOCK_CONTROL	0x098
+#define PLL_PASS_OUT_OVERRIDE_ONE		0x09c
+#define PLL_PASS_OUT_OVERRIDE_TWO		0x0a0
+#define PLL_CORE_OVERRIDE				0x0a4
+#define PLL_CORE_INPUT_OVERRIDE			0x0a8
+#define PLL_RATE_CHANGE					0x0ac
+#define PLL_PLL_DIGITAL_TIMERS			0x0b0
+#define PLL_PLL_DIGITAL_TIMERS_TWO		0x0b4
+#define PLL_DEC_FRAC_MUXES				0x0c8
+#define PLL_DECIMAL_DIV_START_1			0x0cc
+#define PLL_FRAC_DIV_START_LOW_1		0x0d0
+#define PLL_FRAC_DIV_START_MID_1		0x0d4
+#define PLL_FRAC_DIV_START_HIGH_1		0x0d8
+#define PLL_MASH_CONTROL				0x0ec
+#define PLL_SSC_MUX_CONTROL				0x108
+#define PLL_SSC_STEPSIZE_LOW_1			0x10c
+#define PLL_SSC_STEPSIZE_HIGH_1			0x110
+#define PLL_SSC_DIV_PER_LOW_1			0x114
+#define PLL_SSC_DIV_PER_HIGH_1			0x118
+#define PLL_SSC_DIV_ADJPER_LOW_1		0x11c
+#define PLL_SSC_DIV_ADJPER_HIGH_1		0x120
+#define PLL_SSC_CONTROL					0x13c
+#define PLL_PLL_OUTDIV_RATE				0x140
+#define PLL_PLL_LOCKDET_RATE_1			0x144
+#define PLL_PLL_PROP_GAIN_RATE_1		0x14c
+#define PLL_PLL_BAND_SET_RATE_1			0x154
+#define PLL_PLL_INT_GAIN_IFILT_BAND_1		0x15c
+#define PLL_PLL_FL_INT_GAIN_PFILT_BAND_1	0x164
+#define PLL_FASTLOCK_EN_BAND				0x16c
+#define PLL_FREQ_TUNE_ACCUM_INIT_MUX		0x17c
+#define PLL_PLL_LOCK_OVERRIDE				0x180
+#define PLL_PLL_LOCK_DELAY					0x184
+#define PLL_PLL_LOCK_MIN_DELAY				0x188
+#define PLL_CLOCK_INVERTERS					0x18c
+#define PLL_SPARE_AND_JPC_OVERRIDES			0x190
+#define PLL_BIAS_CONTROL_1					0x194
+#define PLL_BIAS_CONTROL_2					0x198
+#define PLL_ALOG_OBSV_BUS_CTRL_1			0x19c
+#define PLL_COMMON_STATUS_ONE				0x1a0
+
+/* Register Offsets from PHY base address */
+#define PHY_CMN_CLK_CFG0	0x010
+#define PHY_CMN_CLK_CFG1	0x014
+#define PHY_CMN_RBUF_CTRL	0x01c
+#define PHY_CMN_PLL_CNTRL	0x038
+#define PHY_CMN_CTRL_0		0x024
+
+/* Bit definition of SSC control registers */
+#define SSC_CENTER		BIT(0)
+#define SSC_EN			BIT(1)
+#define SSC_FREQ_UPDATE		BIT(2)
+#define SSC_FREQ_UPDATE_MUX	BIT(3)
+#define SSC_UPDATE_SSC		BIT(4)
+#define SSC_UPDATE_SSC_MUX	BIT(5)
+#define SSC_START		BIT(6)
+#define SSC_START_MUX		BIT(7)
+
+enum {
+	DSI_PLL_0,
+	DSI_PLL_1,
+	DSI_PLL_MAX
+};
+
+struct dsi_pll_regs {
+	u32 pll_prop_gain_rate;
+	u32 pll_outdiv_rate;
+	u32 pll_lockdet_rate;
+	u32 decimal_div_start;
+	u32 frac_div_start_low;
+	u32 frac_div_start_mid;
+	u32 frac_div_start_high;
+	u32 pll_clock_inverters;
+	u32 ssc_stepsize_low;
+	u32 ssc_stepsize_high;
+	u32 ssc_div_per_low;
+	u32 ssc_div_per_high;
+	u32 ssc_adjper_low;
+	u32 ssc_adjper_high;
+	u32 ssc_control;
+};
+
+struct dsi_pll_config {
+	u32 ref_freq;
+	bool ignore_frac;
+	bool disable_prescaler;
+	bool enable_ssc;
+	bool ssc_center;
+	u32 dec_bits;
+	u32 frac_bits;
+	u32 lock_timer;
+	u32 ssc_freq;
+	u32 ssc_offset;
+	u32 ssc_adj_per;
+	u32 thresh_cycles;
+	u32 refclk_cycles;
+};
+
+struct dsi_pll_8998 {
+	struct mdss_pll_resources *rsc;
+	struct dsi_pll_config pll_configuration;
+	struct dsi_pll_regs reg_setup;
+};
+
+static struct mdss_pll_resources *pll_rsc_db[DSI_PLL_MAX];
+static struct dsi_pll_8998 plls[DSI_PLL_MAX];
+
+static void dsi_pll_config_slave(struct mdss_pll_resources *rsc)
+{
+	u32 reg;
+	struct mdss_pll_resources *orsc = pll_rsc_db[DSI_PLL_1];
+
+	if (!rsc)
+		return;
+
+	/* Only DSI PLL0 can act as a master */
+	if (rsc->index != DSI_PLL_0)
+		return;
+
+	/* default configuration: source is either internal or ref clock */
+	rsc->slave = NULL;
+
+	if (!orsc) {
+		pr_warn("slave PLL unavilable, assuming standalone config\n");
+		return;
+	}
+
+	/* check to see if the source of DSI1 PLL bitclk is set to external */
+	reg = MDSS_PLL_REG_R(orsc->phy_base, PHY_CMN_CLK_CFG1);
+	reg &= (BIT(2) | BIT(3));
+	if (reg == 0x04)
+		rsc->slave = pll_rsc_db[DSI_PLL_1]; /* external source */
+
+	pr_debug("Slave PLL %s\n", rsc->slave ? "configured" : "absent");
+}
+
+static void dsi_pll_setup_config(struct dsi_pll_8998 *pll,
+				 struct mdss_pll_resources *rsc)
+{
+	struct dsi_pll_config *config = &pll->pll_configuration;
+
+	config->ref_freq = 19200000;
+	config->dec_bits = 8;
+	config->frac_bits = 18;
+	config->lock_timer = 64;
+	config->ssc_freq = 31500;
+	config->ssc_offset = 5000;
+	config->ssc_adj_per = 2;
+	config->thresh_cycles = 32;
+	config->refclk_cycles = 256;
+
+	config->ignore_frac = false;
+	config->disable_prescaler = false;
+	config->enable_ssc = rsc->ssc_en;
+	config->ssc_center = rsc->ssc_center;
+
+	if (config->enable_ssc) {
+		if (rsc->ssc_freq)
+			config->ssc_freq = rsc->ssc_freq;
+		if (rsc->ssc_ppm)
+			config->ssc_offset = rsc->ssc_ppm;
+	}
+
+	dsi_pll_config_slave(rsc);
+}
+
+static void dsi_pll_calc_dec_frac(struct dsi_pll_8998 *pll,
+				  struct mdss_pll_resources *rsc)
+{
+	struct dsi_pll_config *config = &pll->pll_configuration;
+	struct dsi_pll_regs *regs = &pll->reg_setup;
+	u64 fref = rsc->vco_ref_clk_rate;
+	u64 pll_freq;
+	u64 divider;
+	u64 dec, dec_multiple;
+	u32 frac;
+	u64 multiplier;
+
+	pll_freq = rsc->vco_current_rate;
+
+	if (config->disable_prescaler)
+		divider = fref;
+	else
+		divider = fref * 2;
+
+	multiplier = 1 << config->frac_bits;
+	dec_multiple = div_u64(pll_freq * multiplier, divider);
+	div_u64_rem(dec_multiple, multiplier, &frac);
+
+	dec = div_u64(dec_multiple, multiplier);
+
+	if (pll_freq <= MHZ_1900)
+		regs->pll_prop_gain_rate = 8;
+	else if (pll_freq <= MHZ_3000)
+		regs->pll_prop_gain_rate = 10;
+	else
+		regs->pll_prop_gain_rate = 12;
+
+	if (pll_freq < MHZ_1100)
+		regs->pll_clock_inverters = 8;
+	else
+		regs->pll_clock_inverters = 0;
+
+	regs->pll_lockdet_rate = config->lock_timer;
+	regs->decimal_div_start = dec;
+	regs->frac_div_start_low = (frac & 0xff);
+	regs->frac_div_start_mid = (frac & 0xff00) >> 8;
+	regs->frac_div_start_high = (frac & 0x30000) >> 16;
+}
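+
+/*
+ * Worked example for the dec/frac split above (illustrative only): with
+ * the default 19.2 MHz reference, prescaler enabled (divider = 38.4 MHz)
+ * and frac_bits = 18, a target pll_freq of 1.5 GHz gives
+ *
+ *	dec_multiple = 1500000000 * 2^18 / 38400000 = 10240000
+ *	dec  = 10240000 / 2^18 = 39
+ *	frac = 10240000 % 2^18 = 16384 = 0x4000
+ *
+ * so DECIMAL_DIV_START_1 is programmed with 39 and the FRAC_DIV_START
+ * low/mid/high registers with 0x00/0x40/0x00.
+ */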
+
+static void dsi_pll_calc_ssc(struct dsi_pll_8998 *pll,
+		  struct mdss_pll_resources *rsc)
+{
+	struct dsi_pll_config *config = &pll->pll_configuration;
+	struct dsi_pll_regs *regs = &pll->reg_setup;
+	u32 ssc_per;
+	u32 ssc_mod;
+	u64 ssc_step_size;
+	u64 frac;
+
+	if (!config->enable_ssc) {
+		pr_debug("SSC not enabled\n");
+		return;
+	}
+
+	ssc_per = DIV_ROUND_CLOSEST(config->ref_freq, config->ssc_freq) / 2 - 1;
+	ssc_mod = (ssc_per + 1) % (config->ssc_adj_per + 1);
+	ssc_per -= ssc_mod;
+
+	frac = regs->frac_div_start_low |
+			(regs->frac_div_start_mid << 8) |
+			(regs->frac_div_start_high << 16);
+	ssc_step_size = regs->decimal_div_start;
+	ssc_step_size *= (1 << config->frac_bits);
+	ssc_step_size += frac;
+	ssc_step_size *= config->ssc_offset;
+	ssc_step_size *= (config->ssc_adj_per + 1);
+	ssc_step_size = div_u64(ssc_step_size, (ssc_per + 1));
+	ssc_step_size = DIV_ROUND_CLOSEST_ULL(ssc_step_size, 1000000);
+
+	regs->ssc_div_per_low = ssc_per & 0xFF;
+	regs->ssc_div_per_high = (ssc_per & 0xFF00) >> 8;
+	regs->ssc_stepsize_low = (u32)(ssc_step_size & 0xFF);
+	regs->ssc_stepsize_high = (u32)((ssc_step_size & 0xFF00) >> 8);
+	regs->ssc_adjper_low = config->ssc_adj_per & 0xFF;
+	regs->ssc_adjper_high = (config->ssc_adj_per & 0xFF00) >> 8;
+
+	regs->ssc_control = config->ssc_center ? SSC_CENTER : 0;
+
+	pr_debug("SCC: Dec:%d, frac:%llu, frac_bits:%d\n",
+			regs->decimal_div_start, frac, config->frac_bits);
+	pr_debug("SSC: div_per:0x%X, stepsize:0x%X, adjper:0x%X\n",
+			ssc_per, (u32)ssc_step_size, config->ssc_adj_per);
+}
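+
+/*
+ * Worked example for the SSC period above (illustrative only): with
+ * ref_freq = 19.2 MHz, ssc_freq = 31.5 kHz and ssc_adj_per = 2,
+ *
+ *	ssc_per = DIV_ROUND_CLOSEST(19200000, 31500) / 2 - 1 = 304
+ *	ssc_mod = (304 + 1) % (2 + 1) = 2
+ *	ssc_per = 304 - 2 = 302 = 0x12e
+ *
+ * so SSC_DIV_PER_LOW/HIGH are programmed with 0x2e and 0x01, and the step
+ * size scales the combined dec/frac word by ssc_offset ppm over that
+ * period.
+ */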
+
+static void dsi_pll_ssc_commit(struct dsi_pll_8998 *pll,
+		struct mdss_pll_resources *rsc)
+{
+	void __iomem *pll_base = rsc->pll_base;
+	struct dsi_pll_regs *regs = &pll->reg_setup;
+
+	if (pll->pll_configuration.enable_ssc) {
+		pr_debug("SSC is enabled\n");
+
+		MDSS_PLL_REG_W(pll_base, PLL_SSC_STEPSIZE_LOW_1,
+				regs->ssc_stepsize_low);
+		MDSS_PLL_REG_W(pll_base, PLL_SSC_STEPSIZE_HIGH_1,
+				regs->ssc_stepsize_high);
+		MDSS_PLL_REG_W(pll_base, PLL_SSC_DIV_PER_LOW_1,
+				regs->ssc_div_per_low);
+		MDSS_PLL_REG_W(pll_base, PLL_SSC_DIV_PER_HIGH_1,
+				regs->ssc_div_per_high);
+		MDSS_PLL_REG_W(pll_base, PLL_SSC_DIV_ADJPER_LOW_1,
+				regs->ssc_adjper_low);
+		MDSS_PLL_REG_W(pll_base, PLL_SSC_DIV_ADJPER_HIGH_1,
+				regs->ssc_adjper_high);
+		MDSS_PLL_REG_W(pll_base, PLL_SSC_CONTROL,
+				SSC_EN | regs->ssc_control);
+	}
+}
+
+static void dsi_pll_config_hzindep_reg(struct dsi_pll_8998 *pll,
+				  struct mdss_pll_resources *rsc)
+{
+	void __iomem *pll_base = rsc->pll_base;
+
+	MDSS_PLL_REG_W(pll_base, PLL_ANALOG_CONTROLS_ONE, 0x80);
+	MDSS_PLL_REG_W(pll_base, PLL_ANALOG_CONTROLS_TWO, 0x03);
+	MDSS_PLL_REG_W(pll_base, PLL_ANALOG_CONTROLS_THREE, 0x00);
+	MDSS_PLL_REG_W(pll_base, PLL_DSM_DIVIDER, 0x00);
+	MDSS_PLL_REG_W(pll_base, PLL_FEEDBACK_DIVIDER, 0x4e);
+	MDSS_PLL_REG_W(pll_base, PLL_CALIBRATION_SETTINGS, 0x40);
+	MDSS_PLL_REG_W(pll_base, PLL_BAND_SEL_CAL_SETTINGS_THREE, 0xba);
+	MDSS_PLL_REG_W(pll_base, PLL_FREQ_DETECT_SETTINGS_ONE, 0x0c);
+	MDSS_PLL_REG_W(pll_base, PLL_OUTDIV, 0x00);
+	MDSS_PLL_REG_W(pll_base, PLL_CORE_OVERRIDE, 0x00);
+	MDSS_PLL_REG_W(pll_base, PLL_PLL_DIGITAL_TIMERS_TWO, 0x08);
+	MDSS_PLL_REG_W(pll_base, PLL_PLL_PROP_GAIN_RATE_1, 0x08);
+	MDSS_PLL_REG_W(pll_base, PLL_PLL_BAND_SET_RATE_1, 0xc0);
+	MDSS_PLL_REG_W(pll_base, PLL_PLL_INT_GAIN_IFILT_BAND_1, 0x82);
+	MDSS_PLL_REG_W(pll_base, PLL_PLL_FL_INT_GAIN_PFILT_BAND_1, 0x4c);
+	MDSS_PLL_REG_W(pll_base, PLL_PLL_LOCK_OVERRIDE, 0x80);
+	MDSS_PLL_REG_W(pll_base, PLL_PFILT, 0x29);
+	MDSS_PLL_REG_W(pll_base, PLL_IFILT, 0x3f);
+}
+
+static void dsi_pll_init_val(struct mdss_pll_resources *rsc)
+{
+	void __iomem *pll_base = rsc->pll_base;
+
+	MDSS_PLL_REG_W(pll_base, PLL_CORE_INPUT_OVERRIDE, 0x10);
+	MDSS_PLL_REG_W(pll_base, PLL_INT_LOOP_SETTINGS, 0x3f);
+	MDSS_PLL_REG_W(pll_base, PLL_INT_LOOP_SETTINGS_TWO, 0x0);
+	MDSS_PLL_REG_W(pll_base, PLL_ANALOG_CONTROLS_FOUR, 0x0);
+	MDSS_PLL_REG_W(pll_base, PLL_INT_LOOP_CONTROLS, 0x80);
+	MDSS_PLL_REG_W(pll_base, PLL_FREQ_UPDATE_CONTROL_OVERRIDES, 0x0);
+	MDSS_PLL_REG_W(pll_base, PLL_BAND_SEL_CAL_TIMER_LOW, 0x0);
+	MDSS_PLL_REG_W(pll_base, PLL_BAND_SEL_CAL_TIMER_HIGH, 0x02);
+	MDSS_PLL_REG_W(pll_base, PLL_BAND_SEL_CAL_SETTINGS, 0x82);
+	MDSS_PLL_REG_W(pll_base, PLL_BAND_SEL_MIN, 0x00);
+	MDSS_PLL_REG_W(pll_base, PLL_BAND_SEL_MAX, 0xff);
+	MDSS_PLL_REG_W(pll_base, PLL_BAND_SEL_PFILT, 0x00);
+	MDSS_PLL_REG_W(pll_base, PLL_BAND_SEL_IFILT, 0x00);
+	MDSS_PLL_REG_W(pll_base, PLL_BAND_SEL_CAL_SETTINGS_TWO, 0x25);
+	MDSS_PLL_REG_W(pll_base, PLL_BAND_SEL_CAL_SETTINGS_FOUR, 0x4f);
+	MDSS_PLL_REG_W(pll_base, PLL_BAND_SEL_ICODE_HIGH, 0x0a);
+	MDSS_PLL_REG_W(pll_base, PLL_BAND_SEL_ICODE_LOW, 0x0);
+	MDSS_PLL_REG_W(pll_base, PLL_GAIN, 0x42);
+	MDSS_PLL_REG_W(pll_base, PLL_ICODE_LOW, 0x00);
+	MDSS_PLL_REG_W(pll_base, PLL_ICODE_HIGH, 0x00);
+	MDSS_PLL_REG_W(pll_base, PLL_LOCKDET, 0x30);
+	MDSS_PLL_REG_W(pll_base, PLL_FASTLOCK_CONTROL, 0x04);
+	MDSS_PLL_REG_W(pll_base, PLL_PASS_OUT_OVERRIDE_ONE, 0x00);
+	MDSS_PLL_REG_W(pll_base, PLL_PASS_OUT_OVERRIDE_TWO, 0x00);
+	MDSS_PLL_REG_W(pll_base, PLL_RATE_CHANGE, 0x01);
+	MDSS_PLL_REG_W(pll_base, PLL_PLL_DIGITAL_TIMERS, 0x08);
+	MDSS_PLL_REG_W(pll_base, PLL_DEC_FRAC_MUXES, 0x00);
+	MDSS_PLL_REG_W(pll_base, PLL_MASH_CONTROL, 0x03);
+	MDSS_PLL_REG_W(pll_base, PLL_SSC_MUX_CONTROL, 0x0);
+	MDSS_PLL_REG_W(pll_base, PLL_SSC_CONTROL, 0x0);
+	MDSS_PLL_REG_W(pll_base, PLL_FASTLOCK_EN_BAND, 0x03);
+	MDSS_PLL_REG_W(pll_base, PLL_FREQ_TUNE_ACCUM_INIT_MUX, 0x0);
+	MDSS_PLL_REG_W(pll_base, PLL_PLL_LOCK_MIN_DELAY, 0x19);
+	MDSS_PLL_REG_W(pll_base, PLL_SPARE_AND_JPC_OVERRIDES, 0x0);
+	MDSS_PLL_REG_W(pll_base, PLL_BIAS_CONTROL_1, 0x40);
+	MDSS_PLL_REG_W(pll_base, PLL_BIAS_CONTROL_2, 0x20);
+	MDSS_PLL_REG_W(pll_base, PLL_ALOG_OBSV_BUS_CTRL_1, 0x0);
+}
+
+static void dsi_pll_commit(struct dsi_pll_8998 *pll,
+			   struct mdss_pll_resources *rsc)
+{
+	void __iomem *pll_base = rsc->pll_base;
+	struct dsi_pll_regs *reg = &pll->reg_setup;
+
+	MDSS_PLL_REG_W(pll_base, PLL_CORE_INPUT_OVERRIDE, 0x12);
+	MDSS_PLL_REG_W(pll_base, PLL_DECIMAL_DIV_START_1,
+		       reg->decimal_div_start);
+	MDSS_PLL_REG_W(pll_base, PLL_FRAC_DIV_START_LOW_1,
+		       reg->frac_div_start_low);
+	MDSS_PLL_REG_W(pll_base, PLL_FRAC_DIV_START_MID_1,
+		       reg->frac_div_start_mid);
+	MDSS_PLL_REG_W(pll_base, PLL_FRAC_DIV_START_HIGH_1,
+		       reg->frac_div_start_high);
+	MDSS_PLL_REG_W(pll_base, PLL_PLL_LOCKDET_RATE_1, 0x40);
+	MDSS_PLL_REG_W(pll_base, PLL_PLL_LOCK_DELAY, 0x06);
+	MDSS_PLL_REG_W(pll_base, PLL_CMODE, 0x10);
+	MDSS_PLL_REG_W(pll_base, PLL_CLOCK_INVERTERS, reg->pll_clock_inverters);
+}
+
+static int vco_8998_set_rate(struct clk *c, unsigned long rate)
+{
+	int rc;
+	struct dsi_pll_vco_clk *vco = to_vco_clk(c);
+	struct mdss_pll_resources *rsc = vco->priv;
+	struct dsi_pll_8998 *pll;
+
+	if (!rsc) {
+		pr_err("pll resource not found\n");
+		return -EINVAL;
+	}
+
+	if (rsc->pll_on)
+		return 0;
+
+	pll = rsc->priv;
+	if (!pll) {
+		pr_err("pll configuration not found\n");
+		return -EINVAL;
+	}
+
+	pr_debug("ndx=%d, rate=%lu\n", rsc->index, rate);
+
+	rsc->vco_current_rate = rate;
+	rsc->vco_ref_clk_rate = vco->ref_clk_rate;
+
+	rc = mdss_pll_resource_enable(rsc, true);
+	if (rc) {
+		pr_err("failed to enable mdss dsi pll(%d), rc=%d\n",
+		       rsc->index, rc);
+		return rc;
+	}
+
+	dsi_pll_init_val(rsc);
+
+	dsi_pll_setup_config(pll, rsc);
+
+	dsi_pll_calc_dec_frac(pll, rsc);
+
+	dsi_pll_calc_ssc(pll, rsc);
+
+	dsi_pll_commit(pll, rsc);
+
+	dsi_pll_config_hzindep_reg(pll, rsc);
+
+	dsi_pll_ssc_commit(pll, rsc);
+
+	/* Flush to ensure all register writes are done */
+	wmb();
+
+	mdss_pll_resource_enable(rsc, false);
+
+	return 0;
+}
+
+static int dsi_pll_8998_lock_status(struct mdss_pll_resources *pll)
+{
+	int rc;
+	u32 status;
+	u32 const delay_us = 100;
+	u32 const timeout_us = 5000;
+
+	rc = readl_poll_timeout_atomic(pll->pll_base + PLL_COMMON_STATUS_ONE,
+				       status,
+				       ((status & BIT(0)) > 0),
+				       delay_us,
+				       timeout_us);
+	if (rc)
+		pr_err("DSI PLL(%d) lock failed, status=0x%08x\n",
+			pll->index, status);
+
+	return rc;
+}
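+
+/*
+ * Note: with the poll parameters above, lock status is sampled every
+ * 100 us for up to 5 ms, i.e. at most ~50 reads before the PLL is
+ * declared failed.
+ */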
+
+static void dsi_pll_disable_pll_bias(struct mdss_pll_resources *rsc)
+{
+	u32 data = MDSS_PLL_REG_R(rsc->phy_base, PHY_CMN_CTRL_0);
+
+	MDSS_PLL_REG_W(rsc->pll_base, PLL_SYSTEM_MUXES, 0);
+	MDSS_PLL_REG_W(rsc->phy_base, PHY_CMN_CTRL_0, data & ~BIT(5));
+	ndelay(250);
+}
+
+static void dsi_pll_enable_pll_bias(struct mdss_pll_resources *rsc)
+{
+	u32 data = MDSS_PLL_REG_R(rsc->phy_base, PHY_CMN_CTRL_0);
+
+	MDSS_PLL_REG_W(rsc->phy_base, PHY_CMN_CTRL_0, data | BIT(5));
+	MDSS_PLL_REG_W(rsc->pll_base, PLL_SYSTEM_MUXES, 0xc0);
+	ndelay(250);
+}
+
+static void dsi_pll_disable_global_clk(struct mdss_pll_resources *rsc)
+{
+	u32 data;
+
+	data = MDSS_PLL_REG_R(rsc->phy_base, PHY_CMN_CLK_CFG1);
+	MDSS_PLL_REG_W(rsc->phy_base, PHY_CMN_CLK_CFG1, (data & ~BIT(5)));
+}
+
+static void dsi_pll_enable_global_clk(struct mdss_pll_resources *rsc)
+{
+	u32 data;
+
+	data = MDSS_PLL_REG_R(rsc->phy_base, PHY_CMN_CLK_CFG1);
+	MDSS_PLL_REG_W(rsc->phy_base, PHY_CMN_CLK_CFG1, (data | BIT(5)));
+}
+
+static int dsi_pll_enable(struct dsi_pll_vco_clk *vco)
+{
+	int rc;
+	struct mdss_pll_resources *rsc = vco->priv;
+	struct dsi_pll_8998 *pll = rsc->priv;
+	struct dsi_pll_regs *regs = &pll->reg_setup;
+
+	dsi_pll_enable_pll_bias(rsc);
+	if (rsc->slave)
+		dsi_pll_enable_pll_bias(rsc->slave);
+
+	/*
+	 * The PLL out dividers are fixed divider clocks, so set_div is not
+	 * called during the set_rate cycle of the tree. The outdiv rate is
+	 * therefore set in the pll out mux's set_sel callback, but that is
+	 * only called after the vco's set_rate. Hence the PLL out div value
+	 * is set here before locking the PLL.
+	 */
+	MDSS_PLL_REG_W(rsc->pll_base, PLL_PLL_OUTDIV_RATE,
+		regs->pll_outdiv_rate);
+
+	/* Start PLL */
+	MDSS_PLL_REG_W(rsc->phy_base, PHY_CMN_PLL_CNTRL, 0x01);
+
+	/*
+	 * ensure all PLL configurations are written prior to checking
+	 * for PLL lock.
+	 */
+	wmb();
+
+	/* Check for PLL lock */
+	rc = dsi_pll_8998_lock_status(rsc);
+	if (rc) {
+		pr_err("PLL(%d) lock failed\n", rsc->index);
+		goto error;
+	}
+
+	rsc->pll_on = true;
+
+	dsi_pll_enable_global_clk(rsc);
+	if (rsc->slave)
+		dsi_pll_enable_global_clk(rsc->slave);
+
+	MDSS_PLL_REG_W(rsc->phy_base, PHY_CMN_RBUF_CTRL, 0x01);
+	if (rsc->slave)
+		MDSS_PLL_REG_W(rsc->slave->phy_base, PHY_CMN_RBUF_CTRL, 0x01);
+
+error:
+	return rc;
+}
+
+static void dsi_pll_disable_sub(struct mdss_pll_resources *rsc)
+{
+	MDSS_PLL_REG_W(rsc->phy_base, PHY_CMN_RBUF_CTRL, 0);
+	dsi_pll_disable_pll_bias(rsc);
+}
+
+static void dsi_pll_disable(struct dsi_pll_vco_clk *vco)
+{
+	struct mdss_pll_resources *rsc = vco->priv;
+
+	if (!rsc->pll_on &&
+	    mdss_pll_resource_enable(rsc, true)) {
+		pr_err("failed to enable pll (%d) resources\n", rsc->index);
+		return;
+	}
+
+	rsc->handoff_resources = false;
+
+	pr_debug("stop PLL (%d)\n", rsc->index);
+
+	/*
+	 * To avoid any stray glitches while abruptly powering down the PLL,
+	 * make sure to gate the clock using the clock enable bit before
+	 * powering down the PLL.
+	 */
+	dsi_pll_disable_global_clk(rsc);
+	MDSS_PLL_REG_W(rsc->phy_base, PHY_CMN_PLL_CNTRL, 0);
+	dsi_pll_disable_sub(rsc);
+	if (rsc->slave) {
+		dsi_pll_disable_global_clk(rsc->slave);
+		dsi_pll_disable_sub(rsc->slave);
+	}
+	/* Flush to ensure all register writes are done */
+	wmb();
+	rsc->pll_on = false;
+}
+
+static void vco_8998_unprepare(struct clk *c)
+{
+	struct dsi_pll_vco_clk *vco = to_vco_clk(c);
+	struct mdss_pll_resources *pll = vco->priv;
+
+	if (!pll) {
+		pr_err("dsi pll resources not available\n");
+		return;
+	}
+
+	pll->vco_cached_rate = c->rate;
+	dsi_pll_disable(vco);
+	mdss_pll_resource_enable(pll, false);
+}
+
+static int vco_8998_prepare(struct clk *c)
+{
+	int rc = 0;
+	struct dsi_pll_vco_clk *vco = to_vco_clk(c);
+	struct mdss_pll_resources *pll = vco->priv;
+
+	if (!pll) {
+		pr_err("dsi pll resources are not available\n");
+		return -EINVAL;
+	}
+
+	rc = mdss_pll_resource_enable(pll, true);
+	if (rc) {
+		pr_err("failed to enable pll (%d) resource, rc=%d\n",
+		       pll->index, rc);
+		return rc;
+	}
+
+	if ((pll->vco_cached_rate != 0) &&
+	    (pll->vco_cached_rate == c->rate)) {
+		rc = c->ops->set_rate(c, pll->vco_cached_rate);
+		if (rc) {
+			pr_err("pll(%d) set_rate failed, rc=%d\n",
+			       pll->index, rc);
+			mdss_pll_resource_enable(pll, false);
+			return rc;
+		}
+	}
+
+	rc = dsi_pll_enable(vco);
+	if (rc) {
+		mdss_pll_resource_enable(pll, false);
+		pr_err("pll(%d) enable failed, rc=%d\n", pll->index, rc);
+		return rc;
+	}
+
+	return rc;
+}
+
+static unsigned long dsi_pll_get_vco_rate(struct clk *c)
+{
+	struct dsi_pll_vco_clk *vco = to_vco_clk(c);
+	struct mdss_pll_resources *rsc = vco->priv;
+	struct dsi_pll_8998 *pll = rsc->priv;
+	struct dsi_pll_regs *regs = &pll->reg_setup;
+	int rc;
+	u64 ref_clk = vco->ref_clk_rate;
+	u64 vco_rate;
+	u64 multiplier;
+	u32 frac;
+	u32 dec;
+	u32 outdiv;
+	u64 pll_freq, tmp64;
+
+	rc = mdss_pll_resource_enable(rsc, true);
+	if (rc) {
+		pr_err("failed to enable pll(%d) resource, rc=%d\n",
+		       rsc->index, rc);
+		return 0;
+	}
+
+	dec = MDSS_PLL_REG_R(rsc->pll_base, PLL_DECIMAL_DIV_START_1);
+	dec &= 0xFF;
+
+	frac = MDSS_PLL_REG_R(rsc->pll_base, PLL_FRAC_DIV_START_LOW_1);
+	frac |= ((MDSS_PLL_REG_R(rsc->pll_base, PLL_FRAC_DIV_START_MID_1) &
+		  0xFF) <<
+		8);
+	frac |= ((MDSS_PLL_REG_R(rsc->pll_base, PLL_FRAC_DIV_START_HIGH_1) &
+		  0x3) <<
+		16);
+
+	/* The OUTDIV[1:0] field holds log2(outdiv) */
+	outdiv = MDSS_PLL_REG_R(rsc->pll_base, PLL_PLL_OUTDIV_RATE);
+	outdiv &= 0x3;
+
+	regs->pll_outdiv_rate = outdiv;
+
+	outdiv = 1 << outdiv;
+
+	/*
+	 * TODO:
+	 *	1. Assumes the prescaler is disabled
+	 *	2. The multiplier is hard-coded to 2^18; it should be
+	 *	   2^(num_of_frac_bits)
+	 */
+	multiplier = 1 << 18;
+	pll_freq = dec * (ref_clk * 2);
+	tmp64 = (ref_clk * 2 * frac);
+	pll_freq += div_u64(tmp64, multiplier);
+
+	vco_rate = div_u64(pll_freq, outdiv);
+
+	pr_debug("dec=0x%x, frac=0x%x, outdiv=%d, vco=%llu\n",
+		 dec, frac, outdiv, vco_rate);
+
+	(void)mdss_pll_resource_enable(rsc, false);
+
+	return (unsigned long)vco_rate;
+}
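+
+/*
+ * Worked example (illustrative only), inverting dsi_pll_calc_dec_frac():
+ * reading back dec = 39, frac = 0x4000 and outdiv = 1 with a 19.2 MHz
+ * reference gives
+ *
+ *	pll_freq = 39 * 38.4 MHz + 38.4 MHz * 16384 / 2^18
+ *		 = 1497.6 MHz + 2.4 MHz = 1.5 GHz
+ *	vco_rate = 1.5 GHz / 1 = 1500000000
+ */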
+
+enum handoff vco_8998_handoff(struct clk *c)
+{
+	enum handoff ret = HANDOFF_DISABLED_CLK;
+	int rc;
+	struct dsi_pll_vco_clk *vco = to_vco_clk(c);
+	struct mdss_pll_resources *pll = vco->priv;
+	u32 status;
+
+	if (!pll) {
+		pr_err("Unable to find pll resource\n");
+		return HANDOFF_DISABLED_CLK;
+	}
+
+	rc = mdss_pll_resource_enable(pll, true);
+	if (rc) {
+		pr_err("failed to enable pll(%d) resources, rc=%d\n",
+		       pll->index, rc);
+		return ret;
+	}
+
+	status = MDSS_PLL_REG_R(pll->pll_base, PLL_COMMON_STATUS_ONE);
+	if (status & BIT(0)) {
+		pll->handoff_resources = true;
+		pll->pll_on = true;
+		c->rate = dsi_pll_get_vco_rate(c);
+		ret = HANDOFF_ENABLED_CLK;
+	} else {
+		(void)mdss_pll_resource_enable(pll, false);
+		ret = HANDOFF_DISABLED_CLK;
+	}
+
+	return ret;
+}
+
+static int pixel_clk_get_div(struct div_clk *clk)
+{
+	int rc;
+	struct mdss_pll_resources *pll = clk->priv;
+	u32 reg_val;
+	int div;
+
+	rc = mdss_pll_resource_enable(pll, true);
+	if (rc) {
+		pr_err("Failed to enable dsi pll resources, rc=%d\n", rc);
+		return rc;
+	}
+
+	reg_val = MDSS_PLL_REG_R(pll->phy_base, PHY_CMN_CLK_CFG0);
+	div = (reg_val & 0xF0) >> 4;
+
+	(void)mdss_pll_resource_enable(pll, false);
+
+	return div;
+}
+
+static void pixel_clk_set_div_sub(struct mdss_pll_resources *pll, int div)
+{
+	u32 reg_val;
+
+	reg_val = MDSS_PLL_REG_R(pll->phy_base, PHY_CMN_CLK_CFG0);
+	reg_val &= ~0xF0;
+	reg_val |= (div << 4);
+	MDSS_PLL_REG_W(pll->phy_base, PHY_CMN_CLK_CFG0, reg_val);
+}
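+
+/*
+ * PHY_CMN_CLK_CFG0 packs both dividers into a single byte: bits [7:4]
+ * hold the pixel clock divider (masked with 0xF0 above) and bits [3:0]
+ * the bit clock divider (masked with 0x0F below). As an illustration,
+ * a pixel divider of 3 and a bit clock divider of 2 would be written
+ * as 0x32.
+ */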
+
+static int pixel_clk_set_div(struct div_clk *clk, int div)
+{
+	int rc;
+	struct mdss_pll_resources *pll = clk->priv;
+
+	rc = mdss_pll_resource_enable(pll, true);
+	if (rc) {
+		pr_err("Failed to enable dsi pll resources, rc=%d\n", rc);
+		return rc;
+	}
+
+	pixel_clk_set_div_sub(pll, div);
+	if (pll->slave)
+		pixel_clk_set_div_sub(pll->slave, div);
+
+	(void)mdss_pll_resource_enable(pll, false);
+
+	return 0;
+}
+
+static int bit_clk_get_div(struct div_clk *clk)
+{
+	int rc;
+	struct mdss_pll_resources *pll = clk->priv;
+	u32 reg_val;
+	int div;
+
+	rc = mdss_pll_resource_enable(pll, true);
+	if (rc) {
+		pr_err("Failed to enable dsi pll resources, rc=%d\n", rc);
+		return rc;
+	}
+
+	reg_val = MDSS_PLL_REG_R(pll->phy_base, PHY_CMN_CLK_CFG0);
+	div = (reg_val & 0x0F);
+
+	(void)mdss_pll_resource_enable(pll, false);
+
+	return div;
+}
+
+static void bit_clk_set_div_sub(struct mdss_pll_resources *rsc, int div)
+{
+	u32 reg_val;
+
+	reg_val = MDSS_PLL_REG_R(rsc->phy_base, PHY_CMN_CLK_CFG0);
+	reg_val &= ~0x0F;
+	reg_val |= div;
+	MDSS_PLL_REG_W(rsc->phy_base, PHY_CMN_CLK_CFG0, reg_val);
+}
+
+static int bit_clk_set_div(struct div_clk *clk, int div)
+{
+	int rc;
+	struct mdss_pll_resources *rsc = clk->priv;
+	struct dsi_pll_8998 *pll;
+
+	if (!rsc) {
+		pr_err("pll resource not found\n");
+		return -EINVAL;
+	}
+
+	pll = rsc->priv;
+	if (!pll) {
+		pr_err("pll configuration not found\n");
+		return -EINVAL;
+	}
+
+	rc = mdss_pll_resource_enable(rsc, true);
+	if (rc) {
+		pr_err("Failed to enable dsi pll resources, rc=%d\n", rc);
+		return rc;
+	}
+
+	bit_clk_set_div_sub(rsc, div);
+	/* For the slave PLL, this divider should always be set to 1 */
+	if (rsc->slave)
+		bit_clk_set_div_sub(rsc->slave, 1);
+
+	(void)mdss_pll_resource_enable(rsc, false);
+
+	return rc;
+}
+
+static int dsi_pll_out_set_mux_sel(struct mux_clk *clk, int sel)
+{
+	struct mdss_pll_resources *rsc = clk->priv;
+	struct dsi_pll_8998 *pll = rsc->priv;
+	struct dsi_pll_regs *regs = &pll->reg_setup;
+
+	regs->pll_outdiv_rate = sel;
+
+	return 0;
+}
+
+static int dsi_pll_out_get_mux_sel(struct mux_clk *clk)
+{
+	struct mdss_pll_resources *rsc = clk->priv;
+	struct dsi_pll_8998 *pll = rsc->priv;
+	struct dsi_pll_regs *regs = &pll->reg_setup;
+
+	return regs->pll_outdiv_rate;
+}
+
+static int post_vco_clk_get_div(struct div_clk *clk)
+{
+	int rc;
+	struct mdss_pll_resources *pll = clk->priv;
+	u32 reg_val;
+	int div;
+
+	rc = mdss_pll_resource_enable(pll, true);
+	if (rc) {
+		pr_err("Failed to enable dsi pll resources, rc=%d\n", rc);
+		return rc;
+	}
+
+	reg_val = MDSS_PLL_REG_R(pll->phy_base, PHY_CMN_CLK_CFG1);
+	reg_val &= 0x3;
+
+	if (reg_val == 2)
+		div = 1;
+	else if (reg_val == 3)
+		div = 4;
+	else
+		div = 1;
+
+	(void)mdss_pll_resource_enable(pll, false);
+
+	return div;
+}
+
+static int post_vco_clk_set_div_sub(struct mdss_pll_resources *pll, int div)
+{
+	u32 reg_val;
+	int rc = 0;
+
+	reg_val = MDSS_PLL_REG_R(pll->phy_base, PHY_CMN_CLK_CFG1);
+	reg_val &= ~0x03;
+	if (div == 1) {
+		reg_val |= 0x2;
+	} else if (div == 4) {
+		reg_val |= 0x3;
+	} else {
+		rc = -EINVAL;
+		pr_err("unsupported divider %d\n", div);
+		goto error;
+	}
+
+	MDSS_PLL_REG_W(pll->phy_base, PHY_CMN_CLK_CFG1, reg_val);
+
+error:
+	return rc;
+}
+
+static int post_vco_clk_set_div(struct div_clk *clk, int div)
+{
+	int rc = 0;
+	struct mdss_pll_resources *pll = clk->priv;
+
+	rc = mdss_pll_resource_enable(pll, true);
+	if (rc) {
+		pr_err("Failed to enable dsi pll resources, rc=%d\n", rc);
+		return rc;
+	}
+
+	rc = post_vco_clk_set_div_sub(pll, div);
+	if (!rc && pll->slave)
+		rc = post_vco_clk_set_div_sub(pll->slave, div);
+
+	(void)mdss_pll_resource_enable(pll, false);
+
+	return rc;
+}
+
+static int post_bit_clk_get_div(struct div_clk *clk)
+{
+	int rc;
+	struct mdss_pll_resources *pll = clk->priv;
+	u32 reg_val;
+	int div;
+
+	rc = mdss_pll_resource_enable(pll, true);
+	if (rc) {
+		pr_err("Failed to enable dsi pll resources, rc=%d\n", rc);
+		return rc;
+	}
+
+	reg_val = MDSS_PLL_REG_R(pll->phy_base, PHY_CMN_CLK_CFG1);
+	reg_val &= 0x3;
+
+	if (reg_val == 0)
+		div = 1;
+	else if (reg_val == 1)
+		div = 2;
+	else
+		div = 1;
+
+	(void)mdss_pll_resource_enable(pll, false);
+
+	return div;
+}
+
+static int post_bit_clk_set_div_sub(struct mdss_pll_resources *pll, int div)
+{
+	int rc = 0;
+	u32 reg_val;
+
+	reg_val = MDSS_PLL_REG_R(pll->phy_base, PHY_CMN_CLK_CFG1);
+	reg_val &= ~0x03;
+	if (div == 1) {
+		reg_val |= 0x0;
+	} else if (div == 2) {
+		reg_val |= 0x1;
+	} else {
+		rc = -EINVAL;
+		pr_err("unsupported divider %d\n", div);
+		goto error;
+	}
+
+	MDSS_PLL_REG_W(pll->phy_base, PHY_CMN_CLK_CFG1, reg_val);
+
+error:
+	return rc;
+}
+
+static int post_bit_clk_set_div(struct div_clk *clk, int div)
+{
+	int rc = 0;
+	struct mdss_pll_resources *pll = clk->priv;
+
+	rc = mdss_pll_resource_enable(pll, true);
+	if (rc) {
+		pr_err("Failed to enable dsi pll resources, rc=%d\n", rc);
+		return rc;
+	}
+
+	rc = post_bit_clk_set_div_sub(pll, div);
+	if (!rc && pll->slave)
+		rc = post_bit_clk_set_div_sub(pll->slave, div);
+
+	(void)mdss_pll_resource_enable(pll, false);
+
+	return rc;
+}
+
+long vco_8998_round_rate(struct clk *c, unsigned long rate)
+{
+	unsigned long rrate = rate;
+	struct dsi_pll_vco_clk *vco = to_vco_clk(c);
+
+	if (rate < vco->min_rate)
+		rrate = vco->min_rate;
+	if (rate > vco->max_rate)
+		rrate = vco->max_rate;
+
+	return rrate;
+}
+
+/* clk ops that require runtime fixup */
+static struct clk_ops clk_ops_gen_mux_dsi;
+static struct clk_ops clk_ops_bitclk_src_c;
+static struct clk_ops clk_ops_post_vco_div_c;
+static struct clk_ops clk_ops_post_bit_div_c;
+static struct clk_ops clk_ops_pclk_src_c;
+
+static struct clk_div_ops clk_post_vco_div_ops = {
+	.set_div = post_vco_clk_set_div,
+	.get_div = post_vco_clk_get_div,
+};
+
+static struct clk_div_ops clk_post_bit_div_ops = {
+	.set_div = post_bit_clk_set_div,
+	.get_div = post_bit_clk_get_div,
+};
+
+static struct clk_div_ops pixel_clk_div_ops = {
+	.set_div = pixel_clk_set_div,
+	.get_div = pixel_clk_get_div,
+};
+
+static struct clk_div_ops clk_bitclk_src_ops = {
+	.set_div = bit_clk_set_div,
+	.get_div = bit_clk_get_div,
+};
+
+static struct clk_ops clk_ops_vco_8998 = {
+	.set_rate = vco_8998_set_rate,
+	.round_rate = vco_8998_round_rate,
+	.handoff = vco_8998_handoff,
+	.prepare = vco_8998_prepare,
+	.unprepare = vco_8998_unprepare,
+};
+
+static struct clk_mux_ops mdss_mux_ops = {
+	.set_mux_sel = mdss_set_mux_sel,
+	.get_mux_sel = mdss_get_mux_sel,
+};
+
+static struct clk_mux_ops mdss_pll_out_mux_ops = {
+	.set_mux_sel = dsi_pll_out_set_mux_sel,
+	.get_mux_sel = dsi_pll_out_get_mux_sel,
+};
+
+/*
+ * Clock tree for generating DSI byte and pixel clocks.
+ *
+ *        +---------------+
+ *        |    vco_clk    |
+ *        |               |
+ *        +-------+-------+
+ *                |
+ *                |
+ *        +-------+--------+------------------+-----------------+
+ *        |                |                  |                 |
+ * +------v-------+ +------v-------+  +-------v------+   +------v-------+
+ * | pll_out_div1 | | pll_out_div2 |  | pll_out_div4 |   | pll_out_div8 |
+ * |    DIV(1)    | |    DIV(2)    |  |    DIV(4)    |   |    DIV(8)    |
+ * +------+-------+ +------+-------+  +-------+------+   +------+-------+
+ *        |                |                  |                 |
+ *        +------------+   |   +--------------+                 |
+ *                     |   |   |    +---------------------------+
+ *                     |   |   |    |
+ *                  +--v---v---v----v--+
+ *                   \   pll_out_mux  /
+ *                    \              /
+ *                     +------+-----+
+ *                            |
+ *            +---------------+-----------------+
+ *            |               |                 |
+ *     +------v-----+ +-------v-------+ +-------v-------+
+ *     | bitclk_src | | post_vco_div1 | | post_vco_div4 |
+ *     | DIV(1..15) | |     DIV(1)    | |     DIV(4)    |
+ *     +------+-----+ +-------+-------+ +-------+-------+
+ *            |               |                 |
+ * Shadow     |               |                 +---------------------+
+ *  Path      |               +-----------------------------+         |
+ *   +        |                                             |         |
+ *   |        +---------------------------------+           |         |
+ *   |        |                                 |           |         |
+ *   | +------v-------+                  +------v-------+ +-v---------v----+
+ *   | | byteclk_src  |                  | post_bit_div |  \ post_vco_mux /
+ *   | |    DIV(8)    |                  |   DIV(1,2)   |   \            /
+ *   | +------+-------+                  +------+-------+    +---+------+
+ *   |        |                                 |                |
+ *   |        |                                 |     +----------+
+ *   |        |                                 |     |
+ *   |        |                            +----v-----v------+
+ * +-v--------v---------+                   \  pclk_src_mux /
+ *  \   byteclk_mux    /                     \             /
+ *   \                /                       +-----+-----+
+ *    +------+-------+                              |         Shadow
+ *           |                                      |          Path
+ *           v                                +-----v------+    +
+ *       dsi_byte_clk                         |  pclk_src  |    |
+ *                                            | DIV(1..15) |    |
+ *                                            +-----+------+    |
+ *                                                  |           |
+ *                                                  +------+    |
+ *                                                         |    |
+ *                                                     +---v----v----+
+ *                                                      \  pclk_mux /
+ *                                                       \         /
+ *                                                        +---+---+
+ *                                                            |
+ *                                                            |
+ *                                                            v
+ *                                                         dsi_pclk
+ *
+ */
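+
+/*
+ * Example walk of the tree above (illustrative only): with the VCO locked
+ * at 1.5 GHz, pll_out_mux selecting pll_out_div1 and bitclk_src dividing
+ * by 1, the bit clock is 1.5 GHz and byteclk_src (DIV(8)) yields a
+ * 187.5 MHz dsi_byte_clk; with post_bit_div = 2 and pclk_src = 3, the
+ * pixel path produces 1.5 GHz / 2 / 3 = 250 MHz on dsi_pclk.
+ */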
+
+static struct dsi_pll_vco_clk dsi0pll_vco_clk = {
+	.ref_clk_rate = 19200000UL,
+	.min_rate = 1000000000UL,
+	.max_rate = 3500000000UL,
+	.c = {
+		.dbg_name = "dsi0pll_vco_clk",
+		.ops = &clk_ops_vco_8998,
+		.flags = CLKFLAG_NO_RATE_CACHE,
+		CLK_INIT(dsi0pll_vco_clk.c),
+	},
+};
+
+static struct div_clk dsi0pll_pll_out_div1 = {
+	.data = {
+		.div = 1,
+		.min_div = 1,
+		.max_div = 1,
+	},
+	.c = {
+		.parent = &dsi0pll_vco_clk.c,
+		.dbg_name = "dsi0pll_pll_out_div1",
+		.ops = &clk_ops_div,
+		.flags = CLKFLAG_NO_RATE_CACHE,
+		CLK_INIT(dsi0pll_pll_out_div1.c),
+	}
+};
+
+static struct div_clk dsi0pll_pll_out_div2 = {
+	.data = {
+		.div = 2,
+		.min_div = 2,
+		.max_div = 2,
+	},
+	.c = {
+		.parent = &dsi0pll_vco_clk.c,
+		.dbg_name = "dsi0pll_pll_out_div2",
+		.ops = &clk_ops_div,
+		.flags = CLKFLAG_NO_RATE_CACHE,
+		CLK_INIT(dsi0pll_pll_out_div2.c),
+	}
+};
+
+static struct div_clk dsi0pll_pll_out_div4 = {
+	.data = {
+		.div = 4,
+		.min_div = 4,
+		.max_div = 4,
+	},
+	.c = {
+		.parent = &dsi0pll_vco_clk.c,
+		.dbg_name = "dsi0pll_pll_out_div4",
+		.ops = &clk_ops_div,
+		.flags = CLKFLAG_NO_RATE_CACHE,
+		CLK_INIT(dsi0pll_pll_out_div4.c),
+	}
+};
+
+static struct div_clk dsi0pll_pll_out_div8 = {
+	.data = {
+		.div = 8,
+		.min_div = 8,
+		.max_div = 8,
+	},
+	.c = {
+		.parent = &dsi0pll_vco_clk.c,
+		.dbg_name = "dsi0pll_pll_out_div8",
+		.ops = &clk_ops_div,
+		.flags = CLKFLAG_NO_RATE_CACHE,
+		CLK_INIT(dsi0pll_pll_out_div8.c),
+	}
+};
+
+static struct mux_clk dsi0pll_pll_out_mux = {
+	.num_parents = 4,
+	.parents = (struct clk_src[]) {
+		{&dsi0pll_pll_out_div1.c, 0},
+		{&dsi0pll_pll_out_div2.c, 1},
+		{&dsi0pll_pll_out_div4.c, 2},
+		{&dsi0pll_pll_out_div8.c, 3},
+	},
+	.ops = &mdss_pll_out_mux_ops,
+	.c = {
+		.parent = &dsi0pll_pll_out_div1.c,
+		.dbg_name = "dsi0pll_pll_out_mux",
+		.ops = &clk_ops_gen_mux,
+		.flags = CLKFLAG_NO_RATE_CACHE,
+		CLK_INIT(dsi0pll_pll_out_mux.c),
+	}
+};
+static struct div_clk dsi0pll_bitclk_src = {
+	.data = {
+		.div = 1,
+		.min_div = 1,
+		.max_div = 15,
+	},
+	.ops = &clk_bitclk_src_ops,
+	.c = {
+		.parent = &dsi0pll_pll_out_mux.c,
+		.dbg_name = "dsi0pll_bitclk_src",
+		.ops = &clk_ops_bitclk_src_c,
+		.flags = CLKFLAG_NO_RATE_CACHE,
+		CLK_INIT(dsi0pll_bitclk_src.c),
+	}
+};
+
+static struct div_clk dsi0pll_post_vco_div1 = {
+	.data = {
+		.div = 1,
+		.min_div = 1,
+		.max_div = 1,
+	},
+	.ops = &clk_post_vco_div_ops,
+	.c = {
+		.parent = &dsi0pll_pll_out_mux.c,
+		.dbg_name = "dsi0pll_post_vco_div1",
+		.ops = &clk_ops_post_vco_div_c,
+		.flags = CLKFLAG_NO_RATE_CACHE,
+		CLK_INIT(dsi0pll_post_vco_div1.c),
+	}
+};
+
+static struct div_clk dsi0pll_post_vco_div4 = {
+	.data = {
+		.div = 4,
+		.min_div = 4,
+		.max_div = 4,
+	},
+	.ops = &clk_post_vco_div_ops,
+	.c = {
+		.parent = &dsi0pll_pll_out_mux.c,
+		.dbg_name = "dsi0pll_post_vco_div4",
+		.ops = &clk_ops_post_vco_div_c,
+		.flags = CLKFLAG_NO_RATE_CACHE,
+		CLK_INIT(dsi0pll_post_vco_div4.c),
+	}
+};
+
+static struct mux_clk dsi0pll_post_vco_mux = {
+	.num_parents = 2,
+	.parents = (struct clk_src[]) {
+		{&dsi0pll_post_vco_div1.c, 0},
+		{&dsi0pll_post_vco_div4.c, 1},
+	},
+	.ops = &mdss_mux_ops,
+	.c = {
+		.parent = &dsi0pll_post_vco_div1.c,
+		.dbg_name = "dsi0pll_post_vco_mux",
+		.ops = &clk_ops_gen_mux,
+		.flags = CLKFLAG_NO_RATE_CACHE,
+		CLK_INIT(dsi0pll_post_vco_mux.c),
+	}
+};
+
+static struct div_clk dsi0pll_post_bit_div = {
+	.data = {
+		.div = 1,
+		.min_div = 1,
+		.max_div = 2,
+	},
+	.ops = &clk_post_bit_div_ops,
+	.c = {
+		.parent = &dsi0pll_bitclk_src.c,
+		.dbg_name = "dsi0pll_post_bit_div",
+		.ops = &clk_ops_post_bit_div_c,
+		.flags = CLKFLAG_NO_RATE_CACHE,
+		CLK_INIT(dsi0pll_post_bit_div.c),
+	}
+};
+
+static struct mux_clk dsi0pll_pclk_src_mux = {
+	.num_parents = 2,
+	.parents = (struct clk_src[]) {
+		{&dsi0pll_post_bit_div.c, 0},
+		{&dsi0pll_post_vco_mux.c, 1},
+	},
+	.ops = &mdss_mux_ops,
+	.c = {
+		.parent = &dsi0pll_post_bit_div.c,
+		.dbg_name = "dsi0pll_pclk_src_mux",
+		.ops = &clk_ops_gen_mux,
+		.flags = CLKFLAG_NO_RATE_CACHE,
+		CLK_INIT(dsi0pll_pclk_src_mux.c),
+	}
+};
+
+static struct div_clk dsi0pll_pclk_src = {
+	.data = {
+		.div = 1,
+		.min_div = 1,
+		.max_div = 15,
+	},
+	.ops = &pixel_clk_div_ops,
+	.c = {
+		.parent = &dsi0pll_pclk_src_mux.c,
+		.dbg_name = "dsi0pll_pclk_src",
+		.ops = &clk_ops_pclk_src_c,
+		.flags = CLKFLAG_NO_RATE_CACHE,
+		CLK_INIT(dsi0pll_pclk_src.c),
+	},
+};
+
+static struct mux_clk dsi0pll_pclk_mux = {
+	.num_parents = 1,
+	.parents = (struct clk_src[]) {
+		{&dsi0pll_pclk_src.c, 0},
+	},
+	.ops = &mdss_mux_ops,
+	.c = {
+		.parent = &dsi0pll_pclk_src.c,
+		.dbg_name = "dsi0pll_pclk_mux",
+		.ops = &clk_ops_gen_mux_dsi,
+		.flags = CLKFLAG_NO_RATE_CACHE,
+		CLK_INIT(dsi0pll_pclk_mux.c),
+	}
+};
+
+static struct div_clk dsi0pll_byteclk_src = {
+	.data = {
+		.div = 8,
+		.min_div = 8,
+		.max_div = 8,
+	},
+	.c = {
+		.parent = &dsi0pll_bitclk_src.c,
+		.dbg_name = "dsi0pll_byteclk_src",
+		.ops = &clk_ops_div,
+		.flags = CLKFLAG_NO_RATE_CACHE,
+		CLK_INIT(dsi0pll_byteclk_src.c),
+	},
+};
+
+static struct mux_clk dsi0pll_byteclk_mux = {
+	.num_parents = 1,
+	.parents = (struct clk_src[]) {
+		{&dsi0pll_byteclk_src.c, 0},
+	},
+	.ops = &mdss_mux_ops,
+	.c = {
+		.parent = &dsi0pll_byteclk_src.c,
+		.dbg_name = "dsi0pll_byteclk_mux",
+		.ops = &clk_ops_gen_mux_dsi,
+		.flags = CLKFLAG_NO_RATE_CACHE,
+		CLK_INIT(dsi0pll_byteclk_mux.c),
+	}
+};
+
+static struct dsi_pll_vco_clk dsi1pll_vco_clk = {
+	.ref_clk_rate = 19200000UL,
+	.min_rate = 1000000000UL,
+	.max_rate = 3500000000UL,
+	.c = {
+		.dbg_name = "dsi1pll_vco_clk",
+		.ops = &clk_ops_vco_8998,
+		.flags = CLKFLAG_NO_RATE_CACHE,
+		CLK_INIT(dsi1pll_vco_clk.c),
+	},
+};
+
+static struct div_clk dsi1pll_pll_out_div1 = {
+	.data = {
+		.div = 1,
+		.min_div = 1,
+		.max_div = 1,
+	},
+	.c = {
+		.parent = &dsi1pll_vco_clk.c,
+		.dbg_name = "dsi1pll_pll_out_div1",
+		.ops = &clk_ops_div,
+		.flags = CLKFLAG_NO_RATE_CACHE,
+		CLK_INIT(dsi1pll_pll_out_div1.c),
+	}
+};
+
+static struct div_clk dsi1pll_pll_out_div2 = {
+	.data = {
+		.div = 2,
+		.min_div = 2,
+		.max_div = 2,
+	},
+	.c = {
+		.parent = &dsi1pll_vco_clk.c,
+		.dbg_name = "dsi1pll_pll_out_div2",
+		.ops = &clk_ops_div,
+		.flags = CLKFLAG_NO_RATE_CACHE,
+		CLK_INIT(dsi1pll_pll_out_div2.c),
+	}
+};
+
+static struct div_clk dsi1pll_pll_out_div4 = {
+	.data = {
+		.div = 4,
+		.min_div = 4,
+		.max_div = 4,
+	},
+	.c = {
+		.parent = &dsi1pll_vco_clk.c,
+		.dbg_name = "dsi1pll_pll_out_div4",
+		.ops = &clk_ops_div,
+		.flags = CLKFLAG_NO_RATE_CACHE,
+		CLK_INIT(dsi1pll_pll_out_div4.c),
+	}
+};
+
+static struct div_clk dsi1pll_pll_out_div8 = {
+	.data = {
+		.div = 8,
+		.min_div = 8,
+		.max_div = 8,
+	},
+	.c = {
+		.parent = &dsi1pll_vco_clk.c,
+		.dbg_name = "dsi1pll_pll_out_div8",
+		.ops = &clk_ops_div,
+		.flags = CLKFLAG_NO_RATE_CACHE,
+		CLK_INIT(dsi1pll_pll_out_div8.c),
+	}
+};
+
+static struct mux_clk dsi1pll_pll_out_mux = {
+	.num_parents = 4,
+	.parents = (struct clk_src[]) {
+		{&dsi1pll_pll_out_div1.c, 0},
+		{&dsi1pll_pll_out_div2.c, 1},
+		{&dsi1pll_pll_out_div4.c, 2},
+		{&dsi1pll_pll_out_div8.c, 3},
+	},
+	.ops = &mdss_pll_out_mux_ops,
+	.c = {
+		.parent = &dsi1pll_pll_out_div1.c,
+		.dbg_name = "dsi1pll_pll_out_mux",
+		.ops = &clk_ops_gen_mux,
+		.flags = CLKFLAG_NO_RATE_CACHE,
+		CLK_INIT(dsi1pll_pll_out_mux.c),
+	}
+};
+
+static struct div_clk dsi1pll_bitclk_src = {
+	.data = {
+		.div = 1,
+		.min_div = 1,
+		.max_div = 15,
+	},
+	.ops = &clk_bitclk_src_ops,
+	.c = {
+		.parent = &dsi1pll_pll_out_mux.c,
+		.dbg_name = "dsi1pll_bitclk_src",
+		.ops = &clk_ops_bitclk_src_c,
+		.flags = CLKFLAG_NO_RATE_CACHE,
+		CLK_INIT(dsi1pll_bitclk_src.c),
+	}
+};
+
+static struct div_clk dsi1pll_post_vco_div1 = {
+	.data = {
+		.div = 1,
+		.min_div = 1,
+		.max_div = 1,
+	},
+	.ops = &clk_post_vco_div_ops,
+	.c = {
+		.parent = &dsi1pll_pll_out_mux.c,
+		.dbg_name = "dsi1pll_post_vco_div1",
+		.ops = &clk_ops_post_vco_div_c,
+		.flags = CLKFLAG_NO_RATE_CACHE,
+		CLK_INIT(dsi1pll_post_vco_div1.c),
+	}
+};
+
+static struct div_clk dsi1pll_post_vco_div4 = {
+	.data = {
+		.div = 4,
+		.min_div = 4,
+		.max_div = 4,
+	},
+	.ops = &clk_post_vco_div_ops,
+	.c = {
+		.parent = &dsi1pll_pll_out_mux.c,
+		.dbg_name = "dsi1pll_post_vco_div4",
+		.ops = &clk_ops_post_vco_div_c,
+		.flags = CLKFLAG_NO_RATE_CACHE,
+		CLK_INIT(dsi1pll_post_vco_div4.c),
+	}
+};
+
+static struct mux_clk dsi1pll_post_vco_mux = {
+	.num_parents = 2,
+	.parents = (struct clk_src[]) {
+		{&dsi1pll_post_vco_div1.c, 0},
+		{&dsi1pll_post_vco_div4.c, 1},
+	},
+	.ops = &mdss_mux_ops,
+	.c = {
+		.parent = &dsi1pll_post_vco_div1.c,
+		.dbg_name = "dsi1pll_post_vco_mux",
+		.ops = &clk_ops_gen_mux,
+		.flags = CLKFLAG_NO_RATE_CACHE,
+		CLK_INIT(dsi1pll_post_vco_mux.c),
+	}
+};
+
+static struct div_clk dsi1pll_post_bit_div = {
+	.data = {
+		.div = 1,
+		.min_div = 1,
+		.max_div = 2,
+	},
+	.ops = &clk_post_bit_div_ops,
+	.c = {
+		.parent = &dsi1pll_bitclk_src.c,
+		.dbg_name = "dsi1pll_post_bit_div",
+		.ops = &clk_ops_post_bit_div_c,
+		.flags = CLKFLAG_NO_RATE_CACHE,
+		CLK_INIT(dsi1pll_post_bit_div.c),
+	}
+};
+
+static struct mux_clk dsi1pll_pclk_src_mux = {
+	.num_parents = 2,
+	.parents = (struct clk_src[]) {
+		{&dsi1pll_post_bit_div.c, 0},
+		{&dsi1pll_post_vco_mux.c, 1},
+	},
+	.ops = &mdss_mux_ops,
+	.c = {
+		.parent = &dsi1pll_post_bit_div.c,
+		.dbg_name = "dsi1pll_pclk_src_mux",
+		.ops = &clk_ops_gen_mux,
+		.flags = CLKFLAG_NO_RATE_CACHE,
+		CLK_INIT(dsi1pll_pclk_src_mux.c),
+	}
+};
+
+static struct div_clk dsi1pll_pclk_src = {
+	.data = {
+		.div = 1,
+		.min_div = 1,
+		.max_div = 15,
+	},
+	.ops = &pixel_clk_div_ops,
+	.c = {
+		.parent = &dsi1pll_pclk_src_mux.c,
+		.dbg_name = "dsi1pll_pclk_src",
+		.ops = &clk_ops_pclk_src_c,
+		.flags = CLKFLAG_NO_RATE_CACHE,
+		CLK_INIT(dsi1pll_pclk_src.c),
+	},
+};
+
+static struct mux_clk dsi1pll_pclk_mux = {
+	.num_parents = 1,
+	.parents = (struct clk_src[]) {
+		{&dsi1pll_pclk_src.c, 0},
+	},
+	.ops = &mdss_mux_ops,
+	.c = {
+		.parent = &dsi1pll_pclk_src.c,
+		.dbg_name = "dsi1pll_pclk_mux",
+		.ops = &clk_ops_gen_mux_dsi,
+		.flags = CLKFLAG_NO_RATE_CACHE,
+		CLK_INIT(dsi1pll_pclk_mux.c),
+	}
+};
+
+static struct div_clk dsi1pll_byteclk_src = {
+	.data = {
+		.div = 8,
+		.min_div = 8,
+		.max_div = 8,
+	},
+	.c = {
+		.parent = &dsi1pll_bitclk_src.c,
+		.dbg_name = "dsi1pll_byteclk_src",
+		.ops = &clk_ops_div,
+		.flags = CLKFLAG_NO_RATE_CACHE,
+		CLK_INIT(dsi1pll_byteclk_src.c),
+	},
+};
+
+static struct mux_clk dsi1pll_byteclk_mux = {
+	.num_parents = 1,
+	.parents = (struct clk_src[]) {
+		{&dsi1pll_byteclk_src.c, 0},
+	},
+	.ops = &mdss_mux_ops,
+	.c = {
+		.parent = &dsi1pll_byteclk_src.c,
+		.dbg_name = "dsi1pll_byteclk_mux",
+		.ops = &clk_ops_gen_mux_dsi,
+		.flags = CLKFLAG_NO_RATE_CACHE,
+		CLK_INIT(dsi1pll_byteclk_mux.c),
+	}
+};
+
+static struct clk_lookup mdss_dsi_pll0cc_8998[] = {
+	CLK_LIST(dsi0pll_byteclk_mux),
+	CLK_LIST(dsi0pll_byteclk_src),
+	CLK_LIST(dsi0pll_pclk_mux),
+	CLK_LIST(dsi0pll_pclk_src),
+	CLK_LIST(dsi0pll_pclk_src_mux),
+	CLK_LIST(dsi0pll_post_bit_div),
+	CLK_LIST(dsi0pll_post_vco_mux),
+	CLK_LIST(dsi0pll_post_vco_div1),
+	CLK_LIST(dsi0pll_post_vco_div4),
+	CLK_LIST(dsi0pll_bitclk_src),
+	CLK_LIST(dsi0pll_pll_out_mux),
+	CLK_LIST(dsi0pll_pll_out_div8),
+	CLK_LIST(dsi0pll_pll_out_div4),
+	CLK_LIST(dsi0pll_pll_out_div2),
+	CLK_LIST(dsi0pll_pll_out_div1),
+	CLK_LIST(dsi0pll_vco_clk),
+};
+static struct clk_lookup mdss_dsi_pll1cc_8998[] = {
+	CLK_LIST(dsi1pll_byteclk_mux),
+	CLK_LIST(dsi1pll_byteclk_src),
+	CLK_LIST(dsi1pll_pclk_mux),
+	CLK_LIST(dsi1pll_pclk_src),
+	CLK_LIST(dsi1pll_pclk_src_mux),
+	CLK_LIST(dsi1pll_post_bit_div),
+	CLK_LIST(dsi1pll_post_vco_mux),
+	CLK_LIST(dsi1pll_post_vco_div1),
+	CLK_LIST(dsi1pll_post_vco_div4),
+	CLK_LIST(dsi1pll_bitclk_src),
+	CLK_LIST(dsi1pll_pll_out_mux),
+	CLK_LIST(dsi1pll_pll_out_div8),
+	CLK_LIST(dsi1pll_pll_out_div4),
+	CLK_LIST(dsi1pll_pll_out_div2),
+	CLK_LIST(dsi1pll_pll_out_div1),
+	CLK_LIST(dsi1pll_vco_clk),
+};
+
+int dsi_pll_clock_register_8998(struct platform_device *pdev,
+				  struct mdss_pll_resources *pll_res)
+{
+	int rc = 0, ndx;
+
+	if (!pdev || !pdev->dev.of_node ||
+		!pll_res || !pll_res->pll_base || !pll_res->phy_base) {
+		pr_err("Invalid params\n");
+		return -EINVAL;
+	}
+
+	ndx = pll_res->index;
+
+	if (ndx >= DSI_PLL_MAX) {
+		pr_err("pll index(%d) NOT supported\n", ndx);
+		return -EINVAL;
+	}
+
+	pll_rsc_db[ndx] = pll_res;
+	pll_res->priv = &plls[ndx];
+	plls[ndx].rsc = pll_res;
+
+	/* runtime fixup of all div and mux clock ops */
+	clk_ops_gen_mux_dsi = clk_ops_gen_mux;
+	clk_ops_gen_mux_dsi.round_rate = parent_round_rate;
+	clk_ops_gen_mux_dsi.set_rate = parent_set_rate;
+
+	clk_ops_bitclk_src_c = clk_ops_div;
+	clk_ops_bitclk_src_c.prepare = mdss_pll_div_prepare;
+
+	/*
+	 * Set the ops for the two dividers in the pixel clock tree to the
+	 * slave_div to ensure that a set rate on this divider clock will not
+	 * be propagated to its parent. This is needed to ensure that when we
+	 * set the rate for the pixel clock, the vco is not reconfigured.
+	 */
+	clk_ops_post_vco_div_c = clk_ops_slave_div;
+	clk_ops_post_vco_div_c.prepare = mdss_pll_div_prepare;
+
+	clk_ops_post_bit_div_c = clk_ops_slave_div;
+	clk_ops_post_bit_div_c.prepare = mdss_pll_div_prepare;
+
+	clk_ops_pclk_src_c = clk_ops_div;
+	clk_ops_pclk_src_c.prepare = mdss_pll_div_prepare;
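+
+	/*
+	 * Note: forwarding round_rate/set_rate of the byte and pixel clock
+	 * muxes to their parent (clk_ops_gen_mux_dsi above) is what lets a
+	 * rate request on dsi_byte_clk or dsi_pclk propagate up the tree to
+	 * the vco.
+	 */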
+
+	pll_res->vco_delay = VCO_DELAY_USEC;
+	if (ndx == 0) {
+		dsi0pll_byteclk_mux.priv = pll_res;
+		dsi0pll_byteclk_src.priv = pll_res;
+		dsi0pll_pclk_mux.priv = pll_res;
+		dsi0pll_pclk_src.priv = pll_res;
+		dsi0pll_pclk_src_mux.priv = pll_res;
+		dsi0pll_post_bit_div.priv = pll_res;
+		dsi0pll_post_vco_mux.priv = pll_res;
+		dsi0pll_post_vco_div1.priv = pll_res;
+		dsi0pll_post_vco_div4.priv = pll_res;
+		dsi0pll_bitclk_src.priv = pll_res;
+		dsi0pll_pll_out_div1.priv = pll_res;
+		dsi0pll_pll_out_div2.priv = pll_res;
+		dsi0pll_pll_out_div4.priv = pll_res;
+		dsi0pll_pll_out_div8.priv = pll_res;
+		dsi0pll_pll_out_mux.priv = pll_res;
+		dsi0pll_vco_clk.priv = pll_res;
+
+		rc = of_msm_clock_register(pdev->dev.of_node,
+			mdss_dsi_pll0cc_8998,
+			ARRAY_SIZE(mdss_dsi_pll0cc_8998));
+	} else {
+		dsi1pll_byteclk_mux.priv = pll_res;
+		dsi1pll_byteclk_src.priv = pll_res;
+		dsi1pll_pclk_mux.priv = pll_res;
+		dsi1pll_pclk_src.priv = pll_res;
+		dsi1pll_pclk_src_mux.priv = pll_res;
+		dsi1pll_post_bit_div.priv = pll_res;
+		dsi1pll_post_vco_mux.priv = pll_res;
+		dsi1pll_post_vco_div1.priv = pll_res;
+		dsi1pll_post_vco_div4.priv = pll_res;
+		dsi1pll_bitclk_src.priv = pll_res;
+		dsi1pll_pll_out_div1.priv = pll_res;
+		dsi1pll_pll_out_div2.priv = pll_res;
+		dsi1pll_pll_out_div4.priv = pll_res;
+		dsi1pll_pll_out_div8.priv = pll_res;
+		dsi1pll_pll_out_mux.priv = pll_res;
+		dsi1pll_vco_clk.priv = pll_res;
+
+		rc = of_msm_clock_register(pdev->dev.of_node,
+			mdss_dsi_pll1cc_8998,
+			ARRAY_SIZE(mdss_dsi_pll1cc_8998));
+	}
+	if (rc)
+		pr_err("dsi%dpll clock register failed, rc=%d\n", ndx, rc);
+
+	return rc;
+}
diff -Nruw linux-4.4.115-fbx/drivers/clk/msm./mdss/mdss-dsi-pll.h linux-4.4.115-fbx/drivers/clk/msm/mdss/mdss-dsi-pll.h
--- linux-4.4.115-fbx/drivers/clk/msm./mdss/mdss-dsi-pll.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/clk/msm/mdss/mdss-dsi-pll.h	2019-01-22 16:16:23.019242024 +0100
@@ -0,0 +1,110 @@
+/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MDSS_DSI_PLL_H
+#define __MDSS_DSI_PLL_H
+
+#define MAX_DSI_PLL_EN_SEQS	10
+
+#define DSI_PHY_PLL_UNIPHY_PLL_GLB_CFG		(0x0020)
+#define DSI_PHY_PLL_UNIPHY_PLL_LKDET_CFG2	(0x0064)
+#define DSI_PHY_PLL_UNIPHY_PLL_TEST_CFG		(0x0068)
+#define DSI_PHY_PLL_UNIPHY_PLL_CAL_CFG1		(0x0070)
+
+/* Register offsets for 20nm PHY PLL */
+#define MMSS_DSI_PHY_PLL_PLL_CNTRL		(0x0014)
+#define MMSS_DSI_PHY_PLL_PLL_BKG_KVCO_CAL_EN	(0x002C)
+#define MMSS_DSI_PHY_PLL_PLLLOCK_CMP_EN		(0x009C)
+
+struct lpfr_cfg {
+	unsigned long vco_rate;
+	u32 r;
+};
+
+struct dsi_pll_vco_clk {
+	unsigned long	ref_clk_rate;
+	unsigned long	min_rate;
+	unsigned long	max_rate;
+	u32		pll_en_seq_cnt;
+	struct lpfr_cfg *lpfr_lut;
+	u32		lpfr_lut_size;
+	void		*priv;
+
+	struct clk	c;
+
+	int (*pll_enable_seqs[MAX_DSI_PLL_EN_SEQS])
+			(struct mdss_pll_resources *dsi_pll_Res);
+};
+
+static inline struct dsi_pll_vco_clk *to_vco_clk(struct clk *clk)
+{
+	return container_of(clk, struct dsi_pll_vco_clk, c);
+}
+
+int dsi_pll_clock_register_hpm(struct platform_device *pdev,
+				struct mdss_pll_resources *pll_res);
+int dsi_pll_clock_register_20nm(struct platform_device *pdev,
+				struct mdss_pll_resources *pll_res);
+int dsi_pll_clock_register_lpm(struct platform_device *pdev,
+				struct mdss_pll_resources *pll_res);
+int dsi_pll_clock_register_8996(struct platform_device *pdev,
+				struct mdss_pll_resources *pll_res);
+int dsi_pll_clock_register_8998(struct platform_device *pdev,
+				  struct mdss_pll_resources *pll_res);
+
+int set_byte_mux_sel(struct mux_clk *clk, int sel);
+int get_byte_mux_sel(struct mux_clk *clk);
+int dsi_pll_mux_prepare(struct clk *c);
+int fixed_4div_set_div(struct div_clk *clk, int div);
+int fixed_4div_get_div(struct div_clk *clk);
+int digital_set_div(struct div_clk *clk, int div);
+int digital_get_div(struct div_clk *clk);
+int analog_set_div(struct div_clk *clk, int div);
+int analog_get_div(struct div_clk *clk);
+int dsi_pll_lock_status(struct mdss_pll_resources *dsi_pll_res);
+int vco_set_rate(struct dsi_pll_vco_clk *vco, unsigned long rate);
+unsigned long vco_get_rate(struct clk *c);
+long vco_round_rate(struct clk *c, unsigned long rate);
+enum handoff vco_handoff(struct clk *c);
+int vco_prepare(struct clk *c);
+void vco_unprepare(struct clk *c);
+
+/* APIs for 20nm PHY PLL */
+int pll_20nm_vco_set_rate(struct dsi_pll_vco_clk *vco, unsigned long rate);
+int shadow_pll_20nm_vco_set_rate(struct dsi_pll_vco_clk *vco,
+				unsigned long rate);
+long pll_20nm_vco_round_rate(struct clk *c, unsigned long rate);
+enum handoff pll_20nm_vco_handoff(struct clk *c);
+int pll_20nm_vco_prepare(struct clk *c);
+void pll_20nm_vco_unprepare(struct clk *c);
+int pll_20nm_vco_enable_seq(struct mdss_pll_resources *dsi_pll_res);
+
+int set_bypass_lp_div_mux_sel(struct mux_clk *clk, int sel);
+int set_shadow_bypass_lp_div_mux_sel(struct mux_clk *clk, int sel);
+int get_bypass_lp_div_mux_sel(struct mux_clk *clk);
+int fixed_hr_oclk2_set_div(struct div_clk *clk, int div);
+int shadow_fixed_hr_oclk2_set_div(struct div_clk *clk, int div);
+int fixed_hr_oclk2_get_div(struct div_clk *clk);
+int hr_oclk3_set_div(struct div_clk *clk, int div);
+int shadow_hr_oclk3_set_div(struct div_clk *clk, int div);
+int hr_oclk3_get_div(struct div_clk *clk);
+int ndiv_set_div(struct div_clk *clk, int div);
+int shadow_ndiv_set_div(struct div_clk *clk, int div);
+int ndiv_get_div(struct div_clk *clk);
+void __dsi_pll_disable(void __iomem *pll_base);
+
+int set_mdss_pixel_mux_sel(struct mux_clk *clk, int sel);
+int get_mdss_pixel_mux_sel(struct mux_clk *clk);
+int set_mdss_byte_mux_sel(struct mux_clk *clk, int sel);
+int get_mdss_byte_mux_sel(struct mux_clk *clk);
+
+#endif
diff -Nruw linux-4.4.115-fbx/drivers/clk/msm./mdss/mdss-hdmi-pll-8996.c linux-4.4.115-fbx/drivers/clk/msm/mdss/mdss-hdmi-pll-8996.c
--- linux-4.4.115-fbx/drivers/clk/msm./mdss/mdss-hdmi-pll-8996.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/clk/msm/mdss/mdss-hdmi-pll-8996.c	2019-01-22 16:16:23.023242060 +0100
@@ -0,0 +1,2683 @@
+/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/delay.h>
+#include <linux/iopoll.h>
+#include <linux/clk/msm-clk-provider.h>
+#include <linux/clk/msm-clk.h>
+#include <linux/clk/msm-clock-generic.h>
+#include <dt-bindings/clock/msm-clocks-8996.h>
+
+#include "mdss-pll.h"
+#include "mdss-hdmi-pll.h"
+
+/* CONSTANTS */
+#define HDMI_BIT_CLK_TO_PIX_CLK_RATIO            10
+#define HDMI_HIGH_FREQ_BIT_CLK_THRESHOLD         3400000000UL
+#define HDMI_DIG_FREQ_BIT_CLK_THRESHOLD          1500000000UL
+#define HDMI_MID_FREQ_BIT_CLK_THRESHOLD          750000000
+#define HDMI_CLKS_PLL_DIVSEL                     0
+#define HDMI_CORECLK_DIV                         5
+#define HDMI_REF_CLOCK                           19200000
+#define HDMI_64B_ERR_VAL                         0xFFFFFFFFFFFFFFFF
+#define HDMI_VERSION_8996_V1                     1
+#define HDMI_VERSION_8996_V2                     2
+#define HDMI_VERSION_8996_V3                     3
+#define HDMI_VERSION_8996_V3_1_8                 4
+
+#define HDMI_VCO_MAX_FREQ                        12000000000
+#define HDMI_VCO_MIN_FREQ                        8000000000
+#define HDMI_2400MHZ_BIT_CLK_HZ                  2400000000UL
+#define HDMI_2250MHZ_BIT_CLK_HZ                  2250000000UL
+#define HDMI_2000MHZ_BIT_CLK_HZ                  2000000000UL
+#define HDMI_1700MHZ_BIT_CLK_HZ                  1700000000UL
+#define HDMI_1200MHZ_BIT_CLK_HZ                  1200000000UL
+#define HDMI_1334MHZ_BIT_CLK_HZ                  1334000000UL
+#define HDMI_1000MHZ_BIT_CLK_HZ                  1000000000UL
+#define HDMI_850MHZ_BIT_CLK_HZ                   850000000
+#define HDMI_667MHZ_BIT_CLK_HZ                   667000000
+#define HDMI_600MHZ_BIT_CLK_HZ                   600000000
+#define HDMI_500MHZ_BIT_CLK_HZ                   500000000
+#define HDMI_450MHZ_BIT_CLK_HZ                   450000000
+#define HDMI_334MHZ_BIT_CLK_HZ                   334000000
+#define HDMI_300MHZ_BIT_CLK_HZ                   300000000
+#define HDMI_282MHZ_BIT_CLK_HZ                   282000000
+#define HDMI_250MHZ_BIT_CLK_HZ                   250000000
+#define HDMI_KHZ_TO_HZ                           1000
+
+/* PLL REGISTERS */
+#define QSERDES_COM_ATB_SEL1                     (0x000)
+#define QSERDES_COM_ATB_SEL2                     (0x004)
+#define QSERDES_COM_FREQ_UPDATE                  (0x008)
+#define QSERDES_COM_BG_TIMER                     (0x00C)
+#define QSERDES_COM_SSC_EN_CENTER                (0x010)
+#define QSERDES_COM_SSC_ADJ_PER1                 (0x014)
+#define QSERDES_COM_SSC_ADJ_PER2                 (0x018)
+#define QSERDES_COM_SSC_PER1                     (0x01C)
+#define QSERDES_COM_SSC_PER2                     (0x020)
+#define QSERDES_COM_SSC_STEP_SIZE1               (0x024)
+#define QSERDES_COM_SSC_STEP_SIZE2               (0x028)
+#define QSERDES_COM_POST_DIV                     (0x02C)
+#define QSERDES_COM_POST_DIV_MUX                 (0x030)
+#define QSERDES_COM_BIAS_EN_CLKBUFLR_EN          (0x034)
+#define QSERDES_COM_CLK_ENABLE1                  (0x038)
+#define QSERDES_COM_SYS_CLK_CTRL                 (0x03C)
+#define QSERDES_COM_SYSCLK_BUF_ENABLE            (0x040)
+#define QSERDES_COM_PLL_EN                       (0x044)
+#define QSERDES_COM_PLL_IVCO                     (0x048)
+#define QSERDES_COM_LOCK_CMP1_MODE0              (0x04C)
+#define QSERDES_COM_LOCK_CMP2_MODE0              (0x050)
+#define QSERDES_COM_LOCK_CMP3_MODE0              (0x054)
+#define QSERDES_COM_LOCK_CMP1_MODE1              (0x058)
+#define QSERDES_COM_LOCK_CMP2_MODE1              (0x05C)
+#define QSERDES_COM_LOCK_CMP3_MODE1              (0x060)
+#define QSERDES_COM_LOCK_CMP1_MODE2              (0x064)
+#define QSERDES_COM_CMN_RSVD0                    (0x064)
+#define QSERDES_COM_LOCK_CMP2_MODE2              (0x068)
+#define QSERDES_COM_EP_CLOCK_DETECT_CTRL         (0x068)
+#define QSERDES_COM_LOCK_CMP3_MODE2              (0x06C)
+#define QSERDES_COM_SYSCLK_DET_COMP_STATUS       (0x06C)
+#define QSERDES_COM_BG_TRIM                      (0x070)
+#define QSERDES_COM_CLK_EP_DIV                   (0x074)
+#define QSERDES_COM_CP_CTRL_MODE0                (0x078)
+#define QSERDES_COM_CP_CTRL_MODE1                (0x07C)
+#define QSERDES_COM_CP_CTRL_MODE2                (0x080)
+#define QSERDES_COM_CMN_RSVD1                    (0x080)
+#define QSERDES_COM_PLL_RCTRL_MODE0              (0x084)
+#define QSERDES_COM_PLL_RCTRL_MODE1              (0x088)
+#define QSERDES_COM_PLL_RCTRL_MODE2              (0x08C)
+#define QSERDES_COM_CMN_RSVD2                    (0x08C)
+#define QSERDES_COM_PLL_CCTRL_MODE0              (0x090)
+#define QSERDES_COM_PLL_CCTRL_MODE1              (0x094)
+#define QSERDES_COM_PLL_CCTRL_MODE2              (0x098)
+#define QSERDES_COM_CMN_RSVD3                    (0x098)
+#define QSERDES_COM_PLL_CNTRL                    (0x09C)
+#define QSERDES_COM_PHASE_SEL_CTRL               (0x0A0)
+#define QSERDES_COM_PHASE_SEL_DC                 (0x0A4)
+#define QSERDES_COM_CORE_CLK_IN_SYNC_SEL         (0x0A8)
+#define QSERDES_COM_BIAS_EN_CTRL_BY_PSM          (0x0A8)
+#define QSERDES_COM_SYSCLK_EN_SEL                (0x0AC)
+#define QSERDES_COM_CML_SYSCLK_SEL               (0x0B0)
+#define QSERDES_COM_RESETSM_CNTRL                (0x0B4)
+#define QSERDES_COM_RESETSM_CNTRL2               (0x0B8)
+#define QSERDES_COM_RESTRIM_CTRL                 (0x0BC)
+#define QSERDES_COM_RESTRIM_CTRL2                (0x0C0)
+#define QSERDES_COM_RESCODE_DIV_NUM              (0x0C4)
+#define QSERDES_COM_LOCK_CMP_EN                  (0x0C8)
+#define QSERDES_COM_LOCK_CMP_CFG                 (0x0CC)
+#define QSERDES_COM_DEC_START_MODE0              (0x0D0)
+#define QSERDES_COM_DEC_START_MODE1              (0x0D4)
+#define QSERDES_COM_DEC_START_MODE2              (0x0D8)
+#define QSERDES_COM_VCOCAL_DEADMAN_CTRL          (0x0D8)
+#define QSERDES_COM_DIV_FRAC_START1_MODE0        (0x0DC)
+#define QSERDES_COM_DIV_FRAC_START2_MODE0        (0x0E0)
+#define QSERDES_COM_DIV_FRAC_START3_MODE0        (0x0E4)
+#define QSERDES_COM_DIV_FRAC_START1_MODE1        (0x0E8)
+#define QSERDES_COM_DIV_FRAC_START2_MODE1        (0x0EC)
+#define QSERDES_COM_DIV_FRAC_START3_MODE1        (0x0F0)
+#define QSERDES_COM_DIV_FRAC_START1_MODE2        (0x0F4)
+#define QSERDES_COM_VCO_TUNE_MINVAL1             (0x0F4)
+#define QSERDES_COM_DIV_FRAC_START2_MODE2        (0x0F8)
+#define QSERDES_COM_VCO_TUNE_MINVAL2             (0x0F8)
+#define QSERDES_COM_DIV_FRAC_START3_MODE2        (0x0FC)
+#define QSERDES_COM_CMN_RSVD4                    (0x0FC)
+#define QSERDES_COM_INTEGLOOP_INITVAL            (0x100)
+#define QSERDES_COM_INTEGLOOP_EN                 (0x104)
+#define QSERDES_COM_INTEGLOOP_GAIN0_MODE0        (0x108)
+#define QSERDES_COM_INTEGLOOP_GAIN1_MODE0        (0x10C)
+#define QSERDES_COM_INTEGLOOP_GAIN0_MODE1        (0x110)
+#define QSERDES_COM_INTEGLOOP_GAIN1_MODE1        (0x114)
+#define QSERDES_COM_INTEGLOOP_GAIN0_MODE2        (0x118)
+#define QSERDES_COM_VCO_TUNE_MAXVAL1             (0x118)
+#define QSERDES_COM_INTEGLOOP_GAIN1_MODE2        (0x11C)
+#define QSERDES_COM_VCO_TUNE_MAXVAL2             (0x11C)
+#define QSERDES_COM_RES_TRIM_CONTROL2            (0x120)
+#define QSERDES_COM_VCO_TUNE_CTRL                (0x124)
+#define QSERDES_COM_VCO_TUNE_MAP                 (0x128)
+#define QSERDES_COM_VCO_TUNE1_MODE0              (0x12C)
+#define QSERDES_COM_VCO_TUNE2_MODE0              (0x130)
+#define QSERDES_COM_VCO_TUNE1_MODE1              (0x134)
+#define QSERDES_COM_VCO_TUNE2_MODE1              (0x138)
+#define QSERDES_COM_VCO_TUNE1_MODE2              (0x13C)
+#define QSERDES_COM_VCO_TUNE_INITVAL1            (0x13C)
+#define QSERDES_COM_VCO_TUNE2_MODE2              (0x140)
+#define QSERDES_COM_VCO_TUNE_INITVAL2            (0x140)
+#define QSERDES_COM_VCO_TUNE_TIMER1              (0x144)
+#define QSERDES_COM_VCO_TUNE_TIMER2              (0x148)
+#define QSERDES_COM_SAR                          (0x14C)
+#define QSERDES_COM_SAR_CLK                      (0x150)
+#define QSERDES_COM_SAR_CODE_OUT_STATUS          (0x154)
+#define QSERDES_COM_SAR_CODE_READY_STATUS        (0x158)
+#define QSERDES_COM_CMN_STATUS                   (0x15C)
+#define QSERDES_COM_RESET_SM_STATUS              (0x160)
+#define QSERDES_COM_RESTRIM_CODE_STATUS          (0x164)
+#define QSERDES_COM_PLLCAL_CODE1_STATUS          (0x168)
+#define QSERDES_COM_PLLCAL_CODE2_STATUS          (0x16C)
+#define QSERDES_COM_BG_CTRL                      (0x170)
+#define QSERDES_COM_CLK_SELECT                   (0x174)
+#define QSERDES_COM_HSCLK_SEL                    (0x178)
+#define QSERDES_COM_INTEGLOOP_BINCODE_STATUS     (0x17C)
+#define QSERDES_COM_PLL_ANALOG                   (0x180)
+#define QSERDES_COM_CORECLK_DIV                  (0x184)
+#define QSERDES_COM_SW_RESET                     (0x188)
+#define QSERDES_COM_CORE_CLK_EN                  (0x18C)
+#define QSERDES_COM_C_READY_STATUS               (0x190)
+#define QSERDES_COM_CMN_CONFIG                   (0x194)
+#define QSERDES_COM_CMN_RATE_OVERRIDE            (0x198)
+#define QSERDES_COM_SVS_MODE_CLK_SEL             (0x19C)
+#define QSERDES_COM_DEBUG_BUS0                   (0x1A0)
+#define QSERDES_COM_DEBUG_BUS1                   (0x1A4)
+#define QSERDES_COM_DEBUG_BUS2                   (0x1A8)
+#define QSERDES_COM_DEBUG_BUS3                   (0x1AC)
+#define QSERDES_COM_DEBUG_BUS_SEL                (0x1B0)
+#define QSERDES_COM_CMN_MISC1                    (0x1B4)
+#define QSERDES_COM_CMN_MISC2                    (0x1B8)
+#define QSERDES_COM_CORECLK_DIV_MODE1            (0x1BC)
+#define QSERDES_COM_CORECLK_DIV_MODE2            (0x1C0)
+#define QSERDES_COM_CMN_RSVD5                    (0x1C0)
+
+/* Tx Channel base addresses */
+#define HDMI_TX_L0_BASE_OFFSET                   (0x400)
+#define HDMI_TX_L1_BASE_OFFSET                   (0x600)
+#define HDMI_TX_L2_BASE_OFFSET                   (0x800)
+#define HDMI_TX_L3_BASE_OFFSET                   (0xA00)
+
+/* Tx Channel PHY registers */
+#define QSERDES_TX_L0_BIST_MODE_LANENO                    (0x000)
+#define QSERDES_TX_L0_BIST_INVERT                         (0x004)
+#define QSERDES_TX_L0_CLKBUF_ENABLE                       (0x008)
+#define QSERDES_TX_L0_CMN_CONTROL_ONE                     (0x00C)
+#define QSERDES_TX_L0_CMN_CONTROL_TWO                     (0x010)
+#define QSERDES_TX_L0_CMN_CONTROL_THREE                   (0x014)
+#define QSERDES_TX_L0_TX_EMP_POST1_LVL                    (0x018)
+#define QSERDES_TX_L0_TX_POST2_EMPH                       (0x01C)
+#define QSERDES_TX_L0_TX_BOOST_LVL_UP_DN                  (0x020)
+#define QSERDES_TX_L0_HP_PD_ENABLES                       (0x024)
+#define QSERDES_TX_L0_TX_IDLE_LVL_LARGE_AMP               (0x028)
+#define QSERDES_TX_L0_TX_DRV_LVL                          (0x02C)
+#define QSERDES_TX_L0_TX_DRV_LVL_OFFSET                   (0x030)
+#define QSERDES_TX_L0_RESET_TSYNC_EN                      (0x034)
+#define QSERDES_TX_L0_PRE_STALL_LDO_BOOST_EN              (0x038)
+#define QSERDES_TX_L0_TX_BAND                             (0x03C)
+#define QSERDES_TX_L0_SLEW_CNTL                           (0x040)
+#define QSERDES_TX_L0_INTERFACE_SELECT                    (0x044)
+#define QSERDES_TX_L0_LPB_EN                              (0x048)
+#define QSERDES_TX_L0_RES_CODE_LANE_TX                    (0x04C)
+#define QSERDES_TX_L0_RES_CODE_LANE_RX                    (0x050)
+#define QSERDES_TX_L0_RES_CODE_LANE_OFFSET                (0x054)
+#define QSERDES_TX_L0_PERL_LENGTH1                        (0x058)
+#define QSERDES_TX_L0_PERL_LENGTH2                        (0x05C)
+#define QSERDES_TX_L0_SERDES_BYP_EN_OUT                   (0x060)
+#define QSERDES_TX_L0_DEBUG_BUS_SEL                       (0x064)
+#define QSERDES_TX_L0_HIGHZ_TRANSCEIVEREN_BIAS_DRVR_EN    (0x068)
+#define QSERDES_TX_L0_TX_POL_INV                          (0x06C)
+#define QSERDES_TX_L0_PARRATE_REC_DETECT_IDLE_EN          (0x070)
+#define QSERDES_TX_L0_BIST_PATTERN1                       (0x074)
+#define QSERDES_TX_L0_BIST_PATTERN2                       (0x078)
+#define QSERDES_TX_L0_BIST_PATTERN3                       (0x07C)
+#define QSERDES_TX_L0_BIST_PATTERN4                       (0x080)
+#define QSERDES_TX_L0_BIST_PATTERN5                       (0x084)
+#define QSERDES_TX_L0_BIST_PATTERN6                       (0x088)
+#define QSERDES_TX_L0_BIST_PATTERN7                       (0x08C)
+#define QSERDES_TX_L0_BIST_PATTERN8                       (0x090)
+#define QSERDES_TX_L0_LANE_MODE                           (0x094)
+#define QSERDES_TX_L0_IDAC_CAL_LANE_MODE                  (0x098)
+#define QSERDES_TX_L0_IDAC_CAL_LANE_MODE_CONFIGURATION    (0x09C)
+#define QSERDES_TX_L0_ATB_SEL1                            (0x0A0)
+#define QSERDES_TX_L0_ATB_SEL2                            (0x0A4)
+#define QSERDES_TX_L0_RCV_DETECT_LVL                      (0x0A8)
+#define QSERDES_TX_L0_RCV_DETECT_LVL_2                    (0x0AC)
+#define QSERDES_TX_L0_PRBS_SEED1                          (0x0B0)
+#define QSERDES_TX_L0_PRBS_SEED2                          (0x0B4)
+#define QSERDES_TX_L0_PRBS_SEED3                          (0x0B8)
+#define QSERDES_TX_L0_PRBS_SEED4                          (0x0BC)
+#define QSERDES_TX_L0_RESET_GEN                           (0x0C0)
+#define QSERDES_TX_L0_RESET_GEN_MUXES                     (0x0C4)
+#define QSERDES_TX_L0_TRAN_DRVR_EMP_EN                    (0x0C8)
+#define QSERDES_TX_L0_TX_INTERFACE_MODE                   (0x0CC)
+#define QSERDES_TX_L0_PWM_CTRL                            (0x0D0)
+#define QSERDES_TX_L0_PWM_ENCODED_OR_DATA                 (0x0D4)
+#define QSERDES_TX_L0_PWM_GEAR_1_DIVIDER_BAND2            (0x0D8)
+#define QSERDES_TX_L0_PWM_GEAR_2_DIVIDER_BAND2            (0x0DC)
+#define QSERDES_TX_L0_PWM_GEAR_3_DIVIDER_BAND2            (0x0E0)
+#define QSERDES_TX_L0_PWM_GEAR_4_DIVIDER_BAND2            (0x0E4)
+#define QSERDES_TX_L0_PWM_GEAR_1_DIVIDER_BAND0_1          (0x0E8)
+#define QSERDES_TX_L0_PWM_GEAR_2_DIVIDER_BAND0_1          (0x0EC)
+#define QSERDES_TX_L0_PWM_GEAR_3_DIVIDER_BAND0_1          (0x0F0)
+#define QSERDES_TX_L0_PWM_GEAR_4_DIVIDER_BAND0_1          (0x0F4)
+#define QSERDES_TX_L0_VMODE_CTRL1                         (0x0F8)
+#define QSERDES_TX_L0_VMODE_CTRL2                         (0x0FC)
+#define QSERDES_TX_L0_TX_ALOG_INTF_OBSV_CNTL              (0x100)
+#define QSERDES_TX_L0_BIST_STATUS                         (0x104)
+#define QSERDES_TX_L0_BIST_ERROR_COUNT1                   (0x108)
+#define QSERDES_TX_L0_BIST_ERROR_COUNT2                   (0x10C)
+#define QSERDES_TX_L0_TX_ALOG_INTF_OBSV                   (0x110)
+
+/* HDMI PHY REGISTERS */
+#define HDMI_PHY_BASE_OFFSET                  (0xC00)
+
+#define HDMI_PHY_CFG                          (0x00)
+#define HDMI_PHY_PD_CTL                       (0x04)
+#define HDMI_PHY_MODE                         (0x08)
+#define HDMI_PHY_MISR_CLEAR                   (0x0C)
+#define HDMI_PHY_TX0_TX1_BIST_CFG0            (0x10)
+#define HDMI_PHY_TX0_TX1_BIST_CFG1            (0x14)
+#define HDMI_PHY_TX0_TX1_PRBS_SEED_BYTE0      (0x18)
+#define HDMI_PHY_TX0_TX1_PRBS_SEED_BYTE1      (0x1C)
+#define HDMI_PHY_TX0_TX1_BIST_PATTERN0        (0x20)
+#define HDMI_PHY_TX0_TX1_BIST_PATTERN1        (0x24)
+#define HDMI_PHY_TX2_TX3_BIST_CFG0            (0x28)
+#define HDMI_PHY_TX2_TX3_BIST_CFG1            (0x2C)
+#define HDMI_PHY_TX2_TX3_PRBS_SEED_BYTE0      (0x30)
+#define HDMI_PHY_TX2_TX3_PRBS_SEED_BYTE1      (0x34)
+#define HDMI_PHY_TX2_TX3_BIST_PATTERN0        (0x38)
+#define HDMI_PHY_TX2_TX3_BIST_PATTERN1        (0x3C)
+#define HDMI_PHY_DEBUG_BUS_SEL                (0x40)
+#define HDMI_PHY_TXCAL_CFG0                   (0x44)
+#define HDMI_PHY_TXCAL_CFG1                   (0x48)
+#define HDMI_PHY_TX0_TX1_LANE_CTL             (0x4C)
+#define HDMI_PHY_TX2_TX3_LANE_CTL             (0x50)
+#define HDMI_PHY_LANE_BIST_CONFIG             (0x54)
+#define HDMI_PHY_CLOCK                        (0x58)
+#define HDMI_PHY_MISC1                        (0x5C)
+#define HDMI_PHY_MISC2                        (0x60)
+#define HDMI_PHY_TX0_TX1_BIST_STATUS0         (0x64)
+#define HDMI_PHY_TX0_TX1_BIST_STATUS1         (0x68)
+#define HDMI_PHY_TX0_TX1_BIST_STATUS2         (0x6C)
+#define HDMI_PHY_TX2_TX3_BIST_STATUS0         (0x70)
+#define HDMI_PHY_TX2_TX3_BIST_STATUS1         (0x74)
+#define HDMI_PHY_TX2_TX3_BIST_STATUS2         (0x78)
+#define HDMI_PHY_PRE_MISR_STATUS0             (0x7C)
+#define HDMI_PHY_PRE_MISR_STATUS1             (0x80)
+#define HDMI_PHY_PRE_MISR_STATUS2             (0x84)
+#define HDMI_PHY_PRE_MISR_STATUS3             (0x88)
+#define HDMI_PHY_POST_MISR_STATUS0            (0x8C)
+#define HDMI_PHY_POST_MISR_STATUS1            (0x90)
+#define HDMI_PHY_POST_MISR_STATUS2            (0x94)
+#define HDMI_PHY_POST_MISR_STATUS3            (0x98)
+#define HDMI_PHY_STATUS                       (0x9C)
+#define HDMI_PHY_MISC3_STATUS                 (0xA0)
+#define HDMI_PHY_MISC4_STATUS                 (0xA4)
+#define HDMI_PHY_DEBUG_BUS0                   (0xA8)
+#define HDMI_PHY_DEBUG_BUS1                   (0xAC)
+#define HDMI_PHY_DEBUG_BUS2                   (0xB0)
+#define HDMI_PHY_DEBUG_BUS3                   (0xB4)
+#define HDMI_PHY_PHY_REVISION_ID0             (0xB8)
+#define HDMI_PHY_PHY_REVISION_ID1             (0xBC)
+#define HDMI_PHY_PHY_REVISION_ID2             (0xC0)
+#define HDMI_PHY_PHY_REVISION_ID3             (0xC4)
+
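+/*
+ * Ready/lock polling: up to 100 reads spaced 1500 us apart, i.e. a
+ * worst-case wait of roughly 150 ms before declaring a timeout.
+ */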
+#define HDMI_PLL_POLL_MAX_READS                100
+#define HDMI_PLL_POLL_TIMEOUT_US               1500
+
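+/* HDMI pixel clock rates handled by the PLL, in kHz */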
+enum hdmi_pll_freqs {
+	HDMI_PCLK_25200_KHZ,
+	HDMI_PCLK_27027_KHZ,
+	HDMI_PCLK_27000_KHZ,
+	HDMI_PCLK_74250_KHZ,
+	HDMI_PCLK_148500_KHZ,
+	HDMI_PCLK_154000_KHZ,
+	HDMI_PCLK_268500_KHZ,
+	HDMI_PCLK_297000_KHZ,
+	HDMI_PCLK_594000_KHZ,
+	HDMI_PCLK_MAX
+};
+
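+/*
+ * Per-rate PHY/PLL register values produced by the hdmi_8996_v*_calculate()
+ * routines and programmed by hdmi_8996_phy_pll_set_clk_rate().
+ */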
+struct hdmi_8996_phy_pll_reg_cfg {
+	u32 tx_l0_lane_mode;
+	u32 tx_l2_lane_mode;
+	u32 tx_l0_tx_band;
+	u32 tx_l1_tx_band;
+	u32 tx_l2_tx_band;
+	u32 tx_l3_tx_band;
+	u32 com_svs_mode_clk_sel;
+	u32 com_hsclk_sel;
+	u32 com_pll_cctrl_mode0;
+	u32 com_pll_rctrl_mode0;
+	u32 com_cp_ctrl_mode0;
+	u32 com_dec_start_mode0;
+	u32 com_div_frac_start1_mode0;
+	u32 com_div_frac_start2_mode0;
+	u32 com_div_frac_start3_mode0;
+	u32 com_integloop_gain0_mode0;
+	u32 com_integloop_gain1_mode0;
+	u32 com_lock_cmp_en;
+	u32 com_lock_cmp1_mode0;
+	u32 com_lock_cmp2_mode0;
+	u32 com_lock_cmp3_mode0;
+	u32 com_core_clk_en;
+	u32 com_coreclk_div;
+	u32 com_restrim_ctrl;
+	u32 com_vco_tune_ctrl;
+
+	u32 tx_l0_tx_drv_lvl;
+	u32 tx_l0_tx_emp_post1_lvl;
+	u32 tx_l1_tx_drv_lvl;
+	u32 tx_l1_tx_emp_post1_lvl;
+	u32 tx_l2_tx_drv_lvl;
+	u32 tx_l2_tx_emp_post1_lvl;
+	u32 tx_l3_tx_drv_lvl;
+	u32 tx_l3_tx_emp_post1_lvl;
+	u32 tx_l0_vmode_ctrl1;
+	u32 tx_l0_vmode_ctrl2;
+	u32 tx_l1_vmode_ctrl1;
+	u32 tx_l1_vmode_ctrl2;
+	u32 tx_l2_vmode_ctrl1;
+	u32 tx_l2_vmode_ctrl2;
+	u32 tx_l3_vmode_ctrl1;
+	u32 tx_l3_vmode_ctrl2;
+	u32 tx_l0_res_code_lane_tx;
+	u32 tx_l1_res_code_lane_tx;
+	u32 tx_l2_res_code_lane_tx;
+	u32 tx_l3_res_code_lane_tx;
+
+	u32 phy_mode;
+};
+
+struct hdmi_8996_v3_post_divider {
+	u64 vco_freq;
+	u64 hsclk_divsel;
+	u64 vco_ratio;
+	u64 tx_band_sel;
+	u64 half_rate_mode;
+};
+
+static inline struct hdmi_pll_vco_clk *to_hdmi_8996_vco_clk(struct clk *clk)
+{
+	return container_of(clk, struct hdmi_pll_vco_clk, c);
+}
+
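+/*
+ * v1 post divider for bit clocks under 2.4 GHz: 2, 3 or 4 depending on
+ * the bit clock band, or HDMI_64B_ERR_VAL below 300 MHz.
+ */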
+static inline u64 hdmi_8996_v1_get_post_div_lt_2g(u64 bclk)
+{
+	if (bclk >= HDMI_2400MHZ_BIT_CLK_HZ)
+		return 2;
+	else if (bclk >= HDMI_1700MHZ_BIT_CLK_HZ)
+		return 3;
+	else if (bclk >= HDMI_1200MHZ_BIT_CLK_HZ)
+		return 4;
+	else if (bclk >= HDMI_850MHZ_BIT_CLK_HZ)
+		return 3;
+	else if (bclk >= HDMI_600MHZ_BIT_CLK_HZ)
+		return 4;
+	else if (bclk >= HDMI_450MHZ_BIT_CLK_HZ)
+		return 3;
+	else if (bclk >= HDMI_300MHZ_BIT_CLK_HZ)
+		return 4;
+
+	return HDMI_64B_ERR_VAL;
+}
+
+static inline u64 hdmi_8996_v2_get_post_div_lt_2g(u64 bclk, u64 vco_range)
+{
+	u64 hdmi_8ghz = vco_range;
+	u64 tmp_calc;
+
+	hdmi_8ghz <<= 2;
+	tmp_calc = hdmi_8ghz;
+	do_div(tmp_calc, 6U);
+
+	if (bclk >= vco_range)
+		return 2;
+	else if (bclk >= tmp_calc)
+		return 3;
+	else if (bclk >= vco_range >> 1)
+		return 4;
+
+	tmp_calc = hdmi_8ghz;
+	do_div(tmp_calc, 12U);
+	if (bclk >= tmp_calc)
+		return 3;
+	else if (bclk >= vco_range >> 2)
+		return 4;
+
+	tmp_calc = hdmi_8ghz;
+	do_div(tmp_calc, 24U);
+	if (bclk >= tmp_calc)
+		return 3;
+	else if (bclk >= vco_range >> 3)
+		return 4;
+
+	return HDMI_64B_ERR_VAL;
+}
+
+static inline u64 hdmi_8996_v2_get_post_div_gt_2g(u64 hsclk)
+{
+	/* hsclk is unsigned, so only the upper bound needs checking */
+	if (hsclk <= 3)
+		return hsclk + 1;
+
+	return HDMI_64B_ERR_VAL;
+}
+
+static inline u64 hdmi_8996_get_coreclk_div_lt_2g(u64 bclk)
+{
+	if (bclk >= HDMI_1334MHZ_BIT_CLK_HZ)
+		return 1;
+	else if (bclk >= HDMI_1000MHZ_BIT_CLK_HZ)
+		return 1;
+	else if (bclk >= HDMI_667MHZ_BIT_CLK_HZ)
+		return 2;
+	else if (bclk >= HDMI_500MHZ_BIT_CLK_HZ)
+		return 2;
+	else if (bclk >= HDMI_334MHZ_BIT_CLK_HZ)
+		return 3;
+	else if (bclk >= HDMI_250MHZ_BIT_CLK_HZ)
+		return 3;
+
+	return HDMI_64B_ERR_VAL;
+}
+
+static inline u64 hdmi_8996_get_coreclk_div_ratio(u64 clks_pll_divsel,
+						  u64 coreclk_div)
+{
+	if (clks_pll_divsel == 0)
+		return coreclk_div * 2;
+	else if (clks_pll_divsel == 1)
+		return coreclk_div * 4;
+
+	return HDMI_64B_ERR_VAL;
+}
+
+static inline u64 hdmi_8996_v1_get_tx_band(u64 bclk)
+{
+	if (bclk >= 2400000000UL)
+		return 0;
+	if (bclk >= 1200000000UL)
+		return 1;
+	if (bclk >= 600000000UL)
+		return 2;
+	if (bclk >= 300000000UL)
+		return 3;
+
+	return HDMI_64B_ERR_VAL;
+}
+
+static inline u64 hdmi_8996_v2_get_tx_band(u64 bclk, u64 vco_range)
+{
+	if (bclk >= vco_range)
+		return 0;
+	else if (bclk >= vco_range >> 1)
+		return 1;
+	else if (bclk >= vco_range >> 2)
+		return 2;
+	else if (bclk >= vco_range >> 3)
+		return 3;
+
+	return HDMI_64B_ERR_VAL;
+}
+
+static inline u64 hdmi_8996_v1_get_hsclk(u64 fdata)
+{
+	if (fdata >= 9600000000UL)
+		return 0;
+	else if (fdata >= 4800000000UL)
+		return 1;
+	else if (fdata >= 3200000000UL)
+		return 2;
+	else if (fdata >= 2400000000UL)
+		return 3;
+
+	return HDMI_64B_ERR_VAL;
+}
+
+static inline u64 hdmi_8996_v2_get_hsclk(u64 fdata, u64 vco_range)
+{
+	u64 tmp_calc = vco_range;
+
+	tmp_calc <<= 2;
+	do_div(tmp_calc, 3U);
+	if (fdata >= (vco_range << 2))
+		return 0;
+	else if (fdata >= (vco_range << 1))
+		return 1;
+	else if (fdata >= tmp_calc)
+		return 2;
+	else if (fdata >= vco_range)
+		return 3;
+
+	return HDMI_64B_ERR_VAL;
+}
+
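+/*
+ * VCO frequency = bit clock * post divider * TX band divider; the post
+ * divider source depends on whether the bit clock reaches the VCO range.
+ */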
+static inline u64 hdmi_8996_v2_get_vco_freq(u64 bclk, u64 vco_range)
+{
+	u64 tx_band_div_ratio = 1U << hdmi_8996_v2_get_tx_band(bclk, vco_range);
+	u64 pll_post_div_ratio;
+
+	if (bclk >= vco_range) {
+		u64 hsclk = hdmi_8996_v2_get_hsclk(bclk, vco_range);
+
+		pll_post_div_ratio = hdmi_8996_v2_get_post_div_gt_2g(hsclk);
+	} else {
+		pll_post_div_ratio = hdmi_8996_v2_get_post_div_lt_2g(bclk,
+								vco_range);
+	}
+
+	return bclk * (pll_post_div_ratio * tx_band_div_ratio);
+}
+
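+/*
+ * fdata is the bit clock itself once it reaches the VCO range; below that
+ * it is the VCO frequency divided back down by the sub-range post divider.
+ */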
+static inline u64 hdmi_8996_v2_get_fdata(u64 bclk, u64 vco_range)
+{
+	if (bclk >= vco_range) {
+		return bclk;
+	} else {
+		u64 tmp_calc = hdmi_8996_v2_get_vco_freq(bclk, vco_range);
+		u64 pll_post_div_ratio_lt_2g = hdmi_8996_v2_get_post_div_lt_2g(
+							bclk, vco_range);
+		if (pll_post_div_ratio_lt_2g == HDMI_64B_ERR_VAL)
+			return HDMI_64B_ERR_VAL;
+
+		do_div(tmp_calc, pll_post_div_ratio_lt_2g);
+		return tmp_calc;
+	}
+}
+
+static inline u64 hdmi_8996_get_cpctrl(u64 frac_start, bool gen_ssc)
+{
+	if ((frac_start != 0) || gen_ssc)
+		/*
+		 * This should be ROUND(11/(19.2/20)).
+		 * Since the ref clock does not change, hardcode to 11.
+		 */
+		return 0xB;
+
+	return 0x23;
+}
+
+static inline u64 hdmi_8996_get_rctrl(u64 frac_start, bool gen_ssc)
+{
+	if ((frac_start != 0) || gen_ssc)
+		return 0x16;
+
+	return 0x10;
+}
+
+static inline u64 hdmi_8996_get_cctrl(u64 frac_start, bool gen_ssc)
+{
+	if ((frac_start != 0) || gen_ssc)
+		return 0x28;
+
+	return 0x1;
+}
+
+static inline u64 hdmi_8996_get_integloop_gain(u64 frac_start, bool gen_ssc)
+{
+	if ((frac_start != 0) || gen_ssc)
+		return 0x80;
+
+	return 0xC4;
+}
+
+static inline u64 hdmi_8996_v3_get_integloop_gain(u64 frac_start, u64 bclk,
+							bool gen_ssc)
+{
+	u64 digclk_divsel = bclk >= HDMI_DIG_FREQ_BIT_CLK_THRESHOLD ? 1 : 2;
+	u64 base = ((frac_start != 0) || gen_ssc) ? 0x40 : 0xC4;
+
+	base <<= digclk_divsel;
+
+	return (base <= 2046 ? base : 0x7FE);
+}
+
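+/*
+ * VCO tune code: (13000 - (fdata * div) / 1000000 - 256) / 5, i.e. a
+ * linear mapping of the post-divided VCO frequency in MHz onto the
+ * tuning range.
+ */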
+static inline u64 hdmi_8996_get_vco_tune(u64 fdata, u64 div)
+{
+	u64 vco_tune;
+
+	vco_tune = fdata * div;
+	do_div(vco_tune, 1000000);
+	vco_tune = 13000 - vco_tune - 256;
+	do_div(vco_tune, 5);
+
+	return vco_tune;
+}
+
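+/*
+ * Lock comparator value: the number of core clock cycles elapsing in
+ * pll_cmp_cnt reference clock cycles, rounded to the nearest integer,
+ * minus one.
+ */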
+static inline u64 hdmi_8996_get_pll_cmp(u64 pll_cmp_cnt, u64 core_clk)
+{
+	u64 pll_cmp;
+	u64 rem;
+
+	pll_cmp = pll_cmp_cnt * core_clk;
+	rem = do_div(pll_cmp, HDMI_REF_CLOCK);
+	if (rem > (HDMI_REF_CLOCK >> 1))
+		pll_cmp++;
+	pll_cmp -= 1;
+
+	return pll_cmp;
+}
+
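+/*
+ * v3 lock comparator: round(pll_cmp_cnt * fdata / (10 * HDMI_REF_CLOCK)) - 1,
+ * comparing against fdata / 10 rather than the core clock.
+ */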
+static inline u64 hdmi_8996_v3_get_pll_cmp(u64 pll_cmp_cnt, u64 fdata)
+{
+	u64 dividend = pll_cmp_cnt * fdata;
+	u64 divisor = HDMI_REF_CLOCK * 10;
+	u64 rem;
+
+	rem = do_div(dividend, divisor);
+	if (rem > (divisor >> 1))
+		dividend++;
+
+	return dividend - 1;
+}
+
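+/*
+ * Exhaustively try the 15 supported VCO ratios across the four TX band
+ * dividers (60 candidates) and keep the lowest VCO frequency inside
+ * [HDMI_VCO_MIN_FREQ, HDMI_VCO_MAX_FREQ]; if nothing fits, retry once
+ * with half rate mode before giving up.
+ */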
+static int hdmi_8996_v3_get_post_div(struct hdmi_8996_v3_post_divider *pd,
+						u64 bclk)
+{
+	u32 ratio[] = {2, 3, 4, 5, 6, 9, 10, 12, 14, 15, 20, 21, 25, 28, 35};
+	u32 tx_band_sel[] = {0, 1, 2, 3};
+	u64 vco_freq[60];
+	u64 vco, vco_optimal, half_rate_mode = 0;
+	int vco_optimal_index, vco_freq_index;
+	int i, j, k, x;
+
+	for (i = 0; i <= 1; i++) {
+		vco_optimal = HDMI_VCO_MAX_FREQ;
+		vco_optimal_index = -1;
+		vco_freq_index = 0;
+		for (j = 0; j < 15; j++) {
+			for (k = 0; k < 4; k++) {
+				u64 ratio_mult = ratio[j] << tx_band_sel[k];
+
+				vco = bclk >> half_rate_mode;
+				vco *= ratio_mult;
+				vco_freq[vco_freq_index++] = vco;
+			}
+		}
+
+		for (x = 0; x < 60; x++) {
+			u64 vco_tmp = vco_freq[x];
+
+			if ((vco_tmp >= HDMI_VCO_MIN_FREQ) &&
+					(vco_tmp <= vco_optimal)) {
+				vco_optimal = vco_tmp;
+				vco_optimal_index = x;
+			}
+		}
+
+		if (vco_optimal_index == -1) {
+			if (!half_rate_mode)
+				half_rate_mode++;
+			else
+				return -EINVAL;
+		} else {
+			pd->vco_freq = vco_optimal;
+			pd->tx_band_sel = tx_band_sel[vco_optimal_index % 4];
+			pd->vco_ratio = ratio[vco_optimal_index / 4];
+			break;
+		}
+	}
+
+	switch (pd->vco_ratio) {
+	case 2:
+		pd->hsclk_divsel = 0;
+		break;
+	case 3:
+		pd->hsclk_divsel = 4;
+		break;
+	case 4:
+		pd->hsclk_divsel = 8;
+		break;
+	case 5:
+		pd->hsclk_divsel = 12;
+		break;
+	case 6:
+		pd->hsclk_divsel = 1;
+		break;
+	case 9:
+		pd->hsclk_divsel = 5;
+		break;
+	case 10:
+		pd->hsclk_divsel = 2;
+		break;
+	case 12:
+		pd->hsclk_divsel = 9;
+		break;
+	case 14:
+		pd->hsclk_divsel = 3;
+		break;
+	case 15:
+		pd->hsclk_divsel = 13;
+		break;
+	case 20:
+		pd->hsclk_divsel = 10;
+		break;
+	case 21:
+		pd->hsclk_divsel = 7;
+		break;
+	case 25:
+		pd->hsclk_divsel = 14;
+		break;
+	case 28:
+		pd->hsclk_divsel = 11;
+		break;
+	case 35:
+		pd->hsclk_divsel = 15;
+		break;
+	}
+
+	return 0;
+}
+
+static int hdmi_8996_v1_calculate(u32 pix_clk,
+			       struct hdmi_8996_phy_pll_reg_cfg *cfg)
+{
+	int rc = -EINVAL;
+	u64 fdata, clk_divtx, tmds_clk;
+	u64 bclk;
+	u64 post_div_gt_2g;
+	u64 post_div_lt_2g;
+	u64 coreclk_div1_lt_2g;
+	u64 core_clk_div_ratio;
+	u64 core_clk;
+	u64 pll_cmp;
+	u64 tx_band;
+	u64 tx_band_div_ratio;
+	u64 hsclk;
+	u64 dec_start;
+	u64 frac_start;
+	u64 pll_divisor = 4 * HDMI_REF_CLOCK;
+	u64 cpctrl;
+	u64 rctrl;
+	u64 cctrl;
+	u64 integloop_gain;
+	u64 vco_tune;
+	u64 vco_freq;
+	u64 rem;
+
+	/* FDATA, CLK_DIVTX, PIXEL_CLK, TMDS_CLK */
+	bclk = ((u64)pix_clk) * HDMI_BIT_CLK_TO_PIX_CLK_RATIO;
+
+	if (bclk > HDMI_HIGH_FREQ_BIT_CLK_THRESHOLD)
+		tmds_clk = bclk/4;
+	else
+		tmds_clk = bclk;
+
+	post_div_lt_2g = hdmi_8996_v1_get_post_div_lt_2g(bclk);
+	if (post_div_lt_2g == HDMI_64B_ERR_VAL)
+		goto fail;
+
+	coreclk_div1_lt_2g = hdmi_8996_get_coreclk_div_lt_2g(bclk);
+
+	core_clk_div_ratio = hdmi_8996_get_coreclk_div_ratio(
+				HDMI_CLKS_PLL_DIVSEL, HDMI_CORECLK_DIV);
+
+	tx_band = hdmi_8996_v1_get_tx_band(bclk);
+	if (tx_band == HDMI_64B_ERR_VAL)
+		goto fail;
+
+	tx_band_div_ratio = 1 << tx_band;
+
+	if (bclk >= HDMI_2400MHZ_BIT_CLK_HZ) {
+		fdata = bclk;
+		hsclk = hdmi_8996_v1_get_hsclk(fdata);
+		if (hsclk == HDMI_64B_ERR_VAL)
+			goto fail;
+
+		post_div_gt_2g = (hsclk <= 3) ? (hsclk + 1) : HDMI_64B_ERR_VAL;
+		if (post_div_gt_2g == HDMI_64B_ERR_VAL)
+			goto fail;
+
+		vco_freq = bclk * (post_div_gt_2g * tx_band_div_ratio);
+		clk_divtx = vco_freq;
+		do_div(clk_divtx, post_div_gt_2g);
+	} else {
+		vco_freq = bclk * (post_div_lt_2g * tx_band_div_ratio);
+		fdata = vco_freq;
+		do_div(fdata, post_div_lt_2g);
+		hsclk = hdmi_8996_v1_get_hsclk(fdata);
+		if (hsclk == HDMI_64B_ERR_VAL)
+			goto fail;
+
+		clk_divtx = vco_freq;
+		do_div(clk_divtx, post_div_lt_2g);
+		post_div_gt_2g = (hsclk <= 3) ? (hsclk + 1) : HDMI_64B_ERR_VAL;
+		if (post_div_gt_2g == HDMI_64B_ERR_VAL)
+			goto fail;
+	}
+
+	/* Decimal and fraction values */
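+	/*
+	 * dec_start = floor(fdata * post_div / (4 * HDMI_REF_CLOCK));
+	 * frac_start is the remaining fraction of the divide ratio,
+	 * scaled by 2^20 and rounded to the nearest integer.
+	 */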
+	dec_start = fdata * post_div_gt_2g;
+	do_div(dec_start, pll_divisor);
+	frac_start = ((pll_divisor - (((dec_start + 1) * pll_divisor) -
+			(fdata * post_div_gt_2g))) * (1 << 20));
+	rem = do_div(frac_start, pll_divisor);
+	/* Round off frac_start to closest integer */
+	if (rem >= (pll_divisor >> 1))
+		frac_start++;
+
+	cpctrl = hdmi_8996_get_cpctrl(frac_start, false);
+	rctrl = hdmi_8996_get_rctrl(frac_start, false);
+	cctrl = hdmi_8996_get_cctrl(frac_start, false);
+	integloop_gain = hdmi_8996_get_integloop_gain(frac_start, false);
+	vco_tune = hdmi_8996_get_vco_tune(fdata, post_div_gt_2g);
+
+	core_clk = clk_divtx;
+	do_div(core_clk, core_clk_div_ratio);
+	pll_cmp = hdmi_8996_get_pll_cmp(1024, core_clk);
+
+	/* Debug dump */
+	DEV_DBG("%s: VCO freq: %llu\n", __func__, vco_freq);
+	DEV_DBG("%s: fdata: %llu\n", __func__, fdata);
+	DEV_DBG("%s: CLK_DIVTX: %llu\n", __func__, clk_divtx);
+	DEV_DBG("%s: pix_clk: %d\n", __func__, pix_clk);
+	DEV_DBG("%s: tmds clk: %llu\n", __func__, tmds_clk);
+	DEV_DBG("%s: HSCLK_SEL: %llu\n", __func__, hsclk);
+	DEV_DBG("%s: DEC_START: %llu\n", __func__, dec_start);
+	DEV_DBG("%s: DIV_FRAC_START: %llu\n", __func__, frac_start);
+	DEV_DBG("%s: PLL_CPCTRL: %llu\n", __func__, cpctrl);
+	DEV_DBG("%s: PLL_RCTRL: %llu\n", __func__, rctrl);
+	DEV_DBG("%s: PLL_CCTRL: %llu\n", __func__, cctrl);
+	DEV_DBG("%s: INTEGLOOP_GAIN: %llu\n", __func__, integloop_gain);
+	DEV_DBG("%s: VCO_TUNE: %llu\n", __func__, vco_tune);
+	DEV_DBG("%s: TX_BAND: %llu\n", __func__, tx_band);
+	DEV_DBG("%s: PLL_CMP: %llu\n", __func__, pll_cmp);
+
+	/* Convert these values to register specific values */
+	cfg->tx_l0_lane_mode = 0x3;
+	cfg->tx_l2_lane_mode = 0x3;
+	cfg->tx_l0_tx_band = tx_band + 4;
+	cfg->tx_l1_tx_band = tx_band + 4;
+	cfg->tx_l2_tx_band = tx_band + 4;
+	cfg->tx_l3_tx_band = tx_band + 4;
+	cfg->tx_l0_res_code_lane_tx = 0x33;
+	cfg->tx_l1_res_code_lane_tx = 0x33;
+	cfg->tx_l2_res_code_lane_tx = 0x33;
+	cfg->tx_l3_res_code_lane_tx = 0x33;
+	cfg->com_restrim_ctrl = 0x0;
+	cfg->com_vco_tune_ctrl = 0x1C;
+
+	cfg->com_svs_mode_clk_sel =
+			(bclk >= HDMI_DIG_FREQ_BIT_CLK_THRESHOLD ? 1 : 2);
+	cfg->com_hsclk_sel = (0x28 | hsclk);
+	cfg->com_pll_cctrl_mode0 = cctrl;
+	cfg->com_pll_rctrl_mode0 = rctrl;
+	cfg->com_cp_ctrl_mode0 = cpctrl;
+	cfg->com_dec_start_mode0 = dec_start;
+	cfg->com_div_frac_start1_mode0 = (frac_start & 0xFF);
+	cfg->com_div_frac_start2_mode0 = ((frac_start & 0xFF00) >> 8);
+	cfg->com_div_frac_start3_mode0 = ((frac_start & 0xF0000) >> 16);
+	cfg->com_integloop_gain0_mode0 = (integloop_gain & 0xFF);
+	cfg->com_integloop_gain1_mode0 = ((integloop_gain & 0xF00) >> 8);
+	cfg->com_lock_cmp1_mode0 = (pll_cmp & 0xFF);
+	cfg->com_lock_cmp2_mode0 = ((pll_cmp & 0xFF00) >> 8);
+	cfg->com_lock_cmp3_mode0 = ((pll_cmp & 0x30000) >> 16);
+	cfg->com_core_clk_en = (0x6C | (HDMI_CLKS_PLL_DIVSEL << 4));
+	cfg->com_coreclk_div = HDMI_CORECLK_DIV;
+
+	if (bclk > HDMI_HIGH_FREQ_BIT_CLK_THRESHOLD) {
+		cfg->tx_l0_tx_drv_lvl = 0x25;
+		cfg->tx_l0_tx_emp_post1_lvl = 0x23;
+		cfg->tx_l1_tx_drv_lvl = 0x25;
+		cfg->tx_l1_tx_emp_post1_lvl = 0x23;
+		cfg->tx_l2_tx_drv_lvl = 0x25;
+		cfg->tx_l2_tx_emp_post1_lvl = 0x23;
+		cfg->tx_l3_tx_drv_lvl = 0x22;
+		cfg->tx_l3_tx_emp_post1_lvl = 0x27;
+		cfg->tx_l0_vmode_ctrl1 = 0x00;
+		cfg->tx_l0_vmode_ctrl2 = 0x0D;
+		cfg->tx_l1_vmode_ctrl1 = 0x00;
+		cfg->tx_l1_vmode_ctrl2 = 0x0D;
+		cfg->tx_l2_vmode_ctrl1 = 0x00;
+		cfg->tx_l2_vmode_ctrl2 = 0x0D;
+		cfg->tx_l3_vmode_ctrl1 = 0x00;
+		cfg->tx_l3_vmode_ctrl2 = 0x00;
+		cfg->com_restrim_ctrl = 0x0;
+	} else if (bclk > HDMI_MID_FREQ_BIT_CLK_THRESHOLD) {
+		cfg->tx_l0_tx_drv_lvl = 0x25;
+		cfg->tx_l0_tx_emp_post1_lvl = 0x23;
+		cfg->tx_l1_tx_drv_lvl = 0x25;
+		cfg->tx_l1_tx_emp_post1_lvl = 0x23;
+		cfg->tx_l2_tx_drv_lvl = 0x25;
+		cfg->tx_l2_tx_emp_post1_lvl = 0x23;
+		cfg->tx_l3_tx_drv_lvl = 0x25;
+		cfg->tx_l3_tx_emp_post1_lvl = 0x23;
+		cfg->tx_l0_vmode_ctrl1 = 0x00;
+		cfg->tx_l0_vmode_ctrl2 = 0x0D;
+		cfg->tx_l1_vmode_ctrl1 = 0x00;
+		cfg->tx_l1_vmode_ctrl2 = 0x0D;
+		cfg->tx_l2_vmode_ctrl1 = 0x00;
+		cfg->tx_l2_vmode_ctrl2 = 0x0D;
+		cfg->tx_l3_vmode_ctrl1 = 0x00;
+		cfg->tx_l3_vmode_ctrl2 = 0x00;
+		cfg->com_restrim_ctrl = 0x0;
+	} else {
+		cfg->tx_l0_tx_drv_lvl = 0x20;
+		cfg->tx_l0_tx_emp_post1_lvl = 0x20;
+		cfg->tx_l1_tx_drv_lvl = 0x20;
+		cfg->tx_l1_tx_emp_post1_lvl = 0x20;
+		cfg->tx_l2_tx_drv_lvl = 0x20;
+		cfg->tx_l2_tx_emp_post1_lvl = 0x20;
+		cfg->tx_l3_tx_drv_lvl = 0x20;
+		cfg->tx_l3_tx_emp_post1_lvl = 0x20;
+		cfg->tx_l0_vmode_ctrl1 = 0x00;
+		cfg->tx_l0_vmode_ctrl2 = 0x0E;
+		cfg->tx_l1_vmode_ctrl1 = 0x00;
+		cfg->tx_l1_vmode_ctrl2 = 0x0E;
+		cfg->tx_l2_vmode_ctrl1 = 0x00;
+		cfg->tx_l2_vmode_ctrl2 = 0x0E;
+		cfg->tx_l3_vmode_ctrl1 = 0x00;
+		cfg->tx_l3_vmode_ctrl2 = 0x0E;
+		cfg->com_restrim_ctrl = 0xD8;
+	}
+
+	cfg->phy_mode = (bclk > HDMI_HIGH_FREQ_BIT_CLK_THRESHOLD) ? 0x10 : 0x0;
+	DEV_DBG("HDMI 8996 PLL: PLL Settings\n");
+	DEV_DBG("PLL PARAM: tx_l0_lane_mode = 0x%x\n", cfg->tx_l0_lane_mode);
+	DEV_DBG("PLL PARAM: tx_l2_lane_mode = 0x%x\n", cfg->tx_l2_lane_mode);
+	DEV_DBG("PLL PARAM: tx_l0_tx_band = 0x%x\n", cfg->tx_l0_tx_band);
+	DEV_DBG("PLL PARAM: tx_l1_tx_band = 0x%x\n", cfg->tx_l1_tx_band);
+	DEV_DBG("PLL PARAM: tx_l2_tx_band = 0x%x\n", cfg->tx_l2_tx_band);
+	DEV_DBG("PLL PARAM: tx_l3_tx_band = 0x%x\n", cfg->tx_l3_tx_band);
+	DEV_DBG("PLL PARAM: com_svs_mode_clk_sel = 0x%x\n",
+						cfg->com_svs_mode_clk_sel);
+	DEV_DBG("PLL PARAM: com_hsclk_sel = 0x%x\n", cfg->com_hsclk_sel);
+	DEV_DBG("PLL PARAM: com_pll_cctrl_mode0 = 0x%x\n",
+						cfg->com_pll_cctrl_mode0);
+	DEV_DBG("PLL PARAM: com_pll_rctrl_mode0 = 0x%x\n",
+						cfg->com_pll_rctrl_mode0);
+	DEV_DBG("PLL PARAM: com_cp_ctrl_mode0 = 0x%x\n",
+						cfg->com_cp_ctrl_mode0);
+	DEV_DBG("PLL PARAM: com_dec_start_mode0 = 0x%x\n",
+						cfg->com_dec_start_mode0);
+	DEV_DBG("PLL PARAM: com_div_frac_start1_mode0 = 0x%x\n",
+						cfg->com_div_frac_start1_mode0);
+	DEV_DBG("PLL PARAM: com_div_frac_start2_mode0 = 0x%x\n",
+						cfg->com_div_frac_start2_mode0);
+	DEV_DBG("PLL PARAM: com_div_frac_start3_mode0 = 0x%x\n",
+						cfg->com_div_frac_start3_mode0);
+	DEV_DBG("PLL PARAM: com_integloop_gain0_mode0 = 0x%x\n",
+						cfg->com_integloop_gain0_mode0);
+	DEV_DBG("PLL PARAM: com_integloop_gain1_mode0 = 0x%x\n",
+						cfg->com_integloop_gain1_mode0);
+	DEV_DBG("PLL PARAM: com_lock_cmp1_mode0 = 0x%x\n",
+						cfg->com_lock_cmp1_mode0);
+	DEV_DBG("PLL PARAM: com_lock_cmp2_mode0 = 0x%x\n",
+						cfg->com_lock_cmp2_mode0);
+	DEV_DBG("PLL PARAM: com_lock_cmp3_mode0 = 0x%x\n",
+						cfg->com_lock_cmp3_mode0);
+	DEV_DBG("PLL PARAM: com_core_clk_en = 0x%x\n", cfg->com_core_clk_en);
+	DEV_DBG("PLL PARAM: com_coreclk_div = 0x%x\n", cfg->com_coreclk_div);
+	DEV_DBG("PLL PARAM: com_restrim_ctrl = 0x%x\n",	cfg->com_restrim_ctrl);
+
+	DEV_DBG("PLL PARAM: l0_tx_drv_lvl = 0x%x\n", cfg->tx_l0_tx_drv_lvl);
+	DEV_DBG("PLL PARAM: l0_tx_emp_post1_lvl = 0x%x\n",
+						cfg->tx_l0_tx_emp_post1_lvl);
+	DEV_DBG("PLL PARAM: l1_tx_drv_lvl = 0x%x\n", cfg->tx_l1_tx_drv_lvl);
+	DEV_DBG("PLL PARAM: l1_tx_emp_post1_lvl = 0x%x\n",
+						cfg->tx_l1_tx_emp_post1_lvl);
+	DEV_DBG("PLL PARAM: l2_tx_drv_lvl = 0x%x\n", cfg->tx_l2_tx_drv_lvl);
+	DEV_DBG("PLL PARAM: l2_tx_emp_post1_lvl = 0x%x\n",
+						cfg->tx_l2_tx_emp_post1_lvl);
+	DEV_DBG("PLL PARAM: l3_tx_drv_lvl = 0x%x\n", cfg->tx_l3_tx_drv_lvl);
+	DEV_DBG("PLL PARAM: l3_tx_emp_post1_lvl = 0x%x\n",
+						cfg->tx_l3_tx_emp_post1_lvl);
+
+	DEV_DBG("PLL PARAM: l0_vmode_ctrl1 = 0x%x\n", cfg->tx_l0_vmode_ctrl1);
+	DEV_DBG("PLL PARAM: l0_vmode_ctrl2 = 0x%x\n", cfg->tx_l0_vmode_ctrl2);
+	DEV_DBG("PLL PARAM: l1_vmode_ctrl1 = 0x%x\n", cfg->tx_l1_vmode_ctrl1);
+	DEV_DBG("PLL PARAM: l1_vmode_ctrl2 = 0x%x\n", cfg->tx_l1_vmode_ctrl2);
+	DEV_DBG("PLL PARAM: l2_vmode_ctrl1 = 0x%x\n", cfg->tx_l2_vmode_ctrl1);
+	DEV_DBG("PLL PARAM: l2_vmode_ctrl2 = 0x%x\n", cfg->tx_l2_vmode_ctrl2);
+	DEV_DBG("PLL PARAM: l3_vmode_ctrl1 = 0x%x\n", cfg->tx_l3_vmode_ctrl1);
+	DEV_DBG("PLL PARAM: l3_vmode_ctrl2 = 0x%x\n", cfg->tx_l3_vmode_ctrl2);
+	DEV_DBG("PLL PARAM: tx_l0_res_code_lane_tx = 0x%x\n",
+					cfg->tx_l0_res_code_lane_tx);
+	DEV_DBG("PLL PARAM: tx_l1_res_code_lane_tx = 0x%x\n",
+					cfg->tx_l1_res_code_lane_tx);
+	DEV_DBG("PLL PARAM: tx_l2_res_code_lane_tx = 0x%x\n",
+					cfg->tx_l2_res_code_lane_tx);
+	DEV_DBG("PLL PARAM: tx_l3_res_code_lane_tx = 0x%x\n",
+					cfg->tx_l3_res_code_lane_tx);
+
+	DEV_DBG("PLL PARAM: phy_mode = 0x%x\n", cfg->phy_mode);
+	rc = 0;
+fail:
+	return rc;
+}
+
+static int hdmi_8996_v2_calculate(u32 pix_clk,
+			       struct hdmi_8996_phy_pll_reg_cfg *cfg)
+{
+	int rc = -EINVAL;
+	u64 fdata, clk_divtx, tmds_clk;
+	u64 bclk;
+	u64 post_div;
+	u64 core_clk_div;
+	u64 core_clk_div_ratio;
+	u64 core_clk;
+	u64 pll_cmp;
+	u64 tx_band;
+	u64 tx_band_div_ratio;
+	u64 hsclk;
+	u64 dec_start;
+	u64 frac_start;
+	u64 pll_divisor = 4 * HDMI_REF_CLOCK;
+	u64 cpctrl;
+	u64 rctrl;
+	u64 cctrl;
+	u64 integloop_gain;
+	u64 vco_tune;
+	u64 vco_freq;
+	u64 vco_range;
+	u64 rem;
+
+	/* FDATA, CLK_DIVTX, PIXEL_CLK, TMDS_CLK */
+	bclk = ((u64)pix_clk) * HDMI_BIT_CLK_TO_PIX_CLK_RATIO;
+
+	if (bclk > HDMI_HIGH_FREQ_BIT_CLK_THRESHOLD)
+		tmds_clk = pix_clk >> 2;
+	else
+		tmds_clk = pix_clk;
+
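+	/* VCO floor: 2 GHz for bit clocks below 282 MHz, 2.25 GHz otherwise */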
+	vco_range = bclk < HDMI_282MHZ_BIT_CLK_HZ ? HDMI_2000MHZ_BIT_CLK_HZ :
+				HDMI_2250MHZ_BIT_CLK_HZ;
+
+	fdata = hdmi_8996_v2_get_fdata(bclk, vco_range);
+	if (fdata == HDMI_64B_ERR_VAL)
+		goto fail;
+
+	hsclk = hdmi_8996_v2_get_hsclk(fdata, vco_range);
+	if (hsclk == HDMI_64B_ERR_VAL)
+		goto fail;
+
+	if (bclk >= vco_range)
+		post_div = hdmi_8996_v2_get_post_div_gt_2g(hsclk);
+	else
+		post_div = hdmi_8996_v2_get_post_div_lt_2g(bclk, vco_range);
+
+	if (post_div == HDMI_64B_ERR_VAL)
+		goto fail;
+
+	core_clk_div = 5;
+	core_clk_div_ratio = core_clk_div * 2;
+
+	tx_band = hdmi_8996_v2_get_tx_band(bclk, vco_range);
+	if (tx_band == HDMI_64B_ERR_VAL)
+		goto fail;
+
+	tx_band_div_ratio = 1 << tx_band;
+
+	vco_freq = hdmi_8996_v2_get_vco_freq(bclk, vco_range);
+	clk_divtx = vco_freq;
+	do_div(clk_divtx, post_div);
+
+	/* Decimal and fraction values */
+	dec_start = fdata * post_div;
+	do_div(dec_start, pll_divisor);
+	frac_start = ((pll_divisor - (((dec_start + 1) * pll_divisor) -
+			(fdata * post_div))) * (1 << 20));
+	rem = do_div(frac_start, pll_divisor);
+	/* Round off frac_start to closest integer */
+	if (rem >= (pll_divisor >> 1))
+		frac_start++;
+
+	cpctrl = hdmi_8996_get_cpctrl(frac_start, false);
+	rctrl = hdmi_8996_get_rctrl(frac_start, false);
+	cctrl = hdmi_8996_get_cctrl(frac_start, false);
+	integloop_gain = hdmi_8996_get_integloop_gain(frac_start, false);
+	vco_tune = hdmi_8996_get_vco_tune(fdata, post_div);
+
+	core_clk = clk_divtx;
+	do_div(core_clk, core_clk_div_ratio);
+	pll_cmp = hdmi_8996_get_pll_cmp(1024, core_clk);
+
+	/* Debug dump */
+	DEV_DBG("%s: VCO freq: %llu\n", __func__, vco_freq);
+	DEV_DBG("%s: fdata: %llu\n", __func__, fdata);
+	DEV_DBG("%s: CLK_DIVTX: %llu\n", __func__, clk_divtx);
+	DEV_DBG("%s: pix_clk: %d\n", __func__, pix_clk);
+	DEV_DBG("%s: tmds clk: %llu\n", __func__, tmds_clk);
+	DEV_DBG("%s: HSCLK_SEL: %llu\n", __func__, hsclk);
+	DEV_DBG("%s: DEC_START: %llu\n", __func__, dec_start);
+	DEV_DBG("%s: DIV_FRAC_START: %llu\n", __func__, frac_start);
+	DEV_DBG("%s: PLL_CPCTRL: %llu\n", __func__, cpctrl);
+	DEV_DBG("%s: PLL_RCTRL: %llu\n", __func__, rctrl);
+	DEV_DBG("%s: PLL_CCTRL: %llu\n", __func__, cctrl);
+	DEV_DBG("%s: INTEGLOOP_GAIN: %llu\n", __func__, integloop_gain);
+	DEV_DBG("%s: VCO_TUNE: %llu\n", __func__, vco_tune);
+	DEV_DBG("%s: TX_BAND: %llu\n", __func__, tx_band);
+	DEV_DBG("%s: PLL_CMP: %llu\n", __func__, pll_cmp);
+
+	/* Convert these values to register specific values */
+	cfg->tx_l0_lane_mode = 0x3;
+	cfg->tx_l2_lane_mode = 0x3;
+	cfg->tx_l0_tx_band = tx_band + 4;
+	cfg->tx_l1_tx_band = tx_band + 4;
+	cfg->tx_l2_tx_band = tx_band + 4;
+	cfg->tx_l3_tx_band = tx_band + 4;
+
+	if (bclk > HDMI_DIG_FREQ_BIT_CLK_THRESHOLD)
+		cfg->com_svs_mode_clk_sel = 1;
+	else
+		cfg->com_svs_mode_clk_sel = 2;
+
+	cfg->com_hsclk_sel = (0x28 | hsclk);
+	cfg->com_pll_cctrl_mode0 = cctrl;
+	cfg->com_pll_rctrl_mode0 = rctrl;
+	cfg->com_cp_ctrl_mode0 = cpctrl;
+	cfg->com_dec_start_mode0 = dec_start;
+	cfg->com_div_frac_start1_mode0 = (frac_start & 0xFF);
+	cfg->com_div_frac_start2_mode0 = ((frac_start & 0xFF00) >> 8);
+	cfg->com_div_frac_start3_mode0 = ((frac_start & 0xF0000) >> 16);
+	cfg->com_integloop_gain0_mode0 = (integloop_gain & 0xFF);
+	cfg->com_integloop_gain1_mode0 = ((integloop_gain & 0xF00) >> 8);
+	cfg->com_lock_cmp1_mode0 = (pll_cmp & 0xFF);
+	cfg->com_lock_cmp2_mode0 = ((pll_cmp & 0xFF00) >> 8);
+	cfg->com_lock_cmp3_mode0 = ((pll_cmp & 0x30000) >> 16);
+	cfg->com_core_clk_en = (0x6C | (HDMI_CLKS_PLL_DIVSEL << 4));
+	cfg->com_coreclk_div = HDMI_CORECLK_DIV;
+	cfg->com_vco_tune_ctrl = 0x0;
+
+	if (bclk > HDMI_HIGH_FREQ_BIT_CLK_THRESHOLD) {
+		cfg->tx_l0_tx_drv_lvl = 0x25;
+		cfg->tx_l0_tx_emp_post1_lvl = 0x23;
+		cfg->tx_l1_tx_drv_lvl = 0x25;
+		cfg->tx_l1_tx_emp_post1_lvl = 0x23;
+		cfg->tx_l2_tx_drv_lvl = 0x25;
+		cfg->tx_l2_tx_emp_post1_lvl = 0x23;
+		cfg->tx_l3_tx_drv_lvl = 0x22;
+		cfg->tx_l3_tx_emp_post1_lvl = 0x27;
+		cfg->tx_l0_vmode_ctrl1 = 0x00;
+		cfg->tx_l0_vmode_ctrl2 = 0x0D;
+		cfg->tx_l1_vmode_ctrl1 = 0x00;
+		cfg->tx_l1_vmode_ctrl2 = 0x0D;
+		cfg->tx_l2_vmode_ctrl1 = 0x00;
+		cfg->tx_l2_vmode_ctrl2 = 0x0D;
+		cfg->tx_l3_vmode_ctrl1 = 0x00;
+		cfg->tx_l3_vmode_ctrl2 = 0x00;
+		cfg->tx_l0_res_code_lane_tx = 0x3F;
+		cfg->tx_l1_res_code_lane_tx = 0x3F;
+		cfg->tx_l2_res_code_lane_tx = 0x3F;
+		cfg->tx_l3_res_code_lane_tx = 0x3F;
+		cfg->com_restrim_ctrl = 0x0;
+	} else if (bclk > HDMI_MID_FREQ_BIT_CLK_THRESHOLD) {
+		cfg->tx_l0_tx_drv_lvl = 0x25;
+		cfg->tx_l0_tx_emp_post1_lvl = 0x23;
+		cfg->tx_l1_tx_drv_lvl = 0x25;
+		cfg->tx_l1_tx_emp_post1_lvl = 0x23;
+		cfg->tx_l2_tx_drv_lvl = 0x25;
+		cfg->tx_l2_tx_emp_post1_lvl = 0x23;
+		cfg->tx_l3_tx_drv_lvl = 0x25;
+		cfg->tx_l3_tx_emp_post1_lvl = 0x23;
+		cfg->tx_l0_vmode_ctrl1 = 0x00;
+		cfg->tx_l0_vmode_ctrl2 = 0x0D;
+		cfg->tx_l1_vmode_ctrl1 = 0x00;
+		cfg->tx_l1_vmode_ctrl2 = 0x0D;
+		cfg->tx_l2_vmode_ctrl1 = 0x00;
+		cfg->tx_l2_vmode_ctrl2 = 0x0D;
+		cfg->tx_l3_vmode_ctrl1 = 0x00;
+		cfg->tx_l3_vmode_ctrl2 = 0x00;
+		cfg->tx_l0_res_code_lane_tx = 0x39;
+		cfg->tx_l1_res_code_lane_tx = 0x39;
+		cfg->tx_l2_res_code_lane_tx = 0x39;
+		cfg->tx_l3_res_code_lane_tx = 0x39;
+		cfg->com_restrim_ctrl = 0x0;
+	} else {
+		cfg->tx_l0_tx_drv_lvl = 0x20;
+		cfg->tx_l0_tx_emp_post1_lvl = 0x20;
+		cfg->tx_l1_tx_drv_lvl = 0x20;
+		cfg->tx_l1_tx_emp_post1_lvl = 0x20;
+		cfg->tx_l2_tx_drv_lvl = 0x20;
+		cfg->tx_l2_tx_emp_post1_lvl = 0x20;
+		cfg->tx_l3_tx_drv_lvl = 0x20;
+		cfg->tx_l3_tx_emp_post1_lvl = 0x20;
+		cfg->tx_l0_vmode_ctrl1 = 0x00;
+		cfg->tx_l0_vmode_ctrl2 = 0x0E;
+		cfg->tx_l1_vmode_ctrl1 = 0x00;
+		cfg->tx_l1_vmode_ctrl2 = 0x0E;
+		cfg->tx_l2_vmode_ctrl1 = 0x00;
+		cfg->tx_l2_vmode_ctrl2 = 0x0E;
+		cfg->tx_l3_vmode_ctrl1 = 0x00;
+		cfg->tx_l3_vmode_ctrl2 = 0x0E;
+		cfg->tx_l0_res_code_lane_tx = 0x3F;
+		cfg->tx_l1_res_code_lane_tx = 0x3F;
+		cfg->tx_l2_res_code_lane_tx = 0x3F;
+		cfg->tx_l3_res_code_lane_tx = 0x3F;
+		cfg->com_restrim_ctrl = 0xD8;
+	}
+
+	cfg->phy_mode = (bclk > HDMI_HIGH_FREQ_BIT_CLK_THRESHOLD) ? 0x10 : 0x0;
+	DEV_DBG("HDMI 8996 PLL: PLL Settings\n");
+	DEV_DBG("PLL PARAM: tx_l0_lane_mode = 0x%x\n", cfg->tx_l0_lane_mode);
+	DEV_DBG("PLL PARAM: tx_l2_lane_mode = 0x%x\n", cfg->tx_l2_lane_mode);
+	DEV_DBG("PLL PARAM: tx_l0_tx_band = 0x%x\n", cfg->tx_l0_tx_band);
+	DEV_DBG("PLL PARAM: tx_l1_tx_band = 0x%x\n", cfg->tx_l1_tx_band);
+	DEV_DBG("PLL PARAM: tx_l2_tx_band = 0x%x\n", cfg->tx_l2_tx_band);
+	DEV_DBG("PLL PARAM: tx_l3_tx_band = 0x%x\n", cfg->tx_l3_tx_band);
+	DEV_DBG("PLL PARAM: com_svs_mode_clk_sel = 0x%x\n",
+						cfg->com_svs_mode_clk_sel);
+	DEV_DBG("PLL PARAM: com_vco_tune_ctrl = 0x%x\n",
+						cfg->com_vco_tune_ctrl);
+	DEV_DBG("PLL PARAM: com_hsclk_sel = 0x%x\n", cfg->com_hsclk_sel);
+	DEV_DBG("PLL PARAM: com_lock_cmp_en = 0x%x\n", cfg->com_lock_cmp_en);
+	DEV_DBG("PLL PARAM: com_pll_cctrl_mode0 = 0x%x\n",
+						cfg->com_pll_cctrl_mode0);
+	DEV_DBG("PLL PARAM: com_pll_rctrl_mode0 = 0x%x\n",
+						cfg->com_pll_rctrl_mode0);
+	DEV_DBG("PLL PARAM: com_cp_ctrl_mode0 = 0x%x\n",
+						cfg->com_cp_ctrl_mode0);
+	DEV_DBG("PLL PARAM: com_dec_start_mode0 = 0x%x\n",
+						cfg->com_dec_start_mode0);
+	DEV_DBG("PLL PARAM: com_div_frac_start1_mode0 = 0x%x\n",
+						cfg->com_div_frac_start1_mode0);
+	DEV_DBG("PLL PARAM: com_div_frac_start2_mode0 = 0x%x\n",
+						cfg->com_div_frac_start2_mode0);
+	DEV_DBG("PLL PARAM: com_div_frac_start3_mode0 = 0x%x\n",
+						cfg->com_div_frac_start3_mode0);
+	DEV_DBG("PLL PARAM: com_integloop_gain0_mode0 = 0x%x\n",
+						cfg->com_integloop_gain0_mode0);
+	DEV_DBG("PLL PARAM: com_integloop_gain1_mode0 = 0x%x\n",
+						cfg->com_integloop_gain1_mode0);
+	DEV_DBG("PLL PARAM: com_lock_cmp1_mode0 = 0x%x\n",
+						cfg->com_lock_cmp1_mode0);
+	DEV_DBG("PLL PARAM: com_lock_cmp2_mode0 = 0x%x\n",
+						cfg->com_lock_cmp2_mode0);
+	DEV_DBG("PLL PARAM: com_lock_cmp3_mode0 = 0x%x\n",
+						cfg->com_lock_cmp3_mode0);
+	DEV_DBG("PLL PARAM: com_core_clk_en = 0x%x\n", cfg->com_core_clk_en);
+	DEV_DBG("PLL PARAM: com_coreclk_div = 0x%x\n", cfg->com_coreclk_div);
+
+	DEV_DBG("PLL PARAM: l0_tx_drv_lvl = 0x%x\n", cfg->tx_l0_tx_drv_lvl);
+	DEV_DBG("PLL PARAM: l0_tx_emp_post1_lvl = 0x%x\n",
+						cfg->tx_l0_tx_emp_post1_lvl);
+	DEV_DBG("PLL PARAM: l1_tx_drv_lvl = 0x%x\n", cfg->tx_l1_tx_drv_lvl);
+	DEV_DBG("PLL PARAM: l1_tx_emp_post1_lvl = 0x%x\n",
+						cfg->tx_l1_tx_emp_post1_lvl);
+	DEV_DBG("PLL PARAM: l2_tx_drv_lvl = 0x%x\n", cfg->tx_l2_tx_drv_lvl);
+	DEV_DBG("PLL PARAM: l2_tx_emp_post1_lvl = 0x%x\n",
+						cfg->tx_l2_tx_emp_post1_lvl);
+	DEV_DBG("PLL PARAM: l3_tx_drv_lvl = 0x%x\n", cfg->tx_l3_tx_drv_lvl);
+	DEV_DBG("PLL PARAM: l3_tx_emp_post1_lvl = 0x%x\n",
+						cfg->tx_l3_tx_emp_post1_lvl);
+
+	DEV_DBG("PLL PARAM: l0_vmode_ctrl1 = 0x%x\n", cfg->tx_l0_vmode_ctrl1);
+	DEV_DBG("PLL PARAM: l0_vmode_ctrl2 = 0x%x\n", cfg->tx_l0_vmode_ctrl2);
+	DEV_DBG("PLL PARAM: l1_vmode_ctrl1 = 0x%x\n", cfg->tx_l1_vmode_ctrl1);
+	DEV_DBG("PLL PARAM: l1_vmode_ctrl2 = 0x%x\n", cfg->tx_l1_vmode_ctrl2);
+	DEV_DBG("PLL PARAM: l2_vmode_ctrl1 = 0x%x\n", cfg->tx_l2_vmode_ctrl1);
+	DEV_DBG("PLL PARAM: l2_vmode_ctrl2 = 0x%x\n", cfg->tx_l2_vmode_ctrl2);
+	DEV_DBG("PLL PARAM: l3_vmode_ctrl1 = 0x%x\n", cfg->tx_l3_vmode_ctrl1);
+	DEV_DBG("PLL PARAM: l3_vmode_ctrl2 = 0x%x\n", cfg->tx_l3_vmode_ctrl2);
+	DEV_DBG("PLL PARAM: tx_l0_res_code_lane_tx = 0x%x\n",
+					cfg->tx_l0_res_code_lane_tx);
+	DEV_DBG("PLL PARAM: tx_l1_res_code_lane_tx = 0x%x\n",
+					cfg->tx_l1_res_code_lane_tx);
+	DEV_DBG("PLL PARAM: tx_l2_res_code_lane_tx = 0x%x\n",
+					cfg->tx_l2_res_code_lane_tx);
+	DEV_DBG("PLL PARAM: tx_l3_res_code_lane_tx = 0x%x\n",
+					cfg->tx_l3_res_code_lane_tx);
+	DEV_DBG("PLL PARAM: com_restrim_ctrl = 0x%x\n",	cfg->com_restrim_ctrl);
+
+	DEV_DBG("PLL PARAM: phy_mode = 0x%x\n", cfg->phy_mode);
+	rc = 0;
+fail:
+	return rc;
+}
+
+static int hdmi_8996_v3_calculate(u32 pix_clk,
+			       struct hdmi_8996_phy_pll_reg_cfg *cfg)
+{
+	int rc = -EINVAL;
+	struct hdmi_8996_v3_post_divider pd;
+	u64 fdata, tmds_clk;
+	u64 bclk;
+	u64 pll_cmp;
+	u64 tx_band;
+	u64 hsclk;
+	u64 dec_start;
+	u64 frac_start;
+	u64 pll_divisor = 4 * HDMI_REF_CLOCK;
+	u64 cpctrl;
+	u64 rctrl;
+	u64 cctrl;
+	u64 integloop_gain;
+	u64 vco_freq;
+	u64 rem;
+
+	/* FDATA, HSCLK, PIXEL_CLK, TMDS_CLK */
+	bclk = ((u64)pix_clk) * HDMI_BIT_CLK_TO_PIX_CLK_RATIO;
+
+	if (bclk > HDMI_HIGH_FREQ_BIT_CLK_THRESHOLD)
+		tmds_clk = pix_clk >> 2;
+	else
+		tmds_clk = pix_clk;
+
+	if (hdmi_8996_v3_get_post_div(&pd, bclk) || pd.vco_ratio <= 0 ||
+			pd.vco_freq <= 0)
+		goto fail;
+
+	vco_freq = pd.vco_freq;
+	fdata = pd.vco_freq;
+	do_div(fdata, pd.vco_ratio);
+
+	hsclk = pd.hsclk_divsel;
+	dec_start = vco_freq;
+	do_div(dec_start, pll_divisor);
+
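+	/*
+	 * frac_start = round((vco_freq / pll_divisor - dec_start) * 2^20),
+	 * i.e. the fractional part of the divide ratio in 1/2^20 units.
+	 */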
+	frac_start = vco_freq * (1 << 20);
+	rem = do_div(frac_start, pll_divisor);
+	frac_start -= dec_start * (1 << 20);
+	if (rem > (pll_divisor >> 1))
+		frac_start++;
+
+	cpctrl = hdmi_8996_get_cpctrl(frac_start, false);
+	rctrl = hdmi_8996_get_rctrl(frac_start, false);
+	cctrl = hdmi_8996_get_cctrl(frac_start, false);
+	integloop_gain = hdmi_8996_v3_get_integloop_gain(frac_start, bclk,
+									false);
+	pll_cmp = hdmi_8996_v3_get_pll_cmp(1024, fdata);
+	tx_band = pd.tx_band_sel;
+
+	/* Debug dump */
+	DEV_DBG("%s: VCO freq: %llu\n", __func__, vco_freq);
+	DEV_DBG("%s: fdata: %llu\n", __func__, fdata);
+	DEV_DBG("%s: pix_clk: %d\n", __func__, pix_clk);
+	DEV_DBG("%s: tmds clk: %llu\n", __func__, tmds_clk);
+	DEV_DBG("%s: HSCLK_SEL: %llu\n", __func__, hsclk);
+	DEV_DBG("%s: DEC_START: %llu\n", __func__, dec_start);
+	DEV_DBG("%s: DIV_FRAC_START: %llu\n", __func__, frac_start);
+	DEV_DBG("%s: PLL_CPCTRL: %llu\n", __func__, cpctrl);
+	DEV_DBG("%s: PLL_RCTRL: %llu\n", __func__, rctrl);
+	DEV_DBG("%s: PLL_CCTRL: %llu\n", __func__, cctrl);
+	DEV_DBG("%s: INTEGLOOP_GAIN: %llu\n", __func__, integloop_gain);
+	DEV_DBG("%s: TX_BAND: %llu\n", __func__, tx_band);
+	DEV_DBG("%s: PLL_CMP: %llu\n", __func__, pll_cmp);
+
+	/* Convert these values to register specific values */
+	cfg->tx_l0_tx_band = tx_band + 4;
+	cfg->tx_l1_tx_band = tx_band + 4;
+	cfg->tx_l2_tx_band = tx_band + 4;
+	cfg->tx_l3_tx_band = tx_band + 4;
+
+	if (bclk > HDMI_DIG_FREQ_BIT_CLK_THRESHOLD)
+		cfg->com_svs_mode_clk_sel = 1;
+	else
+		cfg->com_svs_mode_clk_sel = 2;
+
+	cfg->com_hsclk_sel = (0x20 | hsclk);
+	cfg->com_pll_cctrl_mode0 = cctrl;
+	cfg->com_pll_rctrl_mode0 = rctrl;
+	cfg->com_cp_ctrl_mode0 = cpctrl;
+	cfg->com_dec_start_mode0 = dec_start;
+	cfg->com_div_frac_start1_mode0 = (frac_start & 0xFF);
+	cfg->com_div_frac_start2_mode0 = ((frac_start & 0xFF00) >> 8);
+	cfg->com_div_frac_start3_mode0 = ((frac_start & 0xF0000) >> 16);
+	cfg->com_integloop_gain0_mode0 = (integloop_gain & 0xFF);
+	cfg->com_integloop_gain1_mode0 = ((integloop_gain & 0xF00) >> 8);
+	cfg->com_lock_cmp1_mode0 = (pll_cmp & 0xFF);
+	cfg->com_lock_cmp2_mode0 = ((pll_cmp & 0xFF00) >> 8);
+	cfg->com_lock_cmp3_mode0 = ((pll_cmp & 0x30000) >> 16);
+	cfg->com_lock_cmp_en = 0x04;
+	cfg->com_core_clk_en = 0x2C;
+	cfg->com_coreclk_div = HDMI_CORECLK_DIV;
+	cfg->phy_mode = (bclk > HDMI_HIGH_FREQ_BIT_CLK_THRESHOLD) ? 0x10 : 0x0;
+	cfg->com_vco_tune_ctrl = 0x0;
+
+	cfg->tx_l0_lane_mode = 0x43;
+	cfg->tx_l2_lane_mode = 0x43;
+
+	if (bclk > HDMI_HIGH_FREQ_BIT_CLK_THRESHOLD) {
+		cfg->tx_l0_tx_drv_lvl = 0x25;
+		cfg->tx_l0_tx_emp_post1_lvl = 0x23;
+		cfg->tx_l1_tx_drv_lvl = 0x25;
+		cfg->tx_l1_tx_emp_post1_lvl = 0x23;
+		cfg->tx_l2_tx_drv_lvl = 0x25;
+		cfg->tx_l2_tx_emp_post1_lvl = 0x23;
+		cfg->tx_l3_tx_drv_lvl = 0x22;
+		cfg->tx_l3_tx_emp_post1_lvl = 0x27;
+		cfg->tx_l0_vmode_ctrl1 = 0x00;
+		cfg->tx_l0_vmode_ctrl2 = 0x0D;
+		cfg->tx_l1_vmode_ctrl1 = 0x00;
+		cfg->tx_l1_vmode_ctrl2 = 0x0D;
+		cfg->tx_l2_vmode_ctrl1 = 0x00;
+		cfg->tx_l2_vmode_ctrl2 = 0x0D;
+		cfg->tx_l3_vmode_ctrl1 = 0x00;
+		cfg->tx_l3_vmode_ctrl2 = 0x00;
+	} else if (bclk > HDMI_MID_FREQ_BIT_CLK_THRESHOLD) {
+		cfg->tx_l0_tx_drv_lvl = 0x25;
+		cfg->tx_l0_tx_emp_post1_lvl = 0x23;
+		cfg->tx_l1_tx_drv_lvl = 0x25;
+		cfg->tx_l1_tx_emp_post1_lvl = 0x23;
+		cfg->tx_l2_tx_drv_lvl = 0x25;
+		cfg->tx_l2_tx_emp_post1_lvl = 0x23;
+		cfg->tx_l3_tx_drv_lvl = 0x25;
+		cfg->tx_l3_tx_emp_post1_lvl = 0x23;
+		cfg->tx_l0_vmode_ctrl1 = 0x00;
+		cfg->tx_l0_vmode_ctrl2 = 0x0D;
+		cfg->tx_l1_vmode_ctrl1 = 0x00;
+		cfg->tx_l1_vmode_ctrl2 = 0x0D;
+		cfg->tx_l2_vmode_ctrl1 = 0x00;
+		cfg->tx_l2_vmode_ctrl2 = 0x0D;
+		cfg->tx_l3_vmode_ctrl1 = 0x00;
+		cfg->tx_l3_vmode_ctrl2 = 0x00;
+	} else {
+		cfg->tx_l0_tx_drv_lvl = 0x20;
+		cfg->tx_l0_tx_emp_post1_lvl = 0x20;
+		cfg->tx_l1_tx_drv_lvl = 0x20;
+		cfg->tx_l1_tx_emp_post1_lvl = 0x20;
+		cfg->tx_l2_tx_drv_lvl = 0x20;
+		cfg->tx_l2_tx_emp_post1_lvl = 0x20;
+		cfg->tx_l3_tx_drv_lvl = 0x20;
+		cfg->tx_l3_tx_emp_post1_lvl = 0x20;
+		cfg->tx_l0_vmode_ctrl1 = 0x00;
+		cfg->tx_l0_vmode_ctrl2 = 0x0E;
+		cfg->tx_l1_vmode_ctrl1 = 0x00;
+		cfg->tx_l1_vmode_ctrl2 = 0x0E;
+		cfg->tx_l2_vmode_ctrl1 = 0x00;
+		cfg->tx_l2_vmode_ctrl2 = 0x0E;
+		cfg->tx_l3_vmode_ctrl1 = 0x00;
+		cfg->tx_l3_vmode_ctrl2 = 0x0E;
+	}
+
+	DEV_DBG("HDMI 8996 PLL: PLL Settings\n");
+	DEV_DBG("PLL PARAM: tx_l0_tx_band = 0x%x\n", cfg->tx_l0_tx_band);
+	DEV_DBG("PLL PARAM: tx_l1_tx_band = 0x%x\n", cfg->tx_l1_tx_band);
+	DEV_DBG("PLL PARAM: tx_l2_tx_band = 0x%x\n", cfg->tx_l2_tx_band);
+	DEV_DBG("PLL PARAM: tx_l3_tx_band = 0x%x\n", cfg->tx_l3_tx_band);
+	DEV_DBG("PLL PARAM: com_svs_mode_clk_sel = 0x%x\n",
+						cfg->com_svs_mode_clk_sel);
+	DEV_DBG("PLL PARAM: com_hsclk_sel = 0x%x\n", cfg->com_hsclk_sel);
+	DEV_DBG("PLL PARAM: com_lock_cmp_en = 0x%x\n", cfg->com_lock_cmp_en);
+	DEV_DBG("PLL PARAM: com_pll_cctrl_mode0 = 0x%x\n",
+						cfg->com_pll_cctrl_mode0);
+	DEV_DBG("PLL PARAM: com_pll_rctrl_mode0 = 0x%x\n",
+						cfg->com_pll_rctrl_mode0);
+	DEV_DBG("PLL PARAM: com_cp_ctrl_mode0 = 0x%x\n",
+						cfg->com_cp_ctrl_mode0);
+	DEV_DBG("PLL PARAM: com_dec_start_mode0 = 0x%x\n",
+						cfg->com_dec_start_mode0);
+	DEV_DBG("PLL PARAM: com_div_frac_start1_mode0 = 0x%x\n",
+						cfg->com_div_frac_start1_mode0);
+	DEV_DBG("PLL PARAM: com_div_frac_start2_mode0 = 0x%x\n",
+						cfg->com_div_frac_start2_mode0);
+	DEV_DBG("PLL PARAM: com_div_frac_start3_mode0 = 0x%x\n",
+						cfg->com_div_frac_start3_mode0);
+	DEV_DBG("PLL PARAM: com_integloop_gain0_mode0 = 0x%x\n",
+						cfg->com_integloop_gain0_mode0);
+	DEV_DBG("PLL PARAM: com_integloop_gain1_mode0 = 0x%x\n",
+						cfg->com_integloop_gain1_mode0);
+	DEV_DBG("PLL PARAM: com_lock_cmp1_mode0 = 0x%x\n",
+						cfg->com_lock_cmp1_mode0);
+	DEV_DBG("PLL PARAM: com_lock_cmp2_mode0 = 0x%x\n",
+						cfg->com_lock_cmp2_mode0);
+	DEV_DBG("PLL PARAM: com_lock_cmp3_mode0 = 0x%x\n",
+						cfg->com_lock_cmp3_mode0);
+	DEV_DBG("PLL PARAM: com_core_clk_en = 0x%x\n", cfg->com_core_clk_en);
+	DEV_DBG("PLL PARAM: com_coreclk_div = 0x%x\n", cfg->com_coreclk_div);
+	DEV_DBG("PLL PARAM: phy_mode = 0x%x\n", cfg->phy_mode);
+
+	DEV_DBG("PLL PARAM: tx_l0_lane_mode = 0x%x\n", cfg->tx_l0_lane_mode);
+	DEV_DBG("PLL PARAM: tx_l2_lane_mode = 0x%x\n", cfg->tx_l2_lane_mode);
+	DEV_DBG("PLL PARAM: l0_tx_drv_lvl = 0x%x\n", cfg->tx_l0_tx_drv_lvl);
+	DEV_DBG("PLL PARAM: l0_tx_emp_post1_lvl = 0x%x\n",
+						cfg->tx_l0_tx_emp_post1_lvl);
+	DEV_DBG("PLL PARAM: l1_tx_drv_lvl = 0x%x\n", cfg->tx_l1_tx_drv_lvl);
+	DEV_DBG("PLL PARAM: l1_tx_emp_post1_lvl = 0x%x\n",
+						cfg->tx_l1_tx_emp_post1_lvl);
+	DEV_DBG("PLL PARAM: l2_tx_drv_lvl = 0x%x\n", cfg->tx_l2_tx_drv_lvl);
+	DEV_DBG("PLL PARAM: l2_tx_emp_post1_lvl = 0x%x\n",
+						cfg->tx_l2_tx_emp_post1_lvl);
+	DEV_DBG("PLL PARAM: l3_tx_drv_lvl = 0x%x\n", cfg->tx_l3_tx_drv_lvl);
+	DEV_DBG("PLL PARAM: l3_tx_emp_post1_lvl = 0x%x\n",
+						cfg->tx_l3_tx_emp_post1_lvl);
+
+	DEV_DBG("PLL PARAM: l0_vmode_ctrl1 = 0x%x\n", cfg->tx_l0_vmode_ctrl1);
+	DEV_DBG("PLL PARAM: l0_vmode_ctrl2 = 0x%x\n", cfg->tx_l0_vmode_ctrl2);
+	DEV_DBG("PLL PARAM: l1_vmode_ctrl1 = 0x%x\n", cfg->tx_l1_vmode_ctrl1);
+	DEV_DBG("PLL PARAM: l1_vmode_ctrl2 = 0x%x\n", cfg->tx_l1_vmode_ctrl2);
+	DEV_DBG("PLL PARAM: l2_vmode_ctrl1 = 0x%x\n", cfg->tx_l2_vmode_ctrl1);
+	DEV_DBG("PLL PARAM: l2_vmode_ctrl2 = 0x%x\n", cfg->tx_l2_vmode_ctrl2);
+	DEV_DBG("PLL PARAM: l3_vmode_ctrl1 = 0x%x\n", cfg->tx_l3_vmode_ctrl1);
+	DEV_DBG("PLL PARAM: l3_vmode_ctrl2 = 0x%x\n", cfg->tx_l3_vmode_ctrl2);
+	rc = 0;
+fail:
+	return rc;
+}
+
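+/* Select the PLL parameter calculation matching the PHY revision */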
+static int hdmi_8996_calculate(u32 pix_clk,
+			       struct hdmi_8996_phy_pll_reg_cfg *cfg, u32 ver)
+{
+	switch (ver) {
+	case HDMI_VERSION_8996_V3:
+	case HDMI_VERSION_8996_V3_1_8:
+		return hdmi_8996_v3_calculate(pix_clk, cfg);
+	case HDMI_VERSION_8996_V2:
+		return hdmi_8996_v2_calculate(pix_clk, cfg);
+	default:
+		return hdmi_8996_v1_calculate(pix_clk, cfg);
+	}
+}
+
+static int hdmi_8996_phy_pll_set_clk_rate(struct clk *c, u32 tmds_clk, u32 ver)
+{
+	int rc = 0;
+	struct hdmi_pll_vco_clk *vco = to_hdmi_8996_vco_clk(c);
+	struct mdss_pll_resources *io = vco->priv;
+	struct hdmi_8996_phy_pll_reg_cfg cfg = {0};
+
+	rc = hdmi_8996_calculate(tmds_clk, &cfg, ver);
+	if (rc) {
+		DEV_ERR("%s: PLL calculation failed\n", __func__);
+		return rc;
+	}
+
+	/* Initially shut down PHY */
+	DEV_DBG("%s: Disabling PHY\n", __func__);
+	MDSS_PLL_REG_W(io->phy_base, HDMI_PHY_PD_CTL, 0x0);
+	udelay(500);
+
+	/* Power up sequence */
+	switch (ver) {
+	case HDMI_VERSION_8996_V2:
+	case HDMI_VERSION_8996_V3:
+	case HDMI_VERSION_8996_V3_1_8:
+		MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_BG_CTRL, 0x04);
+		break;
+	}
+
+	MDSS_PLL_REG_W(io->phy_base, HDMI_PHY_PD_CTL, 0x1);
+	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_RESETSM_CNTRL, 0x20);
+	MDSS_PLL_REG_W(io->phy_base, HDMI_PHY_TX0_TX1_LANE_CTL, 0x0F);
+	MDSS_PLL_REG_W(io->phy_base, HDMI_PHY_TX2_TX3_LANE_CTL, 0x0F);
+	MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L0_BASE_OFFSET,
+				     QSERDES_TX_L0_CLKBUF_ENABLE, 0x03);
+	MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L1_BASE_OFFSET,
+				     QSERDES_TX_L0_CLKBUF_ENABLE, 0x03);
+	MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L2_BASE_OFFSET,
+				     QSERDES_TX_L0_CLKBUF_ENABLE, 0x03);
+	MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L3_BASE_OFFSET,
+				     QSERDES_TX_L0_CLKBUF_ENABLE, 0x03);
+
+	MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L0_BASE_OFFSET,
+		     QSERDES_TX_L0_LANE_MODE, cfg.tx_l0_lane_mode);
+	MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L2_BASE_OFFSET,
+		     QSERDES_TX_L0_LANE_MODE, cfg.tx_l2_lane_mode);
+
+	MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L0_BASE_OFFSET,
+		     QSERDES_TX_L0_TX_BAND, cfg.tx_l0_tx_band);
+	MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L1_BASE_OFFSET,
+		     QSERDES_TX_L0_TX_BAND, cfg.tx_l1_tx_band);
+	MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L2_BASE_OFFSET,
+		     QSERDES_TX_L0_TX_BAND, cfg.tx_l2_tx_band);
+	MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L3_BASE_OFFSET,
+		     QSERDES_TX_L0_TX_BAND, cfg.tx_l3_tx_band);
+
+	MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L0_BASE_OFFSET,
+			       QSERDES_TX_L0_RESET_TSYNC_EN, 0x03);
+	MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L1_BASE_OFFSET,
+			       QSERDES_TX_L0_RESET_TSYNC_EN, 0x03);
+	MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L2_BASE_OFFSET,
+			       QSERDES_TX_L0_RESET_TSYNC_EN, 0x03);
+	MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L3_BASE_OFFSET,
+			       QSERDES_TX_L0_RESET_TSYNC_EN, 0x03);
+
+	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_SYSCLK_BUF_ENABLE, 0x1E);
+	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_BIAS_EN_CLKBUFLR_EN, 0x07);
+	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_SYSCLK_EN_SEL, 0x37);
+	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_SYS_CLK_CTRL, 0x02);
+	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_CLK_ENABLE1, 0x0E);
+	if (ver == HDMI_VERSION_8996_V1)
+		MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_BG_CTRL, 0x06);
+
+	/* Bypass VCO calibration */
+	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_SVS_MODE_CLK_SEL,
+					cfg.com_svs_mode_clk_sel);
+
+	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_BG_TRIM, 0x0F);
+	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_PLL_IVCO, 0x0F);
+	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_VCO_TUNE_CTRL,
+			cfg.com_vco_tune_ctrl);
+
+	switch (ver) {
+	case HDMI_VERSION_8996_V1:
+		MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_SVS_MODE_CLK_SEL,
+					cfg.com_svs_mode_clk_sel);
+		break;
+	default:
+		MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_BG_CTRL, 0x06);
+	}
+
+	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_CLK_SELECT, 0x30);
+	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_HSCLK_SEL,
+		       cfg.com_hsclk_sel);
+	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_LOCK_CMP_EN,
+		       cfg.com_lock_cmp_en);
+
+	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_PLL_CCTRL_MODE0,
+		       cfg.com_pll_cctrl_mode0);
+	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_PLL_RCTRL_MODE0,
+		       cfg.com_pll_rctrl_mode0);
+	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_CP_CTRL_MODE0,
+		       cfg.com_cp_ctrl_mode0);
+	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_DEC_START_MODE0,
+		       cfg.com_dec_start_mode0);
+	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_DIV_FRAC_START1_MODE0,
+		       cfg.com_div_frac_start1_mode0);
+	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_DIV_FRAC_START2_MODE0,
+		       cfg.com_div_frac_start2_mode0);
+	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_DIV_FRAC_START3_MODE0,
+		       cfg.com_div_frac_start3_mode0);
+
+	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_INTEGLOOP_GAIN0_MODE0,
+			cfg.com_integloop_gain0_mode0);
+	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_INTEGLOOP_GAIN1_MODE0,
+			cfg.com_integloop_gain1_mode0);
+
+	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_LOCK_CMP1_MODE0,
+			cfg.com_lock_cmp1_mode0);
+	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_LOCK_CMP2_MODE0,
+			cfg.com_lock_cmp2_mode0);
+	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_LOCK_CMP3_MODE0,
+			cfg.com_lock_cmp3_mode0);
+
+	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_VCO_TUNE_MAP, 0x00);
+	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_CORE_CLK_EN,
+		       cfg.com_core_clk_en);
+	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_CORECLK_DIV,
+		       cfg.com_coreclk_div);
+	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_CMN_CONFIG, 0x02);
+
+	if (ver == HDMI_VERSION_8996_V3 || ver == HDMI_VERSION_8996_V3_1_8)
+		MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_RESCODE_DIV_NUM, 0x15);
+
+	/* TX lanes setup (TX 0/1/2/3) */
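+	/*
+	 * HDMI_VERSION_8996_V3_1_8 (presumably the 1.8 V I/O variant) uses
+	 * fixed drive levels here instead of the calculated per-rate values.
+	 */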
+	if (ver == HDMI_VERSION_8996_V3_1_8) {
+		MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L0_BASE_OFFSET,
+				QSERDES_TX_L0_TX_DRV_LVL,
+				0x00000023);
+	} else {
+		MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L0_BASE_OFFSET,
+				QSERDES_TX_L0_TX_DRV_LVL,
+				cfg.tx_l0_tx_drv_lvl);
+	}
+	MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L0_BASE_OFFSET,
+		       QSERDES_TX_L0_TX_EMP_POST1_LVL,
+		       cfg.tx_l0_tx_emp_post1_lvl);
+
+	if (ver == HDMI_VERSION_8996_V3_1_8) {
+		MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L1_BASE_OFFSET,
+				QSERDES_TX_L0_TX_DRV_LVL,
+				0x00000023);
+	} else {
+		MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L1_BASE_OFFSET,
+				QSERDES_TX_L0_TX_DRV_LVL,
+				cfg.tx_l1_tx_drv_lvl);
+	}
+	MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L1_BASE_OFFSET,
+		       QSERDES_TX_L0_TX_EMP_POST1_LVL,
+		       cfg.tx_l1_tx_emp_post1_lvl);
+
+	if (ver == HDMI_VERSION_8996_V3_1_8) {
+		MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L2_BASE_OFFSET,
+				QSERDES_TX_L0_TX_DRV_LVL,
+				0x00000023);
+	} else {
+		MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L2_BASE_OFFSET,
+				QSERDES_TX_L0_TX_DRV_LVL,
+				cfg.tx_l2_tx_drv_lvl);
+	}
+	MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L2_BASE_OFFSET,
+		       QSERDES_TX_L0_TX_EMP_POST1_LVL,
+		       cfg.tx_l2_tx_emp_post1_lvl);
+
+	if (ver == HDMI_VERSION_8996_V3_1_8) {
+		MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L3_BASE_OFFSET,
+				QSERDES_TX_L0_TX_DRV_LVL,
+				0x00000020);
+	} else {
+		MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L3_BASE_OFFSET,
+				QSERDES_TX_L0_TX_DRV_LVL,
+				cfg.tx_l3_tx_drv_lvl);
+	}
+	MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L3_BASE_OFFSET,
+		       QSERDES_TX_L0_TX_EMP_POST1_LVL,
+		       cfg.tx_l3_tx_emp_post1_lvl);
+
+	MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L0_BASE_OFFSET,
+		       QSERDES_TX_L0_VMODE_CTRL1,
+		       cfg.tx_l0_vmode_ctrl1);
+	MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L0_BASE_OFFSET,
+		       QSERDES_TX_L0_VMODE_CTRL2,
+		       cfg.tx_l0_vmode_ctrl2);
+
+	MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L1_BASE_OFFSET,
+		       QSERDES_TX_L0_VMODE_CTRL1,
+		       cfg.tx_l1_vmode_ctrl1);
+	MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L1_BASE_OFFSET,
+		       QSERDES_TX_L0_VMODE_CTRL2,
+		       cfg.tx_l1_vmode_ctrl2);
+
+	MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L2_BASE_OFFSET,
+		       QSERDES_TX_L0_VMODE_CTRL1,
+		       cfg.tx_l2_vmode_ctrl1);
+	MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L2_BASE_OFFSET,
+			QSERDES_TX_L0_VMODE_CTRL2,
+			cfg.tx_l2_vmode_ctrl2);
+
+	MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L3_BASE_OFFSET,
+		       QSERDES_TX_L0_VMODE_CTRL1,
+		       cfg.tx_l3_vmode_ctrl1);
+	if (ver == HDMI_VERSION_8996_V3_1_8) {
+		MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L3_BASE_OFFSET,
+				QSERDES_TX_L0_VMODE_CTRL2,
+				0x0000000D);
+	} else {
+		MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L3_BASE_OFFSET,
+				QSERDES_TX_L0_VMODE_CTRL2,
+				cfg.tx_l3_vmode_ctrl2);
+	}
+
+	MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L0_BASE_OFFSET,
+		       QSERDES_TX_L0_TX_DRV_LVL_OFFSET, 0x00);
+	MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L1_BASE_OFFSET,
+		       QSERDES_TX_L0_TX_DRV_LVL_OFFSET, 0x00);
+	MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L2_BASE_OFFSET,
+		       QSERDES_TX_L0_TX_DRV_LVL_OFFSET, 0x00);
+	MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L3_BASE_OFFSET,
+		       QSERDES_TX_L0_TX_DRV_LVL_OFFSET, 0x00);
+
+	MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L0_BASE_OFFSET,
+		       QSERDES_TX_L0_RES_CODE_LANE_OFFSET, 0x00);
+	MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L1_BASE_OFFSET,
+		       QSERDES_TX_L0_RES_CODE_LANE_OFFSET, 0x00);
+	MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L2_BASE_OFFSET,
+		       QSERDES_TX_L0_RES_CODE_LANE_OFFSET, 0x00);
+	MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L3_BASE_OFFSET,
+		       QSERDES_TX_L0_RES_CODE_LANE_OFFSET, 0x00);
+
+	if (ver < HDMI_VERSION_8996_V3) {
+		MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L0_BASE_OFFSET,
+			       QSERDES_TX_L0_RES_CODE_LANE_TX,
+			       cfg.tx_l0_res_code_lane_tx);
+		MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L1_BASE_OFFSET,
+			       QSERDES_TX_L0_RES_CODE_LANE_TX,
+			       cfg.tx_l1_res_code_lane_tx);
+		MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L2_BASE_OFFSET,
+			       QSERDES_TX_L0_RES_CODE_LANE_TX,
+			       cfg.tx_l2_res_code_lane_tx);
+		MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L3_BASE_OFFSET,
+			       QSERDES_TX_L0_RES_CODE_LANE_TX,
+			       cfg.tx_l3_res_code_lane_tx);
+		MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_RESTRIM_CTRL,
+			       cfg.com_restrim_ctrl);
+
+		MDSS_PLL_REG_W(io->phy_base, HDMI_PHY_TXCAL_CFG0, 0x00);
+		MDSS_PLL_REG_W(io->phy_base, HDMI_PHY_TXCAL_CFG1, 0x05);
+	}
+
+	MDSS_PLL_REG_W(io->phy_base, HDMI_PHY_MODE, cfg.phy_mode);
+	MDSS_PLL_REG_W(io->phy_base, HDMI_PHY_PD_CTL, 0x1F);
+
+	MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L0_BASE_OFFSET,
+			QSERDES_TX_L0_TRAN_DRVR_EMP_EN, 0x03);
+	MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L1_BASE_OFFSET,
+			QSERDES_TX_L0_TRAN_DRVR_EMP_EN, 0x03);
+	MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L2_BASE_OFFSET,
+			QSERDES_TX_L0_TRAN_DRVR_EMP_EN, 0x03);
+	MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L3_BASE_OFFSET,
+			QSERDES_TX_L0_TRAN_DRVR_EMP_EN, 0x03);
+
+	MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L0_BASE_OFFSET,
+			QSERDES_TX_L0_PARRATE_REC_DETECT_IDLE_EN, 0x40);
+	MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L1_BASE_OFFSET,
+			QSERDES_TX_L0_PARRATE_REC_DETECT_IDLE_EN, 0x40);
+	MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L2_BASE_OFFSET,
+			QSERDES_TX_L0_PARRATE_REC_DETECT_IDLE_EN, 0x40);
+	MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L3_BASE_OFFSET,
+			QSERDES_TX_L0_PARRATE_REC_DETECT_IDLE_EN, 0x40);
+
+	MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L0_BASE_OFFSET,
+		       QSERDES_TX_L0_HP_PD_ENABLES, 0x0C);
+	MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L1_BASE_OFFSET,
+		       QSERDES_TX_L0_HP_PD_ENABLES, 0x0C);
+	MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L2_BASE_OFFSET,
+		       QSERDES_TX_L0_HP_PD_ENABLES, 0x0C);
+	MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L3_BASE_OFFSET,
+		       QSERDES_TX_L0_HP_PD_ENABLES, 0x03);
+
+	if (ver == HDMI_VERSION_8996_V2) {
+		MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_ATB_SEL1, 0x01);
+		MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_ATB_SEL2, 0x01);
+	}
+	/*
+	 * Ensure that vco configuration gets flushed to hardware before
+	 * enabling the PLL
+	 */
+	wmb();
+	return 0;
+}
+
+static int hdmi_8996_phy_ready_status(struct mdss_pll_resources *io)
+{
+	u32 status = 0;
+	int phy_ready = 0;
+	int rc;
+	u32 read_count = 0;
+
+	rc = mdss_pll_resource_enable(io, true);
+	if (rc) {
+		DEV_ERR("%s: pll resource can't be enabled\n", __func__);
+		return rc;
+	}
+
+	DEV_DBG("%s: Waiting for PHY Ready\n", __func__);
+
+	/* Poll for PHY ready status */
+	while (read_count < HDMI_PLL_POLL_MAX_READS) {
+		status = MDSS_PLL_REG_R(io->phy_base, HDMI_PHY_STATUS);
+		if (status & BIT(0)) {
+			phy_ready = 1;
+			DEV_DBG("%s: PHY READY\n", __func__);
+			break;
+		}
+		udelay(HDMI_PLL_POLL_TIMEOUT_US);
+		read_count++;
+	}
+
+	if (read_count == HDMI_PLL_POLL_MAX_READS) {
+		phy_ready = 0;
+		DEV_DBG("%s: PHY READY TIMEOUT\n", __func__);
+	}
+
+	mdss_pll_resource_enable(io, false);
+
+	return phy_ready;
+}
+
+static int hdmi_8996_pll_lock_status(struct mdss_pll_resources *io)
+{
+	u32 status;
+	int pll_locked = 0;
+	int rc;
+	u32 read_count = 0;
+
+	rc = mdss_pll_resource_enable(io, true);
+	if (rc) {
+		DEV_ERR("%s: pll resource can't be enabled\n", __func__);
+		return rc;
+	}
+
+	DEV_DBG("%s: Waiting for PLL lock\n", __func__);
+
+	while (read_count < HDMI_PLL_POLL_MAX_READS) {
+		status = MDSS_PLL_REG_R(io->pll_base,
+				QSERDES_COM_C_READY_STATUS);
+		if (status & BIT(0)) {
+			pll_locked = 1;
+			DEV_DBG("%s: C READY\n", __func__);
+			break;
+		}
+		udelay(HDMI_PLL_POLL_TIMEOUT_US);
+		read_count++;
+	}
+
+	if (read_count == HDMI_PLL_POLL_MAX_READS) {
+		pll_locked = 0;
+		DEV_DBG("%s: C READY TIMEOUT\n", __func__);
+	}
+
+	mdss_pll_resource_enable(io, false);
+
+	return pll_locked;
+}
+
+static int hdmi_8996_v1_perform_sw_calibration(struct clk *c)
+{
+	int rc = 0;
+	struct hdmi_pll_vco_clk *vco = to_hdmi_8996_vco_clk(c);
+	struct mdss_pll_resources *io = vco->priv;
+
+	u32 max_code = 0x190;
+	u32 min_code = 0x0;
+	u32 max_cnt = 0;
+	u32 min_cnt = 0;
+	u32 expected_counter_value = 0;
+	u32 step = 0;
+	u32 dbus_all = 0;
+	u32 dbus_sel = 0;
+	u32 vco_code = 0;
+	u32 val = 0;
+
+	vco_code = 0xC8;
+
+	DEV_DBG("%s: Starting SW calibration with vco_code = %d\n", __func__,
+		 vco_code);
+
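+	/*
+	 * The expected counter value is the 24-bit lock-comparator target
+	 * previously programmed into LOCK_CMP1..3_MODE0; the search below
+	 * looks for the VCO tune code whose debug-bus count matches it.
+	 */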
+	expected_counter_value =
+	   (MDSS_PLL_REG_R(io->pll_base, QSERDES_COM_LOCK_CMP3_MODE0) << 16) |
+	   (MDSS_PLL_REG_R(io->pll_base, QSERDES_COM_LOCK_CMP2_MODE0) << 8) |
+	   (MDSS_PLL_REG_R(io->pll_base, QSERDES_COM_LOCK_CMP1_MODE0));
+
+	DEV_DBG("%s: expected_counter_value = %d\n", __func__,
+		 expected_counter_value);
+
+	val = MDSS_PLL_REG_R(io->pll_base, QSERDES_COM_CMN_MISC1);
+	val |= BIT(4);
+	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_CMN_MISC1, val);
+
+	val = MDSS_PLL_REG_R(io->pll_base, QSERDES_COM_CMN_MISC1);
+	val |= BIT(3);
+	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_CMN_MISC1, val);
+
+	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_DEBUG_BUS_SEL, 0x4);
+
+	val = MDSS_PLL_REG_R(io->pll_base, QSERDES_COM_LOCK_CMP_CFG);
+	val |= BIT(1);
+	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_LOCK_CMP_CFG, val);
+
+	udelay(60);
+
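+	/*
+	 * Binary search over the 10-bit VCO tune code: each pass latches a
+	 * candidate code, pulses the lock-comparator enable to restart the
+	 * counter, then narrows [min_code, max_code] until the debug-bus
+	 * count matches the expected value or the code space is exhausted.
+	 */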
+	while (1) {
+		MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_VCO_TUNE1_MODE0,
+			       vco_code & 0xFF);
+		MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_VCO_TUNE2_MODE0,
+			       (vco_code >> 8) & 0x3);
+
+		udelay(20);
+
+		val = MDSS_PLL_REG_R(io->pll_base, QSERDES_COM_LOCK_CMP_CFG);
+		val &= ~BIT(1);
+		MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_LOCK_CMP_CFG, val);
+
+		udelay(60);
+
+		val = MDSS_PLL_REG_R(io->pll_base, QSERDES_COM_LOCK_CMP_CFG);
+		val |= BIT(1);
+		MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_LOCK_CMP_CFG, val);
+
+		udelay(60);
+
+		dbus_all =
+		  (MDSS_PLL_REG_R(io->pll_base, QSERDES_COM_DEBUG_BUS3) << 24) |
+		  (MDSS_PLL_REG_R(io->pll_base, QSERDES_COM_DEBUG_BUS2) << 16) |
+		  (MDSS_PLL_REG_R(io->pll_base, QSERDES_COM_DEBUG_BUS1) << 8) |
+		  (MDSS_PLL_REG_R(io->pll_base, QSERDES_COM_DEBUG_BUS0));
+
+		dbus_sel = (dbus_all >> 9) & 0x3FFFF;
+		DEV_DBG("%s: loop[%d], dbus_all = 0x%x, dbus_sel = 0x%x\n",
+			__func__, step, dbus_all, dbus_sel);
+		if (dbus_sel == 0)
+			DEV_ERR("%s: CHECK HDMI REF CLK\n", __func__);
+
+		if (dbus_sel == expected_counter_value) {
+			max_code = vco_code;
+			max_cnt = dbus_sel;
+			min_code = vco_code;
+			min_cnt = dbus_sel;
+		} else if (dbus_sel == 0) {
+			max_code = vco_code;
+			max_cnt = dbus_sel;
+			vco_code = (max_code + min_code)/2;
+		} else if (dbus_sel > expected_counter_value) {
+			min_code = vco_code;
+			min_cnt = dbus_sel;
+			vco_code = (max_code + min_code)/2;
+		} else if (dbus_sel < expected_counter_value) {
+			max_code = vco_code;
+			max_cnt = dbus_sel;
+			vco_code = (max_code + min_code)/2;
+		}
+
+		step++;
+
+		if ((vco_code == 0) || (vco_code == 0x3FF) || (step > 0x3FF)) {
+			DEV_ERR("%s: VCO tune code search failed\n", __func__);
+			rc = -ENOTSUPP;
+			break;
+		}
+		if ((max_code - min_code) <= 1) {
+			if ((max_code - min_code) == 1) {
+				if (abs((int)(max_cnt - expected_counter_value))
+				    < abs((int)(min_cnt - expected_counter_value
+					))) {
+					vco_code = max_code;
+				} else {
+					vco_code = min_code;
+				}
+			}
+			break;
+		}
+		DEV_DBG("%s: loop[%d], new vco_code = %d\n", __func__, step,
+			 vco_code);
+	}
+
+	DEV_DBG("%s: CALIB done. vco_code = %d\n", __func__, vco_code);
+	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_VCO_TUNE1_MODE0,
+		       vco_code & 0xFF);
+	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_VCO_TUNE2_MODE0,
+		       (vco_code >> 8) & 0x3);
+	val = MDSS_PLL_REG_R(io->pll_base, QSERDES_COM_LOCK_CMP_CFG);
+	val &= ~BIT(1);
+	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_LOCK_CMP_CFG, val);
+
+	val = MDSS_PLL_REG_R(io->pll_base, QSERDES_COM_CMN_MISC1);
+	val |= BIT(4);
+	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_CMN_MISC1, val);
+
+	val = MDSS_PLL_REG_R(io->pll_base, QSERDES_COM_CMN_MISC1);
+	val &= ~BIT(3);
+	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_CMN_MISC1, val);
+
+	return rc;
+}
+
+static int hdmi_8996_v2_perform_sw_calibration(struct clk *c)
+{
+	int rc = 0;
+	struct hdmi_pll_vco_clk *vco = to_hdmi_8996_vco_clk(c);
+	struct mdss_pll_resources *io = vco->priv;
+	u32 vco_code1, vco_code2, integral_loop, ready_poll;
+	u32 read_count = 0;
+
+	while (read_count < (HDMI_PLL_POLL_MAX_READS << 1)) {
+		ready_poll = MDSS_PLL_REG_R(io->pll_base,
+				QSERDES_COM_C_READY_STATUS);
+		if (ready_poll & BIT(0)) {
+			ready_poll = 1;
+			DEV_DBG("%s: C READY\n", __func__);
+			break;
+		}
+		udelay(HDMI_PLL_POLL_TIMEOUT_US);
+		read_count++;
+	}
+
+	if (read_count == (HDMI_PLL_POLL_MAX_READS << 1)) {
+		ready_poll = 0;
+		DEV_DBG("%s: C READY TIMEOUT, TRYING SW CALIBRATION\n",
+								__func__);
+	}
+
+	vco_code1 = MDSS_PLL_REG_R(io->pll_base,
+				QSERDES_COM_PLLCAL_CODE1_STATUS);
+	vco_code2 = MDSS_PLL_REG_R(io->pll_base,
+				QSERDES_COM_PLLCAL_CODE2_STATUS);
+	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_DEBUG_BUS_SEL, 0x5);
+	integral_loop = MDSS_PLL_REG_R(io->pll_base,
+				QSERDES_COM_DEBUG_BUS0);
+
+	if (((ready_poll & 0x1) == 0) || (((ready_poll & 1) == 1) &&
+			(vco_code1 == 0xFF) && ((vco_code2 & 0x3) == 0x1) &&
+			(integral_loop > 0xC0))) {
+		MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_ATB_SEL1, 0x04);
+		MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_ATB_SEL2, 0x00);
+		MDSS_PLL_REG_W(io->phy_base, HDMI_PHY_CFG, 0x17);
+		udelay(100);
+
+		MDSS_PLL_REG_W(io->phy_base, HDMI_PHY_CFG, 0x11);
+		udelay(100);
+
+		MDSS_PLL_REG_W(io->phy_base, HDMI_PHY_CFG, 0x19);
+	}
+	return rc;
+}
+
+static int hdmi_8996_perform_sw_calibration(struct clk *c, u32 ver)
+{
+	switch (ver) {
+	case HDMI_VERSION_8996_V1:
+		return hdmi_8996_v1_perform_sw_calibration(c);
+	case HDMI_VERSION_8996_V2:
+		return hdmi_8996_v2_perform_sw_calibration(c);
+	}
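+	/* V3 and later PHY revisions have no SW calibration path here. */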
+	return 0;
+}
+
+static int hdmi_8996_vco_enable(struct clk *c, u32 ver)
+{
+	int rc = 0;
+	struct hdmi_pll_vco_clk *vco = to_hdmi_8996_vco_clk(c);
+	struct mdss_pll_resources *io = vco->priv;
+
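+	/*
+	 * Start the PHY, run SW calibration where the PHY revision needs
+	 * it, then wait for PLL lock and PHY ready before restarting the
+	 * retiming buffer.
+	 */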
+	MDSS_PLL_REG_W(io->phy_base, HDMI_PHY_CFG, 0x1);
+	udelay(100);
+
+	MDSS_PLL_REG_W(io->phy_base, HDMI_PHY_CFG, 0x19);
+	udelay(100);
+
+	rc = hdmi_8996_perform_sw_calibration(c, ver);
+	if (rc) {
+		DEV_ERR("%s: software calibration failed\n", __func__);
+		return rc;
+	}
+
+	rc = hdmi_8996_pll_lock_status(io);
+	if (rc <= 0) {
+		DEV_ERR("%s: PLL not locked\n", __func__);
+		return rc ? rc : -EINVAL;
+	}
+
+	MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L0_BASE_OFFSET,
+		       QSERDES_TX_L0_HIGHZ_TRANSCEIVEREN_BIAS_DRVR_EN,
+		       0x6F);
+	MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L1_BASE_OFFSET,
+		       QSERDES_TX_L0_HIGHZ_TRANSCEIVEREN_BIAS_DRVR_EN,
+		       0x6F);
+	MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L2_BASE_OFFSET,
+		       QSERDES_TX_L0_HIGHZ_TRANSCEIVEREN_BIAS_DRVR_EN,
+		       0x6F);
+	MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L3_BASE_OFFSET,
+		       QSERDES_TX_L0_HIGHZ_TRANSCEIVEREN_BIAS_DRVR_EN,
+		       0x6F);
+
+	/* Disable SSC */
+	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_SSC_PER1, 0x0);
+	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_SSC_PER2, 0x0);
+	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_SSC_STEP_SIZE1, 0x0);
+	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_SSC_STEP_SIZE2, 0x0);
+	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_SSC_EN_CENTER, 0x2);
+
+	rc = hdmi_8996_phy_ready_status(io);
+	if (rc <= 0) {
+		DEV_ERR("%s: PHY not READY\n", __func__);
+		return rc ? rc : -EINVAL;
+	}
+
+	/* Restart the retiming buffer */
+	MDSS_PLL_REG_W(io->phy_base, HDMI_PHY_CFG, 0x18);
+	udelay(1);
+	MDSS_PLL_REG_W(io->phy_base, HDMI_PHY_CFG, 0x19);
+
+	io->pll_on = true;
+	return 0;
+}
+
+static int hdmi_8996_v1_vco_enable(struct clk *c)
+{
+	return hdmi_8996_vco_enable(c, HDMI_VERSION_8996_V1);
+}
+
+static int hdmi_8996_v2_vco_enable(struct clk *c)
+{
+	return hdmi_8996_vco_enable(c, HDMI_VERSION_8996_V2);
+}
+
+static int hdmi_8996_v3_vco_enable(struct clk *c)
+{
+	return hdmi_8996_vco_enable(c, HDMI_VERSION_8996_V3);
+}
+
+static int hdmi_8996_v3_1p8_vco_enable(struct clk *c)
+{
+	return hdmi_8996_vco_enable(c, HDMI_VERSION_8996_V3_1_8);
+}
+
+static int hdmi_8996_vco_get_lock_range(struct clk *c, unsigned long pixel_clk)
+{
+	u32 rng = 64, cmp_cnt = 1024;
+	u32 coreclk_div = 5, clks_pll_divsel = 2;
+	u32 vco_freq, vco_ratio, ppm_range;
+	u64 bclk;
+	struct hdmi_8996_v3_post_divider pd;
+
+	bclk = ((u64)pixel_clk) * HDMI_BIT_CLK_TO_PIX_CLK_RATIO;
+
+	DEV_DBG("%s: rate=%ld\n", __func__, pixel_clk);
+
+	if (hdmi_8996_v3_get_post_div(&pd, bclk) ||
+		pd.vco_ratio <= 0 || pd.vco_freq <= 0) {
+		DEV_ERR("%s: couldn't get post div\n", __func__);
+		return -EINVAL;
+	}
+
+	do_div(pd.vco_freq, HDMI_KHZ_TO_HZ * HDMI_KHZ_TO_HZ);
+
+	vco_freq  = (u32) pd.vco_freq;
+	vco_ratio = (u32) pd.vco_ratio;
+
+	DEV_DBG("%s: freq %d, ratio %d\n", __func__,
+		vco_freq, vco_ratio);
+
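+	/*
+	 * Convert the lock-comparator window (+/-rng counts out of cmp_cnt
+	 * at the core clock) into a ppm-style limit on the pixel clock.
+	 */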
+	ppm_range = (rng * HDMI_REF_CLOCK) / cmp_cnt;
+	ppm_range /= vco_freq / vco_ratio;
+	ppm_range *= coreclk_div * clks_pll_divsel;
+
+	DEV_DBG("%s: ppm range: %d\n", __func__, ppm_range);
+
+	return ppm_range;
+}
+
+static int hdmi_8996_vco_rate_atomic_update(struct clk *c,
+	unsigned long rate, u32 ver)
+{
+	struct hdmi_pll_vco_clk *vco = to_hdmi_8996_vco_clk(c);
+	struct mdss_pll_resources *io = vco->priv;
+	void __iomem *pll;
+	struct hdmi_8996_phy_pll_reg_cfg cfg = {0};
+	int rc = 0;
+
+	rc = hdmi_8996_calculate(rate, &cfg, ver);
+	if (rc) {
+		DEV_ERR("%s: PLL calculation failed\n", __func__);
+		goto end;
+	}
+
+	pll = io->pll_base;
+
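+	/*
+	 * Atomic retune: only the fractional-N divider words change, and
+	 * toggling FREQ_UPDATE latches them into the running PLL without a
+	 * full power-down and reconfigure cycle.
+	 */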
+	MDSS_PLL_REG_W(pll, QSERDES_COM_DEC_START_MODE0,
+		       cfg.com_dec_start_mode0);
+	MDSS_PLL_REG_W(pll, QSERDES_COM_DIV_FRAC_START1_MODE0,
+		       cfg.com_div_frac_start1_mode0);
+	MDSS_PLL_REG_W(pll, QSERDES_COM_DIV_FRAC_START2_MODE0,
+		       cfg.com_div_frac_start2_mode0);
+	MDSS_PLL_REG_W(pll, QSERDES_COM_DIV_FRAC_START3_MODE0,
+		       cfg.com_div_frac_start3_mode0);
+
+	MDSS_PLL_REG_W(pll, QSERDES_COM_FREQ_UPDATE, 0x01);
+	MDSS_PLL_REG_W(pll, QSERDES_COM_FREQ_UPDATE, 0x00);
+
+	DEV_DBG("%s: updated to rate %ld\n", __func__, rate);
+end:
+	return rc;
+}
+
+static int hdmi_8996_vco_set_rate(struct clk *c, unsigned long rate, u32 ver)
+{
+	struct hdmi_pll_vco_clk *vco = to_hdmi_8996_vco_clk(c);
+	struct mdss_pll_resources *io = vco->priv;
+	unsigned int set_power_dwn = 0;
+	bool atomic_update = false;
+	int rc, pll_lock_range;
+
+	rc = mdss_pll_resource_enable(io, true);
+	if (rc) {
+		DEV_ERR("pll resource can't be enabled\n");
+		return rc;
+	}
+
+	DEV_DBG("%s: rate %ld\n", __func__, rate);
+
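+	/*
+	 * If the PLL is locked and the PHY ready, and the new rate falls
+	 * inside the computed lock range, retune in place instead of
+	 * tearing the PLL down.
+	 */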
+	if (MDSS_PLL_REG_R(io->pll_base, QSERDES_COM_C_READY_STATUS) & BIT(0) &&
+		MDSS_PLL_REG_R(io->phy_base, HDMI_PHY_STATUS) & BIT(0)) {
+		pll_lock_range = hdmi_8996_vco_get_lock_range(c, vco->rate);
+
+		if (pll_lock_range > 0 && vco->rate) {
+			u32 range_limit;
+
+			range_limit  = pll_lock_range *
+				(vco->rate / HDMI_KHZ_TO_HZ);
+			range_limit /= HDMI_KHZ_TO_HZ;
+
+			DEV_DBG("%s: range limit %d\n", __func__, range_limit);
+
+			if (abs(rate - vco->rate) < range_limit)
+				atomic_update = true;
+		}
+	}
+
+	if (io->pll_on && !atomic_update)
+		set_power_dwn = 1;
+
+	if (atomic_update)
+		rc = hdmi_8996_vco_rate_atomic_update(c, rate, ver);
+	else
+		rc = hdmi_8996_phy_pll_set_clk_rate(c, rate, ver);
+	if (rc)
+		DEV_ERR("%s: Failed to set clk rate\n", __func__);
+
+	mdss_pll_resource_enable(io, false);
+
+	if (set_power_dwn)
+		hdmi_8996_vco_enable(c, ver);
+
+	vco->rate = rate;
+	vco->rate_set = true;
+
+	return rc;
+}
+
+static int hdmi_8996_v1_vco_set_rate(struct clk *c, unsigned long rate)
+{
+	return hdmi_8996_vco_set_rate(c, rate, HDMI_VERSION_8996_V1);
+}
+
+static int hdmi_8996_v2_vco_set_rate(struct clk *c, unsigned long rate)
+{
+	return hdmi_8996_vco_set_rate(c, rate, HDMI_VERSION_8996_V2);
+}
+
+static int hdmi_8996_v3_vco_set_rate(struct clk *c, unsigned long rate)
+{
+	return hdmi_8996_vco_set_rate(c, rate, HDMI_VERSION_8996_V3);
+}
+
+static int hdmi_8996_v3_1p8_vco_set_rate(struct clk *c, unsigned long rate)
+{
+	return hdmi_8996_vco_set_rate(c, rate, HDMI_VERSION_8996_V3_1_8);
+}
+
+static unsigned long hdmi_get_hsclk_sel_divisor(unsigned long hsclk_sel)
+{
+	unsigned long divisor;
+
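+	/*
+	 * The 4-bit HSCLK_SEL field encodes
+	 * divisor = (sel / 4 + 2) * (2 * (sel % 4) + 1); the switch below
+	 * spells out all 16 values (13 aliases 6, both giving 15).
+	 */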
+	switch (hsclk_sel) {
+	case 0:
+		divisor = 2;
+		break;
+	case 1:
+		divisor = 6;
+		break;
+	case 2:
+		divisor = 10;
+		break;
+	case 3:
+		divisor = 14;
+		break;
+	case 4:
+		divisor = 3;
+		break;
+	case 5:
+		divisor = 9;
+		break;
+	case 6:
+	case 13:
+		divisor = 15;
+		break;
+	case 7:
+		divisor = 21;
+		break;
+	case 8:
+		divisor = 4;
+		break;
+	case 9:
+		divisor = 12;
+		break;
+	case 10:
+		divisor = 20;
+		break;
+	case 11:
+		divisor = 28;
+		break;
+	case 12:
+		divisor = 5;
+		break;
+	case 14:
+		divisor = 25;
+		break;
+	case 15:
+		divisor = 35;
+		break;
+	default:
+		divisor = 1;
+		DEV_ERR("%s: invalid hsclk_sel value = %lu",
+				__func__, hsclk_sel);
+		break;
+	}
+
+	return divisor;
+}
+
+static unsigned long hdmi_8996_vco_get_rate(struct clk *c)
+{
+	unsigned long freq = 0, hsclk_sel = 0, tx_band = 0, dec_start = 0,
+		      div_frac_start = 0, vco_clock_freq = 0;
+	struct hdmi_pll_vco_clk *vco = to_hdmi_8996_vco_clk(c);
+	struct mdss_pll_resources *io = vco->priv;
+
+	if (mdss_pll_resource_enable(io, true)) {
+		DEV_ERR("%s: pll resource can't be enabled\n", __func__);
+		return freq;
+	}
+
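+	/*
+	 * Reverse the programming equations: vco = (dec_start +
+	 * div_frac_start / 2^20) * 4 * ref_clk, then divide out the TMDS
+	 * bit-to-pixel ratio (10), the HSCLK divisor and the Tx band.
+	 */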
+	dec_start = MDSS_PLL_REG_R(io->pll_base, QSERDES_COM_DEC_START_MODE0);
+
+	div_frac_start =
+		MDSS_PLL_REG_R(io->pll_base,
+				QSERDES_COM_DIV_FRAC_START1_MODE0) |
+		MDSS_PLL_REG_R(io->pll_base,
+				QSERDES_COM_DIV_FRAC_START2_MODE0) << 8 |
+		MDSS_PLL_REG_R(io->pll_base,
+				QSERDES_COM_DIV_FRAC_START3_MODE0) << 16;
+
+	vco_clock_freq = (dec_start + (div_frac_start / (1 << 20)))
+		* 4 * (HDMI_REF_CLOCK);
+
+	hsclk_sel = MDSS_PLL_REG_R(io->pll_base, QSERDES_COM_HSCLK_SEL) & 0xF;
+	hsclk_sel = hdmi_get_hsclk_sel_divisor(hsclk_sel);
+	tx_band = MDSS_PLL_REG_R(io->pll_base + HDMI_TX_L0_BASE_OFFSET,
+			QSERDES_TX_L0_TX_BAND) & 0x3;
+
+	freq = vco_clock_freq / (10 * hsclk_sel * (1 << tx_band));
+
+	mdss_pll_resource_enable(io, false);
+
+	DEV_DBG("%s: freq = %lu\n", __func__, freq);
+
+	return freq;
+}
+
+static long hdmi_8996_vco_round_rate(struct clk *c, unsigned long rate)
+{
+	unsigned long rrate = rate;
+
+	DEV_DBG("rrate=%ld\n", rrate);
+
+	return rrate;
+}
+
+static int hdmi_8996_vco_prepare(struct clk *c, u32 ver)
+{
+	struct hdmi_pll_vco_clk *vco = to_hdmi_8996_vco_clk(c);
+	struct mdss_pll_resources *io = vco->priv;
+	int ret = 0;
+
+	DEV_DBG("rate=%ld\n", vco->rate);
+
+	if (!vco->rate_set && vco->rate)
+		ret = hdmi_8996_vco_set_rate(c, vco->rate, ver);
+
+	if (!ret) {
+		ret = mdss_pll_resource_enable(io, true);
+		if (ret)
+			DEV_ERR("pll resource can't be enabled\n");
+	}
+
+	return ret;
+}
+
+static int hdmi_8996_v1_vco_prepare(struct clk *c)
+{
+	return hdmi_8996_vco_prepare(c, HDMI_VERSION_8996_V1);
+}
+
+static int hdmi_8996_v2_vco_prepare(struct clk *c)
+{
+	return hdmi_8996_vco_prepare(c, HDMI_VERSION_8996_V2);
+}
+
+static int hdmi_8996_v3_vco_prepare(struct clk *c)
+{
+	return hdmi_8996_vco_prepare(c, HDMI_VERSION_8996_V3);
+}
+
+static int hdmi_8996_v3_1p8_vco_prepare(struct clk *c)
+{
+	return hdmi_8996_vco_prepare(c, HDMI_VERSION_8996_V3_1_8);
+}
+
+static void hdmi_8996_vco_unprepare(struct clk *c)
+{
+	struct hdmi_pll_vco_clk *vco = to_hdmi_8996_vco_clk(c);
+	struct mdss_pll_resources *io = vco->priv;
+
+	vco->rate_set = false;
+
+	if (!io) {
+		DEV_ERR("Invalid input parameter\n");
+		return;
+	}
+
+	if (!io->pll_on &&
+		mdss_pll_resource_enable(io, true)) {
+		DEV_ERR("pll resource can't be enabled\n");
+		return;
+	}
+
+	io->handoff_resources = false;
+	mdss_pll_resource_enable(io, false);
+	io->pll_on = false;
+}
+
+static enum handoff hdmi_8996_vco_handoff(struct clk *c)
+{
+	enum handoff ret = HANDOFF_DISABLED_CLK;
+	struct hdmi_pll_vco_clk *vco = to_hdmi_8996_vco_clk(c);
+	struct mdss_pll_resources *io = vco->priv;
+
+	if (is_gdsc_disabled(io))
+		return HANDOFF_DISABLED_CLK;
+
+	if (mdss_pll_resource_enable(io, true)) {
+		DEV_ERR("pll resource can't be enabled\n");
+		return ret;
+	}
+
+	io->handoff_resources = true;
+
+	if (MDSS_PLL_REG_R(io->pll_base, QSERDES_COM_C_READY_STATUS) & BIT(0)) {
+		if (MDSS_PLL_REG_R(io->phy_base, HDMI_PHY_STATUS) & BIT(0)) {
+			io->pll_on = true;
+			c->rate = hdmi_8996_vco_get_rate(c);
+			vco->rate = c->rate;
+			ret = HANDOFF_ENABLED_CLK;
+		} else {
+			io->handoff_resources = false;
+			mdss_pll_resource_enable(io, false);
+			DEV_DBG("%s: PHY not ready\n", __func__);
+		}
+	} else {
+		io->handoff_resources = false;
+		mdss_pll_resource_enable(io, false);
+		DEV_DBG("%s: PLL not locked\n", __func__);
+	}
+
+	DEV_DBG("done, ret=%d\n", ret);
+	return ret;
+}
+
+static struct clk_ops hdmi_8996_v1_vco_clk_ops = {
+	.enable = hdmi_8996_v1_vco_enable,
+	.set_rate = hdmi_8996_v1_vco_set_rate,
+	.get_rate = hdmi_8996_vco_get_rate,
+	.round_rate = hdmi_8996_vco_round_rate,
+	.prepare = hdmi_8996_v1_vco_prepare,
+	.unprepare = hdmi_8996_vco_unprepare,
+	.handoff = hdmi_8996_vco_handoff,
+};
+
+static struct clk_ops hdmi_8996_v2_vco_clk_ops = {
+	.enable = hdmi_8996_v2_vco_enable,
+	.set_rate = hdmi_8996_v2_vco_set_rate,
+	.get_rate = hdmi_8996_vco_get_rate,
+	.round_rate = hdmi_8996_vco_round_rate,
+	.prepare = hdmi_8996_v2_vco_prepare,
+	.unprepare = hdmi_8996_vco_unprepare,
+	.handoff = hdmi_8996_vco_handoff,
+};
+
+static struct clk_ops hdmi_8996_v3_vco_clk_ops = {
+	.enable = hdmi_8996_v3_vco_enable,
+	.set_rate = hdmi_8996_v3_vco_set_rate,
+	.get_rate = hdmi_8996_vco_get_rate,
+	.round_rate = hdmi_8996_vco_round_rate,
+	.prepare = hdmi_8996_v3_vco_prepare,
+	.unprepare = hdmi_8996_vco_unprepare,
+	.handoff = hdmi_8996_vco_handoff,
+};
+
+static struct clk_ops hdmi_8996_v3_1p8_vco_clk_ops = {
+	.enable = hdmi_8996_v3_1p8_vco_enable,
+	.set_rate = hdmi_8996_v3_1p8_vco_set_rate,
+	.get_rate = hdmi_8996_vco_get_rate,
+	.round_rate = hdmi_8996_vco_round_rate,
+	.prepare = hdmi_8996_v3_1p8_vco_prepare,
+	.unprepare = hdmi_8996_vco_unprepare,
+	.handoff = hdmi_8996_vco_handoff,
+};
+
+
+static struct hdmi_pll_vco_clk hdmi_vco_clk = {
+	.c = {
+		.dbg_name = "hdmi_8996_vco_clk",
+		.ops = &hdmi_8996_v1_vco_clk_ops,
+		CLK_INIT(hdmi_vco_clk.c),
+	},
+};
+
+static struct clk_lookup hdmipllcc_8996[] = {
+	CLK_LIST(hdmi_vco_clk),
+};
+
+int hdmi_8996_pll_clock_register(struct platform_device *pdev,
+				 struct mdss_pll_resources *pll_res, u32 ver)
+{
+	int rc = -ENOTSUPP;
+
+	if (!pll_res || !pll_res->phy_base || !pll_res->pll_base) {
+		DEV_ERR("%s: Invalid input parameters\n", __func__);
+		return -EPROBE_DEFER;
+	}
+
+	/* Set client data for vco, mux and div clocks */
+	hdmi_vco_clk.priv = pll_res;
+
+	switch (ver) {
+	case HDMI_VERSION_8996_V2:
+		hdmi_vco_clk.c.ops = &hdmi_8996_v2_vco_clk_ops;
+		break;
+	case HDMI_VERSION_8996_V3:
+		hdmi_vco_clk.c.ops = &hdmi_8996_v3_vco_clk_ops;
+		break;
+	case HDMI_VERSION_8996_V3_1_8:
+		hdmi_vco_clk.c.ops = &hdmi_8996_v3_1p8_vco_clk_ops;
+		break;
+	default:
+		hdmi_vco_clk.c.ops = &hdmi_8996_v1_vco_clk_ops;
+		break;
+	}
+
+	rc = of_msm_clock_register(pdev->dev.of_node, hdmipllcc_8996,
+					ARRAY_SIZE(hdmipllcc_8996));
+	if (rc) {
+		DEV_ERR("%s: Clock register failed rc=%d\n", __func__, rc);
+		rc = -EPROBE_DEFER;
+	} else {
+		DEV_DBG("%s SUCCESS\n", __func__);
+	}
+
+	return rc;
+}
+
+int hdmi_8996_v1_pll_clock_register(struct platform_device *pdev,
+				 struct mdss_pll_resources *pll_res)
+{
+	return hdmi_8996_pll_clock_register(pdev, pll_res,
+						HDMI_VERSION_8996_V1);
+}
+
+int hdmi_8996_v2_pll_clock_register(struct platform_device *pdev,
+				 struct mdss_pll_resources *pll_res)
+{
+	return hdmi_8996_pll_clock_register(pdev, pll_res,
+						HDMI_VERSION_8996_V2);
+}
+
+int hdmi_8996_v3_pll_clock_register(struct platform_device *pdev,
+				 struct mdss_pll_resources *pll_res)
+{
+	return hdmi_8996_pll_clock_register(pdev, pll_res,
+						HDMI_VERSION_8996_V3);
+}
+
+int hdmi_8996_v3_1p8_pll_clock_register(struct platform_device *pdev,
+				 struct mdss_pll_resources *pll_res)
+{
+	return hdmi_8996_pll_clock_register(pdev, pll_res,
+						HDMI_VERSION_8996_V3_1_8);
+}
diff -Nruw linux-4.4.115-fbx/drivers/clk/msm./mdss/mdss-hdmi-pll-8998.c linux-4.4.115-fbx/drivers/clk/msm/mdss/mdss-hdmi-pll-8998.c
--- linux-4.4.115-fbx/drivers/clk/msm./mdss/mdss-hdmi-pll-8998.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/clk/msm/mdss/mdss-hdmi-pll-8998.c	2019-01-22 16:16:23.023242060 +0100
@@ -0,0 +1,1054 @@
+/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/delay.h>
+#include <linux/iopoll.h>
+#include <linux/clk/msm-clk-provider.h>
+#include <linux/clk/msm-clk.h>
+#include <linux/clk/msm-clock-generic.h>
+#include <dt-bindings/clock/msm-clocks-8998.h>
+#include <linux/math64.h>
+
+#include "mdss-pll.h"
+#include "mdss-hdmi-pll.h"
+
+#define _W(x, y, z) MDSS_PLL_REG_W(x, y, z)
+#define _R(x, y)    MDSS_PLL_REG_R(x, y)
+
+/* CONSTANTS */
+#define HDMI_VERSION_8998_3_3		1
+#define HDMI_VERSION_8998_1_8		2
+
+/* PLL REGISTERS */
+#define FREQ_UPDATE                  (0x008)
+#define BIAS_EN_CLKBUFLR_EN          (0x034)
+#define CLK_ENABLE1                  (0x038)
+#define SYS_CLK_CTRL                 (0x03C)
+#define SYSCLK_BUF_ENABLE            (0x040)
+#define PLL_IVCO                     (0x048)
+#define CP_CTRL_MODE0                (0x060)
+#define PLL_RCTRL_MODE0              (0x068)
+#define PLL_CCTRL_MODE0              (0x070)
+#define SYSCLK_EN_SEL                (0x080)
+#define RESETSM_CNTRL                (0x088)
+#define LOCK_CMP_EN                  (0x090)
+#define LOCK_CMP1_MODE0              (0x098)
+#define LOCK_CMP2_MODE0              (0x09C)
+#define LOCK_CMP3_MODE0              (0x0A0)
+#define DEC_START_MODE0              (0x0B0)
+#define DIV_FRAC_START1_MODE0        (0x0B8)
+#define DIV_FRAC_START2_MODE0        (0x0BC)
+#define DIV_FRAC_START3_MODE0        (0x0C0)
+#define INTEGLOOP_GAIN0_MODE0        (0x0D8)
+#define INTEGLOOP_GAIN1_MODE0        (0x0DC)
+#define VCO_TUNE_CTRL                (0x0EC)
+#define VCO_TUNE_MAP                 (0x0F0)
+#define CLK_SELECT                   (0x138)
+#define HSCLK_SEL                    (0x13C)
+#define CORECLK_DIV_MODE0            (0x148)
+#define CORE_CLK_EN                  (0x154)
+#define C_READY_STATUS               (0x158)
+#define SVS_MODE_CLK_SEL             (0x164)
+
+/* Tx Channel PHY registers */
+#define PHY_TX_EMP_POST1_LVL(n)              ((((n) * 0x200) + 0x400) + 0x000)
+#define PHY_TX_INTERFACE_SELECT_TX_BAND(n)   ((((n) * 0x200) + 0x400) + 0x008)
+#define PHY_TX_CLKBUF_TERM_ENABLE(n)         ((((n) * 0x200) + 0x400) + 0x00C)
+#define PHY_TX_DRV_LVL_RES_CODE_OFFSET(n)    ((((n) * 0x200) + 0x400) + 0x014)
+#define PHY_TX_DRV_LVL(n)                    ((((n) * 0x200) + 0x400) + 0x018)
+#define PHY_TX_LANE_CONFIG(n)                ((((n) * 0x200) + 0x400) + 0x01C)
+#define PHY_TX_PRE_DRIVER_1(n)               ((((n) * 0x200) + 0x400) + 0x024)
+#define PHY_TX_PRE_DRIVER_2(n)               ((((n) * 0x200) + 0x400) + 0x028)
+#define PHY_TX_LANE_MODE(n)                  ((((n) * 0x200) + 0x400) + 0x02C)
+
+/* HDMI PHY registers */
+#define PHY_CFG                      (0x00)
+#define PHY_PD_CTL                   (0x04)
+#define PHY_MODE                     (0x10)
+#define PHY_CLOCK                    (0x5C)
+#define PHY_CMN_CTRL                 (0x68)
+#define PHY_STATUS                   (0xB4)
+
+#define HDMI_VCO_MAX_FREQ			12000000000
+#define HDMI_VCO_MIN_FREQ			8000000000
+#define HDMI_BIT_CLK_TO_PIX_CLK_RATIO		10
+#define HDMI_MHZ_TO_HZ				1000000
+#define HDMI_HZ_TO_MHZ				1000000
+#define HDMI_KHZ_TO_HZ				1000
+#define HDMI_REF_CLOCK_MHZ			19.2
+#define HDMI_REF_CLOCK_HZ			(HDMI_REF_CLOCK_MHZ * 1000000)
+#define HDMI_VCO_MIN_RATE_HZ			25000000
+#define HDMI_VCO_MAX_RATE_HZ			600000000
+
+struct hdmi_8998_reg_cfg {
+	u32 tx_band;
+	u32 svs_mode_clk_sel;
+	u32 hsclk_sel;
+	u32 lock_cmp_en;
+	u32 cctrl_mode0;
+	u32 rctrl_mode0;
+	u32 cpctrl_mode0;
+	u32 dec_start_mode0;
+	u32 div_frac_start1_mode0;
+	u32 div_frac_start2_mode0;
+	u32 div_frac_start3_mode0;
+	u32 integloop_gain0_mode0;
+	u32 integloop_gain1_mode0;
+	u32 lock_cmp1_mode0;
+	u32 lock_cmp2_mode0;
+	u32 lock_cmp3_mode0;
+	u32 ssc_per1;
+	u32 ssc_per2;
+	u32 ssc_step_size1;
+	u32 ssc_step_size2;
+	u32 core_clk_en;
+	u32 coreclk_div_mode0;
+	u32 phy_mode;
+	u64 vco_freq;
+	u32 hsclk_divsel;
+	u32 vco_ratio;
+	u32 ssc_en_center;
+
+	u32 l0_tx_drv_lvl;
+	u32 l0_tx_emp_post1_lvl;
+	u32 l1_tx_drv_lvl;
+	u32 l1_tx_emp_post1_lvl;
+	u32 l2_tx_drv_lvl;
+	u32 l2_tx_emp_post1_lvl;
+	u32 l3_tx_drv_lvl;
+	u32 l3_tx_emp_post1_lvl;
+
+	u32 l0_pre_driver_1;
+	u32 l0_pre_driver_2;
+	u32 l1_pre_driver_1;
+	u32 l1_pre_driver_2;
+	u32 l2_pre_driver_1;
+	u32 l2_pre_driver_2;
+	u32 l3_pre_driver_1;
+	u32 l3_pre_driver_2;
+
+	u32 l0_res_code_offset;
+	u32 l1_res_code_offset;
+	u32 l2_res_code_offset;
+	u32 l3_res_code_offset;
+
+	bool debug;
+};
+
+static void hdmi_8998_get_div(struct hdmi_8998_reg_cfg *cfg, unsigned long pclk)
+{
+	u32 const ratio_list[] = {1, 2, 3, 4, 5, 6,
+				     9, 10, 12, 15, 25};
+	u32 const band_list[] = {0, 1, 2, 3};
+	u32 const sz_ratio = ARRAY_SIZE(ratio_list);
+	u32 const sz_band = ARRAY_SIZE(band_list);
+	u32 const cmp_cnt = 1024;
+	u32 const th_min = 500, th_max = 1000;
+	u32 half_rate_mode = 0;
+	u32 list_elements;
+	int optimal_index;
+	u32 i, j, k;
+	u32 found_hsclk_divsel = 0, found_vco_ratio;
+	u32 found_tx_band_sel;
+	u64 const min_freq = HDMI_VCO_MIN_FREQ, max_freq = HDMI_VCO_MAX_FREQ;
+	u64 const bit_clk = ((u64)pclk) * HDMI_BIT_CLK_TO_PIX_CLK_RATIO;
+	u64 freq_list[sz_ratio * sz_band];
+	u64 found_vco_freq;
+	u64 freq_optimal;
+
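+	/*
+	 * Exhaustive search: candidate VCO = bit_clk * ratio * 2^band for
+	 * every (vco_ratio, tx_band) pair; keep the lowest candidate inside
+	 * [min_freq, max_freq] whose lock-comparator thresholds land in
+	 * [th_min, th_max], retrying in half-rate mode if nothing fits.
+	 */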
+find_optimal_index:
+	freq_optimal = max_freq;
+	optimal_index = -1;
+	list_elements = 0;
+
+	for (i = 0; i < sz_ratio; i++) {
+		for (j = 0; j < sz_band; j++) {
+			u64 freq = div_u64(bit_clk, (1 << half_rate_mode));
+
+			freq *= (ratio_list[i] * (1 << band_list[j]));
+			freq_list[list_elements++] = freq;
+		}
+	}
+
+	for (k = 0; k < ARRAY_SIZE(freq_list); k++) {
+		u32 const clks_pll_div = 2, core_clk_div = 5;
+		u32 const rng1 = 16, rng2 = 8;
+		u32 th1, th2;
+		u64 core_clk, rvar1, rem;
+
+		core_clk = (((freq_list[k] /
+			      ratio_list[k / sz_band]) /
+			      clks_pll_div) / core_clk_div);
+
+		rvar1 = HDMI_REF_CLOCK_HZ * rng1 * HDMI_MHZ_TO_HZ;
+		rvar1 = div64_u64_rem(rvar1, (cmp_cnt * core_clk), &rem);
+		if (rem > ((cmp_cnt * core_clk) >> 1))
+			rvar1++;
+		th1 = rvar1;
+
+		rvar1 = HDMI_REF_CLOCK_HZ * rng2 * HDMI_MHZ_TO_HZ;
+		rvar1 = div64_u64_rem(rvar1, (cmp_cnt * core_clk), &rem);
+		if (rem > ((cmp_cnt * core_clk) >> 1))
+			rvar1++;
+		th2 = rvar1;
+
+		if (freq_list[k] >= min_freq &&
+				freq_list[k] <= max_freq) {
+			if ((th1 >= th_min && th1 <= th_max) ||
+					(th2 >= th_min && th2 <= th_max)) {
+				if (freq_list[k] <= freq_optimal) {
+					freq_optimal = freq_list[k];
+					optimal_index = k;
+				}
+			}
+		}
+	}
+
+	if (optimal_index == -1) {
+		if (!half_rate_mode) {
+			half_rate_mode = 1;
+			goto find_optimal_index;
+		} else {
+			/* set to default values */
+			found_vco_freq = max_freq;
+			found_hsclk_divsel = 0;
+			found_vco_ratio = 2;
+			found_tx_band_sel = 0;
+			pr_err("Config error for pclk %ld\n", pclk);
+		}
+	} else {
+		found_vco_ratio = ratio_list[optimal_index / sz_band];
+		found_tx_band_sel = band_list[optimal_index % sz_band];
+		found_vco_freq = freq_optimal;
+	}
+
+	switch (found_vco_ratio) {
+	case 1:
+		found_hsclk_divsel = 15;
+		break;
+	case 2:
+		found_hsclk_divsel = 0;
+		break;
+	case 3:
+		found_hsclk_divsel = 4;
+		break;
+	case 4:
+		found_hsclk_divsel = 8;
+		break;
+	case 5:
+		found_hsclk_divsel = 12;
+		break;
+	case 6:
+		found_hsclk_divsel = 1;
+		break;
+	case 9:
+		found_hsclk_divsel = 5;
+		break;
+	case 10:
+		found_hsclk_divsel = 2;
+		break;
+	case 12:
+		found_hsclk_divsel = 9;
+		break;
+	case 15:
+		found_hsclk_divsel = 13;
+		break;
+	case 25:
+		found_hsclk_divsel = 14;
+		break;
+	}
+
+	pr_debug("found_vco_freq=%llu\n", found_vco_freq);
+	pr_debug("found_hsclk_divsel=%d\n", found_hsclk_divsel);
+	pr_debug("found_vco_ratio=%d\n", found_vco_ratio);
+	pr_debug("found_tx_band_sel=%d\n", found_tx_band_sel);
+	pr_debug("half_rate_mode=%d\n", half_rate_mode);
+	pr_debug("optimal_index=%d\n", optimal_index);
+
+	cfg->vco_freq = found_vco_freq;
+	cfg->hsclk_divsel = found_hsclk_divsel;
+	cfg->vco_ratio = found_vco_ratio;
+	cfg->tx_band = found_tx_band_sel;
+}
+
+static int hdmi_8998_config_phy(unsigned long rate,
+		struct hdmi_8998_reg_cfg *cfg, u32 ver)
+{
+	u64 const high_freq_bit_clk_threshold = 3400000000UL;
+	u64 const dig_freq_bit_clk_threshold = 1500000000UL;
+	u64 const mid_freq_bit_clk_threshold = 750000000;
+	int rc = 0;
+	u64 fdata, tmds_clk;
+	u32 pll_div = 4 * HDMI_REF_CLOCK_HZ;
+	u64 bclk;
+	u64 vco_freq;
+	u64 hsclk_sel, dec_start, div_frac_start;
+	u64 rem;
+	u64 cpctrl, rctrl, cctrl;
+	u64 integloop_gain;
+	u32 digclk_divsel;
+	u32 tmds_bclk_ratio;
+	u64 cmp_rng, cmp_cnt = 1024, pll_cmp;
+	bool gen_ssc = false;
+
+	bclk = rate * HDMI_BIT_CLK_TO_PIX_CLK_RATIO;
+
+	if (bclk > high_freq_bit_clk_threshold) {
+		tmds_clk = rate / 4;
+		tmds_bclk_ratio = 1;
+	} else {
+		tmds_clk = rate;
+		tmds_bclk_ratio = 0;
+	}
+
+	hdmi_8998_get_div(cfg, rate);
+
+	vco_freq = cfg->vco_freq;
+	fdata = cfg->vco_freq;
+	do_div(fdata, cfg->vco_ratio);
+
+	hsclk_sel = cfg->hsclk_divsel;
+	dec_start = vco_freq;
+	do_div(dec_start, pll_div);
+
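+	/*
+	 * Fractional-N programming: dec_start = floor(vco / (4 * ref)),
+	 * div_frac_start = the remaining fraction scaled by 2^20 and
+	 * rounded to the nearest step.
+	 */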
+	div_frac_start = vco_freq * (1 << 20);
+	rem = do_div(div_frac_start, pll_div);
+	div_frac_start -= (dec_start * (1 << 20));
+	if (rem > (pll_div >> 1))
+		div_frac_start++;
+
+	if ((div_frac_start != 0) || (gen_ssc == true)) {
+		cpctrl = 0x8;
+		rctrl = 0x16;
+		cctrl = 0x34;
+	} else {
+		cpctrl = 0x30;
+		rctrl = 0x18;
+		cctrl = 0x2;
+	}
+
+	digclk_divsel = (bclk > dig_freq_bit_clk_threshold) ? 0x1 : 0x2;
+
+	integloop_gain = ((div_frac_start != 0) ||
+			(gen_ssc == true)) ? 0x3F : 0xC4;
+	integloop_gain <<= (digclk_divsel == 2 ? 1 : 0);
+	integloop_gain = (integloop_gain <= 2046 ? integloop_gain : 0x7FE);
+
+	cmp_rng = gen_ssc ? 0x40 : 0x10;
+
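+	/* Lock target: pll_cmp = round(cmp_cnt * fdata / (10 * ref)) - 1 */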
+	pll_cmp = cmp_cnt * fdata;
+	rem = do_div(pll_cmp, (u32)(HDMI_REF_CLOCK_HZ * 10));
+	if (rem > ((u64)(HDMI_REF_CLOCK_HZ * 10) >> 1))
+		pll_cmp++;
+
+	pll_cmp = pll_cmp - 1;
+
+	pr_debug("VCO_FREQ = %llu\n", cfg->vco_freq);
+	pr_debug("FDATA = %llu\n", fdata);
+	pr_debug("DEC_START = %llu\n", dec_start);
+	pr_debug("DIV_FRAC_START = %llu\n", div_frac_start);
+	pr_debug("CPCTRL = %llu\n", cpctrl);
+	pr_debug("RCTRL = %llu\n", rctrl);
+	pr_debug("CCTRL = %llu\n", cctrl);
+	pr_debug("DIGCLK_DIVSEL = %u\n", digclk_divsel);
+	pr_debug("INTEGLOOP_GAIN = %llu\n", integloop_gain);
+	pr_debug("CMP_RNG = %llu\n", cmp_rng);
+	pr_debug("PLL_CMP = %llu\n", pll_cmp);
+	pr_debug("VER=%d\n", ver);
+
+	cfg->svs_mode_clk_sel = (digclk_divsel & 0xFF);
+	cfg->hsclk_sel = (0x20 | hsclk_sel);
+	cfg->lock_cmp_en = (gen_ssc ? 0x4 : 0x0);
+	cfg->cctrl_mode0 = (cctrl & 0xFF);
+	cfg->rctrl_mode0 = (rctrl & 0xFF);
+	cfg->cpctrl_mode0 = (cpctrl & 0xFF);
+	cfg->dec_start_mode0 = (dec_start & 0xFF);
+	cfg->div_frac_start1_mode0 = (div_frac_start & 0xFF);
+	cfg->div_frac_start2_mode0 = ((div_frac_start & 0xFF00) >> 8);
+	cfg->div_frac_start3_mode0 = ((div_frac_start & 0xF0000) >> 16);
+	cfg->integloop_gain0_mode0 = (integloop_gain & 0xFF);
+	cfg->integloop_gain1_mode0 = (integloop_gain & 0xF00) >> 8;
+	cfg->lock_cmp1_mode0 = (pll_cmp & 0xFF);
+	cfg->lock_cmp2_mode0 = ((pll_cmp & 0xFF00) >> 8);
+	cfg->lock_cmp3_mode0 = ((pll_cmp & 0x30000) >> 16);
+	cfg->ssc_per1 = 0;
+	cfg->ssc_per2 = 0;
+	cfg->ssc_step_size1 = 0;
+	cfg->ssc_step_size2 = 0;
+	cfg->core_clk_en = 0x2C;
+	cfg->coreclk_div_mode0 = 0x5;
+	cfg->phy_mode = (tmds_bclk_ratio ? 0x5 : 0x4);
+	/* V1P8_SEL */
+	if (ver == HDMI_VERSION_8998_1_8)
+		cfg->phy_mode |= 1 << 4;
+	cfg->ssc_en_center = 0x0;
+
+	if (ver == HDMI_VERSION_8998_3_3) {
+		if (bclk > high_freq_bit_clk_threshold) {
+			cfg->l0_tx_drv_lvl = 0xf;
+			cfg->l0_tx_emp_post1_lvl = 0x3;
+			cfg->l1_tx_drv_lvl = 0xf;
+			cfg->l1_tx_emp_post1_lvl = 0x2;
+			cfg->l2_tx_drv_lvl = 0xf;
+			cfg->l2_tx_emp_post1_lvl = 0x3;
+			cfg->l3_tx_drv_lvl = 0xf;
+			cfg->l3_tx_emp_post1_lvl = 0x0;
+			cfg->l0_pre_driver_1 = 0x0;
+			cfg->l0_pre_driver_2 = 0x1C;
+			cfg->l1_pre_driver_1 = 0x0;
+			cfg->l1_pre_driver_2 = 0x1C;
+			cfg->l2_pre_driver_1 = 0x0;
+			cfg->l2_pre_driver_2 = 0x1C;
+			cfg->l3_pre_driver_1 = 0x0;
+			cfg->l3_pre_driver_2 = 0x0;
+			cfg->l0_res_code_offset = 0x3;
+			cfg->l1_res_code_offset = 0x0;
+			cfg->l2_res_code_offset = 0x0;
+			cfg->l3_res_code_offset = 0x3;
+		} else if (bclk > dig_freq_bit_clk_threshold) {
+			cfg->l0_tx_drv_lvl = 0xf;
+			cfg->l0_tx_emp_post1_lvl = 0x3;
+			cfg->l1_tx_drv_lvl = 0xf;
+			cfg->l1_tx_emp_post1_lvl = 0x3;
+			cfg->l2_tx_drv_lvl = 0xf;
+			cfg->l2_tx_emp_post1_lvl = 0x3;
+			cfg->l3_tx_drv_lvl = 0xf;
+			cfg->l3_tx_emp_post1_lvl = 0x0;
+			cfg->l0_pre_driver_1 = 0x0;
+			cfg->l0_pre_driver_2 = 0x16;
+			cfg->l1_pre_driver_1 = 0x0;
+			cfg->l1_pre_driver_2 = 0x16;
+			cfg->l2_pre_driver_1 = 0x0;
+			cfg->l2_pre_driver_2 = 0x16;
+			cfg->l3_pre_driver_1 = 0x0;
+			cfg->l3_pre_driver_2 = 0x18;
+			cfg->l0_res_code_offset = 0x3;
+			cfg->l1_res_code_offset = 0x0;
+			cfg->l2_res_code_offset = 0x0;
+			cfg->l3_res_code_offset = 0x0;
+		} else if (bclk > mid_freq_bit_clk_threshold) {
+			cfg->l0_tx_drv_lvl = 0xf;
+			cfg->l0_tx_emp_post1_lvl = 0x5;
+			cfg->l1_tx_drv_lvl = 0xf;
+			cfg->l1_tx_emp_post1_lvl = 0x5;
+			cfg->l2_tx_drv_lvl = 0xf;
+			cfg->l2_tx_emp_post1_lvl = 0x5;
+			cfg->l3_tx_drv_lvl = 0xf;
+			cfg->l3_tx_emp_post1_lvl = 0x0;
+			cfg->l0_pre_driver_1 = 0x0;
+			cfg->l0_pre_driver_2 = 0x0E;
+			cfg->l1_pre_driver_1 = 0x0;
+			cfg->l1_pre_driver_2 = 0x0E;
+			cfg->l2_pre_driver_1 = 0x0;
+			cfg->l2_pre_driver_2 = 0x0E;
+			cfg->l3_pre_driver_1 = 0x0;
+			cfg->l3_pre_driver_2 = 0x0E;
+			cfg->l0_res_code_offset = 0x0;
+			cfg->l1_res_code_offset = 0x0;
+			cfg->l2_res_code_offset = 0x0;
+			cfg->l3_res_code_offset = 0x0;
+		} else {
+			cfg->l0_tx_drv_lvl = 0x1;
+			cfg->l0_tx_emp_post1_lvl = 0x0;
+			cfg->l1_tx_drv_lvl = 0x1;
+			cfg->l1_tx_emp_post1_lvl = 0x0;
+			cfg->l2_tx_drv_lvl = 0x1;
+			cfg->l2_tx_emp_post1_lvl = 0x0;
+			cfg->l3_tx_drv_lvl = 0x0;
+			cfg->l3_tx_emp_post1_lvl = 0x0;
+			cfg->l0_pre_driver_1 = 0x0;
+			cfg->l0_pre_driver_2 = 0x16;
+			cfg->l1_pre_driver_1 = 0x0;
+			cfg->l1_pre_driver_2 = 0x16;
+			cfg->l2_pre_driver_1 = 0x0;
+			cfg->l2_pre_driver_2 = 0x16;
+			cfg->l3_pre_driver_1 = 0x0;
+			cfg->l3_pre_driver_2 = 0x18;
+			cfg->l0_res_code_offset = 0x0;
+			cfg->l1_res_code_offset = 0x0;
+			cfg->l2_res_code_offset = 0x0;
+			cfg->l3_res_code_offset = 0x0;
+		}
+	} else {
+		cfg->l0_tx_drv_lvl = 0xF;
+		cfg->l0_tx_emp_post1_lvl = 0x5;
+		cfg->l1_tx_drv_lvl = 0xF;
+		cfg->l1_tx_emp_post1_lvl = 0x2;
+		cfg->l2_tx_drv_lvl = 0xF;
+		cfg->l2_tx_emp_post1_lvl = 0x2;
+		cfg->l3_tx_drv_lvl = 0xF;
+		cfg->l3_tx_emp_post1_lvl = 0x0;
+		cfg->l0_pre_driver_1 = 0x0;
+		cfg->l0_pre_driver_2 = 0x1E;
+		cfg->l1_pre_driver_1 = 0x0;
+		cfg->l1_pre_driver_2 = 0x1E;
+		cfg->l2_pre_driver_1 = 0x0;
+		cfg->l2_pre_driver_2 = 0x1E;
+		cfg->l3_pre_driver_1 = 0x0;
+		cfg->l3_pre_driver_2 = 0x10;
+		cfg->l0_res_code_offset = 0x3;
+		cfg->l1_res_code_offset = 0x0;
+		cfg->l2_res_code_offset = 0x0;
+		cfg->l3_res_code_offset = 0x3;
+	}
+
+	return rc;
+}
+
+static int hdmi_8998_pll_set_clk_rate(struct clk *c, unsigned long rate,
+					u32 ver)
+{
+	int rc = 0;
+	struct hdmi_pll_vco_clk *vco = to_hdmi_vco_clk(c);
+	struct mdss_pll_resources *io = vco->priv;
+	struct hdmi_8998_reg_cfg cfg = {0};
+	void __iomem *phy = io->phy_base, *pll = io->pll_base;
+
+	rc = hdmi_8998_config_phy(rate, &cfg, ver);
+	if (rc) {
+		pr_err("rate calculation failed\n, rc=%d", rc);
+		return rc;
+	}
+
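+	/*
+	 * Power-cycle the PHY, then program the common PLL block and the
+	 * four per-lane Tx blocks with the settings computed above.
+	 */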
+	_W(phy, PHY_PD_CTL, 0x0);
+	udelay(500);
+
+	_W(phy, PHY_PD_CTL, 0x1);
+	_W(pll, RESETSM_CNTRL, 0x20);
+	_W(phy, PHY_CMN_CTRL, 0x6);
+	_W(pll, PHY_TX_INTERFACE_SELECT_TX_BAND(0), cfg.tx_band);
+	_W(pll, PHY_TX_INTERFACE_SELECT_TX_BAND(1), cfg.tx_band);
+	_W(pll, PHY_TX_INTERFACE_SELECT_TX_BAND(2), cfg.tx_band);
+	_W(pll, PHY_TX_INTERFACE_SELECT_TX_BAND(3), cfg.tx_band);
+	_W(pll, PHY_TX_CLKBUF_TERM_ENABLE(0), 0x1);
+	_W(pll, PHY_TX_LANE_MODE(0), 0x20);
+	_W(pll, PHY_TX_LANE_MODE(1), 0x20);
+	_W(pll, PHY_TX_LANE_MODE(2), 0x20);
+	_W(pll, PHY_TX_LANE_MODE(3), 0x20);
+	_W(pll, PHY_TX_CLKBUF_TERM_ENABLE(1), 0x1);
+	_W(pll, PHY_TX_CLKBUF_TERM_ENABLE(2), 0x1);
+	_W(pll, PHY_TX_CLKBUF_TERM_ENABLE(3), 0x1);
+	_W(pll, SYSCLK_BUF_ENABLE, 0x2);
+	_W(pll, BIAS_EN_CLKBUFLR_EN, 0xB);
+	_W(pll, SYSCLK_EN_SEL, 0x37);
+	_W(pll, SYS_CLK_CTRL, 0x2);
+	_W(pll, CLK_ENABLE1, 0xE);
+	_W(pll, PLL_IVCO, 0x7);
+	_W(pll, VCO_TUNE_CTRL, 0x0);
+	_W(pll, SVS_MODE_CLK_SEL, cfg.svs_mode_clk_sel);
+	_W(pll, CLK_SELECT, 0x30);
+	_W(pll, HSCLK_SEL, cfg.hsclk_sel);
+	_W(pll, LOCK_CMP_EN, cfg.lock_cmp_en);
+	_W(pll, PLL_CCTRL_MODE0, cfg.cctrl_mode0);
+	_W(pll, PLL_RCTRL_MODE0, cfg.rctrl_mode0);
+	_W(pll, CP_CTRL_MODE0, cfg.cpctrl_mode0);
+	_W(pll, DEC_START_MODE0, cfg.dec_start_mode0);
+	_W(pll, DIV_FRAC_START1_MODE0, cfg.div_frac_start1_mode0);
+	_W(pll, DIV_FRAC_START2_MODE0, cfg.div_frac_start2_mode0);
+	_W(pll, DIV_FRAC_START3_MODE0, cfg.div_frac_start3_mode0);
+	_W(pll, INTEGLOOP_GAIN0_MODE0, cfg.integloop_gain0_mode0);
+	_W(pll, INTEGLOOP_GAIN1_MODE0, cfg.integloop_gain1_mode0);
+	_W(pll, LOCK_CMP1_MODE0, cfg.lock_cmp1_mode0);
+	_W(pll, LOCK_CMP2_MODE0, cfg.lock_cmp2_mode0);
+	_W(pll, LOCK_CMP3_MODE0, cfg.lock_cmp3_mode0);
+	_W(pll, VCO_TUNE_MAP, 0x0);
+	_W(pll, CORE_CLK_EN, cfg.core_clk_en);
+	_W(pll, CORECLK_DIV_MODE0, cfg.coreclk_div_mode0);
+
+	_W(pll, PHY_TX_DRV_LVL(0), cfg.l0_tx_drv_lvl);
+	_W(pll, PHY_TX_DRV_LVL(1), cfg.l1_tx_drv_lvl);
+	_W(pll, PHY_TX_DRV_LVL(2), cfg.l2_tx_drv_lvl);
+	_W(pll, PHY_TX_DRV_LVL(3), cfg.l3_tx_drv_lvl);
+
+	_W(pll, PHY_TX_EMP_POST1_LVL(0), cfg.l0_tx_emp_post1_lvl);
+	_W(pll, PHY_TX_EMP_POST1_LVL(1), cfg.l1_tx_emp_post1_lvl);
+	_W(pll, PHY_TX_EMP_POST1_LVL(2), cfg.l2_tx_emp_post1_lvl);
+	_W(pll, PHY_TX_EMP_POST1_LVL(3), cfg.l3_tx_emp_post1_lvl);
+
+	_W(pll, PHY_TX_PRE_DRIVER_1(0), cfg.l0_pre_driver_1);
+	_W(pll, PHY_TX_PRE_DRIVER_1(1), cfg.l1_pre_driver_1);
+	_W(pll, PHY_TX_PRE_DRIVER_1(2), cfg.l2_pre_driver_1);
+	_W(pll, PHY_TX_PRE_DRIVER_1(3), cfg.l3_pre_driver_1);
+
+	_W(pll, PHY_TX_PRE_DRIVER_2(0), cfg.l0_pre_driver_2);
+	_W(pll, PHY_TX_PRE_DRIVER_2(1), cfg.l1_pre_driver_2);
+	_W(pll, PHY_TX_PRE_DRIVER_2(2), cfg.l2_pre_driver_2);
+	_W(pll, PHY_TX_PRE_DRIVER_2(3), cfg.l3_pre_driver_2);
+
+	_W(pll, PHY_TX_DRV_LVL_RES_CODE_OFFSET(0), cfg.l0_res_code_offset);
+	_W(pll, PHY_TX_DRV_LVL_RES_CODE_OFFSET(1), cfg.l1_res_code_offset);
+	_W(pll, PHY_TX_DRV_LVL_RES_CODE_OFFSET(2), cfg.l2_res_code_offset);
+	_W(pll, PHY_TX_DRV_LVL_RES_CODE_OFFSET(3), cfg.l3_res_code_offset);
+
+	_W(phy, PHY_MODE, cfg.phy_mode);
+
+	_W(pll, PHY_TX_LANE_CONFIG(0), 0x10);
+	_W(pll, PHY_TX_LANE_CONFIG(1), 0x10);
+	_W(pll, PHY_TX_LANE_CONFIG(2), 0x10);
+	_W(pll, PHY_TX_LANE_CONFIG(3), 0x10);
+
+	/* Ensure all registers are flushed to hardware */
+	wmb();
+
+	return 0;
+}
+
+static int hdmi_8998_pll_lock_status(struct mdss_pll_resources *io)
+{
+	u32 const delay_us = 100;
+	u32 const timeout_us = 5000;
+	u32 status;
+	int rc = 0;
+	void __iomem *pll = io->pll_base;
+
+	rc = mdss_pll_resource_enable(io, true);
+	if (rc) {
+		pr_err("pll resource can't be enabled\n");
+		return rc;
+	}
+	rc = readl_poll_timeout_atomic(pll + C_READY_STATUS,
+			status,
+			((status & BIT(0)) > 0),
+			delay_us,
+			timeout_us);
+	if (rc)
+		pr_err("HDMI PLL(%d) lock failed, status=0x%08x\n",
+				io->index, status);
+	else
+		pr_debug("HDMI PLL(%d) lock passed, status=0x%08x\n",
+				io->index, status);
+
+	mdss_pll_resource_enable(io, false);
+
+	return rc;
+}
+
+static int hdmi_8998_phy_ready_status(struct mdss_pll_resources *io)
+{
+	u32 const delay_us = 100;
+	u32 const timeout_us = 5000;
+	u32 status;
+	int rc = 0;
+	void __iomem *phy = io->phy_base;
+
+	rc = mdss_pll_resource_enable(io, true);
+	if (rc) {
+		pr_err("pll resource can't be enabled\n");
+		return rc;
+	}
+
+	rc = readl_poll_timeout_atomic(phy + PHY_STATUS,
+			status,
+			((status & BIT(0)) > 0),
+			delay_us,
+			timeout_us);
+	if (rc)
+		pr_err("HDMI PHY(%d) not ready, status=0x%08x\n",
+				io->index, status);
+	else
+		pr_debug("HDMI PHY(%d) ready, status=0x%08x\n",
+				io->index, status);
+
+	mdss_pll_resource_enable(io, false);
+
+	return rc;
+}
+
+static int hdmi_8998_pll_enable(struct clk *c)
+{
+	int rc = 0;
+	struct hdmi_pll_vco_clk *vco = to_hdmi_vco_clk(c);
+	struct mdss_pll_resources *io = vco->priv;
+	void __iomem *phy = io->phy_base, *pll = io->pll_base;
+
+	_W(phy, PHY_CFG, 0x1);
+	udelay(100);
+	_W(phy, PHY_CFG, 0x59);
+	udelay(100);
+
+	/* Ensure all registers are flushed to hardware */
+	wmb();
+
+	rc = hdmi_8998_pll_lock_status(io);
+	if (rc) {
+		pr_err("PLL not locked, rc=%d\n", rc);
+		return rc;
+	}
+
+	_W(pll, PHY_TX_LANE_CONFIG(0), 0x1F);
+	_W(pll, PHY_TX_LANE_CONFIG(1), 0x1F);
+	_W(pll, PHY_TX_LANE_CONFIG(2), 0x1F);
+	_W(pll, PHY_TX_LANE_CONFIG(3), 0x1F);
+
+	/* Ensure all registers are flushed to hardware */
+	wmb();
+
+	rc = hdmi_8998_phy_ready_status(io);
+	if (rc) {
+		pr_err("PHY NOT READY, rc=%d\n", rc);
+		return rc;
+	}
+
+	_W(phy, PHY_CFG, 0x58);
+	udelay(1);
+	_W(phy, PHY_CFG, 0x59);
+
+	/* Ensure all registers are flushed to hardware */
+	wmb();
+
+	io->pll_on = true;
+	return rc;
+}
+
+/*
+ * Get the clock range allowed in atomic update. If clock rate
+ * goes beyond this range, a full tear down is required to set
+ * the new pixel clock.
+ */
+static int hdmi_8998_vco_get_lock_range(struct clk *c,
+	unsigned long pixel_clk)
+{
+	const u32 rng = 64, cmp_cnt = 1024;
+	const u32 coreclk_div = 5, clks_pll_divsel = 2;
+	u32 vco_freq, vco_ratio, ppm_range;
+	struct hdmi_8998_reg_cfg cfg = {0};
+
+	pr_debug("rate=%ld\n", pixel_clk);
+
+	hdmi_8998_get_div(&cfg, pixel_clk);
+	if (cfg.vco_ratio <= 0 || cfg.vco_freq <= 0) {
+		pr_err("couldn't get post div\n");
+		return -EINVAL;
+	}
+
+	do_div(cfg.vco_freq, HDMI_KHZ_TO_HZ * HDMI_KHZ_TO_HZ);
+
+	vco_freq  = (u32) cfg.vco_freq;
+	vco_ratio = (u32) cfg.vco_ratio;
+
+	pr_debug("freq %d, ratio %d\n", vco_freq, vco_ratio);
+
+	ppm_range = (rng * HDMI_REF_CLOCK_HZ) / cmp_cnt;
+	ppm_range /= vco_freq / vco_ratio;
+	ppm_range *= coreclk_div * clks_pll_divsel;
+
+	pr_debug("ppm range: %d\n", ppm_range);
+
+	return ppm_range;
+}
+
+static int hdmi_8998_vco_rate_atomic_update(struct clk *c,
+	unsigned long rate, u32 ver)
+{
+	struct hdmi_pll_vco_clk *vco = to_hdmi_vco_clk(c);
+	struct mdss_pll_resources *io = vco->priv;
+	void __iomem *pll;
+	struct hdmi_8998_reg_cfg cfg = {0};
+	int rc = 0;
+
+	rc = hdmi_8998_config_phy(rate, &cfg, ver);
+	if (rc) {
+		pr_err("rate calculation failed\n, rc=%d", rc);
+		goto end;
+	}
+
+	pll = io->pll_base;
+
+	_W(pll, DEC_START_MODE0, cfg.dec_start_mode0);
+	_W(pll, DIV_FRAC_START1_MODE0, cfg.div_frac_start1_mode0);
+	_W(pll, DIV_FRAC_START2_MODE0, cfg.div_frac_start2_mode0);
+	_W(pll, DIV_FRAC_START3_MODE0, cfg.div_frac_start3_mode0);
+
+	_W(pll, FREQ_UPDATE, 0x01);
+	_W(pll, FREQ_UPDATE, 0x00);
+
+	pr_debug("updated to rate %ld\n", rate);
+end:
+	return rc;
+}
+
+static int hdmi_8998_vco_set_rate(struct clk *c, unsigned long rate, u32 ver)
+{
+	struct hdmi_pll_vco_clk *vco = to_hdmi_vco_clk(c);
+	struct mdss_pll_resources *io = vco->priv;
+	unsigned int set_power_dwn = 0;
+	bool atomic_update = false;
+	int pll_lock_range = 0;
+	int rc = 0;
+
+	rc = mdss_pll_resource_enable(io, true);
+	if (rc) {
+		pr_err("pll resource enable failed, rc=%d\n", rc);
+		return rc;
+	}
+
+	pr_debug("rate %ld, vco_rate %ld\n", rate, vco->rate);
+
+	if (_R(io->pll_base, C_READY_STATUS) & BIT(0) &&
+		_R(io->phy_base, PHY_STATUS) & BIT(0)) {
+		pll_lock_range = hdmi_8998_vco_get_lock_range(c, vco->rate);
+
+		if (pll_lock_range > 0 && vco->rate) {
+			u32 range_limit;
+
+			range_limit  = pll_lock_range *
+				(vco->rate / HDMI_KHZ_TO_HZ);
+			range_limit /= HDMI_KHZ_TO_HZ;
+
+			pr_debug("range limit %d\n", range_limit);
+
+			if (abs(rate - vco->rate) < range_limit)
+				atomic_update = true;
+		}
+	}
+
+	if (io->pll_on && !atomic_update)
+		set_power_dwn = 1;
+
+	if (atomic_update)
+		rc = hdmi_8998_vco_rate_atomic_update(c, rate, ver);
+	else
+		rc = hdmi_8998_pll_set_clk_rate(c, rate, ver);
+
+	if (rc) {
+		pr_err("failed to set clk rate\n");
+		goto error;
+	}
+
+	if (set_power_dwn) {
+		rc = hdmi_8998_pll_enable(c);
+		if (rc) {
+			pr_err("failed to enable pll, rc=%d\n", rc);
+			goto error;
+		}
+	}
+
+	vco->rate = rate;
+	vco->rate_set = true;
+
+error:
+	(void)mdss_pll_resource_enable(io, false);
+
+	return rc;
+}
+
+static long hdmi_8998_vco_round_rate(struct clk *c, unsigned long rate)
+{
+	unsigned long rrate = rate;
+	struct hdmi_pll_vco_clk *vco = to_hdmi_vco_clk(c);
+
+	if (rate < vco->min_rate)
+		rrate = vco->min_rate;
+	if (rate > vco->max_rate)
+		rrate = vco->max_rate;
+
+	return rrate;
+}
+
+static int hdmi_8998_vco_prepare(struct clk *c, u32 ver)
+{
+	struct hdmi_pll_vco_clk *vco = to_hdmi_vco_clk(c);
+	struct mdss_pll_resources *io = vco->priv;
+	int rc = 0;
+
+	if (!io) {
+		pr_err("hdmi pll resources are not available\n");
+		return -EINVAL;
+	}
+
+	rc = mdss_pll_resource_enable(io, true);
+	if (rc) {
+		pr_err("pll resource enable failed, rc=%d\n", rc);
+		return rc;
+	}
+
+	if (!vco->rate_set && vco->rate) {
+		rc = hdmi_8998_pll_set_clk_rate(c, vco->rate, ver);
+		if (rc) {
+			pr_err("set rate failed, rc=%d\n", rc);
+			goto error;
+		}
+	}
+
+	rc = hdmi_8998_pll_enable(c);
+	if (rc)
+		pr_err("pll enabled failed, rc=%d\n", rc);
+
+error:
+	if (rc)
+		mdss_pll_resource_enable(io, false);
+
+	return rc;
+}
+
+static void hdmi_8998_pll_disable(struct hdmi_pll_vco_clk *vco)
+{
+	struct mdss_pll_resources *io = vco->priv;
+	void __iomem *phy = io->phy_base;
+
+	if (!io->pll_on)
+		return;
+
+	_W(phy, PHY_PD_CTL, 0x0);
+
+	/* Ensure all registers are flushed to hardware */
+	wmb();
+
+	vco->rate_set = false;
+	io->handoff_resources = false;
+	io->pll_on = false;
+}
+
+static void hdmi_8998_vco_unprepare(struct clk *c)
+{
+	struct hdmi_pll_vco_clk *vco = to_hdmi_vco_clk(c);
+	struct mdss_pll_resources *io = vco->priv;
+
+	if (!io) {
+		pr_err("HDMI pll resources not available\n");
+		return;
+	}
+
+	hdmi_8998_pll_disable(vco);
+	mdss_pll_resource_enable(io, false);
+}
+
+static enum handoff hdmi_8998_vco_handoff(struct clk *c)
+{
+	enum handoff ret = HANDOFF_DISABLED_CLK;
+	struct hdmi_pll_vco_clk *vco = to_hdmi_vco_clk(c);
+	struct mdss_pll_resources *io = vco->priv;
+
+	if (mdss_pll_resource_enable(io, true)) {
+		pr_err("pll resource can't be enabled\n");
+		return ret;
+	}
+
+	io->handoff_resources = true;
+
+	if (_R(io->pll_base, C_READY_STATUS) & BIT(0) &&
+			_R(io->phy_base, PHY_STATUS) & BIT(0)) {
+		io->pll_on = true;
+		/* TODO: calculate rate based on the phy/pll register values. */
+		ret = HANDOFF_ENABLED_CLK;
+	} else {
+		io->handoff_resources = false;
+		mdss_pll_resource_enable(io, false);
+		pr_debug("%s: PHY/PLL not ready\n", __func__);
+	}
+
+	pr_debug("done, ret=%d\n", ret);
+	return ret;
+}
+
+static int hdmi_8998_3p3_vco_set_rate(struct clk *c, unsigned long rate)
+{
+	return hdmi_8998_vco_set_rate(c, rate, HDMI_VERSION_8998_3_3);
+}
+
+static int hdmi_8998_1p8_vco_set_rate(struct clk *c, unsigned long rate)
+{
+	return hdmi_8998_vco_set_rate(c, rate, HDMI_VERSION_8998_1_8);
+}
+
+static int hdmi_8998_3p3_vco_prepare(struct clk *c)
+{
+	return hdmi_8998_vco_prepare(c, HDMI_VERSION_8998_3_3);
+}
+
+static int hdmi_8998_1p8_vco_prepare(struct clk *c)
+{
+	return hdmi_8998_vco_prepare(c, HDMI_VERSION_8998_1_8);
+}
+
+static struct clk_ops hdmi_8998_3p3_vco_clk_ops = {
+	.set_rate = hdmi_8998_3p3_vco_set_rate,
+	.round_rate = hdmi_8998_vco_round_rate,
+	.prepare = hdmi_8998_3p3_vco_prepare,
+	.unprepare = hdmi_8998_vco_unprepare,
+	.handoff = hdmi_8998_vco_handoff,
+};
+
+static struct clk_ops hdmi_8998_1p8_vco_clk_ops = {
+	.set_rate = hdmi_8998_1p8_vco_set_rate,
+	.round_rate = hdmi_8998_vco_round_rate,
+	.prepare = hdmi_8998_1p8_vco_prepare,
+	.unprepare = hdmi_8998_vco_unprepare,
+	.handoff = hdmi_8998_vco_handoff,
+};
+
+static struct hdmi_pll_vco_clk hdmi_vco_clk = {
+	.min_rate = HDMI_VCO_MIN_RATE_HZ,
+	.max_rate = HDMI_VCO_MAX_RATE_HZ,
+	.c = {
+		.dbg_name = "hdmi_8998_vco_clk",
+		.ops = &hdmi_8998_3p3_vco_clk_ops,
+		CLK_INIT(hdmi_vco_clk.c),
+	},
+};
+
+static struct clk_lookup hdmipllcc_8998[] = {
+	CLK_LIST(hdmi_vco_clk),
+};
+
+int hdmi_8998_pll_clock_register(struct platform_device *pdev,
+				struct mdss_pll_resources *pll_res, u32 ver)
+{
+	int rc = 0;
+
+	if (!pdev || !pll_res) {
+		pr_err("invalid input parameters\n");
+		return -EINVAL;
+	}
+
+	hdmi_vco_clk.priv = pll_res;
+
+	switch (ver) {
+	case HDMI_VERSION_8998_3_3:
+		hdmi_vco_clk.c.ops = &hdmi_8998_3p3_vco_clk_ops;
+		break;
+	case HDMI_VERSION_8998_1_8:
+		hdmi_vco_clk.c.ops = &hdmi_8998_1p8_vco_clk_ops;
+		break;
+	default:
+		hdmi_vco_clk.c.ops = &hdmi_8998_3p3_vco_clk_ops;
+		break;
+	}
+
+	rc = of_msm_clock_register(pdev->dev.of_node, hdmipllcc_8998,
+				ARRAY_SIZE(hdmipllcc_8998));
+	if (rc) {
+		pr_err("clock register failed, rc=%d\n", rc);
+		return rc;
+	}
+
+	return rc;
+}
+
+int hdmi_8998_3p3_pll_clock_register(struct platform_device *pdev,
+				struct mdss_pll_resources *pll_res)
+{
+	return hdmi_8998_pll_clock_register(pdev, pll_res,
+				HDMI_VERSION_8998_3_3);
+}
+
+int hdmi_8998_1p8_pll_clock_register(struct platform_device *pdev,
+				struct mdss_pll_resources *pll_res)
+{
+	return hdmi_8998_pll_clock_register(pdev, pll_res,
+				HDMI_VERSION_8998_1_8);
+}
diff -Nruw linux-4.4.115-fbx/drivers/clk/msm./mdss/mdss-hdmi-pll.h linux-4.4.115-fbx/drivers/clk/msm/mdss/mdss-hdmi-pll.h
--- linux-4.4.115-fbx/drivers/clk/msm./mdss/mdss-hdmi-pll.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/clk/msm/mdss/mdss-hdmi-pll.h	2019-01-22 16:16:23.023242060 +0100
@@ -0,0 +1,63 @@
+/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MDSS_HDMI_PLL_H
+#define __MDSS_HDMI_PLL_H
+
+struct hdmi_pll_cfg {
+	unsigned long vco_rate;
+	u32 reg;
+};
+
+struct hdmi_pll_vco_clk {
+	unsigned long	rate;	/* current vco rate */
+	unsigned long	min_rate;	/* min vco rate */
+	unsigned long	max_rate;	/* max vco rate */
+	bool		rate_set;
+	struct hdmi_pll_cfg *ip_seti;
+	struct hdmi_pll_cfg *cp_seti;
+	struct hdmi_pll_cfg *ip_setp;
+	struct hdmi_pll_cfg *cp_setp;
+	struct hdmi_pll_cfg *crctrl;
+	void		*priv;
+
+	struct clk	c;
+};
+
+static inline struct hdmi_pll_vco_clk *to_hdmi_vco_clk(struct clk *clk)
+{
+	return container_of(clk, struct hdmi_pll_vco_clk, c);
+}
+
+int hdmi_pll_clock_register(struct platform_device *pdev,
+				struct mdss_pll_resources *pll_res);
+
+int hdmi_20nm_pll_clock_register(struct platform_device *pdev,
+				struct mdss_pll_resources *pll_res);
+
+int hdmi_8996_v1_pll_clock_register(struct platform_device *pdev,
+				struct mdss_pll_resources *pll_res);
+
+int hdmi_8996_v2_pll_clock_register(struct platform_device *pdev,
+				struct mdss_pll_resources *pll_res);
+
+int hdmi_8996_v3_pll_clock_register(struct platform_device *pdev,
+				struct mdss_pll_resources *pll_res);
+
+int hdmi_8996_v3_1p8_pll_clock_register(struct platform_device *pdev,
+				struct mdss_pll_resources *pll_res);
+
+int hdmi_8998_3p3_pll_clock_register(struct platform_device *pdev,
+				struct mdss_pll_resources *pll_res);
+int hdmi_8998_1p8_pll_clock_register(struct platform_device *pdev,
+				struct mdss_pll_resources *pll_res);
+#endif
diff -Nruw linux-4.4.115-fbx/drivers/clk/msm./mdss/mdss-pll.c linux-4.4.115-fbx/drivers/clk/msm/mdss/mdss-pll.c
--- linux-4.4.115-fbx/drivers/clk/msm./mdss/mdss-pll.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/clk/msm/mdss/mdss-pll.c	2019-01-22 16:16:23.023242060 +0100
@@ -0,0 +1,443 @@
+/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt)	"%s: " fmt, __func__
+
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/delay.h>
+#include <linux/iopoll.h>
+#include <linux/clk/msm-clock-generic.h>
+
+#include "mdss-pll.h"
+#include "mdss-dsi-pll.h"
+#include "mdss-hdmi-pll.h"
+#include "mdss-dp-pll.h"
+
+int mdss_pll_resource_enable(struct mdss_pll_resources *pll_res, bool enable)
+{
+	int rc = 0;
+	int changed = 0;
+	if (!pll_res) {
+		pr_err("Invalid input parameters\n");
+		return -EINVAL;
+	}
+
+	/*
+	 * Don't turn off resources during handoff or add more than
+	 * 1 refcount.
+	 */
+	if (pll_res->handoff_resources &&
+		(!enable || (enable & pll_res->resource_enable))) {
+		pr_debug("Do not turn on/off pll resources during handoff case\n");
+		return rc;
+	}
+
+	if (enable) {
+		if (pll_res->resource_ref_cnt == 0)
+			changed++;
+		pll_res->resource_ref_cnt++;
+	} else {
+		if (pll_res->resource_ref_cnt) {
+			pll_res->resource_ref_cnt--;
+			if (pll_res->resource_ref_cnt == 0)
+				changed++;
+		} else {
+			pr_err("PLL Resources already OFF\n");
+		}
+	}
+
+	if (changed) {
+		rc = mdss_pll_util_resource_enable(pll_res, enable);
+		if (rc)
+			pr_err("Resource update failed rc=%d\n", rc);
+		else
+			pll_res->resource_enable = enable;
+	}
+
+	return rc;
+}
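+
+/*
+ * Illustrative call pattern (not part of the original source): callers
+ * are expected to keep enable/disable calls balanced, since only the
+ * 0->1 and 1->0 refcount transitions actually touch the hardware:
+ *
+ *	mdss_pll_resource_enable(pll_res, true);   refcount 0->1, powers up
+ *	mdss_pll_resource_enable(pll_res, true);   refcount 1->2, no-op
+ *	mdss_pll_resource_enable(pll_res, false);  refcount 2->1, no-op
+ *	mdss_pll_resource_enable(pll_res, false);  refcount 1->0, powers down
+ */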
+
+static int mdss_pll_resource_init(struct platform_device *pdev,
+				struct mdss_pll_resources *pll_res)
+{
+	if (!pdev || !pll_res) {
+		pr_err("Invalid input parameters\n");
+		return -EINVAL;
+	}
+
+	return mdss_pll_util_resource_init(pdev, pll_res);
+}
+
+static void mdss_pll_resource_deinit(struct platform_device *pdev,
+				struct mdss_pll_resources *pll_res)
+{
+	if (!pdev || !pll_res) {
+		pr_err("Invalid input parameters\n");
+		return;
+	}
+
+	mdss_pll_util_resource_deinit(pdev, pll_res);
+}
+
+static void mdss_pll_resource_release(struct platform_device *pdev,
+					struct mdss_pll_resources *pll_res)
+{
+	if (!pdev || !pll_res) {
+		pr_err("Invalid input parameters\n");
+		return;
+	}
+
+	mdss_pll_util_resource_release(pdev, pll_res);
+}
+
+static int mdss_pll_resource_parse(struct platform_device *pdev,
+				struct mdss_pll_resources *pll_res)
+{
+	int rc = 0;
+	const char *compatible_stream;
+
+	if (!pdev || !pll_res) {
+		pr_err("Invalid input parameters\n");
+		return -EINVAL;
+	}
+
+	rc = mdss_pll_util_resource_parse(pdev, pll_res);
+	if (rc) {
+		pr_err("Failed to parse the resources rc=%d\n", rc);
+		goto end;
+	}
+
+	compatible_stream = of_get_property(pdev->dev.of_node,
+				"compatible", NULL);
+	if (!compatible_stream) {
+		pr_err("Failed to parse the compatible stream\n");
+		rc = -EINVAL;
+		goto err;
+	}
+
+	if (!strcmp(compatible_stream, "qcom,mdss_dsi_pll_8996")) {
+		pll_res->pll_interface_type = MDSS_DSI_PLL_8996;
+		pll_res->target_id = MDSS_PLL_TARGET_8996;
+		pll_res->revision = 1;
+	} else if (!strcmp(compatible_stream, "qcom,mdss_dsi_pll_8996_v2")) {
+		pll_res->pll_interface_type = MDSS_DSI_PLL_8996;
+		pll_res->target_id = MDSS_PLL_TARGET_8996;
+		pll_res->revision = 2;
+	} else if (!strcmp(compatible_stream, "qcom,mdss_dsi_pll_8998")) {
+		pll_res->pll_interface_type = MDSS_DSI_PLL_8998;
+	} else if (!strcmp(compatible_stream, "qcom,mdss_dp_pll_8998")) {
+		pll_res->pll_interface_type = MDSS_DP_PLL_8998;
+	} else if (!strcmp(compatible_stream, "qcom,mdss_hdmi_pll_8996")) {
+		pll_res->pll_interface_type = MDSS_HDMI_PLL_8996;
+	} else if (!strcmp(compatible_stream, "qcom,mdss_hdmi_pll_8996_v2")) {
+		pll_res->pll_interface_type = MDSS_HDMI_PLL_8996_V2;
+	} else if (!strcmp(compatible_stream, "qcom,mdss_hdmi_pll_8996_v3")) {
+		pll_res->pll_interface_type = MDSS_HDMI_PLL_8996_V3;
+	} else if (!strcmp(compatible_stream,
+				"qcom,mdss_hdmi_pll_8996_v3_1p8")) {
+		pll_res->pll_interface_type = MDSS_HDMI_PLL_8996_V3_1_8;
+	} else if (!strcmp(compatible_stream, "qcom,mdss_hdmi_pll_8998")) {
+		pll_res->pll_interface_type = MDSS_HDMI_PLL_8998_3_3;
+	} else if (!strcmp(compatible_stream, "qcom,mdss_hdmi_pll_8998_1p8")) {
+		pll_res->pll_interface_type = MDSS_HDMI_PLL_8998_1_8;
+	} else {
+		rc = -EINVAL;
+		goto err;
+	}
+
+	return rc;
+
+err:
+	mdss_pll_resource_release(pdev, pll_res);
+end:
+	return rc;
+}
+
+static int mdss_pll_clock_register(struct platform_device *pdev,
+				struct mdss_pll_resources *pll_res)
+{
+	int rc;
+
+	if (!pdev || !pll_res) {
+		pr_err("Invalid input parameters\n");
+		return -EINVAL;
+	}
+
+	switch (pll_res->pll_interface_type) {
+	case MDSS_DSI_PLL_8996:
+		rc = dsi_pll_clock_register_8996(pdev, pll_res);
+		break;
+	case MDSS_DSI_PLL_8998:
+		rc = dsi_pll_clock_register_8998(pdev, pll_res);
+		break;
+	case MDSS_DP_PLL_8998:
+		rc = dp_pll_clock_register_8998(pdev, pll_res);
+		break;
+	case MDSS_HDMI_PLL_8996:
+		rc = hdmi_8996_v1_pll_clock_register(pdev, pll_res);
+		break;
+	case MDSS_HDMI_PLL_8996_V2:
+		rc = hdmi_8996_v2_pll_clock_register(pdev, pll_res);
+		break;
+	case MDSS_HDMI_PLL_8996_V3:
+		rc = hdmi_8996_v3_pll_clock_register(pdev, pll_res);
+		break;
+	case MDSS_HDMI_PLL_8996_V3_1_8:
+		rc = hdmi_8996_v3_1p8_pll_clock_register(pdev, pll_res);
+		break;
+	case MDSS_HDMI_PLL_8998_3_3:
+		rc = hdmi_8998_3p3_pll_clock_register(pdev, pll_res);
+		break;
+	case MDSS_HDMI_PLL_8998_1_8:
+		rc = hdmi_8998_1p8_pll_clock_register(pdev, pll_res);
+		break;
+	case MDSS_UNKNOWN_PLL:
+	default:
+		rc = -EINVAL;
+		break;
+	}
+
+	if (rc) {
+		pr_err("Pll ndx=%d clock register failed rc=%d\n",
+				pll_res->index, rc);
+	}
+
+	return rc;
+}
+
+static int mdss_pll_probe(struct platform_device *pdev)
+{
+	int rc = 0;
+	const char *label;
+	struct resource *pll_base_reg;
+	struct resource *phy_base_reg;
+	struct resource *dynamic_pll_base_reg;
+	struct resource *gdsc_base_reg;
+	struct mdss_pll_resources *pll_res;
+
+	if (!pdev->dev.of_node) {
+		pr_err("MDSS pll driver only supports device tree probe\n");
+		rc = -ENOTSUPP;
+		goto error;
+	}
+
+	label = of_get_property(pdev->dev.of_node, "label", NULL);
+	if (!label)
+		pr_info("%d: MDSS pll label not specified\n", __LINE__);
+	else
+		pr_info("MDSS pll label = %s\n", label);
+
+	pll_res = devm_kzalloc(&pdev->dev, sizeof(struct mdss_pll_resources),
+								GFP_KERNEL);
+	if (!pll_res) {
+		pr_err("Failed to allocate the clock pll\n");
+		rc = -ENOMEM;
+		goto error;
+	}
+	platform_set_drvdata(pdev, pll_res);
+
+	rc = of_property_read_u32(pdev->dev.of_node, "cell-index",
+			&pll_res->index);
+	if (rc) {
+		pr_err("Unable to get the cell-index rc=%d\n", rc);
+		pll_res->index = 0;
+	}
+
+	pll_res->ssc_en = of_property_read_bool(pdev->dev.of_node,
+						"qcom,dsi-pll-ssc-en");
+
+	if (pll_res->ssc_en) {
+		pr_info("label=%s PLL SSC enabled\n", label);
+
+		rc = of_property_read_u32(pdev->dev.of_node,
+			"qcom,ssc-frequency-hz", &pll_res->ssc_freq);
+
+		rc = of_property_read_u32(pdev->dev.of_node,
+			"qcom,ssc-ppm", &pll_res->ssc_ppm);
+
+		pll_res->ssc_center = false;
+
+		label = of_get_property(pdev->dev.of_node,
+			"qcom,dsi-pll-ssc-mode", NULL);
+
+		if (label && !strcmp(label, "center-spread"))
+			pll_res->ssc_center = true;
+	}
+
+	pll_base_reg = platform_get_resource_byname(pdev,
+						IORESOURCE_MEM, "pll_base");
+	if (!pll_base_reg) {
+		pr_err("Unable to get the pll base resources\n");
+		rc = -ENOMEM;
+		goto io_error;
+	}
+
+	pll_res->pll_base = ioremap(pll_base_reg->start,
+						resource_size(pll_base_reg));
+	if (!pll_res->pll_base) {
+		pr_err("Unable to remap pll base resources\n");
+		rc = -ENOMEM;
+		goto io_error;
+	}
+
+	pr_debug("ndx=%d base=%p\n",
+			pll_res->index, pll_res->pll_base);
+
+	rc = mdss_pll_resource_parse(pdev, pll_res);
+	if (rc) {
+		pr_err("Pll resource parsing from dt failed rc=%d\n", rc);
+		goto res_parse_error;
+	}
+
+	phy_base_reg = platform_get_resource_byname(pdev,
+						IORESOURCE_MEM, "phy_base");
+	if (phy_base_reg) {
+		pll_res->phy_base = ioremap(phy_base_reg->start,
+						resource_size(phy_base_reg));
+		if (!pll_res->phy_base) {
+			pr_err("Unable to remap pll phy base resources\n");
+			rc = -ENOMEM;
+			goto phy_io_error;
+		}
+	}
+
+	dynamic_pll_base_reg = platform_get_resource_byname(pdev,
+					IORESOURCE_MEM, "dynamic_pll_base");
+	if (dynamic_pll_base_reg) {
+		pll_res->dyn_pll_base = ioremap(dynamic_pll_base_reg->start,
+				resource_size(dynamic_pll_base_reg));
+		if (!pll_res->dyn_pll_base) {
+			pr_err("Unable to remap dynamic pll base resources\n");
+			rc = -ENOMEM;
+			goto dyn_pll_io_error;
+		}
+	}
+
+	gdsc_base_reg = platform_get_resource_byname(pdev,
+					IORESOURCE_MEM, "gdsc_base");
+	if (!gdsc_base_reg) {
+		pr_err("Unable to get the gdsc base resource\n");
+		rc = -ENOMEM;
+		goto gdsc_io_error;
+	}
+	pll_res->gdsc_base = ioremap(gdsc_base_reg->start,
+			resource_size(gdsc_base_reg));
+	if (!pll_res->gdsc_base) {
+		pr_err("Unable to remap gdsc base resources\n");
+		rc = -ENOMEM;
+		goto gdsc_io_error;
+	}
+
+	rc = mdss_pll_resource_init(pdev, pll_res);
+	if (rc) {
+		pr_err("Pll ndx=%d resource init failed rc=%d\n",
+				pll_res->index, rc);
+		goto res_init_error;
+	}
+
+	rc = mdss_pll_clock_register(pdev, pll_res);
+	if (rc) {
+		pr_err("Pll ndx=%d clock register failed rc=%d\n",
+			pll_res->index, rc);
+		goto clock_register_error;
+	}
+
+	return rc;
+
+clock_register_error:
+	mdss_pll_resource_deinit(pdev, pll_res);
+res_init_error:
+	if (pll_res->gdsc_base)
+		iounmap(pll_res->gdsc_base);
+gdsc_io_error:
+	if (pll_res->dyn_pll_base)
+		iounmap(pll_res->dyn_pll_base);
+dyn_pll_io_error:
+	if (pll_res->phy_base)
+		iounmap(pll_res->phy_base);
+phy_io_error:
+	mdss_pll_resource_release(pdev, pll_res);
+res_parse_error:
+	iounmap(pll_res->pll_base);
+io_error:
+	devm_kfree(&pdev->dev, pll_res);
+error:
+	return rc;
+}
+
+static int mdss_pll_remove(struct platform_device *pdev)
+{
+	struct mdss_pll_resources *pll_res;
+
+	pll_res = platform_get_drvdata(pdev);
+	if (!pll_res) {
+		pr_err("Invalid PLL resource data");
+		return 0;
+	}
+
+	mdss_pll_resource_deinit(pdev, pll_res);
+	if (pll_res->phy_base)
+		iounmap(pll_res->phy_base);
+	if (pll_res->gdsc_base)
+		iounmap(pll_res->gdsc_base);
+	mdss_pll_resource_release(pdev, pll_res);
+	iounmap(pll_res->pll_base);
+	devm_kfree(&pdev->dev, pll_res);
+	return 0;
+}
+
+static const struct of_device_id mdss_pll_dt_match[] = {
+	{.compatible = "qcom,mdss_dsi_pll_8996"},
+	{.compatible = "qcom,mdss_dsi_pll_8996_v2"},
+	{.compatible = "qcom,mdss_dsi_pll_8998"},
+	{.compatible = "qcom,mdss_hdmi_pll_8996"},
+	{.compatible = "qcom,mdss_hdmi_pll_8996_v2"},
+	{.compatible = "qcom,mdss_hdmi_pll_8996_v3"},
+	{.compatible = "qcom,mdss_hdmi_pll_8996_v3_1p8"},
+	{.compatible = "qcom,mdss_dp_pll_8998"},
+	{.compatible = "qcom,mdss_hdmi_pll_8998"},
+	{.compatible = "qcom,mdss_hdmi_pll_8998_1p8"},
+	{}
+};
+
+MODULE_DEVICE_TABLE(of, mdss_pll_dt_match);
+
+static struct platform_driver mdss_pll_driver = {
+	.probe = mdss_pll_probe,
+	.remove = mdss_pll_remove,
+	.driver = {
+		.name = "mdss_pll",
+		.of_match_table = mdss_pll_dt_match,
+	},
+};
+
+static int __init mdss_pll_driver_init(void)
+{
+	int rc;
+
+	rc = platform_driver_register(&mdss_pll_driver);
+	if (rc)
+		pr_err("mdss_register_pll_driver() failed!\n");
+
+	return rc;
+}
+subsys_initcall(mdss_pll_driver_init);
+
+static void __exit mdss_pll_driver_deinit(void)
+{
+	platform_driver_unregister(&mdss_pll_driver);
+}
+module_exit(mdss_pll_driver_deinit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("mdss pll driver");
diff -Nruw linux-4.4.115/drivers/clk/msm/mdss/mdss-pll.h linux-4.4.115-fbx/drivers/clk/msm/mdss/mdss-pll.h
--- linux-4.4.115/drivers/clk/msm/mdss/mdss-pll.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/clk/msm/mdss/mdss-pll.h	2019-01-22 16:16:23.023242060 +0100
@@ -0,0 +1,235 @@
+/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MDSS_PLL_H
+#define __MDSS_PLL_H
+
+#include <linux/mdss_io_util.h>
+#include <linux/clk/msm-clock-generic.h>
+#include <linux/io.h>
+
+#define MDSS_PLL_REG_W(base, offset, data)	\
+				writel_relaxed((data), (base) + (offset))
+#define MDSS_PLL_REG_R(base, offset)	readl_relaxed((base) + (offset))
+
+#define PLL_CALC_DATA(addr0, addr1, data0, data1)      \
+	(((data1) << 24) | ((((addr1) / 4) & 0xFF) << 16) | \
+	 ((data0) << 8) | (((addr0) / 4) & 0xFF))
+
+#define MDSS_DYN_PLL_REG_W(base, offset, addr0, addr1, data0, data1)   \
+		writel_relaxed(PLL_CALC_DATA(addr0, addr1, data0, data1), \
+			(base) + (offset))
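+
+/*
+ * Worked example (illustrative values, not from the original source):
+ * PLL_CALC_DATA(0x104, 0x108, 0x7f, 0x3f) packs two register writes
+ * into one 32-bit word as
+ *
+ *	(0x3f << 24) | ((0x108 / 4) << 16) | (0x7f << 8) | (0x104 / 4)
+ *	= 0x3f427f41
+ *
+ * i.e. bytes 0 and 2 carry the word-aligned register offsets (divided
+ * by 4 and truncated to 8 bits) and bytes 1 and 3 carry the data.
+ */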
+
+enum {
+	MDSS_DSI_PLL_8996,
+	MDSS_DSI_PLL_8998,
+	MDSS_DP_PLL_8998,
+	MDSS_HDMI_PLL_8996,
+	MDSS_HDMI_PLL_8996_V2,
+	MDSS_HDMI_PLL_8996_V3,
+	MDSS_HDMI_PLL_8996_V3_1_8,
+	MDSS_HDMI_PLL_8998_3_3,
+	MDSS_HDMI_PLL_8998_1_8,
+	MDSS_UNKNOWN_PLL,
+};
+
+enum {
+	MDSS_PLL_TARGET_8996,
+};
+
+#define DFPS_MAX_NUM_OF_FRAME_RATES 20
+
+struct dfps_panel_info {
+	uint32_t enabled;
+	uint32_t frame_rate_cnt;
+	uint32_t frame_rate[DFPS_MAX_NUM_OF_FRAME_RATES]; /* hz */
+};
+
+struct dfps_pll_codes {
+	uint32_t pll_codes_1;
+	uint32_t pll_codes_2;
+};
+
+struct dfps_codes_info {
+	uint32_t is_valid;
+	uint32_t frame_rate;	/* hz */
+	uint32_t clk_rate;	/* hz */
+	struct dfps_pll_codes pll_codes;
+};
+
+struct dfps_info {
+	struct dfps_panel_info panel_dfps;
+	struct dfps_codes_info codes_dfps[DFPS_MAX_NUM_OF_FRAME_RATES];
+	void *dfps_fb_base;
+	uint32_t chip_serial;
+};
+
+struct mdss_pll_resources {
+
+	/* PLL-specific resources like GPIOs, power supplies, clocks, etc. */
+	struct dss_module_power mp;
+
+	/*
+	 * DSI/eDP/HDMI PLL base register, PHY, GDSC and dynamic refresh
+	 * register mappings
+	 */
+	void __iomem	*pll_base;
+	void __iomem	*phy_base;
+	void __iomem	*gdsc_base;
+	void __iomem	*dyn_pll_base;
+
+	bool	is_init_locked;
+	s64	vco_current_rate;
+	s64	vco_locking_rate;
+	s64	vco_ref_clk_rate;
+
+	/*
+	 * Certain PLLs need to restore the same VCO rate after resume in
+	 * the suspend/resume scenario. The VCO rate is cached for such PLLs.
+	 */
+	unsigned long	vco_cached_rate;
+
+	/* DSI/eDP/HDMI PLL interface type */
+	u32		pll_interface_type;
+
+	/*
+	 * Target ID. Used in pll_register API for valid target check before
+	 * registering the PLL clocks.
+	 */
+	u32		target_id;
+
+	/* HW recommended delay during configuration of vco clock rate */
+	u32		vco_delay;
+
+	/* Ref-count of the PLL resources */
+	u32		resource_ref_cnt;
+
+	/*
+	 * Keep track of the resource status to avoid updating the same
+	 * status for the PLL from different paths
+	 */
+	bool		resource_enable;
+
+	/*
+	 * Certain PLLs do not allow a VCO rate update while they are on.
+	 * Track their on/off state so it can be restored after a
+	 * successful set-rate.
+	 */
+	bool		pll_on;
+
+	/*
+	 * handoff_resources is true if the PLL was already enabled by the
+	 * bootloader in the continuous splash case. The clock API calls the
+	 * handoff API to take over that state. It remains false when the
+	 * continuous splash feature is disabled.
+	 */
+	bool		handoff_resources;
+
+	/*
+	 * caching the pll trim codes in the case of dynamic refresh
+	 */
+	int		cache_pll_trim_codes[2];
+
+	/*
+	 * for maintaining the status of saving trim codes
+	 */
+	bool		reg_upd;
+
+	/*
+	 * Notifier callback for MDSS gdsc regulator events
+	 */
+	struct notifier_block gdsc_cb;
+
+	/*
+	 * Worker function to call PLL off event
+	 */
+	struct work_struct pll_off;
+
+	/*
+	 * PLL index when multiple instances are available, e.g. DSI
+	 * has two PLLs.
+	 */
+	uint32_t index;
+
+	bool ssc_en;	/* spread-spectrum clocking enabled */
+	bool ssc_center;	/* default is down spread */
+	u32 ssc_freq;
+	u32 ssc_ppm;
+
+	struct mdss_pll_resources *slave;
+
+	/*
+	 * target pll revision information
+	 */
+	int		revision;
+
+	void *priv;
+
+	/*
+	 * dynamic refresh pll codes stored in this structure
+	 */
+	struct dfps_info *dfps;
+
+};
+
+struct mdss_pll_vco_calc {
+	s32 div_frac_start1;
+	s32 div_frac_start2;
+	s32 div_frac_start3;
+	s64 dec_start1;
+	s64 dec_start2;
+	s64 pll_plllock_cmp1;
+	s64 pll_plllock_cmp2;
+	s64 pll_plllock_cmp3;
+};
+
+static inline bool is_gdsc_disabled(struct mdss_pll_resources *pll_res)
+{
+	if (!pll_res->gdsc_base) {
+		WARN(1, "gdsc_base register is not defined\n");
+		return true;
+	}
+
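+	/*
+	 * The expression below treats BIT(31) of the register at offset
+	 * 0x4 as a power-on status flag and BIT(0) at offset 0x0 as the
+	 * software-collapse request (register naming inferred): the GDSC
+	 * counts as enabled only when power-on is set and the collapse
+	 * bit is clear.
+	 */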
+	return ((readl_relaxed(pll_res->gdsc_base + 0x4) & BIT(31)) &&
+		(!(readl_relaxed(pll_res->gdsc_base) & BIT(0)))) ? false : true;
+}
+
+static inline int mdss_pll_div_prepare(struct clk *c)
+{
+	struct div_clk *div = to_div_clk(c);
+	/* Restore the divider's value */
+	return div->ops->set_div(div, div->data.div);
+}
+
+static inline int mdss_set_mux_sel(struct mux_clk *clk, int sel)
+{
+	return 0;
+}
+
+static inline int mdss_get_mux_sel(struct mux_clk *clk)
+{
+	return 0;
+}
+
+int mdss_pll_resource_enable(struct mdss_pll_resources *pll_res, bool enable);
+int mdss_pll_util_resource_init(struct platform_device *pdev,
+					struct mdss_pll_resources *pll_res);
+void mdss_pll_util_resource_deinit(struct platform_device *pdev,
+					 struct mdss_pll_resources *pll_res);
+void mdss_pll_util_resource_release(struct platform_device *pdev,
+					struct mdss_pll_resources *pll_res);
+int mdss_pll_util_resource_enable(struct mdss_pll_resources *pll_res,
+								bool enable);
+int mdss_pll_util_resource_parse(struct platform_device *pdev,
+				struct mdss_pll_resources *pll_res);
+struct dss_vreg *mdss_pll_get_mp_by_reg_name(struct mdss_pll_resources *pll_res,
+		char *name);
+#endif
diff -Nruw linux-4.4.115/drivers/clk/msm/mdss/mdss-pll-util.c linux-4.4.115-fbx/drivers/clk/msm/mdss/mdss-pll-util.c
--- linux-4.4.115/drivers/clk/msm/mdss/mdss-pll-util.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/clk/msm/mdss/mdss-pll-util.c	2019-01-22 16:16:23.023242060 +0100
@@ -0,0 +1,441 @@
+/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt)	"%s: " fmt, __func__
+
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/string.h>
+#include <linux/clk/msm-clock-generic.h>
+#include <linux/of_address.h>
+#include <linux/dma-mapping.h>
+#include <linux/vmalloc.h>
+#include <linux/memblock.h>
+
+#include "mdss-pll.h"
+
+int mdss_pll_util_resource_init(struct platform_device *pdev,
+					struct mdss_pll_resources *pll_res)
+{
+	int rc = 0;
+	struct dss_module_power *mp = &pll_res->mp;
+
+	rc = msm_dss_config_vreg(&pdev->dev,
+				mp->vreg_config, mp->num_vreg, 1);
+	if (rc) {
+		pr_err("Vreg config failed rc=%d\n", rc);
+		goto vreg_err;
+	}
+
+	rc = msm_dss_get_clk(&pdev->dev, mp->clk_config, mp->num_clk);
+	if (rc) {
+		pr_err("Clock get failed rc=%d\n", rc);
+		goto clk_err;
+	}
+
+	return rc;
+
+clk_err:
+	msm_dss_config_vreg(&pdev->dev, mp->vreg_config, mp->num_vreg, 0);
+vreg_err:
+	return rc;
+}
+
+/**
+ * mdss_pll_get_mp_by_reg_name() - Find the power module by regulator name
+ * @pll_res: Pointer to the PLL resources
+ * @name: Regulator name as specified in the PLL dtsi
+ *
+ * This is a helper function to retrieve the regulator information
+ * for each pll resource.
+ */
+struct dss_vreg *mdss_pll_get_mp_by_reg_name(struct mdss_pll_resources *pll_res,
+		char *name)
+{
+	struct dss_vreg *regulator = NULL;
+	int i;
+
+	if ((pll_res == NULL) || (pll_res->mp.vreg_config == NULL)) {
+		pr_err("Invalid PLL resource\n");
+		goto error;
+	}
+
+	regulator = pll_res->mp.vreg_config;
+
+	for (i = 0; i < pll_res->mp.num_vreg; i++) {
+		if (!strcmp(name, regulator->vreg_name)) {
+			pr_debug("Found regulator match for %s\n", name);
+			break;
+		}
+		regulator++;
+	}
+
+error:
+	return regulator;
+}
+
+void mdss_pll_util_resource_deinit(struct platform_device *pdev,
+					 struct mdss_pll_resources *pll_res)
+{
+	struct dss_module_power *mp = &pll_res->mp;
+
+	msm_dss_put_clk(mp->clk_config, mp->num_clk);
+
+	msm_dss_config_vreg(&pdev->dev, mp->vreg_config, mp->num_vreg, 0);
+}
+
+void mdss_pll_util_resource_release(struct platform_device *pdev,
+					struct mdss_pll_resources *pll_res)
+{
+	struct dss_module_power *mp = &pll_res->mp;
+
+	devm_kfree(&pdev->dev, mp->clk_config);
+	devm_kfree(&pdev->dev, mp->vreg_config);
+	mp->num_vreg = 0;
+	mp->num_clk = 0;
+}
+
+int mdss_pll_util_resource_enable(struct mdss_pll_resources *pll_res,
+								bool enable)
+{
+	int rc = 0;
+	struct dss_module_power *mp = &pll_res->mp;
+
+	if (enable) {
+		rc = msm_dss_enable_vreg(mp->vreg_config, mp->num_vreg, enable);
+		if (rc) {
+			pr_err("Failed to enable vregs rc=%d\n", rc);
+			goto vreg_err;
+		}
+
+		rc = msm_dss_clk_set_rate(mp->clk_config, mp->num_clk);
+		if (rc) {
+			pr_err("Failed to set clock rate rc=%d\n", rc);
+			goto clk_err;
+		}
+
+		rc = msm_dss_enable_clk(mp->clk_config, mp->num_clk, enable);
+		if (rc) {
+			pr_err("clock enable failed rc:%d\n", rc);
+			goto clk_err;
+		}
+	} else {
+		msm_dss_enable_clk(mp->clk_config, mp->num_clk, enable);
+
+		msm_dss_enable_vreg(mp->vreg_config, mp->num_vreg, enable);
+	}
+
+	return rc;
+
+clk_err:
+	msm_dss_enable_vreg(mp->vreg_config, mp->num_vreg, 0);
+vreg_err:
+	return rc;
+}
+
+static int mdss_pll_util_parse_dt_supply(struct platform_device *pdev,
+				struct mdss_pll_resources *pll_res)
+{
+	int i = 0, rc = 0;
+	u32 tmp = 0;
+	struct device_node *of_node = NULL, *supply_root_node = NULL;
+	struct device_node *supply_node = NULL;
+	struct dss_module_power *mp = &pll_res->mp;
+
+	of_node = pdev->dev.of_node;
+
+	mp->num_vreg = 0;
+	supply_root_node = of_get_child_by_name(of_node,
+						"qcom,platform-supply-entries");
+	if (!supply_root_node) {
+		pr_err("no supply entry present\n");
+		return rc;
+	}
+
+	for_each_child_of_node(supply_root_node, supply_node) {
+		mp->num_vreg++;
+	}
+
+	if (mp->num_vreg == 0) {
+		pr_debug("no vreg\n");
+		return rc;
+	} else {
+		pr_debug("vreg found. count=%d\n", mp->num_vreg);
+	}
+
+	mp->vreg_config = devm_kzalloc(&pdev->dev, sizeof(struct dss_vreg) *
+						mp->num_vreg, GFP_KERNEL);
+	if (!mp->vreg_config) {
+		pr_err("can't alloc vreg mem\n");
+		rc = -ENOMEM;
+		return rc;
+	}
+
+	for_each_child_of_node(supply_root_node, supply_node) {
+
+		const char *st = NULL;
+
+		rc = of_property_read_string(supply_node,
+						"qcom,supply-name", &st);
+		if (rc) {
+			pr_err("error reading name. rc=%d\n", rc);
+			goto error;
+		}
+
+		strlcpy(mp->vreg_config[i].vreg_name, st,
+					sizeof(mp->vreg_config[i].vreg_name));
+
+		rc = of_property_read_u32(supply_node,
+					"qcom,supply-min-voltage", &tmp);
+		if (rc) {
+			pr_err("error reading min volt. rc=%d\n", rc);
+			goto error;
+		}
+		mp->vreg_config[i].min_voltage = tmp;
+
+		rc = of_property_read_u32(supply_node,
+					"qcom,supply-max-voltage", &tmp);
+		if (rc) {
+			pr_err("error reading max volt. rc=%d\n", rc);
+			goto error;
+		}
+		mp->vreg_config[i].max_voltage = tmp;
+
+		rc = of_property_read_u32(supply_node,
+					"qcom,supply-enable-load", &tmp);
+		if (rc) {
+			pr_err("error reading enable load. rc=%d\n", rc);
+			goto error;
+		}
+		mp->vreg_config[i].enable_load = tmp;
+
+		rc = of_property_read_u32(supply_node,
+					"qcom,supply-disable-load", &tmp);
+		if (rc) {
+			pr_err("error reading disable load. rc=%d\n", rc);
+			goto error;
+		}
+		mp->vreg_config[i].disable_load = tmp;
+
+		rc = of_property_read_u32(supply_node,
+					"qcom,supply-pre-on-sleep", &tmp);
+		if (rc)
+			pr_debug("error reading supply pre sleep value. rc=%d\n",
+							rc);
+
+		mp->vreg_config[i].pre_on_sleep = (!rc ? tmp : 0);
+
+		rc = of_property_read_u32(supply_node,
+					"qcom,supply-pre-off-sleep", &tmp);
+		if (rc)
+			pr_debug("error reading supply pre sleep value. rc=%d\n",
+							rc);
+
+		mp->vreg_config[i].pre_off_sleep = (!rc ? tmp : 0);
+
+		rc = of_property_read_u32(supply_node,
+					"qcom,supply-post-on-sleep", &tmp);
+		if (rc)
+			pr_debug("error reading supply post sleep value. rc=%d\n",
+							rc);
+
+		mp->vreg_config[i].post_on_sleep = (!rc ? tmp : 0);
+
+		rc = of_property_read_u32(supply_node,
+					"qcom,supply-post-off-sleep", &tmp);
+		if (rc)
+			pr_debug("error reading supply post sleep value. rc=%d\n",
+							rc);
+
+		mp->vreg_config[i].post_off_sleep = (!rc ? tmp : 0);
+
+		pr_debug("%s min=%d, max=%d, enable=%d, disable=%d, preonsleep=%d, postonsleep=%d, preoffsleep=%d, postoffsleep=%d\n",
+					mp->vreg_config[i].vreg_name,
+					mp->vreg_config[i].min_voltage,
+					mp->vreg_config[i].max_voltage,
+					mp->vreg_config[i].enable_load,
+					mp->vreg_config[i].disable_load,
+					mp->vreg_config[i].pre_on_sleep,
+					mp->vreg_config[i].post_on_sleep,
+					mp->vreg_config[i].pre_off_sleep,
+					mp->vreg_config[i].post_off_sleep);
+		++i;
+
+		rc = 0;
+	}
+
+	return rc;
+
+error:
+	if (mp->vreg_config) {
+		devm_kfree(&pdev->dev, mp->vreg_config);
+		mp->vreg_config = NULL;
+		mp->num_vreg = 0;
+	}
+
+	return rc;
+}
+
+static int mdss_pll_util_parse_dt_clock(struct platform_device *pdev,
+					struct mdss_pll_resources *pll_res)
+{
+	int i = 0, rc = 0;
+	struct dss_module_power *mp = &pll_res->mp;
+	const char *clock_name;
+	u32 clock_rate;
+
+	mp->num_clk = of_property_count_strings(pdev->dev.of_node,
+							"clock-names");
+	if (mp->num_clk <= 0) {
+		pr_err("clocks are not defined\n");
+		goto clk_err;
+	}
+
+	mp->clk_config = devm_kzalloc(&pdev->dev,
+			sizeof(struct dss_clk) * mp->num_clk, GFP_KERNEL);
+	if (!mp->clk_config) {
+		pr_err("clock configuration allocation failed\n");
+		rc = -ENOMEM;
+		mp->num_clk = 0;
+		goto clk_err;
+	}
+
+	for (i = 0; i < mp->num_clk; i++) {
+		of_property_read_string_index(pdev->dev.of_node, "clock-names",
+							i, &clock_name);
+		strlcpy(mp->clk_config[i].clk_name, clock_name,
+				sizeof(mp->clk_config[i].clk_name));
+
+		of_property_read_u32_index(pdev->dev.of_node, "clock-rate",
+							i, &clock_rate);
+		mp->clk_config[i].rate = clock_rate;
+
+		if (!clock_rate)
+			mp->clk_config[i].type = DSS_CLK_AHB;
+		else
+			mp->clk_config[i].type = DSS_CLK_PCLK;
+	}
+
+clk_err:
+	return rc;
+}
+
+static void mdss_pll_free_bootmem(u32 mem_addr, u32 size)
+{
+	unsigned long pfn_start, pfn_end, pfn_idx;
+
+	pfn_start = mem_addr >> PAGE_SHIFT;
+	pfn_end = (mem_addr + size) >> PAGE_SHIFT;
+	for (pfn_idx = pfn_start; pfn_idx < pfn_end; pfn_idx++)
+		free_reserved_page(pfn_to_page(pfn_idx));
+}
+
+static int mdss_pll_util_parse_dt_dfps(struct platform_device *pdev,
+					struct mdss_pll_resources *pll_res)
+{
+	int rc = 0;
+	struct device_node *pnode;
+	const u32 *addr;
+	struct vm_struct *area;
+	u64 size;
+	u32 offsets[2];
+	unsigned long virt_add;
+
+	pnode = of_parse_phandle(pdev->dev.of_node, "memory-region", 0);
+	if (IS_ERR_OR_NULL(pnode)) {
+		rc = PTR_ERR(pnode);
+		goto pnode_err;
+	}
+
+	addr = of_get_address(pnode, 0, &size, NULL);
+	if (!addr) {
+		pr_err("failed to parse the dfps memory address\n");
+		rc = -EINVAL;
+		goto pnode_err;
+	}
+	/* maintain compatibility for 32/64 bit */
+	offsets[0] = (u32) of_read_ulong(addr, 2);
+	offsets[1] = (u32) size;
+
+	area = get_vm_area(offsets[1], VM_IOREMAP);
+	if (!area) {
+		rc = -ENOMEM;
+		goto dfps_mem_err;
+	}
+
+	virt_add = (unsigned long)area->addr;
+	rc = ioremap_page_range(virt_add, (virt_add + offsets[1]),
+			offsets[0], PAGE_KERNEL);
+	if (rc) {
+		rc = -ENOMEM;
+		goto ioremap_err;
+	}
+
+	pll_res->dfps = kzalloc(sizeof(struct dfps_info), GFP_KERNEL);
+	if (IS_ERR_OR_NULL(pll_res->dfps)) {
+		rc = PTR_ERR(pll_res->dfps);
+		pr_err("couldn't allocate dfps kernel memory\n");
+		goto addr_err;
+	}
+
+	/* memcopy complete dfps structure from kernel virtual memory */
+	memcpy_fromio(pll_res->dfps, area->addr, sizeof(struct dfps_info));
+
+addr_err:
+	if (virt_add)
+		unmap_kernel_range(virt_add, (unsigned long) size);
+ioremap_err:
+	if (area)
+		vfree(area->addr);
+dfps_mem_err:
+	/* free the dfps memory here */
+	memblock_free(offsets[0], offsets[1]);
+	mdss_pll_free_bootmem(offsets[0], offsets[1]);
+pnode_err:
+	if (pnode)
+		of_node_put(pnode);
+
+	dma_release_declared_memory(&pdev->dev);
+	return rc;
+}
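+
+/*
+ * The parser above expects a "memory-region" phandle on the PLL device
+ * node pointing at a reserved-memory carveout, e.g. (hypothetical dtsi
+ * snippet, names and sizes are illustrative):
+ *
+ *	dfps_data_mem: dfps_data_mem {
+ *		reg = <0x0 0x0 0x0 0x1000>;
+ *	};
+ *	...
+ *	memory-region = <&dfps_data_mem>;
+ *
+ * The carveout is mapped once, copied into pll_res->dfps, then unmapped
+ * and released back to the page allocator.
+ */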
+
+int mdss_pll_util_resource_parse(struct platform_device *pdev,
+				struct mdss_pll_resources *pll_res)
+{
+	int rc = 0;
+	struct dss_module_power *mp = &pll_res->mp;
+
+	rc = mdss_pll_util_parse_dt_supply(pdev, pll_res);
+	if (rc) {
+		pr_err("vreg parsing failed rc=%d\n", rc);
+		goto end;
+	}
+
+	rc = mdss_pll_util_parse_dt_clock(pdev, pll_res);
+	if (rc) {
+		pr_err("clock name parsing failed rc=%d", rc);
+		goto clk_err;
+	}
+
+	if (mdss_pll_util_parse_dt_dfps(pdev, pll_res))
+		pr_err("dfps not enabled!\n");
+
+	return rc;
+
+clk_err:
+	devm_kfree(&pdev->dev, mp->vreg_config);
+	mp->num_vreg = 0;
+end:
+	return rc;
+}
diff -Nruw linux-4.4.115/drivers/clk/msm/reset.c linux-4.4.115-fbx/drivers/clk/msm/reset.c
--- linux-4.4.115/drivers/clk/msm/reset.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/clk/msm/reset.c	2019-01-22 16:16:23.023242060 +0100
@@ -0,0 +1,100 @@
+/*
+ * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/device.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/reset-controller.h>
+
+#include "reset.h"
+
+static int msm_reset(struct reset_controller_dev *rcdev, unsigned long id)
+{
+	rcdev->ops->assert(rcdev, id);
+	udelay(1);
+	rcdev->ops->deassert(rcdev, id);
+	return 0;
+}
+
+static int
+msm_reset_assert(struct reset_controller_dev *rcdev, unsigned long id)
+{
+	struct msm_reset_controller *rst;
+	const struct msm_reset_map *map;
+	u32 regval;
+
+	rst = to_msm_reset_controller(rcdev);
+	map = &rst->reset_map[id];
+
+	regval = readl_relaxed(rst->base + map->reg);
+	regval |= BIT(map->bit);
+	writel_relaxed(regval, rst->base + map->reg);
+
+	/* Make sure the reset is asserted */
+	mb();
+
+	return 0;
+}
+
+static int
+msm_reset_deassert(struct reset_controller_dev *rcdev, unsigned long id)
+{
+	struct msm_reset_controller *rst;
+	const struct msm_reset_map *map;
+	u32 regval;
+
+	rst = to_msm_reset_controller(rcdev);
+	map = &rst->reset_map[id];
+
+	regval = readl_relaxed(rst->base + map->reg);
+	regval &= ~BIT(map->bit);
+	writel_relaxed(regval, rst->base + map->reg);
+
+	/* Make sure the reset is de-asserted */
+	mb();
+
+	return 0;
+}
+
+struct reset_control_ops msm_reset_ops = {
+	.reset = msm_reset,
+	.assert = msm_reset_assert,
+	.deassert = msm_reset_deassert,
+};
+EXPORT_SYMBOL_GPL(msm_reset_ops);
+
+int msm_reset_controller_register(struct platform_device *pdev,
+	const struct msm_reset_map *map, unsigned int num_resets,
+	void __iomem *virt_base)
+{
+	struct msm_reset_controller *reset;
+	int ret = 0;
+
+	reset = devm_kzalloc(&pdev->dev, sizeof(*reset), GFP_KERNEL);
+	if (!reset)
+		return -ENOMEM;
+
+	reset->rcdev.of_node = pdev->dev.of_node;
+	reset->rcdev.ops = &msm_reset_ops;
+	reset->rcdev.owner = pdev->dev.driver->owner;
+	reset->rcdev.nr_resets = num_resets;
+	reset->reset_map = map;
+	reset->base = virt_base;
+
+	ret = reset_controller_register(&reset->rcdev);
+	if (ret)
+		dev_err(&pdev->dev, "Failed to register with reset controller\n");
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(msm_reset_controller_register);
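+
+/*
+ * Illustrative registration from a clock-controller probe (the names
+ * below are hypothetical, not part of this file): each msm_reset_map
+ * entry gives the register offset and the reset bit within it.
+ *
+ *	static const struct msm_reset_map foo_cc_resets[] = {
+ *		[FOO_BCR] = { 0x1000, 0 },
+ *	};
+ *	...
+ *	msm_reset_controller_register(pdev, foo_cc_resets,
+ *				      ARRAY_SIZE(foo_cc_resets), base);
+ */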
diff -Nruw linux-4.4.115/drivers/clk/msm/reset.h linux-4.4.115-fbx/drivers/clk/msm/reset.h
--- linux-4.4.115/drivers/clk/msm/reset.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/clk/msm/reset.h	2019-01-22 16:16:23.023242060 +0100
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __DRIVERS_CLK_RESET_H
+#define __DRIVERS_CLK_RESET_H
+
+#include <linux/platform_device.h>
+#include <linux/reset-controller.h>
+
+struct msm_reset_map {
+	unsigned int reg;
+	u8 bit;
+};
+
+struct msm_reset_controller {
+	const struct msm_reset_map *reset_map;
+	struct reset_controller_dev rcdev;
+	void __iomem  *base;
+};
+
+#define to_msm_reset_controller(r) \
+	container_of(r, struct msm_reset_controller, rcdev)
+
+extern struct reset_control_ops msm_reset_ops;
+
+int msm_reset_controller_register(struct platform_device *pdev,
+		const struct msm_reset_map *map, unsigned int nr_resets,
+		void __iomem *virt_base);
+#endif
diff -Nruw linux-4.4.115/drivers/clk/msm/vdd-level-8998.h linux-4.4.115-fbx/drivers/clk/msm/vdd-level-8998.h
--- linux-4.4.115/drivers/clk/msm/vdd-level-8998.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/clk/msm/vdd-level-8998.h	2019-01-22 16:16:23.023242060 +0100
@@ -0,0 +1,124 @@
+/*
+ * Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __DRIVERS_CLK_QCOM_VDD_LEVEL_8998_H
+#define __DRIVERS_CLK_QCOM_VDD_LEVEL_8998_H
+
+#include <linux/clk/msm-clock-generic.h>
+#include <linux/regulator/rpm-smd-regulator.h>
+#include <linux/regulator/consumer.h>
+
+#define VDD_DIG_FMAX_MAP1(l1, f1) \
+	.vdd_class = &vdd_dig,			\
+	.fmax = (unsigned long[VDD_DIG_NUM]) {	\
+		[VDD_DIG_##l1] = (f1),		\
+	},					\
+	.num_fmax = VDD_DIG_NUM
+
+#define VDD_DIG_FMAX_MAP2(l1, f1, l2, f2) \
+	.vdd_class = &vdd_dig,			\
+	.fmax = (unsigned long[VDD_DIG_NUM]) {	\
+		[VDD_DIG_##l1] = (f1),		\
+		[VDD_DIG_##l2] = (f2),		\
+	},					\
+	.num_fmax = VDD_DIG_NUM
+
+#define VDD_DIG_FMAX_MAP3(l1, f1, l2, f2, l3, f3) \
+	.vdd_class = &vdd_dig,			\
+	.fmax = (unsigned long[VDD_DIG_NUM]) {	\
+		[VDD_DIG_##l1] = (f1),		\
+		[VDD_DIG_##l2] = (f2),		\
+		[VDD_DIG_##l3] = (f3),		\
+	},					\
+	.num_fmax = VDD_DIG_NUM
+
+#define VDD_DIG_FMAX_MAP4(l1, f1, l2, f2, l3, f3, l4, f4) \
+	.vdd_class = &vdd_dig,			\
+	.fmax = (unsigned long[VDD_DIG_NUM]) {	\
+		[VDD_DIG_##l1] = (f1),		\
+		[VDD_DIG_##l2] = (f2),		\
+		[VDD_DIG_##l3] = (f3),		\
+		[VDD_DIG_##l4] = (f4),		\
+	},					\
+	.num_fmax = VDD_DIG_NUM
+
+#define VDD_DIG_FMAX_MAP1_AO(l1, f1)		 \
+	.vdd_class = &vdd_dig_ao,		\
+	.fmax = (unsigned long[VDD_DIG_NUM]) {	\
+		[VDD_DIG_##l1] = (f1),		\
+	},					\
+	.num_fmax = VDD_DIG_NUM
+
+#define VDD_DIG_FMAX_MAP3_AO(l1, f1, l2, f2, l3, f3) \
+	.vdd_class = &vdd_dig_ao,			\
+	.fmax = (unsigned long[VDD_DIG_NUM]) {	\
+		[VDD_DIG_##l1] = (f1),		\
+		[VDD_DIG_##l2] = (f2),		\
+		[VDD_DIG_##l3] = (f3),		\
+	},					\
+	.num_fmax = VDD_DIG_NUM
+
+#define VDD_MM_PLL_FMAX_MAP1(l1, f1) \
+	.vdd_class = &vdd_mmsscc_mx,		\
+	.fmax = (unsigned long[VDD_DIG_NUM]) {	\
+		[VDD_DIG_##l1] = (f1),		\
+	},					\
+	.num_fmax = VDD_DIG_NUM
+
+#define VDD_MM_PLL_FMAX_MAP2(l1, f1, l2, f2) \
+	.vdd_class = &vdd_mmsscc_mx,		\
+	.fmax = (unsigned long[VDD_DIG_NUM]) {	\
+		[VDD_DIG_##l1] = (f1),		\
+		[VDD_DIG_##l2] = (f2),		\
+	},					\
+	.num_fmax = VDD_DIG_NUM
+
+
+#define VDD_GPU_PLL_FMAX_MAP1(l1, f1)  \
+	.vdd_class = &vdd_gpucc_mx,		\
+	.fmax = (unsigned long[VDD_DIG_NUM]) {	\
+		[VDD_DIG_##l1] = (f1),		\
+	},					\
+	.num_fmax = VDD_DIG_NUM
+
+#define VDD_GPU_PLL_FMAX_MAP3(l1, f1, l2, f2, l3, f3)  \
+	.vdd_class = &vdd_gpucc_mx,			\
+	.fmax = (unsigned long[VDD_DIG_NUM]) {		\
+		[VDD_DIG_##l1] = (f1),			\
+		[VDD_DIG_##l2] = (f2),			\
+		[VDD_DIG_##l3] = (f3),			\
+	},						\
+	.num_fmax = VDD_DIG_NUM
+
+enum vdd_dig_levels {
+	VDD_DIG_NONE,
+	VDD_DIG_MIN,		/* MIN SVS */
+	VDD_DIG_LOWER,		/* SVS2 */
+	VDD_DIG_LOW,		/* SVS */
+	VDD_DIG_LOW_L1,		/* SVSL1 */
+	VDD_DIG_NOMINAL,	/* NOM */
+	VDD_DIG_HIGH,		/* TURBO */
+	VDD_DIG_NUM
+};
+
+static int vdd_corner[] = {
+	RPM_REGULATOR_LEVEL_NONE,		/* VDD_DIG_NONE */
+	RPM_REGULATOR_LEVEL_MIN_SVS,		/* VDD_DIG_MIN */
+	RPM_REGULATOR_LEVEL_LOW_SVS,		/* VDD_DIG_LOWER */
+	RPM_REGULATOR_LEVEL_SVS,		/* VDD_DIG_LOW */
+	RPM_REGULATOR_LEVEL_SVS_PLUS,		/* VDD_DIG_LOW_L1 */
+	RPM_REGULATOR_LEVEL_NOM,		/* VDD_DIG_NOMINAL */
+	RPM_REGULATOR_LEVEL_TURBO,		/* VDD_DIG_HIGH */
+};
+
+#endif
diff -Nruw linux-4.4.115/drivers/clk/qcom/mdss/Kconfig linux-4.4.115-fbx/drivers/clk/qcom/mdss/Kconfig
--- linux-4.4.115/drivers/clk/qcom/mdss/Kconfig	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/clk/qcom/mdss/Kconfig	2019-01-22 16:16:23.039242205 +0100
@@ -0,0 +1,7 @@
+config QCOM_MDSS_PLL
+	bool "MDSS pll programming"
+	depends on COMMON_CLK_QCOM
+	---help---
+	  Provides support for DSI, eDP and HDMI interface PLL programming on
+	  MDSS hardware. It also handles the PLL-specific resources and turns
+	  them on/off when an MDSS PLL client enables/disables the PLL clocks.
diff -Nruw linux-4.4.115/drivers/clk/qcom/mdss/Makefile linux-4.4.115-fbx/drivers/clk/qcom/mdss/Makefile
--- linux-4.4.115/drivers/clk/qcom/mdss/Makefile	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/clk/qcom/mdss/Makefile	2019-01-22 16:16:23.039242205 +0100
@@ -0,0 +1,6 @@
+obj-$(CONFIG_QCOM_MDSS_PLL) += mdss-pll-util.o
+obj-$(CONFIG_QCOM_MDSS_PLL) += mdss-pll.o
+obj-$(CONFIG_QCOM_MDSS_PLL) += mdss-dsi-pll-14nm.o
+obj-$(CONFIG_QCOM_MDSS_PLL) += mdss-dsi-pll-14nm-util.o
+obj-$(CONFIG_QCOM_MDSS_PLL) += mdss-dp-pll-14nm.o
+obj-$(CONFIG_QCOM_MDSS_PLL) += mdss-dp-pll-14nm-util.o
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/cpufreq/cpufreq_governor_attr_set.c	2019-01-22 16:16:23.079242567 +0100
@@ -0,0 +1,84 @@
+/*
+ * Abstract code for CPUFreq governor tunable sysfs attributes.
+ *
+ * Copyright (C) 2016, Intel Corporation
+ * Author: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "cpufreq_governor.h"
+
+static inline struct gov_attr_set *to_gov_attr_set(struct kobject *kobj)
+{
+	return container_of(kobj, struct gov_attr_set, kobj);
+}
+
+static inline struct governor_attr *to_gov_attr(struct attribute *attr)
+{
+	return container_of(attr, struct governor_attr, attr);
+}
+
+static ssize_t governor_show(struct kobject *kobj, struct attribute *attr,
+			     char *buf)
+{
+	struct governor_attr *gattr = to_gov_attr(attr);
+
+	return gattr->show(to_gov_attr_set(kobj), buf);
+}
+
+static ssize_t governor_store(struct kobject *kobj, struct attribute *attr,
+			      const char *buf, size_t count)
+{
+	struct gov_attr_set *attr_set = to_gov_attr_set(kobj);
+	struct governor_attr *gattr = to_gov_attr(attr);
+	int ret;
+
+	mutex_lock(&attr_set->update_lock);
+	ret = attr_set->usage_count ? gattr->store(attr_set, buf, count) : -EBUSY;
+	mutex_unlock(&attr_set->update_lock);
+	return ret;
+}
+
+const struct sysfs_ops governor_sysfs_ops = {
+	.show	= governor_show,
+	.store	= governor_store,
+};
+EXPORT_SYMBOL_GPL(governor_sysfs_ops);
+
+void gov_attr_set_init(struct gov_attr_set *attr_set, struct list_head *list_node)
+{
+	INIT_LIST_HEAD(&attr_set->policy_list);
+	mutex_init(&attr_set->update_lock);
+	attr_set->usage_count = 1;
+	list_add(list_node, &attr_set->policy_list);
+}
+EXPORT_SYMBOL_GPL(gov_attr_set_init);
+
+void gov_attr_set_get(struct gov_attr_set *attr_set, struct list_head *list_node)
+{
+	mutex_lock(&attr_set->update_lock);
+	attr_set->usage_count++;
+	list_add(list_node, &attr_set->policy_list);
+	mutex_unlock(&attr_set->update_lock);
+}
+EXPORT_SYMBOL_GPL(gov_attr_set_get);
+
+unsigned int gov_attr_set_put(struct gov_attr_set *attr_set, struct list_head *list_node)
+{
+	unsigned int count;
+
+	mutex_lock(&attr_set->update_lock);
+	list_del(list_node);
+	count = --attr_set->usage_count;
+	mutex_unlock(&attr_set->update_lock);
+	if (count)
+		return count;
+
+	kobject_put(&attr_set->kobj);
+	mutex_destroy(&attr_set->update_lock);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(gov_attr_set_put);
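+
+/*
+ * Illustrative lifecycle (field names are hypothetical): a governor
+ * creates the attribute set for its first policy, attaches further
+ * policies with _get(), and frees its tunables once the last reference
+ * is dropped:
+ *
+ *	gov_attr_set_init(&tunables->attr_set, &policy_info->list);
+ *	gov_attr_set_get(&tunables->attr_set, &other_policy->list);
+ *	...
+ *	if (!gov_attr_set_put(&tunables->attr_set, &policy_info->list))
+ *		kfree(tunables);
+ */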
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/cpufreq/cpufreq_interactive.c	2019-10-29 09:26:23.493201710 +0100
@@ -0,0 +1,1876 @@
+/*
+ * drivers/cpufreq/cpufreq_interactive.c
+ *
+ * Copyright (C) 2010 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * Author: Mike Chan (mike@android.com)
+ *
+ */
+
+#include <linux/cpu.h>
+#include <linux/cpumask.h>
+#include <linux/cpufreq.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/rwsem.h>
+#include <linux/sched.h>
+#include <linux/sched/rt.h>
+#include <linux/tick.h>
+#include <linux/time.h>
+#include <linux/timer.h>
+#include <linux/hrtimer.h>
+#include <linux/workqueue.h>
+#include <linux/kthread.h>
+#include <linux/slab.h>
+
+#define CREATE_TRACE_POINTS
+#include <trace/events/cpufreq_interactive.h>
+
+struct cpufreq_interactive_policyinfo {
+	struct timer_list policy_timer;
+	struct timer_list policy_slack_timer;
+	struct hrtimer notif_timer;
+	spinlock_t load_lock; /* protects load tracking stat */
+	u64 last_evaluated_jiffy;
+	struct cpufreq_policy *policy;
+	struct cpufreq_policy p_nolim; /* policy copy with no limits */
+	struct cpufreq_frequency_table *freq_table;
+	spinlock_t target_freq_lock; /* protects target freq */
+	unsigned int target_freq;
+	unsigned int floor_freq;
+	unsigned int min_freq;
+	u64 floor_validate_time;
+	u64 hispeed_validate_time;
+	u64 max_freq_hyst_start_time;
+	struct rw_semaphore enable_sem;
+	bool reject_notification;
+	bool notif_pending;
+	unsigned long notif_cpu;
+	int governor_enabled;
+	struct cpufreq_interactive_tunables *cached_tunables;
+	struct sched_load *sl;
+};
+
+/* Protected by per-policy load_lock */
+struct cpufreq_interactive_cpuinfo {
+	u64 time_in_idle;
+	u64 time_in_idle_timestamp;
+	u64 cputime_speedadj;
+	u64 cputime_speedadj_timestamp;
+	unsigned int loadadjfreq;
+};
+
+static DEFINE_PER_CPU(struct cpufreq_interactive_policyinfo *, polinfo);
+static DEFINE_PER_CPU(struct cpufreq_interactive_cpuinfo, cpuinfo);
+
+/* realtime thread handles frequency scaling */
+static struct task_struct *speedchange_task;
+static cpumask_t speedchange_cpumask;
+static spinlock_t speedchange_cpumask_lock;
+static struct mutex gov_lock;
+
+static int set_window_count;
+static int migration_register_count;
+static struct mutex sched_lock;
+static cpumask_t controlled_cpus;
+
+/* Target load.  Lower values result in higher CPU speeds. */
+#define DEFAULT_TARGET_LOAD 90
+static unsigned int default_target_loads[] = {DEFAULT_TARGET_LOAD};
+
+#define DEFAULT_TIMER_RATE (20 * USEC_PER_MSEC)
+#define DEFAULT_ABOVE_HISPEED_DELAY DEFAULT_TIMER_RATE
+static unsigned int default_above_hispeed_delay[] = {
+	DEFAULT_ABOVE_HISPEED_DELAY };
+
+struct cpufreq_interactive_tunables {
+	int usage_count;
+	/* Hi speed to bump to from lo speed on a load burst (default max) */
+	unsigned int hispeed_freq;
+	/* Go to hi speed when CPU load at or above this value. */
+#define DEFAULT_GO_HISPEED_LOAD 99
+	unsigned long go_hispeed_load;
+	/* Target load. Lower values result in higher CPU speeds. */
+	spinlock_t target_loads_lock;
+	unsigned int *target_loads;
+	int ntarget_loads;
+	/*
+	 * The minimum amount of time to spend at a frequency before we can ramp
+	 * down.
+	 */
+#define DEFAULT_MIN_SAMPLE_TIME (80 * USEC_PER_MSEC)
+	unsigned long min_sample_time;
+	/*
+	 * The sample rate of the timer used to increase frequency
+	 */
+	unsigned long timer_rate;
+	/*
+	 * Wait this long before raising speed above hispeed, by default a
+	 * single timer interval.
+	 */
+	spinlock_t above_hispeed_delay_lock;
+	unsigned int *above_hispeed_delay;
+	int nabove_hispeed_delay;
+	/* Non-zero means indefinite speed boost active */
+	int boost_val;
+	/* Duration of a boost pulse in usecs */
+	int boostpulse_duration_val;
+	/* End time of boost pulse in ktime converted to usecs */
+	u64 boostpulse_endtime;
+	bool boosted;
+	/*
+	 * Max additional time to wait in idle, beyond timer_rate, at speeds
+	 * above minimum before wakeup to reduce speed, or -1 if unnecessary.
+	 */
+#define DEFAULT_TIMER_SLACK (4 * DEFAULT_TIMER_RATE)
+	int timer_slack_val;
+	bool io_is_busy;
+
+	/* scheduler input related flags */
+	bool use_sched_load;
+	bool use_migration_notif;
+
+	/*
+	 * Whether to align timer windows across all CPUs. When
+	 * use_sched_load is true, this flag is ignored and windows
+	 * will always be aligned.
+	 */
+	bool align_windows;
+
+	/*
+	 * Stay at max freq for at least max_freq_hysteresis before dropping
+	 * frequency.
+	 */
+	unsigned int max_freq_hysteresis;
+
+	/* Ignore hispeed_freq and above_hispeed_delay for notification */
+	bool ignore_hispeed_on_notif;
+
+	/* Ignore min_sample_time for notification */
+	bool fast_ramp_down;
+
+	/* Whether to enable prediction or not */
+	bool enable_prediction;
+};
+
+/* For cases where we have a single governor instance for the system */
+static struct cpufreq_interactive_tunables *common_tunables;
+static struct cpufreq_interactive_tunables *cached_common_tunables;
+
+static struct attribute_group *get_sysfs_attr(void);
+
+/* Round to starting jiffy of next evaluation window */
+static u64 round_to_nw_start(u64 jif,
+			     struct cpufreq_interactive_tunables *tunables)
+{
+	unsigned long step = usecs_to_jiffies(tunables->timer_rate);
+	u64 ret;
+
+	if (tunables->use_sched_load || tunables->align_windows) {
+		do_div(jif, step);
+		ret = (jif + 1) * step;
+	} else {
+		ret = jiffies + usecs_to_jiffies(tunables->timer_rate);
+	}
+
+	return ret;
+}
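+
+/*
+ * Worked example (illustrative, assumes HZ=100): with timer_rate =
+ * 20000 us, step = usecs_to_jiffies(20000) = 2 jiffies. For jif = 105
+ * with aligned windows, do_div truncates 105/2 to 52 and the next
+ * window starts at (52 + 1) * 2 = jiffy 106; unaligned callers simply
+ * get jiffies + 2 instead.
+ */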
+
+static inline int set_window_helper(
+			struct cpufreq_interactive_tunables *tunables)
+{
+	return sched_set_window(round_to_nw_start(get_jiffies_64(), tunables),
+			 usecs_to_jiffies(tunables->timer_rate));
+}
+
+static void cpufreq_interactive_timer_resched(unsigned long cpu,
+					      bool slack_only)
+{
+	struct cpufreq_interactive_policyinfo *ppol = per_cpu(polinfo, cpu);
+	struct cpufreq_interactive_cpuinfo *pcpu;
+	struct cpufreq_interactive_tunables *tunables =
+		ppol->policy->governor_data;
+	u64 expires;
+	unsigned long flags;
+	int i;
+
+	spin_lock_irqsave(&ppol->load_lock, flags);
+	expires = round_to_nw_start(ppol->last_evaluated_jiffy, tunables);
+	if (!slack_only) {
+		for_each_cpu(i, ppol->policy->cpus) {
+			pcpu = &per_cpu(cpuinfo, i);
+			pcpu->time_in_idle = get_cpu_idle_time(i,
+						&pcpu->time_in_idle_timestamp,
+						tunables->io_is_busy);
+			pcpu->cputime_speedadj = 0;
+			pcpu->cputime_speedadj_timestamp =
+						pcpu->time_in_idle_timestamp;
+		}
+		del_timer(&ppol->policy_timer);
+		ppol->policy_timer.expires = expires;
+		add_timer(&ppol->policy_timer);
+	}
+
+	if (tunables->timer_slack_val >= 0 &&
+	    ppol->target_freq > ppol->policy->min) {
+		expires += usecs_to_jiffies(tunables->timer_slack_val);
+		del_timer(&ppol->policy_slack_timer);
+		ppol->policy_slack_timer.expires = expires;
+		add_timer(&ppol->policy_slack_timer);
+	}
+
+	spin_unlock_irqrestore(&ppol->load_lock, flags);
+}
+
+/*
+ * The caller shall take the enable_sem write semaphore to avoid any timer
+ * race. The policy_timer and policy_slack_timer must be deactivated when
+ * calling this function.
+ */
+static void cpufreq_interactive_timer_start(
+	struct cpufreq_interactive_tunables *tunables, int cpu)
+{
+	struct cpufreq_interactive_policyinfo *ppol = per_cpu(polinfo, cpu);
+	struct cpufreq_interactive_cpuinfo *pcpu;
+	u64 expires = round_to_nw_start(ppol->last_evaluated_jiffy, tunables);
+	unsigned long flags;
+	int i;
+
+	spin_lock_irqsave(&ppol->load_lock, flags);
+	ppol->policy_timer.expires = expires;
+	add_timer(&ppol->policy_timer);
+	if (tunables->timer_slack_val >= 0 &&
+	    ppol->target_freq > ppol->policy->min) {
+		expires += usecs_to_jiffies(tunables->timer_slack_val);
+		ppol->policy_slack_timer.expires = expires;
+		add_timer(&ppol->policy_slack_timer);
+	}
+
+	for_each_cpu(i, ppol->policy->cpus) {
+		pcpu = &per_cpu(cpuinfo, i);
+		pcpu->time_in_idle =
+			get_cpu_idle_time(i, &pcpu->time_in_idle_timestamp,
+					  tunables->io_is_busy);
+		pcpu->cputime_speedadj = 0;
+		pcpu->cputime_speedadj_timestamp = pcpu->time_in_idle_timestamp;
+	}
+	spin_unlock_irqrestore(&ppol->load_lock, flags);
+}
+
+static unsigned int freq_to_above_hispeed_delay(
+	struct cpufreq_interactive_tunables *tunables,
+	unsigned int freq)
+{
+	int i;
+	unsigned int ret;
+	unsigned long flags;
+
+	spin_lock_irqsave(&tunables->above_hispeed_delay_lock, flags);
+
+	for (i = 0; i < tunables->nabove_hispeed_delay - 1 &&
+			freq >= tunables->above_hispeed_delay[i+1]; i += 2)
+		;
+
+	ret = tunables->above_hispeed_delay[i];
+	spin_unlock_irqrestore(&tunables->above_hispeed_delay_lock, flags);
+	return ret;
+}
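+
+/*
+ * The above_hispeed_delay array is a flattened "delay [freq delay]..."
+ * list: even indices hold delays (us), odd indices hold ascending
+ * frequency thresholds (kHz). E.g. (illustrative) {20000, 1400000,
+ * 50000} means: wait 20 ms below 1.4 GHz and 50 ms at or above it
+ * before raising speed further.
+ */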
+
+static unsigned int freq_to_targetload(
+	struct cpufreq_interactive_tunables *tunables, unsigned int freq)
+{
+	int i;
+	unsigned int ret;
+	unsigned long flags;
+
+	spin_lock_irqsave(&tunables->target_loads_lock, flags);
+
+	for (i = 0; i < tunables->ntarget_loads - 1 &&
+		    freq >= tunables->target_loads[i+1]; i += 2)
+		;
+
+	ret = tunables->target_loads[i];
+	spin_unlock_irqrestore(&tunables->target_loads_lock, flags);
+	return ret;
+}
+
+#define DEFAULT_MAX_LOAD 100
+u32 get_freq_max_load(int cpu, unsigned int freq)
+{
+	struct cpufreq_interactive_policyinfo *ppol = per_cpu(polinfo, cpu);
+
+	if (!cpumask_test_cpu(cpu, &controlled_cpus))
+		return DEFAULT_MAX_LOAD;
+
+	if (have_governor_per_policy()) {
+		if (!ppol || !ppol->cached_tunables)
+			return DEFAULT_MAX_LOAD;
+		return freq_to_targetload(ppol->cached_tunables, freq);
+	}
+
+	if (!cached_common_tunables)
+		return DEFAULT_MAX_LOAD;
+	return freq_to_targetload(cached_common_tunables, freq);
+}
+
+/*
+ * If increasing frequencies never map to a lower target load then
+ * choose_freq() will find the minimum frequency that does not exceed its
+ * target load given the current load.
+ */
+static unsigned int choose_freq(struct cpufreq_interactive_policyinfo *pcpu,
+		unsigned int loadadjfreq)
+{
+	unsigned int freq = pcpu->policy->cur;
+	unsigned int prevfreq, freqmin, freqmax;
+	unsigned int tl;
+	int index;
+
+	freqmin = 0;
+	freqmax = UINT_MAX;
+
+	do {
+		prevfreq = freq;
+		tl = freq_to_targetload(pcpu->policy->governor_data, freq);
+
+		/*
+		 * Find the lowest frequency where the computed load is less
+		 * than or equal to the target load.
+		 */
+
+		if (cpufreq_frequency_table_target(
+			    &pcpu->p_nolim, pcpu->freq_table, loadadjfreq / tl,
+			    CPUFREQ_RELATION_L, &index))
+			break;
+		freq = pcpu->freq_table[index].frequency;
+
+		if (freq > prevfreq) {
+			/* The previous frequency is too low. */
+			freqmin = prevfreq;
+
+			if (freq >= freqmax) {
+				/*
+				 * Find the highest frequency that is less
+				 * than freqmax.
+				 */
+				if (cpufreq_frequency_table_target(
+					    &pcpu->p_nolim, pcpu->freq_table,
+					    freqmax - 1, CPUFREQ_RELATION_H,
+					    &index))
+					break;
+				freq = pcpu->freq_table[index].frequency;
+
+				if (freq == freqmin) {
+					/*
+					 * The first frequency below freqmax
+					 * has already been found to be too
+					 * low.  freqmax is the lowest speed
+					 * we found that is fast enough.
+					 */
+					freq = freqmax;
+					break;
+				}
+			}
+		} else if (freq < prevfreq) {
+			/* The previous frequency is high enough. */
+			freqmax = prevfreq;
+
+			if (freq <= freqmin) {
+				/*
+				 * Find the lowest frequency that is higher
+				 * than freqmin.
+				 */
+				if (cpufreq_frequency_table_target(
+					    &pcpu->p_nolim, pcpu->freq_table,
+					    freqmin + 1, CPUFREQ_RELATION_L,
+					    &index))
+					break;
+				freq = pcpu->freq_table[index].frequency;
+
+				/*
+				 * If freqmax is the first frequency above
+				 * freqmin then we have already found that
+				 * this speed is fast enough.
+				 */
+				if (freq == freqmax)
+					break;
+			}
+		}
+
+		/* If same frequency chosen as previous then done. */
+	} while (freq != prevfreq);
+
+	return freq;
+}
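+
+/*
+ * Worked example (illustrative): with a table of {600000, 900000,
+ * 1200000} kHz and a flat 90% target load, a CPU at 600 MHz under 95%
+ * load has loadadjfreq = 95 * 600000 = 57000000, so the first pass
+ * asks for 57000000 / 90 = 633333 kHz and RELATION_L selects 900000.
+ * The second pass maps to the same 900000, freq == prevfreq, and the
+ * loop terminates.
+ */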
+
+static u64 update_load(int cpu)
+{
+	struct cpufreq_interactive_policyinfo *ppol = per_cpu(polinfo, cpu);
+	struct cpufreq_interactive_cpuinfo *pcpu = &per_cpu(cpuinfo, cpu);
+	struct cpufreq_interactive_tunables *tunables =
+		ppol->policy->governor_data;
+	u64 now;
+	u64 now_idle;
+	u64 delta_idle;
+	u64 delta_time;
+	u64 active_time;
+
+	now_idle = get_cpu_idle_time(cpu, &now, tunables->io_is_busy);
+	delta_idle = (now_idle - pcpu->time_in_idle);
+	delta_time = (now - pcpu->time_in_idle_timestamp);
+
+	if (delta_time <= delta_idle)
+		active_time = 0;
+	else
+		active_time = delta_time - delta_idle;
+
+	pcpu->cputime_speedadj += active_time * ppol->policy->cur;
+
+	pcpu->time_in_idle = now_idle;
+	pcpu->time_in_idle_timestamp = now;
+	return now;
+}
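+
+/*
+ * Note on units: cputime_speedadj accumulates (busy time in us) *
+ * (current freq in kHz); dividing by the elapsed wall time later
+ * yields a load-adjusted frequency. E.g. (illustrative) 10 ms busy at
+ * 1.4 GHz over a 20 ms window gives 700000 kHz, i.e. 50% load when
+ * expressed relative to a 1.4 GHz target frequency.
+ */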
+
+static unsigned int sl_busy_to_laf(struct cpufreq_interactive_policyinfo *ppol,
+				   unsigned long busy)
+{
+	int prev_load;
+	struct cpufreq_interactive_tunables *tunables =
+		ppol->policy->governor_data;
+
+	prev_load = mult_frac(ppol->policy->cpuinfo.max_freq * 100,
+				busy, tunables->timer_rate);
+	return prev_load;
+}
+
+#define NEW_TASK_RATIO 75
+#define PRED_TOLERANCE_PCT 10
+static void cpufreq_interactive_timer(unsigned long data)
+{
+	s64 now;
+	unsigned int delta_time;
+	u64 cputime_speedadj;
+	int cpu_load;
+	int pol_load = 0;
+	struct cpufreq_interactive_policyinfo *ppol = per_cpu(polinfo, data);
+	struct cpufreq_interactive_tunables *tunables =
+		ppol->policy->governor_data;
+	struct sched_load *sl = ppol->sl;
+	struct cpufreq_interactive_cpuinfo *pcpu;
+	unsigned int new_freq;
+	unsigned int prev_laf = 0, t_prevlaf;
+	unsigned int pred_laf = 0, t_predlaf = 0;
+	unsigned int prev_chfreq, pred_chfreq, chosen_freq;
+	unsigned int index;
+	unsigned long flags;
+	unsigned long max_cpu;
+	int cpu, i;
+	int new_load_pct = 0;
+	int prev_l, pred_l = 0;
+	struct cpufreq_govinfo govinfo;
+	bool skip_hispeed_logic, skip_min_sample_time;
+	bool jump_to_max_no_ts = false;
+	bool jump_to_max = false;
+	bool start_hyst = true;
+
+	if (!down_read_trylock(&ppol->enable_sem))
+		return;
+	if (!ppol->governor_enabled)
+		goto exit;
+
+	now = ktime_to_us(ktime_get());
+
+	spin_lock_irqsave(&ppol->target_freq_lock, flags);
+	spin_lock(&ppol->load_lock);
+
+	skip_hispeed_logic =
+		tunables->ignore_hispeed_on_notif && ppol->notif_pending;
+	skip_min_sample_time = tunables->fast_ramp_down && ppol->notif_pending;
+	ppol->notif_pending = false;
+	now = ktime_to_us(ktime_get());
+	ppol->last_evaluated_jiffy = get_jiffies_64();
+
+	if (tunables->use_sched_load)
+		sched_get_cpus_busy(sl, ppol->policy->cpus);
+	max_cpu = cpumask_first(ppol->policy->cpus);
+	i = 0;
+	for_each_cpu(cpu, ppol->policy->cpus) {
+		pcpu = &per_cpu(cpuinfo, cpu);
+		if (tunables->use_sched_load) {
+			t_prevlaf = sl_busy_to_laf(ppol, sl[i].prev_load);
+			prev_l = t_prevlaf / ppol->target_freq;
+			if (tunables->enable_prediction) {
+				t_predlaf = sl_busy_to_laf(ppol,
+						sl[i].predicted_load);
+				pred_l = t_predlaf / ppol->target_freq;
+			}
+			if (sl[i].prev_load)
+				new_load_pct = sl[i].new_task_load * 100 /
+							sl[i].prev_load;
+			else
+				new_load_pct = 0;
+		} else {
+			now = update_load(cpu);
+			delta_time = (unsigned int)
+				(now - pcpu->cputime_speedadj_timestamp);
+			if (WARN_ON_ONCE(!delta_time))
+				continue;
+			cputime_speedadj = pcpu->cputime_speedadj;
+			do_div(cputime_speedadj, delta_time);
+			t_prevlaf = (unsigned int)cputime_speedadj * 100;
+			prev_l = t_prevlaf / ppol->target_freq;
+		}
+
+		/* find max of loadadjfreq inside policy */
+		if (t_prevlaf > prev_laf) {
+			prev_laf = t_prevlaf;
+			max_cpu = cpu;
+		}
+		pred_laf = max(t_predlaf, pred_laf);
+
+		cpu_load = max(prev_l, pred_l);
+		pol_load = max(pol_load, cpu_load);
+		trace_cpufreq_interactive_cpuload(cpu, cpu_load, new_load_pct,
+						  prev_l, pred_l);
+
+		/* save loadadjfreq for notification */
+		pcpu->loadadjfreq = max(t_prevlaf, t_predlaf);
+
+		/* detect heavy new task and jump to policy->max */
+		if (prev_l >= tunables->go_hispeed_load &&
+		    new_load_pct >= NEW_TASK_RATIO) {
+			skip_hispeed_logic = true;
+			jump_to_max = true;
+		}
+		i++;
+	}
+	spin_unlock(&ppol->load_lock);
+
+	tunables->boosted = tunables->boost_val ||
+			    now < tunables->boostpulse_endtime;
+
+	prev_chfreq = choose_freq(ppol, prev_laf);
+	pred_chfreq = choose_freq(ppol, pred_laf);
+	chosen_freq = max(prev_chfreq, pred_chfreq);
+
+	if (prev_chfreq < ppol->policy->max && pred_chfreq >= ppol->policy->max)
+		if (!jump_to_max)
+			jump_to_max_no_ts = true;
+
+	if (now - ppol->max_freq_hyst_start_time <
+	    tunables->max_freq_hysteresis &&
+	    pol_load >= tunables->go_hispeed_load &&
+	    ppol->target_freq < ppol->policy->max) {
+		skip_hispeed_logic = true;
+		skip_min_sample_time = true;
+		if (!jump_to_max)
+			jump_to_max_no_ts = true;
+	}
+
+	new_freq = chosen_freq;
+	if (jump_to_max_no_ts || jump_to_max) {
+		new_freq = ppol->policy->cpuinfo.max_freq;
+	} else if (!skip_hispeed_logic) {
+		if (pol_load >= tunables->go_hispeed_load ||
+		    tunables->boosted) {
+			if (ppol->target_freq < tunables->hispeed_freq)
+				new_freq = tunables->hispeed_freq;
+			else
+				new_freq = max(new_freq,
+					       tunables->hispeed_freq);
+		}
+	}
+
+	if (now - ppol->max_freq_hyst_start_time <
+	    tunables->max_freq_hysteresis) {
+		if (new_freq < ppol->policy->max &&
+				ppol->policy->max <= tunables->hispeed_freq)
+			start_hyst = false;
+		new_freq = max(tunables->hispeed_freq, new_freq);
+	}
+
+	if (!skip_hispeed_logic &&
+	    ppol->target_freq >= tunables->hispeed_freq &&
+	    new_freq > ppol->target_freq &&
+	    now - ppol->hispeed_validate_time <
+	    freq_to_above_hispeed_delay(tunables, ppol->target_freq)) {
+		trace_cpufreq_interactive_notyet(
+			max_cpu, pol_load, ppol->target_freq,
+			ppol->policy->cur, new_freq);
+		spin_unlock_irqrestore(&ppol->target_freq_lock, flags);
+		goto rearm;
+	}
+
+	ppol->hispeed_validate_time = now;
+
+	if (cpufreq_frequency_table_target(&ppol->p_nolim, ppol->freq_table,
+					   new_freq, CPUFREQ_RELATION_L,
+					   &index)) {
+		spin_unlock_irqrestore(&ppol->target_freq_lock, flags);
+		goto rearm;
+	}
+
+	new_freq = ppol->freq_table[index].frequency;
+
+	/*
+	 * Do not scale below floor_freq unless we have been at or above the
+	 * floor frequency for the minimum sample time since last validated.
+	 */
+	if (!skip_min_sample_time && new_freq < ppol->floor_freq) {
+		if (now - ppol->floor_validate_time <
+				tunables->min_sample_time) {
+			trace_cpufreq_interactive_notyet(
+				max_cpu, pol_load, ppol->target_freq,
+				ppol->policy->cur, new_freq);
+			spin_unlock_irqrestore(&ppol->target_freq_lock, flags);
+			goto rearm;
+		}
+	}
+
+	/*
+	 * Update the timestamp for checking whether speed has been held at
+	 * or above the selected frequency for a minimum of min_sample_time,
+	 * if not boosted to hispeed_freq.  If boosted to hispeed_freq then we
+	 * allow the speed to drop as soon as the boostpulse duration expires
+	 * (or the indefinite boost is turned off). If policy->max is restored
+	 * for max_freq_hysteresis, don't extend the timestamp. Otherwise, it
+	 * could incorrectly extend the duration of max_freq_hysteresis by
+	 * min_sample_time.
+	 */
+
+	if ((!tunables->boosted || new_freq > tunables->hispeed_freq)
+	    && !jump_to_max_no_ts) {
+		ppol->floor_freq = new_freq;
+		ppol->floor_validate_time = now;
+	}
+
+	if (start_hyst && new_freq >= ppol->policy->max && !jump_to_max_no_ts)
+		ppol->max_freq_hyst_start_time = now;
+
+	if (ppol->target_freq == new_freq &&
+			ppol->target_freq <= ppol->policy->cur) {
+		trace_cpufreq_interactive_already(
+			max_cpu, pol_load, ppol->target_freq,
+			ppol->policy->cur, new_freq);
+		spin_unlock_irqrestore(&ppol->target_freq_lock, flags);
+		goto rearm;
+	}
+
+	trace_cpufreq_interactive_target(max_cpu, pol_load, ppol->target_freq,
+					 ppol->policy->cur, new_freq);
+
+	ppol->target_freq = new_freq;
+	spin_unlock_irqrestore(&ppol->target_freq_lock, flags);
+	spin_lock_irqsave(&speedchange_cpumask_lock, flags);
+	cpumask_set_cpu(max_cpu, &speedchange_cpumask);
+	spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);
+	wake_up_process_no_notif(speedchange_task);
+
+rearm:
+	if (!timer_pending(&ppol->policy_timer))
+		cpufreq_interactive_timer_resched(data, false);
+
+	/*
+	 * Send govinfo notification.
+	 * Govinfo notification could potentially wake up another thread
+	 * managed by its clients. Thread wakeups might trigger a load
+	 * change callback that executes this function again. Therefore
+	 * no spinlock could be held when sending the notification.
+	 */
+	for_each_cpu(i, ppol->policy->cpus) {
+		pcpu = &per_cpu(cpuinfo, i);
+		govinfo.cpu = i;
+		govinfo.load = pcpu->loadadjfreq / ppol->policy->max;
+		govinfo.sampling_rate_us = tunables->timer_rate;
+		atomic_notifier_call_chain(&cpufreq_govinfo_notifier_list,
+					   CPUFREQ_LOAD_CHANGE, &govinfo);
+	}
+
+exit:
+	up_read(&ppol->enable_sem);
+	return;
+}
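+
+/*
+ * Summary of the decision pipeline above: per-CPU loads are folded
+ * into one per-policy load-adjusted frequency, choose_freq() maps it
+ * through target_loads, and the result is then pushed up by
+ * hispeed/boost handling and held back by the above_hispeed_delay and
+ * floor_freq/min_sample_time rate limits before being handed to the
+ * speedchange task.
+ */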
+
+static int cpufreq_interactive_speedchange_task(void *data)
+{
+	unsigned int cpu;
+	cpumask_t tmp_mask;
+	unsigned long flags;
+	struct cpufreq_interactive_policyinfo *ppol;
+
+	while (1) {
+		set_current_state(TASK_INTERRUPTIBLE);
+		spin_lock_irqsave(&speedchange_cpumask_lock, flags);
+
+		if (cpumask_empty(&speedchange_cpumask)) {
+			spin_unlock_irqrestore(&speedchange_cpumask_lock,
+					       flags);
+			schedule();
+
+			if (kthread_should_stop())
+				break;
+
+			spin_lock_irqsave(&speedchange_cpumask_lock, flags);
+		}
+
+		set_current_state(TASK_RUNNING);
+		tmp_mask = speedchange_cpumask;
+		cpumask_clear(&speedchange_cpumask);
+		spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);
+
+		for_each_cpu(cpu, &tmp_mask) {
+			ppol = per_cpu(polinfo, cpu);
+			if (!down_read_trylock(&ppol->enable_sem))
+				continue;
+			if (!ppol->governor_enabled) {
+				up_read(&ppol->enable_sem);
+				continue;
+			}
+
+			if (ppol->target_freq != ppol->policy->cur)
+				__cpufreq_driver_target(ppol->policy,
+							ppol->target_freq,
+							CPUFREQ_RELATION_H);
+			trace_cpufreq_interactive_setspeed(cpu,
+						     ppol->target_freq,
+						     ppol->policy->cur);
+			up_read(&ppol->enable_sem);
+		}
+	}
+
+	return 0;
+}
+
+static void cpufreq_interactive_boost(struct cpufreq_interactive_tunables *tunables)
+{
+	int i;
+	int anyboost = 0;
+	unsigned long flags[2];
+	struct cpufreq_interactive_policyinfo *ppol;
+
+	tunables->boosted = true;
+
+	spin_lock_irqsave(&speedchange_cpumask_lock, flags[0]);
+
+	for_each_online_cpu(i) {
+		ppol = per_cpu(polinfo, i);
+		if (!ppol || tunables != ppol->policy->governor_data)
+			continue;
+
+		spin_lock_irqsave(&ppol->target_freq_lock, flags[1]);
+		if (ppol->target_freq < tunables->hispeed_freq) {
+			ppol->target_freq = tunables->hispeed_freq;
+			cpumask_set_cpu(i, &speedchange_cpumask);
+			ppol->hispeed_validate_time =
+				ktime_to_us(ktime_get());
+			anyboost = 1;
+		}
+
+		/*
+		 * Set floor freq and (re)start timer for when last
+		 * validated.
+		 */
+
+		ppol->floor_freq = tunables->hispeed_freq;
+		ppol->floor_validate_time = ktime_to_us(ktime_get());
+		spin_unlock_irqrestore(&ppol->target_freq_lock, flags[1]);
+		break;
+	}
+
+	spin_unlock_irqrestore(&speedchange_cpumask_lock, flags[0]);
+
+	if (anyboost)
+		wake_up_process_no_notif(speedchange_task);
+}
+
+static int load_change_callback(struct notifier_block *nb, unsigned long val,
+				void *data)
+{
+	unsigned long cpu = (unsigned long) data;
+	struct cpufreq_interactive_policyinfo *ppol = per_cpu(polinfo, cpu);
+	struct cpufreq_interactive_tunables *tunables;
+	unsigned long flags;
+
+	if (!ppol || ppol->reject_notification)
+		return 0;
+
+	if (!down_read_trylock(&ppol->enable_sem))
+		return 0;
+	if (!ppol->governor_enabled)
+		goto exit;
+
+	tunables = ppol->policy->governor_data;
+	if (!tunables->use_sched_load || !tunables->use_migration_notif)
+		goto exit;
+
+	spin_lock_irqsave(&ppol->target_freq_lock, flags);
+	ppol->notif_pending = true;
+	ppol->notif_cpu = cpu;
+	spin_unlock_irqrestore(&ppol->target_freq_lock, flags);
+
+	if (!hrtimer_is_queued(&ppol->notif_timer))
+		hrtimer_start(&ppol->notif_timer, ms_to_ktime(1),
+			      HRTIMER_MODE_REL);
+exit:
+	up_read(&ppol->enable_sem);
+	return 0;
+}
+
+static enum hrtimer_restart cpufreq_interactive_hrtimer(struct hrtimer *timer)
+{
+	struct cpufreq_interactive_policyinfo *ppol = container_of(timer,
+			struct cpufreq_interactive_policyinfo, notif_timer);
+	int cpu;
+
+	if (!down_read_trylock(&ppol->enable_sem))
+		return HRTIMER_NORESTART;
+	if (!ppol->governor_enabled) {
+		up_read(&ppol->enable_sem);
+		return HRTIMER_NORESTART;
+	}
+	cpu = ppol->notif_cpu;
+	trace_cpufreq_interactive_load_change(cpu);
+	del_timer(&ppol->policy_timer);
+	del_timer(&ppol->policy_slack_timer);
+	cpufreq_interactive_timer(cpu);
+
+	up_read(&ppol->enable_sem);
+	return HRTIMER_NORESTART;
+}
+
+static struct notifier_block load_notifier_block = {
+	.notifier_call = load_change_callback,
+};
+
+static int cpufreq_interactive_notifier(
+	struct notifier_block *nb, unsigned long val, void *data)
+{
+	struct cpufreq_freqs *freq = data;
+	struct cpufreq_interactive_policyinfo *ppol;
+	int cpu;
+	unsigned long flags;
+
+	if (val == CPUFREQ_POSTCHANGE) {
+		ppol = per_cpu(polinfo, freq->cpu);
+		if (!ppol)
+			return 0;
+		if (!down_read_trylock(&ppol->enable_sem))
+			return 0;
+		if (!ppol->governor_enabled) {
+			up_read(&ppol->enable_sem);
+			return 0;
+		}
+
+		if (cpumask_first(ppol->policy->cpus) != freq->cpu) {
+			up_read(&ppol->enable_sem);
+			return 0;
+		}
+		spin_lock_irqsave(&ppol->load_lock, flags);
+		for_each_cpu(cpu, ppol->policy->cpus)
+			update_load(cpu);
+		spin_unlock_irqrestore(&ppol->load_lock, flags);
+
+		up_read(&ppol->enable_sem);
+	}
+	return 0;
+}
+
+static struct notifier_block cpufreq_notifier_block = {
+	.notifier_call = cpufreq_interactive_notifier,
+};
+
+static unsigned int *get_tokenized_data(const char *buf, int *num_tokens)
+{
+	const char *cp;
+	int i;
+	int ntokens = 1;
+	unsigned int *tokenized_data;
+	int err = -EINVAL;
+
+	cp = buf;
+	while ((cp = strpbrk(cp + 1, " :")))
+		ntokens++;
+
+	if (!(ntokens & 0x1))
+		goto err;
+
+	tokenized_data = kmalloc(ntokens * sizeof(unsigned int), GFP_KERNEL);
+	if (!tokenized_data) {
+		err = -ENOMEM;
+		goto err;
+	}
+
+	cp = buf;
+	i = 0;
+	while (i < ntokens) {
+		if (sscanf(cp, "%u", &tokenized_data[i++]) != 1)
+			goto err_kfree;
+
+		cp = strpbrk(cp, " :");
+		if (!cp)
+			break;
+		cp++;
+	}
+
+	if (i != ntokens)
+		goto err_kfree;
+
+	*num_tokens = ntokens;
+	return tokenized_data;
+
+err_kfree:
+	kfree(tokenized_data);
+err:
+	return ERR_PTR(err);
+}
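+
+/*
+ * get_tokenized_data() accepts the usual interactive governor list
+ * syntax: an odd number of tokens alternating between values and
+ * frequency boundaries.  For example (hypothetical values):
+ *
+ *   echo "85 1500000:90 2000000:99" > target_loads
+ *
+ * targets 85% load below 1.5 GHz, 90% from 1.5 GHz and 99% from 2 GHz
+ * upwards; an even token count is rejected with -EINVAL.
+ */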
+
+static ssize_t show_target_loads(
+	struct cpufreq_interactive_tunables *tunables,
+	char *buf)
+{
+	int i;
+	ssize_t ret = 0;
+	unsigned long flags;
+
+	spin_lock_irqsave(&tunables->target_loads_lock, flags);
+
+	for (i = 0; i < tunables->ntarget_loads; i++)
+		ret += sprintf(buf + ret, "%u%s", tunables->target_loads[i],
+			       i & 0x1 ? ":" : " ");
+
+	sprintf(buf + ret - 1, "\n");
+	spin_unlock_irqrestore(&tunables->target_loads_lock, flags);
+	return ret;
+}
+
+static ssize_t store_target_loads(
+	struct cpufreq_interactive_tunables *tunables,
+	const char *buf, size_t count)
+{
+	int ntokens;
+	unsigned int *new_target_loads = NULL;
+	unsigned long flags;
+
+	new_target_loads = get_tokenized_data(buf, &ntokens);
+	if (IS_ERR(new_target_loads))
+		return PTR_RET(new_target_loads);
+
+	spin_lock_irqsave(&tunables->target_loads_lock, flags);
+	if (tunables->target_loads != default_target_loads)
+		kfree(tunables->target_loads);
+	tunables->target_loads = new_target_loads;
+	tunables->ntarget_loads = ntokens;
+	spin_unlock_irqrestore(&tunables->target_loads_lock, flags);
+
+	sched_update_freq_max_load(&controlled_cpus);
+
+	return count;
+}
+
+static ssize_t show_above_hispeed_delay(
+	struct cpufreq_interactive_tunables *tunables, char *buf)
+{
+	int i;
+	ssize_t ret = 0;
+	unsigned long flags;
+
+	spin_lock_irqsave(&tunables->above_hispeed_delay_lock, flags);
+
+	for (i = 0; i < tunables->nabove_hispeed_delay; i++)
+		ret += sprintf(buf + ret, "%u%s",
+			       tunables->above_hispeed_delay[i],
+			       i & 0x1 ? ":" : " ");
+
+	sprintf(buf + ret - 1, "\n");
+	spin_unlock_irqrestore(&tunables->above_hispeed_delay_lock, flags);
+	return ret;
+}
+
+static ssize_t store_above_hispeed_delay(
+	struct cpufreq_interactive_tunables *tunables,
+	const char *buf, size_t count)
+{
+	int ntokens;
+	unsigned int *new_above_hispeed_delay = NULL;
+	unsigned long flags;
+
+	new_above_hispeed_delay = get_tokenized_data(buf, &ntokens);
+	if (IS_ERR(new_above_hispeed_delay))
+		return PTR_RET(new_above_hispeed_delay);
+
+	spin_lock_irqsave(&tunables->above_hispeed_delay_lock, flags);
+	if (tunables->above_hispeed_delay != default_above_hispeed_delay)
+		kfree(tunables->above_hispeed_delay);
+	tunables->above_hispeed_delay = new_above_hispeed_delay;
+	tunables->nabove_hispeed_delay = ntokens;
+	spin_unlock_irqrestore(&tunables->above_hispeed_delay_lock, flags);
+	return count;
+}
+
+static ssize_t show_hispeed_freq(struct cpufreq_interactive_tunables *tunables,
+		char *buf)
+{
+	return sprintf(buf, "%u\n", tunables->hispeed_freq);
+}
+
+static ssize_t store_hispeed_freq(struct cpufreq_interactive_tunables *tunables,
+		const char *buf, size_t count)
+{
+	int ret;
+	unsigned long val;
+
+	ret = kstrtoul(buf, 0, &val);
+	if (ret < 0)
+		return ret;
+	tunables->hispeed_freq = val;
+	return count;
+}
+
+#define show_store_one(file_name)					\
+static ssize_t show_##file_name(					\
+	struct cpufreq_interactive_tunables *tunables, char *buf)	\
+{									\
+	return snprintf(buf, PAGE_SIZE, "%u\n", tunables->file_name);	\
+}									\
+static ssize_t store_##file_name(					\
+		struct cpufreq_interactive_tunables *tunables,		\
+		const char *buf, size_t count)				\
+{									\
+	int ret;							\
+	unsigned long val;						\
+									\
+	ret = kstrtoul(buf, 0, &val);					\
+	if (ret < 0)							\
+		return ret;						\
+	tunables->file_name = val;					\
+	return count;							\
+}
+show_store_one(max_freq_hysteresis);
+show_store_one(align_windows);
+show_store_one(ignore_hispeed_on_notif);
+show_store_one(fast_ramp_down);
+show_store_one(enable_prediction);
+
+static ssize_t show_go_hispeed_load(struct cpufreq_interactive_tunables
+		*tunables, char *buf)
+{
+	return sprintf(buf, "%lu\n", tunables->go_hispeed_load);
+}
+
+static ssize_t store_go_hispeed_load(struct cpufreq_interactive_tunables
+		*tunables, const char *buf, size_t count)
+{
+	int ret;
+	unsigned long val;
+
+	ret = kstrtoul(buf, 0, &val);
+	if (ret < 0)
+		return ret;
+	tunables->go_hispeed_load = val;
+	return count;
+}
+
+static ssize_t show_min_sample_time(struct cpufreq_interactive_tunables
+		*tunables, char *buf)
+{
+	return sprintf(buf, "%lu\n", tunables->min_sample_time);
+}
+
+static ssize_t store_min_sample_time(struct cpufreq_interactive_tunables
+		*tunables, const char *buf, size_t count)
+{
+	int ret;
+	unsigned long val;
+
+	ret = kstrtoul(buf, 0, &val);
+	if (ret < 0)
+		return ret;
+	tunables->min_sample_time = val;
+	return count;
+}
+
+static ssize_t show_timer_rate(struct cpufreq_interactive_tunables *tunables,
+		char *buf)
+{
+	return sprintf(buf, "%lu\n", tunables->timer_rate);
+}
+
+static ssize_t store_timer_rate(struct cpufreq_interactive_tunables *tunables,
+		const char *buf, size_t count)
+{
+	int ret;
+	unsigned long val, val_round;
+	struct cpufreq_interactive_tunables *t;
+	int cpu;
+
+	ret = kstrtoul(buf, 0, &val);
+	if (ret < 0)
+		return ret;
+
+	val_round = jiffies_to_usecs(usecs_to_jiffies(val));
+	if (val != val_round)
+		pr_warn("timer_rate not aligned to jiffy. Rounded up to %lu\n",
+			val_round);
+	tunables->timer_rate = val_round;
+
+	if (!tunables->use_sched_load)
+		return count;
+
+	for_each_possible_cpu(cpu) {
+		if (!per_cpu(polinfo, cpu))
+			continue;
+		t = per_cpu(polinfo, cpu)->cached_tunables;
+		if (t && t->use_sched_load)
+			t->timer_rate = val_round;
+	}
+	set_window_helper(tunables);
+
+	return count;
+}
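+
+/*
+ * Example of the jiffy alignment above (assuming HZ=100, i.e. 10ms
+ * jiffies): writing 25000 gives usecs_to_jiffies(25000) = 3 jiffies,
+ * so the value actually stored and reported back is
+ * jiffies_to_usecs(3) = 30000, along with the pr_warn() about the
+ * rounding.
+ */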
+
+static ssize_t show_timer_slack(struct cpufreq_interactive_tunables *tunables,
+		char *buf)
+{
+	return sprintf(buf, "%d\n", tunables->timer_slack_val);
+}
+
+static ssize_t store_timer_slack(struct cpufreq_interactive_tunables *tunables,
+		const char *buf, size_t count)
+{
+	int ret;
+	long val;
+
+	ret = kstrtol(buf, 10, &val);
+	if (ret < 0)
+		return ret;
+
+	tunables->timer_slack_val = val;
+	return count;
+}
+
+static ssize_t show_boost(struct cpufreq_interactive_tunables *tunables,
+			  char *buf)
+{
+	return sprintf(buf, "%d\n", tunables->boost_val);
+}
+
+static ssize_t store_boost(struct cpufreq_interactive_tunables *tunables,
+			   const char *buf, size_t count)
+{
+	int ret;
+	unsigned long val;
+
+	ret = kstrtoul(buf, 0, &val);
+	if (ret < 0)
+		return ret;
+
+	tunables->boost_val = val;
+
+	if (tunables->boost_val) {
+		trace_cpufreq_interactive_boost("on");
+		if (!tunables->boosted)
+			cpufreq_interactive_boost(tunables);
+	} else {
+		tunables->boostpulse_endtime = ktime_to_us(ktime_get());
+		trace_cpufreq_interactive_unboost("off");
+	}
+
+	return count;
+}
+
+static ssize_t store_boostpulse(struct cpufreq_interactive_tunables *tunables,
+				const char *buf, size_t count)
+{
+	int ret;
+	unsigned long val;
+
+	ret = kstrtoul(buf, 0, &val);
+	if (ret < 0)
+		return ret;
+
+	tunables->boostpulse_endtime = ktime_to_us(ktime_get()) +
+		tunables->boostpulse_duration_val;
+	trace_cpufreq_interactive_boost("pulse");
+	if (!tunables->boosted)
+		cpufreq_interactive_boost(tunables);
+	return count;
+}
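+
+/*
+ * Typical usage of the two boost interfaces (from the governor's
+ * sysfs directory, see get_sysfs_attr() below):
+ *
+ *   echo 1 > boost        # hold CPUs at or above hispeed_freq
+ *   echo 0 > boost        # release the hold
+ *   echo 1 > boostpulse   # boost once, for boostpulse_duration usecs
+ */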
+
+static ssize_t show_boostpulse_duration(struct cpufreq_interactive_tunables
+		*tunables, char *buf)
+{
+	return sprintf(buf, "%d\n", tunables->boostpulse_duration_val);
+}
+
+static ssize_t store_boostpulse_duration(struct cpufreq_interactive_tunables
+		*tunables, const char *buf, size_t count)
+{
+	int ret;
+	unsigned long val;
+
+	ret = kstrtoul(buf, 0, &val);
+	if (ret < 0)
+		return ret;
+
+	tunables->boostpulse_duration_val = val;
+	return count;
+}
+
+static ssize_t show_io_is_busy(struct cpufreq_interactive_tunables *tunables,
+		char *buf)
+{
+	return sprintf(buf, "%u\n", tunables->io_is_busy);
+}
+
+static ssize_t store_io_is_busy(struct cpufreq_interactive_tunables *tunables,
+		const char *buf, size_t count)
+{
+	int ret;
+	unsigned long val;
+	struct cpufreq_interactive_tunables *t;
+	int cpu;
+
+	ret = kstrtoul(buf, 0, &val);
+	if (ret < 0)
+		return ret;
+	tunables->io_is_busy = val;
+
+	if (!tunables->use_sched_load)
+		return count;
+
+	for_each_possible_cpu(cpu) {
+		if (!per_cpu(polinfo, cpu))
+			continue;
+		t = per_cpu(polinfo, cpu)->cached_tunables;
+		if (t && t->use_sched_load)
+			t->io_is_busy = val;
+	}
+	sched_set_io_is_busy(val);
+
+	return count;
+}
+
+static int cpufreq_interactive_enable_sched_input(
+			struct cpufreq_interactive_tunables *tunables)
+{
+	int rc = 0, j;
+	struct cpufreq_interactive_tunables *t;
+
+	mutex_lock(&sched_lock);
+
+	set_window_count++;
+	if (set_window_count > 1) {
+		for_each_possible_cpu(j) {
+			if (!per_cpu(polinfo, j))
+				continue;
+			t = per_cpu(polinfo, j)->cached_tunables;
+			if (t && t->use_sched_load) {
+				tunables->timer_rate = t->timer_rate;
+				tunables->io_is_busy = t->io_is_busy;
+				break;
+			}
+		}
+	} else {
+		rc = set_window_helper(tunables);
+		if (rc) {
+			pr_err("%s: Failed to set sched window\n", __func__);
+			set_window_count--;
+			goto out;
+		}
+		sched_set_io_is_busy(tunables->io_is_busy);
+	}
+
+	if (!tunables->use_migration_notif)
+		goto out;
+
+	migration_register_count++;
+	if (migration_register_count > 1)
+		goto out;
+	else
+		atomic_notifier_chain_register(&load_alert_notifier_head,
+						&load_notifier_block);
+out:
+	mutex_unlock(&sched_lock);
+	return rc;
+}
+
+static int cpufreq_interactive_disable_sched_input(
+			struct cpufreq_interactive_tunables *tunables)
+{
+	mutex_lock(&sched_lock);
+
+	if (tunables->use_migration_notif) {
+		migration_register_count--;
+		if (migration_register_count < 1)
+			atomic_notifier_chain_unregister(
+					&load_alert_notifier_head,
+					&load_notifier_block);
+	}
+	set_window_count--;
+
+	mutex_unlock(&sched_lock);
+	return 0;
+}
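+
+/*
+ * set_window_count and migration_register_count are plain refcounts
+ * serialized by sched_lock: the first instance enabling scheduler
+ * input programs the load window and registers the notifier, later
+ * instances only inherit timer_rate/io_is_busy from an existing user,
+ * and the last instance to disable unregisters again.
+ */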
+
+static ssize_t show_use_sched_load(
+		struct cpufreq_interactive_tunables *tunables, char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%d\n", tunables->use_sched_load);
+}
+
+static ssize_t store_use_sched_load(
+			struct cpufreq_interactive_tunables *tunables,
+			const char *buf, size_t count)
+{
+	int ret;
+	unsigned long val;
+
+	ret = kstrtoul(buf, 0, &val);
+	if (ret < 0)
+		return ret;
+
+	if (tunables->use_sched_load == (bool) val)
+		return count;
+
+	tunables->use_sched_load = val;
+
+	if (val)
+		ret = cpufreq_interactive_enable_sched_input(tunables);
+	else
+		ret = cpufreq_interactive_disable_sched_input(tunables);
+
+	if (ret) {
+		tunables->use_sched_load = !val;
+		return ret;
+	}
+
+	return count;
+}
+
+static ssize_t show_use_migration_notif(
+		struct cpufreq_interactive_tunables *tunables, char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%d\n",
+			tunables->use_migration_notif);
+}
+
+static ssize_t store_use_migration_notif(
+			struct cpufreq_interactive_tunables *tunables,
+			const char *buf, size_t count)
+{
+	int ret;
+	unsigned long val;
+
+	ret = kstrtoul(buf, 0, &val);
+	if (ret < 0)
+		return ret;
+
+	if (tunables->use_migration_notif == (bool) val)
+		return count;
+	tunables->use_migration_notif = val;
+
+	if (!tunables->use_sched_load)
+		return count;
+
+	mutex_lock(&sched_lock);
+	if (val) {
+		migration_register_count++;
+		if (migration_register_count == 1)
+			atomic_notifier_chain_register(
+					&load_alert_notifier_head,
+					&load_notifier_block);
+	} else {
+		migration_register_count--;
+		if (!migration_register_count)
+			atomic_notifier_chain_unregister(
+					&load_alert_notifier_head,
+					&load_notifier_block);
+	}
+	mutex_unlock(&sched_lock);
+
+	return count;
+}
+
+/*
+ * Create show/store routines
+ * - sys: one governor instance for the whole system
+ * - pol: one governor instance per struct cpufreq_policy
+ */
+#define show_gov_pol_sys(file_name)					\
+static ssize_t show_##file_name##_gov_sys				\
+(struct kobject *kobj, struct attribute *attr, char *buf)		\
+{									\
+	return show_##file_name(common_tunables, buf);			\
+}									\
+									\
+static ssize_t show_##file_name##_gov_pol				\
+(struct cpufreq_policy *policy, char *buf)				\
+{									\
+	return show_##file_name(policy->governor_data, buf);		\
+}
+
+#define store_gov_pol_sys(file_name)					\
+static ssize_t store_##file_name##_gov_sys				\
+(struct kobject *kobj, struct attribute *attr, const char *buf,		\
+	size_t count)							\
+{									\
+	return store_##file_name(common_tunables, buf, count);		\
+}									\
+									\
+static ssize_t store_##file_name##_gov_pol				\
+(struct cpufreq_policy *policy, const char *buf, size_t count)		\
+{									\
+	return store_##file_name(policy->governor_data, buf, count);	\
+}
+
+#define show_store_gov_pol_sys(file_name)				\
+show_gov_pol_sys(file_name);						\
+store_gov_pol_sys(file_name)
+
+show_store_gov_pol_sys(target_loads);
+show_store_gov_pol_sys(above_hispeed_delay);
+show_store_gov_pol_sys(hispeed_freq);
+show_store_gov_pol_sys(go_hispeed_load);
+show_store_gov_pol_sys(min_sample_time);
+show_store_gov_pol_sys(timer_rate);
+show_store_gov_pol_sys(timer_slack);
+show_store_gov_pol_sys(boost);
+store_gov_pol_sys(boostpulse);
+show_store_gov_pol_sys(boostpulse_duration);
+show_store_gov_pol_sys(io_is_busy);
+show_store_gov_pol_sys(use_sched_load);
+show_store_gov_pol_sys(use_migration_notif);
+show_store_gov_pol_sys(max_freq_hysteresis);
+show_store_gov_pol_sys(align_windows);
+show_store_gov_pol_sys(ignore_hispeed_on_notif);
+show_store_gov_pol_sys(fast_ramp_down);
+show_store_gov_pol_sys(enable_prediction);
+
+#define gov_sys_attr_rw(_name)						\
+static struct global_attr _name##_gov_sys =				\
+__ATTR(_name, 0644, show_##_name##_gov_sys, store_##_name##_gov_sys)
+
+#define gov_pol_attr_rw(_name)						\
+static struct freq_attr _name##_gov_pol =				\
+__ATTR(_name, 0644, show_##_name##_gov_pol, store_##_name##_gov_pol)
+
+#define gov_sys_pol_attr_rw(_name)					\
+	gov_sys_attr_rw(_name);						\
+	gov_pol_attr_rw(_name)
+
+gov_sys_pol_attr_rw(target_loads);
+gov_sys_pol_attr_rw(above_hispeed_delay);
+gov_sys_pol_attr_rw(hispeed_freq);
+gov_sys_pol_attr_rw(go_hispeed_load);
+gov_sys_pol_attr_rw(min_sample_time);
+gov_sys_pol_attr_rw(timer_rate);
+gov_sys_pol_attr_rw(timer_slack);
+gov_sys_pol_attr_rw(boost);
+gov_sys_pol_attr_rw(boostpulse_duration);
+gov_sys_pol_attr_rw(io_is_busy);
+gov_sys_pol_attr_rw(use_sched_load);
+gov_sys_pol_attr_rw(use_migration_notif);
+gov_sys_pol_attr_rw(max_freq_hysteresis);
+gov_sys_pol_attr_rw(align_windows);
+gov_sys_pol_attr_rw(ignore_hispeed_on_notif);
+gov_sys_pol_attr_rw(fast_ramp_down);
+gov_sys_pol_attr_rw(enable_prediction);
+
+static struct global_attr boostpulse_gov_sys =
+	__ATTR(boostpulse, 0200, NULL, store_boostpulse_gov_sys);
+
+static struct freq_attr boostpulse_gov_pol =
+	__ATTR(boostpulse, 0200, NULL, store_boostpulse_gov_pol);
+
+/* One Governor instance for entire system */
+static struct attribute *interactive_attributes_gov_sys[] = {
+	&target_loads_gov_sys.attr,
+	&above_hispeed_delay_gov_sys.attr,
+	&hispeed_freq_gov_sys.attr,
+	&go_hispeed_load_gov_sys.attr,
+	&min_sample_time_gov_sys.attr,
+	&timer_rate_gov_sys.attr,
+	&timer_slack_gov_sys.attr,
+	&boost_gov_sys.attr,
+	&boostpulse_gov_sys.attr,
+	&boostpulse_duration_gov_sys.attr,
+	&io_is_busy_gov_sys.attr,
+	&use_sched_load_gov_sys.attr,
+	&use_migration_notif_gov_sys.attr,
+	&max_freq_hysteresis_gov_sys.attr,
+	&align_windows_gov_sys.attr,
+	&ignore_hispeed_on_notif_gov_sys.attr,
+	&fast_ramp_down_gov_sys.attr,
+	&enable_prediction_gov_sys.attr,
+	NULL,
+};
+
+static struct attribute_group interactive_attr_group_gov_sys = {
+	.attrs = interactive_attributes_gov_sys,
+	.name = "interactive",
+};
+
+/* Per policy governor instance */
+static struct attribute *interactive_attributes_gov_pol[] = {
+	&target_loads_gov_pol.attr,
+	&above_hispeed_delay_gov_pol.attr,
+	&hispeed_freq_gov_pol.attr,
+	&go_hispeed_load_gov_pol.attr,
+	&min_sample_time_gov_pol.attr,
+	&timer_rate_gov_pol.attr,
+	&timer_slack_gov_pol.attr,
+	&boost_gov_pol.attr,
+	&boostpulse_gov_pol.attr,
+	&boostpulse_duration_gov_pol.attr,
+	&io_is_busy_gov_pol.attr,
+	&use_sched_load_gov_pol.attr,
+	&use_migration_notif_gov_pol.attr,
+	&max_freq_hysteresis_gov_pol.attr,
+	&align_windows_gov_pol.attr,
+	&ignore_hispeed_on_notif_gov_pol.attr,
+	&fast_ramp_down_gov_pol.attr,
+	&enable_prediction_gov_pol.attr,
+	NULL,
+};
+
+static struct attribute_group interactive_attr_group_gov_pol = {
+	.attrs = interactive_attributes_gov_pol,
+	.name = "interactive",
+};
+
+static struct attribute_group *get_sysfs_attr(void)
+{
+	if (have_governor_per_policy())
+		return &interactive_attr_group_gov_pol;
+	else
+		return &interactive_attr_group_gov_sys;
+}
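+
+/*
+ * Resulting sysfs layout (typical paths on a v4.4 kernel): with
+ * per-policy tunables the group appears under
+ * /sys/devices/system/cpu/cpufreq/policyN/interactive/, otherwise a
+ * single shared instance lives at
+ * /sys/devices/system/cpu/cpufreq/interactive/.
+ */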
+
+static void cpufreq_interactive_nop_timer(unsigned long data)
+{
+}
+
+static struct cpufreq_interactive_tunables *alloc_tunable(
+					struct cpufreq_policy *policy)
+{
+	struct cpufreq_interactive_tunables *tunables;
+
+	tunables = kzalloc(sizeof(*tunables), GFP_KERNEL);
+	if (!tunables)
+		return ERR_PTR(-ENOMEM);
+
+	tunables->above_hispeed_delay = default_above_hispeed_delay;
+	tunables->nabove_hispeed_delay =
+		ARRAY_SIZE(default_above_hispeed_delay);
+	tunables->go_hispeed_load = DEFAULT_GO_HISPEED_LOAD;
+	tunables->target_loads = default_target_loads;
+	tunables->ntarget_loads = ARRAY_SIZE(default_target_loads);
+	tunables->min_sample_time = DEFAULT_MIN_SAMPLE_TIME;
+	tunables->timer_rate = DEFAULT_TIMER_RATE;
+	tunables->boostpulse_duration_val = DEFAULT_MIN_SAMPLE_TIME;
+	tunables->timer_slack_val = DEFAULT_TIMER_SLACK;
+
+	spin_lock_init(&tunables->target_loads_lock);
+	spin_lock_init(&tunables->above_hispeed_delay_lock);
+
+	return tunables;
+}
+
+static struct cpufreq_interactive_policyinfo *get_policyinfo(
+					struct cpufreq_policy *policy)
+{
+	struct cpufreq_interactive_policyinfo *ppol =
+				per_cpu(polinfo, policy->cpu);
+	int i;
+	struct sched_load *sl;
+
+	/* polinfo already allocated for policy, return */
+	if (ppol)
+		return ppol;
+
+	ppol = kzalloc(sizeof(*ppol), GFP_KERNEL);
+	if (!ppol)
+		return ERR_PTR(-ENOMEM);
+
+	sl = kcalloc(cpumask_weight(policy->related_cpus), sizeof(*sl),
+		     GFP_KERNEL);
+	if (!sl) {
+		kfree(ppol);
+		return ERR_PTR(-ENOMEM);
+	}
+	ppol->sl = sl;
+
+	init_timer_deferrable(&ppol->policy_timer);
+	ppol->policy_timer.function = cpufreq_interactive_timer;
+	init_timer(&ppol->policy_slack_timer);
+	ppol->policy_slack_timer.function = cpufreq_interactive_nop_timer;
+	hrtimer_init(&ppol->notif_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+	ppol->notif_timer.function = cpufreq_interactive_hrtimer;
+	spin_lock_init(&ppol->load_lock);
+	spin_lock_init(&ppol->target_freq_lock);
+	init_rwsem(&ppol->enable_sem);
+
+	for_each_cpu(i, policy->related_cpus)
+		per_cpu(polinfo, i) = ppol;
+	return ppol;
+}
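+
+/*
+ * Note that every CPU in policy->related_cpus shares one policyinfo:
+ * per_cpu(polinfo, i) entries are aliases of the same object, which
+ * free_policyinfo() below must clear in full before freeing it.
+ */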
+
+/* This function is not multithread-safe. */
+static void free_policyinfo(int cpu)
+{
+	struct cpufreq_interactive_policyinfo *ppol = per_cpu(polinfo, cpu);
+	int j;
+
+	if (!ppol)
+		return;
+
+	for_each_possible_cpu(j)
+		if (per_cpu(polinfo, j) == ppol)
+			per_cpu(polinfo, j) = NULL;
+	kfree(ppol->cached_tunables);
+	kfree(ppol->sl);
+	kfree(ppol);
+}
+
+static struct cpufreq_interactive_tunables *get_tunables(
+				struct cpufreq_interactive_policyinfo *ppol)
+{
+	if (have_governor_per_policy())
+		return ppol->cached_tunables;
+	else
+		return cached_common_tunables;
+}
+
+static int cpufreq_governor_interactive(struct cpufreq_policy *policy,
+		unsigned int event)
+{
+	int rc;
+	struct cpufreq_interactive_policyinfo *ppol;
+	struct cpufreq_frequency_table *freq_table;
+	struct cpufreq_interactive_tunables *tunables;
+
+	if (have_governor_per_policy())
+		tunables = policy->governor_data;
+	else
+		tunables = common_tunables;
+
+	BUG_ON(!tunables && (event != CPUFREQ_GOV_POLICY_INIT));
+
+	switch (event) {
+	case CPUFREQ_GOV_POLICY_INIT:
+		ppol = get_policyinfo(policy);
+		if (IS_ERR(ppol))
+			return PTR_ERR(ppol);
+
+		if (have_governor_per_policy()) {
+			WARN_ON(tunables);
+		} else if (tunables) {
+			tunables->usage_count++;
+			cpumask_or(&controlled_cpus, &controlled_cpus,
+				   policy->related_cpus);
+			sched_update_freq_max_load(policy->related_cpus);
+			policy->governor_data = tunables;
+			return 0;
+		}
+
+		tunables = get_tunables(ppol);
+		if (!tunables) {
+			tunables = alloc_tunable(policy);
+			if (IS_ERR(tunables))
+				return PTR_ERR(tunables);
+		}
+
+		tunables->usage_count = 1;
+		policy->governor_data = tunables;
+		if (!have_governor_per_policy())
+			common_tunables = tunables;
+
+		rc = sysfs_create_group(get_governor_parent_kobj(policy),
+				get_sysfs_attr());
+		if (rc) {
+			kfree(tunables);
+			policy->governor_data = NULL;
+			if (!have_governor_per_policy())
+				common_tunables = NULL;
+			return rc;
+		}
+
+		if (!policy->governor->initialized)
+			cpufreq_register_notifier(&cpufreq_notifier_block,
+					CPUFREQ_TRANSITION_NOTIFIER);
+
+		if (tunables->use_sched_load)
+			cpufreq_interactive_enable_sched_input(tunables);
+
+		cpumask_or(&controlled_cpus, &controlled_cpus,
+			   policy->related_cpus);
+		sched_update_freq_max_load(policy->related_cpus);
+
+		if (have_governor_per_policy())
+			ppol->cached_tunables = tunables;
+		else
+			cached_common_tunables = tunables;
+
+		break;
+
+	case CPUFREQ_GOV_POLICY_EXIT:
+		cpumask_andnot(&controlled_cpus, &controlled_cpus,
+			       policy->related_cpus);
+		sched_update_freq_max_load(cpu_possible_mask);
+		if (!--tunables->usage_count) {
+			if (policy->governor->initialized == 1)
+				cpufreq_unregister_notifier(&cpufreq_notifier_block,
+						CPUFREQ_TRANSITION_NOTIFIER);
+
+			sysfs_remove_group(get_governor_parent_kobj(policy),
+					get_sysfs_attr());
+
+			common_tunables = NULL;
+		}
+
+		policy->governor_data = NULL;
+
+		if (tunables->use_sched_load)
+			cpufreq_interactive_disable_sched_input(tunables);
+
+		break;
+
+	case CPUFREQ_GOV_START:
+		mutex_lock(&gov_lock);
+
+		freq_table = cpufreq_frequency_get_table(policy->cpu);
+		if (!tunables->hispeed_freq)
+			tunables->hispeed_freq = policy->max;
+
+		ppol = per_cpu(polinfo, policy->cpu);
+		ppol->policy = policy;
+		ppol->target_freq = policy->cur;
+		ppol->freq_table = freq_table;
+		ppol->p_nolim = *policy;
+		ppol->p_nolim.min = policy->cpuinfo.min_freq;
+		ppol->p_nolim.max = policy->cpuinfo.max_freq;
+		ppol->floor_freq = ppol->target_freq;
+		ppol->floor_validate_time = ktime_to_us(ktime_get());
+		ppol->hispeed_validate_time = ppol->floor_validate_time;
+		ppol->min_freq = policy->min;
+		ppol->reject_notification = true;
+		ppol->notif_pending = false;
+		down_write(&ppol->enable_sem);
+		del_timer_sync(&ppol->policy_timer);
+		del_timer_sync(&ppol->policy_slack_timer);
+		ppol->policy_timer.data = policy->cpu;
+		ppol->last_evaluated_jiffy = get_jiffies_64();
+		cpufreq_interactive_timer_start(tunables, policy->cpu);
+		ppol->governor_enabled = 1;
+		up_write(&ppol->enable_sem);
+		ppol->reject_notification = false;
+
+		mutex_unlock(&gov_lock);
+		break;
+
+	case CPUFREQ_GOV_STOP:
+		mutex_lock(&gov_lock);
+
+		ppol = per_cpu(polinfo, policy->cpu);
+		ppol->reject_notification = true;
+		down_write(&ppol->enable_sem);
+		ppol->governor_enabled = 0;
+		ppol->target_freq = 0;
+		del_timer_sync(&ppol->policy_timer);
+		del_timer_sync(&ppol->policy_slack_timer);
+		up_write(&ppol->enable_sem);
+		ppol->reject_notification = false;
+
+		mutex_unlock(&gov_lock);
+		break;
+
+	case CPUFREQ_GOV_LIMITS:
+		ppol = per_cpu(polinfo, policy->cpu);
+
+		__cpufreq_driver_target(policy,
+				ppol->target_freq, CPUFREQ_RELATION_L);
+
+		down_read(&ppol->enable_sem);
+		if (ppol->governor_enabled) {
+			if (policy->min < ppol->min_freq)
+				cpufreq_interactive_timer_resched(policy->cpu,
+								  true);
+			ppol->min_freq = policy->min;
+		}
+		up_read(&ppol->enable_sem);
+
+		break;
+	}
+	return 0;
+}
+
+#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE
+static
+#endif
+struct cpufreq_governor cpufreq_gov_interactive = {
+	.name = "interactive",
+	.governor = cpufreq_governor_interactive,
+	.max_transition_latency = 10000000,
+	.owner = THIS_MODULE,
+};
+
+static int __init cpufreq_interactive_init(void)
+{
+	struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };
+	int ret = 0;
+
+	spin_lock_init(&speedchange_cpumask_lock);
+	mutex_init(&gov_lock);
+	mutex_init(&sched_lock);
+	speedchange_task =
+		kthread_create(cpufreq_interactive_speedchange_task, NULL,
+			       "cfinteractive");
+	if (IS_ERR(speedchange_task))
+		return PTR_ERR(speedchange_task);
+
+	sched_setscheduler_nocheck(speedchange_task, SCHED_FIFO, &param);
+	get_task_struct(speedchange_task);
+
+	/* NB: wake up so the thread does not look hung to the freezer */
+	wake_up_process_no_notif(speedchange_task);
+
+	ret = cpufreq_register_governor(&cpufreq_gov_interactive);
+	if (ret) {
+		kthread_stop(speedchange_task);
+		put_task_struct(speedchange_task);
+	}
+	return ret;
+}
+
+#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE
+fs_initcall(cpufreq_interactive_init);
+#else
+module_init(cpufreq_interactive_init);
+#endif
+
+static void __exit cpufreq_interactive_exit(void)
+{
+	int cpu;
+
+	cpufreq_unregister_governor(&cpufreq_gov_interactive);
+	kthread_stop(speedchange_task);
+	put_task_struct(speedchange_task);
+
+	for_each_possible_cpu(cpu)
+		free_policyinfo(cpu);
+}
+
+module_exit(cpufreq_interactive_exit);
+
+MODULE_AUTHOR("Mike Chan <mike@android.com>");
+MODULE_DESCRIPTION("'cpufreq_interactive' - A cpufreq governor for "
+	"latency sensitive workloads");
+MODULE_LICENSE("GPL");
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/cpufreq/qcom-cpufreq.c	2019-01-22 16:16:23.087242639 +0100
@@ -0,0 +1,496 @@
+/* drivers/cpufreq/qcom-cpufreq.c
+ *
+ * MSM architecture cpufreq driver
+ *
+ * Copyright (C) 2007 Google, Inc.
+ * Copyright (c) 2007-2017, The Linux Foundation. All rights reserved.
+ * Author: Mike A. Chan <mikechan@google.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.	 See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/cpufreq.h>
+#include <linux/cpu.h>
+#include <linux/cpumask.h>
+#include <linux/suspend.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <trace/events/power.h>
+
+static DEFINE_MUTEX(l2bw_lock);
+
+static struct clk *cpu_clk[NR_CPUS];
+static struct clk *l2_clk;
+static DEFINE_PER_CPU(struct cpufreq_frequency_table *, freq_table);
+static bool hotplug_ready;
+
+struct cpufreq_suspend_t {
+	struct mutex suspend_mutex;
+	int device_suspended;
+};
+
+static DEFINE_PER_CPU(struct cpufreq_suspend_t, suspend_data);
+
+static int set_cpu_freq(struct cpufreq_policy *policy, unsigned int new_freq,
+			unsigned int index)
+{
+	int ret = 0;
+	struct cpufreq_freqs freqs;
+	unsigned long rate;
+
+	freqs.old = policy->cur;
+	freqs.new = new_freq;
+	freqs.cpu = policy->cpu;
+
+	trace_cpu_frequency_switch_start(freqs.old, freqs.new, policy->cpu);
+	cpufreq_freq_transition_begin(policy, &freqs);
+
+	rate = new_freq * 1000;
+	rate = clk_round_rate(cpu_clk[policy->cpu], rate);
+	ret = clk_set_rate(cpu_clk[policy->cpu], rate);
+	cpufreq_freq_transition_end(policy, &freqs, ret);
+	if (!ret)
+		trace_cpu_frequency_switch_end(policy->cpu);
+
+	return ret;
+}
+
+static int msm_cpufreq_target(struct cpufreq_policy *policy,
+				unsigned int target_freq,
+				unsigned int relation)
+{
+	int ret = 0;
+	int index;
+	struct cpufreq_frequency_table *table;
+
+	mutex_lock(&per_cpu(suspend_data, policy->cpu).suspend_mutex);
+
+	if (target_freq == policy->cur)
+		goto done;
+
+	if (per_cpu(suspend_data, policy->cpu).device_suspended) {
+		pr_debug("cpufreq: cpu%d scheduling frequency change in suspend.\n",
+				policy->cpu);
+		ret = -EFAULT;
+		goto done;
+	}
+
+	table = cpufreq_frequency_get_table(policy->cpu);
+	if (!table) {
+		pr_err("cpufreq: Failed to get frequency table for CPU%u\n",
+		       policy->cpu);
+		ret = -ENODEV;
+		goto done;
+	}
+	if (cpufreq_frequency_table_target(policy, table, target_freq, relation,
+			&index)) {
+		pr_err("cpufreq: invalid target_freq: %u\n", target_freq);
+		ret = -EINVAL;
+		goto done;
+	}
+
+	pr_debug("CPU[%d] target %d relation %d (%d-%d) selected %d\n",
+		policy->cpu, target_freq, relation,
+		policy->min, policy->max, table[index].frequency);
+
+	ret = set_cpu_freq(policy, table[index].frequency,
+			   table[index].driver_data);
+done:
+	mutex_unlock(&per_cpu(suspend_data, policy->cpu).suspend_mutex);
+	return ret;
+}
+
+static int msm_cpufreq_verify(struct cpufreq_policy *policy)
+{
+	cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
+			policy->cpuinfo.max_freq);
+	return 0;
+}
+
+static unsigned int msm_cpufreq_get_freq(unsigned int cpu)
+{
+	return clk_get_rate(cpu_clk[cpu]) / 1000;
+}
+
+static int msm_cpufreq_init(struct cpufreq_policy *policy)
+{
+	int cur_freq;
+	int index;
+	int ret = 0;
+	struct cpufreq_frequency_table *table =
+			per_cpu(freq_table, policy->cpu);
+	int cpu;
+
+	/*
+	 * On some SoCs several cores are clocked by the same source and
+	 * their frequencies cannot be changed independently. Find all
+	 * other CPUs that share this CPU's clock and mark them as
+	 * controlled by the same policy.
+	 */
+	for_each_possible_cpu(cpu)
+		if (cpu_clk[cpu] == cpu_clk[policy->cpu])
+			cpumask_set_cpu(cpu, policy->cpus);
+
+	ret = cpufreq_table_validate_and_show(policy, table);
+	if (ret) {
+		pr_err("cpufreq: failed to get policy min/max\n");
+		return ret;
+	}
+
+	cur_freq = clk_get_rate(cpu_clk[policy->cpu])/1000;
+
+	if (cpufreq_frequency_table_target(policy, table, cur_freq,
+	    CPUFREQ_RELATION_H, &index) &&
+	    cpufreq_frequency_table_target(policy, table, cur_freq,
+	    CPUFREQ_RELATION_L, &index)) {
+		pr_info("cpufreq: cpu%d at invalid freq: %d\n",
+				policy->cpu, cur_freq);
+		return -EINVAL;
+	}
+	/*
+	 * Call set_cpu_freq unconditionally so that when cpu is set to
+	 * online, frequency limit will always be updated.
+	 */
+	ret = set_cpu_freq(policy, table[index].frequency,
+			   table[index].driver_data);
+	if (ret)
+		return ret;
+	pr_debug("cpufreq: cpu%d init at %d switching to %d\n",
+			policy->cpu, cur_freq, table[index].frequency);
+	policy->cur = table[index].frequency;
+
+	return 0;
+}
+
+static int msm_cpufreq_cpu_callback(struct notifier_block *nfb,
+		unsigned long action, void *hcpu)
+{
+	unsigned int cpu = (unsigned long)hcpu;
+	int rc;
+
+	/* Fail hotplug until this driver can get CPU clocks */
+	if (!hotplug_ready)
+		return NOTIFY_BAD;
+
+	switch (action & ~CPU_TASKS_FROZEN) {
+
+	case CPU_DYING:
+		clk_disable(cpu_clk[cpu]);
+		clk_disable(l2_clk);
+		break;
+	/*
+	 * Scale down clock/power of CPU that is dead and scale it back up
+	 * before the CPU is brought up.
+	 */
+	case CPU_DEAD:
+		clk_unprepare(cpu_clk[cpu]);
+		clk_unprepare(l2_clk);
+		break;
+	case CPU_UP_CANCELED:
+		clk_unprepare(cpu_clk[cpu]);
+		clk_unprepare(l2_clk);
+		break;
+	case CPU_UP_PREPARE:
+		rc = clk_prepare(l2_clk);
+		if (rc < 0)
+			return NOTIFY_BAD;
+		rc = clk_prepare(cpu_clk[cpu]);
+		if (rc < 0) {
+			clk_unprepare(l2_clk);
+			return NOTIFY_BAD;
+		}
+		break;
+
+	case CPU_STARTING:
+		rc = clk_enable(l2_clk);
+		if (rc < 0)
+			return NOTIFY_BAD;
+		rc = clk_enable(cpu_clk[cpu]);
+		if (rc) {
+			clk_disable(l2_clk);
+			return NOTIFY_BAD;
+		}
+		break;
+
+	default:
+		break;
+	}
+
+	return NOTIFY_OK;
+}
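+
+/*
+ * The hotplug callback above follows the clk API's two-level model:
+ * CPU_UP_PREPARE/CPU_DEAD and CPU_UP_CANCELED pair
+ * clk_prepare()/clk_unprepare() in sleepable context, while
+ * CPU_STARTING/CPU_DYING pair clk_enable()/clk_disable() on the
+ * hotplugged CPU itself, where blocking is not allowed.
+ */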
+
+static struct notifier_block __refdata msm_cpufreq_cpu_notifier = {
+	.notifier_call = msm_cpufreq_cpu_callback,
+};
+
+static int msm_cpufreq_suspend(void)
+{
+	int cpu;
+
+	for_each_possible_cpu(cpu) {
+		mutex_lock(&per_cpu(suspend_data, cpu).suspend_mutex);
+		per_cpu(suspend_data, cpu).device_suspended = 1;
+		mutex_unlock(&per_cpu(suspend_data, cpu).suspend_mutex);
+	}
+
+	return NOTIFY_DONE;
+}
+
+static int msm_cpufreq_resume(void)
+{
+	int cpu, ret;
+	struct cpufreq_policy policy;
+
+	for_each_possible_cpu(cpu) {
+		per_cpu(suspend_data, cpu).device_suspended = 0;
+	}
+
+	/*
+	 * Freq request might be rejected during suspend, resulting
+	 * in policy->cur violating min/max constraint.
+	 * Correct the frequency as soon as possible.
+	 */
+	get_online_cpus();
+	for_each_online_cpu(cpu) {
+		ret = cpufreq_get_policy(&policy, cpu);
+		if (ret)
+			continue;
+		if (policy.cur <= policy.max && policy.cur >= policy.min)
+			continue;
+		ret = cpufreq_update_policy(cpu);
+		if (ret)
+			pr_info("cpufreq: Current frequency violates policy min/max for CPU%d\n",
+			       cpu);
+		else
+			pr_info("cpufreq: Frequency violation fixed for CPU%d\n",
+				cpu);
+	}
+	put_online_cpus();
+
+	return NOTIFY_DONE;
+}
+
+static int msm_cpufreq_pm_event(struct notifier_block *this,
+				unsigned long event, void *ptr)
+{
+	switch (event) {
+	case PM_POST_HIBERNATION:
+	case PM_POST_SUSPEND:
+		return msm_cpufreq_resume();
+	case PM_HIBERNATION_PREPARE:
+	case PM_SUSPEND_PREPARE:
+		return msm_cpufreq_suspend();
+	default:
+		return NOTIFY_DONE;
+	}
+}
+
+static struct notifier_block msm_cpufreq_pm_notifier = {
+	.notifier_call = msm_cpufreq_pm_event,
+};
+
+static struct freq_attr *msm_freq_attr[] = {
+	&cpufreq_freq_attr_scaling_available_freqs,
+	NULL,
+};
+
+static struct cpufreq_driver msm_cpufreq_driver = {
+	/* CPUFREQ_CONST_LOOPS: loops_per_jiffy need not be rescaled. */
+	.flags		= CPUFREQ_STICKY | CPUFREQ_CONST_LOOPS,
+	.init		= msm_cpufreq_init,
+	.verify		= msm_cpufreq_verify,
+	.target		= msm_cpufreq_target,
+	.get		= msm_cpufreq_get_freq,
+	.name		= "msm",
+	.attr		= msm_freq_attr,
+};
+
+static struct cpufreq_frequency_table *cpufreq_parse_dt(struct device *dev,
+						char *tbl_name, int cpu)
+{
+	int ret, nf, i, j;
+	u32 *data;
+	struct cpufreq_frequency_table *ftbl;
+
+	/* Parse list of usable CPU frequencies. */
+	if (!of_find_property(dev->of_node, tbl_name, &nf))
+		return ERR_PTR(-EINVAL);
+	nf /= sizeof(*data);
+
+	if (nf == 0)
+		return ERR_PTR(-EINVAL);
+
+	data = devm_kzalloc(dev, nf * sizeof(*data), GFP_KERNEL);
+	if (!data)
+		return ERR_PTR(-ENOMEM);
+
+	ret = of_property_read_u32_array(dev->of_node, tbl_name, data, nf);
+	if (ret)
+		return ERR_PTR(ret);
+
+	ftbl = devm_kzalloc(dev, (nf + 1) * sizeof(*ftbl), GFP_KERNEL);
+	if (!ftbl)
+		return ERR_PTR(-ENOMEM);
+
+	j = 0;
+	for (i = 0; i < nf; i++) {
+		unsigned long f;
+
+		f = clk_round_rate(cpu_clk[cpu], data[i] * 1000);
+		if (IS_ERR_VALUE(f))
+			break;
+		f /= 1000;
+
+		/*
+		 * Don't repeat frequencies if they round up to the same
+		 * clock frequency.
+		 */
+		if (j > 0 && f <= ftbl[j - 1].frequency)
+			continue;
+
+		ftbl[j].driver_data = j;
+		ftbl[j].frequency = f;
+		j++;
+	}
+
+	ftbl[j].driver_data = j;
+	ftbl[j].frequency = CPUFREQ_TABLE_END;
+
+	devm_kfree(dev, data);
+
+	return ftbl;
+}
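+
+/*
+ * A hypothetical devicetree fragment consumed by the parser above
+ * (frequencies in kHz; the values are examples only):
+ *
+ *	qcom,msm-cpufreq {
+ *		compatible = "qcom,msm-cpufreq";
+ *		qcom,cpufreq-table = < 300000 960000 1478400 >;
+ *	};
+ *
+ * Entries that clk_round_rate() collapses onto one rate are dropped
+ * and the table is terminated with CPUFREQ_TABLE_END.
+ */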
+
+static int __init msm_cpufreq_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	char clk_name[] = "cpu??_clk";
+	char tbl_name[] = "qcom,cpufreq-table-??";
+	struct clk *c;
+	int cpu;
+	struct cpufreq_frequency_table *ftbl;
+
+	l2_clk = devm_clk_get(dev, "l2_clk");
+	if (IS_ERR(l2_clk))
+		l2_clk = NULL;
+
+	for_each_possible_cpu(cpu) {
+		snprintf(clk_name, sizeof(clk_name), "cpu%d_clk", cpu);
+		c = devm_clk_get(dev, clk_name);
+		if (cpu == 0 && IS_ERR(c))
+			return PTR_ERR(c);
+		else if (IS_ERR(c))
+			c = cpu_clk[cpu-1];
+		cpu_clk[cpu] = c;
+	}
+	hotplug_ready = true;
+
+	/* Use per-policy governor tunable for some targets */
+	if (of_property_read_bool(dev->of_node, "qcom,governor-per-policy"))
+		msm_cpufreq_driver.flags |= CPUFREQ_HAVE_GOVERNOR_PER_POLICY;
+
+	/* Parse common cpufreq table for all CPUs */
+	ftbl = cpufreq_parse_dt(dev, "qcom,cpufreq-table", 0);
+	if (!IS_ERR(ftbl)) {
+		for_each_possible_cpu(cpu)
+			per_cpu(freq_table, cpu) = ftbl;
+		return 0;
+	}
+
+	/*
+	 * No common table. Parse individual tables for each unique
+	 * CPU clock.
+	 */
+	for_each_possible_cpu(cpu) {
+		snprintf(tbl_name, sizeof(tbl_name),
+			 "qcom,cpufreq-table-%d", cpu);
+		ftbl = cpufreq_parse_dt(dev, tbl_name, cpu);
+
+		/* CPU0 must contain freq table */
+		if (cpu == 0 && IS_ERR(ftbl)) {
+			dev_err(dev, "Failed to parse CPU0's freq table\n");
+			return PTR_ERR(ftbl);
+		}
+		if (cpu == 0) {
+			per_cpu(freq_table, cpu) = ftbl;
+			continue;
+		}
+
+		if (cpu_clk[cpu] != cpu_clk[cpu - 1] && IS_ERR(ftbl)) {
+			dev_err(dev, "Failed to parse CPU%d's freq table\n",
+				cpu);
+			return PTR_ERR(ftbl);
+		}
+
+		/* Use previous CPU's table if it shares same clock */
+		if (cpu_clk[cpu] == cpu_clk[cpu - 1]) {
+			if (!IS_ERR(ftbl)) {
+				dev_warn(dev, "Conflicting tables for CPU%d\n",
+					 cpu);
+				devm_kfree(dev, ftbl);
+			}
+			ftbl = per_cpu(freq_table, cpu - 1);
+		}
+		per_cpu(freq_table, cpu) = ftbl;
+	}
+
+	return 0;
+}
+
+static const struct of_device_id match_table[] = {
+	{ .compatible = "qcom,msm-cpufreq" },
+	{}
+};
+
+static struct platform_driver msm_cpufreq_plat_driver = {
+	.driver = {
+		.name = "msm-cpufreq",
+		.of_match_table = match_table,
+		.owner = THIS_MODULE,
+	},
+};
+
+static int __init msm_cpufreq_register(void)
+{
+	int cpu, rc;
+
+	for_each_possible_cpu(cpu) {
+		mutex_init(&(per_cpu(suspend_data, cpu).suspend_mutex));
+		per_cpu(suspend_data, cpu).device_suspended = 0;
+	}
+
+	rc = platform_driver_probe(&msm_cpufreq_plat_driver,
+				   msm_cpufreq_probe);
+	if (rc < 0) {
+		/* Unblock hotplug if msm-cpufreq probe fails */
+		unregister_hotcpu_notifier(&msm_cpufreq_cpu_notifier);
+		for_each_possible_cpu(cpu)
+			mutex_destroy(&(per_cpu(suspend_data, cpu).
+					suspend_mutex));
+		return rc;
+	}
+
+	register_pm_notifier(&msm_cpufreq_pm_notifier);
+	return cpufreq_register_driver(&msm_cpufreq_driver);
+}
+
+subsys_initcall(msm_cpufreq_register);
+
+static int __init msm_cpufreq_early_register(void)
+{
+	return register_hotcpu_notifier(&msm_cpufreq_cpu_notifier);
+}
+core_initcall(msm_cpufreq_early_register);
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/cpuidle/lpm-levels.c	2019-01-22 16:16:23.095242712 +0100
@@ -0,0 +1,2041 @@
+/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2006-2007 Adam Belay <abelay@novell.com>
+ * Copyright (C) 2009 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+#include <linux/mutex.h>
+#include <linux/cpu.h>
+#include <linux/of.h>
+#include <linux/irqchip/msm-mpm-irq.h>
+#include <linux/hrtimer.h>
+#include <linux/ktime.h>
+#include <linux/tick.h>
+#include <linux/suspend.h>
+#include <linux/pm_qos.h>
+#include <linux/of_platform.h>
+#include <linux/smp.h>
+#include <linux/remote_spinlock.h>
+#include <linux/msm_remote_spinlock.h>
+#include <linux/dma-mapping.h>
+#include <linux/coresight-cti.h>
+#include <linux/moduleparam.h>
+#include <linux/sched.h>
+#include <linux/cpu_pm.h>
+#include <linux/arm-smccc.h>
+#include <soc/qcom/spm.h>
+#include <soc/qcom/pm.h>
+#include <soc/qcom/rpm-notifier.h>
+#include <soc/qcom/event_timer.h>
+#include <soc/qcom/lpm-stats.h>
+#include <soc/qcom/jtag.h>
+#include <soc/qcom/minidump.h>
+#include <asm/cputype.h>
+#include <asm/arch_timer.h>
+#include <asm/cacheflush.h>
+#include <asm/suspend.h>
+#include <asm/cpuidle.h>
+#include "lpm-levels.h"
+#include "lpm-workarounds.h"
+#include <trace/events/power.h>
+#define CREATE_TRACE_POINTS
+#include <trace/events/trace_msm_low_power.h>
+#include "../../drivers/clk/msm/clock.h"
+
+#define SCLK_HZ (32768)
+#define SCM_HANDOFF_LOCK_ID "S:7"
+#define PSCI_POWER_STATE(reset) (reset << 30)
+#define PSCI_AFFINITY_LEVEL(lvl) ((lvl & 0x3) << 24)
+static remote_spinlock_t scm_handoff_lock;
+
+enum {
+	MSM_LPM_LVL_DBG_SUSPEND_LIMITS = BIT(0),
+	MSM_LPM_LVL_DBG_IDLE_LIMITS = BIT(1),
+};
+
+enum debug_event {
+	CPU_ENTER,
+	CPU_EXIT,
+	CLUSTER_ENTER,
+	CLUSTER_EXIT,
+	PRE_PC_CB,
+	CPU_HP_STARTING,
+	CPU_HP_DYING,
+};
+
+struct lpm_debug {
+	cycle_t time;
+	enum debug_event evt;
+	int cpu;
+	uint32_t arg1;
+	uint32_t arg2;
+	uint32_t arg3;
+	uint32_t arg4;
+};
+
+struct lpm_cluster *lpm_root_node;
+
+#define MAXSAMPLES 5
+
+static bool lpm_prediction = true;
+module_param_named(lpm_prediction,
+	lpm_prediction, bool, S_IRUGO | S_IWUSR | S_IWGRP);
+
+static uint32_t ref_stddev = 100;
+module_param_named(
+	ref_stddev, ref_stddev, uint, S_IRUGO | S_IWUSR | S_IWGRP
+);
+
+static uint32_t tmr_add = 100;
+module_param_named(
+	tmr_add, tmr_add, uint, S_IRUGO | S_IWUSR | S_IWGRP
+);
+
+struct lpm_history {
+	uint32_t resi[MAXSAMPLES];
+	int mode[MAXSAMPLES];
+	int nsamp;
+	uint32_t hptr;
+	uint32_t hinvalid;
+	uint32_t htmr_wkup;
+	int64_t stime;
+};
+
+static DEFINE_PER_CPU(struct lpm_history, hist);
+
+static DEFINE_PER_CPU(struct lpm_cluster*, cpu_cluster);
+static bool suspend_in_progress;
+static struct hrtimer lpm_hrtimer;
+static struct hrtimer histtimer;
+static struct lpm_debug *lpm_debug;
+static phys_addr_t lpm_debug_phys;
+static const int num_dbg_elements = 0x100;
+static int lpm_cpu_callback(struct notifier_block *cpu_nb,
+				unsigned long action, void *hcpu);
+
+static void cluster_unprepare(struct lpm_cluster *cluster,
+		const struct cpumask *cpu, int child_idx, bool from_idle,
+		int64_t time);
+static void cluster_prepare(struct lpm_cluster *cluster,
+		const struct cpumask *cpu, int child_idx, bool from_idle,
+		int64_t time);
+
+static struct notifier_block __refdata lpm_cpu_nblk = {
+	.notifier_call = lpm_cpu_callback,
+};
+
+static bool menu_select;
+module_param_named(
+	menu_select, menu_select, bool, S_IRUGO | S_IWUSR | S_IWGRP
+);
+
+static int msm_pm_sleep_time_override;
+module_param_named(sleep_time_override,
+	msm_pm_sleep_time_override, int, S_IRUGO | S_IWUSR | S_IWGRP);
+static uint64_t suspend_wake_time;
+
+static bool print_parsed_dt;
+module_param_named(
+	print_parsed_dt, print_parsed_dt, bool, S_IRUGO | S_IWUSR | S_IWGRP
+);
+
+static bool sleep_disabled = true;
+module_param_named(sleep_disabled,
+	sleep_disabled, bool, S_IRUGO | S_IWUSR | S_IWGRP);
+
+s32 msm_cpuidle_get_deep_idle_latency(void)
+{
+	return 10;
+}
+
+void lpm_suspend_wake_time(uint64_t wakeup_time)
+{
+	if (!wakeup_time) {
+		suspend_wake_time = msm_pm_sleep_time_override * MSEC_PER_SEC;
+		return;
+	}
+
+	if (msm_pm_sleep_time_override &&
+		(msm_pm_sleep_time_override < wakeup_time))
+		suspend_wake_time = msm_pm_sleep_time_override * MSEC_PER_SEC;
+	else
+		suspend_wake_time = wakeup_time;
+}
+EXPORT_SYMBOL(lpm_suspend_wake_time);
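+
+/*
+ * In short: a zero wakeup_time selects the sleep_time_override module
+ * parameter outright; a non-zero wakeup_time is used as-is unless a
+ * smaller non-zero override is set, in which case the override
+ * (scaled by MSEC_PER_SEC) wins.
+ */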
+
+static uint32_t least_cluster_latency(struct lpm_cluster *cluster,
+					struct latency_level *lat_level)
+{
+	struct list_head *list;
+	struct lpm_cluster_level *level;
+	struct lpm_cluster *n;
+	struct power_params *pwr_params;
+	uint32_t latency = 0;
+	int i;
+
+	if (!cluster->list.next) {
+		for (i = 0; i < cluster->nlevels; i++) {
+			level = &cluster->levels[i];
+			pwr_params = &level->pwr;
+			if (lat_level->reset_level == level->reset_level) {
+				if ((latency > pwr_params->latency_us)
+						|| (!latency))
+					latency = pwr_params->latency_us;
+				break;
+			}
+		}
+	} else {
+		list_for_each(list, &cluster->parent->child) {
+			n = list_entry(list, typeof(*n), list);
+			if (lat_level->level_name) {
+				if (strcmp(lat_level->level_name,
+						 n->cluster_name))
+					continue;
+			}
+			for (i = 0; i < n->nlevels; i++) {
+				level = &n->levels[i];
+				pwr_params = &level->pwr;
+				if (lat_level->reset_level ==
+						level->reset_level) {
+					if ((latency > pwr_params->latency_us)
+								|| (!latency))
+						latency =
+						pwr_params->latency_us;
+					break;
+				}
+			}
+		}
+	}
+	return latency;
+}
+
+static uint32_t least_cpu_latency(struct list_head *child,
+				struct latency_level *lat_level)
+{
+	struct list_head *list;
+	struct lpm_cpu_level *level;
+	struct power_params *pwr_params;
+	struct lpm_cpu *cpu;
+	struct lpm_cluster *n;
+	uint32_t latency = 0;
+	int i;
+
+	list_for_each(list, child) {
+		n = list_entry(list, typeof(*n), list);
+		if (lat_level->level_name) {
+			if (strcmp(lat_level->level_name, n->cluster_name))
+				continue;
+		}
+		cpu = n->cpu;
+		for (i = 0; i < cpu->nlevels; i++) {
+			level = &cpu->levels[i];
+			pwr_params = &level->pwr;
+			if (lat_level->reset_level == level->reset_level) {
+				if ((latency > pwr_params->latency_us)
+							|| (!latency))
+					latency = pwr_params->latency_us;
+				break;
+			}
+		}
+	}
+	return latency;
+}
+
+static struct lpm_cluster *cluster_aff_match(struct lpm_cluster *cluster,
+							int affinity_level)
+{
+	struct lpm_cluster *n;
+
+	if ((cluster->aff_level == affinity_level)
+		|| ((cluster->cpu) && (affinity_level == 0)))
+		return cluster;
+	else if (!cluster->cpu) {
+		n = list_entry(cluster->child.next, typeof(*n), list);
+		return cluster_aff_match(n, affinity_level);
+	} else
+		return NULL;
+}
+
+int lpm_get_latency(struct latency_level *level, uint32_t *latency)
+{
+	struct lpm_cluster *cluster;
+	uint32_t val;
+
+	if (!lpm_root_node) {
+		pr_err("%s: lpm_probe not completed\n", __func__);
+		return -EAGAIN;
+	}
+
+	if ((level->affinity_level < 0)
+		|| (level->affinity_level > lpm_root_node->aff_level)
+		|| (level->reset_level < LPM_RESET_LVL_RET)
+		|| (level->reset_level > LPM_RESET_LVL_PC)
+		|| !latency)
+		return -EINVAL;
+
+	cluster = cluster_aff_match(lpm_root_node, level->affinity_level);
+	if (!cluster) {
+		pr_err("%s:No matching cluster found for affinity_level:%d\n",
+					__func__, level->affinity_level);
+		return -EINVAL;
+	}
+
+	if (level->affinity_level == 0)
+		val = least_cpu_latency(&cluster->parent->child, level);
+	else
+		val = least_cluster_latency(cluster, level);
+
+	if (!val) {
+		pr_err("%s:No mode with affinity_level:%d reset_level:%d\n",
+			__func__, level->affinity_level, level->reset_level);
+		return -EINVAL;
+	}
+
+	*latency = val;
+
+	return 0;
+}
+EXPORT_SYMBOL(lpm_get_latency);
+
+static void update_debug_pc_event(enum debug_event event, uint32_t arg1,
+		uint32_t arg2, uint32_t arg3, uint32_t arg4)
+{
+	struct lpm_debug *dbg;
+	int idx;
+	static DEFINE_SPINLOCK(debug_lock);
+	static int pc_event_index;
+
+	if (!lpm_debug)
+		return;
+
+	spin_lock(&debug_lock);
+	idx = pc_event_index++;
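+	/* num_dbg_elements is a power of two, so the mask wraps the ring index */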
+	dbg = &lpm_debug[idx & (num_dbg_elements - 1)];
+
+	dbg->evt = event;
+	dbg->time = arch_counter_get_cntvct();
+	dbg->cpu = raw_smp_processor_id();
+	dbg->arg1 = arg1;
+	dbg->arg2 = arg2;
+	dbg->arg3 = arg3;
+	dbg->arg4 = arg4;
+	spin_unlock(&debug_lock);
+}
+
+static int lpm_cpu_callback(struct notifier_block *cpu_nb,
+	unsigned long action, void *hcpu)
+{
+	unsigned long cpu = (unsigned long) hcpu;
+	struct lpm_cluster *cluster = per_cpu(cpu_cluster, (unsigned int) cpu);
+
+	switch (action & ~CPU_TASKS_FROZEN) {
+	case CPU_DYING:
+		update_debug_pc_event(CPU_HP_DYING, cpu,
+				cluster->num_children_in_sync.bits[0],
+				cluster->child_cpus.bits[0], false);
+		cluster_prepare(cluster, get_cpu_mask((unsigned int) cpu),
+					NR_LPM_LEVELS, false, 0);
+		break;
+	case CPU_STARTING:
+		update_debug_pc_event(CPU_HP_STARTING, cpu,
+				cluster->num_children_in_sync.bits[0],
+				cluster->child_cpus.bits[0], false);
+		cluster_unprepare(cluster, get_cpu_mask((unsigned int) cpu),
+					NR_LPM_LEVELS, false, 0);
+		break;
+	default:
+		break;
+	}
+	return NOTIFY_OK;
+}
+
+#ifdef CONFIG_ARM_PSCI
+
+static int __init set_cpuidle_ops(void)
+{
+	int ret = 0, cpu;
+
+	for_each_possible_cpu(cpu) {
+		ret = arm_cpuidle_init(cpu);
+		if (ret)
+			goto exit;
+	}
+
+exit:
+	return ret;
+}
+
+#endif
+
+static enum hrtimer_restart lpm_hrtimer_cb(struct hrtimer *h)
+{
+	return HRTIMER_NORESTART;
+}
+
+static void histtimer_cancel(void)
+{
+	hrtimer_try_to_cancel(&histtimer);
+}
+
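+/*
+ * Fires when the CPU was woken by the prediction timer itself: the
+ * resulting residency sample reflects our own timer, not a real wakeup,
+ * so flag the history as invalid for the next idle entry.
+ */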
+static enum hrtimer_restart histtimer_fn(struct hrtimer *h)
+{
+	int cpu = raw_smp_processor_id();
+	struct lpm_history *history = &per_cpu(hist, cpu);
+
+	history->hinvalid = 1;
+	return HRTIMER_NORESTART;
+}
+
+static void histtimer_start(uint32_t time_us)
+{
+	uint64_t time_ns = time_us * NSEC_PER_USEC;
+	ktime_t hist_ktime = ns_to_ktime(time_ns);
+
+	histtimer.function = histtimer_fn;
+	hrtimer_start(&histtimer, hist_ktime, HRTIMER_MODE_REL_PINNED);
+}
+
+static void cluster_timer_init(struct lpm_cluster *cluster)
+{
+	struct list_head *list;
+
+	if (!cluster)
+		return;
+
+	hrtimer_init(&cluster->histtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+
+	list_for_each(list, &cluster->child) {
+		struct lpm_cluster *n;
+
+		n = list_entry(list, typeof(*n), list);
+		cluster_timer_init(n);
+	}
+}
+
+static void clusttimer_cancel(void)
+{
+	int cpu = raw_smp_processor_id();
+	struct lpm_cluster *cluster = per_cpu(cpu_cluster, cpu);
+
+	hrtimer_try_to_cancel(&cluster->histtimer);
+	hrtimer_try_to_cancel(&cluster->parent->histtimer);
+}
+
+static enum hrtimer_restart clusttimer_fn(struct hrtimer *h)
+{
+	struct lpm_cluster *cluster = container_of(h,
+				struct lpm_cluster, histtimer);
+
+	cluster->history.hinvalid = 1;
+	return HRTIMER_NORESTART;
+}
+
+static void clusttimer_start(struct lpm_cluster *cluster, uint32_t time_us)
+{
+	uint64_t time_ns = time_us * NSEC_PER_USEC;
+	ktime_t clust_ktime = ns_to_ktime(time_ns);
+
+	cluster->histtimer.function = clusttimer_fn;
+	hrtimer_start(&cluster->histtimer, clust_ktime,
+				HRTIMER_MODE_REL_PINNED);
+}
+
+static void msm_pm_set_timer(uint32_t modified_time_us)
+{
+	u64 modified_time_ns = modified_time_us * NSEC_PER_USEC;
+	ktime_t modified_ktime = ns_to_ktime(modified_time_ns);
+
+	lpm_hrtimer.function = lpm_hrtimer_cb;
+	hrtimer_start(&lpm_hrtimer, modified_ktime, HRTIMER_MODE_REL_PINNED);
+}
+
+int set_l2_mode(struct low_power_ops *ops, int mode, bool notify_rpm)
+{
+	int lpm = mode;
+	int rc = 0;
+	struct low_power_ops *cpu_ops = per_cpu(cpu_cluster,
+			smp_processor_id())->lpm_dev;
+
+	if (cpu_ops->tz_flag & MSM_SCM_L2_OFF ||
+			cpu_ops->tz_flag & MSM_SCM_L2_GDHS)
+		coresight_cti_ctx_restore();
+
+	switch (mode) {
+	case MSM_SPM_MODE_STANDALONE_POWER_COLLAPSE:
+	case MSM_SPM_MODE_POWER_COLLAPSE:
+	case MSM_SPM_MODE_FASTPC:
+		cpu_ops->tz_flag = MSM_SCM_L2_OFF;
+		coresight_cti_ctx_save();
+		break;
+	case MSM_SPM_MODE_GDHS:
+		cpu_ops->tz_flag = MSM_SCM_L2_GDHS;
+		coresight_cti_ctx_save();
+		break;
+	case MSM_SPM_MODE_CLOCK_GATING:
+	case MSM_SPM_MODE_RETENTION:
+	case MSM_SPM_MODE_DISABLED:
+		cpu_ops->tz_flag = MSM_SCM_L2_ON;
+		break;
+	default:
+		cpu_ops->tz_flag = MSM_SCM_L2_ON;
+		lpm = MSM_SPM_MODE_DISABLED;
+		break;
+	}
+	rc = msm_spm_config_low_power_mode(ops->spm, lpm, notify_rpm);
+
+	if (rc)
+		pr_err("%s: Failed to set L2 low power mode %d, ERR %d",
+				__func__, lpm, rc);
+
+	return rc;
+}
+
+int set_l3_mode(struct low_power_ops *ops, int mode, bool notify_rpm)
+{
+	struct low_power_ops *cpu_ops = per_cpu(cpu_cluster,
+			smp_processor_id())->lpm_dev;
+
+	switch (mode) {
+	case MSM_SPM_MODE_STANDALONE_POWER_COLLAPSE:
+	case MSM_SPM_MODE_POWER_COLLAPSE:
+	case MSM_SPM_MODE_FASTPC:
+		cpu_ops->tz_flag |= MSM_SCM_L3_PC_OFF;
+		break;
+	default:
+		break;
+	}
+	return msm_spm_config_low_power_mode(ops->spm, mode, notify_rpm);
+}
+
+int set_system_mode(struct low_power_ops *ops, int mode, bool notify_rpm)
+{
+	return msm_spm_config_low_power_mode(ops->spm, mode, notify_rpm);
+}
+
+static int set_device_mode(struct lpm_cluster *cluster, int ndevice,
+		struct lpm_cluster_level *level)
+{
+	struct low_power_ops *ops;
+
+	if (use_psci)
+		return 0;
+
+	ops = &cluster->lpm_dev[ndevice];
+	if (ops && ops->set_mode)
+		return ops->set_mode(ops, level->mode[ndevice],
+				level->notify_rpm);
+	else
+		return -EINVAL;
+}
+
+static uint64_t lpm_cpuidle_predict(struct cpuidle_device *dev,
+		struct lpm_cpu *cpu, int *idx_restrict,
+		uint32_t *idx_restrict_time)
+{
+	int i, j, divisor;
+	uint64_t max, avg, stddev;
+	int64_t thresh = LLONG_MAX;
+	struct lpm_history *history = &per_cpu(hist, dev->cpu);
+	uint32_t *min_residency = get_per_cpu_min_residency(dev->cpu);
+
+	if (!lpm_prediction)
+		return 0;
+
+	/*
+	 * Samples are marked invalid when the wakeup was caused by the
+	 * history timer, so do not predict.
+	 */
+	if (history->hinvalid) {
+		history->hinvalid = 0;
+		history->htmr_wkup = 1;
+		history->stime = 0;
+		return 0;
+	}
+
+	/*
+	 * Predict only when all the samples are collected.
+	 */
+	if (history->nsamp < MAXSAMPLES) {
+		history->stime = 0;
+		return 0;
+	}
+
+	/*
+	 * Check if the samples are not much deviated, if so use the
+	 * average of those as predicted sleep time. Else if any
+	 * specific mode has more premature exits return the index of
+	 * that mode.
+	 */
+
+again:
+	max = avg = divisor = stddev = 0;
+	for (i = 0; i < MAXSAMPLES; i++) {
+		int64_t value = history->resi[i];
+
+		if (value <= thresh) {
+			avg += value;
+			divisor++;
+			if (value > max)
+				max = value;
+		}
+	}
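+	/*
+	 * do_div() modifies its u64 dividend in place; it is used here
+	 * because plain 64-bit division is not available on 32-bit ARM.
+	 */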
+	do_div(avg, divisor);
+
+	for (i = 0; i < MAXSAMPLES; i++) {
+		int64_t value = history->resi[i];
+
+		if (value <= thresh) {
+			int64_t diff = value - avg;
+
+			stddev += diff * diff;
+		}
+	}
+	do_div(stddev, divisor);
+	stddev = int_sqrt(stddev);
+
+	/*
+	 * If the deviation is small, return the average; otherwise
+	 * drop the largest sample and retry.
+	 */
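+	/* avg > 6 * stddev keeps the relative deviation under ~17% */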
+	if (((avg > stddev * 6) && (divisor >= (MAXSAMPLES - 1)))
+					|| stddev <= ref_stddev) {
+		history->stime = ktime_to_us(ktime_get()) + avg;
+		return avg;
+	} else if (divisor > (MAXSAMPLES - 1)) {
+		thresh = max - 1;
+		goto again;
+	}
+
+	/*
+	 * Find the number of premature exits for each mode, excluding
+	 * the clock-gating mode. If more than fifty percent of the
+	 * samples for a mode exited early, restrict that mode and all
+	 * deeper ones.
+	 */
+	if (history->htmr_wkup != 1) {
+		for (j = 1; j < cpu->nlevels; j++) {
+			uint32_t failed = 0;
+			uint64_t total = 0;
+
+			for (i = 0; i < MAXSAMPLES; i++) {
+				if ((history->mode[i] == j) &&
+					(history->resi[i] < min_residency[j])) {
+					failed++;
+					total += history->resi[i];
+				}
+			}
+			if (failed > (MAXSAMPLES/2)) {
+				*idx_restrict = j;
+				do_div(total, failed);
+				*idx_restrict_time = total;
+				history->stime = ktime_to_us(ktime_get())
+						+ *idx_restrict_time;
+				break;
+			}
+		}
+	}
+	return 0;
+}
+
+static inline void invalidate_predict_history(struct cpuidle_device *dev)
+{
+	struct lpm_history *history = &per_cpu(hist, dev->cpu);
+
+	if (!lpm_prediction)
+		return;
+
+	if (history->hinvalid) {
+		history->hinvalid = 0;
+		history->htmr_wkup = 1;
+		history->stime = 0;
+	}
+}
+
+static void clear_predict_history(void)
+{
+	struct lpm_history *history;
+	int i;
+	unsigned int cpu;
+
+	if (!lpm_prediction)
+		return;
+
+	for_each_possible_cpu(cpu) {
+		history = &per_cpu(hist, cpu);
+		for (i = 0; i < MAXSAMPLES; i++) {
+			history->resi[i] = 0;
+			history->mode[i] = -1;
+		}
+		history->hptr = 0;
+		history->nsamp = 0;
+		history->stime = 0;
+	}
+}
+
+static void update_history(struct cpuidle_device *dev, int idx);
+
+static int cpu_power_select(struct cpuidle_device *dev,
+		struct lpm_cpu *cpu)
+{
+	int best_level = -1;
+	uint32_t latency_us = pm_qos_request_for_cpu(PM_QOS_CPU_DMA_LATENCY,
+							dev->cpu);
+	s64 sleep_us = ktime_to_us(tick_nohz_get_sleep_length());
+	uint32_t modified_time_us = 0;
+	uint32_t next_event_us = 0;
+	int i, idx_restrict;
+	uint32_t lvl_latency_us = 0;
+	uint64_t predicted = 0;
+	uint32_t htime = 0, idx_restrict_time = 0;
+	uint32_t next_wakeup_us = (uint32_t)sleep_us;
+	uint32_t *min_residency = get_per_cpu_min_residency(dev->cpu);
+	uint32_t *max_residency = get_per_cpu_max_residency(dev->cpu);
+
+	if (!cpu)
+		return -EINVAL;
+
+	if ((sleep_disabled && !cpu_isolated(dev->cpu)) || sleep_us < 0)
+		return 0;
+
+	idx_restrict = cpu->nlevels + 1;
+
+	next_event_us = (uint32_t)(ktime_to_us(get_next_event_time(dev->cpu)));
+
+	for (i = 0; i < cpu->nlevels; i++) {
+		struct lpm_cpu_level *level = &cpu->levels[i];
+		struct power_params *pwr_params = &level->pwr;
+		enum msm_pm_sleep_mode mode = level->mode;
+		bool allow;
+
+		allow = lpm_cpu_mode_allow(dev->cpu, i, true);
+
+		if (!allow)
+			continue;
+
+		lvl_latency_us = pwr_params->latency_us;
+
+		if (latency_us < lvl_latency_us)
+			break;
+
+		if (next_event_us) {
+			if (next_event_us < lvl_latency_us)
+				break;
+
+			if (((next_event_us - lvl_latency_us) < sleep_us) ||
+					(next_event_us < sleep_us))
+				next_wakeup_us = next_event_us - lvl_latency_us;
+		}
+
+		if (!i) {
+			/*
+			 * If next_wakeup_us itself is not long enough for
+			 * any low power mode deeper than clock gating,
+			 * do not call prediction.
+			 */
+			if (next_wakeup_us > max_residency[i]) {
+				predicted = lpm_cpuidle_predict(dev, cpu,
+					&idx_restrict, &idx_restrict_time);
+				if (predicted && (predicted < min_residency[i]))
+					predicted = min_residency[i];
+			} else
+				invalidate_predict_history(dev);
+		}
+
+		if (i >= idx_restrict)
+			break;
+
+		best_level = i;
+
+		if (next_event_us && next_event_us < sleep_us &&
+			(mode != MSM_PM_SLEEP_MODE_WAIT_FOR_INTERRUPT))
+			modified_time_us
+				= next_event_us - lvl_latency_us;
+		else
+			modified_time_us = 0;
+
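+		/*
+		 * Stop at the first level deep enough for the expected
+		 * sleep: the predicted residency when available, else
+		 * the next expected wakeup.
+		 */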
+		if (predicted ? (predicted <= max_residency[i])
+			: (next_wakeup_us <= max_residency[i]))
+			break;
+	}
+
+	if (modified_time_us)
+		msm_pm_set_timer(modified_time_us);
+
+	/*
+	 * Start a timer to avoid staying in a shallower mode forever
+	 * in case of misprediction.
+	 */
+	if ((predicted || (idx_restrict != (cpu->nlevels + 1)))
+			&& ((best_level >= 0)
+			&& (best_level < (cpu->nlevels-1)))) {
+		htime = predicted + tmr_add;
+		if (htime == tmr_add)
+			htime = idx_restrict_time;
+		else if (htime > max_residency[best_level])
+			htime = max_residency[best_level];
+
+		if ((next_wakeup_us > htime) &&
+			((next_wakeup_us - htime) > max_residency[best_level]))
+			histtimer_start(htime);
+	}
+
+	trace_cpu_power_select(best_level, sleep_us, latency_us, next_event_us);
+
+	trace_cpu_pred_select(idx_restrict_time ? 2 : (predicted ? 1 : 0),
+			predicted, htime);
+
+	return best_level;
+}
+
+static uint64_t get_cluster_sleep_time(struct lpm_cluster *cluster,
+		struct cpumask *mask, bool from_idle, uint32_t *pred_time)
+{
+	int cpu;
+	int next_cpu = raw_smp_processor_id();
+	ktime_t next_event;
+	struct cpumask online_cpus_in_cluster;
+	struct lpm_history *history;
+	int64_t prediction = LONG_MAX;
+
+	next_event.tv64 = KTIME_MAX;
+	if (!suspend_wake_time)
+		suspend_wake_time = msm_pm_sleep_time_override;
+	if (!from_idle) {
+		if (mask)
+			cpumask_copy(mask, cpumask_of(raw_smp_processor_id()));
+		if (!suspend_wake_time)
+			return ~0ULL;
+		else
+			return USEC_PER_MSEC * suspend_wake_time;
+	}
+
+	cpumask_and(&online_cpus_in_cluster,
+			&cluster->num_children_in_sync, cpu_online_mask);
+
+	for_each_cpu(cpu, &online_cpus_in_cluster) {
+		ktime_t *next_event_c;
+
+		next_event_c = get_next_event_cpu(cpu);
+		if (next_event_c->tv64 < next_event.tv64) {
+			next_event.tv64 = next_event_c->tv64;
+			next_cpu = cpu;
+		}
+
+		if (from_idle && lpm_prediction) {
+			history = &per_cpu(hist, cpu);
+			if (history->stime && (history->stime < prediction))
+				prediction = history->stime;
+		}
+	}
+
+	if (mask)
+		cpumask_copy(mask, cpumask_of(next_cpu));
+
+	if (from_idle && lpm_prediction) {
+		if (prediction > ktime_to_us(ktime_get()))
+			*pred_time = prediction - ktime_to_us(ktime_get());
+	}
+
+	if (ktime_to_us(next_event) > ktime_to_us(ktime_get()))
+		return ktime_to_us(ktime_sub(next_event, ktime_get()));
+	else
+		return 0;
+}
+
+static int cluster_predict(struct lpm_cluster *cluster,
+				uint32_t *pred_us)
+{
+	int i, j;
+	int ret = 0;
+	struct cluster_history *history = &cluster->history;
+	int64_t cur_time = ktime_to_us(ktime_get());
+
+	if (!lpm_prediction)
+		return 0;
+
+	if (history->hinvalid) {
+		history->hinvalid = 0;
+		history->htmr_wkup = 1;
+		history->flag = 0;
+		return ret;
+	}
+
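+	/* Age out samples older than CLUST_SMPL_INVLD_TIME (40 ms) */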
+	if (history->nsamp == MAXSAMPLES) {
+		for (i = 0; i < MAXSAMPLES; i++) {
+			if ((cur_time - history->stime[i])
+					> CLUST_SMPL_INVLD_TIME)
+				history->nsamp--;
+		}
+	}
+
+	if (history->nsamp < MAXSAMPLES) {
+		history->flag = 0;
+		return ret;
+	}
+
+	if (history->flag == 2)
+		history->flag = 0;
+
+	if (history->htmr_wkup != 1) {
+		uint64_t total = 0;
+
+		if (history->flag == 1) {
+			for (i = 0; i < MAXSAMPLES; i++)
+				total += history->resi[i];
+			do_div(total, MAXSAMPLES);
+			*pred_us = total;
+			return 2;
+		}
+
+		for (j = 1; j < cluster->nlevels; j++) {
+			uint32_t failed = 0;
+
+			total = 0;
+			for (i = 0; i < MAXSAMPLES; i++) {
+				if ((history->mode[i] == j) && (history->resi[i]
+				< cluster->levels[j].pwr.min_residency)) {
+					failed++;
+					total += history->resi[i];
+				}
+			}
+
+			if (failed > (MAXSAMPLES-2)) {
+				do_div(total, failed);
+				*pred_us = total;
+				history->flag = 1;
+				return 1;
+			}
+		}
+	}
+
+	return ret;
+}
+
+static void update_cluster_history_time(struct cluster_history *history,
+						int idx, uint64_t start)
+{
+	history->entry_idx = idx;
+	history->entry_time = start;
+}
+
+static void update_cluster_history(struct cluster_history *history, int idx)
+{
+	uint32_t tmr = 0;
+	uint32_t residency = 0;
+	struct lpm_cluster *cluster =
+			container_of(history, struct lpm_cluster, history);
+
+	if (!lpm_prediction)
+		return;
+
+	if ((history->entry_idx == -1) || (history->entry_idx == idx)) {
+		residency = ktime_to_us(ktime_get()) - history->entry_time;
+		history->stime[history->hptr] = history->entry_time;
+	} else
+		return;
+
+	if (history->htmr_wkup) {
+		if (!history->hptr)
+			history->hptr = MAXSAMPLES-1;
+		else
+			history->hptr--;
+
+		history->resi[history->hptr] += residency;
+
+		history->htmr_wkup = 0;
+		tmr = 1;
+	} else {
+		history->resi[history->hptr] = residency;
+	}
+
+	history->mode[history->hptr] = idx;
+
+	history->entry_idx = INT_MIN;
+	history->entry_time = 0;
+
+	if (history->nsamp < MAXSAMPLES)
+		history->nsamp++;
+
+	trace_cluster_pred_hist(cluster->cluster_name,
+		history->mode[history->hptr], history->resi[history->hptr],
+		history->hptr, tmr);
+
+	(history->hptr)++;
+
+	if (history->hptr >= MAXSAMPLES)
+		history->hptr = 0;
+}
+
+static void clear_cl_history_each(struct cluster_history *history)
+{
+	int i;
+
+	for (i = 0; i < MAXSAMPLES; i++) {
+		history->resi[i] = 0;
+		history->mode[i] = -1;
+		history->stime[i] = 0;
+	}
+	history->hptr = 0;
+	history->nsamp = 0;
+	history->flag = 0;
+	history->hinvalid = 0;
+	history->htmr_wkup = 0;
+}
+
+static void clear_cl_predict_history(void)
+{
+	struct lpm_cluster *cluster = lpm_root_node;
+	struct list_head *list;
+
+	if (!lpm_prediction)
+		return;
+
+	clear_cl_history_each(&cluster->history);
+
+	list_for_each(list, &cluster->child) {
+		struct lpm_cluster *n;
+
+		n = list_entry(list, typeof(*n), list);
+		clear_cl_history_each(&n->history);
+	}
+}
+
+static int cluster_select(struct lpm_cluster *cluster, bool from_idle,
+							int *ispred)
+{
+	int best_level = -1;
+	int i;
+	struct cpumask mask;
+	uint32_t latency_us = ~0U;
+	uint32_t sleep_us;
+	uint32_t cpupred_us = 0, pred_us = 0;
+	int pred_mode = 0, predicted = 0;
+
+	if (!cluster)
+		return -EINVAL;
+
+	sleep_us = (uint32_t)get_cluster_sleep_time(cluster, NULL,
+						from_idle, &cpupred_us);
+
+	if (from_idle) {
+		pred_mode = cluster_predict(cluster, &pred_us);
+
+		if (cpupred_us && pred_mode && (cpupred_us < pred_us))
+			pred_us = cpupred_us;
+
+		if (pred_us && pred_mode && (pred_us < sleep_us))
+			predicted = 1;
+
+		if (predicted && (pred_us == cpupred_us))
+			predicted = 2;
+	}
+
+	if (cpumask_and(&mask, cpu_online_mask, &cluster->child_cpus))
+		latency_us = pm_qos_request_for_cpumask(PM_QOS_CPU_DMA_LATENCY,
+							&mask);
+
+	/*
+	 * If at least one of the cores in the cluster is online, the
+	 * cluster low power modes should be determined by the idle
+	 * characteristics even if the last core enters the low power
+	 * mode as a part of hotplug.
+	 */
+
+	if (!from_idle && num_online_cpus() > 1 &&
+		cpumask_intersects(&cluster->child_cpus, cpu_online_mask))
+		from_idle = true;
+
+	for (i = 0; i < cluster->nlevels; i++) {
+		struct lpm_cluster_level *level = &cluster->levels[i];
+		struct power_params *pwr_params = &level->pwr;
+
+		if (!lpm_cluster_mode_allow(cluster, i, from_idle))
+			continue;
+
+		if (level->last_core_only &&
+			cpumask_weight(cpu_online_mask) > 1)
+			continue;
+
+		if (!cpumask_equal(&cluster->num_children_in_sync,
+					&level->num_cpu_votes))
+			continue;
+
+		if (from_idle && latency_us < pwr_params->latency_us)
+			break;
+
+		if (sleep_us < pwr_params->time_overhead_us)
+			break;
+
+		if (suspend_in_progress && from_idle && level->notify_rpm)
+			continue;
+
+		if (level->notify_rpm && msm_rpm_waiting_for_ack())
+			continue;
+
+		best_level = i;
+
+		if (from_idle &&
+			(predicted ? (pred_us <= pwr_params->max_residency)
+			: (sleep_us <= pwr_params->max_residency)))
+			break;
+	}
+
+	if ((best_level == (cluster->nlevels - 1)) && (pred_mode == 2))
+		cluster->history.flag = 2;
+
+	*ispred = predicted;
+
+	trace_cluster_pred_select(cluster->cluster_name, best_level, sleep_us,
+						latency_us, predicted, pred_us);
+
+	return best_level;
+}
+
+static void cluster_notify(struct lpm_cluster *cluster,
+		struct lpm_cluster_level *level, bool enter)
+{
+	if (level->is_reset && enter)
+		cpu_cluster_pm_enter(cluster->aff_level);
+	else if (level->is_reset && !enter)
+		cpu_cluster_pm_exit(cluster->aff_level);
+}
+
+static int cluster_configure(struct lpm_cluster *cluster, int idx,
+		bool from_idle, int predicted)
+{
+	struct lpm_cluster_level *level = &cluster->levels[idx];
+	struct cpumask online_cpus;
+	int ret, i;
+
+	cpumask_and(&online_cpus, &cluster->num_children_in_sync,
+					cpu_online_mask);
+
+	if (!cpumask_equal(&cluster->num_children_in_sync, &cluster->child_cpus)
+			|| is_IPI_pending(&online_cpus)) {
+		return -EPERM;
+	}
+
+	if (idx != cluster->default_level) {
+		update_debug_pc_event(CLUSTER_ENTER, idx,
+			cluster->num_children_in_sync.bits[0],
+			cluster->child_cpus.bits[0], from_idle);
+		trace_cluster_enter(cluster->cluster_name, idx,
+			cluster->num_children_in_sync.bits[0],
+			cluster->child_cpus.bits[0], from_idle);
+		lpm_stats_cluster_enter(cluster->stats, idx);
+
+		if (from_idle && lpm_prediction)
+			update_cluster_history_time(&cluster->history, idx,
+						ktime_to_us(ktime_get()));
+	}
+
+	for (i = 0; i < cluster->ndevices; i++) {
+		ret = set_device_mode(cluster, i, level);
+		if (ret)
+			goto failed_set_mode;
+	}
+
+	if (level->notify_rpm) {
+		struct cpumask nextcpu, *cpumask;
+		uint64_t us;
+		uint32_t pred_us;
+		uint64_t sec;
+		uint64_t nsec;
+
+		us = get_cluster_sleep_time(cluster, &nextcpu,
+						from_idle, &pred_us);
+		cpumask = level->disable_dynamic_routing ? NULL : &nextcpu;
+
+		ret = msm_rpm_enter_sleep(0, cpumask);
+		if (ret) {
+			pr_info("Failed msm_rpm_enter_sleep() rc = %d\n", ret);
+			goto failed_set_mode;
+		}
+
+		clear_predict_history();
+		clear_cl_predict_history();
+
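+		/*
+		 * Convert the sleep duration from microseconds to sleep
+		 * clock (SCLK_HZ) ticks for MPM wakeup programming.
+		 */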
+		us = us + 1;
+		sec = us;
+		do_div(sec, USEC_PER_SEC);
+		nsec = us - sec * USEC_PER_SEC;
+
+		sec = sec * SCLK_HZ;
+		if (nsec > 0) {
+			nsec = nsec * NSEC_PER_USEC;
+			do_div(nsec, NSEC_PER_SEC/SCLK_HZ);
+		}
+		us = sec + nsec;
+		msm_mpm_enter_sleep(us, from_idle, cpumask);
+	}
+
+	/* Notify the cluster enter event after successful configuration */
+	cluster_notify(cluster, level, true);
+
+	sched_set_cluster_dstate(&cluster->child_cpus, idx, 0, 0);
+
+	cluster->last_level = idx;
+
+	if (predicted && (idx < (cluster->nlevels - 1))) {
+		struct power_params *pwr_params = &cluster->levels[idx].pwr;
+
+		tick_broadcast_exit();
+		clusttimer_start(cluster, pwr_params->max_residency + tmr_add);
+		tick_broadcast_enter();
+	}
+
+	return 0;
+
+failed_set_mode:
+
+	for (i = 0; i < cluster->ndevices; i++) {
+		int rc = 0;
+		level = &cluster->levels[cluster->default_level];
+		rc = set_device_mode(cluster, i, level);
+		BUG_ON(rc);
+	}
+	return ret;
+}
+
+static void cluster_prepare(struct lpm_cluster *cluster,
+		const struct cpumask *cpu, int child_idx, bool from_idle,
+		int64_t start_time)
+{
+	int i;
+	int predicted = 0;
+
+	if (!cluster)
+		return;
+
+	if (cluster->min_child_level > child_idx)
+		return;
+
+	spin_lock(&cluster->sync_lock);
+	cpumask_or(&cluster->num_children_in_sync, cpu,
+			&cluster->num_children_in_sync);
+
+	for (i = 0; i < cluster->nlevels; i++) {
+		struct lpm_cluster_level *lvl = &cluster->levels[i];
+
+		if (child_idx >= lvl->min_child_level)
+			cpumask_or(&lvl->num_cpu_votes, cpu,
+					&lvl->num_cpu_votes);
+	}
+
+	/*
+	 * cluster_select() does not make any configuration changes, so it
+	 * is OK to release the lock here. If a core wakes up for a rude
+	 * request, it need not wait for another core to finish its cluster
+	 * selection and configuration process.
+	 */
+
+	if (!cpumask_equal(&cluster->num_children_in_sync,
+				&cluster->child_cpus))
+		goto failed;
+
+	i = cluster_select(cluster, from_idle, &predicted);
+
+	if (((i < 0) || (i == cluster->default_level))
+				&& predicted && from_idle) {
+		update_cluster_history_time(&cluster->history,
+					-1, ktime_to_us(ktime_get()));
+
+		if (i < 0) {
+			struct power_params *pwr_params =
+						&cluster->levels[0].pwr;
+
+			tick_broadcast_exit();
+			clusttimer_start(cluster,
+					pwr_params->max_residency + tmr_add);
+			tick_broadcast_enter();
+		}
+	}
+
+	if (i < 0)
+		goto failed;
+
+	if (cluster_configure(cluster, i, from_idle, predicted))
+		goto failed;
+
+	cluster->stats->sleep_time = start_time;
+	cluster_prepare(cluster->parent, &cluster->num_children_in_sync, i,
+			from_idle, start_time);
+
+	spin_unlock(&cluster->sync_lock);
+	return;
+failed:
+	spin_unlock(&cluster->sync_lock);
+	cluster->stats->sleep_time = 0;
+	return;
+}
+
+static void cluster_unprepare(struct lpm_cluster *cluster,
+		const struct cpumask *cpu, int child_idx, bool from_idle,
+		int64_t end_time)
+{
+	struct lpm_cluster_level *level;
+	bool first_cpu;
+	int last_level, i, ret;
+
+	if (!cluster)
+		return;
+
+	if (cluster->min_child_level > child_idx)
+		return;
+
+	spin_lock(&cluster->sync_lock);
+	last_level = cluster->default_level;
+	first_cpu = cpumask_equal(&cluster->num_children_in_sync,
+				&cluster->child_cpus);
+	cpumask_andnot(&cluster->num_children_in_sync,
+			&cluster->num_children_in_sync, cpu);
+
+	for (i = 0; i < cluster->nlevels; i++) {
+		struct lpm_cluster_level *lvl = &cluster->levels[i];
+
+		if (child_idx >= lvl->min_child_level)
+			cpumask_andnot(&lvl->num_cpu_votes,
+					&lvl->num_cpu_votes, cpu);
+	}
+
+	if (from_idle && first_cpu &&
+		(cluster->last_level == cluster->default_level))
+		update_cluster_history(&cluster->history, cluster->last_level);
+
+	if (!first_cpu || cluster->last_level == cluster->default_level)
+		goto unlock_return;
+
+	if (cluster->stats->sleep_time)
+		cluster->stats->sleep_time = end_time -
+			cluster->stats->sleep_time;
+	lpm_stats_cluster_exit(cluster->stats, cluster->last_level, true);
+
+	level = &cluster->levels[cluster->last_level];
+	if (level->notify_rpm) {
+		msm_rpm_exit_sleep();
+
+		/*
+		 * If RPM bumps up CX to turbo, remove the CX turbo vote
+		 * during exit of RPM-assisted power collapse to reduce
+		 * the power impact.
+		 */
+
+		lpm_wa_cx_unvote_send();
+		msm_mpm_exit_sleep(from_idle);
+	}
+
+	update_debug_pc_event(CLUSTER_EXIT, cluster->last_level,
+			cluster->num_children_in_sync.bits[0],
+			cluster->child_cpus.bits[0], from_idle);
+	trace_cluster_exit(cluster->cluster_name, cluster->last_level,
+			cluster->num_children_in_sync.bits[0],
+			cluster->child_cpus.bits[0], from_idle);
+
+	last_level = cluster->last_level;
+	cluster->last_level = cluster->default_level;
+
+	for (i = 0; i < cluster->ndevices; i++) {
+		level = &cluster->levels[cluster->default_level];
+		ret = set_device_mode(cluster, i, level);
+		BUG_ON(ret);
+	}
+	sched_set_cluster_dstate(&cluster->child_cpus, 0, 0, 0);
+
+	cluster_notify(cluster, &cluster->levels[last_level], false);
+
+	if (from_idle)
+		update_cluster_history(&cluster->history, last_level);
+
+	cluster_unprepare(cluster->parent, &cluster->child_cpus,
+			last_level, from_idle, end_time);
+unlock_return:
+	spin_unlock(&cluster->sync_lock);
+}
+
+static inline void cpu_prepare(struct lpm_cluster *cluster, int cpu_index,
+				bool from_idle)
+{
+	struct lpm_cpu_level *cpu_level = &cluster->cpu->levels[cpu_index];
+	bool jtag_save_restore =
+			cluster->cpu->levels[cpu_index].jtag_save_restore;
+
+	/*
+	 * Use the broadcast timer for aggregating sleep modes within a
+	 * cluster. A broadcast timer is used in the following scenarios:
+	 * 1) The architected timer HW gets reset during certain low power
+	 * modes and the core relies on an external (broadcast) timer to
+	 * wake up from sleep. This information is passed through device
+	 * tree.
+	 * 2) The CPU low power mode could trigger a system low power mode.
+	 * The low power module relies on the broadcast timer to aggregate
+	 * the next wakeup within a cluster, in which case the CPU switches
+	 * over to the broadcast timer.
+	 */
+	if (from_idle && (cpu_level->use_bc_timer ||
+			(cpu_index >= cluster->min_child_level)))
+		tick_broadcast_enter();
+
+	if (from_idle && ((cpu_level->mode == MSM_PM_SLEEP_MODE_POWER_COLLAPSE)
+		|| (cpu_level->mode ==
+			MSM_PM_SLEEP_MODE_POWER_COLLAPSE_STANDALONE)
+			|| (cpu_level->is_reset)))
+		cpu_pm_enter();
+
+	/*
+	 * Save JTAG registers for 8996v1.0 & 8996v2.x in C4 LPM
+	 */
+	if (jtag_save_restore)
+		msm_jtag_save_state();
+}
+
+static inline void cpu_unprepare(struct lpm_cluster *cluster, int cpu_index,
+				bool from_idle)
+{
+	struct lpm_cpu_level *cpu_level = &cluster->cpu->levels[cpu_index];
+	bool jtag_save_restore =
+			cluster->cpu->levels[cpu_index].jtag_save_restore;
+
+	if (from_idle && (cpu_level->use_bc_timer ||
+			(cpu_index >= cluster->min_child_level)))
+		tick_broadcast_exit();
+
+	if (from_idle && ((cpu_level->mode == MSM_PM_SLEEP_MODE_POWER_COLLAPSE)
+		|| (cpu_level->mode ==
+			MSM_PM_SLEEP_MODE_POWER_COLLAPSE_STANDALONE)
+		|| cpu_level->is_reset))
+		cpu_pm_exit();
+
+	/*
+	 * Restore JTAG registers for 8996v1.0 & 8996v2.x in C4 LPM
+	 */
+	if (jtag_save_restore)
+		msm_jtag_restore_state();
+}
+
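+/*
+ * Compose the PSCI state_id by walking up the cluster hierarchy: each
+ * fully-synced cluster that is entering a non-default level ORs its
+ * mode bits into the id and bumps the affinity level.
+ */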
+int get_cluster_id(struct lpm_cluster *cluster, int *aff_lvl)
+{
+	int state_id = 0;
+
+	if (!cluster)
+		return 0;
+
+	spin_lock(&cluster->sync_lock);
+
+	if (!cpumask_equal(&cluster->num_children_in_sync,
+				&cluster->child_cpus))
+		goto unlock_and_return;
+
+	state_id |= get_cluster_id(cluster->parent, aff_lvl);
+
+	if (cluster->last_level != cluster->default_level) {
+		struct lpm_cluster_level *level
+			= &cluster->levels[cluster->last_level];
+
+		state_id |= (level->psci_id & cluster->psci_mode_mask)
+					<< cluster->psci_mode_shift;
+		(*aff_lvl)++;
+	}
+unlock_and_return:
+	spin_unlock(&cluster->sync_lock);
+	return state_id;
+}
+
+#if !defined(CONFIG_CPU_V7)
+bool psci_enter_sleep(struct lpm_cluster *cluster, int idx, bool from_idle)
+{
+	/*
+	 * idx = 0 is the default LPM state
+	 */
+	if (!idx) {
+		stop_critical_timings();
+		wfi();
+		start_critical_timings();
+		return 1;
+	} else {
+		int affinity_level = 0;
+		int state_id = get_cluster_id(cluster, &affinity_level);
+		int power_state =
+			PSCI_POWER_STATE(cluster->cpu->levels[idx].is_reset);
+		bool success = false;
+
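+		/*
+		 * hyp_psci levels are entered through a custom SMC
+		 * rather than the standard PSCI CPU_SUSPEND path below.
+		 */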
+		if (cluster->cpu->levels[idx].hyp_psci) {
+			stop_critical_timings();
+			__invoke_psci_fn_smc(0xC4000021, 0, 0, 0);
+			start_critical_timings();
+			return 1;
+		}
+
+		affinity_level = PSCI_AFFINITY_LEVEL(affinity_level);
+		state_id |= (power_state | affinity_level
+			| cluster->cpu->levels[idx].psci_id);
+
+		update_debug_pc_event(CPU_ENTER, state_id,
+						0xdeaffeed, 0xdeaffeed, true);
+		stop_critical_timings();
+		success = !arm_cpuidle_suspend(state_id);
+		start_critical_timings();
+		update_debug_pc_event(CPU_EXIT, state_id,
+						success, 0xdeaffeed, true);
+		return success;
+	}
+}
+#elif defined(CONFIG_ARM_PSCI)
+bool psci_enter_sleep(struct lpm_cluster *cluster, int idx, bool from_idle)
+{
+	if (!idx) {
+		stop_critical_timings();
+		wfi();
+		start_critical_timings();
+		return 1;
+	} else {
+		int affinity_level = 0;
+		int state_id = get_cluster_id(cluster, &affinity_level);
+		int power_state =
+			PSCI_POWER_STATE(cluster->cpu->levels[idx].is_reset);
+		bool success = false;
+
+		affinity_level = PSCI_AFFINITY_LEVEL(affinity_level);
+		state_id |= (power_state | affinity_level
+			| cluster->cpu->levels[idx].psci_id);
+
+		update_debug_pc_event(CPU_ENTER, state_id,
+						0xdeaffeed, 0xdeaffeed, true);
+		stop_critical_timings();
+		success = !arm_cpuidle_suspend(state_id);
+		start_critical_timings();
+		update_debug_pc_event(CPU_EXIT, state_id,
+						success, 0xdeaffeed, true);
+		return success;
+	}
+}
+#else
+bool psci_enter_sleep(struct lpm_cluster *cluster, int idx, bool from_idle)
+{
+	WARN_ONCE(true, "PSCI cpu_suspend ops not supported\n");
+	return false;
+}
+#endif
+
+static int lpm_cpuidle_select(struct cpuidle_driver *drv,
+		struct cpuidle_device *dev)
+{
+	struct lpm_cluster *cluster = per_cpu(cpu_cluster, dev->cpu);
+	int idx;
+
+	if (!cluster)
+		return 0;
+
+	idx = cpu_power_select(dev, cluster->cpu);
+
+	if (idx < 0)
+		return -EPERM;
+
+	return idx;
+}
+
+static void update_history(struct cpuidle_device *dev, int idx)
+{
+	struct lpm_history *history = &per_cpu(hist, dev->cpu);
+	uint32_t tmr = 0;
+
+	if (!lpm_prediction)
+		return;
+
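+	/*
+	 * A wakeup caused by the history timer splits one real sleep
+	 * across two samples; step hptr back and accumulate into the
+	 * previous slot instead of recording a new one.
+	 */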
+	if (history->htmr_wkup) {
+		if (!history->hptr)
+			history->hptr = MAXSAMPLES-1;
+		else
+			history->hptr--;
+
+		history->resi[history->hptr] += dev->last_residency;
+		history->htmr_wkup = 0;
+		tmr = 1;
+	} else
+		history->resi[history->hptr] = dev->last_residency;
+
+	history->mode[history->hptr] = idx;
+
+	trace_cpu_pred_hist(history->mode[history->hptr],
+		history->resi[history->hptr], history->hptr, tmr);
+
+	if (history->nsamp < MAXSAMPLES)
+		history->nsamp++;
+
+	(history->hptr)++;
+	if (history->hptr >= MAXSAMPLES)
+		history->hptr = 0;
+}
+
+static int lpm_cpuidle_enter(struct cpuidle_device *dev,
+		struct cpuidle_driver *drv, int idx)
+{
+	struct lpm_cluster *cluster = per_cpu(cpu_cluster, dev->cpu);
+	bool success = false;
+	const struct cpumask *cpumask = get_cpu_mask(dev->cpu);
+	int64_t start_time = ktime_to_ns(ktime_get()), end_time;
+	struct power_params *pwr_params;
+
+	if (idx < 0)
+		return -EINVAL;
+
+	pwr_params = &cluster->cpu->levels[idx].pwr;
+	sched_set_cpu_cstate(smp_processor_id(), idx + 1,
+		pwr_params->energy_overhead, pwr_params->latency_us);
+
+	cpu_prepare(cluster, idx, true);
+	cluster_prepare(cluster, cpumask, idx, true, ktime_to_ns(ktime_get()));
+
+	trace_cpu_idle_enter(idx);
+	lpm_stats_cpu_enter(idx, start_time);
+
+	if (need_resched())
+		goto exit;
+
+	BUG_ON(!use_psci);
+	success = psci_enter_sleep(cluster, idx, true);
+
+exit:
+	end_time = ktime_to_ns(ktime_get());
+	lpm_stats_cpu_exit(idx, end_time, success);
+
+	cluster_unprepare(cluster, cpumask, idx, true, end_time);
+	cpu_unprepare(cluster, idx, true);
+	sched_set_cpu_cstate(smp_processor_id(), 0, 0, 0);
+	end_time = ktime_to_ns(ktime_get()) - start_time;
+	do_div(end_time, 1000);
+	dev->last_residency = end_time;
+	update_history(dev, idx);
+	trace_cpu_idle_exit(idx, success);
+	local_irq_enable();
+	if (lpm_prediction) {
+		histtimer_cancel();
+		clusttimer_cancel();
+	}
+	return idx;
+}
+
+#ifdef CONFIG_CPU_IDLE_MULTIPLE_DRIVERS
+static int cpuidle_register_cpu(struct cpuidle_driver *drv,
+		struct cpumask *mask)
+{
+	struct cpuidle_device *device;
+	int cpu, ret;
+
+	if (!mask || !drv)
+		return -EINVAL;
+
+	drv->cpumask = mask;
+	ret = cpuidle_register_driver(drv);
+	if (ret) {
+		pr_err("Failed to register cpuidle driver %d\n", ret);
+		goto failed_driver_register;
+	}
+
+	for_each_cpu(cpu, mask) {
+		device = &per_cpu(cpuidle_dev, cpu);
+		device->cpu = cpu;
+
+		ret = cpuidle_register_device(device);
+		if (ret) {
+			pr_err("Failed to register cpuidle driver for cpu:%u\n",
+					cpu);
+			goto failed_driver_register;
+		}
+	}
+	return ret;
+failed_driver_register:
+	for_each_cpu(cpu, mask)
+		cpuidle_unregister_driver(drv);
+	return ret;
+}
+#else
+static int cpuidle_register_cpu(struct cpuidle_driver *drv,
+		struct  cpumask *mask)
+{
+	return cpuidle_register(drv, NULL);
+}
+#endif
+
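+/*
+ * Rating 30 outranks the stock menu (20) and ladder (10) governors,
+ * so this governor is selected automatically once registered.
+ */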
+static struct cpuidle_governor lpm_governor = {
+	.name =		"qcom",
+	.rating =	30,
+	.select =	lpm_cpuidle_select,
+	.owner =	THIS_MODULE,
+};
+
+static int cluster_cpuidle_register(struct lpm_cluster *cl)
+{
+	int i = 0, ret = 0;
+	unsigned cpu;
+	struct lpm_cluster *p = NULL;
+
+	if (!cl->cpu) {
+		struct lpm_cluster *n;
+
+		list_for_each_entry(n, &cl->child, list) {
+			ret = cluster_cpuidle_register(n);
+			if (ret)
+				break;
+		}
+		return ret;
+	}
+
+	cl->drv = kzalloc(sizeof(*cl->drv), GFP_KERNEL);
+	if (!cl->drv)
+		return -ENOMEM;
+
+	cl->drv->name = "msm_idle";
+
+	for (i = 0; i < cl->cpu->nlevels; i++) {
+		struct cpuidle_state *st = &cl->drv->states[i];
+		struct lpm_cpu_level *cpu_level = &cl->cpu->levels[i];
+
+		snprintf(st->name, CPUIDLE_NAME_LEN, "C%u", i);
+		snprintf(st->desc, CPUIDLE_DESC_LEN, "%s",
+			cpu_level->name);
+		st->flags = 0;
+		st->exit_latency = cpu_level->pwr.latency_us;
+		st->power_usage = cpu_level->pwr.ss_power;
+		st->target_residency = 0;
+		st->enter = lpm_cpuidle_enter;
+	}
+
+	cl->drv->state_count = cl->cpu->nlevels;
+	cl->drv->safe_state_index = 0;
+	for_each_cpu(cpu, &cl->child_cpus)
+		per_cpu(cpu_cluster, cpu) = cl;
+
+	for_each_possible_cpu(cpu) {
+		if (cpu_online(cpu))
+			continue;
+		p = per_cpu(cpu_cluster, cpu);
+		while (p) {
+			int j;
+			spin_lock(&p->sync_lock);
+			cpumask_set_cpu(cpu, &p->num_children_in_sync);
+			for (j = 0; j < p->nlevels; j++)
+				cpumask_copy(&p->levels[j].num_cpu_votes,
+						&p->num_children_in_sync);
+			spin_unlock(&p->sync_lock);
+			p = p->parent;
+		}
+	}
+	ret = cpuidle_register_cpu(cl->drv, &cl->child_cpus);
+
+	if (ret) {
+		kfree(cl->drv);
+		return -ENOMEM;
+	}
+	return 0;
+}
+
+/**
+ * init_lpm - initializes the governor
+ */
+static int __init init_lpm(void)
+{
+	return cpuidle_register_governor(&lpm_governor);
+}
+
+postcore_initcall(init_lpm);
+
+static void register_cpu_lpm_stats(struct lpm_cpu *cpu,
+		struct lpm_cluster *parent)
+{
+	const char **level_name;
+	int i;
+
+	level_name = kzalloc(cpu->nlevels * sizeof(*level_name), GFP_KERNEL);
+
+	if (!level_name)
+		return;
+
+	for (i = 0; i < cpu->nlevels; i++)
+		level_name[i] = cpu->levels[i].name;
+
+	lpm_stats_config_level("cpu", level_name, cpu->nlevels,
+			parent->stats, &parent->child_cpus);
+
+	kfree(level_name);
+}
+
+static void register_cluster_lpm_stats(struct lpm_cluster *cl,
+		struct lpm_cluster *parent)
+{
+	const char **level_name;
+	int i;
+	struct lpm_cluster *child;
+
+	if (!cl)
+		return;
+
+	level_name = kzalloc(cl->nlevels * sizeof(*level_name), GFP_KERNEL);
+
+	if (!level_name)
+		return;
+
+	for (i = 0; i < cl->nlevels; i++)
+		level_name[i] = cl->levels[i].level_name;
+
+	cl->stats = lpm_stats_config_level(cl->cluster_name, level_name,
+			cl->nlevels, parent ? parent->stats : NULL, NULL);
+
+	kfree(level_name);
+
+	if (cl->cpu) {
+		register_cpu_lpm_stats(cl->cpu, cl);
+		return;
+	}
+
+	list_for_each_entry(child, &cl->child, list)
+		register_cluster_lpm_stats(child, cl);
+}
+
+static int lpm_suspend_prepare(void)
+{
+	suspend_in_progress = true;
+	msm_mpm_suspend_prepare();
+	lpm_stats_suspend_enter();
+
+	return 0;
+}
+
+static void lpm_suspend_wake(void)
+{
+	suspend_in_progress = false;
+	msm_mpm_suspend_wake();
+	lpm_stats_suspend_exit();
+}
+
+static int lpm_suspend_enter(suspend_state_t state)
+{
+	int cpu = raw_smp_processor_id();
+	struct lpm_cluster *cluster = per_cpu(cpu_cluster, cpu);
+	struct lpm_cpu *lpm_cpu = cluster->cpu;
+	const struct cpumask *cpumask = get_cpu_mask(cpu);
+	int idx;
+
+	for (idx = lpm_cpu->nlevels - 1; idx >= 0; idx--) {
+		if (lpm_cpu_mode_allow(cpu, idx, false))
+			break;
+	}
+	if (idx < 0) {
+		pr_err("Failed suspend\n");
+		return 0;
+	}
+	cpu_prepare(cluster, idx, false);
+	cluster_prepare(cluster, cpumask, idx, false, 0);
+	if (idx > 0)
+		update_debug_pc_event(CPU_ENTER, idx, 0xdeaffeed,
+					0xdeaffeed, false);
+
+	/*
+	 * Print the clocks which are enabled during system suspend
+	 * This debug information is useful to know which are the
+	 * clocks that are enabled and preventing the system level
+	 * LPMs(XO and Vmin).
+	 */
+	clock_debug_print_enabled();
+
+	BUG_ON(!use_psci);
+	psci_enter_sleep(cluster, idx, true);
+
+	if (idx > 0)
+		update_debug_pc_event(CPU_EXIT, idx, true, 0xdeaffeed,
+					false);
+
+	cluster_unprepare(cluster, cpumask, idx, false, 0);
+	cpu_unprepare(cluster, idx, false);
+	return 0;
+}
+
+static const struct platform_suspend_ops lpm_suspend_ops = {
+	.enter = lpm_suspend_enter,
+	.valid = suspend_valid_only_mem,
+	.prepare_late = lpm_suspend_prepare,
+	.wake = lpm_suspend_wake,
+};
+
+static int lpm_probe(struct platform_device *pdev)
+{
+	int ret;
+	int size;
+	struct kobject *module_kobj = NULL;
+	struct md_region md_entry;
+
+	get_online_cpus();
+	lpm_root_node = lpm_of_parse_cluster(pdev);
+
+	if (IS_ERR_OR_NULL(lpm_root_node)) {
+		pr_err("%s(): Failed to probe low power modes\n", __func__);
+		put_online_cpus();
+		return PTR_ERR(lpm_root_node);
+	}
+
+	if (print_parsed_dt)
+		cluster_dt_walkthrough(lpm_root_node);
+
+	/*
+	 * Register the hotplug notifier before the broadcast timer setup to
+	 * prevent a race where a broadcast timer might not be set up for a
+	 * core. This is a bug in the existing code, but there are no known
+	 * issues, possibly because of how late lpm_levels gets initialized.
+	 */
+	suspend_set_ops(&lpm_suspend_ops);
+	hrtimer_init(&lpm_hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+	hrtimer_init(&histtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+	cluster_timer_init(lpm_root_node);
+
+	ret = remote_spin_lock_init(&scm_handoff_lock, SCM_HANDOFF_LOCK_ID);
+	if (ret) {
+		pr_err("%s: Failed initializing scm_handoff_lock (%d)\n",
+			__func__, ret);
+		put_online_cpus();
+		return ret;
+	}
+
+	size = num_dbg_elements * sizeof(struct lpm_debug);
+	lpm_debug = dma_alloc_coherent(&pdev->dev, size,
+			&lpm_debug_phys, GFP_KERNEL);
+	register_cluster_lpm_stats(lpm_root_node, NULL);
+
+	ret = cluster_cpuidle_register(lpm_root_node);
+	put_online_cpus();
+	if (ret) {
+		pr_err("%s()Failed to register with cpuidle framework\n",
+				__func__);
+		goto failed;
+	}
+	register_hotcpu_notifier(&lpm_cpu_nblk);
+	module_kobj = kset_find_obj(module_kset, KBUILD_MODNAME);
+	if (!module_kobj) {
+		pr_err("%s: cannot find kobject for module %s\n",
+			__func__, KBUILD_MODNAME);
+		ret = -ENOENT;
+		goto failed;
+	}
+
+	ret = create_cluster_lvl_nodes(lpm_root_node, module_kobj);
+	if (ret) {
+		pr_err("%s(): Failed to create cluster level nodes\n",
+				__func__);
+		goto failed;
+	}
+
+	/* Add lpm_debug to Minidump */
+	strlcpy(md_entry.name, "KLPMDEBUG", sizeof(md_entry.name));
+	md_entry.virt_addr = (uintptr_t)lpm_debug;
+	md_entry.phys_addr = lpm_debug_phys;
+	md_entry.size = size;
+	if (msm_minidump_add_region(&md_entry))
+		pr_info("Failed to add lpm_debug in Minidump\n");
+
+	return 0;
+failed:
+	free_cluster_node(lpm_root_node);
+	lpm_root_node = NULL;
+	return ret;
+}
+
+static const struct of_device_id lpm_mtch_tbl[] = {
+	{.compatible = "qcom,lpm-levels"},
+	{},
+};
+
+static struct platform_driver lpm_driver = {
+	.probe = lpm_probe,
+	.driver = {
+		.name = "lpm-levels",
+		.owner = THIS_MODULE,
+		.of_match_table = lpm_mtch_tbl,
+	},
+};
+
+static int __init lpm_levels_module_init(void)
+{
+	int rc;
+
+	rc = platform_driver_register(&lpm_driver);
+	if (rc) {
+		pr_info("Error registering %s\n", lpm_driver.driver.name);
+		goto fail;
+	}
+
+#ifdef CONFIG_ARM_PSCI
+	rc = set_cpuidle_ops();
+	if (rc) {
+		pr_err("%s(): Failed to set cpuidle ops\n", __func__);
+		goto fail;
+	}
+#endif
+
+fail:
+	return rc;
+}
+late_initcall(lpm_levels_module_init);
+
+enum msm_pm_l2_scm_flag lpm_cpu_pre_pc_cb(unsigned int cpu)
+{
+	struct lpm_cluster *cluster = per_cpu(cpu_cluster, cpu);
+	enum msm_pm_l2_scm_flag retflag = MSM_SCM_L2_ON;
+
+	/*
+	 * No need to acquire the lock if the probe isn't completed yet.
+	 * If a hotplug happens before the lpm probe, we want to make sure
+	 * that L2 is flushed, since stale contents could cause incoherency
+	 * on a cluster architecture. This doesn't affect the idle case, as
+	 * the idle driver isn't registered before the probe function runs.
+	 */
+	if (!cluster)
+		return MSM_SCM_L2_OFF;
+
+	/*
+	 * Assumes L2 only. What/how parameters get passed into TZ
+	 * determines how this function reports this info back in msm-pm.c.
+	 */
+	spin_lock(&cluster->sync_lock);
+
+	if (!cluster->lpm_dev) {
+		retflag = MSM_SCM_L2_OFF;
+		goto unlock_and_return;
+	}
+
+	if (!cpumask_equal(&cluster->num_children_in_sync,
+						&cluster->child_cpus))
+		goto unlock_and_return;
+
+	if (cluster->lpm_dev)
+		retflag = cluster->lpm_dev->tz_flag;
+	/*
+	 * The scm_handoff_lock will be released by the secure monitor.
+	 * It is used to serialize power-collapses from this point on,
+	 * so that both Linux and the secure context have a consistent
+	 * view regarding the number of running cpus (cpu_count).
+	 *
+	 * It must be acquired before releasing the cluster lock.
+	 */
+unlock_and_return:
+	update_debug_pc_event(PRE_PC_CB, retflag, 0xdeadbeef, 0xdeadbeef,
+			0xdeadbeef);
+	trace_pre_pc_cb(retflag);
+	remote_spin_lock_rlock_id(&scm_handoff_lock,
+				  REMOTE_SPINLOCK_TID_START + cpu);
+	spin_unlock(&cluster->sync_lock);
+	return retflag;
+}
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/cpuidle/lpm-levels.h	2019-01-22 16:16:23.095242712 +0100
@@ -0,0 +1,166 @@
+/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <soc/qcom/pm.h>
+#include <soc/qcom/spm.h>
+
+#define NR_LPM_LEVELS 8
+#define MAXSAMPLES 5
+#define CLUST_SMPL_INVLD_TIME 40000
+
+extern bool use_psci;
+
+struct lpm_lookup_table {
+	uint32_t modes;
+	const char *mode_name;
+};
+
+struct power_params {
+	uint32_t latency_us;		/* Enter + Exit latency */
+	uint32_t ss_power;		/* Steady state power */
+	uint32_t energy_overhead;	/* Enter + exit over head */
+	uint32_t time_overhead_us;	/* Enter + exit overhead */
+	uint32_t residencies[NR_LPM_LEVELS];
+	uint32_t min_residency;
+	uint32_t max_residency;
+};
+
+struct lpm_cpu_level {
+	const char *name;
+	enum msm_pm_sleep_mode mode;
+	bool use_bc_timer;
+	struct power_params pwr;
+	unsigned int psci_id;
+	bool is_reset;
+	bool jtag_save_restore;
+	bool hyp_psci;
+	int reset_level;
+};
+
+struct lpm_cpu {
+	struct lpm_cpu_level levels[NR_LPM_LEVELS];
+	int nlevels;
+	unsigned int psci_mode_shift;
+	unsigned int psci_mode_mask;
+	struct lpm_cluster *parent;
+};
+
+struct lpm_level_avail {
+	bool idle_enabled;
+	bool suspend_enabled;
+	struct kobject *kobj;
+	struct kobj_attribute idle_enabled_attr;
+	struct kobj_attribute suspend_enabled_attr;
+	void *data;
+	int idx;
+	bool cpu_node;
+};
+
+struct lpm_cluster_level {
+	const char *level_name;
+	int *mode;			/* SPM mode to enter */
+	int min_child_level;
+	struct cpumask num_cpu_votes;
+	struct power_params pwr;
+	bool notify_rpm;
+	bool disable_dynamic_routing;
+	bool sync_level;
+	bool last_core_only;
+	struct lpm_level_avail available;
+	unsigned int psci_id;
+	bool is_reset;
+	int reset_level;
+};
+
+struct low_power_ops {
+	struct msm_spm_device *spm;
+	int (*set_mode)(struct low_power_ops *ops, int mode, bool notify_rpm);
+	enum msm_pm_l2_scm_flag tz_flag;
+};
+
+struct cluster_history {
+	uint32_t resi[MAXSAMPLES];
+	int mode[MAXSAMPLES];
+	int64_t stime[MAXSAMPLES];
+	uint32_t hptr;
+	uint32_t hinvalid;
+	uint32_t htmr_wkup;
+	uint64_t entry_time;
+	int entry_idx;
+	int nsamp;
+	int flag;
+};
+
+struct lpm_cluster {
+	struct list_head list;
+	struct list_head child;
+	const char *cluster_name;
+	const char **name;
+	unsigned long aff_level; /* Affinity level of the node */
+	struct low_power_ops *lpm_dev;
+	int ndevices;
+	struct lpm_cluster_level levels[NR_LPM_LEVELS];
+	int nlevels;
+	enum msm_pm_l2_scm_flag l2_flag;
+	int min_child_level;
+	int default_level;
+	int last_level;
+	struct lpm_cpu *cpu;
+	struct cpuidle_driver *drv;
+	spinlock_t sync_lock;
+	struct cpumask child_cpus;
+	struct cpumask num_children_in_sync;
+	struct lpm_cluster *parent;
+	struct lpm_stats *stats;
+	unsigned int psci_mode_shift;
+	unsigned int psci_mode_mask;
+	bool no_saw_devices;
+	struct cluster_history history;
+	struct hrtimer histtimer;
+};
+
+int set_l2_mode(struct low_power_ops *ops, int mode, bool notify_rpm);
+int set_system_mode(struct low_power_ops *ops, int mode, bool notify_rpm);
+int set_l3_mode(struct low_power_ops *ops, int mode, bool notify_rpm);
+void lpm_suspend_wake_time(uint64_t wakeup_time);
+
+struct lpm_cluster *lpm_of_parse_cluster(struct platform_device *pdev);
+void free_cluster_node(struct lpm_cluster *cluster);
+void cluster_dt_walkthrough(struct lpm_cluster *cluster);
+
+int create_cluster_lvl_nodes(struct lpm_cluster *p, struct kobject *kobj);
+bool lpm_cpu_mode_allow(unsigned int cpu,
+		unsigned int mode, bool from_idle);
+bool lpm_cluster_mode_allow(struct lpm_cluster *cluster,
+		unsigned int mode, bool from_idle);
+uint32_t *get_per_cpu_max_residency(int cpu);
+uint32_t *get_per_cpu_min_residency(int cpu);
+extern struct lpm_cluster *lpm_root_node;
+
+#ifdef CONFIG_SMP
+extern DEFINE_PER_CPU(bool, pending_ipi);
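+/*
+ * A pending IPI on any CPU in the mask means that CPU is about to be
+ * woken, so entering a synced cluster low power mode would be futile;
+ * cluster_configure() bails out early in that case.
+ */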
+static inline bool is_IPI_pending(const struct cpumask *mask)
+{
+	unsigned int cpu;
+
+	for_each_cpu(cpu, mask) {
+		if (per_cpu(pending_ipi, cpu))
+			return true;
+	}
+	return false;
+}
+#else
+static inline bool is_IPI_pending(const struct cpumask *mask)
+{
+	return false;
+}
+#endif
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/cpuidle/lpm-levels-of.c	2019-01-22 16:16:23.095242712 +0100
@@ -0,0 +1,1022 @@
+/* Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/err.h>
+#include <linux/sysfs.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/moduleparam.h>
+#include "lpm-levels.h"
+
+bool use_psci;
+enum lpm_type {
+	IDLE = 0,
+	SUSPEND,
+	LPM_TYPE_NR
+};
+
+struct lpm_type_str {
+	enum lpm_type type;
+	char *str;
+};
+
+static const struct lpm_type_str lpm_types[] = {
+	{IDLE, "idle_enabled"},
+	{SUSPEND, "suspend_enabled"},
+};
+
+static DEFINE_PER_CPU(uint32_t *, max_residency);
+static DEFINE_PER_CPU(uint32_t *, min_residency);
+static struct lpm_level_avail *cpu_level_available[NR_CPUS];
+static struct platform_device *lpm_pdev;
+
+static void *get_enabled_ptr(struct kobj_attribute *attr,
+					struct lpm_level_avail *avail)
+{
+	void *arg = NULL;
+
+	if (!strcmp(attr->attr.name, lpm_types[IDLE].str))
+		arg = (void *) &avail->idle_enabled;
+	else if (!strcmp(attr->attr.name, lpm_types[SUSPEND].str))
+		arg = (void *) &avail->suspend_enabled;
+
+	return arg;
+}
+
+static struct lpm_level_avail *get_avail_ptr(struct kobject *kobj,
+					struct kobj_attribute *attr)
+{
+	struct lpm_level_avail *avail = NULL;
+
+	if (!strcmp(attr->attr.name, lpm_types[IDLE].str))
+		avail = container_of(attr, struct lpm_level_avail,
+					idle_enabled_attr);
+	else if (!strcmp(attr->attr.name, lpm_types[SUSPEND].str))
+		avail = container_of(attr, struct lpm_level_avail,
+					suspend_enabled_attr);
+
+	return avail;
+}
+
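+/*
+ * For each level, max_residency is the smallest residency threshold of
+ * any deeper enabled level (beyond it, a deeper level is cheaper) and
+ * min_residency is one more than the max_residency of the next
+ * shallower enabled level, falling back to the level's own overhead.
+ */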
+static void set_optimum_cpu_residency(struct lpm_cpu *cpu, int cpu_id,
+		bool probe_time)
+{
+	int i, j;
+	bool mode_avail;
+	uint32_t *maximum_residency = per_cpu(max_residency, cpu_id);
+	uint32_t *minimum_residency = per_cpu(min_residency, cpu_id);
+
+	for (i = 0; i < cpu->nlevels; i++) {
+		struct power_params *pwr = &cpu->levels[i].pwr;
+
+		mode_avail = probe_time ||
+			lpm_cpu_mode_allow(cpu_id, i, true);
+
+		if (!mode_avail) {
+			maximum_residency[i] = 0;
+			minimum_residency[i] = 0;
+			continue;
+		}
+
+		maximum_residency[i] = ~0;
+		for (j = i + 1; j < cpu->nlevels; j++) {
+			mode_avail = probe_time ||
+					lpm_cpu_mode_allow(cpu_id, j, true);
+
+			if (mode_avail &&
+				(maximum_residency[i] > pwr->residencies[j]) &&
+				(pwr->residencies[j] != 0))
+				maximum_residency[i] = pwr->residencies[j];
+		}
+
+		minimum_residency[i] = pwr->time_overhead_us;
+		for (j = i-1; j >= 0; j--) {
+			if (probe_time || lpm_cpu_mode_allow(cpu_id, j, true)) {
+				minimum_residency[i] = maximum_residency[j] + 1;
+				break;
+			}
+		}
+	}
+}
+
+static void set_optimum_cluster_residency(struct lpm_cluster *cluster,
+		bool probe_time)
+{
+	int i, j;
+	bool mode_avail;
+
+	for (i = 0; i < cluster->nlevels; i++) {
+		struct power_params *pwr = &cluster->levels[i].pwr;
+
+		mode_avail = probe_time ||
+			lpm_cluster_mode_allow(cluster, i,
+					true);
+
+		if (!mode_avail) {
+			pwr->max_residency = 0;
+			pwr->min_residency = 0;
+			continue;
+		}
+
+		pwr->max_residency = ~0;
+		for (j = i+1; j < cluster->nlevels; j++) {
+			mode_avail = probe_time ||
+					lpm_cluster_mode_allow(cluster, j,
+							true);
+			if (mode_avail &&
+				(pwr->max_residency > pwr->residencies[j]) &&
+				(pwr->residencies[j] != 0))
+				pwr->max_residency = pwr->residencies[j];
+		}
+
+		pwr->min_residency = pwr->time_overhead_us;
+		for (j = i-1;  j >= 0; j--) {
+			if (probe_time ||
+				lpm_cluster_mode_allow(cluster, j, true)) {
+				pwr->min_residency =
+				  cluster->levels[j].pwr.max_residency + 1;
+				break;
+			}
+		}
+	}
+}
+
+uint32_t *get_per_cpu_max_residency(int cpu)
+{
+	return per_cpu(max_residency, cpu);
+}
+
+uint32_t *get_per_cpu_min_residency(int cpu)
+{
+	return per_cpu(min_residency, cpu);
+}
+
+ssize_t lpm_enable_show(struct kobject *kobj, struct kobj_attribute *attr,
+				char *buf)
+{
+	int ret = 0;
+	struct kernel_param kp;
+
+	kp.arg = get_enabled_ptr(attr, get_avail_ptr(kobj, attr));
+	ret = param_get_bool(buf, &kp);
+	if (ret > 0) {
+		strlcat(buf, "\n", PAGE_SIZE);
+		ret++;
+	}
+
+	return ret;
+}
+
+ssize_t lpm_enable_store(struct kobject *kobj, struct kobj_attribute *attr,
+				const char *buf, size_t len)
+{
+	int ret = 0;
+	struct kernel_param kp;
+	struct lpm_level_avail *avail;
+
+	avail = get_avail_ptr(kobj, attr);
+	if (WARN_ON(!avail))
+		return -EINVAL;
+	kp.arg = get_enabled_ptr(attr, avail);
+	ret = param_set_bool(buf, &kp);
+
+	if (avail->cpu_node)
+		set_optimum_cpu_residency(avail->data, avail->idx, false);
+	else
+		set_optimum_cluster_residency(avail->data, false);
+
+	return ret ? ret : len;
+}
+
+static int create_lvl_avail_nodes(const char *name,
+			struct kobject *parent, struct lpm_level_avail *avail,
+			void *data, int index, bool cpu_node)
+{
+	struct attribute_group *attr_group = NULL;
+	struct attribute **attr = NULL;
+	struct kobject *kobj = NULL;
+	int ret = 0;
+
+	kobj = kobject_create_and_add(name, parent);
+	if (!kobj)
+		return -ENOMEM;
+
+	attr_group = devm_kzalloc(&lpm_pdev->dev, sizeof(*attr_group),
+					GFP_KERNEL);
+	if (!attr_group) {
+		ret = -ENOMEM;
+		goto failed;
+	}
+
+	attr = devm_kzalloc(&lpm_pdev->dev,
+		sizeof(*attr) * (LPM_TYPE_NR + 1), GFP_KERNEL);
+	if (!attr) {
+		ret = -ENOMEM;
+		goto failed;
+	}
+
+	sysfs_attr_init(&avail->idle_enabled_attr.attr);
+	avail->idle_enabled_attr.attr.name = lpm_types[IDLE].str;
+	avail->idle_enabled_attr.attr.mode = 0644;
+	avail->idle_enabled_attr.show = lpm_enable_show;
+	avail->idle_enabled_attr.store = lpm_enable_store;
+
+	sysfs_attr_init(&avail->suspend_enabled_attr.attr);
+	avail->suspend_enabled_attr.attr.name = lpm_types[SUSPEND].str;
+	avail->suspend_enabled_attr.attr.mode = 0644;
+	avail->suspend_enabled_attr.show = lpm_enable_show;
+	avail->suspend_enabled_attr.store = lpm_enable_store;
+
+	attr[0] = &avail->idle_enabled_attr.attr;
+	attr[1] = &avail->suspend_enabled_attr.attr;
+	attr[2] = NULL;
+	attr_group->attrs = attr;
+
+	ret = sysfs_create_group(kobj, attr_group);
+	if (ret) {
+		ret = -ENOMEM;
+		goto failed;
+	}
+
+	avail->idle_enabled = true;
+	avail->suspend_enabled = true;
+	avail->kobj = kobj;
+	avail->data = data;
+	avail->idx = index;
+	avail->cpu_node = cpu_node;
+
+	return ret;
+
+failed:
+	kobject_put(kobj);
+	return ret;
+}
+
+static int create_cpu_lvl_nodes(struct lpm_cluster *p, struct kobject *parent)
+{
+	int cpu;
+	int i, cpu_idx;
+	struct kobject **cpu_kobj = NULL;
+	struct lpm_level_avail *level_list = NULL;
+	char cpu_name[20] = {0};
+	int ret = 0;
+
+	cpu_kobj = devm_kzalloc(&lpm_pdev->dev, sizeof(*cpu_kobj) *
+			cpumask_weight(&p->child_cpus), GFP_KERNEL);
+	if (!cpu_kobj)
+		return -ENOMEM;
+
+	cpu_idx = 0;
+	for_each_cpu(cpu, &p->child_cpus) {
+		snprintf(cpu_name, sizeof(cpu_name), "cpu%d", cpu);
+		cpu_kobj[cpu_idx] = kobject_create_and_add(cpu_name, parent);
+		if (!cpu_kobj[cpu_idx]) {
+			ret = -ENOMEM;
+			goto release_kobj;
+		}
+
+		level_list = devm_kzalloc(&lpm_pdev->dev,
+				p->cpu->nlevels * sizeof(*level_list),
+				GFP_KERNEL);
+		if (!level_list) {
+			ret = -ENOMEM;
+			goto release_kobj;
+		}
+
+		for (i = 0; i < p->cpu->nlevels; i++) {
+
+			ret = create_lvl_avail_nodes(p->cpu->levels[i].name,
+					cpu_kobj[cpu_idx], &level_list[i],
+					(void *)p->cpu, cpu, true);
+			if (ret)
+				goto release_kobj;
+		}
+
+		cpu_level_available[cpu] = level_list;
+		cpu_idx++;
+	}
+
+	return ret;
+
+release_kobj:
+	for (i = 0; i < cpumask_weight(&p->child_cpus); i++)
+		kobject_put(cpu_kobj[i]);
+
+	return ret;
+}
+
+int create_cluster_lvl_nodes(struct lpm_cluster *p, struct kobject *kobj)
+{
+	int ret = 0;
+	struct lpm_cluster *child = NULL;
+	int i;
+	struct kobject *cluster_kobj = NULL;
+
+	if (!p)
+		return -ENODEV;
+
+	cluster_kobj = kobject_create_and_add(p->cluster_name, kobj);
+	if (!cluster_kobj)
+		return -ENOMEM;
+
+	for (i = 0; i < p->nlevels; i++) {
+		ret = create_lvl_avail_nodes(p->levels[i].level_name,
+				cluster_kobj, &p->levels[i].available,
+				(void *)p, 0, false);
+		if (ret)
+			return ret;
+	}
+
+	list_for_each_entry(child, &p->child, list) {
+		ret = create_cluster_lvl_nodes(child, cluster_kobj);
+		if (ret)
+			return ret;
+	}
+
+	if (p->cpu) {
+		ret = create_cpu_lvl_nodes(p, cluster_kobj);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
+bool lpm_cpu_mode_allow(unsigned int cpu,
+		unsigned int index, bool from_idle)
+{
+	struct lpm_level_avail *avail = cpu_level_available[cpu];
+
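+	/*
+	 * Until the driver is probed and the availability nodes exist,
+	 * disallow the mode for idle but still allow it for suspend.
+	 */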
+	if (!lpm_pdev || !avail)
+		return !from_idle;
+
+	return !!(from_idle ? avail[index].idle_enabled :
+				avail[index].suspend_enabled);
+}
+
+bool lpm_cluster_mode_allow(struct lpm_cluster *cluster,
+		unsigned int mode, bool from_idle)
+{
+	struct lpm_level_avail *avail = &cluster->levels[mode].available;
+
+	if (!lpm_pdev || !avail)
+		return false;
+
+	return !!(from_idle ? avail->idle_enabled :
+				avail->suspend_enabled);
+}
+
+static int parse_legacy_cluster_params(struct device_node *node,
+		struct lpm_cluster *c)
+{
+	int i;
+	char *key;
+	int ret;
+	struct lpm_match {
+		char *devname;
+		int (*set_mode)(struct low_power_ops *, int, bool);
+	};
+	struct lpm_match match_tbl[] = {
+		{"l2", set_l2_mode},
+		{"cci", set_system_mode},
+		{"l3", set_l3_mode},
+		{"cbf", set_system_mode},
+	};
+
+	key = "qcom,spm-device-names";
+	c->ndevices = of_property_count_strings(node, key);
+
+	if (c->ndevices < 0) {
+		pr_info("%s(): Ignoring cluster params\n", __func__);
+		c->no_saw_devices = true;
+		c->ndevices = 0;
+		return 0;
+	}
+
+	c->name = devm_kzalloc(&lpm_pdev->dev, c->ndevices * sizeof(*c->name),
+				GFP_KERNEL);
+	c->lpm_dev = devm_kzalloc(&lpm_pdev->dev,
+				c->ndevices * sizeof(*c->lpm_dev),
+				GFP_KERNEL);
+	if (!c->name || !c->lpm_dev) {
+		ret = -ENOMEM;
+		goto failed;
+	}
+
+	for (i = 0; i < c->ndevices; i++) {
+		char device_name[20];
+		int j;
+
+		ret = of_property_read_string_index(node, key, i, &c->name[i]);
+		if (ret)
+			goto failed;
+		snprintf(device_name, sizeof(device_name), "%s-%s",
+				c->cluster_name, c->name[i]);
+
+		c->lpm_dev[i].spm = msm_spm_get_device_by_name(device_name);
+
+		if (IS_ERR_OR_NULL(c->lpm_dev[i].spm)) {
+			pr_err("Failed to get spm device by name:%s\n",
+					device_name);
+			ret = PTR_ERR(c->lpm_dev[i].spm);
+			goto failed;
+		}
+		for (j = 0; j < ARRAY_SIZE(match_tbl); j++) {
+			if (!strcmp(c->name[i], match_tbl[j].devname))
+				c->lpm_dev[i].set_mode = match_tbl[j].set_mode;
+		}
+
+		if (!c->lpm_dev[i].set_mode) {
+			ret = -ENODEV;
+			goto failed;
+		}
+	}
+
+	key = "qcom,default-level";
+	if (of_property_read_u32(node, key, &c->default_level))
+		c->default_level = 0;
+	return 0;
+failed:
+	pr_err("%s(): Failed reading %s\n", __func__, key);
+	return ret;
+}
+
+static int parse_cluster_params(struct device_node *node,
+		struct lpm_cluster *c)
+{
+	char *key;
+	int ret;
+
+	key = "label";
+	ret = of_property_read_string(node, key, &c->cluster_name);
+	if (ret) {
+		pr_err("%s(): Cannot read required param %s\n", __func__, key);
+		return ret;
+	}
+
+	if (use_psci) {
+		key = "qcom,psci-mode-shift";
+		ret = of_property_read_u32(node, key,
+				&c->psci_mode_shift);
+		if (ret) {
+			pr_err("%s(): Failed to read param: %s\n",
+							__func__, key);
+			return ret;
+		}
+
+		key = "qcom,psci-mode-mask";
+		ret = of_property_read_u32(node, key,
+				&c->psci_mode_mask);
+		if (ret) {
+			pr_err("%s(): Failed to read param: %s\n",
+							__func__, key);
+			return ret;
+		}
+
+		/* Set ndevices to 1 by default */
+		c->ndevices = 1;
+
+		return 0;
+	} else
+		return parse_legacy_cluster_params(node, c);
+}
+
+static int parse_lpm_mode(const char *str)
+{
+	int i;
+	struct lpm_lookup_table mode_lookup[] = {
+		{MSM_SPM_MODE_POWER_COLLAPSE, "pc"},
+		{MSM_SPM_MODE_STANDALONE_POWER_COLLAPSE, "spc"},
+		{MSM_SPM_MODE_FASTPC, "fpc"},
+		{MSM_SPM_MODE_GDHS, "gdhs"},
+		{MSM_SPM_MODE_RETENTION, "retention"},
+		{MSM_SPM_MODE_CLOCK_GATING, "wfi"},
+		{MSM_SPM_MODE_DISABLED, "active"}
+	};
+
+	for (i = 0; i < ARRAY_SIZE(mode_lookup); i++)
+		if (!strcmp(str, mode_lookup[i].mode_name))
+			return  mode_lookup[i].modes;
+	return -EINVAL;
+}
+
+static int parse_power_params(struct device_node *node,
+		struct power_params *pwr)
+{
+	char *key;
+	int ret;
+
+	key = "qcom,latency-us";
+	ret  = of_property_read_u32(node, key, &pwr->latency_us);
+	if (ret)
+		goto fail;
+
+	key = "qcom,ss-power";
+	ret = of_property_read_u32(node, key, &pwr->ss_power);
+	if (ret)
+		goto fail;
+
+	key = "qcom,energy-overhead";
+	ret = of_property_read_u32(node, key, &pwr->energy_overhead);
+	if (ret)
+		goto fail;
+
+	key = "qcom,time-overhead";
+	ret = of_property_read_u32(node, key, &pwr->time_overhead_us);
+	if (ret)
+		goto fail;
+
+fail:
+	if (ret)
+		pr_err("%s(): %s Error reading %s\n", __func__, node->name,
+				key);
+	return ret;
+}
+
+static int parse_cluster_level(struct device_node *node,
+		struct lpm_cluster *cluster)
+{
+	int i = 0;
+	struct lpm_cluster_level *level = &cluster->levels[cluster->nlevels];
+	int ret = -ENOMEM;
+	char *key;
+
+	key = "label";
+	ret = of_property_read_string(node, key, &level->level_name);
+	if (ret)
+		goto failed;
+
+	if (use_psci) {
+		char *k = "qcom,psci-mode";
+		ret = of_property_read_u32(node, k, &level->psci_id);
+		if (ret)
+			goto failed;
+
+		level->is_reset = of_property_read_bool(node, "qcom,is-reset");
+	} else if (!cluster->no_saw_devices) {
+		key  = "no saw-devices";
+
+		level->mode = devm_kzalloc(&lpm_pdev->dev,
+				cluster->ndevices * sizeof(*level->mode),
+				GFP_KERNEL);
+		if (!level->mode) {
+			pr_err("Memory allocation failed\n");
+			goto failed;
+		}
+
+		for (i = 0; i < cluster->ndevices; i++) {
+			const char *spm_mode;
+			char key[25] = {0};
+
+			snprintf(key, 25, "qcom,spm-%s-mode", cluster->name[i]);
+			ret = of_property_read_string(node, key, &spm_mode);
+			if (ret)
+				goto failed;
+
+			level->mode[i] = parse_lpm_mode(spm_mode);
+
+			if (level->mode[i] < 0)
+				goto failed;
+
+			if (level->mode[i] == MSM_SPM_MODE_POWER_COLLAPSE
+				|| level->mode[i] ==
+				MSM_SPM_MODE_STANDALONE_POWER_COLLAPSE)
+				level->is_reset |= true;
+		}
+	}
+
+	if (cluster->nlevels != cluster->default_level) {
+		key = "min child idx";
+		ret = of_property_read_u32(node, "qcom,min-child-idx",
+				&level->min_child_level);
+		if (ret)
+			goto failed;
+
+		if (cluster->min_child_level > level->min_child_level)
+			cluster->min_child_level = level->min_child_level;
+	}
+
+	level->notify_rpm = of_property_read_bool(node, "qcom,notify-rpm");
+	level->disable_dynamic_routing = of_property_read_bool(node,
+					"qcom,disable-dynamic-int-routing");
+	level->last_core_only = of_property_read_bool(node,
+					"qcom,last-core-only");
+
+	key = "parse_power_params";
+	ret = parse_power_params(node, &level->pwr);
+	if (ret)
+		goto failed;
+
+	key = "qcom,reset-level";
+	ret = of_property_read_u32(node, key, &level->reset_level);
+	if (ret == -EINVAL)
+		level->reset_level = LPM_RESET_LVL_NONE;
+	else if (ret)
+		goto failed;
+
+	cluster->nlevels++;
+	return 0;
+failed:
+	pr_err("Failed %s() key = %s ret = %d\n", __func__, key, ret);
+	return ret;
+}
+
+static int parse_cpu_spm_mode(const char *mode_name)
+{
+	struct lpm_lookup_table pm_sm_lookup[] = {
+		{MSM_PM_SLEEP_MODE_WAIT_FOR_INTERRUPT,
+			"wfi"},
+		{MSM_PM_SLEEP_MODE_POWER_COLLAPSE_STANDALONE,
+			"standalone_pc"},
+		{MSM_PM_SLEEP_MODE_POWER_COLLAPSE,
+			"pc"},
+		{MSM_PM_SLEEP_MODE_RETENTION,
+			"retention"},
+		{MSM_PM_SLEEP_MODE_FASTPC,
+			"fpc"},
+	};
+	int i;
+	int ret = -EINVAL;
+
+	for (i = 0; i < ARRAY_SIZE(pm_sm_lookup); i++) {
+		if (!strcmp(mode_name, pm_sm_lookup[i].mode_name)) {
+			ret = pm_sm_lookup[i].modes;
+			break;
+		}
+	}
+	return ret;
+}
+
+static int parse_cpu_mode(struct device_node *n, struct lpm_cpu_level *l)
+{
+	char *key;
+	int ret;
+
+	key = "qcom,spm-cpu-mode";
+	ret  =  of_property_read_string(n, key, &l->name);
+	if (ret) {
+		pr_err("Failed %s %d\n", n->name, __LINE__);
+		return ret;
+	}
+
+	if (use_psci) {
+		key = "qcom,psci-cpu-mode";
+
+		ret = of_property_read_u32(n, key, &l->psci_id);
+		if (ret) {
+			pr_err("Failed reading %s on device %s\n", key,
+					n->name);
+			return ret;
+		}
+		key = "qcom,hyp-psci";
+
+		l->hyp_psci = of_property_read_bool(n, key);
+	} else {
+		l->mode = parse_cpu_spm_mode(l->name);
+
+		if (l->mode < 0)
+			return l->mode;
+	}
+	return 0;
+
+}
+
+static int get_cpumask_for_node(struct device_node *node, struct cpumask *mask)
+{
+	struct device_node *cpu_node;
+	int cpu;
+	int idx = 0;
+
+	cpu_node = of_parse_phandle(node, "qcom,cpu", idx++);
+	if (!cpu_node) {
+		pr_info("%s: No CPU phandle, assuming single cluster\n",
+				node->full_name);
+		/*
+		 * Not all targets have the cpu node populated in the device
+		 * tree. If the cpu node is not populated, assume all possible
+		 * CPUs belong to this cluster.
+		 */
+		cpumask_copy(mask, cpu_possible_mask);
+		return 0;
+	}
+
+	while (cpu_node) {
+		for_each_possible_cpu(cpu) {
+			if (of_get_cpu_node(cpu, NULL) == cpu_node) {
+				cpumask_set_cpu(cpu, mask);
+				break;
+			}
+		}
+		cpu_node = of_parse_phandle(node, "qcom,cpu", idx++);
+	}
+
+	return 0;
+}
+
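+/*
+ * Break-even residency between a base level and the next, deeper one:
+ * the sleep time at which the deeper level's extra entry/exit energy
+ * overhead is paid back by its lower steady-state power, i.e. roughly
+ *
+ *   t = (dE_overhead - d(P_ss * t_overhead)) / (P_ss,base - P_ss,next)
+ *
+ * with d denoting (next - base). Results below the deeper level's time
+ * overhead are clamped up to it.
+ */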
+static int calculate_residency(struct power_params *base_pwr,
+					struct power_params *next_pwr)
+{
+	int32_t residency = (int32_t)(next_pwr->energy_overhead -
+						base_pwr->energy_overhead) -
+		((int32_t)(next_pwr->ss_power * next_pwr->time_overhead_us)
+		- (int32_t)(base_pwr->ss_power * base_pwr->time_overhead_us));
+
+	residency /= (int32_t)(base_pwr->ss_power  - next_pwr->ss_power);
+
+	if (residency < 0) {
+		pr_err("%s: residency < 0 for LPM\n",
+				__func__);
+		return next_pwr->time_overhead_us;
+	}
+
+	return residency < next_pwr->time_overhead_us ?
+				next_pwr->time_overhead_us : residency;
+}
+
+static int parse_cpu_levels(struct device_node *node, struct lpm_cluster *c)
+{
+	struct device_node *n;
+	int ret = -ENOMEM;
+	int i, j;
+	char *key;
+
+	c->cpu = devm_kzalloc(&lpm_pdev->dev, sizeof(*c->cpu), GFP_KERNEL);
+	if (!c->cpu)
+		return ret;
+
+	c->cpu->parent = c;
+	if (use_psci) {
+
+		key = "qcom,psci-mode-shift";
+
+		ret = of_property_read_u32(node, key, &c->cpu->psci_mode_shift);
+		if (ret) {
+			pr_err("Failed reading %s on device %s\n", key,
+					node->name);
+			return ret;
+		}
+		key = "qcom,psci-mode-mask";
+
+		ret = of_property_read_u32(node, key, &c->cpu->psci_mode_mask);
+		if (ret) {
+			pr_err("Failed reading %s on device %s\n", key,
+					node->name);
+			return ret;
+		}
+	}
+	for_each_child_of_node(node, n) {
+		struct lpm_cpu_level *l = &c->cpu->levels[c->cpu->nlevels];
+
+		c->cpu->nlevels++;
+
+		ret = parse_cpu_mode(n, l);
+		if (ret < 0) {
+			pr_info("Failed %s\n", l->name);
+			goto failed;
+		}
+
+		ret = parse_power_params(n, &l->pwr);
+		if (ret)
+			goto failed;
+
+		key = "qcom,use-broadcast-timer";
+		l->use_bc_timer = of_property_read_bool(n, key);
+
+		l->is_reset = of_property_read_bool(n, "qcom,is-reset");
+
+		key = "qcom,jtag-save-restore";
+		l->jtag_save_restore = of_property_read_bool(n, key);
+
+		key = "qcom,reset-level";
+		ret = of_property_read_u32(n, key, &l->reset_level);
+		if (ret == -EINVAL)
+			l->reset_level = LPM_RESET_LVL_NONE;
+		else if (ret)
+			goto failed;
+	}
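+
+	/*
+	 * Pre-compute the pairwise break-even residencies; residencies[j]
+	 * of level i is only meaningful for deeper levels (j > i).
+	 */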
+	for (i = 0; i < c->cpu->nlevels; i++) {
+		for (j = 0; j < c->cpu->nlevels; j++) {
+			if (i >= j) {
+				c->cpu->levels[i].pwr.residencies[j] = 0;
+				continue;
+			}
+
+			c->cpu->levels[i].pwr.residencies[j] =
+				calculate_residency(&c->cpu->levels[i].pwr,
+					&c->cpu->levels[j].pwr);
+
+			pr_err("%s: idx %d %u\n", __func__, j,
+					c->cpu->levels[i].pwr.residencies[j]);
+		}
+	}
+
+	return 0;
+failed:
+	pr_err("%s(): Failed with error code:%d\n", __func__, ret);
+	return ret;
+}
+
+void free_cluster_node(struct lpm_cluster *cluster)
+{
+	struct lpm_cluster *cl, *m;
+
+	list_for_each_entry_safe(cl, m, &cluster->child, list) {
+		list_del(&cl->list);
+		free_cluster_node(cl);
+	}
+
+	cluster->ndevices = 0;
+}
+
+/*
+ * TODO:
+ * Expects a CPU or a cluster only. This ensures that the affinity
+ * level of a cluster is consistent with respect to its
+ * child nodes.
+ */
+struct lpm_cluster *parse_cluster(struct device_node *node,
+		struct lpm_cluster *parent)
+{
+	struct lpm_cluster *c;
+	struct device_node *n;
+	char *key;
+	int ret = 0;
+	int i, j;
+
+	c = devm_kzalloc(&lpm_pdev->dev, sizeof(*c), GFP_KERNEL);
+	if (!c)
+		return ERR_PTR(-ENOMEM);
+
+	ret = parse_cluster_params(node, c);
+
+	if (ret)
+		goto failed_parse_params;
+
+	INIT_LIST_HEAD(&c->child);
+	c->parent = parent;
+	spin_lock_init(&c->sync_lock);
+	c->min_child_level = NR_LPM_LEVELS;
+
+	for_each_child_of_node(node, n) {
+
+		if (!n->name)
+			continue;
+		key = "qcom,pm-cluster-level";
+		if (!of_node_cmp(n->name, key)) {
+			WARN_ON(!use_psci && c->no_saw_devices);
+			if (parse_cluster_level(n, c))
+				goto failed_parse_cluster;
+			continue;
+		}
+
+		key = "qcom,pm-cluster";
+		if (!of_node_cmp(n->name, key)) {
+			struct lpm_cluster *child;
+
+			WARN_ON(!use_psci && c->no_saw_devices);
+			child = parse_cluster(n, c);
+			if (IS_ERR_OR_NULL(child))
+				goto failed_parse_cluster;
+
+			list_add(&child->list, &c->child);
+			cpumask_or(&c->child_cpus, &c->child_cpus,
+					&child->child_cpus);
+			c->aff_level = child->aff_level + 1;
+			continue;
+		}
+
+		key = "qcom,pm-cpu";
+		if (!of_node_cmp(n->name, key)) {
+			/*
+			 * Parse the cpu node only if a pm-cpu node is
+			 * available, even though the mask is defined at
+			 * the cluster level.
+			 */
+			if (get_cpumask_for_node(node, &c->child_cpus))
+				goto failed_parse_cluster;
+
+			if (parse_cpu_levels(n, c))
+				goto failed_parse_cluster;
+
+			c->aff_level = 1;
+
+			for_each_cpu(i, &c->child_cpus) {
+				per_cpu(max_residency, i) = devm_kzalloc(
+					&lpm_pdev->dev,
+					sizeof(uint32_t) * c->cpu->nlevels,
+					GFP_KERNEL);
+				if (!per_cpu(max_residency, i))
+					return ERR_PTR(-ENOMEM);
+				per_cpu(min_residency, i) = devm_kzalloc(
+					&lpm_pdev->dev,
+					sizeof(uint32_t) * c->cpu->nlevels,
+					GFP_KERNEL);
+				if (!per_cpu(min_residency, i))
+					return ERR_PTR(-ENOMEM);
+				set_optimum_cpu_residency(c->cpu, i, true);
+			}
+		}
+	}
+
+	if (cpumask_intersects(&c->child_cpus, cpu_online_mask))
+		c->last_level = c->default_level;
+	else
+		c->last_level = c->nlevels-1;
+
+	for (i = 0; i < c->nlevels; i++) {
+		for (j = 0; j < c->nlevels; j++) {
+			if (i >= j) {
+				c->levels[i].pwr.residencies[j] = 0;
+				continue;
+			}
+			c->levels[i].pwr.residencies[j] = calculate_residency(
+				&c->levels[i].pwr, &c->levels[j].pwr);
+		}
+	}
+	set_optimum_cluster_residency(c, true);
+	return c;
+
+failed_parse_cluster:
+	pr_err("Failed parse cluster:%s\n", key);
+	if (parent)
+		list_del(&c->list);
+	free_cluster_node(c);
+failed_parse_params:
+	pr_err("Failed parse params\n");
+	return NULL;
+}
+
+struct lpm_cluster *lpm_of_parse_cluster(struct platform_device *pdev)
+{
+	struct device_node *top = NULL;
+
+	use_psci = of_property_read_bool(pdev->dev.of_node, "qcom,use-psci");
+
+	top = of_find_node_by_name(pdev->dev.of_node, "qcom,pm-cluster");
+	if (!top) {
+		pr_err("Failed to find root node\n");
+		return ERR_PTR(-ENODEV);
+	}
+
+	lpm_pdev = pdev;
+	return parse_cluster(top, NULL);
+}
+
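+/*
+ * Debug helper: recursively dump the parsed cluster hierarchy, using
+ * the recursion depth (id) to indent each nesting level.
+ */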
+void cluster_dt_walkthrough(struct lpm_cluster *cluster)
+{
+	struct list_head *list;
+	int i, j;
+	static int id;
+	char str[10] = {0};
+
+	if (!cluster)
+		return;
+
+	for (i = 0; i < id; i++)
+		snprintf(str+i, 10 - i, "\t");
+	pr_info("%d\n", __LINE__);
+
+	for (i = 0; i < cluster->nlevels; i++) {
+		struct lpm_cluster_level *l = &cluster->levels[i];
+		pr_info("%d ndevices:%d\n", __LINE__, cluster->ndevices);
+		for (j = 0; j < cluster->ndevices; j++)
+			pr_info("%sDevice: %p id:%p\n", str,
+					&cluster->name[j], &l->mode[i]);
+	}
+
+	if (cluster->cpu) {
+		pr_info("%d\n", __LINE__);
+		for (j = 0; j < cluster->cpu->nlevels; j++)
+			pr_info("%s\tCPU mode: %s id:%d\n", str,
+					cluster->cpu->levels[j].name,
+					cluster->cpu->levels[j].mode);
+	}
+
+	id++;
+
+	list_for_each(list, &cluster->child) {
+		struct lpm_cluster *n;
+		pr_info("%d\n", __LINE__);
+		n = list_entry(list, typeof(*n), list);
+		cluster_dt_walkthrough(n);
+	}
+	id--;
+}
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/cpuidle/lpm-workarounds.c	2019-01-22 16:16:23.095242712 +0100
@@ -0,0 +1,134 @@
+/* Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <linux/err.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+#include <linux/regulator/rpm-smd-regulator.h>
+
+static struct regulator *lpm_cx_reg;
+static struct work_struct dummy_vote_work;
+static struct workqueue_struct *lpm_wa_wq;
+static bool lpm_wa_cx_turbo_unvote;
+
+/* While exiting from RPM-assisted power collapse on some targets like MSM8939,
+ * CX is bumped to turbo mode by the RPM. To reduce the power impact, the APSS
+ * low power driver needs to remove the CX turbo vote.
+ */
+static void send_dummy_cx_vote(struct work_struct *w)
+{
+	if (lpm_cx_reg) {
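+		/*
+		 * Vote the corner up to SUPER_TURBO and then immediately
+		 * relax the minimum back to NONE; the net effect is to
+		 * drop the stale turbo vote left behind by the RPM.
+		 */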
+		regulator_set_voltage(lpm_cx_reg,
+			RPM_REGULATOR_CORNER_SUPER_TURBO,
+			RPM_REGULATOR_CORNER_SUPER_TURBO);
+
+		regulator_set_voltage(lpm_cx_reg,
+			RPM_REGULATOR_CORNER_NONE,
+			RPM_REGULATOR_CORNER_SUPER_TURBO);
+	}
+}
+
+/*
+ * lpm_wa_cx_unvote_send(): Unvote for CX turbo mode
+ */
+void lpm_wa_cx_unvote_send(void)
+{
+	if (lpm_wa_cx_turbo_unvote)
+		queue_work(lpm_wa_wq, &dummy_vote_work);
+}
+EXPORT_SYMBOL(lpm_wa_cx_unvote_send);
+
+static int lpm_wa_cx_unvote_init(struct platform_device *pdev)
+{
+	int ret = 0;
+
+	lpm_cx_reg = devm_regulator_get(&pdev->dev, "lpm-cx");
+	if (IS_ERR(lpm_cx_reg)) {
+		ret = PTR_ERR(lpm_cx_reg);
+		if (ret != -EPROBE_DEFER)
+			pr_err("Unable to get the CX regulator\n");
+		return ret;
+	}
+
+	INIT_WORK(&dummy_vote_work, send_dummy_cx_vote);
+
+	lpm_wa_wq = alloc_workqueue("lpm-wa",
+				WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_HIGHPRI, 1);
+
+	return ret;
+}
+
+static int lpm_wa_cx_unvote_exit(void)
+{
+	if (lpm_wa_wq)
+		destroy_workqueue(lpm_wa_wq);
+
+	return 0;
+}
+
+static int lpm_wa_probe(struct platform_device *pdev)
+{
+	int ret = 0;
+
+	lpm_wa_cx_turbo_unvote = of_property_read_bool(pdev->dev.of_node,
+					"qcom,lpm-wa-cx-turbo-unvote");
+	if (lpm_wa_cx_turbo_unvote) {
+		ret = lpm_wa_cx_unvote_init(pdev);
+		if (ret) {
+			pr_err("%s: Failed to initialize lpm_wa_cx_unvote (%d)\n",
+				__func__, ret);
+			return ret;
+		}
+	}
+
+	return ret;
+}
+
+static int lpm_wa_remove(struct platform_device *pdev)
+{
+	int ret = 0;
+	if (lpm_wa_cx_turbo_unvote)
+		ret = lpm_wa_cx_unvote_exit();
+
+	return ret;
+}
+
+static struct of_device_id lpm_wa_mtch_tbl[] = {
+	{.compatible = "qcom,lpm-workarounds"},
+	{},
+};
+
+static struct platform_driver lpm_wa_driver = {
+	.probe = lpm_wa_probe,
+	.remove = lpm_wa_remove,
+	.driver = {
+		.name = "lpm-workarounds",
+		.owner = THIS_MODULE,
+		.of_match_table = lpm_wa_mtch_tbl,
+	},
+};
+
+static int __init lpm_wa_module_init(void)
+{
+	int ret;
+	ret = platform_driver_register(&lpm_wa_driver);
+	if (ret)
+		pr_info("Error registering %s\n", lpm_wa_driver.driver.name);
+
+	return ret;
+}
+late_initcall(lpm_wa_module_init);
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/cpuidle/lpm-workarounds.h	2019-01-22 16:16:23.095242712 +0100
@@ -0,0 +1,19 @@
+/* Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __LPM_WA_H
+#define __LPM_WA_H
+
+void lpm_wa_cx_unvote_send(void);
+
+#endif  /* __LPM_WA_H */
diff -Nruw linux-4.4.115-fbx/drivers/crypto/msm./compat_qcedev.c linux-4.4.115-fbx/drivers/crypto/msm/compat_qcedev.c
--- linux-4.4.115-fbx/drivers/crypto/msm./compat_qcedev.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/crypto/msm/compat_qcedev.c	2019-01-22 16:16:23.107242820 +0100
@@ -0,0 +1,424 @@
+/*
+ * QTI CE 32-bit compatibility syscall for 64-bit systems
+ *
+ * Copyright (c) 2014-2015, 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#define pr_fmt(fmt) "COMPAT-QCEDEV: %s: " fmt, __func__
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/uaccess.h>
+#include <linux/qcedev.h>
+#include <linux/compat.h>
+#include "compat_qcedev.h"
+
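+/*
+ * The compat_* request structures differ from their native counterparts
+ * in field widths (compat_ulong_t/compat_uptr_t vs. unsigned long and
+ * kernel pointers), so each request is marshalled field by field with
+ * get_user()/put_user() instead of being copied wholesale.
+ */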
+static int compat_get_qcedev_pmem_info(
+		struct compat_qcedev_pmem_info __user *pmem32,
+		struct qcedev_pmem_info __user *pmem)
+{
+	compat_ulong_t offset;
+	compat_int_t fd_src;
+	compat_int_t fd_dst;
+	int err = 0, i = 0;
+	uint32_t len;
+
+	err |= get_user(fd_src, &pmem32->fd_src);
+	err |= put_user(fd_src, &pmem->fd_src);
+
+	for (i = 0; i < QCEDEV_MAX_BUFFERS; i++) {
+		err |= get_user(offset, &pmem32->src[i].offset);
+		err |= put_user(offset, &pmem->src[i].offset);
+		err |= get_user(len, &pmem32->src[i].len);
+		err |= put_user(len, &pmem->src[i].len);
+	}
+
+	err |= get_user(fd_dst, &pmem32->fd_dst);
+	err |= put_user(fd_dst, &pmem->fd_dst);
+
+	for (i = 0; i < QCEDEV_MAX_BUFFERS; i++) {
+		err |= get_user(offset, &pmem32->dst[i].offset);
+		err |= put_user(offset, &pmem->dst[i].offset);
+		err |= get_user(len, &pmem32->dst[i].len);
+		err |= put_user(len, &pmem->dst[i].len);
+	}
+
+	return err;
+}
+
+static int compat_put_qcedev_pmem_info(
+		struct compat_qcedev_pmem_info __user *pmem32,
+		struct qcedev_pmem_info __user *pmem)
+{
+	compat_ulong_t offset;
+	compat_int_t fd_src;
+	compat_int_t fd_dst;
+	int err = 0, i = 0;
+	uint32_t len;
+
+	err |= get_user(fd_src, &pmem->fd_src);
+	err |= put_user(fd_src, &pmem32->fd_src);
+
+	for (i = 0; i < QCEDEV_MAX_BUFFERS; i++) {
+		err |= get_user(offset, &pmem->src[i].offset);
+		err |= put_user(offset, &pmem32->src[i].offset);
+		err |= get_user(len, &pmem->src[i].len);
+		err |= put_user(len, &pmem32->src[i].len);
+	}
+
+	err |= get_user(fd_dst, &pmem->fd_dst);
+	err |= put_user(fd_dst, &pmem32->fd_dst);
+
+	for (i = 0; i < QCEDEV_MAX_BUFFERS; i++) {
+		err |= get_user(offset, &pmem->dst[i].offset);
+		err |= put_user(offset, &pmem32->dst[i].offset);
+		err |= get_user(len, &pmem->dst[i].len);
+		err |= put_user(len, &pmem32->dst[i].len);
+	}
+
+	return err;
+}
+
+static int compat_get_qcedev_vbuf_info(
+		struct compat_qcedev_vbuf_info __user *vbuf32,
+		struct qcedev_vbuf_info __user *vbuf)
+{
+	compat_uptr_t vaddr;
+	int err = 0, i = 0;
+	uint32_t len;
+
+	for (i = 0; i < QCEDEV_MAX_BUFFERS; i++) {
+		err |= get_user(vaddr, &vbuf32->src[i].vaddr);
+		err |= put_user(vaddr, (compat_uptr_t *)&vbuf->src[i].vaddr);
+		err |= get_user(len, &vbuf32->src[i].len);
+		err |= put_user(len, &vbuf->src[i].len);
+	}
+
+	for (i = 0; i < QCEDEV_MAX_BUFFERS; i++) {
+		err |= get_user(vaddr, &vbuf32->dst[i].vaddr);
+		err |= put_user(vaddr, (compat_uptr_t *)&vbuf->dst[i].vaddr);
+		err |= get_user(len, &vbuf32->dst[i].len);
+		err |= put_user(len, &vbuf->dst[i].len);
+	}
+	return err;
+}
+
+static int compat_put_qcedev_vbuf_info(
+		struct compat_qcedev_vbuf_info __user *vbuf32,
+		struct qcedev_vbuf_info __user *vbuf)
+{
+	compat_uptr_t vaddr;
+	int err = 0, i = 0;
+	uint32_t len;
+
+	for (i = 0; i < QCEDEV_MAX_BUFFERS; i++) {
+		err |= get_user(vaddr, (compat_uptr_t *)&vbuf->src[i].vaddr);
+		err |= put_user(vaddr, &vbuf32->src[i].vaddr);
+		err |= get_user(len, &vbuf->src[i].len);
+		err |= put_user(len, &vbuf32->src[i].len);
+	}
+
+	for (i = 0; i < QCEDEV_MAX_BUFFERS; i++) {
+		err |= get_user(vaddr, (compat_uptr_t *)&vbuf->dst[i].vaddr);
+		err |= put_user(vaddr, &vbuf32->dst[i].vaddr);
+		err |= get_user(len, &vbuf->dst[i].len);
+		err |= put_user(len, &vbuf32->dst[i].len);
+	}
+	return err;
+}
+
+static int compat_get_qcedev_cipher_op_req(
+		struct compat_qcedev_cipher_op_req __user *data32,
+		struct qcedev_cipher_op_req __user *data)
+{
+	enum qcedev_cipher_mode_enum mode;
+	enum qcedev_cipher_alg_enum alg;
+	compat_ulong_t byteoffset;
+	enum qcedev_oper_enum op;
+	compat_ulong_t data_len;
+	compat_ulong_t encklen;
+	compat_ulong_t entries;
+	compat_ulong_t ivlen;
+	uint8_t in_place_op;
+	int err = 0, i = 0;
+	uint8_t use_pmem;
+	uint8_t enckey;
+	uint8_t iv;
+
+	err |= get_user(use_pmem, &data32->use_pmem);
+	err |= put_user(use_pmem, &data->use_pmem);
+
+	if (use_pmem)
+		err |= compat_get_qcedev_pmem_info(&data32->pmem, &data->pmem);
+	else
+		err |= compat_get_qcedev_vbuf_info(&data32->vbuf, &data->vbuf);
+
+	err |= get_user(entries, &data32->entries);
+	err |= put_user(entries, &data->entries);
+	err |= get_user(data_len, &data32->data_len);
+	err |= put_user(data_len, &data->data_len);
+	err |= get_user(in_place_op, &data32->in_place_op);
+	err |= put_user(in_place_op, &data->in_place_op);
+
+	for (i = 0; i < QCEDEV_MAX_KEY_SIZE; i++) {
+		err |= get_user(enckey, &(data32->enckey[i]));
+		err |= put_user(enckey, &(data->enckey[i]));
+	}
+
+	err |= get_user(encklen, &data32->encklen);
+	err |= put_user(encklen, &data->encklen);
+
+	for (i = 0; i < QCEDEV_MAX_IV_SIZE; i++) {
+		err |= get_user(iv, &(data32->iv[i]));
+		err |= put_user(iv, &(data->iv[i]));
+	}
+
+	err |= get_user(ivlen, &data32->ivlen);
+	err |= put_user(ivlen, &data->ivlen);
+	err |= get_user(byteoffset, &data32->byteoffset);
+	err |= put_user(byteoffset, &data->byteoffset);
+	err |= get_user(alg, &data32->alg);
+	err |= put_user(alg, &data->alg);
+	err |= get_user(mode, &data32->mode);
+	err |= put_user(mode, &data->mode);
+	err |= get_user(op, &data32->op);
+	err |= put_user(op, &data->op);
+
+	return err;
+}
+
+static int compat_put_qcedev_cipher_op_req(
+		struct compat_qcedev_cipher_op_req __user *data32,
+		struct qcedev_cipher_op_req __user *data)
+{
+	enum qcedev_cipher_mode_enum mode;
+	enum qcedev_cipher_alg_enum alg;
+	compat_ulong_t byteoffset;
+	enum qcedev_oper_enum op;
+	compat_ulong_t data_len;
+	compat_ulong_t encklen;
+	compat_ulong_t entries;
+	compat_ulong_t ivlen;
+	uint8_t in_place_op;
+	int err = 0, i = 0;
+	uint8_t use_pmem;
+	uint8_t enckey;
+	uint8_t iv;
+
+	err |= get_user(use_pmem, &data->use_pmem);
+	err |= put_user(use_pmem, &data32->use_pmem);
+
+	if (use_pmem)
+		err |= compat_put_qcedev_pmem_info(&data32->pmem, &data->pmem);
+	else
+		err |= compat_put_qcedev_vbuf_info(&data32->vbuf, &data->vbuf);
+
+	err |= get_user(entries, &data->entries);
+	err |= put_user(entries, &data32->entries);
+	err |= get_user(data_len, &data->data_len);
+	err |= put_user(data_len, &data32->data_len);
+	err |= get_user(in_place_op, &data->in_place_op);
+	err |= put_user(in_place_op, &data32->in_place_op);
+
+	for (i = 0; i < QCEDEV_MAX_KEY_SIZE; i++) {
+		err |= get_user(enckey, &(data->enckey[i]));
+		err |= put_user(enckey, &(data32->enckey[i]));
+	}
+
+	err |= get_user(encklen, &data->encklen);
+	err |= put_user(encklen, &data32->encklen);
+
+	for (i = 0; i < QCEDEV_MAX_IV_SIZE; i++) {
+		err |= get_user(iv, &(data->iv[i]));
+		err |= put_user(iv, &(data32->iv[i]));
+	}
+
+	err |= get_user(ivlen, &data->ivlen);
+	err |= put_user(ivlen, &data32->ivlen);
+	err |= get_user(byteoffset, &data->byteoffset);
+	err |= put_user(byteoffset, &data32->byteoffset);
+	err |= get_user(alg, &data->alg);
+	err |= put_user(alg, &data32->alg);
+	err |= get_user(mode, &data->mode);
+	err |= put_user(mode, &data32->mode);
+	err |= get_user(op, &data->op);
+	err |= put_user(op, &data32->op);
+
+	return err;
+}
+
+static int compat_get_qcedev_sha_op_req(
+		struct compat_qcedev_sha_op_req __user *data32,
+		struct qcedev_sha_op_req __user *data)
+{
+	enum qcedev_sha_alg_enum alg;
+	compat_ulong_t authklen;
+	compat_ulong_t data_len;
+	compat_ulong_t entries;
+	compat_ulong_t diglen;
+	compat_uptr_t authkey;
+	compat_uptr_t vaddr;
+	int err = 0, i = 0;
+	uint8_t digest;
+	uint32_t len;
+
+	for (i = 0; i < QCEDEV_MAX_BUFFERS; i++) {
+		err |= get_user(vaddr, &data32->data[i].vaddr);
+		err |= put_user(vaddr, (compat_uptr_t *)&data->data[i].vaddr);
+		err |= get_user(len, &data32->data[i].len);
+		err |= put_user(len, &data->data[i].len);
+	}
+
+	err |= get_user(entries, &data32->entries);
+	err |= put_user(entries, &data->entries);
+	err |= get_user(data_len, &data32->data_len);
+	err |= put_user(data_len, &data->data_len);
+
+	for (i = 0; i < QCEDEV_MAX_SHA_DIGEST; i++) {
+		err |= get_user(digest, &(data32->digest[i]));
+		err |= put_user(digest, &(data->digest[i]));
+	}
+
+	err |= get_user(diglen, &data32->diglen);
+	err |= put_user(diglen, &data->diglen);
+	err |= get_user(authkey, &data32->authkey);
+	err |= put_user(authkey, (compat_uptr_t *)&data->authkey);
+	err |= get_user(authklen, &data32->authklen);
+	err |= put_user(authklen, &data->authklen);
+	err |= get_user(alg, &data32->alg);
+	err |= put_user(alg, &data->alg);
+
+	return err;
+}
+
+static int compat_put_qcedev_sha_op_req(
+		struct compat_qcedev_sha_op_req __user *data32,
+		struct qcedev_sha_op_req __user *data)
+{
+	enum qcedev_sha_alg_enum alg;
+	compat_ulong_t authklen;
+	compat_ulong_t data_len;
+	compat_ulong_t entries;
+	compat_ulong_t diglen;
+	compat_uptr_t authkey;
+	compat_uptr_t vaddr;
+	int err = 0, i = 0;
+	uint8_t digest;
+	uint32_t len;
+
+	for (i = 0; i < QCEDEV_MAX_BUFFERS; i++) {
+		err |= get_user(vaddr, (compat_uptr_t *)&data->data[i].vaddr);
+		err |= put_user(vaddr, &data32->data[i].vaddr);
+		err |= get_user(len, &data->data[i].len);
+		err |= put_user(len, &data32->data[i].len);
+	}
+
+	err |= get_user(entries, &data->entries);
+	err |= put_user(entries, &data32->entries);
+	err |= get_user(data_len, &data->data_len);
+	err |= put_user(data_len, &data32->data_len);
+
+	for (i = 0; i < QCEDEV_MAX_SHA_DIGEST; i++) {
+		err |= get_user(digest, &(data->digest[i]));
+		err |= put_user(digest, &(data32->digest[i]));
+	}
+
+	err |= get_user(diglen, &data->diglen);
+	err |= put_user(diglen, &data32->diglen);
+	err |= get_user(authkey, (compat_uptr_t *)&data->authkey);
+	err |= put_user(authkey, &data32->authkey);
+	err |= get_user(authklen, &data->authklen);
+	err |= put_user(authklen, &data32->authklen);
+	err |= get_user(alg, &data->alg);
+	err |= put_user(alg, &data32->alg);
+
+	return err;
+}
+
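+/*
+ * Map a 32-bit ioctl command to its native equivalent; the numbers
+ * differ because _IOWR() encodes the (differing) argument struct sizes.
+ */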
+static unsigned int convert_cmd(unsigned int cmd)
+{
+	switch (cmd) {
+	case COMPAT_QCEDEV_IOCTL_ENC_REQ:
+		return QCEDEV_IOCTL_ENC_REQ;
+	case COMPAT_QCEDEV_IOCTL_DEC_REQ:
+		return QCEDEV_IOCTL_DEC_REQ;
+	case COMPAT_QCEDEV_IOCTL_SHA_INIT_REQ:
+		return QCEDEV_IOCTL_SHA_INIT_REQ;
+	case COMPAT_QCEDEV_IOCTL_SHA_UPDATE_REQ:
+		return QCEDEV_IOCTL_SHA_UPDATE_REQ;
+	case COMPAT_QCEDEV_IOCTL_SHA_FINAL_REQ:
+		return QCEDEV_IOCTL_SHA_FINAL_REQ;
+	case COMPAT_QCEDEV_IOCTL_GET_SHA_REQ:
+		return QCEDEV_IOCTL_GET_SHA_REQ;
+	case COMPAT_QCEDEV_IOCTL_GET_CMAC_REQ:
+		return QCEDEV_IOCTL_GET_CMAC_REQ;
+	default:
+		return cmd;
+	}
+
+}
+
+long compat_qcedev_ioctl(struct file *file,
+		unsigned int cmd, unsigned long arg)
+{
+	long ret;
+
+	switch (cmd) {
+	case COMPAT_QCEDEV_IOCTL_ENC_REQ:
+	case COMPAT_QCEDEV_IOCTL_DEC_REQ: {
+		struct compat_qcedev_cipher_op_req __user *data32;
+		struct qcedev_cipher_op_req __user *data;
+		int err;
+
+		data32 = compat_ptr(arg);
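+		/*
+		 * Stage a native-layout copy of the request in user
+		 * address space so qcedev_ioctl() can operate on it
+		 * unmodified.
+		 */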
+		data = compat_alloc_user_space(sizeof(*data));
+		if (!data)
+			return -EFAULT;
+
+		err = compat_get_qcedev_cipher_op_req(data32, data);
+		if (err)
+			return err;
+
+		ret = qcedev_ioctl(file, convert_cmd(cmd), (unsigned long)data);
+		err = compat_put_qcedev_cipher_op_req(data32, data);
+		return ret ? ret : err;
+	}
+	case COMPAT_QCEDEV_IOCTL_SHA_INIT_REQ:
+	case COMPAT_QCEDEV_IOCTL_SHA_UPDATE_REQ:
+	case COMPAT_QCEDEV_IOCTL_SHA_FINAL_REQ:
+	case COMPAT_QCEDEV_IOCTL_GET_CMAC_REQ:
+	case COMPAT_QCEDEV_IOCTL_GET_SHA_REQ: {
+		struct compat_qcedev_sha_op_req __user *data32;
+		struct qcedev_sha_op_req __user *data;
+		int err;
+
+		data32 = compat_ptr(arg);
+		data = compat_alloc_user_space(sizeof(*data));
+		if (!data)
+			return -EFAULT;
+
+		err = compat_get_qcedev_sha_op_req(data32, data);
+		if (err)
+			return err;
+
+		ret = qcedev_ioctl(file, convert_cmd(cmd), (unsigned long)data);
+		err = compat_put_qcedev_sha_op_req(data32, data);
+		return ret ? ret : err;
+	}
+	default:
+		return -ENOIOCTLCMD;
+	}
+	return 0;
+}
+EXPORT_SYMBOL(compat_qcedev_ioctl);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("QTI 32-64 Compatibility for Crypto driver");
diff -Nruw linux-4.4.115-fbx/drivers/crypto/msm./compat_qcedev.h linux-4.4.115-fbx/drivers/crypto/msm/compat_qcedev.h
--- linux-4.4.115-fbx/drivers/crypto/msm./compat_qcedev.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/crypto/msm/compat_qcedev.h	2019-01-22 16:16:23.107242820 +0100
@@ -0,0 +1,165 @@
+#ifndef _UAPI_COMPAT_QCEDEV__H
+#define _UAPI_COMPAT_QCEDEV__H
+
+#include <linux/types.h>
+#include <linux/ioctl.h>
+
+#if IS_ENABLED(CONFIG_COMPAT)
+#include <linux/compat.h>
+
+/**
+* struct compat_buf_info - Buffer information
+* @offset:			Offset from the base address of the buffer
+*				(Used when buffer is allocated using PMEM)
+* @vaddr:			Virtual buffer address pointer
+* @len:				Size of the buffer
+*/
+struct	compat_buf_info {
+	union {
+		compat_ulong_t	offset;
+		compat_uptr_t	vaddr;
+	};
+	compat_ulong_t	len;
+};
+
+/**
+* struct compat_qcedev_vbuf_info - Source and destination Buffer information
+* @src:				Array of buf_info for input/source
+* @dst:				Array of buf_info for output/destination
+*/
+struct	compat_qcedev_vbuf_info {
+	struct compat_buf_info	src[QCEDEV_MAX_BUFFERS];
+	struct compat_buf_info	dst[QCEDEV_MAX_BUFFERS];
+};
+
+/**
+* struct compat_qcedev_pmem_info - Stores PMEM buffer information
+* @fd_src:			Handle to /dev/adsp_pmem used to allocate
+*				memory for input/src buffer
+* @src:				Array of buf_info for input/source
+* @fd_dst:			Handle to /dev/adsp_pmem used to allocate
+*				memory for output/dst buffer
+* @dst:				Array of buf_info for output/destination
+* @pmem_src_offset:		The offset from input/src buffer
+*				(allocated by PMEM)
+*/
+struct	compat_qcedev_pmem_info {
+	compat_int_t		fd_src;
+	struct compat_buf_info	src[QCEDEV_MAX_BUFFERS];
+	compat_int_t		fd_dst;
+	struct compat_buf_info	dst[QCEDEV_MAX_BUFFERS];
+};
+
+/**
+* struct compat_qcedev_cipher_op_req - Holds the ciphering request information
+* @use_pmem (IN):	Flag to indicate if buffer source is PMEM
+*			QCEDEV_USE_PMEM/QCEDEV_NO_PMEM
+* @pmem (IN):		Stores PMEM buffer information.
+*			Refer struct qcedev_pmem_info
+* @vbuf (IN/OUT):	Stores Source and destination Buffer information
+*			Refer to struct qcedev_vbuf_info
+* @data_len (IN):	Total Length of input/src and output/dst in bytes
+* @in_place_op (IN):	Indicates whether the operation is in place, where
+*			source == destination.
+*			When using PMEM-allocated memory, this must be set to 1
+* @enckey (IN):		128 bits of confidentiality key
+*			enckey[0] bit 127-120, enckey[1] bit 119-112,..
+*			enckey[15] bit 7-0
+* @encklen (IN):	Length of the encryption key(set to 128  bits/16
+*			bytes in the driver)
+* @iv (IN/OUT):		Initialisation vector data
+*			This is updated by the driver, incremented by
+*			number of blocks encrypted/decrypted.
+* @ivlen (IN):		Length of the IV
+* @byteoffset (IN):	Offset in the Cipher BLOCK (applicable and to be set
+*			for AES-128 CTR mode only)
+* @alg (IN):		Type of ciphering algorithm: AES/DES/3DES
+* @mode (IN):		Mode to use with the AES algorithm: ECB/CBC/CTR
+*			Applicable when using the AES algorithm only
+* @op (IN):		Type of operation: QCEDEV_OPER_DEC/QCEDEV_OPER_ENC or
+*			QCEDEV_OPER_ENC_NO_KEY/QCEDEV_OPER_DEC_NO_KEY
+*
+* If use_pmem is set to 0, the driver assumes that memory was not allocated
+* via PMEM; the kernel will allocate memory, copy data in from the user
+* space buffer (data_src/data_dst), process it, and copy the data back
+* to the user space buffer.
+*
+* If use_pmem is set to 1, the driver assumes that memory was allocated via
+* PMEM.
+* The kernel driver will use the fd_src to determine the kernel virtual address
+* base that maps to the user space virtual address base for the  buffer
+* allocated in user space.
+* The final input/src and output/dst buffer pointer will be determined
+* by adding the offsets to the kernel virtual addr.
+*
+* If use of a hardware key is supported on the target, the user can configure
+* the key parameters (encklen, enckey) to use the hardware key.
+* In order to use the hardware key, set encklen to 0 and set the enckey
+* data array to 0.
+*/
+struct	compat_qcedev_cipher_op_req {
+	uint8_t					use_pmem;
+	union {
+		struct compat_qcedev_pmem_info	pmem;
+		struct compat_qcedev_vbuf_info	vbuf;
+	};
+	compat_ulong_t				entries;
+	compat_ulong_t				data_len;
+	uint8_t					in_place_op;
+	uint8_t					enckey[QCEDEV_MAX_KEY_SIZE];
+	compat_ulong_t				encklen;
+	uint8_t					iv[QCEDEV_MAX_IV_SIZE];
+	compat_ulong_t				ivlen;
+	compat_ulong_t				byteoffset;
+	enum qcedev_cipher_alg_enum		alg;
+	enum qcedev_cipher_mode_enum		mode;
+	enum qcedev_oper_enum			op;
+};
+
+/**
+* struct qcedev_sha_op_req - Holds the hashing request information
+* @data (IN):			Array of pointers to the data to be hashed
+* @entries (IN):		Number of buf_info entries in the data array
+* @data_len (IN):		Length of data to be hashed
+* @digest (IN/OUT):		Returns the hashed data information
+* @diglen (OUT):		Size of the hashed/digest data
+* @authkey (IN):		Pointer to authentication key for HMAC
+* @authklen (IN):		Size of the authentication key
+* @alg (IN):			Secure Hash algorithm
+*/
+struct	compat_qcedev_sha_op_req {
+	struct compat_buf_info			data[QCEDEV_MAX_BUFFERS];
+	compat_ulong_t				entries;
+	compat_ulong_t				data_len;
+	uint8_t					digest[QCEDEV_MAX_SHA_DIGEST];
+	compat_ulong_t				diglen;
+	compat_uptr_t				authkey;
+	compat_ulong_t				authklen;
+	enum qcedev_sha_alg_enum		alg;
+};
+
+struct file;
+extern long compat_qcedev_ioctl(struct file *file,
+			unsigned int cmd, unsigned long arg);
+
+#define COMPAT_QCEDEV_IOCTL_ENC_REQ		\
+	_IOWR(QCEDEV_IOC_MAGIC, 1, struct compat_qcedev_cipher_op_req)
+#define COMPAT_QCEDEV_IOCTL_DEC_REQ		\
+	_IOWR(QCEDEV_IOC_MAGIC, 2, struct compat_qcedev_cipher_op_req)
+#define COMPAT_QCEDEV_IOCTL_SHA_INIT_REQ	\
+	_IOWR(QCEDEV_IOC_MAGIC, 3, struct compat_qcedev_sha_op_req)
+#define COMPAT_QCEDEV_IOCTL_SHA_UPDATE_REQ	\
+	_IOWR(QCEDEV_IOC_MAGIC, 4, struct compat_qcedev_sha_op_req)
+#define COMPAT_QCEDEV_IOCTL_SHA_FINAL_REQ	\
+	_IOWR(QCEDEV_IOC_MAGIC, 5, struct compat_qcedev_sha_op_req)
+#define COMPAT_QCEDEV_IOCTL_GET_SHA_REQ	\
+	_IOWR(QCEDEV_IOC_MAGIC, 6, struct compat_qcedev_sha_op_req)
+#define COMPAT_QCEDEV_IOCTL_LOCK_CE	\
+	_IO(QCEDEV_IOC_MAGIC, 7)
+#define COMPAT_QCEDEV_IOCTL_UNLOCK_CE	\
+	_IO(QCEDEV_IOC_MAGIC, 8)
+#define COMPAT_QCEDEV_IOCTL_GET_CMAC_REQ	\
+	_IOWR(QCEDEV_IOC_MAGIC, 9, struct compat_qcedev_sha_op_req)
+
+#endif /* CONFIG_COMPAT */
+#endif /* _UAPI_COMPAT_QCEDEV__H */
diff -Nruw linux-4.4.115-fbx/drivers/crypto/msm./Kconfig linux-4.4.115-fbx/drivers/crypto/msm/Kconfig
--- linux-4.4.115-fbx/drivers/crypto/msm./Kconfig	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/crypto/msm/Kconfig	2019-01-22 16:16:23.107242820 +0100
@@ -0,0 +1,10 @@
+
+config CRYPTO_DEV_QCOM_ICE
+	tristate "Inline Crypto Module"
+	default n
+	depends on PFK && BLK_DEV_DM
+	help
+	  This driver supports Inline Crypto Engine for QTI chipsets, MSM8994
+	  and later, to accelerate crypto operations for storage needs.
+	  To compile this driver as a module, choose M here: the
+	  module will be called ice.
diff -Nruw linux-4.4.115-fbx/drivers/crypto/msm./Makefile linux-4.4.115-fbx/drivers/crypto/msm/Makefile
--- linux-4.4.115-fbx/drivers/crypto/msm./Makefile	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/crypto/msm/Makefile	2019-01-22 16:16:23.107242820 +0100
@@ -0,0 +1,12 @@
+obj-$(CONFIG_CRYPTO_DEV_QCEDEV) += qcedev.o
+ifeq ($(CONFIG_CRYPTO_DEV_QCE50), y)
+        obj-$(CONFIG_CRYPTO_DEV_QCOM_MSM_QCE) += qce50.o
+else
+        obj-$(CONFIG_CRYPTO_DEV_QCOM_MSM_QCE) += qce.o
+endif
+ifdef CONFIG_COMPAT
+obj-$(CONFIG_CRYPTO_DEV_QCOM_MSM_QCE) += compat_qcedev.o
+endif
+obj-$(CONFIG_CRYPTO_DEV_QCRYPTO) += qcrypto.o
+obj-$(CONFIG_CRYPTO_DEV_OTA_CRYPTO) += ota_crypto.o
+obj-$(CONFIG_CRYPTO_DEV_QCOM_ICE) += ice.o
diff -Nruw linux-4.4.115-fbx/drivers/crypto/msm./ota_crypto.c linux-4.4.115-fbx/drivers/crypto/msm/ota_crypto.c
--- linux-4.4.115-fbx/drivers/crypto/msm./ota_crypto.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/crypto/msm/ota_crypto.c	2019-01-22 16:16:23.111242857 +0100
@@ -0,0 +1,980 @@
+/* Copyright (c) 2010-2014,2017 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+/* Qualcomm Over the Air (OTA) Crypto driver */
+
+#include <linux/types.h>
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/kernel.h>
+#include <linux/dmapool.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/miscdevice.h>
+#include <linux/uaccess.h>
+#include <linux/debugfs.h>
+#include <linux/cache.h>
+
+
+#include <linux/qcota.h>
+#include "qce.h"
+#include "qce_ota.h"
+
+enum qce_ota_oper_enum {
+	QCE_OTA_F8_OPER   = 0,
+	QCE_OTA_MPKT_F8_OPER = 1,
+	QCE_OTA_F9_OPER  = 2,
+	QCE_OTA_VAR_MPKT_F8_OPER = 3,
+	QCE_OTA_OPER_LAST
+};
+
+struct ota_dev_control;
+
+struct ota_async_req {
+	struct list_head rlist;
+	struct completion complete;
+	int err;
+	enum qce_ota_oper_enum op;
+	union {
+		struct qce_f9_req f9_req;
+		struct qce_f8_req f8_req;
+		struct qce_f8_multi_pkt_req f8_mp_req;
+		struct qce_f8_varible_multi_pkt_req f8_v_mp_req;
+	} req;
+	unsigned int steps;
+	struct ota_qce_dev  *pqce;
+};
+
+/*
+ * Register ourselves as a misc device so that the OTA crypto engine
+ * can be accessed from userspace.
+ */
+
+
+#define QCOTA_DEV	"qcota"
+
+
+struct ota_dev_control {
+
+	/* misc device */
+	struct miscdevice miscdevice;
+	struct list_head ready_commands;
+	unsigned magic;
+	struct list_head qce_dev;
+	spinlock_t lock;
+	struct mutex register_lock;
+	bool registered;
+	uint32_t total_units;
+};
+
+struct ota_qce_dev {
+	struct list_head qlist;
+	/* qce handle */
+	void *qce;
+
+	/* platform device */
+	struct platform_device *pdev;
+
+	struct ota_async_req *active_command;
+	struct tasklet_struct done_tasklet;
+	struct ota_dev_control *podev;
+	uint32_t unit;
+	u64 total_req;
+	u64 err_req;
+};
+
+#define OTA_MAGIC 0x4f544143
+
+static long qcota_ioctl(struct file *file,
+			  unsigned cmd, unsigned long arg);
+static int qcota_open(struct inode *inode, struct file *file);
+static int qcota_release(struct inode *inode, struct file *file);
+static int start_req(struct ota_qce_dev *pqce, struct ota_async_req *areq);
+static void f8_cb(void *cookie, unsigned char *icv, unsigned char *iv, int ret);
+
+static const struct file_operations qcota_fops = {
+	.owner = THIS_MODULE,
+	.unlocked_ioctl = qcota_ioctl,
+	.open = qcota_open,
+	.release = qcota_release,
+};
+
+static struct ota_dev_control qcota_dev = {
+	.miscdevice = {
+			.minor = MISC_DYNAMIC_MINOR,
+			.name = "qcota0",
+			.fops = &qcota_fops,
+	},
+	.magic = OTA_MAGIC,
+};
+
+#define DEBUG_MAX_FNAME  16
+#define DEBUG_MAX_RW_BUF 1024
+
+struct qcota_stat {
+	u64 f8_req;
+	u64 f8_mp_req;
+	u64 f8_v_mp_req;
+	u64 f9_req;
+	u64 f8_op_success;
+	u64 f8_op_fail;
+	u64 f8_mp_op_success;
+	u64 f8_mp_op_fail;
+	u64 f8_v_mp_op_success;
+	u64 f8_v_mp_op_fail;
+	u64 f9_op_success;
+	u64 f9_op_fail;
+};
+static struct qcota_stat _qcota_stat;
+static struct dentry *_debug_dent;
+static char _debug_read_buf[DEBUG_MAX_RW_BUF];
+static int _debug_qcota;
+
+static struct ota_dev_control *qcota_control(void)
+{
+
+	return &qcota_dev;
+}
+
+static int qcota_open(struct inode *inode, struct file *file)
+{
+	struct ota_dev_control *podev;
+
+	podev = qcota_control();
+	if (podev == NULL) {
+		pr_err("%s: no such device %d\n", __func__,
+				MINOR(inode->i_rdev));
+		return -ENOENT;
+	}
+
+	file->private_data = podev;
+
+	return 0;
+}
+
+static int qcota_release(struct inode *inode, struct file *file)
+{
+	struct ota_dev_control *podev;
+
+	podev =  file->private_data;
+
+	if (podev != NULL && podev->magic != OTA_MAGIC) {
+		pr_err("%s: invalid handle %pK\n",
+			__func__, podev);
+	}
+
+	file->private_data = NULL;
+
+	return 0;
+}
+
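+/*
+ * Advance a variable multi-packet F8 request to its next packet. The
+ * packets sit back to back in one kernel buffer, each start aligned to
+ * a cache line, and count_c is bumped per packet. Returns false once
+ * all packets are done or an error has been recorded.
+ */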
+static bool  _next_v_mp_req(struct ota_async_req *areq)
+{
+	unsigned char *p;
+
+	if (areq->err)
+		return false;
+	if (++areq->steps >= areq->req.f8_v_mp_req.num_pkt)
+		return false;
+
+	p = areq->req.f8_v_mp_req.qce_f8_req.data_in;
+	p += areq->req.f8_v_mp_req.qce_f8_req.data_len;
+	p = (uint8_t *) ALIGN(((uintptr_t)p), L1_CACHE_BYTES);
+
+	areq->req.f8_v_mp_req.qce_f8_req.data_out = p;
+	areq->req.f8_v_mp_req.qce_f8_req.data_in = p;
+	areq->req.f8_v_mp_req.qce_f8_req.data_len =
+		areq->req.f8_v_mp_req.cipher_iov[areq->steps].size;
+
+	areq->req.f8_v_mp_req.qce_f8_req.count_c++;
+	return true;
+}
+
+static void req_done(unsigned long data)
+{
+	struct ota_qce_dev *pqce = (struct ota_qce_dev *)data;
+	struct ota_dev_control *podev = pqce->podev;
+	struct ota_async_req *areq;
+	unsigned long flags;
+	struct ota_async_req *new_req = NULL;
+	int ret = 0;
+	bool schedule = true;
+
+	spin_lock_irqsave(&podev->lock, flags);
+	areq = pqce->active_command;
+	if (unlikely(areq == NULL))
+		pr_err("ota_crypto: req_done, no active request\n");
+	else if (areq->op == QCE_OTA_VAR_MPKT_F8_OPER) {
+		if (_next_v_mp_req(areq)) {
+			/* execute next subcommand */
+			spin_unlock_irqrestore(&podev->lock, flags);
+			ret = start_req(pqce, areq);
+			if (unlikely(ret)) {
+				areq->err = ret;
+				schedule = true;
+				spin_lock_irqsave(&podev->lock, flags);
+			} else {
+				areq = NULL;
+				schedule = false;
+			}
+		} else {
+			/* done with this variable mp req */
+			schedule = true;
+		}
+	}
+	while (schedule) {
+		if (!list_empty(&podev->ready_commands)) {
+			new_req = container_of(podev->ready_commands.next,
+						struct ota_async_req, rlist);
+			if (!new_req)
+				break;
+
+			list_del(&new_req->rlist);
+			pqce->active_command = new_req;
+			spin_unlock_irqrestore(&podev->lock, flags);
+
+			new_req->err = 0;
+			/* start a new request */
+			ret = start_req(pqce, new_req);
+			if (unlikely(new_req && ret)) {
+				new_req->err = ret;
+				complete(&new_req->complete);
+				ret = 0;
+				new_req = NULL;
+				spin_lock_irqsave(&podev->lock, flags);
+			} else {
+				schedule = false;
+			}
+		} else {
+			pqce->active_command = NULL;
+			spin_unlock_irqrestore(&podev->lock, flags);
+			schedule = false;
+		}
+	}
+	if (areq)
+		complete(&areq->complete);
+}
+
+static void f9_cb(void *cookie, unsigned char *icv, unsigned char *iv,
+	int ret)
+{
+	struct ota_async_req *areq = (struct ota_async_req *) cookie;
+	struct ota_qce_dev *pqce;
+
+	pqce = areq->pqce;
+	areq->req.f9_req.mac_i  = *((uint32_t *)icv);
+
+	if (ret) {
+		pqce->err_req++;
+		areq->err = -ENXIO;
+	} else
+		areq->err = 0;
+
+	tasklet_schedule(&pqce->done_tasklet);
+}
+
+static void f8_cb(void *cookie, unsigned char *icv, unsigned char *iv,
+	int ret)
+{
+	struct ota_async_req *areq = (struct ota_async_req *) cookie;
+	struct ota_qce_dev *pqce;
+
+	pqce = areq->pqce;
+
+	if (ret) {
+		pqce->err_req++;
+		areq->err = -ENXIO;
+	} else {
+		areq->err = 0;
+	}
+
+	tasklet_schedule(&pqce->done_tasklet);
+}
+
+static int start_req(struct ota_qce_dev *pqce, struct ota_async_req *areq)
+{
+	struct qce_f9_req *pf9;
+	struct qce_f8_multi_pkt_req *p_mp_f8;
+	struct qce_f8_req *pf8;
+	int ret = 0;
+
+	/* command should be on the podev->active_command */
+	areq->pqce = pqce;
+
+	switch (areq->op) {
+	case QCE_OTA_F8_OPER:
+		pf8 = &areq->req.f8_req;
+		ret = qce_f8_req(pqce->qce, pf8, areq, f8_cb);
+		break;
+	case QCE_OTA_MPKT_F8_OPER:
+		p_mp_f8 = &areq->req.f8_mp_req;
+		ret = qce_f8_multi_pkt_req(pqce->qce, p_mp_f8, areq, f8_cb);
+		break;
+
+	case QCE_OTA_F9_OPER:
+		pf9 = &areq->req.f9_req;
+		ret =  qce_f9_req(pqce->qce, pf9, areq, f9_cb);
+		break;
+
+	case QCE_OTA_VAR_MPKT_F8_OPER:
+		pf8 = &areq->req.f8_v_mp_req.qce_f8_req;
+		ret = qce_f8_req(pqce->qce, pf8, areq, f8_cb);
+		break;
+
+	default:
+		ret = -ENOTSUPP;
+		break;
+	}
+	areq->err = ret;
+	pqce->total_req++;
+	if (ret)
+		pqce->err_req++;
+	return ret;
+}
+
+static struct ota_qce_dev *schedule_qce(struct ota_dev_control *podev)
+{
+	/* do this function with spinlock set */
+	struct ota_qce_dev *p;
+
+	if (unlikely(list_empty(&podev->qce_dev))) {
+		pr_err("%s: no valid qce to schedule\n", __func__);
+		return NULL;
+	}
+
+	list_for_each_entry(p, &podev->qce_dev, qlist) {
+		if (p->active_command == NULL)
+			return p;
+	}
+	return NULL;
+}
+
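+/*
+ * Hand the request to an idle QCE engine if one is available, otherwise
+ * queue it on ready_commands; either way, block until the done tasklet
+ * signals completion, then account the result.
+ */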
+static int submit_req(struct ota_async_req *areq, struct ota_dev_control *podev)
+{
+	unsigned long flags;
+	int ret = 0;
+	struct qcota_stat *pstat;
+	struct ota_qce_dev *pqce;
+
+	areq->err = 0;
+
+	spin_lock_irqsave(&podev->lock, flags);
+	pqce = schedule_qce(podev);
+	if (pqce) {
+		pqce->active_command = areq;
+		spin_unlock_irqrestore(&podev->lock, flags);
+
+		ret = start_req(pqce, areq);
+		if (ret != 0) {
+			spin_lock_irqsave(&podev->lock, flags);
+			pqce->active_command = NULL;
+			spin_unlock_irqrestore(&podev->lock, flags);
+		}
+
+	} else {
+		list_add_tail(&areq->rlist, &podev->ready_commands);
+		spin_unlock_irqrestore(&podev->lock, flags);
+	}
+
+	if (ret == 0)
+		wait_for_completion(&areq->complete);
+
+	pstat = &_qcota_stat;
+	switch (areq->op) {
+	case QCE_OTA_F8_OPER:
+		if (areq->err)
+			pstat->f8_op_fail++;
+		else
+			pstat->f8_op_success++;
+		break;
+
+	case QCE_OTA_MPKT_F8_OPER:
+
+		if (areq->err)
+			pstat->f8_mp_op_fail++;
+		else
+			pstat->f8_mp_op_success++;
+		break;
+
+	case QCE_OTA_F9_OPER:
+		if (areq->err)
+			pstat->f9_op_fail++;
+		else
+			pstat->f9_op_success++;
+		break;
+	case QCE_OTA_VAR_MPKT_F8_OPER:
+	default:
+		if (areq->err)
+			pstat->f8_v_mp_op_fail++;
+		else
+			pstat->f8_v_mp_op_success++;
+		break;
+	}
+
+	return areq->err;
+}
+
+static long qcota_ioctl(struct file *file,
+			  unsigned cmd, unsigned long arg)
+{
+	int err = 0;
+	struct ota_dev_control *podev;
+	uint8_t *user_src;
+	uint8_t *user_dst;
+	uint8_t *k_buf = NULL;
+	struct ota_async_req areq;
+	uint32_t total, temp;
+	struct qcota_stat *pstat;
+	int i;
+	uint8_t *p = NULL;
+
+	podev =  file->private_data;
+	if (podev == NULL || podev->magic != OTA_MAGIC) {
+		pr_err("%s: invalid handle %pK\n",
+			__func__, podev);
+		return -ENOENT;
+	}
+
+	/* Verify user arguments. */
+	if (_IOC_TYPE(cmd) != QCOTA_IOC_MAGIC)
+		return -ENOTTY;
+
+	init_completion(&areq.complete);
+
+	pstat = &_qcota_stat;
+
+	switch (cmd) {
+	case QCOTA_F9_REQ:
+		if (!access_ok(VERIFY_WRITE, (void __user *)arg,
+			       sizeof(struct qce_f9_req)))
+			return -EFAULT;
+		if (__copy_from_user(&areq.req.f9_req, (void __user *)arg,
+				     sizeof(struct qce_f9_req)))
+			return -EFAULT;
+
+		user_src = areq.req.f9_req.message;
+		if (!access_ok(VERIFY_READ, (void __user *)user_src,
+			       areq.req.f9_req.msize))
+			return -EFAULT;
+
+		if (areq.req.f9_req.msize == 0)
+			return 0;
+		k_buf = kmalloc(areq.req.f9_req.msize, GFP_KERNEL);
+		if (k_buf == NULL)
+			return -ENOMEM;
+
+		if (__copy_from_user(k_buf, (void __user *)user_src,
+				areq.req.f9_req.msize)) {
+			kfree(k_buf);
+			return -EFAULT;
+		}
+
+		areq.req.f9_req.message = k_buf;
+		areq.op = QCE_OTA_F9_OPER;
+
+		pstat->f9_req++;
+		err = submit_req(&areq, podev);
+
+		areq.req.f9_req.message = user_src;
+		if (err == 0 && __copy_to_user((void __user *)arg,
+				&areq.req.f9_req, sizeof(struct qce_f9_req))) {
+			err = -EFAULT;
+		}
+		kfree(k_buf);
+		break;
+
+	case QCOTA_F8_REQ:
+		if (!access_ok(VERIFY_WRITE, (void __user *)arg,
+			       sizeof(struct qce_f8_req)))
+			return -EFAULT;
+		if (__copy_from_user(&areq.req.f8_req, (void __user *)arg,
+				     sizeof(struct qce_f8_req)))
+			return -EFAULT;
+		total = areq.req.f8_req.data_len;
+		user_src = areq.req.f8_req.data_in;
+		if (user_src != NULL) {
+			if (!access_ok(VERIFY_READ, (void __user *)
+					user_src, total))
+				return -EFAULT;
+		}
+
+		user_dst = areq.req.f8_req.data_out;
+		if (!access_ok(VERIFY_WRITE, (void __user *)
+				user_dst, total))
+			return -EFAULT;
+
+		if (!total)
+			return 0;
+		k_buf = kmalloc(total, GFP_KERNEL);
+		if (k_buf == NULL)
+			return -ENOMEM;
+
+		/* k_buf returned from kmalloc should be cache line aligned */
+		if (user_src && __copy_from_user(k_buf,
+				(void __user *)user_src, total)) {
+			kfree(k_buf);
+			return -EFAULT;
+		}
+
+		if (user_src)
+			areq.req.f8_req.data_in = k_buf;
+		else
+			areq.req.f8_req.data_in = NULL;
+		areq.req.f8_req.data_out = k_buf;
+
+		areq.op = QCE_OTA_F8_OPER;
+
+		pstat->f8_req++;
+		err = submit_req(&areq, podev);
+
+		if (err == 0 && __copy_to_user(user_dst, k_buf, total))
+			err = -EFAULT;
+		kfree(k_buf);
+
+		break;
+
+	case QCOTA_F8_MPKT_REQ:
+		if (!access_ok(VERIFY_WRITE, (void __user *)arg,
+			       sizeof(struct qce_f8_multi_pkt_req)))
+			return -EFAULT;
+		if (__copy_from_user(&areq.req.f8_mp_req, (void __user *)arg,
+				     sizeof(struct qce_f8_multi_pkt_req)))
+			return -EFAULT;
+		temp = areq.req.f8_mp_req.qce_f8_req.data_len;
+		if (temp < (uint32_t) areq.req.f8_mp_req.cipher_start +
+				 areq.req.f8_mp_req.cipher_size)
+			return -EINVAL;
+		total = (uint32_t) areq.req.f8_mp_req.num_pkt *
+				areq.req.f8_mp_req.qce_f8_req.data_len;
+
+		user_src = areq.req.f8_mp_req.qce_f8_req.data_in;
+		if (!access_ok(VERIFY_READ, (void __user *)
+				user_src, total))
+			return -EFAULT;
+
+		user_dst = areq.req.f8_mp_req.qce_f8_req.data_out;
+		if (!access_ok(VERIFY_WRITE, (void __user *)
+				user_dst, total))
+			return -EFAULT;
+
+		if (!total)
+			return 0;
+		k_buf = kmalloc(total, GFP_KERNEL);
+		if (k_buf == NULL)
+			return -ENOMEM;
+		/* k_buf returned from kmalloc should be cache line aligned */
+		if (__copy_from_user(k_buf, (void __user *)user_src, total)) {
+			kfree(k_buf);
+
+			return -EFAULT;
+		}
+
+		areq.req.f8_mp_req.qce_f8_req.data_out = k_buf;
+		areq.req.f8_mp_req.qce_f8_req.data_in = k_buf;
+
+		areq.op = QCE_OTA_MPKT_F8_OPER;
+
+		pstat->f8_mp_req++;
+		err = submit_req(&areq, podev);
+
+		if (err == 0 && __copy_to_user(user_dst, k_buf, total))
+			err = -EFAULT;
+		kfree(k_buf);
+		break;
+
+	case QCOTA_F8_V_MPKT_REQ:
+		if (!access_ok(VERIFY_WRITE, (void __user *)arg,
+				sizeof(struct qce_f8_varible_multi_pkt_req)))
+			return -EFAULT;
+		if (__copy_from_user(&areq.req.f8_v_mp_req, (void __user *)arg,
+				sizeof(struct qce_f8_varible_multi_pkt_req)))
+			return -EFAULT;
+
+		if (areq.req.f8_v_mp_req.num_pkt > MAX_NUM_V_MULTI_PKT)
+			return -EINVAL;
+
+		for (i = 0, total = 0; i < areq.req.f8_v_mp_req.num_pkt; i++) {
+			if (!access_ok(VERIFY_WRITE, (void __user *)
+				areq.req.f8_v_mp_req.cipher_iov[i].addr,
+				areq.req.f8_v_mp_req.cipher_iov[i].size))
+				return -EFAULT;
+			total += areq.req.f8_v_mp_req.cipher_iov[i].size;
+			total = ALIGN(total, L1_CACHE_BYTES);
+		}
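+		/*
+		 * Illustrative sizing (assuming L1_CACHE_BYTES == 64): two
+		 * packets of 100 and 40 bytes land at k_buf offsets 0 and
+		 * 128, and total becomes 192, so every packet starts on a
+		 * cache line boundary.
+		 */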
+
+		if (!total)
+			return 0;
+		k_buf = kmalloc(total, GFP_KERNEL);
+		if (k_buf == NULL)
+			return -ENOMEM;
+
+		for (i = 0, p = k_buf; i < areq.req.f8_v_mp_req.num_pkt; i++) {
+			user_src =  areq.req.f8_v_mp_req.cipher_iov[i].addr;
+			if (__copy_from_user(p, (void __user *)user_src,
+				areq.req.f8_v_mp_req.cipher_iov[i].size)) {
+				kfree(k_buf);
+				return -EFAULT;
+			}
+			p += areq.req.f8_v_mp_req.cipher_iov[i].size;
+			p = (uint8_t *) ALIGN(((uintptr_t)p),
+							L1_CACHE_BYTES);
+		}
+
+		areq.req.f8_v_mp_req.qce_f8_req.data_out = k_buf;
+		areq.req.f8_v_mp_req.qce_f8_req.data_in = k_buf;
+		areq.req.f8_v_mp_req.qce_f8_req.data_len =
+			areq.req.f8_v_mp_req.cipher_iov[0].size;
+		areq.steps = 0;
+		areq.op = QCE_OTA_VAR_MPKT_F8_OPER;
+
+		pstat->f8_v_mp_req++;
+		err = submit_req(&areq, podev);
+
+		if (err != 0) {
+			kfree(k_buf);
+			return err;
+		}
+
+		for (i = 0, p = k_buf; i < areq.req.f8_v_mp_req.num_pkt; i++) {
+			user_dst =  areq.req.f8_v_mp_req.cipher_iov[i].addr;
+			if (__copy_to_user(user_dst, p,
+				areq.req.f8_v_mp_req.cipher_iov[i].size)) {
+				kfree(k_buf);
+				return -EFAULT;
+			}
+			p += areq.req.f8_v_mp_req.cipher_iov[i].size;
+			p = (uint8_t *) ALIGN(((uintptr_t)p),
+							L1_CACHE_BYTES);
+		}
+		kfree(k_buf);
+		break;
+	default:
+		return -ENOTTY;
+	}
+
+	return err;
+}
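+
+/*
+ * Illustrative user space call sequence (a sketch, not part of this driver;
+ * the device node name is an assumption):
+ *
+ *	struct qce_f9_req req;
+ *	int fd = open("/dev/qcota0", O_RDWR);
+ *
+ *	// fill in req (message pointer, msize, ikey, fresh, count_i, ...)
+ *	if (fd >= 0 && ioctl(fd, QCOTA_F9_REQ, &req) == 0)
+ *		;	// req was copied back with the f9 result
+ */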
+
+static int qcota_probe(struct platform_device *pdev)
+{
+	void *handle = NULL;
+	int rc = 0;
+	struct ota_dev_control *podev;
+	struct ce_hw_support ce_support;
+	struct ota_qce_dev *pqce;
+	unsigned long flags;
+
+	podev = &qcota_dev;
+	pqce = kzalloc(sizeof(*pqce), GFP_KERNEL);
+	if (!pqce) {
+		pr_err("qcota_probe: Memory allocation FAIL\n");
+		return -ENOMEM;
+	}
+
+	pqce->podev = podev;
+	pqce->active_command = NULL;
+	tasklet_init(&pqce->done_tasklet, req_done, (unsigned long)pqce);
+
+	/* open qce */
+	handle = qce_open(pdev, &rc);
+	if (handle == NULL) {
+		pr_err("%s: device %s, can not open qce\n",
+			__func__, pdev->name);
+		goto err;
+	}
+	if (qce_hw_support(handle, &ce_support) < 0 ||
+					ce_support.ota == false) {
+		pr_err("%s: device %s, qce does not support ota capability\n",
+			__func__, pdev->name);
+		rc = -ENODEV;
+		goto err;
+	}
+	pqce->qce = handle;
+	pqce->pdev = pdev;
+	pqce->total_req = 0;
+	pqce->err_req = 0;
+	platform_set_drvdata(pdev, pqce);
+
+	mutex_lock(&podev->register_lock);
+	rc = 0;
+	if (podev->registered == false) {
+		rc = misc_register(&podev->miscdevice);
+		if (rc == 0) {
+			pqce->unit = podev->total_units;
+			podev->total_units++;
+			podev->registered = true;
+		}
+	} else {
+		pqce->unit = podev->total_units;
+		podev->total_units++;
+	}
+	mutex_unlock(&podev->register_lock);
+	if (rc) {
+		pr_err("ion: failed to register misc device.\n");
+		goto err;
+	}
+
+	spin_lock_irqsave(&podev->lock, flags);
+	list_add_tail(&pqce->qlist, &podev->qce_dev);
+	spin_unlock_irqrestore(&podev->lock, flags);
+
+	return 0;
+err:
+	if (handle)
+		qce_close(handle);
+
+	platform_set_drvdata(pdev, NULL);
+	tasklet_kill(&pqce->done_tasklet);
+	kfree(pqce);
+	return rc;
+}
+
+static int qcota_remove(struct platform_device *pdev)
+{
+	struct ota_dev_control *podev;
+	struct ota_qce_dev *pqce;
+	unsigned long flags;
+
+	pqce = platform_get_drvdata(pdev);
+	if (!pqce)
+		return 0;
+	if (pqce->qce)
+		qce_close(pqce->qce);
+
+	podev = pqce->podev;
+	if (!podev)
+		goto ret;
+
+	spin_lock_irqsave(&podev->lock, flags);
+	list_del(&pqce->qlist);
+	spin_unlock_irqrestore(&podev->lock, flags);
+
+	mutex_lock(&podev->register_lock);
+	if (--podev->total_units == 0) {
+		if (podev->miscdevice.minor != MISC_DYNAMIC_MINOR)
+			misc_deregister(&podev->miscdevice);
+		podev->registered = false;
+	}
+	mutex_unlock(&podev->register_lock);
+ret:
+
+	tasklet_kill(&pqce->done_tasklet);
+	kfree(pqce);
+	return 0;
+}
+
+static const struct of_device_id qcota_match[] = {
+	{	.compatible = "qcom,qcota",
+	},
+	{}
+};
+
+static struct platform_driver qcota_plat_driver = {
+	.probe = qcota_probe,
+	.remove = qcota_remove,
+	.driver = {
+		.name = "qcota",
+		.owner = THIS_MODULE,
+		.of_match_table = qcota_match,
+	},
+};
+
+static int _disp_stats(void)
+{
+	struct qcota_stat *pstat;
+	int len = 0;
+	struct ota_dev_control *podev = &qcota_dev;
+	unsigned long flags;
+	struct ota_qce_dev *p;
+
+	pstat = &_qcota_stat;
+	len = scnprintf(_debug_read_buf, DEBUG_MAX_RW_BUF - 1,
+			"\nQualcomm OTA crypto accelerator Statistics:\n");
+
+	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   F8 request                      : %llu\n",
+					pstat->f8_req);
+	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   F8 operation success            : %llu\n",
+					pstat->f8_op_success);
+	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   F8 operation fail               : %llu\n",
+					pstat->f8_op_fail);
+
+	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   F8 MP request                   : %llu\n",
+					pstat->f8_mp_req);
+	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   F8 MP operation success         : %llu\n",
+					pstat->f8_mp_op_success);
+	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   F8 MP operation fail            : %llu\n",
+					pstat->f8_mp_op_fail);
+
+	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   F8 Variable MP request          : %llu\n",
+					pstat->f8_v_mp_req);
+	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   F8 Variable MP operation success: %llu\n",
+					pstat->f8_v_mp_op_success);
+	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   F8 Variable MP operation fail   : %llu\n",
+					pstat->f8_v_mp_op_fail);
+
+	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   F9 request                      : %llu\n",
+					pstat->f9_req);
+	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   F9 operation success            : %llu\n",
+					pstat->f9_op_success);
+	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   F9 operation fail               : %llu\n",
+					pstat->f9_op_fail);
+
+	spin_lock_irqsave(&podev->lock, flags);
+
+	list_for_each_entry(p, &podev->qce_dev, qlist) {
+		len += scnprintf(
+			_debug_read_buf + len,
+			DEBUG_MAX_RW_BUF - len - 1,
+			"   Engine %4d Req                 : %llu\n",
+			p->unit,
+			p->total_req
+		);
+		len += scnprintf(
+			_debug_read_buf + len,
+			DEBUG_MAX_RW_BUF - len - 1,
+			"   Engine %4d Req Error           : %llu\n",
+			p->unit,
+			p->err_req
+		);
+	}
+
+	spin_unlock_irqrestore(&podev->lock, flags);
+
+	return len;
+}
+
+static int _debug_stats_open(struct inode *inode, struct file *file)
+{
+	file->private_data = inode->i_private;
+	return 0;
+}
+
+static ssize_t _debug_stats_read(struct file *file, char __user *buf,
+			size_t count, loff_t *ppos)
+{
+	int rc = -EINVAL;
+	int len;
+
+	len = _disp_stats();
+	if (len <= count)
+		rc = simple_read_from_buffer((void __user *) buf, len,
+			ppos, (void *) _debug_read_buf, len);
+
+	return rc;
+}
+
+static ssize_t _debug_stats_write(struct file *file, const char __user *buf,
+			size_t count, loff_t *ppos)
+{
+	struct ota_dev_control *podev = &qcota_dev;
+	unsigned long flags;
+	struct ota_qce_dev *p;
+
+	memset((char *)&_qcota_stat, 0, sizeof(struct qcota_stat));
+
+	spin_lock_irqsave(&podev->lock, flags);
+
+	list_for_each_entry(p, &podev->qce_dev, qlist) {
+		p->total_req = 0;
+		p->err_req = 0;
+	}
+
+	spin_unlock_irqrestore(&podev->lock, flags);
+
+	return count;
+}
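+
+/*
+ * Any write to the stats file resets all counters; e.g. (illustrative,
+ * assuming debugfs is mounted at the usual location):
+ *
+ *	echo 1 > /sys/kernel/debug/qcota/stats-0
+ */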
+
+static const struct file_operations _debug_stats_ops = {
+	.open =         _debug_stats_open,
+	.read =         _debug_stats_read,
+	.write =        _debug_stats_write,
+};
+
+static int _qcota_debug_init(void)
+{
+	int rc;
+	char name[DEBUG_MAX_FNAME];
+	struct dentry *dent;
+
+	_debug_dent = debugfs_create_dir("qcota", NULL);
+	if (IS_ERR_OR_NULL(_debug_dent)) {
+		pr_err("qcota debugfs_create_dir fail, error %ld\n",
+				PTR_ERR(_debug_dent));
+		return _debug_dent ? PTR_ERR(_debug_dent) : -ENOMEM;
+	}
+
+	snprintf(name, DEBUG_MAX_FNAME-1, "stats-0");
+	_debug_qcota = 0;
+	dent = debugfs_create_file(name, 0644, _debug_dent,
+				&_debug_qcota, &_debug_stats_ops);
+	if (IS_ERR_OR_NULL(dent)) {
+		rc = dent ? PTR_ERR(dent) : -ENOMEM;
+		pr_err("qcota debugfs_create_file fail, error %d\n", rc);
+		goto err;
+	}
+	return 0;
+err:
+	debugfs_remove_recursive(_debug_dent);
+	return rc;
+}
+
+static int __init qcota_init(void)
+{
+	int rc;
+	struct ota_dev_control *podev;
+
+	rc = _qcota_debug_init();
+	if (rc)
+		return rc;
+
+	podev = &qcota_dev;
+	INIT_LIST_HEAD(&podev->ready_commands);
+	INIT_LIST_HEAD(&podev->qce_dev);
+	spin_lock_init(&podev->lock);
+	mutex_init(&podev->register_lock);
+	podev->registered = false;
+	podev->total_units = 0;
+
+	return platform_driver_register(&qcota_plat_driver);
+}
+static void __exit qcota_exit(void)
+{
+	debugfs_remove_recursive(_debug_dent);
+	platform_driver_unregister(&qcota_plat_driver);
+}
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Rohit Vaswani <rvaswani@codeaurora.org>");
+MODULE_DESCRIPTION("Qualcomm Ota Crypto driver");
+MODULE_VERSION("1.02");
+
+module_init(qcota_init);
+module_exit(qcota_exit);
diff -Nruw linux-4.4.115-fbx/drivers/crypto/msm./qce50.c linux-4.4.115-fbx/drivers/crypto/msm/qce50.c
--- linux-4.4.115-fbx/drivers/crypto/msm./qce50.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/crypto/msm/qce50.c	2019-01-22 16:16:23.111242857 +0100
@@ -0,0 +1,6158 @@
+/* Qualcomm Crypto Engine driver.
+ *
+ * Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#define pr_fmt(fmt) "QCE50: %s: " fmt, __func__
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/device.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/dma-mapping.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <linux/spinlock.h>
+#include <linux/delay.h>
+#include <linux/crypto.h>
+#include <linux/bitops.h>
+#include <linux/clk/msm-clk.h>
+#include <linux/qcrypto.h>
+#include <crypto/hash.h>
+#include <crypto/sha.h>
+#include <soc/qcom/socinfo.h>
+
+#include "qce.h"
+#include "qce50.h"
+#include "qcryptohw_50.h"
+#include "qce_ota.h"
+
+#define CRYPTO_CONFIG_RESET 0xE01EF
+#define MAX_SPS_DESC_FIFO_SIZE 0xfff0
+#define QCE_MAX_NUM_DSCR    0x200
+#define QCE_SECTOR_SIZE	    0x200
+#define CE_CLK_100MHZ	100000000
+#define CE_CLK_DIV	1000000
+
+#define CRYPTO_CORE_MAJOR_VER_NUM 0x05
+#define CRYPTO_CORE_MINOR_VER_NUM 0x03
+#define CRYPTO_CORE_STEP_VER_NUM 0x1
+
+#define CRYPTO_REQ_USER_PAT 0xdead0000
+
+static DEFINE_MUTEX(bam_register_lock);
+static DEFINE_MUTEX(qce_iomap_mutex);
+
+struct bam_registration_info {
+	struct list_head qlist;
+	unsigned long handle;
+	uint32_t cnt;
+	uint32_t bam_mem;
+	void __iomem *bam_iobase;
+	bool support_cmd_dscr;
+};
+static LIST_HEAD(qce50_bam_list);
+
+/* Queued-request threshold used to determine the mode */
+#define MAX_BUNCH_MODE_REQ 2
+/* Maximum number of requests supported */
+#define MAX_QCE_BAM_REQ 8
+/* The interrupt flag is set once every SET_INTR_AT_REQ requests */
+#define SET_INTR_AT_REQ			(MAX_QCE_BAM_REQ / 2)
+/* Extra request slot to hold the dummy request */
+#define MAX_QCE_BAM_REQ_WITH_DUMMY_REQ	(MAX_QCE_BAM_REQ + 1)
+/* Allocate memory for MAX_QCE_BAM_REQ + 1 (the extra is the dummy request) */
+#define MAX_QCE_ALLOC_BAM_REQ		MAX_QCE_BAM_REQ_WITH_DUMMY_REQ
+/* QCE driver modes */
+#define IN_INTERRUPT_MODE 0
+#define IN_BUNCH_MODE 1
+/* Dummy request data length */
+#define DUMMY_REQ_DATA_LEN 64
+/* Delay, in jiffies, before the bunch-mode timer expires */
+#define DELAY_IN_JIFFIES 5
+/* Index of the dummy request */
+#define DUMMY_REQ_INDEX			MAX_QCE_BAM_REQ
+
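+/*
+ * Worked example of the values above: with MAX_QCE_BAM_REQ == 8,
+ * SET_INTR_AT_REQ == 4, so in bunch mode an interrupt is requested on
+ * every fourth request, and slot 8 (DUMMY_REQ_INDEX) is the extra slot
+ * reserved for the internally generated dummy request.
+ */
+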
+#define TOTAL_IOVEC_SPACE_PER_PIPE (QCE_MAX_NUM_DSCR * sizeof(struct sps_iovec))
+
+enum qce_owner {
+	QCE_OWNER_NONE   = 0,
+	QCE_OWNER_CLIENT = 1,
+	QCE_OWNER_TIMEOUT = 2
+};
+
+struct dummy_request {
+	struct qce_sha_req sreq;
+	struct scatterlist sg;
+	struct ahash_request areq;
+};
+
+/*
+ * CE HW device structure.
+ * Each engine has an instance of the structure.
+ * Each engine can only handle one crypto operation at a time. It is up to
+ * the software above to ensure single-threaded operation on an engine.
+ */
+struct qce_device {
+	struct device *pdev;        /* Handle to platform_device structure */
+	struct bam_registration_info *pbam;
+
+	unsigned char *coh_vmem;    /* Allocated coherent virtual memory */
+	dma_addr_t coh_pmem;	    /* Allocated coherent physical memory */
+	int memsize;				/* Memory allocated */
+	unsigned char *iovec_vmem;  /* Allocated iovec virtual memory */
+	int iovec_memsize;				/* Memory allocated */
+	uint32_t bam_mem;		/* bam physical address, from DT */
+	uint32_t bam_mem_size;		/* bam io size, from DT */
+	int is_shared;			/* CE HW is shared */
+	bool support_cmd_dscr;
+	bool support_hw_key;
+	bool support_clk_mgmt_sus_res;
+	bool support_only_core_src_clk;
+
+	void __iomem *iobase;	    /* Virtual io base of CE HW  */
+	unsigned int phy_iobase;    /* Physical io base of CE HW    */
+
+	struct clk *ce_core_src_clk;	/* Handle to CE src clk*/
+	struct clk *ce_core_clk;	/* Handle to CE clk */
+	struct clk *ce_clk;		/* Handle to CE clk */
+	struct clk *ce_bus_clk;	/* Handle to CE AXI clk*/
+	bool no_get_around;
+	bool no_ccm_mac_status_get_around;
+	unsigned int ce_opp_freq_hz;
+	bool use_sw_aes_cbc_ecb_ctr_algo;
+	bool use_sw_aead_algo;
+	bool use_sw_aes_xts_algo;
+	bool use_sw_ahash_algo;
+	bool use_sw_hmac_algo;
+	bool use_sw_aes_ccm_algo;
+	uint32_t engines_avail;
+	struct qce_ce_cfg_reg_setting reg;
+	struct ce_bam_info ce_bam_info;
+	struct ce_request_info ce_request_info[MAX_QCE_ALLOC_BAM_REQ];
+	unsigned int ce_request_index;
+	enum qce_owner owner;
+	atomic_t no_of_queued_req;
+	struct timer_list timer;
+	struct dummy_request dummyreq;
+	unsigned int mode;
+	unsigned int intr_cadence;
+	unsigned int dev_no;
+	struct qce_driver_stats qce_stats;
+	atomic_t bunch_cmd_seq;
+	atomic_t last_intr_seq;
+	bool cadence_flag;
+	uint8_t *dummyreq_in_buf;
+};
+
+static void print_notify_debug(struct sps_event_notify *notify);
+static void _sps_producer_callback(struct sps_event_notify *notify);
+static int qce_dummy_req(struct qce_device *pce_dev);
+
+static int _qce50_disp_stats;
+
+/* Standard initialization vector for SHA-1, source: FIPS 180-2 */
+static uint32_t  _std_init_vector_sha1[] =   {
+	0x67452301, 0xEFCDAB89, 0x98BADCFE, 0x10325476, 0xC3D2E1F0
+};
+
+/* Standard initialization vector for SHA-256, source: FIPS 180-2 */
+static uint32_t _std_init_vector_sha256[] = {
+	0x6A09E667, 0xBB67AE85, 0x3C6EF372, 0xA54FF53A,
+	0x510E527F, 0x9B05688C,	0x1F83D9AB, 0x5BE0CD19
+};
+
+static void _byte_stream_to_net_words(uint32_t *iv, unsigned char *b,
+		unsigned int len)
+{
+	unsigned n;
+
+	n = len  / sizeof(uint32_t);
+	for (; n > 0; n--) {
+		*iv =  ((*b << 24)      & 0xff000000) |
+				(((*(b+1)) << 16) & 0xff0000)   |
+				(((*(b+2)) << 8) & 0xff00)     |
+				(*(b+3)          & 0xff);
+		b += sizeof(uint32_t);
+		iv++;
+	}
+
+	n = len %  sizeof(uint32_t);
+	if (n == 3) {
+		*iv = ((*b << 24) & 0xff000000) |
+				(((*(b+1)) << 16) & 0xff0000)   |
+				(((*(b+2)) << 8) & 0xff00);
+	} else if (n == 2) {
+		*iv = ((*b << 24) & 0xff000000) |
+				(((*(b+1)) << 16) & 0xff0000);
+	} else if (n == 1) {
+		*iv = ((*b << 24) & 0xff000000);
+	}
+}
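+
+/*
+ * Example: the byte stream {0x01, 0x02, 0x03, 0x04} becomes the single
+ * word 0x01020304, i.e. bytes are packed big-endian (network order) into
+ * each 32-bit word; a trailing partial word is left-justified and
+ * zero-padded.
+ */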
+
+static void _byte_stream_swap_to_net_words(uint32_t *iv, unsigned char *b,
+		unsigned int len)
+{
+	unsigned i, j;
+	unsigned char swap_iv[AES_IV_LENGTH];
+
+	memset(swap_iv, 0, AES_IV_LENGTH);
+	for (i = (AES_IV_LENGTH-len), j = len-1;  i < AES_IV_LENGTH; i++, j--)
+		swap_iv[i] = b[j];
+	_byte_stream_to_net_words(iv, swap_iv, AES_IV_LENGTH);
+}
+
+static int count_sg(struct scatterlist *sg, int nbytes)
+{
+	int i;
+
+	for (i = 0; nbytes > 0; i++, sg = sg_next(sg))
+		nbytes -= sg->length;
+	return i;
+}
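+
+/*
+ * Example: for a scatterlist of 4 KiB entries and nbytes == 10240,
+ * count_sg() returns 3; the third entry covers the final 2 KiB.
+ */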
+
+static int qce_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
+	enum dma_data_direction direction)
+{
+	int i;
+
+	for (i = 0; i < nents; ++i) {
+		dma_map_sg(dev, sg, 1, direction);
+		sg = sg_next(sg);
+	}
+
+	return nents;
+}
+
+static int qce_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
+	int nents, enum dma_data_direction direction)
+{
+	int i;
+
+	for (i = 0; i < nents; ++i) {
+		dma_unmap_sg(dev, sg, 1, direction);
+		sg = sg_next(sg);
+	}
+
+	return nents;
+}
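+
+/*
+ * Note: the two helpers above walk the chain with sg_next() and map or
+ * unmap exactly one entry per dma_map_sg()/dma_unmap_sg() call, so all
+ * nents entries are handled even when the scatterlist is chained.
+ */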
+
+static int _probe_ce_engine(struct qce_device *pce_dev)
+{
+	unsigned int rev;
+	unsigned int maj_rev, min_rev, step_rev;
+
+	rev = readl_relaxed(pce_dev->iobase + CRYPTO_VERSION_REG);
+	mb();
+	maj_rev = (rev & CRYPTO_CORE_MAJOR_REV_MASK) >> CRYPTO_CORE_MAJOR_REV;
+	min_rev = (rev & CRYPTO_CORE_MINOR_REV_MASK) >> CRYPTO_CORE_MINOR_REV;
+	step_rev = (rev & CRYPTO_CORE_STEP_REV_MASK) >> CRYPTO_CORE_STEP_REV;
+
+	if (maj_rev != CRYPTO_CORE_MAJOR_VER_NUM) {
+		pr_err("Unsupported Qualcomm crypto device at 0x%x, rev %d.%d.%d\n",
+			pce_dev->phy_iobase, maj_rev, min_rev, step_rev);
+		return -EIO;
+	}
+
+	/*
+	 * The majority of crypto HW bugs have been fixed in 5.3.0 and
+	 * above. That allows a crypto request to use a single sps transfer
+	 * on the consumer pipe and a single sps transfer on the producer
+	 * pipe; the no_get_around flag indicates this.
+	 *
+	 * In 5.3.1, the CCM MAC_FAILED-in-result-dump issue is fixed;
+	 * the no_ccm_mac_status_get_around flag indicates this.
+	 */
+	pce_dev->no_get_around = (min_rev >= CRYPTO_CORE_MINOR_VER_NUM);
+	if (min_rev > CRYPTO_CORE_MINOR_VER_NUM)
+		pce_dev->no_ccm_mac_status_get_around = true;
+	else if ((min_rev == CRYPTO_CORE_MINOR_VER_NUM) &&
+			(step_rev >= CRYPTO_CORE_STEP_VER_NUM))
+		pce_dev->no_ccm_mac_status_get_around = true;
+	else
+		pce_dev->no_ccm_mac_status_get_around = false;
+
+	pce_dev->ce_bam_info.minor_version = min_rev;
+
+	pce_dev->engines_avail = readl_relaxed(pce_dev->iobase +
+					CRYPTO_ENGINES_AVAIL);
+	dev_info(pce_dev->pdev, "Qualcomm Crypto %d.%d.%d device found @0x%x\n",
+			maj_rev, min_rev, step_rev, pce_dev->phy_iobase);
+
+	pce_dev->ce_bam_info.ce_burst_size = MAX_CE_BAM_BURST_SIZE;
+
+	dev_info(pce_dev->pdev,
+			"CE device = 0x%x\n"
+			"IO base, CE = 0x%pK\n"
+			"Consumer (IN) PIPE %d,    "
+			"Producer (OUT) PIPE %d\n"
+			"IO base BAM = 0x%pK\n"
+			"BAM IRQ %d\n"
+			"Engines Availability = 0x%x\n",
+			pce_dev->ce_bam_info.ce_device,
+			pce_dev->iobase,
+			pce_dev->ce_bam_info.dest_pipe_index,
+			pce_dev->ce_bam_info.src_pipe_index,
+			pce_dev->ce_bam_info.bam_iobase,
+			pce_dev->ce_bam_info.bam_irq,
+			pce_dev->engines_avail);
+	return 0;
+}
+
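+/*
+ * Example of the version handling above: a core reporting 5.3.1 sets both
+ * no_get_around and no_ccm_mac_status_get_around, a 5.3.0 core sets only
+ * no_get_around, and a 5.2.x core sets neither.
+ */
+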
+static struct qce_cmdlist_info *_ce_get_hash_cmdlistinfo(
+			struct qce_device *pce_dev,
+			int req_info, struct qce_sha_req *sreq)
+{
+	struct ce_sps_data *pce_sps_data;
+	struct qce_cmdlistptr_ops *cmdlistptr;
+
+	pce_sps_data = &pce_dev->ce_request_info[req_info].ce_sps;
+	cmdlistptr = &pce_sps_data->cmdlistptr;
+	switch (sreq->alg) {
+	case QCE_HASH_SHA1:
+		return &cmdlistptr->auth_sha1;
+	case QCE_HASH_SHA256:
+		return &cmdlistptr->auth_sha256;
+	case QCE_HASH_SHA1_HMAC:
+		return &cmdlistptr->auth_sha1_hmac;
+	case QCE_HASH_SHA256_HMAC:
+		return &cmdlistptr->auth_sha256_hmac;
+	case QCE_HASH_AES_CMAC:
+		if (sreq->authklen == AES128_KEY_SIZE)
+			return &cmdlistptr->auth_aes_128_cmac;
+		return &cmdlistptr->auth_aes_256_cmac;
+	default:
+		return NULL;
+	}
+	return NULL;
+}
+
+static int _ce_setup_hash(struct qce_device *pce_dev,
+				struct qce_sha_req *sreq,
+				struct qce_cmdlist_info *cmdlistinfo)
+{
+	uint32_t auth32[SHA256_DIGEST_SIZE / sizeof(uint32_t)];
+	uint32_t diglen;
+	int i;
+	uint32_t mackey32[SHA_HMAC_KEY_SIZE/sizeof(uint32_t)] = {
+			0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
+	bool sha1 = false;
+	struct sps_command_element *pce = NULL;
+	bool use_hw_key = false;
+	bool use_pipe_key = false;
+	uint32_t authk_size_in_word = sreq->authklen/sizeof(uint32_t);
+	uint32_t auth_cfg;
+
+	if ((sreq->alg == QCE_HASH_SHA1_HMAC) ||
+			(sreq->alg == QCE_HASH_SHA256_HMAC) ||
+			(sreq->alg ==  QCE_HASH_AES_CMAC)) {
+
+		/* no more check for a null key; use the flags instead */
+		if ((sreq->flags & QCRYPTO_CTX_USE_HW_KEY)
+						== QCRYPTO_CTX_USE_HW_KEY)
+			use_hw_key = true;
+		else if ((sreq->flags & QCRYPTO_CTX_USE_PIPE_KEY) ==
+						QCRYPTO_CTX_USE_PIPE_KEY)
+			use_pipe_key = true;
+		pce = cmdlistinfo->go_proc;
+		if (use_hw_key == true) {
+			pce->addr = (uint32_t)(CRYPTO_GOPROC_QC_KEY_REG +
+							pce_dev->phy_iobase);
+		} else {
+			pce->addr = (uint32_t)(CRYPTO_GOPROC_REG +
+							pce_dev->phy_iobase);
+			pce = cmdlistinfo->auth_key;
+			if (use_pipe_key == false) {
+				_byte_stream_to_net_words(mackey32,
+						sreq->authkey,
+						sreq->authklen);
+				for (i = 0; i < authk_size_in_word; i++, pce++)
+					pce->data = mackey32[i];
+			}
+		}
+	}
+
+	if (sreq->alg ==  QCE_HASH_AES_CMAC)
+		goto go_proc;
+
+	/* if not the last, the size has to be on the block boundary */
+	if (sreq->last_blk == 0 && (sreq->size % SHA256_BLOCK_SIZE))
+		return -EIO;
+
+	switch (sreq->alg) {
+	case QCE_HASH_SHA1:
+	case QCE_HASH_SHA1_HMAC:
+		diglen = SHA1_DIGEST_SIZE;
+		sha1 = true;
+		break;
+	case QCE_HASH_SHA256:
+	case QCE_HASH_SHA256_HMAC:
+		diglen = SHA256_DIGEST_SIZE;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	/* write 20/32 bytes, 5/8 words into auth_iv for SHA1/SHA256 */
+	if (sreq->first_blk) {
+		if (sha1) {
+			for (i = 0; i < 5; i++)
+				auth32[i] = _std_init_vector_sha1[i];
+		} else {
+			for (i = 0; i < 8; i++)
+				auth32[i] = _std_init_vector_sha256[i];
+		}
+	} else {
+		_byte_stream_to_net_words(auth32, sreq->digest, diglen);
+	}
+
+	pce = cmdlistinfo->auth_iv;
+	for (i = 0; i < 5; i++, pce++)
+		pce->data = auth32[i];
+
+	if ((sreq->alg == QCE_HASH_SHA256) ||
+			(sreq->alg == QCE_HASH_SHA256_HMAC)) {
+		for (i = 5; i < 8; i++, pce++)
+			pce->data = auth32[i];
+	}
+
+	/* write auth_bytecnt 0/1, start with 0 */
+	pce = cmdlistinfo->auth_bytecount;
+	for (i = 0; i < 2; i++, pce++)
+		pce->data = sreq->auth_data[i];
+
+	/* Set/reset  last bit in CFG register  */
+	pce = cmdlistinfo->auth_seg_cfg;
+	auth_cfg = pce->data & ~(1 << CRYPTO_LAST |
+				1 << CRYPTO_FIRST |
+				1 << CRYPTO_USE_PIPE_KEY_AUTH |
+				1 << CRYPTO_USE_HW_KEY_AUTH);
+	if (sreq->last_blk)
+		auth_cfg |= 1 << CRYPTO_LAST;
+	if (sreq->first_blk)
+		auth_cfg |= 1 << CRYPTO_FIRST;
+	if (use_hw_key)
+		auth_cfg |= 1 << CRYPTO_USE_HW_KEY_AUTH;
+	if (use_pipe_key)
+		auth_cfg |= 1 << CRYPTO_USE_PIPE_KEY_AUTH;
+	pce->data = auth_cfg;
+go_proc:
+	/* write auth seg size */
+	pce = cmdlistinfo->auth_seg_size;
+	pce->data = sreq->size;
+
+	pce = cmdlistinfo->encr_seg_cfg;
+	pce->data = 0;
+
+	/* write auth seg start */
+	pce = cmdlistinfo->auth_seg_start;
+	pce->data = 0;
+
+	/* write seg size */
+	pce = cmdlistinfo->seg_size;
+
+	/*
+	 * Always ensure there is input data: a zero-length transfer (ZLT)
+	 * does not work for bam-ndp, so an empty request still programs one
+	 * burst worth of segment size.
+	 */
+	if (sreq->size)
+		pce->data = sreq->size;
+	else
+		pce->data = pce_dev->ce_bam_info.ce_burst_size;
+
+	return 0;
+}
+
+static struct qce_cmdlist_info *_ce_get_aead_cmdlistinfo(
+			struct qce_device *pce_dev,
+			int req_info, struct qce_req *creq)
+{
+	struct ce_sps_data *pce_sps_data;
+	struct qce_cmdlistptr_ops *cmdlistptr;
+
+	pce_sps_data = &pce_dev->ce_request_info[req_info].ce_sps;
+	cmdlistptr = &pce_sps_data->cmdlistptr;
+	switch (creq->alg) {
+	case CIPHER_ALG_DES:
+		switch (creq->mode) {
+		case QCE_MODE_CBC:
+			if (creq->auth_alg == QCE_HASH_SHA1_HMAC)
+				return &cmdlistptr->aead_hmac_sha1_cbc_des;
+			else if (creq->auth_alg == QCE_HASH_SHA256_HMAC)
+				return &cmdlistptr->aead_hmac_sha256_cbc_des;
+			else
+				return NULL;
+			break;
+		default:
+			return NULL;
+		}
+		break;
+	case CIPHER_ALG_3DES:
+		switch (creq->mode) {
+		case QCE_MODE_CBC:
+			if (creq->auth_alg == QCE_HASH_SHA1_HMAC)
+				return &cmdlistptr->aead_hmac_sha1_cbc_3des;
+			else if (creq->auth_alg == QCE_HASH_SHA256_HMAC)
+				return &cmdlistptr->aead_hmac_sha256_cbc_3des;
+			else
+				return NULL;
+			break;
+		default:
+			return NULL;
+		}
+		break;
+	case CIPHER_ALG_AES:
+		switch (creq->mode) {
+		case QCE_MODE_CBC:
+			if (creq->encklen ==  AES128_KEY_SIZE) {
+				if (creq->auth_alg == QCE_HASH_SHA1_HMAC)
+					return &cmdlistptr->
+						aead_hmac_sha1_cbc_aes_128;
+				else if (creq->auth_alg ==
+						QCE_HASH_SHA256_HMAC)
+					return &cmdlistptr->
+						aead_hmac_sha256_cbc_aes_128;
+				else
+					return NULL;
+			} else if (creq->encklen ==  AES256_KEY_SIZE) {
+				if (creq->auth_alg == QCE_HASH_SHA1_HMAC)
+					return &cmdlistptr->
+						aead_hmac_sha1_cbc_aes_256;
+				else if (creq->auth_alg ==
+						QCE_HASH_SHA256_HMAC)
+					return &cmdlistptr->
+						aead_hmac_sha256_cbc_aes_256;
+				else
+					return NULL;
+			} else
+				return NULL;
+			break;
+		default:
+			return NULL;
+		}
+		break;
+
+	default:
+		return NULL;
+	}
+	return NULL;
+}
+
+static int _ce_setup_aead(struct qce_device *pce_dev, struct qce_req *q_req,
+		uint32_t totallen_in, uint32_t coffset,
+		struct qce_cmdlist_info *cmdlistinfo)
+{
+	int32_t authk_size_in_word = SHA_HMAC_KEY_SIZE/sizeof(uint32_t);
+	int i;
+	uint32_t mackey32[SHA_HMAC_KEY_SIZE/sizeof(uint32_t)] = {0};
+	struct sps_command_element *pce;
+	uint32_t a_cfg;
+	uint32_t enckey32[(MAX_CIPHER_KEY_SIZE*2)/sizeof(uint32_t)] = {0};
+	uint32_t enciv32[MAX_IV_LENGTH/sizeof(uint32_t)] = {0};
+	uint32_t enck_size_in_word = 0;
+	uint32_t enciv_in_word;
+	uint32_t key_size;
+	uint32_t encr_cfg = 0;
+	uint32_t ivsize = q_req->ivsize;
+
+	key_size = q_req->encklen;
+	enck_size_in_word = key_size/sizeof(uint32_t);
+
+	switch (q_req->alg) {
+	case CIPHER_ALG_DES:
+		enciv_in_word = 2;
+		break;
+	case CIPHER_ALG_3DES:
+		enciv_in_word = 2;
+		break;
+	case CIPHER_ALG_AES:
+		if ((key_size != AES128_KEY_SIZE) &&
+				(key_size != AES256_KEY_SIZE))
+			return -EINVAL;
+		enciv_in_word = 4;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	/* only support cbc mode */
+	if (q_req->mode != QCE_MODE_CBC)
+		return -EINVAL;
+
+	_byte_stream_to_net_words(enciv32, q_req->iv, ivsize);
+	pce = cmdlistinfo->encr_cntr_iv;
+	for (i = 0; i < enciv_in_word; i++, pce++)
+		pce->data = enciv32[i];
+
+	/*
+	 * write encr key
+	 * do not use  hw key or pipe key
+	 */
+	_byte_stream_to_net_words(enckey32, q_req->enckey, key_size);
+	pce = cmdlistinfo->encr_key;
+	for (i = 0; i < enck_size_in_word; i++, pce++)
+		pce->data = enckey32[i];
+
+	/* write encr seg cfg */
+	pce = cmdlistinfo->encr_seg_cfg;
+	encr_cfg = pce->data;
+	if (q_req->dir == QCE_ENCRYPT)
+		encr_cfg |= (1 << CRYPTO_ENCODE);
+	else
+		encr_cfg &= ~(1 << CRYPTO_ENCODE);
+	pce->data = encr_cfg;
+
+	/* we only support sha1-hmac and sha256-hmac at this point */
+	_byte_stream_to_net_words(mackey32, q_req->authkey,
+					q_req->authklen);
+	pce = cmdlistinfo->auth_key;
+	for (i = 0; i < authk_size_in_word; i++, pce++)
+		pce->data = mackey32[i];
+	pce = cmdlistinfo->auth_iv;
+
+	if (q_req->auth_alg == QCE_HASH_SHA1_HMAC)
+		for (i = 0; i < 5; i++, pce++)
+			pce->data = _std_init_vector_sha1[i];
+	else
+		for (i = 0; i < 8; i++, pce++)
+			pce->data = _std_init_vector_sha256[i];
+
+	/* write auth_bytecnt 0/1, start with 0 */
+	pce = cmdlistinfo->auth_bytecount;
+	for (i = 0; i < 2; i++, pce++)
+		pce->data = 0;
+
+	pce = cmdlistinfo->auth_seg_cfg;
+	a_cfg = pce->data;
+	a_cfg &= ~(CRYPTO_AUTH_POS_MASK);
+	if (q_req->dir == QCE_ENCRYPT)
+		a_cfg |= (CRYPTO_AUTH_POS_AFTER << CRYPTO_AUTH_POS);
+	else
+		a_cfg |= (CRYPTO_AUTH_POS_BEFORE << CRYPTO_AUTH_POS);
+	pce->data = a_cfg;
+
+	/* write auth seg size */
+	pce = cmdlistinfo->auth_seg_size;
+	pce->data = totallen_in;
+
+	/* write auth seg start */
+	pce = cmdlistinfo->auth_seg_start;
+	pce->data = 0;
+
+	/* write seg size */
+	pce = cmdlistinfo->seg_size;
+	pce->data = totallen_in;
+
+	/* write encr seg size */
+	pce = cmdlistinfo->encr_seg_size;
+	pce->data = q_req->cryptlen;
+
+	/* write encr seg start */
+	pce = cmdlistinfo->encr_seg_start;
+	pce->data = (coffset & 0xffff);
+
+	return 0;
+}
+
+static struct qce_cmdlist_info *_ce_get_cipher_cmdlistinfo(
+			struct qce_device *pce_dev,
+			int req_info, struct qce_req *creq)
+{
+	struct ce_request_info *preq_info;
+	struct ce_sps_data *pce_sps_data;
+	struct qce_cmdlistptr_ops *cmdlistptr;
+
+	preq_info = &pce_dev->ce_request_info[req_info];
+	pce_sps_data = &preq_info->ce_sps;
+	cmdlistptr = &pce_sps_data->cmdlistptr;
+	if (creq->alg != CIPHER_ALG_AES) {
+		switch (creq->alg) {
+		case CIPHER_ALG_DES:
+			if (creq->mode == QCE_MODE_ECB)
+				return &cmdlistptr->cipher_des_ecb;
+			return &cmdlistptr->cipher_des_cbc;
+		case CIPHER_ALG_3DES:
+			if (creq->mode == QCE_MODE_ECB)
+				return &cmdlistptr->cipher_3des_ecb;
+			return &cmdlistptr->cipher_3des_cbc;
+		default:
+			return NULL;
+		}
+	} else {
+		switch (creq->mode) {
+		case QCE_MODE_ECB:
+			if (creq->encklen == AES128_KEY_SIZE)
+				return &cmdlistptr->cipher_aes_128_ecb;
+			return &cmdlistptr->cipher_aes_256_ecb;
+		case QCE_MODE_CBC:
+		case QCE_MODE_CTR:
+			if (creq->encklen == AES128_KEY_SIZE)
+				return &cmdlistptr->cipher_aes_128_cbc_ctr;
+			return &cmdlistptr->cipher_aes_256_cbc_ctr;
+		case QCE_MODE_XTS:
+			if (creq->encklen/2 == AES128_KEY_SIZE)
+				return &cmdlistptr->cipher_aes_128_xts;
+			return &cmdlistptr->cipher_aes_256_xts;
+		case QCE_MODE_CCM:
+			if (creq->encklen == AES128_KEY_SIZE)
+				return &cmdlistptr->aead_aes_128_ccm;
+			return &cmdlistptr->aead_aes_256_ccm;
+		default:
+			return NULL;
+		}
+	}
+	return NULL;
+}
+
+static int _ce_setup_cipher(struct qce_device *pce_dev, struct qce_req *creq,
+		uint32_t totallen_in, uint32_t coffset,
+		struct qce_cmdlist_info *cmdlistinfo)
+{
+	uint32_t enckey32[(MAX_CIPHER_KEY_SIZE * 2)/sizeof(uint32_t)] = {
+			0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
+	uint32_t enciv32[MAX_IV_LENGTH / sizeof(uint32_t)] = {
+			0, 0, 0, 0};
+	uint32_t enck_size_in_word = 0;
+	uint32_t key_size;
+	bool use_hw_key = false;
+	bool use_pipe_key = false;
+	uint32_t encr_cfg = 0;
+	uint32_t ivsize = creq->ivsize;
+	int i;
+	struct sps_command_element *pce = NULL;
+
+	if (creq->mode == QCE_MODE_XTS)
+		key_size = creq->encklen/2;
+	else
+		key_size = creq->encklen;
+
+	pce = cmdlistinfo->go_proc;
+	if ((creq->flags & QCRYPTO_CTX_USE_HW_KEY) == QCRYPTO_CTX_USE_HW_KEY) {
+		use_hw_key = true;
+	} else {
+		if ((creq->flags & QCRYPTO_CTX_USE_PIPE_KEY) ==
+					QCRYPTO_CTX_USE_PIPE_KEY)
+			use_pipe_key = true;
+	}
+	pce = cmdlistinfo->go_proc;
+	if (use_hw_key == true)
+		pce->addr = (uint32_t)(CRYPTO_GOPROC_QC_KEY_REG +
+						pce_dev->phy_iobase);
+	else
+		pce->addr = (uint32_t)(CRYPTO_GOPROC_REG +
+						pce_dev->phy_iobase);
+	if ((use_pipe_key == false) && (use_hw_key == false)) {
+		_byte_stream_to_net_words(enckey32, creq->enckey, key_size);
+		enck_size_in_word = key_size/sizeof(uint32_t);
+	}
+
+	if ((creq->op == QCE_REQ_AEAD) && (creq->mode == QCE_MODE_CCM)) {
+		uint32_t authklen32 = creq->encklen/sizeof(uint32_t);
+		uint32_t noncelen32 = MAX_NONCE/sizeof(uint32_t);
+		uint32_t nonce32[MAX_NONCE/sizeof(uint32_t)] = {0, 0, 0, 0};
+		uint32_t auth_cfg = 0;
+
+		/* write nonce */
+		_byte_stream_to_net_words(nonce32, creq->nonce, MAX_NONCE);
+		pce = cmdlistinfo->auth_nonce_info;
+		for (i = 0; i < noncelen32; i++, pce++)
+			pce->data = nonce32[i];
+
+		if (creq->authklen ==  AES128_KEY_SIZE)
+			auth_cfg = pce_dev->reg.auth_cfg_aes_ccm_128;
+		else {
+			if (creq->authklen ==  AES256_KEY_SIZE)
+				auth_cfg = pce_dev->reg.auth_cfg_aes_ccm_256;
+		}
+		if (creq->dir == QCE_ENCRYPT)
+			auth_cfg |= (CRYPTO_AUTH_POS_BEFORE << CRYPTO_AUTH_POS);
+		else
+			auth_cfg |= (CRYPTO_AUTH_POS_AFTER << CRYPTO_AUTH_POS);
+		auth_cfg |= ((creq->authsize - 1) << CRYPTO_AUTH_SIZE);
+
+		if (use_hw_key == true)	{
+			auth_cfg |= (1 << CRYPTO_USE_HW_KEY_AUTH);
+		} else {
+			auth_cfg &= ~(1 << CRYPTO_USE_HW_KEY_AUTH);
+			/* write auth key */
+			pce = cmdlistinfo->auth_key;
+			for (i = 0; i < authklen32; i++, pce++)
+				pce->data = enckey32[i];
+		}
+
+		pce = cmdlistinfo->auth_seg_cfg;
+		pce->data = auth_cfg;
+
+		pce = cmdlistinfo->auth_seg_size;
+		if (creq->dir == QCE_ENCRYPT)
+			pce->data = totallen_in;
+		else
+			pce->data = totallen_in - creq->authsize;
+		pce = cmdlistinfo->auth_seg_start;
+		pce->data = 0;
+	} else {
+		if (creq->op != QCE_REQ_AEAD) {
+			pce = cmdlistinfo->auth_seg_cfg;
+			pce->data = 0;
+		}
+	}
+	switch (creq->mode) {
+	case QCE_MODE_ECB:
+		if (key_size == AES128_KEY_SIZE)
+			encr_cfg = pce_dev->reg.encr_cfg_aes_ecb_128;
+		else
+			encr_cfg = pce_dev->reg.encr_cfg_aes_ecb_256;
+		break;
+	case QCE_MODE_CBC:
+		if (key_size == AES128_KEY_SIZE)
+			encr_cfg = pce_dev->reg.encr_cfg_aes_cbc_128;
+		else
+			encr_cfg = pce_dev->reg.encr_cfg_aes_cbc_256;
+		break;
+	case QCE_MODE_XTS:
+		if (key_size == AES128_KEY_SIZE)
+			encr_cfg = pce_dev->reg.encr_cfg_aes_xts_128;
+		else
+			encr_cfg = pce_dev->reg.encr_cfg_aes_xts_256;
+		break;
+	case QCE_MODE_CCM:
+		if (key_size == AES128_KEY_SIZE)
+			encr_cfg = pce_dev->reg.encr_cfg_aes_ccm_128;
+		else
+			encr_cfg = pce_dev->reg.encr_cfg_aes_ccm_256;
+		encr_cfg |= (CRYPTO_ENCR_MODE_CCM << CRYPTO_ENCR_MODE) |
+				(CRYPTO_LAST_CCM_XFR << CRYPTO_LAST_CCM);
+		break;
+	case QCE_MODE_CTR:
+	default:
+		if (key_size == AES128_KEY_SIZE)
+			encr_cfg = pce_dev->reg.encr_cfg_aes_ctr_128;
+		else
+			encr_cfg = pce_dev->reg.encr_cfg_aes_ctr_256;
+		break;
+	}
+
+	switch (creq->alg) {
+	case CIPHER_ALG_DES:
+		if (creq->mode !=  QCE_MODE_ECB) {
+			_byte_stream_to_net_words(enciv32, creq->iv, ivsize);
+			pce = cmdlistinfo->encr_cntr_iv;
+			pce->data = enciv32[0];
+			pce++;
+			pce->data = enciv32[1];
+		}
+		if (use_hw_key == false) {
+			pce = cmdlistinfo->encr_key;
+			pce->data = enckey32[0];
+			pce++;
+			pce->data = enckey32[1];
+		}
+		break;
+	case CIPHER_ALG_3DES:
+		if (creq->mode !=  QCE_MODE_ECB) {
+			_byte_stream_to_net_words(enciv32, creq->iv, ivsize);
+			pce = cmdlistinfo->encr_cntr_iv;
+			pce->data = enciv32[0];
+			pce++;
+			pce->data = enciv32[1];
+		}
+		if (use_hw_key == false) {
+			/* write encr key */
+			pce = cmdlistinfo->encr_key;
+			for (i = 0; i < 6; i++, pce++)
+				pce->data = enckey32[i];
+		}
+		break;
+	case CIPHER_ALG_AES:
+	default:
+		if (creq->mode ==  QCE_MODE_XTS) {
+			uint32_t xtskey32[MAX_CIPHER_KEY_SIZE/sizeof(uint32_t)]
+					= {0, 0, 0, 0, 0, 0, 0, 0};
+			uint32_t xtsklen =
+					creq->encklen/(2 * sizeof(uint32_t));
+
+			if ((use_hw_key == false) && (use_pipe_key == false)) {
+				_byte_stream_to_net_words(xtskey32,
+					(creq->enckey + creq->encklen/2),
+							creq->encklen/2);
+				/* write xts encr key */
+				pce = cmdlistinfo->encr_xts_key;
+				for (i = 0; i < xtsklen; i++, pce++)
+					pce->data = xtskey32[i];
+			}
+			/* write xts du size */
+			pce = cmdlistinfo->encr_xts_du_size;
+			switch (creq->flags & QCRYPTO_CTX_XTS_MASK) {
+			case QCRYPTO_CTX_XTS_DU_SIZE_512B:
+				pce->data = min((unsigned int)QCE_SECTOR_SIZE,
+						creq->cryptlen);
+				break;
+			case QCRYPTO_CTX_XTS_DU_SIZE_1KB:
+				pce->data =
+					min((unsigned int)QCE_SECTOR_SIZE * 2,
+					creq->cryptlen);
+				break;
+			default:
+				pce->data = creq->cryptlen;
+				break;
+			}
+		}
+		if (creq->mode !=  QCE_MODE_ECB) {
+			if (creq->mode ==  QCE_MODE_XTS)
+				_byte_stream_swap_to_net_words(enciv32,
+							creq->iv, ivsize);
+			else
+				_byte_stream_to_net_words(enciv32, creq->iv,
+								ivsize);
+			/* write encr cntr iv */
+			pce = cmdlistinfo->encr_cntr_iv;
+			for (i = 0; i < 4; i++, pce++)
+				pce->data = enciv32[i];
+
+			if (creq->mode ==  QCE_MODE_CCM) {
+				/* write cntr iv for ccm */
+				pce = cmdlistinfo->encr_ccm_cntr_iv;
+				for (i = 0; i < 4; i++, pce++)
+					pce->data = enciv32[i];
+				/* update cntr_iv[3] by one */
+				pce = cmdlistinfo->encr_cntr_iv;
+				pce += 3;
+				pce->data += 1;
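+				/*
+				 * Per the CCM construction (RFC 3610),
+				 * counter block 0 encrypts the MAC, so
+				 * payload encryption starts one above
+				 * the base counter, hence the increment
+				 * above.
+				 */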
+			}
+		}
+
+		if (creq->op == QCE_REQ_ABLK_CIPHER_NO_KEY) {
+				encr_cfg |= (CRYPTO_ENCR_KEY_SZ_AES128 <<
+						CRYPTO_ENCR_KEY_SZ);
+		} else {
+			if (use_hw_key == false) {
+				/* write encr key */
+				pce = cmdlistinfo->encr_key;
+				for (i = 0; i < enck_size_in_word; i++, pce++)
+					pce->data = enckey32[i];
+			}
+		} /* else of if (creq->op == QCE_REQ_ABLK_CIPHER_NO_KEY) */
+		break;
+	} /* end of switch (creq->mode)  */
+
+	if (use_pipe_key)
+		encr_cfg |= (CRYPTO_USE_PIPE_KEY_ENCR_ENABLED
+					<< CRYPTO_USE_PIPE_KEY_ENCR);
+
+	/* write encr seg cfg */
+	pce = cmdlistinfo->encr_seg_cfg;
+	if ((creq->alg == CIPHER_ALG_DES) || (creq->alg == CIPHER_ALG_3DES)) {
+		if (creq->dir == QCE_ENCRYPT)
+			pce->data |= (1 << CRYPTO_ENCODE);
+		else
+			pce->data &= ~(1 << CRYPTO_ENCODE);
+		encr_cfg = pce->data;
+	}  else	{
+		encr_cfg |=
+			((creq->dir == QCE_ENCRYPT) ? 1 : 0) << CRYPTO_ENCODE;
+	}
+	if (use_hw_key == true)
+		encr_cfg |= (CRYPTO_USE_HW_KEY << CRYPTO_USE_HW_KEY_ENCR);
+	else
+		encr_cfg &= ~(CRYPTO_USE_HW_KEY << CRYPTO_USE_HW_KEY_ENCR);
+	pce->data = encr_cfg;
+
+	/* write encr seg size */
+	pce = cmdlistinfo->encr_seg_size;
+	if ((creq->mode == QCE_MODE_CCM) && (creq->dir == QCE_DECRYPT))
+		pce->data = (creq->cryptlen + creq->authsize);
+	else
+		pce->data = creq->cryptlen;
+
+	/* write encr seg start */
+	pce = cmdlistinfo->encr_seg_start;
+	pce->data = (coffset & 0xffff);
+
+	/* write seg size  */
+	pce = cmdlistinfo->seg_size;
+	pce->data = totallen_in;
+
+	return 0;
+}
+
+static int _ce_f9_setup(struct qce_device *pce_dev, struct qce_f9_req *req,
+		struct qce_cmdlist_info *cmdlistinfo)
+{
+	uint32_t ikey32[OTA_KEY_SIZE/sizeof(uint32_t)];
+	uint32_t key_size_in_word = OTA_KEY_SIZE/sizeof(uint32_t);
+	uint32_t cfg;
+	struct sps_command_element *pce;
+	int i;
+
+	switch (req->algorithm) {
+	case QCE_OTA_ALGO_KASUMI:
+		cfg = pce_dev->reg.auth_cfg_kasumi;
+		break;
+	case QCE_OTA_ALGO_SNOW3G:
+	default:
+		cfg = pce_dev->reg.auth_cfg_snow3g;
+		break;
+	}
+
+	/* write key in CRYPTO_AUTH_IV0-3_REG */
+	_byte_stream_to_net_words(ikey32, &req->ikey[0], OTA_KEY_SIZE);
+	pce = cmdlistinfo->auth_iv;
+	for (i = 0; i < key_size_in_word; i++, pce++)
+		pce->data = ikey32[i];
+
+	/* write last bits  in CRYPTO_AUTH_IV4_REG  */
+	pce->data = req->last_bits;
+
+	/* write fresh to CRYPTO_AUTH_BYTECNT0_REG */
+	pce = cmdlistinfo->auth_bytecount;
+	pce->data = req->fresh;
+
+	/* write count-i  to CRYPTO_AUTH_BYTECNT1_REG */
+	pce++;
+	pce->data = req->count_i;
+
+	/* write auth seg cfg */
+	pce = cmdlistinfo->auth_seg_cfg;
+	if (req->direction == QCE_OTA_DIR_DOWNLINK)
+		cfg |= BIT(CRYPTO_F9_DIRECTION);
+	pce->data = cfg;
+
+	/* write auth seg size */
+	pce = cmdlistinfo->auth_seg_size;
+	pce->data = req->msize;
+
+	/* write auth seg start*/
+	pce = cmdlistinfo->auth_seg_start;
+	pce->data = 0;
+
+	/* write seg size  */
+	pce = cmdlistinfo->seg_size;
+	pce->data = req->msize;
+
+	/* write go */
+	pce = cmdlistinfo->go_proc;
+	pce->addr = (uint32_t)(CRYPTO_GOPROC_REG + pce_dev->phy_iobase);
+	return 0;
+}
+
+static int _ce_f8_setup(struct qce_device *pce_dev, struct qce_f8_req *req,
+		bool key_stream_mode, uint16_t npkts, uint16_t cipher_offset,
+		uint16_t cipher_size,
+		struct qce_cmdlist_info *cmdlistinfo)
+{
+	uint32_t ckey32[OTA_KEY_SIZE/sizeof(uint32_t)];
+	uint32_t key_size_in_word = OTA_KEY_SIZE/sizeof(uint32_t);
+	uint32_t cfg;
+	struct sps_command_element *pce;
+	int i;
+
+	switch (req->algorithm) {
+	case QCE_OTA_ALGO_KASUMI:
+		cfg = pce_dev->reg.encr_cfg_kasumi;
+		break;
+	case QCE_OTA_ALGO_SNOW3G:
+	default:
+		cfg = pce_dev->reg.encr_cfg_snow3g;
+		break;
+	}
+	/* write key */
+	_byte_stream_to_net_words(ckey32, &req->ckey[0], OTA_KEY_SIZE);
+	pce = cmdlistinfo->encr_key;
+	for (i = 0; i < key_size_in_word; i++, pce++)
+		pce->data = ckey32[i];
+
+	/* write encr seg cfg */
+	pce = cmdlistinfo->encr_seg_cfg;
+	if (key_stream_mode)
+		cfg |= BIT(CRYPTO_F8_KEYSTREAM_ENABLE);
+	if (req->direction == QCE_OTA_DIR_DOWNLINK)
+		cfg |= BIT(CRYPTO_F8_DIRECTION);
+	pce->data = cfg;
+
+	/* write encr seg start */
+	pce = cmdlistinfo->encr_seg_start;
+	pce->data = (cipher_offset & 0xffff);
+
+	/* write encr seg size  */
+	pce = cmdlistinfo->encr_seg_size;
+	pce->data = cipher_size;
+
+	/* write seg size  */
+	pce = cmdlistinfo->seg_size;
+	pce->data = req->data_len;
+
+	/* write cntr0_iv0 for countC */
+	pce = cmdlistinfo->encr_cntr_iv;
+	pce->data = req->count_c;
+	/* write cntr1_iv1 for nPkts, and bearer */
+	pce++;
+	if (npkts == 1)
+		npkts = 0;
+	pce->data = req->bearer << CRYPTO_CNTR1_IV1_REG_F8_BEARER |
+				npkts << CRYPTO_CNTR1_IV1_REG_F8_PKT_CNT;
+
+	/* write go */
+	pce = cmdlistinfo->go_proc;
+	pce->addr = (uint32_t)(CRYPTO_GOPROC_REG + pce_dev->phy_iobase);
+
+	return 0;
+}
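+
+/*
+ * Example of the CNTR1_IV1 packing above: a multi-packet F8 request with
+ * npkts == 8 and bearer == 5 programs
+ * (5 << CRYPTO_CNTR1_IV1_REG_F8_BEARER) |
+ * (8 << CRYPTO_CNTR1_IV1_REG_F8_PKT_CNT), while a single-packet request
+ * (npkts == 1) is encoded with a packet count of 0.
+ */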
+
+static void _qce_dump_descr_fifos(struct qce_device *pce_dev, int req_info)
+{
+	int i, j, ents;
+	struct ce_sps_data *pce_sps_data;
+	struct sps_iovec *iovec;
+	uint32_t cmd_flags = SPS_IOVEC_FLAG_CMD;
+
+	pce_sps_data = &pce_dev->ce_request_info[req_info].ce_sps;
+	iovec = pce_sps_data->in_transfer.iovec;
+	pr_info("==============================================\n");
+	pr_info("CONSUMER (TX/IN/DEST) PIPE DESCRIPTOR\n");
+	pr_info("==============================================\n");
+	for (i = 0; i <  pce_sps_data->in_transfer.iovec_count; i++) {
+		pr_info(" [%d] addr=0x%x  size=0x%x  flags=0x%x\n", i,
+					iovec->addr, iovec->size, iovec->flags);
+		if (iovec->flags & cmd_flags) {
+			struct sps_command_element *pced;
+
+			pced = (struct sps_command_element *)
+					(GET_VIRT_ADDR(iovec->addr));
+			ents = iovec->size/(sizeof(struct sps_command_element));
+			for (j = 0; j < ents; j++) {
+				pr_info("      [%d] [0x%x] 0x%x\n", j,
+					pced->addr, pced->data);
+				pced++;
+			}
+		}
+		iovec++;
+	}
+
+	pr_info("==============================================\n");
+	pr_info("PRODUCER (RX/OUT/SRC) PIPE DESCRIPTOR\n");
+	pr_info("==============================================\n");
+	iovec =  pce_sps_data->out_transfer.iovec;
+	for (i = 0; i <   pce_sps_data->out_transfer.iovec_count; i++) {
+		pr_info(" [%d] addr=0x%x  size=0x%x  flags=0x%x\n", i,
+				iovec->addr, iovec->size, iovec->flags);
+		iovec++;
+	}
+}
+
+#ifdef QCE_DEBUG
+
+static void _qce_dump_descr_fifos_dbg(struct qce_device *pce_dev, int req_info)
+{
+	_qce_dump_descr_fifos(pce_dev, req_info);
+}
+
+#define QCE_WRITE_REG(val, addr)					\
+{									\
+	pr_info("      [0x%pK] 0x%x\n", addr, (uint32_t)val);		\
+	writel_relaxed(val, addr);					\
+}
+
+#else
+
+static void _qce_dump_descr_fifos_dbg(struct qce_device *pce_dev, int req_info)
+{
+}
+
+#define QCE_WRITE_REG(val, addr)					\
+	writel_relaxed(val, addr)
+
+#endif
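+
+/*
+ * With QCE_DEBUG defined at build time, QCE_WRITE_REG() logs each target
+ * address and value before performing the relaxed write, and
+ * _qce_dump_descr_fifos_dbg() actually dumps the descriptor FIFOs; in a
+ * normal build both collapse to the no-op and plain writel_relaxed()
+ * forms above.
+ */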
+
+static int _ce_setup_hash_direct(struct qce_device *pce_dev,
+				struct qce_sha_req *sreq)
+{
+	uint32_t auth32[SHA256_DIGEST_SIZE / sizeof(uint32_t)];
+	uint32_t diglen;
+	bool use_hw_key = false;
+	bool use_pipe_key = false;
+	int i;
+	uint32_t mackey32[SHA_HMAC_KEY_SIZE/sizeof(uint32_t)] = {
+			0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
+	uint32_t authk_size_in_word = sreq->authklen/sizeof(uint32_t);
+	bool sha1 = false;
+	uint32_t auth_cfg = 0;
+
+	/* clear status */
+	QCE_WRITE_REG(0, pce_dev->iobase + CRYPTO_STATUS_REG);
+
+	QCE_WRITE_REG(pce_dev->reg.crypto_cfg_be, (pce_dev->iobase +
+							CRYPTO_CONFIG_REG));
+	/*
+	 * Ensure the previous instructions (setting the CONFIG register)
+	 * have completed before starting to set the other config registers.
+	 * This guarantees the configuration is done with the correct
+	 * endianness, as set in the CONFIG register.
+	 */
+	mb();
+
+	if (sreq->alg == QCE_HASH_AES_CMAC) {
+		/* reset auth seg_cfg */
+		QCE_WRITE_REG(0, pce_dev->iobase + CRYPTO_AUTH_SEG_CFG_REG);
+		/* reset encr seg_cfg */
+		QCE_WRITE_REG(0, pce_dev->iobase + CRYPTO_ENCR_SEG_CFG_REG);
+		/* reset encr seg_size */
+		QCE_WRITE_REG(0, pce_dev->iobase + CRYPTO_ENCR_SEG_SIZE_REG);
+
+		/* Clear auth_ivn, auth_keyn registers  */
+		for (i = 0; i < 16; i++) {
+			QCE_WRITE_REG(0, (pce_dev->iobase +
+				(CRYPTO_AUTH_IV0_REG + i*sizeof(uint32_t))));
+			QCE_WRITE_REG(0, (pce_dev->iobase +
+				(CRYPTO_AUTH_KEY0_REG + i*sizeof(uint32_t))));
+		}
+		/* write auth_bytecnt 0/1/2/3, start with 0 */
+		for (i = 0; i < 4; i++)
+			QCE_WRITE_REG(0, pce_dev->iobase +
+						CRYPTO_AUTH_BYTECNT0_REG +
+						i * sizeof(uint32_t));
+
+		if (sreq->authklen == AES128_KEY_SIZE)
+			auth_cfg = pce_dev->reg.auth_cfg_cmac_128;
+		else
+			auth_cfg = pce_dev->reg.auth_cfg_cmac_256;
+	}
+
+	if ((sreq->alg == QCE_HASH_SHA1_HMAC) ||
+			(sreq->alg == QCE_HASH_SHA256_HMAC) ||
+			(sreq->alg ==  QCE_HASH_AES_CMAC)) {
+
+		_byte_stream_to_net_words(mackey32, sreq->authkey,
+						sreq->authklen);
+
+		/* no more check for a null key; use the flags instead */
+
+		if ((sreq->flags & QCRYPTO_CTX_USE_HW_KEY) ==
+					QCRYPTO_CTX_USE_HW_KEY) {
+			use_hw_key = true;
+		} else if ((sreq->flags & QCRYPTO_CTX_USE_PIPE_KEY) ==
+						QCRYPTO_CTX_USE_PIPE_KEY) {
+			use_pipe_key = true;
+		} else {
+			/* setup key */
+			for (i = 0; i < authk_size_in_word; i++)
+				QCE_WRITE_REG(mackey32[i], (pce_dev->iobase +
+					(CRYPTO_AUTH_KEY0_REG +
+							i*sizeof(uint32_t))));
+		}
+	}
+
+	if (sreq->alg ==  QCE_HASH_AES_CMAC)
+		goto go_proc;
+
+	/* if not the last, the size has to be on the block boundary */
+	if (sreq->last_blk == 0 && (sreq->size % SHA256_BLOCK_SIZE))
+		return -EIO;
+
+	switch (sreq->alg) {
+	case QCE_HASH_SHA1:
+		auth_cfg = pce_dev->reg.auth_cfg_sha1;
+		diglen = SHA1_DIGEST_SIZE;
+		sha1 = true;
+		break;
+	case QCE_HASH_SHA1_HMAC:
+		auth_cfg = pce_dev->reg.auth_cfg_hmac_sha1;
+		diglen = SHA1_DIGEST_SIZE;
+		sha1 = true;
+		break;
+	case QCE_HASH_SHA256:
+		auth_cfg = pce_dev->reg.auth_cfg_sha256;
+		diglen = SHA256_DIGEST_SIZE;
+		break;
+	case QCE_HASH_SHA256_HMAC:
+		auth_cfg = pce_dev->reg.auth_cfg_hmac_sha256;
+		diglen = SHA256_DIGEST_SIZE;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	/* write 20/32 bytes, 5/8 words into auth_iv for SHA1/SHA256 */
+	if (sreq->first_blk) {
+		if (sha1) {
+			for (i = 0; i < 5; i++)
+				auth32[i] = _std_init_vector_sha1[i];
+		} else {
+			for (i = 0; i < 8; i++)
+				auth32[i] = _std_init_vector_sha256[i];
+		}
+	} else {
+		_byte_stream_to_net_words(auth32, sreq->digest, diglen);
+	}
+
+	/* Set auth_ivn, auth_keyn registers  */
+	for (i = 0; i < 5; i++)
+		QCE_WRITE_REG(auth32[i], (pce_dev->iobase +
+			(CRYPTO_AUTH_IV0_REG + i*sizeof(uint32_t))));
+
+	if ((sreq->alg == QCE_HASH_SHA256) ||
+			(sreq->alg == QCE_HASH_SHA256_HMAC)) {
+		for (i = 5; i < 8; i++)
+			QCE_WRITE_REG(auth32[i], (pce_dev->iobase +
+				(CRYPTO_AUTH_IV0_REG + i*sizeof(uint32_t))));
+	}
+
+
+	/* write auth_bytecnt 0/1/2/3, start with 0 */
+	for (i = 0; i < 2; i++)
+		QCE_WRITE_REG(sreq->auth_data[i], pce_dev->iobase +
+					CRYPTO_AUTH_BYTECNT0_REG +
+						i * sizeof(uint32_t));
+
+	/* Set/reset  last bit in CFG register  */
+	if (sreq->last_blk)
+		auth_cfg |= 1 << CRYPTO_LAST;
+	else
+		auth_cfg &= ~(1 << CRYPTO_LAST);
+	if (sreq->first_blk)
+		auth_cfg |= 1 << CRYPTO_FIRST;
+	else
+		auth_cfg &= ~(1 << CRYPTO_FIRST);
+	if (use_hw_key)
+		auth_cfg |= 1 << CRYPTO_USE_HW_KEY_AUTH;
+	if (use_pipe_key)
+		auth_cfg |= 1 << CRYPTO_USE_PIPE_KEY_AUTH;
+go_proc:
+	 /* write seg_cfg */
+	QCE_WRITE_REG(auth_cfg, pce_dev->iobase + CRYPTO_AUTH_SEG_CFG_REG);
+	/* write auth seg_size   */
+	QCE_WRITE_REG(sreq->size, pce_dev->iobase + CRYPTO_AUTH_SEG_SIZE_REG);
+
+	/* write auth_seg_start   */
+	QCE_WRITE_REG(0, pce_dev->iobase + CRYPTO_AUTH_SEG_START_REG);
+
+	/* reset encr seg_cfg   */
+	QCE_WRITE_REG(0, pce_dev->iobase + CRYPTO_ENCR_SEG_CFG_REG);
+
+	/* write seg_size   */
+	QCE_WRITE_REG(sreq->size, pce_dev->iobase + CRYPTO_SEG_SIZE_REG);
+
+	QCE_WRITE_REG(pce_dev->reg.crypto_cfg_le, (pce_dev->iobase +
+							CRYPTO_CONFIG_REG));
+	/* issue go to crypto   */
+	if (use_hw_key == false) {
+		QCE_WRITE_REG(((1 << CRYPTO_GO) | (1 << CRYPTO_RESULTS_DUMP) |
+				(1 << CRYPTO_CLR_CNTXT)),
+				pce_dev->iobase + CRYPTO_GOPROC_REG);
+	} else {
+		QCE_WRITE_REG(((1 << CRYPTO_GO) | (1 << CRYPTO_RESULTS_DUMP)),
+				pce_dev->iobase + CRYPTO_GOPROC_QC_KEY_REG);
+	}
+	/*
+	 * Ensure the previous instructions (setting the GO register)
+	 * have completed before issuing a DMA transfer request.
+	 */
+	mb();
+	return 0;
+}
+
+static int _ce_setup_aead_direct(struct qce_device *pce_dev,
+		struct qce_req *q_req, uint32_t totallen_in, uint32_t coffset)
+{
+	int32_t authk_size_in_word = SHA_HMAC_KEY_SIZE/sizeof(uint32_t);
+	int i;
+	uint32_t mackey32[SHA_HMAC_KEY_SIZE/sizeof(uint32_t)] = {0};
+	uint32_t a_cfg;
+	uint32_t enckey32[(MAX_CIPHER_KEY_SIZE*2)/sizeof(uint32_t)] = {0};
+	uint32_t enciv32[MAX_IV_LENGTH/sizeof(uint32_t)] = {0};
+	uint32_t enck_size_in_word = 0;
+	uint32_t enciv_in_word;
+	uint32_t key_size;
+	uint32_t ivsize = q_req->ivsize;
+	uint32_t encr_cfg;
+
+
+	/* clear status */
+	QCE_WRITE_REG(0, pce_dev->iobase + CRYPTO_STATUS_REG);
+
+	QCE_WRITE_REG(pce_dev->reg.crypto_cfg_be, (pce_dev->iobase +
+							CRYPTO_CONFIG_REG));
+	/*
+	 * Ensure the previous instructions (setting the CONFIG register)
+	 * have completed before starting to set the other config registers.
+	 * This guarantees the configuration is done with the correct
+	 * endianness, as set in the CONFIG register.
+	 */
+	mb();
+
+	key_size = q_req->encklen;
+	enck_size_in_word = key_size/sizeof(uint32_t);
+
+	switch (q_req->alg) {
+
+	case CIPHER_ALG_DES:
+
+		switch (q_req->mode) {
+		case QCE_MODE_CBC:
+			encr_cfg = pce_dev->reg.encr_cfg_des_cbc;
+			break;
+		default:
+			return -EINVAL;
+		}
+
+		enciv_in_word = 2;
+		break;
+
+	case CIPHER_ALG_3DES:
+
+		switch (q_req->mode) {
+		case QCE_MODE_CBC:
+			encr_cfg = pce_dev->reg.encr_cfg_3des_cbc;
+			break;
+		default:
+			return -EINVAL;
+		}
+
+		enciv_in_word = 2;
+
+		break;
+
+	case CIPHER_ALG_AES:
+
+		switch (q_req->mode) {
+		case QCE_MODE_CBC:
+			if (key_size == AES128_KEY_SIZE)
+				encr_cfg = pce_dev->reg.encr_cfg_aes_cbc_128;
+			else if (key_size  == AES256_KEY_SIZE)
+				encr_cfg = pce_dev->reg.encr_cfg_aes_cbc_256;
+			else
+				return -EINVAL;
+			break;
+		default:
+			return -EINVAL;
+		}
+
+		enciv_in_word = 4;
+		break;
+
+	default:
+		return -EINVAL;
+	}
+
+
+	/* write CNTR0_IV0_REG */
+	if (q_req->mode !=  QCE_MODE_ECB) {
+		_byte_stream_to_net_words(enciv32, q_req->iv, ivsize);
+		for (i = 0; i < enciv_in_word; i++)
+			QCE_WRITE_REG(enciv32[i], pce_dev->iobase +
+				(CRYPTO_CNTR0_IV0_REG + i * sizeof(uint32_t)));
+	}
+
+	/*
+	 * write encr key
+	 * do not use  hw key or pipe key
+	 */
+	_byte_stream_to_net_words(enckey32, q_req->enckey, key_size);
+	for (i = 0; i < enck_size_in_word; i++)
+		QCE_WRITE_REG(enckey32[i], pce_dev->iobase +
+				(CRYPTO_ENCR_KEY0_REG + i * sizeof(uint32_t)));
+
+	/* write encr seg cfg */
+	if (q_req->dir == QCE_ENCRYPT)
+		encr_cfg |= (1 << CRYPTO_ENCODE);
+	QCE_WRITE_REG(encr_cfg, pce_dev->iobase + CRYPTO_ENCR_SEG_CFG_REG);
+
+	/* we only support sha1-hmac and sha256-hmac at this point */
+	_byte_stream_to_net_words(mackey32, q_req->authkey,
+					q_req->authklen);
+	for (i = 0; i < authk_size_in_word; i++)
+		QCE_WRITE_REG(mackey32[i], pce_dev->iobase +
+			(CRYPTO_AUTH_KEY0_REG + i * sizeof(uint32_t)));
+
+	if (q_req->auth_alg == QCE_HASH_SHA1_HMAC) {
+		for (i = 0; i < 5; i++)
+			QCE_WRITE_REG(_std_init_vector_sha1[i],
+				pce_dev->iobase +
+				(CRYPTO_AUTH_IV0_REG + i * sizeof(uint32_t)));
+	} else {
+		for (i = 0; i < 8; i++)
+			QCE_WRITE_REG(_std_init_vector_sha256[i],
+				pce_dev->iobase +
+				(CRYPTO_AUTH_IV0_REG + i * sizeof(uint32_t)));
+	}
+
+	/* write auth_bytecnt 0/1, start with 0 */
+	QCE_WRITE_REG(0, pce_dev->iobase + CRYPTO_AUTH_BYTECNT0_REG);
+	QCE_WRITE_REG(0, pce_dev->iobase + CRYPTO_AUTH_BYTECNT1_REG);
+
+	/* write encr seg size    */
+	QCE_WRITE_REG(q_req->cryptlen, pce_dev->iobase +
+			CRYPTO_ENCR_SEG_SIZE_REG);
+
+	/* write encr start   */
+	QCE_WRITE_REG(coffset & 0xffff, pce_dev->iobase +
+			CRYPTO_ENCR_SEG_START_REG);
+
+	if (q_req->auth_alg == QCE_HASH_SHA1_HMAC)
+		a_cfg = pce_dev->reg.auth_cfg_aead_sha1_hmac;
+	else
+		a_cfg = pce_dev->reg.auth_cfg_aead_sha256_hmac;
+
+	if (q_req->dir == QCE_ENCRYPT)
+		a_cfg |= (CRYPTO_AUTH_POS_AFTER << CRYPTO_AUTH_POS);
+	else
+		a_cfg |= (CRYPTO_AUTH_POS_BEFORE << CRYPTO_AUTH_POS);
+
+	/* write auth seg_cfg */
+	QCE_WRITE_REG(a_cfg, pce_dev->iobase + CRYPTO_AUTH_SEG_CFG_REG);
+
+	/* write auth seg_size   */
+	QCE_WRITE_REG(totallen_in, pce_dev->iobase + CRYPTO_AUTH_SEG_SIZE_REG);
+
+	/* write auth_seg_start   */
+	QCE_WRITE_REG(0, pce_dev->iobase + CRYPTO_AUTH_SEG_START_REG);
+
+
+	/* write seg_size   */
+	QCE_WRITE_REG(totallen_in, pce_dev->iobase + CRYPTO_SEG_SIZE_REG);
+
+
+	QCE_WRITE_REG(pce_dev->reg.crypto_cfg_le, (pce_dev->iobase +
+							CRYPTO_CONFIG_REG));
+	/* issue go to crypto   */
+	QCE_WRITE_REG(((1 << CRYPTO_GO) | (1 << CRYPTO_RESULTS_DUMP) |
+				(1 << CRYPTO_CLR_CNTXT)),
+				pce_dev->iobase + CRYPTO_GOPROC_REG);
+	/*
+	 * Ensure the previous instructions (setting the GO register)
+	 * have completed before issuing a DMA transfer request.
+	 */
+	mb();
+	return 0;
+}
+
+static int _ce_setup_cipher_direct(struct qce_device *pce_dev,
+		struct qce_req *creq, uint32_t totallen_in, uint32_t coffset)
+{
+	uint32_t enckey32[(MAX_CIPHER_KEY_SIZE * 2)/sizeof(uint32_t)] = {
+			0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
+	uint32_t enciv32[MAX_IV_LENGTH / sizeof(uint32_t)] = {
+			0, 0, 0, 0};
+	uint32_t enck_size_in_word = 0;
+	uint32_t key_size;
+	bool use_hw_key = false;
+	bool use_pipe_key = false;
+	uint32_t encr_cfg = 0;
+	uint32_t ivsize = creq->ivsize;
+	int i;
+
+	/* clear status */
+	QCE_WRITE_REG(0, pce_dev->iobase + CRYPTO_STATUS_REG);
+
+	QCE_WRITE_REG(pce_dev->reg.crypto_cfg_be, (pce_dev->iobase +
+							CRYPTO_CONFIG_REG));
+	/*
+	 * Ensure the previous instructions (setting the CONFIG register)
+	 * have completed before starting to set the other config registers.
+	 * This guarantees the configuration is done with the correct
+	 * endianness, as set in the CONFIG register.
+	 */
+	mb();
+
+	if (creq->mode == QCE_MODE_XTS)
+		key_size = creq->encklen/2;
+	else
+		key_size = creq->encklen;
+
+	if ((creq->flags & QCRYPTO_CTX_USE_HW_KEY) == QCRYPTO_CTX_USE_HW_KEY) {
+		use_hw_key = true;
+	} else {
+		if ((creq->flags & QCRYPTO_CTX_USE_PIPE_KEY) ==
+					QCRYPTO_CTX_USE_PIPE_KEY)
+			use_pipe_key = true;
+	}
+	if ((use_pipe_key == false) && (use_hw_key == false)) {
+		_byte_stream_to_net_words(enckey32, creq->enckey, key_size);
+		enck_size_in_word = key_size/sizeof(uint32_t);
+	}
+	if ((creq->op == QCE_REQ_AEAD) && (creq->mode == QCE_MODE_CCM)) {
+		uint32_t authklen32 = creq->encklen/sizeof(uint32_t);
+		uint32_t noncelen32 = MAX_NONCE/sizeof(uint32_t);
+		uint32_t nonce32[MAX_NONCE/sizeof(uint32_t)] = {0, 0, 0, 0};
+		uint32_t auth_cfg = 0;
+
+		/* Clear auth_ivn, auth_keyn registers  */
+		for (i = 0; i < 16; i++) {
+			QCE_WRITE_REG(0, (pce_dev->iobase +
+				(CRYPTO_AUTH_IV0_REG + i*sizeof(uint32_t))));
+			QCE_WRITE_REG(0, (pce_dev->iobase +
+				(CRYPTO_AUTH_KEY0_REG + i*sizeof(uint32_t))));
+		}
+		/* write auth_bytecnt 0/1/2/3, start with 0 */
+		for (i = 0; i < 4; i++)
+			QCE_WRITE_REG(0, pce_dev->iobase +
+						CRYPTO_AUTH_BYTECNT0_REG +
+						i * sizeof(uint32_t));
+		/* write nonce */
+		_byte_stream_to_net_words(nonce32, creq->nonce, MAX_NONCE);
+		for (i = 0; i < noncelen32; i++)
+			QCE_WRITE_REG(nonce32[i], pce_dev->iobase +
+				CRYPTO_AUTH_INFO_NONCE0_REG +
+					(i*sizeof(uint32_t)));
+
+		if (creq->authklen == AES128_KEY_SIZE)
+			auth_cfg = pce_dev->reg.auth_cfg_aes_ccm_128;
+		else if (creq->authklen == AES256_KEY_SIZE)
+			auth_cfg = pce_dev->reg.auth_cfg_aes_ccm_256;
+		if (creq->dir == QCE_ENCRYPT)
+			auth_cfg |= (CRYPTO_AUTH_POS_BEFORE << CRYPTO_AUTH_POS);
+		else
+			auth_cfg |= (CRYPTO_AUTH_POS_AFTER << CRYPTO_AUTH_POS);
+		auth_cfg |= ((creq->authsize - 1) << CRYPTO_AUTH_SIZE);
+
+		if (use_hw_key == true) {
+			auth_cfg |= (1 << CRYPTO_USE_HW_KEY_AUTH);
+		} else {
+			auth_cfg &= ~(1 << CRYPTO_USE_HW_KEY_AUTH);
+			/* write auth key */
+			for (i = 0; i < authklen32; i++)
+				QCE_WRITE_REG(enckey32[i], pce_dev->iobase +
+				CRYPTO_AUTH_KEY0_REG + (i*sizeof(uint32_t)));
+		}
+		QCE_WRITE_REG(auth_cfg, pce_dev->iobase +
+						CRYPTO_AUTH_SEG_CFG_REG);
+		if (creq->dir == QCE_ENCRYPT) {
+			QCE_WRITE_REG(totallen_in, pce_dev->iobase +
+						CRYPTO_AUTH_SEG_SIZE_REG);
+		} else {
+			QCE_WRITE_REG((totallen_in - creq->authsize),
+				pce_dev->iobase + CRYPTO_AUTH_SEG_SIZE_REG);
+		}
+		QCE_WRITE_REG(0, pce_dev->iobase + CRYPTO_AUTH_SEG_START_REG);
+	} else {
+		if (creq->op != QCE_REQ_AEAD)
+			QCE_WRITE_REG(0, pce_dev->iobase +
+						CRYPTO_AUTH_SEG_CFG_REG);
+	}
+	/*
+	 * Ensure the previous writes (to all AUTH registers) have
+	 * completed before accessing a register that is not in
+	 * the same 1K range.
+	 */
+	mb();
+	switch (creq->mode) {
+	case QCE_MODE_ECB:
+		if (key_size == AES128_KEY_SIZE)
+			encr_cfg = pce_dev->reg.encr_cfg_aes_ecb_128;
+		else
+			encr_cfg = pce_dev->reg.encr_cfg_aes_ecb_256;
+		break;
+	case QCE_MODE_CBC:
+		if (key_size == AES128_KEY_SIZE)
+			encr_cfg = pce_dev->reg.encr_cfg_aes_cbc_128;
+		else
+			encr_cfg = pce_dev->reg.encr_cfg_aes_cbc_256;
+		break;
+	case QCE_MODE_XTS:
+		if (key_size == AES128_KEY_SIZE)
+			encr_cfg = pce_dev->reg.encr_cfg_aes_xts_128;
+		else
+			encr_cfg = pce_dev->reg.encr_cfg_aes_xts_256;
+		break;
+	case QCE_MODE_CCM:
+		if (key_size == AES128_KEY_SIZE)
+			encr_cfg = pce_dev->reg.encr_cfg_aes_ccm_128;
+		else
+			encr_cfg = pce_dev->reg.encr_cfg_aes_ccm_256;
+		break;
+	case QCE_MODE_CTR:
+	default:
+		if (key_size == AES128_KEY_SIZE)
+			encr_cfg = pce_dev->reg.encr_cfg_aes_ctr_128;
+		else
+			encr_cfg = pce_dev->reg.encr_cfg_aes_ctr_256;
+		break;
+	}
+
+	switch (creq->alg) {
+	case CIPHER_ALG_DES:
+		if (creq->mode !=  QCE_MODE_ECB) {
+			encr_cfg = pce_dev->reg.encr_cfg_des_cbc;
+			_byte_stream_to_net_words(enciv32, creq->iv, ivsize);
+			QCE_WRITE_REG(enciv32[0], pce_dev->iobase +
+						CRYPTO_CNTR0_IV0_REG);
+			QCE_WRITE_REG(enciv32[1], pce_dev->iobase +
+						CRYPTO_CNTR1_IV1_REG);
+		} else {
+			encr_cfg = pce_dev->reg.encr_cfg_des_ecb;
+		}
+		if (use_hw_key == false) {
+			QCE_WRITE_REG(enckey32[0], pce_dev->iobase +
+							CRYPTO_ENCR_KEY0_REG);
+			QCE_WRITE_REG(enckey32[1], pce_dev->iobase +
+							CRYPTO_ENCR_KEY1_REG);
+		}
+		break;
+	case CIPHER_ALG_3DES:
+		if (creq->mode !=  QCE_MODE_ECB) {
+			_byte_stream_to_net_words(enciv32, creq->iv, ivsize);
+			QCE_WRITE_REG(enciv32[0], pce_dev->iobase +
+						CRYPTO_CNTR0_IV0_REG);
+			QCE_WRITE_REG(enciv32[1], pce_dev->iobase +
+						CRYPTO_CNTR1_IV1_REG);
+			encr_cfg = pce_dev->reg.encr_cfg_3des_cbc;
+		} else {
+			encr_cfg = pce_dev->reg.encr_cfg_3des_ecb;
+		}
+		if (use_hw_key == false) {
+			/* write encr key */
+			for (i = 0; i < 6; i++)
+				QCE_WRITE_REG(enckey32[i], (pce_dev->iobase +
+				(CRYPTO_ENCR_KEY0_REG + i * sizeof(uint32_t))));
+		}
+		break;
+	case CIPHER_ALG_AES:
+	default:
+		if (creq->mode ==  QCE_MODE_XTS) {
+			uint32_t xtskey32[MAX_CIPHER_KEY_SIZE/sizeof(uint32_t)]
+					= {0, 0, 0, 0, 0, 0, 0, 0};
+			uint32_t xtsklen =
+					creq->encklen/(2 * sizeof(uint32_t));
+
+			if ((use_hw_key == false) && (use_pipe_key == false)) {
+				_byte_stream_to_net_words(xtskey32,
+					(creq->enckey + creq->encklen/2),
+							creq->encklen/2);
+				/* write xts encr key */
+				for (i = 0; i < xtsklen; i++)
+					QCE_WRITE_REG(xtskey32[i],
+						pce_dev->iobase +
+						CRYPTO_ENCR_XTS_KEY0_REG +
+						(i * sizeof(uint32_t)));
+			}
+			/* write xts du size */
+			switch (creq->flags & QCRYPTO_CTX_XTS_MASK) {
+			case QCRYPTO_CTX_XTS_DU_SIZE_512B:
+				QCE_WRITE_REG(
+					min((uint32_t)QCE_SECTOR_SIZE,
+					creq->cryptlen), pce_dev->iobase +
+					CRYPTO_ENCR_XTS_DU_SIZE_REG);
+				break;
+			case QCRYPTO_CTX_XTS_DU_SIZE_1KB:
+				QCE_WRITE_REG(
+					min((uint32_t)(QCE_SECTOR_SIZE * 2),
+					creq->cryptlen), pce_dev->iobase +
+					CRYPTO_ENCR_XTS_DU_SIZE_REG);
+				break;
+			default:
+				QCE_WRITE_REG(creq->cryptlen,
+					pce_dev->iobase +
+					CRYPTO_ENCR_XTS_DU_SIZE_REG);
+				break;
+			}
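+			/*
+			 * Worked example (illustrative values, assuming
+			 * QCE_SECTOR_SIZE is 512): with flags selecting
+			 * QCRYPTO_CTX_XTS_DU_SIZE_512B and cryptlen = 4096,
+			 * the DU size register gets min(512, 4096) = 512,
+			 * i.e. one sector per XTS data unit rather than
+			 * the whole request length.
+			 */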
+		}
+		if (creq->mode !=  QCE_MODE_ECB) {
+			if (creq->mode ==  QCE_MODE_XTS)
+				_byte_stream_swap_to_net_words(enciv32,
+							creq->iv, ivsize);
+			else
+				_byte_stream_to_net_words(enciv32, creq->iv,
+								ivsize);
+
+			/* write encr cntr iv */
+			for (i = 0; i <= 3; i++)
+				QCE_WRITE_REG(enciv32[i], pce_dev->iobase +
+							CRYPTO_CNTR0_IV0_REG +
+							(i * sizeof(uint32_t)));
+
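+			/*
+			 * Sketch of the CCM counter setup done below (per
+			 * the CCM construction; register roles inferred
+			 * from their names): CCM_INT_CNTR0..3 get the
+			 * initial counter block (counter = 0, used for the
+			 * MAC), while CNTR0_IV0..3 get the same block with
+			 * its last word pre-incremented to counter = 1,
+			 * the first block that encrypts payload.
+			 */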
+			if (creq->mode == QCE_MODE_CCM) {
+				/* write cntr iv for ccm */
+				for (i = 0; i <= 3; i++)
+					QCE_WRITE_REG(enciv32[i],
+						pce_dev->iobase +
+						CRYPTO_ENCR_CCM_INT_CNTR0_REG +
+							(i * sizeof(uint32_t)));
+				/* update cntr_iv[3] by one */
+				QCE_WRITE_REG((enciv32[3] + 1),
+							pce_dev->iobase +
+							CRYPTO_CNTR0_IV0_REG +
+							(3 * sizeof(uint32_t)));
+			}
+		}
+
+		if (creq->op == QCE_REQ_ABLK_CIPHER_NO_KEY) {
+			encr_cfg |= (CRYPTO_ENCR_KEY_SZ_AES128 <<
+					CRYPTO_ENCR_KEY_SZ);
+		} else {
+			if ((use_hw_key == false) && (use_pipe_key == false)) {
+				for (i = 0; i < enck_size_in_word; i++)
+					QCE_WRITE_REG(enckey32[i],
+						pce_dev->iobase +
+							CRYPTO_ENCR_KEY0_REG +
+							(i * sizeof(uint32_t)));
+			}
+		} /* else of if (creq->op == QCE_REQ_ABLK_CIPHER_NO_KEY) */
+		break;
+	} /* end of switch (creq->alg) */
+
+	if (use_pipe_key)
+		encr_cfg |= (CRYPTO_USE_PIPE_KEY_ENCR_ENABLED
+					<< CRYPTO_USE_PIPE_KEY_ENCR);
+
+	/* set encode (encrypt/decrypt) bit in encr seg cfg */
+	encr_cfg |= ((creq->dir == QCE_ENCRYPT) ? 1 : 0) << CRYPTO_ENCODE;
+	if (use_hw_key == true)
+		encr_cfg |= (CRYPTO_USE_HW_KEY << CRYPTO_USE_HW_KEY_ENCR);
+	else
+		encr_cfg &= ~(CRYPTO_USE_HW_KEY << CRYPTO_USE_HW_KEY_ENCR);
+	/* write encr seg cfg */
+	QCE_WRITE_REG(encr_cfg, pce_dev->iobase + CRYPTO_ENCR_SEG_CFG_REG);
+
+	/* write encr seg size */
+	if ((creq->mode == QCE_MODE_CCM) && (creq->dir == QCE_DECRYPT)) {
+		QCE_WRITE_REG((creq->cryptlen + creq->authsize),
+				pce_dev->iobase + CRYPTO_ENCR_SEG_SIZE_REG);
+	} else {
+		QCE_WRITE_REG(creq->cryptlen,
+				pce_dev->iobase + CRYPTO_ENCR_SEG_SIZE_REG);
+	}
+
+	/* write encr seg start */
+	QCE_WRITE_REG((coffset & 0xffff),
+			pce_dev->iobase + CRYPTO_ENCR_SEG_START_REG);
+
+	/* write encr counter mask */
+	QCE_WRITE_REG(0xffffffff,
+			pce_dev->iobase + CRYPTO_CNTR_MASK_REG);
+	QCE_WRITE_REG(0xffffffff,
+			pce_dev->iobase + CRYPTO_CNTR_MASK_REG0);
+	QCE_WRITE_REG(0xffffffff,
+			pce_dev->iobase + CRYPTO_CNTR_MASK_REG1);
+	QCE_WRITE_REG(0xffffffff,
+			pce_dev->iobase + CRYPTO_CNTR_MASK_REG2);
+
+	/* write seg size  */
+	QCE_WRITE_REG(totallen_in, pce_dev->iobase + CRYPTO_SEG_SIZE_REG);
+
+	QCE_WRITE_REG(pce_dev->reg.crypto_cfg_le, (pce_dev->iobase +
+							CRYPTO_CONFIG_REG));
+	/* issue go to crypto   */
+	if (use_hw_key == false) {
+		QCE_WRITE_REG(((1 << CRYPTO_GO) | (1 << CRYPTO_RESULTS_DUMP) |
+				(1 << CRYPTO_CLR_CNTXT)),
+				pce_dev->iobase + CRYPTO_GOPROC_REG);
+	} else {
+		QCE_WRITE_REG(((1 << CRYPTO_GO) | (1 << CRYPTO_RESULTS_DUMP)),
+				pce_dev->iobase + CRYPTO_GOPROC_QC_KEY_REG);
+	}
+	/*
+	 * Ensure the previous instructions (setting the GO register)
+	 * have completed before issuing a DMA transfer request.
+	 */
+	mb();
+	return 0;
+}
+
+static int _ce_f9_setup_direct(struct qce_device *pce_dev,
+				 struct qce_f9_req *req)
+{
+	uint32_t ikey32[OTA_KEY_SIZE/sizeof(uint32_t)];
+	uint32_t key_size_in_word = OTA_KEY_SIZE/sizeof(uint32_t);
+	uint32_t auth_cfg;
+	int i;
+
+	switch (req->algorithm) {
+	case QCE_OTA_ALGO_KASUMI:
+		auth_cfg = pce_dev->reg.auth_cfg_kasumi;
+		break;
+	case QCE_OTA_ALGO_SNOW3G:
+	default:
+		auth_cfg = pce_dev->reg.auth_cfg_snow3g;
+		break;
+	}
+
+	/* clear status */
+	QCE_WRITE_REG(0, pce_dev->iobase + CRYPTO_STATUS_REG);
+
+	/* set big endian configuration */
+	QCE_WRITE_REG(pce_dev->reg.crypto_cfg_be, (pce_dev->iobase +
+							CRYPTO_CONFIG_REG));
+	/*
+	 * Ensure the previous write (setting the CONFIG register) has
+	 * completed before starting to program the other registers, so
+	 * the configuration is done with the endianness selected in the
+	 * CONFIG register.
+	 */
+	mb();
+
+	/* write enc_seg_cfg */
+	QCE_WRITE_REG(0, pce_dev->iobase + CRYPTO_ENCR_SEG_CFG_REG);
+
+	/* write encr_seg_size */
+	QCE_WRITE_REG(0, pce_dev->iobase + CRYPTO_ENCR_SEG_SIZE_REG);
+
+	/* write key in CRYPTO_AUTH_IV0-3_REG */
+	_byte_stream_to_net_words(ikey32, &req->ikey[0], OTA_KEY_SIZE);
+	for (i = 0; i < key_size_in_word; i++)
+		QCE_WRITE_REG(ikey32[i], (pce_dev->iobase +
+			(CRYPTO_AUTH_IV0_REG + i*sizeof(uint32_t))));
+
+	/* write last bits  in CRYPTO_AUTH_IV4_REG  */
+	QCE_WRITE_REG(req->last_bits, (pce_dev->iobase +
+					CRYPTO_AUTH_IV4_REG));
+
+	/* write fresh to CRYPTO_AUTH_BYTECNT0_REG */
+	QCE_WRITE_REG(req->fresh, (pce_dev->iobase +
+					 CRYPTO_AUTH_BYTECNT0_REG));
+
+	/* write count-i  to CRYPTO_AUTH_BYTECNT1_REG */
+	QCE_WRITE_REG(req->count_i, (pce_dev->iobase +
+					 CRYPTO_AUTH_BYTECNT1_REG));
+
+	/* write auth seg cfg */
+	if (req->direction == QCE_OTA_DIR_DOWNLINK)
+		auth_cfg |= BIT(CRYPTO_F9_DIRECTION);
+	QCE_WRITE_REG(auth_cfg, pce_dev->iobase + CRYPTO_AUTH_SEG_CFG_REG);
+
+	/* write auth seg size */
+	QCE_WRITE_REG(req->msize, pce_dev->iobase + CRYPTO_AUTH_SEG_SIZE_REG);
+
+	/* write auth seg start */
+	QCE_WRITE_REG(0, pce_dev->iobase + CRYPTO_AUTH_SEG_START_REG);
+
+	/* write seg size  */
+	QCE_WRITE_REG(req->msize, pce_dev->iobase + CRYPTO_SEG_SIZE_REG);
+
+	/* set little endian configuration before go */
+	QCE_WRITE_REG(pce_dev->reg.crypto_cfg_le, (pce_dev->iobase +
+							CRYPTO_CONFIG_REG));
+	/* write go */
+	QCE_WRITE_REG(((1 << CRYPTO_GO) | (1 << CRYPTO_RESULTS_DUMP) |
+				(1 << CRYPTO_CLR_CNTXT)),
+				pce_dev->iobase +  CRYPTO_GOPROC_REG);
+	/*
+	 * Ensure the previous instructions (setting the GO register)
+	 * have completed before issuing a DMA transfer request.
+	 */
+	mb();
+	return 0;
+}
+
+static int _ce_f8_setup_direct(struct qce_device *pce_dev,
+		struct qce_f8_req *req, bool key_stream_mode,
+		uint16_t npkts, uint16_t cipher_offset, uint16_t cipher_size)
+{
+	int i = 0;
+	uint32_t encr_cfg = 0;
+	uint32_t ckey32[OTA_KEY_SIZE/sizeof(uint32_t)];
+	uint32_t key_size_in_word = OTA_KEY_SIZE/sizeof(uint32_t);
+
+	switch (req->algorithm) {
+	case QCE_OTA_ALGO_KASUMI:
+		encr_cfg = pce_dev->reg.encr_cfg_kasumi;
+		break;
+	case QCE_OTA_ALGO_SNOW3G:
+	default:
+		encr_cfg = pce_dev->reg.encr_cfg_snow3g;
+		break;
+	}
+	/* clear status */
+	QCE_WRITE_REG(0, pce_dev->iobase + CRYPTO_STATUS_REG);
+	/* set big endian configuration */
+	QCE_WRITE_REG(pce_dev->reg.crypto_cfg_be, (pce_dev->iobase +
+							CRYPTO_CONFIG_REG));
+	/* write auth seg configuration */
+	QCE_WRITE_REG(0, pce_dev->iobase + CRYPTO_AUTH_SEG_CFG_REG);
+	/* write auth seg size */
+	QCE_WRITE_REG(0, pce_dev->iobase + CRYPTO_AUTH_SEG_SIZE_REG);
+
+	/* write key */
+	_byte_stream_to_net_words(ckey32, &req->ckey[0], OTA_KEY_SIZE);
+
+	for (i = 0; i < key_size_in_word; i++)
+		QCE_WRITE_REG(ckey32[i], (pce_dev->iobase +
+			(CRYPTO_ENCR_KEY0_REG + i*sizeof(uint32_t))));
+	/* write encr seg cfg */
+	if (key_stream_mode)
+		encr_cfg |= BIT(CRYPTO_F8_KEYSTREAM_ENABLE);
+	if (req->direction == QCE_OTA_DIR_DOWNLINK)
+		encr_cfg |= BIT(CRYPTO_F8_DIRECTION);
+	QCE_WRITE_REG(encr_cfg, pce_dev->iobase +
+		CRYPTO_ENCR_SEG_CFG_REG);
+
+	/* write encr seg start */
+	QCE_WRITE_REG((cipher_offset & 0xffff), pce_dev->iobase +
+		CRYPTO_ENCR_SEG_START_REG);
+	/* write encr seg size  */
+	QCE_WRITE_REG(cipher_size, pce_dev->iobase +
+		CRYPTO_ENCR_SEG_SIZE_REG);
+
+	/* write seg size  */
+	QCE_WRITE_REG(req->data_len, pce_dev->iobase +
+		CRYPTO_SEG_SIZE_REG);
+
+	/* write cntr0_iv0 for countC */
+	QCE_WRITE_REG(req->count_c, pce_dev->iobase +
+		CRYPTO_CNTR0_IV0_REG);
+	/* write cntr1_iv1 for nPkts, and bearer */
+	if (npkts == 1)
+		npkts = 0;
+	QCE_WRITE_REG(req->bearer << CRYPTO_CNTR1_IV1_REG_F8_BEARER |
+				npkts << CRYPTO_CNTR1_IV1_REG_F8_PKT_CNT,
+			pce_dev->iobase + CRYPTO_CNTR1_IV1_REG);
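+	/*
+	 * Layout sketch of the packed IV1 word written above (bit
+	 * positions are whatever the CRYPTO_CNTR1_IV1_REG_F8_* shifts
+	 * define): e.g. bearer = 3, npkts = 4 gives
+	 * (3 << F8_BEARER) | (4 << F8_PKT_CNT). A single packet is
+	 * written as npkts = 0, presumably the HW encoding for "no
+	 * multi-packet count".
+	 */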
+
+	/* set little endian configuration before go */
+	QCE_WRITE_REG(pce_dev->reg.crypto_cfg_le, (pce_dev->iobase +
+							CRYPTO_CONFIG_REG));
+	/* write go */
+	QCE_WRITE_REG(((1 << CRYPTO_GO) | (1 << CRYPTO_RESULTS_DUMP) |
+				(1 << CRYPTO_CLR_CNTXT)),
+				pce_dev->iobase +  CRYPTO_GOPROC_REG);
+	/*
+	 * Ensure the previous instructions (setting the GO register)
+	 * have completed before issuing a DMA transfer request.
+	 */
+	mb();
+	return 0;
+}
+
+
+static int _qce_unlock_other_pipes(struct qce_device *pce_dev, int req_info)
+{
+	int rc = 0;
+	struct ce_sps_data *pce_sps_data = &pce_dev->ce_request_info
+						[req_info].ce_sps;
+
+	if (pce_dev->no_get_around || pce_dev->support_cmd_dscr == false)
+		return rc;
+
+	rc = sps_transfer_one(pce_dev->ce_bam_info.consumer.pipe,
+		GET_PHYS_ADDR(pce_sps_data->
+				cmdlistptr.unlock_all_pipes.cmdlist),
+		0, NULL, (SPS_IOVEC_FLAG_CMD | SPS_IOVEC_FLAG_UNLOCK));
+	if (rc) {
+		pr_err("sps_xfr_one() fail rc=%d", rc);
+		rc = -EINVAL;
+	}
+	return rc;
+}
+
+static inline void qce_free_req_info(struct qce_device *pce_dev, int req_info,
+		bool is_complete);
+
+static int _aead_complete(struct qce_device *pce_dev, int req_info)
+{
+	struct aead_request *areq;
+	unsigned char mac[SHA256_DIGEST_SIZE];
+	uint32_t ccm_fail_status = 0;
+	uint32_t result_dump_status;
+	int32_t result_status = 0;
+	struct ce_request_info *preq_info;
+	struct ce_sps_data *pce_sps_data;
+	qce_comp_func_ptr_t qce_callback;
+
+	preq_info = &pce_dev->ce_request_info[req_info];
+	pce_sps_data = &preq_info->ce_sps;
+	qce_callback = preq_info->qce_cb;
+	areq = (struct aead_request *) preq_info->areq;
+	if (areq->src != areq->dst) {
+		qce_dma_unmap_sg(pce_dev->pdev, areq->dst, preq_info->dst_nents,
+					DMA_FROM_DEVICE);
+	}
+	qce_dma_unmap_sg(pce_dev->pdev, areq->src, preq_info->src_nents,
+			(areq->src == areq->dst) ? DMA_BIDIRECTIONAL :
+							DMA_TO_DEVICE);
+
+	if (preq_info->asg)
+		qce_dma_unmap_sg(pce_dev->pdev, preq_info->asg,
+			preq_info->assoc_nents, DMA_TO_DEVICE);
+	/* check MAC */
+	memcpy(mac, (char *)(&pce_sps_data->result->auth_iv[0]),
+						SHA256_DIGEST_SIZE);
+
+	/* read status before unlock */
+	if (preq_info->dir == QCE_DECRYPT) {
+		if (pce_dev->no_get_around) {
+			if (pce_dev->no_ccm_mac_status_get_around)
+				ccm_fail_status = be32_to_cpu(pce_sps_data->
+							result->status);
+			else
+				ccm_fail_status = be32_to_cpu(pce_sps_data->
+							result_null->status);
+		} else {
+			ccm_fail_status = readl_relaxed(pce_dev->iobase +
+					CRYPTO_STATUS_REG);
+		}
+	}
+	if (_qce_unlock_other_pipes(pce_dev, req_info)) {
+		qce_free_req_info(pce_dev, req_info, true);
+		qce_callback(areq, mac, NULL, -ENXIO);
+		return -ENXIO;
+	}
+	result_dump_status = be32_to_cpu(pce_sps_data->result->status);
+	pce_sps_data->result->status = 0;
+
+	if (result_dump_status & ((1 << CRYPTO_SW_ERR) | (1 << CRYPTO_AXI_ERR)
+			| (1 <<  CRYPTO_HSD_ERR))) {
+		pr_err("aead operation error. Status %x\n", result_dump_status);
+		result_status = -ENXIO;
+	} else if (pce_sps_data->consumer_status ||
+			pce_sps_data->producer_status) {
+		pr_err("aead sps operation error. sps status %x %x\n",
+				pce_sps_data->consumer_status,
+				pce_sps_data->producer_status);
+		result_status = -ENXIO;
+	}
+
+	if (preq_info->mode == QCE_MODE_CCM) {
+		/*
+		 * Not from result dump, instead, use the status we just
+		 * read of device for MAC_FAILED.
+		 */
+		if (result_status == 0 && (preq_info->dir == QCE_DECRYPT) &&
+				(ccm_fail_status & (1 << CRYPTO_MAC_FAILED)))
+			result_status = -EBADMSG;
+		qce_free_req_info(pce_dev, req_info, true);
+		qce_callback(areq, mac, NULL, result_status);
+
+	} else {
+		uint32_t ivsize = 0;
+		struct crypto_aead *aead;
+		unsigned char iv[NUM_OF_CRYPTO_CNTR_IV_REG * CRYPTO_REG_SIZE];
+
+		aead = crypto_aead_reqtfm(areq);
+		ivsize = crypto_aead_ivsize(aead);
+		memcpy(iv, (char *)(pce_sps_data->result->encr_cntr_iv),
+			sizeof(iv));
+		qce_free_req_info(pce_dev, req_info, true);
+		qce_callback(areq, mac, iv, result_status);
+
+	}
+	return 0;
+}
+
+static int _sha_complete(struct qce_device *pce_dev, int req_info)
+{
+	struct ahash_request *areq;
+	unsigned char digest[SHA256_DIGEST_SIZE];
+	uint32_t bytecount32[2];
+	int32_t result_status = 0;
+	uint32_t result_dump_status;
+	struct ce_request_info *preq_info;
+	struct ce_sps_data *pce_sps_data;
+	qce_comp_func_ptr_t qce_callback;
+
+	preq_info = &pce_dev->ce_request_info[req_info];
+	pce_sps_data = &preq_info->ce_sps;
+	qce_callback = preq_info->qce_cb;
+	areq = (struct ahash_request *) preq_info->areq;
+	if (!areq) {
+		pr_err("sha operation error. areq is NULL\n");
+		return -ENXIO;
+	}
+	qce_dma_unmap_sg(pce_dev->pdev, areq->src, preq_info->src_nents,
+				DMA_TO_DEVICE);
+	memcpy(digest, (char *)(&pce_sps_data->result->auth_iv[0]),
+						SHA256_DIGEST_SIZE);
+	_byte_stream_to_net_words(bytecount32,
+		(unsigned char *)pce_sps_data->result->auth_byte_count,
+					2 * CRYPTO_REG_SIZE);
+
+	if (_qce_unlock_other_pipes(pce_dev, req_info)) {
+		qce_free_req_info(pce_dev, req_info, true);
+		qce_callback(areq, digest, (char *)bytecount32,
+				-ENXIO);
+		return -ENXIO;
+	}
+
+	result_dump_status = be32_to_cpu(pce_sps_data->result->status);
+	pce_sps_data->result->status = 0;
+	if (result_dump_status & ((1 << CRYPTO_SW_ERR) | (1 << CRYPTO_AXI_ERR)
+			| (1 <<  CRYPTO_HSD_ERR))) {
+
+		pr_err("sha operation error. Status %x\n", result_dump_status);
+		result_status = -ENXIO;
+	} else if (pce_sps_data->consumer_status) {
+		pr_err("sha sps operation error. sps status %x\n",
+			pce_sps_data->consumer_status);
+		result_status = -ENXIO;
+	}
+	qce_free_req_info(pce_dev, req_info, true);
+	qce_callback(areq, digest, (char *)bytecount32, result_status);
+	return 0;
+}
+
+static int _f9_complete(struct qce_device *pce_dev, int req_info)
+{
+	uint32_t mac_i;
+	int32_t result_status = 0;
+	uint32_t result_dump_status;
+	struct ce_request_info *preq_info;
+	struct ce_sps_data *pce_sps_data;
+	qce_comp_func_ptr_t qce_callback;
+	void *areq;
+
+	preq_info = &pce_dev->ce_request_info[req_info];
+	pce_sps_data = &preq_info->ce_sps;
+	qce_callback = preq_info->qce_cb;
+	areq = preq_info->areq;
+	dma_unmap_single(pce_dev->pdev, preq_info->phy_ota_src,
+				preq_info->ota_size, DMA_TO_DEVICE);
+	_byte_stream_to_net_words(&mac_i,
+		(char *)(&pce_sps_data->result->auth_iv[0]),
+		CRYPTO_REG_SIZE);
+
+	if (_qce_unlock_other_pipes(pce_dev, req_info)) {
+		qce_free_req_info(pce_dev, req_info, true);
+		qce_callback(areq, NULL, NULL, -ENXIO);
+		return -ENXIO;
+	}
+
+	result_dump_status = be32_to_cpu(pce_sps_data->result->status);
+	pce_sps_data->result->status = 0;
+	if (result_dump_status & ((1 << CRYPTO_SW_ERR) | (1 << CRYPTO_AXI_ERR)
+				| (1 <<  CRYPTO_HSD_ERR))) {
+		pr_err("f9 operation error. Status %x\n", result_dump_status);
+		result_status = -ENXIO;
+	} else if (pce_sps_data->consumer_status ||
+				pce_sps_data->producer_status) {
+		pr_err("f9 sps operation error. sps status %x %x\n",
+				pce_sps_data->consumer_status,
+				pce_sps_data->producer_status);
+		result_status = -ENXIO;
+	}
+	qce_free_req_info(pce_dev, req_info, true);
+	qce_callback(areq, (char *)&mac_i, NULL, result_status);
+
+	return 0;
+}
+
+static int _ablk_cipher_complete(struct qce_device *pce_dev, int req_info)
+{
+	struct ablkcipher_request *areq;
+	unsigned char iv[NUM_OF_CRYPTO_CNTR_IV_REG * CRYPTO_REG_SIZE];
+	int32_t result_status = 0;
+	uint32_t result_dump_status;
+	struct ce_request_info *preq_info;
+	struct ce_sps_data *pce_sps_data;
+	qce_comp_func_ptr_t qce_callback;
+
+	preq_info = &pce_dev->ce_request_info[req_info];
+	pce_sps_data = &preq_info->ce_sps;
+	qce_callback = preq_info->qce_cb;
+	areq = (struct ablkcipher_request *) preq_info->areq;
+	if (areq->src != areq->dst) {
+		qce_dma_unmap_sg(pce_dev->pdev, areq->dst,
+			preq_info->dst_nents, DMA_FROM_DEVICE);
+	}
+	qce_dma_unmap_sg(pce_dev->pdev, areq->src, preq_info->src_nents,
+		(areq->src == areq->dst) ? DMA_BIDIRECTIONAL :
+						DMA_TO_DEVICE);
+
+	if (_qce_unlock_other_pipes(pce_dev, req_info)) {
+		qce_free_req_info(pce_dev, req_info, true);
+		qce_callback(areq, NULL, NULL, -ENXIO);
+		return -ENXIO;
+	}
+	result_dump_status = be32_to_cpu(pce_sps_data->result->status);
+	pce_sps_data->result->status = 0;
+
+	if (result_dump_status & ((1 << CRYPTO_SW_ERR) | (1 << CRYPTO_AXI_ERR)
+			| (1 <<  CRYPTO_HSD_ERR))) {
+		pr_err("ablk_cipher operation error. Status %x\n",
+				result_dump_status);
+		result_status = -ENXIO;
+	} else if (pce_sps_data->consumer_status ||
+				pce_sps_data->producer_status) {
+		pr_err("ablk_cipher sps operation error. sps status %x %x\n",
+				pce_sps_data->consumer_status,
+				pce_sps_data->producer_status);
+		result_status = -ENXIO;
+	}
+
+	if (preq_info->mode == QCE_MODE_ECB) {
+		qce_free_req_info(pce_dev, req_info, true);
+		qce_callback(areq, NULL, NULL, pce_sps_data->consumer_status |
+								result_status);
+	} else {
+		if (pce_dev->ce_bam_info.minor_version == 0) {
+			if (preq_info->mode == QCE_MODE_CBC) {
+				if  (preq_info->dir == QCE_DECRYPT)
+					memcpy(iv, (char *)preq_info->dec_iv,
+								sizeof(iv));
+				else
+					memcpy(iv, (unsigned char *)
+						(sg_virt(areq->src) +
+						areq->src->length - 16),
+						sizeof(iv));
+			}
+			if ((preq_info->mode == QCE_MODE_CTR) ||
+				(preq_info->mode == QCE_MODE_XTS)) {
+				uint32_t num_blk = 0;
+				uint32_t cntr_iv3 = 0;
+				unsigned long long cntr_iv64 = 0;
+				unsigned char *b = (unsigned char *)(&cntr_iv3);
+
+				memcpy(iv, areq->info, sizeof(iv));
+				if (preq_info->mode != QCE_MODE_XTS)
+					num_blk = areq->nbytes/16;
+				else
+					num_blk = 1;
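+				/*
+				 * iv[12..15] hold the big-endian 32-bit
+				 * block counter; advance it by num_blk
+				 * modulo 2^32 and store it back in the
+				 * same byte order. Illustrative example:
+				 * iv[12..15] = 00 00 00 ff with
+				 * num_blk = 2 becomes 00 00 01 01.
+				 */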
+				cntr_iv3 =  ((*(iv + 12) << 24) & 0xff000000) |
+					(((*(iv + 13)) << 16) & 0xff0000) |
+					(((*(iv + 14)) << 8) & 0xff00) |
+					(*(iv + 15) & 0xff);
+				cntr_iv64 =
+					(((unsigned long long)cntr_iv3 &
+					(unsigned long long)0xFFFFFFFFULL) +
+					(unsigned long long)num_blk) %
+					(unsigned long long)(0x100000000ULL);
+
+				cntr_iv3 = (u32)(cntr_iv64 & 0xFFFFFFFF);
+				*(iv + 15) = (char)(*b);
+				*(iv + 14) = (char)(*(b + 1));
+				*(iv + 13) = (char)(*(b + 2));
+				*(iv + 12) = (char)(*(b + 3));
+			}
+		} else {
+			memcpy(iv,
+				(char *)(pce_sps_data->result->encr_cntr_iv),
+				sizeof(iv));
+		}
+		qce_free_req_info(pce_dev, req_info, true);
+		qce_callback(areq, NULL, iv, result_status);
+	}
+	return 0;
+}
+
+static int _f8_complete(struct qce_device *pce_dev, int req_info)
+{
+	int32_t result_status = 0;
+	uint32_t result_dump_status;
+	uint32_t result_dump_status2;
+	struct ce_request_info *preq_info;
+	struct ce_sps_data *pce_sps_data;
+	qce_comp_func_ptr_t qce_callback;
+	void *areq;
+
+	preq_info = &pce_dev->ce_request_info[req_info];
+	pce_sps_data = &preq_info->ce_sps;
+	qce_callback = preq_info->qce_cb;
+	areq = preq_info->areq;
+	if (preq_info->phy_ota_dst)
+		dma_unmap_single(pce_dev->pdev, preq_info->phy_ota_dst,
+				preq_info->ota_size, DMA_FROM_DEVICE);
+	if (preq_info->phy_ota_src)
+		dma_unmap_single(pce_dev->pdev, preq_info->phy_ota_src,
+				preq_info->ota_size, (preq_info->phy_ota_dst) ?
+				DMA_TO_DEVICE : DMA_BIDIRECTIONAL);
+
+	if (_qce_unlock_other_pipes(pce_dev, req_info)) {
+		qce_free_req_info(pce_dev, req_info, true);
+		qce_callback(areq, NULL, NULL, -ENXIO);
+		return -ENXIO;
+	}
+	result_dump_status = be32_to_cpu(pce_sps_data->result->status);
+	result_dump_status2 = be32_to_cpu(pce_sps_data->result->status2);
+
+	if ((result_dump_status & ((1 << CRYPTO_SW_ERR) | (1 << CRYPTO_AXI_ERR)
+			| (1 <<  CRYPTO_HSD_ERR)))) {
+		pr_err("f8 operation error. status %x status2 %x req %d\n",
+			result_dump_status, result_dump_status2, req_info);
+		result_status = -ENXIO;
+	} else if (pce_sps_data->consumer_status ||
+				pce_sps_data->producer_status) {
+		pr_err("f8 sps operation error. sps status %x %x\n",
+				pce_sps_data->consumer_status,
+				pce_sps_data->producer_status);
+		result_status = -ENXIO;
+	}
+	pce_sps_data->result->status = 0;
+	pce_sps_data->result->status2 = 0;
+	qce_free_req_info(pce_dev, req_info, true);
+	qce_callback(areq, NULL, NULL, result_status);
+	return 0;
+}
+
+static void _qce_sps_iovec_count_init(struct qce_device *pce_dev, int req_info)
+{
+	struct ce_sps_data *pce_sps_data = &pce_dev->ce_request_info[req_info]
+							.ce_sps;
+	pce_sps_data->in_transfer.iovec_count = 0;
+	pce_sps_data->out_transfer.iovec_count = 0;
+}
+
+static void _qce_set_flag(struct sps_transfer *sps_bam_pipe, uint32_t flag)
+{
+	struct sps_iovec *iovec;
+
+	if (sps_bam_pipe->iovec_count == 0)
+		return;
+	iovec  = sps_bam_pipe->iovec + (sps_bam_pipe->iovec_count - 1);
+	iovec->flags |= flag;
+}
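+/*
+ * The helper above tags only the most recently queued iovec. E.g.
+ * setting SPS_IOVEC_FLAG_INT on the final descriptor of a transfer
+ * yields a single completion interrupt for the whole chain, as done
+ * in _sps_producer_callback().
+ */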
+
+static int _qce_sps_add_data(dma_addr_t paddr, uint32_t len,
+		struct sps_transfer *sps_bam_pipe)
+{
+	struct sps_iovec *iovec = sps_bam_pipe->iovec +
+					sps_bam_pipe->iovec_count;
+	uint32_t data_cnt;
+
+	while (len > 0) {
+		if (sps_bam_pipe->iovec_count == QCE_MAX_NUM_DSCR) {
+			pr_err("Num of descrptor %d exceed max (%d)",
+				sps_bam_pipe->iovec_count,
+				(uint32_t)QCE_MAX_NUM_DSCR);
+			return -ENOMEM;
+		}
+		if (len > SPS_MAX_PKT_SIZE)
+			data_cnt = SPS_MAX_PKT_SIZE;
+		else
+			data_cnt = len;
+		iovec->size = data_cnt;
+		iovec->addr = SPS_GET_LOWER_ADDR(paddr);
+		iovec->flags = SPS_GET_UPPER_ADDR(paddr);
+		sps_bam_pipe->iovec_count++;
+		iovec++;
+		paddr += data_cnt;
+		len -= data_cnt;
+	}
+	return 0;
+}
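+/*
+ * The helper above splits a contiguous DMA region larger than
+ * SPS_MAX_PKT_SIZE into back-to-back iovecs. Usage sketch with
+ * hypothetical sizes (assuming SPS_MAX_PKT_SIZE is 32K):
+ *
+ *	_qce_sps_add_data(paddr, 80 * 1024, &pce_sps_data->in_transfer);
+ *
+ * queues three descriptors of 32K, 32K and 16K starting at paddr.
+ */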
+
+static int _qce_sps_add_sg_data(struct qce_device *pce_dev,
+		struct scatterlist *sg_src, uint32_t nbytes,
+		struct sps_transfer *sps_bam_pipe)
+{
+	uint32_t data_cnt, len;
+	dma_addr_t addr;
+	struct sps_iovec *iovec = sps_bam_pipe->iovec +
+						sps_bam_pipe->iovec_count;
+
+	if (!sg_src)
+		return -ENOENT;
+
+	while (nbytes > 0) {
+		len = min(nbytes, sg_dma_len(sg_src));
+		nbytes -= len;
+		addr = sg_dma_address(sg_src);
+		if (pce_dev->ce_bam_info.minor_version == 0)
+			len = ALIGN(len, pce_dev->ce_bam_info.ce_burst_size);
+		while (len > 0) {
+			if (sps_bam_pipe->iovec_count == QCE_MAX_NUM_DSCR) {
+				pr_err("Num of descrptor %d exceed max (%d)",
+						sps_bam_pipe->iovec_count,
+						(uint32_t)QCE_MAX_NUM_DSCR);
+				return -ENOMEM;
+			}
+			if (len > SPS_MAX_PKT_SIZE)
+				data_cnt = SPS_MAX_PKT_SIZE;
+			else
+				data_cnt = len;
+			iovec->size = data_cnt;
+			iovec->addr = SPS_GET_LOWER_ADDR(addr);
+			iovec->flags = SPS_GET_UPPER_ADDR(addr);
+			iovec++;
+			sps_bam_pipe->iovec_count++;
+			addr += data_cnt;
+			len -= data_cnt;
+		}
+		sg_src = sg_next(sg_src);
+	}
+	return 0;
+}
+
+static int _qce_sps_add_sg_data_off(struct qce_device *pce_dev,
+		struct scatterlist *sg_src, uint32_t nbytes, uint32_t off,
+		struct sps_transfer *sps_bam_pipe)
+{
+	uint32_t data_cnt, len;
+	dma_addr_t addr;
+	struct sps_iovec *iovec = sps_bam_pipe->iovec +
+						sps_bam_pipe->iovec_count;
+	unsigned int res_within_sg;
+
+	if (!sg_src)
+		return -ENOENT;
+	res_within_sg = sg_dma_len(sg_src);
+
+	while (off > 0) {
+		if (!sg_src) {
+			pr_err("broken sg list off %d nbytes %d\n",
+				off, nbytes);
+			return -ENOENT;
+		}
+		len = sg_dma_len(sg_src);
+		if (off < len) {
+			res_within_sg = len - off;
+			break;
+		}
+		off -= len;
+		sg_src = sg_next(sg_src);
+		if (sg_src)
+			res_within_sg = sg_dma_len(sg_src);
+	}
+	while (nbytes > 0 && sg_src) {
+		len = min(nbytes, res_within_sg);
+		nbytes -= len;
+		addr = sg_dma_address(sg_src) + off;
+		if (pce_dev->ce_bam_info.minor_version == 0)
+			len = ALIGN(len, pce_dev->ce_bam_info.ce_burst_size);
+		while (len > 0) {
+			if (sps_bam_pipe->iovec_count == QCE_MAX_NUM_DSCR) {
+				pr_err("Num of descrptor %d exceed max (%d)",
+						sps_bam_pipe->iovec_count,
+						(uint32_t)QCE_MAX_NUM_DSCR);
+				return -ENOMEM;
+			}
+			if (len > SPS_MAX_PKT_SIZE)
+				data_cnt = SPS_MAX_PKT_SIZE;
+			else
+				data_cnt = len;
+			iovec->size = data_cnt;
+			iovec->addr = SPS_GET_LOWER_ADDR(addr);
+			iovec->flags = SPS_GET_UPPER_ADDR(addr);
+			iovec++;
+			sps_bam_pipe->iovec_count++;
+			addr += data_cnt;
+			len -= data_cnt;
+		}
+		if (nbytes) {
+			sg_src = sg_next(sg_src);
+			if (!sg_src) {
+				pr_err("more data bytes %d\n", nbytes);
+				return -ENOMEM;
+			}
+			res_within_sg = sg_dma_len(sg_src);
+			off = 0;
+		}
+	}
+	return 0;
+}
+
+static int _qce_sps_add_cmd(struct qce_device *pce_dev, uint32_t flag,
+				struct qce_cmdlist_info *cmdptr,
+				struct sps_transfer *sps_bam_pipe)
+{
+	dma_addr_t  paddr = GET_PHYS_ADDR(cmdptr->cmdlist);
+	struct sps_iovec *iovec = sps_bam_pipe->iovec +
+					sps_bam_pipe->iovec_count;
+
+	/* check bounds before writing, so the iovec array cannot overflow */
+	if (sps_bam_pipe->iovec_count >= QCE_MAX_NUM_DSCR) {
+		pr_err("Num of descriptors %d exceeds max (%d)\n",
+			sps_bam_pipe->iovec_count, (uint32_t)QCE_MAX_NUM_DSCR);
+		return -ENOMEM;
+	}
+	iovec->size = cmdptr->size;
+	iovec->addr = SPS_GET_LOWER_ADDR(paddr);
+	iovec->flags = SPS_GET_UPPER_ADDR(paddr) | SPS_IOVEC_FLAG_CMD | flag;
+	sps_bam_pipe->iovec_count++;
+	return 0;
+}
+
+static int _qce_sps_transfer(struct qce_device *pce_dev, int req_info)
+{
+	int rc = 0;
+	struct ce_sps_data *pce_sps_data;
+
+	pce_sps_data = &pce_dev->ce_request_info[req_info].ce_sps;
+	pce_sps_data->out_transfer.user =
+		(void *)((uintptr_t)(CRYPTO_REQ_USER_PAT |
+					(unsigned int) req_info));
+	pce_sps_data->in_transfer.user =
+		(void *)((uintptr_t)(CRYPTO_REQ_USER_PAT |
+					(unsigned int) req_info));
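+	/*
+	 * The user token stashes the request index in each transfer so
+	 * the producer callback can recover it. Sketch of the round
+	 * trip (assuming CRYPTO_REQ_USER_PAT occupies the upper bits):
+	 *
+	 *	user = CRYPTO_REQ_USER_PAT | req_info;
+	 *	...
+	 *	if ((user & 0xffff0000) == CRYPTO_REQ_USER_PAT)
+	 *		req_info = user & 0x00ff;
+	 *
+	 * as decoded in _sps_producer_callback().
+	 */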
+	_qce_dump_descr_fifos_dbg(pce_dev, req_info);
+
+	if (pce_sps_data->in_transfer.iovec_count) {
+		rc = sps_transfer(pce_dev->ce_bam_info.consumer.pipe,
+					  &pce_sps_data->in_transfer);
+		if (rc) {
+			pr_err("sps_xfr() fail (consumer pipe=0x%lx) rc = %d\n",
+				(uintptr_t)pce_dev->ce_bam_info.consumer.pipe,
+				rc);
+			goto ret;
+		}
+	}
+	rc = sps_transfer(pce_dev->ce_bam_info.producer.pipe,
+					  &pce_sps_data->out_transfer);
+	if (rc)
+		pr_err("sps_xfr() fail (producer pipe=0x%lx) rc = %d\n",
+			(uintptr_t)pce_dev->ce_bam_info.producer.pipe, rc);
+ret:
+	if (rc)
+		_qce_dump_descr_fifos(pce_dev, req_info);
+	return rc;
+}
+
+/**
+ * Allocate and Connect a CE peripheral's SPS endpoint
+ *
+ * This function allocates an endpoint context and
+ * connects it to a memory endpoint by calling the
+ * appropriate SPS driver APIs.
+ *
+ * It also registers an SPS callback function with
+ * the SPS driver.
+ *
+ * This function should only be called once, typically
+ * during driver probe.
+ *
+ * @pce_dev - Pointer to qce_device structure
+ * @ep   - Pointer to sps endpoint data structure
+ * @is_producer - 1 means Producer endpoint
+ *		 0 means Consumer endpoint
+ *
+ * @return - 0 if successful else negative value.
+ *
+ */
+static int qce_sps_init_ep_conn(struct qce_device *pce_dev,
+				struct qce_sps_ep_conn_data *ep,
+				bool is_producer)
+{
+	int rc = 0;
+	struct sps_pipe *sps_pipe_info;
+	struct sps_connect *sps_connect_info = &ep->connect;
+	struct sps_register_event *sps_event = &ep->event;
+
+	/* Allocate endpoint context */
+	sps_pipe_info = sps_alloc_endpoint();
+	if (!sps_pipe_info) {
+		pr_err("sps_alloc_endpoint() failed!!! is_producer=%d",
+			   is_producer);
+		rc = -ENOMEM;
+		goto out;
+	}
+	/* Now save the sps pipe handle */
+	ep->pipe = sps_pipe_info;
+
+	/* Get default connection configuration for an endpoint */
+	rc = sps_get_config(sps_pipe_info, sps_connect_info);
+	if (rc) {
+		pr_err("sps_get_config() fail pipe_handle=0x%lx, rc = %d\n",
+				(uintptr_t)sps_pipe_info, rc);
+		goto get_config_err;
+	}
+
+	/* Modify the default connection configuration */
+	if (is_producer) {
+		/*
+		 * For a CE producer transfer, the source is the
+		 * CE peripheral, whereas the destination is
+		 * system memory.
+		 */
+		sps_connect_info->source = pce_dev->ce_bam_info.bam_handle;
+		sps_connect_info->destination = SPS_DEV_HANDLE_MEM;
+		/* Producer pipe will handle this connection */
+		sps_connect_info->mode = SPS_MODE_SRC;
+		sps_connect_info->options =
+			SPS_O_AUTO_ENABLE | SPS_O_DESC_DONE;
+	} else {
+		/*
+		 * For a CE consumer transfer, the source is
+		 * system memory, whereas the destination is
+		 * the CE peripheral.
+		 */
+		sps_connect_info->source = SPS_DEV_HANDLE_MEM;
+		sps_connect_info->destination = pce_dev->ce_bam_info.bam_handle;
+		sps_connect_info->mode = SPS_MODE_DEST;
+		sps_connect_info->options =
+			SPS_O_AUTO_ENABLE;
+	}
+
+	/* Producer pipe index */
+	sps_connect_info->src_pipe_index =
+				pce_dev->ce_bam_info.src_pipe_index;
+	/* Consumer pipe index */
+	sps_connect_info->dest_pipe_index =
+				pce_dev->ce_bam_info.dest_pipe_index;
+	/* Set pipe group */
+	sps_connect_info->lock_group = pce_dev->ce_bam_info.pipe_pair_index;
+	sps_connect_info->event_thresh = 0x10;
+	/*
+	 * The max number of scatter/gather buffers that can be passed
+	 * by the block layer is 32 (NR_SG). Each BAM descriptor needs
+	 * 64 bits (8 bytes), and one BAM descriptor is required per
+	 * buffer transfer, so a total of 256 (32 * 8) bytes of
+	 * descriptor FIFO would suffice. Due to a HW limitation at
+	 * least one extra descriptor's worth of memory must be
+	 * allocated (256 + 8 bytes); to stay at a power of 2, 512
+	 * bytes are allocated.
+	 */
+	sps_connect_info->desc.size = QCE_MAX_NUM_DSCR * MAX_QCE_ALLOC_BAM_REQ *
+					sizeof(struct sps_iovec);
+	if (sps_connect_info->desc.size > MAX_SPS_DESC_FIFO_SIZE)
+		sps_connect_info->desc.size = MAX_SPS_DESC_FIFO_SIZE;
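+	/*
+	 * E.g. with hypothetical values QCE_MAX_NUM_DSCR = 20,
+	 * MAX_QCE_ALLOC_BAM_REQ = 17 and an 8-byte sps_iovec, this
+	 * requests 20 * 17 * 8 = 2720 bytes, capped at
+	 * MAX_SPS_DESC_FIFO_SIZE.
+	 */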
+	sps_connect_info->desc.base = dma_alloc_coherent(pce_dev->pdev,
+					sps_connect_info->desc.size,
+					&sps_connect_info->desc.phys_base,
+					GFP_KERNEL);
+	if (sps_connect_info->desc.base == NULL) {
+		rc = -ENOMEM;
+		pr_err("Can not allocate coherent memory for sps data\n");
+		goto get_config_err;
+	}
+
+	memset(sps_connect_info->desc.base, 0x00, sps_connect_info->desc.size);
+
+	/* Establish connection between peripheral and memory endpoint */
+	rc = sps_connect(sps_pipe_info, sps_connect_info);
+	if (rc) {
+		pr_err("sps_connect() fail pipe_handle=0x%lx, rc = %d\n",
+				(uintptr_t)sps_pipe_info, rc);
+		goto sps_connect_err;
+	}
+
+	sps_event->mode = SPS_TRIGGER_CALLBACK;
+	sps_event->xfer_done = NULL;
+	sps_event->user = (void *)pce_dev;
+	if (is_producer) {
+		sps_event->options = SPS_O_EOT | SPS_O_DESC_DONE;
+		sps_event->callback = _sps_producer_callback;
+		rc = sps_register_event(ep->pipe, sps_event);
+		if (rc) {
+			pr_err("Producer callback registration failed rc=%d\n",
+									rc);
+			goto sps_connect_err;
+		}
+	} else {
+		sps_event->options = SPS_O_EOT;
+		sps_event->callback = NULL;
+	}
+
+	pr_debug("success, %s : pipe_handle=0x%lx, desc fifo base (phy) = 0x%pK\n",
+		is_producer ? "PRODUCER(RX/OUT)" : "CONSUMER(TX/IN)",
+		(uintptr_t)sps_pipe_info, &sps_connect_info->desc.phys_base);
+	goto out;
+
+sps_connect_err:
+	dma_free_coherent(pce_dev->pdev,
+			sps_connect_info->desc.size,
+			sps_connect_info->desc.base,
+			sps_connect_info->desc.phys_base);
+get_config_err:
+	sps_free_endpoint(sps_pipe_info);
+out:
+	return rc;
+}
+
+/**
+ * Disconnect and Deallocate a CE peripheral's SPS endpoint
+ *
+ * This function disconnects the endpoint and deallocates
+ * the endpoint context.
+ *
+ * This function should only be called once, typically
+ * during driver remove.
+ *
+ * @pce_dev - Pointer to qce_device structure
+ * @ep   - Pointer to sps endpoint data structure
+ *
+ */
+static void qce_sps_exit_ep_conn(struct qce_device *pce_dev,
+				struct qce_sps_ep_conn_data *ep)
+{
+	struct sps_pipe *sps_pipe_info = ep->pipe;
+	struct sps_connect *sps_connect_info = &ep->connect;
+
+	sps_disconnect(sps_pipe_info);
+	dma_free_coherent(pce_dev->pdev,
+			sps_connect_info->desc.size,
+			sps_connect_info->desc.base,
+			sps_connect_info->desc.phys_base);
+	sps_free_endpoint(sps_pipe_info);
+}
+
+static void qce_sps_release_bam(struct qce_device *pce_dev)
+{
+	struct bam_registration_info *pbam;
+
+	mutex_lock(&bam_register_lock);
+	pbam = pce_dev->pbam;
+	if (pbam == NULL)
+		goto ret;
+
+	pbam->cnt--;
+	if (pbam->cnt > 0)
+		goto ret;
+
+	if (pce_dev->ce_bam_info.bam_handle) {
+		sps_deregister_bam_device(pce_dev->ce_bam_info.bam_handle);
+
+		pr_debug("deregister bam handle 0x%lx\n",
+					pce_dev->ce_bam_info.bam_handle);
+		pce_dev->ce_bam_info.bam_handle = 0;
+	}
+	iounmap(pbam->bam_iobase);
+	pr_debug("delete bam 0x%x\n", pbam->bam_mem);
+	list_del(&pbam->qlist);
+	kfree(pbam);
+
+ret:
+	pce_dev->pbam = NULL;
+	mutex_unlock(&bam_register_lock);
+}
+
+static int qce_sps_get_bam(struct qce_device *pce_dev)
+{
+	int rc = 0;
+	struct sps_bam_props bam = {0};
+	struct bam_registration_info *pbam = NULL;
+	struct bam_registration_info *p;
+	uint32_t bam_cfg = 0;
+
+	mutex_lock(&bam_register_lock);
+
+	list_for_each_entry(p, &qce50_bam_list, qlist) {
+		if (p->bam_mem == pce_dev->bam_mem) {
+			pbam = p;  /* found */
+			break;
+		}
+	}
+
+	if (pbam) {
+		pr_debug("found bam 0x%x\n", pbam->bam_mem);
+		pbam->cnt++;
+		pce_dev->ce_bam_info.bam_handle =  pbam->handle;
+		pce_dev->ce_bam_info.bam_mem = pbam->bam_mem;
+		pce_dev->ce_bam_info.bam_iobase = pbam->bam_iobase;
+		pce_dev->pbam = pbam;
+		pce_dev->support_cmd_dscr = pbam->support_cmd_dscr;
+		goto ret;
+	}
+
+	pbam = kzalloc(sizeof(struct  bam_registration_info), GFP_KERNEL);
+	if (!pbam) {
+		pr_err("qce50 Memory allocation of bam FAIL, error %ld\n",
+						PTR_ERR(pbam));
+
+		rc = -ENOMEM;
+		goto ret;
+	}
+	pbam->cnt = 1;
+	pbam->bam_mem = pce_dev->bam_mem;
+	pbam->bam_iobase = ioremap_nocache(pce_dev->bam_mem,
+					pce_dev->bam_mem_size);
+	if (!pbam->bam_iobase) {
+		kfree(pbam);
+		rc = -ENOMEM;
+		pr_err("Can not map BAM io memory\n");
+		goto ret;
+	}
+	pce_dev->ce_bam_info.bam_mem = pbam->bam_mem;
+	pce_dev->ce_bam_info.bam_iobase = pbam->bam_iobase;
+	pbam->handle = 0;
+	pr_debug("allocate bam 0x%x\n", pbam->bam_mem);
+	bam_cfg = readl_relaxed(pce_dev->ce_bam_info.bam_iobase +
+					CRYPTO_BAM_CNFG_BITS_REG);
+	pbam->support_cmd_dscr = (bam_cfg & CRYPTO_BAM_CD_ENABLE_MASK) ?
+					true : false;
+	if (pbam->support_cmd_dscr == false) {
+		pr_info("qce50 doesn't support command descriptors, bam_cfg=0x%x\n",
+							bam_cfg);
+		pce_dev->no_get_around = false;
+	}
+	pce_dev->support_cmd_dscr = pbam->support_cmd_dscr;
+
+	bam.phys_addr = pce_dev->ce_bam_info.bam_mem;
+	bam.virt_addr = pce_dev->ce_bam_info.bam_iobase;
+
+	/*
+	 * This event threshold value is only significant for BAM-to-BAM
+	 * transfer. It's ignored for BAM-to-System mode transfer.
+	 */
+	bam.event_threshold = 0x10;	/* Pipe event threshold */
+	/*
+	 * This threshold controls when the BAM publishes
+	 * the descriptor size on the sideband interface.
+	 * SPS HW will only be used when the
+	 * data transfer size > 64 bytes.
+	 */
+	bam.summing_threshold = 64;
+	/* SPS driver will handle the crypto BAM IRQ */
+	bam.irq = (u32)pce_dev->ce_bam_info.bam_irq;
+	/*
+	 * Set flag to indicate BAM global device control is managed
+	 * remotely.
+	 */
+	if ((pce_dev->support_cmd_dscr == false) || (pce_dev->is_shared))
+		bam.manage = SPS_BAM_MGR_DEVICE_REMOTE;
+	else
+		bam.manage = SPS_BAM_MGR_LOCAL;
+
+	bam.ee = pce_dev->ce_bam_info.bam_ee;
+	bam.ipc_loglevel = QCE_BAM_DEFAULT_IPC_LOGLVL;
+	bam.options |= SPS_BAM_CACHED_WP;
+	pr_debug("bam physical base=0x%lx\n", (uintptr_t)bam.phys_addr);
+	pr_debug("bam virtual base=0x%pK\n", bam.virt_addr);
+
+	/* Register CE Peripheral BAM device to SPS driver */
+	rc = sps_register_bam_device(&bam, &pbam->handle);
+	if (rc) {
+		pr_err("sps_register_bam_device() failed! err=%d", rc);
+		rc = -EIO;
+		iounmap(pbam->bam_iobase);
+		kfree(pbam);
+		goto ret;
+	}
+
+	pce_dev->pbam = pbam;
+	list_add_tail(&pbam->qlist, &qce50_bam_list);
+	pce_dev->ce_bam_info.bam_handle =  pbam->handle;
+
+ret:
+	mutex_unlock(&bam_register_lock);
+
+	return rc;
+}
+
+/**
+ * Initialize SPS HW connected with CE core
+ *
+ * This function registers BAM HW resources with the
+ * SPS driver and then initializes the two SPS endpoints.
+ *
+ * This function should only be called once, typically
+ * during driver probe.
+ *
+ * @pce_dev - Pointer to qce_device structure
+ *
+ * @return - 0 if successful else negative value.
+ *
+ */
+static int qce_sps_init(struct qce_device *pce_dev)
+{
+	int rc = 0;
+
+	rc = qce_sps_get_bam(pce_dev);
+	if (rc)
+		return rc;
+	pr_debug("BAM device registered. bam_handle=0x%lx\n",
+		pce_dev->ce_bam_info.bam_handle);
+
+	rc = qce_sps_init_ep_conn(pce_dev,
+			&pce_dev->ce_bam_info.producer, true);
+	if (rc)
+		goto sps_connect_producer_err;
+	rc = qce_sps_init_ep_conn(pce_dev,
+			&pce_dev->ce_bam_info.consumer, false);
+	if (rc)
+		goto sps_connect_consumer_err;
+
+	pr_info(" Qualcomm MSM CE-BAM at 0x%016llx irq %d\n",
+		(unsigned long long)pce_dev->ce_bam_info.bam_mem,
+		(unsigned int)pce_dev->ce_bam_info.bam_irq);
+	return rc;
+
+sps_connect_consumer_err:
+	qce_sps_exit_ep_conn(pce_dev, &pce_dev->ce_bam_info.producer);
+sps_connect_producer_err:
+	qce_sps_release_bam(pce_dev);
+	return rc;
+}
+
+static inline int qce_alloc_req_info(struct qce_device *pce_dev)
+{
+	int i;
+	int request_index = pce_dev->ce_request_index;
+
+	for (i = 0; i < MAX_QCE_BAM_REQ; i++) {
+		request_index++;
+		if (request_index >= MAX_QCE_BAM_REQ)
+			request_index = 0;
+		if (atomic_xchg(&pce_dev->ce_request_info[request_index].
+						in_use, true) == false) {
+			pce_dev->ce_request_index = request_index;
+			return request_index;
+		}
+	}
+	pr_warn("pcedev %d no reqs available no_of_queued_req %d\n",
+			pce_dev->dev_no, atomic_read(
+					&pce_dev->no_of_queued_req));
+	return -EBUSY;
+}
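+/*
+ * qce_alloc_req_info() scans the slot table round-robin from the last
+ * allocated index and claims the first free slot with atomic_xchg(),
+ * so concurrent callers can never pick the same slot. Minimal usage
+ * sketch (error handling elided):
+ *
+ *	req_info = qce_alloc_req_info(pce_dev);
+ *	if (req_info < 0)
+ *		return req_info;
+ *	... program and submit the request ...
+ *	(on completion) qce_free_req_info(pce_dev, req_info, true);
+ */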
+
+static inline void qce_free_req_info(struct qce_device *pce_dev, int req_info,
+		bool is_complete)
+{
+	pce_dev->ce_request_info[req_info].xfer_type = QCE_XFER_TYPE_LAST;
+	if (atomic_xchg(&pce_dev->ce_request_info[req_info].in_use,
+						false) == true) {
+		if (req_info < MAX_QCE_BAM_REQ && is_complete)
+			atomic_dec(&pce_dev->no_of_queued_req);
+	} else {
+		pr_warn("request info %d already freed\n", req_info);
+	}
+}
+
+static void print_notify_debug(struct sps_event_notify *notify)
+{
+	phys_addr_t addr =
+		DESC_FULL_ADDR((phys_addr_t) notify->data.transfer.iovec.flags,
+				  notify->data.transfer.iovec.addr);
+	pr_debug("sps ev_id=%d, addr=0x%pa, size=0x%x, flags=0x%x user=0x%pK\n",
+			notify->event_id, &addr,
+			notify->data.transfer.iovec.size,
+			notify->data.transfer.iovec.flags,
+			notify->data.transfer.user);
+}
+
+static void _qce_req_complete(struct qce_device *pce_dev, unsigned int req_info)
+{
+	struct ce_request_info *preq_info;
+
+	preq_info = &pce_dev->ce_request_info[req_info];
+
+	switch (preq_info->xfer_type) {
+	case QCE_XFER_CIPHERING:
+		_ablk_cipher_complete(pce_dev, req_info);
+		break;
+	case QCE_XFER_HASHING:
+		_sha_complete(pce_dev, req_info);
+		break;
+	case QCE_XFER_AEAD:
+		_aead_complete(pce_dev, req_info);
+		break;
+	case QCE_XFER_F8:
+		_f8_complete(pce_dev, req_info);
+		break;
+	case QCE_XFER_F9:
+		_f9_complete(pce_dev, req_info);
+		break;
+	default:
+		qce_free_req_info(pce_dev, req_info, true);
+		break;
+	}
+}
+
+static void qce_multireq_timeout(unsigned long data)
+{
+	struct qce_device *pce_dev = (struct qce_device *)data;
+	int ret = 0;
+	int last_seq;
+	unsigned long flags;
+
+	last_seq = atomic_read(&pce_dev->bunch_cmd_seq);
+	if (last_seq == 0 ||
+		last_seq != atomic_read(&pce_dev->last_intr_seq)) {
+		atomic_set(&pce_dev->last_intr_seq, last_seq);
+		mod_timer(&(pce_dev->timer), (jiffies + DELAY_IN_JIFFIES));
+		return;
+	}
+	/* last bunch mode command time out */
+
+	/*
+	 * Interrupts are disabled from here until the dummy request has
+	 * finished its sps transfer and the owner is set back to none,
+	 * so this path cannot be preempted or interrupted. If a BAM
+	 * interrupt fired in between and the completion callback ran,
+	 * the client driver could issue a new request and a deadlock
+	 * could occur.
+	 */
+	local_irq_save(flags);
+	if (cmpxchg(&pce_dev->owner, QCE_OWNER_NONE, QCE_OWNER_TIMEOUT)
+							!= QCE_OWNER_NONE) {
+		local_irq_restore(flags);
+		mod_timer(&(pce_dev->timer), (jiffies + DELAY_IN_JIFFIES));
+		return;
+	}
+
+	ret = qce_dummy_req(pce_dev);
+	if (ret)
+		pr_warn("pcedev %d: Failed to insert dummy req\n",
+				pce_dev->dev_no);
+	cmpxchg(&pce_dev->owner, QCE_OWNER_TIMEOUT, QCE_OWNER_NONE);
+	pce_dev->mode = IN_INTERRUPT_MODE;
+	local_irq_restore(flags);
+
+	del_timer(&(pce_dev->timer));
+	pce_dev->qce_stats.no_of_timeouts++;
+	pr_debug("pcedev %d mode switch to INTR\n", pce_dev->dev_no);
+}
+
+void qce_get_driver_stats(void *handle)
+{
+	struct qce_device *pce_dev = (struct qce_device *) handle;
+
+	if (!_qce50_disp_stats)
+		return;
+	pr_info("Engine %d timeout occuured %d\n", pce_dev->dev_no,
+			pce_dev->qce_stats.no_of_timeouts);
+	pr_info("Engine %d dummy request inserted %d\n", pce_dev->dev_no,
+			pce_dev->qce_stats.no_of_dummy_reqs);
+	if (pce_dev->mode)
+		pr_info("Engine %d is in BUNCH MODE\n", pce_dev->dev_no);
+	else
+		pr_info("Engine %d is in INTERRUPT MODE\n", pce_dev->dev_no);
+	pr_info("Engine %d outstanding request %d\n", pce_dev->dev_no,
+			atomic_read(&pce_dev->no_of_queued_req));
+}
+EXPORT_SYMBOL(qce_get_driver_stats);
+
+void qce_clear_driver_stats(void *handle)
+{
+	struct qce_device *pce_dev = (struct qce_device *) handle;
+
+	pce_dev->qce_stats.no_of_timeouts = 0;
+	pce_dev->qce_stats.no_of_dummy_reqs = 0;
+}
+EXPORT_SYMBOL(qce_clear_driver_stats);
+
+static void _sps_producer_callback(struct sps_event_notify *notify)
+{
+	struct qce_device *pce_dev = (struct qce_device *)notify->user;
+	int rc = 0;
+	unsigned int req_info;
+	struct ce_sps_data *pce_sps_data;
+	struct ce_request_info *preq_info;
+
+	print_notify_debug(notify);
+
+	req_info = (unsigned int)((uintptr_t)notify->data.transfer.user);
+	if ((req_info & 0xffff0000) != CRYPTO_REQ_USER_PAT) {
+		pr_warn("request information %d out of range\n", req_info);
+		return;
+	}
+
+	req_info = req_info & 0x00ff;
+	if (req_info >= MAX_QCE_ALLOC_BAM_REQ) {
+		pr_warn("request information %d out of range\n", req_info);
+		return;
+	}
+
+	preq_info = &pce_dev->ce_request_info[req_info];
+
+	pce_sps_data = &preq_info->ce_sps;
+	if ((preq_info->xfer_type == QCE_XFER_CIPHERING ||
+		preq_info->xfer_type == QCE_XFER_AEAD) &&
+			pce_sps_data->producer_state == QCE_PIPE_STATE_IDLE) {
+		pce_sps_data->producer_state = QCE_PIPE_STATE_COMP;
+		pce_sps_data->out_transfer.iovec_count = 0;
+		_qce_sps_add_data(GET_PHYS_ADDR(pce_sps_data->result_dump),
+					CRYPTO_RESULT_DUMP_SIZE,
+					  &pce_sps_data->out_transfer);
+		_qce_set_flag(&pce_sps_data->out_transfer,
+				SPS_IOVEC_FLAG_INT);
+		rc = sps_transfer(pce_dev->ce_bam_info.producer.pipe,
+					  &pce_sps_data->out_transfer);
+		if (rc) {
+			pr_err("sps_xfr() fail (producer pipe=0x%lx) rc = %d\n",
+				(uintptr_t)pce_dev->ce_bam_info.producer.pipe,
+				rc);
+		}
+		return;
+	}
+
+	_qce_req_complete(pce_dev, req_info);
+}
+
+/**
+ * De-initialize SPS HW connected with CE core
+ *
+ * This function deinitializes the SPS endpoints and then
+ * deregisters the BAM resources from the SPS driver.
+ *
+ * This function should only be called once, typically
+ * during driver remove.
+ *
+ * @pce_dev - Pointer to qce_device structure
+ *
+ */
+static void qce_sps_exit(struct qce_device *pce_dev)
+{
+	qce_sps_exit_ep_conn(pce_dev, &pce_dev->ce_bam_info.consumer);
+	qce_sps_exit_ep_conn(pce_dev, &pce_dev->ce_bam_info.producer);
+	qce_sps_release_bam(pce_dev);
+}
+
+static void qce_add_cmd_element(struct qce_device *pdev,
+			struct sps_command_element **cmd_ptr, u32 addr,
+			u32 data, struct sps_command_element **populate)
+{
+	(*cmd_ptr)->addr = (uint32_t)(addr + pdev->phy_iobase);
+	(*cmd_ptr)->command = 0;
+	(*cmd_ptr)->data = data;
+	(*cmd_ptr)->mask = 0xFFFFFFFF;
+	(*cmd_ptr)->reserved = 0;
+	if (populate != NULL)
+		*populate = *cmd_ptr;
+	(*cmd_ptr)++;
+}
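+/*
+ * Each call above emits one SPS command element (a masked register
+ * write of `data` to `addr`, offset from the CE physical base) and
+ * advances *cmd_ptr; a non-NULL `populate` hands the element back so
+ * callers can patch its data per request. Sketch (hypothetical
+ * variables):
+ *
+ *	struct sps_command_element *ce = first_free_element;
+ *	struct sps_command_element *seg_size;
+ *
+ *	qce_add_cmd_element(pdev, &ce, CRYPTO_SEG_SIZE_REG, 0, &seg_size);
+ *	...
+ *	seg_size->data = totallen;	// patched later, per request
+ */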
+
+static int _setup_cipher_aes_cmdlistptrs(struct qce_device *pdev, int cri_index,
+		unsigned char **pvaddr, enum qce_cipher_mode_enum mode,
+		bool key_128)
+{
+	struct sps_command_element *ce_vaddr;
+	uintptr_t ce_vaddr_start;
+	struct qce_cmdlistptr_ops *cmdlistptr;
+	struct qce_cmdlist_info *pcl_info = NULL;
+	int i = 0;
+	uint32_t encr_cfg = 0;
+	uint32_t key_reg = 0;
+	uint32_t xts_key_reg = 0;
+	uint32_t iv_reg = 0;
+
+	cmdlistptr = &pdev->ce_request_info[cri_index].ce_sps.cmdlistptr;
+	*pvaddr = (unsigned char *)ALIGN(((uintptr_t)(*pvaddr)),
+					pdev->ce_bam_info.ce_burst_size);
+	ce_vaddr = (struct sps_command_element *)(*pvaddr);
+	ce_vaddr_start = (uintptr_t)(*pvaddr);
+	/*
+	 * Designate chunks of the allocated memory to various
+	 * command list pointers related to AES cipher operations defined
+	 * in ce_cmdlistptrs_ops structure.
+	 */
+	switch (mode) {
+	case QCE_MODE_CBC:
+	case QCE_MODE_CTR:
+		if (key_128 == true) {
+			cmdlistptr->cipher_aes_128_cbc_ctr.cmdlist =
+						(uintptr_t)ce_vaddr;
+			pcl_info = &(cmdlistptr->cipher_aes_128_cbc_ctr);
+			if (mode == QCE_MODE_CBC)
+				encr_cfg = pdev->reg.encr_cfg_aes_cbc_128;
+			else
+				encr_cfg = pdev->reg.encr_cfg_aes_ctr_128;
+			iv_reg = 4;
+			key_reg = 4;
+			xts_key_reg = 0;
+		} else {
+			cmdlistptr->cipher_aes_256_cbc_ctr.cmdlist =
+						(uintptr_t)ce_vaddr;
+			pcl_info = &(cmdlistptr->cipher_aes_256_cbc_ctr);
+
+			if (mode == QCE_MODE_CBC)
+				encr_cfg = pdev->reg.encr_cfg_aes_cbc_256;
+			else
+				encr_cfg = pdev->reg.encr_cfg_aes_ctr_256;
+			iv_reg = 4;
+			key_reg = 8;
+			xts_key_reg = 0;
+		}
+	break;
+	case QCE_MODE_ECB:
+		if (key_128 == true) {
+			cmdlistptr->cipher_aes_128_ecb.cmdlist =
+						(uintptr_t)ce_vaddr;
+			pcl_info = &(cmdlistptr->cipher_aes_128_ecb);
+
+			encr_cfg = pdev->reg.encr_cfg_aes_ecb_128;
+			iv_reg = 0;
+			key_reg = 4;
+			xts_key_reg = 0;
+		} else {
+			cmdlistptr->cipher_aes_256_ecb.cmdlist =
+						(uintptr_t)ce_vaddr;
+			pcl_info = &(cmdlistptr->cipher_aes_256_ecb);
+
+			encr_cfg = pdev->reg.encr_cfg_aes_ecb_256;
+			iv_reg = 0;
+			key_reg = 8;
+			xts_key_reg = 0;
+		}
+	break;
+	case QCE_MODE_XTS:
+		if (key_128 == true) {
+			cmdlistptr->cipher_aes_128_xts.cmdlist =
+						(uintptr_t)ce_vaddr;
+			pcl_info = &(cmdlistptr->cipher_aes_128_xts);
+
+			encr_cfg = pdev->reg.encr_cfg_aes_xts_128;
+			iv_reg = 4;
+			key_reg = 4;
+			xts_key_reg = 4;
+		} else {
+			cmdlistptr->cipher_aes_256_xts.cmdlist =
+						(uintptr_t)ce_vaddr;
+			pcl_info = &(cmdlistptr->cipher_aes_256_xts);
+
+			encr_cfg = pdev->reg.encr_cfg_aes_xts_256;
+			iv_reg = 4;
+			key_reg = 8;
+			xts_key_reg = 8;
+		}
+	break;
+	default:
+		pr_err("Unknown mode of operation %d received, exiting now\n",
+			mode);
+		return -EINVAL;
+	}
+
+	/* clear status register */
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_STATUS_REG, 0, NULL);
+
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
+			pdev->reg.crypto_cfg_be, &pcl_info->crypto_cfg);
+
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_SEG_SIZE_REG, 0,
+						&pcl_info->seg_size);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_CFG_REG, encr_cfg,
+						&pcl_info->encr_seg_cfg);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_SIZE_REG, 0,
+						&pcl_info->encr_seg_size);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_START_REG, 0,
+						&pcl_info->encr_seg_start);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR_MASK_REG,
+				(uint32_t)0xffffffff, &pcl_info->encr_mask);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR_MASK_REG0,
+				(uint32_t)0xffffffff, NULL);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR_MASK_REG1,
+				(uint32_t)0xffffffff, NULL);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR_MASK_REG2,
+				(uint32_t)0xffffffff, NULL);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_CFG_REG, 0,
+						&pcl_info->auth_seg_cfg);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_KEY0_REG, 0,
+						&pcl_info->encr_key);
+	for (i = 1; i < key_reg; i++)
+		qce_add_cmd_element(pdev, &ce_vaddr,
+				(CRYPTO_ENCR_KEY0_REG + i * sizeof(uint32_t)),
+				0, NULL);
+	if (xts_key_reg) {
+		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_XTS_KEY0_REG,
+					0, &pcl_info->encr_xts_key);
+		for (i = 1; i < xts_key_reg; i++)
+			qce_add_cmd_element(pdev, &ce_vaddr,
+				(CRYPTO_ENCR_XTS_KEY0_REG +
+						i * sizeof(uint32_t)), 0, NULL);
+		qce_add_cmd_element(pdev, &ce_vaddr,
+				CRYPTO_ENCR_XTS_DU_SIZE_REG, 0,
+					&pcl_info->encr_xts_du_size);
+	}
+	if (iv_reg) {
+		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR0_IV0_REG, 0,
+						&pcl_info->encr_cntr_iv);
+		for (i = 1; i < iv_reg; i++)
+			qce_add_cmd_element(pdev, &ce_vaddr,
+				(CRYPTO_CNTR0_IV0_REG + i * sizeof(uint32_t)),
+				0, NULL);
+	}
+	/* Add dummy to align size to burst-size multiple */
+	if (mode == QCE_MODE_XTS) {
+		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_SIZE_REG,
+						0, &pcl_info->auth_seg_size);
+	} else {
+		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_SIZE_REG,
+						0, &pcl_info->auth_seg_size);
+		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_START_REG,
+						0, &pcl_info->auth_seg_size);
+	}
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
+			pdev->reg.crypto_cfg_le, NULL);
+
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_GOPROC_REG,
+			((1 << CRYPTO_GO) | (1 << CRYPTO_RESULTS_DUMP) |
+			(1 << CRYPTO_CLR_CNTXT)), &pcl_info->go_proc);
+
+	pcl_info->size = (uintptr_t)ce_vaddr - (uintptr_t)ce_vaddr_start;
+	*pvaddr = (unsigned char *) ce_vaddr;
+
+	return 0;
+}
+
+static int _setup_cipher_des_cmdlistptrs(struct qce_device *pdev, int cri_index,
+		unsigned char **pvaddr, enum qce_cipher_alg_enum alg,
+		bool mode_cbc)
+{
+	struct sps_command_element *ce_vaddr;
+	uintptr_t ce_vaddr_start;
+	struct qce_cmdlistptr_ops *cmdlistptr;
+	struct qce_cmdlist_info *pcl_info = NULL;
+	int i = 0;
+	uint32_t encr_cfg = 0;
+	uint32_t key_reg = 0;
+	uint32_t iv_reg = 0;
+
+	cmdlistptr = &pdev->ce_request_info[cri_index].ce_sps.cmdlistptr;
+	*pvaddr = (unsigned char *)ALIGN(((uintptr_t)(*pvaddr)),
+					pdev->ce_bam_info.ce_burst_size);
+	ce_vaddr = (struct sps_command_element *)(*pvaddr);
+	ce_vaddr_start = (uintptr_t)(*pvaddr);
+
+	/*
+	 * Designate chunks of the allocated memory to various
+	 * command list pointers related to cipher operations defined
+	 * in ce_cmdlistptrs_ops structure.
+	 */
+	switch (alg) {
+	case CIPHER_ALG_DES:
+		if (mode_cbc) {
+			cmdlistptr->cipher_des_cbc.cmdlist =
+						(uintptr_t)ce_vaddr;
+			pcl_info = &(cmdlistptr->cipher_des_cbc);
+
+			encr_cfg = pdev->reg.encr_cfg_des_cbc;
+			iv_reg = 2;
+			key_reg = 2;
+		} else {
+			cmdlistptr->cipher_des_ecb.cmdlist =
+						(uintptr_t)ce_vaddr;
+			pcl_info = &(cmdlistptr->cipher_des_ecb);
+
+			encr_cfg = pdev->reg.encr_cfg_des_ecb;
+			iv_reg = 0;
+			key_reg = 2;
+		}
+	break;
+	case CIPHER_ALG_3DES:
+		if (mode_cbc) {
+			cmdlistptr->cipher_3des_cbc.cmdlist =
+						(uintptr_t)ce_vaddr;
+			pcl_info = &(cmdlistptr->cipher_3des_cbc);
+
+			encr_cfg = pdev->reg.encr_cfg_3des_cbc;
+			iv_reg = 2;
+			key_reg = 6;
+		} else {
+			cmdlistptr->cipher_3des_ecb.cmdlist =
+						(uintptr_t)ce_vaddr;
+			pcl_info = &(cmdlistptr->cipher_3des_ecb);
+
+			encr_cfg = pdev->reg.encr_cfg_3des_ecb;
+			iv_reg = 0;
+			key_reg = 6;
+		}
+	break;
+	default:
+		pr_err("Unknown algorithms %d received, exiting now\n", alg);
+		return -EINVAL;
+	break;
+	}
+
+	/* clear status register */
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_STATUS_REG, 0, NULL);
+
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
+			pdev->reg.crypto_cfg_be, &pcl_info->crypto_cfg);
+
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_SEG_SIZE_REG, 0,
+						&pcl_info->seg_size);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_CFG_REG, encr_cfg,
+						&pcl_info->encr_seg_cfg);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_SIZE_REG, 0,
+						&pcl_info->encr_seg_size);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_START_REG, 0,
+						&pcl_info->encr_seg_start);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_CFG_REG, 0,
+						&pcl_info->auth_seg_cfg);
+
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_KEY0_REG, 0,
+						&pcl_info->encr_key);
+	for (i = 1; i < key_reg; i++)
+		qce_add_cmd_element(pdev, &ce_vaddr,
+				(CRYPTO_ENCR_KEY0_REG + i * sizeof(uint32_t)),
+				0, NULL);
+	if (iv_reg) {
+		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR0_IV0_REG, 0,
+						&pcl_info->encr_cntr_iv);
+		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR1_IV1_REG, 0,
+								NULL);
+	}
+
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
+			pdev->reg.crypto_cfg_le, NULL);
+
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_GOPROC_REG,
+			((1 << CRYPTO_GO) | (1 << CRYPTO_RESULTS_DUMP) |
+			(1 << CRYPTO_CLR_CNTXT)), &pcl_info->go_proc);
+
+	pcl_info->size = (uintptr_t)ce_vaddr - (uintptr_t)ce_vaddr_start;
+	*pvaddr = (unsigned char *) ce_vaddr;
+
+	return 0;
+}
+
+static int _setup_cipher_null_cmdlistptrs(struct qce_device *pdev,
+		int cri_index, unsigned char **pvaddr)
+{
+	struct sps_command_element *ce_vaddr;
+	uintptr_t ce_vaddr_start;
+	struct qce_cmdlistptr_ops *cmdlistptr = &pdev->ce_request_info
+						[cri_index].ce_sps.cmdlistptr;
+	struct qce_cmdlist_info *pcl_info = NULL;
+
+	*pvaddr = (unsigned char *)ALIGN(((uintptr_t)(*pvaddr)),
+					pdev->ce_bam_info.ce_burst_size);
+	ce_vaddr_start = (uintptr_t)(*pvaddr);
+	ce_vaddr = (struct sps_command_element *)(*pvaddr);
+
+	cmdlistptr->cipher_null.cmdlist = (uintptr_t)ce_vaddr;
+	pcl_info = &(cmdlistptr->cipher_null);
+
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_SEG_SIZE_REG,
+			pdev->ce_bam_info.ce_burst_size, NULL);
+
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_CFG_REG,
+			pdev->reg.encr_cfg_aes_ecb_128, NULL);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_SIZE_REG, 0,
+			NULL);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_START_REG, 0,
+			NULL);
+
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_CFG_REG,
+					0, NULL);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_SIZE_REG,
+			 0, NULL);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_START_REG, 0,
+						NULL);
+
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_GOPROC_REG,
+			((1 << CRYPTO_GO) | (1 << CRYPTO_RESULTS_DUMP) |
+			(1 << CRYPTO_CLR_CNTXT)), &pcl_info->go_proc);
+
+	pcl_info->size = (uintptr_t)ce_vaddr - (uintptr_t)ce_vaddr_start;
+	*pvaddr = (unsigned char *) ce_vaddr;
+	return 0;
+}
+
+static int _setup_auth_cmdlistptrs(struct qce_device *pdev, int cri_index,
+		unsigned char **pvaddr, enum qce_hash_alg_enum alg,
+		bool key_128)
+{
+	struct sps_command_element *ce_vaddr;
+	uintptr_t ce_vaddr_start;
+	struct qce_cmdlistptr_ops *cmdlistptr;
+	struct qce_cmdlist_info *pcl_info = NULL;
+	int i = 0;
+	uint32_t key_reg = 0;
+	uint32_t auth_cfg = 0;
+	uint32_t iv_reg = 0;
+
+	cmdlistptr = &pdev->ce_request_info[cri_index].ce_sps.cmdlistptr;
+	*pvaddr = (unsigned char *)ALIGN(((uintptr_t)(*pvaddr)),
+					pdev->ce_bam_info.ce_burst_size);
+	ce_vaddr_start = (uintptr_t)(*pvaddr);
+	ce_vaddr = (struct sps_command_element *)(*pvaddr);
+
+	/*
+	 * Designate chunks of the allocated memory to various
+	 * command list pointers related to authentication operations
+	 * defined in ce_cmdlistptrs_ops structure.
+	 */
+	switch (alg) {
+	case QCE_HASH_SHA1:
+		cmdlistptr->auth_sha1.cmdlist = (uintptr_t)ce_vaddr;
+		pcl_info = &(cmdlistptr->auth_sha1);
+
+		auth_cfg = pdev->reg.auth_cfg_sha1;
+		iv_reg = 5;
+
+		/* clear status register */
+		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_STATUS_REG,
+					0, NULL);
+
+		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
+			pdev->reg.crypto_cfg_be, &pcl_info->crypto_cfg);
+
+	break;
+	case QCE_HASH_SHA256:
+		cmdlistptr->auth_sha256.cmdlist = (uintptr_t)ce_vaddr;
+		pcl_info = &(cmdlistptr->auth_sha256);
+
+		auth_cfg = pdev->reg.auth_cfg_sha256;
+		iv_reg = 8;
+
+		/* clear status register */
+		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_STATUS_REG,
+					0, NULL);
+
+		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
+			pdev->reg.crypto_cfg_be, &pcl_info->crypto_cfg);
+		/* 1 dummy write */
+		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_SIZE_REG,
+								0, NULL);
+	break;
+	case QCE_HASH_SHA1_HMAC:
+		cmdlistptr->auth_sha1_hmac.cmdlist = (uintptr_t)ce_vaddr;
+		pcl_info = &(cmdlistptr->auth_sha1_hmac);
+
+		auth_cfg = pdev->reg.auth_cfg_hmac_sha1;
+		key_reg = 16;
+		iv_reg = 5;
+
+		/* clear status register */
+		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_STATUS_REG,
+					0, NULL);
+
+		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
+			pdev->reg.crypto_cfg_be, &pcl_info->crypto_cfg);
+	break;
+	case QCE_HASH_SHA256_HMAC:
+		cmdlistptr->auth_sha256_hmac.cmdlist = (uintptr_t)ce_vaddr;
+		pcl_info = &(cmdlistptr->auth_sha256_hmac);
+
+		auth_cfg = pdev->reg.auth_cfg_hmac_sha256;
+		key_reg = 16;
+		iv_reg = 8;
+
+		/* clear status register */
+		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_STATUS_REG, 0,
+					NULL);
+
+		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
+			pdev->reg.crypto_cfg_be, &pcl_info->crypto_cfg);
+		/* 1 dummy write */
+		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_SIZE_REG,
+								0, NULL);
+	break;
+	case QCE_HASH_AES_CMAC:
+		if (key_128) {
+			cmdlistptr->auth_aes_128_cmac.cmdlist =
+						(uintptr_t)ce_vaddr;
+			pcl_info = &(cmdlistptr->auth_aes_128_cmac);
+
+			auth_cfg = pdev->reg.auth_cfg_cmac_128;
+			key_reg = 4;
+		} else {
+			cmdlistptr->auth_aes_256_cmac.cmdlist =
+						(uintptr_t)ce_vaddr;
+			pcl_info = &(cmdlistptr->auth_aes_256_cmac);
+
+			auth_cfg = pdev->reg.auth_cfg_cmac_256;
+			key_reg = 8;
+		}
+
+		/* clear status register */
+		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_STATUS_REG, 0,
+					NULL);
+
+		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
+			pdev->reg.crypto_cfg_be, &pcl_info->crypto_cfg);
+		/* 1 dummy write */
+		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_SIZE_REG,
+								0, NULL);
+	break;
+	default:
+		pr_err("Unknown algorithm %d received, exiting now\n", alg);
+		return -EINVAL;
+	}
+
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_SEG_SIZE_REG, 0,
+						&pcl_info->seg_size);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_CFG_REG, 0,
+						&pcl_info->encr_seg_cfg);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_CFG_REG,
+					auth_cfg, &pcl_info->auth_seg_cfg);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_SIZE_REG, 0,
+						&pcl_info->auth_seg_size);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_START_REG, 0,
+						&pcl_info->auth_seg_start);
+
+	if (alg == QCE_HASH_AES_CMAC) {
+		/* reset auth iv, bytecount and key registers */
+		for (i = 0; i < 16; i++)
+			qce_add_cmd_element(pdev, &ce_vaddr,
+				(CRYPTO_AUTH_IV0_REG + i * sizeof(uint32_t)),
+				0, NULL);
+		for (i = 0; i < 16; i++)
+			qce_add_cmd_element(pdev, &ce_vaddr,
+				(CRYPTO_AUTH_KEY0_REG + i*sizeof(uint32_t)),
+				0, NULL);
+		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_BYTECNT0_REG,
+						0, NULL);
+	} else {
+		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_IV0_REG, 0,
+							&pcl_info->auth_iv);
+		for (i = 1; i < iv_reg; i++)
+			qce_add_cmd_element(pdev, &ce_vaddr,
+				(CRYPTO_AUTH_IV0_REG + i*sizeof(uint32_t)),
+				0, NULL);
+		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_BYTECNT0_REG,
+						0, &pcl_info->auth_bytecount);
+	}
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_BYTECNT1_REG, 0, NULL);
+
+	if (key_reg) {
+		qce_add_cmd_element(pdev, &ce_vaddr,
+				CRYPTO_AUTH_KEY0_REG, 0, &pcl_info->auth_key);
+		for (i = 1; i < key_reg; i++)
+			qce_add_cmd_element(pdev, &ce_vaddr,
+				(CRYPTO_AUTH_KEY0_REG + i*sizeof(uint32_t)),
+				0, NULL);
+	}
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
+					pdev->reg.crypto_cfg_le, NULL);
+
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_GOPROC_REG,
+			((1 << CRYPTO_GO) | (1 << CRYPTO_RESULTS_DUMP) |
+			(1 << CRYPTO_CLR_CNTXT)), &pcl_info->go_proc);
+
+	pcl_info->size = (uintptr_t)ce_vaddr - (uintptr_t)ce_vaddr_start;
+	*pvaddr = (unsigned char *) ce_vaddr;
+
+	return 0;
+}
+
+static int _setup_aead_cmdlistptrs(struct qce_device *pdev,
+				int cri_index,
+				unsigned char **pvaddr,
+				uint32_t alg,
+				uint32_t mode,
+				uint32_t key_size,
+				bool     sha1)
+{
+	struct sps_command_element *ce_vaddr;
+	uintptr_t ce_vaddr_start;
+	struct qce_cmdlistptr_ops *cmdlistptr;
+	struct qce_cmdlist_info *pcl_info = NULL;
+	uint32_t key_reg;
+	uint32_t iv_reg;
+	uint32_t i;
+	uint32_t  enciv_in_word;
+	uint32_t encr_cfg;
+
+	cmdlistptr = &pdev->ce_request_info[cri_index].ce_sps.cmdlistptr;
+	*pvaddr = (unsigned char *)ALIGN(((uintptr_t)(*pvaddr)),
+					pdev->ce_bam_info.ce_burst_size);
+
+	ce_vaddr_start = (uintptr_t)(*pvaddr);
+	ce_vaddr = (struct sps_command_element *)(*pvaddr);
+
+	switch (alg) {
+
+	case CIPHER_ALG_DES:
+
+		switch (mode) {
+
+		case QCE_MODE_CBC:
+			if (sha1) {
+				cmdlistptr->aead_hmac_sha1_cbc_des.cmdlist =
+					(uintptr_t)ce_vaddr;
+				pcl_info = &(cmdlistptr->
+					aead_hmac_sha1_cbc_des);
+			} else {
+				cmdlistptr->aead_hmac_sha256_cbc_des.cmdlist =
+					(uintptr_t)ce_vaddr;
+				pcl_info = &(cmdlistptr->
+					aead_hmac_sha256_cbc_des);
+			}
+			encr_cfg = pdev->reg.encr_cfg_des_cbc;
+			break;
+		default:
+			return -EINVAL;
+		}
+
+		enciv_in_word = 2;
+
+		break;
+
+	case CIPHER_ALG_3DES:
+		switch (mode) {
+
+		case QCE_MODE_CBC:
+			if (sha1) {
+				cmdlistptr->aead_hmac_sha1_cbc_3des.cmdlist =
+					(uintptr_t)ce_vaddr;
+				pcl_info = &(cmdlistptr->
+					aead_hmac_sha1_cbc_3des);
+			} else {
+				cmdlistptr->aead_hmac_sha256_cbc_3des.cmdlist =
+					(uintptr_t)ce_vaddr;
+				pcl_info = &(cmdlistptr->
+					aead_hmac_sha256_cbc_3des);
+			}
+			encr_cfg = pdev->reg.encr_cfg_3des_cbc;
+			break;
+		default:
+			return -EINVAL;
+		}
+
+		enciv_in_word = 2;
+
+		break;
+
+	case CIPHER_ALG_AES:
+		switch (mode) {
+
+		case QCE_MODE_CBC:
+			if (key_size ==  AES128_KEY_SIZE) {
+				if (sha1) {
+					cmdlistptr->
+						aead_hmac_sha1_cbc_aes_128.
+						cmdlist = (uintptr_t)ce_vaddr;
+					pcl_info = &(cmdlistptr->
+						aead_hmac_sha1_cbc_aes_128);
+				} else {
+					cmdlistptr->
+						aead_hmac_sha256_cbc_aes_128.
+						cmdlist = (uintptr_t)ce_vaddr;
+					pcl_info = &(cmdlistptr->
+						aead_hmac_sha256_cbc_aes_128);
+				}
+				encr_cfg = pdev->reg.encr_cfg_aes_cbc_128;
+			} else if (key_size ==  AES256_KEY_SIZE) {
+				if (sha1) {
+					cmdlistptr->
+						aead_hmac_sha1_cbc_aes_256.
+						cmdlist = (uintptr_t)ce_vaddr;
+					pcl_info = &(cmdlistptr->
+						aead_hmac_sha1_cbc_aes_256);
+				} else {
+					cmdlistptr->
+						aead_hmac_sha256_cbc_aes_256.
+						cmdlist = (uintptr_t)ce_vaddr;
+					pcl_info = &(cmdlistptr->
+						aead_hmac_sha256_cbc_aes_256);
+				}
+				encr_cfg = pdev->reg.encr_cfg_aes_cbc_256;
+			} else {
+				return -EINVAL;
+			}
+			break;
+		default:
+			return -EINVAL;
+		}
+
+		enciv_in_word = 4;
+
+		break;
+
+	default:
+		return -EINVAL;
+	}
+
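+	/*
+	 * The switch above only picks the per-algorithm command list slot
+	 * and encryption config; the list body built below is shared:
+	 * cipher key, cipher IV (skipped for ECB), auth IV (5 words for
+	 * SHA-1, 8 for SHA-256), byte counts, HMAC key, then the segment
+	 * registers and the GO command.
+	 */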
+
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_STATUS_REG, 0, NULL);
+
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
+			pdev->reg.crypto_cfg_be, &pcl_info->crypto_cfg);
+
+	key_reg = key_size/sizeof(uint32_t);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_KEY0_REG, 0,
+			&pcl_info->encr_key);
+	for (i = 1; i < key_reg; i++)
+		qce_add_cmd_element(pdev, &ce_vaddr,
+			(CRYPTO_ENCR_KEY0_REG + i * sizeof(uint32_t)),
+			0, NULL);
+
+	if (mode != QCE_MODE_ECB) {
+		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR0_IV0_REG, 0,
+			&pcl_info->encr_cntr_iv);
+		for (i = 1; i < enciv_in_word; i++)
+			qce_add_cmd_element(pdev, &ce_vaddr,
+				(CRYPTO_CNTR0_IV0_REG + i * sizeof(uint32_t)),
+				0, NULL);
+	}
+
+	if (sha1)
+		iv_reg = 5;
+	else
+		iv_reg = 8;
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_IV0_REG, 0,
+				&pcl_info->auth_iv);
+	for (i = 1; i < iv_reg; i++)
+		qce_add_cmd_element(pdev, &ce_vaddr,
+			(CRYPTO_AUTH_IV0_REG + i*sizeof(uint32_t)),
+				0, NULL);
+
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_BYTECNT0_REG,
+				0, &pcl_info->auth_bytecount);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_BYTECNT1_REG, 0, NULL);
+
+	key_reg = SHA_HMAC_KEY_SIZE/sizeof(uint32_t);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_KEY0_REG, 0,
+			 &pcl_info->auth_key);
+	for (i = 1; i < key_reg; i++)
+		qce_add_cmd_element(pdev, &ce_vaddr,
+			(CRYPTO_AUTH_KEY0_REG + i*sizeof(uint32_t)), 0, NULL);
+
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_SEG_SIZE_REG, 0,
+			&pcl_info->seg_size);
+
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_CFG_REG, encr_cfg,
+			&pcl_info->encr_seg_cfg);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_SIZE_REG, 0,
+			&pcl_info->encr_seg_size);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_START_REG, 0,
+			&pcl_info->encr_seg_start);
+
+	if (sha1)
+		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_CFG_REG,
+				pdev->reg.auth_cfg_aead_sha1_hmac,
+				&pcl_info->auth_seg_cfg);
+	else
+		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_CFG_REG,
+				pdev->reg.auth_cfg_aead_sha256_hmac,
+				&pcl_info->auth_seg_cfg);
+
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_SIZE_REG, 0,
+			&pcl_info->auth_seg_size);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_START_REG, 0,
+			&pcl_info->auth_seg_start);
+
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
+					pdev->reg.crypto_cfg_le, NULL);
+
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_GOPROC_REG,
+			((1 << CRYPTO_GO) | (1 << CRYPTO_RESULTS_DUMP) |
+			(1 << CRYPTO_CLR_CNTXT)), &pcl_info->go_proc);
+
+	pcl_info->size = (uintptr_t)ce_vaddr - (uintptr_t)ce_vaddr_start;
+	*pvaddr = (unsigned char *) ce_vaddr;
+	return 0;
+}
+
+static int _setup_aead_ccm_cmdlistptrs(struct qce_device *pdev, int cri_index,
+				unsigned char **pvaddr, bool key_128)
+{
+	struct sps_command_element *ce_vaddr;
+	uintptr_t ce_vaddr_start;
+	struct qce_cmdlistptr_ops *cmdlistptr = &pdev->ce_request_info
+						[cri_index].ce_sps.cmdlistptr;
+	struct qce_cmdlist_info *pcl_info = NULL;
+	int i = 0;
+	uint32_t encr_cfg = 0;
+	uint32_t auth_cfg = 0;
+	uint32_t key_reg = 0;
+
+	*pvaddr = (unsigned char *)ALIGN(((uintptr_t)(*pvaddr)),
+					pdev->ce_bam_info.ce_burst_size);
+	ce_vaddr_start = (uintptr_t)(*pvaddr);
+	ce_vaddr = (struct sps_command_element *)(*pvaddr);
+
+	/*
+	 * Designate chunks of the allocated memory to various
+	 * command list pointers related to aead operations
+	 * defined in ce_cmdlistptrs_ops structure.
+	 */
+	if (key_128) {
+		cmdlistptr->aead_aes_128_ccm.cmdlist =
+						(uintptr_t)ce_vaddr;
+		pcl_info = &(cmdlistptr->aead_aes_128_ccm);
+
+		auth_cfg = pdev->reg.auth_cfg_aes_ccm_128;
+		encr_cfg = pdev->reg.encr_cfg_aes_ccm_128;
+		key_reg = 4;
+	} else {
+
+		cmdlistptr->aead_aes_256_ccm.cmdlist =
+						(uintptr_t)ce_vaddr;
+		pcl_info = &(cmdlistptr->aead_aes_256_ccm);
+
+		auth_cfg = pdev->reg.auth_cfg_aes_ccm_256;
+		encr_cfg = pdev->reg.encr_cfg_aes_ccm_256;
+
+		key_reg = 8;
+	}
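+	/* key_reg counts 32-bit key words: 4 for AES-128, 8 for AES-256 */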
+
+	/* clear status register */
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_STATUS_REG, 0, NULL);
+
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
+			pdev->reg.crypto_cfg_be, &pcl_info->crypto_cfg);
+
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_CFG_REG, 0, NULL);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_START_REG, 0,
+									NULL);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_SEG_SIZE_REG, 0,
+						&pcl_info->seg_size);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_CFG_REG,
+					encr_cfg, &pcl_info->encr_seg_cfg);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_SIZE_REG, 0,
+						&pcl_info->encr_seg_size);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_START_REG, 0,
+						&pcl_info->encr_seg_start);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR_MASK_REG,
+				(uint32_t)0xffffffff, &pcl_info->encr_mask);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR_MASK_REG0,
+				(uint32_t)0xffffffff, NULL);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR_MASK_REG1,
+				(uint32_t)0xffffffff, NULL);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR_MASK_REG2,
+				(uint32_t)0xffffffff, NULL);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_CFG_REG,
+					auth_cfg, &pcl_info->auth_seg_cfg);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_SIZE_REG, 0,
+						&pcl_info->auth_seg_size);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_START_REG, 0,
+						&pcl_info->auth_seg_start);
+	/* reset auth iv, bytecount and key registers */
+	for (i = 0; i < 8; i++)
+		qce_add_cmd_element(pdev, &ce_vaddr,
+				(CRYPTO_AUTH_IV0_REG + i * sizeof(uint32_t)),
+				0, NULL);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_BYTECNT0_REG,
+					0, NULL);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_BYTECNT1_REG,
+					0, NULL);
+	for (i = 0; i < 16; i++)
+		qce_add_cmd_element(pdev, &ce_vaddr,
+				(CRYPTO_AUTH_KEY0_REG + i * sizeof(uint32_t)),
+				0, NULL);
+	/* set auth key */
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_KEY0_REG, 0,
+							&pcl_info->auth_key);
+	for (i = 1; i < key_reg; i++)
+		qce_add_cmd_element(pdev, &ce_vaddr,
+				(CRYPTO_AUTH_KEY0_REG + i * sizeof(uint32_t)),
+				0, NULL);
+	/* set NONCE info */
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_INFO_NONCE0_REG, 0,
+						&pcl_info->auth_nonce_info);
+	for (i = 1; i < 4; i++)
+		qce_add_cmd_element(pdev, &ce_vaddr,
+				(CRYPTO_AUTH_INFO_NONCE0_REG +
+				i * sizeof(uint32_t)), 0, NULL);
+
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_KEY0_REG, 0,
+						&pcl_info->encr_key);
+	for (i = 1; i < key_reg; i++)
+		qce_add_cmd_element(pdev, &ce_vaddr,
+				(CRYPTO_ENCR_KEY0_REG + i * sizeof(uint32_t)),
+				0, NULL);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR0_IV0_REG, 0,
+						&pcl_info->encr_cntr_iv);
+	for (i = 1; i < 4; i++)
+		qce_add_cmd_element(pdev, &ce_vaddr,
+				(CRYPTO_CNTR0_IV0_REG + i * sizeof(uint32_t)),
+				0, NULL);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_CCM_INT_CNTR0_REG, 0,
+						&pcl_info->encr_ccm_cntr_iv);
+	for (i = 1; i < 4; i++)
+		qce_add_cmd_element(pdev, &ce_vaddr,
+			(CRYPTO_ENCR_CCM_INT_CNTR0_REG + i * sizeof(uint32_t)),
+			0, NULL);
+
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
+					pdev->reg.crypto_cfg_le, NULL);
+
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_GOPROC_REG,
+			((1 << CRYPTO_GO) | (1 << CRYPTO_RESULTS_DUMP) |
+			(1 << CRYPTO_CLR_CNTXT)), &pcl_info->go_proc);
+
+	pcl_info->size = (uintptr_t)ce_vaddr - (uintptr_t)ce_vaddr_start;
+	*pvaddr = (unsigned char *) ce_vaddr;
+
+	return 0;
+}
+
+static int _setup_f8_cmdlistptrs(struct qce_device *pdev, int cri_index,
+	unsigned char **pvaddr, enum qce_ota_algo_enum alg)
+{
+	struct sps_command_element *ce_vaddr;
+	uintptr_t ce_vaddr_start;
+	struct qce_cmdlistptr_ops *cmdlistptr;
+	struct qce_cmdlist_info *pcl_info = NULL;
+	int i = 0;
+	uint32_t encr_cfg = 0;
+	uint32_t key_reg = 4;
+
+	cmdlistptr = &pdev->ce_request_info[cri_index].ce_sps.cmdlistptr;
+	*pvaddr = (unsigned char *)ALIGN(((uintptr_t)(*pvaddr)),
+					pdev->ce_bam_info.ce_burst_size);
+	ce_vaddr = (struct sps_command_element *)(*pvaddr);
+	ce_vaddr_start = (uintptr_t)(*pvaddr);
+
+	/*
+	 * Designate chunks of the allocated memory to various
+	 * command list pointers related to f8 cipher algorithm defined
+	 * in ce_cmdlistptrs_ops structure.
+	 */
+
+	switch (alg) {
+	case QCE_OTA_ALGO_KASUMI:
+		cmdlistptr->f8_kasumi.cmdlist = (uintptr_t)ce_vaddr;
+		pcl_info = &(cmdlistptr->f8_kasumi);
+		encr_cfg = pdev->reg.encr_cfg_kasumi;
+		break;
+
+	case QCE_OTA_ALGO_SNOW3G:
+	default:
+		cmdlistptr->f8_snow3g.cmdlist = (uintptr_t)ce_vaddr;
+		pcl_info = &(cmdlistptr->f8_snow3g);
+		encr_cfg = pdev->reg.encr_cfg_snow3g;
+		break;
+	}
+	/* clear status register */
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_STATUS_REG,
+							0, NULL);
+	/* set config to big endian */
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
+			pdev->reg.crypto_cfg_be, &pcl_info->crypto_cfg);
+
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_SEG_SIZE_REG, 0,
+						&pcl_info->seg_size);
+
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_CFG_REG, encr_cfg,
+						&pcl_info->encr_seg_cfg);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_SIZE_REG, 0,
+						&pcl_info->encr_seg_size);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_START_REG, 0,
+						&pcl_info->encr_seg_start);
+
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_CFG_REG, 0,
+						&pcl_info->auth_seg_cfg);
+
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_SIZE_REG,
+						0, &pcl_info->auth_seg_size);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_START_REG,
+						0, &pcl_info->auth_seg_start);
+
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_KEY0_REG, 0,
+						 &pcl_info->encr_key);
+	for (i = 1; i < key_reg; i++)
+		qce_add_cmd_element(pdev, &ce_vaddr,
+				(CRYPTO_ENCR_KEY0_REG + i * sizeof(uint32_t)),
+				0, NULL);
+
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR0_IV0_REG, 0,
+						&pcl_info->encr_cntr_iv);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR1_IV1_REG, 0,
+								NULL);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
+					pdev->reg.crypto_cfg_le, NULL);
+
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_GOPROC_REG,
+			((1 << CRYPTO_GO) | (1 << CRYPTO_RESULTS_DUMP) |
+			(1 << CRYPTO_CLR_CNTXT)), &pcl_info->go_proc);
+
+	pcl_info->size = (uintptr_t)ce_vaddr - (uintptr_t)ce_vaddr_start;
+	*pvaddr = (unsigned char *) ce_vaddr;
+
+	return 0;
+}
+
+static int _setup_f9_cmdlistptrs(struct qce_device *pdev, int cri_index,
+	unsigned char **pvaddr, enum qce_ota_algo_enum alg)
+{
+	struct sps_command_element *ce_vaddr;
+	uintptr_t ce_vaddr_start;
+	struct qce_cmdlistptr_ops *cmdlistptr;
+	struct qce_cmdlist_info *pcl_info = NULL;
+	int i = 0;
+	uint32_t auth_cfg = 0;
+	uint32_t iv_reg = 0;
+
+	cmdlistptr = &pdev->ce_request_info[cri_index].ce_sps.cmdlistptr;
+	*pvaddr = (unsigned char *)ALIGN(((uintptr_t)(*pvaddr)),
+					pdev->ce_bam_info.ce_burst_size);
+	ce_vaddr_start = (uintptr_t)(*pvaddr);
+	ce_vaddr = (struct sps_command_element *)(*pvaddr);
+
+	/*
+	 * Designate chunks of the allocated memory to various
+	 * command list pointers related to authentication operations
+	 * defined in ce_cmdlistptrs_ops structure.
+	 */
+	switch (alg) {
+	case QCE_OTA_ALGO_KASUMI:
+		cmdlistptr->f9_kasumi.cmdlist = (uintptr_t)ce_vaddr;
+		pcl_info = &(cmdlistptr->f9_kasumi);
+		auth_cfg = pdev->reg.auth_cfg_kasumi;
+		break;
+
+	case QCE_OTA_ALGO_SNOW3G:
+	default:
+		cmdlistptr->f9_snow3g.cmdlist = (uintptr_t)ce_vaddr;
+		pcl_info = &(cmdlistptr->f9_snow3g);
+		auth_cfg = pdev->reg.auth_cfg_snow3g;
+		break;
+	}
+
+	/* clear status register */
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_STATUS_REG,
+							0, NULL);
+	/* set config to big endian */
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
+			pdev->reg.crypto_cfg_be, &pcl_info->crypto_cfg);
+
+	iv_reg = 5;
+
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_SEG_SIZE_REG, 0,
+						&pcl_info->seg_size);
+
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_CFG_REG, 0,
+						&pcl_info->encr_seg_cfg);
+
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_CFG_REG,
+					auth_cfg, &pcl_info->auth_seg_cfg);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_SIZE_REG, 0,
+						&pcl_info->auth_seg_size);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_START_REG, 0,
+						&pcl_info->auth_seg_start);
+
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_IV0_REG, 0,
+							&pcl_info->auth_iv);
+	for (i = 1; i < iv_reg; i++) {
+		qce_add_cmd_element(pdev, &ce_vaddr,
+				(CRYPTO_AUTH_IV0_REG + i*sizeof(uint32_t)),
+				0, NULL);
+	}
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_BYTECNT0_REG,
+					0, &pcl_info->auth_bytecount);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_BYTECNT1_REG, 0, NULL);
+
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
+					pdev->reg.crypto_cfg_le, NULL);
+
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_GOPROC_REG,
+			((1 << CRYPTO_GO) | (1 << CRYPTO_RESULTS_DUMP) |
+			(1 << CRYPTO_CLR_CNTXT)), &pcl_info->go_proc);
+
+	pcl_info->size = (uintptr_t)ce_vaddr - (uintptr_t)ce_vaddr_start;
+	*pvaddr = (unsigned char *) ce_vaddr;
+
+	return 0;
+}
+
+static int _setup_unlock_pipe_cmdlistptrs(struct qce_device *pdev,
+		int cri_index, unsigned char **pvaddr)
+{
+	struct sps_command_element *ce_vaddr;
+	uintptr_t ce_vaddr_start = (uintptr_t)(*pvaddr);
+	struct qce_cmdlistptr_ops *cmdlistptr;
+	struct qce_cmdlist_info *pcl_info = NULL;
+
+	cmdlistptr = &pdev->ce_request_info[cri_index].ce_sps.cmdlistptr;
+	*pvaddr = (unsigned char *)ALIGN(((uintptr_t)(*pvaddr)),
+					pdev->ce_bam_info.ce_burst_size);
+	ce_vaddr = (struct sps_command_element *)(*pvaddr);
+	cmdlistptr->unlock_all_pipes.cmdlist = (uintptr_t)ce_vaddr;
+	pcl_info = &(cmdlistptr->unlock_all_pipes);
+
+	/*
+	 * Designate chunks of the allocated memory to command list
+	 * to unlock pipes.
+	 */
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
+					CRYPTO_CONFIG_RESET, NULL);
+	pcl_info->size = (uintptr_t)ce_vaddr - (uintptr_t)ce_vaddr_start;
+	*pvaddr = (unsigned char *) ce_vaddr;
+
+	return 0;
+}
+
+static int qce_setup_cmdlistptrs(struct qce_device *pdev, int cri_index,
+					unsigned char **pvaddr)
+{
+	struct sps_command_element *ce_vaddr =
+				(struct sps_command_element *)(*pvaddr);
+	/*
+	 * Designate chunks of the allocated memory to various
+	 * command list pointers related to operations defined
+	 * in ce_cmdlistptrs_ops structure.
+	 */
+	ce_vaddr =
+		(struct sps_command_element *)ALIGN(((uintptr_t) ce_vaddr),
+					pdev->ce_bam_info.ce_burst_size);
+	*pvaddr = (unsigned char *) ce_vaddr;
+
+	_setup_cipher_aes_cmdlistptrs(pdev, cri_index, pvaddr, QCE_MODE_CBC,
+								true);
+	_setup_cipher_aes_cmdlistptrs(pdev, cri_index, pvaddr, QCE_MODE_CTR,
+								true);
+	_setup_cipher_aes_cmdlistptrs(pdev, cri_index, pvaddr, QCE_MODE_ECB,
+								true);
+	_setup_cipher_aes_cmdlistptrs(pdev, cri_index, pvaddr, QCE_MODE_XTS,
+								true);
+	_setup_cipher_aes_cmdlistptrs(pdev, cri_index, pvaddr, QCE_MODE_CBC,
+								false);
+	_setup_cipher_aes_cmdlistptrs(pdev, cri_index, pvaddr, QCE_MODE_CTR,
+								false);
+	_setup_cipher_aes_cmdlistptrs(pdev, cri_index, pvaddr, QCE_MODE_ECB,
+								false);
+	_setup_cipher_aes_cmdlistptrs(pdev, cri_index, pvaddr, QCE_MODE_XTS,
+								false);
+
+	_setup_cipher_des_cmdlistptrs(pdev, cri_index, pvaddr, CIPHER_ALG_DES,
+								true);
+	_setup_cipher_des_cmdlistptrs(pdev, cri_index, pvaddr, CIPHER_ALG_DES,
+								false);
+	_setup_cipher_des_cmdlistptrs(pdev, cri_index, pvaddr, CIPHER_ALG_3DES,
+								true);
+	_setup_cipher_des_cmdlistptrs(pdev, cri_index, pvaddr, CIPHER_ALG_3DES,
+								false);
+
+	_setup_auth_cmdlistptrs(pdev, cri_index, pvaddr, QCE_HASH_SHA1,
+								false);
+	_setup_auth_cmdlistptrs(pdev, cri_index, pvaddr, QCE_HASH_SHA256,
+								false);
+
+	_setup_auth_cmdlistptrs(pdev, cri_index, pvaddr, QCE_HASH_SHA1_HMAC,
+								false);
+	_setup_auth_cmdlistptrs(pdev, cri_index, pvaddr, QCE_HASH_SHA256_HMAC,
+								false);
+
+	_setup_auth_cmdlistptrs(pdev, cri_index, pvaddr, QCE_HASH_AES_CMAC,
+								true);
+	_setup_auth_cmdlistptrs(pdev, cri_index, pvaddr, QCE_HASH_AES_CMAC,
+								false);
+
+	_setup_aead_cmdlistptrs(pdev, cri_index, pvaddr, CIPHER_ALG_DES,
+					QCE_MODE_CBC, DES_KEY_SIZE, true);
+	_setup_aead_cmdlistptrs(pdev, cri_index, pvaddr, CIPHER_ALG_3DES,
+					QCE_MODE_CBC, DES3_EDE_KEY_SIZE, true);
+	_setup_aead_cmdlistptrs(pdev, cri_index, pvaddr, CIPHER_ALG_AES,
+					QCE_MODE_CBC, AES128_KEY_SIZE, true);
+	_setup_aead_cmdlistptrs(pdev, cri_index, pvaddr, CIPHER_ALG_AES,
+					QCE_MODE_CBC, AES256_KEY_SIZE, true);
+	_setup_aead_cmdlistptrs(pdev, cri_index, pvaddr, CIPHER_ALG_DES,
+					QCE_MODE_CBC, DES_KEY_SIZE, false);
+	_setup_aead_cmdlistptrs(pdev, cri_index, pvaddr, CIPHER_ALG_3DES,
+					QCE_MODE_CBC, DES3_EDE_KEY_SIZE, false);
+	_setup_aead_cmdlistptrs(pdev, cri_index, pvaddr, CIPHER_ALG_AES,
+					QCE_MODE_CBC, AES128_KEY_SIZE, false);
+	_setup_aead_cmdlistptrs(pdev, cri_index, pvaddr, CIPHER_ALG_AES,
+					QCE_MODE_CBC, AES256_KEY_SIZE, false);
+
+	_setup_cipher_null_cmdlistptrs(pdev, cri_index, pvaddr);
+
+	_setup_aead_ccm_cmdlistptrs(pdev, cri_index, pvaddr, true);
+	_setup_aead_ccm_cmdlistptrs(pdev, cri_index, pvaddr, false);
+	_setup_f8_cmdlistptrs(pdev, cri_index, pvaddr, QCE_OTA_ALGO_KASUMI);
+	_setup_f8_cmdlistptrs(pdev, cri_index, pvaddr, QCE_OTA_ALGO_SNOW3G);
+	_setup_f9_cmdlistptrs(pdev, cri_index, pvaddr, QCE_OTA_ALGO_KASUMI);
+	_setup_f9_cmdlistptrs(pdev, cri_index, pvaddr, QCE_OTA_ALGO_SNOW3G);
+	_setup_unlock_pipe_cmdlistptrs(pdev, cri_index, pvaddr);
+
+	return 0;
+}
+
+static int qce_setup_ce_sps_data(struct qce_device *pce_dev)
+{
+	unsigned char *vaddr;
+	int i;
+	unsigned char *iovec_vaddr;
+	int iovec_memsize;
+
+	vaddr = pce_dev->coh_vmem;
+	vaddr = (unsigned char *)ALIGN(((uintptr_t)vaddr),
+					pce_dev->ce_bam_info.ce_burst_size);
+	iovec_vaddr = pce_dev->iovec_vmem;
+	iovec_memsize = pce_dev->iovec_memsize;
+	for (i = 0; i < MAX_QCE_ALLOC_BAM_REQ; i++) {
+		/* Allow for 256 descriptor (cmd and data) entries per pipe */
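+		/*
+		 * Each direction gets TOTAL_IOVEC_SPACE_PER_PIPE bytes;
+		 * assuming 8-byte struct sps_iovec entries, 256 of them
+		 * come to 2 KB per direction (a sizing sketch based on
+		 * the comment above, not a hardware requirement).
+		 */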
+		pce_dev->ce_request_info[i].ce_sps.in_transfer.iovec =
+				(struct sps_iovec *)iovec_vaddr;
+		pce_dev->ce_request_info[i].ce_sps.in_transfer.iovec_phys =
+			virt_to_phys(pce_dev->ce_request_info[i].
+				ce_sps.in_transfer.iovec);
+		iovec_vaddr += TOTAL_IOVEC_SPACE_PER_PIPE;
+		iovec_memsize -= TOTAL_IOVEC_SPACE_PER_PIPE;
+		pce_dev->ce_request_info[i].ce_sps.out_transfer.iovec =
+				(struct sps_iovec *)iovec_vaddr;
+		pce_dev->ce_request_info[i].ce_sps.out_transfer.iovec_phys =
+			virt_to_phys(pce_dev->ce_request_info[i].
+				ce_sps.out_transfer.iovec);
+		iovec_vaddr += TOTAL_IOVEC_SPACE_PER_PIPE;
+		iovec_memsize -= TOTAL_IOVEC_SPACE_PER_PIPE;
+		if (pce_dev->support_cmd_dscr)
+			qce_setup_cmdlistptrs(pce_dev, i, &vaddr);
+		vaddr = (unsigned char *)ALIGN(((uintptr_t)vaddr),
+				pce_dev->ce_bam_info.ce_burst_size);
+		pce_dev->ce_request_info[i].ce_sps.result_dump =
+				(uintptr_t)vaddr;
+		pce_dev->ce_request_info[i].ce_sps.result_dump_phy =
+				GET_PHYS_ADDR((uintptr_t)vaddr);
+		pce_dev->ce_request_info[i].ce_sps.result =
+				(struct ce_result_dump_format *)vaddr;
+		vaddr += CRYPTO_RESULT_DUMP_SIZE;
+
+		pce_dev->ce_request_info[i].ce_sps.result_dump_null =
+				(uintptr_t)vaddr;
+		pce_dev->ce_request_info[i].ce_sps.result_dump_null_phy =
+				GET_PHYS_ADDR((uintptr_t)vaddr);
+		pce_dev->ce_request_info[i].ce_sps.result_null =
+				(struct ce_result_dump_format *)vaddr;
+		vaddr += CRYPTO_RESULT_DUMP_SIZE;
+
+		pce_dev->ce_request_info[i].ce_sps.ignore_buffer =
+				(uintptr_t)vaddr;
+		vaddr += pce_dev->ce_bam_info.ce_burst_size * 2;
+	}
+	if ((vaddr - pce_dev->coh_vmem) > pce_dev->memsize ||
+							iovec_memsize < 0)
+		panic("qce50: Not enough coherent memory. Allocated %x, need %lx\n",
+				 pce_dev->memsize, (uintptr_t)vaddr -
+				(uintptr_t)pce_dev->coh_vmem);
+	return 0;
+}
+
+static int qce_init_ce_cfg_val(struct qce_device *pce_dev)
+{
+	uint32_t beats = (pce_dev->ce_bam_info.ce_burst_size >> 3) - 1;
+	uint32_t pipe_pair = pce_dev->ce_bam_info.pipe_pair_index;
+
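+	/*
+	 * crypto_cfg_be packs the bus request size in "beats" (the burst
+	 * size in 8-byte units minus one, e.g. a 64-byte burst gives
+	 * beats = (64 >> 3) - 1 = 7), the interrupt masks and the pipe
+	 * pair selector; the little-endian variant below only adds the
+	 * endianness bits.
+	 */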
+	pce_dev->reg.crypto_cfg_be = (beats << CRYPTO_REQ_SIZE) |
+		BIT(CRYPTO_MASK_DOUT_INTR) | BIT(CRYPTO_MASK_DIN_INTR) |
+		BIT(CRYPTO_MASK_OP_DONE_INTR) | (0 << CRYPTO_HIGH_SPD_EN_N) |
+		(pipe_pair << CRYPTO_PIPE_SET_SELECT);
+
+	pce_dev->reg.crypto_cfg_le =
+		(pce_dev->reg.crypto_cfg_be | CRYPTO_LITTLE_ENDIAN_MASK);
+
+	/* Initialize encr_cfg register for AES alg */
+	pce_dev->reg.encr_cfg_aes_cbc_128 =
+		(CRYPTO_ENCR_KEY_SZ_AES128 << CRYPTO_ENCR_KEY_SZ) |
+		(CRYPTO_ENCR_ALG_AES << CRYPTO_ENCR_ALG) |
+		(CRYPTO_ENCR_MODE_CBC << CRYPTO_ENCR_MODE);
+
+	pce_dev->reg.encr_cfg_aes_cbc_256 =
+		(CRYPTO_ENCR_KEY_SZ_AES256 << CRYPTO_ENCR_KEY_SZ) |
+		(CRYPTO_ENCR_ALG_AES << CRYPTO_ENCR_ALG) |
+		(CRYPTO_ENCR_MODE_CBC << CRYPTO_ENCR_MODE);
+
+	pce_dev->reg.encr_cfg_aes_ctr_128 =
+		(CRYPTO_ENCR_KEY_SZ_AES128 << CRYPTO_ENCR_KEY_SZ) |
+		(CRYPTO_ENCR_ALG_AES << CRYPTO_ENCR_ALG) |
+		(CRYPTO_ENCR_MODE_CTR << CRYPTO_ENCR_MODE);
+
+	pce_dev->reg.encr_cfg_aes_ctr_256 =
+		(CRYPTO_ENCR_KEY_SZ_AES256 << CRYPTO_ENCR_KEY_SZ) |
+		(CRYPTO_ENCR_ALG_AES << CRYPTO_ENCR_ALG) |
+		(CRYPTO_ENCR_MODE_CTR << CRYPTO_ENCR_MODE);
+
+	pce_dev->reg.encr_cfg_aes_xts_128 =
+		(CRYPTO_ENCR_KEY_SZ_AES128 << CRYPTO_ENCR_KEY_SZ) |
+		(CRYPTO_ENCR_ALG_AES << CRYPTO_ENCR_ALG) |
+		(CRYPTO_ENCR_MODE_XTS << CRYPTO_ENCR_MODE);
+
+	pce_dev->reg.encr_cfg_aes_xts_256 =
+		(CRYPTO_ENCR_KEY_SZ_AES256 << CRYPTO_ENCR_KEY_SZ) |
+		(CRYPTO_ENCR_ALG_AES << CRYPTO_ENCR_ALG) |
+		(CRYPTO_ENCR_MODE_XTS << CRYPTO_ENCR_MODE);
+
+	pce_dev->reg.encr_cfg_aes_ecb_128 =
+		(CRYPTO_ENCR_KEY_SZ_AES128 << CRYPTO_ENCR_KEY_SZ) |
+		(CRYPTO_ENCR_ALG_AES << CRYPTO_ENCR_ALG) |
+		(CRYPTO_ENCR_MODE_ECB << CRYPTO_ENCR_MODE);
+
+	pce_dev->reg.encr_cfg_aes_ecb_256 =
+		(CRYPTO_ENCR_KEY_SZ_AES256 << CRYPTO_ENCR_KEY_SZ) |
+		(CRYPTO_ENCR_ALG_AES << CRYPTO_ENCR_ALG) |
+		(CRYPTO_ENCR_MODE_ECB << CRYPTO_ENCR_MODE);
+
+	pce_dev->reg.encr_cfg_aes_ccm_128 =
+		(CRYPTO_ENCR_KEY_SZ_AES128 << CRYPTO_ENCR_KEY_SZ) |
+		(CRYPTO_ENCR_ALG_AES << CRYPTO_ENCR_ALG) |
+		(CRYPTO_ENCR_MODE_CCM << CRYPTO_ENCR_MODE)|
+		(CRYPTO_LAST_CCM_XFR << CRYPTO_LAST_CCM);
+
+	pce_dev->reg.encr_cfg_aes_ccm_256 =
+		(CRYPTO_ENCR_KEY_SZ_AES256 << CRYPTO_ENCR_KEY_SZ) |
+		(CRYPTO_ENCR_ALG_AES << CRYPTO_ENCR_ALG) |
+		(CRYPTO_ENCR_MODE_CCM << CRYPTO_ENCR_MODE) |
+		(CRYPTO_LAST_CCM_XFR << CRYPTO_LAST_CCM);
+
+	/* Initialize encr_cfg register for DES alg */
+	pce_dev->reg.encr_cfg_des_ecb =
+		(CRYPTO_ENCR_KEY_SZ_DES << CRYPTO_ENCR_KEY_SZ) |
+		(CRYPTO_ENCR_ALG_DES << CRYPTO_ENCR_ALG) |
+		(CRYPTO_ENCR_MODE_ECB << CRYPTO_ENCR_MODE);
+
+	pce_dev->reg.encr_cfg_des_cbc =
+		(CRYPTO_ENCR_KEY_SZ_DES << CRYPTO_ENCR_KEY_SZ) |
+		(CRYPTO_ENCR_ALG_DES << CRYPTO_ENCR_ALG) |
+		(CRYPTO_ENCR_MODE_CBC << CRYPTO_ENCR_MODE);
+
+	pce_dev->reg.encr_cfg_3des_ecb =
+		(CRYPTO_ENCR_KEY_SZ_3DES << CRYPTO_ENCR_KEY_SZ) |
+		(CRYPTO_ENCR_ALG_DES << CRYPTO_ENCR_ALG) |
+		(CRYPTO_ENCR_MODE_ECB << CRYPTO_ENCR_MODE);
+
+	pce_dev->reg.encr_cfg_3des_cbc =
+		(CRYPTO_ENCR_KEY_SZ_3DES << CRYPTO_ENCR_KEY_SZ) |
+		(CRYPTO_ENCR_ALG_DES << CRYPTO_ENCR_ALG) |
+		(CRYPTO_ENCR_MODE_CBC << CRYPTO_ENCR_MODE);
+
+	/* Initialize encr_cfg register for kasumi/snow3g alg */
+	pce_dev->reg.encr_cfg_kasumi =
+		(CRYPTO_ENCR_ALG_KASUMI << CRYPTO_ENCR_ALG);
+
+	pce_dev->reg.encr_cfg_snow3g =
+		(CRYPTO_ENCR_ALG_SNOW_3G << CRYPTO_ENCR_ALG);
+
+	/* Initialize auth_cfg register for CMAC alg */
+	pce_dev->reg.auth_cfg_cmac_128 =
+		(1 << CRYPTO_LAST) | (1 << CRYPTO_FIRST) |
+		(CRYPTO_AUTH_MODE_CMAC << CRYPTO_AUTH_MODE)|
+		(CRYPTO_AUTH_SIZE_ENUM_16_BYTES << CRYPTO_AUTH_SIZE) |
+		(CRYPTO_AUTH_ALG_AES << CRYPTO_AUTH_ALG) |
+		(CRYPTO_AUTH_KEY_SZ_AES128 << CRYPTO_AUTH_KEY_SIZE);
+
+	pce_dev->reg.auth_cfg_cmac_256 =
+		(1 << CRYPTO_LAST) | (1 << CRYPTO_FIRST) |
+		(CRYPTO_AUTH_MODE_CMAC << CRYPTO_AUTH_MODE)|
+		(CRYPTO_AUTH_SIZE_ENUM_16_BYTES << CRYPTO_AUTH_SIZE) |
+		(CRYPTO_AUTH_ALG_AES << CRYPTO_AUTH_ALG) |
+		(CRYPTO_AUTH_KEY_SZ_AES256 << CRYPTO_AUTH_KEY_SIZE);
+
+	/* Initialize auth_cfg register for HMAC alg */
+	pce_dev->reg.auth_cfg_hmac_sha1 =
+		(CRYPTO_AUTH_MODE_HMAC << CRYPTO_AUTH_MODE)|
+		(CRYPTO_AUTH_SIZE_SHA1 << CRYPTO_AUTH_SIZE) |
+		(CRYPTO_AUTH_ALG_SHA << CRYPTO_AUTH_ALG) |
+		(CRYPTO_AUTH_POS_BEFORE << CRYPTO_AUTH_POS);
+
+	pce_dev->reg.auth_cfg_hmac_sha256 =
+		(CRYPTO_AUTH_MODE_HMAC << CRYPTO_AUTH_MODE)|
+		(CRYPTO_AUTH_SIZE_SHA256 << CRYPTO_AUTH_SIZE) |
+		(CRYPTO_AUTH_ALG_SHA << CRYPTO_AUTH_ALG) |
+		(CRYPTO_AUTH_POS_BEFORE << CRYPTO_AUTH_POS);
+
+	/* Initialize auth_cfg register for SHA1/256 alg */
+	pce_dev->reg.auth_cfg_sha1 =
+		(CRYPTO_AUTH_MODE_HASH << CRYPTO_AUTH_MODE)|
+		(CRYPTO_AUTH_SIZE_SHA1 << CRYPTO_AUTH_SIZE) |
+		(CRYPTO_AUTH_ALG_SHA << CRYPTO_AUTH_ALG) |
+		(CRYPTO_AUTH_POS_BEFORE << CRYPTO_AUTH_POS);
+
+	pce_dev->reg.auth_cfg_sha256 =
+		(CRYPTO_AUTH_MODE_HASH << CRYPTO_AUTH_MODE)|
+		(CRYPTO_AUTH_SIZE_SHA256 << CRYPTO_AUTH_SIZE) |
+		(CRYPTO_AUTH_ALG_SHA << CRYPTO_AUTH_ALG) |
+		(CRYPTO_AUTH_POS_BEFORE << CRYPTO_AUTH_POS);
+
+	/* Initialize auth_cfg register for AEAD alg */
+	pce_dev->reg.auth_cfg_aead_sha1_hmac =
+		(CRYPTO_AUTH_MODE_HMAC << CRYPTO_AUTH_MODE)|
+		(CRYPTO_AUTH_SIZE_SHA1 << CRYPTO_AUTH_SIZE) |
+		(CRYPTO_AUTH_ALG_SHA << CRYPTO_AUTH_ALG) |
+		(1 << CRYPTO_LAST) | (1 << CRYPTO_FIRST);
+
+	pce_dev->reg.auth_cfg_aead_sha256_hmac =
+		(CRYPTO_AUTH_MODE_HMAC << CRYPTO_AUTH_MODE)|
+		(CRYPTO_AUTH_SIZE_SHA256 << CRYPTO_AUTH_SIZE) |
+		(CRYPTO_AUTH_ALG_SHA << CRYPTO_AUTH_ALG) |
+		(1 << CRYPTO_LAST) | (1 << CRYPTO_FIRST);
+
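+	/*
+	 * (MAX_NONCE / sizeof(uint32_t)) below programs the nonce length
+	 * in 32-bit words; e.g. assuming MAX_NONCE is 16 bytes, this is
+	 * 4 words.
+	 */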
+	pce_dev->reg.auth_cfg_aes_ccm_128 =
+		(1 << CRYPTO_LAST) | (1 << CRYPTO_FIRST) |
+		(CRYPTO_AUTH_MODE_CCM << CRYPTO_AUTH_MODE)|
+		(CRYPTO_AUTH_ALG_AES << CRYPTO_AUTH_ALG) |
+		(CRYPTO_AUTH_KEY_SZ_AES128 << CRYPTO_AUTH_KEY_SIZE) |
+		((MAX_NONCE/sizeof(uint32_t)) << CRYPTO_AUTH_NONCE_NUM_WORDS);
+	pce_dev->reg.auth_cfg_aes_ccm_128 &= ~(1 << CRYPTO_USE_HW_KEY_AUTH);
+
+	pce_dev->reg.auth_cfg_aes_ccm_256 =
+		(1 << CRYPTO_LAST) | (1 << CRYPTO_FIRST) |
+		(CRYPTO_AUTH_MODE_CCM << CRYPTO_AUTH_MODE)|
+		(CRYPTO_AUTH_ALG_AES << CRYPTO_AUTH_ALG) |
+		(CRYPTO_AUTH_KEY_SZ_AES256 << CRYPTO_AUTH_KEY_SIZE) |
+		((MAX_NONCE/sizeof(uint32_t)) << CRYPTO_AUTH_NONCE_NUM_WORDS);
+	pce_dev->reg.auth_cfg_aes_ccm_256 &= ~(1 << CRYPTO_USE_HW_KEY_AUTH);
+
+	/* Initialize auth_cfg register for kasumi/snow3g */
+	pce_dev->reg.auth_cfg_kasumi =
+			(CRYPTO_AUTH_ALG_KASUMI << CRYPTO_AUTH_ALG) |
+				BIT(CRYPTO_FIRST) | BIT(CRYPTO_LAST);
+	pce_dev->reg.auth_cfg_snow3g =
+			(CRYPTO_AUTH_ALG_SNOW3G << CRYPTO_AUTH_ALG) |
+				BIT(CRYPTO_FIRST) | BIT(CRYPTO_LAST);
+	return 0;
+}
+
+static void _qce_ccm_get_around_input(struct qce_device *pce_dev,
+	struct ce_request_info *preq_info, enum qce_cipher_dir_enum dir)
+{
+	struct qce_cmdlist_info *cmdlistinfo;
+	struct ce_sps_data *pce_sps_data;
+
+	pce_sps_data = &preq_info->ce_sps;
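+	/*
+	 * Workaround sketch: on targets with no_get_around set but no CCM
+	 * MAC-status get-around, a CCM decrypt appends a cipher-null
+	 * command plus one burst of ignore-buffer data so the MAC
+	 * comparison status is flushed into a separate result dump.
+	 */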
+	if ((dir == QCE_DECRYPT) && pce_dev->no_get_around &&
+			!(pce_dev->no_ccm_mac_status_get_around)) {
+		cmdlistinfo = &pce_sps_data->cmdlistptr.cipher_null;
+		_qce_sps_add_cmd(pce_dev, 0, cmdlistinfo,
+				&pce_sps_data->in_transfer);
+		_qce_sps_add_data(GET_PHYS_ADDR(pce_sps_data->ignore_buffer),
+			pce_dev->ce_bam_info.ce_burst_size,
+			&pce_sps_data->in_transfer);
+		_qce_set_flag(&pce_sps_data->in_transfer,
+				SPS_IOVEC_FLAG_EOT | SPS_IOVEC_FLAG_NWD);
+	}
+}
+
+static void _qce_ccm_get_around_output(struct qce_device *pce_dev,
+	struct ce_request_info *preq_info, enum qce_cipher_dir_enum dir)
+{
+	struct ce_sps_data *pce_sps_data;
+
+	pce_sps_data = &preq_info->ce_sps;
+
+	if ((dir == QCE_DECRYPT) && pce_dev->no_get_around &&
+			!(pce_dev->no_ccm_mac_status_get_around)) {
+		_qce_sps_add_data(GET_PHYS_ADDR(pce_sps_data->ignore_buffer),
+			pce_dev->ce_bam_info.ce_burst_size,
+			&pce_sps_data->out_transfer);
+		_qce_sps_add_data(GET_PHYS_ADDR(pce_sps_data->result_dump_null),
+			CRYPTO_RESULT_DUMP_SIZE, &pce_sps_data->out_transfer);
+	}
+}
+
+/* QCE_DUMMY_REQ */
+static void qce_dummy_complete(void *cookie, unsigned char *digest,
+		unsigned char *authdata, int ret)
+{
+	if (!cookie)
+		pr_err("invalid cookie\n");
+}
+
+static int qce_dummy_req(struct qce_device *pce_dev)
+{
+	int ret = 0;
+
+	if (atomic_xchg(&pce_dev->ce_request_info[DUMMY_REQ_INDEX].in_use,
+				true))
+		return -EBUSY;
+	ret = qce_process_sha_req(pce_dev, NULL);
+	pce_dev->qce_stats.no_of_dummy_reqs++;
+	return ret;
+}
+
+static int select_mode(struct qce_device *pce_dev,
+		struct ce_request_info *preq_info)
+{
+	struct ce_sps_data *pce_sps_data = &preq_info->ce_sps;
+	unsigned int no_of_queued_req;
+	unsigned int cadence;
+
+	if (!pce_dev->no_get_around) {
+		_qce_set_flag(&pce_sps_data->out_transfer, SPS_IOVEC_FLAG_INT);
+		return 0;
+	}
+
+	/*
+	 * claim ownership of device
+	 */
+again:
+	if (cmpxchg(&pce_dev->owner, QCE_OWNER_NONE, QCE_OWNER_CLIENT)
+							!= QCE_OWNER_NONE) {
+		ndelay(40);
+		goto again;
+	}
+	no_of_queued_req = atomic_inc_return(&pce_dev->no_of_queued_req);
+	if (pce_dev->mode == IN_INTERRUPT_MODE) {
+		if (no_of_queued_req >= MAX_BUNCH_MODE_REQ) {
+			pce_dev->mode = IN_BUNCH_MODE;
+			pr_debug("pcedev %d mode switch to BUNCH\n",
+					pce_dev->dev_no);
+			_qce_set_flag(&pce_sps_data->out_transfer,
+					SPS_IOVEC_FLAG_INT);
+			pce_dev->intr_cadence = 0;
+			atomic_set(&pce_dev->bunch_cmd_seq, 1);
+			atomic_set(&pce_dev->last_intr_seq, 1);
+			mod_timer(&(pce_dev->timer),
+					(jiffies + DELAY_IN_JIFFIES));
+		} else {
+			_qce_set_flag(&pce_sps_data->out_transfer,
+					SPS_IOVEC_FLAG_INT);
+		}
+	} else {
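+		/*
+		 * Interrupt-coalescing heuristic: raise an interrupt
+		 * roughly once every "cadence" requests, where cadence
+		 * scales with the request length in 128-byte units and is
+		 * capped at SET_INTR_AT_REQ; e.g. a 512-byte request gives
+		 * cadence = (512 >> 7) + 1 = 5.
+		 */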
+		pce_dev->intr_cadence++;
+		cadence = (preq_info->req_len >> 7) + 1;
+		if (cadence > SET_INTR_AT_REQ)
+			cadence = SET_INTR_AT_REQ;
+		if (pce_dev->intr_cadence < cadence || ((pce_dev->intr_cadence
+					== cadence) && pce_dev->cadence_flag))
+			atomic_inc(&pce_dev->bunch_cmd_seq);
+		else {
+			_qce_set_flag(&pce_sps_data->out_transfer,
+					SPS_IOVEC_FLAG_INT);
+			pce_dev->intr_cadence = 0;
+			atomic_set(&pce_dev->bunch_cmd_seq, 0);
+			atomic_set(&pce_dev->last_intr_seq, 0);
+			pce_dev->cadence_flag = !pce_dev->cadence_flag;
+		}
+	}
+
+	return 0;
+}
+
+static int _qce_aead_ccm_req(void *handle, struct qce_req *q_req)
+{
+	int rc = 0;
+	struct qce_device *pce_dev = (struct qce_device *) handle;
+	struct aead_request *areq = (struct aead_request *) q_req->areq;
+	uint32_t authsize = q_req->authsize;
+	uint32_t totallen_in, out_len;
+	uint32_t hw_pad_out = 0;
+	int ce_burst_size;
+	struct qce_cmdlist_info *cmdlistinfo = NULL;
+	int req_info = -1;
+	struct ce_request_info *preq_info;
+	struct ce_sps_data *pce_sps_data;
+
+	req_info = qce_alloc_req_info(pce_dev);
+	if (req_info < 0)
+		return -EBUSY;
+	preq_info = &pce_dev->ce_request_info[req_info];
+	pce_sps_data = &preq_info->ce_sps;
+
+	ce_burst_size = pce_dev->ce_bam_info.ce_burst_size;
+	totallen_in = areq->cryptlen + q_req->assoclen;
+	if (q_req->dir == QCE_ENCRYPT) {
+		q_req->cryptlen = areq->cryptlen;
+		out_len = areq->cryptlen + authsize;
+		hw_pad_out = ALIGN(authsize, ce_burst_size) - authsize;
+	} else {
+		q_req->cryptlen = areq->cryptlen - authsize;
+		out_len = q_req->cryptlen;
+		hw_pad_out = authsize;
+	}
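+	/*
+	 * Worked example: for encryption with authsize = 12 and a 64-byte
+	 * burst, hw_pad_out = ALIGN(12, 64) - 12 = 52 pad bytes follow the
+	 * MAC so the output stays burst aligned.
+	 */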
+
+	/*
+	 * Crypto 5.0 hardware has a burst-size alignment requirement
+	 * for data descriptors, so the agent above (qcrypto) prepares
+	 * the source scatter list with memory starting with the
+	 * associated data, followed by the data stream to be ciphered.
+	 * The destination scatter list points to the same data area
+	 * as the source.
+	 */
+	if (pce_dev->ce_bam_info.minor_version == 0)
+		preq_info->src_nents = count_sg(areq->src, totallen_in);
+	else
+		preq_info->src_nents = count_sg(areq->src, areq->cryptlen +
+							areq->assoclen);
+
+	if (q_req->assoclen) {
+		preq_info->assoc_nents = count_sg(q_req->asg, q_req->assoclen);
+
+		/* formatted associated data input */
+		qce_dma_map_sg(pce_dev->pdev, q_req->asg,
+			preq_info->assoc_nents, DMA_TO_DEVICE);
+		preq_info->asg = q_req->asg;
+	} else {
+		preq_info->assoc_nents = 0;
+		preq_info->asg = NULL;
+	}
+	/* cipher input */
+	qce_dma_map_sg(pce_dev->pdev, areq->src, preq_info->src_nents,
+			(areq->src == areq->dst) ? DMA_BIDIRECTIONAL :
+							DMA_TO_DEVICE);
+	/* cipher + mac output for encryption */
+	if (areq->src != areq->dst) {
+		if (pce_dev->ce_bam_info.minor_version == 0)
+			/*
+			 * The destination scatter list points to the same
+			 * data area as the source.
+			 * Note: the associated data is passed through
+			 * at the beginning of the destination area.
+			 */
+			preq_info->dst_nents = count_sg(areq->dst,
+						out_len + areq->assoclen);
+		else
+			preq_info->dst_nents = count_sg(areq->dst, out_len +
+						areq->assoclen);
+
+		qce_dma_map_sg(pce_dev->pdev, areq->dst, preq_info->dst_nents,
+				DMA_FROM_DEVICE);
+	} else {
+		preq_info->dst_nents = preq_info->src_nents;
+	}
+
+	if (pce_dev->support_cmd_dscr) {
+		cmdlistinfo = _ce_get_cipher_cmdlistinfo(pce_dev, req_info,
+								 q_req);
+		if (cmdlistinfo == NULL) {
+			pr_err("Unsupported cipher algorithm %d, mode %d\n",
+						q_req->alg, q_req->mode);
+			qce_free_req_info(pce_dev, req_info, false);
+			return -EINVAL;
+		}
+		/* set up crypto device */
+		rc = _ce_setup_cipher(pce_dev, q_req, totallen_in,
+					q_req->assoclen, cmdlistinfo);
+	} else {
+		/* set up crypto device */
+		rc = _ce_setup_cipher_direct(pce_dev, q_req, totallen_in,
+					q_req->assoclen);
+	}
+
+	if (rc < 0)
+		goto bad;
+
+	preq_info->mode = q_req->mode;
+
+	/* setup for callback, and issue command to bam */
+	preq_info->areq = q_req->areq;
+	preq_info->qce_cb = q_req->qce_cb;
+	preq_info->dir = q_req->dir;
+
+	/* setup xfer type for producer callback handling */
+	preq_info->xfer_type = QCE_XFER_AEAD;
+	preq_info->req_len = totallen_in;
+
+	_qce_sps_iovec_count_init(pce_dev, req_info);
+
+	if (pce_dev->support_cmd_dscr)
+		_qce_sps_add_cmd(pce_dev, SPS_IOVEC_FLAG_LOCK, cmdlistinfo,
+					&pce_sps_data->in_transfer);
+
+	if (pce_dev->ce_bam_info.minor_version == 0) {
+		/* not supported on this path; make sure an error is returned */
+		rc = -EINVAL;
+		goto bad;
+	} else {
+		if (q_req->assoclen && (_qce_sps_add_sg_data(
+			pce_dev, q_req->asg, q_req->assoclen,
+					 &pce_sps_data->in_transfer)))
+			goto bad;
+		if (_qce_sps_add_sg_data_off(pce_dev, areq->src, areq->cryptlen,
+					areq->assoclen,
+					&pce_sps_data->in_transfer))
+			goto bad;
+		_qce_set_flag(&pce_sps_data->in_transfer,
+				SPS_IOVEC_FLAG_EOT|SPS_IOVEC_FLAG_NWD);
+
+		_qce_ccm_get_around_input(pce_dev, preq_info, q_req->dir);
+
+		if (pce_dev->no_get_around)
+			_qce_sps_add_cmd(pce_dev, SPS_IOVEC_FLAG_UNLOCK,
+				&pce_sps_data->cmdlistptr.unlock_all_pipes,
+				&pce_sps_data->in_transfer);
+
+		/* Pass through to ignore associated data */
+		if (_qce_sps_add_data(
+				GET_PHYS_ADDR(pce_sps_data->ignore_buffer),
+				q_req->assoclen,
+				&pce_sps_data->out_transfer))
+			goto bad;
+		if (_qce_sps_add_sg_data_off(pce_dev, areq->dst, out_len,
+					areq->assoclen,
+					&pce_sps_data->out_transfer))
+			goto bad;
+		/* Pass through to ignore hw_pad (padding of the MAC data) */
+		if (_qce_sps_add_data(
+				GET_PHYS_ADDR(pce_sps_data->ignore_buffer),
+				hw_pad_out, &pce_sps_data->out_transfer))
+			goto bad;
+		if (pce_dev->no_get_around ||
+				totallen_in <= SPS_MAX_PKT_SIZE) {
+			if (_qce_sps_add_data(
+				GET_PHYS_ADDR(pce_sps_data->result_dump),
+					CRYPTO_RESULT_DUMP_SIZE,
+					  &pce_sps_data->out_transfer))
+				goto bad;
+			pce_sps_data->producer_state = QCE_PIPE_STATE_COMP;
+		} else {
+			pce_sps_data->producer_state = QCE_PIPE_STATE_IDLE;
+		}
+
+		_qce_ccm_get_around_output(pce_dev, preq_info, q_req->dir);
+
+		select_mode(pce_dev, preq_info);
+		rc = _qce_sps_transfer(pce_dev, req_info);
+		cmpxchg(&pce_dev->owner, QCE_OWNER_CLIENT, QCE_OWNER_NONE);
+	}
+	if (rc)
+		goto bad;
+	return 0;
+
+bad:
+	if (preq_info->assoc_nents) {
+		qce_dma_unmap_sg(pce_dev->pdev, q_req->asg,
+				preq_info->assoc_nents, DMA_TO_DEVICE);
+	}
+	if (preq_info->src_nents) {
+		qce_dma_unmap_sg(pce_dev->pdev, areq->src, preq_info->src_nents,
+				(areq->src == areq->dst) ? DMA_BIDIRECTIONAL :
+								DMA_TO_DEVICE);
+	}
+	if (areq->src != areq->dst) {
+		qce_dma_unmap_sg(pce_dev->pdev, areq->dst, preq_info->dst_nents,
+				DMA_FROM_DEVICE);
+	}
+	qce_free_req_info(pce_dev, req_info, false);
+	return rc;
+}
+
+static int _qce_suspend(void *handle)
+{
+	struct qce_device *pce_dev = (struct qce_device *)handle;
+	struct sps_pipe *sps_pipe_info;
+
+	if (handle == NULL)
+		return -ENODEV;
+
+	qce_enable_clk(pce_dev);
+
+	sps_pipe_info = pce_dev->ce_bam_info.consumer.pipe;
+	sps_disconnect(sps_pipe_info);
+
+	sps_pipe_info = pce_dev->ce_bam_info.producer.pipe;
+	sps_disconnect(sps_pipe_info);
+
+	qce_disable_clk(pce_dev);
+	return 0;
+}
+
+static int _qce_resume(void *handle)
+{
+	struct qce_device *pce_dev = (struct qce_device *)handle;
+	struct sps_pipe *sps_pipe_info;
+	struct sps_connect *sps_connect_info;
+	int rc;
+
+	if (handle == NULL)
+		return -ENODEV;
+
+	qce_enable_clk(pce_dev);
+
+	sps_pipe_info = pce_dev->ce_bam_info.consumer.pipe;
+	sps_connect_info = &pce_dev->ce_bam_info.consumer.connect;
+	memset(sps_connect_info->desc.base, 0x00, sps_connect_info->desc.size);
+	rc = sps_connect(sps_pipe_info, sps_connect_info);
+	if (rc) {
+		pr_err("sps_connect() fail pipe_handle=0x%lx, rc = %d\n",
+			(uintptr_t)sps_pipe_info, rc);
+		return rc;
+	}
+	sps_pipe_info = pce_dev->ce_bam_info.producer.pipe;
+	sps_connect_info = &pce_dev->ce_bam_info.producer.connect;
+	memset(sps_connect_info->desc.base, 0x00, sps_connect_info->desc.size);
+	rc = sps_connect(sps_pipe_info, sps_connect_info);
+	if (rc)
+		pr_err("sps_connect() fail pipe_handle=0x%lx, rc = %d\n",
+			(uintptr_t)sps_pipe_info, rc);
+
+	rc = sps_register_event(sps_pipe_info,
+					&pce_dev->ce_bam_info.producer.event);
+	if (rc)
+		pr_err("Producer callback registration failed rc = %d\n", rc);
+
+	qce_disable_clk(pce_dev);
+	return rc;
+}
+
+struct qce_pm_table qce_pm_table  = {_qce_suspend, _qce_resume};
+EXPORT_SYMBOL(qce_pm_table);
+
+int qce_aead_req(void *handle, struct qce_req *q_req)
+{
+	struct qce_device *pce_dev = (struct qce_device *)handle;
+	struct aead_request *areq;
+	uint32_t authsize;
+	struct crypto_aead *aead;
+	uint32_t ivsize;
+	uint32_t totallen;
+	int rc = 0;
+	struct qce_cmdlist_info *cmdlistinfo = NULL;
+	int req_info = -1;
+	struct ce_sps_data *pce_sps_data;
+	struct ce_request_info *preq_info;
+
+	if (q_req->mode == QCE_MODE_CCM)
+		return _qce_aead_ccm_req(handle, q_req);
+
+	req_info = qce_alloc_req_info(pce_dev);
+	if (req_info < 0)
+		return -EBUSY;
+	preq_info = &pce_dev->ce_request_info[req_info];
+	pce_sps_data = &preq_info->ce_sps;
+	areq = (struct aead_request *) q_req->areq;
+	aead = crypto_aead_reqtfm(areq);
+	ivsize = crypto_aead_ivsize(aead);
+	q_req->ivsize = ivsize;
+	authsize = q_req->authsize;
+	if (q_req->dir == QCE_ENCRYPT)
+		q_req->cryptlen = areq->cryptlen;
+	else
+		q_req->cryptlen = areq->cryptlen - authsize;
+
+	if (q_req->cryptlen > UINT_MAX - areq->assoclen) {
+		pr_err("Integer overflow on total aead req length.\n");
+		return -EINVAL;
+	}
+
+	totallen = q_req->cryptlen + areq->assoclen;
+
+	if (pce_dev->support_cmd_dscr) {
+		cmdlistinfo = _ce_get_aead_cmdlistinfo(pce_dev,
+							req_info, q_req);
+		if (cmdlistinfo == NULL) {
+			pr_err("Unsupported aead ciphering algorithm %d, mode %d, ciphering key length %d, auth digest size %d\n",
+				q_req->alg, q_req->mode, q_req->encklen,
+					q_req->authsize);
+			qce_free_req_info(pce_dev, req_info, false);
+			return -EINVAL;
+		}
+		/* set up crypto device */
+		rc = _ce_setup_aead(pce_dev, q_req, totallen,
+					areq->assoclen, cmdlistinfo);
+		if (rc < 0) {
+			qce_free_req_info(pce_dev, req_info, false);
+			return -EINVAL;
+		}
+	}
+
+	/*
+	 * Crypto 5.0 hardware has a burst-size alignment requirement
+	 * for data descriptors, so the agent above (qcrypto) prepares
+	 * the source scatter list with memory starting with the
+	 * associated data, followed by the IV, and then the data stream
+	 * to be ciphered.
+	 */
+	preq_info->src_nents = count_sg(areq->src, totallen);
+
+	/* cipher input */
+	qce_dma_map_sg(pce_dev->pdev, areq->src, preq_info->src_nents,
+			(areq->src == areq->dst) ? DMA_BIDIRECTIONAL :
+							DMA_TO_DEVICE);
+	/* cipher output for encryption */
+	if (areq->src != areq->dst) {
+		preq_info->dst_nents = count_sg(areq->dst, totallen);
+
+		qce_dma_map_sg(pce_dev->pdev, areq->dst, preq_info->dst_nents,
+				DMA_FROM_DEVICE);
+	}
+
+	/* setup for callback, and issue command to bam */
+	preq_info->areq = q_req->areq;
+	preq_info->qce_cb = q_req->qce_cb;
+	preq_info->dir = q_req->dir;
+	preq_info->asg = NULL;
+
+	/* setup xfer type for producer callback handling */
+	preq_info->xfer_type = QCE_XFER_AEAD;
+	preq_info->req_len = totallen;
+
+	_qce_sps_iovec_count_init(pce_dev, req_info);
+
+	if (pce_dev->support_cmd_dscr) {
+		_qce_sps_add_cmd(pce_dev, SPS_IOVEC_FLAG_LOCK, cmdlistinfo,
+					&pce_sps_data->in_transfer);
+	} else {
+		rc = _ce_setup_aead_direct(pce_dev, q_req, totallen,
+					areq->assoclen);
+		if (rc)
+			goto bad;
+	}
+
+	preq_info->mode = q_req->mode;
+
+	if (pce_dev->ce_bam_info.minor_version == 0) {
+		if (_qce_sps_add_sg_data(pce_dev, areq->src, totallen,
+					&pce_sps_data->in_transfer))
+			goto bad;
+
+		_qce_set_flag(&pce_sps_data->in_transfer,
+				SPS_IOVEC_FLAG_EOT|SPS_IOVEC_FLAG_NWD);
+
+		if (_qce_sps_add_sg_data(pce_dev, areq->dst, totallen,
+				&pce_sps_data->out_transfer))
+			goto bad;
+		if (totallen > SPS_MAX_PKT_SIZE) {
+			_qce_set_flag(&pce_sps_data->out_transfer,
+							SPS_IOVEC_FLAG_INT);
+			pce_sps_data->producer_state = QCE_PIPE_STATE_IDLE;
+		} else {
+			if (_qce_sps_add_data(GET_PHYS_ADDR(
+					pce_sps_data->result_dump),
+					CRYPTO_RESULT_DUMP_SIZE,
+					&pce_sps_data->out_transfer))
+				goto bad;
+			_qce_set_flag(&pce_sps_data->out_transfer,
+							SPS_IOVEC_FLAG_INT);
+			pce_sps_data->producer_state = QCE_PIPE_STATE_COMP;
+		}
+		rc = _qce_sps_transfer(pce_dev, req_info);
+	} else {
+		if (_qce_sps_add_sg_data(pce_dev, areq->src, totallen,
+					&pce_sps_data->in_transfer))
+			goto bad;
+		_qce_set_flag(&pce_sps_data->in_transfer,
+				SPS_IOVEC_FLAG_EOT|SPS_IOVEC_FLAG_NWD);
+
+		if (pce_dev->no_get_around)
+			_qce_sps_add_cmd(pce_dev, SPS_IOVEC_FLAG_UNLOCK,
+				&pce_sps_data->cmdlistptr.unlock_all_pipes,
+				&pce_sps_data->in_transfer);
+
+		if (_qce_sps_add_sg_data(pce_dev, areq->dst, totallen,
+					&pce_sps_data->out_transfer))
+			goto bad;
+
+		if (pce_dev->no_get_around || totallen <= SPS_MAX_PKT_SIZE) {
+			if (_qce_sps_add_data(
+				GET_PHYS_ADDR(pce_sps_data->result_dump),
+					CRYPTO_RESULT_DUMP_SIZE,
+					  &pce_sps_data->out_transfer))
+				goto bad;
+			pce_sps_data->producer_state = QCE_PIPE_STATE_COMP;
+		} else {
+			pce_sps_data->producer_state = QCE_PIPE_STATE_IDLE;
+		}
+		select_mode(pce_dev, preq_info);
+		rc = _qce_sps_transfer(pce_dev, req_info);
+		cmpxchg(&pce_dev->owner, QCE_OWNER_CLIENT, QCE_OWNER_NONE);
+	}
+	if (rc)
+		goto bad;
+	return 0;
+
+bad:
+	if (preq_info->src_nents)
+		qce_dma_unmap_sg(pce_dev->pdev, areq->src, preq_info->src_nents,
+				(areq->src == areq->dst) ? DMA_BIDIRECTIONAL :
+								DMA_TO_DEVICE);
+	if (areq->src != areq->dst)
+		qce_dma_unmap_sg(pce_dev->pdev, areq->dst, preq_info->dst_nents,
+				DMA_FROM_DEVICE);
+	qce_free_req_info(pce_dev, req_info, false);
+
+	return rc;
+}
+EXPORT_SYMBOL(qce_aead_req);
+
+int qce_ablk_cipher_req(void *handle, struct qce_req *c_req)
+{
+	int rc = 0;
+	struct qce_device *pce_dev = (struct qce_device *) handle;
+	struct ablkcipher_request *areq = (struct ablkcipher_request *)
+						c_req->areq;
+	struct qce_cmdlist_info *cmdlistinfo = NULL;
+	int req_info = -1;
+	struct ce_sps_data *pce_sps_data;
+	struct ce_request_info *preq_info;
+
+	req_info = qce_alloc_req_info(pce_dev);
+	if (req_info < 0)
+		return -EBUSY;
+	preq_info = &pce_dev->ce_request_info[req_info];
+	pce_sps_data = &preq_info->ce_sps;
+
+	preq_info->src_nents = 0;
+	preq_info->dst_nents = 0;
+
+	/* cipher input */
+	preq_info->src_nents = count_sg(areq->src, areq->nbytes);
+
+	qce_dma_map_sg(pce_dev->pdev, areq->src, preq_info->src_nents,
+		(areq->src == areq->dst) ? DMA_BIDIRECTIONAL :
+							DMA_TO_DEVICE);
+	/* cipher output */
+	if (areq->src != areq->dst) {
+		preq_info->dst_nents = count_sg(areq->dst, areq->nbytes);
+		qce_dma_map_sg(pce_dev->pdev, areq->dst,
+				preq_info->dst_nents, DMA_FROM_DEVICE);
+	} else {
+		preq_info->dst_nents = preq_info->src_nents;
+	}
+	preq_info->dir = c_req->dir;
+	if ((pce_dev->ce_bam_info.minor_version == 0) &&
+			(preq_info->dir == QCE_DECRYPT) &&
+			(c_req->mode == QCE_MODE_CBC)) {
+		memcpy(preq_info->dec_iv, (unsigned char *)
+			sg_virt(areq->src) + areq->src->length - 16,
+			NUM_OF_CRYPTO_CNTR_IV_REG * CRYPTO_REG_SIZE);
+	}
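+	/*
+	 * For CBC decrypt on the 5.0.0 core the last ciphertext block is
+	 * saved before the (possibly in-place) operation overwrites it,
+	 * presumably to serve as the chaining IV for the next request.
+	 */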
+
+	/* set up crypto device */
+	if (pce_dev->support_cmd_dscr) {
+		cmdlistinfo = _ce_get_cipher_cmdlistinfo(pce_dev,
+							req_info, c_req);
+		if (cmdlistinfo == NULL) {
+			pr_err("Unsupported cipher algorithm %d, mode %d\n",
+						c_req->alg, c_req->mode);
+			qce_free_req_info(pce_dev, req_info, false);
+			return -EINVAL;
+		}
+		rc = _ce_setup_cipher(pce_dev, c_req, areq->nbytes, 0,
+							cmdlistinfo);
+	} else {
+		rc = _ce_setup_cipher_direct(pce_dev, c_req, areq->nbytes, 0);
+	}
+	if (rc < 0)
+		goto bad;
+
+	preq_info->mode = c_req->mode;
+
+	/* setup for client callback, and issue command to BAM */
+	preq_info->areq = areq;
+	preq_info->qce_cb = c_req->qce_cb;
+
+	/* setup xfer type for producer callback handling */
+	preq_info->xfer_type = QCE_XFER_CIPHERING;
+	preq_info->req_len = areq->nbytes;
+
+	_qce_sps_iovec_count_init(pce_dev, req_info);
+	if (pce_dev->support_cmd_dscr)
+		_qce_sps_add_cmd(pce_dev, SPS_IOVEC_FLAG_LOCK, cmdlistinfo,
+					&pce_sps_data->in_transfer);
+	if (_qce_sps_add_sg_data(pce_dev, areq->src, areq->nbytes,
+					&pce_sps_data->in_transfer))
+		goto bad;
+	_qce_set_flag(&pce_sps_data->in_transfer,
+				SPS_IOVEC_FLAG_EOT|SPS_IOVEC_FLAG_NWD);
+
+	if (pce_dev->no_get_around)
+		_qce_sps_add_cmd(pce_dev, SPS_IOVEC_FLAG_UNLOCK,
+			&pce_sps_data->cmdlistptr.unlock_all_pipes,
+			&pce_sps_data->in_transfer);
+
+	if (_qce_sps_add_sg_data(pce_dev, areq->dst, areq->nbytes,
+					&pce_sps_data->out_transfer))
+		goto bad;
+	if (pce_dev->no_get_around || areq->nbytes <= SPS_MAX_PKT_SIZE) {
+		pce_sps_data->producer_state = QCE_PIPE_STATE_COMP;
+		if (_qce_sps_add_data(
+				GET_PHYS_ADDR(pce_sps_data->result_dump),
+				CRYPTO_RESULT_DUMP_SIZE,
+				&pce_sps_data->out_transfer))
+			goto bad;
+	} else {
+		pce_sps_data->producer_state = QCE_PIPE_STATE_IDLE;
+	}
+
+	select_mode(pce_dev, preq_info);
+	rc = _qce_sps_transfer(pce_dev, req_info);
+	cmpxchg(&pce_dev->owner, QCE_OWNER_CLIENT, QCE_OWNER_NONE);
+	if (rc)
+		goto bad;
+
+	return 0;
+bad:
+	if (areq->src != areq->dst) {
+		if (preq_info->dst_nents) {
+			qce_dma_unmap_sg(pce_dev->pdev, areq->dst,
+			preq_info->dst_nents, DMA_FROM_DEVICE);
+		}
+	}
+	if (preq_info->src_nents) {
+		qce_dma_unmap_sg(pce_dev->pdev, areq->src,
+				preq_info->src_nents,
+				(areq->src == areq->dst) ?
+				DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
+	}
+	qce_free_req_info(pce_dev, req_info, false);
+	return rc;
+}
+EXPORT_SYMBOL(qce_ablk_cipher_req);
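+
+/*
+ * Minimal caller sketch for the cipher entry point (illustrative only;
+ * the struct qce_req field names are taken from the uses above, the
+ * handle and callback are hypothetical, and key/IV fields (enckey,
+ * encklen, iv, ivsize, cryptlen) are omitted for brevity):
+ *
+ *	struct qce_req creq = {0};
+ *
+ *	creq.alg = CIPHER_ALG_AES;
+ *	creq.mode = QCE_MODE_CBC;
+ *	creq.dir = QCE_ENCRYPT;
+ *	creq.qce_cb = my_cipher_done;	// hypothetical completion callback
+ *	creq.areq = ablk_req;		// struct ablkcipher_request *
+ *	rc = qce_ablk_cipher_req(qce_handle, &creq);
+ *	// -EBUSY means no free slot in ce_request_info[]
+ */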
+
+int qce_process_sha_req(void *handle, struct qce_sha_req *sreq)
+{
+	struct qce_device *pce_dev = (struct qce_device *) handle;
+	int rc;
+
+	struct ahash_request *areq;
+	struct qce_cmdlist_info *cmdlistinfo = NULL;
+	int req_info = -1;
+	struct ce_sps_data *pce_sps_data;
+	struct ce_request_info *preq_info;
+	bool is_dummy = false;
+
+	if (!sreq) {
+		sreq = &(pce_dev->dummyreq.sreq);
+		req_info = DUMMY_REQ_INDEX;
+		is_dummy = true;
+	} else {
+		req_info = qce_alloc_req_info(pce_dev);
+		if (req_info < 0)
+			return -EBUSY;
+	}
+
+	areq = (struct ahash_request *)sreq->areq;
+	preq_info = &pce_dev->ce_request_info[req_info];
+	pce_sps_data = &preq_info->ce_sps;
+
+	preq_info->src_nents = count_sg(sreq->src, sreq->size);
+	qce_dma_map_sg(pce_dev->pdev, sreq->src, preq_info->src_nents,
+							DMA_TO_DEVICE);
+
+	if (pce_dev->support_cmd_dscr) {
+		cmdlistinfo = _ce_get_hash_cmdlistinfo(pce_dev, req_info, sreq);
+		if (cmdlistinfo == NULL) {
+			pr_err("Unsupported hash algorithm %d\n", sreq->alg);
+			qce_free_req_info(pce_dev, req_info, false);
+			return -EINVAL;
+		}
+		rc = _ce_setup_hash(pce_dev, sreq, cmdlistinfo);
+	} else {
+		rc = _ce_setup_hash_direct(pce_dev, sreq);
+	}
+	if (rc < 0)
+		goto bad;
+
+	preq_info->areq = areq;
+	preq_info->qce_cb = sreq->qce_cb;
+
+	/* setup xfer type for producer callback handling */
+	preq_info->xfer_type = QCE_XFER_HASHING;
+	preq_info->req_len = sreq->size;
+
+	_qce_sps_iovec_count_init(pce_dev, req_info);
+
+	if (pce_dev->support_cmd_dscr)
+		_qce_sps_add_cmd(pce_dev, SPS_IOVEC_FLAG_LOCK, cmdlistinfo,
+					&pce_sps_data->in_transfer);
+	if (_qce_sps_add_sg_data(pce_dev, areq->src, areq->nbytes,
+						 &pce_sps_data->in_transfer))
+		goto bad;
+
+	/*
+	 * Always feed some input data: zero-length transfers (ZLT) do
+	 * not work with BAM-NDP.
+	 */
+	if (!areq->nbytes)
+		_qce_sps_add_data(
+			GET_PHYS_ADDR(pce_sps_data->ignore_buffer),
+			pce_dev->ce_bam_info.ce_burst_size,
+			&pce_sps_data->in_transfer);
+	_qce_set_flag(&pce_sps_data->in_transfer,
+					SPS_IOVEC_FLAG_EOT|SPS_IOVEC_FLAG_NWD);
+	if (pce_dev->no_get_around)
+		_qce_sps_add_cmd(pce_dev, SPS_IOVEC_FLAG_UNLOCK,
+			&pce_sps_data->cmdlistptr.unlock_all_pipes,
+			&pce_sps_data->in_transfer);
+
+	if (_qce_sps_add_data(GET_PHYS_ADDR(pce_sps_data->result_dump),
+					CRYPTO_RESULT_DUMP_SIZE,
+					  &pce_sps_data->out_transfer))
+		goto bad;
+
+	if (is_dummy) {
+		_qce_set_flag(&pce_sps_data->out_transfer, SPS_IOVEC_FLAG_INT);
+		rc = _qce_sps_transfer(pce_dev, req_info);
+	} else {
+		select_mode(pce_dev, preq_info);
+		rc = _qce_sps_transfer(pce_dev, req_info);
+		cmpxchg(&pce_dev->owner, QCE_OWNER_CLIENT, QCE_OWNER_NONE);
+	}
+	if (rc)
+		goto bad;
+	return 0;
+bad:
+	if (preq_info->src_nents) {
+		qce_dma_unmap_sg(pce_dev->pdev, sreq->src,
+				preq_info->src_nents, DMA_TO_DEVICE);
+	}
+	qce_free_req_info(pce_dev, req_info, false);
+	return rc;
+}
+EXPORT_SYMBOL(qce_process_sha_req);
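+
+/*
+ * Passing a NULL sreq selects the preallocated dummy request at
+ * DUMMY_REQ_INDEX (built by setup_dummy_req() below): it hashes a fixed
+ * buffer and forces SPS_IOVEC_FLAG_INT on the producer pipe, presumably
+ * so a completion interrupt can be provoked when the engine is running
+ * in batched, interrupt-less mode.
+ */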
+
+int qce_f8_req(void *handle, struct qce_f8_req *req,
+			void *cookie, qce_comp_func_ptr_t qce_cb)
+{
+	struct qce_device *pce_dev = (struct qce_device *) handle;
+	bool key_stream_mode;
+	dma_addr_t dst;
+	int rc;
+	struct qce_cmdlist_info *cmdlistinfo;
+	int req_info = -1;
+	struct ce_request_info *preq_info;
+	struct ce_sps_data *pce_sps_data;
+
+	req_info = qce_alloc_req_info(pce_dev);
+	if (req_info < 0)
+		return -EBUSY;
+	preq_info = &pce_dev->ce_request_info[req_info];
+	pce_sps_data = &preq_info->ce_sps;
+
+	switch (req->algorithm) {
+	case QCE_OTA_ALGO_KASUMI:
+		cmdlistinfo = &pce_sps_data->cmdlistptr.f8_kasumi;
+		break;
+	case QCE_OTA_ALGO_SNOW3G:
+		cmdlistinfo = &pce_sps_data->cmdlistptr.f8_snow3g;
+		break;
+	default:
+		qce_free_req_info(pce_dev, req_info, false);
+		return -EINVAL;
+	}
+
+	/* key stream mode (data_in == NULL) is not supported */
+	key_stream_mode = (req->data_in == NULL);
+	if (key_stream_mode || (req->bearer >= QCE_OTA_MAX_BEARER)) {
+		qce_free_req_info(pce_dev, req_info, false);
+		return -EINVAL;
+	}
+
+	/* F8 cipher input */
+	preq_info->phy_ota_src = dma_map_single(pce_dev->pdev,
+					req->data_in, req->data_len,
+					(req->data_in == req->data_out) ?
+					DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
+
+	/* F8 cipher output */
+	if (req->data_in != req->data_out) {
+		dst = dma_map_single(pce_dev->pdev, req->data_out,
+				req->data_len, DMA_FROM_DEVICE);
+		preq_info->phy_ota_dst = dst;
+	} else {
+		/* in place ciphering */
+		dst = preq_info->phy_ota_src;
+		preq_info->phy_ota_dst = 0;
+	}
+	preq_info->ota_size = req->data_len;
+
+	/* set up crypto device */
+	if (pce_dev->support_cmd_dscr)
+		rc = _ce_f8_setup(pce_dev, req, key_stream_mode, 1, 0,
+				 req->data_len, cmdlistinfo);
+	else
+		rc = _ce_f8_setup_direct(pce_dev, req, key_stream_mode, 1, 0,
+				 req->data_len);
+	if (rc < 0)
+		goto bad;
+
+	/* setup for callback, and issue command to sps */
+	preq_info->areq = cookie;
+	preq_info->qce_cb = qce_cb;
+
+	/* setup xfer type for producer callback handling */
+	preq_info->xfer_type = QCE_XFER_F8;
+	preq_info->req_len = req->data_len;
+
+	_qce_sps_iovec_count_init(pce_dev, req_info);
+
+	if (pce_dev->support_cmd_dscr)
+		_qce_sps_add_cmd(pce_dev, SPS_IOVEC_FLAG_LOCK, cmdlistinfo,
+					&pce_sps_data->in_transfer);
+
+	_qce_sps_add_data((uint32_t)preq_info->phy_ota_src, req->data_len,
+					&pce_sps_data->in_transfer);
+
+	_qce_set_flag(&pce_sps_data->in_transfer,
+			SPS_IOVEC_FLAG_EOT|SPS_IOVEC_FLAG_NWD);
+
+	_qce_sps_add_cmd(pce_dev, SPS_IOVEC_FLAG_UNLOCK,
+			&pce_sps_data->cmdlistptr.unlock_all_pipes,
+					&pce_sps_data->in_transfer);
+
+	_qce_sps_add_data((uint32_t)dst, req->data_len,
+					&pce_sps_data->out_transfer);
+
+	_qce_sps_add_data(GET_PHYS_ADDR(pce_sps_data->result_dump),
+					CRYPTO_RESULT_DUMP_SIZE,
+					  &pce_sps_data->out_transfer);
+
+	select_mode(pce_dev, preq_info);
+	rc = _qce_sps_transfer(pce_dev, req_info);
+	cmpxchg(&pce_dev->owner, QCE_OWNER_CLIENT, QCE_OWNER_NONE);
+	if (rc)
+		goto bad;
+	return 0;
+bad:
+	if (preq_info->phy_ota_dst != 0)
+		dma_unmap_single(pce_dev->pdev, preq_info->phy_ota_dst,
+				req->data_len, DMA_FROM_DEVICE);
+	if (preq_info->phy_ota_src != 0)
+		dma_unmap_single(pce_dev->pdev, preq_info->phy_ota_src,
+				req->data_len,
+				(req->data_in == req->data_out) ?
+					DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
+	qce_free_req_info(pce_dev, req_info, false);
+	return rc;
+}
+EXPORT_SYMBOL(qce_f8_req);
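+
+/*
+ * f8 is the 3GPP over-the-air confidentiality (ciphering) function and
+ * f9 the matching integrity function; both are served here by the
+ * Kasumi and SNOW 3G engine cores selected through the command lists.
+ */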
+
+int qce_f8_multi_pkt_req(void *handle, struct qce_f8_multi_pkt_req *mreq,
+			void *cookie, qce_comp_func_ptr_t qce_cb)
+{
+	struct qce_device *pce_dev = (struct qce_device *) handle;
+	uint16_t num_pkt = mreq->num_pkt;
+	uint16_t cipher_start = mreq->cipher_start;
+	uint16_t cipher_size = mreq->cipher_size;
+	struct qce_f8_req *req = &mreq->qce_f8_req;
+	uint32_t total;
+	dma_addr_t dst = 0;
+	int rc = 0;
+	struct qce_cmdlist_info *cmdlistinfo;
+	int req_info = -1;
+	struct ce_request_info *preq_info;
+	struct ce_sps_data *pce_sps_data;
+
+	req_info = qce_alloc_req_info(pce_dev);
+	if (req_info < 0)
+		return -EBUSY;
+	preq_info = &pce_dev->ce_request_info[req_info];
+	pce_sps_data = &preq_info->ce_sps;
+
+	switch (req->algorithm) {
+	case QCE_OTA_ALGO_KASUMI:
+		cmdlistinfo = &pce_sps_data->cmdlistptr.f8_kasumi;
+		break;
+	case QCE_OTA_ALGO_SNOW3G:
+		cmdlistinfo = &pce_sps_data->cmdlistptr.f8_snow3g;
+		break;
+	default:
+		qce_free_req_info(pce_dev, req_info, false);
+		return -EINVAL;
+	}
+
+	total = num_pkt * req->data_len;
+
+	/* F8 cipher input */
+	preq_info->phy_ota_src = dma_map_single(pce_dev->pdev,
+				req->data_in, total,
+				(req->data_in == req->data_out) ?
+				DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
+
+	/* F8 cipher output */
+	if (req->data_in != req->data_out) {
+		dst = dma_map_single(pce_dev->pdev, req->data_out, total,
+						DMA_FROM_DEVICE);
+		preq_info->phy_ota_dst = dst;
+	} else {
+		/* in place ciphering */
+		dst = preq_info->phy_ota_src;
+		preq_info->phy_ota_dst = 0;
+	}
+
+	preq_info->ota_size = total;
+
+	/* set up crypto device */
+	if (pce_dev->support_cmd_dscr)
+		rc = _ce_f8_setup(pce_dev, req, false, num_pkt, cipher_start,
+			cipher_size, cmdlistinfo);
+	else
+		rc = _ce_f8_setup_direct(pce_dev, req, false, num_pkt,
+			cipher_start, cipher_size);
+	if (rc)
+		goto bad;
+
+	/* setup for callback, and issue command to sps */
+	preq_info->areq = cookie;
+	preq_info->qce_cb = qce_cb;
+
+	/* setup xfer type for producer callback handling */
+	preq_info->xfer_type = QCE_XFER_F8;
+	preq_info->req_len = total;
+
+	_qce_sps_iovec_count_init(pce_dev, req_info);
+
+	if (pce_dev->support_cmd_dscr)
+		_qce_sps_add_cmd(pce_dev, SPS_IOVEC_FLAG_LOCK, cmdlistinfo,
+					&pce_sps_data->in_transfer);
+
+	_qce_sps_add_data((uint32_t)preq_info->phy_ota_src, total,
+					&pce_sps_data->in_transfer);
+	_qce_set_flag(&pce_sps_data->in_transfer,
+				SPS_IOVEC_FLAG_EOT|SPS_IOVEC_FLAG_NWD);
+
+	_qce_sps_add_cmd(pce_dev, SPS_IOVEC_FLAG_UNLOCK,
+			&pce_sps_data->cmdlistptr.unlock_all_pipes,
+					&pce_sps_data->in_transfer);
+
+	_qce_sps_add_data((uint32_t)dst, total,
+					&pce_sps_data->out_transfer);
+
+	_qce_sps_add_data(GET_PHYS_ADDR(pce_sps_data->result_dump),
+					CRYPTO_RESULT_DUMP_SIZE,
+					  &pce_sps_data->out_transfer);
+
+	select_mode(pce_dev, preq_info);
+	rc = _qce_sps_transfer(pce_dev, req_info);
+	cmpxchg(&pce_dev->owner, QCE_OWNER_CLIENT, QCE_OWNER_NONE);
+
+	if (rc == 0)
+		return 0;
+bad:
+	if (preq_info->phy_ota_dst)
+		dma_unmap_single(pce_dev->pdev, preq_info->phy_ota_dst, total,
+				DMA_FROM_DEVICE);
+	dma_unmap_single(pce_dev->pdev, preq_info->phy_ota_src, total,
+				(req->data_in == req->data_out) ?
+				DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
+	qce_free_req_info(pce_dev, req_info, false);
+	return rc;
+}
+EXPORT_SYMBOL(qce_f8_multi_pkt_req);
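+
+/*
+ * The multi-packet variant assumes num_pkt equal-sized packets laid out
+ * back to back in data_in/data_out (total = num_pkt * data_len), with
+ * cipher_start/cipher_size describing the ciphered window inside each
+ * packet, so the whole region can be mapped with one dma_map_single().
+ */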
+
+int qce_f9_req(void *handle, struct qce_f9_req *req, void *cookie,
+			qce_comp_func_ptr_t qce_cb)
+{
+	struct qce_device *pce_dev = (struct qce_device *) handle;
+	int rc;
+	struct qce_cmdlist_info *cmdlistinfo;
+	int req_info = -1;
+	struct ce_sps_data *pce_sps_data;
+	struct ce_request_info *preq_info;
+
+	req_info = qce_alloc_req_info(pce_dev);
+	if (req_info < 0)
+		return -EBUSY;
+	preq_info = &pce_dev->ce_request_info[req_info];
+	pce_sps_data = &preq_info->ce_sps;
+	switch (req->algorithm) {
+	case QCE_OTA_ALGO_KASUMI:
+		cmdlistinfo = &pce_sps_data->cmdlistptr.f9_kasumi;
+		break;
+	case QCE_OTA_ALGO_SNOW3G:
+		cmdlistinfo = &pce_sps_data->cmdlistptr.f9_snow3g;
+		break;
+	default:
+		qce_free_req_info(pce_dev, req_info, false);
+		return -EINVAL;
+	}
+
+	preq_info->phy_ota_src = dma_map_single(pce_dev->pdev, req->message,
+			req->msize, DMA_TO_DEVICE);
+
+	preq_info->ota_size = req->msize;
+
+	if (pce_dev->support_cmd_dscr)
+		rc = _ce_f9_setup(pce_dev, req, cmdlistinfo);
+	else
+		rc = _ce_f9_setup_direct(pce_dev, req);
+	if (rc < 0)
+		goto bad;
+
+	/* setup for callback, and issue command to sps */
+	preq_info->areq = cookie;
+	preq_info->qce_cb = qce_cb;
+
+	/* setup xfer type for producer callback handling */
+	preq_info->xfer_type = QCE_XFER_F9;
+	preq_info->req_len = req->msize;
+
+	_qce_sps_iovec_count_init(pce_dev, req_info);
+	if (pce_dev->support_cmd_dscr)
+		_qce_sps_add_cmd(pce_dev, SPS_IOVEC_FLAG_LOCK, cmdlistinfo,
+					&pce_sps_data->in_transfer);
+	_qce_sps_add_data((uint32_t)preq_info->phy_ota_src, req->msize,
+					&pce_sps_data->in_transfer);
+	_qce_set_flag(&pce_sps_data->in_transfer,
+				SPS_IOVEC_FLAG_EOT|SPS_IOVEC_FLAG_NWD);
+
+	_qce_sps_add_cmd(pce_dev, SPS_IOVEC_FLAG_UNLOCK,
+			&pce_sps_data->cmdlistptr.unlock_all_pipes,
+					&pce_sps_data->in_transfer);
+
+	_qce_sps_add_data(GET_PHYS_ADDR(pce_sps_data->result_dump),
+					CRYPTO_RESULT_DUMP_SIZE,
+					  &pce_sps_data->out_transfer);
+
+	select_mode(pce_dev, preq_info);
+	rc = _qce_sps_transfer(pce_dev, req_info);
+	cmpxchg(&pce_dev->owner, QCE_OWNER_CLIENT, QCE_OWNER_NONE);
+	if (rc)
+		goto bad;
+	return 0;
+bad:
+	dma_unmap_single(pce_dev->pdev, preq_info->phy_ota_src,
+				req->msize, DMA_TO_DEVICE);
+	qce_free_req_info(pce_dev, req_info, false);
+	return rc;
+}
+EXPORT_SYMBOL(qce_f9_req);
+
+static int __qce_get_device_tree_data(struct platform_device *pdev,
+		struct qce_device *pce_dev)
+{
+	struct resource *resource;
+	int rc = 0;
+
+	pce_dev->is_shared = of_property_read_bool((&pdev->dev)->of_node,
+				"qcom,ce-hw-shared");
+	pce_dev->support_hw_key = of_property_read_bool((&pdev->dev)->of_node,
+				"qcom,ce-hw-key");
+
+	pce_dev->use_sw_aes_cbc_ecb_ctr_algo =
+				of_property_read_bool((&pdev->dev)->of_node,
+				"qcom,use-sw-aes-cbc-ecb-ctr-algo");
+	pce_dev->use_sw_aead_algo =
+				of_property_read_bool((&pdev->dev)->of_node,
+				"qcom,use-sw-aead-algo");
+	pce_dev->use_sw_aes_xts_algo =
+				of_property_read_bool((&pdev->dev)->of_node,
+				"qcom,use-sw-aes-xts-algo");
+	pce_dev->use_sw_ahash_algo =
+				of_property_read_bool((&pdev->dev)->of_node,
+				"qcom,use-sw-ahash-algo");
+	pce_dev->use_sw_hmac_algo =
+				of_property_read_bool((&pdev->dev)->of_node,
+				"qcom,use-sw-hmac-algo");
+	pce_dev->use_sw_aes_ccm_algo =
+				of_property_read_bool((&pdev->dev)->of_node,
+				"qcom,use-sw-aes-ccm-algo");
+	pce_dev->support_clk_mgmt_sus_res = of_property_read_bool(
+		(&pdev->dev)->of_node, "qcom,clk-mgmt-sus-res");
+	pce_dev->support_only_core_src_clk = of_property_read_bool(
+		(&pdev->dev)->of_node, "qcom,support-core-clk-only");
+
+	if (of_property_read_u32((&pdev->dev)->of_node,
+				"qcom,bam-pipe-pair",
+				&pce_dev->ce_bam_info.pipe_pair_index)) {
+		pr_err("Failed to get BAM pipe pair information.\n");
+		return -EINVAL;
+	}
+	if (of_property_read_u32((&pdev->dev)->of_node,
+				"qcom,ce-device",
+				&pce_dev->ce_bam_info.ce_device)) {
+		pr_err("Failed to get CE device information.\n");
+		return -EINVAL;
+	}
+	if (of_property_read_u32((&pdev->dev)->of_node,
+				"qcom,ce-hw-instance",
+				&pce_dev->ce_bam_info.ce_hw_instance)) {
+		pr_err("Failed to get CE hw instance information.\n");
+		return -EINVAL;
+	}
+	if (of_property_read_u32((&pdev->dev)->of_node,
+				"qcom,bam-ee",
+				&pce_dev->ce_bam_info.bam_ee)) {
+		pr_info("BAM Apps EE is not defined, setting to default 1\n");
+		pce_dev->ce_bam_info.bam_ee = 1;
+	}
+	if (of_property_read_u32((&pdev->dev)->of_node,
+				"qcom,ce-opp-freq",
+				&pce_dev->ce_opp_freq_hz)) {
+		pr_info("CE operating frequency is not defined, setting to default 100 MHz\n");
+		pce_dev->ce_opp_freq_hz = CE_CLK_100MHZ;
+	}
+	pce_dev->ce_bam_info.dest_pipe_index	=
+			2 * pce_dev->ce_bam_info.pipe_pair_index;
+	pce_dev->ce_bam_info.src_pipe_index	=
+			pce_dev->ce_bam_info.dest_pipe_index + 1;
+
+	resource = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+							"crypto-base");
+	if (resource) {
+		pce_dev->phy_iobase = resource->start;
+		pce_dev->iobase = ioremap_nocache(resource->start,
+					resource_size(resource));
+		if (!pce_dev->iobase) {
+			pr_err("Cannot map CRYPTO io memory\n");
+			return -ENOMEM;
+		}
+	} else {
+		pr_err("CRYPTO HW mem unavailable.\n");
+		return -ENODEV;
+	}
+
+	resource = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+							"crypto-bam-base");
+	if (resource) {
+		pce_dev->bam_mem = resource->start;
+		pce_dev->bam_mem_size = resource_size(resource);
+	} else {
+		pr_err("CRYPTO BAM mem unavailable.\n");
+		rc = -ENODEV;
+		goto err_getting_bam_info;
+	}
+
+	resource = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+	if (resource) {
+		pce_dev->ce_bam_info.bam_irq = resource->start;
+	} else {
+		pr_err("CRYPTO BAM IRQ unavailable.\n");
+		rc = -ENXIO;
+		goto err_dev;
+	}
+	return rc;
+err_dev:
+	if (pce_dev->ce_bam_info.bam_iobase)
+		iounmap(pce_dev->ce_bam_info.bam_iobase);
+
+err_getting_bam_info:
+	if (pce_dev->iobase)
+		iounmap(pce_dev->iobase);
+
+	return rc;
+}
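+
+/*
+ * Hypothetical device-tree node matching the properties parsed above
+ * (addresses and values are placeholders, not from any real board file):
+ *
+ *	crypto@720000 {
+ *		reg = <0x720000 0x20000>, <0x704000 0x20000>;
+ *		reg-names = "crypto-base", "crypto-bam-base";
+ *		interrupts = <0 207 0>;
+ *		qcom,bam-pipe-pair = <1>;
+ *		qcom,ce-device = <0>;
+ *		qcom,ce-hw-instance = <0>;
+ *		qcom,bam-ee = <0>;		// optional, defaults to 1
+ *		qcom,ce-opp-freq = <100000000>;	// optional, defaults to CE_CLK_100MHZ
+ *	};
+ */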
+
+static int __qce_init_clk(struct qce_device *pce_dev)
+{
+	int rc = 0;
+
+	pce_dev->ce_core_src_clk = clk_get(pce_dev->pdev, "core_clk_src");
+	if (!IS_ERR(pce_dev->ce_core_src_clk)) {
+		rc = clk_set_rate(pce_dev->ce_core_src_clk,
+						pce_dev->ce_opp_freq_hz);
+		if (rc) {
+			pr_err("Unable to set the core src clk @%uMHz.\n",
+					pce_dev->ce_opp_freq_hz/CE_CLK_DIV);
+			goto exit_put_core_src_clk;
+		}
+	} else {
+		if (pce_dev->support_only_core_src_clk) {
+			rc = PTR_ERR(pce_dev->ce_core_src_clk);
+			pce_dev->ce_core_src_clk = NULL;
+			pr_err("Unable to get CE core src clk\n");
+			return rc;
+		} else {
+			pr_warn("Unable to get CE core src clk, set to NULL\n");
+			pce_dev->ce_core_src_clk = NULL;
+		}
+	}
+
+	if (pce_dev->support_only_core_src_clk) {
+		pce_dev->ce_core_clk = NULL;
+		pce_dev->ce_clk = NULL;
+		pce_dev->ce_bus_clk = NULL;
+	} else {
+		pce_dev->ce_core_clk = clk_get(pce_dev->pdev, "core_clk");
+		if (IS_ERR(pce_dev->ce_core_clk)) {
+			rc = PTR_ERR(pce_dev->ce_core_clk);
+			pr_err("Unable to get CE core clk\n");
+			goto exit_put_core_src_clk;
+		}
+		pce_dev->ce_clk = clk_get(pce_dev->pdev, "iface_clk");
+		if (IS_ERR(pce_dev->ce_clk)) {
+			rc = PTR_ERR(pce_dev->ce_clk);
+			pr_err("Unable to get CE interface clk\n");
+			goto exit_put_core_clk;
+		}
+
+		pce_dev->ce_bus_clk = clk_get(pce_dev->pdev, "bus_clk");
+		if (IS_ERR(pce_dev->ce_bus_clk)) {
+			rc = PTR_ERR(pce_dev->ce_bus_clk);
+			pr_err("Unable to get CE BUS interface clk\n");
+			goto exit_put_iface_clk;
+		}
+	}
+	return rc;
+
+exit_put_iface_clk:
+	if (pce_dev->ce_clk)
+		clk_put(pce_dev->ce_clk);
+exit_put_core_clk:
+	if (pce_dev->ce_core_clk)
+		clk_put(pce_dev->ce_core_clk);
+exit_put_core_src_clk:
+	if (pce_dev->ce_core_src_clk)
+		clk_put(pce_dev->ce_core_src_clk);
+	pr_err("Unable to init CE clks, rc = %d\n", rc);
+	return rc;
+}
+
+static void __qce_deinit_clk(struct qce_device *pce_dev)
+{
+	if (pce_dev->ce_bus_clk)
+		clk_put(pce_dev->ce_bus_clk);
+	if (pce_dev->ce_clk)
+		clk_put(pce_dev->ce_clk);
+	if (pce_dev->ce_core_clk)
+		clk_put(pce_dev->ce_core_clk);
+	if (pce_dev->ce_core_src_clk)
+		clk_put(pce_dev->ce_core_src_clk);
+}
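+
+/*
+ * Clock bring-up below is ordered src -> core -> iface -> bus; each
+ * error path unwinds only what was already prepared, and
+ * qce_disable_clk() tears everything down in the reverse order.
+ */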
+
+int qce_enable_clk(void *handle)
+{
+	struct qce_device *pce_dev = (struct qce_device *)handle;
+	int rc = 0;
+
+	if (pce_dev->ce_core_src_clk) {
+		rc = clk_prepare_enable(pce_dev->ce_core_src_clk);
+		if (rc) {
+			pr_err("Unable to enable/prepare CE core src clk\n");
+			return rc;
+		}
+	}
+
+	if (pce_dev->support_only_core_src_clk)
+		return rc;
+
+	if (pce_dev->ce_core_clk) {
+		rc = clk_prepare_enable(pce_dev->ce_core_clk);
+		if (rc) {
+			pr_err("Unable to enable/prepare CE core clk\n");
+			goto exit_disable_core_src_clk;
+		}
+	}
+
+	if (pce_dev->ce_clk) {
+		rc = clk_prepare_enable(pce_dev->ce_clk);
+		if (rc) {
+			pr_err("Unable to enable/prepare CE iface clk\n");
+			goto exit_disable_core_clk;
+		}
+	}
+
+	if (pce_dev->ce_bus_clk) {
+		rc = clk_prepare_enable(pce_dev->ce_bus_clk);
+		if (rc) {
+			pr_err("Unable to enable/prepare CE BUS clk\n");
+			goto exit_disable_ce_clk;
+		}
+	}
+	return rc;
+
+exit_disable_ce_clk:
+	if (pce_dev->ce_clk)
+		clk_disable_unprepare(pce_dev->ce_clk);
+exit_disable_core_clk:
+	if (pce_dev->ce_core_clk)
+		clk_disable_unprepare(pce_dev->ce_core_clk);
+exit_disable_core_src_clk:
+	if (pce_dev->ce_core_src_clk)
+		clk_disable_unprepare(pce_dev->ce_core_src_clk);
+	return rc;
+}
+EXPORT_SYMBOL(qce_enable_clk);
+
+int qce_disable_clk(void *handle)
+{
+	struct qce_device *pce_dev = (struct qce_device *) handle;
+	int rc = 0;
+
+	if (pce_dev->ce_bus_clk)
+		clk_disable_unprepare(pce_dev->ce_bus_clk);
+	if (pce_dev->ce_clk)
+		clk_disable_unprepare(pce_dev->ce_clk);
+	if (pce_dev->ce_core_clk)
+		clk_disable_unprepare(pce_dev->ce_core_clk);
+	if (pce_dev->ce_core_src_clk)
+		clk_disable_unprepare(pce_dev->ce_core_src_clk);
+
+	return rc;
+}
+EXPORT_SYMBOL(qce_disable_clk);
+
+/* dummy req setup */
+static int setup_dummy_req(struct qce_device *pce_dev)
+{
+	char *input =
+	"abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopqopqrpqrs";
+	int len = DUMMY_REQ_DATA_LEN;
+
+	memcpy(pce_dev->dummyreq_in_buf, input, len);
+	sg_init_table(&pce_dev->dummyreq.sg, 1);
+	sg_set_buf(&pce_dev->dummyreq.sg, pce_dev->dummyreq_in_buf, len);
+	sg_mark_end(&pce_dev->dummyreq.sg);
+
+	pce_dev->dummyreq.sreq.alg = QCE_HASH_SHA1;
+	pce_dev->dummyreq.sreq.qce_cb = qce_dummy_complete;
+	pce_dev->dummyreq.sreq.src = &pce_dev->dummyreq.sg;
+	pce_dev->dummyreq.sreq.auth_data[0] = 0;
+	pce_dev->dummyreq.sreq.auth_data[1] = 0;
+	pce_dev->dummyreq.sreq.auth_data[2] = 0;
+	pce_dev->dummyreq.sreq.auth_data[3] = 0;
+	pce_dev->dummyreq.sreq.first_blk = 1;
+	pce_dev->dummyreq.sreq.last_blk = 1;
+	pce_dev->dummyreq.sreq.size = len;
+	pce_dev->dummyreq.sreq.areq = &pce_dev->dummyreq.areq;
+	pce_dev->dummyreq.sreq.flags = 0;
+	pce_dev->dummyreq.sreq.authkey = NULL;
+
+	pce_dev->dummyreq.areq.src = pce_dev->dummyreq.sreq.src;
+	pce_dev->dummyreq.areq.nbytes = pce_dev->dummyreq.sreq.size;
+
+	return 0;
+}
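+
+/*
+ * The dummy payload is the classic FIPS 180 SHA test message
+ * ("abcdbcde..."), presumably sized so DUMMY_REQ_DATA_LEN covers one
+ * full 64-byte SHA block; the resulting digest is simply discarded by
+ * qce_dummy_complete().
+ */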
+
+/* crypto engine open function. */
+void *qce_open(struct platform_device *pdev, int *rc)
+{
+	struct qce_device *pce_dev;
+	int i;
+	static int pcedev_no = 1;
+
+	pce_dev = kzalloc(sizeof(struct qce_device), GFP_KERNEL);
+	if (!pce_dev) {
+		*rc = -ENOMEM;
+		pr_err("Cannot allocate memory: %d\n", *rc);
+		return NULL;
+	}
+	pce_dev->pdev = &pdev->dev;
+
+	mutex_lock(&qce_iomap_mutex);
+	if (pdev->dev.of_node) {
+		*rc = __qce_get_device_tree_data(pdev, pce_dev);
+		if (*rc)
+			goto err_pce_dev;
+	} else {
+		*rc = -EINVAL;
+		pr_err("Device Node not found.\n");
+		goto err_pce_dev;
+	}
+
+	for (i = 0; i < MAX_QCE_ALLOC_BAM_REQ; i++)
+		atomic_set(&pce_dev->ce_request_info[i].in_use, false);
+	pce_dev->ce_request_index = 0;
+
+	pce_dev->memsize = 10 * PAGE_SIZE * MAX_QCE_ALLOC_BAM_REQ;
+	pce_dev->coh_vmem = dma_alloc_coherent(pce_dev->pdev,
+			pce_dev->memsize, &pce_dev->coh_pmem, GFP_KERNEL);
+
+	if (pce_dev->coh_vmem == NULL) {
+		*rc = -ENOMEM;
+		pr_err("Cannot allocate coherent memory for sps data\n");
+		goto err_iobase;
+	}
+
+	pce_dev->iovec_memsize = TOTAL_IOVEC_SPACE_PER_PIPE *
+						MAX_QCE_ALLOC_BAM_REQ * 2;
+	pce_dev->iovec_vmem = kzalloc(pce_dev->iovec_memsize, GFP_KERNEL);
+	if (pce_dev->iovec_vmem == NULL) {
+		*rc = -ENOMEM;
+		goto err_mem;
+	}
+
+	pce_dev->dummyreq_in_buf = kzalloc(DUMMY_REQ_DATA_LEN, GFP_KERNEL);
+	if (pce_dev->dummyreq_in_buf == NULL) {
+		*rc = -ENOMEM;
+		goto err_mem;
+	}
+
+	*rc = __qce_init_clk(pce_dev);
+	if (*rc)
+		goto err_mem;
+	*rc = qce_enable_clk(pce_dev);
+	if (*rc)
+		goto err_enable_clk;
+
+	if (_probe_ce_engine(pce_dev)) {
+		*rc = -ENXIO;
+		goto err;
+	}
+	*rc = 0;
+
+	qce_init_ce_cfg_val(pce_dev);
+	*rc  = qce_sps_init(pce_dev);
+	if (*rc)
+		goto err;
+	qce_setup_ce_sps_data(pce_dev);
+	qce_disable_clk(pce_dev);
+	setup_dummy_req(pce_dev);
+	atomic_set(&pce_dev->no_of_queued_req, 0);
+	pce_dev->mode = IN_INTERRUPT_MODE;
+	init_timer(&(pce_dev->timer));
+	pce_dev->timer.function = qce_multireq_timeout;
+	pce_dev->timer.data = (unsigned long)pce_dev;
+	pce_dev->timer.expires = jiffies + DELAY_IN_JIFFIES;
+	pce_dev->intr_cadence = 0;
+	pce_dev->dev_no = pcedev_no;
+	pcedev_no++;
+	pce_dev->owner = QCE_OWNER_NONE;
+	mutex_unlock(&qce_iomap_mutex);
+	return pce_dev;
+err:
+	qce_disable_clk(pce_dev);
+
+err_enable_clk:
+	__qce_deinit_clk(pce_dev);
+
+err_mem:
+	kfree(pce_dev->dummyreq_in_buf);
+	kfree(pce_dev->iovec_vmem);
+	if (pce_dev->coh_vmem)
+		dma_free_coherent(pce_dev->pdev, pce_dev->memsize,
+			pce_dev->coh_vmem, pce_dev->coh_pmem);
+err_iobase:
+	if (pce_dev->iobase)
+		iounmap(pce_dev->iobase);
+err_pce_dev:
+	mutex_unlock(&qce_iomap_mutex);
+	kfree(pce_dev);
+	return NULL;
+}
+EXPORT_SYMBOL(qce_open);
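+
+/*
+ * Typical lifecycle, as a sketch (error handling elided):
+ *
+ *	int rc;
+ *	void *qce = qce_open(pdev, &rc);   // NULL on failure, cause in rc
+ *
+ *	qce_enable_clk(qce);
+ *	// ... issue qce_*_req() calls; completions arrive via the qce_cb
+ *	// callback supplied with each request ...
+ *	qce_disable_clk(qce);
+ *	qce_close(qce);
+ */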
+
+/* crypto engine close function. */
+int qce_close(void *handle)
+{
+	struct qce_device *pce_dev = (struct qce_device *) handle;
+
+	if (handle == NULL)
+		return -ENODEV;
+
+	mutex_lock(&qce_iomap_mutex);
+	qce_enable_clk(pce_dev);
+	qce_sps_exit(pce_dev);
+
+	if (pce_dev->iobase)
+		iounmap(pce_dev->iobase);
+	if (pce_dev->coh_vmem)
+		dma_free_coherent(pce_dev->pdev, pce_dev->memsize,
+				pce_dev->coh_vmem, pce_dev->coh_pmem);
+	kfree(pce_dev->dummyreq_in_buf);
+	kfree(pce_dev->iovec_vmem);
+
+	qce_disable_clk(pce_dev);
+	__qce_deinit_clk(pce_dev);
+	mutex_unlock(&qce_iomap_mutex);
+	kfree(handle);
+
+	return 0;
+}
+EXPORT_SYMBOL(qce_close);
+
+#define OTA_SUPPORT_MASK (1 << CRYPTO_ENCR_SNOW3G_SEL |\
+				1 << CRYPTO_ENCR_KASUMI_SEL |\
+				1 << CRYPTO_AUTH_SNOW3G_SEL |\
+				1 << CRYPTO_AUTH_KASUMI_SEL)
+
+int qce_hw_support(void *handle, struct ce_hw_support *ce_support)
+{
+	struct qce_device *pce_dev = (struct qce_device *)handle;
+
+	if (ce_support == NULL)
+		return -EINVAL;
+
+	ce_support->sha1_hmac_20 = false;
+	ce_support->sha1_hmac = false;
+	ce_support->sha256_hmac = false;
+	ce_support->sha_hmac = true;
+	ce_support->cmac  = true;
+	ce_support->aes_key_192 = false;
+	ce_support->aes_xts = true;
+	if ((pce_dev->engines_avail & OTA_SUPPORT_MASK) == OTA_SUPPORT_MASK)
+		ce_support->ota = true;
+	else
+		ce_support->ota = false;
+	ce_support->bam = true;
+	ce_support->is_shared = (pce_dev->is_shared == 1) ? true : false;
+	ce_support->hw_key = pce_dev->support_hw_key;
+	ce_support->aes_ccm = true;
+	ce_support->clk_mgmt_sus_res = pce_dev->support_clk_mgmt_sus_res;
+	if (pce_dev->ce_bam_info.minor_version)
+		ce_support->aligned_only = false;
+	else
+		ce_support->aligned_only = true;
+
+	ce_support->use_sw_aes_cbc_ecb_ctr_algo =
+				pce_dev->use_sw_aes_cbc_ecb_ctr_algo;
+	ce_support->use_sw_aead_algo =
+				pce_dev->use_sw_aead_algo;
+	ce_support->use_sw_aes_xts_algo =
+				pce_dev->use_sw_aes_xts_algo;
+	ce_support->use_sw_ahash_algo =
+				pce_dev->use_sw_ahash_algo;
+	ce_support->use_sw_hmac_algo =
+				pce_dev->use_sw_hmac_algo;
+	ce_support->use_sw_aes_ccm_algo =
+				pce_dev->use_sw_aes_ccm_algo;
+	ce_support->ce_device = pce_dev->ce_bam_info.ce_device;
+	ce_support->ce_hw_instance = pce_dev->ce_bam_info.ce_hw_instance;
+	if (pce_dev->no_get_around)
+		ce_support->max_request = MAX_QCE_BAM_REQ;
+	else
+		ce_support->max_request = 1;
+	return 0;
+}
+EXPORT_SYMBOL(qce_hw_support);
+
+void qce_dump_req(void *handle)
+{
+	int i;
+	bool req_in_use;
+	struct qce_device *pce_dev = (struct qce_device *)handle;
+
+	for (i = 0; i < MAX_QCE_BAM_REQ; i++) {
+		req_in_use = atomic_read(&pce_dev->ce_request_info[i].in_use);
+		pr_info("qce_dump_req %d %d\n", i, req_in_use);
+		if (req_in_use == true)
+			_qce_dump_descr_fifos(pce_dev, i);
+	}
+}
+EXPORT_SYMBOL(qce_dump_req);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Crypto Engine driver");
diff -Nruw linux-4.4.115-fbx/drivers/crypto/msm./qce50.h linux-4.4.115-fbx/drivers/crypto/msm/qce50.h
--- linux-4.4.115-fbx/drivers/crypto/msm./qce50.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/crypto/msm/qce50.h	2019-01-22 16:16:23.111242857 +0100
@@ -0,0 +1,245 @@
+/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#ifndef _DRIVERS_CRYPTO_MSM_QCE50_H_
+#define _DRIVERS_CRYPTO_MSM_QCE50_H_
+
+#include <linux/msm-sps.h>
+
+/* Max data transfer block size between BAM and CE */
+#define MAX_CE_BAM_BURST_SIZE   0x40
+#define QCEBAM_BURST_SIZE	MAX_CE_BAM_BURST_SIZE
+
+#define GET_VIRT_ADDR(x)  \
+		((uintptr_t)pce_dev->coh_vmem +			\
+		((uintptr_t)x - (uintptr_t)pce_dev->coh_pmem))
+#define GET_PHYS_ADDR(x)  \
+		(phys_addr_t)(((uintptr_t)pce_dev->coh_pmem +	\
+		((uintptr_t)x - (uintptr_t)pce_dev->coh_vmem)))
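+
+/*
+ * Both macros convert between the kernel-virtual and DMA views of the
+ * single dma_alloc_coherent() region by carrying the byte offset over
+ * to the other base address; they are only meaningful for addresses
+ * inside that region.
+ */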
+
+#define CRYPTO_REG_SIZE 4
+#define NUM_OF_CRYPTO_AUTH_IV_REG 16
+#define NUM_OF_CRYPTO_CNTR_IV_REG 4
+#define NUM_OF_CRYPTO_AUTH_BYTE_COUNT_REG 4
+#define CRYPTO_TOTAL_REGISTERS_DUMPED   26
+#define CRYPTO_RESULT_DUMP_SIZE   \
+	ALIGN((CRYPTO_TOTAL_REGISTERS_DUMPED * CRYPTO_REG_SIZE), \
+	QCEBAM_BURST_SIZE)
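+
+/*
+ * With the values above: 26 registers * 4 bytes = 104 bytes, rounded up
+ * to the 0x40-byte BAM burst size gives a 128-byte result dump.
+ */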
+
+/* QCE max number of descriptor in a descriptor list */
+#define QCE_MAX_NUM_DESC    128
+#define SPS_MAX_PKT_SIZE  (32 * 1024  - 64)
+
+/* default bam ipc log level */
+#define QCE_BAM_DEFAULT_IPC_LOGLVL 2
+
+/* State of consumer/producer Pipe */
+enum qce_pipe_st_enum {
+	QCE_PIPE_STATE_IDLE = 0,
+	QCE_PIPE_STATE_IN_PROG = 1,
+	QCE_PIPE_STATE_COMP = 2,
+	QCE_PIPE_STATE_LAST
+};
+
+enum qce_xfer_type_enum {
+	QCE_XFER_HASHING,
+	QCE_XFER_CIPHERING,
+	QCE_XFER_AEAD,
+	QCE_XFER_F8,
+	QCE_XFER_F9,
+	QCE_XFER_TYPE_LAST
+};
+
+struct qce_sps_ep_conn_data {
+	struct sps_pipe			*pipe;
+	struct sps_connect		connect;
+	struct sps_register_event	event;
+};
+
+/* CE result dump format */
+struct ce_result_dump_format {
+	uint32_t auth_iv[NUM_OF_CRYPTO_AUTH_IV_REG];
+	uint32_t auth_byte_count[NUM_OF_CRYPTO_AUTH_BYTE_COUNT_REG];
+	uint32_t encr_cntr_iv[NUM_OF_CRYPTO_CNTR_IV_REG];
+	uint32_t status;
+	uint32_t status2;
+};
+
+struct qce_cmdlist_info {
+
+	unsigned long cmdlist;
+	struct sps_command_element *crypto_cfg;
+	struct sps_command_element *encr_seg_cfg;
+	struct sps_command_element *encr_seg_size;
+	struct sps_command_element *encr_seg_start;
+	struct sps_command_element *encr_key;
+	struct sps_command_element *encr_xts_key;
+	struct sps_command_element *encr_cntr_iv;
+	struct sps_command_element *encr_ccm_cntr_iv;
+	struct sps_command_element *encr_mask;
+	struct sps_command_element *encr_xts_du_size;
+
+	struct sps_command_element *auth_seg_cfg;
+	struct sps_command_element *auth_seg_size;
+	struct sps_command_element *auth_seg_start;
+	struct sps_command_element *auth_key;
+	struct sps_command_element *auth_iv;
+	struct sps_command_element *auth_nonce_info;
+	struct sps_command_element *auth_bytecount;
+	struct sps_command_element *seg_size;
+	struct sps_command_element *go_proc;
+	ptrdiff_t size;
+};
+
+struct qce_cmdlistptr_ops {
+	struct qce_cmdlist_info cipher_aes_128_cbc_ctr;
+	struct qce_cmdlist_info cipher_aes_256_cbc_ctr;
+	struct qce_cmdlist_info cipher_aes_128_ecb;
+	struct qce_cmdlist_info cipher_aes_256_ecb;
+	struct qce_cmdlist_info cipher_aes_128_xts;
+	struct qce_cmdlist_info cipher_aes_256_xts;
+	struct qce_cmdlist_info cipher_des_cbc;
+	struct qce_cmdlist_info cipher_des_ecb;
+	struct qce_cmdlist_info cipher_3des_cbc;
+	struct qce_cmdlist_info cipher_3des_ecb;
+	struct qce_cmdlist_info auth_sha1;
+	struct qce_cmdlist_info auth_sha256;
+	struct qce_cmdlist_info auth_sha1_hmac;
+	struct qce_cmdlist_info auth_sha256_hmac;
+	struct qce_cmdlist_info auth_aes_128_cmac;
+	struct qce_cmdlist_info auth_aes_256_cmac;
+	struct qce_cmdlist_info aead_hmac_sha1_cbc_aes_128;
+	struct qce_cmdlist_info aead_hmac_sha1_cbc_aes_256;
+	struct qce_cmdlist_info aead_hmac_sha1_cbc_des;
+	struct qce_cmdlist_info aead_hmac_sha1_cbc_3des;
+	struct qce_cmdlist_info aead_hmac_sha256_cbc_aes_128;
+	struct qce_cmdlist_info aead_hmac_sha256_cbc_aes_256;
+	struct qce_cmdlist_info aead_hmac_sha256_cbc_des;
+	struct qce_cmdlist_info aead_hmac_sha256_cbc_3des;
+	struct qce_cmdlist_info aead_aes_128_ccm;
+	struct qce_cmdlist_info aead_aes_256_ccm;
+	struct qce_cmdlist_info cipher_null;
+	struct qce_cmdlist_info f8_kasumi;
+	struct qce_cmdlist_info f8_snow3g;
+	struct qce_cmdlist_info f9_kasumi;
+	struct qce_cmdlist_info f9_snow3g;
+	struct qce_cmdlist_info unlock_all_pipes;
+};
+
+struct qce_ce_cfg_reg_setting {
+	uint32_t crypto_cfg_be;
+	uint32_t crypto_cfg_le;
+
+	uint32_t encr_cfg_aes_cbc_128;
+	uint32_t encr_cfg_aes_cbc_256;
+
+	uint32_t encr_cfg_aes_ecb_128;
+	uint32_t encr_cfg_aes_ecb_256;
+
+	uint32_t encr_cfg_aes_xts_128;
+	uint32_t encr_cfg_aes_xts_256;
+
+	uint32_t encr_cfg_aes_ctr_128;
+	uint32_t encr_cfg_aes_ctr_256;
+
+	uint32_t encr_cfg_aes_ccm_128;
+	uint32_t encr_cfg_aes_ccm_256;
+
+	uint32_t encr_cfg_des_cbc;
+	uint32_t encr_cfg_des_ecb;
+
+	uint32_t encr_cfg_3des_cbc;
+	uint32_t encr_cfg_3des_ecb;
+	uint32_t encr_cfg_kasumi;
+	uint32_t encr_cfg_snow3g;
+
+	uint32_t auth_cfg_cmac_128;
+	uint32_t auth_cfg_cmac_256;
+
+	uint32_t auth_cfg_sha1;
+	uint32_t auth_cfg_sha256;
+
+	uint32_t auth_cfg_hmac_sha1;
+	uint32_t auth_cfg_hmac_sha256;
+
+	uint32_t auth_cfg_aes_ccm_128;
+	uint32_t auth_cfg_aes_ccm_256;
+	uint32_t auth_cfg_aead_sha1_hmac;
+	uint32_t auth_cfg_aead_sha256_hmac;
+	uint32_t auth_cfg_kasumi;
+	uint32_t auth_cfg_snow3g;
+};
+
+struct ce_bam_info {
+	uint32_t			bam_irq;
+	uint32_t			bam_mem;
+	void __iomem			*bam_iobase;
+	uint32_t			ce_device;
+	uint32_t			ce_hw_instance;
+	uint32_t			bam_ee;
+	unsigned int			pipe_pair_index;
+	unsigned int			src_pipe_index;
+	unsigned int			dest_pipe_index;
+	unsigned long			bam_handle;
+	int				ce_burst_size;
+	uint32_t			minor_version;
+	struct qce_sps_ep_conn_data	producer;
+	struct qce_sps_ep_conn_data	consumer;
+};
+
+/* SPS data structure with buffers, command lists & command pointer lists */
+struct ce_sps_data {
+	enum qce_pipe_st_enum producer_state;	/* Producer pipe state */
+	int consumer_status;		/* consumer pipe status */
+	int producer_status;		/* producer pipe status */
+	struct sps_transfer in_transfer;
+	struct sps_transfer out_transfer;
+	struct qce_cmdlistptr_ops cmdlistptr;
+	uint32_t result_dump; /* result dump virtual address */
+	uint32_t result_dump_null;
+	uint32_t result_dump_phy; /* result dump physical address (32 bits) */
+	uint32_t result_dump_null_phy;
+
+	uint32_t ignore_buffer; /* ignore buffer virtual address */
+	struct ce_result_dump_format *result; /* pointer to result dump */
+	struct ce_result_dump_format *result_null;
+};
+
+struct ce_request_info {
+	atomic_t in_use;
+	bool in_prog;
+	enum qce_xfer_type_enum	xfer_type;
+	struct ce_sps_data ce_sps;
+	qce_comp_func_ptr_t qce_cb;	/* qce callback function pointer */
+	void *user;
+	void *areq;
+	int assoc_nents;
+	struct scatterlist *asg;        /* Formatted associated data sg */
+	int src_nents;
+	int dst_nents;
+	dma_addr_t phy_iv_in;
+	unsigned char dec_iv[16];
+	int dir;
+	enum qce_cipher_mode_enum mode;
+	dma_addr_t phy_ota_src;
+	dma_addr_t phy_ota_dst;
+	unsigned int ota_size;
+	unsigned int req_len;
+};
+
+struct qce_driver_stats {
+	int no_of_timeouts;
+	int no_of_dummy_reqs;
+	int current_mode;
+	int outstanding_reqs;
+};
+
+#endif /* _DRIVERS_CRYPTO_MSM_QCE50_H_ */
diff -Nruw linux-4.4.115-fbx/drivers/crypto/msm./qcedev.c linux-4.4.115-fbx/drivers/crypto/msm/qcedev.c
--- linux-4.4.115-fbx/drivers/crypto/msm./qcedev.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/crypto/msm/qcedev.c	2019-10-29 09:26:23.505201828 +0100
@@ -0,0 +1,2109 @@
+/* Qualcomm CE device driver.
+ *
+ * Copyright (c) 2010-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#include <linux/mman.h>
+#include <linux/types.h>
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/kernel.h>
+#include <linux/dmapool.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/miscdevice.h>
+#include <linux/uaccess.h>
+#include <linux/debugfs.h>
+#include <linux/scatterlist.h>
+#include <linux/crypto.h>
+#include <linux/platform_data/qcom_crypto_device.h>
+#include <linux/msm-bus.h>
+#include <linux/qcedev.h>
+
+#include <crypto/hash.h>
+#include "qcedevi.h"
+#include "qce.h"
+
+#include <linux/compat.h>
+#include "compat_qcedev.h"
+
+#define CACHE_LINE_SIZE 32
+#define CE_SHA_BLOCK_SIZE SHA256_BLOCK_SIZE
+
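+/* standard initialization vector for SHA-1, source: FIPS 180-2 */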
+static uint8_t _std_init_vector_sha1_uint8[] = {
+	0x67, 0x45, 0x23, 0x01, 0xEF, 0xCD, 0xAB, 0x89,
+	0x98, 0xBA, 0xDC, 0xFE, 0x10, 0x32, 0x54, 0x76,
+	0xC3, 0xD2, 0xE1, 0xF0
+};
+/* standard initialization vector for SHA-256, source: FIPS 180-2 */
+static uint8_t _std_init_vector_sha256_uint8[] = {
+	0x6A, 0x09, 0xE6, 0x67, 0xBB, 0x67, 0xAE, 0x85,
+	0x3C, 0x6E, 0xF3, 0x72, 0xA5, 0x4F, 0xF5, 0x3A,
+	0x51, 0x0E, 0x52, 0x7F, 0x9B, 0x05, 0x68, 0x8C,
+	0x1F, 0x83, 0xD9, 0xAB, 0x5B, 0xE0, 0xCD, 0x19
+};
+
+static DEFINE_MUTEX(send_cmd_lock);
+static DEFINE_MUTEX(qcedev_sent_bw_req);
+static DEFINE_MUTEX(hash_access_lock);
+
+static void qcedev_ce_high_bw_req(struct qcedev_control *podev,
+							bool high_bw_req)
+{
+	int ret = 0;
+
+	mutex_lock(&qcedev_sent_bw_req);
+	if (high_bw_req) {
+		if (podev->high_bw_req_count == 0) {
+			ret = qce_enable_clk(podev->qce);
+			if (ret) {
+				pr_err("%s: Unable to enable clk\n", __func__);
+				mutex_unlock(&qcedev_sent_bw_req);
+				return;
+			}
+			ret = msm_bus_scale_client_update_request(
+					podev->bus_scale_handle, 1);
+			if (ret) {
+				pr_err("%s Unable to set to high bandwidth\n",
+							__func__);
+				ret = qce_disable_clk(podev->qce);
+				mutex_unlock(&qcedev_sent_bw_req);
+				return;
+			}
+		}
+		podev->high_bw_req_count++;
+	} else {
+		if (podev->high_bw_req_count == 1) {
+			ret = msm_bus_scale_client_update_request(
+					podev->bus_scale_handle, 0);
+			if (ret) {
+				pr_err("%s Unable to set to low bandwidth\n",
+							__func__);
+				mutex_unlock(&qcedev_sent_bw_req);
+				return;
+			}
+			ret = qce_disable_clk(podev->qce);
+			if (ret) {
+				pr_err("%s: Unable to disable clk\n", __func__);
+				ret = msm_bus_scale_client_update_request(
+					podev->bus_scale_handle, 1);
+				if (ret)
+					pr_err("%s Unable to set to high bandwidth\n",
+							__func__);
+				mutex_unlock(&qcedev_sent_bw_req);
+				return;
+			}
+		}
+		podev->high_bw_req_count--;
+	}
+	mutex_unlock(&qcedev_sent_bw_req);
+}
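+
+/*
+ * The bandwidth vote is reference counted: the first opener enables the
+ * CE clocks and votes the bus high, the last release drops both, and
+ * each failure path restores whatever state it could not change.
+ */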
+
+#define QCEDEV_MAGIC 0x56434544 /* "qced" */
+
+static int qcedev_open(struct inode *inode, struct file *file);
+static int qcedev_release(struct inode *inode, struct file *file);
+static int start_cipher_req(struct qcedev_control *podev);
+static int start_sha_req(struct qcedev_control *podev);
+
+static const struct file_operations qcedev_fops = {
+	.owner = THIS_MODULE,
+	.unlocked_ioctl = qcedev_ioctl,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl = compat_qcedev_ioctl,
+#endif
+	.open = qcedev_open,
+	.release = qcedev_release,
+};
+
+static struct qcedev_control qce_dev[] = {
+	{
+		.miscdevice = {
+			.minor = MISC_DYNAMIC_MINOR,
+			.name = "qce",
+			.fops = &qcedev_fops,
+		},
+		.magic = QCEDEV_MAGIC,
+	},
+};
+
+#define MAX_QCE_DEVICE ARRAY_SIZE(qce_dev)
+#define DEBUG_MAX_FNAME  16
+#define DEBUG_MAX_RW_BUF 1024
+
+struct qcedev_stat {
+	u32 qcedev_dec_success;
+	u32 qcedev_dec_fail;
+	u32 qcedev_enc_success;
+	u32 qcedev_enc_fail;
+	u32 qcedev_sha_success;
+	u32 qcedev_sha_fail;
+};
+
+static struct qcedev_stat _qcedev_stat;
+static struct dentry *_debug_dent;
+static char _debug_read_buf[DEBUG_MAX_RW_BUF];
+static int _debug_qcedev;
+
+static struct qcedev_control *qcedev_minor_to_control(unsigned n)
+{
+	int i;
+
+	for (i = 0; i < MAX_QCE_DEVICE; i++) {
+		if (qce_dev[i].miscdevice.minor == n)
+			return &qce_dev[i];
+	}
+	return NULL;
+}
+
+static int qcedev_open(struct inode *inode, struct file *file)
+{
+	struct qcedev_handle *handle;
+	struct qcedev_control *podev;
+
+	podev = qcedev_minor_to_control(MINOR(inode->i_rdev));
+	if (podev == NULL) {
+		pr_err("%s: no such device %d\n", __func__,
+					MINOR(inode->i_rdev));
+		return -ENOENT;
+	}
+
+	handle = kzalloc(sizeof(struct qcedev_handle), GFP_KERNEL);
+	if (handle == NULL) {
+		pr_err("%s: Failed to allocate memory\n", __func__);
+		return -ENOMEM;
+	}
+
+	handle->cntl = podev;
+	file->private_data = handle;
+	if (podev->platform_support.bus_scale_table != NULL)
+		qcedev_ce_high_bw_req(podev, true);
+	return 0;
+}
+
+static int qcedev_release(struct inode *inode, struct file *file)
+{
+	struct qcedev_control *podev;
+	struct qcedev_handle *handle;
+
+	handle =  file->private_data;
+	podev =  handle->cntl;
+	if (podev != NULL && podev->magic != QCEDEV_MAGIC) {
+		pr_err("%s: invalid handle %pK\n",
+					__func__, podev);
+	}
+	kzfree(handle);
+	file->private_data = NULL;
+	if (podev != NULL && podev->platform_support.bus_scale_table != NULL)
+		qcedev_ce_high_bw_req(podev, false);
+	return 0;
+}
+
+static void req_done(unsigned long data)
+{
+	struct qcedev_control *podev = (struct qcedev_control *)data;
+	struct qcedev_async_req *areq;
+	unsigned long flags = 0;
+	struct qcedev_async_req *new_req = NULL;
+	int ret = 0;
+
+	spin_lock_irqsave(&podev->lock, flags);
+	areq = podev->active_command;
+	podev->active_command = NULL;
+
+again:
+	if (!list_empty(&podev->ready_commands)) {
+		new_req = container_of(podev->ready_commands.next,
+						struct qcedev_async_req, list);
+		list_del(&new_req->list);
+		podev->active_command = new_req;
+		new_req->err = 0;
+		if (new_req->op_type == QCEDEV_CRYPTO_OPER_CIPHER)
+			ret = start_cipher_req(podev);
+		else
+			ret = start_sha_req(podev);
+	}
+
+	spin_unlock_irqrestore(&podev->lock, flags);
+
+	if (areq)
+		complete(&areq->complete);
+
+	if (new_req && ret) {
+		complete(&new_req->complete);
+		spin_lock_irqsave(&podev->lock, flags);
+		podev->active_command = NULL;
+		areq = NULL;
+		ret = 0;
+		new_req = NULL;
+		goto again;
+	}
+}
+
+void qcedev_sha_req_cb(void *cookie, unsigned char *digest,
+	unsigned char *authdata, int ret)
+{
+	struct qcedev_sha_req *areq;
+	struct qcedev_control *pdev;
+	struct qcedev_handle *handle;
+
+	uint32_t *auth32 = (uint32_t *)authdata;
+
+	areq = (struct qcedev_sha_req *) cookie;
+	handle = (struct qcedev_handle *) areq->cookie;
+	pdev = handle->cntl;
+
+	if (digest)
+		memcpy(&handle->sha_ctxt.digest[0], digest, 32);
+
+	if (authdata) {
+		handle->sha_ctxt.auth_data[0] = auth32[0];
+		handle->sha_ctxt.auth_data[1] = auth32[1];
+	}
+
+	tasklet_schedule(&pdev->done_tasklet);
+}
+
+
+void qcedev_cipher_req_cb(void *cookie, unsigned char *icv,
+	unsigned char *iv, int ret)
+{
+	struct qcedev_cipher_req *areq;
+	struct qcedev_handle *handle;
+	struct qcedev_control *podev;
+	struct qcedev_async_req *qcedev_areq;
+
+	areq = (struct qcedev_cipher_req *) cookie;
+	handle = (struct qcedev_handle *) areq->cookie;
+	podev = handle->cntl;
+	qcedev_areq = podev->active_command;
+
+	if (iv)
+		memcpy(&qcedev_areq->cipher_op_req.iv[0], iv,
+					qcedev_areq->cipher_op_req.ivlen);
+	tasklet_schedule(&podev->done_tasklet);
+}
+
+static int start_cipher_req(struct qcedev_control *podev)
+{
+	struct qcedev_async_req *qcedev_areq;
+	struct qce_req creq;
+	int ret = 0;
+
+	/* start the command on the podev->active_command */
+	qcedev_areq = podev->active_command;
+	qcedev_areq->cipher_req.cookie = qcedev_areq->handle;
+	if (qcedev_areq->cipher_op_req.use_pmem == QCEDEV_USE_PMEM) {
+		pr_err("%s: Use of PMEM is not supported\n", __func__);
+		goto unsupported;
+	}
+	creq.pmem = NULL;
+	switch (qcedev_areq->cipher_op_req.alg) {
+	case QCEDEV_ALG_DES:
+		creq.alg = CIPHER_ALG_DES;
+		break;
+	case QCEDEV_ALG_3DES:
+		creq.alg = CIPHER_ALG_3DES;
+		break;
+	case QCEDEV_ALG_AES:
+		creq.alg = CIPHER_ALG_AES;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	switch (qcedev_areq->cipher_op_req.mode) {
+	case QCEDEV_AES_MODE_CBC:
+	case QCEDEV_DES_MODE_CBC:
+		creq.mode = QCE_MODE_CBC;
+		break;
+	case QCEDEV_AES_MODE_ECB:
+	case QCEDEV_DES_MODE_ECB:
+		creq.mode = QCE_MODE_ECB;
+		break;
+	case QCEDEV_AES_MODE_CTR:
+		creq.mode = QCE_MODE_CTR;
+		break;
+	case QCEDEV_AES_MODE_XTS:
+		creq.mode = QCE_MODE_XTS;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	if ((creq.alg == CIPHER_ALG_AES) &&
+		(creq.mode == QCE_MODE_CTR)) {
+		creq.dir = QCE_ENCRYPT;
+	} else {
+		if (QCEDEV_OPER_ENC == qcedev_areq->cipher_op_req.op)
+			creq.dir = QCE_ENCRYPT;
+		else
+			creq.dir = QCE_DECRYPT;
+	}
+
+	creq.iv = &qcedev_areq->cipher_op_req.iv[0];
+	creq.ivsize = qcedev_areq->cipher_op_req.ivlen;
+
+	creq.enckey =  &qcedev_areq->cipher_op_req.enckey[0];
+	creq.encklen = qcedev_areq->cipher_op_req.encklen;
+
+	creq.cryptlen = qcedev_areq->cipher_op_req.data_len;
+
+	if (qcedev_areq->cipher_op_req.encklen == 0) {
+		if ((qcedev_areq->cipher_op_req.op == QCEDEV_OPER_ENC_NO_KEY)
+			|| (qcedev_areq->cipher_op_req.op ==
+				QCEDEV_OPER_DEC_NO_KEY))
+			creq.op = QCE_REQ_ABLK_CIPHER_NO_KEY;
+		else {
+			int i;
+
+			for (i = 0; i < QCEDEV_MAX_KEY_SIZE; i++) {
+				if (qcedev_areq->cipher_op_req.enckey[i] != 0)
+					break;
+			}
+
+			if ((podev->platform_support.hw_key_support == 1) &&
+						(i == QCEDEV_MAX_KEY_SIZE))
+				creq.op = QCE_REQ_ABLK_CIPHER;
+			else {
+				ret = -EINVAL;
+				goto unsupported;
+			}
+		}
+	} else {
+		creq.op = QCE_REQ_ABLK_CIPHER;
+	}
+
+	creq.qce_cb = qcedev_cipher_req_cb;
+	creq.areq = (void *)&qcedev_areq->cipher_req;
+	creq.flags = 0;
+	ret = qce_ablk_cipher_req(podev->qce, &creq);
+unsupported:
+	if (ret)
+		qcedev_areq->err = -ENXIO;
+	else
+		qcedev_areq->err = 0;
+	return ret;
+}
+
+static int start_sha_req(struct qcedev_control *podev)
+{
+	struct qcedev_async_req *qcedev_areq;
+	struct qce_sha_req sreq;
+	int ret = 0;
+	struct qcedev_handle *handle;
+
+	/* start the command on the podev->active_command */
+	qcedev_areq = podev->active_command;
+	handle = qcedev_areq->handle;
+
+	switch (qcedev_areq->sha_op_req.alg) {
+	case QCEDEV_ALG_SHA1:
+		sreq.alg = QCE_HASH_SHA1;
+		break;
+	case QCEDEV_ALG_SHA256:
+		sreq.alg = QCE_HASH_SHA256;
+		break;
+	case QCEDEV_ALG_SHA1_HMAC:
+		if (podev->ce_support.sha_hmac) {
+			sreq.alg = QCE_HASH_SHA1_HMAC;
+			sreq.authkey = &handle->sha_ctxt.authkey[0];
+			sreq.authklen = QCEDEV_MAX_SHA_BLOCK_SIZE;
+
+		} else {
+			sreq.alg = QCE_HASH_SHA1;
+			sreq.authkey = NULL;
+		}
+		break;
+	case QCEDEV_ALG_SHA256_HMAC:
+		if (podev->ce_support.sha_hmac) {
+			sreq.alg = QCE_HASH_SHA256_HMAC;
+			sreq.authkey = &handle->sha_ctxt.authkey[0];
+			sreq.authklen = QCEDEV_MAX_SHA_BLOCK_SIZE;
+		} else {
+			sreq.alg = QCE_HASH_SHA256;
+			sreq.authkey = NULL;
+		}
+		break;
+	case QCEDEV_ALG_AES_CMAC:
+		sreq.alg = QCE_HASH_AES_CMAC;
+		sreq.authkey = &handle->sha_ctxt.authkey[0];
+		sreq.authklen = qcedev_areq->sha_op_req.authklen;
+		break;
+	default:
+		pr_err("Algorithm %d not supported, exiting\n",
+			qcedev_areq->sha_op_req.alg);
+		return -EINVAL;
+	}
+
+	qcedev_areq->sha_req.cookie = handle;
+
+	sreq.qce_cb = qcedev_sha_req_cb;
+	if (qcedev_areq->sha_op_req.alg != QCEDEV_ALG_AES_CMAC) {
+		sreq.auth_data[0] = handle->sha_ctxt.auth_data[0];
+		sreq.auth_data[1] = handle->sha_ctxt.auth_data[1];
+		sreq.auth_data[2] = handle->sha_ctxt.auth_data[2];
+		sreq.auth_data[3] = handle->sha_ctxt.auth_data[3];
+		sreq.digest = &handle->sha_ctxt.digest[0];
+		sreq.first_blk = handle->sha_ctxt.first_blk;
+		sreq.last_blk = handle->sha_ctxt.last_blk;
+	}
+	sreq.size = qcedev_areq->sha_req.sreq.nbytes;
+	sreq.src = qcedev_areq->sha_req.sreq.src;
+	sreq.areq = (void *)&qcedev_areq->sha_req;
+	sreq.flags = 0;
+
+	ret = qce_process_sha_req(podev->qce, &sreq);
+
+	if (ret)
+		qcedev_areq->err = -ENXIO;
+	else
+		qcedev_areq->err = 0;
+	return ret;
+}
+
+static int submit_req(struct qcedev_async_req *qcedev_areq,
+					struct qcedev_handle *handle)
+{
+	struct qcedev_control *podev;
+	unsigned long flags = 0;
+	int ret = 0;
+	struct qcedev_stat *pstat;
+
+	qcedev_areq->err = 0;
+	podev = handle->cntl;
+
+	spin_lock_irqsave(&podev->lock, flags);
+
+	if (podev->active_command == NULL) {
+		podev->active_command = qcedev_areq;
+		if (qcedev_areq->op_type == QCEDEV_CRYPTO_OPER_CIPHER)
+			ret = start_cipher_req(podev);
+		else
+			ret = start_sha_req(podev);
+	} else {
+		list_add_tail(&qcedev_areq->list, &podev->ready_commands);
+	}
+
+	if (ret != 0)
+		podev->active_command = NULL;
+
+	spin_unlock_irqrestore(&podev->lock, flags);
+
+	if (ret == 0)
+		wait_for_completion(&qcedev_areq->complete);
+
+	if (ret)
+		qcedev_areq->err = -EIO;
+
+	pstat = &_qcedev_stat;
+	if (qcedev_areq->op_type == QCEDEV_CRYPTO_OPER_CIPHER) {
+		switch (qcedev_areq->cipher_op_req.op) {
+		case QCEDEV_OPER_DEC:
+			if (qcedev_areq->err)
+				pstat->qcedev_dec_fail++;
+			else
+				pstat->qcedev_dec_success++;
+			break;
+		case QCEDEV_OPER_ENC:
+			if (qcedev_areq->err)
+				pstat->qcedev_enc_fail++;
+			else
+				pstat->qcedev_enc_success++;
+			break;
+		default:
+			break;
+		}
+	} else {
+		if (qcedev_areq->err)
+			pstat->qcedev_sha_fail++;
+		else
+			pstat->qcedev_sha_success++;
+	}
+
+	return qcedev_areq->err;
+}
+
+static int qcedev_sha_init(struct qcedev_async_req *areq,
+				struct qcedev_handle *handle)
+{
+	struct qcedev_sha_ctxt *sha_ctxt = &handle->sha_ctxt;
+
+	memset(sha_ctxt, 0, sizeof(struct qcedev_sha_ctxt));
+	sha_ctxt->first_blk = 1;
+
+	if ((areq->sha_op_req.alg == QCEDEV_ALG_SHA1) ||
+			(areq->sha_op_req.alg == QCEDEV_ALG_SHA1_HMAC)) {
+		memcpy(&sha_ctxt->digest[0],
+			&_std_init_vector_sha1_uint8[0], SHA1_DIGEST_SIZE);
+		sha_ctxt->diglen = SHA1_DIGEST_SIZE;
+	} else {
+		if ((areq->sha_op_req.alg == QCEDEV_ALG_SHA256) ||
+			(areq->sha_op_req.alg == QCEDEV_ALG_SHA256_HMAC)) {
+			memcpy(&sha_ctxt->digest[0],
+					&_std_init_vector_sha256_uint8[0],
+					SHA256_DIGEST_SIZE);
+			sha_ctxt->diglen = SHA256_DIGEST_SIZE;
+		}
+	}
+	sha_ctxt->init_done = true;
+	return 0;
+}
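+
+/*
+ * Seeding the digest with the standard initial hash values lets each
+ * subsequent update run on the engine as a plain compression-function
+ * pass over whole blocks, with the running state carried in sha_ctxt.
+ */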
+
+static int qcedev_sha_update_max_xfer(struct qcedev_async_req *qcedev_areq,
+				struct qcedev_handle *handle,
+				struct scatterlist *sg_src)
+{
+	int err = 0;
+	int i = 0;
+	uint32_t total;
+
+	uint8_t *user_src = NULL;
+	uint8_t *k_src = NULL;
+	uint8_t *k_buf_src = NULL;
+	uint8_t *k_align_src = NULL;
+
+	uint32_t sha_pad_len = 0;
+	uint32_t trailing_buf_len = 0;
+	uint32_t t_buf = handle->sha_ctxt.trailing_buf_len;
+	uint32_t sha_block_size;
+
+	total = qcedev_areq->sha_op_req.data_len + t_buf;
+
+	if (qcedev_areq->sha_op_req.alg == QCEDEV_ALG_SHA1)
+		sha_block_size = SHA1_BLOCK_SIZE;
+	else
+		sha_block_size = SHA256_BLOCK_SIZE;
+
+	if (total <= sha_block_size) {
+		uint32_t len =  qcedev_areq->sha_op_req.data_len;
+
+		i = 0;
+
+		k_src = &handle->sha_ctxt.trailing_buf[t_buf];
+
+		/* Copy data from user src(s) */
+		while (len > 0) {
+			user_src =
+			(void __user *)qcedev_areq->sha_op_req.data[i].vaddr;
+			if (user_src && copy_from_user(k_src,
+				(void __user *)user_src,
+				qcedev_areq->sha_op_req.data[i].len))
+				return -EFAULT;
+
+			len -= qcedev_areq->sha_op_req.data[i].len;
+			k_src += qcedev_areq->sha_op_req.data[i].len;
+			i++;
+		}
+		handle->sha_ctxt.trailing_buf_len = total;
+
+		return 0;
+	}
+
+
+	k_buf_src = kmalloc(total + CACHE_LINE_SIZE * 2,
+				GFP_KERNEL);
+	if (k_buf_src == NULL) {
+		pr_err("%s: Can't allocate memory: k_buf_src 0x%lx\n",
+					__func__, (uintptr_t)k_buf_src);
+		return -ENOMEM;
+	}
+
+	k_align_src = (uint8_t *)ALIGN(((uintptr_t)k_buf_src),
+							CACHE_LINE_SIZE);
+	k_src = k_align_src;
+
+	/* check for trailing buffer from previous updates and append it */
+	if (t_buf > 0) {
+		memcpy(k_src, &handle->sha_ctxt.trailing_buf[0],
+								t_buf);
+		k_src += t_buf;
+	}
+
+	/* Copy data from user src(s) */
+	user_src = (void __user *)qcedev_areq->sha_op_req.data[0].vaddr;
+	if (user_src && copy_from_user(k_src,
+				(void __user *)user_src,
+				qcedev_areq->sha_op_req.data[0].len)) {
+		kzfree(k_buf_src);
+		return -EFAULT;
+	}
+	k_src += qcedev_areq->sha_op_req.data[0].len;
+	for (i = 1; i < qcedev_areq->sha_op_req.entries; i++) {
+		user_src = (void __user *)qcedev_areq->sha_op_req.data[i].vaddr;
+		if (user_src && copy_from_user(k_src,
+					(void __user *)user_src,
+					qcedev_areq->sha_op_req.data[i].len)) {
+			kzfree(k_buf_src);
+			return -EFAULT;
+		}
+		k_src += qcedev_areq->sha_op_req.data[i].len;
+	}
+
+	/*  get new trailing buffer */
+	sha_pad_len = ALIGN(total, CE_SHA_BLOCK_SIZE) - total;
+	trailing_buf_len =  CE_SHA_BLOCK_SIZE - sha_pad_len;
+
+	qcedev_areq->sha_req.sreq.src = sg_src;
+	sg_set_buf(qcedev_areq->sha_req.sreq.src, k_align_src,
+						total-trailing_buf_len);
+	sg_mark_end(qcedev_areq->sha_req.sreq.src);
+
+	qcedev_areq->sha_req.sreq.nbytes = total - trailing_buf_len;
+
+	/*  update sha_ctxt trailing buf content to new trailing buf */
+	if (trailing_buf_len > 0) {
+		memset(&handle->sha_ctxt.trailing_buf[0], 0, 64);
+		memcpy(&handle->sha_ctxt.trailing_buf[0],
+			(k_src - trailing_buf_len),
+			trailing_buf_len);
+	}
+	handle->sha_ctxt.trailing_buf_len = trailing_buf_len;
+
+	err = submit_req(qcedev_areq, handle);
+
+	handle->sha_ctxt.last_blk = 0;
+	handle->sha_ctxt.first_blk = 0;
+
+	kzfree(k_buf_src);
+	return err;
+}
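+
+/*
+ * Only whole SHA blocks are submitted above; the tail that does not
+ * fill a block (trailing_buf_len bytes) stays in sha_ctxt.trailing_buf
+ * and is prepended to the next update or hashed by qcedev_sha_final().
+ */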
+
+static int qcedev_sha_update(struct qcedev_async_req *qcedev_areq,
+				struct qcedev_handle *handle,
+				struct scatterlist *sg_src)
+{
+	int err = 0;
+	int i = 0;
+	int j = 0;
+	int k = 0;
+	int num_entries = 0;
+	uint32_t total = 0;
+
+	if (handle->sha_ctxt.init_done == false) {
+		pr_err("%s Init was not called\n", __func__);
+		return -EINVAL;
+	}
+
+	if (qcedev_areq->sha_op_req.data_len > QCE_MAX_OPER_DATA) {
+
+		struct	qcedev_sha_op_req *saved_req;
+		struct	qcedev_sha_op_req req;
+		struct	qcedev_sha_op_req *sreq = &qcedev_areq->sha_op_req;
+
+		/* save the original req structure */
+		saved_req =
+			kmalloc(sizeof(struct qcedev_sha_op_req), GFP_KERNEL);
+		if (saved_req == NULL) {
+			pr_err("%s: Can't allocate mem: saved_req 0x%lx\n",
+						__func__, (uintptr_t)saved_req);
+			return -ENOMEM;
+		}
+		memcpy(&req, sreq, sizeof(struct qcedev_sha_op_req));
+		memcpy(saved_req, sreq, sizeof(struct qcedev_sha_op_req));
+
+		i = 0;
+		/* Address 32 KB  at a time */
+		while ((i < req.entries) && (err == 0)) {
+			if (sreq->data[i].len > QCE_MAX_OPER_DATA) {
+				sreq->data[0].len = QCE_MAX_OPER_DATA;
+				if (i > 0) {
+					sreq->data[0].vaddr =
+							sreq->data[i].vaddr;
+				}
+
+				sreq->data_len = QCE_MAX_OPER_DATA;
+				sreq->entries = 1;
+
+				err = qcedev_sha_update_max_xfer(qcedev_areq,
+								handle, sg_src);
+
+				sreq->data[i].len = req.data[i].len -
+							QCE_MAX_OPER_DATA;
+				sreq->data[i].vaddr = req.data[i].vaddr +
+							QCE_MAX_OPER_DATA;
+				req.data[i].vaddr = sreq->data[i].vaddr;
+				req.data[i].len = sreq->data[i].len;
+			} else {
+				total = 0;
+				for (j = i; j < req.entries; j++) {
+					num_entries++;
+					if ((total + sreq->data[j].len) >=
+							QCE_MAX_OPER_DATA) {
+						sreq->data[j].len =
+						(QCE_MAX_OPER_DATA - total);
+						total = QCE_MAX_OPER_DATA;
+						break;
+					}
+					total += sreq->data[j].len;
+				}
+
+				sreq->data_len = total;
+				if (i > 0)
+					for (k = 0; k < num_entries; k++) {
+						sreq->data[k].len =
+							sreq->data[i+k].len;
+						sreq->data[k].vaddr =
+							sreq->data[i+k].vaddr;
+					}
+				sreq->entries = num_entries;
+
+				i = j;
+				err = qcedev_sha_update_max_xfer(qcedev_areq,
+								handle, sg_src);
+				num_entries = 0;
+
+				sreq->data[i].vaddr = req.data[i].vaddr +
+							sreq->data[i].len;
+				sreq->data[i].len = req.data[i].len -
+							sreq->data[i].len;
+				req.data[i].vaddr = sreq->data[i].vaddr;
+				req.data[i].len = sreq->data[i].len;
+
+				if (sreq->data[i].len == 0)
+					i++;
+			}
+		} /* end of while ((i < req.entries) && (err == 0)) */
+
+		/* Restore the original req structure */
+		for (i = 0; i < saved_req->entries; i++) {
+			sreq->data[i].len = saved_req->data[i].len;
+			sreq->data[i].vaddr = saved_req->data[i].vaddr;
+		}
+		sreq->entries = saved_req->entries;
+		sreq->data_len = saved_req->data_len;
+		kzfree(saved_req);
+	} else
+		err = qcedev_sha_update_max_xfer(qcedev_areq, handle, sg_src);
+
+	return err;
+}
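+
+/*
+ * Worked example of the split logic above (illustrative only): with
+ * QCE_MAX_OPER_DATA = 0xFF00 (65280 bytes), a single 100000-byte entry is
+ * issued as one 65280-byte transfer followed by one 34720-byte transfer,
+ * while several smaller entries are coalesced until their running total
+ * reaches the cap and are then issued together.
+ */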
+
+static int qcedev_sha_final(struct qcedev_async_req *qcedev_areq,
+				struct qcedev_handle *handle)
+{
+	int err = 0;
+	struct scatterlist sg_src;
+	uint32_t total;
+	uint8_t *k_buf_src = NULL;
+	uint8_t *k_align_src = NULL;
+
+	if (handle->sha_ctxt.init_done == false) {
+		pr_err("%s Init was not called\n", __func__);
+		return -EINVAL;
+	}
+
+	handle->sha_ctxt.last_blk = 1;
+
+	total = handle->sha_ctxt.trailing_buf_len;
+
+	if (total) {
+		k_buf_src = kmalloc(total + CACHE_LINE_SIZE * 2,
+					GFP_KERNEL);
+		if (k_buf_src == NULL) {
+			pr_err("%s: Can't Allocate memory: k_buf_src 0x%lx\n",
+						__func__, (uintptr_t)k_buf_src);
+			return -ENOMEM;
+		}
+
+		k_align_src = (uint8_t *)ALIGN(((uintptr_t)k_buf_src),
+							CACHE_LINE_SIZE);
+		memcpy(k_align_src, &handle->sha_ctxt.trailing_buf[0], total);
+	}
+	qcedev_areq->sha_req.sreq.src = (struct scatterlist *) &sg_src;
+	sg_set_buf(qcedev_areq->sha_req.sreq.src, k_align_src, total);
+	sg_mark_end(qcedev_areq->sha_req.sreq.src);
+
+	qcedev_areq->sha_req.sreq.nbytes = total;
+
+	err = submit_req(qcedev_areq, handle);
+
+	handle->sha_ctxt.first_blk = 0;
+	handle->sha_ctxt.last_blk = 0;
+	handle->sha_ctxt.auth_data[0] = 0;
+	handle->sha_ctxt.auth_data[1] = 0;
+	handle->sha_ctxt.trailing_buf_len = 0;
+	handle->sha_ctxt.init_done = false;
+	memset(&handle->sha_ctxt.trailing_buf[0], 0, 64);
+
+	kzfree(k_buf_src);
+	return err;
+}
+
+static int qcedev_hash_cmac(struct qcedev_async_req *qcedev_areq,
+					struct qcedev_handle *handle,
+					struct scatterlist *sg_src)
+{
+	int err = 0;
+	int i = 0;
+	uint32_t total;
+
+	uint8_t *user_src = NULL;
+	uint8_t *k_src = NULL;
+	uint8_t *k_buf_src = NULL;
+
+	total = qcedev_areq->sha_op_req.data_len;
+
+	if (copy_from_user(&handle->sha_ctxt.authkey[0],
+				(void __user *)qcedev_areq->sha_op_req.authkey,
+				qcedev_areq->sha_op_req.authklen))
+		return -EFAULT;
+
+
+	k_buf_src = kmalloc(total, GFP_KERNEL);
+	if (k_buf_src == NULL) {
+		pr_err("%s: Can't Allocate memory: k_buf_src 0x%lx\n",
+				__func__, (uintptr_t)k_buf_src);
+		return -ENOMEM;
+	}
+
+	k_src = k_buf_src;
+
+	/* Copy data from user src(s) */
+	user_src = (void __user *)qcedev_areq->sha_op_req.data[0].vaddr;
+	for (i = 0; i < qcedev_areq->sha_op_req.entries; i++) {
+		user_src =
+			(void __user *)qcedev_areq->sha_op_req.data[i].vaddr;
+		if (user_src && copy_from_user(k_src, (void __user *)user_src,
+				qcedev_areq->sha_op_req.data[i].len)) {
+			kzfree(k_buf_src);
+			return -EFAULT;
+		}
+		k_src += qcedev_areq->sha_op_req.data[i].len;
+	}
+
+	qcedev_areq->sha_req.sreq.src = sg_src;
+	sg_set_buf(qcedev_areq->sha_req.sreq.src, k_buf_src, total);
+	sg_mark_end(qcedev_areq->sha_req.sreq.src);
+
+	qcedev_areq->sha_req.sreq.nbytes = total;
+	handle->sha_ctxt.diglen = qcedev_areq->sha_op_req.diglen;
+	err = submit_req(qcedev_areq, handle);
+
+	kzfree(k_buf_src);
+	return err;
+}
+
+static int qcedev_set_hmac_auth_key(struct qcedev_async_req *areq,
+					struct qcedev_handle *handle,
+					struct scatterlist *sg_src)
+{
+	int err = 0;
+
+	if (areq->sha_op_req.authklen <= QCEDEV_MAX_KEY_SIZE) {
+		qcedev_sha_init(areq, handle);
+		if (copy_from_user(&handle->sha_ctxt.authkey[0],
+				(void __user *)areq->sha_op_req.authkey,
+				areq->sha_op_req.authklen))
+			return -EFAULT;
+	} else {
+		struct qcedev_async_req authkey_areq;
+		uint8_t	authkey[QCEDEV_MAX_SHA_BLOCK_SIZE];
+
+		init_completion(&authkey_areq.complete);
+
+		authkey_areq.sha_op_req.entries = 1;
+		authkey_areq.sha_op_req.data[0].vaddr =
+						areq->sha_op_req.authkey;
+		authkey_areq.sha_op_req.data[0].len = areq->sha_op_req.authklen;
+		authkey_areq.sha_op_req.data_len = areq->sha_op_req.authklen;
+		authkey_areq.sha_op_req.diglen = 0;
+		authkey_areq.handle = handle;
+
+		memset(&authkey_areq.sha_op_req.digest[0], 0,
+						QCEDEV_MAX_SHA_DIGEST);
+		if (areq->sha_op_req.alg == QCEDEV_ALG_SHA1_HMAC)
+				authkey_areq.sha_op_req.alg = QCEDEV_ALG_SHA1;
+		if (areq->sha_op_req.alg == QCEDEV_ALG_SHA256_HMAC)
+				authkey_areq.sha_op_req.alg = QCEDEV_ALG_SHA256;
+
+		authkey_areq.op_type = QCEDEV_CRYPTO_OPER_SHA;
+
+		qcedev_sha_init(&authkey_areq, handle);
+		err = qcedev_sha_update(&authkey_areq, handle, sg_src);
+		if (!err)
+			err = qcedev_sha_final(&authkey_areq, handle);
+		else
+			return err;
+		memcpy(&authkey[0], &handle->sha_ctxt.digest[0],
+				handle->sha_ctxt.diglen);
+		qcedev_sha_init(areq, handle);
+
+		memcpy(&handle->sha_ctxt.authkey[0], &authkey[0],
+				handle->sha_ctxt.diglen);
+	}
+	return err;
+}
+
+static int qcedev_hmac_get_ohash(struct qcedev_async_req *qcedev_areq,
+				struct qcedev_handle *handle)
+{
+	int err = 0;
+	struct scatterlist sg_src;
+	uint8_t *k_src = NULL;
+	uint32_t sha_block_size = 0;
+	uint32_t sha_digest_size = 0;
+
+	if (qcedev_areq->sha_op_req.alg == QCEDEV_ALG_SHA1_HMAC) {
+		sha_digest_size = SHA1_DIGEST_SIZE;
+		sha_block_size = SHA1_BLOCK_SIZE;
+	} else {
+		if (qcedev_areq->sha_op_req.alg == QCEDEV_ALG_SHA256_HMAC) {
+			sha_digest_size = SHA256_DIGEST_SIZE;
+			sha_block_size = SHA256_BLOCK_SIZE;
+		}
+	}
+	k_src = kmalloc(sha_block_size, GFP_KERNEL);
+	if (k_src == NULL) {
+		pr_err("%s: Can't Allocate memory: k_src 0x%lx\n",
+						__func__, (uintptr_t)k_src);
+		return -ENOMEM;
+	}
+
+	/* check for trailing buffer from previous updates and append it */
+	memcpy(k_src, &handle->sha_ctxt.trailing_buf[0],
+			handle->sha_ctxt.trailing_buf_len);
+
+	qcedev_areq->sha_req.sreq.src = (struct scatterlist *) &sg_src;
+	sg_set_buf(qcedev_areq->sha_req.sreq.src, k_src, sha_block_size);
+	sg_mark_end(qcedev_areq->sha_req.sreq.src);
+
+	qcedev_areq->sha_req.sreq.nbytes = sha_block_size;
+	memset(&handle->sha_ctxt.trailing_buf[0], 0, sha_block_size);
+	memcpy(&handle->sha_ctxt.trailing_buf[0], &handle->sha_ctxt.digest[0],
+					sha_digest_size);
+	handle->sha_ctxt.trailing_buf_len = sha_digest_size;
+
+	handle->sha_ctxt.first_blk = 1;
+	handle->sha_ctxt.last_blk = 0;
+	handle->sha_ctxt.auth_data[0] = 0;
+	handle->sha_ctxt.auth_data[1] = 0;
+
+	if (qcedev_areq->sha_op_req.alg == QCEDEV_ALG_SHA1_HMAC) {
+		memcpy(&handle->sha_ctxt.digest[0],
+			&_std_init_vector_sha1_uint8[0], SHA1_DIGEST_SIZE);
+		handle->sha_ctxt.diglen = SHA1_DIGEST_SIZE;
+	}
+
+	if (qcedev_areq->sha_op_req.alg == QCEDEV_ALG_SHA256_HMAC) {
+		memcpy(&handle->sha_ctxt.digest[0],
+			&_std_init_vector_sha256_uint8[0], SHA256_DIGEST_SIZE);
+		handle->sha_ctxt.diglen = SHA256_DIGEST_SIZE;
+	}
+	err = submit_req(qcedev_areq, handle);
+
+	handle->sha_ctxt.last_blk = 0;
+	handle->sha_ctxt.first_blk = 0;
+
+	kzfree(k_src);
+	return err;
+}
+
+static int qcedev_hmac_update_iokey(struct qcedev_async_req *areq,
+				struct qcedev_handle *handle, bool ikey)
+{
+	int i;
+	uint32_t constant;
+	uint32_t sha_block_size;
+
+	if (ikey)
+		constant = 0x36;
+	else
+		constant = 0x5c;
+
+	if (areq->sha_op_req.alg == QCEDEV_ALG_SHA1_HMAC)
+		sha_block_size = SHA1_BLOCK_SIZE;
+	else
+		sha_block_size = SHA256_BLOCK_SIZE;
+
+	memset(&handle->sha_ctxt.trailing_buf[0], 0, sha_block_size);
+	for (i = 0; i < sha_block_size; i++)
+		handle->sha_ctxt.trailing_buf[i] =
+				(handle->sha_ctxt.authkey[i] ^ constant);
+
+	handle->sha_ctxt.trailing_buf_len = sha_block_size;
+	return 0;
+}
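+
+/*
+ * 0x36 and 0x5c above are the standard RFC 2104 ipad/opad bytes:
+ * HMAC(K, m) = H((K ^ opad) || H((K ^ ipad) || m)). When the engine has
+ * no native HMAC support (ce_support.sha_hmac == 0) the driver builds
+ * this construction in software: the trailing buffer is seeded with
+ * K ^ ipad before the inner hash and with K ^ opad before the outer one.
+ */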
+
+static int qcedev_hmac_init(struct qcedev_async_req *areq,
+				struct qcedev_handle *handle,
+				struct scatterlist *sg_src)
+{
+	int err;
+	struct qcedev_control *podev = handle->cntl;
+
+	err = qcedev_set_hmac_auth_key(areq, handle, sg_src);
+	if (err)
+		return err;
+	if (!podev->ce_support.sha_hmac)
+		qcedev_hmac_update_iokey(areq, handle, true);
+	return 0;
+}
+
+static int qcedev_hmac_final(struct qcedev_async_req *areq,
+				struct qcedev_handle *handle)
+{
+	int err;
+	struct qcedev_control *podev = handle->cntl;
+
+	err = qcedev_sha_final(areq, handle);
+	if (podev->ce_support.sha_hmac)
+		return err;
+
+	qcedev_hmac_update_iokey(areq, handle, false);
+	err = qcedev_hmac_get_ohash(areq, handle);
+	if (err)
+		return err;
+	err = qcedev_sha_final(areq, handle);
+
+	return err;
+}
+
+static int qcedev_hash_init(struct qcedev_async_req *areq,
+				struct qcedev_handle *handle,
+				struct scatterlist *sg_src)
+{
+	if ((areq->sha_op_req.alg == QCEDEV_ALG_SHA1) ||
+			(areq->sha_op_req.alg == QCEDEV_ALG_SHA256))
+		return qcedev_sha_init(areq, handle);
+	else
+		return qcedev_hmac_init(areq, handle, sg_src);
+}
+
+static int qcedev_hash_update(struct qcedev_async_req *qcedev_areq,
+				struct qcedev_handle *handle,
+				struct scatterlist *sg_src)
+{
+	return qcedev_sha_update(qcedev_areq, handle, sg_src);
+}
+
+static int qcedev_hash_final(struct qcedev_async_req *areq,
+				struct qcedev_handle *handle)
+{
+	if ((areq->sha_op_req.alg == QCEDEV_ALG_SHA1) ||
+			(areq->sha_op_req.alg == QCEDEV_ALG_SHA256))
+		return qcedev_sha_final(areq, handle);
+	else
+		return qcedev_hmac_final(areq, handle);
+}
+
+static int qcedev_vbuf_ablk_cipher_max_xfer(struct qcedev_async_req *areq,
+				int *di, struct qcedev_handle *handle,
+				uint8_t *k_align_src)
+{
+	int err = 0;
+	int i = 0;
+	int dst_i = *di;
+	struct scatterlist sg_src;
+	uint32_t byteoffset = 0;
+	uint8_t *user_src = NULL;
+	uint8_t *k_align_dst = k_align_src;
+	struct	qcedev_cipher_op_req *creq = &areq->cipher_op_req;
+
+
+	if (areq->cipher_op_req.mode == QCEDEV_AES_MODE_CTR)
+		byteoffset = areq->cipher_op_req.byteoffset;
+
+	user_src = (void __user *)areq->cipher_op_req.vbuf.src[0].vaddr;
+	if (user_src && copy_from_user((k_align_src + byteoffset),
+				(void __user *)user_src,
+				areq->cipher_op_req.vbuf.src[0].len))
+		return -EFAULT;
+
+	k_align_src += byteoffset + areq->cipher_op_req.vbuf.src[0].len;
+
+	for (i = 1; i < areq->cipher_op_req.entries; i++) {
+		user_src =
+			(void __user *)areq->cipher_op_req.vbuf.src[i].vaddr;
+		if (user_src && copy_from_user(k_align_src,
+					(void __user *)user_src,
+					areq->cipher_op_req.vbuf.src[i].len)) {
+			return -EFAULT;
+		}
+		k_align_src += areq->cipher_op_req.vbuf.src[i].len;
+	}
+
+	/* restore src beginning */
+	k_align_src = k_align_dst;
+	areq->cipher_op_req.data_len += byteoffset;
+
+	areq->cipher_req.creq.src = (struct scatterlist *) &sg_src;
+	areq->cipher_req.creq.dst = (struct scatterlist *) &sg_src;
+
+	/* In place encryption/decryption */
+	sg_set_buf(areq->cipher_req.creq.src,
+					k_align_dst,
+					areq->cipher_op_req.data_len);
+	sg_mark_end(areq->cipher_req.creq.src);
+
+	areq->cipher_req.creq.nbytes = areq->cipher_op_req.data_len;
+	areq->cipher_req.creq.info = areq->cipher_op_req.iv;
+	areq->cipher_op_req.entries = 1;
+
+	err = submit_req(areq, handle);
+
+	/* copy data to destination buffer*/
+	creq->data_len -= byteoffset;
+
+	while (creq->data_len > 0) {
+		if (creq->vbuf.dst[dst_i].len <= creq->data_len) {
+			if (err == 0 && copy_to_user(
+				(void __user *)creq->vbuf.dst[dst_i].vaddr,
+					(k_align_dst + byteoffset),
+					creq->vbuf.dst[dst_i].len))
+					return -EFAULT;
+
+			k_align_dst += creq->vbuf.dst[dst_i].len +
+						byteoffset;
+			creq->data_len -= creq->vbuf.dst[dst_i].len;
+			dst_i++;
+		} else {
+				if (err == 0 && copy_to_user(
+				(void __user *)creq->vbuf.dst[dst_i].vaddr,
+				(k_align_dst + byteoffset),
+				creq->data_len))
+					return -EFAULT;
+
+			k_align_dst += creq->data_len;
+			creq->vbuf.dst[dst_i].len -= creq->data_len;
+			creq->vbuf.dst[dst_i].vaddr += creq->data_len;
+			creq->data_len = 0;
+		}
+	}
+	*di = dst_i;
+
+	return err;
+}
+
+static int qcedev_vbuf_ablk_cipher(struct qcedev_async_req *areq,
+						struct qcedev_handle *handle)
+{
+	int err = 0;
+	int di = 0;
+	int i = 0;
+	int j = 0;
+	int k = 0;
+	uint32_t byteoffset = 0;
+	int num_entries = 0;
+	uint32_t total = 0;
+	uint32_t len;
+	uint8_t *k_buf_src = NULL;
+	uint8_t *k_align_src = NULL;
+	uint32_t max_data_xfer;
+	struct qcedev_cipher_op_req *saved_req;
+	struct	qcedev_cipher_op_req *creq = &areq->cipher_op_req;
+
+	total = 0;
+
+	if (areq->cipher_op_req.mode == QCEDEV_AES_MODE_CTR)
+		byteoffset = areq->cipher_op_req.byteoffset;
+	k_buf_src = kmalloc(QCE_MAX_OPER_DATA + CACHE_LINE_SIZE * 2,
+				GFP_KERNEL);
+	if (k_buf_src == NULL) {
+		pr_err("%s: Can't Allocate memory: k_buf_src 0x%lx\n",
+					__func__, (uintptr_t)k_buf_src);
+		return -ENOMEM;
+	}
+	k_align_src = (uint8_t *)ALIGN(((uintptr_t)k_buf_src),
+							CACHE_LINE_SIZE);
+	max_data_xfer = QCE_MAX_OPER_DATA - byteoffset;
+
+	saved_req = kmalloc(sizeof(struct qcedev_cipher_op_req), GFP_KERNEL);
+	if (saved_req == NULL) {
+		pr_err("%s: Can't Allocate memory:saved_req 0x%lx\n",
+			__func__, (uintptr_t)saved_req);
+		kzfree(k_buf_src);
+		return -ENOMEM;
+
+	}
+	memcpy(saved_req, creq, sizeof(struct qcedev_cipher_op_req));
+
+	if (areq->cipher_op_req.data_len > max_data_xfer) {
+		struct qcedev_cipher_op_req req;
+
+		/* save the original req structure */
+		memcpy(&req, creq, sizeof(struct qcedev_cipher_op_req));
+
+		i = 0;
+		/* Address QCE_MAX_OPER_DATA (0xFF00 bytes) at a time */
+		while ((i < req.entries) && (err == 0)) {
+			if (creq->vbuf.src[i].len > max_data_xfer) {
+				creq->vbuf.src[0].len = max_data_xfer;
+				if (i > 0) {
+					creq->vbuf.src[0].vaddr =
+						creq->vbuf.src[i].vaddr;
+				}
+
+				creq->data_len = max_data_xfer;
+				creq->entries = 1;
+
+				err = qcedev_vbuf_ablk_cipher_max_xfer(areq,
+						&di, handle, k_align_src);
+				if (err < 0) {
+					kzfree(k_buf_src);
+					kzfree(saved_req);
+					return err;
+				}
+
+				creq->vbuf.src[i].len = req.vbuf.src[i].len -
+							max_data_xfer;
+				creq->vbuf.src[i].vaddr =
+						req.vbuf.src[i].vaddr +
+						max_data_xfer;
+				req.vbuf.src[i].vaddr =
+						creq->vbuf.src[i].vaddr;
+				req.vbuf.src[i].len = creq->vbuf.src[i].len;
+
+			} else {
+				total = areq->cipher_op_req.byteoffset;
+				for (j = i; j < req.entries; j++) {
+					num_entries++;
+					if ((total + creq->vbuf.src[j].len)
+							>= max_data_xfer) {
+						creq->vbuf.src[j].len =
+						max_data_xfer - total;
+						total = max_data_xfer;
+						break;
+					}
+					total += creq->vbuf.src[j].len;
+				}
+
+				creq->data_len = total;
+				if (i > 0)
+					for (k = 0; k < num_entries; k++) {
+						creq->vbuf.src[k].len =
+						creq->vbuf.src[i+k].len;
+						creq->vbuf.src[k].vaddr =
+						creq->vbuf.src[i+k].vaddr;
+					}
+				creq->entries = num_entries;
+
+				i = j;
+				err = qcedev_vbuf_ablk_cipher_max_xfer(areq,
+						&di, handle, k_align_src);
+				if (err < 0) {
+					kzfree(k_buf_src);
+					kzfree(saved_req);
+					return err;
+				}
+
+				num_entries = 0;
+				areq->cipher_op_req.byteoffset = 0;
+
+				creq->vbuf.src[i].vaddr = req.vbuf.src[i].vaddr
+					+ creq->vbuf.src[i].len;
+				creq->vbuf.src[i].len = req.vbuf.src[i].len -
+							creq->vbuf.src[i].len;
+
+				req.vbuf.src[i].vaddr =
+						creq->vbuf.src[i].vaddr;
+				req.vbuf.src[i].len = creq->vbuf.src[i].len;
+
+				if (creq->vbuf.src[i].len == 0)
+					i++;
+			}
+
+			areq->cipher_op_req.byteoffset = 0;
+			max_data_xfer = QCE_MAX_OPER_DATA;
+			byteoffset = 0;
+
+		} /* end of while ((i < req.entries) && (err == 0)) */
+	} else
+		err = qcedev_vbuf_ablk_cipher_max_xfer(areq, &di, handle,
+								k_align_src);
+
+	/* Restore the original req structure */
+	for (i = 0; i < saved_req->entries; i++) {
+		creq->vbuf.src[i].len = saved_req->vbuf.src[i].len;
+		creq->vbuf.src[i].vaddr = saved_req->vbuf.src[i].vaddr;
+	}
+	for (len = 0, i = 0; len < saved_req->data_len; i++) {
+		creq->vbuf.dst[i].len = saved_req->vbuf.dst[i].len;
+		creq->vbuf.dst[i].vaddr = saved_req->vbuf.dst[i].vaddr;
+		len += saved_req->vbuf.dst[i].len;
+	}
+	creq->entries = saved_req->entries;
+	creq->data_len = saved_req->data_len;
+	creq->byteoffset = saved_req->byteoffset;
+
+	kzfree(saved_req);
+	kzfree(k_buf_src);
+	return err;
+
+}
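+
+/*
+ * Note on the CTR byteoffset handling above: the offset only applies to
+ * the first transfer (the plaintext is staged at k_align_src + byteoffset
+ * so it lines up with the keystream position), which is why byteoffset
+ * and max_data_xfer are reset once the first chunk has been submitted.
+ */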
+
+static int qcedev_check_cipher_key(struct qcedev_cipher_op_req *req,
+						struct qcedev_control *podev)
+{
+	/* if intending to use HW key make sure key fields are set
+	 * correctly and HW key is indeed supported in target
+	 */
+	if (req->encklen == 0) {
+		int i;
+
+		for (i = 0; i < QCEDEV_MAX_KEY_SIZE; i++) {
+			if (req->enckey[i]) {
+				pr_err("%s: Invalid key: non-zero key input\n",
+								__func__);
+				goto error;
+			}
+		}
+		if ((req->op != QCEDEV_OPER_ENC_NO_KEY) &&
+			(req->op != QCEDEV_OPER_DEC_NO_KEY)) {
+			if (!podev->platform_support.hw_key_support) {
+				pr_err("%s: Invalid op %d\n", __func__,
+						(uint32_t)req->op);
+				goto error;
+			}
+		}
+	} else {
+		if (req->encklen == QCEDEV_AES_KEY_192) {
+			if (!podev->ce_support.aes_key_192) {
+				pr_err("%s: AES-192 not supported\n", __func__);
+				goto error;
+			}
+		} else {
+			/* if not using HW key make sure key
+			 * length is valid
+			 */
+			if (req->mode == QCEDEV_AES_MODE_XTS) {
+				if ((req->encklen != QCEDEV_AES_KEY_128*2) &&
+				(req->encklen != QCEDEV_AES_KEY_256*2)) {
+					pr_err("%s: unsupported key size: %d\n",
+							__func__, req->encklen);
+					goto error;
+				}
+			} else {
+				if ((req->encklen != QCEDEV_AES_KEY_128) &&
+					(req->encklen != QCEDEV_AES_KEY_256)) {
+					pr_err("%s: unsupported key size %d\n",
+							__func__, req->encklen);
+					goto error;
+				}
+			}
+		}
+	}
+	return 0;
+error:
+	return -EINVAL;
+}
+
+static int qcedev_check_cipher_params(struct qcedev_cipher_op_req *req,
+						struct qcedev_control *podev)
+{
+	uint32_t total = 0;
+	uint32_t i;
+
+	if (req->use_pmem) {
+		pr_err("%s: Use of PMEM is not supported\n", __func__);
+		goto error;
+	}
+	if ((req->entries == 0) || (req->data_len == 0) ||
+			(req->entries > QCEDEV_MAX_BUFFERS)) {
+		pr_err("%s: Invalid cipher length/entries\n", __func__);
+		goto error;
+	}
+	if ((req->alg >= QCEDEV_ALG_LAST) ||
+		(req->mode >= QCEDEV_AES_DES_MODE_LAST)) {
+		pr_err("%s: Invalid algorithm %d\n", __func__,
+						(uint32_t)req->alg);
+		goto error;
+	}
+	if ((req->mode == QCEDEV_AES_MODE_XTS) &&
+				(!podev->ce_support.aes_xts)) {
+		pr_err("%s: XTS algorithm is not supported\n", __func__);
+		goto error;
+	}
+	if (req->alg == QCEDEV_ALG_AES) {
+		if (qcedev_check_cipher_key(req, podev))
+			goto error;
+
+	}
+	/* if using a byteoffset, make sure it is CTR mode using vbuf */
+	if (req->byteoffset) {
+		if (req->mode != QCEDEV_AES_MODE_CTR) {
+			pr_err("%s: Operation on byte offset not supported\n",
+								 __func__);
+			goto error;
+		}
+		if (req->byteoffset >= AES_CE_BLOCK_SIZE) {
+			pr_err("%s: Invalid byte offset\n", __func__);
+			goto error;
+		}
+		total = req->byteoffset;
+		for (i = 0; i < req->entries; i++) {
+			if (total > U32_MAX - req->vbuf.src[i].len) {
+				pr_err("%s:Integer overflow on total src len\n",
+					__func__);
+				goto error;
+			}
+			total += req->vbuf.src[i].len;
+		}
+	}
+
+	if (req->data_len < req->byteoffset) {
+		pr_err("%s: req data length %u is less than byteoffset %u\n",
+				__func__, req->data_len, req->byteoffset);
+		goto error;
+	}
+
+	/* Ensure IV size */
+	if (req->ivlen > QCEDEV_MAX_IV_SIZE) {
+		pr_err("%s: ivlen is not correct: %u\n", __func__, req->ivlen);
+		goto error;
+	}
+
+	/* Ensure Key size */
+	if (req->encklen > QCEDEV_MAX_KEY_SIZE) {
+		pr_err("%s: Klen is not correct: %u\n", __func__, req->encklen);
+		goto error;
+	}
+
+	/* Ensure zero ivlen for ECB mode */
+	if (req->ivlen > 0) {
+		if ((req->mode == QCEDEV_AES_MODE_ECB) ||
+				(req->mode == QCEDEV_DES_MODE_ECB)) {
+			pr_err("%s: Expecting a zero length IV\n", __func__);
+			goto error;
+		}
+	} else {
+		if ((req->mode != QCEDEV_AES_MODE_ECB) &&
+				(req->mode != QCEDEV_DES_MODE_ECB)) {
+			pr_err("%s: Expecting a non-zero ength IV\n", __func__);
+			goto error;
+		}
+	}
+	/* Check for sum of all dst length is equal to data_len  */
+	for (i = 0, total = 0; i < req->entries; i++) {
+		if (!req->vbuf.dst[i].vaddr && req->vbuf.dst[i].len) {
+			pr_err("%s: NULL req dst vbuf[%d] with length %d\n",
+				__func__, i, req->vbuf.dst[i].len);
+			goto error;
+		}
+		if (req->vbuf.dst[i].len >= U32_MAX - total) {
+			pr_err("%s: Integer overflow on total req dst vbuf length\n",
+				__func__);
+			goto error;
+		}
+		total += req->vbuf.dst[i].len;
+	}
+	if (total != req->data_len) {
+		pr_err("%s: Total (i=%d) dst(%d) buf size != data_len (%d)\n",
+			__func__, i, total, req->data_len);
+		goto error;
+	}
+	/* Check for sum of all src length is equal to data_len  */
+	for (i = 0, total = 0; i < req->entries; i++) {
+		if (!req->vbuf.src[i].vaddr && req->vbuf.src[i].len) {
+			pr_err("%s: NULL req src vbuf[%d] with length %d\n",
+				__func__, i, req->vbuf.src[i].len);
+			goto error;
+		}
+		if (req->vbuf.src[i].len > U32_MAX - total) {
+			pr_err("%s: Integer overflow on total req src vbuf length\n",
+				__func__);
+			goto error;
+		}
+		total += req->vbuf.src[i].len;
+	}
+	if (total != req->data_len) {
+		pr_err("%s: Total src(%d) buf size != data_len (%d)\n",
+			__func__, total, req->data_len);
+		goto error;
+	}
+	return 0;
+error:
+	return -EINVAL;
+
+}
+
+static int qcedev_check_sha_params(struct qcedev_sha_op_req *req,
+						struct qcedev_control *podev)
+{
+	uint32_t total = 0;
+	uint32_t i;
+
+	if ((req->alg == QCEDEV_ALG_AES_CMAC) &&
+				(!podev->ce_support.cmac)) {
+		pr_err("%s: CMAC not supported\n", __func__);
+		goto sha_error;
+	}
+	if ((!req->entries) || (req->entries > QCEDEV_MAX_BUFFERS)) {
+		pr_err("%s: Invalid num entries (%d)\n",
+						__func__, req->entries);
+		goto sha_error;
+	}
+
+	if (req->alg >= QCEDEV_ALG_SHA_ALG_LAST) {
+		pr_err("%s: Invalid algorithm (%d)\n", __func__, req->alg);
+		goto sha_error;
+	}
+	if ((req->alg == QCEDEV_ALG_SHA1_HMAC) ||
+			(req->alg == QCEDEV_ALG_SHA256_HMAC)) {
+		if (req->authkey == NULL) {
+			pr_err("%s: Invalid authkey pointer\n", __func__);
+			goto sha_error;
+		}
+		if (req->authklen == 0) {
+			pr_err("%s: Invalid authkey length (%d)\n",
+						__func__, req->authklen);
+			goto sha_error;
+		}
+	}
+
+	if (req->alg == QCEDEV_ALG_AES_CMAC) {
+		if ((req->authklen != QCEDEV_AES_KEY_128) &&
+					(req->authklen != QCEDEV_AES_KEY_256)) {
+			pr_err("%s: unsupported key length\n", __func__);
+			goto sha_error;
+		}
+	}
+
+	/* Check for sum of all src length is equal to data_len  */
+	for (i = 0, total = 0; i < req->entries; i++) {
+		if (req->data[i].len > U32_MAX - total) {
+			pr_err("%s: Integer overflow on total req buf length\n",
+				__func__);
+			goto sha_error;
+		}
+		total += req->data[i].len;
+	}
+
+	if (total != req->data_len) {
+		pr_err("%s: Total src(%d) buf size != data_len (%d)\n",
+			__func__, total, req->data_len);
+		goto sha_error;
+	}
+	return 0;
+sha_error:
+	return -EINVAL;
+}
+
+long qcedev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+	int err = 0;
+	struct qcedev_handle *handle;
+	struct qcedev_control *podev;
+	struct qcedev_async_req qcedev_areq;
+	struct qcedev_stat *pstat;
+
+	handle =  file->private_data;
+	podev =  handle->cntl;
+	qcedev_areq.handle = handle;
+	if (podev == NULL || podev->magic != QCEDEV_MAGIC) {
+		pr_err("%s: invalid handle %pK\n",
+			__func__, podev);
+		return -ENOENT;
+	}
+
+	/* Verify user arguments. */
+	if (_IOC_TYPE(cmd) != QCEDEV_IOC_MAGIC)
+		return -ENOTTY;
+
+	init_completion(&qcedev_areq.complete);
+	pstat = &_qcedev_stat;
+
+	switch (cmd) {
+	case QCEDEV_IOCTL_ENC_REQ:
+	case QCEDEV_IOCTL_DEC_REQ:
+		if (copy_from_user(&qcedev_areq.cipher_op_req,
+				(void __user *)arg,
+				sizeof(struct qcedev_cipher_op_req)))
+			return -EFAULT;
+		qcedev_areq.op_type = QCEDEV_CRYPTO_OPER_CIPHER;
+
+		if (qcedev_check_cipher_params(&qcedev_areq.cipher_op_req,
+				podev))
+			return -EINVAL;
+
+		err = qcedev_vbuf_ablk_cipher(&qcedev_areq, handle);
+		if (err)
+			return err;
+		if (copy_to_user((void __user *)arg,
+					&qcedev_areq.cipher_op_req,
+					sizeof(struct qcedev_cipher_op_req)))
+			return -EFAULT;
+		break;
+
+	case QCEDEV_IOCTL_SHA_INIT_REQ:
+		{
+		struct scatterlist sg_src;
+
+		if (copy_from_user(&qcedev_areq.sha_op_req,
+					(void __user *)arg,
+					sizeof(struct qcedev_sha_op_req)))
+			return -EFAULT;
+		mutex_lock(&hash_access_lock);
+		if (qcedev_check_sha_params(&qcedev_areq.sha_op_req, podev)) {
+			mutex_unlock(&hash_access_lock);
+			return -EINVAL;
+		}
+		qcedev_areq.op_type = QCEDEV_CRYPTO_OPER_SHA;
+		err = qcedev_hash_init(&qcedev_areq, handle, &sg_src);
+		if (err) {
+			mutex_unlock(&hash_access_lock);
+			return err;
+		}
+		mutex_unlock(&hash_access_lock);
+		if (copy_to_user((void __user *)arg, &qcedev_areq.sha_op_req,
+					sizeof(struct qcedev_sha_op_req)))
+			return -EFAULT;
+		}
+		handle->sha_ctxt.init_done = true;
+		break;
+	case QCEDEV_IOCTL_GET_CMAC_REQ:
+		if (!podev->ce_support.cmac)
+			return -ENOTTY;
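+		/* fall through */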
+	case QCEDEV_IOCTL_SHA_UPDATE_REQ:
+		{
+		struct scatterlist sg_src;
+
+		if (copy_from_user(&qcedev_areq.sha_op_req,
+					(void __user *)arg,
+					sizeof(struct qcedev_sha_op_req)))
+			return -EFAULT;
+		mutex_lock(&hash_access_lock);
+		if (qcedev_check_sha_params(&qcedev_areq.sha_op_req, podev)) {
+			mutex_unlock(&hash_access_lock);
+			return -EINVAL;
+		}
+		qcedev_areq.op_type = QCEDEV_CRYPTO_OPER_SHA;
+
+		if (qcedev_areq.sha_op_req.alg == QCEDEV_ALG_AES_CMAC) {
+			err = qcedev_hash_cmac(&qcedev_areq, handle, &sg_src);
+			if (err) {
+				mutex_unlock(&hash_access_lock);
+				return err;
+			}
+		} else {
+			if (handle->sha_ctxt.init_done == false) {
+				pr_err("%s Init was not called\n", __func__);
+				mutex_unlock(&hash_access_lock);
+				return -EINVAL;
+			}
+			err = qcedev_hash_update(&qcedev_areq, handle, &sg_src);
+			if (err) {
+				mutex_unlock(&hash_access_lock);
+				return err;
+			}
+		}
+
+		if (handle->sha_ctxt.diglen > QCEDEV_MAX_SHA_DIGEST) {
+			pr_err("Invalid sha_ctxt.diglen %d\n",
+					handle->sha_ctxt.diglen);
+			mutex_unlock(&hash_access_lock);
+			return -EINVAL;
+		}
+		memcpy(&qcedev_areq.sha_op_req.digest[0],
+				&handle->sha_ctxt.digest[0],
+				handle->sha_ctxt.diglen);
+		mutex_unlock(&hash_access_lock);
+		if (copy_to_user((void __user *)arg, &qcedev_areq.sha_op_req,
+					sizeof(struct qcedev_sha_op_req)))
+			return -EFAULT;
+		}
+		break;
+
+	case QCEDEV_IOCTL_SHA_FINAL_REQ:
+
+		if (handle->sha_ctxt.init_done == false) {
+			pr_err("%s Init was not called\n", __func__);
+			return -EINVAL;
+		}
+		if (copy_from_user(&qcedev_areq.sha_op_req,
+					(void __user *)arg,
+					sizeof(struct qcedev_sha_op_req)))
+			return -EFAULT;
+		mutex_lock(&hash_access_lock);
+		if (qcedev_check_sha_params(&qcedev_areq.sha_op_req, podev)) {
+			mutex_unlock(&hash_access_lock);
+			return -EINVAL;
+		}
+		qcedev_areq.op_type = QCEDEV_CRYPTO_OPER_SHA;
+		err = qcedev_hash_final(&qcedev_areq, handle);
+		if (err) {
+			mutex_unlock(&hash_access_lock);
+			return err;
+		}
+		if (handle->sha_ctxt.diglen > QCEDEV_MAX_SHA_DIGEST) {
+			pr_err("Invalid sha_ctxt.diglen %d\n",
+					handle->sha_ctxt.diglen);
+			mutex_unlock(&hash_access_lock);
+			return -EINVAL;
+		}
+		qcedev_areq.sha_op_req.diglen = handle->sha_ctxt.diglen;
+		memcpy(&qcedev_areq.sha_op_req.digest[0],
+				&handle->sha_ctxt.digest[0],
+				handle->sha_ctxt.diglen);
+		mutex_unlock(&hash_access_lock);
+		if (copy_to_user((void __user *)arg, &qcedev_areq.sha_op_req,
+					sizeof(struct qcedev_sha_op_req)))
+			return -EFAULT;
+		handle->sha_ctxt.init_done = false;
+		break;
+
+	case QCEDEV_IOCTL_GET_SHA_REQ:
+		{
+		struct scatterlist sg_src;
+
+		if (copy_from_user(&qcedev_areq.sha_op_req,
+					(void __user *)arg,
+					sizeof(struct qcedev_sha_op_req)))
+			return -EFAULT;
+		mutex_lock(&hash_access_lock);
+		if (qcedev_check_sha_params(&qcedev_areq.sha_op_req, podev)) {
+			mutex_unlock(&hash_access_lock);
+			return -EINVAL;
+		}
+		qcedev_areq.op_type = QCEDEV_CRYPTO_OPER_SHA;
+		qcedev_hash_init(&qcedev_areq, handle, &sg_src);
+		err = qcedev_hash_update(&qcedev_areq, handle, &sg_src);
+		if (err) {
+			mutex_unlock(&hash_access_lock);
+			return err;
+		}
+		err = qcedev_hash_final(&qcedev_areq, handle);
+		if (err) {
+			mutex_unlock(&hash_access_lock);
+			return err;
+		}
+		if (handle->sha_ctxt.diglen > QCEDEV_MAX_SHA_DIGEST) {
+			pr_err("Invalid sha_ctxt.diglen %d\n",
+					handle->sha_ctxt.diglen);
+			mutex_unlock(&hash_access_lock);
+			return -EINVAL;
+		}
+		qcedev_areq.sha_op_req.diglen = handle->sha_ctxt.diglen;
+		memcpy(&qcedev_areq.sha_op_req.digest[0],
+				&handle->sha_ctxt.digest[0],
+				handle->sha_ctxt.diglen);
+		mutex_unlock(&hash_access_lock);
+		if (copy_to_user((void __user *)arg, &qcedev_areq.sha_op_req,
+					sizeof(struct qcedev_sha_op_req)))
+			return -EFAULT;
+		}
+		break;
+
+	default:
+		return -ENOTTY;
+	}
+
+	return err;
+}
+EXPORT_SYMBOL(qcedev_ioctl);
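+
+/*
+ * Minimal userspace sketch of driving the ioctl interface above (not part
+ * of the driver; the device node name, the QCEDEV_OPER_ENC constant and
+ * the exact uapi field types are assumptions):
+ *
+ *	int fd = open("/dev/qcedev", O_RDWR);
+ *	struct qcedev_cipher_op_req req = {0};
+ *
+ *	req.alg = QCEDEV_ALG_AES;
+ *	req.mode = QCEDEV_AES_MODE_CBC;
+ *	req.op = QCEDEV_OPER_ENC;
+ *	req.encklen = QCEDEV_AES_KEY_128;
+ *	memcpy(req.enckey, key, req.encklen);
+ *	req.ivlen = 16;
+ *	memcpy(req.iv, iv, req.ivlen);
+ *	req.entries = 1;
+ *	req.vbuf.src[0].vaddr = buf;
+ *	req.vbuf.src[0].len = len;
+ *	req.vbuf.dst[0].vaddr = buf;
+ *	req.vbuf.dst[0].len = len;
+ *	req.data_len = len;
+ *	if (ioctl(fd, QCEDEV_IOCTL_ENC_REQ, &req) < 0)
+ *		perror("QCEDEV_IOCTL_ENC_REQ");
+ */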
+
+static int qcedev_probe(struct platform_device *pdev)
+{
+	void *handle = NULL;
+	int rc = 0;
+	struct qcedev_control *podev;
+	struct msm_ce_hw_support *platform_support;
+
+	podev = &qce_dev[0];
+
+	podev->high_bw_req_count = 0;
+	INIT_LIST_HEAD(&podev->ready_commands);
+	podev->active_command = NULL;
+
+	spin_lock_init(&podev->lock);
+
+	tasklet_init(&podev->done_tasklet, req_done, (unsigned long)podev);
+
+	/* open qce */
+	handle = qce_open(pdev, &rc);
+	if (handle == NULL) {
+		platform_set_drvdata(pdev, NULL);
+		return rc;
+	}
+
+	podev->qce = handle;
+	podev->pdev = pdev;
+	platform_set_drvdata(pdev, podev);
+
+	rc = misc_register(&podev->miscdevice);
+	qce_hw_support(podev->qce, &podev->ce_support);
+	if (podev->ce_support.bam) {
+		podev->platform_support.ce_shared = 0;
+		podev->platform_support.shared_ce_resource = 0;
+		podev->platform_support.hw_key_support =
+						podev->ce_support.hw_key;
+		podev->platform_support.bus_scale_table = NULL;
+		podev->platform_support.sha_hmac = 1;
+
+		podev->platform_support.bus_scale_table =
+			(struct msm_bus_scale_pdata *)
+					msm_bus_cl_get_pdata(pdev);
+		if (!podev->platform_support.bus_scale_table)
+			pr_err("bus_scale_table is NULL\n");
+	} else {
+		platform_support =
+			(struct msm_ce_hw_support *)pdev->dev.platform_data;
+		podev->platform_support.ce_shared = platform_support->ce_shared;
+		podev->platform_support.shared_ce_resource =
+				platform_support->shared_ce_resource;
+		podev->platform_support.hw_key_support =
+				platform_support->hw_key_support;
+		podev->platform_support.bus_scale_table =
+				platform_support->bus_scale_table;
+		podev->platform_support.sha_hmac = platform_support->sha_hmac;
+	}
+	if (podev->platform_support.bus_scale_table != NULL) {
+		podev->bus_scale_handle =
+			msm_bus_scale_register_client(
+				(struct msm_bus_scale_pdata *)
+				podev->platform_support.bus_scale_table);
+		if (!podev->bus_scale_handle) {
+			pr_err("%s not able to get bus scale\n",
+				__func__);
+			rc =  -ENOMEM;
+			goto err;
+		}
+	}
+
+	if (rc >= 0)
+		return 0;
+	else
+		if (podev->platform_support.bus_scale_table != NULL)
+			msm_bus_scale_unregister_client(
+						podev->bus_scale_handle);
+err:
+
+	if (handle)
+		qce_close(handle);
+	platform_set_drvdata(pdev, NULL);
+	podev->qce = NULL;
+	podev->pdev = NULL;
+	return rc;
+}
+
+static int qcedev_remove(struct platform_device *pdev)
+{
+	struct qcedev_control *podev;
+
+	podev = platform_get_drvdata(pdev);
+	if (!podev)
+		return 0;
+	if (podev->qce)
+		qce_close(podev->qce);
+
+	if (podev->platform_support.bus_scale_table != NULL)
+		msm_bus_scale_unregister_client(podev->bus_scale_handle);
+
+	if (podev->miscdevice.minor != MISC_DYNAMIC_MINOR)
+		misc_deregister(&podev->miscdevice);
+	tasklet_kill(&podev->done_tasklet);
+	return 0;
+}
+
+static int qcedev_suspend(struct platform_device *pdev, pm_message_t state)
+{
+	struct qcedev_control *podev;
+	int ret;
+
+	podev = platform_get_drvdata(pdev);
+
+	if (!podev || !podev->platform_support.bus_scale_table)
+		return 0;
+
+	mutex_lock(&qcedev_sent_bw_req);
+	if (podev->high_bw_req_count) {
+		ret = msm_bus_scale_client_update_request(
+				podev->bus_scale_handle, 0);
+		if (ret) {
+			pr_err("%s Unable to set to low bandwidth\n",
+						__func__);
+			goto suspend_exit;
+		}
+		ret = qce_disable_clk(podev->qce);
+		if (ret) {
+			pr_err("%s Unable disable clk\n", __func__);
+			ret = msm_bus_scale_client_update_request(
+				podev->bus_scale_handle, 1);
+			if (ret)
+				pr_err("%s Unable to set to high bandwidth\n",
+					__func__);
+			goto suspend_exit;
+		}
+	}
+
+suspend_exit:
+	mutex_unlock(&qcedev_sent_bw_req);
+	return 0;
+}
+
+static int qcedev_resume(struct platform_device *pdev)
+{
+	struct qcedev_control *podev;
+	int ret;
+
+	podev = platform_get_drvdata(pdev);
+
+	if (!podev || !podev->platform_support.bus_scale_table)
+		return 0;
+
+	mutex_lock(&qcedev_sent_bw_req);
+	if (podev->high_bw_req_count) {
+		ret = qce_enable_clk(podev->qce);
+		if (ret) {
+			pr_err("%s Unable enable clk\n", __func__);
+			goto resume_exit;
+		}
+		ret = msm_bus_scale_client_update_request(
+				podev->bus_scale_handle, 1);
+		if (ret) {
+			pr_err("%s Unable to set to high bandwidth\n",
+						__func__);
+			ret = qce_disable_clk(podev->qce);
+			if (ret)
+				pr_err("%s Unable enable clk\n",
+					__func__);
+			goto resume_exit;
+		}
+	}
+
+resume_exit:
+	mutex_unlock(&qcedev_sent_bw_req);
+	return 0;
+}
+
+static const struct of_device_id qcedev_match[] = {
+	{	.compatible = "qcom,qcedev",
+	},
+	{}
+};
+
+static struct platform_driver qcedev_plat_driver = {
+	.probe = qcedev_probe,
+	.remove = qcedev_remove,
+	.suspend = qcedev_suspend,
+	.resume = qcedev_resume,
+	.driver = {
+		.name = "qce",
+		.owner = THIS_MODULE,
+		.of_match_table = qcedev_match,
+	},
+};
+
+static int _disp_stats(int id)
+{
+	struct qcedev_stat *pstat;
+	int len = 0;
+
+	pstat = &_qcedev_stat;
+	len = scnprintf(_debug_read_buf, DEBUG_MAX_RW_BUF - 1,
+			"\nQualcomm QCE dev driver %d Statistics:\n",
+				id + 1);
+
+	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   Encryption operation success       : %d\n",
+					pstat->qcedev_enc_success);
+	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   Encryption operation fail   : %d\n",
+					pstat->qcedev_enc_fail);
+	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   Decryption operation success     : %d\n",
+					pstat->qcedev_dec_success);
+
+	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   Decryption operation fail        : %d\n",
+					pstat->qcedev_dec_fail);
+
+	return len;
+}
+
+static int _debug_stats_open(struct inode *inode, struct file *file)
+{
+	file->private_data = inode->i_private;
+	return 0;
+}
+
+static ssize_t _debug_stats_read(struct file *file, char __user *buf,
+			size_t count, loff_t *ppos)
+{
+	ssize_t rc = -EINVAL;
+	int qcedev = *((int *) file->private_data);
+	int len;
+
+	len = _disp_stats(qcedev);
+
+	if (len <= count)
+		rc = simple_read_from_buffer((void __user *) buf, len,
+			ppos, (void *) _debug_read_buf, len);
+	return rc;
+}
+
+static ssize_t _debug_stats_write(struct file *file, const char __user *buf,
+			size_t count, loff_t *ppos)
+{
+	memset((char *)&_qcedev_stat, 0, sizeof(struct qcedev_stat));
+	return count;
+}
+
+static const struct file_operations _debug_stats_ops = {
+	.open =         _debug_stats_open,
+	.read =         _debug_stats_read,
+	.write =        _debug_stats_write,
+};
+
+static int _qcedev_debug_init(void)
+{
+	int rc;
+	char name[DEBUG_MAX_FNAME];
+	struct dentry *dent;
+
+	_debug_dent = debugfs_create_dir("qcedev", NULL);
+	if (IS_ERR(_debug_dent)) {
+		pr_err("qcedev debugfs_create_dir fail, error %ld\n",
+				PTR_ERR(_debug_dent));
+		return PTR_ERR(_debug_dent);
+	}
+
+	snprintf(name, DEBUG_MAX_FNAME-1, "stats-%d", 1);
+	_debug_qcedev = 0;
+	dent = debugfs_create_file(name, 0644, _debug_dent,
+			&_debug_qcedev, &_debug_stats_ops);
+	if (dent == NULL) {
+		pr_err("qcedev debugfs_create_file fail\n");
+		rc = -ENOMEM;
+		goto err;
+	}
+	return 0;
+err:
+	debugfs_remove_recursive(_debug_dent);
+	return rc;
+}
+
+static int qcedev_init(void)
+{
+	int rc;
+
+	rc = _qcedev_debug_init();
+	if (rc)
+		return rc;
+	return platform_driver_register(&qcedev_plat_driver);
+}
+
+static void qcedev_exit(void)
+{
+	debugfs_remove_recursive(_debug_dent);
+	platform_driver_unregister(&qcedev_plat_driver);
+}
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Qualcomm DEV Crypto driver");
+
+module_init(qcedev_init);
+module_exit(qcedev_exit);
diff -Nruw linux-4.4.115-fbx/drivers/crypto/msm./qcedevi.h linux-4.4.115-fbx/drivers/crypto/msm/qcedevi.h
--- linux-4.4.115-fbx/drivers/crypto/msm./qcedevi.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/crypto/msm/qcedevi.h	2019-01-22 16:16:23.111242857 +0100
@@ -0,0 +1,124 @@
+/* QTI Crypto Driver
+ *
+ * Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __CRYPTO_MSM_QCEDEVI_H
+#define __CRYPTO_MSM_QCEDEVI_H
+
+#include <linux/interrupt.h>
+#include <linux/miscdevice.h>
+#include <crypto/hash.h>
+#include <linux/platform_data/qcom_crypto_device.h>
+#include <linux/fips_status.h>
+#include "qce.h"
+
+#define CACHE_LINE_SIZE 32
+#define CE_SHA_BLOCK_SIZE SHA256_BLOCK_SIZE
+
+enum qcedev_crypto_oper_type {
+	QCEDEV_CRYPTO_OPER_CIPHER = 0,
+	QCEDEV_CRYPTO_OPER_SHA = 1,
+	QCEDEV_CRYPTO_OPER_LAST
+};
+
+struct qcedev_handle;
+
+struct qcedev_cipher_req {
+	struct ablkcipher_request creq;
+	void *cookie;
+};
+
+struct qcedev_sha_req {
+	struct ahash_request sreq;
+	void *cookie;
+};
+
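+/*
+ * Per-handle SHA/HMAC context. trailing_buf carries the residual bytes of
+ * a partial 64-byte block between successive update requests so that the
+ * engine is only fed whole blocks until the final request flushes the
+ * remainder.
+ */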
+struct	qcedev_sha_ctxt {
+	uint32_t	auth_data[4];
+	uint8_t	digest[QCEDEV_MAX_SHA_DIGEST];
+	uint32_t	diglen;
+	uint8_t	trailing_buf[64];
+	uint32_t	trailing_buf_len;
+	uint8_t	first_blk;
+	uint8_t	last_blk;
+	uint8_t	authkey[QCEDEV_MAX_SHA_BLOCK_SIZE];
+	bool		init_done;
+};
+
+struct qcedev_async_req {
+	struct list_head			list;
+	struct completion			complete;
+	enum qcedev_crypto_oper_type		op_type;
+	union {
+		struct qcedev_cipher_op_req	cipher_op_req;
+		struct qcedev_sha_op_req	sha_op_req;
+	};
+
+	union {
+		struct qcedev_cipher_req	cipher_req;
+		struct qcedev_sha_req		sha_req;
+	};
+	struct qcedev_handle			*handle;
+	int					err;
+};
+
+/*
+ * Register ourselves as a misc device to be able to access the dev
+ * driver from userspace.
+ */
+
+#define QCEDEV_DEV	"qcedev"
+
+struct qcedev_control {
+
+	/* CE features supported by platform */
+	struct msm_ce_hw_support platform_support;
+
+	uint32_t ce_lock_count;
+	uint32_t high_bw_req_count;
+
+	/* CE features/algorithms supported by HW engine*/
+	struct ce_hw_support ce_support;
+
+	uint32_t  bus_scale_handle;
+
+	/* misc device */
+	struct miscdevice miscdevice;
+
+	/* qce handle */
+	void *qce;
+
+	/* platform device */
+	struct platform_device *pdev;
+
+	unsigned magic;
+
+	struct list_head ready_commands;
+	struct qcedev_async_req *active_command;
+	spinlock_t lock;
+	struct tasklet_struct done_tasklet;
+};
+
+struct qcedev_handle {
+	/* qcedev control handle */
+	struct qcedev_control *cntl;
+	/* qce internal sha context*/
+	struct qcedev_sha_ctxt sha_ctxt;
+};
+
+void qcedev_cipher_req_cb(void *cookie, unsigned char *icv,
+	unsigned char *iv, int ret);
+
+void qcedev_sha_req_cb(void *cookie, unsigned char *digest,
+	unsigned char *authdata, int ret);
+
+#endif  /* __CRYPTO_MSM_QCEDEVI_H */
diff -Nruw linux-4.4.115-fbx/drivers/crypto/msm./qce.h linux-4.4.115-fbx/drivers/crypto/msm/qce.h
--- linux-4.4.115-fbx/drivers/crypto/msm./qce.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/crypto/msm/qce.h	2019-01-22 16:16:23.111242857 +0100
@@ -0,0 +1,190 @@
+/* Qualcomm Crypto Engine driver API
+ *
+ * Copyright (c) 2010-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+
+#ifndef __CRYPTO_MSM_QCE_H
+#define __CRYPTO_MSM_QCE_H
+
+#include <linux/types.h>
+#include <linux/platform_device.h>
+#include <linux/crypto.h>
+
+#include <crypto/algapi.h>
+#include <crypto/aes.h>
+#include <crypto/des.h>
+#include <crypto/sha.h>
+#include <crypto/aead.h>
+#include <crypto/authenc.h>
+#include <crypto/scatterwalk.h>
+
+/* SHA digest size  in bytes */
+#define SHA256_DIGESTSIZE		32
+#define SHA1_DIGESTSIZE			20
+
+#define AES_CE_BLOCK_SIZE		16
+
+/* key size in bytes */
+#define HMAC_KEY_SIZE			(SHA1_DIGESTSIZE)    /* hmac-sha1 */
+#define SHA_HMAC_KEY_SIZE		64
+#define DES_KEY_SIZE			8
+#define TRIPLE_DES_KEY_SIZE		24
+#define AES128_KEY_SIZE			16
+#define AES192_KEY_SIZE			24
+#define AES256_KEY_SIZE			32
+#define MAX_CIPHER_KEY_SIZE		AES256_KEY_SIZE
+
+/* iv length in bytes */
+#define AES_IV_LENGTH			16
+#define DES_IV_LENGTH                   8
+#define MAX_IV_LENGTH			AES_IV_LENGTH
+
+/* Maximum number of bytes per transfer (0xFF00 = 65280) */
+#define QCE_MAX_OPER_DATA		0xFF00
+
+/* Maximum Nonce bytes  */
+#define MAX_NONCE  16
+
+typedef void (*qce_comp_func_ptr_t)(void *areq,
+		unsigned char *icv, unsigned char *iv, int ret);
+
+/* Cipher algorithms supported */
+enum qce_cipher_alg_enum {
+	CIPHER_ALG_DES = 0,
+	CIPHER_ALG_3DES = 1,
+	CIPHER_ALG_AES = 2,
+	CIPHER_ALG_LAST
+};
+
+/* Hash and hmac algorithms supported */
+enum qce_hash_alg_enum {
+	QCE_HASH_SHA1   = 0,
+	QCE_HASH_SHA256 = 1,
+	QCE_HASH_SHA1_HMAC   = 2,
+	QCE_HASH_SHA256_HMAC = 3,
+	QCE_HASH_AES_CMAC = 4,
+	QCE_HASH_LAST
+};
+
+/* Cipher encryption/decryption operations */
+enum qce_cipher_dir_enum {
+	QCE_ENCRYPT = 0,
+	QCE_DECRYPT = 1,
+	QCE_CIPHER_DIR_LAST
+};
+
+/* Cipher algorithms modes */
+enum qce_cipher_mode_enum {
+	QCE_MODE_CBC = 0,
+	QCE_MODE_ECB = 1,
+	QCE_MODE_CTR = 2,
+	QCE_MODE_XTS = 3,
+	QCE_MODE_CCM = 4,
+	QCE_CIPHER_MODE_LAST
+};
+
+/* Cipher operation type */
+enum qce_req_op_enum {
+	QCE_REQ_ABLK_CIPHER = 0,
+	QCE_REQ_ABLK_CIPHER_NO_KEY = 1,
+	QCE_REQ_AEAD = 2,
+	QCE_REQ_LAST
+};
+
+/* Algorithms/features supported in CE HW engine */
+struct ce_hw_support {
+	bool sha1_hmac_20; /* Supports 20 bytes of HMAC key */
+	bool sha1_hmac; /* supports max HMAC key of 64 bytes */
+	bool sha256_hmac; /* supports max HMAC key of 64 bytes */
+	bool sha_hmac; /* supports SHA1 and SHA256 MAX HMAC key of 64 bytes */
+	bool cmac;
+	bool aes_key_192;
+	bool aes_xts;
+	bool aes_ccm;
+	bool ota;
+	bool aligned_only;
+	bool bam;
+	bool is_shared;
+	bool hw_key;
+	bool use_sw_aes_cbc_ecb_ctr_algo;
+	bool use_sw_aead_algo;
+	bool use_sw_aes_xts_algo;
+	bool use_sw_ahash_algo;
+	bool use_sw_hmac_algo;
+	bool use_sw_aes_ccm_algo;
+	bool clk_mgmt_sus_res;
+	unsigned int ce_device;
+	unsigned int ce_hw_instance;
+	unsigned int max_request;
+};
+
+/* Sha operation parameters */
+struct qce_sha_req {
+	qce_comp_func_ptr_t qce_cb;	/* call back */
+	enum qce_hash_alg_enum alg;	/* sha algorithm */
+	unsigned char *digest;		/* sha digest  */
+	struct scatterlist *src;	/* pointer to scatter list entry */
+	uint32_t  auth_data[4];		/* byte count */
+	unsigned char *authkey;		/* auth key */
+	unsigned int  authklen;		/* auth key length */
+	bool first_blk;			/* first block indicator */
+	bool last_blk;			/* last block indicator */
+	unsigned int size;		/* data length in bytes */
+	void *areq;
+	unsigned int  flags;
+};
+
+struct qce_req {
+	enum qce_req_op_enum op;	/* operation type */
+	qce_comp_func_ptr_t qce_cb;	/* call back */
+	void *areq;
+	enum qce_cipher_alg_enum   alg;	/* cipher algorithms*/
+	enum qce_cipher_dir_enum dir;	/* encryption? decryption? */
+	enum qce_cipher_mode_enum mode;	/* algorithm mode  */
+	enum qce_hash_alg_enum auth_alg;/* authentication algorithm for aead */
+	unsigned char *authkey;		/* authentication key  */
+	unsigned int authklen;		/* authentication key length */
+	unsigned int authsize;		/* authentication tag (ICV) length */
+	unsigned char  nonce[MAX_NONCE];/* nonce for ccm mode */
+	unsigned char *assoc;		/* Ptr to formatted associated data */
+	unsigned int assoclen;		/* Formatted associated data length  */
+	struct scatterlist *asg;	/* Formatted associated data sg  */
+	unsigned char *enckey;		/* cipher key  */
+	unsigned int encklen;		/* cipher key length */
+	unsigned char *iv;		/* initialization vector */
+	unsigned int ivsize;		/* initialization vector size*/
+	unsigned int cryptlen;		/* data length */
+	unsigned int use_pmem;		/* is source of data PMEM allocated? */
+	struct qcedev_pmem_info *pmem;	/* pointer to pmem_info structure*/
+	unsigned int  flags;
+};
+
+struct qce_pm_table {
+	int (*suspend)(void *handle);
+	int (*resume)(void *handle);
+};
+
+extern struct qce_pm_table qce_pm_table;
+
+void *qce_open(struct platform_device *pdev, int *rc);
+int qce_close(void *handle);
+int qce_aead_req(void *handle, struct qce_req *req);
+int qce_ablk_cipher_req(void *handle, struct qce_req *req);
+int qce_hw_support(void *handle, struct ce_hw_support *support);
+int qce_process_sha_req(void *handle, struct qce_sha_req *s_req);
+int qce_enable_clk(void *handle);
+int qce_disable_clk(void *handle);
+void qce_get_driver_stats(void *handle);
+void qce_clear_driver_stats(void *handle);
+
+#endif /* __CRYPTO_MSM_QCE_H */
diff -Nruw linux-4.4.115-fbx/drivers/crypto/msm./qce_ota.h linux-4.4.115-fbx/drivers/crypto/msm/qce_ota.h
--- linux-4.4.115-fbx/drivers/crypto/msm./qce_ota.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/crypto/msm/qce_ota.h	2019-01-22 16:16:23.111242857 +0100
@@ -0,0 +1,30 @@
+/* Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+/* Qualcomm Crypto Engine driver OTA API */
+
+#ifndef __CRYPTO_MSM_QCE_OTA_H
+#define __CRYPTO_MSM_QCE_OTA_H
+
+#include <linux/platform_device.h>
+#include <linux/qcota.h>
+
+
+int qce_f8_req(void *handle, struct qce_f8_req *req,
+		void *cookie, qce_comp_func_ptr_t qce_cb);
+int qce_f8_multi_pkt_req(void *handle, struct qce_f8_multi_pkt_req *req,
+		void *cookie, qce_comp_func_ptr_t qce_cb);
+int qce_f9_req(void *handle, struct qce_f9_req *req,
+		void *cookie, qce_comp_func_ptr_t qce_cb);
+
+#endif /* __CRYPTO_MSM_QCE_OTA_H */
diff -Nruw linux-4.4.115-fbx/drivers/crypto/msm./qcrypto.c linux-4.4.115-fbx/drivers/crypto/msm/qcrypto.c
--- linux-4.4.115-fbx/drivers/crypto/msm./qcrypto.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/crypto/msm/qcrypto.c	2019-10-29 09:26:23.505201828 +0100
@@ -0,0 +1,5555 @@
+/* Qualcomm Crypto driver
+ *
+ * Copyright (c) 2010-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/clk.h>
+#include <linux/cpu.h>
+#include <linux/types.h>
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
+#include <linux/crypto.h>
+#include <linux/kernel.h>
+#include <linux/rtnetlink.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/llist.h>
+#include <linux/debugfs.h>
+#include <linux/workqueue.h>
+#include <linux/sched.h>
+#include <linux/init.h>
+#include <linux/cache.h>
+#include <linux/platform_data/qcom_crypto_device.h>
+#include <linux/msm-bus.h>
+#include <linux/hardirq.h>
+#include <linux/qcrypto.h>
+
+#include <crypto/ctr.h>
+#include <crypto/des.h>
+#include <crypto/aes.h>
+#include <crypto/sha.h>
+#include <crypto/hash.h>
+#include <crypto/algapi.h>
+#include <crypto/aead.h>
+#include <crypto/authenc.h>
+#include <crypto/scatterwalk.h>
+#include <crypto/internal/hash.h>
+#include <crypto/internal/aead.h>
+
+#include <linux/fips_status.h>
+
+#include "qce.h"
+
+#define DEBUG_MAX_FNAME  16
+#define DEBUG_MAX_RW_BUF 4096
+#define QCRYPTO_BIG_NUMBER 9999999 /* a big number */
+
+/*
+ * For crypto 5.0 which has burst size alignment requirement.
+ */
+#define MAX_ALIGN_SIZE  0x40
+
+#define QCRYPTO_HIGH_BANDWIDTH_TIMEOUT 1000
+
+
+
+/* Status of response workq */
+enum resp_workq_sts {
+	NOT_SCHEDULED  = 0,
+	IS_SCHEDULED   = 1,
+	SCHEDULE_AGAIN = 2
+};
+
+/* Status of req processing by CEs */
+enum req_processing_sts {
+	STOPPED     = 0,
+	IN_PROGRESS = 1
+};
+
+enum qcrypto_bus_state {
+	BUS_NO_BANDWIDTH = 0,
+	BUS_HAS_BANDWIDTH,
+	BUS_BANDWIDTH_RELEASING,
+	BUS_BANDWIDTH_ALLOCATING,
+	BUS_SUSPENDED,
+	BUS_SUSPENDING,
+};
+
+struct crypto_stat {
+	u64 aead_sha1_aes_enc;
+	u64 aead_sha1_aes_dec;
+	u64 aead_sha1_des_enc;
+	u64 aead_sha1_des_dec;
+	u64 aead_sha1_3des_enc;
+	u64 aead_sha1_3des_dec;
+	u64 aead_sha256_aes_enc;
+	u64 aead_sha256_aes_dec;
+	u64 aead_sha256_des_enc;
+	u64 aead_sha256_des_dec;
+	u64 aead_sha256_3des_enc;
+	u64 aead_sha256_3des_dec;
+	u64 aead_ccm_aes_enc;
+	u64 aead_ccm_aes_dec;
+	u64 aead_rfc4309_ccm_aes_enc;
+	u64 aead_rfc4309_ccm_aes_dec;
+	u64 aead_op_success;
+	u64 aead_op_fail;
+	u64 aead_bad_msg;
+	u64 ablk_cipher_aes_enc;
+	u64 ablk_cipher_aes_dec;
+	u64 ablk_cipher_des_enc;
+	u64 ablk_cipher_des_dec;
+	u64 ablk_cipher_3des_enc;
+	u64 ablk_cipher_3des_dec;
+	u64 ablk_cipher_op_success;
+	u64 ablk_cipher_op_fail;
+	u64 sha1_digest;
+	u64 sha256_digest;
+	u64 sha1_hmac_digest;
+	u64 sha256_hmac_digest;
+	u64 ahash_op_success;
+	u64 ahash_op_fail;
+};
+static struct crypto_stat _qcrypto_stat;
+static struct dentry *_debug_dent;
+static char _debug_read_buf[DEBUG_MAX_RW_BUF];
+static bool _qcrypto_init_assign;
+struct crypto_priv;
+struct qcrypto_req_control {
+	unsigned int index;
+	bool in_use;
+	struct crypto_engine *pce;
+	struct crypto_async_request *req;
+	struct qcrypto_resp_ctx *arsp;
+	int res; /* execution result */
+};
+
+struct crypto_engine {
+	struct list_head elist;
+	void *qce; /* qce handle */
+	struct platform_device *pdev; /* platform device */
+	struct crypto_priv *pcp;
+	uint32_t  bus_scale_handle;
+	struct crypto_queue req_queue;	/*
+					 * request queue for those requests
+					 * that have this engine assigned
+					 * waiting to be executed
+					 */
+	u64 total_req;
+	u64 err_req;
+	u32 unit;
+	u32 ce_device;
+	u32 ce_hw_instance;
+	unsigned int signature;
+
+	enum qcrypto_bus_state bw_state;
+	bool   high_bw_req;
+	struct timer_list bw_reaper_timer;
+	struct work_struct bw_reaper_ws;
+	struct work_struct bw_allocate_ws;
+
+	/* engine execution sequence number */
+	u32    active_seq;
+	/* last QCRYPTO_HIGH_BANDWIDTH_TIMEOUT active_seq */
+	u32    last_active_seq;
+
+	bool   check_flag;
+	/*Added to support multi-requests*/
+	unsigned int max_req;
+	struct   qcrypto_req_control *preq_pool;
+	atomic_t req_count;
+	bool issue_req;		/* a request is being issued to qce */
+	bool first_engine;	/* whether this engine is the first engine */
+	unsigned int irq_cpu;	/* the cpu running the irq of this engine */
+	unsigned int max_req_used; /* debug stats */
+};
+
+#define MAX_SMP_CPU    8
+
+struct crypto_priv {
+	/* CE features supported by target device*/
+	struct msm_ce_hw_support platform_support;
+
+	/* CE features/algorithms supported by HW engine*/
+	struct ce_hw_support ce_support;
+
+	/* the lock protects crypto queue and req */
+	spinlock_t lock;
+
+	/* list of  registered algorithms */
+	struct list_head alg_list;
+
+	/* current active request */
+	struct crypto_async_request *req;
+
+	struct work_struct unlock_ce_ws;
+	struct list_head engine_list; /* list of  qcrypto engines */
+	int32_t total_units;   /* total units of engines */
+	struct mutex engine_lock;
+
+	struct crypto_engine *next_engine; /* next assign engine */
+	struct crypto_queue req_queue;	/*
+					 * request queue for those requests
+					 * that waiting for an available
+					 * engine.
+					 */
+	struct llist_head ordered_resp_list;	/* Queue to maintain
+						 * responses in sequence.
+						 */
+	atomic_t resp_cnt;
+	struct workqueue_struct *resp_wq;
+	struct work_struct resp_work;	/*
+					 * Workq to send responses
+					 * in sequence.
+					 */
+	enum resp_workq_sts sched_resp_workq_status;
+	enum req_processing_sts ce_req_proc_sts;
+	int cpu_getting_irqs_frm_first_ce;
+	struct crypto_engine *first_engine;
+	struct crypto_engine *scheduled_eng; /* last engine scheduled */
+
+	/* debug stats */
+	unsigned no_avail;
+	unsigned resp_stop;
+	unsigned resp_start;
+	unsigned max_qlen;
+	unsigned int queue_work_eng3;
+	unsigned int queue_work_not_eng3;
+	unsigned int queue_work_not_eng3_nz;
+	unsigned int max_resp_qlen;
+	unsigned int max_reorder_cnt;
+	unsigned int cpu_req[MAX_SMP_CPU+1];
+};
+static struct crypto_priv qcrypto_dev;
+static struct crypto_engine *_qcrypto_static_assign_engine(
+					struct crypto_priv *cp);
+static struct crypto_engine *_avail_eng(struct crypto_priv *cp);
+static struct qcrypto_req_control *qcrypto_alloc_req_control(
+						struct crypto_engine *pce)
+{
+	int i;
+	struct qcrypto_req_control *pqcrypto_req_control = pce->preq_pool;
+	unsigned int req_count;
+
+	for (i = 0; i < pce->max_req; i++) {
+		if (xchg(&pqcrypto_req_control->in_use, true) == false) {
+			req_count = atomic_inc_return(&pce->req_count);
+			if (req_count > pce->max_req_used)
+				pce->max_req_used = req_count;
+			return pqcrypto_req_control;
+		}
+		pqcrypto_req_control++;
+	}
+	return NULL;
+}
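+
+/*
+ * The pool above is claimed without a lock: xchg() on ->in_use atomically
+ * tests and sets a slot, so concurrent submitters can race over the array
+ * safely; req_count tracks occupancy and feeds the max_req_used debug
+ * statistic.
+ */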
+
+static void qcrypto_free_req_control(struct crypto_engine *pce,
+					struct qcrypto_req_control *preq)
+{
+	/* do this before free req */
+	preq->req = NULL;
+	preq->arsp = NULL;
+	/* free req */
+	if (xchg(&preq->in_use, false) == false) {
+		pr_warn("request info %pK free already\n", preq);
+	} else {
+		atomic_dec(&pce->req_count);
+	}
+}
+
+static struct qcrypto_req_control *find_req_control_for_areq(
+					struct crypto_engine *pce,
+					struct crypto_async_request *areq)
+{
+	int i;
+	struct qcrypto_req_control *pqcrypto_req_control = pce->preq_pool;
+
+	for (i = 0; i < pce->max_req; i++) {
+		if (pqcrypto_req_control->req == areq)
+			return pqcrypto_req_control;
+		pqcrypto_req_control++;
+	}
+	return NULL;
+}
+
+static void qcrypto_init_req_control(struct crypto_engine *pce,
+			struct qcrypto_req_control *pqcrypto_req_control)
+{
+	int i;
+
+	pce->preq_pool = pqcrypto_req_control;
+	atomic_set(&pce->req_count, 0);
+	for (i = 0; i < pce->max_req; i++) {
+		pqcrypto_req_control->index = i;
+		pqcrypto_req_control->in_use = false;
+		pqcrypto_req_control->pce = pce;
+		pqcrypto_req_control++;
+	}
+}
+
+static struct crypto_engine *_qrypto_find_pengine_device(struct crypto_priv *cp,
+			 unsigned int device)
+{
+	struct crypto_engine *entry = NULL;
+	unsigned long flags;
+
+	spin_lock_irqsave(&cp->lock, flags);
+	list_for_each_entry(entry, &cp->engine_list, elist) {
+		if (entry->ce_device == device)
+			break;
+	}
+	spin_unlock_irqrestore(&cp->lock, flags);
+
+	if (((entry != NULL) && (entry->ce_device != device)) ||
+		(entry == NULL)) {
+		pr_err("Device node for CE device %d NOT FOUND!!\n",
+				device);
+		return NULL;
+	}
+
+	return entry;
+}
+
+static struct crypto_engine *_qrypto_find_pengine_device_hw(
+			struct crypto_priv *cp,
+			u32 device,
+			u32 hw_instance)
+{
+	struct crypto_engine *entry;
+	struct crypto_engine *found = NULL;
+	unsigned long flags;
+
+	spin_lock_irqsave(&cp->lock, flags);
+	list_for_each_entry(entry, &cp->engine_list, elist) {
+		if ((entry->ce_device == device) &&
+			(entry->ce_hw_instance == hw_instance)) {
+			found = entry;
+			break;
+		}
+	}
+	spin_unlock_irqrestore(&cp->lock, flags);
+
+	if (!found) {
+		pr_err("Device node for CE device %d NOT FOUND!!\n", device);
+		return NULL;
+	}
+	return found;
+}
+
+int qcrypto_get_num_engines(void)
+{
+	struct crypto_priv *cp = &qcrypto_dev;
+	struct crypto_engine *entry = NULL;
+	int count = 0;
+
+	list_for_each_entry(entry, &cp->engine_list, elist) {
+		count++;
+	}
+	return count;
+}
+EXPORT_SYMBOL(qcrypto_get_num_engines);
+
+void qcrypto_get_engine_list(size_t num_engines,
+				struct crypto_engine_entry *arr)
+{
+	struct crypto_priv *cp = &qcrypto_dev;
+	struct crypto_engine *entry = NULL;
+	size_t arr_index = 0;
+
+	list_for_each_entry(entry, &cp->engine_list, elist) {
+		arr[arr_index].ce_device = entry->ce_device;
+		arr[arr_index].hw_instance = entry->ce_hw_instance;
+		arr_index++;
+		if (arr_index >= num_engines)
+			break;
+	}
+}
+EXPORT_SYMBOL(qcrypto_get_engine_list);
+
+enum qcrypto_alg_type {
+	QCRYPTO_ALG_CIPHER	= 0,
+	QCRYPTO_ALG_SHA	= 1,
+	QCRYPTO_ALG_AEAD = 2,
+	QCRYPTO_ALG_LAST
+};
+
+struct qcrypto_alg {
+	struct list_head entry;
+	struct crypto_alg cipher_alg;
+	struct ahash_alg sha_alg;
+	struct aead_alg aead_alg;
+	enum qcrypto_alg_type alg_type;
+	struct crypto_priv *cp;
+};
+
+#define QCRYPTO_MAX_KEY_SIZE	64
+/* max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */
+#define QCRYPTO_MAX_IV_LENGTH	16
+
+#define	QCRYPTO_CCM4309_NONCE_LEN	3
+
+struct qcrypto_cipher_ctx {
+	struct list_head rsp_queue;     /* response queue */
+	struct crypto_engine *pengine;  /* fixed engine assigned to this tfm */
+	struct crypto_priv *cp;
+	unsigned int flags;
+
+	enum qce_hash_alg_enum  auth_alg; /* for aead */
+	u8 auth_key[QCRYPTO_MAX_KEY_SIZE];
+	u8 iv[QCRYPTO_MAX_IV_LENGTH];
+
+	u8 enc_key[QCRYPTO_MAX_KEY_SIZE];
+	unsigned int enc_key_len;
+
+	unsigned int authsize;
+	unsigned int auth_key_len;
+
+	u8 ccm4309_nonce[QCRYPTO_CCM4309_NONCE_LEN];
+
+	struct crypto_ablkcipher *cipher_aes192_fb;
+
+	struct crypto_ahash *ahash_aead_aes192_fb;
+};
+
+struct qcrypto_resp_ctx {
+	struct list_head list;
+	struct llist_node llist;
+	struct crypto_async_request *async_req; /* async req */
+	int res;                                /* execution result */
+};
+
+struct qcrypto_cipher_req_ctx {
+	struct qcrypto_resp_ctx rsp_entry;/* rsp entry. */
+	struct crypto_engine *pengine;  /* engine assigned to this request */
+	u8 *iv;
+	u8 rfc4309_iv[QCRYPTO_MAX_IV_LENGTH];
+	unsigned int ivsize;
+	int  aead;
+	int  ccmtype;			/* default: 0, rfc4309: 1 */
+	struct scatterlist asg;		/* Formatted associated data sg  */
+	unsigned char *adata;		/* Pointer to formatted assoc data */
+	enum qce_cipher_alg_enum alg;
+	enum qce_cipher_dir_enum dir;
+	enum qce_cipher_mode_enum mode;
+
+	struct scatterlist *orig_src;	/* Original src sg ptr  */
+	struct scatterlist *orig_dst;	/* Original dst sg ptr  */
+	struct scatterlist dsg;		/* Dest Data sg  */
+	struct scatterlist ssg;		/* Source Data sg  */
+	unsigned char *data;		/* Incoming data pointer*/
+
+	struct aead_request *aead_req;
+	struct ahash_request *fb_hash_req;
+	uint8_t	fb_ahash_digest[SHA256_DIGEST_SIZE];
+	struct scatterlist fb_ablkcipher_src_sg[2];
+	struct scatterlist fb_ablkcipher_dst_sg[2];
+	char *fb_aes_iv;
+	unsigned int  fb_ahash_length;
+	struct ablkcipher_request *fb_aes_req;
+	struct scatterlist *fb_aes_src;
+	struct scatterlist *fb_aes_dst;
+	unsigned int  fb_aes_cryptlen;
+};
+
+#define SHA_MAX_BLOCK_SIZE      SHA256_BLOCK_SIZE
+#define SHA_MAX_STATE_SIZE	(SHA256_DIGEST_SIZE / sizeof(u32))
+#define SHA_MAX_DIGEST_SIZE	 SHA256_DIGEST_SIZE
+
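+/*
+ * Backlog throttling for completion callbacks: once the number of
+ * responses pending on ordered_resp_list reaches the STOP threshold,
+ * request processing is halted (ce_req_proc_sts moves to STOPPED); it
+ * resumes only after the backlog drains below the START threshold,
+ * half of STOP. The hysteresis keeps completion delivery from being
+ * swamped by a flood of new submissions.
+ */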
+#define	MSM_QCRYPTO_REQ_QUEUE_LENGTH 768
+#define	COMPLETION_CB_BACKLOG_LENGTH_STOP 400
+#define	COMPLETION_CB_BACKLOG_LENGTH_START \
+			(COMPLETION_CB_BACKLOG_LENGTH_STOP / 2)
+
+static uint8_t  _std_init_vector_sha1_uint8[] =   {
+	0x67, 0x45, 0x23, 0x01, 0xEF, 0xCD, 0xAB, 0x89,
+	0x98, 0xBA, 0xDC, 0xFE, 0x10, 0x32, 0x54, 0x76,
+	0xC3, 0xD2, 0xE1, 0xF0
+};
+
+/* standard initialization vector for SHA-256, source: FIPS 180-2 */
+static uint8_t _std_init_vector_sha256_uint8[] = {
+	0x6A, 0x09, 0xE6, 0x67, 0xBB, 0x67, 0xAE, 0x85,
+	0x3C, 0x6E, 0xF3, 0x72, 0xA5, 0x4F, 0xF5, 0x3A,
+	0x51, 0x0E, 0x52, 0x7F, 0x9B, 0x05, 0x68, 0x8C,
+	0x1F, 0x83, 0xD9, 0xAB, 0x5B, 0xE0, 0xCD, 0x19
+};
+
+struct qcrypto_sha_ctx {
+	struct list_head rsp_queue;     /* response queue */
+	struct crypto_engine *pengine;  /* fixed engine assigned to this tfm */
+	struct crypto_priv *cp;
+	unsigned int flags;
+	enum qce_hash_alg_enum  alg;
+	uint32_t		diglen;
+	uint32_t		authkey_in_len;
+	uint8_t			authkey[SHA_MAX_BLOCK_SIZE];
+	struct ahash_request *ahash_req;
+	struct completion ahash_req_complete;
+};
+
+struct qcrypto_sha_req_ctx {
+	struct qcrypto_resp_ctx rsp_entry;/* rsp entry. */
+	struct crypto_engine *pengine;  /* engine assigned to this request */
+
+	struct scatterlist *src;
+	uint32_t nbytes;
+
+	struct scatterlist *orig_src;	/* Original src sg ptr  */
+	struct scatterlist dsg;		/* Data sg */
+	unsigned char *data;		/* Incoming data pointer*/
+	unsigned char *data2;		/* Updated data pointer*/
+
+	uint32_t byte_count[4];
+	u64 count;
+	uint8_t	first_blk;
+	uint8_t	last_blk;
+	uint8_t	 trailing_buf[SHA_MAX_BLOCK_SIZE];
+	uint32_t trailing_buf_len;
+
+	/* dma buffer, Internal use */
+	uint8_t	staging_dmabuf
+		[SHA_MAX_BLOCK_SIZE+SHA_MAX_DIGEST_SIZE+MAX_ALIGN_SIZE];
+
+	uint8_t	digest[SHA_MAX_DIGEST_SIZE];
+	struct scatterlist sg[2];
+};
+
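+/*
+ * Convert between a big-endian byte stream and an array of 32-bit
+ * words, zero-padding any trailing partial word. For example, with
+ * len % 4 == 3, trailing bytes {0x01, 0x02, 0x03} map to the word
+ * 0x01020300, and the reverse routine writes back only those three
+ * bytes.
+ */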
+static void _byte_stream_to_words(uint32_t *iv, unsigned char *b,
+		unsigned int len)
+{
+	unsigned n;
+
+	n = len / sizeof(uint32_t);
+	for (; n > 0; n--) {
+		*iv =  ((*b << 24)      & 0xff000000) |
+				(((*(b+1)) << 16) & 0xff0000)   |
+				(((*(b+2)) << 8) & 0xff00)     |
+				(*(b+3)          & 0xff);
+		b += sizeof(uint32_t);
+		iv++;
+	}
+
+	n = len % sizeof(uint32_t);
+	if (n == 3) {
+		*iv = ((*b << 24) & 0xff000000) |
+				(((*(b+1)) << 16) & 0xff0000)   |
+				(((*(b+2)) << 8) & 0xff00);
+	} else if (n == 2) {
+		*iv = ((*b << 24) & 0xff000000) |
+				(((*(b+1)) << 16) & 0xff0000);
+	} else if (n == 1) {
+		*iv = ((*b << 24) & 0xff000000);
+	}
+}
+
+static void _words_to_byte_stream(uint32_t *iv, unsigned char *b,
+		unsigned int len)
+{
+	unsigned n = len / sizeof(uint32_t);
+
+	for (; n > 0; n--) {
+		*b++ = (unsigned char) ((*iv >> 24)   & 0xff);
+		*b++ = (unsigned char) ((*iv >> 16)   & 0xff);
+		*b++ = (unsigned char) ((*iv >> 8)    & 0xff);
+		*b++ = (unsigned char) (*iv           & 0xff);
+		iv++;
+	}
+	n = len % sizeof(uint32_t);
+	if (n == 3) {
+		*b++ = (unsigned char) ((*iv >> 24)   & 0xff);
+		*b++ = (unsigned char) ((*iv >> 16)   & 0xff);
+		*b =   (unsigned char) ((*iv >> 8)    & 0xff);
+	} else if (n == 2) {
+		*b++ = (unsigned char) ((*iv >> 24)   & 0xff);
+		*b =   (unsigned char) ((*iv >> 16)   & 0xff);
+	} else if (n == 1) {
+		*b =   (unsigned char) ((*iv >> 24)   & 0xff);
+	}
+}
+
+static void qcrypto_ce_set_bus(struct crypto_engine *pengine,
+				 bool high_bw_req)
+{
+	int ret = 0;
+
+	if (high_bw_req) {
+		ret = qce_enable_clk(pengine->qce);
+		if (ret) {
+			pr_err("%s Unable enable clk\n", __func__);
+			goto clk_err;
+		}
+		ret = msm_bus_scale_client_update_request(
+				pengine->bus_scale_handle, 1);
+		if (ret) {
+			pr_err("%s Unable to set to high bandwidth\n",
+						__func__);
+			qce_disable_clk(pengine->qce);
+			goto clk_err;
+		}
+	} else {
+		ret = msm_bus_scale_client_update_request(
+				pengine->bus_scale_handle, 0);
+		if (ret) {
+			pr_err("%s Unable to set to low bandwidth\n",
+						__func__);
+			goto clk_err;
+		}
+		ret = qce_disable_clk(pengine->qce);
+		if (ret) {
+			pr_err("%s Unable disable clk\n", __func__);
+			ret = msm_bus_scale_client_update_request(
+				pengine->bus_scale_handle, 1);
+			if (ret)
+				pr_err("%s Unable to set to high bandwidth\n",
+						__func__);
+			goto clk_err;
+		}
+	}
+clk_err:
+	return;
+}
+
+static void qcrypto_bw_reaper_timer_callback(unsigned long data)
+{
+	struct crypto_engine *pengine = (struct crypto_engine *)data;
+
+	schedule_work(&pengine->bw_reaper_ws);
+}
+
+static void qcrypto_bw_set_timeout(struct crypto_engine *pengine)
+{
+	pengine->bw_reaper_timer.data =
+			(unsigned long)(pengine);
+	pengine->bw_reaper_timer.expires = jiffies +
+			msecs_to_jiffies(QCRYPTO_HIGH_BANDWIDTH_TIMEOUT);
+	mod_timer(&(pengine->bw_reaper_timer),
+		pengine->bw_reaper_timer.expires);
+}
+
+static void qcrypto_ce_bw_allocate_req(struct crypto_engine *pengine)
+{
+	schedule_work(&pengine->bw_allocate_ws);
+}
+
+static int _start_qcrypto_process(struct crypto_priv *cp,
+					struct crypto_engine *pengine);
+
+static void qcrypto_bw_allocate_work(struct work_struct *work)
+{
+	struct  crypto_engine *pengine = container_of(work,
+				struct crypto_engine, bw_allocate_ws);
+	unsigned long flags;
+	struct crypto_priv *cp = pengine->pcp;
+
+	spin_lock_irqsave(&cp->lock, flags);
+	pengine->bw_state = BUS_BANDWIDTH_ALLOCATING;
+	spin_unlock_irqrestore(&cp->lock, flags);
+
+	qcrypto_ce_set_bus(pengine, true);
+	qcrypto_bw_set_timeout(pengine);
+	spin_lock_irqsave(&cp->lock, flags);
+	pengine->bw_state = BUS_HAS_BANDWIDTH;
+	pengine->high_bw_req = false;
+	pengine->active_seq++;
+	pengine->check_flag = true;
+	spin_unlock_irqrestore(&cp->lock, flags);
+	_start_qcrypto_process(cp, pengine);
+}
+
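+/*
+ * Bandwidth reaper: runs off a periodic timer. If an engine still
+ * holds a bus-bandwidth vote but its active_seq has not advanced since
+ * the previous tick and it has no outstanding requests, the vote is
+ * released. A request arriving while the vote is being dropped
+ * (high_bw_req set) causes bandwidth to be re-acquired immediately and
+ * processing to restart.
+ */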
+static void qcrypto_bw_reaper_work(struct work_struct *work)
+{
+	struct  crypto_engine *pengine = container_of(work,
+				struct crypto_engine, bw_reaper_ws);
+	struct crypto_priv *cp = pengine->pcp;
+	unsigned long flags;
+	u32    active_seq;
+	bool restart = false;
+
+	spin_lock_irqsave(&cp->lock, flags);
+	active_seq = pengine->active_seq;
+	if (pengine->bw_state == BUS_HAS_BANDWIDTH &&
+		(active_seq == pengine->last_active_seq)) {
+
+		/* check if engine is stuck */
+		if (atomic_read(&pengine->req_count) > 0) {
+			if (pengine->check_flag)
+				dev_warn(&pengine->pdev->dev,
+				"The engine appears to be stuck seq %d.\n",
+				active_seq);
+			pengine->check_flag = false;
+			goto ret;
+		}
+		if (cp->platform_support.bus_scale_table == NULL)
+			goto ret;
+		pengine->bw_state = BUS_BANDWIDTH_RELEASING;
+		spin_unlock_irqrestore(&cp->lock, flags);
+
+		qcrypto_ce_set_bus(pengine, false);
+
+		spin_lock_irqsave(&cp->lock, flags);
+
+		if (pengine->high_bw_req == true) {
+			/* we got request while we are disabling clock */
+			pengine->bw_state = BUS_BANDWIDTH_ALLOCATING;
+			spin_unlock_irqrestore(&cp->lock, flags);
+
+			qcrypto_ce_set_bus(pengine, true);
+
+			spin_lock_irqsave(&cp->lock, flags);
+			pengine->bw_state = BUS_HAS_BANDWIDTH;
+			pengine->high_bw_req = false;
+			restart = true;
+		} else
+			pengine->bw_state = BUS_NO_BANDWIDTH;
+	}
+ret:
+	pengine->last_active_seq = active_seq;
+	spin_unlock_irqrestore(&cp->lock, flags);
+	if (restart)
+		_start_qcrypto_process(cp, pengine);
+	if (pengine->bw_state != BUS_NO_BANDWIDTH)
+		qcrypto_bw_set_timeout(pengine);
+}
+
+static int qcrypto_count_sg(struct scatterlist *sg, int nbytes)
+{
+	int i;
+
+	for (i = 0; nbytes > 0 && sg != NULL; i++, sg = sg_next(sg))
+		nbytes -= sg->length;
+
+	return i;
+}
+
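+/*
+ * Local scatterlist copy helpers that walk the list one entry at a
+ * time with sg_next(), presumably so lists chained by this driver are
+ * traversed explicitly rather than relying on the generic helpers'
+ * nents accounting. Both return the number of bytes copied.
+ */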
+static size_t qcrypto_sg_copy_from_buffer(struct scatterlist *sgl,
+				unsigned int nents, void *buf, size_t buflen)
+{
+	int i;
+	size_t offset, len;
+
+	for (i = 0, offset = 0; i < nents; ++i) {
+		len = sg_copy_from_buffer(sgl, 1, buf, buflen);
+		buf += len;
+		buflen -= len;
+		offset += len;
+		sgl = sg_next(sgl);
+	}
+
+	return offset;
+}
+
+static size_t qcrypto_sg_copy_to_buffer(struct scatterlist *sgl,
+				unsigned int nents, void *buf, size_t buflen)
+{
+	int i;
+	size_t offset, len;
+
+	for (i = 0, offset = 0; i < nents; ++i) {
+		len = sg_copy_to_buffer(sgl, 1, buf, buflen);
+		buf += len;
+		buflen -= len;
+		offset += len;
+		sgl = sg_next(sgl);
+	}
+
+	return offset;
+}
+
+static struct qcrypto_alg *_qcrypto_sha_alg_alloc(struct crypto_priv *cp,
+		struct ahash_alg *template)
+{
+	struct qcrypto_alg *q_alg;
+
+	q_alg = kzalloc(sizeof(struct qcrypto_alg), GFP_KERNEL);
+	if (!q_alg) {
+		pr_err("qcrypto Memory allocation of q_alg FAIL, error %ld\n",
+				PTR_ERR(q_alg));
+		return ERR_PTR(-ENOMEM);
+	}
+
+	q_alg->alg_type = QCRYPTO_ALG_SHA;
+	q_alg->sha_alg = *template;
+	q_alg->cp = cp;
+
+	return q_alg;
+}
+
+static struct qcrypto_alg *_qcrypto_cipher_alg_alloc(struct crypto_priv *cp,
+		struct crypto_alg *template)
+{
+	struct qcrypto_alg *q_alg;
+
+	q_alg = kzalloc(sizeof(struct qcrypto_alg), GFP_KERNEL);
+	if (!q_alg) {
+		pr_err("qcrypto Memory allocation of q_alg FAIL, error %ld\n",
+				PTR_ERR(q_alg));
+		return ERR_PTR(-ENOMEM);
+	}
+
+	q_alg->alg_type = QCRYPTO_ALG_CIPHER;
+	q_alg->cipher_alg = *template;
+	q_alg->cp = cp;
+
+	return q_alg;
+}
+
+static struct qcrypto_alg *_qcrypto_aead_alg_alloc(struct crypto_priv *cp,
+		struct aead_alg *template)
+{
+	struct qcrypto_alg *q_alg;
+
+	q_alg = kzalloc(sizeof(struct qcrypto_alg), GFP_KERNEL);
+	if (!q_alg)
+		return ERR_PTR(-ENOMEM);
+
+	q_alg->alg_type = QCRYPTO_ALG_AEAD;
+	q_alg->aead_alg = *template;
+	q_alg->cp = cp;
+
+	return q_alg;
+}
+
+static int _qcrypto_cipher_ctx_init(struct qcrypto_cipher_ctx *ctx,
+					struct qcrypto_alg *q_alg)
+{
+	if (!ctx || !q_alg) {
+		pr_err("ctx or q_alg is NULL\n");
+		return -EINVAL;
+	}
+	ctx->flags = 0;
+	/* update context with ptr to cp */
+	ctx->cp = q_alg->cp;
+	/* random first IV */
+	get_random_bytes(ctx->iv, QCRYPTO_MAX_IV_LENGTH);
+	if (_qcrypto_init_assign) {
+		ctx->pengine = _qcrypto_static_assign_engine(ctx->cp);
+		if (ctx->pengine == NULL)
+			return -ENODEV;
+	} else
+		ctx->pengine = NULL;
+	INIT_LIST_HEAD(&ctx->rsp_queue);
+	ctx->auth_alg = QCE_HASH_LAST;
+	return 0;
+}
+
+static int _qcrypto_cipher_cra_init(struct crypto_tfm *tfm)
+{
+	struct crypto_alg *alg = tfm->__crt_alg;
+	struct qcrypto_alg *q_alg;
+	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+
+	q_alg = container_of(alg, struct qcrypto_alg, cipher_alg);
+	return _qcrypto_cipher_ctx_init(ctx, q_alg);
+}
+
+static int _qcrypto_ahash_cra_init(struct crypto_tfm *tfm)
+{
+	struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
+	struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(tfm);
+	struct ahash_alg *alg =	container_of(crypto_hash_alg_common(ahash),
+						struct ahash_alg, halg);
+	struct qcrypto_alg *q_alg = container_of(alg, struct qcrypto_alg,
+								sha_alg);
+
+	crypto_ahash_set_reqsize(ahash, sizeof(struct qcrypto_sha_req_ctx));
+	/* update context with ptr to cp */
+	sha_ctx->cp = q_alg->cp;
+	sha_ctx->flags = 0;
+	sha_ctx->ahash_req = NULL;
+	if (_qcrypto_init_assign) {
+		sha_ctx->pengine = _qcrypto_static_assign_engine(sha_ctx->cp);
+		if (sha_ctx->pengine == NULL)
+			return -ENODEV;
+	} else
+		sha_ctx->pengine = NULL;
+	INIT_LIST_HEAD(&sha_ctx->rsp_queue);
+	return 0;
+}
+
+static void _qcrypto_ahash_cra_exit(struct crypto_tfm *tfm)
+{
+	struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(tfm);
+
+	if (!list_empty(&sha_ctx->rsp_queue))
+		pr_err("_qcrypto_ahash_cra_exit: requests still outstanding");
+	if (sha_ctx->ahash_req != NULL) {
+		ahash_request_free(sha_ctx->ahash_req);
+		sha_ctx->ahash_req = NULL;
+	}
+}
+
+
+static void _crypto_sha_hmac_ahash_req_complete(
+	struct crypto_async_request *req, int err);
+
+static int _qcrypto_ahash_hmac_cra_init(struct crypto_tfm *tfm)
+{
+	struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
+	struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(tfm);
+	int ret = 0;
+
+	ret = _qcrypto_ahash_cra_init(tfm);
+	if (ret)
+		return ret;
+	sha_ctx->ahash_req = ahash_request_alloc(ahash, GFP_KERNEL);
+
+	if (sha_ctx->ahash_req == NULL) {
+		_qcrypto_ahash_cra_exit(tfm);
+		return -ENOMEM;
+	}
+
+	init_completion(&sha_ctx->ahash_req_complete);
+	ahash_request_set_callback(sha_ctx->ahash_req,
+				CRYPTO_TFM_REQ_MAY_BACKLOG,
+				_crypto_sha_hmac_ahash_req_complete,
+				&sha_ctx->ahash_req_complete);
+	crypto_ahash_clear_flags(ahash, ~0);
+
+	return 0;
+}
+
+static int _qcrypto_cra_ablkcipher_init(struct crypto_tfm *tfm)
+{
+	tfm->crt_ablkcipher.reqsize = sizeof(struct qcrypto_cipher_req_ctx);
+	return _qcrypto_cipher_cra_init(tfm);
+}
+
+static int _qcrypto_cra_aes_ablkcipher_init(struct crypto_tfm *tfm)
+{
+	const char *name = tfm->__crt_alg->cra_name;
+	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+	int ret;
+	struct crypto_priv *cp = &qcrypto_dev;
+
+	if (cp->ce_support.use_sw_aes_cbc_ecb_ctr_algo) {
+		ctx->cipher_aes192_fb = NULL;
+		return _qcrypto_cra_ablkcipher_init(tfm);
+	}
+	ctx->cipher_aes192_fb = crypto_alloc_ablkcipher(name, 0,
+			CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);
+	if (IS_ERR(ctx->cipher_aes192_fb)) {
+		pr_err("Error allocating fallback algo %s\n", name);
+		ret = PTR_ERR(ctx->cipher_aes192_fb);
+		ctx->cipher_aes192_fb = NULL;
+		return ret;
+	}
+	return _qcrypto_cra_ablkcipher_init(tfm);
+}
+
+static int _qcrypto_aead_cra_init(struct crypto_aead *tfm)
+{
+	struct qcrypto_cipher_ctx *ctx = crypto_aead_ctx(tfm);
+	struct aead_alg *aeadalg = crypto_aead_alg(tfm);
+	struct qcrypto_alg *q_alg = container_of(aeadalg, struct qcrypto_alg,
+						aead_alg);
+	return _qcrypto_cipher_ctx_init(ctx, q_alg);
+}
+
+static int _qcrypto_cra_aead_sha1_init(struct crypto_aead *tfm)
+{
+	int rc;
+	struct qcrypto_cipher_ctx *ctx = crypto_aead_ctx(tfm);
+
+	crypto_aead_set_reqsize(tfm, sizeof(struct qcrypto_cipher_req_ctx));
+	rc = _qcrypto_aead_cra_init(tfm);
+	ctx->auth_alg = QCE_HASH_SHA1_HMAC;
+	return rc;
+}
+
+static int _qcrypto_cra_aead_sha256_init(struct crypto_aead *tfm)
+{
+	int rc;
+	struct qcrypto_cipher_ctx *ctx = crypto_aead_ctx(tfm);
+
+	crypto_aead_set_reqsize(tfm, sizeof(struct qcrypto_cipher_req_ctx));
+	rc = _qcrypto_aead_cra_init(tfm);
+	ctx->auth_alg = QCE_HASH_SHA256_HMAC;
+	return rc;
+}
+
+static int _qcrypto_cra_aead_ccm_init(struct crypto_aead *tfm)
+{
+	int rc;
+	struct qcrypto_cipher_ctx *ctx = crypto_aead_ctx(tfm);
+
+	crypto_aead_set_reqsize(tfm, sizeof(struct qcrypto_cipher_req_ctx));
+	rc = _qcrypto_aead_cra_init(tfm);
+	ctx->auth_alg = QCE_HASH_AES_CMAC;
+	return rc;
+}
+
+static int _qcrypto_cra_aead_rfc4309_ccm_init(struct crypto_aead *tfm)
+{
+	int rc;
+	struct qcrypto_cipher_ctx *ctx = crypto_aead_ctx(tfm);
+
+	crypto_aead_set_reqsize(tfm, sizeof(struct qcrypto_cipher_req_ctx));
+	rc = _qcrypto_aead_cra_init(tfm);
+	ctx->auth_alg = QCE_HASH_AES_CMAC;
+	return rc;
+}
+
+static int _qcrypto_cra_aead_aes_sha1_init(struct crypto_aead *tfm)
+{
+	int rc;
+	struct qcrypto_cipher_ctx *ctx = crypto_aead_ctx(tfm);
+	struct crypto_priv *cp = &qcrypto_dev;
+
+	crypto_aead_set_reqsize(tfm, sizeof(struct qcrypto_cipher_req_ctx));
+	rc = _qcrypto_aead_cra_init(tfm);
+	if (rc)
+		return rc;
+	ctx->cipher_aes192_fb = NULL;
+	ctx->ahash_aead_aes192_fb = NULL;
+	if (!cp->ce_support.aes_key_192) {
+		ctx->cipher_aes192_fb = crypto_alloc_ablkcipher(
+							"cbc(aes)", 0, 0);
+		if (IS_ERR(ctx->cipher_aes192_fb)) {
+			ctx->cipher_aes192_fb = NULL;
+		} else {
+			ctx->ahash_aead_aes192_fb = crypto_alloc_ahash(
+							"hmac(sha1)", 0, 0);
+			if (IS_ERR(ctx->ahash_aead_aes192_fb)) {
+				ctx->ahash_aead_aes192_fb = NULL;
+				crypto_free_ablkcipher(ctx->cipher_aes192_fb);
+				ctx->cipher_aes192_fb = NULL;
+			}
+		}
+	}
+	ctx->auth_alg = QCE_HASH_SHA1_HMAC;
+	return 0;
+}
+
+static int _qcrypto_cra_aead_aes_sha256_init(struct crypto_aead *tfm)
+{
+	int rc;
+	struct qcrypto_cipher_ctx *ctx = crypto_aead_ctx(tfm);
+	struct crypto_priv *cp = &qcrypto_dev;
+
+	crypto_aead_set_reqsize(tfm, sizeof(struct qcrypto_cipher_req_ctx));
+	rc = _qcrypto_aead_cra_init(tfm);
+	if (rc)
+		return rc;
+	ctx->cipher_aes192_fb = NULL;
+	ctx->ahash_aead_aes192_fb = NULL;
+	if (!cp->ce_support.aes_key_192) {
+		ctx->cipher_aes192_fb = crypto_alloc_ablkcipher(
+							"cbc(aes)", 0, 0);
+		if (IS_ERR(ctx->cipher_aes192_fb)) {
+			ctx->cipher_aes192_fb = NULL;
+		} else {
+			ctx->ahash_aead_aes192_fb = crypto_alloc_ahash(
+							"hmac(sha256)", 0, 0);
+			if (IS_ERR(ctx->ahash_aead_aes192_fb)) {
+				ctx->ahash_aead_aes192_fb = NULL;
+				crypto_free_ablkcipher(ctx->cipher_aes192_fb);
+				ctx->cipher_aes192_fb = NULL;
+			}
+		}
+	}
+	ctx->auth_alg = QCE_HASH_SHA256_HMAC;
+	return 0;
+}
+
+static void _qcrypto_cra_ablkcipher_exit(struct crypto_tfm *tfm)
+{
+	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+
+	if (!list_empty(&ctx->rsp_queue))
+		pr_err("_qcrypto__cra_ablkcipher_exit: requests still outstanding");
+}
+
+static void _qcrypto_cra_aes_ablkcipher_exit(struct crypto_tfm *tfm)
+{
+	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+
+	_qcrypto_cra_ablkcipher_exit(tfm);
+	if (ctx->cipher_aes192_fb)
+		crypto_free_ablkcipher(ctx->cipher_aes192_fb);
+	ctx->cipher_aes192_fb = NULL;
+}
+
+static void _qcrypto_cra_aead_exit(struct crypto_aead *tfm)
+{
+	struct qcrypto_cipher_ctx *ctx = crypto_aead_ctx(tfm);
+
+	if (!list_empty(&ctx->rsp_queue))
+		pr_err("_qcrypto__cra_aead_exit: requests still outstanding");
+}
+
+static void _qcrypto_cra_aead_aes_exit(struct crypto_aead *tfm)
+{
+	struct qcrypto_cipher_ctx *ctx = crypto_aead_ctx(tfm);
+
+	if (!list_empty(&ctx->rsp_queue))
+		pr_err("_qcrypto__cra_aead_exit: requests still outstanding");
+	if (ctx->cipher_aes192_fb)
+		crypto_free_ablkcipher(ctx->cipher_aes192_fb);
+	if (ctx->ahash_aead_aes192_fb)
+		crypto_free_ahash(ctx->ahash_aead_aes192_fb);
+	ctx->cipher_aes192_fb = NULL;
+	ctx->ahash_aead_aes192_fb = NULL;
+}
+
+static int _disp_stats(int id)
+{
+	struct crypto_stat *pstat;
+	int len = 0;
+	unsigned long flags;
+	struct crypto_priv *cp = &qcrypto_dev;
+	struct crypto_engine *pe;
+	int i;
+
+	pstat = &_qcrypto_stat;
+	len = scnprintf(_debug_read_buf, DEBUG_MAX_RW_BUF - 1,
+			"\nQualcomm crypto accelerator %d Statistics\n",
+				id + 1);
+
+	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   ABLK CIPHER AES encryption          : %llu\n",
+					pstat->ablk_cipher_aes_enc);
+	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   ABLK CIPHER AES decryption          : %llu\n",
+					pstat->ablk_cipher_aes_dec);
+
+	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   ABLK CIPHER DES encryption          : %llu\n",
+					pstat->ablk_cipher_des_enc);
+	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   ABLK CIPHER DES decryption          : %llu\n",
+					pstat->ablk_cipher_des_dec);
+
+	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   ABLK CIPHER 3DES encryption         : %llu\n",
+					pstat->ablk_cipher_3des_enc);
+
+	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   ABLK CIPHER 3DES decryption         : %llu\n",
+					pstat->ablk_cipher_3des_dec);
+
+	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   ABLK CIPHER operation success       : %llu\n",
+					pstat->ablk_cipher_op_success);
+	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   ABLK CIPHER operation fail          : %llu\n",
+					pstat->ablk_cipher_op_fail);
+	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"\n");
+
+	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   AEAD SHA1-AES encryption            : %llu\n",
+					pstat->aead_sha1_aes_enc);
+	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   AEAD SHA1-AES decryption            : %llu\n",
+					pstat->aead_sha1_aes_dec);
+
+	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   AEAD SHA1-DES encryption            : %llu\n",
+					pstat->aead_sha1_des_enc);
+	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   AEAD SHA1-DES decryption            : %llu\n",
+					pstat->aead_sha1_des_dec);
+
+	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   AEAD SHA1-3DES encryption           : %llu\n",
+					pstat->aead_sha1_3des_enc);
+	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   AEAD SHA1-3DES decryption           : %llu\n",
+					pstat->aead_sha1_3des_dec);
+
+	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   AEAD SHA256-AES encryption          : %llu\n",
+					pstat->aead_sha256_aes_enc);
+	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   AEAD SHA256-AES decryption          : %llu\n",
+					pstat->aead_sha256_aes_dec);
+
+	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   AEAD SHA256-DES encryption          : %llu\n",
+					pstat->aead_sha256_des_enc);
+	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   AEAD SHA256-DES decryption          : %llu\n",
+					pstat->aead_sha256_des_dec);
+
+	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   AEAD SHA256-3DES encryption         : %llu\n",
+					pstat->aead_sha256_3des_enc);
+	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   AEAD SHA256-3DES decryption         : %llu\n",
+					pstat->aead_sha256_3des_dec);
+
+	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   AEAD CCM-AES encryption             : %llu\n",
+					pstat->aead_ccm_aes_enc);
+	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   AEAD CCM-AES decryption             : %llu\n",
+					pstat->aead_ccm_aes_dec);
+	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   AEAD RFC4309-CCM-AES encryption     : %llu\n",
+					pstat->aead_rfc4309_ccm_aes_enc);
+	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   AEAD RFC4309-CCM-AES decryption     : %llu\n",
+					pstat->aead_rfc4309_ccm_aes_dec);
+	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   AEAD operation success              : %llu\n",
+					pstat->aead_op_success);
+	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   AEAD operation fail                 : %llu\n",
+					pstat->aead_op_fail);
+	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   AEAD bad message                    : %llu\n",
+					pstat->aead_bad_msg);
+	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"\n");
+
+	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   AHASH SHA1 digest                   : %llu\n",
+					pstat->sha1_digest);
+	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   AHASH SHA256 digest                 : %llu\n",
+					pstat->sha256_digest);
+	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   AHASH SHA1 HMAC digest              : %llu\n",
+					pstat->sha1_hmac_digest);
+	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   AHASH SHA256 HMAC digest            : %llu\n",
+					pstat->sha256_hmac_digest);
+	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   AHASH operation success             : %llu\n",
+					pstat->ahash_op_success);
+	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   AHASH operation fail                : %llu\n",
+					pstat->ahash_op_fail);
+	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   resp start, resp stop, max rsp queue reorder-cnt : %u %u %u %u\n",
+					cp->resp_start, cp->resp_stop,
+					cp->max_resp_qlen, cp->max_reorder_cnt);
+	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   max queue legnth, no avail          : %u %u\n",
+					cp->max_qlen, cp->no_avail);
+	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   work queue                          : %u %u %u\n",
+					cp->queue_work_eng3,
+					cp->queue_work_not_eng3,
+					cp->queue_work_not_eng3_nz);
+	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"\n");
+	spin_lock_irqsave(&cp->lock, flags);
+	list_for_each_entry(pe, &cp->engine_list, elist) {
+		len += scnprintf(
+			_debug_read_buf + len,
+			DEBUG_MAX_RW_BUF - len - 1,
+			"   Engine %4d Req max %d          : %llu\n",
+			pe->unit,
+			pe->max_req_used,
+			pe->total_req
+		);
+		len += scnprintf(
+			_debug_read_buf + len,
+			DEBUG_MAX_RW_BUF - len - 1,
+			"   Engine %4d Req Error               : %llu\n",
+			pe->unit,
+			pe->err_req
+		);
+		qce_get_driver_stats(pe->qce);
+	}
+	spin_unlock_irqrestore(&cp->lock, flags);
+
+	for (i = 0; i < MAX_SMP_CPU+1; i++)
+		if (cp->cpu_req[i])
+			len += scnprintf(
+				_debug_read_buf + len,
+				DEBUG_MAX_RW_BUF - len - 1,
+				"CPU %d Issue Req                     : %d\n",
+				i, cp->cpu_req[i]);
+	return len;
+}
+
+static void _qcrypto_remove_engine(struct crypto_engine *pengine)
+{
+	struct crypto_priv *cp;
+	struct qcrypto_alg *q_alg;
+	struct qcrypto_alg *n;
+	unsigned long flags;
+	struct crypto_engine *pe;
+
+	cp = pengine->pcp;
+
+	spin_lock_irqsave(&cp->lock, flags);
+	list_del(&pengine->elist);
+	if (pengine->first_engine) {
+		cp->first_engine = NULL;
+		pe = list_first_entry(&cp->engine_list, struct crypto_engine,
+								elist);
+		if (pe) {
+			pe->first_engine = true;
+			cp->first_engine = pe;
+		}
+	}
+	if (cp->next_engine == pengine)
+		cp->next_engine = NULL;
+	if (cp->scheduled_eng == pengine)
+		cp->scheduled_eng = NULL;
+	spin_unlock_irqrestore(&cp->lock, flags);
+
+	cp->total_units--;
+
+	cancel_work_sync(&pengine->bw_reaper_ws);
+	cancel_work_sync(&pengine->bw_allocate_ws);
+	del_timer_sync(&pengine->bw_reaper_timer);
+
+	if (pengine->bus_scale_handle != 0)
+		msm_bus_scale_unregister_client(pengine->bus_scale_handle);
+	pengine->bus_scale_handle = 0;
+
+	kzfree(pengine->preq_pool);
+
+	if (cp->total_units)
+		return;
+
+	list_for_each_entry_safe(q_alg, n, &cp->alg_list, entry) {
+		if (q_alg->alg_type == QCRYPTO_ALG_CIPHER)
+			crypto_unregister_alg(&q_alg->cipher_alg);
+		if (q_alg->alg_type == QCRYPTO_ALG_SHA)
+			crypto_unregister_ahash(&q_alg->sha_alg);
+		if (q_alg->alg_type == QCRYPTO_ALG_AEAD)
+			crypto_unregister_aead(&q_alg->aead_alg);
+		list_del(&q_alg->entry);
+		kzfree(q_alg);
+	}
+}
+
+static int _qcrypto_remove(struct platform_device *pdev)
+{
+	struct crypto_engine *pengine;
+	struct crypto_priv *cp;
+
+	pengine = platform_get_drvdata(pdev);
+
+	if (!pengine)
+		return 0;
+	cp = pengine->pcp;
+	mutex_lock(&cp->engine_lock);
+	_qcrypto_remove_engine(pengine);
+	mutex_unlock(&cp->engine_lock);
+	if (pengine->qce)
+		qce_close(pengine->qce);
+	kzfree(pengine);
+	return 0;
+}
+
+static int _qcrypto_check_aes_keylen(struct crypto_ablkcipher *cipher,
+		struct crypto_priv *cp, unsigned int len)
+{
+	switch (len) {
+	case AES_KEYSIZE_128:
+	case AES_KEYSIZE_256:
+		break;
+	case AES_KEYSIZE_192:
+		if (cp->ce_support.aes_key_192)
+			break;
+		/* fall through - this CE lacks 192-bit key support */
+	default:
+		crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int _qcrypto_setkey_aes_192_fallback(struct crypto_ablkcipher *cipher,
+		const u8 *key)
+{
+	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
+	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+	int ret;
+
+	ctx->enc_key_len = AES_KEYSIZE_192;
+	ctx->cipher_aes192_fb->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
+	ctx->cipher_aes192_fb->base.crt_flags |=
+			(cipher->base.crt_flags & CRYPTO_TFM_REQ_MASK);
+	ret = crypto_ablkcipher_setkey(ctx->cipher_aes192_fb, key,
+			AES_KEYSIZE_192);
+	if (ret) {
+		tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
+		tfm->crt_flags |=
+			(cipher->base.crt_flags & CRYPTO_TFM_RES_MASK);
+	}
+	return ret;
+}
+
+static int _qcrypto_setkey_aes(struct crypto_ablkcipher *cipher, const u8 *key,
+		unsigned int len)
+{
+	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
+	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+	struct crypto_priv *cp = ctx->cp;
+
+	if ((ctx->flags & QCRYPTO_CTX_USE_HW_KEY) == QCRYPTO_CTX_USE_HW_KEY)
+		return 0;
+
+	if ((len == AES_KEYSIZE_192) && (!cp->ce_support.aes_key_192)
+					&& ctx->cipher_aes192_fb)
+		return _qcrypto_setkey_aes_192_fallback(cipher, key);
+
+	if (_qcrypto_check_aes_keylen(cipher, cp, len))
+		return -EINVAL;
+
+	ctx->enc_key_len = len;
+	if (!(ctx->flags & QCRYPTO_CTX_USE_PIPE_KEY)) {
+		if (key != NULL) {
+			memcpy(ctx->enc_key, key, len);
+		} else {
+			pr_err("%s Invalid key pointer\n", __func__);
+			return -EINVAL;
+		}
+	}
+	return 0;
+}
+
+static int _qcrypto_setkey_aes_xts(struct crypto_ablkcipher *cipher,
+		const u8 *key, unsigned int len)
+{
+	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
+	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+	struct crypto_priv *cp = ctx->cp;
+
+	if ((ctx->flags & QCRYPTO_CTX_USE_HW_KEY) == QCRYPTO_CTX_USE_HW_KEY)
+		return 0;
+	if (_qcrypto_check_aes_keylen(cipher, cp, len/2))
+		return -EINVAL;
+
+	ctx->enc_key_len = len;
+	if (!(ctx->flags & QCRYPTO_CTX_USE_PIPE_KEY)) {
+		if (key != NULL) {
+			memcpy(ctx->enc_key, key, len);
+		} else {
+			pr_err("%s Invalid key pointer\n", __func__);
+			return -EINVAL;
+		}
+	}
+	return 0;
+}
+
+static int _qcrypto_setkey_des(struct crypto_ablkcipher *cipher, const u8 *key,
+		unsigned int len)
+{
+	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
+	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+	u32 tmp[DES_EXPKEY_WORDS];
+	int ret;
+
+	if (!key) {
+		pr_err("%s Invalid key pointer\n", __func__);
+		return -EINVAL;
+	}
+
+	if ((ctx->flags & QCRYPTO_CTX_USE_HW_KEY) == QCRYPTO_CTX_USE_HW_KEY) {
+		pr_err("%s HW KEY usage not supported for DES algorithm\n",
+								__func__);
+		return 0;
+	}
+
+	if (len != DES_KEY_SIZE) {
+		crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+		return -EINVAL;
+	}
+
+	/* des_ekey() returns 0 for weak keys; reject them if requested */
+	ret = des_ekey(tmp, key);
+	if (unlikely(ret == 0) && (tfm->crt_flags & CRYPTO_TFM_REQ_WEAK_KEY)) {
+		tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY;
+		return -EINVAL;
+	}
+
+	ctx->enc_key_len = len;
+	if (!(ctx->flags & QCRYPTO_CTX_USE_PIPE_KEY))
+		memcpy(ctx->enc_key, key, len);
+
+	return 0;
+}
+
+static int _qcrypto_setkey_3des(struct crypto_ablkcipher *cipher, const u8 *key,
+		unsigned int len)
+{
+	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
+	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+
+	if ((ctx->flags & QCRYPTO_CTX_USE_HW_KEY) == QCRYPTO_CTX_USE_HW_KEY) {
+		pr_err("%s HW KEY usage not supported for 3DES algorithm\n",
+								__func__);
+		return 0;
+	}
+	if (len != DES3_EDE_KEY_SIZE) {
+		crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+		return -EINVAL;
+	}
+	ctx->enc_key_len = len;
+	if (!(ctx->flags & QCRYPTO_CTX_USE_PIPE_KEY)) {
+		if (key != NULL) {
+			memcpy(ctx->enc_key, key, len);
+		} else {
+			pr_err("%s Invalid key pointer\n", __func__);
+			return -EINVAL;
+		}
+	}
+	return 0;
+}
+
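+/*
+ * Response worker: drain ordered_resp_list and run the completion
+ * callbacks. llist_del_all() hands back entries newest-first, so the
+ * list is reversed in place before the callbacks are invoked to keep
+ * completions in submission order. Once the backlog falls below the
+ * START threshold, request processing is re-enabled and every engine
+ * with spare capacity is kicked. The cmpxchg sequence on
+ * sched_resp_workq_status re-runs the drain if more responses arrived
+ * while this pass was executing.
+ */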
+static void seq_response(struct work_struct *work)
+{
+	struct crypto_priv *cp = container_of(work, struct crypto_priv,
+							 resp_work);
+	struct llist_node *list;
+	struct llist_node *rev = NULL;
+	struct crypto_engine *pengine;
+	unsigned long flags;
+	int total_unit;
+
+again:
+	list = llist_del_all(&cp->ordered_resp_list);
+
+	if (!list)
+		goto end;
+
+	while (list) {
+		struct llist_node *t = list;
+
+		list = llist_next(list);
+		t->next = rev;
+		rev = t;
+	}
+
+	while (rev) {
+		struct qcrypto_resp_ctx *arsp;
+		struct crypto_async_request *areq;
+
+		arsp = container_of(rev, struct qcrypto_resp_ctx, llist);
+		rev = llist_next(rev);
+
+		areq = arsp->async_req;
+		local_bh_disable();
+		areq->complete(areq, arsp->res);
+		local_bh_enable();
+		atomic_dec(&cp->resp_cnt);
+	}
+
+	if (atomic_read(&cp->resp_cnt) < COMPLETION_CB_BACKLOG_LENGTH_START &&
+		(cmpxchg(&cp->ce_req_proc_sts, STOPPED, IN_PROGRESS)
+						== STOPPED)) {
+		cp->resp_start++;
+		for (total_unit = cp->total_units; total_unit-- > 0;) {
+			spin_lock_irqsave(&cp->lock, flags);
+			pengine = _avail_eng(cp);
+			spin_unlock_irqrestore(&cp->lock, flags);
+			if (pengine)
+				_start_qcrypto_process(cp, pengine);
+			else
+				break;
+		}
+	}
+end:
+	if (cmpxchg(&cp->sched_resp_workq_status, SCHEDULE_AGAIN,
+				IS_SCHEDULED) == SCHEDULE_AGAIN)
+		goto again;
+	else if (cmpxchg(&cp->sched_resp_workq_status, IS_SCHEDULED,
+				NOT_SCHEDULED) == SCHEDULE_AGAIN)
+		goto end;
+}
+
+#define SCHEDULE_RSP_QLEN_THRESHOLD 64
+
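+/*
+ * Completions are delivered strictly in the order requests were queued
+ * on a tfm: the entry that just finished only records its result, and
+ * then every already-finished entry at the head of the tfm's rsp_queue
+ * is moved to the global ordered_resp_list. An entry still marked
+ * -EINPROGRESS blocks everything queued behind it.
+ */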
+static void _qcrypto_tfm_complete(struct crypto_engine *pengine, u32 type,
+					void *tfm_ctx,
+					struct qcrypto_resp_ctx *cur_arsp,
+					int res)
+{
+	struct crypto_priv *cp = pengine->pcp;
+	unsigned long flags;
+	struct qcrypto_resp_ctx *arsp;
+	struct list_head *plist;
+	unsigned int resp_qlen;
+	unsigned int cnt = 0;
+
+	switch (type) {
+	case CRYPTO_ALG_TYPE_AHASH:
+		plist = &((struct qcrypto_sha_ctx *) tfm_ctx)->rsp_queue;
+		break;
+	case CRYPTO_ALG_TYPE_ABLKCIPHER:
+	case CRYPTO_ALG_TYPE_AEAD:
+	default:
+		plist = &((struct qcrypto_cipher_ctx *) tfm_ctx)->rsp_queue;
+		break;
+	}
+
+	spin_lock_irqsave(&cp->lock, flags);
+
+	cur_arsp->res = res;
+	while (!list_empty(plist)) {
+		arsp = list_first_entry(plist,
+				struct qcrypto_resp_ctx, list);
+		if (arsp->res == -EINPROGRESS)
+			break;
+		list_del(&arsp->list);
+		llist_add(&arsp->llist, &cp->ordered_resp_list);
+		atomic_inc(&cp->resp_cnt);
+		cnt++;
+	}
+	resp_qlen = atomic_read(&cp->resp_cnt);
+	if (resp_qlen > cp->max_resp_qlen)
+		cp->max_resp_qlen = resp_qlen;
+	if (cnt > cp->max_reorder_cnt)
+		cp->max_reorder_cnt = cnt;
+	if ((resp_qlen >= COMPLETION_CB_BACKLOG_LENGTH_STOP) &&
+		cmpxchg(&cp->ce_req_proc_sts, IN_PROGRESS,
+						STOPPED) == IN_PROGRESS) {
+		cp->resp_stop++;
+	}
+
+	spin_unlock_irqrestore(&cp->lock, flags);
+
+retry:
+	if (!llist_empty(&cp->ordered_resp_list)) {
+		unsigned int cpu;
+
+		if (pengine->first_engine) {
+			cpu = WORK_CPU_UNBOUND;
+			cp->queue_work_eng3++;
+		} else {
+			cp->queue_work_not_eng3++;
+			cpu = cp->cpu_getting_irqs_frm_first_ce;
+			/*
+			 * If source not the first engine, and there
+			 * are outstanding requests going on first engine,
+			 * skip scheduling of work queue to anticipate
+			 * more may be coming. If the response queue
+			 * length exceeds threshold, to avoid further
+			 * delay, schedule work queue immediately.
+			 */
+			if (cp->first_engine && atomic_read(
+						&cp->first_engine->req_count)) {
+				if (resp_qlen < SCHEDULE_RSP_QLEN_THRESHOLD)
+					return;
+				cp->queue_work_not_eng3_nz++;
+			}
+		}
+		if (cmpxchg(&cp->sched_resp_workq_status, NOT_SCHEDULED,
+					IS_SCHEDULED) == NOT_SCHEDULED)
+			queue_work_on(cpu, cp->resp_wq, &cp->resp_work);
+		else if (cmpxchg(&cp->sched_resp_workq_status, IS_SCHEDULED,
+					SCHEDULE_AGAIN) == NOT_SCHEDULED)
+			goto retry;
+	}
+}
+
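+/*
+ * Common completion tail for all request types: release the request
+ * control slot, note which CPU services interrupts for the first
+ * engine (used to steer the response workqueue), complete the request
+ * in order, and, unless processing is throttled, feed the engine its
+ * next request.
+ */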
+static void req_done(struct qcrypto_req_control *pqcrypto_req_control)
+{
+	struct crypto_engine *pengine;
+	struct crypto_async_request *areq;
+	struct crypto_priv *cp;
+	struct qcrypto_resp_ctx *arsp;
+	u32 type = 0;
+	void *tfm_ctx = NULL;
+	unsigned int cpu;
+	int res;
+
+	pengine = pqcrypto_req_control->pce;
+	cp = pengine->pcp;
+	areq = pqcrypto_req_control->req;
+	arsp = pqcrypto_req_control->arsp;
+	res = pqcrypto_req_control->res;
+	qcrypto_free_req_control(pengine, pqcrypto_req_control);
+
+	if (areq) {
+		type = crypto_tfm_alg_type(areq->tfm);
+		tfm_ctx = crypto_tfm_ctx(areq->tfm);
+	}
+	cpu = smp_processor_id();
+	pengine->irq_cpu = cpu;
+	if (pengine->first_engine) {
+		if (cpu != cp->cpu_getting_irqs_frm_first_ce)
+			cp->cpu_getting_irqs_frm_first_ce = cpu;
+	}
+	if (areq)
+		_qcrypto_tfm_complete(pengine, type, tfm_ctx, arsp, res);
+	if (ACCESS_ONCE(cp->ce_req_proc_sts) == IN_PROGRESS)
+		_start_qcrypto_process(cp, pengine);
+}
+
+static void _qce_ahash_complete(void *cookie, unsigned char *digest,
+		unsigned char *authdata, int ret)
+{
+	struct ahash_request *areq = (struct ahash_request *) cookie;
+	struct crypto_async_request *async_req;
+	struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
+	struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(areq->base.tfm);
+	struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(areq);
+	struct crypto_priv *cp = sha_ctx->cp;
+	struct crypto_stat *pstat;
+	uint32_t diglen = crypto_ahash_digestsize(ahash);
+	uint32_t *auth32 = (uint32_t *)authdata;
+	struct crypto_engine *pengine;
+	struct qcrypto_req_control *pqcrypto_req_control;
+
+	async_req = &areq->base;
+	pstat = &_qcrypto_stat;
+
+	pengine = rctx->pengine;
+	pqcrypto_req_control = find_req_control_for_areq(pengine,
+							 async_req);
+	if (pqcrypto_req_control == NULL) {
+		pr_err("async request not found\n");
+		return;
+	}
+
+#ifdef QCRYPTO_DEBUG
+	dev_info(&pengine->pdev->dev, "_qce_ahash_complete: %pK ret %d\n",
+				areq, ret);
+#endif
+	if (digest) {
+		memcpy(rctx->digest, digest, diglen);
+		if (rctx->last_blk)
+			memcpy(areq->result, digest, diglen);
+	}
+	if (authdata) {
+		rctx->byte_count[0] = auth32[0];
+		rctx->byte_count[1] = auth32[1];
+		rctx->byte_count[2] = auth32[2];
+		rctx->byte_count[3] = auth32[3];
+	}
+	areq->src = rctx->src;
+	areq->nbytes = rctx->nbytes;
+
+	rctx->last_blk = 0;
+	rctx->first_blk = 0;
+
+	if (ret) {
+		pqcrypto_req_control->res = -ENXIO;
+		pstat->ahash_op_fail++;
+	} else {
+		pqcrypto_req_control->res = 0;
+		pstat->ahash_op_success++;
+	}
+	if (cp->ce_support.aligned_only) {
+		areq->src = rctx->orig_src;
+		kfree(rctx->data);
+	}
+	req_done(pqcrypto_req_control);
+}
+
+static void _qce_ablk_cipher_complete(void *cookie, unsigned char *icb,
+		unsigned char *iv, int ret)
+{
+	struct ablkcipher_request *areq = (struct ablkcipher_request *) cookie;
+	struct crypto_async_request *async_req;
+	struct crypto_ablkcipher *ablk = crypto_ablkcipher_reqtfm(areq);
+	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(areq->base.tfm);
+	struct crypto_priv *cp = ctx->cp;
+	struct crypto_stat *pstat;
+	struct qcrypto_cipher_req_ctx *rctx;
+	struct crypto_engine *pengine;
+	struct qcrypto_req_control *pqcrypto_req_control;
+
+	async_req = &areq->base;
+	pstat = &_qcrypto_stat;
+	rctx = ablkcipher_request_ctx(areq);
+	pengine = rctx->pengine;
+	pqcrypto_req_control = find_req_control_for_areq(pengine,
+							 async_req);
+	if (pqcrypto_req_control == NULL) {
+		pr_err("async request not found\n");
+		return;
+	}
+
+#ifdef QCRYPTO_DEBUG
+	dev_info(&pengine->pdev->dev, "_qce_ablk_cipher_complete: %pK ret %d\n",
+				areq, ret);
+#endif
+	if (iv)
+		memcpy(ctx->iv, iv, crypto_ablkcipher_ivsize(ablk));
+
+	if (ret) {
+		pqcrypto_req_control->res = -ENXIO;
+		pstat->ablk_cipher_op_fail++;
+	} else {
+		pqcrypto_req_control->res = 0;
+		pstat->ablk_cipher_op_success++;
+	}
+
+	if (cp->ce_support.aligned_only) {
+		uint32_t num_sg = 0;
+		uint32_t bytes = 0;
+
+		areq->src = rctx->orig_src;
+		areq->dst = rctx->orig_dst;
+
+		num_sg = qcrypto_count_sg(areq->dst, areq->nbytes);
+		bytes = qcrypto_sg_copy_from_buffer(areq->dst, num_sg,
+			rctx->data, areq->nbytes);
+		if (bytes != areq->nbytes)
+			pr_warn("bytes copied=0x%x bytes to copy= 0x%x", bytes,
+								areq->nbytes);
+		kzfree(rctx->data);
+	}
+	req_done(pqcrypto_req_control);
+}
+
+static void _qce_aead_complete(void *cookie, unsigned char *icv,
+				unsigned char *iv, int ret)
+{
+	struct aead_request *areq = (struct aead_request *) cookie;
+	struct crypto_async_request *async_req;
+	struct crypto_aead *aead = crypto_aead_reqtfm(areq);
+	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(areq->base.tfm);
+	struct qcrypto_cipher_req_ctx *rctx;
+	struct crypto_stat *pstat;
+	struct crypto_engine *pengine;
+	struct qcrypto_req_control *pqcrypto_req_control;
+
+	async_req = &areq->base;
+	pstat = &_qcrypto_stat;
+	rctx = aead_request_ctx(areq);
+	pengine = rctx->pengine;
+	pqcrypto_req_control = find_req_control_for_areq(pengine,
+							 async_req);
+	if (pqcrypto_req_control == NULL) {
+		pr_err("async request not found\n");
+		return;
+	}
+
+	if (rctx->mode == QCE_MODE_CCM) {
+		kzfree(rctx->adata);
+	} else {
+		uint32_t ivsize = crypto_aead_ivsize(aead);
+
+		if (ret == 0) {
+			if (rctx->dir  == QCE_ENCRYPT) {
+				/* copy the icv to dst */
+				scatterwalk_map_and_copy(icv, areq->dst,
+						areq->cryptlen + areq->assoclen,
+						ctx->authsize, 1);
+
+			} else {
+				unsigned char tmp[SHA256_DIGESTSIZE] = {0};
+
+				/* compare icv from src */
+				scatterwalk_map_and_copy(tmp,
+					areq->src, areq->assoclen +
+					areq->cryptlen - ctx->authsize,
+					ctx->authsize, 0);
+				ret = memcmp(icv, tmp, ctx->authsize);
+				if (ret != 0)
+					ret = -EBADMSG;
+
+			}
+		} else {
+			ret = -ENXIO;
+		}
+
+		if (iv)
+			memcpy(ctx->iv, iv, ivsize);
+	}
+
+	if (ret == (-EBADMSG))
+		pstat->aead_bad_msg++;
+	else if (ret)
+		pstat->aead_op_fail++;
+	else
+		pstat->aead_op_success++;
+
+	pqcrypto_req_control->res = ret;
+	req_done(pqcrypto_req_control);
+}
+
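+/*
+ * Encode the CCM message length into the last csize octets of the
+ * 16-byte B0 block, big-endian, per RFC 3610. For example, msglen
+ * 0x1234 with csize 2 stores the bytes 0x12 0x34; a length that does
+ * not fit in csize octets yields -EOVERFLOW.
+ */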
+static int aead_ccm_set_msg_len(u8 *block, unsigned int msglen, int csize)
+{
+	__be32 data;
+
+	memset(block, 0, csize);
+	block += csize;
+
+	if (csize >= 4)
+		csize = 4;
+	else if (msglen > (1 << (8 * csize)))
+		return -EOVERFLOW;
+
+	data = cpu_to_be32(msglen);
+	memcpy(block - csize, (u8 *)&data + 4 - csize, csize);
+
+	return 0;
+}
+
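+/*
+ * Build the CCM flags octet per RFC 3610: bits 3..5 carry
+ * (authsize - 2) / 2, bit 6 is set when associated data is present,
+ * and the low bits (taken from iv[0], which holds L - 1 in a
+ * well-formed CCM IV) give the number of length octets minus one.
+ * i = iv[0] + 1 is therefore L, and the message length is written
+ * into the trailing i octets of the block.
+ */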
+static int qccrypto_set_aead_ccm_nonce(struct qce_req *qreq, uint32_t assoclen)
+{
+	unsigned int i = ((unsigned int)qreq->iv[0]) + 1;
+
+	memcpy(&qreq->nonce[0], qreq->iv, qreq->ivsize);
+	/*
+	 * Format control info per RFC 3610 and
+	 * NIST Special Publication 800-38C
+	 */
+	qreq->nonce[0] |= (8 * ((qreq->authsize - 2) / 2));
+	if (assoclen)
+		qreq->nonce[0] |= 64;
+
+	if (i > MAX_NONCE)
+		return -EINVAL;
+
+	return aead_ccm_set_msg_len(qreq->nonce + 16 - i, qreq->cryptlen, i);
+}
+
+static int qcrypto_aead_ccm_format_adata(struct qce_req *qreq, uint32_t alen,
+				struct scatterlist *sg, unsigned char *adata)
+{
+	uint32_t len;
+	uint32_t bytes = 0;
+	uint32_t num_sg = 0;
+
+	/*
+	 * Add control info for associated data
+	 * RFC 3610 and NIST Special Publication 800-38C
+	 */
+	if (alen < 65280) {
+		*(__be16 *)adata = cpu_to_be16(alen);
+		len = 2;
+	} else if (alen <= 0xffffffff) {
+		*(__be16 *)adata = cpu_to_be16(0xfffe);
+		*(__be32 *)&adata[2] = cpu_to_be32(alen);
+		len = 6;
+	} else {
+		*(__be16 *)adata = cpu_to_be16(0xffff);
+		*(__be32 *)&adata[6] = cpu_to_be32(alen);
+		len = 10;
+	}
+	adata += len;
+	qreq->assoclen = ALIGN((alen + len), 16);
+
+	num_sg = qcrypto_count_sg(sg, alen);
+	bytes = qcrypto_sg_copy_to_buffer(sg, num_sg, adata, alen);
+	if (bytes != alen)
+		pr_warn("bytes copied=0x%x bytes to copy= 0x%x", bytes, alen);
+
+	return 0;
+}
+
+static int _qcrypto_process_ablkcipher(struct crypto_engine *pengine,
+			struct qcrypto_req_control *pqcrypto_req_control)
+{
+	struct crypto_async_request *async_req;
+	struct qce_req qreq;
+	int ret;
+	struct qcrypto_cipher_req_ctx *rctx;
+	struct qcrypto_cipher_ctx *cipher_ctx;
+	struct ablkcipher_request *req;
+	struct crypto_ablkcipher *tfm;
+
+	async_req = pqcrypto_req_control->req;
+	req = container_of(async_req, struct ablkcipher_request, base);
+	cipher_ctx = crypto_tfm_ctx(async_req->tfm);
+	rctx = ablkcipher_request_ctx(req);
+	rctx->pengine = pengine;
+	tfm = crypto_ablkcipher_reqtfm(req);
+	if (pengine->pcp->ce_support.aligned_only) {
+		uint32_t bytes = 0;
+		uint32_t num_sg = 0;
+
+		rctx->orig_src = req->src;
+		rctx->orig_dst = req->dst;
+		rctx->data = kzalloc((req->nbytes + 64), GFP_ATOMIC);
+		if (rctx->data == NULL) {
+			pr_err("Mem Alloc fail rctx->data for 0x%x\n",
+				(req->nbytes + 64));
+			return -ENOMEM;
+		}
+		num_sg = qcrypto_count_sg(req->src, req->nbytes);
+		bytes = qcrypto_sg_copy_to_buffer(req->src, num_sg, rctx->data,
+								req->nbytes);
+		if (bytes != req->nbytes)
+			pr_warn("bytes copied=0x%x bytes to copy= 0x%x", bytes,
+								req->nbytes);
+		sg_set_buf(&rctx->dsg, rctx->data, req->nbytes);
+		sg_mark_end(&rctx->dsg);
+		rctx->iv = req->info;
+
+		req->src = &rctx->dsg;
+		req->dst = &rctx->dsg;
+	}
+	qreq.op = QCE_REQ_ABLK_CIPHER;
+	qreq.qce_cb = _qce_ablk_cipher_complete;
+	qreq.areq = req;
+	qreq.alg = rctx->alg;
+	qreq.dir = rctx->dir;
+	qreq.mode = rctx->mode;
+	qreq.enckey = cipher_ctx->enc_key;
+	qreq.encklen = cipher_ctx->enc_key_len;
+	qreq.iv = req->info;
+	qreq.ivsize = crypto_ablkcipher_ivsize(tfm);
+	qreq.cryptlen = req->nbytes;
+	qreq.use_pmem = 0;
+	qreq.flags = cipher_ctx->flags;
+
+	if ((cipher_ctx->enc_key_len == 0) &&
+			(pengine->pcp->platform_support.hw_key_support == 0))
+		ret = -EINVAL;
+	else
+		ret = qce_ablk_cipher_req(pengine->qce, &qreq);
+
+	return ret;
+}
+
+static int _qcrypto_process_ahash(struct crypto_engine *pengine,
+			struct qcrypto_req_control *pqcrypto_req_control)
+{
+	struct crypto_async_request *async_req;
+	struct ahash_request *req;
+	struct qce_sha_req sreq;
+	struct qcrypto_sha_req_ctx *rctx;
+	struct qcrypto_sha_ctx *sha_ctx;
+	int ret = 0;
+
+	async_req = pqcrypto_req_control->req;
+	req = container_of(async_req,
+				struct ahash_request, base);
+	rctx = ahash_request_ctx(req);
+	sha_ctx = crypto_tfm_ctx(async_req->tfm);
+	rctx->pengine = pengine;
+
+	sreq.qce_cb = _qce_ahash_complete;
+	sreq.digest =  &rctx->digest[0];
+	sreq.src = req->src;
+	sreq.auth_data[0] = rctx->byte_count[0];
+	sreq.auth_data[1] = rctx->byte_count[1];
+	sreq.auth_data[2] = rctx->byte_count[2];
+	sreq.auth_data[3] = rctx->byte_count[3];
+	sreq.first_blk = rctx->first_blk;
+	sreq.last_blk = rctx->last_blk;
+	sreq.size = req->nbytes;
+	sreq.areq = req;
+	sreq.flags = sha_ctx->flags;
+
+	switch (sha_ctx->alg) {
+	case QCE_HASH_SHA1:
+		sreq.alg = QCE_HASH_SHA1;
+		sreq.authkey = NULL;
+		break;
+	case QCE_HASH_SHA256:
+		sreq.alg = QCE_HASH_SHA256;
+		sreq.authkey = NULL;
+		break;
+	case QCE_HASH_SHA1_HMAC:
+		sreq.alg = QCE_HASH_SHA1_HMAC;
+		sreq.authkey = &sha_ctx->authkey[0];
+		sreq.authklen = SHA_HMAC_KEY_SIZE;
+		break;
+	case QCE_HASH_SHA256_HMAC:
+		sreq.alg = QCE_HASH_SHA256_HMAC;
+		sreq.authkey = &sha_ctx->authkey[0];
+		sreq.authklen = SHA_HMAC_KEY_SIZE;
+		break;
+	default:
+		pr_err("Algorithm %d not supported, exiting", sha_ctx->alg);
+		ret = -1;
+		break;
+	};
+	ret =  qce_process_sha_req(pengine->qce, &sreq);
+
+	return ret;
+}
+
+static int _qcrypto_process_aead(struct crypto_engine *pengine,
+			struct qcrypto_req_control *pqcrypto_req_control)
+{
+	struct crypto_async_request *async_req;
+	struct qce_req qreq;
+	int ret = 0;
+	struct qcrypto_cipher_req_ctx *rctx;
+	struct qcrypto_cipher_ctx *cipher_ctx;
+	struct aead_request *req;
+	struct crypto_aead *aead;
+
+	async_req = pqcrypto_req_control->req;
+	req = container_of(async_req, struct aead_request, base);
+	aead = crypto_aead_reqtfm(req);
+	rctx = aead_request_ctx(req);
+	rctx->pengine = pengine;
+	cipher_ctx = crypto_tfm_ctx(async_req->tfm);
+
+	qreq.op = QCE_REQ_AEAD;
+	qreq.qce_cb = _qce_aead_complete;
+
+	qreq.areq = req;
+	qreq.alg = rctx->alg;
+	qreq.dir = rctx->dir;
+	qreq.mode = rctx->mode;
+	qreq.iv = rctx->iv;
+
+	qreq.enckey = cipher_ctx->enc_key;
+	qreq.encklen = cipher_ctx->enc_key_len;
+	qreq.authkey = cipher_ctx->auth_key;
+	qreq.authklen = cipher_ctx->auth_key_len;
+	qreq.authsize = crypto_aead_authsize(aead);
+	qreq.auth_alg = cipher_ctx->auth_alg;
+	if (qreq.mode == QCE_MODE_CCM)
+		qreq.ivsize = AES_BLOCK_SIZE;
+	else
+		qreq.ivsize = crypto_aead_ivsize(aead);
+	qreq.flags = cipher_ctx->flags;
+
+	if (qreq.mode == QCE_MODE_CCM) {
+		uint32_t assoclen;
+
+		if (qreq.dir == QCE_ENCRYPT)
+			qreq.cryptlen = req->cryptlen;
+		else
+			qreq.cryptlen = req->cryptlen -
+						qreq.authsize;
+
+		/* if rfc4309 ccm, adjust assoclen */
+		assoclen = req->assoclen;
+		if (rctx->ccmtype)
+			assoclen -= 8;
+		/* Get NONCE */
+		ret = qccrypto_set_aead_ccm_nonce(&qreq, assoclen);
+		if (ret)
+			return ret;
+
+		if (assoclen) {
+			rctx->adata = kzalloc((assoclen + 0x64),
+								GFP_ATOMIC);
+			if (!rctx->adata)
+				return -ENOMEM;
+			/* Format Associated data    */
+			ret = qcrypto_aead_ccm_format_adata(&qreq,
+						assoclen,
+						req->src,
+						rctx->adata);
+		} else {
+			qreq.assoclen = 0;
+			rctx->adata = NULL;
+		}
+		if (ret) {
+			kzfree(rctx->adata);
+			return ret;
+		}
+
+		/*
+		 * update req with new formatted associated
+		 * data info
+		 */
+		qreq.asg = &rctx->asg;
+		if (rctx->adata)
+			sg_set_buf(qreq.asg, rctx->adata,
+					qreq.assoclen);
+		sg_mark_end(qreq.asg);
+	}
+	ret = qce_aead_req(pengine->qce, &qreq);
+
+	return ret;
+}
+
+static struct crypto_engine *_qcrypto_static_assign_engine(
+					struct crypto_priv *cp)
+{
+	struct crypto_engine *pengine;
+	unsigned long flags;
+
+	spin_lock_irqsave(&cp->lock, flags);
+	if (cp->next_engine)
+		pengine = cp->next_engine;
+	else
+		pengine = list_first_entry(&cp->engine_list,
+				struct crypto_engine, elist);
+
+	if (list_is_last(&pengine->elist, &cp->engine_list))
+		cp->next_engine = list_first_entry(
+			&cp->engine_list, struct crypto_engine, elist);
+	else
+		cp->next_engine = list_next_entry(pengine, elist);
+	spin_unlock_irqrestore(&cp->lock, flags);
+	return pengine;
+}
+
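+/*
+ * Core dispatch: pull the next request, preferring the engine's own
+ * queue over the driver-wide queue, claim a request-control slot, link
+ * the response entry onto the tfm's rsp_queue so completion stays
+ * ordered, then submit to the QCE layer. If submission fails, the
+ * error is accounted, completed through the normal ordered path, and
+ * the loop moves on to the next queued request.
+ */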
+static int _start_qcrypto_process(struct crypto_priv *cp,
+				struct crypto_engine *pengine)
+{
+	struct crypto_async_request *async_req = NULL;
+	struct crypto_async_request *backlog_eng = NULL;
+	struct crypto_async_request *backlog_cp = NULL;
+	unsigned long flags;
+	u32 type;
+	int ret = 0;
+	struct crypto_stat *pstat;
+	void *tfm_ctx;
+	struct qcrypto_cipher_req_ctx *cipher_rctx;
+	struct qcrypto_sha_req_ctx *ahash_rctx;
+	struct ablkcipher_request *ablkcipher_req;
+	struct ahash_request *ahash_req;
+	struct aead_request *aead_req;
+	struct qcrypto_resp_ctx *arsp;
+	struct qcrypto_req_control *pqcrypto_req_control;
+	unsigned int cpu = MAX_SMP_CPU;
+
+	if (ACCESS_ONCE(cp->ce_req_proc_sts) == STOPPED)
+		return 0;
+
+	if (in_interrupt()) {
+		cpu = smp_processor_id();
+		if (cpu >= MAX_SMP_CPU)
+			cpu = MAX_SMP_CPU - 1;
+	}
+
+	pstat = &_qcrypto_stat;
+
+again:
+	spin_lock_irqsave(&cp->lock, flags);
+	if (pengine->issue_req ||
+		atomic_read(&pengine->req_count) >= (pengine->max_req)) {
+		spin_unlock_irqrestore(&cp->lock, flags);
+		return 0;
+	}
+
+	backlog_eng = crypto_get_backlog(&pengine->req_queue);
+
+	/* make sure it is in high bandwidth state */
+	if (pengine->bw_state != BUS_HAS_BANDWIDTH) {
+		spin_unlock_irqrestore(&cp->lock, flags);
+		return 0;
+	}
+
+	/* try to get request from request queue of the engine first */
+	async_req = crypto_dequeue_request(&pengine->req_queue);
+	if (!async_req) {
+		/*
+		 * if no request from the engine,
+		 * try to  get from request queue of driver
+		 */
+		backlog_cp = crypto_get_backlog(&cp->req_queue);
+		async_req = crypto_dequeue_request(&cp->req_queue);
+		if (!async_req) {
+			spin_unlock_irqrestore(&cp->lock, flags);
+			return 0;
+		}
+	}
+	pqcrypto_req_control = qcrypto_alloc_req_control(pengine);
+	if (pqcrypto_req_control == NULL) {
+		pr_err("Allocation of request failed\n");
+		spin_unlock_irqrestore(&cp->lock, flags);
+		return 0;
+	}
+
+	/* add associated rsp entry to tfm response queue */
+	type = crypto_tfm_alg_type(async_req->tfm);
+	tfm_ctx = crypto_tfm_ctx(async_req->tfm);
+	switch (type) {
+	case CRYPTO_ALG_TYPE_AHASH:
+		ahash_req = container_of(async_req,
+			struct ahash_request, base);
+		ahash_rctx = ahash_request_ctx(ahash_req);
+		arsp = &ahash_rctx->rsp_entry;
+		list_add_tail(
+			&arsp->list,
+			&((struct qcrypto_sha_ctx *)tfm_ctx)
+				->rsp_queue);
+		break;
+	case CRYPTO_ALG_TYPE_ABLKCIPHER:
+		ablkcipher_req = container_of(async_req,
+			struct ablkcipher_request, base);
+		cipher_rctx = ablkcipher_request_ctx(ablkcipher_req);
+		arsp = &cipher_rctx->rsp_entry;
+		list_add_tail(
+			&arsp->list,
+			&((struct qcrypto_cipher_ctx *)tfm_ctx)
+				->rsp_queue);
+		break;
+	case CRYPTO_ALG_TYPE_AEAD:
+	default:
+		aead_req = container_of(async_req,
+			struct aead_request, base);
+		cipher_rctx = aead_request_ctx(aead_req);
+		arsp = &cipher_rctx->rsp_entry;
+		list_add_tail(
+			&arsp->list,
+			&((struct qcrypto_cipher_ctx *)tfm_ctx)
+				->rsp_queue);
+		break;
+	}
+
+	arsp->res = -EINPROGRESS;
+	arsp->async_req = async_req;
+	pqcrypto_req_control->pce = pengine;
+	pqcrypto_req_control->req = async_req;
+	pqcrypto_req_control->arsp = arsp;
+	pengine->active_seq++;
+	pengine->check_flag = true;
+
+	pengine->issue_req = true;
+	cp->cpu_req[cpu]++;
+	smp_mb(); /* make it visible */
+
+	spin_unlock_irqrestore(&cp->lock, flags);
+	if (backlog_eng)
+		backlog_eng->complete(backlog_eng, -EINPROGRESS);
+	if (backlog_cp)
+		backlog_cp->complete(backlog_cp, -EINPROGRESS);
+	switch (type) {
+	case CRYPTO_ALG_TYPE_ABLKCIPHER:
+		ret = _qcrypto_process_ablkcipher(pengine,
+					pqcrypto_req_control);
+		break;
+	case CRYPTO_ALG_TYPE_AHASH:
+		ret = _qcrypto_process_ahash(pengine, pqcrypto_req_control);
+		break;
+	case CRYPTO_ALG_TYPE_AEAD:
+		ret = _qcrypto_process_aead(pengine, pqcrypto_req_control);
+		break;
+	default:
+		ret = -EINVAL;
+	}
+
+	pengine->issue_req = false;
+	smp_mb(); /* make it visible */
+
+	pengine->total_req++;
+	if (ret) {
+		pengine->err_req++;
+		qcrypto_free_req_control(pengine, pqcrypto_req_control);
+
+		if (type == CRYPTO_ALG_TYPE_ABLKCIPHER)
+			pstat->ablk_cipher_op_fail++;
+		else
+			if (type == CRYPTO_ALG_TYPE_AHASH)
+				pstat->ahash_op_fail++;
+			else
+				pstat->aead_op_fail++;
+
+		_qcrypto_tfm_complete(pengine, type, tfm_ctx, arsp, ret);
+		goto again;
+	}
+	return ret;
+}
+
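+/*
+ * Engine scheduling: _next_eng() treats cp->engine_list as circular;
+ * _avail_eng() resumes from the last scheduled engine and returns the
+ * first one that is neither mid-issue nor at its max_req limit, i.e. a
+ * simple round-robin over the available CE instances.
+ */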
+static inline struct crypto_engine *_next_eng(struct crypto_priv *cp,
+		struct crypto_engine *p)
+{
+	if (p == NULL || list_is_last(&p->elist, &cp->engine_list))
+		p = list_first_entry(&cp->engine_list, struct crypto_engine,
+			elist);
+	else
+		p = list_entry(p->elist.next, struct crypto_engine, elist);
+	return p;
+}
+
+static struct crypto_engine *_avail_eng(struct crypto_priv *cp)
+{
+	/* caller must hold cp->lock */
+	struct crypto_engine *q = NULL;
+	struct crypto_engine *p = cp->scheduled_eng;
+	struct crypto_engine *q1;
+	int eng_cnt = cp->total_units;
+
+	if (unlikely(list_empty(&cp->engine_list))) {
+		pr_err("%s: no valid ce to schedule\n", __func__);
+		return NULL;
+	}
+
+	p = _next_eng(cp, p);
+	q1 = p;
+	while (eng_cnt-- > 0) {
+		if (!p->issue_req && atomic_read(&p->req_count) < p->max_req) {
+			q = p;
+			break;
+		}
+		p = _next_eng(cp, p);
+		if (q1 == p)
+			break;
+	}
+	cp->scheduled_eng = q;
+	return q;
+}
+
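+/*
+ * Queue a request on a caller-pinned engine, or on the driver-wide
+ * queue with _avail_eng() picking an engine.  The engine is only
+ * kicked via _start_qcrypto_process() when its bus is in
+ * BUS_HAS_BANDWIDTH; in every other bandwidth state the request stays
+ * queued and, where needed, a bandwidth request is raised first.
+ */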
+static int _qcrypto_queue_req(struct crypto_priv *cp,
+				struct crypto_engine *pengine,
+				struct crypto_async_request *req)
+{
+	int ret;
+	unsigned long flags;
+
+	spin_lock_irqsave(&cp->lock, flags);
+
+	if (pengine) {
+		ret = crypto_enqueue_request(&pengine->req_queue, req);
+	} else {
+		ret = crypto_enqueue_request(&cp->req_queue, req);
+		pengine = _avail_eng(cp);
+		if (cp->req_queue.qlen > cp->max_qlen)
+			cp->max_qlen = cp->req_queue.qlen;
+	}
+	if (pengine) {
+		switch (pengine->bw_state) {
+		case BUS_NO_BANDWIDTH:
+			if (pengine->high_bw_req == false) {
+				qcrypto_ce_bw_allocate_req(pengine);
+				pengine->high_bw_req = true;
+			}
+			pengine = NULL;
+			break;
+		case BUS_HAS_BANDWIDTH:
+			break;
+		case BUS_BANDWIDTH_RELEASING:
+			pengine->high_bw_req = true;
+			pengine = NULL;
+			break;
+		case BUS_BANDWIDTH_ALLOCATING:
+			pengine = NULL;
+			break;
+		case BUS_SUSPENDED:
+		case BUS_SUSPENDING:
+		default:
+			pengine = NULL;
+			break;
+		}
+	} else {
+		cp->no_avail++;
+	}
+	spin_unlock_irqrestore(&cp->lock, flags);
+	if (pengine && (ACCESS_ONCE(cp->ce_req_proc_sts) == IN_PROGRESS))
+		_start_qcrypto_process(cp, pengine);
+	return ret;
+}
+
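+/*
+ * AES-192 software fallback: engines without ce_support.aes_key_192
+ * cannot take 192-bit AES keys, so such requests are redirected to
+ * the pre-allocated fallback tfm (ctx->cipher_aes192_fb), temporarily
+ * swapping the tfm on the request.
+ */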
+static int _qcrypto_enc_aes_192_fallback(struct ablkcipher_request *req)
+{
+	struct crypto_tfm *tfm =
+		crypto_ablkcipher_tfm(crypto_ablkcipher_reqtfm(req));
+	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+	int err;
+
+	ablkcipher_request_set_tfm(req, ctx->cipher_aes192_fb);
+	err = crypto_ablkcipher_encrypt(req);
+	ablkcipher_request_set_tfm(req, __crypto_ablkcipher_cast(tfm));
+	return err;
+}
+
+static int _qcrypto_dec_aes_192_fallback(struct ablkcipher_request *req)
+{
+	struct crypto_tfm *tfm =
+		crypto_ablkcipher_tfm(crypto_ablkcipher_reqtfm(req));
+	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+	int err;
+
+	ablkcipher_request_set_tfm(req, ctx->cipher_aes192_fb);
+	err = crypto_ablkcipher_decrypt(req);
+	ablkcipher_request_set_tfm(req, __crypto_ablkcipher_cast(tfm));
+	return err;
+}
+
+static int _qcrypto_enc_aes_ecb(struct ablkcipher_request *req)
+{
+	struct qcrypto_cipher_req_ctx *rctx;
+	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+	struct crypto_priv *cp = ctx->cp;
+	struct crypto_stat *pstat;
+
+	pstat = &_qcrypto_stat;
+
+	BUG_ON(crypto_tfm_alg_type(req->base.tfm) !=
+					CRYPTO_ALG_TYPE_ABLKCIPHER);
+#ifdef QCRYPTO_DEBUG
+	dev_info(&ctx->pengine->pdev->dev, "_qcrypto_enc_aes_ecb: %pK\n", req);
+#endif
+
+	if ((ctx->enc_key_len == AES_KEYSIZE_192) &&
+			(!cp->ce_support.aes_key_192) &&
+				ctx->cipher_aes192_fb)
+		return _qcrypto_enc_aes_192_fallback(req);
+
+	rctx = ablkcipher_request_ctx(req);
+	rctx->aead = 0;
+	rctx->alg = CIPHER_ALG_AES;
+	rctx->dir = QCE_ENCRYPT;
+	rctx->mode = QCE_MODE_ECB;
+
+	pstat->ablk_cipher_aes_enc++;
+	return _qcrypto_queue_req(cp, ctx->pengine, &req->base);
+};
+
+static int _qcrypto_enc_aes_cbc(struct ablkcipher_request *req)
+{
+	struct qcrypto_cipher_req_ctx *rctx;
+	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+	struct crypto_priv *cp = ctx->cp;
+	struct crypto_stat *pstat;
+
+	pstat = &_qcrypto_stat;
+
+	BUG_ON(crypto_tfm_alg_type(req->base.tfm) !=
+					CRYPTO_ALG_TYPE_ABLKCIPHER);
+#ifdef QCRYPTO_DEBUG
+	dev_info(&ctx->pengine->pdev->dev, "_qcrypto_enc_aes_cbc: %pK\n", req);
+#endif
+
+	if ((ctx->enc_key_len == AES_KEYSIZE_192) &&
+			(!cp->ce_support.aes_key_192) &&
+				ctx->cipher_aes192_fb)
+		return _qcrypto_enc_aes_192_fallback(req);
+
+	rctx = ablkcipher_request_ctx(req);
+	rctx->aead = 0;
+	rctx->alg = CIPHER_ALG_AES;
+	rctx->dir = QCE_ENCRYPT;
+	rctx->mode = QCE_MODE_CBC;
+
+	pstat->ablk_cipher_aes_enc++;
+	return _qcrypto_queue_req(cp, ctx->pengine, &req->base);
+};
+
+static int _qcrypto_enc_aes_ctr(struct ablkcipher_request *req)
+{
+	struct qcrypto_cipher_req_ctx *rctx;
+	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+	struct crypto_priv *cp = ctx->cp;
+	struct crypto_stat *pstat;
+
+	pstat = &_qcrypto_stat;
+
+	BUG_ON(crypto_tfm_alg_type(req->base.tfm) !=
+				CRYPTO_ALG_TYPE_ABLKCIPHER);
+#ifdef QCRYPTO_DEBUG
+	dev_info(&ctx->pengine->pdev->dev, "_qcrypto_enc_aes_ctr: %pK\n", req);
+#endif
+
+	if ((ctx->enc_key_len == AES_KEYSIZE_192) &&
+			(!cp->ce_support.aes_key_192) &&
+				ctx->cipher_aes192_fb)
+		return _qcrypto_enc_aes_192_fallback(req);
+
+	rctx = ablkcipher_request_ctx(req);
+	rctx->aead = 0;
+	rctx->alg = CIPHER_ALG_AES;
+	rctx->dir = QCE_ENCRYPT;
+	rctx->mode = QCE_MODE_CTR;
+
+	pstat->ablk_cipher_aes_enc++;
+	return _qcrypto_queue_req(cp, ctx->pengine, &req->base);
+};
+
+static int _qcrypto_enc_aes_xts(struct ablkcipher_request *req)
+{
+	struct qcrypto_cipher_req_ctx *rctx;
+	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+	struct crypto_priv *cp = ctx->cp;
+	struct crypto_stat *pstat;
+
+	pstat = &_qcrypto_stat;
+
+	BUG_ON(crypto_tfm_alg_type(req->base.tfm) !=
+					CRYPTO_ALG_TYPE_ABLKCIPHER);
+	rctx = ablkcipher_request_ctx(req);
+	rctx->aead = 0;
+	rctx->alg = CIPHER_ALG_AES;
+	rctx->dir = QCE_ENCRYPT;
+	rctx->mode = QCE_MODE_XTS;
+
+	pstat->ablk_cipher_aes_enc++;
+	return _qcrypto_queue_req(cp, ctx->pengine, &req->base);
+};
+
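+/*
+ * CCM parameter checks follow RFC 3610: the tag (authsize) must be an
+ * even length between 4 and 16 bytes, and this path only accepts
+ * 128- and 256-bit keys for the CCM authentication key.
+ */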
+static int _qcrypto_aead_encrypt_aes_ccm(struct aead_request *req)
+{
+	struct qcrypto_cipher_req_ctx *rctx;
+	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+	struct crypto_priv *cp = ctx->cp;
+	struct crypto_stat *pstat;
+
+	if ((ctx->authsize > 16) || (ctx->authsize < 4) || (ctx->authsize & 1))
+		return  -EINVAL;
+	if ((ctx->auth_key_len != AES_KEYSIZE_128) &&
+		(ctx->auth_key_len != AES_KEYSIZE_256))
+		return  -EINVAL;
+
+	pstat = &_qcrypto_stat;
+
+	rctx = aead_request_ctx(req);
+	rctx->aead = 1;
+	rctx->alg = CIPHER_ALG_AES;
+	rctx->dir = QCE_ENCRYPT;
+	rctx->mode = QCE_MODE_CCM;
+	rctx->iv = req->iv;
+	rctx->ccmtype = 0;
+
+	pstat->aead_ccm_aes_enc++;
+	return _qcrypto_queue_req(cp, ctx->pengine, &req->base);
+}
+
+static int _qcrypto_aead_rfc4309_enc_aes_ccm(struct aead_request *req)
+{
+	struct qcrypto_cipher_req_ctx *rctx;
+	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+	struct crypto_priv *cp = ctx->cp;
+	struct crypto_stat *pstat;
+
+	pstat = &_qcrypto_stat;
+
+	if (req->assoclen != 16 && req->assoclen != 20)
+		return -EINVAL;
+	rctx = aead_request_ctx(req);
+	rctx->aead = 1;
+	rctx->alg = CIPHER_ALG_AES;
+	rctx->dir = QCE_ENCRYPT;
+	rctx->mode = QCE_MODE_CCM;
+	memset(rctx->rfc4309_iv, 0, sizeof(rctx->rfc4309_iv));
+	rctx->rfc4309_iv[0] = 3; /* L - 1, where L = 4 (RFC 4309) */
+	memcpy(&rctx->rfc4309_iv[1], ctx->ccm4309_nonce, 3);
+	memcpy(&rctx->rfc4309_iv[4], req->iv, 8);
+	rctx->ccmtype = 1;
+	rctx->iv = rctx->rfc4309_iv;
+	pstat->aead_rfc4309_ccm_aes_enc++;
+	return _qcrypto_queue_req(cp, ctx->pengine, &req->base);
+}
+
+static int _qcrypto_enc_des_ecb(struct ablkcipher_request *req)
+{
+	struct qcrypto_cipher_req_ctx *rctx;
+	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+	struct crypto_priv *cp = ctx->cp;
+	struct crypto_stat *pstat;
+
+	pstat = &_qcrypto_stat;
+
+	BUG_ON(crypto_tfm_alg_type(req->base.tfm) !=
+					CRYPTO_ALG_TYPE_ABLKCIPHER);
+	rctx = ablkcipher_request_ctx(req);
+	rctx->aead = 0;
+	rctx->alg = CIPHER_ALG_DES;
+	rctx->dir = QCE_ENCRYPT;
+	rctx->mode = QCE_MODE_ECB;
+
+	pstat->ablk_cipher_des_enc++;
+	return _qcrypto_queue_req(cp, ctx->pengine, &req->base);
+};
+
+static int _qcrypto_enc_des_cbc(struct ablkcipher_request *req)
+{
+	struct qcrypto_cipher_req_ctx *rctx;
+	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+	struct crypto_priv *cp = ctx->cp;
+	struct crypto_stat *pstat;
+
+	pstat = &_qcrypto_stat;
+
+	BUG_ON(crypto_tfm_alg_type(req->base.tfm) !=
+					CRYPTO_ALG_TYPE_ABLKCIPHER);
+	rctx = ablkcipher_request_ctx(req);
+	rctx->aead = 0;
+	rctx->alg = CIPHER_ALG_DES;
+	rctx->dir = QCE_ENCRYPT;
+	rctx->mode = QCE_MODE_CBC;
+
+	pstat->ablk_cipher_des_enc++;
+	return _qcrypto_queue_req(cp, ctx->pengine, &req->base);
+};
+
+static int _qcrypto_enc_3des_ecb(struct ablkcipher_request *req)
+{
+	struct qcrypto_cipher_req_ctx *rctx;
+	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+	struct crypto_priv *cp = ctx->cp;
+	struct crypto_stat *pstat;
+
+	pstat = &_qcrypto_stat;
+
+	BUG_ON(crypto_tfm_alg_type(req->base.tfm) !=
+					CRYPTO_ALG_TYPE_ABLKCIPHER);
+	rctx = ablkcipher_request_ctx(req);
+	rctx->aead = 0;
+	rctx->alg = CIPHER_ALG_3DES;
+	rctx->dir = QCE_ENCRYPT;
+	rctx->mode = QCE_MODE_ECB;
+
+	pstat->ablk_cipher_3des_enc++;
+	return _qcrypto_queue_req(cp, ctx->pengine, &req->base);
+};
+
+static int _qcrypto_enc_3des_cbc(struct ablkcipher_request *req)
+{
+	struct qcrypto_cipher_req_ctx *rctx;
+	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+	struct crypto_priv *cp = ctx->cp;
+	struct crypto_stat *pstat;
+
+	pstat = &_qcrypto_stat;
+
+	BUG_ON(crypto_tfm_alg_type(req->base.tfm) !=
+					CRYPTO_ALG_TYPE_ABLKCIPHER);
+	rctx = ablkcipher_request_ctx(req);
+	rctx->aead = 0;
+	rctx->alg = CIPHER_ALG_3DES;
+	rctx->dir = QCE_ENCRYPT;
+	rctx->mode = QCE_MODE_CBC;
+
+	pstat->ablk_cipher_3des_enc++;
+	return _qcrypto_queue_req(cp, ctx->pengine, &req->base);
+};
+
+static int _qcrypto_dec_aes_ecb(struct ablkcipher_request *req)
+{
+	struct qcrypto_cipher_req_ctx *rctx;
+	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+	struct crypto_priv *cp = ctx->cp;
+	struct crypto_stat *pstat;
+
+	pstat = &_qcrypto_stat;
+
+	BUG_ON(crypto_tfm_alg_type(req->base.tfm) !=
+				CRYPTO_ALG_TYPE_ABLKCIPHER);
+#ifdef QCRYPTO_DEBUG
+	dev_info(&ctx->pengine->pdev->dev, "_qcrypto_dec_aes_ecb: %pK\n", req);
+#endif
+
+	if ((ctx->enc_key_len == AES_KEYSIZE_192) &&
+			(!cp->ce_support.aes_key_192) &&
+				ctx->cipher_aes192_fb)
+		return _qcrypto_dec_aes_192_fallback(req);
+
+	rctx = ablkcipher_request_ctx(req);
+	rctx->aead = 0;
+	rctx->alg = CIPHER_ALG_AES;
+	rctx->dir = QCE_DECRYPT;
+	rctx->mode = QCE_MODE_ECB;
+
+	pstat->ablk_cipher_aes_dec++;
+	return _qcrypto_queue_req(cp, ctx->pengine, &req->base);
+};
+
+static int _qcrypto_dec_aes_cbc(struct ablkcipher_request *req)
+{
+	struct qcrypto_cipher_req_ctx *rctx;
+	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+	struct crypto_priv *cp = ctx->cp;
+	struct crypto_stat *pstat;
+
+	pstat = &_qcrypto_stat;
+
+	BUG_ON(crypto_tfm_alg_type(req->base.tfm) !=
+				CRYPTO_ALG_TYPE_ABLKCIPHER);
+#ifdef QCRYPTO_DEBUG
+	dev_info(&ctx->pengine->pdev->dev, "_qcrypto_dec_aes_cbc: %pK\n", req);
+#endif
+
+	if ((ctx->enc_key_len == AES_KEYSIZE_192) &&
+			(!cp->ce_support.aes_key_192) &&
+				ctx->cipher_aes192_fb)
+		return _qcrypto_dec_aes_192_fallback(req);
+
+	rctx = ablkcipher_request_ctx(req);
+	rctx->aead = 0;
+	rctx->alg = CIPHER_ALG_AES;
+	rctx->dir = QCE_DECRYPT;
+	rctx->mode = QCE_MODE_CBC;
+
+	pstat->ablk_cipher_aes_dec++;
+	return _qcrypto_queue_req(cp, ctx->pengine, &req->base);
+};
+
+static int _qcrypto_dec_aes_ctr(struct ablkcipher_request *req)
+{
+	struct qcrypto_cipher_req_ctx *rctx;
+	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+	struct crypto_priv *cp = ctx->cp;
+	struct crypto_stat *pstat;
+
+	pstat = &_qcrypto_stat;
+
+	BUG_ON(crypto_tfm_alg_type(req->base.tfm) !=
+					CRYPTO_ALG_TYPE_ABLKCIPHER);
+#ifdef QCRYPTO_DEBUG
+	dev_info(&ctx->pengine->pdev->dev, "_qcrypto_dec_aes_ctr: %pK\n", req);
+#endif
+
+	if ((ctx->enc_key_len == AES_KEYSIZE_192) &&
+			(!cp->ce_support.aes_key_192) &&
+				ctx->cipher_aes192_fb)
+		return _qcrypto_dec_aes_192_fallback(req);
+
+	rctx = ablkcipher_request_ctx(req);
+	rctx->aead = 0;
+	rctx->alg = CIPHER_ALG_AES;
+	rctx->mode = QCE_MODE_CTR;
+
+	/* Note: AES-CTR has no distinct decrypt operation; the keystream
+	 * XOR is the same both ways, so issue it as an encrypt.
+	 */
+	rctx->dir = QCE_ENCRYPT;
+
+	pstat->ablk_cipher_aes_dec++;
+	return _qcrypto_queue_req(cp, ctx->pengine, &req->base);
+};
+
+static int _qcrypto_dec_des_ecb(struct ablkcipher_request *req)
+{
+	struct qcrypto_cipher_req_ctx *rctx;
+	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+	struct crypto_priv *cp = ctx->cp;
+	struct crypto_stat *pstat;
+
+	pstat = &_qcrypto_stat;
+
+	BUG_ON(crypto_tfm_alg_type(req->base.tfm) !=
+					CRYPTO_ALG_TYPE_ABLKCIPHER);
+	rctx = ablkcipher_request_ctx(req);
+	rctx->aead = 0;
+	rctx->alg = CIPHER_ALG_DES;
+	rctx->dir = QCE_DECRYPT;
+	rctx->mode = QCE_MODE_ECB;
+
+	pstat->ablk_cipher_des_dec++;
+	return _qcrypto_queue_req(cp, ctx->pengine, &req->base);
+};
+
+static int _qcrypto_dec_des_cbc(struct ablkcipher_request *req)
+{
+	struct qcrypto_cipher_req_ctx *rctx;
+	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+	struct crypto_priv *cp = ctx->cp;
+	struct crypto_stat *pstat;
+
+	pstat = &_qcrypto_stat;
+
+	BUG_ON(crypto_tfm_alg_type(req->base.tfm) !=
+					CRYPTO_ALG_TYPE_ABLKCIPHER);
+	rctx = ablkcipher_request_ctx(req);
+	rctx->aead = 0;
+	rctx->alg = CIPHER_ALG_DES;
+	rctx->dir = QCE_DECRYPT;
+	rctx->mode = QCE_MODE_CBC;
+
+	pstat->ablk_cipher_des_dec++;
+	return _qcrypto_queue_req(cp, ctx->pengine, &req->base);
+};
+
+static int _qcrypto_dec_3des_ecb(struct ablkcipher_request *req)
+{
+	struct qcrypto_cipher_req_ctx *rctx;
+	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+	struct crypto_priv *cp = ctx->cp;
+	struct crypto_stat *pstat;
+
+	pstat = &_qcrypto_stat;
+
+	BUG_ON(crypto_tfm_alg_type(req->base.tfm) !=
+					CRYPTO_ALG_TYPE_ABLKCIPHER);
+	rctx = ablkcipher_request_ctx(req);
+	rctx->aead = 0;
+	rctx->alg = CIPHER_ALG_3DES;
+	rctx->dir = QCE_DECRYPT;
+	rctx->mode = QCE_MODE_ECB;
+
+	pstat->ablk_cipher_3des_dec++;
+	return _qcrypto_queue_req(cp, ctx->pengine, &req->base);
+};
+
+static int _qcrypto_dec_3des_cbc(struct ablkcipher_request *req)
+{
+	struct qcrypto_cipher_req_ctx *rctx;
+	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+	struct crypto_priv *cp = ctx->cp;
+	struct crypto_stat *pstat;
+
+	pstat = &_qcrypto_stat;
+
+	BUG_ON(crypto_tfm_alg_type(req->base.tfm) !=
+					CRYPTO_ALG_TYPE_ABLKCIPHER);
+	rctx = ablkcipher_request_ctx(req);
+	rctx->aead = 0;
+	rctx->alg = CIPHER_ALG_3DES;
+	rctx->dir = QCE_DECRYPT;
+	rctx->mode = QCE_MODE_CBC;
+
+	pstat->ablk_cipher_3des_dec++;
+	return _qcrypto_queue_req(cp, ctx->pengine, &req->base);
+};
+
+static int _qcrypto_dec_aes_xts(struct ablkcipher_request *req)
+{
+	struct qcrypto_cipher_req_ctx *rctx;
+	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+	struct crypto_priv *cp = ctx->cp;
+	struct crypto_stat *pstat;
+
+	pstat = &_qcrypto_stat;
+
+	BUG_ON(crypto_tfm_alg_type(req->base.tfm) !=
+					CRYPTO_ALG_TYPE_ABLKCIPHER);
+	rctx = ablkcipher_request_ctx(req);
+	rctx->aead = 0;
+	rctx->alg = CIPHER_ALG_AES;
+	rctx->mode = QCE_MODE_XTS;
+	rctx->dir = QCE_DECRYPT;
+
+	pstat->ablk_cipher_aes_dec++;
+	return _qcrypto_queue_req(cp, ctx->pengine, &req->base);
+};
+
+static int _qcrypto_aead_decrypt_aes_ccm(struct aead_request *req)
+{
+	struct qcrypto_cipher_req_ctx *rctx;
+	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+	struct crypto_priv *cp = ctx->cp;
+	struct crypto_stat *pstat;
+
+	if ((ctx->authsize > 16) || (ctx->authsize < 4) || (ctx->authsize & 1))
+		return  -EINVAL;
+	if ((ctx->auth_key_len != AES_KEYSIZE_128) &&
+		(ctx->auth_key_len != AES_KEYSIZE_256))
+		return  -EINVAL;
+
+	pstat = &_qcrypto_stat;
+
+	rctx = aead_request_ctx(req);
+	rctx->aead = 1;
+	rctx->alg = CIPHER_ALG_AES;
+	rctx->dir = QCE_DECRYPT;
+	rctx->mode = QCE_MODE_CCM;
+	rctx->iv = req->iv;
+	rctx->ccmtype = 0;
+
+	pstat->aead_ccm_aes_dec++;
+	return _qcrypto_queue_req(cp, ctx->pengine, &req->base);
+}
+
+static int _qcrypto_aead_rfc4309_dec_aes_ccm(struct aead_request *req)
+{
+	struct qcrypto_cipher_req_ctx *rctx;
+	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+	struct crypto_priv *cp = ctx->cp;
+	struct crypto_stat *pstat;
+
+	pstat = &_qcrypto_stat;
+	if (req->assoclen != 16 && req->assoclen != 20)
+		return -EINVAL;
+	rctx = aead_request_ctx(req);
+	rctx->aead = 1;
+	rctx->alg = CIPHER_ALG_AES;
+	rctx->dir = QCE_DECRYPT;
+	rctx->mode = QCE_MODE_CCM;
+	memset(rctx->rfc4309_iv, 0, sizeof(rctx->rfc4309_iv));
+	rctx->rfc4309_iv[0] = 3; /* L - 1, where L = 4 (RFC 4309) */
+	memcpy(&rctx->rfc4309_iv[1], ctx->ccm4309_nonce, 3);
+	memcpy(&rctx->rfc4309_iv[4], req->iv, 8);
+	rctx->ccmtype = 1;
+	rctx->iv = rctx->rfc4309_iv;
+	pstat->aead_rfc4309_ccm_aes_dec++;
+	return _qcrypto_queue_req(cp, ctx->pengine, &req->base);
+}
+
+static int _qcrypto_aead_setauthsize(struct crypto_aead *authenc,
+				unsigned int authsize)
+{
+	struct qcrypto_cipher_ctx *ctx = crypto_aead_ctx(authenc);
+
+	ctx->authsize = authsize;
+	return 0;
+}
+
+static int _qcrypto_aead_ccm_setauthsize(struct crypto_aead *authenc,
+				  unsigned int authsize)
+{
+	struct qcrypto_cipher_ctx *ctx = crypto_aead_ctx(authenc);
+
+	switch (authsize) {
+	case 4:
+	case 6:
+	case 8:
+	case 10:
+	case 12:
+	case 14:
+	case 16:
+		break;
+	default:
+		return -EINVAL;
+	}
+	ctx->authsize = authsize;
+	return 0;
+}
+
+static int _qcrypto_aead_rfc4309_ccm_setauthsize(struct crypto_aead *authenc,
+				  unsigned int authsize)
+{
+	struct qcrypto_cipher_ctx *ctx = crypto_aead_ctx(authenc);
+
+	switch (authsize) {
+	case 8:
+	case 12:
+	case 16:
+		break;
+	default:
+		return -EINVAL;
+	}
+	ctx->authsize = authsize;
+	return 0;
+}
+
+static int _qcrypto_aead_setkey(struct crypto_aead *tfm, const u8 *key,
+			unsigned int keylen)
+{
+	struct qcrypto_cipher_ctx *ctx = crypto_aead_ctx(tfm);
+	struct rtattr *rta = (struct rtattr *)key;
+	struct crypto_authenc_key_param *param;
+	int ret;
+
+	if (!RTA_OK(rta, keylen))
+		goto badkey;
+	if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
+		goto badkey;
+	if (RTA_PAYLOAD(rta) < sizeof(*param))
+		goto badkey;
+
+	param = RTA_DATA(rta);
+	ctx->enc_key_len = be32_to_cpu(param->enckeylen);
+
+	key += RTA_ALIGN(rta->rta_len);
+	keylen -= RTA_ALIGN(rta->rta_len);
+
+	if (keylen < ctx->enc_key_len)
+		goto badkey;
+
+	ctx->auth_key_len = keylen - ctx->enc_key_len;
+	if (ctx->enc_key_len >= QCRYPTO_MAX_KEY_SIZE ||
+				ctx->auth_key_len >= QCRYPTO_MAX_KEY_SIZE)
+		goto badkey;
+	memset(ctx->auth_key, 0, QCRYPTO_MAX_KEY_SIZE);
+	memcpy(ctx->enc_key, key + ctx->auth_key_len, ctx->enc_key_len);
+	memcpy(ctx->auth_key, key, ctx->auth_key_len);
+
+	if (ctx->enc_key_len == AES_KEYSIZE_192 &&  ctx->cipher_aes192_fb &&
+			ctx->ahash_aead_aes192_fb) {
+		crypto_ahash_clear_flags(ctx->ahash_aead_aes192_fb, ~0);
+		ret = crypto_ahash_setkey(ctx->ahash_aead_aes192_fb,
+					ctx->auth_key, ctx->auth_key_len);
+		if (ret)
+			goto badkey;
+		crypto_ablkcipher_clear_flags(ctx->cipher_aes192_fb, ~0);
+		ret = crypto_ablkcipher_setkey(ctx->cipher_aes192_fb,
+					ctx->enc_key, ctx->enc_key_len);
+		if (ret)
+			goto badkey;
+	}
+
+	return 0;
+badkey:
+	ctx->enc_key_len = 0;
+	crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+	return -EINVAL;
+}
+
+static int _qcrypto_aead_ccm_setkey(struct crypto_aead *aead, const u8 *key,
+			unsigned int keylen)
+{
+	struct crypto_tfm *tfm = crypto_aead_tfm(aead);
+	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+	struct crypto_priv *cp = ctx->cp;
+
+	switch (keylen) {
+	case AES_KEYSIZE_128:
+	case AES_KEYSIZE_256:
+		break;
+	case AES_KEYSIZE_192:
+		if (cp->ce_support.aes_key_192)
+			break;
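+		/* else fall through and reject the 192-bit key */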
+	default:
+		ctx->enc_key_len = 0;
+		crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
+		return -EINVAL;
+	}
+	ctx->enc_key_len = keylen;
+	memcpy(ctx->enc_key, key, keylen);
+	ctx->auth_key_len = keylen;
+	memcpy(ctx->auth_key, key, keylen);
+
+	return 0;
+}
+
+static int _qcrypto_aead_rfc4309_ccm_setkey(struct crypto_aead *aead,
+				 const u8 *key, unsigned int key_len)
+{
+	struct crypto_tfm *tfm = crypto_aead_tfm(aead);
+	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+	int ret;
+
+	if (key_len < QCRYPTO_CCM4309_NONCE_LEN)
+		return -EINVAL;
+	key_len -= QCRYPTO_CCM4309_NONCE_LEN;
+	memcpy(ctx->ccm4309_nonce, key + key_len,  QCRYPTO_CCM4309_NONCE_LEN);
+	ret = _qcrypto_aead_ccm_setkey(aead, key, key_len);
+	return ret;
+};
+
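+/*
+ * The AES-192 authenc fallback runs in two stages on the software
+ * tfms: encryption does AES-CBC first and then HMACs the result,
+ * appending the ICV, while decryption verifies the ICV first and only
+ * then decrypts.  The _cb helper below frees the sub-requests and
+ * completes the original aead request.
+ */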
+static void _qcrypto_aead_aes_192_fb_a_cb(struct qcrypto_cipher_req_ctx *rctx,
+								int res)
+{
+	struct aead_request *req;
+	struct crypto_async_request *areq;
+
+	req = rctx->aead_req;
+	areq = &req->base;
+	if (rctx->fb_aes_req)
+		ablkcipher_request_free(rctx->fb_aes_req);
+	if (rctx->fb_hash_req)
+		ahash_request_free(rctx->fb_hash_req);
+	rctx->fb_aes_req = NULL;
+	rctx->fb_hash_req = NULL;
+	kfree(rctx->fb_aes_iv);
+	areq->complete(areq, res);
+}
+
+static void _aead_aes_fb_stage2_ahash_complete(
+				struct crypto_async_request *base, int err)
+{
+	struct qcrypto_cipher_req_ctx *rctx;
+	struct aead_request *req;
+	struct qcrypto_cipher_ctx *ctx;
+
+	rctx = base->data;
+	req = rctx->aead_req;
+	ctx = crypto_tfm_ctx(req->base.tfm);
+	/* copy icv */
+	if (err == 0)
+		scatterwalk_map_and_copy(rctx->fb_ahash_digest,
+					rctx->fb_aes_dst,
+					req->cryptlen,
+					ctx->authsize, 1);
+	_qcrypto_aead_aes_192_fb_a_cb(rctx, err);
+}
+
+static int _start_aead_aes_fb_stage2_hmac(struct qcrypto_cipher_req_ctx *rctx)
+{
+	struct ahash_request *ahash_req;
+
+	ahash_req = rctx->fb_hash_req;
+	ahash_request_set_callback(ahash_req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+				 _aead_aes_fb_stage2_ahash_complete, rctx);
+
+	return crypto_ahash_digest(ahash_req);
+}
+
+static void _aead_aes_fb_stage2_decrypt_complete(
+			struct crypto_async_request *base, int err)
+{
+	struct qcrypto_cipher_req_ctx *rctx;
+
+	rctx = base->data;
+	_qcrypto_aead_aes_192_fb_a_cb(rctx, err);
+}
+
+static int _start_aead_aes_fb_stage2_decrypt(
+					struct qcrypto_cipher_req_ctx *rctx)
+{
+	struct ablkcipher_request *aes_req;
+
+	aes_req = rctx->fb_aes_req;
+	ablkcipher_request_set_callback(aes_req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+			_aead_aes_fb_stage2_decrypt_complete, rctx);
+	return crypto_ablkcipher_decrypt(aes_req);
+}
+
+static void _aead_aes_fb_stage1_ahash_complete(
+				struct crypto_async_request *base, int err)
+{
+	struct qcrypto_cipher_req_ctx *rctx;
+	struct aead_request *req;
+	struct qcrypto_cipher_ctx *ctx;
+
+	rctx = base->data;
+	req = rctx->aead_req;
+	ctx = crypto_tfm_ctx(req->base.tfm);
+
+	/* compare icv */
+	if (err == 0) {
+		unsigned char tmp[ctx->authsize];
+
+		scatterwalk_map_and_copy(tmp, rctx->fb_aes_src,
+			req->cryptlen - ctx->authsize, ctx->authsize, 0);
+		if (memcmp(rctx->fb_ahash_digest, tmp, ctx->authsize) != 0)
+			err = -EBADMSG;
+	}
+	if (err)
+		_qcrypto_aead_aes_192_fb_a_cb(rctx, err);
+	else {
+		err = _start_aead_aes_fb_stage2_decrypt(rctx);
+		if (err != -EINPROGRESS &&  err != -EBUSY)
+			_qcrypto_aead_aes_192_fb_a_cb(rctx, err);
+	}
+}
+
+static void _aead_aes_fb_stage1_encrypt_complete(
+				struct crypto_async_request *base, int err)
+{
+	struct qcrypto_cipher_req_ctx *rctx;
+	struct aead_request *req;
+	struct qcrypto_cipher_ctx *ctx;
+
+	rctx = base->data;
+	req = rctx->aead_req;
+	ctx = crypto_tfm_ctx(req->base.tfm);
+
+	memcpy(ctx->iv, rctx->fb_aes_iv, rctx->ivsize);
+
+	if (err) {
+		_qcrypto_aead_aes_192_fb_a_cb(rctx, err);
+		return;
+	}
+
+	err = _start_aead_aes_fb_stage2_hmac(rctx);
+
+	/* copy icv */
+	if (err == 0) {
+		scatterwalk_map_and_copy(rctx->fb_ahash_digest,
+					rctx->fb_aes_dst,
+					req->cryptlen,
+					ctx->authsize, 1);
+	}
+	if (err != -EINPROGRESS &&  err != -EBUSY)
+		_qcrypto_aead_aes_192_fb_a_cb(rctx, err);
+}
+
+static int _qcrypto_aead_aes_192_fallback(struct aead_request *req,
+							bool is_encrypt)
+{
+	int rc = -EINVAL;
+	struct qcrypto_cipher_req_ctx *rctx = aead_request_ctx(req);
+	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+	struct crypto_aead *aead_tfm = crypto_aead_reqtfm(req);
+	struct ablkcipher_request *aes_req = NULL;
+	struct ahash_request *ahash_req = NULL;
+	int nbytes;
+	struct scatterlist *src, *dst;
+
+	rctx->fb_aes_iv = NULL;
+	aes_req = ablkcipher_request_alloc(ctx->cipher_aes192_fb, GFP_KERNEL);
+	if (!aes_req)
+		return -ENOMEM;
+	ahash_req = ahash_request_alloc(ctx->ahash_aead_aes192_fb, GFP_KERNEL);
+	if (!ahash_req)
+		goto ret;
+	rctx->fb_aes_req = aes_req;
+	rctx->fb_hash_req = ahash_req;
+	rctx->aead_req = req;
+	/*
+	 * assoc data and iv sit at the beginning of both the src and
+	 * dst sg lists; fast-forward past them before ciphering
+	 */
+	src = scatterwalk_ffwd(rctx->fb_ablkcipher_src_sg, req->src,
+				req->assoclen);
+	dst = scatterwalk_ffwd(rctx->fb_ablkcipher_dst_sg, req->dst,
+				req->assoclen);
+
+	nbytes = req->cryptlen;
+	if (!is_encrypt)
+		nbytes -=  ctx->authsize;
+	rctx->fb_ahash_length = nbytes +  req->assoclen;
+	rctx->fb_aes_src = src;
+	rctx->fb_aes_dst = dst;
+	rctx->fb_aes_cryptlen = nbytes;
+	rctx->ivsize = crypto_aead_ivsize(aead_tfm);
+	rctx->fb_aes_iv = kzalloc(rctx->ivsize, GFP_ATOMIC);
+	if (!rctx->fb_aes_iv)
+		goto ret;
+	memcpy(rctx->fb_aes_iv, req->iv, rctx->ivsize);
+	ablkcipher_request_set_crypt(aes_req, rctx->fb_aes_src,
+					rctx->fb_aes_dst,
+					rctx->fb_aes_cryptlen, rctx->fb_aes_iv);
+	if (is_encrypt)
+		ahash_request_set_crypt(ahash_req, req->dst,
+					rctx->fb_ahash_digest,
+					rctx->fb_ahash_length);
+	else
+		ahash_request_set_crypt(ahash_req, req->src,
+					rctx->fb_ahash_digest,
+					rctx->fb_ahash_length);
+
+	if (is_encrypt) {
+		ablkcipher_request_set_callback(aes_req,
+			CRYPTO_TFM_REQ_MAY_BACKLOG,
+			_aead_aes_fb_stage1_encrypt_complete, rctx);
+
+		rc = crypto_ablkcipher_encrypt(aes_req);
+		if (rc == 0) {
+			memcpy(ctx->iv, rctx->fb_aes_iv, rctx->ivsize);
+			rc = _start_aead_aes_fb_stage2_hmac(rctx);
+			if (rc == 0) {
+				/* copy icv */
+				scatterwalk_map_and_copy(rctx->fb_ahash_digest,
+					dst,
+					req->cryptlen,
+					ctx->authsize, 1);
+			}
+		}
+		if (rc == -EINPROGRESS || rc == -EBUSY)
+			return rc;
+		goto ret;
+
+	} else {
+		ahash_request_set_callback(ahash_req,
+				CRYPTO_TFM_REQ_MAY_BACKLOG,
+				_aead_aes_fb_stage1_ahash_complete, rctx);
+
+		rc = crypto_ahash_digest(ahash_req);
+		if (rc == 0) {
+			unsigned char tmp[ctx->authsize];
+
+			/* compare icv */
+			scatterwalk_map_and_copy(tmp,
+				src, req->cryptlen - ctx->authsize,
+				ctx->authsize, 0);
+			if (memcmp(rctx->fb_ahash_digest, tmp,
+							ctx->authsize) != 0)
+				rc = -EBADMSG;
+			else
+				rc = _start_aead_aes_fb_stage2_decrypt(rctx);
+		}
+		if (rc == -EINPROGRESS || rc == -EBUSY)
+			return rc;
+		goto ret;
+	}
+ret:
+	if (aes_req)
+		ablkcipher_request_free(aes_req);
+	if (ahash_req)
+		ahash_request_free(ahash_req);
+	kfree(rctx->fb_aes_iv);
+	return rc;
+}
+
+static int _qcrypto_aead_encrypt_aes_cbc(struct aead_request *req)
+{
+	struct qcrypto_cipher_req_ctx *rctx;
+	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+	struct crypto_priv *cp = ctx->cp;
+	struct crypto_stat *pstat;
+
+	pstat = &_qcrypto_stat;
+
+#ifdef QCRYPTO_DEBUG
+	dev_info(&ctx->pengine->pdev->dev,
+			 "_qcrypto_aead_encrypt_aes_cbc: %pK\n", req);
+#endif
+
+	rctx = aead_request_ctx(req);
+	rctx->aead = 1;
+	rctx->alg = CIPHER_ALG_AES;
+	rctx->dir = QCE_ENCRYPT;
+	rctx->mode = QCE_MODE_CBC;
+	rctx->iv = req->iv;
+	rctx->aead_req = req;
+	if (ctx->auth_alg == QCE_HASH_SHA1_HMAC)
+		pstat->aead_sha1_aes_enc++;
+	else
+		pstat->aead_sha256_aes_enc++;
+	if (ctx->enc_key_len == AES_KEYSIZE_192 &&  ctx->cipher_aes192_fb &&
+						ctx->ahash_aead_aes192_fb)
+		return _qcrypto_aead_aes_192_fallback(req, true);
+	return _qcrypto_queue_req(cp, ctx->pengine, &req->base);
+}
+
+static int _qcrypto_aead_decrypt_aes_cbc(struct aead_request *req)
+{
+	struct qcrypto_cipher_req_ctx *rctx;
+	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+	struct crypto_priv *cp = ctx->cp;
+	struct crypto_stat *pstat;
+
+	pstat = &_qcrypto_stat;
+
+#ifdef QCRYPTO_DEBUG
+	dev_info(&ctx->pengine->pdev->dev,
+			 "_qcrypto_aead_decrypt_aes_cbc: %pK\n", req);
+#endif
+	rctx = aead_request_ctx(req);
+	rctx->aead = 1;
+	rctx->alg = CIPHER_ALG_AES;
+	rctx->dir = QCE_DECRYPT;
+	rctx->mode = QCE_MODE_CBC;
+	rctx->iv = req->iv;
+	rctx->aead_req = req;
+
+	if (ctx->auth_alg == QCE_HASH_SHA1_HMAC)
+		pstat->aead_sha1_aes_dec++;
+	else
+		pstat->aead_sha256_aes_dec++;
+
+	if (ctx->enc_key_len == AES_KEYSIZE_192 &&  ctx->cipher_aes192_fb &&
+						ctx->ahash_aead_aes192_fb)
+		return _qcrypto_aead_aes_192_fallback(req, false);
+	return _qcrypto_queue_req(cp, ctx->pengine, &req->base);
+}
+
+static int _qcrypto_aead_encrypt_des_cbc(struct aead_request *req)
+{
+	struct qcrypto_cipher_req_ctx *rctx;
+	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+	struct crypto_priv *cp = ctx->cp;
+	struct crypto_stat *pstat;
+
+	pstat = &_qcrypto_stat;
+
+	rctx = aead_request_ctx(req);
+	rctx->aead = 1;
+	rctx->alg = CIPHER_ALG_DES;
+	rctx->dir = QCE_ENCRYPT;
+	rctx->mode = QCE_MODE_CBC;
+	rctx->iv = req->iv;
+
+	if (ctx->auth_alg == QCE_HASH_SHA1_HMAC)
+		pstat->aead_sha1_des_enc++;
+	else
+		pstat->aead_sha256_des_enc++;
+	return _qcrypto_queue_req(cp, ctx->pengine, &req->base);
+}
+
+static int _qcrypto_aead_decrypt_des_cbc(struct aead_request *req)
+{
+	struct qcrypto_cipher_req_ctx *rctx;
+	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+	struct crypto_priv *cp = ctx->cp;
+	struct crypto_stat *pstat;
+
+	pstat = &_qcrypto_stat;
+
+	rctx = aead_request_ctx(req);
+	rctx->aead = 1;
+	rctx->alg = CIPHER_ALG_DES;
+	rctx->dir = QCE_DECRYPT;
+	rctx->mode = QCE_MODE_CBC;
+	rctx->iv = req->iv;
+
+	if (ctx->auth_alg == QCE_HASH_SHA1_HMAC)
+		pstat->aead_sha1_des_dec++;
+	else
+		pstat->aead_sha256_des_dec++;
+	return _qcrypto_queue_req(cp, ctx->pengine, &req->base);
+}
+
+static int _qcrypto_aead_encrypt_3des_cbc(struct aead_request *req)
+{
+	struct qcrypto_cipher_req_ctx *rctx;
+	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+	struct crypto_priv *cp = ctx->cp;
+	struct crypto_stat *pstat;
+
+	pstat = &_qcrypto_stat;
+
+	rctx = aead_request_ctx(req);
+	rctx->aead = 1;
+	rctx->alg = CIPHER_ALG_3DES;
+	rctx->dir = QCE_ENCRYPT;
+	rctx->mode = QCE_MODE_CBC;
+	rctx->iv = req->iv;
+
+	if (ctx->auth_alg == QCE_HASH_SHA1_HMAC)
+		pstat->aead_sha1_3des_enc++;
+	else
+		pstat->aead_sha256_3des_enc++;
+	return _qcrypto_queue_req(cp, ctx->pengine, &req->base);
+}
+
+static int _qcrypto_aead_decrypt_3des_cbc(struct aead_request *req)
+{
+	struct qcrypto_cipher_req_ctx *rctx;
+	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+	struct crypto_priv *cp = ctx->cp;
+	struct crypto_stat *pstat;
+
+	pstat = &_qcrypto_stat;
+
+	rctx = aead_request_ctx(req);
+	rctx->aead = 1;
+	rctx->alg = CIPHER_ALG_3DES;
+	rctx->dir = QCE_DECRYPT;
+	rctx->mode = QCE_MODE_CBC;
+	rctx->iv = req->iv;
+
+	if (ctx->auth_alg == QCE_HASH_SHA1_HMAC)
+		pstat->aead_sha1_3des_dec++;
+	else
+		pstat->aead_sha256_3des_dec++;
+	return _qcrypto_queue_req(cp, ctx->pengine, &req->base);
+}
+
+static int _sha_init(struct ahash_request *req)
+{
+	struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req);
+
+	rctx->first_blk = 1;
+	rctx->last_blk = 0;
+	rctx->byte_count[0] = 0;
+	rctx->byte_count[1] = 0;
+	rctx->byte_count[2] = 0;
+	rctx->byte_count[3] = 0;
+	rctx->trailing_buf_len = 0;
+	rctx->count = 0;
+
+	return 0;
+};
+
+static int _sha1_init(struct ahash_request *req)
+{
+	struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm);
+	struct crypto_stat *pstat;
+	struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req);
+
+	pstat = &_qcrypto_stat;
+
+	_sha_init(req);
+	sha_ctx->alg = QCE_HASH_SHA1;
+
+	memset(&rctx->trailing_buf[0], 0x00, SHA1_BLOCK_SIZE);
+	memcpy(&rctx->digest[0], &_std_init_vector_sha1_uint8[0],
+						SHA1_DIGEST_SIZE);
+	sha_ctx->diglen = SHA1_DIGEST_SIZE;
+	pstat->sha1_digest++;
+	return 0;
+};
+
+static int _sha256_init(struct ahash_request *req)
+{
+	struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm);
+	struct crypto_stat *pstat;
+	struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req);
+
+	pstat = &_qcrypto_stat;
+
+	_sha_init(req);
+	sha_ctx->alg = QCE_HASH_SHA256;
+
+	memset(&rctx->trailing_buf[0], 0x00, SHA256_BLOCK_SIZE);
+	memcpy(&rctx->digest[0], &_std_init_vector_sha256_uint8[0],
+						SHA256_DIGEST_SIZE);
+	sha_ctx->diglen = SHA256_DIGEST_SIZE;
+	pstat->sha256_digest++;
+	return 0;
+};
+
+static int _sha1_export(struct ahash_request  *req, void *out)
+{
+	struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req);
+	struct sha1_state *out_ctx = (struct sha1_state *)out;
+
+	out_ctx->count = rctx->count;
+	_byte_stream_to_words(out_ctx->state, rctx->digest, SHA1_DIGEST_SIZE);
+	memcpy(out_ctx->buffer, rctx->trailing_buf, SHA1_BLOCK_SIZE);
+
+	return 0;
+};
+
+static int _sha1_hmac_export(struct ahash_request  *req, void *out)
+{
+	return _sha1_export(req, out);
+}
+
+/* crypto hw padding constant for hmac first operation */
+#define HMAC_PADDING 64
+
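+/*
+ * import/export translate between the generic sha1_state/sha256_state
+ * layout and this driver's request context: the running digest is held
+ * as a byte stream, byte_count only covers whole 64-byte blocks (hence
+ * the 0xFFFFFFC0 mask), and any partial block lives in trailing_buf.
+ */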
+static int __sha1_import_common(struct ahash_request  *req, const void *in,
+				bool hmac)
+{
+	struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm);
+	struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req);
+	struct sha1_state *in_ctx = (struct sha1_state *)in;
+	u64 hw_count = in_ctx->count;
+
+	rctx->count = in_ctx->count;
+	memcpy(rctx->trailing_buf, in_ctx->buffer, SHA1_BLOCK_SIZE);
+	if (in_ctx->count <= SHA1_BLOCK_SIZE) {
+		rctx->first_blk = 1;
+	} else {
+		rctx->first_blk = 0;
+		/*
+		 * For hmac, the hardware pads the first block when
+		 * "first" is set, so byte_count will have been
+		 * incremented by 64 after that first operation.
+		 */
+		if (hmac)
+			hw_count += HMAC_PADDING;
+	}
+	rctx->byte_count[0] =  (uint32_t)(hw_count & 0xFFFFFFC0);
+	rctx->byte_count[1] =  (uint32_t)(hw_count >> 32);
+	_words_to_byte_stream(in_ctx->state, rctx->digest, sha_ctx->diglen);
+
+	rctx->trailing_buf_len = (uint32_t)(in_ctx->count &
+						(SHA1_BLOCK_SIZE-1));
+	return 0;
+}
+
+static int _sha1_import(struct ahash_request  *req, const void *in)
+{
+	return __sha1_import_common(req, in, false);
+}
+
+static int _sha1_hmac_import(struct ahash_request  *req, const void *in)
+{
+	return __sha1_import_common(req, in, true);
+}
+
+static int _sha256_export(struct ahash_request  *req, void *out)
+{
+	struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req);
+	struct sha256_state *out_ctx = (struct sha256_state *)out;
+
+	out_ctx->count = rctx->count;
+	_byte_stream_to_words(out_ctx->state, rctx->digest, SHA256_DIGEST_SIZE);
+	memcpy(out_ctx->buf, rctx->trailing_buf, SHA256_BLOCK_SIZE);
+
+	return 0;
+};
+
+static int _sha256_hmac_export(struct ahash_request  *req, void *out)
+{
+	return _sha256_export(req, out);
+}
+
+static int __sha256_import_common(struct ahash_request  *req, const void *in,
+			bool hmac)
+{
+	struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm);
+	struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req);
+	struct sha256_state *in_ctx = (struct sha256_state *)in;
+	u64 hw_count = in_ctx->count;
+
+	rctx->count = in_ctx->count;
+	memcpy(rctx->trailing_buf, in_ctx->buf, SHA256_BLOCK_SIZE);
+
+	if (in_ctx->count <= SHA256_BLOCK_SIZE) {
+		rctx->first_blk = 1;
+	} else {
+		rctx->first_blk = 0;
+		/*
+		 * For hmac, the hardware pads the first block when
+		 * "first" is set, so byte_count will have been
+		 * incremented by 64 after that first operation.
+		 */
+		if (hmac)
+			hw_count += HMAC_PADDING;
+	}
+
+	rctx->byte_count[0] =  (uint32_t)(hw_count & 0xFFFFFFC0);
+	rctx->byte_count[1] =  (uint32_t)(hw_count >> 32);
+	_words_to_byte_stream(in_ctx->state, rctx->digest, sha_ctx->diglen);
+
+	rctx->trailing_buf_len = (uint32_t)(in_ctx->count &
+						(SHA256_BLOCK_SIZE-1));
+
+	return 0;
+}
+
+static int _sha256_import(struct ahash_request  *req, const void *in)
+{
+	return __sha256_import_common(req, in, false);
+}
+
+static int _sha256_hmac_import(struct ahash_request  *req, const void *in)
+{
+	return __sha256_import_common(req, in, true);
+}
+
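+/*
+ * For engines that can only DMA aligned, contiguous buffers
+ * (ce_support.aligned_only), copy the whole source scatterlist into a
+ * freshly allocated linear buffer and substitute a single-entry sg.
+ */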
+static int _copy_source(struct ahash_request  *req)
+{
+	struct qcrypto_sha_req_ctx *srctx = NULL;
+	uint32_t bytes = 0;
+	uint32_t num_sg = 0;
+
+	srctx = ahash_request_ctx(req);
+	srctx->orig_src = req->src;
+	srctx->data = kzalloc((req->nbytes + 64), GFP_ATOMIC);
+	if (srctx->data == NULL) {
+		pr_err("Mem alloc failed for srctx->data, size 0x%x\n",
+				req->nbytes + 64);
+		return -ENOMEM;
+	}
+
+	num_sg = qcrypto_count_sg(req->src, req->nbytes);
+	bytes = qcrypto_sg_copy_to_buffer(req->src, num_sg, srctx->data,
+						req->nbytes);
+	if (bytes != req->nbytes)
+		pr_warn("bytes copied=0x%x, bytes to copy=0x%x\n", bytes,
+							req->nbytes);
+	sg_set_buf(&srctx->dsg, srctx->data,
+				req->nbytes);
+	sg_mark_end(&srctx->dsg);
+	req->src = &srctx->dsg;
+
+	return 0;
+}
+
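+/*
+ * The hardware is fed whole blocks only, so a partial block is carried
+ * in trailing_buf between updates.  For example, with a 64-byte block
+ * size, an update of 100 bytes on top of 10 buffered bytes hashes 64
+ * bytes and carries the remaining 46 bytes forward.
+ */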
+static int _sha_update(struct ahash_request  *req, uint32_t sha_block_size)
+{
+	struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm);
+	struct crypto_priv *cp = sha_ctx->cp;
+	struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req);
+	uint32_t total, len, num_sg;
+	struct scatterlist *sg_last;
+	uint8_t *k_src = NULL;
+	uint32_t sha_pad_len = 0;
+	uint32_t trailing_buf_len = 0;
+	uint32_t nbytes;
+	uint32_t offset = 0;
+	uint32_t bytes = 0;
+	uint8_t  *staging;
+	int ret = 0;
+
+	/* check for trailing buffer from previous updates and append it */
+	total = req->nbytes + rctx->trailing_buf_len;
+	len = req->nbytes;
+
+	if (total <= sha_block_size) {
+		k_src = &rctx->trailing_buf[rctx->trailing_buf_len];
+		num_sg = qcrypto_count_sg(req->src, len);
+		bytes = qcrypto_sg_copy_to_buffer(req->src, num_sg, k_src, len);
+
+		rctx->trailing_buf_len = total;
+		return 0;
+	}
+
+	/* save the original req structure fields */
+	rctx->src = req->src;
+	rctx->nbytes = req->nbytes;
+
+	staging = (uint8_t *)ALIGN(((uintptr_t)rctx->staging_dmabuf),
+							L1_CACHE_BYTES);
+	memcpy(staging, rctx->trailing_buf, rctx->trailing_buf_len);
+	k_src = &rctx->trailing_buf[0];
+	/* get the new trailing buffer */
+	sha_pad_len = ALIGN(total, sha_block_size) - total;
+	trailing_buf_len =  sha_block_size - sha_pad_len;
+	offset = req->nbytes - trailing_buf_len;
+
+	if (offset != req->nbytes)
+		scatterwalk_map_and_copy(k_src, req->src, offset,
+						trailing_buf_len, 0);
+
+	nbytes = total - trailing_buf_len;
+	num_sg = qcrypto_count_sg(req->src, req->nbytes);
+
+	len = rctx->trailing_buf_len;
+	sg_last = req->src;
+
+	while (len < nbytes) {
+		if ((len + sg_last->length) > nbytes)
+			break;
+		len += sg_last->length;
+		sg_last = sg_next(sg_last);
+	}
+	if (rctx->trailing_buf_len) {
+		if (cp->ce_support.aligned_only)  {
+			rctx->data2 = kzalloc((req->nbytes + 64), GFP_ATOMIC);
+			if (rctx->data2 == NULL) {
+				pr_err("Mem alloc failed for rctx->data2, size 0x%x\n",
+							req->nbytes + 64);
+				return -ENOMEM;
+			}
+			memcpy(rctx->data2, staging,
+						rctx->trailing_buf_len);
+			memcpy((rctx->data2 + rctx->trailing_buf_len),
+					rctx->data, req->src->length);
+			kzfree(rctx->data);
+			rctx->data = rctx->data2;
+			sg_set_buf(&rctx->sg[0], rctx->data,
+					(rctx->trailing_buf_len +
+							req->src->length));
+			req->src = rctx->sg;
+			sg_mark_end(&rctx->sg[0]);
+		} else {
+			sg_mark_end(sg_last);
+			memset(rctx->sg, 0, sizeof(rctx->sg));
+			sg_set_buf(&rctx->sg[0], staging,
+						rctx->trailing_buf_len);
+			sg_mark_end(&rctx->sg[1]);
+			sg_chain(rctx->sg, 2, req->src);
+			req->src = rctx->sg;
+		}
+	} else
+		sg_mark_end(sg_last);
+
+	req->nbytes = nbytes;
+	rctx->trailing_buf_len = trailing_buf_len;
+
+	ret =  _qcrypto_queue_req(cp, sha_ctx->pengine, &req->base);
+
+	return ret;
+};
+
+static int _sha1_update(struct ahash_request  *req)
+{
+	struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req);
+	struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm);
+	struct crypto_priv *cp = sha_ctx->cp;
+
+	if (cp->ce_support.aligned_only) {
+		if (_copy_source(req))
+			return -ENOMEM;
+	}
+	rctx->count += req->nbytes;
+	return _sha_update(req, SHA1_BLOCK_SIZE);
+}
+
+static int _sha256_update(struct ahash_request  *req)
+{
+	struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req);
+	struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm);
+	struct crypto_priv *cp = sha_ctx->cp;
+
+	if (cp->ce_support.aligned_only) {
+		if (_copy_source(req))
+			return -ENOMEM;
+	}
+
+	rctx->count += req->nbytes;
+	return _sha_update(req, SHA256_BLOCK_SIZE);
+}
+
+static int _sha_final(struct ahash_request *req, uint32_t sha_block_size)
+{
+	struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm);
+	struct crypto_priv *cp = sha_ctx->cp;
+	struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req);
+	int ret = 0;
+	uint8_t  *staging;
+
+	if (cp->ce_support.aligned_only) {
+		if (_copy_source(req))
+			return -ENOMEM;
+	}
+
+	rctx->last_blk = 1;
+
+	/* save the original req structure fields */
+	rctx->src = req->src;
+	rctx->nbytes = req->nbytes;
+
+	staging = (uint8_t *)ALIGN(((uintptr_t)rctx->staging_dmabuf),
+							L1_CACHE_BYTES);
+	memcpy(staging, rctx->trailing_buf, rctx->trailing_buf_len);
+	sg_set_buf(&rctx->sg[0], staging, rctx->trailing_buf_len);
+	sg_mark_end(&rctx->sg[0]);
+
+	req->src = &rctx->sg[0];
+	req->nbytes = rctx->trailing_buf_len;
+
+	ret =  _qcrypto_queue_req(cp, sha_ctx->pengine, &req->base);
+
+	return ret;
+};
+
+static int _sha1_final(struct ahash_request  *req)
+{
+	return _sha_final(req, SHA1_BLOCK_SIZE);
+}
+
+static int _sha256_final(struct ahash_request  *req)
+{
+	return _sha_final(req, SHA256_BLOCK_SIZE);
+}
+
+static int _sha_digest(struct ahash_request *req)
+{
+	struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm);
+	struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req);
+	struct crypto_priv *cp = sha_ctx->cp;
+	int ret = 0;
+
+	if (cp->ce_support.aligned_only) {
+		if (_copy_source(req))
+			return -ENOMEM;
+	}
+
+	/* save the original req structure fields */
+	rctx->src = req->src;
+	rctx->nbytes = req->nbytes;
+	rctx->first_blk = 1;
+	rctx->last_blk = 1;
+	ret =  _qcrypto_queue_req(cp, sha_ctx->pengine, &req->base);
+
+	return ret;
+}
+
+static int _sha1_digest(struct ahash_request *req)
+{
+	_sha1_init(req);
+	return _sha_digest(req);
+}
+
+static int _sha256_digest(struct ahash_request *req)
+{
+	_sha256_init(req);
+	return _sha_digest(req);
+}
+
+static void _crypto_sha_hmac_ahash_req_complete(
+	struct crypto_async_request *req, int err)
+{
+	struct completion *ahash_req_complete = req->data;
+
+	if (err == -EINPROGRESS)
+		return;
+	complete(ahash_req_complete);
+}
+
+static int _sha_hmac_setkey(struct crypto_ahash *tfm, const u8 *key,
+		unsigned int len)
+{
+	struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(&tfm->base);
+	uint8_t	*in_buf;
+	int ret = 0;
+	struct scatterlist sg;
+	struct ahash_request *ahash_req;
+	struct completion ahash_req_complete;
+
+	ahash_req = ahash_request_alloc(tfm, GFP_KERNEL);
+	if (ahash_req == NULL)
+		return -ENOMEM;
+	init_completion(&ahash_req_complete);
+	ahash_request_set_callback(ahash_req,
+				CRYPTO_TFM_REQ_MAY_BACKLOG,
+				_crypto_sha_hmac_ahash_req_complete,
+				&ahash_req_complete);
+	crypto_ahash_clear_flags(tfm, ~0);
+
+	in_buf = kzalloc(len + 64, GFP_KERNEL);
+	if (in_buf == NULL) {
+		pr_err("qcrypto: cannot allocate in_buf (%u bytes)\n",
+			len + 64);
+		ahash_request_free(ahash_req);
+		return -ENOMEM;
+	}
+	memcpy(in_buf, key, len);
+	sg_set_buf(&sg, in_buf, len);
+	sg_mark_end(&sg);
+
+	ahash_request_set_crypt(ahash_req, &sg,
+				&sha_ctx->authkey[0], len);
+
+	if (sha_ctx->alg == QCE_HASH_SHA1)
+		ret = _sha1_digest(ahash_req);
+	else
+		ret = _sha256_digest(ahash_req);
+	if (ret == -EINPROGRESS || ret == -EBUSY) {
+		ret = wait_for_completion_interruptible(
+					&ahash_req_complete);
+		/* reinit the local completion that was actually waited on */
+		reinit_completion(&ahash_req_complete);
+	}
+
+	kzfree(in_buf);
+	ahash_request_free(ahash_req);
+
+	return ret;
+}
+
+static int _sha1_hmac_setkey(struct crypto_ahash *tfm, const u8 *key,
+							unsigned int len)
+{
+	struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(&tfm->base);
+	int ret = 0;
+
+	memset(&sha_ctx->authkey[0], 0, SHA1_BLOCK_SIZE);
+	if (len <= SHA1_BLOCK_SIZE) {
+		memcpy(&sha_ctx->authkey[0], key, len);
+		sha_ctx->authkey_in_len = len;
+	} else {
+		sha_ctx->alg = QCE_HASH_SHA1;
+		sha_ctx->diglen = SHA1_DIGEST_SIZE;
+		ret = _sha_hmac_setkey(tfm, key, len);
+		if (ret)
+			pr_err("SHA1 hmac setkey failed\n");
+		sha_ctx->authkey_in_len = SHA1_BLOCK_SIZE;
+	}
+	return ret;
+}
+
+static int _sha256_hmac_setkey(struct crypto_ahash *tfm, const u8 *key,
+							unsigned int len)
+{
+	struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(&tfm->base);
+	int ret = 0;
+
+	memset(&sha_ctx->authkey[0], 0, SHA256_BLOCK_SIZE);
+	if (len <= SHA256_BLOCK_SIZE) {
+		memcpy(&sha_ctx->authkey[0], key, len);
+		sha_ctx->authkey_in_len = len;
+	} else {
+		sha_ctx->alg = QCE_HASH_SHA256;
+		sha_ctx->diglen = SHA256_DIGEST_SIZE;
+		ret = _sha_hmac_setkey(tfm, key, len);
+		if (ret)
+			pr_err("SHA256 hmac setkey failed\n");
+		sha_ctx->authkey_in_len = SHA256_BLOCK_SIZE;
+	}
+
+	return ret;
+}
+
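+/*
+ * Software HMAC path, used when the engine lacks sha_hmac support:
+ * seed the inner hash with K XOR ipad (0x36 repeated over one block),
+ * per RFC 2104.
+ */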
+static int _sha_hmac_init_ihash(struct ahash_request *req,
+						uint32_t sha_block_size)
+{
+	struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm);
+	struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req);
+	int i;
+
+	for (i = 0; i < sha_block_size; i++)
+		rctx->trailing_buf[i] = sha_ctx->authkey[i] ^ 0x36;
+	rctx->trailing_buf_len = sha_block_size;
+
+	return 0;
+}
+
+static int _sha1_hmac_init(struct ahash_request *req)
+{
+	struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm);
+	struct crypto_priv *cp = sha_ctx->cp;
+	struct crypto_stat *pstat;
+	int ret = 0;
+	struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req);
+
+	pstat = &_qcrypto_stat;
+	pstat->sha1_hmac_digest++;
+
+	_sha_init(req);
+	memset(&rctx->trailing_buf[0], 0x00, SHA1_BLOCK_SIZE);
+	memcpy(&rctx->digest[0], &_std_init_vector_sha1_uint8[0],
+						SHA1_DIGEST_SIZE);
+	sha_ctx->diglen = SHA1_DIGEST_SIZE;
+
+	if (cp->ce_support.sha_hmac) {
+		sha_ctx->alg = QCE_HASH_SHA1_HMAC;
+	} else {
+		sha_ctx->alg = QCE_HASH_SHA1;
+		ret = _sha_hmac_init_ihash(req, SHA1_BLOCK_SIZE);
+	}
+
+	return ret;
+}
+
+static int _sha256_hmac_init(struct ahash_request *req)
+{
+	struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm);
+	struct crypto_priv *cp = sha_ctx->cp;
+	struct crypto_stat *pstat;
+	int ret = 0;
+	struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req);
+
+	pstat = &_qcrypto_stat;
+	pstat->sha256_hmac_digest++;
+
+	_sha_init(req);
+
+	memset(&rctx->trailing_buf[0], 0x00, SHA256_BLOCK_SIZE);
+	memcpy(&rctx->digest[0], &_std_init_vector_sha256_uint8[0],
+						SHA256_DIGEST_SIZE);
+	sha_ctx->diglen = SHA256_DIGEST_SIZE;
+
+	if (cp->ce_support.sha_hmac) {
+		sha_ctx->alg = QCE_HASH_SHA256_HMAC;
+	} else {
+		sha_ctx->alg = QCE_HASH_SHA256;
+		ret = _sha_hmac_init_ihash(req, SHA256_BLOCK_SIZE);
+	}
+
+	return ret;
+}
+
+static int _sha1_hmac_update(struct ahash_request *req)
+{
+	return _sha1_update(req);
+}
+
+static int _sha256_hmac_update(struct ahash_request *req)
+{
+	return _sha256_update(req);
+}
+
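+/*
+ * Outer HMAC hash per RFC 2104: H((K XOR opad) || inner_digest), with
+ * opad being 0x5c repeated over one block.  The staging buffer holds
+ * the padded key followed by the inner digest.
+ */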
+static int _sha_hmac_outer_hash(struct ahash_request *req,
+		uint32_t sha_digest_size, uint32_t sha_block_size)
+{
+	struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm);
+	struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req);
+	struct crypto_priv *cp = sha_ctx->cp;
+	int i;
+	uint8_t  *staging;
+	uint8_t *p;
+
+	staging = (uint8_t *)ALIGN(((uintptr_t)rctx->staging_dmabuf),
+							L1_CACHE_BYTES);
+	p = staging;
+	for (i = 0; i < sha_block_size; i++)
+		*p++ = sha_ctx->authkey[i] ^ 0x5c;
+	memcpy(p, &rctx->digest[0], sha_digest_size);
+	sg_set_buf(&rctx->sg[0], staging, sha_block_size +
+							sha_digest_size);
+	sg_mark_end(&rctx->sg[0]);
+
+	/* save the original req structure fields */
+	rctx->src = req->src;
+	rctx->nbytes = req->nbytes;
+
+	req->src = &rctx->sg[0];
+	req->nbytes = sha_block_size + sha_digest_size;
+
+	_sha_init(req);
+	if (sha_ctx->alg == QCE_HASH_SHA1) {
+		memcpy(&rctx->digest[0], &_std_init_vector_sha1_uint8[0],
+							SHA1_DIGEST_SIZE);
+		sha_ctx->diglen = SHA1_DIGEST_SIZE;
+	} else {
+		memcpy(&rctx->digest[0], &_std_init_vector_sha256_uint8[0],
+							SHA256_DIGEST_SIZE);
+		sha_ctx->diglen = SHA256_DIGEST_SIZE;
+	}
+
+	rctx->last_blk = 1;
+	return  _qcrypto_queue_req(cp, sha_ctx->pengine, &req->base);
+}
+
+static int _sha_hmac_inner_hash(struct ahash_request *req,
+			uint32_t sha_digest_size, uint32_t sha_block_size)
+{
+	struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm);
+	struct ahash_request *areq = sha_ctx->ahash_req;
+	struct crypto_priv *cp = sha_ctx->cp;
+	int ret = 0;
+	struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req);
+	uint8_t  *staging;
+
+	staging = (uint8_t *)ALIGN(((uintptr_t)rctx->staging_dmabuf),
+							L1_CACHE_BYTES);
+	memcpy(staging, rctx->trailing_buf, rctx->trailing_buf_len);
+	sg_set_buf(&rctx->sg[0], staging, rctx->trailing_buf_len);
+	sg_mark_end(&rctx->sg[0]);
+
+	ahash_request_set_crypt(areq, &rctx->sg[0], &rctx->digest[0],
+						rctx->trailing_buf_len);
+	rctx->last_blk = 1;
+	ret =  _qcrypto_queue_req(cp, sha_ctx->pengine, &areq->base);
+
+	if (ret == -EINPROGRESS || ret == -EBUSY) {
+		ret =
+		wait_for_completion_interruptible(&sha_ctx->ahash_req_complete);
+		reinit_completion(&sha_ctx->ahash_req_complete);
+	}
+
+	return ret;
+}
+
+static int _sha1_hmac_final(struct ahash_request *req)
+{
+	struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm);
+	struct crypto_priv *cp = sha_ctx->cp;
+	int ret = 0;
+
+	if (cp->ce_support.sha_hmac)
+		return _sha_final(req, SHA1_BLOCK_SIZE);
+
+	ret = _sha_hmac_inner_hash(req, SHA1_DIGEST_SIZE,
+						SHA1_BLOCK_SIZE);
+	if (ret)
+		return ret;
+	return _sha_hmac_outer_hash(req, SHA1_DIGEST_SIZE,
+						SHA1_BLOCK_SIZE);
+}
+
+static int _sha256_hmac_final(struct ahash_request *req)
+{
+	struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm);
+	struct crypto_priv *cp = sha_ctx->cp;
+	int ret = 0;
+
+	if (cp->ce_support.sha_hmac)
+		return _sha_final(req, SHA256_BLOCK_SIZE);
+
+	ret = _sha_hmac_inner_hash(req, SHA256_DIGEST_SIZE,
+						SHA256_BLOCK_SIZE);
+	if (ret)
+		return ret;
+	return _sha_hmac_outer_hash(req, SHA256_DIGEST_SIZE,
+						SHA256_BLOCK_SIZE);
+}
+
+static int _sha1_hmac_digest(struct ahash_request *req)
+{
+	struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm);
+	struct crypto_stat *pstat;
+	struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req);
+
+	pstat = &_qcrypto_stat;
+	pstat->sha1_hmac_digest++;
+
+	_sha_init(req);
+	memcpy(&rctx->digest[0], &_std_init_vector_sha1_uint8[0],
+							SHA1_DIGEST_SIZE);
+	sha_ctx->diglen = SHA1_DIGEST_SIZE;
+	sha_ctx->alg = QCE_HASH_SHA1_HMAC;
+
+	return _sha_digest(req);
+}
+
+static int _sha256_hmac_digest(struct ahash_request *req)
+{
+	struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm);
+	struct crypto_stat *pstat;
+	struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req);
+
+	pstat = &_qcrypto_stat;
+	pstat->sha256_hmac_digest++;
+
+	_sha_init(req);
+	memcpy(&rctx->digest[0], &_std_init_vector_sha256_uint8[0],
+						SHA256_DIGEST_SIZE);
+	sha_ctx->diglen = SHA256_DIGEST_SIZE;
+	sha_ctx->alg = QCE_HASH_SHA256_HMAC;
+
+	return _sha_digest(req);
+}
+
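+/*
+ * Prefix "qcom-" to an algorithm name, presumably so the hardware
+ * algorithms can be registered without clashing with the generic
+ * software implementations of the same name.
+ */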
+static int _qcrypto_prefix_alg_cra_name(char cra_name[], unsigned int size)
+{
+	char new_cra_name[CRYPTO_MAX_ALG_NAME] = "qcom-";
+
+	if (size >= CRYPTO_MAX_ALG_NAME - strlen("qcom-"))
+		return -EINVAL;
+	strlcat(new_cra_name, cra_name, CRYPTO_MAX_ALG_NAME);
+	strlcpy(cra_name, new_cra_name, CRYPTO_MAX_ALG_NAME);
+	return 0;
+}
+
+int qcrypto_cipher_set_device(struct ablkcipher_request *req, unsigned int dev)
+{
+	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+	struct crypto_priv *cp = ctx->cp;
+	struct crypto_engine *pengine = NULL;
+
+	pengine = _qrypto_find_pengine_device(cp, dev);
+	if (pengine == NULL)
+		return -ENODEV;
+	ctx->pengine = pengine;
+
+	return 0;
+};
+EXPORT_SYMBOL(qcrypto_cipher_set_device);
+
+int qcrypto_cipher_set_device_hw(struct ablkcipher_request *req, u32 dev,
+			u32 hw_inst)
+{
+	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+	struct crypto_priv *cp = ctx->cp;
+	struct crypto_engine *pengine = NULL;
+
+	pengine = _qrypto_find_pengine_device_hw(cp, dev, hw_inst);
+	if (pengine == NULL)
+		return -ENODEV;
+	ctx->pengine = pengine;
+
+	return 0;
+}
+EXPORT_SYMBOL(qcrypto_cipher_set_device_hw);
+
+int qcrypto_aead_set_device(struct aead_request *req, unsigned int dev)
+{
+	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+	struct crypto_priv *cp = ctx->cp;
+	struct crypto_engine *pengine = NULL;
+
+	pengine = _qrypto_find_pengine_device(cp, dev);
+	if (pengine == NULL)
+		return -ENODEV;
+	ctx->pengine = pengine;
+
+	return 0;
+};
+EXPORT_SYMBOL(qcrypto_aead_set_device);
+
+int qcrypto_ahash_set_device(struct ahash_request *req, unsigned int dev)
+{
+	struct qcrypto_sha_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+	struct crypto_priv *cp = ctx->cp;
+	struct crypto_engine *pengine = NULL;
+
+	pengine = _qrypto_find_pengine_device(cp, dev);
+	if (pengine == NULL)
+		return -ENODEV;
+	ctx->pengine = pengine;
+
+	return 0;
+};
+EXPORT_SYMBOL(qcrypto_ahash_set_device);
+
+int qcrypto_cipher_set_flag(struct ablkcipher_request *req, unsigned int flags)
+{
+	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+	struct crypto_priv *cp = ctx->cp;
+
+	if ((flags & QCRYPTO_CTX_USE_HW_KEY) &&
+		(cp->platform_support.hw_key_support == false)) {
+		pr_err("%s HW key usage not supported\n", __func__);
+		return -EINVAL;
+	}
+	if (((flags | ctx->flags) & QCRYPTO_CTX_KEY_MASK) ==
+						QCRYPTO_CTX_KEY_MASK) {
+		pr_err("%s Cannot set all key flags\n", __func__);
+		return -EINVAL;
+	}
+
+	ctx->flags |= flags;
+	return 0;
+};
+EXPORT_SYMBOL(qcrypto_cipher_set_flag);
+
+int qcrypto_aead_set_flag(struct aead_request *req, unsigned int flags)
+{
+	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+	struct crypto_priv *cp = ctx->cp;
+
+	if ((flags & QCRYPTO_CTX_USE_HW_KEY) &&
+		(cp->platform_support.hw_key_support == false)) {
+		pr_err("%s HW key usage not supported\n", __func__);
+		return -EINVAL;
+	}
+	if (((flags | ctx->flags) & QCRYPTO_CTX_KEY_MASK) ==
+						QCRYPTO_CTX_KEY_MASK) {
+		pr_err("%s Cannot set all key flags\n", __func__);
+		return -EINVAL;
+	}
+
+	ctx->flags |= flags;
+	return 0;
+};
+EXPORT_SYMBOL(qcrypto_aead_set_flag);
+
+int qcrypto_ahash_set_flag(struct ahash_request *req, unsigned int flags)
+{
+	struct qcrypto_sha_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+	struct crypto_priv *cp = ctx->cp;
+
+	if ((flags & QCRYPTO_CTX_USE_HW_KEY) &&
+		(cp->platform_support.hw_key_support == false)) {
+		pr_err("%s HW key usage not supported\n", __func__);
+		return -EINVAL;
+	}
+	if (((flags | ctx->flags) & QCRYPTO_CTX_KEY_MASK) ==
+						QCRYPTO_CTX_KEY_MASK) {
+		pr_err("%s Cannot set all key flags\n", __func__);
+		return -EINVAL;
+	}
+
+	ctx->flags |= flags;
+	return 0;
+};
+EXPORT_SYMBOL(qcrypto_ahash_set_flag);
+
+int qcrypto_cipher_clear_flag(struct ablkcipher_request *req,
+							unsigned int flags)
+{
+	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+
+	ctx->flags &= ~flags;
+	return 0;
+
+};
+EXPORT_SYMBOL(qcrypto_cipher_clear_flag);
+
+int qcrypto_aead_clear_flag(struct aead_request *req, unsigned int flags)
+{
+	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+
+	ctx->flags &= ~flags;
+	return 0;
+
+};
+EXPORT_SYMBOL(qcrypto_aead_clear_flag);
+
+int qcrypto_ahash_clear_flag(struct ahash_request *req, unsigned int flags)
+{
+	struct qcrypto_sha_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+
+	ctx->flags &= ~flags;
+	return 0;
+}
+EXPORT_SYMBOL(qcrypto_ahash_clear_flag);
+
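+/* Plain SHA-1/SHA-256 ahash templates, registered from _qcrypto_probe(). */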
+static struct ahash_alg _qcrypto_ahash_algos[] = {
+	{
+		.init		=	_sha1_init,
+		.update		=	_sha1_update,
+		.final		=	_sha1_final,
+		.export		=	_sha1_export,
+		.import		=	_sha1_import,
+		.digest		=	_sha1_digest,
+		.halg		= {
+			.digestsize	= SHA1_DIGEST_SIZE,
+			.statesize	= sizeof(struct sha1_state),
+			.base	= {
+				.cra_name	 = "sha1",
+				.cra_driver_name = "qcrypto-sha1",
+				.cra_priority	 = 300,
+				.cra_flags	 = CRYPTO_ALG_TYPE_AHASH |
+							 CRYPTO_ALG_ASYNC,
+				.cra_blocksize	 = SHA1_BLOCK_SIZE,
+				.cra_ctxsize	 =
+						sizeof(struct qcrypto_sha_ctx),
+				.cra_alignmask	 = 0,
+				.cra_type	 = &crypto_ahash_type,
+				.cra_module	 = THIS_MODULE,
+				.cra_init	 = _qcrypto_ahash_cra_init,
+				.cra_exit	 = _qcrypto_ahash_cra_exit,
+			},
+		},
+	},
+	{
+		.init		=	_sha256_init,
+		.update		=	_sha256_update,
+		.final		=	_sha256_final,
+		.export		=	_sha256_export,
+		.import		=	_sha256_import,
+		.digest		=	_sha256_digest,
+		.halg		= {
+			.digestsize	= SHA256_DIGEST_SIZE,
+			.statesize	= sizeof(struct sha256_state),
+			.base		= {
+				.cra_name	 = "sha256",
+				.cra_driver_name = "qcrypto-sha256",
+				.cra_priority	 = 300,
+				.cra_flags	 = CRYPTO_ALG_TYPE_AHASH |
+							CRYPTO_ALG_ASYNC,
+				.cra_blocksize	 = SHA256_BLOCK_SIZE,
+				.cra_ctxsize	 =
+						sizeof(struct qcrypto_sha_ctx),
+				.cra_alignmask	 = 0,
+				.cra_type	 = &crypto_ahash_type,
+				.cra_module	 = THIS_MODULE,
+				.cra_init	 = _qcrypto_ahash_cra_init,
+				.cra_exit	 = _qcrypto_ahash_cra_exit,
+			},
+		},
+	},
+};
+
+static struct ahash_alg _qcrypto_sha_hmac_algos[] = {
+	{
+		.init		=	_sha1_hmac_init,
+		.update		=	_sha1_hmac_update,
+		.final		=	_sha1_hmac_final,
+		.export		=	_sha1_hmac_export,
+		.import		=	_sha1_hmac_import,
+		.digest		=	_sha1_hmac_digest,
+		.setkey		=	_sha1_hmac_setkey,
+		.halg		= {
+			.digestsize	= SHA1_DIGEST_SIZE,
+			.statesize	= sizeof(struct sha1_state),
+			.base	= {
+				.cra_name	 = "hmac(sha1)",
+				.cra_driver_name = "qcrypto-hmac-sha1",
+				.cra_priority	 = 300,
+				.cra_flags	 = CRYPTO_ALG_TYPE_AHASH |
+							 CRYPTO_ALG_ASYNC,
+				.cra_blocksize	 = SHA1_BLOCK_SIZE,
+				.cra_ctxsize	 =
+						sizeof(struct qcrypto_sha_ctx),
+				.cra_alignmask	 = 0,
+				.cra_type	 = &crypto_ahash_type,
+				.cra_module	 = THIS_MODULE,
+				.cra_init	 = _qcrypto_ahash_hmac_cra_init,
+				.cra_exit	 = _qcrypto_ahash_cra_exit,
+			},
+		},
+	},
+	{
+		.init		=	_sha256_hmac_init,
+		.update		=	_sha256_hmac_update,
+		.final		=	_sha256_hmac_final,
+		.export		=	_sha256_hmac_export,
+		.import		=	_sha256_hmac_import,
+		.digest		=	_sha256_hmac_digest,
+		.setkey		=	_sha256_hmac_setkey,
+		.halg		= {
+			.digestsize	= SHA256_DIGEST_SIZE,
+			.statesize	= sizeof(struct sha256_state),
+			.base		= {
+				.cra_name	 = "hmac(sha256)",
+				.cra_driver_name = "qcrypto-hmac-sha256",
+				.cra_priority	 = 300,
+				.cra_flags	 = CRYPTO_ALG_TYPE_AHASH |
+							CRYPTO_ALG_ASYNC,
+				.cra_blocksize	 = SHA256_BLOCK_SIZE,
+				.cra_ctxsize	 =
+						sizeof(struct qcrypto_sha_ctx),
+				.cra_alignmask	 = 0,
+				.cra_type	 = &crypto_ahash_type,
+				.cra_module	 = THIS_MODULE,
+				.cra_init	 = _qcrypto_ahash_hmac_cra_init,
+				.cra_exit	 = _qcrypto_ahash_cra_exit,
+			},
+		},
+	},
+};
+
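+/*
+ * ablkcipher templates (ECB/CBC/CTR for AES, ECB/CBC for DES and 3DES).
+ * The AES entries set CRYPTO_ALG_NEED_FALLBACK so a software cipher can
+ * service requests the crypto engine cannot handle.
+ */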
+static struct crypto_alg _qcrypto_ablk_cipher_algos[] = {
+	{
+		.cra_name		= "ecb(aes)",
+		.cra_driver_name	= "qcrypto-ecb-aes",
+		.cra_priority	= 300,
+		.cra_flags	= CRYPTO_ALG_TYPE_ABLKCIPHER |
+					CRYPTO_ALG_NEED_FALLBACK |
+					CRYPTO_ALG_ASYNC,
+		.cra_blocksize	= AES_BLOCK_SIZE,
+		.cra_ctxsize	= sizeof(struct qcrypto_cipher_ctx),
+		.cra_alignmask	= 0,
+		.cra_type	= &crypto_ablkcipher_type,
+		.cra_module	= THIS_MODULE,
+		.cra_init	= _qcrypto_cra_aes_ablkcipher_init,
+		.cra_exit	= _qcrypto_cra_aes_ablkcipher_exit,
+		.cra_u		= {
+			.ablkcipher = {
+				.min_keysize	= AES_MIN_KEY_SIZE,
+				.max_keysize	= AES_MAX_KEY_SIZE,
+				.setkey		= _qcrypto_setkey_aes,
+				.encrypt	= _qcrypto_enc_aes_ecb,
+				.decrypt	= _qcrypto_dec_aes_ecb,
+			},
+		},
+	},
+	{
+		.cra_name	= "cbc(aes)",
+		.cra_driver_name = "qcrypto-cbc-aes",
+		.cra_priority	= 300,
+		.cra_flags	= CRYPTO_ALG_TYPE_ABLKCIPHER |
+					CRYPTO_ALG_NEED_FALLBACK |
+					CRYPTO_ALG_ASYNC,
+		.cra_blocksize	= AES_BLOCK_SIZE,
+		.cra_ctxsize	= sizeof(struct qcrypto_cipher_ctx),
+		.cra_alignmask	= 0,
+		.cra_type	= &crypto_ablkcipher_type,
+		.cra_module	= THIS_MODULE,
+		.cra_init	= _qcrypto_cra_aes_ablkcipher_init,
+		.cra_exit	= _qcrypto_cra_aes_ablkcipher_exit,
+		.cra_u		= {
+			.ablkcipher = {
+				.ivsize		= AES_BLOCK_SIZE,
+				.min_keysize	= AES_MIN_KEY_SIZE,
+				.max_keysize	= AES_MAX_KEY_SIZE,
+				.setkey		= _qcrypto_setkey_aes,
+				.encrypt	= _qcrypto_enc_aes_cbc,
+				.decrypt	= _qcrypto_dec_aes_cbc,
+			},
+		},
+	},
+	{
+		.cra_name	= "ctr(aes)",
+		.cra_driver_name = "qcrypto-ctr-aes",
+		.cra_priority	= 300,
+		.cra_flags	= CRYPTO_ALG_TYPE_ABLKCIPHER |
+					CRYPTO_ALG_NEED_FALLBACK |
+					CRYPTO_ALG_ASYNC,
+		.cra_blocksize	= AES_BLOCK_SIZE,
+		.cra_ctxsize	= sizeof(struct qcrypto_cipher_ctx),
+		.cra_alignmask	= 0,
+		.cra_type	= &crypto_ablkcipher_type,
+		.cra_module	= THIS_MODULE,
+		.cra_init	= _qcrypto_cra_aes_ablkcipher_init,
+		.cra_exit	= _qcrypto_cra_aes_ablkcipher_exit,
+		.cra_u		= {
+			.ablkcipher = {
+				.ivsize		= AES_BLOCK_SIZE,
+				.min_keysize	= AES_MIN_KEY_SIZE,
+				.max_keysize	= AES_MAX_KEY_SIZE,
+				.setkey		= _qcrypto_setkey_aes,
+				.encrypt	= _qcrypto_enc_aes_ctr,
+				.decrypt	= _qcrypto_dec_aes_ctr,
+			},
+		},
+	},
+	{
+		.cra_name		= "ecb(des)",
+		.cra_driver_name	= "qcrypto-ecb-des",
+		.cra_priority	= 300,
+		.cra_flags	= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+		.cra_blocksize	= DES_BLOCK_SIZE,
+		.cra_ctxsize	= sizeof(struct qcrypto_cipher_ctx),
+		.cra_alignmask	= 0,
+		.cra_type	= &crypto_ablkcipher_type,
+		.cra_module	= THIS_MODULE,
+		.cra_init	= _qcrypto_cra_ablkcipher_init,
+		.cra_exit	= _qcrypto_cra_ablkcipher_exit,
+		.cra_u		= {
+			.ablkcipher = {
+				.min_keysize	= DES_KEY_SIZE,
+				.max_keysize	= DES_KEY_SIZE,
+				.setkey		= _qcrypto_setkey_des,
+				.encrypt	= _qcrypto_enc_des_ecb,
+				.decrypt	= _qcrypto_dec_des_ecb,
+			},
+		},
+	},
+	{
+		.cra_name	= "cbc(des)",
+		.cra_driver_name = "qcrypto-cbc-des",
+		.cra_priority	= 300,
+		.cra_flags	= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+		.cra_blocksize	= DES_BLOCK_SIZE,
+		.cra_ctxsize	= sizeof(struct qcrypto_cipher_ctx),
+		.cra_alignmask	= 0,
+		.cra_type	= &crypto_ablkcipher_type,
+		.cra_module	= THIS_MODULE,
+		.cra_init	= _qcrypto_cra_ablkcipher_init,
+		.cra_exit	= _qcrypto_cra_ablkcipher_exit,
+		.cra_u		= {
+			.ablkcipher = {
+				.ivsize		= DES_BLOCK_SIZE,
+				.min_keysize	= DES_KEY_SIZE,
+				.max_keysize	= DES_KEY_SIZE,
+				.setkey		= _qcrypto_setkey_des,
+				.encrypt	= _qcrypto_enc_des_cbc,
+				.decrypt	= _qcrypto_dec_des_cbc,
+			},
+		},
+	},
+	{
+		.cra_name		= "ecb(des3_ede)",
+		.cra_driver_name	= "qcrypto-ecb-3des",
+		.cra_priority	= 300,
+		.cra_flags	= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+		.cra_blocksize	= DES3_EDE_BLOCK_SIZE,
+		.cra_ctxsize	= sizeof(struct qcrypto_cipher_ctx),
+		.cra_alignmask	= 0,
+		.cra_type	= &crypto_ablkcipher_type,
+		.cra_module	= THIS_MODULE,
+		.cra_init	= _qcrypto_cra_ablkcipher_init,
+		.cra_exit	= _qcrypto_cra_ablkcipher_exit,
+		.cra_u		= {
+			.ablkcipher = {
+				.min_keysize	= DES3_EDE_KEY_SIZE,
+				.max_keysize	= DES3_EDE_KEY_SIZE,
+				.setkey		= _qcrypto_setkey_3des,
+				.encrypt	= _qcrypto_enc_3des_ecb,
+				.decrypt	= _qcrypto_dec_3des_ecb,
+			},
+		},
+	},
+	{
+		.cra_name	= "cbc(des3_ede)",
+		.cra_driver_name = "qcrypto-cbc-3des",
+		.cra_priority	= 300,
+		.cra_flags	= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+		.cra_blocksize	= DES3_EDE_BLOCK_SIZE,
+		.cra_ctxsize	= sizeof(struct qcrypto_cipher_ctx),
+		.cra_alignmask	= 0,
+		.cra_type	= &crypto_ablkcipher_type,
+		.cra_module	= THIS_MODULE,
+		.cra_init	= _qcrypto_cra_ablkcipher_init,
+		.cra_exit	= _qcrypto_cra_ablkcipher_exit,
+		.cra_u		= {
+			.ablkcipher = {
+				.ivsize		= DES3_EDE_BLOCK_SIZE,
+				.min_keysize	= DES3_EDE_KEY_SIZE,
+				.max_keysize	= DES3_EDE_KEY_SIZE,
+				.setkey		= _qcrypto_setkey_3des,
+				.encrypt	= _qcrypto_enc_3des_cbc,
+				.decrypt	= _qcrypto_dec_3des_cbc,
+			},
+		},
+	},
+};
+
+static struct crypto_alg _qcrypto_ablk_cipher_xts_algo = {
+	.cra_name	= "xts(aes)",
+	.cra_driver_name = "qcrypto-xts-aes",
+	.cra_priority	= 300,
+	.cra_flags	= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+	.cra_blocksize	= AES_BLOCK_SIZE,
+	.cra_ctxsize	= sizeof(struct qcrypto_cipher_ctx),
+	.cra_alignmask	= 0,
+	.cra_type	= &crypto_ablkcipher_type,
+	.cra_module	= THIS_MODULE,
+	.cra_init	= _qcrypto_cra_ablkcipher_init,
+	.cra_exit	= _qcrypto_cra_ablkcipher_exit,
+	.cra_u		= {
+		.ablkcipher = {
+			.ivsize		= AES_BLOCK_SIZE,
+			.min_keysize	= AES_MIN_KEY_SIZE,
+			.max_keysize	= AES_MAX_KEY_SIZE,
+			.setkey		= _qcrypto_setkey_aes_xts,
+			.encrypt	= _qcrypto_enc_aes_xts,
+			.decrypt	= _qcrypto_dec_aes_xts,
+		},
+	},
+};
+
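+/* authenc() AEAD templates pairing HMAC-SHA1 with CBC-mode AES/DES/3DES. */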
+static struct aead_alg _qcrypto_aead_sha1_hmac_algos[] = {
+	{
+		.base = {
+			.cra_name	 = "authenc(hmac(sha1),cbc(aes))",
+			.cra_driver_name = "qcrypto-aead-hmac-sha1-cbc-aes",
+			.cra_priority	 = 300,
+			.cra_flags	 = CRYPTO_ALG_ASYNC,
+			.cra_blocksize	 = AES_BLOCK_SIZE,
+			.cra_ctxsize	 = sizeof(struct qcrypto_cipher_ctx),
+			.cra_alignmask	 = 0,
+			.cra_module	 = THIS_MODULE,
+		},
+		.ivsize		= AES_BLOCK_SIZE,
+		.maxauthsize	= SHA1_DIGEST_SIZE,
+		.setkey		= _qcrypto_aead_setkey,
+		.setauthsize	= _qcrypto_aead_setauthsize,
+		.encrypt	= _qcrypto_aead_encrypt_aes_cbc,
+		.decrypt	= _qcrypto_aead_decrypt_aes_cbc,
+		.init		= _qcrypto_cra_aead_aes_sha1_init,
+		.exit		= _qcrypto_cra_aead_aes_exit,
+	},
+	{
+		.base = {
+			.cra_name	 = "authenc(hmac(sha1),cbc(des))",
+			.cra_driver_name = "qcrypto-aead-hmac-sha1-cbc-des",
+			.cra_priority	 = 300,
+			.cra_flags	 = CRYPTO_ALG_ASYNC,
+			.cra_blocksize	 = DES_BLOCK_SIZE,
+			.cra_ctxsize	 = sizeof(struct qcrypto_cipher_ctx),
+			.cra_alignmask	 = 0,
+			.cra_module	 = THIS_MODULE,
+		},
+		.ivsize		= DES_BLOCK_SIZE,
+		.maxauthsize	= SHA1_DIGEST_SIZE,
+		.setkey		= _qcrypto_aead_setkey,
+		.setauthsize	= _qcrypto_aead_setauthsize,
+		.encrypt	= _qcrypto_aead_encrypt_des_cbc,
+		.decrypt	= _qcrypto_aead_decrypt_des_cbc,
+		.init		= _qcrypto_cra_aead_sha1_init,
+		.exit		= _qcrypto_cra_aead_exit,
+	},
+	{
+		.base = {
+			.cra_name	 = "authenc(hmac(sha1),cbc(des3_ede))",
+			.cra_driver_name = "qcrypto-aead-hmac-sha1-cbc-3des",
+			.cra_priority	 = 300,
+			.cra_flags	 = CRYPTO_ALG_ASYNC,
+			.cra_blocksize	 = DES3_EDE_BLOCK_SIZE,
+			.cra_ctxsize	 = sizeof(struct qcrypto_cipher_ctx),
+			.cra_alignmask	 = 0,
+			.cra_module	 = THIS_MODULE,
+		},
+		.ivsize		= DES3_EDE_BLOCK_SIZE,
+		.maxauthsize	= SHA1_DIGEST_SIZE,
+		.setkey		= _qcrypto_aead_setkey,
+		.setauthsize	= _qcrypto_aead_setauthsize,
+		.encrypt	= _qcrypto_aead_encrypt_3des_cbc,
+		.decrypt	= _qcrypto_aead_decrypt_3des_cbc,
+		.init		= _qcrypto_cra_aead_sha1_init,
+		.exit		= _qcrypto_cra_aead_exit,
+	},
+};
+
+static struct aead_alg _qcrypto_aead_sha256_hmac_algos[] = {
+	{
+		.base = {
+			.cra_name	 = "authenc(hmac(sha256),cbc(aes))",
+			.cra_driver_name = "qcrypto-aead-hmac-sha256-cbc-aes",
+			.cra_priority	 = 300,
+			.cra_flags	 = CRYPTO_ALG_ASYNC,
+			.cra_blocksize	 = AES_BLOCK_SIZE,
+			.cra_ctxsize	 = sizeof(struct qcrypto_cipher_ctx),
+			.cra_alignmask	 = 0,
+			.cra_module	 = THIS_MODULE,
+		},
+		.ivsize		= AES_BLOCK_SIZE,
+		.maxauthsize	= SHA256_DIGEST_SIZE,
+		.setkey		= _qcrypto_aead_setkey,
+		.setauthsize	= _qcrypto_aead_setauthsize,
+		.encrypt	= _qcrypto_aead_encrypt_aes_cbc,
+		.decrypt	= _qcrypto_aead_decrypt_aes_cbc,
+		.init		= _qcrypto_cra_aead_aes_sha256_init,
+		.exit		= _qcrypto_cra_aead_aes_exit,
+	},
+	{
+		.base = {
+			.cra_name	 = "authenc(hmac(sha256),cbc(des))",
+			.cra_driver_name = "qcrypto-aead-hmac-sha256-cbc-des",
+			.cra_priority	 = 300,
+			.cra_flags	 = CRYPTO_ALG_ASYNC,
+			.cra_blocksize	 = DES_BLOCK_SIZE,
+			.cra_ctxsize	 = sizeof(struct qcrypto_cipher_ctx),
+			.cra_alignmask	 = 0,
+			.cra_module	 = THIS_MODULE,
+		},
+		.ivsize		= DES_BLOCK_SIZE,
+		.maxauthsize	= SHA256_DIGEST_SIZE,
+		.setkey		= _qcrypto_aead_setkey,
+		.setauthsize	= _qcrypto_aead_setauthsize,
+		.encrypt	= _qcrypto_aead_encrypt_des_cbc,
+		.decrypt	= _qcrypto_aead_decrypt_des_cbc,
+		.init		= _qcrypto_cra_aead_sha256_init,
+		.exit		= _qcrypto_cra_aead_exit,
+	},
+	{
+		.base = {
+			.cra_name	 = "authenc(hmac(sha256),cbc(des3_ede))",
+			.cra_driver_name = "qcrypto-aead-hmac-sha256-cbc-3des",
+			.cra_priority	 = 300,
+			.cra_flags	 = CRYPTO_ALG_ASYNC,
+			.cra_blocksize	 = DES3_EDE_BLOCK_SIZE,
+			.cra_ctxsize	 = sizeof(struct qcrypto_cipher_ctx),
+			.cra_alignmask	 = 0,
+			.cra_module	 = THIS_MODULE,
+		},
+		.ivsize		= DES3_EDE_BLOCK_SIZE,
+		.maxauthsize	= SHA256_DIGEST_SIZE,
+		.setkey		= _qcrypto_aead_setkey,
+		.setauthsize	= _qcrypto_aead_setauthsize,
+		.encrypt	= _qcrypto_aead_encrypt_3des_cbc,
+		.decrypt	= _qcrypto_aead_decrypt_3des_cbc,
+		.init		= _qcrypto_cra_aead_sha256_init,
+		.exit		= _qcrypto_cra_aead_exit,
+	},
+};
+
+static struct aead_alg _qcrypto_aead_ccm_algo = {
+	.base = {
+		.cra_name	= "ccm(aes)",
+		.cra_driver_name = "qcrypto-aes-ccm",
+		.cra_priority	= 300,
+		.cra_flags	= CRYPTO_ALG_ASYNC,
+		.cra_blocksize  = AES_BLOCK_SIZE,
+		.cra_ctxsize	= sizeof(struct qcrypto_cipher_ctx),
+		.cra_alignmask	= 0,
+		.cra_module	= THIS_MODULE,
+	},
+	.ivsize         = AES_BLOCK_SIZE,
+	.maxauthsize    = AES_BLOCK_SIZE,
+	.setkey = _qcrypto_aead_ccm_setkey,
+	.setauthsize = _qcrypto_aead_ccm_setauthsize,
+	.encrypt = _qcrypto_aead_encrypt_aes_ccm,
+	.decrypt = _qcrypto_aead_decrypt_aes_ccm,
+	.init	= _qcrypto_cra_aead_ccm_init,
+	.exit	= _qcrypto_cra_aead_exit,
+};
+
+static struct aead_alg _qcrypto_aead_rfc4309_ccm_algo = {
+	.base = {
+		.cra_name	= "rfc4309(ccm(aes))",
+		.cra_driver_name = "qcrypto-rfc4309-aes-ccm",
+		.cra_priority	= 300,
+		.cra_flags	= CRYPTO_ALG_ASYNC,
+		.cra_blocksize  = 1,
+		.cra_ctxsize	= sizeof(struct qcrypto_cipher_ctx),
+		.cra_alignmask	= 0,
+		.cra_module	= THIS_MODULE,
+	},
+	.ivsize         = 8,
+	.maxauthsize    = 16,
+	.setkey = _qcrypto_aead_rfc4309_ccm_setkey,
+	.setauthsize = _qcrypto_aead_rfc4309_ccm_setauthsize,
+	.encrypt = _qcrypto_aead_rfc4309_enc_aes_ccm,
+	.decrypt = _qcrypto_aead_rfc4309_dec_aes_ccm,
+	.init	= _qcrypto_cra_aead_rfc4309_ccm_init,
+	.exit	= _qcrypto_cra_aead_exit,
+};
+
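+/*
+ * Probe one crypto engine instance: open the QCE back end, add the engine
+ * to the global engine list and set up its bandwidth-voting state, then,
+ * for the first engine only, register with the crypto API every cipher,
+ * hash and AEAD algorithm the hardware reports as supported.
+ */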
+static int _qcrypto_probe(struct platform_device *pdev)
+{
+	int rc = 0;
+	void *handle;
+	struct crypto_priv *cp = &qcrypto_dev;
+	int i;
+	struct msm_ce_hw_support *platform_support;
+	struct crypto_engine *pengine;
+	unsigned long flags;
+	struct qcrypto_req_control *pqcrypto_req_control = NULL;
+
+	pengine = kzalloc(sizeof(*pengine), GFP_KERNEL);
+	if (!pengine) {
+		pr_err("qcrypto: failed to allocate crypto engine\n");
+		return -ENOMEM;
+	}
+
+	/* open qce */
+	handle = qce_open(pdev, &rc);
+	if (handle == NULL) {
+		kzfree(pengine);
+		platform_set_drvdata(pdev, NULL);
+		return rc;
+	}
+
+	platform_set_drvdata(pdev, pengine);
+	pengine->qce = handle;
+	pengine->pcp = cp;
+	pengine->pdev = pdev;
+	pengine->signature = 0xdeadbeef;
+
+	init_timer(&(pengine->bw_reaper_timer));
+	INIT_WORK(&pengine->bw_reaper_ws, qcrypto_bw_reaper_work);
+	pengine->bw_reaper_timer.function =
+			qcrypto_bw_reaper_timer_callback;
+	INIT_WORK(&pengine->bw_allocate_ws, qcrypto_bw_allocate_work);
+	pengine->high_bw_req = false;
+	pengine->active_seq = 0;
+	pengine->last_active_seq = 0;
+	pengine->check_flag = false;
+	pengine->max_req_used = 0;
+	pengine->issue_req = false;
+
+	crypto_init_queue(&pengine->req_queue, MSM_QCRYPTO_REQ_QUEUE_LENGTH);
+
+	mutex_lock(&cp->engine_lock);
+	cp->total_units++;
+	pengine->unit = cp->total_units;
+
+	spin_lock_irqsave(&cp->lock, flags);
+	pengine->first_engine = list_empty(&cp->engine_list);
+	if (pengine->first_engine)
+		cp->first_engine = pengine;
+	list_add_tail(&pengine->elist, &cp->engine_list);
+	cp->next_engine = pengine;
+	spin_unlock_irqrestore(&cp->lock, flags);
+
+	qce_hw_support(pengine->qce, &cp->ce_support);
+	pengine->ce_hw_instance = cp->ce_support.ce_hw_instance;
+	pengine->max_req = cp->ce_support.max_request;
+	pqcrypto_req_control = kcalloc(pengine->max_req,
+			sizeof(*pqcrypto_req_control), GFP_KERNEL);
+	if (pqcrypto_req_control == NULL) {
+		rc = -ENOMEM;
+		goto err;
+	}
+	qcrypto_init_req_control(pengine, pqcrypto_req_control);
+	if (cp->ce_support.bam)	 {
+		cp->platform_support.ce_shared = cp->ce_support.is_shared;
+		cp->platform_support.shared_ce_resource = 0;
+		cp->platform_support.hw_key_support = cp->ce_support.hw_key;
+		cp->platform_support.sha_hmac = 1;
+
+		cp->platform_support.bus_scale_table =
+			(struct msm_bus_scale_pdata *)
+					msm_bus_cl_get_pdata(pdev);
+		if (!cp->platform_support.bus_scale_table)
+			pr_warn("bus_scale_table is NULL\n");
+
+		pengine->ce_device = cp->ce_support.ce_device;
+
+	} else {
+		platform_support =
+			(struct msm_ce_hw_support *)pdev->dev.platform_data;
+		cp->platform_support.ce_shared = platform_support->ce_shared;
+		cp->platform_support.shared_ce_resource =
+				platform_support->shared_ce_resource;
+		cp->platform_support.hw_key_support =
+				platform_support->hw_key_support;
+		cp->platform_support.bus_scale_table =
+				platform_support->bus_scale_table;
+		cp->platform_support.sha_hmac = platform_support->sha_hmac;
+	}
+
+	pengine->bus_scale_handle = 0;
+
+	if (cp->platform_support.bus_scale_table != NULL) {
+		pengine->bus_scale_handle =
+			msm_bus_scale_register_client(
+				(struct msm_bus_scale_pdata *)
+					cp->platform_support.bus_scale_table);
+		if (!pengine->bus_scale_handle) {
+			pr_err("%s: failed to register bus scale client\n",
+				__func__);
+			rc = -ENOMEM;
+			goto err;
+		}
+		pengine->bw_state = BUS_NO_BANDWIDTH;
+	} else {
+		pengine->bw_state = BUS_HAS_BANDWIDTH;
+	}
+
+	if (cp->total_units != 1) {
+		mutex_unlock(&cp->engine_lock);
+		return 0;
+	}
+
+	/* register crypto cipher algorithms the device supports */
+	for (i = 0; i < ARRAY_SIZE(_qcrypto_ablk_cipher_algos); i++) {
+		struct qcrypto_alg *q_alg;
+
+		q_alg = _qcrypto_cipher_alg_alloc(cp,
+					&_qcrypto_ablk_cipher_algos[i]);
+		if (IS_ERR(q_alg)) {
+			rc = PTR_ERR(q_alg);
+			goto err;
+		}
+		if (cp->ce_support.use_sw_aes_cbc_ecb_ctr_algo) {
+			rc = _qcrypto_prefix_alg_cra_name(
+					q_alg->cipher_alg.cra_name,
+					strlen(q_alg->cipher_alg.cra_name));
+			if (rc) {
+				dev_err(&pdev->dev,
+					"The algorithm name %s is too long.\n",
+					q_alg->cipher_alg.cra_name);
+				kfree(q_alg);
+				goto err;
+			}
+		}
+		rc = crypto_register_alg(&q_alg->cipher_alg);
+		if (rc) {
+			dev_err(&pdev->dev, "%s alg registration failed\n",
+					q_alg->cipher_alg.cra_driver_name);
+			kzfree(q_alg);
+		} else {
+			list_add_tail(&q_alg->entry, &cp->alg_list);
+			dev_info(&pdev->dev, "%s\n",
+					q_alg->cipher_alg.cra_driver_name);
+		}
+	}
+
+	/* register crypto cipher algorithms the device supports */
+	if (cp->ce_support.aes_xts) {
+		struct qcrypto_alg *q_alg;
+
+		q_alg = _qcrypto_cipher_alg_alloc(cp,
+					&_qcrypto_ablk_cipher_xts_algo);
+		if (IS_ERR(q_alg)) {
+			rc = PTR_ERR(q_alg);
+			goto err;
+		}
+		if (cp->ce_support.use_sw_aes_xts_algo) {
+			rc = _qcrypto_prefix_alg_cra_name(
+					q_alg->cipher_alg.cra_name,
+					strlen(q_alg->cipher_alg.cra_name));
+			if (rc) {
+				dev_err(&pdev->dev,
+					"The algorithm name %s is too long.\n",
+					q_alg->cipher_alg.cra_name);
+				kfree(q_alg);
+				goto err;
+			}
+		}
+		rc = crypto_register_alg(&q_alg->cipher_alg);
+		if (rc) {
+			dev_err(&pdev->dev, "%s alg registration failed\n",
+					q_alg->cipher_alg.cra_driver_name);
+			kzfree(q_alg);
+		} else {
+			list_add_tail(&q_alg->entry, &cp->alg_list);
+			dev_info(&pdev->dev, "%s\n",
+					q_alg->cipher_alg.cra_driver_name);
+		}
+	}
+
+	/*
+	 * Register crypto hash (sha1 and sha256) algorithms the
+	 * device supports
+	 */
+	for (i = 0; i < ARRAY_SIZE(_qcrypto_ahash_algos); i++) {
+		struct qcrypto_alg *q_alg = NULL;
+
+		q_alg = _qcrypto_sha_alg_alloc(cp, &_qcrypto_ahash_algos[i]);
+
+		if (IS_ERR(q_alg)) {
+			rc = PTR_ERR(q_alg);
+			goto err;
+		}
+		if (cp->ce_support.use_sw_ahash_algo) {
+			rc = _qcrypto_prefix_alg_cra_name(
+				q_alg->sha_alg.halg.base.cra_name,
+				strlen(q_alg->sha_alg.halg.base.cra_name));
+			if (rc) {
+				dev_err(&pdev->dev,
+					"The algorithm name %s is too long.\n",
+					q_alg->sha_alg.halg.base.cra_name);
+				kfree(q_alg);
+				goto err;
+			}
+		}
+		rc = crypto_register_ahash(&q_alg->sha_alg);
+		if (rc) {
+			dev_err(&pdev->dev, "%s alg registration failed\n",
+				q_alg->sha_alg.halg.base.cra_driver_name);
+			kzfree(q_alg);
+		} else {
+			list_add_tail(&q_alg->entry, &cp->alg_list);
+			dev_info(&pdev->dev, "%s\n",
+				q_alg->sha_alg.halg.base.cra_driver_name);
+		}
+	}
+
+	/* register crypto aead (hmac-sha1) algorithms the device supports */
+	if (cp->ce_support.sha1_hmac_20 || cp->ce_support.sha1_hmac
+		|| cp->ce_support.sha_hmac) {
+		for (i = 0; i < ARRAY_SIZE(_qcrypto_aead_sha1_hmac_algos);
+									i++) {
+			struct qcrypto_alg *q_alg;
+
+			q_alg = _qcrypto_aead_alg_alloc(cp,
+					&_qcrypto_aead_sha1_hmac_algos[i]);
+			if (IS_ERR(q_alg)) {
+				rc = PTR_ERR(q_alg);
+				goto err;
+			}
+			if (cp->ce_support.use_sw_aead_algo) {
+				rc = _qcrypto_prefix_alg_cra_name(
+					q_alg->aead_alg.base.cra_name,
+					strlen(q_alg->aead_alg.base.cra_name));
+				if (rc) {
+					dev_err(&pdev->dev,
+						"The algorithm name %s is too long.\n",
+						q_alg->aead_alg.base.cra_name);
+					kfree(q_alg);
+					goto err;
+				}
+			}
+			rc = crypto_register_aead(&q_alg->aead_alg);
+			if (rc) {
+				dev_err(&pdev->dev,
+					"%s alg registration failed\n",
+					q_alg->aead_alg.base.cra_driver_name);
+				kfree(q_alg);
+			} else {
+				list_add_tail(&q_alg->entry, &cp->alg_list);
+				dev_info(&pdev->dev, "%s\n",
+					q_alg->aead_alg.base.cra_driver_name);
+			}
+		}
+	}
+
+	/* register crypto aead (hmac-sha256) algorithms the device supports */
+	if (cp->ce_support.sha_hmac) {
+		for (i = 0; i < ARRAY_SIZE(_qcrypto_aead_sha256_hmac_algos);
+									i++) {
+			struct qcrypto_alg *q_alg;
+
+			q_alg = _qcrypto_aead_alg_alloc(cp,
+					&_qcrypto_aead_sha256_hmac_algos[i]);
+			if (IS_ERR(q_alg)) {
+				rc = PTR_ERR(q_alg);
+				goto err;
+			}
+			if (cp->ce_support.use_sw_aead_algo) {
+				rc = _qcrypto_prefix_alg_cra_name(
+					q_alg->aead_alg.base.cra_name,
+					strlen(q_alg->aead_alg.base.cra_name));
+				if (rc) {
+					dev_err(&pdev->dev,
+						"The algorithm name %s is too long.\n",
+						q_alg->aead_alg.base.cra_name);
+					kfree(q_alg);
+					goto err;
+				}
+			}
+			rc = crypto_register_aead(&q_alg->aead_alg);
+			if (rc) {
+				dev_err(&pdev->dev,
+					"%s alg registration failed\n",
+					q_alg->aead_alg.base.cra_driver_name);
+				kfree(q_alg);
+			} else {
+				list_add_tail(&q_alg->entry, &cp->alg_list);
+				dev_info(&pdev->dev, "%s\n",
+					q_alg->aead_alg.base.cra_driver_name);
+			}
+		}
+	}
+
+	if ((cp->ce_support.sha_hmac) || (cp->platform_support.sha_hmac)) {
+		/* register crypto hmac algorithms the device supports */
+		for (i = 0; i < ARRAY_SIZE(_qcrypto_sha_hmac_algos); i++) {
+			struct qcrypto_alg *q_alg = NULL;
+
+			q_alg = _qcrypto_sha_alg_alloc(cp,
+						&_qcrypto_sha_hmac_algos[i]);
+
+			if (IS_ERR(q_alg)) {
+				rc = PTR_ERR(q_alg);
+				goto err;
+			}
+			if (cp->ce_support.use_sw_hmac_algo) {
+				rc = _qcrypto_prefix_alg_cra_name(
+					q_alg->sha_alg.halg.base.cra_name,
+					strlen(
+					q_alg->sha_alg.halg.base.cra_name));
+				if (rc) {
+					dev_err(&pdev->dev,
+					     "The algorithm name %s is too long.\n",
+					     q_alg->sha_alg.halg.base.cra_name);
+					kfree(q_alg);
+					goto err;
+				}
+			}
+			rc = crypto_register_ahash(&q_alg->sha_alg);
+			if (rc) {
+				dev_err(&pdev->dev,
+				"%s alg registration failed\n",
+				q_alg->sha_alg.halg.base.cra_driver_name);
+				kzfree(q_alg);
+			} else {
+				list_add_tail(&q_alg->entry, &cp->alg_list);
+				dev_info(&pdev->dev, "%s\n",
+				q_alg->sha_alg.halg.base.cra_driver_name);
+			}
+		}
+	}
+	/*
+	 * Register crypto cipher (aes-ccm) algorithms the
+	 * device supports
+	 */
+	if (cp->ce_support.aes_ccm) {
+		struct qcrypto_alg *q_alg;
+
+		q_alg = _qcrypto_aead_alg_alloc(cp, &_qcrypto_aead_ccm_algo);
+		if (IS_ERR(q_alg)) {
+			rc = PTR_ERR(q_alg);
+			goto err;
+		}
+		if (cp->ce_support.use_sw_aes_ccm_algo) {
+			rc = _qcrypto_prefix_alg_cra_name(
+					q_alg->aead_alg.base.cra_name,
+					strlen(q_alg->aead_alg.base.cra_name));
+			if (rc) {
+				dev_err(&pdev->dev,
+						"The algorithm name %s is too long.\n",
+						q_alg->aead_alg.base.cra_name);
+				kfree(q_alg);
+				goto err;
+			}
+		}
+		rc = crypto_register_aead(&q_alg->aead_alg);
+		if (rc) {
+			dev_err(&pdev->dev, "%s alg registration failed\n",
+					q_alg->aead_alg.base.cra_driver_name);
+			kzfree(q_alg);
+		} else {
+			list_add_tail(&q_alg->entry, &cp->alg_list);
+			dev_info(&pdev->dev, "%s\n",
+					q_alg->aead_alg.base.cra_driver_name);
+		}
+
+		q_alg = _qcrypto_aead_alg_alloc(cp,
+					&_qcrypto_aead_rfc4309_ccm_algo);
+		if (IS_ERR(q_alg)) {
+			rc = PTR_ERR(q_alg);
+			goto err;
+		}
+
+		if (cp->ce_support.use_sw_aes_ccm_algo) {
+			rc = _qcrypto_prefix_alg_cra_name(
+					q_alg->aead_alg.base.cra_name,
+					strlen(q_alg->aead_alg.base.cra_name));
+			if (rc) {
+				dev_err(&pdev->dev,
+						"The algorithm name %s is too long.\n",
+						q_alg->aead_alg.base.cra_name);
+				kfree(q_alg);
+				goto err;
+			}
+		}
+		rc = crypto_register_aead(&q_alg->aead_alg);
+		if (rc) {
+			dev_err(&pdev->dev, "%s alg registration failed\n",
+					q_alg->aead_alg.base.cra_driver_name);
+			kfree(q_alg);
+		} else {
+			list_add_tail(&q_alg->entry, &cp->alg_list);
+			dev_info(&pdev->dev, "%s\n",
+					q_alg->aead_alg.base.cra_driver_name);
+		}
+	}
+	mutex_unlock(&cp->engine_lock);
+
+
+	return 0;
+err:
+	_qcrypto_remove_engine(pengine);
+	mutex_unlock(&cp->engine_lock);
+	if (pengine->qce)
+		qce_close(pengine->qce);
+	kzfree(pengine);
+	return rc;
+}
+
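+/* An engine counts as busy if requests are in flight or still queued. */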
+static int _qcrypto_engine_in_use(struct crypto_engine *pengine)
+{
+	struct crypto_priv *cp = pengine->pcp;
+
+	if ((atomic_read(&pengine->req_count) > 0) || pengine->req_queue.qlen
+					|| cp->req_queue.qlen)
+		return 1;
+	return 0;
+}
+
+static void _qcrypto_do_suspending(struct crypto_engine *pengine)
+{
+	struct crypto_priv *cp = pengine->pcp;
+
+	if (cp->platform_support.bus_scale_table == NULL)
+		return;
+	del_timer_sync(&pengine->bw_reaper_timer);
+	qcrypto_ce_set_bus(pengine, false);
+}
+
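+/*
+ * Suspend is gated on the bandwidth state machine: an idle engine is moved
+ * through BUS_SUSPENDING (bus vote dropped, reaper timer stopped) into
+ * BUS_SUSPENDED; an engine with pending work returns -EBUSY instead.
+ */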
+static int _qcrypto_suspend(struct platform_device *pdev, pm_message_t state)
+{
+	int ret = 0;
+	struct crypto_engine *pengine;
+	struct crypto_priv *cp;
+	unsigned long flags;
+
+	pengine = platform_get_drvdata(pdev);
+	if (!pengine)
+		return -EINVAL;
+
+	/*
+	 * Check if this platform supports clock management in suspend/resume
+	 * If not, just simply return 0.
+	 */
+	cp = pengine->pcp;
+	if (!cp->ce_support.clk_mgmt_sus_res)
+		return 0;
+	spin_lock_irqsave(&cp->lock, flags);
+	switch (pengine->bw_state) {
+	case BUS_NO_BANDWIDTH:
+		if (!pengine->high_bw_req)
+			pengine->bw_state = BUS_SUSPENDED;
+		else
+			ret = -EBUSY;
+		break;
+	case BUS_HAS_BANDWIDTH:
+		if (_qcrypto_engine_in_use(pengine)) {
+			ret = -EBUSY;
+		} else {
+			pengine->bw_state = BUS_SUSPENDING;
+			spin_unlock_irqrestore(&cp->lock, flags);
+			_qcrypto_do_suspending(pengine);
+			spin_lock_irqsave(&cp->lock, flags);
+			pengine->bw_state = BUS_SUSPENDED;
+		}
+		break;
+	case BUS_BANDWIDTH_RELEASING:
+	case BUS_BANDWIDTH_ALLOCATING:
+	case BUS_SUSPENDED:
+	case BUS_SUSPENDING:
+	default:
+		ret = -EBUSY;
+		break;
+	}
+
+	spin_unlock_irqrestore(&cp->lock, flags);
+	if (ret)
+		return ret;
+
+	if (qce_pm_table.suspend)
+		qce_pm_table.suspend(pengine->qce);
+	return 0;
+}
+
+static int _qcrypto_resume(struct platform_device *pdev)
+{
+	struct crypto_engine *pengine;
+	struct crypto_priv *cp;
+	unsigned long flags;
+	int ret = 0;
+
+	pengine = platform_get_drvdata(pdev);
+
+	if (!pengine)
+		return -EINVAL;
+	cp = pengine->pcp;
+	if (!cp->ce_support.clk_mgmt_sus_res)
+		return 0;
+	spin_lock_irqsave(&cp->lock, flags);
+	if (pengine->bw_state == BUS_SUSPENDED) {
+		spin_unlock_irqrestore(&cp->lock, flags);
+		if (qce_pm_table.resume)
+			qce_pm_table.resume(pengine->qce);
+
+		spin_lock_irqsave(&cp->lock, flags);
+		pengine->bw_state = BUS_NO_BANDWIDTH;
+		pengine->active_seq++;
+		pengine->check_flag = false;
+		if (cp->req_queue.qlen || pengine->req_queue.qlen) {
+			if (!pengine->high_bw_req) {
+				qcrypto_ce_bw_allocate_req(pengine);
+				pengine->high_bw_req = true;
+			}
+		}
+	} else {
+		ret = -EBUSY;
+	}
+
+	spin_unlock_irqrestore(&cp->lock, flags);
+	return ret;
+}
+
+static const struct of_device_id qcrypto_match[] = {
+	{ .compatible = "qcom,qcrypto", },
+	{}
+};
+
+static struct platform_driver _qualcomm_crypto = {
+	.probe          = _qcrypto_probe,
+	.remove         = _qcrypto_remove,
+	.suspend        = _qcrypto_suspend,
+	.resume         = _qcrypto_resume,
+	.driver         = {
+		.owner  = THIS_MODULE,
+		.name   = "qcrypto",
+		.of_match_table = qcrypto_match,
+	},
+};
+
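+/*
+ * debugfs interface: reading the stats file returns the text produced by
+ * _disp_stats(); writing anything to it clears the per-engine and global
+ * counters.
+ */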
+static int _debug_qcrypto;
+
+static int _debug_stats_open(struct inode *inode, struct file *file)
+{
+	file->private_data = inode->i_private;
+	return 0;
+}
+
+static ssize_t _debug_stats_read(struct file *file, char __user *buf,
+			size_t count, loff_t *ppos)
+{
+	int rc = -EINVAL;
+	int qcrypto = *((int *) file->private_data);
+	int len;
+
+	len = _disp_stats(qcrypto);
+
+	if (len <= count)
+		rc = simple_read_from_buffer((void __user *) buf, len,
+			ppos, (void *) _debug_read_buf, len);
+	return rc;
+}
+
+static ssize_t _debug_stats_write(struct file *file, const char __user *buf,
+			size_t count, loff_t *ppos)
+{
+	unsigned long flags;
+	struct crypto_priv *cp = &qcrypto_dev;
+	struct crypto_engine *pe;
+	int i;
+
+	memset((char *)&_qcrypto_stat, 0, sizeof(struct crypto_stat));
+	spin_lock_irqsave(&cp->lock, flags);
+	list_for_each_entry(pe, &cp->engine_list, elist) {
+		pe->total_req = 0;
+		pe->err_req = 0;
+		qce_clear_driver_stats(pe->qce);
+		pe->max_req_used = 0;
+	}
+	cp->max_qlen = 0;
+	cp->resp_start = 0;
+	cp->resp_stop = 0;
+	cp->no_avail = 0;
+	cp->max_resp_qlen = 0;
+	cp->queue_work_eng3 = 0;
+	cp->queue_work_not_eng3 = 0;
+	cp->queue_work_not_eng3_nz = 0;
+	cp->max_reorder_cnt = 0;
+	for (i = 0; i < MAX_SMP_CPU + 1; i++)
+		cp->cpu_req[i] = 0;
+	spin_unlock_irqrestore(&cp->lock, flags);
+	return count;
+}
+
+static const struct file_operations _debug_stats_ops = {
+	.open =         _debug_stats_open,
+	.read =         _debug_stats_read,
+	.write =        _debug_stats_write,
+};
+
+static int _qcrypto_debug_init(void)
+{
+	int rc;
+	char name[DEBUG_MAX_FNAME];
+	struct dentry *dent;
+
+	_debug_dent = debugfs_create_dir("qcrypto", NULL);
+	if (IS_ERR(_debug_dent)) {
+		pr_err("qcrypto debugfs_create_dir fail, error %ld\n",
+				PTR_ERR(_debug_dent));
+		return PTR_ERR(_debug_dent);
+	}
+
+	snprintf(name, DEBUG_MAX_FNAME-1, "stats-%d", 1);
+	_debug_qcrypto = 0;
+	dent = debugfs_create_file(name, 0644, _debug_dent,
+				&_debug_qcrypto, &_debug_stats_ops);
+	if (dent == NULL) {
+		pr_err("qcrypto debugfs_create_file failed\n");
+		rc = -ENOMEM;
+		goto err;
+	}
+	return 0;
+err:
+	debugfs_remove_recursive(_debug_dent);
+	return rc;
+}
+
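+/*
+ * Module init: create the debugfs nodes, initialize the shared driver
+ * state and the high-priority response workqueue, then register the
+ * platform driver so engines are probed as their devices appear.
+ */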
+static int __init _qcrypto_init(void)
+{
+	int rc;
+	struct crypto_priv *pcp = &qcrypto_dev;
+
+	rc = _qcrypto_debug_init();
+	if (rc)
+		return rc;
+	INIT_LIST_HEAD(&pcp->alg_list);
+	INIT_LIST_HEAD(&pcp->engine_list);
+	init_llist_head(&pcp->ordered_resp_list);
+	spin_lock_init(&pcp->lock);
+	mutex_init(&pcp->engine_lock);
+	pcp->resp_wq = alloc_workqueue("qcrypto_seq_response_wq",
+			WQ_MEM_RECLAIM | WQ_HIGHPRI | WQ_CPU_INTENSIVE, 1);
+	if (!pcp->resp_wq) {
+		pr_err("Error allocating workqueue\n");
+		return -ENOMEM;
+	}
+	INIT_WORK(&pcp->resp_work, seq_response);
+	pcp->total_units = 0;
+	pcp->platform_support.bus_scale_table = NULL;
+	pcp->next_engine = NULL;
+	pcp->scheduled_eng = NULL;
+	pcp->ce_req_proc_sts = IN_PROGRESS;
+	crypto_init_queue(&pcp->req_queue, MSM_QCRYPTO_REQ_QUEUE_LENGTH);
+	return platform_driver_register(&_qualcomm_crypto);
+}
+
+static void __exit _qcrypto_exit(void)
+{
+	pr_debug("%s Unregister QCRYPTO\n", __func__);
+	debugfs_remove_recursive(_debug_dent);
+	platform_driver_unregister(&_qualcomm_crypto);
+}
+
+module_init(_qcrypto_init);
+module_exit(_qcrypto_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Qualcomm Crypto driver");
diff -Nruw linux-4.4.115-fbx/drivers/crypto/msm./qcryptohw_50.h linux-4.4.115-fbx/drivers/crypto/msm/qcryptohw_50.h
--- linux-4.4.115-fbx/drivers/crypto/msm./qcryptohw_50.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/crypto/msm/qcryptohw_50.h	2019-01-22 16:16:23.115242893 +0100
@@ -0,0 +1,528 @@
+/* Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DRIVERS_CRYPTO_MSM_QCRYPTOHW_50_H_
+#define _DRIVERS_CRYPTO_MSM_QCRYPTOHW_50_H_
+
+
+#define CRYPTO_BAM_CNFG_BITS_REG		0x0007C
+#define CRYPTO_BAM_CD_ENABLE			27
+#define CRYPTO_BAM_CD_ENABLE_MASK		(1 << CRYPTO_BAM_CD_ENABLE)
+
+#define QCE_AUTH_REG_BYTE_COUNT 4
+#define CRYPTO_VERSION_REG			0x1A000
+
+#define CRYPTO_DATA_IN0_REG			0x1A010
+#define CRYPTO_DATA_IN1_REG			0x1A014
+#define CRYPTO_DATA_IN2_REG			0x1A018
+#define CRYPTO_DATA_IN3_REG			0x1A01C
+
+#define CRYPTO_DATA_OUT0_REG			0x1A020
+#define CRYPTO_DATA_OUT1_REG			0x1A024
+#define CRYPTO_DATA_OUT2_REG			0x1A028
+#define CRYPTO_DATA_OUT3_REG			0x1A02C
+
+#define CRYPTO_STATUS_REG			0x1A100
+#define CRYPTO_STATUS2_REG			0x1A104
+#define CRYPTO_ENGINES_AVAIL			0x1A108
+#define CRYPTO_FIFO_SIZES_REG			0x1A10C
+
+#define CRYPTO_SEG_SIZE_REG			0x1A110
+#define CRYPTO_GOPROC_REG			0x1A120
+#define CRYPTO_GOPROC_QC_KEY_REG		0x1B000
+#define CRYPTO_GOPROC_OEM_KEY_REG		0x1C000
+
+#define CRYPTO_ENCR_SEG_CFG_REG			0x1A200
+#define CRYPTO_ENCR_SEG_SIZE_REG		0x1A204
+#define CRYPTO_ENCR_SEG_START_REG		0x1A208
+
+#define CRYPTO_ENCR_KEY0_REG			0x1D000
+#define CRYPTO_ENCR_KEY1_REG			0x1D004
+#define CRYPTO_ENCR_KEY2_REG			0x1D008
+#define CRYPTO_ENCR_KEY3_REG			0x1D00C
+#define CRYPTO_ENCR_KEY4_REG			0x1D010
+#define CRYPTO_ENCR_KEY5_REG			0x1D014
+#define CRYPTO_ENCR_KEY6_REG			0x1D018
+#define CRYPTO_ENCR_KEY7_REG			0x1D01C
+
+#define CRYPTO_ENCR_XTS_KEY0_REG		0x1D020
+#define CRYPTO_ENCR_XTS_KEY1_REG		0x1D024
+#define CRYPTO_ENCR_XTS_KEY2_REG		0x1D028
+#define CRYPTO_ENCR_XTS_KEY3_REG		0x1D02C
+#define CRYPTO_ENCR_XTS_KEY4_REG		0x1D030
+#define CRYPTO_ENCR_XTS_KEY5_REG		0x1D034
+#define CRYPTO_ENCR_XTS_KEY6_REG		0x1D038
+#define CRYPTO_ENCR_XTS_KEY7_REG		0x1D03C
+
+#define CRYPTO_ENCR_PIPE0_KEY0_REG		0x1E000
+#define CRYPTO_ENCR_PIPE0_KEY1_REG		0x1E004
+#define CRYPTO_ENCR_PIPE0_KEY2_REG		0x1E008
+#define CRYPTO_ENCR_PIPE0_KEY3_REG		0x1E00C
+#define CRYPTO_ENCR_PIPE0_KEY4_REG		0x1E010
+#define CRYPTO_ENCR_PIPE0_KEY5_REG		0x1E014
+#define CRYPTO_ENCR_PIPE0_KEY6_REG		0x1E018
+#define CRYPTO_ENCR_PIPE0_KEY7_REG		0x1E01C
+
+#define CRYPTO_ENCR_PIPE1_KEY0_REG		0x1E020
+#define CRYPTO_ENCR_PIPE1_KEY1_REG		0x1E024
+#define CRYPTO_ENCR_PIPE1_KEY2_REG		0x1E028
+#define CRYPTO_ENCR_PIPE1_KEY3_REG		0x1E02C
+#define CRYPTO_ENCR_PIPE1_KEY4_REG		0x1E030
+#define CRYPTO_ENCR_PIPE1_KEY5_REG		0x1E034
+#define CRYPTO_ENCR_PIPE1_KEY6_REG		0x1E038
+#define CRYPTO_ENCR_PIPE1_KEY7_REG		0x1E03C
+
+#define CRYPTO_ENCR_PIPE2_KEY0_REG		0x1E040
+#define CRYPTO_ENCR_PIPE2_KEY1_REG		0x1E044
+#define CRYPTO_ENCR_PIPE2_KEY2_REG		0x1E048
+#define CRYPTO_ENCR_PIPE2_KEY3_REG		0x1E04C
+#define CRYPTO_ENCR_PIPE2_KEY4_REG		0x1E050
+#define CRYPTO_ENCR_PIPE2_KEY5_REG		0x1E054
+#define CRYPTO_ENCR_PIPE2_KEY6_REG		0x1E058
+#define CRYPTO_ENCR_PIPE2_KEY7_REG		0x1E05C
+
+#define CRYPTO_ENCR_PIPE3_KEY0_REG		0x1E060
+#define CRYPTO_ENCR_PIPE3_KEY1_REG		0x1E064
+#define CRYPTO_ENCR_PIPE3_KEY2_REG		0x1E068
+#define CRYPTO_ENCR_PIPE3_KEY3_REG		0x1E06C
+#define CRYPTO_ENCR_PIPE3_KEY4_REG		0x1E070
+#define CRYPTO_ENCR_PIPE3_KEY5_REG		0x1E074
+#define CRYPTO_ENCR_PIPE3_KEY6_REG		0x1E078
+#define CRYPTO_ENCR_PIPE3_KEY7_REG		0x1E07C
+
+
+#define CRYPTO_ENCR_PIPE0_XTS_KEY0_REG		0x1E200
+#define CRYPTO_ENCR_PIPE0_XTS_KEY1_REG		0x1E204
+#define CRYPTO_ENCR_PIPE0_XTS_KEY2_REG		0x1E208
+#define CRYPTO_ENCR_PIPE0_XTS_KEY3_REG		0x1E20C
+#define CRYPTO_ENCR_PIPE0_XTS_KEY4_REG		0x1E210
+#define CRYPTO_ENCR_PIPE0_XTS_KEY5_REG		0x1E214
+#define CRYPTO_ENCR_PIPE0_XTS_KEY6_REG		0x1E218
+#define CRYPTO_ENCR_PIPE0_XTS_KEY7_REG		0x1E21C
+
+#define CRYPTO_ENCR_PIPE1_XTS_KEY0_REG		0x1E220
+#define CRYPTO_ENCR_PIPE1_XTS_KEY1_REG		0x1E224
+#define CRYPTO_ENCR_PIPE1_XTS_KEY2_REG		0x1E228
+#define CRYPTO_ENCR_PIPE1_XTS_KEY3_REG		0x1E22C
+#define CRYPTO_ENCR_PIPE1_XTS_KEY4_REG		0x1E230
+#define CRYPTO_ENCR_PIPE1_XTS_KEY5_REG		0x1E234
+#define CRYPTO_ENCR_PIPE1_XTS_KEY6_REG		0x1E238
+#define CRYPTO_ENCR_PIPE1_XTS_KEY7_REG		0x1E23C
+
+#define CRYPTO_ENCR_PIPE2_XTS_KEY0_REG		0x1E240
+#define CRYPTO_ENCR_PIPE2_XTS_KEY1_REG		0x1E244
+#define CRYPTO_ENCR_PIPE2_XTS_KEY2_REG		0x1E248
+#define CRYPTO_ENCR_PIPE2_XTS_KEY3_REG		0x1E24C
+#define CRYPTO_ENCR_PIPE2_XTS_KEY4_REG		0x1E250
+#define CRYPTO_ENCR_PIPE2_XTS_KEY5_REG		0x1E254
+#define CRYPTO_ENCR_PIPE2_XTS_KEY6_REG		0x1E258
+#define CRYPTO_ENCR_PIPE2_XTS_KEY7_REG		0x1E25C
+
+#define CRYPTO_ENCR_PIPE3_XTS_KEY0_REG		0x1E260
+#define CRYPTO_ENCR_PIPE3_XTS_KEY1_REG		0x1E264
+#define CRYPTO_ENCR_PIPE3_XTS_KEY2_REG		0x1E268
+#define CRYPTO_ENCR_PIPE3_XTS_KEY3_REG		0x1E26C
+#define CRYPTO_ENCR_PIPE3_XTS_KEY4_REG		0x1E270
+#define CRYPTO_ENCR_PIPE3_XTS_KEY5_REG		0x1E274
+#define CRYPTO_ENCR_PIPE3_XTS_KEY6_REG		0x1E278
+#define CRYPTO_ENCR_PIPE3_XTS_KEY7_REG		0x1E27C
+
+
+#define CRYPTO_CNTR0_IV0_REG			0x1A20C
+#define CRYPTO_CNTR1_IV1_REG			0x1A210
+#define CRYPTO_CNTR2_IV2_REG			0x1A214
+#define CRYPTO_CNTR3_IV3_REG			0x1A218
+
+#define CRYPTO_CNTR_MASK_REG0			0x1A23C
+#define CRYPTO_CNTR_MASK_REG1			0x1A238
+#define CRYPTO_CNTR_MASK_REG2			0x1A234
+#define CRYPTO_CNTR_MASK_REG			0x1A21C
+
+#define CRYPTO_ENCR_CCM_INT_CNTR0_REG		0x1A220
+#define CRYPTO_ENCR_CCM_INT_CNTR1_REG		0x1A224
+#define CRYPTO_ENCR_CCM_INT_CNTR2_REG		0x1A228
+#define CRYPTO_ENCR_CCM_INT_CNTR3_REG		0x1A22C
+
+#define CRYPTO_ENCR_XTS_DU_SIZE_REG		0x1A230
+
+#define CRYPTO_AUTH_SEG_CFG_REG			0x1A300
+#define CRYPTO_AUTH_SEG_SIZE_REG		0x1A304
+#define CRYPTO_AUTH_SEG_START_REG		0x1A308
+
+#define CRYPTO_AUTH_KEY0_REG			0x1D040
+#define CRYPTO_AUTH_KEY1_REG			0x1D044
+#define CRYPTO_AUTH_KEY2_REG			0x1D048
+#define CRYPTO_AUTH_KEY3_REG			0x1D04C
+#define CRYPTO_AUTH_KEY4_REG			0x1D050
+#define CRYPTO_AUTH_KEY5_REG			0x1D054
+#define CRYPTO_AUTH_KEY6_REG			0x1D058
+#define CRYPTO_AUTH_KEY7_REG			0x1D05C
+#define CRYPTO_AUTH_KEY8_REG			0x1D060
+#define CRYPTO_AUTH_KEY9_REG			0x1D064
+#define CRYPTO_AUTH_KEY10_REG			0x1D068
+#define CRYPTO_AUTH_KEY11_REG			0x1D06C
+#define CRYPTO_AUTH_KEY12_REG			0x1D070
+#define CRYPTO_AUTH_KEY13_REG			0x1D074
+#define CRYPTO_AUTH_KEY14_REG			0x1D078
+#define CRYPTO_AUTH_KEY15_REG			0x1D07C
+
+#define CRYPTO_AUTH_PIPE0_KEY0_REG		0x1E800
+#define CRYPTO_AUTH_PIPE0_KEY1_REG		0x1E804
+#define CRYPTO_AUTH_PIPE0_KEY2_REG		0x1E808
+#define CRYPTO_AUTH_PIPE0_KEY3_REG		0x1E80C
+#define CRYPTO_AUTH_PIPE0_KEY4_REG		0x1E810
+#define CRYPTO_AUTH_PIPE0_KEY5_REG		0x1E814
+#define CRYPTO_AUTH_PIPE0_KEY6_REG		0x1E818
+#define CRYPTO_AUTH_PIPE0_KEY7_REG		0x1E81C
+#define CRYPTO_AUTH_PIPE0_KEY8_REG		0x1E820
+#define CRYPTO_AUTH_PIPE0_KEY9_REG		0x1E824
+#define CRYPTO_AUTH_PIPE0_KEY10_REG		0x1E828
+#define CRYPTO_AUTH_PIPE0_KEY11_REG		0x1E82C
+#define CRYPTO_AUTH_PIPE0_KEY12_REG		0x1E830
+#define CRYPTO_AUTH_PIPE0_KEY13_REG		0x1E834
+#define CRYPTO_AUTH_PIPE0_KEY14_REG		0x1E838
+#define CRYPTO_AUTH_PIPE0_KEY15_REG		0x1E83C
+
+#define CRYPTO_AUTH_PIPE1_KEY0_REG		0x1E880
+#define CRYPTO_AUTH_PIPE1_KEY1_REG		0x1E884
+#define CRYPTO_AUTH_PIPE1_KEY2_REG		0x1E888
+#define CRYPTO_AUTH_PIPE1_KEY3_REG		0x1E88C
+#define CRYPTO_AUTH_PIPE1_KEY4_REG		0x1E890
+#define CRYPTO_AUTH_PIPE1_KEY5_REG		0x1E894
+#define CRYPTO_AUTH_PIPE1_KEY6_REG		0x1E898
+#define CRYPTO_AUTH_PIPE1_KEY7_REG		0x1E89C
+#define CRYPTO_AUTH_PIPE1_KEY8_REG		0x1E8A0
+#define CRYPTO_AUTH_PIPE1_KEY9_REG		0x1E8A4
+#define CRYPTO_AUTH_PIPE1_KEY10_REG		0x1E8A8
+#define CRYPTO_AUTH_PIPE1_KEY11_REG		0x1E8AC
+#define CRYPTO_AUTH_PIPE1_KEY12_REG		0x1E8B0
+#define CRYPTO_AUTH_PIPE1_KEY13_REG		0x1E8B4
+#define CRYPTO_AUTH_PIPE1_KEY14_REG		0x1E8B8
+#define CRYPTO_AUTH_PIPE1_KEY15_REG		0x1E8BC
+
+#define CRYPTO_AUTH_PIPE2_KEY0_REG		0x1E900
+#define CRYPTO_AUTH_PIPE2_KEY1_REG		0x1E904
+#define CRYPTO_AUTH_PIPE2_KEY2_REG		0x1E908
+#define CRYPTO_AUTH_PIPE2_KEY3_REG		0x1E90C
+#define CRYPTO_AUTH_PIPE2_KEY4_REG		0x1E910
+#define CRYPTO_AUTH_PIPE2_KEY5_REG		0x1E914
+#define CRYPTO_AUTH_PIPE2_KEY6_REG		0x1E918
+#define CRYPTO_AUTH_PIPE2_KEY7_REG		0x1E91C
+#define CRYPTO_AUTH_PIPE2_KEY8_REG		0x1E920
+#define CRYPTO_AUTH_PIPE2_KEY9_REG		0x1E924
+#define CRYPTO_AUTH_PIPE2_KEY10_REG		0x1E928
+#define CRYPTO_AUTH_PIPE2_KEY11_REG		0x1E92C
+#define CRYPTO_AUTH_PIPE2_KEY12_REG		0x1E930
+#define CRYPTO_AUTH_PIPE2_KEY13_REG		0x1E934
+#define CRYPTO_AUTH_PIPE2_KEY14_REG		0x1E938
+#define CRYPTO_AUTH_PIPE2_KEY15_REG		0x1E93C
+
+#define CRYPTO_AUTH_PIPE3_KEY0_REG		0x1E980
+#define CRYPTO_AUTH_PIPE3_KEY1_REG		0x1E984
+#define CRYPTO_AUTH_PIPE3_KEY2_REG		0x1E988
+#define CRYPTO_AUTH_PIPE3_KEY3_REG		0x1E98C
+#define CRYPTO_AUTH_PIPE3_KEY4_REG		0x1E990
+#define CRYPTO_AUTH_PIPE3_KEY5_REG		0x1E994
+#define CRYPTO_AUTH_PIPE3_KEY6_REG		0x1E998
+#define CRYPTO_AUTH_PIPE3_KEY7_REG		0x1E99C
+#define CRYPTO_AUTH_PIPE3_KEY8_REG		0x1E9A0
+#define CRYPTO_AUTH_PIPE3_KEY9_REG		0x1E9A4
+#define CRYPTO_AUTH_PIPE3_KEY10_REG		0x1E9A8
+#define CRYPTO_AUTH_PIPE3_KEY11_REG		0x1E9AC
+#define CRYPTO_AUTH_PIPE3_KEY12_REG		0x1E9B0
+#define CRYPTO_AUTH_PIPE3_KEY13_REG		0x1E9B4
+#define CRYPTO_AUTH_PIPE3_KEY14_REG		0x1E9B8
+#define CRYPTO_AUTH_PIPE3_KEY15_REG		0x1E9BC
+
+
+#define CRYPTO_AUTH_IV0_REG			0x1A310
+#define CRYPTO_AUTH_IV1_REG			0x1A314
+#define CRYPTO_AUTH_IV2_REG			0x1A318
+#define CRYPTO_AUTH_IV3_REG			0x1A31C
+#define CRYPTO_AUTH_IV4_REG			0x1A320
+#define CRYPTO_AUTH_IV5_REG			0x1A324
+#define CRYPTO_AUTH_IV6_REG			0x1A328
+#define CRYPTO_AUTH_IV7_REG			0x1A32C
+#define CRYPTO_AUTH_IV8_REG			0x1A330
+#define CRYPTO_AUTH_IV9_REG			0x1A334
+#define CRYPTO_AUTH_IV10_REG			0x1A338
+#define CRYPTO_AUTH_IV11_REG			0x1A33C
+#define CRYPTO_AUTH_IV12_REG			0x1A340
+#define CRYPTO_AUTH_IV13_REG			0x1A344
+#define CRYPTO_AUTH_IV14_REG			0x1A348
+#define CRYPTO_AUTH_IV15_REG			0x1A34C
+
+#define CRYPTO_AUTH_INFO_NONCE0_REG		0x1A350
+#define CRYPTO_AUTH_INFO_NONCE1_REG		0x1A354
+#define CRYPTO_AUTH_INFO_NONCE2_REG		0x1A358
+#define CRYPTO_AUTH_INFO_NONCE3_REG		0x1A35C
+
+#define CRYPTO_AUTH_BYTECNT0_REG		0x1A390
+#define CRYPTO_AUTH_BYTECNT1_REG		0x1A394
+#define CRYPTO_AUTH_BYTECNT2_REG		0x1A398
+#define CRYPTO_AUTH_BYTECNT3_REG		0x1A39C
+
+#define CRYPTO_AUTH_EXP_MAC0_REG		0x1A3A0
+#define CRYPTO_AUTH_EXP_MAC1_REG		0x1A3A4
+#define CRYPTO_AUTH_EXP_MAC2_REG		0x1A3A8
+#define CRYPTO_AUTH_EXP_MAC3_REG		0x1A3AC
+#define CRYPTO_AUTH_EXP_MAC4_REG		0x1A3B0
+#define CRYPTO_AUTH_EXP_MAC5_REG		0x1A3B4
+#define CRYPTO_AUTH_EXP_MAC6_REG		0x1A3B8
+#define CRYPTO_AUTH_EXP_MAC7_REG		0x1A3BC
+
+#define CRYPTO_CONFIG_REG			0x1A400
+#define CRYPTO_DEBUG_ENABLE_REG			0x1AF00
+#define CRYPTO_DEBUG_REG			0x1AF04
+
+
+
+/* Register bits */
+#define CRYPTO_CORE_STEP_REV_MASK		0xFFFF
+#define CRYPTO_CORE_STEP_REV			0 /* bit 15-0 */
+#define CRYPTO_CORE_MAJOR_REV_MASK		0xFF000000
+#define CRYPTO_CORE_MAJOR_REV			24 /* bit 31-24 */
+#define CRYPTO_CORE_MINOR_REV_MASK		0xFF0000
+#define CRYPTO_CORE_MINOR_REV			16 /* bit 23-16 */
+
+/* status reg  */
+#define CRYPTO_MAC_FAILED			31
+#define CRYPTO_DOUT_SIZE_AVAIL			26 /* bit 30-26 */
+#define CRYPTO_DOUT_SIZE_AVAIL_MASK		(0x1F << CRYPTO_DOUT_SIZE_AVAIL)
+#define CRYPTO_DIN_SIZE_AVAIL			21 /* bit 25-21 */
+#define CRYPTO_DIN_SIZE_AVAIL_MASK		(0x1F << CRYPTO_DIN_SIZE_AVAIL)
+#define CRYPTO_HSD_ERR				20
+#define CRYPTO_ACCESS_VIOL			19
+#define CRYPTO_PIPE_ACTIVE_ERR			18
+#define CRYPTO_CFG_CHNG_ERR			17
+#define CRYPTO_DOUT_ERR				16
+#define CRYPTO_DIN_ERR				15
+#define CRYPTO_AXI_ERR				14
+#define CRYPTO_CRYPTO_STATE			10 /* bit 13-10 */
+#define CRYPTO_CRYPTO_STATE_MASK		(0xF << CRYPTO_CRYPTO_STATE)
+#define CRYPTO_ENCR_BUSY			9
+#define CRYPTO_AUTH_BUSY			8
+#define CRYPTO_DOUT_INTR			7
+#define CRYPTO_DIN_INTR				6
+#define CRYPTO_OP_DONE_INTR			5
+#define CRYPTO_ERR_INTR				4
+#define CRYPTO_DOUT_RDY				3
+#define CRYPTO_DIN_RDY				2
+#define CRYPTO_OPERATION_DONE			1
+#define CRYPTO_SW_ERR				0
+
+/* status2 reg  */
+#define CRYPTO_AXI_EXTRA			1
+#define CRYPTO_LOCKED				2
+
+/* config reg */
+#define CRYPTO_REQ_SIZE				17 /* bit 20-17 */
+#define CRYPTO_REQ_SIZE_MASK			(0xF << CRYPTO_REQ_SIZE)
+#define CRYPTO_REQ_SIZE_ENUM_1_BEAT	0
+#define CRYPTO_REQ_SIZE_ENUM_2_BEAT	1
+#define CRYPTO_REQ_SIZE_ENUM_3_BEAT	2
+#define CRYPTO_REQ_SIZE_ENUM_4_BEAT	3
+#define CRYPTO_REQ_SIZE_ENUM_5_BEAT	4
+#define CRYPTO_REQ_SIZE_ENUM_6_BEAT	5
+#define CRYPTO_REQ_SIZE_ENUM_7_BEAT	6
+#define CRYPTO_REQ_SIZE_ENUM_8_BEAT	7
+#define CRYPTO_REQ_SIZE_ENUM_9_BEAT	8
+#define CRYPTO_REQ_SIZE_ENUM_10_BEAT	9
+#define CRYPTO_REQ_SIZE_ENUM_11_BEAT	10
+#define CRYPTO_REQ_SIZE_ENUM_12_BEAT	11
+#define CRYPTO_REQ_SIZE_ENUM_13_BEAT	12
+#define CRYPTO_REQ_SIZE_ENUM_14_BEAT	13
+#define CRYPTO_REQ_SIZE_ENUM_15_BEAT	14
+#define CRYPTO_REQ_SIZE_ENUM_16_BEAT	15
+
+#define CRYPTO_MAX_QUEUED_REQ			14 /* bit 16-14 */
+#define CRYPTO_MAX_QUEUED_REQ_MASK		(0x7 << CRYPTO_MAX_QUEUED_REQ)
+#define CRYPTO_ENUM_1_QUEUED_REQS	0
+#define CRYPTO_ENUM_2_QUEUED_REQS	1
+#define CRYPTO_ENUM_3_QUEUED_REQS	2
+
+#define CRYPTO_IRQ_ENABLES			10	/* bit 13-10 */
+#define CRYPTO_IRQ_ENABLES_MASK			(0xF << CRYPTO_IRQ_ENABLES)
+
+#define CRYPTO_LITTLE_ENDIAN_MODE		9
+#define CRYPTO_LITTLE_ENDIAN_MASK		(1 << CRYPTO_LITTLE_ENDIAN_MODE)
+#define CRYPTO_PIPE_SET_SELECT			5 /* bit 8-5 */
+#define CRYPTO_PIPE_SET_SELECT_MASK		(0xF << CRYPTO_PIPE_SET_SELECT)
+
+#define CRYPTO_HIGH_SPD_EN_N			4
+
+#define CRYPTO_MASK_DOUT_INTR			3
+#define CRYPTO_MASK_DIN_INTR			2
+#define CRYPTO_MASK_OP_DONE_INTR		1
+#define CRYPTO_MASK_ERR_INTR			0
+
+/* auth_seg_cfg reg */
+#define CRYPTO_COMP_EXP_MAC			24
+#define CRYPTO_COMP_EXP_MAC_DISABLED		0
+#define CRYPTO_COMP_EXP_MAC_ENABLED		1
+
+#define CRYPTO_F9_DIRECTION			23
+#define CRYPTO_F9_DIRECTION_UPLINK		0
+#define CRYPTO_F9_DIRECTION_DOWNLINK		1
+
+#define CRYPTO_AUTH_NONCE_NUM_WORDS		20 /* bit 22-20 */
+#define CRYPTO_AUTH_NONCE_NUM_WORDS_MASK \
+				(0x7 << CRYPTO_AUTH_NONCE_NUM_WORDS)
+
+#define CRYPTO_USE_PIPE_KEY_AUTH		19
+#define CRYPTO_USE_HW_KEY_AUTH			18
+#define CRYPTO_FIRST				17
+#define CRYPTO_LAST				16
+
+#define CRYPTO_AUTH_POS				14 /* bit 15 .. 14 */
+#define CRYPTO_AUTH_POS_MASK			(0x3 << CRYPTO_AUTH_POS)
+#define CRYPTO_AUTH_POS_BEFORE			0
+#define CRYPTO_AUTH_POS_AFTER			1
+
+#define CRYPTO_AUTH_SIZE			9 /* bits 13 .. 9 */
+#define CRYPTO_AUTH_SIZE_MASK			(0x1F << CRYPTO_AUTH_SIZE)
+#define CRYPTO_AUTH_SIZE_SHA1		0
+#define CRYPTO_AUTH_SIZE_SHA256		1
+#define CRYPTO_AUTH_SIZE_ENUM_1_BYTES	0
+#define CRYPTO_AUTH_SIZE_ENUM_2_BYTES	1
+#define CRYPTO_AUTH_SIZE_ENUM_3_BYTES	2
+#define CRYPTO_AUTH_SIZE_ENUM_4_BYTES	3
+#define CRYPTO_AUTH_SIZE_ENUM_5_BYTES	4
+#define CRYPTO_AUTH_SIZE_ENUM_6_BYTES	5
+#define CRYPTO_AUTH_SIZE_ENUM_7_BYTES	6
+#define CRYPTO_AUTH_SIZE_ENUM_8_BYTES	7
+#define CRYPTO_AUTH_SIZE_ENUM_9_BYTES	8
+#define CRYPTO_AUTH_SIZE_ENUM_10_BYTES	9
+#define CRYPTO_AUTH_SIZE_ENUM_11_BYTES	10
+#define CRYPTO_AUTH_SIZE_ENUM_12_BYTES	11
+#define CRYPTO_AUTH_SIZE_ENUM_13_BYTES	12
+#define CRYPTO_AUTH_SIZE_ENUM_14_BYTES	13
+#define CRYPTO_AUTH_SIZE_ENUM_15_BYTES	14
+#define CRYPTO_AUTH_SIZE_ENUM_16_BYTES	15
+
+
+#define CRYPTO_AUTH_MODE			6 /* bit 8 .. 6 */
+#define CRYPTO_AUTH_MODE_MASK			(0x7 << CRYPTO_AUTH_MODE)
+#define CRYPTO_AUTH_MODE_HASH	0
+#define CRYPTO_AUTH_MODE_HMAC	1
+#define CRYPTO_AUTH_MODE_CCM	0
+#define CRYPTO_AUTH_MODE_CMAC	1
+
+#define CRYPTO_AUTH_KEY_SIZE			3 /* bit 5 .. 3 */
+#define CRYPTO_AUTH_KEY_SIZE_MASK		(0x7 << CRYPTO_AUTH_KEY_SIZE)
+#define CRYPTO_AUTH_KEY_SZ_AES128	0
+#define CRYPTO_AUTH_KEY_SZ_AES256	2
+
+#define CRYPTO_AUTH_ALG				0 /* bit 2 .. 0 */
+#define CRYPTO_AUTH_ALG_MASK			7
+#define CRYPTO_AUTH_ALG_NONE	0
+#define CRYPTO_AUTH_ALG_SHA	1
+#define CRYPTO_AUTH_ALG_AES	2
+#define CRYPTO_AUTH_ALG_KASUMI	3
+#define CRYPTO_AUTH_ALG_SNOW3G	4
+#define CRYPTO_AUTH_ALG_ZUC	5
+
+/* encr_xts_du_size reg */
+#define CRYPTO_ENCR_XTS_DU_SIZE			0 /* bit 19-0  */
+#define CRYPTO_ENCR_XTS_DU_SIZE_MASK		0xfffff
+
+/* encr_seg_cfg reg */
+#define CRYPTO_F8_KEYSTREAM_ENABLE		17 /* bit */
+#define CRYPTO_F8_KEYSTREAM_DISABLED	0
+#define CRYPTO_F8_KEYSTREAM_ENABLED	1
+
+#define CRYPTO_F8_DIRECTION			16 /* bit */
+#define CRYPTO_F8_DIRECTION_UPLINK	0
+#define CRYPTO_F8_DIRECTION_DOWNLINK	1
+
+
+#define CRYPTO_USE_PIPE_KEY_ENCR		15 /* bit */
+#define CRYPTO_USE_PIPE_KEY_ENCR_ENABLED	1
+#define CRYPTO_USE_KEY_REGISTERS		0
+
+
+#define CRYPTO_USE_HW_KEY_ENCR			14
+#define CRYPTO_USE_KEY_REG	0
+#define CRYPTO_USE_HW_KEY	1
+
+#define CRYPTO_LAST_CCM				13
+#define CRYPTO_LAST_CCM_XFR	1
+#define CRYPTO_INTERM_CCM_XFR	0
+
+
+#define CRYPTO_CNTR_ALG				11 /* bit 12-11 */
+#define CRYPTO_CNTR_ALG_MASK			(3 << CRYPTO_CNTR_ALG)
+#define CRYPTO_CNTR_ALG_NIST	0
+
+#define CRYPTO_ENCODE				10
+
+#define CRYPTO_ENCR_MODE			6 /* bit 9-6 */
+#define CRYPTO_ENCR_MODE_MASK			(0xF << CRYPTO_ENCR_MODE)
+/* only valid when AES */
+#define CRYPTO_ENCR_MODE_ECB	0
+#define CRYPTO_ENCR_MODE_CBC	1
+#define CRYPTO_ENCR_MODE_CTR	2
+#define CRYPTO_ENCR_MODE_XTS	3
+#define CRYPTO_ENCR_MODE_CCM	4
+
+#define CRYPTO_ENCR_KEY_SZ			3 /* bit 5-3 */
+#define CRYPTO_ENCR_KEY_SZ_MASK			(7 << CRYPTO_ENCR_KEY_SZ)
+#define CRYPTO_ENCR_KEY_SZ_DES		0
+#define CRYPTO_ENCR_KEY_SZ_3DES		1
+#define CRYPTO_ENCR_KEY_SZ_AES128	0
+#define CRYPTO_ENCR_KEY_SZ_AES256	2
+
+#define CRYPTO_ENCR_ALG				0 /* bit 2-0 */
+#define CRYPTO_ENCR_ALG_MASK			(7 << CRYPTO_ENCR_ALG)
+#define CRYPTO_ENCR_ALG_NONE		0
+#define CRYPTO_ENCR_ALG_DES		1
+#define CRYPTO_ENCR_ALG_AES		2
+#define CRYPTO_ENCR_ALG_KASUMI		4
+#define CRYPTO_ENCR_ALG_SNOW_3G		5
+#define CRYPTO_ENCR_ALG_ZUC		6
+
+/* goproc reg */
+#define CRYPTO_GO				0
+#define CRYPTO_CLR_CNTXT			1
+#define CRYPTO_RESULTS_DUMP			2
+
+/*  F8 definition of CRYPTO_ENCR_CNTR1_IV1 REG  */
+#define CRYPTO_CNTR1_IV1_REG_F8_PKT_CNT		16	/* bit 31 - 16 */
+#define CRYPTO_CNTR1_IV1_REG_F8_PKT_CNT_MASK \
+		(0xffff << CRYPTO_CNTR1_IV1_REG_F8_PKT_CNT)
+
+#define CRYPTO_CNTR1_IV1_REG_F8_BEARER		0	/* bit 4 - 0 */
+#define CRYPTO_CNTR1_IV1_REG_F8_BEARER_MASK \
+		(0x1f << CRYPTO_CNTR1_IV1_REG_F8_BEARER)
+
+/* F9 definition of CRYPTO_AUTH_IV4 REG */
+#define CRYPTO_AUTH_IV4_REG_F9_VALID_BIS	0	/* bit 2 - 0 */
+#define CRYPTO_AUTH_IV4_REG_F9_VALID_BIS_MASK \
+		(0x7  << CRYPTO_AUTH_IV4_REG_F9_VALID_BIS)
+
+/* engines_avail */
+#define CRYPTO_ENCR_AES_SEL			0
+#define CRYPTO_DES_SEL				1
+#define CRYPTO_ENCR_SNOW3G_SEL			2
+#define CRYPTO_ENCR_KASUMI_SEL			3
+#define CRYPTO_SHA_SEL				4
+#define CRYPTO_SHA512_SEL			5
+#define CRYPTO_AUTH_AES_SEL			6
+#define CRYPTO_AUTH_SNOW3G_SEL			7
+#define CRYPTO_AUTH_KASUMI_SEL			8
+#define CRYPTO_BAM_PIPE_SETS			9	/* bit 12 - 9 */
+#define CRYPTO_AXI_WR_BEATS			13	/* bit 18 - 13 */
+#define CRYPTO_AXI_RD_BEATS			19	/* bit 24 - 19 */
+#define CRYPTO_ENCR_ZUC_SEL			26
+#define CRYPTO_AUTH_ZUC_SEL			27
+#define CRYPTO_ZUC_ENABLE			28
+#endif /* _DRIVERS_CRYPTO_MSM_QCRYPTOHW_50_H_ */
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/devfreq/arm-memlat-mon.c	2019-10-29 09:26:23.513201906 +0100
@@ -0,0 +1,368 @@
+/*
+ * Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "arm-memlat-mon: " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/slab.h>
+#include <linux/irq.h>
+#include <linux/cpu_pm.h>
+#include <linux/cpu.h>
+#include "governor.h"
+#include "governor_memlat.h"
+#include <linux/perf_event.h>
+
+enum ev_index {
+	INST_IDX,
+	L2DM_IDX,
+	CYC_IDX,
+	NUM_EVENTS
+};
+#define INST_EV		0x08
+#define L2DM_EV		0x17
+#define CYC_EV		0x11
+
+struct event_data {
+	struct perf_event *pevent;
+	unsigned long prev_count;
+};
+
+struct memlat_hwmon_data {
+	struct event_data events[NUM_EVENTS];
+	ktime_t prev_ts;
+	bool init_pending;
+};
+static DEFINE_PER_CPU(struct memlat_hwmon_data, pm_data);
+
+struct cpu_grp_info {
+	cpumask_t cpus;
+	struct memlat_hwmon hw;
+	struct notifier_block arm_memlat_cpu_notif;
+};
+
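+/*
+ * Estimate the average frequency (cycles per microsecond, i.e. MHz) from
+ * the cycle count accumulated since the previous timestamped sample.
+ */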
+static unsigned long compute_freq(struct memlat_hwmon_data *hw_data,
+						unsigned long cyc_cnt)
+{
+	ktime_t ts;
+	unsigned int diff;
+	unsigned long freq = 0;
+
+	ts = ktime_get();
+	diff = ktime_to_us(ktime_sub(ts, hw_data->prev_ts));
+	if (!diff)
+		diff = 1;
+	hw_data->prev_ts = ts;
+	freq = cyc_cnt;
+	do_div(freq, diff);
+
+	return freq;
+}
+
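+/*
+ * Return the delta of a perf counter since the last read, tolerating one
+ * wrap of the 64-bit count.
+ */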
+#define MAX_COUNT_LIM 0xFFFFFFFFFFFFFFFF
+static inline unsigned long read_event(struct event_data *event)
+{
+	unsigned long ev_count;
+	u64 total, enabled, running;
+
+	total = perf_event_read_value(event->pevent, &enabled, &running);
+	if (total >= event->prev_count)
+		ev_count = total - event->prev_count;
+	else
+		ev_count = (MAX_COUNT_LIM - event->prev_count) + total + 1;
+
+	event->prev_count = total;
+
+	return ev_count;
+}
+
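+/*
+ * Refresh one CPU's governor stats: instruction count, memory (L2 miss)
+ * count and an estimated frequency derived from the cycle counter.
+ */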
+static void read_perf_counters(int cpu, struct cpu_grp_info *cpu_grp)
+{
+	int cpu_idx;
+	struct memlat_hwmon_data *hw_data = &per_cpu(pm_data, cpu);
+	struct memlat_hwmon *hw = &cpu_grp->hw;
+	unsigned long cyc_cnt;
+
+	if (hw_data->init_pending)
+		return;
+
+	cpu_idx = cpu - cpumask_first(&cpu_grp->cpus);
+
+	hw->core_stats[cpu_idx].inst_count =
+			read_event(&hw_data->events[INST_IDX]);
+
+	hw->core_stats[cpu_idx].mem_count =
+			read_event(&hw_data->events[L2DM_IDX]);
+
+	cyc_cnt = read_event(&hw_data->events[CYC_IDX]);
+	hw->core_stats[cpu_idx].freq = compute_freq(hw_data, cyc_cnt);
+}
+
+static unsigned long get_cnt(struct memlat_hwmon *hw)
+{
+	int cpu;
+	struct cpu_grp_info *cpu_grp = container_of(hw,
+					struct cpu_grp_info, hw);
+
+	for_each_cpu(cpu, &cpu_grp->cpus)
+		read_perf_counters(cpu, cpu_grp);
+
+	return 0;
+}
+
+static void delete_events(struct memlat_hwmon_data *hw_data)
+{
+	int i;
+
+	for (i = 0; i < NUM_EVENTS; i++) {
+		hw_data->events[i].prev_count = 0;
+		perf_event_release_kernel(hw_data->events[i].pevent);
+	}
+}
+
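+/*
+ * Stop monitoring: release the perf events (or cancel a pending deferred
+ * init) and zero the per-CPU stats reported to the governor.
+ */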
+static void stop_hwmon(struct memlat_hwmon *hw)
+{
+	int cpu, idx;
+	struct memlat_hwmon_data *hw_data;
+	struct cpu_grp_info *cpu_grp = container_of(hw,
+					struct cpu_grp_info, hw);
+
+	get_online_cpus();
+	for_each_cpu(cpu, &cpu_grp->cpus) {
+		hw_data = &per_cpu(pm_data, cpu);
+		if (hw_data->init_pending)
+			hw_data->init_pending = false;
+		else
+			delete_events(hw_data);
+
+		/* Clear governor data */
+		idx = cpu - cpumask_first(&cpu_grp->cpus);
+		hw->core_stats[idx].inst_count = 0;
+		hw->core_stats[idx].mem_count = 0;
+		hw->core_stats[idx].freq = 0;
+	}
+	put_online_cpus();
+
+	unregister_cpu_notifier(&cpu_grp->arm_memlat_cpu_notif);
+}
+
+static struct perf_event_attr *alloc_attr(void)
+{
+	struct perf_event_attr *attr;
+
+	attr = kzalloc(sizeof(struct perf_event_attr), GFP_KERNEL);
+	if (!attr)
+		return ERR_PTR(-ENOMEM);
+
+	attr->type = PERF_TYPE_RAW;
+	attr->size = sizeof(struct perf_event_attr);
+	attr->pinned = 1;
+	attr->exclude_idle = 1;
+
+	return attr;
+}
+
+static int set_events(struct memlat_hwmon_data *hw_data, int cpu)
+{
+	struct perf_event *pevent;
+	struct perf_event_attr *attr;
+	int err, i;
+
+	/* Allocate an attribute for event initialization */
+	attr = alloc_attr();
+	if (IS_ERR(attr))
+		return PTR_ERR(attr);
+
+	attr->config = INST_EV;
+	pevent = perf_event_create_kernel_counter(attr, cpu, NULL, NULL, NULL);
+	if (IS_ERR(pevent))
+		goto err_out;
+	hw_data->events[INST_IDX].pevent = pevent;
+	perf_event_enable(hw_data->events[INST_IDX].pevent);
+
+	attr->config = L2DM_EV;
+	pevent = perf_event_create_kernel_counter(attr, cpu, NULL, NULL, NULL);
+	if (IS_ERR(pevent))
+		goto err_out;
+	hw_data->events[L2DM_IDX].pevent = pevent;
+	perf_event_enable(hw_data->events[L2DM_IDX].pevent);
+
+	attr->config = CYC_EV;
+	pevent = perf_event_create_kernel_counter(attr, cpu, NULL, NULL, NULL);
+	if (IS_ERR(pevent))
+		goto err_out;
+	hw_data->events[CYC_IDX].pevent = pevent;
+	perf_event_enable(hw_data->events[CYC_IDX].pevent);
+
+	kfree(attr);
+	return 0;
+
+err_out:
+	err = PTR_ERR(pevent);
+	/* Release any counters that were created before the failure. */
+	for (i = 0; i < NUM_EVENTS; i++)
+		if (hw_data->events[i].pevent)
+			perf_event_release_kernel(hw_data->events[i].pevent);
+	kfree(attr);
+	return err;
+}
+
+static int arm_memlat_cpu_callback(struct notifier_block *nb,
+		unsigned long action, void *hcpu)
+{
+	unsigned long cpu = (unsigned long)hcpu;
+	struct memlat_hwmon_data *hw_data = &per_cpu(pm_data, cpu);
+
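+	/* Finish deferred perf-event setup once the CPU comes online. */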
+	if ((action != CPU_ONLINE) || !hw_data->init_pending)
+		return NOTIFY_OK;
+
+	if (set_events(hw_data, cpu))
+		pr_warn("Failed to create perf event for CPU%lu\n", cpu);
+
+	hw_data->init_pending = false;
+
+	return NOTIFY_OK;
+}
+
+static int start_hwmon(struct memlat_hwmon *hw)
+{
+	int cpu, ret = 0;
+	struct memlat_hwmon_data *hw_data;
+	struct cpu_grp_info *cpu_grp = container_of(hw,
+					struct cpu_grp_info, hw);
+
+	register_cpu_notifier(&cpu_grp->arm_memlat_cpu_notif);
+
+	get_online_cpus();
+	for_each_cpu(cpu, &cpu_grp->cpus) {
+		hw_data = &per_cpu(pm_data, cpu);
+		ret = set_events(hw_data, cpu);
+		if (ret) {
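+			/*
+			 * An offline CPU cannot host perf events yet;
+			 * defer setup to the hotplug notifier.
+			 */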
+			if (!cpu_online(cpu)) {
+				hw_data->init_pending = true;
+				ret = 0;
+			} else {
+				pr_warn("Perf event init failed on CPU%d\n",
+					cpu);
+				break;
+			}
+		}
+	}
+
+	put_online_cpus();
+	return ret;
+}
+
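+/*
+ * Illustrative DT usage (node and phandle names are examples only):
+ *
+ *	arm-memlat-mon {
+ *		compatible = "qcom,arm-memlat-mon";
+ *		qcom,cpulist = <&CPU0 &CPU1 &CPU2 &CPU3>;
+ *		qcom,target-dev = <&memlat_target>;
+ *	};
+ */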
+static int get_mask_from_dev_handle(struct platform_device *pdev,
+					cpumask_t *mask)
+{
+	struct device *dev = &pdev->dev;
+	struct device_node *dev_phandle;
+	struct device *cpu_dev;
+	int cpu, i = 0;
+	int ret = -ENOENT;
+
+	dev_phandle = of_parse_phandle(dev->of_node, "qcom,cpulist", i++);
+	while (dev_phandle) {
+		for_each_possible_cpu(cpu) {
+			cpu_dev = get_cpu_device(cpu);
+			if (cpu_dev && cpu_dev->of_node == dev_phandle) {
+				cpumask_set_cpu(cpu, mask);
+				ret = 0;
+				break;
+			}
+		}
+		dev_phandle = of_parse_phandle(dev->of_node,
+						"qcom,cpulist", i++);
+	}
+
+	return ret;
+}
+
+static int arm_memlat_mon_driver_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct memlat_hwmon *hw;
+	struct cpu_grp_info *cpu_grp;
+	int cpu, ret;
+
+	cpu_grp = devm_kzalloc(dev, sizeof(*cpu_grp), GFP_KERNEL);
+	if (!cpu_grp)
+		return -ENOMEM;
+	cpu_grp->arm_memlat_cpu_notif.notifier_call = arm_memlat_cpu_callback;
+	hw = &cpu_grp->hw;
+
+	hw->dev = dev;
+	hw->of_node = of_parse_phandle(dev->of_node, "qcom,target-dev", 0);
+	if (!hw->of_node) {
+		dev_err(dev, "Couldn't find a target device\n");
+		return -ENODEV;
+	}
+
+	if (get_mask_from_dev_handle(pdev, &cpu_grp->cpus)) {
+		dev_err(dev, "CPU list is empty\n");
+		return -ENODEV;
+	}
+
+	hw->num_cores = cpumask_weight(&cpu_grp->cpus);
+	hw->core_stats = devm_kzalloc(dev, hw->num_cores *
+				sizeof(*(hw->core_stats)), GFP_KERNEL);
+	if (!hw->core_stats)
+		return -ENOMEM;
+
+	for_each_cpu(cpu, &cpu_grp->cpus)
+		hw->core_stats[cpu - cpumask_first(&cpu_grp->cpus)].id = cpu;
+
+	hw->start_hwmon = &start_hwmon;
+	hw->stop_hwmon = &stop_hwmon;
+	hw->get_cnt = &get_cnt;
+
+	ret = register_memlat(dev, hw);
+	if (ret) {
+		pr_err("Mem Latency Gov registration failed\n");
+		return ret;
+	}
+
+	return 0;
+}
+
+static const struct of_device_id match_table[] = {
+	{ .compatible = "qcom,arm-memlat-mon" },
+	{}
+};
+
+static struct platform_driver arm_memlat_mon_driver = {
+	.probe = arm_memlat_mon_driver_probe,
+	.driver = {
+		.name = "arm-memlat-mon",
+		.of_match_table = match_table,
+		.owner = THIS_MODULE,
+	},
+};
+
+static int __init arm_memlat_mon_init(void)
+{
+	return platform_driver_register(&arm_memlat_mon_driver);
+}
+module_init(arm_memlat_mon_init);
+
+static void __exit arm_memlat_mon_exit(void)
+{
+	platform_driver_unregister(&arm_memlat_mon_driver);
+}
+module_exit(arm_memlat_mon_exit);
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/devfreq/bimc-bwmon.c	2019-10-29 09:26:23.513201906 +0100
@@ -0,0 +1,688 @@
+/*
+ * Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "bimc-bwmon: " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/bitops.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/spinlock.h>
+#include "governor_bw_hwmon.h"
+
+#define GLB_INT_STATUS(m)	((m)->global_base + 0x100)
+#define GLB_INT_CLR(m)		((m)->global_base + 0x108)
+#define	GLB_INT_EN(m)		((m)->global_base + 0x10C)
+#define MON_INT_STATUS(m)	((m)->base + 0x100)
+#define MON_INT_CLR(m)		((m)->base + 0x108)
+#define	MON_INT_EN(m)		((m)->base + 0x10C)
+#define	MON_EN(m)		((m)->base + 0x280)
+#define MON_CLEAR(m)		((m)->base + 0x284)
+#define MON_CNT(m)		((m)->base + 0x288)
+#define MON_THRES(m)		((m)->base + 0x290)
+#define MON_MASK(m)		((m)->base + 0x298)
+#define MON_MATCH(m)		((m)->base + 0x29C)
+
+#define MON2_EN(m)		((m)->base + 0x2A0)
+#define MON2_CLEAR(m)		((m)->base + 0x2A4)
+#define MON2_SW(m)		((m)->base + 0x2A8)
+#define MON2_THRES_HI(m)	((m)->base + 0x2AC)
+#define MON2_THRES_MED(m)	((m)->base + 0x2B0)
+#define MON2_THRES_LO(m)	((m)->base + 0x2B4)
+#define MON2_ZONE_ACTIONS(m)	((m)->base + 0x2B8)
+#define MON2_ZONE_CNT_THRES(m)	((m)->base + 0x2BC)
+#define MON2_BYTE_CNT(m)	((m)->base + 0x2D0)
+#define MON2_WIN_TIMER(m)	((m)->base + 0x2D4)
+#define MON2_ZONE_CNT(m)	((m)->base + 0x2D8)
+#define MON2_ZONE_MAX(m, zone)	((m)->base + 0x2E0 + 0x4 * (zone))
+
+struct bwmon_spec {
+	bool wrap_on_thres;
+	bool overflow;
+	bool throt_adj;
+	bool hw_sampling;
+};
+
+struct bwmon {
+	void __iomem *base;
+	void __iomem *global_base;
+	unsigned int mport;
+	unsigned int irq;
+	const struct bwmon_spec *spec;
+	struct device *dev;
+	struct bw_hwmon hw;
+	u32 hw_timer_hz;
+	u32 throttle_adj;
+	u32 sample_size_ms;
+	u32 intr_status;
+};
+
+#define to_bwmon(ptr)		container_of(ptr, struct bwmon, hw)
+#define has_hw_sampling(m)		((m)->spec->hw_sampling)
+
+#define ENABLE_MASK BIT(0)
+#define THROTTLE_MASK 0x1F
+#define THROTTLE_SHIFT 16
+#define INT_ENABLE_V1	0x1
+#define INT_STATUS_MASK	0x03
+#define INT_STATUS_MASK_HWS	0xF0
+
+static DEFINE_SPINLOCK(glb_lock);
+static void mon_enable(struct bwmon *m)
+{
+	if (has_hw_sampling(m))
+		writel_relaxed((ENABLE_MASK | m->throttle_adj), MON2_EN(m));
+	else
+		writel_relaxed((ENABLE_MASK | m->throttle_adj), MON_EN(m));
+}
+
+static void mon_disable(struct bwmon *m)
+{
+	if (has_hw_sampling(m))
+		writel_relaxed(m->throttle_adj, MON2_EN(m));
+	else
+		writel_relaxed(m->throttle_adj, MON_EN(m));
+	/*
+	 * Ensure mon_disable() completes before a subsequent
+	 * mon_irq_clear(): if the clear went first and the count then
+	 * happened to trigger an interrupt, the IRQ line would be left
+	 * high with no one handling it.
+	 */
+	mb();
+}
+
+#define MON_CLEAR_BIT	0x1
+#define MON_CLEAR_ALL_BIT	0x2
+static void mon_clear(struct bwmon *m, bool clear_all)
+{
+	if (!has_hw_sampling(m)) {
+		writel_relaxed(MON_CLEAR_BIT, MON_CLEAR(m));
+		goto out;
+	}
+
+	if (clear_all)
+		writel_relaxed(MON_CLEAR_ALL_BIT, MON2_CLEAR(m));
+	else
+		writel_relaxed(MON_CLEAR_BIT, MON2_CLEAR(m));
+
+	/*
+	 * The counter clear and IRQ clear bits are not in the same 4KB
+	 * region. So, we need to make sure the counter clear is completed
+	 * before we try to clear the IRQ or do any other counter operations.
+	 */
+out:
+	mb();
+}
+
+#define	SAMPLE_WIN_LIM	0xFFFFF
+static void mon_set_hw_sampling_window(struct bwmon *m, unsigned int sample_ms)
+{
+	u32 rate;
+
+	if (unlikely(sample_ms != m->sample_size_ms)) {
+		rate = mult_frac(sample_ms, m->hw_timer_hz, MSEC_PER_SEC);
+		m->sample_size_ms = sample_ms;
+		if (unlikely(rate > SAMPLE_WIN_LIM)) {
+			pr_warn("Sample window %u larger than hw limit: %u\n",
+					rate, SAMPLE_WIN_LIM);
+			rate = SAMPLE_WIN_LIM;
+		}
+		writel_relaxed(rate, MON2_SW(m));
+	}
+}
+
+static void mon_irq_enable(struct bwmon *m)
+{
+	u32 val;
+
+	spin_lock(&glb_lock);
+	val = readl_relaxed(GLB_INT_EN(m));
+	val |= 1 << m->mport;
+	writel_relaxed(val, GLB_INT_EN(m));
+
+	val = readl_relaxed(MON_INT_EN(m));
+	val |= has_hw_sampling(m) ? INT_STATUS_MASK_HWS : INT_ENABLE_V1;
+	writel_relaxed(val, MON_INT_EN(m));
+	spin_unlock(&glb_lock);
+	/*
+	 * Make sure the IRQ enable has completed for both the local and
+	 * global registers before returning, to avoid racing with other
+	 * monitor operations.
+	 */
+	mb();
+}
+
+static void mon_irq_disable(struct bwmon *m)
+{
+	u32 val;
+
+	spin_lock(&glb_lock);
+	val = readl_relaxed(GLB_INT_EN(m));
+	val &= ~(1 << m->mport);
+	writel_relaxed(val, GLB_INT_EN(m));
+
+	val = readl_relaxed(MON_INT_EN(m));
+	val &= has_hw_sampling(m) ? ~INT_STATUS_MASK_HWS : ~INT_ENABLE_V1;
+	writel_relaxed(val, MON_INT_EN(m));
+	spin_unlock(&glb_lock);
+	/*
+	 * Make sure the IRQ disable has completed for both the local and
+	 * global registers before returning, to avoid racing with other
+	 * monitor operations.
+	 */
+	mb();
+}
+
+static unsigned int mon_irq_status(struct bwmon *m)
+{
+	u32 mval;
+
+	mval = readl_relaxed(MON_INT_STATUS(m));
+
+	dev_dbg(m->dev, "IRQ status p:%x, g:%x\n", mval,
+			readl_relaxed(GLB_INT_STATUS(m)));
+
+	mval &= has_hw_sampling(m) ? INT_STATUS_MASK_HWS : INT_STATUS_MASK;
+
+	return mval;
+}
+
+static void mon_irq_clear(struct bwmon *m)
+{
+	u32 intclr;
+
+	intclr = has_hw_sampling(m) ? INT_STATUS_MASK_HWS : INT_STATUS_MASK;
+
+	writel_relaxed(intclr, MON_INT_CLR(m));
+	mb();
+	writel_relaxed(1 << m->mport, GLB_INT_CLR(m));
+	mb();
+}
+
+static int mon_set_throttle_adj(struct bw_hwmon *hw, uint adj)
+{
+	struct bwmon *m = to_bwmon(hw);
+
+	if (adj > THROTTLE_MASK)
+		return -EINVAL;
+
+	adj = (adj & THROTTLE_MASK) << THROTTLE_SHIFT;
+	m->throttle_adj = adj;
+
+	return 0;
+}
+
+static u32 mon_get_throttle_adj(struct bw_hwmon *hw)
+{
+	struct bwmon *m = to_bwmon(hw);
+
+	return m->throttle_adj >> THROTTLE_SHIFT;
+}
+
+#define ZONE1_SHIFT	8
+#define ZONE2_SHIFT	16
+#define ZONE3_SHIFT	24
+#define ZONE0_ACTION	0x01	/* Increment zone 0 count */
+#define ZONE1_ACTION	0x09	/* Increment zone 1 & clear lower zones */
+#define ZONE2_ACTION	0x25	/* Increment zone 2 & clear lower zones */
+#define ZONE3_ACTION	0x95	/* Increment zone 3 & clear lower zones */
+static u32 calc_zone_actions(void)
+{
+	u32 zone_actions;
+
+	zone_actions = ZONE0_ACTION;
+	zone_actions |= ZONE1_ACTION << ZONE1_SHIFT;
+	zone_actions |= ZONE2_ACTION << ZONE2_SHIFT;
+	zone_actions |= ZONE3_ACTION << ZONE3_SHIFT;
+
+	return zone_actions;
+}
+
+#define ZONE_CNT_LIM	0xFFU
+#define UP_CNT_1	1
+static u32 calc_zone_counts(struct bw_hwmon *hw)
+{
+	u32 zone_counts;
+
+	zone_counts = ZONE_CNT_LIM;
+	zone_counts |= min(hw->down_cnt, ZONE_CNT_LIM) << ZONE1_SHIFT;
+	zone_counts |= ZONE_CNT_LIM << ZONE2_SHIFT;
+	zone_counts |= UP_CNT_1 << ZONE3_SHIFT;
+
+	return zone_counts;
+}
+
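+/* Convert a MB/s rate into the MB expected within a sample window of @ms. */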
+static unsigned int mbps_to_mb(unsigned long mbps, unsigned int ms)
+{
+	mbps *= ms;
+	mbps = DIV_ROUND_UP(mbps, MSEC_PER_SEC);
+	return mbps;
+}
+
+/*
+ * Define the 4 zones using HI, MED & LO thresholds:
+ * Zone 0: byte count < THRES_LO
+ * Zone 1: THRES_LO < byte count < THRES_MED
+ * Zone 2: THRES_MED < byte count < THRES_HI
+ * Zone 3: byte count > THRES_HI
+ */
+#define	THRES_LIM	0x7FFU
+static void set_zone_thres(struct bwmon *m, unsigned int sample_ms)
+{
+	struct bw_hwmon *hw = &(m->hw);
+	u32 hi, med, lo;
+
+	hi = mbps_to_mb(hw->up_wake_mbps, sample_ms);
+	med = mbps_to_mb(hw->down_wake_mbps, sample_ms);
+	lo = 0;
+
+	if (unlikely((hi > THRES_LIM) || (med > hi) || (lo > med))) {
+		pr_warn("Zone thres larger than hw limit: hi:%u med:%u lo:%u\n",
+				hi, med, lo);
+		hi = min(hi, THRES_LIM);
+		med = min(med, hi - 1);
+		lo = min(lo, med - 1);
+	}
+
+	writel_relaxed(hi, MON2_THRES_HI(m));
+	writel_relaxed(med, MON2_THRES_MED(m));
+	writel_relaxed(lo, MON2_THRES_LO(m));
+	dev_dbg(m->dev, "Thres: hi:%u med:%u lo:%u\n", hi, med, lo);
+}
+
+static void mon_set_zones(struct bwmon *m, unsigned int sample_ms)
+{
+	struct bw_hwmon *hw = &(m->hw);
+	u32 zone_cnt_thres = calc_zone_counts(hw);
+
+	mon_set_hw_sampling_window(m, sample_ms);
+	set_zone_thres(m, sample_ms);
+	/* Set the zone count thresholds for interrupts */
+	writel_relaxed(zone_cnt_thres, MON2_ZONE_CNT_THRES(m));
+
+	dev_dbg(m->dev, "Zone Count Thres: %0x\n", zone_cnt_thres);
+}
+
+static void mon_set_limit(struct bwmon *m, u32 count)
+{
+	writel_relaxed(count, MON_THRES(m));
+	dev_dbg(m->dev, "Thres: %08x\n", count);
+}
+
+static u32 mon_get_limit(struct bwmon *m)
+{
+	return readl_relaxed(MON_THRES(m));
+}
+
+#define THRES_HIT(status)	(status & BIT(0))
+#define OVERFLOW(status)	(status & BIT(1))
+static unsigned long mon_get_count(struct bwmon *m)
+{
+	unsigned long count, status;
+
+	count = readl_relaxed(MON_CNT(m));
+	status = mon_irq_status(m);
+
+	dev_dbg(m->dev, "Counter: %08lx\n", count);
+
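+	/* Fold in any wrap reported by the overflow/threshold status bits. */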
+	if (OVERFLOW(status) && m->spec->overflow)
+		count += 0xFFFFFFFF;
+	if (THRES_HIT(status) && m->spec->wrap_on_thres)
+		count += mon_get_limit(m);
+
+	dev_dbg(m->dev, "Actual Count: %08lx\n", count);
+
+	return count;
+}
+
+static unsigned int get_zone(struct bwmon *m)
+{
+	u32 zone_counts;
+	u32 zone;
+
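+	/* The triggered zone is latched in status bits [7:4]; highest wins. */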
+	zone = get_bitmask_order((m->intr_status & INT_STATUS_MASK_HWS) >> 4);
+	if (zone) {
+		zone--;
+	} else {
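+		/*
+		 * Nothing latched in the IRQ status; fall back to the zone
+		 * counters, one byte per zone, highest non-zero byte wins.
+		 */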
+		zone_counts = readl_relaxed(MON2_ZONE_CNT(m));
+		if (zone_counts) {
+			zone = get_bitmask_order(zone_counts) - 1;
+			zone /= 8;
+		}
+	}
+
+	m->intr_status = 0;
+	return zone;
+}
+
+static unsigned long mon_get_zone_stats(struct bwmon *m)
+{
+	unsigned int zone;
+	unsigned long count = 0;
+
+	zone = get_zone(m);
+
+	count = readl_relaxed(MON2_ZONE_MAX(m, zone)) + 1;
+	count *= SZ_1M;
+
+	dev_dbg(m->dev, "Zone%d Max byte count: %08lx\n", zone, count);
+
+	return count;
+}
+
+/* ********** CPUBW specific code ********** */
+
+/*
+ * Convert a MB/s rate into the byte count expected over the sampling
+ * window, padded by tolerance_percent.
+ */
+static unsigned int mbps_to_bytes(unsigned long mbps, unsigned int ms,
+				  unsigned int tolerance_percent)
+{
+	mbps *= (100 + tolerance_percent) * ms;
+	mbps /= 100;
+	mbps = DIV_ROUND_UP(mbps, MSEC_PER_SEC);
+	mbps *= SZ_1M;
+	return mbps;
+}
+
+static unsigned long get_bytes_and_clear(struct bw_hwmon *hw)
+{
+	struct bwmon *m = to_bwmon(hw);
+	unsigned long count;
+
+	mon_disable(m);
+	count = has_hw_sampling(m) ? mon_get_zone_stats(m) : mon_get_count(m);
+	mon_clear(m, false);
+	mon_irq_clear(m);
+	mon_enable(m);
+
+	return count;
+}
+
+static unsigned long set_thres(struct bw_hwmon *hw, unsigned long bytes)
+{
+	unsigned long count;
+	u32 limit;
+	struct bwmon *m = to_bwmon(hw);
+
+	mon_disable(m);
+	count = mon_get_count(m);
+	mon_clear(m, false);
+	mon_irq_clear(m);
+
+	if (likely(!m->spec->wrap_on_thres))
+		limit = bytes;
+	else
+		limit = max(bytes, 500000UL);
+
+	mon_set_limit(m, limit);
+	mon_enable(m);
+
+	return count;
+}
+
+static unsigned long set_hw_events(struct bw_hwmon *hw, unsigned int sample_ms)
+{
+	struct bwmon *m = to_bwmon(hw);
+
+	mon_disable(m);
+	mon_clear(m, false);
+	mon_irq_clear(m);
+
+	mon_set_zones(m, sample_ms);
+	mon_enable(m);
+
+	return 0;
+}
+
+static irqreturn_t bwmon_intr_handler(int irq, void *dev)
+{
+	struct bwmon *m = dev;
+
+	m->intr_status = mon_irq_status(m);
+	if (!m->intr_status)
+		return IRQ_NONE;
+
+	if (bw_hwmon_sample_end(&m->hw) > 0)
+		return IRQ_WAKE_THREAD;
+
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t bwmon_intr_thread(int irq, void *dev)
+{
+	struct bwmon *m = dev;
+
+	update_bw_hwmon(&m->hw);
+	return IRQ_HANDLED;
+}
+
+static int start_bw_hwmon(struct bw_hwmon *hw, unsigned long mbps)
+{
+	struct bwmon *m = to_bwmon(hw);
+	u32 limit;
+	u32 zone_actions = calc_zone_actions();
+	int ret;
+
+	ret = request_threaded_irq(m->irq, bwmon_intr_handler,
+				  bwmon_intr_thread,
+				  IRQF_ONESHOT | IRQF_SHARED,
+				  dev_name(m->dev), m);
+	if (ret) {
+		dev_err(m->dev, "Unable to register interrupt handler! (%d)\n",
+				ret);
+		return ret;
+	}
+
+	mon_disable(m);
+
+	mon_clear(m, true);
+	limit = mbps_to_bytes(mbps, hw->df->profile->polling_ms, 0);
+	if (has_hw_sampling(m)) {
+		mon_set_zones(m, hw->df->profile->polling_ms);
+		/* Set the zone actions to increment appropriate counters */
+		writel_relaxed(zone_actions, MON2_ZONE_ACTIONS(m));
+	} else {
+		mon_set_limit(m, limit);
+	}
+
+	mon_irq_clear(m);
+	mon_irq_enable(m);
+	mon_enable(m);
+
+	return 0;
+}
+
+static void stop_bw_hwmon(struct bw_hwmon *hw)
+{
+	struct bwmon *m = to_bwmon(hw);
+
+	mon_irq_disable(m);
+	free_irq(m->irq, m);
+	mon_disable(m);
+	mon_clear(m, true);
+	mon_irq_clear(m);
+}
+
+static int suspend_bw_hwmon(struct bw_hwmon *hw)
+{
+	struct bwmon *m = to_bwmon(hw);
+
+	mon_irq_disable(m);
+	free_irq(m->irq, m);
+	mon_disable(m);
+	mon_irq_clear(m);
+
+	return 0;
+}
+
+static int resume_bw_hwmon(struct bw_hwmon *hw)
+{
+	struct bwmon *m = to_bwmon(hw);
+	int ret;
+
+	mon_clear(m, false);
+	ret = request_threaded_irq(m->irq, bwmon_intr_handler,
+				  bwmon_intr_thread,
+				  IRQF_ONESHOT | IRQF_SHARED,
+				  dev_name(m->dev), m);
+	if (ret) {
+		dev_err(m->dev, "Unable to register interrupt handler! (%d)\n",
+				ret);
+		return ret;
+	}
+
+	mon_irq_enable(m);
+	mon_enable(m);
+
+	return 0;
+}
+
+/*************************************************************************/
+
+static const struct bwmon_spec spec[] = {
+	{ .wrap_on_thres = true, .overflow = false, .throt_adj = false,
+		.hw_sampling = false},
+	{ .wrap_on_thres = false, .overflow = true, .throt_adj = false,
+		.hw_sampling = false},
+	{ .wrap_on_thres = false, .overflow = true, .throt_adj = true,
+		.hw_sampling = false},
+	{ .wrap_on_thres = false, .overflow = true, .throt_adj = true,
+		.hw_sampling = true},
+};
+
+static const struct of_device_id match_table[] = {
+	{ .compatible = "qcom,bimc-bwmon", .data = &spec[0] },
+	{ .compatible = "qcom,bimc-bwmon2", .data = &spec[1] },
+	{ .compatible = "qcom,bimc-bwmon3", .data = &spec[2] },
+	{ .compatible = "qcom,bimc-bwmon4", .data = &spec[3] },
+	{}
+};
+
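+/*
+ * Illustrative DT usage (values are examples only):
+ *
+ *	qcom,cpu-bwmon {
+ *		compatible = "qcom,bimc-bwmon4";
+ *		reg-names = "base", "global_base";
+ *		qcom,mport = <0>;
+ *		qcom,hw-timer-hz = <19200000>;
+ *		qcom,target-dev = <&cpubw>;
+ *	};
+ */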
+static int bimc_bwmon_driver_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct resource *res;
+	struct bwmon *m;
+	const struct of_device_id *id;
+	int ret;
+	u32 data;
+
+	m = devm_kzalloc(dev, sizeof(*m), GFP_KERNEL);
+	if (!m)
+		return -ENOMEM;
+	m->dev = dev;
+
+	ret = of_property_read_u32(dev->of_node, "qcom,mport", &data);
+	if (ret) {
+		dev_err(dev, "mport not found!\n");
+		return ret;
+	}
+	m->mport = data;
+
+	id = of_match_device(match_table, dev);
+	if (!id) {
+		dev_err(dev, "Unknown device type!\n");
+		return -ENODEV;
+	}
+	m->spec = id->data;
+
+	if (has_hw_sampling(m)) {
+		ret = of_property_read_u32(dev->of_node,
+				"qcom,hw-timer-hz", &data);
+		if (ret) {
+			dev_err(dev, "HW sampling rate not specified!\n");
+			return ret;
+		}
+		m->hw_timer_hz = data;
+	}
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "base");
+	if (!res) {
+		dev_err(dev, "base not found!\n");
+		return -EINVAL;
+	}
+	m->base = devm_ioremap(dev, res->start, resource_size(res));
+	if (!m->base) {
+		dev_err(dev, "Unable map base!\n");
+		return -ENOMEM;
+	}
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "global_base");
+	if (!res) {
+		dev_err(dev, "global_base not found!\n");
+		return -EINVAL;
+	}
+	m->global_base = devm_ioremap(dev, res->start, resource_size(res));
+	if (!m->global_base) {
+		dev_err(dev, "Unable map global_base!\n");
+		return -ENOMEM;
+	}
+
+	/* Check the signed return value: m->irq itself is unsigned. */
+	ret = platform_get_irq(pdev, 0);
+	if (ret < 0) {
+		dev_err(dev, "Unable to get IRQ number\n");
+		return ret;
+	}
+	m->irq = ret;
+
+	m->hw.of_node = of_parse_phandle(dev->of_node, "qcom,target-dev", 0);
+	if (!m->hw.of_node)
+		return -EINVAL;
+	m->hw.start_hwmon = &start_bw_hwmon;
+	m->hw.stop_hwmon = &stop_bw_hwmon;
+	m->hw.suspend_hwmon = &suspend_bw_hwmon;
+	m->hw.resume_hwmon = &resume_bw_hwmon;
+	m->hw.get_bytes_and_clear = &get_bytes_and_clear;
+	m->hw.set_thres = &set_thres;
+	if (has_hw_sampling(m))
+		m->hw.set_hw_events = &set_hw_events;
+	if (m->spec->throt_adj) {
+		m->hw.set_throttle_adj = &mon_set_throttle_adj;
+		m->hw.get_throttle_adj = &mon_get_throttle_adj;
+	}
+
+	ret = register_bw_hwmon(dev, &m->hw);
+	if (ret) {
+		dev_err(dev, "Dev BW hwmon registration failed\n");
+		return ret;
+	}
+
+	return 0;
+}
+
+static struct platform_driver bimc_bwmon_driver = {
+	.probe = bimc_bwmon_driver_probe,
+	.driver = {
+		.name = "bimc-bwmon",
+		.of_match_table = match_table,
+		.owner = THIS_MODULE,
+	},
+};
+
+static int __init bimc_bwmon_init(void)
+{
+	return platform_driver_register(&bimc_bwmon_driver);
+}
+module_init(bimc_bwmon_init);
+
+static void __exit bimc_bwmon_exit(void)
+{
+	platform_driver_unregister(&bimc_bwmon_driver);
+}
+module_exit(bimc_bwmon_exit);
+
+MODULE_DESCRIPTION("BIMC bandwidth monitor driver");
+MODULE_LICENSE("GPL v2");
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/devfreq/devfreq_devbw.c	2019-01-22 16:16:23.135243074 +0100
@@ -0,0 +1,290 @@
+/*
+ * Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "devbw: " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/ktime.h>
+#include <linux/time.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/mutex.h>
+#include <linux/interrupt.h>
+#include <linux/devfreq.h>
+#include <linux/of.h>
+#include <trace/events/power.h>
+#include <linux/msm-bus.h>
+#include <linux/msm-bus-board.h>
+
+/* Has to be ULL to prevent overflow where this macro is used. */
+#define MBYTE (1ULL << 20)
+#define MAX_PATHS	2
+#define DBL_BUF		2
+
+struct dev_data {
+	struct msm_bus_vectors vectors[MAX_PATHS * DBL_BUF];
+	struct msm_bus_paths bw_levels[DBL_BUF];
+	struct msm_bus_scale_pdata bw_data;
+	int num_paths;
+	u32 bus_client;
+	int cur_idx;
+	int cur_ab;
+	int cur_ib;
+	long gov_ab;
+	unsigned int ab_percent;
+	struct devfreq *df;
+	struct devfreq_dev_profile dp;
+};
+
+static int set_bw(struct device *dev, int new_ib, int new_ab)
+{
+	struct dev_data *d = dev_get_drvdata(dev);
+	int i, ret;
+
+	if (d->cur_ib == new_ib && d->cur_ab == new_ab)
+		return 0;
+
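+	/*
+	 * Fill the inactive vector set and then request it by index, so
+	 * the vectors currently applied by the bus driver are never
+	 * modified in place.
+	 */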
+	i = (d->cur_idx + 1) % DBL_BUF;
+
+	d->bw_levels[i].vectors[0].ib = new_ib * MBYTE;
+	d->bw_levels[i].vectors[0].ab = new_ab / d->num_paths * MBYTE;
+	d->bw_levels[i].vectors[1].ib = new_ib * MBYTE;
+	d->bw_levels[i].vectors[1].ab = new_ab / d->num_paths * MBYTE;
+
+	dev_dbg(dev, "BW MBps: AB: %d IB: %d\n", new_ab, new_ib);
+
+	ret = msm_bus_scale_client_update_request(d->bus_client, i);
+	if (ret) {
+		dev_err(dev, "bandwidth request failed (%d)\n", ret);
+	} else {
+		d->cur_idx = i;
+		d->cur_ib = new_ib;
+		d->cur_ab = new_ab;
+	}
+
+	return ret;
+}
+
+static unsigned int find_ab(struct dev_data *d, unsigned long *freq)
+{
+	return (d->ab_percent * (*freq)) / 100;
+}
+
+static void find_freq(struct devfreq_dev_profile *p, unsigned long *freq,
+			u32 flags)
+{
+	int i;
+	unsigned long atmost, atleast, f;
+
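+	/* Find the closest table entries at or below and at or above *freq. */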
+	atmost = p->freq_table[0];
+	atleast = p->freq_table[p->max_state-1];
+	for (i = 0; i < p->max_state; i++) {
+		f = p->freq_table[i];
+		if (f <= *freq)
+			atmost = max(f, atmost);
+		if (f >= *freq)
+			atleast = min(f, atleast);
+	}
+
+	if (flags & DEVFREQ_FLAG_LEAST_UPPER_BOUND)
+		*freq = atmost;
+	else
+		*freq = atleast;
+}
+
+static int devbw_target(struct device *dev, unsigned long *freq, u32 flags)
+{
+	struct dev_data *d = dev_get_drvdata(dev);
+
+	find_freq(&d->dp, freq, flags);
+
+	if (!d->gov_ab)
+		return set_bw(dev, *freq, find_ab(d, freq));
+	else
+		return set_bw(dev, *freq, d->gov_ab);
+}
+
+static int devbw_get_dev_status(struct device *dev,
+				struct devfreq_dev_status *stat)
+{
+	struct dev_data *d = dev_get_drvdata(dev);
+
+	stat->private_data = &d->gov_ab;
+	return 0;
+}
+
+#define PROP_PORTS "qcom,src-dst-ports"
+#define PROP_TBL "qcom,bw-tbl"
+#define PROP_AB_PER "qcom,ab-percent"
+#define PROP_ACTIVE "qcom,active-only"
+
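+/*
+ * Illustrative DT usage (port numbers and table values are examples only):
+ *
+ *	qcom,cpubw {
+ *		compatible = "qcom,devbw";
+ *		qcom,src-dst-ports = <1 512>;
+ *		qcom,bw-tbl = <762 1144 1525 2288 3051>;
+ *		governor = "powersave";
+ *	};
+ */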
+int devfreq_add_devbw(struct device *dev)
+{
+	struct dev_data *d;
+	struct devfreq_dev_profile *p;
+	u32 *data, ports[MAX_PATHS * 2];
+	const char *gov_name;
+	int ret, len, i, num_paths;
+
+	d = devm_kzalloc(dev, sizeof(*d), GFP_KERNEL);
+	if (!d)
+		return -ENOMEM;
+	dev_set_drvdata(dev, d);
+
+	if (of_find_property(dev->of_node, PROP_PORTS, &len)) {
+		len /= sizeof(ports[0]);
+		if (len % 2 || len > ARRAY_SIZE(ports)) {
+			dev_err(dev, "Unexpected number of ports\n");
+			return -EINVAL;
+		}
+
+		ret = of_property_read_u32_array(dev->of_node, PROP_PORTS,
+						 ports, len);
+		if (ret)
+			return ret;
+
+		num_paths = len / 2;
+	} else {
+		return -EINVAL;
+	}
+
+	d->bw_levels[0].vectors = &d->vectors[0];
+	d->bw_levels[1].vectors = &d->vectors[MAX_PATHS];
+	d->bw_data.usecase = d->bw_levels;
+	d->bw_data.num_usecases = ARRAY_SIZE(d->bw_levels);
+	d->bw_data.name = dev_name(dev);
+	d->bw_data.active_only = of_property_read_bool(dev->of_node,
+							PROP_ACTIVE);
+
+	for (i = 0; i < num_paths; i++) {
+		d->bw_levels[0].vectors[i].src = ports[2 * i];
+		d->bw_levels[0].vectors[i].dst = ports[2 * i + 1];
+		d->bw_levels[1].vectors[i].src = ports[2 * i];
+		d->bw_levels[1].vectors[i].dst = ports[2 * i + 1];
+	}
+	d->bw_levels[0].num_paths = num_paths;
+	d->bw_levels[1].num_paths = num_paths;
+	d->num_paths = num_paths;
+
+	p = &d->dp;
+	p->polling_ms = 50;
+	p->target = devbw_target;
+	p->get_dev_status = devbw_get_dev_status;
+
+	if (of_find_property(dev->of_node, PROP_TBL, &len)) {
+		len /= sizeof(*data);
+		data = devm_kzalloc(dev, len * sizeof(*data), GFP_KERNEL);
+		if (!data)
+			return -ENOMEM;
+
+		p->freq_table = devm_kzalloc(dev,
+					     len * sizeof(*p->freq_table),
+					     GFP_KERNEL);
+		if (!p->freq_table)
+			return -ENOMEM;
+
+		ret = of_property_read_u32_array(dev->of_node, PROP_TBL,
+						 data, len);
+		if (ret)
+			return ret;
+
+		for (i = 0; i < len; i++)
+			p->freq_table[i] = data[i];
+		p->max_state = len;
+	}
+
+	if (of_find_property(dev->of_node, PROP_AB_PER, &len)) {
+		ret = of_property_read_u32(dev->of_node, PROP_AB_PER,
+							&d->ab_percent);
+		if (ret)
+			return ret;
+
+		dev_dbg(dev, "ab-percent used %u\n", d->ab_percent);
+	}
+
+	d->bus_client = msm_bus_scale_register_client(&d->bw_data);
+	if (!d->bus_client) {
+		dev_err(dev, "Unable to register bus client\n");
+		return -ENODEV;
+	}
+
+	if (of_property_read_string(dev->of_node, "governor", &gov_name))
+		gov_name = "performance";
+
+	d->df = devfreq_add_device(dev, p, gov_name, NULL);
+	if (IS_ERR(d->df)) {
+		msm_bus_scale_unregister_client(d->bus_client);
+		return PTR_ERR(d->df);
+	}
+
+	return 0;
+}
+
+int devfreq_remove_devbw(struct device *dev)
+{
+	struct dev_data *d = dev_get_drvdata(dev);
+
+	msm_bus_scale_unregister_client(d->bus_client);
+	devfreq_remove_device(d->df);
+	return 0;
+}
+
+int devfreq_suspend_devbw(struct device *dev)
+{
+	struct dev_data *d = dev_get_drvdata(dev);
+
+	return devfreq_suspend_device(d->df);
+}
+
+int devfreq_resume_devbw(struct device *dev)
+{
+	struct dev_data *d = dev_get_drvdata(dev);
+
+	return devfreq_resume_device(d->df);
+}
+
+static int devfreq_devbw_probe(struct platform_device *pdev)
+{
+	return devfreq_add_devbw(&pdev->dev);
+}
+
+static int devfreq_devbw_remove(struct platform_device *pdev)
+{
+	return devfreq_remove_devbw(&pdev->dev);
+}
+
+static const struct of_device_id match_table[] = {
+	{ .compatible = "qcom,devbw" },
+	{}
+};
+
+static struct platform_driver devbw_driver = {
+	.probe = devfreq_devbw_probe,
+	.remove = devfreq_devbw_remove,
+	.driver = {
+		.name = "devbw",
+		.of_match_table = match_table,
+		.owner = THIS_MODULE,
+	},
+};
+
+static int __init devbw_init(void)
+{
+	return platform_driver_register(&devbw_driver);
+}
+device_initcall(devbw_init);
+
+MODULE_DESCRIPTION("Device DDR bandwidth voting driver MSM SoCs");
+MODULE_LICENSE("GPL v2");
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/devfreq/devfreq_spdm.c	2019-10-29 09:26:23.513201906 +0100
@@ -0,0 +1,443 @@
+/*
+ * Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/devfreq.h>
+#include <linux/init.h>
+#include <linux/ipc_logging.h>
+#include <linux/gfp.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/msm-bus.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+
+#include "governor.h"
+#include "devfreq_spdm.h"
+
+static void *spdm_ipc_log_ctxt;
+#define DEVFREQ_SPDM_DEFAULT_WINDOW_MS 100
+#define SPDM_IPC_LOG_PAGES	5
+
+#define SPDM_IPC_LOG(x...)	do { \
+	pr_debug(x); \
+	if (spdm_ipc_log_ctxt) \
+		ipc_log_string(spdm_ipc_log_ctxt, x); \
+} while (0)
+
+#define COPY_SIZE(x, y) ((x) <= (y) ? (x) : (y))
+
+static int change_bw(struct device *dev, unsigned long *freq, u32 flags)
+{
+	struct spdm_data *data = NULL;
+	int i;
+	int next_idx;
+	int ret = 0;
+	struct spdm_args desc = { { 0 } };
+	int ext_status = 0;
+
+	if (!dev || !freq)
+		return -EINVAL;
+
+	data = dev_get_drvdata(dev);
+	if (!data)
+		return -EINVAL;
+
+	if (data->devfreq->previous_freq == *freq)
+		goto update_thresholds;
+
+	next_idx = data->cur_idx + 1;
+	next_idx = next_idx % 2;
+
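+	/* The ab vote is carried at 64x the level; get_cur_bw() undoes it. */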
+	for (i = 0; i < data->pdata->usecase[next_idx].num_paths; i++)
+		data->pdata->usecase[next_idx].vectors[i].ab = (*freq) << 6;
+
+	data->cur_idx = next_idx;
+	ret = msm_bus_scale_client_update_request(data->bus_scale_client_id,
+						  data->cur_idx);
+
+update_thresholds:
+	desc.arg[0] = SPDM_CMD_ENABLE;
+	desc.arg[1] = data->spdm_client;
+
+	if (data->cci_clk)
+		desc.arg[2] = (clk_get_rate(data->cci_clk)) / 1000;
+	else
+		desc.arg[2] = 0;
+
+	ext_status = spdm_ext_call(&desc, 3);
+	if (ext_status)
+		pr_err("External command %u failed with error %u",
+			(int)desc.arg[0], ext_status);
+	return ret;
+}
+
+static int get_cur_bw(struct device *dev, unsigned long *freq)
+{
+	struct spdm_data *data = NULL;
+
+	if (!dev || !freq)
+		return -EINVAL;
+
+	data = dev_get_drvdata(dev);
+	if (!data)
+		return -EINVAL;
+
+	*freq = data->pdata->usecase[data->cur_idx].vectors[0].ab >> 6;
+
+	return 0;
+}
+
+static int get_dev_status(struct device *dev, struct devfreq_dev_status *status)
+{
+	struct spdm_data *data = NULL;
+	int ret;
+
+	if (!dev || !status)
+		return -EINVAL;
+
+	data = dev_get_drvdata(dev);
+	if (!data)
+		return -EINVAL;
+
+	/* determine if we want to go up or down based on the notification */
+	if (data->action == SPDM_UP)
+		status->busy_time = 255;
+	else
+		status->busy_time = 0;
+	status->total_time = 255;
+	ret = get_cur_bw(dev, &status->current_frequency);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+static int populate_config_data(struct spdm_data *data,
+				struct platform_device *pdev)
+{
+	int ret = -EINVAL;
+	struct device_node *node = pdev->dev.of_node;
+	struct property *prop = NULL;
+
+	ret = of_property_read_u32(node, "qcom,max-vote",
+				   &data->config_data.max_vote);
+	if (ret)
+		return ret;
+
+	ret = of_property_read_u32(node, "qcom,bw-upstep",
+				   &data->config_data.upstep);
+	if (ret)
+		return ret;
+
+	ret = of_property_read_u32(node, "qcom,bw-dwnstep",
+				   &data->config_data.downstep);
+	if (ret)
+		return ret;
+
+	ret = of_property_read_u32(node, "qcom,alpha-up",
+				   &data->config_data.aup);
+	if (ret)
+		return ret;
+
+	ret = of_property_read_u32(node, "qcom,alpha-down",
+				   &data->config_data.adown);
+	if (ret)
+		return ret;
+
+	ret = of_property_read_u32(node, "qcom,bucket-size",
+				   &data->config_data.bucket_size);
+	if (ret)
+		return ret;
+
+	ret = of_property_read_u32_array(node, "qcom,pl-freqs",
+					 data->config_data.pl_freqs,
+					 SPDM_PL_COUNT - 1);
+	if (ret)
+		return ret;
+
+	ret = of_property_read_u32_array(node, "qcom,reject-rate",
+					 data->config_data.reject_rate,
+					 SPDM_PL_COUNT * 2);
+	if (ret)
+		return ret;
+
+	ret = of_property_read_u32_array(node, "qcom,response-time-us",
+					 data->config_data.response_time_us,
+					 SPDM_PL_COUNT * 2);
+	if (ret)
+		return ret;
+
+	ret = of_property_read_u32_array(node, "qcom,cci-response-time-us",
+					 data->config_data.cci_response_time_us,
+					 SPDM_PL_COUNT * 2);
+	if (ret)
+		return ret;
+
+	ret = of_property_read_u32(node, "qcom,max-cci-freq",
+				   &data->config_data.max_cci_freq);
+	if (ret)
+		return ret;
+	ret = of_property_read_u32(node, "qcom,up-step-multp",
+				   &data->config_data.up_step_multp);
+	if (ret)
+		return ret;
+
+	prop = of_find_property(node, "qcom,ports", 0);
+	if (!prop)
+		return -EINVAL;
+	data->config_data.num_ports = prop->length / sizeof(u32);
+	data->config_data.ports =
+	    devm_kzalloc(&pdev->dev, prop->length, GFP_KERNEL);
+	if (!data->config_data.ports)
+		return -ENOMEM;
+	ret = of_property_read_u32_array(node, "qcom,ports",
+					 data->config_data.ports,
+					 data->config_data.num_ports);
+	if (ret) {
+		devm_kfree(&pdev->dev, data->config_data.ports);
+		data->config_data.ports = NULL;
+		return ret;
+	}
+
+	return 0;
+}
+
+static int populate_spdm_data(struct spdm_data *data,
+			      struct platform_device *pdev)
+{
+	int ret = -EINVAL;
+	struct device_node *node = pdev->dev.of_node;
+
+	ret = populate_config_data(data, pdev);
+	if (ret)
+		return ret;
+
+	ret =
+	    of_property_read_u32(node, "qcom,spdm-client", &data->spdm_client);
+	if (ret)
+		goto no_client;
+
+	ret = of_property_read_u32(node, "qcom,spdm-interval", &data->window);
+	if (ret)
+		data->window = DEVFREQ_SPDM_DEFAULT_WINDOW_MS;
+
+	data->pdata = msm_bus_cl_get_pdata(pdev);
+	if (!data->pdata) {
+		ret = -EINVAL;
+		goto no_pdata;
+	}
+
+	return 0;
+
+no_client:
+no_pdata:
+	devm_kfree(&pdev->dev, data->config_data.ports);
+	data->config_data.ports = NULL;
+	return ret;
+}
+
+int __spdm_hyp_call(struct spdm_args *args, int num_args)
+{
+	struct hvc_desc desc = { { 0 } };
+	int status;
+
+	memcpy(desc.arg, args->arg,
+		COPY_SIZE(sizeof(desc.arg), sizeof(args->arg)));
+	SPDM_IPC_LOG("hvc call fn:0x%x, cmd:%llu, num_args:%d\n",
+		HVC_FN_SIP(SPDM_HYP_FNID), desc.arg[0], num_args);
+
+	status = hvc(HVC_FN_SIP(SPDM_HYP_FNID), &desc);
+
+	memcpy(args->ret, desc.ret,
+		COPY_SIZE(sizeof(args->ret), sizeof(desc.ret)));
+	SPDM_IPC_LOG("hvc return fn:0x%x cmd:%llu Ret[0]:%llu Ret[1]:%llu\n",
+			HVC_FN_SIP(SPDM_HYP_FNID), desc.arg[0],
+			desc.ret[0], desc.ret[1]);
+	return status;
+}
+
+int __spdm_scm_call(struct spdm_args *args, int num_args)
+{
+	int status = 0;
+
+	SPDM_IPC_LOG("%s:svc_id:%d,cmd_id:%d,cmd:%llu,num_args:%d\n",
+		__func__, SPDM_SCM_SVC_ID, SPDM_SCM_CMD_ID,
+		args->arg[0], num_args);
+
+	if (!is_scm_armv8()) {
+		status = scm_call(SPDM_SCM_SVC_ID, SPDM_SCM_CMD_ID, args->arg,
+				sizeof(args->arg), args->ret,
+				sizeof(args->ret));
+	} else {
+		struct scm_desc desc = {0};
+
+		/*
+		 * The argument count is hard-coded; this is a requirement
+		 * of the TZ syscall interface.
+		 */
+		desc.arginfo = SCM_ARGS(6);
+		memcpy(desc.args, args->arg,
+			COPY_SIZE(sizeof(desc.args), sizeof(args->arg)));
+
+		status = scm_call2(SCM_SIP_FNID(SPDM_SCM_SVC_ID,
+				SPDM_SCM_CMD_ID), &desc);
+
+		memcpy(args->ret, desc.ret,
+			COPY_SIZE(sizeof(args->ret), sizeof(desc.ret)));
+	}
+	SPDM_IPC_LOG("%s:svc_id:%d,cmd_id:%d,cmd:%llu,Ret[0]:%llu,Ret[1]:%llu\n"
+		, __func__, SPDM_SCM_SVC_ID, SPDM_SCM_CMD_ID, args->arg[0],
+		args->ret[0], args->ret[1]);
+	return status;
+}
+
+static int probe(struct platform_device *pdev)
+{
+	struct spdm_data *data = NULL;
+	int ret = -EINVAL;
+	struct spdm_args desc = { { 0 } };
+	int ext_status = 0;
+
+	data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
+	if (!data)
+		return -ENOMEM;
+
+	data->action = SPDM_DOWN;
+
+	platform_set_drvdata(pdev, data);
+
+	ret = populate_spdm_data(data, pdev);
+	if (ret)
+		goto bad_of;
+
+	desc.arg[0] = SPDM_CMD_GET_VERSION;
+	ext_status = spdm_ext_call(&desc, 1);
+	if (ext_status) {
+		pr_err("%s:External command %u failed with error %u\n",
+			__func__, (int)desc.arg[0], ext_status);
+		goto bad_of;
+	}
+
+	if (desc.ret[0] < SPDM_TZ_VERSION) {
+		pr_err("%s: Version mismatch expected 0x%x got 0x%x", __func__,
+			SPDM_TZ_VERSION, (int)desc.arg[0]);
+		goto bad_of;
+	}
+
+	data->bus_scale_client_id = msm_bus_scale_register_client(data->pdata);
+	if (!data->bus_scale_client_id) {
+		ret = -EINVAL;
+		goto no_bus_scaling;
+	}
+
+	data->cci_clk = clk_get(&pdev->dev, "cci_clk");
+	if (IS_ERR(data->cci_clk)) {
+		data->cci_clk = NULL;
+	}
+
+	data->profile =
+	    devm_kzalloc(&pdev->dev, sizeof(*(data->profile)), GFP_KERNEL);
+	if (!data->profile) {
+		ret = -ENOMEM;
+		goto no_profile;
+	}
+	data->profile->target = change_bw;
+	data->profile->get_dev_status = get_dev_status;
+	data->profile->get_cur_freq = get_cur_bw;
+	data->profile->polling_ms = data->window;
+
+	data->devfreq =
+	    devfreq_add_device(&pdev->dev, data->profile, "spdm_bw_hyp", data);
+	if (IS_ERR(data->devfreq)) {
+		ret = PTR_ERR(data->devfreq);
+		goto no_spdm_device;
+	}
+
+	spdm_init_debugfs(&pdev->dev);
+	spdm_ipc_log_ctxt = ipc_log_context_create(SPDM_IPC_LOG_PAGES,
+							"devfreq_spdm", 0);
+
+	if (IS_ERR_OR_NULL(spdm_ipc_log_ctxt)) {
+		pr_err("%s: Failed to create IPC log context\n", __func__);
+		spdm_ipc_log_ctxt = NULL;
+	}
+
+	return 0;
+
+no_spdm_device:
+	devm_kfree(&pdev->dev, data->profile);
+no_profile:
+	msm_bus_scale_unregister_client(data->bus_scale_client_id);
+no_bus_scaling:
+	devm_kfree(&pdev->dev, data->config_data.ports);
+bad_of:
+	devm_kfree(&pdev->dev, data);
+	platform_set_drvdata(pdev, NULL);
+	return ret;
+}
+
+static int remove(struct platform_device *pdev)
+{
+	struct spdm_data *data = NULL;
+
+	data = platform_get_drvdata(pdev);
+
+	spdm_remove_debugfs(data);
+
+	if (data->devfreq)
+		devfreq_remove_device(data->devfreq);
+
+	if (data->profile)
+		devm_kfree(&pdev->dev, data->profile);
+
+	if (data->bus_scale_client_id)
+		msm_bus_scale_unregister_client(data->bus_scale_client_id);
+
+	if (data->config_data.ports)
+		devm_kfree(&pdev->dev, data->config_data.ports);
+
+	devm_kfree(&pdev->dev, data);
+	platform_set_drvdata(pdev, NULL);
+
+	if (spdm_ipc_log_ctxt)
+		ipc_log_context_destroy(spdm_ipc_log_ctxt);
+
+	return 0;
+}
+
+static const struct of_device_id devfreq_spdm_match[] = {
+	{.compatible = "qcom,devfreq_spdm"},
+	{}
+};
+
+static struct platform_driver devfreq_spdm_drvr = {
+	.driver = {
+		   .name = "devfreq_spdm",
+		   .owner = THIS_MODULE,
+		   .of_match_table = devfreq_spdm_match,
+		   },
+	.probe = probe,
+	.remove = remove,
+};
+
+static int __init devfreq_spdm_init(void)
+{
+	return platform_driver_register(&devfreq_spdm_drvr);
+}
+
+module_init(devfreq_spdm_init);
+
+MODULE_LICENSE("GPL v2");
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/devfreq/devfreq_spdm_debugfs.c	2019-01-22 16:16:23.135243074 +0100
@@ -0,0 +1,884 @@
+/*
+ * Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#include <linux/debugfs.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/msm-bus.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include "devfreq_spdm.h"
+#include "governor.h"
+
+static int spdm_open(struct inode *inode, struct file *file)
+{
+	file->private_data = inode->i_private;
+	return 0;
+}
+
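+/*
+ * Scratch buffer shared by all the debugfs handlers below; accesses are
+ * not serialized, so concurrent readers and writers can race.
+ */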
+static char buf[PAGE_SIZE];
+
+static ssize_t enable_write(struct file *file, const char __user *data,
+			    size_t size, loff_t *offset)
+{
+	struct spdm_data *spdm_data = file->private_data;
+	int i;
+	int next_idx;
+
+	if (size > sizeof(buf) - 1)
+		return -EINVAL;
+
+	if (copy_from_user(buf, data, size)) {
+		size = -EINVAL;
+		goto err;
+	}
+
+	buf[size] = '\0';
+
+	if (sscanf(buf, "%u\n", &i) != 1) {
+		size = -EINVAL;
+		goto err;
+	}
+	i = !!i;
+
+	if (i == spdm_data->enabled)
+		goto out;
+
+	spdm_data->devfreq->governor->event_handler(spdm_data->devfreq,
+						    i ? DEVFREQ_GOV_START :
+						    DEVFREQ_GOV_STOP, NULL);
+
+	if (!i) {
+		next_idx = spdm_data->cur_idx + 1;
+		next_idx = next_idx % 2;
+
+		for (i = 0; i < spdm_data->pdata->usecase[next_idx].num_paths;
+		     i++)
+			spdm_data->pdata->usecase[next_idx].vectors[i].ab = 0;
+
+		spdm_data->cur_idx = next_idx;
+		msm_bus_scale_client_update_request
+		    (spdm_data->bus_scale_client_id, spdm_data->cur_idx);
+	}
+
+out:
+	*offset += size;
+err:
+	memset(buf, 0, sizeof(buf));
+	return size;
+}
+
+static ssize_t enable_read(struct file *file, char __user *data,
+			   size_t size, loff_t *offset)
+{
+	struct spdm_data *spdm_data = file->private_data;
+	int len = 32;
+
+	if (size > sizeof(buf))
+		return -EINVAL;
+
+	len = scnprintf(buf, size, "%u\n", spdm_data->enabled);
+	len = simple_read_from_buffer(data, size, offset, buf, len);
+
+	memset(buf, 0, sizeof(buf));
+	return len;
+}
+
+static const struct file_operations enable_fops = {
+	.open = spdm_open,
+	.write = enable_write,
+	.read = enable_read,
+};
+
+static ssize_t pl_write(struct file *file, const char __user *data,
+			size_t size, loff_t *offset)
+{
+	struct spdm_data *spdm_data = file->private_data;
+	struct spdm_args desc = { { 0 } };
+	int ext_status = 0;
+	int i;
+
+	if (size > sizeof(buf) - 1)
+		return -EINVAL;
+
+	if (copy_from_user(buf, data, size)) {
+		size = -EINVAL;
+		goto out;
+	}
+
+	buf[size] = '\0';
+
+	if (sscanf(buf, "%u %u\n", &spdm_data->config_data.pl_freqs[0],
+	       &spdm_data->config_data.pl_freqs[1]) != 2) {
+		size = -EINVAL;
+		goto out;
+	}
+
+	desc.arg[0] = SPDM_CMD_CFG_PL;
+	desc.arg[1] = spdm_data->spdm_client;
+	for (i = 0; i < SPDM_PL_COUNT - 1; i++)
+		desc.arg[i+2] = spdm_data->config_data.pl_freqs[i];
+	ext_status = spdm_ext_call(&desc, SPDM_PL_COUNT + 1);
+	if (ext_status)
+		pr_err("External command %u failed with error %u",
+			(int)desc.arg[0], ext_status);
+	*offset += size;
+out:
+	memset(buf, 0, sizeof(buf));
+	return size;
+
+}
+
+static ssize_t pl_read(struct file *file, char __user *data,
+		       size_t size, loff_t *offset)
+{
+	struct spdm_data *spdm_data = file->private_data;
+	int i = 32;
+
+	if (size > sizeof(buf))
+		return -EINVAL;
+
+	i = scnprintf(buf, size, "%u %u\n", spdm_data->config_data.pl_freqs[0],
+		     spdm_data->config_data.pl_freqs[1]);
+	i = simple_read_from_buffer(data, size, offset, buf, i);
+
+	memset(buf, 0, sizeof(buf));
+	return i;
+}
+
+static const struct file_operations pl_fops = {
+	.open = spdm_open,
+	.write = pl_write,
+	.read = pl_read,
+};
+
+static ssize_t rejrate_low_write(struct file *file, const char __user *data,
+				 size_t size, loff_t *offset)
+{
+	struct spdm_data *spdm_data = file->private_data;
+	struct spdm_args desc = { { 0 } };
+	int ext_status = 0;
+
+	if (size > sizeof(buf) - 1)
+		return -EINVAL;
+
+	if (copy_from_user(buf, data, size)) {
+		size = -EINVAL;
+		goto out;
+	}
+
+	buf[size] = '\0';
+
+	if (sscanf(buf, "%u %u\n", &spdm_data->config_data.reject_rate[0],
+	       &spdm_data->config_data.reject_rate[1]) != 2) {
+		size = -EINVAL;
+		goto out;
+	}
+
+	desc.arg[0] = SPDM_CMD_CFG_REJRATE_LOW;
+	desc.arg[1] = spdm_data->spdm_client;
+	desc.arg[2] = spdm_data->config_data.reject_rate[0];
+	desc.arg[3] = spdm_data->config_data.reject_rate[1];
+	ext_status = spdm_ext_call(&desc, 4);
+	if (ext_status)
+		pr_err("External command %u failed with error %u",
+			(int)desc.arg[0], ext_status);
+	*offset += size;
+out:
+	memset(buf, 0, sizeof(buf));
+	return size;
+}
+
+static ssize_t rejrate_low_read(struct file *file, char __user *data,
+				size_t size, loff_t *offset)
+{
+	struct spdm_data *spdm_data = file->private_data;
+	int i = 32;
+
+	if (size > sizeof(buf))
+		return -EINVAL;
+
+	i = scnprintf(buf, size, "%u %u\n",
+		     spdm_data->config_data.reject_rate[0],
+		     spdm_data->config_data.reject_rate[1]);
+
+	i = simple_read_from_buffer(data, size, offset, buf, i);
+
+	memset(buf, 0, sizeof(buf));
+	return i;
+}
+
+static const struct file_operations rrl_fops = {
+	.open = spdm_open,
+	.write = rejrate_low_write,
+	.read = rejrate_low_read,
+};
+
+static ssize_t rejrate_med_write(struct file *file, const char __user *data,
+				 size_t size, loff_t *offset)
+{
+	struct spdm_data *spdm_data = file->private_data;
+	struct spdm_args desc = { { 0 } };
+	int ext_status = 0;
+
+	if (size > sizeof(buf) - 1)
+		return -EINVAL;
+
+	if (copy_from_user(buf, data, size)) {
+		size = -EINVAL;
+		goto out;
+	}
+
+	buf[size] = '\0';
+
+	if (sscanf(buf, "%u %u\n", &spdm_data->config_data.reject_rate[2],
+	       &spdm_data->config_data.reject_rate[3]) != 2) {
+		size = -EINVAL;
+		goto out;
+	}
+
+	desc.arg[0] = SPDM_CMD_CFG_REJRATE_MED;
+	desc.arg[1] = spdm_data->spdm_client;
+	desc.arg[2] = spdm_data->config_data.reject_rate[2];
+	desc.arg[3] = spdm_data->config_data.reject_rate[3];
+	ext_status = spdm_ext_call(&desc, 4);
+	if (ext_status)
+		pr_err("External command %u failed with error %u",
+			(int)desc.arg[0], ext_status);
+	*offset += size;
+out:
+	memset(buf, 0, sizeof(buf));
+	return size;
+}
+
+static ssize_t rejrate_med_read(struct file *file, char __user *data,
+				size_t size, loff_t *offset)
+{
+	struct spdm_data *spdm_data = file->private_data;
+	int i = 32;
+
+	if (size > sizeof(buf))
+		return -EINVAL;
+
+	i = scnprintf(buf, size, "%u %u\n",
+		     spdm_data->config_data.reject_rate[2],
+		     spdm_data->config_data.reject_rate[3]);
+
+	i = simple_read_from_buffer(data, size, offset, buf, i);
+	memset(buf, 0, sizeof(buf));
+	return i;
+}
+
+static const struct file_operations rrm_fops = {
+	.open = spdm_open,
+	.write = rejrate_med_write,
+	.read = rejrate_med_read,
+};
+
+static ssize_t rejrate_high_write(struct file *file, const char __user *data,
+				  size_t size, loff_t *offset)
+{
+	struct spdm_data *spdm_data = file->private_data;
+	struct spdm_args desc = { { 0 } };
+	int ext_status = 0;
+
+	if (size > sizeof(buf) - 1)
+		return -EINVAL;
+
+	if (copy_from_user(buf, data, size)) {
+		size = -EINVAL;
+		goto out;
+	}
+
+	buf[size] = '\0';
+
+	if (sscanf(buf, "%u %u\n", &spdm_data->config_data.reject_rate[4],
+	       &spdm_data->config_data.reject_rate[5]) != 2) {
+		size = -EINVAL;
+		goto out;
+	}
+
+	desc.arg[0] = SPDM_CMD_CFG_REJRATE_HIGH;
+	desc.arg[1] = spdm_data->spdm_client;
+	desc.arg[2] = spdm_data->config_data.reject_rate[4];
+	desc.arg[3] = spdm_data->config_data.reject_rate[5];
+	ext_status = spdm_ext_call(&desc, 4);
+	if (ext_status)
+		pr_err("External command %u failed with error %u",
+			(int)desc.arg[0], ext_status);
+	*offset += size;
+out:
+	memset(buf, 0, sizeof(buf));
+	return size;
+}
+
+static ssize_t rejrate_high_read(struct file *file, char __user *data,
+				 size_t size, loff_t *offset)
+{
+	struct spdm_data *spdm_data = file->private_data;
+	int i = 32;
+
+	if (size > sizeof(buf))
+		return -EINVAL;
+
+	i = scnprintf(buf, size, "%u %u\n",
+		     spdm_data->config_data.reject_rate[4],
+		     spdm_data->config_data.reject_rate[5]);
+
+	i = simple_read_from_buffer(data, size, offset, buf, i);
+	memset(buf, 0, sizeof(buf));
+	return i;
+}
+
+static const struct file_operations rrh_fops = {
+	.open = spdm_open,
+	.write = rejrate_high_write,
+	.read = rejrate_high_read,
+};
+
+static ssize_t resptime_low_write(struct file *file, const char __user *data,
+				  size_t size, loff_t *offset)
+{
+	struct spdm_data *spdm_data = file->private_data;
+	struct spdm_args desc = { { 0 } };
+	int ext_status = 0;
+
+	if (size > sizeof(buf) - 1)
+		return -EINVAL;
+
+	if (copy_from_user(buf, data, size)) {
+		size = -EINVAL;
+		goto out;
+	}
+
+	buf[size] = '\0';
+
+	if (sscanf(buf, "%u %u\n", &spdm_data->config_data.response_time_us[0],
+	       &spdm_data->config_data.response_time_us[1]) != 2) {
+		size = -EINVAL;
+		goto out;
+	}
+
+	desc.arg[0] = SPDM_CMD_CFG_RESPTIME_LOW;
+	desc.arg[1] = spdm_data->spdm_client;
+	desc.arg[2] = spdm_data->config_data.response_time_us[0];
+	desc.arg[3] = spdm_data->config_data.response_time_us[1];
+	ext_status = spdm_ext_call(&desc, 4);
+	if (ext_status)
+		pr_err("External command %u failed with error %u",
+			(int)desc.arg[0], ext_status);
+	*offset += size;
+out:
+	memset(buf, 0, sizeof(buf));
+	return size;
+}
+
+static ssize_t resptime_low_read(struct file *file, char __user *data,
+				 size_t size, loff_t *offset)
+{
+	struct spdm_data *spdm_data = file->private_data;
+	int i = 32;
+
+	if (size > sizeof(buf))
+		return -EINVAL;
+
+	i = scnprintf(buf, size, "%u %u\n",
+		     spdm_data->config_data.response_time_us[0],
+		     spdm_data->config_data.response_time_us[1]);
+
+	i = simple_read_from_buffer(data, size, offset, buf, i);
+	memset(buf, 0, sizeof(buf));
+	return i;
+}
+
+static const struct file_operations rtl_fops = {
+	.open = spdm_open,
+	.write = resptime_low_write,
+	.read = resptime_low_read,
+};
+
+static ssize_t resptime_med_write(struct file *file, const char __user *data,
+				  size_t size, loff_t *offset)
+{
+	struct spdm_data *spdm_data = file->private_data;
+	struct spdm_args desc = { { 0 } };
+	int ext_status = 0;
+
+	if (size > sizeof(buf) - 1)
+		return -EINVAL;
+
+	if (copy_from_user(buf, data, size)) {
+		size = -EINVAL;
+		goto out;
+	}
+
+	buf[size] = '\0';
+
+	if (sscanf(buf, "%u %u\n", &spdm_data->config_data.response_time_us[2],
+	       &spdm_data->config_data.response_time_us[3]) != 2) {
+		size = -EINVAL;
+		goto out;
+	}
+
+	desc.arg[0] = SPDM_CMD_CFG_RESPTIME_MED;
+	desc.arg[1] = spdm_data->spdm_client;
+	desc.arg[2] = spdm_data->config_data.response_time_us[2];
+	desc.arg[3] = spdm_data->config_data.response_time_us[3];
+	ext_status = spdm_ext_call(&desc, 4);
+	if (ext_status)
+		pr_err("External command %u failed with error %u",
+			(int)desc.arg[0], ext_status);
+	*offset += size;
+out:
+	memset(buf, 0, sizeof(buf));
+	return size;
+}
+
+static ssize_t resptime_med_read(struct file *file, char __user *data,
+				 size_t size, loff_t *offset)
+{
+	struct spdm_data *spdm_data = file->private_data;
+	int i = 32;
+
+	if (size > sizeof(buf))
+		return -EINVAL;
+
+	i = scnprintf(buf, size, "%u %u\n",
+		     spdm_data->config_data.response_time_us[2],
+		     spdm_data->config_data.response_time_us[3]);
+
+	i = simple_read_from_buffer(data, size, offset, buf, i);
+	memset(buf, 0, sizeof(buf));
+	return i;
+}
+
+static const struct file_operations rtm_fops = {
+	.open = spdm_open,
+	.write = resptime_med_write,
+	.read = resptime_med_read,
+};
+
+static ssize_t resptime_high_write(struct file *file, const char __user *data,
+				   size_t size, loff_t *offset)
+{
+	struct spdm_data *spdm_data = file->private_data;
+	struct spdm_args desc = { { 0 } };
+	int ext_status = 0;
+
+	if (size > sizeof(buf) - 1)
+		return -EINVAL;
+
+	if (copy_from_user(buf, data, size)) {
+		size = -EINVAL;
+		goto out;
+	}
+
+	buf[size] = '\0';
+
+	if (sscanf(buf, "%u %u\n", &spdm_data->config_data.response_time_us[4],
+	       &spdm_data->config_data.response_time_us[5]) != 2) {
+		size = -EINVAL;
+		goto out;
+	}
+
+	desc.arg[0] = SPDM_CMD_CFG_RESPTIME_HIGH;
+	desc.arg[1] = spdm_data->spdm_client;
+	desc.arg[2] = spdm_data->config_data.response_time_us[4];
+	desc.arg[3] = spdm_data->config_data.response_time_us[5];
+	ext_status = spdm_ext_call(&desc, 4);
+	if (ext_status)
+		pr_err("External command %u failed with error %u",
+			(int)desc.arg[0], ext_status);
+	*offset += size;
+out:
+	memset(buf, 0, sizeof(buf));
+	return size;
+}
+
+static ssize_t resptime_high_read(struct file *file, char __user *data,
+				  size_t size, loff_t *offset)
+{
+	struct spdm_data *spdm_data = file->private_data;
+	int i = 32;
+
+	if (size > sizeof(buf))
+		return -EINVAL;
+
+	i = scnprintf(buf, size, "%u %u\n",
+		     spdm_data->config_data.response_time_us[4],
+		     spdm_data->config_data.response_time_us[5]);
+
+	i = simple_read_from_buffer(data, size, offset, buf, i);
+	memset(buf, 0, sizeof(buf));
+	return i;
+}
+
+static const struct file_operations rth_fops = {
+	.open = spdm_open,
+	.write = resptime_high_write,
+	.read = resptime_high_read,
+};
+
+static ssize_t cciresptime_low_write(struct file *file,
+				     const char __user *data, size_t size,
+				     loff_t *offset)
+{
+	struct spdm_data *spdm_data = file->private_data;
+	struct spdm_args desc = { { 0 } };
+	int ext_status = 0;
+
+	if (size > sizeof(buf) - 1)
+		return -EINVAL;
+
+	if (copy_from_user(buf, data, size)) {
+		size = -EINVAL;
+		goto out;
+	}
+
+	buf[size] = '\0';
+
+	if (sscanf(buf, "%u %u\n",
+		   &spdm_data->config_data.cci_response_time_us[0],
+		   &spdm_data->config_data.cci_response_time_us[1]) != 2) {
+		size = -EINVAL;
+		goto out;
+	}
+
+	desc.arg[0] = SPDM_CMD_CFG_CCIRESPTIME_LOW;
+	desc.arg[1] = spdm_data->spdm_client;
+	desc.arg[2] = spdm_data->config_data.cci_response_time_us[0];
+	desc.arg[3] = spdm_data->config_data.cci_response_time_us[1];
+	ext_status = spdm_ext_call(&desc, 4);
+	if (ext_status)
+		pr_err("External command %u failed with error %u",
+			(int)desc.arg[0], ext_status);
+	*offset += size;
+out:
+	memset(buf, 0, sizeof(buf));
+	return size;
+}
+
+static ssize_t cciresptime_low_read(struct file *file, char __user *data,
+				    size_t size, loff_t *offset)
+{
+	struct spdm_data *spdm_data = file->private_data;
+	int i = 32;
+
+	if (size > sizeof(buf))
+		return -EINVAL;
+
+	i = scnprintf(buf, size, "%u %u\n",
+		     spdm_data->config_data.cci_response_time_us[0],
+		     spdm_data->config_data.cci_response_time_us[1]);
+
+	i = simple_read_from_buffer(data, size, offset, buf, i);
+	memset(buf, 0, sizeof(buf));
+	return i;
+}
+
+static const struct file_operations ccil_fops = {
+	.open = spdm_open,
+	.write = cciresptime_low_write,
+	.read = cciresptime_low_read,
+};
+
+static ssize_t cciresptime_med_write(struct file *file,
+				     const char __user *data, size_t size,
+				     loff_t *offset)
+{
+	struct spdm_data *spdm_data = file->private_data;
+	struct spdm_args desc = { { 0 } };
+	int ext_status = 0;
+
+	if (size > sizeof(buf) - 1)
+		return -EINVAL;
+
+	if (copy_from_user(buf, data, size)) {
+		size = -EINVAL;
+		goto out;
+	}
+
+	buf[size] = '\0';
+
+	if (sscanf(buf, "%u %u\n",
+		   &spdm_data->config_data.cci_response_time_us[2],
+		   &spdm_data->config_data.cci_response_time_us[3]) != 2) {
+		size = -EINVAL;
+		goto out;
+	}
+
+	desc.arg[0] = SPDM_CMD_CFG_CCIRESPTIME_MED;
+	desc.arg[1] = spdm_data->spdm_client;
+	desc.arg[2] = spdm_data->config_data.cci_response_time_us[2];
+	desc.arg[3] = spdm_data->config_data.cci_response_time_us[3];
+	ext_status = spdm_ext_call(&desc, 4);
+	if (ext_status)
+		pr_err("External command %u failed with error %d\n",
+			(unsigned int)desc.arg[0], ext_status);
+	*offset += size;
+out:
+	memset(buf, 0, sizeof(buf));
+	return size;
+}
+
+static ssize_t cciresptime_med_read(struct file *file, char __user *data,
+				    size_t size, loff_t *offset)
+{
+	struct spdm_data *spdm_data = file->private_data;
+	int i;
+
+	if (size > sizeof(buf))
+		return -EINVAL;
+
+	i = scnprintf(buf, size, "%u %u\n",
+		     spdm_data->config_data.cci_response_time_us[2],
+		     spdm_data->config_data.cci_response_time_us[3]);
+
+	i = simple_read_from_buffer(data, size, offset, buf, i);
+	memset(buf, 0, sizeof(buf));
+	return i;
+}
+
+static const struct file_operations ccim_fops = {
+	.open = spdm_open,
+	.write = cciresptime_med_write,
+	.read = cciresptime_med_read,
+};
+
+static ssize_t cciresptime_high_write(struct file *file,
+				      const char __user *data,
+				      size_t size, loff_t *offset)
+{
+	struct spdm_data *spdm_data = file->private_data;
+	struct spdm_args desc = { { 0 } };
+	int ext_status = 0;
+
+	if (size > sizeof(buf) - 1)
+		return -EINVAL;
+
+	if (copy_from_user(buf, data, size)) {
+		size = -EINVAL;
+		goto out;
+	}
+
+	buf[size] = '\0';
+
+	if (sscanf(buf, "%u %u\n",
+		   &spdm_data->config_data.cci_response_time_us[4],
+		   &spdm_data->config_data.cci_response_time_us[5]) != 2) {
+		size = -EINVAL;
+		goto out;
+	}
+
+	desc.arg[0] = SPDM_CMD_CFG_CCIRESPTIME_HIGH;
+	desc.arg[1] = spdm_data->spdm_client;
+	desc.arg[2] = spdm_data->config_data.cci_response_time_us[4];
+	desc.arg[3] = spdm_data->config_data.cci_response_time_us[5];
+	ext_status = spdm_ext_call(&desc, 4);
+	if (ext_status)
+		pr_err("External command %u failed with error %d\n",
+			(unsigned int)desc.arg[0], ext_status);
+	*offset += size;
+out:
+	memset(buf, 0, sizeof(buf));
+	return size;
+}
+
+static ssize_t cciresptime_high_read(struct file *file, char __user *data,
+				     size_t size, loff_t *offset)
+{
+	struct spdm_data *spdm_data = file->private_data;
+	int i;
+
+	if (size > sizeof(buf))
+		return -EINVAL;
+
+	i = scnprintf(buf, size, "%u %u\n",
+		     spdm_data->config_data.cci_response_time_us[4],
+		     spdm_data->config_data.cci_response_time_us[5]);
+
+	i = simple_read_from_buffer(data, size, offset, buf, i);
+	memset(buf, 0, sizeof(buf));
+	return i;
+}
+
+static const struct file_operations ccih_fops = {
+	.open = spdm_open,
+	.write = cciresptime_high_write,
+	.read = cciresptime_high_read,
+};
+
+static ssize_t cci_max_write(struct file *file, const char __user *data,
+			     size_t size, loff_t *offset)
+{
+	struct spdm_data *spdm_data = file->private_data;
+	struct spdm_args desc = { { 0 } };
+	int ext_status = 0;
+
+	if (size > sizeof(buf) - 1)
+		return -EINVAL;
+
+	if (copy_from_user(buf, data, size)) {
+		size = -EINVAL;
+		goto out;
+	}
+
+	buf[size] = '\0';
+
+	if (sscanf(buf, "%u\n", &spdm_data->config_data.max_cci_freq) != 1) {
+		size = -EINVAL;
+		goto out;
+	}
+
+	desc.arg[0] = SPDM_CMD_CFG_MAXCCI;
+	desc.arg[1] = spdm_data->spdm_client;
+	desc.arg[2] = spdm_data->config_data.max_cci_freq;
+	ext_status = spdm_ext_call(&desc, 3);
+	if (ext_status)
+		pr_err("External command %u failed with error %d\n",
+			(unsigned int)desc.arg[0], ext_status);
+	*offset += size;
+out:
+	memset(buf, 0, sizeof(buf));
+	return size;
+}
+
+static ssize_t cci_max_read(struct file *file, char __user *data,
+			    size_t size, loff_t *offset)
+{
+	struct spdm_data *spdm_data = file->private_data;
+	int i;
+
+	if (size > sizeof(buf))
+		return -EINVAL;
+
+	i = scnprintf(buf, size, "%u\n", spdm_data->config_data.max_cci_freq);
+
+	i = simple_read_from_buffer(data, size, offset, buf, i);
+	memset(buf, 0, sizeof(buf));
+	return i;
+}
+
+static const struct file_operations ccimax_fops = {
+	.open = spdm_open,
+	.write = cci_max_write,
+	.read = cci_max_read,
+};
+
+static ssize_t vote_cfg_write(struct file *file, const char __user *data,
+			      size_t size, loff_t *offset)
+{
+	struct spdm_data *spdm_data = file->private_data;
+	struct spdm_args desc = { { 0 } };
+	int ext_status = 0;
+
+	if (size > sizeof(buf) - 1)
+		return -EINVAL;
+
+	if (copy_from_user(buf, data, size)) {
+		size = -EINVAL;
+		goto out;
+	}
+
+	buf[size] = '\0';
+
+	if (sscanf(buf, "%u %u %u %u\n", &spdm_data->config_data.upstep,
+	       &spdm_data->config_data.downstep,
+	       &spdm_data->config_data.max_vote,
+	       &spdm_data->config_data.up_step_multp) != 4) {
+		size = -EINVAL;
+		goto out;
+	}
+
+	desc.arg[0] = SPDM_CMD_CFG_VOTES;
+	desc.arg[1] = spdm_data->spdm_client;
+	desc.arg[2] = spdm_data->config_data.upstep;
+	desc.arg[3] = spdm_data->config_data.downstep;
+	desc.arg[4] = spdm_data->config_data.max_vote;
+	desc.arg[5] = spdm_data->config_data.up_step_multp;
+	ext_status = spdm_ext_call(&desc, 6);
+	if (ext_status)
+		pr_err("External command %u failed with error %d\n",
+			(unsigned int)desc.arg[0], ext_status);
+	*offset += size;
+out:
+	memset(buf, 0, sizeof(buf));
+	return size;
+}
+
+static ssize_t vote_cfg_read(struct file *file, char __user *data,
+			     size_t size, loff_t *offset)
+{
+	struct spdm_data *spdm_data = file->private_data;
+	int i;
+
+	if (size > sizeof(buf))
+		return -EINVAL;
+
+	i = scnprintf(buf, size, "%u %u %u %u\n",
+		     spdm_data->config_data.upstep,
+		     spdm_data->config_data.downstep,
+		     spdm_data->config_data.max_vote,
+		     spdm_data->config_data.up_step_multp);
+
+	i = simple_read_from_buffer(data, size, offset, buf, i);
+	memset(buf, 0, sizeof(buf));
+	return i;
+}
+
+static const struct file_operations vote_fops = {
+	.open = spdm_open,
+	.write = vote_cfg_write,
+	.read = vote_cfg_read,
+};
+
+void spdm_init_debugfs(struct device *dev)
+{
+	struct spdm_data *data = dev_get_drvdata(dev);
+
+	data->debugfs_dir = debugfs_create_dir(dev_name(dev), NULL);
+
+	debugfs_create_file("enable", 0600, data->debugfs_dir, data,
+			    &enable_fops);
+	debugfs_create_file("pl_freqs", 0600, data->debugfs_dir, data,
+			    &pl_fops);
+	debugfs_create_file("rej_rate_low", 0600, data->debugfs_dir, data,
+			    &rrl_fops);
+	debugfs_create_file("rej_rate_med", 0600, data->debugfs_dir, data,
+			    &rrm_fops);
+	debugfs_create_file("rej_rate_high", 0600, data->debugfs_dir, data,
+			    &rrh_fops);
+	debugfs_create_file("resp_time_low", 0600, data->debugfs_dir, data,
+			    &rtl_fops);
+	debugfs_create_file("resp_time_med", 0600, data->debugfs_dir, data,
+			    &rtm_fops);
+	debugfs_create_file("resp_time_high", 0600, data->debugfs_dir, data,
+			    &rth_fops);
+	debugfs_create_file("cci_resp_time_low", 0600, data->debugfs_dir, data,
+			    &ccil_fops);
+	debugfs_create_file("cci_resp_time_med", 0600, data->debugfs_dir, data,
+			    &ccim_fops);
+	debugfs_create_file("cci_resp_time_high", 0600, data->debugfs_dir,
+			    data, &ccih_fops);
+	debugfs_create_file("cci_max", 0600, data->debugfs_dir, data,
+			    &ccimax_fops);
+	debugfs_create_file("vote_cfg", 0600, data->debugfs_dir, data,
+			    &vote_fops);
+}
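+
+/*
+ * Illustrative usage from userspace (the directory name comes from
+ * dev_name(dev), so the exact path below is an example only):
+ *
+ *	echo 1 > /sys/kernel/debug/<spdm-device>/enable
+ *	echo "4000 5000" > /sys/kernel/debug/<spdm-device>/resp_time_high
+ *	cat /sys/kernel/debug/<spdm-device>/vote_cfg
+ *
+ * Each write is parsed with sscanf() as in the handlers above and then
+ * forwarded to the SPDM firmware through spdm_ext_call().
+ */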
+
+void spdm_remove_debugfs(struct spdm_data *data)
+{
+	debugfs_remove_recursive(data->debugfs_dir);
+}
+
+MODULE_LICENSE("GPL v2");
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/devfreq/devfreq_spdm.h	2019-01-22 16:16:23.135243074 +0100
@@ -0,0 +1,130 @@
+/*
+ * Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef DEVFREQ_SPDM_H
+#define DEVFREQ_SPDM_H
+
+#include <linux/list.h>
+#include <soc/qcom/hvc.h>
+#include <soc/qcom/scm.h>
+
+enum pl_levels { SPDM_PL1, SPDM_PL2, SPDM_PL3, SPDM_PL_COUNT };
+enum actions { SPDM_UP, SPDM_DOWN };
+enum spdm_client { SPDM_CLIENT_CPU, SPDM_CLIENT_GPU, SPDM_CLIENT_COUNT };
+
+struct spdm_config_data {
+	/* in MB/s */
+	u32 upstep;
+	u32 downstep;
+	u32 up_step_multp;
+
+	u32 num_ports;
+	u32 *ports;
+	u32 aup;
+	u32 adown;
+	u32 bucket_size;
+
+	/*
+	 * If we define n PL (performance) levels, we need n-1 frequencies
+	 * to tell where to switch from one PL to another.
+	 */
+	/* in Hz */
+	u32 pl_freqs[SPDM_PL_COUNT - 1];
+	/*
+	 * We have a low threshold and a high threshold for each PL to
+	 * support the two-port solution, so we need twice as many entries
+	 * as performance levels.
+	 */
+	/* in 100ths of a percent */
+	u32 reject_rate[SPDM_PL_COUNT * 2];
+	u32 response_time_us[SPDM_PL_COUNT * 2];
+	u32 cci_response_time_us[SPDM_PL_COUNT * 2];
+	/* in Hz */
+	u32 max_cci_freq;
+	/* in MB/s */
+	u32 max_vote;
+
+};
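+
+/*
+ * Worked example of the sizing above: with SPDM_PL_COUNT == 3, pl_freqs
+ * holds 2 crossover frequencies (PL1/PL2 and PL2/PL3), while reject_rate,
+ * response_time_us and cci_response_time_us each hold 6 entries (a low
+ * and a high threshold per performance level).
+ */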
+
+struct spdm_data {
+	/* bus scaling data */
+	int cur_idx;
+	struct msm_bus_scale_pdata *pdata;
+	u32 bus_scale_client_id;
+	/* in MB/s */
+	u32 new_bw;
+
+	/* devfreq data */
+	struct devfreq *devfreq;
+	struct devfreq_dev_profile *profile;
+	unsigned long action;
+	int window;
+	struct clk *cci_clk;
+
+	/* spdm hw/gov data */
+	struct spdm_config_data config_data;
+
+	enum spdm_client spdm_client;
+	/* list used by governor to keep track of spdm devices */
+	struct list_head list;
+
+	struct dentry *debugfs_dir;
+
+	bool enabled;
+};
+
+extern void spdm_init_debugfs(struct device *dev);
+extern void spdm_remove_debugfs(struct spdm_data *data);
+
+#define SPDM_HYP_FNID 5
+#define SPDM_SCM_SVC_ID 0x9
+#define SPDM_SCM_CMD_ID 0x4
+#define SPDM_TZ_VERSION 0x20000 /* TZ SPDM driver version */
+/* SPDM CMD ID's for hypervisor/SCM */
+#define SPDM_CMD_GET_VERSION 0
+#define SPDM_CMD_GET_BW_ALL 1
+#define SPDM_CMD_GET_BW_SPECIFIC 2
+#define SPDM_CMD_ENABLE 3
+#define SPDM_CMD_DISABLE 4
+#define SPDM_CMD_CFG_PORTS 5
+#define SPDM_CMD_CFG_FLTR 6
+#define SPDM_CMD_CFG_PL 7
+#define SPDM_CMD_CFG_REJRATE_LOW 8
+#define SPDM_CMD_CFG_REJRATE_MED 9
+#define SPDM_CMD_CFG_REJRATE_HIGH 10
+#define SPDM_CMD_CFG_RESPTIME_LOW 11
+#define SPDM_CMD_CFG_RESPTIME_MED 12
+#define SPDM_CMD_CFG_RESPTIME_HIGH 13
+#define SPDM_CMD_CFG_CCIRESPTIME_LOW 14
+#define SPDM_CMD_CFG_CCIRESPTIME_MED 15
+#define SPDM_CMD_CFG_CCIRESPTIME_HIGH 16
+#define SPDM_CMD_CFG_MAXCCI 17
+#define SPDM_CMD_CFG_VOTES 18
+
+#define SPDM_MAX_ARGS 6
+#define SPDM_MAX_RETS 3
+
+struct spdm_args {
+	u64 arg[SPDM_MAX_ARGS];
+	u64 ret[SPDM_MAX_RETS];
+};
+
+extern int __spdm_hyp_call(struct spdm_args *args, int num_args);
+extern int __spdm_scm_call(struct spdm_args *args, int num_args);
+
+#ifdef CONFIG_SPDM_SCM
+#define spdm_ext_call __spdm_scm_call
+#else
+#define spdm_ext_call __spdm_hyp_call
+#endif
+#endif
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/devfreq/governor_bw_hwmon.c	2019-01-22 16:16:23.135243074 +0100
@@ -0,0 +1,983 @@
+/*
+ * Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "bw-hwmon: " fmt
+
+#include <linux/kernel.h>
+#include <linux/sizes.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/ktime.h>
+#include <linux/time.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/mutex.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/devfreq.h>
+#include <trace/events/power.h>
+#include "governor.h"
+#include "governor_bw_hwmon.h"
+
+#define NUM_MBPS_ZONES		10
+struct hwmon_node {
+	unsigned int guard_band_mbps;
+	unsigned int decay_rate;
+	unsigned int io_percent;
+	unsigned int bw_step;
+	unsigned int sample_ms;
+	unsigned int up_scale;
+	unsigned int up_thres;
+	unsigned int down_thres;
+	unsigned int down_count;
+	unsigned int hist_memory;
+	unsigned int hyst_trigger_count;
+	unsigned int hyst_length;
+	unsigned int idle_mbps;
+	unsigned int low_power_ceil_mbps;
+	unsigned int low_power_io_percent;
+	unsigned int low_power_delay;
+	unsigned int mbps_zones[NUM_MBPS_ZONES];
+
+	unsigned long prev_ab;
+	unsigned long *dev_ab;
+	unsigned long resume_freq;
+	unsigned long resume_ab;
+	unsigned long bytes;
+	unsigned long max_mbps;
+	unsigned long hist_max_mbps;
+	unsigned long hist_mem;
+	unsigned long hyst_peak;
+	unsigned long hyst_mbps;
+	unsigned long hyst_trig_win;
+	unsigned long hyst_en;
+	unsigned long above_low_power;
+	unsigned long prev_req;
+	unsigned int wake;
+	unsigned int down_cnt;
+	ktime_t prev_ts;
+	ktime_t hist_max_ts;
+	bool sampled;
+	bool mon_started;
+	struct list_head list;
+	void *orig_data;
+	struct bw_hwmon *hw;
+	struct devfreq_governor *gov;
+	struct attribute_group *attr_grp;
+};
+
+#define UP_WAKE 1
+#define DOWN_WAKE 2
+static DEFINE_SPINLOCK(irq_lock);
+
+static LIST_HEAD(hwmon_list);
+static DEFINE_MUTEX(list_lock);
+static DEFINE_MUTEX(sync_lock);
+
+
+static int use_cnt;
+static DEFINE_MUTEX(state_lock);
+
+#define show_attr(name) \
+static ssize_t show_##name(struct device *dev,				\
+			struct device_attribute *attr, char *buf)	\
+{									\
+	struct devfreq *df = to_devfreq(dev);				\
+	struct hwmon_node *hw = df->data;				\
+	return snprintf(buf, PAGE_SIZE, "%u\n", hw->name);		\
+}
+
+#define store_attr(name, _min, _max) \
+static ssize_t store_##name(struct device *dev,				\
+			struct device_attribute *attr, const char *buf,	\
+			size_t count)					\
+{									\
+	struct devfreq *df = to_devfreq(dev);				\
+	struct hwmon_node *hw = df->data;				\
+	int ret;							\
+	unsigned int val;						\
+	ret = sscanf(buf, "%u", &val);					\
+	if (ret != 1)							\
+		return -EINVAL;						\
+	val = max(val, _min);						\
+	val = min(val, _max);						\
+	hw->name = val;							\
+	return count;							\
+}
+
+#define gov_attr(__attr, min, max)	\
+show_attr(__attr)			\
+store_attr(__attr, min, max)		\
+static DEVICE_ATTR(__attr, 0644, show_##__attr, store_##__attr)
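+
+/*
+ * For example, gov_attr(sample_ms, 1U, 50U) below expands to
+ * show_sample_ms(), store_sample_ms() (which clamps written values to
+ * the range [1, 50]) and a dev_attr_sample_ms attribute with 0644
+ * permissions.
+ */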
+
+#define show_list_attr(name, n) \
+static ssize_t show_list_##name(struct device *dev,			\
+			struct device_attribute *attr, char *buf)	\
+{									\
+	struct devfreq *df = to_devfreq(dev);				\
+	struct hwmon_node *hw = df->data;				\
+	unsigned int i, cnt = 0;					\
+									\
+	for (i = 0; i < n && hw->name[i]; i++)				\
+		cnt += snprintf(buf + cnt, PAGE_SIZE - cnt, "%u ",	\
+				hw->name[i]);				\
+	cnt += snprintf(buf + cnt, PAGE_SIZE - cnt, "\n");		\
+	return cnt;							\
+}
+
+#define store_list_attr(name, n, _min, _max) \
+static ssize_t store_list_##name(struct device *dev,			\
+			struct device_attribute *attr, const char *buf,	\
+			size_t count)					\
+{									\
+	struct devfreq *df = to_devfreq(dev);				\
+	struct hwmon_node *hw = df->data;				\
+	int ret;							\
+	unsigned int i = 0, val;					\
+									\
+	do {								\
+		ret = sscanf(buf, "%u", &val);				\
+		if (ret != 1)						\
+			break;						\
+		buf = strnchr(buf, PAGE_SIZE, ' ');			\
+		if (buf)						\
+			buf++;						\
+		val = max(val, _min);					\
+		val = min(val, _max);					\
+		hw->name[i] = val;					\
+		i++;							\
+	} while (buf && i < n - 1);					\
+	if (i < 1)							\
+		return -EINVAL;						\
+	hw->name[i] = 0;						\
+	return count;							\
+}
+
+#define gov_list_attr(__attr, n, min, max)	\
+show_list_attr(__attr, n)			\
+store_list_attr(__attr, n, min, max)		\
+static DEVICE_ATTR(__attr, 0644, show_list_##__attr, store_list_##__attr)
+
+#define MIN_MS	10U
+#define MAX_MS	500U
+
+/* Returns MBps of read/writes for the sampling window. */
+static unsigned int bytes_to_mbps(long long bytes, unsigned int us)
+{
+	bytes *= USEC_PER_SEC;
+	do_div(bytes, us);
+	bytes = DIV_ROUND_UP_ULL(bytes, SZ_1M);
+	return bytes;
+}
+
+static unsigned int mbps_to_bytes(unsigned long mbps, unsigned int ms)
+{
+	mbps *= ms;
+	mbps = DIV_ROUND_UP(mbps, MSEC_PER_SEC);
+	mbps *= SZ_1M;
+	return mbps;
+}
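+
+/*
+ * Worked example for the two helpers above (illustrative numbers):
+ * 100 MiB moved in a 50 ms window gives bytes_to_mbps(104857600, 50000)
+ * == DIV_ROUND_UP((104857600 * USEC_PER_SEC) / 50000, SZ_1M) == 2000
+ * MBps, and mbps_to_bytes(2000, 50) converts back to 104857600 bytes.
+ */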
+
+static int __bw_hwmon_sw_sample_end(struct bw_hwmon *hwmon)
+{
+	struct devfreq *df;
+	struct hwmon_node *node;
+	ktime_t ts;
+	unsigned long bytes, mbps;
+	unsigned int us;
+	int wake = 0;
+
+	df = hwmon->df;
+	node = df->data;
+
+	ts = ktime_get();
+	us = ktime_to_us(ktime_sub(ts, node->prev_ts));
+
+	bytes = hwmon->get_bytes_and_clear(hwmon);
+	bytes += node->bytes;
+	node->bytes = 0;
+
+	mbps = bytes_to_mbps(bytes, us);
+	node->max_mbps = max(node->max_mbps, mbps);
+
+	/*
+	 * If the measured bandwidth in a micro sample is greater than the
+	 * wake-up threshold, it indicates a non-trivial increase in load.
+	 * So, have the governor ignore historical idle time or low
+	 * bandwidth usage and do the bandwidth calculation based on just
+	 * this micro sample.
+	 */
+	if (mbps > node->hw->up_wake_mbps) {
+		wake = UP_WAKE;
+	} else if (mbps < node->hw->down_wake_mbps) {
+		if (node->down_cnt)
+			node->down_cnt--;
+		if (node->down_cnt <= 0)
+			wake = DOWN_WAKE;
+	}
+
+	node->prev_ts = ts;
+	node->wake = wake;
+	node->sampled = true;
+
+	trace_bw_hwmon_meas(dev_name(df->dev.parent),
+				mbps,
+				us,
+				wake);
+
+	return wake;
+}
+
+static int __bw_hwmon_hw_sample_end(struct bw_hwmon *hwmon)
+{
+	struct devfreq *df;
+	struct hwmon_node *node;
+	unsigned long bytes, mbps;
+	int wake = 0;
+
+	df = hwmon->df;
+	node = df->data;
+
+	/*
+	 * If this read is in response to an IRQ, the HW monitor should
+	 * return the measurement in the micro sample that triggered the IRQ.
+	 * Otherwise, it should return the maximum measured value in any
+	 * micro sample since the last time we called get_bytes_and_clear()
+	 */
+	bytes = hwmon->get_bytes_and_clear(hwmon);
+	mbps = bytes_to_mbps(bytes, node->sample_ms * USEC_PER_MSEC);
+	node->max_mbps = mbps;
+
+	if (mbps > node->hw->up_wake_mbps)
+		wake = UP_WAKE;
+	else if (mbps < node->hw->down_wake_mbps)
+		wake = DOWN_WAKE;
+
+	node->wake = wake;
+	node->sampled = true;
+
+	trace_bw_hwmon_meas(dev_name(df->dev.parent),
+				mbps,
+				node->sample_ms * USEC_PER_MSEC,
+				wake);
+
+	return 1;
+}
+
+static int __bw_hwmon_sample_end(struct bw_hwmon *hwmon)
+{
+	if (hwmon->set_hw_events)
+		return __bw_hwmon_hw_sample_end(hwmon);
+	else
+		return __bw_hwmon_sw_sample_end(hwmon);
+}
+
+int bw_hwmon_sample_end(struct bw_hwmon *hwmon)
+{
+	unsigned long flags;
+	int wake;
+
+	spin_lock_irqsave(&irq_lock, flags);
+	wake = __bw_hwmon_sample_end(hwmon);
+	spin_unlock_irqrestore(&irq_lock, flags);
+
+	return wake;
+}
+
+unsigned long to_mbps_zone(struct hwmon_node *node, unsigned long mbps)
+{
+	int i;
+
+	for (i = 0; i < NUM_MBPS_ZONES && node->mbps_zones[i]; i++)
+		if (node->mbps_zones[i] >= mbps)
+			return node->mbps_zones[i];
+
+	return node->hw->df->max_freq;
+}
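+
+/*
+ * Example with hypothetical zones: for mbps_zones = { 1000, 2000, 4000, 0 },
+ * to_mbps_zone(node, 1500) returns 2000, while to_mbps_zone(node, 5000)
+ * runs past the last zone and falls back to df->max_freq.
+ */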
+
+#define MIN_MBPS	500UL
+#define HIST_PEAK_TOL	60
+static unsigned long get_bw_and_set_irq(struct hwmon_node *node,
+					unsigned long *freq, unsigned long *ab)
+{
+	unsigned long meas_mbps, thres, flags, req_mbps, adj_mbps;
+	unsigned long meas_mbps_zone;
+	unsigned long hist_lo_tol, hyst_lo_tol;
+	struct bw_hwmon *hw = node->hw;
+	unsigned int new_bw, io_percent;
+	ktime_t ts;
+	unsigned int ms = 0;
+
+	spin_lock_irqsave(&irq_lock, flags);
+
+	if (!hw->set_hw_events) {
+		ts = ktime_get();
+		ms = ktime_to_ms(ktime_sub(ts, node->prev_ts));
+	}
+	if (!node->sampled || ms >= node->sample_ms)
+		__bw_hwmon_sample_end(node->hw);
+	node->sampled = false;
+
+	req_mbps = meas_mbps = node->max_mbps;
+	node->max_mbps = 0;
+
+	hist_lo_tol = (node->hist_max_mbps * HIST_PEAK_TOL) / 100;
+	/* Remember historic peak in the past hist_mem decision windows. */
+	if (meas_mbps > node->hist_max_mbps || !node->hist_mem) {
+		/* If new max or no history */
+		node->hist_max_mbps = meas_mbps;
+		node->hist_mem = node->hist_memory;
+	} else if (meas_mbps >= hist_lo_tol) {
+		/*
+		 * If subsequent peaks come close (within tolerance) to but
+		 * less than the historic peak, then reset the history start,
+		 * but not the peak value.
+		 */
+		node->hist_mem = node->hist_memory;
+	} else {
+		/* Count down history expiration. */
+		if (node->hist_mem)
+			node->hist_mem--;
+	}
+
+	/* Keep track of whether we are in low power mode consistently. */
+	if (meas_mbps > node->low_power_ceil_mbps)
+		node->above_low_power = node->low_power_delay;
+	if (node->above_low_power)
+		node->above_low_power--;
+
+	if (node->above_low_power)
+		io_percent = node->io_percent;
+	else
+		io_percent = node->low_power_io_percent;
+
+	/*
+	 * Compute the AB value that corresponds to the lowest mbps zone
+	 * greater than or equal to the "frequency" the current measurement
+	 * would pick. This upper limit helps keep any prediction mechanism
+	 * power-friendly.
+	 */
+	meas_mbps_zone = (meas_mbps * 100) / io_percent;
+	meas_mbps_zone = to_mbps_zone(node, meas_mbps_zone);
+	meas_mbps_zone = (meas_mbps_zone * io_percent) / 100;
+	meas_mbps_zone = max(meas_mbps, meas_mbps_zone);
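+	/*
+	 * Worked example (hypothetical values): meas_mbps = 800 and
+	 * io_percent = 50 give (800 * 100) / 50 = 1600, which a 2000 MBps
+	 * zone maps back to (2000 * 50) / 100 = 1000, so meas_mbps_zone =
+	 * max(800, 1000) = 1000.
+	 */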
+
+	/*
+	 * If this is a wake-up due to a BW increase, vote for a much higher
+	 * BW than what we measure, to stay ahead of the increasing traffic,
+	 * and then set things up to fall back to the measured BW if we see
+	 * down_count short sample windows of low traffic.
+	 */
+	if (node->wake == UP_WAKE) {
+		req_mbps += ((meas_mbps - node->prev_req)
+				* node->up_scale) / 100;
+		/*
+		 * However, if the measured load is less than the historic
+		 * peak while the over-request exceeds it, limit the
+		 * over-request to the historic peak.
+		 */
+		if (req_mbps > node->hist_max_mbps
+		    && meas_mbps < node->hist_max_mbps)
+			req_mbps = node->hist_max_mbps;
+
+		req_mbps = min(req_mbps, meas_mbps_zone);
+	}
+
+	hyst_lo_tol = (node->hyst_mbps * HIST_PEAK_TOL) / 100;
+	if (meas_mbps > node->hyst_mbps && meas_mbps > MIN_MBPS) {
+		hyst_lo_tol = (meas_mbps * HIST_PEAK_TOL) / 100;
+		node->hyst_peak = 0;
+		node->hyst_trig_win = node->hyst_length;
+		node->hyst_mbps = meas_mbps;
+	}
+
+	/*
+	 * Check node->max_mbps to avoid double counting peaks that cause
+	 * early termination of a window.
+	 */
+	if (meas_mbps >= hyst_lo_tol && meas_mbps > MIN_MBPS
+	    && !node->max_mbps) {
+		node->hyst_peak++;
+		if (node->hyst_peak >= node->hyst_trigger_count
+		    || node->hyst_en)
+			node->hyst_en = node->hyst_length;
+	}
+
+	if (node->hyst_trig_win)
+		node->hyst_trig_win--;
+	if (node->hyst_en)
+		node->hyst_en--;
+
+	if (!node->hyst_trig_win && !node->hyst_en) {
+		node->hyst_peak = 0;
+		node->hyst_mbps = 0;
+	}
+
+	if (node->hyst_en) {
+		if (meas_mbps > node->idle_mbps)
+			req_mbps = max(req_mbps, node->hyst_mbps);
+	}
+
+	/* Stretch the short sample window size if the traffic is too low. */
+	if (meas_mbps < MIN_MBPS) {
+		hw->up_wake_mbps = (max(MIN_MBPS, req_mbps)
+					* (100 + node->up_thres)) / 100;
+		hw->down_wake_mbps = 0;
+		hw->undo_over_req_mbps = 0;
+		thres = mbps_to_bytes(max(MIN_MBPS, req_mbps / 2),
+					node->sample_ms);
+	} else {
+		/*
+		 * The up-wake and down-wake thresholds are intentionally
+		 * percentages of req_mbps and meas_mbps, respectively, to
+		 * make sure the over-requesting phase is handled properly.
+		 * We only want to wake up and reduce the vote when the
+		 * measured mbps falls below the previous measurement that
+		 * caused the "over request".
+		 */
+		hw->up_wake_mbps = (req_mbps * (100 + node->up_thres)) / 100;
+		hw->down_wake_mbps = (meas_mbps * node->down_thres) / 100;
+		if (node->wake == UP_WAKE)
+			hw->undo_over_req_mbps = min(req_mbps, meas_mbps_zone);
+		else
+			hw->undo_over_req_mbps = 0;
+		thres = mbps_to_bytes(meas_mbps, node->sample_ms);
+	}
+
+	if (hw->set_hw_events) {
+		hw->down_cnt = node->down_count;
+		hw->set_hw_events(hw, node->sample_ms);
+	} else {
+		node->down_cnt = node->down_count;
+		node->bytes = hw->set_thres(hw, thres);
+	}
+
+	node->wake = 0;
+	node->prev_req = req_mbps;
+
+	spin_unlock_irqrestore(&irq_lock, flags);
+
+	adj_mbps = req_mbps + node->guard_band_mbps;
+
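+	/*
+	 * Decay example (hypothetical values): with decay_rate = 90,
+	 * adj_mbps = 500 and prev_ab = 1000, the vote only drops to
+	 * (500 * 90 + 1000 * 10) / 100 = 550 MBps rather than straight
+	 * to 500.
+	 */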
+	if (adj_mbps > node->prev_ab) {
+		new_bw = adj_mbps;
+	} else {
+		new_bw = adj_mbps * node->decay_rate
+			+ node->prev_ab * (100 - node->decay_rate);
+		new_bw /= 100;
+	}
+
+	node->prev_ab = new_bw;
+	if (ab)
+		*ab = roundup(new_bw, node->bw_step);
+
+	*freq = (new_bw * 100) / io_percent;
+	trace_bw_hwmon_update(dev_name(node->hw->df->dev.parent),
+				new_bw,
+				*freq,
+				hw->up_wake_mbps,
+				hw->down_wake_mbps);
+	return req_mbps;
+}
+
+static struct hwmon_node *find_hwmon_node(struct devfreq *df)
+{
+	struct hwmon_node *node, *found = NULL;
+
+	mutex_lock(&list_lock);
+	list_for_each_entry(node, &hwmon_list, list)
+		if (node->hw->dev == df->dev.parent ||
+		    node->hw->of_node == df->dev.parent->of_node ||
+		    (!node->hw->dev && !node->hw->of_node &&
+		     node->gov == df->governor)) {
+			found = node;
+			break;
+		}
+	mutex_unlock(&list_lock);
+
+	return found;
+}
+
+int update_bw_hwmon(struct bw_hwmon *hwmon)
+{
+	struct devfreq *df;
+	struct hwmon_node *node;
+	int ret;
+
+	if (!hwmon)
+		return -EINVAL;
+	df = hwmon->df;
+	if (!df)
+		return -ENODEV;
+	node = df->data;
+	if (!node)
+		return -ENODEV;
+
+	if (!node->mon_started)
+		return -EBUSY;
+
+	dev_dbg(df->dev.parent, "Got update request\n");
+	devfreq_monitor_stop(df);
+
+	mutex_lock(&df->lock);
+	ret = update_devfreq(df);
+	if (ret)
+		dev_err(df->dev.parent,
+			"Unable to update freq on request!\n");
+	mutex_unlock(&df->lock);
+
+	devfreq_monitor_start(df);
+
+	return 0;
+}
+
+static int start_monitor(struct devfreq *df, bool init)
+{
+	struct hwmon_node *node = df->data;
+	struct bw_hwmon *hw = node->hw;
+	struct device *dev = df->dev.parent;
+	unsigned long mbps;
+	int ret;
+
+	node->prev_ts = ktime_get();
+
+	if (init) {
+		node->prev_ab = 0;
+		node->resume_freq = 0;
+		node->resume_ab = 0;
+		mbps = (df->previous_freq * node->io_percent) / 100;
+		hw->up_wake_mbps = mbps;
+		hw->down_wake_mbps = MIN_MBPS;
+		hw->undo_over_req_mbps = 0;
+		ret = hw->start_hwmon(hw, mbps);
+	} else {
+		ret = hw->resume_hwmon(hw);
+	}
+
+	if (ret) {
+		dev_err(dev, "Unable to start HW monitor! (%d)\n", ret);
+		return ret;
+	}
+
+	if (init)
+		devfreq_monitor_start(df);
+	else
+		devfreq_monitor_resume(df);
+
+	node->mon_started = true;
+
+	return 0;
+}
+
+static void stop_monitor(struct devfreq *df, bool init)
+{
+	struct hwmon_node *node = df->data;
+	struct bw_hwmon *hw = node->hw;
+
+	node->mon_started = false;
+
+	if (init) {
+		devfreq_monitor_stop(df);
+		hw->stop_hwmon(hw);
+	} else {
+		devfreq_monitor_suspend(df);
+		hw->suspend_hwmon(hw);
+	}
+
+}
+
+static int gov_start(struct devfreq *df)
+{
+	int ret = 0;
+	struct device *dev = df->dev.parent;
+	struct hwmon_node *node;
+	struct bw_hwmon *hw;
+	struct devfreq_dev_status stat;
+
+	node = find_hwmon_node(df);
+	if (!node) {
+		dev_err(dev, "Unable to find HW monitor!\n");
+		return -ENODEV;
+	}
+	hw = node->hw;
+
+	stat.private_data = NULL;
+	if (df->profile->get_dev_status)
+		ret = df->profile->get_dev_status(df->dev.parent, &stat);
+	if (ret || !stat.private_data)
+		dev_warn(dev, "Device doesn't take AB votes!\n");
+	else
+		node->dev_ab = stat.private_data;
+
+	hw->df = df;
+	node->orig_data = df->data;
+	df->data = node;
+
+	if (start_monitor(df, true))
+		goto err_start;
+
+	ret = sysfs_create_group(&df->dev.kobj, node->attr_grp);
+	if (ret)
+		goto err_sysfs;
+
+	return 0;
+
+err_sysfs:
+	stop_monitor(df, true);
+err_start:
+	df->data = node->orig_data;
+	node->orig_data = NULL;
+	hw->df = NULL;
+	node->dev_ab = NULL;
+	return ret;
+}
+
+static void gov_stop(struct devfreq *df)
+{
+	struct hwmon_node *node = df->data;
+	struct bw_hwmon *hw = node->hw;
+
+	sysfs_remove_group(&df->dev.kobj, node->attr_grp);
+	stop_monitor(df, true);
+	df->data = node->orig_data;
+	node->orig_data = NULL;
+	hw->df = NULL;
+	/*
+	 * Not all governors know about this additional extended device
+	 * configuration. To avoid leaving the extended configuration in a
+	 * stale state, set it to 0 and let the next governor take it from
+	 * there.
+	 */
+	if (node->dev_ab)
+		*node->dev_ab = 0;
+	node->dev_ab = NULL;
+}
+
+static int gov_suspend(struct devfreq *df)
+{
+	struct hwmon_node *node = df->data;
+	unsigned long resume_freq = df->previous_freq;
+	unsigned long resume_ab = node->dev_ab ? *node->dev_ab : 0;
+
+	if (!node->hw->suspend_hwmon)
+		return -ENOSYS;
+
+	if (node->resume_freq) {
+		dev_warn(df->dev.parent, "Governor already suspended!\n");
+		return -EBUSY;
+	}
+
+	stop_monitor(df, false);
+
+	mutex_lock(&df->lock);
+	update_devfreq(df);
+	mutex_unlock(&df->lock);
+
+	node->resume_freq = resume_freq;
+	node->resume_ab = resume_ab;
+
+	return 0;
+}
+
+static int gov_resume(struct devfreq *df)
+{
+	struct hwmon_node *node = df->data;
+
+	if (!node->hw->resume_hwmon)
+		return -ENOSYS;
+
+	if (!node->resume_freq) {
+		dev_warn(df->dev.parent, "Governor already resumed!\n");
+		return -EBUSY;
+	}
+
+	mutex_lock(&df->lock);
+	update_devfreq(df);
+	mutex_unlock(&df->lock);
+
+	node->resume_freq = 0;
+	node->resume_ab = 0;
+
+	return start_monitor(df, false);
+}
+
+static int devfreq_bw_hwmon_get_freq(struct devfreq *df,
+					unsigned long *freq,
+					u32 *flag)
+{
+	struct hwmon_node *node = df->data;
+
+	/* Suspend/resume sequence */
+	if (!node->mon_started) {
+		*freq = node->resume_freq;
+		if (node->dev_ab)
+			*node->dev_ab = node->resume_ab;
+		return 0;
+	}
+
+	get_bw_and_set_irq(node, freq, node->dev_ab);
+
+	return 0;
+}
+
+static ssize_t store_throttle_adj(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct devfreq *df = to_devfreq(dev);
+	struct hwmon_node *node = df->data;
+	int ret;
+	unsigned int val;
+
+	if (!node->hw->set_throttle_adj)
+		return -ENOSYS;
+
+	ret = kstrtouint(buf, 10, &val);
+	if (ret)
+		return ret;
+
+	ret = node->hw->set_throttle_adj(node->hw, val);
+
+	if (!ret)
+		return count;
+	else
+		return ret;
+}
+
+static ssize_t show_throttle_adj(struct device *dev,
+			struct device_attribute *attr, char *buf)
+{
+	struct devfreq *df = to_devfreq(dev);
+	struct hwmon_node *node = df->data;
+	unsigned int val;
+
+	if (!node->hw->get_throttle_adj)
+		val = 0;
+	else
+		val = node->hw->get_throttle_adj(node->hw);
+
+	return snprintf(buf, PAGE_SIZE, "%u\n", val);
+}
+
+static DEVICE_ATTR(throttle_adj, 0644, show_throttle_adj,
+						store_throttle_adj);
+
+gov_attr(guard_band_mbps, 0U, 2000U);
+gov_attr(decay_rate, 0U, 100U);
+gov_attr(io_percent, 1U, 100U);
+gov_attr(bw_step, 50U, 1000U);
+gov_attr(sample_ms, 1U, 50U);
+gov_attr(up_scale, 0U, 500U);
+gov_attr(up_thres, 1U, 100U);
+gov_attr(down_thres, 0U, 90U);
+gov_attr(down_count, 0U, 90U);
+gov_attr(hist_memory, 0U, 90U);
+gov_attr(hyst_trigger_count, 0U, 90U);
+gov_attr(hyst_length, 0U, 90U);
+gov_attr(idle_mbps, 0U, 2000U);
+gov_attr(low_power_ceil_mbps, 0U, 2500U);
+gov_attr(low_power_io_percent, 1U, 100U);
+gov_attr(low_power_delay, 1U, 60U);
+gov_list_attr(mbps_zones, NUM_MBPS_ZONES, 0U, UINT_MAX);
+
+static struct attribute *dev_attr[] = {
+	&dev_attr_guard_band_mbps.attr,
+	&dev_attr_decay_rate.attr,
+	&dev_attr_io_percent.attr,
+	&dev_attr_bw_step.attr,
+	&dev_attr_sample_ms.attr,
+	&dev_attr_up_scale.attr,
+	&dev_attr_up_thres.attr,
+	&dev_attr_down_thres.attr,
+	&dev_attr_down_count.attr,
+	&dev_attr_hist_memory.attr,
+	&dev_attr_hyst_trigger_count.attr,
+	&dev_attr_hyst_length.attr,
+	&dev_attr_idle_mbps.attr,
+	&dev_attr_low_power_ceil_mbps.attr,
+	&dev_attr_low_power_io_percent.attr,
+	&dev_attr_low_power_delay.attr,
+	&dev_attr_mbps_zones.attr,
+	&dev_attr_throttle_adj.attr,
+	NULL,
+};
+
+static struct attribute_group dev_attr_group = {
+	.name = "bw_hwmon",
+	.attrs = dev_attr,
+};
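+
+/*
+ * The attributes show up under the devfreq node in sysfs. Illustrative
+ * tuning session (the device name is an example):
+ *
+ *	echo 20 > /sys/class/devfreq/soc:qcom,cpubw/bw_hwmon/io_percent
+ *	echo 50 > /sys/class/devfreq/soc:qcom,cpubw/bw_hwmon/sample_ms
+ *
+ * Values outside the ranges passed to gov_attr() above are clamped.
+ */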
+
+static int devfreq_bw_hwmon_ev_handler(struct devfreq *df,
+					unsigned int event, void *data)
+{
+	int ret;
+	unsigned int sample_ms;
+	struct hwmon_node *node;
+	struct bw_hwmon *hw;
+
+	switch (event) {
+	case DEVFREQ_GOV_START:
+		sample_ms = df->profile->polling_ms;
+		sample_ms = max(MIN_MS, sample_ms);
+		sample_ms = min(MAX_MS, sample_ms);
+		df->profile->polling_ms = sample_ms;
+
+		ret = gov_start(df);
+		if (ret)
+			return ret;
+
+		dev_dbg(df->dev.parent,
+			"Enabled dev BW HW monitor governor\n");
+		break;
+
+	case DEVFREQ_GOV_STOP:
+		gov_stop(df);
+		dev_dbg(df->dev.parent,
+			"Disabled dev BW HW monitor governor\n");
+		break;
+
+	case DEVFREQ_GOV_INTERVAL:
+		mutex_lock(&sync_lock);
+		sample_ms = *(unsigned int *)data;
+		sample_ms = max(MIN_MS, sample_ms);
+		sample_ms = min(MAX_MS, sample_ms);
+		/*
+		 * Suspend/resume the HW monitor around the interval update
+		 * to prevent the HW monitor IRQ from trying to stop/start
+		 * the delayed workqueue while the interval update is
+		 * happening.
+		 */
+		node = df->data;
+		hw = node->hw;
+		hw->suspend_hwmon(hw);
+		devfreq_interval_update(df, &sample_ms);
+		ret = hw->resume_hwmon(hw);
+		if (ret) {
+			dev_err(df->dev.parent,
+				"Unable to resume HW monitor (%d)\n", ret);
+			mutex_unlock(&sync_lock);
+			return ret;
+		}
+		mutex_unlock(&sync_lock);
+		break;
+
+	case DEVFREQ_GOV_SUSPEND:
+		ret = gov_suspend(df);
+		if (ret) {
+			dev_err(df->dev.parent,
+				"Unable to suspend BW HW mon governor (%d)\n",
+				ret);
+			return ret;
+		}
+
+		dev_dbg(df->dev.parent, "Suspended BW HW mon governor\n");
+		break;
+
+	case DEVFREQ_GOV_RESUME:
+		ret = gov_resume(df);
+		if (ret) {
+			dev_err(df->dev.parent,
+				"Unable to resume BW HW mon governor (%d)\n",
+				ret);
+			return ret;
+		}
+
+		dev_dbg(df->dev.parent, "Resumed BW HW mon governor\n");
+		break;
+	}
+
+	return 0;
+}
+
+static struct devfreq_governor devfreq_gov_bw_hwmon = {
+	.name = "bw_hwmon",
+	.get_target_freq = devfreq_bw_hwmon_get_freq,
+	.event_handler = devfreq_bw_hwmon_ev_handler,
+};
+
+int register_bw_hwmon(struct device *dev, struct bw_hwmon *hwmon)
+{
+	int ret = 0;
+	struct hwmon_node *node;
+	struct attribute_group *attr_grp;
+
+	if (!hwmon->gov && !hwmon->dev && !hwmon->of_node)
+		return -EINVAL;
+
+	node = devm_kzalloc(dev, sizeof(*node), GFP_KERNEL);
+	if (!node) {
+		dev_err(dev, "Unable to register gov. Out of memory!\n");
+		return -ENOMEM;
+	}
+
+	if (hwmon->gov) {
+		attr_grp = devm_kzalloc(dev, sizeof(*attr_grp), GFP_KERNEL);
+		if (!attr_grp)
+			return -ENOMEM;
+
+		hwmon->gov->get_target_freq = devfreq_bw_hwmon_get_freq;
+		hwmon->gov->event_handler = devfreq_bw_hwmon_ev_handler;
+		attr_grp->name = hwmon->gov->name;
+		attr_grp->attrs = dev_attr;
+
+		node->gov = hwmon->gov;
+		node->attr_grp = attr_grp;
+	} else {
+		node->gov = &devfreq_gov_bw_hwmon;
+		node->attr_grp = &dev_attr_group;
+	}
+
+	node->guard_band_mbps = 100;
+	node->decay_rate = 90;
+	node->io_percent = 16;
+	node->low_power_ceil_mbps = 0;
+	node->low_power_io_percent = 16;
+	node->low_power_delay = 60;
+	node->bw_step = 190;
+	node->sample_ms = 50;
+	node->up_scale = 0;
+	node->up_thres = 10;
+	node->down_thres = 0;
+	node->down_count = 3;
+	node->hist_memory = 0;
+	node->hyst_trigger_count = 3;
+	node->hyst_length = 0;
+	node->idle_mbps = 400;
+	node->mbps_zones[0] = 0;
+	node->hw = hwmon;
+
+	mutex_lock(&list_lock);
+	list_add_tail(&node->list, &hwmon_list);
+	mutex_unlock(&list_lock);
+
+	if (hwmon->gov) {
+		ret = devfreq_add_governor(hwmon->gov);
+	} else {
+		mutex_lock(&state_lock);
+		if (!use_cnt)
+			ret = devfreq_add_governor(&devfreq_gov_bw_hwmon);
+		if (!ret)
+			use_cnt++;
+		mutex_unlock(&state_lock);
+	}
+
+	if (!ret)
+		dev_info(dev, "BW HWmon governor registered.\n");
+	else
+		dev_err(dev, "BW HWmon governor registration failed!\n");
+
+	return ret;
+}
+
+MODULE_DESCRIPTION("HW monitor based dev DDR bandwidth voting driver");
+MODULE_LICENSE("GPL v2");
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/devfreq/governor_bw_hwmon.h	2019-01-22 16:16:23.135243074 +0100
@@ -0,0 +1,88 @@
+/*
+ * Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _GOVERNOR_BW_HWMON_H
+#define _GOVERNOR_BW_HWMON_H
+
+#include <linux/kernel.h>
+#include <linux/devfreq.h>
+
+/**
+ * struct bw_hwmon - dev BW HW monitor info
+ * @start_hwmon:		Start the HW monitoring of the dev BW
+ * @stop_hwmon:			Stop the HW monitoring of dev BW
+ * @set_thres:			Set the count threshold to generate an IRQ
+ * @get_bytes_and_clear:	Get the bytes transferred since the last call
+ *				and reset the counter to start over.
+ * @set_throttle_adj:		Set throttle adjust field to the given value
+ * @get_throttle_adj:		Get the value written to throttle adjust field
+ * @dev:			Pointer to device that this HW monitor can
+ *				monitor.
+ * @of_node:			OF node of device that this HW monitor can
+ *				monitor.
+ * @gov:			devfreq_governor struct that should be used
+ *				when registering this HW monitor with devfreq.
+ *				Only the name field is expected to be
+ *				initialized.
+ * @df:				Devfreq node that this HW monitor is being
+ *				used for. NULL when not actively in use and
+ *				non-NULL when in use.
+ *
+ * One of dev, of_node or gov needs to be specified for a
+ * successful registration.
+ *
+ */
+struct bw_hwmon {
+	int (*start_hwmon)(struct bw_hwmon *hw, unsigned long mbps);
+	void (*stop_hwmon)(struct bw_hwmon *hw);
+	int (*suspend_hwmon)(struct bw_hwmon *hw);
+	int (*resume_hwmon)(struct bw_hwmon *hw);
+	unsigned long (*set_thres)(struct bw_hwmon *hw, unsigned long bytes);
+	unsigned long (*set_hw_events)(struct bw_hwmon *hw,
+					unsigned int sample_ms);
+	unsigned long (*get_bytes_and_clear)(struct bw_hwmon *hw);
+	int (*set_throttle_adj)(struct bw_hwmon *hw, uint adj);
+	u32 (*get_throttle_adj)(struct bw_hwmon *hw);
+	struct device *dev;
+	struct device_node *of_node;
+	struct devfreq_governor *gov;
+
+	unsigned long up_wake_mbps;
+	unsigned long undo_over_req_mbps;
+	unsigned long down_wake_mbps;
+	unsigned int down_cnt;
+
+	struct devfreq *df;
+};
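+
+/*
+ * Minimal registration sketch (hypothetical backend; shown here only to
+ * illustrate the contract):
+ *
+ *	static struct bw_hwmon my_hw = {
+ *		.start_hwmon		= my_start,
+ *		.stop_hwmon		= my_stop,
+ *		.suspend_hwmon		= my_suspend,
+ *		.resume_hwmon		= my_resume,
+ *		.set_thres		= my_set_thres,
+ *		.get_bytes_and_clear	= my_get_bytes_and_clear,
+ *		.dev			= &pdev->dev,
+ *	};
+ *
+ *	ret = register_bw_hwmon(&pdev->dev, &my_hw);
+ *
+ * At least one of dev, of_node or gov must be set before registering.
+ */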
+
+#ifdef CONFIG_DEVFREQ_GOV_QCOM_BW_HWMON
+int register_bw_hwmon(struct device *dev, struct bw_hwmon *hwmon);
+int update_bw_hwmon(struct bw_hwmon *hwmon);
+int bw_hwmon_sample_end(struct bw_hwmon *hwmon);
+#else
+static inline int register_bw_hwmon(struct device *dev,
+					struct bw_hwmon *hwmon)
+{
+	return 0;
+}
+static inline int update_bw_hwmon(struct bw_hwmon *hwmon)
+{
+	return 0;
+}
+static inline int bw_hwmon_sample_end(struct bw_hwmon *hwmon)
+{
+	return 0;
+}
+#endif
+
+#endif /* _GOVERNOR_BW_HWMON_H */
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/devfreq/governor_cpufreq.c	2019-01-22 16:16:23.135243074 +0100
@@ -0,0 +1,712 @@
+/*
+ * Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "dev-cpufreq: " fmt
+
+#include <linux/devfreq.h>
+#include <linux/cpu.h>
+#include <linux/cpufreq.h>
+#include <linux/cpumask.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/module.h>
+#include "governor.h"
+
+struct cpu_state {
+	unsigned int freq;
+	unsigned int min_freq;
+	unsigned int max_freq;
+	bool on;
+	unsigned int first_cpu;
+};
+static struct cpu_state *state[NR_CPUS];
+static int cpufreq_cnt;
+
+struct freq_map {
+	unsigned int cpu_khz;
+	unsigned int target_freq;
+};
+
+struct devfreq_node {
+	struct devfreq *df;
+	void *orig_data;
+	struct device *dev;
+	struct device_node *of_node;
+	struct list_head list;
+	struct freq_map **map;
+	struct freq_map *common_map;
+	unsigned int timeout;
+	struct delayed_work dwork;
+	bool drop;
+	unsigned long prev_tgt;
+};
+static LIST_HEAD(devfreq_list);
+static DEFINE_MUTEX(state_lock);
+static DEFINE_MUTEX(cpufreq_reg_lock);
+
+#define show_attr(name) \
+static ssize_t show_##name(struct device *dev,				\
+			struct device_attribute *attr, char *buf)	\
+{									\
+	struct devfreq *df = to_devfreq(dev);				\
+	struct devfreq_node *n = df->data;				\
+	return snprintf(buf, PAGE_SIZE, "%u\n", n->name);		\
+}
+
+#define store_attr(name, _min, _max) \
+static ssize_t store_##name(struct device *dev,				\
+			struct device_attribute *attr, const char *buf,	\
+			size_t count)					\
+{									\
+	struct devfreq *df = to_devfreq(dev);				\
+	struct devfreq_node *n = df->data;				\
+	int ret;							\
+	unsigned int val;						\
+	ret = sscanf(buf, "%u", &val);					\
+	if (ret != 1)							\
+		return -EINVAL;						\
+	val = max(val, _min);						\
+	val = min(val, _max);						\
+	n->name = val;							\
+	return count;							\
+}
+
+#define gov_attr(__attr, min, max)	\
+show_attr(__attr)			\
+store_attr(__attr, min, max)		\
+static DEVICE_ATTR(__attr, 0644, show_##__attr, store_##__attr)
+
+static int update_node(struct devfreq_node *node)
+{
+	int ret;
+	struct devfreq *df = node->df;
+
+	if (!df)
+		return 0;
+
+	cancel_delayed_work_sync(&node->dwork);
+
+	mutex_lock(&df->lock);
+	node->drop = false;
+	ret = update_devfreq(df);
+	if (ret) {
+		dev_err(df->dev.parent, "Unable to update frequency\n");
+		goto out;
+	}
+
+	if (!node->timeout)
+		goto out;
+
+	if (df->previous_freq <= df->min_freq)
+		goto out;
+
+	schedule_delayed_work(&node->dwork,
+			      msecs_to_jiffies(node->timeout));
+out:
+	mutex_unlock(&df->lock);
+	return ret;
+}
+
+static void update_all_devfreqs(void)
+{
+	struct devfreq_node *node;
+
+	list_for_each_entry(node, &devfreq_list, list) {
+		update_node(node);
+	}
+}
+
+static void do_timeout(struct work_struct *work)
+{
+	struct devfreq_node *node = container_of(to_delayed_work(work),
+						struct devfreq_node, dwork);
+	struct devfreq *df = node->df;
+
+	mutex_lock(&df->lock);
+	node->drop = true;
+	update_devfreq(df);
+	mutex_unlock(&df->lock);
+}
+
+static struct devfreq_node *find_devfreq_node(struct device *dev)
+{
+	struct devfreq_node *node;
+
+	list_for_each_entry(node, &devfreq_list, list)
+		if (node->dev == dev || node->of_node == dev->of_node)
+			return node;
+
+	return NULL;
+}
+
+/* ==================== cpufreq part ==================== */
+static void add_policy(struct cpufreq_policy *policy)
+{
+	struct cpu_state *new_state;
+	unsigned int cpu, first_cpu;
+
+	if (state[policy->cpu]) {
+		state[policy->cpu]->freq = policy->cur;
+		state[policy->cpu]->on = true;
+	} else {
+		new_state = kzalloc(sizeof(struct cpu_state), GFP_KERNEL);
+		if (!new_state)
+			return;
+
+		first_cpu = cpumask_first(policy->related_cpus);
+		new_state->first_cpu = first_cpu;
+		new_state->freq = policy->cur;
+		new_state->min_freq = policy->cpuinfo.min_freq;
+		new_state->max_freq = policy->cpuinfo.max_freq;
+		new_state->on = true;
+
+		for_each_cpu(cpu, policy->related_cpus)
+			state[cpu] = new_state;
+	}
+}
+
+static int cpufreq_policy_notifier(struct notifier_block *nb,
+		unsigned long event, void *data)
+{
+	struct cpufreq_policy *policy = data;
+
+	switch (event) {
+	case CPUFREQ_CREATE_POLICY:
+		mutex_lock(&state_lock);
+		add_policy(policy);
+		update_all_devfreqs();
+		mutex_unlock(&state_lock);
+		break;
+
+	case CPUFREQ_REMOVE_POLICY:
+		mutex_lock(&state_lock);
+		if (state[policy->cpu]) {
+			state[policy->cpu]->on = false;
+			update_all_devfreqs();
+		}
+		mutex_unlock(&state_lock);
+		break;
+	}
+
+	return 0;
+}
+
+static struct notifier_block cpufreq_policy_nb = {
+	.notifier_call = cpufreq_policy_notifier
+};
+
+static int cpufreq_trans_notifier(struct notifier_block *nb,
+		unsigned long event, void *data)
+{
+	struct cpufreq_freqs *freq = data;
+	struct cpu_state *s;
+
+	if (event != CPUFREQ_POSTCHANGE)
+		return 0;
+
+	mutex_lock(&state_lock);
+
+	s = state[freq->cpu];
+	if (!s)
+		goto out;
+
+	if (s->freq != freq->new) {
+		s->freq = freq->new;
+		update_all_devfreqs();
+	}
+
+out:
+	mutex_unlock(&state_lock);
+	return 0;
+}
+
+static struct notifier_block cpufreq_trans_nb = {
+	.notifier_call = cpufreq_trans_notifier
+};
+
+static int register_cpufreq(void)
+{
+	int ret = 0;
+	unsigned int cpu;
+	struct cpufreq_policy *policy;
+
+	mutex_lock(&cpufreq_reg_lock);
+
+	if (cpufreq_cnt)
+		goto cnt_not_zero;
+
+	get_online_cpus();
+	ret = cpufreq_register_notifier(&cpufreq_policy_nb,
+				CPUFREQ_POLICY_NOTIFIER);
+	if (ret)
+		goto out;
+
+	ret = cpufreq_register_notifier(&cpufreq_trans_nb,
+				CPUFREQ_TRANSITION_NOTIFIER);
+	if (ret) {
+		cpufreq_unregister_notifier(&cpufreq_policy_nb,
+				CPUFREQ_POLICY_NOTIFIER);
+		goto out;
+	}
+
+	for_each_online_cpu(cpu) {
+		policy = cpufreq_cpu_get(cpu);
+		if (policy) {
+			add_policy(policy);
+			cpufreq_cpu_put(policy);
+		}
+	}
+out:
+	put_online_cpus();
+cnt_not_zero:
+	if (!ret)
+		cpufreq_cnt++;
+	mutex_unlock(&cpufreq_reg_lock);
+	return ret;
+}
+
+static int unregister_cpufreq(void)
+{
+	int ret = 0;
+	int cpu;
+
+	mutex_lock(&cpufreq_reg_lock);
+
+	if (cpufreq_cnt > 1)
+		goto out;
+
+	cpufreq_unregister_notifier(&cpufreq_policy_nb,
+				CPUFREQ_POLICY_NOTIFIER);
+	cpufreq_unregister_notifier(&cpufreq_trans_nb,
+				CPUFREQ_TRANSITION_NOTIFIER);
+
+	for (cpu = ARRAY_SIZE(state) - 1; cpu >= 0; cpu--) {
+		if (!state[cpu])
+			continue;
+		if (state[cpu]->first_cpu == cpu)
+			kfree(state[cpu]);
+		state[cpu] = NULL;
+	}
+
+out:
+	cpufreq_cnt--;
+	mutex_unlock(&cpufreq_reg_lock);
+	return ret;
+}
+
+/* ==================== devfreq part ==================== */
+
+static unsigned int interpolate_freq(struct devfreq *df, unsigned int cpu)
+{
+	unsigned int *freq_table = df->profile->freq_table;
+	unsigned int cpu_min = state[cpu]->min_freq;
+	unsigned int cpu_max = state[cpu]->max_freq;
+	unsigned int cpu_freq = state[cpu]->freq;
+	unsigned int dev_min, dev_max, cpu_percent;
+
+	if (freq_table) {
+		dev_min = freq_table[0];
+		dev_max = freq_table[df->profile->max_state - 1];
+	} else {
+		if (df->max_freq <= df->min_freq)
+			return 0;
+		dev_min = df->min_freq;
+		dev_max = df->max_freq;
+	}
+
+	cpu_percent = ((cpu_freq - cpu_min) * 100) / (cpu_max - cpu_min);
+	return dev_min + mult_frac(dev_max - dev_min, cpu_percent, 100);
+}
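+
+/*
+ * Worked example (hypothetical numbers): a CPU at 900 MHz within a
+ * 300-1500 MHz range is at 50%, so a device spanning 100-400 MHz is
+ * interpolated to 100 + (400 - 100) * 50 / 100 = 250 MHz.
+ */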
+
+static unsigned int cpu_to_dev_freq(struct devfreq *df, unsigned int cpu)
+{
+	struct freq_map *map = NULL;
+	unsigned int cpu_khz = 0, freq;
+	struct devfreq_node *n = df->data;
+
+	if (!state[cpu] || !state[cpu]->on || state[cpu]->first_cpu != cpu) {
+		freq = 0;
+		goto out;
+	}
+
+	if (n->common_map)
+		map = n->common_map;
+	else if (n->map)
+		map = n->map[cpu];
+
+	cpu_khz = state[cpu]->freq;
+
+	if (!map) {
+		freq = interpolate_freq(df, cpu);
+		goto out;
+	}
+
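+	/*
+	 * Walk the ascending map to the first entry at or above cpu_khz.
+	 * E.g. for a hypothetical map { 576000 -> 100, 1036800 -> 200, 0 },
+	 * cpu_khz == 800000 picks 200, and anything above 1036800 sticks
+	 * at the last real entry.
+	 */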
+	while (map->cpu_khz && map->cpu_khz < cpu_khz)
+		map++;
+	if (!map->cpu_khz)
+		map--;
+	freq = map->target_freq;
+
+out:
+	dev_dbg(df->dev.parent, "CPU%u: %d -> dev: %u\n", cpu, cpu_khz, freq);
+	return freq;
+}
+
+static int devfreq_cpufreq_get_freq(struct devfreq *df,
+					unsigned long *freq,
+					u32 *flag)
+{
+	unsigned int cpu, tgt_freq = 0;
+	struct devfreq_node *node;
+
+	node = df->data;
+	if (!node) {
+		pr_err("Unable to find devfreq node!\n");
+		return -ENODEV;
+	}
+
+	if (node->drop) {
+		*freq = 0;
+		return 0;
+	}
+
+	for_each_possible_cpu(cpu)
+		tgt_freq = max(tgt_freq, cpu_to_dev_freq(df, cpu));
+
+	if (node->timeout && tgt_freq < node->prev_tgt)
+		*freq = 0;
+	else
+		*freq = tgt_freq;
+
+	node->prev_tgt = tgt_freq;
+
+	return 0;
+}
+
+static unsigned int show_table(char *buf, unsigned int len,
+				struct freq_map *map)
+{
+	unsigned int cnt = 0;
+
+	cnt += snprintf(buf + cnt, len - cnt, "CPU freq\tDevice freq\n");
+
+	while (map->cpu_khz && cnt < len) {
+		cnt += snprintf(buf + cnt, len - cnt, "%8u\t%11u\n",
+				map->cpu_khz, map->target_freq);
+		map++;
+	}
+	if (cnt < len)
+		cnt += snprintf(buf + cnt, len - cnt, "\n");
+
+	return cnt;
+}
+
+static ssize_t show_map(struct device *dev, struct device_attribute *attr,
+			char *buf)
+{
+	struct devfreq *df = to_devfreq(dev);
+	struct devfreq_node *n = df->data;
+	struct freq_map *map;
+	unsigned int cnt = 0, cpu;
+
+	mutex_lock(&state_lock);
+	if (n->common_map) {
+		map = n->common_map;
+		cnt += snprintf(buf + cnt, PAGE_SIZE - cnt,
+				"Common table for all CPUs:\n");
+		cnt += show_table(buf + cnt, PAGE_SIZE - cnt, map);
+	} else if (n->map) {
+		for_each_possible_cpu(cpu) {
+			map = n->map[cpu];
+			if (!map)
+				continue;
+			cnt += snprintf(buf + cnt, PAGE_SIZE - cnt,
+					"CPU %u:\n", cpu);
+			if (cnt >= PAGE_SIZE)
+				break;
+			cnt += show_table(buf + cnt, PAGE_SIZE - cnt, map);
+			if (cnt >= PAGE_SIZE)
+				break;
+		}
+	} else {
+		cnt += snprintf(buf + cnt, PAGE_SIZE - cnt,
+				"Device freq interpolated based on CPU freq\n");
+	}
+	mutex_unlock(&state_lock);
+
+	return cnt;
+}
+
+static DEVICE_ATTR(freq_map, 0444, show_map, NULL);
+gov_attr(timeout, 0U, 100U);
+
+static struct attribute *dev_attr[] = {
+	&dev_attr_freq_map.attr,
+	&dev_attr_timeout.attr,
+	NULL,
+};
+
+static struct attribute_group dev_attr_group = {
+	.name = "cpufreq",
+	.attrs = dev_attr,
+};
+
+static int devfreq_cpufreq_gov_start(struct devfreq *devfreq)
+{
+	int ret = 0;
+	struct devfreq_node *node;
+	bool alloc = false;
+
+	ret = register_cpufreq();
+	if (ret)
+		return ret;
+
+	ret = sysfs_create_group(&devfreq->dev.kobj, &dev_attr_group);
+	if (ret) {
+		unregister_cpufreq();
+		return ret;
+	}
+
+	mutex_lock(&state_lock);
+
+	node = find_devfreq_node(devfreq->dev.parent);
+	if (node == NULL) {
+		node = kzalloc(sizeof(struct devfreq_node), GFP_KERNEL);
+		if (!node) {
+			pr_err("Out of memory!\n");
+			ret = -ENOMEM;
+			goto alloc_fail;
+		}
+		alloc = true;
+		node->dev = devfreq->dev.parent;
+		list_add_tail(&node->list, &devfreq_list);
+	}
+
+	INIT_DELAYED_WORK(&node->dwork, do_timeout);
+
+	node->df = devfreq;
+	node->orig_data = devfreq->data;
+	devfreq->data = node;
+
+	ret = update_node(node);
+	if (ret)
+		goto update_fail;
+
+	mutex_unlock(&state_lock);
+	return 0;
+
+update_fail:
+	devfreq->data = node->orig_data;
+	if (alloc) {
+		list_del(&node->list);
+		kfree(node);
+	}
+alloc_fail:
+	mutex_unlock(&state_lock);
+	sysfs_remove_group(&devfreq->dev.kobj, &dev_attr_group);
+	unregister_cpufreq();
+	return ret;
+}
+
+static void devfreq_cpufreq_gov_stop(struct devfreq *devfreq)
+{
+	struct devfreq_node *node = devfreq->data;
+
+	cancel_delayed_work_sync(&node->dwork);
+
+	mutex_lock(&state_lock);
+	devfreq->data = node->orig_data;
+	if (node->map || node->common_map) {
+		node->df = NULL;
+	} else {
+		list_del(&node->list);
+		kfree(node);
+	}
+	mutex_unlock(&state_lock);
+
+	sysfs_remove_group(&devfreq->dev.kobj, &dev_attr_group);
+	unregister_cpufreq();
+}
+
+static int devfreq_cpufreq_ev_handler(struct devfreq *devfreq,
+					unsigned int event, void *data)
+{
+	int ret;
+
+	switch (event) {
+	case DEVFREQ_GOV_START:
+
+		ret = devfreq_cpufreq_gov_start(devfreq);
+		if (ret) {
+			pr_err("Governor start failed!\n");
+			return ret;
+		}
+		pr_debug("Enabled dev CPUfreq governor\n");
+		break;
+
+	case DEVFREQ_GOV_STOP:
+
+		devfreq_cpufreq_gov_stop(devfreq);
+		pr_debug("Disabled dev CPUfreq governor\n");
+		break;
+	}
+
+	return 0;
+}
+
+static struct devfreq_governor devfreq_cpufreq = {
+	.name = "cpufreq",
+	.get_target_freq = devfreq_cpufreq_get_freq,
+	.event_handler = devfreq_cpufreq_ev_handler,
+};
+
+#define NUM_COLS	2
+static struct freq_map *read_tbl(struct device_node *of_node, char *prop_name)
+{
+	int len, nf, i, j;
+	u32 data;
+	struct freq_map *tbl;
+
+	if (!of_find_property(of_node, prop_name, &len))
+		return NULL;
+	len /= sizeof(data);
+
+	if (len % NUM_COLS || len == 0)
+		return NULL;
+	nf = len / NUM_COLS;
+
+	tbl = kzalloc((nf + 1) * sizeof(*tbl), GFP_KERNEL);
+	if (!tbl)
+		return NULL;
+
+	for (i = 0, j = 0; i < nf; i++, j += 2) {
+		of_property_read_u32_index(of_node, prop_name, j, &data);
+		tbl[i].cpu_khz = data;
+
+		of_property_read_u32_index(of_node, prop_name, j + 1, &data);
+		tbl[i].target_freq = data;
+	}
+	tbl[i].cpu_khz = 0;
+
+	return tbl;
+}
+
+#define PROP_TARGET "target-dev"
+#define PROP_TABLE "cpu-to-dev-map"
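+/*
+ * Illustrative DT layout consumed below (node names and values are
+ * examples only):
+ *
+ *	devfreq-cpufreq {
+ *		cpubw-cpufreq {
+ *			target-dev = <&cpubw>;
+ *			cpu-to-dev-map =
+ *				<1036800 1525>,
+ *				<1689600 5195>;
+ *		};
+ *	};
+ *
+ * A per-CPU table may be given instead via cpu-to-dev-map-<cpu>.
+ */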
+static int add_table_from_of(struct device_node *of_node)
+{
+	struct device_node *target_of_node;
+	struct devfreq_node *node;
+	struct freq_map *common_tbl;
+	struct freq_map **tbl_list = NULL;
+	static char prop_name[] = PROP_TABLE "-999999";
+	int cpu, ret, cnt = 0, prop_sz = ARRAY_SIZE(prop_name);
+
+	target_of_node = of_parse_phandle(of_node, PROP_TARGET, 0);
+	if (!target_of_node)
+		return -EINVAL;
+
+	node = kzalloc(sizeof(struct devfreq_node), GFP_KERNEL);
+	if (!node)
+		return -ENOMEM;
+
+	common_tbl = read_tbl(of_node, PROP_TABLE);
+	if (!common_tbl) {
+		tbl_list = kzalloc(sizeof(*tbl_list) * NR_CPUS, GFP_KERNEL);
+		if (!tbl_list) {
+			kfree(node);
+			return -ENOMEM;
+		}
+
+		for_each_possible_cpu(cpu) {
+			ret = snprintf(prop_name, prop_sz, "%s-%d",
+					PROP_TABLE, cpu);
+			if (ret >= prop_sz) {
+				pr_warn("More CPUs than I can handle!\n");
+				pr_warn("Skipping rest of the tables!\n");
+				break;
+			}
+			tbl_list[cpu] = read_tbl(of_node, prop_name);
+			if (tbl_list[cpu])
+				cnt++;
+		}
+	}
+	if (!common_tbl && !cnt) {
+		kfree(tbl_list);
+		kfree(node);
+		return -EINVAL;
+	}
+
+	mutex_lock(&state_lock);
+	node->of_node = target_of_node;
+	node->map = tbl_list;
+	node->common_map = common_tbl;
+	list_add_tail(&node->list, &devfreq_list);
+	mutex_unlock(&state_lock);
+
+	return 0;
+}
+
+static int __init devfreq_cpufreq_init(void)
+{
+	int ret;
+	struct device_node *of_par, *of_child;
+
+	of_par = of_find_node_by_name(NULL, "devfreq-cpufreq");
+	if (of_par) {
+		for_each_child_of_node(of_par, of_child) {
+			ret = add_table_from_of(of_child);
+			if (ret)
+				pr_err("Parsing %s failed!\n", of_child->name);
+			else
+				pr_debug("Parsed %s.\n", of_child->name);
+		}
+		of_node_put(of_par);
+	} else {
+		pr_info("No tables parsed from DT.\n");
+	}
+
+	ret = devfreq_add_governor(&devfreq_cpufreq);
+	if (ret) {
+		pr_err("Governor add failed!\n");
+		return ret;
+	}
+
+	return 0;
+}
+subsys_initcall(devfreq_cpufreq_init);
+
+static void __exit devfreq_cpufreq_exit(void)
+{
+	int ret, cpu;
+	struct devfreq_node *node, *tmp;
+
+	ret = devfreq_remove_governor(&devfreq_cpufreq);
+	if (ret)
+		pr_err("Governor remove failed!\n");
+
+	mutex_lock(&state_lock);
+	list_for_each_entry_safe(node, tmp, &devfreq_list, list) {
+		kfree(node->common_map);
+		if (node->map) {
+			for_each_possible_cpu(cpu)
+				kfree(node->map[cpu]);
+			kfree(node->map);
+		}
+		list_del(&node->list);
+		kfree(node);
+	}
+	mutex_unlock(&state_lock);
+}
+module_exit(devfreq_cpufreq_exit);
+
+MODULE_DESCRIPTION("CPU freq based generic governor for devfreq devices");
+MODULE_LICENSE("GPL v2");
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/devfreq/governor_memlat.c	2019-01-22 16:16:23.135243074 +0100
@@ -0,0 +1,414 @@
+/*
+ * Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "mem_lat: " fmt
+
+#include <linux/kernel.h>
+#include <linux/sizes.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/ktime.h>
+#include <linux/time.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/mutex.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/devfreq.h>
+#include "governor.h"
+#include "governor_memlat.h"
+
+#include <trace/events/power.h>
+
+struct memlat_node {
+	unsigned int ratio_ceil;
+	bool mon_started;
+	struct list_head list;
+	void *orig_data;
+	struct memlat_hwmon *hw;
+	struct devfreq_governor *gov;
+	struct attribute_group *attr_grp;
+};
+
+static LIST_HEAD(memlat_list);
+static DEFINE_MUTEX(list_lock);
+
+static int use_cnt;
+static DEFINE_MUTEX(state_lock);
+
+#define show_attr(name) \
+static ssize_t show_##name(struct device *dev,				\
+			struct device_attribute *attr, char *buf)	\
+{									\
+	struct devfreq *df = to_devfreq(dev);				\
+	struct memlat_node *hw = df->data;				\
+	return snprintf(buf, PAGE_SIZE, "%u\n", hw->name);		\
+}
+
+#define store_attr(name, _min, _max) \
+static ssize_t store_##name(struct device *dev,				\
+			struct device_attribute *attr, const char *buf,	\
+			size_t count)					\
+{									\
+	struct devfreq *df = to_devfreq(dev);				\
+	struct memlat_node *hw = df->data;				\
+	int ret;							\
+	unsigned int val;						\
+	ret = kstrtouint(buf, 10, &val);				\
+	if (ret)							\
+		return ret;						\
+	val = max(val, _min);						\
+	val = min(val, _max);						\
+	hw->name = val;							\
+	return count;							\
+}
+
+#define gov_attr(__attr, min, max)	\
+show_attr(__attr)			\
+store_attr(__attr, min, max)		\
+static DEVICE_ATTR(__attr, 0644, show_##__attr, store_##__attr)
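+
+/*
+ * For reference, gov_attr(ratio_ceil, 1U, 10000U) further down expands to
+ * show_ratio_ceil()/store_ratio_ceil() plus dev_attr_ratio_ceil, i.e. a
+ * writable sysfs file whose stored values are clamped to [1, 10000].
+ */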
+
+static ssize_t show_map(struct device *dev, struct device_attribute *attr,
+			char *buf)
+{
+	struct devfreq *df = to_devfreq(dev);
+	struct memlat_node *n = df->data;
+	struct core_dev_map *map = n->hw->freq_map;
+	unsigned int cnt = 0;
+
+	cnt += scnprintf(buf, PAGE_SIZE, "Core freq (MHz)\tDevice BW\n");
+
+	while (map->core_mhz && cnt < PAGE_SIZE) {
+		cnt += scnprintf(buf + cnt, PAGE_SIZE - cnt, "%15u\t%9u\n",
+				map->core_mhz, map->target_freq);
+		map++;
+	}
+	if (cnt < PAGE_SIZE)
+		cnt += scnprintf(buf + cnt, PAGE_SIZE - cnt, "\n");
+
+	return cnt;
+}
+
+static DEVICE_ATTR(freq_map, 0444, show_map, NULL);
+
+static unsigned long core_to_dev_freq(struct memlat_node *node,
+		unsigned long coref)
+{
+	struct memlat_hwmon *hw = node->hw;
+	struct core_dev_map *map = hw->freq_map;
+	unsigned long freq = 0;
+
+	if (!map)
+		goto out;
+
+	while (map->core_mhz && map->core_mhz < coref)
+		map++;
+	if (!map->core_mhz)
+		map--;
+	freq = map->target_freq;
+
+out:
+	pr_debug("freq: %lu -> dev: %lu\n", coref, freq);
+	return freq;
+}
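+
+/*
+ * Worked example of the lookup above (map values hypothetical): with
+ * { {300, 762}, {1400, 2597}, {0, 0} }, a core frequency of 500 stops
+ * the scan on the 1400 row and returns 2597; a core frequency of 2000
+ * runs off the end of the table, steps back one row, and also returns
+ * 2597.
+ */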
+
+static struct memlat_node *find_memlat_node(struct devfreq *df)
+{
+	struct memlat_node *node, *found = NULL;
+
+	mutex_lock(&list_lock);
+	list_for_each_entry(node, &memlat_list, list)
+		if (node->hw->dev == df->dev.parent ||
+		    node->hw->of_node == df->dev.parent->of_node) {
+			found = node;
+			break;
+		}
+	mutex_unlock(&list_lock);
+
+	return found;
+}
+
+static int start_monitor(struct devfreq *df)
+{
+	struct memlat_node *node = df->data;
+	struct memlat_hwmon *hw = node->hw;
+	struct device *dev = df->dev.parent;
+	int ret;
+
+	ret = hw->start_hwmon(hw);
+
+	if (ret) {
+		dev_err(dev, "Unable to start HW monitor! (%d)\n", ret);
+		return ret;
+	}
+
+	devfreq_monitor_start(df);
+
+	node->mon_started = true;
+
+	return 0;
+}
+
+static void stop_monitor(struct devfreq *df)
+{
+	struct memlat_node *node = df->data;
+	struct memlat_hwmon *hw = node->hw;
+
+	node->mon_started = false;
+
+	devfreq_monitor_stop(df);
+	hw->stop_hwmon(hw);
+}
+
+static int gov_start(struct devfreq *df)
+{
+	int ret = 0;
+	struct device *dev = df->dev.parent;
+	struct memlat_node *node;
+	struct memlat_hwmon *hw;
+
+	node = find_memlat_node(df);
+	if (!node) {
+		dev_err(dev, "Unable to find HW monitor!\n");
+		return -ENODEV;
+	}
+	hw = node->hw;
+
+	hw->df = df;
+	node->orig_data = df->data;
+	df->data = node;
+
+	ret = start_monitor(df);
+	if (ret)
+		goto err_start;
+
+	ret = sysfs_create_group(&df->dev.kobj, node->attr_grp);
+	if (ret)
+		goto err_sysfs;
+
+	return 0;
+
+err_sysfs:
+	stop_monitor(df);
+err_start:
+	df->data = node->orig_data;
+	node->orig_data = NULL;
+	hw->df = NULL;
+	return ret;
+}
+
+static void gov_stop(struct devfreq *df)
+{
+	struct memlat_node *node = df->data;
+	struct memlat_hwmon *hw = node->hw;
+
+	sysfs_remove_group(&df->dev.kobj, node->attr_grp);
+	stop_monitor(df);
+	df->data = node->orig_data;
+	node->orig_data = NULL;
+	hw->df = NULL;
+}
+
+static int devfreq_memlat_get_freq(struct devfreq *df,
+					unsigned long *freq,
+					u32 *flag)
+{
+	int i, lat_dev;
+	struct memlat_node *node = df->data;
+	struct memlat_hwmon *hw = node->hw;
+	unsigned long max_freq = 0;
+	unsigned int ratio;
+
+	hw->get_cnt(hw);
+
+	for (i = 0; i < hw->num_cores; i++) {
+		ratio = hw->core_stats[i].inst_count;
+
+		if (hw->core_stats[i].mem_count)
+			ratio /= hw->core_stats[i].mem_count;
+
+		trace_memlat_dev_meas(dev_name(df->dev.parent),
+					hw->core_stats[i].id,
+					hw->core_stats[i].inst_count,
+					hw->core_stats[i].mem_count,
+					hw->core_stats[i].freq, ratio);
+
+		if (ratio && ratio <= node->ratio_ceil
+		    && hw->core_stats[i].freq > max_freq) {
+			lat_dev = i;
+			max_freq = hw->core_stats[i].freq;
+		}
+	}
+
+	if (max_freq) {
+		max_freq = core_to_dev_freq(node, max_freq);
+		trace_memlat_dev_update(dev_name(df->dev.parent),
+					hw->core_stats[lat_dev].id,
+					hw->core_stats[lat_dev].inst_count,
+					hw->core_stats[lat_dev].mem_count,
+					hw->core_stats[lat_dev].freq,
+					max_freq);
+	}
+
+	*freq = max_freq;
+	return 0;
+}
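+
+/*
+ * Example of the heuristic above (numbers invented): a core that retired
+ * 1,000,000 instructions with 200,000 memory accesses has a ratio of 5;
+ * with the default ratio_ceil of 10 it counts as memory-latency bound,
+ * and the fastest such core's frequency is mapped through
+ * core_to_dev_freq() to pick the device vote.
+ */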
+
+gov_attr(ratio_ceil, 1U, 10000U);
+
+static struct attribute *dev_attr[] = {
+	&dev_attr_ratio_ceil.attr,
+	&dev_attr_freq_map.attr,
+	NULL,
+};
+
+static struct attribute_group dev_attr_group = {
+	.name = "mem_latency",
+	.attrs = dev_attr,
+};
+
+#define MIN_MS	10U
+#define MAX_MS	500U
+static int devfreq_memlat_ev_handler(struct devfreq *df,
+					unsigned int event, void *data)
+{
+	int ret;
+	unsigned int sample_ms;
+
+	switch (event) {
+	case DEVFREQ_GOV_START:
+		sample_ms = df->profile->polling_ms;
+		sample_ms = max(MIN_MS, sample_ms);
+		sample_ms = min(MAX_MS, sample_ms);
+		df->profile->polling_ms = sample_ms;
+
+		ret = gov_start(df);
+		if (ret)
+			return ret;
+
+		dev_dbg(df->dev.parent,
+			"Enabled Memory Latency governor\n");
+		break;
+
+	case DEVFREQ_GOV_STOP:
+		gov_stop(df);
+		dev_dbg(df->dev.parent,
+			"Disabled Memory Latency governor\n");
+		break;
+
+	case DEVFREQ_GOV_INTERVAL:
+		sample_ms = *(unsigned int *)data;
+		sample_ms = max(MIN_MS, sample_ms);
+		sample_ms = min(MAX_MS, sample_ms);
+		devfreq_interval_update(df, &sample_ms);
+		break;
+	}
+
+	return 0;
+}
+
+static struct devfreq_governor devfreq_gov_memlat = {
+	.name = "mem_latency",
+	.get_target_freq = devfreq_memlat_get_freq,
+	.event_handler = devfreq_memlat_ev_handler,
+};
+
+#define NUM_COLS	2
+static struct core_dev_map *init_core_dev_map(struct device *dev,
+		char *prop_name)
+{
+	int len, nf, i, j;
+	u32 data;
+	struct core_dev_map *tbl;
+	int ret;
+
+	if (!of_find_property(dev->of_node, prop_name, &len))
+		return NULL;
+	len /= sizeof(data);
+
+	if (len % NUM_COLS || len == 0)
+		return NULL;
+	nf = len / NUM_COLS;
+
+	tbl = devm_kzalloc(dev, (nf + 1) * sizeof(struct core_dev_map),
+			GFP_KERNEL);
+	if (!tbl)
+		return NULL;
+
+	for (i = 0, j = 0; i < nf; i++, j += 2) {
+		ret = of_property_read_u32_index(dev->of_node, prop_name, j,
+				&data);
+		if (ret)
+			return NULL;
+		tbl[i].core_mhz = data / 1000;
+
+		ret = of_property_read_u32_index(dev->of_node, prop_name, j + 1,
+				&data);
+		if (ret)
+			return NULL;
+		tbl[i].target_freq = data;
+		pr_debug("Entry%d CPU:%u, Dev:%u\n", i, tbl[i].core_mhz,
+				tbl[i].target_freq);
+	}
+	tbl[i].core_mhz = 0;
+
+	return tbl;
+}
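+
+/*
+ * The property parsed above is a flat list of <core-freq-kHz dev-freq>
+ * pairs; core frequencies are divided by 1000 into MHz as they are read,
+ * and the terminating row is appended by the parser itself. A
+ * hypothetical example (values made up):
+ *
+ *	qcom,core-dev-table =
+ *		<  748800  762 >,
+ *		< 1401600 1525 >,
+ *		< 1958400 2597 >;
+ */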
+
+int register_memlat(struct device *dev, struct memlat_hwmon *hw)
+{
+	int ret = 0;
+	struct memlat_node *node;
+
+	if (!hw->dev && !hw->of_node)
+		return -EINVAL;
+
+	node = devm_kzalloc(dev, sizeof(*node), GFP_KERNEL);
+	if (!node)
+		return -ENOMEM;
+
+	node->gov = &devfreq_gov_memlat;
+	node->attr_grp = &dev_attr_group;
+
+	node->ratio_ceil = 10;
+	node->hw = hw;
+
+	hw->freq_map = init_core_dev_map(dev, "qcom,core-dev-table");
+	if (!hw->freq_map) {
+		dev_err(dev, "Couldn't find the core-dev freq table!\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&list_lock);
+	list_add_tail(&node->list, &memlat_list);
+	mutex_unlock(&list_lock);
+
+	mutex_lock(&state_lock);
+	if (!use_cnt)
+		ret = devfreq_add_governor(&devfreq_gov_memlat);
+	if (!ret)
+		use_cnt++;
+	mutex_unlock(&state_lock);
+
+	if (!ret)
+		dev_info(dev, "Memory Latency governor registered.\n");
+	else
+		dev_err(dev, "Memory Latency governor registration failed!\n");
+
+	return ret;
+}
+
+MODULE_DESCRIPTION("HW monitor based device DDR bandwidth voting driver");
+MODULE_LICENSE("GPL v2");
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/devfreq/governor_memlat.h	2019-01-22 16:16:23.135243074 +0100
@@ -0,0 +1,89 @@
+/*
+ * Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _GOVERNOR_MEMLAT_H
+#define _GOVERNOR_MEMLAT_H
+
+#include <linux/kernel.h>
+#include <linux/devfreq.h>
+
+/**
+ * struct dev_stats - Device stats
+ * @id:				ID of the core these stats belong to.
+ * @inst_count:			Number of instructions executed.
+ * @mem_count:			Number of memory accesses made.
+ * @freq:			Effective frequency of the device in the
+ *				last interval.
+ */
+struct dev_stats {
+	int id;
+	unsigned long inst_count;
+	unsigned long mem_count;
+	unsigned long freq;
+};
+
+struct core_dev_map {
+	unsigned int core_mhz;
+	unsigned int target_freq;
+};
+
+/**
+ * struct memlat_hwmon - Memory Latency HW monitor info
+ * @start_hwmon:		Start the HW monitoring
+ * @stop_hwmon:			Stop the HW monitoring
+ * @get_cnt:			Return the number of instructions executed,
+ *				memory accesses and effective frequency
+ * @dev:			Pointer to device that this HW monitor can
+ *				monitor.
+ * @of_node:			OF node of device that this HW monitor can
+ *				monitor.
+ * @df:				Devfreq node that this HW monitor is being
+ *				used for. NULL when not actively in use and
+ *				non-NULL when in use.
+ * @num_cores:			Number of cores that are monitored by the
+ *				hardware monitor.
+ * @core_stats:			Array containing instruction count, memory
+ *				accesses and effective frequency for each core.
+ *
+ * One of dev or of_node needs to be specified for a successful registration.
+ */
+struct memlat_hwmon {
+	int (*start_hwmon)(struct memlat_hwmon *hw);
+	void (*stop_hwmon)(struct memlat_hwmon *hw);
+	unsigned long (*get_cnt)(struct memlat_hwmon *hw);
+	struct device *dev;
+	struct device_node *of_node;
+
+	unsigned int num_cores;
+	struct dev_stats *core_stats;
+
+	struct devfreq *df;
+	struct core_dev_map *freq_map;
+};
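+
+/*
+ * Sketch of how a HW monitor driver is expected to fill this in before
+ * registering (the callback names and the stats array are the caller's
+ * own; everything else comes from this header):
+ *
+ *	static struct memlat_hwmon hw = {
+ *		.start_hwmon = my_start,
+ *		.stop_hwmon  = my_stop,
+ *		.get_cnt     = my_get_cnt,
+ *		.dev         = &pdev->dev,
+ *		.num_cores   = n,
+ *		.core_stats  = stats,
+ *	};
+ *	ret = register_memlat(&pdev->dev, &hw);
+ */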
+
+#ifdef CONFIG_DEVFREQ_GOV_MEMLAT
+int register_memlat(struct device *dev, struct memlat_hwmon *hw);
+int update_memlat(struct memlat_hwmon *hw);
+#else
+static inline int register_memlat(struct device *dev,
+					struct memlat_hwmon *hw)
+{
+	return 0;
+}
+static inline int update_memlat(struct memlat_hwmon *hw)
+{
+	return 0;
+}
+#endif
+
+#endif /* _GOVERNOR_MEMLAT_H */
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/devfreq/governor_spdm_bw_hyp.c	2019-01-22 16:16:23.135243074 +0100
@@ -0,0 +1,417 @@
+/*
+ * Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/devfreq.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/irqreturn.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <soc/qcom/rpm-smd.h>
+#include "governor.h"
+#include "devfreq_spdm.h"
+
+enum msm_spdm_rt_res {
+	SPDM_RES_ID = 1,
+	SPDM_RES_TYPE = 0x63707362,
+	SPDM_KEY = 0x00006e65,
+	SPDM_SIZE = 4,
+};
+
+static LIST_HEAD(devfreqs);
+static DEFINE_MUTEX(devfreqs_lock);
+
+static int enable_clocks(void)
+{
+	struct msm_rpm_request *rpm_req;
+	int id;
+	const int one = 1;
+	rpm_req = msm_rpm_create_request(MSM_RPM_CTX_ACTIVE_SET, SPDM_RES_TYPE,
+					 SPDM_RES_ID, 1);
+	if (!rpm_req)
+		return -ENODEV;
+	msm_rpm_add_kvp_data(rpm_req, SPDM_KEY, (const uint8_t *)&one,
+			     sizeof(int));
+	id = msm_rpm_send_request(rpm_req);
+	msm_rpm_wait_for_ack(id);
+	msm_rpm_free_request(rpm_req);
+
+	return 0;
+}
+
+static int disable_clocks(void)
+{
+	struct msm_rpm_request *rpm_req;
+	int id;
+	const int zero = 0;
+	rpm_req = msm_rpm_create_request(MSM_RPM_CTX_ACTIVE_SET, SPDM_RES_TYPE,
+					 SPDM_RES_ID, 1);
+	if (!rpm_req)
+		return -ENODEV;
+	msm_rpm_add_kvp_data(rpm_req, SPDM_KEY, (const uint8_t *)&zero,
+			     sizeof(int));
+	id = msm_rpm_send_request(rpm_req);
+	msm_rpm_wait_for_ack(id);
+	msm_rpm_free_request(rpm_req);
+
+	return 0;
+}
+
+static irqreturn_t threaded_isr(int irq, void *dev_id)
+{
+	struct spdm_data *data;
+	struct spdm_args desc = { { 0 } };
+	int ext_status = 0;
+
+	/* call hyp to get bw_vote */
+	desc.arg[0] = SPDM_CMD_GET_BW_ALL;
+	ext_status = spdm_ext_call(&desc, 1);
+	if (ext_status)
+		pr_err("External command %u failed with error %u",
+			(int)desc.arg[0], ext_status);
+	mutex_lock(&devfreqs_lock);
+	list_for_each_entry(data, &devfreqs, list) {
+		if (data == NULL || data->devfreq == NULL) {
+			pr_err("Spurious interrupts\n");
+			break;
+		}
+		if (data->spdm_client == desc.ret[0]) {
+			devfreq_monitor_suspend(data->devfreq);
+			mutex_lock(&data->devfreq->lock);
+			data->action = SPDM_UP;
+			data->new_bw =
+				(desc.ret[1] * 1000) >> 6;
+			update_devfreq(data->devfreq);
+			data->action = SPDM_DOWN;
+			mutex_unlock(&data->devfreq->lock);
+			devfreq_monitor_resume(data->devfreq);
+			break;
+		}
+	}
+	mutex_unlock(&devfreqs_lock);
+	return IRQ_HANDLED;
+}
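+
+/*
+ * Note on the arithmetic above: new_bw is (ret[1] * 1000) >> 6, i.e.
+ * multiplied by 1000 and divided by 64. The units of the raw hyp value
+ * are not documented in this file, so the conversion is kept verbatim.
+ * The devfreq monitor is suspended around update_devfreq() so the
+ * polled path cannot race this interrupt-driven vote.
+ */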
+
+static irqreturn_t isr(int irq, void *dev_id)
+{
+	return IRQ_WAKE_THREAD;
+}
+
+static int gov_spdm_hyp_target_bw(struct devfreq *devfreq, unsigned long *freq,
+				  u32 *flag)
+{
+	struct devfreq_dev_status status;
+	int ret = -EINVAL;
+	int usage;
+	struct spdm_args desc = { { 0 } };
+	int ext_status = 0;
+	u64 bw_ret;
+
+	if (!devfreq || !devfreq->profile || !devfreq->profile->get_dev_status)
+		return ret;
+
+	ret = devfreq->profile->get_dev_status(devfreq->dev.parent, &status);
+	if (ret)
+		return ret;
+
+	usage = (status.busy_time * 100) / status.total_time;
+
+	if (usage > 0) {
+		/*
+		 * Up was already called as part of the hyp interrupt, so
+		 * just use the already-stored values.
+		 */
+		*freq = ((struct spdm_data *)devfreq->data)->new_bw;
+	} else {
+		desc.arg[0] = SPDM_CMD_GET_BW_SPECIFIC;
+		desc.arg[1] = ((struct spdm_data *)devfreq->data)->spdm_client;
+		ext_status = spdm_ext_call(&desc, 2);
+		if (ext_status)
+			pr_err("External command %u failed with error %u",
+				(int)desc.arg[0], ext_status);
+		bw_ret = desc.ret[0] * 1000;
+		*freq = bw_ret >> 6;
+	}
+
+	return 0;
+}
+
+static int gov_spdm_hyp_eh(struct devfreq *devfreq, unsigned int event,
+			   void *data)
+{
+	struct spdm_args desc = { { 0 } };
+	int ext_status = 0;
+	struct spdm_data *spdm_data = (struct spdm_data *)devfreq->data;
+	int i;
+
+	switch (event) {
+	case DEVFREQ_GOV_START:
+		mutex_lock(&devfreqs_lock);
+		list_add(&spdm_data->list, &devfreqs);
+		mutex_unlock(&devfreqs_lock);
+		/* call hyp with config data */
+		desc.arg[0] = SPDM_CMD_CFG_PORTS;
+		desc.arg[1] = spdm_data->spdm_client;
+		desc.arg[2] = spdm_data->config_data.num_ports;
+		for (i = 0; i < spdm_data->config_data.num_ports; i++)
+			desc.arg[i+3] = spdm_data->config_data.ports[i];
+		ext_status = spdm_ext_call(&desc,
+				spdm_data->config_data.num_ports + 3);
+		if (ext_status)
+			pr_err("External command %u failed with error %u",
+				(int)desc.arg[0], ext_status);
+
+		desc.arg[0] = SPDM_CMD_CFG_FLTR;
+		desc.arg[1] = spdm_data->spdm_client;
+		desc.arg[2] = spdm_data->config_data.aup;
+		desc.arg[3] = spdm_data->config_data.adown;
+		desc.arg[4] = spdm_data->config_data.bucket_size;
+		ext_status = spdm_ext_call(&desc, 5);
+		if (ext_status)
+			pr_err("External command %u failed with error %u",
+				(int)desc.arg[0], ext_status);
+
+		desc.arg[0] = SPDM_CMD_CFG_PL;
+		desc.arg[1] = spdm_data->spdm_client;
+		for (i = 0; i < SPDM_PL_COUNT - 1; i++)
+			desc.arg[i+2] = spdm_data->config_data.pl_freqs[i];
+		ext_status = spdm_ext_call(&desc, SPDM_PL_COUNT + 1);
+		if (ext_status)
+			pr_err("External command %u failed with error %u",
+				(int)desc.arg[0], ext_status);
+
+		desc.arg[0] = SPDM_CMD_CFG_REJRATE_LOW;
+		desc.arg[1] = spdm_data->spdm_client;
+		desc.arg[2] = spdm_data->config_data.reject_rate[0];
+		desc.arg[3] = spdm_data->config_data.reject_rate[1];
+		ext_status = spdm_ext_call(&desc, 4);
+		if (ext_status)
+			pr_err("External command %u failed with error %u",
+				(int)desc.arg[0], ext_status);
+		desc.arg[0] = SPDM_CMD_CFG_REJRATE_MED;
+		desc.arg[1] = spdm_data->spdm_client;
+		desc.arg[2] = spdm_data->config_data.reject_rate[2];
+		desc.arg[3] = spdm_data->config_data.reject_rate[3];
+		ext_status = spdm_ext_call(&desc, 4);
+		if (ext_status)
+			pr_err("External command %u failed with error %u",
+				(int)desc.arg[0], ext_status);
+		desc.arg[0] = SPDM_CMD_CFG_REJRATE_HIGH;
+		desc.arg[1] = spdm_data->spdm_client;
+		desc.arg[2] = spdm_data->config_data.reject_rate[4];
+		desc.arg[3] = spdm_data->config_data.reject_rate[5];
+		ext_status = spdm_ext_call(&desc, 4);
+		if (ext_status)
+			pr_err("External command %u failed with error %u",
+				(int)desc.arg[0], ext_status);
+
+		desc.arg[0] = SPDM_CMD_CFG_RESPTIME_LOW;
+		desc.arg[1] = spdm_data->spdm_client;
+		desc.arg[2] = spdm_data->config_data.response_time_us[0];
+		desc.arg[3] = spdm_data->config_data.response_time_us[1];
+		ext_status = spdm_ext_call(&desc, 4);
+		if (ext_status)
+			pr_err("External command %u failed with error %u",
+				(int)desc.arg[0], ext_status);
+		desc.arg[0] = SPDM_CMD_CFG_RESPTIME_MED;
+		desc.arg[1] = spdm_data->spdm_client;
+		desc.arg[2] = spdm_data->config_data.response_time_us[2];
+		desc.arg[3] = spdm_data->config_data.response_time_us[3];
+		ext_status = spdm_ext_call(&desc, 4);
+		if (ext_status)
+			pr_err("External command %u failed with error %u",
+				(int)desc.arg[0], ext_status);
+		desc.arg[0] = SPDM_CMD_CFG_RESPTIME_HIGH;
+		desc.arg[1] = spdm_data->spdm_client;
+		desc.arg[2] = spdm_data->config_data.response_time_us[4];
+		desc.arg[3] = spdm_data->config_data.response_time_us[5];
+		ext_status = spdm_ext_call(&desc, 4);
+		if (ext_status)
+			pr_err("External command %u failed with error %u",
+				(int)desc.arg[0], ext_status);
+
+		desc.arg[0] = SPDM_CMD_CFG_CCIRESPTIME_LOW;
+		desc.arg[1] = spdm_data->spdm_client;
+		desc.arg[2] = spdm_data->config_data.cci_response_time_us[0];
+		desc.arg[3] = spdm_data->config_data.cci_response_time_us[1];
+		ext_status = spdm_ext_call(&desc, 4);
+		if (ext_status)
+			pr_err("External command %u failed with error %u",
+				(int)desc.arg[0], ext_status);
+		desc.arg[0] = SPDM_CMD_CFG_CCIRESPTIME_MED;
+		desc.arg[1] = spdm_data->spdm_client;
+		desc.arg[2] = spdm_data->config_data.cci_response_time_us[2];
+		desc.arg[3] = spdm_data->config_data.cci_response_time_us[3];
+		ext_status = spdm_ext_call(&desc, 4);
+		if (ext_status)
+			pr_err("External command %u failed with error %u",
+				(int)desc.arg[0], ext_status);
+		desc.arg[0] = SPDM_CMD_CFG_CCIRESPTIME_HIGH;
+		desc.arg[1] = spdm_data->spdm_client;
+		desc.arg[2] = spdm_data->config_data.cci_response_time_us[4];
+		desc.arg[3] = spdm_data->config_data.cci_response_time_us[5];
+		ext_status = spdm_ext_call(&desc, 4);
+		if (ext_status)
+			pr_err("External command %u failed with error %u",
+				(int)desc.arg[0], ext_status);
+
+		desc.arg[0] = SPDM_CMD_CFG_MAXCCI;
+		desc.arg[1] = spdm_data->spdm_client;
+		desc.arg[2] = spdm_data->config_data.max_cci_freq;
+		ext_status = spdm_ext_call(&desc, 3);
+		if (ext_status)
+			pr_err("External command %u failed with error %u",
+				(int)desc.arg[0], ext_status);
+
+		desc.arg[0] = SPDM_CMD_CFG_VOTES;
+		desc.arg[1] = spdm_data->spdm_client;
+		desc.arg[2] = spdm_data->config_data.upstep;
+		desc.arg[3] = spdm_data->config_data.downstep;
+		desc.arg[4] = spdm_data->config_data.max_vote;
+		desc.arg[5] = spdm_data->config_data.up_step_multp;
+		ext_status = spdm_ext_call(&desc, 6);
+		if (ext_status)
+			pr_err("External command %u failed with error %u",
+				(int)desc.arg[0], ext_status);
+
+		/* call hyp enable/commit */
+		desc.arg[0] = SPDM_CMD_ENABLE;
+		desc.arg[1] = spdm_data->spdm_client;
+		desc.arg[2] = 0;
+		ext_status = spdm_ext_call(&desc, 3);
+		if (ext_status) {
+			pr_err("External command %u failed with error %u",
+				(int)desc.arg[0], ext_status);
+			mutex_lock(&devfreqs_lock);
+			/*
+			 * The spdm device probe will fail, so remove it from
+			 * the list to prevent accessing a deleted pointer in
+			 * the future.
+			 */
+			list_del(&spdm_data->list);
+			mutex_unlock(&devfreqs_lock);
+			return -EINVAL;
+		}
+		spdm_data->enabled = true;
+		devfreq_monitor_start(devfreq);
+		break;
+
+	case DEVFREQ_GOV_STOP:
+		devfreq_monitor_stop(devfreq);
+		/* find devfreq in list and remove it */
+		mutex_lock(&devfreqs_lock);
+		list_del(&spdm_data->list);
+		mutex_unlock(&devfreqs_lock);
+
+		/* call hypervisor to disable */
+		desc.arg[0] = SPDM_CMD_DISABLE;
+		desc.arg[1] = spdm_data->spdm_client;
+		ext_status = spdm_ext_call(&desc, 2);
+		if (ext_status)
+			pr_err("External command %u failed with error %u",
+				(int)desc.arg[0], ext_status);
+		spdm_data->enabled = false;
+		break;
+
+	case DEVFREQ_GOV_INTERVAL:
+		devfreq_interval_update(devfreq, (unsigned int *)data);
+		break;
+
+	case DEVFREQ_GOV_SUSPEND:
+		devfreq_monitor_suspend(devfreq);
+		break;
+
+	case DEVFREQ_GOV_RESUME:
+		devfreq_monitor_resume(devfreq);
+		break;
+
+	default:
+		break;
+	}
+
+	return 0;
+}
+
+static struct devfreq_governor spdm_hyp_gov = {
+	.name = "spdm_bw_hyp",
+	.get_target_freq = gov_spdm_hyp_target_bw,
+	.event_handler = gov_spdm_hyp_eh,
+};
+
+static int probe(struct platform_device *pdev)
+{
+	int ret = -EINVAL;
+	int *irq;
+
+	irq = devm_kzalloc(&pdev->dev, sizeof(int), GFP_KERNEL);
+	if (!irq)
+		return -ENOMEM;
+	platform_set_drvdata(pdev, irq);
+
+	ret = devfreq_add_governor(&spdm_hyp_gov);
+	if (ret)
+		goto nogov;
+
+	*irq = platform_get_irq_byname(pdev, "spdm-irq");
+	if (*irq < 0) {
+		ret = *irq;
+		goto no_irq;
+	}
+
+	ret = request_threaded_irq(*irq, isr, threaded_isr,
+				   IRQF_ONESHOT,
+				   spdm_hyp_gov.name, pdev);
+	if (ret)
+		goto no_irq;
+
+	enable_clocks();
+	return 0;
+
+no_irq:
+	devfreq_remove_governor(&spdm_hyp_gov);
+nogov:
+	devm_kfree(&pdev->dev, irq);
+	return ret;
+}
+
+static int remove(struct platform_device *pdev)
+{
+	int *irq;
+
+	disable_clocks();
+	irq = platform_get_drvdata(pdev);
+	free_irq(*irq, pdev);
+	devfreq_remove_governor(&spdm_hyp_gov);
+	devm_kfree(&pdev->dev, irq);
+	return 0;
+}
+
+static const struct of_device_id gov_spdm_match[] = {
+	{.compatible = "qcom,gov_spdm_hyp"},
+	{}
+};
+
+static struct platform_driver gov_spdm_hyp_drvr = {
+	.driver = {
+		   .name = "gov_spdm_hyp",
+		   .owner = THIS_MODULE,
+		   .of_match_table = gov_spdm_match,
+		   },
+	.probe = probe,
+	.remove = remove,
+};
+
+static int __init governor_spdm_bw_hyp(void)
+{
+	return platform_driver_register(&gov_spdm_hyp_drvr);
+}
+
+module_init(governor_spdm_bw_hyp);
+
+MODULE_LICENSE("GPL v2");
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/dma/qcom-sps-dma.c	2019-01-22 16:16:23.159243291 +0100
@@ -0,0 +1,724 @@
+/*
+ * Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+/*
+ * Qualcomm Technologies, Inc. DMA API for BAM (Bus Access Manager).
+ * This DMA driver uses sps-BAM API to access the HW, thus it is effectively a
+ * DMA engine wrapper of the sps-BAM API.
+ *
+ * Client channel configuration example:
+ * struct dma_slave_config config = {
+ *    .direction = DMA_MEM_TO_DEV,
+ * };
+ *
+ * chan = dma_request_slave_channel(client_dev, "rx");
+ * dmaengine_slave_config(chan, &config);
+ */
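+
+/*
+ * Continuing that example with the generic dmaengine calls a client would
+ * typically make next (standard API, shown only as a sketch):
+ *
+ * desc = dmaengine_prep_slave_sg(chan, sgl, sg_len, DMA_MEM_TO_DEV, flags);
+ * cookie = dmaengine_submit(desc);
+ * dma_async_issue_pending(chan);
+ */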
+
+#include <linux/kernel.h>
+#include <linux/io.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/dma-mapping.h>
+#include <linux/scatterlist.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/of_dma.h>
+#include <linux/list.h>
+#include <linux/msm-sps.h>
+#include "dmaengine.h"
+
+#define QBAM_OF_SLAVE_N_ARGS	(4)
+#define QBAM_OF_MANAGE_LOCAL	"qcom,managed-locally"
+#define QBAM_OF_SUM_THRESHOLD	"qcom,summing-threshold"
+#define QBAM_MAX_DESCRIPTORS	(0x100)
+#define QBAM_MAX_CHANNELS	(32)
+
+/*
+ * qbam_async_tx_descriptor - dma descriptor plus a list of xfer_bufs
+ *
+ * @sgl: scatterlist of transfer buffers
+ * @sg_len: size of that list
+ * @flags: dma xfer flags
+ */
+struct qbam_async_tx_descriptor {
+	struct dma_async_tx_descriptor	dma_desc;
+	struct scatterlist		*sgl;
+	unsigned int			sg_len;
+	unsigned long			flags;
+};
+
+#define DMA_TO_QBAM_ASYNC_DESC(dma_async_desc) \
+	container_of(dma_async_desc, struct qbam_async_tx_descriptor, dma_desc)
+
+struct qbam_channel;
+/*
+ * qbam_device - top level device of current driver
+ * @handle: bam sps handle.
+ * @regs: bam register space virtual base address.
+ * @mem_resource: bam register space resource.
+ * @deregister_required: if the bam is registered by this driver it needs to
+ *   be unregistered by this driver.
+ * @manage: is the bam managed locally or remotely.
+ * @summing_threshold: event threshold.
+ * @irq: bam interrupt line.
+ * @channels: has the same channels as qbam_dev->dma_dev.channels but
+ *   supports fast access by pipe index.
+ */
+struct qbam_device {
+	struct dma_device		dma_dev;
+	void __iomem			*regs;
+	struct resource			*mem_resource;
+	ulong				handle;
+	bool				deregister_required;
+	u32				summing_threshold;
+	u32				manage;
+	int				irq;
+	struct qbam_channel		*channels[QBAM_MAX_CHANNELS];
+};
+
+/* qbam_pipe: aggregate of bam pipe related entries of qbam_channel */
+struct qbam_pipe {
+	u32				index;
+	struct sps_pipe			*handle;
+	struct sps_connect		cfg;
+	u32				num_descriptors;
+	u32				sps_connect_flags;
+	u32				sps_register_event_flags;
+};
+
+/*
+ * qbam_channel - dma channel plus bam pipe info and current pending transfers
+ *
+ * @direction: is this a producer or a consumer (MEM => DEV or DEV => MEM)
+ * @pending_desc: next set of transfers to process
+ * @error: last error that took place on the current pending_desc
+ */
+struct qbam_channel {
+	struct qbam_pipe		bam_pipe;
+
+	struct dma_chan			chan;
+	enum dma_transfer_direction	direction;
+	struct qbam_async_tx_descriptor	pending_desc;
+
+	struct qbam_device		*qbam_dev;
+	struct mutex			lock;
+	int				error;
+};
+#define DMA_TO_QBAM_CHAN(dma_chan) \
+			container_of(dma_chan, struct qbam_channel, chan)
+#define qbam_err(qbam_dev, fmt ...) dev_err(qbam_dev->dma_dev.dev, fmt)
+
+/*  qbam_disconnect_chan - disconnect a channel */
+static int qbam_disconnect_chan(struct qbam_channel *qbam_chan)
+{
+	struct qbam_device  *qbam_dev    = qbam_chan->qbam_dev;
+	struct sps_pipe     *pipe_handle = qbam_chan->bam_pipe.handle;
+	struct sps_connect   pipe_config_no_irq = {.options = SPS_O_POLL};
+	int ret;
+
+	/*
+	 * SW workaround:
+	 * When disconnecting BAM pipe a spurious interrupt sometimes appears.
+	 * To avoid that, we change the pipe setting from interrupt (default)
+	 * to polling (SPS_O_POLL) before disconnecting the pipe.
+	 */
+	ret = sps_set_config(pipe_handle, &pipe_config_no_irq);
+	if (ret)
+		qbam_err(qbam_dev,
+			"error:%d sps_set_config(pipe:%d) before disconnect\n",
+			ret, qbam_chan->bam_pipe.index);
+
+	ret = sps_disconnect(pipe_handle);
+	if (ret)
+		qbam_err(qbam_dev, "error:%d sps_disconnect(pipe:%d)\n",
+			 ret, qbam_chan->bam_pipe.index);
+
+	return ret;
+}
+
+/*  qbam_free_chan - disconnect channel and free its resources */
+static void qbam_free_chan(struct dma_chan *chan)
+{
+	struct qbam_channel *qbam_chan = DMA_TO_QBAM_CHAN(chan);
+	struct qbam_device  *qbam_dev  = qbam_chan->qbam_dev;
+
+	mutex_lock(&qbam_chan->lock);
+	if (qbam_disconnect_chan(qbam_chan))
+		qbam_err(qbam_dev,
+			"error free_chan() failed to disconnect(pipe:%d)\n",
+			qbam_chan->bam_pipe.index);
+	qbam_chan->pending_desc.sgl = NULL;
+	qbam_chan->pending_desc.sg_len = 0;
+	mutex_unlock(&qbam_chan->lock);
+}
+
+static struct dma_chan *qbam_dma_xlate(struct of_phandle_args *dma_spec,
+							struct of_dma *of)
+{
+	struct qbam_device  *qbam_dev  = of->of_dma_data;
+	struct qbam_channel *qbam_chan;
+	u32 channel_index;
+	u32 num_descriptors;
+
+	if (dma_spec->args_count != QBAM_OF_SLAVE_N_ARGS) {
+		qbam_err(qbam_dev,
+			"invalid number of dma arguments, expect:%d got:%d\n",
+			QBAM_OF_SLAVE_N_ARGS, dma_spec->args_count);
+		return NULL;
+	}
+
+	channel_index = dma_spec->args[0];
+
+	if (channel_index >= QBAM_MAX_CHANNELS) {
+		qbam_err(qbam_dev,
+			"error: channel_index:%d out of bounds",
+			channel_index);
+		return NULL;
+	}
+	qbam_chan = qbam_dev->channels[channel_index];
+	/* return the qbam_chan if it exists, or create one */
+	if (qbam_chan) {
+		qbam_chan->chan.client_count = 1;
+		return &qbam_chan->chan;
+	}
+
+	num_descriptors = dma_spec->args[1];
+	if (!num_descriptors || (num_descriptors > QBAM_MAX_DESCRIPTORS)) {
+		qbam_err(qbam_dev,
+			"invalid number of descriptors, range[1..%d] got:%d\n",
+			QBAM_MAX_DESCRIPTORS, num_descriptors);
+		return NULL;
+	}
+
+	/* allocate a channel */
+	qbam_chan = kzalloc(sizeof(*qbam_chan), GFP_KERNEL);
+	if (!qbam_chan) {
+		qbam_err(qbam_dev, "error kzalloc(size:%zu) failed\n",
+			 sizeof(*qbam_chan));
+		return NULL;
+	}
+
+	/* allocate BAM resources for that channel */
+	qbam_chan->bam_pipe.handle = sps_alloc_endpoint();
+	if (!qbam_chan->bam_pipe.handle) {
+		qbam_err(qbam_dev, "error: sps_alloc_endpoint() return NULL\n");
+		kfree(qbam_chan);
+		return NULL;
+	}
+
+	/* init dma_chan */
+	qbam_chan->chan.device = &qbam_dev->dma_dev;
+	dma_cookie_init(&qbam_chan->chan);
+	qbam_chan->chan.client_count                 = 1;
+	/* init qbam_chan */
+	qbam_chan->bam_pipe.index                    = channel_index;
+	qbam_chan->bam_pipe.num_descriptors          = num_descriptors;
+	qbam_chan->bam_pipe.sps_connect_flags        = dma_spec->args[2];
+	qbam_chan->bam_pipe.sps_register_event_flags = dma_spec->args[3];
+	qbam_chan->qbam_dev                          = qbam_dev;
+	mutex_init(&qbam_chan->lock);
+
+	/* add to dma_device list of channels */
+	list_add(&qbam_chan->chan.device_node, &qbam_dev->dma_dev.channels);
+	qbam_dev->channels[channel_index] = qbam_chan;
+
+	return &qbam_chan->chan;
+}
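+
+/*
+ * The xlate above consumes a four-cell dma specifier: pipe index, number
+ * of descriptors, sps_connect flags and sps_register_event flags. A
+ * hypothetical client binding for illustration (node and flag values
+ * made up):
+ *
+ *	dmas = <&blsp1_bam 4 32 0x0 0x1>;
+ *	dma-names = "rx";
+ */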
+
+static enum dma_status qbam_tx_status(struct dma_chan *chan,
+			dma_cookie_t cookie, struct dma_tx_state *state)
+{
+	struct qbam_channel *qbam_chan = DMA_TO_QBAM_CHAN(chan);
+	struct qbam_async_tx_descriptor	*qbam_desc = &qbam_chan->pending_desc;
+	enum dma_status ret;
+
+	mutex_lock(&qbam_chan->lock);
+
+	if (qbam_chan->error) {
+		mutex_unlock(&qbam_chan->lock);
+		return DMA_ERROR;
+	}
+
+	ret = dma_cookie_status(chan, cookie, state);
+	if (ret == DMA_IN_PROGRESS) {
+		struct scatterlist *sg;
+		int i;
+		u32 transfer_size = 0;
+
+		for_each_sg(qbam_desc->sgl, sg, qbam_desc->sg_len, i)
+			transfer_size += sg_dma_len(sg);
+
+		dma_set_residue(state, transfer_size);
+	}
+	mutex_unlock(&qbam_chan->lock);
+
+	return ret;
+}
+
+/*
+ * qbam_init_bam_handle - find or create bam handle.
+ *
+ * A BAM device needs to be registered for each BLSP once and only once. If it
+ * was already registered, we find the handle to the registered bam and return
+ * it; otherwise we register it here.
+ * The module which registered the BAM is responsible for deregistering it.
+ */
+static int qbam_init_bam_handle(struct qbam_device *qbam_dev)
+{
+	int ret = 0;
+	struct sps_bam_props bam_props = {0};
+
+	/*
+	 * Check if BAM is already registered with SPS on the current
+	 * BLSP. If it isn't then go ahead and register it.
+	 */
+	ret = sps_phy2h(qbam_dev->mem_resource->start, &qbam_dev->handle);
+	if (qbam_dev->handle)
+		return 0;
+
+	qbam_dev->regs = devm_ioremap_resource(qbam_dev->dma_dev.dev,
+					       qbam_dev->mem_resource);
+	if (IS_ERR(qbam_dev->regs)) {
+		qbam_err(qbam_dev, "error:%ld ioremap(phy:0x%lx len:0x%lx)\n",
+			 PTR_ERR(qbam_dev->regs),
+			 (ulong) qbam_dev->mem_resource->start,
+			 (ulong) resource_size(qbam_dev->mem_resource));
+		return PTR_ERR(qbam_dev->regs);
+	}
+
+	bam_props.phys_addr		= qbam_dev->mem_resource->start;
+	bam_props.virt_addr		= qbam_dev->regs;
+	bam_props.summing_threshold	= qbam_dev->summing_threshold;
+	bam_props.manage		= qbam_dev->manage;
+	bam_props.irq			= qbam_dev->irq;
+
+	ret = sps_register_bam_device(&bam_props, &qbam_dev->handle);
+	if (ret)
+		qbam_err(qbam_dev, "error:%d sps_register_bam_device\n"
+			 "(phy:0x%lx virt:0x%lx irq:%d)\n",
+			 ret, (ulong) bam_props.phys_addr,
+			 (ulong) bam_props.virt_addr, qbam_dev->irq);
+	else
+		qbam_dev->deregister_required = true;
+
+	return ret;
+}
+
+
+static int qbam_alloc_chan(struct dma_chan *chan)
+{
+	return 0;
+}
+
+static void qbam_eot_callback(struct sps_event_notify *notify)
+{
+	struct qbam_async_tx_descriptor *qbam_desc = notify->data.transfer.user;
+	struct dma_async_tx_descriptor  *dma_desc  = &qbam_desc->dma_desc;
+	dma_async_tx_callback callback	= dma_desc->callback;
+	void *param			= dma_desc->callback_param;
+
+	if (callback)
+		callback(param);
+}
+
+static void qbam_error_callback(struct sps_event_notify *notify)
+{
+	struct qbam_channel *qbam_chan	= notify->user;
+	qbam_err(qbam_chan->qbam_dev, "error: qbam_error_callback(pipe:%d)\n",
+		 qbam_chan->bam_pipe.index);
+}
+
+static int qbam_connect_chan(struct qbam_channel *qbam_chan)
+{
+	int ret = 0;
+	struct qbam_device       *qbam_dev = qbam_chan->qbam_dev;
+	struct sps_register_event bam_eot_event = {
+		.mode		= SPS_TRIGGER_CALLBACK,
+		.options	= qbam_chan->bam_pipe.sps_register_event_flags,
+		.callback	= qbam_eot_callback,
+		};
+	struct sps_register_event bam_error_event = {
+		.mode		= SPS_TRIGGER_CALLBACK,
+		.options	= SPS_O_ERROR,
+		.callback	= qbam_error_callback,
+		.user		= qbam_chan,
+		};
+
+	ret = sps_connect(qbam_chan->bam_pipe.handle, &qbam_chan->bam_pipe.cfg);
+	if (ret) {
+		qbam_err(qbam_dev, "error:%d sps_connect(pipe:%d)\n", ret,
+			 qbam_chan->bam_pipe.index);
+		return ret;
+	}
+
+	ret = sps_register_event(qbam_chan->bam_pipe.handle, &bam_eot_event);
+	if (ret) {
+		qbam_err(qbam_dev, "error:%d sps_register_event(eot@pipe:%d)\n",
+			 ret, qbam_chan->bam_pipe.index);
+		goto need_disconnect;
+	}
+
+	ret = sps_register_event(qbam_chan->bam_pipe.handle, &bam_error_event);
+	if (ret) {
+		qbam_err(qbam_dev, "error:%d sps_register_event(err@pipe:%d)\n",
+			 ret, qbam_chan->bam_pipe.index);
+		goto need_disconnect;
+	}
+
+	return 0;
+
+need_disconnect:
+	ret = sps_disconnect(qbam_chan->bam_pipe.handle);
+	if (ret)
+		qbam_err(qbam_dev, "error:%d sps_disconnect(pipe:%d)\n", ret,
+			 qbam_chan->bam_pipe.index);
+	return ret;
+}
+
+/*
+ * qbam_slave_cfg - configure and connect a BAM pipe
+ *
+ * @cfg only cares about cfg->direction
+ */
+static int qbam_slave_cfg(struct dma_chan *chan,
+						struct dma_slave_config *cfg)
+{
+	int ret = 0;
+	struct qbam_channel *qbam_chan = DMA_TO_QBAM_CHAN(chan);
+	struct qbam_device *qbam_dev = qbam_chan->qbam_dev;
+	struct sps_connect *pipe_cfg = &qbam_chan->bam_pipe.cfg;
+
+	if (!qbam_dev->handle) {
+		ret = qbam_init_bam_handle(qbam_dev);
+		if (ret)
+			return ret;
+	}
+
+	if (qbam_chan->bam_pipe.cfg.desc.base)
+		goto cfg_done;
+
+	ret = sps_get_config(qbam_chan->bam_pipe.handle,
+						&qbam_chan->bam_pipe.cfg);
+	if (ret) {
+		qbam_err(qbam_dev, "error:%d sps_get_config(0x%p)\n",
+			 ret, qbam_chan->bam_pipe.handle);
+		return ret;
+	}
+
+	qbam_chan->direction = cfg->direction;
+	if (cfg->direction == DMA_MEM_TO_DEV) {
+		pipe_cfg->source          = SPS_DEV_HANDLE_MEM;
+		pipe_cfg->destination     = qbam_dev->handle;
+		pipe_cfg->mode            = SPS_MODE_DEST;
+		pipe_cfg->src_pipe_index  = 0;
+		pipe_cfg->dest_pipe_index = qbam_chan->bam_pipe.index;
+	} else {
+		pipe_cfg->source          = qbam_dev->handle;
+		pipe_cfg->destination     = SPS_DEV_HANDLE_MEM;
+		pipe_cfg->mode            = SPS_MODE_SRC;
+		pipe_cfg->src_pipe_index  = qbam_chan->bam_pipe.index;
+		pipe_cfg->dest_pipe_index = 0;
+	}
+	pipe_cfg->options   =  qbam_chan->bam_pipe.sps_connect_flags;
+	pipe_cfg->desc.size = (qbam_chan->bam_pipe.num_descriptors + 1) *
+						 sizeof(struct sps_iovec);
+	/* managed dma_alloc_coherent() */
+	pipe_cfg->desc.base = dmam_alloc_coherent(qbam_dev->dma_dev.dev,
+						  pipe_cfg->desc.size,
+						  &pipe_cfg->desc.phys_base,
+						  GFP_KERNEL);
+	if (!pipe_cfg->desc.base) {
+		qbam_err(qbam_dev,
+			"error dma_alloc_coherent(desc-sz:%llu * n-descs:%d)\n",
+			(u64) sizeof(struct sps_iovec),
+			qbam_chan->bam_pipe.num_descriptors);
+		return -ENOMEM;
+	}
+cfg_done:
+	ret = qbam_connect_chan(qbam_chan);
+	if (ret)
+		dmam_free_coherent(qbam_dev->dma_dev.dev, pipe_cfg->desc.size,
+				 pipe_cfg->desc.base, pipe_cfg->desc.phys_base);
+
+	return ret;
+}
+
+static int qbam_flush_chan(struct dma_chan *chan)
+{
+	struct qbam_channel *qbam_chan = DMA_TO_QBAM_CHAN(chan);
+	int ret = qbam_disconnect_chan(qbam_chan);
+	if (ret) {
+		qbam_err(qbam_chan->qbam_dev,
+			 "error: disconnect flush(pipe:%d)\n",
+			 qbam_chan->bam_pipe.index);
+		return ret;
+	}
+	ret = qbam_connect_chan(qbam_chan);
+	if (ret)
+		qbam_err(qbam_chan->qbam_dev,
+			 "error: reconnect flush(pipe:%d)\n",
+			 qbam_chan->bam_pipe.index);
+	return ret;
+}
+
+/* qbam_tx_submit - sets the descriptor as the next one to be executed */
+static dma_cookie_t qbam_tx_submit(struct dma_async_tx_descriptor *dma_desc)
+{
+	struct qbam_channel *qbam_chan = DMA_TO_QBAM_CHAN(dma_desc->chan);
+	dma_cookie_t ret;
+	mutex_lock(&qbam_chan->lock);
+
+	ret = dma_cookie_assign(dma_desc);
+
+	mutex_unlock(&qbam_chan->lock);
+
+	return ret;
+}
+
+/*
+ * qbam_prep_slave_sg - creates qbam_xfer_buf from a list of sg
+ *
+ * @chan: dma channel
+ * @sgl: scatter gather list
+ * @sg_len: length of sg
+ * @direction: DMA transfer direction
+ * @flags: DMA flags
+ * @context: transfer context (unused)
+ * @return the newly created descriptor or negative ERR_PTR() on error
+ */
+static struct dma_async_tx_descriptor *qbam_prep_slave_sg(struct dma_chan *chan,
+	struct scatterlist *sgl, unsigned int sg_len,
+	enum dma_transfer_direction direction, unsigned long flags,
+	void *context)
+{
+	struct qbam_channel *qbam_chan = DMA_TO_QBAM_CHAN(chan);
+	struct qbam_device *qbam_dev = qbam_chan->qbam_dev;
+	struct qbam_async_tx_descriptor *qbam_desc = &qbam_chan->pending_desc;
+
+	if (qbam_chan->direction != direction) {
+		qbam_err(qbam_dev,
+			"invalid dma transfer direction expected:%d given:%d\n",
+			qbam_chan->direction, direction);
+		return ERR_PTR(-EINVAL);
+	}
+
+	qbam_desc->dma_desc.chan	= &qbam_chan->chan;
+	qbam_desc->dma_desc.tx_submit	= qbam_tx_submit;
+	qbam_desc->sgl			= sgl;
+	qbam_desc->sg_len		= sg_len;
+	qbam_desc->flags		= flags;
+	return &qbam_desc->dma_desc;
+}
+
+/*
+ * qbam_issue_pending - queue pending descriptor to BAM
+ *
+ * Iterate over the transfers of the pending descriptor and push them to bam
+ */
+static void qbam_issue_pending(struct dma_chan *chan)
+{
+	int i;
+	int ret = 0;
+	struct qbam_channel *qbam_chan = DMA_TO_QBAM_CHAN(chan);
+	struct qbam_device  *qbam_dev  = qbam_chan->qbam_dev;
+	struct qbam_async_tx_descriptor *qbam_desc = &qbam_chan->pending_desc;
+	struct scatterlist		*sg;
+	mutex_lock(&qbam_chan->lock);
+	if (!qbam_chan->pending_desc.sgl) {
+		qbam_err(qbam_dev,
+		   "error qbam_issue_pending() no pending descriptor pipe:%d\n",
+		   qbam_chan->bam_pipe.index);
+		mutex_unlock(&qbam_chan->lock);
+		return;
+	}
+
+	for_each_sg(qbam_desc->sgl, sg, qbam_desc->sg_len, i) {
+
+		/* Add BAM flags only on the last buffer */
+		bool is_last_buf = (i == ((qbam_desc->sg_len) - 1));
+
+		ret = sps_transfer_one(qbam_chan->bam_pipe.handle,
+					sg_dma_address(sg), sg_dma_len(sg),
+					qbam_desc,
+					(is_last_buf ? qbam_desc->flags : 0));
+		if (ret < 0) {
+			qbam_chan->error = ret;
+
+			qbam_err(qbam_dev, "error:%d sps_transfer_one\n"
+				"(addr:0x%lx len:%d flags:0x%lx pipe:%d)\n",
+				ret, (ulong) sg_dma_address(sg), sg_dma_len(sg),
+				qbam_desc->flags, qbam_chan->bam_pipe.index);
+			break;
+		}
+	}
+
+	dma_cookie_complete(&qbam_desc->dma_desc);
+	qbam_chan->error = 0;
+	qbam_desc->sgl = NULL;
+	qbam_desc->sg_len = 0;
+	mutex_unlock(&qbam_chan->lock);
+}
+
+static int qbam_deregister_bam_dev(struct qbam_device *qbam_dev)
+{
+	int ret;
+
+	if (!qbam_dev->handle)
+		return 0;
+
+	ret = sps_deregister_bam_device(qbam_dev->handle);
+	if (ret)
+		qbam_err(qbam_dev,
+			"error:%d sps_deregister_bam_device(hndl:0x%lx) failed",
+			ret, qbam_dev->handle);
+	return ret;
+}
+
+static void qbam_pipes_free(struct qbam_device *qbam_dev)
+{
+	struct qbam_channel *qbam_chan_cur, *qbam_chan_next;
+
+	list_for_each_entry_safe(qbam_chan_cur, qbam_chan_next,
+			&qbam_dev->dma_dev.channels, chan.device_node) {
+		mutex_lock(&qbam_chan_cur->lock);
+		qbam_free_chan(&qbam_chan_cur->chan);
+		sps_free_endpoint(qbam_chan_cur->bam_pipe.handle);
+		list_del(&qbam_chan_cur->chan.device_node);
+		mutex_unlock(&qbam_chan_cur->lock);
+		kfree(qbam_chan_cur);
+	}
+}
+
+static int qbam_probe(struct platform_device *pdev)
+{
+	struct qbam_device *qbam_dev;
+	int ret;
+	bool managed_locally;
+	struct device_node *of_node = pdev->dev.of_node;
+
+	qbam_dev = devm_kzalloc(&pdev->dev, sizeof(*qbam_dev), GFP_KERNEL);
+	if (!qbam_dev)
+		return -ENOMEM;
+	qbam_dev->dma_dev.dev = &pdev->dev;
+	platform_set_drvdata(pdev, qbam_dev);
+
+	qbam_dev->mem_resource = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!qbam_dev->mem_resource) {
+		qbam_err(qbam_dev, "missing 'reg' DT entry");
+		return -ENODEV;
+	}
+
+	qbam_dev->irq = platform_get_irq(pdev, 0);
+	if (qbam_dev->irq < 0) {
+		qbam_err(qbam_dev, "missing DT IRQ resource entry");
+		return -EINVAL;
+	}
+
+	ret = of_property_read_u32(of_node, QBAM_OF_SUM_THRESHOLD,
+				   &qbam_dev->summing_threshold);
+	if (ret) {
+		qbam_err(qbam_dev, "missing '%s' DT entry",
+			 QBAM_OF_SUM_THRESHOLD);
+		return ret;
+	}
+
+	/* read from DT and set sps_bam_props.manage */
+	managed_locally = of_property_read_bool(of_node, QBAM_OF_MANAGE_LOCAL);
+	qbam_dev->manage = managed_locally ? SPS_BAM_MGR_LOCAL :
+					     SPS_BAM_MGR_DEVICE_REMOTE;
+
+	/* Init channels */
+	INIT_LIST_HEAD(&qbam_dev->dma_dev.channels);
+
+	/* Set capabilities */
+	dma_cap_zero(qbam_dev->dma_dev.cap_mask);
+	dma_cap_set(DMA_SLAVE,		qbam_dev->dma_dev.cap_mask);
+	dma_cap_set(DMA_PRIVATE,	qbam_dev->dma_dev.cap_mask);
+
+	/* Initialize dmaengine callback apis */
+	qbam_dev->dma_dev.device_alloc_chan_resources	= qbam_alloc_chan;
+	qbam_dev->dma_dev.device_free_chan_resources	= qbam_free_chan;
+	qbam_dev->dma_dev.device_prep_slave_sg		= qbam_prep_slave_sg;
+	qbam_dev->dma_dev.device_terminate_all		= qbam_flush_chan;
+	qbam_dev->dma_dev.device_config			= qbam_slave_cfg;
+	qbam_dev->dma_dev.device_issue_pending		= qbam_issue_pending;
+	qbam_dev->dma_dev.device_tx_status		= qbam_tx_status;
+
+	/* Register with the DMA framework */
+	ret = dma_async_device_register(&qbam_dev->dma_dev);
+	if (ret) {
+		qbam_err(qbam_dev, "error:%d dma_async_device_register()\n",
+			 ret);
+		goto err_unregister_bam;
+	}
+
+	ret = of_dma_controller_register(of_node, qbam_dma_xlate, qbam_dev);
+	if (ret) {
+		qbam_err(qbam_dev, "error:%d of_dma_controller_register()\n",
+			 ret);
+		goto err_unregister_dma;
+	}
+	return 0;
+
+err_unregister_dma:
+	dma_async_device_unregister(&qbam_dev->dma_dev);
+err_unregister_bam:
+	if (qbam_dev->deregister_required)
+		return qbam_deregister_bam_dev(qbam_dev);
+
+	return ret;
+}
+
+static int qbam_remove(struct platform_device *pdev)
+{
+	struct qbam_device *qbam_dev = platform_get_drvdata(pdev);
+
+	dma_async_device_unregister(&qbam_dev->dma_dev);
+
+	/* free BAM pipes resources */
+	qbam_pipes_free(qbam_dev);
+
+	if (qbam_dev->deregister_required)
+		return qbam_deregister_bam_dev(qbam_dev);
+
+	return 0;
+}
+
+static const struct of_device_id qbam_of_match[] = {
+	{ .compatible = "qcom,sps-dma" },
+	{}
+};
+MODULE_DEVICE_TABLE(of, qbam_of_match);
+
+static struct platform_driver qbam_driver = {
+	.probe = qbam_probe,
+	.remove = qbam_remove,
+	.driver = {
+		.name = "qcom-sps-dma",
+		.owner = THIS_MODULE,
+		.of_match_table = qbam_of_match,
+	},
+};
+
+module_platform_driver(qbam_driver);
+
+MODULE_DESCRIPTION("DMA-API driver for the qcom BAM");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:qcom-sps-dma");
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/edac/cortex_arm64_edac.c	2019-01-22 16:16:23.167243364 +0100
@@ -0,0 +1,1002 @@
+/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/edac.h>
+#include <linux/interrupt.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/perf_event.h>
+#include <linux/smp.h>
+#include <linux/cpu.h>
+#include <linux/cpu_pm.h>
+#include <linux/of_irq.h>
+#include <linux/spinlock.h>
+#include <linux/slab.h>
+#include <linux/workqueue.h>
+#include <linux/percpu.h>
+#include <linux/msm_rtb.h>
+
+#include <asm/cputype.h>
+#include <asm/esr.h>
+
+#include "edac_core.h"
+
+#define A53_CPUMERRSR_FATAL(a)	((a) & (1ULL << 63))
+#define A53_CPUMERRSR_OTHER(a)	(((a) >> 40) & 0xff)
+#define A53_CPUMERRSR_REPT(a)	(((a) >> 32) & 0xff)
+#define A53_CPUMERRSR_VALID(a)	((a) & (1ULL << 31))
+#define A53_CPUMERRSR_RAMID(a)	(((a) >> 24) & 0x7f)
+#define A53_CPUMERRSR_CPUID(a)	(((a) >> 18) & 0x07)
+#define A53_CPUMERRSR_ADDR(a)	((a) & 0xfff)
+
+#define A53_L2MERRSR_FATAL(a)	((a) & (1ULL << 63))
+#define A53_L2MERRSR_OTHER(a)	(((a) >> 40) & 0xff)
+#define A53_L2MERRSR_REPT(a)	(((a) >> 32) & 0xff)
+#define A53_L2MERRSR_VALID(a)	((a) & (1ULL << 31))
+#define A53_L2MERRSR_RAMID(a)	(((a) >> 24) & 0x7f)
+#define A53_L2MERRSR_CPUID(a)	(((a) >> 18) & 0x0f)
+#define A53_L2MERRSR_INDEX(a)	(((a) >> 3) & 0x3fff)
+
+#define A57_CPUMERRSR_FATAL(a)	((a) & (1ULL << 63))
+#define A57_CPUMERRSR_OTHER(a)	(((a) >> 40) & 0xff)
+#define A57_CPUMERRSR_REPT(a)	(((a) >> 32) & 0xff)
+#define A57_CPUMERRSR_VALID(a)	((a) & (1ULL << 31))
+#define A57_CPUMERRSR_RAMID(a)	(((a) >> 24) & 0x7f)
+#define A57_CPUMERRSR_BANK(a)	(((a) >> 18) & 0x1f)
+#define A57_CPUMERRSR_INDEX(a)	((a) & 0x1ffff)
+
+#define A57_L2MERRSR_FATAL(a)	((a) & (1ULL << 63))
+#define A57_L2MERRSR_OTHER(a)	(((a) >> 40) & 0xff)
+#define A57_L2MERRSR_REPT(a)	(((a) >> 32) & 0xff)
+#define A57_L2MERRSR_VALID(a)	((a) & (1ULL << 31))
+#define A57_L2MERRSR_RAMID(a)	(((a) >> 24) & 0x7f)
+#define A57_L2MERRSR_CPUID(a)	(((a) >> 18) & 0x0f)
+#define A57_L2MERRSR_INDEX(a)	((a) & 0x1ffff)
+
+#define KRYO2XX_GOLD_L2MERRSR_FATAL(a)	((a) & (1ULL << 63))
+#define KRYO2XX_GOLD_L2MERRSR_OTHER(a)	(((a) >> 40) & 0x3f)
+#define KRYO2XX_GOLD_L2MERRSR_REPT(a)	(((a) >> 32) & 0x3f)
+#define KRYO2XX_GOLD_L2MERRSR_VALID(a)	((a) & (1ULL << 31))
+#define KRYO2XX_GOLD_L2MERRSR_RAMID(a)	((a) & (1ULL << 24))
+#define KRYO2XX_GOLD_L2MERRSR_WAY(a)	(((a) >> 18) & 0x0f)
+#define KRYO2XX_GOLD_L2MERRSR_INDEX(a)	(((a) >> 3) & 0x3fff)
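+
+/*
+ * Decode example for the A53 layout above (value invented): a CPUMERRSR
+ * of 0x88040007 has bit 31 set (valid), RAMID 0x08 (L1 data tag RAM),
+ * CPUID 1 (reported below as cpu 0, way 1) and address offset 0x7.
+ */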
+
+#define L2ECTLR_INT_ERR		(1 << 30)
+#define L2ECTLR_EXT_ERR		(1 << 29)
+
+#define ESR_SERROR(a)	((a) >> ESR_ELx_EC_SHIFT == ESR_ELx_EC_SERROR)
+#define ESR_VALID(a)	((a) & BIT(24))
+#define ESR_L2_DBE(a) (ESR_SERROR(a) && ESR_VALID(a) && \
+			(((a) & 0x00C00003) == 0x1))
+
+#define CCI_IMPRECISEERROR_REG	0x10
+
+#define L1_CACHE		0
+#define L2_CACHE		1
+#define CCI			2
+
+#define A53_L1_CE			0
+#define A53_L1_UE			1
+#define A53_L2_CE			2
+#define A53_L2_UE			3
+#define A57_L1_CE			4
+#define A57_L1_UE			5
+#define A57_L2_CE			6
+#define A57_L2_UE			7
+#define L2_EXT_UE			8
+#define CCI_UE				9
+#define KRYO2XX_SILVER_L1_CE		10
+#define KRYO2XX_SILVER_L1_UE		11
+#define KRYO2XX_SILVER_L2_CE		12
+#define KRYO2XX_SILVER_L2_UE		13
+#define KRYO2XX_GOLD_L2_CE		14
+#define KRYO2XX_GOLD_L2_UE		15
+
+#ifdef CONFIG_EDAC_CORTEX_ARM64_PANIC_ON_UE
+#define ARM64_ERP_PANIC_ON_UE 1
+#else
+#define ARM64_ERP_PANIC_ON_UE 0
+#endif
+
+#ifdef CONFIG_EDAC_CORTEX_ARM64_PANIC_ON_CE
+static int panic_on_ce = 1;
+#else
+static int panic_on_ce;
+#endif
+module_param(panic_on_ce, int, 0);
+
+#define EDAC_CPU	"arm64"
+
+enum error_type {
+	SBE,
+	DBE,
+};
+
+static const char * const err_name[] = {
+	"Single-bit",
+	"Double-bit",
+};
+
+struct erp_drvdata {
+	struct edac_device_ctl_info *edev_ctl;
+	void __iomem *cci_base;
+	struct notifier_block nb_pm;
+	struct notifier_block nb_cpu;
+	struct notifier_block nb_panic;
+	struct work_struct work;
+	struct perf_event *memerr_counters[NR_CPUS];
+};
+
+static struct erp_drvdata *panic_handler_drvdata;
+
+struct erp_local_data {
+	struct erp_drvdata *drv;
+	enum error_type err;
+};
+
+#define MEM_ERROR_EVENT		0x1A
+
+struct errors_edac {
+	const char * const msg;
+	void (*func)(struct edac_device_ctl_info *edac_dev,
+			int inst_nr, int block_nr, const char *msg);
+};
+
+static const struct errors_edac errors[] = {
+	{"A53 L1 Correctable Error", edac_device_handle_ce },
+	{"A53 L1 Uncorrectable Error", edac_device_handle_ue },
+	{"A53 L2 Correctable Error", edac_device_handle_ce },
+	{"A53 L2 Uncorrectable Error", edac_device_handle_ue },
+	{"A57 L1 Correctable Error", edac_device_handle_ce },
+	{"A57 L1 Uncorrectable Error", edac_device_handle_ue },
+	{"A57 L2 Correctable Error", edac_device_handle_ce },
+	{"A57 L2 Uncorrectable Error", edac_device_handle_ue },
+	{"L2 External Error", edac_device_handle_ue },
+	{"CCI Error", edac_device_handle_ue },
+	{"Kryo2xx Silver L1 Correctable Error", edac_device_handle_ce },
+	{"Kryo2xx Silver L1 Uncorrectable Error", edac_device_handle_ue },
+	{"Kryo2xx Silver L2 Correctable Error", edac_device_handle_ce },
+	{"Kryo2xx Silver L2 Uncorrectable Error", edac_device_handle_ue },
+	{"Kryo2xx Gold L2 Correctable Error", edac_device_handle_ce },
+	{"Kryo2xx Gold L2 Uncorrectable Error", edac_device_handle_ue },
+};
+
+#define read_l2merrsr_el1 ({                                           \
+	u64 __val;                                                     \
+	asm("mrs %0, s3_1_c15_c2_3" : "=r" (__val));                  \
+	__val;                                                         \
+})
+
+#define read_l2ectlr_el1 ({						\
+	u32 __val;							\
+	asm("mrs %0, s3_1_c11_c0_3" : "=r" (__val));			\
+	__val;								\
+})
+
+#define read_cpumerrsr_el1 ({						\
+	u64 __val;							\
+	asm("mrs %0, s3_1_c15_c2_2" : "=r" (__val));			\
+	__val;								\
+})
+
+#define read_esr_el1 ({							\
+	u64 __val;							\
+	asm("mrs %0, esr_el1" : "=r" (__val));				\
+	__val;								\
+})
+
+#define write_l2merrsr_el1(val) ({					\
+	asm("msr s3_1_c15_c2_3, %0" : : "r" (val));			\
+})
+
+#define write_l2ectlr_el1(val) ({					\
+	asm("msr s3_1_c11_c0_3, %0" : : "r" (val));			\
+})
+
+#define write_cpumerrsr_el1(val) ({					\
+	asm("msr s3_1_c15_c2_2, %0" : : "r" (val));			\
+})
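+
+/*
+ * The s3_1_c15_c2_{2,3} and s3_1_c11_c0_3 encodings above are the
+ * IMPLEMENTATION DEFINED CPUMERRSR_EL1, L2MERRSR_EL1 and L2ECTLR_EL1
+ * registers described in the Cortex-A53/A57 TRMs, accessed via their raw
+ * encodings because older assemblers do not know them by name.
+ */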
+
+static void kryo2xx_print_error_state_regs(void)
+{
+	u64 l2merrsr;
+	u64 cpumerrsr;
+	u32 esr_el1;
+	u32 l2ectlr;
+
+	cpumerrsr = read_cpumerrsr_el1;
+	l2merrsr = read_l2merrsr_el1;
+	esr_el1 = read_esr_el1;
+	l2ectlr = read_l2ectlr_el1;
+
+	/* store data in uncached rtb logs */
+	uncached_logk_pc(LOGK_READL, __builtin_return_address(0),
+				(void *)cpumerrsr);
+	uncached_logk_pc(LOGK_READL, __builtin_return_address(0),
+				(void *)l2merrsr);
+	uncached_logk_pc(LOGK_READL, __builtin_return_address(0),
+				(void *)((u64)esr_el1));
+	uncached_logk_pc(LOGK_READL, __builtin_return_address(0),
+				(void *)((u64)l2ectlr));
+
+	edac_printk(KERN_CRIT, EDAC_CPU, "CPUMERRSR value = %#llx\n",
+								cpumerrsr);
+	edac_printk(KERN_CRIT, EDAC_CPU, "L2MERRSR value = %#llx\n", l2merrsr);
+
+	edac_printk(KERN_CRIT, EDAC_CPU, "ESR value = %#x\n", esr_el1);
+	edac_printk(KERN_CRIT, EDAC_CPU, "L2ECTLR value = %#x\n", l2ectlr);
+	if (ESR_L2_DBE(esr_el1))
+		edac_printk(KERN_CRIT, EDAC_CPU,
+			"Double bit error on dirty L2 cacheline\n");
+}
+
+static void kryo2xx_gold_print_error_state_regs(void)
+{
+	u64 l2merrsr;
+	u32 esr_el1;
+	u32 l2ectlr;
+
+	l2merrsr = read_l2merrsr_el1;
+	esr_el1 = read_esr_el1;
+	l2ectlr = read_l2ectlr_el1;
+
+	uncached_logk_pc(LOGK_READL, __builtin_return_address(0),
+				(void *)l2merrsr);
+	uncached_logk_pc(LOGK_READL, __builtin_return_address(0),
+				(void *)((u64)esr_el1));
+	uncached_logk_pc(LOGK_READL, __builtin_return_address(0),
+				(void *)((u64)l2ectlr));
+
+	edac_printk(KERN_CRIT, EDAC_CPU, "L2MERRSR value = %#llx\n", l2merrsr);
+
+	edac_printk(KERN_CRIT, EDAC_CPU, "ESR value = %#x\n", esr_el1);
+	edac_printk(KERN_CRIT, EDAC_CPU, "L2ECTLR value = %#x\n", l2ectlr);
+	if (ESR_L2_DBE(esr_el1))
+		edac_printk(KERN_CRIT, EDAC_CPU,
+			"Double bit error on dirty L2 cacheline\n");
+}
+
+static void kryo2xx_silver_parse_cpumerrsr(struct erp_local_data *ed)
+{
+	u64 cpumerrsr;
+	int cpuid;
+
+	cpumerrsr = read_cpumerrsr_el1;
+
+	if (!A53_CPUMERRSR_VALID(cpumerrsr))
+		return;
+
+	if (A53_CPUMERRSR_FATAL(cpumerrsr))
+		ed->err = DBE;
+
+	edac_printk(KERN_CRIT, EDAC_CPU,
+			"Kryo2xx Silver CPU%d L1 %s Error detected\n",
+			smp_processor_id(), err_name[ed->err]);
+
+	kryo2xx_print_error_state_regs();
+	if (ed->err == DBE)
+		edac_printk(KERN_CRIT, EDAC_CPU, "Fatal error\n");
+
+	cpuid = A53_CPUMERRSR_CPUID(cpumerrsr);
+
+	switch (A53_CPUMERRSR_RAMID(cpumerrsr)) {
+	case 0x0:
+		edac_printk(KERN_CRIT, EDAC_CPU,
+				"L1 Instruction tag RAM way is %d\n", cpuid);
+		break;
+	case 0x1:
+		edac_printk(KERN_CRIT, EDAC_CPU,
+				"L1 Instruction data RAM bank is %d\n", cpuid);
+		break;
+	case 0x8:
+		edac_printk(KERN_CRIT, EDAC_CPU,
+				"L1 Data tag RAM cpu %d way is %d\n",
+				cpuid / 4, cpuid % 4);
+		break;
+	case 0x9:
+		edac_printk(KERN_CRIT, EDAC_CPU,
+				"L1 Data data RAM cpu %d way is %d\n",
+				cpuid / 4, cpuid % 4);
+		break;
+	case 0xA:
+		edac_printk(KERN_CRIT, EDAC_CPU,
+				"L1 Data dirty RAM cpu %d way is %d\n",
+				cpuid / 4, cpuid % 4);
+		break;
+	case 0x18:
+		edac_printk(KERN_CRIT, EDAC_CPU, "TLB RAM way is %d\n", cpuid);
+		break;
+	default:
+		edac_printk(KERN_CRIT, EDAC_CPU,
+				"Error in unknown RAM ID: %d\n",
+				(int) A53_CPUMERRSR_RAMID(cpumerrsr));
+		break;
+	}
+
+	edac_printk(KERN_CRIT, EDAC_CPU, "Repeated error count: %d\n",
+					 (int) A53_CPUMERRSR_REPT(cpumerrsr));
+	edac_printk(KERN_CRIT, EDAC_CPU, "Other error count: %d\n",
+					 (int) A53_CPUMERRSR_OTHER(cpumerrsr));
+
+	if (ed->err == SBE)
+		errors[KRYO2XX_SILVER_L1_CE].func(ed->drv->edev_ctl, smp_processor_id(),
+					L1_CACHE, errors[KRYO2XX_SILVER_L1_CE].msg);
+	else if (ed->err == DBE)
+		errors[KRYO2XX_SILVER_L1_UE].func(ed->drv->edev_ctl, smp_processor_id(),
+					L1_CACHE, errors[KRYO2XX_SILVER_L1_UE].msg);
+	write_cpumerrsr_el1(0);
+}
+
+static void kryo2xx_silver_parse_l2merrsr(struct erp_local_data *ed)
+{
+	u64 l2merrsr;
+	u32 l2ectlr;
+	int cpuid;
+
+	l2merrsr = read_l2merrsr_el1;
+	l2ectlr = read_l2ectlr_el1;
+
+	if (!A53_L2MERRSR_VALID(l2merrsr))
+		return;
+
+	if (A53_L2MERRSR_FATAL(l2merrsr))
+		ed->err = DBE;
+
+	edac_printk(KERN_CRIT, EDAC_CPU, "Kyro2xx Silver L2 %s Error detected\n",
+			err_name[ed->err]);
+	kryo2xx_print_error_state_regs();
+	if (ed->err == DBE)
+		edac_printk(KERN_CRIT, EDAC_CPU, "Fatal error\n");
+
+	cpuid = A53_L2MERRSR_CPUID(l2merrsr);
+
+	switch (A53_L2MERRSR_RAMID(l2merrsr)) {
+	case 0x10:
+		edac_printk(KERN_CRIT, EDAC_CPU,
+				"L2 tag RAM way is %d\n", cpuid);
+		break;
+	case 0x11:
+		edac_printk(KERN_CRIT, EDAC_CPU,
+				"L2 data RAM bank is %d\n", cpuid);
+		break;
+	case 0x12:
+		edac_printk(KERN_CRIT, EDAC_CPU,
+				"SCU snoop filter RAM cpu %d way is %d\n",
+				cpuid / 4, cpuid % 4);
+		break;
+	default:
+		edac_printk(KERN_CRIT, EDAC_CPU,
+				"Error in unknown RAM ID: %d\n",
+				(int) A53_L2MERRSR_RAMID(l2merrsr));
+		break;
+	}
+
+	edac_printk(KERN_CRIT, EDAC_CPU, "Repeated error count: %d\n",
+					 (int) A53_L2MERRSR_REPT(l2merrsr));
+	edac_printk(KERN_CRIT, EDAC_CPU, "Other error count: %d\n",
+					 (int) A53_L2MERRSR_OTHER(l2merrsr));
+
+	if (ed->err == SBE)
+		errors[KRYO2XX_SILVER_L2_CE].func(ed->drv->edev_ctl, smp_processor_id(),
+					L2_CACHE, errors[KRYO2XX_SILVER_L2_CE].msg);
+	else if (ed->err == DBE)
+		errors[KRYO2XX_SILVER_L2_UE].func(ed->drv->edev_ctl, smp_processor_id(),
+					L2_CACHE, errors[KRYO2XX_SILVER_L2_UE].msg);
+	write_l2merrsr_el1(0);
+}
+
+static void ca57_parse_cpumerrsr(struct erp_local_data *ed)
+{
+	u64 cpumerrsr;
+	int bank;
+
+	cpumerrsr = read_cpumerrsr_el1;
+
+	if (!A57_CPUMERRSR_VALID(cpumerrsr))
+		return;
+
+	if (A57_CPUMERRSR_FATAL(cpumerrsr))
+		ed->err = DBE;
+
+	edac_printk(KERN_CRIT, EDAC_CPU, "Cortex A57 CPU%d L1 %s Error detected\n",
+					 smp_processor_id(), err_name[ed->err]);
+	kryo2xx_print_error_state_regs();
+	if (ed->err == DBE)
+		edac_printk(KERN_CRIT, EDAC_CPU, "Fatal error\n");
+
+	bank = A57_CPUMERRSR_BANK(cpumerrsr);
+
+	switch (A57_CPUMERRSR_RAMID(cpumerrsr)) {
+	case 0x0:
+		edac_printk(KERN_CRIT, EDAC_CPU,
+				"L1 Instruction tag RAM bank %d\n", bank);
+		break;
+	case 0x1:
+		edac_printk(KERN_CRIT, EDAC_CPU,
+				"L1 Instruction data RAM bank %d\n", bank);
+		break;
+	case 0x8:
+		edac_printk(KERN_CRIT, EDAC_CPU,
+				"L1 Data tag RAM bank %d\n", bank);
+		break;
+	case 0x9:
+		edac_printk(KERN_CRIT, EDAC_CPU,
+				"L1 Data data RAM bank %d\n", bank);
+		break;
+	case 0x18:
+		edac_printk(KERN_CRIT, EDAC_CPU,
+				"TLB RAM bank %d\n", bank);
+		break;
+	default:
+		edac_printk(KERN_CRIT, EDAC_CPU,
+				"Error in unknown RAM ID: %d\n",
+				(int) A57_CPUMERRSR_RAMID(cpumerrsr));
+		break;
+	}
+
+	edac_printk(KERN_CRIT, EDAC_CPU, "Repeated error count: %d\n",
+					 (int) A57_CPUMERRSR_REPT(cpumerrsr));
+	edac_printk(KERN_CRIT, EDAC_CPU, "Other error count: %d\n",
+					 (int) A57_CPUMERRSR_OTHER(cpumerrsr));
+
+	if (ed->err == SBE)
+		errors[A57_L1_CE].func(ed->drv->edev_ctl, smp_processor_id(),
+					L1_CACHE, errors[A57_L1_CE].msg);
+	else if (ed->err == DBE)
+		errors[A57_L1_UE].func(ed->drv->edev_ctl, smp_processor_id(),
+					L1_CACHE, errors[A57_L1_UE].msg);
+	write_cpumerrsr_el1(0);
+}
+
+static void ca57_parse_l2merrsr(struct erp_local_data *ed)
+{
+	u64 l2merrsr;
+	u32 l2ectlr;
+	int cpuid;
+
+	l2merrsr = read_l2merrsr_el1;
+	l2ectlr = read_l2ectlr_el1;
+
+	if (!A57_L2MERRSR_VALID(l2merrsr))
+		return;
+
+	if (A57_L2MERRSR_FATAL(l2merrsr))
+		ed->err = DBE;
+
+	edac_printk(KERN_CRIT, EDAC_CPU, "CortexA57 L2 %s Error detected\n",
+							err_name[ed->err]);
+	kryo2xx_print_error_state_regs();
+	if (ed->err == DBE)
+		edac_printk(KERN_CRIT, EDAC_CPU, "Fatal error\n");
+
+	cpuid = A57_L2MERRSR_CPUID(l2merrsr);
+
+	switch (A57_L2MERRSR_RAMID(l2merrsr)) {
+	case 0x10:
+		edac_printk(KERN_CRIT, EDAC_CPU,
+				"L2 tag RAM cpu %d way is %d\n",
+				cpuid / 2, cpuid % 2);
+		break;
+	case 0x11:
+		edac_printk(KERN_CRIT, EDAC_CPU,
+				"L2 data RAM cpu %d bank is %d\n",
+				cpuid / 2, cpuid % 2);
+		break;
+	case 0x12:
+		edac_printk(KERN_CRIT, EDAC_CPU,
+				"SCU snoop tag RAM bank is %d\n", cpuid);
+		break;
+	case 0x14:
+		edac_printk(KERN_CRIT, EDAC_CPU,
+				"L2 dirty RAM cpu %d bank is %d\n",
+				cpuid / 2, cpuid % 2);
+		break;
+	case 0x18:
+		edac_printk(KERN_CRIT, EDAC_CPU,
+				"L2 inclusion PF RAM bank is %d\n", cpuid);
+		break;
+	default:
+		edac_printk(KERN_CRIT, EDAC_CPU,
+				"Error in unknown RAM ID: %d\n",
+				(int) A57_L2MERRSR_RAMID(l2merrsr));
+		break;
+	}
+
+	edac_printk(KERN_CRIT, EDAC_CPU, "Repeated error count: %d\n",
+					 (int) A57_L2MERRSR_REPT(l2merrsr));
+	edac_printk(KERN_CRIT, EDAC_CPU, "Other error count: %d\n",
+					 (int) A57_L2MERRSR_OTHER(l2merrsr));
+
+	if (ed->err == SBE) {
+		errors[A57_L2_CE].func(ed->drv->edev_ctl, smp_processor_id(),
+					L2_CACHE, errors[A57_L2_CE].msg);
+	} else if (ed->err == DBE) {
+		errors[A57_L2_UE].func(ed->drv->edev_ctl, smp_processor_id(),
+					L2_CACHE, errors[A57_L2_UE].msg);
+	}
+	write_l2merrsr_el1(0);
+}
+
+static void kryo2xx_gold_parse_l2merrsr(struct erp_local_data *ed)
+{
+	u64 l2merrsr;
+	int ramid, way;
+
+	l2merrsr = read_l2merrsr_el1;
+
+	if (!KRYO2XX_GOLD_L2MERRSR_VALID(l2merrsr))
+		return;
+
+	if (KRYO2XX_GOLD_L2MERRSR_FATAL(l2merrsr))
+		ed->err = DBE;
+
+	edac_printk(KERN_CRIT, EDAC_CPU, "Kryo2xx Gold L2 %s Error detected\n",
+							err_name[ed->err]);
+	kryo2xx_gold_print_error_state_regs();
+	if (ed->err == DBE)
+		edac_printk(KERN_CRIT, EDAC_CPU, "Fatal error\n");
+
+	way = KRYO2XX_GOLD_L2MERRSR_WAY(l2merrsr);
+	ramid = KRYO2XX_GOLD_L2MERRSR_RAMID(l2merrsr);
+
+	edac_printk(KERN_CRIT, EDAC_CPU,
+				"L2 %s RAM error in way 0x%02x, index 0x%04x\n",
+				ramid ? "data" : "tag", way,
+				(int) KRYO2XX_GOLD_L2MERRSR_INDEX(l2merrsr));
+
+	edac_printk(KERN_CRIT, EDAC_CPU, "Repeated error count: %d\n",
+		(int) KRYO2XX_GOLD_L2MERRSR_REPT(l2merrsr));
+	edac_printk(KERN_CRIT, EDAC_CPU, "Other error count: %d\n",
+		(int) KRYO2XX_GOLD_L2MERRSR_OTHER(l2merrsr));
+
+	if (ed->err == SBE) {
+		errors[KRYO2XX_GOLD_L2_CE].func(ed->drv->edev_ctl, smp_processor_id(),
+					L2_CACHE, errors[KRYO2XX_GOLD_L2_CE].msg);
+	} else if (ed->err == DBE) {
+		errors[KRYO2XX_GOLD_L2_UE].func(ed->drv->edev_ctl, smp_processor_id(),
+					L2_CACHE, errors[KRYO2XX_GOLD_L2_UE].msg);
+	}
+	write_l2merrsr_el1(0);
+}
+
+static DEFINE_SPINLOCK(local_handler_lock);
+static DEFINE_SPINLOCK(l2ectlr_lock);
+
+static void arm64_erp_local_handler(void *info)
+{
+	struct erp_local_data *errdata = info;
+	unsigned int cpuid = read_cpuid_id();
+	unsigned int partnum = read_cpuid_part_number();
+	unsigned long flags, flags2;
+	u32 l2ectlr;
+
+	spin_lock_irqsave(&local_handler_lock, flags);
+	edac_printk(KERN_CRIT, EDAC_CPU, "%s error information from CPU %d, MIDR=%#08x:\n",
+		       err_name[errdata->err], raw_smp_processor_id(), cpuid);
+
+	switch (partnum) {
+	case ARM_CPU_PART_CORTEX_A53:
+	case ARM_CPU_PART_KRYO2XX_SILVER:
+		kryo2xx_silver_parse_cpumerrsr(errdata);
+		kryo2xx_silver_parse_l2merrsr(errdata);
+	break;
+
+	case ARM_CPU_PART_CORTEX_A72:
+	case ARM_CPU_PART_CORTEX_A57:
+		ca57_parse_cpumerrsr(errdata);
+		ca57_parse_l2merrsr(errdata);
+	break;
+
+	case ARM_CPU_PART_KRYO2XX_GOLD:
+		kryo2xx_gold_parse_l2merrsr(errdata);
+	break;
+
+	default:
+		edac_printk(KERN_CRIT, EDAC_CPU, "Unknown CPU Part Number in MIDR: %#04x (%#08x)\n",
+						 partnum, cpuid);
+	}
+
+	/* Acknowledge internal error in L2ECTLR */
+	spin_lock_irqsave(&l2ectlr_lock, flags2);
+
+	l2ectlr = read_l2ectlr_el1;
+
+	if (l2ectlr & L2ECTLR_INT_ERR) {
+		l2ectlr &= ~L2ECTLR_INT_ERR;
+		write_l2ectlr_el1(l2ectlr);
+	}
+
+	spin_unlock_irqrestore(&l2ectlr_lock, flags2);
+	spin_unlock_irqrestore(&local_handler_lock, flags);
+}
+
+static irqreturn_t arm64_dbe_handler(int irq, void *drvdata)
+{
+	struct erp_local_data errdata;
+
+	errdata.drv = drvdata;
+	errdata.err = DBE;
+	edac_printk(KERN_CRIT, EDAC_CPU, "ARM64 CPU ERP: Double-bit error interrupt received!\n");
+
+	on_each_cpu(arm64_erp_local_handler, &errdata, 1);
+
+	return IRQ_HANDLED;
+}
+
+static void arm64_ext_local_handler(void *info)
+{
+	struct erp_drvdata *drv = info;
+	unsigned long flags, flags2;
+	u32 l2ectlr;
+
+	spin_lock_irqsave(&local_handler_lock, flags);
+
+	/* TODO: Shared locking for L2ECTLR access */
+	spin_lock_irqsave(&l2ectlr_lock, flags2);
+
+	l2ectlr = read_l2ectlr_el1;
+
+	if (l2ectlr & L2ECTLR_EXT_ERR) {
+		edac_printk(KERN_CRIT, EDAC_CPU,
+		    "L2 external error detected by CPU%d\n",
+		    smp_processor_id());
+
+		errors[L2_EXT_UE].func(drv->edev_ctl, smp_processor_id(),
+				       L2_CACHE, errors[L2_EXT_UE].msg);
+
+		l2ectlr &= ~L2ECTLR_EXT_ERR;
+		write_l2ectlr_el1(l2ectlr);
+	}
+
+	spin_unlock_irqrestore(&l2ectlr_lock, flags2);
+	spin_unlock_irqrestore(&local_handler_lock, flags);
+}
+
+static irqreturn_t arm64_ext_handler(int irq, void *drvdata)
+{
+	edac_printk(KERN_CRIT, EDAC_CPU, "External error interrupt received!\n");
+
+	on_each_cpu(arm64_ext_local_handler, drvdata, 1);
+
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t arm64_cci_handler(int irq, void *drvdata)
+{
+	struct erp_drvdata *drv = drvdata;
+	u32 cci_err_reg;
+
+	edac_printk(KERN_CRIT, EDAC_CPU, "CCI error interrupt received!\n");
+
+	if (drv->cci_base) {
+		cci_err_reg = readl_relaxed(drv->cci_base +
+							CCI_IMPRECISEERROR_REG);
+
+		edac_printk(KERN_CRIT, EDAC_CPU, "CCI imprecise error register: %#08x.\n",
+						 cci_err_reg);
+
+		/* This register has write-clear semantics */
+		writel_relaxed(cci_err_reg, drv->cci_base +
+							CCI_IMPRECISEERROR_REG);
+
+		/* Ensure error bits cleared before exiting ISR */
+		mb();
+	} else {
+		edac_printk(KERN_CRIT, EDAC_CPU, "CCI registers not available.\n");
+	}
+
+	errors[CCI_UE].func(drv->edev_ctl, 0, CCI, errors[CCI_UE].msg);
+
+	return IRQ_HANDLED;
+}
+#ifndef CONFIG_EDAC_CORTEX_ARM64_DBE_IRQ_ONLY
+static void arm64_sbe_handler(struct perf_event *event,
+			      struct perf_sample_data *data,
+			      struct pt_regs *regs)
+{
+	struct erp_local_data errdata;
+	int cpu = raw_smp_processor_id();
+
+	errdata.drv = event->overflow_handler_context;
+	errdata.err = SBE;
+	edac_printk(KERN_CRIT, EDAC_CPU, "ARM64 CPU ERP: Single-bit error interrupt received on CPU %d!\n",
+					cpu);
+	arm64_erp_local_handler(&errdata);
+}
+#endif
+
+static int request_erp_irq(struct platform_device *pdev, const char *propname,
+			   const char *desc, irq_handler_t handler,
+			   void *ed)
+{
+	int rc;
+	struct resource *r;
+
+	r = platform_get_resource_byname(pdev, IORESOURCE_IRQ, propname);
+
+	if (!r) {
+		pr_err("ARM64 CPU ERP: Could not find <%s> IRQ property. Proceeding anyway.\n",
+			propname);
+		return -EINVAL;
+	}
+
+	rc = devm_request_threaded_irq(&pdev->dev, r->start, NULL,
+				       handler,
+				       IRQF_ONESHOT | IRQF_TRIGGER_RISING,
+				       desc,
+				       ed);
+
+	if (rc) {
+		pr_err("ARM64 CPU ERP: Failed to request IRQ %d: %d (%s / %s). Proceeding anyway.\n",
+		       (int) r->start, rc, propname, desc);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static void check_sbe_event(struct erp_drvdata *drv)
+{
+	unsigned int partnum = read_cpuid_part_number();
+	struct erp_local_data errdata;
+	unsigned long flags;
+
+	errdata.drv = drv;
+	errdata.err = SBE;
+
+	spin_lock_irqsave(&local_handler_lock, flags);
+	switch (partnum) {
+	case ARM_CPU_PART_CORTEX_A53:
+	case ARM_CPU_PART_KRYO2XX_SILVER:
+		kryo2xx_silver_parse_cpumerrsr(&errdata);
+		kryo2xx_silver_parse_l2merrsr(&errdata);
+	break;
+
+	case ARM_CPU_PART_CORTEX_A72:
+	case ARM_CPU_PART_CORTEX_A57:
+		ca57_parse_cpumerrsr(&errdata);
+		ca57_parse_l2merrsr(&errdata);
+	break;
+
+	case ARM_CPU_PART_KRYO2XX_GOLD:
+		kryo2xx_gold_parse_l2merrsr(&errdata);
+	break;
+	}
+	spin_unlock_irqrestore(&local_handler_lock, flags);
+}
+
+#ifdef CONFIG_EDAC_CORTEX_ARM64_DBE_IRQ_ONLY
+static void create_sbe_counter(int cpu, void *info)
+{ }
+#else
+static void create_sbe_counter(int cpu, void *info)
+{
+	struct erp_drvdata *drv = info;
+	struct perf_event *event = drv->memerr_counters[cpu];
+	struct perf_event_attr attr = {
+		.pinned = 1,
+		.disabled = 0, /* 0 will enable the counter upon creation */
+		.sample_period = 1, /* 1 will set the counter to max int */
+		.type = PERF_TYPE_RAW,
+		.config = MEM_ERROR_EVENT,
+		.size = sizeof(struct perf_event_attr),
+	};
+
+	if (event)
+		return;
+
+	/* Fails if cpu is not online */
+	event = perf_event_create_kernel_counter(&attr, cpu, NULL,
+							arm64_sbe_handler,
+							drv);
+	if (IS_ERR(event)) {
+		pr_err("PERF Event creation failed on cpu %d ptr_err %ld\n",
+							cpu, PTR_ERR(event));
+		return;
+	}
+	drv->memerr_counters[cpu] = event;
+}
+#endif
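+
+/*
+ * Note on the SBE path: instead of a dedicated interrupt line,
+ * single-bit errors are caught with a pinned raw PMU counter on
+ * MEM_ERROR_EVENT whose period of 1 makes it overflow on the first
+ * counted error, invoking arm64_sbe_handler() as the overflow
+ * callback.
+ */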
+
+static int arm64_pmu_cpu_pm_notify(struct notifier_block *self,
+					   unsigned long action, void *v)
+{
+	struct erp_drvdata *drv = container_of(self, struct erp_drvdata, nb_pm);
+
+	switch (action) {
+	case CPU_PM_EXIT:
+		check_sbe_event(drv);
+		break;
+	}
+
+	return NOTIFY_OK;
+}
+
+static int arm64_edac_pmu_cpu_notify(struct notifier_block *self,
+					unsigned long action, void *hcpu)
+{
+	struct erp_drvdata *drv = container_of(self, struct erp_drvdata,
+								nb_cpu);
+	unsigned long cpu = (unsigned long)hcpu;
+
+	switch (action & ~CPU_TASKS_FROZEN) {
+	case CPU_ONLINE:
+		create_sbe_counter(cpu, drv);
+		break;
+	}
+
+	return NOTIFY_OK;
+}
+
+#ifndef CONFIG_EDAC_CORTEX_ARM64_DBE_IRQ_ONLY
+void arm64_check_cache_ecc(void *info)
+{
+	if (panic_handler_drvdata)
+		check_sbe_event(panic_handler_drvdata);
+}
+#else
+static inline void arm64_check_cache_ecc(void *info) {}
+#endif
+
+static int arm64_erp_panic_notify(struct notifier_block *this,
+			      unsigned long event, void *ptr)
+{
+	arm64_check_cache_ecc(NULL);
+
+	return NOTIFY_OK;
+}
+
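+/*
+ * Polling entry point: run the cache ECC check once per cluster by
+ * calling into any one CPU of each distinct core-sibling mask rather
+ * than interrupting every CPU.
+ */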
+static void arm64_monitor_cache_errors(struct edac_device_ctl_info *edev)
+{
+	struct cpumask cluster_mask, old_mask;
+	int cpu;
+
+	cpumask_clear(&cluster_mask);
+	cpumask_clear(&old_mask);
+
+	for_each_possible_cpu(cpu) {
+		cpumask_copy(&cluster_mask, topology_core_cpumask(cpu));
+		if (cpumask_equal(&cluster_mask, &old_mask))
+			continue;
+		cpumask_copy(&old_mask, &cluster_mask);
+		smp_call_function_any(&cluster_mask,
+				arm64_check_cache_ecc, NULL, 0);
+	}
+}
+
+static int arm64_cpu_erp_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct erp_drvdata *drv;
+	struct resource *r;
+	int cpu;
+	u32 poll_msec;
+
+	int rc, fail = 0;
+
+	drv = devm_kzalloc(dev, sizeof(*drv), GFP_KERNEL);
+
+	if (!drv)
+		return -ENOMEM;
+
+	drv->edev_ctl = edac_device_alloc_ctl_info(0, "cpu",
+					num_possible_cpus(), "L", 3, 1, NULL, 0,
+					edac_device_alloc_index());
+
+	if (!drv->edev_ctl)
+		return -ENOMEM;
+
+	rc = of_property_read_u32(pdev->dev.of_node, "poll-delay-ms",
+							&poll_msec);
+	if (!rc && !IS_ENABLED(CONFIG_EDAC_CORTEX_ARM64_DBE_IRQ_ONLY)) {
+		drv->edev_ctl->edac_check = arm64_monitor_cache_errors;
+		drv->edev_ctl->poll_msec = poll_msec;
+		drv->edev_ctl->defer_work = 1;
+	}
+	drv->edev_ctl->dev = dev;
+	drv->edev_ctl->mod_name = dev_name(dev);
+	drv->edev_ctl->dev_name = dev_name(dev);
+	drv->edev_ctl->ctl_name = "cache";
+	drv->edev_ctl->panic_on_ce = panic_on_ce;
+	drv->edev_ctl->panic_on_ue = ARM64_ERP_PANIC_ON_UE;
+
+	rc = edac_device_add_device(drv->edev_ctl);
+	if (rc)
+		goto out_mem;
+
+	r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cci");
+	if (r)
+		drv->cci_base = devm_ioremap_resource(dev, r);
+
+	if (request_erp_irq(pdev, "pri-dbe-irq", "ARM64 primary DBE IRQ",
+			    arm64_dbe_handler, drv))
+		fail++;
+
+	if (request_erp_irq(pdev, "sec-dbe-irq", "ARM64 secondary DBE IRQ",
+			    arm64_dbe_handler, drv))
+		fail++;
+
+	if (request_erp_irq(pdev, "pri-ext-irq", "ARM64 primary ext IRQ",
+			    arm64_ext_handler, drv))
+		fail++;
+
+	if (request_erp_irq(pdev, "sec-ext-irq", "ARM64 secondary ext IRQ",
+			    arm64_ext_handler, drv))
+		fail++;
+
+	/*
+	 * We still try to register a handler for CCI errors even if we don't
+	 * have access to cci_base, but error reporting becomes best-effort in
+	 * that case.
+	 */
+	if (request_erp_irq(pdev, "cci-irq", "CCI error IRQ",
+			    arm64_cci_handler, drv))
+		fail++;
+
+	if (IS_ENABLED(CONFIG_EDAC_CORTEX_ARM64_DBE_IRQ_ONLY)) {
+		pr_err("ARM64 CPU ERP: SBE detection is disabled.\n");
+		goto out_irq;
+	}
+
+	drv->nb_pm.notifier_call = arm64_pmu_cpu_pm_notify;
+	cpu_pm_register_notifier(&(drv->nb_pm));
+	drv->nb_panic.notifier_call = arm64_erp_panic_notify;
+	atomic_notifier_chain_register(&panic_notifier_list,
+				       &drv->nb_panic);
+	drv->nb_cpu.notifier_call = arm64_edac_pmu_cpu_notify;
+	register_cpu_notifier(&drv->nb_cpu);
+	get_online_cpus();
+	for_each_online_cpu(cpu)
+		create_sbe_counter(cpu, drv);
+	put_online_cpus();
+
+out_irq:
+	if (fail == of_irq_count(dev->of_node)) {
+		pr_err("ARM64 CPU ERP: Could not request any IRQs. Giving up.\n");
+		rc = -ENODEV;
+		goto out_dev;
+	}
+
+	panic_handler_drvdata = drv;
+
+	return 0;
+
+out_dev:
+	edac_device_del_device(dev);
+out_mem:
+	edac_device_free_ctl_info(drv->edev_ctl);
+	return rc;
+}
+
+static const struct of_device_id arm64_cpu_erp_match_table[] = {
+	{ .compatible = "arm,arm64-cpu-erp" },
+	{ }
+};
+
+static struct platform_driver arm64_cpu_erp_driver = {
+	.probe = arm64_cpu_erp_probe,
+	.driver = {
+		.name = "arm64_cpu_cache_erp",
+		.owner = THIS_MODULE,
+		.of_match_table = of_match_ptr(arm64_cpu_erp_match_table),
+	},
+};
+
+static int __init arm64_cpu_erp_init(void)
+{
+	return platform_driver_register(&arm64_cpu_erp_driver);
+}
+device_initcall_sync(arm64_cpu_erp_init);
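+
+/*
+ * Illustrative device-tree node for this driver (a sketch only: the
+ * register address, interrupt specifiers and poll period below are
+ * hypothetical; only the names match what the probe code looks up):
+ *
+ *	cpu-erp {
+ *		compatible = "arm,arm64-cpu-erp";
+ *		reg = <0x10900000 0x1000>;
+ *		reg-names = "cci";
+ *		interrupts = <0 34 4>, <0 35 4>, <0 36 4>,
+ *			     <0 37 4>, <0 38 4>;
+ *		interrupt-names = "pri-dbe-irq", "sec-dbe-irq",
+ *				  "pri-ext-irq", "sec-ext-irq",
+ *				  "cci-irq";
+ *		poll-delay-ms = <5000>;
+ *	};
+ */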
diff -Nruw linux-4.4.115-fbx/drivers/esoc./Kconfig linux-4.4.115-fbx/drivers/esoc/Kconfig
--- linux-4.4.115-fbx/drivers/esoc./Kconfig	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/esoc/Kconfig	2019-01-22 16:16:23.179243472 +0100
@@ -0,0 +1,69 @@
+#
+# External soc control infrastructure and drivers
+#
+menuconfig ESOC
+	bool "External SOCs Control"
+	help
+	  External SOCs can be powered on and monitored by user
+	  space or kernel drivers. Additionally they can be controlled
+	  to respond to control commands. This framework provides an
+	  interface to track events related to the external slave socs.
+
+if ESOC
+
+config ESOC_DEV
+	bool "ESOC userspace interface"
+	help
+	  Say yes here to enable a userspace representation of the control
+	  link. Userspace can register a request engine or a command engine
+	  for the external soc. It can receive event notifications from the
+	  control link.
+
+config ESOC_CLIENT
+	bool "ESOC client interface"
+	depends on OF
+	help
+	  Say yes here to enable client interface for external socs.
+	  Clients can specify the external soc that they are interested in
+	  by using device tree phandles. Based on this, clients can register
+	  for notifications from a specific soc.
+
+config ESOC_DEBUG
+	bool "ESOC debug support"
+	help
+	  Say yes here to enable debugging support in the ESOC framework
+	  and individual esoc drivers.
+
+config ESOC_MDM_4x
+	bool "Add support for external mdm9x25/mdm9x35/mdm9x45/mdm9x55"
+	help
+	  On some Qualcomm boards, an external modem such as mdm9x25 or mdm9x35
+	  is connected to a primary msm. The primary soc can control/monitor
+	  the modem via gpios. The data communication with such modems can
+	  occur over PCIE or HSIC.
+
+config ESOC_MDM_DRV
+	tristate "Command engine for 4x series external modems"
+	help
+	  Provides a command engine to control the behavior of an external modem
+	  such as mdm9x25/mdm9x35/mdm9x45/mdm9x55/QSC. Allows the primary soc to put the
+	  external modem in a specific mode. Also listens for events on the
+	  external modem.
+
+config ESOC_MDM_DBG_ENG
+	tristate "debug engine for 4x series external modems"
+	depends on ESOC_MDM_DRV
+	help
+	  Provides a user interface to mask out certain commands sent
+	  by command engine to the external modem. Also allows masking
+	  of certain notifications being sent to the external modem.
+
+config MDM_DBG_REQ_ENG
+	tristate "manual request engine for 4x series external modems"
+	depends on ESOC_MDM_DBG_ENG
+	help
+	  Provides a user interface to handle incoming requests from
+	  the external modem. Allows for debugging of IPC mechanism
+	  between the external modem and the primary soc.
+
+endif
diff -Nruw linux-4.4.115-fbx/drivers/esoc./Makefile linux-4.4.115-fbx/drivers/esoc/Makefile
--- linux-4.4.115-fbx/drivers/esoc./Makefile	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/esoc/Makefile	2019-01-22 16:16:23.179243472 +0100
@@ -0,0 +1,9 @@
+# generic external soc control support
+
+ccflags-$(CONFIG_ESOC_DEBUG)	:= -DDEBUG
+obj-$(CONFIG_ESOC)	+= esoc_bus.o
+obj-$(CONFIG_ESOC_DEV)	+= esoc_dev.o
+obj-$(CONFIG_ESOC_CLIENT)	+= esoc_client.o
+obj-$(CONFIG_ESOC_MDM_4x)	+= esoc-mdm-pon.o esoc-mdm-4x.o
+obj-$(CONFIG_ESOC_MDM_DRV)	+= esoc-mdm-drv.o
+obj-$(CONFIG_ESOC_MDM_DBG_ENG)	+= esoc-mdm-dbg-eng.o
diff -Nruw linux-4.4.115-fbx/drivers/fbxgpio./fbxgpio_core.c linux-4.4.115-fbx/drivers/fbxgpio/fbxgpio_core.c
--- linux-4.4.115-fbx/drivers/fbxgpio./fbxgpio_core.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/fbxgpio/fbxgpio_core.c	2019-01-22 16:16:23.183243509 +0100
@@ -0,0 +1,360 @@
+#include <linux/string.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/err.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/fbxgpio_core.h>
+#include <linux/of.h>
+
+#define PFX	"fbxgpio_core: "
+
+/* #define DEBUG */
+#ifdef DEBUG
+#define dprint(Fmt, Arg...)	printk(PFX Fmt, Arg)
+#else
+#define dprint(Fmt, Arg...)	do { } while (0)
+#endif
+
+static struct class *fbxgpio_class;
+
+/*
+ * retrieval of a struct fbxgpio_pin from a phandle in the device
+ * tree.
+ */
+struct fbxgpio_of_mach_data {
+	struct fbxgpio_pin *match;
+	struct device_node *np;
+};
+
+static int match_fbxgpio_of_node(struct device *dev, void *data)
+{
+	struct fbxgpio_of_mach_data *md = data;
+	struct fbxgpio_pin *pin = dev_get_drvdata(dev);
+
+	if (pin->of_node == md->np) {
+		md->match = pin;
+		return 1;
+	}
+	return 0;
+}
+
+struct fbxgpio_pin *fbxgpio_of_get(struct device_node *np, const char *propname,
+				   int index)
+{
+	struct fbxgpio_of_mach_data md;
+
+	/*
+	 * get the pin device_node.
+	 */
+	md.match = NULL;
+	md.np = of_parse_phandle(np, propname, index);
+	if (!md.np)
+		return ERR_PTR(-ENOENT);
+
+	/*
+	 * find the struct fbxgpio_pin behind that device_node.
+	 */
+	class_for_each_device(fbxgpio_class, NULL, &md,
+			      match_fbxgpio_of_node);
+
+	return md.match ? md.match : ERR_PTR(-ENOENT);
+}
+EXPORT_SYMBOL(fbxgpio_of_get);
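+
+/*
+ * Usage sketch for consumers (the property name "fbx,status-gpio" is
+ * hypothetical): resolve a phandle in the caller's node into the
+ * registered pin.
+ *
+ *	struct fbxgpio_pin *pin;
+ *
+ *	pin = fbxgpio_of_get(dev->of_node, "fbx,status-gpio", 0);
+ *	if (IS_ERR(pin))
+ *		return PTR_ERR(pin);
+ */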
+
+/*
+ * show direction in for gpio associated with class_device dev.
+ */
+static ssize_t show_direction(struct device *dev,
+			      struct device_attribute *attr, char *buf)
+{
+	struct fbxgpio_pin *p;
+	int dir, ret = 0;
+
+	p = dev_get_drvdata(dev);
+
+	if (p->ops->get_direction)
+		dir = p->ops->get_direction(p->pin_num);
+	else
+		dir = p->direction;
+
+	switch (dir) {
+	case GPIO_DIR_IN:
+		ret += sprintf(buf, "input\n");
+		break;
+	case GPIO_DIR_OUT:
+		ret += sprintf(buf, "output\n");
+		break;
+	default:
+		ret += sprintf(buf, "unknown\n");
+		break;
+	}
+	return ret;
+}
+
+/*
+ * store direction. return -EINVAL if direction string is bad. return
+ * -EPERM if flag FBXGPIO_PIN_DIR_RW is set in flags.
+ */
+static ssize_t store_direction(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t size)
+{
+	int dir;
+	struct fbxgpio_pin *p;
+	int match_len = 0;
+	int i, ret;
+	static const char *word_match[] = {
+		[GPIO_DIR_IN] = "input",
+		[GPIO_DIR_OUT] = "output",
+	};
+
+	if (*buf == ' ' || *buf == '\t' || *buf == '\r' || *buf == '\n')
+		/* silently eat any spaces/tab/linefeed/carriagereturn */
+		return 1;
+
+	p = dev_get_drvdata(dev);
+	if (!(p->flags & FBXGPIO_PIN_DIR_RW)) {
+		dprint("pin %s direction is read only.\n", p->pin_name);
+		return -EPERM;
+	}
+	dir = 0;
+	for (i = 0; i < 2; ++i) {
+		if (size >= strlen(word_match[i]) &&
+		    !strncmp(buf, word_match[i], strlen(word_match[i]))) {
+			dir = i;
+			match_len = strlen(word_match[i]);
+			break;
+		}
+	}
+	if (i == 2)
+		return -EINVAL;
+
+	ret = p->ops->set_direction(p->pin_num, dir);
+	if (ret)
+		return ret;
+
+	return match_len;
+}
+
+/*
+ * show input data for input gpio pins.
+ */
+static ssize_t show_datain(struct device *dev,
+			   struct device_attribute *attr, char *buf)
+{
+	int val;
+	struct fbxgpio_pin *p;
+
+	p = dev_get_drvdata(dev);
+	if (p->direction == GPIO_DIR_OUT)
+		return -EINVAL;
+	val = p->ops->get_datain(p->pin_num);
+
+	if (p->flags & FBXGPIO_PIN_REVERSE_POL)
+		val = 1 - val;
+	return sprintf(buf, "%i\n", val);
+}
+
+/*
+ * show output data for output gpio pins.
+ */
+static ssize_t show_dataout(struct device *dev,
+			    struct device_attribute *attr, char *buf)
+{
+	int val;
+	struct fbxgpio_pin *p;
+
+	p = dev_get_drvdata(dev);
+	if (p->direction == GPIO_DIR_IN)
+		return -EINVAL;
+	if (p->ops->get_dataout)
+		val = p->ops->get_dataout(p->pin_num);
+	else
+		val = p->cur_dataout;
+
+	if (p->flags & FBXGPIO_PIN_REVERSE_POL)
+		val = 1 - val;
+	return sprintf(buf, "%i\n", val);
+}
+
+/*
+ * store new dataout value for output gpio pins.
+ */
+static ssize_t store_dataout(struct device *dev,
+	    struct device_attribute *attr, const char *buf, size_t size)
+{
+	int val;
+	struct fbxgpio_pin *p;
+
+	if (*buf == ' ' || *buf == '\t' || *buf == '\r' || *buf == '\n')
+		/* silently eat any spaces/tab/linefeed/carriagereturn */
+		return 1;
+
+	p = dev_get_drvdata(dev);
+
+	if (p->direction != GPIO_DIR_OUT)
+		return -EINVAL;
+
+	switch (*buf) {
+	case '0':
+		val = 0;
+		break;
+	case '1':
+		val = 1;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	p->cur_dataout = val;
+
+	if (p->flags & FBXGPIO_PIN_REVERSE_POL)
+		val = 1 - val;
+	p->ops->set_dataout(p->pin_num, val);
+	return 1;
+}
+
+/*
+ * show pin number associated with gpio pin.
+ */
+static ssize_t show_pinnum(struct device *dev,
+			   struct device_attribute *attr, char *buf)
+{
+	struct fbxgpio_pin *p;
+
+	p = dev_get_drvdata(dev);
+	return sprintf(buf, "%i\n", p->pin_num);
+}
+
+/*
+ * attribute list associated with each class device.
+ */
+static struct device_attribute gpio_attributes[] = {
+	__ATTR(direction, 0600, show_direction, store_direction),
+	__ATTR(data_in,   0400, show_datain, NULL),
+	__ATTR(data_out,  0600, show_dataout, store_dataout),
+	__ATTR(pin_num,   0400, show_pinnum, NULL),
+};
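+
+/*
+ * Each registered pin appears as /sys/class/fbxgpio/<pin_name>/ with
+ * the four attributes above. Shell usage sketch (pin names are
+ * hypothetical):
+ *
+ *	echo 1 > /sys/class/fbxgpio/pwr-led/data_out
+ *	cat /sys/class/fbxgpio/board-id0/data_in
+ */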
+
+static int fbxgpio_register_pin(struct platform_device *ppdev,
+				struct fbxgpio_pin *pin)
+{
+	struct device *dev;
+	int i, ret;
+
+	dprint("registering pin %s\n", pin->pin_name);
+
+	/* ensure ops is valid */
+	if (!pin->ops) {
+		printk(KERN_ERR PFX "no operation set for pin %s\n",
+		       pin->pin_name);
+		return -EINVAL;
+	}
+
+	dev = device_create(fbxgpio_class, &ppdev->dev, 0, pin,
+			    "%s", pin->pin_name);
+	if (IS_ERR(dev))
+		return PTR_ERR(dev);
+
+	for (i = 0; i < ARRAY_SIZE(gpio_attributes); i++) {
+		ret = device_create_file(dev, &gpio_attributes[i]);
+		if (ret)
+			goto err_out;
+	}
+
+	/* ensure pin direction matches hardware state */
+	if (pin->ops->get_direction &&
+	    pin->direction != pin->ops->get_direction(pin->pin_num)) {
+		printk(KERN_WARNING PFX "pin %s default direction does not "
+		       "match current hardware state, fixing.\n",
+		       pin->pin_name);
+		pin->ops->set_direction(pin->pin_num, pin->direction);
+	}
+	pin->dev = dev;
+	return 0;
+
+err_out:
+	while (--i >= 0)
+		device_remove_file(dev, &gpio_attributes[i]);
+	device_unregister(dev);
+	return ret;
+}
+
+static void fbxgpio_unregister_pin(struct fbxgpio_pin *pin)
+{
+	struct device *dev;
+	int i;
+
+	dprint("unregistering pin %s\n", pin->pin_name);
+	dev = pin->dev;
+	pin->dev = NULL;
+
+	for (i = 0; i < ARRAY_SIZE(gpio_attributes); i++)
+		device_remove_file(dev, &gpio_attributes[i]);
+	device_unregister(dev);
+}
+
+static int fbxgpio_platform_probe(struct platform_device *pdev)
+{
+	struct fbxgpio_pin *p;
+	int err = 0;
+
+	p = pdev->dev.platform_data;
+	while (p->pin_name) {
+		err = fbxgpio_register_pin(pdev, p);
+		if (err)
+			return err;
+		++p;
+	}
+	return 0;
+}
+
+static int fbxgpio_platform_remove(struct platform_device *pdev)
+{
+	struct fbxgpio_pin *p;
+
+	p = pdev->dev.platform_data;
+	while (p->pin_name) {
+		fbxgpio_unregister_pin(p);
+		++p;
+	}
+	return 0;
+}
+
+static struct platform_driver fbxgpio_platform_driver =
+{
+	.probe	= fbxgpio_platform_probe,
+	.remove	= fbxgpio_platform_remove,
+	.driver	= {
+		.name	= "fbxgpio",
+	}
+};
+
+static int __init fbxgpio_init(void)
+{
+	int ret;
+
+	fbxgpio_class = class_create(THIS_MODULE, "fbxgpio");
+	if (IS_ERR(fbxgpio_class))
+		return PTR_ERR(fbxgpio_class);
+
+	ret = platform_driver_register(&fbxgpio_platform_driver);
+	if (ret) {
+		printk(KERN_ERR PFX "unable to register fbxgpio driver.\n");
+		class_destroy(fbxgpio_class);
+		return ret;
+	}
+	return 0;
+}
+
+static void __exit fbxgpio_exit(void)
+{
+	platform_driver_unregister(&fbxgpio_platform_driver);
+	class_destroy(fbxgpio_class);
+}
+
+subsys_initcall(fbxgpio_init);
+module_exit(fbxgpio_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Nicolas Schichan <nicolas.schichan@freebox.fr>");
diff -Nruw linux-4.4.115-fbx/drivers/fbxgpio./fbxgpio_dt.c linux-4.4.115-fbx/drivers/fbxgpio/fbxgpio_dt.c
--- linux-4.4.115-fbx/drivers/fbxgpio./fbxgpio_dt.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/fbxgpio/fbxgpio_dt.c	2019-01-22 16:16:23.183243509 +0100
@@ -0,0 +1,254 @@
+/*
+ * fbxgpio_dt.c for fbxgpio
+ * Created by <nschichan@freebox.fr> on Tue Aug  1 14:01:01 2017
+ */
+
+#include <linux/kernel.h>
+#include <linux/gpio.h>
+#include <linux/of_gpio.h>
+#include <linux/platform_device.h>
+#include <linux/fbxgpio_core.h>
+#include <linux/module.h>
+#include <linux/gpio/consumer.h>
+
+static atomic_t last_id = ATOMIC_INIT(0);
+
+/*
+ * fbxgpio driver fetching gpios names and configuration from
+ * device-tree.
+ */
+
+struct fbxgpio_dt_priv {
+	struct fbxgpio_pin *pins;
+	unsigned int npins;
+
+	/* dynamically created platform_device for fbxgpio_core */
+	struct platform_device *top_pdev;
+};
+
+
+/*
+ * small shim layer for gpiolib <-> fbxgpio_operations.
+ *
+ * changing direction from userspace is unsupported (FBXGPIO_PIN_DIR_RW
+ * is never set); does userspace really need it?
+ */
+static int fbxgpio_dt_get_data(int gpio)
+{
+	return gpio_get_value_cansleep(gpio);
+}
+
+static void fbxgpio_dt_set_dataout(int gpio, int value)
+{
+	gpio_set_value_cansleep(gpio, value);
+}
+
+static int fbxgpio_dt_set_direction(int gpio, int dir)
+{
+	if (dir == GPIO_DIR_OUT)
+		return gpio_direction_output(gpio, 0);
+	else
+		return gpio_direction_input(gpio);
+}
+
+static const struct fbxgpio_operations fbxgpio_dt_ops = {
+	.get_datain = fbxgpio_dt_get_data,
+	.get_dataout = fbxgpio_dt_get_data,
+	.set_dataout = fbxgpio_dt_set_dataout,
+	.set_direction = fbxgpio_dt_set_direction,
+};
+
+/*
+ * fill an fbxgpio_pin with the configuration found in a device tree
+ * node.
+ *
+ * required properties are:
+ * - gpio: a phandle to a standard linux gpio.
+ *
+ * - the name of the node: the name of the gpio as it will appear under
+ *   /sys/class/fbxgpio/
+ *
+ * - <input>/<output-high>/<output-low>: how to declare the gpio and
+ *   actually set it up, unless no-claim is given
+ *
+ * - <no-claim>: just declare the gpio, but don't request & set it up
+ */
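+/*
+ * Illustrative child node (the gpio controller phandle and pin number
+ * are hypothetical):
+ *
+ *	fbxgpio {
+ *		compatible = "fbx,fbxgpio";
+ *
+ *		pwr-led {
+ *			name = "pwr-led";
+ *			gpio = <&pio 42 0>;
+ *			output-low;
+ *		};
+ *	};
+ */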
+static int fbxgpio_dt_fill_gpio(struct platform_device *pdev,
+				struct device_node *np,
+				struct fbxgpio_pin *pin)
+{
+	int error;
+
+	error = of_property_read_string(np, "name", &pin->pin_name);
+	if (error) {
+		dev_err(&pdev->dev, "gpio has no name.\n");
+		return error;
+	}
+
+	pin->pin_num = of_get_named_gpio(np, "gpio", 0);
+	if (pin->pin_num < 0) {
+		if (pin->pin_num != -EPROBE_DEFER)
+			dev_err(&pdev->dev,
+				"unable to get gpio desc for %s: %d.\n",
+				pin->pin_name, pin->pin_num);
+		return pin->pin_num;
+	}
+
+	if (of_property_read_bool(np, "input")) {
+		pin->direction = GPIO_DIR_IN;
+	} else if (of_property_read_bool(np, "output-low")) {
+		pin->direction = GPIO_DIR_OUT;
+		pin->cur_dataout = 0;
+	} else if (of_property_read_bool(np, "output-high")) {
+		pin->direction = GPIO_DIR_OUT;
+		pin->cur_dataout = 1;
+	} else {
+		dev_err(&pdev->dev,
+			"no state specified for %s\n",
+			pin->pin_name);
+		return -EINVAL;
+	}
+
+	if (!of_property_read_bool(np, "no-claim")) {
+		error = gpio_request(pin->pin_num, "fbxgpio-dt");
+		if (error) {
+			dev_err(&pdev->dev, "unable to request gpio%d (%s): %d\n",
+				pin->pin_num, pin->pin_name, error);
+			return error;
+		}
+		pin->claimed = true;
+
+		if (pin->direction == GPIO_DIR_OUT)
+			gpio_direction_output(pin->pin_num, pin->cur_dataout);
+		else
+			gpio_direction_input(pin->pin_num);
+	}
+
+	pin->of_node = np;
+	pin->ops = &fbxgpio_dt_ops;
+	return 0;
+}
+
+static int fbxgpio_dt_probe(struct platform_device *pdev)
+{
+	struct fbxgpio_dt_priv *priv;
+	struct device_node *fbxgpio_node;
+	u32 cur_gpio;
+	int error = 0;
+	size_t priv_alloc_size;
+	int i;
+
+	priv = devm_kzalloc(&pdev->dev, sizeof (*priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+
+	dev_set_drvdata(&pdev->dev, priv);
+
+	/*
+	 * first pass to get the number of struct fbxgpio_pin to
+	 * allocate.
+	 */
+	for_each_available_child_of_node(pdev->dev.of_node, fbxgpio_node) {
+		++priv->npins;
+	}
+
+	/*
+	 * allocate pins: use npins + 1 for zeroed end sentinel.
+	 */
+	priv_alloc_size = (priv->npins + 1) * sizeof (struct fbxgpio_pin);
+	priv->pins = devm_kzalloc(&pdev->dev, priv_alloc_size, GFP_KERNEL);
+	if (!priv->pins)
+		return -ENOMEM;
+
+	/*
+	 * second pass to fill the priv->pins array.
+	 */
+	cur_gpio = 0;
+	for_each_available_child_of_node(pdev->dev.of_node, fbxgpio_node) {
+		error = fbxgpio_dt_fill_gpio(pdev, fbxgpio_node,
+					     &priv->pins[cur_gpio]);
+		if (error)
+			goto out_free_gpios;
+		++cur_gpio;
+	}
+
+	dev_info(&pdev->dev, "%u gpios.\n", priv->npins);
+
+	/*
+	 * create and register a platform device for fbxgpio_core.
+	 */
+	priv->top_pdev = platform_device_register_data(&pdev->dev,
+						       "fbxgpio",
+						       atomic_inc_return(&last_id),
+						       priv->pins,
+						       priv_alloc_size);
+
+	if (IS_ERR(priv->top_pdev)) {
+		dev_err(&pdev->dev, "unable to register fbxgpio platform "
+			"device: %ld\n", PTR_ERR(priv->top_pdev));
+		error = PTR_ERR(priv->top_pdev);
+		goto out_free_gpios;
+	}
+
+	for (i = 0; i < priv->npins; i++) {
+		struct fbxgpio_pin *pin = &priv->pins[i];
+
+		if (pin->direction == GPIO_DIR_OUT)
+			dev_dbg(&pdev->dev,
+				"%sgpio %d (%s) is output, default %d\n",
+				pin->claimed ? "" : "unclaimed ",
+				pin->pin_num, pin->pin_name, pin->cur_dataout);
+		else
+			dev_dbg(&pdev->dev,
+				"%sgpio %d (%s) is input\n",
+				pin->claimed ? "" : "unclaimed ",
+				pin->pin_num, pin->pin_name);
+	}
+
+	return 0;
+
+out_free_gpios:
+	while (cur_gpio) {
+		--cur_gpio;
+		if (priv->pins[cur_gpio].claimed)
+			gpio_free(priv->pins[cur_gpio].pin_num);
+	}
+	return error;
+}
+
+static int fbxgpio_dt_remove(struct platform_device *pdev)
+{
+	struct fbxgpio_dt_priv *priv = dev_get_drvdata(&pdev->dev);
+	unsigned int i;
+
+	platform_device_unregister(priv->top_pdev);
+
+	for (i = 0; i < priv->npins; ++i) {
+		if (priv->pins[i].claimed)
+			gpio_free(priv->pins[i].pin_num);
+	}
+
+	return 0;
+}
+
+static const struct of_device_id fbxgpio_dt_of_match_table[] = {
+	{ .compatible = "fbx,fbxgpio" },
+	{ },
+};
+MODULE_DEVICE_TABLE(of, fbxgpio_dt_of_match_table);
+
+static struct platform_driver fbxgpio_dt_platform_driver = {
+	.probe		= fbxgpio_dt_probe,
+	.remove		= fbxgpio_dt_remove,
+	.driver		= {
+		.name		= "fbxgpio-dt",
+		.of_match_table	= fbxgpio_dt_of_match_table,
+	},
+};
+
+module_platform_driver(fbxgpio_dt_platform_driver);
+
+MODULE_AUTHOR("Nicolas Schichan <nschichan@freebox.fr>");
+MODULE_DESCRIPTION("DT Freebox GPIO Driver");
+MODULE_LICENSE("GPL v2");
diff -Nruw linux-4.4.115-fbx/drivers/fbxgpio./Kconfig linux-4.4.115-fbx/drivers/fbxgpio/Kconfig
--- linux-4.4.115-fbx/drivers/fbxgpio./Kconfig	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/fbxgpio/Kconfig	2019-01-22 16:16:23.183243509 +0100
@@ -0,0 +1,7 @@
+config FREEBOX_GPIO
+	tristate "Freebox GPIO control interface"
+	default n
+
+config FREEBOX_GPIO_DT
+	tristate "Freebox GPIO DT binding."
+	default n
diff -Nruw linux-4.4.115-fbx/drivers/fbxgpio./Makefile linux-4.4.115-fbx/drivers/fbxgpio/Makefile
--- linux-4.4.115-fbx/drivers/fbxgpio./Makefile	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/fbxgpio/Makefile	2019-01-22 16:16:23.183243509 +0100
@@ -0,0 +1,2 @@
+obj-$(CONFIG_FREEBOX_GPIO)	+= fbxgpio_core.o
+obj-$(CONFIG_FREEBOX_GPIO_DT)	+= fbxgpio_dt.o
diff -Nruw linux-4.4.115-fbx/drivers/fbxprocfs./fbxprocfs.c linux-4.4.115-fbx/drivers/fbxprocfs/fbxprocfs.c
--- linux-4.4.115-fbx/drivers/fbxprocfs./fbxprocfs.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/fbxprocfs/fbxprocfs.c	2019-01-22 16:16:23.183243509 +0100
@@ -0,0 +1,299 @@
+/*
+ * Freebox ProcFs interface
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/proc_fs.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/seq_file.h>
+#include <linux/uaccess.h>
+#include <linux/sizes.h>
+
+#include <linux/fbxprocfs.h>
+
+#define PFX	"fbxprocfs: "
+
+
+static struct list_head clients;
+static struct mutex clients_mutex;
+
+static struct proc_dir_entry *root;
+
+/*
+ * register an fbxprocfs client with the given dirname; callers should
+ * treat the returned struct as opaque
+ */
+struct fbxprocfs_client *fbxprocfs_add_client(const char *dirname,
+					      struct module *owner)
+{
+	struct fbxprocfs_client *ret, *p;
+
+	ret = NULL;
+	mutex_lock(&clients_mutex);
+
+	/* check for duplicate */
+	list_for_each_entry(p, &clients, list) {
+		if (!strcmp(dirname, p->dirname))
+			goto out;
+	}
+
+	if (!(ret = kmalloc(sizeof (*ret), GFP_KERNEL))) {
+		printk(KERN_ERR PFX "kmalloc failed\n");
+		goto out;
+	}
+
+	/* try to create client directory */
+	if (!(ret->dir = proc_mkdir(dirname, root))) {
+		printk(KERN_ERR PFX "can't create %s dir\n", dirname);
+		kfree(ret);
+		ret = NULL;
+		goto out;
+	}
+
+	atomic_set(&ret->refcount, 1);
+	ret->dirname = dirname;
+	list_add(&ret->list, &clients);
+
+out:
+	mutex_unlock(&clients_mutex);
+	return ret;
+}
+
+/*
+ * unregister an fbxprocfs client; all of its entries must have been
+ * removed first, otherwise this fails with -EBUSY
+ */
+int fbxprocfs_remove_client(struct fbxprocfs_client *client)
+{
+	int ret;
+
+	mutex_lock(&clients_mutex);
+
+	ret = 0;
+	if (atomic_read(&client->refcount) > 1) {
+		ret = -EBUSY;
+		goto out;
+	}
+
+	remove_proc_entry(client->dirname, root);
+	list_del(&client->list);
+	kfree(client);
+
+out:
+	mutex_unlock(&clients_mutex);
+	return ret;
+}
+
+/*
+ * remove given entries from client directory
+ */
+static int
+__remove_entries(struct fbxprocfs_client *client,
+		 const struct fbxprocfs_desc *ro_desc,
+		 const struct fbxprocfs_desc *rw_desc)
+{
+	int i;
+
+	for (i = 0; ro_desc && ro_desc[i].name; i++) {
+		remove_proc_entry(ro_desc[i].name, client->dir);
+		atomic_dec(&client->refcount);
+	}
+
+	for (i = 0; rw_desc && rw_desc[i].name; i++) {
+		remove_proc_entry(rw_desc[i].name, client->dir);
+		atomic_dec(&client->refcount);
+	}
+
+	return 0;
+}
+
+/*
+ * replacement for NULL rfunc.
+ */
+static int bad_rfunc(struct seq_file *m, void *ptr)
+{
+	return -EACCES;
+}
+
+/*
+ * the fbxprocfs read path is handled by seq_file code. this
+ * simplifies client code greatly.
+ */
+static int fbxprocfs_open(struct inode *inode, struct file *file)
+{
+	const struct fbxprocfs_desc *desc = PDE_DATA(inode);
+
+	return single_open(file, desc->rfunc ? desc->rfunc : bad_rfunc,
+			   (void*)desc->id);
+}
+
+/*
+ * no particular help from kernel in the write path, fetch user buffer
+ * in a kernel buffer and call write func.
+ */
+static ssize_t fbxprocfs_write(struct file *file, const char __user *ubuf,
+			       size_t len, loff_t *off)
+{
+	/*
+	 * get fbxprocfs desc via the proc_dir_entry in file inode
+	 */
+	struct fbxprocfs_desc *d = PDE_DATA(file_inode(file));
+	char *kbuf;
+	ssize_t ret;
+
+	/*
+	 * must have a wfunc callback.
+	 */
+	if (!d->wfunc)
+		return -EACCES;
+
+	/*
+	 * allow up to SZ_4K bytes to be written.
+	 */
+	if (len > SZ_4K)
+		return -EOVERFLOW;
+
+	/*
+	 * alloc and fetch kernel buffer containing user data.
+	 */
+	kbuf = kmalloc(SZ_4K, GFP_KERNEL);
+	if (!kbuf)
+		return -ENOMEM;
+
+	ret = -EFAULT;
+	if (copy_from_user(kbuf, ubuf, len))
+		goto kfree;
+
+	ret = d->wfunc(file, kbuf, len, (void*)d->id);
+
+kfree:
+	kfree(kbuf);
+	return ret;
+}
+
+/*
+ * fbxprocfs file operations, read stuff is handled by seq_file code.
+ */
+static const struct file_operations fbxprocfs_fops = {
+	.open		= fbxprocfs_open,
+	.llseek		= seq_lseek,
+	.read		= seq_read,
+	.release	= seq_release,
+	.write		= fbxprocfs_write,
+};
+
+/*
+ * replaces create_proc_read_entry removed in latest kernels.
+ */
+static struct proc_dir_entry *__create_proc_read_entry(
+				       const struct fbxprocfs_desc *desc,
+				       struct proc_dir_entry *base)
+{
+	return proc_create_data(desc->name, 0, base, &fbxprocfs_fops,
+				(void*)desc);
+}
+
+/*
+ * replaces create_proc_entry removed in latest kernels.
+ */
+static struct proc_dir_entry *__create_proc_entry(
+					const struct fbxprocfs_desc *desc,
+					struct proc_dir_entry *base)
+{
+	return proc_create_data(desc->name, S_IFREG | S_IWUSR | S_IRUGO,
+				base, &fbxprocfs_fops, (void*)desc);
+}
+
+/*
+ * create given entries in client directory
+ */
+static int
+__create_entries(struct fbxprocfs_client *client,
+		 const struct fbxprocfs_desc *ro_desc,
+		 const struct fbxprocfs_desc *rw_desc)
+{
+	struct proc_dir_entry	*proc;
+	int			i;
+
+	for (i = 0; ro_desc && ro_desc[i].name; i++) {
+		if (!(proc = __create_proc_read_entry(&ro_desc[i],
+						      client->dir))) {
+			printk(KERN_ERR PFX "can't create %s/%s entry\n",
+			       client->dirname, ro_desc[i].name);
+			goto err;
+		}
+		atomic_inc(&client->refcount);
+	}
+
+	for (i = 0; rw_desc && rw_desc[i].name; i++) {
+		if (!(proc = __create_proc_entry(&rw_desc[i], client->dir))) {
+			printk(KERN_ERR PFX "can't create %s/%s entry\n",
+			       client->dirname, rw_desc[i].name);
+			goto err;
+		}
+		atomic_inc(&client->refcount);
+	}
+
+	return 0;
+
+err:
+	__remove_entries(client, ro_desc, rw_desc);
+	return -1;
+}
+
+int
+fbxprocfs_create_entries(struct fbxprocfs_client *client,
+			 const struct fbxprocfs_desc *ro_desc,
+			 const struct fbxprocfs_desc *rw_desc)
+{
+	int	ret;
+
+	ret = __create_entries(client, ro_desc, rw_desc);
+	return ret;
+}
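+
+/*
+ * Usage sketch for client modules (all names hypothetical). Descriptor
+ * tables are scanned until an entry with a NULL name, so they must end
+ * with an empty sentinel:
+ *
+ *	static int foo_show(struct seq_file *m, void *ptr)
+ *	{
+ *		seq_puts(m, "ok\n");
+ *		return 0;
+ *	}
+ *
+ *	static const struct fbxprocfs_desc foo_ro[] = {
+ *		{ .name = "status", .rfunc = foo_show },
+ *		{ },
+ *	};
+ *
+ *	client = fbxprocfs_add_client("foo", THIS_MODULE);
+ *	if (client)
+ *		fbxprocfs_create_entries(client, foo_ro, NULL);
+ */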
+
+int
+fbxprocfs_remove_entries(struct fbxprocfs_client *client,
+			 const struct fbxprocfs_desc *ro_desc,
+			 const struct fbxprocfs_desc *rw_desc)
+{
+	int	ret;
+
+	ret = __remove_entries(client, ro_desc, rw_desc);
+	return ret;
+}
+
+
+static int __init
+fbxprocfs_init(void)
+{
+	INIT_LIST_HEAD(&clients);
+	mutex_init(&clients_mutex);
+
+	/* create freebox directory */
+	if (!(root = proc_mkdir("freebox", NULL))) {
+		printk(KERN_ERR PFX "can't create freebox/ dir\n");
+		return -EIO;
+	}
+	return 0;
+}
+
+static void __exit
+fbxprocfs_exit(void)
+{
+	remove_proc_entry("freebox", NULL);
+}
+
+module_init(fbxprocfs_init);
+module_exit(fbxprocfs_exit);
+
+EXPORT_SYMBOL(fbxprocfs_create_entries);
+EXPORT_SYMBOL(fbxprocfs_remove_entries);
+EXPORT_SYMBOL(fbxprocfs_add_client);
+EXPORT_SYMBOL(fbxprocfs_remove_client);
+
+MODULE_LICENSE("GPL");
+MODULE_VERSION("1.0");
+MODULE_AUTHOR("Maxime Bizon <mbizon@freebox.fr>");
+
diff -Nruw linux-4.4.115-fbx/drivers/fbxprocfs./Kconfig linux-4.4.115-fbx/drivers/fbxprocfs/Kconfig
--- linux-4.4.115-fbx/drivers/fbxprocfs./Kconfig	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/fbxprocfs/Kconfig	2019-01-22 16:16:23.183243509 +0100
@@ -0,0 +1,2 @@
+config FREEBOX_PROCFS
+	tristate "Freebox procfs interface"
diff -Nruw linux-4.4.115-fbx/drivers/fbxprocfs./Makefile linux-4.4.115-fbx/drivers/fbxprocfs/Makefile
--- linux-4.4.115-fbx/drivers/fbxprocfs./Makefile	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/fbxprocfs/Makefile	2019-01-22 16:16:23.183243509 +0100
@@ -0,0 +1 @@
+obj-$(CONFIG_FREEBOX_PROCFS) += fbxprocfs.o
diff -Nruw linux-4.4.115-fbx/drivers/firmware/qcom./Kconfig linux-4.4.115-fbx/drivers/firmware/qcom/Kconfig
--- linux-4.4.115-fbx/drivers/firmware/qcom./Kconfig	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/firmware/qcom/Kconfig	2019-01-22 16:16:23.195243617 +0100
@@ -0,0 +1,7 @@
+config MSM_TZ_LOG
+        tristate "MSM Trust Zone (TZ) Log Driver"
+        depends on DEBUG_FS
+        help
+          This option enables a driver with a debugfs interface for messages
+          produced by the Secure code (Trust zone). These messages provide
+          diagnostic information about TZ operation.
diff -Nruw linux-4.4.115-fbx/drivers/firmware/qcom./Makefile linux-4.4.115-fbx/drivers/firmware/qcom/Makefile
--- linux-4.4.115-fbx/drivers/firmware/qcom./Makefile	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/firmware/qcom/Makefile	2019-01-22 16:16:23.195243617 +0100
@@ -0,0 +1 @@
+obj-$(CONFIG_MSM_TZ_LOG) += tz_log.o
diff -Nruw linux-4.4.115-fbx/drivers/firmware/qcom./tz_log.c linux-4.4.115-fbx/drivers/firmware/qcom/tz_log.c
--- linux-4.4.115-fbx/drivers/firmware/qcom./tz_log.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/firmware/qcom/tz_log.c	2019-01-22 16:16:23.195243617 +0100
@@ -0,0 +1,1211 @@
+/* Copyright (c) 2011-2015,2017 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/debugfs.h>
+#include <linux/errno.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/msm_ion.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/uaccess.h>
+#include <linux/of.h>
+
+#include <soc/qcom/scm.h>
+#include <soc/qcom/qseecomi.h>
+
+/* QSEE_LOG_BUF_SIZE = 32K */
+#define QSEE_LOG_BUF_SIZE 0x8000
+
+
+/* TZ Diagnostic Area legacy version number */
+#define TZBSP_DIAG_MAJOR_VERSION_LEGACY	2
+/*
+ * Preprocessor Definitions and Constants
+ */
+#define TZBSP_MAX_CPU_COUNT 0x08
+/*
+ * Number of VMID Tables
+ */
+#define TZBSP_DIAG_NUM_OF_VMID 16
+/*
+ * VMID Description length
+ */
+#define TZBSP_DIAG_VMID_DESC_LEN 7
+/*
+ * Number of Interrupts
+ */
+#define TZBSP_DIAG_INT_NUM  32
+/*
+ * Length of descriptive name associated with Interrupt
+ */
+#define TZBSP_MAX_INT_DESC 16
+/*
+ * TZ 3.X version info
+ */
+#define QSEE_VERSION_TZ_3_X 0x800000
+/*
+ * TZ 4.X version info
+ */
+#define QSEE_VERSION_TZ_4_X 0x1000000
+
+#define TZBSP_AES_256_ENCRYPTED_KEY_SIZE 256
+#define TZBSP_NONCE_LEN 12
+#define TZBSP_TAG_LEN 16
+
+/*
+ * VMID Table
+ */
+struct tzdbg_vmid_t {
+	uint8_t vmid; /* Virtual Machine Identifier */
+	uint8_t desc[TZBSP_DIAG_VMID_DESC_LEN];	/* ASCII Text */
+};
+/*
+ * Boot Info Table
+ */
+struct tzdbg_boot_info_t {
+	uint32_t wb_entry_cnt;	/* Warmboot entry CPU Counter */
+	uint32_t wb_exit_cnt;	/* Warmboot exit CPU Counter */
+	uint32_t pc_entry_cnt;	/* Power Collapse entry CPU Counter */
+	uint32_t pc_exit_cnt;	/* Power Collapse exit CPU counter */
+	uint32_t warm_jmp_addr;	/* Last Warmboot Jump Address */
+	uint32_t spare;	/* Reserved for future use. */
+};
+/*
+ * Boot Info Table for 64-bit
+ */
+struct tzdbg_boot_info64_t {
+	uint32_t wb_entry_cnt;  /* Warmboot entry CPU Counter */
+	uint32_t wb_exit_cnt;   /* Warmboot exit CPU Counter */
+	uint32_t pc_entry_cnt;  /* Power Collapse entry CPU Counter */
+	uint32_t pc_exit_cnt;   /* Power Collapse exit CPU counter */
+	uint32_t psci_entry_cnt;/* PSCI syscall entry CPU Counter */
+	uint32_t psci_exit_cnt;   /* PSCI syscall exit CPU Counter */
+	uint64_t warm_jmp_addr; /* Last Warmboot Jump Address */
+	uint32_t warm_jmp_instr; /* Last Warmboot Jump Address Instruction */
+};
+/*
+ * Reset Info Table
+ */
+struct tzdbg_reset_info_t {
+	uint32_t reset_type;	/* Reset Reason */
+	uint32_t reset_cnt;	/* Number of resets occurred per CPU */
+};
+/*
+ * Interrupt Info Table
+ */
+struct tzdbg_int_t {
+	/*
+	 * Type of Interrupt/exception
+	 */
+	uint16_t int_info;
+	/*
+	 * Availability of the slot
+	 */
+	uint8_t avail;
+	/*
+	 * Reserved for future use
+	 */
+	uint8_t spare;
+	/*
+	 * Interrupt # for IRQ and FIQ
+	 */
+	uint32_t int_num;
+	/*
+	 * ASCII text describing type of interrupt e.g:
+	 * Secure Timer, EBI XPU. This string is always null terminated,
+	 * supporting at most TZBSP_MAX_INT_DESC characters.
+	 * Any additional characters are truncated.
+	 */
+	uint8_t int_desc[TZBSP_MAX_INT_DESC];
+	uint64_t int_count[TZBSP_MAX_CPU_COUNT]; /* # of times seen per CPU */
+};
+
+/*
+ * Interrupt Info Table used in tz version >=4.X
+ */
+struct tzdbg_int_t_tz40 {
+	uint16_t int_info;
+	uint8_t avail;
+	uint8_t spare;
+	uint32_t int_num;
+	uint8_t int_desc[TZBSP_MAX_INT_DESC];
+	uint32_t int_count[TZBSP_MAX_CPU_COUNT]; /* uint32_t in TZ ver >= 4.x*/
+};
+
+/* warm boot reason for cores */
+struct tzbsp_diag_wakeup_info_t {
+	/* Wake source info : APCS_GICC_HPPIR */
+	uint32_t HPPIR;
+	/* Wake source info : APCS_GICC_AHPPIR */
+	uint32_t AHPPIR;
+};
+
+/*
+ * Log ring buffer position
+ */
+struct tzdbg_log_pos_t {
+	uint16_t wrap;
+	uint16_t offset;
+};
+
+/*
+ * Log ring buffer
+ */
+struct tzdbg_log_t {
+	struct tzdbg_log_pos_t	log_pos;
+	/* open ended array to the end of the 4K IMEM buffer */
+	uint8_t					log_buf[];
+};
+
+/*
+ * Diagnostic Table
+ * Note: This is the reference data structure for tz diagnostic table
+ * supporting TZBSP_MAX_CPU_COUNT, the real diagnostic data is directly
+ * copied into buffer from i/o memory.
+ */
+struct tzdbg_t {
+	uint32_t magic_num;
+	uint32_t version;
+	/*
+	 * Number of CPUs
+	 */
+	uint32_t cpu_count;
+	/*
+	 * Offset of VMID Table
+	 */
+	uint32_t vmid_info_off;
+	/*
+	 * Offset of Boot Table
+	 */
+	uint32_t boot_info_off;
+	/*
+	 * Offset of Reset info Table
+	 */
+	uint32_t reset_info_off;
+	/*
+	 * Offset of Interrupt info Table
+	 */
+	uint32_t int_info_off;
+	/*
+	 * Ring Buffer Offset
+	 */
+	uint32_t ring_off;
+	/*
+	 * Ring Buffer Length
+	 */
+	uint32_t ring_len;
+
+	/* Offset for Wakeup info */
+	uint32_t wakeup_info_off;
+
+	/*
+	 * VMID to EE Mapping
+	 */
+	struct tzdbg_vmid_t vmid_info[TZBSP_DIAG_NUM_OF_VMID];
+	/*
+	 * Boot Info
+	 */
+	struct tzdbg_boot_info_t  boot_info[TZBSP_MAX_CPU_COUNT];
+	/*
+	 * Reset Info
+	 */
+	struct tzdbg_reset_info_t reset_info[TZBSP_MAX_CPU_COUNT];
+	uint32_t num_interrupts;
+	struct tzdbg_int_t  int_info[TZBSP_DIAG_INT_NUM];
+
+	/* Wake up info */
+	struct tzbsp_diag_wakeup_info_t  wakeup_info[TZBSP_MAX_CPU_COUNT];
+
+	uint8_t key[TZBSP_AES_256_ENCRYPTED_KEY_SIZE];
+
+	uint8_t nonce[TZBSP_NONCE_LEN];
+
+	uint8_t tag[TZBSP_TAG_LEN];
+
+	/*
+	 * We need at least 2K for the ring buffer
+	 */
+	struct tzdbg_log_t ring_buffer;	/* TZ Ring Buffer */
+};
+
+struct hypdbg_log_pos_t {
+	uint16_t wrap;
+	uint16_t offset;
+};
+
+struct hypdbg_boot_info_t {
+	uint32_t warm_entry_cnt;
+	uint32_t warm_exit_cnt;
+};
+
+struct hypdbg_t {
+	/* Magic Number */
+	uint32_t magic_num;
+
+	/* Number of CPUs */
+	uint32_t cpu_count;
+
+	/* Ring Buffer Offset */
+	uint32_t ring_off;
+
+	/* Ring buffer position mgmt */
+	struct hypdbg_log_pos_t log_pos;
+	uint32_t log_len;
+
+	/* S2 fault numbers */
+	uint32_t s2_fault_counter;
+
+	/* Boot Info */
+	struct hypdbg_boot_info_t boot_info[TZBSP_MAX_CPU_COUNT];
+
+	/* Ring buffer pointer */
+	uint8_t log_buf_p[];
+};
+
+/*
+ * Enumeration of the tz debug stat types
+ */
+enum tzdbg_stats_type {
+	TZDBG_BOOT = 0,
+	TZDBG_RESET,
+	TZDBG_INTERRUPT,
+	TZDBG_VMID,
+	TZDBG_GENERAL,
+	TZDBG_LOG,
+	TZDBG_QSEE_LOG,
+	TZDBG_HYP_GENERAL,
+	TZDBG_HYP_LOG,
+	TZDBG_STATS_MAX
+};
+
+struct tzdbg_stat {
+	char *name;
+	char *data;
+};
+
+struct tzdbg {
+	void __iomem *virt_iobase;
+	void __iomem *hyp_virt_iobase;
+	struct tzdbg_t *diag_buf;
+	struct hypdbg_t *hyp_diag_buf;
+	char *disp_buf;
+	int debug_tz[TZDBG_STATS_MAX];
+	struct tzdbg_stat stat[TZDBG_STATS_MAX];
+	uint32_t hyp_debug_rw_buf_size;
+	bool is_hyplog_enabled;
+	uint32_t tz_version;
+};
+
+static struct tzdbg tzdbg = {
+	.stat[TZDBG_BOOT].name = "boot",
+	.stat[TZDBG_RESET].name = "reset",
+	.stat[TZDBG_INTERRUPT].name = "interrupt",
+	.stat[TZDBG_VMID].name = "vmid",
+	.stat[TZDBG_GENERAL].name = "general",
+	.stat[TZDBG_LOG].name = "log",
+	.stat[TZDBG_QSEE_LOG].name = "qsee_log",
+	.stat[TZDBG_HYP_GENERAL].name = "hyp_general",
+	.stat[TZDBG_HYP_LOG].name = "hyp_log",
+};
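+
+/*
+ * Each stat name above is expected to surface as a debugfs file of the
+ * same name (boot, reset, interrupt, ...), its contents produced by
+ * the matching _disp_* formatter below.
+ */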
+
+static struct tzdbg_log_t *g_qsee_log;
+static uint32_t debug_rw_buf_size;
+
+/*
+ * Debugfs data structure and functions
+ */
+
+static int _disp_tz_general_stats(void)
+{
+	int len = 0;
+
+	len += snprintf(tzdbg.disp_buf + len, debug_rw_buf_size - 1,
+			"   Version        : 0x%x\n"
+			"   Magic Number   : 0x%x\n"
+			"   Number of CPU  : %d\n",
+			tzdbg.diag_buf->version,
+			tzdbg.diag_buf->magic_num,
+			tzdbg.diag_buf->cpu_count);
+	tzdbg.stat[TZDBG_GENERAL].data = tzdbg.disp_buf;
+	return len;
+}
+
+static int _disp_tz_vmid_stats(void)
+{
+	int i, num_vmid;
+	int len = 0;
+	struct tzdbg_vmid_t *ptr;
+
+	ptr = (struct tzdbg_vmid_t *)((unsigned char *)tzdbg.diag_buf +
+					tzdbg.diag_buf->vmid_info_off);
+	num_vmid = ((tzdbg.diag_buf->boot_info_off -
+				tzdbg.diag_buf->vmid_info_off)/
+					(sizeof(struct tzdbg_vmid_t)));
+
+	for (i = 0; i < num_vmid; i++) {
+		if (ptr->vmid < 0xFF) {
+			len += snprintf(tzdbg.disp_buf + len,
+				(debug_rw_buf_size - 1) - len,
+				"   0x%x        %s\n",
+				(uint32_t)ptr->vmid, (uint8_t *)ptr->desc);
+		}
+		if (len > (debug_rw_buf_size - 1)) {
+			pr_warn("%s: Cannot fit all info into the buffer\n",
+								__func__);
+			break;
+		}
+		ptr++;
+	}
+
+	tzdbg.stat[TZDBG_VMID].data = tzdbg.disp_buf;
+	return len;
+}
+
+static int _disp_tz_boot_stats(void)
+{
+	int i;
+	int len = 0;
+	struct tzdbg_boot_info_t *ptr = NULL;
+	struct tzdbg_boot_info64_t *ptr_64 = NULL;
+
+	pr_info("qsee_version = 0x%x\n", tzdbg.tz_version);
+	if (tzdbg.tz_version >= QSEE_VERSION_TZ_3_X) {
+		ptr_64 = (struct tzdbg_boot_info64_t *)((unsigned char *)
+			tzdbg.diag_buf + tzdbg.diag_buf->boot_info_off);
+	} else {
+		ptr = (struct tzdbg_boot_info_t *)((unsigned char *)
+			tzdbg.diag_buf + tzdbg.diag_buf->boot_info_off);
+	}
+
+	for (i = 0; i < tzdbg.diag_buf->cpu_count; i++) {
+		if (tzdbg.tz_version >= QSEE_VERSION_TZ_3_X) {
+			len += snprintf(tzdbg.disp_buf + len,
+					(debug_rw_buf_size - 1) - len,
+					"  CPU #: %d\n"
+					"     Warmboot jump address : 0x%llx\n"
+					"     Warmboot entry CPU counter : 0x%x\n"
+					"     Warmboot exit CPU counter : 0x%x\n"
+					"     Power Collapse entry CPU counter : 0x%x\n"
+					"     Power Collapse exit CPU counter : 0x%x\n"
+					"     Psci entry CPU counter : 0x%x\n"
+					"     Psci exit CPU counter : 0x%x\n"
+					"     Warmboot Jump Address Instruction : 0x%x\n",
+					i, (uint64_t)ptr_64->warm_jmp_addr,
+					ptr_64->wb_entry_cnt,
+					ptr_64->wb_exit_cnt,
+					ptr_64->pc_entry_cnt,
+					ptr_64->pc_exit_cnt,
+					ptr_64->psci_entry_cnt,
+					ptr_64->psci_exit_cnt,
+					ptr_64->warm_jmp_instr);
+
+			if (len > (debug_rw_buf_size - 1)) {
+				pr_warn("%s: Cannot fit all info into the buffer\n",
+						__func__);
+				break;
+			}
+			ptr_64++;
+		} else {
+			len += snprintf(tzdbg.disp_buf + len,
+					(debug_rw_buf_size - 1) - len,
+					"  CPU #: %d\n"
+					"     Warmboot jump address     : 0x%x\n"
+					"     Warmboot entry CPU counter: 0x%x\n"
+					"     Warmboot exit CPU counter : 0x%x\n"
+					"     Power Collapse entry CPU counter: 0x%x\n"
+					"     Power Collapse exit CPU counter : 0x%x\n",
+					i, ptr->warm_jmp_addr,
+					ptr->wb_entry_cnt,
+					ptr->wb_exit_cnt,
+					ptr->pc_entry_cnt,
+					ptr->pc_exit_cnt);
+
+			if (len > (debug_rw_buf_size - 1)) {
+				pr_warn("%s: Cannot fit all info into the buffer\n",
+						__func__);
+				break;
+			}
+			ptr++;
+		}
+	}
+	tzdbg.stat[TZDBG_BOOT].data = tzdbg.disp_buf;
+	return len;
+}
+
+static int _disp_tz_reset_stats(void)
+{
+	int i;
+	int len = 0;
+	struct tzdbg_reset_info_t *ptr;
+
+	ptr = (struct tzdbg_reset_info_t *)((unsigned char *)tzdbg.diag_buf +
+					tzdbg.diag_buf->reset_info_off);
+
+	for (i = 0; i < tzdbg.diag_buf->cpu_count; i++) {
+		len += snprintf(tzdbg.disp_buf + len,
+				(debug_rw_buf_size - 1) - len,
+				"  CPU #: %d\n"
+				"     Reset Type (reason)       : 0x%x\n"
+				"     Reset counter             : 0x%x\n",
+				i, ptr->reset_type, ptr->reset_cnt);
+
+		if (len > (debug_rw_buf_size - 1)) {
+			pr_warn("%s: Cannot fit all info into the buffer\n",
+								__func__);
+			break;
+		}
+
+		ptr++;
+	}
+	tzdbg.stat[TZDBG_RESET].data = tzdbg.disp_buf;
+	return len;
+}
+
+static int _disp_tz_interrupt_stats(void)
+{
+	int i, j;
+	int len = 0;
+	uint32_t *num_int;
+	void *ptr;
+	struct tzdbg_int_t *tzdbg_ptr;
+	struct tzdbg_int_t_tz40 *tzdbg_ptr_tz40;
+
+	num_int = (uint32_t *)((unsigned char *)tzdbg.diag_buf +
+			(tzdbg.diag_buf->int_info_off - sizeof(uint32_t)));
+	ptr = ((unsigned char *)tzdbg.diag_buf +
+					tzdbg.diag_buf->int_info_off);
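+	/*
+	 * The 32-bit word immediately preceding int_info_off holds the
+	 * number of interrupt records; ptr points at the first record.
+	 */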
+
+	pr_info("qsee_version = 0x%x\n", tzdbg.tz_version);
+
+	if (tzdbg.tz_version < QSEE_VERSION_TZ_4_X) {
+		tzdbg_ptr = ptr;
+		for (i = 0; i < (*num_int); i++) {
+			len += snprintf(tzdbg.disp_buf + len,
+				(debug_rw_buf_size - 1) - len,
+				"     Interrupt Number          : 0x%x\n"
+				"     Type of Interrupt         : 0x%x\n"
+				"     Description of interrupt  : %s\n",
+				tzdbg_ptr->int_num,
+				(uint32_t)tzdbg_ptr->int_info,
+				(uint8_t *)tzdbg_ptr->int_desc);
+			for (j = 0; j < tzdbg.diag_buf->cpu_count; j++) {
+				len += snprintf(tzdbg.disp_buf + len,
+				(debug_rw_buf_size - 1) - len,
+				"     int_count on CPU # %d      : %u\n",
+				j,
+				(uint32_t)tzdbg_ptr->int_count[j]);
+			}
+			len += snprintf(tzdbg.disp_buf + len,
+					(debug_rw_buf_size - 1) - len, "\n");
+
+			if (len > (debug_rw_buf_size - 1)) {
+				pr_warn("%s: Cannot fit all info into buf\n",
+								__func__);
+				break;
+			}
+			tzdbg_ptr++;
+		}
+	} else {
+		tzdbg_ptr_tz40 = ptr;
+		for (i = 0; i < (*num_int); i++) {
+			len += snprintf(tzdbg.disp_buf + len,
+				(debug_rw_buf_size - 1) - len,
+				"     Interrupt Number          : 0x%x\n"
+				"     Type of Interrupt         : 0x%x\n"
+				"     Description of interrupt  : %s\n",
+				tzdbg_ptr_tz40->int_num,
+				(uint32_t)tzdbg_ptr_tz40->int_info,
+				(uint8_t *)tzdbg_ptr_tz40->int_desc);
+			for (j = 0; j < tzdbg.diag_buf->cpu_count; j++) {
+				len += snprintf(tzdbg.disp_buf + len,
+				(debug_rw_buf_size - 1) - len,
+				"     int_count on CPU # %d      : %u\n",
+				j,
+				(uint32_t)tzdbg_ptr_tz40->int_count[j]);
+			}
+			len += snprintf(tzdbg.disp_buf + len,
+					(debug_rw_buf_size - 1) - len, "\n");
+
+			if (len > (debug_rw_buf_size - 1)) {
+				pr_warn("%s: Cannot fit all info into buf\n",
+								__func__);
+				break;
+			}
+			tzdbg_ptr_tz40++;
+		}
+	}
+
+	tzdbg.stat[TZDBG_INTERRUPT].data = tzdbg.disp_buf;
+	return len;
+}
+
+static int _disp_tz_log_stats_legacy(void)
+{
+	int len = 0;
+	unsigned char *ptr;
+
+	ptr = (unsigned char *)tzdbg.diag_buf +
+					tzdbg.diag_buf->ring_off;
+	len += snprintf(tzdbg.disp_buf, (debug_rw_buf_size - 1) - len,
+							"%s\n", ptr);
+
+	tzdbg.stat[TZDBG_LOG].data = tzdbg.disp_buf;
+	return len;
+}
+
+static int _disp_log_stats(struct tzdbg_log_t *log,
+			struct tzdbg_log_pos_t *log_start, uint32_t log_len,
+			size_t count, uint32_t buf_idx)
+{
+	uint32_t wrap_start;
+	uint32_t wrap_end;
+	uint32_t wrap_cnt;
+	int max_len;
+	int len = 0;
+	int i = 0;
+
+	wrap_start = log_start->wrap;
+	wrap_end = log->log_pos.wrap;
+
+	/* Calculate difference in # of buffer wrap-arounds */
+	if (wrap_end >= wrap_start) {
+		wrap_cnt = wrap_end - wrap_start;
+	} else {
+		/* wrap counter has wrapped around, invalidate start position */
+		wrap_cnt = 2;
+	}
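+	/*
+	 * Example: wrap_start = 3 and wrap_end = 5 give wrap_cnt = 2, so the
+	 * saved start position is at least one full lap behind the writer
+	 * and is reset just past the current write position below.
+	 */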
+
+	if (wrap_cnt > 1) {
+		/* end position has wrapped around more than once, */
+		/* current start no longer valid                   */
+		log_start->wrap = log->log_pos.wrap - 1;
+		log_start->offset = (log->log_pos.offset + 1) % log_len;
+	} else if ((wrap_cnt == 1) &&
+		(log->log_pos.offset > log_start->offset)) {
+		/* end position has overwritten start */
+		log_start->offset = (log->log_pos.offset + 1) % log_len;
+	}
+
+	while (log_start->offset == log->log_pos.offset) {
+		/*
+		 * No data in ring buffer,
+		 * so we'll hang around until something happens
+		 */
+		unsigned long t = msleep_interruptible(50);
+
+		if (t != 0) {
+			/* Some event woke us up, so let's quit */
+			return 0;
+		}
+
+		if (buf_idx == TZDBG_LOG)
+			memcpy_fromio((void *)tzdbg.diag_buf, tzdbg.virt_iobase,
+						debug_rw_buf_size);
+
+	}
+
+	max_len = (count > debug_rw_buf_size) ? debug_rw_buf_size : count;
+
+	/*
+	 *  Read from ring buff while there is data and space in return buff
+	 */
+	while ((log_start->offset != log->log_pos.offset) && (len < max_len)) {
+		tzdbg.disp_buf[i++] = log->log_buf[log_start->offset];
+		log_start->offset = (log_start->offset + 1) % log_len;
+		if (log_start->offset == 0)
+			++log_start->wrap;
+		++len;
+	}
+
+	/*
+	 * return buffer to caller
+	 */
+	tzdbg.stat[buf_idx].data = tzdbg.disp_buf;
+	return len;
+}
+
+static int __disp_hyp_log_stats(uint8_t *log,
+			struct hypdbg_log_pos_t *log_start, uint32_t log_len,
+			size_t count, uint32_t buf_idx)
+{
+	struct hypdbg_t *hyp = tzdbg.hyp_diag_buf;
+	unsigned long t = 0;
+	uint32_t wrap_start;
+	uint32_t wrap_end;
+	uint32_t wrap_cnt;
+	int max_len;
+	int len = 0;
+	int i = 0;
+
+	wrap_start = log_start->wrap;
+	wrap_end = hyp->log_pos.wrap;
+
+	/* Calculate difference in # of buffer wrap-arounds */
+	if (wrap_end >= wrap_start) {
+		wrap_cnt = wrap_end - wrap_start;
+	} else {
+		/* wrap counter has wrapped around, invalidate start position */
+		wrap_cnt = 2;
+	}
+
+	if (wrap_cnt > 1) {
+		/* end position has wrapped around more than once, */
+		/* current start no longer valid                   */
+		log_start->wrap = hyp->log_pos.wrap - 1;
+		log_start->offset = (hyp->log_pos.offset + 1) % log_len;
+	} else if ((wrap_cnt == 1) &&
+		(hyp->log_pos.offset > log_start->offset)) {
+		/* end position has overwritten start */
+		log_start->offset = (hyp->log_pos.offset + 1) % log_len;
+	}
+
+	while (log_start->offset == hyp->log_pos.offset) {
+		/*
+		 * No data in ring buffer,
+		 * so we'll hang around until something happens
+		 */
+		t = msleep_interruptible(50);
+		if (t != 0) {
+			/* Some event woke us up, so let's quit */
+			return 0;
+		}
+
+		/* TZDBG_HYP_LOG */
+		memcpy_fromio((void *)tzdbg.hyp_diag_buf, tzdbg.hyp_virt_iobase,
+						tzdbg.hyp_debug_rw_buf_size);
+	}
+
+	max_len = (count > tzdbg.hyp_debug_rw_buf_size) ?
+				tzdbg.hyp_debug_rw_buf_size : count;
+
+	/*
+	 *  Read from ring buff while there is data and space in return buff
+	 */
+	while ((log_start->offset != hyp->log_pos.offset) && (len < max_len)) {
+		tzdbg.disp_buf[i++] = log[log_start->offset];
+		log_start->offset = (log_start->offset + 1) % log_len;
+		if (log_start->offset == 0)
+			++log_start->wrap;
+		++len;
+	}
+
+	/*
+	 * return buffer to caller
+	 */
+	tzdbg.stat[buf_idx].data = tzdbg.disp_buf;
+	return len;
+}
+
+static int _disp_tz_log_stats(size_t count)
+{
+	static struct tzdbg_log_pos_t log_start = {0};
+	struct tzdbg_log_t *log_ptr;
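+
+	/*
+	 * ring_off points at the raw ring data, which the diag region
+	 * prefixes with the write-position header; back up by the log_buf
+	 * offset so log_ptr covers both header and data.
+	 */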
+	log_ptr = (struct tzdbg_log_t *)((unsigned char *)tzdbg.diag_buf +
+				tzdbg.diag_buf->ring_off -
+				offsetof(struct tzdbg_log_t, log_buf));
+
+	return _disp_log_stats(log_ptr, &log_start,
+				tzdbg.diag_buf->ring_len, count, TZDBG_LOG);
+}
+
+static int _disp_hyp_log_stats(size_t count)
+{
+	static struct hypdbg_log_pos_t log_start = {0};
+	uint8_t *log_ptr;
+
+	log_ptr = (uint8_t *)((unsigned char *)tzdbg.hyp_diag_buf +
+				tzdbg.hyp_diag_buf->ring_off);
+
+	return __disp_hyp_log_stats(log_ptr, &log_start,
+			tzdbg.hyp_debug_rw_buf_size, count, TZDBG_HYP_LOG);
+}
+
+static int _disp_qsee_log_stats(size_t count)
+{
+	static struct tzdbg_log_pos_t log_start = {0};
+
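+	/*
+	 * The QSEE log buffer starts with a tzdbg_log_pos_t write-position
+	 * header; only the bytes after it are ring payload, hence the
+	 * reduced length passed below.
+	 */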
+	return _disp_log_stats(g_qsee_log, &log_start,
+			QSEE_LOG_BUF_SIZE - sizeof(struct tzdbg_log_pos_t),
+			count, TZDBG_QSEE_LOG);
+}
+
+static int _disp_hyp_general_stats(size_t count)
+{
+	int len = 0;
+	int i;
+	struct hypdbg_boot_info_t *ptr = NULL;
+
+	len += snprintf(tzdbg.disp_buf + len,
+			(tzdbg.hyp_debug_rw_buf_size - 1) - len,
+			"   Magic Number    : 0x%x\n"
+			"   CPU Count       : 0x%x\n"
+			"   S2 Fault Counter: 0x%x\n",
+			tzdbg.hyp_diag_buf->magic_num,
+			tzdbg.hyp_diag_buf->cpu_count,
+			tzdbg.hyp_diag_buf->s2_fault_counter);
+
+	ptr = tzdbg.hyp_diag_buf->boot_info;
+	for (i = 0; i < tzdbg.hyp_diag_buf->cpu_count; i++) {
+		len += snprintf(tzdbg.disp_buf + len,
+				(tzdbg.hyp_debug_rw_buf_size - 1) - len,
+				"  CPU #: %d\n"
+				"     Warmboot entry CPU counter: 0x%x\n"
+				"     Warmboot exit CPU counter : 0x%x\n",
+				i, ptr->warm_entry_cnt, ptr->warm_exit_cnt);
+
+		if (len > (tzdbg.hyp_debug_rw_buf_size - 1)) {
+			pr_warn("%s: Cannot fit all info into the buffer\n",
+								__func__);
+			break;
+		}
+		ptr++;
+	}
+
+	tzdbg.stat[TZDBG_HYP_GENERAL].data = tzdbg.disp_buf;
+	return len;
+}
+
+static ssize_t tzdbgfs_read(struct file *file, char __user *buf,
+	size_t count, loff_t *offp)
+{
+	int len = 0;
+	int *tz_id = file->private_data;
+
+	if (*tz_id == TZDBG_BOOT || *tz_id == TZDBG_RESET ||
+		*tz_id == TZDBG_INTERRUPT || *tz_id == TZDBG_GENERAL ||
+		*tz_id == TZDBG_VMID || *tz_id == TZDBG_LOG)
+		memcpy_fromio((void *)tzdbg.diag_buf, tzdbg.virt_iobase,
+						debug_rw_buf_size);
+
+	if (*tz_id == TZDBG_HYP_GENERAL || *tz_id == TZDBG_HYP_LOG)
+		memcpy_fromio((void *)tzdbg.hyp_diag_buf, tzdbg.hyp_virt_iobase,
+					tzdbg.hyp_debug_rw_buf_size);
+
+	switch (*tz_id) {
+	case TZDBG_BOOT:
+		len = _disp_tz_boot_stats();
+		break;
+	case TZDBG_RESET:
+		len = _disp_tz_reset_stats();
+		break;
+	case TZDBG_INTERRUPT:
+		len = _disp_tz_interrupt_stats();
+		break;
+	case TZDBG_GENERAL:
+		len = _disp_tz_general_stats();
+		break;
+	case TZDBG_VMID:
+		len = _disp_tz_vmid_stats();
+		break;
+	case TZDBG_LOG:
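+		/*
+		 * The diag major version is in the top 16 bits of the
+		 * version word; newer TZ images expose a ring buffer with a
+		 * write-position header, legacy images a flat string.
+		 */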
+		if (TZBSP_DIAG_MAJOR_VERSION_LEGACY <
+				(tzdbg.diag_buf->version >> 16)) {
+			len = _disp_tz_log_stats(count);
+			*offp = 0;
+		} else {
+			len = _disp_tz_log_stats_legacy();
+		}
+		break;
+	case TZDBG_QSEE_LOG:
+		len = _disp_qsee_log_stats(count);
+		*offp = 0;
+		break;
+	case TZDBG_HYP_GENERAL:
+		len = _disp_hyp_general_stats(count);
+		break;
+	case TZDBG_HYP_LOG:
+		len = _disp_hyp_log_stats(count);
+		*offp = 0;
+		break;
+	default:
+		break;
+	}
+
+	if (len > count)
+		len = count;
+
+	return simple_read_from_buffer(buf, len, offp,
+				tzdbg.stat[(*tz_id)].data, len);
+}
+
+static int tzdbgfs_open(struct inode *inode, struct file *pfile)
+{
+	pfile->private_data = inode->i_private;
+	return 0;
+}
+
+const struct file_operations tzdbg_fops = {
+	.owner   = THIS_MODULE,
+	.read    = tzdbgfs_read,
+	.open    = tzdbgfs_open,
+};
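+
+/*
+ * Each stat is exposed as a read-only debugfs file, e.g. (illustrative
+ * path; the entry names come from tzdbg.stat[].name):
+ *
+ *	cat /sys/kernel/debug/tzdbg/log
+ */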
+
+static struct ion_client  *g_ion_clnt;
+static struct ion_handle *g_ihandle;
+
+/*
+ * Allocates log buffer from ION, registers the buffer at TZ
+ */
+static void tzdbg_register_qsee_log_buf(void)
+{
+	/* register log buffer scm request */
+	struct qseecom_reg_log_buf_ireq req;
+
+	/* scm response */
+	struct qseecom_command_scm_resp resp = {};
+	ion_phys_addr_t pa = 0;
+	size_t len;
+	int ret = 0;
+
+	/* Create ION msm client */
+	g_ion_clnt = msm_ion_client_create("qsee_log");
+	if (g_ion_clnt == NULL) {
+		pr_err("%s: Ion client cannot be created\n", __func__);
+		return;
+	}
+
+	g_ihandle = ion_alloc(g_ion_clnt, QSEE_LOG_BUF_SIZE,
+			4096, ION_HEAP(ION_QSECOM_HEAP_ID), 0);
+	if (IS_ERR_OR_NULL(g_ihandle)) {
+		pr_err("%s: Ion client could not retrieve the handle\n",
+			__func__);
+		goto err1;
+	}
+
+	ret = ion_phys(g_ion_clnt, g_ihandle, &pa, &len);
+	if (ret) {
+		pr_err("%s: Ion conversion to physical address failed\n",
+			__func__);
+		goto err2;
+	}
+
+	req.qsee_cmd_id = QSEOS_REGISTER_LOG_BUF_COMMAND;
+	req.phy_addr = (uint32_t)pa;
+	req.len = len;
+
+	if (!is_scm_armv8()) {
+		/*  SCM_CALL  to register the log buffer */
+		ret = scm_call(SCM_SVC_TZSCHEDULER, 1,  &req, sizeof(req),
+			&resp, sizeof(resp));
+	} else {
+		struct scm_desc desc = {0};
+		desc.args[0] = pa;
+		desc.args[1] = len;
+		desc.arginfo = 0x22;
+		ret = scm_call2(SCM_QSEEOS_FNID(1, 6), &desc);
+		resp.result = desc.ret[0];
+	}
+
+	if (ret) {
+		pr_err("%s: scm_call to register log buffer failed\n",
+			__func__);
+		goto err2;
+	}
+
+	if (resp.result != QSEOS_RESULT_SUCCESS) {
+		pr_err("%s: scm_call to register log buf failed, resp result = %d\n",
+			__func__, resp.result);
+		goto err2;
+	}
+
+	g_qsee_log =
+		(struct tzdbg_log_t *)ion_map_kernel(g_ion_clnt, g_ihandle);
+
+	if (IS_ERR(g_qsee_log)) {
+		pr_err("%s: Couldn't map ion buffer to kernel\n",
+			__func__);
+		goto err2;
+	}
+
+	g_qsee_log->log_pos.wrap = g_qsee_log->log_pos.offset = 0;
+	return;
+
+err2:
+	ion_free(g_ion_clnt, g_ihandle);
+	g_ihandle = NULL;
+err1:
+	ion_client_destroy(g_ion_clnt);
+	g_ion_clnt = NULL;
+}
+
+static int  tzdbgfs_init(struct platform_device *pdev)
+{
+	int rc = 0;
+	int i;
+	struct dentry           *dent_dir;
+	struct dentry           *dent;
+
+	dent_dir = debugfs_create_dir("tzdbg", NULL);
+	if (dent_dir == NULL) {
+		dev_err(&pdev->dev, "tzdbg debugfs_create_dir failed\n");
+		return -ENOMEM;
+	}
+
+	for (i = 0; i < TZDBG_STATS_MAX; i++) {
+		tzdbg.debug_tz[i] = i;
+		dent = debugfs_create_file(tzdbg.stat[i].name,
+				S_IRUGO, dent_dir,
+				&tzdbg.debug_tz[i], &tzdbg_fops);
+		if (dent == NULL) {
+			dev_err(&pdev->dev, "TZ debugfs_create_file failed\n");
+			rc = -ENOMEM;
+			goto err;
+		}
+	}
+	tzdbg.disp_buf = kzalloc(max(debug_rw_buf_size,
+			tzdbg.hyp_debug_rw_buf_size), GFP_KERNEL);
+	if (tzdbg.disp_buf == NULL) {
+		pr_err("%s: Can't allocate memory for tzdbg.disp_buf\n",
+			__func__);
+		rc = -ENOMEM;
+		goto err;
+	}
+	platform_set_drvdata(pdev, dent_dir);
+	return 0;
+err:
+	debugfs_remove_recursive(dent_dir);
+
+	return rc;
+}
+
+static void tzdbgfs_exit(struct platform_device *pdev)
+{
+	struct dentry           *dent_dir;
+
+	kzfree(tzdbg.disp_buf);
+	dent_dir = platform_get_drvdata(pdev);
+	debugfs_remove_recursive(dent_dir);
+	if (g_ion_clnt != NULL) {
+		if (!IS_ERR_OR_NULL(g_ihandle)) {
+			ion_unmap_kernel(g_ion_clnt, g_ihandle);
+			ion_free(g_ion_clnt, g_ihandle);
+		}
+		ion_client_destroy(g_ion_clnt);
+	}
+}
+
+static int __update_hypdbg_base(struct platform_device *pdev,
+			void __iomem *virt_iobase)
+{
+	phys_addr_t hypdiag_phy_iobase;
+	uint32_t hyp_address_offset;
+	uint32_t hyp_size_offset;
+	struct hypdbg_t *hyp;
+	uint32_t *ptr = NULL;
+
+	if (of_property_read_u32((&pdev->dev)->of_node, "hyplog-address-offset",
+							&hyp_address_offset)) {
+		dev_err(&pdev->dev, "hyplog address offset is not defined\n");
+		return -EINVAL;
+	}
+	if (of_property_read_u32((&pdev->dev)->of_node, "hyplog-size-offset",
+							&hyp_size_offset)) {
+		dev_err(&pdev->dev, "hyplog size offset is not defined\n");
+		return -EINVAL;
+	}
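+
+	/*
+	 * Illustrative DT fragment (the offset values are hypothetical and
+	 * board-specific):
+	 *
+	 *	qcom,hyplog-enabled;
+	 *	hyplog-address-offset = <0x410>;
+	 *	hyplog-size-offset = <0x414>;
+	 */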
+
+	hypdiag_phy_iobase = readl_relaxed(virt_iobase + hyp_address_offset);
+	tzdbg.hyp_debug_rw_buf_size = readl_relaxed(virt_iobase +
+					hyp_size_offset);
+
+	tzdbg.hyp_virt_iobase = devm_ioremap_nocache(&pdev->dev,
+					hypdiag_phy_iobase,
+					tzdbg.hyp_debug_rw_buf_size);
+	if (!tzdbg.hyp_virt_iobase) {
+		dev_err(&pdev->dev, "ERROR could not ioremap: start=%pr, len=%u\n",
+			&hypdiag_phy_iobase, tzdbg.hyp_debug_rw_buf_size);
+		return -ENXIO;
+	}
+
+	ptr = kzalloc(tzdbg.hyp_debug_rw_buf_size, GFP_KERNEL);
+	if (!ptr)
+		return -ENOMEM;
+
+	tzdbg.hyp_diag_buf = (struct hypdbg_t *)ptr;
+	hyp = tzdbg.hyp_diag_buf;
+	hyp->log_pos.wrap = hyp->log_pos.offset = 0;
+	return 0;
+}
+
+static void tzdbg_get_tz_version(void)
+{
+	uint32_t smc_id = 0;
+	uint32_t feature = 10;
+	struct qseecom_command_scm_resp resp = {0};
+	struct scm_desc desc = {0};
+	int ret = 0;
+
+	if (!is_scm_armv8()) {
+		ret = scm_call(SCM_SVC_INFO, SCM_SVC_UTIL, &feature,
+					sizeof(feature), &resp, sizeof(resp));
+	} else {
+		smc_id = TZ_INFO_GET_FEATURE_VERSION_ID;
+		desc.arginfo = TZ_INFO_GET_FEATURE_VERSION_ID_PARAM_ID;
+		desc.args[0] = feature;
+		ret = scm_call2(smc_id, &desc);
+		resp.result = desc.ret[0];
+	}
+
+	if (ret)
+		pr_err("%s: scm_call to get tz version failed\n",
+				__func__);
+	else
+		tzdbg.tz_version = resp.result;
+}
+
+/*
+ * Driver functions
+ */
+static int tz_log_probe(struct platform_device *pdev)
+{
+	struct resource *resource;
+	void __iomem *virt_iobase;
+	phys_addr_t tzdiag_phy_iobase;
+	uint32_t *ptr = NULL;
+	int ret = 0;
+
+	/*
+	 * Get address that stores the physical location diagnostic data
+	 */
+	resource = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!resource) {
+		dev_err(&pdev->dev,
+				"%s: ERROR Missing MEM resource\n", __func__);
+		return -ENXIO;
+	}
+
+	/*
+	 * Get the debug buffer size
+	 */
+	debug_rw_buf_size = resource->end - resource->start + 1;
+
+	/*
+	 * Map address that stores the physical location diagnostic data
+	 */
+	virt_iobase = devm_ioremap_nocache(&pdev->dev, resource->start,
+				debug_rw_buf_size);
+	if (!virt_iobase) {
+		dev_err(&pdev->dev,
+			"%s: ERROR could not ioremap: start=%pr, len=%u\n",
+			__func__, &resource->start,
+			(unsigned int)(debug_rw_buf_size));
+		return -ENXIO;
+	}
+
+	if (pdev->dev.of_node) {
+		tzdbg.is_hyplog_enabled = of_property_read_bool(
+			(&pdev->dev)->of_node, "qcom,hyplog-enabled");
+		if (tzdbg.is_hyplog_enabled) {
+			ret = __update_hypdbg_base(pdev, virt_iobase);
+			if (ret) {
+				dev_err(&pdev->dev, "%s() failed to get device tree data ret = %d\n",
+						__func__, ret);
+				return -EINVAL;
+			}
+		} else {
+			dev_info(&pdev->dev, "Hyp log service is not supported\n");
+		}
+	} else {
+		dev_dbg(&pdev->dev, "Device tree data is not found\n");
+	}
+
+	/*
+	 * Retrieve the address of diagnostic data
+	 */
+	tzdiag_phy_iobase = readl_relaxed(virt_iobase);
+
+	/*
+	 * Map the diagnostic information area
+	 */
+	tzdbg.virt_iobase = devm_ioremap_nocache(&pdev->dev,
+				tzdiag_phy_iobase, debug_rw_buf_size);
+
+	if (!tzdbg.virt_iobase) {
+		dev_err(&pdev->dev,
+			"%s: ERROR could not ioremap: start=%pr, len=%u\n",
+			__func__, &tzdiag_phy_iobase,
+			debug_rw_buf_size);
+		return -ENXIO;
+	}
+
+	ptr = kzalloc(debug_rw_buf_size, GFP_KERNEL);
+	if (ptr == NULL) {
+		pr_err("%s: Can't allocate memory: ptr\n",
+			__func__);
+		return -ENOMEM;
+	}
+
+	tzdbg.diag_buf = (struct tzdbg_t *)ptr;
+
+	if (tzdbgfs_init(pdev))
+		goto err;
+
+	tzdbg_register_qsee_log_buf();
+
+	tzdbg_get_tz_version();
+
+	return 0;
+err:
+	kfree(tzdbg.diag_buf);
+	return -ENXIO;
+}
+
+
+static int tz_log_remove(struct platform_device *pdev)
+{
+	kzfree(tzdbg.diag_buf);
+	kzfree(tzdbg.hyp_diag_buf);
+	tzdbgfs_exit(pdev);
+
+	return 0;
+}
+
+static const struct of_device_id tzlog_match[] = {
+	{ .compatible = "qcom,tz-log", },
+	{}
+};
+
+static struct platform_driver tz_log_driver = {
+	.probe		= tz_log_probe,
+	.remove		= tz_log_remove,
+	.driver		= {
+		.name = "tz_log",
+		.owner = THIS_MODULE,
+		.of_match_table = tzlog_match,
+		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
+	},
+};
+
+static int __init tz_log_init(void)
+{
+	return platform_driver_register(&tz_log_driver);
+}
+
+static void __exit tz_log_exit(void)
+{
+	platform_driver_unregister(&tz_log_driver);
+}
+
+module_init(tz_log_init);
+module_exit(tz_log_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("TZ Log driver");
+MODULE_ALIAS("platform:tz_log");
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/gpio/gpio-msm-smp2p.c	2019-01-22 16:16:23.203243690 +0100
@@ -0,0 +1,835 @@
+/* drivers/gpio/gpio-msm-smp2p.c
+ *
+ * Copyright (c) 2013-2014, 2016 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/bitmap.h>
+#include <linux/of.h>
+#include <linux/gpio.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/slab.h>
+#include <linux/list.h>
+#include <linux/ipc_logging.h>
+#include "../soc/qcom/smp2p_private_api.h"
+#include "../soc/qcom/smp2p_private.h"
+
+/* GPIO device - one per SMP2P entry. */
+struct smp2p_chip_dev {
+	struct list_head entry_list;
+	char name[SMP2P_MAX_ENTRY_NAME];
+	int remote_pid;
+	bool is_inbound;
+	bool is_open;
+	bool in_shadow;
+	uint32_t shadow_value;
+	struct work_struct shadow_work;
+	spinlock_t shadow_lock;
+	struct notifier_block out_notifier;
+	struct notifier_block in_notifier;
+	struct msm_smp2p_out *out_handle;
+
+	struct gpio_chip gpio;
+	struct irq_domain *irq_domain;
+	int irq_base;
+
+	spinlock_t irq_lock;
+	DECLARE_BITMAP(irq_enabled, SMP2P_BITS_PER_ENTRY);
+	DECLARE_BITMAP(irq_rising_edge, SMP2P_BITS_PER_ENTRY);
+	DECLARE_BITMAP(irq_falling_edge, SMP2P_BITS_PER_ENTRY);
+};
+
+static struct platform_driver smp2p_gpio_driver;
+static struct lock_class_key smp2p_gpio_lock_class;
+static struct irq_chip smp2p_gpio_irq_chip;
+static DEFINE_SPINLOCK(smp2p_entry_lock_lha1);
+static LIST_HEAD(smp2p_entry_list);
+
+/* Used for mapping edge to name for logging. */
+static const char * const edge_names[] = {
+	"-",
+	"0->1",
+	"1->0",
+	"-",
+};
+
+/* Used for mapping edge to value for logging. */
+static const char * const edge_name_rising[] = {
+	"-",
+	"0->1",
+};
+
+/* Used for mapping edge to value for logging. */
+static const char * const edge_name_falling[] = {
+	"-",
+	"1->0",
+};
+
+static int smp2p_gpio_to_irq(struct gpio_chip *cp,
+	unsigned offset);
+
+/**
+ * smp2p_get_value - Retrieves GPIO value.
+ *
+ * @cp:      GPIO chip pointer
+ * @offset:  Pin offset
+ * @returns: >=0: value of GPIO Pin; < 0 for error
+ *
+ * Error codes:
+ *   -ENODEV - chip/entry invalid
+ *   -ENETDOWN - valid entry, but entry not yet created
+ */
+static int smp2p_get_value(struct gpio_chip *cp,
+	unsigned offset)
+{
+	struct smp2p_chip_dev *chip;
+	int ret = 0;
+	uint32_t data;
+
+	if (!cp)
+		return -ENODEV;
+
+	chip = container_of(cp, struct smp2p_chip_dev, gpio);
+	if (!chip->is_open)
+		return -ENETDOWN;
+
+	if (chip->is_inbound)
+		ret = msm_smp2p_in_read(chip->remote_pid, chip->name, &data);
+	else
+		ret = msm_smp2p_out_read(chip->out_handle, &data);
+
+	if (!ret)
+		ret = (data & (1 << offset)) ? 1 : 0;
+
+	return ret;
+}
+
+/**
+ * smp2p_set_value - Sets GPIO value.
+ *
+ * @cp:     GPIO chip pointer
+ * @offset: Pin offset
+ * @value:  New value
+ */
+static void smp2p_set_value(struct gpio_chip *cp, unsigned offset, int value)
+{
+	struct smp2p_chip_dev *chip;
+	uint32_t data_set;
+	uint32_t data_clear;
+	bool send_irq;
+	int ret;
+	unsigned long flags;
+
+	if (!cp)
+		return;
+
+	chip = container_of(cp, struct smp2p_chip_dev, gpio);
+
+	if (chip->is_inbound) {
+		SMP2P_INFO("%s: '%s':%d virq %d invalid operation\n",
+			__func__, chip->name, chip->remote_pid,
+			chip->irq_base + offset);
+		return;
+	}
+
+	if (value & SMP2P_GPIO_NO_INT) {
+		value &= ~SMP2P_GPIO_NO_INT;
+		send_irq = false;
+	} else {
+		send_irq = true;
+	}
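+
+	/*
+	 * SMP2P_GPIO_NO_INT is a modifier flag ORed into the value by the
+	 * caller, e.g. (illustrative) gpio_set_value(gpio, 1 | SMP2P_GPIO_NO_INT)
+	 * to update the bit without interrupting the remote processor.
+	 */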
+
+	if (value) {
+		data_set = 1 << offset;
+		data_clear = 0;
+	} else {
+		data_set = 0;
+		data_clear = 1 << offset;
+	}
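+
+	/*
+	 * While the entry is not yet open, accumulate the requested state in
+	 * shadow_value; smp2p_gpio_shadow_worker() flushes it once the
+	 * SMP2P_OPEN notification arrives (see smp2p_gpio_out_notify()).
+	 */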
+
+	spin_lock_irqsave(&chip->shadow_lock, flags);
+	if (!chip->is_open) {
+		chip->in_shadow = true;
+		chip->shadow_value &= ~data_clear;
+		chip->shadow_value |= data_set;
+		spin_unlock_irqrestore(&chip->shadow_lock, flags);
+		return;
+	}
+
+	if (chip->in_shadow) {
+		chip->in_shadow = false;
+		chip->shadow_value &= ~data_clear;
+		chip->shadow_value |= data_set;
+		ret = msm_smp2p_out_modify(chip->out_handle,
+				chip->shadow_value, 0x0, send_irq);
+		chip->shadow_value = 0x0;
+	} else {
+		ret = msm_smp2p_out_modify(chip->out_handle,
+				data_set, data_clear, send_irq);
+	}
+	spin_unlock_irqrestore(&chip->shadow_lock, flags);
+
+	if (ret)
+		SMP2P_GPIO("'%s':%d gpio %d set to %d failed (%d)\n",
+			chip->name, chip->remote_pid,
+			chip->gpio.base + offset, value, ret);
+	else
+		SMP2P_GPIO("'%s':%d gpio %d set to %d\n",
+			chip->name, chip->remote_pid,
+			chip->gpio.base + offset, value);
+}
+
+/**
+ * smp2p_direction_input - Sets GPIO direction to input.
+ *
+ * @cp:      GPIO chip pointer
+ * @offset:  Pin offset
+ * @returns: 0 for success; < 0 for failure
+ */
+static int smp2p_direction_input(struct gpio_chip *cp, unsigned offset)
+{
+	struct smp2p_chip_dev *chip;
+
+	if (!cp)
+		return -ENODEV;
+
+	chip = container_of(cp, struct smp2p_chip_dev, gpio);
+	if (!chip->is_inbound)
+		return -EPERM;
+
+	return 0;
+}
+
+/**
+ * smp2p_direction_output - Sets GPIO direction to output.
+ *
+ * @cp:      GPIO chip pointer
+ * @offset:  Pin offset
+ * @value:   Direction
+ * @returns: 0 for success; < 0 for failure
+ */
+static int smp2p_direction_output(struct gpio_chip *cp,
+	unsigned offset, int value)
+{
+	struct smp2p_chip_dev *chip;
+
+	if (!cp)
+		return -ENODEV;
+
+	chip = container_of(cp, struct smp2p_chip_dev, gpio);
+	if (chip->is_inbound)
+		return -EPERM;
+
+	return 0;
+}
+
+/**
+ * smp2p_gpio_to_irq - Convert GPIO pin to virtual IRQ pin.
+ *
+ * @cp:      GPIO chip pointer
+ * @offset:  Pin offset
+ * @returns: >0 for virtual irq value; < 0 for failure
+ */
+static int smp2p_gpio_to_irq(struct gpio_chip *cp, unsigned offset)
+{
+	struct smp2p_chip_dev *chip;
+
+	if (!cp)
+		return -ENODEV;
+
+	chip = container_of(cp, struct smp2p_chip_dev, gpio);
+	if (chip->irq_base <= 0)
+		return -ENODEV;
+
+	return chip->irq_base + offset;
+}
+
+/**
+ * smp2p_gpio_irq_mask_helper - Mask/Unmask interrupt.
+ *
+ * @d:    IRQ data
+ * @mask: true to mask (disable), false to unmask (enable)
+ */
+static void smp2p_gpio_irq_mask_helper(struct irq_data *d, bool mask)
+{
+	struct smp2p_chip_dev *chip;
+	int offset;
+	unsigned long flags;
+
+	chip = (struct smp2p_chip_dev *)irq_get_chip_data(d->irq);
+	if (!chip || chip->irq_base <= 0)
+		return;
+
+	offset = d->irq - chip->irq_base;
+	spin_lock_irqsave(&chip->irq_lock, flags);
+	if (mask)
+		clear_bit(offset, chip->irq_enabled);
+	else
+		set_bit(offset, chip->irq_enabled);
+	spin_unlock_irqrestore(&chip->irq_lock, flags);
+}
+
+/**
+ * smp2p_gpio_irq_mask - Mask interrupt.
+ *
+ * @d: IRQ data
+ */
+static void smp2p_gpio_irq_mask(struct irq_data *d)
+{
+	smp2p_gpio_irq_mask_helper(d, true);
+}
+
+/**
+ * smp2p_gpio_irq_unmask - Unmask interrupt.
+ *
+ * @d: IRQ data
+ */
+static void smp2p_gpio_irq_unmask(struct irq_data *d)
+{
+	smp2p_gpio_irq_mask_helper(d, false);
+}
+
+/**
+ * smp2p_gpio_irq_set_type - Set interrupt edge type.
+ *
+ * @d:      IRQ data
+ * @type:   Edge type for interrupt
+ * @returns 0 for success; < 0 for failure
+ */
+static int smp2p_gpio_irq_set_type(struct irq_data *d, unsigned int type)
+{
+	struct smp2p_chip_dev *chip;
+	int offset;
+	unsigned long flags;
+	int ret = 0;
+
+	chip = (struct smp2p_chip_dev *)irq_get_chip_data(d->irq);
+	if (!chip)
+		return -ENODEV;
+
+	if (chip->irq_base <= 0) {
+		SMP2P_ERR("%s: '%s':%d virqbase %d invalid\n",
+			__func__, chip->name, chip->remote_pid,
+			chip->irq_base);
+		return -ENODEV;
+	}
+
+	offset = d->irq - chip->irq_base;
+
+	spin_lock_irqsave(&chip->irq_lock, flags);
+	clear_bit(offset, chip->irq_rising_edge);
+	clear_bit(offset, chip->irq_falling_edge);
+	switch (type) {
+	case IRQ_TYPE_EDGE_RISING:
+		set_bit(offset, chip->irq_rising_edge);
+		break;
+
+	case IRQ_TYPE_EDGE_FALLING:
+		set_bit(offset, chip->irq_falling_edge);
+		break;
+
+	case IRQ_TYPE_NONE:
+	case IRQ_TYPE_DEFAULT:
+	case IRQ_TYPE_EDGE_BOTH:
+		set_bit(offset, chip->irq_rising_edge);
+		set_bit(offset, chip->irq_falling_edge);
+		break;
+
+	default:
+		SMP2P_ERR("%s: unsupported interrupt type 0x%x\n",
+				__func__, type);
+		ret = -EINVAL;
+		break;
+	}
+	spin_unlock_irqrestore(&chip->irq_lock, flags);
+	return ret;
+}
+
+/**
+ * smp2p_irq_map - Creates or updates binding of virtual IRQ
+ *
+ * @domain_ptr: Interrupt domain pointer
+ * @virq:       Virtual IRQ
+ * @hw:         Hardware IRQ (the GPIO bit offset within the entry)
+ * @returns:    0 for success
+ */
+static int smp2p_irq_map(struct irq_domain *domain_ptr, unsigned int virq,
+	irq_hw_number_t hw)
+{
+	struct smp2p_chip_dev *chip;
+
+	chip = domain_ptr->host_data;
+	if (!chip) {
+		SMP2P_ERR("%s: invalid domain ptr\n", __func__);
+		return -ENODEV;
+	}
+
+	/* map chip structures to device */
+	irq_set_lockdep_class(virq, &smp2p_gpio_lock_class);
+	irq_set_chip_and_handler(virq, &smp2p_gpio_irq_chip,
+				 handle_level_irq);
+	irq_set_chip_data(virq, chip);
+
+	return 0;
+}
+
+static struct irq_chip smp2p_gpio_irq_chip = {
+	.name = "smp2p_gpio",
+	.irq_mask = smp2p_gpio_irq_mask,
+	.irq_unmask = smp2p_gpio_irq_unmask,
+	.irq_set_type = smp2p_gpio_irq_set_type,
+};
+
+/* Interrupt domain; virqs are allocated and associated in smp2p_add_irq_domain() */
+static const struct irq_domain_ops smp2p_irq_domain_ops = {
+	.map = smp2p_irq_map,
+};
+
+/**
+ * msm_summary_irq_handler - Handles inbound entry change notification.
+ *
+ * @chip:  GPIO chip pointer
+ * @entry: Change notification data
+ *
+ * Whenever an entry changes, this callback is triggered to determine
+ * which bits changed and if the corresponding interrupts need to be
+ * triggered.
+ */
+static void msm_summary_irq_handler(struct smp2p_chip_dev *chip,
+	struct msm_smp2p_update_notif *entry)
+{
+	int i;
+	uint32_t cur_val;
+	uint32_t prev_val;
+	uint32_t edge;
+	unsigned long flags;
+	bool trigger_interrupt;
+	bool irq_rising;
+	bool irq_falling;
+
+	cur_val = entry->current_value;
+	prev_val = entry->previous_value;
+
+	if (chip->irq_base <= 0)
+		return;
+
+	SMP2P_GPIO("'%s':%d GPIO Summary IRQ Change %08x->%08x\n",
+			chip->name, chip->remote_pid, prev_val, cur_val);
+
+	for (i = 0; i < SMP2P_BITS_PER_ENTRY; ++i) {
+		spin_lock_irqsave(&chip->irq_lock, flags);
+		trigger_interrupt = false;
+		edge = (prev_val & 0x1) << 1 | (cur_val & 0x1);
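+		/*
+		 * edge is a two-bit (previous, current) pair indexing
+		 * edge_names[]: 0b01 = rising (0->1), 0b10 = falling (1->0),
+		 * 0b00/0b11 = no transition.
+		 */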
+		irq_rising = test_bit(i, chip->irq_rising_edge);
+		irq_falling = test_bit(i, chip->irq_falling_edge);
+
+		if (test_bit(i, chip->irq_enabled)) {
+			if (edge == 0x1 && irq_rising)
+				/* 0->1 transition */
+				trigger_interrupt = true;
+			else if (edge == 0x2 && irq_falling)
+				/* 1->0 transition */
+				trigger_interrupt = true;
+		} else {
+			SMP2P_GPIO(
+				"'%s':%d GPIO bit %d virq %d (%s,%s) - edge %s disabled\n",
+				chip->name, chip->remote_pid, i,
+				chip->irq_base + i,
+				edge_name_rising[irq_rising],
+				edge_name_falling[irq_falling],
+				edge_names[edge]);
+		}
+		spin_unlock_irqrestore(&chip->irq_lock, flags);
+
+		if (trigger_interrupt) {
+			SMP2P_INFO(
+				"'%s':%d GPIO bit %d virq %d (%s,%s) - edge %s triggering\n",
+				chip->name, chip->remote_pid, i,
+				chip->irq_base + i,
+				edge_name_rising[irq_rising],
+				edge_name_falling[irq_falling],
+				edge_names[edge]);
+			(void)generic_handle_irq(chip->irq_base + i);
+		}
+
+		cur_val >>= 1;
+		prev_val >>= 1;
+	}
+}
+
+/**
+ * Adds an interrupt domain based upon the DT node.
+ *
+ * @chip: pointer to GPIO chip
+ * @node: pointer to Device Tree node
+ */
+static void smp2p_add_irq_domain(struct smp2p_chip_dev *chip,
+	struct device_node *node)
+{
+	int irq_base;
+
+	/* map GPIO pins to interrupts */
+	chip->irq_domain = irq_domain_add_linear(node, SMP2P_BITS_PER_ENTRY,
+			&smp2p_irq_domain_ops, chip);
+	if (!chip->irq_domain) {
+		SMP2P_ERR("%s: unable to create interrupt domain '%s':%d\n",
+				__func__, chip->name, chip->remote_pid);
+		goto domain_fail;
+	}
+
+	/* alloc a contiguous set of virt irqs from anywhere in the irq space */
+	irq_base = irq_alloc_descs_from(0, SMP2P_BITS_PER_ENTRY, of_node_to_nid(
+				irq_domain_get_of_node(chip->irq_domain)));
+	if (irq_base < 0) {
+		SMP2P_ERR("alloc virt irqs failed:%d name:%s pid%d\n", irq_base,
+						chip->name, chip->remote_pid);
+		goto irq_alloc_fail;
+	}
+
+	/* map the allocated irqs to gpios */
+	irq_domain_associate_many(chip->irq_domain, irq_base, 0,
+				  SMP2P_BITS_PER_ENTRY);
+
+	chip->irq_base = irq_base;
+	SMP2P_DBG("create mapping:%d name:%s pid:%d\n", chip->irq_base,
+						chip->name, chip->remote_pid);
+	return;
+
+irq_alloc_fail:
+	irq_domain_remove(chip->irq_domain);
+domain_fail:
+	return;
+}
+
+/**
+ * Notifier function passed into smp2p API for out bound entries.
+ *
+ * @self:       Pointer to calling notifier block
+ * @event:      Event
+ * @data:       Event-specific data
+ * @returns:    0
+ */
+static int smp2p_gpio_out_notify(struct notifier_block *self,
+		unsigned long event, void *data)
+{
+	struct smp2p_chip_dev *chip;
+
+	chip = container_of(self, struct smp2p_chip_dev, out_notifier);
+
+	switch (event) {
+	case SMP2P_OPEN:
+		chip->is_open = 1;
+		SMP2P_GPIO("%s: Opened out '%s':%d in_shadow[%d]\n", __func__,
+				chip->name, chip->remote_pid, chip->in_shadow);
+		if (chip->in_shadow)
+			schedule_work(&chip->shadow_work);
+		break;
+	case SMP2P_ENTRY_UPDATE:
+		break;
+	default:
+		SMP2P_ERR("%s: Unknown event\n", __func__);
+		break;
+	}
+	return 0;
+}
+
+/**
+ * Notifier function passed into smp2p API for in bound entries.
+ *
+ * @self:       Pointer to calling notifier block
+ * @event:      Event
+ * @data:       Event-specific data
+ * @returns:    0
+ */
+static int smp2p_gpio_in_notify(struct notifier_block *self,
+		unsigned long event, void *data)
+{
+	struct smp2p_chip_dev *chip;
+
+	chip = container_of(self, struct smp2p_chip_dev, in_notifier);
+
+	switch (event) {
+	case SMP2P_OPEN:
+		chip->is_open = 1;
+		SMP2P_GPIO("%s: Opened in '%s':%d\n", __func__,
+				chip->name, chip->remote_pid);
+		break;
+	case SMP2P_ENTRY_UPDATE:
+		msm_summary_irq_handler(chip, data);
+		break;
+	default:
+		SMP2P_ERR("%s: Unknown event\n", __func__);
+		break;
+	}
+	return 0;
+}
+
+/**
+ * smp2p_gpio_shadow_worker - Handles shadow updates of an entry.
+ *
+ * @work: Work Item scheduled to handle the shadow updates.
+ */
+static void smp2p_gpio_shadow_worker(struct work_struct *work)
+{
+	struct smp2p_chip_dev *chip;
+	int ret;
+	unsigned long flags;
+
+	chip = container_of(work, struct smp2p_chip_dev, shadow_work);
+	spin_lock_irqsave(&chip->shadow_lock, flags);
+	if (chip->in_shadow) {
+		ret = msm_smp2p_out_modify(chip->out_handle,
+					chip->shadow_value, 0x0, true);
+
+		if (ret)
+			SMP2P_GPIO("'%s':%d shadow val[0x%x] failed(%d)\n",
+					chip->name, chip->remote_pid,
+					chip->shadow_value, ret);
+		else
+			SMP2P_GPIO("'%s':%d shadow val[0x%x]\n",
+					chip->name, chip->remote_pid,
+					chip->shadow_value);
+		chip->shadow_value = 0;
+		chip->in_shadow = false;
+	}
+	spin_unlock_irqrestore(&chip->shadow_lock, flags);
+}
+
+/**
+ * Device tree probe function.
+ *
+ * @pdev:	 Pointer to device tree data.
+ * @returns: 0 on success; -ENODEV otherwise
+ *
+ * Called for each smp2pgpio entry in the device tree.
+ */
+static int smp2p_gpio_probe(struct platform_device *pdev)
+{
+	struct device_node *node;
+	char *key;
+	struct smp2p_chip_dev *chip;
+	const char *name_tmp;
+	unsigned long flags;
+	bool is_test_entry = false;
+	int ret;
+
+	chip = kzalloc(sizeof(struct smp2p_chip_dev), GFP_KERNEL);
+	if (!chip) {
+		SMP2P_ERR("%s: out of memory\n", __func__);
+		ret = -ENOMEM;
+		goto fail;
+	}
+	spin_lock_init(&chip->irq_lock);
+	spin_lock_init(&chip->shadow_lock);
+	INIT_WORK(&chip->shadow_work, smp2p_gpio_shadow_worker);
+
+	/* parse device tree */
+	node = pdev->dev.of_node;
+	key = "qcom,entry-name";
+	ret = of_property_read_string(node, key, &name_tmp);
+	if (ret) {
+		SMP2P_ERR("%s: missing DT key '%s'\n", __func__, key);
+		goto fail;
+	}
+	strlcpy(chip->name, name_tmp, sizeof(chip->name));
+
+	key = "qcom,remote-pid";
+	ret = of_property_read_u32(node, key, &chip->remote_pid);
+	if (ret) {
+		SMP2P_ERR("%s: missing DT key '%s'\n", __func__, key);
+		goto fail;
+	}
+
+	key = "qcom,is-inbound";
+	chip->is_inbound = of_property_read_bool(node, key);
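+
+	/*
+	 * Illustrative DT entry (values hypothetical):
+	 *
+	 *	smp2pgpio-entry {
+	 *		compatible = "qcom,smp2pgpio";
+	 *		qcom,entry-name = "smp2p";
+	 *		qcom,remote-pid = <1>;
+	 *		qcom,is-inbound;
+	 *	};
+	 */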
+
+	/* create virtual GPIO controller */
+	chip->gpio.label = chip->name;
+	chip->gpio.dev = &pdev->dev;
+	chip->gpio.owner = THIS_MODULE;
+	chip->gpio.direction_input = smp2p_direction_input;
+	chip->gpio.get = smp2p_get_value;
+	chip->gpio.direction_output = smp2p_direction_output;
+	chip->gpio.set = smp2p_set_value;
+	chip->gpio.to_irq = smp2p_gpio_to_irq;
+	chip->gpio.base = -1;	/* use dynamic GPIO pin allocation */
+	chip->gpio.ngpio = SMP2P_BITS_PER_ENTRY;
+	ret = gpiochip_add(&chip->gpio);
+	if (ret) {
+		SMP2P_ERR("%s: unable to register GPIO '%s' ret %d\n",
+				__func__, chip->name, ret);
+		goto fail;
+	}
+
+	/*
+	 * Test entries opened by GPIO Test conflict with loopback
+	 * support, so the test entries must be explicitly opened
+	 * in the unit test framework.
+	 */
+	if (strncmp("smp2p", chip->name, SMP2P_MAX_ENTRY_NAME) == 0)
+		is_test_entry = true;
+
+	if (!chip->is_inbound)	{
+		chip->out_notifier.notifier_call = smp2p_gpio_out_notify;
+		if (!is_test_entry) {
+			ret = msm_smp2p_out_open(chip->remote_pid, chip->name,
+					   &chip->out_notifier,
+					   &chip->out_handle);
+			if (ret < 0)
+				goto error;
+		}
+	} else {
+		chip->in_notifier.notifier_call = smp2p_gpio_in_notify;
+		if (!is_test_entry) {
+			ret = msm_smp2p_in_register(chip->remote_pid,
+					chip->name,
+					&chip->in_notifier);
+			if (ret < 0)
+				goto error;
+		}
+	}
+
+	spin_lock_irqsave(&smp2p_entry_lock_lha1, flags);
+	list_add(&chip->entry_list, &smp2p_entry_list);
+	spin_unlock_irqrestore(&smp2p_entry_lock_lha1, flags);
+
+	/*
+	 * Create interrupt domain - note that chip can't be removed from the
+	 * interrupt domain, so chip cannot be deleted after this point.
+	 */
+	if (chip->is_inbound)
+		smp2p_add_irq_domain(chip, node);
+	else
+		chip->irq_base = -1;
+
+	SMP2P_GPIO("%s: added %s%s entry '%s':%d gpio %d irq %d\n",
+			__func__,
+			is_test_entry ? "test " : "",
+			chip->is_inbound ? "in" : "out",
+			chip->name, chip->remote_pid,
+			chip->gpio.base, chip->irq_base);
+
+	return 0;
+error:
+	gpiochip_remove(&chip->gpio);
+
+fail:
+	kfree(chip);
+	return ret;
+}
+
+/**
+ * smp2p_gpio_open_close - Opens or closes entry.
+ *
+ * @entry:   Entry to open or close
+ * @do_open: true = open port; false = close
+ */
+static void smp2p_gpio_open_close(struct smp2p_chip_dev *entry,
+	bool do_open)
+{
+	int ret;
+
+	if (do_open) {
+		/* open entry */
+		if (entry->is_inbound)
+			ret = msm_smp2p_in_register(entry->remote_pid,
+					entry->name, &entry->in_notifier);
+		else
+			ret = msm_smp2p_out_open(entry->remote_pid,
+					entry->name, &entry->out_notifier,
+					&entry->out_handle);
+		SMP2P_GPIO("%s: opened %s '%s':%d ret %d\n",
+				__func__,
+				entry->is_inbound ? "in" : "out",
+				entry->name, entry->remote_pid,
+				ret);
+	} else {
+		/* close entry */
+		if (entry->is_inbound)
+			ret = msm_smp2p_in_unregister(entry->remote_pid,
+					entry->name, &entry->in_notifier);
+		else
+			ret = msm_smp2p_out_close(&entry->out_handle);
+		entry->is_open = false;
+		SMP2P_GPIO("%s: closed %s '%s':%d ret %d\n",
+				__func__,
+				entry->is_inbound ? "in" : "out",
+				entry->name, entry->remote_pid, ret);
+	}
+}
+
+/**
+ * smp2p_gpio_open_test_entry - Opens or closes test entries for unit testing.
+ *
+ * @name:       Name of the entry
+ * @remote_pid: Remote processor ID
+ * @do_open:    true = open port; false = close
+ */
+void smp2p_gpio_open_test_entry(const char *name, int remote_pid, bool do_open)
+{
+	struct smp2p_chip_dev *entry;
+	struct smp2p_chip_dev *start_entry;
+	unsigned long flags;
+
+	spin_lock_irqsave(&smp2p_entry_lock_lha1, flags);
+	if (list_empty(&smp2p_entry_list)) {
+		spin_unlock_irqrestore(&smp2p_entry_lock_lha1, flags);
+		return;
+	}
+	start_entry = list_first_entry(&smp2p_entry_list,
+					struct smp2p_chip_dev,
+					entry_list);
+	entry = start_entry;
+	do {
+		if (!strncmp(entry->name, name, SMP2P_MAX_ENTRY_NAME)
+				&& entry->remote_pid == remote_pid) {
+			/* found entry to change */
+			spin_unlock_irqrestore(&smp2p_entry_lock_lha1, flags);
+			smp2p_gpio_open_close(entry, do_open);
+			spin_lock_irqsave(&smp2p_entry_lock_lha1, flags);
+		}
+		list_rotate_left(&smp2p_entry_list);
+		entry = list_first_entry(&smp2p_entry_list,
+						struct smp2p_chip_dev,
+						entry_list);
+	} while (entry != start_entry);
+	spin_unlock_irqrestore(&smp2p_entry_lock_lha1, flags);
+}
+
+static const struct of_device_id msm_smp2p_match_table[] = {
+	{.compatible = "qcom,smp2pgpio", },
+	{},
+};
+
+static struct platform_driver smp2p_gpio_driver = {
+	.probe = smp2p_gpio_probe,
+	.driver = {
+		.name = "smp2pgpio",
+		.owner = THIS_MODULE,
+		.of_match_table = msm_smp2p_match_table,
+	},
+};
+
+static int __init smp2p_init(void)
+{
+	INIT_LIST_HEAD(&smp2p_entry_list);
+	return platform_driver_register(&smp2p_gpio_driver);
+}
+module_init(smp2p_init);
+
+static void __exit smp2p_exit(void)
+{
+	platform_driver_unregister(&smp2p_gpio_driver);
+}
+module_exit(smp2p_exit);
+
+MODULE_DESCRIPTION("SMP2P GPIO");
+MODULE_LICENSE("GPL v2");
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/gpio/qpnp-pin.c	2019-01-22 16:16:23.215243798 +0100
@@ -0,0 +1,1714 @@
+/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/interrupt.h>
+#include <linux/regmap.h>
+#include <linux/types.h>
+#include <linux/spmi.h>
+#include <linux/platform_device.h>
+#include <linux/debugfs.h>
+#include <linux/gpio.h>
+#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/of_gpio.h>
+#include <linux/of_irq.h>
+#include <linux/export.h>
+#include <linux/module.h>
+#include <linux/qpnp/pin.h>
+
+#define Q_REG_ADDR(q_spec, reg_index)	\
+		((q_spec)->offset + reg_index)
+
+#define Q_REG_STATUS1			0x8
+#define Q_REG_STATUS1_VAL_MASK		0x1
+#define Q_REG_STATUS1_GPIO_EN_REV0_MASK	0x2
+#define Q_REG_STATUS1_GPIO_EN_MASK	0x80
+#define Q_REG_STATUS1_MPP_EN_MASK	0x80
+
+#define Q_NUM_CTL_REGS			0xD
+
+/* revision registers base address offsets */
+#define Q_REG_DIG_MINOR_REV		0x0
+#define Q_REG_DIG_MAJOR_REV		0x1
+#define Q_REG_ANA_MINOR_REV		0x2
+
+/* type registers base address offsets */
+#define Q_REG_TYPE			0x4
+#define Q_REG_SUBTYPE			0x5
+
+/* gpio peripheral type and subtype values */
+#define Q_GPIO_TYPE			0x10
+#define Q_GPIO_SUBTYPE_GPIO_4CH		0x1
+#define Q_GPIO_SUBTYPE_GPIOC_4CH	0x5
+#define Q_GPIO_SUBTYPE_GPIO_8CH		0x9
+#define Q_GPIO_SUBTYPE_GPIOC_8CH	0xD
+#define Q_GPIO_SUBTYPE_GPIO_LV		0x10
+#define Q_GPIO_SUBTYPE_GPIO_MV		0x11
+
+/* mpp peripheral type and subtype values */
+#define Q_MPP_TYPE				0x11
+#define Q_MPP_SUBTYPE_4CH_NO_ANA_OUT		0x3
+#define Q_MPP_SUBTYPE_ULT_4CH_NO_ANA_OUT	0x4
+#define Q_MPP_SUBTYPE_4CH_NO_SINK		0x5
+#define Q_MPP_SUBTYPE_ULT_4CH_NO_SINK		0x6
+#define Q_MPP_SUBTYPE_4CH_FULL_FUNC		0x7
+#define Q_MPP_SUBTYPE_8CH_FULL_FUNC		0xF
+
+/* control register base address offsets */
+#define Q_REG_MODE_CTL			0x40
+#define Q_REG_DIG_VIN_CTL		0x41
+#define Q_REG_DIG_PULL_CTL		0x42
+#define Q_REG_DIG_IN_CTL		0x43
+#define Q_REG_DIG_OUT_SRC_CTL		0x44
+#define Q_REG_DIG_OUT_CTL		0x45
+#define Q_REG_EN_CTL			0x46
+#define Q_REG_AOUT_CTL			0x48
+#define Q_REG_AIN_CTL			0x4A
+#define Q_REG_APASS_SEL_CTL		0x4A
+#define Q_REG_SINK_CTL			0x4C
+
+/* control register regs array indices */
+#define Q_REG_I_MODE_CTL		0
+#define Q_REG_I_DIG_VIN_CTL		1
+#define Q_REG_I_DIG_PULL_CTL		2
+#define Q_REG_I_DIG_IN_CTL		3
+#define Q_REG_I_DIG_OUT_SRC_CTL		4
+#define Q_REG_I_DIG_OUT_CTL		5
+#define Q_REG_I_EN_CTL			6
+#define Q_REG_I_AOUT_CTL		8
+#define Q_REG_I_APASS_SEL_CTL		10
+#define Q_REG_I_AIN_CTL			10
+#define Q_REG_I_SINK_CTL		12
+
+/* control reg: mode */
+#define Q_REG_OUT_INVERT_SHIFT		0
+#define Q_REG_OUT_INVERT_MASK		0x1
+#define Q_REG_SRC_SEL_SHIFT		1
+#define Q_REG_SRC_SEL_MASK		0xE
+#define Q_REG_MODE_SEL_SHIFT		4
+#define Q_REG_MODE_SEL_MASK		0x70
+#define Q_REG_LV_MV_MODE_SEL_SHIFT	0
+#define Q_REG_LV_MV_MODE_SEL_MASK	0x3
+
+/* control reg: dig_out_src (GPIO LV/MV only) */
+#define Q_REG_DIG_OUT_SRC_SRC_SEL_SHIFT 0
+#define Q_REG_DIG_OUT_SRC_SRC_SEL_MASK	0xF
+#define Q_REG_DIG_OUT_SRC_INVERT_SHIFT	7
+#define Q_REG_DIG_OUT_SRC_INVERT_MASK	0x80
+
+/* control reg: dig_vin */
+#define Q_REG_VIN_SHIFT			0
+#define Q_REG_VIN_MASK			0x7
+
+/* control reg: dig_pull */
+#define Q_REG_PULL_SHIFT		0
+#define Q_REG_PULL_MASK			0x7
+
+/* control reg: dig_out */
+#define Q_REG_OUT_STRENGTH_SHIFT	0
+#define Q_REG_OUT_STRENGTH_MASK		0x3
+#define Q_REG_OUT_TYPE_SHIFT		4
+#define Q_REG_OUT_TYPE_MASK		0x30
+
+/* control reg: dig_in_ctl */
+#define Q_REG_DTEST_SEL_SHIFT			0
+#define Q_REG_DTEST_SEL_MASK			0xF
+#define Q_REG_LV_MV_DTEST_SEL_CFG_SHIFT		0
+#define Q_REG_LV_MV_DTEST_SEL_CFG_MASK		0x7
+#define Q_REG_LV_MV_DTEST_SEL_EN_SHIFT		7
+#define Q_REG_LV_MV_DTEST_SEL_EN_MASK		0x80
+
+/* control reg: en */
+#define Q_REG_MASTER_EN_SHIFT		7
+#define Q_REG_MASTER_EN_MASK		0x80
+
+/* control reg: ana_out */
+#define Q_REG_AOUT_REF_SHIFT		0
+#define Q_REG_AOUT_REF_MASK		0x7
+
+/* control reg: ana_in */
+#define Q_REG_AIN_ROUTE_SHIFT		0
+#define Q_REG_AIN_ROUTE_MASK		0x7
+
+/* control reg: sink */
+#define Q_REG_CS_OUT_SHIFT		0
+#define Q_REG_CS_OUT_MASK		0x7
+
+/* control ref: apass_sel */
+#define Q_REG_APASS_SEL_SHIFT		0
+#define Q_REG_APASS_SEL_MASK		0x3
+
+enum qpnp_pin_param_type {
+	Q_PIN_CFG_MODE,
+	Q_PIN_CFG_OUTPUT_TYPE,
+	Q_PIN_CFG_INVERT,
+	Q_PIN_CFG_PULL,
+	Q_PIN_CFG_VIN_SEL,
+	Q_PIN_CFG_OUT_STRENGTH,
+	Q_PIN_CFG_SRC_SEL,
+	Q_PIN_CFG_MASTER_EN,
+	Q_PIN_CFG_AOUT_REF,
+	Q_PIN_CFG_AIN_ROUTE,
+	Q_PIN_CFG_CS_OUT,
+	Q_PIN_CFG_APASS_SEL,
+	Q_PIN_CFG_DTEST_SEL,
+	Q_PIN_CFG_INVALID,
+};
+
+#define Q_NUM_PARAMS			Q_PIN_CFG_INVALID
+
+/* param error checking */
+#define QPNP_PIN_GPIO_MODE_INVALID		3
+#define QPNP_PIN_GPIO_LV_MV_MODE_INVALID	4
+#define QPNP_PIN_MPP_MODE_INVALID		7
+#define QPNP_PIN_INVERT_INVALID			2
+#define QPNP_PIN_OUT_BUF_INVALID		3
+#define QPNP_PIN_GPIO_LV_MV_OUT_BUF_INVALID	4
+#define QPNP_PIN_VIN_4CH_INVALID		5
+#define QPNP_PIN_VIN_8CH_INVALID		8
+#define QPNP_PIN_GPIO_LV_VIN_INVALID		1
+#define QPNP_PIN_GPIO_MV_VIN_INVALID		2
+#define QPNP_PIN_GPIO_PULL_INVALID		6
+#define QPNP_PIN_MPP_PULL_INVALID		4
+#define QPNP_PIN_OUT_STRENGTH_INVALID		4
+#define QPNP_PIN_SRC_INVALID			8
+#define QPNP_PIN_GPIO_LV_MV_SRC_INVALID		16
+#define QPNP_PIN_MASTER_INVALID			2
+#define QPNP_PIN_AOUT_REF_INVALID		8
+#define QPNP_PIN_AIN_ROUTE_INVALID		8
+#define QPNP_PIN_CS_OUT_INVALID			8
+#define QPNP_PIN_APASS_SEL_INVALID		4
+#define QPNP_PIN_DTEST_SEL_INVALID		4
+
+struct qpnp_pin_spec {
+	uint8_t slave;			/* 0-15 */
+	uint16_t offset;		/* 0-255 */
+	uint32_t gpio_chip_idx;		/* offset from gpio_chip base */
+	uint32_t pmic_pin;		/* PMIC pin number */
+	int irq;			/* logical IRQ number */
+	u8 regs[Q_NUM_CTL_REGS];	/* Control regs */
+	u8 num_ctl_regs;		/* usable number on this pin */
+	u8 type;			/* peripheral type */
+	u8 subtype;			/* peripheral subtype */
+	u8 dig_major_rev;
+	struct device_node *node;
+	enum qpnp_pin_param_type params[Q_NUM_PARAMS];
+	struct qpnp_pin_chip *q_chip;
+};
+
+struct qpnp_pin_chip {
+	struct gpio_chip	gpio_chip;
+	struct platform_device	*pdev;
+	struct regmap		*regmap;
+	struct qpnp_pin_spec	**pmic_pins;
+	struct qpnp_pin_spec	**chip_gpios;
+	uint32_t		pmic_pin_lowest;
+	uint32_t		pmic_pin_highest;
+	struct device_node	*int_ctrl;
+	struct list_head	chip_list;
+	struct dentry		*dfs_dir;
+	bool			chip_registered;
+};
+
+static LIST_HEAD(qpnp_pin_chips);
+static DEFINE_MUTEX(qpnp_pin_chips_lock);
+
+static inline void qpnp_pmic_pin_set_spec(struct qpnp_pin_chip *q_chip,
+					      uint32_t pmic_pin,
+					      struct qpnp_pin_spec *spec)
+{
+	q_chip->pmic_pins[pmic_pin - q_chip->pmic_pin_lowest] = spec;
+}
+
+static inline struct qpnp_pin_spec *qpnp_pmic_pin_get_spec(
+						struct qpnp_pin_chip *q_chip,
+						uint32_t pmic_pin)
+{
+	if (pmic_pin < q_chip->pmic_pin_lowest ||
+	    pmic_pin > q_chip->pmic_pin_highest)
+		return NULL;
+
+	return q_chip->pmic_pins[pmic_pin - q_chip->pmic_pin_lowest];
+}
+
+static inline struct qpnp_pin_spec *qpnp_chip_gpio_get_spec(
+						struct qpnp_pin_chip *q_chip,
+						uint32_t chip_gpio)
+{
+	if (chip_gpio >= q_chip->gpio_chip.ngpio)
+		return NULL;
+
+	return q_chip->chip_gpios[chip_gpio];
+}
+
+static inline void qpnp_chip_gpio_set_spec(struct qpnp_pin_chip *q_chip,
+					      uint32_t chip_gpio,
+					      struct qpnp_pin_spec *spec)
+{
+	q_chip->chip_gpios[chip_gpio] = spec;
+}
+
+static bool is_gpio_lv_mv(struct qpnp_pin_spec *q_spec)
+{
+	if ((q_spec->type == Q_GPIO_TYPE) &&
+		(q_spec->subtype == Q_GPIO_SUBTYPE_GPIO_LV ||
+		q_spec->subtype == Q_GPIO_SUBTYPE_GPIO_MV))
+		return true;
+
+	return false;
+}
+
+/*
+ * Determines whether a specified param's configuration is correct.
+ * This check is two-tiered: first, whether the hardware supports the
+ * requested param and value at all; second, whether the requested value
+ * is within the range the hardware supports.
+ *
+ * Returns
+ *	-ENXIO if the hardware does not support this param.
+ *	-EINVAL if the hardware does support this param, but the
+ *	requested value is outside the supported range.
+ */
+static int qpnp_pin_check_config(enum qpnp_pin_param_type idx,
+				 struct qpnp_pin_spec *q_spec, uint32_t val)
+{
+	u8 subtype = q_spec->subtype;
+
+	switch (idx) {
+	case Q_PIN_CFG_MODE:
+		if (q_spec->type == Q_GPIO_TYPE) {
+			if (is_gpio_lv_mv(q_spec)) {
+				if (val >= QPNP_PIN_GPIO_LV_MV_MODE_INVALID)
+					return -EINVAL;
+			} else if (val >= QPNP_PIN_GPIO_MODE_INVALID) {
+				return -EINVAL;
+			}
+		} else if (q_spec->type == Q_MPP_TYPE) {
+			if (val >= QPNP_PIN_MPP_MODE_INVALID)
+				return -EINVAL;
+			if ((subtype == Q_MPP_SUBTYPE_ULT_4CH_NO_ANA_OUT ||
+			     subtype == Q_MPP_SUBTYPE_ULT_4CH_NO_SINK) &&
+			     (val == QPNP_PIN_MODE_BIDIR))
+				return -ENXIO;
+		}
+		break;
+	case Q_PIN_CFG_OUTPUT_TYPE:
+		if (q_spec->type != Q_GPIO_TYPE)
+			return -ENXIO;
+		if ((val == QPNP_PIN_OUT_BUF_OPEN_DRAIN_NMOS ||
+		    val == QPNP_PIN_OUT_BUF_OPEN_DRAIN_PMOS) &&
+		    (subtype == Q_GPIO_SUBTYPE_GPIOC_4CH ||
+		    (subtype == Q_GPIO_SUBTYPE_GPIOC_8CH)))
+			return -EINVAL;
+		else if (is_gpio_lv_mv(q_spec) &&
+			val >= QPNP_PIN_GPIO_LV_MV_OUT_BUF_INVALID)
+			return -EINVAL;
+		else if (val >= QPNP_PIN_OUT_BUF_INVALID)
+			return -EINVAL;
+		break;
+	case Q_PIN_CFG_INVERT:
+		if (val >= QPNP_PIN_INVERT_INVALID)
+			return -EINVAL;
+		break;
+	case Q_PIN_CFG_PULL:
+		if (q_spec->type == Q_GPIO_TYPE &&
+		    val >= QPNP_PIN_GPIO_PULL_INVALID)
+			return -EINVAL;
+		if (q_spec->type == Q_MPP_TYPE) {
+			if (val >= QPNP_PIN_MPP_PULL_INVALID)
+				return -EINVAL;
+			if (subtype == Q_MPP_SUBTYPE_ULT_4CH_NO_ANA_OUT ||
+			    subtype == Q_MPP_SUBTYPE_ULT_4CH_NO_SINK)
+				return -ENXIO;
+		}
+		break;
+	case Q_PIN_CFG_VIN_SEL:
+		if (is_gpio_lv_mv(q_spec)) {
+			if (subtype == Q_GPIO_SUBTYPE_GPIO_LV) {
+				if (val >= QPNP_PIN_GPIO_LV_VIN_INVALID)
+					return -EINVAL;
+			} else {
+				if (val >= QPNP_PIN_GPIO_MV_VIN_INVALID)
+					return -EINVAL;
+			}
+		} else if (val >= QPNP_PIN_VIN_8CH_INVALID) {
+			return -EINVAL;
+		} else if (val >= QPNP_PIN_VIN_4CH_INVALID) {
+			if (q_spec->type == Q_GPIO_TYPE &&
+			   (subtype == Q_GPIO_SUBTYPE_GPIO_4CH ||
+			    subtype == Q_GPIO_SUBTYPE_GPIOC_4CH))
+				return -EINVAL;
+			if (q_spec->type == Q_MPP_TYPE &&
+			   (subtype == Q_MPP_SUBTYPE_4CH_NO_ANA_OUT ||
+			    subtype == Q_MPP_SUBTYPE_4CH_NO_SINK ||
+			    subtype == Q_MPP_SUBTYPE_4CH_FULL_FUNC ||
+			    subtype == Q_MPP_SUBTYPE_ULT_4CH_NO_ANA_OUT ||
+			    subtype == Q_MPP_SUBTYPE_ULT_4CH_NO_SINK))
+				return -EINVAL;
+		}
+		break;
+	case Q_PIN_CFG_OUT_STRENGTH:
+		if (q_spec->type != Q_GPIO_TYPE)
+			return -ENXIO;
+		if (val >= QPNP_PIN_OUT_STRENGTH_INVALID ||
+		    val == 0)
+			return -EINVAL;
+		break;
+	case Q_PIN_CFG_SRC_SEL:
+		if (q_spec->type == Q_MPP_TYPE &&
+		    (val == QPNP_PIN_SEL_FUNC_1 ||
+		     val == QPNP_PIN_SEL_FUNC_2))
+			return -EINVAL;
+		if (is_gpio_lv_mv(q_spec)) {
+			if (val >= QPNP_PIN_GPIO_LV_MV_SRC_INVALID)
+				return -EINVAL;
+		} else if (val >= QPNP_PIN_SRC_INVALID) {
+			return -EINVAL;
+		}
+		break;
+	case Q_PIN_CFG_MASTER_EN:
+		if (val >= QPNP_PIN_MASTER_INVALID)
+			return -EINVAL;
+		break;
+	case Q_PIN_CFG_AOUT_REF:
+		if (q_spec->type != Q_MPP_TYPE)
+			return -ENXIO;
+		if (subtype == Q_MPP_SUBTYPE_4CH_NO_ANA_OUT ||
+		    subtype == Q_MPP_SUBTYPE_ULT_4CH_NO_ANA_OUT)
+			return -ENXIO;
+		if (val >= QPNP_PIN_AOUT_REF_INVALID)
+			return -EINVAL;
+		break;
+	case Q_PIN_CFG_AIN_ROUTE:
+		if (q_spec->type != Q_MPP_TYPE)
+			return -ENXIO;
+		if (val >= QPNP_PIN_AIN_ROUTE_INVALID)
+			return -EINVAL;
+		break;
+	case Q_PIN_CFG_CS_OUT:
+		if (q_spec->type != Q_MPP_TYPE)
+			return -ENXIO;
+		if (subtype == Q_MPP_SUBTYPE_4CH_NO_SINK ||
+		    subtype == Q_MPP_SUBTYPE_ULT_4CH_NO_SINK)
+			return -ENXIO;
+		if (val >= QPNP_PIN_CS_OUT_INVALID)
+			return -EINVAL;
+		break;
+	case Q_PIN_CFG_APASS_SEL:
+		if (!is_gpio_lv_mv(q_spec))
+			return -ENXIO;
+		if (val >= QPNP_PIN_APASS_SEL_INVALID)
+			return -EINVAL;
+		break;
+	case Q_PIN_CFG_DTEST_SEL:
+		if (val > QPNP_PIN_DTEST_SEL_INVALID)
+			return -EINVAL;
+		break;
+	default:
+		pr_err("invalid param type %u specified\n", idx);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+#define Q_CHK_INVALID(idx, q_spec, val) \
+	(qpnp_pin_check_config(idx, q_spec, val) == -EINVAL)
+
+static int qpnp_pin_check_constraints(struct qpnp_pin_spec *q_spec,
+				      struct qpnp_pin_cfg *param)
+{
+	int pin = q_spec->pmic_pin;
+	const char *name;
+
+	name = (q_spec->type == Q_GPIO_TYPE) ? "gpio" : "mpp";
+
+	if (Q_CHK_INVALID(Q_PIN_CFG_MODE, q_spec, param->mode))
+		pr_err("invalid direction value %d for %s %d\n",
+						param->mode, name, pin);
+	else if (Q_CHK_INVALID(Q_PIN_CFG_INVERT, q_spec, param->invert))
+		pr_err("invalid invert polarity value %d for %s %d\n",
+						param->invert,  name, pin);
+	else if (Q_CHK_INVALID(Q_PIN_CFG_SRC_SEL, q_spec, param->src_sel))
+		pr_err("invalid source select value %d for %s %d\n",
+						param->src_sel, name, pin);
+	else if (Q_CHK_INVALID(Q_PIN_CFG_OUT_STRENGTH,
+						q_spec, param->out_strength))
+		pr_err("invalid out strength value %d for %s %d\n",
+					param->out_strength,  name, pin);
+	else if (Q_CHK_INVALID(Q_PIN_CFG_OUTPUT_TYPE,
+						 q_spec, param->output_type))
+		pr_err("invalid out type value %d for %s %d\n",
+					param->output_type,  name, pin);
+	else if (Q_CHK_INVALID(Q_PIN_CFG_VIN_SEL, q_spec, param->vin_sel))
+		pr_err("invalid vin select %d value for %s %d\n",
+						param->vin_sel, name, pin);
+	else if (Q_CHK_INVALID(Q_PIN_CFG_PULL, q_spec, param->pull))
+		pr_err("invalid pull value %d for pin %s %d\n",
+						param->pull,  name, pin);
+	else if (Q_CHK_INVALID(Q_PIN_CFG_MASTER_EN, q_spec, param->master_en))
+		pr_err("invalid master_en value %d for %s %d\n",
+						param->master_en, name, pin);
+	else if (Q_CHK_INVALID(Q_PIN_CFG_AOUT_REF, q_spec, param->aout_ref))
+		pr_err("invalid aout_reg value %d for %s %d\n",
+						param->aout_ref, name, pin);
+	else if (Q_CHK_INVALID(Q_PIN_CFG_AIN_ROUTE, q_spec, param->ain_route))
+		pr_err("invalid ain_route value %d for %s %d\n",
+						param->ain_route, name, pin);
+	else if (Q_CHK_INVALID(Q_PIN_CFG_CS_OUT, q_spec, param->cs_out))
+		pr_err("invalid cs_out value %d for %s %d\n",
+						param->cs_out, name, pin);
+	else if (Q_CHK_INVALID(Q_PIN_CFG_APASS_SEL, q_spec, param->apass_sel))
+		pr_err("invalid apass_sel value %d for %s %d\n",
+						param->apass_sel, name, pin);
+	else if (Q_CHK_INVALID(Q_PIN_CFG_DTEST_SEL, q_spec, param->dtest_sel))
+		pr_err("invalid dtest_sel value %d for %s %d\n",
+					param->dtest_sel, name, pin);
+	else
+		return 0;
+
+	return -EINVAL;
+}
+
+static inline u8 q_reg_get(u8 *reg, int shift, int mask)
+{
+	return (*reg & mask) >> shift;
+}
+
+static inline void q_reg_set(u8 *reg, int shift, int mask, int value)
+{
+	*reg |= (value << shift) & mask;
+}
+
+static inline void q_reg_clr_set(u8 *reg, int shift, int mask, int value)
+{
+	*reg &= ~mask;
+	*reg |= (value << shift) & mask;
+}
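+
+/*
+ * Example: with Q_REG_MODE_SEL_SHIFT = 4 and Q_REG_MODE_SEL_MASK = 0x70,
+ * q_reg_clr_set(&reg, 4, 0x70, 2) clears bits [6:4] of reg and ORs in
+ * (2 << 4) & 0x70 = 0x20, leaving the other bits untouched.
+ */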
+
+/*
+ * Calculate the minimum number of registers that must be read / written
+ * in order to satisfy the full feature set of the given pin.
+ */
+static int qpnp_pin_ctl_regs_init(struct qpnp_pin_spec *q_spec)
+{
+	if (q_spec->type == Q_GPIO_TYPE) {
+		if (is_gpio_lv_mv(q_spec))
+			q_spec->num_ctl_regs = 11;
+		else
+			q_spec->num_ctl_regs = 7;
+	} else if (q_spec->type == Q_MPP_TYPE) {
+		switch (q_spec->subtype) {
+		case Q_MPP_SUBTYPE_4CH_NO_SINK:
+		case Q_MPP_SUBTYPE_ULT_4CH_NO_SINK:
+			q_spec->num_ctl_regs = 12;
+			break;
+		case Q_MPP_SUBTYPE_4CH_NO_ANA_OUT:
+		case Q_MPP_SUBTYPE_ULT_4CH_NO_ANA_OUT:
+		case Q_MPP_SUBTYPE_4CH_FULL_FUNC:
+		case Q_MPP_SUBTYPE_8CH_FULL_FUNC:
+			q_spec->num_ctl_regs = 13;
+			break;
+		default:
+			pr_err("Invalid MPP subtype 0x%x\n", q_spec->subtype);
+			return -EINVAL;
+		}
+	} else {
+		pr_err("Invalid type 0x%x\n", q_spec->type);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static int qpnp_pin_read_regs(struct qpnp_pin_chip *q_chip,
+			      struct qpnp_pin_spec *q_spec)
+{
+	int bytes_left = q_spec->num_ctl_regs;
+	int rc;
+	char *buf_p = &q_spec->regs[0];
+	u16 reg_addr = Q_REG_ADDR(q_spec, Q_REG_MODE_CTL);
+
+	while (bytes_left > 0) {
+		rc = regmap_bulk_read(q_chip->regmap, reg_addr, buf_p,
+				      bytes_left < 8 ? bytes_left : 8);
+		if (rc)
+			return rc;
+		bytes_left -= 8;
+		buf_p += 8;
+		reg_addr += 8;
+	}
+	return 0;
+}
+
+static int qpnp_pin_write_regs(struct qpnp_pin_chip *q_chip,
+			       struct qpnp_pin_spec *q_spec)
+{
+	int bytes_left = q_spec->num_ctl_regs;
+	int rc;
+	char *buf_p = &q_spec->regs[0];
+	u16 reg_addr = Q_REG_ADDR(q_spec, Q_REG_MODE_CTL);
+
+	while (bytes_left > 0) {
+		rc = regmap_bulk_write(q_chip->regmap, reg_addr, buf_p,
+				       bytes_left < 8 ? bytes_left : 8);
+		if (rc)
+			return rc;
+		bytes_left -= 8;
+		buf_p += 8;
+		reg_addr += 8;
+	}
+	return 0;
+}
+
+static int qpnp_pin_cache_regs(struct qpnp_pin_chip *q_chip,
+			       struct qpnp_pin_spec *q_spec)
+{
+	int rc;
+	struct device *dev = &q_chip->pdev->dev;
+
+	rc = qpnp_pin_read_regs(q_chip, q_spec);
+	if (rc)
+		dev_err(dev, "%s: unable to read control regs\n", __func__);
+
+	return rc;
+}
+
+#define Q_HAVE_HW_SP(idx, q_spec, val) \
+	(qpnp_pin_check_config(idx, q_spec, val) == 0)
+
+static int _qpnp_pin_config(struct qpnp_pin_chip *q_chip,
+			    struct qpnp_pin_spec *q_spec,
+			    struct qpnp_pin_cfg *param)
+{
+	struct device *dev = &q_chip->pdev->dev;
+	int rc;
+	u8 shift, mask, *reg;
+
+	rc = qpnp_pin_check_constraints(q_spec, param);
+	if (rc)
+		goto gpio_cfg;
+
+	/* set mode */
+	if (Q_HAVE_HW_SP(Q_PIN_CFG_MODE, q_spec, param->mode)) {
+		if (is_gpio_lv_mv(q_spec)) {
+			shift = Q_REG_LV_MV_MODE_SEL_SHIFT;
+			mask = Q_REG_LV_MV_MODE_SEL_MASK;
+		} else {
+			shift = Q_REG_MODE_SEL_SHIFT;
+			mask = Q_REG_MODE_SEL_MASK;
+		}
+		q_reg_clr_set(&q_spec->regs[Q_REG_I_MODE_CTL],
+			shift, mask, param->mode);
+	}
+
+	/* output specific configuration */
+	if (Q_HAVE_HW_SP(Q_PIN_CFG_INVERT, q_spec, param->invert)) {
+		if (is_gpio_lv_mv(q_spec)) {
+			shift = Q_REG_DIG_OUT_SRC_INVERT_SHIFT;
+			mask = Q_REG_DIG_OUT_SRC_INVERT_MASK;
+			reg = &q_spec->regs[Q_REG_I_DIG_OUT_SRC_CTL];
+		} else {
+			shift = Q_REG_OUT_INVERT_SHIFT;
+			mask = Q_REG_OUT_INVERT_MASK;
+			reg = &q_spec->regs[Q_REG_I_MODE_CTL];
+		}
+		q_reg_clr_set(reg, shift, mask, param->invert);
+	}
+
+	if (Q_HAVE_HW_SP(Q_PIN_CFG_SRC_SEL, q_spec, param->src_sel)) {
+		if (is_gpio_lv_mv(q_spec)) {
+			shift = Q_REG_DIG_OUT_SRC_SRC_SEL_SHIFT;
+			mask = Q_REG_DIG_OUT_SRC_SRC_SEL_MASK;
+			reg = &q_spec->regs[Q_REG_I_DIG_OUT_SRC_CTL];
+		} else {
+			shift = Q_REG_SRC_SEL_SHIFT;
+			mask = Q_REG_SRC_SEL_MASK;
+			reg = &q_spec->regs[Q_REG_I_MODE_CTL];
+		}
+		q_reg_clr_set(reg, shift, mask, param->src_sel);
+	}
+
+	if (Q_HAVE_HW_SP(Q_PIN_CFG_OUT_STRENGTH, q_spec, param->out_strength))
+		q_reg_clr_set(&q_spec->regs[Q_REG_I_DIG_OUT_CTL],
+			  Q_REG_OUT_STRENGTH_SHIFT, Q_REG_OUT_STRENGTH_MASK,
+			  param->out_strength);
+	if (Q_HAVE_HW_SP(Q_PIN_CFG_OUTPUT_TYPE, q_spec, param->output_type))
+		q_reg_clr_set(&q_spec->regs[Q_REG_I_DIG_OUT_CTL],
+			  Q_REG_OUT_TYPE_SHIFT, Q_REG_OUT_TYPE_MASK,
+			  param->output_type);
+
+	/* input config */
+	if (Q_HAVE_HW_SP(Q_PIN_CFG_DTEST_SEL, q_spec, param->dtest_sel)
+			&& param->dtest_sel) {
+		if (is_gpio_lv_mv(q_spec)) {
+			q_reg_clr_set(&q_spec->regs[Q_REG_I_DIG_IN_CTL],
+					Q_REG_LV_MV_DTEST_SEL_CFG_SHIFT,
+					Q_REG_LV_MV_DTEST_SEL_CFG_MASK,
+					param->dtest_sel - 1);
+			q_reg_clr_set(&q_spec->regs[Q_REG_I_DIG_IN_CTL],
+					Q_REG_LV_MV_DTEST_SEL_EN_SHIFT,
+					Q_REG_LV_MV_DTEST_SEL_EN_MASK, 0x1);
+		} else {
+			q_reg_clr_set(&q_spec->regs[Q_REG_I_DIG_IN_CTL],
+					Q_REG_DTEST_SEL_SHIFT,
+					Q_REG_DTEST_SEL_MASK,
+					BIT(param->dtest_sel - 1));
+		}
+	}
+
+	/* config applicable for both input / output */
+	if (Q_HAVE_HW_SP(Q_PIN_CFG_VIN_SEL, q_spec, param->vin_sel))
+		q_reg_clr_set(&q_spec->regs[Q_REG_I_DIG_VIN_CTL],
+			  Q_REG_VIN_SHIFT, Q_REG_VIN_MASK,
+			  param->vin_sel);
+	if (Q_HAVE_HW_SP(Q_PIN_CFG_PULL, q_spec, param->pull))
+		q_reg_clr_set(&q_spec->regs[Q_REG_I_DIG_PULL_CTL],
+			  Q_REG_PULL_SHIFT, Q_REG_PULL_MASK,
+			  param->pull);
+	if (Q_HAVE_HW_SP(Q_PIN_CFG_MASTER_EN, q_spec, param->master_en))
+		q_reg_clr_set(&q_spec->regs[Q_REG_I_EN_CTL],
+			  Q_REG_MASTER_EN_SHIFT, Q_REG_MASTER_EN_MASK,
+			  param->master_en);
+
+	/* mpp specific config */
+	if (Q_HAVE_HW_SP(Q_PIN_CFG_AOUT_REF, q_spec, param->aout_ref))
+		q_reg_clr_set(&q_spec->regs[Q_REG_I_AOUT_CTL],
+			  Q_REG_AOUT_REF_SHIFT, Q_REG_AOUT_REF_MASK,
+			  param->aout_ref);
+	if (Q_HAVE_HW_SP(Q_PIN_CFG_AIN_ROUTE, q_spec, param->ain_route))
+		q_reg_clr_set(&q_spec->regs[Q_REG_I_AIN_CTL],
+			  Q_REG_AIN_ROUTE_SHIFT, Q_REG_AIN_ROUTE_MASK,
+			  param->ain_route);
+	if (Q_HAVE_HW_SP(Q_PIN_CFG_CS_OUT, q_spec, param->cs_out))
+		q_reg_clr_set(&q_spec->regs[Q_REG_I_SINK_CTL],
+			  Q_REG_CS_OUT_SHIFT, Q_REG_CS_OUT_MASK,
+			  param->cs_out);
+	if (Q_HAVE_HW_SP(Q_PIN_CFG_APASS_SEL, q_spec, param->apass_sel))
+		q_reg_clr_set(&q_spec->regs[Q_REG_I_APASS_SEL_CTL],
+			  Q_REG_APASS_SEL_SHIFT, Q_REG_APASS_SEL_MASK,
+			  param->apass_sel);
+
+	rc = qpnp_pin_write_regs(q_chip, q_spec);
+	if (rc) {
+		dev_err(dev, "%s: unable to write control regs\n", __func__);
+		goto gpio_cfg;
+	}
+
+	return 0;
+
+gpio_cfg:
+	dev_err(dev, "%s: unable to set default config for pmic pin %d\n",
+						__func__, q_spec->pmic_pin);
+
+	return rc;
+}
+
+int qpnp_pin_config(int gpio, struct qpnp_pin_cfg *param)
+{
+	int rc, chip_offset;
+	struct qpnp_pin_chip *q_chip;
+	struct qpnp_pin_spec *q_spec = NULL;
+	struct gpio_chip *gpio_chip;
+
+	if (param == NULL)
+		return -EINVAL;
+
+	mutex_lock(&qpnp_pin_chips_lock);
+	list_for_each_entry(q_chip, &qpnp_pin_chips, chip_list) {
+		gpio_chip = &q_chip->gpio_chip;
+		if (gpio >= gpio_chip->base
+				&& gpio < gpio_chip->base + gpio_chip->ngpio) {
+			chip_offset = gpio - gpio_chip->base;
+			q_spec = qpnp_chip_gpio_get_spec(q_chip, chip_offset);
+			if (WARN_ON(!q_spec)) {
+				mutex_unlock(&qpnp_pin_chips_lock);
+				return -ENODEV;
+			}
+			break;
+		}
+	}
+	mutex_unlock(&qpnp_pin_chips_lock);
+
+	if (!q_spec)
+		return -ENODEV;
+
+	rc = _qpnp_pin_config(q_chip, q_spec, param);
+
+	return rc;
+}
+EXPORT_SYMBOL(qpnp_pin_config);
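+
+/*
+ * Usage sketch (illustrative only; 'gpio' must be a number returned by
+ * qpnp_pin_map() or gpiolib for this chip, and the field values are
+ * arbitrary): reconfigure a pin as an enabled digital input.
+ *
+ *	struct qpnp_pin_cfg cfg = {
+ *		.mode = QPNP_PIN_MODE_DIG_IN,
+ *		.master_en = 1,
+ *	};
+ *
+ *	rc = qpnp_pin_config(gpio, &cfg);
+ */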
+
+int qpnp_pin_map(const char *name, uint32_t pmic_pin)
+{
+	struct qpnp_pin_chip *q_chip;
+	struct qpnp_pin_spec *q_spec = NULL;
+
+	if (!name)
+		return -EINVAL;
+
+	mutex_lock(&qpnp_pin_chips_lock);
+	list_for_each_entry(q_chip, &qpnp_pin_chips, chip_list) {
+		if (strcmp(q_chip->gpio_chip.label, name) != 0)
+			continue;
+		if (q_chip->pmic_pin_lowest <= pmic_pin &&
+		    q_chip->pmic_pin_highest >= pmic_pin) {
+			q_spec = qpnp_pmic_pin_get_spec(q_chip, pmic_pin);
+			mutex_unlock(&qpnp_pin_chips_lock);
+			if (WARN_ON(!q_spec))
+				return -ENODEV;
+			return q_chip->gpio_chip.base + q_spec->gpio_chip_idx;
+		}
+	}
+	mutex_unlock(&qpnp_pin_chips_lock);
+	return -EINVAL;
+}
+EXPORT_SYMBOL(qpnp_pin_map);
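+
+/*
+ * Usage sketch (the chip label and pin number are hypothetical): resolve
+ * a named PMIC pin to its Linux GPIO number before requesting it.
+ *
+ *	int gpio = qpnp_pin_map("pm8xxx-gpio", 5);
+ *
+ *	if (gpio >= 0)
+ *		rc = gpio_request(gpio, "my-consumer");
+ */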
+
+static int qpnp_pin_to_irq(struct gpio_chip *gpio_chip, unsigned int offset)
+{
+	struct qpnp_pin_chip *q_chip = dev_get_drvdata(gpio_chip->dev);
+	struct qpnp_pin_spec *q_spec;
+	struct of_phandle_args oirq;
+
+	q_spec = qpnp_chip_gpio_get_spec(q_chip, offset);
+	if (!q_spec)
+		return -EINVAL;
+
+	/* if we have mapped this pin previously, return the virq */
+	if (q_spec->irq)
+		return q_spec->irq;
+
+	/* call into irq_domain to get irq mapping */
+	oirq.np = q_chip->int_ctrl;
+	oirq.args[0] = to_spmi_device(q_chip->pdev->dev.parent)->usid;
+	oirq.args[1] = (q_spec->offset >> 8) & 0xFF;
+	oirq.args[2] = 0;
+	oirq.args[3] = IRQ_TYPE_NONE;
+	oirq.args_count = 4;
+
+	q_spec->irq = irq_create_of_mapping(&oirq);
+	if (!q_spec->irq) {
+		dev_err(&q_chip->pdev->dev, "%s: invalid irq for gpio %u\n",
+						__func__, q_spec->pmic_pin);
+		WARN_ON(1);
+		return -EINVAL;
+	}
+
+	return q_spec->irq;
+}
+
+static int qpnp_pin_get(struct gpio_chip *gpio_chip, unsigned int offset)
+{
+	struct qpnp_pin_chip *q_chip = dev_get_drvdata(gpio_chip->dev);
+	struct qpnp_pin_spec *q_spec = NULL;
+	u8 buf, en_mask, shift, mask, reg;
+	unsigned int val;
+	int rc;
+
+	if (WARN_ON(!q_chip))
+		return -ENODEV;
+
+	q_spec = qpnp_chip_gpio_get_spec(q_chip, offset);
+	if (WARN_ON(!q_spec))
+		return -ENODEV;
+
+	if (is_gpio_lv_mv(q_spec)) {
+		mask = Q_REG_LV_MV_MODE_SEL_MASK;
+		shift = Q_REG_LV_MV_MODE_SEL_SHIFT;
+	} else {
+		mask = Q_REG_MODE_SEL_MASK;
+		shift = Q_REG_MODE_SEL_SHIFT;
+	}
+
+	/* gpio val is from RT status iff input is enabled */
+	if (q_reg_get(&q_spec->regs[Q_REG_I_MODE_CTL], shift, mask)
+					== QPNP_PIN_MODE_DIG_IN) {
+		rc = regmap_read(q_chip->regmap,
+				 Q_REG_ADDR(q_spec, Q_REG_STATUS1), &val);
+		if (rc)
+			return rc;
+		buf = val;
+
+		if (q_spec->type == Q_GPIO_TYPE && q_spec->dig_major_rev == 0)
+			en_mask = Q_REG_STATUS1_GPIO_EN_REV0_MASK;
+		else if (q_spec->type == Q_GPIO_TYPE &&
+			 q_spec->dig_major_rev > 0)
+			en_mask = Q_REG_STATUS1_GPIO_EN_MASK;
+		else /* MPP */
+			en_mask = Q_REG_STATUS1_MPP_EN_MASK;
+
+		if (!(buf & en_mask))
+			return -EPERM;
+
+		return buf & Q_REG_STATUS1_VAL_MASK;
+	}
+
+	if (is_gpio_lv_mv(q_spec)) {
+		shift = Q_REG_DIG_OUT_SRC_INVERT_SHIFT;
+		mask = Q_REG_DIG_OUT_SRC_INVERT_MASK;
+		reg = q_spec->regs[Q_REG_I_DIG_OUT_SRC_CTL];
+	} else {
+		shift = Q_REG_OUT_INVERT_SHIFT;
+		mask = Q_REG_OUT_INVERT_MASK;
+		reg = q_spec->regs[Q_REG_I_MODE_CTL];
+	}
+
+	return (reg & mask) >> shift;
+}
+
+static int __qpnp_pin_set(struct qpnp_pin_chip *q_chip,
+			   struct qpnp_pin_spec *q_spec, int value)
+{
+	int rc;
+	u8 shift, mask, *reg;
+	u16 address;
+
+	if (!q_chip || !q_spec)
+		return -EINVAL;
+
+	if (is_gpio_lv_mv(q_spec)) {
+		shift = Q_REG_DIG_OUT_SRC_INVERT_SHIFT;
+		mask = Q_REG_DIG_OUT_SRC_INVERT_MASK;
+		reg = &q_spec->regs[Q_REG_I_DIG_OUT_SRC_CTL];
+		address = Q_REG_ADDR(q_spec, Q_REG_DIG_OUT_SRC_CTL);
+	} else {
+		shift = Q_REG_OUT_INVERT_SHIFT;
+		mask = Q_REG_OUT_INVERT_MASK;
+		reg = &q_spec->regs[Q_REG_I_MODE_CTL];
+		address = Q_REG_ADDR(q_spec, Q_REG_MODE_CTL);
+	}
+
+	q_reg_clr_set(reg, shift, mask, !!value);
+
+	rc = regmap_write(q_chip->regmap, address, *reg);
+	if (rc)
+		dev_err(&q_chip->pdev->dev, "%s: spmi write failed\n",
+								__func__);
+	return rc;
+}
+
+
+static void qpnp_pin_set(struct gpio_chip *gpio_chip,
+		unsigned int offset, int value)
+{
+	struct qpnp_pin_chip *q_chip = dev_get_drvdata(gpio_chip->dev);
+	struct qpnp_pin_spec *q_spec;
+
+	if (WARN_ON(!q_chip))
+		return;
+
+	q_spec = qpnp_chip_gpio_get_spec(q_chip, offset);
+	if (WARN_ON(!q_spec))
+		return;
+
+	__qpnp_pin_set(q_chip, q_spec, value);
+}
+
+static int qpnp_pin_set_mode(struct qpnp_pin_chip *q_chip,
+				   struct qpnp_pin_spec *q_spec, int mode)
+{
+	int rc;
+	u8 shift, mask;
+
+	if (!q_chip || !q_spec)
+		return -EINVAL;
+
+	if (qpnp_pin_check_config(Q_PIN_CFG_MODE, q_spec, mode)) {
+		pr_err("invalid mode specification %d\n", mode);
+		return -EINVAL;
+	}
+
+	if (is_gpio_lv_mv(q_spec)) {
+		shift = Q_REG_LV_MV_MODE_SEL_SHIFT;
+		mask = Q_REG_LV_MV_MODE_SEL_MASK;
+	} else {
+		shift = Q_REG_MODE_SEL_SHIFT;
+		mask = Q_REG_MODE_SEL_MASK;
+	}
+
+	q_reg_clr_set(&q_spec->regs[Q_REG_I_MODE_CTL], shift, mask, mode);
+
+	rc = regmap_write(q_chip->regmap, Q_REG_ADDR(q_spec, Q_REG_MODE_CTL),
+			  q_spec->regs[Q_REG_I_MODE_CTL]);
+	return rc;
+}
+
+static int qpnp_pin_direction_input(struct gpio_chip *gpio_chip,
+		unsigned int offset)
+{
+	struct qpnp_pin_chip *q_chip = dev_get_drvdata(gpio_chip->dev);
+	struct qpnp_pin_spec *q_spec;
+
+	if (WARN_ON(!q_chip))
+		return -ENODEV;
+
+	q_spec = qpnp_chip_gpio_get_spec(q_chip, offset);
+	if (WARN_ON(!q_spec))
+		return -ENODEV;
+
+	return qpnp_pin_set_mode(q_chip, q_spec, QPNP_PIN_MODE_DIG_IN);
+}
+
+static int qpnp_pin_direction_output(struct gpio_chip *gpio_chip,
+		unsigned int offset, int val)
+{
+	int rc;
+	struct qpnp_pin_chip *q_chip = dev_get_drvdata(gpio_chip->dev);
+	struct qpnp_pin_spec *q_spec;
+
+	if (WARN_ON(!q_chip))
+		return -ENODEV;
+
+	q_spec = qpnp_chip_gpio_get_spec(q_chip, offset);
+	if (WARN_ON(!q_spec))
+		return -ENODEV;
+
+	rc = __qpnp_pin_set(q_chip, q_spec, val);
+	if (rc)
+		return rc;
+
+	rc = qpnp_pin_set_mode(q_chip, q_spec, QPNP_PIN_MODE_DIG_OUT);
+
+	return rc;
+}
+
+static int qpnp_pin_of_gpio_xlate(struct gpio_chip *gpio_chip,
+				   const struct of_phandle_args *gpio_spec,
+				   u32 *flags)
+{
+	struct qpnp_pin_chip *q_chip = dev_get_drvdata(gpio_chip->dev);
+	struct qpnp_pin_spec *q_spec;
+
+	if (WARN_ON(gpio_chip->of_gpio_n_cells < 2)) {
+		pr_err("of_gpio_n_cells < 2\n");
+		return -EINVAL;
+	}
+
+	q_spec = qpnp_pmic_pin_get_spec(q_chip, gpio_spec->args[0]);
+	if (!q_spec) {
+		pr_err("no such PMIC gpio %u in device topology\n",
+							gpio_spec->args[0]);
+		return -EINVAL;
+	}
+
+	if (flags)
+		*flags = gpio_spec->args[1];
+
+	return q_spec->gpio_chip_idx;
+}
+
+static int qpnp_pin_apply_config(struct qpnp_pin_chip *q_chip,
+				  struct qpnp_pin_spec *q_spec)
+{
+	struct qpnp_pin_cfg param;
+	struct device_node *node = q_spec->node;
+	int rc;
+	u8 shift, mask, *reg;
+
+	if (is_gpio_lv_mv(q_spec)) {
+		shift = Q_REG_LV_MV_MODE_SEL_SHIFT;
+		mask = Q_REG_LV_MV_MODE_SEL_MASK;
+	} else {
+		shift = Q_REG_MODE_SEL_SHIFT;
+		mask = Q_REG_MODE_SEL_MASK;
+	}
+	param.mode	   = q_reg_get(&q_spec->regs[Q_REG_I_MODE_CTL],
+							shift, mask);
+
+	param.output_type  = q_reg_get(&q_spec->regs[Q_REG_I_DIG_OUT_CTL],
+				       Q_REG_OUT_TYPE_SHIFT,
+				       Q_REG_OUT_TYPE_MASK);
+
+	if (is_gpio_lv_mv(q_spec)) {
+		shift = Q_REG_DIG_OUT_SRC_INVERT_SHIFT;
+		mask = Q_REG_DIG_OUT_SRC_INVERT_MASK;
+		reg = &q_spec->regs[Q_REG_I_DIG_OUT_SRC_CTL];
+	} else {
+		shift = Q_REG_OUT_INVERT_SHIFT;
+		mask = Q_REG_OUT_INVERT_MASK;
+		reg = &q_spec->regs[Q_REG_I_MODE_CTL];
+	}
+	param.invert	   = q_reg_get(reg, shift, mask);
+
+	param.pull	   = q_reg_get(&q_spec->regs[Q_REG_I_DIG_PULL_CTL],
+				       Q_REG_PULL_SHIFT, Q_REG_PULL_MASK);
+	param.vin_sel	   = q_reg_get(&q_spec->regs[Q_REG_I_DIG_VIN_CTL],
+				       Q_REG_VIN_SHIFT, Q_REG_VIN_MASK);
+	param.out_strength = q_reg_get(&q_spec->regs[Q_REG_I_DIG_OUT_CTL],
+				       Q_REG_OUT_STRENGTH_SHIFT,
+				       Q_REG_OUT_STRENGTH_MASK);
+
+	if (is_gpio_lv_mv(q_spec)) {
+		shift = Q_REG_DIG_OUT_SRC_SRC_SEL_SHIFT;
+		mask = Q_REG_DIG_OUT_SRC_SRC_SEL_MASK;
+		reg = &q_spec->regs[Q_REG_I_DIG_OUT_SRC_CTL];
+	} else {
+		shift = Q_REG_SRC_SEL_SHIFT;
+		mask = Q_REG_SRC_SEL_MASK;
+		reg = &q_spec->regs[Q_REG_I_MODE_CTL];
+	}
+	param.src_sel   = q_reg_get(reg, shift, mask);
+
+	param.master_en    = q_reg_get(&q_spec->regs[Q_REG_I_EN_CTL],
+				       Q_REG_MASTER_EN_SHIFT,
+				       Q_REG_MASTER_EN_MASK);
+	param.aout_ref    = q_reg_get(&q_spec->regs[Q_REG_I_AOUT_CTL],
+				       Q_REG_AOUT_REF_SHIFT,
+				       Q_REG_AOUT_REF_MASK);
+	param.ain_route    = q_reg_get(&q_spec->regs[Q_REG_I_AIN_CTL],
+				       Q_REG_AIN_ROUTE_SHIFT,
+				       Q_REG_AIN_ROUTE_MASK);
+	param.cs_out    = q_reg_get(&q_spec->regs[Q_REG_I_SINK_CTL],
+				       Q_REG_CS_OUT_SHIFT,
+				       Q_REG_CS_OUT_MASK);
+	param.apass_sel    = q_reg_get(&q_spec->regs[Q_REG_I_APASS_SEL_CTL],
+				       Q_REG_APASS_SEL_SHIFT,
+				       Q_REG_APASS_SEL_MASK);
+	if (is_gpio_lv_mv(q_spec)) {
+		param.dtest_sel = q_reg_get(&q_spec->regs[Q_REG_I_DIG_IN_CTL],
+				Q_REG_LV_MV_DTEST_SEL_CFG_SHIFT,
+				Q_REG_LV_MV_DTEST_SEL_CFG_MASK);
+	} else {
+		param.dtest_sel = q_reg_get(&q_spec->regs[Q_REG_I_DIG_IN_CTL],
+				Q_REG_DTEST_SEL_SHIFT,
+				Q_REG_DTEST_SEL_MASK);
+	}
+
+	of_property_read_u32(node, "qcom,mode",
+		&param.mode);
+	of_property_read_u32(node, "qcom,output-type",
+		&param.output_type);
+	of_property_read_u32(node, "qcom,invert",
+		&param.invert);
+	of_property_read_u32(node, "qcom,pull",
+		&param.pull);
+	of_property_read_u32(node, "qcom,vin-sel",
+		&param.vin_sel);
+	of_property_read_u32(node, "qcom,out-strength",
+		&param.out_strength);
+	of_property_read_u32(node, "qcom,src-sel",
+		&param.src_sel);
+	of_property_read_u32(node, "qcom,master-en",
+		&param.master_en);
+	of_property_read_u32(node, "qcom,aout-ref",
+		&param.aout_ref);
+	of_property_read_u32(node, "qcom,ain-route",
+		&param.ain_route);
+	of_property_read_u32(node, "qcom,cs-out",
+		&param.cs_out);
+	of_property_read_u32(node, "qcom,apass-sel",
+		&param.apass_sel);
+	of_property_read_u32(node, "qcom,dtest-sel",
+		&param.dtest_sel);
+
+	rc = _qpnp_pin_config(q_chip, q_spec, &param);
+
+	return rc;
+}
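+
+/*
+ * Illustrative child node for the parsing above (unit address and values
+ * are hypothetical; only a few of the supported properties are shown):
+ *
+ *	gpio@c000 {
+ *		reg = <0xc000 0x100>;
+ *		qcom,pin-num = <1>;
+ *		qcom,mode = <1>;
+ *		qcom,pull = <5>;
+ *		qcom,master-en = <1>;
+ *	};
+ */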
+
+static int qpnp_pin_free_chip(struct qpnp_pin_chip *q_chip)
+{
+	int i, rc = 0;
+
+	if (q_chip->chip_gpios)
+		for (i = 0; i < q_chip->gpio_chip.ngpio; i++)
+			kfree(q_chip->chip_gpios[i]);
+
+	mutex_lock(&qpnp_pin_chips_lock);
+	list_del(&q_chip->chip_list);
+	mutex_unlock(&qpnp_pin_chips_lock);
+	if (q_chip->chip_registered)
+		gpiochip_remove(&q_chip->gpio_chip);
+
+	kfree(q_chip->chip_gpios);
+	kfree(q_chip->pmic_pins);
+	kfree(q_chip);
+	return rc;
+}
+
+#ifdef CONFIG_GPIO_QPNP_PIN_DEBUG
+struct qpnp_pin_reg {
+	uint32_t addr;
+	uint32_t idx;
+	uint32_t shift;
+	uint32_t mask;
+};
+
+static struct dentry *driver_dfs_dir;
+
+static int qpnp_pin_reg_attr(enum qpnp_pin_param_type type,
+			     struct qpnp_pin_reg *cfg,
+			     struct qpnp_pin_spec *q_spec)
+{
+	switch (type) {
+	case Q_PIN_CFG_MODE:
+		if (is_gpio_lv_mv(q_spec)) {
+			cfg->shift = Q_REG_LV_MV_MODE_SEL_SHIFT;
+			cfg->mask = Q_REG_LV_MV_MODE_SEL_MASK;
+		} else {
+			cfg->shift = Q_REG_MODE_SEL_SHIFT;
+			cfg->mask = Q_REG_MODE_SEL_MASK;
+		}
+		cfg->addr = Q_REG_MODE_CTL;
+		cfg->idx = Q_REG_I_MODE_CTL;
+		break;
+	case Q_PIN_CFG_OUTPUT_TYPE:
+		cfg->addr = Q_REG_DIG_OUT_CTL;
+		cfg->idx = Q_REG_I_DIG_OUT_CTL;
+		cfg->shift = Q_REG_OUT_TYPE_SHIFT;
+		cfg->mask = Q_REG_OUT_TYPE_MASK;
+		break;
+	case Q_PIN_CFG_INVERT:
+		if (is_gpio_lv_mv(q_spec)) {
+			cfg->addr = Q_REG_DIG_OUT_SRC_CTL;
+			cfg->idx = Q_REG_I_DIG_OUT_SRC_CTL;
+			cfg->shift = Q_REG_DIG_OUT_SRC_INVERT_SHIFT;
+			cfg->mask = Q_REG_DIG_OUT_SRC_INVERT_MASK;
+		} else {
+			cfg->addr = Q_REG_MODE_CTL;
+			cfg->idx = Q_REG_I_MODE_CTL;
+			cfg->shift = Q_REG_OUT_INVERT_SHIFT;
+			cfg->mask = Q_REG_OUT_INVERT_MASK;
+		}
+		break;
+	case Q_PIN_CFG_PULL:
+		cfg->addr = Q_REG_DIG_PULL_CTL;
+		cfg->idx = Q_REG_I_DIG_PULL_CTL;
+		cfg->shift = Q_REG_PULL_SHIFT;
+		cfg->mask = Q_REG_PULL_MASK;
+		break;
+	case Q_PIN_CFG_VIN_SEL:
+		cfg->addr = Q_REG_DIG_VIN_CTL;
+		cfg->idx = Q_REG_I_DIG_VIN_CTL;
+		cfg->shift = Q_REG_VIN_SHIFT;
+		cfg->mask = Q_REG_VIN_MASK;
+		break;
+	case Q_PIN_CFG_OUT_STRENGTH:
+		cfg->addr = Q_REG_DIG_OUT_CTL;
+		cfg->idx = Q_REG_I_DIG_OUT_CTL;
+		cfg->shift = Q_REG_OUT_STRENGTH_SHIFT;
+		cfg->mask = Q_REG_OUT_STRENGTH_MASK;
+		break;
+	case Q_PIN_CFG_SRC_SEL:
+		if (is_gpio_lv_mv(q_spec)) {
+			cfg->addr = Q_REG_DIG_OUT_SRC_CTL;
+			cfg->idx = Q_REG_I_DIG_OUT_SRC_CTL;
+			cfg->shift = Q_REG_DIG_OUT_SRC_SRC_SEL_SHIFT;
+			cfg->mask = Q_REG_DIG_OUT_SRC_SRC_SEL_MASK;
+		} else {
+			cfg->addr = Q_REG_MODE_CTL;
+			cfg->idx = Q_REG_I_MODE_CTL;
+			cfg->shift = Q_REG_SRC_SEL_SHIFT;
+			cfg->mask = Q_REG_SRC_SEL_MASK;
+		}
+		break;
+	case Q_PIN_CFG_MASTER_EN:
+		cfg->addr = Q_REG_EN_CTL;
+		cfg->idx = Q_REG_I_EN_CTL;
+		cfg->shift = Q_REG_MASTER_EN_SHIFT;
+		cfg->mask = Q_REG_MASTER_EN_MASK;
+		break;
+	case Q_PIN_CFG_AOUT_REF:
+		cfg->addr = Q_REG_AOUT_CTL;
+		cfg->idx = Q_REG_I_AOUT_CTL;
+		cfg->shift = Q_REG_AOUT_REF_SHIFT;
+		cfg->mask = Q_REG_AOUT_REF_MASK;
+		break;
+	case Q_PIN_CFG_AIN_ROUTE:
+		cfg->addr = Q_REG_AIN_CTL;
+		cfg->idx = Q_REG_I_AIN_CTL;
+		cfg->shift = Q_REG_AIN_ROUTE_SHIFT;
+		cfg->mask = Q_REG_AIN_ROUTE_MASK;
+		break;
+	case Q_PIN_CFG_CS_OUT:
+		cfg->addr = Q_REG_SINK_CTL;
+		cfg->idx = Q_REG_I_SINK_CTL;
+		cfg->shift = Q_REG_CS_OUT_SHIFT;
+		cfg->mask = Q_REG_CS_OUT_MASK;
+		break;
+	case Q_PIN_CFG_APASS_SEL:
+		cfg->addr = Q_REG_APASS_SEL_CTL;
+		cfg->idx = Q_REG_I_APASS_SEL_CTL;
+		cfg->shift = Q_REG_APASS_SEL_SHIFT;
+		cfg->mask = Q_REG_APASS_SEL_MASK;
+		break;
+	case Q_PIN_CFG_DTEST_SEL:
+		if (is_gpio_lv_mv(q_spec)) {
+			cfg->shift = Q_REG_LV_MV_DTEST_SEL_CFG_SHIFT;
+			cfg->mask = Q_REG_LV_MV_DTEST_SEL_CFG_MASK;
+		} else {
+			cfg->shift = Q_REG_DTEST_SEL_SHIFT;
+			cfg->mask = Q_REG_DTEST_SEL_MASK;
+		}
+		cfg->addr = Q_REG_DIG_IN_CTL;
+		cfg->idx = Q_REG_I_DIG_IN_CTL;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int qpnp_pin_debugfs_get(void *data, u64 *val)
+{
+	enum qpnp_pin_param_type *idx = data;
+	struct qpnp_pin_spec *q_spec;
+	struct qpnp_pin_reg cfg = {};
+	int rc;
+
+	q_spec = container_of(idx, struct qpnp_pin_spec, params[*idx]);
+
+	rc = qpnp_pin_reg_attr(*idx, &cfg, q_spec);
+	if (rc)
+		return rc;
+
+	*val = q_reg_get(&q_spec->regs[cfg.idx], cfg.shift, cfg.mask);
+	return 0;
+}
+
+static int qpnp_pin_debugfs_set(void *data, u64 val)
+{
+	enum qpnp_pin_param_type *idx = data;
+	struct qpnp_pin_spec *q_spec;
+	struct qpnp_pin_chip *q_chip;
+	struct qpnp_pin_reg cfg = {};
+	int rc;
+
+	q_spec = container_of(idx, struct qpnp_pin_spec, params[*idx]);
+	q_chip = q_spec->q_chip;
+
+	/*
+	 * Special handling for GPIO_LV/MV 'dtest-sel': a value of 0 disables
+	 * DTEST, any other value enables it and selects that line.
+	 */
+	if ((q_spec->subtype == Q_GPIO_SUBTYPE_GPIO_LV ||
+		q_spec->subtype == Q_GPIO_SUBTYPE_GPIO_MV) &&
+				*idx == Q_PIN_CFG_DTEST_SEL) {
+		/* enable/disable DTEST */
+		cfg.shift = Q_REG_LV_MV_DTEST_SEL_EN_SHIFT;
+		cfg.mask = Q_REG_LV_MV_DTEST_SEL_EN_MASK;
+		cfg.addr = Q_REG_DIG_IN_CTL;
+		cfg.idx = Q_REG_I_DIG_IN_CTL;
+		q_reg_clr_set(&q_spec->regs[cfg.idx],
+				cfg.shift, cfg.mask, !!val);
+	}
+
+	rc = qpnp_pin_check_config(*idx, q_spec, val);
+	if (rc)
+		return rc;
+
+	rc = qpnp_pin_reg_attr(*idx, &cfg, q_spec);
+	if (rc)
+		return rc;
+
+	if (*idx == Q_PIN_CFG_DTEST_SEL && val)  {
+		if (is_gpio_lv_mv(q_spec))
+			val -= 1;
+		else
+			val = BIT(val - 1);
+	}
+
+	q_reg_clr_set(&q_spec->regs[cfg.idx], cfg.shift, cfg.mask, val);
+	rc = regmap_write(q_chip->regmap, Q_REG_ADDR(q_spec, cfg.addr),
+			  q_spec->regs[cfg.idx]);
+
+	return rc;
+}
+DEFINE_SIMPLE_ATTRIBUTE(qpnp_pin_fops, qpnp_pin_debugfs_get,
+			qpnp_pin_debugfs_set, "%llu\n");
+
+#define DEBUGFS_BUF_SIZE 11 /* supports 2^32 in decimal */
+
+struct qpnp_pin_debugfs_args {
+	enum qpnp_pin_param_type type;
+	const char *filename;
+};
+
+static struct qpnp_pin_debugfs_args dfs_args[Q_NUM_PARAMS] = {
+	{ Q_PIN_CFG_MODE, "mode" },
+	{ Q_PIN_CFG_OUTPUT_TYPE, "output_type" },
+	{ Q_PIN_CFG_INVERT, "invert" },
+	{ Q_PIN_CFG_PULL, "pull" },
+	{ Q_PIN_CFG_VIN_SEL, "vin_sel" },
+	{ Q_PIN_CFG_OUT_STRENGTH, "out_strength" },
+	{ Q_PIN_CFG_SRC_SEL, "src_sel" },
+	{ Q_PIN_CFG_MASTER_EN, "master_en" },
+	{ Q_PIN_CFG_AOUT_REF, "aout_ref" },
+	{ Q_PIN_CFG_AIN_ROUTE, "ain_route" },
+	{ Q_PIN_CFG_CS_OUT, "cs_out" },
+	{ Q_PIN_CFG_APASS_SEL, "apass_sel" },
+	{ Q_PIN_CFG_DTEST_SEL, "dtest-sel" },
+};
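+
+/*
+ * The function below creates one directory per chip and one per pin, so
+ * each parameter ends up at (path illustrative)
+ * /sys/kernel/debug/qpnp_pin/<chip label>/<pmic pin>/<param>.
+ */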
+
+static int qpnp_pin_debugfs_create(struct qpnp_pin_chip *q_chip)
+{
+	struct platform_device *pdev = q_chip->pdev;
+	struct device *dev = &pdev->dev;
+	struct qpnp_pin_spec *q_spec;
+	enum qpnp_pin_param_type *params;
+	enum qpnp_pin_param_type type;
+	char pmic_pin[DEBUGFS_BUF_SIZE];
+	const char *filename;
+	struct dentry *dfs, *dfs_io_dir;
+	int i, j, rc;
+
+	q_chip->dfs_dir = debugfs_create_dir(q_chip->gpio_chip.label,
+							driver_dfs_dir);
+	if (q_chip->dfs_dir == NULL) {
+		dev_err(dev, "%s: cannot register chip debugfs directory %s\n",
+						__func__, dev->of_node->name);
+		return -ENODEV;
+	}
+
+	for (i = 0; i < q_chip->gpio_chip.ngpio; i++) {
+		q_spec = qpnp_chip_gpio_get_spec(q_chip, i);
+		params = q_spec->params;
+		snprintf(pmic_pin, DEBUGFS_BUF_SIZE, "%u", q_spec->pmic_pin);
+		dfs_io_dir = debugfs_create_dir(pmic_pin, q_chip->dfs_dir);
+		if (dfs_io_dir == NULL)
+			goto dfs_err;
+
+		for (j = 0; j < Q_NUM_PARAMS; j++) {
+			type = dfs_args[j].type;
+			filename = dfs_args[j].filename;
+
+			/*
+			 * Use a value of '0' to see if the pin has even basic
+			 * support for a function. Do not create a file if
+			 * it doesn't.
+			 */
+			rc = qpnp_pin_check_config(type, q_spec, 0);
+			if (rc == -ENXIO)
+				continue;
+
+			params[type] = type;
+			dfs = debugfs_create_file(filename, 0644, dfs_io_dir,
+					&q_spec->params[type], &qpnp_pin_fops);
+			if (dfs == NULL)
+				goto dfs_err;
+		}
+	}
+	return 0;
+dfs_err:
+	dev_err(dev, "%s: cannot register debugfs for pmic gpio %u on chip %s\n",
+			__func__, q_spec->pmic_pin, dev->of_node->name);
+	debugfs_remove_recursive(q_chip->dfs_dir);
+	return -ENFILE;
+}
+#else
+static int qpnp_pin_debugfs_create(struct qpnp_pin_chip *q_chip)
+{
+	return 0;
+}
+#endif
+
+static int qpnp_pin_is_valid_pin(struct qpnp_pin_spec *q_spec)
+{
+	if (q_spec->type == Q_GPIO_TYPE)
+		switch (q_spec->subtype) {
+		case Q_GPIO_SUBTYPE_GPIO_4CH:
+		case Q_GPIO_SUBTYPE_GPIOC_4CH:
+		case Q_GPIO_SUBTYPE_GPIO_8CH:
+		case Q_GPIO_SUBTYPE_GPIOC_8CH:
+		case Q_GPIO_SUBTYPE_GPIO_LV:
+		case Q_GPIO_SUBTYPE_GPIO_MV:
+			return 1;
+		}
+	else if (q_spec->type == Q_MPP_TYPE)
+		switch (q_spec->subtype) {
+		case Q_MPP_SUBTYPE_4CH_NO_ANA_OUT:
+		case Q_MPP_SUBTYPE_ULT_4CH_NO_ANA_OUT:
+		case Q_MPP_SUBTYPE_4CH_NO_SINK:
+		case Q_MPP_SUBTYPE_ULT_4CH_NO_SINK:
+		case Q_MPP_SUBTYPE_4CH_FULL_FUNC:
+		case Q_MPP_SUBTYPE_8CH_FULL_FUNC:
+			return 1;
+		}
+
+	return 0;
+}
+
+static int qpnp_pin_probe(struct platform_device *pdev)
+{
+	struct qpnp_pin_chip *q_chip;
+	struct qpnp_pin_spec *q_spec;
+	unsigned int base;
+	struct device_node *child;
+	int i, rc;
+	u32 lowest_gpio = UINT_MAX, highest_gpio = 0;
+	u32 gpio;
+	char version[Q_REG_SUBTYPE - Q_REG_DIG_MAJOR_REV + 1];
+	const char *pin_dev_name;
+
+	pin_dev_name = dev_name(&pdev->dev);
+	if (!pin_dev_name) {
+		dev_err(&pdev->dev,
+			"%s: label binding undefined for node %s\n",
+			__func__, pdev->dev.of_node->full_name);
+		return -EINVAL;
+	}
+
+	q_chip = kzalloc(sizeof(*q_chip), GFP_KERNEL);
+	if (!q_chip)
+		return -ENOMEM;
+
+	q_chip->regmap = dev_get_regmap(pdev->dev.parent, NULL);
+	if (!q_chip->regmap) {
+		dev_err(&pdev->dev, "Couldn't get parent's regmap\n");
+		kfree(q_chip);
+		return -EINVAL;
+	}
+	q_chip->pdev = pdev;
+	dev_set_drvdata(&pdev->dev, q_chip);
+
+	mutex_lock(&qpnp_pin_chips_lock);
+	list_add(&q_chip->chip_list, &qpnp_pin_chips);
+	mutex_unlock(&qpnp_pin_chips_lock);
+
+	/* first scan through nodes to find the range required for allocation */
+	i = 0;
+	for_each_available_child_of_node(pdev->dev.of_node, child) {
+		rc = of_property_read_u32(child, "qcom,pin-num", &gpio);
+		if (rc) {
+			dev_err(&pdev->dev,
+				"%s: unable to get qcom,pin-num property\n",
+								__func__);
+			goto err_probe;
+		}
+
+		if (gpio < lowest_gpio)
+			lowest_gpio = gpio;
+		if (gpio > highest_gpio)
+			highest_gpio = gpio;
+		i++;
+	}
+	q_chip->gpio_chip.ngpio = i;
+
+	if (highest_gpio < lowest_gpio) {
+		dev_err(&pdev->dev,
+			"%s: no device nodes specified in topology\n",
+								__func__);
+		rc = -EINVAL;
+		goto err_probe;
+	} else if (lowest_gpio == 0) {
+		dev_err(&pdev->dev, "%s: 0 is not a valid PMIC GPIO\n",
+								__func__);
+		rc = -EINVAL;
+		goto err_probe;
+	}
+
+	q_chip->pmic_pin_lowest = lowest_gpio;
+	q_chip->pmic_pin_highest = highest_gpio;
+
+	/* allocate gpio lookup tables */
+	q_chip->pmic_pins = kzalloc(sizeof(struct qpnp_pin_spec *) *
+					(highest_gpio - lowest_gpio + 1),
+					GFP_KERNEL);
+	q_chip->chip_gpios = kzalloc(sizeof(struct qpnp_pin_spec *) *
+					q_chip->gpio_chip.ngpio,
+					GFP_KERNEL);
+	if (!q_chip->pmic_pins || !q_chip->chip_gpios) {
+		dev_err(&pdev->dev, "%s: unable to allocate memory\n",
+								__func__);
+		rc = -ENOMEM;
+		goto err_probe;
+	}
+
+	/* get interrupt controller device_node */
+	q_chip->int_ctrl = of_irq_find_parent(pdev->dev.of_node);
+	if (!q_chip->int_ctrl) {
+		dev_err(&pdev->dev, "%s: Can't find interrupt parent\n",
+								__func__);
+		rc = -EINVAL;
+		goto err_probe;
+	}
+	i = 0;
+	/* now scan through again and populate the lookup table */
+	for_each_available_child_of_node(pdev->dev.of_node, child) {
+		rc = of_property_read_u32(child, "reg", &base);
+		if (rc < 0) {
+			dev_err(&pdev->dev,
+				"Couldn't find reg in node = %s rc = %d\n",
+				child->full_name, rc);
+			goto err_probe;
+		}
+
+		rc = of_property_read_u32(child, "qcom,pin-num", &gpio);
+		if (rc) {
+			dev_err(&pdev->dev,
+				"%s: unable to get qcom,pin-num property\n",
+								__func__);
+			goto err_probe;
+		}
+
+		q_spec = kzalloc(sizeof(struct qpnp_pin_spec), GFP_KERNEL);
+		if (!q_spec) {
+			rc = -ENOMEM;
+			goto err_probe;
+		}
+
+		q_spec->slave = to_spmi_device(pdev->dev.parent)->usid;
+		q_spec->offset = base;
+		q_spec->gpio_chip_idx = i;
+		q_spec->pmic_pin = gpio;
+		q_spec->node = child;
+		q_spec->q_chip = q_chip;
+
+		rc = regmap_bulk_read(q_chip->regmap,
+				Q_REG_ADDR(q_spec, Q_REG_DIG_MAJOR_REV),
+				&version[0],
+				ARRAY_SIZE(version));
+		if (rc) {
+			dev_err(&pdev->dev, "%s: unable to read type regs\n",
+						__func__);
+			goto err_probe;
+		}
+		q_spec->dig_major_rev = version[Q_REG_DIG_MAJOR_REV -
+						Q_REG_DIG_MAJOR_REV];
+		q_spec->type	= version[Q_REG_TYPE - Q_REG_DIG_MAJOR_REV];
+		q_spec->subtype = version[Q_REG_SUBTYPE - Q_REG_DIG_MAJOR_REV];
+
+		if (!qpnp_pin_is_valid_pin(q_spec)) {
+			dev_err(&pdev->dev,
+				"%s: invalid pin type (type=0x%x subtype=0x%x)\n",
+				       __func__, q_spec->type, q_spec->subtype);
+			goto err_probe;
+		}
+
+		rc = qpnp_pin_ctl_regs_init(q_spec);
+		if (rc)
+			goto err_probe;
+
+		/* initialize lookup table params */
+		qpnp_pmic_pin_set_spec(q_chip, gpio, q_spec);
+		qpnp_chip_gpio_set_spec(q_chip, i, q_spec);
+		i++;
+	}
+
+	q_chip->gpio_chip.base = -1;
+	q_chip->gpio_chip.label = pin_dev_name;
+	q_chip->gpio_chip.direction_input = qpnp_pin_direction_input;
+	q_chip->gpio_chip.direction_output = qpnp_pin_direction_output;
+	q_chip->gpio_chip.to_irq = qpnp_pin_to_irq;
+	q_chip->gpio_chip.get = qpnp_pin_get;
+	q_chip->gpio_chip.set = qpnp_pin_set;
+	q_chip->gpio_chip.dev = &pdev->dev;
+	q_chip->gpio_chip.of_xlate = qpnp_pin_of_gpio_xlate;
+	q_chip->gpio_chip.of_gpio_n_cells = 2;
+	q_chip->gpio_chip.can_sleep = 0;
+
+	rc = gpiochip_add(&q_chip->gpio_chip);
+	if (rc) {
+		dev_err(&pdev->dev, "%s: Can't add gpio chip, rc = %d\n",
+								__func__, rc);
+		goto err_probe;
+	}
+
+	q_chip->chip_registered = true;
+	/* now configure gpio config defaults if they exist */
+	for (i = 0; i < q_chip->gpio_chip.ngpio; i++) {
+		q_spec = qpnp_chip_gpio_get_spec(q_chip, i);
+		if (WARN_ON(!q_spec)) {
+			rc = -ENODEV;
+			goto err_probe;
+		}
+
+		rc = qpnp_pin_cache_regs(q_chip, q_spec);
+		if (rc)
+			goto err_probe;
+
+		rc = qpnp_pin_apply_config(q_chip, q_spec);
+		if (rc)
+			goto err_probe;
+	}
+
+	dev_dbg(&pdev->dev, "%s: gpio_chip registered between %d-%u\n",
+			__func__, q_chip->gpio_chip.base,
+			(q_chip->gpio_chip.base + q_chip->gpio_chip.ngpio) - 1);
+
+	rc = qpnp_pin_debugfs_create(q_chip);
+	if (rc) {
+		dev_err(&pdev->dev, "%s: debugfs creation failed\n",
+			__func__);
+		goto err_probe;
+	}
+
+	return 0;
+
+err_probe:
+	qpnp_pin_free_chip(q_chip);
+	return rc;
+}
+
+static int qpnp_pin_remove(struct platform_device *pdev)
+{
+	struct qpnp_pin_chip *q_chip = dev_get_drvdata(&pdev->dev);
+
+	debugfs_remove_recursive(q_chip->dfs_dir);
+
+	return qpnp_pin_free_chip(q_chip);
+}
+
+static const struct of_device_id spmi_match_table[] = {
+	{	.compatible = "qcom,qpnp-pin",
+	},
+	{}
+};
+
+static const struct platform_device_id qpnp_pin_id[] = {
+	{ "qcom,qpnp-pin", 0 },
+	{ }
+};
+MODULE_DEVICE_TABLE(platform, qpnp_pin_id);
+
+static struct platform_driver qpnp_pin_driver = {
+	.driver		= {
+		.name		= "qcom,qpnp-pin",
+		.of_match_table = spmi_match_table,
+	},
+	.probe		= qpnp_pin_probe,
+	.remove		= qpnp_pin_remove,
+	.id_table	= qpnp_pin_id,
+};
+
+static int __init qpnp_pin_init(void)
+{
+#ifdef CONFIG_GPIO_QPNP_PIN_DEBUG
+	driver_dfs_dir = debugfs_create_dir("qpnp_pin", NULL);
+	if (driver_dfs_dir == NULL)
+		pr_err("Cannot register top level debugfs directory\n");
+#endif
+
+	return platform_driver_register(&qpnp_pin_driver);
+}
+
+static void __exit qpnp_pin_exit(void)
+{
+#ifdef CONFIG_GPIO_QPNP_PIN_DEBUG
+	debugfs_remove_recursive(driver_dfs_dir);
+#endif
+	platform_driver_unregister(&qpnp_pin_driver);
+}
+
+MODULE_DESCRIPTION("QPNP PMIC gpio driver");
+MODULE_LICENSE("GPL v2");
+
+subsys_initcall(qpnp_pin_init);
+module_exit(qpnp_pin_exit);
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/gpu/drm/msm/adreno/a5xx_counters.c	2019-01-22 16:16:23.483246225 +0100
@@ -0,0 +1,825 @@
+/* Copyright (c) 2017-2018 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include "a5xx_gpu.h"
+
+/*
+ * Fixed counters are not selectable; they always count the same thing.
+ * The countable is an index into the group (countable 0 = register 0,
+ * and so on) and they have no select register.
+ */
+static int a5xx_counter_get_fixed(struct msm_gpu *gpu,
+		struct adreno_counter_group *group,
+		u32 countable, u32 *lo, u32 *hi)
+{
+	if (countable >= group->nr_counters)
+		return -EINVAL;
+
+	if (lo)
+		*lo = group->counters[countable].lo;
+	if (hi)
+		*hi = group->counters[countable].hi;
+
+	return countable;
+}
+
+/*
+ * Most counters are selectable in that they can be programmed to count
+ * different events; in most cases there are many more countables than
+ * counters. When a new counter is requested, first walk the list to see if any
+ * other counters in that group are counting the same countable and if so reuse
+ * that counter. If not find the first empty counter in the list and register
+ * that for the desired countable. If we are out of counters too bad so sad.
+ */
+static int a5xx_counter_get(struct msm_gpu *gpu,
+		struct adreno_counter_group *group,
+		u32 countable, u32 *lo, u32 *hi)
+{
+	struct adreno_counter *counter;
+	int i, empty = -1;
+
+	spin_lock(&group->lock);
+
+	for (i = 0; i < group->nr_counters; i++) {
+		counter = &group->counters[i];
+
+		if (counter->refcount) {
+			if (counter->countable == countable) {
+				counter->refcount++;
+
+				if (lo)
+					*lo = counter->lo;
+				if (hi)
+					*hi = counter->hi;
+
+				spin_unlock(&group->lock);
+				return i;
+			}
+		} else
+			empty = (empty == -1) ? i : empty;
+	}
+
+	if (empty == -1) {
+		spin_unlock(&group->lock);
+		return -EBUSY;
+	}
+
+	counter = &group->counters[empty];
+
+	counter->refcount = 1;
+	counter->countable = countable;
+
+	if (lo)
+		*lo = counter->lo;
+	if (hi)
+		*hi = counter->hi;
+
+	spin_unlock(&group->lock);
+
+	if (pm_runtime_active(&gpu->pdev->dev) && group->funcs.enable)
+		group->funcs.enable(gpu, group, empty, false);
+
+	return empty;
+}
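+
+/*
+ * Typical lifecycle through the group ops (sketch; error handling is
+ * elided and countable 23 is arbitrary):
+ *
+ *	u32 lo, hi;
+ *	int id = group->funcs.get(gpu, group, 23, &lo, &hi);
+ *	u64 val = group->funcs.read(gpu, group, id);
+ *
+ *	group->funcs.put(gpu, group, id);
+ */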
+
+/* The majority of the non-fixed counter selects can be programmed by the CPU */
+static void a5xx_counter_enable_cpu(struct msm_gpu *gpu,
+		struct adreno_counter_group *group, int counterid, bool restore)
+{
+	struct adreno_counter *counter = &group->counters[counterid];
+
+	gpu_write(gpu, counter->sel, counter->countable);
+}
+
+static void a5xx_counter_enable_pm4(struct msm_gpu *gpu,
+		struct adreno_counter_group *group, int counterid, bool restore)
+{
+	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+	struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
+	struct msm_ringbuffer *ring = gpu->rb[0];
+	struct adreno_counter *counter = &group->counters[counterid];
+
+	/*
+	 * If we are restoring the counters after a power cycle we can safely
+	 * use AHB to enable the counters because we know SP/TP power collapse
+	 * isn't active
+	 */
+	if (restore) {
+		a5xx_counter_enable_cpu(gpu, group, counterid, true);
+		return;
+	}
+
+	mutex_lock(&gpu->dev->struct_mutex);
+
+	/*
+	 * If HW init hasn't run yet we can use the CPU to program the counter
+	 * (and indeed we must because we can't submit commands to the
+	 * GPU if it isn't initialized)
+	 */
+	if (gpu->needs_hw_init) {
+		a5xx_counter_enable_cpu(gpu, group, counterid, true);
+		mutex_unlock(&gpu->dev->struct_mutex);
+		return;
+	}
+
+	/* Turn off preemption for the duration of this command */
+	OUT_PKT7(ring, CP_PREEMPT_ENABLE_GLOBAL, 1);
+	OUT_RING(ring, 0x02);
+
+	/* Turn off protected mode to write to special registers */
+	OUT_PKT7(ring, CP_SET_PROTECTED_MODE, 1);
+	OUT_RING(ring, 0);
+
+	/* Set the save preemption record for the ring/command */
+	OUT_PKT4(ring, REG_A5XX_CP_CONTEXT_SWITCH_SAVE_ADDR_LO, 2);
+	OUT_RING(ring, lower_32_bits(a5xx_gpu->preempt_iova[ring->id]));
+	OUT_RING(ring, upper_32_bits(a5xx_gpu->preempt_iova[ring->id]));
+
+	/* Turn back on protected mode */
+	OUT_PKT7(ring, CP_SET_PROTECTED_MODE, 1);
+	OUT_RING(ring, 1);
+
+	/* Idle the GPU */
+	OUT_PKT7(ring, CP_WAIT_FOR_IDLE, 0);
+
+	/* Enable the counter */
+	OUT_PKT4(ring, counter->sel, 1);
+	OUT_RING(ring, counter->countable);
+
+	/* Re-enable preemption */
+	OUT_PKT7(ring, CP_PREEMPT_ENABLE_GLOBAL, 1);
+	OUT_RING(ring, 0x00);
+
+	OUT_PKT7(ring, CP_PREEMPT_ENABLE_LOCAL, 1);
+	OUT_RING(ring, 0x01);
+
+	OUT_PKT7(ring, CP_YIELD_ENABLE, 1);
+	OUT_RING(ring, 0x01);
+
+	/* Yield */
+	OUT_PKT7(ring, CP_CONTEXT_SWITCH_YIELD, 4);
+	OUT_RING(ring, 0x00);
+	OUT_RING(ring, 0x00);
+	OUT_RING(ring, 0x01);
+	OUT_RING(ring, 0x01);
+
+	gpu->funcs->flush(gpu, ring);
+
+	/* Preempt into our ring if we need to */
+	a5xx_preempt_trigger(gpu);
+
+	/* wait for the operation to complete */
+	a5xx_idle(gpu, ring);
+
+	mutex_unlock(&gpu->dev->struct_mutex);
+}
+
+/*
+ * GPMU counters are selectable but the selects are muxed together in two
+ * registers
+ */
+static void a5xx_counter_enable_gpmu(struct msm_gpu *gpu,
+		struct adreno_counter_group *group, int counterid, bool restore)
+{
+	struct adreno_counter *counter = &group->counters[counterid];
+	u32 reg;
+	int shift;
+
+	/*
+	 * The selects for the GPMU counters are grouped together in two
+	 * registers, a byte for each counter. Counters 0-3 are located in
+	 * GPMU_POWER_COUNTER_SELECT_0 and 4-5 are in
+	 * GPMU_POWER_COUNTER_SELECT_1
+	 */
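+	/*
+	 * Worked example (values follow from the shifts below): counterid 5
+	 * selects GPMU_POWER_COUNTER_SELECT_1 with shift = (5 - 4) * 8 = 8,
+	 * so its countable occupies bits 15:8 of that register.
+	 */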
+	if (counterid <= 3) {
+		shift = counterid << 3;
+		reg = REG_A5XX_GPMU_POWER_COUNTER_SELECT_0;
+	} else {
+		shift = (counterid - 4) << 3;
+		reg = REG_A5XX_GPMU_POWER_COUNTER_SELECT_1;
+	}
+
+	gpu_rmw(gpu, reg, 0xFF << shift, (counter->countable & 0xff) << shift);
+}
+
+/* VBIF counters are selectable but have their own programming process */
+static void a5xx_counter_enable_vbif(struct msm_gpu *gpu,
+		struct adreno_counter_group *group, int counterid, bool restore)
+{
+	struct adreno_counter *counter = &group->counters[counterid];
+
+	gpu_write(gpu, REG_A5XX_VBIF_PERF_CNT_CLR(counterid), 1);
+	gpu_write(gpu, REG_A5XX_VBIF_PERF_CNT_CLR(counterid), 0);
+	gpu_write(gpu, REG_A5XX_VBIF_PERF_CNT_SEL(counterid),
+		counter->countable);
+	gpu_write(gpu, REG_A5XX_VBIF_PERF_CNT_EN(counterid), 1);
+}
+
+/*
+ * VBIF power counters are not selectable but need to be cleared/enabled
+ * before use
+ */
+static void a5xx_counter_enable_vbif_power(struct msm_gpu *gpu,
+		struct adreno_counter_group *group, int counterid, bool restore)
+{
+	gpu_write(gpu, REG_A5XX_VBIF_PERF_PWR_CNT_CLR(counterid), 1);
+	gpu_write(gpu, REG_A5XX_VBIF_PERF_PWR_CNT_CLR(counterid), 0);
+	gpu_write(gpu, REG_A5XX_VBIF_PERF_PWR_CNT_EN(counterid), 1);
+}
+
+/* GPMU always on counter needs to be enabled before use */
+static void a5xx_counter_enable_alwayson_power(struct msm_gpu *gpu,
+		struct adreno_counter_group *group, int counterid, bool restore)
+{
+	gpu_write(gpu, REG_A5XX_GPMU_ALWAYS_ON_COUNTER_RESET, 1);
+}
+
+static u64 a5xx_counter_read(struct msm_gpu *gpu,
+		struct adreno_counter_group *group, int counterid)
+{
+	if (counterid < 0 || counterid >= group->nr_counters)
+		return 0;
+
+	/* If the power is off, return the shadow value */
+	if (!pm_runtime_active(&gpu->pdev->dev))
+		return group->counters[counterid].value;
+
+	return gpu_read64(gpu, group->counters[counterid].lo,
+		group->counters[counterid].hi);
+}
+
+/*
+ * Drop a reference on a selectable counter; once the refcount reaches
+ * zero the slot is free to be claimed for a different countable
+ */
+static void a5xx_counter_put(struct msm_gpu *gpu,
+		struct adreno_counter_group *group, int counterid)
+{
+	struct adreno_counter *counter;
+
+	if (counterid < 0 || counterid >= group->nr_counters)
+		return;
+
+	counter = &group->counters[counterid];
+
+	spin_lock(&group->lock);
+	if (counter->refcount > 0)
+		counter->refcount--;
+	spin_unlock(&group->lock);
+}
+
+static void a5xx_counter_group_enable(struct msm_gpu *gpu,
+		struct adreno_counter_group *group, bool restore)
+{
+	int i;
+
+	if (!group || !group->funcs.enable)
+		return;
+
+	spin_lock(&group->lock);
+
+	for (i = 0; i < group->nr_counters; i++) {
+		if (!group->counters[i].refcount)
+			continue;
+
+		group->funcs.enable(gpu, group, i, restore);
+	}
+	spin_unlock(&group->lock);
+}
+
+static void a5xx_counter_restore(struct msm_gpu *gpu,
+		struct adreno_counter_group *group)
+{
+	int i;
+
+	spin_lock(&group->lock);
+	for (i = 0; i < group->nr_counters; i++) {
+		struct adreno_counter *counter = &group->counters[i];
+		uint32_t bit, offset = counter->load_bit;
+
+		/* Don't load if the counter isn't active or can't be loaded */
+		if (!counter->refcount)
+			continue;
+
+		/*
+		 * Each counter has a specific bit in one of four load command
+		 * registers. Figure out which register / relative bit to use
+		 * for the counter
+		 */
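+		/*
+		 * Worked example: load_bit 40 gives offset 40 / 32 = 1 and
+		 * bit 40 % 32 = 8, i.e. bit 8 of RBBM_PERFCTR_LOAD_CMD1.
+		 */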
+		bit = offset % 32;
+		offset /= 32;
+
+		/* Write the counter value */
+		gpu_write64(gpu, REG_A5XX_RBBM_PERFCTR_LOAD_VALUE_LO,
+			REG_A5XX_RBBM_PERFCTR_LOAD_VALUE_HI,
+			counter->value);
+
+		/*
+		 * Write the load bit to load the counter - the command register
+		 * will get reset to 0 after the operation completes
+		 */
+		gpu_write(gpu, REG_A5XX_RBBM_PERFCTR_LOAD_CMD0 + offset,
+			 (1 << bit));
+	}
+	spin_unlock(&group->lock);
+}
+
+static void a5xx_counter_save(struct msm_gpu *gpu,
+		struct adreno_counter_group *group)
+{
+	int i;
+
+	spin_lock(&group->lock);
+	for (i = 0; i < group->nr_counters; i++) {
+		struct adreno_counter *counter = &group->counters[i];
+
+		if (counter->refcount > 0)
+			counter->value = gpu_read64(gpu, counter->lo,
+				counter->hi);
+	}
+	spin_unlock(&group->lock);
+}
+
+static struct adreno_counter a5xx_counters_alwayson[1] = {
+	{ REG_A5XX_RBBM_ALWAYSON_COUNTER_LO,
+		REG_A5XX_RBBM_ALWAYSON_COUNTER_HI },
+};
+
+static struct adreno_counter a5xx_counters_ccu[] = {
+	{ REG_A5XX_RBBM_PERFCTR_CCU_0_LO, REG_A5XX_RBBM_PERFCTR_CCU_0_HI,
+		REG_A5XX_RB_PERFCTR_CCU_SEL_0, 40 },
+	{ REG_A5XX_RBBM_PERFCTR_CCU_1_LO, REG_A5XX_RBBM_PERFCTR_CCU_1_HI,
+		REG_A5XX_RB_PERFCTR_CCU_SEL_1, 41 },
+	{ REG_A5XX_RBBM_PERFCTR_CCU_2_LO, REG_A5XX_RBBM_PERFCTR_CCU_2_HI,
+		REG_A5XX_RB_PERFCTR_CCU_SEL_2, 42 },
+	{ REG_A5XX_RBBM_PERFCTR_CCU_3_LO, REG_A5XX_RBBM_PERFCTR_CCU_3_HI,
+		REG_A5XX_RB_PERFCTR_CCU_SEL_3, 43 },
+};
+
+static struct adreno_counter a5xx_counters_cmp[] = {
+	{ REG_A5XX_RBBM_PERFCTR_CMP_0_LO, REG_A5XX_RBBM_PERFCTR_CMP_0_HI,
+		REG_A5XX_RB_PERFCTR_CMP_SEL_0, 94 },
+	{ REG_A5XX_RBBM_PERFCTR_CMP_1_LO, REG_A5XX_RBBM_PERFCTR_CMP_1_HI,
+		REG_A5XX_RB_PERFCTR_CMP_SEL_1, 95 },
+	{ REG_A5XX_RBBM_PERFCTR_CMP_2_LO, REG_A5XX_RBBM_PERFCTR_CMP_2_HI,
+		REG_A5XX_RB_PERFCTR_CMP_SEL_2, 96 },
+	{ REG_A5XX_RBBM_PERFCTR_CMP_3_LO, REG_A5XX_RBBM_PERFCTR_CMP_3_HI,
+		REG_A5XX_RB_PERFCTR_CMP_SEL_3, 97 },
+};
+
+static struct adreno_counter a5xx_counters_cp[] = {
+	{ REG_A5XX_RBBM_PERFCTR_CP_0_LO, REG_A5XX_RBBM_PERFCTR_CP_0_HI,
+		REG_A5XX_CP_PERFCTR_CP_SEL_0, 0 },
+	{ REG_A5XX_RBBM_PERFCTR_CP_1_LO, REG_A5XX_RBBM_PERFCTR_CP_1_HI,
+		REG_A5XX_CP_PERFCTR_CP_SEL_1, 1},
+	{ REG_A5XX_RBBM_PERFCTR_CP_2_LO, REG_A5XX_RBBM_PERFCTR_CP_2_HI,
+		REG_A5XX_CP_PERFCTR_CP_SEL_2, 2 },
+	{ REG_A5XX_RBBM_PERFCTR_CP_3_LO, REG_A5XX_RBBM_PERFCTR_CP_3_HI,
+		REG_A5XX_CP_PERFCTR_CP_SEL_3, 3 },
+	{ REG_A5XX_RBBM_PERFCTR_CP_4_LO, REG_A5XX_RBBM_PERFCTR_CP_4_HI,
+		REG_A5XX_CP_PERFCTR_CP_SEL_4, 4 },
+	{ REG_A5XX_RBBM_PERFCTR_CP_5_LO, REG_A5XX_RBBM_PERFCTR_CP_5_HI,
+		REG_A5XX_CP_PERFCTR_CP_SEL_5, 5 },
+	{ REG_A5XX_RBBM_PERFCTR_CP_6_LO, REG_A5XX_RBBM_PERFCTR_CP_6_HI,
+		REG_A5XX_CP_PERFCTR_CP_SEL_6, 6 },
+	{ REG_A5XX_RBBM_PERFCTR_CP_7_LO, REG_A5XX_RBBM_PERFCTR_CP_7_HI,
+		REG_A5XX_CP_PERFCTR_CP_SEL_7, 7 },
+};
+
+static struct adreno_counter a5xx_counters_hlsq[] = {
+	{ REG_A5XX_RBBM_PERFCTR_HLSQ_0_LO, REG_A5XX_RBBM_PERFCTR_HLSQ_0_HI,
+		REG_A5XX_HLSQ_PERFCTR_HLSQ_SEL_0, 28 },
+	{ REG_A5XX_RBBM_PERFCTR_HLSQ_1_LO, REG_A5XX_RBBM_PERFCTR_HLSQ_1_HI,
+		REG_A5XX_HLSQ_PERFCTR_HLSQ_SEL_1, 29 },
+	{ REG_A5XX_RBBM_PERFCTR_HLSQ_2_LO, REG_A5XX_RBBM_PERFCTR_HLSQ_2_HI,
+		REG_A5XX_HLSQ_PERFCTR_HLSQ_SEL_2, 30 },
+	{ REG_A5XX_RBBM_PERFCTR_HLSQ_3_LO, REG_A5XX_RBBM_PERFCTR_HLSQ_3_HI,
+		REG_A5XX_HLSQ_PERFCTR_HLSQ_SEL_3, 31 },
+	{ REG_A5XX_RBBM_PERFCTR_HLSQ_4_LO, REG_A5XX_RBBM_PERFCTR_HLSQ_4_HI,
+		REG_A5XX_HLSQ_PERFCTR_HLSQ_SEL_4, 32 },
+	{ REG_A5XX_RBBM_PERFCTR_HLSQ_5_LO, REG_A5XX_RBBM_PERFCTR_HLSQ_5_HI,
+		REG_A5XX_HLSQ_PERFCTR_HLSQ_SEL_5, 33 },
+	{ REG_A5XX_RBBM_PERFCTR_HLSQ_6_LO, REG_A5XX_RBBM_PERFCTR_HLSQ_6_HI,
+		REG_A5XX_HLSQ_PERFCTR_HLSQ_SEL_6, 34 },
+	{ REG_A5XX_RBBM_PERFCTR_HLSQ_7_LO, REG_A5XX_RBBM_PERFCTR_HLSQ_7_HI,
+		REG_A5XX_HLSQ_PERFCTR_HLSQ_SEL_7, 35 },
+};
+
+static struct adreno_counter a5xx_counters_lrz[] = {
+	{ REG_A5XX_RBBM_PERFCTR_LRZ_0_LO, REG_A5XX_RBBM_PERFCTR_LRZ_0_HI,
+		REG_A5XX_GRAS_PERFCTR_LRZ_SEL_0, 90 },
+	{ REG_A5XX_RBBM_PERFCTR_LRZ_1_LO, REG_A5XX_RBBM_PERFCTR_LRZ_1_HI,
+		REG_A5XX_GRAS_PERFCTR_LRZ_SEL_1, 91 },
+	{ REG_A5XX_RBBM_PERFCTR_LRZ_2_LO, REG_A5XX_RBBM_PERFCTR_LRZ_2_HI,
+		REG_A5XX_GRAS_PERFCTR_LRZ_SEL_2, 92 },
+	{ REG_A5XX_RBBM_PERFCTR_LRZ_3_LO, REG_A5XX_RBBM_PERFCTR_LRZ_3_HI,
+		REG_A5XX_GRAS_PERFCTR_LRZ_SEL_3, 93 },
+};
+
+static struct adreno_counter a5xx_counters_pc[] = {
+	{ REG_A5XX_RBBM_PERFCTR_PC_0_LO, REG_A5XX_RBBM_PERFCTR_PC_0_HI,
+		REG_A5XX_PC_PERFCTR_PC_SEL_0, 12 },
+	{ REG_A5XX_RBBM_PERFCTR_PC_1_LO, REG_A5XX_RBBM_PERFCTR_PC_1_HI,
+		REG_A5XX_PC_PERFCTR_PC_SEL_1, 13 },
+	{ REG_A5XX_RBBM_PERFCTR_PC_2_LO, REG_A5XX_RBBM_PERFCTR_PC_2_HI,
+		REG_A5XX_PC_PERFCTR_PC_SEL_2, 14 },
+	{ REG_A5XX_RBBM_PERFCTR_PC_3_LO, REG_A5XX_RBBM_PERFCTR_PC_3_HI,
+		REG_A5XX_PC_PERFCTR_PC_SEL_3, 15 },
+	{ REG_A5XX_RBBM_PERFCTR_PC_4_LO, REG_A5XX_RBBM_PERFCTR_PC_4_HI,
+		REG_A5XX_PC_PERFCTR_PC_SEL_4, 16 },
+	{ REG_A5XX_RBBM_PERFCTR_PC_5_LO, REG_A5XX_RBBM_PERFCTR_PC_5_HI,
+		REG_A5XX_PC_PERFCTR_PC_SEL_5, 17 },
+	{ REG_A5XX_RBBM_PERFCTR_PC_6_LO, REG_A5XX_RBBM_PERFCTR_PC_6_HI,
+		REG_A5XX_PC_PERFCTR_PC_SEL_6, 18 },
+	{ REG_A5XX_RBBM_PERFCTR_PC_7_LO, REG_A5XX_RBBM_PERFCTR_PC_7_HI,
+		REG_A5XX_PC_PERFCTR_PC_SEL_7, 19 },
+};
+
+static struct adreno_counter a5xx_counters_ras[] = {
+	{ REG_A5XX_RBBM_PERFCTR_RAS_0_LO, REG_A5XX_RBBM_PERFCTR_RAS_0_HI,
+		REG_A5XX_GRAS_PERFCTR_RAS_SEL_0, 48 },
+	{ REG_A5XX_RBBM_PERFCTR_RAS_1_LO, REG_A5XX_RBBM_PERFCTR_RAS_1_HI,
+		REG_A5XX_GRAS_PERFCTR_RAS_SEL_1, 49 },
+	{ REG_A5XX_RBBM_PERFCTR_RAS_2_LO, REG_A5XX_RBBM_PERFCTR_RAS_2_HI,
+		REG_A5XX_GRAS_PERFCTR_RAS_SEL_2, 50 },
+	{ REG_A5XX_RBBM_PERFCTR_RAS_3_LO, REG_A5XX_RBBM_PERFCTR_RAS_3_HI,
+		REG_A5XX_GRAS_PERFCTR_RAS_SEL_3, 51 },
+};
+
+static struct adreno_counter a5xx_counters_rb[] = {
+	{ REG_A5XX_RBBM_PERFCTR_RB_0_LO, REG_A5XX_RBBM_PERFCTR_RB_0_HI,
+		REG_A5XX_RB_PERFCTR_RB_SEL_0, 80 },
+	{ REG_A5XX_RBBM_PERFCTR_RB_1_LO, REG_A5XX_RBBM_PERFCTR_RB_1_HI,
+		REG_A5XX_RB_PERFCTR_RB_SEL_1, 81 },
+	{ REG_A5XX_RBBM_PERFCTR_RB_2_LO, REG_A5XX_RBBM_PERFCTR_RB_2_HI,
+		REG_A5XX_RB_PERFCTR_RB_SEL_2, 82 },
+	{ REG_A5XX_RBBM_PERFCTR_RB_3_LO, REG_A5XX_RBBM_PERFCTR_RB_3_HI,
+		REG_A5XX_RB_PERFCTR_RB_SEL_3, 83 },
+	{ REG_A5XX_RBBM_PERFCTR_RB_4_LO, REG_A5XX_RBBM_PERFCTR_RB_4_HI,
+		REG_A5XX_RB_PERFCTR_RB_SEL_4, 84 },
+	{ REG_A5XX_RBBM_PERFCTR_RB_5_LO, REG_A5XX_RBBM_PERFCTR_RB_5_HI,
+		REG_A5XX_RB_PERFCTR_RB_SEL_5, 85 },
+	{ REG_A5XX_RBBM_PERFCTR_RB_6_LO, REG_A5XX_RBBM_PERFCTR_RB_6_HI,
+		REG_A5XX_RB_PERFCTR_RB_SEL_6, 86 },
+	{ REG_A5XX_RBBM_PERFCTR_RB_7_LO, REG_A5XX_RBBM_PERFCTR_RB_7_HI,
+		REG_A5XX_RB_PERFCTR_RB_SEL_7, 87 },
+};
+
+static struct adreno_counter a5xx_counters_rbbm[] = {
+	{ REG_A5XX_RBBM_PERFCTR_RBBM_0_LO, REG_A5XX_RBBM_PERFCTR_RBBM_0_HI,
+		REG_A5XX_RBBM_PERFCTR_RBBM_SEL_0, 8 },
+	{ REG_A5XX_RBBM_PERFCTR_RBBM_1_LO, REG_A5XX_RBBM_PERFCTR_RBBM_1_HI,
+		REG_A5XX_RBBM_PERFCTR_RBBM_SEL_1, 9 },
+	{ REG_A5XX_RBBM_PERFCTR_RBBM_2_LO, REG_A5XX_RBBM_PERFCTR_RBBM_2_HI,
+		REG_A5XX_RBBM_PERFCTR_RBBM_SEL_2, 10 },
+	{ REG_A5XX_RBBM_PERFCTR_RBBM_3_LO, REG_A5XX_RBBM_PERFCTR_RBBM_3_HI,
+		REG_A5XX_RBBM_PERFCTR_RBBM_SEL_3, 11 },
+};
+
+static struct adreno_counter a5xx_counters_sp[] = {
+	{ REG_A5XX_RBBM_PERFCTR_SP_0_LO, REG_A5XX_RBBM_PERFCTR_SP_0_HI,
+		REG_A5XX_SP_PERFCTR_SP_SEL_0, 68 },
+	{ REG_A5XX_RBBM_PERFCTR_SP_1_LO, REG_A5XX_RBBM_PERFCTR_SP_1_HI,
+		REG_A5XX_SP_PERFCTR_SP_SEL_1, 69 },
+	{ REG_A5XX_RBBM_PERFCTR_SP_2_LO, REG_A5XX_RBBM_PERFCTR_SP_2_HI,
+		REG_A5XX_SP_PERFCTR_SP_SEL_2, 70 },
+	{ REG_A5XX_RBBM_PERFCTR_SP_3_LO, REG_A5XX_RBBM_PERFCTR_SP_3_HI,
+		REG_A5XX_SP_PERFCTR_SP_SEL_3, 71 },
+	{ REG_A5XX_RBBM_PERFCTR_SP_4_LO, REG_A5XX_RBBM_PERFCTR_SP_4_HI,
+		REG_A5XX_SP_PERFCTR_SP_SEL_4, 72 },
+	{ REG_A5XX_RBBM_PERFCTR_SP_5_LO, REG_A5XX_RBBM_PERFCTR_SP_5_HI,
+		REG_A5XX_SP_PERFCTR_SP_SEL_5, 73 },
+	{ REG_A5XX_RBBM_PERFCTR_SP_6_LO, REG_A5XX_RBBM_PERFCTR_SP_6_HI,
+		REG_A5XX_SP_PERFCTR_SP_SEL_6, 74 },
+	{ REG_A5XX_RBBM_PERFCTR_SP_7_LO, REG_A5XX_RBBM_PERFCTR_SP_7_HI,
+		REG_A5XX_SP_PERFCTR_SP_SEL_7, 75 },
+	{ REG_A5XX_RBBM_PERFCTR_SP_8_LO, REG_A5XX_RBBM_PERFCTR_SP_8_HI,
+		REG_A5XX_SP_PERFCTR_SP_SEL_8, 76 },
+	{ REG_A5XX_RBBM_PERFCTR_SP_9_LO, REG_A5XX_RBBM_PERFCTR_SP_9_HI,
+		REG_A5XX_SP_PERFCTR_SP_SEL_9, 77 },
+	{ REG_A5XX_RBBM_PERFCTR_SP_10_LO, REG_A5XX_RBBM_PERFCTR_SP_10_HI,
+		REG_A5XX_SP_PERFCTR_SP_SEL_10, 78 },
+	{ REG_A5XX_RBBM_PERFCTR_SP_11_LO, REG_A5XX_RBBM_PERFCTR_SP_11_HI,
+		REG_A5XX_SP_PERFCTR_SP_SEL_11, 79 },
+};
+
+static struct adreno_counter a5xx_counters_tp[] = {
+	{ REG_A5XX_RBBM_PERFCTR_TP_0_LO, REG_A5XX_RBBM_PERFCTR_TP_0_HI,
+		REG_A5XX_TPL1_PERFCTR_TP_SEL_0, 60 },
+	{ REG_A5XX_RBBM_PERFCTR_TP_1_LO, REG_A5XX_RBBM_PERFCTR_TP_1_HI,
+		REG_A5XX_TPL1_PERFCTR_TP_SEL_1, 61 },
+	{ REG_A5XX_RBBM_PERFCTR_TP_2_LO, REG_A5XX_RBBM_PERFCTR_TP_2_HI,
+		REG_A5XX_TPL1_PERFCTR_TP_SEL_2, 62 },
+	{ REG_A5XX_RBBM_PERFCTR_TP_3_LO, REG_A5XX_RBBM_PERFCTR_TP_3_HI,
+		REG_A5XX_TPL1_PERFCTR_TP_SEL_3, 63 },
+	{ REG_A5XX_RBBM_PERFCTR_TP_4_LO, REG_A5XX_RBBM_PERFCTR_TP_4_HI,
+		REG_A5XX_TPL1_PERFCTR_TP_SEL_4, 64 },
+	{ REG_A5XX_RBBM_PERFCTR_TP_5_LO, REG_A5XX_RBBM_PERFCTR_TP_5_HI,
+		REG_A5XX_TPL1_PERFCTR_TP_SEL_5, 65 },
+	{ REG_A5XX_RBBM_PERFCTR_TP_6_LO, REG_A5XX_RBBM_PERFCTR_TP_6_HI,
+		REG_A5XX_TPL1_PERFCTR_TP_SEL_6, 66 },
+	{ REG_A5XX_RBBM_PERFCTR_TP_7_LO, REG_A5XX_RBBM_PERFCTR_TP_7_HI,
+		REG_A5XX_TPL1_PERFCTR_TP_SEL_7, 67 },
+};
+
+static struct adreno_counter a5xx_counters_tse[] = {
+	{ REG_A5XX_RBBM_PERFCTR_TSE_0_LO, REG_A5XX_RBBM_PERFCTR_TSE_0_HI,
+		REG_A5XX_GRAS_PERFCTR_TSE_SEL_0, 44 },
+	{ REG_A5XX_RBBM_PERFCTR_TSE_1_LO, REG_A5XX_RBBM_PERFCTR_TSE_1_HI,
+		REG_A5XX_GRAS_PERFCTR_TSE_SEL_1, 45 },
+	{ REG_A5XX_RBBM_PERFCTR_TSE_2_LO, REG_A5XX_RBBM_PERFCTR_TSE_2_HI,
+		REG_A5XX_GRAS_PERFCTR_TSE_SEL_2, 46 },
+	{ REG_A5XX_RBBM_PERFCTR_TSE_3_LO, REG_A5XX_RBBM_PERFCTR_TSE_3_HI,
+		REG_A5XX_GRAS_PERFCTR_TSE_SEL_3, 47 },
+};
+
+static struct adreno_counter a5xx_counters_uche[] = {
+	{ REG_A5XX_RBBM_PERFCTR_UCHE_0_LO, REG_A5XX_RBBM_PERFCTR_UCHE_0_HI,
+		REG_A5XX_UCHE_PERFCTR_UCHE_SEL_0, 52 },
+	{ REG_A5XX_RBBM_PERFCTR_UCHE_1_LO, REG_A5XX_RBBM_PERFCTR_UCHE_1_HI,
+		REG_A5XX_UCHE_PERFCTR_UCHE_SEL_1, 53 },
+	{ REG_A5XX_RBBM_PERFCTR_UCHE_2_LO, REG_A5XX_RBBM_PERFCTR_UCHE_2_HI,
+		REG_A5XX_UCHE_PERFCTR_UCHE_SEL_2, 54 },
+	{ REG_A5XX_RBBM_PERFCTR_UCHE_3_LO, REG_A5XX_RBBM_PERFCTR_UCHE_3_HI,
+		REG_A5XX_UCHE_PERFCTR_UCHE_SEL_3, 55 },
+	{ REG_A5XX_RBBM_PERFCTR_UCHE_4_LO, REG_A5XX_RBBM_PERFCTR_UCHE_4_HI,
+		REG_A5XX_UCHE_PERFCTR_UCHE_SEL_4, 56 },
+	{ REG_A5XX_RBBM_PERFCTR_UCHE_5_LO, REG_A5XX_RBBM_PERFCTR_UCHE_5_HI,
+		REG_A5XX_UCHE_PERFCTR_UCHE_SEL_5, 57 },
+	{ REG_A5XX_RBBM_PERFCTR_UCHE_6_LO, REG_A5XX_RBBM_PERFCTR_UCHE_6_HI,
+		REG_A5XX_UCHE_PERFCTR_UCHE_SEL_6, 58 },
+	{ REG_A5XX_RBBM_PERFCTR_UCHE_7_LO, REG_A5XX_RBBM_PERFCTR_UCHE_7_HI,
+		REG_A5XX_UCHE_PERFCTR_UCHE_SEL_7, 59 },
+};
+
+static struct adreno_counter a5xx_counters_vfd[] = {
+	{ REG_A5XX_RBBM_PERFCTR_VFD_0_LO, REG_A5XX_RBBM_PERFCTR_VFD_0_HI,
+		REG_A5XX_VFD_PERFCTR_VFD_SEL_0, 20 },
+	{ REG_A5XX_RBBM_PERFCTR_VFD_1_LO, REG_A5XX_RBBM_PERFCTR_VFD_1_HI,
+		REG_A5XX_VFD_PERFCTR_VFD_SEL_1, 21 },
+	{ REG_A5XX_RBBM_PERFCTR_VFD_2_LO, REG_A5XX_RBBM_PERFCTR_VFD_2_HI,
+		REG_A5XX_VFD_PERFCTR_VFD_SEL_2, 22 },
+	{ REG_A5XX_RBBM_PERFCTR_VFD_3_LO, REG_A5XX_RBBM_PERFCTR_VFD_3_HI,
+		REG_A5XX_VFD_PERFCTR_VFD_SEL_3, 23 },
+	{ REG_A5XX_RBBM_PERFCTR_VFD_4_LO, REG_A5XX_RBBM_PERFCTR_VFD_4_HI,
+		REG_A5XX_VFD_PERFCTR_VFD_SEL_4, 24 },
+	{ REG_A5XX_RBBM_PERFCTR_VFD_5_LO, REG_A5XX_RBBM_PERFCTR_VFD_5_HI,
+		REG_A5XX_VFD_PERFCTR_VFD_SEL_5, 25 },
+	{ REG_A5XX_RBBM_PERFCTR_VFD_6_LO, REG_A5XX_RBBM_PERFCTR_VFD_6_HI,
+		REG_A5XX_VFD_PERFCTR_VFD_SEL_6, 26 },
+	{ REG_A5XX_RBBM_PERFCTR_VFD_7_LO, REG_A5XX_RBBM_PERFCTR_VFD_7_HI,
+		REG_A5XX_VFD_PERFCTR_VFD_SEL_7, 27 },
+};
+
+static struct adreno_counter a5xx_counters_vpc[] = {
+	{ REG_A5XX_RBBM_PERFCTR_VPC_0_LO, REG_A5XX_RBBM_PERFCTR_VPC_0_HI,
+		REG_A5XX_VPC_PERFCTR_VPC_SEL_0, 36 },
+	{ REG_A5XX_RBBM_PERFCTR_VPC_1_LO, REG_A5XX_RBBM_PERFCTR_VPC_1_HI,
+		REG_A5XX_VPC_PERFCTR_VPC_SEL_1, 37 },
+	{ REG_A5XX_RBBM_PERFCTR_VPC_2_LO, REG_A5XX_RBBM_PERFCTR_VPC_2_HI,
+		REG_A5XX_VPC_PERFCTR_VPC_SEL_2, 38 },
+	{ REG_A5XX_RBBM_PERFCTR_VPC_3_LO, REG_A5XX_RBBM_PERFCTR_VPC_3_HI,
+		REG_A5XX_VPC_PERFCTR_VPC_SEL_3, 39 },
+};
+
+static struct adreno_counter a5xx_counters_vsc[] = {
+	{ REG_A5XX_RBBM_PERFCTR_VSC_0_LO, REG_A5XX_RBBM_PERFCTR_VSC_0_HI,
+		REG_A5XX_VSC_PERFCTR_VSC_SEL_0, 88 },
+	{ REG_A5XX_RBBM_PERFCTR_VSC_1_LO, REG_A5XX_RBBM_PERFCTR_VSC_1_HI,
+		REG_A5XX_VSC_PERFCTR_VSC_SEL_1, 89 },
+};
+
+static struct adreno_counter a5xx_counters_power_ccu[] = {
+	{ REG_A5XX_CCU_POWER_COUNTER_0_LO, REG_A5XX_CCU_POWER_COUNTER_0_HI,
+		REG_A5XX_RB_POWERCTR_CCU_SEL_0 },
+	{ REG_A5XX_CCU_POWER_COUNTER_1_LO, REG_A5XX_CCU_POWER_COUNTER_1_HI,
+		REG_A5XX_RB_POWERCTR_CCU_SEL_1 },
+};
+
+static struct adreno_counter a5xx_counters_power_cp[] = {
+	{ REG_A5XX_CP_POWER_COUNTER_0_LO, REG_A5XX_CP_POWER_COUNTER_0_HI,
+		REG_A5XX_CP_POWERCTR_CP_SEL_0 },
+	{ REG_A5XX_CP_POWER_COUNTER_1_LO, REG_A5XX_CP_POWER_COUNTER_1_HI,
+		REG_A5XX_CP_POWERCTR_CP_SEL_1 },
+	{ REG_A5XX_CP_POWER_COUNTER_2_LO, REG_A5XX_CP_POWER_COUNTER_2_HI,
+		REG_A5XX_CP_POWERCTR_CP_SEL_2 },
+	{ REG_A5XX_CP_POWER_COUNTER_3_LO, REG_A5XX_CP_POWER_COUNTER_3_HI,
+		REG_A5XX_CP_POWERCTR_CP_SEL_3 },
+};
+
+static struct adreno_counter a5xx_counters_power_rb[] = {
+	{ REG_A5XX_RB_POWER_COUNTER_0_LO, REG_A5XX_RB_POWER_COUNTER_0_HI,
+		REG_A5XX_RB_POWERCTR_RB_SEL_0 },
+	{ REG_A5XX_RB_POWER_COUNTER_1_LO, REG_A5XX_RB_POWER_COUNTER_1_HI,
+		REG_A5XX_RB_POWERCTR_RB_SEL_1 },
+	{ REG_A5XX_RB_POWER_COUNTER_2_LO, REG_A5XX_RB_POWER_COUNTER_2_HI,
+		REG_A5XX_RB_POWERCTR_RB_SEL_2 },
+	{ REG_A5XX_RB_POWER_COUNTER_3_LO, REG_A5XX_RB_POWER_COUNTER_3_HI,
+		REG_A5XX_RB_POWERCTR_RB_SEL_3 },
+};
+
+static struct adreno_counter a5xx_counters_power_sp[] = {
+	{ REG_A5XX_SP_POWER_COUNTER_0_LO, REG_A5XX_SP_POWER_COUNTER_0_HI,
+		REG_A5XX_SP_POWERCTR_SP_SEL_0 },
+	{ REG_A5XX_SP_POWER_COUNTER_1_LO, REG_A5XX_SP_POWER_COUNTER_1_HI,
+		REG_A5XX_SP_POWERCTR_SP_SEL_1 },
+	{ REG_A5XX_SP_POWER_COUNTER_2_LO, REG_A5XX_SP_POWER_COUNTER_2_HI,
+		REG_A5XX_SP_POWERCTR_SP_SEL_2 },
+	{ REG_A5XX_SP_POWER_COUNTER_3_LO, REG_A5XX_SP_POWER_COUNTER_3_HI,
+		REG_A5XX_SP_POWERCTR_SP_SEL_3 },
+};
+
+static struct adreno_counter a5xx_counters_power_tp[] = {
+	{ REG_A5XX_TP_POWER_COUNTER_0_LO, REG_A5XX_TP_POWER_COUNTER_0_HI,
+		REG_A5XX_TPL1_POWERCTR_TP_SEL_0 },
+	{ REG_A5XX_TP_POWER_COUNTER_1_LO, REG_A5XX_TP_POWER_COUNTER_1_HI,
+		REG_A5XX_TPL1_POWERCTR_TP_SEL_1 },
+	{ REG_A5XX_TP_POWER_COUNTER_2_LO, REG_A5XX_TP_POWER_COUNTER_2_HI,
+		REG_A5XX_TPL1_POWERCTR_TP_SEL_2 },
+	{ REG_A5XX_TP_POWER_COUNTER_3_LO, REG_A5XX_TP_POWER_COUNTER_3_HI,
+		REG_A5XX_TPL1_POWERCTR_TP_SEL_3 },
+};
+
+static struct adreno_counter a5xx_counters_power_uche[] = {
+	{ REG_A5XX_UCHE_POWER_COUNTER_0_LO, REG_A5XX_UCHE_POWER_COUNTER_0_HI,
+		REG_A5XX_UCHE_POWERCTR_UCHE_SEL_0 },
+	{ REG_A5XX_UCHE_POWER_COUNTER_1_LO, REG_A5XX_UCHE_POWER_COUNTER_1_HI,
+		REG_A5XX_UCHE_POWERCTR_UCHE_SEL_1 },
+	{ REG_A5XX_UCHE_POWER_COUNTER_2_LO, REG_A5XX_UCHE_POWER_COUNTER_2_HI,
+		REG_A5XX_UCHE_POWERCTR_UCHE_SEL_2 },
+	{ REG_A5XX_UCHE_POWER_COUNTER_3_LO, REG_A5XX_UCHE_POWER_COUNTER_3_HI,
+		REG_A5XX_UCHE_POWERCTR_UCHE_SEL_3 },
+};
+
+static struct adreno_counter a5xx_counters_vbif[] = {
+	{ REG_A5XX_VBIF_PERF_CNT_LOW0, REG_A5XX_VBIF_PERF_CNT_HIGH0 },
+	{ REG_A5XX_VBIF_PERF_CNT_LOW1, REG_A5XX_VBIF_PERF_CNT_HIGH1 },
+	{ REG_A5XX_VBIF_PERF_CNT_LOW2, REG_A5XX_VBIF_PERF_CNT_HIGH2 },
+	{ REG_A5XX_VBIF_PERF_CNT_LOW3, REG_A5XX_VBIF_PERF_CNT_HIGH3 },
+};
+
+static struct adreno_counter a5xx_counters_gpmu[] = {
+	{ REG_A5XX_GPMU_POWER_COUNTER_0_LO, REG_A5XX_GPMU_POWER_COUNTER_0_HI },
+	{ REG_A5XX_GPMU_POWER_COUNTER_1_LO, REG_A5XX_GPMU_POWER_COUNTER_1_HI },
+	{ REG_A5XX_GPMU_POWER_COUNTER_2_LO, REG_A5XX_GPMU_POWER_COUNTER_2_HI },
+	{ REG_A5XX_GPMU_POWER_COUNTER_3_LO, REG_A5XX_GPMU_POWER_COUNTER_3_HI },
+	{ REG_A5XX_GPMU_POWER_COUNTER_4_LO, REG_A5XX_GPMU_POWER_COUNTER_4_HI },
+	{ REG_A5XX_GPMU_POWER_COUNTER_5_LO, REG_A5XX_GPMU_POWER_COUNTER_5_HI },
+};
+
+static struct adreno_counter a5xx_counters_vbif_power[] = {
+	{ REG_A5XX_VBIF_PERF_PWR_CNT_LOW0, REG_A5XX_VBIF_PERF_PWR_CNT_HIGH0 },
+	{ REG_A5XX_VBIF_PERF_PWR_CNT_LOW1, REG_A5XX_VBIF_PERF_PWR_CNT_HIGH1 },
+	{ REG_A5XX_VBIF_PERF_PWR_CNT_LOW2, REG_A5XX_VBIF_PERF_PWR_CNT_HIGH2 },
+};
+
+static struct adreno_counter a5xx_counters_alwayson_power[] = {
+	{ REG_A5XX_GPMU_ALWAYS_ON_COUNTER_LO,
+		REG_A5XX_GPMU_ALWAYS_ON_COUNTER_HI },
+};
+
+#define DEFINE_COUNTER_GROUP(_n, _a, _get, _enable, _put, _save, _restore) \
+static struct adreno_counter_group _n = { \
+	.counters = _a, \
+	.nr_counters = ARRAY_SIZE(_a), \
+	.lock = __SPIN_LOCK_UNLOCKED(_n.lock), \
+	.funcs = { \
+		.get = _get, \
+		.enable = _enable, \
+		.read = a5xx_counter_read, \
+		.put = _put, \
+		.save = _save, \
+		.restore = _restore \
+	}, \
+}
+
+#define COUNTER_GROUP(_name, _array) DEFINE_COUNTER_GROUP(_name, \
+	_array, a5xx_counter_get, a5xx_counter_enable_cpu, a5xx_counter_put, \
+	a5xx_counter_save, a5xx_counter_restore)
+
+#define SPTP_COUNTER_GROUP(_name, _array) DEFINE_COUNTER_GROUP(_name, \
+	_array, a5xx_counter_get, a5xx_counter_enable_pm4, a5xx_counter_put, \
+	a5xx_counter_save, a5xx_counter_restore)
+
+#define POWER_COUNTER_GROUP(_name, _array) DEFINE_COUNTER_GROUP(_name, \
+	_array, a5xx_counter_get, a5xx_counter_enable_cpu, a5xx_counter_put, \
+	NULL, NULL)
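+
+/*
+ * For illustration: COUNTER_GROUP(a5xx_counter_group_cp, a5xx_counters_cp)
+ * defines a group whose select registers are written directly from the CPU,
+ * the SPTP variant programs them through a PM4 packet instead (presumably
+ * because the SP/TP selects have to be written by the CP), and the POWER
+ * variant has no save/restore hooks so those counters are not preserved
+ * across power collapse.
+ */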
+
+/* "standard" counters */
+COUNTER_GROUP(a5xx_counter_group_cp, a5xx_counters_cp);
+COUNTER_GROUP(a5xx_counter_group_rbbm, a5xx_counters_rbbm);
+COUNTER_GROUP(a5xx_counter_group_pc, a5xx_counters_pc);
+COUNTER_GROUP(a5xx_counter_group_vfd, a5xx_counters_vfd);
+COUNTER_GROUP(a5xx_counter_group_vpc, a5xx_counters_vpc);
+COUNTER_GROUP(a5xx_counter_group_ccu, a5xx_counters_ccu);
+COUNTER_GROUP(a5xx_counter_group_cmp, a5xx_counters_cmp);
+COUNTER_GROUP(a5xx_counter_group_tse, a5xx_counters_tse);
+COUNTER_GROUP(a5xx_counter_group_ras, a5xx_counters_ras);
+COUNTER_GROUP(a5xx_counter_group_uche, a5xx_counters_uche);
+COUNTER_GROUP(a5xx_counter_group_rb, a5xx_counters_rb);
+COUNTER_GROUP(a5xx_counter_group_vsc, a5xx_counters_vsc);
+COUNTER_GROUP(a5xx_counter_group_lrz, a5xx_counters_lrz);
+
+/* SP/TP counters */
+SPTP_COUNTER_GROUP(a5xx_counter_group_hlsq, a5xx_counters_hlsq);
+SPTP_COUNTER_GROUP(a5xx_counter_group_tp, a5xx_counters_tp);
+SPTP_COUNTER_GROUP(a5xx_counter_group_sp, a5xx_counters_sp);
+
+/* Power counters */
+POWER_COUNTER_GROUP(a5xx_counter_group_power_ccu, a5xx_counters_power_ccu);
+POWER_COUNTER_GROUP(a5xx_counter_group_power_cp, a5xx_counters_power_cp);
+POWER_COUNTER_GROUP(a5xx_counter_group_power_rb, a5xx_counters_power_rb);
+POWER_COUNTER_GROUP(a5xx_counter_group_power_sp, a5xx_counters_power_sp);
+POWER_COUNTER_GROUP(a5xx_counter_group_power_tp, a5xx_counters_power_tp);
+POWER_COUNTER_GROUP(a5xx_counter_group_power_uche, a5xx_counters_power_uche);
+
+DEFINE_COUNTER_GROUP(a5xx_counter_group_alwayson, a5xx_counters_alwayson,
+	a5xx_counter_get_fixed, NULL, NULL, NULL, NULL);
+DEFINE_COUNTER_GROUP(a5xx_counter_group_vbif, a5xx_counters_vbif,
+	a5xx_counter_get, a5xx_counter_enable_vbif, a5xx_counter_put,
+	NULL, NULL);
+DEFINE_COUNTER_GROUP(a5xx_counter_group_gpmu, a5xx_counters_gpmu,
+	a5xx_counter_get, a5xx_counter_enable_gpmu, a5xx_counter_put,
+	NULL, NULL);
+DEFINE_COUNTER_GROUP(a5xx_counter_group_vbif_power, a5xx_counters_vbif_power,
+	a5xx_counter_get_fixed, a5xx_counter_enable_vbif_power, NULL, NULL,
+	NULL);
+DEFINE_COUNTER_GROUP(a5xx_counter_group_alwayson_power,
+		a5xx_counters_alwayson_power, a5xx_counter_get_fixed,
+		a5xx_counter_enable_alwayson_power, NULL, NULL, NULL);
+
+static const struct adreno_counter_group *a5xx_counter_groups[] = {
+	[MSM_COUNTER_GROUP_ALWAYSON] = &a5xx_counter_group_alwayson,
+	[MSM_COUNTER_GROUP_CCU] = &a5xx_counter_group_ccu,
+	[MSM_COUNTER_GROUP_CMP] = &a5xx_counter_group_cmp,
+	[MSM_COUNTER_GROUP_CP] = &a5xx_counter_group_cp,
+	[MSM_COUNTER_GROUP_HLSQ] = &a5xx_counter_group_hlsq,
+	[MSM_COUNTER_GROUP_LRZ] = &a5xx_counter_group_lrz,
+	[MSM_COUNTER_GROUP_PC] = &a5xx_counter_group_pc,
+	[MSM_COUNTER_GROUP_RAS] = &a5xx_counter_group_ras,
+	[MSM_COUNTER_GROUP_RB] = &a5xx_counter_group_rb,
+	[MSM_COUNTER_GROUP_RBBM] = &a5xx_counter_group_rbbm,
+	[MSM_COUNTER_GROUP_SP] = &a5xx_counter_group_sp,
+	[MSM_COUNTER_GROUP_TP] = &a5xx_counter_group_tp,
+	[MSM_COUNTER_GROUP_TSE] = &a5xx_counter_group_tse,
+	[MSM_COUNTER_GROUP_UCHE] = &a5xx_counter_group_uche,
+	[MSM_COUNTER_GROUP_VFD] = &a5xx_counter_group_vfd,
+	[MSM_COUNTER_GROUP_VPC] = &a5xx_counter_group_vpc,
+	[MSM_COUNTER_GROUP_VSC] = &a5xx_counter_group_vsc,
+	[MSM_COUNTER_GROUP_VBIF] = &a5xx_counter_group_vbif,
+	[MSM_COUNTER_GROUP_GPMU_PWR] = &a5xx_counter_group_gpmu,
+	[MSM_COUNTER_GROUP_CCU_PWR] = &a5xx_counter_group_power_ccu,
+	[MSM_COUNTER_GROUP_CP_PWR] = &a5xx_counter_group_power_cp,
+	[MSM_COUNTER_GROUP_RB_PWR] = &a5xx_counter_group_power_rb,
+	[MSM_COUNTER_GROUP_SP_PWR] = &a5xx_counter_group_power_sp,
+	[MSM_COUNTER_GROUP_TP_PWR] = &a5xx_counter_group_power_tp,
+	[MSM_COUNTER_GROUP_UCHE_PWR] = &a5xx_counter_group_power_uche,
+	[MSM_COUNTER_GROUP_VBIF_PWR] = &a5xx_counter_group_vbif_power,
+	[MSM_COUNTER_GROUP_ALWAYSON_PWR] =
+		&a5xx_counter_group_alwayson_power,
+};
+
+void a5xx_counters_restore(struct msm_gpu *gpu)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(a5xx_counter_groups); i++) {
+		struct adreno_counter_group *group =
+			(struct adreno_counter_group *) a5xx_counter_groups[i];
+
+		if (!group)
+			continue;
+
+		if (group->funcs.restore)
+			group->funcs.restore(gpu, group);
+
+		a5xx_counter_group_enable(gpu, group, true);
+	}
+}
+
+void a5xx_counters_save(struct msm_gpu *gpu)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(a5xx_counter_groups); i++) {
+		struct adreno_counter_group *group =
+			(struct adreno_counter_group *) a5xx_counter_groups[i];
+
+		if (group && group->funcs.save)
+			group->funcs.save(gpu, group);
+	}
+}
+
+int a5xx_counters_init(struct adreno_gpu *adreno_gpu)
+{
+	adreno_gpu->counter_groups = a5xx_counter_groups;
+	adreno_gpu->nr_counter_groups = ARRAY_SIZE(a5xx_counter_groups);
+
+	return 0;
+}
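+
+/*
+ * Usage sketch (mirroring a5xx_hw_init() and a5xx_gpu_busy() in a5xx_gpu.c):
+ * once the groups are registered, a caller reserves a counter from a group
+ * and reads it back, e.g. RBBM countable 6 for GPU busy cycles:
+ *
+ *	counter = adreno_get_counter(gpu, MSM_COUNTER_GROUP_RBBM, 6,
+ *		NULL, NULL);
+ *	...
+ *	cycles = adreno_read_counter(gpu, MSM_COUNTER_GROUP_RBBM, counter);
+ */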
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/gpu/drm/msm/adreno/a5xx_gpu.c	2019-01-22 16:16:23.483246225 +0100
@@ -0,0 +1,1439 @@
+/* Copyright (c) 2016-2018 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include "msm_gem.h"
+#include "msm_iommu.h"
+#include "msm_trace.h"
+#include "a5xx_gpu.h"
+
+#define SECURE_VA_START 0xc0000000
+#define SECURE_VA_SIZE  SZ_256M
+
+static void a5xx_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
+{
+	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+	struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
+	uint32_t wptr;
+	unsigned long flags;
+
+	spin_lock_irqsave(&ring->lock, flags);
+
+	/* Copy the shadow to the actual register */
+	ring->cur = ring->next;
+
+	/* Make sure to wrap wptr if we need to */
+	wptr = get_wptr(ring);
+
+	spin_unlock_irqrestore(&ring->lock, flags);
+
+	/* Make sure everything is posted before making a decision */
+	mb();
+
+	/* Update HW if this is the current ring and we are not in preempt */
+	if (a5xx_gpu->cur_ring == ring && !a5xx_in_preempt(a5xx_gpu))
+		gpu_write(gpu, REG_A5XX_CP_RB_WPTR, wptr);
+}
+
+static void a5xx_set_pagetable(struct msm_gpu *gpu, struct msm_ringbuffer *ring,
+	struct msm_gem_address_space *aspace)
+{
+	struct msm_mmu *mmu = aspace->mmu;
+	struct msm_iommu *iommu = to_msm_iommu(mmu);
+
+	if (!iommu->ttbr0)
+		return;
+
+	/* Turn off protected mode */
+	OUT_PKT7(ring, CP_SET_PROTECTED_MODE, 1);
+	OUT_RING(ring, 0);
+
+	/* Turn on APRIV mode to access critical regions */
+	OUT_PKT4(ring, REG_A5XX_CP_CNTL, 1);
+	OUT_RING(ring, 1);
+
+	/* Make sure the ME is synchronized before starting the update */
+	OUT_PKT7(ring, CP_WAIT_FOR_ME, 0);
+
+	/* Execute the table update */
+	OUT_PKT7(ring, CP_SMMU_TABLE_UPDATE, 3);
+	OUT_RING(ring, lower_32_bits(iommu->ttbr0));
+	OUT_RING(ring, upper_32_bits(iommu->ttbr0));
+	OUT_RING(ring, iommu->contextidr);
+
+	/*
+	 * Write the new TTBR0 to the preemption records - this will be used to
+	 * reload the pagetable if the current ring gets preempted out.
+	 */
+	OUT_PKT7(ring, CP_MEM_WRITE, 4);
+	OUT_RING(ring, lower_32_bits(rbmemptr(ring, ttbr0)));
+	OUT_RING(ring, upper_32_bits(rbmemptr(ring, ttbr0)));
+	OUT_RING(ring, lower_32_bits(iommu->ttbr0));
+	OUT_RING(ring, upper_32_bits(iommu->ttbr0));
+
+	/* Also write the current contextidr (ASID) */
+	OUT_PKT7(ring, CP_MEM_WRITE, 3);
+	OUT_RING(ring, lower_32_bits(rbmemptr(ring, contextidr)));
+	OUT_RING(ring, upper_32_bits(rbmemptr(ring, contextidr)));
+	OUT_RING(ring, iommu->contextidr);
+
+	/* Invalidate the draw state so we start off fresh */
+	OUT_PKT7(ring, CP_SET_DRAW_STATE, 3);
+	OUT_RING(ring, 0x40000);
+	OUT_RING(ring, 1);
+	OUT_RING(ring, 0);
+
+	/* Turn off APRIV */
+	OUT_PKT4(ring, REG_A5XX_CP_CNTL, 1);
+	OUT_RING(ring, 0);
+
+	/* Turn protected mode back on */
+	OUT_PKT7(ring, CP_SET_PROTECTED_MODE, 1);
+	OUT_RING(ring, 1);
+}
+
+/* Inline PM4 code to get the current value of the 19.2 MHz always-on counter */
+static void a5xx_get_ticks(struct msm_ringbuffer *ring, uint64_t iova)
+{
+	/*
+	 * Set bit[30] to make this command a 64 bit write operation.
+	 * bits[18:29] specify the number of consecutive registers to copy,
+	 * so set that field to 2 since we want to copy both
+	 * REG_A5XX_RBBM_ALWAYSON_COUNTER_LO and _HI.
+	 */
+
+	OUT_PKT7(ring, CP_REG_TO_MEM, 3);
+	OUT_RING(ring, REG_A5XX_RBBM_ALWAYSON_COUNTER_LO |
+		(1 << 30) | (2 << 18));
+	OUT_RING(ring, lower_32_bits(iova));
+	OUT_RING(ring, upper_32_bits(iova));
+}
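+
+/*
+ * For example, assuming REG_A5XX_RBBM_ALWAYSON_COUNTER_LO is 0x04d2, the
+ * first payload dword above encodes as:
+ *
+ *	(1 << 30) | (2 << 18) | 0x04d2 = 0x400804d2
+ */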
+
+static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
+{
+	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+	struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
+	struct msm_ringbuffer *ring = gpu->rb[submit->ring];
+	unsigned int i, ibs = 0;
+	unsigned long flags;
+	u64 ticks;
+	ktime_t time;
+
+	a5xx_set_pagetable(gpu, ring, submit->aspace);
+
+	OUT_PKT7(ring, CP_PREEMPT_ENABLE_GLOBAL, 1);
+	OUT_RING(ring, 0x02);
+
+	/* Turn off protected mode to write to special registers */
+	OUT_PKT7(ring, CP_SET_PROTECTED_MODE, 1);
+	OUT_RING(ring, 0);
+
+	/* Set the save preemption record for the ring/command */
+	OUT_PKT4(ring, REG_A5XX_CP_CONTEXT_SWITCH_SAVE_ADDR_LO, 2);
+	OUT_RING(ring, lower_32_bits(a5xx_gpu->preempt_iova[submit->ring]));
+	OUT_RING(ring, upper_32_bits(a5xx_gpu->preempt_iova[submit->ring]));
+
+	/* Turn back on protected mode */
+	OUT_PKT7(ring, CP_SET_PROTECTED_MODE, 1);
+	OUT_RING(ring, 1);
+
+	/* Enable local preemption for fine-grained preemption */
+	OUT_PKT7(ring, CP_PREEMPT_ENABLE_LOCAL, 1);
+	OUT_RING(ring, 0x02);
+
+	/* Allow CP_CONTEXT_SWITCH_YIELD packets in the IB2 */
+	OUT_PKT7(ring, CP_YIELD_ENABLE, 1);
+	OUT_RING(ring, 0x02);
+
+	/* Turn on secure mode if the submission is secure */
+	if (submit->secure) {
+		OUT_PKT7(ring, CP_SET_SECURE_MODE, 1);
+		OUT_RING(ring, 1);
+	}
+
+	/* Record the GPU ticks at command start for kernel side profiling */
+	a5xx_get_ticks(ring,
+		RING_TICKS_IOVA(ring, submit->tick_index, started));
+
+	/* And for the user profiling too if it is enabled */
+	if (submit->profile_buf_iova)
+		a5xx_get_ticks(ring, submit->profile_buf_iova +
+			offsetof(struct drm_msm_gem_submit_profile_buffer,
+				ticks_submitted));
+
+	/* Submit the commands */
+	for (i = 0; i < submit->nr_cmds; i++) {
+		switch (submit->cmd[i].type) {
+		case MSM_SUBMIT_CMD_IB_TARGET_BUF:
+		case MSM_SUBMIT_CMD_PROFILE_BUF:
+			break;
+		case MSM_SUBMIT_CMD_BUF:
+			OUT_PKT7(ring, CP_INDIRECT_BUFFER_PFE, 3);
+			OUT_RING(ring, lower_32_bits(submit->cmd[i].iova));
+			OUT_RING(ring, upper_32_bits(submit->cmd[i].iova));
+			OUT_RING(ring, submit->cmd[i].size);
+			ibs++;
+			break;
+		}
+	}
+
+	/*
+	 * Write the render mode to NULL (0) to indicate to the CP that the IBs
+	 * are done rendering - otherwise a lucky preemption would start
+	 * replaying from the last checkpoint
+	 */
+	OUT_PKT7(ring, CP_SET_RENDER_MODE, 5);
+	OUT_RING(ring, 0);
+	OUT_RING(ring, 0);
+	OUT_RING(ring, 0);
+	OUT_RING(ring, 0);
+	OUT_RING(ring, 0);
+
+	/* Turn off IB level preemptions */
+	OUT_PKT7(ring, CP_YIELD_ENABLE, 1);
+	OUT_RING(ring, 0x01);
+
+	/* Record the GPU ticks at command retire for kernel side profiling */
+	a5xx_get_ticks(ring,
+		RING_TICKS_IOVA(ring, submit->tick_index, retired));
+
+	/* Record the always on counter after command execution */
+	if (submit->profile_buf_iova)
+		a5xx_get_ticks(ring, submit->profile_buf_iova +
+			offsetof(struct drm_msm_gem_submit_profile_buffer,
+				ticks_retired));
+
+	/* Write the fence to the scratch register */
+	OUT_PKT4(ring, REG_A5XX_CP_SCRATCH_REG(2), 1);
+	OUT_RING(ring, submit->fence);
+
+	/*
+	 * Execute a CACHE_FLUSH_TS event. This will ensure that the
+	 * timestamp is written to the memory and then triggers the interrupt
+	 */
+	OUT_PKT7(ring, CP_EVENT_WRITE, 4);
+	OUT_RING(ring, CACHE_FLUSH_TS | (1 << 31));
+	OUT_RING(ring, lower_32_bits(rbmemptr(ring, fence)));
+	OUT_RING(ring, upper_32_bits(rbmemptr(ring, fence)));
+	OUT_RING(ring, submit->fence);
+
+	if (submit->secure) {
+		OUT_PKT7(ring, CP_SET_SECURE_MODE, 1);
+		OUT_RING(ring, 0);
+	}
+
+	/* Yield the floor on command completion */
+	OUT_PKT7(ring, CP_CONTEXT_SWITCH_YIELD, 4);
+	/*
+	 * If dword[2:1] are non zero, they specify an address for the CP to
+	 * write the value of dword[3] to on preemption complete. Write 0 to
+	 * skip the write
+	 */
+	OUT_RING(ring, 0x00);
+	OUT_RING(ring, 0x00);
+	/* Data value - not used if the address above is 0 */
+	OUT_RING(ring, 0x01);
+	/* Set bit 0 to trigger an interrupt on preempt complete */
+	OUT_RING(ring, 0x01);
+
+	/*
+	 * Get the current kernel time and ticks with interrupts off so we don't
+	 * get interrupted between the operations and skew the numbers
+	 */
+
+	local_irq_save(flags);
+	ticks = gpu_read64(gpu, REG_A5XX_RBBM_ALWAYSON_COUNTER_LO,
+		REG_A5XX_RBBM_ALWAYSON_COUNTER_HI);
+	time = ktime_get_raw();
+	local_irq_restore(flags);
+
+	if (submit->profile_buf) {
+		struct timespec64 ts = ktime_to_timespec64(time);
+
+		/* Write the data into the user-specified profile buffer */
+		submit->profile_buf->time.tv_sec = ts.tv_sec;
+		submit->profile_buf->time.tv_nsec = ts.tv_nsec;
+		submit->profile_buf->ticks_queued = ticks;
+	}
+
+	trace_msm_submitted(submit, ticks, ktime_to_ns(time));
+
+	a5xx_flush(gpu, ring);
+
+	/* Check to see if we need to start preemption */
+	a5xx_preempt_trigger(gpu);
+}
+
+static const struct {
+	u32 offset;
+	u32 value;
+} a5xx_hwcg[] = {
+	{REG_A5XX_RBBM_CLOCK_CNTL_SP0, 0x02222222},
+	{REG_A5XX_RBBM_CLOCK_CNTL_SP1, 0x02222222},
+	{REG_A5XX_RBBM_CLOCK_CNTL_SP2, 0x02222222},
+	{REG_A5XX_RBBM_CLOCK_CNTL_SP3, 0x02222222},
+	{REG_A5XX_RBBM_CLOCK_CNTL2_SP0, 0x02222220},
+	{REG_A5XX_RBBM_CLOCK_CNTL2_SP1, 0x02222220},
+	{REG_A5XX_RBBM_CLOCK_CNTL2_SP2, 0x02222220},
+	{REG_A5XX_RBBM_CLOCK_CNTL2_SP3, 0x02222220},
+	{REG_A5XX_RBBM_CLOCK_HYST_SP0, 0x0000F3CF},
+	{REG_A5XX_RBBM_CLOCK_HYST_SP1, 0x0000F3CF},
+	{REG_A5XX_RBBM_CLOCK_HYST_SP2, 0x0000F3CF},
+	{REG_A5XX_RBBM_CLOCK_HYST_SP3, 0x0000F3CF},
+	{REG_A5XX_RBBM_CLOCK_DELAY_SP0, 0x00000080},
+	{REG_A5XX_RBBM_CLOCK_DELAY_SP1, 0x00000080},
+	{REG_A5XX_RBBM_CLOCK_DELAY_SP2, 0x00000080},
+	{REG_A5XX_RBBM_CLOCK_DELAY_SP3, 0x00000080},
+	{REG_A5XX_RBBM_CLOCK_CNTL_TP0, 0x22222222},
+	{REG_A5XX_RBBM_CLOCK_CNTL_TP1, 0x22222222},
+	{REG_A5XX_RBBM_CLOCK_CNTL_TP2, 0x22222222},
+	{REG_A5XX_RBBM_CLOCK_CNTL_TP3, 0x22222222},
+	{REG_A5XX_RBBM_CLOCK_CNTL2_TP0, 0x22222222},
+	{REG_A5XX_RBBM_CLOCK_CNTL2_TP1, 0x22222222},
+	{REG_A5XX_RBBM_CLOCK_CNTL2_TP2, 0x22222222},
+	{REG_A5XX_RBBM_CLOCK_CNTL2_TP3, 0x22222222},
+	{REG_A5XX_RBBM_CLOCK_CNTL3_TP0, 0x00002222},
+	{REG_A5XX_RBBM_CLOCK_CNTL3_TP1, 0x00002222},
+	{REG_A5XX_RBBM_CLOCK_CNTL3_TP2, 0x00002222},
+	{REG_A5XX_RBBM_CLOCK_CNTL3_TP3, 0x00002222},
+	{REG_A5XX_RBBM_CLOCK_HYST_TP0, 0x77777777},
+	{REG_A5XX_RBBM_CLOCK_HYST_TP1, 0x77777777},
+	{REG_A5XX_RBBM_CLOCK_HYST_TP2, 0x77777777},
+	{REG_A5XX_RBBM_CLOCK_HYST_TP3, 0x77777777},
+	{REG_A5XX_RBBM_CLOCK_HYST2_TP0, 0x77777777},
+	{REG_A5XX_RBBM_CLOCK_HYST2_TP1, 0x77777777},
+	{REG_A5XX_RBBM_CLOCK_HYST2_TP2, 0x77777777},
+	{REG_A5XX_RBBM_CLOCK_HYST2_TP3, 0x77777777},
+	{REG_A5XX_RBBM_CLOCK_HYST3_TP0, 0x00007777},
+	{REG_A5XX_RBBM_CLOCK_HYST3_TP1, 0x00007777},
+	{REG_A5XX_RBBM_CLOCK_HYST3_TP2, 0x00007777},
+	{REG_A5XX_RBBM_CLOCK_HYST3_TP3, 0x00007777},
+	{REG_A5XX_RBBM_CLOCK_DELAY_TP0, 0x11111111},
+	{REG_A5XX_RBBM_CLOCK_DELAY_TP1, 0x11111111},
+	{REG_A5XX_RBBM_CLOCK_DELAY_TP2, 0x11111111},
+	{REG_A5XX_RBBM_CLOCK_DELAY_TP3, 0x11111111},
+	{REG_A5XX_RBBM_CLOCK_DELAY2_TP0, 0x11111111},
+	{REG_A5XX_RBBM_CLOCK_DELAY2_TP1, 0x11111111},
+	{REG_A5XX_RBBM_CLOCK_DELAY2_TP2, 0x11111111},
+	{REG_A5XX_RBBM_CLOCK_DELAY2_TP3, 0x11111111},
+	{REG_A5XX_RBBM_CLOCK_DELAY3_TP0, 0x00001111},
+	{REG_A5XX_RBBM_CLOCK_DELAY3_TP1, 0x00001111},
+	{REG_A5XX_RBBM_CLOCK_DELAY3_TP2, 0x00001111},
+	{REG_A5XX_RBBM_CLOCK_DELAY3_TP3, 0x00001111},
+	{REG_A5XX_RBBM_CLOCK_CNTL_UCHE, 0x22222222},
+	{REG_A5XX_RBBM_CLOCK_CNTL2_UCHE, 0x22222222},
+	{REG_A5XX_RBBM_CLOCK_CNTL3_UCHE, 0x22222222},
+	{REG_A5XX_RBBM_CLOCK_CNTL4_UCHE, 0x00222222},
+	{REG_A5XX_RBBM_CLOCK_HYST_UCHE, 0x00444444},
+	{REG_A5XX_RBBM_CLOCK_DELAY_UCHE, 0x00000002},
+	{REG_A5XX_RBBM_CLOCK_CNTL_RB0, 0x22222222},
+	{REG_A5XX_RBBM_CLOCK_CNTL_RB1, 0x22222222},
+	{REG_A5XX_RBBM_CLOCK_CNTL_RB2, 0x22222222},
+	{REG_A5XX_RBBM_CLOCK_CNTL_RB3, 0x22222222},
+	{REG_A5XX_RBBM_CLOCK_CNTL2_RB0, 0x00222222},
+	{REG_A5XX_RBBM_CLOCK_CNTL2_RB1, 0x00222222},
+	{REG_A5XX_RBBM_CLOCK_CNTL2_RB2, 0x00222222},
+	{REG_A5XX_RBBM_CLOCK_CNTL2_RB3, 0x00222222},
+	{REG_A5XX_RBBM_CLOCK_CNTL_CCU0, 0x00022220},
+	{REG_A5XX_RBBM_CLOCK_CNTL_CCU1, 0x00022220},
+	{REG_A5XX_RBBM_CLOCK_CNTL_CCU2, 0x00022220},
+	{REG_A5XX_RBBM_CLOCK_CNTL_CCU3, 0x00022220},
+	{REG_A5XX_RBBM_CLOCK_CNTL_RAC, 0x05522222},
+	{REG_A5XX_RBBM_CLOCK_CNTL2_RAC, 0x00505555},
+	{REG_A5XX_RBBM_CLOCK_HYST_RB_CCU0, 0x04040404},
+	{REG_A5XX_RBBM_CLOCK_HYST_RB_CCU1, 0x04040404},
+	{REG_A5XX_RBBM_CLOCK_HYST_RB_CCU2, 0x04040404},
+	{REG_A5XX_RBBM_CLOCK_HYST_RB_CCU3, 0x04040404},
+	{REG_A5XX_RBBM_CLOCK_HYST_RAC, 0x07444044},
+	{REG_A5XX_RBBM_CLOCK_DELAY_RB_CCU_L1_0, 0x00000002},
+	{REG_A5XX_RBBM_CLOCK_DELAY_RB_CCU_L1_1, 0x00000002},
+	{REG_A5XX_RBBM_CLOCK_DELAY_RB_CCU_L1_2, 0x00000002},
+	{REG_A5XX_RBBM_CLOCK_DELAY_RB_CCU_L1_3, 0x00000002},
+	{REG_A5XX_RBBM_CLOCK_DELAY_RAC, 0x00010011},
+	{REG_A5XX_RBBM_CLOCK_CNTL_TSE_RAS_RBBM, 0x04222222},
+	{REG_A5XX_RBBM_CLOCK_MODE_GPC, 0x02222222},
+	{REG_A5XX_RBBM_CLOCK_MODE_VFD, 0x00002222},
+	{REG_A5XX_RBBM_CLOCK_HYST_TSE_RAS_RBBM, 0x00000000},
+	{REG_A5XX_RBBM_CLOCK_HYST_GPC, 0x04104004},
+	{REG_A5XX_RBBM_CLOCK_HYST_VFD, 0x00000000},
+	{REG_A5XX_RBBM_CLOCK_DELAY_HLSQ, 0x00000000},
+	{REG_A5XX_RBBM_CLOCK_DELAY_TSE_RAS_RBBM, 0x00004000},
+	{REG_A5XX_RBBM_CLOCK_DELAY_GPC, 0x00000200},
+	{REG_A5XX_RBBM_CLOCK_DELAY_VFD, 0x00002222}
+};
+
+void a5xx_set_hwcg(struct msm_gpu *gpu, bool state)
+{
+	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+	struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
+	unsigned int i;
+
+	for (i = 0; i < ARRAY_SIZE(a5xx_hwcg); i++)
+		gpu_write(gpu, a5xx_hwcg[i].offset,
+			state ? a5xx_hwcg[i].value : 0);
+
+	/* There are a few additional registers just for A540 */
+	if (adreno_is_a540(adreno_gpu)) {
+		gpu_write(gpu, REG_A5XX_RBBM_CLOCK_DELAY_GPMU,
+			state ? 0x770 : 0);
+		gpu_write(gpu, REG_A5XX_RBBM_CLOCK_HYST_GPMU,
+			state ? 0x004 : 0);
+	}
+
+	gpu_write(gpu, REG_A5XX_RBBM_CLOCK_CNTL, state ? 0xAAA8AA00 : 0);
+	gpu_write(gpu, REG_A5XX_RBBM_ISDB_CNT, state ? 0x182 : 0x180);
+
+	if (state)
+		set_bit(A5XX_HWCG_ENABLED, &a5xx_gpu->flags);
+	else
+		clear_bit(A5XX_HWCG_ENABLED, &a5xx_gpu->flags);
+}
+
+static int a5xx_me_init(struct msm_gpu *gpu)
+{
+	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+	struct msm_ringbuffer *ring = gpu->rb[0];
+
+	OUT_PKT7(ring, CP_ME_INIT, 8);
+
+	OUT_RING(ring, 0x0000002F);
+
+	/* Enable multiple hardware contexts */
+	OUT_RING(ring, 0x00000003);
+
+	/* Enable error detection */
+	OUT_RING(ring, 0x20000000);
+
+	/* Don't enable header dump */
+	OUT_RING(ring, 0x00000000);
+	OUT_RING(ring, 0x00000000);
+
+	/* Specify workarounds for various microcode issues */
+	if (adreno_is_a530(adreno_gpu)) {
+		/*
+		 * Workaround for token end syncs:
+		 * Force a WFI after every direct-render 3D mode draw and every
+		 * 2D mode 3 draw
+		 */
+		OUT_RING(ring, 0x0000000B);
+	} else {
+		/* No workarounds enabled */
+		OUT_RING(ring, 0x00000000);
+	}
+
+	OUT_RING(ring, 0x00000000);
+	OUT_RING(ring, 0x00000000);
+
+	gpu->funcs->flush(gpu, ring);
+	return a5xx_idle(gpu, ring) ? 0 : -EINVAL;
+}
+
+static int a5xx_preempt_start(struct msm_gpu *gpu)
+{
+	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+	struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
+	struct msm_ringbuffer *ring = gpu->rb[0];
+
+	if (gpu->nr_rings == 1)
+		return 0;
+
+	/* Turn off protected mode to write to special registers */
+	OUT_PKT7(ring, CP_SET_PROTECTED_MODE, 1);
+	OUT_RING(ring, 0);
+
+	/* Set the save preemption record for the ring/command */
+	OUT_PKT4(ring, REG_A5XX_CP_CONTEXT_SWITCH_SAVE_ADDR_LO, 2);
+	OUT_RING(ring, lower_32_bits(a5xx_gpu->preempt_iova[ring->id]));
+	OUT_RING(ring, upper_32_bits(a5xx_gpu->preempt_iova[ring->id]));
+
+	/* Turn back on protected mode */
+	OUT_PKT7(ring, CP_SET_PROTECTED_MODE, 1);
+	OUT_RING(ring, 1);
+
+	OUT_PKT7(ring, CP_PREEMPT_ENABLE_GLOBAL, 1);
+	OUT_RING(ring, 0x00);
+
+	OUT_PKT7(ring, CP_PREEMPT_ENABLE_LOCAL, 1);
+	OUT_RING(ring, 0x01);
+
+	OUT_PKT7(ring, CP_YIELD_ENABLE, 1);
+	OUT_RING(ring, 0x01);
+
+	/* Yield the floor on command completion */
+	OUT_PKT7(ring, CP_CONTEXT_SWITCH_YIELD, 4);
+	OUT_RING(ring, 0x00);
+	OUT_RING(ring, 0x00);
+	OUT_RING(ring, 0x01);
+	OUT_RING(ring, 0x01);
+
+	gpu->funcs->flush(gpu, ring);
+
+	return a5xx_idle(gpu, ring) ? 0 : -EINVAL;
+}
+
+static struct drm_gem_object *a5xx_ucode_load_bo(struct msm_gpu *gpu,
+		const struct firmware *fw, u64 *iova)
+{
+	struct drm_gem_object *bo;
+	void *ptr;
+
+	ptr = msm_gem_kernel_new(gpu->dev, fw->size - 4,
+		MSM_BO_UNCACHED | MSM_BO_GPU_READONLY, gpu->aspace, &bo, iova);
+
+	if (IS_ERR(ptr))
+		return ERR_CAST(ptr);
+
+	memcpy(ptr, &fw->data[4], fw->size - 4);
+	return bo;
+}
+
+static int a5xx_ucode_init(struct msm_gpu *gpu)
+{
+	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+	struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
+	int ret;
+
+	if (!a5xx_gpu->pm4_bo) {
+		a5xx_gpu->pm4_bo = a5xx_ucode_load_bo(gpu, adreno_gpu->pm4,
+			&a5xx_gpu->pm4_iova);
+
+		if (IS_ERR(a5xx_gpu->pm4_bo)) {
+			ret = PTR_ERR(a5xx_gpu->pm4_bo);
+			a5xx_gpu->pm4_bo = NULL;
+			dev_err(gpu->dev->dev, "could not allocate PM4: %d\n",
+				ret);
+			return ret;
+		}
+	}
+
+	if (!a5xx_gpu->pfp_bo) {
+		a5xx_gpu->pfp_bo = a5xx_ucode_load_bo(gpu, adreno_gpu->pfp,
+			&a5xx_gpu->pfp_iova);
+
+		if (IS_ERR(a5xx_gpu->pfp_bo)) {
+			ret = PTR_ERR(a5xx_gpu->pfp_bo);
+			a5xx_gpu->pfp_bo = NULL;
+			dev_err(gpu->dev->dev, "could not allocate PFP: %d\n",
+				ret);
+			return ret;
+		}
+	}
+
+	gpu_write64(gpu, REG_A5XX_CP_ME_INSTR_BASE_LO,
+		REG_A5XX_CP_ME_INSTR_BASE_HI, a5xx_gpu->pm4_iova);
+
+	gpu_write64(gpu, REG_A5XX_CP_PFP_INSTR_BASE_LO,
+		REG_A5XX_CP_PFP_INSTR_BASE_HI, a5xx_gpu->pfp_iova);
+
+	return 0;
+}
+
+#ifdef CONFIG_MSM_SUBSYSTEM_RESTART
+
+#include <soc/qcom/subsystem_restart.h>
+#include <soc/qcom/scm.h>
+
+static void a5xx_zap_shader_init(struct msm_gpu *gpu)
+{
+	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+	struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
+	const char *name;
+	void *ptr;
+
+	/* If no zap shader was defined, we'll assume that none is needed */
+	if (of_property_read_string(GPU_OF_NODE(gpu), "qcom,zap-shader", &name))
+		return;
+
+	/*
+	 * If the zap shader has already been loaded then just ask the SCM to
+	 * re-initialize the registers (not needed with CPZ retention)
+	 */
+	if (test_bit(A5XX_ZAP_SHADER_LOADED, &a5xx_gpu->flags)) {
+		int ret;
+		struct scm_desc desc = { 0 };
+
+		if (of_property_read_bool(GPU_OF_NODE(gpu),
+			"qcom,cpz-retention"))
+			return;
+
+		desc.args[0] = 0;
+		desc.args[1] = 13;
+		desc.arginfo = SCM_ARGS(2);
+
+		ret = scm_call2(SCM_SIP_FNID(SCM_SVC_BOOT, 0x0A), &desc);
+		if (ret)
+			DRM_ERROR(
+				"%s: zap-shader resume failed with error %d\n",
+				gpu->name, ret);
+
+		return;
+	}
+
+	ptr = subsystem_get(name);
+
+	if (IS_ERR_OR_NULL(ptr)) {
+		DRM_ERROR("%s: Unable to load the zap shader: %ld\n", gpu->name,
+			IS_ERR(ptr) ? PTR_ERR(ptr) : -ENODEV);
+	} else {
+		set_bit(A5XX_ZAP_SHADER_LOADED, &a5xx_gpu->flags);
+	}
+}
+#else
+static void a5xx_zap_shader_init(struct msm_gpu *gpu)
+{
+	if (!of_find_property(GPU_OF_NODE(gpu), "qcom,zap-shader", NULL))
+		return;
+
+	DRM_INFO_ONCE("%s: Zap shader is defined but loader isn't available\n",
+		gpu->name);
+}
+#endif
+
+#define A5XX_INT_MASK (A5XX_RBBM_INT_0_MASK_RBBM_AHB_ERROR | \
+	  A5XX_RBBM_INT_0_MASK_RBBM_TRANSFER_TIMEOUT | \
+	  A5XX_RBBM_INT_0_MASK_RBBM_ME_MS_TIMEOUT | \
+	  A5XX_RBBM_INT_0_MASK_RBBM_PFP_MS_TIMEOUT | \
+	  A5XX_RBBM_INT_0_MASK_RBBM_ETS_MS_TIMEOUT | \
+	  A5XX_RBBM_INT_0_MASK_RBBM_ATB_ASYNC_OVERFLOW | \
+	  A5XX_RBBM_INT_0_MASK_CP_HW_ERROR | \
+	  A5XX_RBBM_INT_0_MASK_MISC_HANG_DETECT | \
+	  A5XX_RBBM_INT_0_MASK_CP_SW | \
+	  A5XX_RBBM_INT_0_MASK_CP_CACHE_FLUSH_TS | \
+	  A5XX_RBBM_INT_0_MASK_UCHE_OOB_ACCESS | \
+	  A5XX_RBBM_INT_0_MASK_GPMU_VOLTAGE_DROOP)
+
+static int a5xx_hw_init(struct msm_gpu *gpu)
+{
+	struct msm_drm_private *priv = gpu->dev->dev_private;
+	struct platform_device *pdev = priv->gpu_pdev;
+	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+	struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
+	int ret, bit = 0;
+
+	pm_qos_update_request(&gpu->pm_qos_req_dma, 101);
+
+	gpu_write(gpu, REG_A5XX_VBIF_ROUND_ROBIN_QOS_ARB, 0x00000003);
+	if (adreno_is_a540(adreno_gpu))
+		gpu_write(gpu, REG_A5XX_VBIF_GATE_OFF_WRREQ_EN, 0x00000009);
+
+	/* Make all blocks contribute to the GPU BUSY perf counter */
+	gpu_write(gpu, REG_A5XX_RBBM_PERFCTR_GPU_BUSY_MASKED, 0xFFFFFFFF);
+
+	/* Enable RBBM error reporting bits */
+	gpu_write(gpu, REG_A5XX_RBBM_AHB_CNTL0, 0x00000001);
+
+	if (adreno_gpu->quirks & ADRENO_QUIRK_FAULT_DETECT_MASK) {
+		/*
+		 * Mask out the activity signals from RB1-3 to avoid false
+		 * positives
+		 */
+
+		gpu_write(gpu, REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL11,
+			0xF0000000);
+		gpu_write(gpu, REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL12,
+			0xFFFFFFFF);
+		gpu_write(gpu, REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL13,
+			0xFFFFFFFF);
+		gpu_write(gpu, REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL14,
+			0xFFFFFFFF);
+		gpu_write(gpu, REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL15,
+			0xFFFFFFFF);
+		gpu_write(gpu, REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL16,
+			0xFFFFFFFF);
+		gpu_write(gpu, REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL17,
+			0xFFFFFFFF);
+		gpu_write(gpu, REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL18,
+			0xFFFFFFFF);
+	}
+
+	/* Enable fault detection */
+	gpu_write(gpu, REG_A5XX_RBBM_INTERFACE_HANG_INT_CNTL,
+		(1 << 30) | 0xFFFF);
+
+	/* Turn on performance counters */
+	gpu_write(gpu, REG_A5XX_RBBM_PERFCTR_CNTL, 0x01);
+
+	/* Increase VFD cache access so LRZ and other data gets evicted less */
+	gpu_write(gpu, REG_A5XX_UCHE_CACHE_WAYS, 0x02);
+
+	/* Disable L2 bypass in the UCHE */
+	gpu_write(gpu, REG_A5XX_UCHE_TRAP_BASE_LO, 0xFFFF0000);
+	gpu_write(gpu, REG_A5XX_UCHE_TRAP_BASE_HI, 0x0001FFFF);
+	gpu_write(gpu, REG_A5XX_UCHE_WRITE_THRU_BASE_LO, 0xFFFF0000);
+	gpu_write(gpu, REG_A5XX_UCHE_WRITE_THRU_BASE_HI, 0x0001FFFF);
+
+	/* Set the GMEM VA range [0x100000:0x100000 + gpu->gmem - 1] */
+	gpu_write64(gpu, REG_A5XX_UCHE_GMEM_RANGE_MIN_LO,
+		REG_A5XX_UCHE_GMEM_RANGE_MIN_HI, 0x00100000);
+
+	gpu_write64(gpu, REG_A5XX_UCHE_GMEM_RANGE_MAX_LO,
+		REG_A5XX_UCHE_GMEM_RANGE_MAX_HI,
+		0x00100000 + adreno_gpu->gmem - 1);
+
+	gpu_write(gpu, REG_A5XX_CP_MEQ_THRESHOLDS, 0x40);
+	gpu_write(gpu, REG_A5XX_CP_MERCIU_SIZE, 0x40);
+	gpu_write(gpu, REG_A5XX_CP_ROQ_THRESHOLDS_2, 0x80000060);
+	gpu_write(gpu, REG_A5XX_CP_ROQ_THRESHOLDS_1, 0x40201B16);
+
+	gpu_write(gpu, REG_A5XX_PC_DBG_ECO_CNTL, (0x400 << 11 | 0x300 << 22));
+
+	if (adreno_gpu->quirks & ADRENO_QUIRK_TWO_PASS_USE_WFI)
+		gpu_rmw(gpu, REG_A5XX_PC_DBG_ECO_CNTL, 0, (1 << 8));
+
+	gpu_write(gpu, REG_A5XX_PC_DBG_ECO_CNTL, 0xc0200100);
+
+	/* Enable USE_RETENTION_FLOPS */
+	gpu_write(gpu, REG_A5XX_CP_CHICKEN_DBG, 0x02000000);
+
+	/* Enable ME/PFP split notification */
+	gpu_write(gpu, REG_A5XX_RBBM_AHB_CNTL1, 0xA6FFFFFF);
+
+	/* Enable HWCG */
+	a5xx_set_hwcg(gpu, true);
+
+	gpu_write(gpu, REG_A5XX_RBBM_AHB_CNTL2, 0x0000003F);
+
+	/* Set the highest bank bit if specified in the device tree */
+	if (!of_property_read_u32(pdev->dev.of_node, "qcom,highest-bank-bit",
+		&bit)) {
+		if (bit >= 13 && bit <= 16) {
+			bit -= 13;
+
+			gpu_write(gpu, REG_A5XX_TPL1_MODE_CNTL, bit << 7);
+			gpu_write(gpu, REG_A5XX_RB_MODE_CNTL, bit << 1);
+
+			if (adreno_is_a540(adreno_gpu))
+				gpu_write(gpu, REG_A5XX_UCHE_DBG_ECO_CNTL_2,
+					bit);
+		}
+	}
+
+	/* Try to load and initialize the zap shader if applicable */
+	a5xx_zap_shader_init(gpu);
+
+	/* Protect registers from the CP */
+	gpu_write(gpu, REG_A5XX_CP_PROTECT_CNTL, 0x00000007);
+
+	/* RBBM */
+	gpu_write(gpu, REG_A5XX_CP_PROTECT(0), ADRENO_PROTECT_RW(0x04, 4));
+	gpu_write(gpu, REG_A5XX_CP_PROTECT(1), ADRENO_PROTECT_RW(0x08, 8));
+	gpu_write(gpu, REG_A5XX_CP_PROTECT(2), ADRENO_PROTECT_RW(0x10, 16));
+	gpu_write(gpu, REG_A5XX_CP_PROTECT(3), ADRENO_PROTECT_RW(0x20, 32));
+	gpu_write(gpu, REG_A5XX_CP_PROTECT(4), ADRENO_PROTECT_RW(0x40, 64));
+	gpu_write(gpu, REG_A5XX_CP_PROTECT(5), ADRENO_PROTECT_RW(0x80, 64));
+
+	/* Content protect */
+	gpu_write(gpu, REG_A5XX_CP_PROTECT(6),
+		ADRENO_PROTECT_RW(REG_A5XX_RBBM_SECVID_TSB_TRUSTED_BASE_LO,
+			16));
+	gpu_write(gpu, REG_A5XX_CP_PROTECT(7),
+		ADRENO_PROTECT_RW(REG_A5XX_RBBM_SECVID_TRUST_CNTL, 2));
+
+	/* CP */
+	gpu_write(gpu, REG_A5XX_CP_PROTECT(8), ADRENO_PROTECT_RW(0x800, 64));
+	gpu_write(gpu, REG_A5XX_CP_PROTECT(9), ADRENO_PROTECT_RW(0x840, 8));
+	gpu_write(gpu, REG_A5XX_CP_PROTECT(10), ADRENO_PROTECT_RW(0x880, 32));
+	gpu_write(gpu, REG_A5XX_CP_PROTECT(11), ADRENO_PROTECT_RW(0xAA0, 1));
+
+	/* RB */
+	gpu_write(gpu, REG_A5XX_CP_PROTECT(12), ADRENO_PROTECT_RW(0xCC0, 1));
+	gpu_write(gpu, REG_A5XX_CP_PROTECT(13), ADRENO_PROTECT_RW(0xCF0, 2));
+
+	/* VPC */
+	gpu_write(gpu, REG_A5XX_CP_PROTECT(14), ADRENO_PROTECT_RW(0xE68, 8));
+	gpu_write(gpu, REG_A5XX_CP_PROTECT(15), ADRENO_PROTECT_RW(0xE70, 4));
+
+	/* UCHE */
+	gpu_write(gpu, REG_A5XX_CP_PROTECT(16), ADRENO_PROTECT_RW(0xE80, 16));
+
+	if (adreno_is_a530(adreno_gpu))
+		gpu_write(gpu, REG_A5XX_CP_PROTECT(17),
+			ADRENO_PROTECT_RW(0x10000, 0x8000));
+
+	gpu_write(gpu, REG_A5XX_RBBM_SECVID_TSB_CNTL, 0);
+
+	gpu_write64(gpu, REG_A5XX_RBBM_SECVID_TSB_TRUSTED_BASE_LO,
+		REG_A5XX_RBBM_SECVID_TSB_TRUSTED_BASE_HI, SECURE_VA_START);
+	gpu_write(gpu, REG_A5XX_RBBM_SECVID_TSB_TRUSTED_SIZE, SECURE_VA_SIZE);
+
+	/* Put the GPU into 64 bit by default */
+	gpu_write(gpu, REG_A5XX_CP_ADDR_MODE_CNTL, 0x1);
+	gpu_write(gpu, REG_A5XX_VSC_ADDR_MODE_CNTL, 0x1);
+	gpu_write(gpu, REG_A5XX_GRAS_ADDR_MODE_CNTL, 0x1);
+	gpu_write(gpu, REG_A5XX_RB_ADDR_MODE_CNTL, 0x1);
+	gpu_write(gpu, REG_A5XX_PC_ADDR_MODE_CNTL, 0x1);
+	gpu_write(gpu, REG_A5XX_HLSQ_ADDR_MODE_CNTL, 0x1);
+	gpu_write(gpu, REG_A5XX_VFD_ADDR_MODE_CNTL, 0x1);
+	gpu_write(gpu, REG_A5XX_VPC_ADDR_MODE_CNTL, 0x1);
+	gpu_write(gpu, REG_A5XX_UCHE_ADDR_MODE_CNTL, 0x1);
+	gpu_write(gpu, REG_A5XX_SP_ADDR_MODE_CNTL, 0x1);
+	gpu_write(gpu, REG_A5XX_TPL1_ADDR_MODE_CNTL, 0x1);
+	gpu_write(gpu, REG_A5XX_RBBM_SECVID_TSB_ADDR_MODE_CNTL, 0x1);
+
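+	/* Reserve a CP counter (countable 0) to serve as the timestamp */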
+	a5xx_gpu->timestamp_counter = adreno_get_counter(gpu,
+		MSM_COUNTER_GROUP_CP, 0, NULL, NULL);
+
+	/* Get RBBM performance counter countable 6 to read GPU busy cycles */
+	a5xx_gpu->gpu_busy_counter = adreno_get_counter(gpu,
+		MSM_COUNTER_GROUP_RBBM, 6, NULL, NULL);
+
+	/* Load the GPMU firmware before starting the HW init */
+	a5xx_gpmu_ucode_init(gpu);
+
+	ret = adreno_hw_init(gpu);
+	if (ret)
+		return ret;
+
+	a5xx_preempt_hw_init(gpu);
+
+	ret = a5xx_ucode_init(gpu);
+	if (ret)
+		return ret;
+
+	/* Enable the interrupts we intend to handle */
+	gpu_write(gpu, REG_A5XX_RBBM_INT_0_MASK, A5XX_INT_MASK);
+
+	/* Clear ME_HALT to start the micro engine */
+	gpu_write(gpu, REG_A5XX_CP_PFP_ME_CNTL, 0);
+	ret = a5xx_me_init(gpu);
+	if (ret)
+		return ret;
+
+	/*
+	 * Send a pipeline event stat to get misbehaving counters to start
+	 * ticking correctly
+	 */
+	if (adreno_is_a530(adreno_gpu)) {
+		OUT_PKT7(gpu->rb[0], CP_EVENT_WRITE, 1);
+		OUT_RING(gpu->rb[0], 0x0F);
+
+		gpu->funcs->flush(gpu, gpu->rb[0]);
+		if (!a5xx_idle(gpu, gpu->rb[0]))
+			return -EINVAL;
+	}
+
+	/*
+	 * If a zap shader was specified in the device tree, assume that we are
+	 * on a secure device that blocks access to the RBBM_SECVID registers
+	 * so we need to use the CP to switch out of secure mode. If a zap
+	 * shader was NOT specified then we assume we are on an unlocked device.
+	 * If we guessed wrong then the access to the register will probably
+	 * cause an XPU violation.
+	 */
+	if (test_bit(A5XX_ZAP_SHADER_LOADED, &a5xx_gpu->flags)) {
+		struct msm_ringbuffer *ring = gpu->rb[0];
+
+		OUT_PKT7(ring, CP_SET_SECURE_MODE, 1);
+		OUT_RING(ring, 0x00000000);
+
+		gpu->funcs->flush(gpu, gpu->rb[0]);
+		if (!a5xx_idle(gpu, gpu->rb[0]))
+			return -EINVAL;
+	} else {
+		/* Print a warning so if we die, we know why */
+		dev_warn_once(gpu->dev->dev,
+			"Zap shader not enabled - using SECVID_TRUST_CNTL instead\n");
+		gpu_write(gpu, REG_A5XX_RBBM_SECVID_TRUST_CNTL, 0x0);
+	}
+
+	/* Next, start the power */
+	ret = a5xx_power_init(gpu);
+	if (ret)
+		return ret;
+
+	/* Last step - yield the ringbuffer */
+	a5xx_preempt_start(gpu);
+
+	pm_qos_update_request(&gpu->pm_qos_req_dma, 501);
+
+	return 0;
+}
+
+static void a5xx_recover(struct msm_gpu *gpu)
+{
+	adreno_dump_info(gpu);
+
+	msm_gpu_snapshot(gpu, gpu->snapshot);
+
+	/* Reset the GPU so it can work again */
+	gpu_write(gpu, REG_A5XX_RBBM_SW_RESET_CMD, 1);
+	gpu_read(gpu, REG_A5XX_RBBM_SW_RESET_CMD);
+	gpu_write(gpu, REG_A5XX_RBBM_SW_RESET_CMD, 0);
+
+	adreno_recover(gpu);
+}
+
+static void a5xx_destroy(struct msm_gpu *gpu)
+{
+	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+	struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
+
+	DBG("%s", gpu->name);
+
+	a5xx_preempt_fini(gpu);
+
+	if (a5xx_gpu->pm4_bo) {
+		if (a5xx_gpu->pm4_iova)
+			msm_gem_put_iova(a5xx_gpu->pm4_bo, gpu->aspace);
+		drm_gem_object_unreference_unlocked(a5xx_gpu->pm4_bo);
+	}
+
+	if (a5xx_gpu->pfp_bo) {
+		if (a5xx_gpu->pfp_iova)
+			msm_gem_put_iova(a5xx_gpu->pfp_bo, gpu->aspace);
+		drm_gem_object_unreference_unlocked(a5xx_gpu->pfp_bo);
+	}
+
+	if (a5xx_gpu->gpmu_bo) {
+		if (a5xx_gpu->gpmu_iova)
+			msm_gem_put_iova(a5xx_gpu->gpmu_bo, gpu->aspace);
+		drm_gem_object_unreference_unlocked(a5xx_gpu->gpmu_bo);
+	}
+
+	adreno_gpu_cleanup(adreno_gpu);
+	kfree(a5xx_gpu);
+}
+
+static inline bool _a5xx_check_idle(struct msm_gpu *gpu)
+{
+	if (gpu_read(gpu, REG_A5XX_RBBM_STATUS) & ~A5XX_RBBM_STATUS_HI_BUSY)
+		return false;
+
+	/*
+	 * Nearly every abnormality ends up pausing the GPU and triggering a
+	 * fault so we can safely just watch for this one interrupt to fire
+	 */
+	return !(gpu_read(gpu, REG_A5XX_RBBM_INT_0_STATUS) &
+		A5XX_RBBM_INT_0_MASK_MISC_HANG_DETECT);
+}
+
+bool a5xx_idle(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
+{
+	/* wait for CP to drain ringbuffer: */
+	if (!adreno_idle(gpu, ring))
+		return false;
+
+	if (spin_until(_a5xx_check_idle(gpu))) {
+		DRM_ERROR(
+			"%s: timeout waiting for GPU RB %d to idle: status %8.8X rptr/wptr: %4.4X/%4.4X irq %8.8X\n",
+			gpu->name, ring->id,
+			gpu_read(gpu, REG_A5XX_RBBM_STATUS),
+			gpu_read(gpu, REG_A5XX_CP_RB_RPTR),
+			gpu_read(gpu, REG_A5XX_CP_RB_WPTR),
+			gpu_read(gpu, REG_A5XX_RBBM_INT_0_STATUS));
+
+		return false;
+	}
+
+	return true;
+}
+
+static void a5xx_cp_err_irq(struct msm_gpu *gpu)
+{
+	u32 status = gpu_read(gpu, REG_A5XX_CP_INTERRUPT_STATUS);
+
+	if (status & A5XX_CP_INT_CP_OPCODE_ERROR) {
+		u32 val;
+
+		gpu_write(gpu, REG_A5XX_CP_PFP_STAT_ADDR, 0);
+
+		/*
+		 * REG_A5XX_CP_PFP_STAT_DATA is indexed, and we want index 1 so
+		 * read it twice
+		 */
+
+		gpu_read(gpu, REG_A5XX_CP_PFP_STAT_DATA);
+		val = gpu_read(gpu, REG_A5XX_CP_PFP_STAT_DATA);
+
+		dev_err_ratelimited(gpu->dev->dev, "CP | opcode error | possible opcode=0x%8.8X\n",
+			val);
+	}
+
+	if (status & A5XX_CP_INT_CP_HW_FAULT_ERROR)
+		dev_err_ratelimited(gpu->dev->dev, "CP | HW fault | status=0x%8.8X\n",
+			gpu_read(gpu, REG_A5XX_CP_HW_FAULT));
+
+	if (status & A5XX_CP_INT_CP_DMA_ERROR)
+		dev_err_ratelimited(gpu->dev->dev, "CP | DMA error\n");
+
+	if (status & A5XX_CP_INT_CP_REGISTER_PROTECTION_ERROR) {
+		u32 val = gpu_read(gpu, REG_A5XX_CP_PROTECT_STATUS);
+
+		dev_err_ratelimited(gpu->dev->dev,
+			"CP | protected mode error | %s | addr=0x%8.8X | status=0x%8.8X\n",
+			val & (1 << 24) ? "WRITE" : "READ",
+			(val & 0xFFFFF) >> 2, val);
+	}
+
+	if (status & A5XX_CP_INT_CP_AHB_ERROR) {
+		u32 status = gpu_read(gpu, REG_A5XX_CP_AHB_FAULT);
+		const char *access[16] = { "reserved", "reserved",
+			"timestamp lo", "timestamp hi", "pfp read", "pfp write",
+			"", "", "me read", "me write", "", "", "crashdump read",
+			"crashdump write" };
+
+		dev_err_ratelimited(gpu->dev->dev,
+			"CP | AHB error | addr=%X access=%s error=%d | status=0x%8.8X\n",
+			status & 0xFFFFF, access[(status >> 24) & 0xF],
+			!!(status & (1 << 31)), status);
+	}
+}
+
+static void a5xx_rbbm_err_irq(struct msm_gpu *gpu, u32 status)
+{
+	if (status & A5XX_RBBM_INT_0_MASK_RBBM_AHB_ERROR) {
+		u32 val = gpu_read(gpu, REG_A5XX_RBBM_AHB_ERROR_STATUS);
+
+		dev_err_ratelimited(gpu->dev->dev,
+			"RBBM | AHB bus error | %s | addr=0x%X | ports=0x%X:0x%X\n",
+			val & (1 << 28) ? "WRITE" : "READ",
+			(val & 0xFFFFF) >> 2, (val >> 20) & 0x3,
+			(val >> 24) & 0xF);
+
+		/* Clear the error */
+		gpu_write(gpu, REG_A5XX_RBBM_AHB_CMD, (1 << 4));
+
+		/* Clear the interrupt */
+		gpu_write(gpu, REG_A5XX_RBBM_INT_CLEAR_CMD,
+			A5XX_RBBM_INT_0_MASK_RBBM_AHB_ERROR);
+	}
+
+	if (status & A5XX_RBBM_INT_0_MASK_RBBM_TRANSFER_TIMEOUT)
+		dev_err_ratelimited(gpu->dev->dev, "RBBM | AHB transfer timeout\n");
+
+	if (status & A5XX_RBBM_INT_0_MASK_RBBM_ME_MS_TIMEOUT)
+		dev_err_ratelimited(gpu->dev->dev, "RBBM | ME master split | status=0x%X\n",
+			gpu_read(gpu, REG_A5XX_RBBM_AHB_ME_SPLIT_STATUS));
+
+	if (status & A5XX_RBBM_INT_0_MASK_RBBM_PFP_MS_TIMEOUT)
+		dev_err_ratelimited(gpu->dev->dev, "RBBM | PFP master split | status=0x%X\n",
+			gpu_read(gpu, REG_A5XX_RBBM_AHB_PFP_SPLIT_STATUS));
+
+	if (status & A5XX_RBBM_INT_0_MASK_RBBM_ETS_MS_TIMEOUT)
+		dev_err_ratelimited(gpu->dev->dev, "RBBM | ETS master split | status=0x%X\n",
+			gpu_read(gpu, REG_A5XX_RBBM_AHB_ETS_SPLIT_STATUS));
+
+	if (status & A5XX_RBBM_INT_0_MASK_RBBM_ATB_ASYNC_OVERFLOW)
+		dev_err_ratelimited(gpu->dev->dev, "RBBM | ATB ASYNC overflow\n");
+
+	if (status & A5XX_RBBM_INT_0_MASK_RBBM_ATB_BUS_OVERFLOW)
+		dev_err_ratelimited(gpu->dev->dev, "RBBM | ATB bus overflow\n");
+}
+
+static void a5xx_uche_err_irq(struct msm_gpu *gpu)
+{
+	uint64_t addr = (uint64_t) gpu_read(gpu,
+		REG_A5XX_UCHE_TRAP_LOG_HI) << 32;
+
+	addr |= gpu_read(gpu, REG_A5XX_UCHE_TRAP_LOG_LO);
+
+	dev_err_ratelimited(gpu->dev->dev, "UCHE | Out of bounds access | addr=0x%llX\n",
+		addr);
+}
+
+static void a5xx_gpmu_err_irq(struct msm_gpu *gpu)
+{
+	dev_err_ratelimited(gpu->dev->dev, "GPMU | voltage droop\n");
+}
+
+static void a5xx_fault_detect_irq(struct msm_gpu *gpu)
+{
+	struct drm_device *dev = gpu->dev;
+	struct msm_drm_private *priv = dev->dev_private;
+	struct msm_ringbuffer *ring = gpu->funcs->active_ring(gpu);
+
+	dev_err(dev->dev, "gpu fault ring %d fence %x status %8.8X rb %4.4x/%4.4x ib1 %16.16llX/%4.4x ib2 %16.16llX/%4.4x\n",
+		ring ? ring->id : -1, adreno_submitted_fence(gpu, ring),
+		gpu_read(gpu, REG_A5XX_RBBM_STATUS),
+		gpu_read(gpu, REG_A5XX_CP_RB_RPTR),
+		gpu_read(gpu, REG_A5XX_CP_RB_WPTR),
+		gpu_read64(gpu, REG_A5XX_CP_IB1_BASE, REG_A5XX_CP_IB1_BASE_HI),
+		gpu_read(gpu, REG_A5XX_CP_IB1_BUFSZ),
+		gpu_read64(gpu, REG_A5XX_CP_IB2_BASE, REG_A5XX_CP_IB2_BASE_HI),
+		gpu_read(gpu, REG_A5XX_CP_IB2_BUFSZ));
+
+	/* Turn off the hangcheck timer to keep it from bothering us */
+	del_timer(&gpu->hangcheck_timer);
+
+	queue_work(priv->wq, &gpu->recover_work);
+}
+
+#define RBBM_ERROR_MASK \
+	(A5XX_RBBM_INT_0_MASK_RBBM_AHB_ERROR | \
+	A5XX_RBBM_INT_0_MASK_RBBM_TRANSFER_TIMEOUT | \
+	A5XX_RBBM_INT_0_MASK_RBBM_ME_MS_TIMEOUT | \
+	A5XX_RBBM_INT_0_MASK_RBBM_PFP_MS_TIMEOUT | \
+	A5XX_RBBM_INT_0_MASK_RBBM_ETS_MS_TIMEOUT | \
+	A5XX_RBBM_INT_0_MASK_RBBM_ATB_ASYNC_OVERFLOW)
+
+static irqreturn_t a5xx_irq(struct msm_gpu *gpu)
+{
+	u32 status = gpu_read(gpu, REG_A5XX_RBBM_INT_0_STATUS);
+
+	/*
+	 * Clear all the interrupts except for RBBM_AHB_ERROR
+	 * which needs to be cleared after the error condition
+	 * is cleared otherwise it will storm
+	 */
+	gpu_write(gpu, REG_A5XX_RBBM_INT_CLEAR_CMD,
+			status & ~A5XX_RBBM_INT_0_MASK_RBBM_AHB_ERROR);
+
+	if (status & RBBM_ERROR_MASK)
+		a5xx_rbbm_err_irq(gpu, status);
+
+	if (status & A5XX_RBBM_INT_0_MASK_CP_HW_ERROR)
+		a5xx_cp_err_irq(gpu);
+
+	if (status & A5XX_RBBM_INT_0_MASK_MISC_HANG_DETECT)
+		a5xx_fault_detect_irq(gpu);
+
+	if (status & A5XX_RBBM_INT_0_MASK_UCHE_OOB_ACCESS)
+		a5xx_uche_err_irq(gpu);
+
+	if (status & A5XX_RBBM_INT_0_MASK_GPMU_VOLTAGE_DROOP)
+		a5xx_gpmu_err_irq(gpu);
+
+	if (status & A5XX_RBBM_INT_0_MASK_CP_CACHE_FLUSH_TS) {
+		a5xx_preempt_trigger(gpu);
+		msm_gpu_retire(gpu);
+	}
+
+	if (status & A5XX_RBBM_INT_0_MASK_CP_SW)
+		a5xx_preempt_irq(gpu);
+
+	return IRQ_HANDLED;
+}
+
+static const u32 a5xx_register_offsets[REG_ADRENO_REGISTER_MAX] = {
+	REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_BASE, REG_A5XX_CP_RB_BASE),
+	REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_BASE_HI, REG_A5XX_CP_RB_BASE_HI),
+	REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR_ADDR, REG_A5XX_CP_RB_RPTR_ADDR),
+	REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR_ADDR_HI,
+		REG_A5XX_CP_RB_RPTR_ADDR_HI),
+	REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR, REG_A5XX_CP_RB_RPTR),
+	REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_WPTR, REG_A5XX_CP_RB_WPTR),
+	REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_CNTL, REG_A5XX_CP_RB_CNTL),
+};
+
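+/*
+ * Inclusive {start, end} pairs of register ranges to dump, terminated by ~0
+ * (e.g. the first pair 0x0000, 0x0002 covers registers 0x0000 through 0x0002).
+ */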
+static const u32 a5xx_registers[] = {
+	0x0000, 0x0002, 0x0004, 0x0020, 0x0022, 0x0026, 0x0029, 0x002b,
+	0x002e, 0x0035, 0x0038, 0x0042, 0x0044, 0x0044, 0x0047, 0x0095,
+	0x0097, 0x00bb, 0x03a0, 0x0464, 0x0469, 0x046f, 0x04d2, 0x04d3,
+	0x04e0, 0x0533, 0x0540, 0x0555, 0x0800, 0x081a, 0x081f, 0x0841,
+	0x0860, 0x0860, 0x0880, 0x08a0, 0x0b00, 0x0b12, 0x0b14, 0x0b28,
+	0x0b78, 0x0b7f, 0x0bb0, 0x0bbd, 0x0bc0, 0x0bc6, 0x0bd0, 0x0c53,
+	0x0c60, 0x0c61, 0x0c80, 0x0c82, 0x0c84, 0x0c85, 0x0c90, 0x0c9b,
+	0x0ca0, 0x0ca0, 0x0cb0, 0x0cb2, 0x0cc1, 0x0cc1, 0x0cc4, 0x0cc7,
+	0x0ccc, 0x0ccc, 0x0cd0, 0x0cdb, 0x0ce0, 0x0ce5, 0x0ce8, 0x0ce8,
+	0x0cec, 0x0cf1, 0x0cfb, 0x0d0e, 0x0d10, 0x0d17, 0x0d20, 0x0d23,
+	0x0d30, 0x0d30, 0x0e40, 0x0e43, 0x0e4a, 0x0e4a, 0x0e50, 0x0e57,
+	0x0e60, 0x0e7c, 0x0e80, 0x0e8e, 0x0e90, 0x0e96, 0x0ea0, 0x0eab,
+	0x0eb0, 0x0eb2, 0x2100, 0x211e, 0x2140, 0x2145, 0x2180, 0x2185,
+	0x2500, 0x251e, 0x2540, 0x2545, 0x2580, 0x2585, 0x3000, 0x3014,
+	0x3018, 0x302c, 0x3030, 0x3030, 0x3034, 0x3036, 0x303c, 0x303d,
+	0x3040, 0x3040, 0x3042, 0x3042, 0x3049, 0x3049, 0x3058, 0x3058,
+	0x305a, 0x3061, 0x3064, 0x3068, 0x306c, 0x306d, 0x3080, 0x3088,
+	0x308b, 0x308c, 0x3090, 0x3094, 0x3098, 0x3098, 0x309c, 0x309c,
+	0x3124, 0x3124, 0x340c, 0x340c, 0x3410, 0x3410, 0x3800, 0x3801,
+	0xa800, 0xa800, 0xa820, 0xa828, 0xa840, 0xa87d, 0xa880, 0xa88d,
+	0xa890, 0xa8a3, 0xa8a8, 0xa8aa, 0xa8c0, 0xa8c3, 0xa8c6, 0xa8ca,
+	0xa8cc, 0xa8cf, 0xa8d1, 0xa8d8, 0xa8dc, 0xa8dc, 0xa8e0, 0xa8f5,
+	0xac00, 0xac06, 0xac40, 0xac47, 0xac60, 0xac62, 0xac80, 0xac82,
+	0xb800, 0xb808, 0xb80c, 0xb812, 0xb814, 0xb817, 0xb900, 0xb904,
+	0xb906, 0xb90a, 0xb90c, 0xb90f, 0xb920, 0xb924, 0xb926, 0xb92a,
+	0xb92c, 0xb92f, 0xb940, 0xb944, 0xb946, 0xb94a, 0xb94c, 0xb94f,
+	0xb960, 0xb964, 0xb966, 0xb96a, 0xb96c, 0xb96f, 0xb980, 0xb984,
+	0xb986, 0xb98a, 0xb98c, 0xb98f, 0xb9a0, 0xb9b0, 0xb9b8, 0xb9ba,
+	0xd200, 0xd23f, 0xe000, 0xe006, 0xe010, 0xe09a, 0xe0a0, 0xe0a4,
+	0xe0aa, 0xe0eb, 0xe100, 0xe105, 0xe140, 0xe147, 0xe150, 0xe187,
+	0xe1a0, 0xe1a9, 0xe1b0, 0xe1b6, 0xe1c0, 0xe1c7, 0xe1d0, 0xe1d1,
+	0xe200, 0xe201, 0xe210, 0xe21c, 0xe240, 0xe268, 0xe280, 0xe280,
+	0xe282, 0xe2a3, 0xe2a5, 0xe2c2, 0xe380, 0xe38f, 0xe3b0, 0xe3b0,
+	0xe400, 0xe405, 0xe408, 0xe4e9, 0xe4f0, 0xe4f0, 0xe800, 0xe806,
+	0xe810, 0xe89a, 0xe8a0, 0xe8a4, 0xe8aa, 0xe8eb, 0xe900, 0xe905,
+	0xe940, 0xe947, 0xe950, 0xe987, 0xe9a0, 0xe9a9, 0xe9b0, 0xe9b6,
+	0xe9c0, 0xe9c7, 0xe9d0, 0xe9d1, 0xea00, 0xea01, 0xea10, 0xea1c,
+	0xea40, 0xea68, 0xea80, 0xea80, 0xea82, 0xeaa3, 0xeaa5, 0xeac2,
+	0xeb80, 0xeb8f, 0xebb0, 0xebb0, 0xec00, 0xec05, 0xec08, 0xece9,
+	0xecf0, 0xecf0, 0xf800, 0xf807,
+	~0
+};
+
+static int a5xx_pm_resume(struct msm_gpu *gpu)
+{
+	int ret;
+
+	/* Turn on the core power */
+	ret = msm_gpu_pm_resume(gpu);
+	if (ret)
+		return ret;
+
+	/* Turn on the RBCCU domain first to limit chances of voltage droop */
+	gpu_write(gpu, REG_A5XX_GPMU_RBCCU_POWER_CNTL, 0x778000);
+
+	/* Wait 3 usecs before polling */
+	udelay(3);
+
+	ret = spin_usecs(gpu, 20, REG_A5XX_GPMU_RBCCU_PWR_CLK_STATUS,
+		(1 << 20), (1 << 20));
+	if (ret) {
+		DRM_ERROR("%s: timeout waiting for RBCCU GDSC enable: %X\n",
+			gpu->name,
+			gpu_read(gpu, REG_A5XX_GPMU_RBCCU_PWR_CLK_STATUS));
+		return ret;
+	}
+
+	/* Turn on the SP domain */
+	gpu_write(gpu, REG_A5XX_GPMU_SP_POWER_CNTL, 0x778000);
+	ret = spin_usecs(gpu, 20, REG_A5XX_GPMU_SP_PWR_CLK_STATUS,
+		(1 << 20), (1 << 20));
+	if (ret)
+		DRM_ERROR("%s: timeout waiting for SP GDSC enable\n",
+			gpu->name);
+
+	a5xx_counters_restore(gpu);
+
+	return ret;
+}
+
+static int a5xx_pm_suspend(struct msm_gpu *gpu)
+{
+	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+
+	/* Clear the VBIF pipe before shutting down */
+	gpu_write(gpu, REG_A5XX_VBIF_XIN_HALT_CTRL0, 0xF);
+	spin_until((gpu_read(gpu, REG_A5XX_VBIF_XIN_HALT_CTRL1) & 0xF)
+			== 0xF);
+
+	gpu_write(gpu, REG_A5XX_VBIF_XIN_HALT_CTRL0, 0);
+
+	/* Save the counters before going down */
+	a5xx_counters_save(gpu);
+
+	/*
+	 * Reset the VBIF before power collapse to avoid issue with FIFO
+	 * entries
+	 */
+	if (adreno_is_a530(adreno_gpu)) {
+		/* These only need to be done for A530 */
+		gpu_write(gpu, REG_A5XX_RBBM_BLOCK_SW_RESET_CMD,
+				0x003C0000);
+		gpu_write(gpu, REG_A5XX_RBBM_BLOCK_SW_RESET_CMD,
+				0x00000000);
+	}
+
+	return msm_gpu_pm_suspend(gpu);
+}
+
+static int a5xx_get_timestamp(struct msm_gpu *gpu, uint64_t *value)
+{
+	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+	struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
+
+	*value = adreno_read_counter(gpu, MSM_COUNTER_GROUP_CP,
+		a5xx_gpu->timestamp_counter);
+
+	return 0;
+}
+
+#ifdef CONFIG_DEBUG_FS
+static void a5xx_show(struct msm_gpu *gpu, struct seq_file *m)
+{
+	seq_printf(m, "status:   %08x\n",
+			gpu_read(gpu, REG_A5XX_RBBM_STATUS));
+	adreno_show(gpu, m);
+}
+#endif
+
+static struct msm_ringbuffer *a5xx_active_ring(struct msm_gpu *gpu)
+{
+	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+	struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
+
+	return a5xx_gpu->cur_ring;
+}
+
+static u64 a5xx_gpu_busy(struct msm_gpu *gpu)
+{
+	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+	struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
+
+	return adreno_read_counter(gpu, MSM_COUNTER_GROUP_RBBM,
+		a5xx_gpu->gpu_busy_counter);
+}
+
+static const struct adreno_gpu_funcs funcs = {
+	.base = {
+		.get_param = adreno_get_param,
+		.hw_init = a5xx_hw_init,
+		.pm_suspend = a5xx_pm_suspend,
+		.pm_resume = a5xx_pm_resume,
+		.recover = a5xx_recover,
+		.submitted_fence = adreno_submitted_fence,
+		.submit = a5xx_submit,
+		.flush = a5xx_flush,
+		.active_ring = a5xx_active_ring,
+		.irq = a5xx_irq,
+		.destroy = a5xx_destroy,
+#ifdef CONFIG_DEBUG_FS
+		.show = a5xx_show,
+#endif
+		.snapshot = a5xx_snapshot,
+		.get_counter = adreno_get_counter,
+		.read_counter = adreno_read_counter,
+		.put_counter = adreno_put_counter,
+		.gpu_busy = a5xx_gpu_busy,
+	},
+	.get_timestamp = a5xx_get_timestamp,
+};
+
+/* Read the limits management leakage from the efuses */
+static void a530_efuse_leakage(struct platform_device *pdev,
+		struct adreno_gpu *adreno_gpu, void *base,
+		size_t size)
+{
+	struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
+	unsigned int row0, row2;
+	unsigned int leakage_pwr_on, coeff;
+
+	if (size < 0x148)
+		return;
+
+	/* Leakage */
+	row0 = readl_relaxed(base + 0x134);
+	row2 = readl_relaxed(base + 0x144);
+
+	/* Read barrier to get the previous two reads */
+	rmb();
+
+	/* Get the leakage coefficient from device tree */
+	if (of_property_read_u32(pdev->dev.of_node,
+		"qcom,base-leakage-coefficent", &coeff))
+		return;
+
+	leakage_pwr_on = ((row2 >> 2) & 0xFF) * (1 << ((row0 >> 1) & 0x03));
+	a5xx_gpu->lm_leakage = (leakage_pwr_on << 16) |
+		((leakage_pwr_on * coeff) / 100);
+}
+
+/* Read the speed bin from the efuses */
+static void a530_efuse_bin(struct platform_device *pdev,
+		struct adreno_gpu *adreno_gpu, void *base,
+		size_t size)
+{
+	uint32_t speed_bin[3];
+	uint32_t val;
+
+	if (of_property_read_u32_array(pdev->dev.of_node,
+		"qcom,gpu-speed-bin", speed_bin, 3))
+		return;
+
+	if (size < speed_bin[0] + 4)
+		return;
+
+	val = readl_relaxed(base + speed_bin[0]);
+
+	adreno_gpu->speed_bin = (val & speed_bin[1]) >> speed_bin[2];
+}
+
+/* Read target specific configuration from the efuses */
+static void a5xx_efuses_read(struct platform_device *pdev,
+		struct adreno_gpu *adreno_gpu)
+{
+	struct adreno_platform_config *config = pdev->dev.platform_data;
+	const struct adreno_info *info = adreno_info(config->rev);
+	struct resource *res;
+	void *base;
+
+	/*
+	 * The adreno_gpu->revn mechanism isn't set up yet so we need to check
+	 * it directly here
+	 */
+	if (info->revn != 530)
+		return;
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+		"qfprom_memory");
+	if (!res)
+		return;
+
+	base = ioremap(res->start, resource_size(res));
+	if (!base)
+		return;
+
+	a530_efuse_bin(pdev, adreno_gpu, base, resource_size(res));
+	a530_efuse_leakage(pdev, adreno_gpu, base, resource_size(res));
+
+	iounmap(base);
+}
+
+struct msm_gpu *a5xx_gpu_init(struct drm_device *dev)
+{
+	struct msm_drm_private *priv = dev->dev_private;
+	struct platform_device *pdev = priv->gpu_pdev;
+	struct a5xx_gpu *a5xx_gpu = NULL;
+	struct adreno_gpu *adreno_gpu;
+	struct msm_gpu *gpu;
+	struct msm_gpu_config a5xx_config = { 0 };
+	int ret;
+
+	if (!pdev) {
+		dev_err(dev->dev, "No A5XX device is defined\n");
+		return ERR_PTR(-ENXIO);
+	}
+
+	a5xx_gpu = kzalloc(sizeof(*a5xx_gpu), GFP_KERNEL);
+	if (!a5xx_gpu)
+		return ERR_PTR(-ENOMEM);
+
+	adreno_gpu = &a5xx_gpu->base;
+	gpu = &adreno_gpu->base;
+
+	adreno_gpu->registers = a5xx_registers;
+	adreno_gpu->reg_offsets = a5xx_register_offsets;
+
+	a5xx_gpu->lm_leakage = 0x4E001A;
+
+	/* Check the efuses for some configuration */
+	a5xx_efuses_read(pdev, adreno_gpu);
+
+	a5xx_config.ioname = MSM_GPU_DEFAULT_IONAME;
+	a5xx_config.irqname = MSM_GPU_DEFAULT_IRQNAME;
+
+	/* Set the number of rings to 4 - yay preemption */
+	a5xx_config.nr_rings = 4;
+
+	/*
+	 * Set the user domain range to fall into the TTBR1 region for global
+	 * objects
+	 */
+	a5xx_config.va_start = 0xfffffff000000000ULL;
+	a5xx_config.va_end = 0xffffffffffffffffULL;
+
+	a5xx_config.secure_va_start = SECURE_VA_START;
+	a5xx_config.secure_va_end = SECURE_VA_START + SECURE_VA_SIZE - 1;
+
+	ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, &a5xx_config);
+	if (ret) {
+		a5xx_destroy(&(a5xx_gpu->base.base));
+		return ERR_PTR(ret);
+	}
+
+	/* Set up the preemption specific bits and pieces for each ringbuffer */
+	a5xx_preempt_init(gpu);
+
+	a5xx_counters_init(adreno_gpu);
+
+	return gpu;
+}
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/gpu/drm/msm/adreno/a5xx_gpu.h	2019-01-22 16:16:23.483246225 +0100
@@ -0,0 +1,200 @@
+/* Copyright (c) 2016-2018 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef __A5XX_GPU_H__
+#define __A5XX_GPU_H__
+
+#include "adreno_gpu.h"
+
+/* Bringing over the hack from the previous targets */
+#undef ROP_COPY
+#undef ROP_XOR
+
+#include "a5xx.xml.h"
+
+enum {
+	A5XX_ZAP_SHADER_LOADED = 1,
+	A5XX_HWCG_ENABLED = 2,
+};
+
+struct a5xx_gpu {
+	unsigned long flags;
+
+	struct adreno_gpu base;
+
+	struct drm_gem_object *pm4_bo;
+	uint64_t pm4_iova;
+
+	struct drm_gem_object *pfp_bo;
+	uint64_t pfp_iova;
+
+	struct drm_gem_object *gpmu_bo;
+	uint64_t gpmu_iova;
+	uint32_t gpmu_dwords;
+
+	uint32_t lm_leakage;
+
+	struct msm_ringbuffer *cur_ring;
+	struct msm_ringbuffer *next_ring;
+
+	struct drm_gem_object *preempt_bo[MSM_GPU_MAX_RINGS];
+	struct a5xx_preempt_record *preempt[MSM_GPU_MAX_RINGS];
+	uint64_t preempt_iova[MSM_GPU_MAX_RINGS];
+
+	atomic_t preempt_state;
+	struct timer_list preempt_timer;
+
+	struct a5xx_smmu_info *smmu_info;
+	struct drm_gem_object *smmu_info_bo;
+	uint64_t smmu_info_iova;
+
+	int timestamp_counter;
+	int gpu_busy_counter;
+};
+
+#define to_a5xx_gpu(x) container_of(x, struct a5xx_gpu, base)
+
+/*
+ * In order to do lockless preemption we use a simple state machine to progress
+ * through the process.
+ *
+ * PREEMPT_NONE - no preemption in progress. Next state: START.
+ * PREEMPT_START - the trigger is evaluating if preemption is possible. Next
+ * states: TRIGGERED, NONE.
+ * PREEMPT_ABORT - an intermediate state before moving back to NONE. Next
+ * state: NONE.
+ * PREEMPT_TRIGGERED - a preemption has been executed on the hardware. Next
+ * states: FAULTED, PENDING.
+ * PREEMPT_FAULTED - a preemption timed out (never completed). This will
+ * trigger recovery. Next state: N/A.
+ * PREEMPT_PENDING - the preemption complete interrupt fired; the callback is
+ * checking the success of the operation. Next states: FAULTED, NONE.
+ */
+
+enum preempt_state {
+	PREEMPT_NONE = 0,
+	PREEMPT_START,
+	PREEMPT_ABORT,
+	PREEMPT_TRIGGERED,
+	PREEMPT_FAULTED,
+	PREEMPT_PENDING,
+};
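+
+/*
+ * A minimal sketch of how the trigger path is expected to advance this state
+ * machine, assuming an atomic compare-and-swap helper (the actual transition
+ * code lives in a5xx_preempt.c, outside this hunk):
+ *
+ *	static bool try_preempt_state(struct a5xx_gpu *a5xx_gpu,
+ *			enum preempt_state cur, enum preempt_state new)
+ *	{
+ *		return atomic_cmpxchg(&a5xx_gpu->preempt_state,
+ *			cur, new) == cur;
+ *	}
+ *
+ *	if (!try_preempt_state(a5xx_gpu, PREEMPT_NONE, PREEMPT_START))
+ *		return;	 (some other path is already preempting)
+ */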
+
+/*
+ * struct a5xx_preempt_record is a shared buffer between the microcode and the
+ * CPU to store the state for preemption. The record itself is much larger
+ * (64k) but most of that is used by the CP for storage.
+ *
+ * There is a preemption record assigned per ringbuffer. When the CPU triggers a
+ * preemption, it fills out the record with the useful information (wptr, ring
+ * base, etc) and the microcode uses that information to set up the CP following
+ * the preemption.  When a ring is switched out, the CP will save the ringbuffer
+ * state back to the record. In this way, once the records are properly set up
+ * the CPU can quickly switch back and forth between ringbuffers by only
+ * updating a few registers (often only the wptr).
+ *
+ * These are the CPU aware registers in the record:
+ * @magic: Must always be 0x27C4BAFC
+ * @info: Type of the record - written 0 by the CPU, updated by the CP
+ * @data: Data field from SET_RENDER_MODE or a checkpoint. Written and used by
+ * the CP
+ * @cntl: Value of RB_CNTL written by CPU, save/restored by CP
+ * @rptr: Value of RB_RPTR written by CPU, save/restored by CP
+ * @wptr: Value of RB_WPTR written by CPU, save/restored by CP
+ * @rptr_addr: Value of RB_RPTR_ADDR written by CPU, save/restored by CP
+ * @rbase: Value of RB_BASE written by CPU, save/restored by CP
+ * @counter: GPU address of the storage area for the performance counters
+ */
+struct a5xx_preempt_record {
+	uint32_t magic;
+	uint32_t info;
+	uint32_t data;
+	uint32_t cntl;
+	uint32_t rptr;
+	uint32_t wptr;
+	uint64_t rptr_addr;
+	uint64_t rbase;
+	uint64_t counter;
+};
+
+/* Magic identifier for the preemption record */
+#define A5XX_PREEMPT_RECORD_MAGIC 0x27C4BAFCUL
+
+/*
+ * Even though the structure above is only a few bytes, we need a full 64k to
+ * store the entire preemption record from the CP
+ */
+#define A5XX_PREEMPT_RECORD_SIZE (64 * 1024)
+
+/*
+ * The preemption counter block is a storage area for the value of the
+ * preemption counters that are saved immediately before context switch. We
+ * append it to the end of the allocation for the preemption record.
+ */
+#define A5XX_PREEMPT_COUNTER_SIZE (16 * 4)
+
+/*
+ * This is a global structure that the preemption code uses to switch in the
+ * pagetable for the preempted process - the code switches in whatever
+ * pagetable is described here after preempting to a new ring.
+ */
+struct a5xx_smmu_info {
+	uint32_t  magic;
+	uint32_t  _pad4;
+	uint64_t  ttbr0;
+	uint32_t  asid;
+	uint32_t  contextidr;
+};
+
+#define A5XX_SMMU_INFO_MAGIC 0x3618CDA3UL
+
+int a5xx_power_init(struct msm_gpu *gpu);
+void a5xx_gpmu_ucode_init(struct msm_gpu *gpu);
+
+static inline int spin_usecs(struct msm_gpu *gpu, uint32_t usecs,
+		uint32_t reg, uint32_t mask, uint32_t value)
+{
+	while (usecs--) {
+		udelay(1);
+		if ((gpu_read(gpu, reg) & mask) == value)
+			return 0;
+		cpu_relax();
+	}
+
+	return -ETIMEDOUT;
+}
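+
+/*
+ * Example (this mirrors the GPMU handshake in a5xx_power.c): poll for up to
+ * 25us until GPMU_GENERAL_0 reads back the firmware magic value:
+ *
+ *	if (spin_usecs(gpu, 25, REG_A5XX_GPMU_GENERAL_0, 0xFFFFFFFF,
+ *		0xBABEFACE))
+ *		... handle the timeout ...
+ */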
+
+void a5xx_set_hwcg(struct msm_gpu *gpu, bool state);
+bool a5xx_idle(struct msm_gpu *gpu, struct msm_ringbuffer *ring);
+
+void a5xx_preempt_init(struct msm_gpu *gpu);
+void a5xx_preempt_hw_init(struct msm_gpu *gpu);
+void a5xx_preempt_trigger(struct msm_gpu *gpu);
+void a5xx_preempt_irq(struct msm_gpu *gpu);
+void a5xx_preempt_fini(struct msm_gpu *gpu);
+
+int a5xx_snapshot(struct msm_gpu *gpu, struct msm_snapshot *snapshot);
+
+/* Return true if we are in a preempt state */
+static inline bool a5xx_in_preempt(struct a5xx_gpu *a5xx_gpu)
+{
+	int preempt_state = atomic_read(&a5xx_gpu->preempt_state);
+
+	return !(preempt_state == PREEMPT_NONE ||
+			preempt_state == PREEMPT_ABORT);
+}
+
+int a5xx_counters_init(struct adreno_gpu *adreno_gpu);
+void a5xx_counters_save(struct msm_gpu *gpu);
+void a5xx_counters_restore(struct msm_gpu *gpu);
+
+#endif /* __A5XX_GPU_H__ */
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/gpu/drm/msm/adreno/a5xx_power.c	2019-01-22 16:16:23.483246225 +0100
@@ -0,0 +1,491 @@
+/* Copyright (c) 2016-2018 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/pm_opp.h>
+#include "a5xx_gpu.h"
+
+/*
+ * The GPMU data block is a block of shared registers that can be used to
+ * communicate back and forth. These "registers" are defined by convention
+ * with the GPMU firmware and are not bound to any specific hardware design.
+ */
+
+#define AGC_INIT_BASE REG_A5XX_GPMU_DATA_RAM_BASE
+#define AGC_INIT_MSG_MAGIC (AGC_INIT_BASE + 5)
+#define AGC_MSG_BASE (AGC_INIT_BASE + 7)
+
+#define AGC_MSG_STATE (AGC_MSG_BASE + 0)
+#define AGC_MSG_COMMAND (AGC_MSG_BASE + 1)
+#define AGC_MSG_PAYLOAD_SIZE (AGC_MSG_BASE + 3)
+#define AGC_MSG_PAYLOAD(_o) ((AGC_MSG_BASE + 5) + (_o))
+
+#define AGC_POWER_CONFIG_PRODUCTION_ID 1
+#define AGC_INIT_MSG_VALUE 0xBABEFACE
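+
+/*
+ * Resulting mailbox layout in the GPMU data RAM, as register offsets from
+ * AGC_INIT_BASE:
+ *
+ *	+5	AGC_INIT_MSG_MAGIC	(0xBABEFACE, written last to publish
+ *					 the message to the GPMU)
+ *	+7	AGC_MSG_STATE
+ *	+8	AGC_MSG_COMMAND
+ *	+10	AGC_MSG_PAYLOAD_SIZE	(in bytes)
+ *	+12+n	AGC_MSG_PAYLOAD(n)
+ */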
+
+/* AGC_LM_CONFIG (A540+) */
+#define AGC_LM_CONFIG (136/4)
+#define AGC_LM_CONFIG_GPU_VERSION_SHIFT 17
+#define AGC_LM_CONFIG_ENABLE_GPMU_ADAPTIVE 1
+#define AGC_LM_CONFIG_THROTTLE_DISABLE (2 << 8)
+#define AGC_LM_CONFIG_ISENSE_ENABLE (1 << 4)
+#define AGC_LM_CONFIG_ENABLE_ERROR (3 << 4)
+#define AGC_LM_CONFIG_LLM_ENABLED (1 << 16)
+#define AGC_LM_CONFIG_BCL_DISABLED (1 << 24)
+
+#define AGC_LEVEL_CONFIG (140/4)
+
+static struct {
+	uint32_t reg;
+	uint32_t value;
+} a5xx_sequence_regs[] = {
+	{ 0xB9A1, 0x00010303 },
+	{ 0xB9A2, 0x13000000 },
+	{ 0xB9A3, 0x00460020 },
+	{ 0xB9A4, 0x10000000 },
+	{ 0xB9A5, 0x040A1707 },
+	{ 0xB9A6, 0x00010000 },
+	{ 0xB9A7, 0x0E000904 },
+	{ 0xB9A8, 0x10000000 },
+	{ 0xB9A9, 0x01165000 },
+	{ 0xB9AA, 0x000E0002 },
+	{ 0xB9AB, 0x03884141 },
+	{ 0xB9AC, 0x10000840 },
+	{ 0xB9AD, 0x572A5000 },
+	{ 0xB9AE, 0x00000003 },
+	{ 0xB9AF, 0x00000000 },
+	{ 0xB9B0, 0x10000000 },
+	{ 0xB828, 0x6C204010 },
+	{ 0xB829, 0x6C204011 },
+	{ 0xB82A, 0x6C204012 },
+	{ 0xB82B, 0x6C204013 },
+	{ 0xB82C, 0x6C204014 },
+	{ 0xB90F, 0x00000004 },
+	{ 0xB910, 0x00000002 },
+	{ 0xB911, 0x00000002 },
+	{ 0xB912, 0x00000002 },
+	{ 0xB913, 0x00000002 },
+	{ 0xB92F, 0x00000004 },
+	{ 0xB930, 0x00000005 },
+	{ 0xB931, 0x00000005 },
+	{ 0xB932, 0x00000005 },
+	{ 0xB933, 0x00000005 },
+	{ 0xB96F, 0x00000001 },
+	{ 0xB970, 0x00000003 },
+	{ 0xB94F, 0x00000004 },
+	{ 0xB950, 0x0000000B },
+	{ 0xB951, 0x0000000B },
+	{ 0xB952, 0x0000000B },
+	{ 0xB953, 0x0000000B },
+	{ 0xB907, 0x00000019 },
+	{ 0xB927, 0x00000019 },
+	{ 0xB947, 0x00000019 },
+	{ 0xB967, 0x00000019 },
+	{ 0xB987, 0x00000019 },
+	{ 0xB906, 0x00220001 },
+	{ 0xB926, 0x00220001 },
+	{ 0xB946, 0x00220001 },
+	{ 0xB966, 0x00220001 },
+	{ 0xB986, 0x00300000 },
+	{ 0xAC40, 0x0340FF41 },
+	{ 0xAC41, 0x03BEFED0 },
+	{ 0xAC42, 0x00331FED },
+	{ 0xAC43, 0x021FFDD3 },
+	{ 0xAC44, 0x5555AAAA },
+	{ 0xAC45, 0x5555AAAA },
+	{ 0xB9BA, 0x00000008 },
+};
+
+/*
+ * Get the actual voltage value for the operating point at the specified
+ * frequency
+ */
+static inline uint32_t _get_mvolts(struct msm_gpu *gpu, uint32_t freq)
+{
+	struct drm_device *dev = gpu->dev;
+	struct msm_drm_private *priv = dev->dev_private;
+	struct platform_device *pdev = priv->gpu_pdev;
+	struct dev_pm_opp *opp;
+
+	opp = dev_pm_opp_find_freq_exact(&pdev->dev, freq, true);
+
+	return (!IS_ERR(opp)) ? dev_pm_opp_get_voltage(opp) / 1000 : 0;
+}
+
+#define PAYLOAD_SIZE(_size) ((_size) * sizeof(u32))
+#define LM_DCVS_LIMIT 1
+#define LEVEL_CONFIG ~(0x303)
+
+/* Setup thermal limit management for A540 */
+static void a540_lm_setup(struct msm_gpu *gpu)
+{
+	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+	u32 max_power = 0;
+	u32 rate = gpu->gpufreq[0];
+	u32 config;
+
+	/* The battery current limiter isn't enabled for A540 */
+	config = AGC_LM_CONFIG_BCL_DISABLED;
+	config |= adreno_gpu->rev.patchid << AGC_LM_CONFIG_GPU_VERSION_SHIFT;
+
+	/* For now disable GPMU side throttling */
+	config |= AGC_LM_CONFIG_THROTTLE_DISABLE;
+
+	/* Get the max-power from the device tree */
+	of_property_read_u32(GPU_OF_NODE(gpu), "qcom,lm-max-power", &max_power);
+
+	gpu_write(gpu, AGC_MSG_STATE, 0x80000001);
+	gpu_write(gpu, AGC_MSG_COMMAND, AGC_POWER_CONFIG_PRODUCTION_ID);
+
+	gpu_write(gpu, AGC_MSG_PAYLOAD(0), max_power);
+	gpu_write(gpu, AGC_MSG_PAYLOAD(1), 1);
+
+	gpu_write(gpu, AGC_MSG_PAYLOAD(2), _get_mvolts(gpu, rate));
+	gpu_write(gpu, AGC_MSG_PAYLOAD(3), rate / 1000000);
+
+	gpu_write(gpu, AGC_MSG_PAYLOAD(AGC_LM_CONFIG), config);
+	gpu_write(gpu, AGC_MSG_PAYLOAD(AGC_LEVEL_CONFIG), LEVEL_CONFIG);
+	gpu_write(gpu, AGC_MSG_PAYLOAD_SIZE,
+		PAYLOAD_SIZE(AGC_LEVEL_CONFIG + 1));
+
+	gpu_write(gpu, AGC_INIT_MSG_MAGIC, AGC_INIT_MSG_VALUE);
+}
+
+/* Setup thermal limit management for A530 */
+static void a530_lm_setup(struct msm_gpu *gpu)
+{
+	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+	struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
+	uint32_t rate = gpu->gpufreq[0];
+	uint32_t tsens = 0;
+	uint32_t max_power = 0;
+	unsigned int i;
+
+	/* Write the block of sequence registers */
+	for (i = 0; i < ARRAY_SIZE(a5xx_sequence_regs); i++)
+		gpu_write(gpu, a5xx_sequence_regs[i].reg,
+			a5xx_sequence_regs[i].value);
+
+	of_property_read_u32(GPU_OF_NODE(gpu), "qcom,gpmu-tsens", &tsens);
+
+	gpu_write(gpu, REG_A5XX_GPMU_TEMP_SENSOR_ID, tsens);
+	gpu_write(gpu, REG_A5XX_GPMU_DELTA_TEMP_THRESHOLD, 0x01);
+	gpu_write(gpu, REG_A5XX_GPMU_TEMP_SENSOR_CONFIG, 0x01);
+
+	gpu_write(gpu, REG_A5XX_GPMU_BASE_LEAKAGE, a5xx_gpu->lm_leakage);
+
+	gpu_write(gpu, REG_A5XX_GPMU_BEC_ENABLE, 0x10001FFF);
+	gpu_write(gpu, REG_A5XX_GDPM_CONFIG1, 0x00201FF1);
+
+	/* Write the voltage table */
+
+	/* Get the max-power from the device tree */
+	of_property_read_u32(GPU_OF_NODE(gpu), "qcom,lm-max-power", &max_power);
+
+	gpu_write(gpu, REG_A5XX_GPMU_BEC_ENABLE, 0x10001FFF);
+	gpu_write(gpu, REG_A5XX_GDPM_CONFIG1, 0x201FF1);
+
+	gpu_write(gpu, AGC_MSG_STATE, 1);
+	gpu_write(gpu, AGC_MSG_COMMAND, AGC_POWER_CONFIG_PRODUCTION_ID);
+
+	gpu_write(gpu, AGC_MSG_PAYLOAD(0), max_power);
+	gpu_write(gpu, AGC_MSG_PAYLOAD(1), 1);
+
+	gpu_write(gpu, AGC_MSG_PAYLOAD(2), _get_mvolts(gpu, rate));
+	gpu_write(gpu, AGC_MSG_PAYLOAD(3), rate / 1000000);
+
+	gpu_write(gpu, AGC_MSG_PAYLOAD_SIZE, 4 * sizeof(uint32_t));
+	gpu_write(gpu, AGC_INIT_MSG_MAGIC, AGC_INIT_MSG_VALUE);
+}
+
+/* Enable SP/TP power collapse */
+static void a5xx_pc_init(struct msm_gpu *gpu)
+{
+	gpu_write(gpu, REG_A5XX_GPMU_PWR_COL_INTER_FRAME_CTRL, 0x7F);
+	gpu_write(gpu, REG_A5XX_GPMU_PWR_COL_BINNING_CTRL, 0);
+	gpu_write(gpu, REG_A5XX_GPMU_PWR_COL_INTER_FRAME_HYST, 0xA0080);
+	gpu_write(gpu, REG_A5XX_GPMU_PWR_COL_STAGGER_DELAY, 0x600040);
+}
+
+/* Enable the GPMU microcontroller */
+static int a5xx_gpmu_init(struct msm_gpu *gpu)
+{
+	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+	struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
+	struct msm_ringbuffer *ring = gpu->rb[0];
+
+	if (!a5xx_gpu->gpmu_dwords)
+		return 0;
+
+	/* Turn off protected mode for this operation */
+	OUT_PKT7(ring, CP_SET_PROTECTED_MODE, 1);
+	OUT_RING(ring, 0);
+
+	/* Kick off the IB to load the GPMU microcode */
+	OUT_PKT7(ring, CP_INDIRECT_BUFFER_PFE, 3);
+	OUT_RING(ring, lower_32_bits(a5xx_gpu->gpmu_iova));
+	OUT_RING(ring, upper_32_bits(a5xx_gpu->gpmu_iova));
+	OUT_RING(ring, a5xx_gpu->gpmu_dwords);
+
+	/* Turn back on protected mode */
+	OUT_PKT7(ring, CP_SET_PROTECTED_MODE, 1);
+	OUT_RING(ring, 1);
+
+	gpu->funcs->flush(gpu, ring);
+
+	/* This is "fatal" because the CP is left in a bad state */
+	if (!a5xx_idle(gpu, ring)) {
+		DRM_ERROR("%s: Unable to load GPMU firmwaren",
+			gpu->name);
+		return -EINVAL;
+	}
+
+	/* Clock gating setup for A530 targets */
+	if (adreno_is_a530(adreno_gpu))
+		gpu_write(gpu, REG_A5XX_GPMU_WFI_CONFIG, 0x4014);
+
+	/* Kick off the GPMU */
+	gpu_write(gpu, REG_A5XX_GPMU_CM3_SYSRESET, 0x0);
+
+	/*
+	 * Wait for the GPMU to respond. It isn't fatal if it doesn't; we just
+	 * won't have advanced power collapse.
+	 */
+	if (spin_usecs(gpu, 25, REG_A5XX_GPMU_GENERAL_0, 0xFFFFFFFF,
+		0xBABEFACE)) {
+		DRM_ERROR("%s: GPMU firmware initialization timed out\n",
+			gpu->name);
+		return 0;
+	}
+
+	if (!adreno_is_a530(adreno_gpu)) {
+		u32 val = gpu_read(gpu, REG_A5XX_GPMU_GENERAL_1);
+
+		if (val)
+			DRM_ERROR("%s: GPMU firmare initialization failed: %d\n",
+				gpu->name, val);
+	}
+
+	/* FIXME: Clear GPMU interrupts? */
+	return 0;
+}
+
+/* Enable limits management */
+static void a5xx_lm_enable(struct msm_gpu *gpu)
+{
+	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+
+	/* This init sequence only applies to A530 */
+	if (!adreno_is_a530(adreno_gpu))
+		return;
+
+	gpu_write(gpu, REG_A5XX_GDPM_INT_MASK, 0x0);
+	gpu_write(gpu, REG_A5XX_GDPM_INT_EN, 0x0A);
+	gpu_write(gpu, REG_A5XX_GPMU_GPMU_VOLTAGE_INTR_EN_MASK, 0x01);
+	gpu_write(gpu, REG_A5XX_GPMU_TEMP_THRESHOLD_INTR_EN_MASK, 0x50000);
+	gpu_write(gpu, REG_A5XX_GPMU_THROTTLE_UNMASK_FORCE_CTRL, 0x30000);
+
+	gpu_write(gpu, REG_A5XX_GPMU_CLOCK_THROTTLE_CTRL, 0x011);
+}
+
+int a5xx_power_init(struct msm_gpu *gpu)
+{
+	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+	int ret;
+	u32 lm_limit = 6000;
+
+	/*
+	 * Set up the limit management
+	 * first, do some generic setup:
+	 */
+	gpu_write(gpu, REG_A5XX_GPMU_GPMU_VOLTAGE, 0x80000000 | 0);
+
+	of_property_read_u32(GPU_OF_NODE(gpu), "qcom,lm-limit", &lm_limit);
+	gpu_write(gpu, REG_A5XX_GPMU_GPMU_PWR_THRESHOLD, 0x80000000 | lm_limit);
+
+	/* Now do the target specific setup */
+	if (adreno_is_a530(adreno_gpu))
+		a530_lm_setup(gpu);
+	else
+		a540_lm_setup(gpu);
+
+	/* Set up SP/TP power collapse */
+	a5xx_pc_init(gpu);
+
+	/* Start the GPMU */
+	ret = a5xx_gpmu_init(gpu);
+	if (ret)
+		return ret;
+
+	/* Start the limits management */
+	a5xx_lm_enable(gpu);
+
+	return 0;
+}
+
+static int _read_header(unsigned int *data, uint32_t fwsize,
+		unsigned int *major, unsigned int *minor)
+{
+	uint32_t size;
+	unsigned int i;
+
+	/* First dword of the header is the header size */
+	if (fwsize < 4)
+		return -EINVAL;
+
+	size = data[0];
+
+	/* Make sure the header isn't too big and is a multiple of two */
+	if ((size % 2) || (size > 10) || size > (fwsize >> 2))
+		return -EINVAL;
+
+	/* Read the values in pairs */
+	for (i = 1; i < size; i += 2) {
+		switch (data[i]) {
+		case 1:
+			*major = data[i + 1];
+			break;
+		case 2:
+			*minor = data[i + 1];
+			break;
+		default:
+			/* Invalid values are non fatal */
+			break;
+		}
+	}
+
+	return 0;
+}
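+
+/*
+ * Illustrative header: { 6, 1, 2, 2, 7, 0 } passes the size checks (6 is even
+ * and <= 10) and its (key, value) pairs (1, 2) and (2, 7) yield major = 2 and
+ * minor = 7; the trailing unknown key 0 is ignored.
+ */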
+
+/*
+ * Make sure cur_major and cur_minor are greater than or equal to the minimum
+ * allowable major/minor
+ */
+static inline bool _check_gpmu_version(uint32_t cur_major, uint32_t cur_minor,
+		uint32_t min_major, uint32_t min_minor)
+{
+	return ((cur_major > min_major) ||
+		((cur_major == min_major) && (cur_minor >= min_minor)));
+}
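+
+/*
+ * e.g. with a required minimum of 2.1, versions 2.3 and 3.0 pass while 2.0
+ * and 1.9 are rejected.
+ */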
+
+void a5xx_gpmu_ucode_init(struct msm_gpu *gpu)
+{
+	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+	struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
+	struct drm_device *drm = gpu->dev;
+	const char *name;
+	const struct firmware *fw;
+	uint32_t version[2] = { 0, 0 };
+	uint32_t dwords = 0, offset = 0;
+	uint32_t major = 0, minor = 0, bosize;
+	unsigned int *data, *ptr, *cmds;
+	unsigned int cmds_size;
+
+	if (a5xx_gpu->gpmu_bo)
+		return;
+
+	/*
+	 * Read the firmware name from the device tree - if it doesn't exist
+	 * then don't initialize the GPMU for this target
+	 */
+	if (of_property_read_string(GPU_OF_NODE(gpu), "qcom,gpmu-firmware",
+		&name))
+		return;
+
+	/*
+	 * The version isn't mandatory, but if it exists, we need to enforce
+	 * that the version of the GPMU firmware matches or is newer than the
+	 * value
+	 */
+	of_property_read_u32_array(GPU_OF_NODE(gpu), "qcom,gpmu-version",
+		version, 2);
+
+	/* Get the firmware */
+	if (request_firmware(&fw, name, drm->dev)) {
+		DRM_ERROR("%s: Could not get GPMU firmware. GPMU will not be active\n",
+			gpu->name);
+		return;
+	}
+
+	data = (unsigned int *) fw->data;
+
+	/*
+	 * The first dword is the size of the remaining data in dwords. Use it
+	 * as a checksum of sorts and make sure it matches the actual size of
+	 * the firmware that we read
+	 */
+
+	if (fw->size < 8 || (data[0] < 2) || (data[0] >= (fw->size >> 2)))
+		goto out;
+
+	/* The second dword is an ID - look for 2 (GPMU_FIRMWARE_ID) */
+	if (data[1] != 2)
+		goto out;
+
+	/* Read the header and get the major/minor of the read firmware */
+	if (_read_header(&data[2], fw->size - 8, &major, &minor))
+		goto out;
+
+	if (!_check_gpmu_version(major, minor, version[0], version[1])) {
+		DRM_ERROR("%s: Loaded GPMU version %d.%d is too old\n",
+			gpu->name, major, minor);
+		goto out;
+	}
+
+	cmds = data + data[2] + 3;
+	cmds_size = data[0] - data[2] - 2;
+
+	/*
+	 * A single type4 opcode can only have so many values attached so
+	 * add enough opcodes to load all the commands
+	 */
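+	/*
+	 * Worst case: one PKT4 header dword per full TYPE4_MAX_PAYLOAD chunk
+	 * plus one more for the remainder; << 2 converts dwords to bytes.
+	 */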
+	bosize = (cmds_size + (cmds_size / TYPE4_MAX_PAYLOAD) + 1) << 2;
+
+	ptr = msm_gem_kernel_new(drm, bosize,
+		MSM_BO_UNCACHED | MSM_BO_GPU_READONLY, gpu->aspace,
+		&a5xx_gpu->gpmu_bo, &a5xx_gpu->gpmu_iova);
+	if (IS_ERR(ptr))
+		goto err;
+
+	while (cmds_size > 0) {
+		int i;
+		uint32_t _size = cmds_size > TYPE4_MAX_PAYLOAD ?
+			TYPE4_MAX_PAYLOAD : cmds_size;
+
+		ptr[dwords++] = PKT4(REG_A5XX_GPMU_INST_RAM_BASE + offset,
+			_size);
+
+		for (i = 0; i < _size; i++)
+			ptr[dwords++] = *cmds++;
+
+		offset += _size;
+		cmds_size -= _size;
+	}
+
+	a5xx_gpu->gpmu_dwords = dwords;
+
+	goto out;
+
+err:
+	if (a5xx_gpu->gpmu_iova)
+		msm_gem_put_iova(a5xx_gpu->gpmu_bo, gpu->aspace);
+	if (a5xx_gpu->gpmu_bo)
+		drm_gem_object_unreference_unlocked(a5xx_gpu->gpmu_bo);
+
+	a5xx_gpu->gpmu_bo = NULL;
+	a5xx_gpu->gpmu_iova = 0;
+	a5xx_gpu->gpmu_dwords = 0;
+
+out:
+	/* No need to keep that firmware lying around anymore */
+	release_firmware(fw);
+}
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/gpu/drm/msm/adreno/a5xx_preempt.c	2019-01-22 16:16:23.483246225 +0100
@@ -0,0 +1,359 @@
+/* Copyright (c) 2016-2017 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include "msm_gem.h"
+#include "msm_iommu.h"
+#include "a5xx_gpu.h"
+
+/*
+ * Try to transition the preemption state from old to new. Return
+ * true on success or false if the original state wasn't 'old'
+ */
+static inline bool try_preempt_state(struct a5xx_gpu *a5xx_gpu,
+		enum preempt_state old, enum preempt_state new)
+{
+	enum preempt_state cur = atomic_cmpxchg(&a5xx_gpu->preempt_state,
+		old, new);
+
+	return (cur == old);
+}
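+
+/*
+ * Typical usage, as in a5xx_preempt_trigger() below:
+ *
+ *	if (!try_preempt_state(a5xx_gpu, PREEMPT_NONE, PREEMPT_START))
+ *		return;
+ *
+ * where failure means another path already owns the preemption sequence.
+ */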
+
+/*
+ * Force the preemption state to the specified state.  This is used in cases
+ * where the current state is known and won't change
+ */
+static inline void set_preempt_state(struct a5xx_gpu *gpu,
+		enum preempt_state new)
+{
+	/*
+	 * preempt_state may be read by other cores trying to trigger a
+	 * preemption or in the interrupt handler so barriers are needed
+	 * before...
+	 */
+	smp_mb__before_atomic();
+	atomic_set(&gpu->preempt_state, new);
+	/* ... and after */
+	smp_mb__after_atomic();
+}
+
+/* Write the most recent wptr for the given ring into the hardware */
+static inline void update_wptr(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
+{
+	unsigned long flags;
+	uint32_t wptr;
+
+	if (!ring)
+		return;
+
+	spin_lock_irqsave(&ring->lock, flags);
+	wptr = get_wptr(ring);
+	spin_unlock_irqrestore(&ring->lock, flags);
+
+	gpu_write(gpu, REG_A5XX_CP_RB_WPTR, wptr);
+}
+
+/* Return the highest priority ringbuffer with something in it */
+static struct msm_ringbuffer *get_next_ring(struct msm_gpu *gpu)
+{
+	unsigned long flags;
+	int i;
+
+	/*
+	 * Find the highest priority ringbuffer that isn't empty and jump
+	 * to it (0 being the highest and gpu->nr_rings - 1 being the
+	 * lowest)
+	 */
+	for (i = 0; i < gpu->nr_rings; i++) {
+		bool empty;
+		struct msm_ringbuffer *ring = gpu->rb[i];
+
+		spin_lock_irqsave(&ring->lock, flags);
+		empty = (get_wptr(ring) == ring->memptrs->rptr);
+		spin_unlock_irqrestore(&ring->lock, flags);
+
+		if (!empty)
+			return ring;
+	}
+
+	return NULL;
+}
+
+static void a5xx_preempt_timer(unsigned long data)
+{
+	struct a5xx_gpu *a5xx_gpu = (struct a5xx_gpu *) data;
+	struct msm_gpu *gpu = &a5xx_gpu->base.base;
+	struct drm_device *dev = gpu->dev;
+	struct msm_drm_private *priv = dev->dev_private;
+
+	if (!try_preempt_state(a5xx_gpu, PREEMPT_TRIGGERED, PREEMPT_FAULTED))
+		return;
+
+	dev_err(dev->dev, "%s: preemption timed out\n", gpu->name);
+	queue_work(priv->wq, &gpu->recover_work);
+}
+
+/* Try to trigger a preemption switch */
+void a5xx_preempt_trigger(struct msm_gpu *gpu)
+{
+	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+	struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
+	unsigned long flags;
+	struct msm_ringbuffer *ring;
+
+	if (gpu->nr_rings == 1)
+		return;
+
+	/*
+	 * Try to start preemption by moving from NONE to START. If
+	 * unsuccessful, a preemption is already in flight
+	 */
+	if (!try_preempt_state(a5xx_gpu, PREEMPT_NONE, PREEMPT_START))
+		return;
+
+	/* Get the next ring to preempt to */
+	ring = get_next_ring(gpu);
+
+	/*
+	 * If no ring is populated or the highest priority ring is the current
+	 * one do nothing except to update the wptr to the latest and greatest
+	 */
+	if (!ring || (a5xx_gpu->cur_ring == ring)) {
+		/*
+		 * It's possible that while a preemption request is in progress
+		 * from an irq context, a user context trying to submit might
+		 * fail to update the write pointer, because it determines
+		 * that the preempt state is not PREEMPT_NONE.
+		 *
+		 * Close the race by introducing the intermediate state
+		 * PREEMPT_ABORT to let the submit path know that the
+		 * ringbuffer is not going to change and that it can safely
+		 * update the write pointer.
+		 */
+
+		set_preempt_state(a5xx_gpu, PREEMPT_ABORT);
+		update_wptr(gpu, a5xx_gpu->cur_ring);
+		set_preempt_state(a5xx_gpu, PREEMPT_NONE);
+		return;
+	}
+
+	/* Make sure the wptr doesn't update while we're in motion */
+	spin_lock_irqsave(&ring->lock, flags);
+	a5xx_gpu->preempt[ring->id]->wptr = get_wptr(ring);
+	spin_unlock_irqrestore(&ring->lock, flags);
+
+	/* Do read barrier to make sure we have updated pagetable info */
+	rmb();
+
+	/* Set the SMMU info for the preemption */
+	if (a5xx_gpu->smmu_info) {
+		a5xx_gpu->smmu_info->ttbr0 = ring->memptrs->ttbr0;
+		a5xx_gpu->smmu_info->contextidr = ring->memptrs->contextidr;
+	}
+
+	/* Set the address of the incoming preemption record */
+	gpu_write64(gpu, REG_A5XX_CP_CONTEXT_SWITCH_RESTORE_ADDR_LO,
+		REG_A5XX_CP_CONTEXT_SWITCH_RESTORE_ADDR_HI,
+		a5xx_gpu->preempt_iova[ring->id]);
+
+	a5xx_gpu->next_ring = ring;
+
+	/* Start a timer to catch a stuck preemption */
+	mod_timer(&a5xx_gpu->preempt_timer, jiffies + msecs_to_jiffies(10000));
+
+	/* Set the preemption state to triggered */
+	set_preempt_state(a5xx_gpu, PREEMPT_TRIGGERED);
+
+	/* Make sure everything is written before hitting the button */
+	wmb();
+
+	/* And actually start the preemption */
+	gpu_write(gpu, REG_A5XX_CP_CONTEXT_SWITCH_CNTL, 1);
+}
+
+void a5xx_preempt_irq(struct msm_gpu *gpu)
+{
+	uint32_t status;
+	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+	struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
+	struct drm_device *dev = gpu->dev;
+	struct msm_drm_private *priv = dev->dev_private;
+
+	if (!try_preempt_state(a5xx_gpu, PREEMPT_TRIGGERED, PREEMPT_PENDING))
+		return;
+
+	/* Delete the preemption watchdog timer */
+	del_timer(&a5xx_gpu->preempt_timer);
+
+	/*
+	 * The hardware should be setting CP_CONTEXT_SWITCH_CNTL to zero before
+	 * firing the interrupt, but there is a non-zero chance of a hardware
+	 * condition or a software race that could set it again before we have a
+	 * chance to finish. If that happens, log and go for recovery
+	 */
+	status = gpu_read(gpu, REG_A5XX_CP_CONTEXT_SWITCH_CNTL);
+	if (unlikely(status)) {
+		set_preempt_state(a5xx_gpu, PREEMPT_FAULTED);
+		dev_err(dev->dev, "%s: Preemption failed to complete\n",
+			gpu->name);
+		queue_work(priv->wq, &gpu->recover_work);
+		return;
+	}
+
+	a5xx_gpu->cur_ring = a5xx_gpu->next_ring;
+	a5xx_gpu->next_ring = NULL;
+
+	update_wptr(gpu, a5xx_gpu->cur_ring);
+
+	set_preempt_state(a5xx_gpu, PREEMPT_NONE);
+}
+
+void a5xx_preempt_hw_init(struct msm_gpu *gpu)
+{
+	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+	struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
+	struct msm_ringbuffer *ring;
+	int i;
+
+	if (gpu->nr_rings > 1) {
+		/* Clear the preemption records */
+		FOR_EACH_RING(gpu, ring, i) {
+			if (ring) {
+				a5xx_gpu->preempt[ring->id]->wptr = 0;
+				a5xx_gpu->preempt[ring->id]->rptr = 0;
+				a5xx_gpu->preempt[ring->id]->rbase = ring->iova;
+			}
+		}
+	}
+
+	/* Tell the CP where to find the smmu_info buffer */
+	gpu_write64(gpu, REG_A5XX_CP_CONTEXT_SWITCH_SMMU_INFO_LO,
+		REG_A5XX_CP_CONTEXT_SWITCH_SMMU_INFO_HI,
+		a5xx_gpu->smmu_info_iova);
+
+	/* Reset the preemption state */
+	set_preempt_state(a5xx_gpu, PREEMPT_NONE);
+
+	/* Always come up on rb 0 */
+	a5xx_gpu->cur_ring = gpu->rb[0];
+}
+
+static int preempt_init_ring(struct a5xx_gpu *a5xx_gpu,
+		struct msm_ringbuffer *ring)
+{
+	struct adreno_gpu *adreno_gpu = &a5xx_gpu->base;
+	struct msm_gpu *gpu = &adreno_gpu->base;
+	struct a5xx_preempt_record *ptr;
+	struct drm_gem_object *bo;
+	u64 iova;
+
+	ptr = msm_gem_kernel_new(gpu->dev,
+		A5XX_PREEMPT_RECORD_SIZE + A5XX_PREEMPT_COUNTER_SIZE,
+		MSM_BO_UNCACHED | MSM_BO_PRIVILEGED,
+		gpu->aspace, &bo, &iova);
+
+	if (IS_ERR(ptr))
+		return PTR_ERR(ptr);
+
+	a5xx_gpu->preempt_bo[ring->id] = bo;
+	a5xx_gpu->preempt_iova[ring->id] = iova;
+	a5xx_gpu->preempt[ring->id] = ptr;
+
+	/* Set up the defaults on the preemption record */
+
+	ptr->magic = A5XX_PREEMPT_RECORD_MAGIC;
+	ptr->info = 0;
+	ptr->data = 0;
+	ptr->cntl = MSM_GPU_RB_CNTL_DEFAULT;
+	ptr->rptr_addr = rbmemptr(ring, rptr);
+	ptr->counter = iova + A5XX_PREEMPT_RECORD_SIZE;
+
+	return 0;
+}
+
+void a5xx_preempt_fini(struct msm_gpu *gpu)
+{
+	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+	struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
+	struct msm_ringbuffer *ring;
+	int i;
+
+	FOR_EACH_RING(gpu, ring, i) {
+		if (!ring || !a5xx_gpu->preempt_bo[i])
+			continue;
+
+		if (a5xx_gpu->preempt_iova[i])
+			msm_gem_put_iova(a5xx_gpu->preempt_bo[i], gpu->aspace);
+
+		drm_gem_object_unreference_unlocked(a5xx_gpu->preempt_bo[i]);
+
+		a5xx_gpu->preempt_bo[i] = NULL;
+	}
+
+	if (a5xx_gpu->smmu_info_bo) {
+		if (a5xx_gpu->smmu_info_iova)
+			msm_gem_put_iova(a5xx_gpu->smmu_info_bo, gpu->aspace);
+		drm_gem_object_unreference_unlocked(a5xx_gpu->smmu_info_bo);
+		a5xx_gpu->smmu_info_bo = NULL;
+	}
+}
+
+void a5xx_preempt_init(struct msm_gpu *gpu)
+{
+	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+	struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
+	struct msm_ringbuffer *ring;
+	struct a5xx_smmu_info *ptr;
+	struct drm_gem_object *bo;
+	uint64_t iova;
+	int i;
+
+	/* No preemption if we only have one ring */
+	if (gpu->nr_rings <= 1)
+		return;
+
+	FOR_EACH_RING(gpu, ring, i) {
+		if (!ring)
+			continue;
+
+		if (preempt_init_ring(a5xx_gpu, ring))
+			goto fail;
+	}
+
+	if (msm_iommu_allow_dynamic(gpu->aspace->mmu)) {
+		ptr = msm_gem_kernel_new(gpu->dev,
+			sizeof(struct a5xx_smmu_info),
+			MSM_BO_UNCACHED | MSM_BO_PRIVILEGED,
+			gpu->aspace, &bo, &iova);
+
+		if (IS_ERR(ptr))
+			goto fail;
+
+		ptr->magic = A5XX_SMMU_INFO_MAGIC;
+
+		a5xx_gpu->smmu_info_bo = bo;
+		a5xx_gpu->smmu_info_iova = iova;
+		a5xx_gpu->smmu_info = ptr;
+	}
+
+	setup_timer(&a5xx_gpu->preempt_timer, a5xx_preempt_timer,
+		(unsigned long) a5xx_gpu);
+
+	return;
+fail:
+	/*
+	 * On any failure our adventure is over. Clean up and
+	 * set nr_rings to 1 to force preemption off
+	 */
+	a5xx_preempt_fini(gpu);
+	gpu->nr_rings = 1;
+}
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/gpu/drm/msm/adreno/a5xx_snapshot.c	2019-01-22 16:16:23.483246225 +0100
@@ -0,0 +1,815 @@
+/* Copyright (c) 2016-2017 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include "msm_gpu.h"
+#include "msm_gem.h"
+#include "a5xx_gpu.h"
+#include "msm_snapshot_api.h"
+
+#define A5XX_NR_SHADER_BANKS 4
+
+/*
+ * These are a list of the registers that need to be read through the HLSQ
+ * aperture through the crashdumper.  These are not nominally accessible from
+ * the CPU on a secure platform.
+ */
+static const struct {
+	u32 type;
+	u32 regoffset;
+	u32 count;
+} a5xx_hlsq_aperture_regs[] = {
+	{ 0x35, 0xE00, 0x32 },   /* HLSQ non-context */
+	{ 0x31, 0x2080, 0x1 },   /* HLSQ 2D context 0 */
+	{ 0x33, 0x2480, 0x1 },   /* HLSQ 2D context 1 */
+	{ 0x32, 0xE780, 0x62 },  /* HLSQ 3D context 0 */
+	{ 0x34, 0xEF80, 0x62 },  /* HLSQ 3D context 1 */
+	{ 0x3f, 0x0EC0, 0x40 },  /* SP non-context */
+	{ 0x3d, 0x2040, 0x1 },   /* SP 2D context 0 */
+	{ 0x3b, 0x2440, 0x1 },   /* SP 2D context 1 */
+	{ 0x3e, 0xE580, 0x180 }, /* SP 3D context 0 */
+	{ 0x3c, 0xED80, 0x180 }, /* SP 3D context 1 */
+	{ 0x3a, 0x0F00, 0x1c },  /* TP non-context */
+	{ 0x38, 0x2000, 0xa },   /* TP 2D context 0 */
+	{ 0x36, 0x2400, 0xa },   /* TP 2D context 1 */
+	{ 0x39, 0xE700, 0x80 },  /* TP 3D context 0 */
+	{ 0x37, 0xEF00, 0x80 },  /* TP 3D context 1 */
+};
+
+/*
+ * The debugbus registers contain device state that presumably makes
+ * sense to the hardware designers. 'count' is the number of indexes to read,
+ * each index value is 64 bits
+ */
+static const struct {
+	enum a5xx_debugbus id;
+	u32 count;
+} a5xx_debugbus_blocks[] = {
+	{  A5XX_RBBM_DBGBUS_CP, 0x100, },
+	{  A5XX_RBBM_DBGBUS_RBBM, 0x100, },
+	{  A5XX_RBBM_DBGBUS_HLSQ, 0x100, },
+	{  A5XX_RBBM_DBGBUS_UCHE, 0x100, },
+	{  A5XX_RBBM_DBGBUS_DPM, 0x100, },
+	{  A5XX_RBBM_DBGBUS_TESS, 0x100, },
+	{  A5XX_RBBM_DBGBUS_PC, 0x100, },
+	{  A5XX_RBBM_DBGBUS_VFDP, 0x100, },
+	{  A5XX_RBBM_DBGBUS_VPC, 0x100, },
+	{  A5XX_RBBM_DBGBUS_TSE, 0x100, },
+	{  A5XX_RBBM_DBGBUS_RAS, 0x100, },
+	{  A5XX_RBBM_DBGBUS_VSC, 0x100, },
+	{  A5XX_RBBM_DBGBUS_COM, 0x100, },
+	{  A5XX_RBBM_DBGBUS_DCOM, 0x100, },
+	{  A5XX_RBBM_DBGBUS_LRZ, 0x100, },
+	{  A5XX_RBBM_DBGBUS_A2D_DSP, 0x100, },
+	{  A5XX_RBBM_DBGBUS_CCUFCHE, 0x100, },
+	{  A5XX_RBBM_DBGBUS_GPMU, 0x100, },
+	{  A5XX_RBBM_DBGBUS_RBP, 0x100, },
+	{  A5XX_RBBM_DBGBUS_HM, 0x100, },
+	{  A5XX_RBBM_DBGBUS_RBBM_CFG, 0x100, },
+	{  A5XX_RBBM_DBGBUS_VBIF_CX, 0x100, },
+	{  A5XX_RBBM_DBGBUS_GPC, 0x100, },
+	{  A5XX_RBBM_DBGBUS_LARC, 0x100, },
+	{  A5XX_RBBM_DBGBUS_HLSQ_SPTP, 0x100, },
+	{  A5XX_RBBM_DBGBUS_RB_0, 0x100, },
+	{  A5XX_RBBM_DBGBUS_RB_1, 0x100, },
+	{  A5XX_RBBM_DBGBUS_RB_2, 0x100, },
+	{  A5XX_RBBM_DBGBUS_RB_3, 0x100, },
+	{  A5XX_RBBM_DBGBUS_CCU_0, 0x100, },
+	{  A5XX_RBBM_DBGBUS_CCU_1, 0x100, },
+	{  A5XX_RBBM_DBGBUS_CCU_2, 0x100, },
+	{  A5XX_RBBM_DBGBUS_CCU_3, 0x100, },
+	{  A5XX_RBBM_DBGBUS_A2D_RAS_0, 0x100, },
+	{  A5XX_RBBM_DBGBUS_A2D_RAS_1, 0x100, },
+	{  A5XX_RBBM_DBGBUS_A2D_RAS_2, 0x100, },
+	{  A5XX_RBBM_DBGBUS_A2D_RAS_3, 0x100, },
+	{  A5XX_RBBM_DBGBUS_VFD_0, 0x100, },
+	{  A5XX_RBBM_DBGBUS_VFD_1, 0x100, },
+	{  A5XX_RBBM_DBGBUS_VFD_2, 0x100, },
+	{  A5XX_RBBM_DBGBUS_VFD_3, 0x100, },
+	{  A5XX_RBBM_DBGBUS_SP_0, 0x100, },
+	{  A5XX_RBBM_DBGBUS_SP_1, 0x100, },
+	{  A5XX_RBBM_DBGBUS_SP_2, 0x100, },
+	{  A5XX_RBBM_DBGBUS_SP_3, 0x100, },
+	{  A5XX_RBBM_DBGBUS_TPL1_0, 0x100, },
+	{  A5XX_RBBM_DBGBUS_TPL1_1, 0x100, },
+	{  A5XX_RBBM_DBGBUS_TPL1_2, 0x100, },
+	{  A5XX_RBBM_DBGBUS_TPL1_3, 0x100, },
+};
+
+/*
+ * The shader blocks are read from the HLSQ aperture - each one has its own
+ * identifier for the aperture read
+ */
+static const struct {
+	enum a5xx_shader_blocks id;
+	u32 size;
+} a5xx_shader_blocks[] = {
+	{A5XX_TP_W_MEMOBJ,              0x200},
+	{A5XX_TP_W_MIPMAP_BASE,         0x3C0},
+	{A5XX_TP_W_SAMPLER_TAG,          0x40},
+	{A5XX_TP_S_3D_SAMPLER,           0x80},
+	{A5XX_TP_S_3D_SAMPLER_TAG,       0x20},
+	{A5XX_TP_S_CS_SAMPLER,           0x40},
+	{A5XX_TP_S_CS_SAMPLER_TAG,       0x10},
+	{A5XX_SP_W_CONST,               0x800},
+	{A5XX_SP_W_CB_SIZE,              0x30},
+	{A5XX_SP_W_CB_BASE,              0xF0},
+	{A5XX_SP_W_STATE,                 0x1},
+	{A5XX_SP_S_3D_CONST,            0x800},
+	{A5XX_SP_S_3D_CB_SIZE,           0x28},
+	{A5XX_SP_S_3D_UAV_SIZE,          0x80},
+	{A5XX_SP_S_CS_CONST,            0x400},
+	{A5XX_SP_S_CS_CB_SIZE,            0x8},
+	{A5XX_SP_S_CS_UAV_SIZE,          0x80},
+	{A5XX_SP_S_3D_CONST_DIRTY,       0x12},
+	{A5XX_SP_S_3D_CB_SIZE_DIRTY,      0x1},
+	{A5XX_SP_S_3D_UAV_SIZE_DIRTY,     0x2},
+	{A5XX_SP_S_CS_CONST_DIRTY,        0xA},
+	{A5XX_SP_S_CS_CB_SIZE_DIRTY,      0x1},
+	{A5XX_SP_S_CS_UAV_SIZE_DIRTY,     0x2},
+	{A5XX_HLSQ_ICB_DIRTY,             0xB},
+	{A5XX_SP_POWER_RESTORE_RAM_TAG,   0xA},
+	{A5XX_TP_POWER_RESTORE_RAM_TAG,   0xA},
+	{A5XX_TP_W_SAMPLER,              0x80},
+	{A5XX_TP_W_MEMOBJ_TAG,           0x40},
+	{A5XX_TP_S_3D_MEMOBJ,           0x200},
+	{A5XX_TP_S_3D_MEMOBJ_TAG,        0x20},
+	{A5XX_TP_S_CS_MEMOBJ,           0x100},
+	{A5XX_TP_S_CS_MEMOBJ_TAG,        0x10},
+	{A5XX_SP_W_INSTR,               0x800},
+	{A5XX_SP_W_UAV_SIZE,             0x80},
+	{A5XX_SP_W_UAV_BASE,             0x80},
+	{A5XX_SP_W_INST_TAG,             0x40},
+	{A5XX_SP_S_3D_INSTR,            0x800},
+	{A5XX_SP_S_3D_CB_BASE,           0xC8},
+	{A5XX_SP_S_3D_UAV_BASE,          0x80},
+	{A5XX_SP_S_CS_INSTR,            0x400},
+	{A5XX_SP_S_CS_CB_BASE,           0x28},
+	{A5XX_SP_S_CS_UAV_BASE,          0x80},
+	{A5XX_SP_S_3D_INSTR_DIRTY,        0x1},
+	{A5XX_SP_S_3D_CB_BASE_DIRTY,      0x5},
+	{A5XX_SP_S_3D_UAV_BASE_DIRTY,     0x2},
+	{A5XX_SP_S_CS_INSTR_DIRTY,        0x1},
+	{A5XX_SP_S_CS_CB_BASE_DIRTY,      0x1},
+	{A5XX_SP_S_CS_UAV_BASE_DIRTY,     0x2},
+	{A5XX_HLSQ_ICB,                 0x200},
+	{A5XX_HLSQ_ICB_CB_BASE_DIRTY,     0x4},
+	{A5XX_SP_POWER_RESTORE_RAM,     0x140},
+	{A5XX_TP_POWER_RESTORE_RAM,      0x40},
+};
+
+/*
+ * The A5XX architecture has a built-in engine to asynchronously dump
+ * registers from the GPU. It is used to accelerate the copy of hundreds
+ * (thousands) of registers and as a safe way to access registers that might
+ * have secure data in them (if the GPU is in secure mode, the crashdumper returns
+ * bogus values for those registers). On a fully secured device the CPU will be
+ * blocked from accessing those registers directly and so the crashdump is the
+ * only way that we can access context registers and the shader banks for debug
+ * purposes.
+ *
+ * The downside of the crashdump is that it requires access to GPU-accessible
+ * memory (so the VBIF and the bus and the SMMU need to be up and working) and
+ * you need enough memory to write the script for the crashdumper and to store
+ * the data that you are dumping so there is a balancing act between the work to
+ * set up a crash dumper and the value we get out of it.
+ */
+
+/*
+ * The crashdump uses a pseudo-script format to read and write registers.  Each
+ * operation is two 64 bit values.
+ *
+ * READ:
+ *  [qword 0] [63:00] - The absolute IOVA address target for the register value
+ *  [qword 1] [63:44] - the dword address of the register offset to read
+ *            [15:00] - Number of dwords to read at once
+ *
+ * WRITE:
+ *  [qword 0] [31:0] 32 bit value to write to the register
+ *  [qword 1] [63:44] - the dword address of the register offset to write
+ *            [21:21] - set 1 to write
+ *            [15:00] - Number of dwords to write (usually 1)
+ *
+ * At the bottom of the script, write quadword zeros to trigger the end.
+ */
+struct crashdump {
+	struct drm_gem_object *bo;
+	void *ptr;
+	u64 iova;
+	u32 index;
+};
+
+#define CRASHDUMP_BO_SIZE (SZ_1M)
+#define CRASHDUMP_SCRIPT_SIZE (256 * SZ_1K)
+#define CRASHDUMP_DATA_SIZE (CRASHDUMP_BO_SIZE - CRASHDUMP_SCRIPT_SIZE)
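+
+/*
+ * The single BO is split in two: the first 256K holds the script and the
+ * remaining 768K receives the dumped data, so the offsets passed to the read
+ * helpers below are relative to crashdump->iova + CRASHDUMP_SCRIPT_SIZE.
+ */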
+
+static int crashdump_init(struct msm_gpu *gpu, struct crashdump *crashdump)
+{
+	int ret = 0;
+
+	crashdump->ptr = msm_gem_kernel_new_locked(gpu->dev,
+		CRASHDUMP_BO_SIZE, MSM_BO_UNCACHED,
+		gpu->aspace, &crashdump->bo, &crashdump->iova);
+	if (IS_ERR(crashdump->ptr)) {
+		ret = PTR_ERR(crashdump->ptr);
+		crashdump->ptr = NULL;
+	}
+
+	return ret;
+}
+
+static int crashdump_run(struct msm_gpu *gpu, struct crashdump *crashdump)
+{
+	if (!crashdump->ptr || !crashdump->index)
+		return -EINVAL;
+
+	gpu_write(gpu, REG_A5XX_CP_CRASH_SCRIPT_BASE_LO,
+		lower_32_bits(crashdump->iova));
+	gpu_write(gpu, REG_A5XX_CP_CRASH_SCRIPT_BASE_HI,
+		upper_32_bits(crashdump->iova));
+
+	gpu_write(gpu, REG_A5XX_CP_CRASH_DUMP_CNTL, 1);
+
+	return spin_until(gpu_read(gpu, REG_A5XX_CP_CRASH_DUMP_CNTL) & 0x04);
+}
+
+static void crashdump_destroy(struct msm_gpu *gpu, struct crashdump *crashdump)
+{
+	if (!crashdump->bo)
+		return;
+
+	if (crashdump->iova)
+		msm_gem_put_iova(crashdump->bo, gpu->aspace);
+
+	drm_gem_object_unreference(crashdump->bo);
+
+	memset(crashdump, 0, sizeof(*crashdump));
+}
+
+static inline void CRASHDUMP_SCRIPT_WRITE(struct crashdump *crashdump,
+		u32 reg, u32 val)
+{
+	u64 *ptr = crashdump->ptr + crashdump->index;
+
+	if (WARN_ON(crashdump->index + (2 * sizeof(u64))
+		>= CRASHDUMP_SCRIPT_SIZE))
+		return;
+
+	/* This is the value to write */
+	ptr[0] = (u64) val;
+
+	/*
+	 * This triggers a write to the specified register.  1 is the size of
+	 * the write in dwords
+	 */
+	ptr[1] = (((u64) reg) << 44) | (1 << 21) | 1;
+
+	crashdump->index += 2 * sizeof(u64);
+}
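+
+/*
+ * Worked example: writing the value 1 to an (illustrative) register at dword
+ * offset 0xE00 appends
+ *
+ *	ptr[0] = 0x0000000000000001;
+ *	ptr[1] = (0xE00ULL << 44) | (1 << 21) | 1;	(= 0x00E0000000200001)
+ */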
+
+static inline void CRASHDUMP_SCRIPT_READ(struct crashdump *crashdump,
+		u32 reg, u32 count, u32 offset)
+{
+	u64 *ptr = crashdump->ptr + crashdump->index;
+
+	if (WARN_ON(crashdump->index + (2 * sizeof(u64))
+		>= CRASHDUMP_SCRIPT_SIZE))
+		return;
+
+	if (WARN_ON(offset + (count * sizeof(u32)) >= CRASHDUMP_DATA_SIZE))
+		return;
+
+	ptr[0] = (u64) crashdump->iova + CRASHDUMP_SCRIPT_SIZE + offset;
+	ptr[1] = (((u64) reg) << 44) | count;
+
+	crashdump->index += 2 * sizeof(u64);
+}
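+
+/*
+ * Example: CRASHDUMP_SCRIPT_READ(crashdump, reg, 4, 0x100) appends
+ *
+ *	ptr[0] = crashdump->iova + CRASHDUMP_SCRIPT_SIZE + 0x100;
+ *	ptr[1] = ((u64) reg << 44) | 4;
+ *
+ * i.e. "read 4 dwords starting at reg into offset 0x100 of the data area".
+ */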
+
+static inline void *CRASHDUMP_DATA_PTR(struct crashdump *crashdump, u32 offset)
+{
+	if (WARN_ON(!crashdump->ptr || offset >= CRASHDUMP_DATA_SIZE))
+		return NULL;
+
+	return crashdump->ptr + CRASHDUMP_SCRIPT_SIZE + offset;
+}
+
+static inline u32 CRASHDUMP_DATA_READ(struct crashdump *crashdump, u32 offset)
+{
+	return *((u32 *) CRASHDUMP_DATA_PTR(crashdump, offset));
+}
+
+static inline void CRASHDUMP_RESET(struct crashdump *crashdump)
+{
+	crashdump->index = 0;
+}
+
+static inline void CRASHDUMP_END(struct crashdump *crashdump)
+{
+	u64 *ptr = crashdump->ptr + crashdump->index;
+
+	if (WARN_ON((crashdump->index + (2 * sizeof(u64)))
+		>= CRASHDUMP_SCRIPT_SIZE))
+		return;
+
+	ptr[0] = 0;
+	ptr[1] = 0;
+
+	crashdump->index += 2 * sizeof(u64);
+}
+
+static u32 _crashdump_read_hlsq_aperture(struct crashdump *crashdump,
+		u32 offset, u32 statetype, u32 bank,
+		u32 count)
+{
+	CRASHDUMP_SCRIPT_WRITE(crashdump, REG_A5XX_HLSQ_DBG_READ_SEL,
+		A5XX_HLSQ_DBG_READ_SEL_STATETYPE(statetype) | bank);
+
+	CRASHDUMP_SCRIPT_READ(crashdump, REG_A5XX_HLSQ_DBG_AHB_READ_APERTURE,
+		count, offset);
+
+	return count * sizeof(u32);
+}
+
+static u32 _copy_registers(struct msm_snapshot *snapshot,
+		struct crashdump *crashdump, u32 reg, u32 count,
+		u32 offset)
+{
+	int i;
+	u32 *ptr = (u32 *) (crashdump->ptr + CRASHDUMP_SCRIPT_SIZE + offset);
+	/*
+	 * Write the offset of the first register of the group and the number of
+	 * registers in the group
+	 */
+	SNAPSHOT_WRITE_U32(snapshot, ((count << 16) | reg));
+
+	/* Followed by each register value in the group */
+	for (i = 0; i < count; i++)
+		SNAPSHOT_WRITE_U32(snapshot, ptr[i]);
+
+	return count * sizeof(u32);
+}
+
+/*
+ * Return the number of registers in each register group from the
+ * adreno_gpu->rgisters
+ */
+static inline u32 REG_COUNT(const unsigned int *ptr)
+{
+	return (ptr[1] - ptr[0]) + 1;
+}
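+
+/*
+ * adreno_gpu->registers is a ~0-terminated list of (first, last) pairs, so a
+ * hypothetical pair { 0x0800, 0x081A } counts as 0x1B registers.
+ */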
+
+/*
+ * Capture what registers we can from the CPU in case the crashdumper is
+ * unavailable or broken.  This will omit the SP, TP and HLSQ registers, but
+ * you'll get everything else and that ain't bad
+ */
+static void a5xx_snapshot_registers_cpu(struct msm_gpu *gpu,
+		struct msm_snapshot *snapshot)
+{
+	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+	struct msm_snapshot_regs header;
+	u32 regcount = 0, groups = 0;
+	int i;
+
+	/*
+	 * Before we write the section we need to figure out how big our data
+	 * section will be
+	 */
+	for (i = 0; adreno_gpu->registers[i] != ~0; i += 2) {
+		regcount += REG_COUNT(&(adreno_gpu->registers[i]));
+		groups++;
+	}
+
+	header.count = groups;
+
+	/*
+	 * We need one dword for each group and then one dword for each register
+	 * value in that group
+	 */
+	if (!SNAPSHOT_HEADER(snapshot, header, SNAPSHOT_SECTION_REGS_V2,
+		regcount + groups))
+		return;
+
+	for (i = 0; adreno_gpu->registers[i] != ~0; i += 2) {
+		u32 count = REG_COUNT(&(adreno_gpu->registers[i]));
+		u32 reg = adreno_gpu->registers[i];
+		int j;
+
+		/* Write the offset and count for the group */
+		SNAPSHOT_WRITE_U32(snapshot, (count << 16) | reg);
+
+		/* Write each value in the group */
+		for (j = 0; j < count; j++)
+			SNAPSHOT_WRITE_U32(snapshot, gpu_read(gpu, reg++));
+	}
+}
+
+static void a5xx_snapshot_registers(struct msm_gpu *gpu,
+		struct msm_snapshot *snapshot)
+{
+	struct msm_snapshot_regs header;
+	struct crashdump *crashdump = snapshot->priv;
+	u32 offset = 0, regcount = 0, groups = 0;
+	int i;
+
+	/*
+	 * First snapshot all the registers that we can from the CPU.  Do this
+	 * because the crashdumper has a tendency to "taint" the value of some
+	 * of the registers (because the GPU implements the crashdumper) so we
+	 * only want to use the crash dump facility if we have to
+	 */
+	a5xx_snapshot_registers_cpu(gpu, snapshot);
+
+	if (!crashdump)
+		return;
+
+	CRASHDUMP_RESET(crashdump);
+
+	/* HLSQ and context registers behind the aperture */
+	for (i = 0; i < ARRAY_SIZE(a5xx_hlsq_aperture_regs); i++) {
+		u32 count = a5xx_hlsq_aperture_regs[i].count;
+
+		offset += _crashdump_read_hlsq_aperture(crashdump, offset,
+			a5xx_hlsq_aperture_regs[i].type, 0, count);
+		regcount += count;
+
+		groups++;
+	}
+
+	CRASHDUMP_END(crashdump);
+
+	if (crashdump_run(gpu, crashdump))
+		return;
+
+	header.count = groups;
+
+	/*
+	 * The size of the data will be one dword for each "group" of registers,
+	 * and then one dword for each of the registers in that group
+	 */
+	if (!SNAPSHOT_HEADER(snapshot, header, SNAPSHOT_SECTION_REGS_V2,
+		groups + regcount))
+		return;
+
+	/* Copy the registers to the snapshot */
+	for (i = 0; i < ARRAY_SIZE(a5xx_hlsq_aperture_regs); i++)
+		offset += _copy_registers(snapshot, crashdump,
+			a5xx_hlsq_aperture_regs[i].regoffset,
+			a5xx_hlsq_aperture_regs[i].count, offset);
+}
+
+static void _a5xx_snapshot_shader_bank(struct msm_snapshot *snapshot,
+		struct crashdump *crashdump, u32 block, u32 bank,
+		u32 size, u32 offset)
+{
+	void *src;
+
+	struct msm_snapshot_shader header = {
+		.type = block,
+		.index = bank,
+		.size = size,
+	};
+
+	if (!SNAPSHOT_HEADER(snapshot, header, SNAPSHOT_SECTION_SHADER, size))
+		return;
+
+	src = CRASHDUMP_DATA_PTR(crashdump, offset);
+
+	if (src)
+		SNAPSHOT_MEMCPY(snapshot, src, size * sizeof(u32));
+}
+
+static void a5xx_snapshot_shader_memory(struct msm_gpu *gpu,
+		struct msm_snapshot *snapshot)
+{
+	struct crashdump *crashdump = snapshot->priv;
+	u32 offset = 0;
+	int i;
+
+	/* We can only get shader memory through the crashdump */
+	if (!crashdump)
+		return;
+
+	CRASHDUMP_RESET(crashdump);
+
+	/* For each shader block */
+	for (i = 0; i < ARRAY_SIZE(a5xx_shader_blocks); i++) {
+		int j;
+
+		/* For each block, dump 4 banks */
+		for (j = 0; j < A5XX_NR_SHADER_BANKS; j++)
+			offset += _crashdump_read_hlsq_aperture(crashdump,
+				offset, a5xx_shader_blocks[i].id, j,
+				a5xx_shader_blocks[i].size);
+	}
+
+	CRASHDUMP_END(crashdump);
+
+	/* If the crashdump fails we can't get shader memory any other way */
+	if (crashdump_run(gpu, crashdump))
+		return;
+
+	/* Each bank of each shader gets its own snapshot section */
+	for (offset = 0, i = 0; i < ARRAY_SIZE(a5xx_shader_blocks); i++) {
+		int j;
+
+		for (j = 0; j < A5XX_NR_SHADER_BANKS; j++) {
+			_a5xx_snapshot_shader_bank(snapshot, crashdump,
+				a5xx_shader_blocks[i].id, j,
+				a5xx_shader_blocks[i].size, offset);
+			offset += a5xx_shader_blocks[i].size * sizeof(u32);
+		}
+	}
+}
+
+#define A5XX_NUM_AXI_ARB_BLOCKS 2
+#define A5XX_NUM_XIN_BLOCKS     4
+#define VBIF_DATA_SIZE ((16 * A5XX_NUM_AXI_ARB_BLOCKS) + \
+	(18 * A5XX_NUM_XIN_BLOCKS) + (12 * A5XX_NUM_XIN_BLOCKS))
+
+static void a5xx_snapshot_debugbus_vbif(struct msm_gpu *gpu,
+		struct msm_snapshot *snapshot)
+{
+	int i;
+	struct msm_snapshot_debugbus header = {
+		.id = A5XX_RBBM_DBGBUS_VBIF,
+		.count = VBIF_DATA_SIZE,
+	};
+
+	if (!SNAPSHOT_HEADER(snapshot, header, SNAPSHOT_SECTION_DEBUGBUS,
+		VBIF_DATA_SIZE))
+		return;
+
+	gpu_rmw(gpu, REG_A5XX_VBIF_CLKON, A5XX_VBIF_CLKON_FORCE_ON_TESTBUS,
+		A5XX_VBIF_CLKON_FORCE_ON_TESTBUS);
+
+	gpu_write(gpu, REG_A5XX_VBIF_TEST_BUS1_CTRL0, 0);
+	gpu_write(gpu, REG_A5XX_VBIF_TEST_BUS_OUT_CTRL,
+		A5XX_VBIF_TEST_BUS_OUT_CTRL_TEST_BUS_CTRL_EN);
+
+	for (i = 0; i < A5XX_NUM_AXI_ARB_BLOCKS; i++) {
+		int j;
+
+		gpu_write(gpu, REG_A5XX_VBIF_TEST_BUS2_CTRL0, 1 << (i + 16));
+		for (j = 0; j < 16; j++) {
+			gpu_write(gpu, REG_A5XX_VBIF_TEST_BUS2_CTRL1,
+			A5XX_VBIF_TEST_BUS2_CTRL1_TEST_BUS2_DATA_SEL(j));
+			SNAPSHOT_WRITE_U32(snapshot, gpu_read(gpu,
+				REG_A5XX_VBIF_TEST_BUS_OUT));
+		}
+	}
+
+	for (i = 0; i < A5XX_NUM_XIN_BLOCKS; i++) {
+		int j;
+
+		gpu_write(gpu, REG_A5XX_VBIF_TEST_BUS2_CTRL0, 1 << i);
+		for (j = 0; j < 18; j++) {
+			gpu_write(gpu, REG_A5XX_VBIF_TEST_BUS2_CTRL1,
+			A5XX_VBIF_TEST_BUS2_CTRL1_TEST_BUS2_DATA_SEL(j));
+			SNAPSHOT_WRITE_U32(snapshot,
+				gpu_read(gpu, REG_A5XX_VBIF_TEST_BUS_OUT));
+		}
+	}
+
+	for (i = 0; i < A5XX_NUM_XIN_BLOCKS; i++) {
+		int j;
+
+		gpu_write(gpu, REG_A5XX_VBIF_TEST_BUS1_CTRL0, 1 << i);
+		for (j = 0; j < 12; j++) {
+			gpu_write(gpu, REG_A5XX_VBIF_TEST_BUS1_CTRL1,
+			A5XX_VBIF_TEST_BUS1_CTRL1_TEST_BUS1_DATA_SEL(j));
+			SNAPSHOT_WRITE_U32(snapshot, gpu_read(gpu,
+				REG_A5XX_VBIF_TEST_BUS_OUT));
+		}
+	}
+}
+
+static void a5xx_snapshot_debugbus_block(struct msm_gpu *gpu,
+		struct msm_snapshot *snapshot, u32 block, u32 count)
+{
+	int i;
+	struct msm_snapshot_debugbus header = {
+		.id = block,
+		.count = count * 2, /* Each value is 2 dwords */
+	};
+
+	if (!SNAPSHOT_HEADER(snapshot, header, SNAPSHOT_SECTION_DEBUGBUS,
+		(count * 2)))
+		return;
+
+	for (i = 0; i < count; i++) {
+		u32 reg = A5XX_RBBM_CFG_DBGBUS_SEL_A_PING_INDEX(i) |
+			A5XX_RBBM_CFG_DBGBUS_SEL_A_PING_BLK_SEL(block);
+
+		gpu_write(gpu, REG_A5XX_RBBM_CFG_DBGBUS_SEL_A, reg);
+		gpu_write(gpu, REG_A5XX_RBBM_CFG_DBGBUS_SEL_B, reg);
+		gpu_write(gpu, REG_A5XX_RBBM_CFG_DBGBUS_SEL_C, reg);
+		gpu_write(gpu, REG_A5XX_RBBM_CFG_DBGBUS_SEL_D, reg);
+
+		/* Each debugbus entry is a quad word */
+		SNAPSHOT_WRITE_U32(snapshot, gpu_read(gpu,
+			REG_A5XX_RBBM_CFG_DBGBUS_TRACE_BUF2));
+		SNAPSHOT_WRITE_U32(snapshot,
+			gpu_read(gpu, REG_A5XX_RBBM_CFG_DBGBUS_TRACE_BUF1));
+	}
+}
+
+static void a5xx_snapshot_debugbus(struct msm_gpu *gpu,
+		struct msm_snapshot *snapshot)
+{
+	int i;
+
+	gpu_write(gpu, REG_A5XX_RBBM_CFG_DBGBUS_CNTLM,
+		A5XX_RBBM_CFG_DBGBUS_CNTLM_ENABLE(0xF));
+
+	for (i = 0; i < ARRAY_SIZE(a5xx_debugbus_blocks); i++)
+		a5xx_snapshot_debugbus_block(gpu, snapshot,
+			a5xx_debugbus_blocks[i].id,
+			a5xx_debugbus_blocks[i].count);
+
+	/* VBIF is special and not in a good way */
+	a5xx_snapshot_debugbus_vbif(gpu, snapshot);
+}
+
+static void a5xx_snapshot_cp_merciu(struct msm_gpu *gpu,
+		struct msm_snapshot *snapshot)
+{
+	unsigned int i;
+	struct msm_snapshot_debug header = {
+		.type = SNAPSHOT_DEBUG_CP_MERCIU,
+		.size = 64 << 1, /* Data size is 2 dwords per entry */
+	};
+
+	if (!SNAPSHOT_HEADER(snapshot, header, SNAPSHOT_SECTION_DEBUG, 64 << 1))
+		return;
+
+	gpu_write(gpu, REG_A5XX_CP_MERCIU_DBG_ADDR, 0);
+	for (i = 0; i < 64; i++) {
+		SNAPSHOT_WRITE_U32(snapshot,
+			gpu_read(gpu, REG_A5XX_CP_MERCIU_DBG_DATA_1));
+		SNAPSHOT_WRITE_U32(snapshot,
+			gpu_read(gpu, REG_A5XX_CP_MERCIU_DBG_DATA_2));
+	}
+}
+
+static void a5xx_snapshot_cp_roq(struct msm_gpu *gpu,
+		struct msm_snapshot *snapshot)
+{
+	int i;
+	struct msm_snapshot_debug header = {
+		.type = SNAPSHOT_DEBUG_CP_ROQ,
+		.size = 512,
+	};
+
+	if (!SNAPSHOT_HEADER(snapshot, header, SNAPSHOT_SECTION_DEBUG, 512))
+		return;
+
+	gpu_write(gpu, REG_A5XX_CP_ROQ_DBG_ADDR, 0);
+	for (i = 0; i < 512; i++)
+		SNAPSHOT_WRITE_U32(snapshot,
+			gpu_read(gpu, REG_A5XX_CP_ROQ_DBG_DATA));
+}
+
+static void a5xx_snapshot_cp_meq(struct msm_gpu *gpu,
+		struct msm_snapshot *snapshot)
+{
+	int i;
+	struct msm_snapshot_debug header = {
+		.type = SNAPSHOT_DEBUG_CP_MEQ,
+		.size = 64,
+	};
+
+	if (!SNAPSHOT_HEADER(snapshot, header, SNAPSHOT_SECTION_DEBUG, 64))
+		return;
+
+	gpu_write(gpu, REG_A5XX_CP_MEQ_DBG_ADDR, 0);
+	for (i = 0; i < 64; i++)
+		SNAPSHOT_WRITE_U32(snapshot,
+			gpu_read(gpu, REG_A5XX_CP_MEQ_DBG_DATA));
+}
+
+static void a5xx_snapshot_indexed_registers(struct msm_gpu *gpu,
+		struct msm_snapshot *snapshot, u32 addr, u32 data,
+		u32 count)
+{
+	unsigned int i;
+	struct msm_snapshot_indexed_regs header = {
+		.index_reg = addr,
+		.data_reg = data,
+		.start = 0,
+		.count = count,
+	};
+
+	if (!SNAPSHOT_HEADER(snapshot, header, SNAPSHOT_SECTION_INDEXED_REGS,
+		count))
+		return;
+
+	for (i = 0; i < count; i++) {
+		gpu_write(gpu, addr, i);
+		SNAPSHOT_WRITE_U32(snapshot, gpu_read(gpu, data));
+	}
+}
+
+static void a5xx_snapshot_preemption(struct msm_gpu *gpu,
+		struct msm_snapshot *snapshot)
+{
+	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+	struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
+	struct msm_snapshot_gpu_object header = {
+		.type = SNAPSHOT_GPU_OBJECT_GLOBAL,
+		.size = A5XX_PREEMPT_RECORD_SIZE >> 2,
+		.pt_base = 0,
+	};
+	int index;
+
+	if (gpu->nr_rings <= 1)
+		return;
+
+	for (index = 0; index < gpu->nr_rings; index++) {
+
+		header.gpuaddr = a5xx_gpu->preempt_iova[index];
+
+		if (!SNAPSHOT_HEADER(snapshot, header,
+			SNAPSHOT_SECTION_GPU_OBJECT_V2,
+			A5XX_PREEMPT_RECORD_SIZE >> 2))
+			return;
+
+		SNAPSHOT_MEMCPY(snapshot, a5xx_gpu->preempt[index],
+				A5XX_PREEMPT_RECORD_SIZE);
+	}
+}
+
+int a5xx_snapshot(struct msm_gpu *gpu, struct msm_snapshot *snapshot)
+{
+	struct crashdump crashdump = { 0 };
+
+	if (!crashdump_init(gpu, &crashdump))
+		snapshot->priv = &crashdump;
+
+	/* To accurately read all registers, disable hardware clock gating */
+	a5xx_set_hwcg(gpu, false);
+
+	/* Kick it up to the generic level */
+	adreno_snapshot(gpu, snapshot);
+
+	/* Read the GPU registers */
+	a5xx_snapshot_registers(gpu, snapshot);
+
+	/* Read the shader memory banks */
+	a5xx_snapshot_shader_memory(gpu, snapshot);
+
+	/* Read the debugbus registers */
+	a5xx_snapshot_debugbus(gpu, snapshot);
+
+	/* PFP data */
+	a5xx_snapshot_indexed_registers(gpu, snapshot,
+		REG_A5XX_CP_PFP_STAT_ADDR, REG_A5XX_CP_PFP_STAT_DATA, 36);
+
+	/* ME data */
+	a5xx_snapshot_indexed_registers(gpu, snapshot,
+		REG_A5XX_CP_ME_STAT_ADDR, REG_A5XX_CP_ME_STAT_DATA, 29);
+
+	/* DRAW_STATE data */
+	a5xx_snapshot_indexed_registers(gpu, snapshot,
+		REG_A5XX_CP_DRAW_STATE_ADDR, REG_A5XX_CP_DRAW_STATE_DATA,
+		256);
+
+	/* ME cache */
+	a5xx_snapshot_indexed_registers(gpu, snapshot,
+		REG_A5XX_CP_ME_UCODE_DBG_ADDR, REG_A5XX_CP_ME_UCODE_DBG_DATA,
+		0x53F);
+
+	/* PFP cache */
+	a5xx_snapshot_indexed_registers(gpu, snapshot,
+		REG_A5XX_CP_PFP_UCODE_DBG_ADDR, REG_A5XX_CP_PFP_UCODE_DBG_DATA,
+		0x53F);
+
+	/* ME queue */
+	a5xx_snapshot_cp_meq(gpu, snapshot);
+
+	/* CP ROQ */
+	a5xx_snapshot_cp_roq(gpu, snapshot);
+
+	/* CP MERCIU */
+	a5xx_snapshot_cp_merciu(gpu, snapshot);
+
+	/* Preemption records */
+	a5xx_snapshot_preemption(gpu, snapshot);
+
+	crashdump_destroy(gpu, &crashdump);
+	snapshot->priv = NULL;
+
+	/* Re-enable HWCG */
+	a5xx_set_hwcg(gpu, true);
+	return 0;
+}
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/gpu/drm/msm/adreno/a5xx.xml.h	2019-01-22 16:16:23.483246225 +0100
@@ -0,0 +1,3493 @@
+#ifndef A5XX_XML
+#define A5XX_XML
+
+/* Autogenerated file, DO NOT EDIT manually!
+
+This file was generated by the rules-ng-ng headergen tool in this git repository:
+http://github.com/freedreno/envytools/
+git clone https://github.com/freedreno/envytools.git
+
+The rules-ng-ng source files this header was generated from are:
+- ./rnndb/adreno.xml               (    431 bytes, from 2016-10-24 21:12:27)
+- ./rnndb/freedreno_copyright.xml  (   1572 bytes, from 2016-10-24 21:12:27)
+- ./rnndb/adreno/a2xx.xml          (  32901 bytes, from 2016-10-24 21:12:27)
+- ./rnndb/adreno/adreno_common.xml (  12025 bytes, from 2016-10-24 21:12:27)
+- ./rnndb/adreno/adreno_pm4.xml    (  19684 bytes, from 2016-10-24 21:12:27)
+- ./rnndb/adreno/a3xx.xml          (  83840 bytes, from 2016-10-24 21:12:27)
+- ./rnndb/adreno/a4xx.xml          ( 110708 bytes, from 2016-10-24 21:12:27)
+- ./rnndb/adreno/a5xx.xml          (  86963 bytes, from 2017-03-03 16:01:09)
+- ./rnndb/adreno/ocmem.xml         (   1773 bytes, from 2016-10-24 21:12:27)
+
+Copyright (C) 2013-2017 by the following authors:
+- Rob Clark <robdclark@gmail.com> (robclark)
+- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice (including the
+next paragraph) shall be included in all copies or substantial
+portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/
+
+
+enum a5xx_color_fmt {
+	RB5_R8_UNORM = 3,
+	RB5_R5G5B5A1_UNORM = 10,
+	RB5_R8G8B8A8_UNORM = 48,
+	RB5_R8G8B8_UNORM = 49,
+	RB5_R8G8B8A8_UINT = 51,
+	RB5_R10G10B10A2_UINT = 58,
+	RB5_R16G16B16A16_FLOAT = 98,
+};
+
+enum a5xx_tile_mode {
+	TILE5_LINEAR = 0,
+	TILE5_2 = 2,
+	TILE5_3 = 3,
+};
+
+enum a5xx_vtx_fmt {
+	VFMT5_8_UNORM = 3,
+	VFMT5_8_SNORM = 4,
+	VFMT5_8_UINT = 5,
+	VFMT5_8_SINT = 6,
+	VFMT5_8_8_UNORM = 15,
+	VFMT5_8_8_SNORM = 16,
+	VFMT5_8_8_UINT = 17,
+	VFMT5_8_8_SINT = 18,
+	VFMT5_16_UNORM = 21,
+	VFMT5_16_SNORM = 22,
+	VFMT5_16_FLOAT = 23,
+	VFMT5_16_UINT = 24,
+	VFMT5_16_SINT = 25,
+	VFMT5_8_8_8_UNORM = 33,
+	VFMT5_8_8_8_SNORM = 34,
+	VFMT5_8_8_8_UINT = 35,
+	VFMT5_8_8_8_SINT = 36,
+	VFMT5_8_8_8_8_UNORM = 48,
+	VFMT5_8_8_8_8_SNORM = 50,
+	VFMT5_8_8_8_8_UINT = 51,
+	VFMT5_8_8_8_8_SINT = 52,
+	VFMT5_16_16_UNORM = 67,
+	VFMT5_16_16_SNORM = 68,
+	VFMT5_16_16_FLOAT = 69,
+	VFMT5_16_16_UINT = 70,
+	VFMT5_16_16_SINT = 71,
+	VFMT5_32_UNORM = 72,
+	VFMT5_32_SNORM = 73,
+	VFMT5_32_FLOAT = 74,
+	VFMT5_32_UINT = 75,
+	VFMT5_32_SINT = 76,
+	VFMT5_32_FIXED = 77,
+	VFMT5_16_16_16_UNORM = 88,
+	VFMT5_16_16_16_SNORM = 89,
+	VFMT5_16_16_16_FLOAT = 90,
+	VFMT5_16_16_16_UINT = 91,
+	VFMT5_16_16_16_SINT = 92,
+	VFMT5_16_16_16_16_UNORM = 96,
+	VFMT5_16_16_16_16_SNORM = 97,
+	VFMT5_16_16_16_16_FLOAT = 98,
+	VFMT5_16_16_16_16_UINT = 99,
+	VFMT5_16_16_16_16_SINT = 100,
+	VFMT5_32_32_UNORM = 101,
+	VFMT5_32_32_SNORM = 102,
+	VFMT5_32_32_FLOAT = 103,
+	VFMT5_32_32_UINT = 104,
+	VFMT5_32_32_SINT = 105,
+	VFMT5_32_32_FIXED = 106,
+	VFMT5_32_32_32_UNORM = 112,
+	VFMT5_32_32_32_SNORM = 113,
+	VFMT5_32_32_32_UINT = 114,
+	VFMT5_32_32_32_SINT = 115,
+	VFMT5_32_32_32_FLOAT = 116,
+	VFMT5_32_32_32_FIXED = 117,
+	VFMT5_32_32_32_32_UNORM = 128,
+	VFMT5_32_32_32_32_SNORM = 129,
+	VFMT5_32_32_32_32_FLOAT = 130,
+	VFMT5_32_32_32_32_UINT = 131,
+	VFMT5_32_32_32_32_SINT = 132,
+	VFMT5_32_32_32_32_FIXED = 133,
+};
+
+enum a5xx_tex_fmt {
+	TFMT5_A8_UNORM = 2,
+	TFMT5_8_UNORM = 3,
+	TFMT5_4_4_4_4_UNORM = 8,
+	TFMT5_5_6_5_UNORM = 14,
+	TFMT5_L8_A8_UNORM = 19,
+	TFMT5_16_FLOAT = 23,
+	TFMT5_8_8_8_8_UNORM = 48,
+	TFMT5_10_10_10_2_UNORM = 54,
+	TFMT5_16_16_FLOAT = 69,
+	TFMT5_32_FLOAT = 74,
+	TFMT5_16_16_16_16_FLOAT = 98,
+	TFMT5_32_32_FLOAT = 103,
+	TFMT5_32_32_32_32_FLOAT = 130,
+	TFMT5_X8Z24_UNORM = 160,
+};
+
+enum a5xx_tex_fetchsize {
+	TFETCH5_1_BYTE = 0,
+	TFETCH5_2_BYTE = 1,
+	TFETCH5_4_BYTE = 2,
+	TFETCH5_8_BYTE = 3,
+	TFETCH5_16_BYTE = 4,
+};
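
The fetch-size codes are exactly log2 of the bytes fetched per texel (1, 2, 4, 8, 16 bytes map to codes 0..4), so a driver can derive the code from a format's texel size instead of switching on the enum. A minimal sketch under that observation; the helper name is illustrative and not part of this header:

static inline enum a5xx_tex_fetchsize a5xx_fetchsize(uint32_t bytes)
{
	/* bytes must be a power of two in [1, 16]; code == log2(bytes) */
	return (enum a5xx_tex_fetchsize)__builtin_ctz(bytes);
}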
+
+enum a5xx_depth_format {
+	DEPTH5_NONE = 0,
+	DEPTH5_16 = 1,
+	DEPTH5_24_8 = 2,
+	DEPTH5_32 = 4,
+};
+
+enum a5xx_debugbus {
+	A5XX_RBBM_DBGBUS_CP = 1,
+	A5XX_RBBM_DBGBUS_RBBM = 2,
+	A5XX_RBBM_DBGBUS_VBIF = 3,
+	A5XX_RBBM_DBGBUS_HLSQ = 4,
+	A5XX_RBBM_DBGBUS_UCHE = 5,
+	A5XX_RBBM_DBGBUS_DPM = 6,
+	A5XX_RBBM_DBGBUS_TESS = 7,
+	A5XX_RBBM_DBGBUS_PC = 8,
+	A5XX_RBBM_DBGBUS_VFDP = 9,
+	A5XX_RBBM_DBGBUS_VPC = 10,
+	A5XX_RBBM_DBGBUS_TSE = 11,
+	A5XX_RBBM_DBGBUS_RAS = 12,
+	A5XX_RBBM_DBGBUS_VSC = 13,
+	A5XX_RBBM_DBGBUS_COM = 14,
+	A5XX_RBBM_DBGBUS_DCOM = 15,
+	A5XX_RBBM_DBGBUS_LRZ = 16,
+	A5XX_RBBM_DBGBUS_A2D_DSP = 17,
+	A5XX_RBBM_DBGBUS_CCUFCHE = 18,
+	A5XX_RBBM_DBGBUS_GPMU = 19,
+	A5XX_RBBM_DBGBUS_RBP = 20,
+	A5XX_RBBM_DBGBUS_HM = 21,
+	A5XX_RBBM_DBGBUS_RBBM_CFG = 22,
+	A5XX_RBBM_DBGBUS_VBIF_CX = 23,
+	A5XX_RBBM_DBGBUS_GPC = 29,
+	A5XX_RBBM_DBGBUS_LARC = 30,
+	A5XX_RBBM_DBGBUS_HLSQ_SPTP = 31,
+	A5XX_RBBM_DBGBUS_RB_0 = 32,
+	A5XX_RBBM_DBGBUS_RB_1 = 33,
+	A5XX_RBBM_DBGBUS_RB_2 = 34,
+	A5XX_RBBM_DBGBUS_RB_3 = 35,
+	A5XX_RBBM_DBGBUS_CCU_0 = 40,
+	A5XX_RBBM_DBGBUS_CCU_1 = 41,
+	A5XX_RBBM_DBGBUS_CCU_2 = 42,
+	A5XX_RBBM_DBGBUS_CCU_3 = 43,
+	A5XX_RBBM_DBGBUS_A2D_RAS_0 = 48,
+	A5XX_RBBM_DBGBUS_A2D_RAS_1 = 49,
+	A5XX_RBBM_DBGBUS_A2D_RAS_2 = 50,
+	A5XX_RBBM_DBGBUS_A2D_RAS_3 = 51,
+	A5XX_RBBM_DBGBUS_VFD_0 = 56,
+	A5XX_RBBM_DBGBUS_VFD_1 = 57,
+	A5XX_RBBM_DBGBUS_VFD_2 = 58,
+	A5XX_RBBM_DBGBUS_VFD_3 = 59,
+	A5XX_RBBM_DBGBUS_SP_0 = 64,
+	A5XX_RBBM_DBGBUS_SP_1 = 65,
+	A5XX_RBBM_DBGBUS_SP_2 = 66,
+	A5XX_RBBM_DBGBUS_SP_3 = 67,
+	A5XX_RBBM_DBGBUS_TPL1_0 = 72,
+	A5XX_RBBM_DBGBUS_TPL1_1 = 73,
+	A5XX_RBBM_DBGBUS_TPL1_2 = 74,
+	A5XX_RBBM_DBGBUS_TPL1_3 = 75,
+};
+
+enum a5xx_shader_blocks {
+	A5XX_TP_W_MEMOBJ = 1,
+	A5XX_TP_W_SAMPLER = 2,
+	A5XX_TP_W_MIPMAP_BASE = 3,
+	A5XX_TP_W_MEMOBJ_TAG = 4,
+	A5XX_TP_W_SAMPLER_TAG = 5,
+	A5XX_TP_S_3D_MEMOBJ = 6,
+	A5XX_TP_S_3D_SAMPLER = 7,
+	A5XX_TP_S_3D_MEMOBJ_TAG = 8,
+	A5XX_TP_S_3D_SAMPLER_TAG = 9,
+	A5XX_TP_S_CS_MEMOBJ = 10,
+	A5XX_TP_S_CS_SAMPLER = 11,
+	A5XX_TP_S_CS_MEMOBJ_TAG = 12,
+	A5XX_TP_S_CS_SAMPLER_TAG = 13,
+	A5XX_SP_W_INSTR = 14,
+	A5XX_SP_W_CONST = 15,
+	A5XX_SP_W_UAV_SIZE = 16,
+	A5XX_SP_W_CB_SIZE = 17,
+	A5XX_SP_W_UAV_BASE = 18,
+	A5XX_SP_W_CB_BASE = 19,
+	A5XX_SP_W_INST_TAG = 20,
+	A5XX_SP_W_STATE = 21,
+	A5XX_SP_S_3D_INSTR = 22,
+	A5XX_SP_S_3D_CONST = 23,
+	A5XX_SP_S_3D_CB_BASE = 24,
+	A5XX_SP_S_3D_CB_SIZE = 25,
+	A5XX_SP_S_3D_UAV_BASE = 26,
+	A5XX_SP_S_3D_UAV_SIZE = 27,
+	A5XX_SP_S_CS_INSTR = 28,
+	A5XX_SP_S_CS_CONST = 29,
+	A5XX_SP_S_CS_CB_BASE = 30,
+	A5XX_SP_S_CS_CB_SIZE = 31,
+	A5XX_SP_S_CS_UAV_BASE = 32,
+	A5XX_SP_S_CS_UAV_SIZE = 33,
+	A5XX_SP_S_3D_INSTR_DIRTY = 34,
+	A5XX_SP_S_3D_CONST_DIRTY = 35,
+	A5XX_SP_S_3D_CB_BASE_DIRTY = 36,
+	A5XX_SP_S_3D_CB_SIZE_DIRTY = 37,
+	A5XX_SP_S_3D_UAV_BASE_DIRTY = 38,
+	A5XX_SP_S_3D_UAV_SIZE_DIRTY = 39,
+	A5XX_SP_S_CS_INSTR_DIRTY = 40,
+	A5XX_SP_S_CS_CONST_DIRTY = 41,
+	A5XX_SP_S_CS_CB_BASE_DIRTY = 42,
+	A5XX_SP_S_CS_CB_SIZE_DIRTY = 43,
+	A5XX_SP_S_CS_UAV_BASE_DIRTY = 44,
+	A5XX_SP_S_CS_UAV_SIZE_DIRTY = 45,
+	A5XX_HLSQ_ICB = 46,
+	A5XX_HLSQ_ICB_DIRTY = 47,
+	A5XX_HLSQ_ICB_CB_BASE_DIRTY = 48,
+	A5XX_SP_POWER_RESTORE_RAM = 64,
+	A5XX_SP_POWER_RESTORE_RAM_TAG = 65,
+	A5XX_TP_POWER_RESTORE_RAM = 66,
+	A5XX_TP_POWER_RESTORE_RAM_TAG = 67,
+};
+
+enum a5xx_tex_filter {
+	A5XX_TEX_NEAREST = 0,
+	A5XX_TEX_LINEAR = 1,
+	A5XX_TEX_ANISO = 2,
+};
+
+enum a5xx_tex_clamp {
+	A5XX_TEX_REPEAT = 0,
+	A5XX_TEX_CLAMP_TO_EDGE = 1,
+	A5XX_TEX_MIRROR_REPEAT = 2,
+	A5XX_TEX_CLAMP_TO_BORDER = 3,
+	A5XX_TEX_MIRROR_CLAMP = 4,
+};
+
+enum a5xx_tex_aniso {
+	A5XX_TEX_ANISO_1 = 0,
+	A5XX_TEX_ANISO_2 = 1,
+	A5XX_TEX_ANISO_4 = 2,
+	A5XX_TEX_ANISO_8 = 3,
+	A5XX_TEX_ANISO_16 = 4,
+};
+
+enum a5xx_tex_swiz {
+	A5XX_TEX_X = 0,
+	A5XX_TEX_Y = 1,
+	A5XX_TEX_Z = 2,
+	A5XX_TEX_W = 3,
+	A5XX_TEX_ZERO = 4,
+	A5XX_TEX_ONE = 5,
+};
+
+enum a5xx_tex_type {
+	A5XX_TEX_1D = 0,
+	A5XX_TEX_2D = 1,
+	A5XX_TEX_CUBE = 2,
+	A5XX_TEX_3D = 3,
+};
+
+#define A5XX_INT0_RBBM_GPU_IDLE					0x00000001
+#define A5XX_INT0_RBBM_AHB_ERROR				0x00000002
+#define A5XX_INT0_RBBM_TRANSFER_TIMEOUT				0x00000004
+#define A5XX_INT0_RBBM_ME_MS_TIMEOUT				0x00000008
+#define A5XX_INT0_RBBM_PFP_MS_TIMEOUT				0x00000010
+#define A5XX_INT0_RBBM_ETS_MS_TIMEOUT				0x00000020
+#define A5XX_INT0_RBBM_ATB_ASYNC_OVERFLOW			0x00000040
+#define A5XX_INT0_RBBM_GPC_ERROR				0x00000080
+#define A5XX_INT0_CP_SW						0x00000100
+#define A5XX_INT0_CP_HW_ERROR					0x00000200
+#define A5XX_INT0_CP_CCU_FLUSH_DEPTH_TS				0x00000400
+#define A5XX_INT0_CP_CCU_FLUSH_COLOR_TS				0x00000800
+#define A5XX_INT0_CP_CCU_RESOLVE_TS				0x00001000
+#define A5XX_INT0_CP_IB2					0x00002000
+#define A5XX_INT0_CP_IB1					0x00004000
+#define A5XX_INT0_CP_RB						0x00008000
+#define A5XX_INT0_CP_UNUSED_1					0x00010000
+#define A5XX_INT0_CP_RB_DONE_TS					0x00020000
+#define A5XX_INT0_CP_WT_DONE_TS					0x00040000
+#define A5XX_INT0_UNKNOWN_1					0x00080000
+#define A5XX_INT0_CP_CACHE_FLUSH_TS				0x00100000
+#define A5XX_INT0_UNUSED_2					0x00200000
+#define A5XX_INT0_RBBM_ATB_BUS_OVERFLOW				0x00400000
+#define A5XX_INT0_MISC_HANG_DETECT				0x00800000
+#define A5XX_INT0_UCHE_OOB_ACCESS				0x01000000
+#define A5XX_INT0_UCHE_TRAP_INTR				0x02000000
+#define A5XX_INT0_DEBBUS_INTR_0					0x04000000
+#define A5XX_INT0_DEBBUS_INTR_1					0x08000000
+#define A5XX_INT0_GPMU_VOLTAGE_DROOP				0x10000000
+#define A5XX_INT0_GPMU_FIRMWARE					0x20000000
+#define A5XX_INT0_ISDB_CPU_IRQ					0x40000000
+#define A5XX_INT0_ISDB_UNDER_DEBUG				0x80000000
+#define A5XX_CP_INT_CP_OPCODE_ERROR				0x00000001
+#define A5XX_CP_INT_CP_RESERVED_BIT_ERROR			0x00000002
+#define A5XX_CP_INT_CP_HW_FAULT_ERROR				0x00000004
+#define A5XX_CP_INT_CP_DMA_ERROR				0x00000008
+#define A5XX_CP_INT_CP_REGISTER_PROTECTION_ERROR		0x00000010
+#define A5XX_CP_INT_CP_AHB_ERROR				0x00000020
+#define REG_A5XX_CP_RB_BASE					0x00000800
+
+#define REG_A5XX_CP_RB_BASE_HI					0x00000801
+
+#define REG_A5XX_CP_RB_CNTL					0x00000802
+
+#define REG_A5XX_CP_RB_RPTR_ADDR				0x00000804
+
+#define REG_A5XX_CP_RB_RPTR_ADDR_HI				0x00000805
+
+#define REG_A5XX_CP_RB_RPTR					0x00000806
+
+#define REG_A5XX_CP_RB_WPTR					0x00000807
+
+#define REG_A5XX_CP_PFP_STAT_ADDR				0x00000808
+
+#define REG_A5XX_CP_PFP_STAT_DATA				0x00000809
+
+#define REG_A5XX_CP_DRAW_STATE_ADDR				0x0000080b
+
+#define REG_A5XX_CP_DRAW_STATE_DATA				0x0000080c
+
+#define REG_A5XX_CP_CRASH_SCRIPT_BASE_LO			0x00000817
+
+#define REG_A5XX_CP_CRASH_SCRIPT_BASE_HI			0x00000818
+
+#define REG_A5XX_CP_CRASH_DUMP_CNTL				0x00000819
+
+#define REG_A5XX_CP_ME_STAT_ADDR				0x0000081a
+
+#define REG_A5XX_CP_ROQ_THRESHOLDS_1				0x0000081f
+
+#define REG_A5XX_CP_ROQ_THRESHOLDS_2				0x00000820
+
+#define REG_A5XX_CP_ROQ_DBG_ADDR				0x00000821
+
+#define REG_A5XX_CP_ROQ_DBG_DATA				0x00000822
+
+#define REG_A5XX_CP_MEQ_DBG_ADDR				0x00000823
+
+#define REG_A5XX_CP_MEQ_DBG_DATA				0x00000824
+
+#define REG_A5XX_CP_MEQ_THRESHOLDS				0x00000825
+
+#define REG_A5XX_CP_MERCIU_SIZE					0x00000826
+
+#define REG_A5XX_CP_MERCIU_DBG_ADDR				0x00000827
+
+#define REG_A5XX_CP_MERCIU_DBG_DATA_1				0x00000828
+
+#define REG_A5XX_CP_MERCIU_DBG_DATA_2				0x00000829
+
+#define REG_A5XX_CP_PFP_UCODE_DBG_ADDR				0x0000082a
+
+#define REG_A5XX_CP_PFP_UCODE_DBG_DATA				0x0000082b
+
+#define REG_A5XX_CP_ME_UCODE_DBG_ADDR				0x0000082f
+
+#define REG_A5XX_CP_ME_UCODE_DBG_DATA				0x00000830
+
+#define REG_A5XX_CP_CNTL					0x00000831
+
+#define REG_A5XX_CP_PFP_ME_CNTL					0x00000832
+
+#define REG_A5XX_CP_CHICKEN_DBG					0x00000833
+
+#define REG_A5XX_CP_PFP_INSTR_BASE_LO				0x00000835
+
+#define REG_A5XX_CP_PFP_INSTR_BASE_HI				0x00000836
+
+#define REG_A5XX_CP_ME_INSTR_BASE_LO				0x00000838
+
+#define REG_A5XX_CP_ME_INSTR_BASE_HI				0x00000839
+
+#define REG_A5XX_CP_CONTEXT_SWITCH_CNTL				0x0000083b
+
+#define REG_A5XX_CP_CONTEXT_SWITCH_RESTORE_ADDR_LO		0x0000083c
+
+#define REG_A5XX_CP_CONTEXT_SWITCH_RESTORE_ADDR_HI		0x0000083d
+
+#define REG_A5XX_CP_CONTEXT_SWITCH_SAVE_ADDR_LO			0x0000083e
+
+#define REG_A5XX_CP_CONTEXT_SWITCH_SAVE_ADDR_HI			0x0000083f
+
+#define REG_A5XX_CP_CONTEXT_SWITCH_SMMU_INFO_LO			0x00000840
+
+#define REG_A5XX_CP_CONTEXT_SWITCH_SMMU_INFO_HI			0x00000841
+
+#define REG_A5XX_CP_ADDR_MODE_CNTL				0x00000860
+
+#define REG_A5XX_CP_ME_STAT_DATA				0x00000b14
+
+#define REG_A5XX_CP_WFI_PEND_CTR				0x00000b15
+
+#define REG_A5XX_CP_INTERRUPT_STATUS				0x00000b18
+
+#define REG_A5XX_CP_HW_FAULT					0x00000b1a
+
+#define REG_A5XX_CP_PROTECT_STATUS				0x00000b1c
+
+#define REG_A5XX_CP_IB1_BASE					0x00000b1f
+
+#define REG_A5XX_CP_IB1_BASE_HI					0x00000b20
+
+#define REG_A5XX_CP_IB1_BUFSZ					0x00000b21
+
+#define REG_A5XX_CP_IB2_BASE					0x00000b22
+
+#define REG_A5XX_CP_IB2_BASE_HI					0x00000b23
+
+#define REG_A5XX_CP_IB2_BUFSZ					0x00000b24
+
+static inline uint32_t REG_A5XX_CP_SCRATCH(uint32_t i0) { return 0x00000b78 + 0x1*i0; }
+
+static inline uint32_t REG_A5XX_CP_SCRATCH_REG(uint32_t i0) { return 0x00000b78 + 0x1*i0; }
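
Register arrays are emitted as static inline offset calculators rather than one define per element: REG_A5XX_CP_SCRATCH_REG(i) yields the dword offset of element i (stride 1 from 0x0b78). A usage sketch, assuming the msm driver's gpu_write()/gpu_read() accessors, which are not defined in this header:

/* Write scratch register i, then read it back (sketch only). */
static inline uint32_t a5xx_scratch_test(struct msm_gpu *gpu, int i, uint32_t v)
{
	gpu_write(gpu, REG_A5XX_CP_SCRATCH_REG(i), v);
	return gpu_read(gpu, REG_A5XX_CP_SCRATCH_REG(i));
}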
+
+static inline uint32_t REG_A5XX_CP_PROTECT(uint32_t i0) { return 0x00000880 + 0x1*i0; }
+
+static inline uint32_t REG_A5XX_CP_PROTECT_REG(uint32_t i0) { return 0x00000880 + 0x1*i0; }
+#define A5XX_CP_PROTECT_REG_BASE_ADDR__MASK			0x0001ffff
+#define A5XX_CP_PROTECT_REG_BASE_ADDR__SHIFT			0
+static inline uint32_t A5XX_CP_PROTECT_REG_BASE_ADDR(uint32_t val)
+{
+	return ((val) << A5XX_CP_PROTECT_REG_BASE_ADDR__SHIFT) & A5XX_CP_PROTECT_REG_BASE_ADDR__MASK;
+}
+#define A5XX_CP_PROTECT_REG_MASK_LEN__MASK			0x1f000000
+#define A5XX_CP_PROTECT_REG_MASK_LEN__SHIFT			24
+static inline uint32_t A5XX_CP_PROTECT_REG_MASK_LEN(uint32_t val)
+{
+	return ((val) << A5XX_CP_PROTECT_REG_MASK_LEN__SHIFT) & A5XX_CP_PROTECT_REG_MASK_LEN__MASK;
+}
+#define A5XX_CP_PROTECT_REG_TRAP_WRITE				0x20000000
+#define A5XX_CP_PROTECT_REG_TRAP_READ				0x40000000
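
The __MASK/__SHIFT pairs plus their inline packers follow one pattern throughout this file: shift the value into position, mask it to the field width, and OR the fields together into a register word. As a hedged sketch, a read/write-protect entry might be composed as below; MASK_LEN is a 5-bit field, consistent with a log2 block length, but the exact semantics should be taken from the hardware documentation:

/* Trap both reads and writes on a block of 2^log2_len registers at
 * base (illustrative composition only). */
static inline uint32_t a5xx_protect_rw(uint32_t base, uint32_t log2_len)
{
	return A5XX_CP_PROTECT_REG_TRAP_READ |
	       A5XX_CP_PROTECT_REG_TRAP_WRITE |
	       A5XX_CP_PROTECT_REG_MASK_LEN(log2_len) |
	       A5XX_CP_PROTECT_REG_BASE_ADDR(base);
}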
+
+#define REG_A5XX_CP_PROTECT_CNTL				0x000008a0
+
+#define REG_A5XX_CP_AHB_FAULT					0x00000b1b
+
+#define REG_A5XX_CP_PERFCTR_CP_SEL_0				0x00000bb0
+
+#define REG_A5XX_CP_PERFCTR_CP_SEL_1				0x00000bb1
+
+#define REG_A5XX_CP_PERFCTR_CP_SEL_2				0x00000bb2
+
+#define REG_A5XX_CP_PERFCTR_CP_SEL_3				0x00000bb3
+
+#define REG_A5XX_CP_PERFCTR_CP_SEL_4				0x00000bb4
+
+#define REG_A5XX_CP_PERFCTR_CP_SEL_5				0x00000bb5
+
+#define REG_A5XX_CP_PERFCTR_CP_SEL_6				0x00000bb6
+
+#define REG_A5XX_CP_PERFCTR_CP_SEL_7				0x00000bb7
+
+#define REG_A5XX_VSC_ADDR_MODE_CNTL				0x00000bc1
+
+#define REG_A5XX_CP_POWERCTR_CP_SEL_0				0x00000bba
+
+#define REG_A5XX_CP_POWERCTR_CP_SEL_1				0x00000bbb
+
+#define REG_A5XX_CP_POWERCTR_CP_SEL_2				0x00000bbc
+
+#define REG_A5XX_CP_POWERCTR_CP_SEL_3				0x00000bbd
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_SEL_A				0x00000004
+#define A5XX_RBBM_CFG_DBGBUS_SEL_A_PING_INDEX__MASK		0x000000ff
+#define A5XX_RBBM_CFG_DBGBUS_SEL_A_PING_INDEX__SHIFT		0
+static inline uint32_t A5XX_RBBM_CFG_DBGBUS_SEL_A_PING_INDEX(uint32_t val)
+{
+	return ((val) << A5XX_RBBM_CFG_DBGBUS_SEL_A_PING_INDEX__SHIFT) & A5XX_RBBM_CFG_DBGBUS_SEL_A_PING_INDEX__MASK;
+}
+#define A5XX_RBBM_CFG_DBGBUS_SEL_A_PING_BLK_SEL__MASK		0x0000ff00
+#define A5XX_RBBM_CFG_DBGBUS_SEL_A_PING_BLK_SEL__SHIFT		8
+static inline uint32_t A5XX_RBBM_CFG_DBGBUS_SEL_A_PING_BLK_SEL(uint32_t val)
+{
+	return ((val) << A5XX_RBBM_CFG_DBGBUS_SEL_A_PING_BLK_SEL__SHIFT) & A5XX_RBBM_CFG_DBGBUS_SEL_A_PING_BLK_SEL__MASK;
+}
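
Tying enum a5xx_debugbus to these field helpers: pointing debug-bus channel A at a hardware block means packing a block id and a sub-index into SEL_A. A sketch, again assuming a gpu_write() accessor from the surrounding driver:

static inline void a5xx_dbgbus_select(struct msm_gpu *gpu,
				      enum a5xx_debugbus block, uint32_t idx)
{
	gpu_write(gpu, REG_A5XX_RBBM_CFG_DBGBUS_SEL_A,
		  A5XX_RBBM_CFG_DBGBUS_SEL_A_PING_BLK_SEL(block) |
		  A5XX_RBBM_CFG_DBGBUS_SEL_A_PING_INDEX(idx));
}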
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_SEL_B				0x00000005
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_SEL_C				0x00000006
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_SEL_D				0x00000007
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_CNTLT				0x00000008
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_CNTLM				0x00000009
+#define A5XX_RBBM_CFG_DBGBUS_CNTLM_ENABLE__MASK			0x0f000000
+#define A5XX_RBBM_CFG_DBGBUS_CNTLM_ENABLE__SHIFT		24
+static inline uint32_t A5XX_RBBM_CFG_DBGBUS_CNTLM_ENABLE(uint32_t val)
+{
+	return ((val) << A5XX_RBBM_CFG_DBGBUS_CNTLM_ENABLE__SHIFT) & A5XX_RBBM_CFG_DBGBUS_CNTLM_ENABLE__MASK;
+}
+
+#define REG_A5XX_RBBM_CFG_DEBBUS_CTLTM_ENABLE_SHIFT		0x00000018
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_OPL				0x0000000a
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_OPE				0x0000000b
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_IVTL_0				0x0000000c
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_IVTL_1				0x0000000d
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_IVTL_2				0x0000000e
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_IVTL_3				0x0000000f
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_MASKL_0			0x00000010
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_MASKL_1			0x00000011
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_MASKL_2			0x00000012
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_MASKL_3			0x00000013
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_BYTEL_0			0x00000014
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_BYTEL_1			0x00000015
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_IVTE_0				0x00000016
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_IVTE_1				0x00000017
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_IVTE_2				0x00000018
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_IVTE_3				0x00000019
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_MASKE_0			0x0000001a
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_MASKE_1			0x0000001b
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_MASKE_2			0x0000001c
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_MASKE_3			0x0000001d
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_NIBBLEE			0x0000001e
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_PTRC0				0x0000001f
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_PTRC1				0x00000020
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_LOADREG			0x00000021
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_IDX				0x00000022
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_CLRC				0x00000023
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_LOADIVT			0x00000024
+
+#define REG_A5XX_RBBM_INTERFACE_HANG_INT_CNTL			0x0000002f
+
+#define REG_A5XX_RBBM_INT_CLEAR_CMD				0x00000037
+
+#define REG_A5XX_RBBM_INT_0_MASK				0x00000038
+#define A5XX_RBBM_INT_0_MASK_RBBM_GPU_IDLE			0x00000001
+#define A5XX_RBBM_INT_0_MASK_RBBM_AHB_ERROR			0x00000002
+#define A5XX_RBBM_INT_0_MASK_RBBM_TRANSFER_TIMEOUT		0x00000004
+#define A5XX_RBBM_INT_0_MASK_RBBM_ME_MS_TIMEOUT			0x00000008
+#define A5XX_RBBM_INT_0_MASK_RBBM_PFP_MS_TIMEOUT		0x00000010
+#define A5XX_RBBM_INT_0_MASK_RBBM_ETS_MS_TIMEOUT		0x00000020
+#define A5XX_RBBM_INT_0_MASK_RBBM_ATB_ASYNC_OVERFLOW		0x00000040
+#define A5XX_RBBM_INT_0_MASK_RBBM_GPC_ERROR			0x00000080
+#define A5XX_RBBM_INT_0_MASK_CP_SW				0x00000100
+#define A5XX_RBBM_INT_0_MASK_CP_HW_ERROR			0x00000200
+#define A5XX_RBBM_INT_0_MASK_CP_CCU_FLUSH_DEPTH_TS		0x00000400
+#define A5XX_RBBM_INT_0_MASK_CP_CCU_FLUSH_COLOR_TS		0x00000800
+#define A5XX_RBBM_INT_0_MASK_CP_CCU_RESOLVE_TS			0x00001000
+#define A5XX_RBBM_INT_0_MASK_CP_IB2				0x00002000
+#define A5XX_RBBM_INT_0_MASK_CP_IB1				0x00004000
+#define A5XX_RBBM_INT_0_MASK_CP_RB				0x00008000
+#define A5XX_RBBM_INT_0_MASK_CP_RB_DONE_TS			0x00020000
+#define A5XX_RBBM_INT_0_MASK_CP_WT_DONE_TS			0x00040000
+#define A5XX_RBBM_INT_0_MASK_CP_CACHE_FLUSH_TS			0x00100000
+#define A5XX_RBBM_INT_0_MASK_RBBM_ATB_BUS_OVERFLOW		0x00400000
+#define A5XX_RBBM_INT_0_MASK_MISC_HANG_DETECT			0x00800000
+#define A5XX_RBBM_INT_0_MASK_UCHE_OOB_ACCESS			0x01000000
+#define A5XX_RBBM_INT_0_MASK_UCHE_TRAP_INTR			0x02000000
+#define A5XX_RBBM_INT_0_MASK_DEBBUS_INTR_0			0x04000000
+#define A5XX_RBBM_INT_0_MASK_DEBBUS_INTR_1			0x08000000
+#define A5XX_RBBM_INT_0_MASK_GPMU_VOLTAGE_DROOP			0x10000000
+#define A5XX_RBBM_INT_0_MASK_GPMU_FIRMWARE			0x20000000
+#define A5XX_RBBM_INT_0_MASK_ISDB_CPU_IRQ			0x40000000
+#define A5XX_RBBM_INT_0_MASK_ISDB_UNDER_DEBUG			0x80000000
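
A driver enables interrupts by writing an OR of these bits to RBBM_INT_0_MASK. The subset below is illustrative only, not necessarily the mask the msm driver actually programs:

#define A5XX_EXAMPLE_INT_MASK \
	(A5XX_RBBM_INT_0_MASK_RBBM_AHB_ERROR | \
	 A5XX_RBBM_INT_0_MASK_RBBM_GPC_ERROR | \
	 A5XX_RBBM_INT_0_MASK_CP_HW_ERROR | \
	 A5XX_RBBM_INT_0_MASK_CP_IB2 | \
	 A5XX_RBBM_INT_0_MASK_CP_IB1 | \
	 A5XX_RBBM_INT_0_MASK_CP_RB | \
	 A5XX_RBBM_INT_0_MASK_CP_CACHE_FLUSH_TS | \
	 A5XX_RBBM_INT_0_MASK_MISC_HANG_DETECT | \
	 A5XX_RBBM_INT_0_MASK_UCHE_OOB_ACCESS)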
+
+#define REG_A5XX_RBBM_AHB_DBG_CNTL				0x0000003f
+
+#define REG_A5XX_RBBM_EXT_VBIF_DBG_CNTL				0x00000041
+
+#define REG_A5XX_RBBM_SW_RESET_CMD				0x00000043
+
+#define REG_A5XX_RBBM_BLOCK_SW_RESET_CMD			0x00000045
+
+#define REG_A5XX_RBBM_BLOCK_SW_RESET_CMD2			0x00000046
+
+#define REG_A5XX_RBBM_DBG_LO_HI_GPIO				0x00000048
+
+#define REG_A5XX_RBBM_EXT_TRACE_BUS_CNTL			0x00000049
+
+#define REG_A5XX_RBBM_CLOCK_CNTL_TP0				0x0000004a
+
+#define REG_A5XX_RBBM_CLOCK_CNTL_TP1				0x0000004b
+
+#define REG_A5XX_RBBM_CLOCK_CNTL_TP2				0x0000004c
+
+#define REG_A5XX_RBBM_CLOCK_CNTL_TP3				0x0000004d
+
+#define REG_A5XX_RBBM_CLOCK_CNTL2_TP0				0x0000004e
+
+#define REG_A5XX_RBBM_CLOCK_CNTL2_TP1				0x0000004f
+
+#define REG_A5XX_RBBM_CLOCK_CNTL2_TP2				0x00000050
+
+#define REG_A5XX_RBBM_CLOCK_CNTL2_TP3				0x00000051
+
+#define REG_A5XX_RBBM_CLOCK_CNTL3_TP0				0x00000052
+
+#define REG_A5XX_RBBM_CLOCK_CNTL3_TP1				0x00000053
+
+#define REG_A5XX_RBBM_CLOCK_CNTL3_TP2				0x00000054
+
+#define REG_A5XX_RBBM_CLOCK_CNTL3_TP3				0x00000055
+
+#define REG_A5XX_RBBM_READ_AHB_THROUGH_DBG			0x00000059
+
+#define REG_A5XX_RBBM_CLOCK_CNTL_UCHE				0x0000005a
+
+#define REG_A5XX_RBBM_CLOCK_CNTL2_UCHE				0x0000005b
+
+#define REG_A5XX_RBBM_CLOCK_CNTL3_UCHE				0x0000005c
+
+#define REG_A5XX_RBBM_CLOCK_CNTL4_UCHE				0x0000005d
+
+#define REG_A5XX_RBBM_CLOCK_HYST_UCHE				0x0000005e
+
+#define REG_A5XX_RBBM_CLOCK_DELAY_UCHE				0x0000005f
+
+#define REG_A5XX_RBBM_CLOCK_MODE_GPC				0x00000060
+
+#define REG_A5XX_RBBM_CLOCK_DELAY_GPC				0x00000061
+
+#define REG_A5XX_RBBM_CLOCK_HYST_GPC				0x00000062
+
+#define REG_A5XX_RBBM_CLOCK_CNTL_TSE_RAS_RBBM			0x00000063
+
+#define REG_A5XX_RBBM_CLOCK_HYST_TSE_RAS_RBBM			0x00000064
+
+#define REG_A5XX_RBBM_CLOCK_DELAY_TSE_RAS_RBBM			0x00000065
+
+#define REG_A5XX_RBBM_CLOCK_DELAY_HLSQ				0x00000066
+
+#define REG_A5XX_RBBM_CLOCK_CNTL				0x00000067
+
+#define REG_A5XX_RBBM_CLOCK_CNTL_SP0				0x00000068
+
+#define REG_A5XX_RBBM_CLOCK_CNTL_SP1				0x00000069
+
+#define REG_A5XX_RBBM_CLOCK_CNTL_SP2				0x0000006a
+
+#define REG_A5XX_RBBM_CLOCK_CNTL_SP3				0x0000006b
+
+#define REG_A5XX_RBBM_CLOCK_CNTL2_SP0				0x0000006c
+
+#define REG_A5XX_RBBM_CLOCK_CNTL2_SP1				0x0000006d
+
+#define REG_A5XX_RBBM_CLOCK_CNTL2_SP2				0x0000006e
+
+#define REG_A5XX_RBBM_CLOCK_CNTL2_SP3				0x0000006f
+
+#define REG_A5XX_RBBM_CLOCK_HYST_SP0				0x00000070
+
+#define REG_A5XX_RBBM_CLOCK_HYST_SP1				0x00000071
+
+#define REG_A5XX_RBBM_CLOCK_HYST_SP2				0x00000072
+
+#define REG_A5XX_RBBM_CLOCK_HYST_SP3				0x00000073
+
+#define REG_A5XX_RBBM_CLOCK_DELAY_SP0				0x00000074
+
+#define REG_A5XX_RBBM_CLOCK_DELAY_SP1				0x00000075
+
+#define REG_A5XX_RBBM_CLOCK_DELAY_SP2				0x00000076
+
+#define REG_A5XX_RBBM_CLOCK_DELAY_SP3				0x00000077
+
+#define REG_A5XX_RBBM_CLOCK_CNTL_RB0				0x00000078
+
+#define REG_A5XX_RBBM_CLOCK_CNTL_RB1				0x00000079
+
+#define REG_A5XX_RBBM_CLOCK_CNTL_RB2				0x0000007a
+
+#define REG_A5XX_RBBM_CLOCK_CNTL_RB3				0x0000007b
+
+#define REG_A5XX_RBBM_CLOCK_CNTL2_RB0				0x0000007c
+
+#define REG_A5XX_RBBM_CLOCK_CNTL2_RB1				0x0000007d
+
+#define REG_A5XX_RBBM_CLOCK_CNTL2_RB2				0x0000007e
+
+#define REG_A5XX_RBBM_CLOCK_CNTL2_RB3				0x0000007f
+
+#define REG_A5XX_RBBM_CLOCK_HYST_RAC				0x00000080
+
+#define REG_A5XX_RBBM_CLOCK_DELAY_RAC				0x00000081
+
+#define REG_A5XX_RBBM_CLOCK_CNTL_CCU0				0x00000082
+
+#define REG_A5XX_RBBM_CLOCK_CNTL_CCU1				0x00000083
+
+#define REG_A5XX_RBBM_CLOCK_CNTL_CCU2				0x00000084
+
+#define REG_A5XX_RBBM_CLOCK_CNTL_CCU3				0x00000085
+
+#define REG_A5XX_RBBM_CLOCK_HYST_RB_CCU0			0x00000086
+
+#define REG_A5XX_RBBM_CLOCK_HYST_RB_CCU1			0x00000087
+
+#define REG_A5XX_RBBM_CLOCK_HYST_RB_CCU2			0x00000088
+
+#define REG_A5XX_RBBM_CLOCK_HYST_RB_CCU3			0x00000089
+
+#define REG_A5XX_RBBM_CLOCK_CNTL_RAC				0x0000008a
+
+#define REG_A5XX_RBBM_CLOCK_CNTL2_RAC				0x0000008b
+
+#define REG_A5XX_RBBM_CLOCK_DELAY_RB_CCU_L1_0			0x0000008c
+
+#define REG_A5XX_RBBM_CLOCK_DELAY_RB_CCU_L1_1			0x0000008d
+
+#define REG_A5XX_RBBM_CLOCK_DELAY_RB_CCU_L1_2			0x0000008e
+
+#define REG_A5XX_RBBM_CLOCK_DELAY_RB_CCU_L1_3			0x0000008f
+
+#define REG_A5XX_RBBM_CLOCK_HYST_VFD				0x00000090
+
+#define REG_A5XX_RBBM_CLOCK_MODE_VFD				0x00000091
+
+#define REG_A5XX_RBBM_CLOCK_DELAY_VFD				0x00000092
+
+#define REG_A5XX_RBBM_AHB_CNTL0					0x00000093
+
+#define REG_A5XX_RBBM_AHB_CNTL1					0x00000094
+
+#define REG_A5XX_RBBM_AHB_CNTL2					0x00000095
+
+#define REG_A5XX_RBBM_AHB_CMD					0x00000096
+
+#define REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL11		0x0000009c
+
+#define REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL12		0x0000009d
+
+#define REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL13		0x0000009e
+
+#define REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL14		0x0000009f
+
+#define REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL15		0x000000a0
+
+#define REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL16		0x000000a1
+
+#define REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL17		0x000000a2
+
+#define REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL18		0x000000a3
+
+#define REG_A5XX_RBBM_CLOCK_DELAY_TP0				0x000000a4
+
+#define REG_A5XX_RBBM_CLOCK_DELAY_TP1				0x000000a5
+
+#define REG_A5XX_RBBM_CLOCK_DELAY_TP2				0x000000a6
+
+#define REG_A5XX_RBBM_CLOCK_DELAY_TP3				0x000000a7
+
+#define REG_A5XX_RBBM_CLOCK_DELAY2_TP0				0x000000a8
+
+#define REG_A5XX_RBBM_CLOCK_DELAY2_TP1				0x000000a9
+
+#define REG_A5XX_RBBM_CLOCK_DELAY2_TP2				0x000000aa
+
+#define REG_A5XX_RBBM_CLOCK_DELAY2_TP3				0x000000ab
+
+#define REG_A5XX_RBBM_CLOCK_DELAY3_TP0				0x000000ac
+
+#define REG_A5XX_RBBM_CLOCK_DELAY3_TP1				0x000000ad
+
+#define REG_A5XX_RBBM_CLOCK_DELAY3_TP2				0x000000ae
+
+#define REG_A5XX_RBBM_CLOCK_DELAY3_TP3				0x000000af
+
+#define REG_A5XX_RBBM_CLOCK_HYST_TP0				0x000000b0
+
+#define REG_A5XX_RBBM_CLOCK_HYST_TP1				0x000000b1
+
+#define REG_A5XX_RBBM_CLOCK_HYST_TP2				0x000000b2
+
+#define REG_A5XX_RBBM_CLOCK_HYST_TP3				0x000000b3
+
+#define REG_A5XX_RBBM_CLOCK_HYST2_TP0				0x000000b4
+
+#define REG_A5XX_RBBM_CLOCK_HYST2_TP1				0x000000b5
+
+#define REG_A5XX_RBBM_CLOCK_HYST2_TP2				0x000000b6
+
+#define REG_A5XX_RBBM_CLOCK_HYST2_TP3				0x000000b7
+
+#define REG_A5XX_RBBM_CLOCK_HYST3_TP0				0x000000b8
+
+#define REG_A5XX_RBBM_CLOCK_HYST3_TP1				0x000000b9
+
+#define REG_A5XX_RBBM_CLOCK_HYST3_TP2				0x000000ba
+
+#define REG_A5XX_RBBM_CLOCK_HYST3_TP3				0x000000bb
+
+#define REG_A5XX_RBBM_CLOCK_CNTL_GPMU				0x000000c8
+
+#define REG_A5XX_RBBM_CLOCK_DELAY_GPMU				0x000000c9
+
+#define REG_A5XX_RBBM_CLOCK_HYST_GPMU				0x000000ca
+
+#define REG_A5XX_RBBM_PERFCTR_CP_0_LO				0x000003a0
+
+#define REG_A5XX_RBBM_PERFCTR_CP_0_HI				0x000003a1
+
+#define REG_A5XX_RBBM_PERFCTR_CP_1_LO				0x000003a2
+
+#define REG_A5XX_RBBM_PERFCTR_CP_1_HI				0x000003a3
+
+#define REG_A5XX_RBBM_PERFCTR_CP_2_LO				0x000003a4
+
+#define REG_A5XX_RBBM_PERFCTR_CP_2_HI				0x000003a5
+
+#define REG_A5XX_RBBM_PERFCTR_CP_3_LO				0x000003a6
+
+#define REG_A5XX_RBBM_PERFCTR_CP_3_HI				0x000003a7
+
+#define REG_A5XX_RBBM_PERFCTR_CP_4_LO				0x000003a8
+
+#define REG_A5XX_RBBM_PERFCTR_CP_4_HI				0x000003a9
+
+#define REG_A5XX_RBBM_PERFCTR_CP_5_LO				0x000003aa
+
+#define REG_A5XX_RBBM_PERFCTR_CP_5_HI				0x000003ab
+
+#define REG_A5XX_RBBM_PERFCTR_CP_6_LO				0x000003ac
+
+#define REG_A5XX_RBBM_PERFCTR_CP_6_HI				0x000003ad
+
+#define REG_A5XX_RBBM_PERFCTR_CP_7_LO				0x000003ae
+
+#define REG_A5XX_RBBM_PERFCTR_CP_7_HI				0x000003af
+
+#define REG_A5XX_RBBM_PERFCTR_RBBM_0_LO				0x000003b0
+
+#define REG_A5XX_RBBM_PERFCTR_RBBM_0_HI				0x000003b1
+
+#define REG_A5XX_RBBM_PERFCTR_RBBM_1_LO				0x000003b2
+
+#define REG_A5XX_RBBM_PERFCTR_RBBM_1_HI				0x000003b3
+
+#define REG_A5XX_RBBM_PERFCTR_RBBM_2_LO				0x000003b4
+
+#define REG_A5XX_RBBM_PERFCTR_RBBM_2_HI				0x000003b5
+
+#define REG_A5XX_RBBM_PERFCTR_RBBM_3_LO				0x000003b6
+
+#define REG_A5XX_RBBM_PERFCTR_RBBM_3_HI				0x000003b7
+
+#define REG_A5XX_RBBM_PERFCTR_PC_0_LO				0x000003b8
+
+#define REG_A5XX_RBBM_PERFCTR_PC_0_HI				0x000003b9
+
+#define REG_A5XX_RBBM_PERFCTR_PC_1_LO				0x000003ba
+
+#define REG_A5XX_RBBM_PERFCTR_PC_1_HI				0x000003bb
+
+#define REG_A5XX_RBBM_PERFCTR_PC_2_LO				0x000003bc
+
+#define REG_A5XX_RBBM_PERFCTR_PC_2_HI				0x000003bd
+
+#define REG_A5XX_RBBM_PERFCTR_PC_3_LO				0x000003be
+
+#define REG_A5XX_RBBM_PERFCTR_PC_3_HI				0x000003bf
+
+#define REG_A5XX_RBBM_PERFCTR_PC_4_LO				0x000003c0
+
+#define REG_A5XX_RBBM_PERFCTR_PC_4_HI				0x000003c1
+
+#define REG_A5XX_RBBM_PERFCTR_PC_5_LO				0x000003c2
+
+#define REG_A5XX_RBBM_PERFCTR_PC_5_HI				0x000003c3
+
+#define REG_A5XX_RBBM_PERFCTR_PC_6_LO				0x000003c4
+
+#define REG_A5XX_RBBM_PERFCTR_PC_6_HI				0x000003c5
+
+#define REG_A5XX_RBBM_PERFCTR_PC_7_LO				0x000003c6
+
+#define REG_A5XX_RBBM_PERFCTR_PC_7_HI				0x000003c7
+
+#define REG_A5XX_RBBM_PERFCTR_VFD_0_LO				0x000003c8
+
+#define REG_A5XX_RBBM_PERFCTR_VFD_0_HI				0x000003c9
+
+#define REG_A5XX_RBBM_PERFCTR_VFD_1_LO				0x000003ca
+
+#define REG_A5XX_RBBM_PERFCTR_VFD_1_HI				0x000003cb
+
+#define REG_A5XX_RBBM_PERFCTR_VFD_2_LO				0x000003cc
+
+#define REG_A5XX_RBBM_PERFCTR_VFD_2_HI				0x000003cd
+
+#define REG_A5XX_RBBM_PERFCTR_VFD_3_LO				0x000003ce
+
+#define REG_A5XX_RBBM_PERFCTR_VFD_3_HI				0x000003cf
+
+#define REG_A5XX_RBBM_PERFCTR_VFD_4_LO				0x000003d0
+
+#define REG_A5XX_RBBM_PERFCTR_VFD_4_HI				0x000003d1
+
+#define REG_A5XX_RBBM_PERFCTR_VFD_5_LO				0x000003d2
+
+#define REG_A5XX_RBBM_PERFCTR_VFD_5_HI				0x000003d3
+
+#define REG_A5XX_RBBM_PERFCTR_VFD_6_LO				0x000003d4
+
+#define REG_A5XX_RBBM_PERFCTR_VFD_6_HI				0x000003d5
+
+#define REG_A5XX_RBBM_PERFCTR_VFD_7_LO				0x000003d6
+
+#define REG_A5XX_RBBM_PERFCTR_VFD_7_HI				0x000003d7
+
+#define REG_A5XX_RBBM_PERFCTR_HLSQ_0_LO				0x000003d8
+
+#define REG_A5XX_RBBM_PERFCTR_HLSQ_0_HI				0x000003d9
+
+#define REG_A5XX_RBBM_PERFCTR_HLSQ_1_LO				0x000003da
+
+#define REG_A5XX_RBBM_PERFCTR_HLSQ_1_HI				0x000003db
+
+#define REG_A5XX_RBBM_PERFCTR_HLSQ_2_LO				0x000003dc
+
+#define REG_A5XX_RBBM_PERFCTR_HLSQ_2_HI				0x000003dd
+
+#define REG_A5XX_RBBM_PERFCTR_HLSQ_3_LO				0x000003de
+
+#define REG_A5XX_RBBM_PERFCTR_HLSQ_3_HI				0x000003df
+
+#define REG_A5XX_RBBM_PERFCTR_HLSQ_4_LO				0x000003e0
+
+#define REG_A5XX_RBBM_PERFCTR_HLSQ_4_HI				0x000003e1
+
+#define REG_A5XX_RBBM_PERFCTR_HLSQ_5_LO				0x000003e2
+
+#define REG_A5XX_RBBM_PERFCTR_HLSQ_5_HI				0x000003e3
+
+#define REG_A5XX_RBBM_PERFCTR_HLSQ_6_LO				0x000003e4
+
+#define REG_A5XX_RBBM_PERFCTR_HLSQ_6_HI				0x000003e5
+
+#define REG_A5XX_RBBM_PERFCTR_HLSQ_7_LO				0x000003e6
+
+#define REG_A5XX_RBBM_PERFCTR_HLSQ_7_HI				0x000003e7
+
+#define REG_A5XX_RBBM_PERFCTR_VPC_0_LO				0x000003e8
+
+#define REG_A5XX_RBBM_PERFCTR_VPC_0_HI				0x000003e9
+
+#define REG_A5XX_RBBM_PERFCTR_VPC_1_LO				0x000003ea
+
+#define REG_A5XX_RBBM_PERFCTR_VPC_1_HI				0x000003eb
+
+#define REG_A5XX_RBBM_PERFCTR_VPC_2_LO				0x000003ec
+
+#define REG_A5XX_RBBM_PERFCTR_VPC_2_HI				0x000003ed
+
+#define REG_A5XX_RBBM_PERFCTR_VPC_3_LO				0x000003ee
+
+#define REG_A5XX_RBBM_PERFCTR_VPC_3_HI				0x000003ef
+
+#define REG_A5XX_RBBM_PERFCTR_CCU_0_LO				0x000003f0
+
+#define REG_A5XX_RBBM_PERFCTR_CCU_0_HI				0x000003f1
+
+#define REG_A5XX_RBBM_PERFCTR_CCU_1_LO				0x000003f2
+
+#define REG_A5XX_RBBM_PERFCTR_CCU_1_HI				0x000003f3
+
+#define REG_A5XX_RBBM_PERFCTR_CCU_2_LO				0x000003f4
+
+#define REG_A5XX_RBBM_PERFCTR_CCU_2_HI				0x000003f5
+
+#define REG_A5XX_RBBM_PERFCTR_CCU_3_LO				0x000003f6
+
+#define REG_A5XX_RBBM_PERFCTR_CCU_3_HI				0x000003f7
+
+#define REG_A5XX_RBBM_PERFCTR_TSE_0_LO				0x000003f8
+
+#define REG_A5XX_RBBM_PERFCTR_TSE_0_HI				0x000003f9
+
+#define REG_A5XX_RBBM_PERFCTR_TSE_1_LO				0x000003fa
+
+#define REG_A5XX_RBBM_PERFCTR_TSE_1_HI				0x000003fb
+
+#define REG_A5XX_RBBM_PERFCTR_TSE_2_LO				0x000003fc
+
+#define REG_A5XX_RBBM_PERFCTR_TSE_2_HI				0x000003fd
+
+#define REG_A5XX_RBBM_PERFCTR_TSE_3_LO				0x000003fe
+
+#define REG_A5XX_RBBM_PERFCTR_TSE_3_HI				0x000003ff
+
+#define REG_A5XX_RBBM_PERFCTR_RAS_0_LO				0x00000400
+
+#define REG_A5XX_RBBM_PERFCTR_RAS_0_HI				0x00000401
+
+#define REG_A5XX_RBBM_PERFCTR_RAS_1_LO				0x00000402
+
+#define REG_A5XX_RBBM_PERFCTR_RAS_1_HI				0x00000403
+
+#define REG_A5XX_RBBM_PERFCTR_RAS_2_LO				0x00000404
+
+#define REG_A5XX_RBBM_PERFCTR_RAS_2_HI				0x00000405
+
+#define REG_A5XX_RBBM_PERFCTR_RAS_3_LO				0x00000406
+
+#define REG_A5XX_RBBM_PERFCTR_RAS_3_HI				0x00000407
+
+#define REG_A5XX_RBBM_PERFCTR_UCHE_0_LO				0x00000408
+
+#define REG_A5XX_RBBM_PERFCTR_UCHE_0_HI				0x00000409
+
+#define REG_A5XX_RBBM_PERFCTR_UCHE_1_LO				0x0000040a
+
+#define REG_A5XX_RBBM_PERFCTR_UCHE_1_HI				0x0000040b
+
+#define REG_A5XX_RBBM_PERFCTR_UCHE_2_LO				0x0000040c
+
+#define REG_A5XX_RBBM_PERFCTR_UCHE_2_HI				0x0000040d
+
+#define REG_A5XX_RBBM_PERFCTR_UCHE_3_LO				0x0000040e
+
+#define REG_A5XX_RBBM_PERFCTR_UCHE_3_HI				0x0000040f
+
+#define REG_A5XX_RBBM_PERFCTR_UCHE_4_LO				0x00000410
+
+#define REG_A5XX_RBBM_PERFCTR_UCHE_4_HI				0x00000411
+
+#define REG_A5XX_RBBM_PERFCTR_UCHE_5_LO				0x00000412
+
+#define REG_A5XX_RBBM_PERFCTR_UCHE_5_HI				0x00000413
+
+#define REG_A5XX_RBBM_PERFCTR_UCHE_6_LO				0x00000414
+
+#define REG_A5XX_RBBM_PERFCTR_UCHE_6_HI				0x00000415
+
+#define REG_A5XX_RBBM_PERFCTR_UCHE_7_LO				0x00000416
+
+#define REG_A5XX_RBBM_PERFCTR_UCHE_7_HI				0x00000417
+
+#define REG_A5XX_RBBM_PERFCTR_TP_0_LO				0x00000418
+
+#define REG_A5XX_RBBM_PERFCTR_TP_0_HI				0x00000419
+
+#define REG_A5XX_RBBM_PERFCTR_TP_1_LO				0x0000041a
+
+#define REG_A5XX_RBBM_PERFCTR_TP_1_HI				0x0000041b
+
+#define REG_A5XX_RBBM_PERFCTR_TP_2_LO				0x0000041c
+
+#define REG_A5XX_RBBM_PERFCTR_TP_2_HI				0x0000041d
+
+#define REG_A5XX_RBBM_PERFCTR_TP_3_LO				0x0000041e
+
+#define REG_A5XX_RBBM_PERFCTR_TP_3_HI				0x0000041f
+
+#define REG_A5XX_RBBM_PERFCTR_TP_4_LO				0x00000420
+
+#define REG_A5XX_RBBM_PERFCTR_TP_4_HI				0x00000421
+
+#define REG_A5XX_RBBM_PERFCTR_TP_5_LO				0x00000422
+
+#define REG_A5XX_RBBM_PERFCTR_TP_5_HI				0x00000423
+
+#define REG_A5XX_RBBM_PERFCTR_TP_6_LO				0x00000424
+
+#define REG_A5XX_RBBM_PERFCTR_TP_6_HI				0x00000425
+
+#define REG_A5XX_RBBM_PERFCTR_TP_7_LO				0x00000426
+
+#define REG_A5XX_RBBM_PERFCTR_TP_7_HI				0x00000427
+
+#define REG_A5XX_RBBM_PERFCTR_SP_0_LO				0x00000428
+
+#define REG_A5XX_RBBM_PERFCTR_SP_0_HI				0x00000429
+
+#define REG_A5XX_RBBM_PERFCTR_SP_1_LO				0x0000042a
+
+#define REG_A5XX_RBBM_PERFCTR_SP_1_HI				0x0000042b
+
+#define REG_A5XX_RBBM_PERFCTR_SP_2_LO				0x0000042c
+
+#define REG_A5XX_RBBM_PERFCTR_SP_2_HI				0x0000042d
+
+#define REG_A5XX_RBBM_PERFCTR_SP_3_LO				0x0000042e
+
+#define REG_A5XX_RBBM_PERFCTR_SP_3_HI				0x0000042f
+
+#define REG_A5XX_RBBM_PERFCTR_SP_4_LO				0x00000430
+
+#define REG_A5XX_RBBM_PERFCTR_SP_4_HI				0x00000431
+
+#define REG_A5XX_RBBM_PERFCTR_SP_5_LO				0x00000432
+
+#define REG_A5XX_RBBM_PERFCTR_SP_5_HI				0x00000433
+
+#define REG_A5XX_RBBM_PERFCTR_SP_6_LO				0x00000434
+
+#define REG_A5XX_RBBM_PERFCTR_SP_6_HI				0x00000435
+
+#define REG_A5XX_RBBM_PERFCTR_SP_7_LO				0x00000436
+
+#define REG_A5XX_RBBM_PERFCTR_SP_7_HI				0x00000437
+
+#define REG_A5XX_RBBM_PERFCTR_SP_8_LO				0x00000438
+
+#define REG_A5XX_RBBM_PERFCTR_SP_8_HI				0x00000439
+
+#define REG_A5XX_RBBM_PERFCTR_SP_9_LO				0x0000043a
+
+#define REG_A5XX_RBBM_PERFCTR_SP_9_HI				0x0000043b
+
+#define REG_A5XX_RBBM_PERFCTR_SP_10_LO				0x0000043c
+
+#define REG_A5XX_RBBM_PERFCTR_SP_10_HI				0x0000043d
+
+#define REG_A5XX_RBBM_PERFCTR_SP_11_LO				0x0000043e
+
+#define REG_A5XX_RBBM_PERFCTR_SP_11_HI				0x0000043f
+
+#define REG_A5XX_RBBM_PERFCTR_RB_0_LO				0x00000440
+
+#define REG_A5XX_RBBM_PERFCTR_RB_0_HI				0x00000441
+
+#define REG_A5XX_RBBM_PERFCTR_RB_1_LO				0x00000442
+
+#define REG_A5XX_RBBM_PERFCTR_RB_1_HI				0x00000443
+
+#define REG_A5XX_RBBM_PERFCTR_RB_2_LO				0x00000444
+
+#define REG_A5XX_RBBM_PERFCTR_RB_2_HI				0x00000445
+
+#define REG_A5XX_RBBM_PERFCTR_RB_3_LO				0x00000446
+
+#define REG_A5XX_RBBM_PERFCTR_RB_3_HI				0x00000447
+
+#define REG_A5XX_RBBM_PERFCTR_RB_4_LO				0x00000448
+
+#define REG_A5XX_RBBM_PERFCTR_RB_4_HI				0x00000449
+
+#define REG_A5XX_RBBM_PERFCTR_RB_5_LO				0x0000044a
+
+#define REG_A5XX_RBBM_PERFCTR_RB_5_HI				0x0000044b
+
+#define REG_A5XX_RBBM_PERFCTR_RB_6_LO				0x0000044c
+
+#define REG_A5XX_RBBM_PERFCTR_RB_6_HI				0x0000044d
+
+#define REG_A5XX_RBBM_PERFCTR_RB_7_LO				0x0000044e
+
+#define REG_A5XX_RBBM_PERFCTR_RB_7_HI				0x0000044f
+
+#define REG_A5XX_RBBM_PERFCTR_VSC_0_LO				0x00000450
+
+#define REG_A5XX_RBBM_PERFCTR_VSC_0_HI				0x00000451
+
+#define REG_A5XX_RBBM_PERFCTR_VSC_1_LO				0x00000452
+
+#define REG_A5XX_RBBM_PERFCTR_VSC_1_HI				0x00000453
+
+#define REG_A5XX_RBBM_PERFCTR_LRZ_0_LO				0x00000454
+
+#define REG_A5XX_RBBM_PERFCTR_LRZ_0_HI				0x00000455
+
+#define REG_A5XX_RBBM_PERFCTR_LRZ_1_LO				0x00000456
+
+#define REG_A5XX_RBBM_PERFCTR_LRZ_1_HI				0x00000457
+
+#define REG_A5XX_RBBM_PERFCTR_LRZ_2_LO				0x00000458
+
+#define REG_A5XX_RBBM_PERFCTR_LRZ_2_HI				0x00000459
+
+#define REG_A5XX_RBBM_PERFCTR_LRZ_3_LO				0x0000045a
+
+#define REG_A5XX_RBBM_PERFCTR_LRZ_3_HI				0x0000045b
+
+#define REG_A5XX_RBBM_PERFCTR_CMP_0_LO				0x0000045c
+
+#define REG_A5XX_RBBM_PERFCTR_CMP_0_HI				0x0000045d
+
+#define REG_A5XX_RBBM_PERFCTR_CMP_1_LO				0x0000045e
+
+#define REG_A5XX_RBBM_PERFCTR_CMP_1_HI				0x0000045f
+
+#define REG_A5XX_RBBM_PERFCTR_CMP_2_LO				0x00000460
+
+#define REG_A5XX_RBBM_PERFCTR_CMP_2_HI				0x00000461
+
+#define REG_A5XX_RBBM_PERFCTR_CMP_3_LO				0x00000462
+
+#define REG_A5XX_RBBM_PERFCTR_CMP_3_HI				0x00000463
+
+#define REG_A5XX_RBBM_ALWAYSON_COUNTER_LO			0x000004d2
+
+#define REG_A5XX_RBBM_ALWAYSON_COUNTER_HI			0x000004d3
+
+#define REG_A5XX_RBBM_STATUS					0x000004f5
+#define A5XX_RBBM_STATUS_GPU_BUSY_IGN_AHB			0x80000000
+#define A5XX_RBBM_STATUS_GPU_BUSY_IGN_AHB_CP			0x40000000
+#define A5XX_RBBM_STATUS_HLSQ_BUSY				0x20000000
+#define A5XX_RBBM_STATUS_VSC_BUSY				0x10000000
+#define A5XX_RBBM_STATUS_TPL1_BUSY				0x08000000
+#define A5XX_RBBM_STATUS_SP_BUSY				0x04000000
+#define A5XX_RBBM_STATUS_UCHE_BUSY				0x02000000
+#define A5XX_RBBM_STATUS_VPC_BUSY				0x01000000
+#define A5XX_RBBM_STATUS_VFDP_BUSY				0x00800000
+#define A5XX_RBBM_STATUS_VFD_BUSY				0x00400000
+#define A5XX_RBBM_STATUS_TESS_BUSY				0x00200000
+#define A5XX_RBBM_STATUS_PC_VSD_BUSY				0x00100000
+#define A5XX_RBBM_STATUS_PC_DCALL_BUSY				0x00080000
+#define A5XX_RBBM_STATUS_GPMU_SLAVE_BUSY			0x00040000
+#define A5XX_RBBM_STATUS_DCOM_BUSY				0x00020000
+#define A5XX_RBBM_STATUS_COM_BUSY				0x00010000
+#define A5XX_RBBM_STATUS_LRZ_BUZY				0x00008000
+#define A5XX_RBBM_STATUS_A2D_DSP_BUSY				0x00004000
+#define A5XX_RBBM_STATUS_CCUFCHE_BUSY				0x00002000
+#define A5XX_RBBM_STATUS_RB_BUSY				0x00001000
+#define A5XX_RBBM_STATUS_RAS_BUSY				0x00000800
+#define A5XX_RBBM_STATUS_TSE_BUSY				0x00000400
+#define A5XX_RBBM_STATUS_VBIF_BUSY				0x00000200
+#define A5XX_RBBM_STATUS_GPU_BUSY_IGN_AHB_HYST			0x00000100
+#define A5XX_RBBM_STATUS_CP_BUSY_IGN_HYST			0x00000080
+#define A5XX_RBBM_STATUS_CP_BUSY				0x00000040
+#define A5XX_RBBM_STATUS_GPMU_MASTER_BUSY			0x00000020
+#define A5XX_RBBM_STATUS_CP_CRASH_BUSY				0x00000010
+#define A5XX_RBBM_STATUS_CP_ETS_BUSY				0x00000008
+#define A5XX_RBBM_STATUS_CP_PFP_BUSY				0x00000004
+#define A5XX_RBBM_STATUS_CP_ME_BUSY				0x00000002
+#define A5XX_RBBM_STATUS_HI_BUSY				0x00000001
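
RBBM_STATUS aggregates per-block busy flags; bit 0 (HI_BUSY) reflects host-interface activity and is plausibly excluded when testing whether the GPU proper has drained. An idle check sketched under that assumption, with gpu_read() assumed as before:

static inline bool a5xx_idle_sketch(struct msm_gpu *gpu)
{
	/* idle when nothing but the host interface reports busy */
	return !(gpu_read(gpu, REG_A5XX_RBBM_STATUS) &
		 ~A5XX_RBBM_STATUS_HI_BUSY);
}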
+
+#define REG_A5XX_RBBM_STATUS3					0x00000530
+
+#define REG_A5XX_RBBM_INT_0_STATUS				0x000004e1
+
+#define REG_A5XX_RBBM_AHB_ME_SPLIT_STATUS			0x000004f0
+
+#define REG_A5XX_RBBM_AHB_PFP_SPLIT_STATUS			0x000004f1
+
+#define REG_A5XX_RBBM_AHB_ETS_SPLIT_STATUS			0x000004f3
+
+#define REG_A5XX_RBBM_AHB_ERROR_STATUS				0x000004f4
+
+#define REG_A5XX_RBBM_PERFCTR_CNTL				0x00000464
+
+#define REG_A5XX_RBBM_PERFCTR_LOAD_CMD0				0x00000465
+
+#define REG_A5XX_RBBM_PERFCTR_LOAD_CMD1				0x00000466
+
+#define REG_A5XX_RBBM_PERFCTR_LOAD_CMD2				0x00000467
+
+#define REG_A5XX_RBBM_PERFCTR_LOAD_CMD3				0x00000468
+
+#define REG_A5XX_RBBM_PERFCTR_LOAD_VALUE_LO			0x00000469
+
+#define REG_A5XX_RBBM_PERFCTR_LOAD_VALUE_HI			0x0000046a
+
+#define REG_A5XX_RBBM_PERFCTR_RBBM_SEL_0			0x0000046b
+
+#define REG_A5XX_RBBM_PERFCTR_RBBM_SEL_1			0x0000046c
+
+#define REG_A5XX_RBBM_PERFCTR_RBBM_SEL_2			0x0000046d
+
+#define REG_A5XX_RBBM_PERFCTR_RBBM_SEL_3			0x0000046e
+
+#define REG_A5XX_RBBM_PERFCTR_GPU_BUSY_MASKED			0x0000046f
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_EVENT_LOGIC			0x00000504
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_OVER				0x00000505
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_COUNT0				0x00000506
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_COUNT1				0x00000507
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_COUNT2				0x00000508
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_COUNT3				0x00000509
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_COUNT4				0x0000050a
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_COUNT5				0x0000050b
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_TRACE_ADDR			0x0000050c
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_TRACE_BUF0			0x0000050d
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_TRACE_BUF1			0x0000050e
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_TRACE_BUF2			0x0000050f
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_TRACE_BUF3			0x00000510
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_TRACE_BUF4			0x00000511
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_MISR0				0x00000512
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_MISR1				0x00000513
+
+#define REG_A5XX_RBBM_ISDB_CNT					0x00000533
+
+#define REG_A5XX_RBBM_SECVID_TRUST_CONFIG			0x0000f000
+
+#define REG_A5XX_RBBM_SECVID_TRUST_CNTL				0x0000f400
+
+#define REG_A5XX_RBBM_SECVID_TSB_TRUSTED_BASE_LO		0x0000f800
+
+#define REG_A5XX_RBBM_SECVID_TSB_TRUSTED_BASE_HI		0x0000f801
+
+#define REG_A5XX_RBBM_SECVID_TSB_TRUSTED_SIZE			0x0000f802
+
+#define REG_A5XX_RBBM_SECVID_TSB_CNTL				0x0000f803
+
+#define REG_A5XX_RBBM_SECVID_TSB_COMP_STATUS_LO			0x0000f804
+
+#define REG_A5XX_RBBM_SECVID_TSB_COMP_STATUS_HI			0x0000f805
+
+#define REG_A5XX_RBBM_SECVID_TSB_UCHE_STATUS_LO			0x0000f806
+
+#define REG_A5XX_RBBM_SECVID_TSB_UCHE_STATUS_HI			0x0000f807
+
+#define REG_A5XX_RBBM_SECVID_TSB_ADDR_MODE_CNTL			0x0000f810
+
+#define REG_A5XX_VSC_PIPE_DATA_LENGTH_0				0x00000c00
+
+#define REG_A5XX_VSC_PERFCTR_VSC_SEL_0				0x00000c60
+
+#define REG_A5XX_VSC_PERFCTR_VSC_SEL_1				0x00000c61
+
+#define REG_A5XX_VSC_BIN_SIZE					0x00000cdd
+#define A5XX_VSC_BIN_SIZE_WINDOW_OFFSET_DISABLE			0x80000000
+#define A5XX_VSC_BIN_SIZE_X__MASK				0x00007fff
+#define A5XX_VSC_BIN_SIZE_X__SHIFT				0
+static inline uint32_t A5XX_VSC_BIN_SIZE_X(uint32_t val)
+{
+	return ((val) << A5XX_VSC_BIN_SIZE_X__SHIFT) & A5XX_VSC_BIN_SIZE_X__MASK;
+}
+#define A5XX_VSC_BIN_SIZE_Y__MASK				0x7fff0000
+#define A5XX_VSC_BIN_SIZE_Y__SHIFT				16
+static inline uint32_t A5XX_VSC_BIN_SIZE_Y(uint32_t val)
+{
+	return ((val) << A5XX_VSC_BIN_SIZE_Y__SHIFT) & A5XX_VSC_BIN_SIZE_Y__MASK;
+}
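
X and Y occupy disjoint 15-bit fields, so a complete bin-size word is just the two packers OR'd together, optionally with the window-offset-disable bit:

/* Pack a visibility-stream bin size (sketch). */
static inline uint32_t a5xx_vsc_bin_size(uint32_t w, uint32_t h,
					 bool no_window_offset)
{
	return A5XX_VSC_BIN_SIZE_X(w) | A5XX_VSC_BIN_SIZE_Y(h) |
	       (no_window_offset ? A5XX_VSC_BIN_SIZE_WINDOW_OFFSET_DISABLE : 0);
}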
+
+#define REG_A5XX_GRAS_ADDR_MODE_CNTL				0x00000c81
+
+#define REG_A5XX_GRAS_PERFCTR_TSE_SEL_0				0x00000c90
+
+#define REG_A5XX_GRAS_PERFCTR_TSE_SEL_1				0x00000c91
+
+#define REG_A5XX_GRAS_PERFCTR_TSE_SEL_2				0x00000c92
+
+#define REG_A5XX_GRAS_PERFCTR_TSE_SEL_3				0x00000c93
+
+#define REG_A5XX_GRAS_PERFCTR_RAS_SEL_0				0x00000c94
+
+#define REG_A5XX_GRAS_PERFCTR_RAS_SEL_1				0x00000c95
+
+#define REG_A5XX_GRAS_PERFCTR_RAS_SEL_2				0x00000c96
+
+#define REG_A5XX_GRAS_PERFCTR_RAS_SEL_3				0x00000c97
+
+#define REG_A5XX_GRAS_PERFCTR_LRZ_SEL_0				0x00000c98
+
+#define REG_A5XX_GRAS_PERFCTR_LRZ_SEL_1				0x00000c99
+
+#define REG_A5XX_GRAS_PERFCTR_LRZ_SEL_2				0x00000c9a
+
+#define REG_A5XX_GRAS_PERFCTR_LRZ_SEL_3				0x00000c9b
+
+#define REG_A5XX_RB_DBG_ECO_CNTL				0x00000cc4
+
+#define REG_A5XX_RB_ADDR_MODE_CNTL				0x00000cc5
+
+#define REG_A5XX_RB_MODE_CNTL					0x00000cc6
+
+#define REG_A5XX_RB_CCU_CNTL					0x00000cc7
+
+#define REG_A5XX_RB_PERFCTR_RB_SEL_0				0x00000cd0
+
+#define REG_A5XX_RB_PERFCTR_RB_SEL_1				0x00000cd1
+
+#define REG_A5XX_RB_PERFCTR_RB_SEL_2				0x00000cd2
+
+#define REG_A5XX_RB_PERFCTR_RB_SEL_3				0x00000cd3
+
+#define REG_A5XX_RB_PERFCTR_RB_SEL_4				0x00000cd4
+
+#define REG_A5XX_RB_PERFCTR_RB_SEL_5				0x00000cd5
+
+#define REG_A5XX_RB_PERFCTR_RB_SEL_6				0x00000cd6
+
+#define REG_A5XX_RB_PERFCTR_RB_SEL_7				0x00000cd7
+
+#define REG_A5XX_RB_PERFCTR_CCU_SEL_0				0x00000cd8
+
+#define REG_A5XX_RB_PERFCTR_CCU_SEL_1				0x00000cd9
+
+#define REG_A5XX_RB_PERFCTR_CCU_SEL_2				0x00000cda
+
+#define REG_A5XX_RB_PERFCTR_CCU_SEL_3				0x00000cdb
+
+#define REG_A5XX_RB_POWERCTR_RB_SEL_0				0x00000ce0
+
+#define REG_A5XX_RB_POWERCTR_RB_SEL_1				0x00000ce1
+
+#define REG_A5XX_RB_POWERCTR_RB_SEL_2				0x00000ce2
+
+#define REG_A5XX_RB_POWERCTR_RB_SEL_3				0x00000ce3
+
+#define REG_A5XX_RB_POWERCTR_CCU_SEL_0				0x00000ce4
+
+#define REG_A5XX_RB_POWERCTR_CCU_SEL_1				0x00000ce5
+
+#define REG_A5XX_RB_PERFCTR_CMP_SEL_0				0x00000cec
+
+#define REG_A5XX_RB_PERFCTR_CMP_SEL_1				0x00000ced
+
+#define REG_A5XX_RB_PERFCTR_CMP_SEL_2				0x00000cee
+
+#define REG_A5XX_RB_PERFCTR_CMP_SEL_3				0x00000cef
+
+#define REG_A5XX_PC_DBG_ECO_CNTL				0x00000d00
+#define A5XX_PC_DBG_ECO_CNTL_TWOPASSUSEWFI			0x00000100
+
+#define REG_A5XX_PC_ADDR_MODE_CNTL				0x00000d01
+
+#define REG_A5XX_PC_MODE_CNTL					0x00000d02
+
+#define REG_A5XX_UNKNOWN_0D08					0x00000d08
+
+#define REG_A5XX_UNKNOWN_0D09					0x00000d09
+
+#define REG_A5XX_PC_PERFCTR_PC_SEL_0				0x00000d10
+
+#define REG_A5XX_PC_PERFCTR_PC_SEL_1				0x00000d11
+
+#define REG_A5XX_PC_PERFCTR_PC_SEL_2				0x00000d12
+
+#define REG_A5XX_PC_PERFCTR_PC_SEL_3				0x00000d13
+
+#define REG_A5XX_PC_PERFCTR_PC_SEL_4				0x00000d14
+
+#define REG_A5XX_PC_PERFCTR_PC_SEL_5				0x00000d15
+
+#define REG_A5XX_PC_PERFCTR_PC_SEL_6				0x00000d16
+
+#define REG_A5XX_PC_PERFCTR_PC_SEL_7				0x00000d17
+
+#define REG_A5XX_HLSQ_TIMEOUT_THRESHOLD_0			0x00000e00
+
+#define REG_A5XX_HLSQ_TIMEOUT_THRESHOLD_1			0x00000e01
+
+#define REG_A5XX_HLSQ_ADDR_MODE_CNTL				0x00000e05
+
+#define REG_A5XX_HLSQ_MODE_CNTL					0x00000e06
+
+#define REG_A5XX_HLSQ_PERFCTR_HLSQ_SEL_0			0x00000e10
+
+#define REG_A5XX_HLSQ_PERFCTR_HLSQ_SEL_1			0x00000e11
+
+#define REG_A5XX_HLSQ_PERFCTR_HLSQ_SEL_2			0x00000e12
+
+#define REG_A5XX_HLSQ_PERFCTR_HLSQ_SEL_3			0x00000e13
+
+#define REG_A5XX_HLSQ_PERFCTR_HLSQ_SEL_4			0x00000e14
+
+#define REG_A5XX_HLSQ_PERFCTR_HLSQ_SEL_5			0x00000e15
+
+#define REG_A5XX_HLSQ_PERFCTR_HLSQ_SEL_6			0x00000e16
+
+#define REG_A5XX_HLSQ_PERFCTR_HLSQ_SEL_7			0x00000e17
+
+#define REG_A5XX_HLSQ_SPTP_RDSEL				0x00000f08
+
+#define REG_A5XX_HLSQ_DBG_READ_SEL				0x0000bc00
+#define A5XX_HLSQ_DBG_READ_SEL_STATETYPE__MASK			0x0000ff00
+#define A5XX_HLSQ_DBG_READ_SEL_STATETYPE__SHIFT			8
+static inline uint32_t A5XX_HLSQ_DBG_READ_SEL_STATETYPE(uint32_t val)
+{
+	return ((val) << A5XX_HLSQ_DBG_READ_SEL_STATETYPE__SHIFT) & A5XX_HLSQ_DBG_READ_SEL_STATETYPE__MASK;
+}
+
+#define REG_A5XX_HLSQ_DBG_AHB_READ_APERTURE			0x0000a000
+
+#define REG_A5XX_VFD_ADDR_MODE_CNTL				0x00000e41
+
+#define REG_A5XX_VFD_MODE_CNTL					0x00000e42
+
+#define REG_A5XX_VFD_PERFCTR_VFD_SEL_0				0x00000e50
+
+#define REG_A5XX_VFD_PERFCTR_VFD_SEL_1				0x00000e51
+
+#define REG_A5XX_VFD_PERFCTR_VFD_SEL_2				0x00000e52
+
+#define REG_A5XX_VFD_PERFCTR_VFD_SEL_3				0x00000e53
+
+#define REG_A5XX_VFD_PERFCTR_VFD_SEL_4				0x00000e54
+
+#define REG_A5XX_VFD_PERFCTR_VFD_SEL_5				0x00000e55
+
+#define REG_A5XX_VFD_PERFCTR_VFD_SEL_6				0x00000e56
+
+#define REG_A5XX_VFD_PERFCTR_VFD_SEL_7				0x00000e57
+
+#define REG_A5XX_VPC_DBG_ECO_CNTL				0x00000e60
+
+#define REG_A5XX_VPC_ADDR_MODE_CNTL				0x00000e61
+
+#define REG_A5XX_VPC_MODE_CNTL					0x00000e62
+
+#define REG_A5XX_VPC_PERFCTR_VPC_SEL_0				0x00000e64
+
+#define REG_A5XX_VPC_PERFCTR_VPC_SEL_1				0x00000e65
+
+#define REG_A5XX_VPC_PERFCTR_VPC_SEL_2				0x00000e66
+
+#define REG_A5XX_VPC_PERFCTR_VPC_SEL_3				0x00000e67
+
+#define REG_A5XX_UCHE_ADDR_MODE_CNTL				0x00000e80
+
+#define REG_A5XX_UCHE_SVM_CNTL					0x00000e82
+
+#define REG_A5XX_UCHE_WRITE_THRU_BASE_LO			0x00000e87
+
+#define REG_A5XX_UCHE_WRITE_THRU_BASE_HI			0x00000e88
+
+#define REG_A5XX_UCHE_TRAP_BASE_LO				0x00000e89
+
+#define REG_A5XX_UCHE_TRAP_BASE_HI				0x00000e8a
+
+#define REG_A5XX_UCHE_GMEM_RANGE_MIN_LO				0x00000e8b
+
+#define REG_A5XX_UCHE_GMEM_RANGE_MIN_HI				0x00000e8c
+
+#define REG_A5XX_UCHE_GMEM_RANGE_MAX_LO				0x00000e8d
+
+#define REG_A5XX_UCHE_GMEM_RANGE_MAX_HI				0x00000e8e
+
+#define REG_A5XX_UCHE_DBG_ECO_CNTL_2				0x00000e8f
+
+#define REG_A5XX_UCHE_DBG_ECO_CNTL				0x00000e90
+
+#define REG_A5XX_UCHE_CACHE_INVALIDATE_MIN_LO			0x00000e91
+
+#define REG_A5XX_UCHE_CACHE_INVALIDATE_MIN_HI			0x00000e92
+
+#define REG_A5XX_UCHE_CACHE_INVALIDATE_MAX_LO			0x00000e93
+
+#define REG_A5XX_UCHE_CACHE_INVALIDATE_MAX_HI			0x00000e94
+
+#define REG_A5XX_UCHE_CACHE_INVALIDATE				0x00000e95
+
+#define REG_A5XX_UCHE_CACHE_WAYS				0x00000e96
+
+#define REG_A5XX_UCHE_PERFCTR_UCHE_SEL_0			0x00000ea0
+
+#define REG_A5XX_UCHE_PERFCTR_UCHE_SEL_1			0x00000ea1
+
+#define REG_A5XX_UCHE_PERFCTR_UCHE_SEL_2			0x00000ea2
+
+#define REG_A5XX_UCHE_PERFCTR_UCHE_SEL_3			0x00000ea3
+
+#define REG_A5XX_UCHE_PERFCTR_UCHE_SEL_4			0x00000ea4
+
+#define REG_A5XX_UCHE_PERFCTR_UCHE_SEL_5			0x00000ea5
+
+#define REG_A5XX_UCHE_PERFCTR_UCHE_SEL_6			0x00000ea6
+
+#define REG_A5XX_UCHE_PERFCTR_UCHE_SEL_7			0x00000ea7
+
+#define REG_A5XX_UCHE_POWERCTR_UCHE_SEL_0			0x00000ea8
+
+#define REG_A5XX_UCHE_POWERCTR_UCHE_SEL_1			0x00000ea9
+
+#define REG_A5XX_UCHE_POWERCTR_UCHE_SEL_2			0x00000eaa
+
+#define REG_A5XX_UCHE_POWERCTR_UCHE_SEL_3			0x00000eab
+
+#define REG_A5XX_UCHE_TRAP_LOG_LO				0x00000eb1
+
+#define REG_A5XX_UCHE_TRAP_LOG_HI				0x00000eb2
+
+#define REG_A5XX_SP_DBG_ECO_CNTL				0x00000ec0
+
+#define REG_A5XX_SP_ADDR_MODE_CNTL				0x00000ec1
+
+#define REG_A5XX_SP_MODE_CNTL					0x00000ec2
+
+#define REG_A5XX_SP_PERFCTR_SP_SEL_0				0x00000ed0
+
+#define REG_A5XX_SP_PERFCTR_SP_SEL_1				0x00000ed1
+
+#define REG_A5XX_SP_PERFCTR_SP_SEL_2				0x00000ed2
+
+#define REG_A5XX_SP_PERFCTR_SP_SEL_3				0x00000ed3
+
+#define REG_A5XX_SP_PERFCTR_SP_SEL_4				0x00000ed4
+
+#define REG_A5XX_SP_PERFCTR_SP_SEL_5				0x00000ed5
+
+#define REG_A5XX_SP_PERFCTR_SP_SEL_6				0x00000ed6
+
+#define REG_A5XX_SP_PERFCTR_SP_SEL_7				0x00000ed7
+
+#define REG_A5XX_SP_PERFCTR_SP_SEL_8				0x00000ed8
+
+#define REG_A5XX_SP_PERFCTR_SP_SEL_9				0x00000ed9
+
+#define REG_A5XX_SP_PERFCTR_SP_SEL_10				0x00000eda
+
+#define REG_A5XX_SP_PERFCTR_SP_SEL_11				0x00000edb
+
+#define REG_A5XX_SP_POWERCTR_SP_SEL_0				0x00000edc
+
+#define REG_A5XX_SP_POWERCTR_SP_SEL_1				0x00000edd
+
+#define REG_A5XX_SP_POWERCTR_SP_SEL_2				0x00000ede
+
+#define REG_A5XX_SP_POWERCTR_SP_SEL_3				0x00000edf
+
+#define REG_A5XX_TPL1_ADDR_MODE_CNTL				0x00000f01
+
+#define REG_A5XX_TPL1_MODE_CNTL					0x00000f02
+
+#define REG_A5XX_TPL1_PERFCTR_TP_SEL_0				0x00000f10
+
+#define REG_A5XX_TPL1_PERFCTR_TP_SEL_1				0x00000f11
+
+#define REG_A5XX_TPL1_PERFCTR_TP_SEL_2				0x00000f12
+
+#define REG_A5XX_TPL1_PERFCTR_TP_SEL_3				0x00000f13
+
+#define REG_A5XX_TPL1_PERFCTR_TP_SEL_4				0x00000f14
+
+#define REG_A5XX_TPL1_PERFCTR_TP_SEL_5				0x00000f15
+
+#define REG_A5XX_TPL1_PERFCTR_TP_SEL_6				0x00000f16
+
+#define REG_A5XX_TPL1_PERFCTR_TP_SEL_7				0x00000f17
+
+#define REG_A5XX_TPL1_POWERCTR_TP_SEL_0				0x00000f18
+
+#define REG_A5XX_TPL1_POWERCTR_TP_SEL_1				0x00000f19
+
+#define REG_A5XX_TPL1_POWERCTR_TP_SEL_2				0x00000f1a
+
+#define REG_A5XX_TPL1_POWERCTR_TP_SEL_3				0x00000f1b
+
+#define REG_A5XX_VBIF_VERSION					0x00003000
+
+#define REG_A5XX_VBIF_CLKON					0x00003001
+#define A5XX_VBIF_CLKON_FORCE_ON				0x00000001
+#define A5XX_VBIF_CLKON_FORCE_ON_TESTBUS			0x00000002
+
+#define REG_A5XX_VBIF_ABIT_SORT					0x00003028
+
+#define REG_A5XX_VBIF_ABIT_SORT_CONF				0x00003029
+
+#define REG_A5XX_VBIF_ROUND_ROBIN_QOS_ARB			0x00003049
+
+#define REG_A5XX_VBIF_GATE_OFF_WRREQ_EN				0x0000302a
+
+#define REG_A5XX_VBIF_IN_RD_LIM_CONF0				0x0000302c
+
+#define REG_A5XX_VBIF_IN_RD_LIM_CONF1				0x0000302d
+
+#define REG_A5XX_VBIF_XIN_HALT_CTRL0				0x00003080
+
+#define REG_A5XX_VBIF_XIN_HALT_CTRL1				0x00003081
+
+#define REG_A5XX_VBIF_TEST_BUS_OUT_CTRL				0x00003084
+#define A5XX_VBIF_TEST_BUS_OUT_CTRL_TEST_BUS_CTRL_EN		0x00000001
+
+#define REG_A5XX_VBIF_TEST_BUS1_CTRL0				0x00003085
+
+#define REG_A5XX_VBIF_TEST_BUS1_CTRL1				0x00003086
+#define A5XX_VBIF_TEST_BUS1_CTRL1_TEST_BUS1_DATA_SEL__MASK	0x0000000f
+#define A5XX_VBIF_TEST_BUS1_CTRL1_TEST_BUS1_DATA_SEL__SHIFT	0
+static inline uint32_t A5XX_VBIF_TEST_BUS1_CTRL1_TEST_BUS1_DATA_SEL(uint32_t val)
+{
+	return ((val) << A5XX_VBIF_TEST_BUS1_CTRL1_TEST_BUS1_DATA_SEL__SHIFT) & A5XX_VBIF_TEST_BUS1_CTRL1_TEST_BUS1_DATA_SEL__MASK;
+}
+
+#define REG_A5XX_VBIF_TEST_BUS2_CTRL0				0x00003087
+
+#define REG_A5XX_VBIF_TEST_BUS2_CTRL1				0x00003088
+#define A5XX_VBIF_TEST_BUS2_CTRL1_TEST_BUS2_DATA_SEL__MASK	0x0000001f
+#define A5XX_VBIF_TEST_BUS2_CTRL1_TEST_BUS2_DATA_SEL__SHIFT	0
+static inline uint32_t A5XX_VBIF_TEST_BUS2_CTRL1_TEST_BUS2_DATA_SEL(uint32_t val)
+{
+	return ((val) << A5XX_VBIF_TEST_BUS2_CTRL1_TEST_BUS2_DATA_SEL__SHIFT) & A5XX_VBIF_TEST_BUS2_CTRL1_TEST_BUS2_DATA_SEL__MASK;
+}
+
+#define REG_A5XX_VBIF_TEST_BUS_OUT				0x0000308c
+
+static inline uint32_t REG_A5XX_VBIF_PERF_CNT_EN(uint32_t i0) { return 0x000030c0 + 0x1*i0; }
+
+static inline uint32_t REG_A5XX_VBIF_PERF_CNT_CLR(uint32_t i0) { return 0x000030c8 + 0x1*i0; }
+
+static inline uint32_t REG_A5XX_VBIF_PERF_CNT_SEL(uint32_t i0) { return 0x000030d0 + 0x1*i0; }
+
+#define REG_A5XX_VBIF_PERF_CNT_LOW0				0x000030d8
+
+#define REG_A5XX_VBIF_PERF_CNT_LOW1				0x000030d9
+
+#define REG_A5XX_VBIF_PERF_CNT_LOW2				0x000030da
+
+#define REG_A5XX_VBIF_PERF_CNT_LOW3				0x000030db
+
+#define REG_A5XX_VBIF_PERF_CNT_HIGH0				0x000030e0
+
+#define REG_A5XX_VBIF_PERF_CNT_HIGH1				0x000030e1
+
+#define REG_A5XX_VBIF_PERF_CNT_HIGH2				0x000030e2
+
+#define REG_A5XX_VBIF_PERF_CNT_HIGH3				0x000030e3
+
+static inline uint32_t REG_A5XX_VBIF_PERF_PWR_CNT_EN(uint32_t i0) { return 0x00003100 + 0x1*i0; }
+
+static inline uint32_t REG_A5XX_VBIF_PERF_PWR_CNT_CLR(uint32_t i0) { return 0x00003108 + 0x1*i0; }
+
+#define REG_A5XX_VBIF_PERF_PWR_CNT_LOW0				0x00003110
+
+#define REG_A5XX_VBIF_PERF_PWR_CNT_LOW1				0x00003111
+
+#define REG_A5XX_VBIF_PERF_PWR_CNT_LOW2				0x00003112
+
+#define REG_A5XX_VBIF_PERF_PWR_CNT_HIGH0			0x00003118
+
+#define REG_A5XX_VBIF_PERF_PWR_CNT_HIGH1			0x00003119
+
+#define REG_A5XX_VBIF_PERF_PWR_CNT_HIGH2			0x0000311a
+
+#define REG_A5XX_GPMU_INST_RAM_BASE				0x00008800
+
+#define REG_A5XX_GPMU_DATA_RAM_BASE				0x00009800
+
+#define REG_A5XX_GPMU_SP_POWER_CNTL				0x0000a881
+
+#define REG_A5XX_GPMU_RBCCU_CLOCK_CNTL				0x0000a886
+
+#define REG_A5XX_GPMU_RBCCU_POWER_CNTL				0x0000a887
+
+#define REG_A5XX_GPMU_SP_PWR_CLK_STATUS				0x0000a88b
+#define A5XX_GPMU_SP_PWR_CLK_STATUS_PWR_ON			0x00100000
+
+#define REG_A5XX_GPMU_RBCCU_PWR_CLK_STATUS			0x0000a88d
+#define A5XX_GPMU_RBCCU_PWR_CLK_STATUS_PWR_ON			0x00100000
+
+#define REG_A5XX_GPMU_PWR_COL_STAGGER_DELAY			0x0000a891
+
+#define REG_A5XX_GPMU_PWR_COL_INTER_FRAME_CTRL			0x0000a892
+
+#define REG_A5XX_GPMU_PWR_COL_INTER_FRAME_HYST			0x0000a893
+
+#define REG_A5XX_GPMU_PWR_COL_BINNING_CTRL			0x0000a894
+
+#define REG_A5XX_GPMU_CLOCK_THROTTLE_CTRL			0x0000a8a3
+
+#define REG_A5XX_GPMU_WFI_CONFIG				0x0000a8c1
+
+#define REG_A5XX_GPMU_RBBM_INTR_INFO				0x0000a8d6
+
+#define REG_A5XX_GPMU_CM3_SYSRESET				0x0000a8d8
+
+#define REG_A5XX_GPMU_GENERAL_0					0x0000a8e0
+
+#define REG_A5XX_GPMU_GENERAL_1					0x0000a8e1
+
+#define REG_A5XX_SP_POWER_COUNTER_0_LO				0x0000a840
+
+#define REG_A5XX_SP_POWER_COUNTER_0_HI				0x0000a841
+
+#define REG_A5XX_SP_POWER_COUNTER_1_LO				0x0000a842
+
+#define REG_A5XX_SP_POWER_COUNTER_1_HI				0x0000a843
+
+#define REG_A5XX_SP_POWER_COUNTER_2_LO				0x0000a844
+
+#define REG_A5XX_SP_POWER_COUNTER_2_HI				0x0000a845
+
+#define REG_A5XX_SP_POWER_COUNTER_3_LO				0x0000a846
+
+#define REG_A5XX_SP_POWER_COUNTER_3_HI				0x0000a847
+
+#define REG_A5XX_TP_POWER_COUNTER_0_LO				0x0000a848
+
+#define REG_A5XX_TP_POWER_COUNTER_0_HI				0x0000a849
+
+#define REG_A5XX_TP_POWER_COUNTER_1_LO				0x0000a84a
+
+#define REG_A5XX_TP_POWER_COUNTER_1_HI				0x0000a84b
+
+#define REG_A5XX_TP_POWER_COUNTER_2_LO				0x0000a84c
+
+#define REG_A5XX_TP_POWER_COUNTER_2_HI				0x0000a84d
+
+#define REG_A5XX_TP_POWER_COUNTER_3_LO				0x0000a84e
+
+#define REG_A5XX_TP_POWER_COUNTER_3_HI				0x0000a84f
+
+#define REG_A5XX_RB_POWER_COUNTER_0_LO				0x0000a850
+
+#define REG_A5XX_RB_POWER_COUNTER_0_HI				0x0000a851
+
+#define REG_A5XX_RB_POWER_COUNTER_1_LO				0x0000a852
+
+#define REG_A5XX_RB_POWER_COUNTER_1_HI				0x0000a853
+
+#define REG_A5XX_RB_POWER_COUNTER_2_LO				0x0000a854
+
+#define REG_A5XX_RB_POWER_COUNTER_2_HI				0x0000a855
+
+#define REG_A5XX_RB_POWER_COUNTER_3_LO				0x0000a856
+
+#define REG_A5XX_RB_POWER_COUNTER_3_HI				0x0000a857
+
+#define REG_A5XX_CCU_POWER_COUNTER_0_LO				0x0000a858
+
+#define REG_A5XX_CCU_POWER_COUNTER_0_HI				0x0000a859
+
+#define REG_A5XX_CCU_POWER_COUNTER_1_LO				0x0000a85a
+
+#define REG_A5XX_CCU_POWER_COUNTER_1_HI				0x0000a85b
+
+#define REG_A5XX_UCHE_POWER_COUNTER_0_LO			0x0000a85c
+
+#define REG_A5XX_UCHE_POWER_COUNTER_0_HI			0x0000a85d
+
+#define REG_A5XX_UCHE_POWER_COUNTER_1_LO			0x0000a85e
+
+#define REG_A5XX_UCHE_POWER_COUNTER_1_HI			0x0000a85f
+
+#define REG_A5XX_UCHE_POWER_COUNTER_2_LO			0x0000a860
+
+#define REG_A5XX_UCHE_POWER_COUNTER_2_HI			0x0000a861
+
+#define REG_A5XX_UCHE_POWER_COUNTER_3_LO			0x0000a862
+
+#define REG_A5XX_UCHE_POWER_COUNTER_3_HI			0x0000a863
+
+#define REG_A5XX_CP_POWER_COUNTER_0_LO				0x0000a864
+
+#define REG_A5XX_CP_POWER_COUNTER_0_HI				0x0000a865
+
+#define REG_A5XX_CP_POWER_COUNTER_1_LO				0x0000a866
+
+#define REG_A5XX_CP_POWER_COUNTER_1_HI				0x0000a867
+
+#define REG_A5XX_CP_POWER_COUNTER_2_LO				0x0000a868
+
+#define REG_A5XX_CP_POWER_COUNTER_2_HI				0x0000a869
+
+#define REG_A5XX_CP_POWER_COUNTER_3_LO				0x0000a86a
+
+#define REG_A5XX_CP_POWER_COUNTER_3_HI				0x0000a86b
+
+#define REG_A5XX_GPMU_POWER_COUNTER_0_LO			0x0000a86c
+
+#define REG_A5XX_GPMU_POWER_COUNTER_0_HI			0x0000a86d
+
+#define REG_A5XX_GPMU_POWER_COUNTER_1_LO			0x0000a86e
+
+#define REG_A5XX_GPMU_POWER_COUNTER_1_HI			0x0000a86f
+
+#define REG_A5XX_GPMU_POWER_COUNTER_2_LO			0x0000a870
+
+#define REG_A5XX_GPMU_POWER_COUNTER_2_HI			0x0000a871
+
+#define REG_A5XX_GPMU_POWER_COUNTER_3_LO			0x0000a872
+
+#define REG_A5XX_GPMU_POWER_COUNTER_3_HI			0x0000a873
+
+#define REG_A5XX_GPMU_POWER_COUNTER_4_LO			0x0000a874
+
+#define REG_A5XX_GPMU_POWER_COUNTER_4_HI			0x0000a875
+
+#define REG_A5XX_GPMU_POWER_COUNTER_5_LO			0x0000a876
+
+#define REG_A5XX_GPMU_POWER_COUNTER_5_HI			0x0000a877
+
+#define REG_A5XX_GPMU_POWER_COUNTER_ENABLE			0x0000a878
+
+#define REG_A5XX_GPMU_ALWAYS_ON_COUNTER_LO			0x0000a879
+
+#define REG_A5XX_GPMU_ALWAYS_ON_COUNTER_HI			0x0000a87a
+
+#define REG_A5XX_GPMU_ALWAYS_ON_COUNTER_RESET			0x0000a87b
+
+#define REG_A5XX_GPMU_POWER_COUNTER_SELECT_0			0x0000a87c
+
+#define REG_A5XX_GPMU_POWER_COUNTER_SELECT_1			0x0000a87d
+
+#define REG_A5XX_GPMU_THROTTLE_UNMASK_FORCE_CTRL		0x0000a8a8
+
+#define REG_A5XX_GPMU_TEMP_SENSOR_ID				0x0000ac00
+
+#define REG_A5XX_GPMU_TEMP_SENSOR_CONFIG			0x0000ac01
+#define A5XX_GPMU_TEMP_SENSOR_CONFIG_ISENSE_STATUS__MASK	0x0000000f
+#define A5XX_GPMU_TEMP_SENSOR_CONFIG_ISENSE_STATUS__SHIFT	0
+static inline uint32_t A5XX_GPMU_TEMP_SENSOR_CONFIG_ISENSE_STATUS(uint32_t val)
+{
+	return ((val) << A5XX_GPMU_TEMP_SENSOR_CONFIG_ISENSE_STATUS__SHIFT) & A5XX_GPMU_TEMP_SENSOR_CONFIG_ISENSE_STATUS__MASK;
+}
+#define A5XX_GPMU_TEMP_SENSOR_CONFIG_BCL_ENABLED		0x00000002
+#define A5XX_GPMU_TEMP_SENSOR_CONFIG_LLM_ENABLED		0x00000200
+
+#define REG_A5XX_GPMU_TEMP_VAL					0x0000ac02
+
+#define REG_A5XX_GPMU_DELTA_TEMP_THRESHOLD			0x0000ac03
+
+#define REG_A5XX_GPMU_TEMP_THRESHOLD_INTR_STATUS		0x0000ac05
+
+#define REG_A5XX_GPMU_TEMP_THRESHOLD_INTR_EN_MASK		0x0000ac06
+
+#define REG_A5XX_GPMU_LEAKAGE_TEMP_COEFF_0_1			0x0000ac40
+
+#define REG_A5XX_GPMU_LEAKAGE_TEMP_COEFF_2_3			0x0000ac41
+
+#define REG_A5XX_GPMU_LEAKAGE_VTG_COEFF_0_1			0x0000ac42
+
+#define REG_A5XX_GPMU_LEAKAGE_VTG_COEFF_2_3			0x0000ac43
+
+#define REG_A5XX_GPMU_BASE_LEAKAGE				0x0000ac46
+
+#define REG_A5XX_GPMU_GPMU_VOLTAGE				0x0000ac60
+
+#define REG_A5XX_GPMU_GPMU_VOLTAGE_INTR_STATUS			0x0000ac61
+
+#define REG_A5XX_GPMU_GPMU_VOLTAGE_INTR_EN_MASK			0x0000ac62
+
+#define REG_A5XX_GPMU_GPMU_PWR_THRESHOLD			0x0000ac80
+
+#define REG_A5XX_GPMU_GPMU_LLM_GLM_SLEEP_CTRL			0x0000acc4
+#define A5XX_GPMU_GPMU_LLM_GLM_SLEEP_CTRL_IDLE_FULL_LM		0x00000001
+#define A5XX_GPMU_GPMU_LLM_GLM_SLEEP_CTRL_STATE_OF_CHILD__MASK	0x00000030
+#define A5XX_GPMU_GPMU_LLM_GLM_SLEEP_CTRL_STATE_OF_CHILD__SHIFT	4
+static inline uint32_t A5XX_GPMU_GPMU_LLM_GLM_SLEEP_CTRL_STATE_OF_CHILD(uint32_t val)
+{
+	return ((val) << A5XX_GPMU_GPMU_LLM_GLM_SLEEP_CTRL_STATE_OF_CHILD__SHIFT) & A5XX_GPMU_GPMU_LLM_GLM_SLEEP_CTRL_STATE_OF_CHILD__MASK;
+}
+
+#define REG_A5XX_GPMU_GPMU_LLM_GLM_SLEEP_STATUS			0x0000acc5
+#define A5XX_GPMU_GPMU_LLM_GLM_SLEEP_STATUS_IDLE_FULL_ACK	0x00000001
+#define A5XX_GPMU_GPMU_LLM_GLM_SLEEP_STATUS_WAKEUP_ACK		0x00000002
+
+#define REG_A5XX_GDPM_CONFIG1					0x0000b80c
+
+#define REG_A5XX_GDPM_CONFIG2					0x0000b80d
+
+#define REG_A5XX_GDPM_INT_EN					0x0000b80f
+
+#define REG_A5XX_GDPM_INT_MASK					0x0000b811
+
+#define REG_A5XX_GPMU_BEC_ENABLE				0x0000b9a0
+
+#define REG_A5XX_GPU_CS_SENSOR_GENERAL_STATUS			0x0000c41a
+
+#define REG_A5XX_GPU_CS_AMP_CALIBRATION_STATUS1_0		0x0000c41d
+
+#define REG_A5XX_GPU_CS_AMP_CALIBRATION_STATUS1_2		0x0000c41f
+
+#define REG_A5XX_GPU_CS_AMP_CALIBRATION_STATUS1_4		0x0000c421
+
+#define REG_A5XX_GPU_CS_ENABLE_REG				0x0000c520
+
+#define REG_A5XX_GPU_CS_AMP_CALIBRATION_CONTROL1		0x0000c557
+
+#define REG_A5XX_GRAS_CL_CNTL					0x0000e000
+
+#define REG_A5XX_UNKNOWN_E001					0x0000e001
+
+#define REG_A5XX_UNKNOWN_E004					0x0000e004
+
+#define REG_A5XX_GRAS_CLEAR_CNTL				0x0000e005
+#define A5XX_GRAS_CLEAR_CNTL_NOT_FASTCLEAR			0x00000001
+
+#define REG_A5XX_GRAS_CL_GUARDBAND_CLIP_ADJ			0x0000e006
+#define A5XX_GRAS_CL_GUARDBAND_CLIP_ADJ_HORZ__MASK		0x000003ff
+#define A5XX_GRAS_CL_GUARDBAND_CLIP_ADJ_HORZ__SHIFT		0
+static inline uint32_t A5XX_GRAS_CL_GUARDBAND_CLIP_ADJ_HORZ(uint32_t val)
+{
+	return ((val) << A5XX_GRAS_CL_GUARDBAND_CLIP_ADJ_HORZ__SHIFT) & A5XX_GRAS_CL_GUARDBAND_CLIP_ADJ_HORZ__MASK;
+}
+#define A5XX_GRAS_CL_GUARDBAND_CLIP_ADJ_VERT__MASK		0x000ffc00
+#define A5XX_GRAS_CL_GUARDBAND_CLIP_ADJ_VERT__SHIFT		10
+static inline uint32_t A5XX_GRAS_CL_GUARDBAND_CLIP_ADJ_VERT(uint32_t val)
+{
+	return ((val) << A5XX_GRAS_CL_GUARDBAND_CLIP_ADJ_VERT__SHIFT) & A5XX_GRAS_CL_GUARDBAND_CLIP_ADJ_VERT__MASK;
+}
+
+#define REG_A5XX_GRAS_CL_VPORT_XOFFSET_0			0x0000e010
+#define A5XX_GRAS_CL_VPORT_XOFFSET_0__MASK			0xffffffff
+#define A5XX_GRAS_CL_VPORT_XOFFSET_0__SHIFT			0
+static inline uint32_t A5XX_GRAS_CL_VPORT_XOFFSET_0(float val)
+{
+	return ((fui(val)) << A5XX_GRAS_CL_VPORT_XOFFSET_0__SHIFT) & A5XX_GRAS_CL_VPORT_XOFFSET_0__MASK;
+}
+
+#define REG_A5XX_GRAS_CL_VPORT_XSCALE_0				0x0000e011
+#define A5XX_GRAS_CL_VPORT_XSCALE_0__MASK			0xffffffff
+#define A5XX_GRAS_CL_VPORT_XSCALE_0__SHIFT			0
+static inline uint32_t A5XX_GRAS_CL_VPORT_XSCALE_0(float val)
+{
+	return ((fui(val)) << A5XX_GRAS_CL_VPORT_XSCALE_0__SHIFT) & A5XX_GRAS_CL_VPORT_XSCALE_0__MASK;
+}
+
+#define REG_A5XX_GRAS_CL_VPORT_YOFFSET_0			0x0000e012
+#define A5XX_GRAS_CL_VPORT_YOFFSET_0__MASK			0xffffffff
+#define A5XX_GRAS_CL_VPORT_YOFFSET_0__SHIFT			0
+static inline uint32_t A5XX_GRAS_CL_VPORT_YOFFSET_0(float val)
+{
+	return ((fui(val)) << A5XX_GRAS_CL_VPORT_YOFFSET_0__SHIFT) & A5XX_GRAS_CL_VPORT_YOFFSET_0__MASK;
+}
+
+#define REG_A5XX_GRAS_CL_VPORT_YSCALE_0				0x0000e013
+#define A5XX_GRAS_CL_VPORT_YSCALE_0__MASK			0xffffffff
+#define A5XX_GRAS_CL_VPORT_YSCALE_0__SHIFT			0
+static inline uint32_t A5XX_GRAS_CL_VPORT_YSCALE_0(float val)
+{
+	return ((fui(val)) << A5XX_GRAS_CL_VPORT_YSCALE_0__SHIFT) & A5XX_GRAS_CL_VPORT_YSCALE_0__MASK;
+}
+
+#define REG_A5XX_GRAS_CL_VPORT_ZOFFSET_0			0x0000e014
+#define A5XX_GRAS_CL_VPORT_ZOFFSET_0__MASK			0xffffffff
+#define A5XX_GRAS_CL_VPORT_ZOFFSET_0__SHIFT			0
+static inline uint32_t A5XX_GRAS_CL_VPORT_ZOFFSET_0(float val)
+{
+	return ((fui(val)) << A5XX_GRAS_CL_VPORT_ZOFFSET_0__SHIFT) & A5XX_GRAS_CL_VPORT_ZOFFSET_0__MASK;
+}
+
+#define REG_A5XX_GRAS_CL_VPORT_ZSCALE_0				0x0000e015
+#define A5XX_GRAS_CL_VPORT_ZSCALE_0__MASK			0xffffffff
+#define A5XX_GRAS_CL_VPORT_ZSCALE_0__SHIFT			0
+static inline uint32_t A5XX_GRAS_CL_VPORT_ZSCALE_0(float val)
+{
+	return ((fui(val)) << A5XX_GRAS_CL_VPORT_ZSCALE_0__SHIFT) & A5XX_GRAS_CL_VPORT_ZSCALE_0__MASK;
+}
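+
+/*
+ * The viewport packers above route the value through fui(), assumed here
+ * to be the usual float-to-uint32 bit reinterpretation (the IEEE-754
+ * binary32 bits are written to the register unchanged), roughly:
+ *
+ *   static inline uint32_t fui(float f)
+ *   {
+ *           union { float f; uint32_t u; } cvt = { .f = f };
+ *           return cvt.u;
+ *   }
+ */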
+
+#define REG_A5XX_GRAS_SU_CNTL					0x0000e090
+#define A5XX_GRAS_SU_CNTL_FRONT_CW				0x00000004
+#define A5XX_GRAS_SU_CNTL_POLY_OFFSET				0x00000800
+
+#define REG_A5XX_GRAS_SU_POINT_MINMAX				0x0000e091
+#define A5XX_GRAS_SU_POINT_MINMAX_MIN__MASK			0x0000ffff
+#define A5XX_GRAS_SU_POINT_MINMAX_MIN__SHIFT			0
+static inline uint32_t A5XX_GRAS_SU_POINT_MINMAX_MIN(float val)
+{
+	return ((((uint32_t)(val * 16.0))) << A5XX_GRAS_SU_POINT_MINMAX_MIN__SHIFT) & A5XX_GRAS_SU_POINT_MINMAX_MIN__MASK;
+}
+#define A5XX_GRAS_SU_POINT_MINMAX_MAX__MASK			0xffff0000
+#define A5XX_GRAS_SU_POINT_MINMAX_MAX__SHIFT			16
+static inline uint32_t A5XX_GRAS_SU_POINT_MINMAX_MAX(float val)
+{
+	return ((((uint32_t)(val * 16.0))) << A5XX_GRAS_SU_POINT_MINMAX_MAX__SHIFT) & A5XX_GRAS_SU_POINT_MINMAX_MAX__MASK;
+}
+
+#define REG_A5XX_GRAS_SU_POINT_SIZE				0x0000e092
+#define A5XX_GRAS_SU_POINT_SIZE__MASK				0xffffffff
+#define A5XX_GRAS_SU_POINT_SIZE__SHIFT				0
+static inline uint32_t A5XX_GRAS_SU_POINT_SIZE(float val)
+{
+	return ((((int32_t)(val * 16.0))) << A5XX_GRAS_SU_POINT_SIZE__SHIFT) & A5XX_GRAS_SU_POINT_SIZE__MASK;
+}
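+
+/*
+ * The (val * 16.0) casts above indicate point min/max/size are programmed
+ * in 1/16th-pixel fixed point (4 fractional bits; unsigned for MINMAX,
+ * signed for SIZE), e.g. A5XX_GRAS_SU_POINT_SIZE(1.5f) packs as 24.
+ */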
+
+#define REG_A5XX_UNKNOWN_E093					0x0000e093
+
+#define REG_A5XX_GRAS_SU_DEPTH_PLANE_CNTL			0x0000e094
+#define A5XX_GRAS_SU_DEPTH_PLANE_CNTL_ALPHA_TEST_ENABLE		0x00000001
+
+#define REG_A5XX_GRAS_SU_POLY_OFFSET_SCALE			0x0000e095
+#define A5XX_GRAS_SU_POLY_OFFSET_SCALE__MASK			0xffffffff
+#define A5XX_GRAS_SU_POLY_OFFSET_SCALE__SHIFT			0
+static inline uint32_t A5XX_GRAS_SU_POLY_OFFSET_SCALE(float val)
+{
+	return ((fui(val)) << A5XX_GRAS_SU_POLY_OFFSET_SCALE__SHIFT) & A5XX_GRAS_SU_POLY_OFFSET_SCALE__MASK;
+}
+
+#define REG_A5XX_GRAS_SU_POLY_OFFSET_OFFSET			0x0000e096
+#define A5XX_GRAS_SU_POLY_OFFSET_OFFSET__MASK			0xffffffff
+#define A5XX_GRAS_SU_POLY_OFFSET_OFFSET__SHIFT			0
+static inline uint32_t A5XX_GRAS_SU_POLY_OFFSET_OFFSET(float val)
+{
+	return ((fui(val)) << A5XX_GRAS_SU_POLY_OFFSET_OFFSET__SHIFT) & A5XX_GRAS_SU_POLY_OFFSET_OFFSET__MASK;
+}
+
+#define REG_A5XX_GRAS_SU_POLY_OFFSET_OFFSET_CLAMP		0x0000e097
+#define A5XX_GRAS_SU_POLY_OFFSET_OFFSET_CLAMP__MASK		0xffffffff
+#define A5XX_GRAS_SU_POLY_OFFSET_OFFSET_CLAMP__SHIFT		0
+static inline uint32_t A5XX_GRAS_SU_POLY_OFFSET_OFFSET_CLAMP(float val)
+{
+	return ((fui(val)) << A5XX_GRAS_SU_POLY_OFFSET_OFFSET_CLAMP__SHIFT) & A5XX_GRAS_SU_POLY_OFFSET_OFFSET_CLAMP__MASK;
+}
+
+#define REG_A5XX_GRAS_SU_DEPTH_BUFFER_INFO			0x0000e098
+#define A5XX_GRAS_SU_DEPTH_BUFFER_INFO_DEPTH_FORMAT__MASK	0x00000007
+#define A5XX_GRAS_SU_DEPTH_BUFFER_INFO_DEPTH_FORMAT__SHIFT	0
+static inline uint32_t A5XX_GRAS_SU_DEPTH_BUFFER_INFO_DEPTH_FORMAT(enum a5xx_depth_format val)
+{
+	return ((val) << A5XX_GRAS_SU_DEPTH_BUFFER_INFO_DEPTH_FORMAT__SHIFT) & A5XX_GRAS_SU_DEPTH_BUFFER_INFO_DEPTH_FORMAT__MASK;
+}
+
+#define REG_A5XX_GRAS_SU_CONSERVATIVE_RAS_CNTL			0x0000e099
+
+#define REG_A5XX_GRAS_SC_CNTL					0x0000e0a0
+
+#define REG_A5XX_GRAS_SC_BIN_CNTL				0x0000e0a1
+
+#define REG_A5XX_GRAS_SC_RAS_MSAA_CNTL				0x0000e0a2
+#define A5XX_GRAS_SC_RAS_MSAA_CNTL_SAMPLES__MASK		0x00000003
+#define A5XX_GRAS_SC_RAS_MSAA_CNTL_SAMPLES__SHIFT		0
+static inline uint32_t A5XX_GRAS_SC_RAS_MSAA_CNTL_SAMPLES(enum a3xx_msaa_samples val)
+{
+	return ((val) << A5XX_GRAS_SC_RAS_MSAA_CNTL_SAMPLES__SHIFT) & A5XX_GRAS_SC_RAS_MSAA_CNTL_SAMPLES__MASK;
+}
+
+#define REG_A5XX_GRAS_SC_DEST_MSAA_CNTL				0x0000e0a3
+#define A5XX_GRAS_SC_DEST_MSAA_CNTL_SAMPLES__MASK		0x00000003
+#define A5XX_GRAS_SC_DEST_MSAA_CNTL_SAMPLES__SHIFT		0
+static inline uint32_t A5XX_GRAS_SC_DEST_MSAA_CNTL_SAMPLES(enum a3xx_msaa_samples val)
+{
+	return ((val) << A5XX_GRAS_SC_DEST_MSAA_CNTL_SAMPLES__SHIFT) & A5XX_GRAS_SC_DEST_MSAA_CNTL_SAMPLES__MASK;
+}
+#define A5XX_GRAS_SC_DEST_MSAA_CNTL_MSAA_DISABLE		0x00000004
+
+#define REG_A5XX_GRAS_SC_SCREEN_SCISSOR_CNTL			0x0000e0a4
+
+#define REG_A5XX_GRAS_SC_SCREEN_SCISSOR_TL_0			0x0000e0aa
+#define A5XX_GRAS_SC_SCREEN_SCISSOR_TL_0_WINDOW_OFFSET_DISABLE	0x80000000
+#define A5XX_GRAS_SC_SCREEN_SCISSOR_TL_0_X__MASK		0x00007fff
+#define A5XX_GRAS_SC_SCREEN_SCISSOR_TL_0_X__SHIFT		0
+static inline uint32_t A5XX_GRAS_SC_SCREEN_SCISSOR_TL_0_X(uint32_t val)
+{
+	return ((val) << A5XX_GRAS_SC_SCREEN_SCISSOR_TL_0_X__SHIFT) & A5XX_GRAS_SC_SCREEN_SCISSOR_TL_0_X__MASK;
+}
+#define A5XX_GRAS_SC_SCREEN_SCISSOR_TL_0_Y__MASK		0x7fff0000
+#define A5XX_GRAS_SC_SCREEN_SCISSOR_TL_0_Y__SHIFT		16
+static inline uint32_t A5XX_GRAS_SC_SCREEN_SCISSOR_TL_0_Y(uint32_t val)
+{
+	return ((val) << A5XX_GRAS_SC_SCREEN_SCISSOR_TL_0_Y__SHIFT) & A5XX_GRAS_SC_SCREEN_SCISSOR_TL_0_Y__MASK;
+}
+
+#define REG_A5XX_GRAS_SC_SCREEN_SCISSOR_BR_0			0x0000e0ab
+#define A5XX_GRAS_SC_SCREEN_SCISSOR_BR_0_WINDOW_OFFSET_DISABLE	0x80000000
+#define A5XX_GRAS_SC_SCREEN_SCISSOR_BR_0_X__MASK		0x00007fff
+#define A5XX_GRAS_SC_SCREEN_SCISSOR_BR_0_X__SHIFT		0
+static inline uint32_t A5XX_GRAS_SC_SCREEN_SCISSOR_BR_0_X(uint32_t val)
+{
+	return ((val) << A5XX_GRAS_SC_SCREEN_SCISSOR_BR_0_X__SHIFT) & A5XX_GRAS_SC_SCREEN_SCISSOR_BR_0_X__MASK;
+}
+#define A5XX_GRAS_SC_SCREEN_SCISSOR_BR_0_Y__MASK		0x7fff0000
+#define A5XX_GRAS_SC_SCREEN_SCISSOR_BR_0_Y__SHIFT		16
+static inline uint32_t A5XX_GRAS_SC_SCREEN_SCISSOR_BR_0_Y(uint32_t val)
+{
+	return ((val) << A5XX_GRAS_SC_SCREEN_SCISSOR_BR_0_Y__SHIFT) & A5XX_GRAS_SC_SCREEN_SCISSOR_BR_0_Y__MASK;
+}
+
+#define REG_A5XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0			0x0000e0ca
+#define A5XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_WINDOW_OFFSET_DISABLE	0x80000000
+#define A5XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_X__MASK		0x00007fff
+#define A5XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_X__SHIFT		0
+static inline uint32_t A5XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_X(uint32_t val)
+{
+	return ((val) << A5XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_X__SHIFT) & A5XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_X__MASK;
+}
+#define A5XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_Y__MASK		0x7fff0000
+#define A5XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_Y__SHIFT		16
+static inline uint32_t A5XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_Y(uint32_t val)
+{
+	return ((val) << A5XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_Y__SHIFT) & A5XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_Y__MASK;
+}
+
+#define REG_A5XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0			0x0000e0cb
+#define A5XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0_WINDOW_OFFSET_DISABLE	0x80000000
+#define A5XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0_X__MASK		0x00007fff
+#define A5XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0_X__SHIFT		0
+static inline uint32_t A5XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0_X(uint32_t val)
+{
+	return ((val) << A5XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0_X__SHIFT) & A5XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0_X__MASK;
+}
+#define A5XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0_Y__MASK		0x7fff0000
+#define A5XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0_Y__SHIFT		16
+static inline uint32_t A5XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0_Y(uint32_t val)
+{
+	return ((val) << A5XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0_Y__SHIFT) & A5XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0_Y__MASK;
+}
+
+#define REG_A5XX_GRAS_SC_WINDOW_SCISSOR_TL			0x0000e0ea
+#define A5XX_GRAS_SC_WINDOW_SCISSOR_TL_WINDOW_OFFSET_DISABLE	0x80000000
+#define A5XX_GRAS_SC_WINDOW_SCISSOR_TL_X__MASK			0x00007fff
+#define A5XX_GRAS_SC_WINDOW_SCISSOR_TL_X__SHIFT			0
+static inline uint32_t A5XX_GRAS_SC_WINDOW_SCISSOR_TL_X(uint32_t val)
+{
+	return ((val) << A5XX_GRAS_SC_WINDOW_SCISSOR_TL_X__SHIFT) & A5XX_GRAS_SC_WINDOW_SCISSOR_TL_X__MASK;
+}
+#define A5XX_GRAS_SC_WINDOW_SCISSOR_TL_Y__MASK			0x7fff0000
+#define A5XX_GRAS_SC_WINDOW_SCISSOR_TL_Y__SHIFT			16
+static inline uint32_t A5XX_GRAS_SC_WINDOW_SCISSOR_TL_Y(uint32_t val)
+{
+	return ((val) << A5XX_GRAS_SC_WINDOW_SCISSOR_TL_Y__SHIFT) & A5XX_GRAS_SC_WINDOW_SCISSOR_TL_Y__MASK;
+}
+
+#define REG_A5XX_GRAS_SC_WINDOW_SCISSOR_BR			0x0000e0eb
+#define A5XX_GRAS_SC_WINDOW_SCISSOR_BR_WINDOW_OFFSET_DISABLE	0x80000000
+#define A5XX_GRAS_SC_WINDOW_SCISSOR_BR_X__MASK			0x00007fff
+#define A5XX_GRAS_SC_WINDOW_SCISSOR_BR_X__SHIFT			0
+static inline uint32_t A5XX_GRAS_SC_WINDOW_SCISSOR_BR_X(uint32_t val)
+{
+	return ((val) << A5XX_GRAS_SC_WINDOW_SCISSOR_BR_X__SHIFT) & A5XX_GRAS_SC_WINDOW_SCISSOR_BR_X__MASK;
+}
+#define A5XX_GRAS_SC_WINDOW_SCISSOR_BR_Y__MASK			0x7fff0000
+#define A5XX_GRAS_SC_WINDOW_SCISSOR_BR_Y__SHIFT			16
+static inline uint32_t A5XX_GRAS_SC_WINDOW_SCISSOR_BR_Y(uint32_t val)
+{
+	return ((val) << A5XX_GRAS_SC_WINDOW_SCISSOR_BR_Y__SHIFT) & A5XX_GRAS_SC_WINDOW_SCISSOR_BR_Y__MASK;
+}
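+
+/*
+ * All of the scissor TL/BR registers above share one layout: X in bits
+ * [14:0], Y in bits [30:16], and bit 31 (WINDOW_OFFSET_DISABLE) to bypass
+ * the window offset, so a corner is composed as, e.g.:
+ *
+ *   uint32_t tl = A5XX_GRAS_SC_WINDOW_SCISSOR_TL_X(x1) |
+ *                 A5XX_GRAS_SC_WINDOW_SCISSOR_TL_Y(y1);
+ */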
+
+#define REG_A5XX_GRAS_LRZ_CNTL					0x0000e100
+
+#define REG_A5XX_GRAS_LRZ_BUFFER_BASE_LO			0x0000e101
+
+#define REG_A5XX_GRAS_LRZ_BUFFER_BASE_HI			0x0000e102
+
+#define REG_A5XX_GRAS_LRZ_BUFFER_PITCH				0x0000e103
+
+#define REG_A5XX_GRAS_LRZ_FAST_CLEAR_BUFFER_BASE_LO		0x0000e104
+
+#define REG_A5XX_GRAS_LRZ_FAST_CLEAR_BUFFER_BASE_HI		0x0000e105
+
+#define REG_A5XX_RB_CNTL					0x0000e140
+#define A5XX_RB_CNTL_WIDTH__MASK				0x000000ff
+#define A5XX_RB_CNTL_WIDTH__SHIFT				0
+static inline uint32_t A5XX_RB_CNTL_WIDTH(uint32_t val)
+{
+	return ((val >> 5) << A5XX_RB_CNTL_WIDTH__SHIFT) & A5XX_RB_CNTL_WIDTH__MASK;
+}
+#define A5XX_RB_CNTL_HEIGHT__MASK				0x0001fe00
+#define A5XX_RB_CNTL_HEIGHT__SHIFT				9
+static inline uint32_t A5XX_RB_CNTL_HEIGHT(uint32_t val)
+{
+	return ((val >> 5) << A5XX_RB_CNTL_HEIGHT__SHIFT) & A5XX_RB_CNTL_HEIGHT__MASK;
+}
+#define A5XX_RB_CNTL_BYPASS					0x00020000
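+
+/*
+ * The (val >> 5) in the WIDTH/HEIGHT packers above implies the render
+ * target dimensions here are programmed in units of 32 pixels; that
+ * granularity is inferred from the shift, not stated in the XML.
+ */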
+
+#define REG_A5XX_RB_RENDER_CNTL					0x0000e141
+#define A5XX_RB_RENDER_CNTL_ENABLED_MRTS__MASK			0x00ff0000
+#define A5XX_RB_RENDER_CNTL_ENABLED_MRTS__SHIFT			16
+static inline uint32_t A5XX_RB_RENDER_CNTL_ENABLED_MRTS(uint32_t val)
+{
+	return ((val) << A5XX_RB_RENDER_CNTL_ENABLED_MRTS__SHIFT) & A5XX_RB_RENDER_CNTL_ENABLED_MRTS__MASK;
+}
+#define A5XX_RB_RENDER_CNTL_ENABLED_MRTS2__MASK			0xff000000
+#define A5XX_RB_RENDER_CNTL_ENABLED_MRTS2__SHIFT		24
+static inline uint32_t A5XX_RB_RENDER_CNTL_ENABLED_MRTS2(uint32_t val)
+{
+	return ((val) << A5XX_RB_RENDER_CNTL_ENABLED_MRTS2__SHIFT) & A5XX_RB_RENDER_CNTL_ENABLED_MRTS2__MASK;
+}
+
+#define REG_A5XX_RB_RAS_MSAA_CNTL				0x0000e142
+#define A5XX_RB_RAS_MSAA_CNTL_SAMPLES__MASK			0x00000003
+#define A5XX_RB_RAS_MSAA_CNTL_SAMPLES__SHIFT			0
+static inline uint32_t A5XX_RB_RAS_MSAA_CNTL_SAMPLES(enum a3xx_msaa_samples val)
+{
+	return ((val) << A5XX_RB_RAS_MSAA_CNTL_SAMPLES__SHIFT) & A5XX_RB_RAS_MSAA_CNTL_SAMPLES__MASK;
+}
+
+#define REG_A5XX_RB_DEST_MSAA_CNTL				0x0000e143
+#define A5XX_RB_DEST_MSAA_CNTL_SAMPLES__MASK			0x00000003
+#define A5XX_RB_DEST_MSAA_CNTL_SAMPLES__SHIFT			0
+static inline uint32_t A5XX_RB_DEST_MSAA_CNTL_SAMPLES(enum a3xx_msaa_samples val)
+{
+	return ((val) << A5XX_RB_DEST_MSAA_CNTL_SAMPLES__SHIFT) & A5XX_RB_DEST_MSAA_CNTL_SAMPLES__MASK;
+}
+#define A5XX_RB_DEST_MSAA_CNTL_MSAA_DISABLE			0x00000004
+
+#define REG_A5XX_RB_RENDER_CONTROL0				0x0000e144
+#define A5XX_RB_RENDER_CONTROL0_VARYING				0x00000001
+#define A5XX_RB_RENDER_CONTROL0_XCOORD				0x00000040
+#define A5XX_RB_RENDER_CONTROL0_YCOORD				0x00000080
+#define A5XX_RB_RENDER_CONTROL0_ZCOORD				0x00000100
+#define A5XX_RB_RENDER_CONTROL0_WCOORD				0x00000200
+
+#define REG_A5XX_RB_RENDER_CONTROL1				0x0000e145
+#define A5XX_RB_RENDER_CONTROL1_FACENESS			0x00000002
+
+#define REG_A5XX_RB_FS_OUTPUT_CNTL				0x0000e146
+#define A5XX_RB_FS_OUTPUT_CNTL_MRT__MASK			0x0000000f
+#define A5XX_RB_FS_OUTPUT_CNTL_MRT__SHIFT			0
+static inline uint32_t A5XX_RB_FS_OUTPUT_CNTL_MRT(uint32_t val)
+{
+	return ((val) << A5XX_RB_FS_OUTPUT_CNTL_MRT__SHIFT) & A5XX_RB_FS_OUTPUT_CNTL_MRT__MASK;
+}
+#define A5XX_RB_FS_OUTPUT_CNTL_FRAG_WRITES_Z			0x00000020
+
+static inline uint32_t REG_A5XX_RB_MRT(uint32_t i0) { return 0x0000e150 + 0x7*i0; }
+
+static inline uint32_t REG_A5XX_RB_MRT_CONTROL(uint32_t i0) { return 0x0000e150 + 0x7*i0; }
+#define A5XX_RB_MRT_CONTROL_BLEND				0x00000001
+#define A5XX_RB_MRT_CONTROL_BLEND2				0x00000002
+#define A5XX_RB_MRT_CONTROL_COMPONENT_ENABLE__MASK		0x00000780
+#define A5XX_RB_MRT_CONTROL_COMPONENT_ENABLE__SHIFT		7
+static inline uint32_t A5XX_RB_MRT_CONTROL_COMPONENT_ENABLE(uint32_t val)
+{
+	return ((val) << A5XX_RB_MRT_CONTROL_COMPONENT_ENABLE__SHIFT) & A5XX_RB_MRT_CONTROL_COMPONENT_ENABLE__MASK;
+}
+
+static inline uint32_t REG_A5XX_RB_MRT_BLEND_CONTROL(uint32_t i0) { return 0x0000e151 + 0x7*i0; }
+#define A5XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR__MASK		0x0000001f
+#define A5XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR__SHIFT		0
+static inline uint32_t A5XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR(enum adreno_rb_blend_factor val)
+{
+	return ((val) << A5XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR__SHIFT) & A5XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR__MASK;
+}
+#define A5XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__MASK	0x000000e0
+#define A5XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__SHIFT	5
+static inline uint32_t A5XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE(enum a3xx_rb_blend_opcode val)
+{
+	return ((val) << A5XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__SHIFT) & A5XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__MASK;
+}
+#define A5XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR__MASK		0x00001f00
+#define A5XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR__SHIFT	8
+static inline uint32_t A5XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR(enum adreno_rb_blend_factor val)
+{
+	return ((val) << A5XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR__SHIFT) & A5XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR__MASK;
+}
+#define A5XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR__MASK	0x001f0000
+#define A5XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR__SHIFT	16
+static inline uint32_t A5XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR(enum adreno_rb_blend_factor val)
+{
+	return ((val) << A5XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR__SHIFT) & A5XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR__MASK;
+}
+#define A5XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__MASK	0x00e00000
+#define A5XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__SHIFT	21
+static inline uint32_t A5XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE(enum a3xx_rb_blend_opcode val)
+{
+	return ((val) << A5XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__SHIFT) & A5XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__MASK;
+}
+#define A5XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR__MASK	0x1f000000
+#define A5XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR__SHIFT	24
+static inline uint32_t A5XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR(enum adreno_rb_blend_factor val)
+{
+	return ((val) << A5XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR__SHIFT) & A5XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR__MASK;
+}
+
+static inline uint32_t REG_A5XX_RB_MRT_BUF_INFO(uint32_t i0) { return 0x0000e152 + 0x7*i0; }
+#define A5XX_RB_MRT_BUF_INFO_COLOR_FORMAT__MASK			0x0000007f
+#define A5XX_RB_MRT_BUF_INFO_COLOR_FORMAT__SHIFT		0
+static inline uint32_t A5XX_RB_MRT_BUF_INFO_COLOR_FORMAT(enum a5xx_color_fmt val)
+{
+	return ((val) << A5XX_RB_MRT_BUF_INFO_COLOR_FORMAT__SHIFT) & A5XX_RB_MRT_BUF_INFO_COLOR_FORMAT__MASK;
+}
+#define A5XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE__MASK		0x00000300
+#define A5XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE__SHIFT		8
+static inline uint32_t A5XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE(enum a5xx_tile_mode val)
+{
+	return ((val) << A5XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE__SHIFT) & A5XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE__MASK;
+}
+#define A5XX_RB_MRT_BUF_INFO_COLOR_SRGB				0x00008000
+
+static inline uint32_t REG_A5XX_RB_MRT_PITCH(uint32_t i0) { return 0x0000e153 + 0x7*i0; }
+#define A5XX_RB_MRT_PITCH_COLOR_BUF_PITCH__MASK			0x0007ffff
+#define A5XX_RB_MRT_PITCH_COLOR_BUF_PITCH__SHIFT		0
+static inline uint32_t A5XX_RB_MRT_PITCH_COLOR_BUF_PITCH(uint32_t val)
+{
+	return ((val >> 4) << A5XX_RB_MRT_PITCH_COLOR_BUF_PITCH__SHIFT) & A5XX_RB_MRT_PITCH_COLOR_BUF_PITCH__MASK;
+}
+
+static inline uint32_t REG_A5XX_RB_MRT_ARRAY_PITCH(uint32_t i0) { return 0x0000e154 + 0x7*i0; }
+#define A5XX_RB_MRT_ARRAY_PITCH_COLOR_BUF_SIZE__MASK		0x01ffffff
+#define A5XX_RB_MRT_ARRAY_PITCH_COLOR_BUF_SIZE__SHIFT		0
+static inline uint32_t A5XX_RB_MRT_ARRAY_PITCH_COLOR_BUF_SIZE(uint32_t val)
+{
+	return ((val >> 6) << A5XX_RB_MRT_ARRAY_PITCH_COLOR_BUF_SIZE__SHIFT) & A5XX_RB_MRT_ARRAY_PITCH_COLOR_BUF_SIZE__MASK;
+}
+
+static inline uint32_t REG_A5XX_RB_MRT_BASE_LO(uint32_t i0) { return 0x0000e155 + 0x7*i0; }
+
+static inline uint32_t REG_A5XX_RB_MRT_BASE_HI(uint32_t i0) { return 0x0000e156 + 0x7*i0; }
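+
+/*
+ * The REG_A5XX_RB_MRT_*(i0) helpers above model an array of per-render-
+ * target register groups: each MRT occupies a block of 7 registers
+ * starting at 0xe150, so e.g. REG_A5XX_RB_MRT_BUF_INFO(2) is
+ * 0xe152 + 0x7*2 = 0xe160. The same i0-stride convention is used for the
+ * other indexed register arrays in this file.
+ */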
+
+#define REG_A5XX_RB_BLEND_RED					0x0000e1a0
+#define A5XX_RB_BLEND_RED_UINT__MASK				0x000000ff
+#define A5XX_RB_BLEND_RED_UINT__SHIFT				0
+static inline uint32_t A5XX_RB_BLEND_RED_UINT(uint32_t val)
+{
+	return ((val) << A5XX_RB_BLEND_RED_UINT__SHIFT) & A5XX_RB_BLEND_RED_UINT__MASK;
+}
+#define A5XX_RB_BLEND_RED_SINT__MASK				0x0000ff00
+#define A5XX_RB_BLEND_RED_SINT__SHIFT				8
+static inline uint32_t A5XX_RB_BLEND_RED_SINT(uint32_t val)
+{
+	return ((val) << A5XX_RB_BLEND_RED_SINT__SHIFT) & A5XX_RB_BLEND_RED_SINT__MASK;
+}
+#define A5XX_RB_BLEND_RED_FLOAT__MASK				0xffff0000
+#define A5XX_RB_BLEND_RED_FLOAT__SHIFT				16
+static inline uint32_t A5XX_RB_BLEND_RED_FLOAT(float val)
+{
+	return ((util_float_to_half(val)) << A5XX_RB_BLEND_RED_FLOAT__SHIFT) & A5XX_RB_BLEND_RED_FLOAT__MASK;
+}
+
+#define REG_A5XX_RB_BLEND_RED_F32				0x0000e1a1
+#define A5XX_RB_BLEND_RED_F32__MASK				0xffffffff
+#define A5XX_RB_BLEND_RED_F32__SHIFT				0
+static inline uint32_t A5XX_RB_BLEND_RED_F32(float val)
+{
+	return ((fui(val)) << A5XX_RB_BLEND_RED_F32__SHIFT) & A5XX_RB_BLEND_RED_F32__MASK;
+}
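+
+/*
+ * The blend color is written in several encodings: UINT/SINT bytes plus a
+ * 16-bit half float in REG_A5XX_RB_BLEND_RED, and a full 32-bit float in
+ * the _F32 register. util_float_to_half() is assumed to be the standard
+ * binary32 -> binary16 conversion (as in Mesa's u_half.h). The
+ * GREEN/BLUE/ALPHA registers below repeat the same layout.
+ */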
+
+#define REG_A5XX_RB_BLEND_GREEN					0x0000e1a2
+#define A5XX_RB_BLEND_GREEN_UINT__MASK				0x000000ff
+#define A5XX_RB_BLEND_GREEN_UINT__SHIFT				0
+static inline uint32_t A5XX_RB_BLEND_GREEN_UINT(uint32_t val)
+{
+	return ((val) << A5XX_RB_BLEND_GREEN_UINT__SHIFT) & A5XX_RB_BLEND_GREEN_UINT__MASK;
+}
+#define A5XX_RB_BLEND_GREEN_SINT__MASK				0x0000ff00
+#define A5XX_RB_BLEND_GREEN_SINT__SHIFT				8
+static inline uint32_t A5XX_RB_BLEND_GREEN_SINT(uint32_t val)
+{
+	return ((val) << A5XX_RB_BLEND_GREEN_SINT__SHIFT) & A5XX_RB_BLEND_GREEN_SINT__MASK;
+}
+#define A5XX_RB_BLEND_GREEN_FLOAT__MASK				0xffff0000
+#define A5XX_RB_BLEND_GREEN_FLOAT__SHIFT			16
+static inline uint32_t A5XX_RB_BLEND_GREEN_FLOAT(float val)
+{
+	return ((util_float_to_half(val)) << A5XX_RB_BLEND_GREEN_FLOAT__SHIFT) & A5XX_RB_BLEND_GREEN_FLOAT__MASK;
+}
+
+#define REG_A5XX_RB_BLEND_GREEN_F32				0x0000e1a3
+#define A5XX_RB_BLEND_GREEN_F32__MASK				0xffffffff
+#define A5XX_RB_BLEND_GREEN_F32__SHIFT				0
+static inline uint32_t A5XX_RB_BLEND_GREEN_F32(float val)
+{
+	return ((fui(val)) << A5XX_RB_BLEND_GREEN_F32__SHIFT) & A5XX_RB_BLEND_GREEN_F32__MASK;
+}
+
+#define REG_A5XX_RB_BLEND_BLUE					0x0000e1a4
+#define A5XX_RB_BLEND_BLUE_UINT__MASK				0x000000ff
+#define A5XX_RB_BLEND_BLUE_UINT__SHIFT				0
+static inline uint32_t A5XX_RB_BLEND_BLUE_UINT(uint32_t val)
+{
+	return ((val) << A5XX_RB_BLEND_BLUE_UINT__SHIFT) & A5XX_RB_BLEND_BLUE_UINT__MASK;
+}
+#define A5XX_RB_BLEND_BLUE_SINT__MASK				0x0000ff00
+#define A5XX_RB_BLEND_BLUE_SINT__SHIFT				8
+static inline uint32_t A5XX_RB_BLEND_BLUE_SINT(uint32_t val)
+{
+	return ((val) << A5XX_RB_BLEND_BLUE_SINT__SHIFT) & A5XX_RB_BLEND_BLUE_SINT__MASK;
+}
+#define A5XX_RB_BLEND_BLUE_FLOAT__MASK				0xffff0000
+#define A5XX_RB_BLEND_BLUE_FLOAT__SHIFT				16
+static inline uint32_t A5XX_RB_BLEND_BLUE_FLOAT(float val)
+{
+	return ((util_float_to_half(val)) << A5XX_RB_BLEND_BLUE_FLOAT__SHIFT) & A5XX_RB_BLEND_BLUE_FLOAT__MASK;
+}
+
+#define REG_A5XX_RB_BLEND_BLUE_F32				0x0000e1a5
+#define A5XX_RB_BLEND_BLUE_F32__MASK				0xffffffff
+#define A5XX_RB_BLEND_BLUE_F32__SHIFT				0
+static inline uint32_t A5XX_RB_BLEND_BLUE_F32(float val)
+{
+	return ((fui(val)) << A5XX_RB_BLEND_BLUE_F32__SHIFT) & A5XX_RB_BLEND_BLUE_F32__MASK;
+}
+
+#define REG_A5XX_RB_BLEND_ALPHA					0x0000e1a6
+#define A5XX_RB_BLEND_ALPHA_UINT__MASK				0x000000ff
+#define A5XX_RB_BLEND_ALPHA_UINT__SHIFT				0
+static inline uint32_t A5XX_RB_BLEND_ALPHA_UINT(uint32_t val)
+{
+	return ((val) << A5XX_RB_BLEND_ALPHA_UINT__SHIFT) & A5XX_RB_BLEND_ALPHA_UINT__MASK;
+}
+#define A5XX_RB_BLEND_ALPHA_SINT__MASK				0x0000ff00
+#define A5XX_RB_BLEND_ALPHA_SINT__SHIFT				8
+static inline uint32_t A5XX_RB_BLEND_ALPHA_SINT(uint32_t val)
+{
+	return ((val) << A5XX_RB_BLEND_ALPHA_SINT__SHIFT) & A5XX_RB_BLEND_ALPHA_SINT__MASK;
+}
+#define A5XX_RB_BLEND_ALPHA_FLOAT__MASK				0xffff0000
+#define A5XX_RB_BLEND_ALPHA_FLOAT__SHIFT			16
+static inline uint32_t A5XX_RB_BLEND_ALPHA_FLOAT(float val)
+{
+	return ((util_float_to_half(val)) << A5XX_RB_BLEND_ALPHA_FLOAT__SHIFT) & A5XX_RB_BLEND_ALPHA_FLOAT__MASK;
+}
+
+#define REG_A5XX_RB_BLEND_ALPHA_F32				0x0000e1a7
+#define A5XX_RB_BLEND_ALPHA_F32__MASK				0xffffffff
+#define A5XX_RB_BLEND_ALPHA_F32__SHIFT				0
+static inline uint32_t A5XX_RB_BLEND_ALPHA_F32(float val)
+{
+	return ((fui(val)) << A5XX_RB_BLEND_ALPHA_F32__SHIFT) & A5XX_RB_BLEND_ALPHA_F32__MASK;
+}
+
+#define REG_A5XX_RB_ALPHA_CONTROL				0x0000e1a8
+#define A5XX_RB_ALPHA_CONTROL_ALPHA_REF__MASK			0x000000ff
+#define A5XX_RB_ALPHA_CONTROL_ALPHA_REF__SHIFT			0
+static inline uint32_t A5XX_RB_ALPHA_CONTROL_ALPHA_REF(uint32_t val)
+{
+	return ((val) << A5XX_RB_ALPHA_CONTROL_ALPHA_REF__SHIFT) & A5XX_RB_ALPHA_CONTROL_ALPHA_REF__MASK;
+}
+#define A5XX_RB_ALPHA_CONTROL_ALPHA_TEST			0x00000100
+#define A5XX_RB_ALPHA_CONTROL_ALPHA_TEST_FUNC__MASK		0x00000e00
+#define A5XX_RB_ALPHA_CONTROL_ALPHA_TEST_FUNC__SHIFT		9
+static inline uint32_t A5XX_RB_ALPHA_CONTROL_ALPHA_TEST_FUNC(enum adreno_compare_func val)
+{
+	return ((val) << A5XX_RB_ALPHA_CONTROL_ALPHA_TEST_FUNC__SHIFT) & A5XX_RB_ALPHA_CONTROL_ALPHA_TEST_FUNC__MASK;
+}
+
+#define REG_A5XX_RB_BLEND_CNTL					0x0000e1a9
+#define A5XX_RB_BLEND_CNTL_ENABLE_BLEND__MASK			0x000000ff
+#define A5XX_RB_BLEND_CNTL_ENABLE_BLEND__SHIFT			0
+static inline uint32_t A5XX_RB_BLEND_CNTL_ENABLE_BLEND(uint32_t val)
+{
+	return ((val) << A5XX_RB_BLEND_CNTL_ENABLE_BLEND__SHIFT) & A5XX_RB_BLEND_CNTL_ENABLE_BLEND__MASK;
+}
+#define A5XX_RB_BLEND_CNTL_INDEPENDENT_BLEND			0x00000100
+#define A5XX_RB_BLEND_CNTL_SAMPLE_MASK__MASK			0xffff0000
+#define A5XX_RB_BLEND_CNTL_SAMPLE_MASK__SHIFT			16
+static inline uint32_t A5XX_RB_BLEND_CNTL_SAMPLE_MASK(uint32_t val)
+{
+	return ((val) << A5XX_RB_BLEND_CNTL_SAMPLE_MASK__SHIFT) & A5XX_RB_BLEND_CNTL_SAMPLE_MASK__MASK;
+}
+
+#define REG_A5XX_RB_DEPTH_PLANE_CNTL				0x0000e1b0
+#define A5XX_RB_DEPTH_PLANE_CNTL_FRAG_WRITES_Z			0x00000001
+
+#define REG_A5XX_RB_DEPTH_CNTL					0x0000e1b1
+#define A5XX_RB_DEPTH_CNTL_Z_ENABLE				0x00000001
+#define A5XX_RB_DEPTH_CNTL_Z_WRITE_ENABLE			0x00000002
+#define A5XX_RB_DEPTH_CNTL_ZFUNC__MASK				0x0000001c
+#define A5XX_RB_DEPTH_CNTL_ZFUNC__SHIFT				2
+static inline uint32_t A5XX_RB_DEPTH_CNTL_ZFUNC(enum adreno_compare_func val)
+{
+	return ((val) << A5XX_RB_DEPTH_CNTL_ZFUNC__SHIFT) & A5XX_RB_DEPTH_CNTL_ZFUNC__MASK;
+}
+#define A5XX_RB_DEPTH_CNTL_Z_TEST_ENABLE			0x00000040
+
+#define REG_A5XX_RB_DEPTH_BUFFER_INFO				0x0000e1b2
+#define A5XX_RB_DEPTH_BUFFER_INFO_DEPTH_FORMAT__MASK		0x00000007
+#define A5XX_RB_DEPTH_BUFFER_INFO_DEPTH_FORMAT__SHIFT		0
+static inline uint32_t A5XX_RB_DEPTH_BUFFER_INFO_DEPTH_FORMAT(enum a5xx_depth_format val)
+{
+	return ((val) << A5XX_RB_DEPTH_BUFFER_INFO_DEPTH_FORMAT__SHIFT) & A5XX_RB_DEPTH_BUFFER_INFO_DEPTH_FORMAT__MASK;
+}
+
+#define REG_A5XX_RB_DEPTH_BUFFER_BASE_LO			0x0000e1b3
+
+#define REG_A5XX_RB_DEPTH_BUFFER_BASE_HI			0x0000e1b4
+
+#define REG_A5XX_RB_DEPTH_BUFFER_PITCH				0x0000e1b5
+#define A5XX_RB_DEPTH_BUFFER_PITCH__MASK			0xffffffff
+#define A5XX_RB_DEPTH_BUFFER_PITCH__SHIFT			0
+static inline uint32_t A5XX_RB_DEPTH_BUFFER_PITCH(uint32_t val)
+{
+	return ((val >> 5) << A5XX_RB_DEPTH_BUFFER_PITCH__SHIFT) & A5XX_RB_DEPTH_BUFFER_PITCH__MASK;
+}
+
+#define REG_A5XX_RB_DEPTH_BUFFER_ARRAY_PITCH			0x0000e1b6
+#define A5XX_RB_DEPTH_BUFFER_ARRAY_PITCH__MASK			0xffffffff
+#define A5XX_RB_DEPTH_BUFFER_ARRAY_PITCH__SHIFT			0
+static inline uint32_t A5XX_RB_DEPTH_BUFFER_ARRAY_PITCH(uint32_t val)
+{
+	return ((val >> 5) << A5XX_RB_DEPTH_BUFFER_ARRAY_PITCH__SHIFT) & A5XX_RB_DEPTH_BUFFER_ARRAY_PITCH__MASK;
+}
+
+#define REG_A5XX_RB_STENCIL_CONTROL				0x0000e1c0
+#define A5XX_RB_STENCIL_CONTROL_STENCIL_ENABLE			0x00000001
+#define A5XX_RB_STENCIL_CONTROL_STENCIL_ENABLE_BF		0x00000002
+#define A5XX_RB_STENCIL_CONTROL_STENCIL_READ			0x00000004
+#define A5XX_RB_STENCIL_CONTROL_FUNC__MASK			0x00000700
+#define A5XX_RB_STENCIL_CONTROL_FUNC__SHIFT			8
+static inline uint32_t A5XX_RB_STENCIL_CONTROL_FUNC(enum adreno_compare_func val)
+{
+	return ((val) << A5XX_RB_STENCIL_CONTROL_FUNC__SHIFT) & A5XX_RB_STENCIL_CONTROL_FUNC__MASK;
+}
+#define A5XX_RB_STENCIL_CONTROL_FAIL__MASK			0x00003800
+#define A5XX_RB_STENCIL_CONTROL_FAIL__SHIFT			11
+static inline uint32_t A5XX_RB_STENCIL_CONTROL_FAIL(enum adreno_stencil_op val)
+{
+	return ((val) << A5XX_RB_STENCIL_CONTROL_FAIL__SHIFT) & A5XX_RB_STENCIL_CONTROL_FAIL__MASK;
+}
+#define A5XX_RB_STENCIL_CONTROL_ZPASS__MASK			0x0001c000
+#define A5XX_RB_STENCIL_CONTROL_ZPASS__SHIFT			14
+static inline uint32_t A5XX_RB_STENCIL_CONTROL_ZPASS(enum adreno_stencil_op val)
+{
+	return ((val) << A5XX_RB_STENCIL_CONTROL_ZPASS__SHIFT) & A5XX_RB_STENCIL_CONTROL_ZPASS__MASK;
+}
+#define A5XX_RB_STENCIL_CONTROL_ZFAIL__MASK			0x000e0000
+#define A5XX_RB_STENCIL_CONTROL_ZFAIL__SHIFT			17
+static inline uint32_t A5XX_RB_STENCIL_CONTROL_ZFAIL(enum adreno_stencil_op val)
+{
+	return ((val) << A5XX_RB_STENCIL_CONTROL_ZFAIL__SHIFT) & A5XX_RB_STENCIL_CONTROL_ZFAIL__MASK;
+}
+#define A5XX_RB_STENCIL_CONTROL_FUNC_BF__MASK			0x00700000
+#define A5XX_RB_STENCIL_CONTROL_FUNC_BF__SHIFT			20
+static inline uint32_t A5XX_RB_STENCIL_CONTROL_FUNC_BF(enum adreno_compare_func val)
+{
+	return ((val) << A5XX_RB_STENCIL_CONTROL_FUNC_BF__SHIFT) & A5XX_RB_STENCIL_CONTROL_FUNC_BF__MASK;
+}
+#define A5XX_RB_STENCIL_CONTROL_FAIL_BF__MASK			0x03800000
+#define A5XX_RB_STENCIL_CONTROL_FAIL_BF__SHIFT			23
+static inline uint32_t A5XX_RB_STENCIL_CONTROL_FAIL_BF(enum adreno_stencil_op val)
+{
+	return ((val) << A5XX_RB_STENCIL_CONTROL_FAIL_BF__SHIFT) & A5XX_RB_STENCIL_CONTROL_FAIL_BF__MASK;
+}
+#define A5XX_RB_STENCIL_CONTROL_ZPASS_BF__MASK			0x1c000000
+#define A5XX_RB_STENCIL_CONTROL_ZPASS_BF__SHIFT			26
+static inline uint32_t A5XX_RB_STENCIL_CONTROL_ZPASS_BF(enum adreno_stencil_op val)
+{
+	return ((val) << A5XX_RB_STENCIL_CONTROL_ZPASS_BF__SHIFT) & A5XX_RB_STENCIL_CONTROL_ZPASS_BF__MASK;
+}
+#define A5XX_RB_STENCIL_CONTROL_ZFAIL_BF__MASK			0xe0000000
+#define A5XX_RB_STENCIL_CONTROL_ZFAIL_BF__SHIFT			29
+static inline uint32_t A5XX_RB_STENCIL_CONTROL_ZFAIL_BF(enum adreno_stencil_op val)
+{
+	return ((val) << A5XX_RB_STENCIL_CONTROL_ZFAIL_BF__SHIFT) & A5XX_RB_STENCIL_CONTROL_ZFAIL_BF__MASK;
+}
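+
+/*
+ * Two-sided stencil: the front-face FUNC/FAIL/ZPASS/ZFAIL ops occupy bits
+ * [19:8] above, and the _BF variants carry the back-face copies in bits
+ * [31:20].
+ */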
+
+#define REG_A5XX_RB_STENCIL_INFO				0x0000e1c2
+#define A5XX_RB_STENCIL_INFO_SEPARATE_STENCIL			0x00000001
+#define A5XX_RB_STENCIL_INFO_STENCIL_BASE__MASK			0xfffff000
+#define A5XX_RB_STENCIL_INFO_STENCIL_BASE__SHIFT		12
+static inline uint32_t A5XX_RB_STENCIL_INFO_STENCIL_BASE(uint32_t val)
+{
+	return ((val >> 12) << A5XX_RB_STENCIL_INFO_STENCIL_BASE__SHIFT) & A5XX_RB_STENCIL_INFO_STENCIL_BASE__MASK;
+}
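+
+/*
+ * The (val >> 12) here suggests the separate-stencil buffer base must be
+ * 4 KiB aligned, with address bits [31:12] stored in place.
+ */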
+
+#define REG_A5XX_UNKNOWN_E1C3					0x0000e1c3
+
+#define REG_A5XX_RB_STENCILREFMASK				0x0000e1c6
+#define A5XX_RB_STENCILREFMASK_STENCILREF__MASK			0x000000ff
+#define A5XX_RB_STENCILREFMASK_STENCILREF__SHIFT		0
+static inline uint32_t A5XX_RB_STENCILREFMASK_STENCILREF(uint32_t val)
+{
+	return ((val) << A5XX_RB_STENCILREFMASK_STENCILREF__SHIFT) & A5XX_RB_STENCILREFMASK_STENCILREF__MASK;
+}
+#define A5XX_RB_STENCILREFMASK_STENCILMASK__MASK		0x0000ff00
+#define A5XX_RB_STENCILREFMASK_STENCILMASK__SHIFT		8
+static inline uint32_t A5XX_RB_STENCILREFMASK_STENCILMASK(uint32_t val)
+{
+	return ((val) << A5XX_RB_STENCILREFMASK_STENCILMASK__SHIFT) & A5XX_RB_STENCILREFMASK_STENCILMASK__MASK;
+}
+#define A5XX_RB_STENCILREFMASK_STENCILWRITEMASK__MASK		0x00ff0000
+#define A5XX_RB_STENCILREFMASK_STENCILWRITEMASK__SHIFT		16
+static inline uint32_t A5XX_RB_STENCILREFMASK_STENCILWRITEMASK(uint32_t val)
+{
+	return ((val) << A5XX_RB_STENCILREFMASK_STENCILWRITEMASK__SHIFT) & A5XX_RB_STENCILREFMASK_STENCILWRITEMASK__MASK;
+}
+
+#define REG_A5XX_RB_WINDOW_OFFSET				0x0000e1d0
+#define A5XX_RB_WINDOW_OFFSET_WINDOW_OFFSET_DISABLE		0x80000000
+#define A5XX_RB_WINDOW_OFFSET_X__MASK				0x00007fff
+#define A5XX_RB_WINDOW_OFFSET_X__SHIFT				0
+static inline uint32_t A5XX_RB_WINDOW_OFFSET_X(uint32_t val)
+{
+	return ((val) << A5XX_RB_WINDOW_OFFSET_X__SHIFT) & A5XX_RB_WINDOW_OFFSET_X__MASK;
+}
+#define A5XX_RB_WINDOW_OFFSET_Y__MASK				0x7fff0000
+#define A5XX_RB_WINDOW_OFFSET_Y__SHIFT				16
+static inline uint32_t A5XX_RB_WINDOW_OFFSET_Y(uint32_t val)
+{
+	return ((val) << A5XX_RB_WINDOW_OFFSET_Y__SHIFT) & A5XX_RB_WINDOW_OFFSET_Y__MASK;
+}
+
+#define REG_A5XX_RB_RESOLVE_CNTL_1				0x0000e211
+#define A5XX_RB_RESOLVE_CNTL_1_WINDOW_OFFSET_DISABLE		0x80000000
+#define A5XX_RB_RESOLVE_CNTL_1_X__MASK				0x00007fff
+#define A5XX_RB_RESOLVE_CNTL_1_X__SHIFT				0
+static inline uint32_t A5XX_RB_RESOLVE_CNTL_1_X(uint32_t val)
+{
+	return ((val) << A5XX_RB_RESOLVE_CNTL_1_X__SHIFT) & A5XX_RB_RESOLVE_CNTL_1_X__MASK;
+}
+#define A5XX_RB_RESOLVE_CNTL_1_Y__MASK				0x7fff0000
+#define A5XX_RB_RESOLVE_CNTL_1_Y__SHIFT				16
+static inline uint32_t A5XX_RB_RESOLVE_CNTL_1_Y(uint32_t val)
+{
+	return ((val) << A5XX_RB_RESOLVE_CNTL_1_Y__SHIFT) & A5XX_RB_RESOLVE_CNTL_1_Y__MASK;
+}
+
+#define REG_A5XX_RB_RESOLVE_CNTL_2				0x0000e212
+#define A5XX_RB_RESOLVE_CNTL_2_WINDOW_OFFSET_DISABLE		0x80000000
+#define A5XX_RB_RESOLVE_CNTL_2_X__MASK				0x00007fff
+#define A5XX_RB_RESOLVE_CNTL_2_X__SHIFT				0
+static inline uint32_t A5XX_RB_RESOLVE_CNTL_2_X(uint32_t val)
+{
+	return ((val) << A5XX_RB_RESOLVE_CNTL_2_X__SHIFT) & A5XX_RB_RESOLVE_CNTL_2_X__MASK;
+}
+#define A5XX_RB_RESOLVE_CNTL_2_Y__MASK				0x7fff0000
+#define A5XX_RB_RESOLVE_CNTL_2_Y__SHIFT				16
+static inline uint32_t A5XX_RB_RESOLVE_CNTL_2_Y(uint32_t val)
+{
+	return ((val) << A5XX_RB_RESOLVE_CNTL_2_Y__SHIFT) & A5XX_RB_RESOLVE_CNTL_2_Y__MASK;
+}
+
+#define REG_A5XX_RB_DEPTH_FLAG_BUFFER_BASE_LO			0x0000e240
+
+#define REG_A5XX_RB_DEPTH_FLAG_BUFFER_BASE_HI			0x0000e241
+
+#define REG_A5XX_RB_DEPTH_FLAG_BUFFER_PITCH			0x0000e242
+
+#define REG_A5XX_VPC_CNTL_0					0x0000e280
+#define A5XX_VPC_CNTL_0_STRIDE_IN_VPC__MASK			0x0000007f
+#define A5XX_VPC_CNTL_0_STRIDE_IN_VPC__SHIFT			0
+static inline uint32_t A5XX_VPC_CNTL_0_STRIDE_IN_VPC(uint32_t val)
+{
+	return ((val) << A5XX_VPC_CNTL_0_STRIDE_IN_VPC__SHIFT) & A5XX_VPC_CNTL_0_STRIDE_IN_VPC__MASK;
+}
+
+static inline uint32_t REG_A5XX_VPC_VARYING_INTERP(uint32_t i0) { return 0x0000e282 + 0x1*i0; }
+
+static inline uint32_t REG_A5XX_VPC_VARYING_INTERP_MODE(uint32_t i0) { return 0x0000e282 + 0x1*i0; }
+
+static inline uint32_t REG_A5XX_VPC_VARYING_PS_REPL(uint32_t i0) { return 0x0000e28a + 0x1*i0; }
+
+static inline uint32_t REG_A5XX_VPC_VARYING_PS_REPL_MODE(uint32_t i0) { return 0x0000e28a + 0x1*i0; }
+
+static inline uint32_t REG_A5XX_VPC_VAR(uint32_t i0) { return 0x0000e294 + 0x1*i0; }
+
+static inline uint32_t REG_A5XX_VPC_VAR_DISABLE(uint32_t i0) { return 0x0000e294 + 0x1*i0; }
+
+#define REG_A5XX_VPC_GS_SIV_CNTL				0x0000e298
+
+#define REG_A5XX_VPC_PACK					0x0000e29d
+#define A5XX_VPC_PACK_NUMNONPOSVAR__MASK			0x000000ff
+#define A5XX_VPC_PACK_NUMNONPOSVAR__SHIFT			0
+static inline uint32_t A5XX_VPC_PACK_NUMNONPOSVAR(uint32_t val)
+{
+	return ((val) << A5XX_VPC_PACK_NUMNONPOSVAR__SHIFT) & A5XX_VPC_PACK_NUMNONPOSVAR__MASK;
+}
+
+#define REG_A5XX_VPC_FS_PRIMITIVEID_CNTL			0x0000e2a0
+
+#define REG_A5XX_VPC_SO_OVERRIDE				0x0000e2a2
+
+#define REG_A5XX_VPC_SO_BUFFER_BASE_LO_0			0x0000e2a7
+
+#define REG_A5XX_VPC_SO_BUFFER_BASE_HI_0			0x0000e2a8
+
+#define REG_A5XX_VPC_SO_BUFFER_SIZE_0				0x0000e2a9
+
+#define REG_A5XX_VPC_SO_FLUSH_BASE_LO_0				0x0000e2ac
+
+#define REG_A5XX_VPC_SO_FLUSH_BASE_HI_0				0x0000e2ad
+
+#define REG_A5XX_PC_PRIMITIVE_CNTL				0x0000e384
+#define A5XX_PC_PRIMITIVE_CNTL_STRIDE_IN_VPC__MASK		0x0000007f
+#define A5XX_PC_PRIMITIVE_CNTL_STRIDE_IN_VPC__SHIFT		0
+static inline uint32_t A5XX_PC_PRIMITIVE_CNTL_STRIDE_IN_VPC(uint32_t val)
+{
+	return ((val) << A5XX_PC_PRIMITIVE_CNTL_STRIDE_IN_VPC__SHIFT) & A5XX_PC_PRIMITIVE_CNTL_STRIDE_IN_VPC__MASK;
+}
+
+#define REG_A5XX_PC_RASTER_CNTL					0x0000e388
+
+#define REG_A5XX_PC_RESTART_INDEX				0x0000e38c
+
+#define REG_A5XX_PC_GS_PARAM					0x0000e38e
+
+#define REG_A5XX_PC_HS_PARAM					0x0000e38f
+
+#define REG_A5XX_PC_POWER_CNTL					0x0000e3b0
+
+#define REG_A5XX_VFD_CONTROL_0					0x0000e400
+#define A5XX_VFD_CONTROL_0_STRMDECINSTRCNT__MASK		0x0000003f
+#define A5XX_VFD_CONTROL_0_STRMDECINSTRCNT__SHIFT		0
+static inline uint32_t A5XX_VFD_CONTROL_0_STRMDECINSTRCNT(uint32_t val)
+{
+	return ((val) << A5XX_VFD_CONTROL_0_STRMDECINSTRCNT__SHIFT) & A5XX_VFD_CONTROL_0_STRMDECINSTRCNT__MASK;
+}
+
+#define REG_A5XX_VFD_CONTROL_1					0x0000e401
+#define A5XX_VFD_CONTROL_1_REGID4INST__MASK			0x0000ff00
+#define A5XX_VFD_CONTROL_1_REGID4INST__SHIFT			8
+static inline uint32_t A5XX_VFD_CONTROL_1_REGID4INST(uint32_t val)
+{
+	return ((val) << A5XX_VFD_CONTROL_1_REGID4INST__SHIFT) & A5XX_VFD_CONTROL_1_REGID4INST__MASK;
+}
+#define A5XX_VFD_CONTROL_1_REGID4VTX__MASK			0x00ff0000
+#define A5XX_VFD_CONTROL_1_REGID4VTX__SHIFT			16
+static inline uint32_t A5XX_VFD_CONTROL_1_REGID4VTX(uint32_t val)
+{
+	return ((val) << A5XX_VFD_CONTROL_1_REGID4VTX__SHIFT) & A5XX_VFD_CONTROL_1_REGID4VTX__MASK;
+}
+
+#define REG_A5XX_VFD_CONTROL_2					0x0000e402
+
+#define REG_A5XX_VFD_CONTROL_3					0x0000e403
+
+#define REG_A5XX_VFD_CONTROL_4					0x0000e404
+
+#define REG_A5XX_VFD_CONTROL_5					0x0000e405
+
+#define REG_A5XX_VFD_INDEX_OFFSET				0x0000e408
+
+#define REG_A5XX_VFD_INSTANCE_START_OFFSET			0x0000e409
+
+static inline uint32_t REG_A5XX_VFD_FETCH(uint32_t i0) { return 0x0000e40a + 0x4*i0; }
+
+static inline uint32_t REG_A5XX_VFD_FETCH_BASE_LO(uint32_t i0) { return 0x0000e40a + 0x4*i0; }
+
+static inline uint32_t REG_A5XX_VFD_FETCH_BASE_HI(uint32_t i0) { return 0x0000e40b + 0x4*i0; }
+
+static inline uint32_t REG_A5XX_VFD_FETCH_SIZE(uint32_t i0) { return 0x0000e40c + 0x4*i0; }
+
+static inline uint32_t REG_A5XX_VFD_FETCH_STRIDE(uint32_t i0) { return 0x0000e40d + 0x4*i0; }
+
+static inline uint32_t REG_A5XX_VFD_DECODE(uint32_t i0) { return 0x0000e48a + 0x2*i0; }
+
+static inline uint32_t REG_A5XX_VFD_DECODE_INSTR(uint32_t i0) { return 0x0000e48a + 0x2*i0; }
+#define A5XX_VFD_DECODE_INSTR_IDX__MASK				0x0000001f
+#define A5XX_VFD_DECODE_INSTR_IDX__SHIFT			0
+static inline uint32_t A5XX_VFD_DECODE_INSTR_IDX(uint32_t val)
+{
+	return ((val) << A5XX_VFD_DECODE_INSTR_IDX__SHIFT) & A5XX_VFD_DECODE_INSTR_IDX__MASK;
+}
+#define A5XX_VFD_DECODE_INSTR_FORMAT__MASK			0x3ff00000
+#define A5XX_VFD_DECODE_INSTR_FORMAT__SHIFT			20
+static inline uint32_t A5XX_VFD_DECODE_INSTR_FORMAT(enum a5xx_vtx_fmt val)
+{
+	return ((val) << A5XX_VFD_DECODE_INSTR_FORMAT__SHIFT) & A5XX_VFD_DECODE_INSTR_FORMAT__MASK;
+}
+
+static inline uint32_t REG_A5XX_VFD_DECODE_STEP_RATE(uint32_t i0) { return 0x0000e48b + 0x2*i0; }
+
+static inline uint32_t REG_A5XX_VFD_DEST_CNTL(uint32_t i0) { return 0x0000e4ca + 0x1*i0; }
+
+static inline uint32_t REG_A5XX_VFD_DEST_CNTL_INSTR(uint32_t i0) { return 0x0000e4ca + 0x1*i0; }
+#define A5XX_VFD_DEST_CNTL_INSTR_WRITEMASK__MASK		0x0000000f
+#define A5XX_VFD_DEST_CNTL_INSTR_WRITEMASK__SHIFT		0
+static inline uint32_t A5XX_VFD_DEST_CNTL_INSTR_WRITEMASK(uint32_t val)
+{
+	return ((val) << A5XX_VFD_DEST_CNTL_INSTR_WRITEMASK__SHIFT) & A5XX_VFD_DEST_CNTL_INSTR_WRITEMASK__MASK;
+}
+#define A5XX_VFD_DEST_CNTL_INSTR_REGID__MASK			0x00000ff0
+#define A5XX_VFD_DEST_CNTL_INSTR_REGID__SHIFT			4
+static inline uint32_t A5XX_VFD_DEST_CNTL_INSTR_REGID(uint32_t val)
+{
+	return ((val) << A5XX_VFD_DEST_CNTL_INSTR_REGID__SHIFT) & A5XX_VFD_DEST_CNTL_INSTR_REGID__MASK;
+}
+
+#define REG_A5XX_VFD_POWER_CNTL					0x0000e4f0
+
+#define REG_A5XX_SP_SP_CNTL					0x0000e580
+
+#define REG_A5XX_SP_VS_CONTROL_REG				0x0000e584
+#define A5XX_SP_VS_CONTROL_REG_ENABLED				0x00000001
+#define A5XX_SP_VS_CONTROL_REG_CONSTOBJECTOFFSET__MASK		0x000000fe
+#define A5XX_SP_VS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT		1
+static inline uint32_t A5XX_SP_VS_CONTROL_REG_CONSTOBJECTOFFSET(uint32_t val)
+{
+	return ((val) << A5XX_SP_VS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT) & A5XX_SP_VS_CONTROL_REG_CONSTOBJECTOFFSET__MASK;
+}
+#define A5XX_SP_VS_CONTROL_REG_SHADEROBJOFFSET__MASK		0x00007f00
+#define A5XX_SP_VS_CONTROL_REG_SHADEROBJOFFSET__SHIFT		8
+static inline uint32_t A5XX_SP_VS_CONTROL_REG_SHADEROBJOFFSET(uint32_t val)
+{
+	return ((val) << A5XX_SP_VS_CONTROL_REG_SHADEROBJOFFSET__SHIFT) & A5XX_SP_VS_CONTROL_REG_SHADEROBJOFFSET__MASK;
+}
+
+#define REG_A5XX_SP_FS_CONTROL_REG				0x0000e585
+#define A5XX_SP_FS_CONTROL_REG_ENABLED				0x00000001
+#define A5XX_SP_FS_CONTROL_REG_CONSTOBJECTOFFSET__MASK		0x000000fe
+#define A5XX_SP_FS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT		1
+static inline uint32_t A5XX_SP_FS_CONTROL_REG_CONSTOBJECTOFFSET(uint32_t val)
+{
+	return ((val) << A5XX_SP_FS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT) & A5XX_SP_FS_CONTROL_REG_CONSTOBJECTOFFSET__MASK;
+}
+#define A5XX_SP_FS_CONTROL_REG_SHADEROBJOFFSET__MASK		0x00007f00
+#define A5XX_SP_FS_CONTROL_REG_SHADEROBJOFFSET__SHIFT		8
+static inline uint32_t A5XX_SP_FS_CONTROL_REG_SHADEROBJOFFSET(uint32_t val)
+{
+	return ((val) << A5XX_SP_FS_CONTROL_REG_SHADEROBJOFFSET__SHIFT) & A5XX_SP_FS_CONTROL_REG_SHADEROBJOFFSET__MASK;
+}
+
+#define REG_A5XX_SP_HS_CONTROL_REG				0x0000e586
+#define A5XX_SP_HS_CONTROL_REG_ENABLED				0x00000001
+#define A5XX_SP_HS_CONTROL_REG_CONSTOBJECTOFFSET__MASK		0x000000fe
+#define A5XX_SP_HS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT		1
+static inline uint32_t A5XX_SP_HS_CONTROL_REG_CONSTOBJECTOFFSET(uint32_t val)
+{
+	return ((val) << A5XX_SP_HS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT) & A5XX_SP_HS_CONTROL_REG_CONSTOBJECTOFFSET__MASK;
+}
+#define A5XX_SP_HS_CONTROL_REG_SHADEROBJOFFSET__MASK		0x00007f00
+#define A5XX_SP_HS_CONTROL_REG_SHADEROBJOFFSET__SHIFT		8
+static inline uint32_t A5XX_SP_HS_CONTROL_REG_SHADEROBJOFFSET(uint32_t val)
+{
+	return ((val) << A5XX_SP_HS_CONTROL_REG_SHADEROBJOFFSET__SHIFT) & A5XX_SP_HS_CONTROL_REG_SHADEROBJOFFSET__MASK;
+}
+
+#define REG_A5XX_SP_DS_CONTROL_REG				0x0000e587
+#define A5XX_SP_DS_CONTROL_REG_ENABLED				0x00000001
+#define A5XX_SP_DS_CONTROL_REG_CONSTOBJECTOFFSET__MASK		0x000000fe
+#define A5XX_SP_DS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT		1
+static inline uint32_t A5XX_SP_DS_CONTROL_REG_CONSTOBJECTOFFSET(uint32_t val)
+{
+	return ((val) << A5XX_SP_DS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT) & A5XX_SP_DS_CONTROL_REG_CONSTOBJECTOFFSET__MASK;
+}
+#define A5XX_SP_DS_CONTROL_REG_SHADEROBJOFFSET__MASK		0x00007f00
+#define A5XX_SP_DS_CONTROL_REG_SHADEROBJOFFSET__SHIFT		8
+static inline uint32_t A5XX_SP_DS_CONTROL_REG_SHADEROBJOFFSET(uint32_t val)
+{
+	return ((val) << A5XX_SP_DS_CONTROL_REG_SHADEROBJOFFSET__SHIFT) & A5XX_SP_DS_CONTROL_REG_SHADEROBJOFFSET__MASK;
+}
+
+#define REG_A5XX_SP_GS_CONTROL_REG				0x0000e588
+#define A5XX_SP_GS_CONTROL_REG_ENABLED				0x00000001
+#define A5XX_SP_GS_CONTROL_REG_CONSTOBJECTOFFSET__MASK		0x000000fe
+#define A5XX_SP_GS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT		1
+static inline uint32_t A5XX_SP_GS_CONTROL_REG_CONSTOBJECTOFFSET(uint32_t val)
+{
+	return ((val) << A5XX_SP_GS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT) & A5XX_SP_GS_CONTROL_REG_CONSTOBJECTOFFSET__MASK;
+}
+#define A5XX_SP_GS_CONTROL_REG_SHADEROBJOFFSET__MASK		0x00007f00
+#define A5XX_SP_GS_CONTROL_REG_SHADEROBJOFFSET__SHIFT		8
+static inline uint32_t A5XX_SP_GS_CONTROL_REG_SHADEROBJOFFSET(uint32_t val)
+{
+	return ((val) << A5XX_SP_GS_CONTROL_REG_SHADEROBJOFFSET__SHIFT) & A5XX_SP_GS_CONTROL_REG_SHADEROBJOFFSET__MASK;
+}
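+
+/*
+ * SP_VS/FS/HS/DS/GS_CONTROL_REG all share one layout: ENABLED in bit 0,
+ * CONSTOBJECTOFFSET in bits [7:1] and SHADEROBJOFFSET in bits [14:8]; the
+ * HLSQ_*_CONTROL_REG registers further down repeat it once per stage.
+ */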
+
+#define REG_A5XX_SP_CS_CONFIG					0x0000e589
+
+#define REG_A5XX_SP_VS_CONFIG_MAX_CONST				0x0000e58a
+
+#define REG_A5XX_SP_FS_CONFIG_MAX_CONST				0x0000e58b
+
+#define REG_A5XX_SP_VS_CTRL_REG0				0x0000e590
+#define A5XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__MASK		0x000003f0
+#define A5XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT		4
+static inline uint32_t A5XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT(uint32_t val)
+{
+	return ((val) << A5XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT) & A5XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__MASK;
+}
+#define A5XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__MASK		0x0000fc00
+#define A5XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT		10
+static inline uint32_t A5XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT(uint32_t val)
+{
+	return ((val) << A5XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT) & A5XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__MASK;
+}
+#define A5XX_SP_VS_CTRL_REG0_VARYING				0x00010000
+#define A5XX_SP_VS_CTRL_REG0_PIXLODENABLE			0x00100000
+
+static inline uint32_t REG_A5XX_SP_VS_OUT(uint32_t i0) { return 0x0000e593 + 0x1*i0; }
+
+static inline uint32_t REG_A5XX_SP_VS_OUT_REG(uint32_t i0) { return 0x0000e593 + 0x1*i0; }
+#define A5XX_SP_VS_OUT_REG_A_REGID__MASK			0x000000ff
+#define A5XX_SP_VS_OUT_REG_A_REGID__SHIFT			0
+static inline uint32_t A5XX_SP_VS_OUT_REG_A_REGID(uint32_t val)
+{
+	return ((val) << A5XX_SP_VS_OUT_REG_A_REGID__SHIFT) & A5XX_SP_VS_OUT_REG_A_REGID__MASK;
+}
+#define A5XX_SP_VS_OUT_REG_A_COMPMASK__MASK			0x00000f00
+#define A5XX_SP_VS_OUT_REG_A_COMPMASK__SHIFT			8
+static inline uint32_t A5XX_SP_VS_OUT_REG_A_COMPMASK(uint32_t val)
+{
+	return ((val) << A5XX_SP_VS_OUT_REG_A_COMPMASK__SHIFT) & A5XX_SP_VS_OUT_REG_A_COMPMASK__MASK;
+}
+#define A5XX_SP_VS_OUT_REG_B_REGID__MASK			0x00ff0000
+#define A5XX_SP_VS_OUT_REG_B_REGID__SHIFT			16
+static inline uint32_t A5XX_SP_VS_OUT_REG_B_REGID(uint32_t val)
+{
+	return ((val) << A5XX_SP_VS_OUT_REG_B_REGID__SHIFT) & A5XX_SP_VS_OUT_REG_B_REGID__MASK;
+}
+#define A5XX_SP_VS_OUT_REG_B_COMPMASK__MASK			0x0f000000
+#define A5XX_SP_VS_OUT_REG_B_COMPMASK__SHIFT			24
+static inline uint32_t A5XX_SP_VS_OUT_REG_B_COMPMASK(uint32_t val)
+{
+	return ((val) << A5XX_SP_VS_OUT_REG_B_COMPMASK__SHIFT) & A5XX_SP_VS_OUT_REG_B_COMPMASK__MASK;
+}
+
+static inline uint32_t REG_A5XX_SP_VS_VPC_DST(uint32_t i0) { return 0x0000e5a3 + 0x1*i0; }
+
+static inline uint32_t REG_A5XX_SP_VS_VPC_DST_REG(uint32_t i0) { return 0x0000e5a3 + 0x1*i0; }
+#define A5XX_SP_VS_VPC_DST_REG_OUTLOC0__MASK			0x000000ff
+#define A5XX_SP_VS_VPC_DST_REG_OUTLOC0__SHIFT			0
+static inline uint32_t A5XX_SP_VS_VPC_DST_REG_OUTLOC0(uint32_t val)
+{
+	return ((val) << A5XX_SP_VS_VPC_DST_REG_OUTLOC0__SHIFT) & A5XX_SP_VS_VPC_DST_REG_OUTLOC0__MASK;
+}
+#define A5XX_SP_VS_VPC_DST_REG_OUTLOC1__MASK			0x0000ff00
+#define A5XX_SP_VS_VPC_DST_REG_OUTLOC1__SHIFT			8
+static inline uint32_t A5XX_SP_VS_VPC_DST_REG_OUTLOC1(uint32_t val)
+{
+	return ((val) << A5XX_SP_VS_VPC_DST_REG_OUTLOC1__SHIFT) & A5XX_SP_VS_VPC_DST_REG_OUTLOC1__MASK;
+}
+#define A5XX_SP_VS_VPC_DST_REG_OUTLOC2__MASK			0x00ff0000
+#define A5XX_SP_VS_VPC_DST_REG_OUTLOC2__SHIFT			16
+static inline uint32_t A5XX_SP_VS_VPC_DST_REG_OUTLOC2(uint32_t val)
+{
+	return ((val) << A5XX_SP_VS_VPC_DST_REG_OUTLOC2__SHIFT) & A5XX_SP_VS_VPC_DST_REG_OUTLOC2__MASK;
+}
+#define A5XX_SP_VS_VPC_DST_REG_OUTLOC3__MASK			0xff000000
+#define A5XX_SP_VS_VPC_DST_REG_OUTLOC3__SHIFT			24
+static inline uint32_t A5XX_SP_VS_VPC_DST_REG_OUTLOC3(uint32_t val)
+{
+	return ((val) << A5XX_SP_VS_VPC_DST_REG_OUTLOC3__SHIFT) & A5XX_SP_VS_VPC_DST_REG_OUTLOC3__MASK;
+}
+
+#define REG_A5XX_SP_VS_OBJ_START_LO				0x0000e5ac
+
+#define REG_A5XX_SP_VS_OBJ_START_HI				0x0000e5ad
+
+#define REG_A5XX_SP_FS_CTRL_REG0				0x0000e5c0
+#define A5XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT__MASK		0x000003f0
+#define A5XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT		4
+static inline uint32_t A5XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT(uint32_t val)
+{
+	return ((val) << A5XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT) & A5XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT__MASK;
+}
+#define A5XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__MASK		0x0000fc00
+#define A5XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT		10
+static inline uint32_t A5XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT(uint32_t val)
+{
+	return ((val) << A5XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT) & A5XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__MASK;
+}
+#define A5XX_SP_FS_CTRL_REG0_VARYING				0x00010000
+#define A5XX_SP_FS_CTRL_REG0_PIXLODENABLE			0x00100000
+
+#define REG_A5XX_SP_FS_OBJ_START_LO				0x0000e5c3
+
+#define REG_A5XX_SP_FS_OBJ_START_HI				0x0000e5c4
+
+#define REG_A5XX_SP_BLEND_CNTL					0x0000e5c9
+
+#define REG_A5XX_SP_FS_OUTPUT_CNTL				0x0000e5ca
+#define A5XX_SP_FS_OUTPUT_CNTL_MRT__MASK			0x0000000f
+#define A5XX_SP_FS_OUTPUT_CNTL_MRT__SHIFT			0
+static inline uint32_t A5XX_SP_FS_OUTPUT_CNTL_MRT(uint32_t val)
+{
+	return ((val) << A5XX_SP_FS_OUTPUT_CNTL_MRT__SHIFT) & A5XX_SP_FS_OUTPUT_CNTL_MRT__MASK;
+}
+#define A5XX_SP_FS_OUTPUT_CNTL_DEPTH_REGID__MASK		0x00001fe0
+#define A5XX_SP_FS_OUTPUT_CNTL_DEPTH_REGID__SHIFT		5
+static inline uint32_t A5XX_SP_FS_OUTPUT_CNTL_DEPTH_REGID(uint32_t val)
+{
+	return ((val) << A5XX_SP_FS_OUTPUT_CNTL_DEPTH_REGID__SHIFT) & A5XX_SP_FS_OUTPUT_CNTL_DEPTH_REGID__MASK;
+}
+#define A5XX_SP_FS_OUTPUT_CNTL_SAMPLEMASK_REGID__MASK		0x001fe000
+#define A5XX_SP_FS_OUTPUT_CNTL_SAMPLEMASK_REGID__SHIFT		13
+static inline uint32_t A5XX_SP_FS_OUTPUT_CNTL_SAMPLEMASK_REGID(uint32_t val)
+{
+	return ((val) << A5XX_SP_FS_OUTPUT_CNTL_SAMPLEMASK_REGID__SHIFT) & A5XX_SP_FS_OUTPUT_CNTL_SAMPLEMASK_REGID__MASK;
+}
+
+static inline uint32_t REG_A5XX_SP_FS_OUTPUT(uint32_t i0) { return 0x0000e5cb + 0x1*i0; }
+
+static inline uint32_t REG_A5XX_SP_FS_OUTPUT_REG(uint32_t i0) { return 0x0000e5cb + 0x1*i0; }
+#define A5XX_SP_FS_OUTPUT_REG_REGID__MASK			0x000000ff
+#define A5XX_SP_FS_OUTPUT_REG_REGID__SHIFT			0
+static inline uint32_t A5XX_SP_FS_OUTPUT_REG_REGID(uint32_t val)
+{
+	return ((val) << A5XX_SP_FS_OUTPUT_REG_REGID__SHIFT) & A5XX_SP_FS_OUTPUT_REG_REGID__MASK;
+}
+#define A5XX_SP_FS_OUTPUT_REG_HALF_PRECISION			0x00000100
+
+static inline uint32_t REG_A5XX_SP_FS_MRT(uint32_t i0) { return 0x0000e5d3 + 0x1*i0; }
+
+static inline uint32_t REG_A5XX_SP_FS_MRT_REG(uint32_t i0) { return 0x0000e5d3 + 0x1*i0; }
+#define A5XX_SP_FS_MRT_REG_COLOR_FORMAT__MASK			0x0000007f
+#define A5XX_SP_FS_MRT_REG_COLOR_FORMAT__SHIFT			0
+static inline uint32_t A5XX_SP_FS_MRT_REG_COLOR_FORMAT(enum a5xx_color_fmt val)
+{
+	return ((val) << A5XX_SP_FS_MRT_REG_COLOR_FORMAT__SHIFT) & A5XX_SP_FS_MRT_REG_COLOR_FORMAT__MASK;
+}
+
+#define REG_A5XX_SP_CS_CNTL_0					0x0000e5f0
+
+#define REG_A5XX_TPL1_TP_RAS_MSAA_CNTL				0x0000e704
+
+#define REG_A5XX_TPL1_TP_DEST_MSAA_CNTL				0x0000e705
+
+#define REG_A5XX_TPL1_VS_TEX_SAMP_LO				0x0000e722
+
+#define REG_A5XX_TPL1_VS_TEX_SAMP_HI				0x0000e723
+
+#define REG_A5XX_TPL1_VS_TEX_CONST_LO				0x0000e72a
+
+#define REG_A5XX_TPL1_VS_TEX_CONST_HI				0x0000e72b
+
+#define REG_A5XX_TPL1_FS_TEX_CONST_LO				0x0000e75a
+
+#define REG_A5XX_TPL1_FS_TEX_CONST_HI				0x0000e75b
+
+#define REG_A5XX_TPL1_FS_TEX_SAMP_LO				0x0000e75e
+
+#define REG_A5XX_TPL1_FS_TEX_SAMP_HI				0x0000e75f
+
+#define REG_A5XX_TPL1_TP_FS_ROTATION_CNTL			0x0000e764
+
+#define REG_A5XX_HLSQ_CONTROL_0_REG				0x0000e784
+
+#define REG_A5XX_HLSQ_CONTROL_1_REG				0x0000e785
+
+#define REG_A5XX_HLSQ_CONTROL_2_REG				0x0000e786
+#define A5XX_HLSQ_CONTROL_2_REG_FACEREGID__MASK			0x000000ff
+#define A5XX_HLSQ_CONTROL_2_REG_FACEREGID__SHIFT		0
+static inline uint32_t A5XX_HLSQ_CONTROL_2_REG_FACEREGID(uint32_t val)
+{
+	return ((val) << A5XX_HLSQ_CONTROL_2_REG_FACEREGID__SHIFT) & A5XX_HLSQ_CONTROL_2_REG_FACEREGID__MASK;
+}
+
+#define REG_A5XX_HLSQ_CONTROL_3_REG				0x0000e787
+#define A5XX_HLSQ_CONTROL_3_REG_REGID__MASK			0x000000ff
+#define A5XX_HLSQ_CONTROL_3_REG_REGID__SHIFT			0
+static inline uint32_t A5XX_HLSQ_CONTROL_3_REG_REGID(uint32_t val)
+{
+	return ((val) << A5XX_HLSQ_CONTROL_3_REG_REGID__SHIFT) & A5XX_HLSQ_CONTROL_3_REG_REGID__MASK;
+}
+
+#define REG_A5XX_HLSQ_CONTROL_4_REG				0x0000e788
+#define A5XX_HLSQ_CONTROL_4_REG_XYCOORDREGID__MASK		0x00ff0000
+#define A5XX_HLSQ_CONTROL_4_REG_XYCOORDREGID__SHIFT		16
+static inline uint32_t A5XX_HLSQ_CONTROL_4_REG_XYCOORDREGID(uint32_t val)
+{
+	return ((val) << A5XX_HLSQ_CONTROL_4_REG_XYCOORDREGID__SHIFT) & A5XX_HLSQ_CONTROL_4_REG_XYCOORDREGID__MASK;
+}
+#define A5XX_HLSQ_CONTROL_4_REG_ZWCOORDREGID__MASK		0xff000000
+#define A5XX_HLSQ_CONTROL_4_REG_ZWCOORDREGID__SHIFT		24
+static inline uint32_t A5XX_HLSQ_CONTROL_4_REG_ZWCOORDREGID(uint32_t val)
+{
+	return ((val) << A5XX_HLSQ_CONTROL_4_REG_ZWCOORDREGID__SHIFT) & A5XX_HLSQ_CONTROL_4_REG_ZWCOORDREGID__MASK;
+}
+
+#define REG_A5XX_HLSQ_UPDATE_CNTL				0x0000e78a
+
+#define REG_A5XX_HLSQ_VS_CONTROL_REG				0x0000e78b
+#define A5XX_HLSQ_VS_CONTROL_REG_ENABLED			0x00000001
+#define A5XX_HLSQ_VS_CONTROL_REG_CONSTOBJECTOFFSET__MASK	0x000000fe
+#define A5XX_HLSQ_VS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT	1
+static inline uint32_t A5XX_HLSQ_VS_CONTROL_REG_CONSTOBJECTOFFSET(uint32_t val)
+{
+	return ((val) << A5XX_HLSQ_VS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT) & A5XX_HLSQ_VS_CONTROL_REG_CONSTOBJECTOFFSET__MASK;
+}
+#define A5XX_HLSQ_VS_CONTROL_REG_SHADEROBJOFFSET__MASK		0x00007f00
+#define A5XX_HLSQ_VS_CONTROL_REG_SHADEROBJOFFSET__SHIFT		8
+static inline uint32_t A5XX_HLSQ_VS_CONTROL_REG_SHADEROBJOFFSET(uint32_t val)
+{
+	return ((val) << A5XX_HLSQ_VS_CONTROL_REG_SHADEROBJOFFSET__SHIFT) & A5XX_HLSQ_VS_CONTROL_REG_SHADEROBJOFFSET__MASK;
+}
+
+#define REG_A5XX_HLSQ_FS_CONTROL_REG				0x0000e78c
+#define A5XX_HLSQ_FS_CONTROL_REG_ENABLED			0x00000001
+#define A5XX_HLSQ_FS_CONTROL_REG_CONSTOBJECTOFFSET__MASK	0x000000fe
+#define A5XX_HLSQ_FS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT	1
+static inline uint32_t A5XX_HLSQ_FS_CONTROL_REG_CONSTOBJECTOFFSET(uint32_t val)
+{
+	return ((val) << A5XX_HLSQ_FS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT) & A5XX_HLSQ_FS_CONTROL_REG_CONSTOBJECTOFFSET__MASK;
+}
+#define A5XX_HLSQ_FS_CONTROL_REG_SHADEROBJOFFSET__MASK		0x00007f00
+#define A5XX_HLSQ_FS_CONTROL_REG_SHADEROBJOFFSET__SHIFT		8
+static inline uint32_t A5XX_HLSQ_FS_CONTROL_REG_SHADEROBJOFFSET(uint32_t val)
+{
+	return ((val) << A5XX_HLSQ_FS_CONTROL_REG_SHADEROBJOFFSET__SHIFT) & A5XX_HLSQ_FS_CONTROL_REG_SHADEROBJOFFSET__MASK;
+}
+
+#define REG_A5XX_HLSQ_HS_CONTROL_REG				0x0000e78d
+#define A5XX_HLSQ_HS_CONTROL_REG_ENABLED			0x00000001
+#define A5XX_HLSQ_HS_CONTROL_REG_CONSTOBJECTOFFSET__MASK	0x000000fe
+#define A5XX_HLSQ_HS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT	1
+static inline uint32_t A5XX_HLSQ_HS_CONTROL_REG_CONSTOBJECTOFFSET(uint32_t val)
+{
+	return ((val) << A5XX_HLSQ_HS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT) & A5XX_HLSQ_HS_CONTROL_REG_CONSTOBJECTOFFSET__MASK;
+}
+#define A5XX_HLSQ_HS_CONTROL_REG_SHADEROBJOFFSET__MASK		0x00007f00
+#define A5XX_HLSQ_HS_CONTROL_REG_SHADEROBJOFFSET__SHIFT		8
+static inline uint32_t A5XX_HLSQ_HS_CONTROL_REG_SHADEROBJOFFSET(uint32_t val)
+{
+	return ((val) << A5XX_HLSQ_HS_CONTROL_REG_SHADEROBJOFFSET__SHIFT) & A5XX_HLSQ_HS_CONTROL_REG_SHADEROBJOFFSET__MASK;
+}
+
+#define REG_A5XX_HLSQ_DS_CONTROL_REG				0x0000e78e
+#define A5XX_HLSQ_DS_CONTROL_REG_ENABLED			0x00000001
+#define A5XX_HLSQ_DS_CONTROL_REG_CONSTOBJECTOFFSET__MASK	0x000000fe
+#define A5XX_HLSQ_DS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT	1
+static inline uint32_t A5XX_HLSQ_DS_CONTROL_REG_CONSTOBJECTOFFSET(uint32_t val)
+{
+	return ((val) << A5XX_HLSQ_DS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT) & A5XX_HLSQ_DS_CONTROL_REG_CONSTOBJECTOFFSET__MASK;
+}
+#define A5XX_HLSQ_DS_CONTROL_REG_SHADEROBJOFFSET__MASK		0x00007f00
+#define A5XX_HLSQ_DS_CONTROL_REG_SHADEROBJOFFSET__SHIFT		8
+static inline uint32_t A5XX_HLSQ_DS_CONTROL_REG_SHADEROBJOFFSET(uint32_t val)
+{
+	return ((val) << A5XX_HLSQ_DS_CONTROL_REG_SHADEROBJOFFSET__SHIFT) & A5XX_HLSQ_DS_CONTROL_REG_SHADEROBJOFFSET__MASK;
+}
+
+#define REG_A5XX_HLSQ_GS_CONTROL_REG				0x0000e78f
+#define A5XX_HLSQ_GS_CONTROL_REG_ENABLED			0x00000001
+#define A5XX_HLSQ_GS_CONTROL_REG_CONSTOBJECTOFFSET__MASK	0x000000fe
+#define A5XX_HLSQ_GS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT	1
+static inline uint32_t A5XX_HLSQ_GS_CONTROL_REG_CONSTOBJECTOFFSET(uint32_t val)
+{
+	return ((val) << A5XX_HLSQ_GS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT) & A5XX_HLSQ_GS_CONTROL_REG_CONSTOBJECTOFFSET__MASK;
+}
+#define A5XX_HLSQ_GS_CONTROL_REG_SHADEROBJOFFSET__MASK		0x00007f00
+#define A5XX_HLSQ_GS_CONTROL_REG_SHADEROBJOFFSET__SHIFT		8
+static inline uint32_t A5XX_HLSQ_GS_CONTROL_REG_SHADEROBJOFFSET(uint32_t val)
+{
+	return ((val) << A5XX_HLSQ_GS_CONTROL_REG_SHADEROBJOFFSET__SHIFT) & A5XX_HLSQ_GS_CONTROL_REG_SHADEROBJOFFSET__MASK;
+}
+
+#define REG_A5XX_HLSQ_CS_CONFIG					0x0000e790
+
+#define REG_A5XX_HLSQ_VS_CNTL					0x0000e791
+
+#define REG_A5XX_HLSQ_FS_CNTL					0x0000e792
+
+#define REG_A5XX_HLSQ_CS_CNTL					0x0000e796
+
+#define REG_A5XX_HLSQ_CS_KERNEL_GROUP_X				0x0000e7b9
+
+#define REG_A5XX_HLSQ_CS_KERNEL_GROUP_Y				0x0000e7ba
+
+#define REG_A5XX_HLSQ_CS_KERNEL_GROUP_Z				0x0000e7bb
+
+#define REG_A5XX_HLSQ_CS_NDRANGE_0				0x0000e7b0
+
+#define REG_A5XX_HLSQ_CS_NDRANGE_1				0x0000e7b1
+
+#define REG_A5XX_HLSQ_CS_NDRANGE_2				0x0000e7b2
+
+#define REG_A5XX_HLSQ_CS_NDRANGE_3				0x0000e7b3
+
+#define REG_A5XX_HLSQ_CS_NDRANGE_4				0x0000e7b4
+
+#define REG_A5XX_HLSQ_CS_NDRANGE_5				0x0000e7b5
+
+#define REG_A5XX_HLSQ_CS_NDRANGE_6				0x0000e7b6
+
+#define REG_A5XX_HLSQ_CS_CNTL_0					0x0000e7b7
+
+#define REG_A5XX_HLSQ_CS_CNTL_1					0x0000e7b8
+
+#define REG_A5XX_HLSQ_VS_CONSTLEN				0x0000e7c3
+
+#define REG_A5XX_HLSQ_VS_INSTRLEN				0x0000e7c4
+
+#define REG_A5XX_HLSQ_FS_CONSTLEN				0x0000e7d7
+
+#define REG_A5XX_HLSQ_FS_INSTRLEN				0x0000e7d8
+
+#define REG_A5XX_HLSQ_CONTEXT_SWITCH_CS_SW_3			0x0000e7dc
+
+#define REG_A5XX_HLSQ_CONTEXT_SWITCH_CS_SW_4			0x0000e7dd
+
+#define REG_A5XX_TEX_SAMP_0					0x00000000
+#define A5XX_TEX_SAMP_0_MIPFILTER_LINEAR_NEAR			0x00000001
+#define A5XX_TEX_SAMP_0_XY_MAG__MASK				0x00000006
+#define A5XX_TEX_SAMP_0_XY_MAG__SHIFT				1
+static inline uint32_t A5XX_TEX_SAMP_0_XY_MAG(enum a5xx_tex_filter val)
+{
+	return ((val) << A5XX_TEX_SAMP_0_XY_MAG__SHIFT) & A5XX_TEX_SAMP_0_XY_MAG__MASK;
+}
+#define A5XX_TEX_SAMP_0_XY_MIN__MASK				0x00000018
+#define A5XX_TEX_SAMP_0_XY_MIN__SHIFT				3
+static inline uint32_t A5XX_TEX_SAMP_0_XY_MIN(enum a5xx_tex_filter val)
+{
+	return ((val) << A5XX_TEX_SAMP_0_XY_MIN__SHIFT) & A5XX_TEX_SAMP_0_XY_MIN__MASK;
+}
+#define A5XX_TEX_SAMP_0_WRAP_S__MASK				0x000000e0
+#define A5XX_TEX_SAMP_0_WRAP_S__SHIFT				5
+static inline uint32_t A5XX_TEX_SAMP_0_WRAP_S(enum a5xx_tex_clamp val)
+{
+	return ((val) << A5XX_TEX_SAMP_0_WRAP_S__SHIFT) & A5XX_TEX_SAMP_0_WRAP_S__MASK;
+}
+#define A5XX_TEX_SAMP_0_WRAP_T__MASK				0x00000700
+#define A5XX_TEX_SAMP_0_WRAP_T__SHIFT				8
+static inline uint32_t A5XX_TEX_SAMP_0_WRAP_T(enum a5xx_tex_clamp val)
+{
+	return ((val) << A5XX_TEX_SAMP_0_WRAP_T__SHIFT) & A5XX_TEX_SAMP_0_WRAP_T__MASK;
+}
+#define A5XX_TEX_SAMP_0_WRAP_R__MASK				0x00003800
+#define A5XX_TEX_SAMP_0_WRAP_R__SHIFT				11
+static inline uint32_t A5XX_TEX_SAMP_0_WRAP_R(enum a5xx_tex_clamp val)
+{
+	return ((val) << A5XX_TEX_SAMP_0_WRAP_R__SHIFT) & A5XX_TEX_SAMP_0_WRAP_R__MASK;
+}
+#define A5XX_TEX_SAMP_0_ANISO__MASK				0x0001c000
+#define A5XX_TEX_SAMP_0_ANISO__SHIFT				14
+static inline uint32_t A5XX_TEX_SAMP_0_ANISO(enum a5xx_tex_aniso val)
+{
+	return ((val) << A5XX_TEX_SAMP_0_ANISO__SHIFT) & A5XX_TEX_SAMP_0_ANISO__MASK;
+}
+#define A5XX_TEX_SAMP_0_LOD_BIAS__MASK				0xfff80000
+#define A5XX_TEX_SAMP_0_LOD_BIAS__SHIFT				19
+static inline uint32_t A5XX_TEX_SAMP_0_LOD_BIAS(float val)
+{
+	return ((((int32_t)(val * 256.0))) << A5XX_TEX_SAMP_0_LOD_BIAS__SHIFT) & A5XX_TEX_SAMP_0_LOD_BIAS__MASK;
+}
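+
+/*
+ * The field helpers above all follow the same pack pattern: shift the
+ * value into place, then mask off anything that would overflow the
+ * field. A sampler word is built by OR-ing packed fields together;
+ * illustrative sketch only, assuming the a5xx_tex_filter/a5xx_tex_clamp
+ * enumerators named below are defined earlier in this header:
+ *
+ *	uint32_t samp0 = A5XX_TEX_SAMP_0_XY_MAG(A5XX_TEX_LINEAR) |
+ *			 A5XX_TEX_SAMP_0_XY_MIN(A5XX_TEX_LINEAR) |
+ *			 A5XX_TEX_SAMP_0_WRAP_S(A5XX_TEX_REPEAT) |
+ *			 A5XX_TEX_SAMP_0_LOD_BIAS(-0.5f);
+ */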
+
+#define REG_A5XX_TEX_SAMP_1					0x00000001
+#define A5XX_TEX_SAMP_1_COMPARE_FUNC__MASK			0x0000000e
+#define A5XX_TEX_SAMP_1_COMPARE_FUNC__SHIFT			1
+static inline uint32_t A5XX_TEX_SAMP_1_COMPARE_FUNC(enum adreno_compare_func val)
+{
+	return ((val) << A5XX_TEX_SAMP_1_COMPARE_FUNC__SHIFT) & A5XX_TEX_SAMP_1_COMPARE_FUNC__MASK;
+}
+#define A5XX_TEX_SAMP_1_CUBEMAPSEAMLESSFILTOFF			0x00000010
+#define A5XX_TEX_SAMP_1_UNNORM_COORDS				0x00000020
+#define A5XX_TEX_SAMP_1_MIPFILTER_LINEAR_FAR			0x00000040
+#define A5XX_TEX_SAMP_1_MAX_LOD__MASK				0x000fff00
+#define A5XX_TEX_SAMP_1_MAX_LOD__SHIFT				8
+static inline uint32_t A5XX_TEX_SAMP_1_MAX_LOD(float val)
+{
+	return ((((uint32_t)(val * 256.0))) << A5XX_TEX_SAMP_1_MAX_LOD__SHIFT) & A5XX_TEX_SAMP_1_MAX_LOD__MASK;
+}
+#define A5XX_TEX_SAMP_1_MIN_LOD__MASK				0xfff00000
+#define A5XX_TEX_SAMP_1_MIN_LOD__SHIFT				20
+static inline uint32_t A5XX_TEX_SAMP_1_MIN_LOD(float val)
+{
+	return ((((uint32_t)(val * 256.0))) << A5XX_TEX_SAMP_1_MIN_LOD__SHIFT) & A5XX_TEX_SAMP_1_MIN_LOD__MASK;
+}
+
+#define REG_A5XX_TEX_SAMP_2					0x00000002
+
+#define REG_A5XX_TEX_SAMP_3					0x00000003
+
+#define REG_A5XX_TEX_CONST_0					0x00000000
+#define A5XX_TEX_CONST_0_TILED					0x00000001
+#define A5XX_TEX_CONST_0_SRGB					0x00000004
+#define A5XX_TEX_CONST_0_SWIZ_X__MASK				0x00000070
+#define A5XX_TEX_CONST_0_SWIZ_X__SHIFT				4
+static inline uint32_t A5XX_TEX_CONST_0_SWIZ_X(enum a5xx_tex_swiz val)
+{
+	return ((val) << A5XX_TEX_CONST_0_SWIZ_X__SHIFT) & A5XX_TEX_CONST_0_SWIZ_X__MASK;
+}
+#define A5XX_TEX_CONST_0_SWIZ_Y__MASK				0x00000380
+#define A5XX_TEX_CONST_0_SWIZ_Y__SHIFT				7
+static inline uint32_t A5XX_TEX_CONST_0_SWIZ_Y(enum a5xx_tex_swiz val)
+{
+	return ((val) << A5XX_TEX_CONST_0_SWIZ_Y__SHIFT) & A5XX_TEX_CONST_0_SWIZ_Y__MASK;
+}
+#define A5XX_TEX_CONST_0_SWIZ_Z__MASK				0x00001c00
+#define A5XX_TEX_CONST_0_SWIZ_Z__SHIFT				10
+static inline uint32_t A5XX_TEX_CONST_0_SWIZ_Z(enum a5xx_tex_swiz val)
+{
+	return ((val) << A5XX_TEX_CONST_0_SWIZ_Z__SHIFT) & A5XX_TEX_CONST_0_SWIZ_Z__MASK;
+}
+#define A5XX_TEX_CONST_0_SWIZ_W__MASK				0x0000e000
+#define A5XX_TEX_CONST_0_SWIZ_W__SHIFT				13
+static inline uint32_t A5XX_TEX_CONST_0_SWIZ_W(enum a5xx_tex_swiz val)
+{
+	return ((val) << A5XX_TEX_CONST_0_SWIZ_W__SHIFT) & A5XX_TEX_CONST_0_SWIZ_W__MASK;
+}
+#define A5XX_TEX_CONST_0_FMT__MASK				0x3fc00000
+#define A5XX_TEX_CONST_0_FMT__SHIFT				22
+static inline uint32_t A5XX_TEX_CONST_0_FMT(enum a5xx_tex_fmt val)
+{
+	return ((val) << A5XX_TEX_CONST_0_FMT__SHIFT) & A5XX_TEX_CONST_0_FMT__MASK;
+}
+
+#define REG_A5XX_TEX_CONST_1					0x00000001
+#define A5XX_TEX_CONST_1_WIDTH__MASK				0x00007fff
+#define A5XX_TEX_CONST_1_WIDTH__SHIFT				0
+static inline uint32_t A5XX_TEX_CONST_1_WIDTH(uint32_t val)
+{
+	return ((val) << A5XX_TEX_CONST_1_WIDTH__SHIFT) & A5XX_TEX_CONST_1_WIDTH__MASK;
+}
+#define A5XX_TEX_CONST_1_HEIGHT__MASK				0x3fff8000
+#define A5XX_TEX_CONST_1_HEIGHT__SHIFT				15
+static inline uint32_t A5XX_TEX_CONST_1_HEIGHT(uint32_t val)
+{
+	return ((val) << A5XX_TEX_CONST_1_HEIGHT__SHIFT) & A5XX_TEX_CONST_1_HEIGHT__MASK;
+}
+
+#define REG_A5XX_TEX_CONST_2					0x00000002
+#define A5XX_TEX_CONST_2_FETCHSIZE__MASK			0x0000000f
+#define A5XX_TEX_CONST_2_FETCHSIZE__SHIFT			0
+static inline uint32_t A5XX_TEX_CONST_2_FETCHSIZE(enum a5xx_tex_fetchsize val)
+{
+	return ((val) << A5XX_TEX_CONST_2_FETCHSIZE__SHIFT) & A5XX_TEX_CONST_2_FETCHSIZE__MASK;
+}
+#define A5XX_TEX_CONST_2_PITCH__MASK				0x1fffff00
+#define A5XX_TEX_CONST_2_PITCH__SHIFT				8
+static inline uint32_t A5XX_TEX_CONST_2_PITCH(uint32_t val)
+{
+	return ((val) << A5XX_TEX_CONST_2_PITCH__SHIFT) & A5XX_TEX_CONST_2_PITCH__MASK;
+}
+#define A5XX_TEX_CONST_2_TYPE__MASK				0x60000000
+#define A5XX_TEX_CONST_2_TYPE__SHIFT				29
+static inline uint32_t A5XX_TEX_CONST_2_TYPE(enum a5xx_tex_type val)
+{
+	return ((val) << A5XX_TEX_CONST_2_TYPE__SHIFT) & A5XX_TEX_CONST_2_TYPE__MASK;
+}
+
+#define REG_A5XX_TEX_CONST_3					0x00000003
+#define A5XX_TEX_CONST_3_LAYERSZ__MASK				0x00003fff
+#define A5XX_TEX_CONST_3_LAYERSZ__SHIFT				0
+static inline uint32_t A5XX_TEX_CONST_3_LAYERSZ(uint32_t val)
+{
+	return ((val >> 12) << A5XX_TEX_CONST_3_LAYERSZ__SHIFT) & A5XX_TEX_CONST_3_LAYERSZ__MASK;
+}
+#define A5XX_TEX_CONST_3_LAYERSZ2__MASK				0xff800000
+#define A5XX_TEX_CONST_3_LAYERSZ2__SHIFT			23
+static inline uint32_t A5XX_TEX_CONST_3_LAYERSZ2(uint32_t val)
+{
+	return ((val >> 12) << A5XX_TEX_CONST_3_LAYERSZ2__SHIFT) & A5XX_TEX_CONST_3_LAYERSZ2__MASK;
+}
+
+#define REG_A5XX_TEX_CONST_4					0x00000004
+#define A5XX_TEX_CONST_4_BASE__MASK				0xffffffe0
+#define A5XX_TEX_CONST_4_BASE__SHIFT				5
+static inline uint32_t A5XX_TEX_CONST_4_BASE(uint32_t val)
+{
+	return ((val >> 5) << A5XX_TEX_CONST_4_BASE__SHIFT) & A5XX_TEX_CONST_4_BASE__MASK;
+}
+
+#define REG_A5XX_TEX_CONST_5					0x00000005
+#define A5XX_TEX_CONST_5_DEPTH__MASK				0x3ffe0000
+#define A5XX_TEX_CONST_5_DEPTH__SHIFT				17
+static inline uint32_t A5XX_TEX_CONST_5_DEPTH(uint32_t val)
+{
+	return ((val) << A5XX_TEX_CONST_5_DEPTH__SHIFT) & A5XX_TEX_CONST_5_DEPTH__MASK;
+}
+
+#define REG_A5XX_TEX_CONST_6					0x00000006
+
+#define REG_A5XX_TEX_CONST_7					0x00000007
+
+#define REG_A5XX_TEX_CONST_8					0x00000008
+
+#define REG_A5XX_TEX_CONST_9					0x00000009
+
+#define REG_A5XX_TEX_CONST_10					0x0000000a
+
+#define REG_A5XX_TEX_CONST_11					0x0000000b
+
+
+#endif /* A5XX_XML */
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/gpu/drm/msm/dba_bridge.c	2019-10-29 09:26:23.621202963 +0100
@@ -0,0 +1,367 @@
+/*
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <video/msm_dba.h>
+#include "drm_edid.h"
+#include "sde_kms.h"
+#include "dba_bridge.h"
+
+#undef pr_fmt
+#define pr_fmt(fmt)	"dba_bridge:[%s] " fmt, __func__
+
+/**
+ * struct dba_bridge - DBA bridge information
+ * @base:               drm_bridge base
+ * @client_name:        Name of the client that requested the init
+ * @chip_name:          Bridge chip name
+ * @id:                 Bridge driver index
+ * @display:            Private display handle
+ * @list:               Bridge chip driver list node
+ * @ops:                DBA operation container
+ * @dba_ctx:            DBA context
+ * @mode:               DRM mode info
+ * @hdmi_mode:          HDMI or DVI mode for the sink
+ * @num_of_input_lanes: Number of input lanes in case of DSI/LVDS
+ * @pluggable:          True if the attached sink is hot-pluggable
+ * @panel_count:        Number of panels attached to this display
+ */
+struct dba_bridge {
+	struct drm_bridge base;
+	char client_name[MSM_DBA_CLIENT_NAME_LEN];
+	char chip_name[MSM_DBA_CHIP_NAME_MAX_LEN];
+	u32 id;
+	void *display;
+	struct list_head list;
+	struct msm_dba_ops ops;
+	void *dba_ctx;
+	struct drm_display_mode mode;
+	bool hdmi_mode;
+	u32 num_of_input_lanes;
+	bool pluggable;
+	u32 panel_count;
+};
+#define to_dba_bridge(x)     container_of((x), struct dba_bridge, base)
+
+static void _dba_bridge_cb(void *data, enum msm_dba_callback_event event)
+{
+	struct dba_bridge *d_bridge = data;
+
+	if (!d_bridge) {
+		SDE_ERROR("Invalid data\n");
+		return;
+	}
+
+	DRM_DEBUG("event: %d\n", event);
+
+	switch (event) {
+	case MSM_DBA_CB_HPD_CONNECT:
+		DRM_DEBUG("HPD CONNECT\n");
+		break;
+	case MSM_DBA_CB_HPD_DISCONNECT:
+		DRM_DEBUG("HPD DISCONNECT\n");
+		break;
+	default:
+		DRM_DEBUG("event:%d is not supported\n", event);
+		break;
+	}
+}
+
+static int _dba_bridge_attach(struct drm_bridge *bridge)
+{
+	struct dba_bridge *d_bridge = to_dba_bridge(bridge);
+	struct msm_dba_reg_info info;
+	int ret = 0;
+
+	if (!bridge) {
+		SDE_ERROR("Invalid params\n");
+		return -EINVAL;
+	}
+
+	memset(&info, 0, sizeof(info));
+	/* initialize DBA registration data */
+	strlcpy(info.client_name, d_bridge->client_name,
+					MSM_DBA_CLIENT_NAME_LEN);
+	strlcpy(info.chip_name, d_bridge->chip_name,
+					MSM_DBA_CHIP_NAME_MAX_LEN);
+	info.instance_id = d_bridge->id;
+	info.cb = _dba_bridge_cb;
+	info.cb_data = d_bridge;
+
+	/* register client with DBA and get device's ops */
+	if (IS_ENABLED(CONFIG_MSM_DBA)) {
+		d_bridge->dba_ctx = msm_dba_register_client(&info,
+							&d_bridge->ops);
+		if (IS_ERR_OR_NULL(d_bridge->dba_ctx)) {
+			SDE_ERROR("dba register failed\n");
+			/* a NULL return also means failure; avoid ret = 0 */
+			ret = d_bridge->dba_ctx ?
+				PTR_ERR(d_bridge->dba_ctx) : -EINVAL;
+			goto error;
+		}
+	} else {
+		SDE_ERROR("DBA not enabled\n");
+		ret = -ENODEV;
+		goto error;
+	}
+
+	DRM_INFO("client:%s bridge:[%s:%d] attached\n",
+		d_bridge->client_name, d_bridge->chip_name, d_bridge->id);
+
+error:
+	return ret;
+}
+
+static void _dba_bridge_pre_enable(struct drm_bridge *bridge)
+{
+	if (!bridge) {
+		SDE_ERROR("Invalid params\n");
+		return;
+	}
+}
+
+static void _dba_bridge_enable(struct drm_bridge *bridge)
+{
+	int rc = 0;
+	struct dba_bridge *d_bridge = to_dba_bridge(bridge);
+	struct msm_dba_video_cfg video_cfg;
+	struct drm_display_mode *mode;
+	struct hdmi_avi_infoframe avi_frame;
+
+	if (!bridge) {
+		SDE_ERROR("Invalid params\n");
+		return;
+	}
+
+	memset(&video_cfg, 0, sizeof(video_cfg));
+	memset(&avi_frame, 0, sizeof(avi_frame));
+	mode = &d_bridge->mode;
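+	/*
+	 * DRM orders the horizontal timing points as
+	 * hdisplay <= hsync_start <= hsync_end <= htotal (and likewise
+	 * for the vertical ones), so the porches and pulse widths below
+	 * fall out as simple differences.
+	 */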
+	video_cfg.h_active = mode->hdisplay;
+	video_cfg.v_active = mode->vdisplay;
+	video_cfg.h_front_porch = mode->hsync_start - mode->hdisplay;
+	video_cfg.v_front_porch = mode->vsync_start - mode->vdisplay;
+	video_cfg.h_back_porch = mode->htotal - mode->hsync_end;
+	video_cfg.v_back_porch = mode->vtotal - mode->vsync_end;
+	video_cfg.h_pulse_width = mode->hsync_end - mode->hsync_start;
+	video_cfg.v_pulse_width = mode->vsync_end - mode->vsync_start;
+	video_cfg.pclk_khz = mode->clock;
+	video_cfg.hdmi_mode = d_bridge->hdmi_mode;
+	video_cfg.num_of_input_lanes = d_bridge->num_of_input_lanes;
+
+	SDE_DEBUG(
+		"video=h[%d,%d,%d,%d] v[%d,%d,%d,%d] pclk=%d hdmi=%d lane=%d\n",
+		video_cfg.h_active, video_cfg.h_front_porch,
+		video_cfg.h_pulse_width, video_cfg.h_back_porch,
+		video_cfg.v_active, video_cfg.v_front_porch,
+		video_cfg.v_pulse_width, video_cfg.v_back_porch,
+		video_cfg.pclk_khz, video_cfg.hdmi_mode,
+		video_cfg.num_of_input_lanes);
+
+	rc = drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, mode);
+	if (rc) {
+		SDE_ERROR("get avi frame failed ret=%d\n", rc);
+	} else {
+		video_cfg.scaninfo = avi_frame.scan_mode;
+		switch (avi_frame.picture_aspect) {
+		case HDMI_PICTURE_ASPECT_4_3:
+			video_cfg.ar = MSM_DBA_AR_4_3;
+			break;
+		case HDMI_PICTURE_ASPECT_16_9:
+			video_cfg.ar = MSM_DBA_AR_16_9;
+			break;
+		default:
+			break;
+		}
+		video_cfg.vic = avi_frame.video_code;
+		DRM_INFO("scaninfo=%d ar=%d vic=%d\n",
+			video_cfg.scaninfo, video_cfg.ar, video_cfg.vic);
+	}
+
+	if (d_bridge->ops.video_on) {
+		rc = d_bridge->ops.video_on(d_bridge->dba_ctx, true,
+						&video_cfg, 0);
+		if (rc)
+			SDE_ERROR("video on failed ret=%d\n", rc);
+	}
+}
+
+static void _dba_bridge_disable(struct drm_bridge *bridge)
+{
+	int rc = 0;
+	struct dba_bridge *d_bridge = to_dba_bridge(bridge);
+
+	if (!bridge) {
+		SDE_ERROR("Invalid params\n");
+		return;
+	}
+
+	if (d_bridge->ops.video_on) {
+		rc = d_bridge->ops.video_on(d_bridge->dba_ctx,
+				false, NULL, 0);
+		if (rc)
+			SDE_ERROR("video off failed ret=%d\n", rc);
+	}
+}
+
+static void _dba_bridge_post_disable(struct drm_bridge *bridge)
+{
+	int rc = 0;
+	struct dba_bridge *d_bridge = to_dba_bridge(bridge);
+
+	if (!bridge) {
+		SDE_ERROR("Invalid params\n");
+		return;
+	}
+
+	if (d_bridge->ops.power_on) {
+		rc = d_bridge->ops.power_on(d_bridge->dba_ctx, false, 0);
+		if (rc)
+			SDE_ERROR("power off failed ret=%d\n", rc);
+	}
+}
+
+static void _dba_bridge_mode_set(struct drm_bridge *bridge,
+				struct drm_display_mode *mode,
+				struct drm_display_mode *adjusted_mode)
+{
+	struct dba_bridge *d_bridge = to_dba_bridge(bridge);
+
+	if (!bridge || !mode || !adjusted_mode || !d_bridge) {
+		SDE_ERROR("Invalid params\n");
+		return;
+	} else if (!d_bridge->panel_count) {
+		SDE_ERROR("Panel count is 0\n");
+		return;
+	}
+
+	d_bridge->mode = *adjusted_mode;
+	/* Adjust mode according to number of panels */
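+	/*
+	 * e.g. with panel_count == 2, a 3840-wide mode is split so each
+	 * link drives 1920 pixels at half the pixel clock (illustrative
+	 * numbers).
+	 */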
+	d_bridge->mode.hdisplay /= d_bridge->panel_count;
+	d_bridge->mode.hsync_start /= d_bridge->panel_count;
+	d_bridge->mode.hsync_end /= d_bridge->panel_count;
+	d_bridge->mode.htotal /= d_bridge->panel_count;
+	d_bridge->mode.clock /= d_bridge->panel_count;
+}
+
+static bool _dba_bridge_mode_fixup(struct drm_bridge *bridge,
+				  const struct drm_display_mode *mode,
+				  struct drm_display_mode *adjusted_mode)
+{
+	bool ret = true;
+
+	if (!bridge || !mode || !adjusted_mode) {
+		SDE_ERROR("Invalid params\n");
+		return false;
+	}
+
+	return ret;
+}
+
+static const struct drm_bridge_funcs _dba_bridge_ops = {
+	.attach       = _dba_bridge_attach,
+	.mode_fixup   = _dba_bridge_mode_fixup,
+	.pre_enable   = _dba_bridge_pre_enable,
+	.enable       = _dba_bridge_enable,
+	.disable      = _dba_bridge_disable,
+	.post_disable = _dba_bridge_post_disable,
+	.mode_set     = _dba_bridge_mode_set,
+};
+
+struct drm_bridge *dba_bridge_init(struct drm_device *dev,
+				struct drm_encoder *encoder,
+				struct dba_bridge_init *data)
+{
+	int rc = 0;
+	struct dba_bridge *bridge;
+	struct msm_drm_private *priv = NULL;
+
+	if (!dev || !encoder || !data) {
+		SDE_ERROR("dev=%pK or encoder=%pK or data=%pK is NULL\n",
+				dev, encoder, data);
+		rc = -EINVAL;
+		goto error;
+	}
+
+	priv = dev->dev_private;
+	if (!priv) {
+		SDE_ERROR("Private data is not present\n");
+		rc = -EINVAL;
+		goto error;
+	}
+
+	bridge = kzalloc(sizeof(*bridge), GFP_KERNEL);
+	if (!bridge) {
+		SDE_ERROR("out of memory\n");
+		rc = -ENOMEM;
+		goto error;
+	}
+
+	INIT_LIST_HEAD(&bridge->list);
+	strlcpy(bridge->client_name, data->client_name,
+					MSM_DBA_CLIENT_NAME_LEN);
+	strlcpy(bridge->chip_name, data->chip_name,
+					MSM_DBA_CHIP_NAME_MAX_LEN);
+	bridge->id = data->id;
+	bridge->display = data->display;
+	bridge->hdmi_mode = data->hdmi_mode;
+	bridge->num_of_input_lanes = data->num_of_input_lanes;
+	bridge->pluggable = data->pluggable;
+	bridge->panel_count = data->panel_count;
+	bridge->base.funcs = &_dba_bridge_ops;
+	bridge->base.encoder = encoder;
+
+	rc = drm_bridge_attach(dev, &bridge->base);
+	if (rc) {
+		SDE_ERROR("failed to attach bridge, rc=%d\n", rc);
+		goto error_free_bridge;
+	}
+
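+	/*
+	 * Chain layout sketch: with a preceding bridge, the new bridge is
+	 * spliced in right after it (encoder -> precede -> this -> ...);
+	 * otherwise it becomes the encoder's first bridge.
+	 */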
+	if (data->precede_bridge) {
+		/* Insert current bridge */
+		bridge->base.next = data->precede_bridge->next;
+		data->precede_bridge->next = &bridge->base;
+	} else {
+		encoder->bridge = &bridge->base;
+	}
+
+	if (!bridge->pluggable) {
+		if (bridge->ops.power_on)
+			bridge->ops.power_on(bridge->dba_ctx, true, 0);
+		if (bridge->ops.check_hpd)
+			bridge->ops.check_hpd(bridge->dba_ctx, 0);
+	}
+
+	return &bridge->base;
+
+error_free_bridge:
+	kfree(bridge);
+error:
+	return ERR_PTR(rc);
+}
+
+void dba_bridge_cleanup(struct drm_bridge *bridge)
+{
+	struct dba_bridge *d_bridge = to_dba_bridge(bridge);
+
+	if (!bridge)
+		return;
+
+	if (IS_ENABLED(CONFIG_MSM_DBA)) {
+		if (!IS_ERR_OR_NULL(d_bridge->dba_ctx))
+			msm_dba_deregister_client(d_bridge->dba_ctx);
+	}
+
+	if (d_bridge->base.encoder)
+		d_bridge->base.encoder->bridge = NULL;
+
+	kfree(bridge);
+}
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/gpu/drm/msm/dba_bridge.h	2019-01-22 16:16:23.487246262 +0100
@@ -0,0 +1,67 @@
+/*
+ * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _DBA_BRIDGE_H_
+#define _DBA_BRIDGE_H_
+
+#include <linux/types.h>
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+
+#include "msm_drv.h"
+
+/**
+ * struct dba_bridge_init - Init parameters for DBA bridge
+ * @client_name:          Name of the client that requested the init
+ * @chip_name:            Bridge chip name
+ * @id:                   Bridge driver index
+ * @display:              Private display handle
+ * @hdmi_mode:            HDMI or DVI mode for the sink
+ * @num_of_input_lanes:   Number of input lanes in case of DSI/LVDS
+ * @precede_bridge:       Preceding bridge in the chain, if any
+ * @pluggable:            True if the attached sink is hot-pluggable
+ * @panel_count:          Number of panels attached to this display
+ */
+struct dba_bridge_init {
+	const char *client_name;
+	const char *chip_name;
+	u32 id;
+	void *display;
+	bool hdmi_mode;
+	u32 num_of_input_lanes;
+	struct drm_bridge *precede_bridge;
+	bool pluggable;
+	u32 panel_count;
+};
+
+/**
+ * dba_bridge_init - Initialize the DBA bridge
+ * @dev:           Pointer to drm device handle
+ * @encoder:       Pointer to drm encoder handle
+ * @data:          Pointer to init data
+ * Returns: pointer to struct drm_bridge, or ERR_PTR() on failure
+ */
+struct drm_bridge *dba_bridge_init(struct drm_device *dev,
+				struct drm_encoder *encoder,
+				struct dba_bridge_init *data);
+
+/**
+ * dba_bridge_cleanup - Clean up the DBA bridge
+ * @bridge:           Pointer to DBA bridge handle
+ * Returns: void
+ */
+void dba_bridge_cleanup(struct drm_bridge *bridge);
+
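+/*
+ * Example usage (illustrative sketch only; "dev", "encoder" and the
+ * client/chip names are assumptions, not part of this API):
+ *
+ *	struct dba_bridge_init init_data = {
+ *		.client_name = "dsi",
+ *		.chip_name = "adv7533",
+ *		.id = 0,
+ *		.hdmi_mode = true,
+ *		.num_of_input_lanes = 4,
+ *		.panel_count = 1,
+ *	};
+ *	struct drm_bridge *bridge;
+ *
+ *	bridge = dba_bridge_init(dev, encoder, &init_data);
+ *	if (IS_ERR(bridge))
+ *		return PTR_ERR(bridge);
+ *	...
+ *	dba_bridge_cleanup(bridge);
+ */
+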
+#endif /* _DBA_BRIDGE_H_ */
diff -Nruw linux-4.4.115-fbx/drivers/gpu/drm/msm/dsi-staging./dsi_catalog.c linux-4.4.115-fbx/drivers/gpu/drm/msm/dsi-staging/dsi_catalog.c
--- linux-4.4.115-fbx/drivers/gpu/drm/msm/dsi-staging./dsi_catalog.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/gpu/drm/msm/dsi-staging/dsi_catalog.c	2019-01-22 16:16:23.487246262 +0100
@@ -0,0 +1,169 @@
+/*
+ * Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) "msm-dsi-catalog:[%s] " fmt, __func__
+#include <linux/errno.h>
+
+#include "dsi_catalog.h"
+
+/**
+ * dsi_catalog_14_init() - catalog init for dsi controller v1.4
+ */
+static void dsi_catalog_14_init(struct dsi_ctrl_hw *ctrl)
+{
+	ctrl->ops.host_setup             = dsi_ctrl_hw_14_host_setup;
+	ctrl->ops.setup_lane_map         = dsi_ctrl_hw_14_setup_lane_map;
+	ctrl->ops.video_engine_en        = dsi_ctrl_hw_14_video_engine_en;
+	ctrl->ops.video_engine_setup     = dsi_ctrl_hw_14_video_engine_setup;
+	ctrl->ops.set_video_timing       = dsi_ctrl_hw_14_set_video_timing;
+	ctrl->ops.cmd_engine_setup       = dsi_ctrl_hw_14_cmd_engine_setup;
+	ctrl->ops.setup_cmd_stream       = dsi_ctrl_hw_14_setup_cmd_stream;
+	ctrl->ops.ctrl_en                = dsi_ctrl_hw_14_ctrl_en;
+	ctrl->ops.cmd_engine_en          = dsi_ctrl_hw_14_cmd_engine_en;
+	ctrl->ops.phy_sw_reset           = dsi_ctrl_hw_14_phy_sw_reset;
+	ctrl->ops.soft_reset             = dsi_ctrl_hw_14_soft_reset;
+	ctrl->ops.kickoff_command        = dsi_ctrl_hw_14_kickoff_command;
+	ctrl->ops.kickoff_fifo_command   = dsi_ctrl_hw_14_kickoff_fifo_command;
+	ctrl->ops.reset_cmd_fifo         = dsi_ctrl_hw_14_reset_cmd_fifo;
+	ctrl->ops.trigger_command_dma    = dsi_ctrl_hw_14_trigger_command_dma;
+	ctrl->ops.ulps_request           = dsi_ctrl_hw_14_ulps_request;
+	ctrl->ops.ulps_exit              = dsi_ctrl_hw_14_ulps_exit;
+	ctrl->ops.clear_ulps_request     = dsi_ctrl_hw_14_clear_ulps_request;
+	ctrl->ops.get_lanes_in_ulps      = dsi_ctrl_hw_14_get_lanes_in_ulps;
+	ctrl->ops.clamp_enable           = dsi_ctrl_hw_14_clamp_enable;
+	ctrl->ops.clamp_disable          = dsi_ctrl_hw_14_clamp_disable;
+	ctrl->ops.get_interrupt_status   = dsi_ctrl_hw_14_get_interrupt_status;
+	ctrl->ops.get_error_status       = dsi_ctrl_hw_14_get_error_status;
+	ctrl->ops.clear_error_status     = dsi_ctrl_hw_14_clear_error_status;
+	ctrl->ops.clear_interrupt_status =
+		dsi_ctrl_hw_14_clear_interrupt_status;
+	ctrl->ops.enable_status_interrupts =
+		dsi_ctrl_hw_14_enable_status_interrupts;
+	ctrl->ops.enable_error_interrupts =
+		dsi_ctrl_hw_14_enable_error_interrupts;
+	ctrl->ops.video_test_pattern_setup =
+		dsi_ctrl_hw_14_video_test_pattern_setup;
+	ctrl->ops.cmd_test_pattern_setup =
+		dsi_ctrl_hw_14_cmd_test_pattern_setup;
+	ctrl->ops.test_pattern_enable    = dsi_ctrl_hw_14_test_pattern_enable;
+	ctrl->ops.trigger_cmd_test_pattern =
+		dsi_ctrl_hw_14_trigger_cmd_test_pattern;
+	ctrl->ops.reg_dump_to_buffer    = dsi_ctrl_hw_14_reg_dump_to_buffer;
+}
+
+/**
+ * dsi_catalog_20_init() - catalog init for dsi controller v2.0
+ */
+static void dsi_catalog_20_init(struct dsi_ctrl_hw *ctrl)
+{
+	set_bit(DSI_CTRL_CPHY, ctrl->feature_map);
+}
+
+/**
+ * dsi_catalog_ctrl_setup() - set up catalog info for dsi controller
+ * @ctrl:        Pointer to DSI controller hw object.
+ * @version:     DSI controller version.
+ * @index:       DSI controller instance ID.
+ *
+ * This function sets up the catalog information in the dsi_ctrl_hw object.
+ *
+ * return: error code for failure and 0 for success.
+ */
+int dsi_catalog_ctrl_setup(struct dsi_ctrl_hw *ctrl,
+			   enum dsi_ctrl_version version,
+			   u32 index)
+{
+	int rc = 0;
+
+	if (version == DSI_CTRL_VERSION_UNKNOWN ||
+	    version >= DSI_CTRL_VERSION_MAX) {
+		pr_err("Unsupported version: %d\n", version);
+		return -ENOTSUPP;
+	}
+
+	ctrl->index = index;
+	set_bit(DSI_CTRL_VIDEO_TPG, ctrl->feature_map);
+	set_bit(DSI_CTRL_CMD_TPG, ctrl->feature_map);
+	set_bit(DSI_CTRL_VARIABLE_REFRESH_RATE, ctrl->feature_map);
+	set_bit(DSI_CTRL_DYNAMIC_REFRESH, ctrl->feature_map);
+	set_bit(DSI_CTRL_DESKEW_CALIB, ctrl->feature_map);
+	set_bit(DSI_CTRL_DPHY, ctrl->feature_map);
+
+	switch (version) {
+	case DSI_CTRL_VERSION_1_4:
+		dsi_catalog_14_init(ctrl);
+		break;
+	case DSI_CTRL_VERSION_2_0:
+		dsi_catalog_20_init(ctrl);
+		break;
+	default:
+		return -ENOTSUPP;
+	}
+
+	return rc;
+}
+
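+/*
+ * Typical call flow (sketch; "ctrl_hw" and "host_cfg" are assumed
+ * caller-side names): the probe path resolves the controller version,
+ * asks the catalog to fill in the ops table, and then drives the
+ * hardware through those ops:
+ *
+ *	rc = dsi_catalog_ctrl_setup(ctrl_hw, DSI_CTRL_VERSION_1_4, 0);
+ *	if (rc)
+ *		return rc;
+ *	ctrl_hw->ops.host_setup(ctrl_hw, &host_cfg);
+ */
+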
+/**
+ * dsi_catalog_phy_4_0_init() - catalog init for DSI PHY v4.0
+ */
+static void dsi_catalog_phy_4_0_init(struct dsi_phy_hw *phy)
+{
+	phy->ops.regulator_enable = dsi_phy_hw_v4_0_regulator_enable;
+	phy->ops.regulator_disable = dsi_phy_hw_v4_0_regulator_disable;
+	phy->ops.enable = dsi_phy_hw_v4_0_enable;
+	phy->ops.disable = dsi_phy_hw_v4_0_disable;
+	phy->ops.calculate_timing_params =
+		dsi_phy_hw_v4_0_calculate_timing_params;
+}
+
+/**
+ * dsi_catalog_phy_setup() - set up catalog info for dsi phy hardware
+ * @phy:         Pointer to DSI PHY hw object.
+ * @version:     DSI PHY version.
+ * @index:       DSI PHY instance ID.
+ *
+ * This function sets up the catalog information in the dsi_phy_hw object.
+ *
+ * return: error code for failure and 0 for success.
+ */
+int dsi_catalog_phy_setup(struct dsi_phy_hw *phy,
+			  enum dsi_phy_version version,
+			  u32 index)
+{
+	int rc = 0;
+
+	if (version == DSI_PHY_VERSION_UNKNOWN ||
+	    version >= DSI_PHY_VERSION_MAX) {
+		pr_err("Unsupported version: %d\n", version);
+		return -ENOTSUPP;
+	}
+
+	phy->index = index;
+	set_bit(DSI_PHY_DPHY, phy->feature_map);
+
+	switch (version) {
+	case DSI_PHY_VERSION_4_0:
+		dsi_catalog_phy_4_0_init(phy);
+		break;
+	case DSI_PHY_VERSION_1_0:
+	case DSI_PHY_VERSION_2_0:
+	case DSI_PHY_VERSION_3_0:
+	default:
+		return -ENOTSUPP;
+	}
+
+	return rc;
+}
diff -Nruw linux-4.4.115-fbx/drivers/gpu/drm/msm/dsi-staging./dsi_catalog.h linux-4.4.115-fbx/drivers/gpu/drm/msm/dsi-staging/dsi_catalog.h
--- linux-4.4.115-fbx/drivers/gpu/drm/msm/dsi-staging./dsi_catalog.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/gpu/drm/msm/dsi-staging/dsi_catalog.h	2019-01-22 16:16:23.487246262 +0100
@@ -0,0 +1,133 @@
+/*
+ * Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _DSI_CATALOG_H_
+#define _DSI_CATALOG_H_
+
+#include "dsi_ctrl_hw.h"
+#include "dsi_phy_hw.h"
+
+/**
+ * dsi_catalog_ctrl_setup() - set up catalog info for dsi controller
+ * @ctrl:        Pointer to DSI controller hw object.
+ * @version:     DSI controller version.
+ * @index:       DSI controller instance ID.
+ *
+ * This function sets up the catalog information in the dsi_ctrl_hw object.
+ *
+ * return: error code for failure and 0 for success.
+ */
+int dsi_catalog_ctrl_setup(struct dsi_ctrl_hw *ctrl,
+			   enum dsi_ctrl_version version,
+			   u32 index);
+
+/**
+ * dsi_catalog_phy_setup() - set up catalog info for dsi phy hardware
+ * @phy:         Pointer to DSI PHY hw object.
+ * @version:     DSI PHY version.
+ * @index:       DSI PHY instance ID.
+ *
+ * This function sets up the catalog information in the dsi_phy_hw object.
+ *
+ * return: error code for failure and 0 for success.
+ */
+int dsi_catalog_phy_setup(struct dsi_phy_hw *phy,
+			  enum dsi_phy_version version,
+			  u32 index);
+
+/* Definitions for 4.0 PHY hardware driver */
+void dsi_phy_hw_v4_0_regulator_enable(struct dsi_phy_hw *phy,
+				      struct dsi_phy_per_lane_cfgs *cfg);
+void dsi_phy_hw_v4_0_regulator_disable(struct dsi_phy_hw *phy);
+void dsi_phy_hw_v4_0_enable(struct dsi_phy_hw *phy, struct dsi_phy_cfg *cfg);
+void dsi_phy_hw_v4_0_disable(struct dsi_phy_hw *phy);
+int dsi_phy_hw_v4_0_calculate_timing_params(struct dsi_phy_hw *phy,
+					    struct dsi_mode_info *mode,
+					    struct dsi_host_common_cfg *cfg,
+					   struct dsi_phy_per_lane_cfgs
+					   *timing);
+
+/* Definitions for 1.4 controller hardware driver */
+void dsi_ctrl_hw_14_host_setup(struct dsi_ctrl_hw *ctrl,
+			       struct dsi_host_common_cfg *config);
+void dsi_ctrl_hw_14_video_engine_en(struct dsi_ctrl_hw *ctrl, bool on);
+void dsi_ctrl_hw_14_video_engine_setup(struct dsi_ctrl_hw *ctrl,
+				       struct dsi_host_common_cfg *common_cfg,
+				       struct dsi_video_engine_cfg *cfg);
+void dsi_ctrl_hw_14_set_video_timing(struct dsi_ctrl_hw *ctrl,
+			 struct dsi_mode_info *mode);
+
+void dsi_ctrl_hw_14_cmd_engine_setup(struct dsi_ctrl_hw *ctrl,
+				     struct dsi_host_common_cfg *common_cfg,
+				     struct dsi_cmd_engine_cfg *cfg);
+
+void dsi_ctrl_hw_14_ctrl_en(struct dsi_ctrl_hw *ctrl, bool on);
+void dsi_ctrl_hw_14_cmd_engine_en(struct dsi_ctrl_hw *ctrl, bool on);
+
+void dsi_ctrl_hw_14_setup_cmd_stream(struct dsi_ctrl_hw *ctrl,
+				     u32 width_in_pixels,
+				     u32 h_stride,
+				     u32 height_in_lines,
+				     u32 vc_id);
+void dsi_ctrl_hw_14_phy_sw_reset(struct dsi_ctrl_hw *ctrl);
+void dsi_ctrl_hw_14_soft_reset(struct dsi_ctrl_hw *ctrl);
+
+void dsi_ctrl_hw_14_setup_lane_map(struct dsi_ctrl_hw *ctrl,
+		       struct dsi_lane_mapping *lane_map);
+void dsi_ctrl_hw_14_kickoff_command(struct dsi_ctrl_hw *ctrl,
+			struct dsi_ctrl_cmd_dma_info *cmd,
+			u32 flags);
+
+void dsi_ctrl_hw_14_kickoff_fifo_command(struct dsi_ctrl_hw *ctrl,
+			     struct dsi_ctrl_cmd_dma_fifo_info *cmd,
+			     u32 flags);
+void dsi_ctrl_hw_14_reset_cmd_fifo(struct dsi_ctrl_hw *ctrl);
+void dsi_ctrl_hw_14_trigger_command_dma(struct dsi_ctrl_hw *ctrl);
+
+void dsi_ctrl_hw_14_ulps_request(struct dsi_ctrl_hw *ctrl, u32 lanes);
+void dsi_ctrl_hw_14_ulps_exit(struct dsi_ctrl_hw *ctrl, u32 lanes);
+void dsi_ctrl_hw_14_clear_ulps_request(struct dsi_ctrl_hw *ctrl, u32 lanes);
+u32 dsi_ctrl_hw_14_get_lanes_in_ulps(struct dsi_ctrl_hw *ctrl);
+
+void dsi_ctrl_hw_14_clamp_enable(struct dsi_ctrl_hw *ctrl,
+				 u32 lanes,
+				 bool enable_ulps);
+
+void dsi_ctrl_hw_14_clamp_disable(struct dsi_ctrl_hw *ctrl,
+				  u32 lanes,
+				  bool disable_ulps);
+u32 dsi_ctrl_hw_14_get_interrupt_status(struct dsi_ctrl_hw *ctrl);
+void dsi_ctrl_hw_14_clear_interrupt_status(struct dsi_ctrl_hw *ctrl, u32 ints);
+void dsi_ctrl_hw_14_enable_status_interrupts(struct dsi_ctrl_hw *ctrl,
+					     u32 ints);
+
+u64 dsi_ctrl_hw_14_get_error_status(struct dsi_ctrl_hw *ctrl);
+void dsi_ctrl_hw_14_clear_error_status(struct dsi_ctrl_hw *ctrl, u64 errors);
+void dsi_ctrl_hw_14_enable_error_interrupts(struct dsi_ctrl_hw *ctrl,
+					    u64 errors);
+
+void dsi_ctrl_hw_14_video_test_pattern_setup(struct dsi_ctrl_hw *ctrl,
+				 enum dsi_test_pattern type,
+				 u32 init_val);
+void dsi_ctrl_hw_14_cmd_test_pattern_setup(struct dsi_ctrl_hw *ctrl,
+			       enum dsi_test_pattern  type,
+			       u32 init_val,
+			       u32 stream_id);
+void dsi_ctrl_hw_14_test_pattern_enable(struct dsi_ctrl_hw *ctrl, bool enable);
+void dsi_ctrl_hw_14_trigger_cmd_test_pattern(struct dsi_ctrl_hw *ctrl,
+				 u32 stream_id);
+ssize_t dsi_ctrl_hw_14_reg_dump_to_buffer(struct dsi_ctrl_hw *ctrl,
+					  char *buf,
+					  u32 size);
+#endif /* _DSI_CATALOG_H_ */
diff -Nruw linux-4.4.115-fbx/drivers/gpu/drm/msm/dsi-staging./dsi_clk_pwr.c linux-4.4.115-fbx/drivers/gpu/drm/msm/dsi-staging/dsi_clk_pwr.c
--- linux-4.4.115-fbx/drivers/gpu/drm/msm/dsi-staging./dsi_clk_pwr.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/gpu/drm/msm/dsi-staging/dsi_clk_pwr.c	2019-01-22 16:16:23.487246262 +0100
@@ -0,0 +1,727 @@
+/*
+ * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/of.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+
+#include "dsi_clk_pwr.h"
+
+#define INC_REFCOUNT(s, start_func) \
+	({ \
+		int rc = 0; \
+		if ((s)->refcount == 0) { \
+			rc = start_func(s); \
+			if (rc) \
+				pr_err("failed to enable, rc = %d\n", rc); \
+		} \
+		(s)->refcount++; \
+		rc; \
+	})
+
+#define DEC_REFCOUNT(s, stop_func) \
+	({ \
+		int rc = 0; \
+		if ((s)->refcount == 0) { \
+			pr_err("unbalanced refcount\n"); \
+		} else { \
+			(s)->refcount--; \
+			if ((s)->refcount == 0) { \
+				rc = stop_func(s); \
+				if (rc) \
+					pr_err("disable failed, rc=%d\n", rc); \
+			} \
+		} \
+		rc; \
+	})
+
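+/*
+ * Both macros are GNU statement expressions: they evaluate to the rc of
+ * start_func/stop_func when the refcount crosses zero, and to 0 when
+ * only the count changes. Intended pairing, as a sketch:
+ *
+ *	rc = INC_REFCOUNT(clks, dsi_core_clk_start);	// first user starts
+ *	...
+ *	rc = DEC_REFCOUNT(clks, dsi_core_clk_stop);	// last user stops
+ */
+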
+static int dsi_core_clk_start(struct dsi_core_clk_info *clks)
+{
+	int rc = 0;
+
+	rc = clk_prepare_enable(clks->mdp_core_clk);
+	if (rc) {
+		pr_err("failed to enable mdp_core_clk, rc=%d\n", rc);
+		goto error;
+	}
+
+	rc = clk_prepare_enable(clks->iface_clk);
+	if (rc) {
+		pr_err("failed to enable iface_clk, rc=%d\n", rc);
+		goto error_disable_core_clk;
+	}
+
+	rc = clk_prepare_enable(clks->bus_clk);
+	if (rc) {
+		pr_err("failed to enable bus_clk, rc=%d\n", rc);
+		goto error_disable_iface_clk;
+	}
+
+	rc = clk_prepare_enable(clks->core_mmss_clk);
+	if (rc) {
+		pr_err("failed to enable core_mmss_clk, rc=%d\n", rc);
+		goto error_disable_bus_clk;
+	}
+
+	return rc;
+
+error_disable_bus_clk:
+	clk_disable_unprepare(clks->bus_clk);
+error_disable_iface_clk:
+	clk_disable_unprepare(clks->iface_clk);
+error_disable_core_clk:
+	clk_disable_unprepare(clks->mdp_core_clk);
+error:
+	return rc;
+}
+
+static int dsi_core_clk_stop(struct dsi_core_clk_info *clks)
+{
+	clk_disable_unprepare(clks->core_mmss_clk);
+	clk_disable_unprepare(clks->bus_clk);
+	clk_disable_unprepare(clks->iface_clk);
+	clk_disable_unprepare(clks->mdp_core_clk);
+
+	return 0;
+}
+
+static int dsi_link_clk_set_rate(struct dsi_link_clk_info *l_clks)
+{
+	int rc = 0;
+
+	rc = clk_set_rate(l_clks->esc_clk, l_clks->esc_clk_rate);
+	if (rc) {
+		pr_err("clk_set_rate failed for esc_clk rc = %d\n", rc);
+		goto error;
+	}
+
+	rc = clk_set_rate(l_clks->byte_clk, l_clks->byte_clk_rate);
+	if (rc) {
+		pr_err("clk_set_rate failed for byte_clk rc = %d\n", rc);
+		goto error;
+	}
+
+	rc = clk_set_rate(l_clks->pixel_clk, l_clks->pixel_clk_rate);
+	if (rc) {
+		pr_err("clk_set_rate failed for pixel_clk rc = %d\n", rc);
+		goto error;
+	}
+error:
+	return rc;
+}
+
+static int dsi_link_clk_prepare(struct dsi_link_clk_info *l_clks)
+{
+	int rc = 0;
+
+	rc = clk_prepare(l_clks->esc_clk);
+	if (rc) {
+		pr_err("Failed to prepare dsi esc clk, rc=%d\n", rc);
+		goto esc_clk_err;
+	}
+
+	rc = clk_prepare(l_clks->byte_clk);
+	if (rc) {
+		pr_err("Failed to prepare dsi byte clk, rc=%d\n", rc);
+		goto byte_clk_err;
+	}
+
+	rc = clk_prepare(l_clks->pixel_clk);
+	if (rc) {
+		pr_err("Failed to prepare dsi pixel clk, rc=%d\n", rc);
+		goto pixel_clk_err;
+	}
+
+	return rc;
+
+pixel_clk_err:
+	clk_unprepare(l_clks->byte_clk);
+byte_clk_err:
+	clk_unprepare(l_clks->esc_clk);
+esc_clk_err:
+	return rc;
+}
+
+static void dsi_link_clk_unprepare(struct dsi_link_clk_info *l_clks)
+{
+	clk_unprepare(l_clks->pixel_clk);
+	clk_unprepare(l_clks->byte_clk);
+	clk_unprepare(l_clks->esc_clk);
+}
+
+static int dsi_link_clk_enable(struct dsi_link_clk_info *l_clks)
+{
+	int rc = 0;
+
+	rc = clk_enable(l_clks->esc_clk);
+	if (rc) {
+		pr_err("Failed to enable dsi esc clk, rc=%d\n", rc);
+		goto esc_clk_err;
+	}
+
+	rc = clk_enable(l_clks->byte_clk);
+	if (rc) {
+		pr_err("Failed to enable dsi byte clk, rc=%d\n", rc);
+		goto byte_clk_err;
+	}
+
+	rc = clk_enable(l_clks->pixel_clk);
+	if (rc) {
+		pr_err("Failed to enable dsi pixel clk, rc=%d\n", rc);
+		goto pixel_clk_err;
+	}
+
+	return rc;
+
+pixel_clk_err:
+	clk_disable(l_clks->byte_clk);
+byte_clk_err:
+	clk_disable(l_clks->esc_clk);
+esc_clk_err:
+	return rc;
+}
+
+static void dsi_link_clk_disable(struct dsi_link_clk_info *l_clks)
+{
+	clk_disable(l_clks->esc_clk);
+	clk_disable(l_clks->pixel_clk);
+	clk_disable(l_clks->byte_clk);
+}
+
+/**
+ * dsi_link_clk_start() - enable dsi link clocks
+ */
+static int dsi_link_clk_start(struct dsi_link_clk_info *clks)
+{
+	int rc = 0;
+
+	if (clks->set_new_rate) {
+		rc = dsi_link_clk_set_rate(clks);
+		if (rc) {
+			pr_err("failed to set clk rates, rc = %d\n", rc);
+			goto error;
+		} else {
+			clks->set_new_rate = false;
+		}
+	}
+
+	rc = dsi_link_clk_prepare(clks);
+	if (rc) {
+		pr_err("failed to prepare link clks, rc = %d\n", rc);
+		goto error;
+	}
+
+	rc = dsi_link_clk_enable(clks);
+	if (rc) {
+		pr_err("failed to enable link clks, rc = %d\n", rc);
+		goto error_unprepare;
+	}
+
+	pr_debug("Link clocks are enabled\n");
+	return rc;
+error_unprepare:
+	dsi_link_clk_unprepare(clks);
+error:
+	return rc;
+}
+
+/**
+ * dsi_link_clk_stop() - Stop DSI link clocks.
+ */
+static int dsi_link_clk_stop(struct dsi_link_clk_info *clks)
+{
+	dsi_link_clk_disable(clks);
+	dsi_link_clk_unprepare(clks);
+
+	pr_debug("Link clocks disabled\n");
+
+	return 0;
+}
+
+/**
+ * dsi_pwr_parse_supply_node() - parse power supply node from root device node
+ */
+static int dsi_pwr_parse_supply_node(struct device_node *root,
+				     struct dsi_regulator_info *regs)
+{
+	int rc = 0;
+	int i = 0;
+	u32 tmp = 0;
+	struct device_node *node = NULL;
+
+	for_each_child_of_node(root, node) {
+		const char *st = NULL;
+
+		rc = of_property_read_string(node, "qcom,supply-name", &st);
+		if (rc) {
+			pr_err("failed to read name, rc = %d\n", rc);
+			goto error;
+		}
+
+		snprintf(regs->vregs[i].vreg_name,
+			 ARRAY_SIZE(regs->vregs[i].vreg_name),
+			 "%s", st);
+
+		rc = of_property_read_u32(node, "qcom,supply-min-voltage",
+					  &tmp);
+		if (rc) {
+			pr_err("failed to read min voltage, rc = %d\n", rc);
+			goto error;
+		}
+		regs->vregs[i].min_voltage = tmp;
+
+		rc = of_property_read_u32(node, "qcom,supply-max-voltage",
+					  &tmp);
+		if (rc) {
+			pr_err("failed to read max voltage, rc = %d\n", rc);
+			goto error;
+		}
+		regs->vregs[i].max_voltage = tmp;
+
+		rc = of_property_read_u32(node, "qcom,supply-enable-load",
+					  &tmp);
+		if (rc) {
+			pr_err("failed to read enable load, rc = %d\n", rc);
+			goto error;
+		}
+		regs->vregs[i].enable_load = tmp;
+
+		rc = of_property_read_u32(node, "qcom,supply-disable-load",
+					  &tmp);
+		if (rc) {
+			pr_err("failed to read disable load, rc = %d\n", rc);
+			goto error;
+		}
+		regs->vregs[i].disable_load = tmp;
+
+		/* Optional values */
+		rc = of_property_read_u32(node, "qcom,supply-pre-on-sleep",
+					  &tmp);
+		if (rc) {
+			pr_debug("pre-on-sleep not specified\n");
+			rc = 0;
+		} else {
+			regs->vregs[i].pre_on_sleep = tmp;
+		}
+
+		rc = of_property_read_u32(node, "qcom,supply-pre-off-sleep",
+					  &tmp);
+		if (rc) {
+			pr_debug("pre-off-sleep not specified\n");
+			rc = 0;
+		} else {
+			regs->vregs[i].pre_off_sleep = tmp;
+		}
+
+		rc = of_property_read_u32(node, "qcom,supply-post-on-sleep",
+					  &tmp);
+		if (rc) {
+			pr_debug("post-on-sleep not specified\n");
+			rc = 0;
+		} else {
+			regs->vregs[i].post_on_sleep = tmp;
+		}
+
+		rc = of_property_read_u32(node, "qcom,supply-post-off-sleep",
+					  &tmp);
+		if (rc) {
+			pr_debug("post-off-sleep not specified\n");
+			rc = 0;
+		} else {
+			regs->vregs[i].post_off_sleep = tmp;
+		}
+
+		/* log the entry just parsed before advancing the index */
+		pr_debug("[%s] minv=%d maxv=%d, en_load=%d, dis_load=%d\n",
+			 regs->vregs[i].vreg_name,
+			 regs->vregs[i].min_voltage,
+			 regs->vregs[i].max_voltage,
+			 regs->vregs[i].enable_load,
+			 regs->vregs[i].disable_load);
+
+		++i;
+	}
+
+error:
+	return rc;
+}
+
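+/*
+ * The parser above expects one child node per supply. Illustrative
+ * device tree layout (the node names are assumptions; the qcom,supply-*
+ * properties are the ones read above):
+ *
+ *	qcom,panel-supply-entries {
+ *		qcom,panel-supply-entry@0 {
+ *			qcom,supply-name = "vdda";
+ *			qcom,supply-min-voltage = <1250000>;
+ *			qcom,supply-max-voltage = <1250000>;
+ *			qcom,supply-enable-load = <100000>;
+ *			qcom,supply-disable-load = <100>;
+ *			qcom,supply-post-on-sleep = <20>;
+ *		};
+ *	};
+ */
+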
+/**
+ * dsi_pwr_enable_vregs() - enable/disable regulators
+ */
+static int dsi_pwr_enable_vregs(struct dsi_regulator_info *regs, bool enable)
+{
+	int rc = 0, i = 0;
+	struct dsi_vreg *vreg;
+	int num_of_v = 0;
+
+	if (enable) {
+		for (i = 0; i < regs->count; i++) {
+			vreg = &regs->vregs[i];
+			if (vreg->pre_on_sleep)
+				msleep(vreg->pre_on_sleep);
+
+			rc = regulator_set_load(vreg->vreg,
+						vreg->enable_load);
+			if (rc < 0) {
+				pr_err("Setting optimum mode failed for %s\n",
+				       vreg->vreg_name);
+				goto error;
+			}
+			num_of_v = regulator_count_voltages(vreg->vreg);
+			if (num_of_v > 0) {
+				rc = regulator_set_voltage(vreg->vreg,
+							   vreg->min_voltage,
+							   vreg->max_voltage);
+				if (rc) {
+					pr_err("Set voltage(%s) fail, rc=%d\n",
+						 vreg->vreg_name, rc);
+					goto error_disable_opt_mode;
+				}
+			}
+
+			rc = regulator_enable(vreg->vreg);
+			if (rc) {
+				pr_err("enable failed for %s, rc=%d\n",
+				       vreg->vreg_name, rc);
+				goto error_disable_voltage;
+			}
+
+			if (vreg->post_on_sleep)
+				msleep(vreg->post_on_sleep);
+		}
+	} else {
+		for (i = (regs->count - 1); i >= 0; i--) {
+			if (regs->vregs[i].pre_off_sleep)
+				msleep(regs->vregs[i].pre_off_sleep);
+
+			(void)regulator_set_load(regs->vregs[i].vreg,
+						regs->vregs[i].disable_load);
+			(void)regulator_disable(regs->vregs[i].vreg);
+
+			if (regs->vregs[i].post_off_sleep)
+				msleep(regs->vregs[i].post_off_sleep);
+		}
+	}
+
+	return 0;
+error_disable_opt_mode:
+	(void)regulator_set_load(regs->vregs[i].vreg,
+				 regs->vregs[i].disable_load);
+
+error_disable_voltage:
+	if (num_of_v > 0)
+		(void)regulator_set_voltage(regs->vregs[i].vreg,
+					    0, regs->vregs[i].max_voltage);
+error:
+	for (i--; i >= 0; i--) {
+		if (regs->vregs[i].pre_off_sleep)
+			msleep(regs->vregs[i].pre_off_sleep);
+
+		(void)regulator_set_load(regs->vregs[i].vreg,
+					 regs->vregs[i].disable_load);
+
+		num_of_v = regulator_count_voltages(regs->vregs[i].vreg);
+		if (num_of_v > 0)
+			(void)regulator_set_voltage(regs->vregs[i].vreg,
+					    0, regs->vregs[i].max_voltage);
+
+		(void)regulator_disable(regs->vregs[i].vreg);
+
+		if (regs->vregs[i].post_off_sleep)
+			msleep(regs->vregs[i].post_off_sleep);
+	}
+
+	return rc;
+}
+
+/**
+ * dsi_clk_pwr_of_get_vreg_data - parse regulator supply information
+ * @of_node:        Device node to parse for supply information.
+ * @regs:           Pointer where regulator information will be copied to.
+ * @supply_name:    Name of the supply node.
+ *
+ * return: error code in case of failure or 0 for success.
+ */
+int dsi_clk_pwr_of_get_vreg_data(struct device_node *of_node,
+				 struct dsi_regulator_info *regs,
+				 char *supply_name)
+{
+	int rc = 0;
+	struct device_node *supply_root_node = NULL;
+
+	if (!of_node || !regs) {
+		pr_err("Bad params\n");
+		return -EINVAL;
+	}
+
+	regs->count = 0;
+	supply_root_node = of_get_child_by_name(of_node, supply_name);
+	if (!supply_root_node) {
+		supply_root_node = of_parse_phandle(of_node, supply_name, 0);
+		if (!supply_root_node) {
+			pr_err("No supply entry present for %s\n", supply_name);
+			return -EINVAL;
+		}
+	}
+
+	regs->count = of_get_available_child_count(supply_root_node);
+	if (regs->count == 0) {
+		pr_err("No vregs defined for %s\n", supply_name);
+		return -EINVAL;
+	}
+
+	regs->vregs = kcalloc(regs->count, sizeof(*regs->vregs), GFP_KERNEL);
+	if (!regs->vregs) {
+		regs->count = 0;
+		return -ENOMEM;
+	}
+
+	rc = dsi_pwr_parse_supply_node(supply_root_node, regs);
+	if (rc) {
+		pr_err("failed to parse supply node for %s, rc = %d\n",
+			supply_name, rc);
+
+		kfree(regs->vregs);
+		regs->vregs = NULL;
+		regs->count = 0;
+	}
+
+	return rc;
+}
+
+/**
+ * dsi_clk_pwr_get_dt_vreg_data - parse regulator supply information
+ * @dev:            Device whose of_node needs to be parsed.
+ * @regs:           Pointer where regulator information will be copied to.
+ * @supply_name:    Name of the supply node.
+ *
+ * return: error code in case of failure or 0 for success.
+ */
+int dsi_clk_pwr_get_dt_vreg_data(struct device *dev,
+				 struct dsi_regulator_info *regs,
+				 char *supply_name)
+{
+	int rc = 0;
+	struct device_node *of_node = NULL;
+	struct device_node *supply_node = NULL;
+	struct device_node *supply_root_node = NULL;
+
+	if (!dev || !regs) {
+		pr_err("Bad params\n");
+		return -EINVAL;
+	}
+
+	of_node = dev->of_node;
+	regs->count = 0;
+	supply_root_node = of_get_child_by_name(of_node, supply_name);
+	if (!supply_root_node) {
+		supply_root_node = of_parse_phandle(of_node, supply_name, 0);
+		if (!supply_root_node) {
+			pr_err("No supply entry present for %s\n", supply_name);
+			return -EINVAL;
+		}
+	}
+
+	for_each_child_of_node(supply_root_node, supply_node)
+		regs->count++;
+
+	if (regs->count == 0) {
+		pr_err("No vregs defined for %s\n", supply_name);
+		return -EINVAL;
+	}
+
+	regs->vregs = devm_kcalloc(dev, regs->count, sizeof(*regs->vregs),
+				   GFP_KERNEL);
+	if (!regs->vregs) {
+		regs->count = 0;
+		return -ENOMEM;
+	}
+
+	rc = dsi_pwr_parse_supply_node(supply_root_node, regs);
+	if (rc) {
+		pr_err("failed to parse supply node for %s, rc = %d\n",
+		       supply_name, rc);
+		devm_kfree(dev, regs->vregs);
+		regs->vregs = NULL;
+		regs->count = 0;
+	}
+
+	return rc;
+}
+
+/**
+ * dsi_pwr_enable_regulator() - enable a set of regulators
+ * @regs:       Pointer to set of regulators to enable or disable.
+ * @enable:     Enable/Disable regulators.
+ *
+ * return: error code in case of failure or 0 for success.
+ */
+int dsi_pwr_enable_regulator(struct dsi_regulator_info *regs, bool enable)
+{
+	int rc = 0;
+
+	if (enable) {
+		if (regs->refcount == 0) {
+			rc = dsi_pwr_enable_vregs(regs, true);
+			if (rc)
+				pr_err("failed to enable regulators\n");
+		}
+		regs->refcount++;
+	} else {
+		if (regs->refcount == 0) {
+			pr_err("Unbalanced regulator off\n");
+		} else {
+			regs->refcount--;
+			if (regs->refcount == 0) {
+				rc = dsi_pwr_enable_vregs(regs, false);
+				if (rc)
+					pr_err("failed to disable vregs\n");
+			}
+		}
+	}
+
+	return rc;
+}
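+
+/*
+ * Balanced usage sketch ("panel" is an assumed caller-side object):
+ *
+ *	rc = dsi_pwr_enable_regulator(&panel->power_info, true);
+ *	...
+ *	(void)dsi_pwr_enable_regulator(&panel->power_info, false);
+ */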
+
+/**
+ * dsi_clk_enable_core_clks() - enable DSI core clocks
+ * @clks:      DSI core clock information.
+ * @enable:    enable/disable DSI core clocks.
+ *
+ * A ref count is maintained, so caller should make sure disable and enable
+ * calls are balanced.
+ *
+ * return: error code in case of failure or 0 for success.
+ */
+int dsi_clk_enable_core_clks(struct dsi_core_clk_info *clks, bool enable)
+{
+	int rc = 0;
+
+	if (enable)
+		rc = INC_REFCOUNT(clks, dsi_core_clk_start);
+	else
+		rc = DEC_REFCOUNT(clks, dsi_core_clk_stop);
+
+	return rc;
+}
+
+/**
+ * dsi_clk_enable_link_clks() - enable DSI link clocks
+ * @clks:      DSI link clock information.
+ * @enable:    enable/disable DSI link clocks.
+ *
+ * A ref count is maintained, so caller should make sure disable and enable
+ * calls are balanced.
+ *
+ * return: error code in case of failure or 0 for success.
+ */
+int dsi_clk_enable_link_clks(struct dsi_link_clk_info *clks, bool enable)
+{
+	int rc = 0;
+
+	if (enable)
+		rc = INC_REFCOUNT(clks, dsi_link_clk_start);
+	else
+		rc = DEC_REFCOUNT(clks, dsi_link_clk_stop);
+
+	return rc;
+}
+
+/**
+ * dsi_clk_set_link_frequencies() - set frequencies for link clks
+ * @clks:         Link clock information
+ * @pixel_clk:    Pixel clock frequency in KHz.
+ * @byte_clk:     Byte clock frequency in KHz.
+ * @esc_clk:      Escape clock frequency in KHz.
+ *
+ * return: error code in case of failure or 0 for success.
+ */
+int dsi_clk_set_link_frequencies(struct dsi_link_clk_info *clks,
+				 u64 pixel_clk,
+				 u64 byte_clk,
+				 u64 esc_clk)
+{
+	int rc = 0;
+
+	clks->pixel_clk_rate = pixel_clk;
+	clks->byte_clk_rate = byte_clk;
+	clks->esc_clk_rate = esc_clk;
+	clks->set_new_rate = true;
+
+	return rc;
+}
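+
+/*
+ * Note: the rates set above are only cached; dsi_link_clk_start() pushes
+ * them to the clock framework on the next enable via the set_new_rate
+ * flag.
+ */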
+
+/**
+ * dsi_clk_set_pixel_clk_rate() - set frequency for pixel clock
+ * @clks:      DSI link clock information.
+ * @pixel_clk: Pixel clock rate in KHz.
+ *
+ * return: error code in case of failure or 0 for success.
+ */
+int dsi_clk_set_pixel_clk_rate(struct dsi_link_clk_info *clks, u64 pixel_clk)
+{
+	int rc = 0;
+
+	rc = clk_set_rate(clks->pixel_clk, pixel_clk);
+	if (rc)
+		pr_err("failed to set clk rate for pixel clk, rc=%d\n", rc);
+	else
+		clks->pixel_clk_rate = pixel_clk;
+
+	return rc;
+}
+
+/**
+ * dsi_clk_set_byte_clk_rate() - set frequency for byte clock
+ * @clks:      DSI link clock information.
+ * @byte_clk: Byte clock rate in KHz.
+ *
+ * return: error code in case of failure or 0 for success.
+ */
+int dsi_clk_set_byte_clk_rate(struct dsi_link_clk_info *clks, u64 byte_clk)
+{
+	int rc = 0;
+
+	rc = clk_set_rate(clks->byte_clk, byte_clk);
+	if (rc)
+		pr_err("failed to set clk rate for byte clk, rc=%d\n", rc);
+	else
+		clks->byte_clk_rate = byte_clk;
+
+	return rc;
+}
+
+/**
+ * dsi_clk_update_parent() - update parent clocks for specified clock
+ * @parent:       link clock pair which are set as parent.
+ * @child:        link clock pair whose parent has to be set.
+ */
+int dsi_clk_update_parent(struct dsi_clk_link_set *parent,
+			  struct dsi_clk_link_set *child)
+{
+	int rc = 0;
+
+	rc = clk_set_parent(child->byte_clk, parent->byte_clk);
+	if (rc) {
+		pr_err("failed to set byte clk parent\n");
+		goto error;
+	}
+
+	rc = clk_set_parent(child->pixel_clk, parent->pixel_clk);
+	if (rc) {
+		pr_err("failed to set pixel clk parent\n");
+		goto error;
+	}
+error:
+	return rc;
+}
diff -Nruw linux-4.4.115-fbx/drivers/gpu/drm/msm/dsi-staging./dsi_clk_pwr.h linux-4.4.115-fbx/drivers/gpu/drm/msm/dsi-staging/dsi_clk_pwr.h
--- linux-4.4.115-fbx/drivers/gpu/drm/msm/dsi-staging./dsi_clk_pwr.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/gpu/drm/msm/dsi-staging/dsi_clk_pwr.h	2019-01-22 16:16:23.487246262 +0100
@@ -0,0 +1,214 @@
+/*
+ * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _DSI_CLK_PWR_H_
+#define _DSI_CLK_PWR_H_
+
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/types.h>
+#include <linux/regulator/consumer.h>
+#include <linux/clk.h>
+
+/**
+ * struct dsi_vreg - regulator information for DSI regulators
+ * @vreg:            Handle to the regulator.
+ * @vreg_name:       Regulator name.
+ * @min_voltage:     Minimum voltage in uV.
+ * @max_voltage:     Maximum voltage in uV.
+ * @enable_load:     Load, in uA, when enabled.
+ * @disable_load:    Load, in uA, when disabled.
+ * @pre_on_sleep:    Sleep, in ms, before enabling the regulator.
+ * @post_on_sleep:   Sleep, in ms, after enabling the regulator.
+ * @pre_off_sleep:   Sleep, in ms, before disabling the regulator.
+ * @post_off_sleep:  Sleep, in ms, after disabling the regulator.
+ */
+struct dsi_vreg {
+	struct regulator *vreg;
+	char vreg_name[32];
+	u32 min_voltage;
+	u32 max_voltage;
+	u32 enable_load;
+	u32 disable_load;
+	u32 pre_on_sleep;
+	u32 post_on_sleep;
+	u32 pre_off_sleep;
+	u32 post_off_sleep;
+};
+
+/**
+ * struct dsi_regulator_info - set of vregs that are turned on/off together.
+ * @vregs:       Array of dsi_vreg structures.
+ * @count:       Number of vregs.
+ * @refcount:    Reference counting for enabling.
+ */
+struct dsi_regulator_info {
+	struct dsi_vreg *vregs;
+	u32 count;
+	u32 refcount;
+};
+
+/**
+ * struct dsi_core_clk_info - Core clock information for DSI hardware
+ * @mdp_core_clk:        Handle to MDP core clock.
+ * @iface_clk:           Handle to MDP interface clock.
+ * @core_mmss_clk:       Handle to MMSS core clock.
+ * @bus_clk:             Handle to bus clock.
+ * @refcount:            Reference count for core clocks.
+ * @clk_state:           Current clock state.
+ */
+struct dsi_core_clk_info {
+	struct clk *mdp_core_clk;
+	struct clk *iface_clk;
+	struct clk *core_mmss_clk;
+	struct clk *bus_clk;
+
+	u32 refcount;
+	u32 clk_state;
+};
+
+/**
+ * struct dsi_link_clk_info - Link clock information for DSI hardware.
+ * @byte_clk:        Handle to DSI byte clock.
+ * @byte_clk_rate:   Frequency of DSI byte clock in KHz.
+ * @pixel_clk:       Handle to DSI pixel clock.
+ * @pixel_clk_rate:  Frequency of DSI pixel clock in KHz.
+ * @esc_clk:         Handle to DSI escape clock.
+ * @esc_clk_rate:    Frequency of DSI escape clock in KHz.
+ * @refcount:        Reference count for link clocks.
+ * @clk_state:       Current clock state.
+ * @set_new_rate:    private flag used by clock utility.
+ */
+struct dsi_link_clk_info {
+	struct clk *byte_clk;
+	u64 byte_clk_rate;
+
+	struct clk *pixel_clk;
+	u64 pixel_clk_rate;
+
+	struct clk *esc_clk;
+	u64 esc_clk_rate;
+
+	u32 refcount;
+	u32 clk_state;
+	bool set_new_rate;
+};
+
+/**
+ * struct dsi_clk_link_set - Pair of clock handles to describe link clocks
+ * @byte_clk:     Handle to DSI byte clock.
+ * @pixel_clk:    Handle to DSI pixel clock.
+ */
+struct dsi_clk_link_set {
+	struct clk *byte_clk;
+	struct clk *pixel_clk;
+};
+
+/**
+ * dsi_clk_pwr_of_get_vreg_data - parse regulator supply information
+ * @of_node:        Device node to parse for supply information.
+ * @regs:           Pointer where regulator information will be copied to.
+ * @supply_name:    Name of the supply node.
+ *
+ * return: error code in case of failure or 0 for success.
+ */
+int dsi_clk_pwr_of_get_vreg_data(struct device_node *of_node,
+				 struct dsi_regulator_info *regs,
+				 char *supply_name);
+
+/**
+ * dsi_clk_pwr_get_dt_vreg_data - parse regulator supply information
+ * @dev:            Device whose of_node needs to be parsed.
+ * @regs:           Pointer where regulator information will be copied to.
+ * @supply_name:    Name of the supply node.
+ *
+ * return: error code in case of failure or 0 for success.
+ */
+int dsi_clk_pwr_get_dt_vreg_data(struct device *dev,
+				 struct dsi_regulator_info *regs,
+				 char *supply_name);
+
+/**
+ * dsi_pwr_enable_regulator() - enable a set of regulators
+ * @regs:       Pointer to set of regulators to enable or disable.
+ * @enable:     Enable/Disable regulators.
+ *
+ * return: error code in case of failure or 0 for success.
+ */
+int dsi_pwr_enable_regulator(struct dsi_regulator_info *regs, bool enable);
+
+/**
+ * dsi_clk_enable_core_clks() - enable DSI core clocks
+ * @clks:      DSI core clock information.
+ * @enable:    enable/disable DSI core clocks.
+ *
+ * A ref count is maintained, so caller should make sure disable and enable
+ * calls are balanced.
+ *
+ * return: error code in case of failure or 0 for success.
+ */
+int dsi_clk_enable_core_clks(struct dsi_core_clk_info *clks, bool enable);
+
+/**
+ * dsi_clk_enable_link_clks() - enable DSI link clocks
+ * @clks:      DSI link clock information.
+ * @enable:    enable/disable DSI link clocks.
+ *
+ * A ref count is maintained, so caller should make sure disable and enable
+ * calls are balanced.
+ *
+ * return: error code in case of failure or 0 for success.
+ */
+int dsi_clk_enable_link_clks(struct dsi_link_clk_info *clks, bool enable);
+
+/**
+ * dsi_clk_set_link_frequencies() - set frequencies for link clks
+ * @clks:         Link clock information
+ * @pixel_clk:    Pixel clock frequency in KHz.
+ * @byte_clk:     Byte clock frequency in KHz.
+ * @esc_clk:      Escape clock frequency in KHz.
+ *
+ * return: error code in case of failure or 0 for success.
+ */
+int dsi_clk_set_link_frequencies(struct dsi_link_clk_info *clks,
+				 u64 pixel_clk,
+				 u64 byte_clk,
+				 u64 esc_clk);
+
+/**
+ * dsi_clk_set_pixel_clk_rate() - set frequency for pixel clock
+ * @clks:      DSI link clock information.
+ * @pixel_clk: Pixel clock rate in KHz.
+ *
+ * return: error code in case of failure or 0 for success.
+ */
+int dsi_clk_set_pixel_clk_rate(struct dsi_link_clk_info *clks, u64 pixel_clk);
+
+/**
+ * dsi_clk_set_byte_clk_rate() - set frequency for byte clock
+ * @clks:      DSI link clock information.
+ * @byte_clk: Byte clock rate in KHz.
+ *
+ * return: error code in case of failure or 0 for success.
+ */
+int dsi_clk_set_byte_clk_rate(struct dsi_link_clk_info *clks, u64 byte_clk);
+
+/**
+ * dsi_clk_update_parent() - update parent clocks for specified clock
+ * @parent:       link clock pair which are set as parent.
+ * @child:        link clock pair whose parent has to be set.
+ */
+int dsi_clk_update_parent(struct dsi_clk_link_set *parent,
+			  struct dsi_clk_link_set *child);
+#endif /* _DSI_CLK_PWR_H_ */
diff -Nruw linux-4.4.115/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c linux-4.4.115-fbx/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c
--- linux-4.4.115/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c	2019-10-29 09:26:23.621202963 +0100
@@ -0,0 +1,2302 @@
+/*
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt)	"dsi-ctrl:[%s] " fmt, __func__
+
+#include <linux/of_device.h>
+#include <linux/err.h>
+#include <linux/regulator/consumer.h>
+#include <linux/clk.h>
+#include <linux/msm-bus.h>
+#include <linux/of_irq.h>
+#include <video/mipi_display.h>
+
+#include "msm_drv.h"
+#include "msm_kms.h"
+#include "msm_gpu.h"
+#include "dsi_ctrl.h"
+#include "dsi_ctrl_hw.h"
+#include "dsi_clk_pwr.h"
+#include "dsi_catalog.h"
+
+#define DSI_CTRL_DEFAULT_LABEL "MDSS DSI CTRL"
+
+#define DSI_CTRL_TX_TO_MS     200
+
+#define TO_ON_OFF(x) ((x) ? "ON" : "OFF")
+/**
+ * enum dsi_ctrl_driver_ops - controller driver ops
+ */
+enum dsi_ctrl_driver_ops {
+	DSI_CTRL_OP_POWER_STATE_CHANGE,
+	DSI_CTRL_OP_CMD_ENGINE,
+	DSI_CTRL_OP_VID_ENGINE,
+	DSI_CTRL_OP_HOST_ENGINE,
+	DSI_CTRL_OP_CMD_TX,
+	DSI_CTRL_OP_ULPS_TOGGLE,
+	DSI_CTRL_OP_CLAMP_TOGGLE,
+	DSI_CTRL_OP_SET_CLK_SOURCE,
+	DSI_CTRL_OP_HOST_INIT,
+	DSI_CTRL_OP_TPG,
+	DSI_CTRL_OP_PHY_SW_RESET,
+	DSI_CTRL_OP_ASYNC_TIMING,
+	DSI_CTRL_OP_MAX
+};
+
+struct dsi_ctrl_list_item {
+	struct dsi_ctrl *ctrl;
+	struct list_head list;
+};
+
+static LIST_HEAD(dsi_ctrl_list);
+static DEFINE_MUTEX(dsi_ctrl_list_lock);
+
+static const enum dsi_ctrl_version dsi_ctrl_v1_4 = DSI_CTRL_VERSION_1_4;
+static const enum dsi_ctrl_version dsi_ctrl_v2_0 = DSI_CTRL_VERSION_2_0;
+
+static const struct of_device_id msm_dsi_of_match[] = {
+	{
+		.compatible = "qcom,dsi-ctrl-hw-v1.4",
+		.data = &dsi_ctrl_v1_4,
+	},
+	{
+		.compatible = "qcom,dsi-ctrl-hw-v2.0",
+		.data = &dsi_ctrl_v2_0,
+	},
+	{}
+};
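+
+/*
+ * Illustrative only (not part of this patch): a device-tree node matched by
+ * the table above could look roughly like the sketch below; the node name
+ * and unit address are assumptions, while "compatible", "label" and
+ * "cell-index" are the properties this driver actually consumes.
+ *
+ *	dsi-ctrl@fd922800 {
+ *		compatible = "qcom,dsi-ctrl-hw-v1.4";
+ *		label = "dsi-ctrl-0";
+ *		cell-index = <0>;
+ *	};
+ */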
+
+static ssize_t debugfs_state_info_read(struct file *file,
+				       char __user *buff,
+				       size_t count,
+				       loff_t *ppos)
+{
+	struct dsi_ctrl *dsi_ctrl = file->private_data;
+	char *buf;
+	u32 len = 0;
+
+	if (!dsi_ctrl)
+		return -ENODEV;
+
+	if (*ppos)
+		return 0;
+
+	buf = kzalloc(SZ_4K, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	/* Dump current state */
+	len += snprintf((buf + len), (SZ_4K - len), "Current State:\n");
+	len += snprintf((buf + len), (SZ_4K - len),
+			"\tPOWER_STATUS = %s\n\tCORE_CLOCK = %s\n",
+			TO_ON_OFF(dsi_ctrl->current_state.pwr_enabled),
+			TO_ON_OFF(dsi_ctrl->current_state.core_clk_enabled));
+	len += snprintf((buf + len), (SZ_4K - len),
+			"\tLINK_CLOCK = %s\n\tULPS_STATUS = %s\n",
+			TO_ON_OFF(dsi_ctrl->current_state.link_clk_enabled),
+			TO_ON_OFF(dsi_ctrl->current_state.ulps_enabled));
+	len += snprintf((buf + len), (SZ_4K - len),
+			"\tCLAMP_STATUS = %s\n\tCTRL_ENGINE = %s\n",
+			TO_ON_OFF(dsi_ctrl->current_state.clamp_enabled),
+			TO_ON_OFF(dsi_ctrl->current_state.controller_state));
+	len += snprintf((buf + len), (SZ_4K - len),
+			"\tVIDEO_ENGINE = %s\n\tCOMMAND_ENGINE = %s\n",
+			TO_ON_OFF(dsi_ctrl->current_state.vid_engine_state),
+			TO_ON_OFF(dsi_ctrl->current_state.cmd_engine_state));
+
+	/* Dump clock information */
+	len += snprintf((buf + len), (SZ_4K - len), "\nClock Info:\n");
+	len += snprintf((buf + len), (SZ_4K - len),
+			"\tBYTE_CLK = %llu, PIXEL_CLK = %llu, ESC_CLK = %llu\n",
+			dsi_ctrl->clk_info.link_clks.byte_clk_rate,
+			dsi_ctrl->clk_info.link_clks.pixel_clk_rate,
+			dsi_ctrl->clk_info.link_clks.esc_clk_rate);
+
+	/* TODO: make sure that this does not exceed 4K */
+	if (copy_to_user(buff, buf, len)) {
+		kfree(buf);
+		return -EFAULT;
+	}
+
+	*ppos += len;
+	kfree(buf);
+	return len;
+}
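+
+/*
+ * Usage sketch (assuming debugfs is mounted at /sys/kernel/debug and
+ * <parent> is the directory handed to dsi_ctrl_drv_init()):
+ *
+ *	cat /sys/kernel/debug/<parent>/<ctrl-label>/state_info
+ *	cat /sys/kernel/debug/<parent>/<ctrl-label>/reg_dump
+ */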
+
+static ssize_t debugfs_reg_dump_read(struct file *file,
+				     char __user *buff,
+				     size_t count,
+				     loff_t *ppos)
+{
+	struct dsi_ctrl *dsi_ctrl = file->private_data;
+	char *buf;
+	u32 len = 0;
+
+	if (!dsi_ctrl)
+		return -ENODEV;
+
+	if (*ppos)
+		return 0;
+
+	buf = kzalloc(SZ_4K, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	if (dsi_ctrl->current_state.core_clk_enabled) {
+		len = dsi_ctrl->hw.ops.reg_dump_to_buffer(&dsi_ctrl->hw,
+							  buf,
+							  SZ_4K);
+	} else {
+		len = snprintf((buf + len), (SZ_4K - len),
+			       "Core clocks are not turned on, cannot read\n");
+	}
+
+	/* TODO: make sure that this does not exceed 4K */
+	if (copy_to_user(buff, buf, len)) {
+		kfree(buf);
+		return -EFAULT;
+	}
+
+	*ppos += len;
+	kfree(buf);
+	return len;
+}
+
+static const struct file_operations state_info_fops = {
+	.open = simple_open,
+	.read = debugfs_state_info_read,
+};
+
+static const struct file_operations reg_dump_fops = {
+	.open = simple_open,
+	.read = debugfs_reg_dump_read,
+};
+
+static int dsi_ctrl_debugfs_init(struct dsi_ctrl *dsi_ctrl,
+				 struct dentry *parent)
+{
+	int rc = 0;
+	struct dentry *dir, *state_file, *reg_dump;
+
+	dir = debugfs_create_dir(dsi_ctrl->name, parent);
+	if (IS_ERR_OR_NULL(dir)) {
+		rc = PTR_ERR(dir);
+		pr_err("[DSI_%d] debugfs create dir failed, rc=%d\n",
+		       dsi_ctrl->index, rc);
+		goto error;
+	}
+
+	state_file = debugfs_create_file("state_info",
+					 0444,
+					 dir,
+					 dsi_ctrl,
+					 &state_info_fops);
+	if (IS_ERR_OR_NULL(state_file)) {
+		rc = PTR_ERR(state_file);
+		pr_err("[DSI_%d] state file failed, rc=%d\n",
+		       dsi_ctrl->index, rc);
+		goto error_remove_dir;
+	}
+
+	reg_dump = debugfs_create_file("reg_dump",
+				       0444,
+				       dir,
+				       dsi_ctrl,
+				       &reg_dump_fops);
+	if (IS_ERR_OR_NULL(reg_dump)) {
+		rc = PTR_ERR(reg_dump);
+		pr_err("[DSI_%d] reg dump file failed, rc=%d\n",
+		       dsi_ctrl->index, rc);
+		goto error_remove_dir;
+	}
+
+	dsi_ctrl->debugfs_root = dir;
+	return rc;
+
+error_remove_dir:
+	debugfs_remove(dir);
+error:
+	return rc;
+}
+
+static int dsi_ctrl_debugfs_deinit(struct dsi_ctrl *dsi_ctrl)
+{
+	debugfs_remove(dsi_ctrl->debugfs_root);
+	return 0;
+}
+
+static int dsi_ctrl_check_state(struct dsi_ctrl *dsi_ctrl,
+				enum dsi_ctrl_driver_ops op,
+				u32 op_state)
+{
+	int rc = 0;
+	struct dsi_ctrl_state_info *state = &dsi_ctrl->current_state;
+
+	switch (op) {
+	case DSI_CTRL_OP_POWER_STATE_CHANGE:
+		if (state->power_state == op_state) {
+			pr_debug("[%d] No change in state, pwr_state=%d\n",
+			       dsi_ctrl->index, op_state);
+			rc = -EINVAL;
+		} else if (state->power_state == DSI_CTRL_POWER_LINK_CLK_ON) {
+			if ((state->cmd_engine_state == DSI_CTRL_ENGINE_ON) ||
+			    (state->vid_engine_state == DSI_CTRL_ENGINE_ON) ||
+			    (state->controller_state == DSI_CTRL_ENGINE_ON)) {
+				pr_debug("[%d]State error: op=%d: %d, %d, %d\n",
+				       dsi_ctrl->index,
+				       op_state,
+				       state->cmd_engine_state,
+				       state->vid_engine_state,
+				       state->controller_state);
+				rc = -EINVAL;
+			}
+		}
+		break;
+	case DSI_CTRL_OP_CMD_ENGINE:
+		if (state->cmd_engine_state == op_state) {
+			pr_debug("[%d] No change in state, cmd_state=%d\n",
+			       dsi_ctrl->index, op_state);
+			rc = -EINVAL;
+		} else if ((state->power_state != DSI_CTRL_POWER_LINK_CLK_ON) ||
+			   (state->controller_state != DSI_CTRL_ENGINE_ON)) {
+			pr_debug("[%d]State error: op=%d: %d, %d\n",
+			       dsi_ctrl->index,
+			       op,
+			       state->power_state,
+			       state->controller_state);
+			rc = -EINVAL;
+		}
+		break;
+	case DSI_CTRL_OP_VID_ENGINE:
+		if (state->vid_engine_state == op_state) {
+			pr_debug("[%d] No change in state, cmd_state=%d\n",
+			       dsi_ctrl->index, op_state);
+			rc = -EINVAL;
+		} else if ((state->power_state != DSI_CTRL_POWER_LINK_CLK_ON) ||
+			   (state->controller_state != DSI_CTRL_ENGINE_ON)) {
+			pr_debug("[%d]State error: op=%d: %d, %d\n",
+			       dsi_ctrl->index,
+			       op,
+			       state->power_state,
+			       state->controller_state);
+			rc = -EINVAL;
+		}
+		break;
+	case DSI_CTRL_OP_HOST_ENGINE:
+		if (state->controller_state == op_state) {
+			pr_debug("[%d] No change in state, ctrl_state=%d\n",
+			       dsi_ctrl->index, op_state);
+			rc = -EINVAL;
+		} else if (state->power_state != DSI_CTRL_POWER_LINK_CLK_ON) {
+			pr_debug("[%d]State error (link is off): op=%d:, %d\n",
+			       dsi_ctrl->index,
+			       op_state,
+			       state->power_state);
+			rc = -EINVAL;
+		} else if ((op_state == DSI_CTRL_ENGINE_OFF) &&
+			   ((state->cmd_engine_state != DSI_CTRL_ENGINE_OFF) ||
+			    (state->vid_engine_state != DSI_CTRL_ENGINE_OFF))) {
+			pr_debug("[%d]State error (eng on): op=%d: %d, %d\n",
+				  dsi_ctrl->index,
+				  op_state,
+				  state->cmd_engine_state,
+				  state->vid_engine_state);
+			rc = -EINVAL;
+		}
+		break;
+	case DSI_CTRL_OP_CMD_TX:
+		if ((state->power_state != DSI_CTRL_POWER_LINK_CLK_ON) ||
+		    (state->host_initialized != true) ||
+		    (state->cmd_engine_state != DSI_CTRL_ENGINE_ON)) {
+			pr_debug("[%d]State error: op=%d: %d, %d, %d\n",
+			       dsi_ctrl->index,
+			       op,
+			       state->power_state,
+			       state->host_initialized,
+			       state->cmd_engine_state);
+			rc = -EINVAL;
+		}
+		break;
+	case DSI_CTRL_OP_HOST_INIT:
+		if (state->host_initialized == op_state) {
+			pr_debug("[%d] No change in state, host_init=%d\n",
+			       dsi_ctrl->index, op_state);
+			rc = -EINVAL;
+		} else if (state->power_state != DSI_CTRL_POWER_CORE_CLK_ON) {
+			pr_debug("[%d]State error: op=%d: %d\n",
+			       dsi_ctrl->index, op, state->power_state);
+			rc = -EINVAL;
+		}
+		break;
+	case DSI_CTRL_OP_ULPS_TOGGLE:
+		if (state->ulps_enabled == op_state) {
+			pr_debug("[%d] No change in state, ulps_enabled=%d\n",
+			       dsi_ctrl->index, op_state);
+			rc = -EINVAL;
+		} else if ((state->power_state != DSI_CTRL_POWER_LINK_CLK_ON) ||
+			   (state->controller_state != DSI_CTRL_ENGINE_ON)) {
+			pr_debug("[%d]State error: op=%d: %d, %d\n",
+			       dsi_ctrl->index,
+			       op,
+			       state->power_state,
+			       state->controller_state);
+			rc = -EINVAL;
+		}
+		break;
+	case DSI_CTRL_OP_CLAMP_TOGGLE:
+		if (state->clamp_enabled == op_state) {
+			pr_debug("[%d] No change in state, clamp_enabled=%d\n",
+			       dsi_ctrl->index, op_state);
+			rc = -EINVAL;
+		} else if ((state->power_state != DSI_CTRL_POWER_LINK_CLK_ON) ||
+			   (state->controller_state != DSI_CTRL_ENGINE_ON)) {
+			pr_debug("[%d]State error: op=%d: %d, %d\n",
+			       dsi_ctrl->index,
+			       op,
+			       state->power_state,
+			       state->controller_state);
+			rc = -EINVAL;
+		}
+		break;
+	case DSI_CTRL_OP_SET_CLK_SOURCE:
+		if (state->power_state == DSI_CTRL_POWER_LINK_CLK_ON) {
+			pr_debug("[%d] State error: op=%d: %d\n",
+			       dsi_ctrl->index,
+			       op,
+			       state->power_state);
+			rc = -EINVAL;
+		}
+		break;
+	case DSI_CTRL_OP_TPG:
+		if (state->tpg_enabled == op_state) {
+			pr_debug("[%d] No change in state, tpg_enabled=%d\n",
+			       dsi_ctrl->index, op_state);
+			rc = -EINVAL;
+		} else if ((state->power_state != DSI_CTRL_POWER_LINK_CLK_ON) ||
+			   (state->controller_state != DSI_CTRL_ENGINE_ON)) {
+			pr_debug("[%d]State error: op=%d: %d, %d\n",
+			       dsi_ctrl->index,
+			       op,
+			       state->power_state,
+			       state->controller_state);
+			rc = -EINVAL;
+		}
+		break;
+	case DSI_CTRL_OP_PHY_SW_RESET:
+		if (state->power_state != DSI_CTRL_POWER_CORE_CLK_ON) {
+			pr_debug("[%d]State error: op=%d: %d\n",
+			       dsi_ctrl->index, op, state->power_state);
+			rc = -EINVAL;
+		}
+		break;
+	case DSI_CTRL_OP_ASYNC_TIMING:
+		if (state->vid_engine_state != op_state) {
+			pr_err("[%d] Unexpected engine state vid_state=%d\n",
+			       dsi_ctrl->index, op_state);
+			rc = -EINVAL;
+		}
+		break;
+	default:
+		rc = -ENOTSUPP;
+		break;
+	}
+
+	return rc;
+}
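+
+/*
+ * The checks above encode the expected bring-up order. A sketch derived
+ * from the cases above (not a documented sequence):
+ *
+ *	POWER_STATE_CHANGE(VREG_ON -> CORE_CLK_ON)
+ *	-> PHY_SW_RESET / HOST_INIT
+ *	-> POWER_STATE_CHANGE(LINK_CLK_ON) -> HOST_ENGINE(ON)
+ *	-> CMD_ENGINE / VID_ENGINE(ON) -> CMD_TX
+ */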
+
+static void dsi_ctrl_update_state(struct dsi_ctrl *dsi_ctrl,
+				  enum dsi_ctrl_driver_ops op,
+				  u32 op_state)
+{
+	struct dsi_ctrl_state_info *state = &dsi_ctrl->current_state;
+
+	switch (op) {
+	case DSI_CTRL_OP_POWER_STATE_CHANGE:
+		state->power_state = op_state;
+		if (op_state == DSI_CTRL_POWER_OFF) {
+			state->pwr_enabled = false;
+			state->core_clk_enabled = false;
+			state->link_clk_enabled = false;
+		} else if (op_state == DSI_CTRL_POWER_VREG_ON) {
+			state->pwr_enabled = true;
+			state->core_clk_enabled = false;
+			state->link_clk_enabled = false;
+		} else if (op_state == DSI_CTRL_POWER_CORE_CLK_ON) {
+			state->pwr_enabled = true;
+			state->core_clk_enabled = true;
+			state->link_clk_enabled = false;
+		} else if (op_state == DSI_CTRL_POWER_LINK_CLK_ON) {
+			state->pwr_enabled = true;
+			state->core_clk_enabled = true;
+			state->link_clk_enabled = true;
+		}
+		break;
+	case DSI_CTRL_OP_CMD_ENGINE:
+		state->cmd_engine_state = op_state;
+		break;
+	case DSI_CTRL_OP_VID_ENGINE:
+		state->vid_engine_state = op_state;
+		break;
+	case DSI_CTRL_OP_HOST_ENGINE:
+		state->controller_state = op_state;
+		break;
+	case DSI_CTRL_OP_ULPS_TOGGLE:
+		state->ulps_enabled = (op_state == 1) ? true : false;
+		break;
+	case DSI_CTRL_OP_CLAMP_TOGGLE:
+		state->clamp_enabled = (op_state == 1) ? true : false;
+		break;
+	case DSI_CTRL_OP_SET_CLK_SOURCE:
+		state->clk_source_set = (op_state == 1) ? true : false;
+		break;
+	case DSI_CTRL_OP_HOST_INIT:
+		state->host_initialized = (op_state == 1) ? true : false;
+		break;
+	case DSI_CTRL_OP_TPG:
+		state->tpg_enabled = (op_state == 1) ? true : false;
+		break;
+	case DSI_CTRL_OP_CMD_TX:
+	case DSI_CTRL_OP_PHY_SW_RESET:
+	default:
+		break;
+	}
+}
+
+static int dsi_ctrl_init_regmap(struct platform_device *pdev,
+				struct dsi_ctrl *ctrl)
+{
+	int rc = 0;
+	void __iomem *ptr;
+
+	ptr = msm_ioremap(pdev, "dsi_ctrl", ctrl->name);
+	if (IS_ERR(ptr)) {
+		rc = PTR_ERR(ptr);
+		return rc;
+	}
+
+	ctrl->hw.base = ptr;
+	pr_debug("[%s] map dsi_ctrl registers to %pK\n", ctrl->name,
+		 ctrl->hw.base);
+
+	ptr = msm_ioremap(pdev, "mmss_misc", ctrl->name);
+	if (IS_ERR(ptr)) {
+		rc = PTR_ERR(ptr);
+		return rc;
+	}
+
+	ctrl->hw.mmss_misc_base = ptr;
+	pr_debug("[%s] map mmss_misc registers to %p\n", ctrl->name,
+		 ctrl->hw.mmss_misc_base);
+	return rc;
+}
+
+static int dsi_ctrl_clocks_deinit(struct dsi_ctrl *ctrl)
+{
+	struct dsi_core_clk_info *core = &ctrl->clk_info.core_clks;
+	struct dsi_link_clk_info *link = &ctrl->clk_info.link_clks;
+	struct dsi_clk_link_set *rcg = &ctrl->clk_info.rcg_clks;
+
+	if (core->mdp_core_clk)
+		devm_clk_put(&ctrl->pdev->dev, core->mdp_core_clk);
+	if (core->iface_clk)
+		devm_clk_put(&ctrl->pdev->dev, core->iface_clk);
+	if (core->core_mmss_clk)
+		devm_clk_put(&ctrl->pdev->dev, core->core_mmss_clk);
+	if (core->bus_clk)
+		devm_clk_put(&ctrl->pdev->dev, core->bus_clk);
+
+	memset(core, 0x0, sizeof(*core));
+
+	if (link->byte_clk)
+		devm_clk_put(&ctrl->pdev->dev, link->byte_clk);
+	if (link->pixel_clk)
+		devm_clk_put(&ctrl->pdev->dev, link->pixel_clk);
+	if (link->esc_clk)
+		devm_clk_put(&ctrl->pdev->dev, link->esc_clk);
+
+	memset(link, 0x0, sizeof(*link));
+
+	if (rcg->byte_clk)
+		devm_clk_put(&ctrl->pdev->dev, rcg->byte_clk);
+	if (rcg->pixel_clk)
+		devm_clk_put(&ctrl->pdev->dev, rcg->pixel_clk);
+
+	memset(rcg, 0x0, sizeof(*rcg));
+
+	return 0;
+}
+
+static int dsi_ctrl_clocks_init(struct platform_device *pdev,
+				struct dsi_ctrl *ctrl)
+{
+	int rc = 0;
+	struct dsi_core_clk_info *core = &ctrl->clk_info.core_clks;
+	struct dsi_link_clk_info *link = &ctrl->clk_info.link_clks;
+	struct dsi_clk_link_set *rcg = &ctrl->clk_info.rcg_clks;
+
+	core->mdp_core_clk = devm_clk_get(&pdev->dev, "mdp_core_clk");
+	if (IS_ERR(core->mdp_core_clk)) {
+		rc = PTR_ERR(core->mdp_core_clk);
+		pr_err("failed to get mdp_core_clk, rc=%d\n", rc);
+		goto fail;
+	}
+
+	core->iface_clk = devm_clk_get(&pdev->dev, "iface_clk");
+	if (IS_ERR(core->iface_clk)) {
+		rc = PTR_ERR(core->iface_clk);
+		pr_err("failed to get iface_clk, rc=%d\n", rc);
+		goto fail;
+	}
+
+	core->core_mmss_clk = devm_clk_get(&pdev->dev, "core_mmss_clk");
+	if (IS_ERR(core->core_mmss_clk)) {
+		rc = PTR_ERR(core->core_mmss_clk);
+		pr_err("failed to get core_mmss_clk, rc=%d\n", rc);
+		goto fail;
+	}
+
+	core->bus_clk = devm_clk_get(&pdev->dev, "bus_clk");
+	if (IS_ERR(core->bus_clk)) {
+		rc = PTR_ERR(core->bus_clk);
+		pr_err("failed to get bus_clk, rc=%d\n", rc);
+		goto fail;
+	}
+
+	link->byte_clk = devm_clk_get(&pdev->dev, "byte_clk");
+	if (IS_ERR(link->byte_clk)) {
+		rc = PTR_ERR(link->byte_clk);
+		pr_err("failed to get byte_clk, rc=%d\n", rc);
+		goto fail;
+	}
+
+	link->pixel_clk = devm_clk_get(&pdev->dev, "pixel_clk");
+	if (IS_ERR(link->pixel_clk)) {
+		rc = PTR_ERR(link->pixel_clk);
+		pr_err("failed to get pixel_clk, rc=%d\n", rc);
+		goto fail;
+	}
+
+	link->esc_clk = devm_clk_get(&pdev->dev, "core_clk");
+	if (IS_ERR(link->esc_clk)) {
+		rc = PTR_ERR(link->esc_clk);
+		pr_err("failed to get esc_clk, rc=%d\n", rc);
+		goto fail;
+	}
+
+	rcg->byte_clk = devm_clk_get(&pdev->dev, "byte_clk_rcg");
+	if (IS_ERR(rcg->byte_clk)) {
+		rc = PTR_ERR(rcg->byte_clk);
+		pr_err("failed to get byte_clk_rcg, rc=%d\n", rc);
+		goto fail;
+	}
+
+	rcg->pixel_clk = devm_clk_get(&pdev->dev, "pixel_clk_rcg");
+	if (IS_ERR(rcg->pixel_clk)) {
+		rc = PTR_ERR(rcg->pixel_clk);
+		pr_err("failed to get pixel_clk_rcg, rc=%d\n", rc);
+		goto fail;
+	}
+
+	return 0;
+fail:
+	dsi_ctrl_clocks_deinit(ctrl);
+	return rc;
+}
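+
+/*
+ * For reference, the clock-names consumed above correspond to a DT snippet
+ * along these lines (the provider phandles are assumptions; the names are
+ * exactly those requested via devm_clk_get() above):
+ *
+ *	clock-names = "mdp_core_clk", "iface_clk", "core_mmss_clk",
+ *		      "bus_clk", "byte_clk", "pixel_clk", "core_clk",
+ *		      "byte_clk_rcg", "pixel_clk_rcg";
+ */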
+
+static int dsi_ctrl_supplies_deinit(struct dsi_ctrl *ctrl)
+{
+	int i = 0;
+	int rc = 0;
+	struct dsi_regulator_info *regs;
+
+	regs = &ctrl->pwr_info.digital;
+	for (i = 0; i < regs->count; i++) {
+		if (!regs->vregs[i].vreg)
+			pr_err("vreg is NULL, should not reach here\n");
+		else
+			devm_regulator_put(regs->vregs[i].vreg);
+	}
+
+	regs = &ctrl->pwr_info.host_pwr;
+	for (i = 0; i < regs->count; i++) {
+		if (!regs->vregs[i].vreg)
+			pr_err("vreg is NULL, should not reach here\n");
+		else
+			devm_regulator_put(regs->vregs[i].vreg);
+	}
+
+	if (ctrl->pwr_info.host_pwr.vregs) {
+		devm_kfree(&ctrl->pdev->dev, ctrl->pwr_info.host_pwr.vregs);
+		ctrl->pwr_info.host_pwr.vregs = NULL;
+		ctrl->pwr_info.host_pwr.count = 0;
+	}
+
+	if (ctrl->pwr_info.digital.vregs) {
+		devm_kfree(&ctrl->pdev->dev, ctrl->pwr_info.digital.vregs);
+		ctrl->pwr_info.digital.vregs = NULL;
+		ctrl->pwr_info.digital.count = 0;
+	}
+
+	return rc;
+}
+
+static int dsi_ctrl_supplies_init(struct platform_device *pdev,
+				  struct dsi_ctrl *ctrl)
+{
+	int rc = 0;
+	int i = 0;
+	struct dsi_regulator_info *regs;
+	struct regulator *vreg = NULL;
+
+	rc = dsi_clk_pwr_get_dt_vreg_data(&pdev->dev,
+					  &ctrl->pwr_info.digital,
+					  "qcom,core-supply-entries");
+	if (rc) {
+		pr_err("failed to get digital supply, rc = %d\n", rc);
+		goto error;
+	}
+
+	rc = dsi_clk_pwr_get_dt_vreg_data(&pdev->dev,
+					  &ctrl->pwr_info.host_pwr,
+					  "qcom,ctrl-supply-entries");
+	if (rc) {
+		pr_err("failed to get host power supplies, rc = %d\n", rc);
+		goto error_digital;
+	}
+
+	regs = &ctrl->pwr_info.digital;
+	for (i = 0; i < regs->count; i++) {
+		vreg = devm_regulator_get(&pdev->dev, regs->vregs[i].vreg_name);
+		if (IS_ERR(vreg)) {
+			pr_err("failed to get %s regulator\n",
+			       regs->vregs[i].vreg_name);
+			rc = PTR_ERR(vreg);
+			goto error_host_pwr;
+		}
+		regs->vregs[i].vreg = vreg;
+	}
+
+	regs = &ctrl->pwr_info.host_pwr;
+	for (i = 0; i < regs->count; i++) {
+		vreg = devm_regulator_get(&pdev->dev, regs->vregs[i].vreg_name);
+		if (IS_ERR(vreg)) {
+			pr_err("failed to get %s regulator\n",
+			       regs->vregs[i].vreg_name);
+			for (--i; i >= 0; i--)
+				devm_regulator_put(regs->vregs[i].vreg);
+			rc = PTR_ERR(vreg);
+			goto error_digital_put;
+		}
+		regs->vregs[i].vreg = vreg;
+	}
+
+	return rc;
+
+error_digital_put:
+	regs = &ctrl->pwr_info.digital;
+	for (i = 0; i < regs->count; i++)
+		devm_regulator_put(regs->vregs[i].vreg);
+error_host_pwr:
+	devm_kfree(&pdev->dev, ctrl->pwr_info.host_pwr.vregs);
+	ctrl->pwr_info.host_pwr.vregs = NULL;
+	ctrl->pwr_info.host_pwr.count = 0;
+error_digital:
+	devm_kfree(&pdev->dev, ctrl->pwr_info.digital.vregs);
+	ctrl->pwr_info.digital.vregs = NULL;
+	ctrl->pwr_info.digital.count = 0;
+error:
+	return rc;
+}
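+
+/*
+ * The supply lists are read from the "qcom,core-supply-entries" and
+ * "qcom,ctrl-supply-entries" properties by dsi_clk_pwr_get_dt_vreg_data();
+ * the per-entry node format is defined by that parser (not shown here), so
+ * no layout is assumed beyond the property names above.
+ */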
+
+static int dsi_ctrl_axi_bus_client_init(struct platform_device *pdev,
+					struct dsi_ctrl *ctrl)
+{
+	int rc = 0;
+	struct dsi_ctrl_bus_scale_info *bus = &ctrl->axi_bus_info;
+
+	bus->bus_scale_table = msm_bus_cl_get_pdata(pdev);
+	if (IS_ERR_OR_NULL(bus->bus_scale_table)) {
+		rc = PTR_ERR(bus->bus_scale_table);
+		pr_err("msm_bus_cl_get_pdata() failed, rc = %d\n", rc);
+		bus->bus_scale_table = NULL;
+		return rc;
+	}
+
+	bus->bus_handle = msm_bus_scale_register_client(bus->bus_scale_table);
+	if (!bus->bus_handle) {
+		rc = -EINVAL;
+		pr_err("failed to register axi bus client\n");
+	}
+
+	return rc;
+}
+
+static int dsi_ctrl_axi_bus_client_deinit(struct dsi_ctrl *ctrl)
+{
+	struct dsi_ctrl_bus_scale_info *bus = &ctrl->axi_bus_info;
+
+	if (bus->bus_handle) {
+		msm_bus_scale_unregister_client(bus->bus_handle);
+		bus->bus_handle = 0;
+	}
+
+	return 0;
+}
+
+static int dsi_ctrl_validate_panel_info(struct dsi_ctrl *dsi_ctrl,
+					struct dsi_host_config *config)
+{
+	int rc = 0;
+	struct dsi_host_common_cfg *host_cfg = &config->common_config;
+
+	if (config->panel_mode >= DSI_OP_MODE_MAX) {
+		pr_err("Invalid dsi operation mode (%d)\n", config->panel_mode);
+		rc = -EINVAL;
+		goto err;
+	}
+
+	if ((host_cfg->data_lanes & (DSI_CLOCK_LANE - 1)) == 0) {
+		pr_err("No data lanes are enabled\n");
+		rc = -EINVAL;
+		goto err;
+	}
+err:
+	return rc;
+}
+
+static int dsi_ctrl_update_link_freqs(struct dsi_ctrl *dsi_ctrl,
+				      struct dsi_host_config *config)
+{
+	int rc = 0;
+	u32 num_of_lanes = 0;
+	u32 bpp = 3; /* bytes per pixel (24bpp) */
+	u64 h_period, v_period, bit_rate, pclk_rate, bit_rate_per_lane,
+	    byte_clk_rate;
+	struct dsi_host_common_cfg *host_cfg = &config->common_config;
+	struct dsi_mode_info *timing = &config->video_timing;
+
+	if (host_cfg->data_lanes & DSI_DATA_LANE_0)
+		num_of_lanes++;
+	if (host_cfg->data_lanes & DSI_DATA_LANE_1)
+		num_of_lanes++;
+	if (host_cfg->data_lanes & DSI_DATA_LANE_2)
+		num_of_lanes++;
+	if (host_cfg->data_lanes & DSI_DATA_LANE_3)
+		num_of_lanes++;
+
+	h_period = DSI_H_TOTAL(timing);
+	v_period = DSI_V_TOTAL(timing);
+
+	bit_rate = h_period * v_period * timing->refresh_rate * bpp * 8;
+	bit_rate_per_lane = bit_rate;
+	do_div(bit_rate_per_lane, num_of_lanes);
+	pclk_rate = bit_rate;
+	do_div(pclk_rate, (8 * bpp));
+	byte_clk_rate = bit_rate_per_lane;
+	do_div(byte_clk_rate, 8);
+	pr_debug("bit_clk_rate = %llu, bit_clk_rate_per_lane = %llu\n",
+		 bit_rate, bit_rate_per_lane);
+	pr_debug("byte_clk_rate = %llu, pclk_rate = %llu\n",
+		  byte_clk_rate, pclk_rate);
+
+	rc = dsi_clk_set_link_frequencies(&dsi_ctrl->clk_info.link_clks,
+					  pclk_rate,
+					  byte_clk_rate,
+					  config->esc_clk_rate_hz);
+	if (rc)
+		pr_err("Failed to update link frequencies\n");
+
+	return rc;
+}
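+
+/*
+ * Worked example for the math above (illustrative numbers): with
+ * h_period = 2000, v_period = 1100, refresh = 60 and 4 data lanes,
+ * bit_rate = 2000 * 1100 * 60 * 3 * 8 = 3.168 GHz, so
+ * bit_rate_per_lane = 792 MHz, byte_clk_rate = 792 MHz / 8 = 99 MHz and
+ * pclk_rate = 3.168 GHz / 24 = 132 MHz.
+ */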
+
+static int dsi_ctrl_enable_supplies(struct dsi_ctrl *dsi_ctrl, bool enable)
+{
+	int rc = 0;
+
+	if (enable) {
+		rc = dsi_pwr_enable_regulator(&dsi_ctrl->pwr_info.host_pwr,
+					      true);
+		if (rc) {
+			pr_err("failed to enable host power regs, rc=%d\n", rc);
+			goto error;
+		}
+
+		rc = dsi_pwr_enable_regulator(&dsi_ctrl->pwr_info.digital,
+					      true);
+		if (rc) {
+			pr_err("failed to enable gdsc, rc=%d\n", rc);
+			(void)dsi_pwr_enable_regulator(
+						&dsi_ctrl->pwr_info.host_pwr,
+						false
+						);
+			goto error;
+		}
+	} else {
+		rc = dsi_pwr_enable_regulator(&dsi_ctrl->pwr_info.digital,
+					      false);
+		if (rc) {
+			pr_err("failed to disable gdsc, rc=%d\n", rc);
+			goto error;
+		}
+
+		rc = dsi_pwr_enable_regulator(&dsi_ctrl->pwr_info.host_pwr,
+					      false);
+		if (rc) {
+			pr_err("failed to disable host power regs, rc=%d\n",
+			       rc);
+			goto error;
+		}
+	}
+error:
+	return rc;
+}
+
+static int dsi_ctrl_vote_for_bandwidth(struct dsi_ctrl *dsi_ctrl, bool on)
+{
+	int rc = 0;
+	bool changed = false;
+	struct dsi_ctrl_bus_scale_info *axi_bus = &dsi_ctrl->axi_bus_info;
+
+	if (on) {
+		if (axi_bus->refcount == 0)
+			changed = true;
+
+		axi_bus->refcount++;
+	} else {
+		if (axi_bus->refcount != 0) {
+			axi_bus->refcount--;
+
+			if (axi_bus->refcount == 0)
+				changed = true;
+		} else {
+			pr_err("bus bw votes are not balanced\n");
+		}
+	}
+
+	if (changed) {
+		rc = msm_bus_scale_client_update_request(axi_bus->bus_handle,
+							 on ? 1 : 0);
+		if (rc)
+			pr_err("bus scale client update failed, rc=%d\n", rc);
+	}
+
+	return rc;
+}
+
+static int dsi_ctrl_copy_and_pad_cmd(struct dsi_ctrl *dsi_ctrl,
+				     const struct mipi_dsi_packet *packet,
+				     u8 **buffer,
+				     u32 *size)
+{
+	int rc = 0;
+	u8 *buf = NULL;
+	u32 len, i;
+
+	len = packet->size;
+	len = ALIGN(len, 4); /* round up to a 32-bit boundary */
+
+	buf = devm_kzalloc(&dsi_ctrl->pdev->dev, len * sizeof(u8), GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	for (i = 0; i < len; i++) {
+		if (i >= packet->size)
+			buf[i] = 0xFF;
+		else if (i < sizeof(packet->header))
+			buf[i] = packet->header[i];
+		else
+			buf[i] = packet->payload[i - sizeof(packet->header)];
+	}
+
+	if (packet->payload_length > 0)
+		buf[3] |= BIT(6);
+
+	buf[3] |= BIT(7);
+	*buffer = buf;
+	*size = len;
+
+	return rc;
+}
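+
+/*
+ * Example of the padding above: a 7-byte packet is rounded up to len = 8,
+ * bytes past packet->size are filled with 0xFF, and two flag bits are set
+ * in buf[3]: BIT(6) whenever a payload is present, BIT(7) always. Their
+ * hardware meaning (embedded-data and last-command markers) is assumed.
+ */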
+
+static int dsi_message_tx(struct dsi_ctrl *dsi_ctrl,
+			  const struct mipi_dsi_msg *msg,
+			  u32 flags)
+{
+	int rc = 0;
+	struct mipi_dsi_packet packet;
+	struct dsi_ctrl_cmd_dma_fifo_info cmd;
+	u32 hw_flags = 0;
+	u32 length = 0;
+	u8 *buffer = NULL;
+
+	if (!(flags & DSI_CTRL_CMD_FIFO_STORE)) {
+		pr_err("Memory DMA is not supported, use FIFO\n");
+		rc = -ENOTSUPP;
+		goto error;
+	}
+
+	rc = mipi_dsi_create_packet(&packet, msg);
+	if (rc) {
+		pr_err("Failed to create message packet, rc=%d\n", rc);
+		goto error;
+	}
+
+	if (flags & DSI_CTRL_CMD_FIFO_STORE) {
+		rc = dsi_ctrl_copy_and_pad_cmd(dsi_ctrl,
+					       &packet,
+					       &buffer,
+					       &length);
+		if (rc) {
+			pr_err("[%s] failed to copy message, rc=%d\n",
+			       dsi_ctrl->name, rc);
+			goto error;
+		}
+		cmd.command = (u32 *)buffer;
+		cmd.size = length;
+		cmd.en_broadcast = (flags & DSI_CTRL_CMD_BROADCAST) ?
+				     true : false;
+		cmd.is_master = (flags & DSI_CTRL_CMD_BROADCAST_MASTER) ?
+				  true : false;
+		cmd.use_lpm = (msg->flags & MIPI_DSI_MSG_USE_LPM) ?
+				  true : false;
+	}
+
+	hw_flags |= (flags & DSI_CTRL_CMD_DEFER_TRIGGER) ?
+			DSI_CTRL_HW_CMD_WAIT_FOR_TRIGGER : 0;
+
+	if (!(flags & DSI_CTRL_CMD_DEFER_TRIGGER))
+		reinit_completion(&dsi_ctrl->int_info.cmd_dma_done);
+
+	if (flags & DSI_CTRL_CMD_FIFO_STORE)
+		dsi_ctrl->hw.ops.kickoff_fifo_command(&dsi_ctrl->hw,
+						      &cmd,
+						      hw_flags);
+
+	if (!(flags & DSI_CTRL_CMD_DEFER_TRIGGER)) {
+		u32 retry = 10;
+		u32 status = 0;
+		u64 error = 0;
+		u32 mask = (DSI_CMD_MODE_DMA_DONE);
+
+		while ((status == 0) && (retry > 0)) {
+			udelay(1000);
+			status = dsi_ctrl->hw.ops.get_interrupt_status(
+								&dsi_ctrl->hw);
+			error = dsi_ctrl->hw.ops.get_error_status(
+								&dsi_ctrl->hw);
+			status &= mask;
+			retry--;
+			dsi_ctrl->hw.ops.clear_interrupt_status(&dsi_ctrl->hw,
+								status);
+			dsi_ctrl->hw.ops.clear_error_status(&dsi_ctrl->hw,
+							    error);
+		}
+		pr_debug("INT STATUS = %x, retry = %d\n", status, retry);
+		if (retry == 0)
+			pr_err("[DSI_%d]Command transfer failed\n",
+			       dsi_ctrl->index);
+
+		dsi_ctrl->hw.ops.reset_cmd_fifo(&dsi_ctrl->hw);
+	}
+error:
+	if (buffer)
+		devm_kfree(&dsi_ctrl->pdev->dev, buffer);
+	return rc;
+}
+
+static int dsi_set_max_return_size(struct dsi_ctrl *dsi_ctrl,
+				   const struct mipi_dsi_msg *rx_msg,
+				   u32 size)
+{
+	int rc = 0;
+	u8 tx[2] = { (u8)(size & 0xFF), (u8)(size >> 8) };
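+	/*
+	 * Per the MIPI DSI spec, the set-maximum-return-packet-size payload
+	 * is the 16-bit size sent LSB first, as encoded in tx[] above.
+	 */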
+	struct mipi_dsi_msg msg = {
+		.channel = rx_msg->channel,
+		.type = MIPI_DSI_SET_MAXIMUM_RETURN_PACKET_SIZE,
+		.tx_len = 2,
+		.tx_buf = tx,
+	};
+
+	rc = dsi_message_tx(dsi_ctrl, &msg, 0x0);
+	if (rc)
+		pr_err("failed to send max return size packet, rc=%d\n", rc);
+
+	return rc;
+}
+
+static int dsi_message_rx(struct dsi_ctrl *dsi_ctrl,
+			  const struct mipi_dsi_msg *msg,
+			  u32 flags)
+{
+	int rc = 0;
+	u32 rd_pkt_size;
+	u32 total_read_len;
+	u32 bytes_read = 0, tot_bytes_read = 0;
+	u32 current_read_len;
+	bool short_resp = false;
+	bool read_done = false;
+
+	if (msg->rx_len <= 2) {
+		short_resp = true;
+		rd_pkt_size = msg->rx_len;
+		total_read_len = 4;
+	} else {
+		short_resp = false;
+		current_read_len = 10;
+		if (msg->rx_len < current_read_len)
+			rd_pkt_size = msg->rx_len;
+		else
+			rd_pkt_size = current_read_len;
+
+		total_read_len = current_read_len + 6;
+	}
+
+	while (!read_done) {
+		rc = dsi_set_max_return_size(dsi_ctrl, msg, rd_pkt_size);
+		if (rc) {
+			pr_err("Failed to set max return packet size, rc=%d\n",
+			       rc);
+			goto error;
+		}
+
+		rc = dsi_message_tx(dsi_ctrl, msg, flags);
+		if (rc) {
+			pr_err("Message transmission failed, rc=%d\n", rc);
+			goto error;
+		}
+
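+		/*
+		 * TODO(assumed gap): nothing reads the response bytes back
+		 * from the controller here, so bytes_read stays 0 and a
+		 * read longer than a short response would never terminate.
+		 */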
+		tot_bytes_read += bytes_read;
+		if (short_resp)
+			read_done = true;
+		else if (msg->rx_len <= tot_bytes_read)
+			read_done = true;
+	}
+error:
+	return rc;
+}
+
+static int dsi_enable_ulps(struct dsi_ctrl *dsi_ctrl)
+{
+	int rc = 0;
+	u32 lanes = 0;
+	u32 ulps_lanes;
+
+	if (dsi_ctrl->host_config.panel_mode == DSI_OP_CMD_MODE)
+		lanes = dsi_ctrl->host_config.common_config.data_lanes;
+
+	lanes |= DSI_CLOCK_LANE;
+	dsi_ctrl->hw.ops.ulps_request(&dsi_ctrl->hw, lanes);
+
+	ulps_lanes = dsi_ctrl->hw.ops.get_lanes_in_ulps(&dsi_ctrl->hw);
+
+	if ((lanes & ulps_lanes) != lanes) {
+		pr_err("Failed to enter ULPS, request=0x%x, actual=0x%x\n",
+		       lanes, ulps_lanes);
+		rc = -EIO;
+	}
+
+	return rc;
+}
+
+static int dsi_disable_ulps(struct dsi_ctrl *dsi_ctrl)
+{
+	int rc = 0;
+	u32 ulps_lanes, lanes = 0;
+
+	if (dsi_ctrl->host_config.panel_mode == DSI_OP_CMD_MODE)
+		lanes = dsi_ctrl->host_config.common_config.data_lanes;
+
+	lanes |= DSI_CLOCK_LANE;
+	ulps_lanes = dsi_ctrl->hw.ops.get_lanes_in_ulps(&dsi_ctrl->hw);
+
+	if ((lanes & ulps_lanes) != lanes)
+		pr_err("Mismatch between lanes in ULPS\n");
+
+	lanes &= ulps_lanes;
+
+	dsi_ctrl->hw.ops.ulps_exit(&dsi_ctrl->hw, lanes);
+
+	/* 1 ms delay is recommended by specification */
+	udelay(1000);
+
+	dsi_ctrl->hw.ops.clear_ulps_request(&dsi_ctrl->hw, lanes);
+
+	ulps_lanes = dsi_ctrl->hw.ops.get_lanes_in_ulps(&dsi_ctrl->hw);
+	if (ulps_lanes & lanes) {
+		pr_err("Lanes (0x%x) stuck in ULPS\n", ulps_lanes);
+		rc = -EIO;
+	}
+
+	return rc;
+}
+
+static int dsi_ctrl_drv_state_init(struct dsi_ctrl *dsi_ctrl)
+{
+	int rc = 0;
+	bool splash_enabled = false;
+	struct dsi_ctrl_state_info *state = &dsi_ctrl->current_state;
+
+	if (!splash_enabled) {
+		state->power_state = DSI_CTRL_POWER_OFF;
+		state->cmd_engine_state = DSI_CTRL_ENGINE_OFF;
+		state->vid_engine_state = DSI_CTRL_ENGINE_OFF;
+		state->pwr_enabled = false;
+		state->core_clk_enabled = false;
+		state->link_clk_enabled = false;
+		state->ulps_enabled = false;
+		state->clamp_enabled = false;
+		state->clk_source_set = false;
+	}
+
+	return rc;
+}
+
+int dsi_ctrl_intr_deinit(struct dsi_ctrl *dsi_ctrl)
+{
+	struct dsi_ctrl_interrupts *ints = &dsi_ctrl->int_info;
+
+	devm_free_irq(&dsi_ctrl->pdev->dev, ints->irq, dsi_ctrl);
+
+	return 0;
+}
+
+static int dsi_ctrl_buffer_deinit(struct dsi_ctrl *dsi_ctrl)
+{
+	if (dsi_ctrl->tx_cmd_buf) {
+		msm_gem_put_iova(dsi_ctrl->tx_cmd_buf, 0);
+
+		msm_gem_free_object(dsi_ctrl->tx_cmd_buf);
+		dsi_ctrl->tx_cmd_buf = NULL;
+	}
+
+	return 0;
+}
+
+int dsi_ctrl_buffer_init(struct dsi_ctrl *dsi_ctrl)
+{
+	int rc = 0;
+	u64 iova = 0;
+
+	dsi_ctrl->tx_cmd_buf = msm_gem_new(dsi_ctrl->drm_dev,
+					   SZ_4K,
+					   MSM_BO_UNCACHED);
+
+	if (IS_ERR(dsi_ctrl->tx_cmd_buf)) {
+		rc = PTR_ERR(dsi_ctrl->tx_cmd_buf);
+		pr_err("failed to allocate gem, rc=%d\n", rc);
+		dsi_ctrl->tx_cmd_buf = NULL;
+		goto error;
+	}
+
+	dsi_ctrl->cmd_buffer_size = SZ_4K;
+
+	rc = msm_gem_get_iova(dsi_ctrl->tx_cmd_buf, 0, &iova);
+	if (rc) {
+		pr_err("failed to get iova, rc=%d\n", rc);
+		(void)dsi_ctrl_buffer_deinit(dsi_ctrl);
+		goto error;
+	}
+
+	if (iova & 0x07) {
+		pr_err("Tx command buffer is not 8 byte aligned\n");
+		rc = -ENOTSUPP;
+		(void)dsi_ctrl_buffer_deinit(dsi_ctrl);
+		goto error;
+	}
+error:
+	return rc;
+}
+
+static int dsi_enable_io_clamp(struct dsi_ctrl *dsi_ctrl, bool enable)
+{
+	bool en_ulps = dsi_ctrl->current_state.ulps_enabled;
+	u32 lanes = 0;
+
+	if (dsi_ctrl->host_config.panel_mode == DSI_OP_CMD_MODE)
+		lanes = dsi_ctrl->host_config.common_config.data_lanes;
+
+	lanes |= DSI_CLOCK_LANE;
+
+	if (enable)
+		dsi_ctrl->hw.ops.clamp_enable(&dsi_ctrl->hw, lanes, en_ulps);
+	else
+		dsi_ctrl->hw.ops.clamp_disable(&dsi_ctrl->hw, lanes, en_ulps);
+
+	return 0;
+}
+
+static int dsi_ctrl_dev_probe(struct platform_device *pdev)
+{
+	struct dsi_ctrl *dsi_ctrl;
+	struct dsi_ctrl_list_item *item;
+	const struct of_device_id *id;
+	enum dsi_ctrl_version version;
+	u32 index = 0;
+	int rc = 0;
+
+	id = of_match_node(msm_dsi_of_match, pdev->dev.of_node);
+	if (!id)
+		return -ENODEV;
+
+	version = *(enum dsi_ctrl_version *)id->data;
+
+	item = devm_kzalloc(&pdev->dev, sizeof(*item), GFP_KERNEL);
+	if (!item)
+		return -ENOMEM;
+
+	dsi_ctrl = devm_kzalloc(&pdev->dev, sizeof(*dsi_ctrl), GFP_KERNEL);
+	if (!dsi_ctrl)
+		return -ENOMEM;
+
+	rc = of_property_read_u32(pdev->dev.of_node, "cell-index", &index);
+	if (rc) {
+		pr_debug("cell index not set, default to 0\n");
+		index = 0;
+	}
+
+	dsi_ctrl->index = index;
+
+	dsi_ctrl->name = of_get_property(pdev->dev.of_node, "label", NULL);
+	if (!dsi_ctrl->name)
+		dsi_ctrl->name = DSI_CTRL_DEFAULT_LABEL;
+
+	rc = dsi_ctrl_init_regmap(pdev, dsi_ctrl);
+	if (rc) {
+		pr_err("Failed to parse register information, rc = %d\n", rc);
+		goto fail;
+	}
+
+	rc = dsi_ctrl_clocks_init(pdev, dsi_ctrl);
+	if (rc) {
+		pr_err("Failed to parse clock information, rc = %d\n", rc);
+		goto fail;
+	}
+
+	rc = dsi_ctrl_supplies_init(pdev, dsi_ctrl);
+	if (rc) {
+		pr_err("Failed to parse voltage supplies, rc = %d\n", rc);
+		goto fail_clks;
+	}
+
+	dsi_ctrl->version = version;
+	rc = dsi_catalog_ctrl_setup(&dsi_ctrl->hw, dsi_ctrl->version,
+				    dsi_ctrl->index);
+	if (rc) {
+		pr_err("Catalog does not support version (%d)\n",
+		       dsi_ctrl->version);
+		goto fail_supplies;
+	}
+
+	rc = dsi_ctrl_axi_bus_client_init(pdev, dsi_ctrl);
+	if (rc)
+		pr_err("failed to init axi bus client, rc = %d\n", rc);
+
+	item->ctrl = dsi_ctrl;
+
+	mutex_lock(&dsi_ctrl_list_lock);
+	list_add(&item->list, &dsi_ctrl_list);
+	mutex_unlock(&dsi_ctrl_list_lock);
+
+	mutex_init(&dsi_ctrl->ctrl_lock);
+
+	dsi_ctrl->pdev = pdev;
+	platform_set_drvdata(pdev, dsi_ctrl);
+
+	pr_debug("Probe successful for %s\n", dsi_ctrl->name);
+
+	return 0;
+
+fail_supplies:
+	(void)dsi_ctrl_supplies_deinit(dsi_ctrl);
+fail_clks:
+	(void)dsi_ctrl_clocks_deinit(dsi_ctrl);
+fail:
+	return rc;
+}
+
+static int dsi_ctrl_dev_remove(struct platform_device *pdev)
+{
+	int rc = 0;
+	struct dsi_ctrl *dsi_ctrl;
+	struct list_head *pos, *tmp;
+
+	dsi_ctrl = platform_get_drvdata(pdev);
+
+	mutex_lock(&dsi_ctrl_list_lock);
+	list_for_each_safe(pos, tmp, &dsi_ctrl_list) {
+		struct dsi_ctrl_list_item *n = list_entry(pos,
+						  struct dsi_ctrl_list_item,
+						  list);
+		if (n->ctrl == dsi_ctrl) {
+			list_del(&n->list);
+			break;
+		}
+	}
+	mutex_unlock(&dsi_ctrl_list_lock);
+
+	mutex_lock(&dsi_ctrl->ctrl_lock);
+	rc = dsi_ctrl_axi_bus_client_deinit(dsi_ctrl);
+	if (rc)
+		pr_err("failed to deinitialize axi bus client, rc = %d\n", rc);
+
+	rc = dsi_ctrl_supplies_deinit(dsi_ctrl);
+	if (rc)
+		pr_err("failed to deinitialize voltage supplies, rc=%d\n", rc);
+
+	rc = dsi_ctrl_clocks_deinit(dsi_ctrl);
+	if (rc)
+		pr_err("failed to deinitialize clocks, rc=%d\n", rc);
+
+	mutex_unlock(&dsi_ctrl->ctrl_lock);
+
+	mutex_destroy(&dsi_ctrl->ctrl_lock);
+	devm_kfree(&pdev->dev, dsi_ctrl);
+
+	platform_set_drvdata(pdev, NULL);
+	return 0;
+}
+
+static struct platform_driver dsi_ctrl_driver = {
+	.probe = dsi_ctrl_dev_probe,
+	.remove = dsi_ctrl_dev_remove,
+	.driver = {
+		.name = "drm_dsi_ctrl",
+		.of_match_table = msm_dsi_of_match,
+	},
+};
+
+/**
+ * dsi_ctrl_get() - get a dsi_ctrl handle from an of_node
+ * @of_node:    of_node of the DSI controller.
+ *
+ * Gets the DSI controller handle for the corresponding of_node. The ref count
+ * is incremented to one and all subsequent gets will fail until the original
+ * client calls a put.
+ *
+ * Return: DSI Controller handle.
+ */
+struct dsi_ctrl *dsi_ctrl_get(struct device_node *of_node)
+{
+	struct list_head *pos, *tmp;
+	struct dsi_ctrl *ctrl = NULL;
+
+	mutex_lock(&dsi_ctrl_list_lock);
+	list_for_each_safe(pos, tmp, &dsi_ctrl_list) {
+		struct dsi_ctrl_list_item *n;
+
+		n = list_entry(pos, struct dsi_ctrl_list_item, list);
+		if (n->ctrl->pdev->dev.of_node == of_node) {
+			ctrl = n->ctrl;
+			break;
+		}
+	}
+	mutex_unlock(&dsi_ctrl_list_lock);
+
+	if (!ctrl) {
+		pr_err("Device with of node not found\n");
+		ctrl = ERR_PTR(-EPROBE_DEFER);
+		return ctrl;
+	}
+
+	mutex_lock(&ctrl->ctrl_lock);
+	if (ctrl->refcount == 1) {
+		pr_err("[%s] Device in use\n", ctrl->name);
+		ctrl = ERR_PTR(-EBUSY);
+	} else {
+		ctrl->refcount++;
+	}
+	mutex_unlock(&ctrl->ctrl_lock);
+	return ctrl;
+}
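+
+/*
+ * Usage sketch (assuming a client with a controller of_node at hand):
+ *
+ *	ctrl = dsi_ctrl_get(of_node);
+ *	rc = dsi_ctrl_drv_init(ctrl, parent);
+ *	...
+ *	(void)dsi_ctrl_drv_deinit(ctrl);
+ *	dsi_ctrl_put(ctrl);
+ */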
+
+/**
+ * dsi_ctrl_put() - releases a dsi controller handle.
+ * @dsi_ctrl:       DSI controller handle.
+ *
+ * Releases the DSI controller handle. The driver will clean up all resources
+ * and put the DSI controller back into reset state.
+ */
+void dsi_ctrl_put(struct dsi_ctrl *dsi_ctrl)
+{
+	mutex_lock(&dsi_ctrl->ctrl_lock);
+
+	if (dsi_ctrl->refcount == 0)
+		pr_err("Unbalanced dsi_ctrl_put call\n");
+	else
+		dsi_ctrl->refcount--;
+
+	mutex_unlock(&dsi_ctrl->ctrl_lock);
+}
+
+/**
+ * dsi_ctrl_drv_init() - initialize dsi controller driver.
+ * @dsi_ctrl:      DSI controller handle.
+ * @parent:        Parent directory for debug fs.
+ *
+ * Initializes DSI controller driver. Driver should be initialized after
+ * dsi_ctrl_get() succeeds.
+ *
+ * Return: error code.
+ */
+int dsi_ctrl_drv_init(struct dsi_ctrl *dsi_ctrl, struct dentry *parent)
+{
+	int rc = 0;
+
+	if (!dsi_ctrl || !parent) {
+		pr_err("Invalid params\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&dsi_ctrl->ctrl_lock);
+	rc = dsi_ctrl_drv_state_init(dsi_ctrl);
+	if (rc) {
+		pr_err("Failed to initialize driver state, rc=%d\n", rc);
+		goto error;
+	}
+
+	rc = dsi_ctrl_debugfs_init(dsi_ctrl, parent);
+	if (rc) {
+		pr_err("[DSI_%d] failed to init debug fs, rc=%d\n",
+		       dsi_ctrl->index, rc);
+		goto error;
+	}
+
+error:
+	mutex_unlock(&dsi_ctrl->ctrl_lock);
+	return rc;
+}
+
+/**
+ * dsi_ctrl_drv_deinit() - de-initializes dsi controller driver
+ * @dsi_ctrl:      DSI controller handle.
+ *
+ * Releases all resources acquired by dsi_ctrl_drv_init().
+ *
+ * Return: error code.
+ */
+int dsi_ctrl_drv_deinit(struct dsi_ctrl *dsi_ctrl)
+{
+	int rc = 0;
+
+	if (!dsi_ctrl) {
+		pr_err("Invalid params\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&dsi_ctrl->ctrl_lock);
+
+	rc = dsi_ctrl_debugfs_deinit(dsi_ctrl);
+	if (rc)
+		pr_err("failed to release debugfs root, rc=%d\n", rc);
+
+	rc = dsi_ctrl_buffer_deinit(dsi_ctrl);
+	if (rc)
+		pr_err("Failed to free cmd buffers, rc=%d\n", rc);
+
+	mutex_unlock(&dsi_ctrl->ctrl_lock);
+	return rc;
+}
+
+/**
+ * dsi_ctrl_phy_sw_reset() - perform a PHY software reset
+ * @dsi_ctrl:         DSI controller handle.
+ *
+ * Performs a PHY software reset on the DSI controller. Reset should be done
+ * when the controller power state is DSI_CTRL_POWER_CORE_CLK_ON and the PHY is
+ * not enabled.
+ *
+ * This function will fail if driver is in any other state.
+ *
+ * Return: error code.
+ */
+int dsi_ctrl_phy_sw_reset(struct dsi_ctrl *dsi_ctrl)
+{
+	int rc = 0;
+
+	if (!dsi_ctrl) {
+		pr_err("Invalid params\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&dsi_ctrl->ctrl_lock);
+	rc = dsi_ctrl_check_state(dsi_ctrl, DSI_CTRL_OP_PHY_SW_RESET, 0x0);
+	if (rc) {
+		pr_err("[DSI_%d] Controller state check failed, rc=%d\n",
+		       dsi_ctrl->index, rc);
+		goto error;
+	}
+
+	dsi_ctrl->hw.ops.phy_sw_reset(&dsi_ctrl->hw);
+
+	pr_debug("[DSI_%d] PHY soft reset done\n", dsi_ctrl->index);
+	dsi_ctrl_update_state(dsi_ctrl, DSI_CTRL_OP_PHY_SW_RESET, 0x0);
+error:
+	mutex_unlock(&dsi_ctrl->ctrl_lock);
+	return rc;
+}
+
+/**
+ * dsi_ctrl_async_timing_update() - update only controller timing
+ * @dsi_ctrl:          DSI controller handle.
+ * @timing:            New DSI timing info.
+ *
+ * Updates host timing values to conduct a seamless transition to a new timing,
+ * for example to update the porch values during a dynamic fps switch.
+ *
+ * Return: error code.
+ */
+int dsi_ctrl_async_timing_update(struct dsi_ctrl *dsi_ctrl,
+		struct dsi_mode_info *timing)
+{
+	struct dsi_mode_info *host_mode;
+	int rc = 0;
+
+	if (!dsi_ctrl || !timing) {
+		pr_err("Invalid params\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&dsi_ctrl->ctrl_lock);
+
+	rc = dsi_ctrl_check_state(dsi_ctrl, DSI_CTRL_OP_ASYNC_TIMING,
+			DSI_CTRL_ENGINE_ON);
+	if (rc) {
+		pr_err("[DSI_%d] Controller state check failed, rc=%d\n",
+		       dsi_ctrl->index, rc);
+		goto exit;
+	}
+
+	host_mode = &dsi_ctrl->host_config.video_timing;
+	memcpy(host_mode, timing, sizeof(*host_mode));
+
+	dsi_ctrl->hw.ops.set_video_timing(&dsi_ctrl->hw, host_mode);
+
+exit:
+	mutex_unlock(&dsi_ctrl->ctrl_lock);
+	return rc;
+}
+
+/**
+ * dsi_ctrl_host_init() - Initialize DSI host hardware.
+ * @dsi_ctrl:        DSI controller handle.
+ *
+ * Initializes DSI controller hardware with host configuration provided by
+ * dsi_ctrl_update_host_config(). Initialization can be performed only during
+ * DSI_CTRL_POWER_CORE_CLK_ON state and after the PHY SW reset has been
+ * performed.
+ *
+ * Return: error code.
+ */
+int dsi_ctrl_host_init(struct dsi_ctrl *dsi_ctrl)
+{
+	int rc = 0;
+
+	if (!dsi_ctrl) {
+		pr_err("Invalid params\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&dsi_ctrl->ctrl_lock);
+	rc = dsi_ctrl_check_state(dsi_ctrl, DSI_CTRL_OP_HOST_INIT, 0x1);
+	if (rc) {
+		pr_err("[DSI_%d] Controller state check failed, rc=%d\n",
+		       dsi_ctrl->index, rc);
+		goto error;
+	}
+
+	dsi_ctrl->hw.ops.setup_lane_map(&dsi_ctrl->hw,
+					&dsi_ctrl->host_config.lane_map);
+
+	dsi_ctrl->hw.ops.host_setup(&dsi_ctrl->hw,
+				    &dsi_ctrl->host_config.common_config);
+
+	if (dsi_ctrl->host_config.panel_mode == DSI_OP_CMD_MODE) {
+		dsi_ctrl->hw.ops.cmd_engine_setup(&dsi_ctrl->hw,
+					&dsi_ctrl->host_config.common_config,
+					&dsi_ctrl->host_config.u.cmd_engine);
+
+		dsi_ctrl->hw.ops.setup_cmd_stream(&dsi_ctrl->hw,
+				dsi_ctrl->host_config.video_timing.h_active,
+				dsi_ctrl->host_config.video_timing.h_active * 3,
+				dsi_ctrl->host_config.video_timing.v_active,
+				0x0);
+	} else {
+		dsi_ctrl->hw.ops.video_engine_setup(&dsi_ctrl->hw,
+					&dsi_ctrl->host_config.common_config,
+					&dsi_ctrl->host_config.u.video_engine);
+		dsi_ctrl->hw.ops.set_video_timing(&dsi_ctrl->hw,
+					  &dsi_ctrl->host_config.video_timing);
+	}
+
+	dsi_ctrl->hw.ops.enable_status_interrupts(&dsi_ctrl->hw, 0x0);
+	dsi_ctrl->hw.ops.enable_error_interrupts(&dsi_ctrl->hw, 0x0);
+
+	/* Perform a soft reset before enabling dsi controller */
+	dsi_ctrl->hw.ops.soft_reset(&dsi_ctrl->hw);
+	pr_debug("[DSI_%d]Host initialization complete\n", dsi_ctrl->index);
+	dsi_ctrl_update_state(dsi_ctrl, DSI_CTRL_OP_HOST_INIT, 0x1);
+error:
+	mutex_unlock(&dsi_ctrl->ctrl_lock);
+	return rc;
+}
+
+/**
+ * dsi_ctrl_host_deinit() - De-Initialize DSI host hardware.
+ * @dsi_ctrl:        DSI controller handle.
+ *
+ * De-initializes DSI controller hardware. It can be performed only during
+ * DSI_CTRL_POWER_CORE_CLK_ON state after LINK clocks have been turned off.
+ *
+ * Return: error code.
+ */
+int dsi_ctrl_host_deinit(struct dsi_ctrl *dsi_ctrl)
+{
+	int rc = 0;
+
+	if (!dsi_ctrl) {
+		pr_err("Invalid params\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&dsi_ctrl->ctrl_lock);
+
+	rc = dsi_ctrl_check_state(dsi_ctrl, DSI_CTRL_OP_HOST_INIT, 0x0);
+	if (rc) {
+		pr_err("[DSI_%d] Controller state check failed, rc=%d\n",
+		       dsi_ctrl->index, rc);
+		pr_err("driver state check failed, rc=%d\n", rc);
+		goto error;
+	}
+
+	pr_debug("[DSI_%d] Host deinitization complete\n", dsi_ctrl->index);
+	dsi_ctrl_update_state(dsi_ctrl, DSI_CTRL_OP_HOST_INIT, 0x0);
+error:
+	mutex_unlock(&dsi_ctrl->ctrl_lock);
+	return rc;
+}
+
+/**
+ * dsi_ctrl_update_host_config() - update dsi host configuration
+ * @dsi_ctrl:          DSI controller handle.
+ * @config:            DSI host configuration.
+ * @flags:             dsi_mode_flags modifying the behavior
+ *
+ * Updates driver with new Host configuration to use for host initialization.
+ * This function call will only update the software context. The stored
+ * configuration information will be used when the host is initialized.
+ *
+ * Return: error code.
+ */
+int dsi_ctrl_update_host_config(struct dsi_ctrl *ctrl,
+				struct dsi_host_config *config,
+				int flags)
+{
+	int rc = 0;
+
+	if (!ctrl || !config) {
+		pr_err("Invalid params\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&ctrl->ctrl_lock);
+
+	rc = dsi_ctrl_validate_panel_info(ctrl, config);
+	if (rc) {
+		pr_err("panel validation failed, rc=%d\n", rc);
+		goto error;
+	}
+
+	if (!(flags & DSI_MODE_FLAG_SEAMLESS)) {
+		rc = dsi_ctrl_update_link_freqs(ctrl, config);
+		if (rc) {
+			pr_err("[%s] failed to update link frequencies, rc=%d\n",
+			       ctrl->name, rc);
+			goto error;
+		}
+	}
+
+	pr_debug("[DSI_%d]Host config updated\n", ctrl->index);
+	memcpy(&ctrl->host_config, config, sizeof(ctrl->host_config));
+error:
+	mutex_unlock(&ctrl->ctrl_lock);
+	return rc;
+}
+
+/**
+ * dsi_ctrl_validate_timing() - validate a video timing configuration
+ * @dsi_ctrl:       DSI controller handle.
+ * @mode:           Pointer to timing data.
+ *
+ * Driver will validate if the timing configuration is supported on the
+ * controller hardware.
+ *
+ * Return: error code if timing is not supported.
+ */
+int dsi_ctrl_validate_timing(struct dsi_ctrl *dsi_ctrl,
+			     struct dsi_mode_info *mode)
+{
+	int rc = 0;
+
+	if (!dsi_ctrl || !mode) {
+		pr_err("Invalid params\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&dsi_ctrl->ctrl_lock);
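+	/*
+	 * TODO(assumed stub): no hardware timing checks are implemented yet;
+	 * the lock/unlock pair is kept so callers see a consistent API.
+	 */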
+	mutex_unlock(&dsi_ctrl->ctrl_lock);
+
+	return rc;
+}
+
+/**
+ * dsi_ctrl_cmd_transfer() - Transfer commands on DSI link
+ * @dsi_ctrl:             DSI controller handle.
+ * @msg:                  Message to transfer on DSI link.
+ * @flags:                Modifiers for message transfer.
+ *
+ * Command transfer can be done only when command engine is enabled. The
+ * transfer API will block until either the command transfer finishes or
+ * the timeout value is reached. If the trigger is deferred, it will return
+ * without triggering the transfer. Command parameters are programmed to
+ * hardware.
+ *
+ * Return: error code.
+ */
+int dsi_ctrl_cmd_transfer(struct dsi_ctrl *dsi_ctrl,
+			  const struct mipi_dsi_msg *msg,
+			  u32 flags)
+{
+	int rc = 0;
+
+	if (!dsi_ctrl || !msg) {
+		pr_err("Invalid params\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&dsi_ctrl->ctrl_lock);
+
+	rc = dsi_ctrl_check_state(dsi_ctrl, DSI_CTRL_OP_CMD_TX, 0x0);
+	if (rc) {
+		pr_err("[DSI_%d] Controller state check failed, rc=%d\n",
+		       dsi_ctrl->index, rc);
+		goto error;
+	}
+
+	rc = dsi_ctrl_vote_for_bandwidth(dsi_ctrl, true);
+	if (rc) {
+		pr_err("bandwidth request failed, rc=%d\n", rc);
+		goto error;
+	}
+
+	if (flags & DSI_CTRL_CMD_READ) {
+		rc = dsi_message_rx(dsi_ctrl, msg, flags);
+		if (rc)
+			pr_err("read message failed, rc=%d\n", rc);
+	} else {
+		rc = dsi_message_tx(dsi_ctrl, msg, flags);
+		if (rc)
+			pr_err("command msg transfer failed, rc = %d\n", rc);
+	}
+
+	dsi_ctrl_update_state(dsi_ctrl, DSI_CTRL_OP_CMD_TX, 0x0);
+
+	(void)dsi_ctrl_vote_for_bandwidth(dsi_ctrl, false);
+error:
+	mutex_unlock(&dsi_ctrl->ctrl_lock);
+	return rc;
+}
+
+/**
+ * dsi_ctrl_cmd_tx_trigger() - Trigger a deferred command.
+ * @dsi_ctrl:              DSI controller handle.
+ * @flags:                 Modifiers.
+ *
+ * Return: error code.
+ */
+int dsi_ctrl_cmd_tx_trigger(struct dsi_ctrl *dsi_ctrl, u32 flags)
+{
+	int rc = 0;
+	u32 status = 0;
+	u32 mask = (DSI_CMD_MODE_DMA_DONE);
+
+	if (!dsi_ctrl) {
+		pr_err("Invalid params\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&dsi_ctrl->ctrl_lock);
+
+	reinit_completion(&dsi_ctrl->int_info.cmd_dma_done);
+
+	dsi_ctrl->hw.ops.trigger_command_dma(&dsi_ctrl->hw);
+
+	if ((flags & DSI_CTRL_CMD_BROADCAST) &&
+	    (flags & DSI_CTRL_CMD_BROADCAST_MASTER)) {
+		u32 retry = 10;
+
+		while ((status == 0) && (retry > 0)) {
+			udelay(1000);
+			status = dsi_ctrl->hw.ops.get_interrupt_status(
+								&dsi_ctrl->hw);
+			status &= mask;
+			retry--;
+			dsi_ctrl->hw.ops.clear_interrupt_status(&dsi_ctrl->hw,
+								status);
+		}
+		pr_debug("INT STATUS = %x, retry = %d\n", status, retry);
+		if (retry == 0)
+			pr_err("[DSI_%d]Command transfer failed\n",
+			       dsi_ctrl->index);
+	}
+
+	mutex_unlock(&dsi_ctrl->ctrl_lock);
+	return rc;
+}
+
+/**
+ * dsi_ctrl_set_power_state() - set power state for dsi controller
+ * @dsi_ctrl:          DSI controller handle.
+ * @state:             Power state.
+ *
+ * Set power state for DSI controller. Power state can be changed only when
+ * Controller, Video and Command engines are turned off.
+ *
+ * Return: error code.
+ */
+int dsi_ctrl_set_power_state(struct dsi_ctrl *dsi_ctrl,
+			     enum dsi_power_state state)
+{
+	int rc = 0;
+	bool core_clk_enable = false;
+	bool link_clk_enable = false;
+	bool reg_enable = false;
+	struct dsi_ctrl_state_info *drv_state;
+
+	if (!dsi_ctrl || (state >= DSI_CTRL_POWER_MAX)) {
+		pr_err("Invalid Params\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&dsi_ctrl->ctrl_lock);
+
+	rc = dsi_ctrl_check_state(dsi_ctrl, DSI_CTRL_OP_POWER_STATE_CHANGE,
+				  state);
+	if (rc) {
+		pr_err("[DSI_%d] Controller state check failed, rc=%d\n",
+		       dsi_ctrl->index, rc);
+		goto error;
+	}
+
+	if (state == DSI_CTRL_POWER_LINK_CLK_ON)
+		reg_enable = core_clk_enable = link_clk_enable = true;
+	else if (state == DSI_CTRL_POWER_CORE_CLK_ON)
+		reg_enable = core_clk_enable = true;
+	else if (state == DSI_CTRL_POWER_VREG_ON)
+		reg_enable = true;
+
+	drv_state = &dsi_ctrl->current_state;
+
+	if ((reg_enable) && (reg_enable != drv_state->pwr_enabled)) {
+		rc = dsi_ctrl_enable_supplies(dsi_ctrl, true);
+		if (rc) {
+			pr_err("[%d]failed to enable voltage supplies, rc=%d\n",
+			       dsi_ctrl->index, rc);
+			goto error;
+		}
+	}
+
+	if ((core_clk_enable) &&
+	    (core_clk_enable != drv_state->core_clk_enabled)) {
+		rc = dsi_clk_enable_core_clks(&dsi_ctrl->clk_info.core_clks,
+					      true);
+		if (rc) {
+			pr_err("[%d] failed to enable core clocks, rc=%d\n",
+			       dsi_ctrl->index, rc);
+			goto error;
+		}
+	}
+
+	if (link_clk_enable != drv_state->link_clk_enabled) {
+		rc = dsi_clk_enable_link_clks(&dsi_ctrl->clk_info.link_clks,
+					      link_clk_enable);
+		if (rc) {
+			pr_err("[%d] failed to enable link clocks, rc=%d\n",
+			       dsi_ctrl->index, rc);
+			goto error;
+		}
+	}
+
+	if ((!core_clk_enable) &&
+	    (core_clk_enable != drv_state->core_clk_enabled)) {
+		rc = dsi_clk_enable_core_clks(&dsi_ctrl->clk_info.core_clks,
+					      false);
+		if (rc) {
+			pr_err("[%d] failed to disable core clocks, rc=%d\n",
+			       dsi_ctrl->index, rc);
+			goto error;
+		}
+	}
+
+	if ((!reg_enable) && (reg_enable != drv_state->pwr_enabled)) {
+		rc = dsi_ctrl_enable_supplies(dsi_ctrl, false);
+		if (rc) {
+			pr_err("[%d]failed to disable vreg supplies, rc=%d\n",
+			       dsi_ctrl->index, rc);
+			goto error;
+		}
+	}
+
+	pr_debug("[DSI_%d] Power state updated to %d\n", dsi_ctrl->index,
+		 state);
+	dsi_ctrl_update_state(dsi_ctrl, DSI_CTRL_OP_POWER_STATE_CHANGE, state);
+error:
+	mutex_unlock(&dsi_ctrl->ctrl_lock);
+	return rc;
+}
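+
+/*
+ * Example power-up ladder implied by the states above (a sketch):
+ *
+ *	dsi_ctrl_set_power_state(ctrl, DSI_CTRL_POWER_VREG_ON);
+ *	dsi_ctrl_set_power_state(ctrl, DSI_CTRL_POWER_CORE_CLK_ON);
+ *	dsi_ctrl_set_power_state(ctrl, DSI_CTRL_POWER_LINK_CLK_ON);
+ *
+ * and the reverse order to power down.
+ */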
+
+/**
+ * dsi_ctrl_set_tpg_state() - enable/disable test pattern on the controller
+ * @dsi_ctrl:          DSI controller handle.
+ * @on:                enable/disable test pattern.
+ *
+ * Test pattern can be enabled only after Video engine (for video mode panels)
+ * or command engine (for cmd mode panels) is enabled.
+ *
+ * Return: error code.
+ */
+int dsi_ctrl_set_tpg_state(struct dsi_ctrl *dsi_ctrl, bool on)
+{
+	int rc = 0;
+
+	if (!dsi_ctrl) {
+		pr_err("Invalid params\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&dsi_ctrl->ctrl_lock);
+
+	rc = dsi_ctrl_check_state(dsi_ctrl, DSI_CTRL_OP_TPG, on);
+	if (rc) {
+		pr_err("[DSI_%d] Controller state check failed, rc=%d\n",
+		       dsi_ctrl->index, rc);
+		goto error;
+	}
+
+	if (on) {
+		if (dsi_ctrl->host_config.panel_mode == DSI_OP_VIDEO_MODE) {
+			dsi_ctrl->hw.ops.video_test_pattern_setup(&dsi_ctrl->hw,
+							  DSI_TEST_PATTERN_INC,
+							  0xFFFF);
+		} else {
+			dsi_ctrl->hw.ops.cmd_test_pattern_setup(
+							&dsi_ctrl->hw,
+							DSI_TEST_PATTERN_INC,
+							0xFFFF,
+							0x0);
+		}
+	}
+	dsi_ctrl->hw.ops.test_pattern_enable(&dsi_ctrl->hw, on);
+
+	pr_debug("[DSI_%d]Set test pattern state=%d\n", dsi_ctrl->index, on);
+	dsi_ctrl_update_state(dsi_ctrl, DSI_CTRL_OP_TPG, on);
+error:
+	mutex_unlock(&dsi_ctrl->ctrl_lock);
+	return rc;
+}
+
+/**
+ * dsi_ctrl_set_host_engine_state() - set host engine state
+ * @dsi_ctrl:            DSI Controller handle.
+ * @state:               Engine state.
+ *
+ * Host engine state can be modified only when DSI controller power state is
+ * set to DSI_CTRL_POWER_LINK_CLK_ON and cmd, video engines are disabled.
+ *
+ * Return: error code.
+ */
+int dsi_ctrl_set_host_engine_state(struct dsi_ctrl *dsi_ctrl,
+				   enum dsi_engine_state state)
+{
+	int rc = 0;
+
+	if (!dsi_ctrl || (state >= DSI_CTRL_ENGINE_MAX)) {
+		pr_err("Invalid params\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&dsi_ctrl->ctrl_lock);
+
+	rc = dsi_ctrl_check_state(dsi_ctrl, DSI_CTRL_OP_HOST_ENGINE, state);
+	if (rc) {
+		pr_err("[DSI_%d] Controller state check failed, rc=%d\n",
+		       dsi_ctrl->index, rc);
+		goto error;
+	}
+
+	if (state == DSI_CTRL_ENGINE_ON)
+		dsi_ctrl->hw.ops.ctrl_en(&dsi_ctrl->hw, true);
+	else
+		dsi_ctrl->hw.ops.ctrl_en(&dsi_ctrl->hw, false);
+
+	pr_debug("[DSI_%d] Set host engine state = %d\n", dsi_ctrl->index,
+		 state);
+	dsi_ctrl_update_state(dsi_ctrl, DSI_CTRL_OP_HOST_ENGINE, state);
+error:
+	mutex_unlock(&dsi_ctrl->ctrl_lock);
+	return rc;
+}
+
+/**
+ * dsi_ctrl_set_cmd_engine_state() - set command engine state
+ * @dsi_ctrl:            DSI Controller handle.
+ * @state:               Engine state.
+ *
+ * Command engine state can be modified only when DSI controller power state is
+ * set to DSI_CTRL_POWER_LINK_CLK_ON.
+ *
+ * Return: error code.
+ */
+int dsi_ctrl_set_cmd_engine_state(struct dsi_ctrl *dsi_ctrl,
+				  enum dsi_engine_state state)
+{
+	int rc = 0;
+
+	if (!dsi_ctrl || (state >= DSI_CTRL_ENGINE_MAX)) {
+		pr_err("Invalid params\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&dsi_ctrl->ctrl_lock);
+
+	rc = dsi_ctrl_check_state(dsi_ctrl, DSI_CTRL_OP_CMD_ENGINE, state);
+	if (rc) {
+		pr_err("[DSI_%d] Controller state check failed, rc=%d\n",
+		       dsi_ctrl->index, rc);
+		goto error;
+	}
+
+	if (state == DSI_CTRL_ENGINE_ON)
+		dsi_ctrl->hw.ops.cmd_engine_en(&dsi_ctrl->hw, true);
+	else
+		dsi_ctrl->hw.ops.cmd_engine_en(&dsi_ctrl->hw, false);
+
+	pr_debug("[DSI_%d] Set cmd engine state = %d\n", dsi_ctrl->index,
+		 state);
+	dsi_ctrl_update_state(dsi_ctrl, DSI_CTRL_OP_CMD_ENGINE, state);
+error:
+	mutex_unlock(&dsi_ctrl->ctrl_lock);
+	return rc;
+}
+
+/**
+ * dsi_ctrl_set_vid_engine_state() - set video engine state
+ * @dsi_ctrl:            DSI Controller handle.
+ * @state:               Engine state.
+ *
+ * Video engine state can be modified only when DSI controller power state is
+ * set to DSI_CTRL_POWER_LINK_CLK_ON.
+ *
+ * Return: error code.
+ */
+int dsi_ctrl_set_vid_engine_state(struct dsi_ctrl *dsi_ctrl,
+				  enum dsi_engine_state state)
+{
+	int rc = 0;
+	bool on;
+
+	if (!dsi_ctrl || (state >= DSI_CTRL_ENGINE_MAX)) {
+		pr_err("Invalid params\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&dsi_ctrl->ctrl_lock);
+
+	rc = dsi_ctrl_check_state(dsi_ctrl, DSI_CTRL_OP_VID_ENGINE, state);
+	if (rc) {
+		pr_err("[DSI_%d] Controller state check failed, rc=%d\n",
+		       dsi_ctrl->index, rc);
+		goto error;
+	}
+
+	on = (state == DSI_CTRL_ENGINE_ON) ? true : false;
+	dsi_ctrl->hw.ops.video_engine_en(&dsi_ctrl->hw, on);
+
+	/* perform a reset when turning off video engine */
+	if (!on)
+		dsi_ctrl->hw.ops.soft_reset(&dsi_ctrl->hw);
+
+	pr_debug("[DSI_%d] Set video engine state = %d\n", dsi_ctrl->index,
+		 state);
+	dsi_ctrl_update_state(dsi_ctrl, DSI_CTRL_OP_VID_ENGINE, state);
+error:
+	mutex_unlock(&dsi_ctrl->ctrl_lock);
+	return rc;
+}
+
+/**
+ * dsi_ctrl_set_ulps() - set ULPS state for DSI lanes.
+ * @dsi_ctrl:         DSI controller handle.
+ * @enable:           enable/disable ULPS.
+ *
+ * ULPS can be enabled/disabled after DSI host engine is turned on.
+ *
+ * Return: error code.
+ */
+int dsi_ctrl_set_ulps(struct dsi_ctrl *dsi_ctrl, bool enable)
+{
+	int rc = 0;
+
+	if (!dsi_ctrl) {
+		pr_err("Invalid params\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&dsi_ctrl->ctrl_lock);
+
+	rc = dsi_ctrl_check_state(dsi_ctrl, DSI_CTRL_OP_ULPS_TOGGLE, enable);
+	if (rc) {
+		pr_err("[DSI_%d] Controller state check failed, rc=%d\n",
+		       dsi_ctrl->index, rc);
+		goto error;
+	}
+
+	if (enable)
+		rc = dsi_enable_ulps(dsi_ctrl);
+	else
+		rc = dsi_disable_ulps(dsi_ctrl);
+
+	if (rc) {
+		pr_err("[DSI_%d] Ulps state change(%d) failed, rc=%d\n",
+		       dsi_ctrl->index, enable, rc);
+		goto error;
+	}
+
+	pr_debug("[DSI_%d] ULPS state = %d\n", dsi_ctrl->index, enable);
+	dsi_ctrl_update_state(dsi_ctrl, DSI_CTRL_OP_ULPS_TOGGLE, enable);
+error:
+	mutex_unlock(&dsi_ctrl->ctrl_lock);
+	return rc;
+}
+
+/**
+ * dsi_ctrl_set_clamp_state() - set clamp state for DSI phy
+ * @dsi_ctrl:             DSI controller handle.
+ * @enable:               enable/disable clamping.
+ *
+ * Clamps can be enabled/disabled while the DSI controller is still turned on.
+ *
+ * Return: error code.
+ */
+int dsi_ctrl_set_clamp_state(struct dsi_ctrl *dsi_ctrl, bool enable)
+{
+	int rc = 0;
+
+	if (!dsi_ctrl) {
+		pr_err("Invalid params\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&dsi_ctrl->ctrl_lock);
+
+	rc = dsi_ctrl_check_state(dsi_ctrl, DSI_CTRL_OP_CLAMP_TOGGLE, enable);
+	if (rc) {
+		pr_err("[DSI_%d] Controller state check failed, rc=%d\n",
+		       dsi_ctrl->index, rc);
+		goto error;
+	}
+
+	rc = dsi_enable_io_clamp(dsi_ctrl, enable);
+	if (rc) {
+		pr_err("[DSI_%d] Failed to enable IO clamp\n", dsi_ctrl->index);
+		goto error;
+	}
+
+	pr_debug("[DSI_%d] Clamp state = %d\n", dsi_ctrl->index, enable);
+	dsi_ctrl_update_state(dsi_ctrl, DSI_CTRL_OP_CLAMP_TOGGLE, enable);
+error:
+	mutex_unlock(&dsi_ctrl->ctrl_lock);
+	return rc;
+}
+
+/**
+ * dsi_ctrl_set_clock_source() - set clock source for dsi link clocks
+ * @dsi_ctrl:        DSI controller handle.
+ * @source_clks:     Source clocks for DSI link clocks.
+ *
+ * Clock source should be changed while link clocks are disabled.
+ *
+ * Return: error code.
+ */
+int dsi_ctrl_set_clock_source(struct dsi_ctrl *dsi_ctrl,
+			      struct dsi_clk_link_set *source_clks)
+{
+	int rc = 0;
+	u32 op_state = 0;
+
+	if (!dsi_ctrl || !source_clks) {
+		pr_err("Invalid params\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&dsi_ctrl->ctrl_lock);
+
+	if (source_clks->pixel_clk && source_clks->byte_clk)
+		op_state = 1;
+
+	rc = dsi_ctrl_check_state(dsi_ctrl, DSI_CTRL_OP_SET_CLK_SOURCE,
+				 op_state);
+	if (rc) {
+		pr_err("[DSI_%d] Controller state check failed, rc=%d\n",
+		       dsi_ctrl->index, rc);
+		goto error;
+	}
+
+	rc = dsi_clk_update_parent(source_clks, &dsi_ctrl->clk_info.rcg_clks);
+	if (rc) {
+		pr_err("[DSI_%d]Failed to update link clk parent, rc=%d\n",
+		       dsi_ctrl->index, rc);
+		(void)dsi_clk_update_parent(&dsi_ctrl->clk_info.pll_op_clks,
+					    &dsi_ctrl->clk_info.rcg_clks);
+		goto error;
+	}
+
+	dsi_ctrl->clk_info.pll_op_clks.byte_clk = source_clks->byte_clk;
+	dsi_ctrl->clk_info.pll_op_clks.pixel_clk = source_clks->pixel_clk;
+
+	pr_debug("[DSI_%d] Source clocks are updated\n", dsi_ctrl->index);
+
+	dsi_ctrl_update_state(dsi_ctrl, DSI_CTRL_OP_SET_CLK_SOURCE, op_state);
+
+error:
+	mutex_unlock(&dsi_ctrl->ctrl_lock);
+	return rc;
+}
+
+/**
+ * dsi_ctrl_drv_register() - register platform driver for dsi controller
+ */
+void dsi_ctrl_drv_register(void)
+{
+	platform_driver_register(&dsi_ctrl_driver);
+}
+
+/**
+ * dsi_ctrl_drv_unregister() - unregister platform driver
+ */
+void dsi_ctrl_drv_unregister(void)
+{
+	platform_driver_unregister(&dsi_ctrl_driver);
+}
diff -Nruw linux-4.4.115-fbx/drivers/gpu/drm/msm/dsi-staging./dsi_ctrl.h linux-4.4.115-fbx/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.h
--- linux-4.4.115-fbx/drivers/gpu/drm/msm/dsi-staging./dsi_ctrl.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.h	2019-01-22 16:16:23.487246262 +0100
@@ -0,0 +1,489 @@
+/*
+ * Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _DSI_CTRL_H_
+#define _DSI_CTRL_H_
+
+#include <linux/debugfs.h>
+
+#include "dsi_defs.h"
+#include "dsi_ctrl_hw.h"
+#include "dsi_clk_pwr.h"
+#include "drm_mipi_dsi.h"
+
+/*
+ * DSI Command transfer modifiers
+ * @DSI_CTRL_CMD_READ:             The current transfer involves reading data.
+ * @DSI_CTRL_CMD_BROADCAST:        The current transfer needs to be done in
+ *				   broadcast mode to multiple slaves.
+ * @DSI_CTRL_CMD_BROADCAST_MASTER: This controller is the master and the slaves
+ *				   sync to this trigger.
+ * @DSI_CTRL_CMD_DEFER_TRIGGER:    Defer the command trigger to later.
+ * @DSI_CTRL_CMD_FIFO_STORE:       Use FIFO for command transfer in place of
+ *				   reading data from memory.
+ */
+#define DSI_CTRL_CMD_READ             0x1
+#define DSI_CTRL_CMD_BROADCAST        0x2
+#define DSI_CTRL_CMD_BROADCAST_MASTER 0x4
+#define DSI_CTRL_CMD_DEFER_TRIGGER    0x8
+#define DSI_CTRL_CMD_FIFO_STORE       0x10
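+
+/*
+ * Illustrative combination of the modifiers above (a sketch, not code
+ * taken from this driver): a master controller queueing a broadcast
+ * write whose DMA trigger is issued later by the caller.
+ *
+ *	u32 flags = DSI_CTRL_CMD_BROADCAST |
+ *		    DSI_CTRL_CMD_BROADCAST_MASTER |
+ *		    DSI_CTRL_CMD_DEFER_TRIGGER;
+ */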
+
+/**
+ * enum dsi_power_state - defines power states for dsi controller.
+ * @DSI_CTRL_POWER_OFF:         DSI controller is powered down.
+ * @DSI_CTRL_POWER_VREG_ON:     Digital and analog supplies for DSI controller
+ *				are powered on.
+ * @DSI_CTRL_POWER_CORE_CLK_ON: DSI core clocks for register access are enabled.
+ * @DSI_CTRL_POWER_LINK_CLK_ON: DSI link clocks for link transfer are enabled.
+ * @DSI_CTRL_POWER_MAX:         Maximum value.
+ */
+enum dsi_power_state {
+	DSI_CTRL_POWER_OFF = 0,
+	DSI_CTRL_POWER_VREG_ON,
+	DSI_CTRL_POWER_CORE_CLK_ON,
+	DSI_CTRL_POWER_LINK_CLK_ON,
+	DSI_CTRL_POWER_MAX,
+};
+
+/**
+ * enum dsi_engine_state - define engine status for dsi controller.
+ * @DSI_CTRL_ENGINE_OFF:  Engine is turned off.
+ * @DSI_CTRL_ENGINE_ON:   Engine is turned on.
+ * @DSI_CTRL_ENGINE_MAX:  Maximum value.
+ */
+enum dsi_engine_state {
+	DSI_CTRL_ENGINE_OFF = 0,
+	DSI_CTRL_ENGINE_ON,
+	DSI_CTRL_ENGINE_MAX,
+};
+
+/**
+ * struct dsi_ctrl_power_info - digital and analog power supplies for dsi host
+ * @digital:  Digital power supply required to turn on DSI controller hardware.
+ * @host_pwr: Analog power supplies required to turn on DSI controller hardware.
+ *            Even though the DSI controller itself does not require an analog
+ *            power supply, supplies required for PLL can be defined here to
+ *            allow proper control over these supplies.
+ */
+struct dsi_ctrl_power_info {
+	struct dsi_regulator_info digital;
+	struct dsi_regulator_info host_pwr;
+};
+
+/**
+ * struct dsi_ctrl_clk_info - clock information for DSI controller
+ * @core_clks:          Core clocks needed to access DSI controller registers.
+ * @link_clks:          Link clocks required to transmit data over DSI link.
+ * @rcg_clks:           Root clock generation clocks generated in MMSS_CC. The
+ *			output of the PLL is set as parent for these root
+ *			clocks. These clocks are specific to controller
+ *			instance.
+ * @mux_clks:           Mux clocks used for Dynamic refresh feature.
+ * @ext_clks:           External byte/pixel clocks from the MMSS block. These
+ *			clocks are set as parent to rcg clocks.
+ * @pll_op_clks:        TODO:
+ * @shadow_clks:        TODO:
+ */
+struct dsi_ctrl_clk_info {
+	/* Clocks parsed from DT */
+	struct dsi_core_clk_info core_clks;
+	struct dsi_link_clk_info link_clks;
+	struct dsi_clk_link_set rcg_clks;
+
+	/* Clocks set by DSI Manager */
+	struct dsi_clk_link_set mux_clks;
+	struct dsi_clk_link_set ext_clks;
+	struct dsi_clk_link_set pll_op_clks;
+	struct dsi_clk_link_set shadow_clks;
+};
+
+/**
+ * struct dsi_ctrl_bus_scale_info - Bus scale info for msm-bus bandwidth voting
+ * @bus_scale_table:        Bus scale voting usecases.
+ * @bus_handle:             Handle used for voting bandwidth.
+ * @refcount:               reference count.
+ */
+struct dsi_ctrl_bus_scale_info {
+	struct msm_bus_scale_pdata *bus_scale_table;
+	u32 bus_handle;
+	u32 refcount;
+};
+
+/**
+ * struct dsi_ctrl_state_info - current driver state information
+ * @power_state:        Controller power state.
+ * @cmd_engine_state:   Status of DSI command engine.
+ * @vid_engine_state:   Status of DSI video engine.
+ * @controller_state:   Status of DSI Controller engine.
+ * @pwr_enabled:        Set to true, if voltage supplies are enabled.
+ * @core_clk_enabled:   Set to true, if core clocks are enabled.
+ * @link_clk_enabled:   Set to true, if link clocks are enabled.
+ * @ulps_enabled:       Set to true, if lanes are in ULPS state.
+ * @clamp_enabled:      Set to true, if PHY output is clamped.
+ * @clk_source_set:     Set to true, if parent is set for DSI link clocks.
+ * @host_initialized:   Set to true, if DSI host is initialized.
+ * @tpg_enabled:        Set to true, if test pattern generation is enabled.
+ */
+struct dsi_ctrl_state_info {
+	enum dsi_power_state power_state;
+	enum dsi_engine_state cmd_engine_state;
+	enum dsi_engine_state vid_engine_state;
+	enum dsi_engine_state controller_state;
+	bool pwr_enabled;
+	bool core_clk_enabled;
+	bool link_clk_enabled;
+	bool ulps_enabled;
+	bool clamp_enabled;
+	bool clk_source_set;
+	bool host_initialized;
+	bool tpg_enabled;
+};
+
+/**
+ * struct dsi_ctrl_interrupts - define interrupt information
+ * @irq:                   IRQ id for the DSI controller.
+ * @intr_lock:             Spinlock to protect access to interrupt registers.
+ * @interrupt_status:      Status interrupts which need to be serviced.
+ * @error_status:          Error interrupts which need to be serviced.
+ * @interrupts_enabled:    Status interrupts which are enabled.
+ * @errors_enabled:        Error interrupts which are enabled.
+ * @cmd_dma_done:          Completion signal for DSI_CMD_MODE_DMA_DONE interrupt
+ * @vid_frame_done:        Completion signal for DSI_VIDEO_MODE_FRAME_DONE int.
+ * @cmd_frame_done:        Completion signal for DSI_CMD_FRAME_DONE interrupt.
+ * @interrupt_done_work:   Work item for servicing status interrupts.
+ * @error_status_work:     Work item for servicing error interrupts.
+ */
+struct dsi_ctrl_interrupts {
+	u32 irq;
+	spinlock_t intr_lock; /* protects access to interrupt registers */
+	u32 interrupt_status;
+	u64 error_status;
+
+	u32 interrupts_enabled;
+	u64 errors_enabled;
+
+	struct completion cmd_dma_done;
+	struct completion vid_frame_done;
+	struct completion cmd_frame_done;
+
+	struct work_struct interrupt_done_work;
+	struct work_struct error_status_work;
+};
+
+/**
+ * struct dsi_ctrl - DSI controller object
+ * @pdev:                Pointer to platform device.
+ * @index:               Instance id.
+ * @name:                Name of the controller instance.
+ * @refcount:            ref counter.
+ * @ctrl_lock:           Mutex for hardware and object access.
+ * @drm_dev:             Pointer to DRM device.
+ * @version:             DSI controller version.
+ * @hw:                  DSI controller hardware object.
+ * @current_state:       Current driver and hardware state.
+ * @int_info:            Interrupt information.
+ * @clk_info:            Clock information.
+ * @pwr_info:            Power information.
+ * @axi_bus_info:        AXI bus information.
+ * @host_config:         Current host configuration.
+ * @tx_cmd_buf:          Tx command buffer.
+ * @cmd_buffer_size:     Size of command buffer.
+ * @debugfs_root:        Root for debugfs entries.
+ */
+struct dsi_ctrl {
+	struct platform_device *pdev;
+	u32 index;
+	const char *name;
+	u32 refcount;
+	struct mutex ctrl_lock;
+	struct drm_device *drm_dev;
+
+	enum dsi_ctrl_version version;
+	struct dsi_ctrl_hw hw;
+
+	/* Current state */
+	struct dsi_ctrl_state_info current_state;
+
+	struct dsi_ctrl_interrupts int_info;
+	/* Clock and power states */
+	struct dsi_ctrl_clk_info clk_info;
+	struct dsi_ctrl_power_info pwr_info;
+	struct dsi_ctrl_bus_scale_info axi_bus_info;
+
+	struct dsi_host_config host_config;
+	/* Command tx and rx */
+	struct drm_gem_object *tx_cmd_buf;
+	u32 cmd_buffer_size;
+
+	/* Debug Information */
+	struct dentry *debugfs_root;
+};
+
+/**
+ * dsi_ctrl_get() - get a dsi_ctrl handle from an of_node
+ * @of_node:    of_node of the DSI controller.
+ *
+ * Gets the DSI controller handle for the corresponding of_node. The ref count
+ * is incremented to one and all subsequent gets will fail until the original
+ * client calls a put.
+ *
+ * Return: DSI Controller handle.
+ */
+struct dsi_ctrl *dsi_ctrl_get(struct device_node *of_node);
+
+/**
+ * dsi_ctrl_put() - releases a dsi controller handle.
+ * @dsi_ctrl:       DSI controller handle.
+ *
+ * Releases the DSI controller. The driver will clean up all resources and put
+ * the DSI controller back into reset state.
+ */
+void dsi_ctrl_put(struct dsi_ctrl *dsi_ctrl);
+
+/**
+ * dsi_ctrl_drv_init() - initialize dsi controller driver.
+ * @dsi_ctrl:      DSI controller handle.
+ * @parent:        Parent directory for debug fs.
+ *
+ * Initializes DSI controller driver. Driver should be initialized after
+ * dsi_ctrl_get() succeeds.
+ *
+ * Return: error code.
+ */
+int dsi_ctrl_drv_init(struct dsi_ctrl *dsi_ctrl, struct dentry *parent);
+
+/**
+ * dsi_ctrl_drv_deinit() - de-initializes dsi controller driver
+ * @dsi_ctrl:      DSI controller handle.
+ *
+ * Releases all resources acquired by dsi_ctrl_drv_init().
+ *
+ * Return: error code.
+ */
+int dsi_ctrl_drv_deinit(struct dsi_ctrl *dsi_ctrl);
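+
+/*
+ * Typical handle lifecycle built from the calls above (a sketch, error
+ * handling elided; "parent_dir" is a hypothetical debugfs dentry):
+ *
+ *	struct dsi_ctrl *ctrl = dsi_ctrl_get(of_node);
+ *
+ *	rc = dsi_ctrl_drv_init(ctrl, parent_dir);
+ *	...
+ *	rc = dsi_ctrl_drv_deinit(ctrl);
+ *	dsi_ctrl_put(ctrl);
+ */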
+
+/**
+ * dsi_ctrl_validate_timing() - validate a video timing configuration
+ * @dsi_ctrl:       DSI controller handle.
+ * @timing:         Pointer to timing data.
+ *
+ * Driver will validate if the timing configuration is supported on the
+ * controller hardware.
+ *
+ * Return: error code if timing is not supported.
+ */
+int dsi_ctrl_validate_timing(struct dsi_ctrl *dsi_ctrl,
+			     struct dsi_mode_info *timing);
+
+/**
+ * dsi_ctrl_update_host_config() - update dsi host configuration
+ * @dsi_ctrl:          DSI controller handle.
+ * @config:            DSI host configuration.
+ * @flags:             dsi_mode_flags modifying the behavior
+ *
+ * Updates driver with new Host configuration to use for host initialization.
+ * This function call will only update the software context. The stored
+ * configuration information will be used when the host is initialized.
+ *
+ * Return: error code.
+ */
+int dsi_ctrl_update_host_config(struct dsi_ctrl *dsi_ctrl,
+				struct dsi_host_config *config,
+				int flags);
+
+/**
+ * dsi_ctrl_async_timing_update() - update only controller timing
+ * @dsi_ctrl:          DSI controller handle.
+ * @timing:            New DSI timing info
+ *
+ * Updates host timing values to asynchronously transition to a new timing,
+ * for example to update the porch values in a seamless/dynamic fps switch.
+ *
+ * Return: error code.
+ */
+int dsi_ctrl_async_timing_update(struct dsi_ctrl *dsi_ctrl,
+		struct dsi_mode_info *timing);
+
+/**
+ * dsi_ctrl_phy_sw_reset() - perform a PHY software reset
+ * @dsi_ctrl:         DSI controller handle.
+ *
+ * Performs a PHY software reset on the DSI controller. Reset should be done
+ * when the controller power state is DSI_CTRL_POWER_CORE_CLK_ON and the PHY is
+ * not enabled.
+ *
+ * This function will fail if driver is in any other state.
+ *
+ * Return: error code.
+ */
+int dsi_ctrl_phy_sw_reset(struct dsi_ctrl *dsi_ctrl);
+
+/**
+ * dsi_ctrl_host_init() - Initialize DSI host hardware.
+ * @dsi_ctrl:        DSI controller handle.
+ *
+ * Initializes DSI controller hardware with host configuration provided by
+ * dsi_ctrl_update_host_config(). Initialization can be performed only during
+ * DSI_CTRL_POWER_CORE_CLK_ON state and after the PHY SW reset has been
+ * performed.
+ *
+ * Return: error code.
+ */
+int dsi_ctrl_host_init(struct dsi_ctrl *dsi_ctrl);
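+
+/*
+ * Initialization ordering implied by the comments above (a sketch; a
+ * flags value of 0 for a plain mode set is an assumption):
+ *
+ *	rc = dsi_ctrl_update_host_config(dsi_ctrl, &config, 0);
+ *	rc = dsi_ctrl_set_power_state(dsi_ctrl, DSI_CTRL_POWER_CORE_CLK_ON);
+ *	rc = dsi_ctrl_phy_sw_reset(dsi_ctrl);
+ *	rc = dsi_ctrl_host_init(dsi_ctrl);
+ */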
+
+/**
+ * dsi_ctrl_host_deinit() - De-Initialize DSI host hardware.
+ * @dsi_ctrl:        DSI controller handle.
+ *
+ * De-initializes DSI controller hardware. It can be performed only during
+ * DSI_CTRL_POWER_CORE_CLK_ON state after LINK clocks have been turned off.
+ *
+ * Return: error code.
+ */
+int dsi_ctrl_host_deinit(struct dsi_ctrl *dsi_ctrl);
+
+/**
+ * dsi_ctrl_set_tpg_state() - enable/disable test pattern on the controller
+ * @dsi_ctrl:          DSI controller handle.
+ * @on:                enable/disable test pattern.
+ *
+ * Test pattern can be enabled only after Video engine (for video mode panels)
+ * or command engine (for cmd mode panels) is enabled.
+ *
+ * Return: error code.
+ */
+int dsi_ctrl_set_tpg_state(struct dsi_ctrl *dsi_ctrl, bool on);
+
+/**
+ * dsi_ctrl_cmd_transfer() - Transfer commands on DSI link
+ * @dsi_ctrl:             DSI controller handle.
+ * @msg:                  Message to transfer on DSI link.
+ * @flags:                Modifiers for message transfer.
+ *
+ * Command transfer can be done only when command engine is enabled. The
+ * transfer API will block until either the command transfer finishes or the
+ * timeout value is reached. If the trigger is deferred, it will return without
+ * triggering the transfer. Command parameters are programmed to hardware.
+ *
+ * Return: error code.
+ */
+int dsi_ctrl_cmd_transfer(struct dsi_ctrl *dsi_ctrl,
+			  const struct mipi_dsi_msg *msg,
+			  u32 flags);
+
+/**
+ * dsi_ctrl_cmd_tx_trigger() - Trigger a deferred command.
+ * @dsi_ctrl:              DSI controller handle.
+ * @flags:                 Modifiers.
+ *
+ * Return: error code.
+ */
+int dsi_ctrl_cmd_tx_trigger(struct dsi_ctrl *dsi_ctrl, u32 flags);
+
+/**
+ * dsi_ctrl_set_power_state() - set power state for dsi controller
+ * @dsi_ctrl:          DSI controller handle.
+ * @state:             Power state.
+ *
+ * Set power state for the DSI controller. Power state can be changed only
+ * when the controller, video and command engines are turned off.
+ *
+ * Return: error code.
+ */
+int dsi_ctrl_set_power_state(struct dsi_ctrl *dsi_ctrl,
+			     enum dsi_power_state state);
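+
+/*
+ * Typical power-up ladder (a sketch; each step is validated against the
+ * state machine in dsi_ctrl.c, and whether intermediate states may be
+ * skipped is not specified here):
+ *
+ *	rc = dsi_ctrl_set_power_state(dsi_ctrl, DSI_CTRL_POWER_VREG_ON);
+ *	rc = dsi_ctrl_set_power_state(dsi_ctrl, DSI_CTRL_POWER_CORE_CLK_ON);
+ *	rc = dsi_ctrl_set_power_state(dsi_ctrl, DSI_CTRL_POWER_LINK_CLK_ON);
+ */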
+
+/**
+ * dsi_ctrl_set_cmd_engine_state() - set command engine state
+ * @dsi_ctrl:            DSI Controller handle.
+ * @state:               Engine state.
+ *
+ * Command engine state can be modified only when DSI controller power state is
+ * set to DSI_CTRL_POWER_LINK_CLK_ON.
+ *
+ * Return: error code.
+ */
+int dsi_ctrl_set_cmd_engine_state(struct dsi_ctrl *dsi_ctrl,
+				  enum dsi_engine_state state);
+
+/**
+ * dsi_ctrl_set_vid_engine_state() - set video engine state
+ * @dsi_ctrl:            DSI Controller handle.
+ * @state:               Engine state.
+ *
+ * Video engine state can be modified only when DSI controller power state is
+ * set to DSI_CTRL_POWER_LINK_CLK_ON.
+ *
+ * Return: error code.
+ */
+int dsi_ctrl_set_vid_engine_state(struct dsi_ctrl *dsi_ctrl,
+				  enum dsi_engine_state state);
+
+/**
+ * dsi_ctrl_set_host_engine_state() - set host engine state
+ * @dsi_ctrl:            DSI Controller handle.
+ * @state:               Engine state.
+ *
+ * Host engine state can be modified only when the DSI controller power state
+ * is set to DSI_CTRL_POWER_LINK_CLK_ON and the command and video engines are
+ * disabled.
+ *
+ * Return: error code.
+ */
+int dsi_ctrl_set_host_engine_state(struct dsi_ctrl *dsi_ctrl,
+				   enum dsi_engine_state state);
+
+/**
+ * dsi_ctrl_set_ulps() - set ULPS state for DSI lanes.
+ * @dsi_ctrl:         DSI controller handle.
+ * @enable:           enable/disable ULPS.
+ *
+ * ULPS can be enabled/disabled after DSI host engine is turned on.
+ *
+ * Return: error code.
+ */
+int dsi_ctrl_set_ulps(struct dsi_ctrl *dsi_ctrl, bool enable);
+
+/**
+ * dsi_ctrl_set_clamp_state() - set clamp state for DSI phy
+ * @dsi_ctrl:             DSI controller handle.
+ * @enable:               enable/disable clamping.
+ *
+ * Clamps can be enabled/disabled while the DSI controller is still turned on.
+ *
+ * Return: error code.
+ */
+int dsi_ctrl_set_clamp_state(struct dsi_ctrl *dsi_ctrl, bool enable);
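+
+/*
+ * A plausible low-power entry built from the two calls above (a sketch;
+ * this exact ordering is an assumption, not something mandated by this
+ * header):
+ *
+ *	rc = dsi_ctrl_set_ulps(dsi_ctrl, true);
+ *	rc = dsi_ctrl_set_clamp_state(dsi_ctrl, true);
+ */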
+
+/**
+ * dsi_ctrl_set_clock_source() - set clock source for dsi link clocks
+ * @dsi_ctrl:        DSI controller handle.
+ * @source_clks:     Source clocks for DSI link clocks.
+ *
+ * Clock source should be changed while link clocks are disabled.
+ *
+ * Return: error code.
+ */
+int dsi_ctrl_set_clock_source(struct dsi_ctrl *dsi_ctrl,
+			      struct dsi_clk_link_set *source_clks);
+
+/**
+ * dsi_ctrl_drv_register() - register platform driver for dsi controller
+ */
+void dsi_ctrl_drv_register(void);
+
+/**
+ * dsi_ctrl_drv_unregister() - unregister platform driver
+ */
+void dsi_ctrl_drv_unregister(void);
+
+#endif /* _DSI_CTRL_H_ */
diff -Nruw linux-4.4.115-fbx/drivers/gpu/drm/msm/dsi-staging./dsi_ctrl_hw_1_4.c linux-4.4.115-fbx/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw_1_4.c
--- linux-4.4.115-fbx/drivers/gpu/drm/msm/dsi-staging./dsi_ctrl_hw_1_4.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw_1_4.c	2019-10-29 09:26:23.621202963 +0100
@@ -0,0 +1,1533 @@
+/*
+ * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) "dsi-hw:" fmt
+#include <linux/delay.h>
+
+#include "dsi_ctrl_hw.h"
+#include "dsi_ctrl_reg_1_4.h"
+#include "dsi_hw.h"
+
+#define MMSS_MISC_CLAMP_REG_OFF           0x0014
+
+/* Unsupported formats default to RGB888 */
+static const u8 cmd_mode_format_map[DSI_PIXEL_FORMAT_MAX] = {
+	0x6, 0x7, 0x8, 0x8, 0x0, 0x3, 0x4 };
+static const u8 video_mode_format_map[DSI_PIXEL_FORMAT_MAX] = {
+	0x0, 0x1, 0x2, 0x3, 0x3, 0x3, 0x3 };
+
+/**
+ * dsi_setup_trigger_controls() - setup dsi trigger configurations
+ * @ctrl:             Pointer to the controller host hardware.
+ * @cfg:              DSI host configuration that is common to both video and
+ *                    command modes.
+ */
+static void dsi_setup_trigger_controls(struct dsi_ctrl_hw *ctrl,
+				       struct dsi_host_common_cfg *cfg)
+{
+	u32 reg = 0;
+	const u8 trigger_map[DSI_TRIGGER_MAX] = {
+		0x0, 0x2, 0x1, 0x4, 0x5, 0x6 };
+
+	reg |= (cfg->te_mode == DSI_TE_ON_EXT_PIN) ? BIT(31) : 0;
+	reg |= (trigger_map[cfg->dma_cmd_trigger] & 0x7);
+	reg |= (trigger_map[cfg->mdp_cmd_trigger] & 0x7) << 4;
+	DSI_W32(ctrl, DSI_TRIG_CTRL, reg);
+}
+
+/**
+ * dsi_ctrl_hw_14_host_setup() - setup dsi host configuration
+ * @ctrl:             Pointer to the controller host hardware.
+ * @cfg:              DSI host configuration that is common to both video and
+ *                    command modes.
+ */
+void dsi_ctrl_hw_14_host_setup(struct dsi_ctrl_hw *ctrl,
+			       struct dsi_host_common_cfg *cfg)
+{
+	u32 reg_value = 0;
+
+	dsi_setup_trigger_controls(ctrl, cfg);
+
+	/* Setup clocking timing controls */
+	reg_value = ((cfg->t_clk_post & 0x3F) << 8);
+	reg_value |= (cfg->t_clk_pre & 0x3F);
+	DSI_W32(ctrl, DSI_CLKOUT_TIMING_CTRL, reg_value);
+
+	/* EOT packet control */
+	reg_value = cfg->append_tx_eot ? 1 : 0;
+	reg_value |= (cfg->ignore_rx_eot ? (1 << 4) : 0);
+	DSI_W32(ctrl, DSI_EOT_PACKET_CTRL, reg_value);
+
+	/* Turn on dsi clocks */
+	DSI_W32(ctrl, DSI_CLK_CTRL, 0x23F);
+
+	/* Setup DSI control register */
+	reg_value = 0;
+	reg_value |= (cfg->en_crc_check ? BIT(24) : 0);
+	reg_value |= (cfg->en_ecc_check ? BIT(20) : 0);
+	reg_value |= BIT(8); /* Clock lane */
+	reg_value |= ((cfg->data_lanes & DSI_DATA_LANE_3) ? BIT(7) : 0);
+	reg_value |= ((cfg->data_lanes & DSI_DATA_LANE_2) ? BIT(6) : 0);
+	reg_value |= ((cfg->data_lanes & DSI_DATA_LANE_1) ? BIT(5) : 0);
+	reg_value |= ((cfg->data_lanes & DSI_DATA_LANE_0) ? BIT(4) : 0);
+
+	DSI_W32(ctrl, DSI_CTRL, reg_value);
+
+	/* Force clock lane in HS */
+	reg_value = DSI_R32(ctrl, DSI_LANE_CTRL);
+	if (cfg->force_clk_lane_hs)
+		reg_value |= BIT(28);
+	else
+		reg_value &= ~BIT(28);
+	DSI_W32(ctrl, DSI_LANE_CTRL, reg_value);
+
+	pr_debug("[DSI_%d]Host configuration complete\n", ctrl->index);
+}
+
+/**
+ * phy_sw_reset() - perform a soft reset on the PHY.
+ * @ctrl:        Pointer to the controller host hardware.
+ */
+void dsi_ctrl_hw_14_phy_sw_reset(struct dsi_ctrl_hw *ctrl)
+{
+	DSI_W32(ctrl, DSI_PHY_SW_RESET, 0x1);
+	udelay(1000);
+	DSI_W32(ctrl, DSI_PHY_SW_RESET, 0x0);
+	udelay(100);
+
+	pr_debug("[DSI_%d] phy sw reset done\n", ctrl->index);
+}
+
+/**
+ * soft_reset() - perform a soft reset on DSI controller
+ * @ctrl:          Pointer to the controller host hardware.
+ *
+ * The video, command and controller engines will be disabled before the
+ * reset is triggered. These engines will not be enabled after the reset
+ * is complete. Caller must re-enable the engines.
+ *
+ * If the reset is done while MDP timing engine is turned on, the video
+ * engine should be re-enabled only during the vertical blanking time.
+ */
+void dsi_ctrl_hw_14_soft_reset(struct dsi_ctrl_hw *ctrl)
+{
+	u32 reg = 0;
+	u32 reg_ctrl = 0;
+
+	/* Clear DSI_EN, VIDEO_MODE_EN, CMD_MODE_EN */
+	reg_ctrl = DSI_R32(ctrl, DSI_CTRL);
+	DSI_W32(ctrl, DSI_CTRL, reg_ctrl & ~0x7);
+
+	/* Force enable PCLK, BYTECLK, AHBM_HCLK */
+	reg = DSI_R32(ctrl, DSI_CLK_CTRL);
+	reg |= 0x23F;
+	DSI_W32(ctrl, DSI_CLK_CTRL, reg);
+
+	/* Trigger soft reset */
+	DSI_W32(ctrl, DSI_SOFT_RESET, 0x1);
+	udelay(1);
+	DSI_W32(ctrl, DSI_SOFT_RESET, 0x0);
+
+	/* Disable force clock on */
+	reg &= ~(BIT(20) | BIT(11));
+	DSI_W32(ctrl, DSI_CLK_CTRL, reg);
+
+	/* Re-enable DSI controller */
+	DSI_W32(ctrl, DSI_CTRL, reg_ctrl);
+	pr_debug("[DSI_%d] ctrl soft reset done\n", ctrl->index);
+}
+
+/**
+ * set_video_timing() - set up the timing for video frame
+ * @ctrl:          Pointer to controller host hardware.
+ * @mode:          Video mode information.
+ *
+ * Set up the video timing parameters for the DSI video mode operation.
+ */
+void dsi_ctrl_hw_14_set_video_timing(struct dsi_ctrl_hw *ctrl,
+				     struct dsi_mode_info *mode)
+{
+	u32 reg = 0;
+	u32 hs_start = 0;
+	u32 hs_end, active_h_start, active_h_end, h_total;
+	u32 vs_start = 0, vs_end = 0;
+	u32 vpos_start = 0, vpos_end, active_v_start, active_v_end, v_total;
+
+	hs_end = mode->h_sync_width;
+	active_h_start = mode->h_sync_width + mode->h_back_porch;
+	active_h_end = active_h_start + mode->h_active;
+	h_total = (mode->h_sync_width + mode->h_back_porch + mode->h_active +
+		   mode->h_front_porch) - 1;
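+	/*
+	 * e.g. a hypothetical mode with h_sync_width=32, h_back_porch=48,
+	 * h_active=1080 and h_front_porch=60 yields active_h_start=80,
+	 * active_h_end=1160 and h_total=1219 (totals are programmed as a
+	 * count minus one).
+	 */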
+
+	vpos_end = mode->v_sync_width;
+	active_v_start = mode->v_sync_width + mode->v_back_porch;
+	active_v_end = active_v_start + mode->v_active;
+	v_total = (mode->v_sync_width + mode->v_back_porch + mode->v_active +
+		   mode->v_front_porch) - 1;
+
+	reg = ((active_h_end & 0xFFFF) << 16) | (active_h_start & 0xFFFF);
+	DSI_W32(ctrl, DSI_VIDEO_MODE_ACTIVE_H, reg);
+
+	reg = ((active_v_end & 0xFFFF) << 16) | (active_v_start & 0xFFFF);
+	DSI_W32(ctrl, DSI_VIDEO_MODE_ACTIVE_V, reg);
+
+	reg = ((v_total & 0xFFFF) << 16) | (h_total & 0xFFFF);
+	DSI_W32(ctrl, DSI_VIDEO_MODE_TOTAL, reg);
+
+	reg = ((hs_end & 0xFFFF) << 16) | (hs_start & 0xFFFF);
+	DSI_W32(ctrl, DSI_VIDEO_MODE_HSYNC, reg);
+
+	reg = ((vs_end & 0xFFFF) << 16) | (vs_start & 0xFFFF);
+	DSI_W32(ctrl, DSI_VIDEO_MODE_VSYNC, reg);
+
+	reg = ((vpos_end & 0xFFFF) << 16) | (vpos_start & 0xFFFF);
+	DSI_W32(ctrl, DSI_VIDEO_MODE_VSYNC_VPOS, reg);
+
+	/* TODO: HS TIMER value? */
+	DSI_W32(ctrl, DSI_HS_TIMER_CTRL, 0x3FD08);
+	DSI_W32(ctrl, DSI_MISR_VIDEO_CTRL, 0x10100);
+	DSI_W32(ctrl, DSI_DSI_TIMING_FLUSH, 0x1);
+	pr_debug("[DSI_%d] ctrl video parameters updated\n", ctrl->index);
+}
+
+/**
+ * setup_cmd_stream() - set up parameters for command pixel streams
+ * @ctrl:          Pointer to controller host hardware.
+ * @width_in_pixels:   Width of the stream in pixels.
+ * @h_stride:          Horizontal stride in bytes.
+ * @height_in_lines:   Number of lines in the stream.
+ * @vc_id:             Virtual channel id for the stream.
+ *
+ * Setup parameters for command mode pixel stream size.
+ */
+void dsi_ctrl_hw_14_setup_cmd_stream(struct dsi_ctrl_hw *ctrl,
+				     u32 width_in_pixels,
+				     u32 h_stride,
+				     u32 height_in_lines,
+				     u32 vc_id)
+{
+	u32 reg = 0;
+
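+	/*
+	 * The byte stride is programmed as (stride + 1) in the upper
+	 * half-word; e.g. a hypothetical 1080-pixel-wide RGB888 line has
+	 * an h_stride of 3 * 1080 = 3240 bytes.
+	 */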
+	reg = (h_stride + 1) << 16;
+	reg |= (vc_id & 0x3) << 8;
+	reg |= 0x39; /* DCS long write packet data type */
+	DSI_W32(ctrl, DSI_COMMAND_MODE_MDP_STREAM0_CTRL, reg);
+	DSI_W32(ctrl, DSI_COMMAND_MODE_MDP_STREAM1_CTRL, reg);
+
+	reg = (height_in_lines << 16) | width_in_pixels;
+	DSI_W32(ctrl, DSI_COMMAND_MODE_MDP_STREAM0_TOTAL, reg);
+	DSI_W32(ctrl, DSI_COMMAND_MODE_MDP_STREAM1_TOTAL, reg);
+}
+
+/**
+ * video_engine_setup() - Setup dsi host controller for video mode
+ * @ctrl:          Pointer to controller host hardware.
+ * @common_cfg:    Common configuration parameters.
+ * @cfg:           Video mode configuration.
+ *
+ * Set up DSI video engine with a specific configuration. Controller and
+ * video engine are not enabled as part of this function.
+ */
+void dsi_ctrl_hw_14_video_engine_setup(struct dsi_ctrl_hw *ctrl,
+				       struct dsi_host_common_cfg *common_cfg,
+				       struct dsi_video_engine_cfg *cfg)
+{
+	u32 reg = 0;
+
+	reg |= (cfg->last_line_interleave_en ? BIT(31) : 0);
+	reg |= (cfg->pulse_mode_hsa_he ? BIT(28) : 0);
+	reg |= (cfg->hfp_lp11_en ? BIT(24) : 0);
+	reg |= (cfg->hbp_lp11_en ? BIT(20) : 0);
+	reg |= (cfg->hsa_lp11_en ? BIT(16) : 0);
+	reg |= (cfg->eof_bllp_lp11_en ? BIT(15) : 0);
+	reg |= (cfg->bllp_lp11_en ? BIT(12) : 0);
+	reg |= (cfg->traffic_mode & 0x3) << 8;
+	reg |= (cfg->vc_id & 0x3);
+	reg |= (video_mode_format_map[common_cfg->dst_format] & 0x3) << 4;
+	DSI_W32(ctrl, DSI_VIDEO_MODE_CTRL, reg);
+
+	reg = (common_cfg->swap_mode & 0x7) << 12;
+	reg |= (common_cfg->bit_swap_red ? BIT(0) : 0);
+	reg |= (common_cfg->bit_swap_green ? BIT(4) : 0);
+	reg |= (common_cfg->bit_swap_blue ? BIT(8) : 0);
+	DSI_W32(ctrl, DSI_VIDEO_MODE_DATA_CTRL, reg);
+	/* Enable Timing double buffering */
+	DSI_W32(ctrl, DSI_DSI_TIMING_DB_MODE, 0x1);
+
+	pr_debug("[DSI_%d] Video engine setup done\n", ctrl->index);
+}
+
+/**
+ * cmd_engine_setup() - setup dsi host controller for command mode
+ * @ctrl:          Pointer to the controller host hardware.
+ * @common_cfg:    Common configuration parameters.
+ * @cfg:           Command mode configuration.
+ *
+ * Setup DSI CMD engine with a specific configuration. Controller and
+ * command engine are not enabled as part of this function.
+ */
+void dsi_ctrl_hw_14_cmd_engine_setup(struct dsi_ctrl_hw *ctrl,
+				     struct dsi_host_common_cfg *common_cfg,
+				     struct dsi_cmd_engine_cfg *cfg)
+{
+	u32 reg = 0;
+
+	reg = (cfg->max_cmd_packets_interleave & 0xF) << 20;
+	reg |= (common_cfg->bit_swap_red ? BIT(4) : 0);
+	reg |= (common_cfg->bit_swap_green ? BIT(8) : 0);
+	reg |= (common_cfg->bit_swap_blue ? BIT(12) : 0);
+	reg |= cmd_mode_format_map[common_cfg->dst_format];
+	DSI_W32(ctrl, DSI_COMMAND_MODE_MDP_CTRL, reg);
+
+	reg = DSI_R32(ctrl, DSI_COMMAND_MODE_MDP_CTRL2);
+	reg |= BIT(16);
+	DSI_W32(ctrl, DSI_COMMAND_MODE_MDP_CTRL2, reg);
+
+	reg = cfg->wr_mem_start & 0xFF;
+	reg |= (cfg->wr_mem_continue & 0xFF) << 8;
+	reg |= (cfg->insert_dcs_command ? BIT(16) : 0);
+	DSI_W32(ctrl, DSI_COMMAND_MODE_MDP_DCS_CMD_CTRL, reg);
+
+	pr_debug("[DSI_%d] Cmd engine setup done\n", ctrl->index);
+}
+
+/**
+ * video_engine_en() - enable DSI video engine
+ * @ctrl:          Pointer to controller host hardware.
+ * @on:            Enable/disable video engine.
+ */
+void dsi_ctrl_hw_14_video_engine_en(struct dsi_ctrl_hw *ctrl, bool on)
+{
+	u32 reg = 0;
+
+	/* Set/Clear VIDEO_MODE_EN bit */
+	reg = DSI_R32(ctrl, DSI_CTRL);
+	if (on)
+		reg |= BIT(1);
+	else
+		reg &= ~BIT(1);
+
+	DSI_W32(ctrl, DSI_CTRL, reg);
+
+	pr_debug("[DSI_%d] Video engine = %d\n", ctrl->index, on);
+}
+
+/**
+ * ctrl_en() - enable DSI controller engine
+ * @ctrl:          Pointer to the controller host hardware.
+ * @on:            turn on/off the DSI controller engine.
+ */
+void dsi_ctrl_hw_14_ctrl_en(struct dsi_ctrl_hw *ctrl, bool on)
+{
+	u32 reg = 0;
+
+	/* Set/Clear DSI_EN bit */
+	reg = DSI_R32(ctrl, DSI_CTRL);
+	if (on)
+		reg |= BIT(0);
+	else
+		reg &= ~BIT(0);
+
+	DSI_W32(ctrl, DSI_CTRL, reg);
+
+	pr_debug("[DSI_%d] Controller engine = %d\n", ctrl->index, on);
+}
+
+/**
+ * cmd_engine_en() - enable DSI controller command engine
+ * @ctrl:          Pointer to the controller host hardware.
+ * @on:            Turn on/off the DSI command engine.
+ */
+void dsi_ctrl_hw_14_cmd_engine_en(struct dsi_ctrl_hw *ctrl, bool on)
+{
+	u32 reg = 0;
+
+	/* Set/Clear CMD_MODE_EN bit */
+	reg = DSI_R32(ctrl, DSI_CTRL);
+	if (on)
+		reg |= BIT(2);
+	else
+		reg &= ~BIT(2);
+
+	DSI_W32(ctrl, DSI_CTRL, reg);
+
+	pr_debug("[DSI_%d] command engine = %d\n", ctrl->index, on);
+}
+
+/**
+ * setup_lane_map() - setup mapping between logical and physical lanes
+ * @ctrl:          Pointer to the controller host hardware.
+ * @lane_map:      Structure defining the mapping between DSI logical
+ *                 lanes and physical lanes.
+ */
+void dsi_ctrl_hw_14_setup_lane_map(struct dsi_ctrl_hw *ctrl,
+			       struct dsi_lane_mapping *lane_map)
+{
+	u32 reg_value = 0;
+	u32 lane_number = ((lane_map->physical_lane0 * 1000) +
+			   (lane_map->physical_lane1 * 100) +
+			   (lane_map->physical_lane2 * 10) +
+			   (lane_map->physical_lane3));
+
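+	/*
+	 * The four physical lane positions are packed into a decimal number
+	 * (e.g. a logical-to-physical order of 3,0,1,2 packs to 3012) and
+	 * matched against the eight permutations supported by the lane-swap
+	 * hardware.
+	 */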
+	if (lane_number == 123)
+		reg_value = 0;
+	else if (lane_number == 3012)
+		reg_value = 1;
+	else if (lane_number == 2301)
+		reg_value = 2;
+	else if (lane_number == 1230)
+		reg_value = 3;
+	else if (lane_number == 321)
+		reg_value = 4;
+	else if (lane_number == 1032)
+		reg_value = 5;
+	else if (lane_number == 2103)
+		reg_value = 6;
+	else if (lane_number == 3210)
+		reg_value = 7;
+
+	DSI_W32(ctrl, DSI_LANE_SWAP_CTRL, reg_value);
+
+	pr_debug("[DSI_%d] Lane swap setup complete\n", ctrl->index);
+}
+
+/**
+ * kickoff_command() - transmits commands stored in memory
+ * @ctrl:          Pointer to the controller host hardware.
+ * @cmd:           Command information.
+ * @flags:         Modifiers for command transmission.
+ *
+ * The controller hardware is programmed with address and size of the
+ * command buffer. The transmission is kicked off if
+ * DSI_CTRL_HW_CMD_WAIT_FOR_TRIGGER flag is not set. If this flag is
+ * set, caller should make a separate call to trigger_command_dma() to
+ * transmit the command.
+ */
+void dsi_ctrl_hw_14_kickoff_command(struct dsi_ctrl_hw *ctrl,
+				    struct dsi_ctrl_cmd_dma_info *cmd,
+				    u32 flags)
+{
+	u32 reg = 0;
+
+	/* Set BROADCAST_EN and EMBEDDED_MODE */
+	reg = DSI_R32(ctrl, DSI_COMMAND_MODE_DMA_CTRL);
+	if (cmd->en_broadcast)
+		reg |= BIT(31);
+	else
+		reg &= ~BIT(31);
+
+	if (cmd->is_master)
+		reg |= BIT(30);
+	else
+		reg &= ~BIT(30);
+
+	if (cmd->use_lpm)
+		reg |= BIT(26);
+	else
+		reg &= ~BIT(26);
+
+	reg |= BIT(28);
+	DSI_W32(ctrl, DSI_COMMAND_MODE_DMA_CTRL, reg);
+
+	DSI_W32(ctrl, DSI_DMA_CMD_OFFSET, cmd->offset);
+	DSI_W32(ctrl, DSI_DMA_CMD_LENGTH, (cmd->length & 0xFFFFFF));
+
+	/* wait for writes to complete before kick off */
+	wmb();
+
+	if (!(flags & DSI_CTRL_HW_CMD_WAIT_FOR_TRIGGER))
+		DSI_W32(ctrl, DSI_CMD_MODE_DMA_SW_TRIGGER, 0x1);
+}
+
+/**
+ * kickoff_fifo_command() - transmits a command using FIFO in dsi
+ *                          hardware.
+ * @ctrl:          Pointer to the controller host hardware.
+ * @cmd:           Command information.
+ * @flags:         Modifiers for command transmission.
+ *
+ * The controller hardware FIFO is programmed with command header and
+ * payload. The transmission is kicked off if
+ * DSI_CTRL_HW_CMD_WAIT_FOR_TRIGGER flag is not set. If this flag is
+ * set, caller should make a separate call to trigger_command_dma() to
+ * transmit the command.
+ */
+void dsi_ctrl_hw_14_kickoff_fifo_command(struct dsi_ctrl_hw *ctrl,
+					 struct dsi_ctrl_cmd_dma_fifo_info *cmd,
+					 u32 flags)
+{
+	u32 reg = 0, i = 0;
+	u32 *ptr = cmd->command;
+	/*
+	 * Set CMD_DMA_TPG_EN, TPG_DMA_FIFO_MODE and
+	 * CMD_DMA_PATTERN_SEL = custom pattern stored in TPG DMA FIFO
+	 */
+	reg = (BIT(1) | BIT(2) | (0x3 << 16));
+	DSI_W32(ctrl, DSI_TEST_PATTERN_GEN_CTRL, reg);
+
+	/*
+	 * Program the FIFO with the command buffer. Hardware requires an extra
+	 * DWORD (set to zero) if the command buffer length is an odd number of
+	 * DWORDs.
+	 */
+	for (i = 0; i < cmd->size; i += 4) {
+		DSI_W32(ctrl, DSI_TEST_PATTERN_GEN_CMD_DMA_INIT_VAL, *ptr);
+		ptr++;
+	}
+
+	if ((cmd->size / 4) & 0x1)
+		DSI_W32(ctrl, DSI_TEST_PATTERN_GEN_CMD_DMA_INIT_VAL, 0);
+
+	/* Set BROADCAST_EN and EMBEDDED_MODE */
+	reg = DSI_R32(ctrl, DSI_COMMAND_MODE_DMA_CTRL);
+	if (cmd->en_broadcast)
+		reg |= BIT(31);
+	else
+		reg &= ~BIT(31);
+
+	if (cmd->is_master)
+		reg |= BIT(30);
+	else
+		reg &= ~BIT(30);
+
+	if (cmd->use_lpm)
+		reg |= BIT(26);
+	else
+		reg &= ~BIT(26);
+
+	reg |= BIT(28);
+
+	DSI_W32(ctrl, DSI_COMMAND_MODE_DMA_CTRL, reg);
+
+	DSI_W32(ctrl, DSI_DMA_CMD_LENGTH, (cmd->size & 0xFFFFFFFF));
+	/* Finish writes before command trigger */
+	wmb();
+
+	if (!(flags & DSI_CTRL_HW_CMD_WAIT_FOR_TRIGGER))
+		DSI_W32(ctrl, DSI_CMD_MODE_DMA_SW_TRIGGER, 0x1);
+
+	pr_debug("[DSI_%d]size=%d, trigger = %d\n",
+		 ctrl->index, cmd->size,
+		 (flags & DSI_CTRL_HW_CMD_WAIT_FOR_TRIGGER) ? false : true);
+}
+
+void dsi_ctrl_hw_14_reset_cmd_fifo(struct dsi_ctrl_hw *ctrl)
+{
+	/* disable cmd dma tpg */
+	DSI_W32(ctrl, DSI_TEST_PATTERN_GEN_CTRL, 0x0);
+
+	DSI_W32(ctrl, DSI_TPG_DMA_FIFO_RESET, 0x1);
+	udelay(1);
+	DSI_W32(ctrl, DSI_TPG_DMA_FIFO_RESET, 0x0);
+}
+
+/**
+ * trigger_command_dma() - trigger transmission of command buffer.
+ * @ctrl:          Pointer to the controller host hardware.
+ *
+ * This trigger can be only used if there was a prior call to
+ * kickoff_command() or kickoff_fifo_command() with
+ * DSI_CTRL_HW_CMD_WAIT_FOR_TRIGGER flag.
+ */
+void dsi_ctrl_hw_14_trigger_command_dma(struct dsi_ctrl_hw *ctrl)
+{
+	DSI_W32(ctrl, DSI_CMD_MODE_DMA_SW_TRIGGER, 0x1);
+	pr_debug("[DSI_%d] CMD DMA triggered\n", ctrl->index);
+}
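+
+/*
+ * Typical deferred-transfer pairing (a sketch): kick off with the wait
+ * flag set, then trigger explicitly once everything is programmed.
+ *
+ *	dsi_ctrl_hw_14_kickoff_command(ctrl, &cmd,
+ *				       DSI_CTRL_HW_CMD_WAIT_FOR_TRIGGER);
+ *	...
+ *	dsi_ctrl_hw_14_trigger_command_dma(ctrl);
+ */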
+
+/**
+ * get_cmd_read_data() - get data read from the peripheral
+ * @ctrl:           Pointer to the controller host hardware.
+ * @rd_buf:         Buffer where data will be read into.
+ * @read_offset:    Offset from where the data should be read.
+ * @total_read_len: Number of bytes to read.
+ *
+ * Return: number of bytes read.
+ */
+u32 dsi_ctrl_hw_14_get_cmd_read_data(struct dsi_ctrl_hw *ctrl,
+				     u8 *rd_buf,
+				     u32 read_offset,
+				     u32 total_read_len)
+{
+	u32 *lp, *temp, data;
+	int i, j = 0, cnt;
+	u32 read_cnt;
+	u32 rx_byte = 0;
+	u32 repeated_bytes = 0;
+	u8 reg[16];
+	u32 pkt_size = 0;
+	int buf_offset = read_offset;
+
+	lp = (u32 *)rd_buf;
+	temp = (u32 *)reg;
+	cnt = (rx_byte + 3) >> 2;
+
+	if (cnt > 4)
+		cnt = 4;
+
+	if (rx_byte == 4)
+		read_cnt = 4;
+	else
+		read_cnt = pkt_size + 6;
+
+	if (read_cnt > 16) {
+		int bytes_shifted;
+
+		bytes_shifted = read_cnt - 16;
+		repeated_bytes = buf_offset - bytes_shifted;
+	}
+
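+	/*
+	 * At most four 32-bit RDBK_DATA registers are read, i.e. a 16-byte
+	 * readback window; the payload comes back in network byte order,
+	 * hence the ntohl() below.
+	 */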
+	for (i = cnt - 1; i >= 0; i--) {
+		data = DSI_R32(ctrl, DSI_RDBK_DATA0 + i*4);
+		*temp++ = ntohl(data);
+	}
+
+	for (i = repeated_bytes; i < 16; i++)
+		rd_buf[j++] = reg[i];
+
+	pr_debug("[DSI_%d] Read %d bytes\n", ctrl->index, j);
+	return j;
+}
+
+/**
+ * ulps_request() - request ulps entry for specified lanes
+ * @ctrl:          Pointer to the controller host hardware.
+ * @lanes:         ORed list of lanes (enum dsi_data_lanes) which need
+ *                 to enter ULPS.
+ *
+ * Caller should check if lanes are in ULPS mode by calling
+ * get_lanes_in_ulps() operation.
+ */
+void dsi_ctrl_hw_14_ulps_request(struct dsi_ctrl_hw *ctrl, u32 lanes)
+{
+	u32 reg = 0;
+
+	reg = DSI_R32(ctrl, DSI_LANE_CTRL);
+	if (lanes & DSI_CLOCK_LANE)
+		reg |= BIT(4);
+	if (lanes & DSI_DATA_LANE_0)
+		reg |= BIT(0);
+	if (lanes & DSI_DATA_LANE_1)
+		reg |= BIT(1);
+	if (lanes & DSI_DATA_LANE_2)
+		reg |= BIT(2);
+	if (lanes & DSI_DATA_LANE_3)
+		reg |= BIT(3);
+
+	DSI_W32(ctrl, DSI_LANE_CTRL, reg);
+
+	pr_debug("[DSI_%d] ULPS requested for lanes 0x%x\n", ctrl->index,
+		 lanes);
+}
+
+/**
+ * ulps_exit() - exit ULPS on specified lanes
+ * @ctrl:          Pointer to the controller host hardware.
+ * @lanes:         ORed list of lanes (enum dsi_data_lanes) which need
+ *                 to exit ULPS.
+ *
+ * Caller should check if lanes are in active mode by calling
+ * get_lanes_in_ulps() operation.
+ */
+void dsi_ctrl_hw_14_ulps_exit(struct dsi_ctrl_hw *ctrl, u32 lanes)
+{
+	u32 reg = 0;
+
+	reg = DSI_R32(ctrl, DSI_LANE_CTRL);
+	if (lanes & DSI_CLOCK_LANE)
+		reg |= BIT(12);
+	if (lanes & DSI_DATA_LANE_0)
+		reg |= BIT(8);
+	if (lanes & DSI_DATA_LANE_1)
+		reg |= BIT(9);
+	if (lanes & DSI_DATA_LANE_2)
+		reg |= BIT(10);
+	if (lanes & DSI_DATA_LANE_3)
+		reg |= BIT(11);
+
+	DSI_W32(ctrl, DSI_LANE_CTRL, reg);
+
+	pr_debug("[DSI_%d] ULPS exit request for lanes=0x%x\n",
+		 ctrl->index, lanes);
+}
+
+/**
+ * clear_ulps_request() - clear ulps request once all lanes are active
+ * @ctrl:          Pointer to controller host hardware.
+ * @lanes:         ORed list of lanes (enum dsi_data_lanes).
+ *
+ * ULPS request should be cleared after the lanes have exited ULPS.
+ */
+void dsi_ctrl_hw_14_clear_ulps_request(struct dsi_ctrl_hw *ctrl, u32 lanes)
+{
+	u32 reg = 0;
+
+	reg = DSI_R32(ctrl, DSI_LANE_CTRL);
+	if (lanes & DSI_CLOCK_LANE)
+		reg &= ~BIT(4); /* clock lane */
+	if (lanes & DSI_DATA_LANE_0)
+		reg &= ~BIT(0);
+	if (lanes & DSI_DATA_LANE_1)
+		reg &= ~BIT(1);
+	if (lanes & DSI_DATA_LANE_2)
+		reg &= ~BIT(2);
+	if (lanes & DSI_DATA_LANE_3)
+		reg &= ~BIT(3);
+
+	DSI_W32(ctrl, DSI_LANE_CTRL, reg);
+	/*
+	 * HPG recommends separate writes for clearing ULPS_REQUEST and
+	 * ULPS_EXIT.
+	 */
+	reg = DSI_R32(ctrl, DSI_LANE_CTRL);
+	if (lanes & DSI_CLOCK_LANE)
+		reg &= ~BIT(12);
+	if (lanes & DSI_DATA_LANE_0)
+		reg &= ~BIT(8);
+	if (lanes & DSI_DATA_LANE_1)
+		reg &= ~BIT(9);
+	if (lanes & DSI_DATA_LANE_2)
+		reg &= ~BIT(10);
+	if (lanes & DSI_DATA_LANE_3)
+		reg &= ~BIT(11);
+	DSI_W32(ctrl, DSI_LANE_CTRL, reg);
+
+	pr_debug("[DSI_%d] ULPS request cleared\n", ctrl->index);
+}
+
+/**
+ * get_lanes_in_ulps() - returns the list of lanes in ULPS mode
+ * @ctrl:          Pointer to the controller host hardware.
+ *
+ * Returns an ORed list of lanes (enum dsi_data_lanes) that are in ULPS
+ * state. If 0 is returned, all the lanes are active.
+ *
+ * Return: List of lanes in ULPS state.
+ */
+u32 dsi_ctrl_hw_14_get_lanes_in_ulps(struct dsi_ctrl_hw *ctrl)
+{
+	u32 reg = 0;
+	u32 lanes = 0;
+
+	reg = DSI_R32(ctrl, DSI_LANE_STATUS);
+	if (!(reg & BIT(8)))
+		lanes |= DSI_DATA_LANE_0;
+	if (!(reg & BIT(9)))
+		lanes |= DSI_DATA_LANE_1;
+	if (!(reg & BIT(10)))
+		lanes |= DSI_DATA_LANE_2;
+	if (!(reg & BIT(11)))
+		lanes |= DSI_DATA_LANE_3;
+	if (!(reg & BIT(12)))
+		lanes |= DSI_CLOCK_LANE;
+
+	pr_debug("[DSI_%d] lanes in ulps = 0x%x\n", ctrl->index, lanes);
+	return lanes;
+}
+
+/**
+ * clamp_enable() - enable DSI clamps to keep PHY driving a stable link
+ * @ctrl:          Pointer to the controller host hardware.
+ * @lanes:         ORed list of lanes which need to be clamped.
+ * @enable_ulps:   TODO:??
+ */
+void dsi_ctrl_hw_14_clamp_enable(struct dsi_ctrl_hw *ctrl,
+				 u32 lanes,
+				 bool enable_ulps)
+{
+	u32 clamp_reg = 0;
+	u32 bit_shift = 0;
+	u32 reg = 0;
+
+	if (ctrl->index == 1)
+		bit_shift = 16;
+
+	if (lanes & DSI_CLOCK_LANE) {
+		clamp_reg |= BIT(9);
+		if (enable_ulps)
+			clamp_reg |= BIT(8);
+	}
+
+	if (lanes & DSI_DATA_LANE_0) {
+		clamp_reg |= BIT(7);
+		if (enable_ulps)
+			clamp_reg |= BIT(6);
+	}
+
+	if (lanes & DSI_DATA_LANE_1) {
+		clamp_reg |= BIT(5);
+		if (enable_ulps)
+			clamp_reg |= BIT(4);
+	}
+
+	if (lanes & DSI_DATA_LANE_2) {
+		clamp_reg |= BIT(3);
+		if (enable_ulps)
+			clamp_reg |= BIT(2);
+	}
+
+	if (lanes & DSI_DATA_LANE_3) {
+		clamp_reg |= BIT(1);
+		if (enable_ulps)
+			clamp_reg |= BIT(0);
+	}
+
+	clamp_reg |= BIT(15); /* Enable clamp */
+
+	reg = DSI_MMSS_MISC_R32(ctrl, MMSS_MISC_CLAMP_REG_OFF);
+	reg |= (clamp_reg << bit_shift);
+	DSI_MMSS_MISC_W32(ctrl, MMSS_MISC_CLAMP_REG_OFF, reg);
+
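+	/* Enable PHY reset skip (cleared again in clamp_disable()) */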
+	reg = DSI_MMSS_MISC_R32(ctrl, MMSS_MISC_CLAMP_REG_OFF);
+	reg |= BIT(30);
+	DSI_MMSS_MISC_W32(ctrl, MMSS_MISC_CLAMP_REG_OFF, reg);
+
+	pr_debug("[DSI_%d] Clamps enabled for lanes=0x%x\n", ctrl->index,
+		 lanes);
+}
+
+/**
+ * clamp_disable() - disable DSI clamps
+ * @ctrl:          Pointer to the controller host hardware.
+ * @lanes:         ORed list of lanes which need to have clamps released.
+ * @disable_ulps:   TODO:??
+ */
+void dsi_ctrl_hw_14_clamp_disable(struct dsi_ctrl_hw *ctrl,
+				  u32 lanes,
+				  bool disable_ulps)
+{
+	u32 clamp_reg = 0;
+	u32 bit_shift = 0;
+	u32 reg = 0;
+
+	if (ctrl->index == 1)
+		bit_shift = 16;
+
+	if (lanes & DSI_CLOCK_LANE) {
+		clamp_reg |= BIT(9);
+		if (disable_ulps)
+			clamp_reg |= BIT(8);
+	}
+
+	if (lanes & DSI_DATA_LANE_0) {
+		clamp_reg |= BIT(7);
+		if (disable_ulps)
+			clamp_reg |= BIT(6);
+	}
+
+	if (lanes & DSI_DATA_LANE_1) {
+		clamp_reg |= BIT(5);
+		if (disable_ulps)
+			clamp_reg |= BIT(4);
+	}
+
+	if (lanes & DSI_DATA_LANE_2) {
+		clamp_reg |= BIT(3);
+		if (disable_ulps)
+			clamp_reg |= BIT(2);
+	}
+
+	if (lanes & DSI_DATA_LANE_3) {
+		clamp_reg |= BIT(1);
+		if (disable_ulps)
+			clamp_reg |= BIT(0);
+	}
+
+	clamp_reg |= BIT(15); /* Enable clamp */
+	clamp_reg <<= bit_shift;
+
+	/* Disable PHY reset skip */
+	reg = DSI_MMSS_MISC_R32(ctrl, MMSS_MISC_CLAMP_REG_OFF);
+	reg &= ~BIT(30);
+	DSI_MMSS_MISC_W32(ctrl, MMSS_MISC_CLAMP_REG_OFF, reg);
+
+	reg = DSI_MMSS_MISC_R32(ctrl, MMSS_MISC_CLAMP_REG_OFF);
+	reg &= ~(clamp_reg);
+	DSI_MMSS_MISC_W32(ctrl, MMSS_MISC_CLAMP_REG_OFF, reg);
+
+	pr_debug("[DSI_%d] Disable clamps for lanes=%d\n", ctrl->index, lanes);
+}
+
+/**
+ * get_interrupt_status() - returns the interrupt status
+ * @ctrl:          Pointer to the controller host hardware.
+ *
+ * Returns the ORed list of interrupts (enum dsi_status_int_type) that
+ * are active. This list does not include any error interrupts. Caller
+ * should call get_error_status for error interrupts.
+ *
+ * Return: List of active interrupts.
+ */
+u32 dsi_ctrl_hw_14_get_interrupt_status(struct dsi_ctrl_hw *ctrl)
+{
+	u32 reg = 0;
+	u32 ints = 0;
+
+	reg = DSI_R32(ctrl, DSI_INT_CTRL);
+
+	if (reg & BIT(0))
+		ints |= DSI_CMD_MODE_DMA_DONE;
+	if (reg & BIT(8))
+		ints |= DSI_CMD_FRAME_DONE;
+	if (reg & BIT(10))
+		ints |= DSI_CMD_STREAM0_FRAME_DONE;
+	if (reg & BIT(12))
+		ints |= DSI_CMD_STREAM1_FRAME_DONE;
+	if (reg & BIT(14))
+		ints |= DSI_CMD_STREAM2_FRAME_DONE;
+	if (reg & BIT(16))
+		ints |= DSI_VIDEO_MODE_FRAME_DONE;
+	if (reg & BIT(20))
+		ints |= DSI_BTA_DONE;
+	if (reg & BIT(28))
+		ints |= DSI_DYN_REFRESH_DONE;
+	if (reg & BIT(30))
+		ints |= DSI_DESKEW_DONE;
+
+	pr_debug("[DSI_%d] Interrupt status = 0x%x, INT_CTRL=0x%x\n",
+		 ctrl->index, ints, reg);
+	return ints;
+}
+
+/**
+ * clear_interrupt_status() - clears the specified interrupts
+ * @ctrl:          Pointer to the controller host hardware.
+ * @ints:          List of interrupts to be cleared.
+ */
+void dsi_ctrl_hw_14_clear_interrupt_status(struct dsi_ctrl_hw *ctrl, u32 ints)
+{
+	u32 reg = 0;
+
+	if (ints & DSI_CMD_MODE_DMA_DONE)
+		reg |= BIT(0);
+	if (ints & DSI_CMD_FRAME_DONE)
+		reg |= BIT(8);
+	if (ints & DSI_CMD_STREAM0_FRAME_DONE)
+		reg |= BIT(10);
+	if (ints & DSI_CMD_STREAM1_FRAME_DONE)
+		reg |= BIT(12);
+	if (ints & DSI_CMD_STREAM2_FRAME_DONE)
+		reg |= BIT(14);
+	if (ints & DSI_VIDEO_MODE_FRAME_DONE)
+		reg |= BIT(16);
+	if (ints & DSI_BTA_DONE)
+		reg |= BIT(20);
+	if (ints & DSI_DYN_REFRESH_DONE)
+		reg |= BIT(28);
+	if (ints & DSI_DESKEW_DONE)
+		reg |= BIT(30);
+
+	DSI_W32(ctrl, DSI_INT_CTRL, reg);
+
+	pr_debug("[DSI_%d] Clear interrupts, ints = 0x%x, INT_CTRL=0x%x\n",
+		 ctrl->index, ints, reg);
+}
+
+/**
+ * enable_status_interrupts() - enable the specified interrupts
+ * @ctrl:          Pointer to the controller host hardware.
+ * @ints:          List of interrupts to be enabled.
+ *
+ * Enables the specified interrupts. This list will override the
+ * previous interrupts enabled through this function. Caller has to
+ * maintain the state of the interrupts enabled. To disable all
+ * interrupts, set ints to 0.
+ */
+void dsi_ctrl_hw_14_enable_status_interrupts(struct dsi_ctrl_hw *ctrl, u32 ints)
+{
+	u32 reg = 0;
+
+	/* Do not change value of DSI_ERROR_MASK bit */
+	reg |= (DSI_R32(ctrl, DSI_INT_CTRL) & BIT(25));
+	if (ints & DSI_CMD_MODE_DMA_DONE)
+		reg |= BIT(1);
+	if (ints & DSI_CMD_FRAME_DONE)
+		reg |= BIT(9);
+	if (ints & DSI_CMD_STREAM0_FRAME_DONE)
+		reg |= BIT(11);
+	if (ints & DSI_CMD_STREAM1_FRAME_DONE)
+		reg |= BIT(13);
+	if (ints & DSI_CMD_STREAM2_FRAME_DONE)
+		reg |= BIT(15);
+	if (ints & DSI_VIDEO_MODE_FRAME_DONE)
+		reg |= BIT(17);
+	if (ints & DSI_BTA_DONE)
+		reg |= BIT(21);
+	if (ints & DSI_DYN_REFRESH_DONE)
+		reg |= BIT(29);
+	if (ints & DSI_DESKEW_DONE)
+		reg |= BIT(31);
+
+	DSI_W32(ctrl, DSI_INT_CTRL, reg);
+
+	pr_debug("[DSI_%d] Enable interrupts 0x%x, INT_CTRL=0x%x\n",
+		 ctrl->index, ints, reg);
+}
+
+/**
+ * get_error_status() - returns the error status
+ * @ctrl:          Pointer to the controller host hardware.
+ *
+ * Returns the ORed list of errors (enum dsi_error_int_type) that are
+ * active. This list does not include any status interrupts. Caller
+ * should call get_interrupt_status for status interrupts.
+ *
+ * Return: List of active error interrupts.
+ */
+u64 dsi_ctrl_hw_14_get_error_status(struct dsi_ctrl_hw *ctrl)
+{
+	u32 dln0_phy_err;
+	u32 fifo_status;
+	u32 ack_error;
+	u32 timeout_errors;
+	u32 clk_error;
+	u32 dsi_status;
+	u64 errors = 0;
+
+	dln0_phy_err = DSI_R32(ctrl, DSI_DLN0_PHY_ERR);
+	if (dln0_phy_err & BIT(0))
+		errors |= DSI_DLN0_ESC_ENTRY_ERR;
+	if (dln0_phy_err & BIT(4))
+		errors |= DSI_DLN0_ESC_SYNC_ERR;
+	if (dln0_phy_err & BIT(8))
+		errors |= DSI_DLN0_LP_CONTROL_ERR;
+	if (dln0_phy_err & BIT(12))
+		errors |= DSI_DLN0_LP0_CONTENTION;
+	if (dln0_phy_err & BIT(16))
+		errors |= DSI_DLN0_LP1_CONTENTION;
+
+	fifo_status = DSI_R32(ctrl, DSI_FIFO_STATUS);
+	if (fifo_status & BIT(7))
+		errors |= DSI_CMD_MDP_FIFO_UNDERFLOW;
+	if (fifo_status & BIT(10))
+		errors |= DSI_CMD_DMA_FIFO_UNDERFLOW;
+	if (fifo_status & BIT(18))
+		errors |= DSI_DLN0_HS_FIFO_OVERFLOW;
+	if (fifo_status & BIT(19))
+		errors |= DSI_DLN0_HS_FIFO_UNDERFLOW;
+	if (fifo_status & BIT(22))
+		errors |= DSI_DLN1_HS_FIFO_OVERFLOW;
+	if (fifo_status & BIT(23))
+		errors |= DSI_DLN1_HS_FIFO_UNDERFLOW;
+	if (fifo_status & BIT(26))
+		errors |= DSI_DLN2_HS_FIFO_OVERFLOW;
+	if (fifo_status & BIT(27))
+		errors |= DSI_DLN2_HS_FIFO_UNDERFLOW;
+	if (fifo_status & BIT(30))
+		errors |= DSI_DLN3_HS_FIFO_OVERFLOW;
+	if (fifo_status & BIT(31))
+		errors |= DSI_DLN3_HS_FIFO_UNDERFLOW;
+
+	ack_error = DSI_R32(ctrl, DSI_ACK_ERR_STATUS);
+	if (ack_error & BIT(16))
+		errors |= DSI_RDBK_SINGLE_ECC_ERR;
+	if (ack_error & BIT(17))
+		errors |= DSI_RDBK_MULTI_ECC_ERR;
+	if (ack_error & BIT(20))
+		errors |= DSI_RDBK_CRC_ERR;
+	if (ack_error & BIT(23))
+		errors |= DSI_RDBK_INCOMPLETE_PKT;
+	if (ack_error & BIT(24))
+		errors |= DSI_PERIPH_ERROR_PKT;
+
+	timeout_errors = DSI_R32(ctrl, DSI_TIMEOUT_STATUS);
+	if (timeout_errors & BIT(0))
+		errors |= DSI_HS_TX_TIMEOUT;
+	if (timeout_errors & BIT(4))
+		errors |= DSI_LP_RX_TIMEOUT;
+	if (timeout_errors & BIT(8))
+		errors |= DSI_BTA_TIMEOUT;
+
+	clk_error = DSI_R32(ctrl, DSI_CLK_STATUS);
+	if (clk_error & BIT(16))
+		errors |= DSI_PLL_UNLOCK;
+
+	dsi_status = DSI_R32(ctrl, DSI_STATUS);
+	if (dsi_status & BIT(31))
+		errors |= DSI_INTERLEAVE_OP_CONTENTION;
+
+	pr_debug("[DSI_%d] Error status = 0x%llx, phy=0x%x, fifo=0x%x",
+		 ctrl->index, errors, dln0_phy_err, fifo_status);
+	pr_debug("[DSI_%d] ack=0x%x, timeout=0x%x, clk=0x%x, dsi=0x%x\n",
+		 ctrl->index, ack_error, timeout_errors, clk_error, dsi_status);
+	return errors;
+}
+
+/**
+ * clear_error_status() - clears the specified errors
+ * @ctrl:          Pointer to the controller host hardware.
+ * @errors:          List of errors to be cleared.
+ */
+void dsi_ctrl_hw_14_clear_error_status(struct dsi_ctrl_hw *ctrl, u64 errors)
+{
+	u32 dln0_phy_err = 0;
+	u32 fifo_status = 0;
+	u32 ack_error = 0;
+	u32 timeout_error = 0;
+	u32 clk_error = 0;
+	u32 dsi_status = 0;
+	u32 int_ctrl = 0;
+
+	if (errors & DSI_RDBK_SINGLE_ECC_ERR)
+		ack_error |= BIT(16);
+	if (errors & DSI_RDBK_MULTI_ECC_ERR)
+		ack_error |= BIT(17);
+	if (errors & DSI_RDBK_CRC_ERR)
+		ack_error |= BIT(20);
+	if (errors & DSI_RDBK_INCOMPLETE_PKT)
+		ack_error |= BIT(23);
+	if (errors & DSI_PERIPH_ERROR_PKT)
+		ack_error |= BIT(24);
+
+	if (errors & DSI_LP_RX_TIMEOUT)
+		timeout_error |= BIT(4);
+	if (errors & DSI_HS_TX_TIMEOUT)
+		timeout_error |= BIT(0);
+	if (errors & DSI_BTA_TIMEOUT)
+		timeout_error |= BIT(8);
+
+	if (errors & DSI_PLL_UNLOCK)
+		clk_error |= BIT(16);
+
+	if (errors & DSI_DLN0_LP0_CONTENTION)
+		dln0_phy_err |= BIT(12);
+	if (errors & DSI_DLN0_LP1_CONTENTION)
+		dln0_phy_err |= BIT(16);
+	if (errors & DSI_DLN0_ESC_ENTRY_ERR)
+		dln0_phy_err |= BIT(0);
+	if (errors & DSI_DLN0_ESC_SYNC_ERR)
+		dln0_phy_err |= BIT(4);
+	if (errors & DSI_DLN0_LP_CONTROL_ERR)
+		dln0_phy_err |= BIT(8);
+
+	if (errors & DSI_CMD_DMA_FIFO_UNDERFLOW)
+		fifo_status |= BIT(10);
+	if (errors & DSI_CMD_MDP_FIFO_UNDERFLOW)
+		fifo_status |= BIT(7);
+	if (errors & DSI_DLN0_HS_FIFO_OVERFLOW)
+		fifo_status |= BIT(18);
+	if (errors & DSI_DLN1_HS_FIFO_OVERFLOW)
+		fifo_status |= BIT(22);
+	if (errors & DSI_DLN2_HS_FIFO_OVERFLOW)
+		fifo_status |= BIT(26);
+	if (errors & DSI_DLN3_HS_FIFO_OVERFLOW)
+		fifo_status |= BIT(30);
+	if (errors & DSI_DLN0_HS_FIFO_UNDERFLOW)
+		fifo_status |= BIT(19);
+	if (errors & DSI_DLN1_HS_FIFO_UNDERFLOW)
+		fifo_status |= BIT(23);
+	if (errors & DSI_DLN2_HS_FIFO_UNDERFLOW)
+		fifo_status |= BIT(27);
+	if (errors & DSI_DLN3_HS_FIFO_UNDERFLOW)
+		fifo_status |= BIT(31);
+
+	if (errors & DSI_INTERLEAVE_OP_CONTENTION)
+		dsi_status |= BIT(31);
+
+	DSI_W32(ctrl, DSI_DLN0_PHY_ERR, dln0_phy_err);
+	DSI_W32(ctrl, DSI_FIFO_STATUS, fifo_status);
+	DSI_W32(ctrl, DSI_ACK_ERR_STATUS, ack_error);
+	DSI_W32(ctrl, DSI_TIMEOUT_STATUS, timeout_error);
+	DSI_W32(ctrl, DSI_CLK_STATUS, clk_error);
+	DSI_W32(ctrl, DSI_STATUS, dsi_status);
+
+	int_ctrl = DSI_R32(ctrl, DSI_INT_CTRL);
+	int_ctrl |= BIT(24);
+	DSI_W32(ctrl, DSI_INT_CTRL, int_ctrl);
+	pr_debug("[DSI_%d] clear errors = 0x%llx, phy=0x%x, fifo=0x%x",
+		 ctrl->index, errors, dln0_phy_err, fifo_status);
+	pr_debug("[DSI_%d] ack=0x%x, timeout=0x%x, clk=0x%x, dsi=0x%x\n",
+		 ctrl->index, ack_error, timeout_error, clk_error, dsi_status);
+}
+
+/**
+ * enable_error_interrupts() - enable the specified interrupts
+ * @ctrl:          Pointer to the controller host hardware.
+ * @errors:        List of errors to be enabled.
+ *
+ * Enables the specified interrupts. This list will override the
+ * previous interrupts enabled through this function. Caller has to
+ * maintain the state of the interrupts enabled. To disable all
+ * interrupts, set errors to 0.
+ */
+void dsi_ctrl_hw_14_enable_error_interrupts(struct dsi_ctrl_hw *ctrl,
+					    u64 errors)
+{
+	u32 int_ctrl = 0;
+	u32 int_mask0 = 0x7FFF3BFF;
+
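+	/*
+	 * DSI_ERR_INT_MASK0 is a mask register: a set bit masks the error
+	 * source, so each error requested below is enabled by clearing its
+	 * bit from the default value above.
+	 */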
+	int_ctrl = DSI_R32(ctrl, DSI_INT_CTRL);
+	if (errors)
+		int_ctrl |= BIT(25);
+	else
+		int_ctrl &= ~BIT(25);
+
+	if (errors & DSI_RDBK_SINGLE_ECC_ERR)
+		int_mask0 &= ~BIT(0);
+	if (errors & DSI_RDBK_MULTI_ECC_ERR)
+		int_mask0 &= ~BIT(1);
+	if (errors & DSI_RDBK_CRC_ERR)
+		int_mask0 &= ~BIT(2);
+	if (errors & DSI_RDBK_INCOMPLETE_PKT)
+		int_mask0 &= ~BIT(3);
+	if (errors & DSI_PERIPH_ERROR_PKT)
+		int_mask0 &= ~BIT(4);
+
+	if (errors & DSI_LP_RX_TIMEOUT)
+		int_mask0 &= ~BIT(5);
+	if (errors & DSI_HS_TX_TIMEOUT)
+		int_mask0 &= ~BIT(6);
+	if (errors & DSI_BTA_TIMEOUT)
+		int_mask0 &= ~BIT(7);
+
+	if (errors & DSI_PLL_UNLOCK)
+		int_mask0 &= ~BIT(28);
+
+	if (errors & DSI_DLN0_LP0_CONTENTION)
+		int_mask0 &= ~BIT(24);
+	if (errors & DSI_DLN0_LP1_CONTENTION)
+		int_mask0 &= ~BIT(25);
+	if (errors & DSI_DLN0_ESC_ENTRY_ERR)
+		int_mask0 &= ~BIT(21);
+	if (errors & DSI_DLN0_ESC_SYNC_ERR)
+		int_mask0 &= ~BIT(22);
+	if (errors & DSI_DLN0_LP_CONTROL_ERR)
+		int_mask0 &= ~BIT(23);
+
+	if (errors & DSI_CMD_DMA_FIFO_UNDERFLOW)
+		int_mask0 &= ~BIT(9);
+	if (errors & DSI_CMD_MDP_FIFO_UNDERFLOW)
+		int_mask0 &= ~BIT(11);
+	if (errors & DSI_DLN0_HS_FIFO_OVERFLOW)
+		int_mask0 &= ~BIT(16);
+	if (errors & DSI_DLN1_HS_FIFO_OVERFLOW)
+		int_mask0 &= ~BIT(17);
+	if (errors & DSI_DLN2_HS_FIFO_OVERFLOW)
+		int_mask0 &= ~BIT(18);
+	if (errors & DSI_DLN3_HS_FIFO_OVERFLOW)
+		int_mask0 &= ~BIT(19);
+	if (errors & DSI_DLN0_HS_FIFO_UNDERFLOW)
+		int_mask0 &= ~BIT(26);
+	if (errors & DSI_DLN1_HS_FIFO_UNDERFLOW)
+		int_mask0 &= ~BIT(27);
+	if (errors & DSI_DLN2_HS_FIFO_UNDERFLOW)
+		int_mask0 &= ~BIT(29);
+	if (errors & DSI_DLN3_HS_FIFO_UNDERFLOW)
+		int_mask0 &= ~BIT(30);
+
+	if (errors & DSI_INTERLEAVE_OP_CONTENTION)
+		int_mask0 &= ~BIT(8);
+
+	DSI_W32(ctrl, DSI_INT_CTRL, int_ctrl);
+	DSI_W32(ctrl, DSI_ERR_INT_MASK0, int_mask0);
+
+	pr_debug("[DSI_%d] enable errors = 0x%llx, int_mask0=0x%x\n",
+		 ctrl->index, errors, int_mask0);
+}
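+
+/*
+ * Usage sketch (illustrative, not called from this file): the mask written
+ * here is absolute, so a caller must pass the complete set of errors it
+ * wants enabled on every call, and pass 0 to disable them all:
+ *
+ *	u64 errs = DSI_DLN0_LP0_CONTENTION | DSI_BTA_TIMEOUT;
+ *
+ *	dsi_ctrl_hw_14_enable_error_interrupts(ctrl, errs);
+ *	dsi_ctrl_hw_14_enable_error_interrupts(ctrl, 0);
+ */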
+
+/**
+ * video_test_pattern_setup() - setup test pattern engine for video mode
+ * @ctrl:          Pointer to the controller host hardware.
+ * @type:          Type of test pattern.
+ * @init_val:      Initial value to use for generating test pattern.
+ */
+void dsi_ctrl_hw_14_video_test_pattern_setup(struct dsi_ctrl_hw *ctrl,
+					     enum dsi_test_pattern type,
+					     u32 init_val)
+{
+	u32 reg = 0;
+
+	DSI_W32(ctrl, DSI_TEST_PATTERN_GEN_VIDEO_INIT_VAL, init_val);
+
+	switch (type) {
+	case DSI_TEST_PATTERN_FIXED:
+		reg |= (0x2 << 4);
+		break;
+	case DSI_TEST_PATTERN_INC:
+		reg |= (0x1 << 4);
+		break;
+	case DSI_TEST_PATTERN_POLY:
+		DSI_W32(ctrl, DSI_TEST_PATTERN_GEN_VIDEO_POLY, 0xF0F0F);
+		break;
+	default:
+		break;
+	}
+
+	DSI_W32(ctrl, DSI_TPG_MAIN_CONTROL, 0x100);
+	DSI_W32(ctrl, DSI_TPG_VIDEO_CONFIG, 0x5);
+	DSI_W32(ctrl, DSI_TEST_PATTERN_GEN_CTRL, reg);
+
+	pr_debug("[DSI_%d] Video test pattern setup done\n", ctrl->index);
+}
+
+/**
+ * cmd_test_pattern_setup() - setup test pattern engine for cmd mode
+ * @ctrl:          Pointer to the controller host hardware.
+ * @type:          Type of test pattern.
+ * @init_val:      Initial value to use for generating test pattern.
+ * @stream_id:     Stream Id on which packets are generated.
+ */
+void dsi_ctrl_hw_14_cmd_test_pattern_setup(struct dsi_ctrl_hw *ctrl,
+					   enum dsi_test_pattern type,
+					   u32 init_val,
+					   u32 stream_id)
+{
+	u32 reg = 0;
+	u32 init_offset;
+	u32 poly_offset;
+	u32 pattern_sel_shift;
+
+	switch (stream_id) {
+	case 0:
+		init_offset = DSI_TEST_PATTERN_GEN_CMD_MDP_INIT_VAL0;
+		poly_offset = DSI_TEST_PATTERN_GEN_CMD_MDP_STREAM0_POLY;
+		pattern_sel_shift = 8;
+		break;
+	case 1:
+		init_offset = DSI_TEST_PATTERN_GEN_CMD_MDP_INIT_VAL1;
+		poly_offset = DSI_TEST_PATTERN_GEN_CMD_MDP_STREAM1_POLY;
+		pattern_sel_shift = 12;
+		break;
+	case 2:
+		init_offset = DSI_TEST_PATTERN_GEN_CMD_MDP_INIT_VAL2;
+		poly_offset = DSI_TEST_PATTERN_GEN_CMD_MDP_STREAM2_POLY;
+		pattern_sel_shift = 20;
+		break;
+	default:
+		return;
+	}
+
+	DSI_W32(ctrl, init_offset, init_val);
+
+	switch (type) {
+	case DSI_TEST_PATTERN_FIXED:
+		reg |= (0x2 << pattern_sel_shift);
+		break;
+	case DSI_TEST_PATTERN_INC:
+		reg |= (0x1 << pattern_sel_shift);
+		break;
+	case DSI_TEST_PATTERN_POLY:
+		DSI_W32(ctrl, poly_offset, 0xF0F0F);
+		break;
+	default:
+		break;
+	}
+
+	DSI_W32(ctrl, DSI_TEST_PATTERN_GEN_CTRL, reg);
+	pr_debug("[DSI_%d] Cmd test pattern setup done\n", ctrl->index);
+}
+
+/**
+ * test_pattern_enable() - enable test pattern engine
+ * @ctrl:          Pointer to the controller host hardware.
+ * @enable:        Enable/Disable test pattern engine.
+ */
+void dsi_ctrl_hw_14_test_pattern_enable(struct dsi_ctrl_hw *ctrl,
+					bool enable)
+{
+	u32 reg = DSI_R32(ctrl, DSI_TEST_PATTERN_GEN_CTRL);
+
+	if (enable)
+		reg |= BIT(0);
+	else
+		reg &= ~BIT(0);
+
+	DSI_W32(ctrl, DSI_TEST_PATTERN_GEN_CTRL, reg);
+
+	pr_debug("[DSI_%d] Test pattern enable=%d\n", ctrl->index, enable);
+}
+
+/**
+ * trigger_cmd_test_pattern() - trigger a command mode frame update with
+ *                              test pattern
+ * @ctrl:          Pointer to the controller host hardware.
+ * @stream_id:     Stream on which frame update is sent.
+ */
+void dsi_ctrl_hw_14_trigger_cmd_test_pattern(struct dsi_ctrl_hw *ctrl,
+					     u32 stream_id)
+{
+	switch (stream_id) {
+	case 0:
+		DSI_W32(ctrl, DSI_TEST_PATTERN_GEN_CMD_STREAM0_TRIGGER, 0x1);
+		break;
+	case 1:
+		DSI_W32(ctrl, DSI_TEST_PATTERN_GEN_CMD_STREAM1_TRIGGER, 0x1);
+		break;
+	case 2:
+		DSI_W32(ctrl, DSI_TEST_PATTERN_GEN_CMD_STREAM2_TRIGGER, 0x1);
+		break;
+	default:
+		break;
+	}
+
+	pr_debug("[DSI_%d] Cmd Test pattern trigger\n", ctrl->index);
+}
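+
+/*
+ * Example sequence (a sketch, not wired up in this file): push one command
+ * mode frame of an incrementing test pattern on stream 0 using the three
+ * TPG helpers above:
+ *
+ *	dsi_ctrl_hw_14_cmd_test_pattern_setup(ctrl, DSI_TEST_PATTERN_INC,
+ *					      0x0, 0);
+ *	dsi_ctrl_hw_14_test_pattern_enable(ctrl, true);
+ *	dsi_ctrl_hw_14_trigger_cmd_test_pattern(ctrl, 0);
+ */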
+
+#define DUMP_REG_VALUE(off) "\t%-30s: 0x%08x\n", #off, DSI_R32(ctrl, off)
+ssize_t dsi_ctrl_hw_14_reg_dump_to_buffer(struct dsi_ctrl_hw *ctrl,
+					  char *buf,
+					  u32 size)
+{
+	u32 len = 0;
+
+	len += snprintf((buf + len), (size - len), "CONFIGURATION REGS:\n");
+
+	len += snprintf((buf + len), (size - len),
+			DUMP_REG_VALUE(DSI_HW_VERSION));
+	len += snprintf((buf + len), (size - len),
+			DUMP_REG_VALUE(DSI_CTRL));
+	len += snprintf((buf + len), (size - len),
+			DUMP_REG_VALUE(DSI_STATUS));
+	len += snprintf((buf + len), (size - len),
+			DUMP_REG_VALUE(DSI_FIFO_STATUS));
+	len += snprintf((buf + len), (size - len),
+			DUMP_REG_VALUE(DSI_VIDEO_MODE_CTRL));
+	len += snprintf((buf + len), (size - len),
+			DUMP_REG_VALUE(DSI_VIDEO_MODE_SYNC_DATATYPE));
+	len += snprintf((buf + len), (size - len),
+			DUMP_REG_VALUE(DSI_VIDEO_MODE_PIXEL_DATATYPE));
+	len += snprintf((buf + len), (size - len),
+			DUMP_REG_VALUE(DSI_VIDEO_MODE_BLANKING_DATATYPE));
+	len += snprintf((buf + len), (size - len),
+			DUMP_REG_VALUE(DSI_VIDEO_MODE_DATA_CTRL));
+	len += snprintf((buf + len), (size - len),
+			DUMP_REG_VALUE(DSI_VIDEO_MODE_ACTIVE_H));
+	len += snprintf((buf + len), (size - len),
+			DUMP_REG_VALUE(DSI_VIDEO_MODE_ACTIVE_V));
+	len += snprintf((buf + len), (size - len),
+			DUMP_REG_VALUE(DSI_VIDEO_MODE_TOTAL));
+	len += snprintf((buf + len), (size - len),
+			DUMP_REG_VALUE(DSI_VIDEO_MODE_HSYNC));
+	len += snprintf((buf + len), (size - len),
+			DUMP_REG_VALUE(DSI_VIDEO_MODE_VSYNC));
+	len += snprintf((buf + len), (size - len),
+			DUMP_REG_VALUE(DSI_VIDEO_MODE_VSYNC_VPOS));
+	len += snprintf((buf + len), (size - len),
+			DUMP_REG_VALUE(DSI_COMMAND_MODE_DMA_CTRL));
+	len += snprintf((buf + len), (size - len),
+			DUMP_REG_VALUE(DSI_COMMAND_MODE_MDP_CTRL));
+	len += snprintf((buf + len), (size - len),
+			DUMP_REG_VALUE(DSI_COMMAND_MODE_MDP_DCS_CMD_CTRL));
+	len += snprintf((buf + len), (size - len),
+			DUMP_REG_VALUE(DSI_DMA_CMD_OFFSET));
+	len += snprintf((buf + len), (size - len),
+			DUMP_REG_VALUE(DSI_DMA_CMD_LENGTH));
+	len += snprintf((buf + len), (size - len),
+			DUMP_REG_VALUE(DSI_DMA_FIFO_CTRL));
+	len += snprintf((buf + len), (size - len),
+			DUMP_REG_VALUE(DSI_DMA_NULL_PACKET_DATA));
+	len += snprintf((buf + len), (size - len),
+			DUMP_REG_VALUE(DSI_COMMAND_MODE_MDP_STREAM0_CTRL));
+	len += snprintf((buf + len), (size - len),
+			DUMP_REG_VALUE(DSI_COMMAND_MODE_MDP_STREAM0_TOTAL));
+	len += snprintf((buf + len), (size - len),
+			DUMP_REG_VALUE(DSI_COMMAND_MODE_MDP_STREAM1_CTRL));
+	len += snprintf((buf + len), (size - len),
+			DUMP_REG_VALUE(DSI_COMMAND_MODE_MDP_STREAM1_TOTAL));
+	len += snprintf((buf + len), (size - len),
+			DUMP_REG_VALUE(DSI_ACK_ERR_STATUS));
+	len += snprintf((buf + len), (size - len),
+			DUMP_REG_VALUE(DSI_RDBK_DATA0));
+	len += snprintf((buf + len), (size - len),
+			DUMP_REG_VALUE(DSI_RDBK_DATA1));
+	len += snprintf((buf + len), (size - len),
+			DUMP_REG_VALUE(DSI_RDBK_DATA2));
+	len += snprintf((buf + len), (size - len),
+			DUMP_REG_VALUE(DSI_RDBK_DATA3));
+	len += snprintf((buf + len), (size - len),
+			DUMP_REG_VALUE(DSI_RDBK_DATATYPE0));
+	len += snprintf((buf + len), (size - len),
+			DUMP_REG_VALUE(DSI_RDBK_DATATYPE1));
+	len += snprintf((buf + len), (size - len),
+			DUMP_REG_VALUE(DSI_TRIG_CTRL));
+	len += snprintf((buf + len), (size - len),
+			DUMP_REG_VALUE(DSI_EXT_MUX));
+	len += snprintf((buf + len), (size - len),
+			DUMP_REG_VALUE(DSI_EXT_MUX_TE_PULSE_DETECT_CTRL));
+	len += snprintf((buf + len), (size - len),
+			DUMP_REG_VALUE(DSI_CMD_MODE_DMA_SW_TRIGGER));
+	len += snprintf((buf + len), (size - len),
+			DUMP_REG_VALUE(DSI_CMD_MODE_MDP_SW_TRIGGER));
+	len += snprintf((buf + len), (size - len),
+			DUMP_REG_VALUE(DSI_CMD_MODE_BTA_SW_TRIGGER));
+	len += snprintf((buf + len), (size - len),
+			DUMP_REG_VALUE(DSI_RESET_SW_TRIGGER));
+	len += snprintf((buf + len), (size - len),
+			DUMP_REG_VALUE(DSI_LANE_STATUS));
+	len += snprintf((buf + len), (size - len),
+			DUMP_REG_VALUE(DSI_LANE_CTRL));
+	len += snprintf((buf + len), (size - len),
+			DUMP_REG_VALUE(DSI_LANE_SWAP_CTRL));
+	len += snprintf((buf + len), (size - len),
+			DUMP_REG_VALUE(DSI_DLN0_PHY_ERR));
+	len += snprintf((buf + len), (size - len),
+			DUMP_REG_VALUE(DSI_LP_TIMER_CTRL));
+	len += snprintf((buf + len), (size - len),
+			DUMP_REG_VALUE(DSI_HS_TIMER_CTRL));
+	len += snprintf((buf + len), (size - len),
+			DUMP_REG_VALUE(DSI_TIMEOUT_STATUS));
+	len += snprintf((buf + len), (size - len),
+			DUMP_REG_VALUE(DSI_CLKOUT_TIMING_CTRL));
+	len += snprintf((buf + len), (size - len),
+			DUMP_REG_VALUE(DSI_EOT_PACKET));
+	len += snprintf((buf + len), (size - len),
+			DUMP_REG_VALUE(DSI_EOT_PACKET_CTRL));
+	len += snprintf((buf + len), (size - len),
+			DUMP_REG_VALUE(DSI_GENERIC_ESC_TX_TRIGGER));
+	len += snprintf((buf + len), (size - len),
+			DUMP_REG_VALUE(DSI_ERR_INT_MASK0));
+	len += snprintf((buf + len), (size - len),
+			DUMP_REG_VALUE(DSI_INT_CTRL));
+	len += snprintf((buf + len), (size - len),
+			DUMP_REG_VALUE(DSI_SOFT_RESET));
+	len += snprintf((buf + len), (size - len),
+			DUMP_REG_VALUE(DSI_CLK_CTRL));
+	len += snprintf((buf + len), (size - len),
+			DUMP_REG_VALUE(DSI_CLK_STATUS));
+	len += snprintf((buf + len), (size - len),
+			DUMP_REG_VALUE(DSI_PHY_SW_RESET));
+	len += snprintf((buf + len), (size - len),
+			DUMP_REG_VALUE(DSI_AXI2AHB_CTRL));
+	len += snprintf((buf + len), (size - len),
+			DUMP_REG_VALUE(DSI_COMMAND_MODE_MDP_CTRL2));
+	len += snprintf((buf + len), (size - len),
+			DUMP_REG_VALUE(DSI_COMMAND_MODE_MDP_STREAM2_CTRL));
+	len += snprintf((buf + len), (size - len),
+			DUMP_REG_VALUE(DSI_COMMAND_MODE_MDP_STREAM2_TOTAL));
+	len += snprintf((buf + len), (size - len),
+			DUMP_REG_VALUE(DSI_VBIF_CTRL));
+	len += snprintf((buf + len), (size - len),
+			DUMP_REG_VALUE(DSI_AES_CTRL));
+	len += snprintf((buf + len), (size - len),
+			DUMP_REG_VALUE(DSI_RDBK_DATA_CTRL));
+	len += snprintf((buf + len), (size - len),
+			DUMP_REG_VALUE(DSI_TEST_PATTERN_GEN_CMD_DMA_INIT_VAL2));
+	len += snprintf((buf + len), (size - len),
+			DUMP_REG_VALUE(DSI_TPG_DMA_FIFO_STATUS));
+	len += snprintf((buf + len), (size - len),
+			DUMP_REG_VALUE(DSI_TPG_DMA_FIFO_WRITE_TRIGGER));
+	len += snprintf((buf + len), (size - len),
+			DUMP_REG_VALUE(DSI_DSI_TIMING_FLUSH));
+	len += snprintf((buf + len), (size - len),
+			DUMP_REG_VALUE(DSI_DSI_TIMING_DB_MODE));
+	len += snprintf((buf + len), (size - len),
+			DUMP_REG_VALUE(DSI_TPG_DMA_FIFO_RESET));
+	len += snprintf((buf + len), (size - len),
+			DUMP_REG_VALUE(DSI_VERSION));
+
+	pr_err("LLENGTH = %d\n", len);
+	return len;
+}
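+
+/*
+ * Usage sketch (illustrative; the debugfs plumbing is up to the caller):
+ * dump the register state into a page-sized buffer:
+ *
+ *	char *buf = kzalloc(SZ_4K, GFP_KERNEL);
+ *
+ *	if (buf) {
+ *		ssize_t len = dsi_ctrl_hw_14_reg_dump_to_buffer(ctrl, buf,
+ *								SZ_4K);
+ *		pr_info("%.*s", (int)len, buf);
+ *		kfree(buf);
+ *	}
+ */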
+
+
diff -Nruw linux-4.4.115-fbx/drivers/gpu/drm/msm/dsi-staging./dsi_ctrl_hw.h linux-4.4.115-fbx/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw.h
--- linux-4.4.115-fbx/drivers/gpu/drm/msm/dsi-staging./dsi_ctrl_hw.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw.h	2019-01-22 16:16:23.487246262 +0100
@@ -0,0 +1,578 @@
+/*
+ * Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _DSI_CTRL_HW_H_
+#define _DSI_CTRL_HW_H_
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/bitops.h>
+#include <linux/bitmap.h>
+
+#include "dsi_defs.h"
+
+/**
+ * Modifier flag for command transmission. If this flag is set, command
+ * information is programmed to hardware and transmission is not triggered.
+ * Caller should call the trigger_command_dma() to start the transmission. This
+ * flag is valid for kickoff_command() and kickoff_fifo_command() operations.
+ */
+#define DSI_CTRL_HW_CMD_WAIT_FOR_TRIGGER            0x1
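+
+/*
+ * Deferred-trigger sketch (illustrative; cmd_info is a caller-populated
+ * struct dsi_ctrl_cmd_dma_info): program the command DMA without starting
+ * it, then start the transfer explicitly:
+ *
+ *	ctrl->ops.kickoff_command(ctrl, &cmd_info,
+ *				  DSI_CTRL_HW_CMD_WAIT_FOR_TRIGGER);
+ *	ctrl->ops.trigger_command_dma(ctrl);
+ */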
+
+/**
+ * enum dsi_ctrl_version - version of the dsi host controller
+ * @DSI_CTRL_VERSION_UNKNOWN: Unknown controller version
+ * @DSI_CTRL_VERSION_1_4:     DSI host v1.4 controller
+ * @DSI_CTRL_VERSION_2_0:     DSI host v2.0 controller
+ * @DSI_CTRL_VERSION_MAX:     max version
+ */
+enum dsi_ctrl_version {
+	DSI_CTRL_VERSION_UNKNOWN,
+	DSI_CTRL_VERSION_1_4,
+	DSI_CTRL_VERSION_2_0,
+	DSI_CTRL_VERSION_MAX
+};
+
+/**
+ * enum dsi_ctrl_hw_features - features supported by dsi host controller
+ * @DSI_CTRL_VIDEO_TPG:               Test pattern support for video mode.
+ * @DSI_CTRL_CMD_TPG:                 Test pattern support for command mode.
+ * @DSI_CTRL_VARIABLE_REFRESH_RATE:   variable panel timing
+ * @DSI_CTRL_DYNAMIC_REFRESH:         variable pixel clock rate
+ * @DSI_CTRL_NULL_PACKET_INSERTION:   NULL packet insertion
+ * @DSI_CTRL_DESKEW_CALIB:            Deskew calibration support
+ * @DSI_CTRL_DPHY:                    Controller support for DPHY
+ * @DSI_CTRL_CPHY:                    Controller support for CPHY
+ * @DSI_CTRL_MAX_FEATURES:
+ */
+enum dsi_ctrl_hw_features {
+	DSI_CTRL_VIDEO_TPG,
+	DSI_CTRL_CMD_TPG,
+	DSI_CTRL_VARIABLE_REFRESH_RATE,
+	DSI_CTRL_DYNAMIC_REFRESH,
+	DSI_CTRL_NULL_PACKET_INSERTION,
+	DSI_CTRL_DESKEW_CALIB,
+	DSI_CTRL_DPHY,
+	DSI_CTRL_CPHY,
+	DSI_CTRL_MAX_FEATURES
+};
+
+/**
+ * enum dsi_test_pattern - test pattern type
+ * @DSI_TEST_PATTERN_FIXED:     Test pattern is fixed, based on init value.
+ * @DSI_TEST_PATTERN_INC:       Incremental test pattern, based on init value.
+ * @DSI_TEST_PATTERN_POLY:      Pattern generated from polynomial and init val.
+ * @DSI_TEST_PATTERN_MAX:
+ */
+enum dsi_test_pattern {
+	DSI_TEST_PATTERN_FIXED = 0,
+	DSI_TEST_PATTERN_INC,
+	DSI_TEST_PATTERN_POLY,
+	DSI_TEST_PATTERN_MAX
+};
+
+/**
+ * enum dsi_status_int_type - status interrupts generated by DSI controller
+ * @DSI_CMD_MODE_DMA_DONE:        Command mode DMA packets are sent out.
+ * @DSI_CMD_STREAM0_FRAME_DONE:   A frame of command mode stream0 is sent out.
+ * @DSI_CMD_STREAM1_FRAME_DONE:   A frame of command mode stream1 is sent out.
+ * @DSI_CMD_STREAM2_FRAME_DONE:   A frame of command mode stream2 is sent out.
+ * @DSI_VIDEO_MODE_FRAME_DONE:    A frame of video mode stream is sent out.
+ * @DSI_BTA_DONE:                 A BTA is completed.
+ * @DSI_CMD_FRAME_DONE:           A frame of selected command mode stream is
+ *                                sent out by MDP.
+ * @DSI_DYN_REFRESH_DONE:         The dynamic refresh operation has completed.
+ * @DSI_DESKEW_DONE:              The deskew calibration operation has
+ *                                completed.
+ * @DSI_DYN_BLANK_DMA_DONE:       The dynamic blanking DMA operation has
+ *                                completed.
+ */
+enum dsi_status_int_type {
+	DSI_CMD_MODE_DMA_DONE = BIT(0),
+	DSI_CMD_STREAM0_FRAME_DONE = BIT(1),
+	DSI_CMD_STREAM1_FRAME_DONE = BIT(2),
+	DSI_CMD_STREAM2_FRAME_DONE = BIT(3),
+	DSI_VIDEO_MODE_FRAME_DONE = BIT(4),
+	DSI_BTA_DONE = BIT(5),
+	DSI_CMD_FRAME_DONE = BIT(6),
+	DSI_DYN_REFRESH_DONE = BIT(7),
+	DSI_DESKEW_DONE = BIT(8),
+	DSI_DYN_BLANK_DMA_DONE = BIT(9)
+};
+
+/**
+ * enum dsi_error_int_type - error interrupts generated by DSI controller
+ * @DSI_RDBK_SINGLE_ECC_ERR:        Single bit ECC error in read packet.
+ * @DSI_RDBK_MULTI_ECC_ERR:         Multi bit ECC error in read packet.
+ * @DSI_RDBK_CRC_ERR:               CRC error in read packet.
+ * @DSI_RDBK_INCOMPLETE_PKT:        Incomplete read packet.
+ * @DSI_PERIPH_ERROR_PKT:           Error packet returned from peripheral.
+ * @DSI_LP_RX_TIMEOUT:              Low power reverse transmission timeout.
+ * @DSI_HS_TX_TIMEOUT:              High speed forward transmission timeout.
+ * @DSI_BTA_TIMEOUT:                BTA timeout.
+ * @DSI_PLL_UNLOCK:                 PLL has unlocked.
+ * @DSI_DLN0_ESC_ENTRY_ERR:         Incorrect LP Rx escape entry.
+ * @DSI_DLN0_ESC_SYNC_ERR:          LP Rx data is not byte aligned.
+ * @DSI_DLN0_LP_CONTROL_ERR:        Incorrect LP Rx state sequence.
+ * @DSI_PENDING_HS_TX_TIMEOUT:      Pending High-speed transfer timeout.
+ * @DSI_INTERLEAVE_OP_CONTENTION:   Interleave operation contention.
+ * @DSI_CMD_DMA_FIFO_UNDERFLOW:     Command mode DMA FIFO underflow.
+ * @DSI_CMD_MDP_FIFO_UNDERFLOW:     Command MDP FIFO underflow (failed to
+ *                                  receive one complete line from MDP).
+ * @DSI_DLN0_HS_FIFO_OVERFLOW:      High speed FIFO for data lane 0 overflows.
+ * @DSI_DLN1_HS_FIFO_OVERFLOW:      High speed FIFO for data lane 1 overflows.
+ * @DSI_DLN2_HS_FIFO_OVERFLOW:      High speed FIFO for data lane 2 overflows.
+ * @DSI_DLN3_HS_FIFO_OVERFLOW:      High speed FIFO for data lane 3 overflows.
+ * @DSI_DLN0_HS_FIFO_UNDERFLOW:     High speed FIFO for data lane 0 underflows.
+ * @DSI_DLN1_HS_FIFO_UNDERFLOW:     High speed FIFO for data lane 1 underflows.
+ * @DSI_DLN2_HS_FIFO_UNDERFLOW:     High speed FIFO for data lane 2 underflows.
+ * @DSI_DLN3_HS_FIFO_UNDERFLOW:     High speed FIFO for data lane 3 underflows.
+ * @DSI_DLN0_LP0_CONTENTION:        PHY level contention while lane 0 is low.
+ * @DSI_DLN1_LP0_CONTENTION:        PHY level contention while lane 1 is low.
+ * @DSI_DLN2_LP0_CONTENTION:        PHY level contention while lane 2 is low.
+ * @DSI_DLN3_LP0_CONTENTION:        PHY level contention while lane 3 is low.
+ * @DSI_DLN0_LP1_CONTENTION:        PHY level contention while lane 0 is high.
+ * @DSI_DLN1_LP1_CONTENTION:        PHY level contention while lane 1 is high.
+ * @DSI_DLN2_LP1_CONTENTION:        PHY level contention while lane 2 is high.
+ * @DSI_DLN3_LP1_CONTENTION:        PHY level contention while lane 3 is high.
+ */
+enum dsi_error_int_type {
+	DSI_RDBK_SINGLE_ECC_ERR = BIT(0),
+	DSI_RDBK_MULTI_ECC_ERR = BIT(1),
+	DSI_RDBK_CRC_ERR = BIT(2),
+	DSI_RDBK_INCOMPLETE_PKT = BIT(3),
+	DSI_PERIPH_ERROR_PKT = BIT(4),
+	DSI_LP_RX_TIMEOUT = BIT(5),
+	DSI_HS_TX_TIMEOUT = BIT(6),
+	DSI_BTA_TIMEOUT = BIT(7),
+	DSI_PLL_UNLOCK = BIT(8),
+	DSI_DLN0_ESC_ENTRY_ERR = BIT(9),
+	DSI_DLN0_ESC_SYNC_ERR = BIT(10),
+	DSI_DLN0_LP_CONTROL_ERR = BIT(11),
+	DSI_PENDING_HS_TX_TIMEOUT = BIT(12),
+	DSI_INTERLEAVE_OP_CONTENTION = BIT(13),
+	DSI_CMD_DMA_FIFO_UNDERFLOW = BIT(14),
+	DSI_CMD_MDP_FIFO_UNDERFLOW = BIT(15),
+	DSI_DLN0_HS_FIFO_OVERFLOW = BIT(16),
+	DSI_DLN1_HS_FIFO_OVERFLOW = BIT(17),
+	DSI_DLN2_HS_FIFO_OVERFLOW = BIT(18),
+	DSI_DLN3_HS_FIFO_OVERFLOW = BIT(19),
+	DSI_DLN0_HS_FIFO_UNDERFLOW = BIT(20),
+	DSI_DLN1_HS_FIFO_UNDERFLOW = BIT(21),
+	DSI_DLN2_HS_FIFO_UNDERFLOW = BIT(22),
+	DSI_DLN3_HS_FIFO_UNDERFLOW = BIT(23),
+	DSI_DLN0_LP0_CONTENTION = BIT(24),
+	DSI_DLN1_LP0_CONTENTION = BIT(25),
+	DSI_DLN2_LP0_CONTENTION = BIT(26),
+	DSI_DLN3_LP0_CONTENTION = BIT(27),
+	DSI_DLN0_LP1_CONTENTION = BIT(28),
+	DSI_DLN1_LP1_CONTENTION = BIT(29),
+	DSI_DLN2_LP1_CONTENTION = BIT(30),
+	DSI_DLN3_LP1_CONTENTION = BIT(31),
+};
+
+/**
+ * struct dsi_ctrl_cmd_dma_info - command buffer information
+ * @offset:        IOMMU VA for command buffer address.
+ * @length:        Length of the command buffer.
+ * @en_broadcast:  Enable broadcast mode if set to true.
+ * @is_master:     Is master in broadcast mode.
+ * @use_lpm:       Use low power mode for command transmission.
+ */
+struct dsi_ctrl_cmd_dma_info {
+	u32 offset;
+	u32 length;
+	bool en_broadcast;
+	bool is_master;
+	bool use_lpm;
+};
+
+/**
+ * struct dsi_ctrl_cmd_dma_fifo_info - command payload to be sent using FIFO
+ * @command:        VA for command buffer.
+ * @size:           Size of the command buffer.
+ * @en_broadcast:   Enable broadcast mode if set to true.
+ * @is_master:      Is master in broadcast mode.
+ * @use_lpm:        Use low power mode for command transmission.
+ */
+struct dsi_ctrl_cmd_dma_fifo_info {
+	u32 *command;
+	u32 size;
+	bool en_broadcast;
+	bool is_master;
+	bool use_lpm;
+};
+
+struct dsi_ctrl_hw;
+
+/**
+ * struct dsi_ctrl_hw_ops - operations supported by dsi host hardware
+ */
+struct dsi_ctrl_hw_ops {
+
+	/**
+	 * host_setup() - Setup DSI host configuration
+	 * @ctrl:          Pointer to controller host hardware.
+	 * @config:        Configuration for DSI host controller
+	 */
+	void (*host_setup)(struct dsi_ctrl_hw *ctrl,
+			   struct dsi_host_common_cfg *config);
+
+	/**
+	 * video_engine_en() - enable DSI video engine
+	 * @ctrl:          Pointer to controller host hardware.
+	 * @on:            Enable/disable video engine.
+	 */
+	void (*video_engine_en)(struct dsi_ctrl_hw *ctrl, bool on);
+
+	/**
+	 * video_engine_setup() - Setup dsi host controller for video mode
+	 * @ctrl:          Pointer to controller host hardware.
+	 * @common_cfg:    Common configuration parameters.
+	 * @cfg:           Video mode configuration.
+	 *
+	 * Set up DSI video engine with a specific configuration. Controller and
+	 * video engine are not enabled as part of this function.
+	 */
+	void (*video_engine_setup)(struct dsi_ctrl_hw *ctrl,
+				   struct dsi_host_common_cfg *common_cfg,
+				   struct dsi_video_engine_cfg *cfg);
+
+	/**
+	 * set_video_timing() - set up the timing for video frame
+	 * @ctrl:          Pointer to controller host hardware.
+	 * @mode:          Video mode information.
+	 *
+	 * Set up the video timing parameters for the DSI video mode operation.
+	 */
+	void (*set_video_timing)(struct dsi_ctrl_hw *ctrl,
+				 struct dsi_mode_info *mode);
+
+	/**
+	 * cmd_engine_setup() - setup dsi host controller for command mode
+	 * @ctrl:          Pointer to the controller host hardware.
+	 * @common_cfg:    Common configuration parameters.
+	 * @cfg:           Command mode configuration.
+	 *
+	 * Setup DSI CMD engine with a specific configuration. Controller and
+	 * command engine are not enabled as part of this function.
+	 */
+	void (*cmd_engine_setup)(struct dsi_ctrl_hw *ctrl,
+				 struct dsi_host_common_cfg *common_cfg,
+				 struct dsi_cmd_engine_cfg *cfg);
+
+	/**
+	 * setup_cmd_stream() - set up parameters for command pixel streams
+	 * @ctrl:          Pointer to controller host hardware.
+	 * @width_in_pixels:   Width of the stream in pixels.
+	 * @h_stride:          Horizontal stride in bytes.
+	 * @height_in_lines:   Number of lines in the stream.
+	 * @vc_id:             Stream ID.
+	 *
+	 * Setup parameters for command mode pixel stream size.
+	 */
+	void (*setup_cmd_stream)(struct dsi_ctrl_hw *ctrl,
+				 u32 width_in_pixels,
+				 u32 h_stride,
+				 u32 height_in_lines,
+				 u32 vc_id);
+
+	/**
+	 * ctrl_en() - enable DSI controller engine
+	 * @ctrl:          Pointer to the controller host hardware.
+	 * @on:            turn on/off the DSI controller engine.
+	 */
+	void (*ctrl_en)(struct dsi_ctrl_hw *ctrl, bool on);
+
+	/**
+	 * cmd_engine_en() - enable DSI controller command engine
+	 * @ctrl:          Pointer to the controller host hardware.
+	 * @on:            Turn on/off the DSI command engine.
+	 */
+	void (*cmd_engine_en)(struct dsi_ctrl_hw *ctrl, bool on);
+
+	/**
+	 * phy_sw_reset() - perform a soft reset on the PHY.
+	 * @ctrl:        Pointer to the controller host hardware.
+	 */
+	void (*phy_sw_reset)(struct dsi_ctrl_hw *ctrl);
+
+	/**
+	 * soft_reset() - perform a soft reset on DSI controller
+	 * @ctrl:          Pointer to the controller host hardware.
+	 *
+	 * The video, command and controller engines will be disabled before
+	 * the reset is triggered. These engines will not be re-enabled after
+	 * the reset is complete; the caller must re-enable them.
+	 *
+	 * If the reset is done while the MDP timing engine is turned on, the
+	 * video engine should be re-enabled only during the vertical blanking
+	 * time.
+	 */
+	void (*soft_reset)(struct dsi_ctrl_hw *ctrl);
+
+	/**
+	 * setup_lane_map() - setup mapping between logical and physical lanes
+	 * @ctrl:          Pointer to the controller host hardware.
+	 * @lane_map:      Structure defining the mapping between DSI logical
+	 *                 lanes and physical lanes.
+	 */
+	void (*setup_lane_map)(struct dsi_ctrl_hw *ctrl,
+			       struct dsi_lane_mapping *lane_map);
+
+	/**
+	 * kickoff_command() - transmits commands stored in memory
+	 * @ctrl:          Pointer to the controller host hardware.
+	 * @cmd:           Command information.
+	 * @flags:         Modifiers for command transmission.
+	 *
+	 * The controller hardware is programmed with address and size of the
+	 * command buffer. The transmission is kicked off if
+	 * DSI_CTRL_HW_CMD_WAIT_FOR_TRIGGER flag is not set. If this flag is
+	 * set, caller should make a separate call to trigger_command_dma() to
+	 * transmit the command.
+	 */
+	void (*kickoff_command)(struct dsi_ctrl_hw *ctrl,
+				struct dsi_ctrl_cmd_dma_info *cmd,
+				u32 flags);
+
+	/**
+	 * kickoff_fifo_command() - transmits a command using FIFO in dsi
+	 *                          hardware.
+	 * @ctrl:          Pointer to the controller host hardware.
+	 * @cmd:           Command information.
+	 * @flags:         Modifiers for command transmission.
+	 *
+	 * The controller hardware FIFO is programmed with command header and
+	 * payload. The transmission is kicked off if
+	 * DSI_CTRL_HW_CMD_WAIT_FOR_TRIGGER flag is not set. If this flag is
+	 * set, caller should make a separate call to trigger_command_dma() to
+	 * transmit the command.
+	 */
+	void (*kickoff_fifo_command)(struct dsi_ctrl_hw *ctrl,
+				     struct dsi_ctrl_cmd_dma_fifo_info *cmd,
+				     u32 flags);
+
+	/**
+	 * reset_cmd_fifo() - reset the command FIFO in DSI hardware
+	 * @ctrl:          Pointer to the controller host hardware.
+	 */
+	void (*reset_cmd_fifo)(struct dsi_ctrl_hw *ctrl);
+
+	/**
+	 * trigger_command_dma() - trigger transmission of command buffer.
+	 * @ctrl:          Pointer to the controller host hardware.
+	 *
+	 * This trigger can only be used if there was a prior call to
+	 * kickoff_command() or kickoff_fifo_command() with the
+	 * DSI_CTRL_HW_CMD_WAIT_FOR_TRIGGER flag.
+	 */
+	void (*trigger_command_dma)(struct dsi_ctrl_hw *ctrl);
+
+	/**
+	 * get_cmd_read_data() - get data read from the peripheral
+	 * @ctrl:           Pointer to the controller host hardware.
+	 * @rd_buf:         Buffer where data will be read into.
+	 * @total_read_len: Number of bytes to read.
+	 */
+	u32 (*get_cmd_read_data)(struct dsi_ctrl_hw *ctrl,
+				 u8 *rd_buf,
+				 u32 total_read_len);
+
+	/**
+	 * ulps_request() - request ulps entry for specified lanes
+	 * @ctrl:          Pointer to the controller host hardware.
+	 * @lanes:         ORed list of lanes (enum dsi_data_lanes) which need
+	 *                 to enter ULPS.
+	 *
+	 * Caller should check if lanes are in ULPS mode by calling
+	 * get_lanes_in_ulps() operation.
+	 */
+	void (*ulps_request)(struct dsi_ctrl_hw *ctrl, u32 lanes);
+
+	/**
+	 * ulps_exit() - exit ULPS on specified lanes
+	 * @ctrl:          Pointer to the controller host hardware.
+	 * @lanes:         ORed list of lanes (enum dsi_data_lanes) which need
+	 *                 to exit ULPS.
+	 *
+	 * Caller should check if lanes are in active mode by calling
+	 * get_lanes_in_ulps() operation.
+	 */
+	void (*ulps_exit)(struct dsi_ctrl_hw *ctrl, u32 lanes);
+
+	/**
+	 * clear_ulps_request() - clear ulps request once all lanes are active
+	 * @ctrl:          Pointer to controller host hardware.
+	 * @lanes:         ORed list of lanes (enum dsi_data_lanes).
+	 *
+	 * ULPS request should be cleared after the lanes have exited ULPS.
+	 */
+	void (*clear_ulps_request)(struct dsi_ctrl_hw *ctrl, u32 lanes);
+
+	/**
+	 * get_lanes_in_ulps() - returns the list of lanes in ULPS mode
+	 * @ctrl:          Pointer to the controller host hardware.
+	 *
+	 * Returns an ORed list of lanes (enum dsi_data_lanes) that are in ULPS
+	 * state. If 0 is returned, all the lanes are active.
+	 *
+	 * Return: List of lanes in ULPS state.
+	 */
+	u32 (*get_lanes_in_ulps)(struct dsi_ctrl_hw *ctrl);
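+
+	/*
+	 * ULPS entry sketch (illustrative, assuming a caller-held ctrl):
+	 * request ULPS on all four data lanes, then poll until
+	 * get_lanes_in_ulps() reports them in the ULPS state:
+	 *
+	 *	u32 lanes = DSI_DATA_LANE_0 | DSI_DATA_LANE_1 |
+	 *		    DSI_DATA_LANE_2 | DSI_DATA_LANE_3;
+	 *
+	 *	ctrl->ops.ulps_request(ctrl, lanes);
+	 *	while ((ctrl->ops.get_lanes_in_ulps(ctrl) & lanes) != lanes)
+	 *		udelay(100);
+	 */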
+
+	/**
+	 * clamp_enable() - enable DSI clamps to keep PHY driving a stable link
+	 * @ctrl:          Pointer to the controller host hardware.
+	 * @lanes:         ORed list of lanes which need to be clamped.
+	 * @enable_ulps:   TODO:??
+	 */
+	void (*clamp_enable)(struct dsi_ctrl_hw *ctrl,
+			     u32 lanes,
+			     bool enable_ulps);
+
+	/**
+	 * clamp_disable() - disable DSI clamps
+	 * @ctrl:         Pointer to the controller host hardware.
+	 * @lanes:        ORed list of lanes which need to have clamps released.
+	 * @disable_ulps: TODO:??
+	 */
+	void (*clamp_disable)(struct dsi_ctrl_hw *ctrl,
+			      u32 lanes,
+			      bool disable_ulps);
+
+	/**
+	 * get_interrupt_status() - returns the interrupt status
+	 * @ctrl:          Pointer to the controller host hardware.
+	 *
+	 * Returns the ORed list of interrupts (enum dsi_status_int_type) that
+	 * are active. This list does not include any error interrupts. Caller
+	 * should call get_error_status for error interrupts.
+	 *
+	 * Return: List of active interrupts.
+	 */
+	u32 (*get_interrupt_status)(struct dsi_ctrl_hw *ctrl);
+
+	/**
+	 * clear_interrupt_status() - clears the specified interrupts
+	 * @ctrl:          Pointer to the controller host hardware.
+	 * @ints:          List of interrupts to be cleared.
+	 */
+	void (*clear_interrupt_status)(struct dsi_ctrl_hw *ctrl, u32 ints);
+
+	/**
+	 * enable_status_interrupts() - enable the specified interrupts
+	 * @ctrl:          Pointer to the controller host hardware.
+	 * @ints:          List of interrupts to be enabled.
+	 *
+	 * Enables the specified interrupts. This list will override the
+	 * previous interrupts enabled through this function. Caller has to
+	 * maintain the state of the interrupts enabled. To disable all
+	 * interrupts, set ints to 0.
+	 */
+	void (*enable_status_interrupts)(struct dsi_ctrl_hw *ctrl, u32 ints);
+
+	/**
+	 * get_error_status() - returns the error status
+	 * @ctrl:          Pointer to the controller host hardware.
+	 *
+	 * Returns the ORed list of errors (enum dsi_error_int_type) that are
+	 * active. This list does not include any status interrupts. Caller
+	 * should call get_interrupt_status for status interrupts.
+	 *
+	 * Return: List of active error interrupts.
+	 */
+	u64 (*get_error_status)(struct dsi_ctrl_hw *ctrl);
+
+	/**
+	 * clear_error_status() - clears the specified errors
+	 * @ctrl:          Pointer to the controller host hardware.
+	 * @errors:        List of errors to be cleared.
+	 */
+	void (*clear_error_status)(struct dsi_ctrl_hw *ctrl, u64 errors);
+
+	/**
+	 * enable_error_interrupts() - enable the specified interrupts
+	 * @ctrl:          Pointer to the controller host hardware.
+	 * @errors:        List of errors to be enabled.
+	 *
+	 * Enables the specified interrupts. This list will override the
+	 * previous interrupts enabled through this function. Caller has to
+	 * maintain the state of the interrupts enabled. To disable all
+	 * interrupts, set errors to 0.
+	 */
+	void (*enable_error_interrupts)(struct dsi_ctrl_hw *ctrl, u64 errors);
+
+	/**
+	 * video_test_pattern_setup() - setup test pattern engine for video mode
+	 * @ctrl:          Pointer to the controller host hardware.
+	 * @type:          Type of test pattern.
+	 * @init_val:      Initial value to use for generating test pattern.
+	 */
+	void (*video_test_pattern_setup)(struct dsi_ctrl_hw *ctrl,
+					 enum dsi_test_pattern type,
+					 u32 init_val);
+
+	/**
+	 * cmd_test_pattern_setup() - setup test pattern engine for cmd mode
+	 * @ctrl:          Pointer to the controller host hardware.
+	 * @type:          Type of test pattern.
+	 * @init_val:      Initial value to use for generating test pattern.
+	 * @stream_id:     Stream Id on which packets are generated.
+	 */
+	void (*cmd_test_pattern_setup)(struct dsi_ctrl_hw *ctrl,
+				       enum dsi_test_pattern  type,
+				       u32 init_val,
+				       u32 stream_id);
+
+	/**
+	 * test_pattern_enable() - enable test pattern engine
+	 * @ctrl:          Pointer to the controller host hardware.
+	 * @enable:        Enable/Disable test pattern engine.
+	 */
+	void (*test_pattern_enable)(struct dsi_ctrl_hw *ctrl, bool enable);
+
+	/**
+	 * trigger_cmd_test_pattern() - trigger a command mode frame update with
+	 *                              test pattern
+	 * @ctrl:          Pointer to the controller host hardware.
+	 * @stream_id:     Stream on which frame update is sent.
+	 */
+	void (*trigger_cmd_test_pattern)(struct dsi_ctrl_hw *ctrl,
+					 u32 stream_id);
+
+	/**
+	 * reg_dump_to_buffer() - dump controller registers into a buffer
+	 * @ctrl:          Pointer to the controller host hardware.
+	 * @buf:           Output buffer for the register dump.
+	 * @size:          Size of the output buffer.
+	 *
+	 * Return: Number of bytes written to @buf.
+	 */
+	ssize_t (*reg_dump_to_buffer)(struct dsi_ctrl_hw *ctrl,
+				      char *buf,
+				      u32 size);
+};
+
+/**
+ * struct dsi_ctrl_hw - DSI controller hardware object specific to an instance
+ * @base:                  VA for the DSI controller base address.
+ * @length:                Length of the DSI controller register map.
+ * @mmss_misc_base:        VA for the MMSS misc register base address.
+ * @mmss_misc_length:      Length of the MMSS misc register map.
+ * @index:                 Instance ID of the controller.
+ * @feature_map:           Features supported by the DSI controller.
+ * @ops:                   Function pointers to the operations supported by the
+ *                         controller.
+ * @supported_interrupts:  ORed list of status interrupts supported by the
+ *                         controller.
+ * @supported_errors:      ORed list of error interrupts supported by the
+ *                         controller.
+ */
+struct dsi_ctrl_hw {
+	void __iomem *base;
+	u32 length;
+	void __iomem *mmss_misc_base;
+	u32 mmss_misc_length;
+	u32 index;
+
+	/* features */
+	DECLARE_BITMAP(feature_map, DSI_CTRL_MAX_FEATURES);
+	struct dsi_ctrl_hw_ops ops;
+
+	/* capabilities */
+	u32 supported_interrupts;
+	u64 supported_errors;
+};
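+
+/*
+ * Feature-check sketch (illustrative): @feature_map is a bitmap indexed by
+ * enum dsi_ctrl_hw_features, so callers can gate optional paths with
+ * test_bit():
+ *
+ *	if (test_bit(DSI_CTRL_VIDEO_TPG, ctrl->feature_map))
+ *		ctrl->ops.video_test_pattern_setup(ctrl,
+ *						   DSI_TEST_PATTERN_FIXED,
+ *						   0x3ff);
+ */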
+
+#endif /* _DSI_CTRL_HW_H_ */
diff -Nruw linux-4.4.115-fbx/drivers/gpu/drm/msm/dsi-staging./dsi_ctrl_reg_1_4.h linux-4.4.115-fbx/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_reg_1_4.h
--- linux-4.4.115-fbx/drivers/gpu/drm/msm/dsi-staging./dsi_ctrl_reg_1_4.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_reg_1_4.h	2019-01-22 16:16:23.487246262 +0100
@@ -0,0 +1,192 @@
+/*
+ * Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _DSI_CTRL_REG_H_
+#define _DSI_CTRL_REG_H_
+
+#define DSI_HW_VERSION                             (0x0000)
+#define DSI_CTRL                                   (0x0004)
+#define DSI_STATUS                                 (0x0008)
+#define DSI_FIFO_STATUS                            (0x000C)
+#define DSI_VIDEO_MODE_CTRL                        (0x0010)
+#define DSI_VIDEO_MODE_SYNC_DATATYPE               (0x0014)
+#define DSI_VIDEO_MODE_PIXEL_DATATYPE              (0x0018)
+#define DSI_VIDEO_MODE_BLANKING_DATATYPE           (0x001C)
+#define DSI_VIDEO_MODE_DATA_CTRL                   (0x0020)
+#define DSI_VIDEO_MODE_ACTIVE_H                    (0x0024)
+#define DSI_VIDEO_MODE_ACTIVE_V                    (0x0028)
+#define DSI_VIDEO_MODE_TOTAL                       (0x002C)
+#define DSI_VIDEO_MODE_HSYNC                       (0x0030)
+#define DSI_VIDEO_MODE_VSYNC                       (0x0034)
+#define DSI_VIDEO_MODE_VSYNC_VPOS                  (0x0038)
+#define DSI_COMMAND_MODE_DMA_CTRL                  (0x003C)
+#define DSI_COMMAND_MODE_MDP_CTRL                  (0x0040)
+#define DSI_COMMAND_MODE_MDP_DCS_CMD_CTRL          (0x0044)
+#define DSI_DMA_CMD_OFFSET                         (0x0048)
+#define DSI_DMA_CMD_LENGTH                         (0x004C)
+#define DSI_DMA_FIFO_CTRL                          (0x0050)
+#define DSI_DMA_NULL_PACKET_DATA                   (0x0054)
+#define DSI_COMMAND_MODE_MDP_STREAM0_CTRL          (0x0058)
+#define DSI_COMMAND_MODE_MDP_STREAM0_TOTAL         (0x005C)
+#define DSI_COMMAND_MODE_MDP_STREAM1_CTRL          (0x0060)
+#define DSI_COMMAND_MODE_MDP_STREAM1_TOTAL         (0x0064)
+#define DSI_ACK_ERR_STATUS                         (0x0068)
+#define DSI_RDBK_DATA0                             (0x006C)
+#define DSI_RDBK_DATA1                             (0x0070)
+#define DSI_RDBK_DATA2                             (0x0074)
+#define DSI_RDBK_DATA3                             (0x0078)
+#define DSI_RDBK_DATATYPE0                         (0x007C)
+#define DSI_RDBK_DATATYPE1                         (0x0080)
+#define DSI_TRIG_CTRL                              (0x0084)
+#define DSI_EXT_MUX                                (0x0088)
+#define DSI_EXT_MUX_TE_PULSE_DETECT_CTRL           (0x008C)
+#define DSI_CMD_MODE_DMA_SW_TRIGGER                (0x0090)
+#define DSI_CMD_MODE_MDP_SW_TRIGGER                (0x0094)
+#define DSI_CMD_MODE_BTA_SW_TRIGGER                (0x0098)
+#define DSI_RESET_SW_TRIGGER                       (0x009C)
+#define DSI_MISR_CMD_CTRL                          (0x00A0)
+#define DSI_MISR_VIDEO_CTRL                        (0x00A4)
+#define DSI_LANE_STATUS                            (0x00A8)
+#define DSI_LANE_CTRL                              (0x00AC)
+#define DSI_LANE_SWAP_CTRL                         (0x00B0)
+#define DSI_DLN0_PHY_ERR                           (0x00B4)
+#define DSI_LP_TIMER_CTRL                          (0x00B8)
+#define DSI_HS_TIMER_CTRL                          (0x00BC)
+#define DSI_TIMEOUT_STATUS                         (0x00C0)
+#define DSI_CLKOUT_TIMING_CTRL                     (0x00C4)
+#define DSI_EOT_PACKET                             (0x00C8)
+#define DSI_EOT_PACKET_CTRL                        (0x00CC)
+#define DSI_GENERIC_ESC_TX_TRIGGER                 (0x00D0)
+#define DSI_CAM_BIST_CTRL                          (0x00D4)
+#define DSI_CAM_BIST_FRAME_SIZE                    (0x00D8)
+#define DSI_CAM_BIST_BLOCK_SIZE                    (0x00DC)
+#define DSI_CAM_BIST_FRAME_CONFIG                  (0x00E0)
+#define DSI_CAM_BIST_LSFR_CTRL                     (0x00E4)
+#define DSI_CAM_BIST_LSFR_INIT                     (0x00E8)
+#define DSI_CAM_BIST_START                         (0x00EC)
+#define DSI_CAM_BIST_STATUS                        (0x00F0)
+#define DSI_ERR_INT_MASK0                          (0x010C)
+#define DSI_INT_CTRL                               (0x0110)
+#define DSI_IOBIST_CTRL                            (0x0114)
+#define DSI_SOFT_RESET                             (0x0118)
+#define DSI_CLK_CTRL                               (0x011C)
+#define DSI_CLK_STATUS                             (0x0120)
+#define DSI_PHY_SW_RESET                           (0x012C)
+#define DSI_AXI2AHB_CTRL                           (0x0130)
+#define DSI_MISR_CMD_MDP0_32BIT                    (0x0134)
+#define DSI_MISR_CMD_MDP1_32BIT                    (0x0138)
+#define DSI_MISR_CMD_DMA_32BIT                     (0x013C)
+#define DSI_MISR_VIDEO_32BIT                       (0x0140)
+#define DSI_LANE_MISR_CTRL                         (0x0144)
+#define DSI_LANE0_MISR                             (0x0148)
+#define DSI_LANE1_MISR                             (0x014C)
+#define DSI_LANE2_MISR                             (0x0150)
+#define DSI_LANE3_MISR                             (0x0154)
+#define DSI_TEST_PATTERN_GEN_CTRL                  (0x015C)
+#define DSI_TEST_PATTERN_GEN_VIDEO_POLY            (0x0160)
+#define DSI_TEST_PATTERN_GEN_VIDEO_INIT_VAL        (0x0164)
+#define DSI_TEST_PATTERN_GEN_CMD_MDP_STREAM0_POLY  (0x0168)
+#define DSI_TEST_PATTERN_GEN_CMD_MDP_INIT_VAL0     (0x016C)
+#define DSI_TEST_PATTERN_GEN_CMD_MDP_STREAM1_POLY  (0x0170)
+#define DSI_TEST_PATTERN_GEN_CMD_MDP_INIT_VAL1     (0x0174)
+#define DSI_TEST_PATTERN_GEN_CMD_DMA_POLY          (0x0178)
+#define DSI_TEST_PATTERN_GEN_CMD_DMA_INIT_VAL      (0x017C)
+#define DSI_TEST_PATTERN_GEN_VIDEO_ENABLE          (0x0180)
+#define DSI_TEST_PATTERN_GEN_CMD_STREAM0_TRIGGER   (0x0184)
+#define DSI_TEST_PATTERN_GEN_CMD_STREAM1_TRIGGER   (0x0188)
+#define DSI_TEST_PATTERN_GEN_CMD_MDP_INIT_VAL2     (0x018C)
+#define DSI_TEST_PATTERN_GEN_CMD_MDP_STREAM2_POLY  (0x0190)
+#define DSI_COMMAND_MODE_MDP_IDLE_CTRL             (0x0194)
+#define DSI_TEST_PATTERN_GEN_CMD_STREAM2_TRIGGER   (0x0198)
+#define DSI_TPG_MAIN_CONTROL                       (0x019C)
+#define DSI_TPG_MAIN_CONTROL2                      (0x01A0)
+#define DSI_TPG_VIDEO_CONFIG                       (0x01A4)
+#define DSI_TPG_COMPONENT_LIMITS                   (0x01A8)
+#define DSI_TPG_RECTANGLE                          (0x01AC)
+#define DSI_TPG_BLACK_WHITE_PATTERN_FRAMES         (0x01B0)
+#define DSI_TPG_RGB_MAPPING                        (0x01B4)
+#define DSI_COMMAND_MODE_MDP_CTRL2                 (0x01B8)
+#define DSI_COMMAND_MODE_MDP_STREAM2_CTRL          (0x01BC)
+#define DSI_COMMAND_MODE_MDP_STREAM2_TOTAL         (0x01C0)
+#define DSI_MISR_CMD_MDP2_8BIT                     (0x01C4)
+#define DSI_MISR_CMD_MDP2_32BIT                    (0x01C8)
+#define DSI_VBIF_CTRL                              (0x01CC)
+#define DSI_AES_CTRL                               (0x01D0)
+#define DSI_RDBK_DATA_CTRL                         (0x01D4)
+#define DSI_TEST_PATTERN_GEN_CMD_DMA_INIT_VAL2     (0x01D8)
+#define DSI_TPG_DMA_FIFO_STATUS                    (0x01DC)
+#define DSI_TPG_DMA_FIFO_WRITE_TRIGGER             (0x01E0)
+#define DSI_DSI_TIMING_FLUSH                       (0x01E4)
+#define DSI_DSI_TIMING_DB_MODE                     (0x01E8)
+#define DSI_TPG_DMA_FIFO_RESET                     (0x01EC)
+#define DSI_SCRATCH_REGISTER_0                     (0x01F0)
+#define DSI_VERSION                                (0x01F4)
+#define DSI_SCRATCH_REGISTER_1                     (0x01F8)
+#define DSI_SCRATCH_REGISTER_2                     (0x01FC)
+#define DSI_DYNAMIC_REFRESH_CTRL                   (0x0200)
+#define DSI_DYNAMIC_REFRESH_PIPE_DELAY             (0x0204)
+#define DSI_DYNAMIC_REFRESH_PIPE_DELAY2            (0x0208)
+#define DSI_DYNAMIC_REFRESH_PLL_DELAY              (0x020C)
+#define DSI_DYNAMIC_REFRESH_STATUS                 (0x0210)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL0              (0x0214)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL1              (0x0218)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL2              (0x021C)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL3              (0x0220)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL4              (0x0224)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL5              (0x0228)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL6              (0x022C)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL7              (0x0230)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL8              (0x0234)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL9              (0x0238)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL10             (0x023C)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL11             (0x0240)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL12             (0x0244)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL13             (0x0248)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL14             (0x024C)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL15             (0x0250)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL16             (0x0254)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL17             (0x0258)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL18             (0x025C)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL19             (0x0260)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL20             (0x0264)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL21             (0x0268)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL22             (0x026C)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL23             (0x0270)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL24             (0x0274)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL25             (0x0278)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL26             (0x027C)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL27             (0x0280)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL28             (0x0284)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL29             (0x0288)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL30             (0x028C)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL31             (0x0290)
+#define DSI_DYNAMIC_REFRESH_PLL_UPPER_ADDR         (0x0294)
+#define DSI_DYNAMIC_REFRESH_PLL_UPPER_ADDR2        (0x0298)
+#define DSI_VIDEO_COMPRESSION_MODE_CTRL            (0x02A0)
+#define DSI_VIDEO_COMPRESSION_MODE_CTRL2           (0x02A4)
+#define DSI_COMMAND_COMPRESSION_MODE_CTRL          (0x02A8)
+#define DSI_COMMAND_COMPRESSION_MODE_CTRL2         (0x02AC)
+#define DSI_COMMAND_COMPRESSION_MODE_CTRL3         (0x02B0)
+#define DSI_COMMAND_MODE_NULL_INSERTION_CTRL       (0x02B4)
+#define DSI_READ_BACK_DISABLE_STATUS               (0x02B8)
+#define DSI_DESKEW_CTRL                            (0x02BC)
+#define DSI_DESKEW_DELAY_CTRL                      (0x02C0)
+#define DSI_DESKEW_SW_TRIGGER                      (0x02C4)
+#define DSI_SECURE_DISPLAY_STATUS                  (0x02CC)
+#define DSI_SECURE_DISPLAY_BLOCK_COMMAND_COLOR     (0x02D0)
+#define DSI_SECURE_DISPLAY_BLOCK_VIDEO_COLOR       (0x02D4)
+
+
+#endif /* _DSI_CTRL_REG_H_ */
diff -Nruw linux-4.4.115-fbx/drivers/gpu/drm/msm/dsi-staging./dsi_defs.h linux-4.4.115-fbx/drivers/gpu/drm/msm/dsi-staging/dsi_defs.h
--- linux-4.4.115-fbx/drivers/gpu/drm/msm/dsi-staging./dsi_defs.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/gpu/drm/msm/dsi-staging/dsi_defs.h	2019-01-22 16:16:23.487246262 +0100
@@ -0,0 +1,374 @@
+/*
+ * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _DSI_DEFS_H_
+#define _DSI_DEFS_H_
+
+#include <linux/types.h>
+
+#define DSI_H_TOTAL(t) (((t)->h_active) + ((t)->h_back_porch) + \
+			((t)->h_sync_width) + ((t)->h_front_porch))
+
+#define DSI_V_TOTAL(t) (((t)->v_active) + ((t)->v_back_porch) + \
+			((t)->v_sync_width) + ((t)->v_front_porch))
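+
+/*
+ * Example (a sketch; "mode" is a caller-held struct dsi_mode_info): the two
+ * totals above give the full frame size, from which the required pixel clock
+ * for a given refresh rate follows directly:
+ *
+ *	u64 pclk_hz = (u64)DSI_H_TOTAL(&mode) * DSI_V_TOTAL(&mode) *
+ *		      mode.refresh_rate;
+ */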
+
+/**
+ * enum dsi_pixel_format - DSI pixel formats
+ * @DSI_PIXEL_FORMAT_RGB565:
+ * @DSI_PIXEL_FORMAT_RGB666:
+ * @DSI_PIXEL_FORMAT_RGB666_LOOSE:
+ * @DSI_PIXEL_FORMAT_RGB888:
+ * @DSI_PIXEL_FORMAT_RGB111:
+ * @DSI_PIXEL_FORMAT_RGB332:
+ * @DSI_PIXEL_FORMAT_RGB444:
+ * @DSI_PIXEL_FORMAT_MAX:
+ */
+enum dsi_pixel_format {
+	DSI_PIXEL_FORMAT_RGB565 = 0,
+	DSI_PIXEL_FORMAT_RGB666,
+	DSI_PIXEL_FORMAT_RGB666_LOOSE,
+	DSI_PIXEL_FORMAT_RGB888,
+	DSI_PIXEL_FORMAT_RGB111,
+	DSI_PIXEL_FORMAT_RGB332,
+	DSI_PIXEL_FORMAT_RGB444,
+	DSI_PIXEL_FORMAT_MAX
+};
+
+/**
+ * enum dsi_op_mode - dsi operation mode
+ * @DSI_OP_VIDEO_MODE: DSI video mode operation
+ * @DSI_OP_CMD_MODE:   DSI Command mode operation
+ * @DSI_OP_MODE_MAX:
+ */
+enum dsi_op_mode {
+	DSI_OP_VIDEO_MODE = 0,
+	DSI_OP_CMD_MODE,
+	DSI_OP_MODE_MAX
+};
+
+/**
+ * enum dsi_mode_flags - flags to signal other drm components via private flags
+ * @DSI_MODE_FLAG_SEAMLESS:	Seamless transition requested by user
+ * @DSI_MODE_FLAG_DFPS:		Seamless transition is DynamicFPS
+ * @DSI_MODE_FLAG_VBLANK_PRE_MODESET:	Transition needs VBLANK before Modeset
+ */
+enum dsi_mode_flags {
+	DSI_MODE_FLAG_SEAMLESS			= BIT(0),
+	DSI_MODE_FLAG_DFPS			= BIT(1),
+	DSI_MODE_FLAG_VBLANK_PRE_MODESET	= BIT(2)
+};
+
+/**
+ * enum dsi_data_lanes - dsi physical lanes
+ * @DSI_DATA_LANE_0: Physical lane 0
+ * @DSI_DATA_LANE_1: Physical lane 1
+ * @DSI_DATA_LANE_2: Physical lane 2
+ * @DSI_DATA_LANE_3: Physical lane 3
+ * @DSI_CLOCK_LANE:  Physical clock lane
+ */
+enum dsi_data_lanes {
+	DSI_DATA_LANE_0 = BIT(0),
+	DSI_DATA_LANE_1 = BIT(1),
+	DSI_DATA_LANE_2 = BIT(2),
+	DSI_DATA_LANE_3 = BIT(3),
+	DSI_CLOCK_LANE  = BIT(4)
+};
+
+/**
+ * enum dsi_logical_lane - dsi logical lanes
+ * @DSI_LOGICAL_LANE_0:     Logical lane 0
+ * @DSI_LOGICAL_LANE_1:     Logical lane 1
+ * @DSI_LOGICAL_LANE_2:     Logical lane 2
+ * @DSI_LOGICAL_LANE_3:     Logical lane 3
+ * @DSI_LOGICAL_CLOCK_LANE: Clock lane
+ * @DSI_LANE_MAX:           Maximum lanes supported
+ */
+enum dsi_logical_lane {
+	DSI_LOGICAL_LANE_0 = 0,
+	DSI_LOGICAL_LANE_1,
+	DSI_LOGICAL_LANE_2,
+	DSI_LOGICAL_LANE_3,
+	DSI_LOGICAL_CLOCK_LANE,
+	DSI_LANE_MAX
+};
+
+/**
+ * enum dsi_trigger_type - dsi trigger type
+ * @DSI_TRIGGER_NONE:     No trigger.
+ * @DSI_TRIGGER_TE:       TE trigger.
+ * @DSI_TRIGGER_SEOF:     Start or End of frame.
+ * @DSI_TRIGGER_SW:       Software trigger.
+ * @DSI_TRIGGER_SW_SEOF:  Software trigger and start/end of frame.
+ * @DSI_TRIGGER_SW_TE:    Software and TE triggers.
+ * @DSI_TRIGGER_MAX:      Max trigger values.
+ */
+enum dsi_trigger_type {
+	DSI_TRIGGER_NONE = 0,
+	DSI_TRIGGER_TE,
+	DSI_TRIGGER_SEOF,
+	DSI_TRIGGER_SW,
+	DSI_TRIGGER_SW_SEOF,
+	DSI_TRIGGER_SW_TE,
+	DSI_TRIGGER_MAX
+};
+
+/**
+ * enum dsi_color_swap_mode - color swap mode
+ * @DSI_COLOR_SWAP_RGB:
+ * @DSI_COLOR_SWAP_RBG:
+ * @DSI_COLOR_SWAP_BGR:
+ * @DSI_COLOR_SWAP_BRG:
+ * @DSI_COLOR_SWAP_GRB:
+ * @DSI_COLOR_SWAP_GBR:
+ */
+enum dsi_color_swap_mode {
+	DSI_COLOR_SWAP_RGB = 0,
+	DSI_COLOR_SWAP_RBG,
+	DSI_COLOR_SWAP_BGR,
+	DSI_COLOR_SWAP_BRG,
+	DSI_COLOR_SWAP_GRB,
+	DSI_COLOR_SWAP_GBR
+};
+
+/**
+ * enum dsi_dfps_type - Dynamic FPS support type
+ * @DSI_DFPS_NONE:           Dynamic FPS is not supported.
+ * @DSI_DFPS_SUSPEND_RESUME:
+ * @DSI_DFPS_IMMEDIATE_CLK:
+ * @DSI_DFPS_IMMEDIATE_HFP:
+ * @DSI_DFPS_IMMEDIATE_VFP:
+ * @DSI_DFPS_MAX:
+ */
+enum dsi_dfps_type {
+	DSI_DFPS_NONE = 0,
+	DSI_DFPS_SUSPEND_RESUME,
+	DSI_DFPS_IMMEDIATE_CLK,
+	DSI_DFPS_IMMEDIATE_HFP,
+	DSI_DFPS_IMMEDIATE_VFP,
+	DSI_DFPS_MAX
+};
+
+/**
+ * enum dsi_phy_type - DSI phy types
+ * @DSI_PHY_TYPE_DPHY:
+ * @DSI_PHY_TYPE_CPHY:
+ */
+enum dsi_phy_type {
+	DSI_PHY_TYPE_DPHY,
+	DSI_PHY_TYPE_CPHY
+};
+
+/**
+ * enum dsi_te_mode - dsi te source
+ * @DSI_TE_ON_DATA_LINK:    TE read from DSI link
+ * @DSI_TE_ON_EXT_PIN:      TE signal on an external GPIO
+ */
+enum dsi_te_mode {
+	DSI_TE_ON_DATA_LINK = 0,
+	DSI_TE_ON_EXT_PIN,
+};
+
+/**
+ * enum dsi_video_traffic_mode - video mode pixel transmission type
+ * @DSI_VIDEO_TRAFFIC_SYNC_PULSES:       Non-burst mode with sync pulses.
+ * @DSI_VIDEO_TRAFFIC_SYNC_START_EVENTS: Non-burst mode with sync start events.
+ * @DSI_VIDEO_TRAFFIC_BURST_MODE:        Burst mode using sync start events.
+ */
+enum dsi_video_traffic_mode {
+	DSI_VIDEO_TRAFFIC_SYNC_PULSES = 0,
+	DSI_VIDEO_TRAFFIC_SYNC_START_EVENTS,
+	DSI_VIDEO_TRAFFIC_BURST_MODE,
+};
+
+/**
+ * struct dsi_mode_info - video mode timing information for a DSI frame
+ * @h_active:         Active width of one frame in pixels.
+ * @h_back_porch:     Horizontal back porch in pixels.
+ * @h_sync_width:     HSYNC width in pixels.
+ * @h_front_porch:    Horizontal front porch in pixels.
+ * @h_skew:
+ * @h_sync_polarity:  Polarity of HSYNC (false is active high).
+ * @v_active:         Active height of one frame in lines.
+ * @v_back_porch:     Vertical back porch in lines.
+ * @v_sync_width:     VSYNC width in lines.
+ * @v_front_porch:    Vertical front porch in lines.
+ * @v_sync_polarity:  Polarity of VSYNC (false is active high).
+ * @refresh_rate:     Refresh rate in Hz.
+ */
+struct dsi_mode_info {
+	u32 h_active;
+	u32 h_back_porch;
+	u32 h_sync_width;
+	u32 h_front_porch;
+	u32 h_skew;
+	bool h_sync_polarity;
+
+	u32 v_active;
+	u32 v_back_porch;
+	u32 v_sync_width;
+	u32 v_front_porch;
+	bool v_sync_polarity;
+
+	u32 refresh_rate;
+};
+
+/**
+ * struct dsi_lane_mapping - Mapping between DSI logical and physical lanes
+ * @physical_lane0:   Logical lane to which physical lane 0 is mapped.
+ * @physical_lane1:   Logical lane to which physical lane 1 is mapped.
+ * @physical_lane2:   Logical lane to which physical lane 2 is mapped.
+ * @physical_lane3:   Logical lane to which physical lane 3 is mapped.
+ */
+struct dsi_lane_mapping {
+	enum dsi_logical_lane physical_lane0;
+	enum dsi_logical_lane physical_lane1;
+	enum dsi_logical_lane physical_lane2;
+	enum dsi_logical_lane physical_lane3;
+};
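+
+/*
+ * Example (illustrative): an identity mapping, where each physical lane
+ * carries the logical lane of the same index:
+ *
+ *	struct dsi_lane_mapping map = {
+ *		.physical_lane0 = DSI_LOGICAL_LANE_0,
+ *		.physical_lane1 = DSI_LOGICAL_LANE_1,
+ *		.physical_lane2 = DSI_LOGICAL_LANE_2,
+ *		.physical_lane3 = DSI_LOGICAL_LANE_3,
+ *	};
+ */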
+
+/**
+ * struct dsi_host_common_cfg - Host configuration common to video and cmd mode
+ * @dst_format:          Destination pixel format.
+ * @data_lanes:          Physical data lanes to be enabled.
+ * @en_crc_check:        Enable CRC checks.
+ * @en_ecc_check:        Enable ECC checks.
+ * @te_mode:             Source for TE signalling.
+ * @mdp_cmd_trigger:     MDP frame update trigger for command mode.
+ * @dma_cmd_trigger:     Command DMA trigger.
+ * @cmd_trigger_stream:  Command mode stream to trigger.
+ * @swap_mode:           Color component swap mode.
+ * @bit_swap_red:        Is red color bit swapped.
+ * @bit_swap_green:      Is green color bit swapped.
+ * @bit_swap_blue:       Is blue color bit swapped.
+ * @t_clk_post:          Number of byte clock cycles that the transmitter shall
+ *                       continue sending after last data lane has transitioned
+ *                       to LP mode.
+ * @t_clk_pre:           Number of byte clock cycles that the high speed clock
+ *                       shall be driven prior to data lane transitions from LP
+ *                       to HS mode.
+ * @ignore_rx_eot:       Ignore Rx EOT packets if set to true.
+ * @append_tx_eot:       Append EOT packets for forward transmissions if set to
+ *                       true.
+ * @force_clk_lane_hs:   Force clock lane in high speed mode.
+ */
+struct dsi_host_common_cfg {
+	enum dsi_pixel_format dst_format;
+	enum dsi_data_lanes data_lanes;
+	bool en_crc_check;
+	bool en_ecc_check;
+	enum dsi_te_mode te_mode;
+	enum dsi_trigger_type mdp_cmd_trigger;
+	enum dsi_trigger_type dma_cmd_trigger;
+	u32 cmd_trigger_stream;
+	enum dsi_color_swap_mode swap_mode;
+	bool bit_swap_red;
+	bool bit_swap_green;
+	bool bit_swap_blue;
+	u32 t_clk_post;
+	u32 t_clk_pre;
+	bool ignore_rx_eot;
+	bool append_tx_eot;
+	bool force_clk_lane_hs;
+};
+
+/**
+ * struct dsi_video_engine_cfg - DSI video engine configuration
+ * @last_line_interleave_en:   Allow command mode operations interleaved on the
+ *                             last line of the video stream.
+ * @pulse_mode_hsa_he:         Send HSA and HE following VS/VE packet if set to
+ *                             true.
+ * @hfp_lp11_en:               Enter low power stop mode (LP-11) during HFP.
+ * @hbp_lp11_en:               Enter low power stop mode (LP-11) during HBP.
+ * @hsa_lp11_en:               Enter low power stop mode (LP-11) during HSA.
+ * @eof_bllp_lp11_en:          Enter low power stop mode (LP-11) during BLLP of
+ *                             last line of a frame.
+ * @bllp_lp11_en:              Enter low power stop mode (LP-11) during BLLP.
+ * @traffic_mode:              Traffic mode for video stream.
+ * @vc_id:                     Virtual channel identifier.
+ */
+struct dsi_video_engine_cfg {
+	bool last_line_interleave_en;
+	bool pulse_mode_hsa_he;
+	bool hfp_lp11_en;
+	bool hbp_lp11_en;
+	bool hsa_lp11_en;
+	bool eof_bllp_lp11_en;
+	bool bllp_lp11_en;
+	enum dsi_video_traffic_mode traffic_mode;
+	u32 vc_id;
+};
+
+/**
+ * struct dsi_cmd_engine_cfg - DSI command engine configuration
+ * @max_cmd_packets_interleave:    Maximum number of command mode RGB packets to
+ *                                 send within one horizontal blanking period
+ *                                 of the video mode frame.
+ * @wr_mem_start:                  DCS command for write_memory_start.
+ * @wr_mem_continue:               DCS command for write_memory_continue.
+ * @insert_dcs_command:            Insert DCS command as first byte of payload
+ *                                 of the pixel data.
+ * @mdp_transfer_time_us:          MDP transfer time for command mode panels,
+ *                                 in microseconds.
+ */
+struct dsi_cmd_engine_cfg {
+	u32 max_cmd_packets_interleave;
+	u32 wr_mem_start;
+	u32 wr_mem_continue;
+	bool insert_dcs_command;
+	u32 mdp_transfer_time_us;
+};
+
+/**
+ * struct dsi_host_config - DSI host configuration parameters.
+ * @panel_mode:            Operation mode for panel (video or cmd mode).
+ * @common_config:         Host configuration common to both Video and Cmd mode.
+ * @video_engine:          Video engine configuration if panel is in video mode.
+ * @cmd_engine:            Cmd engine configuration if panel is in cmd mode.
+ * @esc_clk_rate_khz:      Esc clock frequency in Hz.
+ * @bit_clk_rate_hz:       Bit clock frequency in Hz.
+ * @video_timing:          Video timing information of a frame.
+ * @lane_map:              Mapping between logical and physical lanes.
+ */
+struct dsi_host_config {
+	enum dsi_op_mode panel_mode;
+	struct dsi_host_common_cfg common_config;
+	union {
+		struct dsi_video_engine_cfg video_engine;
+		struct dsi_cmd_engine_cfg cmd_engine;
+	} u;
+	u64 esc_clk_rate_hz;
+	u64 bit_clk_rate_hz;
+	struct dsi_mode_info video_timing;
+	struct dsi_lane_mapping lane_map;
+};
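+
+/*
+ * Note: only the engine configuration selected by panel_mode is meaningful.
+ * For example (illustrative), a command mode panel would set
+ * panel_mode = DSI_OP_CMD_MODE and fill u.cmd_engine, leaving
+ * u.video_engine unused.
+ */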
+
+/**
+ * struct dsi_display_mode - specifies mode for dsi display
+ * @timing:         Timing parameters for the panel.
+ * @pixel_clk_khz:  Pixel clock in kHz.
+ * @panel_mode:     Panel operation mode.
+ * @flags:          Additional flags.
+ */
+struct dsi_display_mode {
+	struct dsi_mode_info timing;
+	u32 pixel_clk_khz;
+	enum dsi_op_mode panel_mode;
+
+	u32 flags;
+};
+
+#endif /* _DSI_DEFS_H_ */
diff -Nruw linux-4.4.115/drivers/gpu/drm/msm/dsi-staging/dsi_display.c linux-4.4.115-fbx/drivers/gpu/drm/msm/dsi-staging/dsi_display.c
--- linux-4.4.115/drivers/gpu/drm/msm/dsi-staging/dsi_display.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/gpu/drm/msm/dsi-staging/dsi_display.c	2019-10-29 09:26:23.625203002 +0100
@@ -0,0 +1,2773 @@
+/*
+ * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt)	"msm-dsi-display:[%s] " fmt, __func__
+
+#include <linux/list.h>
+#include <linux/of.h>
+
+#include "msm_drv.h"
+#include "sde_kms.h"
+#include "dsi_display.h"
+#include "dsi_panel.h"
+#include "dsi_ctrl.h"
+#include "dsi_ctrl_hw.h"
+#include "dsi_drm.h"
+#include "dba_bridge.h"
+
+#define to_dsi_display(x) container_of(x, struct dsi_display, host)
+#define DSI_DBA_CLIENT_NAME "dsi"
+
+static DEFINE_MUTEX(dsi_display_list_lock);
+static LIST_HEAD(dsi_display_list);
+
+static const struct of_device_id dsi_display_dt_match[] = {
+	{.compatible = "qcom,dsi-display"},
+	{}
+};
+
+static struct dsi_display *main_display;
+
+int dsi_display_set_backlight(void *display, u32 bl_lvl)
+{
+	struct dsi_display *dsi_display = display;
+	struct dsi_panel *panel;
+	int rc = 0;
+
+	if (dsi_display == NULL)
+		return -EINVAL;
+
+	panel = dsi_display->panel[0];
+
+	rc = dsi_panel_set_backlight(panel, bl_lvl);
+	if (rc)
+		pr_err("unable to set backlight\n");
+
+	return rc;
+}
+
+static ssize_t debugfs_dump_info_read(struct file *file,
+				      char __user *buff,
+				      size_t count,
+				      loff_t *ppos)
+{
+	struct dsi_display *display = file->private_data;
+	char *buf;
+	u32 len = 0;
+	int i;
+
+	if (!display)
+		return -ENODEV;
+
+	if (*ppos)
+		return 0;
+
+	buf = kzalloc(SZ_4K, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	len += snprintf(buf + len, (SZ_4K - len), "name = %s\n", display->name);
+	len += snprintf(buf + len, (SZ_4K - len),
+			"\tResolution = %dx%d\n",
+			display->config.video_timing.h_active,
+			display->config.video_timing.v_active);
+
+	for (i = 0; i < display->ctrl_count; i++) {
+		len += snprintf(buf + len, (SZ_4K - len),
+				"\tCTRL_%d:\n\t\tctrl = %s\n\t\tphy = %s\n",
+				i, display->ctrl[i].ctrl->name,
+				display->ctrl[i].phy->name);
+	}
+
+	for (i = 0; i < display->panel_count; i++)
+		len += snprintf(buf + len, (SZ_4K - len),
+			"\tPanel_%d = %s\n", i, display->panel[i]->name);
+
+	len += snprintf(buf + len, (SZ_4K - len),
+			"\tClock master = %s\n",
+			display->ctrl[display->clk_master_idx].ctrl->name);
+
+	if (copy_to_user(buff, buf, len)) {
+		kfree(buf);
+		return -EFAULT;
+	}
+
+	*ppos += len;
+
+	kfree(buf);
+	return len;
+}
+
+
+static const struct file_operations dump_info_fops = {
+	.open = simple_open,
+	.read = debugfs_dump_info_read,
+};
+
+static int dsi_display_debugfs_init(struct dsi_display *display)
+{
+	int rc = 0;
+	struct dentry *dir, *dump_file;
+
+	dir = debugfs_create_dir(display->name, NULL);
+	if (IS_ERR_OR_NULL(dir)) {
+		rc = PTR_ERR(dir);
+		pr_err("[%s] debugfs create dir failed, rc = %d\n",
+		       display->name, rc);
+		goto error;
+	}
+
+	dump_file = debugfs_create_file("dump_info",
+					0444,
+					dir,
+					display,
+					&dump_info_fops);
+	if (IS_ERR_OR_NULL(dump_file)) {
+		rc = PTR_ERR(dump_file);
+		pr_err("[%s] debugfs create file failed, rc=%d\n",
+		       display->name, rc);
+		goto error_remove_dir;
+	}
+
+	display->root = dir;
+	return rc;
+error_remove_dir:
+	debugfs_remove(dir);
+error:
+	return rc;
+}
+
+static int dsi_display_debugfs_deinit(struct dsi_display *display)
+{
+	debugfs_remove_recursive(display->root);
+
+	return 0;
+}
+
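+/*
+ * Scale the horizontal timing parameters down so that each controller
+ * drives its share of the panel. Example (illustrative): a 2160x1080
+ * split-DSI panel on two controllers ends up with h_active = 1080 per
+ * controller; vertical parameters are left untouched.
+ */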
+static void adjust_timing_by_ctrl_count(const struct dsi_display *display,
+					struct dsi_display_mode *mode)
+{
+	if (display->ctrl_count > 1) {
+		mode->timing.h_active /= display->ctrl_count;
+		mode->timing.h_front_porch /= display->ctrl_count;
+		mode->timing.h_sync_width /= display->ctrl_count;
+		mode->timing.h_back_porch /= display->ctrl_count;
+		mode->timing.h_skew /= display->ctrl_count;
+		mode->pixel_clk_khz /= display->ctrl_count;
+	}
+}
+
+static int dsi_display_ctrl_power_on(struct dsi_display *display)
+{
+	int rc = 0;
+	int i;
+	struct dsi_display_ctrl *ctrl;
+
+	/* Sequence does not matter for split DSI use cases */
+
+	for (i = 0; i < display->ctrl_count; i++) {
+		ctrl = &display->ctrl[i];
+		if (!ctrl->ctrl)
+			continue;
+
+		rc = dsi_ctrl_set_power_state(ctrl->ctrl,
+					      DSI_CTRL_POWER_VREG_ON);
+		if (rc) {
+			pr_err("[%s] Failed to set power state, rc=%d\n",
+			       ctrl->ctrl->name, rc);
+			goto error;
+		}
+	}
+
+	return rc;
+error:
+	for (i = i - 1; i >= 0; i--) {
+		ctrl = &display->ctrl[i];
+		if (!ctrl->ctrl)
+			continue;
+		(void)dsi_ctrl_set_power_state(ctrl->ctrl, DSI_CTRL_POWER_OFF);
+	}
+	return rc;
+}
+
+static int dsi_display_ctrl_power_off(struct dsi_display *display)
+{
+	int rc = 0;
+	int i;
+	struct dsi_display_ctrl *ctrl;
+
+	/* Sequence does not matter for split DSI use cases */
+
+	for (i = 0; i < display->ctrl_count; i++) {
+		ctrl = &display->ctrl[i];
+		if (!ctrl->ctrl)
+			continue;
+
+		rc = dsi_ctrl_set_power_state(ctrl->ctrl, DSI_CTRL_POWER_OFF);
+		if (rc) {
+			pr_err("[%s] Failed to power off, rc=%d\n",
+			       ctrl->ctrl->name, rc);
+			goto error;
+		}
+	}
+error:
+	return rc;
+}
+
+static int dsi_display_phy_power_on(struct dsi_display *display)
+{
+	int rc = 0;
+	int i;
+	struct dsi_display_ctrl *ctrl;
+
+	/* Sequence does not matter for split DSI use cases */
+
+	for (i = 0; i < display->ctrl_count; i++) {
+		ctrl = &display->ctrl[i];
+		if (!ctrl->ctrl)
+			continue;
+
+		rc = dsi_phy_set_power_state(ctrl->phy, true);
+		if (rc) {
+			pr_err("[%s] Failed to set power state, rc=%d\n",
+			       ctrl->phy->name, rc);
+			goto error;
+		}
+	}
+
+	return rc;
+error:
+	for (i = i - 1; i >= 0; i--) {
+		ctrl = &display->ctrl[i];
+		if (!ctrl->phy)
+			continue;
+		(void)dsi_phy_set_power_state(ctrl->phy, false);
+	}
+	return rc;
+}
+
+static int dsi_display_phy_power_off(struct dsi_display *display)
+{
+	int rc = 0;
+	int i;
+	struct dsi_display_ctrl *ctrl;
+
+	/* Sequence does not matter for split DSI use cases */
+
+	for (i = 0; i < display->ctrl_count; i++) {
+		ctrl = &display->ctrl[i];
+		if (!ctrl->phy)
+			continue;
+
+		rc = dsi_phy_set_power_state(ctrl->phy, false);
+		if (rc) {
+			pr_err("[%s] Failed to power off, rc=%d\n",
+			       ctrl->ctrl->name, rc);
+			goto error;
+		}
+	}
+error:
+	return rc;
+}
+
+static int dsi_display_ctrl_core_clk_on(struct dsi_display *display)
+{
+	int rc = 0;
+	int i;
+	struct dsi_display_ctrl *m_ctrl, *ctrl;
+
+	/*
+	 * In split DSI use cases, the clock for the master controller must be
+	 * enabled before the other controllers. The master controller in the
+	 * clock context refers to the controller that sources the clock.
+	 */
+
+	m_ctrl = &display->ctrl[display->clk_master_idx];
+
+	rc = dsi_ctrl_set_power_state(m_ctrl->ctrl, DSI_CTRL_POWER_CORE_CLK_ON);
+	if (rc) {
+		pr_err("[%s] failed to turn on clocks, rc=%d\n",
+		       display->name, rc);
+		goto error;
+	}
+
+	/* Turn on rest of the controllers */
+	for (i = 0; i < display->ctrl_count; i++) {
+		ctrl = &display->ctrl[i];
+		if (!ctrl->ctrl || (ctrl == m_ctrl))
+			continue;
+
+		rc = dsi_ctrl_set_power_state(ctrl->ctrl,
+					      DSI_CTRL_POWER_CORE_CLK_ON);
+		if (rc) {
+			pr_err("[%s] failed to turn on clock, rc=%d\n",
+			       display->name, rc);
+			goto error_disable_master;
+		}
+	}
+	return rc;
+error_disable_master:
+	(void)dsi_ctrl_set_power_state(m_ctrl->ctrl, DSI_CTRL_POWER_VREG_ON);
+error:
+	return rc;
+}
+
+static int dsi_display_ctrl_link_clk_on(struct dsi_display *display)
+{
+	int rc = 0;
+	int i;
+	struct dsi_display_ctrl *m_ctrl, *ctrl;
+
+	/*
+	 * In split DSI use cases, the clock for the master controller must be
+	 * enabled before the other controllers. The master controller in the
+	 * clock context refers to the controller that sources the clock.
+	 */
+
+	m_ctrl = &display->ctrl[display->clk_master_idx];
+
+	rc = dsi_ctrl_set_clock_source(m_ctrl->ctrl,
+				       &display->clock_info.src_clks);
+	if (rc) {
+		pr_err("[%s] failed to set source clocks for master, rc=%d\n",
+		       display->name, rc);
+		goto error;
+	}
+
+	rc = dsi_ctrl_set_power_state(m_ctrl->ctrl, DSI_CTRL_POWER_LINK_CLK_ON);
+	if (rc) {
+		pr_err("[%s] failed to turn on clocks, rc=%d\n",
+		       display->name, rc);
+		goto error;
+	}
+
+	/* Turn on rest of the controllers */
+	for (i = 0; i < display->ctrl_count; i++) {
+		ctrl = &display->ctrl[i];
+		if (!ctrl->ctrl || (ctrl == m_ctrl))
+			continue;
+
+		rc = dsi_ctrl_set_clock_source(ctrl->ctrl,
+					       &display->clock_info.src_clks);
+		if (rc) {
+			pr_err("[%s] failed to set source clocks, rc=%d\n",
+			       display->name, rc);
+			goto error_disable_master;
+		}
+
+		rc = dsi_ctrl_set_power_state(ctrl->ctrl,
+					      DSI_CTRL_POWER_LINK_CLK_ON);
+		if (rc) {
+			pr_err("[%s] failed to turn on clock, rc=%d\n",
+			       display->name, rc);
+			goto error_disable_master;
+		}
+	}
+	return rc;
+error_disable_master:
+	(void)dsi_ctrl_set_power_state(m_ctrl->ctrl,
+				       DSI_CTRL_POWER_CORE_CLK_ON);
+error:
+	return rc;
+}
+
+static int dsi_display_ctrl_core_clk_off(struct dsi_display *display)
+{
+	int rc = 0;
+	int i;
+	struct dsi_display_ctrl *m_ctrl, *ctrl;
+
+	/*
+	 * In split DSI use cases, the clocks for the slave DSI controllers
+	 * must be disabled before the clock for the master controller. Slave
+	 * controllers in the clock context are the controllers that source
+	 * their clock from another controller.
+	 */
+
+	m_ctrl = &display->ctrl[display->clk_master_idx];
+
+	for (i = 0; i < display->ctrl_count; i++) {
+		ctrl = &display->ctrl[i];
+		if (!ctrl->ctrl || (ctrl == m_ctrl))
+			continue;
+
+		rc = dsi_ctrl_set_power_state(ctrl->ctrl,
+					      DSI_CTRL_POWER_VREG_ON);
+		if (rc) {
+			pr_err("[%s] failed to turn off clock, rc=%d\n",
+			       display->name, rc);
+		}
+	}
+
+	rc = dsi_ctrl_set_power_state(m_ctrl->ctrl, DSI_CTRL_POWER_VREG_ON);
+	if (rc)
+		pr_err("[%s] failed to turn off clocks, rc=%d\n",
+		       display->name, rc);
+
+	return rc;
+}
+
+static int dsi_display_ctrl_link_clk_off(struct dsi_display *display)
+{
+	int rc = 0;
+	int i;
+	struct dsi_display_ctrl *m_ctrl, *ctrl;
+
+	/*
+	 * In split DSI use cases, the clocks for the slave DSI controllers
+	 * must be disabled before the clock for the master controller. Slave
+	 * controllers in the clock context are the controllers that source
+	 * their clock from another controller.
+	 */
+
+	m_ctrl = &display->ctrl[display->clk_master_idx];
+
+	for (i = 0; i < display->ctrl_count; i++) {
+		ctrl = &display->ctrl[i];
+		if (!ctrl->ctrl || (ctrl == m_ctrl))
+			continue;
+
+		rc = dsi_ctrl_set_power_state(ctrl->ctrl,
+					      DSI_CTRL_POWER_CORE_CLK_ON);
+		if (rc) {
+			pr_err("[%s] failed to turn off clock, rc=%d\n",
+			       display->name, rc);
+		}
+	}
+	rc = dsi_ctrl_set_power_state(m_ctrl->ctrl, DSI_CTRL_POWER_CORE_CLK_ON);
+	if (rc)
+		pr_err("[%s] failed to turn off clocks, rc=%d\n",
+		       display->name, rc);
+	return rc;
+}
+
+static int dsi_display_ctrl_init(struct dsi_display *display)
+{
+	int rc = 0;
+	int i;
+	struct dsi_display_ctrl *ctrl;
+
+	for (i = 0 ; i < display->ctrl_count; i++) {
+		ctrl = &display->ctrl[i];
+		rc = dsi_ctrl_host_init(ctrl->ctrl);
+		if (rc) {
+			pr_err("[%s] failed to init host_%d, rc=%d\n",
+			       display->name, i, rc);
+			goto error_host_deinit;
+		}
+	}
+
+	return 0;
+error_host_deinit:
+	for (i = i - 1; i >= 0; i--) {
+		ctrl = &display->ctrl[i];
+		(void)dsi_ctrl_host_deinit(ctrl->ctrl);
+	}
+	return rc;
+}
+
+static int dsi_display_ctrl_deinit(struct dsi_display *display)
+{
+	int rc = 0;
+	int i;
+	struct dsi_display_ctrl *ctrl;
+
+	for (i = 0 ; i < display->ctrl_count; i++) {
+		ctrl = &display->ctrl[i];
+		rc = dsi_ctrl_host_deinit(ctrl->ctrl);
+		if (rc) {
+			pr_err("[%s] failed to deinit host_%d, rc=%d\n",
+			       display->name, i, rc);
+		}
+	}
+
+	return rc;
+}
+
+static int dsi_display_cmd_engine_enable(struct dsi_display *display)
+{
+	int rc = 0;
+	int i;
+	struct dsi_display_ctrl *m_ctrl, *ctrl;
+
+	if (display->cmd_engine_refcount > 0) {
+		display->cmd_engine_refcount++;
+		return 0;
+	}
+
+	m_ctrl = &display->ctrl[display->cmd_master_idx];
+
+	rc = dsi_ctrl_set_cmd_engine_state(m_ctrl->ctrl, DSI_CTRL_ENGINE_ON);
+	if (rc) {
+		pr_err("[%s] failed to enable cmd engine, rc=%d\n",
+		       display->name, rc);
+		goto error;
+	}
+
+	for (i = 0; i < display->ctrl_count; i++) {
+		ctrl = &display->ctrl[i];
+		if (!ctrl->ctrl || (ctrl == m_ctrl))
+			continue;
+
+		rc = dsi_ctrl_set_cmd_engine_state(ctrl->ctrl,
+						   DSI_CTRL_ENGINE_ON);
+		if (rc) {
+			pr_err("[%s] failed to enable cmd engine, rc=%d\n",
+			       display->name, rc);
+			goto error_disable_master;
+		}
+	}
+
+	display->cmd_engine_refcount++;
+	return rc;
+error_disable_master:
+	(void)dsi_ctrl_set_cmd_engine_state(m_ctrl->ctrl, DSI_CTRL_ENGINE_OFF);
+error:
+	return rc;
+}
+
+static int dsi_display_cmd_engine_disable(struct dsi_display *display)
+{
+	int rc = 0;
+	int i;
+	struct dsi_display_ctrl *m_ctrl, *ctrl;
+
+	if (display->cmd_engine_refcount == 0) {
+		pr_err("[%s] Invalid refcount\n", display->name);
+		return 0;
+	} else if (display->cmd_engine_refcount > 1) {
+		display->cmd_engine_refcount--;
+		return 0;
+	}
+
+	m_ctrl = &display->ctrl[display->cmd_master_idx];
+	for (i = 0; i < display->ctrl_count; i++) {
+		ctrl = &display->ctrl[i];
+		if (!ctrl->ctrl || (ctrl == m_ctrl))
+			continue;
+
+		rc = dsi_ctrl_set_cmd_engine_state(ctrl->ctrl,
+						   DSI_CTRL_ENGINE_OFF);
+		if (rc)
+			pr_err("[%s] failed to enable cmd engine, rc=%d\n",
+			       display->name, rc);
+	}
+
+	rc = dsi_ctrl_set_cmd_engine_state(m_ctrl->ctrl, DSI_CTRL_ENGINE_OFF);
+	if (rc) {
+		pr_err("[%s] failed to enable cmd engine, rc=%d\n",
+		       display->name, rc);
+		goto error;
+	}
+
+error:
+	display->cmd_engine_refcount = 0;
+	return rc;
+}
+
+static int dsi_display_ctrl_host_enable(struct dsi_display *display)
+{
+	int rc = 0;
+	int i;
+	struct dsi_display_ctrl *m_ctrl, *ctrl;
+
+	m_ctrl = &display->ctrl[display->cmd_master_idx];
+
+	rc = dsi_ctrl_set_host_engine_state(m_ctrl->ctrl, DSI_CTRL_ENGINE_ON);
+	if (rc) {
+		pr_err("[%s] failed to enable host engine, rc=%d\n",
+		       display->name, rc);
+		goto error;
+	}
+
+	for (i = 0; i < display->ctrl_count; i++) {
+		ctrl = &display->ctrl[i];
+		if (!ctrl->ctrl || (ctrl == m_ctrl))
+			continue;
+
+		rc = dsi_ctrl_set_host_engine_state(ctrl->ctrl,
+						    DSI_CTRL_ENGINE_ON);
+		if (rc) {
+			pr_err("[%s] failed to enable sl host engine, rc=%d\n",
+			       display->name, rc);
+			goto error_disable_master;
+		}
+	}
+
+	return rc;
+error_disable_master:
+	(void)dsi_ctrl_set_host_engine_state(m_ctrl->ctrl, DSI_CTRL_ENGINE_OFF);
+error:
+	return rc;
+}
+
+static int dsi_display_ctrl_host_disable(struct dsi_display *display)
+{
+	int rc = 0;
+	int i;
+	struct dsi_display_ctrl *m_ctrl, *ctrl;
+
+	m_ctrl = &display->ctrl[display->cmd_master_idx];
+	for (i = 0; i < display->ctrl_count; i++) {
+		ctrl = &display->ctrl[i];
+		if (!ctrl->ctrl || (ctrl == m_ctrl))
+			continue;
+
+		rc = dsi_ctrl_set_host_engine_state(ctrl->ctrl,
+						    DSI_CTRL_ENGINE_OFF);
+		if (rc)
+			pr_err("[%s] failed to disable host engine, rc=%d\n",
+			       display->name, rc);
+	}
+
+	rc = dsi_ctrl_set_host_engine_state(m_ctrl->ctrl, DSI_CTRL_ENGINE_OFF);
+	if (rc) {
+		pr_err("[%s] failed to disable host engine, rc=%d\n",
+		       display->name, rc);
+		goto error;
+	}
+
+error:
+	return rc;
+}
+
+static int dsi_display_vid_engine_enable(struct dsi_display *display)
+{
+	int rc = 0;
+	int i;
+	struct dsi_display_ctrl *m_ctrl, *ctrl;
+
+	m_ctrl = &display->ctrl[display->video_master_idx];
+
+	rc = dsi_ctrl_set_vid_engine_state(m_ctrl->ctrl, DSI_CTRL_ENGINE_ON);
+	if (rc) {
+		pr_err("[%s] failed to enable vid engine, rc=%d\n",
+		       display->name, rc);
+		goto error;
+	}
+
+	for (i = 0; i < display->ctrl_count; i++) {
+		ctrl = &display->ctrl[i];
+		if (!ctrl->ctrl || (ctrl == m_ctrl))
+			continue;
+
+		rc = dsi_ctrl_set_vid_engine_state(ctrl->ctrl,
+						   DSI_CTRL_ENGINE_ON);
+		if (rc) {
+			pr_err("[%s] failed to enable vid engine, rc=%d\n",
+			       display->name, rc);
+			goto error_disable_master;
+		}
+	}
+
+	return rc;
+error_disable_master:
+	(void)dsi_ctrl_set_vid_engine_state(m_ctrl->ctrl, DSI_CTRL_ENGINE_OFF);
+error:
+	return rc;
+}
+
+static int dsi_display_vid_engine_disable(struct dsi_display *display)
+{
+	int rc = 0;
+	int i;
+	struct dsi_display_ctrl *m_ctrl, *ctrl;
+
+	m_ctrl = &display->ctrl[display->video_master_idx];
+
+	for (i = 0; i < display->ctrl_count; i++) {
+		ctrl = &display->ctrl[i];
+		if (!ctrl->ctrl || (ctrl == m_ctrl))
+			continue;
+
+		rc = dsi_ctrl_set_vid_engine_state(ctrl->ctrl,
+						   DSI_CTRL_ENGINE_OFF);
+		if (rc)
+			pr_err("[%s] failed to disable vid engine, rc=%d\n",
+			       display->name, rc);
+	}
+
+	rc = dsi_ctrl_set_vid_engine_state(m_ctrl->ctrl, DSI_CTRL_ENGINE_OFF);
+	if (rc)
+		pr_err("[%s] failed to disable mvid engine, rc=%d\n",
+		       display->name, rc);
+
+	return rc;
+}
+
+static int dsi_display_phy_enable(struct dsi_display *display)
+{
+	int rc = 0;
+	int i;
+	struct dsi_display_ctrl *m_ctrl, *ctrl;
+	enum dsi_phy_pll_source m_src = DSI_PLL_SOURCE_STANDALONE;
+
+	m_ctrl = &display->ctrl[display->clk_master_idx];
+	if (display->ctrl_count > 1)
+		m_src = DSI_PLL_SOURCE_NATIVE;
+
+	rc = dsi_phy_enable(m_ctrl->phy,
+			    &display->config,
+			    m_src,
+			    true);
+	if (rc) {
+		pr_err("[%s] failed to enable DSI PHY, rc=%d\n",
+		       display->name, rc);
+		goto error;
+	}
+
+	for (i = 0; i < display->ctrl_count; i++) {
+		ctrl = &display->ctrl[i];
+		if (!ctrl->ctrl || (ctrl == m_ctrl))
+			continue;
+
+		rc = dsi_phy_enable(ctrl->phy,
+				    &display->config,
+				    DSI_PLL_SOURCE_NON_NATIVE,
+				    true);
+		if (rc) {
+			pr_err("[%s] failed to enable DSI PHY, rc=%d\n",
+			       display->name, rc);
+			goto error_disable_master;
+		}
+	}
+
+	return rc;
+
+error_disable_master:
+	(void)dsi_phy_disable(m_ctrl->phy);
+error:
+	return rc;
+}
+
+static int dsi_display_phy_disable(struct dsi_display *display)
+{
+	int rc = 0;
+	int i;
+	struct dsi_display_ctrl *m_ctrl, *ctrl;
+
+	m_ctrl = &display->ctrl[display->clk_master_idx];
+
+	for (i = 0; i < display->ctrl_count; i++) {
+		ctrl = &display->ctrl[i];
+		if (!ctrl->ctrl || (ctrl == m_ctrl))
+			continue;
+
+		rc = dsi_phy_disable(ctrl->phy);
+		if (rc)
+			pr_err("[%s] failed to disable DSI PHY, rc=%d\n",
+			       display->name, rc);
+	}
+
+	rc = dsi_phy_disable(m_ctrl->phy);
+	if (rc)
+		pr_err("[%s] failed to disable DSI PHY, rc=%d\n",
+		       display->name, rc);
+
+	return rc;
+}
+
+static int dsi_display_wake_up(struct dsi_display *display)
+{
+	return 0;
+}
+
+static int dsi_display_broadcast_cmd(struct dsi_display *display,
+				     const struct mipi_dsi_msg *msg)
+{
+	int rc = 0;
+	u32 flags, m_flags;
+	struct dsi_display_ctrl *ctrl, *m_ctrl;
+	int i;
+
+	m_flags = (DSI_CTRL_CMD_BROADCAST | DSI_CTRL_CMD_BROADCAST_MASTER |
+		   DSI_CTRL_CMD_DEFER_TRIGGER | DSI_CTRL_CMD_FIFO_STORE);
+	flags = (DSI_CTRL_CMD_BROADCAST | DSI_CTRL_CMD_DEFER_TRIGGER |
+		 DSI_CTRL_CMD_FIFO_STORE);
+
+	/*
+	 * 1. Setup commands in FIFO
+	 * 2. Trigger commands
+	 */
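+	/*
+	 * Every controller queues the command with DSI_CTRL_CMD_DEFER_TRIGGER;
+	 * the slaves are then triggered before the broadcast master so that
+	 * all links start transmitting together.
+	 */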
+	m_ctrl = &display->ctrl[display->cmd_master_idx];
+	rc = dsi_ctrl_cmd_transfer(m_ctrl->ctrl, msg, m_flags);
+	if (rc) {
+		pr_err("[%s] cmd transfer failed on master,rc=%d\n",
+		       display->name, rc);
+		goto error;
+	}
+
+	for (i = 0; i < display->ctrl_count; i++) {
+		ctrl = &display->ctrl[i];
+		if (ctrl == m_ctrl)
+			continue;
+
+		rc = dsi_ctrl_cmd_transfer(ctrl->ctrl, msg, flags);
+		if (rc) {
+			pr_err("[%s] cmd transfer failed, rc=%d\n",
+			       display->name, rc);
+			goto error;
+		}
+
+		rc = dsi_ctrl_cmd_tx_trigger(ctrl->ctrl,
+			DSI_CTRL_CMD_BROADCAST);
+		if (rc) {
+			pr_err("[%s] cmd trigger failed, rc=%d\n",
+			       display->name, rc);
+			goto error;
+		}
+	}
+
+	rc = dsi_ctrl_cmd_tx_trigger(m_ctrl->ctrl,
+				(DSI_CTRL_CMD_BROADCAST_MASTER |
+				 DSI_CTRL_CMD_BROADCAST));
+	if (rc) {
+		pr_err("[%s] cmd trigger failed for master, rc=%d\n",
+		       display->name, rc);
+		goto error;
+	}
+
+error:
+	return rc;
+}
+
+static int dsi_display_phy_sw_reset(struct dsi_display *display)
+{
+	int rc = 0;
+	int i;
+	struct dsi_display_ctrl *m_ctrl, *ctrl;
+
+	m_ctrl = &display->ctrl[display->cmd_master_idx];
+
+	rc = dsi_ctrl_phy_sw_reset(m_ctrl->ctrl);
+	if (rc) {
+		pr_err("[%s] failed to reset phy, rc=%d\n", display->name, rc);
+		goto error;
+	}
+
+	for (i = 0; i < display->ctrl_count; i++) {
+		ctrl = &display->ctrl[i];
+		if (!ctrl->ctrl || (ctrl == m_ctrl))
+			continue;
+
+		rc = dsi_ctrl_phy_sw_reset(ctrl->ctrl);
+		if (rc) {
+			pr_err("[%s] failed to reset phy, rc=%d\n",
+			       display->name, rc);
+			goto error;
+		}
+	}
+
+error:
+	return rc;
+}
+
+static int dsi_host_attach(struct mipi_dsi_host *host,
+			   struct mipi_dsi_device *dsi)
+{
+	return 0;
+}
+
+static int dsi_host_detach(struct mipi_dsi_host *host,
+			   struct mipi_dsi_device *dsi)
+{
+	return 0;
+}
+
+static ssize_t dsi_host_transfer(struct mipi_dsi_host *host,
+				 const struct mipi_dsi_msg *msg)
+{
+	struct dsi_display *display;
+	int rc = 0;
+
+	if (!host || !msg) {
+		pr_err("Invalid params\n");
+		return -EINVAL;
+	}
+
+	display = to_dsi_display(host);
+
+	rc = dsi_display_wake_up(display);
+	if (rc) {
+		pr_err("[%s] failed to wake up display, rc=%d\n",
+		       display->name, rc);
+		goto error;
+	}
+
+	rc = dsi_display_cmd_engine_enable(display);
+	if (rc) {
+		pr_err("[%s] failed to enable cmd engine, rc=%d\n",
+		       display->name, rc);
+		goto error;
+	}
+
+	if (display->ctrl_count > 1) {
+		rc = dsi_display_broadcast_cmd(display, msg);
+		if (rc) {
+			pr_err("[%s] cmd broadcast failed, rc=%d\n",
+			       display->name, rc);
+			goto error_disable_cmd_engine;
+		}
+	} else {
+		rc = dsi_ctrl_cmd_transfer(display->ctrl[0].ctrl, msg,
+					  DSI_CTRL_CMD_FIFO_STORE);
+		if (rc) {
+			pr_err("[%s] cmd transfer failed, rc=%d\n",
+			       display->name, rc);
+			goto error_disable_cmd_engine;
+		}
+	}
+error_disable_cmd_engine:
+	(void)dsi_display_cmd_engine_disable(display);
+error:
+	return rc;
+}
+
+static const struct mipi_dsi_host_ops dsi_host_ops = {
+	.attach = dsi_host_attach,
+	.detach = dsi_host_detach,
+	.transfer = dsi_host_transfer,
+};
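+
+/*
+ * Note: the MIPI DSI core routes the mipi_dsi_* helpers used by panel
+ * drivers through these ops, so DCS reads and writes issued by the panel
+ * end up in dsi_host_transfer() above.
+ */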
+
+static int dsi_display_mipi_host_init(struct dsi_display *display)
+{
+	int rc = 0;
+	struct mipi_dsi_host *host = &display->host;
+
+	host->dev = &display->pdev->dev;
+	host->ops = &dsi_host_ops;
+
+	rc = mipi_dsi_host_register(host);
+	if (rc) {
+		pr_err("[%s] failed to register mipi dsi host, rc=%d\n",
+		       display->name, rc);
+		goto error;
+	}
+
+error:
+	return rc;
+}
+static int dsi_display_mipi_host_deinit(struct dsi_display *display)
+{
+	int rc = 0;
+	struct mipi_dsi_host *host = &display->host;
+
+	mipi_dsi_host_unregister(host);
+
+	host->dev = NULL;
+	host->ops = NULL;
+
+	return rc;
+}
+
+static int dsi_display_clocks_deinit(struct dsi_display *display)
+{
+	int rc = 0;
+	struct dsi_clk_link_set *src = &display->clock_info.src_clks;
+	struct dsi_clk_link_set *mux = &display->clock_info.mux_clks;
+	struct dsi_clk_link_set *shadow = &display->clock_info.shadow_clks;
+
+	if (src->byte_clk) {
+		devm_clk_put(&display->pdev->dev, src->byte_clk);
+		src->byte_clk = NULL;
+	}
+
+	if (src->pixel_clk) {
+		devm_clk_put(&display->pdev->dev, src->pixel_clk);
+		src->pixel_clk = NULL;
+	}
+
+	if (mux->byte_clk) {
+		devm_clk_put(&display->pdev->dev, mux->byte_clk);
+		mux->byte_clk = NULL;
+	}
+
+	if (mux->pixel_clk) {
+		devm_clk_put(&display->pdev->dev, mux->pixel_clk);
+		mux->pixel_clk = NULL;
+	}
+
+	if (shadow->byte_clk) {
+		devm_clk_put(&display->pdev->dev, shadow->byte_clk);
+		shadow->byte_clk = NULL;
+	}
+
+	if (shadow->pixel_clk) {
+		devm_clk_put(&display->pdev->dev, shadow->pixel_clk);
+		shadow->pixel_clk = NULL;
+	}
+
+	return rc;
+}
+
+static int dsi_display_clocks_init(struct dsi_display *display)
+{
+	int rc = 0;
+	struct dsi_clk_link_set *src = &display->clock_info.src_clks;
+	struct dsi_clk_link_set *mux = &display->clock_info.mux_clks;
+	struct dsi_clk_link_set *shadow = &display->clock_info.shadow_clks;
+
+	src->byte_clk = devm_clk_get(&display->pdev->dev, "src_byte_clk");
+	if (IS_ERR_OR_NULL(src->byte_clk)) {
+		rc = PTR_ERR(src->byte_clk);
+		src->byte_clk = NULL;
+		pr_err("failed to get src_byte_clk, rc=%d\n", rc);
+		goto error;
+	}
+
+	src->pixel_clk = devm_clk_get(&display->pdev->dev, "src_pixel_clk");
+	if (IS_ERR_OR_NULL(src->pixel_clk)) {
+		rc = PTR_ERR(src->pixel_clk);
+		src->pixel_clk = NULL;
+		pr_err("failed to get src_pixel_clk, rc=%d\n", rc);
+		goto error;
+	}
+
+	mux->byte_clk = devm_clk_get(&display->pdev->dev, "mux_byte_clk");
+	if (IS_ERR_OR_NULL(mux->byte_clk)) {
+		rc = PTR_ERR(mux->byte_clk);
+		pr_err("failed to get mux_byte_clk, rc=%d\n", rc);
+		mux->byte_clk = NULL;
+		/*
+		 * Skip getting rest of clocks since one failed. This is a
+		 * non-critical failure since these clocks are required only for
+		 * dynamic refresh use cases.
+		 */
+		rc = 0;
+		goto done;
+	}
+
+	mux->pixel_clk = devm_clk_get(&display->pdev->dev, "mux_pixel_clk");
+	if (IS_ERR_OR_NULL(mux->pixel_clk)) {
+		rc = PTR_ERR(mux->pixel_clk);
+		mux->pixel_clk = NULL;
+		pr_err("failed to get mux_pixel_clk, rc=%d\n", rc);
+		/*
+		 * Skip getting rest of clocks since one failed. This is a
+		 * non-critical failure since these clocks are required only for
+		 * dynamic refresh use cases.
+		 */
+		rc = 0;
+		goto done;
+	}
+
+	shadow->byte_clk = devm_clk_get(&display->pdev->dev, "shadow_byte_clk");
+	if (IS_ERR_OR_NULL(shadow->byte_clk)) {
+		rc = PTR_ERR(shadow->byte_clk);
+		shadow->byte_clk = NULL;
+		pr_err("failed to get shadow_byte_clk, rc=%d\n", rc);
+		/*
+		 * Skip getting rest of clocks since one failed. This is a
+		 * non-critical failure since these clocks are required only for
+		 * dynamic refresh use cases.
+		 */
+		rc = 0;
+		goto done;
+	}
+
+	shadow->pixel_clk = devm_clk_get(&display->pdev->dev,
+					 "shadow_pixel_clk");
+	if (IS_ERR_OR_NULL(shadow->pixel_clk)) {
+		rc = PTR_ERR(shadow->pixel_clk);
+		shadow->pixel_clk = NULL;
+		pr_err("failed to get shadow_pixel_clk, rc=%d\n", rc);
+		/*
+		 * Skip getting rest of clocks since one failed. This is a
+		 * non-critical failure since these clocks are required only for
+		 * dynamic refresh use cases.
+		 */
+		rc = 0;
+		goto done;
+	}
+
+done:
+	return 0;
+error:
+	(void)dsi_display_clocks_deinit(display);
+	return rc;
+}
+
+static int dsi_display_parse_lane_map(struct dsi_display *display)
+{
+	int rc = 0;
+
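+	/*
+	 * No lane remapping is parsed from DT yet; default to an identity
+	 * map (logical lane N on physical lane N).
+	 */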
+	display->lane_map.physical_lane0 = DSI_LOGICAL_LANE_0;
+	display->lane_map.physical_lane1 = DSI_LOGICAL_LANE_1;
+	display->lane_map.physical_lane2 = DSI_LOGICAL_LANE_2;
+	display->lane_map.physical_lane3 = DSI_LOGICAL_LANE_3;
+	return rc;
+}
+
+static int dsi_display_parse_dt(struct dsi_display *display)
+{
+	int rc = 0;
+	int i, size;
+	u32 phy_count = 0;
+	struct device_node *of_node;
+
+	/* Parse controllers */
+	for (i = 0; i < MAX_DSI_CTRLS_PER_DISPLAY; i++) {
+		of_node = of_parse_phandle(display->pdev->dev.of_node,
+					   "qcom,dsi-ctrl", i);
+		if (!of_node) {
+			if (!i) {
+				pr_err("No controllers present\n");
+				return -ENODEV;
+			}
+			break;
+		}
+
+		display->ctrl[i].ctrl_of_node = of_node;
+		display->ctrl_count++;
+	}
+
+	/* Parse Phys */
+	for (i = 0; i < MAX_DSI_CTRLS_PER_DISPLAY; i++) {
+		of_node = of_parse_phandle(display->pdev->dev.of_node,
+					   "qcom,dsi-phy", i);
+		if (!of_node) {
+			if (!i) {
+				pr_err("No PHY devices present\n");
+				rc = -ENODEV;
+				goto error;
+			}
+			break;
+		}
+
+		display->ctrl[i].phy_of_node = of_node;
+		phy_count++;
+	}
+
+	if (phy_count != display->ctrl_count) {
+		pr_err("Number of controllers does not match PHYs\n");
+		rc = -ENODEV;
+		goto error;
+	}
+
+	if (of_get_property(display->pdev->dev.of_node, "qcom,dsi-panel",
+			&size)) {
+		display->panel_count = size / sizeof(int);
+		display->panel_of = devm_kzalloc(&display->pdev->dev,
+			sizeof(struct device_node *) * display->panel_count,
+			GFP_KERNEL);
+		if (!display->panel_of) {
+			SDE_ERROR("out of memory for panel_of\n");
+			rc = -ENOMEM;
+			goto error;
+		}
+		display->panel = devm_kzalloc(&display->pdev->dev,
+			sizeof(struct dsi_panel *) * display->panel_count,
+			GFP_KERNEL);
+		if (!display->panel) {
+			SDE_ERROR("out of memory for panel\n");
+			rc = -ENOMEM;
+			goto error;
+		}
+		for (i = 0; i < display->panel_count; i++) {
+			display->panel_of[i] =
+				of_parse_phandle(display->pdev->dev.of_node,
+				"qcom,dsi-panel", i);
+			if (!display->panel_of[i]) {
+				SDE_ERROR("of_parse dsi-panel failed\n");
+				rc = -ENODEV;
+				goto error;
+			}
+		}
+	} else {
+		SDE_ERROR("No qcom,dsi-panel of node\n");
+		rc = -ENODEV;
+		goto error;
+	}
+
+	if (of_get_property(display->pdev->dev.of_node, "qcom,bridge-index",
+			&size)) {
+		if (size / sizeof(int) != display->panel_count) {
+			SDE_ERROR("size=%lu is different than count=%u\n",
+				size / sizeof(int), display->panel_count);
+			rc = -EINVAL;
+			goto error;
+		}
+		display->bridge_idx = devm_kzalloc(&display->pdev->dev,
+			sizeof(u32) * display->panel_count, GFP_KERNEL);
+		if (!display->bridge_idx) {
+			SDE_ERROR("out of memory for bridge_idx\n");
+			rc = -ENOMEM;
+			goto error;
+		}
+		for (i = 0; i < display->panel_count; i++) {
+			rc = of_property_read_u32_index(
+				display->pdev->dev.of_node,
+				"qcom,bridge-index", i,
+				&(display->bridge_idx[i]));
+			if (rc) {
+				SDE_ERROR(
+					"read bridge-index error,i=%d rc=%d\n",
+					i, rc);
+				rc = -ENODEV;
+				goto error;
+			}
+		}
+	}
+
+	rc = dsi_display_parse_lane_map(display);
+	if (rc) {
+		pr_err("Lane map not found, rc=%d\n", rc);
+		goto error;
+	}
+error:
+	if (rc) {
+		if (display->panel_of)
+			for (i = 0; i < display->panel_count; i++)
+				if (display->panel_of[i])
+					of_node_put(display->panel_of[i]);
+		devm_kfree(&display->pdev->dev, display->panel_of);
+		devm_kfree(&display->pdev->dev, display->panel);
+		devm_kfree(&display->pdev->dev, display->bridge_idx);
+		display->panel_count = 0;
+	}
+	return rc;
+}
+
+static int dsi_display_res_init(struct dsi_display *display)
+{
+	int rc = 0;
+	int i;
+	struct dsi_display_ctrl *ctrl;
+
+	for (i = 0; i < display->ctrl_count; i++) {
+		ctrl = &display->ctrl[i];
+		ctrl->ctrl = dsi_ctrl_get(ctrl->ctrl_of_node);
+		if (IS_ERR_OR_NULL(ctrl->ctrl)) {
+			rc = PTR_ERR(ctrl->ctrl);
+			pr_err("failed to get dsi controller, rc=%d\n", rc);
+			ctrl->ctrl = NULL;
+			goto error_ctrl_put;
+		}
+
+		ctrl->phy = dsi_phy_get(ctrl->phy_of_node);
+		if (IS_ERR_OR_NULL(ctrl->phy)) {
+			rc = PTR_ERR(ctrl->phy);
+			pr_err("failed to get phy controller, rc=%d\n", rc);
+			dsi_ctrl_put(ctrl->ctrl);
+			ctrl->phy = NULL;
+			goto error_ctrl_put;
+		}
+	}
+
+	for (i = 0; i < display->panel_count; i++) {
+		display->panel[i] = dsi_panel_get(&display->pdev->dev,
+					display->panel_of[i]);
+		if (IS_ERR_OR_NULL(display->panel)) {
+			rc = PTR_ERR(display->panel);
+			pr_err("failed to get panel, rc=%d\n", rc);
+			display->panel[i] = NULL;
+			goto error_ctrl_put;
+		}
+	}
+
+	rc = dsi_display_clocks_init(display);
+	if (rc) {
+		pr_err("Failed to parse clock data, rc=%d\n", rc);
+		goto error_ctrl_put;
+	}
+
+	return 0;
+error_ctrl_put:
+	for (i = i - 1; i >= 0; i--) {
+		ctrl = &display->ctrl[i];
+		dsi_ctrl_put(ctrl->ctrl);
+		dsi_phy_put(ctrl->phy);
+	}
+	return rc;
+}
+
+static int dsi_display_res_deinit(struct dsi_display *display)
+{
+	int rc = 0;
+	int i;
+	struct dsi_display_ctrl *ctrl;
+
+	rc = dsi_display_clocks_deinit(display);
+	if (rc)
+		pr_err("clocks deinit failed, rc=%d\n", rc);
+
+	for (i = 0; i < display->panel_count; i++)
+		dsi_panel_put(display->panel[i]);
+
+	for (i = 0; i < display->ctrl_count; i++) {
+		ctrl = &display->ctrl[i];
+		dsi_phy_put(ctrl->phy);
+		dsi_ctrl_put(ctrl->ctrl);
+	}
+
+	return rc;
+}
+
+static int dsi_display_validate_mode_set(struct dsi_display *display,
+					 struct dsi_display_mode *mode,
+					 u32 flags)
+{
+	int rc = 0;
+	int i;
+	struct dsi_display_ctrl *ctrl;
+
+	/*
+	 * To set a mode:
+	 * 1. Controllers should be turned off.
+	 * 2. Link clocks should be off.
+	 * 3. Phy should be disabled.
+	 */
+
+	for (i = 0; i < display->ctrl_count; i++) {
+		ctrl = &display->ctrl[i];
+		if ((ctrl->power_state > DSI_CTRL_POWER_VREG_ON) ||
+		    (ctrl->phy_enabled)) {
+			rc = -EINVAL;
+			goto error;
+		}
+	}
+
+error:
+	return rc;
+}
+
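+/*
+ * A switch qualifies as seamless dfps only when every timing parameter
+ * matches the current mode except the front porch selected by dfps_type;
+ * the refresh rate, consequently, must differ between the two modes.
+ */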
+static bool dsi_display_is_seamless_dfps_possible(
+		const struct dsi_display *display,
+		const struct dsi_display_mode *tgt,
+		const enum dsi_dfps_type dfps_type)
+{
+	struct dsi_display_mode *cur;
+
+	if (!display || !tgt) {
+		pr_err("Invalid params\n");
+		return false;
+	}
+
+	cur = &display->panel[0]->mode;
+
+	if (cur->timing.h_active != tgt->timing.h_active) {
+		pr_debug("timing.h_active differs %d %d\n",
+				cur->timing.h_active, tgt->timing.h_active);
+		return false;
+	}
+
+	if (cur->timing.h_back_porch != tgt->timing.h_back_porch) {
+		pr_debug("timing.h_back_porch differs %d %d\n",
+				cur->timing.h_back_porch,
+				tgt->timing.h_back_porch);
+		return false;
+	}
+
+	if (cur->timing.h_sync_width != tgt->timing.h_sync_width) {
+		pr_debug("timing.h_sync_width differs %d %d\n",
+				cur->timing.h_sync_width,
+				tgt->timing.h_sync_width);
+		return false;
+	}
+
+	if (cur->timing.h_front_porch != tgt->timing.h_front_porch) {
+		pr_debug("timing.h_front_porch differs %d %d\n",
+				cur->timing.h_front_porch,
+				tgt->timing.h_front_porch);
+		if (dfps_type != DSI_DFPS_IMMEDIATE_HFP)
+			return false;
+	}
+
+	if (cur->timing.h_skew != tgt->timing.h_skew) {
+		pr_debug("timing.h_skew differs %d %d\n",
+				cur->timing.h_skew,
+				tgt->timing.h_skew);
+		return false;
+	}
+
+	/* skip polarity comparison */
+
+	if (cur->timing.v_active != tgt->timing.v_active) {
+		pr_debug("timing.v_active differs %d %d\n",
+				cur->timing.v_active,
+				tgt->timing.v_active);
+		return false;
+	}
+
+	if (cur->timing.v_back_porch != tgt->timing.v_back_porch) {
+		pr_debug("timing.v_back_porch differs %d %d\n",
+				cur->timing.v_back_porch,
+				tgt->timing.v_back_porch);
+		return false;
+	}
+
+	if (cur->timing.v_sync_width != tgt->timing.v_sync_width) {
+		pr_debug("timing.v_sync_width differs %d %d\n",
+				cur->timing.v_sync_width,
+				tgt->timing.v_sync_width);
+		return false;
+	}
+
+	if (cur->timing.v_front_porch != tgt->timing.v_front_porch) {
+		pr_debug("timing.v_front_porch differs %d %d\n",
+				cur->timing.v_front_porch,
+				tgt->timing.v_front_porch);
+		if (dfps_type != DSI_DFPS_IMMEDIATE_VFP)
+			return false;
+	}
+
+	/* skip polarity comparison */
+
+	if (cur->timing.refresh_rate == tgt->timing.refresh_rate) {
+		pr_debug("timing.refresh_rate identical %d %d\n",
+				cur->timing.refresh_rate,
+				tgt->timing.refresh_rate);
+		return false;
+	}
+
+	if (cur->pixel_clk_khz != tgt->pixel_clk_khz)
+		pr_debug("pixel_clk_khz differs %d %d\n",
+				cur->pixel_clk_khz, tgt->pixel_clk_khz);
+
+	if (cur->panel_mode != tgt->panel_mode) {
+		pr_debug("panel_mode differs %d %d\n",
+				cur->panel_mode, tgt->panel_mode);
+		return false;
+	}
+
+	if (cur->flags != tgt->flags)
+		pr_debug("flags differs %d %d\n", cur->flags, tgt->flags);
+
+	return true;
+}
+
+static int dsi_display_dfps_update(struct dsi_display *display,
+				   struct dsi_display_mode *dsi_mode)
+{
+	struct dsi_mode_info *timing;
+	struct dsi_display_ctrl *m_ctrl, *ctrl;
+	struct dsi_display_mode *panel_mode;
+	struct dsi_dfps_capabilities dfps_caps;
+	int rc = 0;
+	int i;
+
+	if (!display || !dsi_mode) {
+		pr_err("Invalid params\n");
+		return -EINVAL;
+	}
+	timing = &dsi_mode->timing;
+
+	dsi_panel_get_dfps_caps(display->panel[0], &dfps_caps);
+	if (!dfps_caps.dfps_support) {
+		pr_err("dfps not supported\n");
+		return -ENOTSUPP;
+	}
+
+	if (dfps_caps.type == DSI_DFPS_IMMEDIATE_CLK) {
+		pr_err("dfps clock method not supported\n");
+		return -ENOTSUPP;
+	}
+
+	/* For split DSI, update the clock master first */
+
+	pr_debug("configuring seamless dynamic fps\n\n");
+
+	m_ctrl = &display->ctrl[display->clk_master_idx];
+	rc = dsi_ctrl_async_timing_update(m_ctrl->ctrl, timing);
+	if (rc) {
+		pr_err("[%s] failed to dfps update host_%d, rc=%d\n",
+				display->name, i, rc);
+		goto error;
+	}
+
+	/* Update the rest of the controllers */
+	for (i = 0; i < display->ctrl_count; i++) {
+		ctrl = &display->ctrl[i];
+		if (!ctrl->ctrl || (ctrl == m_ctrl))
+			continue;
+
+		rc = dsi_ctrl_async_timing_update(ctrl->ctrl, timing);
+		if (rc) {
+			pr_err("[%s] failed to dfps update host_%d, rc=%d\n",
+					display->name, i, rc);
+			goto error;
+		}
+	}
+
+	panel_mode = &display->panel[0]->mode;
+	memcpy(panel_mode, dsi_mode, sizeof(*panel_mode));
+
+error:
+	return rc;
+}
+
+static int dsi_display_dfps_calc_front_porch(
+		u64 clk_hz,
+		u32 new_fps,
+		u32 a_total,
+		u32 b_total,
+		u32 b_fp,
+		u32 *b_fp_out)
+{
+	s32 b_fp_new;
+
+	if (!b_fp_out) {
+		pr_err("Invalid params");
+		return -EINVAL;
+	}
+
+	if (!a_total || !new_fps) {
+		pr_err("Invalid pixel total or new fps in mode request\n");
+		return -EINVAL;
+	}
+
+	/*
+	 * Keep the clock and the other porches constant and use the new fps
+	 * to calculate the front porch:
+	 * clk = (hor * ver * fps)
+	 * hfront = (clk / (vtotal * fps)) - hactive - hback - hsync
+	 */
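+	/*
+	 * Worked example (illustrative numbers): a 1080p60 panel with
+	 * clk_hz = 148500000, v_total = 1125, h_total = 2200 and hfp = 88
+	 * switching to 50 fps gives 148500000 / (1125 * 50) - (2200 - 88) =
+	 * 2640 - 2112 = 528 as the new horizontal front porch.
+	 */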
+	b_fp_new = (clk_hz / (a_total * new_fps)) - (b_total - b_fp);
+
+	pr_debug("clk %llu fps %u a %u b %u b_fp %u new_fp %d\n",
+			clk_hz, new_fps, a_total, b_total, b_fp, b_fp_new);
+
+	if (b_fp_new < 0) {
+		pr_err("Invalid new_hfp calcluated%d\n", b_fp_new);
+		return -EINVAL;
+	}
+
+	/*
+	 * TODO: to differentiate from the clock method when communicating
+	 * with other components, perhaps clk should be reset to its
+	 * original value here.
+	 */
+	*b_fp_out = b_fp_new;
+
+	return 0;
+}
+
+static int dsi_display_get_dfps_timing(struct dsi_display *display,
+				       struct dsi_display_mode *adj_mode)
+{
+	struct dsi_dfps_capabilities dfps_caps;
+	struct dsi_display_mode per_ctrl_mode;
+	struct dsi_mode_info *timing;
+	struct dsi_ctrl *m_ctrl;
+	u64 clk_hz;
+
+	int rc = 0;
+
+	if (!display || !adj_mode) {
+		pr_err("Invalid params\n");
+		return -EINVAL;
+	}
+	m_ctrl = display->ctrl[display->clk_master_idx].ctrl;
+
+	/* Only check the first panel */
+	dsi_panel_get_dfps_caps(display->panel[0], &dfps_caps);
+	if (!dfps_caps.dfps_support) {
+		pr_err("dfps not supported by panel\n");
+		return -EINVAL;
+	}
+
+	per_ctrl_mode = *adj_mode;
+	adjust_timing_by_ctrl_count(display, &per_ctrl_mode);
+
+	if (!dsi_display_is_seamless_dfps_possible(display,
+			&per_ctrl_mode, dfps_caps.type)) {
+		pr_err("seamless dynamic fps not supported for mode\n");
+		return -EINVAL;
+	}
+
+	/* TODO: Remove this direct reference to the dsi_ctrl */
+	clk_hz = m_ctrl->clk_info.link_clks.pixel_clk_rate;
+	timing = &per_ctrl_mode.timing;
+
+	switch (dfps_caps.type) {
+	case DSI_DFPS_IMMEDIATE_VFP:
+		rc = dsi_display_dfps_calc_front_porch(
+				clk_hz,
+				timing->refresh_rate,
+				DSI_H_TOTAL(timing),
+				DSI_V_TOTAL(timing),
+				timing->v_front_porch,
+				&adj_mode->timing.v_front_porch);
+		break;
+
+	case DSI_DFPS_IMMEDIATE_HFP:
+		rc = dsi_display_dfps_calc_front_porch(
+				clk_hz,
+				timing->refresh_rate,
+				DSI_V_TOTAL(timing),
+				DSI_H_TOTAL(timing),
+				timing->h_front_porch,
+				&adj_mode->timing.h_front_porch);
+		if (!rc)
+			adj_mode->timing.h_front_porch *= display->ctrl_count;
+		break;
+
+	default:
+		pr_err("Unsupported DFPS mode %d\n", dfps_caps.type);
+		rc = -ENOTSUPP;
+	}
+
+	return rc;
+}
+
+static int dsi_display_validate_mode_seamless(struct dsi_display *display,
+		struct dsi_display_mode *adj_mode)
+{
+	int rc = 0;
+
+	if (!display || !adj_mode) {
+		pr_err("Invalid params\n");
+		return -EINVAL;
+	}
+
+	/* Currently the only seamless transition is dynamic fps */
+	rc = dsi_display_get_dfps_timing(display, adj_mode);
+	if (rc) {
+		pr_debug("Dynamic FPS not supported for seamless\n");
+	} else {
+		pr_debug("Mode switch is seamless Dynamic FPS\n");
+		adj_mode->flags |= DSI_MODE_FLAG_DFPS |
+				DSI_MODE_FLAG_VBLANK_PRE_MODESET;
+	}
+
+	return rc;
+}
+
+static int dsi_display_set_mode_sub(struct dsi_display *display,
+				    struct dsi_display_mode *mode,
+				    u32 flags)
+{
+	int rc = 0;
+	int i;
+	struct dsi_display_ctrl *ctrl;
+
+	rc = dsi_panel_get_host_cfg_for_mode(display->panel[0],
+					     mode,
+					     &display->config);
+	if (rc) {
+		pr_err("[%s] failed to get host config for mode, rc=%d\n",
+		       display->name, rc);
+		goto error;
+	}
+
+	memcpy(&display->config.lane_map, &display->lane_map,
+	       sizeof(display->lane_map));
+
+	if (mode->flags & DSI_MODE_FLAG_DFPS) {
+		rc = dsi_display_dfps_update(display, mode);
+		if (rc) {
+			pr_err("[%s]DSI dfps update failed, rc=%d\n",
+					display->name, rc);
+			goto error;
+		}
+	}
+
+	for (i = 0; i < display->ctrl_count; i++) {
+		ctrl = &display->ctrl[i];
+		rc = dsi_ctrl_update_host_config(ctrl->ctrl, &display->config,
+				mode->flags);
+		if (rc) {
+			pr_err("[%s] failed to update ctrl config, rc=%d\n",
+			       display->name, rc);
+			goto error;
+		}
+
+	}
+error:
+	return rc;
+}
+
+/**
+ * _dsi_display_dev_init - initializes the display device
+ * Initialization will acquire references to the resources required for the
+ * display hardware to function.
+ * @display:         Handle to the display
+ * Returns:          Zero on success
+ */
+static int _dsi_display_dev_init(struct dsi_display *display)
+{
+	int rc = 0;
+
+	if (!display) {
+		pr_err("invalid display\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&display->display_lock);
+
+	rc = dsi_display_parse_dt(display);
+	if (rc) {
+		pr_err("[%s] failed to parse dt, rc=%d\n", display->name, rc);
+		goto error;
+	}
+
+	rc = dsi_display_res_init(display);
+	if (rc) {
+		pr_err("[%s] failed to initialize resources, rc=%d\n",
+		       display->name, rc);
+		goto error;
+	}
+error:
+	mutex_unlock(&display->display_lock);
+	return rc;
+}
+
+/**
+ * _dsi_display_dev_deinit - deinitializes the display device
+ * All the resources acquired during device init will be released.
+ * @display:        Handle to the display
+ * Returns:         Zero on success
+ */
+static int _dsi_display_dev_deinit(struct dsi_display *display)
+{
+	int rc = 0;
+
+	if (!display) {
+		pr_err("invalid display\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&display->display_lock);
+
+	rc = dsi_display_res_deinit(display);
+	if (rc)
+		pr_err("[%s] failed to deinitialize resource, rc=%d\n",
+		       display->name, rc);
+
+	mutex_unlock(&display->display_lock);
+
+	return rc;
+}
+
+/**
+ * dsi_display_bind - bind dsi device with controlling device
+ * @dev:        Pointer to base of platform device
+ * @master:     Pointer to container of drm device
+ * @data:       Pointer to private data
+ * Returns:     Zero on success
+ */
+static int dsi_display_bind(struct device *dev,
+		struct device *master,
+		void *data)
+{
+	struct dsi_display_ctrl *display_ctrl;
+	struct drm_device *drm;
+	struct dsi_display *display;
+	struct platform_device *pdev = to_platform_device(dev);
+	int i, j, rc = 0;
+
+	if (!dev || !pdev || !master) {
+		pr_err("invalid param(s), dev %pK, pdev %pK, master %pK\n",
+				dev, pdev, master);
+		return -EINVAL;
+	}
+
+	drm = dev_get_drvdata(master);
+	display = platform_get_drvdata(pdev);
+	if (!drm || !display) {
+		pr_err("invalid param(s), drm %pK, display %pK\n",
+				drm, display);
+		return -EINVAL;
+	}
+
+	mutex_lock(&display->display_lock);
+
+	rc = dsi_display_debugfs_init(display);
+	if (rc) {
+		pr_err("[%s] debugfs init failed, rc=%d\n", display->name, rc);
+		goto error;
+	}
+
+	for (i = 0; i < display->ctrl_count; i++) {
+		display_ctrl = &display->ctrl[i];
+
+		rc = dsi_ctrl_drv_init(display_ctrl->ctrl, display->root);
+		if (rc) {
+			pr_err("[%s] failed to initialize ctrl[%d], rc=%d\n",
+			       display->name, i, rc);
+			goto error_ctrl_deinit;
+		}
+
+		rc = dsi_phy_drv_init(display_ctrl->phy);
+		if (rc) {
+			pr_err("[%s] Failed to initialize phy[%d], rc=%d\n",
+				display->name, i, rc);
+			(void)dsi_ctrl_drv_deinit(display_ctrl->ctrl);
+			goto error_ctrl_deinit;
+		}
+	}
+
+	rc = dsi_display_mipi_host_init(display);
+	if (rc) {
+		pr_err("[%s] failed to initialize mipi host, rc=%d\n",
+		       display->name, rc);
+		goto error_ctrl_deinit;
+	}
+
+	for (j = 0; j < display->panel_count; j++) {
+		rc = dsi_panel_drv_init(display->panel[j], &display->host);
+		if (rc) {
+			if (rc != -EPROBE_DEFER)
+				SDE_ERROR(
+				"[%s]Failed to init panel driver, rc=%d\n",
+				display->name, rc);
+			goto error_panel_deinit;
+		}
+	}
+
+	rc = dsi_panel_get_mode_count(display->panel[0],
+					&display->num_of_modes);
+	if (rc) {
+		pr_err("[%s] failed to get mode count, rc=%d\n",
+		       display->name, rc);
+		goto error_panel_deinit;
+	}
+
+	display->drm_dev = drm;
+	goto error;
+
+error_panel_deinit:
+	for (j--; j >= 0; j--)
+		(void)dsi_panel_drv_deinit(display->panel[j]);
+	(void)dsi_display_mipi_host_deinit(display);
+error_ctrl_deinit:
+	for (i = i - 1; i >= 0; i--) {
+		display_ctrl = &display->ctrl[i];
+		(void)dsi_phy_drv_deinit(display_ctrl->phy);
+		(void)dsi_ctrl_drv_deinit(display_ctrl->ctrl);
+	}
+	(void)dsi_display_debugfs_deinit(display);
+error:
+	mutex_unlock(&display->display_lock);
+	return rc;
+}
+
+/**
+ * dsi_display_unbind - unbind dsi from controlling device
+ * @dev:        Pointer to base of platform device
+ * @master:     Pointer to container of drm device
+ * @data:       Pointer to private data
+ */
+static void dsi_display_unbind(struct device *dev,
+		struct device *master, void *data)
+{
+	struct dsi_display_ctrl *display_ctrl;
+	struct dsi_display *display;
+	struct platform_device *pdev = to_platform_device(dev);
+	int i, rc = 0;
+
+	if (!dev || !pdev) {
+		pr_err("invalid param(s)\n");
+		return;
+	}
+
+	display = platform_get_drvdata(pdev);
+	if (!display) {
+		pr_err("invalid display\n");
+		return;
+	}
+
+	mutex_lock(&display->display_lock);
+
+	for (i = 0; i < display->panel_count; i++) {
+		rc = dsi_panel_drv_deinit(display->panel[i]);
+		if (rc)
+			SDE_ERROR("[%s] failed to deinit panel driver, rc=%d\n",
+					display->name, rc);
+	}
+
+	rc = dsi_display_mipi_host_deinit(display);
+	if (rc)
+		pr_err("[%s] failed to deinit mipi hosts, rc=%d\n",
+		       display->name,
+		       rc);
+
+	for (i = 0; i < display->ctrl_count; i++) {
+		display_ctrl = &display->ctrl[i];
+
+		rc = dsi_phy_drv_deinit(display_ctrl->phy);
+		if (rc)
+			pr_err("[%s] failed to deinit phy%d driver, rc=%d\n",
+			       display->name, i, rc);
+
+		rc = dsi_ctrl_drv_deinit(display_ctrl->ctrl);
+		if (rc)
+			pr_err("[%s] failed to deinit ctrl%d driver, rc=%d\n",
+			       display->name, i, rc);
+	}
+	(void)dsi_display_debugfs_deinit(display);
+
+	mutex_unlock(&display->display_lock);
+}
+
+static const struct component_ops dsi_display_comp_ops = {
+	.bind = dsi_display_bind,
+	.unbind = dsi_display_unbind,
+};
+
+static struct platform_driver dsi_display_driver = {
+	.probe = dsi_display_dev_probe,
+	.remove = dsi_display_dev_remove,
+	.driver = {
+		.name = "msm-dsi-display",
+		.of_match_table = dsi_display_dt_match,
+	},
+};
+
+int dsi_display_dev_probe(struct platform_device *pdev)
+{
+	int rc = 0;
+	struct dsi_display *display;
+
+	if (!pdev || !pdev->dev.of_node) {
+		pr_err("pdev not found\n");
+		return -ENODEV;
+	}
+
+	display = devm_kzalloc(&pdev->dev, sizeof(*display), GFP_KERNEL);
+	if (!display)
+		return -ENOMEM;
+
+	display->name = of_get_property(pdev->dev.of_node, "label", NULL);
+
+	display->is_active = of_property_read_bool(pdev->dev.of_node,
+						"qcom,dsi-display-active");
+
+	display->display_type = of_get_property(pdev->dev.of_node,
+						"qcom,display-type", NULL);
+	if (!display->display_type)
+		display->display_type = "unknown";
+
+	mutex_init(&display->display_lock);
+
+	display->pdev = pdev;
+	platform_set_drvdata(pdev, display);
+	mutex_lock(&dsi_display_list_lock);
+	list_add_tail(&display->list, &dsi_display_list);
+	mutex_unlock(&dsi_display_list_lock);
+
+	if (display->is_active) {
+		main_display = display;
+		rc = _dsi_display_dev_init(display);
+		if (rc) {
+			pr_err("device init failed, rc=%d\n", rc);
+			return rc;
+		}
+
+		rc = component_add(&pdev->dev, &dsi_display_comp_ops);
+		if (rc)
+			pr_err("component add failed, rc=%d\n", rc);
+	}
+	return rc;
+}
+
+int dsi_display_dev_remove(struct platform_device *pdev)
+{
+	int rc = 0, i;
+	struct dsi_display *display;
+	struct dsi_display *pos, *tmp;
+
+	if (!pdev) {
+		pr_err("Invalid device\n");
+		return -EINVAL;
+	}
+
+	display = platform_get_drvdata(pdev);
+
+	(void)_dsi_display_dev_deinit(display);
+
+	mutex_lock(&dsi_display_list_lock);
+	list_for_each_entry_safe(pos, tmp, &dsi_display_list, list) {
+		if (pos == display) {
+			list_del(&display->list);
+			break;
+		}
+	}
+	mutex_unlock(&dsi_display_list_lock);
+
+	platform_set_drvdata(pdev, NULL);
+	if (display->panel_of)
+		for (i = 0; i < display->panel_count; i++)
+			if (display->panel_of[i])
+				of_node_put(display->panel_of[i]);
+	devm_kfree(&pdev->dev, display->panel_of);
+	devm_kfree(&pdev->dev, display->panel);
+	devm_kfree(&pdev->dev, display->bridge_idx);
+	devm_kfree(&pdev->dev, display);
+	return rc;
+}
+
+int dsi_display_get_num_of_displays(void)
+{
+	int count = 0;
+	struct dsi_display *display;
+
+	mutex_lock(&dsi_display_list_lock);
+
+	list_for_each_entry(display, &dsi_display_list, list) {
+		count++;
+	}
+
+	mutex_unlock(&dsi_display_list_lock);
+	return count;
+}
+
+int dsi_display_get_active_displays(void **display_array, u32 max_display_count)
+{
+	struct dsi_display *pos;
+	int i = 0;
+
+	if (!display_array || !max_display_count) {
+		if (!display_array)
+			pr_err("invalid params\n");
+		return 0;
+	}
+
+	mutex_lock(&dsi_display_list_lock);
+
+	list_for_each_entry(pos, &dsi_display_list, list) {
+		if (i >= max_display_count) {
+			pr_err("capping display count to %d\n", i);
+			break;
+		}
+		if (pos->is_active)
+			display_array[i++] = pos;
+	}
+
+	mutex_unlock(&dsi_display_list_lock);
+	return i;
+}
+
+struct dsi_display *dsi_display_get_display_by_name(const char *name)
+{
+	struct dsi_display *display = NULL, *pos;
+
+	mutex_lock(&dsi_display_list_lock);
+
+	list_for_each_entry(pos, &dsi_display_list, list) {
+		if (!strcmp(name, pos->name))
+			display = pos;
+	}
+
+	mutex_unlock(&dsi_display_list_lock);
+
+	return display;
+}
+
+void dsi_display_set_active_state(struct dsi_display *display, bool is_active)
+{
+	mutex_lock(&display->display_lock);
+	display->is_active = is_active;
+	mutex_unlock(&display->display_lock);
+}
+
+int dsi_display_drm_bridge_init(struct dsi_display *display,
+		struct drm_encoder *enc)
+{
+	int rc = 0, i;
+	struct dsi_bridge *bridge;
+	struct drm_bridge *dba_bridge;
+	struct dba_bridge_init init_data;
+	struct drm_bridge *precede_bridge;
+	struct msm_drm_private *priv = NULL;
+	struct dsi_panel *panel;
+	u32 *bridge_idx;
+	u32 num_of_lanes = 0;
+
+	if (!display || !display->drm_dev || !enc) {
+		pr_err("invalid param(s)\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&display->display_lock);
+	priv = display->drm_dev->dev_private;
+
+	if (!priv) {
+		SDE_ERROR("Private data is not present\n");
+		rc = -EINVAL;
+		goto out;
+	}
+
+	if (display->bridge) {
+		SDE_ERROR("display is already initialize\n");
+		goto out;
+	}
+
+	bridge = dsi_drm_bridge_init(display, display->drm_dev, enc);
+	if (IS_ERR_OR_NULL(bridge)) {
+		rc = PTR_ERR(bridge);
+		SDE_ERROR("[%s] brige init failed, %d\n", display->name, rc);
+		goto out;
+	}
+
+	display->bridge = bridge;
+	priv->bridges[priv->num_bridges++] = &bridge->base;
+	precede_bridge = &bridge->base;
+
+	if (display->panel_count >= MAX_BRIDGES - 1) {
+		SDE_ERROR("too many bridge chips=%d\n", display->panel_count);
+		goto error_bridge;
+	}
+
+	for (i = 0; i < display->panel_count; i++) {
+		panel = display->panel[i];
+		if (panel && display->bridge_idx &&
+			panel->dba_config.dba_panel) {
+			bridge_idx = display->bridge_idx + i;
+			num_of_lanes = 0;
+			memset(&init_data, 0x00, sizeof(init_data));
+			if (panel->host_config.data_lanes & DSI_DATA_LANE_0)
+				num_of_lanes++;
+			if (panel->host_config.data_lanes & DSI_DATA_LANE_1)
+				num_of_lanes++;
+			if (panel->host_config.data_lanes & DSI_DATA_LANE_2)
+				num_of_lanes++;
+			if (panel->host_config.data_lanes & DSI_DATA_LANE_3)
+				num_of_lanes++;
+			init_data.client_name = DSI_DBA_CLIENT_NAME;
+			init_data.chip_name = panel->dba_config.bridge_name;
+			init_data.id = *bridge_idx;
+			init_data.display = display;
+			init_data.hdmi_mode = panel->dba_config.hdmi_mode;
+			init_data.num_of_input_lanes = num_of_lanes;
+			init_data.precede_bridge = precede_bridge;
+			init_data.panel_count = display->panel_count;
+			dba_bridge = dba_bridge_init(display->drm_dev, enc,
+							&init_data);
+			if (IS_ERR_OR_NULL(dba_bridge)) {
+				rc = PTR_ERR(dba_bridge);
+				SDE_ERROR("[%s:%d] dba brige init failed, %d\n",
+					init_data.chip_name, init_data.id, rc);
+				goto error_dba_bridge;
+			}
+			priv->bridges[priv->num_bridges++] = dba_bridge;
+			precede_bridge = dba_bridge;
+		}
+	}
+
+	goto out;
+
+error_dba_bridge:
+	for (i = 1; i < MAX_BRIDGES; i++) {
+		dba_bridge_cleanup(priv->bridges[i]);
+		priv->bridges[i] = NULL;
+	}
+error_bridge:
+	dsi_drm_bridge_cleanup(display->bridge);
+	display->bridge = NULL;
+	priv->bridges[0] = NULL;
+	priv->num_bridges = 0;
+out:
+	mutex_unlock(&display->display_lock);
+	return rc;
+}
+
+int dsi_display_drm_bridge_deinit(struct dsi_display *display)
+{
+	int rc = 0, i;
+	struct msm_drm_private *priv = NULL;
+
+	if (!display) {
+		SDE_ERROR("Invalid params\n");
+		return -EINVAL;
+	}
+	priv = display->drm_dev->dev_private;
+
+	if (!priv) {
+		SDE_ERROR("Private data is not present\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&display->display_lock);
+
+	for (i = 1; i < MAX_BRIDGES; i++) {
+		dba_bridge_cleanup(priv->bridges[i]);
+		priv->bridges[i] = NULL;
+	}
+
+	dsi_drm_bridge_cleanup(display->bridge);
+	display->bridge = NULL;
+	priv->bridges[0] = NULL;
+	priv->num_bridges = 0;
+
+	mutex_unlock(&display->display_lock);
+	return rc;
+}
+
+int dsi_display_get_info(struct msm_display_info *info, void *disp)
+{
+	struct dsi_display *display;
+	struct dsi_panel_phy_props phy_props;
+	int i, rc;
+
+	if (!info || !disp) {
+		pr_err("invalid params\n");
+		return -EINVAL;
+	}
+	display = disp;
+
+	mutex_lock(&display->display_lock);
+	rc = dsi_panel_get_phy_props(display->panel[0], &phy_props);
+	if (rc) {
+		pr_err("[%s] failed to get panel phy props, rc=%d\n",
+		       display->name, rc);
+		goto error;
+	}
+
+	info->intf_type = DRM_MODE_CONNECTOR_DSI;
+
+	info->num_of_h_tiles = display->ctrl_count;
+	for (i = 0; i < info->num_of_h_tiles; i++)
+		info->h_tile_instance[i] = display->ctrl[i].ctrl->index;
+
+	info->is_connected = true;
+	info->width_mm = phy_props.panel_width_mm;
+	info->height_mm = phy_props.panel_height_mm;
+	info->max_width = 1920;
+	info->max_height = 1080;
+	info->compression = MSM_DISPLAY_COMPRESS_NONE;
+
+	switch (display->panel[0]->mode.panel_mode) {
+	case DSI_OP_VIDEO_MODE:
+		info->capabilities |= MSM_DISPLAY_CAP_VID_MODE;
+		break;
+	case DSI_OP_CMD_MODE:
+		info->capabilities |= MSM_DISPLAY_CAP_CMD_MODE;
+		break;
+	default:
+		pr_err("unknown dsi panel mode %d\n",
+				display->panel[0]->mode.panel_mode);
+		break;
+	}
+error:
+	mutex_unlock(&display->display_lock);
+	return rc;
+}
+
+int dsi_display_get_modes(struct dsi_display *display,
+			  struct dsi_display_mode *modes,
+			  u32 *count)
+{
+	int rc = 0;
+	int i;
+	struct dsi_dfps_capabilities dfps_caps;
+	int num_dfps_rates;
+
+	if (!display || !count) {
+		pr_err("Invalid params\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&display->display_lock);
+
+	rc = dsi_panel_get_dfps_caps(display->panel[0], &dfps_caps);
+	if (rc) {
+		pr_err("[%s] failed to get dfps caps from panel\n",
+				display->name);
+		goto error;
+	}
+
+	num_dfps_rates = !dfps_caps.dfps_support ? 1 :
+			dfps_caps.max_refresh_rate -
+			dfps_caps.min_refresh_rate + 1;
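+	/*
+	 * Each panel mode is expanded into one sub-mode per supported
+	 * refresh rate, e.g. a 48..60 Hz DFPS range yields 13 sub-modes
+	 * per panel mode.
+	 */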
+
+	if (!modes) {
+		/* Inflate num_of_modes by fps in dfps */
+		*count = display->num_of_modes * num_dfps_rates;
+		goto error;
+	}
+
+	for (i = 0; i < *count; i++) {
+		/* Insert the dfps "sub-modes" between main panel modes */
+		int panel_mode_idx = i / num_dfps_rates;
+
+		rc = dsi_panel_get_mode(display->panel[0], panel_mode_idx,
+					modes);
+		if (rc) {
+			pr_err("[%s] failed to get mode from panel\n",
+			       display->name);
+			goto error;
+		}
+
+		if (dfps_caps.dfps_support) {
+			modes->timing.refresh_rate = dfps_caps.min_refresh_rate
+					+ (i % num_dfps_rates);
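+			/* pclk (kHz) = htotal * vtotal * fps / 1000 */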
+			modes->pixel_clk_khz = (DSI_H_TOTAL(&modes->timing) *
+					DSI_V_TOTAL(&modes->timing) *
+					modes->timing.refresh_rate) / 1000;
+		}
+
+		if (display->ctrl_count > 1) { /* TODO: remove if */
+			modes->timing.h_active *= display->ctrl_count;
+			modes->timing.h_front_porch *= display->ctrl_count;
+			modes->timing.h_sync_width *= display->ctrl_count;
+			modes->timing.h_back_porch *= display->ctrl_count;
+			modes->timing.h_skew *= display->ctrl_count;
+			modes->pixel_clk_khz *= display->ctrl_count;
+		}
+
+		modes++;
+	}
+
+error:
+	mutex_unlock(&display->display_lock);
+	return rc;
+}
+
+int dsi_display_validate_mode(struct dsi_display *display,
+			      struct dsi_display_mode *mode,
+			      u32 flags)
+{
+	int rc = 0;
+	int i;
+	struct dsi_display_ctrl *ctrl;
+	struct dsi_display_mode adj_mode;
+
+	if (!display || !mode) {
+		pr_err("Invalid params\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&display->display_lock);
+
+	adj_mode = *mode;
+	adjust_timing_by_ctrl_count(display, &adj_mode);
+
+	rc = dsi_panel_validate_mode(display->panel[0], &adj_mode);
+	if (rc) {
+		pr_err("[%s] panel mode validation failed, rc=%d\n",
+		       display->name, rc);
+		goto error;
+	}
+
+	for (i = 0; i < display->ctrl_count; i++) {
+		ctrl = &display->ctrl[i];
+		rc = dsi_ctrl_validate_timing(ctrl->ctrl, &adj_mode.timing);
+		if (rc) {
+			pr_err("[%s] ctrl mode validation failed, rc=%d\n",
+			       display->name, rc);
+			goto error;
+		}
+
+		rc = dsi_phy_validate_mode(ctrl->phy, &adj_mode.timing);
+		if (rc) {
+			pr_err("[%s] phy mode validation failed, rc=%d\n",
+			       display->name, rc);
+			goto error;
+		}
+	}
+
+	if ((flags & DSI_VALIDATE_FLAG_ALLOW_ADJUST) &&
+			(mode->flags & DSI_MODE_FLAG_SEAMLESS)) {
+		rc = dsi_display_validate_mode_seamless(display, mode);
+		if (rc) {
+			pr_err("[%s] seamless not possible rc=%d\n",
+				display->name, rc);
+			goto error;
+		}
+	}
+
+error:
+	mutex_unlock(&display->display_lock);
+	return rc;
+}
+
+int dsi_display_set_mode(struct dsi_display *display,
+			 struct dsi_display_mode *mode,
+			 u32 flags)
+{
+	int rc = 0;
+	struct dsi_display_mode adj_mode;
+
+	if (!display || !mode) {
+		pr_err("Invalid params\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&display->display_lock);
+
+	adj_mode = *mode;
+	adjust_timing_by_ctrl_count(display, &adj_mode);
+
+	rc = dsi_display_validate_mode_set(display, &adj_mode, flags);
+	if (rc) {
+		pr_err("[%s] mode cannot be set\n", display->name);
+		goto error;
+	}
+
+	rc = dsi_display_set_mode_sub(display, &adj_mode, flags);
+	if (rc) {
+		pr_err("[%s] failed to set mode\n", display->name);
+		goto error;
+	}
+error:
+	mutex_unlock(&display->display_lock);
+	return rc;
+}
+
+int dsi_display_set_tpg_state(struct dsi_display *display, bool enable)
+{
+	int rc = 0;
+	int i;
+	struct dsi_display_ctrl *ctrl;
+
+	if (!display) {
+		pr_err("Invalid params\n");
+		return -EINVAL;
+	}
+
+	for (i = 0; i < display->ctrl_count; i++) {
+		ctrl = &display->ctrl[i];
+		rc = dsi_ctrl_set_tpg_state(ctrl->ctrl, enable);
+		if (rc) {
+			pr_err("[%s] failed to set tpg state for host_%d\n",
+			       display->name, i);
+			goto error;
+		}
+	}
+
+	display->is_tpg_enabled = enable;
+error:
+	return rc;
+}
+
+int dsi_display_prepare(struct dsi_display *display)
+{
+	int rc = 0, i, j;
+
+	if (!display) {
+		pr_err("Invalid params\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&display->display_lock);
+
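+	/*
+	 * Power-up order: panel pre-prepare, controller and PHY power,
+	 * core clocks, PHY reset/enable, controller init, link clocks,
+	 * host enable, panel prepare. The error labels below unwind in
+	 * exactly the reverse order.
+	 */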
+	for (i = 0; i < display->panel_count; i++) {
+		rc = dsi_panel_pre_prepare(display->panel[i]);
+		if (rc) {
+			SDE_ERROR("[%s] panel pre-prepare failed, rc=%d\n",
+					display->name, rc);
+			goto error_panel_post_unprep;
+		}
+	}
+
+	rc = dsi_display_ctrl_power_on(display);
+	if (rc) {
+		pr_err("[%s] failed to power on dsi controllers, rc=%d\n",
+		       display->name, rc);
+		goto error_panel_post_unprep;
+	}
+
+	rc = dsi_display_phy_power_on(display);
+	if (rc) {
+		pr_err("[%s] failed to power on dsi phy, rc = %d\n",
+		       display->name, rc);
+		goto error_ctrl_pwr_off;
+	}
+
+	rc = dsi_display_ctrl_core_clk_on(display);
+	if (rc) {
+		pr_err("[%s] failed to enable DSI core clocks, rc=%d\n",
+		       display->name, rc);
+		goto error_phy_pwr_off;
+	}
+
+	rc = dsi_display_phy_sw_reset(display);
+	if (rc) {
+		pr_err("[%s] failed to reset phy, rc=%d\n", display->name, rc);
+		goto error_ctrl_clk_off;
+	}
+
+	rc = dsi_display_phy_enable(display);
+	if (rc) {
+		pr_err("[%s] failed to enable DSI PHY, rc=%d\n",
+		       display->name, rc);
+		goto error_ctrl_clk_off;
+	}
+
+	rc = dsi_display_ctrl_init(display);
+	if (rc) {
+		pr_err("[%s] failed to setup DSI controller, rc=%d\n",
+		       display->name, rc);
+		goto error_phy_disable;
+	}
+
+	rc = dsi_display_ctrl_link_clk_on(display);
+	if (rc) {
+		pr_err("[%s] failed to enable DSI link clocks, rc=%d\n",
+		       display->name, rc);
+		goto error_ctrl_deinit;
+	}
+
+	rc = dsi_display_ctrl_host_enable(display);
+	if (rc) {
+		pr_err("[%s] failed to enable DSI host, rc=%d\n",
+		       display->name, rc);
+		goto error_ctrl_link_off;
+	}
+
+	for (j = 0; j < display->panel_count; j++) {
+		rc = dsi_panel_prepare(display->panel[j]);
+		if (rc) {
+			SDE_ERROR("[%s] panel prepare failed, rc=%d\n",
+					display->name, rc);
+			goto error_panel_unprep;
+		}
+	}
+
+	goto error;
+
+error_panel_unprep:
+	for (j--; j >= 0; j--)
+		(void)dsi_panel_unprepare(display->panel[j]);
+	(void)dsi_display_ctrl_host_disable(display);
+error_ctrl_link_off:
+	(void)dsi_display_ctrl_link_clk_off(display);
+error_ctrl_deinit:
+	(void)dsi_display_ctrl_deinit(display);
+error_phy_disable:
+	(void)dsi_display_phy_disable(display);
+error_ctrl_clk_off:
+	(void)dsi_display_ctrl_core_clk_off(display);
+error_phy_pwr_off:
+	(void)dsi_display_phy_power_off(display);
+error_ctrl_pwr_off:
+	(void)dsi_display_ctrl_power_off(display);
+error_panel_post_unprep:
+	for (i--; i >= 0; i--)
+		(void)dsi_panel_post_unprepare(display->panel[i]);
+error:
+	mutex_unlock(&display->display_lock);
+	return rc;
+}
+
+int dsi_display_enable(struct dsi_display *display)
+{
+	int rc = 0, i;
+
+	if (!display) {
+		pr_err("Invalid params\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&display->display_lock);
+
+	for (i = 0; i < display->panel_count; i++) {
+		rc = dsi_panel_enable(display->panel[i]);
+		if (rc) {
+			SDE_ERROR("[%s] failed to enable DSI panel, rc=%d\n",
+					display->name, rc);
+			goto error_disable_panel;
+		}
+	}
+
+	if (display->config.panel_mode == DSI_OP_VIDEO_MODE) {
+		rc = dsi_display_vid_engine_enable(display);
+		if (rc) {
+			pr_err("[%s] failed to enable DSI video engine, rc=%d\n",
+			       display->name, rc);
+			goto error_disable_panel;
+		}
+	} else if (display->config.panel_mode == DSI_OP_CMD_MODE) {
+		rc = dsi_display_cmd_engine_enable(display);
+		if (rc) {
+			pr_err("[%s] failed to enable DSI cmd engine, rc=%d\n",
+			       display->name, rc);
+			goto error_disable_panel;
+		}
+	} else {
+		pr_err("[%s] Invalid configuration\n", display->name);
+		rc = -EINVAL;
+		goto error_disable_panel;
+	}
+
+	goto error;
+
+error_disable_panel:
+	for (i--; i >= 0; i--)
+		(void)dsi_panel_disable(display->panel[i]);
+error:
+	mutex_unlock(&display->display_lock);
+	return rc;
+}
+
+int dsi_display_post_enable(struct dsi_display *display)
+{
+	int rc = 0, i;
+
+	if (!display) {
+		pr_err("Invalid params\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&display->display_lock);
+
+	for (i = 0; i < display->panel_count; i++) {
+		rc = dsi_panel_post_enable(display->panel[i]);
+		if (rc)
+			SDE_ERROR("[%s] panel post-enable failed, rc=%d\n",
+					display->name, rc);
+	}
+
+	mutex_unlock(&display->display_lock);
+	return rc;
+}
+
+int dsi_display_pre_disable(struct dsi_display *display)
+{
+	int rc = 0, i;
+
+	if (!display) {
+		pr_err("Invalid params\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&display->display_lock);
+
+	for (i = 0; i < display->panel_count; i++) {
+		rc = dsi_panel_pre_disable(display->panel[i]);
+		if (rc)
+			SDE_ERROR("[%s] panel pre-disable failed, rc=%d\n",
+					display->name, rc);
+	}
+
+	mutex_unlock(&display->display_lock);
+	return rc;
+}
+
+int dsi_display_disable(struct dsi_display *display)
+{
+	int rc = 0, i;
+
+	if (!display) {
+		pr_err("Invalid params\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&display->display_lock);
+
+	rc = dsi_display_wake_up(display);
+	if (rc)
+		pr_err("[%s] display wake up failed, rc=%d\n",
+		       display->name, rc);
+
+	for (i = 0; i < display->panel_count; i++) {
+		rc = dsi_panel_disable(display->panel[i]);
+		if (rc)
+			SDE_ERROR("[%s] failed to disable DSI panel, rc=%d\n",
+					display->name, rc);
+	}
+
+	if (display->config.panel_mode == DSI_OP_VIDEO_MODE) {
+		rc = dsi_display_vid_engine_disable(display);
+		if (rc)
+			pr_err("[%s] failed to disable DSI vid engine, rc=%d\n",
+			       display->name, rc);
+	} else if (display->config.panel_mode == DSI_OP_CMD_MODE) {
+		rc = dsi_display_cmd_engine_disable(display);
+		if (rc)
+			pr_err("[%s] failed to disable DSI cmd engine, rc=%d\n",
+			       display->name, rc);
+	} else {
+		pr_err("[%s] Invalid configuration\n", display->name);
+		rc = -EINVAL;
+	}
+
+	mutex_unlock(&display->display_lock);
+	return rc;
+}
+
+int dsi_display_unprepare(struct dsi_display *display)
+{
+	int rc = 0, i;
+
+	if (!display) {
+		pr_err("Invalid params\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&display->display_lock);
+
+	rc = dsi_display_wake_up(display);
+	if (rc)
+		pr_err("[%s] display wake up failed, rc=%d\n",
+		       display->name, rc);
+
+	for (i = 0; i < display->panel_count; i++) {
+		rc = dsi_panel_unprepare(display->panel[i]);
+		if (rc)
+			SDE_ERROR("[%s] panel unprepare failed, rc=%d\n",
+					display->name, rc);
+	}
+
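+	/*
+	 * Tear down in the reverse order of dsi_display_prepare(); failures
+	 * are logged but do not abort the power-down sequence.
+	 */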
+	rc = dsi_display_ctrl_host_disable(display);
+	if (rc)
+		pr_err("[%s] failed to disable DSI host, rc=%d\n",
+		       display->name, rc);
+
+	rc = dsi_display_ctrl_link_clk_off(display);
+	if (rc)
+		pr_err("[%s] failed to disable Link clocks, rc=%d\n",
+		       display->name, rc);
+
+	rc = dsi_display_ctrl_deinit(display);
+	if (rc)
+		pr_err("[%s] failed to deinit controller, rc=%d\n",
+		       display->name, rc);
+
+	rc = dsi_display_phy_disable(display);
+	if (rc)
+		pr_err("[%s] failed to disable DSI PHY, rc=%d\n",
+		       display->name, rc);
+
+	rc = dsi_display_ctrl_core_clk_off(display);
+	if (rc)
+		pr_err("[%s] failed to disable DSI clocks, rc=%d\n",
+		       display->name, rc);
+
+	rc = dsi_display_phy_power_off(display);
+	if (rc)
+		pr_err("[%s] failed to power off PHY, rc=%d\n",
+		       display->name, rc);
+
+	rc = dsi_display_ctrl_power_off(display);
+	if (rc)
+		pr_err("[%s] failed to power DSI vregs, rc=%d\n",
+		       display->name, rc);
+
+	for (i = 0; i < display->panel_count; i++) {
+		rc = dsi_panel_post_unprepare(display->panel[i]);
+		if (rc)
+			pr_err("[%s] panel post-unprepare failed, rc=%d\n",
+				display->name, rc);
+	}
+
+	mutex_unlock(&display->display_lock);
+	return rc;
+}
+
+static int __init dsi_display_register(void)
+{
+	dsi_phy_drv_register();
+	dsi_ctrl_drv_register();
+	return platform_driver_register(&dsi_display_driver);
+}
+
+static void __exit dsi_display_unregister(void)
+{
+	platform_driver_unregister(&dsi_display_driver);
+	dsi_ctrl_drv_unregister();
+	dsi_phy_drv_unregister();
+}
+
+module_init(dsi_display_register);
+module_exit(dsi_display_unregister);
diff -Nruw linux-4.4.115-fbx/drivers/gpu/drm/msm/dsi-staging./dsi_display.h linux-4.4.115-fbx/drivers/gpu/drm/msm/dsi-staging/dsi_display.h
--- linux-4.4.115-fbx/drivers/gpu/drm/msm/dsi-staging./dsi_display.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/gpu/drm/msm/dsi-staging/dsi_display.h	2019-01-22 16:16:23.491246298 +0100
@@ -0,0 +1,341 @@
+/*
+ * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _DSI_DISPLAY_H_
+#define _DSI_DISPLAY_H_
+
+#include <linux/types.h>
+#include <linux/bitops.h>
+#include <linux/debugfs.h>
+#include <linux/of_device.h>
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+
+#include "msm_drv.h"
+#include "dsi_defs.h"
+#include "dsi_ctrl.h"
+#include "dsi_phy.h"
+#include "dsi_panel.h"
+
+#define MAX_DSI_CTRLS_PER_DISPLAY             2
+
+/*
+ * DSI Validate Mode modifiers
+ * @DSI_VALIDATE_FLAG_ALLOW_ADJUST:	Allow mode validation to also do fixup
+ */
+#define DSI_VALIDATE_FLAG_ALLOW_ADJUST	0x1
+
+/**
+ * enum dsi_display_type - enumerates DSI display types
+ * @DSI_DISPLAY_SINGLE:       A panel connected on a single DSI interface.
+ * @DSI_DISPLAY_EXT_BRIDGE:   A bridge is connected between panel and DSI host.
+ *			      It utilizes a single DSI interface.
+ * @DSI_DISPLAY_SPLIT:        A panel that utilizes more than one DSI
+ *			      interface.
+ * @DSI_DISPLAY_SPLIT_EXT_BRIDGE: A bridge is present between panel and DSI
+ *				  host. It utilizes more than one DSI interface.
+ */
+enum dsi_display_type {
+	DSI_DISPLAY_SINGLE = 0,
+	DSI_DISPLAY_EXT_BRIDGE,
+	DSI_DISPLAY_SPLIT,
+	DSI_DISPLAY_SPLIT_EXT_BRIDGE,
+	DSI_DISPLAY_MAX,
+};
+
+/**
+ * struct dsi_display_ctrl - dsi ctrl/phy information for the display
+ * @ctrl:           Handle to the DSI controller device.
+ * @ctrl_of_node:   phandle to the DSI controller device.
+ * @dsi_ctrl_idx:   DSI controller instance id.
+ * @power_state:    Current power state of the DSI controller.
+ * @phy:                  Handle to the DSI PHY device.
+ * @phy_of_node:          phandle to the DSI PHY device.
+ * @phy_enabled:          PHY power status.
+ */
+struct dsi_display_ctrl {
+	/* controller info */
+	struct dsi_ctrl *ctrl;
+	struct device_node *ctrl_of_node;
+	u32 dsi_ctrl_idx;
+
+	enum dsi_power_state power_state;
+
+	/* phy info */
+	struct msm_dsi_phy *phy;
+	struct device_node *phy_of_node;
+
+	bool phy_enabled;
+};
+
+/**
+ * struct dsi_display_clk_info - dsi display clock source information
+ * @src_clks:          Source clocks for DSI display.
+ * @mux_clks:          Mux clocks used for DFPS.
+ * @shadow_clks:       Used for DFPS.
+ */
+struct dsi_display_clk_info {
+	struct dsi_clk_link_set src_clks;
+	struct dsi_clk_link_set mux_clks;
+	struct dsi_clk_link_set shadow_clks;
+};
+
+/**
+ * struct dsi_display - dsi display information
+ * @pdev:             Pointer to platform device.
+ * @drm_dev:          DRM device associated with the display.
+ * @name:             Name of the display.
+ * @display_type:     Display type as defined in device tree.
+ * @list:             List pointer.
+ * @is_active:        Is display active.
+ * @display_lock:     Mutex for dsi_display interface.
+ * @ctrl_count:       Number of DSI interfaces required by panel.
+ * @ctrl:             Controller information for DSI display.
+ * @panel_count:      Number of DSI panels.
+ * @panel:            Handles to the DSI panels (array of panel_count entries).
+ * @panel_of:         phandles to the DSI panels; an array of panel_count
+ *		      struct device_node pointers.
+ * @bridge_idx:       Bridge chip index for each panel_of.
+ * @type:             DSI display type.
+ * @clk_master_idx:   The master controller for controlling clocks. This is an
+ *		      index into the ctrl[MAX_DSI_CTRLS_PER_DISPLAY] array.
+ * @cmd_master_idx:   The master controller for sending DSI commands to panel.
+ * @video_master_idx: The master controller for enabling video engine.
+ * @clock_info:       Clock sourcing for DSI display.
+ * @lane_map:         Lane mapping between DSI host and Panel.
+ * @num_of_modes:     Number of modes supported by display.
+ * @is_tpg_enabled:   TPG state.
+ * @host:             DRM MIPI DSI Host.
+ * @connector:        Pointer to DRM connector object.
+ * @bridge:           Pointer to DRM bridge object.
+ * @cmd_engine_refcount:  Reference count enforcing single instance of cmd eng
+ * @root:                 Debugfs root directory
+ */
+struct dsi_display {
+	struct platform_device *pdev;
+	struct drm_device *drm_dev;
+
+	const char *name;
+	const char *display_type;
+	struct list_head list;
+	bool is_active;
+	struct mutex display_lock;
+
+	u32 ctrl_count;
+	struct dsi_display_ctrl ctrl[MAX_DSI_CTRLS_PER_DISPLAY];
+
+	/* panel info */
+	u32 panel_count;
+	struct dsi_panel **panel;
+	struct device_node **panel_of;
+	u32 *bridge_idx;
+
+	enum dsi_display_type type;
+	u32 clk_master_idx;
+	u32 cmd_master_idx;
+	u32 video_master_idx;
+
+	struct dsi_display_clk_info clock_info;
+	struct dsi_host_config config;
+	struct dsi_lane_mapping lane_map;
+	u32 num_of_modes;
+	bool is_tpg_enabled;
+
+	struct mipi_dsi_host host;
+	struct dsi_bridge    *bridge;
+	u32 cmd_engine_refcount;
+
+	/* DEBUG FS */
+	struct dentry *root;
+};
+
+int dsi_display_dev_probe(struct platform_device *pdev);
+int dsi_display_dev_remove(struct platform_device *pdev);
+
+/**
+ * dsi_display_get_num_of_displays() - returns number of display devices
+ *				       supported.
+ *
+ * Return: number of displays.
+ */
+int dsi_display_get_num_of_displays(void);
+
+/**
+ * dsi_display_get_active_displays() - returns pointers for active displays
+ * @display_array:       Pointer to the display array to be filled.
+ * @max_display_count:   Size of display_array.
+ *
+ * Return: Number of display entries filled.
+ */
+int dsi_display_get_active_displays(void **display_array,
+		u32 max_display_count);
+
+/**
+ * dsi_display_get_display_by_name() - finds display by name
+ * @name:       Name of the display.
+ *
+ * Return: handle to the display or error code.
+ */
+struct dsi_display *dsi_display_get_display_by_name(const char *name);
+
+/**
+ * dsi_display_set_active_state() - sets the state of the display
+ * @display:        Handle to display.
+ * @is_active:      Active state to be set.
+ */
+void dsi_display_set_active_state(struct dsi_display *display, bool is_active);
+
+/**
+ * dsi_display_drm_bridge_init() - initializes DRM bridge object for DSI
+ * @display:            Handle to the display.
+ * @enc:                Pointer to the encoder object which is connected to the
+ *			display.
+ *
+ * Return: error code.
+ */
+int dsi_display_drm_bridge_init(struct dsi_display *display,
+		struct drm_encoder *enc);
+
+/**
+ * dsi_display_drm_bridge_deinit() - destroys DRM bridge for the display
+ * @display:        Handle to the display.
+ *
+ * Return: error code.
+ */
+int dsi_display_drm_bridge_deinit(struct dsi_display *display);
+
+/**
+ * dsi_display_get_info() - returns the display properties
+ * @info:             Pointer to the structure where info is stored.
+ * @disp:             Handle to the display.
+ *
+ * Return: error code.
+ */
+int dsi_display_get_info(struct msm_display_info *info, void *disp);
+
+/**
+ * dsi_display_get_modes() - get modes supported by display
+ * @display:            Handle to display.
+ * @modes:              Pointer to array of modes. Memory allocated should be
+ *			big enough to store (count * struct dsi_display_mode)
+ *			elements. If modes pointer is NULL, number of modes will
+ *			be stored in the memory pointed to by count.
+ * @count:              If modes is NULL, number of modes will be stored. If
+ *			not, mode information will be copied (number of modes
+ *			copied will be equal to *count).
+ *
+ * Return: error code.
+ */
+int dsi_display_get_modes(struct dsi_display *display,
+			  struct dsi_display_mode *modes,
+			  u32 *count);
+
+/**
+ * dsi_display_validate_mode() - validates if mode is supported by display
+ * @display:             Handle to display.
+ * @mode:                Mode to be validated.
+ * @flags:               Modifier flags.
+ *
+ * Return: 0 if supported or error code.
+ */
+int dsi_display_validate_mode(struct dsi_display *display,
+			      struct dsi_display_mode *mode,
+			      u32 flags);
+
+/**
+ * dsi_display_set_mode() - Set mode on the display.
+ * @display:           Handle to display.
+ * @mode:              mode to be set.
+ * @flags:             Modifier flags.
+ *
+ * Return: error code.
+ */
+int dsi_display_set_mode(struct dsi_display *display,
+			 struct dsi_display_mode *mode,
+			 u32 flags);
+
+/**
+ * dsi_display_prepare() - prepare display
+ * @display:          Handle to display.
+ *
+ * Prepare will perform power up sequences for the host and panel hardware.
+ * Power and clock resources might be turned on (depending on the panel mode).
+ * The video engine is not enabled.
+ *
+ * Return: error code.
+ */
+int dsi_display_prepare(struct dsi_display *display);
+
+/**
+ * dsi_display_enable() - enable display
+ * @display:            Handle to display.
+ *
+ * Enable will turn on the host engine and the panel. At the end of the enable
+ * function, Host and panel hardware are ready to accept pixel data from
+ * upstream.
+ *
+ * Return: error code.
+ */
+int dsi_display_enable(struct dsi_display *display);
+
+/**
+ * dsi_display_post_enable() - perform post enable operations.
+ * @display:         Handle to display.
+ *
+ * Some panels might require some commands to be sent after pixel data
+ * transmission has started. Such commands are sent as part of the post_enable
+ * function.
+ *
+ * Return: error code.
+ */
+int dsi_display_post_enable(struct dsi_display *display);
+
+/**
+ * dsi_display_pre_disable() - perform pre disable operations.
+ * @display:          Handle to display.
+ *
+ * If a panel requires commands to be sent before pixel data transmission is
+ * stopped, those can be sent as part of pre_disable.
+ *
+ * Return: error code.
+ */
+int dsi_display_pre_disable(struct dsi_display *display);
+
+/**
+ * dsi_display_disable() - disable panel and host hardware.
+ * @display:             Handle to display.
+ *
+ * Disable host and panel hardware; pixel data transmission cannot continue.
+ *
+ * Return: error code.
+ */
+int dsi_display_disable(struct dsi_display *display);
+
+/**
+ * dsi_display_unprepare() - power off display hardware.
+ * @display:            Handle to display.
+ *
+ * Host and panel hardware is turned off. Panel will be in reset state at the
+ * end of the function.
+ *
+ * Return: error code.
+ */
+int dsi_display_unprepare(struct dsi_display *display);
+
+int dsi_display_set_tpg_state(struct dsi_display *display, bool enable);
+
+int dsi_display_clock_gate(struct dsi_display *display, bool enable);
+int dsi_dispaly_static_frame(struct dsi_display *display, bool enable);
+
+int dsi_display_set_backlight(void *display, u32 bl_lvl);
+#endif /* _DSI_DISPLAY_H_ */
diff -Nruw linux-4.4.115-fbx/drivers/gpu/drm/msm/dsi-staging./dsi_display_test.c linux-4.4.115-fbx/drivers/gpu/drm/msm/dsi-staging/dsi_display_test.c
--- linux-4.4.115-fbx/drivers/gpu/drm/msm/dsi-staging./dsi_display_test.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/gpu/drm/msm/dsi-staging/dsi_display_test.c	2019-01-22 16:16:23.491246298 +0100
@@ -0,0 +1,114 @@
+/*
+ * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/delay.h>
+#include <linux/slab.h>
+
+#include "dsi_display_test.h"
+
+static void dsi_display_test_dump_modes(struct dsi_display_mode *mode,
+					u32 count)
+{
+}
+
+static void dsi_display_test_work(struct work_struct *work)
+{
+	struct dsi_display_test *test;
+	struct dsi_display *display;
+	struct dsi_display_mode *modes;
+	u32 count = 0;
+	u32 size = 0;
+	int rc = 0;
+
+	test = container_of(work, struct dsi_display_test, test_work);
+
+	display = test->display;
+	rc = dsi_display_get_modes(display, NULL, &count);
+	if (rc) {
+		pr_err("failed to get modes count, rc=%d\n", rc);
+		goto test_fail;
+	}
+
+	size = count * sizeof(*modes);
+	modes = kzalloc(size, GFP_KERNEL);
+	if (!modes) {
+		rc = -ENOMEM;
+		goto test_fail;
+	}
+
+	rc = dsi_display_get_modes(display, modes, &count);
+	if (rc) {
+		pr_err("failed to get modes, rc=%d\n", rc);
+		goto test_fail_free_modes;
+	}
+
+	dsi_display_test_dump_modes(modes, count);
+
+	rc = dsi_display_set_mode(display, &modes[0], 0x0);
+	if (rc) {
+		pr_err("failed to set mode, rc=%d\n", rc);
+		goto test_fail_free_modes;
+	}
+
+	rc = dsi_display_prepare(display);
+	if (rc) {
+		pr_err("failed to prepare display, rc=%d\n", rc);
+		goto test_fail_free_modes;
+	}
+
+	rc = dsi_display_enable(display);
+	if (rc) {
+		pr_err("failed to enable display, rc=%d\n", rc);
+		goto test_fail_unprep_disp;
+	}
+	return;
+
+test_fail_unprep_disp:
+	rc = dsi_display_unprepare(display);
+	if (rc)
+		pr_err("failed to unprep display, rc=%d\n", rc);
+
+test_fail_free_modes:
+	kfree(modes);
+test_fail:
+	return;
+}
+
+int dsi_display_test_init(struct dsi_display *display)
+{
+	static int done;
+	int rc = 0;
+	struct dsi_display_test *test;
+
+	if (done)
+		return rc;
+
+	done = 1;
+	if (!display) {
+		pr_err("Invalid params\n");
+		return -EINVAL;
+	}
+
+	test = kzalloc(sizeof(*test), GFP_KERNEL);
+	if (!test)
+		return -ENOMEM;
+
+	test->display = display;
+	INIT_WORK(&test->test_work, dsi_display_test_work);
+
+	dsi_display_test_work(&test->test_work);
+	return rc;
+}
diff -Nruw linux-4.4.115-fbx/drivers/gpu/drm/msm/dsi-staging./dsi_display_test.h linux-4.4.115-fbx/drivers/gpu/drm/msm/dsi-staging/dsi_display_test.h
--- linux-4.4.115-fbx/drivers/gpu/drm/msm/dsi-staging./dsi_display_test.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/gpu/drm/msm/dsi-staging/dsi_display_test.h	2019-01-22 16:16:23.491246298 +0100
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _DSI_DISPLAY_TEST_H_
+#define _DSI_DISPLAY_TEST_H_
+
+#include "dsi_display.h"
+#include "dsi_ctrl_hw.h"
+#include "dsi_ctrl.h"
+
+struct dsi_display_test {
+	struct dsi_display *display;
+
+	struct work_struct test_work;
+};
+
+int dsi_display_test_init(struct dsi_display *display);
+
+#endif /* _DSI_DISPLAY_TEST_H_ */
diff -Nruw linux-4.4.115-fbx/drivers/gpu/drm/msm/dsi-staging./dsi_drm.c linux-4.4.115-fbx/drivers/gpu/drm/msm/dsi-staging/dsi_drm.c
--- linux-4.4.115-fbx/drivers/gpu/drm/msm/dsi-staging./dsi_drm.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/gpu/drm/msm/dsi-staging/dsi_drm.c	2019-10-29 09:26:23.625203002 +0100
@@ -0,0 +1,544 @@
+/*
+ * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+
+#define pr_fmt(fmt)	"dsi-drm:[%s] " fmt, __func__
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_atomic.h>
+
+#include "msm_kms.h"
+#include "sde_connector.h"
+#include "dsi_drm.h"
+#include "sde_trace.h"
+
+#define to_dsi_bridge(x)     container_of((x), struct dsi_bridge, base)
+#define to_dsi_state(x)      container_of((x), struct dsi_connector_state, base)
+
+static void convert_to_dsi_mode(const struct drm_display_mode *drm_mode,
+				struct dsi_display_mode *dsi_mode)
+{
+	memset(dsi_mode, 0, sizeof(*dsi_mode));
+
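+	/*
+	 * DRM horizontal timings are cumulative:
+	 *   hdisplay <= hsync_start <= hsync_end <= htotal
+	 * so the DSI porches fall out as:
+	 *   h_front_porch = hsync_start - hdisplay
+	 *   h_sync_width  = hsync_end - hsync_start
+	 *   h_back_porch  = htotal - hsync_end
+	 * (vertical timings are derived the same way).
+	 */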
+	dsi_mode->timing.h_active = drm_mode->hdisplay;
+	dsi_mode->timing.h_back_porch = drm_mode->htotal - drm_mode->hsync_end;
+	dsi_mode->timing.h_sync_width = drm_mode->htotal -
+			(drm_mode->hsync_start + dsi_mode->timing.h_back_porch);
+	dsi_mode->timing.h_front_porch = drm_mode->hsync_start -
+					 drm_mode->hdisplay;
+	dsi_mode->timing.h_skew = drm_mode->hskew;
+
+	dsi_mode->timing.v_active = drm_mode->vdisplay;
+	dsi_mode->timing.v_back_porch = drm_mode->vtotal - drm_mode->vsync_end;
+	dsi_mode->timing.v_sync_width = drm_mode->vtotal -
+		(drm_mode->vsync_start + dsi_mode->timing.v_back_porch);
+
+	dsi_mode->timing.v_front_porch = drm_mode->vsync_start -
+					 drm_mode->vdisplay;
+
+	dsi_mode->timing.refresh_rate = drm_mode->vrefresh;
+
+	dsi_mode->pixel_clk_khz = drm_mode->clock;
+	dsi_mode->panel_mode = 0; /* TODO: Panel Mode */
+
+	if (msm_is_mode_seamless(drm_mode))
+		dsi_mode->flags |= DSI_MODE_FLAG_SEAMLESS;
+	if (msm_is_mode_dynamic_fps(drm_mode))
+		dsi_mode->flags |= DSI_MODE_FLAG_DFPS;
+	if (msm_needs_vblank_pre_modeset(drm_mode))
+		dsi_mode->flags |= DSI_MODE_FLAG_VBLANK_PRE_MODESET;
+	dsi_mode->timing.h_sync_polarity =
+		(drm_mode->flags & DRM_MODE_FLAG_PHSYNC) ? false : true;
+	dsi_mode->timing.v_sync_polarity =
+		(drm_mode->flags & DRM_MODE_FLAG_PVSYNC) ? false : true;
+}
+
+static void convert_to_drm_mode(const struct dsi_display_mode *dsi_mode,
+				struct drm_display_mode *drm_mode)
+{
+	memset(drm_mode, 0, sizeof(*drm_mode));
+
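+	/* inverse of convert_to_dsi_mode(): rebuild cumulative DRM timings */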
+	drm_mode->hdisplay = dsi_mode->timing.h_active;
+	drm_mode->hsync_start = drm_mode->hdisplay +
+				dsi_mode->timing.h_front_porch;
+	drm_mode->hsync_end = drm_mode->hsync_start +
+			      dsi_mode->timing.h_sync_width;
+	drm_mode->htotal = drm_mode->hsync_end + dsi_mode->timing.h_back_porch;
+	drm_mode->hskew = dsi_mode->timing.h_skew;
+
+	drm_mode->vdisplay = dsi_mode->timing.v_active;
+	drm_mode->vsync_start = drm_mode->vdisplay +
+				dsi_mode->timing.v_front_porch;
+	drm_mode->vsync_end = drm_mode->vsync_start +
+			      dsi_mode->timing.v_sync_width;
+	drm_mode->vtotal = drm_mode->vsync_end + dsi_mode->timing.v_back_porch;
+
+	drm_mode->vrefresh = dsi_mode->timing.refresh_rate;
+	drm_mode->clock = dsi_mode->pixel_clk_khz;
+
+	if (dsi_mode->flags & DSI_MODE_FLAG_SEAMLESS)
+		drm_mode->flags |= DRM_MODE_FLAG_SEAMLESS;
+	if (dsi_mode->flags & DSI_MODE_FLAG_DFPS)
+		drm_mode->private_flags |= MSM_MODE_FLAG_SEAMLESS_DYNAMIC_FPS;
+	if (dsi_mode->flags & DSI_MODE_FLAG_VBLANK_PRE_MODESET)
+		drm_mode->private_flags |= MSM_MODE_FLAG_VBLANK_PRE_MODESET;
+	drm_mode->flags |= (dsi_mode->timing.h_sync_polarity) ?
+				DRM_MODE_FLAG_NHSYNC : DRM_MODE_FLAG_PHSYNC;
+	drm_mode->flags |= (dsi_mode->timing.v_sync_polarity) ?
+				DRM_MODE_FLAG_NVSYNC : DRM_MODE_FLAG_PVSYNC;
+
+	drm_mode_set_name(drm_mode);
+}
+
+static int dsi_bridge_attach(struct drm_bridge *bridge)
+{
+	struct dsi_bridge *c_bridge = to_dsi_bridge(bridge);
+
+	if (!bridge) {
+		pr_err("Invalid params\n");
+		return -EINVAL;
+	}
+
+	pr_debug("[%d] attached\n", c_bridge->id);
+
+	return 0;
+}
+
+static void dsi_bridge_pre_enable(struct drm_bridge *bridge)
+{
+	int rc = 0;
+	struct dsi_bridge *c_bridge = to_dsi_bridge(bridge);
+
+	if (!bridge) {
+		pr_err("Invalid params\n");
+		return;
+	}
+
+	/* By this point mode should have been validated through mode_fixup */
+	rc = dsi_display_set_mode(c_bridge->display,
+			&(c_bridge->dsi_mode), 0x0);
+	if (rc) {
+		pr_err("[%d] failed to perform a mode set, rc=%d\n",
+		       c_bridge->id, rc);
+		return;
+	}
+
+	if (c_bridge->dsi_mode.flags & DSI_MODE_FLAG_SEAMLESS) {
+		pr_debug("[%d] seamless pre-enable\n", c_bridge->id);
+		return;
+	}
+
+	SDE_ATRACE_BEGIN("dsi_bridge_pre_enable");
+	rc = dsi_display_prepare(c_bridge->display);
+	if (rc) {
+		pr_err("[%d] DSI display prepare failed, rc=%d\n",
+		       c_bridge->id, rc);
+		SDE_ATRACE_END("dsi_bridge_pre_enable");
+		return;
+	}
+
+	SDE_ATRACE_BEGIN("dsi_display_enable");
+	rc = dsi_display_enable(c_bridge->display);
+	if (rc) {
+		pr_err("[%d] DSI display enable failed, rc=%d\n",
+		       c_bridge->id, rc);
+		(void)dsi_display_unprepare(c_bridge->display);
+	}
+	SDE_ATRACE_END("dsi_display_enable");
+	SDE_ATRACE_END("dsi_bridge_pre_enable");
+}
+
+static void dsi_bridge_enable(struct drm_bridge *bridge)
+{
+	int rc = 0;
+	struct dsi_bridge *c_bridge = to_dsi_bridge(bridge);
+
+	if (!bridge) {
+		pr_err("Invalid params\n");
+		return;
+	}
+
+	if (c_bridge->dsi_mode.flags & DSI_MODE_FLAG_SEAMLESS) {
+		pr_debug("[%d] seamless enable\n", c_bridge->id);
+		return;
+	}
+
+	rc = dsi_display_post_enable(c_bridge->display);
+	if (rc)
+		pr_err("[%d] DSI display post enable failed, rc=%d\n",
+		       c_bridge->id, rc);
+}
+
+static void dsi_bridge_disable(struct drm_bridge *bridge)
+{
+	int rc = 0;
+	struct dsi_bridge *c_bridge = to_dsi_bridge(bridge);
+
+	if (!bridge) {
+		pr_err("Invalid params\n");
+		return;
+	}
+
+	rc = dsi_display_pre_disable(c_bridge->display);
+	if (rc) {
+		pr_err("[%d] DSI display pre disable failed, rc=%d\n",
+		       c_bridge->id, rc);
+	}
+}
+
+static void dsi_bridge_post_disable(struct drm_bridge *bridge)
+{
+	int rc = 0;
+	struct dsi_bridge *c_bridge = to_dsi_bridge(bridge);
+
+	if (!bridge) {
+		pr_err("Invalid params\n");
+		return;
+	}
+
+	SDE_ATRACE_BEGIN("dsi_bridge_post_disable");
+	SDE_ATRACE_BEGIN("dsi_display_disable");
+	rc = dsi_display_disable(c_bridge->display);
+	if (rc) {
+		pr_err("[%d] DSI display disable failed, rc=%d\n",
+		       c_bridge->id, rc);
+		SDE_ATRACE_END("dsi_display_disable");
+		return;
+	}
+	SDE_ATRACE_END("dsi_display_disable");
+
+	rc = dsi_display_unprepare(c_bridge->display);
+	if (rc) {
+		pr_err("[%d] DSI display unprepare failed, rc=%d\n",
+		       c_bridge->id, rc);
+		SDE_ATRACE_END("dsi_bridge_post_disable");
+		return;
+	}
+	SDE_ATRACE_END("dsi_bridge_post_disable");
+}
+
+static void dsi_bridge_mode_set(struct drm_bridge *bridge,
+				struct drm_display_mode *mode,
+				struct drm_display_mode *adjusted_mode)
+{
+	struct dsi_bridge *c_bridge = to_dsi_bridge(bridge);
+	struct dsi_panel *panel;
+
+	if (!bridge || !mode || !adjusted_mode || !c_bridge->display ||
+		!c_bridge->display->panel[0]) {
+		pr_err("Invalid params\n");
+		return;
+	}
+
+	/* the DSI DRM bridge always drives the first panel */
+	panel = c_bridge->display->panel[0];
+	memset(&(c_bridge->dsi_mode), 0x0, sizeof(struct dsi_display_mode));
+	convert_to_dsi_mode(adjusted_mode, &(c_bridge->dsi_mode));
+
+	pr_debug("note: using panel cmd/vid mode instead of user val\n");
+	c_bridge->dsi_mode.panel_mode = panel->mode.panel_mode;
+}
+
+static bool dsi_bridge_mode_fixup(struct drm_bridge *bridge,
+				  const struct drm_display_mode *mode,
+				  struct drm_display_mode *adjusted_mode)
+{
+	int rc = 0;
+	bool ret = true;
+	struct dsi_bridge *c_bridge = to_dsi_bridge(bridge);
+	struct dsi_display_mode dsi_mode;
+
+	if (!bridge || !mode || !adjusted_mode) {
+		pr_err("Invalid params\n");
+		return false;
+	}
+
+	convert_to_dsi_mode(mode, &dsi_mode);
+
+	rc = dsi_display_validate_mode(c_bridge->display, &dsi_mode,
+			DSI_VALIDATE_FLAG_ALLOW_ADJUST);
+	if (rc) {
+		pr_err("[%d] mode is not valid, rc=%d\n", c_bridge->id, rc);
+		ret = false;
+	} else {
+		convert_to_drm_mode(&dsi_mode, adjusted_mode);
+	}
+
+	return ret;
+}
+
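+/*
+ * Mapping of the drm_bridge callbacks onto the dsi_display state machine:
+ *   pre_enable   -> dsi_display_set_mode() + prepare() + enable()
+ *   enable       -> dsi_display_post_enable()
+ *   disable      -> dsi_display_pre_disable()
+ *   post_disable -> dsi_display_disable() + dsi_display_unprepare()
+ */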
+static const struct drm_bridge_funcs dsi_bridge_ops = {
+	.attach       = dsi_bridge_attach,
+	.mode_fixup   = dsi_bridge_mode_fixup,
+	.pre_enable   = dsi_bridge_pre_enable,
+	.enable       = dsi_bridge_enable,
+	.disable      = dsi_bridge_disable,
+	.post_disable = dsi_bridge_post_disable,
+	.mode_set     = dsi_bridge_mode_set,
+};
+
+int dsi_conn_post_init(struct drm_connector *connector,
+		void *info,
+		void *display)
+{
+	struct dsi_display *dsi_display = display;
+	struct dsi_panel *panel;
+	int i;
+
+	if (!info || !dsi_display)
+		return -EINVAL;
+
+	sde_kms_info_add_keystr(info,
+		"display type", dsi_display->display_type);
+
+	switch (dsi_display->type) {
+	case DSI_DISPLAY_SINGLE:
+		sde_kms_info_add_keystr(info, "display config",
+					"single display");
+		break;
+	case DSI_DISPLAY_EXT_BRIDGE:
+		sde_kms_info_add_keystr(info, "display config", "ext bridge");
+		break;
+	case DSI_DISPLAY_SPLIT:
+		sde_kms_info_add_keystr(info, "display config",
+					"split display");
+		break;
+	case DSI_DISPLAY_SPLIT_EXT_BRIDGE:
+		sde_kms_info_add_keystr(info, "display config",
+					"split ext bridge");
+		break;
+	default:
+		pr_debug("invalid display type:%d\n", dsi_display->type);
+		break;
+	}
+
+	for (i = 0; i < dsi_display->panel_count; i++) {
+		if (!dsi_display->panel[i]) {
+			pr_debug("invalid panel data\n");
+			goto end;
+		}
+
+		panel = dsi_display->panel[i];
+		sde_kms_info_add_keystr(info, "panel name", panel->name);
+
+		switch (panel->mode.panel_mode) {
+		case DSI_OP_VIDEO_MODE:
+			sde_kms_info_add_keystr(info, "panel mode", "video");
+			break;
+		case DSI_OP_CMD_MODE:
+			sde_kms_info_add_keystr(info, "panel mode", "command");
+			break;
+		default:
+			pr_debug("invalid panel type:%d\n",
+					panel->mode.panel_mode);
+			break;
+		}
+		sde_kms_info_add_keystr(info, "dfps support",
+				panel->dfps_caps.dfps_support ?
+					"true" : "false");
+
+		switch (panel->phy_props.rotation) {
+		case DSI_PANEL_ROTATE_NONE:
+			sde_kms_info_add_keystr(info, "panel orientation",
+						"none");
+			break;
+		case DSI_PANEL_ROTATE_H_FLIP:
+			sde_kms_info_add_keystr(info, "panel orientation",
+						"horz flip");
+			break;
+		case DSI_PANEL_ROTATE_V_FLIP:
+			sde_kms_info_add_keystr(info, "panel orientation",
+						"vert flip");
+			break;
+		default:
+			pr_debug("invalid panel rotation:%d\n",
+						panel->phy_props.rotation);
+			break;
+		}
+
+		switch (panel->bl_config.type) {
+		case DSI_BACKLIGHT_PWM:
+			sde_kms_info_add_keystr(info, "backlight type", "pwm");
+			break;
+		case DSI_BACKLIGHT_WLED:
+			sde_kms_info_add_keystr(info, "backlight type", "wled");
+			break;
+		case DSI_BACKLIGHT_DCS:
+			sde_kms_info_add_keystr(info, "backlight type", "dcs");
+			break;
+		default:
+			pr_debug("invalid panel backlight type:%d\n",
+							panel->bl_config.type);
+			break;
+		}
+	}
+
+end:
+	return 0;
+}
+
+enum drm_connector_status dsi_conn_detect(struct drm_connector *conn,
+		bool force,
+		void *display)
+{
+	enum drm_connector_status status = connector_status_unknown;
+	struct msm_display_info info;
+	int rc;
+
+	if (!conn || !display)
+		return status;
+
+	/* get the display info */
+	memset(&info, 0x0, sizeof(info));
+	rc = dsi_display_get_info(&info, display);
+	if (rc) {
+		pr_err("failed to get display info, rc=%d\n", rc);
+		return connector_status_disconnected;
+	}
+
+	if (info.capabilities & MSM_DISPLAY_CAP_HOT_PLUG)
+		status = (info.is_connected ? connector_status_connected :
+					      connector_status_disconnected);
+	else
+		status = connector_status_connected;
+
+	conn->display_info.width_mm = info.width_mm;
+	conn->display_info.height_mm = info.height_mm;
+
+	return status;
+}
+
+int dsi_connector_get_modes(struct drm_connector *connector,
+		void *display)
+{
+	u32 count = 0;
+	u32 size = 0;
+	struct dsi_display_mode *modes;
+	struct drm_display_mode drm_mode;
+	int rc, i;
+
+	if (sde_connector_get_panel(connector)) {
+		/*
+		 * TODO: If drm_panel is attached, query modes from the panel.
+		 * This is complicated in split dsi cases because panel is not
+		 * attached to both connectors.
+		 */
+		goto end;
+	}
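+	/*
+	 * Two-step query: the first dsi_display_get_modes() call with a
+	 * NULL modes pointer only returns the count, the second call
+	 * fills the array.
+	 */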
+	rc = dsi_display_get_modes(display, NULL, &count);
+	if (rc) {
+		pr_err("failed to get num of modes, rc=%d\n", rc);
+		goto error;
+	}
+
+	size = count * sizeof(*modes);
+	modes = kzalloc(size, GFP_KERNEL);
+	if (!modes) {
+		count = 0;
+		goto end;
+	}
+
+	rc = dsi_display_get_modes(display, modes, &count);
+	if (rc) {
+		pr_err("failed to get modes, rc=%d\n", rc);
+		count = 0;
+		goto error;
+	}
+
+	for (i = 0; i < count; i++) {
+		struct drm_display_mode *m;
+
+		memset(&drm_mode, 0x0, sizeof(drm_mode));
+		convert_to_drm_mode(&modes[i], &drm_mode);
+		m = drm_mode_duplicate(connector->dev, &drm_mode);
+		if (!m) {
+			pr_err("failed to add mode %ux%u\n",
+			       drm_mode.hdisplay,
+			       drm_mode.vdisplay);
+			count = -ENOMEM;
+			goto error;
+		}
+		m->width_mm = connector->display_info.width_mm;
+		m->height_mm = connector->display_info.height_mm;
+		drm_mode_probed_add(connector, m);
+	}
+error:
+	kfree(modes);
+end:
+	pr_debug("mode count = %d\n", count);
+	return count;
+}
+
+enum drm_mode_status dsi_conn_mode_valid(struct drm_connector *connector,
+		struct drm_display_mode *mode,
+		void *display)
+{
+	struct dsi_display_mode dsi_mode;
+	int rc;
+
+	if (!connector || !mode) {
+		pr_err("Invalid params\n");
+		return MODE_ERROR;
+	}
+
+	convert_to_dsi_mode(mode, &dsi_mode);
+
+	rc = dsi_display_validate_mode(display, &dsi_mode,
+			DSI_VALIDATE_FLAG_ALLOW_ADJUST);
+	if (rc) {
+		pr_err("mode not supported, rc=%d\n", rc);
+		return MODE_BAD;
+	}
+
+	return MODE_OK;
+}
+
+struct dsi_bridge *dsi_drm_bridge_init(struct dsi_display *display,
+				       struct drm_device *dev,
+				       struct drm_encoder *encoder)
+{
+	int rc = 0;
+	struct dsi_bridge *bridge;
+
+	bridge = kzalloc(sizeof(*bridge), GFP_KERNEL);
+	if (!bridge) {
+		rc = -ENOMEM;
+		goto error;
+	}
+
+	bridge->display = display;
+	bridge->base.funcs = &dsi_bridge_ops;
+	bridge->base.encoder = encoder;
+
+	rc = drm_bridge_attach(dev, &bridge->base);
+	if (rc) {
+		pr_err("failed to attach bridge, rc=%d\n", rc);
+		goto error_free_bridge;
+	}
+
+	encoder->bridge = &bridge->base;
+	return bridge;
+error_free_bridge:
+	kfree(bridge);
+error:
+	return ERR_PTR(rc);
+}
+
+void dsi_drm_bridge_cleanup(struct dsi_bridge *bridge)
+{
+	if (bridge && bridge->base.encoder)
+		bridge->base.encoder->bridge = NULL;
+
+	kfree(bridge);
+}
diff -Nruw linux-4.4.115-fbx/drivers/gpu/drm/msm/dsi-staging./dsi_drm.h linux-4.4.115-fbx/drivers/gpu/drm/msm/dsi-staging/dsi_drm.h
--- linux-4.4.115-fbx/drivers/gpu/drm/msm/dsi-staging./dsi_drm.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/gpu/drm/msm/dsi-staging/dsi_drm.h	2019-01-22 16:16:23.491246298 +0100
@@ -0,0 +1,83 @@
+/*
+ * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _DSI_DRM_H_
+#define _DSI_DRM_H_
+
+#include <linux/types.h>
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+
+#include "msm_drv.h"
+
+#include "dsi_display.h"
+
+struct dsi_bridge {
+	struct drm_bridge base;
+	u32 id;
+
+	struct dsi_display *display;
+	struct dsi_display_mode dsi_mode;
+};
+
+/**
+ * dsi_conn_post_init - callback to perform additional initialization steps
+ * @connector: Pointer to drm connector structure
+ * @info: Pointer to sde connector info structure
+ * @display: Pointer to private display handle
+ * Returns: Zero on success
+ */
+int dsi_conn_post_init(struct drm_connector *connector,
+		void *info,
+		void *display);
+
+/**
+ * dsi_conn_detect - callback to determine if connector is connected
+ * @conn: Pointer to drm connector structure
+ * @force: Force detect setting from drm framework
+ * @display: Pointer to private display handle
+ * Returns: Connector 'is connected' status
+ */
+enum drm_connector_status dsi_conn_detect(struct drm_connector *conn,
+		bool force,
+		void *display);
+
+/**
+ * dsi_connector_get_modes - callback to add drm modes via drm_mode_probed_add()
+ * @connector: Pointer to drm connector structure
+ * @display: Pointer to private display handle
+ * Returns: Number of modes added
+ */
+int dsi_connector_get_modes(struct drm_connector *connector,
+		void *display);
+
+/**
+ * dsi_conn_mode_valid - callback to determine if specified mode is valid
+ * @connector: Pointer to drm connector structure
+ * @mode: Pointer to drm mode structure
+ * @display: Pointer to private display handle
+ * Returns: Validity status for specified mode
+ */
+enum drm_mode_status dsi_conn_mode_valid(struct drm_connector *connector,
+		struct drm_display_mode *mode,
+		void *display);
+
+struct dsi_bridge *dsi_drm_bridge_init(struct dsi_display *display,
+		struct drm_device *dev,
+		struct drm_encoder *encoder);
+
+void dsi_drm_bridge_cleanup(struct dsi_bridge *bridge);
+
+#endif /* _DSI_DRM_H_ */
diff -Nruw linux-4.4.115-fbx/drivers/gpu/drm/msm/dsi-staging./dsi_hw.h linux-4.4.115-fbx/drivers/gpu/drm/msm/dsi-staging/dsi_hw.h
--- linux-4.4.115-fbx/drivers/gpu/drm/msm/dsi-staging./dsi_hw.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/gpu/drm/msm/dsi-staging/dsi_hw.h	2019-01-22 16:16:23.491246298 +0100
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _DSI_HW_H_
+#define _DSI_HW_H_
+#include <linux/io.h>
+
+#define DSI_R32(dsi_hw, off) readl_relaxed((dsi_hw)->base + (off))
+#define DSI_W32(dsi_hw, off, val) \
+	do {\
+		pr_debug("[DSI_%d][%s] - [0x%08x]\n", \
+			(dsi_hw)->index, #off, val); \
+		writel_relaxed((val), (dsi_hw)->base + (off)); \
+	} while (0)
+
+#define DSI_MMSS_MISC_R32(dsi_hw, off) \
+	readl_relaxed((dsi_hw)->mmss_misc_base + (off))
+#define DSI_MMSS_MISC_W32(dsi_hw, off, val) \
+	do {\
+		pr_debug("[DSI_%d][%s] - [0x%08x]\n", \
+			(dsi_hw)->index, #off, val); \
+		writel_relaxed((val), (dsi_hw)->mmss_misc_base + (off)); \
+	} while (0)
+
+#define DSI_R64(dsi_hw, off) readq_relaxed((dsi_hw)->base + (off))
+#define DSI_W64(dsi_hw, off, val) writeq_relaxed((val), (dsi_hw)->base + (off))
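+
+/*
+ * All accessors use the _relaxed MMIO variants: no memory barriers are
+ * implied, so callers that need ordering against DMA or another bus
+ * master must add explicit barriers (e.g. wmb()) themselves.
+ */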
+
+#endif /* _DSI_HW_H_ */
diff -Nruw linux-4.4.115-fbx/drivers/gpu/drm/msm/dsi-staging./dsi_panel.c linux-4.4.115-fbx/drivers/gpu/drm/msm/dsi-staging/dsi_panel.c
--- linux-4.4.115-fbx/drivers/gpu/drm/msm/dsi-staging./dsi_panel.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/gpu/drm/msm/dsi-staging/dsi_panel.c	2019-01-22 16:16:23.491246298 +0100
@@ -0,0 +1,2039 @@
+/*
+ * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/gpio.h>
+#include <linux/of_gpio.h>
+
+#include "sde_kms.h"
+#include "dsi_panel.h"
+#include "dsi_ctrl_hw.h"
+
+#define DSI_PANEL_DEFAULT_LABEL  "Default dsi panel"
+
+#define DEFAULT_MDP_TRANSFER_TIME 14000
+
+static int dsi_panel_vreg_get(struct dsi_panel *panel)
+{
+	int rc = 0;
+	int i;
+	struct regulator *vreg = NULL;
+
+	for (i = 0; i < panel->power_info.count; i++) {
+		vreg = devm_regulator_get(panel->parent,
+					  panel->power_info.vregs[i].vreg_name);
+		rc = PTR_RET(vreg);
+		if (rc) {
+			pr_err("failed to get %s regulator\n",
+			       panel->power_info.vregs[i].vreg_name);
+			goto error_put;
+		}
+		panel->power_info.vregs[i].vreg = vreg;
+	}
+
+	return rc;
+error_put:
+	for (i = i - 1; i >= 0; i--) {
+		devm_regulator_put(panel->power_info.vregs[i].vreg);
+		panel->power_info.vregs[i].vreg = NULL;
+	}
+	return rc;
+}
+
+static int dsi_panel_vreg_put(struct dsi_panel *panel)
+{
+	int rc = 0;
+	int i;
+
+	for (i = panel->power_info.count - 1; i >= 0; i--)
+		devm_regulator_put(panel->power_info.vregs[i].vreg);
+
+	return rc;
+}
+
+static int dsi_panel_gpio_request(struct dsi_panel *panel)
+{
+	int rc = 0;
+	struct dsi_panel_reset_config *r_config = &panel->reset_config;
+
+	if (gpio_is_valid(r_config->reset_gpio)) {
+		rc = gpio_request(r_config->reset_gpio, "reset_gpio");
+		if (rc) {
+			pr_err("request for reset_gpio failed, rc=%d\n", rc);
+			goto error;
+		}
+	}
+
+	if (gpio_is_valid(r_config->disp_en_gpio)) {
+		rc = gpio_request(r_config->disp_en_gpio, "disp_en_gpio");
+		if (rc) {
+			pr_err("request for disp_en_gpio failed, rc=%d\n", rc);
+			goto error_release_reset;
+		}
+	}
+
+	if (gpio_is_valid(panel->bl_config.en_gpio)) {
+		rc = gpio_request(panel->bl_config.en_gpio, "bklt_en_gpio");
+		if (rc) {
+			pr_err("request for bklt_en_gpio failed, rc=%d\n", rc);
+			goto error_release_disp_en;
+		}
+	}
+
+	goto error;
+error_release_disp_en:
+	if (gpio_is_valid(r_config->disp_en_gpio))
+		gpio_free(r_config->disp_en_gpio);
+error_release_reset:
+	if (gpio_is_valid(r_config->reset_gpio))
+		gpio_free(r_config->reset_gpio);
+error:
+	return rc;
+}
+
+static int dsi_panel_gpio_release(struct dsi_panel *panel)
+{
+	int rc = 0;
+	struct dsi_panel_reset_config *r_config = &panel->reset_config;
+
+	if (gpio_is_valid(r_config->reset_gpio))
+		gpio_free(r_config->reset_gpio);
+
+	if (gpio_is_valid(r_config->disp_en_gpio))
+		gpio_free(r_config->disp_en_gpio);
+
+	if (gpio_is_valid(panel->bl_config.en_gpio))
+		gpio_free(panel->bl_config.en_gpio);
+
+	return rc;
+}
+
+static int dsi_panel_reset(struct dsi_panel *panel)
+{
+	int rc = 0;
+	struct dsi_panel_reset_config *r_config = &panel->reset_config;
+	int i;
+
+	if (gpio_is_valid(panel->reset_config.disp_en_gpio)) {
+		rc = gpio_direction_output(panel->reset_config.disp_en_gpio, 1);
+		if (rc) {
+			pr_err("unable to set dir for disp gpio rc=%d\n", rc);
+			goto exit;
+		}
+	}
+
+	if (r_config->count) {
+		rc = gpio_direction_output(r_config->reset_gpio,
+			r_config->sequence[0].level);
+		if (rc) {
+			pr_err("unable to set dir for rst gpio rc=%d\n", rc);
+			goto exit;
+		}
+	}
+
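+	/*
+	 * Walk the DT-provided reset sequence; sleep_ms is specified in
+	 * milliseconds and converted to microseconds for usleep_range().
+	 */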
+	for (i = 0; i < r_config->count; i++) {
+		gpio_set_value(r_config->reset_gpio,
+			       r_config->sequence[i].level);
+
+
+		if (r_config->sequence[i].sleep_ms)
+			usleep_range(r_config->sequence[i].sleep_ms * 1000,
+				     r_config->sequence[i].sleep_ms * 1000);
+	}
+
+	if (gpio_is_valid(panel->bl_config.en_gpio)) {
+		rc = gpio_direction_output(panel->bl_config.en_gpio, 1);
+		if (rc)
+			pr_err("unable to set dir for bklt gpio rc=%d\n", rc);
+	}
+exit:
+	return rc;
+}
+
+static int dsi_panel_set_pinctrl_state(struct dsi_panel *panel, bool enable)
+{
+	int rc = 0;
+	struct pinctrl_state *state;
+
+	if (enable)
+		state = panel->pinctrl.active;
+	else
+		state = panel->pinctrl.suspend;
+
+	if (panel->pinctrl.pinctrl && state) {
+		rc = pinctrl_select_state(panel->pinctrl.pinctrl, state);
+		if (rc)
+			pr_err("[%s] failed to set pin state, rc=%d\n",
+				panel->name, rc);
+	}
+
+	return rc;
+}
+
+
+static int dsi_panel_power_on(struct dsi_panel *panel)
+{
+	int rc = 0;
+
+	rc = dsi_pwr_enable_regulator(&panel->power_info, true);
+	if (rc) {
+		pr_err("[%s] failed to enable vregs, rc=%d\n", panel->name, rc);
+		goto exit;
+	}
+
+	rc = dsi_panel_set_pinctrl_state(panel, true);
+	if (rc) {
+		pr_err("[%s] failed to set pinctrl, rc=%d\n", panel->name, rc);
+		goto error_disable_vregs;
+	}
+
+	rc = dsi_panel_reset(panel);
+	if (rc) {
+		pr_err("[%s] failed to reset panel, rc=%d\n", panel->name, rc);
+		goto error_disable_gpio;
+	}
+
+	goto exit;
+
+error_disable_gpio:
+	if (gpio_is_valid(panel->reset_config.disp_en_gpio))
+		gpio_set_value(panel->reset_config.disp_en_gpio, 0);
+
+	if (gpio_is_valid(panel->bl_config.en_gpio))
+		gpio_set_value(panel->bl_config.en_gpio, 0);
+
+	(void)dsi_panel_set_pinctrl_state(panel, false);
+
+error_disable_vregs:
+	(void)dsi_pwr_enable_regulator(&panel->power_info, false);
+
+exit:
+	return rc;
+}
+
+static int dsi_panel_power_off(struct dsi_panel *panel)
+{
+	int rc = 0;
+
+	if (gpio_is_valid(panel->reset_config.disp_en_gpio))
+		gpio_set_value(panel->reset_config.disp_en_gpio, 0);
+
+	if (gpio_is_valid(panel->reset_config.reset_gpio))
+		gpio_set_value(panel->reset_config.reset_gpio, 0);
+
+	rc = dsi_panel_set_pinctrl_state(panel, false);
+	if (rc) {
+		pr_err("[%s] failed set pinctrl state, rc=%d\n", panel->name,
+		       rc);
+	}
+
+	rc = dsi_pwr_enable_regulator(&panel->power_info, false);
+	if (rc)
+		pr_err("[%s] failed to disable vregs, rc=%d\n", panel->name, rc);
+
+	return rc;
+}
+
+static int dsi_panel_tx_cmd_set(struct dsi_panel *panel,
+				enum dsi_cmd_set_type type)
+{
+	int rc = 0, i = 0;
+	ssize_t len;
+	struct dsi_cmd_desc *cmds = panel->cmd_sets[type].cmds;
+	u32 count = panel->cmd_sets[type].count;
+	enum dsi_cmd_set_state state = panel->cmd_sets[type].state;
+	const struct mipi_dsi_host_ops *ops = panel->host->ops;
+
+	if (count == 0) {
+		pr_debug("[%s] No commands to be sent for state(%d)\n",
+			 panel->name, type);
+		goto error;
+	}
+
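+	/*
+	 * Send every command in the set; DSI_CMD_SET_STATE_LP forces
+	 * low-power transmission and post_wait_ms throttles back-to-back
+	 * commands.
+	 */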
+	for (i = 0; i < count; i++) {
+		/* TODO:  handle last command */
+		if (state == DSI_CMD_SET_STATE_LP)
+			cmds->msg.flags |= MIPI_DSI_MSG_USE_LPM;
+
+		len = ops->transfer(panel->host, &cmds->msg);
+		if (len < 0) {
+			rc = len;
+			pr_err("failed to set cmds(%d), rc=%d\n", type, rc);
+			goto error;
+		}
+		if (cmds->post_wait_ms)
+			msleep(cmds->post_wait_ms);
+		cmds++;
+	}
+error:
+	return rc;
+}
+
+static int dsi_panel_pinctrl_deinit(struct dsi_panel *panel)
+{
+	int rc = 0;
+
+	devm_pinctrl_put(panel->pinctrl.pinctrl);
+
+	return rc;
+}
+
+static int dsi_panel_pinctrl_init(struct dsi_panel *panel)
+{
+	int rc = 0;
+
+	/* TODO:  pinctrl is defined in dsi dt node */
+	panel->pinctrl.pinctrl = devm_pinctrl_get(panel->parent);
+	if (IS_ERR_OR_NULL(panel->pinctrl.pinctrl)) {
+		rc = PTR_ERR(panel->pinctrl.pinctrl);
+		pr_err("failed to get pinctrl, rc=%d\n", rc);
+		goto error;
+	}
+
+	panel->pinctrl.active = pinctrl_lookup_state(panel->pinctrl.pinctrl,
+						       "panel_active");
+	if (IS_ERR_OR_NULL(panel->pinctrl.active)) {
+		rc = PTR_ERR(panel->pinctrl.active);
+		pr_err("failed to get pinctrl active state, rc=%d\n", rc);
+		goto error;
+	}
+
+	panel->pinctrl.suspend =
+		pinctrl_lookup_state(panel->pinctrl.pinctrl, "panel_suspend");
+
+	if (IS_ERR_OR_NULL(panel->pinctrl.suspend)) {
+		rc = PTR_ERR(panel->pinctrl.suspend);
+		pr_err("failed to get pinctrl suspend state, rc=%d\n", rc);
+		goto error;
+	}
+
+error:
+	return rc;
+}
+
+#ifdef CONFIG_LEDS_TRIGGERS
+static int dsi_panel_led_bl_register(struct dsi_panel *panel,
+				struct dsi_backlight_config *bl)
+{
+	int rc = 0;
+
+	led_trigger_register_simple("bkl-trigger", &bl->wled);
+
+	/* LED APIs don't tell us directly whether a classdev has yet
+	 * been registered to service this trigger. Until classdev is
+	 * registered, calling led_trigger has no effect, and doesn't
+	 * fail. Classdevs are associated with any registered triggers
+	 * when they do register, but that is too late for FBCon.
+	 * Check the cdev list directly and defer if appropriate.
+	 */
+	if (!bl->wled) {
+		pr_err("[%s] backlight registration failed\n", panel->name);
+		rc = -EINVAL;
+	} else {
+		read_lock(&bl->wled->leddev_list_lock);
+		if (list_empty(&bl->wled->led_cdevs))
+			rc = -EPROBE_DEFER;
+		read_unlock(&bl->wled->leddev_list_lock);
+
+		if (rc) {
+			pr_info("[%s] backlight %s not ready, defer probe\n",
+				panel->name, bl->wled->name);
+			led_trigger_unregister_simple(bl->wled);
+		}
+	}
+
+	return rc;
+}
+#else
+static int dsi_panel_led_bl_register(struct dsi_panel *panel,
+				struct dsi_backlight_config *bl)
+{
+	return 0;
+}
+#endif
+
+int dsi_panel_set_backlight(struct dsi_panel *panel, u32 bl_lvl)
+{
+	int rc = 0;
+	struct dsi_backlight_config *bl = &panel->bl_config;
+
+	switch (bl->type) {
+	case DSI_BACKLIGHT_WLED:
+		led_trigger_event(bl->wled, bl_lvl);
+		break;
+	default:
+		pr_err("Backlight type(%d) not supported\n", bl->type);
+		rc = -ENOTSUPP;
+	}
+
+	return rc;
+}
+
+static int dsi_panel_bl_register(struct dsi_panel *panel)
+{
+	int rc = 0;
+	struct dsi_backlight_config *bl = &panel->bl_config;
+
+	switch (bl->type) {
+	case DSI_BACKLIGHT_WLED:
+		rc = dsi_panel_led_bl_register(panel, bl);
+		break;
+	case DSI_BACKLIGHT_UNKNOWN:
+		DRM_INFO("backlight type is unknown\n");
+		break;
+	default:
+		pr_err("Backlight type(%d) not supported\n", bl->type);
+		rc = -ENOTSUPP;
+		goto error;
+	}
+
+error:
+	return rc;
+}
+
+static int dsi_panel_bl_unregister(struct dsi_panel *panel)
+{
+	int rc = 0;
+	struct dsi_backlight_config *bl = &panel->bl_config;
+
+	switch (bl->type) {
+	case DSI_BACKLIGHT_WLED:
+		led_trigger_unregister_simple(bl->wled);
+		break;
+	default:
+		pr_err("Backlight type(%d) not supported\n", bl->type);
+		rc = -ENOTSUPP;
+		goto error;
+	}
+
+error:
+	return rc;
+}
+
+static int dsi_panel_parse_timing(struct dsi_mode_info *mode,
+				  struct device_node *of_node)
+{
+	int rc = 0;
+
+	rc = of_property_read_u32(of_node, "qcom,mdss-dsi-panel-framerate",
+				  &mode->refresh_rate);
+	if (rc) {
+		pr_err("failed to read qcom,mdss-dsi-panel-framerate, rc=%d\n",
+		       rc);
+		goto error;
+	}
+
+	rc = of_property_read_u32(of_node, "qcom,mdss-dsi-panel-width",
+				  &mode->h_active);
+	if (rc) {
+		pr_err("failed to read qcom,mdss-dsi-panel-width, rc=%d\n", rc);
+		goto error;
+	}
+
+	rc = of_property_read_u32(of_node, "qcom,mdss-dsi-h-front-porch",
+				  &mode->h_front_porch);
+	if (rc) {
+		pr_err("failed to read qcom,mdss-dsi-h-front-porch, rc=%d\n",
+		       rc);
+		goto error;
+	}
+
+	rc = of_property_read_u32(of_node, "qcom,mdss-dsi-h-back-porch",
+				  &mode->h_back_porch);
+	if (rc) {
+		pr_err("failed to read qcom,mdss-dsi-h-back-porch, rc=%d\n",
+		       rc);
+		goto error;
+	}
+
+	rc = of_property_read_u32(of_node, "qcom,mdss-dsi-h-pulse-width",
+				  &mode->h_sync_width);
+	if (rc) {
+		pr_err("failed to read qcom,mdss-dsi-h-pulse-width, rc=%d\n",
+		       rc);
+		goto error;
+	}
+
+	rc = of_property_read_u32(of_node, "qcom,mdss-dsi-h-sync-skew",
+				  &mode->h_skew);
+	if (rc)
+		pr_err("qcom,mdss-dsi-h-sync-skew is not defined, rc=%d\n", rc);
+
+	rc = of_property_read_u32(of_node, "qcom,mdss-dsi-panel-height",
+				  &mode->v_active);
+	if (rc) {
+		pr_err("failed to read qcom,mdss-dsi-panel-height, rc=%d\n",
+		       rc);
+		goto error;
+	}
+
+	rc = of_property_read_u32(of_node, "qcom,mdss-dsi-v-back-porch",
+				  &mode->v_back_porch);
+	if (rc) {
+		pr_err("failed to read qcom,mdss-dsi-v-back-porch, rc=%d\n",
+		       rc);
+		goto error;
+	}
+
+	rc = of_property_read_u32(of_node, "qcom,mdss-dsi-v-front-porch",
+				  &mode->v_front_porch);
+	if (rc) {
+		pr_err("failed to read qcom,mdss-dsi-v-back-porch, rc=%d\n",
+		       rc);
+		goto error;
+	}
+
+	rc = of_property_read_u32(of_node, "qcom,mdss-dsi-v-pulse-width",
+				  &mode->v_sync_width);
+	if (rc) {
+		pr_err("failed to read qcom,mdss-dsi-v-pulse-width, rc=%d\n",
+		       rc);
+		goto error;
+	}
+
+error:
+	return rc;
+}
+
+static int dsi_panel_parse_pixel_format(struct dsi_host_common_cfg *host,
+					struct device_node *of_node,
+					const char *name)
+{
+	int rc = 0;
+	u32 bpp = 0;
+	enum dsi_pixel_format fmt;
+	const char *packing;
+
+	rc = of_property_read_u32(of_node, "qcom,mdss-dsi-bpp", &bpp);
+	if (rc) {
+		pr_err("[%s] failed to read qcom,mdss-dsi-bpp, rc=%d\n",
+		       name, rc);
+		return rc;
+	}
+
+	switch (bpp) {
+	case 3:
+		fmt = DSI_PIXEL_FORMAT_RGB111;
+		break;
+	case 8:
+		fmt = DSI_PIXEL_FORMAT_RGB332;
+		break;
+	case 12:
+		fmt = DSI_PIXEL_FORMAT_RGB444;
+		break;
+	case 16:
+		fmt = DSI_PIXEL_FORMAT_RGB565;
+		break;
+	case 18:
+		fmt = DSI_PIXEL_FORMAT_RGB666;
+		break;
+	case 24:
+	default:
+		fmt = DSI_PIXEL_FORMAT_RGB888;
+		break;
+	}
+
+	if (fmt == DSI_PIXEL_FORMAT_RGB666) {
+		packing = of_get_property(of_node,
+					  "qcom,mdss-dsi-pixel-packing",
+					  NULL);
+		if (packing && !strcmp(packing, "loose"))
+			fmt = DSI_PIXEL_FORMAT_RGB666_LOOSE;
+	}
+
+	host->dst_format = fmt;
+	return rc;
+}
+
+static int dsi_panel_parse_lane_states(struct dsi_host_common_cfg *host,
+				       struct device_node *of_node,
+				       const char *name)
+{
+	int rc = 0;
+	bool lane_enabled;
+
+	lane_enabled = of_property_read_bool(of_node,
+					    "qcom,mdss-dsi-lane-0-state");
+	host->data_lanes |= (lane_enabled ? DSI_DATA_LANE_0 : 0);
+
+	lane_enabled = of_property_read_bool(of_node,
+					     "qcom,mdss-dsi-lane-1-state");
+	host->data_lanes |= (lane_enabled ? DSI_DATA_LANE_1 : 0);
+
+	lane_enabled = of_property_read_bool(of_node,
+					    "qcom,mdss-dsi-lane-2-state");
+	host->data_lanes |= (lane_enabled ? DSI_DATA_LANE_2 : 0);
+
+	lane_enabled = of_property_read_bool(of_node,
+					     "qcom,mdss-dsi-lane-3-state");
+	host->data_lanes |= (lane_enabled ? DSI_DATA_LANE_3 : 0);
+
+	if (host->data_lanes == 0) {
+		rc = -EINVAL;
+		pr_err("[%s] No data lanes are enabled, rc=%d\n", name, rc);
+	}
+
+	return rc;
+}
+
+static int dsi_panel_parse_color_swap(struct dsi_host_common_cfg *host,
+				      struct device_node *of_node,
+				      const char *name)
+{
+	int rc = 0;
+	const char *swap_mode;
+
+	swap_mode = of_get_property(of_node, "qcom,mdss-dsi-color-order", NULL);
+	if (swap_mode) {
+		if (!strcmp(swap_mode, "rgb_swap_rgb")) {
+			host->swap_mode = DSI_COLOR_SWAP_RGB;
+		} else if (!strcmp(swap_mode, "rgb_swap_rbg")) {
+			host->swap_mode = DSI_COLOR_SWAP_RBG;
+		} else if (!strcmp(swap_mode, "rgb_swap_brg")) {
+			host->swap_mode = DSI_COLOR_SWAP_BRG;
+		} else if (!strcmp(swap_mode, "rgb_swap_grb")) {
+			host->swap_mode = DSI_COLOR_SWAP_GRB;
+		} else if (!strcmp(swap_mode, "rgb_swap_gbr")) {
+			host->swap_mode = DSI_COLOR_SWAP_GBR;
+		} else {
+			pr_err("[%s] Unrecognized color order-%s\n",
+			       name, swap_mode);
+			rc = -EINVAL;
+		}
+	} else {
+		pr_debug("[%s] Falling back to default color order\n", name);
+		host->swap_mode = DSI_COLOR_SWAP_RGB;
+	}
+
+	/* bit swap on color channel is not defined in dt */
+	host->bit_swap_red = false;
+	host->bit_swap_green = false;
+	host->bit_swap_blue = false;
+	return rc;
+}
+
+static int dsi_panel_parse_triggers(struct dsi_host_common_cfg *host,
+				    struct device_node *of_node,
+				    const char *name)
+{
+	const char *trig;
+	int rc = 0;
+
+	trig = of_get_property(of_node, "qcom,mdss-dsi-mdp-trigger", NULL);
+	if (trig) {
+		if (!strcmp(trig, "none")) {
+			host->mdp_cmd_trigger = DSI_TRIGGER_NONE;
+		} else if (!strcmp(trig, "trigger_te")) {
+			host->mdp_cmd_trigger = DSI_TRIGGER_TE;
+		} else if (!strcmp(trig, "trigger_sw")) {
+			host->mdp_cmd_trigger = DSI_TRIGGER_SW;
+		} else if (!strcmp(trig, "trigger_sw_te")) {
+			host->mdp_cmd_trigger = DSI_TRIGGER_SW_TE;
+		} else {
+			pr_err("[%s] Unrecognized mdp trigger type (%s)\n",
+			       name, trig);
+			rc = -EINVAL;
+		}
+
+	} else {
+		pr_debug("[%s] Falling back to default MDP trigger\n",
+			 name);
+		host->mdp_cmd_trigger = DSI_TRIGGER_SW;
+	}
+
+	trig = of_get_property(of_node, "qcom,mdss-dsi-dma-trigger", NULL);
+	if (trig) {
+		if (!strcmp(trig, "none")) {
+			host->dma_cmd_trigger = DSI_TRIGGER_NONE;
+		} else if (!strcmp(trig, "trigger_te")) {
+			host->dma_cmd_trigger = DSI_TRIGGER_TE;
+		} else if (!strcmp(trig, "trigger_sw")) {
+			host->dma_cmd_trigger = DSI_TRIGGER_SW;
+		} else if (!strcmp(trig, "trigger_sw_seof")) {
+			host->dma_cmd_trigger = DSI_TRIGGER_SW_SEOF;
+		} else if (!strcmp(trig, "trigger_sw_te")) {
+			host->dma_cmd_trigger = DSI_TRIGGER_SW_TE;
+		} else {
+			pr_err("[%s] Unrecognized mdp trigger type (%s)\n",
+			       name, trig);
+			rc = -EINVAL;
+		}
+
+	} else {
+		pr_debug("[%s] Falling back to default MDP trigger\n", name);
+		host->dma_cmd_trigger = DSI_TRIGGER_SW;
+	}
+
+	return rc;
+}
+
+static int dsi_panel_parse_misc_host_config(struct dsi_host_common_cfg *host,
+					    struct device_node *of_node,
+					    const char *name)
+{
+	u32 val = 0;
+	int rc = 0;
+
+	rc = of_property_read_u32(of_node, "qcom,mdss-dsi-t-clk-post", &val);
+	if (rc) {
+		pr_debug("[%s] Fallback to default t_clk_post value\n", name);
+		host->t_clk_post = 0x03;
+	} else {
+		host->t_clk_post = val;
+		pr_debug("[%s] t_clk_post = %d\n", name, val);
+	}
+
+	val = 0;
+	rc = of_property_read_u32(of_node, "qcom,mdss-dsi-t-clk-pre", &val);
+	if (rc) {
+		pr_debug("[%s] Fallback to default t_clk_pre value\n", name);
+		host->t_clk_pre = 0x24;
+	} else {
+		host->t_clk_pre = val;
+		pr_debug("[%s] t_clk_pre = %d\n", name, val);
+	}
+
+	host->ignore_rx_eot = of_property_read_bool(of_node,
+						"qcom,mdss-dsi-rx-eot-ignore");
+
+	host->append_tx_eot = of_property_read_bool(of_node,
+						"qcom,mdss-dsi-tx-eot-append");
+
+	host->force_clk_lane_hs = of_property_read_bool(of_node,
+					"qcom,mdss-dsi-force-clock-lane-hs");
+	return 0;
+}
+
+static int dsi_panel_parse_host_config(struct dsi_panel *panel,
+				       struct device_node *of_node)
+{
+	int rc = 0;
+
+	rc = dsi_panel_parse_pixel_format(&panel->host_config, of_node,
+					  panel->name);
+	if (rc) {
+		pr_err("[%s] failed to get pixel format, rc=%d\n",
+		       panel->name, rc);
+		goto error;
+	}
+
+	rc = dsi_panel_parse_lane_states(&panel->host_config, of_node,
+					 panel->name);
+	if (rc) {
+		pr_err("[%s] failed to parse lane states, rc=%d\n",
+		       panel->name, rc);
+		goto error;
+	}
+
+	rc = dsi_panel_parse_color_swap(&panel->host_config, of_node,
+					panel->name);
+	if (rc) {
+		pr_err("[%s] failed to parse color swap config, rc=%d\n",
+		       panel->name, rc);
+		goto error;
+	}
+
+	rc = dsi_panel_parse_triggers(&panel->host_config, of_node,
+				      panel->name);
+	if (rc) {
+		pr_err("[%s] failed to parse triggers, rc=%d\n",
+		       panel->name, rc);
+		goto error;
+	}
+
+	rc = dsi_panel_parse_misc_host_config(&panel->host_config, of_node,
+					      panel->name);
+	if (rc) {
+		pr_err("[%s] failed to parse misc host config, rc=%d\n",
+		       panel->name, rc);
+		goto error;
+	}
+
+error:
+	return rc;
+}
+
+static int dsi_panel_parse_dfps_caps(struct dsi_dfps_capabilities *dfps_caps,
+				     struct device_node *of_node,
+				     const char *name)
+{
+	int rc = 0;
+	bool supported = false;
+	const char *type;
+	u32 val = 0;
+
+	supported = of_property_read_bool(of_node,
+					"qcom,mdss-dsi-pan-enable-dynamic-fps");
+
+	if (!supported) {
+		pr_debug("[%s] DFPS is not supported\n", name);
+		dfps_caps->dfps_support = false;
+	} else {
+
+		type = of_get_property(of_node,
+				       "qcom,mdss-dsi-pan-fps-update",
+				       NULL);
+		if (!type) {
+			pr_err("[%s] dfps type not defined\n", name);
+			rc = -EINVAL;
+			goto error;
+		} else if (!strcmp(type, "dfps_suspend_resume_mode")) {
+			dfps_caps->type = DSI_DFPS_SUSPEND_RESUME;
+		} else if (!strcmp(type, "dfps_immediate_clk_mode")) {
+			dfps_caps->type = DSI_DFPS_IMMEDIATE_CLK;
+		} else if (!strcmp(type, "dfps_immediate_porch_mode_hfp")) {
+			dfps_caps->type = DSI_DFPS_IMMEDIATE_HFP;
+		} else if (!strcmp(type, "dfps_immediate_porch_mode_vfp")) {
+			dfps_caps->type = DSI_DFPS_IMMEDIATE_VFP;
+		} else {
+			pr_err("[%s] dfps type is not recognized\n", name);
+			rc = -EINVAL;
+			goto error;
+		}
+
+		rc = of_property_read_u32(of_node,
+					  "qcom,mdss-dsi-min-refresh-rate",
+					  &val);
+		if (rc) {
+			pr_err("[%s] Min refresh rate is not defined\n", name);
+			rc = -EINVAL;
+			goto error;
+		}
+		dfps_caps->min_refresh_rate = val;
+
+		rc = of_property_read_u32(of_node,
+					  "qcom,mdss-dsi-max-refresh-rate",
+					  &val);
+		if (rc) {
+			pr_debug("[%s] Using default refresh rate\n", name);
+			rc = of_property_read_u32(of_node,
+						"qcom,mdss-dsi-panel-framerate",
+						&val);
+			if (rc) {
+				pr_err("[%s] max refresh rate is not defined\n",
+				       name);
+				rc = -EINVAL;
+				goto error;
+			}
+		}
+		dfps_caps->max_refresh_rate = val;
+
+		if (dfps_caps->min_refresh_rate > dfps_caps->max_refresh_rate) {
+			pr_err("[%s] min rate > max rate\n", name);
+			rc = -EINVAL;
+		}
+
+		pr_debug("[%s] DFPS is supported %d-%d, mode %d\n", name,
+				dfps_caps->min_refresh_rate,
+				dfps_caps->max_refresh_rate,
+				dfps_caps->type);
+		dfps_caps->dfps_support = true;
+	}
+
+error:
+	return rc;
+}
+
+static int dsi_panel_parse_video_host_config(struct dsi_video_engine_cfg *cfg,
+					     struct device_node *of_node,
+					     const char *name)
+{
+	int rc = 0;
+	const char *traffic_mode;
+	u32 vc_id = 0;
+	u32 val = 0;
+
+	rc = of_property_read_u32(of_node, "qcom,mdss-dsi-h-sync-pulse", &val);
+	if (rc) {
+		pr_debug("[%s] fallback to default h-sync-pulse\n", name);
+		cfg->pulse_mode_hsa_he = false;
+	} else if (val == 1) {
+		cfg->pulse_mode_hsa_he = true;
+	} else if (val == 0) {
+		cfg->pulse_mode_hsa_he = false;
+	} else {
+		pr_err("[%s] Unrecognized value for mdss-dsi-h-sync-pulse\n",
+		       name);
+		rc = -EINVAL;
+		goto error;
+	}
+
+	cfg->hfp_lp11_en = of_property_read_bool(of_node,
+						"qcom,mdss-dsi-hfp-power-mode");
+
+	cfg->hbp_lp11_en = of_property_read_bool(of_node,
+						"qcom,mdss-dsi-hbp-power-mode");
+
+	cfg->hsa_lp11_en = of_property_read_bool(of_node,
+						"qcom,mdss-dsi-hsa-power-mode");
+
+	cfg->last_line_interleave_en = of_property_read_bool(of_node,
+					"qcom,mdss-dsi-last-line-interleave");
+
+	cfg->eof_bllp_lp11_en = of_property_read_bool(of_node,
+					"qcom,mdss-dsi-bllp-eof-power-mode");
+
+	cfg->bllp_lp11_en = of_property_read_bool(of_node,
+					"qcom,mdss-dsi-bllp-power-mode");
+
+	traffic_mode = of_get_property(of_node,
+				       "qcom,mdss-dsi-traffic-mode",
+				       NULL);
+	if (!traffic_mode) {
+		pr_debug("[%s] Falling back to default traffic mode\n", name);
+		cfg->traffic_mode = DSI_VIDEO_TRAFFIC_SYNC_PULSES;
+	} else if (!strcmp(traffic_mode, "non_burst_sync_pulse")) {
+		cfg->traffic_mode = DSI_VIDEO_TRAFFIC_SYNC_PULSES;
+	} else if (!strcmp(traffic_mode, "non_burst_sync_event")) {
+		cfg->traffic_mode = DSI_VIDEO_TRAFFIC_SYNC_START_EVENTS;
+	} else if (!strcmp(traffic_mode, "burst_mode")) {
+		cfg->traffic_mode = DSI_VIDEO_TRAFFIC_BURST_MODE;
+	} else {
+		pr_err("[%s] Unrecognized traffic mode-%s\n", name,
+		       traffic_mode);
+		rc = -EINVAL;
+		goto error;
+	}
+
+	rc = of_property_read_u32(of_node, "qcom,mdss-dsi-virtual-channel-id",
+				  &vc_id);
+	if (rc) {
+		pr_debug("[%s] Fallback to default vc id\n", name);
+		cfg->vc_id = 0;
+	} else {
+		cfg->vc_id = vc_id;
+	}
+
+error:
+	return rc;
+}
+
+static int dsi_panel_parse_cmd_host_config(struct dsi_cmd_engine_cfg *cfg,
+					   struct device_node *of_node,
+					   const char *name)
+{
+	u32 val = 0;
+	int rc = 0;
+
+	rc = of_property_read_u32(of_node, "qcom,mdss-dsi-wr-mem-start", &val);
+	if (rc) {
+		pr_debug("[%s] Fallback to default wr-mem-start\n", name);
+		cfg->wr_mem_start = 0x2C;
+	} else {
+		cfg->wr_mem_start = val;
+	}
+
+	val = 0;
+	rc = of_property_read_u32(of_node, "qcom,mdss-dsi-wr-mem-continue",
+				  &val);
+	if (rc) {
+		pr_debug("[%s] Fallback to default wr-mem-continue\n", name);
+		cfg->wr_mem_continue = 0x3C;
+	} else {
+		cfg->wr_mem_continue = val;
+	}
+
+	/* TODO:  fix following */
+	cfg->max_cmd_packets_interleave = 0;
+
+	val = 0;
+	rc = of_property_read_u32(of_node, "qcom,mdss-dsi-te-dcs-command",
+				  &val);
+	if (rc) {
+		pr_debug("[%s] fallback to default te-dcs-cmd\n", name);
+		cfg->insert_dcs_command = true;
+	} else if (val == 1) {
+		cfg->insert_dcs_command = true;
+	} else if (val == 0) {
+		cfg->insert_dcs_command = false;
+	} else {
+		pr_err("[%s] Unrecognized value for mdss-dsi-te-dcs-command\n",
+		       name);
+		rc = -EINVAL;
+		goto error;
+	}
+
+	if (of_property_read_u32(of_node, "qcom,mdss-mdp-transfer-time-us",
+				&val)) {
+		pr_debug("[%s] Fallback to default transfer-time-us\n", name);
+		cfg->mdp_transfer_time_us = DEFAULT_MDP_TRANSFER_TIME;
+	} else {
+		cfg->mdp_transfer_time_us = val;
+	}
+
+error:
+	return rc;
+}
+
+static int dsi_panel_parse_panel_mode(struct dsi_panel *panel,
+				      struct device_node *of_node)
+{
+	int rc = 0;
+	enum dsi_op_mode panel_mode;
+	const char *mode;
+
+	mode = of_get_property(of_node, "qcom,mdss-dsi-panel-type", NULL);
+	if (!mode) {
+		pr_debug("[%s] Fallback to default panel mode\n", panel->name);
+		panel_mode = DSI_OP_VIDEO_MODE;
+	} else if (!strcmp(mode, "dsi_video_mode")) {
+		panel_mode = DSI_OP_VIDEO_MODE;
+	} else if (!strcmp(mode, "dsi_cmd_mode")) {
+		panel_mode = DSI_OP_CMD_MODE;
+	} else {
+		pr_err("[%s] Unrecognized panel type-%s\n", panel->name, mode);
+		rc = -EINVAL;
+		goto error;
+	}
+
+	if (panel_mode == DSI_OP_VIDEO_MODE) {
+		rc = dsi_panel_parse_video_host_config(&panel->video_config,
+						       of_node,
+						       panel->name);
+		if (rc) {
+			pr_err("[%s] Failed to parse video host cfg, rc=%d\n",
+			       panel->name, rc);
+			goto error;
+		}
+	}
+
+	if (panel_mode == DSI_OP_CMD_MODE) {
+		rc = dsi_panel_parse_cmd_host_config(&panel->cmd_config,
+						     of_node,
+						     panel->name);
+		if (rc) {
+			pr_err("[%s] Failed to parse cmd host config, rc=%d\n",
+			       panel->name, rc);
+			goto error;
+		}
+	}
+
+	panel->mode.panel_mode = panel_mode;
+error:
+	return rc;
+}
+
+static int dsi_panel_parse_phy_props(struct dsi_panel_phy_props *props,
+				     struct device_node *of_node,
+				     const char *name)
+{
+	int rc = 0;
+	u32 val = 0;
+	const char *str;
+
+	rc = of_property_read_u32(of_node,
+				  "qcom,mdss-pan-physical-width-dimension",
+				  &val);
+	if (rc) {
+		pr_debug("[%s] Physical panel width is not defined\n", name);
+		props->panel_width_mm = 0;
+		rc = 0;
+	} else {
+		props->panel_width_mm = val;
+	}
+
+	rc = of_property_read_u32(of_node,
+				  "qcom,mdss-pan-physical-height-dimension",
+				  &val);
+	if (rc) {
+		pr_debug("[%s] Physical panel height is not defined\n", name);
+		props->panel_height_mm = 0;
+		rc = 0;
+	} else {
+		props->panel_height_mm = val;
+	}
+
+	str = of_get_property(of_node, "qcom,mdss-dsi-panel-orientation", NULL);
+	if (!str) {
+		props->rotation = DSI_PANEL_ROTATE_NONE;
+	} else if (!strcmp(str, "180")) {
+		props->rotation = DSI_PANEL_ROTATE_HV_FLIP;
+	} else if (!strcmp(str, "hflip")) {
+		props->rotation = DSI_PANEL_ROTATE_H_FLIP;
+	} else if (!strcmp(str, "vflip")) {
+		props->rotation = DSI_PANEL_ROTATE_V_FLIP;
+	} else {
+		pr_err("[%s] Unrecognized panel rotation-%s\n", name, str);
+		rc = -EINVAL;
+		goto error;
+	}
+error:
+	return rc;
+}
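+
+/*
+ * Property tables indexed by enum dsi_cmd_set_type; both must stay in
+ * step with the enum. The matching -state property selects LP vs HS
+ * transfer mode for each set.
+ */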
+const char *cmd_set_prop_map[DSI_CMD_SET_MAX] = {
+	"qcom,mdss-dsi-pre-on-command",
+	"qcom,mdss-dsi-on-command",
+	"qcom,mdss-dsi-post-panel-on-command",
+	"qcom,mdss-dsi-pre-off-command",
+	"qcom,mdss-dsi-off-command",
+	"qcom,mdss-dsi-post-off-command",
+	"qcom,mdss-dsi-pre-res-switch",
+	"qcom,mdss-dsi-res-switch",
+	"qcom,mdss-dsi-post-res-switch",
+	"qcom,cmd-to-video-mode-switch-commands",
+	"qcom,cmd-to-video-mode-post-switch-commands",
+	"qcom,video-to-cmd-mode-switch-commands",
+	"qcom,video-to-cmd-mode-post-switch-commands",
+	"qcom,mdss-dsi-panel-status-command",
+};
+
+const char *cmd_set_state_map[DSI_CMD_SET_MAX] = {
+	"qcom,mdss-dsi-pre-on-command-state",
+	"qcom,mdss-dsi-on-command-state",
+	"qcom,mdss-dsi-post-on-command-state",
+	"qcom,mdss-dsi-pre-off-command-state",
+	"qcom,mdss-dsi-off-command-state",
+	"qcom,mdss-dsi-post-off-command-state",
+	"qcom,mdss-dsi-pre-res-switch-state",
+	"qcom,mdss-dsi-res-switch-state",
+	"qcom,mdss-dsi-post-res-switch-state",
+	"qcom,cmd-to-video-mode-switch-commands-state",
+	"qcom,cmd-to-video-mode-post-switch-commands-state",
+	"qcom,video-to-cmd-mode-switch-commands-state",
+	"qcom,video-to-cmd-mode-post-switch-commands-state",
+	"qcom,mdss-dsi-panel-status-command-state",
+};
+
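+/*
+ * Each command in the DT byte stream carries a 7-byte header:
+ * data[0] data type, data[1] last-command flag, data[2] virtual channel,
+ * data[3] ACK request, data[4] post-transfer wait (ms) and
+ * data[5..6] big-endian payload length, followed by the payload itself.
+ */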
+static int dsi_panel_get_cmd_pkt_count(const char *data, u32 length, u32 *cnt)
+{
+	const u32 cmd_set_min_size = 7;
+	u32 count = 0;
+	u32 packet_length;
+	u32 tmp;
+
+	while (length >= cmd_set_min_size) {
+		packet_length = cmd_set_min_size;
+		tmp = ((data[5] << 8) | (data[6]));
+		packet_length += tmp;
+		if (packet_length > length) {
+			pr_err("FORMAT ERROR\n");
+			return -EINVAL;
+		}
+		length -= packet_length;
+		data += packet_length;
+		count++;
+	}
+
+	*cnt = count;
+	return 0;
+}
+
+static int dsi_panel_create_cmd_packets(const char *data,
+					u32 length,
+					u32 count,
+					struct dsi_cmd_desc *cmd)
+{
+	int rc = 0;
+	int i, j;
+	u8 *payload;
+
+	for (i = 0; i < count; i++) {
+		u32 size;
+
+		cmd[i].msg.type = data[0];
+		cmd[i].last_command = (data[1] == 1 ? true : false);
+		cmd[i].msg.channel = data[2];
+		cmd[i].msg.flags |= (data[3] == 1 ? MIPI_DSI_MSG_REQ_ACK : 0);
+		cmd[i].post_wait_ms = data[4];
+		cmd[i].msg.tx_len = ((data[5] << 8) | (data[6]));
+
+		size = cmd[i].msg.tx_len * sizeof(u8);
+
+		payload = kzalloc(size, GFP_KERNEL);
+		if (!payload) {
+			rc = -ENOMEM;
+			goto error_free_payloads;
+		}
+
+		for (j = 0; j < cmd[i].msg.tx_len; j++)
+			payload[j] = data[7 + j];
+
+		cmd[i].msg.tx_buf = payload;
+		data += (7 + cmd[i].msg.tx_len);
+	}
+
+	return rc;
+error_free_payloads:
+	/* cmd still points at the array base, so free by index */
+	for (i = i - 1; i >= 0; i--)
+		kfree(cmd[i].msg.tx_buf);
+
+	return rc;
+}
+
+static void dsi_panel_destroy_cmd_packets(struct dsi_panel_cmd_set *set)
+{
+	u32 i = 0;
+	struct dsi_cmd_desc *cmd;
+
+	for (i = 0; i < set->count; i++) {
+		cmd = &set->cmds[i];
+		kfree(cmd->msg.tx_buf);
+	}
+
+	kfree(set->cmds);
+}
+
+static int dsi_panel_parse_cmd_sets_sub(struct dsi_panel_cmd_set *cmd,
+					enum dsi_cmd_set_type type,
+					struct device_node *of_node)
+{
+	int rc = 0;
+	u32 length = 0;
+	u32 size;
+	const char *data;
+	const char *state;
+	u32 packet_count = 0;
+
+	data = of_get_property(of_node, cmd_set_prop_map[type], &length);
+	if (!data) {
+		pr_err("%s commands not defined\n", cmd_set_prop_map[type]);
+		rc = -ENOTSUPP;
+		goto error;
+	}
+
+	rc = dsi_panel_get_cmd_pkt_count(data, length, &packet_count);
+	if (rc) {
+		pr_err("commands failed, rc=%d\n", rc);
+		goto error;
+	}
+	pr_debug("[%s] packet-count=%d, %d\n", cmd_set_prop_map[type],
+		 packet_count, length);
+
+	size = packet_count * sizeof(*cmd->cmds);
+	cmd->cmds = kzalloc(size, GFP_KERNEL);
+	if (!cmd->cmds) {
+		rc = -ENOMEM;
+		goto error;
+	}
+	cmd->count = packet_count;
+
+	rc = dsi_panel_create_cmd_packets(data, length, packet_count,
+					  cmd->cmds);
+	if (rc) {
+		pr_err("Failed to create cmd packets, rc=%d\n", rc);
+		goto error_free_mem;
+	}
+
+	state = of_get_property(of_node, cmd_set_state_map[type], NULL);
+	if (!state || !strcmp(state, "dsi_lp_mode")) {
+		cmd->state = DSI_CMD_SET_STATE_LP;
+	} else if (!strcmp(state, "dsi_hs_mode")) {
+		cmd->state = DSI_CMD_SET_STATE_HS;
+	} else {
+		pr_err("[%s] Unrecognized command state-%s\n",
+		       cmd_set_state_map[type], state);
+		rc = -EINVAL;
+		goto error_free_mem;
+	}
+
+	return rc;
+error_free_mem:
+	kfree(cmd->cmds);
+	cmd->cmds = NULL;
+error:
+	return rc;
+}
+
+static int dsi_panel_parse_cmd_sets(struct dsi_panel *panel,
+				    struct device_node *of_node)
+{
+	int rc = 0;
+	struct dsi_panel_cmd_set *set;
+	u32 i;
+
+	for (i = DSI_CMD_SET_PRE_ON; i < DSI_CMD_SET_MAX; i++) {
+		set = &panel->cmd_sets[i];
+		set->type = i;
+		rc = dsi_panel_parse_cmd_sets_sub(set, i, of_node);
+		if (rc)
+			pr_err("[%s] failed to parse set %d\n", panel->name, i);
+	}
+
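+	/* Missing command sets are not fatal; panels define only the
+	 * subsets they need.
+	 */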
+	rc = 0;
+	return rc;
+}
+
+static int dsi_panel_parse_reset_sequence(struct dsi_panel *panel,
+				      struct device_node *of_node)
+{
+	int rc = 0;
+	int i;
+	u32 length = 0;
+	u32 count = 0;
+	u32 size = 0;
+	u32 *arr_32 = NULL;
+	const u32 *arr;
+	struct dsi_reset_seq *seq;
+
+	arr = of_get_property(of_node, "qcom,mdss-dsi-reset-sequence", &length);
+	if (!arr) {
+		pr_err("[%s] dsi-reset-sequence not found\n", panel->name);
+		rc = -EINVAL;
+		goto error;
+	}
+	if (length & 0x1) {
+		pr_err("[%s] syntax error for dsi-reset-sequence\n",
+		       panel->name);
+		rc = -EINVAL;
+		goto error;
+	}
+
+	pr_err("RESET SEQ LENGTH = %d\n", length);
+	length = length / sizeof(u32);
+
+	size = length * sizeof(u32);
+
+	arr_32 = kzalloc(size, GFP_KERNEL);
+	if (!arr_32) {
+		rc = -ENOMEM;
+		goto error;
+	}
+
+	rc = of_property_read_u32_array(of_node, "qcom,mdss-dsi-reset-sequence",
+					arr_32, length);
+	if (rc) {
+		pr_err("[%s] cannot read dso-reset-seqience\n", panel->name);
+		goto error_free_arr_32;
+	}
+
+	count = length / 2;
+	size = count * sizeof(*seq);
+	seq = kzalloc(size, GFP_KERNEL);
+	if (!seq) {
+		rc = -ENOMEM;
+		goto error_free_arr_32;
+	}
+
+	panel->reset_config.sequence = seq;
+	panel->reset_config.count = count;
+
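+	/* The DT property is a flat array of <level sleep_ms> pairs. */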
+	for (i = 0; i < length; i += 2) {
+		seq->level = arr_32[i];
+		seq->sleep_ms = arr_32[i + 1];
+		seq++;
+	}
+
+error_free_arr_32:
+	kfree(arr_32);
+error:
+	return rc;
+}
+
+static int dsi_panel_parse_power_cfg(struct device *parent,
+				     struct dsi_panel *panel,
+				     struct device_node *of_node)
+{
+	int rc = 0;
+
+	rc = dsi_clk_pwr_of_get_vreg_data(of_node,
+					  &panel->power_info,
+					  "qcom,panel-supply-entries");
+	if (rc) {
+		pr_err("[%s] failed to parse vregs\n", panel->name);
+		goto error;
+	}
+
+error:
+	return rc;
+}
+
+static int dsi_panel_parse_gpios(struct dsi_panel *panel,
+				 struct device_node *of_node)
+{
+	int rc = 0;
+
+	/* Need to set GPIO default value to -1, since 0 is a valid value */
+	panel->reset_config.disp_en_gpio = -1;
+	panel->reset_config.reset_gpio = of_get_named_gpio(of_node,
+					      "qcom,platform-reset-gpio",
+					      0);
+	if (!gpio_is_valid(panel->reset_config.reset_gpio)) {
+		pr_err("[%s] failed get reset gpio, rc=%d\n", panel->name, rc);
+		rc = -EINVAL;
+		goto error;
+	}
+
+	panel->reset_config.disp_en_gpio = of_get_named_gpio(of_node,
+						"qcom,5v-boost-gpio",
+						0);
+	if (!gpio_is_valid(panel->reset_config.disp_en_gpio)) {
+		pr_debug("[%s] 5v-boot-gpio is not set, rc=%d\n",
+			 panel->name, rc);
+		panel->reset_config.disp_en_gpio = of_get_named_gpio(of_node,
+							"qcom,platform-en-gpio",
+							0);
+		if (!gpio_is_valid(panel->reset_config.disp_en_gpio)) {
+			pr_debug("[%s] platform-en-gpio is not set, rc=%d\n",
+				 panel->name, rc);
+		}
+	}
+
+	/* TODO:  release memory */
+	rc = dsi_panel_parse_reset_sequence(panel, of_node);
+	if (rc) {
+		pr_err("[%s] failed to parse reset sequence, rc=%d\n",
+		       panel->name, rc);
+		goto error;
+	}
+
+error:
+	return rc;
+}
+
+static int dsi_panel_parse_bl_pwm_config(struct dsi_backlight_config *config,
+					 struct device_node *of_node)
+{
+	int rc = 0;
+	u32 val;
+
+	rc = of_property_read_u32(of_node, "qcom,dsi-bl-pmic-bank-select",
+				  &val);
+	if (rc) {
+		pr_err("bl-pmic-bank-select is not defined, rc=%d\n", rc);
+		goto error;
+	}
+	config->pwm_pmic_bank = val;
+
+	rc = of_property_read_u32(of_node, "qcom,dsi-bl-pmic-pwm-frequency",
+				  &val);
+	if (rc) {
+		pr_err("bl-pmic-bank-select is not defined, rc=%d\n", rc);
+		goto error;
+	}
+	config->pwm_period_usecs = val;
+
+	config->pwm_pmi_control = of_property_read_bool(of_node,
+						"qcom,mdss-dsi-bl-pwm-pmi");
+
+	config->pwm_gpio = of_get_named_gpio(of_node,
+					     "qcom,mdss-dsi-pwm-gpio",
+					     0);
+	if (!gpio_is_valid(config->pwm_gpio)) {
+		pr_err("pwm gpio is invalid\n");
+		rc = -EINVAL;
+		goto error;
+	}
+
+error:
+	return rc;
+}
+
+static int dsi_panel_parse_bl_config(struct dsi_panel *panel,
+				     struct device_node *of_node)
+{
+	int rc = 0;
+	const char *bl_type;
+	u32 val = 0;
+
+	bl_type = of_get_property(of_node,
+				  "qcom,mdss-dsi-bl-pmic-control-type",
+				  NULL);
+	if (!bl_type) {
+		panel->bl_config.type = DSI_BACKLIGHT_UNKNOWN;
+	} else if (!strcmp(bl_type, "bl_ctrl_pwm")) {
+		panel->bl_config.type = DSI_BACKLIGHT_PWM;
+	} else if (!strcmp(bl_type, "bl_ctrl_wled")) {
+		panel->bl_config.type = DSI_BACKLIGHT_WLED;
+	} else if (!strcmp(bl_type, "bl_ctrl_dcs")) {
+		panel->bl_config.type = DSI_BACKLIGHT_DCS;
+	} else {
+		pr_debug("[%s] bl-pmic-control-type unknown-%s\n",
+			 panel->name, bl_type);
+		panel->bl_config.type = DSI_BACKLIGHT_UNKNOWN;
+	}
+
+	rc = of_property_read_u32(of_node, "qcom,mdss-dsi-bl-min-level", &val);
+	if (rc) {
+		pr_debug("[%s] bl-min-level unspecified, defaulting to zero\n",
+			 panel->name);
+		panel->bl_config.bl_min_level = 0;
+	} else {
+		panel->bl_config.bl_min_level = val;
+	}
+
+	rc = of_property_read_u32(of_node, "qcom,mdss-dsi-bl-max-level", &val);
+	if (rc) {
+		pr_debug("[%s] bl-max-level unspecified, defaulting to max level\n",
+			 panel->name);
+		panel->bl_config.bl_max_level = MAX_BL_LEVEL;
+	} else {
+		panel->bl_config.bl_max_level = val;
+	}
+
+	rc = of_property_read_u32(of_node, "qcom,mdss-brightness-max-level",
+		&val);
+	if (rc) {
+		pr_debug("[%s] brigheness-max-level unspecified, defaulting to 255\n",
+			 panel->name);
+		panel->bl_config.brightness_max_level = 255;
+	} else {
+		panel->bl_config.brightness_max_level = val;
+	}
+
+	if (panel->bl_config.type == DSI_BACKLIGHT_PWM) {
+		rc = dsi_panel_parse_bl_pwm_config(&panel->bl_config, of_node);
+		if (rc) {
+			pr_err("[%s] failed to parse pwm config, rc=%d\n",
+			       panel->name, rc);
+			goto error;
+		}
+	}
+
+	panel->bl_config.en_gpio = of_get_named_gpio(of_node,
+					      "qcom,platform-bklight-en-gpio",
+					      0);
+	if (!gpio_is_valid(panel->bl_config.en_gpio)) {
+		pr_err("[%s] failed get bklt gpio, rc=%d\n", panel->name, rc);
+		rc = -EINVAL;
+		goto error;
+	}
+
+error:
+	return rc;
+}
+
+static int dsi_panel_parse_dba_config(struct dsi_panel *panel,
+					struct device_node *of_node)
+{
+	int rc = 0, len = 0;
+
+	panel->dba_config.dba_panel = of_property_read_bool(of_node,
+		"qcom,dba-panel");
+
+	if (panel->dba_config.dba_panel) {
+		panel->dba_config.hdmi_mode = of_property_read_bool(of_node,
+			"qcom,hdmi-mode");
+
+		panel->dba_config.bridge_name = of_get_property(of_node,
+			"qcom,bridge-name", &len);
+		if (!panel->dba_config.bridge_name || len <= 0) {
+			SDE_ERROR(
+			"%s:%d Unable to read bridge_name, data=%pK,len=%d\n",
+			__func__, __LINE__, panel->dba_config.bridge_name, len);
+			rc = -EINVAL;
+			goto error;
+		}
+	}
+
+error:
+	return rc;
+}
+
+struct dsi_panel *dsi_panel_get(struct device *parent,
+				struct device_node *of_node)
+{
+	struct dsi_panel *panel;
+	int rc = 0;
+
+	panel = kzalloc(sizeof(*panel), GFP_KERNEL);
+	if (!panel)
+		return ERR_PTR(-ENOMEM);
+
+	panel->name = of_get_property(of_node, "qcom,mdss-dsi-panel-name",
+				      NULL);
+	if (!panel->name)
+		panel->name = DSI_PANEL_DEFAULT_LABEL;
+
+	rc = dsi_panel_parse_timing(&panel->mode.timing, of_node);
+	if (rc) {
+		pr_err("failed to parse panel timing, rc=%d\n", rc);
+		goto error;
+	}
+
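+	/* Derive the pixel clock from the parsed timings:
+	 * htotal * vtotal * refresh rate.
+	 */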
+	panel->mode.pixel_clk_khz = (DSI_H_TOTAL(&panel->mode.timing) *
+				    DSI_V_TOTAL(&panel->mode.timing) *
+				    panel->mode.timing.refresh_rate) / 1000;
+	rc = dsi_panel_parse_host_config(panel, of_node);
+	if (rc) {
+		pr_err("failed to parse host configuration, rc=%d\n", rc);
+		goto error;
+	}
+
+	rc = dsi_panel_parse_panel_mode(panel, of_node);
+	if (rc) {
+		pr_err("failed to parse panel mode configuration, rc=%d\n", rc);
+		goto error;
+	}
+
+	rc = dsi_panel_parse_dfps_caps(&panel->dfps_caps, of_node, panel->name);
+	if (rc)
+		pr_err("failed to parse dfps configuration, rc=%d\n", rc);
+
+	rc = dsi_panel_parse_phy_props(&panel->phy_props, of_node, panel->name);
+	if (rc) {
+		pr_err("failed to parse panel physical dimension, rc=%d\n", rc);
+		goto error;
+	}
+
+	rc = dsi_panel_parse_cmd_sets(panel, of_node);
+	if (rc) {
+		pr_err("failed to parse command sets, rc=%d\n", rc);
+		goto error;
+	}
+
+	rc = dsi_panel_parse_power_cfg(parent, panel, of_node);
+	if (rc)
+		pr_err("failed to parse power config, rc=%d\n", rc);
+
+	rc = dsi_panel_parse_gpios(panel, of_node);
+	if (rc)
+		pr_err("failed to parse panel gpios, rc=%d\n", rc);
+
+	rc = dsi_panel_parse_bl_config(panel, of_node);
+	if (rc)
+		pr_err("failed to parse backlight config, rc=%d\n", rc);
+
+	rc = dsi_panel_parse_dba_config(panel, of_node);
+	if (rc)
+		pr_err("failed to parse dba config, rc=%d\n", rc);
+
+	panel->panel_of_node = of_node;
+	drm_panel_init(&panel->drm_panel);
+	mutex_init(&panel->panel_lock);
+	panel->parent = parent;
+	return panel;
+error:
+	kfree(panel);
+	return ERR_PTR(rc);
+}
+
+void dsi_panel_put(struct dsi_panel *panel)
+{
+	u32 i;
+
+	if (!panel)
+		return;
+
+	for (i = 0; i < DSI_CMD_SET_MAX; i++)
+		dsi_panel_destroy_cmd_packets(&panel->cmd_sets[i]);
+
+	/* TODO:  more free */
+	kfree(panel);
+}
+
+int dsi_panel_drv_init(struct dsi_panel *panel,
+		       struct mipi_dsi_host *host)
+{
+	int rc = 0;
+	struct mipi_dsi_device *dev;
+
+	if (!panel || !host) {
+		pr_err("Invalid params\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&panel->panel_lock);
+
+	dev = &panel->mipi_device;
+
+	dev->host = host;
+	/*
+	 * We don't have a device structure since the panel is not a device
+	 * node. When using the drm panel framework, the device is probed
+	 * when the host is created.
+	 */
+	dev->channel = 0;
+	dev->lanes = 4;
+
+	panel->host = host;
+	rc = dsi_panel_vreg_get(panel);
+	if (rc) {
+		pr_err("[%s] Failed to get panel regulators, rc=%d\n",
+		       panel->name, rc);
+		goto exit;
+	}
+
+	rc = dsi_panel_pinctrl_init(panel);
+	if (rc)
+		pr_err("[%s] failed to init pinctrl, rc=%d\n", panel->name, rc);
+
+	rc = dsi_panel_gpio_request(panel);
+	if (rc) {
+		pr_err("[%s] failed to request gpios, rc=%d\n", panel->name,
+		       rc);
+		goto error_pinctrl_deinit;
+	}
+
+	rc = dsi_panel_bl_register(panel);
+	if (rc) {
+		if (rc != -EPROBE_DEFER)
+			pr_err("[%s] failed to register backlight, rc=%d\n",
+			       panel->name, rc);
+		goto error_gpio_release;
+	}
+
+	goto exit;
+
+error_gpio_release:
+	(void)dsi_panel_gpio_release(panel);
+error_pinctrl_deinit:
+	(void)dsi_panel_pinctrl_deinit(panel);
+	(void)dsi_panel_vreg_put(panel);
+exit:
+	mutex_unlock(&panel->panel_lock);
+	return rc;
+}
+
+int dsi_panel_drv_deinit(struct dsi_panel *panel)
+{
+	int rc = 0;
+
+	if (!panel) {
+		pr_err("Invalid params\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&panel->panel_lock);
+
+	rc = dsi_panel_bl_unregister(panel);
+	if (rc)
+		pr_err("[%s] failed to unregister backlight, rc=%d\n",
+		       panel->name, rc);
+
+	rc = dsi_panel_gpio_release(panel);
+	if (rc)
+		pr_err("[%s] failed to release gpios, rc=%d\n", panel->name,
+		       rc);
+
+	rc = dsi_panel_pinctrl_deinit(panel);
+	if (rc)
+		pr_err("[%s] failed to deinit gpios, rc=%d\n", panel->name,
+		       rc);
+
+	rc = dsi_panel_vreg_put(panel);
+	if (rc)
+		pr_err("[%s] failed to put regs, rc=%d\n", panel->name, rc);
+
+	panel->host = NULL;
+	memset(&panel->mipi_device, 0x0, sizeof(panel->mipi_device));
+
+	mutex_unlock(&panel->panel_lock);
+	return rc;
+}
+
+int dsi_panel_validate_mode(struct dsi_panel *panel,
+			    struct dsi_display_mode *mode)
+{
+	return 0;
+}
+
+int dsi_panel_get_mode_count(struct dsi_panel *panel, u32 *count)
+{
+	int rc = 0;
+
+	if (!panel || !count) {
+		pr_err("Invalid params\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&panel->panel_lock);
+	/* TODO:  DT format has not been decided for multiple modes. */
+	*count = 1;
+
+	mutex_unlock(&panel->panel_lock);
+	return rc;
+}
+
+int dsi_panel_get_phy_props(struct dsi_panel *panel,
+			    struct dsi_panel_phy_props *phy_props)
+{
+	int rc = 0;
+
+	if (!panel || !phy_props) {
+		pr_err("Invalid params\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&panel->panel_lock);
+
+	memcpy(phy_props, &panel->phy_props, sizeof(*phy_props));
+
+	mutex_unlock(&panel->panel_lock);
+	return rc;
+}
+
+int dsi_panel_get_dfps_caps(struct dsi_panel *panel,
+			    struct dsi_dfps_capabilities *dfps_caps)
+{
+	int rc = 0;
+
+	if (!panel || !dfps_caps) {
+		pr_err("Invalid params\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&panel->panel_lock);
+
+	memcpy(dfps_caps, &panel->dfps_caps, sizeof(*dfps_caps));
+
+	mutex_unlock(&panel->panel_lock);
+	return rc;
+}
+
+int dsi_panel_get_mode(struct dsi_panel *panel,
+			u32 index,
+			struct dsi_display_mode *mode)
+{
+	int rc = 0;
+
+	if (!panel || !mode) {
+		pr_err("Invalid params\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&panel->panel_lock);
+	if (index != 0)
+		rc = -ENOTSUPP; /* TODO: Support more than one mode */
+	else
+		memcpy(mode, &panel->mode, sizeof(*mode));
+
+	mutex_unlock(&panel->panel_lock);
+	return rc;
+}
+
+int dsi_panel_get_host_cfg_for_mode(struct dsi_panel *panel,
+				    struct dsi_display_mode *mode,
+				    struct dsi_host_config *config)
+{
+	int rc = 0;
+
+	if (!panel || !mode || !config) {
+		pr_err("Invalid params\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&panel->panel_lock);
+
+	config->panel_mode = panel->mode.panel_mode;
+	memcpy(&config->common_config, &panel->host_config,
+	       sizeof(config->common_config));
+
+	if (mode->panel_mode == DSI_OP_VIDEO_MODE) {
+		memcpy(&config->u.video_engine, &panel->video_config,
+		       sizeof(config->u.video_engine));
+	} else {
+		memcpy(&config->u.cmd_engine, &panel->cmd_config,
+		       sizeof(config->u.cmd_engine));
+	}
+
+	memcpy(&config->video_timing, &mode->timing,
+	       sizeof(config->video_timing));
+
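+	/* The escape clock runs at a fixed 19.2 MHz, the usual XO rate
+	 * on MSM platforms.
+	 */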
+	config->esc_clk_rate_hz = 19200000;
+	mutex_unlock(&panel->panel_lock);
+	return rc;
+}
+
+int dsi_panel_pre_prepare(struct dsi_panel *panel)
+{
+	int rc = 0;
+
+	if (!panel) {
+		pr_err("Invalid params\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&panel->panel_lock);
+
+	/* If LP11_INIT is set, panel will be powered up during prepare() */
+	if (panel->lp11_init)
+		goto error;
+
+	rc = dsi_panel_power_on(panel);
+	if (rc) {
+		pr_err("[%s] Panel power on failed, rc=%d\n", panel->name, rc);
+		goto error;
+	}
+
+error:
+	mutex_unlock(&panel->panel_lock);
+	return rc;
+}
+
+int dsi_panel_prepare(struct dsi_panel *panel)
+{
+	int rc = 0;
+
+	if (!panel) {
+		pr_err("Invalid params\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&panel->panel_lock);
+
+	if (panel->lp11_init) {
+		rc = dsi_panel_power_on(panel);
+		if (rc) {
+			pr_err("[%s] panel power on failed, rc=%d\n",
+			       panel->name, rc);
+			goto error;
+		}
+	}
+
+	rc = dsi_panel_tx_cmd_set(panel, DSI_CMD_SET_PRE_ON);
+	if (rc) {
+		pr_err("[%s] failed to send DSI_CMD_SET_PRE_ON cmds, rc=%d\n",
+		       panel->name, rc);
+		goto error;
+	}
+
+error:
+	mutex_unlock(&panel->panel_lock);
+	return rc;
+}
+
+int dsi_panel_enable(struct dsi_panel *panel)
+{
+	int rc = 0;
+
+	if (!panel) {
+		pr_err("Invalid params\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&panel->panel_lock);
+
+	rc = dsi_panel_tx_cmd_set(panel, DSI_CMD_SET_ON);
+	if (rc) {
+		pr_err("[%s] failed to send DSI_CMD_SET_ON cmds, rc=%d\n",
+		       panel->name, rc);
+	}
+	mutex_unlock(&panel->panel_lock);
+	return rc;
+}
+
+int dsi_panel_post_enable(struct dsi_panel *panel)
+{
+	int rc = 0;
+
+	if (!panel) {
+		pr_err("Invalid params\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&panel->panel_lock);
+
+	rc = dsi_panel_tx_cmd_set(panel, DSI_CMD_SET_POST_ON);
+	if (rc) {
+		pr_err("[%s] failed to send DSI_CMD_SET_POST_ON cmds, rc=%d\n",
+		       panel->name, rc);
+		goto error;
+	}
+error:
+	mutex_unlock(&panel->panel_lock);
+	return rc;
+}
+
+int dsi_panel_pre_disable(struct dsi_panel *panel)
+{
+	int rc = 0;
+
+	if (!panel) {
+		pr_err("Invalid params\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&panel->panel_lock);
+
+	rc = dsi_panel_tx_cmd_set(panel, DSI_CMD_SET_PRE_OFF);
+	if (rc) {
+		pr_err("[%s] failed to send DSI_CMD_SET_PRE_OFF cmds, rc=%d\n",
+		       panel->name, rc);
+		goto error;
+	}
+
+error:
+	mutex_unlock(&panel->panel_lock);
+	return rc;
+}
+
+int dsi_panel_disable(struct dsi_panel *panel)
+{
+	int rc = 0;
+
+	if (!panel) {
+		pr_err("Invalid params\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&panel->panel_lock);
+
+	rc = dsi_panel_tx_cmd_set(panel, DSI_CMD_SET_OFF);
+	if (rc) {
+		pr_err("[%s] failed to send DSI_CMD_SET_OFF cmds, rc=%d\n",
+		       panel->name, rc);
+		goto error;
+	}
+error:
+	mutex_unlock(&panel->panel_lock);
+	return rc;
+}
+
+int dsi_panel_unprepare(struct dsi_panel *panel)
+{
+	int rc = 0;
+
+	if (!panel) {
+		pr_err("Invalid params\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&panel->panel_lock);
+
+	rc = dsi_panel_tx_cmd_set(panel, DSI_CMD_SET_POST_OFF);
+	if (rc) {
+		pr_err("[%s] failed to send DSI_CMD_SET_POST_OFF cmds, rc=%d\n",
+		       panel->name, rc);
+		goto error;
+	}
+
+	if (panel->lp11_init) {
+		rc = dsi_panel_power_off(panel);
+		if (rc) {
+			pr_err("[%s] panel power_Off failed, rc=%d\n",
+			       panel->name, rc);
+			goto error;
+		}
+	}
+error:
+	mutex_unlock(&panel->panel_lock);
+	return rc;
+}
+
+int dsi_panel_post_unprepare(struct dsi_panel *panel)
+{
+	int rc = 0;
+
+	if (!panel) {
+		pr_err("Invalid params\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&panel->panel_lock);
+
+	if (!panel->lp11_init) {
+		rc = dsi_panel_power_off(panel);
+		if (rc) {
+			pr_err("[%s] panel power_Off failed, rc=%d\n",
+			       panel->name, rc);
+			goto error;
+		}
+	}
+error:
+	mutex_unlock(&panel->panel_lock);
+	return rc;
+}
diff -Nruw linux-4.4.115-fbx/drivers/gpu/drm/msm/dsi-staging./dsi_panel.h linux-4.4.115-fbx/drivers/gpu/drm/msm/dsi-staging/dsi_panel.h
--- linux-4.4.115-fbx/drivers/gpu/drm/msm/dsi-staging./dsi_panel.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/gpu/drm/msm/dsi-staging/dsi_panel.h	2019-01-22 16:16:23.491246298 +0100
@@ -0,0 +1,217 @@
+/*
+ * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _DSI_PANEL_H_
+#define _DSI_PANEL_H_
+
+#include <linux/of_device.h>
+#include <linux/types.h>
+#include <linux/bitops.h>
+#include <linux/errno.h>
+#include <linux/leds.h>
+#include <drm/drm_panel.h>
+#include <drm/drm_mipi_dsi.h>
+
+#include "dsi_defs.h"
+#include "dsi_ctrl_hw.h"
+#include "dsi_clk_pwr.h"
+
+#define MAX_BL_LEVEL 4096
+
+enum dsi_panel_rotation {
+	DSI_PANEL_ROTATE_NONE = 0,
+	DSI_PANEL_ROTATE_HV_FLIP,
+	DSI_PANEL_ROTATE_H_FLIP,
+	DSI_PANEL_ROTATE_V_FLIP
+};
+
+enum dsi_cmd_set_type {
+	DSI_CMD_SET_PRE_ON = 0,
+	DSI_CMD_SET_ON,
+	DSI_CMD_SET_POST_ON,
+	DSI_CMD_SET_PRE_OFF,
+	DSI_CMD_SET_OFF,
+	DSI_CMD_SET_POST_OFF,
+	DSI_CMD_SET_PRE_RES_SWITCH,
+	DSI_CMD_SET_RES_SWITCH,
+	DSI_CMD_SET_POST_RES_SWITCH,
+	DSI_CMD_SET_CMD_TO_VID_SWITCH,
+	DSI_CMD_SET_POST_CMD_TO_VID_SWITCH,
+	DSI_CMD_SET_VID_TO_CMD_SWITCH,
+	DSI_CMD_SET_POST_VID_TO_CMD_SWITCH,
+	DSI_CMD_SET_PANEL_STATUS,
+	DSI_CMD_SET_MAX
+};
+
+enum dsi_cmd_set_state {
+	DSI_CMD_SET_STATE_LP = 0,
+	DSI_CMD_SET_STATE_HS,
+	DSI_CMD_SET_STATE_MAX
+};
+
+enum dsi_backlight_type {
+	DSI_BACKLIGHT_PWM = 0,
+	DSI_BACKLIGHT_WLED,
+	DSI_BACKLIGHT_DCS,
+	DSI_BACKLIGHT_UNKNOWN,
+	DSI_BACKLIGHT_MAX,
+};
+
+struct dsi_dfps_capabilities {
+	bool dfps_support;
+	enum dsi_dfps_type type;
+	u32 min_refresh_rate;
+	u32 max_refresh_rate;
+};
+
+struct dsi_pinctrl_info {
+	struct pinctrl *pinctrl;
+	struct pinctrl_state *active;
+	struct pinctrl_state *suspend;
+};
+
+struct dsi_panel_phy_props {
+	u32 panel_width_mm;
+	u32 panel_height_mm;
+	enum dsi_panel_rotation rotation;
+};
+
+struct dsi_cmd_desc {
+	struct mipi_dsi_msg msg;
+	bool last_command;
+	u32  post_wait_ms;
+};
+
+struct dsi_panel_cmd_set {
+	enum dsi_cmd_set_type type;
+	enum dsi_cmd_set_state state;
+	u32 count;
+	struct dsi_cmd_desc *cmds;
+};
+
+struct dsi_backlight_config {
+	enum dsi_backlight_type type;
+
+	u32 bl_min_level;
+	u32 bl_max_level;
+	u32 brightness_max_level;
+
+	int en_gpio;
+	/* PWM params */
+	bool pwm_pmi_control;
+	u32 pwm_pmic_bank;
+	u32 pwm_period_usecs;
+	int pwm_gpio;
+
+	/* WLED params */
+	struct led_trigger *wled;
+	struct backlight_device *bd;
+};
+
+struct dsi_reset_seq {
+	u32 level;
+	u32 sleep_ms;
+};
+
+struct dsi_panel_reset_config {
+	struct dsi_reset_seq *sequence;
+	u32 count;
+
+	int reset_gpio;
+	int disp_en_gpio;
+};
+
+/**
+ * struct dsi_panel_dba - DSI DBA panel information
+ * @dba_panel:          Indicate if it's DBA panel
+ * @bridge_name:        Bridge chip name
+ * @hdmi_mode:          If bridge chip is in hdmi mode.
+ */
+struct dsi_panel_dba {
+	bool dba_panel;
+	const char *bridge_name;
+	bool hdmi_mode;
+};
+
+struct dsi_panel {
+	const char *name;
+	struct device_node *panel_of_node;
+	struct mipi_dsi_device mipi_device;
+
+	struct mutex panel_lock;
+	struct drm_panel drm_panel;
+	struct mipi_dsi_host *host;
+	struct device *parent;
+
+	struct dsi_host_common_cfg host_config;
+	struct dsi_video_engine_cfg video_config;
+	struct dsi_cmd_engine_cfg cmd_config;
+
+	struct dsi_dfps_capabilities dfps_caps;
+
+	struct dsi_panel_cmd_set cmd_sets[DSI_CMD_SET_MAX];
+	struct dsi_panel_phy_props phy_props;
+
+	struct dsi_regulator_info power_info;
+	struct dsi_display_mode mode;
+
+	struct dsi_backlight_config bl_config;
+	struct dsi_panel_reset_config reset_config;
+	struct dsi_pinctrl_info pinctrl;
+
+	struct dsi_panel_dba dba_config;
+
+	bool lp11_init;
+};
+
+struct dsi_panel *dsi_panel_get(struct device *parent,
+				struct device_node *of_node);
+void dsi_panel_put(struct dsi_panel *panel);
+
+int dsi_panel_drv_init(struct dsi_panel *panel, struct mipi_dsi_host *host);
+int dsi_panel_drv_deinit(struct dsi_panel *panel);
+
+int dsi_panel_get_mode_count(struct dsi_panel *panel, u32 *count);
+int dsi_panel_get_mode(struct dsi_panel *panel,
+		       u32 index,
+		       struct dsi_display_mode *mode);
+int dsi_panel_validate_mode(struct dsi_panel *panel,
+			    struct dsi_display_mode *mode);
+int dsi_panel_get_host_cfg_for_mode(struct dsi_panel *panel,
+				    struct dsi_display_mode *mode,
+				    struct dsi_host_config *config);
+
+int dsi_panel_get_phy_props(struct dsi_panel *panel,
+			    struct dsi_panel_phy_props *phy_props);
+int dsi_panel_get_dfps_caps(struct dsi_panel *panel,
+			    struct dsi_dfps_capabilities *dfps_caps);
+
+int dsi_panel_pre_prepare(struct dsi_panel *panel);
+
+int dsi_panel_prepare(struct dsi_panel *panel);
+
+int dsi_panel_enable(struct dsi_panel *panel);
+
+int dsi_panel_post_enable(struct dsi_panel *panel);
+
+int dsi_panel_pre_disable(struct dsi_panel *panel);
+
+int dsi_panel_disable(struct dsi_panel *panel);
+
+int dsi_panel_unprepare(struct dsi_panel *panel);
+
+int dsi_panel_post_unprepare(struct dsi_panel *panel);
+
+int dsi_panel_set_backlight(struct dsi_panel *panel, u32 bl_lvl);
+#endif /* _DSI_PANEL_H_ */
diff -Nruw linux-4.4.115-fbx/drivers/gpu/drm/msm/dsi-staging./dsi_phy.c linux-4.4.115-fbx/drivers/gpu/drm/msm/dsi-staging/dsi_phy.c
--- linux-4.4.115-fbx/drivers/gpu/drm/msm/dsi-staging./dsi_phy.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/gpu/drm/msm/dsi-staging/dsi_phy.c	2019-10-29 09:26:23.625203002 +0100
@@ -0,0 +1,860 @@
+/*
+ * Copyright (c) 2016, 2018 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt)	"msm-dsi-phy:[%s] " fmt, __func__
+
+#include <linux/of_device.h>
+#include <linux/err.h>
+#include <linux/regulator/consumer.h>
+#include <linux/clk.h>
+#include <linux/msm-bus.h>
+#include <linux/list.h>
+
+#include "msm_drv.h"
+#include "msm_kms.h"
+#include "msm_gpu.h"
+#include "dsi_phy.h"
+#include "dsi_phy_hw.h"
+#include "dsi_clk_pwr.h"
+#include "dsi_catalog.h"
+
+#define DSI_PHY_DEFAULT_LABEL "MDSS PHY CTRL"
+
+struct dsi_phy_list_item {
+	struct msm_dsi_phy *phy;
+	struct list_head list;
+};
+
+static LIST_HEAD(dsi_phy_list);
+static DEFINE_MUTEX(dsi_phy_list_lock);
+
+static const struct dsi_ver_spec_info dsi_phy_v1_0 = {
+	.version = DSI_PHY_VERSION_1_0,
+	.lane_cfg_count = 4,
+	.strength_cfg_count = 2,
+	.regulator_cfg_count = 1,
+	.timing_cfg_count = 8,
+};
+static const struct dsi_ver_spec_info dsi_phy_v2_0 = {
+	.version = DSI_PHY_VERSION_2_0,
+	.lane_cfg_count = 4,
+	.strength_cfg_count = 2,
+	.regulator_cfg_count = 1,
+	.timing_cfg_count = 8,
+};
+static const struct dsi_ver_spec_info dsi_phy_v3_0 = {
+	.version = DSI_PHY_VERSION_3_0,
+	.lane_cfg_count = 4,
+	.strength_cfg_count = 2,
+	.regulator_cfg_count = 1,
+	.timing_cfg_count = 8,
+};
+static const struct dsi_ver_spec_info dsi_phy_v4_0 = {
+	.version = DSI_PHY_VERSION_4_0,
+	.lane_cfg_count = 4,
+	.strength_cfg_count = 2,
+	.regulator_cfg_count = 1,
+	.timing_cfg_count = 8,
+};
+
+static const struct of_device_id msm_dsi_phy_of_match[] = {
+	{ .compatible = "qcom,dsi-phy-v1.0",
+	  .data = &dsi_phy_v1_0,},
+	{ .compatible = "qcom,dsi-phy-v2.0",
+	  .data = &dsi_phy_v2_0,},
+	{ .compatible = "qcom,dsi-phy-v3.0",
+	  .data = &dsi_phy_v3_0,},
+	{ .compatible = "qcom,dsi-phy-v4.0",
+	  .data = &dsi_phy_v4_0,},
+	{}
+};
+
+static int dsi_phy_regmap_init(struct platform_device *pdev,
+			       struct msm_dsi_phy *phy)
+{
+	int rc = 0;
+	void __iomem *ptr;
+
+	ptr = msm_ioremap(pdev, "dsi_phy", phy->name);
+	if (IS_ERR(ptr)) {
+		rc = PTR_ERR(ptr);
+		return rc;
+	}
+
+	phy->hw.base = ptr;
+
+	pr_debug("[%s] map dsi_phy registers to %pK\n",
+		phy->name, phy->hw.base);
+
+	return rc;
+}
+
+static int dsi_phy_regmap_deinit(struct msm_dsi_phy *phy)
+{
+	pr_debug("[%s] unmap registers\n", phy->name);
+	return 0;
+}
+
+static int dsi_phy_clocks_deinit(struct msm_dsi_phy *phy)
+{
+	int rc = 0;
+	struct dsi_core_clk_info *core = &phy->clks.core_clks;
+
+	if (core->mdp_core_clk)
+		devm_clk_put(&phy->pdev->dev, core->mdp_core_clk);
+	if (core->iface_clk)
+		devm_clk_put(&phy->pdev->dev, core->iface_clk);
+	if (core->core_mmss_clk)
+		devm_clk_put(&phy->pdev->dev, core->core_mmss_clk);
+	if (core->bus_clk)
+		devm_clk_put(&phy->pdev->dev, core->bus_clk);
+
+	memset(core, 0x0, sizeof(*core));
+
+	return rc;
+}
+
+static int dsi_phy_clocks_init(struct platform_device *pdev,
+			       struct msm_dsi_phy *phy)
+{
+	int rc = 0;
+	struct dsi_core_clk_info *core = &phy->clks.core_clks;
+
+	core->mdp_core_clk = devm_clk_get(&pdev->dev, "mdp_core_clk");
+	if (IS_ERR(core->mdp_core_clk)) {
+		rc = PTR_ERR(core->mdp_core_clk);
+		pr_err("failed to get mdp_core_clk, rc=%d\n", rc);
+		goto fail;
+	}
+
+	core->iface_clk = devm_clk_get(&pdev->dev, "iface_clk");
+	if (IS_ERR(core->iface_clk)) {
+		rc = PTR_ERR(core->iface_clk);
+		pr_err("failed to get iface_clk, rc=%d\n", rc);
+		goto fail;
+	}
+
+	core->core_mmss_clk = devm_clk_get(&pdev->dev, "core_mmss_clk");
+	if (IS_ERR(core->core_mmss_clk)) {
+		rc = PTR_ERR(core->core_mmss_clk);
+		pr_err("failed to get core_mmss_clk, rc=%d\n", rc);
+		goto fail;
+	}
+
+	core->bus_clk = devm_clk_get(&pdev->dev, "bus_clk");
+	if (IS_ERR(core->bus_clk)) {
+		rc = PTR_ERR(core->bus_clk);
+		pr_err("failed to get bus_clk, rc=%d\n", rc);
+		goto fail;
+	}
+
+	return rc;
+fail:
+	dsi_phy_clocks_deinit(phy);
+	return rc;
+}
+
+static int dsi_phy_supplies_init(struct platform_device *pdev,
+				 struct msm_dsi_phy *phy)
+{
+	int rc = 0;
+	int i = 0;
+	struct dsi_regulator_info *regs;
+	struct regulator *vreg = NULL;
+
+	regs = &phy->pwr_info.digital;
+	regs->vregs = devm_kzalloc(&pdev->dev, sizeof(struct dsi_vreg),
+				   GFP_KERNEL);
+	if (!regs->vregs) {
+		rc = -ENOMEM;
+		goto error;
+	}
+
+	regs->count = 1;
+	snprintf(regs->vregs->vreg_name,
+		 ARRAY_SIZE(regs->vregs[i].vreg_name),
+		 "%s", "gdsc");
+
+	rc = dsi_clk_pwr_get_dt_vreg_data(&pdev->dev,
+					  &phy->pwr_info.phy_pwr,
+					  "qcom,phy-supply-entries");
+	if (rc) {
+		pr_err("failed to get host power supplies, rc = %d\n", rc);
+		goto error_digital;
+	}
+
+	regs = &phy->pwr_info.digital;
+	for (i = 0; i < regs->count; i++) {
+		vreg = devm_regulator_get(&pdev->dev, regs->vregs[i].vreg_name);
+		rc = PTR_RET(vreg);
+		if (rc) {
+			pr_err("failed to get %s regulator\n",
+			       regs->vregs[i].vreg_name);
+			goto error_host_pwr;
+		}
+		regs->vregs[i].vreg = vreg;
+	}
+
+	regs = &phy->pwr_info.phy_pwr;
+	for (i = 0; i < regs->count; i++) {
+		vreg = devm_regulator_get(&pdev->dev, regs->vregs[i].vreg_name);
+		rc = PTR_RET(vreg);
+		if (rc) {
+			pr_err("failed to get %s regulator\n",
+			       regs->vregs[i].vreg_name);
+			for (--i; i >= 0; i--)
+				devm_regulator_put(regs->vregs[i].vreg);
+			goto error_digital_put;
+		}
+		regs->vregs[i].vreg = vreg;
+	}
+
+	return rc;
+
+error_digital_put:
+	regs = &phy->pwr_info.digital;
+	for (i = 0; i < regs->count; i++)
+		devm_regulator_put(regs->vregs[i].vreg);
+error_host_pwr:
+	devm_kfree(&pdev->dev, phy->pwr_info.phy_pwr.vregs);
+	phy->pwr_info.phy_pwr.vregs = NULL;
+	phy->pwr_info.phy_pwr.count = 0;
+error_digital:
+	devm_kfree(&pdev->dev, phy->pwr_info.digital.vregs);
+	phy->pwr_info.digital.vregs = NULL;
+	phy->pwr_info.digital.count = 0;
+error:
+	return rc;
+}
+
+static int dsi_phy_supplies_deinit(struct msm_dsi_phy *phy)
+{
+	int i = 0;
+	int rc = 0;
+	struct dsi_regulator_info *regs;
+
+	regs = &phy->pwr_info.digital;
+	for (i = 0; i < regs->count; i++) {
+		if (!regs->vregs[i].vreg)
+			pr_err("vreg is NULL, should not reach here\n");
+		else
+			devm_regulator_put(regs->vregs[i].vreg);
+	}
+
+	regs = &phy->pwr_info.phy_pwr;
+	for (i = 0; i < regs->count; i++) {
+		if (!regs->vregs[i].vreg)
+			pr_err("vreg is NULL, should not reach here\n");
+		else
+			devm_regulator_put(regs->vregs[i].vreg);
+	}
+
+	if (phy->pwr_info.phy_pwr.vregs) {
+		devm_kfree(&phy->pdev->dev, phy->pwr_info.phy_pwr.vregs);
+		phy->pwr_info.phy_pwr.vregs = NULL;
+		phy->pwr_info.phy_pwr.count = 0;
+	}
+	if (phy->pwr_info.digital.vregs) {
+		devm_kfree(&phy->pdev->dev, phy->pwr_info.digital.vregs);
+		phy->pwr_info.digital.vregs = NULL;
+		phy->pwr_info.digital.count = 0;
+	}
+
+	return rc;
+}
+
+static int dsi_phy_parse_dt_per_lane_cfgs(struct platform_device *pdev,
+					  struct dsi_phy_per_lane_cfgs *cfg,
+					  char *property)
+{
+	int rc = 0, i = 0, j = 0;
+	const u8 *data;
+	u32 len = 0;
+
+	data = of_get_property(pdev->dev.of_node, property, &len);
+	if (!data) {
+		pr_err("Unable to read Phy %s settings\n", property);
+		return -EINVAL;
+	}
+
+	if (len != DSI_LANE_MAX * cfg->count_per_lane) {
+		pr_err("incorrect phy %s settings, exp=%d, act=%d\n",
+		       property, (DSI_LANE_MAX * cfg->count_per_lane), len);
+		return -EINVAL;
+	}
+
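+	/* The property is a flat byte array in lane-major order:
+	 * count_per_lane bytes for each of the DSI_LANE_MAX lanes.
+	 */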
+	for (i = DSI_LOGICAL_LANE_0; i < DSI_LANE_MAX; i++) {
+		for (j = 0; j < cfg->count_per_lane; j++) {
+			cfg->lane[i][j] = *data;
+			data++;
+		}
+	}
+
+	return rc;
+}
+
+static int dsi_phy_settings_init(struct platform_device *pdev,
+				 struct msm_dsi_phy *phy)
+{
+	int rc = 0;
+	struct dsi_phy_per_lane_cfgs *lane = &phy->cfg.lanecfg;
+	struct dsi_phy_per_lane_cfgs *strength = &phy->cfg.strength;
+	struct dsi_phy_per_lane_cfgs *timing = &phy->cfg.timing;
+	struct dsi_phy_per_lane_cfgs *regs = &phy->cfg.regulators;
+
+	lane->count_per_lane = phy->ver_info->lane_cfg_count;
+	rc = dsi_phy_parse_dt_per_lane_cfgs(pdev, lane,
+					    "qcom,platform-lane-config");
+	if (rc) {
+		pr_err("failed to parse lane cfgs, rc=%d\n", rc);
+		goto err;
+	}
+
+	strength->count_per_lane = phy->ver_info->strength_cfg_count;
+	rc = dsi_phy_parse_dt_per_lane_cfgs(pdev, strength,
+					    "qcom,platform-strength-ctrl");
+	if (rc) {
+		pr_err("failed to parse lane cfgs, rc=%d\n", rc);
+		goto err;
+	}
+
+	regs->count_per_lane = phy->ver_info->regulator_cfg_count;
+	rc = dsi_phy_parse_dt_per_lane_cfgs(pdev, regs,
+					    "qcom,platform-regulator-settings");
+	if (rc) {
+		pr_err("failed to parse lane cfgs, rc=%d\n", rc);
+		goto err;
+	}
+
+	/* Actual timing values are dependent on panel */
+	timing->count_per_lane = phy->ver_info->timing_cfg_count;
+
+	return rc;
+
+err:
+	lane->count_per_lane = 0;
+	strength->count_per_lane = 0;
+	regs->count_per_lane = 0;
+	timing->count_per_lane = 0;
+	return rc;
+}
+
+static int dsi_phy_settings_deinit(struct msm_dsi_phy *phy)
+{
+	memset(&phy->cfg.lanecfg, 0x0, sizeof(phy->cfg.lanecfg));
+	memset(&phy->cfg.strength, 0x0, sizeof(phy->cfg.strength));
+	memset(&phy->cfg.timing, 0x0, sizeof(phy->cfg.timing));
+	memset(&phy->cfg.regulators, 0x0, sizeof(phy->cfg.regulators));
+	return 0;
+}
+
+static int dsi_phy_driver_probe(struct platform_device *pdev)
+{
+	struct msm_dsi_phy *dsi_phy;
+	struct dsi_phy_list_item *item;
+	const struct of_device_id *id;
+	const struct dsi_ver_spec_info *ver_info;
+	int rc = 0;
+	u32 index = 0;
+
+	if (!pdev || !pdev->dev.of_node) {
+		pr_err("pdev not found\n");
+		return -ENODEV;
+	}
+
+	id = of_match_node(msm_dsi_phy_of_match, pdev->dev.of_node);
+	if (!id)
+		return -ENODEV;
+
+	ver_info = id->data;
+
+	item = devm_kzalloc(&pdev->dev, sizeof(*item), GFP_KERNEL);
+	if (!item)
+		return -ENOMEM;
+
+	dsi_phy = devm_kzalloc(&pdev->dev, sizeof(*dsi_phy), GFP_KERNEL);
+	if (!dsi_phy) {
+		devm_kfree(&pdev->dev, item);
+		return -ENOMEM;
+	}
+
+	rc = of_property_read_u32(pdev->dev.of_node, "cell-index", &index);
+	if (rc) {
+		pr_debug("cell index not set, default to 0\n");
+		index = 0;
+	}
+
+	dsi_phy->index = index;
+
+	dsi_phy->name = of_get_property(pdev->dev.of_node, "label", NULL);
+	if (!dsi_phy->name)
+		dsi_phy->name = DSI_PHY_DEFAULT_LABEL;
+
+	pr_debug("Probing %s device\n", dsi_phy->name);
+
+	rc = dsi_phy_regmap_init(pdev, dsi_phy);
+	if (rc) {
+		pr_err("Failed to parse register information, rc=%d\n", rc);
+		goto fail;
+	}
+
+	rc = dsi_phy_clocks_init(pdev, dsi_phy);
+	if (rc) {
+		pr_err("failed to parse clock information, rc = %d\n", rc);
+		goto fail_regmap;
+	}
+
+	rc = dsi_phy_supplies_init(pdev, dsi_phy);
+	if (rc) {
+		pr_err("failed to parse voltage supplies, rc = %d\n", rc);
+		goto fail_clks;
+	}
+
+	rc = dsi_catalog_phy_setup(&dsi_phy->hw, ver_info->version,
+				   dsi_phy->index);
+	if (rc) {
+		pr_err("Catalog does not support version (%d)\n",
+		       ver_info->version);
+		goto fail_supplies;
+	}
+
+	dsi_phy->ver_info = ver_info;
+	rc = dsi_phy_settings_init(pdev, dsi_phy);
+	if (rc) {
+		pr_err("Failed to parse phy setting, rc=%d\n", rc);
+		goto fail_supplies;
+	}
+
+	item->phy = dsi_phy;
+
+	mutex_lock(&dsi_phy_list_lock);
+	list_add(&item->list, &dsi_phy_list);
+	mutex_unlock(&dsi_phy_list_lock);
+
+	mutex_init(&dsi_phy->phy_lock);
+	/* TODO: initialize debugfs */
+	dsi_phy->pdev = pdev;
+	platform_set_drvdata(pdev, dsi_phy);
+	pr_debug("Probe successful for %s\n", dsi_phy->name);
+	return 0;
+
+fail_supplies:
+	(void)dsi_phy_supplies_deinit(dsi_phy);
+fail_clks:
+	(void)dsi_phy_clocks_deinit(dsi_phy);
+fail_regmap:
+	(void)dsi_phy_regmap_deinit(dsi_phy);
+fail:
+	devm_kfree(&pdev->dev, dsi_phy);
+	devm_kfree(&pdev->dev, item);
+	return rc;
+}
+
+static int dsi_phy_driver_remove(struct platform_device *pdev)
+{
+	int rc = 0;
+	struct msm_dsi_phy *phy;
+	struct list_head *pos, *tmp;
+
+	if (!pdev) {
+		pr_err("Invalid device\n");
+		return -EINVAL;
+	}
+
+	phy = platform_get_drvdata(pdev);
+	if (!phy) {
+		pr_err("Invalid device\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&dsi_phy_list_lock);
+	list_for_each_safe(pos, tmp, &dsi_phy_list) {
+		struct dsi_phy_list_item *n;
+
+		n = list_entry(pos, struct dsi_phy_list_item, list);
+		if (n->phy == phy) {
+			list_del(&n->list);
+			devm_kfree(&pdev->dev, n);
+			break;
+		}
+	}
+	mutex_unlock(&dsi_phy_list_lock);
+
+	mutex_lock(&phy->phy_lock);
+	rc = dsi_phy_settings_deinit(phy);
+	if (rc)
+		pr_err("failed to deinitialize phy settings, rc=%d\n", rc);
+
+	rc = dsi_phy_supplies_deinit(phy);
+	if (rc)
+		pr_err("failed to deinitialize voltage supplies, rc=%d\n", rc);
+
+	rc = dsi_phy_clocks_deinit(phy);
+	if (rc)
+		pr_err("failed to deinitialize clocks, rc=%d\n", rc);
+
+	rc = dsi_phy_regmap_deinit(phy);
+	if (rc)
+		pr_err("failed to deinitialize regmap, rc=%d\n", rc);
+	mutex_unlock(&phy->phy_lock);
+
+	mutex_destroy(&phy->phy_lock);
+	devm_kfree(&pdev->dev, phy);
+
+	platform_set_drvdata(pdev, NULL);
+
+	return 0;
+}
+
+static struct platform_driver dsi_phy_platform_driver = {
+	.probe      = dsi_phy_driver_probe,
+	.remove     = dsi_phy_driver_remove,
+	.driver     = {
+		.name   = "dsi_phy",
+		.of_match_table = msm_dsi_phy_of_match,
+	},
+};
+
+static void dsi_phy_enable_hw(struct msm_dsi_phy *phy)
+{
+	if (phy->hw.ops.regulator_enable)
+		phy->hw.ops.regulator_enable(&phy->hw, &phy->cfg.regulators);
+
+	if (phy->hw.ops.enable)
+		phy->hw.ops.enable(&phy->hw, &phy->cfg);
+}
+
+static void dsi_phy_disable_hw(struct msm_dsi_phy *phy)
+{
+	if (phy->hw.ops.disable)
+		phy->hw.ops.disable(&phy->hw);
+
+	if (phy->hw.ops.regulator_disable)
+		phy->hw.ops.regulator_disable(&phy->hw);
+}
+
+/**
+ * dsi_phy_get() - get a dsi phy handle from device node
+ * @of_node:           device node for dsi phy controller
+ *
+ * Gets the DSI PHY handle for the corresponding of_node. The reference count
+ * is incremented to one, and all subsequent gets will fail until the original
+ * client calls a put.
+ *
+ * Return: DSI PHY handle or an error code.
+ */
+struct msm_dsi_phy *dsi_phy_get(struct device_node *of_node)
+{
+	struct list_head *pos, *tmp;
+	struct msm_dsi_phy *phy = NULL;
+
+	mutex_lock(&dsi_phy_list_lock);
+	list_for_each_safe(pos, tmp, &dsi_phy_list) {
+		struct dsi_phy_list_item *n;
+
+		n = list_entry(pos, struct dsi_phy_list_item, list);
+		if (n->phy->pdev->dev.of_node == of_node) {
+			phy = n->phy;
+			break;
+		}
+	}
+	mutex_unlock(&dsi_phy_list_lock);
+
+	if (!phy) {
+		pr_err("Device with of node not found\n");
+		phy = ERR_PTR(-EPROBE_DEFER);
+		return phy;
+	}
+
+	mutex_lock(&phy->phy_lock);
+	if (phy->refcount > 0) {
+		pr_err("[PHY_%d] Device under use\n", phy->index);
+		phy = ERR_PTR(-EINVAL);
+	} else {
+		phy->refcount++;
+	}
+	mutex_unlock(&phy->phy_lock);
+	return phy;
+}
+
+/**
+ * dsi_phy_put() - release dsi phy handle
+ * @dsi_phy:              DSI PHY handle.
+ *
+ * Releases the DSI PHY handle. The driver cleans up all resources and puts
+ * the DSI PHY back into its reset state.
+ */
+void dsi_phy_put(struct msm_dsi_phy *dsi_phy)
+{
+	mutex_lock(&dsi_phy->phy_lock);
+
+	if (dsi_phy->refcount == 0)
+		pr_err("Unbalanced dsi_phy_put call\n");
+	else
+		dsi_phy->refcount--;
+
+	mutex_unlock(&dsi_phy->phy_lock);
+}
+
+/**
+ * dsi_phy_drv_init() - initialize dsi phy driver
+ * @dsi_phy:         DSI PHY handle.
+ *
+ * Initializes DSI PHY driver. Should be called after dsi_phy_get().
+ *
+ * Return: error code.
+ */
+int dsi_phy_drv_init(struct msm_dsi_phy *dsi_phy)
+{
+	return 0;
+}
+
+/**
+ * dsi_phy_drv_deinit() - de-initialize dsi phy driver
+ * @dsi_phy:          DSI PHY handle.
+ *
+ * Release all resources acquired by dsi_phy_drv_init().
+ *
+ * Return: error code.
+ */
+int dsi_phy_drv_deinit(struct msm_dsi_phy *dsi_phy)
+{
+	return 0;
+}
+
+/**
+ * dsi_phy_validate_mode() - validate a display mode
+ * @dsi_phy:            DSI PHY handle.
+ * @mode:               Mode information.
+ *
+ * Validation will fail if the mode cannot be supported by the PHY driver or
+ * hardware.
+ *
+ * Return: error code.
+ */
+int dsi_phy_validate_mode(struct msm_dsi_phy *dsi_phy,
+			  struct dsi_mode_info *mode)
+{
+	int rc = 0;
+
+	if (!dsi_phy || !mode) {
+		pr_err("Invalid params\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&dsi_phy->phy_lock);
+
+	pr_debug("[PHY_%d] Skipping validation\n", dsi_phy->index);
+
+	mutex_unlock(&dsi_phy->phy_lock);
+	return rc;
+}
+
+/**
+ * dsi_phy_set_power_state() - enable/disable dsi phy power supplies
+ * @dsi_phy:               DSI PHY handle.
+ * @enable:                Boolean flag to enable/disable.
+ *
+ * Return: error code.
+ */
+int dsi_phy_set_power_state(struct msm_dsi_phy *dsi_phy, bool enable)
+{
+	int rc = 0;
+
+	if (!dsi_phy) {
+		pr_err("Invalid params\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&dsi_phy->phy_lock);
+
+	if (enable == dsi_phy->power_state) {
+		pr_err("[PHY_%d] No state change\n", dsi_phy->index);
+		goto error;
+	}
+
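+	/*
+	 * Power up the digital supply before the analog PHY supply, and tear
+	 * them down in the reverse order.
+	 */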
+	if (enable) {
+		rc = dsi_pwr_enable_regulator(&dsi_phy->pwr_info.digital, true);
+		if (rc) {
+			pr_err("failed to enable digital regulator\n");
+			goto error;
+		}
+		rc = dsi_pwr_enable_regulator(&dsi_phy->pwr_info.phy_pwr, true);
+		if (rc) {
+			pr_err("failed to enable phy power\n");
+			(void)dsi_pwr_enable_regulator(
+						&dsi_phy->pwr_info.digital,
+						false
+						);
+			goto error;
+		}
+	} else {
+		rc = dsi_pwr_enable_regulator(&dsi_phy->pwr_info.phy_pwr,
+					      false);
+		if (rc) {
+			pr_err("failed to enable digital regulator\n");
+			goto error;
+		}
+		rc = dsi_pwr_enable_regulator(&dsi_phy->pwr_info.digital,
+					      false);
+		if (rc) {
+			pr_err("failed to enable phy power\n");
+			goto error;
+		}
+	}
+
+	dsi_phy->power_state = enable;
+error:
+	mutex_unlock(&dsi_phy->phy_lock);
+	return rc;
+}
+
+/**
+ * dsi_phy_enable() - enable DSI PHY hardware
+ * @dsi_phy:            DSI PHY handle.
+ * @config:             DSI host configuration.
+ * @pll_source:         Source PLL for PHY clock.
+ * @skip_validation:    Validation will not be performed on parameters.
+ *
+ * Validates and enables DSI PHY.
+ *
+ * Return: error code.
+ */
+int dsi_phy_enable(struct msm_dsi_phy *phy,
+		   struct dsi_host_config *config,
+		   enum dsi_phy_pll_source pll_source,
+		   bool skip_validation)
+{
+	int rc = 0;
+
+	if (!phy || !config) {
+		pr_err("Invalid params\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&phy->phy_lock);
+
+	if (!skip_validation)
+		pr_debug("[PHY_%d] TODO: perform validation\n", phy->index);
+
+	rc = dsi_clk_enable_core_clks(&phy->clks.core_clks, true);
+	if (rc) {
+		pr_err("failed to enable core clocks, rc=%d\n", rc);
+		goto error;
+	}
+
+	memcpy(&phy->mode, &config->video_timing, sizeof(phy->mode));
+	phy->data_lanes = config->common_config.data_lanes;
+	phy->dst_format = config->common_config.dst_format;
+	phy->lane_map = config->lane_map;
+	phy->cfg.pll_source = pll_source;
+
+	rc = phy->hw.ops.calculate_timing_params(&phy->hw,
+						 &phy->mode,
+						 &config->common_config,
+						 &phy->cfg.timing);
+	if (rc) {
+		pr_err("[%s] failed to set timing, rc=%d\n", phy->name, rc);
+		goto error_disable_clks;
+	}
+
+	dsi_phy_enable_hw(phy);
+
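+	/*
+	 * Core clocks are only needed while PHY registers are being accessed,
+	 * so they are released on both the success and the error path.
+	 */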
+error_disable_clks:
+	rc = dsi_clk_enable_core_clks(&phy->clks.core_clks, false);
+	if (rc) {
+		pr_err("failed to disable clocks, skip phy disable\n");
+		goto error;
+	}
+error:
+	mutex_unlock(&phy->phy_lock);
+	return rc;
+}
+
+/**
+ * dsi_phy_disable() - disable DSI PHY hardware.
+ * @phy:        DSI PHY handle.
+ *
+ * Return: error code.
+ */
+int dsi_phy_disable(struct msm_dsi_phy *phy)
+{
+	int rc = 0;
+
+	if (!phy) {
+		pr_err("Invalid params\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&phy->phy_lock);
+
+	rc = dsi_clk_enable_core_clks(&phy->clks.core_clks, true);
+	if (rc) {
+		pr_err("failed to enable core clocks, rc=%d\n", rc);
+		goto error;
+	}
+
+	dsi_phy_disable_hw(phy);
+
+	rc = dsi_clk_enable_core_clks(&phy->clks.core_clks, false);
+	if (rc) {
+		pr_err("failed to disable core clocks, rc=%d\n", rc);
+		goto error;
+	}
+
+error:
+	mutex_unlock(&phy->phy_lock);
+	return rc;
+}
+
+/**
+ * dsi_phy_set_timing_params() - timing parameters for the panel
+ * @phy:          DSI PHY handle
+ * @timing:       array holding timing params.
+ * @size:         size of the array.
+ *
+ * When PHY timing calculator is not implemented, this array will be used to
+ * pass PHY timing information.
+ *
+ * Return: error code.
+ */
+int dsi_phy_set_timing_params(struct msm_dsi_phy *phy,
+			      u8 *timing, u32 size)
+{
+	int rc = 0;
+	int i, j;
+	struct dsi_phy_per_lane_cfgs *timing_cfg;
+
+	if (!phy || !timing || !size) {
+		pr_err("Invalid params\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&phy->phy_lock);
+
+	if (size != (DSI_LANE_MAX * phy->cfg.timing.count_per_lane)) {
+		pr_err("Unexpected timing array size %d\n", size);
+		rc = -EINVAL;
+	} else {
+		timing_cfg = &phy->cfg.timing;
+		for (i = DSI_LOGICAL_LANE_0; i < DSI_LANE_MAX; i++) {
+			for (j = 0; j < timing_cfg->count_per_lane; j++) {
+				timing_cfg->lane[i][j] = *timing;
+				timing++;
+			}
+		}
+	}
+	mutex_unlock(&phy->phy_lock);
+	return rc;
+}
+
+void dsi_phy_drv_register(void)
+{
+	platform_driver_register(&dsi_phy_platform_driver);
+}
+
+void dsi_phy_drv_unregister(void)
+{
+	platform_driver_unregister(&dsi_phy_platform_driver);
+}
diff -Nruw linux-4.4.115-fbx/drivers/gpu/drm/msm/dsi-staging./dsi_phy.h linux-4.4.115-fbx/drivers/gpu/drm/msm/dsi-staging/dsi_phy.h
--- linux-4.4.115-fbx/drivers/gpu/drm/msm/dsi-staging./dsi_phy.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/gpu/drm/msm/dsi-staging/dsi_phy.h	2019-01-22 16:16:23.491246298 +0100
@@ -0,0 +1,196 @@
+/*
+ * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DSI_PHY_H_
+#define _DSI_PHY_H_
+
+#include "dsi_defs.h"
+#include "dsi_clk_pwr.h"
+#include "dsi_phy_hw.h"
+
+struct dsi_ver_spec_info {
+	enum dsi_phy_version version;
+	u32 lane_cfg_count;
+	u32 strength_cfg_count;
+	u32 regulator_cfg_count;
+	u32 timing_cfg_count;
+};
+
+/**
+ * struct dsi_phy_clk_info - clock information for DSI controller
+ * @core_clks:         Core clocks needed to access PHY registers.
+ */
+struct dsi_phy_clk_info {
+	struct dsi_core_clk_info core_clks;
+};
+
+/**
+ * struct dsi_phy_power_info - digital and analog power supplies for DSI PHY
+ * @digital:       Digital power supply for DSI PHY.
+ * @phy_pwr:       Analog power supplies for DSI PHY to work.
+ */
+struct dsi_phy_power_info {
+	struct dsi_regulator_info digital;
+	struct dsi_regulator_info phy_pwr;
+};
+
+/**
+ * struct msm_dsi_phy - DSI PHY object
+ * @pdev:              Pointer to platform device.
+ * @index:             Instance id.
+ * @name:              Name of the PHY instance.
+ * @refcount:          Reference count.
+ * @phy_lock:          Mutex for hardware and object access.
+ * @ver_info:          Version specific phy parameters.
+ * @hw:                DSI PHY hardware object.
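+ * @clks:              Clock information for register access.
+ * @pwr_info:          Digital and analog power supplies for the PHY.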
+ * @cfg:               DSI phy configuration.
+ * @power_state:       True if PHY is powered on.
+ * @mode:              Current mode.
+ * @data_lanes:        Number of data lanes used.
+ * @dst_format:        Destination format.
+ * @lane_map:          Map between logical and physical lanes.
+ */
+struct msm_dsi_phy {
+	struct platform_device *pdev;
+	int index;
+	const char *name;
+	u32 refcount;
+	struct mutex phy_lock;
+
+	const struct dsi_ver_spec_info *ver_info;
+	struct dsi_phy_hw hw;
+
+	struct dsi_phy_clk_info clks;
+	struct dsi_phy_power_info pwr_info;
+
+	struct dsi_phy_cfg cfg;
+
+	bool power_state;
+	struct dsi_mode_info mode;
+	enum dsi_data_lanes data_lanes;
+	enum dsi_pixel_format dst_format;
+	struct dsi_lane_mapping lane_map;
+};
+
+/**
+ * dsi_phy_get() - get a dsi phy handle from device node
+ * @of_node:           device node for dsi phy controller
+ *
+ * Gets the DSI PHY handle for the corresponding of_node. The reference count
+ * is incremented to one, and all subsequent gets will fail until the original
+ * client calls a put.
+ *
+ * Return: DSI PHY handle or an error code.
+ */
+struct msm_dsi_phy *dsi_phy_get(struct device_node *of_node);
+
+/**
+ * dsi_phy_put() - release dsi phy handle
+ * @dsi_phy:              DSI PHY handle.
+ *
+ * Releases the DSI PHY handle. The driver cleans up all resources and puts
+ * the DSI PHY back into its reset state.
+ */
+void dsi_phy_put(struct msm_dsi_phy *dsi_phy);
+
+/**
+ * dsi_phy_drv_init() - initialize dsi phy driver
+ * @dsi_phy:         DSI PHY handle.
+ *
+ * Initializes DSI PHY driver. Should be called after dsi_phy_get().
+ *
+ * Return: error code.
+ */
+int dsi_phy_drv_init(struct msm_dsi_phy *dsi_phy);
+
+/**
+ * dsi_phy_drv_deinit() - de-initialize dsi phy driver
+ * @dsi_phy:          DSI PHY handle.
+ *
+ * Release all resources acquired by dsi_phy_drv_init().
+ *
+ * Return: error code.
+ */
+int dsi_phy_drv_deinit(struct msm_dsi_phy *dsi_phy);
+
+/**
+ * dsi_phy_validate_mode() - validate a display mode
+ * @dsi_phy:            DSI PHY handle.
+ * @mode:               Mode information.
+ *
+ * Validation will fail if the mode cannot be supported by the PHY driver or
+ * hardware.
+ *
+ * Return: error code.
+ */
+int dsi_phy_validate_mode(struct msm_dsi_phy *dsi_phy,
+			  struct dsi_mode_info *mode);
+
+/**
+ * dsi_phy_set_power_state() - enable/disable dsi phy power supplies
+ * @dsi_phy:               DSI PHY handle.
+ * @enable:                Boolean flag to enable/disable.
+ *
+ * Return: error code.
+ */
+int dsi_phy_set_power_state(struct msm_dsi_phy *dsi_phy, bool enable);
+
+/**
+ * dsi_phy_enable() - enable DSI PHY hardware
+ * @dsi_phy:            DSI PHY handle.
+ * @config:             DSI host configuration.
+ * @pll_source:         Source PLL for PHY clock.
+ * @skip_validation:    Validation will not be performed on parameters.
+ *
+ * Validates and enables DSI PHY.
+ *
+ * Return: error code.
+ */
+int dsi_phy_enable(struct msm_dsi_phy *dsi_phy,
+		   struct dsi_host_config *config,
+		   enum dsi_phy_pll_source pll_source,
+		   bool skip_validation);
+
+/**
+ * dsi_phy_disable() - disable DSI PHY hardware.
+ * @phy:        DSI PHY handle.
+ *
+ * Return: error code.
+ */
+int dsi_phy_disable(struct msm_dsi_phy *phy);
+
+/**
+ * dsi_phy_set_timing_params() - timing parameters for the panel
+ * @phy:          DSI PHY handle
+ * @timing:       array holding timing params.
+ * @size:         size of the array.
+ *
+ * When PHY timing calculator is not implemented, this array will be used to
+ * pass PHY timing information.
+ *
+ * Return: error code.
+ */
+int dsi_phy_set_timing_params(struct msm_dsi_phy *phy,
+			      u8 *timing, u32 size);
+
+/**
+ * dsi_phy_drv_register() - register platform driver for dsi phy
+ */
+void dsi_phy_drv_register(void);
+
+/**
+ * dsi_phy_drv_unregister() - unregister platform driver
+ */
+void dsi_phy_drv_unregister(void);
+
+#endif /* _DSI_PHY_H_ */
diff -Nruw linux-4.4.115-fbx/drivers/gpu/drm/msm/dsi-staging./dsi_phy_hw.h linux-4.4.115-fbx/drivers/gpu/drm/msm/dsi-staging/dsi_phy_hw.h
--- linux-4.4.115-fbx/drivers/gpu/drm/msm/dsi-staging./dsi_phy_hw.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/gpu/drm/msm/dsi-staging/dsi_phy_hw.h	2019-01-22 16:16:23.491246298 +0100
@@ -0,0 +1,164 @@
+/*
+ * Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DSI_PHY_HW_H_
+#define _DSI_PHY_HW_H_
+
+#include "dsi_defs.h"
+
+#define DSI_MAX_SETTINGS 8
+
+/**
+ * enum dsi_phy_version - DSI PHY version enumeration
+ * @DSI_PHY_VERSION_UNKNOWN:    Unknown version.
+ * @DSI_PHY_VERSION_1_0:        28nm-HPM.
+ * @DSI_PHY_VERSION_2_0:        28nm-LPM.
+ * @DSI_PHY_VERSION_3_0:        20nm.
+ * @DSI_PHY_VERSION_4_0:        14nm.
+ * @DSI_PHY_VERSION_MAX:
+ */
+enum dsi_phy_version {
+	DSI_PHY_VERSION_UNKNOWN,
+	DSI_PHY_VERSION_1_0, /* 28nm-HPM */
+	DSI_PHY_VERSION_2_0, /* 28nm-LPM */
+	DSI_PHY_VERSION_3_0, /* 20nm */
+	DSI_PHY_VERSION_4_0, /* 14nm */
+	DSI_PHY_VERSION_MAX
+};
+
+/**
+ * enum dsi_phy_hw_features - features supported by DSI PHY hardware
+ * @DSI_PHY_DPHY:        Supports DPHY
+ * @DSI_PHY_CPHY:        Supports CPHY
+ */
+enum dsi_phy_hw_features {
+	DSI_PHY_DPHY,
+	DSI_PHY_CPHY,
+	DSI_PHY_MAX_FEATURES
+};
+
+/**
+ * enum dsi_phy_pll_source - pll clock source for PHY.
+ * @DSI_PLL_SOURCE_STANDALONE:    Clock is sourced from native PLL and is not
+ *				  shared by other PHYs.
+ * @DSI_PLL_SOURCE_NATIVE:        Clock is sourced from native PLL and is
+ *				  shared by other PHYs.
+ * @DSI_PLL_SOURCE_NON_NATIVE:    Clock is sourced from other PHYs.
+ * @DSI_PLL_SOURCE_MAX:
+ */
+enum dsi_phy_pll_source {
+	DSI_PLL_SOURCE_STANDALONE = 0,
+	DSI_PLL_SOURCE_NATIVE,
+	DSI_PLL_SOURCE_NON_NATIVE,
+	DSI_PLL_SOURCE_MAX
+};
+
+/**
+ * struct dsi_phy_per_lane_cfgs - Holds register values for PHY parameters
+ * @lane:           Up to DSI_MAX_SETTINGS (8) register values for each lane.
+ * @count_per_lane: Number of values used per lane.
+ */
+struct dsi_phy_per_lane_cfgs {
+	u8 lane[DSI_LANE_MAX][DSI_MAX_SETTINGS];
+	u32 count_per_lane;
+};
+
+/**
+ * struct dsi_phy_cfg - DSI PHY configuration
+ * @lanecfg:          Lane configuration settings.
+ * @strength:         Strength settings for lanes.
+ * @timing:           Timing parameters for lanes.
+ * @regulators:       Regulator settings for lanes.
+ * @pll_source:       PLL source.
+ */
+struct dsi_phy_cfg {
+	struct dsi_phy_per_lane_cfgs lanecfg;
+	struct dsi_phy_per_lane_cfgs strength;
+	struct dsi_phy_per_lane_cfgs timing;
+	struct dsi_phy_per_lane_cfgs regulators;
+	enum dsi_phy_pll_source pll_source;
+};
+
+struct dsi_phy_hw;
+
+/**
+ * struct dsi_phy_hw_ops - Operations for DSI PHY hardware.
+ * @regulator_enable:          Enable PHY regulators.
+ * @regulator_disable:         Disable PHY regulators.
+ * @enable:                    Enable PHY.
+ * @disable:                   Disable PHY.
+ * @calculate_timing_params:   Calculate PHY timing params from mode information
+ */
+struct dsi_phy_hw_ops {
+	/**
+	 * regulator_enable() - enable regulators for DSI PHY
+	 * @phy:      Pointer to DSI PHY hardware object.
+	 * @reg_cfg:  Regulator configuration for all DSI lanes.
+	 */
+	void (*regulator_enable)(struct dsi_phy_hw *phy,
+				 struct dsi_phy_per_lane_cfgs *reg_cfg);
+
+	/**
+	 * regulator_disable() - disable regulators
+	 * @phy:      Pointer to DSI PHY hardware object.
+	 */
+	void (*regulator_disable)(struct dsi_phy_hw *phy);
+
+	/**
+	 * enable() - Enable PHY hardware
+	 * @phy:      Pointer to DSI PHY hardware object.
+	 * @cfg:      Per lane configurations for timing, strength and lane
+	 *	      configurations.
+	 */
+	void (*enable)(struct dsi_phy_hw *phy, struct dsi_phy_cfg *cfg);
+
+	/**
+	 * disable() - Disable PHY hardware
+	 * @phy:      Pointer to DSI PHY hardware object.
+	 */
+	void (*disable)(struct dsi_phy_hw *phy);
+
+	/**
+	 * calculate_timing_params() - calculates timing parameters.
+	 * @phy:      Pointer to DSI PHY hardware object.
+	 * @mode:     Mode information for which timing has to be calculated.
+	 * @config:   DSI host configuration for this mode.
+	 * @timing:   Timing parameters for each lane which will be returned.
+	 */
+	int (*calculate_timing_params)(struct dsi_phy_hw *phy,
+				       struct dsi_mode_info *mode,
+				       struct dsi_host_common_cfg *config,
+				       struct dsi_phy_per_lane_cfgs *timing);
+};
+
+/**
+ * struct dsi_phy_hw - DSI phy hardware object specific to an instance
+ * @base:                  VA for the DSI PHY base address.
+ * @length:                Length of the DSI PHY register base map.
+ * @index:                 Instance ID of the controller.
+ * @version:               DSI PHY version.
+ * @feature_map:           Features supported by DSI PHY.
+ * @ops:                   Function pointer to PHY operations.
+ */
+struct dsi_phy_hw {
+	void __iomem *base;
+	u32 length;
+	u32 index;
+
+	enum dsi_phy_version version;
+
+	DECLARE_BITMAP(feature_map, DSI_PHY_MAX_FEATURES);
+	struct dsi_phy_hw_ops ops;
+};
+
+#endif /* _DSI_PHY_HW_H_ */
diff -Nruw linux-4.4.115-fbx/drivers/gpu/drm/msm/dsi-staging./dsi_phy_hw_v4_0.c linux-4.4.115-fbx/drivers/gpu/drm/msm/dsi-staging/dsi_phy_hw_v4_0.c
--- linux-4.4.115-fbx/drivers/gpu/drm/msm/dsi-staging./dsi_phy_hw_v4_0.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/gpu/drm/msm/dsi-staging/dsi_phy_hw_v4_0.c	2019-01-22 16:16:23.491246298 +0100
@@ -0,0 +1,858 @@
+/*
+ * Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "dsi-phy-hw:" fmt
+#include <linux/math64.h>
+#include <linux/delay.h>
+#include "dsi_hw.h"
+#include "dsi_phy_hw.h"
+
+#define DSIPHY_CMN_REVISION_ID0                   0x0000
+#define DSIPHY_CMN_REVISION_ID1                   0x0004
+#define DSIPHY_CMN_REVISION_ID2                   0x0008
+#define DSIPHY_CMN_REVISION_ID3                   0x000C
+#define DSIPHY_CMN_CLK_CFG0                       0x0010
+#define DSIPHY_CMN_CLK_CFG1                       0x0014
+#define DSIPHY_CMN_GLBL_TEST_CTRL                 0x0018
+#define DSIPHY_CMN_CTRL_0                         0x001C
+#define DSIPHY_CMN_CTRL_1                         0x0020
+#define DSIPHY_CMN_CAL_HW_TRIGGER                 0x0024
+#define DSIPHY_CMN_CAL_SW_CFG0                    0x0028
+#define DSIPHY_CMN_CAL_SW_CFG1                    0x002C
+#define DSIPHY_CMN_CAL_SW_CFG2                    0x0030
+#define DSIPHY_CMN_CAL_HW_CFG0                    0x0034
+#define DSIPHY_CMN_CAL_HW_CFG1                    0x0038
+#define DSIPHY_CMN_CAL_HW_CFG2                    0x003C
+#define DSIPHY_CMN_CAL_HW_CFG3                    0x0040
+#define DSIPHY_CMN_CAL_HW_CFG4                    0x0044
+#define DSIPHY_CMN_PLL_CNTRL                      0x0048
+#define DSIPHY_CMN_LDO_CNTRL                      0x004C
+
+#define DSIPHY_CMN_REGULATOR_CAL_STATUS0          0x0064
+#define DSIPHY_CMN_REGULATOR_CAL_STATUS1          0x0068
+
+/* n = 0..3 for data lanes and n = 4 for clock lane */
+#define DSIPHY_DLNX_CFG0(n)                     (0x100 + ((n) * 0x80))
+#define DSIPHY_DLNX_CFG1(n)                     (0x104 + ((n) * 0x80))
+#define DSIPHY_DLNX_CFG2(n)                     (0x108 + ((n) * 0x80))
+#define DSIPHY_DLNX_CFG3(n)                     (0x10C + ((n) * 0x80))
+#define DSIPHY_DLNX_TEST_DATAPATH(n)            (0x110 + ((n) * 0x80))
+#define DSIPHY_DLNX_TEST_STR(n)                 (0x114 + ((n) * 0x80))
+#define DSIPHY_DLNX_TIMING_CTRL_4(n)            (0x118 + ((n) * 0x80))
+#define DSIPHY_DLNX_TIMING_CTRL_5(n)            (0x11C + ((n) * 0x80))
+#define DSIPHY_DLNX_TIMING_CTRL_6(n)            (0x120 + ((n) * 0x80))
+#define DSIPHY_DLNX_TIMING_CTRL_7(n)            (0x124 + ((n) * 0x80))
+#define DSIPHY_DLNX_TIMING_CTRL_8(n)            (0x128 + ((n) * 0x80))
+#define DSIPHY_DLNX_TIMING_CTRL_9(n)            (0x12C + ((n) * 0x80))
+#define DSIPHY_DLNX_TIMING_CTRL_10(n)           (0x130 + ((n) * 0x80))
+#define DSIPHY_DLNX_TIMING_CTRL_11(n)           (0x134 + ((n) * 0x80))
+#define DSIPHY_DLNX_STRENGTH_CTRL_0(n)          (0x138 + ((n) * 0x80))
+#define DSIPHY_DLNX_STRENGTH_CTRL_1(n)          (0x13C + ((n) * 0x80))
+#define DSIPHY_DLNX_BIST_POLY(n)                (0x140 + ((n) * 0x80))
+#define DSIPHY_DLNX_BIST_SEED0(n)               (0x144 + ((n) * 0x80))
+#define DSIPHY_DLNX_BIST_SEED1(n)               (0x148 + ((n) * 0x80))
+#define DSIPHY_DLNX_BIST_HEAD(n)                (0x14C + ((n) * 0x80))
+#define DSIPHY_DLNX_BIST_SOT(n)                 (0x150 + ((n) * 0x80))
+#define DSIPHY_DLNX_BIST_CTRL0(n)               (0x154 + ((n) * 0x80))
+#define DSIPHY_DLNX_BIST_CTRL1(n)               (0x158 + ((n) * 0x80))
+#define DSIPHY_DLNX_BIST_CTRL2(n)               (0x15C + ((n) * 0x80))
+#define DSIPHY_DLNX_BIST_CTRL3(n)               (0x160 + ((n) * 0x80))
+#define DSIPHY_DLNX_VREG_CNTRL(n)               (0x164 + ((n) * 0x80))
+#define DSIPHY_DLNX_HSTX_STR_STATUS(n)          (0x168 + ((n) * 0x80))
+#define DSIPHY_DLNX_BIST_STATUS0(n)             (0x16C + ((n) * 0x80))
+#define DSIPHY_DLNX_BIST_STATUS1(n)             (0x170 + ((n) * 0x80))
+#define DSIPHY_DLNX_BIST_STATUS2(n)             (0x174 + ((n) * 0x80))
+#define DSIPHY_DLNX_BIST_STATUS3(n)             (0x178 + ((n) * 0x80))
+#define DSIPHY_DLNX_MISR_STATUS(n)              (0x17C + ((n) * 0x80))
+
+#define DSIPHY_PLL_CLKBUFLR_EN                  0x041C
+#define DSIPHY_PLL_PLL_BANDGAP                  0x0508
+
+/**
+ * struct timing_entry - Calculated values for each timing parameter.
+ * @mipi_min:        Minimum value allowed by the MIPI specification.
+ * @mipi_max:        Maximum value allowed by the MIPI specification.
+ * @rec_min:         Minimum recommended register value.
+ * @rec_max:         Maximum recommended register value.
+ * @rec:             Recommended (chosen) register value.
+ * @reg_value:       Value to be programmed in register.
+ */
+struct timing_entry {
+	s32 mipi_min;
+	s32 mipi_max;
+	s32 rec_min;
+	s32 rec_max;
+	s32 rec;
+	u8 reg_value;
+};
+
+/**
+ * struct phy_timing_desc - Timing parameters for DSI PHY.
+ */
+struct phy_timing_desc {
+	struct timing_entry clk_prepare;
+	struct timing_entry clk_zero;
+	struct timing_entry clk_trail;
+	struct timing_entry hs_prepare;
+	struct timing_entry hs_zero;
+	struct timing_entry hs_trail;
+	struct timing_entry hs_rqst;
+	struct timing_entry hs_rqst_clk;
+	struct timing_entry hs_exit;
+	struct timing_entry ta_go;
+	struct timing_entry ta_sure;
+	struct timing_entry ta_set;
+	struct timing_entry clk_post;
+	struct timing_entry clk_pre;
+};
+
+/**
+ * struct phy_clk_params - Clock parameters for PHY timing calculations.
+ * @bitclk_mbps:     Bit clock frequency, in Mbps.
+ * @escclk_numer:    Escape clock numerator, in MHz.
+ * @escclk_denom:    Escape clock denominator (pre-divider).
+ * @tlpx_numer_ns:   TLPX numerator, in nanoseconds.
+ * @treot_ns:        T_EOT margin, in nanoseconds.
+ */
+struct phy_clk_params {
+	u32 bitclk_mbps;
+	u32 escclk_numer;
+	u32 escclk_denom;
+	u32 tlpx_numer_ns;
+	u32 treot_ns;
+};
+
+/**
+ * regulator_enable() - enable regulators for DSI PHY
+ * @phy:      Pointer to DSI PHY hardware object.
+ * @reg_cfg:  Regulator configuration for all DSI lanes.
+ */
+void dsi_phy_hw_v4_0_regulator_enable(struct dsi_phy_hw *phy,
+				      struct dsi_phy_per_lane_cfgs *reg_cfg)
+{
+	int i;
+
+	for (i = DSI_LOGICAL_LANE_0; i < DSI_LANE_MAX; i++)
+		DSI_W32(phy, DSIPHY_DLNX_VREG_CNTRL(i), reg_cfg->lane[i][0]);
+
+	/* make sure all values are written to hardware */
+	wmb();
+
+	pr_debug("[DSI_%d] Phy regulators enabled\n", phy->index);
+}
+
+/**
+ * regulator_disable() - disable regulators
+ * @phy:      Pointer to DSI PHY hardware object.
+ */
+void dsi_phy_hw_v4_0_regulator_disable(struct dsi_phy_hw *phy)
+{
+	pr_debug("[DSI_%d] Phy regulators disabled\n", phy->index);
+}
+
+/**
+ * enable() - Enable PHY hardware
+ * @phy:      Pointer to DSI PHY hardware object.
+ * @cfg:      Per lane configurations for timing, strength and lane
+ *	      configurations.
+ */
+void dsi_phy_hw_v4_0_enable(struct dsi_phy_hw *phy,
+			    struct dsi_phy_cfg *cfg)
+{
+	int i;
+	struct dsi_phy_per_lane_cfgs *timing = &cfg->timing;
+	u32 data;
+
+	DSI_W32(phy, DSIPHY_CMN_LDO_CNTRL, 0x1C);
+
+	DSI_W32(phy, DSIPHY_CMN_GLBL_TEST_CTRL, 0x1);
+	for (i = DSI_LOGICAL_LANE_0; i < DSI_LANE_MAX; i++) {
+		DSI_W32(phy, DSIPHY_DLNX_CFG0(i), cfg->lanecfg.lane[i][0]);
+		DSI_W32(phy, DSIPHY_DLNX_CFG1(i), cfg->lanecfg.lane[i][1]);
+		DSI_W32(phy, DSIPHY_DLNX_CFG2(i), cfg->lanecfg.lane[i][2]);
+		DSI_W32(phy, DSIPHY_DLNX_CFG3(i), cfg->lanecfg.lane[i][3]);
+
+		DSI_W32(phy, DSIPHY_DLNX_TEST_STR(i), 0x88);
+
+		DSI_W32(phy, DSIPHY_DLNX_TIMING_CTRL_4(i), timing->lane[i][0]);
+		DSI_W32(phy, DSIPHY_DLNX_TIMING_CTRL_5(i), timing->lane[i][1]);
+		DSI_W32(phy, DSIPHY_DLNX_TIMING_CTRL_6(i), timing->lane[i][2]);
+		DSI_W32(phy, DSIPHY_DLNX_TIMING_CTRL_7(i), timing->lane[i][3]);
+		DSI_W32(phy, DSIPHY_DLNX_TIMING_CTRL_8(i), timing->lane[i][4]);
+		DSI_W32(phy, DSIPHY_DLNX_TIMING_CTRL_9(i), timing->lane[i][5]);
+		DSI_W32(phy, DSIPHY_DLNX_TIMING_CTRL_10(i), timing->lane[i][6]);
+		DSI_W32(phy, DSIPHY_DLNX_TIMING_CTRL_11(i), timing->lane[i][7]);
+
+		DSI_W32(phy, DSIPHY_DLNX_STRENGTH_CTRL_0(i),
+			cfg->strength.lane[i][0]);
+		DSI_W32(phy, DSIPHY_DLNX_STRENGTH_CTRL_1(i),
+			cfg->strength.lane[i][1]);
+	}
+
+	/* make sure all values are written to hardware before enabling phy */
+	wmb();
+
+	DSI_W32(phy, DSIPHY_CMN_CTRL_1, 0x80);
+	udelay(100);
+	DSI_W32(phy, DSIPHY_CMN_CTRL_1, 0x00);
+
+	data = DSI_R32(phy, DSIPHY_CMN_GLBL_TEST_CTRL);
+
+	switch (cfg->pll_source) {
+	case DSI_PLL_SOURCE_STANDALONE:
+		DSI_W32(phy, DSIPHY_PLL_CLKBUFLR_EN, 0x01);
+		data &= ~BIT(2);
+		break;
+	case DSI_PLL_SOURCE_NATIVE:
+		DSI_W32(phy, DSIPHY_PLL_CLKBUFLR_EN, 0x03);
+		data &= ~BIT(2);
+		break;
+	case DSI_PLL_SOURCE_NON_NATIVE:
+		DSI_W32(phy, DSIPHY_PLL_CLKBUFLR_EN, 0x00);
+		data |= BIT(2);
+		break;
+	default:
+		break;
+	}
+
+	DSI_W32(phy, DSIPHY_CMN_GLBL_TEST_CTRL, data);
+
+	/* Enable bias current for pll1 during split display case */
+	if (cfg->pll_source == DSI_PLL_SOURCE_NON_NATIVE)
+		DSI_W32(phy, DSIPHY_PLL_PLL_BANDGAP, 0x3);
+
+	pr_debug("[DSI_%d]Phy enabled ", phy->index);
+}
+
+/**
+ * disable() - Disable PHY hardware
+ * @phy:      Pointer to DSI PHY hardware object.
+ */
+void dsi_phy_hw_v4_0_disable(struct dsi_phy_hw *phy)
+{
+	DSI_W32(phy, DSIPHY_PLL_CLKBUFLR_EN, 0);
+	DSI_W32(phy, DSIPHY_CMN_GLBL_TEST_CTRL, 0);
+	DSI_W32(phy, DSIPHY_CMN_CTRL_0, 0);
+	pr_debug("[DSI_%d]Phy disabled ", phy->index);
+}
+
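+/* Bits per pixel for each dsi_pixel_format, indexed by destination format. */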
+static const u32 bits_per_pixel[DSI_PIXEL_FORMAT_MAX] = {
+	16, 18, 18, 24, 3, 8, 12 };
+
+/**
+ * calc_clk_prepare - calculates prepare timing params for clk lane.
+ */
+static int calc_clk_prepare(struct phy_clk_params *clk_params,
+			    struct phy_timing_desc *desc,
+			    s32 *actual_frac,
+			    s64 *actual_intermediate)
+{
+	u32 const min_prepare_frac = 50;
+	u64 const multiplier = BIT(20);
+
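+	/*
+	 * The intermediate math below uses Q20 fixed point: values are scaled
+	 * by multiplier (2^20) so fractional nanoseconds survive the integer
+	 * divisions, then scaled back before the register value is derived.
+	 */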
+	struct timing_entry *t = &desc->clk_prepare;
+	int rc = 0;
+	u64 dividend, temp, temp_multiple;
+	s32 frac = 0;
+	s64 intermediate;
+	s64 clk_prep_actual;
+
+	dividend = ((t->rec_max - t->rec_min) * min_prepare_frac * multiplier);
+	temp  = roundup(div_s64(dividend, 100), multiplier);
+	temp += (t->rec_min * multiplier);
+	t->rec = div_s64(temp, multiplier);
+
+	if (t->rec & 0xffffff00) {
+		pr_err("Incorrect rec valuefor clk_prepare\n");
+		rc = -EINVAL;
+	} else {
+		t->reg_value = t->rec;
+	}
+
+	/* calculate theoretical value */
+	temp_multiple = 8 * t->reg_value * clk_params->tlpx_numer_ns
+			 * multiplier;
+	intermediate = div_s64(temp_multiple, clk_params->bitclk_mbps);
+	div_s64_rem(temp_multiple, clk_params->bitclk_mbps, &frac);
+	clk_prep_actual = div_s64((intermediate + frac), multiplier);
+
+	pr_debug("CLK_PREPARE:mipi_min=%d, mipi_max=%d, rec_min=%d, rec_max=%d",
+		 t->mipi_min, t->mipi_max, t->rec_min, t->rec_max);
+	pr_debug(" reg_value=%d, actual=%lld\n", t->reg_value, clk_prep_actual);
+
+	*actual_frac = frac;
+	*actual_intermediate = intermediate;
+
+	return rc;
+}
+
+/**
+ * calc_clk_zero - calculates zero timing params for clk lane.
+ */
+static int calc_clk_zero(struct phy_clk_params *clk_params,
+			 struct phy_timing_desc *desc,
+			 s32 actual_frac,
+			 s64 actual_intermediate)
+{
+	u32 const clk_zero_min_frac = 2;
+	u64 const multiplier = BIT(20);
+
+	int rc = 0;
+	struct timing_entry *t = &desc->clk_zero;
+	s64 mipi_min, rec_temp1, rec_temp2, rec_temp3, rec_min;
+
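+	/* D-PHY spec: clk_prepare plus clk_zero must be at least 300 ns */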
+	mipi_min = ((300 * multiplier) - (actual_intermediate + actual_frac));
+	t->mipi_min = div_s64(mipi_min, multiplier);
+
+	rec_temp1 = div_s64((mipi_min * clk_params->bitclk_mbps),
+			    clk_params->tlpx_numer_ns);
+	rec_temp2 = (rec_temp1 - (11 * multiplier));
+	rec_temp3 = roundup(div_s64(rec_temp2, 8), multiplier);
+	rec_min = (div_s64(rec_temp3, multiplier) - 3);
+	t->rec_min = rec_min;
+	t->rec_max = ((t->rec_min > 255) ? 511 : 255);
+
+	t->rec = DIV_ROUND_UP(
+			(((t->rec_max - t->rec_min) * clk_zero_min_frac) +
+			 (t->rec_min * 100)),
+			100);
+
+	if (t->rec & 0xffffff00) {
+		pr_err("Incorrect rec valuefor clk_zero\n");
+		rc = -EINVAL;
+	} else {
+		t->reg_value = t->rec;
+	}
+
+	pr_debug("CLK_ZERO:mipi_min=%d, mipi_max=%d, rec_min=%d, rec_max=%d, reg_val=%d\n",
+		 t->mipi_min, t->mipi_max, t->rec_min, t->rec_max,
+		 t->reg_value);
+	return rc;
+}
+
+/**
+ * calc_clk_trail - calculates trail timing params for the clock lane.
+ */
+static int calc_clk_trail(struct phy_clk_params *clk_params,
+			  struct phy_timing_desc *desc,
+			  s64 *teot_clk_lane)
+{
+	u64 const multiplier = BIT(20);
+	u32 const phy_timing_frac = 30;
+
+	int rc = 0;
+	struct timing_entry *t = &desc->clk_trail;
+	u64 temp_multiple;
+	s32 frac;
+	s64 mipi_max_tr, rec_temp1, rec_temp2, rec_temp3, mipi_max;
+	s64 teot_clk_lane1;
+
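+	/*
+	 * D-PHY spec: T_EOT = 105 ns + 12 UI, where one UI in ns is
+	 * tlpx_numer_ns / bitclk_mbps.
+	 */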
+	temp_multiple = div_s64(
+			(12 * multiplier * clk_params->tlpx_numer_ns),
+			clk_params->bitclk_mbps);
+	div_s64_rem(temp_multiple, multiplier, &frac);
+
+	mipi_max_tr = ((105 * multiplier) +
+		       (temp_multiple + frac));
+	teot_clk_lane1 = div_s64(mipi_max_tr, multiplier);
+
+	mipi_max = (mipi_max_tr - (clk_params->treot_ns * multiplier));
+	t->mipi_max = div_s64(mipi_max, multiplier);
+
+	temp_multiple = div_s64(
+			(t->mipi_min * multiplier * clk_params->bitclk_mbps),
+			clk_params->tlpx_numer_ns);
+
+	div_s64_rem(temp_multiple, multiplier, &frac);
+	rec_temp1 = temp_multiple + frac + (3 * multiplier);
+	rec_temp2 = div_s64(rec_temp1, 8);
+	rec_temp3 = roundup(rec_temp2, multiplier);
+
+	t->rec_min = div_s64(rec_temp3, multiplier);
+
+	/* recommended max */
+	rec_temp1 = div_s64((mipi_max * clk_params->bitclk_mbps),
+			    clk_params->tlpx_numer_ns);
+	rec_temp2 = rec_temp1 + (3 * multiplier);
+	rec_temp3 = rec_temp2 / 8;
+	t->rec_max = div_s64(rec_temp3, multiplier);
+
+	t->rec = DIV_ROUND_UP(
+		(((t->rec_max - t->rec_min) * phy_timing_frac) +
+		 (t->rec_min * 100)),
+		 100);
+
+	if (t->rec & 0xffffff00) {
+		pr_err("Incorrect rec valuefor clk_zero\n");
+		rc = -EINVAL;
+	} else {
+		t->reg_value = t->rec;
+	}
+
+	*teot_clk_lane = teot_clk_lane1;
+	pr_debug("CLK_TRAIL:mipi_min=%d, mipi_max=%d, rec_min=%d, rec_max=%d, reg_val=%d\n",
+		 t->mipi_min, t->mipi_max, t->rec_min, t->rec_max,
+		 t->reg_value);
+	return rc;
+}
+
+/**
+ * calc_hs_prepare - calculates prepare timing params for data lanes in HS.
+ */
+static int calc_hs_prepare(struct phy_clk_params *clk_params,
+			   struct phy_timing_desc *desc,
+			   u64 *temp_mul)
+{
+	u64 const multiplier = BIT(20);
+	u32 const min_prepare_frac = 50;
+	int rc = 0;
+	struct timing_entry *t = &desc->hs_prepare;
+	u64 temp_multiple, dividend, temp;
+	s32 frac;
+	s64 rec_temp1, rec_temp2, mipi_max, mipi_min;
+	u32 low_clk_multiplier = 0;
+
+	if (clk_params->bitclk_mbps <= 120)
+		low_clk_multiplier = 2;
+	/* mipi min */
+	temp_multiple = div_s64((4 * multiplier * clk_params->tlpx_numer_ns),
+				clk_params->bitclk_mbps);
+	div_s64_rem(temp_multiple, multiplier, &frac);
+	mipi_min = (40 * multiplier) + (temp_multiple + frac);
+	t->mipi_min = div_s64(mipi_min, multiplier);
+
+	/* mipi_max */
+	temp_multiple = div_s64(
+			(6 * multiplier * clk_params->tlpx_numer_ns),
+			clk_params->bitclk_mbps);
+	div_s64_rem(temp_multiple, multiplier, &frac);
+	mipi_max = (85 * multiplier) + temp_multiple;
+	t->mipi_max = div_s64(mipi_max, multiplier);
+
+	/* recommended min */
+	temp_multiple = div_s64((mipi_min * clk_params->bitclk_mbps),
+				clk_params->tlpx_numer_ns);
+	temp_multiple -= (low_clk_multiplier * multiplier);
+	div_s64_rem(temp_multiple, multiplier, &frac);
+	rec_temp1 = roundup(((temp_multiple + frac) / 8), multiplier);
+	t->rec_min = div_s64(rec_temp1, multiplier);
+
+	/* recommended max */
+	temp_multiple = div_s64((mipi_max * clk_params->bitclk_mbps),
+				clk_params->tlpx_numer_ns);
+	temp_multiple -= (low_clk_multiplier * multiplier);
+	div_s64_rem(temp_multiple, multiplier, &frac);
+	rec_temp2 = rounddown((temp_multiple / 8), multiplier);
+	t->rec_max = div_s64(rec_temp2, multiplier);
+
+	/* register value */
+	dividend = ((rec_temp2 - rec_temp1) * min_prepare_frac);
+	temp = roundup(div_u64(dividend, 100), multiplier);
+	t->rec = div_s64((temp + rec_temp1), multiplier);
+
+	if (t->rec & 0xffffff00) {
+		pr_err("Incorrect rec valuefor hs_prepare\n");
+		rc = -EINVAL;
+	} else {
+		t->reg_value = t->rec;
+	}
+
+	temp_multiple = div_s64(
+			(8 * (temp + rec_temp1) * clk_params->tlpx_numer_ns),
+			clk_params->bitclk_mbps);
+
+	*temp_mul = temp_multiple;
+	pr_debug("HS_PREP:mipi_min=%d, mipi_max=%d, rec_min=%d, rec_max=%d, reg_val=%d\n",
+		 t->mipi_min, t->mipi_max, t->rec_min, t->rec_max,
+		 t->reg_value);
+	return rc;
+}
+
+/**
+ * calc_hs_zero - calculates zero timing params for data lanes in HS.
+ */
+static int calc_hs_zero(struct phy_clk_params *clk_params,
+			struct phy_timing_desc *desc,
+			u64 temp_multiple)
+{
+	u32 const hs_zero_min_frac = 10;
+	u64 const multiplier = BIT(20);
+	int rc = 0;
+	struct timing_entry *t = &desc->hs_zero;
+	s64 rec_temp1, rec_temp2, rec_temp3, mipi_min;
+	s64 rec_min;
+
+	mipi_min = div_s64((10 * clk_params->tlpx_numer_ns * multiplier),
+			   clk_params->bitclk_mbps);
+	rec_temp1 = (145 * multiplier) + mipi_min - temp_multiple;
+	t->mipi_min = div_s64(rec_temp1, multiplier);
+
+	/* recommended min */
+	rec_temp1 = div_s64((rec_temp1 * clk_params->bitclk_mbps),
+			    clk_params->tlpx_numer_ns);
+	rec_temp2 = rec_temp1 - (11 * multiplier);
+	rec_temp3 = roundup((rec_temp2 / 8), multiplier);
+	rec_min = rec_temp3 - (3 * multiplier);
+	t->rec_min =  div_s64(rec_min, multiplier);
+	t->rec_max = ((t->rec_min > 255) ? 511 : 255);
+
+	t->rec = DIV_ROUND_UP(
+			(((t->rec_max - t->rec_min) * hs_zero_min_frac) +
+			 (t->rec_min * 100)),
+			100);
+
+	if (t->rec & 0xffffff00) {
+		pr_err("Incorrect rec valuefor hs_zero\n");
+		rc = -EINVAL;
+	} else {
+		t->reg_value = t->rec;
+	}
+
+	pr_debug("HS_ZERO:mipi_min=%d, mipi_max=%d, rec_min=%d, rec_max=%d, reg_val=%d\n",
+		 t->mipi_min, t->mipi_max, t->rec_min, t->rec_max,
+		 t->reg_value);
+
+	return rc;
+}
+
+/**
+ * calc_hs_trail - calculates trail timing params for data lanes in HS.
+ */
+static int calc_hs_trail(struct phy_clk_params *clk_params,
+			 struct phy_timing_desc *desc,
+			 u64 teot_clk_lane)
+{
+	u32 const phy_timing_frac = 30;
+	int rc = 0;
+	struct timing_entry *t = &desc->hs_trail;
+	s64 rec_temp1;
+
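+	/* D-PHY spec: HS trail is at least 60 ns + 4 UI */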
+	t->mipi_min = 60 +
+			mult_frac(clk_params->tlpx_numer_ns, 4,
+				  clk_params->bitclk_mbps);
+
+	t->mipi_max = teot_clk_lane - clk_params->treot_ns;
+
+	t->rec_min = DIV_ROUND_UP(
+		((t->mipi_min * clk_params->bitclk_mbps) +
+		 (3 * clk_params->tlpx_numer_ns)),
+		(8 * clk_params->tlpx_numer_ns));
+
+	rec_temp1 = ((t->mipi_max * clk_params->bitclk_mbps) +
+		     (3 * clk_params->tlpx_numer_ns));
+	t->rec_max = (rec_temp1 / (8 * clk_params->tlpx_numer_ns));
+	rec_temp1 = DIV_ROUND_UP(
+			((t->rec_max - t->rec_min) * phy_timing_frac),
+			100);
+	t->rec = rec_temp1 + t->rec_min;
+
+	if (t->rec & 0xffffff00) {
+		pr_err("Incorrect rec valuefor hs_trail\n");
+		rc = -EINVAL;
+	} else {
+		t->reg_value = t->rec;
+	}
+
+	pr_debug("HS_TRAIL:mipi_min=%d, mipi_max=%d, rec_min=%d, rec_max=%d, reg_val=%d\n",
+		 t->mipi_min, t->mipi_max, t->rec_min, t->rec_max,
+		 t->reg_value);
+
+	return rc;
+}
+
+/**
+ * calc_hs_rqst - calculates rqst timing params for data lanes in HS.
+ */
+static int calc_hs_rqst(struct phy_clk_params *clk_params,
+			struct phy_timing_desc *desc)
+{
+	int rc = 0;
+	struct timing_entry *t = &desc->hs_rqst;
+
+	t->rec = DIV_ROUND_UP(
+		((t->mipi_min * clk_params->bitclk_mbps) -
+		 (8 * clk_params->tlpx_numer_ns)),
+		(8 * clk_params->tlpx_numer_ns));
+
+	if (t->rec & 0xffffff00) {
+		pr_err("Incorrect rec valuefor hs_rqst, %d\n", t->rec);
+		rc = -EINVAL;
+	} else {
+		t->reg_value = t->rec;
+	}
+
+	pr_debug("HS_RQST:mipi_min=%d, mipi_max=%d, rec_min=%d, rec_max=%d, reg_val=%d\n",
+		 t->mipi_min, t->mipi_max, t->rec_min, t->rec_max,
+		 t->reg_value);
+
+	return rc;
+}
+
+/**
+ * calc_hs_exit - calculates exit timing params for data lanes in HS.
+ */
+static int calc_hs_exit(struct phy_clk_params *clk_params,
+			struct phy_timing_desc *desc)
+{
+	u32 const hs_exit_min_frac = 10;
+	int rc = 0;
+	struct timing_entry *t = &desc->hs_exit;
+
+	t->rec_min = (DIV_ROUND_UP(
+			(t->mipi_min * clk_params->bitclk_mbps),
+			(8 * clk_params->tlpx_numer_ns)) - 1);
+
+	t->rec = DIV_ROUND_UP(
+		(((t->rec_max - t->rec_min) * hs_exit_min_frac) +
+		 (t->rec_min * 100)),
+		100);
+
+	if (t->rec & 0xffffff00) {
+		pr_err("Incorrect rec valuefor hs_exit\n");
+		rc = -EINVAL;
+	} else {
+		t->reg_value = t->rec;
+	}
+
+	pr_debug("HS_EXIT:mipi_min=%d, mipi_max=%d, rec_min=%d, rec_max=%d, reg_val=%d\n",
+		 t->mipi_min, t->mipi_max, t->rec_min, t->rec_max,
+		 t->reg_value);
+
+	return rc;
+}
+
+/**
+ * calc_hs_rqst_clk - calculates rqst timing params for the clock lane.
+ */
+static int calc_hs_rqst_clk(struct phy_clk_params *clk_params,
+			    struct phy_timing_desc *desc)
+{
+	int rc = 0;
+	struct timing_entry *t = &desc->hs_rqst_clk;
+
+	t->rec = DIV_ROUND_UP(
+		((t->mipi_min * clk_params->bitclk_mbps) -
+		 (8 * clk_params->tlpx_numer_ns)),
+		(8 * clk_params->tlpx_numer_ns));
+
+	if (t->rec & 0xffffff00) {
+		pr_err("Incorrect rec valuefor hs_rqst_clk\n");
+		rc = -EINVAL;
+	} else {
+		t->reg_value = t->rec;
+	}
+
+	pr_debug("HS_RQST_CLK:mipi_min=%d, mipi_max=%d, rec_min=%d, rec_max=%d, reg_val=%d\n",
+		 t->mipi_min, t->mipi_max, t->rec_min, t->rec_max,
+		 t->reg_value);
+
+	return rc;
+}
+
+/**
+ * dsi_phy_calc_timing_params - calculates timing parameters for a given bit clock
+ */
+static int dsi_phy_calc_timing_params(struct phy_clk_params *clk_params,
+				      struct phy_timing_desc *desc)
+{
+	int rc = 0;
+	s32 actual_frac = 0;
+	s64 actual_intermediate = 0;
+	u64 temp_multiple;
+	s64 teot_clk_lane;
+
+	rc = calc_clk_prepare(clk_params, desc, &actual_frac,
+			      &actual_intermediate);
+	if (rc) {
+		pr_err("clk_prepare calculations failed, rc=%d\n", rc);
+		goto error;
+	}
+
+	rc = calc_clk_zero(clk_params, desc, actual_frac, actual_intermediate);
+	if (rc) {
+		pr_err("clk_zero calculations failed, rc=%d\n", rc);
+		goto error;
+	}
+
+	rc = calc_clk_trail(clk_params, desc, &teot_clk_lane);
+	if (rc) {
+		pr_err("clk_trail calculations failed, rc=%d\n", rc);
+		goto error;
+	}
+
+	rc = calc_hs_prepare(clk_params, desc, &temp_multiple);
+	if (rc) {
+		pr_err("hs_prepare calculations failed, rc=%d\n", rc);
+		goto error;
+	}
+
+	rc = calc_hs_zero(clk_params, desc, temp_multiple);
+	if (rc) {
+		pr_err("hs_zero calculations failed, rc=%d\n", rc);
+		goto error;
+	}
+
+	rc = calc_hs_trail(clk_params, desc, teot_clk_lane);
+	if (rc) {
+		pr_err("hs_trail calculations failed, rc=%d\n", rc);
+		goto error;
+	}
+
+	rc = calc_hs_rqst(clk_params, desc);
+	if (rc) {
+		pr_err("hs_rqst calculations failed, rc=%d\n", rc);
+		goto error;
+	}
+
+	rc = calc_hs_exit(clk_params, desc);
+	if (rc) {
+		pr_err("hs_exit calculations failed, rc=%d\n", rc);
+		goto error;
+	}
+
+	rc = calc_hs_rqst_clk(clk_params, desc);
+	if (rc) {
+		pr_err("hs_rqst_clk calculations failed, rc=%d\n", rc);
+		goto error;
+	}
+error:
+	return rc;
+}
+
+/**
+ * calculate_timing_params() - calculates timing parameters.
+ * @phy:      Pointer to DSI PHY hardware object.
+ * @mode:     Mode information for which timing has to be calculated.
+ * @config:   DSI host configuration for this mode.
+ * @timing:   Timing parameters for each lane which will be returned.
+ */
+int dsi_phy_hw_v4_0_calculate_timing_params(struct dsi_phy_hw *phy,
+					    struct dsi_mode_info *mode,
+					    struct dsi_host_common_cfg *host,
+					   struct dsi_phy_per_lane_cfgs *timing)
+{
+	/* constants */
+	u32 const esc_clk_mhz = 192; /* TODO: esc clock is hardcoded */
+	u32 const esc_clk_mmss_cc_prediv = 10;
+	u32 const tlpx_numer = 1000;
+	u32 const tr_eot = 20;
+	u32 const clk_prepare_spec_min = 38;
+	u32 const clk_prepare_spec_max = 95;
+	u32 const clk_trail_spec_min = 60;
+	u32 const hs_exit_spec_min = 100;
+	u32 const hs_exit_reco_max = 255;
+	u32 const hs_rqst_spec_min = 50;
+
+	/* local vars */
+	int rc = 0;
+	int i;
+	u32 h_total, v_total;
+	u64 inter_num;
+	u32 num_of_lanes = 0;
+	u32 bpp;
+	u64 x, y;
+	struct phy_timing_desc desc;
+	struct phy_clk_params clk_params = {0};
+
+	memset(&desc, 0x0, sizeof(desc));
+	h_total = DSI_H_TOTAL(mode);
+	v_total = DSI_V_TOTAL(mode);
+
+	bpp = bits_per_pixel[host->dst_format];
+
+	inter_num = bpp * mode->refresh_rate;
+
+	if (host->data_lanes & DSI_DATA_LANE_0)
+		num_of_lanes++;
+	if (host->data_lanes & DSI_DATA_LANE_1)
+		num_of_lanes++;
+	if (host->data_lanes & DSI_DATA_LANE_2)
+		num_of_lanes++;
+	if (host->data_lanes & DSI_DATA_LANE_3)
+		num_of_lanes++;
+
+
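+	/*
+	 * Per-lane bit clock: pixel rate (h_total * v_total * refresh_rate)
+	 * times bits per pixel, split across the active data lanes, then
+	 * scaled from Hz down to Mbps.
+	 */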
+	x = mult_frac(v_total * h_total, inter_num, num_of_lanes);
+	y = rounddown(x, 1);
+
+	clk_params.bitclk_mbps = rounddown(mult_frac(y, 1, 1000000), 1);
+	clk_params.escclk_numer = esc_clk_mhz;
+	clk_params.escclk_denom = esc_clk_mmss_cc_prediv;
+	clk_params.tlpx_numer_ns = tlpx_numer;
+	clk_params.treot_ns = tr_eot;
+
+	/* Setup default parameters */
+	desc.clk_prepare.mipi_min = clk_prepare_spec_min;
+	desc.clk_prepare.mipi_max = clk_prepare_spec_max;
+	desc.clk_trail.mipi_min = clk_trail_spec_min;
+	desc.hs_exit.mipi_min = hs_exit_spec_min;
+	desc.hs_exit.rec_max = hs_exit_reco_max;
+
+	desc.clk_prepare.rec_min = DIV_ROUND_UP(
+			(desc.clk_prepare.mipi_min * clk_params.bitclk_mbps),
+			(8 * clk_params.tlpx_numer_ns)
+			);
+
+	desc.clk_prepare.rec_max = rounddown(
+		mult_frac((desc.clk_prepare.mipi_max * clk_params.bitclk_mbps),
+			  1, (8 * clk_params.tlpx_numer_ns)),
+		1);
+
+	desc.hs_rqst.mipi_min = hs_rqst_spec_min;
+	desc.hs_rqst_clk.mipi_min = hs_rqst_spec_min;
+
+	pr_debug("BIT CLOCK = %d, tlpx_numer_ns=%d, treot_ns=%d\n",
+	       clk_params.bitclk_mbps, clk_params.tlpx_numer_ns,
+	       clk_params.treot_ns);
+	rc = dsi_phy_calc_timing_params(&clk_params, &desc);
+	if (rc) {
+		pr_err("Timing calc failed, rc=%d\n", rc);
+		goto error;
+	}
+
+
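+	/*
+	 * Pack the results in TIMING_CTRL_4..11 order for each lane; the
+	 * clock lane takes the clk_* values, the data lanes the hs_* values.
+	 */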
+	for (i = DSI_LOGICAL_LANE_0; i < DSI_LANE_MAX; i++) {
+		timing->lane[i][0] = desc.hs_exit.reg_value;
+
+		if (i == DSI_LOGICAL_CLOCK_LANE)
+			timing->lane[i][1] = desc.clk_zero.reg_value;
+		else
+			timing->lane[i][1] = desc.hs_zero.reg_value;
+
+		if (i == DSI_LOGICAL_CLOCK_LANE)
+			timing->lane[i][2] = desc.clk_prepare.reg_value;
+		else
+			timing->lane[i][2] = desc.hs_prepare.reg_value;
+
+		if (i == DSI_LOGICAL_CLOCK_LANE)
+			timing->lane[i][3] = desc.clk_trail.reg_value;
+		else
+			timing->lane[i][3] = desc.hs_trail.reg_value;
+
+		if (i == DSI_LOGICAL_CLOCK_LANE)
+			timing->lane[i][4] = desc.hs_rqst_clk.reg_value;
+		else
+			timing->lane[i][4] = desc.hs_rqst.reg_value;
+
+		timing->lane[i][5] = 0x3;
+		timing->lane[i][6] = 0x4;
+		timing->lane[i][7] = 0xA0;
+		pr_debug("[%d][%d %d %d %d %d]\n", i, timing->lane[i][0],
+						    timing->lane[i][1],
+						    timing->lane[i][2],
+						    timing->lane[i][3],
+						    timing->lane[i][4]);
+	}
+	timing->count_per_lane = 8;
+
+error:
+	return rc;
+}
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/gpu/drm/msm/hdmi/hdmi_util.c	2019-04-24 19:28:47.284498086 +0200
@@ -0,0 +1,149 @@
+/*
+ * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/of_irq.h>
+#include "hdmi.h"
+
+void init_ddc(struct hdmi *hdmi)
+{
+	uint32_t ddc_speed;
+
+	hdmi_write(hdmi, REG_HDMI_DDC_CTRL,
+			HDMI_DDC_CTRL_SW_STATUS_RESET);
+	hdmi_write(hdmi, REG_HDMI_DDC_CTRL,
+			HDMI_DDC_CTRL_SOFT_RESET);
+
+	ddc_speed = hdmi_read(hdmi, REG_HDMI_DDC_SPEED);
+	ddc_speed |= HDMI_DDC_SPEED_THRESHOLD(2);
+	ddc_speed |= HDMI_DDC_SPEED_PRESCALE(12);
+
+	hdmi_write(hdmi, REG_HDMI_DDC_SPEED,
+			ddc_speed);
+
+	hdmi_write(hdmi, REG_HDMI_DDC_SETUP,
+			HDMI_DDC_SETUP_TIMEOUT(0xff));
+
+	/* enable reference timer for 19us */
+	hdmi_write(hdmi, REG_HDMI_DDC_REF,
+			HDMI_DDC_REF_REFTIMER_ENABLE |
+			HDMI_DDC_REF_REFTIMER(19));
+}
+
+int ddc_clear_irq(struct hdmi *hdmi)
+{
+	struct hdmi_i2c_adapter *hdmi_i2c = to_hdmi_i2c_adapter(hdmi->i2c);
+	struct drm_device *dev = hdmi->dev;
+	uint32_t retry = 0xffff;
+	uint32_t ddc_int_ctrl;
+
+	do {
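+	/* ack any pending SW_DONE interrupt until the status bit clears */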
+		--retry;
+
+		hdmi_write(hdmi, REG_HDMI_DDC_INT_CTRL,
+				HDMI_DDC_INT_CTRL_SW_DONE_ACK |
+				HDMI_DDC_INT_CTRL_SW_DONE_MASK);
+
+		ddc_int_ctrl = hdmi_read(hdmi, REG_HDMI_DDC_INT_CTRL);
+
+	} while ((ddc_int_ctrl & HDMI_DDC_INT_CTRL_SW_DONE_INT) && retry);
+
+	if (!retry) {
+		dev_err(dev->dev, "timeout waiting for DDC\n");
+		return -ETIMEDOUT;
+	}
+
+	hdmi_i2c->sw_done = false;
+
+	return 0;
+}
+
+int hdmi_ddc_read(struct hdmi *hdmi, u16 addr, u8 offset,
+		  u8 *data, u16 data_len, bool self_retry)
+{
+	int rc;
+	int retry = 10;
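+
+	/*
+	 * DDC uses 8-bit device addresses while the i2c core expects 7-bit
+	 * ones, hence the addr >> 1 below. The transfer writes the register
+	 * offset first, then reads data_len bytes back.
+	 */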
+	struct i2c_msg msgs[] = {
+		{
+			.addr	= addr >> 1,
+			.flags	= 0,
+			.len	= 1,
+			.buf	= &offset,
+		}, {
+			.addr	= addr >> 1,
+			.flags	= I2C_M_RD,
+			.len	= data_len,
+			.buf	= data,
+		}
+	};
+
+	DBG("Start DDC read");
+retry:
+	rc = i2c_transfer(hdmi->i2c, msgs, 2);
+	retry--;
+
+	if (rc == 2)
+		rc = 0;
+	else if (self_retry && (retry > 0))
+		goto retry;
+	else
+		rc = -EIO;
+
+	DBG("End DDC read %d", rc);
+
+	return rc;
+}
+
+#define HDCP_DDC_WRITE_MAX_BYTE_NUM 1024
+
+int hdmi_ddc_write(struct hdmi *hdmi, u16 addr, u8 offset,
+				   u8 *data, u16 data_len, bool self_retry)
+{
+	int rc;
+	int retry = 10;
+	u8 buf[HDCP_DDC_WRITE_MAX_BYTE_NUM];
+	struct i2c_msg msgs[] = {
+		{
+			.addr	= addr >> 1,
+			.flags	= 0,
+			.len	= 1,
+		}
+	};
+
+	pr_debug("TESTING ! REMOVE RETRY Start DDC write");
+	if (data_len > (HDCP_DDC_WRITE_MAX_BYTE_NUM - 1)) {
+		pr_err("%s: write size too big\n", __func__);
+		return -ERANGE;
+	}
+
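+	/* prepend the register offset so offset and payload go out as one write */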
+	buf[0] = offset;
+	memcpy(&buf[1], data, data_len);
+	msgs[0].buf = buf;
+	msgs[0].len = data_len + 1;
+retry:
+	rc = i2c_transfer(hdmi->i2c, msgs, 1);
+	retry--;
+	if (rc == 1)
+		rc = 0;
+	else if (self_retry && (retry > 0))
+		goto retry;
+	else
+		rc = -EIO;
+
+	DBG("End DDC write %d", rc);
+
+	return rc;
+}
diff -Nruw linux-4.4.115-fbx/drivers/gpu/drm/msm/hdmi-staging./sde_hdmi_audio.c linux-4.4.115-fbx/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_audio.c
--- linux-4.4.115-fbx/drivers/gpu/drm/msm/hdmi-staging./sde_hdmi_audio.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_audio.c	2019-01-22 16:16:23.495246334 +0100
@@ -0,0 +1,357 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/slab.h>
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/iopoll.h>
+#include <linux/types.h>
+#include <linux/switch.h>
+#include <linux/gcd.h>
+
+#include "drm_edid.h"
+#include "sde_kms.h"
+#include "sde_hdmi.h"
+#include "sde_hdmi_regs.h"
+#include "hdmi.h"
+
+#define HDMI_AUDIO_INFO_FRAME_PACKET_HEADER 0x84
+#define HDMI_AUDIO_INFO_FRAME_PACKET_VERSION 0x1
+#define HDMI_AUDIO_INFO_FRAME_PACKET_LENGTH 0x0A
+
+#define HDMI_ACR_N_MULTIPLIER 128
+#define DEFAULT_AUDIO_SAMPLE_RATE_HZ 48000
+
+/* Supported HDMI Audio channels */
+enum hdmi_audio_channels {
+	AUDIO_CHANNEL_2 = 2,
+	AUDIO_CHANNEL_3,
+	AUDIO_CHANNEL_4,
+	AUDIO_CHANNEL_5,
+	AUDIO_CHANNEL_6,
+	AUDIO_CHANNEL_7,
+	AUDIO_CHANNEL_8,
+};
+
+/* parameters for clock regeneration */
+struct hdmi_audio_acr {
+	u32 n;
+	u32 cts;
+};
+
+enum hdmi_audio_sample_rates {
+	AUDIO_SAMPLE_RATE_32KHZ,
+	AUDIO_SAMPLE_RATE_44_1KHZ,
+	AUDIO_SAMPLE_RATE_48KHZ,
+	AUDIO_SAMPLE_RATE_88_2KHZ,
+	AUDIO_SAMPLE_RATE_96KHZ,
+	AUDIO_SAMPLE_RATE_176_4KHZ,
+	AUDIO_SAMPLE_RATE_192KHZ,
+	AUDIO_SAMPLE_RATE_MAX
+};
+
+struct sde_hdmi_audio {
+	struct hdmi *hdmi;
+	struct msm_ext_disp_audio_setup_params params;
+	u32 pclk;
+};
+
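+/*
+ * Convert a sample rate in Hz to an hdmi_audio_sample_rates enum value, in
+ * place; unsupported rates are logged and left unchanged.
+ */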
+static void _sde_hdmi_audio_get_audio_sample_rate(u32 *sample_rate_hz)
+{
+	u32 rate = *sample_rate_hz;
+
+	switch (rate) {
+	case 32000:
+		*sample_rate_hz = AUDIO_SAMPLE_RATE_32KHZ;
+		break;
+	case 44100:
+		*sample_rate_hz = AUDIO_SAMPLE_RATE_44_1KHZ;
+		break;
+	case 48000:
+		*sample_rate_hz = AUDIO_SAMPLE_RATE_48KHZ;
+		break;
+	case 88200:
+		*sample_rate_hz = AUDIO_SAMPLE_RATE_88_2KHZ;
+		break;
+	case 96000:
+		*sample_rate_hz = AUDIO_SAMPLE_RATE_96KHZ;
+		break;
+	case 176400:
+		*sample_rate_hz = AUDIO_SAMPLE_RATE_176_4KHZ;
+		break;
+	case 192000:
+		*sample_rate_hz = AUDIO_SAMPLE_RATE_192KHZ;
+		break;
+	default:
+		SDE_ERROR("%d unchanged\n", rate);
+		break;
+	}
+}
+
+static void _sde_hdmi_audio_get_acr_param(u32 pclk, u32 fs,
+	struct hdmi_audio_acr *acr)
+{
+	u32 div, mul;
+
+	if (!acr) {
+		SDE_ERROR("invalid data\n");
+		return;
+	}
+
+	/*
+	 * as per HDMI specification, N/CTS = (128*fs)/pclk.
+	 * get the ratio using this formula.
+	 */
+	acr->n = HDMI_ACR_N_MULTIPLIER * fs;
+	acr->cts = pclk;
+
+	/* get the greatest common divisor for the ratio */
+	div = gcd(acr->n, acr->cts);
+
+	/* get the n and cts values wrt N/CTS formula */
+	acr->n /= div;
+	acr->cts /= div;
+
+	/*
+	 * as per HDMI specification, 300 <= 128*fs/N <= 1500
+	 * with a target of 128*fs/N = 1000. To get closest
+	 * value without truncating fractional values, find
+	 * the corresponding multiplier
+	 */
+	mul = ((HDMI_ACR_N_MULTIPLIER * fs / HDMI_KHZ_TO_HZ)
+		+ (acr->n - 1)) / acr->n;
+
+	acr->n *= mul;
+	acr->cts *= mul;
+}
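+
+/*
+ * Worked N/CTS example (editorial, not part of the original code): with
+ * fs = 48000 and pclk = 148500000 (1080p60), N starts at 128 * 48000 =
+ * 6144000 and CTS at 148500000; gcd() reduces them to N = 512,
+ * CTS = 12375. The target 128 * fs / 1000 = 6144 then yields mul = 12,
+ * giving the final N = 6144 and CTS = 148500 that the HDMI spec
+ * recommends for 48 kHz at 148.5 MHz.
+ */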
+
+static void _sde_hdmi_audio_acr_enable(struct sde_hdmi_audio *audio)
+{
+	struct hdmi_audio_acr acr;
+	struct msm_ext_disp_audio_setup_params *params;
+	u32 pclk, layout, multiplier = 1, sample_rate;
+	u32 acr_pkt_ctl, aud_pkt_ctl2, acr_reg_cts, acr_reg_n;
+	struct hdmi *hdmi;
+
+	hdmi = audio->hdmi;
+	params = &audio->params;
+	pclk = audio->pclk;
+	sample_rate = params->sample_rate_hz;
+
+	_sde_hdmi_audio_get_acr_param(pclk, sample_rate, &acr);
+	_sde_hdmi_audio_get_audio_sample_rate(&sample_rate);
+
+	layout = (params->num_of_channels == AUDIO_CHANNEL_2) ? 0 : 1;
+
+	SDE_DEBUG("n=%u, cts=%u, layout=%u\n", acr.n, acr.cts, layout);
+
+	/* AUDIO_PRIORITY | SOURCE */
+	acr_pkt_ctl = BIT(31) | BIT(8);
+
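+	/*
+	 * For the 2x and 4x sample rates, N is pre-divided here and the
+	 * hardware scales it back up via the N_MULTIPLE field programmed
+	 * below; the 44.1/48 kHz ACR register pairs are reused for those
+	 * rates (behaviour assumed from the upstream msm hdmi_audio code).
+	 */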
+	switch (sample_rate) {
+	case AUDIO_SAMPLE_RATE_44_1KHZ:
+		acr_pkt_ctl |= 0x2 << 4;
+		acr.cts <<= 12;
+
+		acr_reg_cts = HDMI_ACR_44_0;
+		acr_reg_n = HDMI_ACR_44_1;
+		break;
+	case AUDIO_SAMPLE_RATE_48KHZ:
+		acr_pkt_ctl |= 0x3 << 4;
+		acr.cts <<= 12;
+
+		acr_reg_cts = HDMI_ACR_48_0;
+		acr_reg_n = HDMI_ACR_48_1;
+		break;
+	case AUDIO_SAMPLE_RATE_192KHZ:
+		multiplier = 4;
+		acr.n >>= 2;
+
+		acr_pkt_ctl |= 0x3 << 4;
+		acr.cts <<= 12;
+
+		acr_reg_cts = HDMI_ACR_48_0;
+		acr_reg_n = HDMI_ACR_48_1;
+		break;
+	case AUDIO_SAMPLE_RATE_176_4KHZ:
+		multiplier = 4;
+		acr.n >>= 2;
+
+		acr_pkt_ctl |= 0x2 << 4;
+		acr.cts <<= 12;
+
+		acr_reg_cts = HDMI_ACR_44_0;
+		acr_reg_n = HDMI_ACR_44_1;
+		break;
+	case AUDIO_SAMPLE_RATE_96KHZ:
+		multiplier = 2;
+		acr.n >>= 1;
+
+		acr_pkt_ctl |= 0x3 << 4;
+		acr.cts <<= 12;
+
+		acr_reg_cts = HDMI_ACR_48_0;
+		acr_reg_n = HDMI_ACR_48_1;
+		break;
+	case AUDIO_SAMPLE_RATE_88_2KHZ:
+		multiplier = 2;
+		acr.n >>= 1;
+
+		acr_pkt_ctl |= 0x2 << 4;
+		acr.cts <<= 12;
+
+		acr_reg_cts = HDMI_ACR_44_0;
+		acr_reg_n = HDMI_ACR_44_1;
+		break;
+	default:
+		multiplier = 1;
+
+		acr_pkt_ctl |= 0x1 << 4;
+		acr.cts <<= 12;
+
+		acr_reg_cts = HDMI_ACR_32_0;
+		acr_reg_n = HDMI_ACR_32_1;
+		break;
+	}
+
+	aud_pkt_ctl2 = BIT(0) | (layout << 1);
+
+	/* N_MULTIPLE(multiplier) */
+	acr_pkt_ctl &= ~(7 << 16);
+	acr_pkt_ctl |= (multiplier & 0x7) << 16;
+
+	/* SEND | CONT */
+	acr_pkt_ctl |= BIT(0) | BIT(1);
+
+	hdmi_write(hdmi, acr_reg_cts, acr.cts);
+	hdmi_write(hdmi, acr_reg_n, acr.n);
+	hdmi_write(hdmi, HDMI_ACR_PKT_CTRL, acr_pkt_ctl);
+	hdmi_write(hdmi, HDMI_AUDIO_PKT_CTRL2, aud_pkt_ctl2);
+}
+
+static void _sde_hdmi_audio_acr_setup(struct sde_hdmi_audio *audio, bool on)
+{
+	if (on)
+		_sde_hdmi_audio_acr_enable(audio);
+	else
+		hdmi_write(audio->hdmi, HDMI_ACR_PKT_CTRL, 0);
+}
+
+static void _sde_hdmi_audio_infoframe_setup(struct sde_hdmi_audio *audio,
+	bool enabled)
+{
+	struct hdmi *hdmi = audio->hdmi;
+	u32 channels, channel_allocation, level_shift, down_mix, layout;
+	u32 hdmi_debug_reg = 0, audio_info_0_reg = 0, audio_info_1_reg = 0;
+	u32 audio_info_ctrl_reg, aud_pck_ctrl_2_reg;
+	u32 check_sum, sample_present;
+
+	audio_info_ctrl_reg = hdmi_read(hdmi, HDMI_INFOFRAME_CTRL0);
+	audio_info_ctrl_reg &= ~0xF0;
+
+	if (!enabled)
+		goto end;
+
+	channels           = audio->params.num_of_channels - 1;
+	channel_allocation = audio->params.channel_allocation;
+	level_shift        = audio->params.level_shift;
+	down_mix           = audio->params.down_mix;
+	sample_present     = audio->params.sample_present;
+
+	layout = (audio->params.num_of_channels == AUDIO_CHANNEL_2) ? 0 : 1;
+	aud_pck_ctrl_2_reg = BIT(0) | (layout << 1);
+	hdmi_write(hdmi, HDMI_AUDIO_PKT_CTRL2, aud_pck_ctrl_2_reg);
+
+	audio_info_1_reg |= channel_allocation & 0xFF;
+	audio_info_1_reg |= ((level_shift & 0xF) << 11);
+	audio_info_1_reg |= ((down_mix & 0x1) << 15);
+
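+	/*
+	 * The InfoFrame checksum makes the byte sum of the header and
+	 * payload zero modulo 256; e.g. if the bytes sum to 0x18F, the
+	 * checksum is 0x100 - 0x8F = 0x71.
+	 */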
+	check_sum = 0;
+	check_sum += HDMI_AUDIO_INFO_FRAME_PACKET_HEADER;
+	check_sum += HDMI_AUDIO_INFO_FRAME_PACKET_VERSION;
+	check_sum += HDMI_AUDIO_INFO_FRAME_PACKET_LENGTH;
+	check_sum += channels;
+	check_sum += channel_allocation;
+	check_sum += (level_shift & 0xF) << 3 | (down_mix & 0x1) << 7;
+	check_sum &= 0xFF;
+	check_sum = (u8) (256 - check_sum);
+
+	audio_info_0_reg |= check_sum & 0xFF;
+	audio_info_0_reg |= ((channels & 0x7) << 8);
+
+	/* Enable Audio InfoFrame Transmission */
+	audio_info_ctrl_reg |= 0xF0;
+
+	if (layout) {
+		/* Set the Layout bit */
+		hdmi_debug_reg |= BIT(4);
+
+		/* Set the Sample Present bits */
+		hdmi_debug_reg |= sample_present & 0xF;
+	}
+end:
+	hdmi_write(hdmi, HDMI_DEBUG, hdmi_debug_reg);
+	hdmi_write(hdmi, HDMI_AUDIO_INFO0, audio_info_0_reg);
+	hdmi_write(hdmi, HDMI_AUDIO_INFO1, audio_info_1_reg);
+	hdmi_write(hdmi, HDMI_INFOFRAME_CTRL0, audio_info_ctrl_reg);
+}
+
+int sde_hdmi_audio_on(struct hdmi *hdmi,
+	struct msm_ext_disp_audio_setup_params *params)
+{
+	struct sde_hdmi_audio audio;
+	int rc = 0;
+
+	if (!hdmi) {
+		SDE_ERROR("invalid HDMI Ctrl\n");
+		rc = -ENODEV;
+		goto end;
+	}
+
+	audio.pclk = hdmi->pixclock;
+	audio.params = *params;
+	audio.hdmi = hdmi;
+
+	if (!audio.params.num_of_channels) {
+		audio.params.sample_rate_hz = DEFAULT_AUDIO_SAMPLE_RATE_HZ;
+		audio.params.num_of_channels = AUDIO_CHANNEL_2;
+	}
+
+	_sde_hdmi_audio_acr_setup(&audio, true);
+	_sde_hdmi_audio_infoframe_setup(&audio, true);
+
+	SDE_DEBUG("HDMI Audio: Enabled\n");
+end:
+	return rc;
+}
+
+void sde_hdmi_audio_off(struct hdmi *hdmi)
+{
+	struct sde_hdmi_audio audio;
+
+	if (!hdmi) {
+		SDE_ERROR("invalid HDMI Ctrl\n");
+		return;
+	}
+
+	audio.hdmi = hdmi;
+
+	_sde_hdmi_audio_infoframe_setup(&audio, false);
+	_sde_hdmi_audio_acr_setup(&audio, false);
+
+	SDE_DEBUG("HDMI Audio: Disabled\n");
+}
+
diff -Nruw linux-4.4.115-fbx/drivers/gpu/drm/msm/hdmi-staging./sde_hdmi_bridge.c linux-4.4.115-fbx/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_bridge.c
--- linux-4.4.115-fbx/drivers/gpu/drm/msm/hdmi-staging./sde_hdmi_bridge.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_bridge.c	2019-10-29 09:26:23.629203041 +0100
@@ -0,0 +1,1049 @@
+/*
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "drm_edid.h"
+#include "sde_kms.h"
+#include "sde_connector.h"
+#include "sde_hdmi.h"
+#include "hdmi.h"
+
+/*
+ * Add these register definitions to support the latest chipsets. These
+ * are derived from hdmi.xml.h and are going to be replaced by a chipset
+ * based mask approach.
+ */
+#define SDE_HDMI_ACTIVE_HSYNC_START__MASK 0x00001fff
+static inline uint32_t SDE_HDMI_ACTIVE_HSYNC_START(uint32_t val)
+{
+	return ((val) << HDMI_ACTIVE_HSYNC_START__SHIFT) &
+		SDE_HDMI_ACTIVE_HSYNC_START__MASK;
+}
+#define SDE_HDMI_ACTIVE_HSYNC_END__MASK 0x1fff0000
+static inline uint32_t SDE_HDMI_ACTIVE_HSYNC_END(uint32_t val)
+{
+	return ((val) << HDMI_ACTIVE_HSYNC_END__SHIFT) &
+		SDE_HDMI_ACTIVE_HSYNC_END__MASK;
+}
+
+#define SDE_HDMI_ACTIVE_VSYNC_START__MASK 0x00001fff
+static inline uint32_t SDE_HDMI_ACTIVE_VSYNC_START(uint32_t val)
+{
+	return ((val) << HDMI_ACTIVE_VSYNC_START__SHIFT) &
+		SDE_HDMI_ACTIVE_VSYNC_START__MASK;
+}
+#define SDE_HDMI_ACTIVE_VSYNC_END__MASK 0x1fff0000
+static inline uint32_t SDE_HDMI_ACTIVE_VSYNC_END(uint32_t val)
+{
+	return ((val) << HDMI_ACTIVE_VSYNC_END__SHIFT) &
+		SDE_HDMI_ACTIVE_VSYNC_END__MASK;
+}
+
+#define SDE_HDMI_VSYNC_ACTIVE_F2_START__MASK 0x00001fff
+static inline uint32_t SDE_HDMI_VSYNC_ACTIVE_F2_START(uint32_t val)
+{
+	return ((val) << HDMI_VSYNC_ACTIVE_F2_START__SHIFT) &
+		SDE_HDMI_VSYNC_ACTIVE_F2_START__MASK;
+}
+#define SDE_HDMI_VSYNC_ACTIVE_F2_END__MASK 0x1fff0000
+static inline uint32_t SDE_HDMI_VSYNC_ACTIVE_F2_END(uint32_t val)
+{
+	return ((val) << HDMI_VSYNC_ACTIVE_F2_END__SHIFT) &
+		SDE_HDMI_VSYNC_ACTIVE_F2_END__MASK;
+}
+
+#define SDE_HDMI_TOTAL_H_TOTAL__MASK 0x00001fff
+static inline uint32_t SDE_HDMI_TOTAL_H_TOTAL(uint32_t val)
+{
+	return ((val) << HDMI_TOTAL_H_TOTAL__SHIFT) &
+		SDE_HDMI_TOTAL_H_TOTAL__MASK;
+}
+
+#define SDE_HDMI_TOTAL_V_TOTAL__MASK 0x1fff0000
+static inline uint32_t SDE_HDMI_TOTAL_V_TOTAL(uint32_t val)
+{
+	return ((val) << HDMI_TOTAL_V_TOTAL__SHIFT) &
+		SDE_HDMI_TOTAL_V_TOTAL__MASK;
+}
+
+#define SDE_HDMI_VSYNC_TOTAL_F2_V_TOTAL__MASK 0x00001fff
+static inline uint32_t SDE_HDMI_VSYNC_TOTAL_F2_V_TOTAL(uint32_t val)
+{
+	return ((val) << HDMI_VSYNC_TOTAL_F2_V_TOTAL__SHIFT) &
+		SDE_HDMI_VSYNC_TOTAL_F2_V_TOTAL__MASK;
+}
+
+struct sde_hdmi_bridge {
+	struct drm_bridge base;
+	struct hdmi *hdmi;
+};
+#define to_hdmi_bridge(x) container_of(x, struct sde_hdmi_bridge, base)
+
+/* TX major version that supports scrambling */
+#define HDMI_TX_SCRAMBLER_MIN_TX_VERSION 0x04
+#define HDMI_TX_SCRAMBLER_THRESHOLD_RATE_KHZ 340000
+#define HDMI_TX_SCRAMBLER_TIMEOUT_MSEC 200
+
+#define HDMI_SPD_INFOFRAME_BUFFER_SIZE \
+	(HDMI_INFOFRAME_HEADER_SIZE + HDMI_SPD_INFOFRAME_SIZE)
+#define HDMI_DEFAULT_VENDOR_NAME "Freebox"
+#define HDMI_DEFAULT_PRODUCT_NAME "Player"
+#define HDMI_AVI_IFRAME_LINE_NUMBER 1
+#define HDMI_VENDOR_IFRAME_LINE_NUMBER 3
+
+void _sde_hdmi_bridge_destroy(struct drm_bridge *bridge)
+{
+}
+
+static void sde_hdmi_clear_hdr_info(struct drm_bridge *bridge)
+{
+	struct sde_hdmi_bridge *sde_hdmi_bridge = to_hdmi_bridge(bridge);
+	struct hdmi *hdmi = sde_hdmi_bridge->hdmi;
+	struct drm_connector *connector = hdmi->connector;
+
+	connector->hdr_eotf = SDE_HDMI_HDR_EOTF_NONE;
+	connector->hdr_metadata_type_one = false;
+	connector->hdr_max_luminance = SDE_HDMI_HDR_LUMINANCE_NONE;
+	connector->hdr_avg_luminance = SDE_HDMI_HDR_LUMINANCE_NONE;
+	connector->hdr_min_luminance = SDE_HDMI_HDR_LUMINANCE_NONE;
+	connector->hdr_supported = false;
+}
+
+static void sde_hdmi_clear_colorimetry(struct drm_bridge *bridge)
+{
+	struct sde_hdmi_bridge *sde_hdmi_bridge = to_hdmi_bridge(bridge);
+	struct hdmi *hdmi = sde_hdmi_bridge->hdmi;
+	struct drm_connector *connector = hdmi->connector;
+
+	connector->color_enc_fmt = 0;
+}
+
+static void sde_hdmi_clear_vsdb_info(struct drm_bridge *bridge)
+{
+	struct sde_hdmi_bridge *sde_hdmi_bridge = to_hdmi_bridge(bridge);
+	struct hdmi *hdmi = sde_hdmi_bridge->hdmi;
+	struct drm_connector *connector = hdmi->connector;
+
+	connector->max_tmds_clock = 0;
+	connector->latency_present[0] = false;
+	connector->latency_present[1] = false;
+	connector->video_latency[0] = false;
+	connector->video_latency[1] = false;
+	connector->audio_latency[0] = false;
+	connector->audio_latency[1] = false;
+}
+
+static void sde_hdmi_clear_hf_vsdb_info(struct drm_bridge *bridge)
+{
+	struct sde_hdmi_bridge *sde_hdmi_bridge = to_hdmi_bridge(bridge);
+	struct hdmi *hdmi = sde_hdmi_bridge->hdmi;
+	struct drm_connector *connector = hdmi->connector;
+
+	connector->max_tmds_char = 0;
+	connector->scdc_present = false;
+	connector->rr_capable = false;
+	connector->supports_scramble = false;
+	connector->flags_3d = 0;
+}
+
+static void sde_hdmi_clear_vcdb_info(struct drm_bridge *bridge)
+{
+	struct sde_hdmi_bridge *sde_hdmi_bridge = to_hdmi_bridge(bridge);
+	struct hdmi *hdmi = sde_hdmi_bridge->hdmi;
+	struct drm_connector *connector = hdmi->connector;
+
+	connector->pt_scan_info = 0;
+	connector->it_scan_info = 0;
+	connector->ce_scan_info = 0;
+	connector->rgb_qs = false;
+	connector->yuv_qs = false;
+}
+
+static void sde_hdmi_clear_vsdbs(struct drm_bridge *bridge)
+{
+	/* Clear fields of HDMI VSDB */
+	sde_hdmi_clear_vsdb_info(bridge);
+	/* Clear fields of HDMI forum VSDB */
+	sde_hdmi_clear_hf_vsdb_info(bridge);
+}
+
+static void _sde_hdmi_bridge_power_on(struct drm_bridge *bridge)
+{
+	struct sde_hdmi_bridge *sde_hdmi_bridge = to_hdmi_bridge(bridge);
+	struct hdmi *hdmi = sde_hdmi_bridge->hdmi;
+	const struct hdmi_platform_config *config = hdmi->config;
+	int i, ret;
+
+	for (i = 0; i < config->pwr_reg_cnt; i++) {
+		ret = regulator_enable(hdmi->pwr_regs[i]);
+		if (ret) {
+			SDE_ERROR("failed to enable pwr regulator: %s (%d)\n",
+					config->pwr_reg_names[i], ret);
+		}
+	}
+
+	if (config->pwr_clk_cnt > 0) {
+		DRM_DEBUG("pixclock: %lu", hdmi->pixclock);
+		ret = clk_set_rate(hdmi->pwr_clks[0], hdmi->pixclock);
+		if (ret) {
+			SDE_ERROR("failed to set pixel clk: %s (%d)\n",
+					config->pwr_clk_names[0], ret);
+		}
+	}
+
+	for (i = 0; i < config->pwr_clk_cnt; i++) {
+		ret = clk_prepare_enable(hdmi->pwr_clks[i]);
+		if (ret) {
+			SDE_ERROR("failed to enable pwr clk: %s (%d)\n",
+					config->pwr_clk_names[i], ret);
+		}
+	}
+}
+
+static void _sde_hdmi_bridge_power_off(struct drm_bridge *bridge)
+{
+	struct sde_hdmi_bridge *sde_hdmi_bridge = to_hdmi_bridge(bridge);
+	struct hdmi *hdmi = sde_hdmi_bridge->hdmi;
+	const struct hdmi_platform_config *config = hdmi->config;
+	int i, ret;
+
+	/* Wait for vsync */
+	msleep(20);
+
+	for (i = 0; i < config->pwr_clk_cnt; i++)
+		clk_disable_unprepare(hdmi->pwr_clks[i]);
+
+	for (i = 0; i < config->pwr_reg_cnt; i++) {
+		ret = regulator_disable(hdmi->pwr_regs[i]);
+		if (ret) {
+			SDE_ERROR("failed to disable pwr regulator: %s (%d)\n",
+					config->pwr_reg_names[i], ret);
+		}
+	}
+}
+
+static int _sde_hdmi_bridge_ddc_clear_irq(struct hdmi *hdmi,
+			char *what)
+{
+	u32 ddc_int_ctrl, ddc_status, in_use, timeout;
+	u32 sw_done_mask = BIT(2);
+	u32 sw_done_ack  = BIT(1);
+	u32 in_use_by_sw = BIT(0);
+	u32 in_use_by_hw = BIT(1);
+
+	/* clear and enable interrupts */
+	ddc_int_ctrl = sw_done_mask | sw_done_ack;
+
+	hdmi_write(hdmi, REG_HDMI_DDC_INT_CTRL, ddc_int_ctrl);
+
+	/* wait until DDC HW is free */
+	timeout = 100;
+	do {
+		ddc_status = hdmi_read(hdmi, REG_HDMI_DDC_HW_STATUS);
+		in_use = ddc_status & (in_use_by_sw | in_use_by_hw);
+		if (in_use) {
+			SDE_DEBUG("ddc is in use by %s, timeout(%d)\n",
+			ddc_status & in_use_by_sw ? "sw" : "hw",
+			timeout);
+			udelay(100);
+		}
+	} while (in_use && --timeout);
+
+	if (!timeout) {
+		SDE_ERROR("%s: timedout\n", what);
+		return -ETIMEDOUT;
+	}
+
+	return 0;
+}
+
+static int _sde_hdmi_bridge_scrambler_ddc_check_status(struct hdmi *hdmi)
+{
+	int rc = 0;
+	u32 reg_val;
+
+	/* check for errors and clear status */
+	reg_val = hdmi_read(hdmi, REG_HDMI_SCRAMBLER_STATUS_DDC_STATUS);
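+	/*
+	 * Each error bit below appears to pair with a write-1-to-clear ack
+	 * bit one position above it (4/5, 8/9, 12/13, 14/15), so the ack
+	 * bits are collected here and written back in one go.
+	 */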
+	if (reg_val & BIT(4)) {
+		SDE_ERROR("ddc aborted\n");
+		reg_val |= BIT(5);
+		rc = -ECONNABORTED;
+	}
+
+	if (reg_val & BIT(8)) {
+		SDE_ERROR("timed out\n");
+		reg_val |= BIT(9);
+		rc = -ETIMEDOUT;
+	}
+
+	if (reg_val & BIT(12)) {
+		SDE_ERROR("NACK0\n");
+		reg_val |= BIT(13);
+		rc = -EIO;
+	}
+	if (reg_val & BIT(14)) {
+		SDE_ERROR("NACK1\n");
+		reg_val |= BIT(15);
+		rc = -EIO;
+	}
+	hdmi_write(hdmi, REG_HDMI_SCRAMBLER_STATUS_DDC_STATUS, reg_val);
+
+	return rc;
+}
+
+static int _sde_hdmi_bridge_scrambler_status_timer_setup(struct hdmi *hdmi,
+			u32 timeout_hsync)
+{
+	u32 reg_val;
+	int rc;
+	struct sde_connector *c_conn;
+	struct sde_hdmi *display;
+
+	if (!hdmi) {
+		SDE_ERROR("invalid input\n");
+		return -EINVAL;
+	}
+	c_conn = to_sde_connector(hdmi->connector);
+	display = (struct sde_hdmi *)c_conn->display;
+
+	_sde_hdmi_bridge_ddc_clear_irq(hdmi, "scrambler");
+
+	hdmi_write(hdmi, REG_HDMI_SCRAMBLER_STATUS_DDC_TIMER_CTRL,
+			   timeout_hsync);
+	hdmi_write(hdmi, REG_HDMI_SCRAMBLER_STATUS_DDC_TIMER_CTRL2,
+			   timeout_hsync);
+	reg_val = hdmi_read(hdmi, REG_HDMI_DDC_INT_CTRL5);
+	reg_val |= BIT(10);
+	hdmi_write(hdmi, REG_HDMI_DDC_INT_CTRL5, reg_val);
+
+	reg_val = hdmi_read(hdmi, REG_HDMI_DDC_INT_CTRL2);
+	/* Trigger interrupt if scrambler status is 0 or DDC failure */
+	reg_val |= BIT(10);
+	reg_val &= ~(BIT(15) | BIT(16));
+	reg_val |= BIT(16);
+	hdmi_write(hdmi, REG_HDMI_DDC_INT_CTRL2, reg_val);
+
+	/* Enable DDC access */
+	reg_val = hdmi_read(hdmi, REG_HDMI_HW_DDC_CTRL);
+
+	reg_val &= ~(BIT(8) | BIT(9));
+	reg_val |= BIT(8);
+	hdmi_write(hdmi, REG_HDMI_HW_DDC_CTRL, reg_val);
+
+	/* wait 200ms, the time the HDMI 2.0 standard gives the sink to respond */
+	msleep(200);
+
+	/* clear the scrambler status */
+	rc = _sde_hdmi_bridge_scrambler_ddc_check_status(hdmi);
+	if (rc)
+		SDE_ERROR("scrambling ddc error %d\n", rc);
+
+	_sde_hdmi_scrambler_ddc_disable((void *)display);
+
+	return rc;
+}
+
+static int _sde_hdmi_bridge_setup_ddc_timers(struct hdmi *hdmi,
+			u32 type, u32 to_in_num_lines)
+{
+	if (type >= HDMI_TX_DDC_TIMER_MAX) {
+		SDE_ERROR("Invalid timer type %d\n", type);
+		return -EINVAL;
+	}
+
+	switch (type) {
+	case HDMI_TX_DDC_TIMER_SCRAMBLER_STATUS:
+		_sde_hdmi_bridge_scrambler_status_timer_setup(hdmi,
+				to_in_num_lines);
+		break;
+	default:
+		SDE_ERROR("%d type not supported\n", type);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int _sde_hdmi_bridge_setup_scrambler(struct hdmi *hdmi,
+			struct drm_display_mode *mode)
+{
+	int rc = 0;
+	int timeout_hsync;
+	u32 reg_val = 0;
+	u32 tmds_clock_ratio = 0;
+	bool scrambler_on = false;
+	struct sde_connector *c_conn;
+	struct drm_connector *connector = NULL;
+	struct sde_hdmi *display;
+
+	if (!hdmi || !mode) {
+		SDE_ERROR("invalid input\n");
+		return -EINVAL;
+	}
+	connector = hdmi->connector;
+	c_conn = to_sde_connector(hdmi->connector);
+	display = (struct sde_hdmi *)c_conn->display;
+
+	/* Read HDMI version */
+	reg_val = hdmi_read(hdmi, REG_HDMI_VERSION);
+	reg_val = (reg_val & 0xF0000000) >> 28;
+	/* Scrambling is supported from HDMI TX 4.0 */
+	if (reg_val < HDMI_TX_SCRAMBLER_MIN_TX_VERSION) {
+		DRM_INFO("scrambling not supported by tx\n");
+		return 0;
+	}
+
+	/* use actual clock instead of mode clock */
+	if (hdmi->pixclock >
+		HDMI_TX_SCRAMBLER_THRESHOLD_RATE_KHZ * HDMI_KHZ_TO_HZ) {
+		scrambler_on = true;
+		tmds_clock_ratio = 1;
+	} else {
+		tmds_clock_ratio = 0;
+		scrambler_on = connector->supports_scramble;
+	}
+
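+	/*
+	 * Per HDMI 2.0, scrambling is mandatory above the 340 MHz TMDS
+	 * character rate; below that it is optional and is only enabled
+	 * when the sink advertises support.
+	 */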
+	DRM_INFO("scrambler %s\n", scrambler_on ? "on" : "off");
+
+	if (scrambler_on) {
+		rc = sde_hdmi_scdc_write(hdmi,
+				HDMI_TX_SCDC_TMDS_BIT_CLOCK_RATIO_UPDATE,
+				tmds_clock_ratio);
+		if (rc) {
+			SDE_ERROR("TMDS CLK RATIO ERR\n");
+			return rc;
+		}
+
+		reg_val = hdmi_read(hdmi, REG_HDMI_CTRL);
+		reg_val |= BIT(28); /* Set SCRAMBLER_EN bit */
+
+		hdmi_write(hdmi, REG_HDMI_CTRL, reg_val);
+
+		rc = sde_hdmi_scdc_write(hdmi,
+				HDMI_TX_SCDC_SCRAMBLING_ENABLE, 0x1);
+		if (rc) {
+			SDE_ERROR("failed to enable scrambling\n");
+			return rc;
+		}
+
+		/*
+		 * Set up the hardware to periodically check the scrambler
+		 * status bit on the sink. The sink must set this bit
+		 * within 200ms of scrambling being enabled.
+		 */
+		timeout_hsync = _sde_hdmi_get_timeout_in_hysnc(
+						(void *)display,
+						HDMI_TX_SCRAMBLER_TIMEOUT_MSEC);
+
+		if (timeout_hsync <= 0) {
+			SDE_ERROR("err in timeout hsync calc\n");
+			timeout_hsync = HDMI_DEFAULT_TIMEOUT_HSYNC;
+		}
+		SDE_DEBUG("timeout for scrambling en: %d hsyncs\n",
+				  timeout_hsync);
+
+		rc = _sde_hdmi_bridge_setup_ddc_timers(hdmi,
+			HDMI_TX_DDC_TIMER_SCRAMBLER_STATUS, timeout_hsync);
+	} else {
+		/* reset tmds clock ratio */
+		rc = sde_hdmi_scdc_write(hdmi,
+				HDMI_TX_SCDC_TMDS_BIT_CLOCK_RATIO_UPDATE,
+				tmds_clock_ratio);
+		/* scdc write can fail if sink doesn't support SCDC */
+		if (rc && connector->scdc_present)
+			SDE_ERROR("SCDC present, TMDS clk ratio err\n");
+
+		sde_hdmi_scdc_write(hdmi, HDMI_TX_SCDC_SCRAMBLING_ENABLE, 0x0);
+		reg_val = hdmi_read(hdmi, REG_HDMI_CTRL);
+		reg_val &= ~BIT(28); /* Unset SCRAMBLER_EN bit */
+		hdmi_write(hdmi, REG_HDMI_CTRL, reg_val);
+	}
+	return rc;
+}
+
+static void _sde_hdmi_bridge_setup_deep_color(struct hdmi *hdmi)
+{
+	struct drm_connector *connector = hdmi->connector;
+	struct sde_connector *c_conn = to_sde_connector(connector);
+	struct sde_hdmi *display = (struct sde_hdmi *)c_conn->display;
+	u32 hdmi_ctrl_reg, vbi_pkt_reg;
+
+	SDE_DEBUG("Deep Color: %s\n", display->dc_enable ? "On" : "Off");
+
+	if (display->dc_enable) {
+		hdmi_ctrl_reg = hdmi_read(hdmi, REG_HDMI_CTRL);
+
+		/* GC CD override */
+		hdmi_ctrl_reg |= BIT(27);
+
+		/* enable deep color for RGB888/YUV444/YUV420 30 bits */
+		hdmi_ctrl_reg |= BIT(24);
+		hdmi_write(hdmi, REG_HDMI_CTRL, hdmi_ctrl_reg);
+		/* Enable GC_CONT and GC_SEND in General Control Packet
+		 * (GCP) register so that deep color data is
+		 * transmitted to the sink on every frame, allowing
+		 * the sink to decode the data correctly.
+		 *
+		 * GC_CONT: 0x1 - Send GCP on every frame
+		 * GC_SEND: 0x1 - Enable GCP Transmission
+		 */
+		vbi_pkt_reg = hdmi_read(hdmi, REG_HDMI_VBI_PKT_CTRL);
+		vbi_pkt_reg |= BIT(5) | BIT(4);
+		hdmi_write(hdmi, REG_HDMI_VBI_PKT_CTRL, vbi_pkt_reg);
+	} else {
+		hdmi_ctrl_reg = hdmi_read(hdmi, REG_HDMI_CTRL);
+
+		/* disable GC CD override */
+		hdmi_ctrl_reg &= ~BIT(27);
+		/* disable deep color for RGB888/YUV444/YUV420 30 bits */
+		hdmi_ctrl_reg &= ~BIT(24);
+		hdmi_write(hdmi, REG_HDMI_CTRL, hdmi_ctrl_reg);
+
+		/* disable the GC packet sending */
+		vbi_pkt_reg = hdmi_read(hdmi, REG_HDMI_VBI_PKT_CTRL);
+		vbi_pkt_reg &= ~(BIT(5) | BIT(4));
+		hdmi_write(hdmi, REG_HDMI_VBI_PKT_CTRL, vbi_pkt_reg);
+	}
+}
+
+static void _sde_hdmi_bridge_pre_enable(struct drm_bridge *bridge)
+{
+	struct sde_hdmi_bridge *sde_hdmi_bridge = to_hdmi_bridge(bridge);
+	struct hdmi *hdmi = sde_hdmi_bridge->hdmi;
+	struct hdmi_phy *phy = hdmi->phy;
+	struct sde_connector *c_conn = to_sde_connector(hdmi->connector);
+	struct sde_hdmi *display = (struct sde_hdmi *)c_conn->display;
+
+	DRM_DEBUG("power up");
+
+	if (!hdmi->power_on) {
+		_sde_hdmi_bridge_power_on(bridge);
+		hdmi->power_on = true;
+	}
+
+	if (phy)
+		phy->funcs->powerup(phy, hdmi->pixclock);
+
+	sde_hdmi_set_mode(hdmi, true);
+
+	if (hdmi->hdcp_ctrl && hdmi->is_hdcp_supported)
+		hdmi_hdcp_ctrl_on(hdmi->hdcp_ctrl);
+
+	mutex_lock(&display->display_lock);
+	if (display->codec_ready)
+		sde_hdmi_notify_clients(display, display->connected);
+	else
+		display->client_notify_pending = true;
+	mutex_unlock(&display->display_lock);
+}
+
+static void sde_hdmi_update_hdcp_info(struct drm_connector *connector)
+{
+	void *fd = NULL;
+	struct sde_hdcp_ops *ops = NULL;
+	struct sde_connector *c_conn = to_sde_connector(connector);
+	struct sde_hdmi *display = (struct sde_hdmi *)c_conn->display;
+
+	if (!display) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return;
+	}
+
+	/* check first if hdcp2p2 is supported */
+	fd = display->hdcp_feat_data[SDE_HDCP_2P2];
+	if (fd)
+		ops = sde_hdmi_hdcp2p2_start(fd);
+
+	/* a non-NULL ops means the sink supports HDCP */
+	if (ops)
+		display->sink_hdcp22_support = true;
+
+	if (ops && ops->feature_supported)
+		display->hdcp22_present = ops->feature_supported(fd);
+	else
+		display->hdcp22_present = false;
+
+	/* if hdcp22_present is true, the source supports HDCP 2.2 */
+	if (display->hdcp22_present)
+		display->src_hdcp22_support = true;
+
+	if (!display->hdcp22_present) {
+		if (display->hdcp1_use_sw_keys) {
+			display->hdcp14_present =
+				hdcp1_check_if_supported_load_app();
+		}
+		if (display->hdcp14_present) {
+			fd = display->hdcp_feat_data[SDE_HDCP_1x];
+			if (fd)
+				ops = sde_hdcp_1x_start(fd);
+		}
+	}
+
+	if (display->sink_hdcp22_support)
+		display->sink_hdcp_ver = SDE_HDMI_HDCP_22;
+	else
+		display->sink_hdcp_ver = SDE_HDMI_HDCP_14;
+
+	/* update internal data about hdcp */
+	display->hdcp_data = fd;
+	display->hdcp_ops = ops;
+}
+
+static void _sde_hdmi_bridge_enable(struct drm_bridge *bridge)
+{
+	struct sde_hdmi_bridge *sde_hdmi_bridge = to_hdmi_bridge(bridge);
+	struct hdmi *hdmi = sde_hdmi_bridge->hdmi;
+	struct sde_connector *c_conn = to_sde_connector(hdmi->connector);
+	struct sde_hdmi *display = (struct sde_hdmi *)c_conn->display;
+
+	/* update HDCP info here to ensure the right HDCP version is used */
+	sde_hdmi_update_hdcp_info(hdmi->connector);
+
+	/* start HDCP authentication */
+	sde_hdmi_start_hdcp(hdmi->connector);
+
+	/* reset HDR state */
+	display->curr_hdr_state = HDR_DISABLE;
+}
+
+static void _sde_hdmi_bridge_disable(struct drm_bridge *bridge)
+{
+	struct sde_hdmi_bridge *sde_hdmi_bridge;
+	struct hdmi *hdmi;
+	struct sde_connector *c_conn;
+	struct sde_hdmi *display;
+	struct sde_connector_state *c_state;
+
+	/* validate the bridge before deriving any pointers from it */
+	if (!bridge) {
+		SDE_ERROR("Invalid params\n");
+		return;
+	}
+
+	sde_hdmi_bridge = to_hdmi_bridge(bridge);
+	hdmi = sde_hdmi_bridge->hdmi;
+	c_conn = to_sde_connector(hdmi->connector);
+	display = (struct sde_hdmi *)c_conn->display;
+	c_state = to_sde_connector_state(hdmi->connector->state);
+
+	mutex_lock(&display->display_lock);
+
+	if (c_state)
+		c_state->hdr_ctrl.hdr_state = HDR_DISABLE;
+
+	display->pll_update_enable = false;
+	display->sink_hdcp_ver = SDE_HDMI_HDCP_NONE;
+	display->sink_hdcp22_support = false;
+
+	if (sde_hdmi_tx_is_hdcp_enabled(display))
+		sde_hdmi_hdcp_off(display);
+
+	sde_hdmi_clear_hdr_info(bridge);
+	/* Clear HDMI VSDB blocks info */
+	sde_hdmi_clear_vsdbs(bridge);
+	/* Clear HDMI VCDB block info */
+	sde_hdmi_clear_vcdb_info(bridge);
+	/* Clear HDMI colorimetry data block info */
+	sde_hdmi_clear_colorimetry(bridge);
+
+	mutex_unlock(&display->display_lock);
+}
+
+static void _sde_hdmi_bridge_post_disable(struct drm_bridge *bridge)
+{
+	struct sde_hdmi_bridge *sde_hdmi_bridge = to_hdmi_bridge(bridge);
+	struct hdmi *hdmi = sde_hdmi_bridge->hdmi;
+	struct hdmi_phy *phy = hdmi->phy;
+	struct sde_connector *c_conn = to_sde_connector(hdmi->connector);
+	struct sde_hdmi *display = (struct sde_hdmi *)c_conn->display;
+
+	sde_hdmi_notify_clients(display, display->connected);
+
+	sde_hdmi_audio_off(hdmi);
+
+	DRM_DEBUG("power down");
+	sde_hdmi_set_mode(hdmi, false);
+
+	if (phy)
+		phy->funcs->powerdown(phy);
+
+	/* HDMI teardown sequence */
+	sde_hdmi_ctrl_reset(hdmi);
+
+	if (hdmi->power_on) {
+		_sde_hdmi_bridge_power_off(bridge);
+		hdmi->power_on = false;
+	}
+
+	/* Powering-on the controller for HPD */
+	sde_hdmi_ctrl_cfg(hdmi, 1);
+}
+
+static void _sde_hdmi_bridge_set_avi_infoframe(struct hdmi *hdmi,
+	struct drm_display_mode *mode)
+{
+	u8 avi_iframe[HDMI_AVI_INFOFRAME_BUFFER_SIZE] = {0};
+	u8 *avi_frame = &avi_iframe[HDMI_INFOFRAME_HEADER_SIZE];
+	u8 checksum;
+	u32 reg_val;
+	u32 mode_fmt_flags = 0;
+	struct hdmi_avi_infoframe info;
+	struct drm_connector *connector;
+
+	if (!hdmi || !mode) {
+		SDE_ERROR("invalid input\n");
+		return;
+	}
+
+	connector = hdmi->connector;
+
+	if (!connector) {
+		SDE_ERROR("invalid input\n");
+		return;
+	}
+
+	/* Cache the format flags before clearing */
+	mode_fmt_flags = mode->flags;
+	/*
+	 * Clear the RGB/YUV format flags before calling upstream API
+	 * as the API also compares the flags and then returns a mode
+	 */
+	mode->flags &= ~SDE_DRM_MODE_FLAG_FMT_MASK;
+	drm_hdmi_avi_infoframe_from_display_mode(&info, mode);
+	/* Restore the format flags */
+	mode->flags = mode_fmt_flags;
+
+	if (mode->private_flags & MSM_MODE_FLAG_COLOR_FORMAT_YCBCR420) {
+		info.colorspace = HDMI_COLORSPACE_YUV420;
+		/*
+		 * If sink supports quantization select,
+		 * override to full range
+		 */
+		if (connector->yuv_qs)
+			info.ycc_quantization_range =
+				HDMI_YCC_QUANTIZATION_RANGE_FULL;
+	} else {
+		if (connector->rgb_qs)
+			info.quantization_range =
+				HDMI_QUANTIZATION_RANGE_FULL;
+		else
+			info.quantization_range =
+				HDMI_QUANTIZATION_RANGE_DEFAULT;
+	}
+
+	hdmi_avi_infoframe_pack(&info, avi_iframe, sizeof(avi_iframe));
+	checksum = avi_iframe[HDMI_INFOFRAME_HEADER_SIZE - 1];
+
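+	/*
+	 * The four AVI_INFO registers below carry the checksum, the 13
+	 * payload bytes, and finally the version byte from the packed
+	 * header.
+	 */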
+	reg_val = checksum |
+		LEFT_SHIFT_BYTE(avi_frame[0]) |
+		LEFT_SHIFT_WORD(avi_frame[1]) |
+		LEFT_SHIFT_24BITS(avi_frame[2]);
+	hdmi_write(hdmi, REG_HDMI_AVI_INFO(0), reg_val);
+
+	reg_val = avi_frame[3] |
+		LEFT_SHIFT_BYTE(avi_frame[4]) |
+		LEFT_SHIFT_WORD(avi_frame[5]) |
+		LEFT_SHIFT_24BITS(avi_frame[6]);
+	hdmi_write(hdmi, REG_HDMI_AVI_INFO(1), reg_val);
+
+	reg_val = avi_frame[7] |
+		LEFT_SHIFT_BYTE(avi_frame[8]) |
+		LEFT_SHIFT_WORD(avi_frame[9]) |
+		LEFT_SHIFT_24BITS(avi_frame[10]);
+	hdmi_write(hdmi, REG_HDMI_AVI_INFO(2), reg_val);
+
+	reg_val = avi_frame[11] |
+		LEFT_SHIFT_BYTE(avi_frame[12]) |
+		LEFT_SHIFT_24BITS(avi_iframe[1]);
+	hdmi_write(hdmi, REG_HDMI_AVI_INFO(3), reg_val);
+
+	/* AVI InfoFrame enable (every frame) */
+	hdmi_write(hdmi, REG_HDMI_INFOFRAME_CTRL0,
+		hdmi_read(hdmi, REG_HDMI_INFOFRAME_CTRL0) | BIT(1) | BIT(0));
+
+	reg_val = hdmi_read(hdmi, REG_HDMI_INFOFRAME_CTRL1);
+	reg_val &= ~0x3F;
+	reg_val |= HDMI_AVI_IFRAME_LINE_NUMBER;
+	hdmi_write(hdmi, REG_HDMI_INFOFRAME_CTRL1, reg_val);
+}
+
+static void _sde_hdmi_bridge_set_vs_infoframe(struct hdmi *hdmi,
+	const struct drm_display_mode *mode)
+{
+	u8 vs_iframe[HDMI_VS_INFOFRAME_BUFFER_SIZE] = {0};
+	u32 reg_val;
+	struct hdmi_vendor_infoframe info;
+	int rc = 0;
+
+	rc = drm_hdmi_vendor_infoframe_from_display_mode(&info, mode);
+	if (rc < 0) {
+		SDE_DEBUG("don't send vendor infoframe\n");
+		return;
+	}
+	hdmi_vendor_infoframe_pack(&info, vs_iframe, sizeof(vs_iframe));
+
+	reg_val = (info.s3d_struct << 24) | (info.vic << 16) |
+			(vs_iframe[3] << 8) | (vs_iframe[7] << 5) |
+			vs_iframe[2];
+	hdmi_write(hdmi, REG_HDMI_VENSPEC_INFO0, reg_val);
+
+	/* vendor specific info-frame enable (every frame) */
+	hdmi_write(hdmi, REG_HDMI_INFOFRAME_CTRL0,
+		hdmi_read(hdmi, REG_HDMI_INFOFRAME_CTRL0) | BIT(13) | BIT(12));
+
+	reg_val = hdmi_read(hdmi, REG_HDMI_INFOFRAME_CTRL1);
+	reg_val &= ~0x3F000000;
+	reg_val |= (HDMI_VENDOR_IFRAME_LINE_NUMBER << 24);
+	hdmi_write(hdmi, REG_HDMI_INFOFRAME_CTRL1, reg_val);
+}
+
+static void _sde_hdmi_bridge_set_spd_infoframe(struct hdmi *hdmi,
+	const struct drm_display_mode *mode)
+{
+	u8 spd_iframe[HDMI_SPD_INFOFRAME_BUFFER_SIZE] = {0};
+	u32 packet_payload, packet_control, packet_header;
+	struct hdmi_spd_infoframe info;
+	int i;
+
+	/* Need to query vendor and product name from platform setup */
+	hdmi_spd_infoframe_init(&info, HDMI_DEFAULT_VENDOR_NAME,
+		HDMI_DEFAULT_PRODUCT_NAME);
+	hdmi_spd_infoframe_pack(&info, spd_iframe, sizeof(spd_iframe));
+
+	packet_header = spd_iframe[0]
+			| LEFT_SHIFT_BYTE(spd_iframe[1] & 0x7f)
+			| LEFT_SHIFT_WORD(spd_iframe[2] & 0x7f);
+	hdmi_write(hdmi, REG_HDMI_GENERIC1_HDR, packet_header);
+
+	for (i = 0; i < MAX_REG_HDMI_GENERIC1_INDEX; i++) {
+		packet_payload = spd_iframe[3 + i * 4]
+			| LEFT_SHIFT_BYTE(spd_iframe[4 + i * 4] & 0x7f)
+			| LEFT_SHIFT_WORD(spd_iframe[5 + i * 4] & 0x7f)
+			| LEFT_SHIFT_24BITS(spd_iframe[6 + i * 4] & 0x7f);
+		hdmi_write(hdmi, REG_HDMI_GENERIC1(i), packet_payload);
+	}
+
+	packet_payload = (spd_iframe[27] & 0x7f)
+			| LEFT_SHIFT_BYTE(spd_iframe[28] & 0x7f);
+	hdmi_write(hdmi, REG_HDMI_GENERIC1(MAX_REG_HDMI_GENERIC1_INDEX),
+		packet_payload);
+
+	/*
+	 * GENERIC1_LINE | GENERIC1_CONT | GENERIC1_SEND
+	 * Setup HDMI TX generic packet control
+	 * Enable this packet to transmit every frame
+	 * Enable HDMI TX engine to transmit Generic packet 1
+	 */
+	packet_control = hdmi_read(hdmi, REG_HDMI_GEN_PKT_CTRL);
+	packet_control |= ((0x1 << 24) | (1 << 5) | (1 << 4));
+	hdmi_write(hdmi, REG_HDMI_GEN_PKT_CTRL, packet_control);
+}
+
+static inline void _sde_hdmi_save_mode(struct hdmi *hdmi,
+	struct drm_display_mode *mode)
+{
+	struct sde_connector *c_conn = to_sde_connector(hdmi->connector);
+	struct sde_hdmi *display = (struct sde_hdmi *)c_conn->display;
+
+	drm_mode_copy(&display->mode, mode);
+}
+
+static u32 _sde_hdmi_choose_best_format(struct hdmi *hdmi,
+	struct drm_display_mode *mode)
+{
+	/*
+	 * choose priority:
+	 * 1. DC + RGB
+	 * 2. DC + YUV
+	 * 3. RGB
+	 * 4. YUV
+	 */
+	int dc_format;
+	struct drm_connector *connector = hdmi->connector;
+
+	dc_format = sde_hdmi_sink_dc_support(connector, mode);
+	if (dc_format & MSM_MODE_FLAG_RGB444_DC_ENABLE)
+		return (MSM_MODE_FLAG_COLOR_FORMAT_RGB444
+			| MSM_MODE_FLAG_RGB444_DC_ENABLE);
+	else if (dc_format & MSM_MODE_FLAG_YUV420_DC_ENABLE)
+		return (MSM_MODE_FLAG_COLOR_FORMAT_YCBCR420
+			| MSM_MODE_FLAG_YUV420_DC_ENABLE);
+	else if (mode->flags & DRM_MODE_FLAG_SUPPORTS_RGB)
+		return MSM_MODE_FLAG_COLOR_FORMAT_RGB444;
+	else if (mode->flags & DRM_MODE_FLAG_SUPPORTS_YUV)
+		return MSM_MODE_FLAG_COLOR_FORMAT_YCBCR420;
+
+	SDE_ERROR("Can't get available best display format\n");
+
+	return MSM_MODE_FLAG_COLOR_FORMAT_RGB444;
+}
+
+static void _sde_hdmi_bridge_mode_set(struct drm_bridge *bridge,
+		 struct drm_display_mode *mode,
+		 struct drm_display_mode *adjusted_mode)
+{
+	struct sde_hdmi_bridge *sde_hdmi_bridge = to_hdmi_bridge(bridge);
+	struct hdmi *hdmi = sde_hdmi_bridge->hdmi;
+	struct drm_connector *connector = hdmi->connector;
+	struct sde_connector *c_conn = to_sde_connector(connector);
+	struct sde_hdmi *display = (struct sde_hdmi *)c_conn->display;
+	int hstart, hend, vstart, vend;
+	uint32_t frame_ctrl;
+	u32 div = 0;
+
+	mode = adjusted_mode;
+
+	display->dc_enable = mode->private_flags &
+				(MSM_MODE_FLAG_RGB444_DC_ENABLE |
+				 MSM_MODE_FLAG_YUV420_DC_ENABLE);
+	/* compute pixclock as per color format and bit depth */
+	hdmi->pixclock = sde_hdmi_calc_pixclk(
+				mode->clock * HDMI_KHZ_TO_HZ,
+				mode->private_flags,
+				display->dc_enable);
+	SDE_DEBUG("Actual PCLK: %lu, Mode PCLK: %d\n",
+		hdmi->pixclock, mode->clock);
+
+	if (mode->private_flags & MSM_MODE_FLAG_COLOR_FORMAT_YCBCR420)
+		div = 1;
+
+	hstart = (mode->htotal - mode->hsync_start) >> div;
+	hend   = (mode->htotal - mode->hsync_start + mode->hdisplay) >> div;
+
+	vstart = mode->vtotal - mode->vsync_start - 1;
+	vend   = mode->vtotal - mode->vsync_start + mode->vdisplay - 1;
+
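+	/*
+	 * Example (1080p60): htotal = 2200, hsync_start = 2008 and
+	 * hdisplay = 1920 give hstart = 192 and hend = 2112; the >> div
+	 * halves the horizontal counts when driving YUV420.
+	 */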
+	SDE_DEBUG(
+		"htotal=%d, vtotal=%d, hstart=%d, hend=%d, vstart=%d, vend=%d",
+		mode->htotal, mode->vtotal, hstart, hend, vstart, vend);
+
+	hdmi_write(hdmi, REG_HDMI_TOTAL,
+			SDE_HDMI_TOTAL_H_TOTAL((mode->htotal >> div) - 1) |
+			SDE_HDMI_TOTAL_V_TOTAL(mode->vtotal - 1));
+
+	hdmi_write(hdmi, REG_HDMI_ACTIVE_HSYNC,
+			SDE_HDMI_ACTIVE_HSYNC_START(hstart) |
+			SDE_HDMI_ACTIVE_HSYNC_END(hend));
+	hdmi_write(hdmi, REG_HDMI_ACTIVE_VSYNC,
+			SDE_HDMI_ACTIVE_VSYNC_START(vstart) |
+			SDE_HDMI_ACTIVE_VSYNC_END(vend));
+
+	if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
+		hdmi_write(hdmi, REG_HDMI_VSYNC_TOTAL_F2,
+				SDE_HDMI_VSYNC_TOTAL_F2_V_TOTAL(mode->vtotal));
+		hdmi_write(hdmi, REG_HDMI_VSYNC_ACTIVE_F2,
+				SDE_HDMI_VSYNC_ACTIVE_F2_START(vstart + 1) |
+				SDE_HDMI_VSYNC_ACTIVE_F2_END(vend + 1));
+	} else {
+		hdmi_write(hdmi, REG_HDMI_VSYNC_TOTAL_F2,
+				SDE_HDMI_VSYNC_TOTAL_F2_V_TOTAL(0));
+		hdmi_write(hdmi, REG_HDMI_VSYNC_ACTIVE_F2,
+				SDE_HDMI_VSYNC_ACTIVE_F2_START(0) |
+				SDE_HDMI_VSYNC_ACTIVE_F2_END(0));
+	}
+
+	frame_ctrl = 0;
+	if (mode->flags & DRM_MODE_FLAG_NHSYNC)
+		frame_ctrl |= HDMI_FRAME_CTRL_HSYNC_LOW;
+	if (mode->flags & DRM_MODE_FLAG_NVSYNC)
+		frame_ctrl |= HDMI_FRAME_CTRL_VSYNC_LOW;
+	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+		frame_ctrl |= HDMI_FRAME_CTRL_INTERLACED_EN;
+	DRM_DEBUG("frame_ctrl=%08x\n", frame_ctrl);
+	hdmi_write(hdmi, REG_HDMI_FRAME_CTRL, frame_ctrl);
+
+	/*
+	 * Setup info frame
+	 * Current drm_edid driver doesn't have all CEA formats defined in
+	 * latest CEA-861(CTA-861) spec. So, don't check if mode is CEA mode
+	 * in here. Once core framework is updated, the check needs to be
+	 * added back.
+	 */
+	if (hdmi->hdmi_mode) {
+		_sde_hdmi_bridge_set_avi_infoframe(hdmi, mode);
+		_sde_hdmi_bridge_set_vs_infoframe(hdmi, mode);
+		_sde_hdmi_bridge_set_spd_infoframe(hdmi, mode);
+		DRM_DEBUG("hdmi setup info frame\n");
+	}
+
+	_sde_hdmi_save_mode(hdmi, mode);
+	_sde_hdmi_bridge_setup_scrambler(hdmi, mode);
+	_sde_hdmi_bridge_setup_deep_color(hdmi);
+}
+
+static bool _sde_hdmi_bridge_mode_fixup(struct drm_bridge *bridge,
+	 const struct drm_display_mode *mode,
+	 struct drm_display_mode *adjusted_mode)
+{
+	struct sde_hdmi_bridge *sde_hdmi_bridge = to_hdmi_bridge(bridge);
+	struct hdmi *hdmi = sde_hdmi_bridge->hdmi;
+
+	/* Clear the private flags before assigning new one */
+	adjusted_mode->private_flags = 0;
+
+	adjusted_mode->private_flags |=
+		_sde_hdmi_choose_best_format(hdmi, adjusted_mode);
+	SDE_DEBUG("Adjusted mode private flags: 0x%x\n",
+		  adjusted_mode->private_flags);
+
+	return true;
+}
+
+void sde_hdmi_bridge_power_on(struct drm_bridge *bridge)
+{
+	_sde_hdmi_bridge_power_on(bridge);
+}
+
+static const struct drm_bridge_funcs _sde_hdmi_bridge_funcs = {
+	.pre_enable = _sde_hdmi_bridge_pre_enable,
+	.enable = _sde_hdmi_bridge_enable,
+	.disable = _sde_hdmi_bridge_disable,
+	.post_disable = _sde_hdmi_bridge_post_disable,
+	.mode_set = _sde_hdmi_bridge_mode_set,
+	.mode_fixup = _sde_hdmi_bridge_mode_fixup,
+};
+
+/* initialize bridge */
+struct drm_bridge *sde_hdmi_bridge_init(struct hdmi *hdmi)
+{
+	struct drm_bridge *bridge = NULL;
+	struct sde_hdmi_bridge *sde_hdmi_bridge;
+	int ret;
+
+	sde_hdmi_bridge = devm_kzalloc(hdmi->dev->dev,
+			sizeof(*sde_hdmi_bridge), GFP_KERNEL);
+	if (!sde_hdmi_bridge) {
+		ret = -ENOMEM;
+		goto fail;
+	}
+
+	sde_hdmi_bridge->hdmi = hdmi;
+
+	bridge = &sde_hdmi_bridge->base;
+	bridge->funcs = &_sde_hdmi_bridge_funcs;
+
+	ret = drm_bridge_attach(hdmi->dev, bridge);
+	if (ret)
+		goto fail;
+
+	return bridge;
+
+fail:
+	if (bridge)
+		_sde_hdmi_bridge_destroy(bridge);
+
+	return ERR_PTR(ret);
+}
diff -Nruw linux-4.4.115-fbx/drivers/gpu/drm/msm/hdmi-staging./sde_hdmi.c linux-4.4.115-fbx/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi.c
--- linux-4.4.115-fbx/drivers/gpu/drm/msm/hdmi-staging./sde_hdmi.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi.c	2019-10-29 09:26:23.629203041 +0100
@@ -0,0 +1,3291 @@
+/*
+ * Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#define pr_fmt(fmt)	"sde-hdmi:[%s] " fmt, __func__
+
+#include <linux/list.h>
+#include <linux/of.h>
+#include <linux/gpio.h>
+#include <linux/of_irq.h>
+#include <linux/of_platform.h>
+#include <linux/irqdomain.h>
+
+#include "sde_kms.h"
+#include "sde_connector.h"
+#include "msm_drv.h"
+#include "sde_hdmi.h"
+#include "sde_hdmi_regs.h"
+#include "hdmi.h"
+
+static DEFINE_MUTEX(sde_hdmi_list_lock);
+static LIST_HEAD(sde_hdmi_list);
+
+/* HDMI SCDC register offsets */
+#define HDMI_SCDC_UPDATE_0              0x10
+#define HDMI_SCDC_UPDATE_1              0x11
+#define HDMI_SCDC_TMDS_CONFIG           0x20
+#define HDMI_SCDC_SCRAMBLER_STATUS      0x21
+#define HDMI_SCDC_CONFIG_0              0x30
+#define HDMI_SCDC_STATUS_FLAGS_0        0x40
+#define HDMI_SCDC_STATUS_FLAGS_1        0x41
+#define HDMI_SCDC_ERR_DET_0_L           0x50
+#define HDMI_SCDC_ERR_DET_0_H           0x51
+#define HDMI_SCDC_ERR_DET_1_L           0x52
+#define HDMI_SCDC_ERR_DET_1_H           0x53
+#define HDMI_SCDC_ERR_DET_2_L           0x54
+#define HDMI_SCDC_ERR_DET_2_H           0x55
+#define HDMI_SCDC_ERR_DET_CHECKSUM      0x56
+
+#define HDMI_DISPLAY_MAX_WIDTH          4096
+#define HDMI_DISPLAY_MAX_HEIGHT         2160
+
+static const struct of_device_id sde_hdmi_dt_match[] = {
+	{.compatible = "qcom,hdmi-display"},
+	{}
+};
+
+static ssize_t _sde_hdmi_debugfs_dump_info_read(struct file *file,
+						char __user *buff,
+						size_t count,
+						loff_t *ppos)
+{
+	struct sde_hdmi *display = file->private_data;
+	char *buf;
+	u32 len = 0;
+
+	if (!display)
+		return -ENODEV;
+
+	if (*ppos)
+		return 0;
+
+	buf = kzalloc(SZ_1K, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	len += snprintf(buf, SZ_1K, "name = %s\n", display->name);
+
+	if (copy_to_user(buff, buf, len)) {
+		kfree(buf);
+		return -EFAULT;
+	}
+
+	*ppos += len;
+
+	kfree(buf);
+	return len;
+}
+
+static ssize_t _sde_hdmi_debugfs_edid_modes_read(struct file *file,
+						char __user *buff,
+						size_t count,
+						loff_t *ppos)
+{
+	struct sde_hdmi *display = file->private_data;
+	char *buf;
+	u32 len = 0, buf_size;
+	struct drm_connector *connector;
+	u32 mode_count = 0;
+	struct drm_display_mode *mode;
+
+	if (!display)
+		return -ENODEV;
+
+	if (!display->ctrl.ctrl ||
+		!display->ctrl.ctrl->connector) {
+		SDE_ERROR("sde_hdmi=%p or hdmi or connector is NULL\n",
+				  display);
+		return -EINVAL;
+	}
+
+	if (*ppos)
+		return 0;
+
+	connector = display->ctrl.ctrl->connector;
+
+	list_for_each_entry(mode, &connector->modes, head) {
+		mode_count++;
+	}
+
+	/* add one more row for the header line */
+	mode_count++;
+
+	buf_size = mode_count * sizeof(*mode);
+	buf = kzalloc(buf_size, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	len += snprintf(buf + len, buf_size - len,
+					"name refresh (Hz) hdisp hss hse htot vdisp");
+
+	len += snprintf(buf + len, buf_size - len,
+					" vss vse vtot flags\n");
+
+	list_for_each_entry(mode, &connector->modes, head) {
+		len += snprintf(buf + len, buf_size - len,
+		"%s %d %d %d %d %d %d %d %d %d 0x%x\n",
+		mode->name, mode->vrefresh, mode->hdisplay,
+		mode->hsync_start, mode->hsync_end, mode->htotal,
+		mode->vdisplay, mode->vsync_start, mode->vsync_end,
+		mode->vtotal, mode->flags);
+	}
+
+	if (copy_to_user(buff, buf, len)) {
+		kfree(buf);
+		return -EFAULT;
+	}
+
+	*ppos += len;
+
+	kfree(buf);
+	return len;
+}
+
+static ssize_t _sde_hdmi_debugfs_edid_vsdb_info_read(struct file *file,
+						char __user *buff,
+						size_t count,
+						loff_t *ppos)
+{
+	struct sde_hdmi *display = file->private_data;
+	char buf[200];
+	u32 len = 0;
+	struct drm_connector *connector;
+
+	if (!display)
+		return -ENODEV;
+
+	if (!display->ctrl.ctrl ||
+		!display->ctrl.ctrl->connector) {
+		SDE_ERROR("sde_hdmi=%p or hdmi or connector is NULL\n",
+				  display);
+		return -EINVAL;
+	}
+
+	SDE_HDMI_DEBUG("%s +", __func__);
+	if (*ppos)
+		return 0;
+
+	connector = display->ctrl.ctrl->connector;
+	len += snprintf(buf + len, sizeof(buf) - len,
+					"max_tmds_clock = %d\n",
+					connector->max_tmds_clock);
+	len += snprintf(buf + len, sizeof(buf) - len,
+					"latency_present %d %d\n",
+					connector->latency_present[0],
+					connector->latency_present[1]);
+	len += snprintf(buf + len, sizeof(buf) - len,
+					"video_latency %d %d\n",
+					connector->video_latency[0],
+					connector->video_latency[1]);
+	len += snprintf(buf + len, sizeof(buf) - len,
+					"audio_latency %d %d\n",
+					connector->audio_latency[0],
+					connector->audio_latency[1]);
+	len += snprintf(buf + len, sizeof(buf) - len,
+					"dvi_dual %d\n",
+					(int)connector->dvi_dual);
+
+	if (copy_to_user(buff, buf, len))
+		return -EFAULT;
+
+	*ppos += len;
+	SDE_HDMI_DEBUG("%s - ", __func__);
+	return len;
+}
+
+static ssize_t _sde_hdmi_debugfs_edid_hdr_info_read(struct file *file,
+						char __user *buff,
+						size_t count,
+						loff_t *ppos)
+{
+	struct sde_hdmi *display = file->private_data;
+	char buf[200];
+	u32 len = 0;
+	struct drm_connector *connector;
+
+	if (!display)
+		return -ENODEV;
+
+	if (!display->ctrl.ctrl ||
+		!display->ctrl.ctrl->connector) {
+		SDE_ERROR("sde_hdmi=%p or hdmi or connector is NULL\n",
+				  display);
+		return -EINVAL;
+	}
+
+	SDE_HDMI_DEBUG("%s +", __func__);
+	if (*ppos)
+		return 0;
+
+	connector = display->ctrl.ctrl->connector;
+	len += snprintf(buf, sizeof(buf), "hdr_eotf = %d\n"
+					"hdr_metadata_type_one %d\n"
+					"hdr_max_luminance %d\n"
+					"hdr_avg_luminance %d\n"
+					"hdr_min_luminance %d\n"
+					"hdr_supported %d\n",
+					connector->hdr_eotf,
+					connector->hdr_metadata_type_one,
+					connector->hdr_max_luminance,
+					connector->hdr_avg_luminance,
+					connector->hdr_min_luminance,
+					(int)connector->hdr_supported);
+
+	if (copy_to_user(buff, buf, len))
+		return -EFAULT;
+
+	*ppos += len;
+	SDE_HDMI_DEBUG("%s - ", __func__);
+	return len;
+}
+
+static ssize_t _sde_hdmi_debugfs_edid_hfvsdb_info_read(struct file *file,
+						char __user *buff,
+						size_t count,
+						loff_t *ppos)
+{
+	struct sde_hdmi *display = file->private_data;
+	char buf[200];
+	u32 len = 0;
+	struct drm_connector *connector;
+
+	if (!display)
+		return -ENODEV;
+
+	if (!display->ctrl.ctrl ||
+		!display->ctrl.ctrl->connector) {
+		SDE_ERROR("sde_hdmi=%p or hdmi or connector is NULL\n",
+				  display);
+		return -EINVAL;
+	}
+
+	SDE_HDMI_DEBUG("%s +", __func__);
+	if (*ppos)
+		return 0;
+
+	connector = display->ctrl.ctrl->connector;
+	len += snprintf(buf, PAGE_SIZE - len, "max_tmds_char = %d\n"
+					"scdc_present %d\n"
+					"rr_capable %d\n"
+					"supports_scramble %d\n"
+					"flags_3d %d\n",
+					connector->max_tmds_char,
+					(int)connector->scdc_present,
+					(int)connector->rr_capable,
+					(int)connector->supports_scramble,
+					connector->flags_3d);
+
+	if (copy_to_user(buff, buf, len))
+		return -EFAULT;
+
+	*ppos += len;
+	return len;
+}
+
+static ssize_t _sde_hdmi_debugfs_edid_vcdb_info_read(struct file *file,
+						char __user *buff,
+						size_t count,
+						loff_t *ppos)
+{
+	struct sde_hdmi *display = file->private_data;
+	char buf[100];
+	u32 len = 0;
+	struct drm_connector *connector;
+
+	if (!display)
+		return -ENODEV;
+
+	if (!display->ctrl.ctrl ||
+		!display->ctrl.ctrl->connector) {
+		SDE_ERROR("sde_hdmi=%p or hdmi or connector is NULL\n",
+				  display);
+		return -EINVAL;
+	}
+
+	SDE_HDMI_DEBUG("%s +", __func__);
+	if (*ppos)
+		return 0;
+
+	connector = display->ctrl.ctrl->connector;
+	len += snprintf(buf, PAGE_SIZE - len, "pt_scan_info = %d\n"
+					"it_scan_info = %d\n"
+					"ce_scan_info = %d\n",
+					(int)connector->pt_scan_info,
+					(int)connector->it_scan_info,
+					(int)connector->ce_scan_info);
+
+	if (copy_to_user(buff, buf, len))
+		return -EFAULT;
+
+	*ppos += len;
+	SDE_HDMI_DEBUG("%s - ", __func__);
+	return len;
+}
+
+static ssize_t _sde_hdmi_edid_vendor_name_read(struct file *file,
+						char __user *buff,
+						size_t count,
+						loff_t *ppos)
+{
+	struct sde_hdmi *display = file->private_data;
+	char buf[100];
+	u32 len = 0;
+	struct drm_connector *connector;
+
+	if (!display)
+		return -ENODEV;
+
+	if (!display->ctrl.ctrl ||
+		!display->ctrl.ctrl->connector) {
+		SDE_ERROR("sde_hdmi=%p or hdmi or connector is NULL\n",
+				  display);
+		return -EINVAL;
+	}
+
+	SDE_HDMI_DEBUG("%s +", __func__);
+	if (*ppos)
+		return 0;
+
+	connector = display->ctrl.ctrl->connector;
+	len += snprintf(buf, PAGE_SIZE - len, "Vendor ID is %s\n",
+					display->edid_ctrl->vendor_id);
+
+	if (copy_to_user(buff, buf, len))
+		return -EFAULT;
+
+	*ppos += len;
+	SDE_HDMI_DEBUG("%s - ", __func__);
+	return len;
+}
+
+static ssize_t _sde_hdmi_src_hdcp14_support_read(struct file *file,
+						char __user *buff,
+						size_t count,
+						loff_t *ppos)
+{
+	struct sde_hdmi *display = file->private_data;
+	char buf[SZ_128];
+	u32 len = 0;
+
+	if (!display)
+		return -ENODEV;
+
+	if (!display->ctrl.ctrl) {
+		SDE_ERROR("hdmi is NULL\n");
+		return -EINVAL;
+	}
+
+	SDE_HDMI_DEBUG("%s +", __func__);
+	if (*ppos)
+		return 0;
+
+	if (display->hdcp14_present)
+		len += snprintf(buf, SZ_128 - len, "true\n");
+	else
+		len += snprintf(buf, SZ_128 - len, "false\n");
+
+	if (copy_to_user(buff, buf, len))
+		return -EFAULT;
+
+	*ppos += len;
+	SDE_HDMI_DEBUG("%s - ", __func__);
+	return len;
+}
+
+static ssize_t _sde_hdmi_src_hdcp22_support_read(struct file *file,
+						char __user *buff,
+						size_t count,
+						loff_t *ppos)
+{
+	struct sde_hdmi *display = file->private_data;
+	char buf[SZ_128];
+	u32 len = 0;
+
+	if (!display)
+		return -ENODEV;
+
+	if (!display->ctrl.ctrl) {
+		SDE_ERROR("hdmi is NULL\n");
+		return -EINVAL;
+	}
+
+	SDE_HDMI_DEBUG("%s +", __func__);
+	if (*ppos)
+		return 0;
+
+	if (display->src_hdcp22_support)
+		len += snprintf(buf, SZ_128 - len, "true\n");
+	else
+		len += snprintf(buf, SZ_128 - len, "false\n");
+
+	if (copy_to_user(buff, buf, len))
+		return -EFAULT;
+
+	*ppos += len;
+	SDE_HDMI_DEBUG("%s - ", __func__);
+	return len;
+}
+
+static ssize_t _sde_hdmi_sink_hdcp22_support_read(struct file *file,
+						char __user *buff,
+						size_t count,
+						loff_t *ppos)
+{
+	struct sde_hdmi *display = file->private_data;
+	char buf[SZ_128];
+	u32 len = 0;
+
+	if (!display)
+		return -ENODEV;
+
+	if (!display->ctrl.ctrl) {
+		SDE_ERROR("hdmi is NULL\n");
+		return -EINVAL;
+	}
+
+	SDE_HDMI_DEBUG("%s +", __func__);
+	if (*ppos)
+		return 0;
+
+	if (display->sink_hdcp22_support)
+		len += snprintf(buf, SZ_128 - len, "true\n");
+	else
+		len += snprintf(buf, SZ_128 - len, "false\n");
+
+	if (copy_to_user(buff, buf, len))
+		return -EFAULT;
+
+	*ppos += len;
+	SDE_HDMI_DEBUG("%s - ", __func__);
+	return len;
+}
+
+static ssize_t _sde_hdmi_hdcp_state_read(struct file *file,
+						char __user *buff,
+						size_t count,
+						loff_t *ppos)
+{
+	struct sde_hdmi *display = file->private_data;
+	char buf[SZ_128];
+	u32 len = 0;
+
+	if (!display)
+		return -ENODEV;
+
+	SDE_HDMI_DEBUG("%s +", __func__);
+	if (*ppos)
+		return 0;
+
+	len += snprintf(buf, SZ_128 - len, "HDCP state : %s\n",
+			sde_hdcp_state_name(display->hdcp_status));
+
+	if (copy_to_user(buff, buf, len))
+		return -EFAULT;
+
+	*ppos += len;
+	SDE_HDMI_DEBUG("%s - ", __func__);
+	return len;
+}
+
+static const struct file_operations dump_info_fops = {
+	.open = simple_open,
+	.read = _sde_hdmi_debugfs_dump_info_read,
+};
+
+static const struct file_operations edid_modes_fops = {
+	.open = simple_open,
+	.read = _sde_hdmi_debugfs_edid_modes_read,
+};
+
+static const struct file_operations edid_vsdb_info_fops = {
+	.open = simple_open,
+	.read = _sde_hdmi_debugfs_edid_vsdb_info_read,
+};
+
+static const struct file_operations edid_hdr_info_fops = {
+	.open = simple_open,
+	.read = _sde_hdmi_debugfs_edid_hdr_info_read,
+};
+
+static const struct file_operations edid_hfvsdb_info_fops = {
+	.open = simple_open,
+	.read = _sde_hdmi_debugfs_edid_hfvsdb_info_read,
+};
+
+static const struct file_operations edid_vcdb_info_fops = {
+	.open = simple_open,
+	.read = _sde_hdmi_debugfs_edid_vcdb_info_read,
+};
+
+static const struct file_operations edid_vendor_name_fops = {
+	.open = simple_open,
+	.read = _sde_hdmi_edid_vendor_name_read,
+};
+
+static const struct file_operations hdcp_src_14_support_fops = {
+	.open = simple_open,
+	.read = _sde_hdmi_src_hdcp14_support_read,
+};
+
+static const struct file_operations hdcp_src_22_support_fops = {
+	.open = simple_open,
+	.read = _sde_hdmi_src_hdcp22_support_read,
+};
+
+static const struct file_operations hdcp_sink_22_support_fops = {
+	.open = simple_open,
+	.read = _sde_hdmi_sink_hdcp22_support_read,
+};
+
+static const struct file_operations sde_hdmi_hdcp_state_fops = {
+	.open = simple_open,
+	.read = _sde_hdmi_hdcp_state_read,
+};
+
+static u64 _sde_hdmi_clip_valid_pclk(struct hdmi *hdmi, u64 pclk_in)
+{
+	u32 pclk_delta, pclk;
+	u64 pclk_clip = pclk_in;
+
+	/* as per standard, 0.5% of deviation is allowed */
+	pclk = hdmi->pixclock;
+	pclk_delta = pclk * 5 / 1000;
+
+	if (pclk_in < (pclk - pclk_delta))
+		pclk_clip = pclk - pclk_delta;
+	else if (pclk_in > (pclk + pclk_delta))
+		pclk_clip = pclk + pclk_delta;
+
+	if (pclk_in != pclk_clip)
+		pr_warn("clip pclk from %lld to %lld\n", pclk_in, pclk_clip);
+
+	return pclk_clip;
+}
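+
+/*
+ * Editorial example: at pclk = 148500000, pclk_delta = 742500 (0.5%), so
+ * requested rates are clipped into [147757500, 149242500].
+ */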
+
+static void sde_hdmi_tx_hdcp_cb(void *ptr, enum sde_hdcp_states status)
+{
+	struct sde_hdmi *hdmi_ctrl = (struct sde_hdmi *)ptr;
+	struct hdmi *hdmi;
+
+	if (!hdmi_ctrl) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return;
+	}
+
+	hdmi = hdmi_ctrl->ctrl.ctrl;
+	hdmi_ctrl->hdcp_status = status;
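+	/* defer handling to the HDMI workqueue; HZ / 4 is a 250 ms delay */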
+	queue_delayed_work(hdmi->workq, &hdmi_ctrl->hdcp_cb_work, HZ/4);
+}
+
+static void sde_hdmi_tx_set_avmute(void *ptr)
+{
+	struct sde_hdmi *hdmi_ctrl = (struct sde_hdmi *)ptr;
+	struct hdmi *hdmi;
+
+	if (!hdmi_ctrl) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return;
+	}
+
+	hdmi = hdmi_ctrl->ctrl.ctrl;
+
+	/*
+	 * When we try to continuously re-auth there
+	 * is no need to enforce avmute for clear
+	 * content. Hence check the current encryption level
+	 * before enforcing avmute on authentication failure
+	 */
+	if (sde_hdmi_tx_is_encryption_set(hdmi_ctrl))
+		sde_hdmi_config_avmute(hdmi, true);
+}
+
+void sde_hdmi_hdcp_off(struct sde_hdmi *hdmi_ctrl)
+{
+	if (!hdmi_ctrl) {
+		SDE_ERROR("%s: invalid input\n", __func__);
+		return;
+	}
+
+	if (hdmi_ctrl->hdcp_ops)
+		hdmi_ctrl->hdcp_ops->off(hdmi_ctrl->hdcp_data);
+
+	flush_delayed_work(&hdmi_ctrl->hdcp_cb_work);
+
+	hdmi_ctrl->hdcp_ops = NULL;
+}
+
+static void sde_hdmi_tx_hdcp_cb_work(struct work_struct *work)
+{
+	struct sde_hdmi *hdmi_ctrl = NULL;
+	struct delayed_work *dw = to_delayed_work(work);
+	int rc = 0;
+	struct hdmi *hdmi;
+
+	hdmi_ctrl = container_of(dw, struct sde_hdmi, hdcp_cb_work);
+	if (!hdmi_ctrl) {
+		DEV_DBG("%s: invalid input\n", __func__);
+		return;
+	}
+
+	hdmi = hdmi_ctrl->ctrl.ctrl;
+
+	switch (hdmi_ctrl->hdcp_status) {
+	case HDCP_STATE_AUTHENTICATED:
+		hdmi_ctrl->auth_state = true;
+
+		if (sde_hdmi_tx_is_panel_on(hdmi_ctrl) &&
+			sde_hdmi_tx_is_stream_shareable(hdmi_ctrl)) {
+			rc = sde_hdmi_config_avmute(hdmi, false);
+		}
+
+		if (hdmi_ctrl->hdcp1_use_sw_keys &&
+			hdmi_ctrl->hdcp14_present) {
+			if (!hdmi_ctrl->hdcp22_present)
+				hdcp1_set_enc(true);
+		}
+		break;
+	case HDCP_STATE_AUTH_FAIL:
+		if (hdmi_ctrl->hdcp1_use_sw_keys && hdmi_ctrl->hdcp14_present) {
+			if (hdmi_ctrl->auth_state && !hdmi_ctrl->hdcp22_present)
+				hdcp1_set_enc(false);
+		}
+
+		hdmi_ctrl->auth_state = false;
+
+		if (sde_hdmi_tx_is_panel_on(hdmi_ctrl)) {
+			pr_debug("%s: Reauthenticating\n", __func__);
+			if (hdmi_ctrl->hdcp_ops && hdmi_ctrl->hdcp_data) {
+				rc = hdmi_ctrl->hdcp_ops->reauthenticate(
+					 hdmi_ctrl->hdcp_data);
+				if (rc)
+					pr_err("%s: HDCP reauth failed. rc=%d\n",
+						   __func__, rc);
+			} else
+				pr_err("%s: NULL HDCP Ops and Data\n",
+					   __func__);
+		} else {
+			pr_debug("%s: Not reauthenticating. Cable not conn\n",
+					 __func__);
+		}
+
+		break;
+	case HDCP_STATE_AUTH_FAIL_NOREAUTH:
+		if (hdmi_ctrl->hdcp1_use_sw_keys && hdmi_ctrl->hdcp14_present) {
+			if (hdmi_ctrl->auth_state && !hdmi_ctrl->hdcp22_present)
+				hdcp1_set_enc(false);
+		}
+
+		hdmi_ctrl->auth_state = false;
+
+		break;
+	case HDCP_STATE_AUTH_ENC_NONE:
+		hdmi_ctrl->enc_lvl = HDCP_STATE_AUTH_ENC_NONE;
+		if (sde_hdmi_tx_is_panel_on(hdmi_ctrl))
+			rc = sde_hdmi_config_avmute(hdmi, false);
+		break;
+	case HDCP_STATE_AUTH_ENC_1X:
+	case HDCP_STATE_AUTH_ENC_2P2:
+		hdmi_ctrl->enc_lvl = hdmi_ctrl->hdcp_status;
+		if (sde_hdmi_tx_is_panel_on(hdmi_ctrl) &&
+			sde_hdmi_tx_is_stream_shareable(hdmi_ctrl)) {
+			rc = sde_hdmi_config_avmute(hdmi, false);
+		} else {
+			rc = sde_hdmi_config_avmute(hdmi, true);
+		}
+		break;
+	default:
+		/* do nothing */
+		break;
+	}
+}
+
+/**
+ * _sde_hdmi_update_pll_delta() - Update the HDMI pixel clock as per input ppm
+ *
+ * @display: the SDE HDMI display to update.
+ * @ppm: clock adjustment in parts per million, multiplied by 1000.
+ * return: 0 on success, non-zero in case of failure.
+ *
+ * The requested pixel clock is clipped so that it stays within the +/-5%
+ * tolerance of the TMDS clock rate defined by the HDMI spec.
+ */
+static int _sde_hdmi_update_pll_delta(struct sde_hdmi *display, s32 ppm)
+{
+	struct hdmi *hdmi = display->ctrl.ctrl;
+	u64 cur_pclk, dst_pclk;
+	u64 clip_pclk;
+	int rc = 0;
+
+	mutex_lock(&display->display_lock);
+
+	if (!hdmi->power_on || !display->connected) {
+		SDE_ERROR("HDMI display is not ready\n");
+		mutex_unlock(&display->display_lock);
+		return -EINVAL;
+	}
+
+	if (!display->pll_update_enable) {
+		SDE_ERROR("PLL update function is not enabled\n");
+		mutex_unlock(&display->display_lock);
+		return -EINVAL;
+	}
+
+	/* get current pclk */
+	cur_pclk = hdmi->actual_pixclock;
+	/* get desired pclk */
+	dst_pclk = cur_pclk * (1000000000 + ppm);
+	do_div(dst_pclk, 1000000000);
+
+	clip_pclk = _sde_hdmi_clip_valid_pclk(hdmi, dst_pclk);
+
+	/* update pclk */
+	if (clip_pclk != cur_pclk) {
+		SDE_DEBUG("PCLK changes from %llu to %llu when delta is %d\n",
+				cur_pclk, clip_pclk, ppm);
+
+		rc = clk_set_rate(hdmi->pwr_clks[0], clip_pclk);
+		if (rc < 0) {
+			SDE_ERROR("HDMI PLL update failed\n");
+			mutex_unlock(&display->display_lock);
+			return rc;
+		}
+
+		hdmi->actual_pixclock = clip_pclk;
+	}
+
+	mutex_unlock(&display->display_lock);
+
+	return rc;
+}
+
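+/*
+ * debugfs write handler for the "pll_delta" node: the user buffer is
+ * parsed as a signed ppm value (parts per million, multiplied by 1000).
+ * For example, assuming debugfs is mounted at the usual location:
+ *
+ *   echo 1000 > /sys/kernel/debug/<display-name>/pll_delta
+ *
+ * nudges the pixel clock up by 1 ppm.
+ */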
+static ssize_t _sde_hdmi_debugfs_pll_delta_write(struct file *file,
+		    const char __user *user_buf, size_t count, loff_t *ppos)
+{
+	struct sde_hdmi *display = file->private_data;
+	char buf[10];
+	int ppm = 0;
+
+	if (!display)
+		return -ENODEV;
+
+	if (count >= sizeof(buf))
+		return -EINVAL;
+
+	if (copy_from_user(buf, user_buf, count))
+		return -EFAULT;
+
+	buf[count] = 0;	/* end of string */
+
+	if (kstrtoint(buf, 0, &ppm))
+		return -EINVAL;
+
+	if (ppm)
+		_sde_hdmi_update_pll_delta(display, ppm);
+
+	return count;
+}
+
+static const struct file_operations pll_delta_fops = {
+	.open = simple_open,
+	.write = _sde_hdmi_debugfs_pll_delta_write,
+};
+
+/**
+ * _sde_hdmi_enable_pll_update() - Enable the HDMI PLL update function
+ *
+ * @display: the SDE HDMI display to update.
+ * @enable: non-zero to enable the PLL update function, 0 to disable.
+ * return: 0 on success, non-zero in case of failure.
+ */
+static int _sde_hdmi_enable_pll_update(struct sde_hdmi *display, s32 enable)
+{
+	struct hdmi *hdmi = display->ctrl.ctrl;
+	int rc = 0;
+
+	mutex_lock(&display->display_lock);
+
+	if (!hdmi->power_on || !display->connected) {
+		SDE_ERROR("HDMI display is not ready\n");
+		mutex_unlock(&display->display_lock);
+		return -EINVAL;
+	}
+
+	if (!enable && hdmi->actual_pixclock != hdmi->pixclock) {
+		/* reset pixel clock when disable */
+		rc = clk_set_rate(hdmi->pwr_clks[0], hdmi->pixclock);
+		if (rc < 0) {
+			SDE_ERROR("reset clock rate failed\n");
+			mutex_unlock(&display->display_lock);
+			return rc;
+		}
+	}
+	hdmi->actual_pixclock = hdmi->pixclock;
+
+	display->pll_update_enable = !!enable;
+
+	mutex_unlock(&display->display_lock);
+
+	SDE_DEBUG("HDMI PLL update: %s\n",
+			display->pll_update_enable ? "enable" : "disable");
+
+	return rc;
+}
+
+static ssize_t _sde_hdmi_debugfs_pll_enable_read(struct file *file,
+		char __user *buff, size_t count, loff_t *ppos)
+{
+	struct sde_hdmi *display = file->private_data;
+	char *buf;
+	u32 len = 0;
+
+	if (!display)
+		return -ENODEV;
+
+	if (*ppos)
+		return 0;
+
+	buf = kzalloc(SZ_1K, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	len += snprintf(buf, SZ_1K, "%s\n",
+			display->pll_update_enable ? "enable" : "disable");
+
+	if (copy_to_user(buff, buf, len)) {
+		kfree(buf);
+		return -EFAULT;
+	}
+
+	*ppos += len;
+
+	kfree(buf);
+	return len;
+}
+
+static ssize_t _sde_hdmi_debugfs_pll_enable_write(struct file *file,
+		const char __user *user_buf, size_t count, loff_t *ppos)
+{
+	struct sde_hdmi *display = file->private_data;
+	char buf[10];
+	int enable = 0;
+
+	if (!display)
+		return -ENODEV;
+
+	if (count >= sizeof(buf))
+		return -EINVAL;
+
+	if (copy_from_user(buf, user_buf, count))
+		return -EFAULT;
+
+	buf[count] = 0; /* end of string */
+
+	if (kstrtoint(buf, 0, &enable))
+		return -EINVAL;
+
+	_sde_hdmi_enable_pll_update(display, enable);
+
+	return count;
+}
+
+static const struct file_operations pll_enable_fops = {
+	.open = simple_open,
+	.read = _sde_hdmi_debugfs_pll_enable_read,
+	.write = _sde_hdmi_debugfs_pll_enable_write,
+};
+
+static int _sde_hdmi_debugfs_init(struct sde_hdmi *display)
+{
+	int rc = 0;
+	struct dentry *dir, *dump_file, *edid_modes;
+	struct dentry *edid_vsdb_info, *edid_hdr_info, *edid_hfvsdb_info;
+	struct dentry *edid_vcdb_info, *edid_vendor_name;
+	struct dentry *src_hdcp14_support, *src_hdcp22_support;
+	struct dentry *sink_hdcp22_support, *hdmi_hdcp_state;
+	struct dentry *pll_delta_file, *pll_enable_file;
+
+	dir = debugfs_create_dir(display->name, NULL);
+	if (!dir) {
+		rc = -ENOMEM;
+		SDE_ERROR("[%s]debugfs create dir failed, rc = %d\n",
+			display->name, rc);
+		goto error;
+	}
+
+	dump_file = debugfs_create_file("dump_info",
+					0444,
+					dir,
+					display,
+					&dump_info_fops);
+	if (IS_ERR_OR_NULL(dump_file)) {
+		rc = PTR_ERR(dump_file);
+		SDE_ERROR("[%s]debugfs create dump_info file failed, rc=%d\n",
+		       display->name, rc);
+		goto error_remove_dir;
+	}
+
+	pll_delta_file = debugfs_create_file("pll_delta",
+					0644,
+					dir,
+					display,
+					&pll_delta_fops);
+	if (IS_ERR_OR_NULL(pll_delta_file)) {
+		rc = PTR_ERR(pll_delta_file);
+		SDE_ERROR("[%s]debugfs create pll_delta file failed, rc=%d\n",
+		       display->name, rc);
+		goto error_remove_dir;
+	}
+
+	pll_enable_file = debugfs_create_file("pll_enable",
+					0644,
+					dir,
+					display,
+					&pll_enable_fops);
+	if (IS_ERR_OR_NULL(pll_enable_file)) {
+		rc = PTR_ERR(pll_enable_file);
+		SDE_ERROR("[%s]debugfs create pll_enable file failed, rc=%d\n",
+		       display->name, rc);
+		goto error_remove_dir;
+	}
+
+	edid_modes = debugfs_create_file("edid_modes",
+					0444,
+					dir,
+					display,
+					&edid_modes_fops);
+
+	if (IS_ERR_OR_NULL(edid_modes)) {
+		rc = PTR_ERR(edid_modes);
+		SDE_ERROR("[%s]debugfs create file failed, rc=%d\n",
+		       display->name, rc);
+		goto error_remove_dir;
+	}
+
+	edid_vsdb_info = debugfs_create_file("edid_vsdb_info",
+						0444,
+						dir,
+						display,
+						&edid_vsdb_info_fops);
+
+	if (IS_ERR_OR_NULL(edid_vsdb_info)) {
+		rc = PTR_ERR(edid_vsdb_info);
+		SDE_ERROR("[%s]debugfs create file failed, rc=%d\n",
+		       display->name, rc);
+		goto error_remove_dir;
+	}
+
+	edid_hdr_info = debugfs_create_file("edid_hdr_info",
+						0444,
+						dir,
+						display,
+						&edid_hdr_info_fops);
+	if (IS_ERR_OR_NULL(edid_hdr_info)) {
+		rc = PTR_ERR(edid_hdr_info);
+		SDE_ERROR("[%s]debugfs create file failed, rc=%d\n",
+		       display->name, rc);
+		goto error_remove_dir;
+	}
+
+	edid_hfvsdb_info = debugfs_create_file("edid_hfvsdb_info",
+						0444,
+						dir,
+						display,
+						&edid_hfvsdb_info_fops);
+
+	if (IS_ERR_OR_NULL(edid_hfvsdb_info)) {
+		rc = PTR_ERR(edid_hfvsdb_info);
+		SDE_ERROR("[%s]debugfs create file failed, rc=%d\n",
+		       display->name, rc);
+		goto error_remove_dir;
+	}
+
+	edid_vcdb_info = debugfs_create_file("edid_vcdb_info",
+						0444,
+						dir,
+						display,
+						&edid_vcdb_info_fops);
+
+	if (IS_ERR_OR_NULL(edid_vcdb_info)) {
+		rc = PTR_ERR(edid_vcdb_info);
+		SDE_ERROR("[%s]debugfs create file failed, rc=%d\n",
+		       display->name, rc);
+		goto error_remove_dir;
+	}
+
+	edid_vendor_name = debugfs_create_file("edid_vendor_name",
+						0444,
+						dir,
+						display,
+						&edid_vendor_name_fops);
+
+	if (IS_ERR_OR_NULL(edid_vendor_name)) {
+		rc = PTR_ERR(edid_vendor_name);
+		SDE_ERROR("[%s]debugfs create file failed, rc=%d\n",
+		       display->name, rc);
+		goto error_remove_dir;
+	}
+
+	src_hdcp14_support = debugfs_create_file("src_hdcp14_support",
+						0444,
+						dir,
+						display,
+						&hdcp_src_14_support_fops);
+
+	if (IS_ERR_OR_NULL(src_hdcp14_support)) {
+		rc = PTR_ERR(src_hdcp14_support);
+		SDE_ERROR("[%s]debugfs create file failed, rc=%d\n",
+		       display->name, rc);
+		goto error_remove_dir;
+	}
+
+	src_hdcp22_support = debugfs_create_file("src_hdcp22_support",
+						0444,
+						dir,
+						display,
+						&hdcp_src_22_support_fops);
+
+	if (IS_ERR_OR_NULL(src_hdcp22_support)) {
+		rc = PTR_ERR(src_hdcp22_support);
+		SDE_ERROR("[%s]debugfs create file failed, rc=%d\n",
+		       display->name, rc);
+		goto error_remove_dir;
+	}
+
+	sink_hdcp22_support = debugfs_create_file("sink_hdcp22_support",
+						0444,
+						dir,
+						display,
+						&hdcp_sink_22_support_fops);
+
+	if (IS_ERR_OR_NULL(sink_hdcp22_support)) {
+		rc = PTR_ERR(sink_hdcp22_support);
+		SDE_ERROR("[%s]debugfs create file failed, rc=%d\n",
+		       display->name, rc);
+		goto error_remove_dir;
+	}
+
+	hdmi_hdcp_state = debugfs_create_file("hdmi_hdcp_state",
+						0444,
+						dir,
+						display,
+						&sde_hdmi_hdcp_state_fops);
+
+	if (IS_ERR_OR_NULL(hdmi_hdcp_state)) {
+		rc = PTR_ERR(hdmi_hdcp_state);
+		SDE_ERROR("[%s]debugfs create file failed, rc=%d\n",
+		       display->name, rc);
+		goto error_remove_dir;
+	}
+
+	display->root = dir;
+	return rc;
+error_remove_dir:
+	debugfs_remove(dir);
+error:
+	return rc;
+}
+
+static void _sde_hdmi_debugfs_deinit(struct sde_hdmi *display)
+{
+	debugfs_remove(display->root);
+}
+
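+/*
+ * Pulse the PHY and PLL software reset bits. The current value of
+ * REG_HDMI_PHY_CTRL is read first so that reset is asserted and then
+ * deasserted with the correct polarity, whether the lines are
+ * active-low or active-high.
+ */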
+static void _sde_hdmi_phy_reset(struct hdmi *hdmi)
+{
+	unsigned int val;
+
+	val = hdmi_read(hdmi, REG_HDMI_PHY_CTRL);
+
+	if (val & HDMI_PHY_CTRL_SW_RESET_LOW)
+		hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
+				val & ~HDMI_PHY_CTRL_SW_RESET);
+	else
+		hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
+				val | HDMI_PHY_CTRL_SW_RESET);
+
+	if (val & HDMI_PHY_CTRL_SW_RESET_PLL_LOW)
+		hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
+				val & ~HDMI_PHY_CTRL_SW_RESET_PLL);
+	else
+		hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
+				val | HDMI_PHY_CTRL_SW_RESET_PLL);
+
+	if (val & HDMI_PHY_CTRL_SW_RESET_LOW)
+		hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
+				val | HDMI_PHY_CTRL_SW_RESET);
+	else
+		hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
+				val & ~HDMI_PHY_CTRL_SW_RESET);
+
+	if (val & HDMI_PHY_CTRL_SW_RESET_PLL_LOW)
+		hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
+				val | HDMI_PHY_CTRL_SW_RESET_PLL);
+	else
+		hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
+				val & ~HDMI_PHY_CTRL_SW_RESET_PLL);
+}
+
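+/*
+ * Request and drive (on) or release (off) the HPD, DDC and mux GPIOs.
+ * A GPIO set to -1 in the platform config is treated as not present and
+ * skipped; on a request failure, every previously requested GPIO is
+ * freed again in reverse order.
+ */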
+static int _sde_hdmi_gpio_config(struct hdmi *hdmi, bool on)
+{
+	const struct hdmi_platform_config *config = hdmi->config;
+	int ret;
+
+	if (on) {
+		if (config->ddc_clk_gpio != -1) {
+			ret = gpio_request(config->ddc_clk_gpio,
+				"HDMI_DDC_CLK");
+			if (ret) {
+				SDE_ERROR("'%s'(%d) gpio_request failed: %d\n",
+					"HDMI_DDC_CLK", config->ddc_clk_gpio,
+					ret);
+				goto error_ddc_clk_gpio;
+			}
+			gpio_set_value_cansleep(config->ddc_clk_gpio, 1);
+		}
+
+		if (config->ddc_data_gpio != -1) {
+			ret = gpio_request(config->ddc_data_gpio,
+				"HDMI_DDC_DATA");
+			if (ret) {
+				SDE_ERROR("'%s'(%d) gpio_request failed: %d\n",
+					"HDMI_DDC_DATA", config->ddc_data_gpio,
+					ret);
+				goto error_ddc_data_gpio;
+			}
+			gpio_set_value_cansleep(config->ddc_data_gpio, 1);
+		}
+
+		ret = gpio_request(config->hpd_gpio, "HDMI_HPD");
+		if (ret) {
+			SDE_ERROR("'%s'(%d) gpio_request failed: %d\n",
+				"HDMI_HPD", config->hpd_gpio, ret);
+			goto error_hpd_gpio;
+		}
+		gpio_direction_output(config->hpd_gpio, 1);
+		if (config->hpd5v_gpio != -1) {
+			ret = gpio_request(config->hpd5v_gpio, "HDMI_HPD_5V");
+			if (ret) {
+				SDE_ERROR("'%s'(%d) gpio_request failed: %d\n",
+						  "HDMI_HPD_5V",
+						  config->hpd5v_gpio,
+						  ret);
+				goto error_hpd5v_gpio;
+			}
+			gpio_set_value_cansleep(config->hpd5v_gpio, 1);
+		}
+
+		if (config->mux_en_gpio != -1) {
+			ret = gpio_request(config->mux_en_gpio, "HDMI_MUX_EN");
+			if (ret) {
+				SDE_ERROR("'%s'(%d) gpio_request failed: %d\n",
+					"HDMI_MUX_EN", config->mux_en_gpio,
+					ret);
+				goto error_en_gpio;
+			}
+			gpio_set_value_cansleep(config->mux_en_gpio, 1);
+		}
+
+		if (config->mux_sel_gpio != -1) {
+			ret = gpio_request(config->mux_sel_gpio,
+				"HDMI_MUX_SEL");
+			if (ret) {
+				SDE_ERROR("'%s'(%d) gpio_request failed: %d\n",
+					"HDMI_MUX_SEL", config->mux_sel_gpio,
+					ret);
+				goto error_sel_gpio;
+			}
+			gpio_set_value_cansleep(config->mux_sel_gpio, 0);
+		}
+
+		if (config->mux_lpm_gpio != -1) {
+			ret = gpio_request(config->mux_lpm_gpio,
+					"HDMI_MUX_LPM");
+			if (ret) {
+				SDE_ERROR("'%s'(%d) gpio_request failed: %d\n",
+					"HDMI_MUX_LPM",
+					config->mux_lpm_gpio, ret);
+				goto error_lpm_gpio;
+			}
+			gpio_set_value_cansleep(config->mux_lpm_gpio, 1);
+		}
+		SDE_DEBUG("gpio on");
+	} else {
+		if (config->ddc_clk_gpio != -1)
+			gpio_free(config->ddc_clk_gpio);
+
+		if (config->ddc_data_gpio != -1)
+			gpio_free(config->ddc_data_gpio);
+
+		gpio_free(config->hpd_gpio);
+
+		if (config->hpd5v_gpio != -1)
+			gpio_free(config->hpd5v_gpio);
+
+		if (config->mux_en_gpio != -1) {
+			gpio_set_value_cansleep(config->mux_en_gpio, 0);
+			gpio_free(config->mux_en_gpio);
+		}
+
+		if (config->mux_sel_gpio != -1) {
+			gpio_set_value_cansleep(config->mux_sel_gpio, 1);
+			gpio_free(config->mux_sel_gpio);
+		}
+
+		if (config->mux_lpm_gpio != -1) {
+			gpio_set_value_cansleep(config->mux_lpm_gpio, 0);
+			gpio_free(config->mux_lpm_gpio);
+		}
+		SDE_DEBUG("gpio off");
+	}
+
+	return 0;
+
+error_lpm_gpio:
+	if (config->mux_sel_gpio != -1)
+		gpio_free(config->mux_sel_gpio);
+error_sel_gpio:
+	if (config->mux_en_gpio != -1)
+		gpio_free(config->mux_en_gpio);
+error_en_gpio:
+	if (config->hpd5v_gpio != -1)
+		gpio_free(config->hpd5v_gpio);
+error_hpd5v_gpio:
+	gpio_free(config->hpd_gpio);
+error_hpd_gpio:
+	if (config->ddc_data_gpio != -1)
+		gpio_free(config->ddc_data_gpio);
+error_ddc_data_gpio:
+	if (config->ddc_clk_gpio != -1)
+		gpio_free(config->ddc_clk_gpio);
+error_ddc_clk_gpio:
+	return ret;
+}
+
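+/*
+ * HPD bring-up: enable the hpd regulators, pinctrl default state, GPIOs
+ * and hpd clocks, then (unless continuous splash handed the core over)
+ * reset the PHY, program the HPD debounce timeout and toggle the HPD
+ * circuit so an already-connected cable is detected.
+ */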
+static int _sde_hdmi_hpd_enable(struct sde_hdmi *sde_hdmi)
+{
+	struct hdmi *hdmi = sde_hdmi->ctrl.ctrl;
+	const struct hdmi_platform_config *config = hdmi->config;
+	struct device *dev = &hdmi->pdev->dev;
+	uint32_t hpd_ctrl;
+	int i, ret;
+	unsigned long flags;
+	struct drm_connector *connector;
+	struct msm_drm_private *priv;
+	struct sde_kms *sde_kms;
+
+	connector = hdmi->connector;
+	priv = connector->dev->dev_private;
+	sde_kms = to_sde_kms(priv->kms);
+
+	for (i = 0; i < config->hpd_reg_cnt; i++) {
+		ret = regulator_enable(hdmi->hpd_regs[i]);
+		if (ret) {
+			SDE_ERROR("failed to enable hpd regulator: %s (%d)\n",
+					config->hpd_reg_names[i], ret);
+			goto fail;
+		}
+	}
+
+	ret = pinctrl_pm_select_default_state(dev);
+	if (ret) {
+		SDE_ERROR("pinctrl state chg failed: %d\n", ret);
+		goto fail;
+	}
+
+	ret = _sde_hdmi_gpio_config(hdmi, true);
+	if (ret) {
+		SDE_ERROR("failed to configure GPIOs: %d\n", ret);
+		goto fail;
+	}
+
+	for (i = 0; i < config->hpd_clk_cnt; i++) {
+		if (config->hpd_freq && config->hpd_freq[i]) {
+			ret = clk_set_rate(hdmi->hpd_clks[i],
+					config->hpd_freq[i]);
+			if (ret)
+				pr_warn("failed to set clk %s (%d)\n",
+						config->hpd_clk_names[i], ret);
+		}
+
+		ret = clk_prepare_enable(hdmi->hpd_clks[i]);
+		if (ret) {
+			SDE_ERROR("failed to enable hpd clk: %s (%d)\n",
+					config->hpd_clk_names[i], ret);
+			goto fail;
+		}
+	}
+
+	if (!sde_kms->splash_info.handoff) {
+		sde_hdmi_set_mode(hdmi, false);
+		_sde_hdmi_phy_reset(hdmi);
+		sde_hdmi_set_mode(hdmi, true);
+	}
+
+	hdmi_write(hdmi, REG_HDMI_USEC_REFTIMER, 0x0001001b);
+
+	/* set timeout to 4.1ms (max) for hardware debounce */
+	spin_lock_irqsave(&hdmi->reg_lock, flags);
+	hpd_ctrl = hdmi_read(hdmi, REG_HDMI_HPD_CTRL);
+	hpd_ctrl |= HDMI_HPD_CTRL_TIMEOUT(0x1fff);
+
+	hdmi_write(hdmi, REG_HDMI_HPD_CTRL,
+			HDMI_HPD_CTRL_ENABLE | hpd_ctrl);
+
+	/* enable HPD events: */
+	hdmi_write(hdmi, REG_HDMI_HPD_INT_CTRL,
+			HDMI_HPD_INT_CTRL_INT_CONNECT |
+			HDMI_HPD_INT_CTRL_INT_EN);
+
+	/* Toggle HPD circuit to trigger HPD sense */
+	hdmi_write(hdmi, REG_HDMI_HPD_CTRL,
+			~HDMI_HPD_CTRL_ENABLE & hpd_ctrl);
+	hdmi_write(hdmi, REG_HDMI_HPD_CTRL,
+			HDMI_HPD_CTRL_ENABLE | hpd_ctrl);
+	spin_unlock_irqrestore(&hdmi->reg_lock, flags);
+
+	hdmi->hpd_off = false;
+	SDE_DEBUG("enabled hdmi hpd\n");
+	return 0;
+
+fail:
+	return ret;
+}
+
+static void _sde_hdmi_hpd_disable(struct sde_hdmi *sde_hdmi)
+{
+	struct hdmi *hdmi = sde_hdmi->ctrl.ctrl;
+	const struct hdmi_platform_config *config = hdmi->config;
+	struct device *dev = &hdmi->pdev->dev;
+	int i, ret = 0;
+
+	if (hdmi->hpd_off) {
+		pr_warn("hdmi display hpd was already disabled\n");
+		return;
+	}
+
+	/* Disable HPD interrupt */
+	hdmi_write(hdmi, REG_HDMI_HPD_INT_CTRL, 0);
+
+	sde_hdmi_set_mode(hdmi, false);
+
+	for (i = 0; i < config->hpd_clk_cnt; i++)
+		clk_disable_unprepare(hdmi->hpd_clks[i]);
+
+	ret = _sde_hdmi_gpio_config(hdmi, false);
+	if (ret)
+		pr_warn("failed to unconfigure GPIOs: %d\n", ret);
+
+	ret = pinctrl_pm_select_sleep_state(dev);
+	if (ret)
+		pr_warn("pinctrl state chg failed: %d\n", ret);
+
+	for (i = 0; i < config->hpd_reg_cnt; i++) {
+		ret = regulator_disable(hdmi->hpd_regs[i]);
+		if (ret)
+			pr_warn("failed to disable hpd regulator: %s (%d)\n",
+					config->hpd_reg_names[i], ret);
+	}
+	hdmi->hpd_off = true;
+	SDE_DEBUG("disabled hdmi hpd\n");
+}
+
+/**
+ * _sde_hdmi_update_hpd_state() - Update the HDMI HPD clock state
+ *
+ * @hdmi_display: the SDE HDMI display to update.
+ * @state: non-zero to disable the HPD clock, 0 to enable.
+ * return: 0 on success, non-zero in case of failure.
+ */
+static int
+_sde_hdmi_update_hpd_state(struct sde_hdmi *hdmi_display, u64 state)
+{
+	struct hdmi *hdmi = hdmi_display->ctrl.ctrl;
+	int rc = 0;
+
+	if (hdmi_display->non_pluggable)
+		return 0;
+
+	SDE_DEBUG("changing hdmi hpd state to %llu\n", state);
+
+	if (state == SDE_MODE_HPD_ON) {
+		if (!hdmi->hpd_off)
+			pr_warn("hdmi display hpd was already enabled\n");
+		rc = _sde_hdmi_hpd_enable(hdmi_display);
+	} else {
+		_sde_hdmi_hpd_disable(hdmi_display);
+	}
+
+	return rc;
+}
+
+static void _sde_hdmi_cec_update_phys_addr(struct sde_hdmi *display)
+{
+	struct edid *edid = display->edid_ctrl->edid;
+
+	if (edid)
+		cec_notifier_set_phys_addr_from_edid(display->notifier, edid);
+	else
+		cec_notifier_set_phys_addr(display->notifier,
+			CEC_PHYS_ADDR_INVALID);
+}
+
+static void _sde_hdmi_init_ddc(struct sde_hdmi *display, struct hdmi *hdmi)
+{
+	display->ddc_ctrl.io = &display->io[HDMI_TX_CORE_IO];
+	init_completion(&display->ddc_ctrl.rx_status_done);
+}
+
+static void _sde_hdmi_map_regs(struct sde_hdmi *display, struct hdmi *hdmi)
+{
+	display->io[HDMI_TX_CORE_IO].base = hdmi->mmio;
+	display->io[HDMI_TX_CORE_IO].len = hdmi->mmio_len;
+	display->io[HDMI_TX_QFPROM_IO].base = hdmi->qfprom_mmio;
+	display->io[HDMI_TX_QFPROM_IO].len = hdmi->qfprom_mmio_len;
+	display->io[HDMI_TX_HDCP_IO].base = hdmi->hdcp_mmio;
+	display->io[HDMI_TX_HDCP_IO].len = hdmi->hdcp_mmio_len;
+}
+
+static void _sde_hdmi_hotplug_work(struct work_struct *work)
+{
+	struct sde_hdmi *sde_hdmi =
+		container_of(work, struct sde_hdmi, hpd_work);
+	struct drm_connector *connector;
+	struct hdmi *hdmi = NULL;
+	u32 hdmi_ctrl;
+
+	if (!sde_hdmi || !sde_hdmi->ctrl.ctrl ||
+		!sde_hdmi->ctrl.ctrl->connector ||
+		!sde_hdmi->edid_ctrl) {
+		SDE_ERROR("sde_hdmi=%p or hdmi or connector is NULL\n",
+				sde_hdmi);
+		return;
+	}
+	hdmi = sde_hdmi->ctrl.ctrl;
+	connector = sde_hdmi->ctrl.ctrl->connector;
+
+	if (sde_hdmi->connected) {
+		hdmi_ctrl = hdmi_read(hdmi, REG_HDMI_CTRL);
+		hdmi_write(hdmi, REG_HDMI_CTRL, hdmi_ctrl | HDMI_CTRL_ENABLE);
+		sde_get_edid(connector, hdmi->i2c,
+		(void **)&sde_hdmi->edid_ctrl);
+		hdmi_write(hdmi, REG_HDMI_CTRL, hdmi_ctrl);
+		hdmi->hdmi_mode = sde_detect_hdmi_monitor(sde_hdmi->edid_ctrl);
+	} else {
+		sde_free_edid((void **)&sde_hdmi->edid_ctrl);
+	}
+
+	drm_helper_hpd_irq_event(connector->dev);
+	_sde_hdmi_cec_update_phys_addr(sde_hdmi);
+}
+
+static void _sde_hdmi_connector_irq(struct sde_hdmi *sde_hdmi)
+{
+	struct hdmi *hdmi = sde_hdmi->ctrl.ctrl;
+	uint32_t hpd_int_status, hpd_int_ctrl;
+
+	/* Process HPD: */
+	hpd_int_status = hdmi_read(hdmi, REG_HDMI_HPD_INT_STATUS);
+	hpd_int_ctrl   = hdmi_read(hdmi, REG_HDMI_HPD_INT_CTRL);
+
+	if ((hpd_int_ctrl & HDMI_HPD_INT_CTRL_INT_EN) &&
+			(hpd_int_status & HDMI_HPD_INT_STATUS_INT)) {
+		sde_hdmi->connected = !!(hpd_int_status &
+					HDMI_HPD_INT_STATUS_CABLE_DETECTED);
+		/* ack & disable (temporarily) HPD events: */
+		hdmi_write(hdmi, REG_HDMI_HPD_INT_CTRL,
+			HDMI_HPD_INT_CTRL_INT_ACK);
+
+		SDE_HDMI_DEBUG("status=%04x, ctrl=%04x", hpd_int_status,
+				hpd_int_ctrl);
+
+		/* detect disconnect if we are connected or vice versa: */
+		hpd_int_ctrl = HDMI_HPD_INT_CTRL_INT_EN;
+		if (!sde_hdmi->connected)
+			hpd_int_ctrl |= HDMI_HPD_INT_CTRL_INT_CONNECT;
+		hdmi_write(hdmi, REG_HDMI_HPD_INT_CTRL, hpd_int_ctrl);
+
+		queue_work(hdmi->workq, &sde_hdmi->hpd_work);
+	}
+}
+
+static void _sde_hdmi_cec_irq(struct sde_hdmi *sde_hdmi)
+{
+	struct hdmi *hdmi = sde_hdmi->ctrl.ctrl;
+	u32 cec_intr = hdmi_read(hdmi, REG_HDMI_CEC_INT);
+
+	/* Routing interrupt to external CEC drivers */
+	if (cec_intr)
+		generic_handle_irq(irq_find_mapping(
+				sde_hdmi->irq_domain, 1));
+}
+
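+/*
+ * Top-level HDMI interrupt handler: fans the interrupt out to the HPD,
+ * scrambling, HDCP 2.2 DDC, DDC I2C, HDCP and CEC sub-handlers below.
+ */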
+static irqreturn_t _sde_hdmi_irq(int irq, void *dev_id)
+{
+	struct sde_hdmi *display = dev_id;
+	struct hdmi *hdmi;
+
+	if (!display || !display->ctrl.ctrl) {
+		SDE_ERROR("sde_hdmi=%pK or hdmi is NULL\n", display);
+		return IRQ_NONE;
+	}
+
+	hdmi = display->ctrl.ctrl;
+	/* Process HPD: */
+	_sde_hdmi_connector_irq(display);
+
+	/* Process Scrambling ISR */
+	sde_hdmi_ddc_scrambling_isr((void *)display);
+
+	/* Process DDC2 */
+	sde_hdmi_ddc_hdcp2p2_isr((void *)display);
+
+	/* Process DDC: */
+	hdmi_i2c_irq(hdmi->i2c);
+
+	/* Process HDCP: */
+	if (display->hdcp_ops && display->hdcp_data) {
+		if (display->hdcp_ops->isr) {
+			if (display->hdcp_ops->isr(
+				display->hdcp_data))
+				DEV_ERR("%s: hdcp_1x_isr failed\n",
+						__func__);
+		}
+	}
+
+	/* Process CEC: */
+	_sde_hdmi_cec_irq(display);
+
+	return IRQ_HANDLED;
+}
+
+static int _sde_hdmi_audio_info_setup(struct platform_device *pdev,
+	struct msm_ext_disp_audio_setup_params *params)
+{
+	int rc = -EPERM;
+	struct sde_hdmi *display = NULL;
+	struct hdmi *hdmi = NULL;
+
+	display = platform_get_drvdata(pdev);
+
+	if (!display || !params) {
+		SDE_ERROR("invalid param(s), display %pK, params %pK\n",
+				display, params);
+		return -ENODEV;
+	}
+
+	hdmi = display->ctrl.ctrl;
+
+	if (hdmi->hdmi_mode)
+		rc = sde_hdmi_audio_on(hdmi, params);
+
+	return rc;
+}
+
+static int _sde_hdmi_get_audio_edid_blk(struct platform_device *pdev,
+	struct msm_ext_disp_audio_edid_blk *blk)
+{
+	struct sde_hdmi *display = platform_get_drvdata(pdev);
+
+	if (!display || !blk) {
+		SDE_ERROR("invalid param(s), display %pK, blk %pK\n",
+			display, blk);
+		return -ENODEV;
+	}
+
+	blk->audio_data_blk = display->edid_ctrl->audio_data_block;
+	blk->audio_data_blk_size = display->edid_ctrl->adb_size;
+
+	blk->spk_alloc_data_blk = display->edid_ctrl->spkr_alloc_data_block;
+	blk->spk_alloc_data_blk_size = display->edid_ctrl->sadb_size;
+
+	return 0;
+}
+
+static int _sde_hdmi_get_cable_status(struct platform_device *pdev, u32 vote)
+{
+	struct sde_hdmi *display = NULL;
+	struct hdmi *hdmi = NULL;
+
+	display = platform_get_drvdata(pdev);
+
+	if (!display) {
+		SDE_ERROR("invalid param(s), display %pK\n", display);
+		return -ENODEV;
+	}
+
+	hdmi = display->ctrl.ctrl;
+
+	return hdmi->power_on && display->connected;
+}
+
+static void _sde_hdmi_audio_codec_ready(struct platform_device *pdev)
+{
+	struct sde_hdmi *display = platform_get_drvdata(pdev);
+
+	if (!display) {
+		SDE_ERROR("invalid param(s), display %pK\n", display);
+		return;
+	}
+
+	mutex_lock(&display->display_lock);
+	if (!display->codec_ready) {
+		display->codec_ready = true;
+
+		if (display->client_notify_pending)
+			sde_hdmi_notify_clients(display, display->connected);
+	}
+	mutex_unlock(&display->display_lock);
+}
+
+static int _sde_hdmi_ext_disp_init(struct sde_hdmi *display)
+{
+	int rc = 0;
+	struct device_node *pd_np;
+	const char *phandle = "qcom,msm_ext_disp";
+
+	if (!display) {
+		SDE_ERROR("[%s]Invalid params\n", display->name);
+		return -EINVAL;
+	}
+
+	display->ext_audio_data.type = EXT_DISPLAY_TYPE_HDMI;
+	display->ext_audio_data.pdev = display->pdev;
+	display->ext_audio_data.codec_ops.audio_info_setup =
+		_sde_hdmi_audio_info_setup;
+	display->ext_audio_data.codec_ops.get_audio_edid_blk =
+		_sde_hdmi_get_audio_edid_blk;
+	display->ext_audio_data.codec_ops.cable_status =
+		_sde_hdmi_get_cable_status;
+	display->ext_audio_data.codec_ops.codec_ready =
+		_sde_hdmi_audio_codec_ready;
+
+	if (!display->pdev->dev.of_node) {
+		SDE_ERROR("[%s]cannot find sde_hdmi of_node\n", display->name);
+		return -ENODEV;
+	}
+
+	pd_np = of_parse_phandle(display->pdev->dev.of_node, phandle, 0);
+	if (!pd_np) {
+		SDE_ERROR("[%s]cannot find %s device node\n",
+			display->name, phandle);
+		return -ENODEV;
+	}
+
+	display->ext_pdev = of_find_device_by_node(pd_np);
+	if (!display->ext_pdev) {
+		SDE_ERROR("[%s]cannot find %s platform device\n",
+			display->name, phandle);
+		return -ENODEV;
+	}
+
+	rc = msm_ext_disp_register_intf(display->ext_pdev,
+			&display->ext_audio_data);
+	if (rc)
+		SDE_ERROR("[%s]failed to register disp\n", display->name);
+
+	return rc;
+}
+
+void sde_hdmi_notify_clients(struct sde_hdmi *display, bool connected)
+{
+	int state = connected ?
+		EXT_DISPLAY_CABLE_CONNECT : EXT_DISPLAY_CABLE_DISCONNECT;
+
+	if (display && display->ext_audio_data.intf_ops.hpd) {
+		struct hdmi *hdmi = display->ctrl.ctrl;
+		u32 flags = MSM_EXT_DISP_HPD_ASYNC_VIDEO;
+
+		if (hdmi->hdmi_mode)
+			flags |= MSM_EXT_DISP_HPD_AUDIO;
+
+		display->ext_audio_data.intf_ops.hpd(display->ext_pdev,
+				display->ext_audio_data.type, state, flags);
+	}
+}
+
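+/*
+ * Enable or disable the HDMI core. When enabling for a DVI sink
+ * (hdmi_mode is false), the HDMI bit is pulsed high once and then
+ * cleared before the final register write below.
+ */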
+void sde_hdmi_set_mode(struct hdmi *hdmi, bool power_on)
+{
+	uint32_t ctrl = 0;
+	unsigned long flags;
+
+	spin_lock_irqsave(&hdmi->reg_lock, flags);
+	ctrl = hdmi_read(hdmi, REG_HDMI_CTRL);
+	if (power_on) {
+		ctrl |= HDMI_CTRL_ENABLE;
+		if (!hdmi->hdmi_mode) {
+			ctrl |= HDMI_CTRL_HDMI;
+			hdmi_write(hdmi, REG_HDMI_CTRL, ctrl);
+			ctrl &= ~HDMI_CTRL_HDMI;
+		} else {
+			ctrl |= HDMI_CTRL_HDMI;
+		}
+	} else {
+		ctrl &= ~HDMI_CTRL_HDMI;
+	}
+
+	hdmi_write(hdmi, REG_HDMI_CTRL, ctrl);
+	spin_unlock_irqrestore(&hdmi->reg_lock, flags);
+	SDE_HDMI_DEBUG("HDMI Core: %s, HDMI_CTRL=0x%08x\n",
+			power_on ? "Enable" : "Disable", ctrl);
+}
+
+#define DDC_WRITE_MAX_BYTE_NUM 32
+
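+/*
+ * Read a Status and Control Data Channel (SCDC) field from the sink.
+ * SCDC sits behind DDC slave address 0xA8; each data type maps to a
+ * register offset and length, and the raw bytes read back are decoded
+ * into a single value for the caller.
+ */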
+int sde_hdmi_scdc_read(struct hdmi *hdmi, u32 data_type, u32 *val)
+{
+	int rc = 0;
+	u8 data_buf[2] = {0};
+	u16 dev_addr, data_len;
+	u8 offset;
+
+	if (!hdmi || !hdmi->i2c || !val) {
+		SDE_ERROR("Bad Parameters\n");
+		return -EINVAL;
+	}
+
+	if (data_type >= HDMI_TX_SCDC_MAX) {
+		SDE_ERROR("Unsupported data type\n");
+		return -EINVAL;
+	}
+
+	dev_addr = 0xA8;
+
+	switch (data_type) {
+	case HDMI_TX_SCDC_SCRAMBLING_STATUS:
+		data_len = 1;
+		offset = HDMI_SCDC_SCRAMBLER_STATUS;
+		break;
+	case HDMI_TX_SCDC_SCRAMBLING_ENABLE:
+	case HDMI_TX_SCDC_TMDS_BIT_CLOCK_RATIO_UPDATE:
+		data_len = 1;
+		offset = HDMI_SCDC_TMDS_CONFIG;
+		break;
+	case HDMI_TX_SCDC_CLOCK_DET_STATUS:
+	case HDMI_TX_SCDC_CH0_LOCK_STATUS:
+	case HDMI_TX_SCDC_CH1_LOCK_STATUS:
+	case HDMI_TX_SCDC_CH2_LOCK_STATUS:
+		data_len = 1;
+		offset = HDMI_SCDC_STATUS_FLAGS_0;
+		break;
+	case HDMI_TX_SCDC_CH0_ERROR_COUNT:
+		data_len = 2;
+		offset = HDMI_SCDC_ERR_DET_0_L;
+		break;
+	case HDMI_TX_SCDC_CH1_ERROR_COUNT:
+		data_len = 2;
+		offset = HDMI_SCDC_ERR_DET_1_L;
+		break;
+	case HDMI_TX_SCDC_CH2_ERROR_COUNT:
+		data_len = 2;
+		offset = HDMI_SCDC_ERR_DET_2_L;
+		break;
+	case HDMI_TX_SCDC_READ_ENABLE:
+		data_len = 1;
+		offset = HDMI_SCDC_CONFIG_0;
+		break;
+	default:
+		SDE_ERROR("Unsupported data type\n");
+		return -EINVAL;
+	}
+
+	rc = hdmi_ddc_read(hdmi, dev_addr, offset, data_buf,
+					   data_len, true);
+	if (rc) {
+		SDE_ERROR("DDC Read failed for %d\n", data_type);
+		return rc;
+	}
+
+	switch (data_type) {
+	case HDMI_TX_SCDC_SCRAMBLING_STATUS:
+		*val = (data_buf[0] & BIT(0)) ? 1 : 0;
+		break;
+	case HDMI_TX_SCDC_SCRAMBLING_ENABLE:
+		*val = (data_buf[0] & BIT(0)) ? 1 : 0;
+		break;
+	case HDMI_TX_SCDC_TMDS_BIT_CLOCK_RATIO_UPDATE:
+		*val = (data_buf[0] & BIT(1)) ? 1 : 0;
+		break;
+	case HDMI_TX_SCDC_CLOCK_DET_STATUS:
+		*val = (data_buf[0] & BIT(0)) ? 1 : 0;
+		break;
+	case HDMI_TX_SCDC_CH0_LOCK_STATUS:
+		*val = (data_buf[0] & BIT(1)) ? 1 : 0;
+		break;
+	case HDMI_TX_SCDC_CH1_LOCK_STATUS:
+		*val = (data_buf[0] & BIT(2)) ? 1 : 0;
+		break;
+	case HDMI_TX_SCDC_CH2_LOCK_STATUS:
+		*val = (data_buf[0] & BIT(3)) ? 1 : 0;
+		break;
+	case HDMI_TX_SCDC_CH0_ERROR_COUNT:
+	case HDMI_TX_SCDC_CH1_ERROR_COUNT:
+	case HDMI_TX_SCDC_CH2_ERROR_COUNT:
+		if (data_buf[1] & BIT(7))
+			*val = (data_buf[0] | ((data_buf[1] & 0x7F) << 8));
+		else
+			*val = 0;
+		break;
+	case HDMI_TX_SCDC_READ_ENABLE:
+		*val = (data_buf[0] & BIT(0)) ? 1 : 0;
+		break;
+	default:
+		break;
+	}
+
+	return 0;
+}
+
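+/*
+ * Write an SCDC field. Only scrambling enable, the TMDS bit clock ratio
+ * and read enable are writable; the TMDS config cases first read the
+ * register back and do a read-modify-write so neighbouring bits are
+ * preserved.
+ */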
+int sde_hdmi_scdc_write(struct hdmi *hdmi, u32 data_type, u32 val)
+{
+	int rc = 0;
+	u8 data_buf[2] = {0};
+	u8 read_val = 0;
+	u16 dev_addr, data_len;
+	u8 offset;
+
+	if (!hdmi || !hdmi->i2c) {
+		SDE_ERROR("Bad Parameters\n");
+		return -EINVAL;
+	}
+
+	if (data_type >= HDMI_TX_SCDC_MAX) {
+		SDE_ERROR("Unsupported data type\n");
+		return -EINVAL;
+	}
+
+	dev_addr = 0xA8;
+
+	switch (data_type) {
+	case HDMI_TX_SCDC_SCRAMBLING_ENABLE:
+	case HDMI_TX_SCDC_TMDS_BIT_CLOCK_RATIO_UPDATE:
+		dev_addr = 0xA8;
+		data_len = 1;
+		offset = HDMI_SCDC_TMDS_CONFIG;
+		rc = hdmi_ddc_read(hdmi, dev_addr, offset, &read_val,
+						   data_len, true);
+		if (rc) {
+			SDE_ERROR("scdc read failed\n");
+			return rc;
+		}
+		if (data_type == HDMI_TX_SCDC_SCRAMBLING_ENABLE) {
+			data_buf[0] = ((((u8)(read_val & 0xFF)) & (~BIT(0))) |
+						   ((u8)(val & BIT(0))));
+		} else {
+			data_buf[0] = ((((u8)(read_val & 0xFF)) & (~BIT(1))) |
+						   (((u8)(val & BIT(0))) << 1));
+		}
+		break;
+	case HDMI_TX_SCDC_READ_ENABLE:
+		data_len = 1;
+		offset = HDMI_SCDC_CONFIG_0;
+		data_buf[0] = (u8)(val & 0x1);
+		break;
+	default:
+		SDE_ERROR("Cannot write to read only reg (%d)\n",
+				  data_type);
+		return -EINVAL;
+	}
+
+	rc = hdmi_ddc_write(hdmi, dev_addr, offset, data_buf,
+						data_len, true);
+	if (rc) {
+		SDE_ERROR("DDC Read failed for %d\n", data_type);
+		return rc;
+	}
+	return 0;
+}
+
+int sde_hdmi_get_info(struct msm_display_info *info,
+				void *display)
+{
+	int rc = 0;
+	struct sde_hdmi *hdmi_display = (struct sde_hdmi *)display;
+	struct hdmi *hdmi;
+
+	if (!display || !info) {
+		SDE_ERROR("display=%pK or info=%pK is NULL\n", display, info);
+		return -EINVAL;
+	}
+
+	hdmi = hdmi_display->ctrl.ctrl;
+
+	mutex_lock(&hdmi_display->display_lock);
+
+	info->intf_type = DRM_MODE_CONNECTOR_HDMIA;
+	info->num_of_h_tiles = 1;
+	info->h_tile_instance[0] = 0;
+	if (hdmi_display->non_pluggable) {
+		info->capabilities = MSM_DISPLAY_CAP_VID_MODE;
+		hdmi_display->connected = true;
+		hdmi->hdmi_mode = true;
+	} else {
+		info->capabilities = MSM_DISPLAY_CAP_HOT_PLUG |
+				MSM_DISPLAY_CAP_EDID | MSM_DISPLAY_CAP_VID_MODE;
+	}
+	info->is_connected = hdmi_display->connected;
+	info->max_width = HDMI_DISPLAY_MAX_WIDTH;
+	info->max_height = HDMI_DISPLAY_MAX_HEIGHT;
+	info->compression = MSM_DISPLAY_COMPRESS_NONE;
+
+	mutex_unlock(&hdmi_display->display_lock);
+	return rc;
+}
+
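+/*
+ * Build and send the HDR Dynamic Range and Mastering infoframe (type
+ * 0x87, version 1, length 0x1a per CEA-861.3) through the GENERIC0
+ * packet registers, then flush and enable the generic packet engine.
+ */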
+static void sde_hdmi_panel_set_hdr_infoframe(struct sde_hdmi *display,
+		struct drm_msm_ext_panel_hdr_metadata *hdr_meta)
+{
+	u32 packet_payload = 0;
+	u32 packet_header = 0;
+	u32 packet_control = 0;
+	u32 const type_code = 0x87;
+	u32 const version = 0x01;
+	u32 const length = 0x1a;
+	u32 const descriptor_id = 0x00;
+	u8 checksum;
+	struct hdmi *hdmi;
+	struct drm_connector *connector;
+
+	if (!display || !hdr_meta) {
+		SDE_ERROR("invalid input\n");
+		return;
+	}
+
+	hdmi = display->ctrl.ctrl;
+	connector = display->ctrl.ctrl->connector;
+
+	if (!hdmi || !connector) {
+		SDE_ERROR("invalid input\n");
+		return;
+	}
+
+	/* Setup the line number to send the packet on */
+	packet_control = hdmi_read(hdmi, HDMI_GEN_PKT_CTRL);
+	packet_control |= BIT(16);
+	hdmi_write(hdmi, HDMI_GEN_PKT_CTRL, packet_control);
+
+	/* Setup the packet to be sent every frame */
+	packet_control = hdmi_read(hdmi, HDMI_GEN_PKT_CTRL);
+	packet_control |= BIT(1);
+	hdmi_write(hdmi, HDMI_GEN_PKT_CTRL, packet_control);
+
+	/* Setup Packet header and payload */
+	packet_header = type_code | (version << 8) | (length << 16);
+	hdmi_write(hdmi, HDMI_GENERIC0_HDR, packet_header);
+
+	/*
+	 * Checksum is not a mandatory field for
+	 * the HDR infoframe as per CEA-861-3 specification.
+	 * However some HDMI sinks still expect a
+	 * valid checksum to be included as part of
+	 * the infoframe. Hence compute and add
+	 * the checksum to improve sink interoperability
+	 * for our HDR solution on HDMI.
+	 */
+	checksum = sde_hdmi_hdr_set_chksum(hdr_meta);
+
+	packet_payload = (hdr_meta->eotf << 8) | checksum;
+
+	if (connector->hdr_metadata_type_one) {
+		packet_payload |= (descriptor_id << 16)
+			| (HDMI_GET_LSB(hdr_meta->display_primaries_x[0])
+			   << 24);
+		hdmi_write(hdmi, HDMI_GENERIC0_0, packet_payload);
+	} else {
+		pr_debug("Metadata Type 1 not supported\n");
+		hdmi_write(hdmi, HDMI_GENERIC0_0, packet_payload);
+		goto enable_packet_control;
+	}
+
+	packet_payload =
+		(HDMI_GET_MSB(hdr_meta->display_primaries_x[0]))
+		| (HDMI_GET_LSB(hdr_meta->display_primaries_y[0]) << 8)
+		| (HDMI_GET_MSB(hdr_meta->display_primaries_y[0]) << 16)
+		| (HDMI_GET_LSB(hdr_meta->display_primaries_x[1]) << 24);
+	hdmi_write(hdmi, HDMI_GENERIC0_1, packet_payload);
+
+	packet_payload =
+		(HDMI_GET_MSB(hdr_meta->display_primaries_x[1]))
+		| (HDMI_GET_LSB(hdr_meta->display_primaries_y[1]) << 8)
+		| (HDMI_GET_MSB(hdr_meta->display_primaries_y[1]) << 16)
+		| (HDMI_GET_LSB(hdr_meta->display_primaries_x[2]) << 24);
+	hdmi_write(hdmi, HDMI_GENERIC0_2, packet_payload);
+
+	packet_payload =
+		(HDMI_GET_MSB(hdr_meta->display_primaries_x[2]))
+		| (HDMI_GET_LSB(hdr_meta->display_primaries_y[2]) << 8)
+		| (HDMI_GET_MSB(hdr_meta->display_primaries_y[2]) << 16)
+		| (HDMI_GET_LSB(hdr_meta->white_point_x) << 24);
+	hdmi_write(hdmi, HDMI_GENERIC0_3, packet_payload);
+
+	packet_payload =
+		(HDMI_GET_MSB(hdr_meta->white_point_x))
+		| (HDMI_GET_LSB(hdr_meta->white_point_y) << 8)
+		| (HDMI_GET_MSB(hdr_meta->white_point_y) << 16)
+		| (HDMI_GET_LSB(hdr_meta->max_luminance) << 24);
+	hdmi_write(hdmi, HDMI_GENERIC0_4, packet_payload);
+
+	packet_payload =
+		(HDMI_GET_MSB(hdr_meta->max_luminance))
+		| (HDMI_GET_LSB(hdr_meta->min_luminance) << 8)
+		| (HDMI_GET_MSB(hdr_meta->min_luminance) << 16)
+		| (HDMI_GET_LSB(hdr_meta->max_content_light_level) << 24);
+	hdmi_write(hdmi, HDMI_GENERIC0_5, packet_payload);
+
+	packet_payload =
+		(HDMI_GET_MSB(hdr_meta->max_content_light_level))
+		| (HDMI_GET_LSB(hdr_meta->max_average_light_level) << 8)
+		| (HDMI_GET_MSB(hdr_meta->max_average_light_level) << 16);
+	hdmi_write(hdmi, HDMI_GENERIC0_6, packet_payload);
+
+enable_packet_control:
+
+	/* Flush the contents to the register */
+	packet_control = hdmi_read(hdmi, HDMI_GEN_PKT_CTRL);
+	packet_control |= BIT(2);
+	hdmi_write(hdmi, HDMI_GEN_PKT_CTRL, packet_control);
+
+	/* Clear the flush bit of the register */
+	packet_control = hdmi_read(hdmi, HDMI_GEN_PKT_CTRL);
+	packet_control &= ~BIT(2);
+	hdmi_write(hdmi, HDMI_GEN_PKT_CTRL, packet_control);
+
+	/* Start sending the packets */
+	packet_control = hdmi_read(hdmi, HDMI_GEN_PKT_CTRL);
+	packet_control |= BIT(0);
+	hdmi_write(hdmi, HDMI_GEN_PKT_CTRL, packet_control);
+}
+
+static void sde_hdmi_update_colorimetry(struct sde_hdmi *display,
+	bool use_bt2020)
+{
+	struct hdmi *hdmi;
+	struct drm_connector *connector;
+	bool mode_is_yuv = false;
+	struct drm_display_mode *mode;
+	u32 mode_fmt_flags = 0;
+	u8 checksum;
+	u32 avi_info0 = 0;
+	u32 avi_info1 = 0;
+	u8 avi_iframe[HDMI_AVI_INFOFRAME_BUFFER_SIZE] = {0};
+	u8 *avi_frame = &avi_iframe[HDMI_INFOFRAME_HEADER_SIZE];
+	struct hdmi_avi_infoframe info;
+
+	if (!display) {
+		SDE_ERROR("invalid input\n");
+		return;
+	}
+
+	hdmi = display->ctrl.ctrl;
+
+	if (!hdmi) {
+		SDE_ERROR("invalid input\n");
+		return;
+	}
+
+	connector = display->ctrl.ctrl->connector;
+
+	if (!connector) {
+		SDE_ERROR("invalid input\n");
+		return;
+	}
+
+	if (!connector->hdr_supported) {
+		SDE_DEBUG("HDR is not supported\n");
+		return;
+	}
+
+	/* If the sink doesn't support both BT2020 YCC and RGB, just return */
+	if (!(connector->color_enc_fmt & DRM_EDID_COLORIMETRY_BT2020_YCC) ||
+	    !(connector->color_enc_fmt & DRM_EDID_COLORIMETRY_BT2020_RGB)) {
+		SDE_DEBUG("BT2020 colorimetry is not supported\n");
+		return;
+	}
+
+	/* If there is no change in colorimetry, just return */
+	if (use_bt2020 == display->bt2020_colorimetry)
+		return;
+
+	mode = &display->mode;
+	/* Cache the format flags before clearing */
+	mode_fmt_flags = mode->flags;
+	/*
+	 * Clear the RGB/YUV format flags before calling upstream API
+	 * as the API also compares the flags and then returns a mode
+	 */
+	mode->flags &= ~SDE_DRM_MODE_FLAG_FMT_MASK;
+	drm_hdmi_avi_infoframe_from_display_mode(&info, mode);
+	/* Restore the format flags */
+	mode->flags = mode_fmt_flags;
+
+	/* Mode should only support YUV and not both to set the flag */
+	if ((mode->private_flags & MSM_MODE_FLAG_COLOR_FORMAT_YCBCR420)
+	    && !(mode->private_flags & MSM_MODE_FLAG_COLOR_FORMAT_RGB444)) {
+		mode_is_yuv = true;
+	}
+
+	if (!display->bt2020_colorimetry && use_bt2020) {
+		/*
+		 * 1. Update colorimetry to use extended
+		 * 2. Change extended to use BT2020
+		 * 3. Change colorspace based on mode
+		 * 4. Use limited as BT2020 is always limited
+		 */
+		info.colorimetry = SDE_HDMI_USE_EXTENDED_COLORIMETRY;
+		info.extended_colorimetry = SDE_HDMI_BT2020_COLORIMETRY;
+		if (mode_is_yuv)
+			info.colorspace = HDMI_COLORSPACE_YUV420;
+		if (connector->yuv_qs)
+			info.ycc_quantization_range =
+				HDMI_YCC_QUANTIZATION_RANGE_LIMITED;
+	} else if (display->bt2020_colorimetry && !use_bt2020) {
+		/*
+		 * 1. Update colorimetry to non-extended
+		 * 2. Change colorspace based on mode
+		 * 3. Restore quantization to full if QS is enabled
+		 */
+		info.colorimetry = SDE_HDMI_DEFAULT_COLORIMETRY;
+		if (mode_is_yuv)
+			info.colorspace = HDMI_COLORSPACE_YUV420;
+		if (connector->yuv_qs)
+			info.ycc_quantization_range =
+				HDMI_YCC_QUANTIZATION_RANGE_FULL;
+	}
+
+	hdmi_avi_infoframe_pack(&info, avi_iframe, sizeof(avi_iframe));
+	checksum = avi_iframe[HDMI_INFOFRAME_HEADER_SIZE - 1];
+	avi_info0 = checksum |
+		LEFT_SHIFT_BYTE(avi_frame[0]) |
+		LEFT_SHIFT_WORD(avi_frame[1]) |
+		LEFT_SHIFT_24BITS(avi_frame[2]);
+
+	avi_info1 = avi_frame[3] |
+		LEFT_SHIFT_BYTE(avi_frame[4]) |
+		LEFT_SHIFT_WORD(avi_frame[5]) |
+		LEFT_SHIFT_24BITS(avi_frame[6]);
+
+	hdmi_write(hdmi, REG_HDMI_AVI_INFO(0), avi_info0);
+	hdmi_write(hdmi, REG_HDMI_AVI_INFO(1), avi_info1);
+	display->bt2020_colorimetry = use_bt2020;
+}
+
+static void sde_hdmi_clear_hdr_infoframe(struct sde_hdmi *display)
+{
+	struct hdmi *hdmi;
+	struct drm_connector *connector;
+	u32 packet_control = 0;
+
+	if (!display) {
+		SDE_ERROR("invalid input\n");
+		return;
+	}
+
+	hdmi = display->ctrl.ctrl;
+	connector = display->ctrl.ctrl->connector;
+
+	if (!hdmi || !connector) {
+		SDE_ERROR("invalid input\n");
+		return;
+	}
+
+	packet_control = hdmi_read(hdmi, HDMI_GEN_PKT_CTRL);
+	packet_control &= ~HDMI_GEN_PKT_CTRL_CLR_MASK;
+	hdmi_write(hdmi, HDMI_GEN_PKT_CTRL, packet_control);
+}
+
+int sde_hdmi_set_property(struct drm_connector *connector,
+			struct drm_connector_state *state,
+			int property_index,
+			uint64_t value,
+			void *display)
+{
+	int rc = 0;
+
+	if (!connector || !display) {
+		SDE_ERROR("connector=%pK or display=%pK is NULL\n",
+			connector, display);
+		return -EINVAL;
+	}
+
+	SDE_DEBUG("\n");
+
+	if (property_index == CONNECTOR_PROP_PLL_ENABLE)
+		rc = _sde_hdmi_enable_pll_update(display, value);
+	else if (property_index == CONNECTOR_PROP_PLL_DELTA)
+		rc = _sde_hdmi_update_pll_delta(display, value);
+	else if (property_index == CONNECTOR_PROP_HPD_OFF)
+		rc = _sde_hdmi_update_hpd_state(display, value);
+
+	return rc;
+}
+
+int sde_hdmi_get_property(struct drm_connector *connector,
+	struct drm_connector_state *state,
+	int property_index,
+	uint64_t *value,
+	void *display)
+{
+	struct sde_hdmi *hdmi_display = display;
+	int rc = 0;
+
+	if (!connector || !hdmi_display) {
+		SDE_ERROR("connector=%pK or display=%pK is NULL\n",
+			connector, hdmi_display);
+		return -EINVAL;
+	}
+
+	mutex_lock(&hdmi_display->display_lock);
+	if (property_index == CONNECTOR_PROP_PLL_ENABLE)
+		*value = hdmi_display->pll_update_enable ? 1 : 0;
+	if (property_index == CONNECTOR_PROP_HDCP_VERSION)
+		*value = hdmi_display->sink_hdcp_ver;
+	mutex_unlock(&hdmi_display->display_lock);
+
+	return rc;
+}
+
+u32 sde_hdmi_get_num_of_displays(void)
+{
+	u32 count = 0;
+	struct sde_hdmi *display;
+
+	mutex_lock(&sde_hdmi_list_lock);
+
+	list_for_each_entry(display, &sde_hdmi_list, list)
+		count++;
+
+	mutex_unlock(&sde_hdmi_list_lock);
+	return count;
+}
+
+int sde_hdmi_get_displays(void **display_array, u32 max_display_count)
+{
+	struct sde_hdmi *display;
+	int i = 0;
+
+	SDE_DEBUG("\n");
+
+	if (!display_array || !max_display_count) {
+		if (!display_array)
+			SDE_ERROR("invalid param\n");
+		return 0;
+	}
+
+	mutex_lock(&sde_hdmi_list_lock);
+	list_for_each_entry(display, &sde_hdmi_list, list) {
+		if (i >= max_display_count)
+			break;
+		display_array[i++] = display;
+	}
+	mutex_unlock(&sde_hdmi_list_lock);
+
+	return i;
+}
+
+int sde_hdmi_connector_pre_deinit(struct drm_connector *connector,
+		void *display)
+{
+	struct sde_hdmi *sde_hdmi = (struct sde_hdmi *)display;
+
+	if (!sde_hdmi || !sde_hdmi->ctrl.ctrl) {
+		SDE_ERROR("sde_hdmi=%p or hdmi is NULL\n", sde_hdmi);
+		return -EINVAL;
+	}
+
+	_sde_hdmi_hpd_disable(sde_hdmi);
+
+	return 0;
+}
+
+static void _sde_hdmi_get_tx_version(struct sde_hdmi *sde_hdmi)
+{
+	struct hdmi *hdmi = sde_hdmi->ctrl.ctrl;
+
+	sde_hdmi->hdmi_tx_version = hdmi_read(hdmi, REG_HDMI_VERSION);
+	sde_hdmi->hdmi_tx_major_version =
+		SDE_GET_MAJOR_VER(sde_hdmi->hdmi_tx_version);
+
+	switch (sde_hdmi->hdmi_tx_major_version) {
+	case (HDMI_TX_VERSION_3):
+		sde_hdmi->max_pclk_khz = HDMI_TX_3_MAX_PCLK_RATE;
+		break;
+	case (HDMI_TX_VERSION_4):
+		sde_hdmi->max_pclk_khz = HDMI_TX_4_MAX_PCLK_RATE;
+		break;
+	default:
+		sde_hdmi->max_pclk_khz = HDMI_DEFAULT_MAX_PCLK_RATE;
+		break;
+	}
+	SDE_DEBUG("sde_hdmi->hdmi_tx_version = 0x%x\n",
+		sde_hdmi->hdmi_tx_version);
+	SDE_DEBUG("sde_hdmi->hdmi_tx_major_version = 0x%x\n",
+		sde_hdmi->hdmi_tx_major_version);
+	SDE_DEBUG("sde_hdmi->max_pclk_khz = 0x%x\n",
+		sde_hdmi->max_pclk_khz);
+}
+
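+/*
+ * Read the QFPROM feature fuses to determine whether HDMI and HDCP have
+ * been fused off. On TX v4 and later hardware this also checks whether
+ * HDCP 1.4 must fall back to software keys, and deep color is marked as
+ * supported.
+ */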
+static int sde_hdmi_tx_check_capability(struct sde_hdmi *sde_hdmi)
+{
+	u32 hdmi_disabled, hdcp_disabled, reg_val;
+	int ret = 0;
+	struct hdmi *hdmi = sde_hdmi->ctrl.ctrl;
+
+	/* check if hdmi and hdcp are disabled */
+	if (sde_hdmi->hdmi_tx_major_version < HDMI_TX_VERSION_4) {
+		hdcp_disabled = hdmi_qfprom_read(hdmi,
+		QFPROM_RAW_FEAT_CONFIG_ROW0_LSB) & BIT(31);
+
+		hdmi_disabled = hdmi_qfprom_read(hdmi,
+		QFPROM_RAW_FEAT_CONFIG_ROW0_MSB) & BIT(0);
+	} else {
+		reg_val = hdmi_qfprom_read(hdmi,
+		QFPROM_RAW_FEAT_CONFIG_ROW0_LSB + QFPROM_RAW_VERSION_4);
+		hdcp_disabled = reg_val & BIT(12);
+
+		hdmi_disabled = reg_val & BIT(13);
+
+		reg_val = hdmi_qfprom_read(hdmi, SEC_CTRL_HW_VERSION);
+
+		SDE_DEBUG("SEC_CTRL_HW_VERSION reg_val = 0x%x\n", reg_val);
+		/*
+		 * With HDCP enabled on capable hardware, check if HW
+		 * or SW keys should be used.
+		 */
+		if (!hdcp_disabled && (reg_val >= HDCP_SEL_MIN_SEC_VERSION)) {
+			reg_val = hdmi_qfprom_read(hdmi,
+			QFPROM_RAW_FEAT_CONFIG_ROW0_MSB +
+			QFPROM_RAW_VERSION_4);
+
+			if (!(reg_val & BIT(23)))
+				sde_hdmi->hdcp1_use_sw_keys = true;
+		}
+	}
+
+	if (sde_hdmi->hdmi_tx_major_version >= HDMI_TX_VERSION_4)
+		sde_hdmi->dc_feature_supported = true;
+
+	SDE_DEBUG("%s: Features <HDMI:%s, HDCP:%s, Deep Color:%s>\n", __func__,
+			hdmi_disabled ? "OFF" : "ON",
+			hdcp_disabled ? "OFF" : "ON",
+			sde_hdmi->dc_feature_supported ? "ON" : "OFF");
+
+	if (hdmi_disabled) {
+		DEV_ERR("%s: HDMI disabled\n", __func__);
+		ret = -ENODEV;
+		goto end;
+	}
+
+	sde_hdmi->hdcp14_present = !hdcp_disabled;
+
+end:
+	return ret;
+} /* sde_hdmi_tx_check_capability */
+
+static int _sde_hdmi_init_hdcp(struct sde_hdmi *hdmi_ctrl)
+{
+	struct sde_hdcp_init_data hdcp_init_data;
+	void *hdcp_data;
+	int rc = 0;
+	struct hdmi *hdmi;
+
+	if (!hdmi_ctrl) {
+		SDE_ERROR("sde_hdmi is NULL\n");
+		return -EINVAL;
+	}
+
+	hdmi = hdmi_ctrl->ctrl.ctrl;
+	hdcp_init_data.phy_addr      = hdmi->mmio_phy_addr;
+	hdcp_init_data.core_io       = &hdmi_ctrl->io[HDMI_TX_CORE_IO];
+	hdcp_init_data.qfprom_io     = &hdmi_ctrl->io[HDMI_TX_QFPROM_IO];
+	hdcp_init_data.hdcp_io       = &hdmi_ctrl->io[HDMI_TX_HDCP_IO];
+	hdcp_init_data.mutex         = &hdmi_ctrl->hdcp_mutex;
+	hdcp_init_data.workq         = hdmi->workq;
+	hdcp_init_data.notify_status = sde_hdmi_tx_hdcp_cb;
+	hdcp_init_data.avmute_sink   = sde_hdmi_tx_set_avmute;
+	hdcp_init_data.cb_data       = (void *)hdmi_ctrl;
+	hdcp_init_data.hdmi_tx_ver   = hdmi_ctrl->hdmi_tx_major_version;
+	hdcp_init_data.sec_access    = true;
+	hdcp_init_data.client_id     = HDCP_CLIENT_HDMI;
+	hdcp_init_data.ddc_ctrl      = &hdmi_ctrl->ddc_ctrl;
+
+	if (hdmi_ctrl->hdcp14_present) {
+		hdcp_data = sde_hdcp_1x_init(&hdcp_init_data);
+
+		if (IS_ERR_OR_NULL(hdcp_data)) {
+			DEV_ERR("%s: hdcp 1.4 init failed\n", __func__);
+			rc = -EINVAL;
+			goto end;
+		} else {
+			hdmi_ctrl->hdcp_feat_data[SDE_HDCP_1x] = hdcp_data;
+			SDE_HDMI_DEBUG("%s: HDCP 1.4 initialized\n", __func__);
+		}
+	}
+
+	hdcp_data = sde_hdmi_hdcp2p2_init(&hdcp_init_data);
+
+	if (IS_ERR_OR_NULL(hdcp_data)) {
+		DEV_ERR("%s: hdcp 2.2 init failed\n", __func__);
+		rc = -EINVAL;
+		goto end;
+	} else {
+		hdmi_ctrl->hdcp_feat_data[SDE_HDCP_2P2] = hdcp_data;
+		SDE_HDMI_DEBUG("%s: HDCP 2.2 initialized\n", __func__);
+	}
+
+end:
+	return rc;
+}
+
+int sde_hdmi_connector_post_init(struct drm_connector *connector,
+		void *info,
+		void *display)
+{
+	int rc = 0;
+	struct sde_hdmi *sde_hdmi = (struct sde_hdmi *)display;
+	struct hdmi *hdmi;
+
+	if (!sde_hdmi) {
+		SDE_ERROR("sde_hdmi is NULL\n");
+		return -EINVAL;
+	}
+
+	hdmi = sde_hdmi->ctrl.ctrl;
+	if (!hdmi) {
+		SDE_ERROR("hdmi is NULL\n");
+		return -EINVAL;
+	}
+
+	if (info)
+		sde_kms_info_add_keystr(info,
+				"display type",
+				sde_hdmi->display_type);
+
+	hdmi->connector = connector;
+	INIT_WORK(&sde_hdmi->hpd_work, _sde_hdmi_hotplug_work);
+
+	/* Enable HPD detection */
+	rc = _sde_hdmi_hpd_enable(sde_hdmi);
+	if (rc)
+		SDE_ERROR("failed to enable HPD: %d\n", rc);
+
+	_sde_hdmi_get_tx_version(sde_hdmi);
+
+	sde_hdmi_tx_check_capability(sde_hdmi);
+
+	_sde_hdmi_init_hdcp(sde_hdmi);
+
+	return rc;
+}
+
+int sde_hdmi_start_hdcp(struct drm_connector *connector)
+{
+	int rc;
+	struct sde_connector *c_conn = to_sde_connector(connector);
+	struct sde_hdmi *display = (struct sde_hdmi *)c_conn->display;
+	struct hdmi *hdmi = display->ctrl.ctrl;
+
+	if (!hdmi) {
+		SDE_ERROR("%s: invalid input\n", __func__);
+		return -EINVAL;
+	}
+
+	if (!sde_hdmi_tx_is_hdcp_enabled(display))
+		return 0;
+
+	if (sde_hdmi_tx_is_encryption_set(display))
+		sde_hdmi_config_avmute(hdmi, true);
+
+	rc = display->hdcp_ops->authenticate(display->hdcp_data);
+	if (rc)
+		SDE_ERROR("%s: hdcp auth failed. rc=%d\n", __func__, rc);
+
+	return rc;
+}
+
+enum drm_connector_status
+sde_hdmi_connector_detect(struct drm_connector *connector,
+		bool force,
+		void *display)
+{
+	enum drm_connector_status status = connector_status_unknown;
+	struct msm_display_info info;
+	int rc;
+
+	if (!connector || !display) {
+		SDE_ERROR("connector=%p or display=%p is NULL\n",
+			connector, display);
+		return status;
+	}
+
+	/* get display dsi_info */
+	memset(&info, 0x0, sizeof(info));
+	rc = sde_hdmi_get_info(&info, display);
+	if (rc) {
+		SDE_ERROR("failed to get display info, rc=%d\n", rc);
+		return connector_status_disconnected;
+	}
+
+	if (info.capabilities & MSM_DISPLAY_CAP_HOT_PLUG)
+		status = (info.is_connected ? connector_status_connected :
+					      connector_status_disconnected);
+	else
+		status = connector_status_connected;
+
+	connector->display_info.width_mm = info.width_mm;
+	connector->display_info.height_mm = info.height_mm;
+
+	return status;
+}
+
+int sde_hdmi_pre_kickoff(struct drm_connector *connector,
+	void *display,
+	struct msm_display_kickoff_params *params)
+{
+	struct sde_hdmi *hdmi_display = (struct sde_hdmi *)display;
+	struct drm_msm_ext_panel_hdr_ctrl *hdr_ctrl;
+	struct drm_msm_ext_panel_hdr_metadata *hdr_meta;
+	u8 hdr_op;
+
+	if (!connector || !display || !params ||
+		!params->hdr_ctrl) {
+		pr_err("Invalid params\n");
+		return -EINVAL;
+	}
+
+	hdr_ctrl = params->hdr_ctrl;
+	hdr_meta = &hdr_ctrl->hdr_meta;
+
+	hdr_op = sde_hdmi_hdr_get_ops(hdmi_display->curr_hdr_state,
+		hdr_ctrl->hdr_state);
+
+	if (hdr_op == HDR_SEND_INFO) {
+		if (connector->hdr_supported)
+			sde_hdmi_panel_set_hdr_infoframe(display,
+				&hdr_ctrl->hdr_meta);
+		if (hdr_meta->eotf)
+			sde_hdmi_update_colorimetry(hdmi_display,
+				true);
+		else
+			sde_hdmi_update_colorimetry(hdmi_display,
+				false);
+	} else if (hdr_op == HDR_CLEAR_INFO) {
+		sde_hdmi_clear_hdr_infoframe(display);
+	}
+
+	hdmi_display->curr_hdr_state = hdr_ctrl->hdr_state;
+
+	return 0;
+}
+
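+/*
+ * CEA modes above VIC 1 (640x480) default to limited quantization range,
+ * so full range is only reported for VIC 1 and for modes with no CEA VIC
+ * match.
+ */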
+bool sde_hdmi_mode_needs_full_range(void *display)
+{
+	struct sde_hdmi *hdmi_display = (struct sde_hdmi *)display;
+	struct drm_display_mode *mode;
+	u32 mode_fmt_flags = 0;
+	u32 cea_mode;
+
+	if (!hdmi_display) {
+		SDE_ERROR("invalid input\n");
+		return false;
+	}
+
+	mode = &hdmi_display->mode;
+	/* Cache the format flags before clearing */
+	mode_fmt_flags = mode->flags;
+	/*
+	 * Clear the RGB/YUV format flags before calling upstream API
+	 * as the API also compares the flags and then returns a mode
+	 */
+	mode->flags &= ~SDE_DRM_MODE_FLAG_FMT_MASK;
+	cea_mode = drm_match_cea_mode(mode);
+	/* Restore the format flags */
+	mode->flags = mode_fmt_flags;
+
+	if (cea_mode > SDE_HDMI_VIC_640x480)
+		return false;
+
+	return true;
+}
+
+enum sde_csc_type sde_hdmi_get_csc_type(struct drm_connector *conn,
+	void *display)
+{
+	struct sde_hdmi *hdmi_display = (struct sde_hdmi *)display;
+	struct sde_connector_state *c_state;
+	struct drm_msm_ext_panel_hdr_ctrl *hdr_ctrl;
+	struct drm_msm_ext_panel_hdr_metadata *hdr_meta;
+
+	if (!hdmi_display || !conn) {
+		SDE_ERROR("invalid input\n");
+		goto error;
+	}
+
+	c_state = to_sde_connector_state(conn->state);
+
+	if (!c_state) {
+		SDE_ERROR("invalid input\n");
+		goto error;
+	}
+
+	hdr_ctrl = &c_state->hdr_ctrl;
+	hdr_meta = &hdr_ctrl->hdr_meta;
+
+	if ((hdr_ctrl->hdr_state == HDR_ENABLE)
+		&& (hdr_meta->eotf != 0))
+		return SDE_CSC_RGB2YUV_2020L;
+	else if (sde_hdmi_mode_needs_full_range(hdmi_display)
+		|| conn->yuv_qs)
+		return SDE_CSC_RGB2YUV_709FR;
+
+error:
+	return SDE_CSC_RGB2YUV_709L;
+}
+
+int sde_hdmi_connector_get_modes(struct drm_connector *connector, void *display)
+{
+	struct sde_hdmi *hdmi_display = (struct sde_hdmi *)display;
+	struct drm_display_mode *mode, *m;
+	int ret = 0;
+
+	if (!connector || !display) {
+		SDE_ERROR("connector=%p or display=%p is NULL\n",
+			connector, display);
+		return 0;
+	}
+
+	if (hdmi_display->non_pluggable) {
+		list_for_each_entry(mode, &hdmi_display->mode_list, head) {
+			m = drm_mode_duplicate(connector->dev, mode);
+			if (!m) {
+				SDE_ERROR("failed to add hdmi mode %dx%d\n",
+					mode->hdisplay, mode->vdisplay);
+				break;
+			}
+			drm_mode_probed_add(connector, m);
+		}
+		ret = hdmi_display->num_of_modes;
+	} else {
+		/* pluggable case assumes EDID was read at HPD time */
+		ret = _sde_edid_update_modes(connector,
+			hdmi_display->edid_ctrl);
+	}
+
+	return ret;
+}
+
+enum drm_mode_status sde_hdmi_mode_valid(struct drm_connector *connector,
+		struct drm_display_mode *mode,
+		void *display)
+{
+	struct sde_hdmi *hdmi_display = (struct sde_hdmi *)display;
+	struct hdmi *hdmi;
+	struct msm_drm_private *priv;
+	struct msm_kms *kms;
+	long actual, requested;
+
+	if (!connector || !display || !mode) {
+		SDE_ERROR("connector=%p or display=%p or mode=%p is NULL\n",
+			connector, display, mode);
+		return 0;
+	}
+
+	hdmi = hdmi_display->ctrl.ctrl;
+	priv = connector->dev->dev_private;
+	kms = priv->kms;
+	requested = 1000 * mode->clock;
+	actual = kms->funcs->round_pixclk(kms,
+			requested, hdmi->encoder);
+
+	SDE_HDMI_DEBUG("requested=%ld, actual=%ld", requested, actual);
+
+	if (actual != requested)
+		return MODE_CLOCK_RANGE;
+
+	/* if no format flags are present remove the mode */
+	if (!(mode->flags & SDE_DRM_MODE_FLAG_FMT_MASK)) {
+		SDE_HDMI_DEBUG("removing following mode from list\n");
+		drm_mode_debug_printmodeline(mode);
+		return MODE_BAD;
+	}
+
+	return MODE_OK;
+}
+
+int sde_hdmi_dev_init(struct sde_hdmi *display)
+{
+	if (!display) {
+		SDE_ERROR("Invalid params\n");
+		return -EINVAL;
+	}
+	return 0;
+}
+
+int sde_hdmi_dev_deinit(struct sde_hdmi *display)
+{
+	if (!display) {
+		SDE_ERROR("Invalid params\n");
+		return -EINVAL;
+	}
+	if (display->hdcp_feat_data[SDE_HDCP_1x])
+		sde_hdcp_1x_deinit(display->hdcp_feat_data[SDE_HDCP_1x]);
+
+	if (display->hdcp_feat_data[SDE_HDCP_2P2])
+		sde_hdmi_hdcp2p2_deinit(display->hdcp_feat_data[SDE_HDCP_2P2]);
+
+	return 0;
+}
+
+static int _sde_hdmi_cec_init(struct sde_hdmi *display)
+{
+	struct platform_device *pdev = display->pdev;
+
+	display->notifier = cec_notifier_get(&pdev->dev);
+	if (!display->notifier) {
+		SDE_ERROR("CEC notifier get failed\n");
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+static void _sde_hdmi_cec_deinit(struct sde_hdmi *display)
+{
+	cec_notifier_set_phys_addr(display->notifier, CEC_PHYS_ADDR_INVALID);
+	cec_notifier_put(display->notifier);
+}
+
+static int sde_hdmi_bind(struct device *dev, struct device *master, void *data)
+{
+	int rc = 0;
+	struct sde_hdmi_ctrl *display_ctrl = NULL;
+	struct sde_hdmi *display = NULL;
+	struct drm_device *drm = NULL;
+	struct msm_drm_private *priv = NULL;
+	struct platform_device *pdev = to_platform_device(dev);
+
+	SDE_HDMI_DEBUG(" %s +\n", __func__);
+	if (!dev || !pdev || !master) {
+		pr_err("invalid param(s), dev %pK, pdev %pK, master %pK\n",
+			dev, pdev, master);
+		return -EINVAL;
+	}
+
+	drm = dev_get_drvdata(master);
+	display = platform_get_drvdata(pdev);
+	if (!drm || !display) {
+		pr_err("invalid param(s), drm %pK, display %pK\n",
+			   drm, display);
+		return -EINVAL;
+	}
+
+	priv = drm->dev_private;
+	mutex_lock(&display->display_lock);
+
+	rc = _sde_hdmi_debugfs_init(display);
+	if (rc) {
+		SDE_ERROR("[%s]Debugfs init failed, rc=%d\n",
+				display->name, rc);
+		goto debug_error;
+	}
+
+	rc = _sde_hdmi_ext_disp_init(display);
+	if (rc) {
+		SDE_ERROR("[%s]Ext Disp init failed, rc=%d\n",
+				display->name, rc);
+		goto ext_error;
+	}
+
+	rc = _sde_hdmi_cec_init(display);
+	if (rc) {
+		SDE_ERROR("[%s]CEC init failed, rc=%d\n",
+				display->name, rc);
+		goto ext_error;
+	}
+
+	display->edid_ctrl = sde_edid_init();
+	if (!display->edid_ctrl) {
+		SDE_ERROR("[%s]sde edid init failed\n",
+				display->name);
+		rc = -ENOMEM;
+		goto cec_error;
+	}
+
+	display_ctrl = &display->ctrl;
+	display_ctrl->ctrl = priv->hdmi;
+	display->drm_dev = drm;
+
+	_sde_hdmi_map_regs(display, priv->hdmi);
+	_sde_hdmi_init_ddc(display, priv->hdmi);
+
+	display->enc_lvl = HDCP_STATE_AUTH_ENC_NONE;
+
+	INIT_DELAYED_WORK(&display->hdcp_cb_work,
+					  sde_hdmi_tx_hdcp_cb_work);
+	mutex_init(&display->hdcp_mutex);
+	mutex_unlock(&display->display_lock);
+	return rc;
+
+cec_error:
+	_sde_hdmi_cec_deinit(display);
+ext_error:
+	_sde_hdmi_debugfs_deinit(display);
+debug_error:
+	mutex_unlock(&display->display_lock);
+	return rc;
+}
+
+static void sde_hdmi_unbind(struct device *dev, struct device *master,
+		void *data)
+{
+	struct sde_hdmi *display = NULL;
+
+	if (!dev) {
+		SDE_ERROR("invalid params\n");
+		return;
+	}
+
+	display = platform_get_drvdata(to_platform_device(dev));
+	if (!display) {
+		SDE_ERROR("Invalid display device\n");
+		return;
+	}
+	mutex_lock(&display->display_lock);
+	_sde_hdmi_debugfs_deinit(display);
+	(void)sde_edid_deinit((void **)&display->edid_ctrl);
+	_sde_hdmi_cec_deinit(display);
+	display->drm_dev = NULL;
+	mutex_unlock(&display->display_lock);
+}
+
+static const struct component_ops sde_hdmi_comp_ops = {
+	.bind = sde_hdmi_bind,
+	.unbind = sde_hdmi_unbind,
+};
+
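+/*
+ * Parses custom display timings from DT. An illustrative node layout
+ * (property names are the ones read below; the child node name and the
+ * timing values are only examples, here the standard CEA 1080p60
+ * numbers, not values taken from a real board):
+ *
+ *	qcom,customize-modes {
+ *		custom-mode@0 {
+ *			qcom,mode-name = "1920x1080";
+ *			qcom,mode-h-active = <1920>;
+ *			qcom,mode-h-front-porch = <88>;
+ *			qcom,mode-h-pulse-width = <44>;
+ *			qcom,mode-h-back-porch = <148>;
+ *			qcom,mode-v-active = <1080>;
+ *			qcom,mode-v-front-porch = <4>;
+ *			qcom,mode-v-pulse-width = <5>;
+ *			qcom,mode-v-back-porch = <36>;
+ *			qcom,mode-refresh-rate = <60>;
+ *			qcom,mode-clock-in-khz = <148500>;
+ *		};
+ *	};
+ */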
+static int _sde_hdmi_parse_dt_modes(struct device_node *np,
+					struct list_head *head,
+					u32 *num_of_modes)
+{
+	int rc = 0;
+	struct drm_display_mode *mode;
+	u32 mode_count = 0;
+	struct device_node *node = NULL;
+	struct device_node *root_node = NULL;
+	const char *name;
+	u32 h_front_porch, h_pulse_width, h_back_porch;
+	u32 v_front_porch, v_pulse_width, v_back_porch;
+	bool h_active_high, v_active_high;
+	u32 flags = 0;
+
+	root_node = of_get_child_by_name(np, "qcom,customize-modes");
+	if (!root_node) {
+		root_node = of_parse_phandle(np, "qcom,customize-modes", 0);
+		if (!root_node) {
+			DRM_INFO("No entry present for qcom,customize-modes");
+			goto end;
+		}
+	}
+	for_each_child_of_node(root_node, node) {
+		rc = 0;
+		/* reset per mode so sync flags don't leak between nodes */
+		flags = 0;
+		mode = kzalloc(sizeof(*mode), GFP_KERNEL);
+		if (!mode) {
+			SDE_ERROR("Out of memory\n");
+			rc = -ENOMEM;
+			continue;
+		}
+
+		rc = of_property_read_string(node, "qcom,mode-name",
+						&name);
+		if (rc) {
+			SDE_ERROR("failed to read qcom,mode-name, rc=%d\n", rc);
+			goto fail;
+		}
+		strlcpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
+
+		rc = of_property_read_u32(node, "qcom,mode-h-active",
+						&mode->hdisplay);
+		if (rc) {
+			SDE_ERROR("failed to read h-active, rc=%d\n", rc);
+			goto fail;
+		}
+
+		rc = of_property_read_u32(node, "qcom,mode-h-front-porch",
+						&h_front_porch);
+		if (rc) {
+			SDE_ERROR("failed to read h-front-porch, rc=%d\n", rc);
+			goto fail;
+		}
+
+		rc = of_property_read_u32(node, "qcom,mode-h-pulse-width",
+						&h_pulse_width);
+		if (rc) {
+			SDE_ERROR("failed to read h-pulse-width, rc=%d\n", rc);
+			goto fail;
+		}
+
+		rc = of_property_read_u32(node, "qcom,mode-h-back-porch",
+						&h_back_porch);
+		if (rc) {
+			SDE_ERROR("failed to read h-back-porch, rc=%d\n", rc);
+			goto fail;
+		}
+
+		h_active_high = of_property_read_bool(node,
+						"qcom,mode-h-active-high");
+
+		rc = of_property_read_u32(node, "qcom,mode-v-active",
+						&mode->vdisplay);
+		if (rc) {
+			SDE_ERROR("failed to read v-active, rc=%d\n", rc);
+			goto fail;
+		}
+
+		rc = of_property_read_u32(node, "qcom,mode-v-front-porch",
+						&v_front_porch);
+		if (rc) {
+			SDE_ERROR("failed to read v-front-porch, rc=%d\n", rc);
+			goto fail;
+		}
+
+		rc = of_property_read_u32(node, "qcom,mode-v-pulse-width",
+						&v_pulse_width);
+		if (rc) {
+			SDE_ERROR("failed to read v-pulse-width, rc=%d\n", rc);
+			goto fail;
+		}
+
+		rc = of_property_read_u32(node, "qcom,mode-v-back-porch",
+						&v_back_porch);
+		if (rc) {
+			SDE_ERROR("failed to read v-back-porch, rc=%d\n", rc);
+			goto fail;
+		}
+
+		v_active_high = of_property_read_bool(node,
+						"qcom,mode-v-active-high");
+
+		rc = of_property_read_u32(node, "qcom,mode-refresh-rate",
+						&mode->vrefresh);
+		if (rc) {
+			SDE_ERROR("failed to read refresh-rate, rc=%d\n", rc);
+			goto fail;
+		}
+
+		rc = of_property_read_u32(node, "qcom,mode-clock-in-khz",
+						&mode->clock);
+		if (rc) {
+			SDE_ERROR("failed to read clock, rc=%d\n", rc);
+			goto fail;
+		}
+
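+		/*
+		 * Standard CRTC timing derivation: sync starts after the
+		 * front porch, lasts for the pulse width, and the back
+		 * porch pads out the total.
+		 */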
+		mode->hsync_start = mode->hdisplay + h_front_porch;
+		mode->hsync_end = mode->hsync_start + h_pulse_width;
+		mode->htotal = mode->hsync_end + h_back_porch;
+		mode->vsync_start = mode->vdisplay + v_front_porch;
+		mode->vsync_end = mode->vsync_start + v_pulse_width;
+		mode->vtotal = mode->vsync_end + v_back_porch;
+		if (h_active_high)
+			flags |= DRM_MODE_FLAG_PHSYNC;
+		else
+			flags |= DRM_MODE_FLAG_NHSYNC;
+		if (v_active_high)
+			flags |= DRM_MODE_FLAG_PVSYNC;
+		else
+			flags |= DRM_MODE_FLAG_NVSYNC;
+		mode->flags = flags;
+
+		if (!rc) {
+			mode_count++;
+			list_add_tail(&mode->head, head);
+		}
+
+		SDE_DEBUG("mode[%d] h[%d,%d,%d,%d] v[%d,%d,%d,%d] %d %xH %d\n",
+			mode_count - 1, mode->hdisplay, mode->hsync_start,
+			mode->hsync_end, mode->htotal, mode->vdisplay,
+			mode->vsync_start, mode->vsync_end, mode->vtotal,
+			mode->vrefresh, mode->flags, mode->clock);
+fail:
+		if (rc) {
+			kfree(mode);
+			continue;
+		}
+	}
+
+	if (num_of_modes)
+		*num_of_modes = mode_count;
+
+end:
+	return rc;
+}
+
+static int _sde_hdmi_parse_dt(struct device_node *node,
+				struct sde_hdmi *display)
+{
+	int rc = 0;
+
+	display->name = of_get_property(node, "label", NULL);
+
+	display->display_type = of_get_property(node,
+						"qcom,display-type", NULL);
+	if (!display->display_type)
+		display->display_type = "unknown";
+
+	display->non_pluggable = of_property_read_bool(node,
+						"qcom,non-pluggable");
+
+	rc = _sde_hdmi_parse_dt_modes(node, &display->mode_list,
+					&display->num_of_modes);
+	if (rc)
+		SDE_ERROR("parse_dt_modes failed rc=%d\n", rc);
+
+	return rc;
+}
+
+static int _sde_hdmi_dev_probe(struct platform_device *pdev)
+{
+	int rc;
+	struct sde_hdmi *display;
+	int ret = 0;
+
+	SDE_DEBUG("\n");
+
+	if (!pdev || !pdev->dev.of_node) {
+		SDE_ERROR("pdev not found\n");
+		return -ENODEV;
+	}
+
+	display = devm_kzalloc(&pdev->dev, sizeof(*display), GFP_KERNEL);
+	if (!display)
+		return -ENOMEM;
+
+	INIT_LIST_HEAD(&display->mode_list);
+	rc = _sde_hdmi_parse_dt(pdev->dev.of_node, display);
+	if (rc)
+		SDE_ERROR("parse dt failed, rc=%d\n", rc);
+
+	mutex_init(&display->display_lock);
+	display->pdev = pdev;
+	platform_set_drvdata(pdev, display);
+	mutex_lock(&sde_hdmi_list_lock);
+	list_add(&display->list, &sde_hdmi_list);
+	mutex_unlock(&sde_hdmi_list_lock);
+	if (!sde_hdmi_dev_init(display)) {
+		ret = component_add(&pdev->dev, &sde_hdmi_comp_ops);
+		if (ret) {
+			pr_err("component add failed\n");
+			goto out;
+		}
+	}
+	return 0;
+
+out:
+	mutex_lock(&sde_hdmi_list_lock);
+	list_del(&display->list);
+	mutex_unlock(&sde_hdmi_list_lock);
+	devm_kfree(&pdev->dev, display);
+	return ret;
+}
+
+static int _sde_hdmi_dev_remove(struct platform_device *pdev)
+{
+	struct sde_hdmi *display;
+	struct sde_hdmi *pos, *tmp;
+	struct drm_display_mode *mode, *n;
+
+	if (!pdev) {
+		SDE_ERROR("Invalid device\n");
+		return -EINVAL;
+	}
+
+	display = platform_get_drvdata(pdev);
+	if (!display) {
+		SDE_ERROR("Invalid display device\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&sde_hdmi_list_lock);
+	list_for_each_entry_safe(pos, tmp, &sde_hdmi_list, list) {
+		if (pos == display) {
+			list_del(&display->list);
+			break;
+		}
+	}
+	mutex_unlock(&sde_hdmi_list_lock);
+
+	list_for_each_entry_safe(mode, n, &display->mode_list, head) {
+		list_del(&mode->head);
+		kfree(mode);
+	}
+
+	platform_set_drvdata(pdev, NULL);
+	devm_kfree(&pdev->dev, display);
+	return 0;
+}
+
+static struct platform_driver sde_hdmi_driver = {
+	.probe = _sde_hdmi_dev_probe,
+	.remove = _sde_hdmi_dev_remove,
+	.driver = {
+		.name = "sde_hdmi",
+		.of_match_table = sde_hdmi_dt_match,
+	},
+};
+
+static int sde_hdmi_irqdomain_map(struct irq_domain *domain,
+		unsigned int irq, irq_hw_number_t hwirq)
+{
+	struct sde_hdmi *display;
+	int rc;
+
+	if (!domain || !domain->host_data) {
+		pr_err("invalid parameters domain\n");
+		return -EINVAL;
+	}
+	display = domain->host_data;
+
+	irq_set_chip_and_handler(irq, &dummy_irq_chip, handle_level_irq);
+	rc = irq_set_chip_data(irq, display);
+
+	return rc;
+}
+
+static const struct irq_domain_ops sde_hdmi_irqdomain_ops = {
+	.map = sde_hdmi_irqdomain_map,
+	.xlate = irq_domain_xlate_onecell,
+};
+
+int sde_hdmi_drm_init(struct sde_hdmi *display, struct drm_encoder *enc)
+{
+	int rc = 0;
+	struct msm_drm_private *priv = NULL;
+	struct hdmi *hdmi;
+	struct platform_device *pdev;
+	struct sde_kms *sde_kms;
+
+	DBG("");
+	if (!display || !display->drm_dev || !enc) {
+		SDE_ERROR("display=%p or enc=%p or drm_dev is NULL\n",
+			display, enc);
+		return -EINVAL;
+	}
+
+	mutex_lock(&display->display_lock);
+	priv = display->drm_dev->dev_private;
+	hdmi = display->ctrl.ctrl;
+
+	if (!priv || !hdmi) {
+		SDE_ERROR("priv=%p or hdmi=%p is NULL\n",
+			priv, hdmi);
+		mutex_unlock(&display->display_lock);
+		return -EINVAL;
+	}
+
+	pdev = hdmi->pdev;
+	hdmi->dev = display->drm_dev;
+	hdmi->encoder = enc;
+
+	hdmi_audio_infoframe_init(&hdmi->audio.infoframe);
+
+	hdmi->bridge = sde_hdmi_bridge_init(hdmi);
+	if (IS_ERR(hdmi->bridge)) {
+		rc = PTR_ERR(hdmi->bridge);
+		SDE_ERROR("failed to create HDMI bridge: %d\n", rc);
+		hdmi->bridge = NULL;
+		goto error;
+	}
+	hdmi->irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
+	if (!hdmi->irq) {
+		rc = -EINVAL;
+		SDE_ERROR("failed to get irq\n");
+		goto error;
+	}
+
+	rc = devm_request_irq(&pdev->dev, hdmi->irq,
+			_sde_hdmi_irq, IRQF_TRIGGER_HIGH,
+			"sde_hdmi_isr", display);
+	if (rc < 0) {
+		SDE_ERROR("failed to request IRQ%u: %d\n",
+				hdmi->irq, rc);
+		goto error;
+	}
+
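+	/*
+	 * Register a small linear IRQ domain (8 hwirqs) on the HDMI DT
+	 * node so consumers can reference HDMI sub-interrupts with a
+	 * single cell; mapped virqs use dummy_irq_chip with level-type
+	 * handling (see sde_hdmi_irqdomain_map()).
+	 */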
+	display->irq_domain = irq_domain_add_linear(pdev->dev.of_node, 8,
+				&sde_hdmi_irqdomain_ops, display);
+	if (!display->irq_domain) {
+		SDE_ERROR("failed to create IRQ domain\n");
+		rc = -ENOMEM;
+		goto error;
+	}
+
+	enc->bridge = hdmi->bridge;
+	priv->bridges[priv->num_bridges++] = hdmi->bridge;
+
+	/*
+	 * After initialising HDMI bridge, we need to check
+	 * whether the early display is enabled for HDMI.
+	 * If yes, we need to increase refcount of hdmi power
+	 * clocks. This can skip the clock disabling operation in
+	 * clock_late_init when finding clk.count == 1.
+	 */
+	sde_kms = to_sde_kms(priv->kms);
+	if (sde_kms->splash_info.handoff) {
+		sde_hdmi_bridge_power_on(hdmi->bridge);
+		hdmi->power_on = true;
+	}
+
+	mutex_unlock(&display->display_lock);
+	return 0;
+
+error:
+	/* bridge is normally destroyed by drm: */
+	if (hdmi->bridge) {
+		hdmi_bridge_destroy(hdmi->bridge);
+		hdmi->bridge = NULL;
+	}
+	mutex_unlock(&display->display_lock);
+	return rc;
+}
+
+int sde_hdmi_drm_deinit(struct sde_hdmi *display)
+{
+	int rc = 0;
+
+	if (!display) {
+		SDE_ERROR("Invalid params\n");
+		return -EINVAL;
+	}
+
+	if (display->irq_domain)
+		irq_domain_remove(display->irq_domain);
+
+	return rc;
+}
+
+static int __init sde_hdmi_register(void)
+{
+	int rc = 0;
+
+	DBG("");
+	rc = platform_driver_register(&sde_hdmi_driver);
+	return rc;
+}
+
+static void __exit sde_hdmi_unregister(void)
+{
+	platform_driver_unregister(&sde_hdmi_driver);
+}
+
+module_init(sde_hdmi_register);
+module_exit(sde_hdmi_unregister);
diff -Nruw linux-4.4.115-fbx/drivers/gpu/drm/msm/hdmi-staging./sde_hdmi.h linux-4.4.115-fbx/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi.h
--- linux-4.4.115-fbx/drivers/gpu/drm/msm/hdmi-staging./sde_hdmi.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi.h	2019-10-29 09:26:23.629203041 +0100
@@ -0,0 +1,659 @@
+/*
+ * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _SDE_HDMI_H_
+#define _SDE_HDMI_H_
+
+#include <linux/types.h>
+#include <linux/bitops.h>
+#include <linux/debugfs.h>
+#include <linux/of_device.h>
+#include <linux/msm_ext_display.h>
+#include <linux/hdcp_qseecom.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include <media/cec-notifier.h>
+#include "hdmi.h"
+#include "sde_kms.h"
+#include "sde_connector.h"
+#include "msm_drv.h"
+#include "sde_edid_parser.h"
+#include "sde_hdmi_util.h"
+#include "sde_hdcp.h"
+
+#ifndef MIN
+#define MIN(x, y) (((x) < (y)) ? (x) : (y))
+#endif
+#ifdef HDMI_DEBUG_ENABLE
+#define SDE_HDMI_DEBUG(fmt, args...)   SDE_ERROR(fmt, ##args)
+#else
+#define SDE_HDMI_DEBUG(fmt, args...)   SDE_DEBUG(fmt, ##args)
+#endif
+
+/* HW Revisions for different SDE targets */
+#define SDE_GET_MAJOR_VER(rev)	((rev) >> 28)
+#define SDE_GET_MINOR_VER(rev)	(((rev) >> 16) & 0xFFF)
+
+/**
+ * struct sde_hdmi_info - defines hdmi display properties
+ * @display_type:      Display type as defined by device tree.
+ * @is_hot_pluggable:  Can panel be hot plugged.
+ * @is_connected:      Is panel connected.
+ * @is_edid_supported: Does panel support reading EDID information.
+ * @width_mm:          Physical width of panel in millimeters.
+ * @height_mm:         Physical height of panel in millimeters.
+ */
+struct sde_hdmi_info {
+	const char *display_type;
+
+	/* HPD */
+	bool is_hot_pluggable;
+	bool is_connected;
+	bool is_edid_supported;
+
+	/* Physical properties */
+	u32 width_mm;
+	u32 height_mm;
+};
+
+/**
+ * struct sde_hdmi_ctrl - hdmi ctrl/phy information for the display
+ * @ctrl:           Handle to the HDMI controller device.
+ * @ctrl_of_node:   phandle of the HDMI controller device node.
+ * @hdmi_ctrl_idx:   HDMI controller instance id.
+ */
+struct sde_hdmi_ctrl {
+	/* controller info */
+	struct hdmi *ctrl;
+	struct device_node *ctrl_of_node;
+	u32 hdmi_ctrl_idx;
+};
+
+enum hdmi_tx_io_type {
+	HDMI_TX_CORE_IO,
+	HDMI_TX_QFPROM_IO,
+	HDMI_TX_HDCP_IO,
+	HDMI_TX_MAX_IO
+};
+
+enum hdmi_tx_feature_type {
+	SDE_HDCP_1x,
+	SDE_HDCP_2P2
+};
+
+/**
+ * struct sde_hdmi - hdmi display information
+ * @pdev:             Pointer to platform device.
+ * @drm_dev:          DRM device associated with the display.
+ * @name:             Name of the display.
+ * @display_type:     Display type as defined in device tree.
+ * @list:             List pointer.
+ * @display_lock:     Mutex for sde_hdmi interface.
+ * @hdcp_mutex:       Mutex serializing HDCP operations.
+ * @ctrl:             Controller information for HDMI display.
+ * @ext_pdev:         Platform device of the external display interface.
+ * @ext_audio_data:   External display audio initialization data.
+ * @edid_ctrl:        Handle to the EDID parser state.
+ * @non_pluggable:    If HDMI display is non pluggable
+ * @num_of_modes:     Number of modes supported by display if non pluggable.
+ * @mode_list:        Mode list if non pluggable.
+ * @mode:             Current display mode.
+ * @connected:        If HDMI display is connected.
+ * @is_tpg_enabled:   TPG state.
+ * @hdmi_tx_version:  HDMI TX version
+ * @hdmi_tx_major_version: HDMI TX major version
+ * @max_pclk_khz: Max pixel clock supported
+ * @hdcp1_use_sw_keys: If HDCP1 engine uses SW keys
+ * @hdcp14_present: If the sink supports HDCP 1.4
+ * @hdcp22_present: If the sink supports HDCP 2.2
+ * @hdcp_status: Current HDCP status
+ * @sink_hdcp_ver: HDCP version of the sink
+ * @enc_lvl: Current encryption level
+ * @curr_hdr_state: Current HDR state of the HDMI connector
+ * @auth_state: Current authentication state of HDCP
+ * @sink_hdcp22_support: If the sink supports HDCP 2.2
+ * @src_hdcp22_support: If the source supports HDCP 2.2
+ * @hdcp_data: Call back data registered by the client with HDCP lib
+ * @hdcp_feat_data: Handle to HDCP feature data
+ * @hdcp_ops: Function ops registered by the client with the HDCP lib
+ * @ddc_ctrl: Handle to HDMI DDC Controller
+ * @hpd_work:         HPD work structure.
+ * @codec_ready:      If audio codec is ready.
+ * @client_notify_pending: If there is client notification pending.
+ * @irq_domain:       IRQ domain structure.
+ * @notifier:         CEC notifier to convey physical address information.
+ * @pll_update_enable: if it's allowed to update HDMI PLL ppm.
+ * @dc_enable:        If deep color is enabled. Only DC_30 so far.
+ * @dc_feature_supported: If deep color feature is supported.
+ * @bt2020_colorimetry: If BT2020 colorimetry is supported by sink
+ * @hdcp_cb_work: Callback function for HDCP
+ * @io: Handle to IO base addresses for HDMI
+ * @root:             Debug fs root entry.
+ */
+struct sde_hdmi {
+	struct platform_device *pdev;
+	struct drm_device *drm_dev;
+
+	const char *name;
+	const char *display_type;
+	struct list_head list;
+	struct mutex display_lock;
+	struct mutex hdcp_mutex;
+	struct sde_hdmi_ctrl ctrl;
+
+	struct platform_device *ext_pdev;
+	struct msm_ext_disp_init_data ext_audio_data;
+	struct sde_edid_ctrl *edid_ctrl;
+
+	bool non_pluggable;
+	u32 num_of_modes;
+	struct list_head mode_list;
+	struct drm_display_mode mode;
+	bool connected;
+	bool is_tpg_enabled;
+	u32 hdmi_tx_version;
+	u32 hdmi_tx_major_version;
+	u32 max_pclk_khz;
+	bool hdcp1_use_sw_keys;
+	u32 hdcp14_present;
+	u32 hdcp22_present;
+	u8 hdcp_status;
+	u8 sink_hdcp_ver;
+	u32 enc_lvl;
+	u8 curr_hdr_state;
+	bool auth_state;
+	bool sink_hdcp22_support;
+	bool src_hdcp22_support;
+
+	/* holds final data based on HDCP support */
+	void *hdcp_data;
+	/* holds HDCP init data */
+	void *hdcp_feat_data[2];
+	struct sde_hdcp_ops *hdcp_ops;
+	struct sde_hdmi_tx_ddc_ctrl ddc_ctrl;
+	struct work_struct hpd_work;
+	bool codec_ready;
+	bool client_notify_pending;
+
+	struct irq_domain *irq_domain;
+	struct cec_notifier *notifier;
+	bool pll_update_enable;
+	bool dc_enable;
+	bool dc_feature_supported;
+	bool bt2020_colorimetry;
+
+	struct delayed_work hdcp_cb_work;
+	struct dss_io_data io[HDMI_TX_MAX_IO];
+	/* DEBUG FS */
+	struct dentry *root;
+};
+
+/**
+ * hdmi_tx_scdc_access_type() - hdmi 2.0 DDC functionalities.
+ */
+enum hdmi_tx_scdc_access_type {
+	HDMI_TX_SCDC_SCRAMBLING_STATUS,
+	HDMI_TX_SCDC_SCRAMBLING_ENABLE,
+	HDMI_TX_SCDC_TMDS_BIT_CLOCK_RATIO_UPDATE,
+	HDMI_TX_SCDC_CLOCK_DET_STATUS,
+	HDMI_TX_SCDC_CH0_LOCK_STATUS,
+	HDMI_TX_SCDC_CH1_LOCK_STATUS,
+	HDMI_TX_SCDC_CH2_LOCK_STATUS,
+	HDMI_TX_SCDC_CH0_ERROR_COUNT,
+	HDMI_TX_SCDC_CH1_ERROR_COUNT,
+	HDMI_TX_SCDC_CH2_ERROR_COUNT,
+	HDMI_TX_SCDC_READ_ENABLE,
+	HDMI_TX_SCDC_MAX,
+};
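+
+/*
+ * Illustrative use (a sketch, not code from this patch): for TMDS
+ * character rates above 340MHz, HDMI 2.0 requires selecting the 1/40
+ * bit clock ratio and enabling scrambling over SCDC:
+ *
+ *	sde_hdmi_scdc_write(hdmi, HDMI_TX_SCDC_TMDS_BIT_CLOCK_RATIO_UPDATE, 1);
+ *	sde_hdmi_scdc_write(hdmi, HDMI_TX_SCDC_SCRAMBLING_ENABLE, 1);
+ */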
+
+#define HDMI_KHZ_TO_HZ 1000
+#define HDMI_MHZ_TO_HZ 1000000
+#define HDMI_YUV420_24BPP_PCLK_TMDS_CH_RATE_RATIO 2
+#define HDMI_RGB_24BPP_PCLK_TMDS_CH_RATE_RATIO 1
+
+#define HDMI_GEN_PKT_CTRL_CLR_MASK 0x3f0007
+
+/* for AVI program */
+#define HDMI_AVI_INFOFRAME_BUFFER_SIZE \
+	(HDMI_INFOFRAME_HEADER_SIZE + HDMI_AVI_INFOFRAME_SIZE)
+#define HDMI_VS_INFOFRAME_BUFFER_SIZE (HDMI_INFOFRAME_HEADER_SIZE + 6)
+
+#define LEFT_SHIFT_BYTE(x) ((x) << 8)
+#define LEFT_SHIFT_WORD(x) ((x) << 16)
+#define LEFT_SHIFT_24BITS(x) ((x) << 24)
+
+/* Maximum pixel clock rates for hdmi tx */
+#define HDMI_DEFAULT_MAX_PCLK_RATE	148500
+#define HDMI_TX_3_MAX_PCLK_RATE		297000
+#define HDMI_TX_4_MAX_PCLK_RATE		600000
+
+/**
+ * hdmi_tx_ddc_timer_type() - hdmi DDC timer functionalities.
+ */
+enum hdmi_tx_ddc_timer_type {
+	HDMI_TX_DDC_TIMER_HDCP2P2_RD_MSG,
+	HDMI_TX_DDC_TIMER_SCRAMBLER_STATUS,
+	HDMI_TX_DDC_TIMER_UPDATE_FLAGS,
+	HDMI_TX_DDC_TIMER_STATUS_FLAGS,
+	HDMI_TX_DDC_TIMER_CED,
+	HDMI_TX_DDC_TIMER_MAX,
+};
+
+#ifdef CONFIG_DRM_SDE_HDMI
+/**
+ * sde_hdmi_get_num_of_displays() - returns number of display devices
+ *				       supported.
+ *
+ * Return: number of displays.
+ */
+u32 sde_hdmi_get_num_of_displays(void);
+
+/**
+ * sde_hdmi_get_displays() - returns the display list that's available.
+ * @display_array: Pointer to display list
+ * @max_display_count: Number of maximum displays in the list
+ *
+ * Return: number of available displays.
+ */
+int sde_hdmi_get_displays(void **display_array, u32 max_display_count);
+
+/**
+ * sde_hdmi_connector_pre_deinit() - perform additional deinitialization steps
+ * @connector: Pointer to drm connector structure
+ * @display: Pointer to private display handle
+ *
+ * Return: error code
+ */
+int sde_hdmi_connector_pre_deinit(struct drm_connector *connector,
+		void *display);
+
+/**
+ * sde_hdmi_connector_post_init() - perform additional initialization steps
+ * @connector: Pointer to drm connector structure
+ * @info: Pointer to sde connector info structure
+ * @display: Pointer to private display handle
+ *
+ * Return: error code
+ */
+int sde_hdmi_connector_post_init(struct drm_connector *connector,
+		void *info,
+		void *display);
+
+/**
+ * sde_hdmi_connector_detect() - determine if connector is connected
+ * @connector: Pointer to drm connector structure
+ * @force: Force detect setting from drm framework
+ * @display: Pointer to private display handle
+ *
+ * Return: error code
+ */
+enum drm_connector_status
+sde_hdmi_connector_detect(struct drm_connector *connector,
+		bool force,
+		void *display);
+
+/**
+ * sde_hdmi_connector_get_modes - add drm modes via drm_mode_probed_add()
+ * @connector: Pointer to drm connector structure
+ * @display: Pointer to private display handle
+ *
+ * Returns: Number of modes added
+ */
+int sde_hdmi_connector_get_modes(struct drm_connector *connector,
+		void *display);
+
+/**
+ * sde_hdmi_mode_valid - determine if specified mode is valid
+ * @connector: Pointer to drm connector structure
+ * @mode: Pointer to drm mode structure
+ * @display: Pointer to private display handle
+ *
+ * Returns: Validity status for specified mode
+ */
+enum drm_mode_status sde_hdmi_mode_valid(struct drm_connector *connector,
+		struct drm_display_mode *mode,
+		void *display);
+
+/**
+ * sde_hdmi_dev_init() - Initializes the display device
+ * @display:         Handle to the display.
+ *
+ * Initialization will acquire references to the resources required for the
+ * display hardware to function.
+ *
+ * Return: error code.
+ */
+int sde_hdmi_dev_init(struct sde_hdmi *display);
+
+/**
+ * sde_hdmi_dev_deinit() - De-initializes the display device
+ * @display:        Handle to the display.
+ *
+ * All the resources acquired during device init will be released.
+ *
+ * Return: error code.
+ */
+int sde_hdmi_dev_deinit(struct sde_hdmi *display);
+
+/**
+ * sde_hdmi_drm_init() - initializes DRM objects for the display device.
+ * @display:            Handle to the display.
+ * @enc:                Pointer to the encoder object which is connected
+ *			to the display.
+ *
+ * Return: error code.
+ */
+int sde_hdmi_drm_init(struct sde_hdmi *display,
+				struct drm_encoder *enc);
+
+/**
+ * sde_hdmi_drm_deinit() - destroys DRM objects associated with the display
+ * @display:        Handle to the display.
+ *
+ * Return: error code.
+ */
+int sde_hdmi_drm_deinit(struct sde_hdmi *display);
+
+/**
+ * sde_hdmi_get_info() - returns the display properties
+ * @display:          Handle to the display.
+ * @info:             Pointer to the structure where info is stored.
+ *
+ * Return: error code.
+ */
+int sde_hdmi_get_info(struct msm_display_info *info,
+				void *display);
+
+/**
+ * sde_hdmi_set_property() - set the connector properties
+ * @connector:        Handle to the connector.
+ * @state:            Handle to the connector state.
+ * @property_index:   property index.
+ * @value:            property value.
+ * @display:          Handle to the display.
+ *
+ * Return: error code.
+ */
+int sde_hdmi_set_property(struct drm_connector *connector,
+			struct drm_connector_state *state,
+			int property_index,
+			uint64_t value,
+			void *display);
+/**
+ * sde_hdmi_bridge_power_on -- A wrapper of _sde_hdmi_bridge_power_on.
+ * @bridge:          Handle to the drm bridge.
+ *
+ * Return: void.
+ */
+void sde_hdmi_bridge_power_on(struct drm_bridge *bridge);
+
+/**
+ * sde_hdmi_get_property() - get the connector properties
+ * @connector:        Handle to the connector.
+ * @state:            Handle to the connector state.
+ * @property_index:   property index.
+ * @value:            property value.
+ * @display:          Handle to the display.
+ *
+ * Return: error code.
+ */
+int sde_hdmi_get_property(struct drm_connector *connector,
+			struct drm_connector_state *state,
+			int property_index,
+			uint64_t *value,
+			void *display);
+
+/**
+ * sde_hdmi_bridge_init() - init sde hdmi bridge
+ * @hdmi:          Handle to the hdmi.
+ *
+ * Return: struct drm_bridge *.
+ */
+struct drm_bridge *sde_hdmi_bridge_init(struct hdmi *hdmi);
+
+/**
+ * sde_hdmi_set_mode() - Set HDMI mode API.
+ * @hdmi:          Handle to the hdmi.
+ * @power_on:      Power on/off request.
+ *
+ * Return: void.
+ */
+void sde_hdmi_set_mode(struct hdmi *hdmi, bool power_on);
+
+/**
+ * sde_hdmi_scdc_read() - hdmi 2.0 ddc read API.
+ * @hdmi:          Handle to the hdmi.
+ * @data_type:     DDC data type, refer to enum hdmi_tx_scdc_access_type.
+ * @val:           Read back value.
+ *
+ * Return: error code.
+ */
+int sde_hdmi_scdc_read(struct hdmi *hdmi, u32 data_type, u32 *val);
+
+/**
+ * sde_hdmi_scdc_write() - hdmi 2.0 ddc write API.
+ * @hdmi:          Handle to the hdmi.
+ * @data_type:     DDC data type, refer to enum hdmi_tx_scdc_access_type.
+ * @val:           Value write through DDC.
+ *
+ * Return: error code.
+ */
+int sde_hdmi_scdc_write(struct hdmi *hdmi, u32 data_type, u32 val);
+
+/**
+ * sde_hdmi_audio_on() - enable hdmi audio.
+ * @hdmi:          Handle to the hdmi.
+ * @params:        audio setup parameters from codec.
+ *
+ * Return: error code.
+ */
+int sde_hdmi_audio_on(struct hdmi *hdmi,
+	struct msm_ext_disp_audio_setup_params *params);
+
+/**
+ * sde_hdmi_audio_off() - disable hdmi audio.
+ * @hdmi:          Handle to the hdmi.
+ *
+ * Return: void.
+ */
+void sde_hdmi_audio_off(struct hdmi *hdmi);
+
+/**
+ * sde_hdmi_config_avmute() - mute hdmi.
+ * @hdmi:          Handle to the hdmi.
+ * @set:           enable/disable avmute.
+ *
+ * Return: error code.
+ */
+int sde_hdmi_config_avmute(struct hdmi *hdmi, bool set);
+
+/**
+ * sde_hdmi_notify_clients() - notify hdmi clients of the connection status.
+ * @display:       Handle to sde_hdmi.
+ * @connected:     connection status.
+ *
+ * Return: void.
+ */
+void sde_hdmi_notify_clients(struct sde_hdmi *display, bool connected);
+
+/**
+ * sde_hdmi_ack_state() - acknowledge the connection status.
+ * @connector:     Handle to the drm_connector.
+ * @status:        connection status.
+ *
+ * Return: void.
+ */
+void sde_hdmi_ack_state(struct drm_connector *connector,
+	enum drm_connector_status status);
+
+bool sde_hdmi_tx_is_hdcp_enabled(struct sde_hdmi *hdmi_ctrl);
+bool sde_hdmi_tx_is_encryption_set(struct sde_hdmi *hdmi_ctrl);
+bool sde_hdmi_tx_is_stream_shareable(struct sde_hdmi *hdmi_ctrl);
+bool sde_hdmi_tx_is_panel_on(struct sde_hdmi *hdmi_ctrl);
+int sde_hdmi_start_hdcp(struct drm_connector *connector);
+void sde_hdmi_hdcp_off(struct sde_hdmi *hdmi_ctrl);
+
+/**
+ * sde_hdmi_pre_kickoff - program kickoff-time features
+ * @connector: Pointer to drm connector structure
+ * @display: Pointer to private display structure
+ * @params: Parameters for kickoff-time programming
+ * Returns: Zero on success
+ */
+int sde_hdmi_pre_kickoff(struct drm_connector *connector,
+		void *display,
+		struct msm_display_kickoff_params *params);
+
+/**
+ * sde_hdmi_mode_needs_full_range - does mode need full range
+ * quantization
+ * @display: Pointer to private display structure
+ * Returns: true or false based on mode
+ */
+bool sde_hdmi_mode_needs_full_range(void *display);
+
+/**
+ * sde_hdmi_get_csc_type - returns the CSC type to be
+ * used based on state of HDR playback
+ * @conn: Pointer to DRM connector
+ * @display: Pointer to private display structure
+ * Returns: the CSC type to program for the current state
+ */
+enum sde_csc_type sde_hdmi_get_csc_type(struct drm_connector *conn,
+	void *display);
+#else /*#ifdef CONFIG_DRM_SDE_HDMI*/
+
+static inline u32 sde_hdmi_get_num_of_displays(void)
+{
+	return 0;
+}
+
+static inline int sde_hdmi_get_displays(void **display_array,
+		u32 max_display_count)
+{
+	return 0;
+}
+
+static inline int sde_hdmi_connector_pre_deinit(struct drm_connector *connector,
+		void *display)
+{
+	return 0;
+}
+
+static inline int sde_hdmi_connector_post_init(struct drm_connector *connector,
+		void *info,
+		void *display)
+{
+	return 0;
+}
+
+static inline enum drm_connector_status
+sde_hdmi_connector_detect(struct drm_connector *connector,
+		bool force,
+		void *display)
+{
+	return connector_status_disconnected;
+}
+
+static inline int sde_hdmi_connector_get_modes(struct drm_connector *connector,
+		void *display)
+{
+	return 0;
+}
+
+static inline enum drm_mode_status sde_hdmi_mode_valid(
+		struct drm_connector *connector,
+		struct drm_display_mode *mode,
+		void *display)
+{
+	return MODE_OK;
+}
+
+static inline int sde_hdmi_dev_init(struct sde_hdmi *display)
+{
+	return 0;
+}
+
+static inline int sde_hdmi_dev_deinit(struct sde_hdmi *display)
+{
+	return 0;
+}
+
+static inline bool sde_hdmi_tx_is_hdcp_enabled(struct sde_hdmi *hdmi_ctrl)
+{
+	return false;
+}
+
+static inline bool sde_hdmi_tx_is_encryption_set(struct sde_hdmi *hdmi_ctrl)
+{
+	return false;
+}
+
+static inline bool sde_hdmi_tx_is_stream_shareable(struct sde_hdmi *hdmi_ctrl)
+{
+	return false;
+}
+
+static inline bool sde_hdmi_tx_is_panel_on(struct sde_hdmi *hdmi_ctrl)
+{
+	return false;
+}
+
+static inline int sde_hdmi_drm_init(struct sde_hdmi *display,
+				struct drm_encoder *enc)
+{
+	return 0;
+}
+
+static inline int sde_hdmi_start_hdcp(struct drm_connector *connector)
+{
+	return 0;
+}
+
+static inline void sde_hdmi_hdcp_off(struct sde_hdmi *hdmi_ctrl)
+{
+}
+
+static inline int sde_hdmi_drm_deinit(struct sde_hdmi *display)
+{
+	return 0;
+}
+
+static inline int sde_hdmi_get_info(struct msm_display_info *info,
+				void *display)
+{
+	return 0;
+}
+
+static inline int sde_hdmi_set_property(struct drm_connector *connector,
+			struct drm_connector_state *state,
+			int property_index,
+			uint64_t value,
+			void *display)
+{
+	return 0;
+}
+
+static inline bool sde_hdmi_mode_needs_full_range(void *display)
+{
+	return false;
+}
+
+static inline enum sde_csc_type sde_hdmi_get_csc_type(
+		struct drm_connector *conn, void *display)
+{
+	return 0;
+}
+
+#endif /*#else of CONFIG_DRM_SDE_HDMI*/
+#endif /* _SDE_HDMI_H_ */
diff -Nruw linux-4.4.115-fbx/drivers/gpu/drm/msm/hdmi-staging./sde_hdmi_hdcp2p2.c linux-4.4.115-fbx/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_hdcp2p2.c
--- linux-4.4.115-fbx/drivers/gpu/drm/msm/hdmi-staging./sde_hdmi_hdcp2p2.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_hdcp2p2.c	2019-01-22 16:16:23.495246334 +0100
@@ -0,0 +1,1053 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt)	"%s: " fmt, __func__
+
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/stat.h>
+#include <linux/types.h>
+#include <linux/kthread.h>
+
+#include <linux/hdcp_qseecom.h>
+#include "sde_hdcp.h"
+#include "video/msm_hdmi_hdcp_mgr.h"
+#include "sde_hdmi_util.h"
+
+/*
+ * Defined addresses and offsets of standard HDCP 2.2 sink registers
+ * for DDC, as defined in HDCP 2.2 spec section 2.14 table 2.7
+ */
+#define HDCP_SINK_DDC_SLAVE_ADDR 0x74            /* Sink DDC slave address */
+#define HDCP_SINK_DDC_HDCP2_VERSION 0x50         /* Does sink support HDCP2.2 */
+#define HDCP_SINK_DDC_HDCP2_WRITE_MESSAGE 0x60   /* HDCP Tx writes here */
+#define HDCP_SINK_DDC_HDCP2_RXSTATUS 0x70        /* RxStatus, 2 bytes */
+#define HDCP_SINK_DDC_HDCP2_READ_MESSAGE 0x80    /* HDCP Tx reads here */
+
+#define HDCP2P2_DEFAULT_TIMEOUT 500 /* in ms */
+
+/*
+ * HDCP 2.2 encryption requires the data encryption block that is present in
+ * HDMI controller version 4.0.0 and above
+ */
+#define MIN_HDMI_TX_MAJOR_VERSION 4
+
+enum sde_hdmi_hdcp2p2_sink_status {
+	SINK_DISCONNECTED,
+	SINK_CONNECTED
+};
+
+enum sde_hdmi_auth_status {
+	HDMI_HDCP_AUTH_STATUS_FAILURE,
+	HDMI_HDCP_AUTH_STATUS_SUCCESS
+};
+
+struct sde_hdmi_hdcp2p2_ctrl {
+	atomic_t auth_state;
+	enum sde_hdmi_hdcp2p2_sink_status sink_status; /* Is sink connected */
+	struct sde_hdcp_init_data init_data; /* Feature data from HDMI drv */
+	struct mutex mutex; /* mutex to protect access to ctrl */
+	struct mutex msg_lock; /* mutex to protect access to msg buffer */
+	struct mutex wakeup_mutex; /* mutex to protect access to wakeup call*/
+	struct sde_hdcp_ops *ops;
+	void *lib_ctx; /* Handle to HDCP 2.2 Trustzone library */
+	struct hdcp_txmtr_ops *lib; /* Ops for driver to call into TZ */
+
+	enum hdmi_hdcp_wakeup_cmd wakeup_cmd;
+	enum sde_hdmi_auth_status auth_status;
+	char *send_msg_buf;
+	uint32_t send_msg_len;
+	uint32_t timeout;
+	uint32_t timeout_left;
+
+	struct task_struct *thread;
+	struct kthread_worker worker;
+	struct kthread_work status;
+	struct kthread_work auth;
+	struct kthread_work send_msg;
+	struct kthread_work recv_msg;
+	struct kthread_work link;
+	struct kthread_work poll;
+};
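+
+/*
+ * Rough control flow: the HDCP 2.2 Trustzone library drives this driver
+ * through wakeup commands. SEND_MESSAGE/RECV_MESSAGE become DDC writes
+ * to offset 0x60 and reads from offset 0x80 of the sink at slave
+ * address 0x74, STATUS_SUCCESS/STATUS_FAILED report the authentication
+ * result, and LINK_POLL arms RxStatus monitoring so sink-initiated
+ * reauthentication requests are noticed.
+ */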
+
+static int sde_hdmi_hdcp2p2_auth(struct sde_hdmi_hdcp2p2_ctrl *ctrl);
+static void sde_hdmi_hdcp2p2_send_msg(struct sde_hdmi_hdcp2p2_ctrl *ctrl);
+static void sde_hdmi_hdcp2p2_recv_msg(struct sde_hdmi_hdcp2p2_ctrl *ctrl);
+static void sde_hdmi_hdcp2p2_auth_status(struct sde_hdmi_hdcp2p2_ctrl *ctrl);
+static int sde_hdmi_hdcp2p2_link_check(struct sde_hdmi_hdcp2p2_ctrl *ctrl);
+
+static bool sde_hdcp2p2_is_valid_state(struct sde_hdmi_hdcp2p2_ctrl *ctrl)
+{
+	if (ctrl->wakeup_cmd == HDMI_HDCP_WKUP_CMD_AUTHENTICATE)
+		return true;
+
+	if (atomic_read(&ctrl->auth_state) != HDCP_STATE_INACTIVE)
+		return true;
+
+	return false;
+}
+
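+/*
+ * Stash the library's outgoing message under msg_lock; the send_msg
+ * worker later snapshots it into a local buffer so the DDC write does
+ * not run with the lock held.
+ */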
+static int sde_hdmi_hdcp2p2_copy_buf(struct sde_hdmi_hdcp2p2_ctrl *ctrl,
+	struct hdmi_hdcp_wakeup_data *data)
+{
+	mutex_lock(&ctrl->msg_lock);
+
+	if (!data->send_msg_len) {
+		mutex_unlock(&ctrl->msg_lock);
+		return 0;
+	}
+
+	ctrl->send_msg_len = data->send_msg_len;
+
+	kzfree(ctrl->send_msg_buf);
+
+	ctrl->send_msg_buf = kzalloc(data->send_msg_len, GFP_KERNEL);
+
+	if (!ctrl->send_msg_buf) {
+		mutex_unlock(&ctrl->msg_lock);
+		return -ENOMEM;
+	}
+
+	memcpy(ctrl->send_msg_buf, data->send_msg_buf, ctrl->send_msg_len);
+
+	mutex_unlock(&ctrl->msg_lock);
+
+	return 0;
+}
+
+static int sde_hdmi_hdcp2p2_wakeup(struct hdmi_hdcp_wakeup_data *data)
+{
+	struct sde_hdmi_hdcp2p2_ctrl *ctrl;
+
+	if (!data) {
+		SDE_ERROR("invalid input\n");
+		return -EINVAL;
+	}
+
+	ctrl = data->context;
+	if (!ctrl) {
+		SDE_ERROR("invalid ctrl\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&ctrl->wakeup_mutex);
+
+	SDE_HDCP_DEBUG("cmd: %s, timeout %dms\n",
+	hdmi_hdcp_cmd_to_str(data->cmd),
+	data->timeout);
+
+	ctrl->wakeup_cmd = data->cmd;
+
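+	/*
+	 * Double the requested timeout, presumably as margin for slow
+	 * DDC transactions; fall back to the default when none given.
+	 */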
+	if (data->timeout)
+		ctrl->timeout = data->timeout * 2;
+	else
+		ctrl->timeout = HDCP2P2_DEFAULT_TIMEOUT;
+
+	if (!sde_hdcp2p2_is_valid_state(ctrl)) {
+		SDE_ERROR("invalid state\n");
+		goto exit;
+	}
+
+	if (sde_hdmi_hdcp2p2_copy_buf(ctrl, data))
+		goto exit;
+
+	if (ctrl->wakeup_cmd == HDMI_HDCP_WKUP_CMD_STATUS_SUCCESS)
+		ctrl->auth_status = HDMI_HDCP_AUTH_STATUS_SUCCESS;
+	else if (ctrl->wakeup_cmd == HDMI_HDCP_WKUP_CMD_STATUS_FAILED)
+		ctrl->auth_status = HDMI_HDCP_AUTH_STATUS_FAILURE;
+
+	switch (ctrl->wakeup_cmd) {
+	case HDMI_HDCP_WKUP_CMD_SEND_MESSAGE:
+		queue_kthread_work(&ctrl->worker, &ctrl->send_msg);
+		break;
+	case HDMI_HDCP_WKUP_CMD_RECV_MESSAGE:
+		queue_kthread_work(&ctrl->worker, &ctrl->recv_msg);
+		break;
+	case HDMI_HDCP_WKUP_CMD_STATUS_SUCCESS:
+	case HDMI_HDCP_WKUP_CMD_STATUS_FAILED:
+		queue_kthread_work(&ctrl->worker, &ctrl->status);
+		break;
+	case HDMI_HDCP_WKUP_CMD_LINK_POLL:
+		queue_kthread_work(&ctrl->worker, &ctrl->poll);
+		break;
+	case HDMI_HDCP_WKUP_CMD_AUTHENTICATE:
+		queue_kthread_work(&ctrl->worker, &ctrl->auth);
+		break;
+	default:
+		SDE_ERROR("invalid wakeup command %d\n", ctrl->wakeup_cmd);
+	}
+exit:
+	mutex_unlock(&ctrl->wakeup_mutex);
+	return 0;
+}
+
+static int sde_hdmi_hdcp2p2_wakeup_lib(struct sde_hdmi_hdcp2p2_ctrl *ctrl,
+	struct hdcp_lib_wakeup_data *data)
+{
+	int rc = 0;
+
+	if (ctrl && ctrl->lib && ctrl->lib->wakeup &&
+		data && (data->cmd != HDCP_LIB_WKUP_CMD_INVALID)) {
+		rc = ctrl->lib->wakeup(data);
+		if (rc)
+			SDE_ERROR("error sending %s to lib\n",
+				hdcp_lib_cmd_to_str(data->cmd));
+	}
+
+	return rc;
+}
+
+static void sde_hdmi_hdcp2p2_reset(struct sde_hdmi_hdcp2p2_ctrl *ctrl)
+{
+	if (!ctrl) {
+		SDE_ERROR("invalid input\n");
+		return;
+	}
+
+	ctrl->sink_status = SINK_DISCONNECTED;
+	atomic_set(&ctrl->auth_state, HDCP_STATE_INACTIVE);
+}
+
+static void sde_hdmi_hdcp2p2_off(void *input)
+{
+	struct sde_hdmi_hdcp2p2_ctrl *ctrl;
+	struct hdmi_hdcp_wakeup_data cdata = {HDMI_HDCP_WKUP_CMD_AUTHENTICATE};
+
+	ctrl = (struct sde_hdmi_hdcp2p2_ctrl *)input;
+
+	if (!ctrl) {
+		SDE_ERROR("invalid input\n");
+		return;
+	}
+
+	sde_hdmi_hdcp2p2_reset(ctrl);
+
+	flush_kthread_worker(&ctrl->worker);
+
+	cdata.context = input;
+	sde_hdmi_hdcp2p2_wakeup(&cdata);
+
+	/*
+	 * There can be up to one frame of delay between the time
+	 * encryption disable is requested and the time the encryption
+	 * disabled interrupt arrives.
+	 */
+	msleep(20);
+	sde_hdmi_hdcp2p2_ddc_disable((void *)ctrl->init_data.cb_data);
+}
+
+static int sde_hdmi_hdcp2p2_authenticate(void *input)
+{
+	struct sde_hdmi_hdcp2p2_ctrl *ctrl = input;
+	struct hdmi_hdcp_wakeup_data cdata = {HDMI_HDCP_WKUP_CMD_AUTHENTICATE};
+	u32 regval;
+	int rc = 0;
+
+	/* Enable authentication success interrupt */
+	regval = DSS_REG_R(ctrl->init_data.core_io, HDMI_HDCP_INT_CTRL2);
+	regval |= BIT(1) | BIT(2);
+
+	DSS_REG_W(ctrl->init_data.core_io, HDMI_HDCP_INT_CTRL2, regval);
+
+	flush_kthread_worker(&ctrl->worker);
+
+	ctrl->sink_status = SINK_CONNECTED;
+	atomic_set(&ctrl->auth_state, HDCP_STATE_AUTHENTICATING);
+
+	/* make sure ddc is idle before starting hdcp 2.2 authentication */
+	_sde_hdmi_scrambler_ddc_disable((void *)ctrl->init_data.cb_data);
+	sde_hdmi_hdcp2p2_ddc_disable((void *)ctrl->init_data.cb_data);
+
+	cdata.context = input;
+	sde_hdmi_hdcp2p2_wakeup(&cdata);
+
+	return rc;
+}
+
+static int sde_hdmi_hdcp2p2_reauthenticate(void *input)
+{
+	struct sde_hdmi_hdcp2p2_ctrl *ctrl;
+
+	ctrl = (struct sde_hdmi_hdcp2p2_ctrl *)input;
+
+	if (!ctrl) {
+		SDE_ERROR("invalid input\n");
+		return -EINVAL;
+	}
+
+	sde_hdmi_hdcp2p2_reset(ctrl);
+
+	return sde_hdmi_hdcp2p2_authenticate(input);
+}
+
+static void sde_hdmi_hdcp2p2_min_level_change(void *client_ctx,
+		int min_enc_lvl)
+{
+	struct sde_hdmi_hdcp2p2_ctrl *ctrl =
+		(struct sde_hdmi_hdcp2p2_ctrl *)client_ctx;
+	struct hdcp_lib_wakeup_data cdata = {
+		HDCP_LIB_WKUP_CMD_QUERY_STREAM_TYPE};
+	bool enc_notify = true;
+	enum sde_hdcp_states enc_lvl;
+
+	if (!ctrl) {
+		SDE_ERROR("invalid input\n");
+		return;
+	}
+
+	switch (min_enc_lvl) {
+	case 0:
+		enc_lvl = HDCP_STATE_AUTH_ENC_NONE;
+		break;
+	case 1:
+		enc_lvl = HDCP_STATE_AUTH_ENC_1X;
+		break;
+	case 2:
+		enc_lvl = HDCP_STATE_AUTH_ENC_2P2;
+		break;
+	default:
+		enc_notify = false;
+	}
+
+	SDE_HDCP_DEBUG("enc level changed %d\n", min_enc_lvl);
+
+	/* notify the client first about the new level */
+	if (enc_notify && ctrl->init_data.notify_status)
+		ctrl->init_data.notify_status(ctrl->init_data.cb_data, enc_lvl);
+
+	cdata.context = ctrl->lib_ctx;
+	sde_hdmi_hdcp2p2_wakeup_lib(ctrl, &cdata);
+}
+
+static void sde_hdmi_hdcp2p2_mute_sink(void *client_ctx)
+{
+	struct sde_hdmi_hdcp2p2_ctrl *ctrl =
+		(struct sde_hdmi_hdcp2p2_ctrl *)client_ctx;
+
+	if (!ctrl) {
+		SDE_ERROR("invalid input\n");
+		return;
+	}
+
+	/* call into client to send avmute to the sink */
+	if (ctrl->init_data.avmute_sink)
+		ctrl->init_data.avmute_sink(ctrl->init_data.cb_data);
+}
+
+static void sde_hdmi_hdcp2p2_auth_failed(struct sde_hdmi_hdcp2p2_ctrl *ctrl)
+{
+	if (!ctrl) {
+		SDE_ERROR("invalid input\n");
+		return;
+	}
+
+	atomic_set(&ctrl->auth_state, HDCP_STATE_AUTH_FAIL);
+
+	sde_hdmi_hdcp2p2_ddc_disable(ctrl->init_data.cb_data);
+
+	/* notify hdmi tx about HDCP failure */
+	ctrl->init_data.notify_status(ctrl->init_data.cb_data,
+		HDCP_STATE_AUTH_FAIL);
+}
+
+static void sde_hdmi_hdcp2p2_fail_noreauth(struct sde_hdmi_hdcp2p2_ctrl *ctrl)
+{
+	if (!ctrl) {
+		SDE_ERROR("invalid input\n");
+		return;
+	}
+
+	atomic_set(&ctrl->auth_state, HDCP_STATE_AUTH_FAIL);
+
+	sde_hdmi_hdcp2p2_ddc_disable(ctrl->init_data.cb_data);
+
+	/* notify hdmi tx about HDCP failure */
+	ctrl->init_data.notify_status(ctrl->init_data.cb_data,
+		HDCP_STATE_AUTH_FAIL_NOREAUTH);
+}
+
+static void sde_hdmi_hdcp2p2_srm_cb(void *client_ctx)
+{
+	struct sde_hdmi_hdcp2p2_ctrl *ctrl =
+		(struct sde_hdmi_hdcp2p2_ctrl *)client_ctx;
+	struct hdcp_lib_wakeup_data cdata = {
+		HDCP_LIB_WKUP_CMD_INVALID};
+
+	if (!ctrl) {
+		SDE_ERROR("invalid input\n");
+		return;
+	}
+
+	cdata.context = ctrl->lib_ctx;
+	cdata.cmd = HDCP_LIB_WKUP_CMD_STOP;
+	sde_hdmi_hdcp2p2_wakeup_lib(ctrl, &cdata);
+
+	sde_hdmi_hdcp2p2_fail_noreauth(ctrl);
+}
+
+static int sde_hdmi_hdcp2p2_ddc_rd_message(struct sde_hdmi_hdcp2p2_ctrl *ctrl,
+	u8 *buf, int size, u32 timeout)
+{
+	struct sde_hdmi_tx_ddc_data *ddc_data;
+	struct sde_hdmi_tx_ddc_ctrl *ddc_ctrl;
+
+	int rc;
+
+	if (!ctrl) {
+		SDE_ERROR("invalid ctrl\n");
+		return -EINVAL;
+	}
+
+	ddc_ctrl = ctrl->init_data.ddc_ctrl;
+	ddc_data = &ddc_ctrl->ddc_data;
+
+	if (!ddc_data) {
+		SDE_ERROR("invalid ddc data\n");
+		return -EINVAL;
+	}
+
+	if (atomic_read(&ctrl->auth_state) == HDCP_STATE_INACTIVE) {
+		SDE_ERROR("hdcp is off\n");
+		return -EINVAL;
+	}
+
+	memset(ddc_data, 0, sizeof(*ddc_data));
+	ddc_data->dev_addr = HDCP_SINK_DDC_SLAVE_ADDR;
+	ddc_data->offset = HDCP_SINK_DDC_HDCP2_READ_MESSAGE;
+	ddc_data->data_buf = buf;
+	ddc_data->data_len = size;
+	ddc_data->request_len = size;
+	ddc_data->retry = 0;
+	ddc_data->hard_timeout = timeout;
+	ddc_data->what = "HDCP2ReadMessage";
+
+	rc = sde_hdmi_ddc_read(ctrl->init_data.cb_data);
+	if (rc)
+		SDE_ERROR("Cannot read HDCP message register\n");
+
+	ctrl->timeout_left = ddc_data->timeout_left;
+
+	return rc;
+}
+
+static int sde_hdmi_hdcp2p2_ddc_wt_message(struct sde_hdmi_hdcp2p2_ctrl *ctrl,
+	u8 *buf, size_t size)
+{
+	struct sde_hdmi_tx_ddc_data *ddc_data;
+	struct sde_hdmi_tx_ddc_ctrl *ddc_ctrl;
+
+	int rc;
+
+	if (!ctrl) {
+		SDE_ERROR("invalid ctrl\n");
+		return -EINVAL;
+	}
+
+	ddc_ctrl = ctrl->init_data.ddc_ctrl;
+	ddc_data = &ddc_ctrl->ddc_data;
+
+	if (!ddc_data) {
+		SDE_ERROR("invalid ddc data\n");
+		return -EINVAL;
+	}
+
+	memset(ddc_data, 0, sizeof(*ddc_data));
+	ddc_data->dev_addr = HDCP_SINK_DDC_SLAVE_ADDR;
+	ddc_data->offset = HDCP_SINK_DDC_HDCP2_WRITE_MESSAGE;
+	ddc_data->data_buf = buf;
+	ddc_data->data_len = size;
+	ddc_data->hard_timeout = ctrl->timeout;
+	ddc_data->what = "HDCP2WriteMessage";
+
+	rc = sde_hdmi_ddc_write((void *)ctrl->init_data.cb_data);
+	if (rc)
+		SDE_ERROR("Cannot write HDCP message register\n");
+
+	ctrl->timeout_left = ddc_data->timeout_left;
+
+	return rc;
+}
+
+static int sde_hdmi_hdcp2p2_read_version(struct sde_hdmi_hdcp2p2_ctrl *ctrl,
+		u8 *hdcp2version)
+{
+	struct sde_hdmi_tx_ddc_data *ddc_data;
+	struct sde_hdmi_tx_ddc_ctrl *ddc_ctrl;
+	int rc;
+
+	if (!ctrl) {
+		SDE_ERROR("invalid ctrl\n");
+		return -EINVAL;
+	}
+
+	ddc_ctrl = ctrl->init_data.ddc_ctrl;
+	ddc_data = &ddc_ctrl->ddc_data;
+
+	if (!ddc_data) {
+		SDE_ERROR("invalid ddc data\n");
+		return -EINVAL;
+	}
+	memset(ddc_data, 0, sizeof(*ddc_data));
+	ddc_data->dev_addr = HDCP_SINK_DDC_SLAVE_ADDR;
+	ddc_data->offset = HDCP_SINK_DDC_HDCP2_VERSION;
+	ddc_data->data_buf = hdcp2version;
+	ddc_data->data_len = 1;
+	ddc_data->request_len = 1;
+	ddc_data->retry = 1;
+	ddc_data->what = "HDCP2Version";
+
+	rc = sde_hdmi_ddc_read((void *)ctrl->init_data.cb_data);
+	if (rc) {
+		SDE_ERROR("Cannot read HDCP2Version register");
+		return rc;
+	}
+
+	SDE_HDCP_DEBUG("Read HDCP2Version as %u\n", *hdcp2version);
+	return rc;
+}
+
+static bool sde_hdmi_hdcp2p2_feature_supported(void *input)
+{
+	struct sde_hdmi_hdcp2p2_ctrl *ctrl = input;
+	struct hdcp_txmtr_ops *lib = NULL;
+	bool supported = false;
+
+	if (!ctrl) {
+		SDE_ERROR("invalid input\n");
+		goto end;
+	}
+
+	lib = ctrl->lib;
+	if (!lib) {
+		SDE_ERROR("invalid lib ops data\n");
+		goto end;
+	}
+
+	if (lib->feature_supported) {
+		supported = lib->feature_supported(
+			ctrl->lib_ctx);
+	}
+
+end:
+	return supported;
+}
+
+static void sde_hdmi_hdcp2p2_send_msg(struct sde_hdmi_hdcp2p2_ctrl *ctrl)
+{
+	int rc = 0;
+	struct hdcp_lib_wakeup_data cdata = {HDCP_LIB_WKUP_CMD_INVALID};
+	uint32_t msglen;
+	char *msg = NULL;
+
+	if (!ctrl) {
+		SDE_ERROR("invalid input\n");
+		rc = -EINVAL;
+		goto exit;
+	}
+
+	cdata.context = ctrl->lib_ctx;
+
+	if (atomic_read(&ctrl->auth_state) == HDCP_STATE_INACTIVE) {
+		SDE_ERROR("hdcp is off\n");
+		goto exit;
+	}
+
+	mutex_lock(&ctrl->msg_lock);
+	msglen = ctrl->send_msg_len;
+
+	if (!msglen) {
+		mutex_unlock(&ctrl->msg_lock);
+		rc = -EINVAL;
+		goto exit;
+	}
+
+	msg = kzalloc(msglen, GFP_KERNEL);
+	if (!msg) {
+		mutex_unlock(&ctrl->msg_lock);
+		rc = -ENOMEM;
+		goto exit;
+	}
+
+	memcpy(msg, ctrl->send_msg_buf, msglen);
+	mutex_unlock(&ctrl->msg_lock);
+
+	/* Forward the message to the sink */
+	rc = sde_hdmi_hdcp2p2_ddc_wt_message(ctrl,
+			msg, (size_t)msglen);
+	if (rc) {
+		SDE_ERROR("Error sending msg to sink %d\n", rc);
+		cdata.cmd = HDCP_LIB_WKUP_CMD_MSG_SEND_FAILED;
+	} else {
+		cdata.cmd = HDCP_LIB_WKUP_CMD_MSG_SEND_SUCCESS;
+		cdata.timeout = ctrl->timeout_left;
+	}
+exit:
+	kfree(msg);
+
+	sde_hdmi_hdcp2p2_wakeup_lib(ctrl, &cdata);
+}
+
+static void sde_hdmi_hdcp2p2_send_msg_work(struct kthread_work *work)
+{
+	struct sde_hdmi_hdcp2p2_ctrl *ctrl = container_of(work,
+		struct sde_hdmi_hdcp2p2_ctrl, send_msg);
+
+	sde_hdmi_hdcp2p2_send_msg(ctrl);
+}
+
+static void sde_hdmi_hdcp2p2_link_cb(void *data)
+{
+	struct sde_hdmi_hdcp2p2_ctrl *ctrl = data;
+
+	if (!ctrl) {
+		SDE_HDCP_DEBUG("invalid input\n");
+		return;
+	}
+
+	if (atomic_read(&ctrl->auth_state) != HDCP_STATE_INACTIVE)
+		queue_kthread_work(&ctrl->worker, &ctrl->link);
+}
+
+static void sde_hdmi_hdcp2p2_recv_msg(struct sde_hdmi_hdcp2p2_ctrl *ctrl)
+{
+	int timeout_hsync = 0, rc = 0;
+	char *recvd_msg_buf = NULL;
+	struct sde_hdmi_tx_hdcp2p2_ddc_data *ddc_data;
+	struct sde_hdmi_tx_ddc_ctrl *ddc_ctrl;
+	struct hdcp_lib_wakeup_data cdata = {HDCP_LIB_WKUP_CMD_INVALID};
+
+	if (!ctrl) {
+		SDE_ERROR("invalid input\n");
+		rc = -EINVAL;
+		goto exit;
+	}
+
+	cdata.context = ctrl->lib_ctx;
+
+	if (atomic_read(&ctrl->auth_state) == HDCP_STATE_INACTIVE) {
+		SDE_ERROR("hdcp is off\n");
+		goto exit;
+	}
+
+	ddc_ctrl = ctrl->init_data.ddc_ctrl;
+	if (!ddc_ctrl) {
+		pr_err("invalid ddc ctrl\n");
+		rc = -EINVAL;
+		goto exit;
+	}
+
+	ddc_data = &ddc_ctrl->sde_hdcp2p2_ddc_data;
+	memset(ddc_data, 0, sizeof(*ddc_data));
+
+	timeout_hsync = _sde_hdmi_get_timeout_in_hysnc(
+			(void *)ctrl->init_data.cb_data, ctrl->timeout);
+
+	if (timeout_hsync <= 0) {
+		SDE_ERROR("err in timeout hsync calc\n");
+		timeout_hsync = HDMI_DEFAULT_TIMEOUT_HSYNC;
+	}
+
+	SDE_HDCP_DEBUG("timeout for rxstatus %dms, %d hsync\n",
+	ctrl->timeout, timeout_hsync);
+
+	ddc_data->intr_mask = RXSTATUS_MESSAGE_SIZE | RXSTATUS_REAUTH_REQ;
+	ddc_data->timeout_ms = ctrl->timeout;
+	ddc_data->timeout_hsync = timeout_hsync;
+	ddc_data->periodic_timer_hsync = timeout_hsync / 20;
+	ddc_data->read_method = HDCP2P2_RXSTATUS_HW_DDC_SW_TRIGGER;
+	ddc_data->wait = true;
+
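+	/*
+	 * Blocks until the sink flags a pending message (or a reauth
+	 * request) in RxStatus, or the hsync-based timeout expires.
+	 */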
+	rc = sde_hdmi_hdcp2p2_read_rxstatus(ctrl->init_data.cb_data);
+	if (rc) {
+		SDE_ERROR("error reading rxstatus %d\n", rc);
+		goto exit;
+	}
+
+	if (ddc_data->reauth_req) {
+		ddc_data->reauth_req = false;
+
+		SDE_HDCP_DEBUG("reauth triggered by sink\n");
+		rc = -EINVAL;
+		goto exit;
+	}
+
+	ctrl->timeout_left = ddc_data->timeout_left;
+
+	SDE_HDCP_DEBUG("timeout left after rxstatus %dms, msg size %d\n",
+	ctrl->timeout_left, ddc_data->message_size);
+
+	if (!ddc_data->message_size) {
+		SDE_ERROR("recvd invalid message size\n");
+		rc = -EINVAL;
+		goto exit;
+	}
+
+	recvd_msg_buf = kzalloc(ddc_data->message_size, GFP_KERNEL);
+	if (!recvd_msg_buf) {
+		rc = -ENOMEM;
+		goto exit;
+	}
+
+	rc = sde_hdmi_hdcp2p2_ddc_rd_message(ctrl, recvd_msg_buf,
+		ddc_data->message_size, ctrl->timeout_left);
+	if (rc) {
+		SDE_ERROR("error reading message %d\n", rc);
+		goto exit;
+	}
+
+	cdata.cmd = HDCP_LIB_WKUP_CMD_MSG_RECV_SUCCESS;
+	cdata.recvd_msg_buf = recvd_msg_buf;
+	cdata.recvd_msg_len = ddc_data->message_size;
+	cdata.timeout = ctrl->timeout_left;
+exit:
+	if (rc == -ETIMEDOUT)
+		cdata.cmd = HDCP_LIB_WKUP_CMD_MSG_RECV_TIMEOUT;
+	else if (rc)
+		cdata.cmd = HDCP_LIB_WKUP_CMD_MSG_RECV_FAILED;
+
+	sde_hdmi_hdcp2p2_wakeup_lib(ctrl, &cdata);
+	kfree(recvd_msg_buf);
+}
+
+static void sde_hdmi_hdcp2p2_recv_msg_work(struct kthread_work *work)
+{
+	struct sde_hdmi_hdcp2p2_ctrl *ctrl = container_of(work,
+		struct sde_hdmi_hdcp2p2_ctrl, recv_msg);
+
+	sde_hdmi_hdcp2p2_recv_msg(ctrl);
+}
+
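+/*
+ * Arm hardware RxStatus monitoring for link maintenance: the DDC block
+ * polls the sink's RxStatus register every periodic_timer_hsync lines
+ * and invokes link_cb (which queues the link work) when READY,
+ * MESSAGE_SIZE or REAUTH_REQ asserts.
+ */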
+static int sde_hdmi_hdcp2p2_link_check(struct sde_hdmi_hdcp2p2_ctrl *ctrl)
+{
+	struct sde_hdmi_tx_ddc_ctrl *ddc_ctrl;
+	struct sde_hdmi_tx_hdcp2p2_ddc_data *ddc_data;
+	int timeout_hsync;
+	int ret;
+
+	ddc_ctrl = ctrl->init_data.ddc_ctrl;
+
+	if (!ddc_ctrl)
+		return -EINVAL;
+
+	sde_hdmi_ddc_config(ctrl->init_data.cb_data);
+
+	ddc_data = &ddc_ctrl->sde_hdcp2p2_ddc_data;
+
+	memset(ddc_data, 0, sizeof(*ddc_data));
+
+	timeout_hsync = _sde_hdmi_get_timeout_in_hysnc(
+					(void *)ctrl->init_data.cb_data,
+					jiffies_to_msecs(HZ / 2));
+
+	if (timeout_hsync <= 0) {
+		SDE_ERROR("err in timeout hsync calc\n");
+		timeout_hsync = HDMI_DEFAULT_TIMEOUT_HSYNC;
+	}
+	SDE_HDCP_DEBUG("timeout for rxstatus %d hsyncs\n", timeout_hsync);
+
+	ddc_data->intr_mask = RXSTATUS_READY | RXSTATUS_MESSAGE_SIZE |
+		RXSTATUS_REAUTH_REQ;
+	ddc_data->timeout_hsync = timeout_hsync;
+	ddc_data->periodic_timer_hsync = timeout_hsync;
+	ddc_data->read_method = HDCP2P2_RXSTATUS_HW_DDC_SW_TRIGGER;
+	ddc_data->link_cb = sde_hdmi_hdcp2p2_link_cb;
+	ddc_data->link_data = ctrl;
+
+	ret = sde_hdmi_hdcp2p2_read_rxstatus((void *)ctrl->init_data.cb_data);
+	return ret;
+}
+
+static void sde_hdmi_hdcp2p2_poll_work(struct kthread_work *work)
+{
+	struct sde_hdmi_hdcp2p2_ctrl *ctrl = container_of(work,
+		struct sde_hdmi_hdcp2p2_ctrl, poll);
+
+	sde_hdmi_hdcp2p2_link_check(ctrl);
+}
+
+static void sde_hdmi_hdcp2p2_auth_status(struct sde_hdmi_hdcp2p2_ctrl *ctrl)
+{
+	if (!ctrl) {
+		SDE_ERROR("invalid input\n");
+		return;
+	}
+
+	if (atomic_read(&ctrl->auth_state) == HDCP_STATE_INACTIVE) {
+		SDE_ERROR("hdcp is off\n");
+		return;
+	}
+
+	if (ctrl->auth_status == HDMI_HDCP_AUTH_STATUS_SUCCESS) {
+		ctrl->init_data.notify_status(ctrl->init_data.cb_data,
+			HDCP_STATE_AUTHENTICATED);
+
+		atomic_set(&ctrl->auth_state, HDCP_STATE_AUTHENTICATED);
+	} else {
+		sde_hdmi_hdcp2p2_auth_failed(ctrl);
+	}
+}
+
+static void sde_hdmi_hdcp2p2_auth_status_work(struct kthread_work *work)
+{
+	struct sde_hdmi_hdcp2p2_ctrl *ctrl = container_of(work,
+		struct sde_hdmi_hdcp2p2_ctrl, status);
+
+	sde_hdmi_hdcp2p2_auth_status(ctrl);
+}
+
+static void sde_hdmi_hdcp2p2_link_work(struct kthread_work *work)
+{
+	int rc = 0;
+	struct sde_hdmi_hdcp2p2_ctrl *ctrl = container_of(work,
+		struct sde_hdmi_hdcp2p2_ctrl, link);
+	struct hdcp_lib_wakeup_data cdata = {HDCP_LIB_WKUP_CMD_INVALID};
+	char *recvd_msg_buf = NULL;
+	struct sde_hdmi_tx_hdcp2p2_ddc_data *ddc_data;
+	struct sde_hdmi_tx_ddc_ctrl *ddc_ctrl;
+
+	if (!ctrl) {
+		SDE_ERROR("invalid input\n");
+		return;
+	}
+
+	cdata.context = ctrl->lib_ctx;
+
+	ddc_ctrl = ctrl->init_data.ddc_ctrl;
+	if (!ddc_ctrl) {
+		rc = -EINVAL;
+		cdata.cmd = HDCP_LIB_WKUP_CMD_STOP;
+		goto exit;
+	}
+
+	ddc_data = &ddc_ctrl->sde_hdcp2p2_ddc_data;
+
+	if (ddc_data->reauth_req) {
+		SDE_HDCP_DEBUG("reauth triggered by sink\n");
+
+		ddc_data->reauth_req = false;
+		rc = -ENOLINK;
+		cdata.cmd = HDCP_LIB_WKUP_CMD_STOP;
+		goto exit;
+	}
+
+	if (ddc_data->ready && ddc_data->message_size) {
+		SDE_HDCP_DEBUG("topology changed. rxstatus msg size %d\n",
+			ddc_data->message_size);
+
+		ddc_data->ready = false;
+
+		recvd_msg_buf = kzalloc(ddc_data->message_size, GFP_KERNEL);
+		if (!recvd_msg_buf) {
+			cdata.cmd = HDCP_LIB_WKUP_CMD_STOP;
+			goto exit;
+		}
+
+		rc = sde_hdmi_hdcp2p2_ddc_rd_message(ctrl, recvd_msg_buf,
+			ddc_data->message_size, HDCP2P2_DEFAULT_TIMEOUT);
+		if (rc) {
+			cdata.cmd = HDCP_LIB_WKUP_CMD_STOP;
+			SDE_ERROR("error reading message %d\n", rc);
+		} else {
+			cdata.cmd = HDCP_LIB_WKUP_CMD_MSG_RECV_SUCCESS;
+			cdata.recvd_msg_buf = recvd_msg_buf;
+			cdata.recvd_msg_len = ddc_data->message_size;
+		}
+
+		ddc_data->message_size = 0;
+	}
+exit:
+	sde_hdmi_hdcp2p2_wakeup_lib(ctrl, &cdata);
+	kfree(recvd_msg_buf);
+
+	if (rc) {
+		sde_hdmi_hdcp2p2_auth_failed(ctrl);
+		return;
+	}
+}
+
+static int sde_hdmi_hdcp2p2_auth(struct sde_hdmi_hdcp2p2_ctrl *ctrl)
+{
+	struct hdcp_lib_wakeup_data cdata = {HDCP_LIB_WKUP_CMD_INVALID};
+	int rc = 0;
+
+	if (!ctrl) {
+		SDE_ERROR("invalid input\n");
+		return -EINVAL;
+	}
+
+	cdata.context = ctrl->lib_ctx;
+
+	if (atomic_read(&ctrl->auth_state) == HDCP_STATE_AUTHENTICATING)
+		cdata.cmd = HDCP_LIB_WKUP_CMD_START;
+	else
+		cdata.cmd = HDCP_LIB_WKUP_CMD_STOP;
+
+	rc = sde_hdmi_hdcp2p2_wakeup_lib(ctrl, &cdata);
+	if (rc)
+		sde_hdmi_hdcp2p2_auth_failed(ctrl);
+
+	return rc;
+}
+
+static void sde_hdmi_hdcp2p2_auth_work(struct kthread_work *work)
+{
+	struct sde_hdmi_hdcp2p2_ctrl *ctrl = container_of(work,
+		struct sde_hdmi_hdcp2p2_ctrl, auth);
+
+	sde_hdmi_hdcp2p2_auth(ctrl);
+}
+
+void sde_hdmi_hdcp2p2_deinit(void *input)
+{
+	struct sde_hdmi_hdcp2p2_ctrl *ctrl;
+	struct hdcp_lib_wakeup_data cdata = {HDCP_LIB_WKUP_CMD_INVALID};
+
+	ctrl = (struct sde_hdmi_hdcp2p2_ctrl *)input;
+
+	if (!ctrl) {
+		SDE_ERROR("invalid input\n");
+		return;
+	}
+
+	cdata.cmd = HDCP_LIB_WKUP_CMD_STOP;
+	cdata.context = ctrl->lib_ctx;
+	sde_hdmi_hdcp2p2_wakeup_lib(ctrl, &cdata);
+
+	kthread_stop(ctrl->thread);
+
+	mutex_destroy(&ctrl->mutex);
+	mutex_destroy(&ctrl->msg_lock);
+	mutex_destroy(&ctrl->wakeup_mutex);
+	kfree(ctrl);
+}
+
+void *sde_hdmi_hdcp2p2_init(struct sde_hdcp_init_data *init_data)
+{
+	int rc;
+	struct sde_hdmi_hdcp2p2_ctrl *ctrl;
+	static struct sde_hdcp_ops ops = {
+		.reauthenticate = sde_hdmi_hdcp2p2_reauthenticate,
+		.authenticate = sde_hdmi_hdcp2p2_authenticate,
+		.feature_supported = sde_hdmi_hdcp2p2_feature_supported,
+		.off = sde_hdmi_hdcp2p2_off
+	};
+
+	static struct hdcp_client_ops client_ops = {
+		.wakeup = sde_hdmi_hdcp2p2_wakeup,
+		.notify_lvl_change = sde_hdmi_hdcp2p2_min_level_change,
+		.srm_cb = sde_hdmi_hdcp2p2_srm_cb,
+		.mute_sink = sde_hdmi_hdcp2p2_mute_sink,
+	};
+
+	static struct hdcp_txmtr_ops txmtr_ops;
+	struct hdcp_register_data register_data;
+
+	SDE_HDCP_DEBUG("HDCP2P2 feature initialization\n");
+
+	if (!init_data || !init_data->core_io || !init_data->mutex ||
+		!init_data->ddc_ctrl || !init_data->notify_status ||
+		!init_data->workq || !init_data->cb_data) {
+		SDE_ERROR("invalid input\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	if (init_data->hdmi_tx_ver < MIN_HDMI_TX_MAJOR_VERSION) {
+		SDE_ERROR("HDMI Tx does not support HDCP 2.2\n");
+		return ERR_PTR(-ENODEV);
+	}
+
+	ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
+	if (!ctrl)
+		return ERR_PTR(-ENOMEM);
+
+	ctrl->init_data = *init_data;
+	ctrl->lib = &txmtr_ops;
+
+	ctrl->sink_status = SINK_DISCONNECTED;
+
+	atomic_set(&ctrl->auth_state, HDCP_STATE_INACTIVE);
+
+	ctrl->ops = &ops;
+	mutex_init(&ctrl->mutex);
+	mutex_init(&ctrl->msg_lock);
+	mutex_init(&ctrl->wakeup_mutex);
+
+	register_data.hdcp_ctx = &ctrl->lib_ctx;
+	register_data.client_ops = &client_ops;
+	register_data.txmtr_ops = &txmtr_ops;
+	register_data.device_type = HDCP_TXMTR_HDMI;
+	register_data.client_ctx = ctrl;
+
+	rc = hdcp_library_register(&register_data);
+	if (rc) {
+		SDE_ERROR("Unable to register with HDCP 2.2 library\n");
+		goto error;
+	}
+
+	init_kthread_worker(&ctrl->worker);
+
+	init_kthread_work(&ctrl->auth,     sde_hdmi_hdcp2p2_auth_work);
+	init_kthread_work(&ctrl->send_msg, sde_hdmi_hdcp2p2_send_msg_work);
+	init_kthread_work(&ctrl->recv_msg, sde_hdmi_hdcp2p2_recv_msg_work);
+	init_kthread_work(&ctrl->status,   sde_hdmi_hdcp2p2_auth_status_work);
+	init_kthread_work(&ctrl->link,     sde_hdmi_hdcp2p2_link_work);
+	init_kthread_work(&ctrl->poll,     sde_hdmi_hdcp2p2_poll_work);
+
+	ctrl->thread = kthread_run(kthread_worker_fn,
+		&ctrl->worker, "hdmi_hdcp2p2");
+
+	if (IS_ERR(ctrl->thread)) {
+		SDE_ERROR("unable to start hdcp2p2 thread\n");
+		rc = PTR_ERR(ctrl->thread);
+		ctrl->thread = NULL;
+		goto error;
+	}
+
+	return ctrl;
+error:
+	kfree(ctrl);
+	return ERR_PTR(rc);
+}
+
+static bool sde_hdmi_hdcp2p2_supported(struct sde_hdmi_hdcp2p2_ctrl *ctrl)
+{
+	u8 hdcp2version = 0;
+	int rc = sde_hdmi_hdcp2p2_read_version(ctrl, &hdcp2version);
+
+	if (rc)
+		goto error;
+
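+	/*
+	 * Per the HDCP 2.2 on HDMI spec, bit 2 of the HDCP2Version DDC
+	 * register (offset 0x50) set to 1 indicates an HDCP 2.2 capable
+	 * sink.
+	 */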
+	if (hdcp2version & BIT(2)) {
+		SDE_HDCP_DEBUG("Sink is HDCP 2.2 capable\n");
+		return true;
+	}
+
+error:
+	SDE_HDCP_DEBUG("Sink is not HDCP 2.2 capable\n");
+	return false;
+}
+
+struct sde_hdcp_ops *sde_hdmi_hdcp2p2_start(void *input)
+{
+	struct sde_hdmi_hdcp2p2_ctrl *ctrl;
+
+	ctrl = (struct sde_hdmi_hdcp2p2_ctrl *)input;
+
+	SDE_HDCP_DEBUG("Checking sink capability\n");
+	if (sde_hdmi_hdcp2p2_supported(ctrl))
+		return ctrl->ops;
+	else
+		return NULL;
+}
+
diff -Nruw linux-4.4.115-fbx/drivers/gpu/drm/msm/hdmi-staging./sde_hdmi_regs.h linux-4.4.115-fbx/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_regs.h
--- linux-4.4.115-fbx/drivers/gpu/drm/msm/hdmi-staging./sde_hdmi_regs.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_regs.h	2019-01-22 16:16:23.495246334 +0100
@@ -0,0 +1,300 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _SDE_HDMI_REGS_H
+#define _SDE_HDMI_REGS_H
+
+/* HDMI_TX Registers */
+#define HDMI_CTRL                        (0x00000000)
+#define HDMI_TEST_PATTERN                (0x00000010)
+#define HDMI_RANDOM_PATTERN              (0x00000014)
+#define HDMI_PKT_BLK_CTRL                (0x00000018)
+#define HDMI_STATUS                      (0x0000001C)
+#define HDMI_AUDIO_PKT_CTRL              (0x00000020)
+#define HDMI_ACR_PKT_CTRL                (0x00000024)
+#define HDMI_VBI_PKT_CTRL                (0x00000028)
+#define HDMI_INFOFRAME_CTRL0             (0x0000002C)
+#define HDMI_INFOFRAME_CTRL1             (0x00000030)
+#define HDMI_GEN_PKT_CTRL                (0x00000034)
+#define HDMI_ACP                         (0x0000003C)
+#define HDMI_GC                          (0x00000040)
+#define HDMI_AUDIO_PKT_CTRL2             (0x00000044)
+#define HDMI_ISRC1_0                     (0x00000048)
+#define HDMI_ISRC1_1                     (0x0000004C)
+#define HDMI_ISRC1_2                     (0x00000050)
+#define HDMI_ISRC1_3                     (0x00000054)
+#define HDMI_ISRC1_4                     (0x00000058)
+#define HDMI_ISRC2_0                     (0x0000005C)
+#define HDMI_ISRC2_1                     (0x00000060)
+#define HDMI_ISRC2_2                     (0x00000064)
+#define HDMI_ISRC2_3                     (0x00000068)
+#define HDMI_AVI_INFO0                   (0x0000006C)
+#define HDMI_AVI_INFO1                   (0x00000070)
+#define HDMI_AVI_INFO2                   (0x00000074)
+#define HDMI_AVI_INFO3                   (0x00000078)
+#define HDMI_MPEG_INFO0                  (0x0000007C)
+#define HDMI_MPEG_INFO1                  (0x00000080)
+#define HDMI_GENERIC0_HDR                (0x00000084)
+#define HDMI_GENERIC0_0                  (0x00000088)
+#define HDMI_GENERIC0_1                  (0x0000008C)
+#define HDMI_GENERIC0_2                  (0x00000090)
+#define HDMI_GENERIC0_3                  (0x00000094)
+#define HDMI_GENERIC0_4                  (0x00000098)
+#define HDMI_GENERIC0_5                  (0x0000009C)
+#define HDMI_GENERIC0_6                  (0x000000A0)
+#define HDMI_GENERIC1_HDR                (0x000000A4)
+#define HDMI_GENERIC1_0                  (0x000000A8)
+#define HDMI_GENERIC1_1                  (0x000000AC)
+#define HDMI_GENERIC1_2                  (0x000000B0)
+#define HDMI_GENERIC1_3                  (0x000000B4)
+#define HDMI_GENERIC1_4                  (0x000000B8)
+#define HDMI_GENERIC1_5                  (0x000000BC)
+#define HDMI_GENERIC1_6                  (0x000000C0)
+#define HDMI_ACR_32_0                    (0x000000C4)
+#define HDMI_ACR_32_1                    (0x000000C8)
+#define HDMI_ACR_44_0                    (0x000000CC)
+#define HDMI_ACR_44_1                    (0x000000D0)
+#define HDMI_ACR_48_0                    (0x000000D4)
+#define HDMI_ACR_48_1                    (0x000000D8)
+#define HDMI_ACR_STATUS_0                (0x000000DC)
+#define HDMI_ACR_STATUS_1                (0x000000E0)
+#define HDMI_AUDIO_INFO0                 (0x000000E4)
+#define HDMI_AUDIO_INFO1                 (0x000000E8)
+#define HDMI_CS_60958_0                  (0x000000EC)
+#define HDMI_CS_60958_1                  (0x000000F0)
+#define HDMI_RAMP_CTRL0                  (0x000000F8)
+#define HDMI_RAMP_CTRL1                  (0x000000FC)
+#define HDMI_RAMP_CTRL2                  (0x00000100)
+#define HDMI_RAMP_CTRL3                  (0x00000104)
+#define HDMI_CS_60958_2                  (0x00000108)
+#define HDMI_HDCP_CTRL2                  (0x0000010C)
+#define HDMI_HDCP_CTRL                   (0x00000110)
+#define HDMI_HDCP_DEBUG_CTRL             (0x00000114)
+#define HDMI_HDCP_INT_CTRL               (0x00000118)
+#define HDMI_HDCP_LINK0_STATUS           (0x0000011C)
+#define HDMI_HDCP_DDC_CTRL_0             (0x00000120)
+#define HDMI_HDCP_DDC_CTRL_1             (0x00000124)
+#define HDMI_HDCP_DDC_STATUS             (0x00000128)
+#define HDMI_HDCP_ENTROPY_CTRL0          (0x0000012C)
+#define HDMI_HDCP_RESET                  (0x00000130)
+#define HDMI_HDCP_RCVPORT_DATA0          (0x00000134)
+#define HDMI_HDCP_RCVPORT_DATA1          (0x00000138)
+#define HDMI_HDCP_RCVPORT_DATA2_0        (0x0000013C)
+#define HDMI_HDCP_RCVPORT_DATA2_1        (0x00000140)
+#define HDMI_HDCP_RCVPORT_DATA3          (0x00000144)
+#define HDMI_HDCP_RCVPORT_DATA4          (0x00000148)
+#define HDMI_HDCP_RCVPORT_DATA5          (0x0000014C)
+#define HDMI_HDCP_RCVPORT_DATA6          (0x00000150)
+#define HDMI_HDCP_RCVPORT_DATA7          (0x00000154)
+#define HDMI_HDCP_RCVPORT_DATA8          (0x00000158)
+#define HDMI_HDCP_RCVPORT_DATA9          (0x0000015C)
+#define HDMI_HDCP_RCVPORT_DATA10         (0x00000160)
+#define HDMI_HDCP_RCVPORT_DATA11         (0x00000164)
+#define HDMI_HDCP_RCVPORT_DATA12         (0x00000168)
+#define HDMI_VENSPEC_INFO0               (0x0000016C)
+#define HDMI_VENSPEC_INFO1               (0x00000170)
+#define HDMI_VENSPEC_INFO2               (0x00000174)
+#define HDMI_VENSPEC_INFO3               (0x00000178)
+#define HDMI_VENSPEC_INFO4               (0x0000017C)
+#define HDMI_VENSPEC_INFO5               (0x00000180)
+#define HDMI_VENSPEC_INFO6               (0x00000184)
+#define HDMI_HDCP_DEBUG                  (0x00000194)
+#define HDMI_TMDS_CTRL_CHAR              (0x0000019C)
+#define HDMI_TMDS_CTRL_SEL               (0x000001A4)
+#define HDMI_TMDS_SYNCCHAR01             (0x000001A8)
+#define HDMI_TMDS_SYNCCHAR23             (0x000001AC)
+#define HDMI_TMDS_DEBUG                  (0x000001B4)
+#define HDMI_TMDS_CTL_BITS               (0x000001B8)
+#define HDMI_TMDS_DCBAL_CTRL             (0x000001BC)
+#define HDMI_TMDS_DCBAL_CHAR             (0x000001C0)
+#define HDMI_TMDS_CTL01_GEN              (0x000001C8)
+#define HDMI_TMDS_CTL23_GEN              (0x000001CC)
+#define HDMI_AUDIO_CFG                   (0x000001D0)
+#define HDMI_DEBUG                       (0x00000204)
+#define HDMI_USEC_REFTIMER               (0x00000208)
+#define HDMI_DDC_CTRL                    (0x0000020C)
+#define HDMI_DDC_ARBITRATION             (0x00000210)
+#define HDMI_DDC_INT_CTRL                (0x00000214)
+#define HDMI_DDC_SW_STATUS               (0x00000218)
+#define HDMI_DDC_HW_STATUS               (0x0000021C)
+#define HDMI_DDC_SPEED                   (0x00000220)
+#define HDMI_DDC_SETUP                   (0x00000224)
+#define HDMI_DDC_TRANS0                  (0x00000228)
+#define HDMI_DDC_TRANS1                  (0x0000022C)
+#define HDMI_DDC_TRANS2                  (0x00000230)
+#define HDMI_DDC_TRANS3                  (0x00000234)
+#define HDMI_DDC_DATA                    (0x00000238)
+#define HDMI_HDCP_SHA_CTRL               (0x0000023C)
+#define HDMI_HDCP_SHA_STATUS             (0x00000240)
+#define HDMI_HDCP_SHA_DATA               (0x00000244)
+#define HDMI_HDCP_SHA_DBG_M0_0           (0x00000248)
+#define HDMI_HDCP_SHA_DBG_M0_1           (0x0000024C)
+#define HDMI_HPD_INT_STATUS              (0x00000250)
+#define HDMI_HPD_INT_CTRL                (0x00000254)
+#define HDMI_HPD_CTRL                    (0x00000258)
+#define HDMI_HDCP_ENTROPY_CTRL1          (0x0000025C)
+#define HDMI_HDCP_SW_UPPER_AN            (0x00000260)
+#define HDMI_HDCP_SW_LOWER_AN            (0x00000264)
+#define HDMI_CRC_CTRL                    (0x00000268)
+#define HDMI_VID_CRC                     (0x0000026C)
+#define HDMI_AUD_CRC                     (0x00000270)
+#define HDMI_VBI_CRC                     (0x00000274)
+#define HDMI_DDC_REF                     (0x0000027C)
+#define HDMI_HDCP_SW_UPPER_AKSV          (0x00000284)
+#define HDMI_HDCP_SW_LOWER_AKSV          (0x00000288)
+#define HDMI_CEC_CTRL                    (0x0000028C)
+#define HDMI_CEC_WR_DATA                 (0x00000290)
+#define HDMI_CEC_RETRANSMIT              (0x00000294)
+#define HDMI_CEC_STATUS                  (0x00000298)
+#define HDMI_CEC_INT                     (0x0000029C)
+#define HDMI_CEC_ADDR                    (0x000002A0)
+#define HDMI_CEC_TIME                    (0x000002A4)
+#define HDMI_CEC_REFTIMER                (0x000002A8)
+#define HDMI_CEC_RD_DATA                 (0x000002AC)
+#define HDMI_CEC_RD_FILTER               (0x000002B0)
+#define HDMI_ACTIVE_H                    (0x000002B4)
+#define HDMI_ACTIVE_V                    (0x000002B8)
+#define HDMI_ACTIVE_V_F2                 (0x000002BC)
+#define HDMI_TOTAL                       (0x000002C0)
+#define HDMI_V_TOTAL_F2                  (0x000002C4)
+#define HDMI_FRAME_CTRL                  (0x000002C8)
+#define HDMI_AUD_INT                     (0x000002CC)
+#define HDMI_DEBUG_BUS_CTRL              (0x000002D0)
+#define HDMI_PHY_CTRL                    (0x000002D4)
+#define HDMI_CEC_WR_RANGE                (0x000002DC)
+#define HDMI_CEC_RD_RANGE                (0x000002E0)
+#define HDMI_VERSION                     (0x000002E4)
+#define HDMI_BIST_ENABLE                 (0x000002F4)
+#define HDMI_TIMING_ENGINE_EN            (0x000002F8)
+#define HDMI_INTF_CONFIG                 (0x000002FC)
+#define HDMI_HSYNC_CTL                   (0x00000300)
+#define HDMI_VSYNC_PERIOD_F0             (0x00000304)
+#define HDMI_VSYNC_PERIOD_F1             (0x00000308)
+#define HDMI_VSYNC_PULSE_WIDTH_F0        (0x0000030C)
+#define HDMI_VSYNC_PULSE_WIDTH_F1        (0x00000310)
+#define HDMI_DISPLAY_V_START_F0          (0x00000314)
+#define HDMI_DISPLAY_V_START_F1          (0x00000318)
+#define HDMI_DISPLAY_V_END_F0            (0x0000031C)
+#define HDMI_DISPLAY_V_END_F1            (0x00000320)
+#define HDMI_ACTIVE_V_START_F0           (0x00000324)
+#define HDMI_ACTIVE_V_START_F1           (0x00000328)
+#define HDMI_ACTIVE_V_END_F0             (0x0000032C)
+#define HDMI_ACTIVE_V_END_F1             (0x00000330)
+#define HDMI_DISPLAY_HCTL                (0x00000334)
+#define HDMI_ACTIVE_HCTL                 (0x00000338)
+#define HDMI_HSYNC_SKEW                  (0x0000033C)
+#define HDMI_POLARITY_CTL                (0x00000340)
+#define HDMI_TPG_MAIN_CONTROL            (0x00000344)
+#define HDMI_TPG_VIDEO_CONFIG            (0x00000348)
+#define HDMI_TPG_COMPONENT_LIMITS        (0x0000034C)
+#define HDMI_TPG_RECTANGLE               (0x00000350)
+#define HDMI_TPG_INITIAL_VALUE           (0x00000354)
+#define HDMI_TPG_BLK_WHT_PATTERN_FRAMES  (0x00000358)
+#define HDMI_TPG_RGB_MAPPING             (0x0000035C)
+#define HDMI_CEC_COMPL_CTL               (0x00000360)
+#define HDMI_CEC_RD_START_RANGE          (0x00000364)
+#define HDMI_CEC_RD_TOTAL_RANGE          (0x00000368)
+#define HDMI_CEC_RD_ERR_RESP_LO          (0x0000036C)
+#define HDMI_CEC_WR_CHECK_CONFIG         (0x00000370)
+#define HDMI_INTERNAL_TIMING_MODE        (0x00000374)
+#define HDMI_CTRL_SW_RESET               (0x00000378)
+#define HDMI_CTRL_AUDIO_RESET            (0x0000037C)
+#define HDMI_SCRATCH                     (0x00000380)
+#define HDMI_CLK_CTRL                    (0x00000384)
+#define HDMI_CLK_ACTIVE                  (0x00000388)
+#define HDMI_VBI_CFG                     (0x0000038C)
+#define HDMI_DDC_INT_CTRL0               (0x00000430)
+#define HDMI_DDC_INT_CTRL1               (0x00000434)
+#define HDMI_DDC_INT_CTRL2               (0x00000438)
+#define HDMI_DDC_INT_CTRL3               (0x0000043C)
+#define HDMI_DDC_INT_CTRL4               (0x00000440)
+#define HDMI_DDC_INT_CTRL5               (0x00000444)
+#define HDMI_HDCP2P2_DDC_CTRL            (0x0000044C)
+#define HDMI_HDCP2P2_DDC_TIMER_CTRL      (0x00000450)
+#define HDMI_HDCP2P2_DDC_TIMER_CTRL2     (0x00000454)
+#define HDMI_HDCP2P2_DDC_STATUS          (0x00000458)
+#define HDMI_SCRAMBLER_STATUS_DDC_CTRL   (0x00000464)
+#define HDMI_SCRAMBLER_STATUS_DDC_TIMER_CTRL    (0x00000468)
+#define HDMI_SCRAMBLER_STATUS_DDC_TIMER_CTRL2   (0x0000046C)
+#define HDMI_SCRAMBLER_STATUS_DDC_STATUS        (0x00000470)
+#define HDMI_SCRAMBLER_STATUS_DDC_TIMER_STATUS  (0x00000474)
+#define HDMI_SCRAMBLER_STATUS_DDC_TIMER_STATUS2 (0x00000478)
+#define HDMI_HW_DDC_CTRL                 (0x000004CC)
+#define HDMI_HDCP2P2_DDC_SW_TRIGGER      (0x000004D0)
+#define HDMI_HDCP_STATUS                 (0x00000500)
+#define HDMI_HDCP_INT_CTRL2              (0x00000504)
+
+/* HDMI PHY Registers */
+#define HDMI_PHY_ANA_CFG0                (0x00000000)
+#define HDMI_PHY_ANA_CFG1                (0x00000004)
+#define HDMI_PHY_PD_CTRL0                (0x00000010)
+#define HDMI_PHY_PD_CTRL1                (0x00000014)
+#define HDMI_PHY_BIST_CFG0               (0x00000034)
+#define HDMI_PHY_BIST_PATN0              (0x0000003C)
+#define HDMI_PHY_BIST_PATN1              (0x00000040)
+#define HDMI_PHY_BIST_PATN2              (0x00000044)
+#define HDMI_PHY_BIST_PATN3              (0x00000048)
+
+/* QFPROM Registers for HDMI/HDCP */
+#define QFPROM_RAW_FEAT_CONFIG_ROW0_LSB  (0x000000F8)
+#define QFPROM_RAW_FEAT_CONFIG_ROW0_MSB  (0x000000FC)
+#define QFPROM_RAW_VERSION_4             (0x000000A8)
+#define SEC_CTRL_HW_VERSION              (0x00006000)
+#define HDCP_KSV_LSB                     (0x000060D8)
+#define HDCP_KSV_MSB                     (0x000060DC)
+#define HDCP_KSV_VERSION_4_OFFSET        (0x00000014)
+
+/* SEC_CTRL version that supports HDCP SEL */
+#define HDCP_SEL_MIN_SEC_VERSION         (0x50010000)
+
+#define LPASS_LPAIF_RDDMA_CTL0           (0xFE152000)
+#define LPASS_LPAIF_RDDMA_PER_CNT0       (0x00000014)
+
+/* TX major version that supports scrambling */
+#define HDMI_TX_SCRAMBLER_MIN_TX_VERSION 0x04
+
+/* TX major versions */
+#define HDMI_TX_VERSION_4         4
+#define HDMI_TX_VERSION_3         3
+
+/* HDMI SCDC register offsets */
+#define HDMI_SCDC_UPDATE_0              0x10
+#define HDMI_SCDC_UPDATE_1              0x11
+#define HDMI_SCDC_TMDS_CONFIG           0x20
+#define HDMI_SCDC_SCRAMBLER_STATUS      0x21
+#define HDMI_SCDC_CONFIG_0              0x30
+#define HDMI_SCDC_STATUS_FLAGS_0        0x40
+#define HDMI_SCDC_STATUS_FLAGS_1        0x41
+#define HDMI_SCDC_ERR_DET_0_L           0x50
+#define HDMI_SCDC_ERR_DET_0_H           0x51
+#define HDMI_SCDC_ERR_DET_1_L           0x52
+#define HDMI_SCDC_ERR_DET_1_H           0x53
+#define HDMI_SCDC_ERR_DET_2_L           0x54
+#define HDMI_SCDC_ERR_DET_2_H           0x55
+#define HDMI_SCDC_ERR_DET_CHECKSUM      0x56
+
+/* HDCP secure registers directly accessible to HLOS since HDMI controller
+ * version major version 4.0
+ */
+#define HDCP_SEC_TZ_HV_HLOS_HDCP_RCVPORT_DATA0  (0x00000004)
+#define HDCP_SEC_TZ_HV_HLOS_HDCP_RCVPORT_DATA1  (0x00000008)
+#define HDCP_SEC_TZ_HV_HLOS_HDCP_RCVPORT_DATA7  (0x0000000C)
+#define HDCP_SEC_TZ_HV_HLOS_HDCP_RCVPORT_DATA8  (0x00000010)
+#define HDCP_SEC_TZ_HV_HLOS_HDCP_RCVPORT_DATA9  (0x00000014)
+#define HDCP_SEC_TZ_HV_HLOS_HDCP_RCVPORT_DATA10 (0x00000018)
+#define HDCP_SEC_TZ_HV_HLOS_HDCP_RCVPORT_DATA11 (0x0000001C)
+#define HDCP_SEC_TZ_HV_HLOS_HDCP_RCVPORT_DATA12 (0x00000020)
+#define HDCP_SEC_TZ_HV_HLOS_HDCP_SHA_CTRL       (0x00000024)
+#define HDCP_SEC_TZ_HV_HLOS_HDCP_SHA_DATA       (0x00000028)
+
+#endif /* _SDE_HDMI_REGS_H */
+
diff -Nruw linux-4.4.115-fbx/drivers/gpu/drm/msm/hdmi-staging./sde_hdmi_util.c linux-4.4.115-fbx/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_util.c
--- linux-4.4.115-fbx/drivers/gpu/drm/msm/hdmi-staging./sde_hdmi_util.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_util.c	2019-10-29 09:26:23.629203041 +0100
@@ -0,0 +1,1142 @@
+/* Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/slab.h>
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/iopoll.h>
+#include <linux/types.h>
+#include <linux/switch.h>
+#include <linux/gcd.h>
+
+#include "drm_edid.h"
+#include "sde_kms.h"
+#include "sde_hdmi.h"
+#include "sde_hdmi_regs.h"
+#include "hdmi.h"
+
+#define HDMI_SEC_TO_MS 1000
+#define HDMI_MS_TO_US 1000
+#define HDMI_SEC_TO_US (HDMI_SEC_TO_MS * HDMI_MS_TO_US)
+#define HDMI_KHZ_TO_HZ 1000
+#define HDMI_BUSY_WAIT_DELAY_US 100
+
+static void sde_hdmi_hdcp2p2_ddc_clear_status(struct sde_hdmi *display)
+{
+	u32 reg_val;
+	struct hdmi *hdmi;
+
+	if (!display) {
+		pr_err("invalid ddc ctrl\n");
+		return;
+	}
+	hdmi = display->ctrl.ctrl;
+	/* check for errors and clear status */
+	reg_val = hdmi_read(hdmi, HDMI_HDCP2P2_DDC_STATUS);
+
+	if (reg_val & BIT(4)) {
+		pr_debug("ddc aborted\n");
+		reg_val |= BIT(5);
+	}
+
+	if (reg_val & BIT(8)) {
+		pr_debug("timed out\n");
+		reg_val |= BIT(9);
+	}
+
+	if (reg_val & BIT(12)) {
+		pr_debug("NACK0\n");
+		reg_val |= BIT(13);
+	}
+
+	if (reg_val & BIT(14)) {
+		pr_debug("NACK1\n");
+		reg_val |= BIT(15);
+	}
+
+	hdmi_write(hdmi, HDMI_HDCP2P2_DDC_STATUS, reg_val);
+}
+
+static const char *sde_hdmi_hdr_sname(enum sde_hdmi_hdr_state hdr_state)
+{
+	switch (hdr_state) {
+	case HDR_DISABLE: return "HDR_DISABLE";
+	case HDR_ENABLE: return "HDR_ENABLE";
+	case HDR_RESET: return "HDR_RESET";
+	default: return "HDR_INVALID_STATE";
+	}
+}
+
+static u8 sde_hdmi_infoframe_checksum(u8 *ptr, size_t size)
+{
+	u8 csum = 0;
+	size_t i;
+
+	/* compute checksum */
+	for (i = 0; i < size; i++)
+		csum += ptr[i];
+
+	return 256 - csum;
+}
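+
+/*
+ * Example: if the header and payload bytes sum to 0xA7 (mod 256), the
+ * helper returns 256 - 0xA7 = 0x59, so that all packet bytes including
+ * the checksum sum to zero modulo 256, as InfoFrame checksums require.
+ */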
+
+u8 sde_hdmi_hdr_set_chksum(struct drm_msm_ext_panel_hdr_metadata *hdr_meta)
+{
+	u8 *buff;
+	u8 *ptr;
+	u32 length;
+	u32 size;
+	u32 chksum = 0;
+	u32 const type_code = 0x87;
+	u32 const version = 0x01;
+	u32 const descriptor_id = 0x00;
+
+	/* length of metadata is 26 bytes */
+	length = 0x1a;
+	/* add 4 bytes for the header */
+	size = length + HDMI_INFOFRAME_HEADER_SIZE;
+
+	buff = kzalloc(size, GFP_KERNEL);
+
+	if (!buff) {
+		SDE_ERROR("invalid buff\n");
+		goto err_alloc;
+	}
+
+	ptr = buff;
+
+	buff[0] = type_code;
+	buff[1] = version;
+	buff[2] = length;
+	buff[3] = 0;
+	/* start infoframe payload */
+	buff += HDMI_INFOFRAME_HEADER_SIZE;
+
+	buff[0] = hdr_meta->eotf;
+	buff[1] = descriptor_id;
+
+	buff[2] = hdr_meta->display_primaries_x[0] & 0xff;
+	buff[3] = hdr_meta->display_primaries_x[0] >> 8;
+
+	buff[4] = hdr_meta->display_primaries_x[1] & 0xff;
+	buff[5] = hdr_meta->display_primaries_x[1] >> 8;
+
+	buff[6] = hdr_meta->display_primaries_x[2] & 0xff;
+	buff[7] = hdr_meta->display_primaries_x[2] >> 8;
+
+	buff[8] = hdr_meta->display_primaries_y[0] & 0xff;
+	buff[9] = hdr_meta->display_primaries_y[0] >> 8;
+
+	buff[10] = hdr_meta->display_primaries_y[1] & 0xff;
+	buff[11] = hdr_meta->display_primaries_y[1] >> 8;
+
+	buff[12] = hdr_meta->display_primaries_y[2] & 0xff;
+	buff[13] = hdr_meta->display_primaries_y[2] >> 8;
+
+	buff[14] = hdr_meta->white_point_x & 0xff;
+	buff[15] = hdr_meta->white_point_x >> 8;
+	buff[16] = hdr_meta->white_point_y & 0xff;
+	buff[17] = hdr_meta->white_point_y >> 8;
+
+	buff[18] = hdr_meta->max_luminance & 0xff;
+	buff[19] = hdr_meta->max_luminance >> 8;
+
+	buff[20] = hdr_meta->min_luminance & 0xff;
+	buff[21] = hdr_meta->min_luminance >> 8;
+
+	buff[22] = hdr_meta->max_content_light_level & 0xff;
+	buff[23] = hdr_meta->max_content_light_level >> 8;
+
+	buff[24] = hdr_meta->max_average_light_level & 0xff;
+	buff[25] = hdr_meta->max_average_light_level >> 8;
+
+	chksum = sde_hdmi_infoframe_checksum(ptr, size);
+
+	kfree(ptr);
+
+err_alloc:
+	return chksum;
+}
+
+/**
+ * sde_hdmi_dump_regs - utility to dump HDMI regs
+ * @hdmi_display: Pointer to private display handle
+ * Return: void
+ */
+void sde_hdmi_dump_regs(void *hdmi_display)
+{
+	struct sde_hdmi *display = (struct sde_hdmi *)hdmi_display;
+	struct hdmi *hdmi;
+	int i;
+	u32 addr_off = 0;
+	u32 len = 0;
+
+	if (!display) {
+		pr_err("invalid input\n");
+		return;
+	}
+
+	hdmi = display->ctrl.ctrl;
+
+	if (!hdmi) {
+		pr_err("invalid input\n");
+		return;
+	}
+
+	if (!hdmi->power_on || !display->connected) {
+		SDE_ERROR("HDMI display is not ready\n");
+		return;
+	}
+
+	len = hdmi->mmio_len;
+
+	if (len % 16)
+		len += 16;
+	len /= 16;
+
+	pr_info("HDMI CORE regs\n");
+	for (i = 0; i < len; i++) {
+		u32 x0, x4, x8, xc;
+
+		x0 = hdmi_read(hdmi, addr_off+0x0);
+		x4 = hdmi_read(hdmi, addr_off+0x4);
+		x8 = hdmi_read(hdmi, addr_off+0x8);
+		xc = hdmi_read(hdmi, addr_off+0xc);
+
+		pr_info("%08x : %08x %08x %08x %08x\n", addr_off, x0, x4, x8,
+				xc);
+
+		addr_off += 16;
+	}
+}
+
+int sde_hdmi_ddc_hdcp2p2_isr(void *hdmi_display)
+{
+	struct sde_hdmi_tx_hdcp2p2_ddc_data *data;
+	u32 intr0, intr2, intr5;
+	u32 msg_size;
+	int rc = 0;
+	struct sde_hdmi_tx_ddc_ctrl *ddc_ctrl;
+	struct sde_hdmi *display = (struct sde_hdmi *)hdmi_display;
+	struct hdmi *hdmi;
+
+	ddc_ctrl = &display->ddc_ctrl;
+	data = &ddc_ctrl->sde_hdcp2p2_ddc_data;
+	hdmi = display->ctrl.ctrl;
+
+	if (!hdmi) {
+		pr_err("invalid input\n");
+		return -EINVAL;
+	}
+
+	intr0 = hdmi_read(hdmi, HDMI_DDC_INT_CTRL0);
+	intr2 = hdmi_read(hdmi, HDMI_HDCP_INT_CTRL2);
+	intr5 = hdmi_read(hdmi, HDMI_DDC_INT_CTRL5);
+
+	pr_debug("intr0: 0x%x, intr2: 0x%x, intr5: 0x%x\n",
+			 intr0, intr2, intr5);
+
+	/* check if encryption is enabled */
+	if (intr2 & BIT(0)) {
+		/*
+		 * ack encryption ready interrupt.
+		 * disable encryption ready interrupt.
+		 * enable encryption not ready interrupt.
+		 */
+		intr2 &= ~BIT(2);
+		intr2 |= BIT(1) | BIT(6);
+
+		pr_info("HDCP 2.2 Encryption enabled\n");
+		data->encryption_ready = true;
+	}
+
+	/* check if encryption is disabled */
+	if (intr2 & BIT(4)) {
+		/*
+		 * ack encryption not ready interrupt.
+		 * disable encryption not ready interrupt.
+		 * enable encryption ready interrupt.
+		 */
+		intr2  &= ~BIT(6);
+		intr2  |= BIT(5) | BIT(2);
+
+		pr_info("HDCP 2.2 Encryption disabled\n");
+		data->encryption_ready = false;
+	}
+
+	hdmi_write(hdmi, HDMI_HDCP_INT_CTRL2, intr2);
+
+	/* get the message size bits 29:20 */
+	msg_size = (intr0 & (0x3FF << 20)) >> 20;
+
+	if (msg_size) {
+		/* ack and disable message size interrupt */
+		intr0 |= BIT(30);
+		intr0 &= ~BIT(31);
+
+		data->message_size = msg_size;
+	}
+
+	/* check and disable ready interrupt */
+	if (intr0 & BIT(16)) {
+		/* ack ready/not ready interrupt */
+		intr0 |= BIT(17);
+		intr0 &= ~BIT(18);
+		pr_debug("got ready interrupt\n");
+		data->ready = true;
+	}
+
+	/* check for reauth req interrupt */
+	if (intr0 & BIT(12)) {
+		/* ack and disable reauth req interrupt */
+		intr0 |= BIT(13);
+		intr0 &= ~BIT(14);
+		pr_err("got reauth interrupt\n");
+		data->reauth_req = true;
+	}
+
+	/* check for ddc fail interrupt */
+	if (intr0 & BIT(8)) {
+		/* ack ddc fail interrupt */
+		intr0 |= BIT(9);
+		pr_err("got ddc fail interrupt\n");
+		data->ddc_max_retries_fail = true;
+	}
+
+	/* check for ddc done interrupt */
+	if (intr0 & BIT(4)) {
+		/* ack ddc done interrupt */
+		intr0 |= BIT(5);
+		pr_debug("got ddc done interrupt\n");
+		data->ddc_done = true;
+	}
+
+	/* check for ddc read req interrupt */
+	if (intr0 & BIT(0)) {
+		/* ack read req interrupt */
+		intr0 |= BIT(1);
+
+		data->ddc_read_req = true;
+	}
+
+	hdmi_write(hdmi, HDMI_DDC_INT_CTRL0, intr0);
+
+	if (intr5 & BIT(0)) {
+		pr_err("RXSTATUS_DDC_REQ_TIMEOUT\n");
+
+		/* ack and disable timeout interrupt */
+		intr5 |= BIT(1);
+		intr5 &= ~BIT(2);
+
+		data->ddc_timeout = true;
+	}
+	hdmi_write(hdmi, HDMI_DDC_INT_CTRL5, intr5);
+
+	if (data->message_size || data->ready || data->reauth_req) {
+		if (data->wait) {
+			complete(&ddc_ctrl->rx_status_done);
+		} else if (data->link_cb && data->link_data) {
+			data->link_cb(data->link_data);
+		} else {
+			pr_err("new msg/reauth not handled\n");
+			rc = -EINVAL;
+		}
+	}
+
+	sde_hdmi_hdcp2p2_ddc_clear_status(display);
+
+	return rc;
+}
+
+int sde_hdmi_ddc_scrambling_isr(void *hdmi_display)
+{
+	bool scrambler_timer_off = false;
+	u32 intr2, intr5;
+	struct sde_hdmi *display = (struct sde_hdmi *)hdmi_display;
+	struct hdmi *hdmi;
+
+	hdmi = display->ctrl.ctrl;
+
+	if (!hdmi) {
+		pr_err("invalid input\n");
+		return -EINVAL;
+	}
+
+	intr2 = hdmi_read(hdmi, HDMI_DDC_INT_CTRL2);
+	intr5 = hdmi_read(hdmi, HDMI_DDC_INT_CTRL5);
+
+	pr_debug("intr2: 0x%x, intr5: 0x%x\n", intr2, intr5);
+
+	if (intr2 & BIT(12)) {
+		pr_err("SCRAMBLER_STATUS_NOT\n");
+
+		intr2 |= BIT(14);
+		scrambler_timer_off = true;
+	}
+
+	if (intr2 & BIT(8)) {
+		pr_err("SCRAMBLER_STATUS_DDC_FAILED\n");
+
+		intr2 |= BIT(9);
+
+		scrambler_timer_off = true;
+	}
+	hdmi_write(hdmi, HDMI_DDC_INT_CTRL2, intr2);
+
+	if (intr5 & BIT(8)) {
+		pr_err("SCRAMBLER_STATUS_DDC_REQ_TIMEOUT\n");
+		intr5 |= BIT(9);
+		intr5 &= ~BIT(10);
+		scrambler_timer_off = true;
+	}
+	hdmi_write(hdmi, HDMI_DDC_INT_CTRL5, intr5);
+
+	if (scrambler_timer_off)
+		_sde_hdmi_scrambler_ddc_disable((void *)display);
+
+	return 0;
+}
+
+static int sde_hdmi_ddc_read_retry(struct sde_hdmi *display)
+{
+	int status;
+	int busy_wait_us;
+	struct sde_hdmi_tx_ddc_ctrl *ddc_ctrl;
+	struct sde_hdmi_tx_ddc_data *ddc_data;
+	struct hdmi *hdmi;
+
+	if (!display) {
+		SDE_ERROR("invalid input\n");
+		return -EINVAL;
+	}
+
+	hdmi = display->ctrl.ctrl;
+	ddc_ctrl = &display->ddc_ctrl;
+	ddc_data = &ddc_ctrl->ddc_data;
+
+	if (!ddc_data) {
+		SDE_ERROR("invalid input\n");
+		return -EINVAL;
+	}
+
+	if (!ddc_data->data_buf) {
+		status = -EINVAL;
+		SDE_ERROR("%s: invalid buf\n", ddc_data->what);
+		goto error;
+	}
+
+	if (ddc_data->retry < 0) {
+		SDE_ERROR("invalid no. of retries %d\n", ddc_data->retry);
+		status = -EINVAL;
+		goto error;
+	}
+
+	do {
+		if (ddc_data->hard_timeout) {
+			HDMI_UTIL_DEBUG("using hard_timeout %dms\n",
+					 ddc_data->hard_timeout);
+
+			busy_wait_us = ddc_data->hard_timeout * HDMI_MS_TO_US;
+			hdmi->use_hard_timeout = true;
+			hdmi->busy_wait_us = busy_wait_us;
+		}
+
+		/* Calling upstream ddc read method */
+		status = hdmi_ddc_read(hdmi, ddc_data->dev_addr,
+			ddc_data->offset,
+			ddc_data->data_buf, ddc_data->request_len,
+			false);
+
+		if (ddc_data->hard_timeout)
+			ddc_data->timeout_left = hdmi->timeout_count;
+
+		if (ddc_data->hard_timeout && !hdmi->timeout_count) {
+			HDMI_UTIL_DEBUG("%s: timed out\n", ddc_data->what);
+			status = -ETIMEDOUT;
+		}
+
+	} while (status && ddc_data->retry--);
+
+	if (status) {
+		HDMI_UTIL_ERROR("%s: failed status = %d\n",
+						ddc_data->what, status);
+		goto error;
+	}
+
+	HDMI_UTIL_DEBUG("%s: success\n", ddc_data->what);
+
+error:
+	return status;
+} /* sde_hdmi_ddc_read_retry */
+
+int sde_hdmi_ddc_read(void *cb_data)
+{
+	int rc = 0;
+	int retry;
+	struct sde_hdmi_tx_ddc_ctrl *ddc_ctrl;
+	struct sde_hdmi_tx_ddc_data *ddc_data;
+	struct sde_hdmi *display = (struct sde_hdmi *)cb_data;
+
+	if (!display) {
+		SDE_ERROR("invalid ddc ctrl\n");
+		return -EINVAL;
+	}
+
+	ddc_ctrl = &display->ddc_ctrl;
+	ddc_data = &ddc_ctrl->ddc_data;
+	retry = ddc_data->retry;
+
+	rc = sde_hdmi_ddc_read_retry(display);
+	if (!rc)
+		return rc;
+
+	if (ddc_data->retry_align) {
+		ddc_data->retry = retry;
+
+		ddc_data->request_len = 32 * ((ddc_data->data_len + 31) / 32);
+		rc = sde_hdmi_ddc_read_retry(display);
+	}
+
+	return rc;
+} /* sde_hdmi_ddc_read */
+
+int sde_hdmi_ddc_write(void *cb_data)
+{
+	int status;
+	struct sde_hdmi_tx_ddc_ctrl *ddc_ctrl;
+	struct sde_hdmi_tx_ddc_data *ddc_data;
+	int busy_wait_us;
+	struct hdmi *hdmi;
+	struct sde_hdmi *display = (struct sde_hdmi *)cb_data;
+
+	if (!display) {
+		SDE_ERROR("invalid input\n");
+		return -EINVAL;
+	}
+
+	hdmi = display->ctrl.ctrl;
+	ddc_ctrl = &display->ddc_ctrl;
+
+	ddc_data = &ddc_ctrl->ddc_data;
+
+	if (!ddc_data) {
+		SDE_ERROR("invalid input\n");
+		return -EINVAL;
+	}
+
+	if (!ddc_data->data_buf) {
+		status = -EINVAL;
+		SDE_ERROR("%s: invalid buf\n", ddc_data->what);
+		goto error;
+	}
+
+	if (ddc_data->retry < 0) {
+		SDE_ERROR("invalid no. of retries %d\n", ddc_data->retry);
+		status = -EINVAL;
+		goto error;
+	}
+
+	do {
+		if (ddc_data->hard_timeout) {
+			busy_wait_us = ddc_data->hard_timeout * HDMI_MS_TO_US;
+			hdmi->use_hard_timeout = true;
+			hdmi->busy_wait_us = busy_wait_us;
+		}
+
+		status = hdmi_ddc_write(hdmi,
+			ddc_data->dev_addr, ddc_data->offset,
+			ddc_data->data_buf, ddc_data->data_len,
+			false);
+
+		if (ddc_data->hard_timeout)
+			ddc_data->timeout_left = hdmi->timeout_count;
+
+		if (ddc_data->hard_timeout && !hdmi->timeout_count) {
+			HDMI_UTIL_ERROR("%s: timed out\n", ddc_data->what);
+			status = -ETIMEDOUT;
+		}
+
+	} while (status && ddc_data->retry--);
+
+	if (status) {
+		HDMI_UTIL_ERROR("%s: failed status = %d\n",
+						ddc_data->what, status);
+		goto error;
+	}
+
+	HDMI_UTIL_DEBUG("%s: success\n", ddc_data->what);
+error:
+	return status;
+} /* sde_hdmi_ddc_write */
+
+bool sde_hdmi_tx_is_hdcp_enabled(struct sde_hdmi *hdmi_ctrl)
+{
+	if (!hdmi_ctrl) {
+		SDE_ERROR("%s: invalid input\n", __func__);
+		return false;
+	}
+
+	return (hdmi_ctrl->hdcp14_present || hdmi_ctrl->hdcp22_present) &&
+		hdmi_ctrl->hdcp_ops;
+}
+
+bool sde_hdmi_tx_is_encryption_set(struct sde_hdmi *hdmi_ctrl)
+{
+	bool enc_en = true;
+	u32 reg_val;
+	struct hdmi *hdmi;
+
+	if (!hdmi_ctrl) {
+		SDE_ERROR("%s: invalid input\n", __func__);
+		goto end;
+	}
+
+	hdmi = hdmi_ctrl->ctrl.ctrl;
+
+	/* Check if encryption was enabled */
+	if (hdmi_ctrl->hdmi_tx_major_version <= HDMI_TX_VERSION_3) {
+		reg_val = hdmi_read(hdmi, HDMI_HDCP_CTRL2);
+		if ((reg_val & BIT(0)) && (reg_val & BIT(1)))
+			goto end;
+
+		if (hdmi_read(hdmi, HDMI_CTRL) & BIT(2))
+			goto end;
+	} else {
+		reg_val = hdmi_read(hdmi, HDMI_HDCP_STATUS);
+		if (reg_val)
+			goto end;
+	}
+
+	return false;
+
+end:
+	return enc_en;
+} /* sde_hdmi_tx_is_encryption_set */
+
+bool sde_hdmi_tx_is_stream_shareable(struct sde_hdmi *hdmi_ctrl)
+{
+	bool ret;
+
+	if (!hdmi_ctrl) {
+		SDE_ERROR("%s: invalid input\n", __func__);
+		return false;
+	}
+
+	switch (hdmi_ctrl->enc_lvl) {
+	case HDCP_STATE_AUTH_ENC_NONE:
+		ret = true;
+		break;
+	case HDCP_STATE_AUTH_ENC_1X:
+		ret = sde_hdmi_tx_is_hdcp_enabled(hdmi_ctrl) &&
+				hdmi_ctrl->auth_state;
+		break;
+	case HDCP_STATE_AUTH_ENC_2P2:
+		ret = hdmi_ctrl->hdcp22_present &&
+			hdmi_ctrl->auth_state;
+		break;
+	default:
+		ret = false;
+	}
+
+	return ret;
+}
+
+bool sde_hdmi_tx_is_panel_on(struct sde_hdmi *hdmi_ctrl)
+{
+	struct hdmi *hdmi;
+
+	if (!hdmi_ctrl) {
+		SDE_ERROR("%s: invalid input\n", __func__);
+		return false;
+	}
+
+	hdmi = hdmi_ctrl->ctrl.ctrl;
+
+	return hdmi_ctrl->connected && hdmi->power_on;
+}
+
+int sde_hdmi_config_avmute(struct hdmi *hdmi, bool set)
+{
+	u32 av_mute_status;
+	bool av_pkt_en = false;
+
+	if (!hdmi) {
+		SDE_ERROR("invalid HDMI Ctrl\n");
+		return -ENODEV;
+	}
+
+	av_mute_status = hdmi_read(hdmi, HDMI_GC);
+
+	if (set) {
+		if (!(av_mute_status & BIT(0))) {
+			hdmi_write(hdmi, HDMI_GC, av_mute_status | BIT(0));
+			av_pkt_en = true;
+		}
+	} else {
+		if (av_mute_status & BIT(0)) {
+			hdmi_write(hdmi, HDMI_GC, av_mute_status & ~BIT(0));
+			av_pkt_en = true;
+		}
+	}
+
+	/* Enable AV Mute transmission here */
+	if (av_pkt_en)
+		hdmi_write(hdmi, HDMI_VBI_PKT_CTRL,
+			hdmi_read(hdmi, HDMI_VBI_PKT_CTRL) | (BIT(4) | BIT(5)));
+
+	pr_info("AVMUTE %s\n", set ? "set" : "cleared");
+
+	return 0;
+}
+
+int _sde_hdmi_get_timeout_in_hysnc(void *hdmi_display, u32 timeout_ms)
+{
+	struct sde_hdmi *display = (struct sde_hdmi *)hdmi_display;
+	struct drm_display_mode mode = display->mode;
+	/*
+	 * pixel clock = h_total * v_total * fps
+	 * in one second, "pixel clock" pixels are transmitted.
+	 * time taken by one line (h_total) = 1 s / (v_total * fps).
+	 * lines for a given time = (time_ms * 1000) / (1000000 / (v_total * fps))
+	 *                        = (time_ms * clock) / h_total
+	 */
+
+	return (timeout_ms * mode.clock / mode.htotal);
+}
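+
+/*
+ * Worked example (illustrative figures): for a 1080p60 mode with
+ * mode.clock = 148500 kHz and mode.htotal = 2200, a 200 ms timeout
+ * converts to 200 * 148500 / 2200 = 13500 hsync periods.
+ */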
+
+static void sde_hdmi_hdcp2p2_ddc_reset(struct sde_hdmi *hdmi_ctrl)
+{
+	u32 reg_val;
+	struct hdmi *hdmi = hdmi_ctrl->ctrl.ctrl;
+
+	if (!hdmi) {
+		pr_err("Invalid parameters\n");
+		return;
+	}
+
+	/*
+	 * Clear acks for DDC_REQ, DDC_DONE, DDC_FAILED, RXSTATUS_READY,
+	 * RXSTATUS_MSG_SIZE
+	 */
+	reg_val = BIT(30) | BIT(17) | BIT(13) | BIT(9) | BIT(5) | BIT(1);
+	hdmi_write(hdmi, HDMI_DDC_INT_CTRL0, reg_val);
+	/* Reset DDC timers */
+	reg_val = BIT(0) | hdmi_read(hdmi, HDMI_HDCP2P2_DDC_CTRL);
+	hdmi_write(hdmi, HDMI_HDCP2P2_DDC_CTRL, reg_val);
+	reg_val = hdmi_read(hdmi, HDMI_HDCP2P2_DDC_CTRL);
+	reg_val &= ~BIT(0);
+	hdmi_write(hdmi, HDMI_HDCP2P2_DDC_CTRL, reg_val);
+}
+
+void sde_hdmi_hdcp2p2_ddc_disable(void *hdmi_display)
+{
+	struct sde_hdmi *display = (struct sde_hdmi *)hdmi_display;
+	u32 reg_val;
+	struct hdmi *hdmi = display->ctrl.ctrl;
+
+	if (!hdmi) {
+		pr_err("Invalid parameters\n");
+		return;
+	}
+
+	sde_hdmi_hdcp2p2_ddc_reset(display);
+
+	/* Disable HW DDC access to RxStatus register */
+	reg_val = hdmi_read(hdmi, HDMI_HW_DDC_CTRL);
+	reg_val &= ~(BIT(1) | BIT(0));
+
+	hdmi_write(hdmi, HDMI_HW_DDC_CTRL, reg_val);
+}
+
+static void _sde_hdmi_scrambler_ddc_reset(struct hdmi *hdmi)
+{
+	u32 reg_val;
+
+	/* clear ack and disable interrupts */
+	reg_val = BIT(14) | BIT(9) | BIT(5) | BIT(1);
+	hdmi_write(hdmi, REG_HDMI_DDC_INT_CTRL2, reg_val);
+
+	/* Reset DDC timers */
+	reg_val = BIT(0) | hdmi_read(hdmi, REG_HDMI_SCRAMBLER_STATUS_DDC_CTRL);
+	hdmi_write(hdmi, REG_HDMI_SCRAMBLER_STATUS_DDC_CTRL, reg_val);
+
+	reg_val = hdmi_read(hdmi, REG_HDMI_SCRAMBLER_STATUS_DDC_CTRL);
+	reg_val &= ~BIT(0);
+	hdmi_write(hdmi, REG_HDMI_SCRAMBLER_STATUS_DDC_CTRL, reg_val);
+}
+
+void sde_hdmi_ctrl_cfg(struct hdmi *hdmi, bool power_on)
+{
+	uint32_t ctrl = 0;
+	unsigned long flags;
+
+	spin_lock_irqsave(&hdmi->reg_lock, flags);
+	ctrl = hdmi_read(hdmi, REG_HDMI_CTRL);
+
+	if (power_on)
+		ctrl |= HDMI_CTRL_ENABLE;
+	else
+		ctrl &= ~HDMI_CTRL_ENABLE;
+
+	hdmi_write(hdmi, REG_HDMI_CTRL, ctrl);
+	spin_unlock_irqrestore(&hdmi->reg_lock, flags);
+
+	HDMI_UTIL_DEBUG("HDMI Core: %s, HDMI_CTRL=0x%08x\n",
+			power_on ? "Enable" : "Disable", ctrl);
+}
+
+static void sde_hdmi_clear_pkt_send(struct hdmi *hdmi)
+{
+	uint32_t reg_val;
+
+	/* Clear audio sample send */
+	reg_val = hdmi_read(hdmi, HDMI_AUDIO_PKT_CTRL);
+	reg_val &= ~BIT(0);
+	hdmi_write(hdmi, HDMI_AUDIO_PKT_CTRL, reg_val);
+
+	/* Clear sending VBI ctrl packets */
+	reg_val = hdmi_read(hdmi, HDMI_VBI_PKT_CTRL);
+	reg_val &= ~(BIT(4) | BIT(8) | BIT(12));
+	hdmi_write(hdmi, HDMI_VBI_PKT_CTRL, reg_val);
+
+	/* Clear sending infoframe packets */
+	reg_val = hdmi_read(hdmi, HDMI_INFOFRAME_CTRL0);
+	reg_val &= ~(BIT(0) | BIT(4) | BIT(8) | BIT(12)
+				 | BIT(15) | BIT(19));
+	hdmi_write(hdmi, HDMI_INFOFRAME_CTRL0, reg_val);
+
+	/* Clear sending general ctrl packets */
+	reg_val = hdmi_read(hdmi, HDMI_GEN_PKT_CTRL);
+	reg_val &= ~(BIT(0) | BIT(4));
+	hdmi_write(hdmi, HDMI_GEN_PKT_CTRL, reg_val);
+}
+
+void sde_hdmi_ctrl_reset(struct hdmi *hdmi)
+{
+	uint32_t reg_val;
+
+	/* Assert HDMI CTRL SW reset */
+	reg_val = hdmi_read(hdmi, HDMI_CTRL_SW_RESET);
+	reg_val |= BIT(0);
+	hdmi_write(hdmi, HDMI_CTRL_SW_RESET, reg_val);
+
+	/* disable the controller and put to known state */
+	sde_hdmi_ctrl_cfg(hdmi, 0);
+
+	/* disable the audio engine */
+	reg_val = hdmi_read(hdmi, HDMI_AUDIO_CFG);
+	reg_val &= ~BIT(0);
+	hdmi_write(hdmi, HDMI_AUDIO_CFG, reg_val);
+
+	/* clear sending packets to sink */
+	sde_hdmi_clear_pkt_send(hdmi);
+
+	/* De-assert HDMI CTRL SW reset */
+	reg_val = hdmi_read(hdmi, HDMI_CTRL_SW_RESET);
+	reg_val &= ~BIT(0);
+	hdmi_write(hdmi, HDMI_CTRL_SW_RESET, reg_val);
+}
+
+void _sde_hdmi_scrambler_ddc_disable(void *hdmi_display)
+{
+	struct sde_hdmi *display = (struct sde_hdmi *)hdmi_display;
+	u32 reg_val;
+
+	struct hdmi *hdmi = display->ctrl.ctrl;
+
+	if (!hdmi) {
+		pr_err("Invalid parameters\n");
+		return;
+	}
+
+	_sde_hdmi_scrambler_ddc_reset(hdmi);
+	/* Disable HW DDC access to RxStatus register */
+	reg_val = hdmi_read(hdmi, REG_HDMI_HW_DDC_CTRL);
+	reg_val &= ~(BIT(8) | BIT(9));
+	hdmi_write(hdmi, REG_HDMI_HW_DDC_CTRL, reg_val);
+}
+
+void sde_hdmi_ddc_config(void *hdmi_display)
+{
+	struct sde_hdmi *display = (struct sde_hdmi *)hdmi_display;
+	struct hdmi *hdmi = display->ctrl.ctrl;
+	uint32_t ddc_speed;
+
+	if (!hdmi) {
+		pr_err("Invalid parameters\n");
+		return;
+	}
+
+	ddc_speed = hdmi_read(hdmi, REG_HDMI_DDC_SPEED);
+	ddc_speed |= HDMI_DDC_SPEED_THRESHOLD(2);
+	ddc_speed |= HDMI_DDC_SPEED_PRESCALE(12);
+
+	hdmi_write(hdmi, REG_HDMI_DDC_SPEED,
+			   ddc_speed);
+
+	hdmi_write(hdmi, REG_HDMI_DDC_SETUP,
+			   HDMI_DDC_SETUP_TIMEOUT(0xff));
+
+	/* enable reference timer for 19us */
+	hdmi_write(hdmi, REG_HDMI_DDC_REF,
+			   HDMI_DDC_REF_REFTIMER_ENABLE |
+			   HDMI_DDC_REF_REFTIMER(19));
+}
+
+int sde_hdmi_hdcp2p2_read_rxstatus(void *hdmi_display)
+{
+	u32 reg_val;
+	u32 intr_en_mask;
+	u32 timeout;
+	u32 timer;
+	int rc = 0;
+	int busy_wait_us;
+	struct sde_hdmi_tx_hdcp2p2_ddc_data *data;
+	struct sde_hdmi *display = (struct sde_hdmi *)hdmi_display;
+	struct hdmi *hdmi = display->ctrl.ctrl;
+	struct sde_hdmi_tx_ddc_ctrl *ddc_ctrl;
+	u32 rem;
+
+	if (!hdmi) {
+		pr_err("Invalid ddc data\n");
+		return -EINVAL;
+	}
+
+	ddc_ctrl = &display->ddc_ctrl;
+	data = &ddc_ctrl->sde_hdcp2p2_ddc_data;
+	if (!data) {
+		pr_err("Invalid ddc data\n");
+		return -EINVAL;
+	}
+
+	rc = ddc_clear_irq(hdmi);
+	if (rc) {
+		pr_err("DDC clear irq failed\n");
+		return rc;
+	}
+	intr_en_mask = data->intr_mask;
+	intr_en_mask |= BIT(HDCP2P2_RXSTATUS_DDC_FAILED_INTR_MASK);
+
+	/* Disable short read for now, sinks don't support it */
+	reg_val = hdmi_read(hdmi, HDMI_HDCP2P2_DDC_CTRL);
+	reg_val |= BIT(4);
+	hdmi_write(hdmi, HDMI_HDCP2P2_DDC_CTRL, reg_val);
+	/*
+	 * Setup the DDC timers for HDMI_HDCP2P2_DDC_TIMER_CTRL and
+	 * HDMI_HDCP2P2_DDC_TIMER_CTRL2.
+	 * The timers are:
+	 * 1. DDC_REQUEST_TIMER: Timeout in hsyncs in which to wait for the
+	 *    HDCP 2.2 sink to respond to an RxStatus request
+	 * 2. DDC_URGENT_TIMER: Time period in hsyncs to issue an urgent flag
+	 *    when an RxStatus DDC request is made but not accepted by the
+	 *    I2C engine
+	 * 3. DDC_TIMEOUT_TIMER: Timeout in hsyncs which starts counting when
+	 *    a request is made and stops when it is accepted by the DDC
+	 *    arbiter
+	 */
+
+	timeout = data->timeout_hsync;
+	timer = data->periodic_timer_hsync;
+
+	hdmi_write(hdmi, HDMI_HDCP2P2_DDC_TIMER_CTRL, timer);
+	/* Set both urgent and hw-timeout fields to the same value */
+	hdmi_write(hdmi, HDMI_HDCP2P2_DDC_TIMER_CTRL2,
+			   (timeout << 16 | timeout));
+	/* enable interrupts */
+	reg_val = intr_en_mask;
+	/* Clear interrupt status bits */
+	reg_val |= intr_en_mask >> 1;
+
+	hdmi_write(hdmi, HDMI_DDC_INT_CTRL0, reg_val);
+	reg_val = hdmi_read(hdmi, HDMI_DDC_INT_CTRL5);
+	/* clear and enable RxStatus read timeout */
+	reg_val |= BIT(2) | BIT(1);
+
+	hdmi_write(hdmi, HDMI_DDC_INT_CTRL5, reg_val);
+	/*
+	 * Enable hardware DDC access to RxStatus register
+	 *
+	 * HDMI_HW_DDC_CTRL:Bits 1:0 (RXSTATUS_DDC_ENABLE) read like this:
+	 *
+	 * 0 = disable HW controlled DDC access to RxStatus
+	 * 1 = automatic on when HDCP 2.2 is authenticated and loop based on
+	 * request timer (i.e. the hardware will loop automatically)
+	 * 2 = force on and loop based on request timer (hardware will loop)
+	 * 3 = enable by sw trigger and loop until interrupt is generated for
+ * RxStatus.reauth_req, RxStatus.ready or RxStatus.message_size.
+	 *
+	 * Depending on the value of ddc_data::poll_sink, we make the decision
+	 * to use either SW_TRIGGER(3) (poll_sink = false) which means that the
+	 * hardware will poll sink and generate interrupt when sink responds,
+	 * or use AUTOMATIC_LOOP(1) (poll_sink = true) which will poll the sink
+	 * based on request timer
+	 */
+
+	reg_val = hdmi_read(hdmi, HDMI_HW_DDC_CTRL);
+	reg_val &= ~(BIT(1) | BIT(0));
+
+	busy_wait_us = data->timeout_ms * HDMI_MS_TO_US;
+
+	/* read method: HDCP2P2_RXSTATUS_HW_DDC_SW_TRIGGER */
+	reg_val |= BIT(1) | BIT(0);
+	hdmi_write(hdmi, HDMI_HW_DDC_CTRL, reg_val);
+
+	hdmi_write(hdmi, HDMI_HDCP2P2_DDC_SW_TRIGGER, 1);
+	if (data->wait) {
+		reinit_completion(&ddc_ctrl->rx_status_done);
+		rem = wait_for_completion_timeout(&ddc_ctrl->rx_status_done,
+				HZ);
+		data->timeout_left = jiffies_to_msecs(rem);
+
+		if (!data->timeout_left) {
+			pr_err("sw ddc rxstatus timeout\n");
+			rc = -ETIMEDOUT;
+		}
+		sde_hdmi_hdcp2p2_ddc_disable((void *)display);
+	}
+	return rc;
+}
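+
+/*
+ * Sketch of a typical caller setup (field values below are illustrative,
+ * not mandated by the hardware):
+ *
+ *	struct sde_hdmi_tx_hdcp2p2_ddc_data *d =
+ *		&display->ddc_ctrl.sde_hdcp2p2_ddc_data;
+ *
+ *	d->intr_mask = RXSTATUS_READY | RXSTATUS_MESSAGE_SIZE |
+ *			RXSTATUS_REAUTH_REQ;
+ *	d->timeout_ms = 200;
+ *	d->timeout_hsync = _sde_hdmi_get_timeout_in_hysnc(display, 200);
+ *	d->periodic_timer_hsync = d->timeout_hsync / 2;
+ *	d->wait = true;
+ *	rc = sde_hdmi_hdcp2p2_read_rxstatus(display);
+ */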
+
+unsigned long sde_hdmi_calc_pixclk(unsigned long pixel_freq,
+	u32 out_format, bool dc_enable)
+{
+	u32 rate_ratio = HDMI_RGB_24BPP_PCLK_TMDS_CH_RATE_RATIO;
+
+	if (out_format & MSM_MODE_FLAG_COLOR_FORMAT_YCBCR420)
+		rate_ratio = HDMI_YUV420_24BPP_PCLK_TMDS_CH_RATE_RATIO;
+
+	pixel_freq /= rate_ratio;
+
+	if (dc_enable)
+		pixel_freq += pixel_freq >> 2;
+
+	return pixel_freq;
+}
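+
+/*
+ * Example: assuming the 4:2:0 rate ratio constant is 2, a 594000 kHz
+ * 4k@60 mode carried as YCbCr 4:2:0 maps to 297000 kHz; enabling deep
+ * color then adds 25% (pixel_freq >> 2), giving 297000 + 74250 = 371250
+ * kHz on the TMDS channel.
+ */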
+
+bool sde_hdmi_validate_pixclk(struct drm_connector *connector,
+	unsigned long pclk)
+{
+	struct sde_connector *c_conn = to_sde_connector(connector);
+	struct sde_hdmi *display = (struct sde_hdmi *)c_conn->display;
+	unsigned long max_pclk = display->max_pclk_khz * HDMI_KHZ_TO_HZ;
+
+	if (connector->max_tmds_char)
+		max_pclk = MIN(max_pclk,
+			connector->max_tmds_char * HDMI_MHZ_TO_HZ);
+	else if (connector->max_tmds_clock)
+		max_pclk = MIN(max_pclk,
+			connector->max_tmds_clock * HDMI_MHZ_TO_HZ);
+
+	SDE_DEBUG("MAX PCLK = %lu, PCLK = %lu\n", max_pclk, pclk);
+
+	return pclk < max_pclk;
+}
+
+static bool sde_hdmi_check_dc_clock(struct drm_connector *connector,
+	struct drm_display_mode *mode, u32 format)
+{
+	struct sde_connector *c_conn = to_sde_connector(connector);
+	struct sde_hdmi *display = (struct sde_hdmi *)c_conn->display;
+
+	u32 tmds_clk_with_dc = sde_hdmi_calc_pixclk(
+					mode->clock * HDMI_KHZ_TO_HZ,
+					format,
+					true);
+
+	return (display->dc_feature_supported &&
+		sde_hdmi_validate_pixclk(connector, tmds_clk_with_dc));
+}
+
+int sde_hdmi_sink_dc_support(struct drm_connector *connector,
+	struct drm_display_mode *mode)
+{
+	int dc_format = 0;
+
+	if ((mode->flags & DRM_MODE_FLAG_SUPPORTS_YUV) &&
+	    (connector->display_info.edid_hdmi_dc_modes
+	     & DRM_EDID_YCBCR420_DC_30))
+		if (sde_hdmi_check_dc_clock(connector, mode,
+				MSM_MODE_FLAG_COLOR_FORMAT_YCBCR420))
+			dc_format |= MSM_MODE_FLAG_YUV420_DC_ENABLE;
+
+	if ((mode->flags & DRM_MODE_FLAG_SUPPORTS_RGB) &&
+	    (connector->display_info.edid_hdmi_dc_modes
+	     & DRM_EDID_HDMI_DC_30))
+		if (sde_hdmi_check_dc_clock(connector, mode,
+				MSM_MODE_FLAG_COLOR_FORMAT_RGB444))
+			dc_format |= MSM_MODE_FLAG_RGB444_DC_ENABLE;
+
+	return dc_format;
+}
+
+u8 sde_hdmi_hdr_get_ops(u8 curr_state,
+	u8 new_state)
+{
+
+	/* There are four valid state transitions:
+	 *
+	 * In this transition, we shall start sending
+	 * HDR metadata with metadata from the HDR clip
+	 *
+	 * 2. HDR_ENABLE -> HDR_RESET
+	 *
+	 * In this transition, we will keep sending
+	 * HDR metadata but with EOTF and metadata as 0
+	 *
+	 * 3. HDR_RESET -> HDR_ENABLE
+	 *
+	 * In this transition, we will start sending
+	 * HDR metadata with metadata from the HDR clip
+	 *
+	 * 4. HDR_RESET -> HDR_DISABLE
+	 *
+	 * In this transition, we will stop sending
+	 * metadata to the sink and clear PKT_CTRL register
+	 * bits.
+	 */
+
+	if ((curr_state == HDR_DISABLE)
+				&& (new_state == HDR_ENABLE)) {
+		HDMI_UTIL_DEBUG("State changed %s ---> %s\n",
+						sde_hdmi_hdr_sname(curr_state),
+						sde_hdmi_hdr_sname(new_state));
+		return HDR_SEND_INFO;
+	} else if ((curr_state == HDR_ENABLE)
+				&& (new_state == HDR_RESET)) {
+		HDMI_UTIL_DEBUG("State changed %s ---> %s\n",
+						sde_hdmi_hdr_sname(curr_state),
+						sde_hdmi_hdr_sname(new_state));
+		return HDR_SEND_INFO;
+	} else if ((curr_state == HDR_RESET)
+				&& (new_state == HDR_ENABLE)) {
+		HDMI_UTIL_DEBUG("State changed %s ---> %s\n",
+						sde_hdmi_hdr_sname(curr_state),
+						sde_hdmi_hdr_sname(new_state));
+		return HDR_SEND_INFO;
+	} else if ((curr_state == HDR_RESET)
+				&& (new_state == HDR_DISABLE)) {
+		HDMI_UTIL_DEBUG("State changed %s ---> %s\n",
+						sde_hdmi_hdr_sname(curr_state),
+						sde_hdmi_hdr_sname(new_state));
+		return HDR_CLEAR_INFO;
+	}
+
+	HDMI_UTIL_DEBUG("Unsupported OR no state change\n");
+	return HDR_UNSUPPORTED_OP;
+}
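+
+/*
+ * Usage sketch (caller-side names such as curr_hdr_state are
+ * illustrative):
+ *
+ *	switch (sde_hdmi_hdr_get_ops(display->curr_hdr_state, new_state)) {
+ *	case HDR_SEND_INFO:
+ *		program the HDR infoframe from the new metadata;
+ *		break;
+ *	case HDR_CLEAR_INFO:
+ *		stop sending HDR metadata to the sink;
+ *		break;
+ *	default:
+ *		break;
+ *	}
+ */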
+
diff -Nruw linux-4.4.115-fbx/drivers/gpu/drm/msm/hdmi-staging./sde_hdmi_util.h linux-4.4.115-fbx/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_util.h
--- linux-4.4.115-fbx/drivers/gpu/drm/msm/hdmi-staging./sde_hdmi_util.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_util.h	2019-10-29 09:26:23.629203041 +0100
@@ -0,0 +1,206 @@
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _SDE_HDMI_UTIL_H_
+#define _SDE_HDMI_UTIL_H_
+
+#include <linux/types.h>
+#include <linux/bitops.h>
+#include <linux/debugfs.h>
+#include <linux/of_device.h>
+#include <linux/msm_ext_display.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include "hdmi.h"
+#include "sde_kms.h"
+#include "sde_connector.h"
+#include "msm_drv.h"
+#include "sde_hdmi_regs.h"
+
+#ifdef HDMI_UTIL_DEBUG_ENABLE
+#define HDMI_UTIL_DEBUG(fmt, args...)   SDE_ERROR(fmt, ##args)
+#else
+#define HDMI_UTIL_DEBUG(fmt, args...)   SDE_DEBUG(fmt, ##args)
+#endif
+
+#define HDMI_UTIL_ERROR(fmt, args...)   SDE_ERROR(fmt, ##args)
+
+/*
+ * Offsets in HDMI_DDC_INT_CTRL0 register
+ *
+ * The HDMI_DDC_INT_CTRL0 register is intended for HDCP 2.2 RxStatus
+ * register manipulation. It reads like this:
+ *
+ * Bit 31: RXSTATUS_MESSAGE_SIZE_MASK (1 = generate interrupt when size > 0)
+ * Bit 30: RXSTATUS_MESSAGE_SIZE_ACK  (1 = Acknowledge message size intr)
+ * Bits 29-20: RXSTATUS_MESSAGE_SIZE  (Actual size of message available)
+ * Bits 19-18: RXSTATUS_READY_MASK    (1 = generate interrupt when ready = 1
+ *       2 = generate interrupt when ready = 0)
+ * Bit 17: RXSTATUS_READY_ACK         (1 = Acknowledge ready bit interrupt)
+ * Bit 16: RXSTATUS_READY      (1 = Rxstatus ready bit read is 1)
+ * Bit 15: RXSTATUS_READY_NOT         (1 = Rxstatus ready bit read is 0)
+ * Bit 14: RXSTATUS_REAUTH_REQ_MASK   (1 = generate interrupt when reauth is
+ *   requested by sink)
+ * Bit 13: RXSTATUS_REAUTH_REQ_ACK    (1 = Acknowledge Reauth req interrupt)
+ * Bit 12: RXSTATUS_REAUTH_REQ        (1 = Rxstatus reauth req bit read is 1)
+ * Bit 10: RXSTATUS_DDC_FAILED_MASK   (1 = generate interrupt when DDC
+ *   transaction fails)
+ * Bit 9:  RXSTATUS_DDC_FAILED_ACK    (1 = Acknowledge ddc failure interrupt)
+ * Bit 8:  RXSTATUS_DDC_FAILED      (1 = DDC transaction failed)
+ * Bit 6:  RXSTATUS_DDC_DONE_MASK     (1 = generate interrupt when DDC
+ *   transaction completes)
+ * Bit 5:  RXSTATUS_DDC_DONE_ACK      (1 = Acknowledge ddc done interrupt)
+ * Bit 4:  RXSTATUS_DDC_DONE      (1 = DDC transaction is done)
+ * Bit 2:  RXSTATUS_DDC_REQ_MASK      (1 = generate interrupt when DDC Read
+ *   request for RXstatus is made)
+ * Bit 1:  RXSTATUS_DDC_REQ_ACK       (1 = Acknowledge Rxstatus read interrupt)
+ * Bit 0:  RXSTATUS_DDC_REQ           (1 = RXStatus DDC read request is made)
+ *
+ */
+
+#define HDCP2P2_RXSTATUS_MESSAGE_SIZE_SHIFT         20
+#define HDCP2P2_RXSTATUS_MESSAGE_SIZE_MASK          0x3ff00000
+#define HDCP2P2_RXSTATUS_MESSAGE_SIZE_ACK_SHIFT     30
+#define HDCP2P2_RXSTATUS_MESSAGE_SIZE_INTR_SHIFT    31
+
+#define HDCP2P2_RXSTATUS_REAUTH_REQ_SHIFT           12
+#define HDCP2P2_RXSTATUS_REAUTH_REQ_MASK            1
+#define HDCP2P2_RXSTATUS_REAUTH_REQ_ACK_SHIFT       13
+#define HDCP2P2_RXSTATUS_REAUTH_REQ_INTR_SHIFT      14
+
+#define HDCP2P2_RXSTATUS_READY_SHIFT                16
+#define HDCP2P2_RXSTATUS_READY_MASK                 1
+#define HDCP2P2_RXSTATUS_READY_ACK_SHIFT            17
+#define HDCP2P2_RXSTATUS_READY_INTR_SHIFT           18
+#define HDCP2P2_RXSTATUS_READY_INTR_MASK            18
+
+#define HDCP2P2_RXSTATUS_DDC_FAILED_SHIFT           8
+#define HDCP2P2_RXSTATUS_DDC_FAILED_ACKSHIFT        9
+#define HDCP2P2_RXSTATUS_DDC_FAILED_INTR_MASK       10
+#define HDCP2P2_RXSTATUS_DDC_DONE                   6
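+
+/*
+ * Example: acknowledging every latched RxStatus event in one register
+ * write ORs the ACK bits documented above,
+ * BIT(30) | BIT(17) | BIT(13) | BIT(9) | BIT(5) | BIT(1), which is the
+ * value sde_hdmi_hdcp2p2_ddc_reset() programs into HDMI_DDC_INT_CTRL0.
+ */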
+
+/* default hsyncs for 4k@60 for 200ms */
+#define HDMI_DEFAULT_TIMEOUT_HSYNC 28571
+
+#define HDMI_GET_MSB(x)	((x) >> 8)
+#define HDMI_GET_LSB(x)	((x) & 0xff)
+
+#define SDE_HDMI_VIC_640x480 0x1
+#define SDE_HDMI_YCC_QUANT_MASK (0x3 << 14)
+#define SDE_HDMI_COLORIMETRY_MASK (0x3 << 22)
+
+#define SDE_HDMI_DEFAULT_COLORIMETRY 0x0
+#define SDE_HDMI_USE_EXTENDED_COLORIMETRY 0x3
+#define SDE_HDMI_BT2020_COLORIMETRY 0x6
+
+#define SDE_HDMI_HDCP_22 0x22
+#define SDE_HDMI_HDCP_14 0x14
+#define SDE_HDMI_HDCP_NONE 0x0
+
+#define SDE_HDMI_HDR_LUMINANCE_NONE 0x0
+#define SDE_HDMI_HDR_EOTF_NONE 0x0
+
+/*
+ * Bits 1:0 in HDMI_HW_DDC_CTRL that dictate how the HDCP 2.2 RxStatus will be
+ * read by the hardware
+ */
+#define HDCP2P2_RXSTATUS_HW_DDC_DISABLE             0
+#define HDCP2P2_RXSTATUS_HW_DDC_AUTOMATIC_LOOP      1
+#define HDCP2P2_RXSTATUS_HW_DDC_FORCE_LOOP          2
+#define HDCP2P2_RXSTATUS_HW_DDC_SW_TRIGGER          3
+
+struct sde_hdmi_tx_ddc_data {
+	char *what;
+	u8 *data_buf;
+	u32 data_len;
+	u32 dev_addr;
+	u32 offset;
+	u32 request_len;
+	u32 retry_align;
+	u32 hard_timeout;
+	u32 timeout_left;
+	int retry;
+};
+
+enum sde_hdmi_tx_hdcp2p2_rxstatus_intr_mask {
+	RXSTATUS_MESSAGE_SIZE = BIT(31),
+	RXSTATUS_READY = BIT(18),
+	RXSTATUS_REAUTH_REQ = BIT(14),
+};
+
+enum sde_hdmi_hdr_state {
+	HDR_DISABLE = DRM_MSM_HDR_DISABLE,
+	HDR_ENABLE  = DRM_MSM_HDR_ENABLE,
+	HDR_RESET   = DRM_MSM_HDR_RESET
+};
+
+enum sde_hdmi_hdr_op {
+	HDR_UNSUPPORTED_OP,
+	HDR_SEND_INFO,
+	HDR_CLEAR_INFO
+};
+
+struct sde_hdmi_tx_hdcp2p2_ddc_data {
+	enum sde_hdmi_tx_hdcp2p2_rxstatus_intr_mask intr_mask;
+	u32 timeout_ms;
+	u32 timeout_hsync;
+	u32 periodic_timer_hsync;
+	u32 timeout_left;
+	u32 read_method;
+	u32 message_size;
+	bool encryption_ready;
+	bool ready;
+	bool reauth_req;
+	bool ddc_max_retries_fail;
+	bool ddc_done;
+	bool ddc_read_req;
+	bool ddc_timeout;
+	bool wait;
+	int irq_wait_count;
+	void (*link_cb)(void *data);
+	void *link_data;
+};
+
+struct sde_hdmi_tx_ddc_ctrl {
+	struct completion rx_status_done;
+	struct dss_io_data *io;
+	struct sde_hdmi_tx_ddc_data ddc_data;
+	struct sde_hdmi_tx_hdcp2p2_ddc_data sde_hdcp2p2_ddc_data;
+};
+
+/* DDC */
+int sde_hdmi_ddc_write(void *cb_data);
+int sde_hdmi_ddc_read(void *cb_data);
+int sde_hdmi_ddc_scrambling_isr(void *hdmi_display);
+int _sde_hdmi_get_timeout_in_hysnc(void *hdmi_display, u32 timeout_ms);
+void _sde_hdmi_scrambler_ddc_disable(void *hdmi_display);
+void sde_hdmi_hdcp2p2_ddc_disable(void *hdmi_display);
+int sde_hdmi_hdcp2p2_read_rxstatus(void *hdmi_display);
+void sde_hdmi_ddc_config(void *hdmi_display);
+int sde_hdmi_ddc_hdcp2p2_isr(void *hdmi_display);
+void sde_hdmi_dump_regs(void *hdmi_display);
+unsigned long sde_hdmi_calc_pixclk(unsigned long pixel_freq,
+	u32 out_format, bool dc_enable);
+bool sde_hdmi_validate_pixclk(struct drm_connector *connector,
+	unsigned long pclk);
+int sde_hdmi_sink_dc_support(struct drm_connector *connector,
+	struct drm_display_mode *mode);
+u8 sde_hdmi_hdr_get_ops(u8 curr_state,
+	u8 new_state);
+void sde_hdmi_ctrl_reset(struct hdmi *hdmi);
+void sde_hdmi_ctrl_cfg(struct hdmi *hdmi, bool power_on);
+u8 sde_hdmi_hdr_set_chksum(struct drm_msm_ext_panel_hdr_metadata *hdr_meta);
+
+#endif /* _SDE_HDMI_UTIL_H_ */
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/gpu/drm/msm/msm_gem_vma.c	2019-01-22 16:16:23.507246443 +0100
@@ -0,0 +1,217 @@
+/*
+ * Copyright (C) 2016 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "msm_drv.h"
+#include "msm_gem.h"
+#include "msm_iommu.h"
+
+static void
+msm_gem_address_space_destroy(struct kref *kref)
+{
+	struct msm_gem_address_space *aspace = container_of(kref,
+			struct msm_gem_address_space, kref);
+
+	if (aspace->va_len)
+		drm_mm_takedown(&aspace->mm);
+
+	aspace->mmu->funcs->destroy(aspace->mmu);
+
+	kfree(aspace);
+}
+
+void msm_gem_address_space_put(struct msm_gem_address_space *aspace)
+{
+	if (aspace)
+		kref_put(&aspace->kref, msm_gem_address_space_destroy);
+}
+
+static struct msm_gem_address_space *
+msm_gem_address_space_new(struct msm_mmu *mmu, const char *name,
+		uint64_t start, uint64_t end)
+{
+	struct msm_gem_address_space *aspace;
+
+	if (!mmu)
+		return ERR_PTR(-EINVAL);
+
+	aspace = kzalloc(sizeof(*aspace), GFP_KERNEL);
+	if (!aspace)
+		return ERR_PTR(-ENOMEM);
+
+	spin_lock_init(&aspace->lock);
+	aspace->name = name;
+	aspace->mmu = mmu;
+
+	aspace->va_len = end - start;
+
+	if (aspace->va_len)
+		drm_mm_init(&aspace->mm, (start >> PAGE_SHIFT),
+			(aspace->va_len >> PAGE_SHIFT));
+
+	kref_init(&aspace->kref);
+
+	return aspace;
+}
+
+static int allocate_iova(struct msm_gem_address_space *aspace,
+		struct msm_gem_vma *vma, struct sg_table *sgt,
+		u64 *iova)
+{
+	struct scatterlist *sg;
+	size_t size = 0;
+	int ret, i;
+
+	if (!aspace->va_len)
+		return 0;
+
+	for_each_sg(sgt->sgl, sg, sgt->nents, i)
+		size += sg->length + sg->offset;
+
+	spin_lock(&aspace->lock);
+
+	if (WARN_ON(drm_mm_node_allocated(&vma->node))) {
+		spin_unlock(&aspace->lock);
+		return 0;
+	}
+	ret = drm_mm_insert_node(&aspace->mm, &vma->node,
+			size >> PAGE_SHIFT, 0, DRM_MM_SEARCH_BOTTOM_UP);
+
+	spin_unlock(&aspace->lock);
+
+	if (!ret && iova)
+		*iova = vma->node.start << PAGE_SHIFT;
+
+	return ret;
+}
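+
+/*
+ * Example: an sg_table with two entries of length 4096 and 8192 (both
+ * with offset 0) gives size = 12288, i.e. three 4 KiB pages requested
+ * from the drm_mm range manager.
+ */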
+
+int msm_gem_reserve_iova(struct msm_gem_address_space *aspace,
+		struct msm_gem_vma *vma,
+		uint64_t hostptr, uint64_t size)
+{
+	struct drm_mm *mm = &aspace->mm;
+	uint64_t start = hostptr >> PAGE_SHIFT;
+	uint64_t last = (hostptr + size - 1) >> PAGE_SHIFT;
+	int ret;
+
+	spin_lock(&aspace->lock);
+
+	if (drm_mm_interval_first(mm, start, last)) {
+		/* iova already in use, fail */
+		spin_unlock(&aspace->lock);
+		return -EADDRINUSE;
+	}
+
+	vma->node.start = hostptr >> PAGE_SHIFT;
+	vma->node.size = size >> PAGE_SHIFT;
+	vma->node.color = 0;
+
+	ret = drm_mm_reserve_node(mm, &vma->node);
+	if (!ret)
+		vma->iova = hostptr;
+
+	spin_unlock(&aspace->lock);
+
+	return ret;
+}
+
+void msm_gem_release_iova(struct msm_gem_address_space *aspace,
+		struct msm_gem_vma *vma)
+{
+	spin_lock(&aspace->lock);
+	if (drm_mm_node_allocated(&vma->node))
+		drm_mm_remove_node(&vma->node);
+	spin_unlock(&aspace->lock);
+}
+
+int msm_gem_map_vma(struct msm_gem_address_space *aspace,
+		struct msm_gem_vma *vma, struct sg_table *sgt,
+		void *priv, unsigned int flags)
+{
+	u64 iova = 0;
+	int ret;
+
+	if (!aspace)
+		return -EINVAL;
+
+	ret = allocate_iova(aspace, vma, sgt, &iova);
+	if (ret)
+		return ret;
+
+	ret = aspace->mmu->funcs->map(aspace->mmu, iova, sgt,
+		flags, priv);
+
+	if (ret) {
+		msm_gem_release_iova(aspace, vma);
+		return ret;
+	}
+
+	vma->iova = sg_dma_address(sgt->sgl);
+	kref_get(&aspace->kref);
+
+	return 0;
+}
+
+void msm_gem_unmap_vma(struct msm_gem_address_space *aspace,
+		struct msm_gem_vma *vma, struct sg_table *sgt,
+		void *priv, bool invalidated)
+{
+	if (!aspace || !vma->iova)
+		return;
+
+	if (!invalidated)
+		aspace->mmu->funcs->unmap(aspace->mmu, vma->iova, sgt, priv);
+
+	msm_gem_release_iova(aspace, vma);
+
+	vma->iova = 0;
+
+	msm_gem_address_space_put(aspace);
+}
+
+struct msm_gem_address_space *
+msm_gem_smmu_address_space_create(struct device *dev, struct msm_mmu *mmu,
+		const char *name)
+{
+	return msm_gem_address_space_new(mmu, name, 0, 0);
+}
+
+struct msm_gem_address_space *
+msm_gem_address_space_create(struct device *dev, struct iommu_domain *domain,
+		int type, const char *name)
+{
+	struct msm_mmu *mmu = msm_iommu_new(dev, type, domain);
+
+	if (IS_ERR(mmu))
+		return (struct msm_gem_address_space *) mmu;
+
+	return msm_gem_address_space_new(mmu, name,
+		domain->geometry.aperture_start,
+		domain->geometry.aperture_end);
+}
+
+/* Create a new dynamic instance */
+struct msm_gem_address_space *
+msm_gem_address_space_create_instance(struct msm_mmu *parent, const char *name,
+		uint64_t start, uint64_t end)
+{
+	struct msm_mmu *child = msm_iommu_new_dynamic(parent);
+
+	if (IS_ERR(child))
+		return (struct msm_gem_address_space *) child;
+
+	return msm_gem_address_space_new(child, name, start, end);
+}
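
A minimal sketch of how a caller might drive the pair of helpers above: msm_gem_map_vma() allocates an iova out of the drm_mm range (when the aspace has one) and calls the MMU map callback, while msm_gem_unmap_vma() reverses both steps and drops the aspace reference. This is not part of the patch; the my_map_and_unmap() name, the flag choice, and the logging are illustrative assumptions.

static int my_map_and_unmap(struct msm_gem_address_space *aspace,
		struct msm_gem_vma *vma, struct sg_table *sgt)
{
	int ret;

	/* allocates an iova (when the aspace has a VA range) and maps it */
	ret = msm_gem_map_vma(aspace, vma, sgt, NULL,
			IOMMU_READ | IOMMU_WRITE);
	if (ret)
		return ret;

	pr_debug("mapped at iova 0x%llx\n", (unsigned long long)vma->iova);

	/* ... use the mapping ... */

	/* unmaps, releases the iova node and drops the aspace reference */
	msm_gem_unmap_vma(aspace, vma, sgt, NULL, false);
	return 0;
}
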
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/gpu/drm/msm/msm_iommu.h	2019-01-22 16:16:23.507246443 +0100
@@ -0,0 +1,46 @@
+/* Copyright (c) 2016-2017 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _MSM_IOMMU_H_
+#define _MSM_IOMMU_H_
+
+#include "msm_mmu.h"
+
+struct msm_iommu {
+	struct msm_mmu base;
+	struct iommu_domain *domain;
+	int cb;
+	phys_addr_t ttbr0;
+	uint32_t contextidr;
+	bool allow_dynamic;
+
+	struct clk *clocks[5];
+	int nr_clocks;
+
+	bool is_coherent;
+};
+#define to_msm_iommu(x) container_of(x, struct msm_iommu, base)
+
+static inline bool msm_iommu_allow_dynamic(struct msm_mmu *mmu)
+{
+	struct msm_iommu *iommu = to_msm_iommu(mmu);
+
+	return iommu->allow_dynamic;
+}
+
+static inline bool msm_iommu_coherent(struct msm_mmu *mmu)
+{
+	struct msm_iommu *iommu = to_msm_iommu(mmu);
+
+	return iommu->is_coherent;
+}
+#endif
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/gpu/drm/msm/msm_prop.c	2019-01-22 16:16:23.507246443 +0100
@@ -0,0 +1,734 @@
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include "msm_prop.h"
+
+void msm_property_init(struct msm_property_info *info,
+		struct drm_mode_object *base,
+		struct drm_device *dev,
+		struct drm_property **property_array,
+		struct msm_property_data *property_data,
+		uint32_t property_count,
+		uint32_t blob_count,
+		uint32_t state_size)
+{
+	int i;
+
+	/* prevent access if any of these are NULL */
+	if (!base || !dev || !property_array || !property_data) {
+		property_count = 0;
+		blob_count = 0;
+
+		DRM_ERROR("invalid arguments, forcing zero properties\n");
+		return;
+	}
+
+	/* can't have more blob properties than total properties */
+	if (blob_count > property_count) {
+		blob_count = property_count;
+
+		DBG("Capping number of blob properties to %d", blob_count);
+	}
+
+	if (!info) {
+		DRM_ERROR("info pointer is NULL\n");
+	} else {
+		info->base = base;
+		info->dev = dev;
+		info->property_array = property_array;
+		info->property_data = property_data;
+		info->property_count = property_count;
+		info->blob_count = blob_count;
+		info->install_request = 0;
+		info->install_count = 0;
+		info->recent_idx = 0;
+		info->is_active = false;
+		info->state_size = state_size;
+		info->state_cache_size = 0;
+		mutex_init(&info->property_lock);
+
+		memset(property_data,
+				0,
+				sizeof(struct msm_property_data) *
+				property_count);
+		INIT_LIST_HEAD(&info->dirty_list);
+
+		for (i = 0; i < property_count; ++i)
+			INIT_LIST_HEAD(&property_data[i].dirty_node);
+	}
+}
+
+void msm_property_destroy(struct msm_property_info *info)
+{
+	if (!info)
+		return;
+
+	/* reset dirty list */
+	INIT_LIST_HEAD(&info->dirty_list);
+
+	/* free state cache */
+	while (info->state_cache_size > 0)
+		kfree(info->state_cache[--(info->state_cache_size)]);
+
+	mutex_destroy(&info->property_lock);
+}
+
+int msm_property_pop_dirty(struct msm_property_info *info)
+{
+	struct list_head *item;
+	int rc = 0;
+
+	if (!info) {
+		DRM_ERROR("invalid info\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&info->property_lock);
+	if (list_empty(&info->dirty_list)) {
+		rc = -EAGAIN;
+	} else {
+		item = info->dirty_list.next;
+		list_del_init(item);
+		rc = container_of(item, struct msm_property_data, dirty_node)
+			- info->property_data;
+		DRM_DEBUG_KMS("property %d dirty\n", rc);
+	}
+	mutex_unlock(&info->property_lock);
+
+	return rc;
+}
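
msm_property_pop_dirty() is the consumer side of the dirty list that msm_property_atomic_set() feeds: each call hands back one changed property index and clears its dirty flag, returning -EAGAIN once the list is drained. A minimal sketch of the resulting commit-time loop, not part of the patch; the apply_property() callback is a hypothetical stand-in for driver-specific hardware programming.

static void my_apply_dirty_properties(struct msm_property_info *info,
		void (*apply_property)(int idx))
{
	int idx;

	/* each pop also clears that property's dirty flag */
	while ((idx = msm_property_pop_dirty(info)) >= 0)
		apply_property(idx);

	/* idx is now -EAGAIN (nothing left) or -EINVAL (bad info) */
}
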
+
+/**
+ * _msm_property_set_dirty_no_lock - flag given property as being dirty
+ *                                   This function doesn't mutex protect the
+ *                                   dirty linked list.
+ * @info: Pointer to property info container struct
+ * @property_idx: Property index
+ */
+static void _msm_property_set_dirty_no_lock(
+		struct msm_property_info *info,
+		uint32_t property_idx)
+{
+	if (!info || property_idx >= info->property_count) {
+		DRM_ERROR("invalid argument(s), info %pK, idx %u\n",
+				info, property_idx);
+		return;
+	}
+
+	/* avoid re-inserting if already dirty */
+	if (!list_empty(&info->property_data[property_idx].dirty_node)) {
+		DRM_DEBUG_KMS("property %u already dirty\n", property_idx);
+		return;
+	}
+
+	list_add_tail(&info->property_data[property_idx].dirty_node,
+			&info->dirty_list);
+}
+
+/**
+ * _msm_property_install_integer - install standard drm range property
+ * @info: Pointer to property info container struct
+ * @name: Property name
+ * @flags: Other property type flags, e.g. DRM_MODE_PROP_IMMUTABLE
+ * @min: Min property value
+ * @max: Max property value
+ * @init: Default Property value
+ * @property_idx: Property index
+ * @force_dirty: Whether to always flag the property dirty on set, even if
+ *               the value is unchanged
+ */
+static void _msm_property_install_integer(struct msm_property_info *info,
+		const char *name, int flags, uint64_t min, uint64_t max,
+		uint64_t init, uint32_t property_idx, bool force_dirty)
+{
+	struct drm_property **prop;
+
+	if (!info)
+		return;
+
+	++info->install_request;
+
+	if (!name || (property_idx >= info->property_count)) {
+		DRM_ERROR("invalid argument(s), %s\n", name ? name : "null");
+	} else {
+		prop = &info->property_array[property_idx];
+		/*
+		 * Properties need to be attached to each drm object that
+		 * uses them, but only need to be created once
+		 */
+		if (*prop == 0) {
+			*prop = drm_property_create_range(info->dev,
+					flags, name, min, max);
+			if (*prop == 0)
+				DRM_ERROR("create %s property failed\n", name);
+		}
+
+		/* save init value for later */
+		info->property_data[property_idx].default_value = init;
+		info->property_data[property_idx].force_dirty = force_dirty;
+
+		/* always attach property, if created */
+		if (*prop) {
+			drm_object_attach_property(info->base, *prop, init);
+			++info->install_count;
+		}
+	}
+}
+
+/**
+ * _msm_property_install_signed_integer - install signed drm range property
+ * @info: Pointer to property info container struct
+ * @name: Property name
+ * @flags: Other property type flags, e.g. DRM_MODE_PROP_IMMUTABLE
+ * @min: Min property value
+ * @max: Max property value
+ * @init: Default Property value
+ * @property_idx: Property index
+ * @force_dirty: Whether to always flag the property dirty on set, even if
+ *               the value is unchanged
+ */
+static void _msm_property_install_signed_integer(struct msm_property_info *info,
+		const char *name, int flags, int64_t min, int64_t max,
+		int64_t init, uint32_t property_idx, bool force_dirty)
+{
+	struct drm_property **prop;
+
+	if (!info)
+		return;
+
+	++info->install_request;
+
+	if (!name || (property_idx >= info->property_count)) {
+		DRM_ERROR("invalid argument(s), %s\n", name ? name : "null");
+	} else {
+		prop = &info->property_array[property_idx];
+		/*
+		 * Properties need to be attached to each drm object that
+		 * uses them, but only need to be created once
+		 */
+		if (*prop == 0) {
+			*prop = drm_property_create_signed_range(info->dev,
+					flags, name, min, max);
+			if (*prop == 0)
+				DRM_ERROR("create %s property failed\n", name);
+		}
+
+		/* save init value for later */
+		info->property_data[property_idx].default_value = I642U64(init);
+		info->property_data[property_idx].force_dirty = force_dirty;
+
+		/* always attach property, if created */
+		if (*prop) {
+			drm_object_attach_property(info->base, *prop, init);
+			++info->install_count;
+		}
+	}
+}
+
+void msm_property_install_range(struct msm_property_info *info,
+		const char *name, int flags, uint64_t min, uint64_t max,
+		uint64_t init, uint32_t property_idx)
+{
+	_msm_property_install_integer(info, name, flags,
+			min, max, init, property_idx, false);
+}
+
+void msm_property_install_volatile_range(struct msm_property_info *info,
+		const char *name, int flags, uint64_t min, uint64_t max,
+		uint64_t init, uint32_t property_idx)
+{
+	_msm_property_install_integer(info, name, flags,
+			min, max, init, property_idx, true);
+}
+
+void msm_property_install_signed_range(struct msm_property_info *info,
+		const char *name, int flags, int64_t min, int64_t max,
+		int64_t init, uint32_t property_idx)
+{
+	_msm_property_install_signed_integer(info, name, flags,
+			min, max, init, property_idx, false);
+}
+
+void msm_property_install_volatile_signed_range(struct msm_property_info *info,
+		const char *name, int flags, int64_t min, int64_t max,
+		int64_t init, uint32_t property_idx)
+{
+	_msm_property_install_signed_integer(info, name, flags,
+			min, max, init, property_idx, true);
+}
+
+void msm_property_install_rotation(struct msm_property_info *info,
+		unsigned int supported_rotations, uint32_t property_idx)
+{
+	struct drm_property **prop;
+
+	if (!info)
+		return;
+
+	++info->install_request;
+
+	if (property_idx >= info->property_count) {
+		DRM_ERROR("invalid property index %d\n", property_idx);
+	} else {
+		prop = &info->property_array[property_idx];
+		/*
+		 * Properties need to be attached to each drm object that
+		 * uses them, but only need to be created once
+		 */
+		if (*prop == 0) {
+			*prop = drm_mode_create_rotation_property(info->dev,
+					supported_rotations);
+			if (*prop == 0)
+				DRM_ERROR("create rotation property failed\n");
+		}
+
+		/* save init value for later */
+		info->property_data[property_idx].default_value = 0;
+		info->property_data[property_idx].force_dirty = false;
+
+		/* always attach property, if created */
+		if (*prop) {
+			drm_object_attach_property(info->base, *prop, 0);
+			++info->install_count;
+		}
+	}
+}
+
+void msm_property_install_enum(struct msm_property_info *info,
+		const char *name, int flags, int is_bitmask,
+		const struct drm_prop_enum_list *values, int num_values,
+		uint32_t property_idx, uint64_t default_value)
+{
+	struct drm_property **prop;
+
+	if (!info)
+		return;
+
+	++info->install_request;
+
+	if (!name || !values || !num_values ||
+			(property_idx >= info->property_count)) {
+		DRM_ERROR("invalid argument(s), %s\n", name ? name : "null");
+	} else {
+		prop = &info->property_array[property_idx];
+		/*
+		 * Properties need to be attached to each drm object that
+		 * uses them, but only need to be created once
+		 */
+		if (*prop == 0) {
+			/* 'bitmask' is a special type of 'enum' */
+			if (is_bitmask)
+				*prop = drm_property_create_bitmask(info->dev,
+						DRM_MODE_PROP_BITMASK | flags,
+						name, values, num_values, -1);
+			else
+				*prop = drm_property_create_enum(info->dev,
+						DRM_MODE_PROP_ENUM | flags,
+						name, values, num_values);
+			if (*prop == 0)
+				DRM_ERROR("create %s property failed\n", name);
+		}
+
+		/* save init value for later */
+		info->property_data[property_idx].default_value = default_value;
+		info->property_data[property_idx].force_dirty = false;
+
+		/* select first defined value for enums */
+		if (!is_bitmask)
+			info->property_data[property_idx].default_value =
+				values->type;
+
+		/* always attach property, if created */
+		if (*prop) {
+			drm_object_attach_property(info->base, *prop,
+					info->property_data
+					[property_idx].default_value);
+			++info->install_count;
+		}
+	}
+}
+
+void msm_property_install_blob(struct msm_property_info *info,
+		const char *name, int flags, uint32_t property_idx)
+{
+	struct drm_property **prop;
+
+	if (!info)
+		return;
+
+	++info->install_request;
+
+	if (!name || (property_idx >= info->blob_count)) {
+		DRM_ERROR("invalid argument(s), %s\n", name ? name : "null");
+	} else {
+		prop = &info->property_array[property_idx];
+		/*
+		 * Properties need to be attached to each drm object that
+		 * uses them, but only need to be created once
+		 */
+		if (*prop == 0) {
+			/* use 'create' for blob property placeholder */
+			*prop = drm_property_create(info->dev,
+					DRM_MODE_PROP_BLOB | flags, name, 0);
+			if (*prop == 0)
+				DRM_ERROR("create %s property failed\n", name);
+		}
+
+		/* save init value for later */
+		info->property_data[property_idx].default_value = 0;
+		info->property_data[property_idx].force_dirty = true;
+
+		/* always attach property, if created */
+		if (*prop) {
+			drm_object_attach_property(info->base, *prop, -1);
+			++info->install_count;
+		}
+	}
+}
+
+int msm_property_install_get_status(struct msm_property_info *info)
+{
+	int rc = -ENOMEM;
+
+	if (info && (info->install_request == info->install_count))
+		rc = 0;
+
+	return rc;
+}
+
+int msm_property_index(struct msm_property_info *info,
+		struct drm_property *property)
+{
+	uint32_t count;
+	int32_t idx;
+	int rc = -EINVAL;
+
+	if (!info || !property) {
+		DRM_ERROR("invalid argument(s)\n");
+	} else {
+		/*
+		 * Linear search, but start from last found index. This will
+		 * help if any single property is accessed multiple times in a
+		 * row. Ideally, we could keep a list of properties sorted in
+		 * the order of most recent access, but that may be overkill
+		 * for now.
+		 */
+		mutex_lock(&info->property_lock);
+		idx = info->recent_idx;
+		count = info->property_count;
+		while (count) {
+			--count;
+
+			/* stop searching on match */
+			if (info->property_array[idx] == property) {
+				info->recent_idx = idx;
+				rc = idx;
+				break;
+			}
+
+			/* move to next valid index */
+			if (--idx < 0)
+				idx = info->property_count - 1;
+		}
+		mutex_unlock(&info->property_lock);
+	}
+
+	return rc;
+}
+
+int msm_property_atomic_set(struct msm_property_info *info,
+		uint64_t *property_values,
+		struct drm_property_blob **property_blobs,
+		struct drm_property *property, uint64_t val)
+{
+	struct drm_property_blob *blob;
+	int property_idx, rc = -EINVAL;
+
+	property_idx = msm_property_index(info, property);
+	if (!info || (property_idx == -EINVAL) || !property_values) {
+		DRM_DEBUG("Invalid argument(s)\n");
+	} else {
+		/* extra handling for incoming properties */
+		mutex_lock(&info->property_lock);
+		if ((property->flags & DRM_MODE_PROP_BLOB) &&
+			(property_idx < info->blob_count) &&
+			property_blobs) {
+			/* DRM lookup also takes a reference */
+			blob = drm_property_lookup_blob(info->dev,
+				(uint32_t)val);
+			if (!blob) {
+				DRM_ERROR("blob not found\n");
+				val = 0;
+			} else {
+				DBG("Blob %u saved", blob->base.id);
+				val = blob->base.id;
+
+				/* save blob - need to clear previous ref */
+				if (property_blobs[property_idx])
+					drm_property_unreference_blob(
+						property_blobs[property_idx]);
+				property_blobs[property_idx] = blob;
+			}
+		}
+
+		/* update value and flag as dirty */
+		if (property_values[property_idx] != val ||
+				info->property_data[property_idx].force_dirty) {
+			property_values[property_idx] = val;
+			_msm_property_set_dirty_no_lock(info, property_idx);
+
+			DBG("%s - %lld", property->name, val);
+		}
+		mutex_unlock(&info->property_lock);
+		rc = 0;
+	}
+
+	return rc;
+}
+
+int msm_property_atomic_get(struct msm_property_info *info,
+		uint64_t *property_values,
+		struct drm_property_blob **property_blobs,
+		struct drm_property *property, uint64_t *val)
+{
+	int property_idx, rc = -EINVAL;
+
+	property_idx = msm_property_index(info, property);
+	if (!info || (property_idx == -EINVAL) || !property_values || !val) {
+		DRM_DEBUG("Invalid argument(s)\n");
+	} else {
+		mutex_lock(&info->property_lock);
+		*val = property_values[property_idx];
+		mutex_unlock(&info->property_lock);
+		rc = 0;
+	}
+
+	return rc;
+}
+
+void *msm_property_alloc_state(struct msm_property_info *info)
+{
+	void *state = NULL;
+
+	if (!info) {
+		DRM_ERROR("invalid property info\n");
+		return NULL;
+	}
+
+	mutex_lock(&info->property_lock);
+	if (info->state_cache_size)
+		state = info->state_cache[--(info->state_cache_size)];
+	mutex_unlock(&info->property_lock);
+
+	if (!state && info->state_size)
+		state = kmalloc(info->state_size, GFP_KERNEL);
+
+	if (!state)
+		DRM_ERROR("failed to allocate state\n");
+
+	return state;
+}
+
+/**
+ * _msm_property_free_state - helper function for freeing local state objects
+ * @info: Pointer to property info container struct
+ * @st: Pointer to state object
+ */
+static void _msm_property_free_state(struct msm_property_info *info, void *st)
+{
+	if (!info || !st)
+		return;
+
+	mutex_lock(&info->property_lock);
+	if (info->state_cache_size < MSM_PROP_STATE_CACHE_SIZE)
+		info->state_cache[(info->state_cache_size)++] = st;
+	else
+		kfree(st);
+	mutex_unlock(&info->property_lock);
+}
+
+void msm_property_reset_state(struct msm_property_info *info, void *state,
+		uint64_t *property_values,
+		struct drm_property_blob **property_blobs)
+{
+	uint32_t i;
+
+	if (!info) {
+		DRM_ERROR("invalid property info\n");
+		return;
+	}
+
+	if (state)
+		memset(state, 0, info->state_size);
+
+	/*
+	 * Assign default property values. This helper is mostly used
+	 * to initialize newly created state objects.
+	 */
+	if (property_values)
+		for (i = 0; i < info->property_count; ++i)
+			property_values[i] =
+				info->property_data[i].default_value;
+
+	if (property_blobs)
+		for (i = 0; i < info->blob_count; ++i)
+			property_blobs[i] = 0;
+}
+
+void msm_property_duplicate_state(struct msm_property_info *info,
+		void *old_state, void *state,
+		uint64_t *property_values,
+		struct drm_property_blob **property_blobs)
+{
+	uint32_t i;
+
+	if (!info || !old_state || !state) {
+		DRM_ERROR("invalid argument(s)\n");
+		return;
+	}
+
+	memcpy(state, old_state, info->state_size);
+
+	if (property_blobs) {
+		/* add ref count for blobs */
+		for (i = 0; i < info->blob_count; ++i)
+			if (property_blobs[i])
+				drm_property_reference_blob(property_blobs[i]);
+	}
+}
+
+void msm_property_destroy_state(struct msm_property_info *info, void *state,
+		uint64_t *property_values,
+		struct drm_property_blob **property_blobs)
+{
+	uint32_t i;
+
+	if (!info || !state) {
+		DRM_ERROR("invalid argument(s)\n");
+		return;
+	}
+	if (property_blobs) {
+		/* remove ref count for blobs */
+		for (i = 0; i < info->blob_count; ++i)
+			if (property_blobs[i])
+				drm_property_unreference_blob(
+						property_blobs[i]);
+	}
+
+	_msm_property_free_state(info, state);
+}
+
+void *msm_property_get_blob(struct msm_property_info *info,
+		struct drm_property_blob **property_blobs,
+		size_t *byte_len,
+		uint32_t property_idx)
+{
+	struct drm_property_blob *blob;
+	size_t len = 0;
+	void *rc = 0;
+
+	if (!info || !property_blobs || (property_idx >= info->blob_count)) {
+		DRM_ERROR("invalid argument(s)\n");
+	} else {
+		blob = property_blobs[property_idx];
+		if (blob) {
+			len = blob->length;
+			rc = &blob->data;
+		}
+	}
+
+	if (byte_len)
+		*byte_len = len;
+
+	return rc;
+}
+
+int msm_property_set_blob(struct msm_property_info *info,
+		struct drm_property_blob **blob_reference,
+		void *blob_data,
+		size_t byte_len,
+		uint32_t property_idx)
+{
+	struct drm_property_blob *blob = NULL;
+	int rc = -EINVAL;
+
+	if (!info || !blob_reference || (property_idx >= info->blob_count)) {
+		DRM_ERROR("invalid argument(s)\n");
+	} else {
+		/* create blob */
+		if (blob_data && byte_len) {
+			blob = drm_property_create_blob(info->dev,
+					byte_len,
+					blob_data);
+			if (IS_ERR_OR_NULL(blob)) {
+				rc = PTR_ERR(blob);
+				DRM_ERROR("failed to create blob, %d\n", rc);
+				goto exit;
+			}
+		}
+
+		/* update drm object */
+		rc = drm_object_property_set_value(info->base,
+				info->property_array[property_idx],
+				blob ? blob->base.id : 0);
+		if (rc) {
+			DRM_ERROR("failed to set blob to property\n");
+			if (blob)
+				drm_property_unreference_blob(blob);
+			goto exit;
+		}
+
+		/* update local reference */
+		if (*blob_reference)
+			drm_property_unreference_blob(*blob_reference);
+		*blob_reference = blob;
+	}
+
+exit:
+	return rc;
+}
+
+int msm_property_set_property(struct msm_property_info *info,
+		uint64_t *property_values,
+		uint32_t property_idx,
+		uint64_t val)
+{
+	int rc = -EINVAL;
+
+	if (!info || (property_idx >= info->property_count) ||
+			property_idx < info->blob_count || !property_values) {
+		DRM_ERROR("invalid argument(s)\n");
+	} else {
+		struct drm_property *drm_prop;
+
+		mutex_lock(&info->property_lock);
+
+		/* update cached value */
+		if (property_values)
+			property_values[property_idx] = val;
+
+		/* update the new default value for immutables */
+		drm_prop = info->property_array[property_idx];
+		if (drm_prop->flags & DRM_MODE_PROP_IMMUTABLE)
+			info->property_data[property_idx].default_value = val;
+
+		mutex_unlock(&info->property_lock);
+
+		/* update drm object */
+		rc = drm_object_property_set_value(info->base, drm_prop, val);
+		if (rc)
+			DRM_ERROR("failed set property value, idx %d rc %d\n",
+					property_idx, rc);
+
+	}
+
+	return rc;
+}
+
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/gpu/drm/msm/msm_prop.h	2019-01-22 16:16:23.507246443 +0100
@@ -0,0 +1,432 @@
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _MSM_PROP_H_
+#define _MSM_PROP_H_
+
+#include <linux/list.h>
+#include "msm_drv.h"
+
+#define MSM_PROP_STATE_CACHE_SIZE	2
+
+/**
+ * struct msm_property_data - opaque structure for tracking per-property
+ *                            state of each drm object
+ * @default_value: Default property value for this drm object
+ * @dirty_node: Linked list node to track if property is dirty or not
+ * @force_dirty: Always dirty property on incoming sets, rather than checking
+ *               for modified values
+ */
+struct msm_property_data {
+	uint64_t default_value;
+	struct list_head dirty_node;
+	bool force_dirty;
+};
+
+/**
+ * struct msm_property_info: Structure for property/state helper functions
+ * @base: Pointer to base drm object (plane/crtc/etc.)
+ * @dev: Pointer to drm device object
+ * @property_array: Pointer to array for storing created property objects
+ * @property_data: Pointer to array for storing private property data
+ * @property_count: Total number of properties
+ * @blob_count: Total number of blob properties, should be <= count
+ * @install_request: Total number of property 'install' requests
+ * @install_count: Total number of successful 'install' requests
+ * @recent_idx: Index of property most recently accessed by set/get
+ * @dirty_list: List of all properties that have been 'atomic_set' but not
+ *              yet cleared with 'msm_property_pop_dirty'
+ * @is_active: Whether or not drm component properties are 'active'
+ * @state_cache: Cache of local states, to prevent alloc/free thrashing
+ * @state_size: Size of local state structures
+ * @state_cache_size: Number of state structures currently stored in state_cache
+ * @property_lock: Mutex to protect local variables
+ */
+struct msm_property_info {
+	struct drm_mode_object *base;
+	struct drm_device *dev;
+
+	struct drm_property **property_array;
+	struct msm_property_data *property_data;
+	uint32_t property_count;
+	uint32_t blob_count;
+	uint32_t install_request;
+	uint32_t install_count;
+
+	int32_t recent_idx;
+
+	struct list_head dirty_list;
+	bool is_active;
+
+	void *state_cache[MSM_PROP_STATE_CACHE_SIZE];
+	uint32_t state_size;
+	int32_t state_cache_size;
+	struct mutex property_lock;
+};
+
+/**
+ * msm_property_get_default - query default value of a property
+ * @info: Pointer to property info container struct
+ * @property_idx: Property index
+ * Returns: Default value for specified property
+ */
+static inline
+uint64_t msm_property_get_default(struct msm_property_info *info,
+		uint32_t property_idx)
+{
+	uint64_t rc = 0;
+
+	if (!info)
+		return 0;
+
+	mutex_lock(&info->property_lock);
+	if (property_idx < info->property_count)
+		rc = info->property_data[property_idx].default_value;
+	mutex_unlock(&info->property_lock);
+
+	return rc;
+}
+
+/**
+ * msm_property_set_is_active - set overall 'active' status for all properties
+ * @info: Pointer to property info container struct
+ * @is_active: New 'is active' status
+ */
+static inline
+void msm_property_set_is_active(struct msm_property_info *info, bool is_active)
+{
+	if (info) {
+		mutex_lock(&info->property_lock);
+		info->is_active = is_active;
+		mutex_unlock(&info->property_lock);
+	}
+}
+
+/**
+ * msm_property_get_is_active - query property 'is active' status
+ * @info: Pointer to property info container struct
+ * Returns: Current 'is active' status
+ */
+static inline
+bool msm_property_get_is_active(struct msm_property_info *info)
+{
+	bool rc = false;
+
+	if (info) {
+		mutex_lock(&info->property_lock);
+		rc = info->is_active;
+		mutex_unlock(&info->property_lock);
+	}
+
+	return rc;
+}
+
+/**
+ * msm_property_pop_dirty - determine next dirty property and clear
+ *                          its dirty flag
+ * @info: Pointer to property info container struct
+ * Returns: Valid msm property index on success,
+ *          -EAGAIN if no dirty properties are available
+ *          Property indices returned from this function are similar
+ *          to those returned by the msm_property_index function.
+ */
+int msm_property_pop_dirty(struct msm_property_info *info);
+
+/**
+ * msm_property_init - initialize property info structure
+ * @info: Pointer to property info container struct
+ * @base: Pointer to base drm object (plane/crtc/etc.)
+ * @dev: Pointer to drm device object
+ * @property_array: Pointer to array for storing created property objects
+ * @property_data: Pointer to array for storing private property data
+ * @property_count: Total number of properties
+ * @blob_count: Total number of blob properties, should be <= count
+ * @state_size: Size of local state object
+ */
+void msm_property_init(struct msm_property_info *info,
+		struct drm_mode_object *base,
+		struct drm_device *dev,
+		struct drm_property **property_array,
+		struct msm_property_data *property_data,
+		uint32_t property_count,
+		uint32_t blob_count,
+		uint32_t state_size);
+
+/**
+ * msm_property_destroy - destroy helper info structure
+ *
+ * @info: Pointer to property info container struct
+ */
+void msm_property_destroy(struct msm_property_info *info);
+
+/**
+ * msm_property_install_range - install standard drm range property
+ * @info: Pointer to property info container struct
+ * @name: Property name
+ * @flags: Other property type flags, e.g. DRM_MODE_PROP_IMMUTABLE
+ * @min: Min property value
+ * @max: Max property value
+ * @init: Default Property value
+ * @property_idx: Property index
+ */
+void msm_property_install_range(struct msm_property_info *info,
+		const char *name,
+		int flags,
+		uint64_t min,
+		uint64_t max,
+		uint64_t init,
+		uint32_t property_idx);
+
+/**
+ * msm_property_install_volatile_range - install drm range property
+ *	This function is similar to msm_property_install_range, but assumes
+ *	that the property is meant for holding user pointers or descriptors
+ *	that may reference volatile data without having an updated value.
+ * @info: Pointer to property info container struct
+ * @name: Property name
+ * @flags: Other property type flags, e.g. DRM_MODE_PROP_IMMUTABLE
+ * @min: Min property value
+ * @max: Max property value
+ * @init: Default Property value
+ * @property_idx: Property index
+ */
+void msm_property_install_volatile_range(struct msm_property_info *info,
+		const char *name,
+		int flags,
+		uint64_t min,
+		uint64_t max,
+		uint64_t init,
+		uint32_t property_idx);
+
+/**
+ * msm_property_install_signed_range - install signed drm range property
+ * @info: Pointer to property info container struct
+ * @name: Property name
+ * @flags: Other property type flags, e.g. DRM_MODE_PROP_IMMUTABLE
+ * @min: Min property value
+ * @max: Max property value
+ * @init: Default Property value
+ * @property_idx: Property index
+ */
+void msm_property_install_signed_range(struct msm_property_info *info,
+		const char *name,
+		int flags,
+		int64_t min,
+		int64_t max,
+		int64_t init,
+		uint32_t property_idx);
+
+/**
+ * msm_property_install_volatile_signed_range - install signed range property
+ *	This function is similar to msm_property_install_signed_range, but
+ *	assumes
+ *	that the property is meant for holding user pointers or descriptors
+ *	that may reference volatile data without having an updated value.
+ * @info: Pointer to property info container struct
+ * @name: Property name
+ * @flags: Other property type flags, e.g. DRM_MODE_PROP_IMMUTABLE
+ * @min: Min property value
+ * @max: Max property value
+ * @init: Default Property value
+ * @property_idx: Property index
+ */
+void msm_property_install_volatile_signed_range(struct msm_property_info *info,
+		const char *name,
+		int flags,
+		int64_t min,
+		int64_t max,
+		int64_t init,
+		uint32_t property_idx);
+
+/**
+ * msm_property_install_rotation - install standard drm rotation property
+ * @info: Pointer to property info container struct
+ * @supported_rotations: Bitmask of supported rotation values (see
+ *                       drm_mode_create_rotation_property for more details)
+ * @property_idx: Property index
+ */
+void msm_property_install_rotation(struct msm_property_info *info,
+		unsigned int supported_rotations,
+		uint32_t property_idx);
+
+/**
+ * msm_property_install_enum - install standard drm enum/bitmask property
+ * @info: Pointer to property info container struct
+ * @name: Property name
+ * @flags: Other property type flags, e.g. DRM_MODE_PROP_IMMUTABLE
+ * @is_bitmask: Set to non-zero to create a bitmask property, rather than an
+ *              enumeration one
+ * @values: Array of allowable enumeration/bitmask values
+ * @num_values: Size of values array
+ * @property_idx: Property index
+ * @default_value: Default value of current property
+ */
+void msm_property_install_enum(struct msm_property_info *info,
+		const char *name,
+		int flags,
+		int is_bitmask,
+		const struct drm_prop_enum_list *values,
+		int num_values,
+		uint32_t property_idx,
+		uint64_t default_value);
+
+/**
+ * msm_property_install_blob - install standard drm blob property
+ * @info: Pointer to property info container struct
+ * @name: Property name
+ * @flags: Extra flags for property creation
+ * @property_idx: Property index
+ */
+void msm_property_install_blob(struct msm_property_info *info,
+		const char *name,
+		int flags,
+		uint32_t property_idx);
+
+/**
+ * msm_property_install_get_status - query overall status of property additions
+ * @info: Pointer to property info container struct
+ * Returns: Zero if previous property install calls were all successful
+ */
+int msm_property_install_get_status(struct msm_property_info *info);
+
+/**
+ * msm_property_index - determine property index from drm_property ptr
+ * @info: Pointer to property info container struct
+ * @property: Incoming property pointer
+ * Returns: Valid property index, or -EINVAL on error
+ */
+int msm_property_index(struct msm_property_info *info,
+		struct drm_property *property);
+
+/**
+ * msm_property_atomic_set - helper function for atomic property set callback
+ * @info: Pointer to property info container struct
+ * @property_values: Pointer to property values cache array
+ * @property_blobs: Pointer to property blobs cache array
+ * @property: Incoming property pointer
+ * @val: Incoming property value
+ * Returns: Zero on success
+ */
+int msm_property_atomic_set(struct msm_property_info *info,
+		uint64_t *property_values,
+		struct drm_property_blob **property_blobs,
+		struct drm_property *property,
+		uint64_t val);
+
+/**
+ * msm_property_atomic_get - helper function for atomic property get callback
+ * @info: Pointer to property info container struct
+ * @property_values: Pointer to property values cache array
+ * @property_blobs: Pointer to property blobs cache array
+ * @property: Incoming property pointer
+ * @val: Pointer to variable for receiving property value
+ * Returns: Zero on success
+ */
+int msm_property_atomic_get(struct msm_property_info *info,
+		uint64_t *property_values,
+		struct drm_property_blob **property_blobs,
+		struct drm_property *property,
+		uint64_t *val);
+
+/**
+ * msm_property_alloc_state - helper function for allocating local state objects
+ * @info: Pointer to property info container struct
+ * Returns: Pointer to a newly allocated state object, or NULL on failure
+ */
+void *msm_property_alloc_state(struct msm_property_info *info);
+
+/**
+ * msm_property_reset_state - helper function for state reset callback
+ * @info: Pointer to property info container struct
+ * @state: Pointer to local state structure
+ * @property_values: Pointer to property values cache array
+ * @property_blobs: Pointer to property blobs cache array
+ */
+void msm_property_reset_state(struct msm_property_info *info,
+		void *state,
+		uint64_t *property_values,
+		struct drm_property_blob **property_blobs);
+
+/**
+ * msm_property_duplicate_state - helper function for duplicate state cb
+ * @info: Pointer to property info container struct
+ * @old_state: Pointer to original state structure
+ * @state: Pointer to newly created state structure
+ * @property_values: Pointer to property values cache array
+ * @property_blobs: Pointer to property blobs cache array
+ */
+void msm_property_duplicate_state(struct msm_property_info *info,
+		void *old_state,
+		void *state,
+		uint64_t *property_values,
+		struct drm_property_blob **property_blobs);
+
+/**
+ * msm_property_destroy_state - helper function for destroy state cb
+ * @info: Pointer to property info container struct
+ * @state: Pointer to local state structure
+ * @property_values: Pointer to property values cache array
+ * @property_blobs: Pointer to property blobs cache array
+ */
+void msm_property_destroy_state(struct msm_property_info *info,
+		void *state,
+		uint64_t *property_values,
+		struct drm_property_blob **property_blobs);
+
+/**
+ * msm_property_get_blob - obtain cached data pointer for drm blob property
+ * @info: Pointer to property info container struct
+ * @property_blobs: Pointer to property blobs cache array
+ * @byte_len: Optional pointer to variable for accepting blob size
+ * @property_idx: Property index
+ * Returns: Pointer to blob data
+ */
+void *msm_property_get_blob(struct msm_property_info *info,
+		struct drm_property_blob **property_blobs,
+		size_t *byte_len,
+		uint32_t property_idx);
+
+/**
+ * msm_property_set_blob - update blob property on a drm object
+ * This function updates the blob property value of the given drm object. Its
+ * intended use is to update blob properties that have been created with the
+ * DRM_MODE_PROP_IMMUTABLE flag set.
+ * @info: Pointer to property info container struct
+ * @blob_reference: Reference to a pointer that holds the created data blob
+ * @blob_data: Pointer to blob data
+ * @byte_len: Length of blob data, in bytes
+ * @property_idx: Property index
+ * Returns: Zero on success
+ */
+int msm_property_set_blob(struct msm_property_info *info,
+		struct drm_property_blob **blob_reference,
+		void *blob_data,
+		size_t byte_len,
+		uint32_t property_idx);
+
+/**
+ * msm_property_set_property - update property on a drm object
+ * This function updates the property value of the given drm object. Its
+ * intended use is to update properties that have been created with the
+ * DRM_MODE_PROP_IMMUTABLE flag set.
+ * Note: This function cannot be called on a blob.
+ * @info: Pointer to property info container struct
+ * @property_values: Pointer to property values cache array
+ * @property_idx: Property index
+ * @val: value of the property to set
+ * Returns: Zero on success
+ */
+int msm_property_set_property(struct msm_property_info *info,
+		uint64_t *property_values,
+		uint32_t property_idx,
+		uint64_t val);
+
+#endif /* _MSM_PROP_H_ */
+
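
Taken together, the header above implies a fixed install sequence: msm_property_init() wires up the caller-owned arrays, each install helper bumps install_request (and install_count on success), and msm_property_install_get_status() verifies the two match. A minimal sketch of that sequence, not part of the patch; the PLANE_PROP_* indices, the arrays, and the state struct are illustrative assumptions.

enum { PLANE_PROP_ALPHA, PLANE_PROP_ZPOS, PLANE_PROP_COUNT };

struct my_plane_state { uint64_t values[PLANE_PROP_COUNT]; };

static struct drm_property *my_prop_array[PLANE_PROP_COUNT];
static struct msm_property_data my_prop_data[PLANE_PROP_COUNT];

static int my_install_plane_properties(struct drm_plane *plane,
		struct msm_property_info *info)
{
	msm_property_init(info, &plane->base, plane->dev,
			my_prop_array, my_prop_data,
			PLANE_PROP_COUNT, 0 /* no blobs */,
			sizeof(struct my_plane_state));

	/* (info, name, flags, min, max, init, index) */
	msm_property_install_range(info, "alpha", 0, 0, 255,
			255, PLANE_PROP_ALPHA);
	msm_property_install_range(info, "zpos", 0, 0, 7,
			0, PLANE_PROP_ZPOS);

	/* zero only if every requested property was created and attached */
	return msm_property_install_get_status(info);
}
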
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/gpu/drm/msm/msm_smmu.c	2019-01-22 16:16:23.511246479 +0100
@@ -0,0 +1,452 @@
+/*
+ * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/pm_runtime.h>
+#include <linux/msm_dma_iommu_mapping.h>
+
+#include <asm/dma-iommu.h>
+#include <soc/qcom/secure_buffer.h>
+
+#include "msm_drv.h"
+#include "msm_mmu.h"
+
+#ifndef SZ_4G
+#define SZ_4G	(((size_t) SZ_1G) * 4)
+#endif
+
+struct msm_smmu_client {
+	struct device *dev;
+	struct dma_iommu_mapping *mmu_mapping;
+	bool domain_attached;
+};
+
+struct msm_smmu {
+	struct msm_mmu base;
+	struct device *client_dev;
+	struct msm_smmu_client *client;
+};
+
+struct msm_smmu_domain {
+	const char *label;
+	size_t va_start;
+	size_t va_size;
+	bool secure;
+};
+
+#define to_msm_smmu(x) container_of(x, struct msm_smmu, base)
+#define msm_smmu_to_client(smmu) (smmu->client)
+
+static int msm_smmu_fault_handler(struct iommu_domain *iommu,
+	 struct device *dev, unsigned long iova, int flags, void *arg)
+{
+	dev_info(dev, "%s: iova=0x%08lx, flags=0x%x, iommu=%pK\n", __func__,
+			iova, flags, iommu);
+	return 0;
+}
+
+static int _msm_smmu_create_mapping(struct msm_smmu_client *client,
+	const struct msm_smmu_domain *domain);
+
+static int msm_smmu_attach(struct msm_mmu *mmu, const char **names, int cnt)
+{
+	struct msm_smmu *smmu = to_msm_smmu(mmu);
+	struct msm_smmu_client *client = msm_smmu_to_client(smmu);
+	int rc = 0;
+
+	if (!client) {
+		pr_err("undefined smmu client\n");
+		return -EINVAL;
+	}
+
+	/* domain attach only once */
+	if (client->domain_attached)
+		return 0;
+
+	rc = arm_iommu_attach_device(client->dev,
+			client->mmu_mapping);
+	if (rc) {
+		dev_err(client->dev, "iommu attach dev failed (%d)\n",
+				rc);
+		return rc;
+	}
+
+	client->domain_attached = true;
+
+	dev_dbg(client->dev, "iommu domain attached\n");
+
+	return 0;
+}
+
+static void msm_smmu_detach(struct msm_mmu *mmu)
+{
+	struct msm_smmu *smmu = to_msm_smmu(mmu);
+	struct msm_smmu_client *client = msm_smmu_to_client(smmu);
+
+	if (!client) {
+		pr_err("undefined smmu client\n");
+		return;
+	}
+
+	if (!client->domain_attached)
+		return;
+
+	arm_iommu_detach_device(client->dev);
+	client->domain_attached = false;
+	dev_dbg(client->dev, "iommu domain detached\n");
+}
+
+static int msm_smmu_map(struct msm_mmu *mmu, uint64_t iova,
+		struct sg_table *sgt, u32 flags, void *priv)
+{
+	struct msm_smmu *smmu = to_msm_smmu(mmu);
+	struct msm_smmu_client *client = msm_smmu_to_client(smmu);
+	struct iommu_domain *domain;
+	int ret;
+
+	if (!client || !sgt)
+		return -EINVAL;
+
+	if (iova != 0) {
+		if (!client->mmu_mapping || !client->mmu_mapping->domain)
+			return -EINVAL;
+
+		domain = client->mmu_mapping->domain;
+
+		return iommu_map_sg(domain, iova, sgt->sgl,
+				sgt->nents, flags);
+	} else {
+		if (priv)
+			ret = msm_dma_map_sg_lazy(client->dev, sgt->sgl,
+					sgt->nents, DMA_BIDIRECTIONAL, priv);
+		else
+			ret = dma_map_sg(client->dev, sgt->sgl, sgt->nents,
+				DMA_BIDIRECTIONAL);
+
+		return (ret != sgt->nents) ? -ENOMEM : 0;
+	}
+}
+
+static void msm_smmu_unmap(struct msm_mmu *mmu, uint64_t iova,
+		struct sg_table *sgt, void *priv)
+{
+	struct msm_smmu *smmu = to_msm_smmu(mmu);
+	struct msm_smmu_client *client = msm_smmu_to_client(smmu);
+
+	if (priv)
+		msm_dma_unmap_sg(client->dev, sgt->sgl, sgt->nents,
+			DMA_BIDIRECTIONAL, priv);
+	else
+		dma_unmap_sg(client->dev, sgt->sgl, sgt->nents,
+			DMA_BIDIRECTIONAL);
+}
+
+static void msm_smmu_destroy(struct msm_mmu *mmu)
+{
+	struct msm_smmu *smmu = to_msm_smmu(mmu);
+	struct platform_device *pdev = to_platform_device(smmu->client_dev);
+
+	if (smmu->client_dev)
+		platform_device_unregister(pdev);
+	kfree(smmu);
+}
+
+/* callers can use this API to set an attribute of the smmu */
+static int msm_smmu_set_property(struct msm_mmu *mmu,
+		enum iommu_attr attr, void *data)
+{
+	struct msm_smmu *smmu = to_msm_smmu(mmu);
+	struct msm_smmu_client *client = msm_smmu_to_client(smmu);
+	struct iommu_domain *domain;
+	int ret = 0;
+
+	if (!client)
+		return -EINVAL;
+
+	domain = client->mmu_mapping->domain;
+	if (!domain)
+		return -EINVAL;
+
+	ret = iommu_domain_set_attr(domain, attr, data);
+	if (ret)
+		DRM_ERROR("set domain attribute failed\n");
+
+	return ret;
+}
+
+static const struct msm_mmu_funcs funcs = {
+	.attach = msm_smmu_attach,
+	.detach = msm_smmu_detach,
+	.map = msm_smmu_map,
+	.unmap = msm_smmu_unmap,
+	.destroy = msm_smmu_destroy,
+	.set_property = msm_smmu_set_property,
+};
+
+static struct msm_smmu_domain msm_smmu_domains[MSM_SMMU_DOMAIN_MAX] = {
+	[MSM_SMMU_DOMAIN_UNSECURE] = {
+		.label = "mdp_ns",
+		.va_start = SZ_128K,
+		.va_size = SZ_4G - SZ_128K,
+		.secure = false,
+	},
+	[MSM_SMMU_DOMAIN_SECURE] = {
+		.label = "mdp_s",
+		.va_start = SZ_128K,
+		.va_size = SZ_4G - SZ_128K,
+		.secure = true,
+	},
+	[MSM_SMMU_DOMAIN_NRT_UNSECURE] = {
+		.label = "rot_ns",
+		.va_start = SZ_128K,
+		.va_size = SZ_4G - SZ_128K,
+		.secure = false,
+	},
+	[MSM_SMMU_DOMAIN_NRT_SECURE] = {
+		.label = "rot_s",
+		.va_start = SZ_128K,
+		.va_size = SZ_4G - SZ_128K,
+		.secure = true,
+	},
+};
+
+static const struct of_device_id msm_smmu_dt_match[] = {
+	{ .compatible = "qcom,smmu_sde_unsec",
+		.data = &msm_smmu_domains[MSM_SMMU_DOMAIN_UNSECURE] },
+	{ .compatible = "qcom,smmu_sde_sec",
+		.data = &msm_smmu_domains[MSM_SMMU_DOMAIN_SECURE] },
+	{ .compatible = "qcom,smmu_sde_nrt_unsec",
+		.data = &msm_smmu_domains[MSM_SMMU_DOMAIN_NRT_UNSECURE] },
+	{ .compatible = "qcom,smmu_sde_nrt_sec",
+		.data = &msm_smmu_domains[MSM_SMMU_DOMAIN_NRT_SECURE] },
+	{}
+};
+MODULE_DEVICE_TABLE(of, msm_smmu_dt_match);
+
+static struct device *msm_smmu_device_create(struct device *dev,
+		enum msm_mmu_domain_type domain,
+		struct msm_smmu *smmu)
+{
+	struct device_node *child;
+	struct platform_device *pdev;
+	int i;
+	const char *compat = NULL;
+
+	for (i = 0; i < ARRAY_SIZE(msm_smmu_dt_match); i++) {
+		if (msm_smmu_dt_match[i].data == &msm_smmu_domains[domain]) {
+			compat = msm_smmu_dt_match[i].compatible;
+			break;
+		}
+	}
+
+	if (!compat) {
+		DRM_ERROR("unable to find matching domain for %d\n", domain);
+		return ERR_PTR(-ENOENT);
+	}
+	DRM_INFO("found domain %d compat: %s\n", domain, compat);
+
+	if (domain == MSM_SMMU_DOMAIN_UNSECURE) {
+		int rc;
+
+		smmu->client = devm_kzalloc(dev,
+				sizeof(struct msm_smmu_client), GFP_KERNEL);
+		if (!smmu->client)
+			return ERR_PTR(-ENOMEM);
+
+		smmu->client->dev = dev;
+
+		rc = _msm_smmu_create_mapping(msm_smmu_to_client(smmu),
+			msm_smmu_dt_match[i].data);
+		if (rc) {
+			devm_kfree(dev, smmu->client);
+			smmu->client = NULL;
+			return ERR_PTR(rc);
+		}
+
+		return NULL;
+	}
+
+	child = of_find_compatible_node(dev->of_node, NULL, compat);
+	if (!child) {
+		DRM_ERROR("unable to find compatible node for %s\n", compat);
+		return ERR_PTR(-ENODEV);
+	}
+
+	pdev = of_platform_device_create(child, NULL, dev);
+	if (!pdev) {
+		DRM_ERROR("unable to create smmu platform dev for domain %d\n",
+				domain);
+		return ERR_PTR(-ENODEV);
+	}
+
+	smmu->client = platform_get_drvdata(pdev);
+
+	return &pdev->dev;
+}
+
+struct msm_mmu *msm_smmu_new(struct device *dev,
+		enum msm_mmu_domain_type domain)
+{
+	struct msm_smmu *smmu;
+	struct device *client_dev;
+	struct msm_smmu_client *client;
+
+	smmu = kzalloc(sizeof(*smmu), GFP_KERNEL);
+	if (!smmu)
+		return ERR_PTR(-ENOMEM);
+
+	client_dev = msm_smmu_device_create(dev, domain, smmu);
+	if (IS_ERR(client_dev)) {
+		kfree(smmu);
+		return (void *)client_dev ? : ERR_PTR(-ENODEV);
+	}
+
+	smmu->client_dev = client_dev;
+	msm_mmu_init(&smmu->base, dev, &funcs);
+
+	client = msm_smmu_to_client(smmu);
+	if (client)
+		iommu_set_fault_handler(client->mmu_mapping->domain,
+					msm_smmu_fault_handler, dev);
+
+	return &smmu->base;
+}
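
msm_smmu_new() hides the two creation paths above (an in-place client for the unsecure display domain, a child platform device for the others), so callers only deal with the generic msm_mmu handle. A minimal usage sketch, not part of the patch; the NULL/0 attach arguments reflect the fact that msm_smmu_attach() ignores its names/cnt parameters, and my_create_display_mmu() is a hypothetical caller.

static struct msm_mmu *my_create_display_mmu(struct device *dev)
{
	struct msm_mmu *mmu;
	int ret;

	mmu = msm_smmu_new(dev, MSM_SMMU_DOMAIN_UNSECURE);
	if (IS_ERR(mmu))
		return mmu;

	/* first attach binds the arm_iommu mapping; later calls are no-ops */
	ret = mmu->funcs->attach(mmu, NULL, 0);
	if (ret) {
		mmu->funcs->destroy(mmu);
		return ERR_PTR(ret);
	}

	return mmu;
}
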
+
+static int _msm_smmu_create_mapping(struct msm_smmu_client *client,
+	const struct msm_smmu_domain *domain)
+{
+	int rc;
+
+	client->mmu_mapping = arm_iommu_create_mapping(&platform_bus_type,
+			domain->va_start, domain->va_size);
+	if (IS_ERR(client->mmu_mapping)) {
+		dev_err(client->dev,
+			"iommu create mapping failed for domain=%s\n",
+			domain->label);
+		return PTR_ERR(client->mmu_mapping);
+	}
+
+	if (domain->secure) {
+		int secure_vmid = VMID_CP_PIXEL;
+
+		rc = iommu_domain_set_attr(client->mmu_mapping->domain,
+				DOMAIN_ATTR_SECURE_VMID, &secure_vmid);
+		if (rc) {
+			dev_err(client->dev, "couldn't set secure pix vmid\n");
+			goto error;
+		}
+	}
+
+	DRM_INFO("Created domain %s [%zx,%zx] secure=%d\n",
+			domain->label, domain->va_start, domain->va_size,
+			domain->secure);
+
+	return 0;
+
+error:
+	arm_iommu_release_mapping(client->mmu_mapping);
+	return rc;
+}
+
+/**
+ * msm_smmu_probe()
+ * @pdev: platform device
+ *
+ * Each smmu context acts as a separate device and the context banks are
+ * configured with a VA range.
+ * Registers the clks as each context bank has its own clks, for which voting
+ * has to be done everytime before using that context bank.
+ */
+static int msm_smmu_probe(struct platform_device *pdev)
+{
+	const struct of_device_id *match;
+	struct msm_smmu_client *client;
+	const struct msm_smmu_domain *domain;
+	int rc;
+
+	match = of_match_device(msm_smmu_dt_match, &pdev->dev);
+	if (!match || !match->data) {
+		dev_err(&pdev->dev, "probe failed as match data is invalid\n");
+		return -EINVAL;
+	}
+
+	domain = match->data;
+	if (!domain) {
+		dev_err(&pdev->dev, "no matching device found\n");
+		return -EINVAL;
+	}
+
+	DRM_INFO("probing device %s\n", match->compatible);
+
+	client = devm_kzalloc(&pdev->dev, sizeof(*client), GFP_KERNEL);
+	if (!client)
+		return -ENOMEM;
+
+	client->dev = &pdev->dev;
+
+	rc = _msm_smmu_create_mapping(client, domain);
+	platform_set_drvdata(pdev, client);
+
+	return rc;
+}
+
+static int msm_smmu_remove(struct platform_device *pdev)
+{
+	struct msm_smmu_client *client;
+
+	client = platform_get_drvdata(pdev);
+	if (client->domain_attached) {
+		arm_iommu_detach_device(client->dev);
+		client->domain_attached = false;
+	}
+	arm_iommu_release_mapping(client->mmu_mapping);
+
+	return 0;
+}
+
+static struct platform_driver msm_smmu_driver = {
+	.probe = msm_smmu_probe,
+	.remove = msm_smmu_remove,
+	.driver = {
+		.name = "msmdrm_smmu",
+		.of_match_table = msm_smmu_dt_match,
+	},
+};
+
+int __init msm_smmu_driver_init(void)
+{
+	int ret;
+
+	ret = platform_driver_register(&msm_smmu_driver);
+	if (ret)
+		pr_err("msm_smmu_driver_init() failed!\n");
+
+	return ret;
+}
+
+void __exit msm_smmu_driver_cleanup(void)
+{
+	platform_driver_unregister(&msm_smmu_driver);
+}
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("MSM SMMU driver");
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/gpu/drm/msm/msm_snapshot_api.h	2019-01-22 16:16:23.511246479 +0100
@@ -0,0 +1,134 @@
+/* Copyright (c) 2016 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef MSM_SNAPSHOT_API_H_
+#define MSM_SNAPSHOT_API_H_
+
+#include <linux/types.h>
+
+/* High word is the magic, low word is the snapshot header version */
+#define SNAPSHOT_MAGIC 0x504D0002
+
+struct msm_snapshot_header {
+	__u32 magic;
+	__u32 gpuid;
+	__u32 chipid;
+} __packed;
+
+#define SNAPSHOT_SECTION_MAGIC 0xABCD
+
+struct msm_snapshot_section_header {
+	__u16 magic;
+	__u16 id;
+	__u32 size;
+} __packed;
+
+/* Section identifiers */
+#define SNAPSHOT_SECTION_OS		0x0101
+#define SNAPSHOT_SECTION_REGS_V2	0x0202
+#define SNAPSHOT_SECTION_RB_V2		0x0302
+#define SNAPSHOT_SECTION_IB_V2		0x0402
+#define SNAPSHOT_SECTION_INDEXED_REGS	0x0501
+#define SNAPSHOT_SECTION_DEBUG		0x0901
+#define SNAPSHOT_SECTION_DEBUGBUS	0x0A01
+#define SNAPSHOT_SECTION_GPU_OBJECT_V2	0x0B02
+#define SNAPSHOT_SECTION_MEMLIST_V2	0x0E02
+#define SNAPSHOT_SECTION_SHADER		0x1201
+#define SNAPSHOT_SECTION_END		0xFFFF
+
+#define SNAPSHOT_OS_LINUX_V3          0x00000202
+
+struct msm_snapshot_linux {
+	struct msm_snapshot_section_header header;
+	int osid;
+	__u32 seconds;
+	__u32 power_flags;
+	__u32 power_level;
+	__u32 power_interval_timeout;
+	__u32 grpclk;
+	__u32 busclk;
+	__u64 ptbase;
+	__u32 pid;
+	__u32 current_context;
+	__u32 ctxtcount;
+	unsigned char release[32];
+	unsigned char version[32];
+	unsigned char comm[16];
+} __packed;
+
+struct msm_snapshot_ringbuffer {
+	struct msm_snapshot_section_header header;
+	int start;
+	int end;
+	int rbsize;
+	int wptr;
+	int rptr;
+	int count;
+	__u32 timestamp_queued;
+	__u32 timestamp_retired;
+	__u64 gpuaddr;
+	__u32 id;
+} __packed;
+
+struct msm_snapshot_regs {
+	struct msm_snapshot_section_header header;
+	__u32 count;
+} __packed;
+
+struct msm_snapshot_indexed_regs {
+	struct msm_snapshot_section_header header;
+	__u32 index_reg;
+	__u32 data_reg;
+	__u32 start;
+	__u32 count;
+} __packed;
+
+#define SNAPSHOT_DEBUG_CP_MEQ		7
+#define SNAPSHOT_DEBUG_CP_PM4_RAM	8
+#define SNAPSHOT_DEBUG_CP_PFP_RAM	9
+#define SNAPSHOT_DEBUG_CP_ROQ		10
+#define SNAPSHOT_DEBUG_SHADER_MEMORY	11
+#define SNAPSHOT_DEBUG_CP_MERCIU	12
+
+struct msm_snapshot_debug {
+	struct msm_snapshot_section_header header;
+	__u32 type;
+	__u32 size;
+} __packed;
+
+struct msm_snapshot_debugbus {
+	struct msm_snapshot_section_header header;
+	__u32 id;
+	__u32 count;
+} __packed;
+
+struct msm_snapshot_shader {
+	struct msm_snapshot_section_header header;
+	__u32 type;
+	__u32 index;
+	__u32 size;
+} __packed;
+
+#define SNAPSHOT_GPU_OBJECT_SHADER  1
+#define SNAPSHOT_GPU_OBJECT_IB      2
+#define SNAPSHOT_GPU_OBJECT_GENERIC 3
+#define SNAPSHOT_GPU_OBJECT_DRAW    4
+#define SNAPSHOT_GPU_OBJECT_GLOBAL  5
+
+struct msm_snapshot_gpu_object {
+	struct msm_snapshot_section_header header;
+	__u32 type;
+	__u64 gpuaddr;
+	__u64 pt_base;
+	__u64 size;
+} __packed;
+#endif
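
Every section in a dump starts with the same three-field header, and the snapshot core terminates the stream with an explicit SNAPSHOT_SECTION_END record, so the layout can be walked without knowing every section type. A minimal userspace-style walker sketch, not part of the patch; the buffer handling and printf() output (which assumes <stdio.h>) are illustrative.

static void my_walk_snapshot(const unsigned char *buf, size_t len)
{
	const unsigned char *ptr = buf + sizeof(struct msm_snapshot_header);
	const unsigned char *end = buf + len;

	while (ptr + sizeof(struct msm_snapshot_section_header) <= end) {
		const struct msm_snapshot_section_header *sect =
			(const void *)ptr;

		if (sect->magic != SNAPSHOT_SECTION_MAGIC || !sect->size)
			break;

		printf("section id 0x%04x, %u bytes\n", sect->id, sect->size);

		if (sect->id == SNAPSHOT_SECTION_END)
			break;

		/* size covers the header plus the section payload */
		ptr += sect->size;
	}
}
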
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/gpu/drm/msm/msm_snapshot.c	2019-01-22 16:16:23.511246479 +0100
@@ -0,0 +1,105 @@
+/* Copyright (c) 2016 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include "msm_gpu.h"
+#include "msm_gem.h"
+#include "msm_snapshot_api.h"
+
+void msm_snapshot_destroy(struct msm_gpu *gpu, struct msm_snapshot *snapshot)
+{
+	struct drm_device *dev = gpu->dev;
+	struct msm_drm_private *priv = dev->dev_private;
+	struct platform_device *pdev = priv->gpu_pdev;
+
+	if (!snapshot)
+		return;
+
+	dma_free_coherent(&pdev->dev, SZ_1M, snapshot->ptr,
+		snapshot->physaddr);
+
+	kfree(snapshot);
+}
+
+struct msm_snapshot *msm_snapshot_new(struct msm_gpu *gpu)
+{
+	struct drm_device *dev = gpu->dev;
+	struct msm_drm_private *priv = dev->dev_private;
+	struct platform_device *pdev = priv->gpu_pdev;
+	struct msm_snapshot *snapshot;
+
+	snapshot = kzalloc(sizeof(*snapshot), GFP_KERNEL);
+	if (!snapshot)
+		return ERR_PTR(-ENOMEM);
+
+	snapshot->ptr = dma_alloc_coherent(&pdev->dev, SZ_1M,
+		&snapshot->physaddr, GFP_KERNEL);
+
+	if (!snapshot->ptr) {
+		kfree(snapshot);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	seq_buf_init(&snapshot->buf, snapshot->ptr, SZ_1M);
+
+	return snapshot;
+}
+
+int msm_gpu_snapshot(struct msm_gpu *gpu, struct msm_snapshot *snapshot)
+{
+	int ret;
+	struct msm_snapshot_header header;
+	uint64_t val;
+
+	if (!snapshot)
+		return -ENOMEM;
+
+	/*
+	 * For now, blow away the snapshot and take a new one - the most
+	 * interesting hang is the last one we saw
+	 */
+	seq_buf_init(&snapshot->buf, snapshot->ptr, SZ_1M);
+
+	header.magic = SNAPSHOT_MAGIC;
+	gpu->funcs->get_param(gpu, MSM_PARAM_GPU_ID, &val);
+	header.gpuid = lower_32_bits(val);
+
+	gpu->funcs->get_param(gpu, MSM_PARAM_CHIP_ID, &val);
+	header.chipid = lower_32_bits(val);
+
+	seq_buf_putmem(&snapshot->buf, &header, sizeof(header));
+
+	ret = gpu->funcs->snapshot(gpu, snapshot);
+
+	if (!ret) {
+		struct msm_snapshot_section_header end;
+
+		end.magic = SNAPSHOT_SECTION_MAGIC;
+		end.id = SNAPSHOT_SECTION_END;
+		end.size = sizeof(end);
+
+		seq_buf_putmem(&snapshot->buf, &end, sizeof(end));
+
+		dev_info(gpu->dev->dev, "GPU snapshot created [0x%pa (%d bytes)]\n",
+			&snapshot->physaddr, seq_buf_used(&snapshot->buf));
+	}
+
+	return ret;
+}
+
+int msm_snapshot_write(struct msm_gpu *gpu, struct seq_file *m)
+{
+	if (gpu && gpu->snapshot)
+		seq_write(m, gpu->snapshot->ptr,
+			seq_buf_used(&gpu->snapshot->buf));
+
+	return 0;
+}
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/gpu/drm/msm/msm_snapshot.h	2019-01-22 16:16:23.511246479 +0100
@@ -0,0 +1,85 @@
+/* Copyright (c) 2016 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef MSM_SNAPSHOT_H_
+#define MSM_SNAPSHOT_H_
+
+#include <linux/string.h>
+#include <linux/seq_buf.h>
+#include "msm_snapshot_api.h"
+
+struct msm_snapshot {
+	void *ptr;
+	struct seq_buf buf;
+	phys_addr_t physaddr;
+	uint32_t index;
+	uint32_t remain;
+	unsigned long timestamp;
+	void *priv;
+};
+
+/* Write a uint32_t value to the next position in the snapshot buffer */
+static inline void SNAPSHOT_WRITE_U32(struct msm_snapshot *snapshot,
+		uint32_t value)
+{
+	seq_buf_putmem(&snapshot->buf, &value, sizeof(value));
+}
+
+/* Copy a block of memory to the next position in the snapshot buffer */
+static inline void SNAPSHOT_MEMCPY(struct msm_snapshot *snapshot, void *src,
+		uint32_t size)
+{
+	if (size)
+		seq_buf_putmem(&snapshot->buf, src, size);
+}
+
+static inline bool _snapshot_header(struct msm_snapshot *snapshot,
+		struct msm_snapshot_section_header *header,
+		u32 headsz, u32 datasz, u32 id)
+{
+	u32 size = headsz + datasz;
+
+	if (seq_buf_buffer_left(&snapshot->buf) <= size)
+		return false;
+
+	/* Fill in the section header fields */
+	header->magic = SNAPSHOT_SECTION_MAGIC;
+	header->id = id;
+	header->size = headsz + datasz;
+
+	/* Write the section header */
+	seq_buf_putmem(&snapshot->buf, header, headsz);
+
+	/* The caller will fill in the data from here */
+	return true;
+}
+
+/*
+ * SNAPSHOT_HEADER
+ * _snapshot: Pointer to struct msm_snapshot
+ * _header: Local variable containing the sub-section header
+ * _id: Section ID to write
+ * _dwords: Size of the data section (in dwords)
+ */
+#define SNAPSHOT_HEADER(_snapshot, _header, _id, _dwords) \
+	_snapshot_header((_snapshot), \
+		(struct msm_snapshot_section_header *) &(_header), \
+		sizeof(_header), (_dwords) << 2, (_id))
+
+struct msm_gpu;
+
+struct msm_snapshot *msm_snapshot_new(struct msm_gpu *gpu);
+void msm_snapshot_destroy(struct msm_gpu *gpu, struct msm_snapshot *snapshot);
+int msm_gpu_snapshot(struct msm_gpu *gpu, struct msm_snapshot *snapshot);
+int msm_snapshot_write(struct msm_gpu *gpu, struct seq_file *m);
+
+#endif
+
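
Going by the helpers above, a section writer reserves its header with SNAPSHOT_HEADER(), which returns false once the 1 MB buffer cannot hold the section, and then streams its payload with SNAPSHOT_WRITE_U32()/SNAPSHOT_MEMCPY(). A sketch under stated assumptions: SNAPSHOT_SECTION_REGS stands in for a real section id from msm_snapshot_api.h, and real callers would normally pass an id-specific header type rather than the bare section header.

/* Hypothetical section writer: dump 'count' register values as one
 * section. SNAPSHOT_SECTION_REGS is an illustrative id. */
static bool snapshot_dump_regs(struct msm_snapshot *snapshot,
		const u32 *values, u32 count)
{
	struct msm_snapshot_section_header header;
	u32 i;

	/* reserves sizeof(header) plus count dwords of data */
	if (!SNAPSHOT_HEADER(snapshot, header, SNAPSHOT_SECTION_REGS, count))
		return false;

	for (i = 0; i < count; i++)
		SNAPSHOT_WRITE_U32(snapshot, values[i]);

	return true;
}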
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/gpu/drm/msm/msm_submitqueue.c	2019-01-22 16:16:23.511246479 +0100
@@ -0,0 +1,151 @@
+/* Copyright (c) 2017 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kref.h>
+#include "msm_gpu.h"
+
+void msm_submitqueue_destroy(struct kref *kref)
+{
+	struct msm_gpu_submitqueue *queue = container_of(kref,
+		struct msm_gpu_submitqueue, ref);
+
+	kfree(queue);
+}
+
+struct msm_gpu_submitqueue *msm_submitqueue_get(struct msm_file_private *ctx,
+		u32 id)
+{
+	struct msm_gpu_submitqueue *entry;
+
+	if (!ctx)
+		return NULL;
+
+	read_lock(&ctx->queuelock);
+
+	list_for_each_entry(entry, &ctx->submitqueues, node) {
+		if (entry->id == id) {
+			kref_get(&entry->ref);
+			read_unlock(&ctx->queuelock);
+
+			return entry;
+		}
+	}
+
+	read_unlock(&ctx->queuelock);
+	return NULL;
+}
+
+void msm_submitqueue_close(struct msm_file_private *ctx)
+{
+	struct msm_gpu_submitqueue *entry, *tmp;
+
+	/*
+	 * No lock is needed here: the file is being closed, so no
+	 * more user ioctls can come our way
+	 */
+
+	list_for_each_entry_safe(entry, tmp, &ctx->submitqueues, node)
+		msm_submitqueue_put(entry);
+}
+
+int msm_submitqueue_create(struct msm_file_private *ctx, u32 prio, u32 flags,
+		u32 *id)
+{
+	struct msm_gpu_submitqueue *queue = kzalloc(sizeof(*queue), GFP_KERNEL);
+
+	if (!queue)
+		return -ENOMEM;
+
+	kref_init(&queue->ref);
+	queue->flags = flags;
+	queue->prio = prio;
+
+	write_lock(&ctx->queuelock);
+
+	queue->id = ctx->queueid++;
+
+	if (id)
+		*id = queue->id;
+
+	list_add_tail(&queue->node, &ctx->submitqueues);
+
+	write_unlock(&ctx->queuelock);
+
+	return 0;
+}
+
+int msm_submitqueue_init(struct msm_file_private *ctx)
+{
+	INIT_LIST_HEAD(&ctx->submitqueues);
+
+	rwlock_init(&ctx->queuelock);
+
+	/*
+	 * Add the "default" submitqueue with id 0, "low"
+	 * priority (2) and no flags
+	 */
+
+	return msm_submitqueue_create(ctx, 2, 0, NULL);
+}
+
+int msm_submitqueue_query(struct msm_file_private *ctx, u32 id, u32 param,
+		void __user *data, u32 len)
+{
+	struct msm_gpu_submitqueue *queue = msm_submitqueue_get(ctx, id);
+	int ret = 0;
+
+	if (!queue)
+		return -ENOENT;
+
+	if (param == MSM_SUBMITQUEUE_PARAM_FAULTS) {
+		u32 size = min_t(u32, len, sizeof(queue->faults));
+
+		if (copy_to_user(data, &queue->faults, size))
+			ret = -EFAULT;
+	} else {
+		ret = -EINVAL;
+	}
+
+	msm_submitqueue_put(queue);
+
+	return ret;
+}
+
+int msm_submitqueue_remove(struct msm_file_private *ctx, u32 id)
+{
+	struct msm_gpu_submitqueue *entry;
+
+	/*
+	 * id 0 is the "default" queue and can't be destroyed
+	 * by the user
+	 */
+
+	if (!id)
+		return -ENOENT;
+
+	write_lock(&ctx->queuelock);
+
+	list_for_each_entry(entry, &ctx->submitqueues, node) {
+		if (entry->id == id) {
+			list_del(&entry->node);
+			write_unlock(&ctx->queuelock);
+
+			msm_submitqueue_put(entry);
+			return 0;
+		}
+	}
+
+	write_unlock(&ctx->queuelock);
+	return -ENOENT;
+}
+
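
Taken together, the per-file lifecycle looks roughly like the sketch below. msm_submitqueue_put() is assumed to be the kref_put() wrapper declared alongside these functions in msm_gpu.h, and a lower prio value presumably means higher priority, given that the default value 2 is described as "low" above.

/* Sketch of one msm_file_private's submitqueue lifecycle. */
static int submitqueue_lifecycle_demo(struct msm_file_private *ctx)
{
	struct msm_gpu_submitqueue *queue;
	u32 id;
	int ret;

	/* open(): installs the default queue (id 0, prio 2, no flags) */
	ret = msm_submitqueue_init(ctx);
	if (ret)
		return ret;

	/* queue-create path: ids are handed out sequentially */
	ret = msm_submitqueue_create(ctx, 1, 0, &id);
	if (ret)
		return ret;

	/* submit path: look the queue up and take a reference */
	queue = msm_submitqueue_get(ctx, id);
	if (queue)
		msm_submitqueue_put(queue);

	/* queue-close path; id 0 would get -ENOENT here */
	ret = msm_submitqueue_remove(ctx, id);

	/* close(): drops the list references on whatever is left */
	msm_submitqueue_close(ctx);
	return ret;
}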
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/gpu/drm/msm/msm_trace.h	2019-01-22 16:16:23.511246479 +0100
@@ -0,0 +1,98 @@
+/* Copyright (c) 2017 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#if !defined(_MSM_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
+#define _MSM_TRACE_H_
+
+#include <linux/tracepoint.h>
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM msm_drm
+#define TRACE_INCLUDE_FILE msm_trace
+
+TRACE_EVENT(msm_queued,
+	TP_PROTO(struct msm_gem_submit *submit),
+	TP_ARGS(submit),
+	TP_STRUCT__entry(
+		__field(uint32_t, queue_id)
+		__field(uint32_t, fence_id)
+		__field(int, ring)
+	),
+	TP_fast_assign(
+		__entry->queue_id = submit->queue->id;
+		__entry->fence_id = submit->fence;
+		__entry->ring = submit->ring;
+	),
+	TP_printk(
+		"queue=%u fence=%u ring=%d",
+		__entry->queue_id, __entry->fence_id, __entry->ring
+	)
+);
+
+TRACE_EVENT(msm_submitted,
+	TP_PROTO(struct msm_gem_submit *submit, uint64_t ticks, uint64_t nsecs),
+	TP_ARGS(submit, ticks, nsecs),
+	TP_STRUCT__entry(
+		__field(uint32_t, queue_id)
+		__field(uint32_t, fence_id)
+		__field(int, ring)
+		__field(uint64_t, ticks)
+		__field(uint64_t, nsecs)
+	),
+	TP_fast_assign(
+		__entry->queue_id = submit->queue->id;
+		__entry->fence_id = submit->fence;
+		__entry->ring = submit->ring;
+		__entry->ticks = ticks;
+		__entry->nsecs = nsecs;
+	),
+	TP_printk(
+		"queue=%u fence=%u ring=%d ticks=%lld nsecs=%llu",
+		__entry->queue_id, __entry->fence_id, __entry->ring,
+		__entry->ticks, __entry->nsecs
+	)
+);
+
+TRACE_EVENT(msm_retired,
+	TP_PROTO(struct msm_gem_submit *submit, uint64_t start_ticks,
+		uint64_t retire_ticks),
+	TP_ARGS(submit, start_ticks, retire_ticks),
+	TP_STRUCT__entry(
+		__field(uint32_t, queue_id)
+		__field(uint32_t, fence_id)
+		__field(int, ring)
+		__field(uint64_t, start_ticks)
+		__field(uint64_t, retire_ticks)
+	),
+	TP_fast_assign(
+		__entry->queue_id = submit->queue->id;
+		__entry->fence_id = submit->fence;
+		__entry->ring = submit->ring;
+		__entry->start_ticks = start_ticks;
+		__entry->retire_ticks = retire_ticks;
+	),
+	TP_printk(
+		"queue=%u fence=%u ring=%d started=%lld retired=%lld",
+		__entry->queue_id, __entry->fence_id, __entry->ring,
+		__entry->start_ticks, __entry->retire_ticks
+	)
+);
+
+#endif
+
+/* This part must be outside protection */
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#include <trace/define_trace.h>
+
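
With TRACE_SYSTEM set to msm_drm, these events appear under events/msm_drm/ in tracefs once the kernel registers them. Emitting them from the driver is the standard tracepoint call; a sketch, assuming the submit and retire paths already have the tick/timestamp values at hand:

/* Sketch: where the three events would fire over a submit's life. */
static void submit_trace_demo(struct msm_gem_submit *submit,
		uint64_t ticks, uint64_t nsecs, uint64_t retire_ticks)
{
	trace_msm_queued(submit);		/* queued from userspace */
	trace_msm_submitted(submit, ticks, nsecs); /* written to the ring */
	trace_msm_retired(submit, ticks, retire_ticks);	/* fence signaled */
}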
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/gpu/drm/msm/msm_trace_points.c	2019-01-22 16:16:23.511246479 +0100
@@ -0,0 +1,18 @@
+/* Copyright (c) 2017 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include "msm_gem.h"
+#include "msm_gpu.h"
+
+#define CREATE_TRACE_POINTS
+#include "msm_trace.h"
diff -Nruw linux-4.4.115-fbx/drivers/gpu/drm/msm/sde./sde_backlight.c linux-4.4.115-fbx/drivers/gpu/drm/msm/sde/sde_backlight.c
--- linux-4.4.115-fbx/drivers/gpu/drm/msm/sde./sde_backlight.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/gpu/drm/msm/sde/sde_backlight.c	2019-01-22 16:16:23.511246479 +0100
@@ -0,0 +1,103 @@
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include "sde_connector.h"
+#include <linux/backlight.h>
+#include "dsi_drm.h"
+
+#define SDE_BRIGHT_TO_BL(out, v, bl_max, max_bright) do {\
+	out = (2 * (v) * (bl_max) + (max_bright));\
+	do_div(out, 2 * (max_bright));\
+} while (0)
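
The macro computes round(v * bl_max / max_bright) in integer arithmetic: doubling the product and adding max_bright before dividing by 2 * max_bright is the usual round-half-up trick, and do_div() keeps the 64-bit division usable on 32-bit targets. A worked example with hypothetical panel limits:

/* Hypothetical values: v = 128, bl_max = 4095, max_bright = 255
 *   out = (2 * 128 * 4095 + 255) / (2 * 255)
 *       = 1048575 / 510
 *       = 2056, matching round(128 * 4095 / 255) = round(2055.53)
 */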
+
+static int sde_backlight_device_update_status(struct backlight_device *bd)
+{
+	int brightness;
+	struct drm_connector *connector;
+	struct dsi_display *display;
+	struct sde_connector *c_conn;
+	int bl_lvl;
+
+	brightness = bd->props.brightness;
+
+	if ((bd->props.power != FB_BLANK_UNBLANK) ||
+			(bd->props.state & BL_CORE_FBBLANK) ||
+			(bd->props.state & BL_CORE_SUSPENDED))
+		brightness = 0;
+
+	connector = bl_get_data(bd);
+	c_conn = to_sde_connector(connector);
+	display = (struct dsi_display *) c_conn->display;
+	if (brightness > display->panel[0]->bl_config.bl_max_level)
+		brightness = display->panel[0]->bl_config.bl_max_level;
+
+	/*
+	 * This maps UI brightness into the driver backlight level with
+	 * rounding
+	 */
+	SDE_BRIGHT_TO_BL(bl_lvl, brightness,
+			display->panel[0]->bl_config.bl_max_level,
+			display->panel[0]->bl_config.brightness_max_level);
+
+	if (!bl_lvl && brightness)
+		bl_lvl = 1;
+
+	if (c_conn->ops.set_backlight)
+		c_conn->ops.set_backlight(c_conn->display, bl_lvl);
+
+	return 0;
+}
+
+static int sde_backlight_device_get_brightness(struct backlight_device *bd)
+{
+	return 0;
+}
+
+static const struct backlight_ops sde_backlight_device_ops = {
+	.update_status = sde_backlight_device_update_status,
+	.get_brightness = sde_backlight_device_get_brightness,
+};
+
+int sde_backlight_setup(struct drm_connector *connector)
+{
+	struct sde_connector *c_conn;
+	struct backlight_device *bd;
+	struct backlight_properties props;
+	struct dsi_display *display;
+	struct dsi_backlight_config *bl_config;
+
+	if (!connector)
+		return -EINVAL;
+
+	c_conn = to_sde_connector(connector);
+	memset(&props, 0, sizeof(props));
+	props.type = BACKLIGHT_RAW;
+	props.power = FB_BLANK_UNBLANK;
+
+	switch (c_conn->connector_type) {
+	case DRM_MODE_CONNECTOR_DSI:
+		display = (struct dsi_display *) c_conn->display;
+		bl_config = &display->panel[0]->bl_config;
+		props.max_brightness = bl_config->brightness_max_level;
+		props.brightness = bl_config->brightness_max_level;
+		bd = backlight_device_register("sde-backlight",
+				connector->kdev,
+				connector,
+				&sde_backlight_device_ops, &props);
+		if (IS_ERR(bd)) {
+			pr_err("Failed to register backlight: %ld\n",
+					    PTR_ERR(bd));
+			return -ENODEV;
+		}
+	}
+
+	return 0;
+}
diff -Nruw linux-4.4.115-fbx/drivers/gpu/drm/msm/sde./sde_backlight.h linux-4.4.115-fbx/drivers/gpu/drm/msm/sde/sde_backlight.h
--- linux-4.4.115-fbx/drivers/gpu/drm/msm/sde./sde_backlight.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/gpu/drm/msm/sde/sde_backlight.h	2019-01-22 16:16:23.511246479 +0100
@@ -0,0 +1,18 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _SDE_BACKLIGHT_H_
+#define _SDE_BACKLIGHT_H_
+
+int sde_backlight_setup(struct drm_connector *connector);
+
+#endif /* _SDE_BACKLIGHT_H_ */
diff -Nruw linux-4.4.115-fbx/drivers/gpu/drm/msm/sde./sde_color_processing.c linux-4.4.115-fbx/drivers/gpu/drm/msm/sde/sde_color_processing.c
--- linux-4.4.115-fbx/drivers/gpu/drm/msm/sde./sde_color_processing.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/gpu/drm/msm/sde/sde_color_processing.c	2019-01-22 16:16:23.511246479 +0100
@@ -0,0 +1,993 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#define pr_fmt(fmt)	"%s: " fmt, __func__
+
+#include <drm/msm_drm_pp.h>
+#include "sde_color_processing.h"
+#include "sde_kms.h"
+#include "sde_crtc.h"
+#include "sde_hw_dspp.h"
+#include "sde_hw_lm.h"
+
+struct sde_cp_node {
+	u32 property_id;
+	u32 prop_flags;
+	u32 feature;
+	void *blob_ptr;
+	uint64_t prop_val;
+	const struct sde_pp_blk *pp_blk;
+	struct list_head feature_list;
+	struct list_head active_list;
+	struct list_head dirty_list;
+	bool is_dspp_feature;
+};
+
+struct sde_cp_prop_attach {
+	struct drm_crtc *crtc;
+	struct drm_property *prop;
+	struct sde_cp_node *prop_node;
+	u32 feature;
+	uint64_t val;
+};
+
+static void dspp_pcc_install_property(struct drm_crtc *crtc);
+
+static void dspp_hsic_install_property(struct drm_crtc *crtc);
+
+static void dspp_ad_install_property(struct drm_crtc *crtc);
+
+static void dspp_vlut_install_property(struct drm_crtc *crtc);
+
+typedef void (*dspp_prop_install_func_t)(struct drm_crtc *crtc);
+
+static dspp_prop_install_func_t dspp_prop_install_func[SDE_DSPP_MAX];
+
+#define setup_dspp_prop_install_funcs(func) \
+do { \
+	func[SDE_DSPP_PCC] = dspp_pcc_install_property; \
+	func[SDE_DSPP_HSIC] = dspp_hsic_install_property; \
+	func[SDE_DSPP_AD] = dspp_ad_install_property; \
+	func[SDE_DSPP_VLUT] = dspp_vlut_install_property; \
+} while (0)
+
+typedef void (*lm_prop_install_func_t)(struct drm_crtc *crtc);
+
+static lm_prop_install_func_t lm_prop_install_func[SDE_MIXER_MAX];
+
+static void lm_gc_install_property(struct drm_crtc *crtc);
+
+#define setup_lm_prop_install_funcs(func) \
+	(func[SDE_MIXER_GC] = lm_gc_install_property)
+
+enum {
+	/* Append new DSPP features before SDE_CP_CRTC_DSPP_MAX */
+	/* DSPP Features start */
+	SDE_CP_CRTC_DSPP_IGC,
+	SDE_CP_CRTC_DSPP_PCC,
+	SDE_CP_CRTC_DSPP_GC,
+	SDE_CP_CRTC_DSPP_HUE,
+	SDE_CP_CRTC_DSPP_SAT,
+	SDE_CP_CRTC_DSPP_VAL,
+	SDE_CP_CRTC_DSPP_CONT,
+	SDE_CP_CRTC_DSPP_MEMCOLOR,
+	SDE_CP_CRTC_DSPP_SIXZONE,
+	SDE_CP_CRTC_DSPP_GAMUT,
+	SDE_CP_CRTC_DSPP_DITHER,
+	SDE_CP_CRTC_DSPP_HIST,
+	SDE_CP_CRTC_DSPP_AD,
+	SDE_CP_CRTC_DSPP_VLUT,
+	SDE_CP_CRTC_DSPP_MAX,
+	/* DSPP features end */
+
+	/* Append new LM features before SDE_CP_CRTC_MAX_FEATURES */
+	/* LM features start */
+	SDE_CP_CRTC_LM_GC,
+	/* LM features end */
+
+	SDE_CP_CRTC_MAX_FEATURES,
+};
+
+#define INIT_PROP_ATTACH(p, crtc, prop, node, feature, val) \
+	do { \
+		(p)->crtc = crtc; \
+		(p)->prop = prop; \
+		(p)->prop_node = node; \
+		(p)->feature = feature; \
+		(p)->val = val; \
+	} while (0)
+
+static void sde_cp_get_hw_payload(struct sde_cp_node *prop_node,
+				  struct sde_hw_cp_cfg *hw_cfg,
+				  bool *feature_enabled)
+{
+	struct drm_property_blob *blob = NULL;
+
+	memset(hw_cfg, 0, sizeof(*hw_cfg));
+	*feature_enabled = false;
+
+	blob = prop_node->blob_ptr;
+	if (prop_node->prop_flags & DRM_MODE_PROP_BLOB) {
+		if (blob) {
+			hw_cfg->len = blob->length;
+			hw_cfg->payload = blob->data;
+			*feature_enabled = true;
+		}
+	} else if (prop_node->prop_flags & DRM_MODE_PROP_RANGE) {
+		/* Check if the local blob is set */
+		if (!blob) {
+			hw_cfg->len = sizeof(prop_node->prop_val);
+			if (prop_node->prop_val)
+				hw_cfg->payload = &prop_node->prop_val;
+		} else {
+			hw_cfg->len = (prop_node->prop_val) ? blob->length :
+					0;
+			hw_cfg->payload = (prop_node->prop_val) ? blob->data
+						: NULL;
+		}
+		if (prop_node->prop_val)
+			*feature_enabled = true;
+	} else {
+		DRM_ERROR("property type is not supported\n");
+	}
+}
+
+static int sde_cp_disable_crtc_blob_property(struct sde_cp_node *prop_node)
+{
+	struct drm_property_blob *blob = prop_node->blob_ptr;
+
+	if (!blob)
+		return -EINVAL;
+	drm_property_unreference_blob(blob);
+	prop_node->blob_ptr = NULL;
+	return 0;
+}
+
+static int sde_cp_create_local_blob(struct drm_crtc *crtc, u32 feature, int len)
+{
+	int ret = -EINVAL;
+	bool found = false;
+	struct sde_cp_node *prop_node = NULL;
+	struct drm_property_blob *blob_ptr;
+	struct sde_crtc *sde_crtc = to_sde_crtc(crtc);
+
+	list_for_each_entry(prop_node, &sde_crtc->feature_list, feature_list) {
+		if (prop_node->feature == feature) {
+			found = true;
+			break;
+		}
+	}
+
+	if (!found || prop_node->prop_flags & DRM_MODE_PROP_BLOB) {
+		DRM_ERROR("local blob create failed prop found %d flags %d\n",
+		       found, prop_node->prop_flags);
+		return ret;
+	}
+
+	blob_ptr = drm_property_create_blob(crtc->dev, len, NULL);
+	ret = (IS_ERR_OR_NULL(blob_ptr)) ? PTR_ERR(blob_ptr) : 0;
+	if (!ret)
+		prop_node->blob_ptr = blob_ptr;
+
+	return ret;
+}
+
+static void sde_cp_destroy_local_blob(struct sde_cp_node *prop_node)
+{
+	if (!(prop_node->prop_flags & DRM_MODE_PROP_BLOB) &&
+		prop_node->blob_ptr)
+		drm_property_unreference_blob(prop_node->blob_ptr);
+}
+
+static int sde_cp_handle_range_property(struct sde_cp_node *prop_node,
+					uint64_t val)
+{
+	int ret = 0;
+	struct drm_property_blob *blob_ptr = prop_node->blob_ptr;
+
+	if (!blob_ptr) {
+		prop_node->prop_val = val;
+		return 0;
+	}
+
+	if (!val) {
+		prop_node->prop_val = 0;
+		return 0;
+	}
+
+	ret = copy_from_user(blob_ptr->data,
+			(void __user *)(unsigned long)val, blob_ptr->length);
+	if (ret) {
+		DRM_ERROR("failed to get the property info ret %d\n", ret);
+		ret = -EFAULT;
+	} else {
+		prop_node->prop_val = val;
+	}
+
+	return ret;
+}
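
Note the unusual contract this gives range properties: the 64-bit property value is treated as a user pointer and its payload is copied into the pre-created local blob. From userspace the write is an ordinary property update; a sketch using stock libdrm, where vlut_prop_id would be discovered by name (e.g. "SDE_DSPP_VLUT_V1", installed later in this file) and is otherwise hypothetical:

/* Userspace-side sketch: point the range property at a local payload. */
struct drm_msm_pa_vlut payload = { /* LUT entries elided */ };

drmModeObjectSetProperty(fd, crtc_id, DRM_MODE_OBJECT_CRTC,
			 vlut_prop_id, (uint64_t)(uintptr_t)&payload);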
+
+static int sde_cp_disable_crtc_property(struct drm_crtc *crtc,
+					 struct drm_property *property,
+					 struct sde_cp_node *prop_node)
+{
+	int ret = -EINVAL;
+
+	if (property->flags & DRM_MODE_PROP_BLOB)
+		ret = sde_cp_disable_crtc_blob_property(prop_node);
+	else if (property->flags & DRM_MODE_PROP_RANGE)
+		ret = sde_cp_handle_range_property(prop_node, 0);
+	return ret;
+}
+
+static int sde_cp_enable_crtc_blob_property(struct drm_crtc *crtc,
+					       struct sde_cp_node *prop_node,
+					       uint64_t val)
+{
+	struct drm_property_blob *blob = NULL;
+
+	/*
+	 * For non-blob based properties, add support to create a blob
+	 * using the val and store the blob_ptr in prop_node.
+	 */
+	blob = drm_property_lookup_blob(crtc->dev, val);
+	if (!blob) {
+		DRM_ERROR("invalid blob id %lld\n", val);
+		return -EINVAL;
+	}
+	/* Release reference to the existing payload of the property */
+	if (prop_node->blob_ptr)
+		drm_property_unreference_blob(prop_node->blob_ptr);
+
+	prop_node->blob_ptr = blob;
+	return 0;
+}
+
+static int sde_cp_enable_crtc_property(struct drm_crtc *crtc,
+				       struct drm_property *property,
+				       struct sde_cp_node *prop_node,
+				       uint64_t val)
+{
+	int ret = -EINVAL;
+
+	if (property->flags & DRM_MODE_PROP_BLOB)
+		ret = sde_cp_enable_crtc_blob_property(crtc, prop_node, val);
+	else if (property->flags & DRM_MODE_PROP_RANGE)
+		ret = sde_cp_handle_range_property(prop_node, val);
+	return ret;
+}
+
+static struct sde_kms *get_kms(struct drm_crtc *crtc)
+{
+	struct msm_drm_private *priv = crtc->dev->dev_private;
+
+	return to_sde_kms(priv->kms);
+}
+
+static void sde_cp_crtc_prop_attach(struct sde_cp_prop_attach *prop_attach)
+{
+	struct sde_crtc *sde_crtc = to_sde_crtc(prop_attach->crtc);
+
+	drm_object_attach_property(&prop_attach->crtc->base,
+				   prop_attach->prop, prop_attach->val);
+
+	INIT_LIST_HEAD(&prop_attach->prop_node->active_list);
+	INIT_LIST_HEAD(&prop_attach->prop_node->dirty_list);
+
+	prop_attach->prop_node->property_id = prop_attach->prop->base.id;
+	prop_attach->prop_node->prop_flags = prop_attach->prop->flags;
+	prop_attach->prop_node->feature = prop_attach->feature;
+
+	if (prop_attach->feature < SDE_CP_CRTC_DSPP_MAX)
+		prop_attach->prop_node->is_dspp_feature = true;
+	else
+		prop_attach->prop_node->is_dspp_feature = false;
+
+	list_add(&prop_attach->prop_node->feature_list,
+		 &sde_crtc->feature_list);
+}
+
+void sde_cp_crtc_init(struct drm_crtc *crtc)
+{
+	struct sde_crtc *sde_crtc = NULL;
+
+	if (!crtc) {
+		DRM_ERROR("invalid crtc %pK\n", crtc);
+		return;
+	}
+
+	sde_crtc = to_sde_crtc(crtc);
+	if (!sde_crtc) {
+		DRM_ERROR("invalid sde_crtc %pK\n", sde_crtc);
+		return;
+	}
+
+	INIT_LIST_HEAD(&sde_crtc->active_list);
+	INIT_LIST_HEAD(&sde_crtc->dirty_list);
+	INIT_LIST_HEAD(&sde_crtc->feature_list);
+}
+
+static void sde_cp_crtc_install_immutable_property(struct drm_crtc *crtc,
+						   char *name,
+						   u32 feature)
+{
+	struct drm_property *prop;
+	struct sde_cp_node *prop_node = NULL;
+	struct msm_drm_private *priv;
+	struct sde_cp_prop_attach prop_attach;
+	uint64_t val = 0;
+
+	if (feature >= SDE_CP_CRTC_MAX_FEATURES) {
+		DRM_ERROR("invalid feature %d max %d\n", feature,
+		       SDE_CP_CRTC_MAX_FEATURES);
+		return;
+	}
+
+	prop_node = kzalloc(sizeof(*prop_node), GFP_KERNEL);
+	if (!prop_node)
+		return;
+
+	priv = crtc->dev->dev_private;
+	prop = priv->cp_property[feature];
+
+	if (!prop) {
+		prop = drm_property_create_range(crtc->dev,
+				DRM_MODE_PROP_IMMUTABLE, name, 0, 1);
+		if (!prop) {
+			DRM_ERROR("property create failed: %s\n", name);
+			kfree(prop_node);
+			return;
+		}
+		priv->cp_property[feature] = prop;
+	}
+
+	INIT_PROP_ATTACH(&prop_attach, crtc, prop, prop_node,
+				feature, val);
+	sde_cp_crtc_prop_attach(&prop_attach);
+}
+
+static void sde_cp_crtc_install_range_property(struct drm_crtc *crtc,
+					     char *name,
+					     u32 feature,
+					     uint64_t min, uint64_t max,
+					     uint64_t val)
+{
+	struct drm_property *prop;
+	struct sde_cp_node *prop_node = NULL;
+	struct msm_drm_private *priv;
+	struct sde_cp_prop_attach prop_attach;
+
+	if (feature >= SDE_CP_CRTC_MAX_FEATURES) {
+		DRM_ERROR("invalid feature %d max %d\n", feature,
+			  SDE_CP_CRTC_MAX_FEATURES);
+		return;
+	}
+
+	prop_node = kzalloc(sizeof(*prop_node), GFP_KERNEL);
+	if (!prop_node)
+		return;
+
+	priv = crtc->dev->dev_private;
+	prop = priv->cp_property[feature];
+
+	if (!prop) {
+		prop = drm_property_create_range(crtc->dev, 0, name, min, max);
+		if (!prop) {
+			DRM_ERROR("property create failed: %s\n", name);
+			kfree(prop_node);
+			return;
+		}
+		priv->cp_property[feature] = prop;
+	}
+
+	INIT_PROP_ATTACH(&prop_attach, crtc, prop, prop_node,
+				feature, val);
+
+	sde_cp_crtc_prop_attach(&prop_attach);
+}
+
+static void sde_cp_crtc_create_blob_property(struct drm_crtc *crtc, char *name,
+					     u32 feature)
+{
+	struct drm_property *prop;
+	struct sde_cp_node *prop_node = NULL;
+	struct msm_drm_private *priv;
+	uint64_t val = 0;
+	struct sde_cp_prop_attach prop_attach;
+
+	if (feature >= SDE_CP_CRTC_MAX_FEATURES) {
+		DRM_ERROR("invalid feature %d max %d\n", feature,
+		       SDE_CP_CRTC_MAX_FEATURES);
+		return;
+	}
+
+	prop_node = kzalloc(sizeof(*prop_node), GFP_KERNEL);
+	if (!prop_node)
+		return;
+
+	priv = crtc->dev->dev_private;
+	prop = priv->cp_property[feature];
+
+	if (!prop) {
+		prop = drm_property_create(crtc->dev,
+					   DRM_MODE_PROP_BLOB, name, 0);
+		if (!prop) {
+			DRM_ERROR("property create failed: %s\n", name);
+			kfree(prop_node);
+			return;
+		}
+		priv->cp_property[feature] = prop;
+	}
+
+	INIT_PROP_ATTACH(&prop_attach, crtc, prop, prop_node,
+				feature, val);
+
+	sde_cp_crtc_prop_attach(&prop_attach);
+}
+
+static void sde_cp_crtc_setfeature(struct sde_cp_node *prop_node,
+				   struct sde_crtc *sde_crtc)
+{
+	struct sde_hw_cp_cfg hw_cfg;
+	struct sde_hw_mixer *hw_lm;
+	struct sde_hw_dspp *hw_dspp;
+	u32 num_mixers = sde_crtc->num_mixers;
+	int i = 0;
+	bool feature_enabled = false;
+	int ret = 0;
+
+	sde_cp_get_hw_payload(prop_node, &hw_cfg, &feature_enabled);
+
+	for (i = 0; i < num_mixers && !ret; i++) {
+		hw_lm = sde_crtc->mixers[i].hw_lm;
+		hw_dspp = sde_crtc->mixers[i].hw_dspp;
+
+		switch (prop_node->feature) {
+		case SDE_CP_CRTC_DSPP_VLUT:
+			if (!hw_dspp || !hw_dspp->ops.setup_vlut) {
+				ret = -EINVAL;
+				continue;
+			}
+			hw_dspp->ops.setup_vlut(hw_dspp, &hw_cfg);
+			break;
+		case SDE_CP_CRTC_DSPP_PCC:
+			if (!hw_dspp || !hw_dspp->ops.setup_pcc) {
+				ret = -EINVAL;
+				continue;
+			}
+			hw_dspp->ops.setup_pcc(hw_dspp, &hw_cfg);
+			break;
+		case SDE_CP_CRTC_DSPP_IGC:
+			if (!hw_dspp || !hw_dspp->ops.setup_igc) {
+				ret = -EINVAL;
+				continue;
+			}
+			hw_dspp->ops.setup_igc(hw_dspp, &hw_cfg);
+			break;
+		case SDE_CP_CRTC_DSPP_GC:
+			if (!hw_dspp || !hw_dspp->ops.setup_gc) {
+				ret = -EINVAL;
+				continue;
+			}
+			hw_dspp->ops.setup_gc(hw_dspp, &hw_cfg);
+			break;
+		case SDE_CP_CRTC_DSPP_HUE:
+			if (!hw_dspp || !hw_dspp->ops.setup_hue) {
+				ret = -EINVAL;
+				continue;
+			}
+			hw_dspp->ops.setup_hue(hw_dspp, &hw_cfg);
+			break;
+		case SDE_CP_CRTC_DSPP_SAT:
+			if (!hw_dspp || !hw_dspp->ops.setup_sat) {
+				ret = -EINVAL;
+				continue;
+			}
+			hw_dspp->ops.setup_sat(hw_dspp, &hw_cfg);
+			break;
+		case SDE_CP_CRTC_DSPP_VAL:
+			if (!hw_dspp || !hw_dspp->ops.setup_val) {
+				ret = -EINVAL;
+				continue;
+			}
+			hw_dspp->ops.setup_val(hw_dspp, &hw_cfg);
+			break;
+		case SDE_CP_CRTC_DSPP_CONT:
+			if (!hw_dspp || !hw_dspp->ops.setup_cont) {
+				ret = -EINVAL;
+				continue;
+			}
+			hw_dspp->ops.setup_cont(hw_dspp, &hw_cfg);
+			break;
+		case SDE_CP_CRTC_DSPP_MEMCOLOR:
+			if (!hw_dspp || !hw_dspp->ops.setup_pa_memcolor) {
+				ret = -EINVAL;
+				continue;
+			}
+			hw_dspp->ops.setup_pa_memcolor(hw_dspp, &hw_cfg);
+			break;
+		case SDE_CP_CRTC_DSPP_SIXZONE:
+			if (!hw_dspp || !hw_dspp->ops.setup_sixzone) {
+				ret = -EINVAL;
+				continue;
+			}
+			hw_dspp->ops.setup_sixzone(hw_dspp, &hw_cfg);
+			break;
+		case SDE_CP_CRTC_DSPP_GAMUT:
+			if (!hw_dspp || !hw_dspp->ops.setup_gamut) {
+				ret = -EINVAL;
+				continue;
+			}
+			hw_dspp->ops.setup_gamut(hw_dspp, &hw_cfg);
+			break;
+		case SDE_CP_CRTC_LM_GC:
+			if (!hw_lm || !hw_lm->ops.setup_gc) {
+				ret = -EINVAL;
+				continue;
+			}
+			hw_lm->ops.setup_gc(hw_lm, &hw_cfg);
+			break;
+		default:
+			ret = -EINVAL;
+			break;
+		}
+	}
+
+	if (ret) {
+		DRM_ERROR("failed to %s feature %d\n",
+			((feature_enabled) ? "enable" : "disable"),
+			prop_node->feature);
+		return;
+	}
+
+	if (feature_enabled) {
+		DRM_DEBUG_DRIVER("Add feature to active list %d\n",
+				 prop_node->property_id);
+		list_add_tail(&prop_node->active_list, &sde_crtc->active_list);
+	} else {
+		DRM_DEBUG_DRIVER("remove feature from active list %d\n",
+			 prop_node->property_id);
+		list_del_init(&prop_node->active_list);
+	}
+	/* Programming of the feature is done, remove it from the dirty list */
+	list_del_init(&prop_node->dirty_list);
+}
+
+void sde_cp_crtc_apply_properties(struct drm_crtc *crtc)
+{
+	struct sde_crtc *sde_crtc = NULL;
+	bool set_dspp_flush = false, set_lm_flush = false;
+	struct sde_cp_node *prop_node = NULL, *n = NULL;
+	struct sde_hw_ctl *ctl;
+	uint32_t flush_mask = 0;
+	u32 num_mixers = 0, i = 0;
+
+	if (!crtc || !crtc->dev) {
+		DRM_ERROR("invalid crtc %pK dev %pK\n", crtc,
+			  (crtc ? crtc->dev : NULL));
+		return;
+	}
+
+	sde_crtc = to_sde_crtc(crtc);
+	if (!sde_crtc) {
+		DRM_ERROR("invalid sde_crtc %pK\n", sde_crtc);
+		return;
+	}
+
+	num_mixers = sde_crtc->num_mixers;
+	if (!num_mixers) {
+		DRM_DEBUG_DRIVER("no mixers for this crtc\n");
+		return;
+	}
+
+	/* Check if dirty list is empty for early return */
+	if (list_empty(&sde_crtc->dirty_list)) {
+		DRM_DEBUG_DRIVER("Dirty list is empty\n");
+		return;
+	}
+
+	list_for_each_entry_safe(prop_node, n, &sde_crtc->dirty_list,
+							dirty_list) {
+		sde_cp_crtc_setfeature(prop_node, sde_crtc);
+		/* Set the flush flag to true */
+		if (prop_node->is_dspp_feature)
+			set_dspp_flush = true;
+		else
+			set_lm_flush = true;
+	}
+
+	for (i = 0; i < num_mixers; i++) {
+		ctl = sde_crtc->mixers[i].hw_ctl;
+		if (!ctl)
+			continue;
+		if (set_dspp_flush && ctl->ops.get_bitmask_dspp
+				&& sde_crtc->mixers[i].hw_dspp) {
+			ctl->ops.get_bitmask_dspp(ctl,
+					&flush_mask,
+					sde_crtc->mixers[i].hw_dspp->idx);
+			ctl->ops.update_pending_flush(ctl, flush_mask);
+		}
+		if (set_lm_flush && ctl->ops.get_bitmask_mixer
+				&& sde_crtc->mixers[i].hw_lm) {
+			flush_mask = ctl->ops.get_bitmask_mixer(ctl,
+					sde_crtc->mixers[i].hw_lm->idx);
+			ctl->ops.update_pending_flush(ctl, flush_mask);
+		}
+	}
+}
+
+void sde_cp_crtc_install_properties(struct drm_crtc *crtc)
+{
+	struct sde_kms *kms = NULL;
+	struct sde_crtc *sde_crtc = NULL;
+	struct sde_mdss_cfg *catalog = NULL;
+	unsigned long features = 0;
+	int i = 0;
+	struct msm_drm_private *priv;
+
+	if (!crtc || !crtc->dev || !crtc->dev->dev_private) {
+		DRM_ERROR("invalid crtc %pK dev %pK\n",
+		       crtc, ((crtc) ? crtc->dev : NULL));
+		return;
+	}
+
+	sde_crtc = to_sde_crtc(crtc);
+	if (!sde_crtc) {
+		DRM_ERROR("sde_crtc %pK\n", sde_crtc);
+		return;
+	}
+
+	kms = get_kms(crtc);
+	if (!kms || !kms->catalog) {
+		DRM_ERROR("invalid sde kms %pK catalog %pK sde_crtc %pK\n",
+		 kms, ((kms) ? kms->catalog : NULL), sde_crtc);
+		return;
+	}
+
+	/*
+	 * This function can be called during atomic_check with the
+	 * test_only flag as well as during the actual commit. Allocate
+	 * the properties only once, while the feature list is still
+	 * empty.
+	 */
+	if (!list_empty(&sde_crtc->feature_list))
+		return;
+
+	catalog = kms->catalog;
+	priv = crtc->dev->dev_private;
+	/*
+	 * DSPP/LM properties are global to all the CRTCs.
+	 * Properties are created for the first CRTC and re-used for the
+	 * later ones.
+	 */
+	if (!priv->cp_property) {
+		priv->cp_property = kzalloc(sizeof(*priv->cp_property) *
+				SDE_CP_CRTC_MAX_FEATURES, GFP_KERNEL);
+		setup_dspp_prop_install_funcs(dspp_prop_install_func);
+		setup_lm_prop_install_funcs(lm_prop_install_func);
+	}
+	if (!priv->cp_property)
+		return;
+
+	if (!catalog->dspp_count)
+		goto lm_property;
+
+	/* Check for all the DSPP properties and attach them to the CRTC */
+	features = catalog->dspp[0].features;
+	for (i = 0; i < SDE_DSPP_MAX; i++) {
+		if (!test_bit(i, &features))
+			continue;
+		if (dspp_prop_install_func[i])
+			dspp_prop_install_func[i](crtc);
+	}
+
+lm_property:
+	if (!catalog->mixer_count)
+		return;
+
+	/* Check for all the LM properties and attach them to the CRTC */
+	features = catalog->mixer[0].features;
+	for (i = 0; i < SDE_MIXER_MAX; i++) {
+		if (!test_bit(i, &features))
+			continue;
+		if (lm_prop_install_func[i])
+			lm_prop_install_func[i](crtc);
+	}
+}
+
+int sde_cp_crtc_set_property(struct drm_crtc *crtc,
+				struct drm_property *property,
+				uint64_t val)
+{
+	struct sde_cp_node *prop_node = NULL;
+	struct sde_crtc *sde_crtc = NULL;
+	int ret = 0, i = 0, dspp_cnt, lm_cnt;
+	u8 found = 0;
+
+	if (!crtc || !property) {
+		DRM_ERROR("invalid crtc %pK property %pK\n", crtc, property);
+		return -EINVAL;
+	}
+
+	sde_crtc = to_sde_crtc(crtc);
+	if (!sde_crtc) {
+		DRM_ERROR("invalid sde_crtc %pK\n", sde_crtc);
+		return -EINVAL;
+	}
+
+	list_for_each_entry(prop_node, &sde_crtc->feature_list, feature_list) {
+		if (property->base.id == prop_node->property_id) {
+			found = 1;
+			break;
+		}
+	}
+
+	if (!found)
+		return 0;
+	/*
+	 * sde_crtc is virtual; ensure that hardware has been attached
+	 * to the crtc. Check the LM and DSPP counts based on whether
+	 * the feature is a DSPP/LM feature.
+	 */
+	if (!sde_crtc->num_mixers ||
+	    sde_crtc->num_mixers > ARRAY_SIZE(sde_crtc->mixers)) {
+		DRM_ERROR("Invalid mixer config act cnt %d max cnt %ld\n",
+			sde_crtc->num_mixers, ARRAY_SIZE(sde_crtc->mixers));
+		return -EINVAL;
+	}
+
+	dspp_cnt = 0;
+	lm_cnt = 0;
+	for (i = 0; i < sde_crtc->num_mixers; i++) {
+		if (sde_crtc->mixers[i].hw_dspp)
+			dspp_cnt++;
+		if (sde_crtc->mixers[i].hw_lm)
+			lm_cnt++;
+	}
+
+	if (prop_node->is_dspp_feature && dspp_cnt < sde_crtc->num_mixers) {
+		DRM_ERROR("invalid dspp cnt %d mixer cnt %d\n", dspp_cnt,
+			sde_crtc->num_mixers);
+		return -EINVAL;
+	} else if (lm_cnt < sde_crtc->num_mixers) {
+		DRM_ERROR("invalid lm cnt %d mixer cnt %d\n", lm_cnt,
+			sde_crtc->num_mixers);
+		return -EINVAL;
+	}
+	/* remove the property from dirty list */
+	list_del_init(&prop_node->dirty_list);
+
+	if (!val)
+		ret = sde_cp_disable_crtc_property(crtc, property, prop_node);
+	else
+		ret = sde_cp_enable_crtc_property(crtc, property,
+						  prop_node, val);
+
+	if (!ret) {
+		/* remove the property from active list */
+		list_del_init(&prop_node->active_list);
+		/* Mark the feature as dirty */
+		list_add_tail(&prop_node->dirty_list, &sde_crtc->dirty_list);
+	}
+	return ret;
+}
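
The three lists form a small state machine: this function caches the value and queues the feature node on dirty_list; sde_cp_crtc_apply_properties(), called at commit, programs each dirty node, sets the DSPP/LM flush bits and parks enabled nodes on active_list; sde_cp_crtc_suspend() pushes active nodes back to dirty so the next commit reprograms them. A compressed sketch of that flow, assuming valid crtc and property pointers:

/* Sketch: one property update travelling through the cp lists. */
static int cp_property_flow_demo(struct drm_crtc *crtc,
		struct drm_property *prop, uint64_t val)
{
	int ret;

	/* caches val and moves the feature node onto dirty_list */
	ret = sde_cp_crtc_set_property(crtc, prop, val);
	if (ret)
		return ret;

	/* commit: programs dirty features, updates the flush masks and
	 * moves enabled features to active_list */
	sde_cp_crtc_apply_properties(crtc);

	/* suspend: re-queues active features onto dirty_list so the
	 * next apply reprograms the hardware */
	sde_cp_crtc_suspend(crtc);
	return 0;
}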
+
+int sde_cp_crtc_get_property(struct drm_crtc *crtc,
+			     struct drm_property *property, uint64_t *val)
+{
+	struct sde_cp_node *prop_node = NULL;
+	struct sde_crtc *sde_crtc = NULL;
+
+	if (!crtc || !property || !val) {
+		DRM_ERROR("invalid crtc %pK property %pK val %pK\n",
+			  crtc, property, val);
+		return -EINVAL;
+	}
+
+	sde_crtc = to_sde_crtc(crtc);
+	if (!sde_crtc) {
+		DRM_ERROR("invalid sde_crtc %pK\n", sde_crtc);
+		return -EINVAL;
+	}
+	/* Return 0 if property is not supported */
+	*val = 0;
+	list_for_each_entry(prop_node, &sde_crtc->feature_list, feature_list) {
+		if (property->base.id == prop_node->property_id) {
+			*val = prop_node->prop_val;
+			break;
+		}
+	}
+	return 0;
+}
+
+void sde_cp_crtc_destroy_properties(struct drm_crtc *crtc)
+{
+	struct sde_crtc *sde_crtc = NULL;
+	struct sde_cp_node *prop_node = NULL, *n = NULL;
+
+	if (!crtc) {
+		DRM_ERROR("invalid crtc %pK\n", crtc);
+		return;
+	}
+
+	sde_crtc = to_sde_crtc(crtc);
+	if (!sde_crtc) {
+		DRM_ERROR("invalid sde_crtc %pK\n", sde_crtc);
+		return;
+	}
+
+	list_for_each_entry_safe(prop_node, n, &sde_crtc->feature_list,
+				 feature_list) {
+		if (prop_node->prop_flags & DRM_MODE_PROP_BLOB
+		    && prop_node->blob_ptr)
+			drm_property_unreference_blob(prop_node->blob_ptr);
+
+		list_del_init(&prop_node->active_list);
+		list_del_init(&prop_node->dirty_list);
+		list_del_init(&prop_node->feature_list);
+		sde_cp_destroy_local_blob(prop_node);
+		kfree(prop_node);
+	}
+
+	INIT_LIST_HEAD(&sde_crtc->active_list);
+	INIT_LIST_HEAD(&sde_crtc->dirty_list);
+	INIT_LIST_HEAD(&sde_crtc->feature_list);
+}
+
+void sde_cp_crtc_suspend(struct drm_crtc *crtc)
+{
+	struct sde_crtc *sde_crtc = NULL;
+	struct sde_cp_node *prop_node = NULL, *n = NULL;
+
+	if (!crtc) {
+		DRM_ERROR("crtc %pK\n", crtc);
+		return;
+	}
+	sde_crtc = to_sde_crtc(crtc);
+	if (!sde_crtc) {
+		DRM_ERROR("sde_crtc %pK\n", sde_crtc);
+		return;
+	}
+
+	list_for_each_entry_safe(prop_node, n, &sde_crtc->active_list,
+				 active_list) {
+		list_add_tail(&prop_node->dirty_list, &sde_crtc->dirty_list);
+		list_del_init(&prop_node->active_list);
+	}
+}
+
+void sde_cp_crtc_resume(struct drm_crtc *crtc)
+{
+	/* placeholder for operations needed during resume */
+}
+
+static void dspp_pcc_install_property(struct drm_crtc *crtc)
+{
+	char feature_name[256];
+	struct sde_kms *kms = NULL;
+	struct sde_mdss_cfg *catalog = NULL;
+	u32 version;
+
+	kms = get_kms(crtc);
+	catalog = kms->catalog;
+
+	version = catalog->dspp[0].sblk->pcc.version >> 16;
+	snprintf(feature_name, ARRAY_SIZE(feature_name), "%s%d",
+		"SDE_DSPP_PCC_V", version);
+	switch (version) {
+	case 1:
+		sde_cp_crtc_create_blob_property(crtc, feature_name,
+					SDE_CP_CRTC_DSPP_PCC);
+		break;
+	default:
+		DRM_ERROR("version %d not supported\n", version);
+		break;
+	}
+}
+
+static void dspp_hsic_install_property(struct drm_crtc *crtc)
+{
+	char feature_name[256];
+	struct sde_kms *kms = NULL;
+	struct sde_mdss_cfg *catalog = NULL;
+	u32 version;
+
+	kms = get_kms(crtc);
+	catalog = kms->catalog;
+	version = catalog->dspp[0].sblk->hsic.version >> 16;
+	switch (version) {
+	case 1:
+		snprintf(feature_name, ARRAY_SIZE(feature_name), "%s%d",
+			"SDE_DSPP_HUE_V", version);
+		sde_cp_crtc_install_range_property(crtc, feature_name,
+			SDE_CP_CRTC_DSPP_HUE, 0, U32_MAX, 0);
+		break;
+	default:
+		DRM_ERROR("version %d not supported\n", version);
+		break;
+	}
+}
+
+static void dspp_vlut_install_property(struct drm_crtc *crtc)
+{
+	char feature_name[256];
+	struct sde_kms *kms = NULL;
+	struct sde_mdss_cfg *catalog = NULL;
+	u32 version;
+
+	kms = get_kms(crtc);
+	catalog = kms->catalog;
+	version = catalog->dspp[0].sblk->vlut.version >> 16;
+	snprintf(feature_name, ARRAY_SIZE(feature_name), "%s%d",
+		"SDE_DSPP_VLUT_V", version);
+	switch (version) {
+	case 1:
+		sde_cp_crtc_install_range_property(crtc, feature_name,
+			SDE_CP_CRTC_DSPP_VLUT, 0, U64_MAX, 0);
+		sde_cp_create_local_blob(crtc,
+			SDE_CP_CRTC_DSPP_VLUT,
+			sizeof(struct drm_msm_pa_vlut));
+		break;
+	default:
+		DRM_ERROR("version %d not supported\n", version);
+		break;
+	}
+}
+
+static void dspp_ad_install_property(struct drm_crtc *crtc)
+{
+	char feature_name[256];
+	struct sde_kms *kms = NULL;
+	struct sde_mdss_cfg *catalog = NULL;
+	u32 version;
+
+	kms = get_kms(crtc);
+	catalog = kms->catalog;
+	version = catalog->dspp[0].sblk->ad.version >> 16;
+	snprintf(feature_name, ARRAY_SIZE(feature_name), "%s%d",
+		"SDE_DSPP_AD_V", version);
+	switch (version) {
+	case 3:
+		sde_cp_crtc_install_immutable_property(crtc,
+			feature_name, SDE_CP_CRTC_DSPP_AD);
+		break;
+	default:
+		DRM_ERROR("version %d not supported\n", version);
+		break;
+	}
+}
+
+static void lm_gc_install_property(struct drm_crtc *crtc)
+{
+	char feature_name[256];
+	struct sde_kms *kms = NULL;
+	struct sde_mdss_cfg *catalog = NULL;
+	u32 version;
+
+	kms = get_kms(crtc);
+	catalog = kms->catalog;
+	version = catalog->mixer[0].sblk->gc.version >> 16;
+	snprintf(feature_name, ARRAY_SIZE(feature_name), "%s%d",
+		 "SDE_LM_GC_V", version);
+	switch (version) {
+	case 1:
+		sde_cp_crtc_create_blob_property(crtc, feature_name,
+			SDE_CP_CRTC_LM_GC);
+		break;
+	default:
+		DRM_ERROR("version %d not supported\n", version);
+		break;
+	}
+}
diff -Nruw linux-4.4.115-fbx/drivers/gpu/drm/msm/sde./sde_color_processing.h linux-4.4.115-fbx/drivers/gpu/drm/msm/sde/sde_color_processing.h
--- linux-4.4.115-fbx/drivers/gpu/drm/msm/sde./sde_color_processing.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/gpu/drm/msm/sde/sde_color_processing.h	2019-01-22 16:16:23.511246479 +0100
@@ -0,0 +1,95 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _SDE_COLOR_PROCESSING_H
+#define _SDE_COLOR_PROCESSING_H
+#include <drm/drm_crtc.h>
+
+/**
+ * enum sde_memcolor_type - PA memory color types
+ * @MEMCOLOR_SKIN:          Skin memory color type
+ * @MEMCOLOR_SKY:           Sky memory color type
+ * @MEMCOLOR_FOLIAGE:       Foliage memory color type
+ */
+enum sde_memcolor_type {
+	MEMCOLOR_SKIN = 0,
+	MEMCOLOR_SKY,
+	MEMCOLOR_FOLIAGE
+};
+
+/**
+ * sde_cp_crtc_init(): Initialize color processing lists for a crtc.
+ *                     Should be called during crtc initialization.
+ * @crtc:  Pointer to crtc.
+ */
+void sde_cp_crtc_init(struct drm_crtc *crtc);
+
+/**
+ * sde_cp_crtc_install_properties(): Installs the color processing
+ *                                properties for a crtc.
+ *                                Should be called during crtc initialization.
+ * @crtc:  Pointer to crtc.
+ */
+void sde_cp_crtc_install_properties(struct drm_crtc *crtc);
+
+/**
+ * sde_cp_crtc_destroy_properties: Destroys color processing
+ *                                 properties for a crtc.
+ *                                 Should be called during crtc
+ *                                 de-initialization.
+ * @crtc:  Pointer to crtc.
+ */
+void sde_cp_crtc_destroy_properties(struct drm_crtc *crtc);
+
+/**
+ * sde_cp_crtc_set_property: Set a color processing property
+ *                                      for a crtc.
+ *                                      Should be called during atomic set
+ *                                      property.
+ * @crtc: Pointer to crtc.
+ * @property: Property that needs to be enabled/disabled.
+ * @val: Value of property.
+ */
+int sde_cp_crtc_set_property(struct drm_crtc *crtc,
+				struct drm_property *property, uint64_t val);
+
+/**
+ * sde_cp_crtc_apply_properties: Enable/disable properties
+ *                               for a crtc.
+ *                               Should be called during atomic commit call.
+ * @crtc: Pointer to crtc.
+ */
+void sde_cp_crtc_apply_properties(struct drm_crtc *crtc);
+
+/**
+ * sde_cp_crtc_get_property: Get value of color processing property
+ *                                      for a crtc.
+ *                                      Should be called during atomic get
+ *                                      property.
+ * @crtc: Pointer to crtc.
+ * @property: Property whose value needs to be queried.
+ * @val: Value of property.
+ *
+ */
+int sde_cp_crtc_get_property(struct drm_crtc *crtc,
+				struct drm_property *property, uint64_t *val);
+
+/**
+ * sde_cp_crtc_suspend: Suspend the crtc features
+ * @crtc: Pointer to crtc.
+ */
+void sde_cp_crtc_suspend(struct drm_crtc *crtc);
+
+/**
+ * sde_cp_crtc_resume: Resume the crtc features
+ * @crtc: Pointer to crtc.
+ */
+void sde_cp_crtc_resume(struct drm_crtc *crtc);
+#endif /* _SDE_COLOR_PROCESSING_H */
diff -Nruw linux-4.4.115-fbx/drivers/gpu/drm/msm/sde./sde_connector.c linux-4.4.115-fbx/drivers/gpu/drm/msm/sde/sde_connector.c
--- linux-4.4.115-fbx/drivers/gpu/drm/msm/sde./sde_connector.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/gpu/drm/msm/sde/sde_connector.c	2019-10-29 09:26:23.637203119 +0100
@@ -0,0 +1,999 @@
+/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt)	"[drm:%s:%d] " fmt, __func__, __LINE__
+#include "msm_drv.h"
+
+#include "sde_kms.h"
+#include "sde_connector.h"
+#include "sde_backlight.h"
+#include "sde_splash.h"
+
+#define SDE_DEBUG_CONN(c, fmt, ...) SDE_DEBUG("conn%d " fmt,\
+		(c) ? (c)->base.base.id : -1, ##__VA_ARGS__)
+
+#define SDE_ERROR_CONN(c, fmt, ...) SDE_ERROR("conn%d " fmt,\
+		(c) ? (c)->base.base.id : -1, ##__VA_ARGS__)
+
+static const struct drm_prop_enum_list e_topology_name[] = {
+	{SDE_RM_TOPOLOGY_UNKNOWN,	"sde_unknown"},
+	{SDE_RM_TOPOLOGY_SINGLEPIPE,	"sde_singlepipe"},
+	{SDE_RM_TOPOLOGY_DUALPIPE,	"sde_dualpipe"},
+	{SDE_RM_TOPOLOGY_PPSPLIT,	"sde_ppsplit"},
+	{SDE_RM_TOPOLOGY_DUALPIPEMERGE,	"sde_dualpipemerge"}
+};
+static const struct drm_prop_enum_list e_topology_control[] = {
+	{SDE_RM_TOPCTL_RESERVE_LOCK,	"reserve_lock"},
+	{SDE_RM_TOPCTL_RESERVE_CLEAR,	"reserve_clear"},
+	{SDE_RM_TOPCTL_DSPP,		"dspp"},
+	{SDE_RM_TOPCTL_FORCE_TILING,	"force_tiling"},
+	{SDE_RM_TOPCTL_PPSPLIT,		"ppsplit"}
+};
+
+static const struct drm_prop_enum_list e_power_mode[] = {
+	{SDE_MODE_DPMS_ON,      "ON"},
+	{SDE_MODE_DPMS_LP1,     "LP1"},
+	{SDE_MODE_DPMS_LP2,     "LP2"},
+	{SDE_MODE_DPMS_OFF,     "OFF"},
+};
+
+static const struct drm_prop_enum_list hpd_clock_state[] = {
+	{SDE_MODE_HPD_ON,      "ON"},
+	{SDE_MODE_HPD_OFF,     "OFF"},
+};
+
+int sde_connector_get_info(struct drm_connector *connector,
+		struct msm_display_info *info)
+{
+	struct sde_connector *c_conn;
+
+	if (!connector || !info) {
+		SDE_ERROR("invalid argument(s), conn %pK, info %pK\n",
+				connector, info);
+		return -EINVAL;
+	}
+
+	c_conn = to_sde_connector(connector);
+
+	if (!c_conn->display || !c_conn->ops.get_info) {
+		SDE_ERROR("display info not supported for %pK\n",
+				c_conn->display);
+		return -EINVAL;
+	}
+
+	return c_conn->ops.get_info(info, c_conn->display);
+}
+
+int sde_connector_pre_kickoff(struct drm_connector *connector)
+{
+	struct sde_connector *c_conn;
+	struct sde_connector_state *c_state;
+	struct msm_display_kickoff_params params;
+	int rc;
+
+	if (!connector) {
+		SDE_ERROR("invalid argument\n");
+		return -EINVAL;
+	}
+
+	c_conn = to_sde_connector(connector);
+	c_state = to_sde_connector_state(connector->state);
+
+	if (!c_conn->display) {
+		SDE_ERROR("invalid argument\n");
+		return -EINVAL;
+	}
+
+	if (!c_conn->ops.pre_kickoff)
+		return 0;
+
+	params.hdr_ctrl = &c_state->hdr_ctrl;
+
+	rc = c_conn->ops.pre_kickoff(connector, c_conn->display, &params);
+
+	return rc;
+}
+
+enum sde_csc_type sde_connector_get_csc_type(struct drm_connector *conn)
+{
+	struct sde_connector *c_conn;
+
+	if (!conn) {
+		SDE_ERROR("invalid argument\n");
+		return -EINVAL;
+	}
+
+	c_conn = to_sde_connector(conn);
+
+	if (!c_conn->display) {
+		SDE_ERROR("invalid argument\n");
+		return -EINVAL;
+	}
+
+	if (!c_conn->ops.get_csc_type)
+		return SDE_CSC_RGB2YUV_709L;
+
+	return c_conn->ops.get_csc_type(conn, c_conn->display);
+}
+
+bool sde_connector_mode_needs_full_range(struct drm_connector *connector)
+{
+	struct sde_connector *c_conn;
+
+	if (!connector) {
+		SDE_ERROR("invalid argument\n");
+		return false;
+	}
+
+	c_conn = to_sde_connector(connector);
+
+	if (!c_conn->display) {
+		SDE_ERROR("invalid argument\n");
+		return false;
+	}
+
+	if (!c_conn->ops.mode_needs_full_range)
+		return false;
+
+	return c_conn->ops.mode_needs_full_range(c_conn->display);
+}
+
+static void sde_connector_destroy(struct drm_connector *connector)
+{
+	struct sde_connector *c_conn;
+
+	if (!connector) {
+		SDE_ERROR("invalid connector\n");
+		return;
+	}
+
+	c_conn = to_sde_connector(connector);
+
+	if (c_conn->ops.pre_deinit)
+		c_conn->ops.pre_deinit(connector, c_conn->display);
+
+	if (c_conn->blob_caps)
+		drm_property_unreference_blob(c_conn->blob_caps);
+	if (c_conn->blob_hdr)
+		drm_property_unreference_blob(c_conn->blob_hdr);
+	msm_property_destroy(&c_conn->property_info);
+
+	drm_connector_unregister(connector);
+	mutex_destroy(&c_conn->lock);
+	sde_fence_deinit(&c_conn->retire_fence);
+	drm_connector_cleanup(connector);
+	kfree(c_conn);
+}
+
+/**
+ * _sde_connector_destroy_fb - clean up connector state's out_fb buffer
+ * @c_conn: Pointer to sde connector structure
+ * @c_state: Pointer to sde connector state structure
+ */
+static void _sde_connector_destroy_fb(struct sde_connector *c_conn,
+		struct sde_connector_state *c_state)
+{
+	if (!c_state || !c_state->out_fb) {
+		SDE_ERROR("invalid state %pK\n", c_state);
+		return;
+	}
+
+	msm_framebuffer_cleanup(c_state->out_fb, c_state->aspace);
+	drm_framebuffer_unreference(c_state->out_fb);
+	c_state->out_fb = NULL;
+
+	if (c_conn) {
+		c_state->property_values[CONNECTOR_PROP_OUT_FB] =
+			msm_property_get_default(&c_conn->property_info,
+					CONNECTOR_PROP_OUT_FB);
+	} else {
+		c_state->property_values[CONNECTOR_PROP_OUT_FB] = ~0;
+	}
+}
+
+static void sde_connector_atomic_destroy_state(struct drm_connector *connector,
+		struct drm_connector_state *state)
+{
+	struct sde_connector *c_conn = NULL;
+	struct sde_connector_state *c_state = NULL;
+
+	if (!state) {
+		SDE_ERROR("invalid state\n");
+		return;
+	}
+
+	/*
+	 * The base DRM framework currently always passes in a NULL
+	 * connector pointer. This is not correct, but attempt to
+	 * handle that case as much as possible.
+	 */
+	if (connector)
+		c_conn = to_sde_connector(connector);
+	c_state = to_sde_connector_state(state);
+
+	if (c_state->out_fb)
+		_sde_connector_destroy_fb(c_conn, c_state);
+
+	if (!c_conn) {
+		kfree(c_state);
+	} else {
+		/* destroy value helper */
+		msm_property_destroy_state(&c_conn->property_info, c_state,
+				c_state->property_values, 0);
+	}
+}
+
+static void sde_connector_atomic_reset(struct drm_connector *connector)
+{
+	struct sde_connector *c_conn;
+	struct sde_connector_state *c_state;
+
+	if (!connector) {
+		SDE_ERROR("invalid connector\n");
+		return;
+	}
+
+	c_conn = to_sde_connector(connector);
+
+	if (connector->state) {
+		sde_connector_atomic_destroy_state(connector, connector->state);
+		connector->state = NULL;
+	}
+
+	c_state = msm_property_alloc_state(&c_conn->property_info);
+	if (!c_state) {
+		SDE_ERROR("state alloc failed\n");
+		return;
+	}
+
+	/* reset value helper, zero out state structure and reset properties */
+	msm_property_reset_state(&c_conn->property_info, c_state,
+			c_state->property_values, 0);
+
+	c_state->base.connector = connector;
+	connector->state = &c_state->base;
+}
+
+static struct drm_connector_state *
+sde_connector_atomic_duplicate_state(struct drm_connector *connector)
+{
+	struct sde_connector *c_conn;
+	struct sde_connector_state *c_state, *c_oldstate;
+	int rc;
+
+	if (!connector || !connector->state) {
+		SDE_ERROR("invalid connector %pK\n", connector);
+		return NULL;
+	}
+
+	c_conn = to_sde_connector(connector);
+	c_oldstate = to_sde_connector_state(connector->state);
+	c_state = msm_property_alloc_state(&c_conn->property_info);
+	if (!c_state) {
+		SDE_ERROR("state alloc failed\n");
+		return NULL;
+	}
+
+	/* duplicate value helper */
+	msm_property_duplicate_state(&c_conn->property_info,
+			c_oldstate, c_state, c_state->property_values, 0);
+
+	/* additional handling for drm framebuffer objects */
+	if (c_state->out_fb) {
+		drm_framebuffer_reference(c_state->out_fb);
+		rc = msm_framebuffer_prepare(c_state->out_fb,
+				c_state->aspace);
+		if (rc)
+			SDE_ERROR("failed to prepare fb, %d\n", rc);
+	}
+
+	return &c_state->base;
+}
+
+static int _sde_connector_set_hdr_info(
+	struct sde_connector *c_conn,
+	struct sde_connector_state *c_state,
+	void *usr_ptr)
+{
+	struct drm_connector *connector;
+	struct drm_msm_ext_panel_hdr_ctrl *hdr_ctrl;
+	struct drm_msm_ext_panel_hdr_metadata *hdr_meta;
+	int i;
+
+	if (!c_conn || !c_state) {
+		SDE_ERROR_CONN(c_conn, "invalid args\n");
+		return -EINVAL;
+	}
+
+	connector = &c_conn->base;
+
+	if (!connector->hdr_supported) {
+		SDE_ERROR_CONN(c_conn, "sink doesn't support HDR\n");
+		return -ENOTSUPP;
+	}
+
+	memset(&c_state->hdr_ctrl, 0, sizeof(c_state->hdr_ctrl));
+
+	if (!usr_ptr) {
+		SDE_DEBUG_CONN(c_conn, "hdr control cleared\n");
+		return 0;
+	}
+
+	if (copy_from_user(&c_state->hdr_ctrl,
+		(void __user *)usr_ptr,
+			sizeof(*hdr_ctrl))) {
+		SDE_ERROR_CONN(c_conn, "failed to copy hdr control\n");
+		return -EFAULT;
+	}
+
+	hdr_ctrl = &c_state->hdr_ctrl;
+
+	SDE_DEBUG_CONN(c_conn, "hdr_supported %d\n",
+				   hdr_ctrl->hdr_state);
+
+	hdr_meta = &hdr_ctrl->hdr_meta;
+
+	SDE_DEBUG_CONN(c_conn, "hdr_supported %d\n",
+				   hdr_meta->hdr_supported);
+	SDE_DEBUG_CONN(c_conn, "eotf %d\n",
+				   hdr_meta->eotf);
+	SDE_DEBUG_CONN(c_conn, "white_point_x %d\n",
+				   hdr_meta->white_point_x);
+	SDE_DEBUG_CONN(c_conn, "white_point_y %d\n",
+				   hdr_meta->white_point_y);
+	SDE_DEBUG_CONN(c_conn, "max_luminance %d\n",
+				   hdr_meta->max_luminance);
+	SDE_DEBUG_CONN(c_conn, "max_content_light_level %d\n",
+				   hdr_meta->max_content_light_level);
+	SDE_DEBUG_CONN(c_conn, "max_average_light_level %d\n",
+				   hdr_meta->max_average_light_level);
+
+	for (i = 0; i < HDR_PRIMARIES_COUNT; i++) {
+		SDE_DEBUG_CONN(c_conn, "display_primaries_x [%d]\n",
+				   hdr_meta->display_primaries_x[i]);
+		SDE_DEBUG_CONN(c_conn, "display_primaries_y [%d]\n",
+				   hdr_meta->display_primaries_y[i]);
+	}
+
+	return 0;
+}
+
+static int _sde_connector_update_power_locked(struct sde_connector *c_conn)
+{
+	struct drm_connector *connector;
+	void *display;
+	int (*set_power)(struct drm_connector *, int, void *);
+	int mode, rc = 0;
+
+	if (!c_conn)
+		return -EINVAL;
+	connector = &c_conn->base;
+
+	/* every arm of the switch below assigns mode */
+	switch (c_conn->dpms_mode) {
+	case DRM_MODE_DPMS_ON:
+		mode = c_conn->lp_mode;
+		break;
+	case DRM_MODE_DPMS_STANDBY:
+		mode = SDE_MODE_DPMS_STANDBY;
+		break;
+	case DRM_MODE_DPMS_SUSPEND:
+		mode = SDE_MODE_DPMS_SUSPEND;
+		break;
+	case DRM_MODE_DPMS_OFF:
+		mode = SDE_MODE_DPMS_OFF;
+		break;
+	default:
+		mode = c_conn->lp_mode;
+		SDE_ERROR("conn %d dpms set to unrecognized mode %d\n",
+				connector->base.id, mode);
+		break;
+	}
+
+	SDE_DEBUG("conn %d - dpms %d, lp %d, panel %d\n", connector->base.id,
+			c_conn->dpms_mode, c_conn->lp_mode, mode);
+
+	if (mode != c_conn->last_panel_power_mode && c_conn->ops.set_power) {
+		display = c_conn->display;
+		set_power = c_conn->ops.set_power;
+
+		mutex_unlock(&c_conn->lock);
+		rc = set_power(connector, mode, display);
+		mutex_lock(&c_conn->lock);
+	}
+	c_conn->last_panel_power_mode = mode;
+
+	return rc;
+}
+
+static int sde_connector_atomic_set_property(struct drm_connector *connector,
+		struct drm_connector_state *state,
+		struct drm_property *property,
+		uint64_t val)
+{
+	struct sde_connector *c_conn;
+	struct sde_connector_state *c_state;
+	int idx, rc;
+
+	if (!connector || !state || !property) {
+		SDE_ERROR("invalid argument(s), conn %pK, state %pK, prp %pK\n",
+				connector, state, property);
+		return -EINVAL;
+	}
+
+	c_conn = to_sde_connector(connector);
+	c_state = to_sde_connector_state(state);
+
+	/* generic property handling */
+	rc = msm_property_atomic_set(&c_conn->property_info,
+			c_state->property_values, 0, property, val);
+	if (rc)
+		goto end;
+
+	/* connector-specific property handling */
+	idx = msm_property_index(&c_conn->property_info, property);
+	switch (idx) {
+	case CONNECTOR_PROP_OUT_FB:
+		/* clear old fb, if present */
+		if (c_state->out_fb)
+			_sde_connector_destroy_fb(c_conn, c_state);
+
+		/* convert fb val to drm framebuffer and prepare it */
+		c_state->out_fb =
+			drm_framebuffer_lookup(connector->dev, val);
+		if (!c_state->out_fb) {
+			SDE_ERROR("failed to look up fb %lld\n", val);
+			rc = -EFAULT;
+		} else {
+			if (c_state->out_fb->flags & DRM_MODE_FB_SECURE)
+				c_state->aspace =
+				c_conn->aspace[SDE_IOMMU_DOMAIN_SECURE];
+			else
+				c_state->aspace =
+				c_conn->aspace[SDE_IOMMU_DOMAIN_UNSECURE];
+
+			rc = msm_framebuffer_prepare(c_state->out_fb,
+					c_state->aspace);
+			if (rc)
+				SDE_ERROR("prep fb failed, %d\n", rc);
+		}
+		break;
+	case CONNECTOR_PROP_TOPOLOGY_CONTROL:
+		rc = sde_rm_check_property_topctl(val);
+		if (rc)
+			SDE_ERROR("invalid topology_control: 0x%llX\n", val);
+		break;
+	case CONNECTOR_PROP_LP:
+		mutex_lock(&c_conn->lock);
+		c_conn->lp_mode = val;
+		_sde_connector_update_power_locked(c_conn);
+		mutex_unlock(&c_conn->lock);
+		break;
+	case CONNECTOR_PROP_HPD_OFF:
+		c_conn->hpd_mode = val;
+		break;
+	default:
+		break;
+	}
+
+	if (idx == CONNECTOR_PROP_HDR_CONTROL) {
+		rc = _sde_connector_set_hdr_info(c_conn, c_state, (void *)val);
+		if (rc)
+			SDE_ERROR_CONN(c_conn, "cannot set hdr info %d\n", rc);
+	}
+
+	/* check for custom property handling */
+	if (!rc && c_conn->ops.set_property) {
+		rc = c_conn->ops.set_property(connector,
+				state,
+				idx,
+				val,
+				c_conn->display);
+
+		/* potentially clean up out_fb if rc != 0 */
+		if ((idx == CONNECTOR_PROP_OUT_FB) && rc)
+			_sde_connector_destroy_fb(c_conn, c_state);
+	}
+end:
+	return rc;
+}
+
+static int sde_connector_set_property(struct drm_connector *connector,
+		struct drm_property *property,
+		uint64_t val)
+{
+	if (!connector) {
+		SDE_ERROR("invalid connector\n");
+		return -EINVAL;
+	}
+
+	return sde_connector_atomic_set_property(connector,
+			connector->state, property, val);
+}
+
+static int sde_connector_atomic_get_property(struct drm_connector *connector,
+		const struct drm_connector_state *state,
+		struct drm_property *property,
+		uint64_t *val)
+{
+	struct sde_connector *c_conn;
+	struct sde_connector_state *c_state;
+	int idx, rc = -EINVAL;
+
+	if (!connector || !state) {
+		SDE_ERROR("invalid argument(s), conn %pK, state %pK\n",
+				connector, state);
+		return -EINVAL;
+	}
+
+	c_conn = to_sde_connector(connector);
+	c_state = to_sde_connector_state(state);
+
+	idx = msm_property_index(&c_conn->property_info, property);
+	if (idx == CONNECTOR_PROP_RETIRE_FENCE)
+		rc = sde_fence_create(&c_conn->retire_fence, val, 0);
+	else
+		/* get cached property value */
+		rc = msm_property_atomic_get(&c_conn->property_info,
+				c_state->property_values, 0, property, val);
+
+	/* allow for custom override */
+	if (c_conn->ops.get_property)
+		rc = c_conn->ops.get_property(connector,
+				(struct drm_connector_state *)state,
+				idx,
+				val,
+				c_conn->display);
+	return rc;
+}
+
+void sde_connector_prepare_fence(struct drm_connector *connector)
+{
+	if (!connector) {
+		SDE_ERROR("invalid connector\n");
+		return;
+	}
+
+	sde_fence_prepare(&to_sde_connector(connector)->retire_fence);
+}
+
+void sde_connector_complete_commit(struct drm_connector *connector)
+{
+	struct drm_device *dev;
+	struct msm_drm_private *priv;
+	struct sde_connector *c_conn;
+	struct sde_kms *sde_kms;
+
+	if (!connector) {
+		SDE_ERROR("invalid connector\n");
+		return;
+	}
+
+	dev = connector->dev;
+	priv = dev->dev_private;
+	sde_kms = to_sde_kms(priv->kms);
+
+	/* signal connector's retire fence */
+	sde_fence_signal(&to_sde_connector(connector)->retire_fence, 0);
+
+	/*
+	 * After the first vsync arrives, the early splash resources
+	 * should start to be released.
+	 */
+	if (sde_splash_get_lk_complete_status(&sde_kms->splash_info)) {
+		c_conn = to_sde_connector(connector);
+
+		sde_splash_clean_up_free_resource(priv->kms,
+						&priv->phandle,
+						c_conn->connector_type,
+						c_conn->display);
+	}
+}
+
+static int sde_connector_dpms(struct drm_connector *connector,
+		int mode)
+{
+	struct sde_connector *c_conn;
+
+	if (!connector) {
+		SDE_ERROR("invalid connector\n");
+		return -EINVAL;
+	}
+	c_conn = to_sde_connector(connector);
+
+	/* validate incoming dpms request */
+	switch (mode) {
+	case DRM_MODE_DPMS_ON:
+	case DRM_MODE_DPMS_STANDBY:
+	case DRM_MODE_DPMS_SUSPEND:
+	case DRM_MODE_DPMS_OFF:
+		SDE_DEBUG("conn %d dpms set to %d\n",
+			connector->base.id, mode);
+		break;
+	default:
+		SDE_ERROR("conn %d dpms set to unrecognized mode %d\n",
+			connector->base.id, mode);
+		break;
+	}
+
+	mutex_lock(&c_conn->lock);
+	c_conn->dpms_mode = mode;
+	_sde_connector_update_power_locked(c_conn);
+	mutex_unlock(&c_conn->lock);
+
+	/* use helper for boilerplate handling */
+	return drm_atomic_helper_connector_dpms(connector, mode);
+}
+
+int sde_connector_get_dpms(struct drm_connector *connector)
+{
+	struct sde_connector *c_conn;
+	int rc;
+
+	if (!connector) {
+		SDE_DEBUG("invalid connector\n");
+		return DRM_MODE_DPMS_OFF;
+	}
+
+	c_conn = to_sde_connector(connector);
+
+	mutex_lock(&c_conn->lock);
+	rc = c_conn->dpms_mode;
+	mutex_unlock(&c_conn->lock);
+
+	return rc;
+}
+
+static void sde_connector_update_hdr_props(struct drm_connector *connector)
+{
+	struct sde_connector *c_conn = to_sde_connector(connector);
+	struct drm_msm_ext_panel_hdr_properties hdr_prop = {};
+
+	hdr_prop.hdr_supported = connector->hdr_supported;
+
+	if (hdr_prop.hdr_supported) {
+		hdr_prop.hdr_eotf =
+		  connector->hdr_eotf;
+		hdr_prop.hdr_metadata_type_one =
+		  connector->hdr_metadata_type_one;
+		hdr_prop.hdr_max_luminance =
+		  connector->hdr_max_luminance;
+		hdr_prop.hdr_avg_luminance =
+		  connector->hdr_avg_luminance;
+		hdr_prop.hdr_min_luminance =
+		  connector->hdr_min_luminance;
+	}
+	msm_property_set_blob(&c_conn->property_info,
+			      &c_conn->blob_hdr,
+			      &hdr_prop,
+			      sizeof(hdr_prop),
+			      CONNECTOR_PROP_HDR_INFO);
+}
+
+static enum drm_connector_status
+sde_connector_detect(struct drm_connector *connector, bool force)
+{
+	enum drm_connector_status status = connector_status_unknown;
+	struct sde_connector *c_conn;
+
+	if (!connector) {
+		SDE_ERROR("invalid connector\n");
+		return status;
+	}
+
+	c_conn = to_sde_connector(connector);
+
+	if (c_conn->ops.detect)
+		status = c_conn->ops.detect(connector,
+				force,
+				c_conn->display);
+
+	return status;
+}
+
+static const struct drm_connector_funcs sde_connector_ops = {
+	.dpms =                   sde_connector_dpms,
+	.reset =                  sde_connector_atomic_reset,
+	.detect =                 sde_connector_detect,
+	.destroy =                sde_connector_destroy,
+	.fill_modes =             drm_helper_probe_single_connector_modes,
+	.atomic_duplicate_state = sde_connector_atomic_duplicate_state,
+	.atomic_destroy_state =   sde_connector_atomic_destroy_state,
+	.atomic_set_property =    sde_connector_atomic_set_property,
+	.atomic_get_property =    sde_connector_atomic_get_property,
+	.set_property =           sde_connector_set_property,
+};
+
+static int sde_connector_get_modes(struct drm_connector *connector)
+{
+	struct sde_connector *c_conn;
+	int ret = 0;
+
+	if (!connector) {
+		SDE_ERROR("invalid connector\n");
+		return 0;
+	}
+
+	c_conn = to_sde_connector(connector);
+	if (!c_conn->ops.get_modes) {
+		SDE_DEBUG("missing get_modes callback\n");
+		return 0;
+	}
+	ret = c_conn->ops.get_modes(connector, c_conn->display);
+	if (ret)
+		sde_connector_update_hdr_props(connector);
+
+	return ret;
+}
+
+static enum drm_mode_status
+sde_connector_mode_valid(struct drm_connector *connector,
+		struct drm_display_mode *mode)
+{
+	struct sde_connector *c_conn;
+
+	if (!connector || !mode) {
+		SDE_ERROR("invalid argument(s), conn %pK, mode %pK\n",
+				connector, mode);
+		return MODE_ERROR;
+	}
+
+	c_conn = to_sde_connector(connector);
+
+	if (c_conn->ops.mode_valid)
+		return c_conn->ops.mode_valid(connector, mode, c_conn->display);
+
+	/* assume all modes okay by default */
+	return MODE_OK;
+}
+
+static struct drm_encoder *
+sde_connector_best_encoder(struct drm_connector *connector)
+{
+	struct sde_connector *c_conn;
+
+	if (!connector) {
+		SDE_ERROR("invalid connector\n");
+		return NULL;
+	}
+	c_conn = to_sde_connector(connector);
+
+	/*
+	 * This is true for now, revisit this code when multiple encoders are
+	 * supported.
+	 */
+	return c_conn->encoder;
+}
+
+static const struct drm_connector_helper_funcs sde_connector_helper_ops = {
+	.get_modes =    sde_connector_get_modes,
+	.mode_valid =   sde_connector_mode_valid,
+	.best_encoder = sde_connector_best_encoder,
+};
+
+struct drm_connector *sde_connector_init(struct drm_device *dev,
+		struct drm_encoder *encoder,
+		struct drm_panel *panel,
+		void *display,
+		const struct sde_connector_ops *ops,
+		int connector_poll,
+		int connector_type)
+{
+	struct msm_drm_private *priv;
+	struct sde_kms *sde_kms;
+	struct sde_kms_info *info;
+	struct sde_connector *c_conn = NULL;
+	struct sde_splash_info *sinfo;
+	int rc;
+
+	if (!dev || !dev->dev_private || !encoder) {
+		SDE_ERROR("invalid argument(s), dev %pK, enc %pK\n",
+				dev, encoder);
+		return ERR_PTR(-EINVAL);
+	}
+
+	priv = dev->dev_private;
+	if (!priv->kms) {
+		SDE_ERROR("invalid kms reference\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	c_conn = kzalloc(sizeof(*c_conn), GFP_KERNEL);
+	if (!c_conn) {
+		SDE_ERROR("failed to alloc sde connector\n");
+		return ERR_PTR(-ENOMEM);
+	}
+
+	rc = drm_connector_init(dev,
+			&c_conn->base,
+			&sde_connector_ops,
+			connector_type);
+	if (rc)
+		goto error_free_conn;
+
+	c_conn->connector_type = connector_type;
+	c_conn->encoder = encoder;
+	c_conn->panel = panel;
+	c_conn->display = display;
+
+	c_conn->dpms_mode = DRM_MODE_DPMS_ON;
+	c_conn->hpd_mode = SDE_MODE_HPD_ON;
+	c_conn->lp_mode = 0;
+	c_conn->last_panel_power_mode = SDE_MODE_DPMS_ON;
+
+	sde_kms = to_sde_kms(priv->kms);
+	if (sde_kms->vbif[VBIF_NRT]) {
+		c_conn->aspace[SDE_IOMMU_DOMAIN_UNSECURE] =
+			sde_kms->aspace[MSM_SMMU_DOMAIN_NRT_UNSECURE];
+		c_conn->aspace[SDE_IOMMU_DOMAIN_SECURE] =
+			sde_kms->aspace[MSM_SMMU_DOMAIN_NRT_SECURE];
+	} else {
+		c_conn->aspace[SDE_IOMMU_DOMAIN_UNSECURE] =
+			sde_kms->aspace[MSM_SMMU_DOMAIN_UNSECURE];
+		c_conn->aspace[SDE_IOMMU_DOMAIN_SECURE] =
+			sde_kms->aspace[MSM_SMMU_DOMAIN_SECURE];
+	}
+
+	if (ops)
+		c_conn->ops = *ops;
+
+	c_conn->base.helper_private = &sde_connector_helper_ops;
+	c_conn->base.polled = connector_poll;
+	c_conn->base.interlace_allowed = 0;
+	c_conn->base.doublescan_allowed = 0;
+
+	snprintf(c_conn->name,
+			SDE_CONNECTOR_NAME_SIZE,
+			"conn%u",
+			c_conn->base.base.id);
+
+	rc = sde_fence_init(&c_conn->retire_fence, c_conn->name,
+			c_conn->base.base.id);
+	if (rc) {
+		SDE_ERROR("failed to init fence, %d\n", rc);
+		goto error_cleanup_conn;
+	}
+
+	mutex_init(&c_conn->lock);
+
+	rc = drm_connector_register(&c_conn->base);
+	if (rc) {
+		SDE_ERROR("failed to register drm connector, %d\n", rc);
+		goto error_cleanup_fence;
+	}
+
+	rc = drm_mode_connector_attach_encoder(&c_conn->base, encoder);
+	if (rc) {
+		SDE_ERROR("failed to attach encoder to connector, %d\n", rc);
+		goto error_unregister_conn;
+	}
+
+	/* create properties */
+	msm_property_init(&c_conn->property_info, &c_conn->base.base, dev,
+			priv->conn_property, c_conn->property_data,
+			CONNECTOR_PROP_COUNT, CONNECTOR_PROP_BLOBCOUNT,
+			sizeof(struct sde_connector_state));
+
+	if (c_conn->ops.post_init) {
+		info = kmalloc(sizeof(*info), GFP_KERNEL);
+		if (!info) {
+			SDE_ERROR("failed to allocate info buffer\n");
+			rc = -ENOMEM;
+			goto error_unregister_conn;
+		}
+
+		sde_kms_info_reset(info);
+		rc = c_conn->ops.post_init(&c_conn->base, info, display);
+		if (rc) {
+			SDE_ERROR("post-init failed, %d\n", rc);
+			kfree(info);
+			goto error_unregister_conn;
+		}
+
+		msm_property_install_blob(&c_conn->property_info,
+				"capabilities",
+				DRM_MODE_PROP_IMMUTABLE,
+				CONNECTOR_PROP_SDE_INFO);
+
+		msm_property_set_blob(&c_conn->property_info,
+				&c_conn->blob_caps,
+				SDE_KMS_INFO_DATA(info),
+				SDE_KMS_INFO_DATALEN(info),
+				CONNECTOR_PROP_SDE_INFO);
+		kfree(info);
+	}
+
+	if (connector_type == DRM_MODE_CONNECTOR_HDMIA) {
+		msm_property_install_blob(&c_conn->property_info,
+				"hdr_properties",
+				DRM_MODE_PROP_IMMUTABLE,
+				CONNECTOR_PROP_HDR_INFO);
+	}
+
+	msm_property_install_volatile_range(&c_conn->property_info,
+		"hdr_control", 0x0, 0, ~0, 0,
+		CONNECTOR_PROP_HDR_CONTROL);
+
+#if 0
+	msm_property_install_range(&c_conn->property_info, "RETIRE_FENCE",
+			0x0, 0, INR_OPEN_MAX, 0, CONNECTOR_PROP_RETIRE_FENCE);
+#endif
+
+	msm_property_install_volatile_signed_range(&c_conn->property_info,
+			"PLL_DELTA", 0x0, INT_MIN, INT_MAX, 0,
+			CONNECTOR_PROP_PLL_DELTA);
+
+	msm_property_install_volatile_range(&c_conn->property_info,
+			"PLL_ENABLE", 0x0, 0, 1, 0,
+			CONNECTOR_PROP_PLL_ENABLE);
+
+	msm_property_install_volatile_range(&c_conn->property_info,
+			"HDCP_VERSION", 0x0, 0, U8_MAX, 0,
+			CONNECTOR_PROP_HDCP_VERSION);
+
+	/* enum/bitmask properties */
+	msm_property_install_enum(&c_conn->property_info, "topology_name",
+			DRM_MODE_PROP_IMMUTABLE, 0, e_topology_name,
+			ARRAY_SIZE(e_topology_name),
+			CONNECTOR_PROP_TOPOLOGY_NAME, 0);
+	msm_property_install_enum(&c_conn->property_info, "topology_control",
+			0, 1, e_topology_control,
+			ARRAY_SIZE(e_topology_control),
+			CONNECTOR_PROP_TOPOLOGY_CONTROL, 0);
+
+	msm_property_install_enum(&c_conn->property_info, "LP",
+			0, 0, e_power_mode,
+			ARRAY_SIZE(e_power_mode),
+			CONNECTOR_PROP_LP, 0);
+
+	msm_property_install_enum(&c_conn->property_info, "HPD_OFF",
+			DRM_MODE_PROP_ATOMIC, 0, hpd_clock_state,
+			ARRAY_SIZE(hpd_clock_state),
+			CONNECTOR_PROP_HPD_OFF, 0);
+
+	rc = msm_property_install_get_status(&c_conn->property_info);
+	if (rc) {
+		SDE_ERROR("failed to create one or more properties\n");
+		goto error_destroy_property;
+	}
+
+	SDE_DEBUG("connector %d attach encoder %d\n",
+			c_conn->base.base.id, encoder->base.id);
+
+	sinfo = &sde_kms->splash_info;
+	if (sinfo && sinfo->handoff)
+		sde_splash_setup_connector_count(sinfo, connector_type);
+
+	priv->connectors[priv->num_connectors++] = &c_conn->base;
+
+	return &c_conn->base;
+
+error_destroy_property:
+	if (c_conn->blob_caps)
+		drm_property_unreference_blob(c_conn->blob_caps);
+	if (c_conn->blob_hdr)
+		drm_property_unreference_blob(c_conn->blob_hdr);
+	msm_property_destroy(&c_conn->property_info);
+error_unregister_conn:
+	drm_connector_unregister(&c_conn->base);
+error_cleanup_fence:
+	mutex_destroy(&c_conn->lock);
+	sde_fence_deinit(&c_conn->retire_fence);
+error_cleanup_conn:
+	drm_connector_cleanup(&c_conn->base);
+error_free_conn:
+	kfree(c_conn);
+
+	return ERR_PTR(rc);
+}
diff -Nruw linux-4.4.115-fbx/drivers/gpu/drm/msm/sde./sde_connector.h linux-4.4.115-fbx/drivers/gpu/drm/msm/sde/sde_connector.h
--- linux-4.4.115-fbx/drivers/gpu/drm/msm/sde./sde_connector.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/gpu/drm/msm/sde/sde_connector.h	2019-10-29 09:26:23.637203119 +0100
@@ -0,0 +1,415 @@
+/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _SDE_CONNECTOR_H_
+#define _SDE_CONNECTOR_H_
+
+#include <drm/drmP.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_panel.h>
+
+#include "msm_drv.h"
+#include "msm_prop.h"
+#include "sde_kms.h"
+#include "sde_fence.h"
+
+#define SDE_MODE_HPD_ON        0
+#define SDE_MODE_HPD_OFF       1
+
+#define SDE_CONNECTOR_NAME_SIZE	16
+
+struct sde_connector;
+struct sde_connector_state;
+
+/**
+ * struct sde_connector_ops - callback functions for generic sde connector
+ * Individual callbacks documented below.
+ */
+struct sde_connector_ops {
+	/**
+	 * post_init - perform additional initialization steps
+	 * @connector: Pointer to drm connector structure
+	 * @info: Pointer to sde connector info structure
+	 * @display: Pointer to private display handle
+	 * Returns: Zero on success
+	 */
+	int (*post_init)(struct drm_connector *connector,
+			void *info,
+			void *display);
+
+	/**
+	 * pre_deinit - perform additional deinitialization steps
+	 * @connector: Pointer to drm connector structure
+	 * @display: Pointer to private display handle
+	 * Returns: Zero on success
+	 */
+	int (*pre_deinit)(struct drm_connector *connector,
+			void *display);
+
+	/**
+	 * detect - determine if connector is connected
+	 * @connector: Pointer to drm connector structure
+	 * @force: Force detect setting from drm framework
+	 * @display: Pointer to private display handle
+	 * Returns: Connector 'is connected' status
+	 */
+	enum drm_connector_status (*detect)(struct drm_connector *connector,
+			bool force,
+			void *display);
+
+	/**
+	 * get_modes - add drm modes via drm_mode_probed_add()
+	 * @connector: Pointer to drm connector structure
+	 * @display: Pointer to private display handle
+	 * Returns: Number of modes added
+	 */
+	int (*get_modes)(struct drm_connector *connector,
+			void *display);
+
+	/**
+	 * mode_valid - determine if specified mode is valid
+	 * @connector: Pointer to drm connector structure
+	 * @mode: Pointer to drm mode structure
+	 * @display: Pointer to private display handle
+	 * Returns: Validity status for specified mode
+	 */
+	enum drm_mode_status (*mode_valid)(struct drm_connector *connector,
+			struct drm_display_mode *mode,
+			void *display);
+
+	/**
+	 * set_property - set property value
+	 * @connector: Pointer to drm connector structure
+	 * @state: Pointer to drm connector state structure
+	 * @property_index: DRM property index
+	 * @value: Incoming property value
+	 * @display: Pointer to private display structure
+	 * Returns: Zero on success
+	 */
+	int (*set_property)(struct drm_connector *connector,
+			struct drm_connector_state *state,
+			int property_index,
+			uint64_t value,
+			void *display);
+
+	/**
+	 * get_property - get property value
+	 * @connector: Pointer to drm connector structure
+	 * @state: Pointer to drm connector state structure
+	 * @property_index: DRM property index
+	 * @value: Pointer to variable for accepting property value
+	 * @display: Pointer to private display structure
+	 * Returns: Zero on success
+	 */
+	int (*get_property)(struct drm_connector *connector,
+			struct drm_connector_state *state,
+			int property_index,
+			uint64_t *value,
+			void *display);
+
+	/**
+	 * get_info - get display information
+	 * @info: Pointer to msm display info structure
+	 * @display: Pointer to private display structure
+	 * Returns: Zero on success
+	 */
+	int (*get_info)(struct msm_display_info *info, void *display);
+
+	int (*set_backlight)(void *display, u32 bl_lvl);
+
+
+	/**
+	 * pre_kickoff - trigger display to program kickoff-time features
+	 * @connector: Pointer to drm connector structure
+	 * @display: Pointer to private display structure
+	 * @params: Parameter bundle of connector-stored information for
+	 *	kickoff-time programming into the display
+	 * Returns: Zero on success
+	 */
+	int (*pre_kickoff)(struct drm_connector *connector,
+		void *display,
+		struct msm_display_kickoff_params *params);
+
+	/**
+	 * mode_needs_full_range - does the mode need full range
+	 * quantization
+	 * @display: Pointer to private display structure
+	 * Returns: true or false based on whether full range is needed
+	 */
+	bool (*mode_needs_full_range)(void *display);
+
+	/**
+	 * get_csc_type - returns the CSC type to be used
+	 * by the CDM block based on HDR state
+	 * @connector: Pointer to drm connector structure
+	 * @display: Pointer to private display structure
+	 * Returns: type of CSC matrix to be used
+	 */
+	enum sde_csc_type (*get_csc_type)(struct drm_connector *connector,
+		void *display);
+
+	/**
+	 * set_power - update dpms setting
+	 * @connector: Pointer to drm connector structure
+	 * @power_mode: One of the following,
+	 *		SDE_MODE_DPMS_ON
+	 *		SDE_MODE_DPMS_LP1
+	 *		SDE_MODE_DPMS_LP2
+	 *		SDE_MODE_DPMS_OFF
+	 * @display: Pointer to private display structure
+	 * Returns: Zero on success
+	 */
+	int (*set_power)(struct drm_connector *connector,
+			int power_mode, void *display);
+};
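+
+/*
+ * Illustrative sketch (not part of the driver): a display backend would
+ * typically implement a subset of these callbacks and hand the table to
+ * sde_connector_init(). All my_dsi_* names below are hypothetical.
+ *
+ *	static const struct sde_connector_ops dsi_ops = {
+ *		.post_init  = my_dsi_post_init,
+ *		.detect     = my_dsi_detect,
+ *		.get_modes  = my_dsi_get_modes,
+ *		.mode_valid = my_dsi_mode_valid,
+ *		.set_power  = my_dsi_set_power,
+ *	};
+ *
+ *	connector = sde_connector_init(dev, encoder, panel, dsi_display,
+ *			&dsi_ops, DRM_CONNECTOR_POLL_HPD,
+ *			DRM_MODE_CONNECTOR_DSI);
+ *	if (IS_ERR(connector))
+ *		return PTR_ERR(connector);
+ */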
+
+/**
+ * struct sde_connector - local sde connector structure
+ * @base: Base drm connector structure
+ * @connector_type: Set to one of DRM_MODE_CONNECTOR_ types
+ * @encoder: Pointer to preferred drm encoder
+ * @panel: Pointer to drm panel, if present
+ * @display: Pointer to private display data structure
+ * @aspace: Address space handles for secure and unsecure buffer mappings
+ * @name: ASCII name of connector
+ * @lock: Mutex lock object for this structure
+ * @retire_fence: Retire fence reference
+ * @ops: Local callback function pointer table
+ * @dpms_mode: DPMS property setting from user space
+ * @hpd_mode: HPD_OFF property setting from user space
+ * @lp_mode: LP property setting from user space
+ * @last_panel_power_mode: Last consolidated dpms/lp mode setting
+ * @property_info: Private structure for generic property handling
+ * @property_data: Array of private data for generic property handling
+ * @blob_caps: Pointer to blob structure for 'capabilities' property
+ * @blob_hdr: Pointer to blob structure for 'hdr_properties' property
+ */
+struct sde_connector {
+	struct drm_connector base;
+
+	int connector_type;
+
+	struct drm_encoder *encoder;
+	struct drm_panel *panel;
+	void *display;
+
+	struct msm_gem_address_space *aspace[SDE_IOMMU_DOMAIN_MAX];
+
+	char name[SDE_CONNECTOR_NAME_SIZE];
+
+	struct mutex lock;
+	struct sde_fence retire_fence;
+	struct sde_connector_ops ops;
+	int dpms_mode;
+	u64 hpd_mode;
+	int lp_mode;
+	int last_panel_power_mode;
+
+	struct msm_property_info property_info;
+	struct msm_property_data property_data[CONNECTOR_PROP_COUNT];
+	struct drm_property_blob *blob_caps;
+	struct drm_property_blob *blob_hdr;
+};
+
+/**
+ * to_sde_connector - convert drm_connector pointer to sde connector pointer
+ * @X: Pointer to drm_connector structure
+ * Returns: Pointer to sde_connector structure
+ */
+#define to_sde_connector(x)     container_of((x), struct sde_connector, base)
+
+/**
+ * sde_connector_get_display - get sde connector's private display pointer
+ * @C: Pointer to drm connector structure
+ * Returns: Pointer to associated private display structure
+ */
+#define sde_connector_get_display(C) \
+	((C) ? to_sde_connector((C))->display : 0)
+
+/**
+ * sde_connector_get_panel - get sde connector's private panel pointer
+ * @C: Pointer to drm connector structure
+ * Returns: Pointer to associated private display structure
+ */
+#define sde_connector_get_panel(C) \
+	((C) ? to_sde_connector((C))->panel : 0)
+
+/**
+ * sde_connector_get_encoder - get sde connector's private encoder pointer
+ * @C: Pointer to drm connector structure
+ * Returns: Pointer to associated private encoder structure
+ */
+#define sde_connector_get_encoder(C) \
+	((C) ? to_sde_connector((C))->encoder : 0)
+
+/**
+ * sde_connector_get_propinfo - get sde connector's property info pointer
+ * @C: Pointer to drm connector structure
+ * Returns: Pointer to associated private property info structure
+ */
+#define sde_connector_get_propinfo(C) \
+	((C) ? &to_sde_connector((C))->property_info : 0)
+
+/**
+ * struct sde_connector_state - private connector status structure
+ * @base: Base drm connector structure
+ * @out_fb: Pointer to output frame buffer, if applicable
+ * @aspace: Address space for accessing frame buffer objects, if applicable
+ * @property_values: Local cache of current connector property values
+ * @hdr_ctrl: HDR control info passed from userspace
+ */
+struct sde_connector_state {
+	struct drm_connector_state base;
+	struct drm_framebuffer *out_fb;
+	struct msm_gem_address_space *aspace;
+	uint64_t property_values[CONNECTOR_PROP_COUNT];
+	struct drm_msm_ext_panel_hdr_ctrl hdr_ctrl;
+};
+
+/**
+ * to_sde_connector_state - convert drm_connector_state pointer to
+ *                          sde connector state pointer
+ * @X: Pointer to drm_connector_state structure
+ * Returns: Pointer to sde_connector_state structure
+ */
+#define to_sde_connector_state(x) \
+	container_of((x), struct sde_connector_state, base)
+
+/**
+ * sde_connector_get_property - query integer value of connector property
+ * @S: Pointer to drm connector state
+ * @X: Property index, from enum msm_mdp_connector_property
+ * Returns: Integer value of requested property
+ */
+#define sde_connector_get_property(S, X) \
+	((S) && ((X) < CONNECTOR_PROP_COUNT) ? \
+	 (to_sde_connector_state((S))->property_values[(X)]) : 0)
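+
+/*
+ * Usage sketch (illustrative): reading the cached LP property value
+ * during atomic processing; evaluates to 0 if the state pointer is NULL:
+ *
+ *	uint64_t lp_mode = sde_connector_get_property(connector->state,
+ *			CONNECTOR_PROP_LP);
+ */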
+
+/**
+ * sde_connector_get_property_values - retrieve property values cache
+ * @S: Pointer to drm connector state
+ * Returns: Pointer to the cached property values array, or 0 if S is invalid
+ */
+#define sde_connector_get_property_values(S) \
+	((S) ? (to_sde_connector_state((S))->property_values) : 0)
+
+/**
+ * sde_connector_get_out_fb - query out_fb value from sde connector state
+ * @S: Pointer to drm connector state
+ * Returns: Output fb associated with specified connector state
+ */
+#define sde_connector_get_out_fb(S) \
+	((S) ? to_sde_connector_state((S))->out_fb : 0)
+
+/**
+ * sde_connector_get_topology_name - helper accessor to retrieve topology_name
+ * @connector: pointer to drm connector
+ * Returns: value of the CONNECTOR_PROP_TOPOLOGY_NAME property or 0
+ */
+static inline uint64_t sde_connector_get_topology_name(
+		struct drm_connector *connector)
+{
+	if (!connector || !connector->state)
+		return 0;
+	return sde_connector_get_property(connector->state,
+			CONNECTOR_PROP_TOPOLOGY_NAME);
+}
+
+/**
+ * sde_connector_init - create drm connector object for a given display
+ * @dev: Pointer to drm device struct
+ * @encoder: Pointer to associated encoder
+ * @panel: Pointer to associated panel, can be NULL
+ * @display: Pointer to associated display object
+ * @ops: Pointer to callback operations function table
+ * @connector_poll: Set to appropriate DRM_CONNECTOR_POLL_ setting
+ * @connector_type: Set to appropriate DRM_MODE_CONNECTOR_ type
+ * Returns: Pointer to newly created drm connector struct
+ */
+struct drm_connector *sde_connector_init(struct drm_device *dev,
+		struct drm_encoder *encoder,
+		struct drm_panel *panel,
+		void *display,
+		const struct sde_connector_ops *ops,
+		int connector_poll,
+		int connector_type);
+
+/**
+ * sde_connector_prepare_fence - prepare fence support for current commit
+ * @connector: Pointer to drm connector object
+ */
+void sde_connector_prepare_fence(struct drm_connector *connector);
+
+/**
+ * sde_connector_complete_commit - signal completion of current commit
+ * @connector: Pointer to drm connector object
+ */
+void sde_connector_complete_commit(struct drm_connector *connector);
+
+/**
+ * sde_connector_get_info - query display specific information
+ * @connector: Pointer to drm connector object
+ * @info: Pointer to msm display information structure
+ * Returns: Zero on success
+ */
+int sde_connector_get_info(struct drm_connector *connector,
+		struct msm_display_info *info);
+
+/**
+ * sde_connector_pre_kickoff - trigger kickoff time feature programming
+ * @connector: Pointer to drm connector object
+ * Returns: Zero on success
+ */
+int sde_connector_pre_kickoff(struct drm_connector *connector);
+
+/**
+ * sde_connector_mode_needs_full_range - query quantization type
+ * for the connector mode
+ * @connector: Pointer to drm connector object
+ * Returns: true OR false based on connector mode
+ */
+bool sde_connector_mode_needs_full_range(struct drm_connector *connector);
+
+/**
+ * sde_connector_get_csc_type - query csc type
+ * to be used for the connector
+ * @connector: Pointer to drm connector object
+ * Returns: csc type based on connector HDR state
+ */
+enum sde_csc_type sde_connector_get_csc_type(struct drm_connector *conn);
+
+/**
+ * sde_connector_get_dpms - query dpms setting
+ * @connector: Pointer to drm connector structure
+ * Returns: Current DPMS setting for connector
+ */
+int sde_connector_get_dpms(struct drm_connector *connector);
+
+/**
+ * sde_connector_needs_offset - adjust the output fence offset based on
+ *                              display type
+ * @connector: Pointer to drm connector object
+ * Returns: true if offset is required, false for all other cases.
+ */
+static inline bool sde_connector_needs_offset(struct drm_connector *connector)
+{
+	struct sde_connector *c_conn;
+
+	if (!connector)
+		return false;
+
+	c_conn = to_sde_connector(connector);
+	return (c_conn->connector_type != DRM_MODE_CONNECTOR_VIRTUAL);
+}
+
+#endif /* _SDE_CONNECTOR_H_ */
+
diff -Nruw linux-4.4.115-fbx/drivers/gpu/drm/msm/sde./sde_core_irq.c linux-4.4.115-fbx/drivers/gpu/drm/msm/sde/sde_core_irq.c
--- linux-4.4.115-fbx/drivers/gpu/drm/msm/sde./sde_core_irq.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/gpu/drm/msm/sde/sde_core_irq.c	2019-10-29 09:26:23.637203119 +0100
@@ -0,0 +1,565 @@
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt)	"[drm:%s:%d] " fmt, __func__, __LINE__
+
+#include <linux/debugfs.h>
+#include <linux/irqdomain.h>
+#include <linux/irq.h>
+#include <linux/kthread.h>
+
+#include "sde_core_irq.h"
+#include "sde_power_handle.h"
+
+/**
+ * sde_core_irq_callback_handler - dispatch core interrupts
+ * @arg:		private data of callback handler
+ * @irq_idx:		interrupt index
+ */
+static void sde_core_irq_callback_handler(void *arg, int irq_idx)
+{
+	struct sde_kms *sde_kms = arg;
+	struct sde_irq *irq_obj = &sde_kms->irq_obj;
+	struct sde_irq_callback *cb;
+	unsigned long irq_flags;
+	bool cb_tbl_error = false;
+	int enable_counts = 0;
+
+	pr_debug("irq_idx=%d\n", irq_idx);
+
+	spin_lock_irqsave(&sde_kms->irq_obj.cb_lock, irq_flags);
+	if (list_empty(&irq_obj->irq_cb_tbl[irq_idx])) {
+		/* print error outside lock */
+		cb_tbl_error = true;
+		enable_counts = atomic_read(
+				&sde_kms->irq_obj.enable_counts[irq_idx]);
+	}
+
+	atomic_inc(&irq_obj->irq_counts[irq_idx]);
+
+	/*
+	 * Perform registered function callback
+	 */
+	list_for_each_entry(cb, &irq_obj->irq_cb_tbl[irq_idx], list)
+		if (cb->func)
+			cb->func(cb->arg, irq_idx);
+	spin_unlock_irqrestore(&sde_kms->irq_obj.cb_lock, irq_flags);
+
+	if (cb_tbl_error) {
+		SDE_ERROR("irq has no registered callback, idx %d enables %d\n",
+				irq_idx, enable_counts);
+		SDE_EVT32_IRQ(irq_idx, enable_counts, SDE_EVTLOG_ERROR);
+	}
+
+	/*
+	 * Clear pending interrupt status in HW.
+	 * NOTE: sde_core_irq_callback_handler is protected by top-level
+	 *       spinlock, so it is safe to clear any interrupt status here.
+	 */
+	sde_kms->hw_intr->ops.clear_interrupt_status(
+			sde_kms->hw_intr,
+			irq_idx);
+}
+
+int sde_core_irq_idx_lookup(struct sde_kms *sde_kms,
+		enum sde_intr_type intr_type, u32 instance_idx)
+{
+	if (!sde_kms || !sde_kms->hw_intr ||
+			!sde_kms->hw_intr->ops.irq_idx_lookup)
+		return -EINVAL;
+
+	return sde_kms->hw_intr->ops.irq_idx_lookup(intr_type,
+			instance_idx);
+}
+
+/**
+ * _sde_core_irq_enable - enable core interrupt given by the index
+ * @sde_kms:		Pointer to sde kms context
+ * @irq_idx:		interrupt index
+ */
+static int _sde_core_irq_enable(struct sde_kms *sde_kms, int irq_idx)
+{
+	unsigned long irq_flags;
+	int ret = 0;
+
+	if (!sde_kms || !sde_kms->hw_intr ||
+			!sde_kms->irq_obj.enable_counts ||
+			!sde_kms->irq_obj.irq_counts) {
+		SDE_ERROR("invalid params\n");
+		return -EINVAL;
+	}
+
+	if (irq_idx < 0 || irq_idx >= sde_kms->hw_intr->irq_idx_tbl_size) {
+		SDE_ERROR("invalid IRQ index: [%d]\n", irq_idx);
+		return -EINVAL;
+	}
+
+	SDE_DEBUG("irq_idx=%d enable_count=%d\n", irq_idx,
+			atomic_read(&sde_kms->irq_obj.enable_counts[irq_idx]));
+
+	spin_lock_irqsave(&sde_kms->irq_obj.cb_lock, irq_flags);
+	SDE_EVT32(irq_idx,
+			atomic_read(&sde_kms->irq_obj.enable_counts[irq_idx]));
+	if (atomic_inc_return(&sde_kms->irq_obj.enable_counts[irq_idx]) == 1) {
+		ret = sde_kms->hw_intr->ops.enable_irq(
+				sde_kms->hw_intr,
+				irq_idx);
+		if (ret)
+			SDE_ERROR("Fail to enable IRQ for irq_idx:%d\n",
+					irq_idx);
+
+		SDE_DEBUG("irq_idx=%d ret=%d\n", irq_idx, ret);
+
+		/* empty callback list but interrupt is enabled */
+		if (list_empty(&sde_kms->irq_obj.irq_cb_tbl[irq_idx]))
+			SDE_ERROR("irq_idx=%d enabled with no callback\n",
+					irq_idx);
+	}
+	spin_unlock_irqrestore(&sde_kms->irq_obj.cb_lock, irq_flags);
+
+	return ret;
+}
+
+int sde_core_irq_enable(struct sde_kms *sde_kms, int *irq_idxs, u32 irq_count)
+{
+	int i;
+	int ret = 0;
+
+	if (!sde_kms || !irq_idxs || !irq_count) {
+		SDE_ERROR("invalid params\n");
+		return -EINVAL;
+	}
+
+	for (i = 0; (i < irq_count) && !ret; i++)
+		ret = _sde_core_irq_enable(sde_kms, irq_idxs[i]);
+
+	return ret;
+}
+
+/**
+ * _sde_core_irq_disable - disable core interrupt given by the index
+ * @sde_kms:		Pointer to sde kms context
+ * @irq_idx:		interrupt index
+ */
+static int _sde_core_irq_disable(struct sde_kms *sde_kms, int irq_idx)
+{
+	unsigned long irq_flags;
+	int ret = 0;
+
+	if (!sde_kms || !sde_kms->hw_intr || !sde_kms->irq_obj.enable_counts) {
+		SDE_ERROR("invalid params\n");
+		return -EINVAL;
+	}
+
+	if (irq_idx < 0 || irq_idx >= sde_kms->hw_intr->irq_idx_tbl_size) {
+		SDE_ERROR("invalid IRQ index: [%d]\n", irq_idx);
+		return -EINVAL;
+	}
+
+	SDE_DEBUG("irq_idx=%d enable_count=%d\n", irq_idx,
+			atomic_read(&sde_kms->irq_obj.enable_counts[irq_idx]));
+
+	spin_lock_irqsave(&sde_kms->irq_obj.cb_lock, irq_flags);
+	SDE_EVT32(irq_idx,
+			atomic_read(&sde_kms->irq_obj.enable_counts[irq_idx]));
+	if (atomic_dec_return(&sde_kms->irq_obj.enable_counts[irq_idx]) == 0) {
+		ret = sde_kms->hw_intr->ops.disable_irq(
+				sde_kms->hw_intr,
+				irq_idx);
+		if (ret)
+			SDE_ERROR("Fail to disable IRQ for irq_idx:%d\n",
+					irq_idx);
+		SDE_DEBUG("irq_idx=%d ret=%d\n", irq_idx, ret);
+	}
+	spin_unlock_irqrestore(&sde_kms->irq_obj.cb_lock, irq_flags);
+
+	return ret;
+}
+
+int sde_core_irq_disable(struct sde_kms *sde_kms, int *irq_idxs, u32 irq_count)
+{
+	int i;
+	int ret = 0;
+
+	if (!sde_kms || !irq_idxs || !irq_count) {
+		SDE_ERROR("invalid params\n");
+		return -EINVAL;
+	}
+
+	for (i = 0; (i < irq_count) && !ret; i++)
+		ret = _sde_core_irq_disable(sde_kms, irq_idxs[i]);
+
+	return ret;
+}
+
+u32 sde_core_irq_read(struct sde_kms *sde_kms, int irq_idx, bool clear)
+{
+	if (!sde_kms || !sde_kms->hw_intr ||
+			!sde_kms->hw_intr->ops.get_interrupt_status)
+		return 0;
+
+	return sde_kms->hw_intr->ops.get_interrupt_status(sde_kms->hw_intr,
+			irq_idx, clear);
+}
+
+int sde_core_irq_register_callback(struct sde_kms *sde_kms, int irq_idx,
+		struct sde_irq_callback *register_irq_cb)
+{
+	unsigned long irq_flags;
+
+	if (!sde_kms || !register_irq_cb || !register_irq_cb->func ||
+			!sde_kms->irq_obj.irq_cb_tbl) {
+		SDE_ERROR("invalid params\n");
+		return -EINVAL;
+	}
+
+	if (irq_idx < 0 || irq_idx >= sde_kms->hw_intr->irq_idx_tbl_size) {
+		SDE_ERROR("invalid IRQ index: [%d]\n", irq_idx);
+		return -EINVAL;
+	}
+
+	SDE_DEBUG("[%pS] irq_idx=%d\n", __builtin_return_address(0), irq_idx);
+
+	spin_lock_irqsave(&sde_kms->irq_obj.cb_lock, irq_flags);
+	SDE_EVT32(irq_idx, register_irq_cb);
+	list_del_init(&register_irq_cb->list);
+	list_add_tail(&register_irq_cb->list,
+			&sde_kms->irq_obj.irq_cb_tbl[irq_idx]);
+	spin_unlock_irqrestore(&sde_kms->irq_obj.cb_lock, irq_flags);
+
+	return 0;
+}
+
+int sde_core_irq_unregister_callback(struct sde_kms *sde_kms, int irq_idx,
+		struct sde_irq_callback *register_irq_cb)
+{
+	unsigned long irq_flags;
+
+	if (!sde_kms || !register_irq_cb || !register_irq_cb->func ||
+			!sde_kms->irq_obj.irq_cb_tbl) {
+		SDE_ERROR("invalid params\n");
+		return -EINVAL;
+	}
+
+	if (irq_idx < 0 || irq_idx >= sde_kms->hw_intr->irq_idx_tbl_size) {
+		SDE_ERROR("invalid IRQ index: [%d]\n", irq_idx);
+		return -EINVAL;
+	}
+
+	SDE_DEBUG("[%pS] irq_idx=%d\n", __builtin_return_address(0), irq_idx);
+
+	spin_lock_irqsave(&sde_kms->irq_obj.cb_lock, irq_flags);
+	SDE_EVT32(irq_idx, register_irq_cb);
+	list_del_init(&register_irq_cb->list);
+	/* empty callback list but interrupt is still enabled */
+	if (list_empty(&sde_kms->irq_obj.irq_cb_tbl[irq_idx]) &&
+			atomic_read(&sde_kms->irq_obj.enable_counts[irq_idx]))
+		SDE_ERROR("irq_idx=%d enabled with no callback\n", irq_idx);
+	spin_unlock_irqrestore(&sde_kms->irq_obj.cb_lock, irq_flags);
+
+	return 0;
+}
+
+static void sde_clear_all_irqs(struct sde_kms *sde_kms)
+{
+	if (!sde_kms || !sde_kms->hw_intr ||
+			!sde_kms->hw_intr->ops.clear_all_irqs)
+		return;
+
+	sde_kms->hw_intr->ops.clear_all_irqs(sde_kms->hw_intr);
+}
+
+static void sde_disable_all_irqs(struct sde_kms *sde_kms)
+{
+	if (!sde_kms || !sde_kms->hw_intr ||
+			!sde_kms->hw_intr->ops.disable_all_irqs)
+		return;
+
+	sde_kms->hw_intr->ops.disable_all_irqs(sde_kms->hw_intr);
+}
+
+#ifdef CONFIG_DEBUG_FS
+#define DEFINE_SDE_DEBUGFS_SEQ_FOPS(__prefix)				\
+static int __prefix ## _open(struct inode *inode, struct file *file)	\
+{									\
+	return single_open(file, __prefix ## _show, inode->i_private);	\
+}									\
+static const struct file_operations __prefix ## _fops = {		\
+	.owner = THIS_MODULE,						\
+	.open = __prefix ## _open,					\
+	.release = single_release,					\
+	.read = seq_read,						\
+	.llseek = seq_lseek,						\
+}
+
+static int sde_debugfs_core_irq_show(struct seq_file *s, void *v)
+{
+	struct sde_irq *irq_obj = s->private;
+	struct sde_irq_callback *cb;
+	unsigned long irq_flags;
+	int i, irq_count, enable_count, cb_count;
+
+	if (!irq_obj || !irq_obj->enable_counts || !irq_obj->irq_cb_tbl) {
+		SDE_ERROR("invalid parameters\n");
+		return 0;
+	}
+
+	for (i = 0; i < irq_obj->total_irqs; i++) {
+		spin_lock_irqsave(&irq_obj->cb_lock, irq_flags);
+		cb_count = 0;
+		irq_count = atomic_read(&irq_obj->irq_counts[i]);
+		enable_count = atomic_read(&irq_obj->enable_counts[i]);
+		list_for_each_entry(cb, &irq_obj->irq_cb_tbl[i], list)
+			cb_count++;
+		spin_unlock_irqrestore(&irq_obj->cb_lock, irq_flags);
+
+		if (irq_count || enable_count || cb_count)
+			seq_printf(s, "idx:%d irq:%d enable:%d cb:%d\n",
+					i, irq_count, enable_count, cb_count);
+	}
+
+	return 0;
+}
+
+DEFINE_SDE_DEBUGFS_SEQ_FOPS(sde_debugfs_core_irq);
+
+static int sde_debugfs_core_irq_init(struct sde_kms *sde_kms,
+		struct dentry *parent)
+{
+	sde_kms->irq_obj.debugfs_file = debugfs_create_file("core_irq", 0444,
+			parent, &sde_kms->irq_obj,
+			&sde_debugfs_core_irq_fops);
+
+	return 0;
+}
+
+static void sde_debugfs_core_irq_destroy(struct sde_kms *sde_kms)
+{
+	debugfs_remove(sde_kms->irq_obj.debugfs_file);
+	sde_kms->irq_obj.debugfs_file = NULL;
+}
+
+#else
+static int sde_debugfs_core_irq_init(struct sde_kms *sde_kms,
+		struct dentry *parent)
+{
+	return 0;
+}
+
+static void sde_debugfs_core_irq_destroy(struct sde_kms *sde_kms)
+{
+}
+#endif
+
+void sde_core_irq_preinstall(struct sde_kms *sde_kms)
+{
+	struct msm_drm_private *priv;
+	int i;
+
+	if (!sde_kms) {
+		SDE_ERROR("invalid sde_kms\n");
+		return;
+	} else if (!sde_kms->dev) {
+		SDE_ERROR("invalid drm device\n");
+		return;
+	} else if (!sde_kms->dev->dev_private) {
+		SDE_ERROR("invalid device private\n");
+		return;
+	}
+	priv = sde_kms->dev->dev_private;
+
+	sde_power_resource_enable(&priv->phandle, sde_kms->core_client, true);
+	sde_clear_all_irqs(sde_kms);
+	sde_disable_all_irqs(sde_kms);
+	sde_power_resource_enable(&priv->phandle, sde_kms->core_client, false);
+
+	spin_lock_init(&sde_kms->irq_obj.cb_lock);
+
+	/* Create irq callbacks for all possible irq_idx */
+	sde_kms->irq_obj.total_irqs = sde_kms->hw_intr->irq_idx_tbl_size;
+	sde_kms->irq_obj.irq_cb_tbl = kcalloc(sde_kms->irq_obj.total_irqs,
+			sizeof(struct list_head), GFP_KERNEL);
+	sde_kms->irq_obj.enable_counts = kcalloc(sde_kms->irq_obj.total_irqs,
+			sizeof(atomic_t), GFP_KERNEL);
+	sde_kms->irq_obj.irq_counts = kcalloc(sde_kms->irq_obj.total_irqs,
+			sizeof(atomic_t), GFP_KERNEL);
+	for (i = 0; i < sde_kms->irq_obj.total_irqs; i++) {
+		INIT_LIST_HEAD(&sde_kms->irq_obj.irq_cb_tbl[i]);
+		atomic_set(&sde_kms->irq_obj.enable_counts[i], 0);
+		atomic_set(&sde_kms->irq_obj.irq_counts[i], 0);
+	}
+
+	sde_debugfs_core_irq_init(sde_kms, sde_kms->debugfs_root);
+}
+
+int sde_core_irq_postinstall(struct sde_kms *sde_kms)
+{
+	return 0;
+}
+
+void sde_core_irq_uninstall(struct sde_kms *sde_kms)
+{
+	struct msm_drm_private *priv;
+	int i;
+
+	if (!sde_kms) {
+		SDE_ERROR("invalid sde_kms\n");
+		return;
+	} else if (!sde_kms->dev) {
+		SDE_ERROR("invalid drm device\n");
+		return;
+	} else if (!sde_kms->dev->dev_private) {
+		SDE_ERROR("invalid device private\n");
+		return;
+	}
+	priv = sde_kms->dev->dev_private;
+
+	sde_debugfs_core_irq_destroy(sde_kms);
+
+	sde_power_resource_enable(&priv->phandle, sde_kms->core_client, true);
+	for (i = 0; i < sde_kms->irq_obj.total_irqs; i++)
+		if (atomic_read(&sde_kms->irq_obj.enable_counts[i]) ||
+				!list_empty(&sde_kms->irq_obj.irq_cb_tbl[i]))
+			SDE_ERROR("irq_idx=%d still enabled/registered\n", i);
+
+	sde_clear_all_irqs(sde_kms);
+	sde_disable_all_irqs(sde_kms);
+	sde_power_resource_enable(&priv->phandle, sde_kms->core_client, false);
+
+	kfree(sde_kms->irq_obj.irq_cb_tbl);
+	kfree(sde_kms->irq_obj.enable_counts);
+	kfree(sde_kms->irq_obj.irq_counts);
+	sde_kms->irq_obj.irq_cb_tbl = NULL;
+	sde_kms->irq_obj.enable_counts = NULL;
+	sde_kms->irq_obj.irq_counts = NULL;
+	sde_kms->irq_obj.total_irqs = 0;
+}
+
+static void sde_hw_irq_mask(struct irq_data *irqd)
+{
+	struct sde_kms *sde_kms;
+
+	if (!irqd || !irq_data_get_irq_chip_data(irqd)) {
+		SDE_ERROR("invalid parameters irqd %d\n", irqd != 0);
+		return;
+	}
+	sde_kms = irq_data_get_irq_chip_data(irqd);
+
+	smp_mb__before_atomic();
+	clear_bit(irqd->hwirq, &sde_kms->irq_controller.enabled_mask);
+	smp_mb__after_atomic();
+}
+
+static void sde_hw_irq_unmask(struct irq_data *irqd)
+{
+	struct sde_kms *sde_kms;
+
+	if (!irqd || !irq_data_get_irq_chip_data(irqd)) {
+		SDE_ERROR("invalid parameters irqd %d\n", irqd != 0);
+		return;
+	}
+	sde_kms = irq_data_get_irq_chip_data(irqd);
+
+	smp_mb__before_atomic();
+	set_bit(irqd->hwirq, &sde_kms->irq_controller.enabled_mask);
+	smp_mb__after_atomic();
+}
+
+static struct irq_chip sde_hw_irq_chip = {
+	.name = "sde",
+	.irq_mask = sde_hw_irq_mask,
+	.irq_unmask = sde_hw_irq_unmask,
+};
+
+static int sde_hw_irqdomain_map(struct irq_domain *domain,
+		unsigned int irq, irq_hw_number_t hwirq)
+{
+	struct sde_kms *sde_kms;
+	int rc;
+
+	if (!domain || !domain->host_data) {
+		SDE_ERROR("invalid parameters domain %d\n", domain != 0);
+		return -EINVAL;
+	}
+	sde_kms = domain->host_data;
+
+	irq_set_chip_and_handler(irq, &sde_hw_irq_chip, handle_level_irq);
+	rc = irq_set_chip_data(irq, sde_kms);
+
+	return rc;
+}
+
+static struct irq_domain_ops sde_hw_irqdomain_ops = {
+	.map = sde_hw_irqdomain_map,
+	.xlate = irq_domain_xlate_onecell,
+};
+
+int sde_core_irq_domain_add(struct sde_kms *sde_kms)
+{
+	struct device *dev;
+	struct irq_domain *domain;
+
+	if (!sde_kms->dev || !sde_kms->dev->dev) {
+		pr_err("invalid device handles\n");
+		return -EINVAL;
+	}
+
+	dev = sde_kms->dev->dev;
+
+	domain = irq_domain_add_linear(dev->of_node, 32,
+			&sde_hw_irqdomain_ops, sde_kms);
+	if (!domain) {
+		pr_err("failed to add irq_domain\n");
+		return -EINVAL;
+	}
+
+	sde_kms->irq_controller.enabled_mask = 0;
+	sde_kms->irq_controller.domain = domain;
+
+	return 0;
+}
+
+int sde_core_irq_domain_fini(struct sde_kms *sde_kms)
+{
+	if (sde_kms->irq_controller.domain) {
+		irq_domain_remove(sde_kms->irq_controller.domain);
+		sde_kms->irq_controller.domain = NULL;
+	}
+	return 0;
+}
+
+irqreturn_t sde_core_irq(struct sde_kms *sde_kms)
+{
+	/*
+	 * Read the interrupt status from all sources; the statuses are
+	 * stored within hw_intr and cleared in hardware as part of the
+	 * read. An individual interrupt's status bit is only stored if
+	 * that interrupt is enabled.
+	 */
+	sde_kms->hw_intr->ops.get_interrupt_statuses(sde_kms->hw_intr);
+
+	/*
+	 * Dispatch to the HW driver to look up which interrupts fired.
+	 * For each match, the HW driver calls sde_core_irq_callback_handler
+	 * with the irq_idx from the lookup table; the handler runs the
+	 * registered callbacks and clears the interrupt status once they
+	 * have finished.
+	 */
+	sde_kms->hw_intr->ops.dispatch_irqs(
+			sde_kms->hw_intr,
+			sde_core_irq_callback_handler,
+			sde_kms);
+
+	return IRQ_HANDLED;
+}
diff -Nruw linux-4.4.115-fbx/drivers/gpu/drm/msm/sde./sde_core_irq.h linux-4.4.115-fbx/drivers/gpu/drm/msm/sde/sde_core_irq.h
--- linux-4.4.115-fbx/drivers/gpu/drm/msm/sde./sde_core_irq.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/gpu/drm/msm/sde/sde_core_irq.h	2019-01-22 16:16:23.511246479 +0100
@@ -0,0 +1,152 @@
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __SDE_CORE_IRQ_H__
+#define __SDE_CORE_IRQ_H__
+
+#include "sde_kms.h"
+#include "sde_hw_interrupts.h"
+
+/**
+ * sde_core_irq_preinstall - perform pre-installation of core IRQ handler
+ * @sde_kms:		SDE handle
+ * @return:		none
+ */
+void sde_core_irq_preinstall(struct sde_kms *sde_kms);
+
+/**
+ * sde_core_irq_postinstall - perform post-installation of core IRQ handler
+ * @sde_kms:		SDE handle
+ * @return:		0 if success; error code otherwise
+ */
+int sde_core_irq_postinstall(struct sde_kms *sde_kms);
+
+/**
+ * sde_core_irq_uninstall - uninstall core IRQ handler
+ * @sde_kms:		SDE handle
+ * @return:		none
+ */
+void sde_core_irq_uninstall(struct sde_kms *sde_kms);
+
+/**
+ * sde_core_irq_domain_add - Add core IRQ domain for SDE
+ * @sde_kms:		SDE handle
+ * @return:		0 if success; error code otherwise
+ */
+int sde_core_irq_domain_add(struct sde_kms *sde_kms);
+
+/**
+ * sde_core_irq_domain_fini - uninstall core IRQ domain
+ * @sde_kms:		SDE handle
+ * @return:		0 if success; error code otherwise
+ */
+int sde_core_irq_domain_fini(struct sde_kms *sde_kms);
+
+/**
+ * sde_core_irq - core IRQ handler
+ * @sde_kms:		SDE handle
+ * @return:		interrupt handling status
+ */
+irqreturn_t sde_core_irq(struct sde_kms *sde_kms);
+
+/**
+ * sde_core_irq_idx_lookup - IRQ helper function for lookup irq_idx from HW
+ *                      interrupt mapping table.
+ * @sde_kms:		SDE handle
+ * @intr_type:		SDE HW interrupt type for lookup
+ * @instance_idx:	SDE HW block instance defined in sde_hw_mdss.h
+ * @return:		irq_idx or -EINVAL when fail to lookup
+ */
+int sde_core_irq_idx_lookup(
+		struct sde_kms *sde_kms,
+		enum sde_intr_type intr_type,
+		uint32_t instance_idx);
+
+/**
+ * sde_core_irq_enable - IRQ helper function for enabling one or more IRQs
+ * @sde_kms:		SDE handle
+ * @irq_idxs:		Array of irq index
+ * @irq_count:		Number of irq_idx provided in the array
+ * @return:		0 for success enabling IRQ, otherwise failure
+ *
+ * This function increments the count on each enable and decrements it on
+ * each disable.  The interrupt is only enabled in hardware when the count
+ * is 0 before the increment.
+ */
+int sde_core_irq_enable(
+		struct sde_kms *sde_kms,
+		int *irq_idxs,
+		uint32_t irq_count);
+
+/**
+ * sde_core_irq_disable - IRQ helper function for disabling one or more IRQs
+ * @sde_kms:		SDE handle
+ * @irq_idxs:		Array of irq index
+ * @irq_count:		Number of irq_idx provided in the array
+ * @return:		0 for success disabling IRQ, otherwise failure
+ *
+ * This function increments the count on each enable and decrements it on
+ * each disable.  The interrupt is only disabled in hardware when the count
+ * reaches 0 after the decrement.
+ */
+int sde_core_irq_disable(
+		struct sde_kms *sde_kms,
+		int *irq_idxs,
+		uint32_t irq_count);
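+
+/*
+ * Usage sketch (illustrative; the interrupt type and instance below are
+ * placeholder values): enable/disable calls are reference counted and
+ * must therefore be balanced:
+ *
+ *	int irq_idx = sde_core_irq_idx_lookup(sde_kms,
+ *			SDE_IRQ_TYPE_INTF_VSYNC, INTF_1);
+ *	if (irq_idx >= 0) {
+ *		sde_core_irq_enable(sde_kms, &irq_idx, 1);
+ *		... wait for the event ...
+ *		sde_core_irq_disable(sde_kms, &irq_idx, 1);
+ *	}
+ */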
+
+/**
+ * sde_core_irq_read - IRQ helper function for reading IRQ status
+ * @sde_kms:		SDE handle
+ * @irq_idx:		irq index
+ * @clear:		True to clear the irq after read
+ * @return:		non-zero if the irq was detected; zero otherwise
+ */
+u32 sde_core_irq_read(
+		struct sde_kms *sde_kms,
+		int irq_idx,
+		bool clear);
+
+/**
+ * sde_core_irq_register_callback - register a callback function for an IRQ
+ *                             interrupt
+ * @sde_kms:		SDE handle
+ * @irq_idx:		irq index
+ * @irq_cb:		IRQ callback structure, containing the callback function
+ *			and argument. Must not be NULL, and must remain valid
+ *			until it is unregistered.
+ * @return:		0 for success registering callback, otherwise failure
+ *
+ * This function supports registration of multiple callbacks for each
+ * interrupt.
+ */
+int sde_core_irq_register_callback(
+		struct sde_kms *sde_kms,
+		int irq_idx,
+		struct sde_irq_callback *irq_cb);
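+
+/*
+ * Usage sketch (illustrative; my_cb_func and ctx are hypothetical): the
+ * callback structure is linked into the per-irq list, so it must stay
+ * valid until it is unregistered:
+ *
+ *	static struct sde_irq_callback my_cb;
+ *
+ *	my_cb.func = my_cb_func;
+ *	my_cb.arg = ctx;
+ *	sde_core_irq_register_callback(sde_kms, irq_idx, &my_cb);
+ *	...
+ *	sde_core_irq_unregister_callback(sde_kms, irq_idx, &my_cb);
+ */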
+
+/**
+ * sde_core_irq_unregister_callback - unregister a callback function for an
+ *                             IRQ interrupt
+ * @sde_kms:		SDE handle
+ * @irq_idx:		irq index
+ * @irq_cb:		IRQ callback structure previously passed to
+ *			sde_core_irq_register_callback(). Must not be NULL.
+ * @return:		0 for success unregistering callback, otherwise failure
+ *
+ * Multiple callbacks may be registered for each interrupt; this call removes
+ * only the given callback.
+ */
+int sde_core_irq_unregister_callback(
+		struct sde_kms *sde_kms,
+		int irq_idx,
+		struct sde_irq_callback *irq_cb);
+
+#endif /* __SDE_CORE_IRQ_H__ */
diff -Nruw linux-4.4.115-fbx/drivers/gpu/drm/msm/sde./sde_core_perf.c linux-4.4.115-fbx/drivers/gpu/drm/msm/sde/sde_core_perf.c
--- linux-4.4.115-fbx/drivers/gpu/drm/msm/sde./sde_core_perf.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/gpu/drm/msm/sde/sde_core_perf.c	2019-01-22 16:16:23.511246479 +0100
@@ -0,0 +1,634 @@
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt)	"[drm:%s:%d] " fmt, __func__, __LINE__
+
+#include <linux/debugfs.h>
+#include <linux/errno.h>
+#include <linux/mutex.h>
+#include <linux/sort.h>
+#include <linux/clk.h>
+#include <linux/bitmap.h>
+
+#include "msm_prop.h"
+
+#include "sde_kms.h"
+#include "sde_fence.h"
+#include "sde_formats.h"
+#include "sde_hw_sspp.h"
+#include "sde_trace.h"
+#include "sde_crtc.h"
+#include "sde_plane.h"
+#include "sde_encoder.h"
+#include "sde_wb.h"
+#include "sde_core_perf.h"
+#include "sde_trace.h"
+
+static struct sde_kms *_sde_crtc_get_kms(struct drm_crtc *crtc)
+{
+	struct msm_drm_private *priv;
+
+	if (!crtc->dev || !crtc->dev->dev_private) {
+		SDE_ERROR("invalid device\n");
+		return NULL;
+	}
+
+	priv = crtc->dev->dev_private;
+	if (!priv || !priv->kms) {
+		SDE_ERROR("invalid kms\n");
+		return NULL;
+	}
+
+	return to_sde_kms(priv->kms);
+}
+
+static bool _sde_core_perf_crtc_is_power_on(struct drm_crtc *crtc)
+{
+	return sde_crtc_is_enabled(crtc);
+}
+
+static bool _sde_core_video_mode_intf_connected(struct drm_crtc *crtc)
+{
+	struct drm_crtc *tmp_crtc;
+
+	if (!crtc)
+		return false;
+
+	drm_for_each_crtc(tmp_crtc, crtc->dev) {
+		if ((sde_crtc_get_intf_mode(tmp_crtc) == INTF_MODE_VIDEO) &&
+				_sde_core_perf_crtc_is_power_on(tmp_crtc)) {
+			SDE_DEBUG("video interface connected crtc:%d\n",
+				tmp_crtc->base.id);
+			return true;
+		}
+	}
+
+	return false;
+}
+
+static void _sde_core_perf_calc_crtc(struct drm_crtc *crtc,
+	struct drm_crtc_state *state,
+	struct sde_core_perf_params *perf)
+{
+	struct sde_crtc_state *sde_cstate;
+
+	if (!crtc || !state || !perf) {
+		SDE_ERROR("invalid parameters\n");
+		return;
+	}
+
+	sde_cstate = to_sde_crtc_state(state);
+	memset(perf, 0, sizeof(struct sde_core_perf_params));
+
+	perf->bw_ctl = sde_crtc_get_property(sde_cstate, CRTC_PROP_CORE_AB);
+	perf->max_per_pipe_ib =
+		sde_crtc_get_property(sde_cstate, CRTC_PROP_CORE_IB);
+	perf->core_clk_rate =
+		sde_crtc_get_property(sde_cstate, CRTC_PROP_CORE_CLK);
+
+	SDE_DEBUG("crtc=%d clk_rate=%u ib=%llu ab=%llu\n",
+			  crtc->base.id, perf->core_clk_rate,
+			  perf->max_per_pipe_ib, perf->bw_ctl);
+}
+
+int sde_core_perf_crtc_check(struct drm_crtc *crtc,
+		struct drm_crtc_state *state)
+{
+	u32 bw, threshold;
+	u64 bw_sum_of_intfs = 0;
+	bool is_video_mode;
+	struct sde_crtc_state *sde_cstate;
+	struct drm_crtc *tmp_crtc;
+	struct sde_kms *kms;
+
+	if (!crtc || !state) {
+		SDE_ERROR("invalid crtc\n");
+		return -EINVAL;
+	}
+
+	kms = _sde_crtc_get_kms(crtc);
+	if (!kms || !kms->catalog) {
+		SDE_ERROR("invalid parameters\n");
+		return 0;
+	}
+
+	/* we only need bandwidth check on real-time clients (interfaces) */
+	if (sde_crtc_is_wb(crtc))
+		return 0;
+
+	sde_cstate = to_sde_crtc_state(state);
+
+	_sde_core_perf_calc_crtc(crtc, state, &sde_cstate->new_perf);
+
+	bw_sum_of_intfs = sde_cstate->new_perf.bw_ctl;
+
+	drm_for_each_crtc(tmp_crtc, crtc->dev) {
+		if (_sde_core_perf_crtc_is_power_on(tmp_crtc) &&
+				sde_crtc_is_rt(tmp_crtc) && tmp_crtc != crtc) {
+			struct sde_crtc_state *tmp_cstate =
+					to_sde_crtc_state(tmp_crtc->state);
+
+			bw_sum_of_intfs += tmp_cstate->new_perf.bw_ctl;
+		}
+	}
+
+	/* convert bandwidth to kb */
+	bw = DIV_ROUND_UP_ULL(bw_sum_of_intfs, 1000);
+	SDE_DEBUG("calculated bandwidth=%uk\n", bw);
+
+	is_video_mode = sde_crtc_get_intf_mode(crtc) == INTF_MODE_VIDEO;
+	threshold = (is_video_mode ||
+		_sde_core_video_mode_intf_connected(crtc)) ?
+		kms->catalog->perf.max_bw_low : kms->catalog->perf.max_bw_high;
+
+	SDE_DEBUG("final threshold bw limit = %d\n", threshold);
+
+	if (!threshold) {
+		SDE_ERROR("no bandwidth limits specified\n");
+		return -E2BIG;
+	} else if (bw > threshold) {
+		SDE_DEBUG("exceeds bandwidth: %ukb > %ukb\n", bw, threshold);
+		return -E2BIG;
+	}
+
+	return 0;
+}
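+
+/*
+ * Worked example for the check above (illustrative numbers only): two
+ * active real-time interfaces voting 3,200,000,000 bytes/sec each give
+ * bw_sum_of_intfs = 6,400,000,000, i.e. bw = 6,400,000 (kB/sec). With a
+ * video-mode threshold (max_bw_low) of 6,000,000 the atomic check fails
+ * with -E2BIG and the commit is rejected.
+ */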
+
+static u64 _sde_core_perf_crtc_calc_client_vote(struct sde_kms *kms,
+		struct drm_crtc *crtc, struct sde_core_perf_params *perf,
+		bool nrt_client, u32 core_clk)
+{
+	u64 bw_sum_of_intfs = 0;
+	struct drm_crtc *tmp_crtc;
+
+	drm_for_each_crtc(tmp_crtc, crtc->dev) {
+		if (_sde_core_perf_crtc_is_power_on(tmp_crtc) &&
+		    /* RealTime clients */
+		    ((!nrt_client) ||
+		    /* Non-RealTime clients */
+		    (nrt_client && sde_crtc_is_nrt(tmp_crtc)))) {
+			struct sde_crtc_state *sde_cstate =
+					to_sde_crtc_state(tmp_crtc->state);
+
+			perf->max_per_pipe_ib = max(perf->max_per_pipe_ib,
+				sde_cstate->new_perf.max_per_pipe_ib);
+
+			bw_sum_of_intfs += sde_cstate->new_perf.bw_ctl;
+
+			SDE_DEBUG("crtc=%d bw=%llu\n",
+				tmp_crtc->base.id,
+				sde_cstate->new_perf.bw_ctl);
+		}
+	}
+
+	return bw_sum_of_intfs;
+}
+
+static void _sde_core_perf_crtc_update_client_vote(struct sde_kms *kms,
+	struct sde_core_perf_params *params, bool nrt_client, u64 bw_vote)
+{
+	struct msm_drm_private *priv = kms->dev->dev_private;
+	u64 bus_ab_quota, bus_ib_quota;
+
+	bus_ab_quota = max(bw_vote, kms->perf.perf_tune.min_bus_vote);
+	bus_ib_quota = params->max_per_pipe_ib;
+
+	SDE_ATRACE_INT("bus_quota", bus_ib_quota);
+	sde_power_data_bus_set_quota(&priv->phandle, kms->core_client,
+		nrt_client ? SDE_POWER_HANDLE_DATA_BUS_CLIENT_NRT :
+				SDE_POWER_HANDLE_DATA_BUS_CLIENT_RT,
+		bus_ab_quota, bus_ib_quota);
+	SDE_DEBUG("client:%s ab=%llu ib=%llu\n", nrt_client ? "nrt" : "rt",
+		bus_ab_quota, bus_ib_quota);
+}
+
+static void _sde_core_perf_crtc_update_bus(struct sde_kms *kms,
+		struct drm_crtc *crtc, u32 core_clk)
+{
+	u64 bw_sum_of_rt_intfs = 0, bw_sum_of_nrt_intfs = 0;
+	struct sde_core_perf_params params = {0};
+
+	SDE_ATRACE_BEGIN(__func__);
+
+	/*
+	 * non-real time client
+	 */
+	if (sde_crtc_is_nrt(crtc)) {
+		bw_sum_of_nrt_intfs = _sde_core_perf_crtc_calc_client_vote(
+				kms, crtc, &params, true, core_clk);
+		_sde_core_perf_crtc_update_client_vote(kms, &params, true,
+			bw_sum_of_nrt_intfs);
+	}
+
+	/*
+	 * real time client
+	 */
+	if (!sde_crtc_is_nrt(crtc) ||
+		sde_crtc_is_wb(crtc)) {
+		bw_sum_of_rt_intfs = _sde_core_perf_crtc_calc_client_vote(kms,
+				crtc, &params, false, core_clk);
+		_sde_core_perf_crtc_update_client_vote(kms, &params, false,
+			bw_sum_of_rt_intfs);
+	}
+
+	SDE_ATRACE_END(__func__);
+}
+
+/**
+ * sde_core_perf_crtc_release_bw - request zero bandwidth
+ * @crtc: pointer to a crtc
+ *
+ * Checks the crtc's state; if all pending commit requests are done and no
+ * more bandwidth is needed, the bandwidth request is released.
+ */
+void sde_core_perf_crtc_release_bw(struct drm_crtc *crtc)
+{
+	struct drm_crtc *tmp_crtc;
+	struct sde_crtc *sde_crtc;
+	struct sde_crtc_state *sde_cstate;
+	struct sde_kms *kms;
+
+	if (!crtc) {
+		SDE_ERROR("invalid crtc\n");
+		return;
+	}
+
+	kms = _sde_crtc_get_kms(crtc);
+	if (!kms || !kms->catalog) {
+		SDE_ERROR("invalid kms\n");
+		return;
+	}
+
+	sde_crtc = to_sde_crtc(crtc);
+	sde_cstate = to_sde_crtc_state(crtc->state);
+
+	/* only do this for command panel or writeback */
+	if ((sde_crtc_get_intf_mode(crtc) != INTF_MODE_CMD) &&
+			(sde_crtc_get_intf_mode(crtc) != INTF_MODE_WB_LINE))
+		return;
+
+	/*
+	 * If video interface present, cmd panel bandwidth cannot be
+	 * released.
+	 */
+	if (sde_crtc_get_intf_mode(crtc) == INTF_MODE_CMD)
+		drm_for_each_crtc(tmp_crtc, crtc->dev) {
+			if (_sde_core_perf_crtc_is_power_on(tmp_crtc) &&
+				sde_crtc_get_intf_mode(tmp_crtc) ==
+						INTF_MODE_VIDEO)
+				return;
+		}
+
+	/* Release the bandwidth */
+	if (kms->perf.enable_bw_release) {
+		trace_sde_cmd_release_bw(crtc->base.id);
+		sde_crtc->cur_perf.bw_ctl = 0;
+		SDE_DEBUG("Release BW crtc=%d\n", crtc->base.id);
+		_sde_core_perf_crtc_update_bus(kms, crtc, 0);
+	}
+}
+
+static int _sde_core_select_clk_lvl(struct sde_kms *kms,
+			u32 clk_rate)
+{
+	return clk_round_rate(kms->perf.core_clk, clk_rate);
+}
+
+static u32 _sde_core_perf_get_core_clk_rate(struct sde_kms *kms,
+	struct sde_core_perf_params *crtc_perf, struct drm_crtc *crtc)
+{
+	u32 clk_rate = 0;
+	struct drm_crtc *tmp_crtc;
+	struct sde_crtc_state *sde_cstate;
+	int ncrtc = 0;
+	u32 tmp_rate;
+
+	drm_for_each_crtc(tmp_crtc, kms->dev) {
+		if (_sde_core_perf_crtc_is_power_on(tmp_crtc)) {
+
+			if (crtc->base.id == tmp_crtc->base.id) {
+				/* for current CRTC, use the cached value */
+				tmp_rate = crtc_perf->core_clk_rate;
+			} else {
+				sde_cstate = to_sde_crtc_state(tmp_crtc->state);
+				tmp_rate = sde_cstate->new_perf.core_clk_rate;
+			}
+
+			clk_rate = max(tmp_rate, clk_rate);
+			clk_rate = clk_round_rate(kms->perf.core_clk, clk_rate);
+		}
+		ncrtc++;
+	}
+	clk_rate = _sde_core_select_clk_lvl(kms, clk_rate);
+
+	SDE_DEBUG("clk:%u ncrtc:%d\n", clk_rate, ncrtc);
+
+	return clk_rate;
+}
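+
+/*
+ * Example (hypothetical rates): if two active crtcs request 150 MHz and
+ * 200 MHz core clock, the loop above keeps the 200 MHz maximum and
+ * clk_round_rate() snaps it to a rate the clock tree actually supports,
+ * e.g. 240 MHz on a clock plan without a 200 MHz step.
+ */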
+
+void sde_core_perf_crtc_update(struct drm_crtc *crtc,
+		int params_changed, bool stop_req)
+{
+	struct sde_core_perf_params *new, *old;
+	int update_bus = 0, update_clk = 0;
+	u32 clk_rate = 0;
+	struct sde_crtc *sde_crtc;
+	struct sde_crtc_state *sde_cstate;
+	int ret;
+	struct msm_drm_private *priv;
+	struct sde_kms *kms;
+
+	if (!crtc) {
+		SDE_ERROR("invalid crtc\n");
+		return;
+	}
+
+	kms = _sde_crtc_get_kms(crtc);
+	if (!kms || !kms->catalog) {
+		SDE_ERROR("invalid kms\n");
+		return;
+	}
+	priv = kms->dev->dev_private;
+
+	sde_crtc = to_sde_crtc(crtc);
+	sde_cstate = to_sde_crtc_state(crtc->state);
+
+	SDE_DEBUG("crtc:%d stop_req:%d core_clk:%u\n",
+			crtc->base.id, stop_req, kms->perf.core_clk_rate);
+
+	SDE_ATRACE_BEGIN(__func__);
+
+	/*
+	 * cache the performance numbers in the crtc prior to the
+	 * crtc kickoff, so the same numbers are used during the
+	 * perf update that happens post kickoff.
+	 */
+
+	if (params_changed)
+		memcpy(&sde_crtc->new_perf, &sde_cstate->new_perf,
+			   sizeof(struct sde_core_perf_params));
+
+	old = &sde_crtc->cur_perf;
+	new = &sde_crtc->new_perf;
+
+	if (_sde_core_perf_crtc_is_power_on(crtc) && !stop_req) {
+		/*
+		 * cases for bus bandwidth update.
+		 * 1. new bandwidth vote or writeback output vote
+		 *    are higher than current vote for update request.
+		 * 2. new bandwidth vote or writeback output vote are
+		 *    lower than current vote at end of commit or stop.
+		 */
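+		/*
+		 * Example (hypothetical numbers): at commit time
+		 * (params_changed) a vote going 1500 -> 2000 MB/s is
+		 * applied immediately, while a vote going 2000 -> 1500 MB/s
+		 * is deferred to the post-commit update (!params_changed),
+		 * so bandwidth is never reduced while a frame still needs it.
+		 */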
+		if ((params_changed && ((new->bw_ctl > old->bw_ctl))) ||
+		    (!params_changed && ((new->bw_ctl < old->bw_ctl)))) {
+			SDE_DEBUG("crtc=%d p=%d new_bw=%llu,old_bw=%llu\n",
+				crtc->base.id, params_changed, new->bw_ctl,
+				old->bw_ctl);
+			old->bw_ctl = new->bw_ctl;
+			old->max_per_pipe_ib = new->max_per_pipe_ib;
+			update_bus = 1;
+		}
+
+		if ((params_changed &&
+				(new->core_clk_rate > old->core_clk_rate)) ||
+				(!params_changed &&
+				(new->core_clk_rate < old->core_clk_rate))) {
+			old->core_clk_rate = new->core_clk_rate;
+			update_clk = 1;
+		}
+	} else {
+		SDE_DEBUG("crtc=%d disable\n", crtc->base.id);
+		memset(old, 0, sizeof(*old));
+		memset(new, 0, sizeof(*new));
+		update_bus = 1;
+		update_clk = 1;
+	}
+
+	/*
+	 * Calculate mdp clock before bandwidth calculation. If traffic shaper
+	 * is enabled and clock increased, the bandwidth calculation can
+	 * use the new clock for the rotator bw calculation.
+	 */
+	if (update_clk)
+		clk_rate = _sde_core_perf_get_core_clk_rate(kms, old, crtc);
+
+	if (update_bus)
+		_sde_core_perf_crtc_update_bus(kms, crtc, clk_rate);
+
+	/*
+	 * Update the clock after bandwidth vote to ensure
+	 * bandwidth is available before clock rate is increased.
+	 */
+	if (update_clk) {
+		SDE_ATRACE_INT(kms->perf.clk_name, clk_rate);
+		SDE_EVT32(kms->dev, stop_req, clk_rate, params_changed,
+				  old->core_clk_rate, new->core_clk_rate);
+
+		ret = sde_power_clk_set_rate(&priv->phandle,
+				kms->perf.clk_name, clk_rate);
+		if (ret) {
+			SDE_ERROR("failed to set %s clock rate %u\n",
+					kms->perf.clk_name, clk_rate);
+			goto end;
+		}
+
+		kms->perf.core_clk_rate = clk_rate;
+		SDE_DEBUG("update clk rate = %d HZ\n", clk_rate);
+	}
+
+end:
+	SDE_ATRACE_END(__func__);
+}
+
+#ifdef CONFIG_DEBUG_FS
+
+static ssize_t _sde_core_perf_mode_write(struct file *file,
+		    const char __user *user_buf, size_t count, loff_t *ppos)
+{
+	struct sde_core_perf *perf = file->private_data;
+	struct sde_perf_cfg *cfg;
+	int perf_mode = 0;
+	char buf[10];
+
+	if (!perf)
+		return -ENODEV;
+
+	cfg = &perf->catalog->perf;
+
+	if (count >= sizeof(buf))
+		return -EFAULT;
+
+	if (copy_from_user(buf, user_buf, count))
+		return -EFAULT;
+
+	buf[count] = 0;	/* end of string */
+
+	if (kstrtoint(buf, 0, &perf_mode))
+		return -EINVAL;
+
+	if (perf_mode) {
+		/* run the driver with max clk and BW vote */
+		perf->perf_tune.min_core_clk = perf->max_core_clk_rate;
+		perf->perf_tune.min_bus_vote =
+				(u64) cfg->max_bw_high * 1000;
+	} else {
+		/* reset the perf tune params to 0 */
+		perf->perf_tune.min_core_clk = 0;
+		perf->perf_tune.min_bus_vote = 0;
+	}
+	return count;
+}
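+
+/*
+ * Usage sketch (the exact path depends on where the debugfs parent
+ * lives, e.g. <debugfs>/dri/0/.../core_perf):
+ *   echo 1 > core_perf/perf_mode   # raise perf floor to max clk/bw vote
+ *   echo 0 > core_perf/perf_mode   # back to dynamic perf tuning
+ */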
+
+static ssize_t _sde_core_perf_mode_read(struct file *file,
+			char __user *buff, size_t count, loff_t *ppos)
+{
+	struct sde_core_perf *perf = file->private_data;
+	int len = 0;
+	char buf[40] = {'\0'};
+
+	if (!perf)
+		return -ENODEV;
+
+	if (*ppos)
+		return 0;	/* the end */
+
+	len = snprintf(buf, sizeof(buf), "min_mdp_clk %lu min_bus_vote %llu\n",
+			perf->perf_tune.min_core_clk,
+			perf->perf_tune.min_bus_vote);
+	if (len < 0 || len >= sizeof(buf))
+		return 0;
+
+	if ((count < sizeof(buf)) || copy_to_user(buff, buf, len))
+		return -EFAULT;
+
+	*ppos += len;   /* increase offset */
+
+	return len;
+}
+
+static const struct file_operations sde_core_perf_mode_fops = {
+	.open = simple_open,
+	.read = _sde_core_perf_mode_read,
+	.write = _sde_core_perf_mode_write,
+};
+
+static void sde_debugfs_core_perf_destroy(struct sde_core_perf *perf)
+{
+	debugfs_remove_recursive(perf->debugfs_root);
+	perf->debugfs_root = NULL;
+}
+
+static int sde_debugfs_core_perf_init(struct sde_core_perf *perf,
+		struct dentry *parent)
+{
+	struct sde_mdss_cfg *catalog = perf->catalog;
+	struct msm_drm_private *priv;
+	struct sde_kms *sde_kms;
+
+	priv = perf->dev->dev_private;
+	if (!priv || !priv->kms) {
+		SDE_ERROR("invalid KMS reference\n");
+		return -EINVAL;
+	}
+
+	sde_kms = to_sde_kms(priv->kms);
+
+	perf->debugfs_root = debugfs_create_dir("core_perf", parent);
+	if (!perf->debugfs_root) {
+		SDE_ERROR("failed to create core perf debugfs\n");
+		return -EINVAL;
+	}
+
+	debugfs_create_u64("max_core_clk_rate", 0644, perf->debugfs_root,
+			&perf->max_core_clk_rate);
+	debugfs_create_u32("core_clk_rate", 0644, perf->debugfs_root,
+			&perf->core_clk_rate);
+	debugfs_create_u32("enable_bw_release", 0644, perf->debugfs_root,
+			(u32 *)&perf->enable_bw_release);
+	debugfs_create_u32("threshold_low", 0644, perf->debugfs_root,
+			(u32 *)&catalog->perf.max_bw_low);
+	debugfs_create_u32("threshold_high", 0644, perf->debugfs_root,
+			(u32 *)&catalog->perf.max_bw_high);
+	debugfs_create_file("perf_mode", 0644, perf->debugfs_root,
+			perf, &sde_core_perf_mode_fops);
+
+	return 0;
+}
+#else
+static void sde_debugfs_core_perf_destroy(struct sde_core_perf *perf)
+{
+}
+
+static int sde_debugfs_core_perf_init(struct sde_core_perf *perf,
+		struct dentry *parent)
+{
+	return 0;
+}
+#endif
+
+void sde_core_perf_destroy(struct sde_core_perf *perf)
+{
+	if (!perf) {
+		SDE_ERROR("invalid parameters\n");
+		return;
+	}
+
+	sde_debugfs_core_perf_destroy(perf);
+	perf->max_core_clk_rate = 0;
+	perf->core_clk = NULL;
+	mutex_destroy(&perf->perf_lock);
+	perf->clk_name = NULL;
+	perf->phandle = NULL;
+	perf->catalog = NULL;
+	perf->dev = NULL;
+}
+
+int sde_core_perf_init(struct sde_core_perf *perf,
+		struct drm_device *dev,
+		struct sde_mdss_cfg *catalog,
+		struct sde_power_handle *phandle,
+		struct sde_power_client *pclient,
+		char *clk_name,
+		struct dentry *debugfs_parent)
+{
+	if (!perf || !catalog || !phandle || !pclient ||
+			!clk_name || !debugfs_parent) {
+		SDE_ERROR("invalid parameters\n");
+		return -EINVAL;
+	}
+
+	perf->dev = dev;
+	perf->catalog = catalog;
+	perf->phandle = phandle;
+	perf->pclient = pclient;
+	perf->clk_name = clk_name;
+	mutex_init(&perf->perf_lock);
+
+	perf->core_clk = sde_power_clk_get_clk(phandle, clk_name);
+	if (!perf->core_clk) {
+		SDE_ERROR("invalid core clk\n");
+		goto err;
+	}
+
+	perf->max_core_clk_rate = sde_power_clk_get_max_rate(phandle, clk_name);
+	if (!perf->max_core_clk_rate) {
+		SDE_ERROR("invalid max core clk rate\n");
+		goto err;
+	}
+
+	sde_debugfs_core_perf_init(perf, debugfs_parent);
+
+	return 0;
+
+err:
+	sde_core_perf_destroy(perf);
+	return -ENODEV;
+}
diff -Nruw linux-4.4.115/drivers/gpu/drm/msm/sde/sde_core_perf.h linux-4.4.115-fbx/drivers/gpu/drm/msm/sde/sde_core_perf.h
--- linux-4.4.115/drivers/gpu/drm/msm/sde/sde_core_perf.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/gpu/drm/msm/sde/sde_core_perf.h	2019-01-22 16:16:23.511246479 +0100
@@ -0,0 +1,124 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __SDE_CORE_PERF_H__
+#define __SDE_CORE_PERF_H__
+
+#include <linux/types.h>
+#include <linux/dcache.h>
+#include <linux/mutex.h>
+#include <drm/drm_crtc.h>
+
+#include "sde_hw_catalog.h"
+#include "sde_power_handle.h"
+
+/**
+ * struct sde_core_perf_params - definition of performance parameters
+ * @max_per_pipe_ib: maximum instantaneous bandwidth request
+ * @bw_ctl: arbitrated bandwidth request
+ * @core_clk_rate: core clock rate request
+ */
+struct sde_core_perf_params {
+	u64 max_per_pipe_ib;
+	u64 bw_ctl;
+	u32 core_clk_rate;
+};
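+
+/*
+ * Units note (an assumption from the surrounding code, not from the
+ * original header): the bandwidth fields carry bytes/sec as used by the
+ * bus-scaling vote -- the perf_mode handler converts the catalog's
+ * max_bw_high via "* 1000", suggesting catalog values in kB/s -- while
+ * core_clk_rate is in Hz.
+ */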
+
+/**
+ * struct sde_core_perf_tune - definition of performance tuning control
+ * @min_core_clk: minimum core clock
+ * @min_bus_vote: minimum bus vote
+ */
+struct sde_core_perf_tune {
+	unsigned long min_core_clk;
+	u64 min_bus_vote;
+};
+
+/**
+ * struct sde_core_perf - definition of core performance context
+ * @dev: Pointer to drm device
+ * @debugfs_root: top level debug folder
+ * @perf_lock: serialization lock for this context
+ * @catalog: Pointer to catalog configuration
+ * @phandle: Pointer to power handler
+ * @pclient: Pointer to power client
+ * @clk_name: core clock name
+ * @core_clk: Pointer to core clock structure
+ * @core_clk_rate: current core clock rate
+ * @max_core_clk_rate: maximum allowable core clock rate
+ * @perf_tune: debug control for performance tuning
+ * @enable_bw_release: debug control for bandwidth release
+ */
+struct sde_core_perf {
+	struct drm_device *dev;
+	struct dentry *debugfs_root;
+	struct mutex perf_lock;
+	struct sde_mdss_cfg *catalog;
+	struct sde_power_handle *phandle;
+	struct sde_power_client *pclient;
+	char *clk_name;
+	struct clk *core_clk;
+	u32 core_clk_rate;
+	u64 max_core_clk_rate;
+	struct sde_core_perf_tune perf_tune;
+	u32 enable_bw_release;
+};
+
+/**
+ * sde_core_perf_crtc_check - validate performance of the given crtc state
+ * @crtc: Pointer to crtc
+ * @state: Pointer to new crtc state
+ * return: zero if success, or error code otherwise
+ */
+int sde_core_perf_crtc_check(struct drm_crtc *crtc,
+		struct drm_crtc_state *state);
+
+/**
+ * sde_core_perf_crtc_update - update performance of the given crtc
+ * @crtc: Pointer to crtc
+ * @params_changed: true if crtc parameters are modified
+ * @stop_req: true if this is a stop request
+ */
+void sde_core_perf_crtc_update(struct drm_crtc *crtc,
+		int params_changed, bool stop_req);
+
+/**
+ * sde_core_perf_crtc_release_bw - release bandwidth of the given crtc
+ * @crtc: Pointer to crtc
+ */
+void sde_core_perf_crtc_release_bw(struct drm_crtc *crtc);
+
+/**
+ * sde_core_perf_destroy - destroy the given core performance context
+ * @perf: Pointer to core performance context
+ */
+void sde_core_perf_destroy(struct sde_core_perf *perf);
+
+/**
+ * sde_core_perf_init - initialize the given core performance context
+ * @perf: Pointer to core performance context
+ * @dev: Pointer to drm device
+ * @catalog: Pointer to catalog
+ * @phandle: Pointer to power handle
+ * @pclient: Pointer to power client
+ * @clk_name: core clock name
+ * @debugfs_parent: Pointer to parent debugfs
+ */
+int sde_core_perf_init(struct sde_core_perf *perf,
+		struct drm_device *dev,
+		struct sde_mdss_cfg *catalog,
+		struct sde_power_handle *phandle,
+		struct sde_power_client *pclient,
+		char *clk_name,
+		struct dentry *debugfs_parent);
+
+#endif /* __SDE_CORE_PERF_H__ */
diff -Nruw linux-4.4.115/drivers/gpu/drm/msm/sde/sde_crtc.c linux-4.4.115-fbx/drivers/gpu/drm/msm/sde/sde_crtc.c
--- linux-4.4.115/drivers/gpu/drm/msm/sde/sde_crtc.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/gpu/drm/msm/sde/sde_crtc.c	2019-10-29 09:26:23.637203119 +0100
@@ -0,0 +1,2032 @@
+/*
+ * Copyright (c) 2014-2018 The Linux Foundation. All rights reserved.
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#define pr_fmt(fmt)	"[drm:%s:%d] " fmt, __func__, __LINE__
+#include <linux/sort.h>
+#include <linux/debugfs.h>
+#include <linux/ktime.h>
+#include <uapi/drm/sde_drm.h>
+#include <drm/drm_mode.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_flip_work.h>
+
+#include "sde_kms.h"
+#include "sde_hw_lm.h"
+#include "sde_hw_ctl.h"
+#include "sde_crtc.h"
+#include "sde_plane.h"
+#include "sde_color_processing.h"
+#include "sde_encoder.h"
+#include "sde_connector.h"
+#include "sde_power_handle.h"
+#include "sde_core_perf.h"
+#include "sde_trace.h"
+
+/* default input fence timeout, in ms */
+#define SDE_CRTC_INPUT_FENCE_TIMEOUT    2000
+
+/*
+ * The default input fence timeout is 2 seconds while max allowed
+ * range is 10 seconds. Any value above 10 seconds adds glitches beyond
+ * tolerance limit.
+ */
+#define SDE_CRTC_MAX_INPUT_FENCE_TIMEOUT 10000
+
+/* layer mixer index on sde_crtc */
+#define LEFT_MIXER 0
+#define RIGHT_MIXER 1
+
+/* indicates pending page flip events */
+#define PENDING_FLIP   0x2
+
+static inline struct sde_kms *_sde_crtc_get_kms(struct drm_crtc *crtc)
+{
+	struct msm_drm_private *priv;
+
+	if (!crtc || !crtc->dev || !crtc->dev->dev_private) {
+		SDE_ERROR("invalid crtc\n");
+		return NULL;
+	}
+	priv = crtc->dev->dev_private;
+	if (!priv || !priv->kms) {
+		SDE_ERROR("invalid kms\n");
+		return NULL;
+	}
+
+	return to_sde_kms(priv->kms);
+}
+
+static void sde_crtc_destroy(struct drm_crtc *crtc)
+{
+	struct sde_crtc *sde_crtc;
+
+	SDE_DEBUG("\n");
+
+	if (!crtc)
+		return;
+
+	sde_crtc = to_sde_crtc(crtc);
+
+	if (sde_crtc->blob_info)
+		drm_property_unreference_blob(sde_crtc->blob_info);
+	msm_property_destroy(&sde_crtc->property_info);
+	sde_cp_crtc_destroy_properties(crtc);
+
+	debugfs_remove_recursive(sde_crtc->debugfs_root);
+	sde_fence_deinit(&sde_crtc->output_fence);
+
+	drm_crtc_cleanup(crtc);
+	mutex_destroy(&sde_crtc->crtc_lock);
+	kfree(sde_crtc);
+}
+
+static bool sde_crtc_mode_fixup(struct drm_crtc *crtc,
+		const struct drm_display_mode *mode,
+		struct drm_display_mode *adjusted_mode)
+{
+	SDE_DEBUG("\n");
+
+	if (msm_is_mode_seamless(adjusted_mode) &&
+		(!crtc->enabled || crtc->state->active_changed)) {
+		SDE_ERROR("crtc state prevents seamless transition\n");
+		return false;
+	}
+
+	return true;
+}
+
+static void _sde_crtc_setup_blend_cfg(struct sde_crtc_mixer *mixer,
+	struct sde_plane_state *pstate, struct sde_format *format)
+{
+	uint32_t blend_op, fg_alpha, bg_alpha;
+	uint32_t blend_type;
+	struct sde_hw_mixer *lm = mixer->hw_lm;
+
+	/* default to opaque blending */
+	fg_alpha = sde_plane_get_property(pstate, PLANE_PROP_ALPHA);
+	bg_alpha = 0xFF - fg_alpha;
+	blend_op = SDE_BLEND_FG_ALPHA_FG_CONST | SDE_BLEND_BG_ALPHA_BG_CONST;
+	blend_type = sde_plane_get_property(pstate, PLANE_PROP_BLEND_OP);
+
+	SDE_DEBUG("blend type:0x%x blend alpha:0x%x\n", blend_type, fg_alpha);
+
+	switch (blend_type) {
+
+	case SDE_DRM_BLEND_OP_OPAQUE:
+		blend_op = SDE_BLEND_FG_ALPHA_FG_CONST |
+			SDE_BLEND_BG_ALPHA_BG_CONST;
+		break;
+
+	case SDE_DRM_BLEND_OP_PREMULTIPLIED:
+		if (format->alpha_enable) {
+			blend_op = SDE_BLEND_FG_ALPHA_FG_CONST |
+				SDE_BLEND_BG_ALPHA_FG_PIXEL;
+			if (fg_alpha != 0xff) {
+				bg_alpha = fg_alpha;
+				blend_op |= SDE_BLEND_BG_MOD_ALPHA |
+					SDE_BLEND_BG_INV_MOD_ALPHA;
+			} else {
+				blend_op |= SDE_BLEND_BG_INV_ALPHA;
+			}
+		}
+		break;
+
+	case SDE_DRM_BLEND_OP_COVERAGE:
+		if (format->alpha_enable) {
+			blend_op = SDE_BLEND_FG_ALPHA_FG_PIXEL |
+				SDE_BLEND_BG_ALPHA_FG_PIXEL;
+			if (fg_alpha != 0xff) {
+				bg_alpha = fg_alpha;
+				blend_op |= SDE_BLEND_FG_MOD_ALPHA |
+					SDE_BLEND_FG_INV_MOD_ALPHA |
+					SDE_BLEND_BG_MOD_ALPHA |
+					SDE_BLEND_BG_INV_MOD_ALPHA;
+			} else {
+				blend_op |= SDE_BLEND_BG_INV_ALPHA;
+			}
+		}
+		break;
+	default:
+		/* do nothing */
+		break;
+	}
+
+	lm->ops.setup_blend_config(lm, pstate->stage, fg_alpha,
+						bg_alpha, blend_op);
+	SDE_DEBUG("format 0x%x, alpha_enable %u fg alpha:0x%x bg alpha:0x%x \"\
+		 blend_op:0x%x\n", format->base.pixel_format,
+		format->alpha_enable, fg_alpha, bg_alpha, blend_op);
+}
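+
+/*
+ * Blend sketch (illustrative; the exact semantics live in the LM blend
+ * config of the hardware): a PREMULTIPLIED plane with const
+ * fg_alpha = 0x80 keeps the foreground at constant alpha while the
+ * background contribution is derived from the fg pixel alpha, further
+ * modulated and inverted via BG_MOD_ALPHA | BG_INV_MOD_ALPHA.
+ */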
+
+static void _sde_crtc_blend_setup_mixer(struct drm_crtc *crtc,
+	struct sde_crtc *sde_crtc, struct sde_crtc_mixer *mixer)
+{
+	struct drm_plane *plane;
+
+	struct sde_plane_state *pstate = NULL;
+	struct sde_format *format;
+	struct sde_hw_ctl *ctl = mixer->hw_ctl;
+	struct sde_hw_stage_cfg *stage_cfg = &sde_crtc->stage_cfg;
+
+	u32 flush_mask = 0, crtc_split_width;
+	uint32_t lm_idx = LEFT_MIXER, idx;
+	bool bg_alpha_enable[CRTC_DUAL_MIXERS] = {false};
+	bool lm_right = false;
+	int left_crtc_zpos_cnt[SDE_STAGE_MAX + 1] = {0};
+	int right_crtc_zpos_cnt[SDE_STAGE_MAX + 1] = {0};
+
+	crtc_split_width = get_crtc_split_width(crtc);
+
+	drm_atomic_crtc_for_each_plane(plane, crtc) {
+
+		pstate = to_sde_plane_state(plane->state);
+
+		/* always stage plane on either left or right lm */
+		if (plane->state->crtc_x >= crtc_split_width) {
+			lm_idx = RIGHT_MIXER;
+			idx = right_crtc_zpos_cnt[pstate->stage]++;
+		} else {
+			lm_idx = LEFT_MIXER;
+			idx = left_crtc_zpos_cnt[pstate->stage]++;
+		}
+
+		/* stage plane on right LM if it crosses the boundary */
+		lm_right = (lm_idx == LEFT_MIXER) &&
+		   (plane->state->crtc_x + plane->state->crtc_w >
+							crtc_split_width);
+
+		/*
+		 * program each mixer with two hw pipes in dual mixer mode
+		 */
+		if (sde_crtc->num_mixers == CRTC_DUAL_MIXERS && lm_right) {
+			stage_cfg->stage[LEFT_MIXER][pstate->stage][1] =
+				sde_plane_pipe(plane, 1);
+
+			flush_mask = ctl->ops.get_bitmask_sspp(ctl,
+				sde_plane_pipe(plane, 1));
+		}
+
+		flush_mask |= ctl->ops.get_bitmask_sspp(ctl,
+				sde_plane_pipe(plane, lm_idx ? 1 : 0));
+
+		stage_cfg->stage[lm_idx][pstate->stage][idx] =
+					sde_plane_pipe(plane, lm_idx ? 1 : 0);
+
+		mixer[lm_idx].flush_mask |= flush_mask;
+
+		SDE_DEBUG("crtc %d stage:%d - plane %d sspp %d fb %d\n",
+				crtc->base.id,
+				pstate->stage,
+				plane->base.id,
+				sde_plane_pipe(plane,
+					lm_idx ? 1 : 0) - SSPP_VIG0,
+				plane->state->fb ?
+				plane->state->fb->base.id : -1);
+
+		format = to_sde_format(msm_framebuffer_format(pstate->base.fb));
+
+		/* blend config update */
+		if (pstate->stage != SDE_STAGE_BASE) {
+			_sde_crtc_setup_blend_cfg(mixer + lm_idx, pstate,
+								format);
+
+			if (bg_alpha_enable[lm_idx] && !format->alpha_enable)
+				mixer[lm_idx].mixer_op_mode = 0;
+			else
+				mixer[lm_idx].mixer_op_mode |=
+					1 << pstate->stage;
+		} else if (format->alpha_enable) {
+			bg_alpha_enable[lm_idx] = true;
+		}
+
+		if (lm_right) {
+			idx = right_crtc_zpos_cnt[pstate->stage]++;
+
+			/*
+			 * program each mixer with two hw pipes
+			 * in dual mixer mode
+			 */
+			if (sde_crtc->num_mixers == CRTC_DUAL_MIXERS) {
+				stage_cfg->stage[RIGHT_MIXER][pstate->stage][1]
+					= sde_plane_pipe(plane, 0);
+			}
+
+			stage_cfg->stage[RIGHT_MIXER][pstate->stage][idx]
+				= sde_plane_pipe(plane, 1);
+
+			mixer[RIGHT_MIXER].flush_mask |= flush_mask;
+
+			/* blend config update */
+			if (pstate->stage != SDE_STAGE_BASE) {
+				_sde_crtc_setup_blend_cfg(mixer + RIGHT_MIXER,
+							pstate, format);
+
+				if (bg_alpha_enable[RIGHT_MIXER] &&
+						!format->alpha_enable)
+					mixer[RIGHT_MIXER].mixer_op_mode = 0;
+				else
+					mixer[RIGHT_MIXER].mixer_op_mode |=
+						1 << pstate->stage;
+			} else if (format->alpha_enable) {
+				bg_alpha_enable[RIGHT_MIXER] = true;
+			}
+		}
+	}
+}
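+
+/*
+ * Staging example (hypothetical 1080-px-wide mixers): a plane at
+ * crtc_x = 500 with crtc_w = 1200 starts on the left mixer but crosses
+ * the split boundary, so lm_right is set and it is staged on both LMs
+ * with one hw pipe feeding each side.
+ */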
+
+/**
+ * _sde_crtc_blend_setup - configure crtc mixers
+ * @crtc: Pointer to drm crtc structure
+ */
+static void _sde_crtc_blend_setup(struct drm_crtc *crtc)
+{
+	struct sde_crtc *sde_crtc = to_sde_crtc(crtc);
+	struct sde_crtc_mixer *mixer = sde_crtc->mixers;
+	struct sde_hw_ctl *ctl;
+	struct sde_hw_mixer *lm;
+
+	int i;
+
+	SDE_DEBUG("%s\n", sde_crtc->name);
+
+	if (sde_crtc->num_mixers > CRTC_DUAL_MIXERS) {
+		SDE_ERROR("invalid number mixers: %d\n", sde_crtc->num_mixers);
+		return;
+	}
+
+	for (i = 0; i < sde_crtc->num_mixers; i++) {
+		if (!mixer[i].hw_lm || !mixer[i].hw_ctl) {
+			SDE_ERROR("invalid lm or ctl assigned to mixer\n");
+			return;
+		}
+		mixer[i].mixer_op_mode = 0;
+		mixer[i].flush_mask = 0;
+		if (mixer[i].hw_ctl->ops.clear_all_blendstages)
+			mixer[i].hw_ctl->ops.clear_all_blendstages(
+					mixer[i].hw_ctl);
+	}
+
+	/* initialize stage cfg */
+	memset(&sde_crtc->stage_cfg, 0, sizeof(struct sde_hw_stage_cfg));
+
+	_sde_crtc_blend_setup_mixer(crtc, sde_crtc, mixer);
+
+	for (i = 0; i < sde_crtc->num_mixers; i++) {
+		ctl = mixer[i].hw_ctl;
+		lm = mixer[i].hw_lm;
+
+		lm->ops.setup_alpha_out(lm, mixer[i].mixer_op_mode);
+
+		mixer[i].flush_mask |= ctl->ops.get_bitmask_mixer(ctl,
+			mixer[i].hw_lm->idx);
+
+		/* stage config flush mask */
+		ctl->ops.update_pending_flush(ctl, mixer[i].flush_mask);
+
+		SDE_DEBUG("lm %d, op_mode 0x%X, ctl %d, flush mask 0x%x\n",
+			mixer[i].hw_lm->idx - LM_0,
+			mixer[i].mixer_op_mode,
+			ctl->idx - CTL_0,
+			mixer[i].flush_mask);
+
+		ctl->ops.setup_blendstage(ctl, mixer[i].hw_lm->idx,
+			&sde_crtc->stage_cfg, i);
+	}
+}
+
+void sde_crtc_prepare_commit(struct drm_crtc *crtc,
+		struct drm_crtc_state *old_state)
+{
+	struct sde_crtc *sde_crtc;
+	struct sde_crtc_state *cstate;
+	struct drm_connector *conn;
+
+	if (!crtc || !crtc->state) {
+		SDE_ERROR("invalid crtc\n");
+		return;
+	}
+
+	sde_crtc = to_sde_crtc(crtc);
+	cstate = to_sde_crtc_state(crtc->state);
+	SDE_EVT32(DRMID(crtc));
+
+	/* identify connectors attached to this crtc */
+	cstate->is_rt = false;
+	cstate->num_connectors = 0;
+
+	drm_for_each_connector(conn, crtc->dev)
+		if (conn->state && conn->state->crtc == crtc &&
+				cstate->num_connectors < MAX_CONNECTORS) {
+			cstate->connectors[cstate->num_connectors++] = conn;
+			sde_connector_prepare_fence(conn);
+
+			if (conn->connector_type != DRM_MODE_CONNECTOR_VIRTUAL)
+				cstate->is_rt = true;
+		}
+
+	/* prepare main output fence */
+	sde_fence_prepare(&sde_crtc->output_fence);
+}
+
+bool sde_crtc_is_rt(struct drm_crtc *crtc)
+{
+	if (!crtc || !crtc->state) {
+		SDE_ERROR("invalid crtc or state\n");
+		return true;
+	}
+	return to_sde_crtc_state(crtc->state)->is_rt;
+}
+
+/**
+ * _sde_crtc_complete_flip - signal pending page_flip events
+ * @crtc: Pointer to drm crtc structure
+ * @file: Pointer to drm file; if non-NULL, this is the preclose,
+ *        potential cancel-flip, path
+ *
+ * Any pending vblank events are added to the vblank_event_list
+ * so that the next vblank interrupt shall signal them.
+ * However, PAGE_FLIP events are not handled through the vblank_event_list.
+ * This API signals any pending PAGE_FLIP events requested through
+ * DRM_IOCTL_MODE_PAGE_FLIP and cached in sde_crtc->event.
+ */
+static void _sde_crtc_complete_flip(struct drm_crtc *crtc,
+		struct drm_file *file)
+{
+	struct sde_crtc *sde_crtc = to_sde_crtc(crtc);
+	struct drm_device *dev = crtc->dev;
+	struct drm_pending_vblank_event *event;
+	unsigned long flags;
+
+	spin_lock_irqsave(&dev->event_lock, flags);
+	event = sde_crtc->event;
+	if (event) {
+		/* if regular vblank case (!file) or if cancel-flip from
+		 * preclose on file that requested flip, then send the
+		 * event:
+		 */
+		if (!file || (event->base.file_priv == file)) {
+			sde_crtc->event = NULL;
+			DRM_DEBUG_VBL("%s: send event: %pK\n",
+						sde_crtc->name, event);
+			SDE_EVT32(DRMID(crtc));
+			drm_crtc_send_vblank_event(crtc, event);
+		}
+	}
+	spin_unlock_irqrestore(&dev->event_lock, flags);
+}
+
+enum sde_intf_mode sde_crtc_get_intf_mode(struct drm_crtc *crtc)
+{
+	struct drm_encoder *encoder;
+
+	if (!crtc || !crtc->dev) {
+		SDE_ERROR("invalid crtc\n");
+		return INTF_MODE_NONE;
+	}
+
+	drm_for_each_encoder(encoder, crtc->dev)
+		if (encoder->crtc == crtc)
+			return sde_encoder_get_intf_mode(encoder);
+
+	return INTF_MODE_NONE;
+}
+
+static void sde_crtc_vblank_cb(void *data)
+{
+	struct drm_crtc *crtc = (struct drm_crtc *)data;
+	struct sde_crtc *sde_crtc = to_sde_crtc(crtc);
+	unsigned int pending;
+
+	pending = atomic_xchg(&sde_crtc->pending, 0);
+	/* keep statistics on vblank callback - with auto reset via debugfs */
+	if (ktime_equal(sde_crtc->vblank_cb_time, ktime_set(0, 0)))
+		sde_crtc->vblank_cb_time = ktime_get();
+	else
+		sde_crtc->vblank_cb_count++;
+
+	if (pending & PENDING_FLIP)
+		_sde_crtc_complete_flip(crtc, NULL);
+
+	drm_crtc_handle_vblank(crtc);
+	DRM_DEBUG_VBL("crtc%d\n", crtc->base.id);
+	SDE_EVT32_IRQ(DRMID(crtc));
+}
+
+static void sde_crtc_frame_event_work(struct kthread_work *work)
+{
+	struct msm_drm_private *priv;
+	struct sde_crtc_frame_event *fevent;
+	struct drm_crtc *crtc;
+	struct sde_crtc *sde_crtc;
+	struct sde_crtc_state *cstate;
+	struct sde_kms *sde_kms;
+	unsigned long flags;
+
+	if (!work) {
+		SDE_ERROR("invalid work handle\n");
+		return;
+	}
+
+	fevent = container_of(work, struct sde_crtc_frame_event, work);
+	if (!fevent->crtc || !fevent->crtc->state) {
+		SDE_ERROR("invalid crtc\n");
+		return;
+	}
+
+	crtc = fevent->crtc;
+	sde_crtc = to_sde_crtc(crtc);
+	cstate = to_sde_crtc_state(crtc->state);
+
+	sde_kms = _sde_crtc_get_kms(crtc);
+	if (!sde_kms) {
+		SDE_ERROR("invalid kms handle\n");
+		return;
+	}
+	priv = sde_kms->dev->dev_private;
+
+	SDE_DEBUG("crtc%d event:%u ts:%lld\n", crtc->base.id, fevent->event,
+			ktime_to_ns(fevent->ts));
+
+	if (fevent->event == SDE_ENCODER_FRAME_EVENT_DONE ||
+			fevent->event == SDE_ENCODER_FRAME_EVENT_ERROR) {
+
+		if (atomic_read(&sde_crtc->frame_pending) < 1) {
+			/* this should not happen */
+			SDE_ERROR("crtc%d ts:%lld invalid frame_pending:%d\n",
+					crtc->base.id,
+					ktime_to_ns(fevent->ts),
+					atomic_read(&sde_crtc->frame_pending));
+			SDE_EVT32(DRMID(crtc), fevent->event, 0);
+		} else if (atomic_dec_return(&sde_crtc->frame_pending) == 0) {
+			/* release bandwidth and other resources */
+			SDE_DEBUG("crtc%d ts:%lld last pending\n",
+					crtc->base.id,
+					ktime_to_ns(fevent->ts));
+			SDE_EVT32(DRMID(crtc), fevent->event, 1);
+			sde_power_data_bus_bandwidth_ctrl(&priv->phandle,
+					sde_kms->core_client, false);
+			sde_core_perf_crtc_release_bw(crtc);
+		} else {
+			SDE_EVT32(DRMID(crtc), fevent->event, 2);
+		}
+
+		if (fevent->event == SDE_ENCODER_FRAME_EVENT_DONE)
+			sde_core_perf_crtc_update(crtc, 0, false);
+	} else {
+		SDE_ERROR("crtc%d ts:%lld unknown event %u\n", crtc->base.id,
+				ktime_to_ns(fevent->ts),
+				fevent->event);
+		SDE_EVT32(DRMID(crtc), fevent->event, 3);
+	}
+
+	spin_lock_irqsave(&sde_crtc->spin_lock, flags);
+	list_add_tail(&fevent->list, &sde_crtc->frame_event_list);
+	spin_unlock_irqrestore(&sde_crtc->spin_lock, flags);
+}
+
+static void sde_crtc_frame_event_cb(void *data, u32 event)
+{
+	struct drm_crtc *crtc = (struct drm_crtc *)data;
+	struct sde_crtc *sde_crtc;
+	struct msm_drm_private *priv;
+	struct sde_crtc_frame_event *fevent;
+	unsigned long flags;
+	int pipe_id;
+
+	if (!crtc || !crtc->dev || !crtc->dev->dev_private) {
+		SDE_ERROR("invalid parameters\n");
+		return;
+	}
+	sde_crtc = to_sde_crtc(crtc);
+	priv = crtc->dev->dev_private;
+	pipe_id = drm_crtc_index(crtc);
+
+	SDE_DEBUG("crtc%d\n", crtc->base.id);
+
+	SDE_EVT32(DRMID(crtc), event);
+
+	spin_lock_irqsave(&sde_crtc->spin_lock, flags);
+	fevent = list_first_entry_or_null(&sde_crtc->frame_event_list,
+			struct sde_crtc_frame_event, list);
+	if (fevent)
+		list_del_init(&fevent->list);
+	spin_unlock_irqrestore(&sde_crtc->spin_lock, flags);
+
+	if (!fevent) {
+		SDE_ERROR("crtc%d event %d overflow\n",
+				crtc->base.id, event);
+		SDE_EVT32(DRMID(crtc), event);
+		return;
+	}
+
+	fevent->event = event;
+	fevent->crtc = crtc;
+	fevent->ts = ktime_get();
+	queue_kthread_work(&priv->disp_thread[pipe_id].worker, &fevent->work);
+}
+
+/**
+ * sde_crtc_request_flip_cb - callback to request page_flip events
+ * @data: Pointer to cb private data
+ *
+ * Once the HW flush is complete, userspace must be notified of the
+ * PAGE_FLIP completed event in the next vblank event.
+ * Using this callback, a hint is set to signal any callers waiting
+ * for a PAGE_FLIP complete event.
+ * This is called within the enc_spinlock.
+ */
+static void sde_crtc_request_flip_cb(void *data)
+{
+	struct drm_crtc *crtc = (struct drm_crtc *)data;
+	struct sde_crtc *sde_crtc;
+
+	if (!crtc) {
+		SDE_ERROR("invalid parameters\n");
+		return;
+	}
+	sde_crtc = to_sde_crtc(crtc);
+	atomic_or(PENDING_FLIP, &sde_crtc->pending);
+}
+
+void sde_crtc_complete_commit(struct drm_crtc *crtc,
+		struct drm_crtc_state *old_state)
+{
+	struct sde_crtc *sde_crtc;
+	struct sde_crtc_state *cstate;
+	int i;
+
+	if (!crtc || !crtc->state) {
+		SDE_ERROR("invalid crtc\n");
+		return;
+	}
+
+	sde_crtc = to_sde_crtc(crtc);
+	cstate = to_sde_crtc_state(crtc->state);
+	SDE_EVT32(DRMID(crtc));
+
+	/* signal output fence(s) at end of commit */
+	sde_fence_signal(&sde_crtc->output_fence, 0);
+
+	for (i = 0; i < cstate->num_connectors; ++i)
+		sde_connector_complete_commit(cstate->connectors[i]);
+}
+
+/**
+ * _sde_crtc_set_input_fence_timeout - update ns version of input fence timeout
+ * @cstate: Pointer to sde crtc state
+ */
+static void _sde_crtc_set_input_fence_timeout(struct sde_crtc_state *cstate)
+{
+	if (!cstate) {
+		SDE_ERROR("invalid cstate\n");
+		return;
+	}
+	cstate->input_fence_timeout_ns =
+		sde_crtc_get_property(cstate, CRTC_PROP_INPUT_FENCE_TIMEOUT);
+	cstate->input_fence_timeout_ns *= NSEC_PER_MSEC;
+}
+
+/**
+ * _sde_crtc_wait_for_fences - wait for incoming framebuffer sync fences
+ * @crtc: Pointer to CRTC object
+ */
+static void _sde_crtc_wait_for_fences(struct drm_crtc *crtc)
+{
+	struct drm_plane *plane = NULL;
+	uint32_t wait_ms = 1;
+	ktime_t kt_end, kt_wait;
+
+	SDE_DEBUG("\n");
+
+	if (!crtc || !crtc->state) {
+		SDE_ERROR("invalid crtc/state %pK\n", crtc);
+		return;
+	}
+
+	/* use monotonic timer to limit total fence wait time */
+	kt_end = ktime_add_ns(ktime_get(),
+		to_sde_crtc_state(crtc->state)->input_fence_timeout_ns);
+
+	/*
+	 * Wait for fences sequentially, as all of them need to be signalled
+	 * before we can proceed.
+	 *
+	 * Limit total wait time to INPUT_FENCE_TIMEOUT, but still call
+	 * sde_plane_wait_input_fence with wait_ms == 0 after the timeout so
+	 * that each plane can check its fence status and react appropriately
+	 * if its fence has timed out.
+	 */
+	SDE_ATRACE_BEGIN("plane_wait_input_fence");
+	drm_atomic_crtc_for_each_plane(plane, crtc) {
+		if (wait_ms) {
+			/* determine updated wait time */
+			kt_wait = ktime_sub(kt_end, ktime_get());
+			if (ktime_compare(kt_wait, ktime_set(0, 0)) >= 0)
+				wait_ms = ktime_to_ms(kt_wait);
+			else
+				wait_ms = 0;
+		}
+		sde_plane_wait_input_fence(plane, wait_ms);
+	}
+	SDE_ATRACE_END("plane_wait_input_fence");
+}
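+
+/*
+ * Timeout budget example (illustrative): with the default 2000 ms
+ * budget, if the first plane's fence consumes 1500 ms, the next wait
+ * gets at most 500 ms; once the budget is exhausted, the remaining
+ * planes are polled with wait_ms == 0 so each can still flag a timeout.
+ */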
+
+static void _sde_crtc_setup_mixer_for_encoder(
+		struct drm_crtc *crtc,
+		struct drm_encoder *enc)
+{
+	struct sde_crtc *sde_crtc = to_sde_crtc(crtc);
+	struct sde_kms *sde_kms = _sde_crtc_get_kms(crtc);
+	struct sde_rm *rm = &sde_kms->rm;
+	struct sde_crtc_mixer *mixer;
+	struct sde_hw_ctl *last_valid_ctl = NULL;
+	int i;
+	struct sde_rm_hw_iter lm_iter, ctl_iter, dspp_iter;
+
+	sde_rm_init_hw_iter(&lm_iter, enc->base.id, SDE_HW_BLK_LM);
+	sde_rm_init_hw_iter(&ctl_iter, enc->base.id, SDE_HW_BLK_CTL);
+	sde_rm_init_hw_iter(&dspp_iter, enc->base.id, SDE_HW_BLK_DSPP);
+
+	/* Set up all the mixers and ctls reserved by this encoder */
+	for (i = sde_crtc->num_mixers; i < ARRAY_SIZE(sde_crtc->mixers); i++) {
+		mixer = &sde_crtc->mixers[i];
+
+		if (!sde_rm_get_hw(rm, &lm_iter))
+			break;
+		mixer->hw_lm = (struct sde_hw_mixer *)lm_iter.hw;
+
+		/* CTL may be <= LMs, if <, multiple LMs controlled by 1 CTL */
+		if (!sde_rm_get_hw(rm, &ctl_iter)) {
+			SDE_DEBUG("no ctl assigned to lm %d, using previous\n",
+					mixer->hw_lm->idx - LM_0);
+			mixer->hw_ctl = last_valid_ctl;
+		} else {
+			mixer->hw_ctl = (struct sde_hw_ctl *)ctl_iter.hw;
+			last_valid_ctl = mixer->hw_ctl;
+		}
+
+		/* Shouldn't happen, mixers are always >= ctls */
+		if (!mixer->hw_ctl) {
+			SDE_ERROR("no valid ctls found for lm %d\n",
+					mixer->hw_lm->idx - LM_0);
+			return;
+		}
+
+		/* Dspp may be null */
+		(void) sde_rm_get_hw(rm, &dspp_iter);
+		mixer->hw_dspp = (struct sde_hw_dspp *)dspp_iter.hw;
+
+		mixer->encoder = enc;
+
+		sde_crtc->num_mixers++;
+		SDE_DEBUG("setup mixer %d: lm %d\n",
+				i, mixer->hw_lm->idx - LM_0);
+		SDE_DEBUG("setup mixer %d: ctl %d\n",
+				i, mixer->hw_ctl->idx - CTL_0);
+	}
+}
+
+static void _sde_crtc_setup_mixers(struct drm_crtc *crtc)
+{
+	struct sde_crtc *sde_crtc = to_sde_crtc(crtc);
+	struct drm_encoder *enc;
+
+	sde_crtc->num_mixers = 0;
+	memset(sde_crtc->mixers, 0, sizeof(sde_crtc->mixers));
+
+	mutex_lock(&sde_crtc->crtc_lock);
+	/* Check for mixers on all encoders attached to this crtc */
+	list_for_each_entry(enc, &crtc->dev->mode_config.encoder_list, head) {
+		if (enc->crtc != crtc)
+			continue;
+
+		_sde_crtc_setup_mixer_for_encoder(crtc, enc);
+	}
+	mutex_unlock(&sde_crtc->crtc_lock);
+}
+
+static void sde_crtc_atomic_begin(struct drm_crtc *crtc,
+		struct drm_crtc_state *old_state)
+{
+	struct sde_crtc *sde_crtc;
+	struct drm_device *dev;
+	u32 i;
+
+	if (!crtc) {
+		SDE_ERROR("invalid crtc\n");
+		return;
+	}
+
+	if (!crtc->state->enable) {
+		SDE_DEBUG("crtc%d -> enable %d, skip atomic_begin\n",
+				crtc->base.id, crtc->state->enable);
+		return;
+	}
+
+	SDE_DEBUG("crtc%d\n", crtc->base.id);
+
+	sde_crtc = to_sde_crtc(crtc);
+	dev = crtc->dev;
+
+	if (!sde_crtc->num_mixers)
+		_sde_crtc_setup_mixers(crtc);
+
+	/* Reset flush mask from previous commit */
+	for (i = 0; i < ARRAY_SIZE(sde_crtc->mixers); i++) {
+		struct sde_hw_ctl *ctl = sde_crtc->mixers[i].hw_ctl;
+
+		if (ctl)
+			ctl->ops.clear_pending_flush(ctl);
+	}
+
+	/*
+	 * If no mixers have been allocated in sde_crtc_atomic_check(),
+	 * it means we are trying to flush a CRTC whose state is disabled:
+	 * nothing else needs to be done.
+	 */
+	if (unlikely(!sde_crtc->num_mixers))
+		return;
+
+	_sde_crtc_blend_setup(crtc);
+	sde_cp_crtc_apply_properties(crtc);
+
+	/*
+	 * PP_DONE irq is only used by command mode for now.
+	 * It is better to request pending before FLUSH and START trigger
+	 * to make sure no pp_done irq missed.
+	 * This is safe because no pp_done will happen before SW trigger
+	 * in command mode.
+	 */
+}
+
+static void sde_crtc_atomic_flush(struct drm_crtc *crtc,
+		struct drm_crtc_state *old_crtc_state)
+{
+	struct sde_crtc *sde_crtc;
+	struct drm_device *dev;
+	struct drm_plane *plane;
+	unsigned long flags;
+
+	if (!crtc) {
+		SDE_ERROR("invalid crtc\n");
+		return;
+	}
+
+	if (!crtc->state->enable) {
+		SDE_DEBUG("crtc%d -> enable %d, skip atomic_flush\n",
+				crtc->base.id, crtc->state->enable);
+		return;
+	}
+
+	SDE_DEBUG("crtc%d\n", crtc->base.id);
+
+	sde_crtc = to_sde_crtc(crtc);
+
+	dev = crtc->dev;
+
+	if (sde_crtc->event) {
+		SDE_ERROR("%s already received sde_crtc->event\n",
+				  sde_crtc->name);
+	} else {
+		spin_lock_irqsave(&dev->event_lock, flags);
+		sde_crtc->event = crtc->state->event;
+		spin_unlock_irqrestore(&dev->event_lock, flags);
+	}
+
+	/*
+	 * If no mixers have been allocated in sde_crtc_atomic_check(),
+	 * it means we are trying to flush a CRTC whose state is disabled:
+	 * nothing else needs to be done.
+	 */
+	if (unlikely(!sde_crtc->num_mixers))
+		return;
+
+	/* wait for acquire fences before anything else is done */
+	_sde_crtc_wait_for_fences(crtc);
+
+	/* update performance setting before crtc kickoff */
+	sde_core_perf_crtc_update(crtc, 1, false);
+
+	/*
+	 * Final plane updates: Give each plane a chance to complete all
+	 *                      required writes/flushing before crtc's "flush
+	 *                      everything" call below.
+	 */
+	drm_atomic_crtc_for_each_plane(plane, crtc)
+		sde_plane_flush(plane);
+
+	/* Kickoff will be scheduled by outer layer */
+}
+
+/**
+ * sde_crtc_destroy_state - state destroy hook
+ * @crtc: drm CRTC
+ * @state: CRTC state object to release
+ */
+static void sde_crtc_destroy_state(struct drm_crtc *crtc,
+		struct drm_crtc_state *state)
+{
+	struct sde_crtc *sde_crtc;
+	struct sde_crtc_state *cstate;
+
+	if (!crtc || !state) {
+		SDE_ERROR("invalid argument(s)\n");
+		return;
+	}
+
+	sde_crtc = to_sde_crtc(crtc);
+	cstate = to_sde_crtc_state(state);
+
+	SDE_DEBUG("crtc%d\n", crtc->base.id);
+
+	__drm_atomic_helper_crtc_destroy_state(crtc, state);
+
+	/* destroy value helper */
+	msm_property_destroy_state(&sde_crtc->property_info, cstate,
+			cstate->property_values, cstate->property_blobs);
+}
+
+void sde_crtc_commit_kickoff(struct drm_crtc *crtc)
+{
+	struct drm_encoder *encoder;
+	struct drm_device *dev;
+	struct sde_crtc *sde_crtc;
+	struct msm_drm_private *priv;
+	struct sde_kms *sde_kms;
+
+	if (!crtc) {
+		SDE_ERROR("invalid argument\n");
+		return;
+	}
+	dev = crtc->dev;
+	sde_crtc = to_sde_crtc(crtc);
+	sde_kms = _sde_crtc_get_kms(crtc);
+	priv = sde_kms->dev->dev_private;
+
+	/*
+	 * If no mixers have been allocated in sde_crtc_atomic_check(),
+	 * it means we are trying to start a CRTC whose state is disabled:
+	 * nothing else needs to be done.
+	 */
+	if (unlikely(!sde_crtc->num_mixers))
+		return;
+
+	SDE_ATRACE_BEGIN("crtc_commit");
+	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+		if (encoder->crtc != crtc)
+			continue;
+
+		/*
+		 * Encoder will flush/start now, unless it has a tx pending.
+		 * If so, it may delay and flush at an irq event (e.g. ppdone)
+		 */
+		sde_encoder_prepare_for_kickoff(encoder);
+	}
+
+	if (atomic_read(&sde_crtc->frame_pending) > 2) {
+		/* framework allows only 1 outstanding + current */
+		SDE_ERROR("crtc%d invalid frame pending\n",
+				crtc->base.id);
+		SDE_EVT32(DRMID(crtc), 0);
+		goto end;
+	} else if (atomic_inc_return(&sde_crtc->frame_pending) == 1) {
+		/* acquire bandwidth and other resources */
+		SDE_DEBUG("crtc%d first commit\n", crtc->base.id);
+		SDE_EVT32(DRMID(crtc), 1);
+		sde_power_data_bus_bandwidth_ctrl(&priv->phandle,
+				sde_kms->core_client, true);
+	} else {
+		SDE_DEBUG("crtc%d commit\n", crtc->base.id);
+		SDE_EVT32(DRMID(crtc), 2);
+	}
+
+	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+		if (encoder->crtc != crtc)
+			continue;
+
+		sde_encoder_kickoff(encoder);
+	}
+end:
+	SDE_ATRACE_END("crtc_commit");
+	return;
+}
+
+/**
+ * _sde_crtc_vblank_enable_no_lock - update power resource and vblank request
+ * @sde_crtc: Pointer to sde crtc structure
+ * @enable: Whether to enable/disable vblanks
+ *
+ * @Returns: error code
+ */
+static int _sde_crtc_vblank_enable_no_lock(
+		struct sde_crtc *sde_crtc, bool enable)
+{
+	struct drm_device *dev;
+	struct drm_crtc *crtc;
+	struct drm_encoder *enc;
+	struct msm_drm_private *priv;
+	struct sde_kms *sde_kms;
+	int ret = 0;
+
+	if (!sde_crtc) {
+		SDE_ERROR("invalid crtc\n");
+		return -EINVAL;
+	}
+
+	crtc = &sde_crtc->base;
+	dev = crtc->dev;
+	priv = dev->dev_private;
+
+	if (!priv->kms) {
+		SDE_ERROR("invalid kms\n");
+		return -EINVAL;
+	}
+	sde_kms = to_sde_kms(priv->kms);
+
+	if (enable) {
+		ret = sde_power_resource_enable(&priv->phandle,
+				sde_kms->core_client, true);
+		if (ret)
+			return ret;
+
+		list_for_each_entry(enc, &dev->mode_config.encoder_list, head) {
+			if (enc->crtc != crtc)
+				continue;
+
+			SDE_EVT32(DRMID(crtc), DRMID(enc), enable);
+
+			sde_encoder_register_vblank_callback(enc,
+					sde_crtc_vblank_cb, (void *)crtc);
+		}
+	} else {
+		list_for_each_entry(enc, &dev->mode_config.encoder_list, head) {
+			if (enc->crtc != crtc)
+				continue;
+
+			SDE_EVT32(DRMID(crtc), DRMID(enc), enable);
+
+			sde_encoder_register_vblank_callback(enc, NULL, NULL);
+		}
+		ret = sde_power_resource_enable(&priv->phandle,
+				sde_kms->core_client, false);
+	}
+
+	return ret;
+}
+
+/**
+ * _sde_crtc_set_suspend - notify crtc of suspend enable/disable
+ * @crtc: Pointer to drm crtc object
+ * @enable: true to enable suspend, false to indicate resume
+ */
+static void _sde_crtc_set_suspend(struct drm_crtc *crtc, bool enable)
+{
+	struct sde_crtc *sde_crtc;
+	struct msm_drm_private *priv;
+	struct sde_kms *sde_kms;
+
+	if (!crtc || !crtc->dev || !crtc->dev->dev_private) {
+		SDE_ERROR("invalid crtc\n");
+		return;
+	}
+	sde_crtc = to_sde_crtc(crtc);
+	priv = crtc->dev->dev_private;
+
+	if (!priv->kms) {
+		SDE_ERROR("invalid crtc kms\n");
+		return;
+	}
+	sde_kms = to_sde_kms(priv->kms);
+
+	SDE_DEBUG("crtc%d suspend = %d\n", crtc->base.id, enable);
+
+	mutex_lock(&sde_crtc->crtc_lock);
+
+	/*
+	 * Update CP on suspend/resume transitions
+	 */
+	if (enable && !sde_crtc->suspend)
+		sde_cp_crtc_suspend(crtc);
+	else if (!enable && sde_crtc->suspend)
+		sde_cp_crtc_resume(crtc);
+
+	/*
+	 * If the vblank refcount != 0, release a power reference on suspend
+	 * and take it back during resume (if it is still != 0).
+	 */
+	if (sde_crtc->suspend == enable)
+		SDE_DEBUG("crtc%d suspend already set to %d, ignoring update\n",
+				crtc->base.id, enable);
+	else if (sde_crtc->enabled && sde_crtc->vblank_requested)
+		_sde_crtc_vblank_enable_no_lock(sde_crtc, !enable);
+
+	sde_crtc->suspend = enable;
+
+	mutex_unlock(&sde_crtc->crtc_lock);
+}
+
+/**
+ * sde_crtc_duplicate_state - state duplicate hook
+ * @crtc: Pointer to drm crtc structure
+ * @Returns: Pointer to new drm_crtc_state structure
+ */
+static struct drm_crtc_state *sde_crtc_duplicate_state(struct drm_crtc *crtc)
+{
+	struct sde_crtc *sde_crtc;
+	struct sde_crtc_state *cstate, *old_cstate;
+
+	if (!crtc || !crtc->state) {
+		SDE_ERROR("invalid argument(s)\n");
+		return NULL;
+	}
+
+	sde_crtc = to_sde_crtc(crtc);
+	old_cstate = to_sde_crtc_state(crtc->state);
+	cstate = msm_property_alloc_state(&sde_crtc->property_info);
+	if (!cstate) {
+		SDE_ERROR("failed to allocate state\n");
+		return NULL;
+	}
+
+	/* duplicate value helper */
+	msm_property_duplicate_state(&sde_crtc->property_info,
+			old_cstate, cstate,
+			cstate->property_values, cstate->property_blobs);
+
+	/* duplicate base helper */
+	__drm_atomic_helper_crtc_duplicate_state(crtc, &cstate->base);
+
+	return &cstate->base;
+}
+
+/**
+ * sde_crtc_reset - reset hook for CRTCs
+ * Resets the atomic state for @crtc by freeing the state pointer (which might
+ * be NULL, e.g. at driver load time) and allocating a new empty state object.
+ * @crtc: Pointer to drm crtc structure
+ */
+static void sde_crtc_reset(struct drm_crtc *crtc)
+{
+	struct sde_crtc *sde_crtc;
+	struct sde_crtc_state *cstate;
+
+	if (!crtc) {
+		SDE_ERROR("invalid crtc\n");
+		return;
+	}
+
+	/* revert suspend actions, if necessary */
+	if (msm_is_suspend_state(crtc->dev))
+		_sde_crtc_set_suspend(crtc, false);
+
+	/* remove previous state, if present */
+	if (crtc->state) {
+		sde_crtc_destroy_state(crtc, crtc->state);
+		crtc->state = 0;
+	}
+
+	sde_crtc = to_sde_crtc(crtc);
+	cstate = msm_property_alloc_state(&sde_crtc->property_info);
+	if (!cstate) {
+		SDE_ERROR("failed to allocate state\n");
+		return;
+	}
+
+	/* reset value helper */
+	msm_property_reset_state(&sde_crtc->property_info, cstate,
+			cstate->property_values, cstate->property_blobs);
+
+	_sde_crtc_set_input_fence_timeout(cstate);
+
+	cstate->base.crtc = crtc;
+	crtc->state = &cstate->base;
+}
+
+static void sde_crtc_disable(struct drm_crtc *crtc)
+{
+	struct drm_encoder *encoder;
+	struct sde_crtc *sde_crtc;
+	struct sde_kms *sde_kms;
+	struct msm_drm_private *priv;
+	int ret = 0;
+
+	if (!crtc || !crtc->dev || !crtc->state) {
+		SDE_ERROR("invalid crtc\n");
+		return;
+	}
+	sde_crtc = to_sde_crtc(crtc);
+	sde_kms = _sde_crtc_get_kms(crtc);
+	if (!sde_kms || !sde_kms->dev || !sde_kms->dev->dev_private) {
+		SDE_ERROR("invalid kms handle\n");
+		return;
+	}
+	priv = sde_kms->dev->dev_private;
+
+	SDE_DEBUG("crtc%d\n", crtc->base.id);
+
+	if (msm_is_suspend_state(crtc->dev))
+		_sde_crtc_set_suspend(crtc, true);
+
+	mutex_lock(&sde_crtc->crtc_lock);
+	SDE_EVT32(DRMID(crtc), sde_crtc->enabled, sde_crtc->suspend,
+			sde_crtc->vblank_requested);
+
+	if (sde_crtc->enabled && !sde_crtc->suspend &&
+			sde_crtc->vblank_requested) {
+		ret = _sde_crtc_vblank_enable_no_lock(sde_crtc, false);
+		if (ret)
+			SDE_ERROR("%s vblank disable failed: %d\n",
+				sde_crtc->name, ret);
+	}
+
+	sde_crtc->enabled = false;
+
+	if (atomic_read(&sde_crtc->frame_pending)) {
+		/* release bandwidth and other resources */
+		SDE_ERROR("crtc%d invalid frame pending\n",
+				crtc->base.id);
+		SDE_EVT32(DRMID(crtc));
+		sde_power_data_bus_bandwidth_ctrl(&priv->phandle,
+				sde_kms->core_client, false);
+		sde_core_perf_crtc_release_bw(crtc);
+		atomic_set(&sde_crtc->frame_pending, 0);
+	}
+
+	sde_core_perf_crtc_update(crtc, 0, true);
+
+	drm_for_each_encoder(encoder, crtc->dev) {
+		if (encoder->crtc != crtc)
+			continue;
+		sde_encoder_register_frame_event_callback(encoder, NULL, NULL);
+		sde_encoder_register_request_flip_callback(encoder, NULL, NULL);
+	}
+
+	memset(sde_crtc->mixers, 0, sizeof(sde_crtc->mixers));
+	sde_crtc->num_mixers = 0;
+	mutex_unlock(&sde_crtc->crtc_lock);
+}
+
+static void sde_crtc_enable(struct drm_crtc *crtc)
+{
+	struct sde_crtc *sde_crtc;
+	struct sde_crtc_mixer *mixer;
+	struct sde_hw_mixer *lm;
+	struct drm_display_mode *mode;
+	struct sde_hw_mixer_cfg cfg;
+	struct drm_encoder *encoder;
+	int i;
+	int ret = 0;
+
+	if (!crtc) {
+		SDE_ERROR("invalid crtc\n");
+		return;
+	}
+
+	SDE_DEBUG("crtc%d\n", crtc->base.id);
+	SDE_EVT32(DRMID(crtc));
+
+	sde_crtc = to_sde_crtc(crtc);
+	mixer = sde_crtc->mixers;
+
+	if (WARN_ON(!crtc->state))
+		return;
+
+	mode = &crtc->state->adjusted_mode;
+
+	drm_mode_debug_printmodeline(mode);
+
+	drm_for_each_encoder(encoder, crtc->dev) {
+		if (encoder->crtc != crtc)
+			continue;
+		sde_encoder_register_frame_event_callback(encoder,
+				sde_crtc_frame_event_cb, (void *)crtc);
+		sde_encoder_register_request_flip_callback(encoder,
+				sde_crtc_request_flip_cb, (void *)crtc);
+	}
+
+	mutex_lock(&sde_crtc->crtc_lock);
+	SDE_EVT32(DRMID(crtc), sde_crtc->enabled, sde_crtc->suspend,
+			sde_crtc->vblank_requested);
+	if (!sde_crtc->enabled && !sde_crtc->suspend &&
+			sde_crtc->vblank_requested) {
+		ret = _sde_crtc_vblank_enable_no_lock(sde_crtc, true);
+		if (ret)
+			SDE_ERROR("%s vblank enable failed: %d\n",
+				sde_crtc->name, ret);
+	}
+	sde_crtc->enabled = true;
+	mutex_unlock(&sde_crtc->crtc_lock);
+
+	for (i = 0; i < sde_crtc->num_mixers; i++) {
+		lm = mixer[i].hw_lm;
+		cfg.out_width = sde_crtc_mixer_width(sde_crtc, mode);
+		cfg.out_height = mode->vdisplay;
+		cfg.right_mixer = (i != 0);
+		cfg.flags = 0;
+		lm->ops.setup_mixer_out(lm, &cfg);
+	}
+}
+
+struct plane_state {
+	struct sde_plane_state *sde_pstate;
+	struct drm_plane_state *drm_pstate;
+
+	int stage;
+};
+
+static int pstate_cmp(const void *a, const void *b)
+{
+	struct plane_state *pa = (struct plane_state *)a;
+	struct plane_state *pb = (struct plane_state *)b;
+	int rc = 0;
+	int pa_zpos, pb_zpos;
+
+	pa_zpos = sde_plane_get_property(pa->sde_pstate, PLANE_PROP_ZPOS);
+	pb_zpos = sde_plane_get_property(pb->sde_pstate, PLANE_PROP_ZPOS);
+
+	if (pa_zpos != pb_zpos)
+		rc = pa_zpos - pb_zpos;
+	else
+		rc = pa->drm_pstate->crtc_x - pb->drm_pstate->crtc_x;
+
+	return rc;
+}
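+
+/*
+ * Sort example: planes with (zpos, crtc_x) of (2, 960), (1, 0), (2, 0)
+ * order as (1, 0), (2, 0), (2, 960) -- zpos is the primary key and the
+ * x offset breaks ties so left-most planes come first within a stage.
+ */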
+
+static int sde_crtc_atomic_check(struct drm_crtc *crtc,
+		struct drm_crtc_state *state)
+{
+	struct sde_crtc *sde_crtc;
+	struct plane_state pstates[SDE_STAGE_MAX * 2];
+
+	struct drm_plane_state *pstate;
+	struct drm_plane *plane;
+	struct drm_display_mode *mode;
+
+	int cnt = 0, rc = 0, mixer_width, i, z_pos;
+	int left_zpos_cnt = 0, right_zpos_cnt = 0;
+
+	if (!crtc || !state) {
+		SDE_ERROR("invalid crtc or state\n");
+		return -EINVAL;
+	}
+
+	if (!state->enable || !state->active) {
+		SDE_DEBUG("crtc%d -> enable %d, active %d, skip atomic_check\n",
+				crtc->base.id, state->enable, state->active);
+		return 0;
+	}
+
+	sde_crtc = to_sde_crtc(crtc);
+	mode = &state->adjusted_mode;
+	SDE_DEBUG("%s: check", sde_crtc->name);
+
+	/* force a full mode set if active state changed */
+	if (state->active_changed)
+		state->mode_changed = true;
+
+	mixer_width = sde_crtc_mixer_width(sde_crtc, mode);
+
+	/* get plane state for all drm planes associated with crtc state */
+	drm_atomic_crtc_state_for_each_plane(plane, state) {
+		pstate = drm_atomic_get_existing_plane_state(
+				state->state, plane);
+		if (IS_ERR_OR_NULL(pstate)) {
+			SDE_DEBUG("%s: failed to get plane%d state, %d\n",
+					sde_crtc->name, plane->base.id, rc);
+			continue;
+		}
+		if (cnt >= ARRAY_SIZE(pstates))
+			continue;
+
+		pstates[cnt].sde_pstate = to_sde_plane_state(pstate);
+		pstates[cnt].drm_pstate = pstate;
+		pstates[cnt].stage = sde_plane_get_property(
+				pstates[cnt].sde_pstate, PLANE_PROP_ZPOS);
+		cnt++;
+
+		if (CHECK_LAYER_BOUNDS(pstate->crtc_y, pstate->crtc_h,
+				mode->vdisplay) ||
+		    CHECK_LAYER_BOUNDS(pstate->crtc_x, pstate->crtc_w,
+				mode->hdisplay)) {
+			SDE_ERROR("invalid vertical/horizontal destination\n");
+			SDE_ERROR("y:%d h:%d vdisp:%d x:%d w:%d hdisp:%d\n",
+				pstate->crtc_y, pstate->crtc_h, mode->vdisplay,
+				pstate->crtc_x, pstate->crtc_w, mode->hdisplay);
+			rc = -E2BIG;
+			goto end;
+		}
+	}
+
+	/* assign mixer stages based on sorted zpos property */
+	sort(pstates, cnt, sizeof(pstates[0]), pstate_cmp, NULL);
+
+	if (!sde_is_custom_client() && cnt) {
+		int stage_old = pstates[0].stage;
+
+		z_pos = 0;
+		for (i = 0; i < cnt; i++) {
+			if (stage_old != pstates[i].stage)
+				++z_pos;
+			stage_old = pstates[i].stage;
+			pstates[i].stage = z_pos;
+		}
+	}
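+	/*
+	 * Normalization example: user zpos values {2, 2, 5} collapse to
+	 * contiguous stages {0, 0, 1}; only custom clients may pick raw
+	 * stage numbers themselves.
+	 */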
+
+	z_pos = -1;
+	for (i = 0; i < cnt; i++) {
+		/* reset counts at every new blend stage */
+		if (pstates[i].stage != z_pos) {
+			left_zpos_cnt = 0;
+			right_zpos_cnt = 0;
+			z_pos = pstates[i].stage;
+		}
+
+		/* verify z_pos setting before using it */
+		if (z_pos >= SDE_STAGE_MAX - SDE_STAGE_0) {
+			SDE_ERROR("> %d plane stages assigned\n",
+					SDE_STAGE_MAX - SDE_STAGE_0);
+			rc = -EINVAL;
+			goto end;
+		} else if (pstates[i].drm_pstate->crtc_x < mixer_width) {
+			if (left_zpos_cnt == 2) {
+				SDE_ERROR("> 2 planes @ stage %d on left\n",
+					z_pos);
+				rc = -EINVAL;
+				goto end;
+			}
+			left_zpos_cnt++;
+
+		} else {
+			if (right_zpos_cnt == 2) {
+				SDE_ERROR("> 2 planes @ stage %d on right\n",
+					z_pos);
+				rc = -EINVAL;
+				goto end;
+			}
+			right_zpos_cnt++;
+		}
+
+		pstates[i].sde_pstate->stage = z_pos + SDE_STAGE_0;
+		SDE_DEBUG("%s: zpos %d", sde_crtc->name, z_pos);
+	}
+
+	rc = sde_core_perf_crtc_check(crtc, state);
+	if (rc) {
+		SDE_ERROR("crtc%d failed performance check %d\n",
+				crtc->base.id, rc);
+		goto end;
+	}
+
+	/*
+	 * validate source split:
+	 * use pstates sorted by stage to check planes on same stage
+	 * we assume that all pipes are in source split so it's valid to
+	 * compare without taking into account left/right mixer placement
+	 */
+	for (i = 1; i < cnt; i++) {
+		struct plane_state *prv_pstate, *cur_pstate;
+		struct sde_rect left_rect, right_rect;
+		int32_t left_pid, right_pid;
+		int32_t stage;
+
+		prv_pstate = &pstates[i - 1];
+		cur_pstate = &pstates[i];
+		if (prv_pstate->stage != cur_pstate->stage)
+			continue;
+
+		stage = cur_pstate->stage;
+
+		left_pid = prv_pstate->sde_pstate->base.plane->base.id;
+		POPULATE_RECT(&left_rect, prv_pstate->drm_pstate->crtc_x,
+			prv_pstate->drm_pstate->crtc_y,
+			prv_pstate->drm_pstate->crtc_w,
+			prv_pstate->drm_pstate->crtc_h, false);
+
+		right_pid = cur_pstate->sde_pstate->base.plane->base.id;
+		POPULATE_RECT(&right_rect, cur_pstate->drm_pstate->crtc_x,
+			cur_pstate->drm_pstate->crtc_y,
+			cur_pstate->drm_pstate->crtc_w,
+			cur_pstate->drm_pstate->crtc_h, false);
+
+		if (right_rect.x < left_rect.x) {
+			swap(left_pid, right_pid);
+			swap(left_rect, right_rect);
+		}
+
+		/*
+		 * - planes are enumerated in pipe-priority order such that
+		 *   planes with lower drm_id must be left-most in a shared
+		 *   blend-stage when using source split.
+		 * - planes in source split must be contiguous in width
+		 * - planes in source split must have same dest yoff and height
+		 */
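+		/*
+		 * e.g. a valid pair at one stage: left rect x=0 w=960,
+		 * right rect x=960 w=960, identical y and h, with the
+		 * left plane having the lower drm plane id.
+		 */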
+		if (right_pid < left_pid) {
+			SDE_ERROR(
+				"invalid src split cfg. priority mismatch. stage: %d left: %d right: %d\n",
+				stage, left_pid, right_pid);
+			rc = -EINVAL;
+			goto end;
+		} else if (right_rect.x != (left_rect.x + left_rect.w)) {
+			SDE_ERROR(
+				"non-contiguous coordinates for src split. stage: %d left: %d - %d right: %d - %d\n",
+				stage, left_rect.x, left_rect.w,
+				right_rect.x, right_rect.w);
+			rc = -EINVAL;
+			goto end;
+		} else if ((left_rect.y != right_rect.y) ||
+				(left_rect.h != right_rect.h)) {
+			SDE_ERROR(
+				"source split at stage: %d. invalid yoff/height: l_y: %d r_y: %d l_h: %d r_h: %d\n",
+				stage, left_rect.y, right_rect.y,
+				left_rect.h, right_rect.h);
+			rc = -EINVAL;
+			goto end;
+		}
+	}
+
+end:
+	return rc;
+}
+
+int sde_crtc_vblank(struct drm_crtc *crtc, bool en)
+{
+	struct sde_crtc *sde_crtc;
+	int ret;
+
+	if (!crtc) {
+		SDE_ERROR("invalid crtc\n");
+		return -EINVAL;
+	}
+	sde_crtc = to_sde_crtc(crtc);
+
+	mutex_lock(&sde_crtc->crtc_lock);
+	if (sde_crtc->vblank_requested != en) {
+		SDE_EVT32(DRMID(&sde_crtc->base), en, sde_crtc->enabled,
+				sde_crtc->suspend, sde_crtc->vblank_requested);
+		if (sde_crtc->enabled && !sde_crtc->suspend) {
+			ret = _sde_crtc_vblank_enable_no_lock(sde_crtc, en);
+			if (ret)
+				SDE_ERROR("%s vblank enable failed: %d\n",
+						sde_crtc->name, ret);
+		}
+
+		sde_crtc->vblank_requested = en;
+	}
+	mutex_unlock(&sde_crtc->crtc_lock);
+
+	return 0;
+}
+
+void sde_crtc_cancel_pending_flip(struct drm_crtc *crtc,
+	struct drm_file *file)
+{
+	struct sde_crtc *sde_crtc = to_sde_crtc(crtc);
+
+	SDE_DEBUG("%s: cancel: %pK\n", sde_crtc->name, file);
+	_sde_crtc_complete_flip(crtc, file);
+}
+
+/**
+ * sde_crtc_install_properties - install all drm properties for crtc
+ * @crtc: Pointer to drm crtc structure
+ */
+static void sde_crtc_install_properties(struct drm_crtc *crtc,
+				struct sde_mdss_cfg *catalog)
+{
+	struct sde_crtc *sde_crtc;
+	struct drm_device *dev;
+	struct sde_kms_info *info;
+	struct sde_kms *sde_kms;
+	static const struct drm_prop_enum_list e_secure_level[] = {
+		{SDE_DRM_SEC_NON_SEC, "sec_and_non_sec"},
+		{SDE_DRM_SEC_ONLY, "sec_only"},
+	};
+
+	SDE_DEBUG("\n");
+
+	if (!crtc || !catalog) {
+		SDE_ERROR("invalid crtc or catalog\n");
+		return;
+	}
+
+	sde_crtc = to_sde_crtc(crtc);
+	dev = crtc->dev;
+	sde_kms = _sde_crtc_get_kms(crtc);
+	if (!sde_kms) {
+		SDE_ERROR("invalid kms handle\n");
+		return;
+	}
+
+	info = kzalloc(sizeof(struct sde_kms_info), GFP_KERNEL);
+	if (!info) {
+		SDE_ERROR("failed to allocate info memory\n");
+		return;
+	}
+
+	/* range properties */
+	msm_property_install_range(&sde_crtc->property_info,
+		"input_fence_timeout", 0x0, 0, SDE_CRTC_MAX_INPUT_FENCE_TIMEOUT,
+		SDE_CRTC_INPUT_FENCE_TIMEOUT, CRTC_PROP_INPUT_FENCE_TIMEOUT);
+
+#if 0
+	msm_property_install_range(&sde_crtc->property_info, "output_fence",
+			0x0, 0, INR_OPEN_MAX, 0x0, CRTC_PROP_OUTPUT_FENCE);
+
+	msm_property_install_range(&sde_crtc->property_info,
+			"output_fence_offset", 0x0, 0, 1, 0,
+			CRTC_PROP_OUTPUT_FENCE_OFFSET);
+#endif
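+	/*
+	 * Note (editor's reading): with the fence properties above compiled
+	 * out, the CRTC_PROP_OUTPUT_FENCE lookup in
+	 * sde_crtc_atomic_get_property() below can never match, so output
+	 * fences are effectively unreachable from userspace in this tree.
+	 */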
+
+	msm_property_install_range(&sde_crtc->property_info,
+			"core_clk", 0x0, 0, U64_MAX,
+			sde_kms->perf.max_core_clk_rate,
+			CRTC_PROP_CORE_CLK);
+	msm_property_install_range(&sde_crtc->property_info,
+			"core_ab", 0x0, 0, U64_MAX,
+			SDE_POWER_HANDLE_DATA_BUS_AB_QUOTA,
+			CRTC_PROP_CORE_AB);
+	msm_property_install_range(&sde_crtc->property_info,
+			"core_ib", 0x0, 0, U64_MAX,
+			SDE_POWER_HANDLE_DATA_BUS_IB_QUOTA,
+			CRTC_PROP_CORE_IB);
+
+	msm_property_install_blob(&sde_crtc->property_info, "capabilities",
+		DRM_MODE_PROP_IMMUTABLE, CRTC_PROP_INFO);
+
+	msm_property_install_enum(&sde_crtc->property_info, "security_level",
+			0x0, 0, e_secure_level,
+			ARRAY_SIZE(e_secure_level),
+			CRTC_PROP_SECURITY_LEVEL, SDE_DRM_SEC_NON_SEC);
+
+	sde_kms_info_reset(info);
+
+	sde_kms_info_add_keyint(info, "hw_version", catalog->hwversion);
+	sde_kms_info_add_keyint(info, "max_linewidth",
+			catalog->max_mixer_width);
+	sde_kms_info_add_keyint(info, "max_blendstages",
+			catalog->max_mixer_blendstages);
+	if (catalog->qseed_type == SDE_SSPP_SCALER_QSEED2)
+		sde_kms_info_add_keystr(info, "qseed_type", "qseed2");
+	if (catalog->qseed_type == SDE_SSPP_SCALER_QSEED3)
+		sde_kms_info_add_keystr(info, "qseed_type", "qseed3");
+	sde_kms_info_add_keyint(info, "has_src_split", catalog->has_src_split);
+	sde_kms_info_add_keyint(info, "has_hdr", catalog->has_hdr);
+	if (catalog->perf.max_bw_low)
+		sde_kms_info_add_keyint(info, "max_bandwidth_low",
+				catalog->perf.max_bw_low);
+	if (catalog->perf.max_bw_high)
+		sde_kms_info_add_keyint(info, "max_bandwidth_high",
+				catalog->perf.max_bw_high);
+	if (sde_kms->perf.max_core_clk_rate)
+		sde_kms_info_add_keyint(info, "max_mdp_clk",
+				sde_kms->perf.max_core_clk_rate);
+	msm_property_set_blob(&sde_crtc->property_info, &sde_crtc->blob_info,
+			info->data, SDE_KMS_INFO_DATALEN(info), CRTC_PROP_INFO);
+
+	kfree(info);
+}
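+
+/*
+ * Usage sketch (editor's illustration, not part of this patch): userspace
+ * can read the immutable "capabilities" blob through libdrm, e.g.:
+ *
+ *	drmModeObjectProperties *props = drmModeObjectGetProperties(fd,
+ *			crtc_id, DRM_MODE_OBJECT_CRTC);
+ *	// locate the property named "capabilities", then:
+ *	drmModePropertyBlobRes *blob = drmModeGetPropertyBlob(fd, blob_id);
+ *	// blob->data carries "key=value\n" pairs such as "max_linewidth=..."
+ */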
+
+/**
+ * sde_crtc_atomic_set_property - atomically set a crtc drm property
+ * @crtc: Pointer to drm crtc structure
+ * @state: Pointer to drm crtc state structure
+ * @property: Pointer to targeted drm property
+ * @val: Updated property value
+ * @Returns: Zero on success
+ */
+static int sde_crtc_atomic_set_property(struct drm_crtc *crtc,
+		struct drm_crtc_state *state,
+		struct drm_property *property,
+		uint64_t val)
+{
+	struct sde_crtc *sde_crtc;
+	struct sde_crtc_state *cstate;
+	int idx, ret = -EINVAL;
+
+	if (!crtc || !state || !property) {
+		SDE_ERROR("invalid argument(s)\n");
+	} else {
+		sde_crtc = to_sde_crtc(crtc);
+		cstate = to_sde_crtc_state(state);
+		ret = msm_property_atomic_set(&sde_crtc->property_info,
+				cstate->property_values, cstate->property_blobs,
+				property, val);
+		if (!ret) {
+			idx = msm_property_index(&sde_crtc->property_info,
+					property);
+			if (idx == CRTC_PROP_INPUT_FENCE_TIMEOUT)
+				_sde_crtc_set_input_fence_timeout(cstate);
+		} else {
+			ret = sde_cp_crtc_set_property(crtc,
+					property, val);
+		}
+		if (ret)
+			DRM_ERROR("failed to set the property\n");
+	}
+
+	return ret;
+}
+
+/**
+ * sde_crtc_set_property - set a crtc drm property
+ * @crtc: Pointer to drm crtc structure
+ * @property: Pointer to targeted drm property
+ * @val: Updated property value
+ * @Returns: Zero on success
+ */
+static int sde_crtc_set_property(struct drm_crtc *crtc,
+		struct drm_property *property, uint64_t val)
+{
+	SDE_DEBUG("\n");
+
+	return sde_crtc_atomic_set_property(crtc, crtc->state, property, val);
+}
+
+/**
+ * sde_crtc_atomic_get_property - retrieve a crtc drm property
+ * @crtc: Pointer to drm crtc structure
+ * @state: Pointer to drm crtc state structure
+ * @property: Pointer to targeted drm property
+ * @val: Pointer to variable for receiving property value
+ * @Returns: Zero on success
+ */
+static int sde_crtc_atomic_get_property(struct drm_crtc *crtc,
+		const struct drm_crtc_state *state,
+		struct drm_property *property,
+		uint64_t *val)
+{
+	struct sde_crtc *sde_crtc;
+	struct sde_crtc_state *cstate;
+	int i, ret = -EINVAL;
+	bool conn_offset = false;
+
+	if (!crtc || !state) {
+		SDE_ERROR("invalid argument(s)\n");
+	} else {
+		sde_crtc = to_sde_crtc(crtc);
+		cstate = to_sde_crtc_state(state);
+
+		for (i = 0; i < cstate->num_connectors; ++i) {
+			conn_offset = sde_connector_needs_offset(
+						cstate->connectors[i]);
+			if (conn_offset)
+				break;
+		}
+
+		i = msm_property_index(&sde_crtc->property_info, property);
+		if (i == CRTC_PROP_OUTPUT_FENCE) {
+			int offset = sde_crtc_get_property(cstate,
+					CRTC_PROP_OUTPUT_FENCE_OFFSET);
+
+			ret = sde_fence_create(&sde_crtc->output_fence, val,
+							offset + conn_offset);
+			if (ret)
+				SDE_ERROR("fence create failed\n");
+		} else {
+			ret = msm_property_atomic_get(&sde_crtc->property_info,
+					cstate->property_values,
+					cstate->property_blobs, property, val);
+			if (ret)
+				ret = sde_cp_crtc_get_property(crtc,
+					property, val);
+		}
+		if (ret)
+			DRM_ERROR("get property failed\n");
+	}
+	return ret;
+}
+
+#ifdef CONFIG_DEBUG_FS
+static int _sde_debugfs_status_show(struct seq_file *s, void *data)
+{
+	struct sde_crtc *sde_crtc;
+	struct sde_plane_state *pstate = NULL;
+	struct sde_crtc_mixer *m;
+
+	struct drm_crtc *crtc;
+	struct drm_plane *plane;
+	struct drm_display_mode *mode;
+	struct drm_framebuffer *fb;
+	struct drm_plane_state *state;
+
+	int i, out_width;
+
+	if (!s || !s->private)
+		return -EINVAL;
+
+	sde_crtc = s->private;
+	crtc = &sde_crtc->base;
+
+	mutex_lock(&sde_crtc->crtc_lock);
+	mode = &crtc->state->adjusted_mode;
+	out_width = sde_crtc_mixer_width(sde_crtc, mode);
+
+	seq_printf(s, "crtc:%d width:%d height:%d\n", crtc->base.id,
+				mode->hdisplay, mode->vdisplay);
+
+	seq_puts(s, "\n");
+
+	for (i = 0; i < sde_crtc->num_mixers; ++i) {
+		m = &sde_crtc->mixers[i];
+		if (!m->hw_lm)
+			seq_printf(s, "\tmixer[%d] has no lm\n", i);
+		else if (!m->hw_ctl)
+			seq_printf(s, "\tmixer[%d] has no ctl\n", i);
+		else
+			seq_printf(s, "\tmixer:%d ctl:%d width:%d height:%d\n",
+				m->hw_lm->idx - LM_0, m->hw_ctl->idx - CTL_0,
+				out_width, mode->vdisplay);
+	}
+
+	seq_puts(s, "\n");
+
+	drm_atomic_crtc_for_each_plane(plane, crtc) {
+		pstate = to_sde_plane_state(plane->state);
+		state = plane->state;
+
+		if (!pstate || !state)
+			continue;
+
+		seq_printf(s, "\tplane:%u pipe:%u stage:%d\n", plane->base.id,
+			   sde_plane_pipe(plane, 0), pstate->stage);
+
+		if (plane->state->fb) {
+			fb = plane->state->fb;
+
+			seq_printf(s, "\tfb:%d image format:%4.4s wxh:%ux%u bpp:%d\n",
+				fb->base.id, (char *) &fb->pixel_format,
+				fb->width, fb->height, fb->bits_per_pixel);
+
+			seq_puts(s, "\t");
+			for (i = 0; i < ARRAY_SIZE(fb->modifier); i++)
+				seq_printf(s, "modifier[%d]:0x%8llx ", i,
+							fb->modifier[i]);
+			seq_puts(s, "\n");
+
+			seq_puts(s, "\t");
+			for (i = 0; i < ARRAY_SIZE(fb->pitches); i++)
+				seq_printf(s, "pitches[%d]:%8u ", i,
+							fb->pitches[i]);
+			seq_puts(s, "\n");
+
+			seq_puts(s, "\t");
+			for (i = 0; i < ARRAY_SIZE(fb->offsets); i++)
+				seq_printf(s, "offsets[%d]:%8u ", i,
+							fb->offsets[i]);
+			seq_puts(s, "\n");
+		}
+
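+		/*
+		 * src_* are Q16.16 fixed point; (frac * 15625) >> 10 equals
+		 * frac * 1000000 / 65536, i.e. the fractional part printed
+		 * in millionths (editor's note).
+		 */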
+		seq_printf(s, "\tsrc_x:%4u.%06u src_y:%4u.%06u src_w:%4u.%06u src_h:%4u.%06u\n",
+			state->src_x >> 16, ((state->src_x & 0xffff) * 15625) >> 10,
+			state->src_y >> 16, ((state->src_y & 0xffff) * 15625) >> 10,
+			state->src_w >> 16, ((state->src_w & 0xffff) * 15625) >> 10,
+			state->src_h >> 16, ((state->src_h & 0xffff) * 15625) >> 10);
+
+		seq_printf(s, "\tdst x:%4u        dst_y:%4u        dst_w:%4u        dst_h:%4u\n",
+			state->crtc_x, state->crtc_y, state->crtc_w,
+			state->crtc_h);
+		seq_puts(s, "\n");
+	}
+
+	if (sde_crtc->vblank_cb_count) {
+		ktime_t diff = ktime_sub(ktime_get(), sde_crtc->vblank_cb_time);
+		s64 diff_ms = ktime_to_ms(diff);
+		s64 fps = diff_ms ? DIV_ROUND_CLOSEST(
+				sde_crtc->vblank_cb_count * 1000, diff_ms) : 0;
+
+		seq_printf(s,
+			"vblank fps:%lld count:%u total:%lldms\n",
+				fps,
+				sde_crtc->vblank_cb_count,
+				ktime_to_ms(diff));
+
+		/* reset time & count for next measurement */
+		sde_crtc->vblank_cb_count = 0;
+		sde_crtc->vblank_cb_time = ktime_set(0, 0);
+	}
+
+	seq_printf(s, "vblank_enable:%d\n", sde_crtc->vblank_requested);
+
+	mutex_unlock(&sde_crtc->crtc_lock);
+
+	return 0;
+}
+
+static int _sde_debugfs_status_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, _sde_debugfs_status_show, inode->i_private);
+}
+#endif
+
+static void sde_crtc_suspend(struct drm_crtc *crtc)
+{
+	sde_cp_crtc_suspend(crtc);
+}
+
+static void sde_crtc_resume(struct drm_crtc *crtc)
+{
+	sde_cp_crtc_resume(crtc);
+}
+
+static const struct drm_crtc_funcs sde_crtc_funcs = {
+	.set_config = drm_atomic_helper_set_config,
+	.destroy = sde_crtc_destroy,
+	.page_flip = drm_atomic_helper_page_flip,
+	.set_property = sde_crtc_set_property,
+	.atomic_set_property = sde_crtc_atomic_set_property,
+	.atomic_get_property = sde_crtc_atomic_get_property,
+	.reset = sde_crtc_reset,
+	.atomic_duplicate_state = sde_crtc_duplicate_state,
+	.atomic_destroy_state = sde_crtc_destroy_state,
+	.save = sde_crtc_suspend,
+	.restore = sde_crtc_resume,
+};
+
+static const struct drm_crtc_helper_funcs sde_crtc_helper_funcs = {
+	.mode_fixup = sde_crtc_mode_fixup,
+	.disable = sde_crtc_disable,
+	.enable = sde_crtc_enable,
+	.atomic_check = sde_crtc_atomic_check,
+	.atomic_begin = sde_crtc_atomic_begin,
+	.atomic_flush = sde_crtc_atomic_flush,
+};
+
+#ifdef CONFIG_DEBUG_FS
+#define DEFINE_SDE_DEBUGFS_SEQ_FOPS(__prefix)				\
+static int __prefix ## _open(struct inode *inode, struct file *file)	\
+{									\
+	return single_open(file, __prefix ## _show, inode->i_private);	\
+}									\
+static const struct file_operations __prefix ## _fops = {		\
+	.owner = THIS_MODULE,						\
+	.open = __prefix ## _open,					\
+	.release = single_release,					\
+	.read = seq_read,						\
+	.llseek = seq_lseek,						\
+}
+
+static int sde_crtc_debugfs_state_show(struct seq_file *s, void *v)
+{
+	struct drm_crtc *crtc = (struct drm_crtc *) s->private;
+	struct sde_crtc *sde_crtc = to_sde_crtc(crtc);
+	struct sde_crtc_state *cstate = to_sde_crtc_state(crtc->state);
+
+	seq_printf(s, "num_connectors: %d\n", cstate->num_connectors);
+	seq_printf(s, "is_rt: %d\n", cstate->is_rt);
+	seq_printf(s, "intf_mode: %d\n", sde_crtc_get_intf_mode(crtc));
+
+	seq_printf(s, "bw_ctl: %llu\n", sde_crtc->cur_perf.bw_ctl);
+	seq_printf(s, "core_clk_rate: %u\n",
+			sde_crtc->cur_perf.core_clk_rate);
+	seq_printf(s, "max_per_pipe_ib: %llu\n",
+			sde_crtc->cur_perf.max_per_pipe_ib);
+
+	return 0;
+}
+DEFINE_SDE_DEBUGFS_SEQ_FOPS(sde_crtc_debugfs_state);
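+
+/*
+ * Usage sketch (editor's illustration; the exact debugfs root depends on
+ * sde_debugfs_get_root(), so the path below is an assumption):
+ *	cat /sys/kernel/debug/dri/0/crtc0/status
+ *	cat /sys/kernel/debug/dri/0/crtc0/state
+ */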
+
+static void _sde_crtc_init_debugfs(struct sde_crtc *sde_crtc,
+		struct sde_kms *sde_kms)
+{
+	static const struct file_operations debugfs_status_fops = {
+		.open =		_sde_debugfs_status_open,
+		.read =		seq_read,
+		.llseek =	seq_lseek,
+		.release =	single_release,
+	};
+
+	if (sde_crtc && sde_kms) {
+		sde_crtc->debugfs_root = debugfs_create_dir(sde_crtc->name,
+				sde_debugfs_get_root(sde_kms));
+		if (sde_crtc->debugfs_root) {
+			/* don't error check these */
+			debugfs_create_file("status", S_IRUGO,
+					sde_crtc->debugfs_root,
+					sde_crtc, &debugfs_status_fops);
+			debugfs_create_file("state", S_IRUGO | S_IWUSR,
+					sde_crtc->debugfs_root,
+					&sde_crtc->base,
+					&sde_crtc_debugfs_state_fops);
+		}
+	}
+}
+#else
+static void _sde_crtc_init_debugfs(struct sde_crtc *sde_crtc,
+		struct sde_kms *sde_kms)
+{
+}
+#endif
+
+/* initialize crtc */
+struct drm_crtc *sde_crtc_init(struct drm_device *dev,
+	struct drm_plane *plane)
+{
+	struct drm_crtc *crtc = NULL;
+	struct sde_crtc *sde_crtc = NULL;
+	struct msm_drm_private *priv = NULL;
+	struct sde_kms *kms = NULL;
+	int i;
+
+	priv = dev->dev_private;
+	kms = to_sde_kms(priv->kms);
+
+	sde_crtc = kzalloc(sizeof(*sde_crtc), GFP_KERNEL);
+	if (!sde_crtc)
+		return ERR_PTR(-ENOMEM);
+
+	crtc = &sde_crtc->base;
+	crtc->dev = dev;
+
+	mutex_init(&sde_crtc->crtc_lock);
+	spin_lock_init(&sde_crtc->spin_lock);
+	atomic_set(&sde_crtc->frame_pending, 0);
+
+	INIT_LIST_HEAD(&sde_crtc->frame_event_list);
+	for (i = 0; i < ARRAY_SIZE(sde_crtc->frame_events); i++) {
+		INIT_LIST_HEAD(&sde_crtc->frame_events[i].list);
+		list_add(&sde_crtc->frame_events[i].list,
+				&sde_crtc->frame_event_list);
+		init_kthread_work(&sde_crtc->frame_events[i].work,
+				sde_crtc_frame_event_work);
+	}
+
+	drm_crtc_init_with_planes(dev, crtc, plane, NULL, &sde_crtc_funcs);
+
+	drm_crtc_helper_add(crtc, &sde_crtc_helper_funcs);
+	plane->crtc = crtc;
+
+	/* save user friendly CRTC name for later */
+	snprintf(sde_crtc->name, SDE_CRTC_NAME_SIZE, "crtc%u", crtc->base.id);
+
+	/* initialize output fence support */
+	sde_fence_init(&sde_crtc->output_fence, sde_crtc->name, crtc->base.id);
+
+	/* initialize debugfs support */
+	_sde_crtc_init_debugfs(sde_crtc, kms);
+
+	/* create CRTC properties */
+	msm_property_init(&sde_crtc->property_info, &crtc->base, dev,
+			priv->crtc_property, sde_crtc->property_data,
+			CRTC_PROP_COUNT, CRTC_PROP_BLOBCOUNT,
+			sizeof(struct sde_crtc_state));
+
+	sde_crtc_install_properties(crtc, kms->catalog);
+
+	/* Install color processing properties */
+	sde_cp_crtc_init(crtc);
+	sde_cp_crtc_install_properties(crtc);
+
+	SDE_DEBUG("%s: successfully initialized crtc\n", sde_crtc->name);
+	return crtc;
+}
diff -Nruw linux-4.4.115-fbx/drivers/gpu/drm/msm/sde./sde_crtc.h linux-4.4.115-fbx/drivers/gpu/drm/msm/sde/sde_crtc.h
--- linux-4.4.115-fbx/drivers/gpu/drm/msm/sde./sde_crtc.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/gpu/drm/msm/sde/sde_crtc.h	2019-01-22 16:16:23.511246479 +0100
@@ -0,0 +1,295 @@
+/*
+ * Copyright (c) 2015-2017 The Linux Foundation. All rights reserved.
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _SDE_CRTC_H_
+#define _SDE_CRTC_H_
+
+#include "drm_crtc.h"
+#include "msm_prop.h"
+#include "sde_fence.h"
+#include "sde_kms.h"
+#include "sde_core_perf.h"
+
+#define SDE_CRTC_NAME_SIZE	12
+
+/* define the maximum number of in-flight frame events */
+#define SDE_CRTC_FRAME_EVENT_SIZE	2
+
+/**
+ * struct sde_crtc_mixer: stores the map for each virtual pipeline in the CRTC
+ * @hw_lm:	LM HW Driver context
+ * @hw_ctl:	CTL Path HW driver context
+ * @hw_dspp:	DSPP HW driver context
+ * @encoder:	Encoder attached to this lm & ctl
+ * @mixer_op_mode: mixer blending operation mode
+ * @flush_mask:	mixer flush mask for ctl, mixer and pipe
+ */
+struct sde_crtc_mixer {
+	struct sde_hw_mixer *hw_lm;
+	struct sde_hw_ctl *hw_ctl;
+	struct sde_hw_dspp  *hw_dspp;
+	struct drm_encoder *encoder;
+	u32 mixer_op_mode;
+	u32 flush_mask;
+};
+
+/**
+ * struct sde_crtc_frame_event: stores crtc frame event for crtc processing
+ * @work:	base work structure
+ * @crtc:	Pointer to crtc handling this event
+ * @list:	event list
+ * @ts:		timestamp at queue entry
+ * @event:	event identifier
+ */
+struct sde_crtc_frame_event {
+	struct kthread_work work;
+	struct drm_crtc *crtc;
+	struct list_head list;
+	ktime_t ts;
+	u32 event;
+};
+
+/**
+ * struct sde_crtc - virtualized CRTC data structure
+ * @base          : Base drm crtc structure
+ * @name          : ASCII description of this crtc
+ * @num_ctls      : Number of ctl paths in use
+ * @num_mixers    : Number of mixers in use
+ * @mixers        : Array of active mixers
+ * @event         : Pointer to last received drm vblank event. If there is a
+ *                  pending vblank event, this will be non-null.
+ * @vsync_count   : Running count of received vsync events
+ * @property_info : Opaque structure for generic property support
+ * @property_data : Array of private data for generic property support
+ * @stage_cfg     : H/w mixer stage configuration
+ * @debugfs_root  : Parent of debugfs node
+ * @vblank_cb_count : count of vblank callback since last reset
+ * @vblank_cb_time  : ktime at vblank count reset
+ * @vblank_requested : whether the user has requested vblank events
+ * @suspend         : whether or not a suspend operation is in progress
+ * @enabled       : whether the SDE CRTC is currently enabled. updated in the
+ *                  commit-thread, not state-swap time which is earlier, so
+ *                  safe to make decisions on during VBLANK on/off work
+ * @feature_list  : list of color processing features supported on a crtc
+ * @active_list   : list of color processing features that are active
+ * @dirty_list    : list of color processing features that are dirty
+ * @crtc_lock     : crtc lock around create, destroy and access.
+ * @frame_pending : Whether or not an update is pending
+ * @frame_events  : static allocation of in-flight frame events
+ * @frame_event_list : available frame event list
+ * @pending       : Whether any page-flip events are pending signal
+ * @spin_lock     : spin lock for frame event, transaction status, etc...
+ * @cur_perf      : current performance committed to clock/bandwidth driver
+ * @new_perf      : new performance committed to clock/bandwidth driver
+ */
+struct sde_crtc {
+	struct drm_crtc base;
+	char name[SDE_CRTC_NAME_SIZE];
+
+	/* HW Resources reserved for the crtc */
+	u32 num_ctls;
+	u32 num_mixers;
+	struct sde_crtc_mixer mixers[CRTC_DUAL_MIXERS];
+
+	struct drm_pending_vblank_event *event;
+	u32 vsync_count;
+
+	struct msm_property_info property_info;
+	struct msm_property_data property_data[CRTC_PROP_COUNT];
+	struct drm_property_blob *blob_info;
+
+	/* output fence support */
+	struct sde_fence output_fence;
+	atomic_t pending;
+	struct sde_hw_stage_cfg stage_cfg;
+	struct dentry *debugfs_root;
+
+	u32 vblank_cb_count;
+	ktime_t vblank_cb_time;
+	bool vblank_requested;
+	bool suspend;
+	bool enabled;
+
+	struct list_head feature_list;
+	struct list_head active_list;
+	struct list_head dirty_list;
+
+	struct mutex crtc_lock;
+
+	atomic_t frame_pending;
+	struct sde_crtc_frame_event frame_events[SDE_CRTC_FRAME_EVENT_SIZE];
+	struct list_head frame_event_list;
+	spinlock_t spin_lock;
+
+	struct sde_core_perf_params cur_perf;
+	struct sde_core_perf_params new_perf;
+};
+
+#define to_sde_crtc(x) container_of(x, struct sde_crtc, base)
+
+/**
+ * struct sde_crtc_state - sde container for atomic crtc state
+ * @base: Base drm crtc state structure
+ * @connectors    : Currently associated drm connectors
+ * @num_connectors: Number of associated drm connectors
+ * @is_rt         : Whether or not the current commit contains RT connectors
+ * @intf_mode     : Interface mode of the primary connector
+ * @property_values: Current crtc property values
+ * @input_fence_timeout_ns : Cached input fence timeout, in ns
+ * @property_blobs: Reference pointers for blob properties
+ * @new_perf: new performance state being requested
+ */
+struct sde_crtc_state {
+	struct drm_crtc_state base;
+
+	struct drm_connector *connectors[MAX_CONNECTORS];
+	int num_connectors;
+	bool is_rt;
+	enum sde_intf_mode intf_mode;
+
+	uint64_t property_values[CRTC_PROP_COUNT];
+	uint64_t input_fence_timeout_ns;
+	struct drm_property_blob *property_blobs[CRTC_PROP_COUNT];
+
+	struct sde_core_perf_params new_perf;
+};
+
+#define to_sde_crtc_state(x) \
+	container_of(x, struct sde_crtc_state, base)
+
+/**
+ * sde_crtc_get_property - query integer value of crtc property
+ * @S: Pointer to crtc state
+ * @X: Property index, from enum msm_mdp_crtc_property
+ * Returns: Integer value of requested property
+ */
+#define sde_crtc_get_property(S, X) \
+	((S) && ((X) < CRTC_PROP_COUNT) ? ((S)->property_values[(X)]) : 0)
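+
+/*
+ * Example (editor's sketch, not part of the patch):
+ *	u64 timeout = sde_crtc_get_property(cstate,
+ *			CRTC_PROP_INPUT_FENCE_TIMEOUT);
+ * evaluates to 0 when the state pointer is NULL or the index is out of
+ * range.
+ */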
+
+static inline int sde_crtc_mixer_width(struct sde_crtc *sde_crtc,
+	struct drm_display_mode *mode)
+{
+	if (!sde_crtc || !mode)
+		return 0;
+
+	return  sde_crtc->num_mixers == CRTC_DUAL_MIXERS ?
+		mode->hdisplay / CRTC_DUAL_MIXERS : mode->hdisplay;
+}
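+
+/*
+ * e.g. a 3840px-wide mode with dual mixers yields 1920px per mixer,
+ * assuming CRTC_DUAL_MIXERS == 2 (editor's note).
+ */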
+
+static inline uint32_t get_crtc_split_width(struct drm_crtc *crtc)
+{
+	struct drm_display_mode *mode;
+	struct sde_crtc *sde_crtc;
+
+	if (!crtc)
+		return 0;
+
+	sde_crtc = to_sde_crtc(crtc);
+	mode = &crtc->state->adjusted_mode;
+	return sde_crtc_mixer_width(sde_crtc, mode);
+}
+
+/**
+ * sde_crtc_vblank - enable or disable vblanks for this crtc
+ * @crtc: Pointer to drm crtc object
+ * @en: true to enable vblanks, false to disable
+ */
+int sde_crtc_vblank(struct drm_crtc *crtc, bool en);
+
+/**
+ * sde_crtc_commit_kickoff - trigger kickoff of the commit for this crtc
+ * @crtc: Pointer to drm crtc object
+ */
+void sde_crtc_commit_kickoff(struct drm_crtc *crtc);
+
+/**
+ * sde_crtc_prepare_commit - callback to prepare for output fences
+ * @crtc: Pointer to drm crtc object
+ * @old_state: Pointer to drm crtc old state object
+ */
+void sde_crtc_prepare_commit(struct drm_crtc *crtc,
+		struct drm_crtc_state *old_state);
+
+/**
+ * sde_crtc_complete_commit - callback signalling completion of current commit
+ * @crtc: Pointer to drm crtc object
+ * @old_state: Pointer to drm crtc old state object
+ */
+void sde_crtc_complete_commit(struct drm_crtc *crtc,
+		struct drm_crtc_state *old_state);
+
+/**
+ * sde_crtc_init - create a new crtc object
+ * @dev: sde device
+ * @plane: base plane
+ * @Return: new crtc object or error
+ */
+struct drm_crtc *sde_crtc_init(struct drm_device *dev, struct drm_plane *plane);
+
+/**
+ * sde_crtc_cancel_pending_flip - complete flip for clients on lastclose
+ * @crtc: Pointer to drm crtc object
+ * @file: file handle of the client whose flip is cancelled
+ */
+void sde_crtc_cancel_pending_flip(struct drm_crtc *crtc, struct drm_file *file);
+
+/**
+ * sde_crtc_is_rt - query whether real time connectors are present on the crtc
+ * @crtc: Pointer to drm crtc structure
+ * Returns: True if a connector is present with real time constraints
+ */
+bool sde_crtc_is_rt(struct drm_crtc *crtc);
+
+/**
+ * sde_crtc_get_intf_mode - get primary interface mode of the given crtc
+ * @crtc: Pointer to crtc
+ */
+enum sde_intf_mode sde_crtc_get_intf_mode(struct drm_crtc *crtc);
+
+/**
+ * sde_crtc_is_wb - check if writeback is primary output of this crtc
+ * @crtc: Pointer to crtc
+ */
+static inline bool sde_crtc_is_wb(struct drm_crtc *crtc)
+{
+	struct sde_crtc_state *cstate =
+			crtc ? to_sde_crtc_state(crtc->state) : NULL;
+
+	return cstate ? (cstate->intf_mode == INTF_MODE_WB_LINE) : false;
+}
+
+/**
+ * sde_crtc_is_nrt - check if primary output of this crtc is non-realtime client
+ * @crtc: Pointer to crtc
+ */
+static inline bool sde_crtc_is_nrt(struct drm_crtc *crtc)
+{
+	return sde_crtc_is_wb(crtc);
+}
+
+/**
+ * sde_crtc_is_enabled - check if sde crtc is enabled or not
+ * @crtc: Pointer to crtc
+ */
+static inline bool sde_crtc_is_enabled(struct drm_crtc *crtc)
+{
+	return crtc ? crtc->enabled : false;
+}
+
+#endif /* _SDE_CRTC_H_ */
diff -Nruw linux-4.4.115-fbx/drivers/gpu/drm/msm/sde./sde_encoder.c linux-4.4.115-fbx/drivers/gpu/drm/msm/sde/sde_encoder.c
--- linux-4.4.115-fbx/drivers/gpu/drm/msm/sde./sde_encoder.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/gpu/drm/msm/sde/sde_encoder.c	2019-10-29 09:26:23.637203119 +0100
@@ -0,0 +1,1608 @@
+/*
+ * Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#define pr_fmt(fmt)	"[drm:%s:%d] " fmt, __func__, __LINE__
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+
+#include "msm_drv.h"
+#include "sde_kms.h"
+#include "drm_crtc.h"
+#include "drm_crtc_helper.h"
+
+#include "sde_hwio.h"
+#include "sde_hw_catalog.h"
+#include "sde_hw_intf.h"
+#include "sde_hw_ctl.h"
+#include "sde_formats.h"
+#include "sde_encoder_phys.h"
+#include "sde_color_processing.h"
+#include "sde_trace.h"
+
+#define SDE_DEBUG_ENC(e, fmt, ...) SDE_DEBUG("enc%d " fmt,\
+		(e) ? (e)->base.base.id : -1, ##__VA_ARGS__)
+
+#define SDE_ERROR_ENC(e, fmt, ...) SDE_ERROR("enc%d " fmt,\
+		(e) ? (e)->base.base.id : -1, ##__VA_ARGS__)
+
+/* timeout in frames waiting for frame done */
+#define SDE_ENCODER_FRAME_DONE_TIMEOUT	60
+
+#define MISR_BUFF_SIZE	256
+
+/*
+ * Two, to anticipate panels that can do cmd/vid dynamic switching.
+ * The plan is to create all possible physical encoder types up front and
+ * switch between them at runtime.
+ */
+#define NUM_PHYS_ENCODER_TYPES 2
+
+#define MAX_PHYS_ENCODERS_PER_VIRTUAL \
+	(MAX_H_TILES_PER_DISPLAY * NUM_PHYS_ENCODER_TYPES)
+
+#define MAX_CHANNELS_PER_ENC 2
+
+/* rgb to yuv color space conversion matrix */
+static struct sde_csc_cfg sde_csc_10bit_convert[SDE_MAX_CSC] = {
+	[SDE_CSC_RGB2YUV_601L] = {
+		{
+			TO_S15D16(0x0083), TO_S15D16(0x0102), TO_S15D16(0x0032),
+			TO_S15D16(0xffb4), TO_S15D16(0xff6b), TO_S15D16(0x00e1),
+			TO_S15D16(0x00e1), TO_S15D16(0xff44), TO_S15D16(0xffdb),
+		},
+		{ 0x0, 0x0, 0x0,},
+		{ 0x0040, 0x0200, 0x0200,},
+		{ 0x0, 0x3ff, 0x0, 0x3ff, 0x0, 0x3ff,},
+		{ 0x0040, 0x03ac, 0x0040, 0x03c0, 0x0040, 0x03c0,},
+	},
+
+	[SDE_CSC_RGB2YUV_601FR] = {
+		{
+			TO_S15D16(0x0099), TO_S15D16(0x012d), TO_S15D16(0x003a),
+			TO_S15D16(0xffaa), TO_S15D16(0xff56), TO_S15D16(0x0100),
+			TO_S15D16(0x0100), TO_S15D16(0xff2a), TO_S15D16(0xffd6),
+		},
+		{ 0x0, 0x0, 0x0,},
+		{ 0x0000, 0x0200, 0x0200,},
+		{ 0x0, 0x3ff, 0x0, 0x3ff, 0x0, 0x3ff,},
+		{ 0x0, 0x3ff, 0x0, 0x3ff, 0x0, 0x3ff,},
+	},
+
+	[SDE_CSC_RGB2YUV_709L] = {
+		{
+			TO_S15D16(0x005d), TO_S15D16(0x013a), TO_S15D16(0x0020),
+			TO_S15D16(0xffcc), TO_S15D16(0xff53), TO_S15D16(0x00e1),
+			TO_S15D16(0x00e1), TO_S15D16(0xff34), TO_S15D16(0xffeb),
+		},
+		{ 0x0, 0x0, 0x0,},
+		{ 0x0040, 0x0200, 0x0200,},
+		{ 0x0, 0x3ff, 0x0, 0x3ff, 0x0, 0x3ff,},
+		{ 0x0040, 0x03ac, 0x0040, 0x03c0, 0x0040, 0x03c0,},
+	},
+
+	[SDE_CSC_RGB2YUV_709FR] = {
+		{
+			TO_S15D16(0x006c), TO_S15D16(0x016e), TO_S15D16(0x0024),
+			TO_S15D16(0xffc4), TO_S15D16(0xff37), TO_S15D16(0x0105),
+			TO_S15D16(0x0105), TO_S15D16(0xff13), TO_S15D16(0xffe8),
+		},
+		{ 0x0, 0x0, 0x0,},
+		{ 0x0000, 0x0200, 0x0200,},
+		{ 0x0, 0x3ff, 0x0, 0x3ff, 0x0, 0x3ff,},
+		{ 0x0, 0x3ff, 0x0, 0x3ff, 0x0, 0x3ff,},
+	},
+
+	[SDE_CSC_RGB2YUV_2020L] = {
+		{
+			TO_S15D16(0x0073), TO_S15D16(0x0129), TO_S15D16(0x001a),
+			TO_S15D16(0xffc1), TO_S15D16(0xff5e), TO_S15D16(0x00e0),
+			TO_S15D16(0x00e0), TO_S15D16(0xff32), TO_S15D16(0xffee),
+		},
+		{ 0x0, 0x0, 0x0,},
+		{ 0x0040, 0x0200, 0x0200,},
+		{ 0x0, 0x3ff, 0x0, 0x3ff, 0x0, 0x3ff,},
+		{ 0x0040, 0x03ac, 0x0040, 0x03c0, 0x0040, 0x03c0,},
+	},
+
+	[SDE_CSC_RGB2YUV_2020FR] = {
+		{
+			TO_S15D16(0x0086), TO_S15D16(0x015b), TO_S15D16(0x001e),
+			TO_S15D16(0xffb9), TO_S15D16(0xff47), TO_S15D16(0x0100),
+			TO_S15D16(0x0100), TO_S15D16(0xff15), TO_S15D16(0xffeb),
+		},
+		{ 0x0, 0x0, 0x0,},
+		{ 0x0, 0x0200, 0x0200,},
+		{ 0x0, 0x3ff, 0x0, 0x3ff, 0x0, 0x3ff,},
+		{ 0x0, 0x3ff, 0x0, 0x3ff, 0x0, 0x3ff,},
+	},
+};
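+
+/*
+ * Editor's note: each entry above holds, in order, the 3x3 CSC matrix
+ * coefficients (TO_S15D16 fixed point), the pre- and post-bias vectors, and
+ * the pre- and post-clamp (limit) ranges; this layout follows struct
+ * sde_csc_cfg as consumed by setup_csc_data() (an assumption from context).
+ */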
+
+/**
+ * struct sde_encoder_virt - virtual encoder. Container of one or more physical
+ *	encoders. Virtual encoder manages one "logical" display. Physical
+ *	encoders manage one intf block, tied to a specific panel/sub-panel.
+ *	Virtual encoder defers as much as possible to the physical encoders.
+ *	Virtual encoder registers itself with the DRM Framework as the encoder.
+ * @base:		drm_encoder base class for registration with DRM
+ * @enc_spinlock:	Virtual-Encoder-Wide Spin Lock for IRQ purposes
+ * @bus_scaling_client:	Client handle to the bus scaling interface
+ * @display_num_of_h_tiles: Number of horizontal tiles in this display
+ * @num_phys_encs:	Actual number of physical encoders contained.
+ * @phys_encs:		Container of physical encoders managed.
+ * @cur_master:		Pointer to the current master in this mode. Only
+ *			valid after enable; cleared at disable.
+ * @hw_pp:		Handles to the pingpong blocks used for the display.
+ *			The number of pingpong blocks used can differ from
+ *			num_phys_encs.
+ * @crtc_vblank_cb:	Callback into the upper layer / CRTC for
+ *			notification of the VBLANK
+ * @crtc_vblank_cb_data:	Data from upper layer for VBLANK notification
+ * @debugfs_root:		Debug file system root file node
+ * @enc_lock:			Lock around physical encoder create/destroy and
+ *				access.
+ * @frame_busy_mask:		Bitmask tracking which phys_encs are still busy
+ *				processing the current command.
+ *				Bit0 = phys_encs[0] etc.
+ * @crtc_frame_event_cb:	callback handler for frame event
+ * @crtc_frame_event_cb_data:	callback handler private data
+ * @crtc_request_flip_cb:	callback handler for requesting page-flip event
+ * @crtc_request_flip_cb_data:	callback handler private data
+ * @crtc_frame_event:		callback event
+ * @frame_done_timeout:		frame done timeout in ms
+ * @frame_done_timer:		watchdog timer for frame done event
+ */
+struct sde_encoder_virt {
+	struct drm_encoder base;
+	spinlock_t enc_spinlock;
+	uint32_t bus_scaling_client;
+
+	uint32_t display_num_of_h_tiles;
+
+	unsigned int num_phys_encs;
+	struct sde_encoder_phys *phys_encs[MAX_PHYS_ENCODERS_PER_VIRTUAL];
+	struct sde_encoder_phys *cur_master;
+	struct sde_hw_pingpong *hw_pp[MAX_CHANNELS_PER_ENC];
+
+	void (*crtc_vblank_cb)(void *);
+	void *crtc_vblank_cb_data;
+
+	struct dentry *debugfs_root;
+	struct mutex enc_lock;
+	DECLARE_BITMAP(frame_busy_mask, MAX_PHYS_ENCODERS_PER_VIRTUAL);
+	void (*crtc_frame_event_cb)(void *, u32 event);
+	void *crtc_frame_event_cb_data;
+	void (*crtc_request_flip_cb)(void *);
+	void *crtc_request_flip_cb_data;
+	u32 crtc_frame_event;
+	atomic_t frame_done_timeout;
+	struct timer_list frame_done_timer;
+};
+
+#define to_sde_encoder_virt(x) container_of(x, struct sde_encoder_virt, base)
+
+void sde_encoder_get_hw_resources(struct drm_encoder *drm_enc,
+		struct sde_encoder_hw_resources *hw_res,
+		struct drm_connector_state *conn_state)
+{
+	struct sde_encoder_virt *sde_enc = NULL;
+	int i = 0;
+
+	if (!hw_res || !drm_enc || !conn_state) {
+		SDE_ERROR("invalid argument(s), drm_enc %d, res %d, state %d\n",
+				drm_enc != 0, hw_res != 0, conn_state != 0);
+		return;
+	}
+
+	sde_enc = to_sde_encoder_virt(drm_enc);
+	SDE_DEBUG_ENC(sde_enc, "\n");
+
+	/* Query resources used by phys encs, expected to be without overlap */
+	memset(hw_res, 0, sizeof(*hw_res));
+	hw_res->display_num_of_h_tiles = sde_enc->display_num_of_h_tiles;
+
+	for (i = 0; i < sde_enc->num_phys_encs; i++) {
+		struct sde_encoder_phys *phys = sde_enc->phys_encs[i];
+
+		if (phys && phys->ops.get_hw_resources)
+			phys->ops.get_hw_resources(phys, hw_res, conn_state);
+	}
+}
+
+void sde_encoder_destroy(struct drm_encoder *drm_enc)
+{
+	struct sde_encoder_virt *sde_enc = NULL;
+	int i = 0;
+
+	if (!drm_enc) {
+		SDE_ERROR("invalid encoder\n");
+		return;
+	}
+
+	sde_enc = to_sde_encoder_virt(drm_enc);
+	SDE_DEBUG_ENC(sde_enc, "\n");
+
+	mutex_lock(&sde_enc->enc_lock);
+	for (i = 0; i < sde_enc->num_phys_encs; i++) {
+		struct sde_encoder_phys *phys = sde_enc->phys_encs[i];
+
+		if (phys && phys->ops.destroy) {
+			phys->ops.destroy(phys);
+			--sde_enc->num_phys_encs;
+			sde_enc->phys_encs[i] = NULL;
+		}
+	}
+
+	if (sde_enc->num_phys_encs)
+		SDE_ERROR_ENC(sde_enc, "expected 0 num_phys_encs not %d\n",
+				sde_enc->num_phys_encs);
+	sde_enc->num_phys_encs = 0;
+	mutex_unlock(&sde_enc->enc_lock);
+
+	drm_encoder_cleanup(drm_enc);
+	debugfs_remove_recursive(sde_enc->debugfs_root);
+	mutex_destroy(&sde_enc->enc_lock);
+
+	kfree(sde_enc);
+}
+
+void sde_encoder_helper_split_config(
+		struct sde_encoder_phys *phys_enc,
+		enum sde_intf interface)
+{
+	struct sde_encoder_virt *sde_enc;
+	struct split_pipe_cfg cfg = { 0 };
+	struct sde_hw_mdp *hw_mdptop;
+	enum sde_rm_topology_name topology;
+
+	if (!phys_enc || !phys_enc->hw_mdptop || !phys_enc->parent) {
+		SDE_ERROR("invalid arg(s), encoder %d\n", phys_enc != 0);
+		return;
+	}
+
+	sde_enc = to_sde_encoder_virt(phys_enc->parent);
+	hw_mdptop = phys_enc->hw_mdptop;
+	cfg.en = phys_enc->split_role != ENC_ROLE_SOLO;
+	cfg.mode = phys_enc->intf_mode;
+	cfg.intf = interface;
+
+	if (cfg.en && phys_enc->ops.needs_single_flush &&
+			phys_enc->ops.needs_single_flush(phys_enc))
+		cfg.split_flush_en = true;
+
+	topology = sde_connector_get_topology_name(phys_enc->connector);
+	if (topology == SDE_RM_TOPOLOGY_PPSPLIT)
+		cfg.pp_split_slave = cfg.intf;
+	else
+		cfg.pp_split_slave = INTF_MAX;
+
+	if (phys_enc->split_role != ENC_ROLE_SLAVE) {
+		/* master/solo encoder */
+		SDE_DEBUG_ENC(sde_enc, "enable %d\n", cfg.en);
+
+		if (hw_mdptop->ops.setup_split_pipe)
+			hw_mdptop->ops.setup_split_pipe(hw_mdptop, &cfg);
+	} else {
+		/*
+		 * slave encoder
+		 * - determine split index from master index,
+		 *   assume master is first pp
+		 */
+		cfg.pp_split_index = sde_enc->hw_pp[0]->idx - PINGPONG_0;
+		SDE_DEBUG_ENC(sde_enc, "master using pp%d\n",
+				cfg.pp_split_index);
+
+		if (hw_mdptop->ops.setup_pp_split)
+			hw_mdptop->ops.setup_pp_split(hw_mdptop, &cfg);
+	}
+}
+
+static int sde_encoder_virt_atomic_check(
+		struct drm_encoder *drm_enc,
+		struct drm_crtc_state *crtc_state,
+		struct drm_connector_state *conn_state)
+{
+	struct sde_encoder_virt *sde_enc;
+	struct msm_drm_private *priv;
+	struct sde_kms *sde_kms;
+	const struct drm_display_mode *mode;
+	struct drm_display_mode *adj_mode;
+	int i = 0;
+	int ret = 0;
+
+	if (!drm_enc || !crtc_state || !conn_state) {
+		SDE_ERROR("invalid arg(s), drm_enc %d, crtc/conn state %d/%d\n",
+				drm_enc != 0, crtc_state != 0, conn_state != 0);
+		return -EINVAL;
+	}
+
+	sde_enc = to_sde_encoder_virt(drm_enc);
+	SDE_DEBUG_ENC(sde_enc, "\n");
+
+	priv = drm_enc->dev->dev_private;
+	sde_kms = to_sde_kms(priv->kms);
+	mode = &crtc_state->mode;
+	adj_mode = &crtc_state->adjusted_mode;
+	SDE_EVT32(DRMID(drm_enc));
+
+	/* perform atomic check on the first physical encoder (master) */
+	for (i = 0; i < sde_enc->num_phys_encs; i++) {
+		struct sde_encoder_phys *phys = sde_enc->phys_encs[i];
+
+		if (phys && phys->ops.atomic_check)
+			ret = phys->ops.atomic_check(phys, crtc_state,
+					conn_state);
+		else if (phys && phys->ops.mode_fixup)
+			if (!phys->ops.mode_fixup(phys, mode, adj_mode))
+				ret = -EINVAL;
+
+		if (ret) {
+			SDE_ERROR_ENC(sde_enc,
+					"mode unsupported, phys idx %d\n", i);
+			break;
+		}
+	}
+
+	/* Reserve dynamic resources now. Indicating AtomicTest phase */
+	if (!ret)
+		ret = sde_rm_reserve(&sde_kms->rm, drm_enc, crtc_state,
+				conn_state, true);
+
+	if (!ret)
+		drm_mode_set_crtcinfo(adj_mode, 0);
+
+	SDE_EVT32(DRMID(drm_enc), adj_mode->flags, adj_mode->private_flags);
+
+	return ret;
+}
+
+static void sde_encoder_virt_mode_set(struct drm_encoder *drm_enc,
+				      struct drm_display_mode *mode,
+				      struct drm_display_mode *adj_mode)
+{
+	struct sde_encoder_virt *sde_enc;
+	struct msm_drm_private *priv;
+	struct sde_kms *sde_kms;
+	struct list_head *connector_list;
+	struct drm_connector *conn = NULL, *conn_iter;
+	struct sde_rm_hw_iter pp_iter;
+	int i = 0, ret;
+
+	if (!drm_enc) {
+		SDE_ERROR("invalid encoder\n");
+		return;
+	}
+
+	sde_enc = to_sde_encoder_virt(drm_enc);
+	SDE_DEBUG_ENC(sde_enc, "\n");
+
+	priv = drm_enc->dev->dev_private;
+	sde_kms = to_sde_kms(priv->kms);
+	connector_list = &sde_kms->dev->mode_config.connector_list;
+
+	SDE_EVT32(DRMID(drm_enc));
+
+	list_for_each_entry(conn_iter, connector_list, head)
+		if (conn_iter->encoder == drm_enc)
+			conn = conn_iter;
+
+	if (!conn) {
+		SDE_ERROR_ENC(sde_enc, "failed to find attached connector\n");
+		return;
+	} else if (!conn->state) {
+		SDE_ERROR_ENC(sde_enc, "invalid connector state\n");
+		return;
+	}
+
+	/* Reserve dynamic resources now. Indicating non-AtomicTest phase */
+	ret = sde_rm_reserve(&sde_kms->rm, drm_enc, drm_enc->crtc->state,
+			conn->state, false);
+	if (ret) {
+		SDE_ERROR_ENC(sde_enc,
+				"failed to reserve hw resources, %d\n", ret);
+		return;
+	}
+
+	sde_rm_init_hw_iter(&pp_iter, drm_enc->base.id, SDE_HW_BLK_PINGPONG);
+	for (i = 0; i < MAX_CHANNELS_PER_ENC; i++) {
+		sde_enc->hw_pp[i] = NULL;
+		if (!sde_rm_get_hw(&sde_kms->rm, &pp_iter))
+			break;
+		sde_enc->hw_pp[i] = (struct sde_hw_pingpong *) pp_iter.hw;
+	}
+
+	for (i = 0; i < sde_enc->num_phys_encs; i++) {
+		struct sde_encoder_phys *phys = sde_enc->phys_encs[i];
+
+		if (phys) {
+			if (!sde_enc->hw_pp[i]) {
+				SDE_ERROR_ENC(sde_enc,
+				    "invalid pingpong block for the encoder\n");
+				return;
+			}
+			phys->hw_pp = sde_enc->hw_pp[i];
+			phys->connector = conn->state->connector;
+			if (phys->ops.mode_set)
+				phys->ops.mode_set(phys, mode, adj_mode);
+		}
+	}
+}
+
+static void sde_encoder_virt_enable(struct drm_encoder *drm_enc)
+{
+	struct sde_encoder_virt *sde_enc = NULL;
+	struct msm_drm_private *priv;
+	struct sde_kms *sde_kms;
+	int i = 0;
+
+	if (!drm_enc) {
+		SDE_ERROR("invalid encoder\n");
+		return;
+	} else if (!drm_enc->dev) {
+		SDE_ERROR("invalid dev\n");
+		return;
+	} else if (!drm_enc->dev->dev_private) {
+		SDE_ERROR("invalid dev_private\n");
+		return;
+	}
+
+	sde_enc = to_sde_encoder_virt(drm_enc);
+	priv = drm_enc->dev->dev_private;
+	sde_kms = to_sde_kms(priv->kms);
+
+	SDE_DEBUG_ENC(sde_enc, "\n");
+	SDE_EVT32(DRMID(drm_enc));
+
+	sde_power_resource_enable(&priv->phandle, sde_kms->core_client, true);
+
+	sde_enc->cur_master = NULL;
+	for (i = 0; i < sde_enc->num_phys_encs; i++) {
+		struct sde_encoder_phys *phys = sde_enc->phys_encs[i];
+
+		if (phys) {
+			atomic_set(&phys->vsync_cnt, 0);
+			atomic_set(&phys->underrun_cnt, 0);
+
+			if (phys->ops.is_master && phys->ops.is_master(phys)) {
+				SDE_DEBUG_ENC(sde_enc,
+						"master is now idx %d\n", i);
+				sde_enc->cur_master = phys;
+			} else if (phys->ops.enable) {
+				phys->ops.enable(phys);
+			}
+		}
+	}
+
+	if (!sde_enc->cur_master)
+		SDE_ERROR("virt encoder has no master! num_phys %d\n", i);
+	else if (sde_enc->cur_master->ops.enable)
+		sde_enc->cur_master->ops.enable(sde_enc->cur_master);
+}
+
+static void sde_encoder_virt_disable(struct drm_encoder *drm_enc)
+{
+	struct sde_encoder_virt *sde_enc = NULL;
+	struct msm_drm_private *priv;
+	struct sde_kms *sde_kms;
+	int i = 0;
+
+	if (!drm_enc) {
+		SDE_ERROR("invalid encoder\n");
+		return;
+	} else if (!drm_enc->dev) {
+		SDE_ERROR("invalid dev\n");
+		return;
+	} else if (!drm_enc->dev->dev_private) {
+		SDE_ERROR("invalid dev_private\n");
+		return;
+	}
+
+	sde_enc = to_sde_encoder_virt(drm_enc);
+	SDE_DEBUG_ENC(sde_enc, "\n");
+
+	priv = drm_enc->dev->dev_private;
+	sde_kms = to_sde_kms(priv->kms);
+
+	SDE_EVT32(DRMID(drm_enc));
+
+	for (i = 0; i < sde_enc->num_phys_encs; i++) {
+		struct sde_encoder_phys *phys = sde_enc->phys_encs[i];
+
+		if (phys) {
+			if (phys->ops.disable && !phys->ops.is_master(phys))
+				phys->ops.disable(phys);
+			phys->connector = NULL;
+			atomic_set(&phys->vsync_cnt, 0);
+			atomic_set(&phys->underrun_cnt, 0);
+		}
+	}
+
+	/* after phys waits for frame-done, should be no more frames pending */
+	if (atomic_xchg(&sde_enc->frame_done_timeout, 0)) {
+		SDE_ERROR("enc%d timeout pending\n", drm_enc->base.id);
+		del_timer_sync(&sde_enc->frame_done_timer);
+	}
+
+	if (sde_enc->cur_master && sde_enc->cur_master->ops.disable)
+		sde_enc->cur_master->ops.disable(sde_enc->cur_master);
+
+	sde_enc->cur_master = NULL;
+	SDE_DEBUG_ENC(sde_enc, "cleared master\n");
+
+	sde_rm_release(&sde_kms->rm, drm_enc);
+
+	sde_power_resource_enable(&priv->phandle, sde_kms->core_client, false);
+}
+
+static const struct drm_encoder_helper_funcs sde_encoder_helper_funcs = {
+	.mode_set = sde_encoder_virt_mode_set,
+	.disable = sde_encoder_virt_disable,
+	.enable = sde_encoder_virt_enable,
+	.atomic_check = sde_encoder_virt_atomic_check,
+};
+
+static const struct drm_encoder_funcs sde_encoder_funcs = {
+		.destroy = sde_encoder_destroy,
+};
+
+static enum sde_intf sde_encoder_get_intf(struct sde_mdss_cfg *catalog,
+		enum sde_intf_type type, u32 controller_id)
+{
+	int i = 0;
+
+	for (i = 0; i < catalog->intf_count; i++) {
+		if (catalog->intf[i].type == type
+		    && catalog->intf[i].controller_id == controller_id) {
+			return catalog->intf[i].id;
+		}
+	}
+
+	return INTF_MAX;
+}
+
+static enum sde_wb sde_encoder_get_wb(struct sde_mdss_cfg *catalog,
+		enum sde_intf_type type, u32 controller_id)
+{
+	if (controller_id < catalog->wb_count)
+		return catalog->wb[controller_id].id;
+
+	return WB_MAX;
+}
+
+static void sde_encoder_vblank_callback(struct drm_encoder *drm_enc,
+		struct sde_encoder_phys *phy_enc)
+{
+	struct sde_encoder_virt *sde_enc = NULL;
+	unsigned long lock_flags;
+
+	if (!drm_enc || !phy_enc)
+		return;
+
+	SDE_ATRACE_BEGIN("encoder_vblank_callback");
+	sde_enc = to_sde_encoder_virt(drm_enc);
+
+	spin_lock_irqsave(&sde_enc->enc_spinlock, lock_flags);
+	if (sde_enc->crtc_vblank_cb)
+		sde_enc->crtc_vblank_cb(sde_enc->crtc_vblank_cb_data);
+	spin_unlock_irqrestore(&sde_enc->enc_spinlock, lock_flags);
+
+	atomic_inc(&phy_enc->vsync_cnt);
+	SDE_ATRACE_END("encoder_vblank_callback");
+}
+
+static void sde_encoder_underrun_callback(struct drm_encoder *drm_enc,
+		struct sde_encoder_phys *phy_enc)
+{
+	if (!phy_enc)
+		return;
+
+	SDE_ATRACE_BEGIN("encoder_underrun_callback");
+	atomic_inc(&phy_enc->underrun_cnt);
+	SDE_EVT32(DRMID(drm_enc), atomic_read(&phy_enc->underrun_cnt));
+
+	trace_sde_encoder_underrun(DRMID(drm_enc),
+		atomic_read(&phy_enc->underrun_cnt));
+	SDE_DBG_CTRL("stop_ftrace");
+	SDE_DBG_CTRL("panic_underrun");
+
+	SDE_ATRACE_END("encoder_underrun_callback");
+}
+
+void sde_encoder_register_vblank_callback(struct drm_encoder *drm_enc,
+		void (*vbl_cb)(void *), void *vbl_data)
+{
+	struct sde_encoder_virt *sde_enc;
+	unsigned long lock_flags;
+	bool enable;
+	int i;
+
+	if (!drm_enc) {
+		SDE_ERROR("invalid encoder\n");
+		return;
+	}
+
+	sde_enc = to_sde_encoder_virt(drm_enc);
+	enable = vbl_cb != NULL;
+
+	SDE_DEBUG_ENC(sde_enc, "\n");
+	SDE_EVT32(DRMID(drm_enc), enable);
+
+	spin_lock_irqsave(&sde_enc->enc_spinlock, lock_flags);
+	sde_enc->crtc_vblank_cb = vbl_cb;
+	sde_enc->crtc_vblank_cb_data = vbl_data;
+	spin_unlock_irqrestore(&sde_enc->enc_spinlock, lock_flags);
+
+	for (i = 0; i < sde_enc->num_phys_encs; i++) {
+		struct sde_encoder_phys *phys = sde_enc->phys_encs[i];
+
+		if (phys && phys->ops.control_vblank_irq)
+			phys->ops.control_vblank_irq(phys, enable);
+	}
+}
+
+void sde_encoder_register_frame_event_callback(struct drm_encoder *drm_enc,
+		void (*frame_event_cb)(void *, u32 event),
+		void *frame_event_cb_data)
+{
+	struct sde_encoder_virt *sde_enc;
+	unsigned long lock_flags;
+	bool enable;
+
+	if (!drm_enc) {
+		SDE_ERROR("invalid encoder\n");
+		return;
+	}
+
+	sde_enc = to_sde_encoder_virt(drm_enc);
+	enable = frame_event_cb != NULL;
+
+	SDE_DEBUG_ENC(sde_enc, "\n");
+	SDE_EVT32(DRMID(drm_enc), enable, 0);
+
+	spin_lock_irqsave(&sde_enc->enc_spinlock, lock_flags);
+	sde_enc->crtc_frame_event_cb = frame_event_cb;
+	sde_enc->crtc_frame_event_cb_data = frame_event_cb_data;
+	spin_unlock_irqrestore(&sde_enc->enc_spinlock, lock_flags);
+}
+
+void sde_encoder_register_request_flip_callback(struct drm_encoder *drm_enc,
+		void (*request_flip_cb)(void *),
+		void *request_flip_cb_data)
+{
+	struct sde_encoder_virt *sde_enc;
+	unsigned long lock_flags;
+
+	if (!drm_enc) {
+		SDE_ERROR("invalid encoder\n");
+		return;
+	}
+
+	sde_enc = to_sde_encoder_virt(drm_enc);
+
+	spin_lock_irqsave(&sde_enc->enc_spinlock, lock_flags);
+	sde_enc->crtc_request_flip_cb = request_flip_cb;
+	sde_enc->crtc_request_flip_cb_data = request_flip_cb_data;
+	spin_unlock_irqrestore(&sde_enc->enc_spinlock, lock_flags);
+}
+
+static void sde_encoder_frame_done_callback(
+		struct drm_encoder *drm_enc,
+		struct sde_encoder_phys *ready_phys, u32 event)
+{
+	struct sde_encoder_virt *sde_enc = to_sde_encoder_virt(drm_enc);
+	unsigned int i;
+
+	/* One of the physical encoders has become idle */
+	for (i = 0; i < sde_enc->num_phys_encs; i++)
+		if (sde_enc->phys_encs[i] == ready_phys) {
+			clear_bit(i, sde_enc->frame_busy_mask);
+			sde_enc->crtc_frame_event |= event;
+			SDE_EVT32(DRMID(drm_enc), i,
+					sde_enc->frame_busy_mask[0]);
+		}
+
+	if (!sde_enc->frame_busy_mask[0]) {
+		atomic_set(&sde_enc->frame_done_timeout, 0);
+		del_timer(&sde_enc->frame_done_timer);
+
+		if (sde_enc->crtc_frame_event_cb)
+			sde_enc->crtc_frame_event_cb(
+					sde_enc->crtc_frame_event_cb_data,
+					sde_enc->crtc_frame_event);
+	}
+}
+
+/**
+ * _sde_encoder_trigger_flush - trigger flush for a physical encoder
+ * @drm_enc: Pointer to drm encoder structure
+ * @phys: Pointer to physical encoder structure
+ * @extra_flush_bits: Additional bit mask to include in flush trigger
+ */
+static inline void _sde_encoder_trigger_flush(struct drm_encoder *drm_enc,
+		struct sde_encoder_phys *phys, uint32_t extra_flush_bits)
+{
+	struct sde_hw_ctl *ctl;
+	int pending_kickoff_cnt;
+
+	if (!drm_enc || !phys) {
+		SDE_ERROR("invalid argument(s), drm_enc %d, phys_enc %d\n",
+				drm_enc != 0, phys != 0);
+		return;
+	}
+
+	ctl = phys->hw_ctl;
+	if (!ctl || !ctl->ops.trigger_flush) {
+		SDE_ERROR("missing trigger cb\n");
+		return;
+	}
+
+	pending_kickoff_cnt = sde_encoder_phys_inc_pending(phys);
+	SDE_EVT32(DRMID(&to_sde_encoder_virt(drm_enc)->base),
+			phys->intf_idx, pending_kickoff_cnt);
+
+	if (extra_flush_bits && ctl->ops.update_pending_flush)
+		ctl->ops.update_pending_flush(ctl, extra_flush_bits);
+
+	ctl->ops.trigger_flush(ctl);
+	SDE_EVT32(DRMID(drm_enc), ctl->idx);
+}
+
+/**
+ * _sde_encoder_trigger_start - trigger start for a physical encoder
+ * @phys: Pointer to physical encoder structure
+ */
+static inline void _sde_encoder_trigger_start(struct sde_encoder_phys *phys)
+{
+	if (!phys) {
+		SDE_ERROR("invalid encoder\n");
+		return;
+	}
+
+	if (phys->ops.trigger_start && phys->enable_state != SDE_ENC_DISABLED)
+		phys->ops.trigger_start(phys);
+}
+
+void sde_encoder_helper_trigger_start(struct sde_encoder_phys *phys_enc)
+{
+	struct sde_hw_ctl *ctl;
+	int ctl_idx = -1;
+
+	if (!phys_enc) {
+		SDE_ERROR("invalid encoder\n");
+		return;
+	}
+
+	ctl = phys_enc->hw_ctl;
+	if (ctl && ctl->ops.trigger_start) {
+		ctl->ops.trigger_start(ctl);
+		ctl_idx = ctl->idx;
+	}
+
+	if (phys_enc && phys_enc->parent)
+		SDE_EVT32(DRMID(phys_enc->parent), ctl_idx);
+}
+
+int sde_encoder_helper_wait_event_timeout(
+		int32_t drm_id,
+		int32_t hw_id,
+		wait_queue_head_t *wq,
+		atomic_t *cnt,
+		s64 timeout_ms)
+{
+	int rc = 0;
+	s64 expected_time = ktime_to_ms(ktime_get()) + timeout_ms;
+	s64 wait_jiffies = msecs_to_jiffies(timeout_ms);
+	s64 time;
+
+	do {
+		rc = wait_event_timeout(*wq, atomic_read(cnt) == 0,
+				wait_jiffies);
+		time = ktime_to_ms(ktime_get());
+
+		SDE_EVT32(drm_id, hw_id, rc, time, expected_time,
+				atomic_read(cnt));
+	/*
+	 * if we timed out (rc == 0) but the counter is still non-zero and
+	 * the deadline has not passed, wait again
+	 */
+	} while (atomic_read(cnt) && (rc == 0) && (time < expected_time));
+
+	return rc;
+}
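+
+/*
+ * Editor's note: like wait_event_timeout(), the helper above returns 0 on
+ * timeout and the remaining jiffies otherwise; a return of 0 with a
+ * non-zero counter can be treated by the caller as a real timeout.
+ */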
+
+/**
+ * _sde_encoder_kickoff_phys - handle physical encoder kickoff
+ *	Iterate through the physical encoders and perform consolidated flush
+ *	and/or control start triggering as needed. This is done in the virtual
+ *	encoder rather than the individual physical ones in order to handle
+ *	use cases that require visibility into multiple physical encoders at
+ *	a time.
+ * @sde_enc: Pointer to virtual encoder structure
+ */
+static void _sde_encoder_kickoff_phys(struct sde_encoder_virt *sde_enc)
+{
+	struct sde_hw_ctl *ctl;
+	uint32_t i, pending_flush;
+	unsigned long lock_flags;
+
+	if (!sde_enc) {
+		SDE_ERROR("invalid encoder\n");
+		return;
+	}
+
+	pending_flush = 0x0;
+	sde_enc->crtc_frame_event = 0;
+
+	/* update pending counts and trigger kickoff ctl flush atomically */
+	spin_lock_irqsave(&sde_enc->enc_spinlock, lock_flags);
+
+	/* don't perform flush/start operations for slave encoders */
+	for (i = 0; i < sde_enc->num_phys_encs; i++) {
+		struct sde_encoder_phys *phys = sde_enc->phys_encs[i];
+
+		if (!phys || phys->enable_state == SDE_ENC_DISABLED)
+			continue;
+
+		ctl = phys->hw_ctl;
+		if (!ctl)
+			continue;
+
+		set_bit(i, sde_enc->frame_busy_mask);
+
+		if (!phys->ops.needs_single_flush ||
+				!phys->ops.needs_single_flush(phys))
+			_sde_encoder_trigger_flush(&sde_enc->base, phys, 0x0);
+		else if (ctl->ops.get_pending_flush)
+			pending_flush |= ctl->ops.get_pending_flush(ctl);
+	}
+
+	/* for split flush, combine pending flush masks and send to master */
+	if (pending_flush && sde_enc->cur_master) {
+		_sde_encoder_trigger_flush(
+				&sde_enc->base,
+				sde_enc->cur_master,
+				pending_flush);
+	}
+
+	/* HW flush has happened, request a flip complete event now */
+	if (sde_enc->crtc_request_flip_cb)
+		sde_enc->crtc_request_flip_cb(
+		sde_enc->crtc_request_flip_cb_data);
+
+	_sde_encoder_trigger_start(sde_enc->cur_master);
+
+	spin_unlock_irqrestore(&sde_enc->enc_spinlock, lock_flags);
+}
+
+void sde_encoder_prepare_for_kickoff(struct drm_encoder *drm_enc)
+{
+	struct sde_encoder_virt *sde_enc;
+	struct sde_encoder_phys *phys;
+	struct drm_connector *conn_mas = NULL;
+	unsigned int i;
+	enum sde_csc_type conn_csc;
+	struct drm_display_mode *mode;
+	struct sde_hw_cdm *hw_cdm;
+	int mode_is_yuv = 0;
+	int rc;
+
+	if (!drm_enc) {
+		SDE_ERROR("invalid encoder\n");
+		return;
+	}
+	sde_enc = to_sde_encoder_virt(drm_enc);
+
+	SDE_DEBUG_ENC(sde_enc, "\n");
+	SDE_EVT32(DRMID(drm_enc));
+
+	/* prepare for next kickoff, may include waiting on previous kickoff */
+	for (i = 0; i < sde_enc->num_phys_encs; i++) {
+		phys = sde_enc->phys_encs[i];
+		if (phys && phys->ops.prepare_for_kickoff)
+			phys->ops.prepare_for_kickoff(phys);
+	}
+
+	if (sde_enc->cur_master && sde_enc->cur_master->connector) {
+		conn_mas = sde_enc->cur_master->connector;
+		rc = sde_connector_pre_kickoff(conn_mas);
+		if (rc)
+			SDE_ERROR_ENC(sde_enc,
+				"kickoff conn%d failed rc %d\n",
+				conn_mas->base.id,
+				rc);
+
+		for (i = 0; i < sde_enc->num_phys_encs; i++) {
+			phys = sde_enc->phys_encs[i];
+			if (phys) {
+				mode = &phys->cached_mode;
+				mode_is_yuv = (mode->private_flags &
+					MSM_MODE_FLAG_COLOR_FORMAT_YCBCR420);
+			}
+			/*
+			 * Determine which CSC matrix the CDM block should
+			 * use, based on the connector HDR state.
+			 */
+			conn_csc = sde_connector_get_csc_type(conn_mas);
+			if (phys && mode_is_yuv &&
+					phys->enc_cdm_csc != conn_csc) {
+				/* guard against a missing CDM block/op */
+				if (phys->hw_cdm &&
+					phys->hw_cdm->ops.setup_csc_data) {
+					hw_cdm = phys->hw_cdm;
+					rc = hw_cdm->ops.setup_csc_data(hw_cdm,
+					&sde_csc_10bit_convert[conn_csc]);
+
+					if (rc)
+						SDE_ERROR_ENC(sde_enc,
+							"CSC setup failed rc %d\n",
+							rc);
+					SDE_DEBUG_ENC(sde_enc,
+						"updating CSC %d to %d\n",
+						phys->enc_cdm_csc,
+						conn_csc);
+					phys->enc_cdm_csc = conn_csc;
+
+				}
+			}
+		}
+	}
+}
+
+void sde_encoder_kickoff(struct drm_encoder *drm_enc)
+{
+	struct sde_encoder_virt *sde_enc;
+	struct sde_encoder_phys *phys;
+	unsigned int i;
+
+	if (!drm_enc) {
+		SDE_ERROR("invalid encoder\n");
+		return;
+	}
+	SDE_ATRACE_BEGIN("encoder_kickoff");
+	sde_enc = to_sde_encoder_virt(drm_enc);
+
+	SDE_DEBUG_ENC(sde_enc, "\n");
+
+	atomic_set(&sde_enc->frame_done_timeout,
+			SDE_ENCODER_FRAME_DONE_TIMEOUT * 1000 /
+			drm_enc->crtc->state->adjusted_mode.vrefresh);
+	mod_timer(&sde_enc->frame_done_timer, jiffies +
+		((atomic_read(&sde_enc->frame_done_timeout) * HZ) / 1000));
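+	/*
+	 * Editor's note: SDE_ENCODER_FRAME_DONE_TIMEOUT is in frames, so at
+	 * e.g. 60 vrefresh this arms a 60-frame (1000 ms) watchdog before
+	 * frame_done_timer fires.
+	 */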
+
+	/* All phys encs are ready to go, trigger the kickoff */
+	_sde_encoder_kickoff_phys(sde_enc);
+
+	/* allow phys encs to handle any post-kickoff business */
+	for (i = 0; i < sde_enc->num_phys_encs; i++) {
+		phys = sde_enc->phys_encs[i];
+		if (phys && phys->ops.handle_post_kickoff)
+			phys->ops.handle_post_kickoff(phys);
+	}
+	SDE_ATRACE_END("encoder_kickoff");
+}
+
+static int _sde_encoder_status_show(struct seq_file *s, void *data)
+{
+	struct sde_encoder_virt *sde_enc;
+	int i;
+
+	if (!s || !s->private)
+		return -EINVAL;
+
+	sde_enc = s->private;
+
+	mutex_lock(&sde_enc->enc_lock);
+	for (i = 0; i < sde_enc->num_phys_encs; i++) {
+		struct sde_encoder_phys *phys = sde_enc->phys_encs[i];
+
+		if (!phys)
+			continue;
+
+		seq_printf(s, "intf:%d    vsync:%8d     underrun:%8d    ",
+				phys->intf_idx - INTF_0,
+				atomic_read(&phys->vsync_cnt),
+				atomic_read(&phys->underrun_cnt));
+
+		switch (phys->intf_mode) {
+		case INTF_MODE_VIDEO:
+			seq_puts(s, "mode: video\n");
+			break;
+		case INTF_MODE_CMD:
+			seq_puts(s, "mode: command\n");
+			break;
+		case INTF_MODE_WB_BLOCK:
+			seq_puts(s, "mode: wb block\n");
+			break;
+		case INTF_MODE_WB_LINE:
+			seq_puts(s, "mode: wb line\n");
+			break;
+		default:
+			seq_puts(s, "mode: ???\n");
+			break;
+		}
+	}
+	mutex_unlock(&sde_enc->enc_lock);
+
+	return 0;
+}
+
+static int _sde_encoder_debugfs_status_open(struct inode *inode,
+		struct file *file)
+{
+	return single_open(file, _sde_encoder_status_show, inode->i_private);
+}
+
+static void _sde_set_misr_params(struct sde_encoder_phys *phys, u32 enable,
+					u32 frame_count)
+{
+	int j;
+
+	if (!phys->misr_map)
+		return;
+
+	phys->misr_map->enable = enable;
+
+	/* frame_count is unsigned; just clamp it to the batch size */
+	if (frame_count <= SDE_CRC_BATCH_SIZE)
+		phys->misr_map->frame_count = frame_count;
+	else
+		phys->misr_map->frame_count = SDE_CRC_BATCH_SIZE;
+
+	if (!enable) {
+		phys->misr_map->last_idx = 0;
+		phys->misr_map->frame_count = 0;
+		for (j = 0; j < SDE_CRC_BATCH_SIZE; j++)
+			phys->misr_map->crc_value[j] = 0;
+	}
+}
+
+static ssize_t _sde_encoder_misr_set(struct file *file,
+		const char __user *user_buf, size_t count, loff_t *ppos)
+{
+	struct sde_encoder_virt *sde_enc;
+	struct drm_encoder *drm_enc;
+	int i = 0;
+	char buf[MISR_BUFF_SIZE + 1];
+	size_t buff_copy;
+	u32 enable, frame_count;
+
+	drm_enc = file->private_data;
+	sde_enc = to_sde_encoder_virt(drm_enc);
+
+	buff_copy = min_t(size_t, MISR_BUFF_SIZE, count);
+	if (copy_from_user(buf, user_buf, buff_copy))
+		return -EFAULT;
+
+	buf[buff_copy] = 0; /* end of string */
+
+	if (sscanf(buf, "%u %u", &enable, &frame_count) != 2)
+		return -EINVAL;
+
+	mutex_lock(&sde_enc->enc_lock);
+	for (i = 0; i < sde_enc->num_phys_encs; i++) {
+		struct sde_encoder_phys *phys = sde_enc->phys_encs[i];
+
+		if (!phys || !phys->misr_map || !phys->ops.setup_misr)
+			continue;
+
+		_sde_set_misr_params(phys, enable, frame_count);
+		phys->ops.setup_misr(phys, phys->misr_map);
+	}
+	mutex_unlock(&sde_enc->enc_lock);
+	return count;
+}
+
+static ssize_t _sde_encoder_misr_read(
+		struct file *file,
+		char __user *buff, size_t count, loff_t *ppos)
+{
+	struct sde_encoder_virt *sde_enc;
+	struct drm_encoder *drm_enc;
+	int i = 0, j = 0, len = 0;
+	char buf[512] = {'\0'};
+
+	if (*ppos)
+		return 0;
+
+	drm_enc = file->private_data;
+	sde_enc = to_sde_encoder_virt(drm_enc);
+
+	mutex_lock(&sde_enc->enc_lock);
+	for (i = 0; i < sde_enc->num_phys_encs; i++) {
+		struct sde_encoder_phys *phys = sde_enc->phys_encs[i];
+		struct sde_misr_params *misr_map;
+
+		if (!phys || !phys->misr_map)
+			continue;
+
+		misr_map = phys->misr_map;
+
+		/* scnprintf bounds len so the buffer cannot overflow */
+		len += scnprintf(buf + len, sizeof(buf) - len, "INTF%d\n", i);
+		for (j = 0; j < SDE_CRC_BATCH_SIZE; j++)
+			len += scnprintf(buf + len, sizeof(buf) - len,
+					"%x\n", misr_map->crc_value[j]);
+	}
+	mutex_unlock(&sde_enc->enc_lock);
+
+	if (len < 0 || len >= sizeof(buf))
+		return 0;
+
+	if ((count < sizeof(buf)) || copy_to_user(buff, buf, len))
+		return -EFAULT;
+
+	*ppos += len;   /* increase offset */
+
+	return len;
+}
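+
+/*
+ * Usage sketch (editor's illustration; the debugfs root path is an
+ * assumption):
+ *	echo "1 10" > .../encoderN/misr_data	# enable, 10-frame batch
+ *	cat .../encoderN/misr_data		# dump per-INTF CRC values
+ */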
+
+static void _sde_encoder_init_debugfs(struct drm_encoder *drm_enc,
+	struct sde_encoder_virt *sde_enc, struct sde_kms *sde_kms)
+{
+	static const struct file_operations debugfs_status_fops = {
+		.open =		_sde_encoder_debugfs_status_open,
+		.read =		seq_read,
+		.llseek =	seq_lseek,
+		.release =	single_release,
+	};
+
+	static const struct file_operations debugfs_misr_fops = {
+		.open = simple_open,
+		.read = _sde_encoder_misr_read,
+		.write = _sde_encoder_misr_set,
+	};
+
+	char name[SDE_NAME_SIZE];
+
+	if (!drm_enc || !sde_enc || !sde_kms) {
+		SDE_ERROR("invalid encoder or kms\n");
+		return;
+	}
+
+	snprintf(name, SDE_NAME_SIZE, "encoder%u", drm_enc->base.id);
+
+	/* create overall sub-directory for the encoder */
+	sde_enc->debugfs_root = debugfs_create_dir(name,
+					sde_debugfs_get_root(sde_kms));
+	if (sde_enc->debugfs_root) {
+		/* don't error check these */
+		debugfs_create_file("status", S_IRUGO | S_IWUSR,
+			sde_enc->debugfs_root, sde_enc, &debugfs_status_fops);
+
+		debugfs_create_file("misr_data", S_IRUGO | S_IWUSR,
+			sde_enc->debugfs_root, drm_enc, &debugfs_misr_fops);
+
+	}
+}
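+
+/*
+ * Layout produced by the calls above, under the sde debugfs root:
+ *	encoder<id>/status    - intf mode and vsync/underrun counters (read)
+ *	encoder<id>/misr_data - MISR control (write) and CRC dump (read)
+ */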
+
+static int sde_encoder_virt_add_phys_encs(
+		u32 display_caps,
+		struct sde_encoder_virt *sde_enc,
+		struct sde_enc_phys_init_params *params)
+{
+	struct sde_encoder_phys *enc = NULL;
+
+	SDE_DEBUG_ENC(sde_enc, "\n");
+
+	/*
+	 * We may create up to NUM_PHYS_ENCODER_TYPES physical encoder types
+	 * in this function, check up-front.
+	 */
+	if (sde_enc->num_phys_encs + NUM_PHYS_ENCODER_TYPES >=
+			ARRAY_SIZE(sde_enc->phys_encs)) {
+		SDE_ERROR_ENC(sde_enc, "too many physical encoders %d\n",
+			  sde_enc->num_phys_encs);
+		return -EINVAL;
+	}
+
+	if (display_caps & MSM_DISPLAY_CAP_VID_MODE) {
+		enc = sde_encoder_phys_vid_init(params);
+
+		if (IS_ERR_OR_NULL(enc)) {
+			SDE_ERROR_ENC(sde_enc, "failed to init vid enc: %ld\n",
+				PTR_ERR(enc));
+			return !enc ? -EINVAL : PTR_ERR(enc);
+		}
+
+		sde_enc->phys_encs[sde_enc->num_phys_encs] = enc;
+		++sde_enc->num_phys_encs;
+	}
+
+	if (display_caps & MSM_DISPLAY_CAP_CMD_MODE) {
+		enc = sde_encoder_phys_cmd_init(params);
+
+		if (IS_ERR_OR_NULL(enc)) {
+			SDE_ERROR_ENC(sde_enc, "failed to init cmd enc: %ld\n",
+				PTR_ERR(enc));
+			return !enc ? -EINVAL : PTR_ERR(enc);
+		}
+
+		sde_enc->phys_encs[sde_enc->num_phys_encs] = enc;
+		++sde_enc->num_phys_encs;
+	}
+
+	return 0;
+}
+
+static int sde_encoder_virt_add_phys_enc_wb(struct sde_encoder_virt *sde_enc,
+		struct sde_enc_phys_init_params *params)
+{
+	struct sde_encoder_phys *enc = NULL;
+
+	if (!sde_enc) {
+		SDE_ERROR("invalid encoder\n");
+		return -EINVAL;
+	}
+
+	SDE_DEBUG_ENC(sde_enc, "\n");
+
+	if (sde_enc->num_phys_encs + 1 >= ARRAY_SIZE(sde_enc->phys_encs)) {
+		SDE_ERROR_ENC(sde_enc, "too many physical encoders %d\n",
+			  sde_enc->num_phys_encs);
+		return -EINVAL;
+	}
+
+	enc = sde_encoder_phys_wb_init(params);
+
+	if (IS_ERR_OR_NULL(enc)) {
+		SDE_ERROR_ENC(sde_enc, "failed to init wb enc: %ld\n",
+			PTR_ERR(enc));
+		return !enc ? -EINVAL : PTR_ERR(enc);
+	}
+
+	sde_enc->phys_encs[sde_enc->num_phys_encs] = enc;
+	++sde_enc->num_phys_encs;
+
+	return 0;
+}
+
+static int sde_encoder_setup_display(struct sde_encoder_virt *sde_enc,
+				 struct sde_kms *sde_kms,
+				 struct msm_display_info *disp_info,
+				 int *drm_enc_mode)
+{
+	int ret = 0;
+	int i = 0;
+	enum sde_intf_type intf_type;
+	struct sde_encoder_virt_ops parent_ops = {
+		sde_encoder_vblank_callback,
+		sde_encoder_underrun_callback,
+		sde_encoder_frame_done_callback,
+	};
+	struct sde_enc_phys_init_params phys_params;
+
+	if (!sde_enc || !sde_kms) {
+		SDE_ERROR("invalid arg(s), enc %d kms %d\n",
+				sde_enc != 0, sde_kms != 0);
+		return -EINVAL;
+	}
+
+	memset(&phys_params, 0, sizeof(phys_params));
+	phys_params.sde_kms = sde_kms;
+	phys_params.parent = &sde_enc->base;
+	phys_params.parent_ops = parent_ops;
+	phys_params.enc_spinlock = &sde_enc->enc_spinlock;
+
+	SDE_DEBUG("\n");
+
+	if (disp_info->intf_type == DRM_MODE_CONNECTOR_DSI) {
+		*drm_enc_mode = DRM_MODE_ENCODER_DSI;
+		intf_type = INTF_DSI;
+	} else if (disp_info->intf_type == DRM_MODE_CONNECTOR_HDMIA) {
+		*drm_enc_mode = DRM_MODE_ENCODER_TMDS;
+		intf_type = INTF_HDMI;
+	} else if (disp_info->intf_type == DRM_MODE_CONNECTOR_VIRTUAL) {
+		*drm_enc_mode = DRM_MODE_ENCODER_VIRTUAL;
+		intf_type = INTF_WB;
+	} else {
+		SDE_ERROR_ENC(sde_enc, "unsupported display interface type\n");
+		return -EINVAL;
+	}
+
+	WARN_ON(disp_info->num_of_h_tiles < 1);
+
+	sde_enc->display_num_of_h_tiles = disp_info->num_of_h_tiles;
+
+	SDE_DEBUG("dsi_info->num_of_h_tiles %d\n", disp_info->num_of_h_tiles);
+
+	mutex_lock(&sde_enc->enc_lock);
+	for (i = 0; i < disp_info->num_of_h_tiles && !ret; i++) {
+		/*
+		 * Left-most tile is at index 0, content is controller id
+		 * h_tile_instance_ids[2] = {0, 1}; DSI0 = left, DSI1 = right
+		 * h_tile_instance_ids[2] = {1, 0}; DSI1 = left, DSI0 = right
+		 */
+		u32 controller_id = disp_info->h_tile_instance[i];
+
+		if (disp_info->num_of_h_tiles > 1) {
+			if (i == 0)
+				phys_params.split_role = ENC_ROLE_MASTER;
+			else
+				phys_params.split_role = ENC_ROLE_SLAVE;
+		} else {
+			phys_params.split_role = ENC_ROLE_SOLO;
+		}
+
+		SDE_DEBUG("h_tile_instance %d = %d, split_role %d\n",
+				i, controller_id, phys_params.split_role);
+
+		if (intf_type == INTF_WB) {
+			phys_params.intf_idx = INTF_MAX;
+			phys_params.wb_idx = sde_encoder_get_wb(
+					sde_kms->catalog,
+					intf_type, controller_id);
+			if (phys_params.wb_idx == WB_MAX) {
+				SDE_ERROR_ENC(sde_enc,
+					"could not get wb: type %d, id %d\n",
+					intf_type, controller_id);
+				ret = -EINVAL;
+			}
+		} else {
+			phys_params.wb_idx = WB_MAX;
+			phys_params.intf_idx = sde_encoder_get_intf(
+					sde_kms->catalog, intf_type,
+					controller_id);
+			if (phys_params.intf_idx == INTF_MAX) {
+				SDE_ERROR_ENC(sde_enc,
+					"could not get wb: type %d, id %d\n",
+					intf_type, controller_id);
+				ret = -EINVAL;
+			}
+		}
+
+		if (!ret) {
+			if (intf_type == INTF_WB)
+				ret = sde_encoder_virt_add_phys_enc_wb(sde_enc,
+						&phys_params);
+			else
+				ret = sde_encoder_virt_add_phys_encs(
+						disp_info->capabilities,
+						sde_enc,
+						&phys_params);
+			if (ret)
+				SDE_ERROR_ENC(sde_enc,
+						"failed to add phys encs\n");
+		}
+	}
+	mutex_unlock(&sde_enc->enc_lock);
+
+	return ret;
+}
+
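+/*
+ * Frame-done watchdog: armed around kickoff (outside this hunk) by setting
+ * frame_done_timeout non-zero. The frame-done IRQ path normally clears it
+ * first; if this timer fires while it is still set, the frame is reported
+ * to the crtc as SDE_ENCODER_FRAME_EVENT_ERROR.
+ */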
+static void sde_encoder_frame_done_timeout(unsigned long data)
+{
+	struct drm_encoder *drm_enc = (struct drm_encoder *) data;
+	struct sde_encoder_virt *sde_enc = to_sde_encoder_virt(drm_enc);
+	struct msm_drm_private *priv;
+
+	if (!drm_enc || !drm_enc->dev || !drm_enc->dev->dev_private) {
+		SDE_ERROR("invalid parameters\n");
+		return;
+	}
+	priv = drm_enc->dev->dev_private;
+
+	if (!sde_enc->frame_busy_mask[0] || !sde_enc->crtc_frame_event_cb) {
+		SDE_DEBUG("enc%d invalid timeout\n", drm_enc->base.id);
+		SDE_EVT32(DRMID(drm_enc),
+				sde_enc->frame_busy_mask[0], 0);
+		return;
+	} else if (!atomic_xchg(&sde_enc->frame_done_timeout, 0)) {
+		SDE_ERROR("enc%d invalid timeout\n", drm_enc->base.id);
+		SDE_EVT32(DRMID(drm_enc), 0, 1);
+		return;
+	}
+
+	SDE_EVT32(DRMID(drm_enc), 0, 2);
+	sde_enc->crtc_frame_event_cb(sde_enc->crtc_frame_event_cb_data,
+			SDE_ENCODER_FRAME_EVENT_ERROR);
+}
+
+struct drm_encoder *sde_encoder_init(
+		struct drm_device *dev,
+		struct msm_display_info *disp_info)
+{
+	struct msm_drm_private *priv = dev->dev_private;
+	struct sde_kms *sde_kms = to_sde_kms(priv->kms);
+	struct drm_encoder *drm_enc = NULL;
+	struct sde_encoder_virt *sde_enc = NULL;
+	int drm_enc_mode = DRM_MODE_ENCODER_NONE;
+	int ret = 0;
+
+	sde_enc = kzalloc(sizeof(*sde_enc), GFP_KERNEL);
+	if (!sde_enc) {
+		ret = -ENOMEM;
+		goto fail;
+	}
+
+	mutex_init(&sde_enc->enc_lock);
+	ret = sde_encoder_setup_display(sde_enc, sde_kms, disp_info,
+			&drm_enc_mode);
+	if (ret)
+		goto fail;
+
+	sde_enc->cur_master = NULL;
+	spin_lock_init(&sde_enc->enc_spinlock);
+	drm_enc = &sde_enc->base;
+	drm_encoder_init(dev, drm_enc, &sde_encoder_funcs, drm_enc_mode);
+	drm_encoder_helper_add(drm_enc, &sde_encoder_helper_funcs);
+
+	atomic_set(&sde_enc->frame_done_timeout, 0);
+	setup_timer(&sde_enc->frame_done_timer, sde_encoder_frame_done_timeout,
+			(unsigned long) sde_enc);
+
+	_sde_encoder_init_debugfs(drm_enc, sde_enc, sde_kms);
+
+	SDE_DEBUG_ENC(sde_enc, "created\n");
+
+	return drm_enc;
+
+fail:
+	SDE_ERROR("failed to create encoder\n");
+	if (drm_enc)
+		sde_encoder_destroy(drm_enc);
+	else
+		kfree(sde_enc);	/* not yet a drm encoder; free the allocation */
+
+	return ERR_PTR(ret);
+}
+
+int sde_encoder_wait_for_commit_done(struct drm_encoder *drm_enc)
+{
+	struct sde_encoder_virt *sde_enc = NULL;
+	int i, ret = 0;
+
+	if (!drm_enc) {
+		SDE_ERROR("invalid encoder\n");
+		return -EINVAL;
+	}
+	sde_enc = to_sde_encoder_virt(drm_enc);
+	SDE_DEBUG_ENC(sde_enc, "\n");
+
+	for (i = 0; i < sde_enc->num_phys_encs; i++) {
+		struct sde_encoder_phys *phys = sde_enc->phys_encs[i];
+
+		if (phys && phys->ops.wait_for_commit_done) {
+			ret = phys->ops.wait_for_commit_done(phys);
+			if (ret)
+				return ret;
+		}
+
+		if (phys && phys->ops.collect_misr)
+			if (phys->misr_map && phys->misr_map->enable)
+				phys->ops.collect_misr(phys, phys->misr_map);
+	}
+
+	return ret;
+}
+
+enum sde_intf_mode sde_encoder_get_intf_mode(struct drm_encoder *encoder)
+{
+	struct sde_encoder_virt *sde_enc = NULL;
+	int i;
+
+	if (!encoder) {
+		SDE_ERROR("invalid encoder\n");
+		return INTF_MODE_NONE;
+	}
+	sde_enc = to_sde_encoder_virt(encoder);
+
+	if (sde_enc->cur_master)
+		return sde_enc->cur_master->intf_mode;
+
+	for (i = 0; i < sde_enc->num_phys_encs; i++) {
+		struct sde_encoder_phys *phys = sde_enc->phys_encs[i];
+
+		if (phys)
+			return phys->intf_mode;
+	}
+
+	return INTF_MODE_NONE;
+}
+
+/**
+ * sde_encoder_phys_setup_cdm - setup chroma down block
+ * @phys_enc:	Pointer to physical encoder
+ * @format:	Output format
+ * @output_type: HDMI/WB
+ * @roi:	Output size
+ */
+void sde_encoder_phys_setup_cdm(struct sde_encoder_phys *phys_enc,
+		const struct sde_format *format, u32 output_type,
+		struct sde_rect *roi)
+{
+	struct drm_encoder *encoder = phys_enc->parent;
+	struct sde_encoder_virt *sde_enc = NULL;
+	struct sde_hw_cdm *hw_cdm = phys_enc->hw_cdm;
+	struct sde_hw_cdm_cfg *cdm_cfg = &phys_enc->cdm_cfg;
+	struct drm_connector *connector = phys_enc->connector;
+	int ret;
+	u32 csc_type;
+
+	if (!encoder) {
+		SDE_ERROR("invalid encoder\n");
+		return;
+	}
+	sde_enc = to_sde_encoder_virt(encoder);
+
+	if (!SDE_FORMAT_IS_YUV(format)) {
+		SDE_DEBUG_ENC(sde_enc, "[cdm_disable fmt:%x]\n",
+				format->base.pixel_format);
+
+		if (hw_cdm && hw_cdm->ops.disable)
+			hw_cdm->ops.disable(hw_cdm);
+
+		return;
+	}
+
+	memset(cdm_cfg, 0, sizeof(struct sde_hw_cdm_cfg));
+
+	cdm_cfg->output_width = roi->w;
+	cdm_cfg->output_height = roi->h;
+	cdm_cfg->output_fmt = format;
+	cdm_cfg->output_type = output_type;
+	cdm_cfg->output_bit_depth = SDE_FORMAT_IS_DX(format) ?
+		CDM_CDWN_OUTPUT_10BIT : CDM_CDWN_OUTPUT_8BIT;
+
+	/* enable 10 bit logic */
+	switch (cdm_cfg->output_fmt->chroma_sample) {
+	case SDE_CHROMA_RGB:
+		cdm_cfg->h_cdwn_type = CDM_CDWN_DISABLE;
+		cdm_cfg->v_cdwn_type = CDM_CDWN_DISABLE;
+		break;
+	case SDE_CHROMA_H2V1:
+		cdm_cfg->h_cdwn_type = CDM_CDWN_COSITE;
+		cdm_cfg->v_cdwn_type = CDM_CDWN_DISABLE;
+		break;
+	case SDE_CHROMA_420:
+		cdm_cfg->h_cdwn_type = CDM_CDWN_COSITE;
+		cdm_cfg->v_cdwn_type = CDM_CDWN_OFFSITE;
+		break;
+	case SDE_CHROMA_H1V2:
+	default:
+		SDE_ERROR("unsupported chroma sampling type\n");
+		cdm_cfg->h_cdwn_type = CDM_CDWN_DISABLE;
+		cdm_cfg->v_cdwn_type = CDM_CDWN_DISABLE;
+		break;
+	}
+
+	SDE_DEBUG_ENC(sde_enc, "[cdm_enable:%d,%d,%X,%d,%d,%d,%d]\n",
+			cdm_cfg->output_width,
+			cdm_cfg->output_height,
+			cdm_cfg->output_fmt->base.pixel_format,
+			cdm_cfg->output_type,
+			cdm_cfg->output_bit_depth,
+			cdm_cfg->h_cdwn_type,
+			cdm_cfg->v_cdwn_type);
+
+	csc_type = sde_connector_get_csc_type(connector);
+
+	if (hw_cdm && hw_cdm->ops.setup_csc_data) {
+		ret = hw_cdm->ops.setup_csc_data(hw_cdm,
+				&sde_csc_10bit_convert[csc_type]);
+		if (ret < 0) {
+			SDE_ERROR("failed to setup CSC %d\n", ret);
+			return;
+		}
+	}
+
+	/* Cache the CSC default matrix type */
+	phys_enc->enc_cdm_csc = csc_type;
+
+	if (hw_cdm && hw_cdm->ops.setup_cdwn) {
+		ret = hw_cdm->ops.setup_cdwn(hw_cdm, cdm_cfg);
+		if (ret < 0) {
+			SDE_ERROR("failed to setup CDM %d\n", ret);
+			return;
+		}
+	}
+
+	if (hw_cdm && hw_cdm->ops.enable) {
+		ret = hw_cdm->ops.enable(hw_cdm, cdm_cfg);
+		if (ret < 0) {
+			SDE_ERROR("failed to enable CDM %d\n", ret);
+			return;
+		}
+	}
+}
diff -Nruw linux-4.4.115-fbx/drivers/gpu/drm/msm/sde./sde_encoder.h linux-4.4.115-fbx/drivers/gpu/drm/msm/sde/sde_encoder.h
--- linux-4.4.115-fbx/drivers/gpu/drm/msm/sde./sde_encoder.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/gpu/drm/msm/sde/sde_encoder.h	2019-01-22 16:16:23.511246479 +0100
@@ -0,0 +1,133 @@
+/*
+ * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __SDE_ENCODER_H__
+#define __SDE_ENCODER_H__
+
+#include <drm/drm_crtc.h>
+
+#include "msm_prop.h"
+#include "sde_hw_mdss.h"
+
+#define SDE_ENCODER_FRAME_EVENT_DONE	BIT(0)
+#define SDE_ENCODER_FRAME_EVENT_ERROR	BIT(1)
+
+/**
+ * Encoder functions and data types
+ * @intfs:	Interfaces this encoder is using, INTF_MODE_NONE if unused
+ * @wbs:	Writebacks this encoder is using, INTF_MODE_NONE if unused
+ * @needs_cdm:	Encoder requests a CDM based on pixel format conversion needs
+ * @display_num_of_h_tiles:
+ */
+struct sde_encoder_hw_resources {
+	enum sde_intf_mode intfs[INTF_MAX];
+	enum sde_intf_mode wbs[WB_MAX];
+	bool needs_cdm;
+	u32 display_num_of_h_tiles;
+};
+
+/**
+ * sde_encoder_get_hw_resources - Populate table of required hardware resources
+ * @encoder:	encoder pointer
+ * @hw_res:	resource table to populate with encoder required resources
+ * @conn_state:	report hw reqs based on this proposed connector state
+ */
+void sde_encoder_get_hw_resources(struct drm_encoder *encoder,
+		struct sde_encoder_hw_resources *hw_res,
+		struct drm_connector_state *conn_state);
+
+/**
+ * sde_encoder_register_vblank_callback - provide callback to encoder that
+ *	will be called on the next vblank.
+ * @encoder:	encoder pointer
+ * @cb:		callback pointer, provide NULL to deregister and disable IRQs
+ * @data:	user data provided to callback
+ */
+void sde_encoder_register_vblank_callback(struct drm_encoder *encoder,
+		void (*cb)(void *), void *data);
+
+/**
+ * sde_encoder_register_frame_event_callback - provide callback to encoder that
+ *	will be called after the request is complete, or other events.
+ * @encoder:	encoder pointer
+ * @cb:		callback pointer, provide NULL to deregister
+ * @data:	user data provided to callback
+ */
+void sde_encoder_register_frame_event_callback(struct drm_encoder *encoder,
+		void (*cb)(void *, u32), void *data);
+
+/**
+ * sde_encoder_register_request_flip_callback - provide callback to encoder that
+ * will be called after HW flush is complete to request
+ * a page flip event from CRTC.
+ * @encoder:	encoder pointer
+ * @cb:		callback pointer, provide NULL to deregister
+ * @data:	user data provided to callback
+ */
+void sde_encoder_register_request_flip_callback(struct drm_encoder *encoder,
+		void (*cb)(void *), void *data);
+
+/**
+ * sde_encoder_prepare_for_kickoff - schedule double buffer flip of the ctl
+ *	path (i.e. ctl flush and start) at next appropriate time.
+ *	Immediately: if no previous commit is outstanding.
+ *	Delayed: Block until next trigger can be issued.
+ * @encoder:	encoder pointer
+ */
+void sde_encoder_prepare_for_kickoff(struct drm_encoder *encoder);
+
+/**
+ * sde_encoder_kickoff - trigger a double buffer flip of the ctl path
+ *	(i.e. ctl flush and start) immediately.
+ * @encoder:	encoder pointer
+ */
+void sde_encoder_kickoff(struct drm_encoder *encoder);
+
+/**
+ * sde_encoder_wait_for_commit_done - Wait for hardware to have flushed the
+ *	current pending frames to hardware at a vblank or ctl_start.
+ *	Encoders map this differently depending on their available irqs,
+ *	e.g. video mode -> vsync_irq.
+ * @drm_encoder:	encoder pointer
+ * Returns: 0 on success, -EWOULDBLOCK if already signaled, error otherwise
+ */
+int sde_encoder_wait_for_commit_done(struct drm_encoder *drm_encoder);
+
+/**
+ * sde_encoder_get_intf_mode - get interface mode of the given encoder
+ * @encoder: Pointer to drm encoder object
+ */
+enum sde_intf_mode sde_encoder_get_intf_mode(struct drm_encoder *encoder);
+
+/**
+ * sde_encoder_init - initialize virtual encoder object
+ * @dev:        Pointer to drm device structure
+ * @disp_info:  Pointer to display information structure
+ * Returns:     Pointer to newly created drm encoder
+ */
+struct drm_encoder *sde_encoder_init(
+		struct drm_device *dev,
+		struct msm_display_info *disp_info);
+
+/**
+ * sde_encoder_destroy - destroy previously initialized virtual encoder
+ * @drm_enc:    Pointer to previously created drm encoder structure
+ */
+void sde_encoder_destroy(struct drm_encoder *drm_enc);
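+
+/*
+ * Typical lifecycle sketch (illustrative; mirrors the error handling in
+ * sde_encoder_init(), which returns ERR_PTR() on failure):
+ *
+ *	struct drm_encoder *enc = sde_encoder_init(dev, &disp_info);
+ *	if (IS_ERR(enc))
+ *		return PTR_ERR(enc);
+ *	...
+ *	sde_encoder_destroy(enc);
+ */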
+
+#endif /* __SDE_ENCODER_H__ */
diff -Nruw linux-4.4.115-fbx/drivers/gpu/drm/msm/sde./sde_encoder_phys_cmd.c linux-4.4.115-fbx/drivers/gpu/drm/msm/sde/sde_encoder_phys_cmd.c
--- linux-4.4.115-fbx/drivers/gpu/drm/msm/sde./sde_encoder_phys_cmd.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/gpu/drm/msm/sde/sde_encoder_phys_cmd.c	2019-01-22 16:16:23.515246515 +0100
@@ -0,0 +1,712 @@
+/*
+ * Copyright (c) 2015-2016 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt)	"[drm:%s:%d] " fmt, __func__, __LINE__
+#include "sde_encoder_phys.h"
+#include "sde_hw_interrupts.h"
+#include "sde_core_irq.h"
+#include "sde_formats.h"
+
+#define SDE_DEBUG_CMDENC(e, fmt, ...) SDE_DEBUG("enc%d intf%d " fmt, \
+		(e) && (e)->base.parent ? \
+		(e)->base.parent->base.id : -1, \
+		(e) ? (e)->intf_idx - INTF_0 : -1, ##__VA_ARGS__)
+
+#define SDE_ERROR_CMDENC(e, fmt, ...) SDE_ERROR("enc%d intf%d " fmt, \
+		(e) && (e)->base.parent ? \
+		(e)->base.parent->base.id : -1, \
+		(e) ? (e)->intf_idx - INTF_0 : -1, ##__VA_ARGS__)
+
+#define to_sde_encoder_phys_cmd(x) \
+	container_of(x, struct sde_encoder_phys_cmd, base)
+
+/*
+ * Tearcheck sync start and continue thresholds are empirically found
+ * based on common panels. In the future, we may want to allow panels to
+ * override these default values.
+ */
+#define DEFAULT_TEARCHECK_SYNC_THRESH_START	4
+#define DEFAULT_TEARCHECK_SYNC_THRESH_CONTINUE	4
+
+static inline bool sde_encoder_phys_cmd_is_master(
+		struct sde_encoder_phys *phys_enc)
+{
+	return phys_enc->split_role != ENC_ROLE_SLAVE;
+}
+
+static bool sde_encoder_phys_cmd_mode_fixup(
+		struct sde_encoder_phys *phys_enc,
+		const struct drm_display_mode *mode,
+		struct drm_display_mode *adj_mode)
+{
+	if (phys_enc)
+		SDE_DEBUG_CMDENC(to_sde_encoder_phys_cmd(phys_enc), "\n");
+	return true;
+}
+
+static void sde_encoder_phys_cmd_mode_set(
+		struct sde_encoder_phys *phys_enc,
+		struct drm_display_mode *mode,
+		struct drm_display_mode *adj_mode)
+{
+	struct sde_encoder_phys_cmd *cmd_enc;
+	struct sde_rm *rm;
+	struct sde_rm_hw_iter iter;
+	int i, instance;
+
+	if (!phys_enc || !mode || !adj_mode) {
+		SDE_ERROR("invalid arg(s), enc %d mode %d adj_mode %d\n",
+				phys_enc != 0, mode != 0, adj_mode != 0);
+		return;
+	}
+	/* dereference phys_enc only after the NULL check above */
+	cmd_enc = to_sde_encoder_phys_cmd(phys_enc);
+	rm = &phys_enc->sde_kms->rm;
+
+	phys_enc->cached_mode = *adj_mode;
+	SDE_DEBUG_CMDENC(cmd_enc, "caching mode:\n");
+	drm_mode_debug_printmodeline(adj_mode);
+
+	instance = phys_enc->split_role == ENC_ROLE_SLAVE ? 1 : 0;
+
+	/* Retrieve previously allocated HW Resources. Shouldn't fail */
+	sde_rm_init_hw_iter(&iter, phys_enc->parent->base.id, SDE_HW_BLK_CTL);
+	for (i = 0; i <= instance; i++) {
+		if (sde_rm_get_hw(rm, &iter))
+			phys_enc->hw_ctl = (struct sde_hw_ctl *)iter.hw;
+	}
+
+	if (IS_ERR_OR_NULL(phys_enc->hw_ctl)) {
+		SDE_ERROR_CMDENC(cmd_enc, "failed to init ctl: %ld\n",
+				PTR_ERR(phys_enc->hw_ctl));
+		phys_enc->hw_ctl = NULL;
+		return;
+	}
+}
+
+static void sde_encoder_phys_cmd_pp_tx_done_irq(void *arg, int irq_idx)
+{
+	struct sde_encoder_phys_cmd *cmd_enc = arg;
+	struct sde_encoder_phys *phys_enc;
+	unsigned long lock_flags;
+	int new_cnt;
+
+	if (!cmd_enc)
+		return;
+
+	phys_enc = &cmd_enc->base;
+
+	/* notify all synchronous clients first, then asynchronous clients */
+	if (phys_enc->parent_ops.handle_frame_done)
+		phys_enc->parent_ops.handle_frame_done(phys_enc->parent,
+				phys_enc, SDE_ENCODER_FRAME_EVENT_DONE);
+
+	spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags);
+	new_cnt = atomic_add_unless(&phys_enc->pending_kickoff_cnt, -1, 0);
+	spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags);
+
+	SDE_EVT32_IRQ(DRMID(phys_enc->parent),
+			phys_enc->hw_pp->idx - PINGPONG_0, new_cnt);
+
+	/* Signal any waiting atomic commit thread */
+	wake_up_all(&phys_enc->pending_kickoff_wq);
+}
+
+static void sde_encoder_phys_cmd_pp_rd_ptr_irq(void *arg, int irq_idx)
+{
+	struct sde_encoder_phys_cmd *cmd_enc = arg;
+	struct sde_encoder_phys *phys_enc;
+
+	if (!cmd_enc)
+		return;
+
+	phys_enc = &cmd_enc->base;
+
+	if (phys_enc->parent_ops.handle_vblank_virt)
+		phys_enc->parent_ops.handle_vblank_virt(phys_enc->parent,
+			phys_enc);
+}
+
+static bool _sde_encoder_phys_is_ppsplit_slave(
+		struct sde_encoder_phys *phys_enc)
+{
+	enum sde_rm_topology_name topology;
+
+	if (!phys_enc)
+		return false;
+
+	topology = sde_connector_get_topology_name(phys_enc->connector);
+	if (topology == SDE_RM_TOPOLOGY_PPSPLIT &&
+			phys_enc->split_role == ENC_ROLE_SLAVE)
+		return true;
+
+	return false;
+}
+
+static int _sde_encoder_phys_cmd_wait_for_idle(
+		struct sde_encoder_phys *phys_enc)
+{
+	struct sde_encoder_phys_cmd *cmd_enc =
+			to_sde_encoder_phys_cmd(phys_enc);
+	u32 irq_status;
+	int ret;
+
+	if (!phys_enc) {
+		SDE_ERROR("invalid encoder\n");
+		return -EINVAL;
+	}
+
+	/* slave encoder doesn't enable for ppsplit */
+	if (_sde_encoder_phys_is_ppsplit_slave(phys_enc))
+		return 0;
+
+	/* return EWOULDBLOCK since we know the wait isn't necessary */
+	if (phys_enc->enable_state == SDE_ENC_DISABLED) {
+		SDE_ERROR_CMDENC(cmd_enc, "encoder is disabled\n");
+		return -EWOULDBLOCK;
+	}
+
+	/* wait for previous kickoff to complete */
+	ret = sde_encoder_helper_wait_event_timeout(
+			DRMID(phys_enc->parent),
+			phys_enc->hw_pp->idx - PINGPONG_0,
+			&phys_enc->pending_kickoff_wq,
+			&phys_enc->pending_kickoff_cnt,
+			KICKOFF_TIMEOUT_MS);
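+	/*
+	 * On timeout, re-read the pingpong done interrupt status: if the
+	 * frame actually completed but the IRQ was lost, run the done
+	 * handler by hand and treat the wait as successful.
+	 */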
+	if (ret <= 0) {
+		irq_status = sde_core_irq_read(phys_enc->sde_kms,
+				INTR_IDX_PINGPONG, true);
+		if (irq_status) {
+			SDE_EVT32(DRMID(phys_enc->parent),
+					phys_enc->hw_pp->idx - PINGPONG_0);
+			SDE_DEBUG_CMDENC(cmd_enc,
+					"pp:%d done but irq not triggered\n",
+					phys_enc->hw_pp->idx - PINGPONG_0);
+			sde_encoder_phys_cmd_pp_tx_done_irq(cmd_enc,
+					INTR_IDX_PINGPONG);
+			ret = 0;
+		} else {
+			SDE_EVT32(DRMID(phys_enc->parent),
+					phys_enc->hw_pp->idx - PINGPONG_0);
+			SDE_ERROR_CMDENC(cmd_enc, "pp:%d kickoff timed out\n",
+					phys_enc->hw_pp->idx - PINGPONG_0);
+			if (phys_enc->parent_ops.handle_frame_done)
+				phys_enc->parent_ops.handle_frame_done(
+						phys_enc->parent, phys_enc,
+						SDE_ENCODER_FRAME_EVENT_ERROR);
+			ret = -ETIMEDOUT;
+		}
+	} else {
+		ret = 0;
+	}
+
+	return ret;
+}
+
+static void sde_encoder_phys_cmd_underrun_irq(void *arg, int irq_idx)
+{
+	struct sde_encoder_phys_cmd *cmd_enc = arg;
+	struct sde_encoder_phys *phys_enc;
+
+	if (!cmd_enc)
+		return;
+
+	phys_enc = &cmd_enc->base;
+	if (phys_enc->parent_ops.handle_underrun_virt)
+		phys_enc->parent_ops.handle_underrun_virt(phys_enc->parent,
+			phys_enc);
+}
+
+static int sde_encoder_phys_cmd_register_irq(struct sde_encoder_phys *phys_enc,
+	enum sde_intr_type intr_type, int idx,
+	void (*irq_func)(void *, int), const char *irq_name)
+{
+	struct sde_encoder_phys_cmd *cmd_enc =
+			to_sde_encoder_phys_cmd(phys_enc);
+	int ret = 0;
+
+	if (!phys_enc) {
+		SDE_ERROR("invalid encoder\n");
+		return -EINVAL;
+	}
+
+	cmd_enc->irq_idx[idx] = sde_core_irq_idx_lookup(phys_enc->sde_kms,
+			intr_type, phys_enc->hw_pp->idx);
+	if (cmd_enc->irq_idx[idx] < 0) {
+		SDE_ERROR_CMDENC(cmd_enc,
+			"failed to lookup IRQ index for %s with pp=%d\n",
+			irq_name,
+			phys_enc->hw_pp->idx - PINGPONG_0);
+		return -EINVAL;
+	}
+
+	cmd_enc->irq_cb[idx].func = irq_func;
+	cmd_enc->irq_cb[idx].arg = cmd_enc;
+	ret = sde_core_irq_register_callback(phys_enc->sde_kms,
+			cmd_enc->irq_idx[idx], &cmd_enc->irq_cb[idx]);
+	if (ret) {
+		SDE_ERROR_CMDENC(cmd_enc,
+				"failed to register IRQ callback %s\n",
+				irq_name);
+		return ret;
+	}
+
+	ret = sde_core_irq_enable(phys_enc->sde_kms, &cmd_enc->irq_idx[idx], 1);
+	if (ret) {
+		SDE_ERROR_CMDENC(cmd_enc,
+			"failed to enable IRQ for %s, pp %d, irq_idx %d\n",
+			irq_name,
+			phys_enc->hw_pp->idx - PINGPONG_0,
+			cmd_enc->irq_idx[idx]);
+		cmd_enc->irq_idx[idx] = -EINVAL;
+
+		/* Unregister callback on IRQ enable failure */
+		sde_core_irq_unregister_callback(phys_enc->sde_kms,
+				cmd_enc->irq_idx[idx], &cmd_enc->irq_cb[idx]);
+		return ret;
+	}
+
+	SDE_DEBUG_CMDENC(cmd_enc, "registered IRQ %s for pp %d, irq_idx %d\n",
+			irq_name,
+			phys_enc->hw_pp->idx - PINGPONG_0,
+			cmd_enc->irq_idx[idx]);
+
+	return ret;
+}
+
+static int sde_encoder_phys_cmd_unregister_irq(
+		struct sde_encoder_phys *phys_enc, int idx)
+{
+	struct sde_encoder_phys_cmd *cmd_enc =
+			to_sde_encoder_phys_cmd(phys_enc);
+
+	if (!phys_enc) {
+		SDE_ERROR("invalid encoder\n");
+		return -EINVAL;
+	}
+
+	sde_core_irq_disable(phys_enc->sde_kms, &cmd_enc->irq_idx[idx], 1);
+	sde_core_irq_unregister_callback(phys_enc->sde_kms,
+			cmd_enc->irq_idx[idx], &cmd_enc->irq_cb[idx]);
+
+	SDE_DEBUG_CMDENC(cmd_enc, "unregistered IRQ for pp %d, irq_idx %d\n",
+			phys_enc->hw_pp->idx - PINGPONG_0,
+			cmd_enc->irq_idx[idx]);
+
+	return 0;
+}
+
+static void sde_encoder_phys_cmd_tearcheck_config(
+		struct sde_encoder_phys *phys_enc)
+{
+	struct sde_encoder_phys_cmd *cmd_enc =
+		to_sde_encoder_phys_cmd(phys_enc);
+	struct sde_hw_tear_check tc_cfg = { 0 };
+	struct drm_display_mode *mode = &phys_enc->cached_mode;
+	bool tc_enable = true;
+	u32 vsync_hz;
+	struct msm_drm_private *priv;
+	struct sde_kms *sde_kms;
+
+	if (!phys_enc) {
+		SDE_ERROR("invalid encoder\n");
+		return;
+	}
+
+	SDE_DEBUG_CMDENC(cmd_enc, "pp %d\n", phys_enc->hw_pp->idx - PINGPONG_0);
+
+	if (!phys_enc->hw_pp->ops.setup_tearcheck ||
+		!phys_enc->hw_pp->ops.enable_tearcheck) {
+		SDE_DEBUG_CMDENC(cmd_enc, "tearcheck not supported\n");
+		return;
+	}
+
+	sde_kms = phys_enc->sde_kms;
+	priv = sde_kms->dev->dev_private;
+	/*
+	 * TE default: dsi byte clock calculated based on 70 fps;
+	 * around 14 ms to complete a kickoff cycle if TE is disabled;
+	 * vclk_line based on 60 fps; write is faster than read;
+	 * init == start == rdptr;
+	 *
+	 * vsync_count is the ratio of the MDP VSYNC clock frequency to the
+	 * LCD panel frame rate, divided by the number of rows (lines) in
+	 * the panel.
+	 */
+	vsync_hz = sde_power_clk_get_rate(&priv->phandle, "vsync_clk");
+	if (!vsync_hz) {
+		SDE_DEBUG_CMDENC(cmd_enc, "invalid vsync clock rate\n");
+		return;
+	}
+
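+	/*
+	 * Illustrative numbers (not from this patch): a 19.2 MHz vsync clock
+	 * with vtotal = 2000 lines at 60 fps gives
+	 * vsync_count = 19200000 / (2000 * 60) = 160 ticks per line.
+	 */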
+	tc_cfg.vsync_count = vsync_hz / (mode->vtotal * mode->vrefresh);
+	tc_cfg.hw_vsync_mode = 1;
+
+	/*
+	 * Setting sync_cfg_height near the max register value essentially
+	 * disables the sde hw generated TE signal, since the panel hw TE
+	 * will always arrive first. The only caveat is hitting the
+	 * wrap-around due to an error.
+	 */
+	tc_cfg.sync_cfg_height = 0xFFF0;
+	tc_cfg.vsync_init_val = mode->vdisplay;
+	tc_cfg.sync_threshold_start = DEFAULT_TEARCHECK_SYNC_THRESH_START;
+	tc_cfg.sync_threshold_continue = DEFAULT_TEARCHECK_SYNC_THRESH_CONTINUE;
+	tc_cfg.start_pos = mode->vdisplay;
+	tc_cfg.rd_ptr_irq = mode->vdisplay + 1;
+
+	SDE_DEBUG_CMDENC(cmd_enc,
+		"tc %d vsync_clk_speed_hz %u vtotal %u vrefresh %u\n",
+		phys_enc->hw_pp->idx - PINGPONG_0, vsync_hz,
+		mode->vtotal, mode->vrefresh);
+	SDE_DEBUG_CMDENC(cmd_enc,
+		"tc %d enable %u start_pos %u rd_ptr_irq %u\n",
+		phys_enc->hw_pp->idx - PINGPONG_0, tc_enable, tc_cfg.start_pos,
+		tc_cfg.rd_ptr_irq);
+	SDE_DEBUG_CMDENC(cmd_enc,
+		"tc %d hw_vsync_mode %u vsync_count %u vsync_init_val %u\n",
+		phys_enc->hw_pp->idx - PINGPONG_0, tc_cfg.hw_vsync_mode,
+		tc_cfg.vsync_count, tc_cfg.vsync_init_val);
+	SDE_DEBUG_CMDENC(cmd_enc,
+		"tc %d cfgheight %u thresh_start %u thresh_cont %u\n",
+		phys_enc->hw_pp->idx - PINGPONG_0, tc_cfg.sync_cfg_height,
+		tc_cfg.sync_threshold_start, tc_cfg.sync_threshold_continue);
+
+	phys_enc->hw_pp->ops.setup_tearcheck(phys_enc->hw_pp, &tc_cfg);
+	phys_enc->hw_pp->ops.enable_tearcheck(phys_enc->hw_pp, tc_enable);
+}
+
+static void sde_encoder_phys_cmd_pingpong_config(
+		struct sde_encoder_phys *phys_enc)
+{
+	struct sde_encoder_phys_cmd *cmd_enc =
+		to_sde_encoder_phys_cmd(phys_enc);
+	struct sde_hw_intf_cfg intf_cfg = { 0 };
+
+	if (!phys_enc || !phys_enc->hw_ctl ||
+			!phys_enc->hw_ctl->ops.setup_intf_cfg) {
+		SDE_ERROR("invalid arg(s), enc %d\n", phys_enc != 0);
+		return;
+	}
+
+	SDE_DEBUG_CMDENC(cmd_enc, "pp %d, enabling mode:\n",
+			phys_enc->hw_pp->idx - PINGPONG_0);
+	drm_mode_debug_printmodeline(&phys_enc->cached_mode);
+
+	intf_cfg.intf = cmd_enc->intf_idx;
+	intf_cfg.intf_mode_sel = SDE_CTL_MODE_SEL_CMD;
+	intf_cfg.stream_sel = cmd_enc->stream_sel;
+	intf_cfg.mode_3d = sde_encoder_helper_get_3d_blend_mode(phys_enc);
+
+	phys_enc->hw_ctl->ops.setup_intf_cfg(phys_enc->hw_ctl, &intf_cfg);
+
+	sde_encoder_phys_cmd_tearcheck_config(phys_enc);
+}
+
+static bool sde_encoder_phys_cmd_needs_single_flush(
+		struct sde_encoder_phys *phys_enc)
+{
+	enum sde_rm_topology_name topology;
+
+	if (!phys_enc)
+		return false;
+
+	topology = sde_connector_get_topology_name(phys_enc->connector);
+	return topology == SDE_RM_TOPOLOGY_PPSPLIT;
+}
+
+static int sde_encoder_phys_cmd_control_vblank_irq(
+		struct sde_encoder_phys *phys_enc,
+		bool enable)
+{
+	struct sde_encoder_phys_cmd *cmd_enc =
+		to_sde_encoder_phys_cmd(phys_enc);
+	int ret = 0;
+
+	if (!phys_enc) {
+		SDE_ERROR("invalid encoder\n");
+		return -EINVAL;
+	}
+
+	/* Slave encoders don't report vblank */
+	if (!sde_encoder_phys_cmd_is_master(phys_enc))
+		goto end;
+
+	SDE_DEBUG_CMDENC(cmd_enc, "[%pS] enable=%d/%d\n",
+			__builtin_return_address(0),
+			enable, atomic_read(&phys_enc->vblank_refcount));
+
+	SDE_EVT32(DRMID(phys_enc->parent), phys_enc->hw_pp->idx - PINGPONG_0,
+			enable, atomic_read(&phys_enc->vblank_refcount));
+
+	if (enable && atomic_inc_return(&phys_enc->vblank_refcount) == 1)
+		ret = sde_encoder_phys_cmd_register_irq(phys_enc,
+				SDE_IRQ_TYPE_PING_PONG_RD_PTR,
+				INTR_IDX_RDPTR,
+				sde_encoder_phys_cmd_pp_rd_ptr_irq,
+				"pp_rd_ptr");
+	else if (!enable && atomic_dec_return(&phys_enc->vblank_refcount) == 0)
+		ret = sde_encoder_phys_cmd_unregister_irq(phys_enc,
+				INTR_IDX_RDPTR);
+
+end:
+	if (ret)
+		SDE_ERROR_CMDENC(cmd_enc,
+				"control vblank irq error %d, enable %d\n",
+				ret, enable);
+
+	return ret;
+}
+
+static void sde_encoder_phys_cmd_enable(struct sde_encoder_phys *phys_enc)
+{
+	struct sde_encoder_phys_cmd *cmd_enc =
+		to_sde_encoder_phys_cmd(phys_enc);
+	struct sde_hw_ctl *ctl;
+	u32 flush_mask;
+	int ret;
+
+	if (!phys_enc || !phys_enc->hw_ctl) {
+		SDE_ERROR("invalid arg(s), encoder %d\n", phys_enc != 0);
+		return;
+	}
+	SDE_DEBUG_CMDENC(cmd_enc, "pp %d\n", phys_enc->hw_pp->idx - PINGPONG_0);
+
+	if (phys_enc->enable_state == SDE_ENC_ENABLED) {
+		SDE_ERROR("already enabled\n");
+		return;
+	}
+
+	sde_encoder_helper_split_config(phys_enc, cmd_enc->intf_idx);
+
+	sde_encoder_phys_cmd_pingpong_config(phys_enc);
+
+	if (_sde_encoder_phys_is_ppsplit_slave(phys_enc))
+		goto update_flush;
+
+	/* Both master and slave need to register for pp_tx_done */
+	ret = sde_encoder_phys_cmd_register_irq(phys_enc,
+			SDE_IRQ_TYPE_PING_PONG_COMP,
+			INTR_IDX_PINGPONG,
+			sde_encoder_phys_cmd_pp_tx_done_irq,
+			"pp_tx_done");
+	if (ret)
+		return;
+
+	ret = sde_encoder_phys_cmd_control_vblank_irq(phys_enc, true);
+	if (ret) {
+		sde_encoder_phys_cmd_unregister_irq(phys_enc,
+				INTR_IDX_PINGPONG);
+		return;
+	}
+
+	ret = sde_encoder_phys_cmd_register_irq(phys_enc,
+			SDE_IRQ_TYPE_INTF_UNDER_RUN,
+			INTR_IDX_UNDERRUN,
+			sde_encoder_phys_cmd_underrun_irq,
+			"underrun");
+	if (ret) {
+		sde_encoder_phys_cmd_control_vblank_irq(phys_enc, false);
+		sde_encoder_phys_cmd_unregister_irq(phys_enc,
+				INTR_IDX_PINGPONG);
+		return;
+	}
+
+update_flush:
+	ctl = phys_enc->hw_ctl;
+	ctl->ops.get_bitmask_intf(ctl, &flush_mask, cmd_enc->intf_idx);
+	ctl->ops.update_pending_flush(ctl, flush_mask);
+	phys_enc->enable_state = SDE_ENC_ENABLED;
+
+	SDE_DEBUG_CMDENC(cmd_enc, "update pending flush ctl %d flush_mask %x\n",
+			ctl->idx - CTL_0, flush_mask);
+}
+
+static void sde_encoder_phys_cmd_disable(struct sde_encoder_phys *phys_enc)
+{
+	struct sde_encoder_phys_cmd *cmd_enc =
+		to_sde_encoder_phys_cmd(phys_enc);
+	int ret;
+
+	if (!phys_enc) {
+		SDE_ERROR("invalid encoder\n");
+		return;
+	}
+	SDE_DEBUG_CMDENC(cmd_enc, "pp %d\n", phys_enc->hw_pp->idx - PINGPONG_0);
+
+	if (phys_enc->enable_state == SDE_ENC_DISABLED) {
+		SDE_ERROR_CMDENC(cmd_enc, "already disabled\n");
+		return;
+	}
+
+	SDE_EVT32(DRMID(phys_enc->parent), phys_enc->hw_pp->idx - PINGPONG_0);
+
+	if (!_sde_encoder_phys_is_ppsplit_slave(phys_enc)) {
+		ret = _sde_encoder_phys_cmd_wait_for_idle(phys_enc);
+		if (ret) {
+			atomic_set(&phys_enc->pending_kickoff_cnt, 0);
+			SDE_ERROR_CMDENC(cmd_enc,
+					"pp %d failed wait for idle, %d\n",
+					phys_enc->hw_pp->idx - PINGPONG_0, ret);
+			SDE_EVT32(DRMID(phys_enc->parent),
+					phys_enc->hw_pp->idx - PINGPONG_0, ret);
+		}
+
+		sde_encoder_phys_cmd_unregister_irq(
+				phys_enc, INTR_IDX_UNDERRUN);
+		sde_encoder_phys_cmd_control_vblank_irq(phys_enc, false);
+		sde_encoder_phys_cmd_unregister_irq(
+				phys_enc, INTR_IDX_PINGPONG);
+	}
+
+	phys_enc->enable_state = SDE_ENC_DISABLED;
+
+	if (atomic_read(&phys_enc->vblank_refcount))
+		SDE_ERROR("enc:%d role:%d invalid vblank refcount %d\n",
+				phys_enc->parent->base.id,
+				phys_enc->split_role,
+				atomic_read(&phys_enc->vblank_refcount));
+}
+
+static void sde_encoder_phys_cmd_destroy(struct sde_encoder_phys *phys_enc)
+{
+	struct sde_encoder_phys_cmd *cmd_enc =
+		to_sde_encoder_phys_cmd(phys_enc);
+
+	if (!phys_enc) {
+		SDE_ERROR("invalid encoder\n");
+		return;
+	}
+	kfree(cmd_enc);
+}
+
+static void sde_encoder_phys_cmd_get_hw_resources(
+		struct sde_encoder_phys *phys_enc,
+		struct sde_encoder_hw_resources *hw_res,
+		struct drm_connector_state *conn_state)
+{
+	struct sde_encoder_phys_cmd *cmd_enc =
+		to_sde_encoder_phys_cmd(phys_enc);
+
+	if (!phys_enc) {
+		SDE_ERROR("invalid encoder\n");
+		return;
+	}
+	SDE_DEBUG_CMDENC(cmd_enc, "\n");
+	hw_res->intfs[cmd_enc->intf_idx - INTF_0] = INTF_MODE_CMD;
+}
+
+static int sde_encoder_phys_cmd_wait_for_commit_done(
+		struct sde_encoder_phys *phys_enc)
+{
+	/*
+	 * Since ctl_start "commits" the transaction to hardware, and the
+	 * tearcheck block takes it from there, there is no need to have a
+	 * separate wait for committed, a la wait-for-vsync in video mode
+	 */
+
+	return 0;
+}
+
+static void sde_encoder_phys_cmd_prepare_for_kickoff(
+		struct sde_encoder_phys *phys_enc)
+{
+	struct sde_encoder_phys_cmd *cmd_enc =
+			to_sde_encoder_phys_cmd(phys_enc);
+	int ret;
+
+	if (!phys_enc) {
+		SDE_ERROR("invalid encoder\n");
+		return;
+	}
+	SDE_DEBUG_CMDENC(cmd_enc, "pp %d\n", phys_enc->hw_pp->idx - PINGPONG_0);
+	SDE_EVT32(DRMID(phys_enc->parent), phys_enc->hw_pp->idx - PINGPONG_0);
+
+	/*
+	 * Mark the kickoff request as outstanding. If there is more than one
+	 * outstanding, we have to wait for the previous kickoff to complete.
+	 */
+	ret = _sde_encoder_phys_cmd_wait_for_idle(phys_enc);
+	if (ret) {
+		/* force pending_kickoff_cnt 0 to discard failed kickoff */
+		atomic_set(&phys_enc->pending_kickoff_cnt, 0);
+		SDE_EVT32(DRMID(phys_enc->parent),
+				phys_enc->hw_pp->idx - PINGPONG_0);
+		SDE_ERROR("failed wait_for_idle: %d\n", ret);
+	}
+}
+
+static void sde_encoder_phys_cmd_init_ops(
+		struct sde_encoder_phys_ops *ops)
+{
+	ops->is_master = sde_encoder_phys_cmd_is_master;
+	ops->mode_set = sde_encoder_phys_cmd_mode_set;
+	ops->mode_fixup = sde_encoder_phys_cmd_mode_fixup;
+	ops->enable = sde_encoder_phys_cmd_enable;
+	ops->disable = sde_encoder_phys_cmd_disable;
+	ops->destroy = sde_encoder_phys_cmd_destroy;
+	ops->get_hw_resources = sde_encoder_phys_cmd_get_hw_resources;
+	ops->control_vblank_irq = sde_encoder_phys_cmd_control_vblank_irq;
+	ops->wait_for_commit_done = sde_encoder_phys_cmd_wait_for_commit_done;
+	ops->prepare_for_kickoff = sde_encoder_phys_cmd_prepare_for_kickoff;
+	ops->trigger_start = sde_encoder_helper_trigger_start;
+	ops->needs_single_flush = sde_encoder_phys_cmd_needs_single_flush;
+}
+
+struct sde_encoder_phys *sde_encoder_phys_cmd_init(
+		struct sde_enc_phys_init_params *p)
+{
+	struct sde_encoder_phys *phys_enc = NULL;
+	struct sde_encoder_phys_cmd *cmd_enc = NULL;
+	struct sde_hw_mdp *hw_mdp;
+	int i, ret = 0;
+
+	SDE_DEBUG("intf %d\n", p->intf_idx - INTF_0);
+
+	cmd_enc = kzalloc(sizeof(*cmd_enc), GFP_KERNEL);
+	if (!cmd_enc) {
+		ret = -ENOMEM;
+		SDE_ERROR("failed to allocate\n");
+		goto fail;
+	}
+	phys_enc = &cmd_enc->base;
+
+	hw_mdp = sde_rm_get_mdp(&p->sde_kms->rm);
+	if (IS_ERR_OR_NULL(hw_mdp)) {
+		ret = hw_mdp ? PTR_ERR(hw_mdp) : -EINVAL; /* NULL would yield 0 */
+		SDE_ERROR("failed to get mdptop\n");
+		goto fail_mdp_init;
+	}
+	phys_enc->hw_mdptop = hw_mdp;
+
+	cmd_enc->intf_idx = p->intf_idx;
+	phys_enc->intf_idx = p->intf_idx;
+
+	sde_encoder_phys_cmd_init_ops(&phys_enc->ops);
+	phys_enc->parent = p->parent;
+	phys_enc->parent_ops = p->parent_ops;
+	phys_enc->sde_kms = p->sde_kms;
+	phys_enc->split_role = p->split_role;
+	phys_enc->intf_mode = INTF_MODE_CMD;
+	phys_enc->enc_spinlock = p->enc_spinlock;
+	cmd_enc->stream_sel = 0;
+	phys_enc->enable_state = SDE_ENC_DISABLED;
+	for (i = 0; i < INTR_IDX_MAX; i++)
+		INIT_LIST_HEAD(&cmd_enc->irq_cb[i].list);
+	atomic_set(&phys_enc->vblank_refcount, 0);
+	atomic_set(&phys_enc->pending_kickoff_cnt, 0);
+	init_waitqueue_head(&phys_enc->pending_kickoff_wq);
+
+	SDE_DEBUG_CMDENC(cmd_enc, "created\n");
+
+	return phys_enc;
+
+fail_mdp_init:
+	kfree(cmd_enc);
+fail:
+	return ERR_PTR(ret);
+}
diff -Nruw linux-4.4.115-fbx/drivers/gpu/drm/msm/sde./sde_encoder_phys.h linux-4.4.115-fbx/drivers/gpu/drm/msm/sde/sde_encoder_phys.h
--- linux-4.4.115-fbx/drivers/gpu/drm/msm/sde./sde_encoder_phys.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/gpu/drm/msm/sde/sde_encoder_phys.h	2019-01-22 16:16:23.511246479 +0100
@@ -0,0 +1,408 @@
+/*
+ * Copyright (c) 2015-2016 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __SDE_ENCODER_PHYS_H__
+#define __SDE_ENCODER_PHYS_H__
+
+#include <linux/jiffies.h>
+
+#include "sde_kms.h"
+#include "sde_hw_intf.h"
+#include "sde_hw_pingpong.h"
+#include "sde_hw_ctl.h"
+#include "sde_hw_top.h"
+#include "sde_hw_wb.h"
+#include "sde_hw_cdm.h"
+#include "sde_encoder.h"
+#include "sde_connector.h"
+
+#define SDE_ENCODER_NAME_MAX	16
+
+/* wait at most 2 vsyncs at the lowest refresh rate (24 Hz) ~= 84 ms */
+#define KICKOFF_TIMEOUT_MS		84
+#define KICKOFF_TIMEOUT_JIFFIES		msecs_to_jiffies(KICKOFF_TIMEOUT_MS)
+
+/**
+ * enum sde_enc_split_role - Role this physical encoder will play in a
+ *	split-panel configuration, where one panel is master, and others slaves.
+ *	Masters have extra responsibilities, like managing the VBLANK IRQ.
+ * @ENC_ROLE_SOLO:	This is the one and only panel. This encoder is master.
+ * @ENC_ROLE_MASTER:	This encoder is the master of a split panel config.
+ * @ENC_ROLE_SLAVE:	This encoder is not the master of a split panel config.
+ */
+enum sde_enc_split_role {
+	ENC_ROLE_SOLO,
+	ENC_ROLE_MASTER,
+	ENC_ROLE_SLAVE
+};
+
+struct sde_encoder_phys;
+
+/**
+ * struct sde_encoder_virt_ops - Interface the containing virtual encoder
+ *	provides for the physical encoders to use to callback.
+ * @handle_vblank_virt:	Notify virtual encoder of vblank IRQ reception
+ *			Note: This is called from IRQ handler context.
+ * @handle_underrun_virt: Notify virtual encoder of underrun IRQ reception
+ *			Note: This is called from IRQ handler context.
+ * @handle_frame_done:	Notify virtual encoder that this phys encoder
+ *			completes last request frame.
+ */
+struct sde_encoder_virt_ops {
+	void (*handle_vblank_virt)(struct drm_encoder *,
+			struct sde_encoder_phys *phys);
+	void (*handle_underrun_virt)(struct drm_encoder *,
+			struct sde_encoder_phys *phys);
+	void (*handle_frame_done)(struct drm_encoder *,
+			struct sde_encoder_phys *phys, u32 event);
+};
+
+/**
+ * struct sde_encoder_phys_ops - Interface the physical encoders provide to
+ *	the containing virtual encoder.
+ * @is_master:			Whether this phys_enc is the current master
+ *				encoder. Can be switched at enable time. Based
+ *				on split_role and current mode (CMD/VID).
+ * @mode_fixup:			DRM Call. Fixup a DRM mode.
+ * @mode_set:			DRM Call. Set a DRM mode.
+ *				This likely caches the mode, for use at enable.
+ * @enable:			DRM Call. Enable a DRM mode.
+ * @disable:			DRM Call. Disable mode.
+ * @atomic_check:		DRM Call. Atomic check new DRM state.
+ * @destroy:			DRM Call. Destroy and release resources.
+ * @get_hw_resources:		Populate the structure with the hardware
+ *				resources that this phys_enc is using.
+ *				Expect no overlap between phys_encs.
+ * @control_vblank_irq:		Register/Deregister for VBLANK IRQ
+ * @wait_for_commit_done:	Wait for hardware to have flushed the
+ *				current pending frames to hardware
+ * @prepare_for_kickoff:	Do any work necessary prior to a kickoff
+ *				For CMD encoder, may wait for previous tx done
+ * @handle_post_kickoff:	Do any necessary post-kickoff work
+ * @trigger_start:		Process start event on physical encoder
+ * @needs_single_flush:		Whether encoder slaves need to be flushed
+ * @setup_misr:		Sets up MISR, enabling or disabling it based on the
+ *			debugfs control
+ * @collect_misr:		Collects MISR data on frame update
+ */
+struct sde_encoder_phys_ops {
+	bool (*is_master)(struct sde_encoder_phys *encoder);
+	bool (*mode_fixup)(struct sde_encoder_phys *encoder,
+			const struct drm_display_mode *mode,
+			struct drm_display_mode *adjusted_mode);
+	void (*mode_set)(struct sde_encoder_phys *encoder,
+			struct drm_display_mode *mode,
+			struct drm_display_mode *adjusted_mode);
+	void (*enable)(struct sde_encoder_phys *encoder);
+	void (*disable)(struct sde_encoder_phys *encoder);
+	int (*atomic_check)(struct sde_encoder_phys *encoder,
+			    struct drm_crtc_state *crtc_state,
+			    struct drm_connector_state *conn_state);
+	void (*destroy)(struct sde_encoder_phys *encoder);
+	void (*get_hw_resources)(struct sde_encoder_phys *encoder,
+			struct sde_encoder_hw_resources *hw_res,
+			struct drm_connector_state *conn_state);
+	int (*control_vblank_irq)(struct sde_encoder_phys *enc, bool enable);
+	int (*wait_for_commit_done)(struct sde_encoder_phys *phys_enc);
+	void (*prepare_for_kickoff)(struct sde_encoder_phys *phys_enc);
+	void (*handle_post_kickoff)(struct sde_encoder_phys *phys_enc);
+	void (*trigger_start)(struct sde_encoder_phys *phys_enc);
+	bool (*needs_single_flush)(struct sde_encoder_phys *phys_enc);
+
+	void (*setup_misr)(struct sde_encoder_phys *phys_encs,
+			struct sde_misr_params *misr_map);
+	void (*collect_misr)(struct sde_encoder_phys *phys_enc,
+			struct sde_misr_params *misr_map);
+};
+
+/**
+ * enum sde_enc_enable_state - current enabled state of the physical encoder
+ * @SDE_ENC_DISABLED:	Encoder is disabled
+ * @SDE_ENC_ENABLING:	Encoder transitioning to enabled
+ *			Events bounding transition are encoder type specific
+ * @SDE_ENC_ENABLED:	Encoder is enabled
+ */
+enum sde_enc_enable_state {
+	SDE_ENC_DISABLED,
+	SDE_ENC_ENABLING,
+	SDE_ENC_ENABLED
+};
+
+/**
+ * enum sde_intr_idx - sde encoder interrupt index
+ * @INTR_IDX_VSYNC:    Vsync interrupt for video mode panel
+ * @INTR_IDX_PINGPONG: Pingpong done interrupt for cmd mode panel
+ * @INTR_IDX_UNDERRUN: Underrun interrupt for video and cmd mode panel
+ * @INTR_IDX_RDPTR:    Readpointer done interrupt for cmd mode panel
+ */
+enum sde_intr_idx {
+	INTR_IDX_VSYNC,
+	INTR_IDX_PINGPONG,
+	INTR_IDX_UNDERRUN,
+	INTR_IDX_RDPTR,
+	INTR_IDX_MAX,
+};
+
+/**
+ * struct sde_encoder_phys - physical encoder that drives a single INTF block
+ *	tied to a specific panel / sub-panel. Abstract type, sub-classed by
+ *	phys_vid or phys_cmd for video mode or command mode encs respectively.
+ * @parent:		Pointer to the containing virtual encoder
+ * @connector:		If a mode is set, cached pointer to the active connector
+ * @ops:		Operations exposed to the virtual encoder
+ * @parent_ops:		Callbacks exposed by the parent to the phys_enc
+ * @hw_mdptop:		Hardware interface to the top registers
+ * @hw_ctl:		Hardware interface to the ctl registers
+ * @hw_cdm:		Hardware interface to the cdm registers
+ * @cdm_cfg:		Chroma-down hardware configuration
+ * @hw_pp:		Hardware interface to the ping pong registers
+ * @sde_kms:		Pointer to the sde_kms top level
+ * @cached_mode:	DRM mode cached at mode_set time, acted on in enable
+ * @misr_map:		Interface for setting and collecting MISR data
+ * @split_role:		Role to play in a split-panel configuration
+ * @intf_mode:		Interface mode
+ * @intf_idx:		Interface index on sde hardware
+ * @enc_cdm_csc:	Cached CSC type of CDM block
+ * @enc_spinlock:	Virtual-Encoder-Wide Spin Lock for IRQ purposes
+ * @enable_state:	Enable state tracking
+ * @vblank_refcount:	Reference count of vblank request
+ * @vsync_cnt:		Vsync count for the physical encoder
+ * @underrun_cnt:	Underrun count for the physical encoder
+ * @pending_kickoff_cnt:	Atomic counter tracking the number of kickoffs
+ *				vs. the number of done/vblank irqs. Should hover
+ *				between 0-2. Incremented when a new kickoff is
+ *				scheduled, decremented in the irq handler
+ * @pending_kickoff_wq:		Wait queue for blocking until kickoff completes
+ */
+struct sde_encoder_phys {
+	struct drm_encoder *parent;
+	struct drm_connector *connector;
+	struct sde_encoder_phys_ops ops;
+	struct sde_encoder_virt_ops parent_ops;
+	struct sde_hw_mdp *hw_mdptop;
+	struct sde_hw_ctl *hw_ctl;
+	struct sde_hw_cdm *hw_cdm;
+	struct sde_hw_cdm_cfg cdm_cfg;
+	struct sde_hw_pingpong *hw_pp;
+	struct sde_kms *sde_kms;
+	struct drm_display_mode cached_mode;
+	struct sde_misr_params *misr_map;
+	enum sde_enc_split_role split_role;
+	enum sde_intf_mode intf_mode;
+	enum sde_intf intf_idx;
+	enum sde_csc_type enc_cdm_csc;
+	spinlock_t *enc_spinlock;
+	enum sde_enc_enable_state enable_state;
+	atomic_t vblank_refcount;
+	atomic_t vsync_cnt;
+	atomic_t underrun_cnt;
+	atomic_t pending_kickoff_cnt;
+	wait_queue_head_t pending_kickoff_wq;
+};
+
+static inline int sde_encoder_phys_inc_pending(struct sde_encoder_phys *phys)
+{
+	return atomic_inc_return(&phys->pending_kickoff_cnt);
+}
+
+/**
+ * struct sde_encoder_phys_vid - sub-class of sde_encoder_phys to handle video
+ *	mode specific operations
+ * @base:	Baseclass physical encoder structure
+ * @irq_idx:	IRQ interface lookup index
+ * @irq_cb:	interrupt callback
+ * @hw_intf:	Hardware interface to the intf registers
+ */
+struct sde_encoder_phys_vid {
+	struct sde_encoder_phys base;
+	int irq_idx[INTR_IDX_MAX];
+	struct sde_irq_callback irq_cb[INTR_IDX_MAX];
+	struct sde_hw_intf *hw_intf;
+};
+
+/**
+ * struct sde_encoder_phys_cmd - sub-class of sde_encoder_phys to handle command
+ *	mode specific operations
+ * @base:	Baseclass physical encoder structure
+ * @intf_idx:	Intf Block index used by this phys encoder
+ * @stream_sel:	Stream selection for multi-stream interfaces
+ * @irq_idx:	IRQ lookup indices, one per sde_intr_idx; for CMD encoders,
+ *		VBLANK is driven by the PP RD Done (read pointer) IRQ
+ * @irq_cb:	interrupt callback
+ */
+struct sde_encoder_phys_cmd {
+	struct sde_encoder_phys base;
+	int intf_idx;
+	int stream_sel;
+	int irq_idx[INTR_IDX_MAX];
+	struct sde_irq_callback irq_cb[INTR_IDX_MAX];
+};
+
+/**
+ * struct sde_encoder_phys_wb - sub-class of sde_encoder_phys to handle
+ *	writeback specific operations
+ * @base:		Baseclass physical encoder structure
+ * @hw_wb:		Hardware interface to the wb registers
+ * @irq_idx:		IRQ interface lookup index
+ * @wbdone_timeout:	Timeout value for writeback done in msec
+ * @bypass_irqreg:	Bypass irq register/unregister if non-zero
+ * @wbdone_complete:	for wbdone irq synchronization
+ * @wb_cfg:		Writeback hardware configuration
+ * @intf_cfg:		Interface hardware configuration
+ * @wb_roi:		Writeback region-of-interest
+ * @wb_fmt:		Writeback pixel format
+ * @frame_count:	Counter of completed writeback operations
+ * @kickoff_count:	Counter of issued writeback operations
+ * @aspace:		address space identifier for non-secure/secure domain
+ * @wb_dev:		Pointer to writeback device
+ * @start_time:		Start time of writeback latest request
+ * @end_time:		End time of writeback latest request
+ * @wb_name:		Name of this writeback device
+ * @debugfs_root:	Root entry of writeback debugfs
+ */
+struct sde_encoder_phys_wb {
+	struct sde_encoder_phys base;
+	struct sde_hw_wb *hw_wb;
+	int irq_idx;
+	struct sde_irq_callback irq_cb;
+	u32 wbdone_timeout;
+	u32 bypass_irqreg;
+	struct completion wbdone_complete;
+	struct sde_hw_wb_cfg wb_cfg;
+	struct sde_hw_intf_cfg intf_cfg;
+	struct sde_rect wb_roi;
+	const struct sde_format *wb_fmt;
+	u32 frame_count;
+	u32 kickoff_count;
+	struct msm_gem_address_space *aspace[SDE_IOMMU_DOMAIN_MAX];
+	struct sde_wb_device *wb_dev;
+	ktime_t start_time;
+	ktime_t end_time;
+#ifdef CONFIG_DEBUG_FS
+	char wb_name[SDE_ENCODER_NAME_MAX];
+	struct dentry *debugfs_root;
+#endif
+};
+
+/**
+ * struct sde_enc_phys_init_params - initialization parameters for phys encs
+ * @sde_kms:		Pointer to the sde_kms top level
+ * @parent:		Pointer to the containing virtual encoder
+ * @parent_ops:		Callbacks exposed by the parent to the phys_enc
+ * @split_role:		Role to play in a split-panel configuration
+ * @intf_idx:		Interface index this phys_enc will control
+ * @wb_idx:		Writeback index this phys_enc will control
+ * @enc_spinlock:	Virtual-Encoder-Wide Spin Lock for IRQ purposes
+ */
+struct sde_enc_phys_init_params {
+	struct sde_kms *sde_kms;
+	struct drm_encoder *parent;
+	struct sde_encoder_virt_ops parent_ops;
+	enum sde_enc_split_role split_role;
+	enum sde_intf intf_idx;
+	enum sde_wb wb_idx;
+	spinlock_t *enc_spinlock;
+};
+
+/**
+ * sde_encoder_phys_vid_init - Construct a new video mode physical encoder
+ * @p:	Pointer to init params structure
+ * Return: Error code or newly allocated encoder
+ */
+struct sde_encoder_phys *sde_encoder_phys_vid_init(
+		struct sde_enc_phys_init_params *p);
+
+/**
+ * sde_encoder_phys_cmd_init - Construct a new command mode physical encoder
+ * @p:	Pointer to init params structure
+ * Return: Error code or newly allocated encoder
+ */
+struct sde_encoder_phys *sde_encoder_phys_cmd_init(
+		struct sde_enc_phys_init_params *p);
+
+/**
+ * sde_encoder_phys_wb_init - Construct a new writeback physical encoder
+ * @p:	Pointer to init params structure
+ * Return: Error code or newly allocated encoder
+ */
+#ifdef CONFIG_DRM_SDE_WB
+struct sde_encoder_phys *sde_encoder_phys_wb_init(
+		struct sde_enc_phys_init_params *p);
+#else
+static inline
+struct sde_encoder_phys *sde_encoder_phys_wb_init(
+		struct sde_enc_phys_init_params *p)
+{
+	return NULL;
+}
+#endif
+
+void sde_encoder_phys_setup_cdm(struct sde_encoder_phys *phys_enc,
+		const struct sde_format *format, u32 output_type,
+		struct sde_rect *roi);
+
+/**
+ * sde_encoder_helper_trigger_start - control start helper function
+ *	This helper function may be optionally specified by physical
+ *	encoders if they require ctl_start triggering.
+ * @phys_enc: Pointer to physical encoder structure
+ */
+void sde_encoder_helper_trigger_start(struct sde_encoder_phys *phys_enc);
+
+/**
+ * sde_encoder_helper_wait_event_timeout - wait for event with timeout
+ *	taking into account that jiffies may jump between reads leading to
+ *	incorrectly detected timeouts. Prevent failure in this scenario by
+ *	making sure that elapsed time during wait is valid.
+ * @drm_id: drm object id for logging
+ * @hw_id: hw instance id for logging
+ * @wq: wait queue structure
+ * @cnt: atomic counter to wait on
+ * @timeout_ms: timeout value in milliseconds
+ */
+int sde_encoder_helper_wait_event_timeout(
+		int32_t drm_id,
+		int32_t hw_id,
+		wait_queue_head_t *wq,
+		atomic_t *cnt,
+		s64 timeout_ms);
+
+
+static inline enum sde_3d_blend_mode sde_encoder_helper_get_3d_blend_mode(
+		struct sde_encoder_phys *phys_enc)
+{
+	enum sde_rm_topology_name topology;
+
+	topology = sde_connector_get_topology_name(phys_enc->connector);
+	if (phys_enc->split_role == ENC_ROLE_SOLO &&
+			topology == SDE_RM_TOPOLOGY_DUALPIPEMERGE)
+		return BLEND_3D_H_ROW_INT;
+
+	return BLEND_3D_NONE;
+}
+
+/**
+ * sde_encoder_helper_split_config - split display configuration helper function
+ *	This helper function may be used by physical encoders to configure
+ *	the split display related registers.
+ * @phys_enc: Pointer to physical encoder structure
+ * @interface: enum sde_intf setting
+ */
+void sde_encoder_helper_split_config(
+		struct sde_encoder_phys *phys_enc,
+		enum sde_intf interface);
+
+#endif /* __SDE_ENCODER_PHYS_H__ */
diff -Nruw linux-4.4.115-fbx/drivers/gpu/drm/msm/sde./sde_encoder_phys_vid.c linux-4.4.115-fbx/drivers/gpu/drm/msm/sde/sde_encoder_phys_vid.c
--- linux-4.4.115-fbx/drivers/gpu/drm/msm/sde./sde_encoder_phys_vid.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/gpu/drm/msm/sde/sde_encoder_phys_vid.c	2019-10-29 09:26:23.637203119 +0100
@@ -0,0 +1,984 @@
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt)	"[drm:%s:%d] " fmt, __func__, __LINE__
+#include "sde_encoder_phys.h"
+#include "sde_hw_interrupts.h"
+#include "sde_core_irq.h"
+#include "sde_formats.h"
+
+#define SDE_DEBUG_VIDENC(e, fmt, ...) SDE_DEBUG("enc%d intf%d " fmt, \
+		(e) && (e)->base.parent ? \
+		(e)->base.parent->base.id : -1, \
+		(e) && (e)->hw_intf ? \
+		(e)->hw_intf->idx - INTF_0 : -1, ##__VA_ARGS__)
+
+#define SDE_ERROR_VIDENC(e, fmt, ...) SDE_ERROR("enc%d intf%d " fmt, \
+		(e) && (e)->base.parent ? \
+		(e)->base.parent->base.id : -1, \
+		(e) && (e)->hw_intf ? \
+		(e)->hw_intf->idx - INTF_0 : -1, ##__VA_ARGS__)
+
+#define to_sde_encoder_phys_vid(x) \
+	container_of(x, struct sde_encoder_phys_vid, base)
+
+static bool sde_encoder_phys_vid_is_master(
+		struct sde_encoder_phys *phys_enc)
+{
+	return phys_enc->split_role != ENC_ROLE_SLAVE;
+}
+
+static void drm_mode_to_intf_timing_params(
+		const struct sde_encoder_phys_vid *vid_enc,
+		const struct drm_display_mode *mode,
+		struct intf_timing_params *timing)
+{
+	memset(timing, 0, sizeof(*timing));
+	/*
+	 * https://www.kernel.org/doc/htmldocs/drm/ch02s05.html
+	 *  Active Region      Front Porch   Sync   Back Porch
+	 * <-----------------><------------><-----><----------->
+	 * <- [hv]display --->
+	 * <--------- [hv]sync_start ------>
+	 * <----------------- [hv]sync_end ------->
+	 * <---------------------------- [hv]total ------------->
+	 */
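+	/*
+	 * Worked example (editorial), using standard CEA-861 1080p60 timing:
+	 * hdisplay=1920, hsync_start=2008, hsync_end=2052, htotal=2200 gives
+	 * h_front_porch=88, hsync_pulse_width=44, h_back_porch=148;
+	 * vdisplay=1080, vsync_start=1084, vsync_end=1089, vtotal=1125 gives
+	 * v_front_porch=4, vsync_pulse_width=5, v_back_porch=36.
+	 */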
+	timing->width = mode->hdisplay;	/* active width */
+	timing->height = mode->vdisplay;	/* active height */
+	timing->xres = timing->width;
+	timing->yres = timing->height;
+	timing->h_back_porch = mode->htotal - mode->hsync_end;
+	timing->h_front_porch = mode->hsync_start - mode->hdisplay;
+	timing->v_back_porch = mode->vtotal - mode->vsync_end;
+	timing->v_front_porch = mode->vsync_start - mode->vdisplay;
+	timing->hsync_pulse_width = mode->hsync_end - mode->hsync_start;
+	timing->vsync_pulse_width = mode->vsync_end - mode->vsync_start;
+	timing->hsync_polarity = (mode->flags & DRM_MODE_FLAG_NHSYNC) ? 1 : 0;
+	timing->vsync_polarity = (mode->flags & DRM_MODE_FLAG_NVSYNC) ? 1 : 0;
+	timing->border_clr = 0;
+	timing->underflow_clr = 0xff;
+	timing->hsync_skew = mode->hskew;
+
+	/*
+	 * For edp only:
+	 * DISPLAY_V_START = (VBP * HCYCLE) + HBP
+	 * DISPLAY_V_END = (VBP + VACTIVE) * HCYCLE - 1 - HFP
+	 */
+	/*
+	 * if (vid_enc->hw->cap->type == INTF_EDP) {
+	 * display_v_start += mode->htotal - mode->hsync_start;
+	 * display_v_end -= mode->hsync_start - mode->hdisplay;
+	 * }
+	 */
+}
+
+static inline u32 get_horizontal_total(const struct intf_timing_params *timing)
+{
+	u32 active = timing->xres;
+	u32 inactive =
+	    timing->h_back_porch + timing->h_front_porch +
+	    timing->hsync_pulse_width;
+	return active + inactive;
+}
+
+static inline u32 get_vertical_total(const struct intf_timing_params *timing)
+{
+	u32 active = timing->yres;
+	u32 inactive =
+	    timing->v_back_porch + timing->v_front_porch +
+	    timing->vsync_pulse_width;
+	return active + inactive;
+}
+
+/*
+ * programmable_fetch_get_num_lines:
+ *	Number of fetch lines in vertical front porch
+ * @vid_enc: Pointer to video physical encoder structure
+ * @timing: Pointer to the intf timing information for the requested mode
+ *
+ * Returns the number of fetch lines in vertical front porch at which mdp
+ * can start fetching the next frame.
+ *
+ * Number of needed prefetch lines is anything that cannot be absorbed in the
+ * start of frame time (back porch + vsync pulse width).
+ *
+ * Some panels have a very large VFP; however, we only need enough lines to
+ * cover the chip's worst-case latency.
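+ *
+ * Worked example (editorial, with illustrative numbers): with a chip worst
+ * case of 25 lines and vbp+vsw = 41, no prefetch is needed; with
+ * vbp+vsw = 9, 16 prefetch lines are needed, and a panel with only a
+ * 4-line VFP would fall back to using its entire VFP.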
+ */
+static u32 programmable_fetch_get_num_lines(
+		struct sde_encoder_phys_vid *vid_enc,
+		const struct intf_timing_params *timing)
+{
+	u32 worst_case_needed_lines =
+	    vid_enc->hw_intf->cap->prog_fetch_lines_worst_case;
+	u32 start_of_frame_lines =
+	    timing->v_back_porch + timing->vsync_pulse_width;
+	u32 needed_vfp_lines = worst_case_needed_lines - start_of_frame_lines;
+	u32 actual_vfp_lines = 0;
+
+	/* Fetch must be outside active lines, otherwise undefined. */
+	if (start_of_frame_lines >= worst_case_needed_lines) {
+		SDE_DEBUG_VIDENC(vid_enc,
+				"prog fetch is not needed, large vbp+vsw\n");
+		actual_vfp_lines = 0;
+	} else if (timing->v_front_porch < needed_vfp_lines) {
+		/* Warn fetch needed, but not enough porch in panel config */
+		pr_warn_once
+			("low vbp+vfp may lead to perf issues in some cases\n");
+		SDE_DEBUG_VIDENC(vid_enc,
+				"less vfp than fetch req, using entire vfp\n");
+		actual_vfp_lines = timing->v_front_porch;
+	} else {
+		SDE_DEBUG_VIDENC(vid_enc, "room in vfp for needed prefetch\n");
+		actual_vfp_lines = needed_vfp_lines;
+	}
+
+	SDE_DEBUG_VIDENC(vid_enc,
+		"v_front_porch %u v_back_porch %u vsync_pulse_width %u\n",
+		timing->v_front_porch, timing->v_back_porch,
+		timing->vsync_pulse_width);
+	SDE_DEBUG_VIDENC(vid_enc,
+		"wc_lines %u needed_vfp_lines %u actual_vfp_lines %u\n",
+		worst_case_needed_lines, needed_vfp_lines, actual_vfp_lines);
+
+	return actual_vfp_lines;
+}
+
+/*
+ * programmable_fetch_config: Programs HW to prefetch lines by offsetting
+ *	the start of fetch into the vertical front porch for cases where the
+ *	vsync pulse width and vertical back porch time are insufficient
+ *
+ *	Gets the number of lines to prefetch, then calculates the VSYNC
+ *	counter value. The HW layer requires the VSYNC counter of the first
+ *	pixel of the target VFP line.
+ *
+ * @phys_enc: Pointer to physical encoder structure
+ * @timing: Pointer to the intf timing information for the requested mode
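+ *
+ * Worked example (editorial): with vert_total = 1125, horiz_total = 2200
+ * and 16 prefetch lines, fetch_start = (1125 - 16) * 2200 + 1 = 2439801.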
+ */
+static void programmable_fetch_config(struct sde_encoder_phys *phys_enc,
+				      const struct intf_timing_params *timing)
+{
+	struct sde_encoder_phys_vid *vid_enc =
+		to_sde_encoder_phys_vid(phys_enc);
+	struct intf_prog_fetch f = { 0 };
+	u32 vfp_fetch_lines = 0;
+	u32 horiz_total = 0;
+	u32 vert_total = 0;
+	u32 vfp_fetch_start_vsync_counter = 0;
+	unsigned long lock_flags;
+
+	if (WARN_ON_ONCE(!vid_enc->hw_intf->ops.setup_prg_fetch))
+		return;
+
+	vfp_fetch_lines = programmable_fetch_get_num_lines(vid_enc, timing);
+	if (vfp_fetch_lines) {
+		vert_total = get_vertical_total(timing);
+		horiz_total = get_horizontal_total(timing);
+		vfp_fetch_start_vsync_counter =
+		    (vert_total - vfp_fetch_lines) * horiz_total + 1;
+		f.enable = 1;
+		f.fetch_start = vfp_fetch_start_vsync_counter;
+	}
+
+	SDE_DEBUG_VIDENC(vid_enc,
+		"vfp_fetch_lines %u vfp_fetch_start_vsync_counter %u\n",
+		vfp_fetch_lines, vfp_fetch_start_vsync_counter);
+
+	spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags);
+	vid_enc->hw_intf->ops.setup_prg_fetch(vid_enc->hw_intf, &f);
+	spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags);
+}
+
+static bool sde_encoder_phys_vid_mode_fixup(
+		struct sde_encoder_phys *phys_enc,
+		const struct drm_display_mode *mode,
+		struct drm_display_mode *adj_mode)
+{
+	if (phys_enc)
+		SDE_DEBUG_VIDENC(to_sde_encoder_phys_vid(phys_enc), "\n");
+
+	/*
+	 * Leave the adjusted mode untouched: modifying it here has
+	 * consequences when the mode comes back to us in mode_set.
+	 */
+	return true;
+}
+
+static void sde_encoder_phys_vid_setup_timing_engine(
+		struct sde_encoder_phys *phys_enc)
+{
+	struct sde_encoder_phys_vid *vid_enc;
+	struct drm_display_mode mode;
+	struct intf_timing_params timing_params = { 0 };
+	const struct sde_format *fmt = NULL;
+	u32 fmt_fourcc = DRM_FORMAT_RGB888;
+	unsigned long lock_flags;
+	struct sde_hw_intf_cfg intf_cfg = { 0 };
+
+	if (!phys_enc || !phys_enc->hw_ctl ||
+		!phys_enc->hw_ctl->ops.setup_intf_cfg) {
+		SDE_ERROR("invalid encoder %d\n", phys_enc != 0);
+		return;
+	}
+
+	mode = phys_enc->cached_mode;
+	vid_enc = to_sde_encoder_phys_vid(phys_enc);
+	if (!vid_enc->hw_intf->ops.setup_timing_gen) {
+		SDE_ERROR("timing engine setup is not supported\n");
+		return;
+	}
+
+	SDE_DEBUG_VIDENC(vid_enc, "enabling mode:\n");
+	drm_mode_debug_printmodeline(&mode);
+
+	if (phys_enc->split_role != ENC_ROLE_SOLO ||
+	    (mode.private_flags & MSM_MODE_FLAG_COLOR_FORMAT_YCBCR420)) {
+		mode.hdisplay >>= 1;
+		mode.htotal >>= 1;
+		mode.hsync_start >>= 1;
+		mode.hsync_end >>= 1;
+		mode.hskew >>= 1;
+
+		SDE_DEBUG_VIDENC(vid_enc,
+			"split_role %d, halve horizontal %d %d %d %d %d\n",
+			phys_enc->split_role,
+			mode.hdisplay, mode.htotal,
+			mode.hsync_start, mode.hsync_end,
+			mode.hskew);
+	}
+
+	drm_mode_to_intf_timing_params(vid_enc, &mode, &timing_params);
+
+	fmt = sde_get_sde_format(fmt_fourcc);
+	SDE_DEBUG_VIDENC(vid_enc, "fmt_fourcc 0x%X\n", fmt_fourcc);
+
+	intf_cfg.intf = vid_enc->hw_intf->idx;
+	intf_cfg.intf_mode_sel = SDE_CTL_MODE_SEL_VID;
+	intf_cfg.stream_sel = 0; /* Don't care value for video mode */
+	intf_cfg.mode_3d = sde_encoder_helper_get_3d_blend_mode(phys_enc);
+
+	spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags);
+	vid_enc->hw_intf->ops.setup_timing_gen(vid_enc->hw_intf,
+			&timing_params, fmt);
+	phys_enc->hw_ctl->ops.setup_intf_cfg(phys_enc->hw_ctl, &intf_cfg);
+	spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags);
+
+	programmable_fetch_config(phys_enc, &timing_params);
+}
+
+static void sde_encoder_phys_vid_vblank_irq(void *arg, int irq_idx)
+{
+	struct sde_encoder_phys_vid *vid_enc = arg;
+	struct sde_encoder_phys *phys_enc;
+	struct sde_hw_ctl *hw_ctl;
+	unsigned long lock_flags;
+	u32 flush_register = 0;
+	int new_cnt = -1, old_cnt = -1;
+
+	if (!vid_enc)
+		return;
+
+	phys_enc = &vid_enc->base;
+	hw_ctl = phys_enc->hw_ctl;
+
+	if (phys_enc->parent_ops.handle_vblank_virt)
+		phys_enc->parent_ops.handle_vblank_virt(phys_enc->parent,
+				phys_enc);
+
+	old_cnt  = atomic_read(&phys_enc->pending_kickoff_cnt);
+
+	/*
+	 * only decrement the pending flush count if we've actually flushed
+	 * hardware. due to sw irq latency, vblank may have already happened
+	 * so we need to double-check with hw that it accepted the flush bits
+	 */
+	spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags);
+	if (hw_ctl && hw_ctl->ops.get_flush_register)
+		flush_register = hw_ctl->ops.get_flush_register(hw_ctl);
+
+	if (flush_register == 0)
+		new_cnt = atomic_add_unless(&phys_enc->pending_kickoff_cnt,
+				-1, 0);
+	spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags);
+
+	SDE_EVT32_IRQ(DRMID(phys_enc->parent), vid_enc->hw_intf->idx - INTF_0,
+			old_cnt, new_cnt, flush_register);
+
+	/* Signal any waiting atomic commit thread */
+	wake_up_all(&phys_enc->pending_kickoff_wq);
+}
+
+static void sde_encoder_phys_vid_underrun_irq(void *arg, int irq_idx)
+{
+	struct sde_encoder_phys_vid *vid_enc = arg;
+	struct sde_encoder_phys *phys_enc;
+
+	if (!vid_enc)
+		return;
+
+	phys_enc = &vid_enc->base;
+	if (phys_enc->parent_ops.handle_underrun_virt)
+		phys_enc->parent_ops.handle_underrun_virt(phys_enc->parent,
+			phys_enc);
+}
+
+static bool _sde_encoder_phys_is_ppsplit(struct sde_encoder_phys *phys_enc)
+{
+	enum sde_rm_topology_name topology;
+
+	if (!phys_enc)
+		return false;
+
+	topology = sde_connector_get_topology_name(phys_enc->connector);
+	if (topology == SDE_RM_TOPOLOGY_PPSPLIT)
+		return true;
+
+	return false;
+}
+
+static bool sde_encoder_phys_vid_needs_single_flush(
+		struct sde_encoder_phys *phys_enc)
+{
+	return phys_enc && _sde_encoder_phys_is_ppsplit(phys_enc);
+}
+
+static int sde_encoder_phys_vid_register_irq(struct sde_encoder_phys *phys_enc,
+	enum sde_intr_type intr_type, int idx,
+	void (*irq_func)(void *, int), const char *irq_name)
+{
+	struct sde_encoder_phys_vid *vid_enc;
+	int ret = 0;
+
+	if (!phys_enc) {
+		SDE_ERROR("invalid encoder\n");
+		return -EINVAL;
+	}
+
+	vid_enc = to_sde_encoder_phys_vid(phys_enc);
+	vid_enc->irq_idx[idx] = sde_core_irq_idx_lookup(phys_enc->sde_kms,
+			intr_type, vid_enc->hw_intf->idx);
+	if (vid_enc->irq_idx[idx] < 0) {
+		SDE_ERROR_VIDENC(vid_enc,
+			"failed to lookup IRQ index for %s type:%d\n", irq_name,
+			intr_type);
+		return -EINVAL;
+	}
+
+	vid_enc->irq_cb[idx].func = irq_func;
+	vid_enc->irq_cb[idx].arg = vid_enc;
+	ret = sde_core_irq_register_callback(phys_enc->sde_kms,
+			vid_enc->irq_idx[idx], &vid_enc->irq_cb[idx]);
+	if (ret) {
+		SDE_ERROR_VIDENC(vid_enc,
+			"failed to register IRQ callback for %s\n", irq_name);
+		return ret;
+	}
+
+	ret = sde_core_irq_enable(phys_enc->sde_kms, &vid_enc->irq_idx[idx], 1);
+	if (ret) {
+		SDE_ERROR_VIDENC(vid_enc,
+			"enable IRQ for intr:%s failed, irq_idx %d\n",
+			irq_name, vid_enc->irq_idx[idx]);
+		/*
+		 * unregister callback on IRQ enable failure, while the
+		 * looked-up irq_idx is still valid
+		 */
+		sde_core_irq_unregister_callback(phys_enc->sde_kms,
+				vid_enc->irq_idx[idx], &vid_enc->irq_cb[idx]);
+		vid_enc->irq_idx[idx] = -EINVAL;
+		return ret;
+	}
+
+	SDE_DEBUG_VIDENC(vid_enc, "registered irq %s idx: %d\n",
+			irq_name, vid_enc->irq_idx[idx]);
+
+	return ret;
+}
+
+static int sde_encoder_phys_vid_unregister_irq(
+	struct sde_encoder_phys *phys_enc, int idx)
+{
+	struct sde_encoder_phys_vid *vid_enc;
+
+	if (!phys_enc) {
+		SDE_ERROR("invalid encoder\n");
+		goto end;
+	}
+
+	vid_enc = to_sde_encoder_phys_vid(phys_enc);
+	sde_core_irq_disable(phys_enc->sde_kms, &vid_enc->irq_idx[idx], 1);
+
+	sde_core_irq_unregister_callback(phys_enc->sde_kms,
+			vid_enc->irq_idx[idx], &vid_enc->irq_cb[idx]);
+
+	SDE_DEBUG_VIDENC(vid_enc, "unregistered %d\n", vid_enc->irq_idx[idx]);
+
+end:
+	return 0;
+}
+
+static void sde_encoder_phys_vid_mode_set(
+		struct sde_encoder_phys *phys_enc,
+		struct drm_display_mode *mode,
+		struct drm_display_mode *adj_mode)
+{
+	struct sde_rm *rm;
+	struct sde_rm_hw_iter iter;
+	int i, instance;
+	struct sde_encoder_phys_vid *vid_enc;
+
+	if (!phys_enc || !phys_enc->sde_kms) {
+		SDE_ERROR("invalid encoder\n");
+		return;
+	}
+
+	phys_enc->hw_ctl = NULL;
+	phys_enc->hw_cdm = NULL;
+
+	rm = &phys_enc->sde_kms->rm;
+	vid_enc = to_sde_encoder_phys_vid(phys_enc);
+	phys_enc->cached_mode = *adj_mode;
+	SDE_DEBUG_VIDENC(vid_enc, "caching mode:\n");
+	drm_mode_debug_printmodeline(adj_mode);
+
+	instance = phys_enc->split_role == ENC_ROLE_SLAVE ? 1 : 0;
+
+	/* Retrieve previously allocated HW Resources. Shouldn't fail */
+	sde_rm_init_hw_iter(&iter, phys_enc->parent->base.id, SDE_HW_BLK_CTL);
+	for (i = 0; i <= instance; i++) {
+		if (sde_rm_get_hw(rm, &iter))
+			phys_enc->hw_ctl = (struct sde_hw_ctl *)iter.hw;
+	}
+	if (IS_ERR_OR_NULL(phys_enc->hw_ctl)) {
+		SDE_ERROR_VIDENC(vid_enc, "failed to init ctl, %ld\n",
+				PTR_ERR(phys_enc->hw_ctl));
+		phys_enc->hw_ctl = NULL;
+		return;
+	}
+
+	/* CDM is optional */
+	sde_rm_init_hw_iter(&iter, phys_enc->parent->base.id, SDE_HW_BLK_CDM);
+	for (i = 0; i <= instance; i++) {
+		sde_rm_get_hw(rm, &iter);
+		if (i == instance)
+			phys_enc->hw_cdm = (struct sde_hw_cdm *) iter.hw;
+	}
+
+	if (IS_ERR(phys_enc->hw_cdm)) {
+		SDE_ERROR("CDM required but not allocated: %ld\n",
+				PTR_ERR(phys_enc->hw_cdm));
+		phys_enc->hw_cdm = NULL;
+	}
+}
+
+static int sde_encoder_phys_vid_control_vblank_irq(
+		struct sde_encoder_phys *phys_enc,
+		bool enable)
+{
+	int ret = 0;
+	struct sde_encoder_phys_vid *vid_enc;
+
+	if (!phys_enc) {
+		SDE_ERROR("invalid encoder\n");
+		return -EINVAL;
+	}
+
+	vid_enc = to_sde_encoder_phys_vid(phys_enc);
+
+	/* Slave encoders don't report vblank */
+	if (!sde_encoder_phys_vid_is_master(phys_enc))
+		return 0;
+
+	SDE_DEBUG_VIDENC(vid_enc, "[%pS] enable=%d/%d\n",
+			__builtin_return_address(0),
+			enable, atomic_read(&phys_enc->vblank_refcount));
+
+	SDE_EVT32(DRMID(phys_enc->parent), enable,
+			atomic_read(&phys_enc->vblank_refcount));
+
+	if (enable && atomic_inc_return(&phys_enc->vblank_refcount) == 1)
+		ret = sde_encoder_phys_vid_register_irq(phys_enc,
+			SDE_IRQ_TYPE_INTF_VSYNC,
+			INTR_IDX_VSYNC,
+			sde_encoder_phys_vid_vblank_irq, "vsync_irq");
+	else if (!enable && atomic_dec_return(&phys_enc->vblank_refcount) == 0)
+		ret = sde_encoder_phys_vid_unregister_irq(phys_enc,
+			INTR_IDX_VSYNC);
+
+	if (ret)
+		SDE_ERROR_VIDENC(vid_enc,
+				"control vblank irq error %d, enable %d\n",
+				ret, enable);
+
+	return ret;
+}
+
+static void sde_encoder_phys_vid_enable(struct sde_encoder_phys *phys_enc)
+{
+	struct msm_drm_private *priv;
+	struct sde_encoder_phys_vid *vid_enc;
+	struct sde_hw_intf *intf;
+	struct sde_hw_ctl *ctl;
+	struct sde_hw_cdm *hw_cdm = NULL;
+	struct drm_display_mode mode;
+	const struct sde_format *fmt = NULL;
+	u32 flush_mask = 0;
+	int ret;
+
+	if (!phys_enc || !phys_enc->parent || !phys_enc->parent->dev ||
+			!phys_enc->parent->dev->dev_private) {
+		SDE_ERROR("invalid encoder/device\n");
+		return;
+	}
+	hw_cdm = phys_enc->hw_cdm;
+	priv = phys_enc->parent->dev->dev_private;
+	mode = phys_enc->cached_mode;
+
+	vid_enc = to_sde_encoder_phys_vid(phys_enc);
+	intf = vid_enc->hw_intf;
+	ctl = phys_enc->hw_ctl;
+	if (!vid_enc->hw_intf || !phys_enc->hw_ctl) {
+		SDE_ERROR("invalid hw_intf %d hw_ctl %d\n",
+				vid_enc->hw_intf != NULL,
+				phys_enc->hw_ctl != NULL);
+		return;
+	}
+
+	SDE_DEBUG_VIDENC(vid_enc, "\n");
+
+	if (WARN_ON(!vid_enc->hw_intf->ops.enable_timing))
+		return;
+
+	sde_power_data_bus_bandwidth_ctrl(&priv->phandle,
+			phys_enc->sde_kms->core_client, true);
+
+	sde_encoder_helper_split_config(phys_enc, vid_enc->hw_intf->idx);
+
+	sde_encoder_phys_vid_setup_timing_engine(phys_enc);
+	ret = sde_encoder_phys_vid_control_vblank_irq(phys_enc, true);
+	if (ret)
+		goto end;
+
+	ret = sde_encoder_phys_vid_register_irq(phys_enc,
+		SDE_IRQ_TYPE_INTF_UNDER_RUN,
+		INTR_IDX_UNDERRUN,
+		sde_encoder_phys_vid_underrun_irq, "underrun");
+	if (ret) {
+		sde_encoder_phys_vid_control_vblank_irq(phys_enc, false);
+		goto end;
+	}
+
+	if (mode.private_flags & MSM_MODE_FLAG_COLOR_FORMAT_YCBCR420)
+		fmt = sde_get_sde_format(DRM_FORMAT_YUV420);
+
+	if (fmt) {
+		struct sde_rect hdmi_roi = { 0 };
+
+		hdmi_roi.w = mode.hdisplay;
+		hdmi_roi.h = mode.vdisplay;
+		sde_encoder_phys_setup_cdm(phys_enc, fmt,
+			CDM_CDWN_OUTPUT_HDMI, &hdmi_roi);
+	}
+
+	ctl->ops.get_bitmask_intf(ctl, &flush_mask, intf->idx);
+	if (ctl->ops.get_bitmask_cdm && hw_cdm)
+		ctl->ops.get_bitmask_cdm(ctl, &flush_mask, hw_cdm->idx);
+	ctl->ops.update_pending_flush(ctl, flush_mask);
+
+	SDE_DEBUG_VIDENC(vid_enc, "update pending flush ctl %d flush_mask %x\n",
+		ctl->idx - CTL_0, flush_mask);
+
+	/* ctl_flush & timing engine enable will be triggered by framework */
+	if (phys_enc->enable_state == SDE_ENC_DISABLED)
+		phys_enc->enable_state = SDE_ENC_ENABLING;
+
+end:
+	return;
+}
+
+static void sde_encoder_phys_vid_destroy(struct sde_encoder_phys *phys_enc)
+{
+	struct sde_encoder_phys_vid *vid_enc;
+
+	if (!phys_enc) {
+		SDE_ERROR("invalid encoder\n");
+		return;
+	}
+
+	vid_enc = to_sde_encoder_phys_vid(phys_enc);
+	SDE_DEBUG_VIDENC(vid_enc, "\n");
+	kfree(vid_enc);
+}
+
+static void sde_encoder_phys_vid_get_hw_resources(
+		struct sde_encoder_phys *phys_enc,
+		struct sde_encoder_hw_resources *hw_res,
+		struct drm_connector_state *conn_state)
+{
+	struct sde_encoder_phys_vid *vid_enc;
+	struct sde_mdss_cfg *vid_catalog;
+
+	if (!phys_enc || !hw_res) {
+		SDE_ERROR("invalid arg(s), enc %d hw_res %d conn_state %d\n",
+			phys_enc != NULL, hw_res != NULL, conn_state != NULL);
+		return;
+	}
+
+	vid_catalog = phys_enc->sde_kms->catalog;
+	vid_enc = to_sde_encoder_phys_vid(phys_enc);
+	if (!vid_enc->hw_intf || !vid_catalog) {
+		SDE_ERROR("invalid arg(s), hw_intf %d vid_catalog %d\n",
+			  vid_enc->hw_intf != NULL, vid_catalog != NULL);
+		return;
+	}
+
+	SDE_DEBUG_VIDENC(vid_enc, "\n");
+	if (vid_enc->hw_intf->idx > INTF_MAX) {
+		SDE_ERROR("invalid arg(s), idx %d\n",
+			  vid_enc->hw_intf->idx);
+		return;
+	}
+	hw_res->intfs[vid_enc->hw_intf->idx - INTF_0] = INTF_MODE_VIDEO;
+
+	if (vid_catalog->intf[vid_enc->hw_intf->idx - INTF_0].type
+			== INTF_HDMI)
+		hw_res->needs_cdm = true;
+	SDE_DEBUG_DRIVER("[vid] needs_cdm=%d\n", hw_res->needs_cdm);
+}
+
+static int sde_encoder_phys_vid_wait_for_vblank(
+		struct sde_encoder_phys *phys_enc, bool notify)
+{
+	struct sde_encoder_phys_vid *vid_enc =
+			to_sde_encoder_phys_vid(phys_enc);
+	u32 irq_status;
+	int ret;
+
+	if (!sde_encoder_phys_vid_is_master(phys_enc)) {
+		/* always signal done for slave video encoder */
+		if (notify && phys_enc->parent_ops.handle_frame_done)
+			phys_enc->parent_ops.handle_frame_done(
+					phys_enc->parent, phys_enc,
+					SDE_ENCODER_FRAME_EVENT_DONE);
+		return 0;
+	}
+
+	if (phys_enc->enable_state != SDE_ENC_ENABLED) {
+		SDE_ERROR("encoder not enabled\n");
+		return -EWOULDBLOCK;
+	}
+
+	SDE_EVT32(DRMID(phys_enc->parent), vid_enc->hw_intf->idx - INTF_0,
+			SDE_EVTLOG_FUNC_ENTRY);
+
+	/* Wait for kickoff to complete */
+	ret = sde_encoder_helper_wait_event_timeout(
+			DRMID(phys_enc->parent),
+			vid_enc->hw_intf->idx - INTF_0,
+			&phys_enc->pending_kickoff_wq,
+			&phys_enc->pending_kickoff_cnt,
+			KICKOFF_TIMEOUT_MS);
+	if (ret <= 0) {
+		irq_status = sde_core_irq_read(phys_enc->sde_kms,
+				vid_enc->irq_idx[INTR_IDX_VSYNC], true);
+		if (irq_status) {
+			SDE_EVT32(DRMID(phys_enc->parent),
+					vid_enc->hw_intf->idx - INTF_0);
+			SDE_DEBUG_VIDENC(vid_enc, "done, irq not triggered\n");
+			if (notify && phys_enc->parent_ops.handle_frame_done)
+				phys_enc->parent_ops.handle_frame_done(
+						phys_enc->parent, phys_enc,
+						SDE_ENCODER_FRAME_EVENT_DONE);
+			sde_encoder_phys_vid_vblank_irq(vid_enc,
+					INTR_IDX_VSYNC);
+			ret = 0;
+		} else {
+			SDE_EVT32(DRMID(phys_enc->parent),
+					vid_enc->hw_intf->idx - INTF_0);
+			SDE_ERROR_VIDENC(vid_enc, "kickoff timed out\n");
+			if (notify && phys_enc->parent_ops.handle_frame_done)
+				phys_enc->parent_ops.handle_frame_done(
+						phys_enc->parent, phys_enc,
+						SDE_ENCODER_FRAME_EVENT_ERROR);
+			ret = -ETIMEDOUT;
+		}
+	} else {
+		if (notify && phys_enc->parent_ops.handle_frame_done)
+			phys_enc->parent_ops.handle_frame_done(
+					phys_enc->parent, phys_enc,
+					SDE_ENCODER_FRAME_EVENT_DONE);
+		ret = 0;
+	}
+
+	return ret;
+}
+
+static int sde_encoder_phys_vid_wait_for_commit_done(
+		struct sde_encoder_phys *phys_enc)
+{
+	int ret;
+
+	ret = sde_encoder_phys_vid_wait_for_vblank(phys_enc, true);
+
+	return ret;
+}
+
+static void sde_encoder_phys_vid_prepare_for_kickoff(
+		struct sde_encoder_phys *phys_enc)
+{
+	struct sde_encoder_phys_vid *vid_enc;
+	struct sde_hw_ctl *ctl;
+	int rc;
+
+	if (!phys_enc) {
+		SDE_ERROR("invalid encoder\n");
+		return;
+	}
+	vid_enc = to_sde_encoder_phys_vid(phys_enc);
+
+	ctl = phys_enc->hw_ctl;
+	if (!ctl || !ctl->ops.wait_reset_status)
+		return;
+
+	/*
+	 * hw supports hardware initiated ctl reset, so before we kickoff a new
+	 * frame, need to check and wait for hw initiated ctl reset completion
+	 */
+	rc = ctl->ops.wait_reset_status(ctl);
+	if (rc) {
+		SDE_ERROR_VIDENC(vid_enc, "ctl %d reset failure: %d\n",
+				ctl->idx, rc);
+		SDE_DBG_DUMP("panic");
+	}
+}
+
+static void sde_encoder_phys_vid_disable(struct sde_encoder_phys *phys_enc)
+{
+	struct msm_drm_private *priv;
+	struct sde_encoder_phys_vid *vid_enc;
+	unsigned long lock_flags;
+	int ret;
+
+	if (!phys_enc || !phys_enc->parent || !phys_enc->parent->dev ||
+			!phys_enc->parent->dev->dev_private) {
+		SDE_ERROR("invalid encoder/device\n");
+		return;
+	}
+	priv = phys_enc->parent->dev->dev_private;
+
+	vid_enc = to_sde_encoder_phys_vid(phys_enc);
+	if (!vid_enc->hw_intf || !phys_enc->hw_ctl) {
+		SDE_ERROR("invalid hw_intf %d hw_ctl %d\n",
+				vid_enc->hw_intf != NULL,
+				phys_enc->hw_ctl != NULL);
+		return;
+	}
+
+	SDE_DEBUG_VIDENC(vid_enc, "\n");
+
+	if (WARN_ON(!vid_enc->hw_intf->ops.enable_timing))
+		return;
+
+	if (phys_enc->enable_state == SDE_ENC_DISABLED) {
+		SDE_ERROR("already disabled\n");
+		return;
+	}
+
+	spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags);
+	vid_enc->hw_intf->ops.enable_timing(vid_enc->hw_intf, 0);
+	if (sde_encoder_phys_vid_is_master(phys_enc))
+		sde_encoder_phys_inc_pending(phys_enc);
+	spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags);
+
+	/*
+	 * Wait for a vsync so we know ENABLE=0 has latched before the
+	 * (connector) source of the vsync gets disabled. Otherwise, if we
+	 * re-enable before the disable latches, we end up in a bad state
+	 * where some of the settings for the new modeset (such as the new
+	 * scanout buffer) do not latch properly.
+	 */
+	if (sde_encoder_phys_vid_is_master(phys_enc)) {
+		ret = sde_encoder_phys_vid_wait_for_vblank(phys_enc, false);
+		if (ret) {
+			atomic_set(&phys_enc->pending_kickoff_cnt, 0);
+			SDE_ERROR_VIDENC(vid_enc,
+					"failure waiting for disable: %d\n",
+					ret);
+			SDE_EVT32(DRMID(phys_enc->parent),
+					vid_enc->hw_intf->idx - INTF_0, ret);
+		}
+		sde_encoder_phys_vid_control_vblank_irq(phys_enc, false);
+	}
+
+	sde_power_data_bus_bandwidth_ctrl(&priv->phandle,
+			phys_enc->sde_kms->core_client, false);
+
+	if (atomic_read(&phys_enc->vblank_refcount))
+		SDE_ERROR_VIDENC(vid_enc, "invalid vblank refcount %d\n",
+				atomic_read(&phys_enc->vblank_refcount));
+
+	if (phys_enc->hw_cdm && phys_enc->hw_cdm->ops.disable) {
+		SDE_DEBUG_DRIVER("[cdm_disable]\n");
+		phys_enc->hw_cdm->ops.disable(phys_enc->hw_cdm);
+	}
+
+	phys_enc->enable_state = SDE_ENC_DISABLED;
+}
+
+static void sde_encoder_phys_vid_handle_post_kickoff(
+		struct sde_encoder_phys *phys_enc)
+{
+	unsigned long lock_flags;
+	struct sde_encoder_phys_vid *vid_enc;
+
+	if (!phys_enc) {
+		SDE_ERROR("invalid encoder\n");
+		return;
+	}
+
+	vid_enc = to_sde_encoder_phys_vid(phys_enc);
+	SDE_DEBUG_VIDENC(vid_enc, "enable_state %d\n", phys_enc->enable_state);
+
+	/*
+	 * Video mode must flush CTL before enabling timing engine
+	 * Video encoders need to turn on their interfaces now
+	 */
+	if (phys_enc->enable_state == SDE_ENC_ENABLING) {
+		SDE_EVT32(DRMID(phys_enc->parent),
+				vid_enc->hw_intf->idx - INTF_0);
+		spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags);
+		vid_enc->hw_intf->ops.enable_timing(vid_enc->hw_intf, 1);
+		spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags);
+		phys_enc->enable_state = SDE_ENC_ENABLED;
+	}
+}
+
+static void sde_encoder_phys_vid_setup_misr(struct sde_encoder_phys *phys_enc,
+			struct sde_misr_params *misr_map)
+{
+	struct sde_encoder_phys_vid *vid_enc =
+		to_sde_encoder_phys_vid(phys_enc);
+
+	if (vid_enc && vid_enc->hw_intf && vid_enc->hw_intf->ops.setup_misr)
+		vid_enc->hw_intf->ops.setup_misr(vid_enc->hw_intf, misr_map);
+}
+
+static void sde_encoder_phys_vid_collect_misr(struct sde_encoder_phys *phys_enc,
+			struct sde_misr_params *misr_map)
+{
+	struct sde_encoder_phys_vid *vid_enc =
+			to_sde_encoder_phys_vid(phys_enc);
+
+	if (vid_enc && vid_enc->hw_intf && vid_enc->hw_intf->ops.collect_misr)
+		vid_enc->hw_intf->ops.collect_misr(vid_enc->hw_intf, misr_map);
+}
+
+static void sde_encoder_phys_vid_init_ops(struct sde_encoder_phys_ops *ops)
+{
+	ops->is_master = sde_encoder_phys_vid_is_master;
+	ops->mode_set = sde_encoder_phys_vid_mode_set;
+	ops->mode_fixup = sde_encoder_phys_vid_mode_fixup;
+	ops->enable = sde_encoder_phys_vid_enable;
+	ops->disable = sde_encoder_phys_vid_disable;
+	ops->destroy = sde_encoder_phys_vid_destroy;
+	ops->get_hw_resources = sde_encoder_phys_vid_get_hw_resources;
+	ops->control_vblank_irq = sde_encoder_phys_vid_control_vblank_irq;
+	ops->wait_for_commit_done = sde_encoder_phys_vid_wait_for_commit_done;
+	ops->prepare_for_kickoff = sde_encoder_phys_vid_prepare_for_kickoff;
+	ops->handle_post_kickoff = sde_encoder_phys_vid_handle_post_kickoff;
+	ops->needs_single_flush = sde_encoder_phys_vid_needs_single_flush;
+	ops->setup_misr = sde_encoder_phys_vid_setup_misr;
+	ops->collect_misr = sde_encoder_phys_vid_collect_misr;
+}
+
+struct sde_encoder_phys *sde_encoder_phys_vid_init(
+		struct sde_enc_phys_init_params *p)
+{
+	struct sde_encoder_phys *phys_enc = NULL;
+	struct sde_encoder_phys_vid *vid_enc = NULL;
+	struct sde_rm_hw_iter iter;
+	struct sde_hw_mdp *hw_mdp;
+	int i, ret = 0;
+
+	if (!p) {
+		ret = -EINVAL;
+		goto fail;
+	}
+
+	vid_enc = kzalloc(sizeof(*vid_enc), GFP_KERNEL);
+	if (!vid_enc) {
+		ret = -ENOMEM;
+		goto fail;
+	}
+
+	phys_enc = &vid_enc->base;
+
+	hw_mdp = sde_rm_get_mdp(&p->sde_kms->rm);
+	if (IS_ERR_OR_NULL(hw_mdp)) {
+		ret = PTR_ERR(hw_mdp);
+		SDE_ERROR("failed to get mdptop\n");
+		goto fail;
+	}
+	phys_enc->hw_mdptop = hw_mdp;
+	phys_enc->intf_idx = p->intf_idx;
+
+	/*
+	 * hw_intf resource permanently assigned to this encoder
+	 * Other resources allocated at atomic commit time by use case
+	 */
+	sde_rm_init_hw_iter(&iter, 0, SDE_HW_BLK_INTF);
+	while (sde_rm_get_hw(&p->sde_kms->rm, &iter)) {
+		struct sde_hw_intf *hw_intf = (struct sde_hw_intf *)iter.hw;
+
+		if (hw_intf->idx == p->intf_idx) {
+			vid_enc->hw_intf = hw_intf;
+			break;
+		}
+	}
+
+	if (!vid_enc->hw_intf) {
+		ret = -EINVAL;
+		SDE_ERROR("failed to get hw_intf\n");
+		goto fail;
+	}
+
+	phys_enc->misr_map = kzalloc(sizeof(struct sde_misr_params),
+						GFP_KERNEL);
+	if (!phys_enc->misr_map)
+		SDE_ERROR("sde misr map allocation failed\n");
+
+	SDE_DEBUG_VIDENC(vid_enc, "\n");
+
+	sde_encoder_phys_vid_init_ops(&phys_enc->ops);
+	phys_enc->parent = p->parent;
+	phys_enc->parent_ops = p->parent_ops;
+	phys_enc->sde_kms = p->sde_kms;
+	phys_enc->split_role = p->split_role;
+	phys_enc->intf_mode = INTF_MODE_VIDEO;
+	phys_enc->enc_spinlock = p->enc_spinlock;
+	for (i = 0; i < INTR_IDX_MAX; i++)
+		INIT_LIST_HEAD(&vid_enc->irq_cb[i].list);
+	atomic_set(&phys_enc->vblank_refcount, 0);
+	atomic_set(&phys_enc->pending_kickoff_cnt, 0);
+	init_waitqueue_head(&phys_enc->pending_kickoff_wq);
+	phys_enc->enable_state = SDE_ENC_DISABLED;
+
+	SDE_DEBUG_VIDENC(vid_enc, "created intf idx:%d\n", p->intf_idx);
+
+	return phys_enc;
+
+fail:
+	SDE_ERROR("failed to create encoder\n");
+	if (vid_enc)
+		sde_encoder_phys_vid_destroy(phys_enc);
+
+	return ERR_PTR(ret);
+}
diff -Nruw linux-4.4.115-fbx/drivers/gpu/drm/msm/sde./sde_encoder_phys_wb.c linux-4.4.115-fbx/drivers/gpu/drm/msm/sde/sde_encoder_phys_wb.c
--- linux-4.4.115-fbx/drivers/gpu/drm/msm/sde./sde_encoder_phys_wb.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/gpu/drm/msm/sde/sde_encoder_phys_wb.c	2019-01-22 16:16:23.515246515 +0100
@@ -0,0 +1,1017 @@
+/*
+ * Copyright (c) 2015-2016 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt)	"[drm:%s:%d] " fmt, __func__, __LINE__
+#include <linux/debugfs.h>
+
+#include "sde_encoder_phys.h"
+#include "sde_formats.h"
+#include "sde_hw_top.h"
+#include "sde_hw_interrupts.h"
+#include "sde_core_irq.h"
+#include "sde_wb.h"
+#include "sde_vbif.h"
+
+#define to_sde_encoder_phys_wb(x) \
+	container_of(x, struct sde_encoder_phys_wb, base)
+
+#define WBID(wb_enc) ((wb_enc) ? (wb_enc)->wb_dev->wb_idx : -1)
+
+/**
+ * sde_encoder_phys_wb_is_master - report wb always as master encoder
+ */
+static bool sde_encoder_phys_wb_is_master(struct sde_encoder_phys *phys_enc)
+{
+	return true;
+}
+
+/**
+ * sde_encoder_phys_wb_get_intr_type - get interrupt type based on block mode
+ * @hw_wb:	Pointer to h/w writeback driver
+ */
+static enum sde_intr_type sde_encoder_phys_wb_get_intr_type(
+		struct sde_hw_wb *hw_wb)
+{
+	return (hw_wb->caps->features & BIT(SDE_WB_BLOCK_MODE)) ?
+			SDE_IRQ_TYPE_WB_ROT_COMP : SDE_IRQ_TYPE_WB_WFD_COMP;
+}
+
+/**
+ * sde_encoder_phys_wb_set_ot_limit - set OT limit for writeback interface
+ * @phys_enc:	Pointer to physical encoder
+ */
+static void sde_encoder_phys_wb_set_ot_limit(
+		struct sde_encoder_phys *phys_enc)
+{
+	struct sde_encoder_phys_wb *wb_enc = to_sde_encoder_phys_wb(phys_enc);
+	struct sde_hw_wb *hw_wb = wb_enc->hw_wb;
+	struct sde_vbif_set_ot_params ot_params;
+
+	memset(&ot_params, 0, sizeof(ot_params));
+	ot_params.xin_id = hw_wb->caps->xin_id;
+	ot_params.num = hw_wb->idx - WB_0;
+	ot_params.width = wb_enc->wb_roi.w;
+	ot_params.height = wb_enc->wb_roi.h;
+	ot_params.is_wfd = true;
+	ot_params.frame_rate = phys_enc->cached_mode.vrefresh;
+	ot_params.vbif_idx = hw_wb->caps->vbif_idx;
+	ot_params.clk_ctrl = hw_wb->caps->clk_ctrl;
+	ot_params.rd = false;
+
+	sde_vbif_set_ot_limit(phys_enc->sde_kms, &ot_params);
+}
+
+/**
+ * sde_encoder_phys_wb_set_traffic_shaper - set traffic shaper for writeback
+ * @phys_enc:	Pointer to physical encoder
+ */
+static void sde_encoder_phys_wb_set_traffic_shaper(
+		struct sde_encoder_phys *phys_enc)
+{
+	struct sde_encoder_phys_wb *wb_enc = to_sde_encoder_phys_wb(phys_enc);
+	struct sde_hw_wb_cfg *wb_cfg = &wb_enc->wb_cfg;
+
+	/* traffic shaper is only enabled for rotator */
+	wb_cfg->ts_cfg.en = false;
+}
+
+/**
+ * sde_encoder_phys_wb_setup_fb - setup output framebuffer
+ * @phys_enc:	Pointer to physical encoder
+ * @fb:		Pointer to output framebuffer
+ * @wb_roi:	Pointer to output region of interest
+ */
+static void sde_encoder_phys_wb_setup_fb(struct sde_encoder_phys *phys_enc,
+		struct drm_framebuffer *fb, struct sde_rect *wb_roi)
+{
+	struct sde_encoder_phys_wb *wb_enc = to_sde_encoder_phys_wb(phys_enc);
+	struct sde_hw_wb *hw_wb;
+	struct sde_hw_wb_cfg *wb_cfg;
+	const struct msm_format *format;
+	int ret;
+	struct msm_gem_address_space *aspace;
+
+	if (!phys_enc) {
+		SDE_ERROR("invalid encoder\n");
+		return;
+	}
+
+	hw_wb = wb_enc->hw_wb;
+	wb_cfg = &wb_enc->wb_cfg;
+	memset(wb_cfg, 0, sizeof(struct sde_hw_wb_cfg));
+
+	wb_cfg->intf_mode = phys_enc->intf_mode;
+	wb_cfg->is_secure = (fb->flags & DRM_MODE_FB_SECURE) ? true : false;
+	aspace = (wb_cfg->is_secure) ?
+			wb_enc->aspace[SDE_IOMMU_DOMAIN_SECURE] :
+			wb_enc->aspace[SDE_IOMMU_DOMAIN_UNSECURE];
+
+	SDE_DEBUG("[fb_secure:%d]\n", wb_cfg->is_secure);
+
+	format = msm_framebuffer_format(fb);
+	if (!format) {
+		SDE_DEBUG("invalid format for fb\n");
+		return;
+	}
+
+	wb_cfg->dest.format = sde_get_sde_format_ext(
+			format->pixel_format,
+			fb->modifier,
+			drm_format_num_planes(fb->pixel_format));
+	if (!wb_cfg->dest.format) {
+		/* this error should be detected during atomic_check */
+		SDE_ERROR("failed to get format %x\n", format->pixel_format);
+		return;
+	}
+	wb_cfg->roi = *wb_roi;
+
+	if (hw_wb->caps->features & BIT(SDE_WB_XY_ROI_OFFSET)) {
+		ret = sde_format_populate_layout(aspace, fb, &wb_cfg->dest);
+		if (ret) {
+			SDE_DEBUG("failed to populate layout %d\n", ret);
+			return;
+		}
+		wb_cfg->dest.width = fb->width;
+		wb_cfg->dest.height = fb->height;
+		wb_cfg->dest.num_planes = wb_cfg->dest.format->num_planes;
+	} else {
+		ret = sde_format_populate_layout_with_roi(aspace, fb, wb_roi,
+			&wb_cfg->dest);
+		if (ret) {
+			/* this error should be detected during atomic_check */
+			SDE_DEBUG("failed to populate layout %d\n", ret);
+			return;
+		}
+	}
+
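+	/*
+	 * Editorial note: for planar YUV layouts whose first chroma element
+	 * is Cb, the second and third plane addresses are swapped below so
+	 * the writeback destination receives the chroma planes in the order
+	 * assumed by the hardware.
+	 */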
+	if ((wb_cfg->dest.format->fetch_planes == SDE_PLANE_PLANAR) &&
+			(wb_cfg->dest.format->element[0] == C1_B_Cb))
+		swap(wb_cfg->dest.plane_addr[1], wb_cfg->dest.plane_addr[2]);
+
+	SDE_DEBUG("[fb_offset:%8.8x,%8.8x,%8.8x,%8.8x]\n",
+			wb_cfg->dest.plane_addr[0],
+			wb_cfg->dest.plane_addr[1],
+			wb_cfg->dest.plane_addr[2],
+			wb_cfg->dest.plane_addr[3]);
+	SDE_DEBUG("[fb_stride:%8.8x,%8.8x,%8.8x,%8.8x]\n",
+			wb_cfg->dest.plane_pitch[0],
+			wb_cfg->dest.plane_pitch[1],
+			wb_cfg->dest.plane_pitch[2],
+			wb_cfg->dest.plane_pitch[3]);
+
+	if (hw_wb->ops.setup_roi)
+		hw_wb->ops.setup_roi(hw_wb, wb_cfg);
+
+	if (hw_wb->ops.setup_outformat)
+		hw_wb->ops.setup_outformat(hw_wb, wb_cfg);
+
+	if (hw_wb->ops.setup_outaddress)
+		hw_wb->ops.setup_outaddress(hw_wb, wb_cfg);
+}
+
+/**
+ * sde_encoder_phys_wb_setup_cdp - setup chroma down prefetch block
+ * @phys_enc:	Pointer to physical encoder
+ */
+static void sde_encoder_phys_wb_setup_cdp(struct sde_encoder_phys *phys_enc)
+{
+	struct sde_encoder_phys_wb *wb_enc = to_sde_encoder_phys_wb(phys_enc);
+	struct sde_hw_wb *hw_wb = wb_enc->hw_wb;
+	struct sde_hw_intf_cfg *intf_cfg = &wb_enc->intf_cfg;
+
+	memset(intf_cfg, 0, sizeof(struct sde_hw_intf_cfg));
+
+	intf_cfg->intf = SDE_NONE;
+	intf_cfg->wb = hw_wb->idx;
+	intf_cfg->mode_3d = sde_encoder_helper_get_3d_blend_mode(phys_enc);
+
+	if (phys_enc->hw_ctl && phys_enc->hw_ctl->ops.setup_intf_cfg)
+		phys_enc->hw_ctl->ops.setup_intf_cfg(phys_enc->hw_ctl,
+				intf_cfg);
+}
+
+/**
+ * sde_encoder_phys_wb_atomic_check - verify and fixup given atomic states
+ * @phys_enc:	Pointer to physical encoder
+ * @crtc_state:	Pointer to CRTC atomic state
+ * @conn_state:	Pointer to connector atomic state
+ */
+static int sde_encoder_phys_wb_atomic_check(
+		struct sde_encoder_phys *phys_enc,
+		struct drm_crtc_state *crtc_state,
+		struct drm_connector_state *conn_state)
+{
+	struct sde_encoder_phys_wb *wb_enc = to_sde_encoder_phys_wb(phys_enc);
+	struct sde_hw_wb *hw_wb = wb_enc->hw_wb;
+	const struct sde_wb_cfg *wb_cfg = hw_wb->caps;
+	struct drm_framebuffer *fb;
+	const struct sde_format *fmt;
+	struct sde_rect wb_roi;
+	const struct drm_display_mode *mode = &crtc_state->mode;
+	int rc;
+
+	SDE_DEBUG("[atomic_check:%d,%d,\"%s\",%d,%d]\n",
+			hw_wb->idx - WB_0, mode->base.id, mode->name,
+			mode->hdisplay, mode->vdisplay);
+
+	if (!conn_state || !conn_state->connector) {
+		SDE_ERROR("invalid connector state\n");
+		return -EINVAL;
+	} else if (conn_state->connector->status !=
+			connector_status_connected) {
+		SDE_ERROR("connector not connected %d\n",
+				conn_state->connector->status);
+		return -EINVAL;
+	}
+
+	memset(&wb_roi, 0, sizeof(struct sde_rect));
+
+	rc = sde_wb_connector_state_get_output_roi(conn_state, &wb_roi);
+	if (rc) {
+		SDE_ERROR("failed to get roi %d\n", rc);
+		return rc;
+	}
+
+	SDE_DEBUG("[roi:%u,%u,%u,%u]\n", wb_roi.x, wb_roi.y,
+			wb_roi.w, wb_roi.h);
+
+	fb = sde_wb_connector_state_get_output_fb(conn_state);
+	if (!fb) {
+		SDE_ERROR("no output framebuffer\n");
+		return -EINVAL;
+	}
+
+	SDE_DEBUG("[fb_id:%u][fb:%u,%u]\n", fb->base.id,
+			fb->width, fb->height);
+
+	fmt = sde_get_sde_format_ext(fb->pixel_format, fb->modifier,
+			drm_format_num_planes(fb->pixel_format));
+	if (!fmt) {
+		SDE_ERROR("unsupported output pixel format:%x\n",
+				fb->pixel_format);
+		return -EINVAL;
+	}
+
+	SDE_DEBUG("[fb_fmt:%x,%llx]\n", fb->pixel_format,
+			fb->modifier[0]);
+
+	if (SDE_FORMAT_IS_YUV(fmt) &&
+			!(wb_cfg->features & BIT(SDE_WB_YUV_CONFIG))) {
+		SDE_ERROR("invalid output format %x\n", fmt->base.pixel_format);
+		return -EINVAL;
+	}
+
+	if (SDE_FORMAT_IS_UBWC(fmt) &&
+			!(wb_cfg->features & BIT(SDE_WB_UBWC_1_0))) {
+		SDE_ERROR("invalid output format %x\n", fmt->base.pixel_format);
+		return -EINVAL;
+	}
+
+	if (SDE_FORMAT_IS_YUV(fmt) != !!phys_enc->hw_cdm)
+		crtc_state->mode_changed = true;
+
+	if (wb_roi.w && wb_roi.h) {
+		if (wb_roi.w != mode->hdisplay) {
+			SDE_ERROR("invalid roi w=%d, mode w=%d\n", wb_roi.w,
+					mode->hdisplay);
+			return -EINVAL;
+		} else if (wb_roi.h != mode->vdisplay) {
+			SDE_ERROR("invalid roi h=%d, mode h=%d\n", wb_roi.h,
+					mode->vdisplay);
+			return -EINVAL;
+		} else if (wb_roi.x + wb_roi.w > fb->width) {
+			SDE_ERROR("invalid roi x=%d, w=%d, fb w=%d\n",
+					wb_roi.x, wb_roi.w, fb->width);
+			return -EINVAL;
+		} else if (wb_roi.y + wb_roi.h > fb->height) {
+			SDE_ERROR("invalid roi y=%d, h=%d, fb h=%d\n",
+					wb_roi.y, wb_roi.h, fb->height);
+			return -EINVAL;
+		} else if (wb_roi.w > wb_cfg->sblk->maxlinewidth) {
+			SDE_ERROR("invalid roi w=%d, maxlinewidth=%u\n",
+					wb_roi.w, wb_cfg->sblk->maxlinewidth);
+			return -EINVAL;
+		}
+	} else {
+		if (wb_roi.x || wb_roi.y) {
+			SDE_ERROR("invalid roi x=%d, y=%d\n",
+					wb_roi.x, wb_roi.y);
+			return -EINVAL;
+		} else if (fb->width != mode->hdisplay) {
+			SDE_ERROR("invalid fb w=%d, mode w=%d\n", fb->width,
+					mode->hdisplay);
+			return -EINVAL;
+		} else if (fb->height != mode->vdisplay) {
+			SDE_ERROR("invalid fb h=%d, mode h=%d\n", fb->height,
+					mode->vdisplay);
+			return -EINVAL;
+		} else if (fb->width > wb_cfg->sblk->maxlinewidth) {
+			SDE_ERROR("invalid fb w=%d, maxlinewidth=%u\n",
+					fb->width, wb_cfg->sblk->maxlinewidth);
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
+/**
+ * sde_encoder_phys_wb_flush - flush hardware update
+ * @phys_enc:	Pointer to physical encoder
+ */
+static void sde_encoder_phys_wb_flush(struct sde_encoder_phys *phys_enc)
+{
+	struct sde_encoder_phys_wb *wb_enc = to_sde_encoder_phys_wb(phys_enc);
+	struct sde_hw_wb *hw_wb = wb_enc->hw_wb;
+	struct sde_hw_ctl *hw_ctl = phys_enc->hw_ctl;
+	struct sde_hw_cdm *hw_cdm = phys_enc->hw_cdm;
+	u32 flush_mask = 0;
+
+	SDE_DEBUG("[wb:%d]\n", hw_wb->idx - WB_0);
+
+	if (!hw_ctl) {
+		SDE_DEBUG("[wb:%d] no ctl assigned\n", hw_wb->idx - WB_0);
+		return;
+	}
+
+	if (hw_ctl->ops.get_bitmask_wb)
+		hw_ctl->ops.get_bitmask_wb(hw_ctl, &flush_mask, hw_wb->idx);
+
+	if (hw_ctl->ops.get_bitmask_cdm && hw_cdm)
+		hw_ctl->ops.get_bitmask_cdm(hw_ctl, &flush_mask, hw_cdm->idx);
+
+	if (hw_ctl->ops.update_pending_flush)
+		hw_ctl->ops.update_pending_flush(hw_ctl, flush_mask);
+
+	SDE_DEBUG("Flushing CTL_ID %d, flush_mask %x, WB %d\n",
+			hw_ctl->idx - CTL_0, flush_mask, hw_wb->idx - WB_0);
+}
+
+/**
+ * sde_encoder_phys_wb_setup - setup writeback encoder
+ * @phys_enc:	Pointer to physical encoder
+ */
+static void sde_encoder_phys_wb_setup(
+		struct sde_encoder_phys *phys_enc)
+{
+	struct sde_encoder_phys_wb *wb_enc = to_sde_encoder_phys_wb(phys_enc);
+	struct sde_hw_wb *hw_wb = wb_enc->hw_wb;
+	struct drm_display_mode mode = phys_enc->cached_mode;
+	struct drm_framebuffer *fb;
+	struct sde_rect *wb_roi = &wb_enc->wb_roi;
+
+	SDE_DEBUG("[mode_set:%d,%d,\"%s\",%d,%d]\n",
+			hw_wb->idx - WB_0, mode.base.id, mode.name,
+			mode.hdisplay, mode.vdisplay);
+
+	memset(wb_roi, 0, sizeof(struct sde_rect));
+
+	fb = sde_wb_get_output_fb(wb_enc->wb_dev);
+	if (!fb) {
+		SDE_DEBUG("no output framebuffer\n");
+		return;
+	}
+
+	SDE_DEBUG("[fb_id:%u][fb:%u,%u]\n", fb->base.id,
+			fb->width, fb->height);
+
+	sde_wb_get_output_roi(wb_enc->wb_dev, wb_roi);
+	if (wb_roi->w == 0 || wb_roi->h == 0) {
+		wb_roi->x = 0;
+		wb_roi->y = 0;
+		wb_roi->w = fb->width;
+		wb_roi->h = fb->height;
+	}
+
+	SDE_DEBUG("[roi:%u,%u,%u,%u]\n", wb_roi->x, wb_roi->y,
+			wb_roi->w, wb_roi->h);
+
+	wb_enc->wb_fmt = sde_get_sde_format_ext(fb->pixel_format, fb->modifier,
+			drm_format_num_planes(fb->pixel_format));
+	if (!wb_enc->wb_fmt) {
+		SDE_ERROR("unsupported output pixel format: %d\n",
+				fb->pixel_format);
+		return;
+	}
+
+	SDE_DEBUG("[fb_fmt:%x,%llx]\n", fb->pixel_format,
+			fb->modifier[0]);
+
+	sde_encoder_phys_wb_set_ot_limit(phys_enc);
+
+	sde_encoder_phys_wb_set_traffic_shaper(phys_enc);
+
+	sde_encoder_phys_setup_cdm(phys_enc, wb_enc->wb_fmt,
+		CDM_CDWN_OUTPUT_WB, wb_roi);
+
+	sde_encoder_phys_wb_setup_fb(phys_enc, fb, wb_roi);
+
+	sde_encoder_phys_wb_setup_cdp(phys_enc);
+}
+
+/**
+ * sde_encoder_phys_wb_unregister_irq - unregister writeback interrupt handler
+ * @phys_enc:	Pointer to physical encoder
+ */
+static int sde_encoder_phys_wb_unregister_irq(
+		struct sde_encoder_phys *phys_enc)
+{
+	struct sde_encoder_phys_wb *wb_enc = to_sde_encoder_phys_wb(phys_enc);
+	struct sde_hw_wb *hw_wb = wb_enc->hw_wb;
+
+	if (wb_enc->bypass_irqreg)
+		return 0;
+
+	sde_core_irq_disable(phys_enc->sde_kms, &wb_enc->irq_idx, 1);
+	sde_core_irq_unregister_callback(phys_enc->sde_kms, wb_enc->irq_idx,
+			&wb_enc->irq_cb);
+
+	SDE_DEBUG("un-register IRQ for wb %d, irq_idx=%d\n",
+			hw_wb->idx - WB_0,
+			wb_enc->irq_idx);
+
+	return 0;
+}
+
+/**
+ * sde_encoder_phys_wb_done_irq - writeback interrupt handler
+ * @arg:	Pointer to writeback encoder
+ * @irq_idx:	interrupt index
+ */
+static void sde_encoder_phys_wb_done_irq(void *arg, int irq_idx)
+{
+	struct sde_encoder_phys_wb *wb_enc = arg;
+	struct sde_encoder_phys *phys_enc = &wb_enc->base;
+	struct sde_hw_wb *hw_wb = wb_enc->hw_wb;
+
+	SDE_DEBUG("[wb:%d,%u]\n", hw_wb->idx - WB_0,
+			wb_enc->frame_count);
+
+	if (phys_enc->parent_ops.handle_frame_done)
+		phys_enc->parent_ops.handle_frame_done(phys_enc->parent,
+				phys_enc, SDE_ENCODER_FRAME_EVENT_DONE);
+
+	if (phys_enc->parent_ops.handle_vblank_virt)
+		phys_enc->parent_ops.handle_vblank_virt(phys_enc->parent,
+				phys_enc);
+
+	complete_all(&wb_enc->wbdone_complete);
+}
+
+/**
+ * sde_encoder_phys_wb_register_irq - register writeback interrupt handler
+ * @phys_enc:	Pointer to physical encoder
+ */
+static int sde_encoder_phys_wb_register_irq(struct sde_encoder_phys *phys_enc)
+{
+	struct sde_encoder_phys_wb *wb_enc = to_sde_encoder_phys_wb(phys_enc);
+	struct sde_hw_wb *hw_wb = wb_enc->hw_wb;
+	struct sde_irq_callback *irq_cb = &wb_enc->irq_cb;
+	enum sde_intr_type intr_type;
+	int ret = 0;
+
+	if (wb_enc->bypass_irqreg)
+		return 0;
+
+	intr_type = sde_encoder_phys_wb_get_intr_type(hw_wb);
+	wb_enc->irq_idx = sde_core_irq_idx_lookup(phys_enc->sde_kms,
+			intr_type, hw_wb->idx);
+	if (wb_enc->irq_idx < 0) {
+		SDE_ERROR(
+			"failed to lookup IRQ index for WB_DONE with wb=%d\n",
+			hw_wb->idx - WB_0);
+		return -EINVAL;
+	}
+
+	irq_cb->func = sde_encoder_phys_wb_done_irq;
+	irq_cb->arg = wb_enc;
+	ret = sde_core_irq_register_callback(phys_enc->sde_kms,
+			wb_enc->irq_idx, irq_cb);
+	if (ret) {
+		SDE_ERROR("failed to register IRQ callback WB_DONE\n");
+		return ret;
+	}
+
+	ret = sde_core_irq_enable(phys_enc->sde_kms, &wb_enc->irq_idx, 1);
+	if (ret) {
+		SDE_ERROR(
+			"failed to enable IRQ for WB_DONE, wb %d, irq_idx=%d\n",
+				hw_wb->idx - WB_0,
+				wb_enc->irq_idx);
+		/*
+		 * Unregister callback on IRQ enable failure, while the
+		 * looked-up irq_idx is still valid
+		 */
+		sde_core_irq_unregister_callback(phys_enc->sde_kms,
+				wb_enc->irq_idx, irq_cb);
+		wb_enc->irq_idx = -EINVAL;
+		return ret;
+	}
+
+	SDE_DEBUG("registered IRQ for wb %d, irq_idx=%d\n",
+			hw_wb->idx - WB_0,
+			wb_enc->irq_idx);
+
+	return ret;
+}
+
+/**
+ * sde_encoder_phys_wb_mode_set - set display mode
+ * @phys_enc:	Pointer to physical encoder
+ * @mode:	Pointer to requested display mode
+ * @adj_mode:	Pointer to adjusted display mode
+ */
+static void sde_encoder_phys_wb_mode_set(
+		struct sde_encoder_phys *phys_enc,
+		struct drm_display_mode *mode,
+		struct drm_display_mode *adj_mode)
+{
+	struct sde_encoder_phys_wb *wb_enc = to_sde_encoder_phys_wb(phys_enc);
+	struct sde_rm *rm = &phys_enc->sde_kms->rm;
+	struct sde_hw_wb *hw_wb = wb_enc->hw_wb;
+	struct sde_rm_hw_iter iter;
+	int i, instance;
+
+	phys_enc->cached_mode = *adj_mode;
+	instance = phys_enc->split_role == ENC_ROLE_SLAVE ? 1 : 0;
+
+	SDE_DEBUG("[mode_set_cache:%d,%d,\"%s\",%d,%d]\n",
+			hw_wb->idx - WB_0, mode->base.id,
+			mode->name, mode->hdisplay, mode->vdisplay);
+
+	phys_enc->hw_ctl = NULL;
+	phys_enc->hw_cdm = NULL;
+
+	/* Retrieve previously allocated HW Resources. CTL shouldn't fail */
+	sde_rm_init_hw_iter(&iter, phys_enc->parent->base.id, SDE_HW_BLK_CTL);
+	for (i = 0; i <= instance; i++) {
+		sde_rm_get_hw(rm, &iter);
+		if (i == instance)
+			phys_enc->hw_ctl = (struct sde_hw_ctl *) iter.hw;
+	}
+
+	if (IS_ERR_OR_NULL(phys_enc->hw_ctl)) {
+		SDE_ERROR("failed init ctl: %ld\n", PTR_ERR(phys_enc->hw_ctl));
+		phys_enc->hw_ctl = NULL;
+		return;
+	}
+
+	/* CDM is optional */
+	sde_rm_init_hw_iter(&iter, phys_enc->parent->base.id, SDE_HW_BLK_CDM);
+	for (i = 0; i <= instance; i++) {
+		sde_rm_get_hw(rm, &iter);
+		if (i == instance)
+			phys_enc->hw_cdm = (struct sde_hw_cdm *) iter.hw;
+	}
+
+	if (IS_ERR(phys_enc->hw_cdm)) {
+		SDE_ERROR("CDM required but not allocated: %ld\n",
+				PTR_ERR(phys_enc->hw_cdm));
+		phys_enc->hw_cdm = NULL;
+	}
+}
+
+/**
+ * sde_encoder_phys_wb_wait_for_commit_done - wait until request is committed
+ * @phys_enc:	Pointer to physical encoder
+ */
+static int sde_encoder_phys_wb_wait_for_commit_done(
+		struct sde_encoder_phys *phys_enc)
+{
+	unsigned long ret;
+	struct sde_encoder_phys_wb *wb_enc = to_sde_encoder_phys_wb(phys_enc);
+	u32 irq_status;
+	u64 wb_time = 0;
+	int rc = 0;
+
+	/* Return EWOULDBLOCK since we know the wait isn't necessary */
+	if (WARN_ON(phys_enc->enable_state != SDE_ENC_ENABLED))
+		return -EWOULDBLOCK;
+
+	SDE_EVT32(DRMID(phys_enc->parent), WBID(wb_enc), wb_enc->frame_count);
+
+	ret = wait_for_completion_timeout(&wb_enc->wbdone_complete,
+			KICKOFF_TIMEOUT_JIFFIES);
+
+	if (!ret) {
+		SDE_EVT32(DRMID(phys_enc->parent), WBID(wb_enc),
+				wb_enc->frame_count);
+
+		irq_status = sde_core_irq_read(phys_enc->sde_kms,
+				wb_enc->irq_idx, true);
+		if (irq_status) {
+			SDE_DEBUG("wb:%d done but irq not triggered\n",
+					wb_enc->wb_dev->wb_idx - WB_0);
+			sde_encoder_phys_wb_done_irq(wb_enc, wb_enc->irq_idx);
+		} else {
+			SDE_ERROR("wb:%d kickoff timed out\n",
+					wb_enc->wb_dev->wb_idx - WB_0);
+			if (phys_enc->parent_ops.handle_frame_done)
+				phys_enc->parent_ops.handle_frame_done(
+						phys_enc->parent, phys_enc,
+						SDE_ENCODER_FRAME_EVENT_ERROR);
+			rc = -ETIMEDOUT;
+		}
+	}
+
+	sde_encoder_phys_wb_unregister_irq(phys_enc);
+
+	if (!rc)
+		wb_enc->end_time = ktime_get();
+
+	/* once operation is done, disable traffic shaper */
+	if (wb_enc->wb_cfg.ts_cfg.en && wb_enc->hw_wb &&
+			wb_enc->hw_wb->ops.setup_trafficshaper) {
+		wb_enc->wb_cfg.ts_cfg.en = false;
+		wb_enc->hw_wb->ops.setup_trafficshaper(
+				wb_enc->hw_wb, &wb_enc->wb_cfg);
+	}
+
+	/* remove vote for iommu/clk/bus */
+	wb_enc->frame_count++;
+
+	if (!rc) {
+		wb_time = (u64)ktime_to_us(wb_enc->end_time) -
+				(u64)ktime_to_us(wb_enc->start_time);
+		SDE_DEBUG("wb:%d took %llu us\n",
+			wb_enc->wb_dev->wb_idx - WB_0, wb_time);
+	}
+
+	SDE_EVT32(DRMID(phys_enc->parent), WBID(wb_enc), wb_enc->frame_count,
+			wb_time);
+
+	return rc;
+}
+
+/**
+ * sde_encoder_phys_wb_prepare_for_kickoff - pre-kickoff processing
+ * @phys_enc:	Pointer to physical encoder
+ */
+static void sde_encoder_phys_wb_prepare_for_kickoff(
+		struct sde_encoder_phys *phys_enc)
+{
+	struct sde_encoder_phys_wb *wb_enc = to_sde_encoder_phys_wb(phys_enc);
+	int ret;
+
+	SDE_DEBUG("[wb:%d,%u]\n", wb_enc->hw_wb->idx - WB_0,
+			wb_enc->kickoff_count);
+
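+	/*
+	 * Editorial note: paired with complete_all() in
+	 * sde_encoder_phys_wb_done_irq() and waited on in
+	 * sde_encoder_phys_wb_wait_for_commit_done().
+	 */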
+	reinit_completion(&wb_enc->wbdone_complete);
+
+	ret = sde_encoder_phys_wb_register_irq(phys_enc);
+	if (ret) {
+		SDE_ERROR("failed to register irq %d\n", ret);
+		return;
+	}
+
+	wb_enc->kickoff_count++;
+
+	/* set OT limit & enable traffic shaper */
+	sde_encoder_phys_wb_setup(phys_enc);
+
+	sde_encoder_phys_wb_flush(phys_enc);
+
+	/* vote for iommu/clk/bus */
+	wb_enc->start_time = ktime_get();
+
+	SDE_EVT32(DRMID(phys_enc->parent), WBID(wb_enc), wb_enc->kickoff_count);
+}
+
+/**
+ * sde_encoder_phys_wb_handle_post_kickoff - post-kickoff processing
+ * @phys_enc:	Pointer to physical encoder
+ */
+static void sde_encoder_phys_wb_handle_post_kickoff(
+		struct sde_encoder_phys *phys_enc)
+{
+	struct sde_encoder_phys_wb *wb_enc = to_sde_encoder_phys_wb(phys_enc);
+
+	SDE_DEBUG("[wb:%d]\n", wb_enc->hw_wb->idx - WB_0);
+
+	SDE_EVT32(DRMID(phys_enc->parent), WBID(wb_enc));
+}
+
+/**
+ * sde_encoder_phys_wb_enable - enable writeback encoder
+ * @phys_enc:	Pointer to physical encoder
+ */
+static void sde_encoder_phys_wb_enable(struct sde_encoder_phys *phys_enc)
+{
+	struct sde_encoder_phys_wb *wb_enc = to_sde_encoder_phys_wb(phys_enc);
+	struct sde_hw_wb *hw_wb = wb_enc->hw_wb;
+	struct drm_device *dev;
+	struct drm_connector *connector;
+
+	SDE_DEBUG("[wb:%d]\n", hw_wb->idx - WB_0);
+
+	if (!wb_enc->base.parent || !wb_enc->base.parent->dev) {
+		SDE_ERROR("invalid drm device\n");
+		return;
+	}
+	dev = wb_enc->base.parent->dev;
+
+	/* find associated writeback connector */
+	mutex_lock(&dev->mode_config.mutex);
+	drm_for_each_connector(connector, phys_enc->parent->dev) {
+		if (connector->encoder == phys_enc->parent)
+			break;
+	}
+	mutex_unlock(&dev->mode_config.mutex);
+
+	if (!connector || connector->encoder != phys_enc->parent) {
+		SDE_ERROR("failed to find writeback connector\n");
+		return;
+	}
+	wb_enc->wb_dev = sde_wb_connector_get_wb(connector);
+
+	phys_enc->enable_state = SDE_ENC_ENABLED;
+}
+
+/**
+ * sde_encoder_phys_wb_disable - disable writeback encoder
+ * @phys_enc:	Pointer to physical encoder
+ */
+static void sde_encoder_phys_wb_disable(struct sde_encoder_phys *phys_enc)
+{
+	struct sde_encoder_phys_wb *wb_enc = to_sde_encoder_phys_wb(phys_enc);
+	struct sde_hw_wb *hw_wb = wb_enc->hw_wb;
+
+	SDE_DEBUG("[wb:%d]\n", hw_wb->idx - WB_0);
+
+	if (phys_enc->enable_state == SDE_ENC_DISABLED) {
+		SDE_ERROR("encoder is already disabled\n");
+		return;
+	}
+
+	if (wb_enc->frame_count != wb_enc->kickoff_count) {
+		SDE_DEBUG("[wait_for_done: wb:%d, frame:%u, kickoff:%u]\n",
+				hw_wb->idx - WB_0, wb_enc->frame_count,
+				wb_enc->kickoff_count);
+		sde_encoder_phys_wb_wait_for_commit_done(phys_enc);
+	}
+
+	if (phys_enc->hw_cdm && phys_enc->hw_cdm->ops.disable) {
+		SDE_DEBUG_DRIVER("[cdm_disable]\n");
+		phys_enc->hw_cdm->ops.disable(phys_enc->hw_cdm);
+	}
+
+	phys_enc->enable_state = SDE_ENC_DISABLED;
+}
+
+/**
+ * sde_encoder_phys_wb_get_hw_resources - get hardware resources
+ * @phys_enc:	Pointer to physical encoder
+ * @hw_res:	Pointer to encoder resources
+ */
+static void sde_encoder_phys_wb_get_hw_resources(
+		struct sde_encoder_phys *phys_enc,
+		struct sde_encoder_hw_resources *hw_res,
+		struct drm_connector_state *conn_state)
+{
+	struct sde_encoder_phys_wb *wb_enc = to_sde_encoder_phys_wb(phys_enc);
+	struct sde_hw_wb *hw_wb;
+	struct drm_framebuffer *fb;
+	const struct sde_format *fmt;
+
+	if (!phys_enc) {
+		SDE_ERROR("invalid encoder\n");
+		return;
+	}
+
+	fb = sde_wb_connector_state_get_output_fb(conn_state);
+	if (!fb) {
+		SDE_ERROR("no output framebuffer\n");
+		return;
+	}
+
+	fmt = sde_get_sde_format_ext(fb->pixel_format, fb->modifier,
+			drm_format_num_planes(fb->pixel_format));
+	if (!fmt) {
+		SDE_ERROR("unsupported output pixel format:%d\n",
+				fb->pixel_format);
+		return;
+	}
+
+	hw_wb = wb_enc->hw_wb;
+	hw_res->wbs[hw_wb->idx - WB_0] = phys_enc->intf_mode;
+	hw_res->needs_cdm = SDE_FORMAT_IS_YUV(fmt);
+	SDE_DEBUG("[wb:%d] intf_mode=%d needs_cdm=%d\n", hw_wb->idx - WB_0,
+			hw_res->wbs[hw_wb->idx - WB_0],
+			hw_res->needs_cdm);
+}
+
+#ifdef CONFIG_DEBUG_FS
+/**
+ * sde_encoder_phys_wb_init_debugfs - initialize writeback encoder debugfs
+ * @phys_enc:	Pointer to physical encoder
+ * @sde_kms:	Pointer to SDE KMS object
+ */
+static int sde_encoder_phys_wb_init_debugfs(
+		struct sde_encoder_phys *phys_enc, struct sde_kms *kms)
+{
+	struct sde_encoder_phys_wb *wb_enc = to_sde_encoder_phys_wb(phys_enc);
+
+	if (!phys_enc || !kms || !wb_enc->hw_wb)
+		return -EINVAL;
+
+	snprintf(wb_enc->wb_name, ARRAY_SIZE(wb_enc->wb_name), "encoder_wb%d",
+			wb_enc->hw_wb->idx - WB_0);
+
+	wb_enc->debugfs_root =
+		debugfs_create_dir(wb_enc->wb_name,
+				sde_debugfs_get_root(kms));
+	if (!wb_enc->debugfs_root) {
+		SDE_ERROR("failed to create debugfs\n");
+		return -ENOMEM;
+	}
+
+	if (!debugfs_create_u32("wbdone_timeout", S_IRUGO | S_IWUSR,
+			wb_enc->debugfs_root, &wb_enc->wbdone_timeout)) {
+		SDE_ERROR("failed to create debugfs/wbdone_timeout\n");
+		return -ENOMEM;
+	}
+
+	if (!debugfs_create_u32("bypass_irqreg", S_IRUGO | S_IWUSR,
+			wb_enc->debugfs_root, &wb_enc->bypass_irqreg)) {
+		SDE_ERROR("failed to create debugfs/bypass_irqreg\n");
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+/**
+ * sde_encoder_phys_wb_destroy_debugfs - destroy writeback encoder debugfs
+ * @phys_enc:	Pointer to physical encoder
+ */
+static void sde_encoder_phys_wb_destroy_debugfs(
+		struct sde_encoder_phys *phys_enc)
+{
+	struct sde_encoder_phys_wb *wb_enc = to_sde_encoder_phys_wb(phys_enc);
+
+	if (!phys_enc)
+		return;
+
+	debugfs_remove_recursive(wb_enc->debugfs_root);
+}
+#else
+static int sde_encoder_phys_wb_init_debugfs(
+		struct sde_encoder_phys *phys_enc, struct sde_kms *kms)
+{
+	return 0;
+}
+static void sde_encoder_phys_wb_destroy_debugfs(
+		struct sde_encoder_phys *phys_enc)
+{
+}
+#endif
+
+/**
+ * sde_encoder_phys_wb_destroy - destroy writeback encoder
+ * @phys_enc:	Pointer to physical encoder
+ */
+static void sde_encoder_phys_wb_destroy(struct sde_encoder_phys *phys_enc)
+{
+	struct sde_encoder_phys_wb *wb_enc;
+
+	if (!phys_enc)
+		return;
+
+	wb_enc = to_sde_encoder_phys_wb(phys_enc);
+
+	SDE_DEBUG("[wb:%d]\n", wb_enc->hw_wb->idx - WB_0);
+
+	sde_encoder_phys_wb_destroy_debugfs(phys_enc);
+
+	kfree(wb_enc);
+}
+
+/**
+ * sde_encoder_phys_wb_init_ops - initialize writeback operations
+ * @ops:	Pointer to encoder operation table
+ */
+static void sde_encoder_phys_wb_init_ops(struct sde_encoder_phys_ops *ops)
+{
+	ops->is_master = sde_encoder_phys_wb_is_master;
+	ops->mode_set = sde_encoder_phys_wb_mode_set;
+	ops->enable = sde_encoder_phys_wb_enable;
+	ops->disable = sde_encoder_phys_wb_disable;
+	ops->destroy = sde_encoder_phys_wb_destroy;
+	ops->atomic_check = sde_encoder_phys_wb_atomic_check;
+	ops->get_hw_resources = sde_encoder_phys_wb_get_hw_resources;
+	ops->wait_for_commit_done = sde_encoder_phys_wb_wait_for_commit_done;
+	ops->prepare_for_kickoff = sde_encoder_phys_wb_prepare_for_kickoff;
+	ops->handle_post_kickoff = sde_encoder_phys_wb_handle_post_kickoff;
+	ops->trigger_start = sde_encoder_helper_trigger_start;
+}
+
+/**
+ * sde_encoder_phys_wb_init - initialize writeback encoder
+ * @p:		Pointer to init info structure with initialization params
+ */
+struct sde_encoder_phys *sde_encoder_phys_wb_init(
+		struct sde_enc_phys_init_params *p)
+{
+	struct sde_encoder_phys *phys_enc;
+	struct sde_encoder_phys_wb *wb_enc;
+	struct sde_hw_mdp *hw_mdp;
+	int ret = 0;
+
+	SDE_DEBUG("\n");
+
+	wb_enc = kzalloc(sizeof(*wb_enc), GFP_KERNEL);
+	if (!wb_enc) {
+		ret = -ENOMEM;
+		goto fail_alloc;
+	}
+	wb_enc->irq_idx = -EINVAL;
+	wb_enc->wbdone_timeout = KICKOFF_TIMEOUT_MS;
+	init_completion(&wb_enc->wbdone_complete);
+
+	phys_enc = &wb_enc->base;
+
+	if (p->sde_kms->vbif[VBIF_NRT]) {
+		wb_enc->aspace[SDE_IOMMU_DOMAIN_UNSECURE] =
+			p->sde_kms->aspace[MSM_SMMU_DOMAIN_NRT_UNSECURE];
+		wb_enc->aspace[SDE_IOMMU_DOMAIN_SECURE] =
+			p->sde_kms->aspace[MSM_SMMU_DOMAIN_NRT_SECURE];
+	} else {
+		wb_enc->aspace[SDE_IOMMU_DOMAIN_UNSECURE] =
+			p->sde_kms->aspace[MSM_SMMU_DOMAIN_UNSECURE];
+		wb_enc->aspace[SDE_IOMMU_DOMAIN_SECURE] =
+			p->sde_kms->aspace[MSM_SMMU_DOMAIN_SECURE];
+	}
+
+	hw_mdp = sde_rm_get_mdp(&p->sde_kms->rm);
+	if (IS_ERR_OR_NULL(hw_mdp)) {
+		ret = PTR_ERR(hw_mdp);
+		SDE_ERROR("failed to init hw_top: %d\n", ret);
+		goto fail_mdp_init;
+	}
+	phys_enc->hw_mdptop = hw_mdp;
+
+	/*
+	 * The hw_wb resource is permanently assigned to this encoder;
+	 * other resources are allocated at atomic commit time, per use case.
+	 */
+	if (p->wb_idx != SDE_NONE) {
+		struct sde_rm_hw_iter iter;
+
+		sde_rm_init_hw_iter(&iter, 0, SDE_HW_BLK_WB);
+		while (sde_rm_get_hw(&p->sde_kms->rm, &iter)) {
+			struct sde_hw_wb *hw_wb = (struct sde_hw_wb *)iter.hw;
+
+			if (hw_wb->idx == p->wb_idx) {
+				wb_enc->hw_wb = hw_wb;
+				break;
+			}
+		}
+
+		if (!wb_enc->hw_wb) {
+			ret = -EINVAL;
+			SDE_ERROR("failed to init hw_wb%d\n", p->wb_idx - WB_0);
+			goto fail_wb_init;
+		}
+	} else {
+		ret = -EINVAL;
+		SDE_ERROR("invalid wb_idx\n");
+		goto fail_wb_check;
+	}
+
+	sde_encoder_phys_wb_init_ops(&phys_enc->ops);
+	phys_enc->parent = p->parent;
+	phys_enc->parent_ops = p->parent_ops;
+	phys_enc->sde_kms = p->sde_kms;
+	phys_enc->split_role = p->split_role;
+	phys_enc->intf_mode = INTF_MODE_WB_LINE;
+	phys_enc->intf_idx = p->intf_idx;
+	phys_enc->enc_spinlock = p->enc_spinlock;
+	INIT_LIST_HEAD(&wb_enc->irq_cb.list);
+
+	ret = sde_encoder_phys_wb_init_debugfs(phys_enc, p->sde_kms);
+	if (ret) {
+		SDE_ERROR("failed to init debugfs %d\n", ret);
+		goto fail_debugfs_init;
+	}
+
+	SDE_DEBUG("Created sde_encoder_phys_wb for wb %d\n",
+			wb_enc->hw_wb->idx - WB_0);
+
+	return phys_enc;
+
+fail_debugfs_init:
+fail_wb_init:
+fail_wb_check:
+fail_mdp_init:
+	kfree(wb_enc);
+fail_alloc:
+	return ERR_PTR(ret);
+}
+
diff -Nruw linux-4.4.115-fbx/drivers/gpu/drm/msm/sde./sde_fence.c linux-4.4.115-fbx/drivers/gpu/drm/msm/sde/sde_fence.c
--- linux-4.4.115-fbx/drivers/gpu/drm/msm/sde./sde_fence.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/gpu/drm/msm/sde/sde_fence.c	2019-01-22 16:16:23.515246515 +0100
@@ -0,0 +1,232 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <sync.h>
+#include <sw_sync.h>
+#include "msm_drv.h"
+#include "sde_kms.h"
+#include "sde_fence.h"
+
+void *sde_sync_get(uint64_t fd)
+{
+	/* force signed compare, fdget accepts an int argument */
+	return (signed int)fd >= 0 ? sync_fence_fdget(fd) : NULL;
+}
+
+void sde_sync_put(void *fence)
+{
+	if (fence)
+		sync_fence_put(fence);
+}
+
+int sde_sync_wait(void *fence, long timeout_ms)
+{
+	if (!fence)
+		return -EINVAL;
+	return sync_fence_wait(fence, timeout_ms);
+}
+
+uint32_t sde_sync_get_name_prefix(void *fence)
+{
+	char *name;
+	uint32_t i, prefix;
+
+	if (!fence)
+		return 0x0;
+
+	name = ((struct sync_fence *)fence)->name;
+	prefix = 0x0;
+	for (i = 0; i < sizeof(uint32_t) && name[i]; ++i)
+		prefix = (prefix << CHAR_BIT) | name[i];
+
+	return prefix;
+}
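+
+/*
+ * Illustrative sketch, not part of the original driver code: a worked
+ * example of the big-endian name packing done by sde_sync_get_name_prefix()
+ * above. For a fence named "crtc", the first four characters pack as
+ * ('c' << 24) | ('r' << 16) | ('t' << 8) | 'c' == 0x63727463. The helper
+ * below is hypothetical and exists only to demonstrate the arithmetic.
+ */
+static inline uint32_t __maybe_unused sde_sync_name_prefix_example(void)
+{
+	static const char name[] = "crtc";
+	uint32_t i, prefix = 0;
+
+	for (i = 0; i < sizeof(uint32_t) && name[i]; ++i)
+		prefix = (prefix << CHAR_BIT) | name[i];
+
+	return prefix;	/* 0x63727463 */
+}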
+
+#if IS_ENABLED(CONFIG_SW_SYNC)
+/**
+ * _sde_fence_create_fd - create fence object and return an fd for it
+ * This function is NOT thread-safe.
+ * @timeline: Timeline to associate with fence
+ * @name: Name for fence
+ * @val: Timeline value at which to signal the fence
+ * Return: File descriptor on success, or error code on error
+ */
+static int _sde_fence_create_fd(void *timeline, const char *name, uint32_t val)
+{
+	struct sync_pt *sync_pt;
+	struct sync_fence *fence;
+	signed int fd = -EINVAL;
+
+	if (!timeline) {
+		SDE_ERROR("invalid timeline\n");
+		goto exit;
+	}
+
+	if (!name)
+		name = "sde_fence";
+
+	/* create sync point */
+	sync_pt = sw_sync_pt_create(timeline, val);
+	if (sync_pt == NULL) {
+		SDE_ERROR("failed to create sync point, %s\n", name);
+		goto exit;
+	}
+
+	/* create fence */
+	fence = sync_fence_create(name, sync_pt);
+	if (fence == NULL) {
+		sync_pt_free(sync_pt);
+		SDE_ERROR("couldn't create fence, %s\n", name);
+		goto exit;
+	}
+
+	/* create fd */
+	fd = get_unused_fd_flags(0);
+	if (fd < 0) {
+		SDE_ERROR("failed to get_unused_fd_flags(), %s\n", name);
+		sync_fence_put(fence);
+		goto exit;
+	}
+
+	sync_fence_install(fence, fd);
+exit:
+	return fd;
+}
+
+/**
+ * SDE_FENCE_TIMELINE_NAME - macro for accessing s/w timeline's name
+ * @fence: Pointer to sde fence structure
+ * @drm_id: ID number of owning DRM Object
+ * Returns: Pointer to timeline name string
+ */
+#define SDE_FENCE_TIMELINE_NAME(fence) \
+	(((struct sw_sync_timeline *)fence->timeline)->obj.name)
+
+int sde_fence_init(struct sde_fence *fence,
+		const char *name,
+		uint32_t drm_id)
+{
+	if (!fence) {
+		SDE_ERROR("invalid argument(s)\n");
+		return -EINVAL;
+	}
+
+	fence->timeline = sw_sync_timeline_create(name ? name : "sde");
+	if (!fence->timeline) {
+		SDE_ERROR("failed to create timeline\n");
+		return -ENOMEM;
+	}
+
+	fence->commit_count = 0;
+	fence->done_count = 0;
+	fence->drm_id = drm_id;
+
+	mutex_init(&fence->fence_lock);
+	return 0;
+}
+
+void sde_fence_deinit(struct sde_fence *fence)
+{
+	if (!fence) {
+		SDE_ERROR("invalid fence\n");
+		return;
+	}
+
+	mutex_destroy(&fence->fence_lock);
+	if (fence->timeline)
+		sync_timeline_destroy(fence->timeline);
+}
+
+int sde_fence_prepare(struct sde_fence *fence)
+{
+	if (!fence) {
+		SDE_ERROR("invalid fence\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&fence->fence_lock);
+	++fence->commit_count;
+	SDE_EVT32(fence->drm_id, fence->commit_count, fence->done_count);
+	mutex_unlock(&fence->fence_lock);
+	return 0;
+}
+
+int sde_fence_create(struct sde_fence *fence, uint64_t *val, int offset)
+{
+	uint32_t trigger_value;
+	int fd, rc = -EINVAL;
+
+	if (!fence || !fence->timeline || !val) {
+		SDE_ERROR("invalid argument(s), fence %pK, pval %pK\n",
+				fence, val);
+	} else {
+		/*
+		 * Allow created fences to have a constant offset with respect
+		 * to the timeline. This allows us to delay the fence
+		 * signalling w.r.t. the commit completion (e.g., an offset of
+		 * +1 would cause fences returned during a particular commit
+		 * to signal after an additional delay of one commit, rather
+		 * than at the end of the current one).
+		 */
+		mutex_lock(&fence->fence_lock);
+		trigger_value = fence->commit_count + (int32_t)offset;
+		fd = _sde_fence_create_fd(fence->timeline,
+				SDE_FENCE_TIMELINE_NAME(fence),
+				trigger_value);
+		*val = fd;
+
+		SDE_EVT32(fence->drm_id, trigger_value, fd);
+		mutex_unlock(&fence->fence_lock);
+
+		if (fd >= 0)
+			rc = 0;
+	}
+
+	return rc;
+}
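+
+/*
+ * Illustrative sketch, not part of the original driver code: how the offset
+ * parameter maps onto the counters. If commit_count is 3 when the fences are
+ * created, an offset of 0 yields trigger_value 3, so the returned fd signals
+ * once the third commit completes; an offset of +1 yields trigger_value 4,
+ * delaying the signal by one further commit. The helper is hypothetical.
+ */
+static void __maybe_unused sde_fence_offset_example(struct sde_fence *fence)
+{
+	uint64_t fd_now, fd_next;
+
+	sde_fence_create(fence, &fd_now, 0);	/* end of current commit */
+	sde_fence_create(fence, &fd_next, 1);	/* one commit later */
+}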
+
+void sde_fence_signal(struct sde_fence *fence, bool is_error)
+{
+	if (!fence || !fence->timeline) {
+		SDE_ERROR("invalid fence, %pK\n", fence);
+		return;
+	}
+
+	mutex_lock(&fence->fence_lock);
+	if ((fence->done_count - fence->commit_count) < 0)
+		++fence->done_count;
+	else
+		SDE_ERROR("detected extra signal attempt!\n");
+
+	/*
+	 * Always advance 'done' counter,
+	 * but only advance timeline if !error
+	 */
+	if (!is_error) {
+		int32_t val;
+
+		val = fence->done_count;
+		val -= ((struct sw_sync_timeline *)
+				fence->timeline)->value;
+		if (val < 0)
+			SDE_ERROR("invalid value\n");
+		else
+			sw_sync_timeline_inc(fence->timeline, (int)val);
+	}
+
+	SDE_EVT32(fence->drm_id, fence->done_count,
+			((struct sw_sync_timeline *) fence->timeline)->value);
+
+	mutex_unlock(&fence->fence_lock);
+}
+#endif
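+
+/*
+ * Illustrative sketch, not part of the original driver code: the expected
+ * per-commit lifecycle of the fence container, for a hypothetical caller
+ * owning one struct sde_fence per output. Kept under #if 0 so it is never
+ * compiled; error handling is elided for brevity.
+ */
+#if 0
+static void sde_fence_lifecycle_example(struct sde_fence *output_fence,
+		uint32_t drm_obj_id)
+{
+	uint64_t fd;
+
+	sde_fence_init(output_fence, "crtc", drm_obj_id); /* once at setup */
+
+	/* then, for every commit: */
+	sde_fence_prepare(output_fence);	/* bump commit_count */
+	sde_fence_create(output_fence, &fd, 0);	/* fd handed to userspace */
+	/* ... hardware completes the frame ... */
+	sde_fence_signal(output_fence, false);	/* advance the timeline */
+
+	sde_fence_deinit(output_fence);		/* once at teardown */
+}
+#endif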
diff -Nruw linux-4.4.115-fbx/drivers/gpu/drm/msm/sde./sde_fence.h linux-4.4.115-fbx/drivers/gpu/drm/msm/sde/sde_fence.h
--- linux-4.4.115-fbx/drivers/gpu/drm/msm/sde./sde_fence.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/gpu/drm/msm/sde/sde_fence.h	2019-01-22 16:16:23.515246515 +0100
@@ -0,0 +1,177 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _SDE_FENCE_H_
+#define _SDE_FENCE_H_
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/mutex.h>
+
+#ifndef CHAR_BIT
+#define CHAR_BIT 8 /* define this if limits.h not available */
+#endif
+
+#ifdef CONFIG_SYNC
+/**
+ * sde_sync_get - Query sync fence object from a file handle
+ *
+ * On success, this function also increments the refcount of the sync fence
+ *
+ * @fd: Integer sync fence handle
+ *
+ * Return: Pointer to sync fence object, or NULL
+ */
+void *sde_sync_get(uint64_t fd);
+
+/**
+ * sde_sync_put - Releases a sync fence object acquired by @sde_sync_get
+ *
+ * This function decrements the sync fence's reference count; the object will
+ * be released if the reference count goes to zero.
+ *
+ * @fence: Pointer to sync fence
+ */
+void sde_sync_put(void *fence);
+
+/**
+ * sde_sync_wait - Wait on a sync fence, with optional timeout
+ *
+ * @fence: Pointer to sync fence
+ * @timeout_ms: Time to wait, in milliseconds. Waits forever if timeout_ms < 0
+ *
+ * Return: Zero on success, or -ETIME on timeout
+ */
+int sde_sync_wait(void *fence, long timeout_ms);
+
+/**
+ * sde_sync_get_name_prefix - get integer representation of fence name prefix
+ * @fence: Pointer to opaque fence structure
+ *
+ * Return: 32-bit integer containing first 4 characters of fence name,
+ *         big-endian notation
+ */
+uint32_t sde_sync_get_name_prefix(void *fence);
+#else
+static inline void *sde_sync_get(uint64_t fd)
+{
+	return NULL;
+}
+
+static inline void sde_sync_put(void *fence)
+{
+}
+
+static inline int sde_sync_wait(void *fence, long timeout_ms)
+{
+	return 0;
+}
+
+static inline uint32_t sde_sync_get_name_prefix(void *fence)
+{
+	return 0x0;
+}
+#endif
+
+/**
+ * struct sde_fence - output fence container structure
+ * @timeline: Pointer to fence timeline
+ * @commit_count: Number of detected commits since bootup
+ * @done_count: Number of completed commits since bootup
+ * @drm_id: ID number of owning DRM Object
+ * @fence_lock: Mutex object to protect local fence variables
+ */
+struct sde_fence {
+	void *timeline;
+	int32_t commit_count;
+	int32_t done_count;
+	uint32_t drm_id;
+	struct mutex fence_lock;
+};
+
+#if IS_ENABLED(CONFIG_SW_SYNC)
+/**
+ * sde_fence_init - initialize fence object
+ * @fence: Pointer to crtc fence object
+ * @name: Timeline name
+ * @drm_id: ID number of owning DRM Object
+ * Returns: Zero on success
+ */
+int sde_fence_init(struct sde_fence *fence,
+		const char *name,
+		uint32_t drm_id);
+
+/**
+ * sde_fence_deinit - deinit fence container
+ * @fence: Pointer to fence container
+ */
+void sde_fence_deinit(struct sde_fence *fence);
+
+/**
+ * sde_fence_prepare - prepare to return fences for current commit
+ * @fence: Pointer to fence container
+ * Returns: Zero on success
+ */
+int sde_fence_prepare(struct sde_fence *fence);
+
+/**
+ * sde_fence_create - create output fence object
+ * @fence: Pointer to fence container
+ * @val: Pointer to output value variable, fence fd will be placed here
+ * @offset: Fence signal commit offset, e.g., +1 to signal on next commit
+ * Returns: Zero on success
+ */
+int sde_fence_create(struct sde_fence *fence, uint64_t *val, int offset);
+
+/**
+ * sde_fence_signal - advance fence timeline to signal outstanding fences
+ * @fence: Pointer to fence container
+ * @is_error: Set to non-zero if the commit didn't complete successfully
+ */
+void sde_fence_signal(struct sde_fence *fence, bool is_error);
+#else
+static inline int sde_fence_init(struct sde_fence *fence,
+		const char *name,
+		uint32_t drm_id)
+{
+	/* do nothing */
+	return 0;
+}
+
+static inline void sde_fence_deinit(struct sde_fence *fence)
+{
+	/* do nothing */
+}
+
+static inline int sde_fence_prepare(struct sde_fence *fence)
+{
+	/* do nothing */
+	return 0;
+}
+
+static inline int sde_fence_get(struct sde_fence *fence, uint64_t *val)
+{
+	return -EINVAL;
+}
+
+static inline void sde_fence_signal(struct sde_fence *fence, bool is_error)
+{
+	/* do nothing */
+}
+
+static inline int sde_fence_create(struct sde_fence *fence, uint64_t *val,
+								int offset)
+{
+	return 0;
+}
+#endif /* IS_ENABLED(CONFIG_SW_SYNC) */
+
+#endif /* _SDE_FENCE_H_ */
diff -Nruw linux-4.4.115-fbx/drivers/gpu/drm/msm/sde./sde_formats.c linux-4.4.115-fbx/drivers/gpu/drm/msm/sde/sde_formats.c
--- linux-4.4.115-fbx/drivers/gpu/drm/msm/sde./sde_formats.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/gpu/drm/msm/sde/sde_formats.c	2019-10-29 09:26:23.641203159 +0100
@@ -0,0 +1,1294 @@
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt)	"[drm:%s:%d] " fmt, __func__, __LINE__
+
+#include <uapi/drm/drm_fourcc.h>
+#include <uapi/media/msm_media_info.h>
+
+#include "sde_kms.h"
+#include "sde_formats.h"
+
+#define SDE_UBWC_META_MACRO_W_H		16
+#define SDE_UBWC_META_BLOCK_SIZE	256
+#define SDE_UBWC_PLANE_SIZE_ALIGNMENT	4096
+
+#define SDE_TILE_HEIGHT_DEFAULT	1
+#define SDE_TILE_HEIGHT_TILED	4
+#define SDE_TILE_HEIGHT_UBWC	4
+#define SDE_TILE_HEIGHT_NV12	8
+
+#define SDE_MAX_IMG_WIDTH		0x3FFF
+#define SDE_MAX_IMG_HEIGHT		0x3FFF
+
+/*
+ * SDE supported format packing, bpp, and other format information.
+ * SDE currently only supports interleaved RGB formats.
+ * UBWC support for a pixel format is indicated by the flag; such formats
+ * carry an additional metadata plane.
+ */
+
+#define INTERLEAVED_RGB_FMT(fmt, a, r, g, b, e0, e1, e2, e3, uc, alpha,   \
+bp, flg, fm, np)                                                          \
+{                                                                         \
+	.base.pixel_format = DRM_FORMAT_ ## fmt,                          \
+	.fetch_planes = SDE_PLANE_INTERLEAVED,                            \
+	.alpha_enable = alpha,                                            \
+	.element = { (e0), (e1), (e2), (e3) },                            \
+	.bits = { g, b, r, a },                                           \
+	.chroma_sample = SDE_CHROMA_RGB,                                  \
+	.unpack_align_msb = 0,                                            \
+	.unpack_tight = 1,                                                \
+	.unpack_count = uc,                                               \
+	.bpp = bp,                                                        \
+	.fetch_mode = fm,                                                 \
+	.flag = {(flg)},                                                  \
+	.num_planes = np,                                                 \
+	.tile_height = SDE_TILE_HEIGHT_DEFAULT                            \
+}
+
+#define INTERLEAVED_RGB_FMT_TILED(fmt, a, r, g, b, e0, e1, e2, e3, uc,    \
+alpha, bp, flg, fm, np, th)                                               \
+{                                                                         \
+	.base.pixel_format = DRM_FORMAT_ ## fmt,                          \
+	.fetch_planes = SDE_PLANE_INTERLEAVED,                            \
+	.alpha_enable = alpha,                                            \
+	.element = { (e0), (e1), (e2), (e3) },                            \
+	.bits = { g, b, r, a },                                           \
+	.chroma_sample = SDE_CHROMA_RGB,                                  \
+	.unpack_align_msb = 0,                                            \
+	.unpack_tight = 1,                                                \
+	.unpack_count = uc,                                               \
+	.bpp = bp,                                                        \
+	.fetch_mode = fm,                                                 \
+	.flag = {(flg)},                                                  \
+	.num_planes = np,                                                 \
+	.tile_height = th                                                 \
+}
+
+
+#define INTERLEAVED_YUV_FMT(fmt, a, r, g, b, e0, e1, e2, e3,              \
+alpha, chroma, count, bp, flg, fm, np)                                    \
+{                                                                         \
+	.base.pixel_format = DRM_FORMAT_ ## fmt,                          \
+	.fetch_planes = SDE_PLANE_INTERLEAVED,                            \
+	.alpha_enable = alpha,                                            \
+	.element = { (e0), (e1), (e2), (e3)},                             \
+	.bits = { g, b, r, a },                                           \
+	.chroma_sample = chroma,                                          \
+	.unpack_align_msb = 0,                                            \
+	.unpack_tight = 1,                                                \
+	.unpack_count = count,                                            \
+	.bpp = bp,                                                        \
+	.fetch_mode = fm,                                                 \
+	.flag = {(flg)},                                                  \
+	.num_planes = np,                                                 \
+	.tile_height = SDE_TILE_HEIGHT_DEFAULT                            \
+}
+
+#define PSEUDO_YUV_FMT(fmt, a, r, g, b, e0, e1, chroma, flg, fm, np)      \
+{                                                                         \
+	.base.pixel_format = DRM_FORMAT_ ## fmt,                          \
+	.fetch_planes = SDE_PLANE_PSEUDO_PLANAR,                          \
+	.alpha_enable = false,                                            \
+	.element = { (e0), (e1), 0, 0 },                                  \
+	.bits = { g, b, r, a },                                           \
+	.chroma_sample = chroma,                                          \
+	.unpack_align_msb = 0,                                            \
+	.unpack_tight = 1,                                                \
+	.unpack_count = 2,                                                \
+	.bpp = 2,                                                         \
+	.fetch_mode = fm,                                                 \
+	.flag = {(flg)},                                                  \
+	.num_planes = np,                                                 \
+	.tile_height = SDE_TILE_HEIGHT_DEFAULT                            \
+}
+
+#define PSEUDO_YUV_FMT_TILED(fmt, a, r, g, b, e0, e1, chroma,             \
+flg, fm, np, th)                                                          \
+{                                                                         \
+	.base.pixel_format = DRM_FORMAT_ ## fmt,                          \
+	.fetch_planes = SDE_PLANE_PSEUDO_PLANAR,                          \
+	.alpha_enable = false,                                            \
+	.element = { (e0), (e1), 0, 0 },                                  \
+	.bits = { g, b, r, a },                                           \
+	.chroma_sample = chroma,                                          \
+	.unpack_align_msb = 0,                                            \
+	.unpack_tight = 1,                                                \
+	.unpack_count = 2,                                                \
+	.bpp = 2,                                                         \
+	.fetch_mode = fm,                                                 \
+	.flag = {(flg)},                                                  \
+	.num_planes = np,                                                 \
+	.tile_height = th                                                 \
+}
+
+#define PSEUDO_YUV_FMT_LOOSE(fmt, a, r, g, b, e0, e1, chroma, flg, fm, np)\
+{                                                                         \
+	.base.pixel_format = DRM_FORMAT_ ## fmt,                          \
+	.fetch_planes = SDE_PLANE_PSEUDO_PLANAR,                          \
+	.alpha_enable = false,                                            \
+	.element = { (e0), (e1), 0, 0 },                                  \
+	.bits = { g, b, r, a },                                           \
+	.chroma_sample = chroma,                                          \
+	.unpack_align_msb = 1,                                            \
+	.unpack_tight = 0,                                                \
+	.unpack_count = 2,                                                \
+	.bpp = 2,                                                         \
+	.fetch_mode = fm,                                                 \
+	.flag = {(flg)},                                                  \
+	.num_planes = np,                                                 \
+	.tile_height = SDE_TILE_HEIGHT_DEFAULT                            \
+}
+
+#define PSEUDO_YUV_FMT_LOOSE_TILED(fmt, a, r, g, b, e0, e1, chroma,       \
+flg, fm, np, th)                                                          \
+{                                                                         \
+	.base.pixel_format = DRM_FORMAT_ ## fmt,                          \
+	.fetch_planes = SDE_PLANE_PSEUDO_PLANAR,                          \
+	.alpha_enable = false,                                            \
+	.element = { (e0), (e1), 0, 0 },                                  \
+	.bits = { g, b, r, a },                                           \
+	.chroma_sample = chroma,                                          \
+	.unpack_align_msb = 1,                                            \
+	.unpack_tight = 0,                                                \
+	.unpack_count = 2,                                                \
+	.bpp = 2,                                                         \
+	.fetch_mode = fm,                                                 \
+	.flag = {(flg)},                                                  \
+	.num_planes = np,                                                 \
+	.tile_height = th                                                 \
+}
+
+
+#define PLANAR_YUV_FMT(fmt, a, r, g, b, e0, e1, e2, alpha, chroma, bp,    \
+flg, fm, np)                                                      \
+{                                                                         \
+	.base.pixel_format = DRM_FORMAT_ ## fmt,                          \
+	.fetch_planes = SDE_PLANE_PLANAR,                                 \
+	.alpha_enable = alpha,                                            \
+	.element = { (e0), (e1), (e2), 0 },                               \
+	.bits = { g, b, r, a },                                           \
+	.chroma_sample = chroma,                                          \
+	.unpack_align_msb = 0,                                            \
+	.unpack_tight = 1,                                                \
+	.unpack_count = 1,                                                \
+	.bpp = bp,                                                        \
+	.fetch_mode = fm,                                                 \
+	.flag = {(flg)},                                                  \
+	.num_planes = np,                                                 \
+	.tile_height = SDE_TILE_HEIGHT_DEFAULT                            \
+}
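+
+/*
+ * Illustrative note, not part of the original driver code: as a worked
+ * example of the macros above, the sde_format_map[] entry
+ * INTERLEAVED_RGB_FMT(ARGB8888, ...) below expands to an initializer
+ * equivalent to:
+ *
+ *	{
+ *		.base.pixel_format = DRM_FORMAT_ARGB8888,
+ *		.fetch_planes	= SDE_PLANE_INTERLEAVED,
+ *		.alpha_enable	= true,
+ *		.element	= { C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA },
+ *		.chroma_sample	= SDE_CHROMA_RGB,
+ *		.unpack_count	= 4,
+ *		.bpp		= 4,
+ *		.fetch_mode	= SDE_FETCH_LINEAR,
+ *		.num_planes	= 1,
+ *		.tile_height	= SDE_TILE_HEIGHT_DEFAULT,
+ *	}
+ */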
+
+/*
+ * struct sde_media_color_map - maps drm format to media format
+ * @format: DRM base pixel format
+ * @color: Media API color related to DRM format
+ */
+struct sde_media_color_map {
+	uint32_t format;
+	uint32_t color;
+};
+
+static const struct sde_format sde_format_map[] = {
+	INTERLEAVED_RGB_FMT(ARGB8888,
+		COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+		C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4,
+		true, 4, 0,
+		SDE_FETCH_LINEAR, 1),
+
+	INTERLEAVED_RGB_FMT(ABGR8888,
+		COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+		C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+		true, 4, 0,
+		SDE_FETCH_LINEAR, 1),
+
+	INTERLEAVED_RGB_FMT(XBGR8888,
+		COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+		C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+		true, 4, 0,
+		SDE_FETCH_LINEAR, 1),
+
+	INTERLEAVED_RGB_FMT(RGBA8888,
+		COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+		C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4,
+		true, 4, 0,
+		SDE_FETCH_LINEAR, 1),
+
+	INTERLEAVED_RGB_FMT(BGRA8888,
+		COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+		C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4,
+		true, 4, 0,
+		SDE_FETCH_LINEAR, 1),
+
+	INTERLEAVED_RGB_FMT(BGRX8888,
+		COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+		C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4,
+		false, 4, 0,
+		SDE_FETCH_LINEAR, 1),
+
+	INTERLEAVED_RGB_FMT(XRGB8888,
+		COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+		C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4,
+		false, 4, 0,
+		SDE_FETCH_LINEAR, 1),
+
+	INTERLEAVED_RGB_FMT(RGBX8888,
+		COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+		C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4,
+		false, 4, 0,
+		SDE_FETCH_LINEAR, 1),
+
+	INTERLEAVED_RGB_FMT(RGB888,
+		0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+		C1_B_Cb, C0_G_Y, C2_R_Cr, 0, 3,
+		false, 3, 0,
+		SDE_FETCH_LINEAR, 1),
+
+	INTERLEAVED_RGB_FMT(BGR888,
+		0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+		C2_R_Cr, C0_G_Y, C1_B_Cb, 0, 3,
+		false, 3, 0,
+		SDE_FETCH_LINEAR, 1),
+
+	INTERLEAVED_RGB_FMT(RGB565,
+		0, COLOR_5BIT, COLOR_6BIT, COLOR_5BIT,
+		C1_B_Cb, C0_G_Y, C2_R_Cr, 0, 3,
+		false, 2, 0,
+		SDE_FETCH_LINEAR, 1),
+
+	INTERLEAVED_RGB_FMT(BGR565,
+		0, COLOR_5BIT, COLOR_6BIT, COLOR_5BIT,
+		C2_R_Cr, C0_G_Y, C1_B_Cb, 0, 3,
+		false, 2, 0,
+		SDE_FETCH_LINEAR, 1),
+
+	INTERLEAVED_RGB_FMT(ARGB1555,
+		COLOR_ALPHA_1BIT, COLOR_5BIT, COLOR_5BIT, COLOR_5BIT,
+		C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4,
+		true, 2, 0,
+		SDE_FETCH_LINEAR, 1),
+
+	INTERLEAVED_RGB_FMT(ABGR1555,
+		COLOR_ALPHA_1BIT, COLOR_5BIT, COLOR_5BIT, COLOR_5BIT,
+		C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+		true, 2, 0,
+		SDE_FETCH_LINEAR, 1),
+
+	INTERLEAVED_RGB_FMT(RGBA5551,
+		COLOR_ALPHA_1BIT, COLOR_5BIT, COLOR_5BIT, COLOR_5BIT,
+		C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4,
+		true, 2, 0,
+		SDE_FETCH_LINEAR, 1),
+
+	INTERLEAVED_RGB_FMT(BGRA5551,
+		COLOR_ALPHA_1BIT, COLOR_5BIT, COLOR_5BIT, COLOR_5BIT,
+		C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4,
+		true, 2, 0,
+		SDE_FETCH_LINEAR, 1),
+
+	INTERLEAVED_RGB_FMT(XRGB1555,
+		COLOR_ALPHA_1BIT, COLOR_5BIT, COLOR_5BIT, COLOR_5BIT,
+		C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4,
+		false, 2, 0,
+		SDE_FETCH_LINEAR, 1),
+
+	INTERLEAVED_RGB_FMT(XBGR1555,
+		COLOR_ALPHA_1BIT, COLOR_5BIT, COLOR_5BIT, COLOR_5BIT,
+		C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+		false, 2, 0,
+		SDE_FETCH_LINEAR, 1),
+
+	INTERLEAVED_RGB_FMT(RGBX5551,
+		COLOR_ALPHA_1BIT, COLOR_5BIT, COLOR_5BIT, COLOR_5BIT,
+		C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4,
+		false, 2, 0,
+		SDE_FETCH_LINEAR, 1),
+
+	INTERLEAVED_RGB_FMT(BGRX5551,
+		COLOR_ALPHA_1BIT, COLOR_5BIT, COLOR_5BIT, COLOR_5BIT,
+		C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4,
+		false, 2, 0,
+		SDE_FETCH_LINEAR, 1),
+
+	INTERLEAVED_RGB_FMT(ARGB4444,
+		COLOR_ALPHA_4BIT, COLOR_4BIT, COLOR_4BIT, COLOR_4BIT,
+		C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4,
+		true, 2, 0,
+		SDE_FETCH_LINEAR, 1),
+
+	INTERLEAVED_RGB_FMT(ABGR4444,
+		COLOR_ALPHA_4BIT, COLOR_4BIT, COLOR_4BIT, COLOR_4BIT,
+		C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+		true, 2, 0,
+		SDE_FETCH_LINEAR, 1),
+
+	INTERLEAVED_RGB_FMT(RGBA4444,
+		COLOR_ALPHA_4BIT, COLOR_4BIT, COLOR_4BIT, COLOR_4BIT,
+		C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4,
+		true, 2, 0,
+		SDE_FETCH_LINEAR, 1),
+
+	INTERLEAVED_RGB_FMT(BGRA4444,
+		COLOR_ALPHA_4BIT, COLOR_4BIT, COLOR_4BIT, COLOR_4BIT,
+		C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4,
+		true, 2, 0,
+		SDE_FETCH_LINEAR, 1),
+
+	INTERLEAVED_RGB_FMT(XRGB4444,
+		COLOR_ALPHA_4BIT, COLOR_4BIT, COLOR_4BIT, COLOR_4BIT,
+		C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4,
+		false, 2, 0,
+		SDE_FETCH_LINEAR, 1),
+
+	INTERLEAVED_RGB_FMT(XBGR4444,
+		COLOR_ALPHA_4BIT, COLOR_4BIT, COLOR_4BIT, COLOR_4BIT,
+		C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+		false, 2, 0,
+		SDE_FETCH_LINEAR, 1),
+
+	INTERLEAVED_RGB_FMT(RGBX4444,
+		COLOR_ALPHA_4BIT, COLOR_4BIT, COLOR_4BIT, COLOR_4BIT,
+		C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4,
+		false, 2, 0,
+		SDE_FETCH_LINEAR, 1),
+
+	INTERLEAVED_RGB_FMT(BGRX4444,
+		COLOR_ALPHA_4BIT, COLOR_4BIT, COLOR_4BIT, COLOR_4BIT,
+		C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4,
+		false, 2, 0,
+		SDE_FETCH_LINEAR, 1),
+
+	INTERLEAVED_RGB_FMT(BGRA1010102,
+		COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+		C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4,
+		true, 4, SDE_FORMAT_FLAG_DX,
+		SDE_FETCH_LINEAR, 1),
+
+	INTERLEAVED_RGB_FMT(RGBA1010102,
+		COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+		C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4,
+		true, 4, SDE_FORMAT_FLAG_DX,
+		SDE_FETCH_LINEAR, 1),
+
+	INTERLEAVED_RGB_FMT(ABGR2101010,
+		COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+		C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+		true, 4, SDE_FORMAT_FLAG_DX,
+		SDE_FETCH_LINEAR, 1),
+
+	INTERLEAVED_RGB_FMT(ARGB2101010,
+		COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+		C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4,
+		true, 4, SDE_FORMAT_FLAG_DX,
+		SDE_FETCH_LINEAR, 1),
+
+	INTERLEAVED_RGB_FMT(XRGB2101010,
+		COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+		C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4,
+		false, 4, SDE_FORMAT_FLAG_DX,
+		SDE_FETCH_LINEAR, 1),
+
+	INTERLEAVED_RGB_FMT(BGRX1010102,
+		COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+		C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4,
+		false, 4, SDE_FORMAT_FLAG_DX,
+		SDE_FETCH_LINEAR, 1),
+
+	INTERLEAVED_RGB_FMT(XBGR2101010,
+		COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+		C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+		false, 4, SDE_FORMAT_FLAG_DX,
+		SDE_FETCH_LINEAR, 1),
+
+	INTERLEAVED_RGB_FMT(RGBX1010102,
+		COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+		C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4,
+		false, 4, SDE_FORMAT_FLAG_DX,
+		SDE_FETCH_LINEAR, 1),
+
+	PSEUDO_YUV_FMT(NV12,
+		0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+		C1_B_Cb, C2_R_Cr,
+		SDE_CHROMA_420, SDE_FORMAT_FLAG_YUV,
+		SDE_FETCH_LINEAR, 2),
+
+	PSEUDO_YUV_FMT(NV21,
+		0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+		C2_R_Cr, C1_B_Cb,
+		SDE_CHROMA_420, SDE_FORMAT_FLAG_YUV,
+		SDE_FETCH_LINEAR, 2),
+
+	PSEUDO_YUV_FMT(NV16,
+		0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+		C1_B_Cb, C2_R_Cr,
+		SDE_CHROMA_H2V1, SDE_FORMAT_FLAG_YUV,
+		SDE_FETCH_LINEAR, 2),
+
+	PSEUDO_YUV_FMT(NV61,
+		0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+		C2_R_Cr, C1_B_Cb,
+		SDE_CHROMA_H2V1, SDE_FORMAT_FLAG_YUV,
+		SDE_FETCH_LINEAR, 2),
+
+	INTERLEAVED_YUV_FMT(VYUY,
+		0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+		C2_R_Cr, C0_G_Y, C1_B_Cb, C0_G_Y,
+		false, SDE_CHROMA_H2V1, 4, 2, SDE_FORMAT_FLAG_YUV,
+		SDE_FETCH_LINEAR, 2),
+
+	INTERLEAVED_YUV_FMT(UYVY,
+		0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+		C1_B_Cb, C0_G_Y, C2_R_Cr, C0_G_Y,
+		false, SDE_CHROMA_H2V1, 4, 2, SDE_FORMAT_FLAG_YUV,
+		SDE_FETCH_LINEAR, 2),
+
+	INTERLEAVED_YUV_FMT(YUYV,
+		0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+		C0_G_Y, C1_B_Cb, C0_G_Y, C2_R_Cr,
+		false, SDE_CHROMA_H2V1, 4, 2, SDE_FORMAT_FLAG_YUV,
+		SDE_FETCH_LINEAR, 2),
+
+	INTERLEAVED_YUV_FMT(YVYU,
+		0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+		C0_G_Y, C2_R_Cr, C0_G_Y, C1_B_Cb,
+		false, SDE_CHROMA_H2V1, 4, 2, SDE_FORMAT_FLAG_YUV,
+		SDE_FETCH_LINEAR, 2),
+
+	PLANAR_YUV_FMT(YUV420,
+		0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+		C2_R_Cr, C1_B_Cb, C0_G_Y,
+		false, SDE_CHROMA_420, 1, SDE_FORMAT_FLAG_YUV,
+		SDE_FETCH_LINEAR, 3),
+
+	PLANAR_YUV_FMT(YVU420,
+		0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+		C1_B_Cb, C2_R_Cr, C0_G_Y,
+		false, SDE_CHROMA_420, 1, SDE_FORMAT_FLAG_YUV,
+		SDE_FETCH_LINEAR, 3),
+};
+
+/*
+ * A5x tile format tables:
+ * These tables hold the A5x tile formats supported.
+ */
+static const struct sde_format sde_format_map_tile[] = {
+	INTERLEAVED_RGB_FMT_TILED(BGR565,
+		0, COLOR_5BIT, COLOR_6BIT, COLOR_5BIT,
+		C2_R_Cr, C0_G_Y, C1_B_Cb, 0, 3,
+		false, 2, 0,
+		SDE_FETCH_UBWC, 1, SDE_TILE_HEIGHT_TILED),
+
+	INTERLEAVED_RGB_FMT_TILED(ARGB8888,
+		COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+		C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4,
+		true, 4, 0,
+		SDE_FETCH_UBWC, 1, SDE_TILE_HEIGHT_TILED),
+
+	INTERLEAVED_RGB_FMT_TILED(ABGR8888,
+		COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+		C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4,
+		true, 4, 0,
+		SDE_FETCH_UBWC, 1, SDE_TILE_HEIGHT_TILED),
+
+	INTERLEAVED_RGB_FMT_TILED(XBGR8888,
+		COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+		C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+		false, 4, 0,
+		SDE_FETCH_UBWC, 1, SDE_TILE_HEIGHT_TILED),
+
+	INTERLEAVED_RGB_FMT_TILED(RGBA8888,
+		COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+		C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+		true, 4, 0,
+		SDE_FETCH_UBWC, 1, SDE_TILE_HEIGHT_TILED),
+
+	INTERLEAVED_RGB_FMT_TILED(BGRA8888,
+		COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+		C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4,
+		true, 4, 0,
+		SDE_FETCH_UBWC, 1, SDE_TILE_HEIGHT_TILED),
+
+	INTERLEAVED_RGB_FMT_TILED(BGRX8888,
+		COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+		C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4,
+		false, 4, 0,
+		SDE_FETCH_UBWC, 1, SDE_TILE_HEIGHT_TILED),
+
+	INTERLEAVED_RGB_FMT_TILED(XRGB8888,
+		COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+		C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4,
+		false, 4, 0,
+		SDE_FETCH_UBWC, 1, SDE_TILE_HEIGHT_TILED),
+
+	INTERLEAVED_RGB_FMT_TILED(RGBX8888,
+		COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+		C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+		false, 4, 0,
+		SDE_FETCH_UBWC, 1, SDE_TILE_HEIGHT_TILED),
+
+	INTERLEAVED_RGB_FMT_TILED(ABGR2101010,
+		COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+		C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+		true, 4, SDE_FORMAT_FLAG_DX,
+		SDE_FETCH_UBWC, 1, SDE_TILE_HEIGHT_TILED),
+
+	INTERLEAVED_RGB_FMT_TILED(XBGR2101010,
+		COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+		C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+		true, 4, SDE_FORMAT_FLAG_DX,
+		SDE_FETCH_UBWC, 1, SDE_TILE_HEIGHT_TILED),
+
+	PSEUDO_YUV_FMT_TILED(NV12,
+		0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+		C1_B_Cb, C2_R_Cr,
+		SDE_CHROMA_420, SDE_FORMAT_FLAG_YUV,
+		SDE_FETCH_UBWC, 2, SDE_TILE_HEIGHT_NV12),
+
+	PSEUDO_YUV_FMT_TILED(NV21,
+		0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+		C2_R_Cr, C1_B_Cb,
+		SDE_CHROMA_420, SDE_FORMAT_FLAG_YUV,
+		SDE_FETCH_UBWC, 2, SDE_TILE_HEIGHT_NV12),
+};
+
+static const struct sde_format sde_format_map_p010_tile[] = {
+	PSEUDO_YUV_FMT_LOOSE_TILED(NV12,
+		0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+		C1_B_Cb, C2_R_Cr,
+		SDE_CHROMA_420, (SDE_FORMAT_FLAG_YUV | SDE_FORMAT_FLAG_DX),
+		SDE_FETCH_UBWC, 2, SDE_TILE_HEIGHT_NV12),
+};
+
+static const struct sde_format sde_format_map_tp10_tile[] = {
+	PSEUDO_YUV_FMT_TILED(NV12,
+		0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+		C1_B_Cb, C2_R_Cr,
+		SDE_CHROMA_420, (SDE_FORMAT_FLAG_YUV | SDE_FORMAT_FLAG_DX),
+		SDE_FETCH_UBWC, 2, SDE_TILE_HEIGHT_NV12),
+};
+
+/*
+ * UBWC formats table:
+ * This table holds the UBWC formats supported.
+ * If a compression ratio needs to be used for this or any other format,
+ * the data will be passed by user-space.
+ */
+static const struct sde_format sde_format_map_ubwc[] = {
+	INTERLEAVED_RGB_FMT_TILED(BGR565,
+		0, COLOR_5BIT, COLOR_6BIT, COLOR_5BIT,
+		C2_R_Cr, C0_G_Y, C1_B_Cb, 0, 3,
+		false, 2, SDE_FORMAT_FLAG_COMPRESSED,
+		SDE_FETCH_UBWC, 2, SDE_TILE_HEIGHT_UBWC),
+
+	INTERLEAVED_RGB_FMT_TILED(ABGR8888,
+		COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+		C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+		true, 4, SDE_FORMAT_FLAG_COMPRESSED,
+		SDE_FETCH_UBWC, 2, SDE_TILE_HEIGHT_UBWC),
+
+	INTERLEAVED_RGB_FMT_TILED(XBGR8888,
+		COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+		C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+		false, 4, SDE_FORMAT_FLAG_COMPRESSED,
+		SDE_FETCH_UBWC, 2, SDE_TILE_HEIGHT_UBWC),
+
+	INTERLEAVED_RGB_FMT_TILED(ABGR2101010,
+		COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+		C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+		true, 4, SDE_FORMAT_FLAG_DX | SDE_FORMAT_FLAG_COMPRESSED,
+		SDE_FETCH_UBWC, 2, SDE_TILE_HEIGHT_UBWC),
+
+	INTERLEAVED_RGB_FMT_TILED(XBGR2101010,
+		COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+		C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+		true, 4, SDE_FORMAT_FLAG_DX | SDE_FORMAT_FLAG_COMPRESSED,
+		SDE_FETCH_UBWC, 2, SDE_TILE_HEIGHT_UBWC),
+
+	PSEUDO_YUV_FMT_TILED(NV12,
+		0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+		C1_B_Cb, C2_R_Cr,
+		SDE_CHROMA_420, SDE_FORMAT_FLAG_YUV |
+				SDE_FORMAT_FLAG_COMPRESSED,
+		SDE_FETCH_UBWC, 4, SDE_TILE_HEIGHT_NV12),
+};
+
+static const struct sde_format sde_format_map_p010[] = {
+	PSEUDO_YUV_FMT_LOOSE(NV12,
+		0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+		C1_B_Cb, C2_R_Cr,
+		SDE_CHROMA_420, (SDE_FORMAT_FLAG_YUV | SDE_FORMAT_FLAG_DX),
+		SDE_FETCH_LINEAR, 2),
+};
+
+static const struct sde_format sde_format_map_p010_ubwc[] = {
+	PSEUDO_YUV_FMT_LOOSE_TILED(NV12,
+		0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+		C1_B_Cb, C2_R_Cr,
+		SDE_CHROMA_420, (SDE_FORMAT_FLAG_YUV | SDE_FORMAT_FLAG_DX |
+				SDE_FORMAT_FLAG_COMPRESSED),
+		SDE_FETCH_UBWC, 4, SDE_TILE_HEIGHT_NV12),
+};
+
+static const struct sde_format sde_format_map_tp10_ubwc[] = {
+	PSEUDO_YUV_FMT_TILED(NV12,
+		0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+		C1_B_Cb, C2_R_Cr,
+		SDE_CHROMA_420, (SDE_FORMAT_FLAG_YUV | SDE_FORMAT_FLAG_DX |
+				SDE_FORMAT_FLAG_COMPRESSED),
+		SDE_FETCH_UBWC, 4, SDE_TILE_HEIGHT_NV12),
+};
+
+/*
+ * _sde_get_v_h_subsample_rate - Get subsample rates for all formats we support
+ * Note: Not using the drm_format_*_subsampling helpers since we maintain our
+ * own format table.
+ */
+static void _sde_get_v_h_subsample_rate(
+	enum sde_chroma_samp_type chroma_sample,
+	uint32_t *v_sample,
+	uint32_t *h_sample)
+{
+	if (!v_sample || !h_sample)
+		return;
+
+	switch (chroma_sample) {
+	case SDE_CHROMA_H2V1:
+		*v_sample = 1;
+		*h_sample = 2;
+		break;
+	case SDE_CHROMA_H1V2:
+		*v_sample = 2;
+		*h_sample = 1;
+		break;
+	case SDE_CHROMA_420:
+		*v_sample = 2;
+		*h_sample = 2;
+		break;
+	default:
+		*v_sample = 1;
+		*h_sample = 1;
+		break;
+	}
+}
+
+static int _sde_format_get_media_color_ubwc(const struct sde_format *fmt)
+{
+	static const struct sde_media_color_map sde_media_ubwc_map[] = {
+		{DRM_FORMAT_ABGR8888, COLOR_FMT_RGBA8888_UBWC},
+		{DRM_FORMAT_XBGR8888, COLOR_FMT_RGBA8888_UBWC},
+		{DRM_FORMAT_ABGR2101010, COLOR_FMT_RGBA1010102_UBWC},
+		{DRM_FORMAT_XBGR2101010, COLOR_FMT_RGBA1010102_UBWC},
+		{DRM_FORMAT_BGR565, COLOR_FMT_RGB565_UBWC},
+	};
+	int color_fmt = -1;
+	int i;
+
+	if (fmt->base.pixel_format == DRM_FORMAT_NV12) {
+		if (SDE_FORMAT_IS_DX(fmt)) {
+			if (fmt->unpack_tight)
+				color_fmt = COLOR_FMT_NV12_BPP10_UBWC;
+			else
+				color_fmt = COLOR_FMT_P010_UBWC;
+		} else
+			color_fmt = COLOR_FMT_NV12_UBWC;
+		return color_fmt;
+	}
+
+	for (i = 0; i < ARRAY_SIZE(sde_media_ubwc_map); ++i)
+		if (fmt->base.pixel_format == sde_media_ubwc_map[i].format) {
+			color_fmt = sde_media_ubwc_map[i].color;
+			break;
+		}
+	return color_fmt;
+}
+
+static int _sde_format_get_plane_sizes_ubwc(
+		const struct sde_format *fmt,
+		const uint32_t width,
+		const uint32_t height,
+		struct sde_hw_fmt_layout *layout)
+{
+	int i;
+	int color;
+	bool meta = SDE_FORMAT_IS_UBWC(fmt);
+
+	memset(layout, 0, sizeof(struct sde_hw_fmt_layout));
+	layout->format = fmt;
+	layout->width = width;
+	layout->height = height;
+	layout->num_planes = fmt->num_planes;
+
+	color = _sde_format_get_media_color_ubwc(fmt);
+	if (color < 0) {
+		DRM_ERROR("UBWC format not supported for fmt:0x%X\n",
+			fmt->base.pixel_format);
+		return -EINVAL;
+	}
+
+	if (SDE_FORMAT_IS_YUV(layout->format)) {
+		uint32_t y_sclines, uv_sclines;
+		uint32_t y_meta_scanlines = 0;
+		uint32_t uv_meta_scanlines = 0;
+
+		layout->num_planes = 2;
+		layout->plane_pitch[0] = VENUS_Y_STRIDE(color, width);
+		y_sclines = VENUS_Y_SCANLINES(color, height);
+		layout->plane_size[0] = MSM_MEDIA_ALIGN(layout->plane_pitch[0] *
+			y_sclines, SDE_UBWC_PLANE_SIZE_ALIGNMENT);
+
+		layout->plane_pitch[1] = VENUS_UV_STRIDE(color, width);
+		uv_sclines = VENUS_UV_SCANLINES(color, height);
+		layout->plane_size[1] = MSM_MEDIA_ALIGN(layout->plane_pitch[1] *
+			uv_sclines, SDE_UBWC_PLANE_SIZE_ALIGNMENT);
+
+		if (!meta)
+			goto done;
+
+		layout->num_planes += 2;
+		layout->plane_pitch[2] = VENUS_Y_META_STRIDE(color, width);
+		y_meta_scanlines = VENUS_Y_META_SCANLINES(color, height);
+		layout->plane_size[2] = MSM_MEDIA_ALIGN(layout->plane_pitch[2] *
+			y_meta_scanlines, SDE_UBWC_PLANE_SIZE_ALIGNMENT);
+
+		layout->plane_pitch[3] = VENUS_UV_META_STRIDE(color, width);
+		uv_meta_scanlines = VENUS_UV_META_SCANLINES(color, height);
+		layout->plane_size[3] = MSM_MEDIA_ALIGN(layout->plane_pitch[3] *
+			uv_meta_scanlines, SDE_UBWC_PLANE_SIZE_ALIGNMENT);
+
+	} else {
+		uint32_t rgb_scanlines, rgb_meta_scanlines;
+
+		layout->num_planes = 1;
+
+		layout->plane_pitch[0] = VENUS_RGB_STRIDE(color, width);
+		rgb_scanlines = VENUS_RGB_SCANLINES(color, height);
+		layout->plane_size[0] = MSM_MEDIA_ALIGN(layout->plane_pitch[0] *
+			rgb_scanlines, SDE_UBWC_PLANE_SIZE_ALIGNMENT);
+
+		if (!meta)
+			goto done;
+		layout->num_planes += 2;
+		layout->plane_pitch[2] = VENUS_RGB_META_STRIDE(color, width);
+		rgb_meta_scanlines = VENUS_RGB_META_SCANLINES(color, height);
+		layout->plane_size[2] = MSM_MEDIA_ALIGN(layout->plane_pitch[2] *
+			rgb_meta_scanlines, SDE_UBWC_PLANE_SIZE_ALIGNMENT);
+	}
+
+done:
+	for (i = 0; i < SDE_MAX_PLANES; i++)
+		layout->total_size += layout->plane_size[i];
+
+	return 0;
+}
+
+static int _sde_format_get_plane_sizes_linear(
+		const struct sde_format *fmt,
+		const uint32_t width,
+		const uint32_t height,
+		struct sde_hw_fmt_layout *layout)
+{
+	int i;
+
+	memset(layout, 0, sizeof(struct sde_hw_fmt_layout));
+	layout->format = fmt;
+	layout->width = width;
+	layout->height = height;
+	layout->num_planes = fmt->num_planes;
+
+	/* Due to memset above, only need to set planes of interest */
+	if (fmt->fetch_planes == SDE_PLANE_INTERLEAVED) {
+		layout->num_planes = 1;
+		layout->plane_size[0] = width * height * layout->format->bpp;
+		layout->plane_pitch[0] = width * layout->format->bpp;
+	} else {
+		uint32_t v_subsample, h_subsample;
+		uint32_t chroma_samp;
+		uint32_t bpp = 1;
+
+		chroma_samp = fmt->chroma_sample;
+		_sde_get_v_h_subsample_rate(chroma_samp, &v_subsample,
+				&h_subsample);
+
+		if (width % h_subsample || height % v_subsample) {
+			DRM_ERROR("mismatch in subsample vs dimensions\n");
+			return -EINVAL;
+		}
+
+		if ((fmt->base.pixel_format == DRM_FORMAT_NV12) &&
+			(SDE_FORMAT_IS_DX(fmt)))
+			bpp = 2;
+		layout->plane_pitch[0] = width * bpp;
+		layout->plane_pitch[1] = layout->plane_pitch[0] / h_subsample;
+		layout->plane_size[0] = layout->plane_pitch[0] * height;
+		layout->plane_size[1] = layout->plane_pitch[1] *
+				(height / v_subsample);
+
+		if (fmt->fetch_planes == SDE_PLANE_PSEUDO_PLANAR) {
+			layout->num_planes = 2;
+			layout->plane_size[1] *= 2;
+			layout->plane_pitch[1] *= 2;
+		} else {
+			/* planar */
+			layout->num_planes = 3;
+			layout->plane_size[2] = layout->plane_size[1];
+			layout->plane_pitch[2] = layout->plane_pitch[1];
+		}
+	}
+
+	for (i = 0; i < SDE_MAX_PLANES; i++)
+		layout->total_size += layout->plane_size[i];
+
+	return 0;
+}
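+
+/*
+ * Illustrative sketch, not part of the original driver code: a worked
+ * example of the linear plane math above for 1920x1080 NV12 (pseudo
+ * planar, 4:2:0, 1 byte per component). Expected results:
+ * plane_pitch[0] = 1920, plane_size[0] = 2073600, plane_pitch[1] = 1920
+ * (interleaved CbCr, doubled), plane_size[1] = 1036800, and
+ * total_size = 3110400 -- the familiar 1.5 bytes per pixel.
+ */
+static int __maybe_unused sde_nv12_plane_sizes_example(void)
+{
+	struct sde_hw_fmt_layout layout;
+	const struct sde_format *fmt = sde_get_sde_format(DRM_FORMAT_NV12);
+
+	if (!fmt)
+		return -EINVAL;
+
+	return sde_format_get_plane_sizes(fmt, 1920, 1080, &layout);
+}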
+
+int sde_format_get_plane_sizes(
+		const struct sde_format *fmt,
+		const uint32_t w,
+		const uint32_t h,
+		struct sde_hw_fmt_layout *layout)
+{
+	if (!layout || !fmt) {
+		DRM_ERROR("invalid pointer\n");
+		return -EINVAL;
+	}
+
+	if ((w > SDE_MAX_IMG_WIDTH) || (h > SDE_MAX_IMG_HEIGHT)) {
+		DRM_ERROR("image dimensions outside max range\n");
+		return -ERANGE;
+	}
+
+	if (SDE_FORMAT_IS_UBWC(fmt) || SDE_FORMAT_IS_TILE(fmt))
+		return _sde_format_get_plane_sizes_ubwc(fmt, w, h, layout);
+
+	return _sde_format_get_plane_sizes_linear(fmt, w, h, layout);
+}
+
+static int _sde_format_populate_addrs_ubwc(
+		struct msm_gem_address_space *aspace,
+		struct drm_framebuffer *fb,
+		struct sde_hw_fmt_layout *layout)
+{
+	uint32_t base_addr;
+	bool meta;
+
+	if (!fb || !layout) {
+		DRM_ERROR("invalid pointers\n");
+		return -EINVAL;
+	}
+
+	base_addr = msm_framebuffer_iova(fb, aspace, 0);
+	if (!base_addr) {
+		DRM_ERROR("failed to retrieve base addr\n");
+		return -EFAULT;
+	}
+
+	meta = SDE_FORMAT_IS_UBWC(layout->format);
+
+	/* Per-format logic for verifying active planes */
+	if (SDE_FORMAT_IS_YUV(layout->format)) {
+		/************************************************/
+		/*      UBWC            **                      */
+		/*      buffer          **      SDE PLANE       */
+		/*      format          **                      */
+		/************************************************/
+		/* -------------------  ** -------------------- */
+		/* |      Y meta     |  ** |    Y bitstream   | */
+		/* |       data      |  ** |       plane      | */
+		/* -------------------  ** -------------------- */
+		/* |    Y bitstream  |  ** |  CbCr bitstream  | */
+		/* |       data      |  ** |       plane      | */
+		/* -------------------  ** -------------------- */
+		/* |   Cbcr metadata |  ** |       Y meta     | */
+		/* |       data      |  ** |       plane      | */
+		/* -------------------  ** -------------------- */
+		/* |  CbCr bitstream |  ** |     CbCr meta    | */
+		/* |       data      |  ** |       plane      | */
+		/* -------------------  ** -------------------- */
+		/************************************************/
+
+		/* configure Y bitstream plane */
+		layout->plane_addr[0] = base_addr + layout->plane_size[2];
+
+		/* configure CbCr bitstream plane */
+		layout->plane_addr[1] = base_addr + layout->plane_size[0]
+			+ layout->plane_size[2] + layout->plane_size[3];
+
+		if (!meta)
+			goto done;
+
+		/* configure Y metadata plane */
+		layout->plane_addr[2] = base_addr;
+
+		/* configure CbCr metadata plane */
+		layout->plane_addr[3] = base_addr + layout->plane_size[0]
+			+ layout->plane_size[2];
+
+	} else {
+		/************************************************/
+		/*      UBWC            **                      */
+		/*      buffer          **      SDE PLANE       */
+		/*      format          **                      */
+		/************************************************/
+		/* -------------------  ** -------------------- */
+		/* |      RGB meta   |  ** |   RGB bitstream  | */
+		/* |       data      |  ** |       plane      | */
+		/* -------------------  ** -------------------- */
+		/* |  RGB bitstream  |  ** |       NONE       | */
+		/* |       data      |  ** |                  | */
+		/* -------------------  ** -------------------- */
+		/*                      ** |     RGB meta     | */
+		/*                      ** |       plane      | */
+		/*                      ** -------------------- */
+		/************************************************/
+
+		layout->plane_addr[0] = base_addr + layout->plane_size[2];
+		layout->plane_addr[1] = 0;
+
+		if (!meta)
+			goto done;
+
+		layout->plane_addr[2] = base_addr;
+		layout->plane_addr[3] = 0;
+	}
+done:
+	return 0;
+}
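+
+/*
+ * Illustrative note, not part of the original driver code: a concrete
+ * reading of the YUV diagram above. With plane sizes sz[0..3] for Y data,
+ * CbCr data, Y meta and CbCr meta, and a buffer base address B, the plane
+ * addresses resolve to:
+ *	plane_addr[2] (Y meta)    = B
+ *	plane_addr[0] (Y data)    = B + sz[2]
+ *	plane_addr[3] (CbCr meta) = B + sz[2] + sz[0]
+ *	plane_addr[1] (CbCr data) = B + sz[2] + sz[0] + sz[3]
+ * matching the Y-meta / Y / CbCr-meta / CbCr ordering of the UBWC buffer.
+ */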
+
+static int _sde_format_populate_addrs_linear(
+		struct msm_gem_address_space *aspace,
+		struct drm_framebuffer *fb,
+		struct sde_hw_fmt_layout *layout)
+{
+	unsigned int i;
+
+	/* Update layout pitches from fb */
+	for (i = 0; i < layout->num_planes; ++i) {
+		if (layout->plane_pitch[i] != fb->pitches[i]) {
+			SDE_DEBUG("plane %u expected pitch %u, fb %u\n",
+				i, layout->plane_pitch[i], fb->pitches[i]);
+			layout->plane_pitch[i] = fb->pitches[i];
+		}
+	}
+
+	/* Populate addresses for simple formats here */
+	for (i = 0; i < layout->num_planes; ++i) {
+		layout->plane_addr[i] = msm_framebuffer_iova(fb, aspace, i);
+		if (!layout->plane_addr[i]) {
+			DRM_ERROR("failed to retrieve base addr\n");
+			return -EFAULT;
+		}
+	}
+
+	return 0;
+}
+
+int sde_format_populate_layout(
+		struct msm_gem_address_space *aspace,
+		struct drm_framebuffer *fb,
+		struct sde_hw_fmt_layout *layout)
+{
+	uint32_t plane_addr[SDE_MAX_PLANES];
+	int i, ret;
+
+	if (!fb || !layout) {
+		DRM_ERROR("invalid arguments\n");
+		return -EINVAL;
+	}
+
+	if ((fb->width > SDE_MAX_IMG_WIDTH) ||
+			(fb->height > SDE_MAX_IMG_HEIGHT)) {
+		DRM_ERROR("image dimensions outside max range\n");
+		return -ERANGE;
+	}
+
+	layout->format = to_sde_format(msm_framebuffer_format(fb));
+
+	/* Populate the plane sizes etc via get_format */
+	ret = sde_format_get_plane_sizes(layout->format, fb->width, fb->height,
+			layout);
+	if (ret)
+		return ret;
+
+	for (i = 0; i < SDE_MAX_PLANES; ++i)
+		plane_addr[i] = layout->plane_addr[i];
+
+	/* Populate the addresses given the fb */
+	if (SDE_FORMAT_IS_UBWC(layout->format) ||
+			SDE_FORMAT_IS_TILE(layout->format))
+		ret = _sde_format_populate_addrs_ubwc(aspace, fb, layout);
+	else
+		ret = _sde_format_populate_addrs_linear(aspace, fb, layout);
+
+	/* check if anything changed */
+	if (!ret && !memcmp(plane_addr, layout->plane_addr, sizeof(plane_addr)))
+		ret = -EAGAIN;
+
+	return ret;
+}
+
+static void _sde_format_calc_offset_linear(struct sde_hw_fmt_layout *source,
+		u32 x, u32 y)
+{
+	if ((x == 0) && (y == 0))
+		return;
+
+	source->plane_addr[0] += y * source->plane_pitch[0];
+
+	if (source->num_planes == 1) {
+		source->plane_addr[0] += x * source->format->bpp;
+	} else {
+		uint32_t xoff, yoff;
+		uint32_t v_subsample = 1;
+		uint32_t h_subsample = 1;
+
+		_sde_get_v_h_subsample_rate(source->format->chroma_sample,
+				&v_subsample, &h_subsample);
+
+		xoff = x / h_subsample;
+		yoff = y / v_subsample;
+
+		source->plane_addr[0] += x;
+		source->plane_addr[1] += xoff +
+				(yoff * source->plane_pitch[1]);
+		if (source->num_planes == 2) /* pseudo planar */
+			source->plane_addr[1] += xoff;
+		else /* planar */
+			source->plane_addr[2] += xoff +
+				(yoff * source->plane_pitch[2]);
+	}
+}
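+
+/*
+ * Illustrative note, not part of the original driver code: a worked example
+ * of the offset math above for linear 1920x1080 NV12 with an roi origin of
+ * (x, y) = (64, 32). Luma: plane_addr[0] += 32 * 1920 + 64. Chroma (4:2:0,
+ * so xoff = 32 and yoff = 16, pseudo planar pitch 1920):
+ * plane_addr[1] += 32 + 16 * 1920 + 32, the second xoff stepping over the
+ * interleaved Cb/Cr pair.
+ */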
+
+int sde_format_populate_layout_with_roi(
+		struct msm_gem_address_space *aspace,
+		struct drm_framebuffer *fb,
+		struct sde_rect *roi,
+		struct sde_hw_fmt_layout *layout)
+{
+	int ret;
+
+	ret = sde_format_populate_layout(aspace, fb, layout);
+	if (ret || !roi)
+		return ret;
+
+	if (!roi->w || !roi->h || (roi->x + roi->w > fb->width) ||
+			(roi->y + roi->h > fb->height)) {
+		DRM_ERROR("invalid roi=[%d,%d,%d,%d], fb=[%u,%u]\n",
+				roi->x, roi->y, roi->w, roi->h,
+				fb->width, fb->height);
+		ret = -EINVAL;
+	} else if (SDE_FORMAT_IS_LINEAR(layout->format)) {
+		_sde_format_calc_offset_linear(layout, roi->x, roi->y);
+		layout->width = roi->w;
+		layout->height = roi->h;
+	} else if (roi->x || roi->y || (roi->w != fb->width) ||
+			(roi->h != fb->height)) {
+		DRM_ERROR("non-linear layout with roi not supported\n");
+		ret = -EINVAL;
+	}
+
+	return ret;
+}
+
+int sde_format_check_modified_format(
+		const struct msm_kms *kms,
+		const struct msm_format *msm_fmt,
+		const struct drm_mode_fb_cmd2 *cmd,
+		struct drm_gem_object **bos)
+{
+	int ret, i, num_base_fmt_planes;
+	const struct sde_format *fmt;
+	struct sde_hw_fmt_layout layout;
+	uint32_t bos_total_size = 0;
+
+	if (!msm_fmt || !cmd || !bos) {
+		DRM_ERROR("invalid arguments\n");
+		return -EINVAL;
+	}
+
+	fmt = to_sde_format(msm_fmt);
+	num_base_fmt_planes = drm_format_num_planes(fmt->base.pixel_format);
+
+	ret = sde_format_get_plane_sizes(fmt, cmd->width, cmd->height,
+			&layout);
+	if (ret)
+		return ret;
+
+	for (i = 0; i < num_base_fmt_planes; i++) {
+		if (!bos[i]) {
+			DRM_ERROR("invalid handle for plane %d\n", i);
+			return -EINVAL;
+		}
+		if ((i == 0) || (bos[i] != bos[0]))
+			bos_total_size += bos[i]->size;
+	}
+
+	if (bos_total_size < layout.total_size) {
+		DRM_ERROR("buffers total size too small %u expected %u\n",
+				bos_total_size, layout.total_size);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+const struct sde_format *sde_get_sde_format_ext(
+		const uint32_t format,
+		const uint64_t *modifiers,
+		const uint32_t modifiers_len)
+{
+	uint32_t i = 0;
+	uint64_t mod0 = 0;
+	const struct sde_format *fmt = NULL;
+	const struct sde_format *map = NULL;
+	ssize_t map_size = 0;
+
+	/*
+	 * Currently only support exactly zero or one modifier.
+	 * All planes used must specify the same modifier.
+	 */
+	if (modifiers_len && !modifiers) {
+		SDE_ERROR("invalid modifiers array\n");
+		return NULL;
+	} else if (modifiers && modifiers_len && modifiers[0]) {
+		mod0 = modifiers[0];
+		SDE_DEBUG("plane format modifier 0x%llX\n", mod0);
+		for (i = 1; i < modifiers_len; i++) {
+			if (modifiers[i] != mod0) {
+				SDE_ERROR("bad fmt mod 0x%llX on plane %d\n",
+					modifiers[i], i);
+				return NULL;
+			}
+		}
+	}
+
+	switch (mod0) {
+	case 0:
+		map = sde_format_map;
+		map_size = ARRAY_SIZE(sde_format_map);
+		break;
+	case DRM_FORMAT_MOD_QCOM_COMPRESSED:
+	case DRM_FORMAT_MOD_QCOM_COMPRESSED | DRM_FORMAT_MOD_QCOM_TILE:
+		map = sde_format_map_ubwc;
+		map_size = ARRAY_SIZE(sde_format_map_ubwc);
+		SDE_DEBUG("found fmt 0x%X DRM_FORMAT_MOD_QCOM_COMPRESSED\n",
+				format);
+		break;
+	case DRM_FORMAT_MOD_QCOM_DX:
+		map = sde_format_map_p010;
+		map_size = ARRAY_SIZE(sde_format_map_p010);
+		SDE_DEBUG("found fmt 0x%X DRM_FORMAT_MOD_QCOM_DX\n", format);
+		break;
+	case (DRM_FORMAT_MOD_QCOM_DX | DRM_FORMAT_MOD_QCOM_COMPRESSED):
+	case (DRM_FORMAT_MOD_QCOM_DX | DRM_FORMAT_MOD_QCOM_COMPRESSED |
+			DRM_FORMAT_MOD_QCOM_TILE):
+		map = sde_format_map_p010_ubwc;
+		map_size = ARRAY_SIZE(sde_format_map_p010_ubwc);
+		SDE_DEBUG("found fmt 0x%X DRM_FORMAT_MOD_QCOM_COMPRESSED/DX\n",
+				format);
+		break;
+	case (DRM_FORMAT_MOD_QCOM_DX | DRM_FORMAT_MOD_QCOM_COMPRESSED |
+		DRM_FORMAT_MOD_QCOM_TIGHT):
+	case (DRM_FORMAT_MOD_QCOM_DX | DRM_FORMAT_MOD_QCOM_COMPRESSED |
+		DRM_FORMAT_MOD_QCOM_TIGHT | DRM_FORMAT_MOD_QCOM_TILE):
+		map = sde_format_map_tp10_ubwc;
+		map_size = ARRAY_SIZE(sde_format_map_tp10_ubwc);
+		SDE_DEBUG(
+			"found fmt 0x%X DRM_FORMAT_MOD_QCOM_COMPRESSED/DX/TIGHT\n",
+			format);
+		break;
+	case DRM_FORMAT_MOD_QCOM_TILE:
+		map = sde_format_map_tile;
+		map_size = ARRAY_SIZE(sde_format_map_tile);
+		SDE_DEBUG("found fmt 0x%X DRM_FORMAT_MOD_QCOM_TILE\n", format);
+		break;
+	case (DRM_FORMAT_MOD_QCOM_TILE | DRM_FORMAT_MOD_QCOM_DX):
+		map = sde_format_map_p010_tile;
+		map_size = ARRAY_SIZE(sde_format_map_p010_tile);
+		SDE_DEBUG("found fmt 0x%X DRM_FORMAT_MOD_QCOM_TILE/DX\n",
+				format);
+		break;
+	case (DRM_FORMAT_MOD_QCOM_TILE | DRM_FORMAT_MOD_QCOM_DX |
+			DRM_FORMAT_MOD_QCOM_TIGHT):
+		map = sde_format_map_tp10_tile;
+		map_size = ARRAY_SIZE(sde_format_map_tp10_tile);
+		SDE_DEBUG("found fmt 0x%X DRM_FORMAT_MOD_QCOM_TILE/DX/TIGHT\n",
+				format);
+		break;
+	default:
+		SDE_ERROR("unsupported format modifier %llX\n", mod0);
+		return NULL;
+	}
+
+	for (i = 0; i < map_size; i++) {
+		if (format == map[i].base.pixel_format) {
+			fmt = &map[i];
+			break;
+		}
+	}
+
+	if (fmt == NULL)
+		SDE_ERROR("unsupported fmt 0x%X modifier 0x%llX\n",
+				format, mod0);
+	else
+		SDE_DEBUG("fmt %s mod 0x%llX ubwc %d yuv %d\n",
+				drm_get_format_name(format), mod0,
+				SDE_FORMAT_IS_UBWC(fmt),
+				SDE_FORMAT_IS_YUV(fmt));
+
+	return fmt;
+}
+
+const struct msm_format *sde_get_msm_format(
+		struct msm_kms *kms,
+		const uint32_t format,
+		const uint64_t *modifiers,
+		const uint32_t modifiers_len)
+{
+	const struct sde_format *fmt = sde_get_sde_format_ext(format,
+			modifiers, modifiers_len);
+	if (fmt)
+		return &fmt->base;
+	return NULL;
+}
+
+uint32_t sde_populate_formats(
+		const struct sde_format_extended *format_list,
+		uint32_t *pixel_formats,
+		uint64_t *pixel_modifiers,
+		uint32_t pixel_formats_max)
+{
+	uint32_t i, fourcc_format;
+
+	if (!format_list || !pixel_formats)
+		return 0;
+
+	for (i = 0, fourcc_format = 0;
+			format_list->fourcc_format && i < pixel_formats_max;
+			++format_list) {
+		/* TODO: verify that the listed format is in sde_format_map */
+
+		/* optionally return modified formats */
+		if (pixel_modifiers) {
+			/* assume same modifier for all fb planes */
+			pixel_formats[i] = format_list->fourcc_format;
+			pixel_modifiers[i++] = format_list->modifier;
+		} else {
+			/* assume base formats grouped together */
+			if (fourcc_format != format_list->fourcc_format) {
+				fourcc_format = format_list->fourcc_format;
+				pixel_formats[i++] = fourcc_format;
+			}
+		}
+	}
+
+	return i;
+}
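+
+/*
+ * Illustrative sketch, not part of the original driver code: typical use of
+ * sde_populate_formats() when registering a DRM plane. 'example_formats' is
+ * a hypothetical sde_format_extended list terminated by a zero fourcc.
+ */
+static void __maybe_unused sde_populate_formats_example(void)
+{
+	static const struct sde_format_extended example_formats[] = {
+		{.fourcc_format = DRM_FORMAT_RGB565, .modifier = 0},
+		{.fourcc_format = DRM_FORMAT_XRGB8888, .modifier = 0},
+		{0, 0},
+	};
+	uint32_t fourcc[8];
+	uint64_t mods[8];
+	uint32_t cnt;
+
+	cnt = sde_populate_formats(example_formats, fourcc, mods,
+			ARRAY_SIZE(fourcc));
+	/* fourcc[0..cnt-1] and mods[0..cnt-1] now describe the plane */
+}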
diff -Nruw linux-4.4.115-fbx/drivers/gpu/drm/msm/sde./sde_formats.h linux-4.4.115-fbx/drivers/gpu/drm/msm/sde/sde_formats.h
--- linux-4.4.115-fbx/drivers/gpu/drm/msm/sde./sde_formats.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/gpu/drm/msm/sde/sde_formats.h	2019-01-22 16:16:23.515246515 +0100
@@ -0,0 +1,123 @@
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _SDE_FORMATS_H
+#define _SDE_FORMATS_H
+
+#include <drm/drm_fourcc.h>
+#include "msm_gem.h"
+#include "sde_hw_mdss.h"
+
+/**
+ * sde_get_sde_format_ext() - Returns sde format structure pointer.
+ * @format:          DRM FourCC Code
+ * @modifiers:       format modifier array from client, one per plane
+ * @modifiers_len:   number of planes and array size for plane_modifiers
+ */
+const struct sde_format *sde_get_sde_format_ext(
+		const uint32_t format,
+		const uint64_t *modifiers,
+		const uint32_t modifiers_len);
+
+#define sde_get_sde_format(f) sde_get_sde_format_ext(f, NULL, 0)
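+
+/*
+ * Illustrative lookup of a linear NV12 format and a UBWC-compressed
+ * variant (DRM_FORMAT_NV12 comes from drm_fourcc.h; the modifier
+ * array is hypothetical caller storage, one entry per plane):
+ *
+ *	static const uint64_t mods[4] = {
+ *		DRM_FORMAT_MOD_QCOM_COMPRESSED, 0, 0, 0 };
+ *	const struct sde_format *lin = sde_get_sde_format(DRM_FORMAT_NV12);
+ *	const struct sde_format *ubwc =
+ *		sde_get_sde_format_ext(DRM_FORMAT_NV12, mods, 4);
+ */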
+
+/**
+ * sde_get_msm_format - get the msm_format base of an sde_format; callback
+ *                      function registered with the msm_kms layer
+ * @kms:             kms driver
+ * @format:          DRM FourCC Code
+ * @modifiers:       format modifier array from client, one per plane
+ * @modifiers_len:   number of planes and size of the modifiers array
+ */
+const struct msm_format *sde_get_msm_format(
+		struct msm_kms *kms,
+		const uint32_t format,
+		const uint64_t *modifiers,
+		const uint32_t modifiers_len);
+
+/**
+ * sde_populate_formats - populate the given array with fourcc codes supported
+ * @format_list:       pointer to list of possible formats
+ * @pixel_formats:     array to populate with fourcc codes
+ * @pixel_modifiers:   array to populate with drm modifiers, can be NULL
+ * @pixel_formats_max: length of pixel formats array
+ * Return: number of elements populated
+ */
+uint32_t sde_populate_formats(
+		const struct sde_format_extended *format_list,
+		uint32_t *pixel_formats,
+		uint64_t *pixel_modifiers,
+		uint32_t pixel_formats_max);
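+
+/*
+ * Illustrative use: fill plane fourcc/modifier tables from a catalog
+ * list such as wb2_formats (MAX_FMTS and the output arrays below are
+ * hypothetical caller storage):
+ *
+ *	uint32_t fmts[MAX_FMTS];
+ *	uint64_t mods[MAX_FMTS];
+ *	uint32_t n = sde_populate_formats(wb2_formats, fmts, mods,
+ *			MAX_FMTS);
+ */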
+
+/**
+ * sde_format_get_plane_sizes - calculate size and layout of given buffer format
+ * @fmt:             pointer to sde_format
+ * @w:               width of the buffer
+ * @h:               height of the buffer
+ * @layout:          layout of the buffer
+ *
+ * Return: size of the buffer
+ */
+int sde_format_get_plane_sizes(
+		const struct sde_format *fmt,
+		const uint32_t w,
+		const uint32_t h,
+		struct sde_hw_fmt_layout *layout);
+
+/**
+ * sde_format_check_modified_format - validate format and buffers for a
+ *                   non-standard, i.e. modified, sde format
+ * @kms:             kms driver
+ * @msm_fmt:         msm_format base of an sde_format
+ * @cmd:             fb_cmd2 structure user request
+ * @bos:             gem buffer object list
+ *
+ * Return: error code on failure, 0 on success
+ */
+int sde_format_check_modified_format(
+		const struct msm_kms *kms,
+		const struct msm_format *msm_fmt,
+		const struct drm_mode_fb_cmd2 *cmd,
+		struct drm_gem_object **bos);
+
+/**
+ * sde_format_populate_layout - populate the given format layout based on
+ *                     mmu, fb, and format found in the fb
+ * @aspace:            address space pointer
+ * @fb:                framebuffer pointer
+ * @fmtl:              format layout structure to populate
+ *
+ * Return: error code on failure, -EAGAIN if success but the addresses
+ *         are the same as before or 0 if new addresses were populated
+ */
+int sde_format_populate_layout(
+		struct msm_gem_address_space *aspace,
+		struct drm_framebuffer *fb,
+		struct sde_hw_fmt_layout *fmtl);
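+
+/*
+ * Illustrative caller pattern: treat -EAGAIN as "success, addresses
+ * unchanged" when the layout is cached between calls:
+ *
+ *	ret = sde_format_populate_layout(aspace, fb, &layout);
+ *	if (ret && ret != -EAGAIN)
+ *		return ret;
+ */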
+
+/**
+ * sde_format_populate_layout_with_roi - populate the given format layout
+ *                     based on mmu, fb, roi, and format found in the fb
+ * @aspace:            address space pointer
+ * @fb:                framebuffer pointer
+ * @roi:               region of interest (optional)
+ * @fmtl:              format layout structure to populate
+ *
+ * Return: error code on failure, 0 on success
+ */
+int sde_format_populate_layout_with_roi(
+		struct msm_gem_address_space *aspace,
+		struct drm_framebuffer *fb,
+		struct sde_rect *roi,
+		struct sde_hw_fmt_layout *fmtl);
+
+#endif /* _SDE_FORMATS_H */
diff -Nruw linux-4.4.115/drivers/gpu/drm/msm/sde/sde_hw_catalog.c linux-4.4.115-fbx/drivers/gpu/drm/msm/sde/sde_hw_catalog.c
--- linux-4.4.115/drivers/gpu/drm/msm/sde/sde_hw_catalog.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/gpu/drm/msm/sde/sde_hw_catalog.c	2019-10-29 09:26:23.641203159 +0100
@@ -0,0 +1,2396 @@
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt)	"[drm:%s:%d] " fmt, __func__, __LINE__
+#include <linux/slab.h>
+#include <linux/of_address.h>
+
+#include <linux/of_platform.h>
+
+#include "sde_hw_mdss.h"
+#include "sde_hw_catalog.h"
+#include "sde_hw_catalog_format.h"
+#include "sde_kms.h"
+
+/*************************************************************
+ * MACRO DEFINITION
+ *************************************************************/
+
+/**
+ * Maximum number of sub-blocks per hardware block. For example, SSPP
+ * pipes can have QSEED, PCC, IGC, PA, CSC, QoS entries, etc. The count
+ * is 64 by software design and should be increased if any hardware
+ * block gains more sub-blocks.
+ */
+#define MAX_SDE_HW_BLK  64
+
+/* each entry will have register address and bit offset in that register */
+#define MAX_BIT_OFFSET 2
+
+/* default line width for sspp */
+#define DEFAULT_SDE_LINE_WIDTH 2048
+
+/* max mixer blend stages */
+#define DEFAULT_SDE_MIXER_BLENDSTAGES 7
+
+/* max bank bit for macro tile and ubwc format */
+#define DEFAULT_SDE_HIGHEST_BANK_BIT 15
+
+/* default hardware block size if dtsi entry is not present */
+#define DEFAULT_SDE_HW_BLOCK_LEN 0x100
+
+/* default rects for multi rect case */
+#define DEFAULT_SDE_SSPP_MAX_RECTS 1
+
+/* total number of intf - dp, dsi, hdmi */
+#define INTF_COUNT			3
+
+#define MAX_SSPP_UPSCALE		20
+#define MAX_SSPP_DOWNSCALE		4
+#define SSPP_UNITY_SCALE		1
+
+#define MAX_HORZ_DECIMATION		4
+#define MAX_VERT_DECIMATION		4
+
+#define MAX_SPLIT_DISPLAY_CTL		2
+#define MAX_PP_SPLIT_DISPLAY_CTL	1
+
+#define MDSS_BASE_OFFSET		0x0
+
+#define ROT_LM_OFFSET			3
+#define LINE_LM_OFFSET			5
+#define LINE_MODE_WB_OFFSET		2
+
+/* maximum XIN halt timeout in usec */
+#define VBIF_XIN_HALT_TIMEOUT		0x4000
+
+#define DEFAULT_CREQ_LUT_NRT		0x0
+#define DEFAULT_PIXEL_RAM_SIZE		(50 * 1024)
+
+/* access property value based on prop_type and hardware index */
+#define PROP_VALUE_ACCESS(p, i, j)		((p + i)->value[j])
+
+/*
+ * access element within PROP_TYPE_BIT_OFFSET_ARRAYs based on prop_type,
+ * hardware index and offset array index
+ */
+#define PROP_BITVALUE_ACCESS(p, i, j, k)	((p + i)->bit_value[j][k])
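+
+/*
+ * Illustrative accesses, matching the usage in sde_sspp_parse_dt()
+ * below: read the base offset of the i-th SSPP and the
+ * <reg_off bit_off> clock-control pair of the same pipe.
+ *
+ *	sspp->base = PROP_VALUE_ACCESS(prop_value, SSPP_OFF, i);
+ *	reg_off = PROP_BITVALUE_ACCESS(prop_value, SSPP_CLK_CTRL, i, 0);
+ *	bit_off = PROP_BITVALUE_ACCESS(prop_value, SSPP_CLK_CTRL, i, 1);
+ */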
+
+/*************************************************************
+ *  DTSI PROPERTY INDEX
+ *************************************************************/
+enum {
+	HW_OFF,
+	HW_LEN,
+	HW_PROP_MAX,
+};
+
+enum sde_prop {
+	SDE_OFF,
+	SDE_LEN,
+	SSPP_LINEWIDTH,
+	MIXER_LINEWIDTH,
+	MIXER_BLEND,
+	WB_LINEWIDTH,
+	BANK_BIT,
+	QSEED_TYPE,
+	CSC_TYPE,
+	PANIC_PER_PIPE,
+	CDP,
+	SRC_SPLIT,
+	SDE_PROP_MAX,
+};
+
+enum {
+	PERF_MAX_BW_LOW,
+	PERF_MAX_BW_HIGH,
+	PERF_PROP_MAX,
+};
+
+enum {
+	SSPP_OFF,
+	SSPP_SIZE,
+	SSPP_TYPE,
+	SSPP_XIN,
+	SSPP_CLK_CTRL,
+	SSPP_CLK_STATUS,
+	SSPP_DANGER,
+	SSPP_SAFE,
+	SSPP_MAX_RECTS,
+	SSPP_SCALE_SIZE,
+	SSPP_VIG_BLOCKS,
+	SSPP_RGB_BLOCKS,
+	SSPP_PROP_MAX,
+};
+
+enum {
+	VIG_QSEED_OFF,
+	VIG_QSEED_LEN,
+	VIG_CSC_OFF,
+	VIG_HSIC_PROP,
+	VIG_MEMCOLOR_PROP,
+	VIG_PCC_PROP,
+	VIG_PROP_MAX,
+};
+
+enum {
+	RGB_SCALER_OFF,
+	RGB_SCALER_LEN,
+	RGB_PCC_PROP,
+	RGB_PROP_MAX,
+};
+
+enum {
+	INTF_OFF,
+	INTF_LEN,
+	INTF_PREFETCH,
+	INTF_TYPE,
+	INTF_PROP_MAX,
+};
+
+enum {
+	PP_OFF,
+	PP_LEN,
+	TE_OFF,
+	TE_LEN,
+	TE2_OFF,
+	TE2_LEN,
+	DSC_OFF,
+	DSC_LEN,
+	PP_SLAVE,
+	PP_PROP_MAX,
+};
+
+enum {
+	DSPP_OFF,
+	DSPP_SIZE,
+	DSPP_BLOCKS,
+	DSPP_PROP_MAX,
+};
+
+enum {
+	DSPP_IGC_PROP,
+	DSPP_PCC_PROP,
+	DSPP_GC_PROP,
+	DSPP_HSIC_PROP,
+	DSPP_MEMCOLOR_PROP,
+	DSPP_SIXZONE_PROP,
+	DSPP_GAMUT_PROP,
+	DSPP_DITHER_PROP,
+	DSPP_HIST_PROP,
+	DSPP_VLUT_PROP,
+	DSPP_BLOCKS_PROP_MAX,
+};
+
+enum {
+	AD_OFF,
+	AD_VERSION,
+	AD_PROP_MAX,
+};
+
+enum {
+	MIXER_OFF,
+	MIXER_LEN,
+	MIXER_BLOCKS,
+	MIXER_PROP_MAX,
+};
+
+enum {
+	MIXER_GC_PROP,
+	MIXER_BLOCKS_PROP_MAX,
+};
+
+enum {
+	WB_OFF,
+	WB_LEN,
+	WB_ID,
+	WB_XIN_ID,
+	WB_CLK_CTRL,
+	WB_PROP_MAX,
+};
+
+enum {
+	VBIF_OFF,
+	VBIF_LEN,
+	VBIF_ID,
+	VBIF_DEFAULT_OT_RD_LIMIT,
+	VBIF_DEFAULT_OT_WR_LIMIT,
+	VBIF_DYNAMIC_OT_RD_LIMIT,
+	VBIF_DYNAMIC_OT_WR_LIMIT,
+	VBIF_PROP_MAX,
+};
+
+/*************************************************************
+ * dts property definition
+ *************************************************************/
+enum prop_type {
+	PROP_TYPE_BOOL,
+	PROP_TYPE_U32,
+	PROP_TYPE_U32_ARRAY,
+	PROP_TYPE_STRING,
+	PROP_TYPE_STRING_ARRAY,
+	PROP_TYPE_BIT_OFFSET_ARRAY,
+	PROP_TYPE_NODE,
+};
+
+struct sde_prop_type {
+	/* property index from the corresponding enum, for readability */
+	u8 id;
+	/* property name as documented in the dtsi bindings */
+	char *prop_name;
+	/**
+	 * parsing fails when a property marked mandatory
+	 * is not present
+	 */
+	u32 is_mandatory;
+	/* property type based on "enum prop_type" */
+	enum prop_type type;
+};
+
+struct sde_prop_value {
+	u32 value[MAX_SDE_HW_BLK];
+	u32 bit_value[MAX_SDE_HW_BLK][MAX_BIT_OFFSET];
+};
+
+/*************************************************************
+ * dts property list
+ *************************************************************/
+static struct sde_prop_type sde_prop[] = {
+	{SDE_OFF, "qcom,sde-off", true, PROP_TYPE_U32},
+	{SDE_LEN, "qcom,sde-len", false, PROP_TYPE_U32},
+	{SSPP_LINEWIDTH, "qcom,sde-sspp-linewidth", false, PROP_TYPE_U32},
+	{MIXER_LINEWIDTH, "qcom,sde-mixer-linewidth", false, PROP_TYPE_U32},
+	{MIXER_BLEND, "qcom,sde-mixer-blendstages", false, PROP_TYPE_U32},
+	{WB_LINEWIDTH, "qcom,sde-wb-linewidth", false, PROP_TYPE_U32},
+	{BANK_BIT, "qcom,sde-highest-bank-bit", false, PROP_TYPE_U32},
+	{QSEED_TYPE, "qcom,sde-qseed-type", false, PROP_TYPE_STRING},
+	{CSC_TYPE, "qcom,sde-csc-type", false, PROP_TYPE_STRING},
+	{PANIC_PER_PIPE, "qcom,sde-panic-per-pipe", false, PROP_TYPE_BOOL},
+	{CDP, "qcom,sde-has-cdp", false, PROP_TYPE_BOOL},
+	{SRC_SPLIT, "qcom,sde-has-src-split", false, PROP_TYPE_BOOL},
+};
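+
+/*
+ * Illustrative dtsi fragment matched by the table above (values are
+ * made up; see the dtsi bindings documentation for real ones):
+ *
+ *	qcom,sde-off = <0x1000>;
+ *	qcom,sde-len = <0x458>;
+ *	qcom,sde-highest-bank-bit = <0x2>;
+ *	qcom,sde-has-src-split;
+ */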
+
+static struct sde_prop_type sde_perf_prop[] = {
+	{PERF_MAX_BW_LOW, "qcom,sde-max-bw-low-kbps", false, PROP_TYPE_U32},
+	{PERF_MAX_BW_HIGH, "qcom,sde-max-bw-high-kbps", false, PROP_TYPE_U32},
+};
+
+static struct sde_prop_type sspp_prop[] = {
+	{SSPP_OFF, "qcom,sde-sspp-off", true, PROP_TYPE_U32_ARRAY},
+	{SSPP_SIZE, "qcom,sde-sspp-src-size", false, PROP_TYPE_U32},
+	{SSPP_TYPE, "qcom,sde-sspp-type", true, PROP_TYPE_STRING_ARRAY},
+	{SSPP_XIN, "qcom,sde-sspp-xin-id", true, PROP_TYPE_U32_ARRAY},
+	{SSPP_CLK_CTRL, "qcom,sde-sspp-clk-ctrl", false,
+		PROP_TYPE_BIT_OFFSET_ARRAY},
+	{SSPP_CLK_STATUS, "qcom,sde-sspp-clk-status", false,
+		PROP_TYPE_BIT_OFFSET_ARRAY},
+	{SSPP_DANGER, "qcom,sde-sspp-danger-lut", false, PROP_TYPE_U32_ARRAY},
+	{SSPP_SAFE, "qcom,sde-sspp-safe-lut", false, PROP_TYPE_U32_ARRAY},
+	{SSPP_MAX_RECTS, "qcom,sde-sspp-max-rects", false, PROP_TYPE_U32_ARRAY},
+	{SSPP_SCALE_SIZE, "qcom,sde-sspp-scale-size", false, PROP_TYPE_U32},
+	{SSPP_VIG_BLOCKS, "qcom,sde-sspp-vig-blocks", false, PROP_TYPE_NODE},
+	{SSPP_RGB_BLOCKS, "qcom,sde-sspp-rgb-blocks", false, PROP_TYPE_NODE},
+};
+
+static struct sde_prop_type vig_prop[] = {
+	{VIG_QSEED_OFF, "qcom,sde-vig-qseed-off", false, PROP_TYPE_U32},
+	{VIG_QSEED_LEN, "qcom,sde-vig-qseed-size", false, PROP_TYPE_U32},
+	{VIG_CSC_OFF, "qcom,sde-vig-csc-off", false, PROP_TYPE_U32},
+	{VIG_HSIC_PROP, "qcom,sde-vig-hsic", false, PROP_TYPE_U32_ARRAY},
+	{VIG_MEMCOLOR_PROP, "qcom,sde-vig-memcolor", false,
+		PROP_TYPE_U32_ARRAY},
+	{VIG_PCC_PROP, "qcom,sde-vig-pcc", false, PROP_TYPE_U32_ARRAY},
+};
+
+static struct sde_prop_type rgb_prop[] = {
+	{RGB_SCALER_OFF, "qcom,sde-rgb-scaler-off", false, PROP_TYPE_U32},
+	{RGB_SCALER_LEN, "qcom,sde-rgb-scaler-size", false, PROP_TYPE_U32},
+	{RGB_PCC_PROP, "qcom,sde-rgb-pcc", false, PROP_TYPE_U32_ARRAY},
+};
+
+static struct sde_prop_type ctl_prop[] = {
+	{HW_OFF, "qcom,sde-ctl-off", true, PROP_TYPE_U32_ARRAY},
+	{HW_LEN, "qcom,sde-ctl-size", false, PROP_TYPE_U32},
+};
+
+static struct sde_prop_type mixer_prop[] = {
+	{MIXER_OFF, "qcom,sde-mixer-off", true, PROP_TYPE_U32_ARRAY},
+	{MIXER_LEN, "qcom,sde-mixer-size", false, PROP_TYPE_U32},
+	{MIXER_BLOCKS, "qcom,sde-mixer-blocks", false, PROP_TYPE_NODE},
+};
+
+static struct sde_prop_type mixer_blocks_prop[] = {
+	{MIXER_GC_PROP, "qcom,sde-mixer-gc", false, PROP_TYPE_U32_ARRAY},
+};
+
+static struct sde_prop_type dspp_prop[] = {
+	{DSPP_OFF, "qcom,sde-dspp-off", true, PROP_TYPE_U32_ARRAY},
+	{DSPP_SIZE, "qcom,sde-dspp-size", false, PROP_TYPE_U32},
+	{DSPP_BLOCKS, "qcom,sde-dspp-blocks", false, PROP_TYPE_NODE},
+};
+
+static struct sde_prop_type dspp_blocks_prop[] = {
+	{DSPP_IGC_PROP, "qcom,sde-dspp-igc", false, PROP_TYPE_U32_ARRAY},
+	{DSPP_PCC_PROP, "qcom,sde-dspp-pcc", false, PROP_TYPE_U32_ARRAY},
+	{DSPP_GC_PROP, "qcom,sde-dspp-gc", false, PROP_TYPE_U32_ARRAY},
+	{DSPP_HSIC_PROP, "qcom,sde-dspp-hsic", false, PROP_TYPE_U32_ARRAY},
+	{DSPP_MEMCOLOR_PROP, "qcom,sde-dspp-memcolor", false,
+		PROP_TYPE_U32_ARRAY},
+	{DSPP_SIXZONE_PROP, "qcom,sde-dspp-sixzone", false,
+		PROP_TYPE_U32_ARRAY},
+	{DSPP_GAMUT_PROP, "qcom,sde-dspp-gamut", false, PROP_TYPE_U32_ARRAY},
+	{DSPP_DITHER_PROP, "qcom,sde-dspp-dither", false, PROP_TYPE_U32_ARRAY},
+	{DSPP_HIST_PROP, "qcom,sde-dspp-hist", false, PROP_TYPE_U32_ARRAY},
+	{DSPP_VLUT_PROP, "qcom,sde-dspp-vlut", false, PROP_TYPE_U32_ARRAY},
+};
+
+static struct sde_prop_type ad_prop[] = {
+	{AD_OFF, "qcom,sde-dspp-ad-off", false, PROP_TYPE_U32_ARRAY},
+	{AD_VERSION, "qcom,sde-dspp-ad-version", false, PROP_TYPE_U32},
+};
+
+static struct sde_prop_type pp_prop[] = {
+	{PP_OFF, "qcom,sde-pp-off", true, PROP_TYPE_U32_ARRAY},
+	{PP_LEN, "qcom,sde-pp-size", false, PROP_TYPE_U32},
+	{TE_OFF, "qcom,sde-te-off", false, PROP_TYPE_U32_ARRAY},
+	{TE_LEN, "qcom,sde-te-size", false, PROP_TYPE_U32},
+	{TE2_OFF, "qcom,sde-te2-off", false, PROP_TYPE_U32_ARRAY},
+	{TE2_LEN, "qcom,sde-te2-size", false, PROP_TYPE_U32},
+	{DSC_OFF, "qcom,sde-dsc-off", false, PROP_TYPE_U32_ARRAY},
+	{DSC_LEN, "qcom,sde-dsc-size", false, PROP_TYPE_U32},
+	{PP_SLAVE, "qcom,sde-pp-slave", false, PROP_TYPE_U32_ARRAY},
+};
+
+static struct sde_prop_type cdm_prop[] = {
+	{HW_OFF, "qcom,sde-cdm-off", false, PROP_TYPE_U32_ARRAY},
+	{HW_LEN, "qcom,sde-cdm-size", false, PROP_TYPE_U32},
+};
+
+static struct sde_prop_type intf_prop[] = {
+	{INTF_OFF, "qcom,sde-intf-off", true, PROP_TYPE_U32_ARRAY},
+	{INTF_LEN, "qcom,sde-intf-size", false, PROP_TYPE_U32},
+	{INTF_PREFETCH, "qcom,sde-intf-max-prefetch-lines", false,
+						PROP_TYPE_U32_ARRAY},
+	{INTF_TYPE, "qcom,sde-intf-type", false, PROP_TYPE_STRING_ARRAY},
+};
+
+static struct sde_prop_type wb_prop[] = {
+	{WB_OFF, "qcom,sde-wb-off", true, PROP_TYPE_U32_ARRAY},
+	{WB_LEN, "qcom,sde-wb-size", false, PROP_TYPE_U32},
+	{WB_ID, "qcom,sde-wb-id", true, PROP_TYPE_U32_ARRAY},
+	{WB_XIN_ID, "qcom,sde-wb-xin-id", false, PROP_TYPE_U32_ARRAY},
+	{WB_CLK_CTRL, "qcom,sde-wb-clk-ctrl", false,
+		PROP_TYPE_BIT_OFFSET_ARRAY},
+};
+
+static struct sde_prop_type vbif_prop[] = {
+	{VBIF_OFF, "qcom,sde-vbif-off", true, PROP_TYPE_U32_ARRAY},
+	{VBIF_LEN, "qcom,sde-vbif-size", false, PROP_TYPE_U32},
+	{VBIF_ID, "qcom,sde-vbif-id", false, PROP_TYPE_U32_ARRAY},
+	{VBIF_DEFAULT_OT_RD_LIMIT, "qcom,sde-vbif-default-ot-rd-limit", false,
+		PROP_TYPE_U32},
+	{VBIF_DEFAULT_OT_WR_LIMIT, "qcom,sde-vbif-default-ot-wr-limit", false,
+		PROP_TYPE_U32},
+	{VBIF_DYNAMIC_OT_RD_LIMIT, "qcom,sde-vbif-dynamic-ot-rd-limit", false,
+		PROP_TYPE_U32_ARRAY},
+	{VBIF_DYNAMIC_OT_WR_LIMIT, "qcom,sde-vbif-dynamic-ot-wr-limit", false,
+		PROP_TYPE_U32_ARRAY},
+};
+
+/*************************************************************
+ * static API list
+ *************************************************************/
+
+/**
+ * _sde_copy_formats   - copy formats from src_list to dst_list
+ * @dst_list:          destination list to copy formats into
+ * @dst_list_size:     size of the destination list
+ * @dst_list_pos:      starting position in the destination list
+ * @src_list:          source list to copy formats from
+ * @src_list_size:     size of the source list
+ * Return: number of elements populated
+ */
+static uint32_t _sde_copy_formats(
+		struct sde_format_extended *dst_list,
+		uint32_t dst_list_size,
+		uint32_t dst_list_pos,
+		const struct sde_format_extended *src_list,
+		uint32_t src_list_size)
+{
+	uint32_t cur_pos, i;
+
+	if (!dst_list || !src_list || (dst_list_pos >= (dst_list_size - 1)))
+		return 0;
+
+	for (i = 0, cur_pos = dst_list_pos;
+		(cur_pos < (dst_list_size - 1)) && (i < src_list_size)
+		&& src_list[i].fourcc_format; ++i, ++cur_pos)
+		dst_list[cur_pos] = src_list[i];
+
+	dst_list[cur_pos].fourcc_format = 0;
+
+	return i;
+}
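+
+/*
+ * Illustrative use of _sde_copy_formats(): append zero-terminated
+ * source lists back to back, resuming from the returned count
+ * (extra_formats is hypothetical):
+ *
+ *	pos = _sde_copy_formats(dst, dst_size, 0,
+ *			plane_formats, ARRAY_SIZE(plane_formats));
+ *	pos += _sde_copy_formats(dst, dst_size, pos,
+ *			extra_formats, ARRAY_SIZE(extra_formats));
+ */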
+
+static int _parse_dt_u32_handler(struct device_node *np,
+	char *prop_name, u32 *offsets, int len, bool mandatory)
+{
+	int rc = -EINVAL;
+
+	if (len > MAX_SDE_HW_BLK) {
+		SDE_ERROR(
+			"prop: %s u32 array read of len: %d would exceed bounds\n",
+			prop_name, len);
+		return -E2BIG;
+	}
+
+	rc = of_property_read_u32_array(np, prop_name, offsets, len);
+	if (rc && mandatory)
+		SDE_ERROR("mandatory prop: %s u32 array read failed, len:%d\n",
+				prop_name, len);
+	else if (rc)
+		SDE_DEBUG("optional prop: %s u32 array read failed, len:%d\n",
+				prop_name, len);
+
+	return rc;
+}
+
+static int _parse_dt_bit_offset(struct device_node *np,
+	char *prop_name, struct sde_prop_value *prop_value, u32 prop_index,
+	u32 count, bool mandatory)
+{
+	int rc = 0, len, i, j;
+	const u32 *arr;
+
+	arr = of_get_property(np, prop_name, &len);
+	if (arr) {
+		len /= sizeof(u32);
+		len &= ~0x1;
+
+		if (len > (MAX_SDE_HW_BLK * MAX_BIT_OFFSET)) {
+			SDE_ERROR(
+				"prop: %s len: %d will lead to out of bound access\n",
+				prop_name, len / MAX_BIT_OFFSET);
+			return -E2BIG;
+		}
+
+		for (i = 0, j = 0; i < len; j++) {
+			PROP_BITVALUE_ACCESS(prop_value, prop_index, j, 0) =
+				be32_to_cpu(arr[i]);
+			i++;
+			PROP_BITVALUE_ACCESS(prop_value, prop_index, j, 1) =
+				be32_to_cpu(arr[i]);
+			i++;
+		}
+	} else {
+		if (mandatory) {
+			SDE_ERROR("error mandatory property '%s' not found\n",
+				prop_name);
+			rc = -EINVAL;
+		} else {
+			SDE_DEBUG("error optional property '%s' not found\n",
+				prop_name);
+		}
+	}
+
+	return rc;
+}
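+
+/*
+ * The property parsed above encodes <reg_off bit_off> pairs, e.g.
+ * (illustrative values):
+ *
+ *	qcom,sde-sspp-clk-ctrl = <0x2ac 0>, <0x2b4 0>;
+ *
+ * which is stored as bit_value[0][0] = 0x2ac, bit_value[0][1] = 0,
+ * bit_value[1][0] = 0x2b4, bit_value[1][1] = 0.
+ */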
+
+static int _validate_dt_entry(struct device_node *np,
+	struct sde_prop_type *sde_prop, u32 prop_size, int *prop_count,
+	int *off_count)
+{
+	int rc = 0, i, val;
+	struct device_node *snp = NULL;
+
+	if (off_count) {
+		*off_count = of_property_count_u32_elems(np,
+				sde_prop[0].prop_name);
+		if ((*off_count > MAX_BLOCKS) || (*off_count < 0)) {
+			if (sde_prop[0].is_mandatory) {
+				SDE_ERROR(
+					"invalid hw offset prop name:%s count: %d\n",
+					sde_prop[0].prop_name, *off_count);
+				rc = -EINVAL;
+			}
+			*off_count = 0;
+			memset(prop_count, 0, sizeof(int) * prop_size);
+			return rc;
+		}
+	}
+
+	for (i = 0; i < prop_size; i++) {
+		switch (sde_prop[i].type) {
+		case PROP_TYPE_U32:
+			rc = of_property_read_u32(np, sde_prop[i].prop_name,
+				&val);
+			break;
+		case PROP_TYPE_U32_ARRAY:
+			prop_count[i] = of_property_count_u32_elems(np,
+				sde_prop[i].prop_name);
+			if (prop_count[i] < 0)
+				rc = prop_count[i];
+			break;
+		case PROP_TYPE_STRING_ARRAY:
+			prop_count[i] = of_property_count_strings(np,
+				sde_prop[i].prop_name);
+			if (prop_count[i] < 0)
+				rc = prop_count[i];
+			break;
+		case PROP_TYPE_BIT_OFFSET_ARRAY:
+			/* val is only valid if the property exists */
+			if (of_get_property(np, sde_prop[i].prop_name, &val))
+				prop_count[i] = val /
+					(MAX_BIT_OFFSET * sizeof(u32));
+			else
+				prop_count[i] = 0;
+			break;
+		case PROP_TYPE_NODE:
+			snp = of_get_child_by_name(np,
+					sde_prop[i].prop_name);
+			if (!snp)
+				rc = -EINVAL;
+			break;
+		default:
+			SDE_DEBUG("invalid property type:%d\n",
+							sde_prop[i].type);
+			break;
+		}
+		SDE_DEBUG(
+			"prop id:%d prop name:%s prop type:%d prop_count:%d\n",
+			i, sde_prop[i].prop_name,
+			sde_prop[i].type, prop_count[i]);
+
+		if (rc && sde_prop[i].is_mandatory &&
+		   ((sde_prop[i].type == PROP_TYPE_U32) ||
+		    (sde_prop[i].type == PROP_TYPE_NODE))) {
+			SDE_ERROR("prop:%s not present\n",
+						sde_prop[i].prop_name);
+			goto end;
+		} else if (sde_prop[i].type == PROP_TYPE_U32 ||
+			sde_prop[i].type == PROP_TYPE_BOOL ||
+			sde_prop[i].type == PROP_TYPE_NODE) {
+			rc = 0;
+			continue;
+		}
+
+		if (off_count && (prop_count[i] != *off_count) &&
+				sde_prop[i].is_mandatory) {
+			SDE_ERROR(
+				"prop:%s count:%d is different compared to offset array:%d\n",
+				sde_prop[i].prop_name,
+				prop_count[i], *off_count);
+			rc = -EINVAL;
+			goto end;
+		} else if (off_count && prop_count[i] != *off_count) {
+			SDE_DEBUG(
+				"prop:%s count:%d is different compared to offset array:%d\n",
+				sde_prop[i].prop_name,
+				prop_count[i], *off_count);
+			rc = 0;
+			prop_count[i] = 0;
+		}
+		if (!off_count && prop_count[i] < 0) {
+			prop_count[i] = 0;
+			if (sde_prop[i].is_mandatory) {
+				SDE_ERROR("prop:%s count:%d is negative\n",
+					sde_prop[i].prop_name, prop_count[i]);
+				rc = -EINVAL;
+			} else {
+				rc = 0;
+				SDE_DEBUG("prop:%s count:%d is negative\n",
+					sde_prop[i].prop_name, prop_count[i]);
+			}
+		}
+	}
+
+end:
+	return rc;
+}
+
+static int _read_dt_entry(struct device_node *np,
+	struct sde_prop_type *sde_prop, u32 prop_size, int *prop_count,
+	bool *prop_exists,
+	struct sde_prop_value *prop_value)
+{
+	int rc = 0, i, j;
+
+	for (i = 0; i < prop_size; i++) {
+		prop_exists[i] = true;
+		switch (sde_prop[i].type) {
+		case PROP_TYPE_U32:
+			rc = of_property_read_u32(np, sde_prop[i].prop_name,
+				&PROP_VALUE_ACCESS(prop_value, i, 0));
+			SDE_DEBUG(
+				"prop id:%d prop name:%s prop type:%d value:0x%x\n",
+				i, sde_prop[i].prop_name,
+				sde_prop[i].type,
+				PROP_VALUE_ACCESS(prop_value, i, 0));
+			if (rc)
+				prop_exists[i] = false;
+			break;
+		case PROP_TYPE_BOOL:
+			PROP_VALUE_ACCESS(prop_value, i, 0) =
+				of_property_read_bool(np,
+					sde_prop[i].prop_name);
+			SDE_DEBUG(
+				"prop id:%d prop name:%s prop type:%d value:0x%x\n",
+				i, sde_prop[i].prop_name,
+				sde_prop[i].type,
+				PROP_VALUE_ACCESS(prop_value, i, 0));
+			break;
+		case PROP_TYPE_U32_ARRAY:
+			rc = _parse_dt_u32_handler(np, sde_prop[i].prop_name,
+				&PROP_VALUE_ACCESS(prop_value, i, 0),
+				prop_count[i], sde_prop[i].is_mandatory);
+			if (rc && sde_prop[i].is_mandatory) {
+				SDE_ERROR(
+					"%s prop validation success but read failed\n",
+					sde_prop[i].prop_name);
+				prop_exists[i] = false;
+				goto end;
+			} else {
+				if (rc)
+					prop_exists[i] = false;
+				/* only for debug purposes */
+				SDE_DEBUG("prop id:%d prop name:%s prop type:%d",
+					i, sde_prop[i].prop_name,
+					sde_prop[i].type);
+				for (j = 0; j < prop_count[i]; j++)
+					SDE_DEBUG(" value[%d]:0x%x ", j,
+						PROP_VALUE_ACCESS(prop_value, i,
+								j));
+				SDE_DEBUG("\n");
+			}
+			break;
+		case PROP_TYPE_BIT_OFFSET_ARRAY:
+			rc = _parse_dt_bit_offset(np, sde_prop[i].prop_name,
+				prop_value, i, prop_count[i],
+				sde_prop[i].is_mandatory);
+			if (rc && sde_prop[i].is_mandatory) {
+				SDE_ERROR(
+					"%s prop validation success but read failed\n",
+					sde_prop[i].prop_name);
+				prop_exists[i] = false;
+				goto end;
+			} else {
+				if (rc)
+					prop_exists[i] = false;
+				SDE_DEBUG(
+					"prop id:%d prop name:%s prop type:%d",
+					i, sde_prop[i].prop_name,
+					sde_prop[i].type);
+				for (j = 0; j < prop_count[i]; j++)
+					SDE_DEBUG(
+					"count[%d]: bit:0x%x off:0x%x\n", j,
+					PROP_BITVALUE_ACCESS(prop_value,
+						i, j, 0),
+					PROP_BITVALUE_ACCESS(prop_value,
+						i, j, 1));
+				SDE_DEBUG("\n");
+			}
+			break;
+		case PROP_TYPE_NODE:
+			/* Node will be parsed in calling function */
+			rc = 0;
+			break;
+		default:
+			SDE_DEBUG("invalid property type:%d\n",
+							sde_prop[i].type);
+			break;
+		}
+		rc = 0;
+	}
+
+end:
+	return rc;
+}
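+
+/*
+ * All of the *_parse_dt() helpers below follow the same two-phase
+ * pattern, sketched here with the ctl properties:
+ *
+ *	rc = _validate_dt_entry(np, ctl_prop, ARRAY_SIZE(ctl_prop),
+ *			prop_count, &off_count);
+ *	if (!rc)
+ *		rc = _read_dt_entry(np, ctl_prop, ARRAY_SIZE(ctl_prop),
+ *				prop_count, prop_exists, prop_value);
+ */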
+
+static void _sde_sspp_setup_vig(struct sde_mdss_cfg *sde_cfg,
+	struct sde_sspp_cfg *sspp, struct sde_sspp_sub_blks *sblk,
+	bool *prop_exists, struct sde_prop_value *prop_value, u32 *vig_count)
+{
+	sblk->maxupscale = MAX_SSPP_UPSCALE;
+	sblk->maxdwnscale = MAX_SSPP_DOWNSCALE;
+	sblk->format_list = plane_formats_yuv;
+	sspp->id = SSPP_VIG0 + *vig_count;
+	snprintf(sspp->name, SDE_HW_BLK_NAME_LEN, "sspp_%u",
+			sspp->id - SSPP_VIG0);
+	sspp->clk_ctrl = SDE_CLK_CTRL_VIG0 + *vig_count;
+	sspp->type = SSPP_TYPE_VIG;
+	set_bit(SDE_SSPP_QOS, &sspp->features);
+	(*vig_count)++;
+
+	if (!prop_value)
+		return;
+
+	if (sde_cfg->qseed_type == SDE_SSPP_SCALER_QSEED2) {
+		set_bit(SDE_SSPP_SCALER_QSEED2, &sspp->features);
+		sblk->scaler_blk.id = SDE_SSPP_SCALER_QSEED2;
+		sblk->scaler_blk.base = PROP_VALUE_ACCESS(prop_value,
+			VIG_QSEED_OFF, 0);
+		sblk->scaler_blk.len = PROP_VALUE_ACCESS(prop_value,
+			VIG_QSEED_LEN, 0);
+		snprintf(sblk->scaler_blk.name, SDE_HW_BLK_NAME_LEN,
+				"sspp_scaler%u", sspp->id - SSPP_VIG0);
+	} else if (sde_cfg->qseed_type == SDE_SSPP_SCALER_QSEED3) {
+		set_bit(SDE_SSPP_SCALER_QSEED3, &sspp->features);
+		sblk->scaler_blk.id = SDE_SSPP_SCALER_QSEED3;
+		sblk->scaler_blk.base = PROP_VALUE_ACCESS(prop_value,
+			VIG_QSEED_OFF, 0);
+		sblk->scaler_blk.len = PROP_VALUE_ACCESS(prop_value,
+			VIG_QSEED_LEN, 0);
+		snprintf(sblk->scaler_blk.name, SDE_HW_BLK_NAME_LEN,
+			"sspp_scaler%u", sspp->id - SSPP_VIG0);
+	}
+
+	sblk->csc_blk.id = SDE_SSPP_CSC;
+	snprintf(sblk->csc_blk.name, SDE_HW_BLK_NAME_LEN,
+			"sspp_csc%u", sspp->id - SSPP_VIG0);
+	if (sde_cfg->csc_type == SDE_SSPP_CSC) {
+		set_bit(SDE_SSPP_CSC, &sspp->features);
+		sblk->csc_blk.base = PROP_VALUE_ACCESS(prop_value,
+							VIG_CSC_OFF, 0);
+	} else if (sde_cfg->csc_type == SDE_SSPP_CSC_10BIT) {
+		set_bit(SDE_SSPP_CSC_10BIT, &sspp->features);
+		sblk->csc_blk.base = PROP_VALUE_ACCESS(prop_value,
+							VIG_CSC_OFF, 0);
+	}
+
+	sblk->hsic_blk.id = SDE_SSPP_HSIC;
+	snprintf(sblk->hsic_blk.name, SDE_HW_BLK_NAME_LEN,
+			"sspp_hsic%u", sspp->id - SSPP_VIG0);
+	if (prop_exists[VIG_HSIC_PROP]) {
+		sblk->hsic_blk.base = PROP_VALUE_ACCESS(prop_value,
+			VIG_HSIC_PROP, 0);
+		sblk->hsic_blk.version = PROP_VALUE_ACCESS(prop_value,
+			VIG_HSIC_PROP, 1);
+		sblk->hsic_blk.len = 0;
+		set_bit(SDE_SSPP_HSIC, &sspp->features);
+	}
+
+	sblk->memcolor_blk.id = SDE_SSPP_MEMCOLOR;
+	snprintf(sblk->memcolor_blk.name, SDE_HW_BLK_NAME_LEN,
+			"sspp_memcolor%u", sspp->id - SSPP_VIG0);
+	if (prop_exists[VIG_MEMCOLOR_PROP]) {
+		sblk->memcolor_blk.base = PROP_VALUE_ACCESS(prop_value,
+			VIG_MEMCOLOR_PROP, 0);
+		sblk->memcolor_blk.version = PROP_VALUE_ACCESS(prop_value,
+			VIG_MEMCOLOR_PROP, 1);
+		sblk->memcolor_blk.len = 0;
+		set_bit(SDE_SSPP_MEMCOLOR, &sspp->features);
+	}
+
+	sblk->pcc_blk.id = SDE_SSPP_PCC;
+	snprintf(sblk->pcc_blk.name, SDE_HW_BLK_NAME_LEN,
+			"sspp_pcc%u", sspp->id - SSPP_VIG0);
+	if (prop_exists[VIG_PCC_PROP]) {
+		sblk->pcc_blk.base = PROP_VALUE_ACCESS(prop_value,
+			VIG_PCC_PROP, 0);
+		sblk->pcc_blk.version = PROP_VALUE_ACCESS(prop_value,
+			VIG_PCC_PROP, 1);
+		sblk->pcc_blk.len = 0;
+		set_bit(SDE_SSPP_PCC, &sspp->features);
+	}
+	snprintf(sspp->name, sizeof(sspp->name), "vig%d", *vig_count-1);
+}
+
+static void _sde_sspp_setup_rgb(struct sde_mdss_cfg *sde_cfg,
+	struct sde_sspp_cfg *sspp, struct sde_sspp_sub_blks *sblk,
+	bool *prop_exists, struct sde_prop_value *prop_value, u32 *rgb_count)
+{
+	sblk->maxupscale = MAX_SSPP_UPSCALE;
+	sblk->maxdwnscale = MAX_SSPP_DOWNSCALE;
+	sblk->format_list = plane_formats;
+	sspp->id = SSPP_RGB0 + *rgb_count;
+	snprintf(sspp->name, SDE_HW_BLK_NAME_LEN, "sspp_%u",
+			sspp->id - SSPP_VIG0);
+	sspp->clk_ctrl = SDE_CLK_CTRL_RGB0 + *rgb_count;
+	sspp->type = SSPP_TYPE_RGB;
+	set_bit(SDE_SSPP_QOS, &sspp->features);
+	(*rgb_count)++;
+
+	if (!prop_value)
+		return;
+
+	if (sde_cfg->qseed_type == SDE_SSPP_SCALER_QSEED2) {
+		set_bit(SDE_SSPP_SCALER_RGB, &sspp->features);
+		sblk->scaler_blk.id = SDE_SSPP_SCALER_QSEED2;
+		sblk->scaler_blk.base = PROP_VALUE_ACCESS(prop_value,
+			RGB_SCALER_OFF, 0);
+		sblk->scaler_blk.len = PROP_VALUE_ACCESS(prop_value,
+			RGB_SCALER_LEN, 0);
+		snprintf(sblk->scaler_blk.name, SDE_HW_BLK_NAME_LEN,
+			"sspp_scaler%u", sspp->id - SSPP_VIG0);
+	} else if (sde_cfg->qseed_type == SDE_SSPP_SCALER_QSEED3) {
+		set_bit(SDE_SSPP_SCALER_RGB, &sspp->features);
+		sblk->scaler_blk.id = SDE_SSPP_SCALER_QSEED3;
+		sblk->scaler_blk.base = PROP_VALUE_ACCESS(prop_value,
+			RGB_SCALER_LEN, 0);
+		sblk->scaler_blk.len = PROP_VALUE_ACCESS(prop_value,
+			SSPP_SCALE_SIZE, 0);
+		snprintf(sblk->scaler_blk.name, SDE_HW_BLK_NAME_LEN,
+			"sspp_scaler%u", sspp->id - SSPP_VIG0);
+	}
+
+	sblk->pcc_blk.id = SDE_SSPP_PCC;
+	if (prop_exists[RGB_PCC_PROP]) {
+		sblk->pcc_blk.base = PROP_VALUE_ACCESS(prop_value,
+			RGB_PCC_PROP, 0);
+		sblk->pcc_blk.version = PROP_VALUE_ACCESS(prop_value,
+			RGB_PCC_PROP, 1);
+		sblk->pcc_blk.len = 0;
+		set_bit(SDE_SSPP_PCC, &sspp->features);
+	}
+	snprintf(sspp->name, sizeof(sspp->name), "rgb%d", *rgb_count-1);
+}
+
+static void _sde_sspp_setup_cursor(struct sde_mdss_cfg *sde_cfg,
+	struct sde_sspp_cfg *sspp, struct sde_sspp_sub_blks *sblk,
+	struct sde_prop_value *prop_value, u32 *cursor_count)
+{
+	set_bit(SDE_SSPP_CURSOR, &sspp->features);
+	sblk->maxupscale = SSPP_UNITY_SCALE;
+	sblk->maxdwnscale = SSPP_UNITY_SCALE;
+	sblk->format_list = cursor_formats;
+	sspp->id = SSPP_CURSOR0 + *cursor_count;
+	snprintf(sspp->name, SDE_HW_BLK_NAME_LEN, "sspp_%u",
+			sspp->id - SSPP_VIG0);
+	sspp->clk_ctrl = SDE_CLK_CTRL_CURSOR0 + *cursor_count;
+	sspp->type = SSPP_TYPE_CURSOR;
+	(*cursor_count)++;
+	snprintf(sspp->name, sizeof(sspp->name), "cursor%d", *cursor_count-1);
+}
+
+static void _sde_sspp_setup_dma(struct sde_mdss_cfg *sde_cfg,
+	struct sde_sspp_cfg *sspp, struct sde_sspp_sub_blks *sblk,
+	struct sde_prop_value *prop_value, u32 *dma_count)
+{
+	sblk->maxupscale = SSPP_UNITY_SCALE;
+	sblk->maxdwnscale = SSPP_UNITY_SCALE;
+	sblk->format_list = plane_formats;
+	sspp->id = SSPP_DMA0 + *dma_count;
+	sspp->clk_ctrl = SDE_CLK_CTRL_DMA0 + *dma_count;
+	sspp->type = SSPP_TYPE_DMA;
+	snprintf(sspp->name, SDE_HW_BLK_NAME_LEN, "sspp_%u",
+			sspp->id - SSPP_VIG0);
+	set_bit(SDE_SSPP_QOS, &sspp->features);
+	(*dma_count)++;
+	snprintf(sspp->name, sizeof(sspp->name), "dma%d", *dma_count-1);
+}
+
+static int sde_sspp_parse_dt(struct device_node *np,
+	struct sde_mdss_cfg *sde_cfg)
+{
+	int rc, prop_count[SSPP_PROP_MAX], off_count, i, j;
+	int vig_prop_count[VIG_PROP_MAX], rgb_prop_count[RGB_PROP_MAX];
+	bool prop_exists[SSPP_PROP_MAX], vig_prop_exists[VIG_PROP_MAX];
+	bool rgb_prop_exists[RGB_PROP_MAX];
+	struct sde_prop_value *prop_value = NULL;
+	struct sde_prop_value *vig_prop_value = NULL, *rgb_prop_value = NULL;
+	const char *type;
+	struct sde_sspp_cfg *sspp;
+	struct sde_sspp_sub_blks *sblk;
+	u32 vig_count = 0, dma_count = 0, rgb_count = 0, cursor_count = 0;
+	u32 danger_count = 0, safe_count = 0;
+	struct device_node *snp = NULL;
+
+	prop_value = kzalloc(SSPP_PROP_MAX *
+			sizeof(struct sde_prop_value), GFP_KERNEL);
+	if (!prop_value) {
+		rc = -ENOMEM;
+		goto end;
+	}
+
+	rc = _validate_dt_entry(np, sspp_prop, ARRAY_SIZE(sspp_prop),
+		prop_count, &off_count);
+	if (rc)
+		goto end;
+
+	rc = _validate_dt_entry(np, &sspp_prop[SSPP_DANGER], 1,
+			&prop_count[SSPP_DANGER], &danger_count);
+	if (rc)
+		goto end;
+
+	rc = _validate_dt_entry(np, &sspp_prop[SSPP_SAFE], 1,
+			&prop_count[SSPP_SAFE], &safe_count);
+	if (rc)
+		goto end;
+
+	rc = _read_dt_entry(np, sspp_prop, ARRAY_SIZE(sspp_prop), prop_count,
+					prop_exists, prop_value);
+	if (rc)
+		goto end;
+
+	sde_cfg->sspp_count = off_count;
+
+	/* get vig feature dt properties if they exist */
+	snp = of_get_child_by_name(np, sspp_prop[SSPP_VIG_BLOCKS].prop_name);
+	if (snp) {
+		vig_prop_value = kzalloc(VIG_PROP_MAX *
+			sizeof(struct sde_prop_value), GFP_KERNEL);
+		if (!vig_prop_value) {
+			rc = -ENOMEM;
+			goto end;
+		}
+		rc = _validate_dt_entry(snp, vig_prop, ARRAY_SIZE(vig_prop),
+			vig_prop_count, NULL);
+		if (rc)
+			goto end;
+		rc = _read_dt_entry(snp, vig_prop, ARRAY_SIZE(vig_prop),
+				vig_prop_count, vig_prop_exists,
+				vig_prop_value);
+	}
+
+	/* get rgb feature dt properties if they exist */
+	snp = of_get_child_by_name(np, sspp_prop[SSPP_RGB_BLOCKS].prop_name);
+	if (snp) {
+		rgb_prop_value = kzalloc(RGB_PROP_MAX *
+					sizeof(struct sde_prop_value),
+					GFP_KERNEL);
+		if (!rgb_prop_value) {
+			rc = -ENOMEM;
+			goto end;
+		}
+		rc = _validate_dt_entry(snp, rgb_prop, ARRAY_SIZE(rgb_prop),
+			rgb_prop_count, NULL);
+		if (rc)
+			goto end;
+		rc = _read_dt_entry(snp, rgb_prop, ARRAY_SIZE(rgb_prop),
+				rgb_prop_count, rgb_prop_exists,
+				rgb_prop_value);
+	}
+
+	for (i = 0; i < off_count; i++) {
+		sspp = sde_cfg->sspp + i;
+		sblk = kzalloc(sizeof(*sblk), GFP_KERNEL);
+		if (!sblk) {
+			rc = -ENOMEM;
+			/* catalog deinit will release the allocated blocks */
+			goto end;
+		}
+		sspp->sblk = sblk;
+
+		sspp->base = PROP_VALUE_ACCESS(prop_value, SSPP_OFF, i);
+		sspp->len = PROP_VALUE_ACCESS(prop_value, SSPP_SIZE, 0);
+		sblk->maxlinewidth = sde_cfg->max_sspp_linewidth;
+
+		set_bit(SDE_SSPP_SRC, &sspp->features);
+		sblk->src_blk.id = SDE_SSPP_SRC;
+
+		of_property_read_string_index(np,
+				sspp_prop[SSPP_TYPE].prop_name, i, &type);
+		if (!strcmp(type, "vig")) {
+			_sde_sspp_setup_vig(sde_cfg, sspp, sblk,
+				vig_prop_exists, vig_prop_value, &vig_count);
+		} else if (!strcmp(type, "rgb")) {
+			_sde_sspp_setup_rgb(sde_cfg, sspp, sblk,
+				rgb_prop_exists, rgb_prop_value, &rgb_count);
+		} else if (!strcmp(type, "cursor")) {
+			/* No prop values for cursor pipes */
+			_sde_sspp_setup_cursor(sde_cfg, sspp, sblk, NULL,
+								&cursor_count);
+		} else if (!strcmp(type, "dma")) {
+			/* No prop values for DMA pipes */
+			_sde_sspp_setup_dma(sde_cfg, sspp, sblk, NULL,
+								&dma_count);
+		} else {
+			SDE_ERROR("invalid sspp type:%s\n", type);
+			rc = -EINVAL;
+			goto end;
+		}
+
+		snprintf(sblk->src_blk.name, SDE_HW_BLK_NAME_LEN, "sspp_src_%u",
+				sspp->id - SSPP_VIG0);
+
+		if (sspp->clk_ctrl >= SDE_CLK_CTRL_MAX) {
+			SDE_ERROR("%s: invalid clk ctrl: %d\n",
+					sblk->src_blk.name, sspp->clk_ctrl);
+			rc = -EINVAL;
+			goto end;
+		}
+
+		sblk->maxhdeciexp = MAX_HORZ_DECIMATION;
+		sblk->maxvdeciexp = MAX_VERT_DECIMATION;
+
+		sspp->xin_id = PROP_VALUE_ACCESS(prop_value, SSPP_XIN, i);
+		sblk->danger_lut_linear =
+			PROP_VALUE_ACCESS(prop_value, SSPP_DANGER, 0);
+		sblk->danger_lut_tile =
+			PROP_VALUE_ACCESS(prop_value, SSPP_DANGER, 1);
+		sblk->danger_lut_nrt =
+			PROP_VALUE_ACCESS(prop_value, SSPP_DANGER, 2);
+		sblk->safe_lut_linear =
+			PROP_VALUE_ACCESS(prop_value, SSPP_SAFE, 0);
+		sblk->safe_lut_tile =
+			PROP_VALUE_ACCESS(prop_value, SSPP_SAFE, 1);
+		sblk->safe_lut_nrt =
+			PROP_VALUE_ACCESS(prop_value, SSPP_SAFE, 2);
+		sblk->creq_lut_nrt = DEFAULT_CREQ_LUT_NRT;
+		sblk->pixel_ram_size = DEFAULT_PIXEL_RAM_SIZE;
+		sblk->src_blk.len = PROP_VALUE_ACCESS(prop_value, SSPP_SIZE, 0);
+
+		for (j = 0; j < sde_cfg->mdp_count; j++) {
+			sde_cfg->mdp[j].clk_ctrls[sspp->clk_ctrl].reg_off =
+				PROP_BITVALUE_ACCESS(prop_value,
+						SSPP_CLK_CTRL, i, 0);
+			sde_cfg->mdp[j].clk_ctrls[sspp->clk_ctrl].bit_off =
+				PROP_BITVALUE_ACCESS(prop_value,
+						SSPP_CLK_CTRL, i, 1);
+		}
+
+		SDE_DEBUG(
+			"xin:%d danger:%x/%x/%x safe:%x/%x/%x creq:%x ram:%d clk%d:%x/%d\n",
+			sspp->xin_id,
+			sblk->danger_lut_linear,
+			sblk->danger_lut_tile,
+			sblk->danger_lut_nrt,
+			sblk->safe_lut_linear,
+			sblk->safe_lut_tile,
+			sblk->safe_lut_nrt,
+			sblk->creq_lut_nrt,
+			sblk->pixel_ram_size,
+			sspp->clk_ctrl,
+			sde_cfg->mdp[0].clk_ctrls[sspp->clk_ctrl].reg_off,
+			sde_cfg->mdp[0].clk_ctrls[sspp->clk_ctrl].bit_off);
+	}
+
+end:
+	kfree(prop_value);
+	kfree(vig_prop_value);
+	kfree(rgb_prop_value);
+	return rc;
+}
+
+static int sde_ctl_parse_dt(struct device_node *np,
+		struct sde_mdss_cfg *sde_cfg)
+{
+	int rc, prop_count[HW_PROP_MAX], i;
+	bool prop_exists[HW_PROP_MAX];
+	struct sde_prop_value *prop_value = NULL;
+	struct sde_ctl_cfg *ctl;
+	u32 off_count;
+
+	if (!sde_cfg) {
+		SDE_ERROR("invalid argument input param\n");
+		rc = -EINVAL;
+		goto end;
+	}
+
+	prop_value = kzalloc(HW_PROP_MAX *
+			sizeof(struct sde_prop_value), GFP_KERNEL);
+	if (!prop_value) {
+		rc = -ENOMEM;
+		goto end;
+	}
+
+	rc = _validate_dt_entry(np, ctl_prop, ARRAY_SIZE(ctl_prop), prop_count,
+		&off_count);
+	if (rc)
+		goto end;
+
+	sde_cfg->ctl_count = off_count;
+
+	rc = _read_dt_entry(np, ctl_prop, ARRAY_SIZE(ctl_prop), prop_count,
+		prop_exists, prop_value);
+	if (rc)
+		goto end;
+
+	for (i = 0; i < off_count; i++) {
+		ctl = sde_cfg->ctl + i;
+		ctl->base = PROP_VALUE_ACCESS(prop_value, HW_OFF, i);
+		ctl->len = PROP_VALUE_ACCESS(prop_value, HW_LEN, 0);
+		ctl->id = CTL_0 + i;
+		snprintf(ctl->name, SDE_HW_BLK_NAME_LEN, "ctl_%u",
+				ctl->id - CTL_0);
+
+		if (i < MAX_SPLIT_DISPLAY_CTL)
+			set_bit(SDE_CTL_SPLIT_DISPLAY, &ctl->features);
+		if (i < MAX_PP_SPLIT_DISPLAY_CTL)
+			set_bit(SDE_CTL_PINGPONG_SPLIT, &ctl->features);
+	}
+
+end:
+	kfree(prop_value);
+	return rc;
+}
+
+static int sde_mixer_parse_dt(struct device_node *np,
+						struct sde_mdss_cfg *sde_cfg)
+{
+	int rc, prop_count[MIXER_PROP_MAX], i;
+	int blocks_prop_count[MIXER_BLOCKS_PROP_MAX];
+	bool prop_exists[MIXER_PROP_MAX];
+	bool blocks_prop_exists[MIXER_BLOCKS_PROP_MAX];
+	struct sde_prop_value *prop_value = NULL, *blocks_prop_value = NULL;
+	u32 off_count, max_blendstages;
+	u32 blend_reg_base[] = {0x20, 0x50, 0x80, 0xb0, 0x230, 0x260, 0x290};
+	u32 lm_pair_mask[] = {LM_1, LM_0, LM_5, 0x0, 0x0, LM_2};
+	struct sde_lm_cfg *mixer;
+	struct sde_lm_sub_blks *sblk;
+	int pp_count, dspp_count;
+	u32 pp_idx, dspp_idx;
+	struct device_node *snp = NULL;
+
+	if (!sde_cfg) {
+		SDE_ERROR("invalid argument input param\n");
+		rc = -EINVAL;
+		goto end;
+	}
+	max_blendstages = sde_cfg->max_mixer_blendstages;
+
+	prop_value = kzalloc(MIXER_PROP_MAX *
+			sizeof(struct sde_prop_value), GFP_KERNEL);
+	if (!prop_value) {
+		rc = -ENOMEM;
+		goto end;
+	}
+
+	rc = _validate_dt_entry(np, mixer_prop, ARRAY_SIZE(mixer_prop),
+		prop_count, &off_count);
+	if (rc)
+		goto end;
+
+	sde_cfg->mixer_count = off_count;
+
+	rc = _read_dt_entry(np, mixer_prop, ARRAY_SIZE(mixer_prop), prop_count,
+		prop_exists, prop_value);
+	if (rc)
+		goto end;
+
+	pp_count = sde_cfg->pingpong_count;
+	dspp_count = sde_cfg->dspp_count;
+
+	/* get mixer feature dt properties if they exist */
+	snp = of_get_child_by_name(np, mixer_prop[MIXER_BLOCKS].prop_name);
+	if (snp) {
+		blocks_prop_value = kzalloc(MIXER_BLOCKS_PROP_MAX *
+				MAX_SDE_HW_BLK * sizeof(struct sde_prop_value),
+				GFP_KERNEL);
+		if (!blocks_prop_value) {
+			rc = -ENOMEM;
+			goto end;
+		}
+		rc = _validate_dt_entry(snp, mixer_blocks_prop,
+			ARRAY_SIZE(mixer_blocks_prop), blocks_prop_count, NULL);
+		if (rc)
+			goto end;
+		rc = _read_dt_entry(snp, mixer_blocks_prop,
+				ARRAY_SIZE(mixer_blocks_prop),
+				blocks_prop_count, blocks_prop_exists,
+				blocks_prop_value);
+	}
+
+	for (i = 0, pp_idx = 0, dspp_idx = 0; i < off_count; i++) {
+		mixer = sde_cfg->mixer + i;
+		sblk = kzalloc(sizeof(*sblk), GFP_KERNEL);
+		if (!sblk) {
+			rc = -ENOMEM;
+			/* catalog deinit will release the allocated blocks */
+			goto end;
+		}
+		mixer->sblk = sblk;
+
+		mixer->base = PROP_VALUE_ACCESS(prop_value, MIXER_OFF, i);
+		mixer->len = PROP_VALUE_ACCESS(prop_value, MIXER_LEN, 0);
+		mixer->id = LM_0 + i;
+		snprintf(mixer->name, SDE_HW_BLK_NAME_LEN, "lm_%u",
+				mixer->id - LM_0);
+
+		if (!prop_exists[MIXER_LEN])
+			mixer->len = DEFAULT_SDE_HW_BLOCK_LEN;
+
+		if (lm_pair_mask[i])
+			mixer->lm_pair_mask = 1 << lm_pair_mask[i];
+
+		sblk->maxblendstages = max_blendstages;
+		sblk->maxwidth = sde_cfg->max_mixer_width;
+		memcpy(sblk->blendstage_base, blend_reg_base, sizeof(u32) *
+			min_t(u32, MAX_BLOCKS, min_t(u32,
+			ARRAY_SIZE(blend_reg_base), max_blendstages)));
+		if (sde_cfg->has_src_split)
+			set_bit(SDE_MIXER_SOURCESPLIT, &mixer->features);
+
+		if ((i < ROT_LM_OFFSET) || (i >= LINE_LM_OFFSET)) {
+			mixer->pingpong = pp_count > 0 ? pp_idx + PINGPONG_0
+								: PINGPONG_MAX;
+			mixer->dspp = dspp_count > 0 ? dspp_idx + DSPP_0
+								: DSPP_MAX;
+			pp_count--;
+			dspp_count--;
+			pp_idx++;
+			dspp_idx++;
+		} else {
+			mixer->pingpong = PINGPONG_MAX;
+			mixer->dspp = DSPP_MAX;
+		}
+
+		sblk->gc.id = SDE_MIXER_GC;
+		if (blocks_prop_value && blocks_prop_exists[MIXER_GC_PROP]) {
+			sblk->gc.base = PROP_VALUE_ACCESS(blocks_prop_value,
+					MIXER_GC_PROP, 0);
+			sblk->gc.version = PROP_VALUE_ACCESS(blocks_prop_value,
+					MIXER_GC_PROP, 1);
+			sblk->gc.len = 0;
+			set_bit(SDE_MIXER_GC, &mixer->features);
+		}
+	}
+
+end:
+	kfree(prop_value);
+	kfree(blocks_prop_value);
+	return rc;
+}
+
+static int sde_intf_parse_dt(struct device_node *np,
+						struct sde_mdss_cfg *sde_cfg)
+{
+	int rc, prop_count[INTF_PROP_MAX], i;
+	struct sde_prop_value *prop_value = NULL;
+	bool prop_exists[INTF_PROP_MAX];
+	u32 off_count;
+	u32 dsi_count = 0, none_count = 0, hdmi_count = 0, dp_count = 0;
+	const char *type;
+	struct sde_intf_cfg *intf;
+
+	if (!sde_cfg) {
+		SDE_ERROR("invalid argument\n");
+		rc = -EINVAL;
+		goto end;
+	}
+
+	prop_value = kzalloc(INTF_PROP_MAX *
+			sizeof(struct sde_prop_value), GFP_KERNEL);
+	if (!prop_value) {
+		rc = -ENOMEM;
+		goto end;
+	}
+
+	rc = _validate_dt_entry(np, intf_prop, ARRAY_SIZE(intf_prop),
+		prop_count, &off_count);
+	if (rc)
+		goto end;
+
+	sde_cfg->intf_count = off_count;
+
+	rc = _read_dt_entry(np, intf_prop, ARRAY_SIZE(intf_prop), prop_count,
+		prop_exists, prop_value);
+	if (rc)
+		goto end;
+
+	for (i = 0; i < off_count; i++) {
+		intf = sde_cfg->intf + i;
+		intf->base = PROP_VALUE_ACCESS(prop_value, INTF_OFF, i);
+		intf->len = PROP_VALUE_ACCESS(prop_value, INTF_LEN, 0);
+		intf->id = INTF_0 + i;
+		snprintf(intf->name, SDE_HW_BLK_NAME_LEN, "intf_%u",
+				intf->id - INTF_0);
+
+		if (!prop_exists[INTF_LEN])
+			intf->len = DEFAULT_SDE_HW_BLOCK_LEN;
+
+		intf->prog_fetch_lines_worst_case =
+				PROP_VALUE_ACCESS(prop_value, INTF_PREFETCH, i);
+
+		of_property_read_string_index(np,
+				intf_prop[INTF_TYPE].prop_name, i, &type);
+		if (!strcmp(type, "dsi")) {
+			intf->type = INTF_DSI;
+			intf->controller_id = dsi_count;
+			dsi_count++;
+		} else if (!strcmp(type, "hdmi")) {
+			intf->type = INTF_HDMI;
+			intf->controller_id = hdmi_count;
+			hdmi_count++;
+		} else if (!strcmp(type, "dp")) {
+			intf->type = INTF_DP;
+			intf->controller_id = dp_count;
+			dp_count++;
+		} else {
+			intf->type = INTF_NONE;
+			intf->controller_id = none_count;
+			none_count++;
+		}
+	}
+
+end:
+	kfree(prop_value);
+	return rc;
+}
+
+static int sde_wb_parse_dt(struct device_node *np,
+	struct sde_mdss_cfg *sde_cfg)
+{
+	int rc, prop_count[WB_PROP_MAX], i, j;
+	struct sde_prop_value *prop_value = NULL;
+	bool prop_exists[WB_PROP_MAX];
+	u32 off_count;
+	struct sde_wb_cfg *wb;
+	struct sde_wb_sub_blocks *sblk;
+
+	if (!sde_cfg) {
+		SDE_ERROR("invalid argument\n");
+		rc = -EINVAL;
+		goto end;
+	}
+
+	prop_value = kzalloc(WB_PROP_MAX *
+			sizeof(struct sde_prop_value), GFP_KERNEL);
+	if (!prop_value) {
+		rc = -ENOMEM;
+		goto end;
+	}
+
+	rc = _validate_dt_entry(np, wb_prop, ARRAY_SIZE(wb_prop), prop_count,
+		&off_count);
+	if (rc)
+		goto end;
+
+	sde_cfg->wb_count = off_count;
+
+	rc = _read_dt_entry(np, wb_prop, ARRAY_SIZE(wb_prop), prop_count,
+		prop_exists, prop_value);
+	if (rc)
+		goto end;
+
+	for (i = 0; i < off_count; i++) {
+		wb = sde_cfg->wb + i;
+		sblk = kzalloc(sizeof(*sblk), GFP_KERNEL);
+		if (!sblk) {
+			rc = -ENOMEM;
+			/* catalog deinit will release the allocated blocks */
+			goto end;
+		}
+		wb->sblk = sblk;
+
+		wb->base = PROP_VALUE_ACCESS(prop_value, WB_OFF, i);
+		wb->id = WB_0 + PROP_VALUE_ACCESS(prop_value, WB_ID, i);
+		snprintf(wb->name, SDE_HW_BLK_NAME_LEN, "wb_%u",
+				wb->id - WB_0);
+		wb->clk_ctrl = SDE_CLK_CTRL_WB0 +
+			PROP_VALUE_ACCESS(prop_value, WB_ID, i);
+		wb->xin_id = PROP_VALUE_ACCESS(prop_value, WB_XIN_ID, i);
+		wb->vbif_idx = VBIF_NRT;
+
+		if (wb->clk_ctrl >= SDE_CLK_CTRL_MAX) {
+			SDE_ERROR("%s: invalid clk ctrl: %d\n",
+					wb->name, wb->clk_ctrl);
+			rc = -EINVAL;
+			goto end;
+		}
+
+		wb->len = PROP_VALUE_ACCESS(prop_value, WB_LEN, 0);
+		wb->format_list = wb2_formats;
+		if (!prop_exists[WB_LEN])
+			wb->len = DEFAULT_SDE_HW_BLOCK_LEN;
+		sblk->maxlinewidth = sde_cfg->max_wb_linewidth;
+
+		if (wb->id >= LINE_MODE_WB_OFFSET)
+			set_bit(SDE_WB_LINE_MODE, &wb->features);
+		else
+			set_bit(SDE_WB_BLOCK_MODE, &wb->features);
+		set_bit(SDE_WB_TRAFFIC_SHAPER, &wb->features);
+		set_bit(SDE_WB_YUV_CONFIG, &wb->features);
+
+		for (j = 0; j < sde_cfg->mdp_count; j++) {
+			sde_cfg->mdp[j].clk_ctrls[wb->clk_ctrl].reg_off =
+				PROP_BITVALUE_ACCESS(prop_value,
+						WB_CLK_CTRL, i, 0);
+			sde_cfg->mdp[j].clk_ctrls[wb->clk_ctrl].bit_off =
+				PROP_BITVALUE_ACCESS(prop_value,
+						WB_CLK_CTRL, i, 1);
+		}
+
+		SDE_DEBUG(
+			"wb:%d xin:%d vbif:%d clk%d:%x/%d\n",
+			wb->id - WB_0,
+			wb->xin_id,
+			wb->vbif_idx,
+			wb->clk_ctrl,
+			sde_cfg->mdp[0].clk_ctrls[wb->clk_ctrl].reg_off,
+			sde_cfg->mdp[0].clk_ctrls[wb->clk_ctrl].bit_off);
+	}
+
+end:
+	kfree(prop_value);
+	return rc;
+}
+
+static void _sde_dspp_setup_blocks(struct sde_mdss_cfg *sde_cfg,
+	struct sde_dspp_cfg *dspp, struct sde_dspp_sub_blks *sblk,
+	bool *prop_exists, struct sde_prop_value *prop_value)
+{
+	sblk->igc.id = SDE_DSPP_IGC;
+	if (prop_exists[DSPP_IGC_PROP]) {
+		sblk->igc.base = PROP_VALUE_ACCESS(prop_value,
+			DSPP_IGC_PROP, 0);
+		sblk->igc.version = PROP_VALUE_ACCESS(prop_value,
+			DSPP_IGC_PROP, 1);
+		sblk->igc.len = 0;
+		set_bit(SDE_DSPP_IGC, &dspp->features);
+	}
+
+	sblk->pcc.id = SDE_DSPP_PCC;
+	if (prop_exists[DSPP_PCC_PROP]) {
+		sblk->pcc.base = PROP_VALUE_ACCESS(prop_value,
+			DSPP_PCC_PROP, 0);
+		sblk->pcc.version = PROP_VALUE_ACCESS(prop_value,
+			DSPP_PCC_PROP, 1);
+		sblk->pcc.len = 0;
+		set_bit(SDE_DSPP_PCC, &dspp->features);
+	}
+
+	sblk->gc.id = SDE_DSPP_GC;
+	if (prop_exists[DSPP_GC_PROP]) {
+		sblk->gc.base = PROP_VALUE_ACCESS(prop_value, DSPP_GC_PROP, 0);
+		sblk->gc.version = PROP_VALUE_ACCESS(prop_value,
+			DSPP_GC_PROP, 1);
+		sblk->gc.len = 0;
+		set_bit(SDE_DSPP_GC, &dspp->features);
+	}
+
+	sblk->gamut.id = SDE_DSPP_GAMUT;
+	if (prop_exists[DSPP_GAMUT_PROP]) {
+		sblk->gamut.base = PROP_VALUE_ACCESS(prop_value,
+			DSPP_GAMUT_PROP, 0);
+		sblk->gamut.version = PROP_VALUE_ACCESS(prop_value,
+			DSPP_GAMUT_PROP, 1);
+		sblk->gamut.len = 0;
+		set_bit(SDE_DSPP_GAMUT, &dspp->features);
+	}
+
+	sblk->dither.id = SDE_DSPP_DITHER;
+	if (prop_exists[DSPP_DITHER_PROP]) {
+		sblk->dither.base = PROP_VALUE_ACCESS(prop_value,
+			DSPP_DITHER_PROP, 0);
+		sblk->dither.version = PROP_VALUE_ACCESS(prop_value,
+			DSPP_DITHER_PROP, 1);
+		sblk->dither.len = 0;
+		set_bit(SDE_DSPP_DITHER, &dspp->features);
+	}
+
+	sblk->hist.id = SDE_DSPP_HIST;
+	if (prop_exists[DSPP_HIST_PROP]) {
+		sblk->hist.base = PROP_VALUE_ACCESS(prop_value,
+			DSPP_HIST_PROP, 0);
+		sblk->hist.version = PROP_VALUE_ACCESS(prop_value,
+			DSPP_HIST_PROP, 1);
+		sblk->hist.len = 0;
+		set_bit(SDE_DSPP_HIST, &dspp->features);
+	}
+
+	sblk->hsic.id = SDE_DSPP_HSIC;
+	if (prop_exists[DSPP_HSIC_PROP]) {
+		sblk->hsic.base = PROP_VALUE_ACCESS(prop_value,
+			DSPP_HSIC_PROP, 0);
+		sblk->hsic.version = PROP_VALUE_ACCESS(prop_value,
+			DSPP_HSIC_PROP, 1);
+		sblk->hsic.len = 0;
+		set_bit(SDE_DSPP_HSIC, &dspp->features);
+	}
+
+	sblk->memcolor.id = SDE_DSPP_MEMCOLOR;
+	if (prop_exists[DSPP_MEMCOLOR_PROP]) {
+		sblk->memcolor.base = PROP_VALUE_ACCESS(prop_value,
+			DSPP_MEMCOLOR_PROP, 0);
+		sblk->memcolor.version = PROP_VALUE_ACCESS(prop_value,
+			DSPP_MEMCOLOR_PROP, 1);
+		sblk->memcolor.len = 0;
+		set_bit(SDE_DSPP_MEMCOLOR, &dspp->features);
+	}
+
+	sblk->sixzone.id = SDE_DSPP_SIXZONE;
+	if (prop_exists[DSPP_SIXZONE_PROP]) {
+		sblk->sixzone.base = PROP_VALUE_ACCESS(prop_value,
+			DSPP_SIXZONE_PROP, 0);
+		sblk->sixzone.version = PROP_VALUE_ACCESS(prop_value,
+			DSPP_SIXZONE_PROP, 1);
+		sblk->sixzone.len = 0;
+		set_bit(SDE_DSPP_SIXZONE, &dspp->features);
+	}
+
+	sblk->vlut.id = SDE_DSPP_VLUT;
+	if (prop_exists[DSPP_VLUT_PROP]) {
+		sblk->vlut.base = PROP_VALUE_ACCESS(prop_value,
+			DSPP_VLUT_PROP, 0);
+		sblk->vlut.version = PROP_VALUE_ACCESS(prop_value,
+			DSPP_VLUT_PROP, 1);
+		sblk->sixzone.len = 0;
+		set_bit(SDE_DSPP_VLUT, &dspp->features);
+	}
+}
+
+static int sde_dspp_parse_dt(struct device_node *np,
+						struct sde_mdss_cfg *sde_cfg)
+{
+	int rc, prop_count[DSPP_PROP_MAX], i;
+	int ad_prop_count[AD_PROP_MAX];
+	bool prop_exists[DSPP_PROP_MAX], ad_prop_exists[AD_PROP_MAX];
+	bool blocks_prop_exists[DSPP_BLOCKS_PROP_MAX];
+	struct sde_prop_value *ad_prop_value = NULL;
+	int blocks_prop_count[DSPP_BLOCKS_PROP_MAX];
+	struct sde_prop_value *prop_value = NULL, *blocks_prop_value = NULL;
+	u32 off_count, ad_off_count;
+	struct sde_dspp_cfg *dspp;
+	struct sde_dspp_sub_blks *sblk;
+	struct device_node *snp = NULL;
+
+	if (!sde_cfg) {
+		SDE_ERROR("invalid argument\n");
+		rc = -EINVAL;
+		goto end;
+	}
+
+	prop_value = kzalloc(DSPP_PROP_MAX *
+			sizeof(struct sde_prop_value), GFP_KERNEL);
+	if (!prop_value) {
+		rc = -ENOMEM;
+		goto end;
+	}
+
+	rc = _validate_dt_entry(np, dspp_prop, ARRAY_SIZE(dspp_prop),
+		prop_count, &off_count);
+	if (rc)
+		goto end;
+
+	sde_cfg->dspp_count = off_count;
+
+	rc = _read_dt_entry(np, dspp_prop, ARRAY_SIZE(dspp_prop), prop_count,
+		prop_exists, prop_value);
+	if (rc)
+		goto end;
+
+	/* Parse AD dtsi entries */
+	ad_prop_value = kzalloc(AD_PROP_MAX *
+			sizeof(struct sde_prop_value), GFP_KERNEL);
+	if (!ad_prop_value) {
+		rc = -ENOMEM;
+		goto end;
+	}
+	rc = _validate_dt_entry(np, ad_prop, ARRAY_SIZE(ad_prop),
+		ad_prop_count, &ad_off_count);
+	if (rc)
+		goto end;
+	rc = _read_dt_entry(np, ad_prop, ARRAY_SIZE(ad_prop), ad_prop_count,
+		ad_prop_exists, ad_prop_value);
+	if (rc)
+		goto end;
+
+	/* get DSPP feature dt properties if they exist */
+	snp = of_get_child_by_name(np, dspp_prop[DSPP_BLOCKS].prop_name);
+	if (snp) {
+		blocks_prop_value = kzalloc(DSPP_BLOCKS_PROP_MAX *
+				MAX_SDE_HW_BLK * sizeof(struct sde_prop_value),
+				GFP_KERNEL);
+		if (!blocks_prop_value) {
+			rc = -ENOMEM;
+			goto end;
+		}
+		rc = _validate_dt_entry(snp, dspp_blocks_prop,
+			ARRAY_SIZE(dspp_blocks_prop), blocks_prop_count, NULL);
+		if (rc)
+			goto end;
+		rc = _read_dt_entry(snp, dspp_blocks_prop,
+			ARRAY_SIZE(dspp_blocks_prop), blocks_prop_count,
+			blocks_prop_exists, blocks_prop_value);
+		if (rc)
+			goto end;
+	}
+
+	for (i = 0; i < off_count; i++) {
+		dspp = sde_cfg->dspp + i;
+		dspp->base = PROP_VALUE_ACCESS(prop_value, DSPP_OFF, i);
+		dspp->len = PROP_VALUE_ACCESS(prop_value, DSPP_SIZE, 0);
+		dspp->id = DSPP_0 + i;
+		snprintf(dspp->name, SDE_HW_BLK_NAME_LEN, "dspp_%u",
+				dspp->id - DSPP_0);
+
+		sblk = kzalloc(sizeof(*sblk), GFP_KERNEL);
+		if (!sblk) {
+			rc = -ENOMEM;
+			/* catalog deinit will release the allocated blocks */
+			goto end;
+		}
+		dspp->sblk = sblk;
+
+		if (blocks_prop_value)
+			_sde_dspp_setup_blocks(sde_cfg, dspp, sblk,
+					blocks_prop_exists, blocks_prop_value);
+
+		sblk->ad.id = SDE_DSPP_AD;
+		if (ad_prop_value && (i < ad_off_count) &&
+		    ad_prop_exists[AD_OFF]) {
+			sblk->ad.base = PROP_VALUE_ACCESS(ad_prop_value,
+				AD_OFF, i);
+			sblk->ad.version = PROP_VALUE_ACCESS(ad_prop_value,
+				AD_VERSION, 0);
+			set_bit(SDE_DSPP_AD, &dspp->features);
+		}
+	}
+
+end:
+	kfree(prop_value);
+	kfree(ad_prop_value);
+	kfree(blocks_prop_value);
+	return rc;
+}
+
+static int sde_cdm_parse_dt(struct device_node *np,
+				struct sde_mdss_cfg *sde_cfg)
+{
+	int rc, prop_count[HW_PROP_MAX], i;
+	struct sde_prop_value *prop_value = NULL;
+	bool prop_exists[HW_PROP_MAX];
+	u32 off_count;
+	struct sde_cdm_cfg *cdm;
+
+	if (!sde_cfg) {
+		SDE_ERROR("invalid argument\n");
+		rc = -EINVAL;
+		goto end;
+	}
+
+	prop_value = kzalloc(HW_PROP_MAX *
+			sizeof(struct sde_prop_value), GFP_KERNEL);
+	if (!prop_value) {
+		rc = -ENOMEM;
+		goto end;
+	}
+
+	rc = _validate_dt_entry(np, cdm_prop, ARRAY_SIZE(cdm_prop), prop_count,
+		&off_count);
+	if (rc)
+		goto end;
+
+	sde_cfg->cdm_count = off_count;
+
+	rc = _read_dt_entry(np, cdm_prop, ARRAY_SIZE(cdm_prop), prop_count,
+		prop_exists, prop_value);
+	if (rc)
+		goto end;
+
+	for (i = 0; i < off_count; i++) {
+		cdm = sde_cfg->cdm + i;
+		cdm->base = PROP_VALUE_ACCESS(prop_value, HW_OFF, i);
+		cdm->id = CDM_0 + i;
+		snprintf(cdm->name, SDE_HW_BLK_NAME_LEN, "cdm_%u",
+				cdm->id - CDM_0);
+		cdm->len = PROP_VALUE_ACCESS(prop_value, HW_LEN, 0);
+
+		/* intf3 and wb2 for cdm block */
+		cdm->wb_connect = sde_cfg->wb_count ? BIT(WB_2) : BIT(31);
+		cdm->intf_connect = sde_cfg->intf_count ? BIT(INTF_3) : BIT(31);
+	}
+
+end:
+	kfree(prop_value);
+	return rc;
+}
+
+static int sde_vbif_parse_dt(struct device_node *np,
+				struct sde_mdss_cfg *sde_cfg)
+{
+	int rc, prop_count[VBIF_PROP_MAX], i, j, k;
+	struct sde_prop_value *prop_value = NULL;
+	bool prop_exists[VBIF_PROP_MAX];
+	u32 off_count, vbif_len, rd_len = 0, wr_len = 0;
+	struct sde_vbif_cfg *vbif;
+
+	if (!sde_cfg) {
+		SDE_ERROR("invalid argument\n");
+		rc = -EINVAL;
+		goto end;
+	}
+
+	prop_value = kzalloc(VBIF_PROP_MAX *
+			sizeof(struct sde_prop_value), GFP_KERNEL);
+	if (!prop_value) {
+		rc = -ENOMEM;
+		goto end;
+	}
+
+	rc = _validate_dt_entry(np, vbif_prop, ARRAY_SIZE(vbif_prop),
+			prop_count, &off_count);
+	if (rc)
+		goto end;
+
+	rc = _validate_dt_entry(np, &vbif_prop[VBIF_DYNAMIC_OT_RD_LIMIT], 1,
+			&prop_count[VBIF_DYNAMIC_OT_RD_LIMIT], &rd_len);
+	if (rc)
+		goto end;
+
+	rc = _validate_dt_entry(np, &vbif_prop[VBIF_DYNAMIC_OT_WR_LIMIT], 1,
+			&prop_count[VBIF_DYNAMIC_OT_WR_LIMIT], &wr_len);
+	if (rc)
+		goto end;
+
+	sde_cfg->vbif_count = off_count;
+
+	rc = _read_dt_entry(np, vbif_prop, ARRAY_SIZE(vbif_prop), prop_count,
+		prop_exists, prop_value);
+	if (rc)
+		goto end;
+
+	vbif_len = PROP_VALUE_ACCESS(prop_value, VBIF_LEN, 0);
+	if (!prop_exists[VBIF_LEN])
+		vbif_len = DEFAULT_SDE_HW_BLOCK_LEN;
+
+	for (i = 0; i < off_count; i++) {
+		vbif = sde_cfg->vbif + i;
+		vbif->base = PROP_VALUE_ACCESS(prop_value, VBIF_OFF, i);
+		vbif->len = vbif_len;
+		vbif->id = VBIF_0 + PROP_VALUE_ACCESS(prop_value, VBIF_ID, i);
+		snprintf(vbif->name, SDE_HW_BLK_NAME_LEN, "vbif_%u",
+				vbif->id - VBIF_0);
+
+		SDE_DEBUG("vbif:%d\n", vbif->id - VBIF_0);
+
+		vbif->xin_halt_timeout = VBIF_XIN_HALT_TIMEOUT;
+
+		vbif->default_ot_rd_limit = PROP_VALUE_ACCESS(prop_value,
+				VBIF_DEFAULT_OT_RD_LIMIT, 0);
+		SDE_DEBUG("default_ot_rd_limit=%u\n",
+				vbif->default_ot_rd_limit);
+
+		vbif->default_ot_wr_limit = PROP_VALUE_ACCESS(prop_value,
+				VBIF_DEFAULT_OT_WR_LIMIT, 0);
+		SDE_DEBUG("default_ot_wr_limit=%u\n",
+				vbif->default_ot_wr_limit);
+
+		vbif->dynamic_ot_rd_tbl.count =
+				prop_count[VBIF_DYNAMIC_OT_RD_LIMIT] / 2;
+		SDE_DEBUG("dynamic_ot_rd_tbl.count=%u\n",
+				vbif->dynamic_ot_rd_tbl.count);
+		if (vbif->dynamic_ot_rd_tbl.count) {
+			vbif->dynamic_ot_rd_tbl.cfg = kcalloc(
+				vbif->dynamic_ot_rd_tbl.count,
+				sizeof(struct sde_vbif_dynamic_ot_cfg),
+				GFP_KERNEL);
+			if (!vbif->dynamic_ot_rd_tbl.cfg) {
+				rc = -ENOMEM;
+				goto end;
+			}
+		}
+
+		for (j = 0, k = 0; j < vbif->dynamic_ot_rd_tbl.count; j++) {
+			vbif->dynamic_ot_rd_tbl.cfg[j].pps = (u64)
+				PROP_VALUE_ACCESS(prop_value,
+				VBIF_DYNAMIC_OT_RD_LIMIT, k++);
+			vbif->dynamic_ot_rd_tbl.cfg[j].ot_limit =
+				PROP_VALUE_ACCESS(prop_value,
+				VBIF_DYNAMIC_OT_RD_LIMIT, k++);
+			SDE_DEBUG("dynamic_ot_rd_tbl[%d].cfg=<%llu %u>\n", j,
+				vbif->dynamic_ot_rd_tbl.cfg[j].pps,
+				vbif->dynamic_ot_rd_tbl.cfg[j].ot_limit);
+		}
+
+		vbif->dynamic_ot_wr_tbl.count =
+				prop_count[VBIF_DYNAMIC_OT_WR_LIMIT] / 2;
+		SDE_DEBUG("dynamic_ot_wr_tbl.count=%u\n",
+				vbif->dynamic_ot_wr_tbl.count);
+		if (vbif->dynamic_ot_wr_tbl.count) {
+			vbif->dynamic_ot_wr_tbl.cfg = kcalloc(
+				vbif->dynamic_ot_wr_tbl.count,
+				sizeof(struct sde_vbif_dynamic_ot_cfg),
+				GFP_KERNEL);
+			if (!vbif->dynamic_ot_wr_tbl.cfg) {
+				rc = -ENOMEM;
+				goto end;
+			}
+		}
+
+		for (j = 0, k = 0; j < vbif->dynamic_ot_wr_tbl.count; j++) {
+			vbif->dynamic_ot_wr_tbl.cfg[j].pps = (u64)
+				PROP_VALUE_ACCESS(prop_value,
+				VBIF_DYNAMIC_OT_WR_LIMIT, k++);
+			vbif->dynamic_ot_wr_tbl.cfg[j].ot_limit =
+				PROP_VALUE_ACCESS(prop_value,
+				VBIF_DYNAMIC_OT_WR_LIMIT, k++);
+			SDE_DEBUG("dynamic_ot_wr_tbl[%d].cfg=<%llu %u>\n", j,
+				vbif->dynamic_ot_wr_tbl.cfg[j].pps,
+				vbif->dynamic_ot_wr_tbl.cfg[j].ot_limit);
+		}
+
+		if (vbif->default_ot_rd_limit || vbif->default_ot_wr_limit ||
+				vbif->dynamic_ot_rd_tbl.count ||
+				vbif->dynamic_ot_wr_tbl.count)
+			set_bit(SDE_VBIF_QOS_OTLIM, &vbif->features);
+	}
+
+end:
+	kfree(prop_value);
+	return rc;
+}
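+
+/*
+ * For illustration (hypothetical dtsi values): a dynamic OT read-limit
+ * property of <62208000 2 124416000 4> is parsed above into two
+ * (pps, ot_limit) entries, (62208000, 2) and (124416000, 4), listed in
+ * ascending pixels-per-second order as the OT table expects.
+ */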
+
+static int sde_pp_parse_dt(struct device_node *np,
+	struct sde_mdss_cfg *sde_cfg)
+{
+	int rc, prop_count[PP_PROP_MAX], i;
+	struct sde_prop_value *prop_value = NULL;
+	bool prop_exists[PP_PROP_MAX];
+	u32 off_count;
+	struct sde_pingpong_cfg *pp;
+	struct sde_pingpong_sub_blks *sblk;
+
+	if (!sde_cfg) {
+		SDE_ERROR("invalid argument\n");
+		rc = -EINVAL;
+		goto end;
+	}
+
+	prop_value = kzalloc(PP_PROP_MAX *
+			sizeof(struct sde_prop_value), GFP_KERNEL);
+	if (!prop_value) {
+		rc = -ENOMEM;
+		goto end;
+	}
+
+	rc = _validate_dt_entry(np, pp_prop, ARRAY_SIZE(pp_prop), prop_count,
+		&off_count);
+	if (rc)
+		goto end;
+
+	sde_cfg->pingpong_count = off_count;
+
+	rc = _read_dt_entry(np, pp_prop, ARRAY_SIZE(pp_prop), prop_count,
+		prop_exists, prop_value);
+	if (rc)
+		goto end;
+
+	for (i = 0; i < off_count; i++) {
+		pp = sde_cfg->pingpong + i;
+		sblk = kzalloc(sizeof(*sblk), GFP_KERNEL);
+		if (!sblk) {
+			rc = -ENOMEM;
+			/* catalog deinit will release the allocated blocks */
+			goto end;
+		}
+		pp->sblk = sblk;
+
+		pp->base = PROP_VALUE_ACCESS(prop_value, PP_OFF, i);
+		pp->id = PINGPONG_0 + i;
+		snprintf(pp->name, SDE_HW_BLK_NAME_LEN, "pingpong_%u",
+				pp->id - PINGPONG_0);
+		pp->len = PROP_VALUE_ACCESS(prop_value, PP_LEN, 0);
+
+		sblk->te.base = PROP_VALUE_ACCESS(prop_value, TE_OFF, i);
+		sblk->te.id = SDE_PINGPONG_TE;
+		snprintf(sblk->te.name, SDE_HW_BLK_NAME_LEN, "te_%u",
+				pp->id - PINGPONG_0);
+		set_bit(SDE_PINGPONG_TE, &pp->features);
+
+		sblk->te2.base = PROP_VALUE_ACCESS(prop_value, TE2_OFF, i);
+		if (sblk->te2.base) {
+			sblk->te2.id = SDE_PINGPONG_TE2;
+			snprintf(sblk->te2.name, SDE_HW_BLK_NAME_LEN, "te2_%u",
+					pp->id - PINGPONG_0);
+			set_bit(SDE_PINGPONG_TE2, &pp->features);
+			set_bit(SDE_PINGPONG_SPLIT, &pp->features);
+		}
+
+		if (PROP_VALUE_ACCESS(prop_value, PP_SLAVE, i))
+			set_bit(SDE_PINGPONG_SLAVE, &pp->features);
+
+		sblk->dsc.base = PROP_VALUE_ACCESS(prop_value, DSC_OFF, i);
+		if (sblk->dsc.base) {
+			sblk->dsc.id = SDE_PINGPONG_DSC;
+			snprintf(sblk->dsc.name, SDE_HW_BLK_NAME_LEN, "dsc_%u",
+					pp->id - PINGPONG_0);
+			set_bit(SDE_PINGPONG_DSC, &pp->features);
+		}
+	}
+
+end:
+	kfree(prop_value);
+	return rc;
+}
+
+static inline u32 _sde_parse_sspp_id(struct sde_mdss_cfg *cfg,
+	const char *name)
+{
+	int i;
+
+	for (i = 0; i < cfg->sspp_count; i++) {
+		if (!strcmp(cfg->sspp[i].name, name))
+			return cfg->sspp[i].id;
+	}
+
+	return SSPP_NONE;
+}
+
+static int _sde_vp_parse_dt(struct device_node *np,
+	struct sde_mdss_cfg *cfg)
+{
+	int rc = 0, i = 0;
+	struct device_node *node = NULL;
+	struct device_node *root_node = NULL;
+	struct sde_vp_cfg *vp;
+	struct sde_vp_sub_blks *vp_sub, *vp_sub_next;
+	struct property *prop;
+	const char *cname;
+
+	root_node = of_get_child_by_name(np, "qcom,sde-plane-id-map");
+	if (!root_node) {
+		root_node = of_parse_phandle(np, "qcom,sde-plane-id-map", 0);
+		if (!root_node) {
+			SDE_ERROR("No entry present for qcom,sde-plane-id-map");
+			rc = -EINVAL;
+			goto end;
+		}
+	}
+
+	for_each_child_of_node(root_node, node) {
+		if (i >= MAX_BLOCKS) {
+			SDE_ERROR("num of nodes(%d) is bigger than max(%d)\n",
+					i, MAX_BLOCKS);
+			rc = -EINVAL;
+			goto end;
+		}
+		cfg->vp_count++;
+		vp = &(cfg->vp[i]);
+		vp->id = i;
+		rc = of_property_read_string(node, "qcom,display-type",
+						&(vp->display_type));
+		if (rc) {
+			SDE_ERROR("failed to read display-type, rc = %d\n", rc);
+			goto end;
+		}
+
+		rc = of_property_read_string(node, "qcom,plane-type",
+						&(vp->plane_type));
+		if (rc) {
+			SDE_ERROR("failed to read plane-type, rc = %d\n", rc);
+			goto end;
+		}
+
+		INIT_LIST_HEAD(&vp->sub_blks);
+		of_property_for_each_string(node, "qcom,plane-name",
+						prop, cname) {
+			vp_sub = kzalloc(sizeof(*vp_sub), GFP_KERNEL);
+			if (!vp_sub) {
+				rc = -ENOMEM;
+				goto end;
+			}
+			vp_sub->sspp_id = _sde_parse_sspp_id(cfg, cname);
+			list_add_tail(&vp_sub->pipeid_list, &vp->sub_blks);
+		}
+		i++;
+	}
+
+end:
+	if (rc && cfg->vp_count) {
+		vp = &(cfg->vp[i]);
+		for (i = 0; i < cfg->vp_count; i++) {
+			list_for_each_entry_safe(vp_sub, vp_sub_next,
+				&vp->sub_blks, pipeid_list) {
+				list_del(&vp_sub->pipeid_list);
+				kfree(vp_sub);
+			}
+		}
+		memset(&(cfg->vp[0]), 0, sizeof(cfg->vp));
+		cfg->vp_count = 0;
+	}
+	return rc;
+}
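+
+/*
+ * Sketch of a device tree layout consumed by _sde_vp_parse_dt(); the node
+ * and property names come from the code above, the child node name and
+ * property values are illustrative:
+ *
+ *	qcom,sde-plane-id-map {
+ *		vp@0 {
+ *			qcom,display-type = "primary";
+ *			qcom,plane-type = "overlay";
+ *			qcom,plane-name = "vig0", "vig1";
+ *		};
+ *	};
+ *
+ * Each qcom,plane-name entry is resolved to an SSPP id through
+ * _sde_parse_sspp_id() and queued on the virtual plane's sub_blks list.
+ */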
+
+static int sde_parse_dt(struct device_node *np, struct sde_mdss_cfg *cfg)
+{
+	int rc, len, prop_count[SDE_PROP_MAX];
+	struct sde_prop_value *prop_value = NULL;
+	bool prop_exists[SDE_PROP_MAX];
+	const char *type;
+
+	if (!cfg) {
+		SDE_ERROR("invalid argument\n");
+		rc = -EINVAL;
+		goto end;
+	}
+
+	prop_value = kzalloc(SDE_PROP_MAX *
+			sizeof(struct sde_prop_value), GFP_KERNEL);
+	if (!prop_value) {
+		rc = -ENOMEM;
+		goto end;
+	}
+
+	rc = _validate_dt_entry(np, sde_prop, ARRAY_SIZE(sde_prop), prop_count,
+		&len);
+	if (rc)
+		goto end;
+
+	rc = _read_dt_entry(np, sde_prop, ARRAY_SIZE(sde_prop), prop_count,
+		prop_exists, prop_value);
+	if (rc)
+		goto end;
+
+	cfg->mdss_count = 1;
+	cfg->mdss[0].base = MDSS_BASE_OFFSET;
+	cfg->mdss[0].id = MDP_TOP;
+	snprintf(cfg->mdss[0].name, SDE_HW_BLK_NAME_LEN, "mdss_%u",
+			cfg->mdss[0].id - MDP_TOP);
+
+	cfg->mdp_count = 1;
+	cfg->mdp[0].id = MDP_TOP;
+	snprintf(cfg->mdp[0].name, SDE_HW_BLK_NAME_LEN, "top_%u",
+		cfg->mdp[0].id - MDP_TOP);
+	cfg->mdp[0].base = PROP_VALUE_ACCESS(prop_value, SDE_OFF, 0);
+	cfg->mdp[0].len = PROP_VALUE_ACCESS(prop_value, SDE_LEN, 0);
+	if (!prop_exists[SDE_LEN])
+		cfg->mdp[0].len = DEFAULT_SDE_HW_BLOCK_LEN;
+
+	cfg->max_sspp_linewidth = PROP_VALUE_ACCESS(prop_value,
+			SSPP_LINEWIDTH, 0);
+	if (!prop_exists[SSPP_LINEWIDTH])
+		cfg->max_sspp_linewidth = DEFAULT_SDE_LINE_WIDTH;
+
+	cfg->max_mixer_width = PROP_VALUE_ACCESS(prop_value,
+			MIXER_LINEWIDTH, 0);
+	if (!prop_exists[MIXER_LINEWIDTH])
+		cfg->max_mixer_width = DEFAULT_SDE_LINE_WIDTH;
+
+	cfg->max_mixer_blendstages = PROP_VALUE_ACCESS(prop_value,
+			MIXER_BLEND, 0);
+	if (!prop_exists[MIXER_BLEND])
+		cfg->max_mixer_blendstages = DEFAULT_SDE_MIXER_BLENDSTAGES;
+
+	cfg->max_wb_linewidth = PROP_VALUE_ACCESS(prop_value, WB_LINEWIDTH, 0);
+	if (!prop_exists[WB_LINEWIDTH])
+		cfg->max_wb_linewidth = DEFAULT_SDE_LINE_WIDTH;
+
+	cfg->mdp[0].highest_bank_bit = PROP_VALUE_ACCESS(prop_value,
+			BANK_BIT, 0);
+	if (!prop_exists[BANK_BIT])
+		cfg->mdp[0].highest_bank_bit = DEFAULT_SDE_HIGHEST_BANK_BIT;
+
+	rc = of_property_read_string(np, sde_prop[QSEED_TYPE].prop_name, &type);
+	if (!rc && !strcmp(type, "qseedv3"))
+		cfg->qseed_type = SDE_SSPP_SCALER_QSEED3;
+	else if (!rc && !strcmp(type, "qseedv2"))
+		cfg->qseed_type = SDE_SSPP_SCALER_QSEED2;
+	else if (rc) {
+		SDE_DEBUG("qseed property not found\n");
+		rc = 0;
+	}
+
+	rc = of_property_read_string(np, sde_prop[CSC_TYPE].prop_name, &type);
+	if (!rc && !strcmp(type, "csc"))
+		cfg->csc_type = SDE_SSPP_CSC;
+	else if (!rc && !strcmp(type, "csc-10bit"))
+		cfg->csc_type = SDE_SSPP_CSC_10BIT;
+	else if (rc) {
+		SDE_DEBUG("CSC property not found\n");
+		rc = 0;
+	}
+
+	cfg->has_src_split = PROP_VALUE_ACCESS(prop_value, SRC_SPLIT, 0);
+end:
+	kfree(prop_value);
+	return rc;
+}
+
+static int sde_perf_parse_dt(struct device_node *np,
+	struct sde_mdss_cfg *cfg)
+{
+	int rc, len, prop_count[PERF_PROP_MAX];
+	struct sde_prop_value *prop_value = NULL;
+	bool prop_exists[PERF_PROP_MAX];
+
+	if (!cfg) {
+		SDE_ERROR("invalid argument\n");
+		rc = -EINVAL;
+		goto end;
+	}
+
+	prop_value = kzalloc(PERF_PROP_MAX *
+			sizeof(struct sde_prop_value), GFP_KERNEL);
+	if (!prop_value) {
+		rc = -ENOMEM;
+		goto end;
+	}
+
+	rc = _validate_dt_entry(np, sde_perf_prop, ARRAY_SIZE(sde_perf_prop),
+			prop_count, &len);
+	if (rc)
+		goto freeprop;
+
+	rc = _read_dt_entry(np, sde_perf_prop, ARRAY_SIZE(sde_perf_prop),
+			prop_count, prop_exists, prop_value);
+	if (rc)
+		goto freeprop;
+
+	cfg->perf.max_bw_low =
+			PROP_VALUE_ACCESS(prop_value, PERF_MAX_BW_LOW, 0);
+	cfg->perf.max_bw_high =
+			PROP_VALUE_ACCESS(prop_value, PERF_MAX_BW_HIGH, 0);
+
+freeprop:
+	kfree(prop_value);
+end:
+	return rc;
+}
+
+static int sde_hardware_format_caps(struct sde_mdss_cfg *sde_cfg,
+	uint32_t hw_rev)
+{
+	int i, rc = 0;
+	uint32_t dma_list_size, vig_list_size, wb2_list_size;
+	uint32_t cursor_list_size = 0;
+	struct sde_sspp_sub_blks *sblk;
+	uint32_t index = 0;
+
+	if (IS_SDE_MAJOR_MINOR_SAME((hw_rev), SDE_HW_VER_300)) {
+		cursor_list_size = ARRAY_SIZE(cursor_formats);
+		sde_cfg->cursor_formats = kcalloc(cursor_list_size,
+			sizeof(struct sde_format_extended), GFP_KERNEL);
+		if (!sde_cfg->cursor_formats) {
+			rc = -ENOMEM;
+			goto end;
+		}
+		index = _sde_copy_formats(sde_cfg->cursor_formats,
+			cursor_list_size, 0, cursor_formats,
+			ARRAY_SIZE(cursor_formats));
+	}
+
+	dma_list_size = ARRAY_SIZE(plane_formats);
+	vig_list_size = ARRAY_SIZE(plane_formats_yuv);
+	wb2_list_size = ARRAY_SIZE(wb2_formats);
+
+	dma_list_size += ARRAY_SIZE(rgb_10bit_formats);
+	vig_list_size += ARRAY_SIZE(rgb_10bit_formats)
+		+ ARRAY_SIZE(tp10_ubwc_formats)
+		+ ARRAY_SIZE(p010_formats);
+	wb2_list_size += ARRAY_SIZE(rgb_10bit_formats)
+		+ ARRAY_SIZE(tp10_ubwc_formats);
+
+	sde_cfg->dma_formats = kcalloc(dma_list_size,
+		sizeof(struct sde_format_extended), GFP_KERNEL);
+	if (!sde_cfg->dma_formats) {
+		rc = -ENOMEM;
+		goto end;
+	}
+
+	sde_cfg->vig_formats = kcalloc(vig_list_size,
+		sizeof(struct sde_format_extended), GFP_KERNEL);
+	if (!sde_cfg->vig_formats) {
+		rc = -ENOMEM;
+		goto end;
+	}
+
+	sde_cfg->wb_formats = kcalloc(wb2_list_size,
+		sizeof(struct sde_format_extended), GFP_KERNEL);
+	if (!sde_cfg->wb_formats) {
+		SDE_ERROR("failed to allocate wb format list\n");
+		rc = -ENOMEM;
+		goto end;
+	}
+
+	if (IS_SDE_MAJOR_MINOR_SAME((hw_rev), SDE_HW_VER_300) ||
+		IS_SDE_MAJOR_MINOR_SAME((hw_rev), SDE_HW_VER_301)) {
+		sde_cfg->has_hdr = true;
+	}
+
+	index = _sde_copy_formats(sde_cfg->dma_formats, dma_list_size,
+		0, plane_formats, ARRAY_SIZE(plane_formats));
+	index += _sde_copy_formats(sde_cfg->dma_formats, dma_list_size,
+		index, rgb_10bit_formats,
+		ARRAY_SIZE(rgb_10bit_formats));
+
+	index = _sde_copy_formats(sde_cfg->vig_formats, vig_list_size,
+		0, plane_formats_yuv, ARRAY_SIZE(plane_formats_yuv));
+	index += _sde_copy_formats(sde_cfg->vig_formats, vig_list_size,
+		index, rgb_10bit_formats,
+		ARRAY_SIZE(rgb_10bit_formats));
+	index += _sde_copy_formats(sde_cfg->vig_formats, vig_list_size,
+		index, p010_formats, ARRAY_SIZE(p010_formats));
+
+	index += _sde_copy_formats(sde_cfg->vig_formats, vig_list_size,
+		index, tp10_ubwc_formats,
+		ARRAY_SIZE(tp10_ubwc_formats));
+
+	index = _sde_copy_formats(sde_cfg->wb_formats, wb2_list_size,
+		0, wb2_formats, ARRAY_SIZE(wb2_formats));
+	index += _sde_copy_formats(sde_cfg->wb_formats, wb2_list_size,
+		index, rgb_10bit_formats,
+		ARRAY_SIZE(rgb_10bit_formats));
+	index += _sde_copy_formats(sde_cfg->wb_formats, wb2_list_size,
+		index, tp10_ubwc_formats,
+		ARRAY_SIZE(tp10_ubwc_formats));
+
+	for (i = 0; i < sde_cfg->sspp_count; ++i) {
+		struct sde_sspp_cfg *sspp = &sde_cfg->sspp[i];
+
+		sblk = (struct sde_sspp_sub_blks *)sspp->sblk;
+		switch (sspp->type) {
+		case SSPP_TYPE_VIG:
+			sblk->format_list = sde_cfg->vig_formats;
+			break;
+		case SSPP_TYPE_CURSOR:
+			if (IS_SDE_MAJOR_MINOR_SAME((hw_rev), SDE_HW_VER_300))
+				sblk->format_list = sde_cfg->cursor_formats;
+			else
+				SDE_ERROR("invalid sspp type %d, xin id %d\n",
+					sspp->type, sspp->xin_id);
+			break;
+		case SSPP_TYPE_DMA:
+			sblk->format_list = sde_cfg->dma_formats;
+			break;
+		default:
+			SDE_ERROR("invalid sspp type %d\n", sspp->type);
+			rc = -EINVAL;
+			goto end;
+		}
+	}
+
+	for (i = 0; i < sde_cfg->wb_count; ++i)
+		sde_cfg->wb[i].format_list = sde_cfg->wb_formats;
+
+end:
+	return rc;
+}
+
+static int sde_hardware_caps(struct sde_mdss_cfg *sde_cfg, uint32_t hw_rev)
+{
+	int rc = 0;
+
+	switch (hw_rev) {
+	case SDE_HW_VER_170:
+	case SDE_HW_VER_171:
+	case SDE_HW_VER_172:
+		/* update msm8996 target here */
+		break;
+	case SDE_HW_VER_300:
+	case SDE_HW_VER_301:
+	case SDE_HW_VER_400:
+		/* update cobalt and skunk target here */
+		rc = sde_hardware_format_caps(sde_cfg, hw_rev);
+		break;
+	}
+
+	return rc;
+}
+
+void sde_hw_catalog_deinit(struct sde_mdss_cfg *sde_cfg)
+{
+	int i;
+	struct sde_vp_sub_blks *vp_sub, *vp_sub_next;
+
+	if (!sde_cfg)
+		return;
+
+	for (i = 0; i < sde_cfg->sspp_count; i++)
+		kfree(sde_cfg->sspp[i].sblk);
+
+	for (i = 0; i < sde_cfg->mixer_count; i++)
+		kfree(sde_cfg->mixer[i].sblk);
+
+	for (i = 0; i < sde_cfg->wb_count; i++)
+		kfree(sde_cfg->wb[i].sblk);
+
+	for (i = 0; i < sde_cfg->dspp_count; i++)
+		kfree(sde_cfg->dspp[i].sblk);
+
+	for (i = 0; i < sde_cfg->pingpong_count; i++)
+		kfree(sde_cfg->pingpong[i].sblk);
+
+	for (i = 0; i < sde_cfg->vbif_count; i++) {
+		kfree(sde_cfg->vbif[i].dynamic_ot_rd_tbl.cfg);
+		kfree(sde_cfg->vbif[i].dynamic_ot_wr_tbl.cfg);
+	}
+
+	for (i = 0; i < sde_cfg->vp_count; i++) {
+		list_for_each_entry_safe(vp_sub, vp_sub_next,
+			&sde_cfg->vp[i].sub_blks, pipeid_list) {
+			list_del(&vp_sub->pipeid_list);
+			kfree(vp_sub);
+		}
+	}
+
+	kfree(sde_cfg->dma_formats);
+	kfree(sde_cfg->cursor_formats);
+	kfree(sde_cfg->vig_formats);
+	kfree(sde_cfg->wb_formats);
+
+	kfree(sde_cfg);
+}
+
+/*************************************************************
+ * hardware catalog init
+ *************************************************************/
+struct sde_mdss_cfg *sde_hw_catalog_init(struct drm_device *dev,
+	u32 hw_rev)
+{
+	int rc;
+	struct sde_mdss_cfg *sde_cfg;
+	struct device_node *np = dev->dev->of_node;
+
+	sde_cfg = kzalloc(sizeof(*sde_cfg), GFP_KERNEL);
+	if (!sde_cfg)
+		return ERR_PTR(-ENOMEM);
+
+	sde_cfg->hwversion = hw_rev;
+
+	rc = sde_parse_dt(np, sde_cfg);
+	if (rc)
+		goto end;
+
+	rc = sde_ctl_parse_dt(np, sde_cfg);
+	if (rc)
+		goto end;
+
+	rc = sde_sspp_parse_dt(np, sde_cfg);
+	if (rc)
+		goto end;
+
+	rc = sde_dspp_parse_dt(np, sde_cfg);
+	if (rc)
+		goto end;
+
+	rc = sde_pp_parse_dt(np, sde_cfg);
+	if (rc)
+		goto end;
+
+	/* mixer parsing should be done after dspp and pp for mapping setup */
+	rc = sde_mixer_parse_dt(np, sde_cfg);
+	if (rc)
+		goto end;
+
+	rc = sde_intf_parse_dt(np, sde_cfg);
+	if (rc)
+		goto end;
+
+	rc = sde_wb_parse_dt(np, sde_cfg);
+	if (rc)
+		goto end;
+
+	/* cdm parsing should be done after intf and wb for mapping setup */
+	rc = sde_cdm_parse_dt(np, sde_cfg);
+	if (rc)
+		goto end;
+
+	rc = sde_vbif_parse_dt(np, sde_cfg);
+	if (rc)
+		goto end;
+
+	rc = sde_perf_parse_dt(np, sde_cfg);
+	if (rc)
+		goto end;
+
+	rc = _sde_vp_parse_dt(np, sde_cfg);
+	if (rc)
+		SDE_DEBUG("virtual plane is not supported.\n");
+
+	rc = sde_hardware_caps(sde_cfg, hw_rev);
+	if (rc)
+		goto end;
+
+	return sde_cfg;
+
+end:
+	sde_hw_catalog_deinit(sde_cfg);
+	return NULL;
+}
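+
+/*
+ * Typical usage from the KMS init path (sketch; caller-side variable names
+ * are illustrative):
+ *
+ *	struct sde_mdss_cfg *catalog;
+ *
+ *	catalog = sde_hw_catalog_init(dev, hw_rev);
+ *	if (IS_ERR_OR_NULL(catalog))
+ *		return -EINVAL;
+ *	...
+ *	sde_hw_catalog_deinit(catalog);
+ *
+ * IS_ERR_OR_NULL() covers both failure modes above: ERR_PTR(-ENOMEM) on
+ * allocation failure and NULL when any of the parse steps fails.
+ */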
diff -Nruw linux-4.4.115-fbx/drivers/gpu/drm/msm/sde./sde_hw_catalog_format.h linux-4.4.115-fbx/drivers/gpu/drm/msm/sde/sde_hw_catalog_format.h
--- linux-4.4.115-fbx/drivers/gpu/drm/msm/sde./sde_hw_catalog_format.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/gpu/drm/msm/sde/sde_hw_catalog_format.h	2019-01-22 16:16:23.515246515 +0100
@@ -0,0 +1,177 @@
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include "sde_hw_mdss.h"
+
+static const struct sde_format_extended plane_formats[] = {
+	{DRM_FORMAT_ARGB8888, 0},
+	{DRM_FORMAT_ABGR8888, 0},
+	{DRM_FORMAT_RGBA8888, 0},
+	{DRM_FORMAT_ABGR8888, DRM_FORMAT_MOD_QCOM_COMPRESSED},
+	{DRM_FORMAT_BGRA8888, 0},
+	{DRM_FORMAT_XRGB8888, 0},
+	{DRM_FORMAT_RGBX8888, 0},
+	{DRM_FORMAT_BGRX8888, 0},
+	{DRM_FORMAT_XBGR8888, 0},
+	{DRM_FORMAT_XBGR8888, DRM_FORMAT_MOD_QCOM_COMPRESSED},
+	{DRM_FORMAT_RGB888, 0},
+	{DRM_FORMAT_BGR888, 0},
+	{DRM_FORMAT_RGB565, 0},
+	{DRM_FORMAT_BGR565, DRM_FORMAT_MOD_QCOM_COMPRESSED},
+	{DRM_FORMAT_BGR565, 0},
+	{DRM_FORMAT_ARGB1555, 0},
+	{DRM_FORMAT_ABGR1555, 0},
+	{DRM_FORMAT_RGBA5551, 0},
+	{DRM_FORMAT_BGRA5551, 0},
+	{DRM_FORMAT_XRGB1555, 0},
+	{DRM_FORMAT_XBGR1555, 0},
+	{DRM_FORMAT_RGBX5551, 0},
+	{DRM_FORMAT_BGRX5551, 0},
+	{DRM_FORMAT_ARGB4444, 0},
+	{DRM_FORMAT_ABGR4444, 0},
+	{DRM_FORMAT_RGBA4444, 0},
+	{DRM_FORMAT_BGRA4444, 0},
+	{DRM_FORMAT_XRGB4444, 0},
+	{DRM_FORMAT_XBGR4444, 0},
+	{DRM_FORMAT_RGBX4444, 0},
+	{DRM_FORMAT_BGRX4444, 0},
+	{0, 0},
+};
+
+static const struct sde_format_extended plane_formats_yuv[] = {
+	{DRM_FORMAT_ARGB8888, 0},
+	{DRM_FORMAT_ABGR8888, 0},
+	{DRM_FORMAT_RGBA8888, 0},
+	{DRM_FORMAT_BGRX8888, 0},
+	{DRM_FORMAT_ABGR8888, DRM_FORMAT_MOD_QCOM_COMPRESSED},
+	{DRM_FORMAT_BGRA8888, 0},
+	{DRM_FORMAT_XRGB8888, 0},
+	{DRM_FORMAT_XBGR8888, 0},
+	{DRM_FORMAT_RGBX8888, 0},
+	{DRM_FORMAT_XBGR8888, DRM_FORMAT_MOD_QCOM_COMPRESSED},
+	{DRM_FORMAT_RGB888, 0},
+	{DRM_FORMAT_BGR888, 0},
+	{DRM_FORMAT_RGB565, 0},
+	{DRM_FORMAT_BGR565, DRM_FORMAT_MOD_QCOM_COMPRESSED},
+	{DRM_FORMAT_BGR565, 0},
+	{DRM_FORMAT_ARGB1555, 0},
+	{DRM_FORMAT_ABGR1555, 0},
+	{DRM_FORMAT_RGBA5551, 0},
+	{DRM_FORMAT_BGRA5551, 0},
+	{DRM_FORMAT_XRGB1555, 0},
+	{DRM_FORMAT_XBGR1555, 0},
+	{DRM_FORMAT_RGBX5551, 0},
+	{DRM_FORMAT_BGRX5551, 0},
+	{DRM_FORMAT_ARGB4444, 0},
+	{DRM_FORMAT_ABGR4444, 0},
+	{DRM_FORMAT_RGBA4444, 0},
+	{DRM_FORMAT_BGRA4444, 0},
+	{DRM_FORMAT_XRGB4444, 0},
+	{DRM_FORMAT_XBGR4444, 0},
+	{DRM_FORMAT_RGBX4444, 0},
+	{DRM_FORMAT_BGRX4444, 0},
+
+	{DRM_FORMAT_NV12, 0},
+	{DRM_FORMAT_NV12, DRM_FORMAT_MOD_QCOM_COMPRESSED},
+	{DRM_FORMAT_NV21, 0},
+	{DRM_FORMAT_NV16, 0},
+	{DRM_FORMAT_NV61, 0},
+	{DRM_FORMAT_VYUY, 0},
+	{DRM_FORMAT_UYVY, 0},
+	{DRM_FORMAT_YUYV, 0},
+	{DRM_FORMAT_YVYU, 0},
+	{DRM_FORMAT_YUV420, 0},
+	{DRM_FORMAT_YVU420, 0},
+	{0, 0},
+};
+
+static const struct sde_format_extended cursor_formats[] = {
+	{DRM_FORMAT_ARGB8888, 0},
+	{DRM_FORMAT_ABGR8888, 0},
+	{DRM_FORMAT_RGBA8888, 0},
+	{DRM_FORMAT_BGRA8888, 0},
+	{DRM_FORMAT_XRGB8888, 0},
+	{DRM_FORMAT_ARGB1555, 0},
+	{DRM_FORMAT_ABGR1555, 0},
+	{DRM_FORMAT_RGBA5551, 0},
+	{DRM_FORMAT_BGRA5551, 0},
+	{DRM_FORMAT_ARGB4444, 0},
+	{DRM_FORMAT_ABGR4444, 0},
+	{DRM_FORMAT_RGBA4444, 0},
+	{DRM_FORMAT_BGRA4444, 0},
+	{0, 0},
+};
+
+static const struct sde_format_extended wb2_formats[] = {
+	{DRM_FORMAT_RGB565, 0},
+	{DRM_FORMAT_BGR565, DRM_FORMAT_MOD_QCOM_COMPRESSED},
+	{DRM_FORMAT_RGB888, 0},
+	{DRM_FORMAT_ARGB8888, 0},
+	{DRM_FORMAT_RGBA8888, 0},
+	{DRM_FORMAT_ABGR8888, DRM_FORMAT_MOD_QCOM_COMPRESSED},
+	{DRM_FORMAT_XRGB8888, 0},
+	{DRM_FORMAT_RGBX8888, 0},
+	{DRM_FORMAT_XBGR8888, DRM_FORMAT_MOD_QCOM_COMPRESSED},
+	{DRM_FORMAT_ARGB1555, 0},
+	{DRM_FORMAT_RGBA5551, 0},
+	{DRM_FORMAT_XRGB1555, 0},
+	{DRM_FORMAT_RGBX5551, 0},
+	{DRM_FORMAT_ARGB4444, 0},
+	{DRM_FORMAT_RGBA4444, 0},
+	{DRM_FORMAT_RGBX4444, 0},
+	{DRM_FORMAT_XRGB4444, 0},
+
+	{DRM_FORMAT_BGR565, 0},
+	{DRM_FORMAT_BGR888, 0},
+	{DRM_FORMAT_ABGR8888, 0},
+	{DRM_FORMAT_BGRA8888, 0},
+	{DRM_FORMAT_BGRX8888, 0},
+	{DRM_FORMAT_XBGR8888, 0},
+	{DRM_FORMAT_ABGR1555, 0},
+	{DRM_FORMAT_BGRA5551, 0},
+	{DRM_FORMAT_XBGR1555, 0},
+	{DRM_FORMAT_BGRX5551, 0},
+	{DRM_FORMAT_ABGR4444, 0},
+	{DRM_FORMAT_BGRA4444, 0},
+	{DRM_FORMAT_BGRX4444, 0},
+	{DRM_FORMAT_XBGR4444, 0},
+
+	{DRM_FORMAT_YUV420, 0},
+	{DRM_FORMAT_NV12, 0},
+	{DRM_FORMAT_NV12, DRM_FORMAT_MOD_QCOM_COMPRESSED},
+	{DRM_FORMAT_NV16, 0},
+	{DRM_FORMAT_YUYV, 0},
+
+	{0, 0},
+};
+
+static const struct sde_format_extended rgb_10bit_formats[] = {
+	{DRM_FORMAT_BGRA1010102, 0},
+	{DRM_FORMAT_BGRX1010102, 0},
+	{DRM_FORMAT_RGBA1010102, 0},
+	{DRM_FORMAT_RGBX1010102, 0},
+	{DRM_FORMAT_ABGR2101010, 0},
+	{DRM_FORMAT_ABGR2101010, DRM_FORMAT_MOD_QCOM_COMPRESSED},
+	{DRM_FORMAT_XBGR2101010, 0},
+	{DRM_FORMAT_XBGR2101010, DRM_FORMAT_MOD_QCOM_COMPRESSED},
+	{DRM_FORMAT_ARGB2101010, 0},
+	{DRM_FORMAT_XRGB2101010, 0},
+};
+
+static const struct sde_format_extended p010_formats[] = {
+	{DRM_FORMAT_NV12, DRM_FORMAT_MOD_QCOM_DX},
+};
+
+static const struct sde_format_extended tp10_ubwc_formats[] = {
+	{DRM_FORMAT_NV12, DRM_FORMAT_MOD_QCOM_COMPRESSED |
+		DRM_FORMAT_MOD_QCOM_DX | DRM_FORMAT_MOD_QCOM_TIGHT},
+};
diff -Nruw linux-4.4.115-fbx/drivers/gpu/drm/msm/sde./sde_hw_catalog.h linux-4.4.115-fbx/drivers/gpu/drm/msm/sde/sde_hw_catalog.h
--- linux-4.4.115-fbx/drivers/gpu/drm/msm/sde./sde_hw_catalog.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/gpu/drm/msm/sde/sde_hw_catalog.h	2019-01-22 16:16:23.515246515 +0100
@@ -0,0 +1,763 @@
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _SDE_HW_CATALOG_H
+#define _SDE_HW_CATALOG_H
+
+#include <linux/kernel.h>
+#include <linux/bug.h>
+#include <linux/bitmap.h>
+#include <linux/err.h>
+#include <linux/msm-bus.h>
+#include <drm/drmP.h>
+
+/**
+ * Maximum hardware block count: e.g. at most 12 SSPP pipes or
+ * 5 CTL paths. In all cases, a block type can have at most 12
+ * instances in the current design.
+ */
+#define MAX_BLOCKS    12
+
+#define SDE_HW_VER(MAJOR, MINOR, STEP) (((MAJOR & 0xF) << 28)    |\
+		((MINOR & 0xFFF) << 16)  |\
+		(STEP & 0xFFFF))
+
+#define SDE_HW_MAJOR(rev)		((rev) >> 28)
+#define SDE_HW_MINOR(rev)		(((rev) >> 16) & 0xFFF)
+#define SDE_HW_STEP(rev)		((rev) & 0xFFFF)
+#define SDE_HW_MAJOR_MINOR(rev)		((rev) >> 16)
+
+#define IS_SDE_MAJOR_MINOR_SAME(rev1, rev2)   \
+	(SDE_HW_MAJOR_MINOR((rev1)) == SDE_HW_MAJOR_MINOR((rev2)))
+
+#define SDE_HW_VER_170	SDE_HW_VER(1, 7, 0) /* 8996 v1.0 */
+#define SDE_HW_VER_171	SDE_HW_VER(1, 7, 1) /* 8996 v2.0 */
+#define SDE_HW_VER_172	SDE_HW_VER(1, 7, 2) /* 8996 v3.0 */
+#define SDE_HW_VER_300	SDE_HW_VER(3, 0, 0) /* 8998 v1.0 */
+#define SDE_HW_VER_301	SDE_HW_VER(3, 0, 1) /* 8998 v1.1 */
+#define SDE_HW_VER_400	SDE_HW_VER(4, 0, 0) /* sdm845 v1.0 */
+
+#define IS_MSMSKUNK_TARGET(rev) IS_SDE_MAJOR_MINOR_SAME((rev), SDE_HW_VER_400)
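+
+/*
+ * For illustration, plugging SDE_HW_VER_301 into the macros above:
+ *   SDE_HW_VER(3, 0, 1) = (3 << 28) | (0 << 16) | 1 = 0x30000001
+ *   SDE_HW_MAJOR_MINOR(0x30000001) = 0x3000
+ * so IS_SDE_MAJOR_MINOR_SAME(SDE_HW_VER_300, SDE_HW_VER_301) is true
+ * (same major/minor, different step), while comparing SDE_HW_VER_300
+ * against SDE_HW_VER_400 is false.
+ */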
+
+#define SDE_HW_BLK_NAME_LEN	16
+
+#define MAX_IMG_WIDTH 0x3fff
+#define MAX_IMG_HEIGHT 0x3fff
+
+#define CRTC_DUAL_MIXERS	2
+
+#define SDE_COLOR_PROCESS_VER(MAJOR, MINOR) \
+		((((MAJOR) & 0xFFFF) << 16) | (((MINOR) & 0xFFFF)))
+#define SDE_COLOR_PROCESS_MAJOR(version) (((version) & 0xFFFF0000) >> 16)
+#define SDE_COLOR_PROCESS_MINOR(version) ((version) & 0xFFFF)
+
+/**
+ * MDP TOP BLOCK features
+ * @SDE_MDP_PANIC_PER_PIPE Panic configuration needs to be done per pipe
+ * @SDE_MDP_10BIT_SUPPORT, Chipset supports 10 bit pixel formats
+ * @SDE_MDP_BWC,           MDSS HW supports Bandwidth compression.
+ * @SDE_MDP_UBWC_1_0,      This chipset supports Universal Bandwidth
+ *                         compression initial revision
+ * @SDE_MDP_UBWC_1_5,      Universal Bandwidth compression version 1.5
+ * @SDE_MDP_CDP,           Client driven prefetch
+ * @SDE_MDP_MAX            Maximum value
+ */
+enum {
+	SDE_MDP_PANIC_PER_PIPE = 0x1,
+	SDE_MDP_10BIT_SUPPORT,
+	SDE_MDP_BWC,
+	SDE_MDP_UBWC_1_0,
+	SDE_MDP_UBWC_1_5,
+	SDE_MDP_CDP,
+	SDE_MDP_MAX
+};
+
+/**
+ * SSPP sub-blocks/features
+ * @SDE_SSPP_SRC             Src and fetch part of the pipes,
+ * @SDE_SSPP_SCALER_QSEED2,  QSEED2 algorithm support
+ * @SDE_SSPP_SCALER_QSEED3,  QSEED3 algorithm support
+ * @SDE_SSPP_SCALER_RGB,     RGB Scaler, supported by RGB pipes
+ * @SDE_SSPP_CSC,            Support of Color space conversion
+ * @SDE_SSPP_CSC_10BIT,      Support of 10-bit Color space conversion
+ * @SDE_SSPP_HSIC,           Global HSIC control
+ * @SDE_SSPP_MEMCOLOR        Memory Color Support
+ * @SDE_SSPP_IGC,            Inverse gamma correction
+ * @SDE_SSPP_PCC,            Color correction support
+ * @SDE_SSPP_CURSOR,         SSPP can be used as a cursor layer
+ * @SDE_SSPP_QOS,            SSPP support QoS control, danger/safe/creq
+ * @SDE_SSPP_MAX             maximum value
+ */
+enum {
+	SDE_SSPP_SRC = 0x1,
+	SDE_SSPP_SCALER_QSEED2,
+	SDE_SSPP_SCALER_QSEED3,
+	SDE_SSPP_SCALER_RGB,
+	SDE_SSPP_CSC,
+	SDE_SSPP_CSC_10BIT,
+	SDE_SSPP_HSIC,
+	SDE_SSPP_MEMCOLOR,
+	SDE_SSPP_IGC,
+	SDE_SSPP_PCC,
+	SDE_SSPP_CURSOR,
+	SDE_SSPP_QOS,
+	SDE_SSPP_MAX
+};
+
+/*
+ * MIXER sub-blocks/features
+ * @SDE_MIXER_LAYER           Layer mixer layer blend configuration,
+ * @SDE_MIXER_SOURCESPLIT     Layer mixer supports source-split configuration
+ * @SDE_MIXER_GC              Gamma correction block
+ * @SDE_MIXER_MAX             maximum value
+ */
+enum {
+	SDE_MIXER_LAYER = 0x1,
+	SDE_MIXER_SOURCESPLIT,
+	SDE_MIXER_GC,
+	SDE_MIXER_MAX
+};
+
+/**
+ * DSPP sub-blocks
+ * @SDE_DSPP_IGC             DSPP Inverse gamma correction block
+ * @SDE_DSPP_PCC             Panel color correction block
+ * @SDE_DSPP_GC              Gamma correction block
+ * @SDE_DSPP_HSIC            Global HSIC block
+ * @SDE_DSPP_MEMCOLOR        Memory Color block
+ * @SDE_DSPP_SIXZONE         Six zone block
+ * @SDE_DSPP_GAMUT           Gamut block
+ * @SDE_DSPP_DITHER          Dither block
+ * @SDE_DSPP_HIST            Histogram block
+ * @SDE_DSPP_VLUT            PA VLUT block
+ * @SDE_DSPP_AD              AD block
+ * @SDE_DSPP_MAX             maximum value
+ */
+enum {
+	SDE_DSPP_IGC = 0x1,
+	SDE_DSPP_PCC,
+	SDE_DSPP_GC,
+	SDE_DSPP_HSIC,
+	SDE_DSPP_MEMCOLOR,
+	SDE_DSPP_SIXZONE,
+	SDE_DSPP_GAMUT,
+	SDE_DSPP_DITHER,
+	SDE_DSPP_HIST,
+	SDE_DSPP_VLUT,
+	SDE_DSPP_AD,
+	SDE_DSPP_MAX
+};
+
+/**
+ * PINGPONG sub-blocks
+ * @SDE_PINGPONG_TE         Tear check block
+ * @SDE_PINGPONG_TE2        Additional tear check block for split pipes
+ * @SDE_PINGPONG_SPLIT      PP block supports split fifo
+ * @SDE_PINGPONG_SLAVE      PP block is a suitable slave for split fifo
+ * @SDE_PINGPONG_DSC,       Display stream compression blocks
+ * @SDE_PINGPONG_MAX
+ */
+enum {
+	SDE_PINGPONG_TE = 0x1,
+	SDE_PINGPONG_TE2,
+	SDE_PINGPONG_SPLIT,
+	SDE_PINGPONG_SLAVE,
+	SDE_PINGPONG_DSC,
+	SDE_PINGPONG_MAX
+};
+
+/**
+ * CTL sub-blocks
+ * @SDE_CTL_SPLIT_DISPLAY       CTL supports video mode split display
+ * @SDE_CTL_PINGPONG_SPLIT      CTL supports pingpong split
+ * @SDE_CTL_MAX
+ */
+enum {
+	SDE_CTL_SPLIT_DISPLAY = 0x1,
+	SDE_CTL_PINGPONG_SPLIT,
+	SDE_CTL_MAX
+};
+
+/**
+ * WB sub-blocks and features
+ * @SDE_WB_LINE_MODE        Writeback module supports line/linear mode
+ * @SDE_WB_BLOCK_MODE       Writeback module supports block mode read
+ * @SDE_WB_ROTATE           rotation support, this is available if writeback
+ *                          supports block mode read
+ * @SDE_WB_CSC              Writeback color conversion block support
+ * @SDE_WB_CHROMA_DOWN,     Writeback chroma down block,
+ * @SDE_WB_DOWNSCALE,       Writeback integer downscaler,
+ * @SDE_WB_DITHER,          Dither block
+ * @SDE_WB_TRAFFIC_SHAPER,  Writeback traffic shaper block
+ * @SDE_WB_UBWC_1_0,        Writeback Universal bandwidth compression 1.0
+ *                          support
+ * @SDE_WB_YUV_CONFIG       Writeback supports output of YUV colorspace
+ * @SDE_WB_PIPE_ALPHA       Writeback supports pipe alpha
+ * @SDE_WB_XY_ROI_OFFSET    Writeback supports x/y-offset of out ROI in
+ *                          the destination image
+ * @SDE_WB_MAX              maximum value
+ */
+enum {
+	SDE_WB_LINE_MODE = 0x1,
+	SDE_WB_BLOCK_MODE,
+	SDE_WB_ROTATE = SDE_WB_BLOCK_MODE,
+	SDE_WB_CSC,
+	SDE_WB_CHROMA_DOWN,
+	SDE_WB_DOWNSCALE,
+	SDE_WB_DITHER,
+	SDE_WB_TRAFFIC_SHAPER,
+	SDE_WB_UBWC_1_0,
+	SDE_WB_YUV_CONFIG,
+	SDE_WB_PIPE_ALPHA,
+	SDE_WB_XY_ROI_OFFSET,
+	SDE_WB_MAX
+};
+
+/**
+ * VBIF sub-blocks and features
+ * @SDE_VBIF_QOS_OTLIM        VBIF supports OT Limit
+ * @SDE_VBIF_MAX              maximum value
+ */
+enum {
+	SDE_VBIF_QOS_OTLIM = 0x1,
+	SDE_VBIF_MAX
+};
+
+/**
+ * MACRO SDE_HW_BLK_INFO - information of HW blocks inside SDE
+ * @name:              string name for debug purposes
+ * @id:                enum identifying this block
+ * @base:              register base offset to mdss
+ * @len:               length of hardware block
+ * @features           bit mask identifying sub-blocks/features
+ */
+#define SDE_HW_BLK_INFO \
+	char name[SDE_HW_BLK_NAME_LEN]; \
+	u32 id; \
+	u32 base; \
+	u32 len; \
+	unsigned long features;
+
+/**
+ * MACRO SDE_HW_SUBBLK_INFO - information of HW sub-block inside SDE
+ * @name:              string name for debug purposes
+ * @id:                enum identifying this sub-block
+ * @base:              offset of this sub-block relative to the block
+ *                     offset
+ * @len                register block length of this sub-block
+ */
+#define SDE_HW_SUBBLK_INFO \
+	char name[SDE_HW_BLK_NAME_LEN]; \
+	u32 id; \
+	u32 base; \
+	u32 len
+
+/**
+ * struct sde_src_blk: SSPP part of the source pipes
+ * @info:   HW register and features supported by this sub-blk
+ */
+struct sde_src_blk {
+	SDE_HW_SUBBLK_INFO;
+};
+
+/**
+ * struct sde_scaler_blk: Scaler information
+ * @info:   HW register and features supported by this sub-blk
+ * @version: qseed block revision
+ */
+struct sde_scaler_blk {
+	SDE_HW_SUBBLK_INFO;
+	u32 version;
+};
+
+struct sde_csc_blk {
+	SDE_HW_SUBBLK_INFO;
+};
+
+/**
+ * struct sde_pp_blk : Pixel processing sub-blk information
+ * @info:   HW register and features supported by this sub-blk
+ * @version: HW Algorithm version
+ */
+struct sde_pp_blk {
+	SDE_HW_SUBBLK_INFO;
+	u32 version;
+};
+
+/**
+ * struct sde_format_extended - define sde specific pixel format+modifier
+ * @fourcc_format: Base FOURCC pixel format code
+ * @modifier: 64-bit drm format modifier, same modifier must be applied to all
+ *            framebuffer planes
+ */
+struct sde_format_extended {
+	uint32_t fourcc_format;
+	uint64_t modifier;
+};
+
+/**
+ * struct sde_sspp_sub_blks : SSPP sub-blocks
+ * @maxdwnscale: max downscale ratio supported (without DECIMATION)
+ * @maxupscale:  max upscale ratio supported
+ * @maxlinewidth: max line width supported by this pipe
+ * @danger_lut_linear: LUT to generate danger signals for linear format
+ * @safe_lut_linear: LUT to generate safe signals for linear format
+ * @danger_lut_tile: LUT to generate danger signals for tile format
+ * @safe_lut_tile: LUT to generate safe signals for tile format
+ * @danger_lut_nrt: LUT to generate danger signals for non-realtime use case
+ * @safe_lut_nrt: LUT to generate safe signals for non-realtime use case
+ * @creq_lut_nrt: LUT to generate creq signals for non-realtime use case
+ * @creq_vblank: creq priority during vertical blanking
+ * @danger_vblank: danger priority during vertical blanking
+ * @pixel_ram_size: size of latency hiding and de-tiling buffer in bytes
+ * @src_blk:
+ * @scaler_blk:
+ * @csc_blk:
+ * @hsic_blk:
+ * @memcolor_blk:
+ * @pcc_blk:
+ * @igc_blk:
+ * @format_list: Pointer to list of supported formats
+ */
+struct sde_sspp_sub_blks {
+	u32 maxlinewidth;
+	u32 danger_lut_linear;
+	u32 safe_lut_linear;
+	u32 danger_lut_tile;
+	u32 safe_lut_tile;
+	u32 danger_lut_nrt;
+	u32 safe_lut_nrt;
+	u32 creq_lut_nrt;
+	u32 creq_vblank;
+	u32 danger_vblank;
+	u32 pixel_ram_size;
+	u32 maxdwnscale;
+	u32 maxupscale;
+	u32 maxhdeciexp; /* max decimation is 2^value */
+	u32 maxvdeciexp; /* max decimation is 2^value */
+	struct sde_src_blk src_blk;
+	struct sde_scaler_blk scaler_blk;
+	struct sde_pp_blk csc_blk;
+	struct sde_pp_blk hsic_blk;
+	struct sde_pp_blk memcolor_blk;
+	struct sde_pp_blk pcc_blk;
+	struct sde_pp_blk igc_blk;
+
+	const struct sde_format_extended *format_list;
+};
+
+/**
+ * struct sde_lm_sub_blks:      information of mixer block
+ * @maxwidth:               Max pixel width supported by this mixer
+ * @maxblendstages:         Max number of blend-stages supported
+ * @blendstage_base:        Blend-stage register base offset
+ * @gc: gamma correction block
+ */
+struct sde_lm_sub_blks {
+	u32 maxwidth;
+	u32 maxblendstages;
+	u32 blendstage_base[MAX_BLOCKS];
+	struct sde_pp_blk gc;
+};
+
+struct sde_dspp_sub_blks {
+	struct sde_pp_blk igc;
+	struct sde_pp_blk pcc;
+	struct sde_pp_blk gc;
+	struct sde_pp_blk hsic;
+	struct sde_pp_blk memcolor;
+	struct sde_pp_blk sixzone;
+	struct sde_pp_blk gamut;
+	struct sde_pp_blk dither;
+	struct sde_pp_blk hist;
+	struct sde_pp_blk ad;
+	struct sde_pp_blk vlut;
+};
+
+struct sde_pingpong_sub_blks {
+	struct sde_pp_blk te;
+	struct sde_pp_blk te2;
+	struct sde_pp_blk dsc;
+};
+
+struct sde_wb_sub_blocks {
+	u32 maxlinewidth;
+};
+
+struct sde_mdss_base_cfg {
+	SDE_HW_BLK_INFO;
+};
+
+/**
+ * sde_clk_ctrl_type - Defines top level clock control signals
+ */
+enum sde_clk_ctrl_type {
+	SDE_CLK_CTRL_NONE,
+	SDE_CLK_CTRL_VIG0,
+	SDE_CLK_CTRL_VIG1,
+	SDE_CLK_CTRL_VIG2,
+	SDE_CLK_CTRL_VIG3,
+	SDE_CLK_CTRL_VIG4,
+	SDE_CLK_CTRL_RGB0,
+	SDE_CLK_CTRL_RGB1,
+	SDE_CLK_CTRL_RGB2,
+	SDE_CLK_CTRL_RGB3,
+	SDE_CLK_CTRL_DMA0,
+	SDE_CLK_CTRL_DMA1,
+	SDE_CLK_CTRL_CURSOR0,
+	SDE_CLK_CTRL_CURSOR1,
+	SDE_CLK_CTRL_WB0,
+	SDE_CLK_CTRL_WB1,
+	SDE_CLK_CTRL_WB2,
+	SDE_CLK_CTRL_MAX,
+};
+
+/* struct sde_clk_ctrl_reg : Clock control register
+ * @reg_off:           register offset
+ * @bit_off:           bit offset
+ */
+struct sde_clk_ctrl_reg {
+	u32 reg_off;
+	u32 bit_off;
+};
+
+/* struct sde_mdp_cfg : MDP TOP-BLK instance info
+ * @id:                index identifying this block
+ * @base:              register base offset to mdss
+ * @features           bit mask identifying sub-blocks/features
+ * @highest_bank_bit:  UBWC parameter
+ * @clk_ctrls          clock control register definition
+ */
+struct sde_mdp_cfg {
+	SDE_HW_BLK_INFO;
+	u32 highest_bank_bit;
+	struct sde_clk_ctrl_reg clk_ctrls[SDE_CLK_CTRL_MAX];
+};
+
+/* struct sde_ctl_cfg : CTL instance info
+ * @id:                index identifying this block
+ * @base:              register base offset to mdss
+ * @features           bit mask identifying sub-blocks/features
+ */
+struct sde_ctl_cfg {
+	SDE_HW_BLK_INFO;
+};
+
+/**
+ * struct sde_sspp_cfg - information of source pipes
+ * @id:                index identifying this block
+ * @base               register offset of this block
+ * @features           bit mask identifying sub-blocks/features
+ * @sblk:              SSPP sub-blocks information
+ * @xin_id:            bus client identifier
+ * @clk_ctrl           clock control identifier
+ * @type               sspp type identifier
+ */
+struct sde_sspp_cfg {
+	SDE_HW_BLK_INFO;
+	const struct sde_sspp_sub_blks *sblk;
+	u32 xin_id;
+	enum sde_clk_ctrl_type clk_ctrl;
+	u32 type;
+};
+
+/**
+ * struct sde_lm_cfg - information of layer mixer blocks
+ * @id:                index identifying this block
+ * @base               register offset of this block
+ * @features           bit mask identifying sub-blocks/features
+ * @sblk:              LM Sub-blocks information
+ * @dspp:              ID of connected DSPP, DSPP_MAX if unsupported
+ * @pingpong:          ID of connected PingPong, PINGPONG_MAX if unsupported
+ * @lm_pair_mask:      Bitmask of LMs that can be controlled by same CTL
+ */
+struct sde_lm_cfg {
+	SDE_HW_BLK_INFO;
+	const struct sde_lm_sub_blks *sblk;
+	u32 dspp;
+	u32 pingpong;
+	unsigned long lm_pair_mask;
+};
+
+/**
+ * struct sde_dspp_cfg - information of DSPP blocks
+ * @id                 enum identifying this block
+ * @base               register offset of this block
+ * @features           bit mask identifying sub-blocks/features
+ *                     supported by this block
+ * @sblk               sub-blocks information
+ */
+struct sde_dspp_cfg {
+	SDE_HW_BLK_INFO;
+	const struct sde_dspp_sub_blks *sblk;
+};
+
+/**
+ * struct sde_pingpong_cfg - information of PING-PONG blocks
+ * @id                 enum identifying this block
+ * @base               register offset of this block
+ * @features           bit mask identifying sub-blocks/features
+ * @sblk               sub-blocks information
+ */
+struct sde_pingpong_cfg {
+	SDE_HW_BLK_INFO;
+	const struct sde_pingpong_sub_blks *sblk;
+};
+
+/**
+ * struct sde_cdm_cfg - information of chroma down blocks
+ * @id                 enum identifying this block
+ * @base               register offset of this block
+ * @features           bit mask identifying sub-blocks/features
+ * @intf_connect       Bitmask of INTF IDs this CDM can connect to
+ * @wb_connect:        Bitmask of Writeback IDs this CDM can connect to
+ */
+struct sde_cdm_cfg {
+	SDE_HW_BLK_INFO;
+	unsigned long intf_connect;
+	unsigned long wb_connect;
+};
+
+/**
+ * struct sde_intf_cfg - information of timing engine blocks
+ * @id                 enum identifying this block
+ * @base               register offset of this block
+ * @features           bit mask identifying sub-blocks/features
+ * @type:              Interface type (DSI, DP, HDMI)
+ * @controller_id:     Controller instance ID when multiple controllers of
+ *                     the same intf type exist
+ * @prog_fetch_lines_worst_case	Worst-case number of lines needed to prefetch
+ */
+struct sde_intf_cfg {
+	SDE_HW_BLK_INFO;
+	u32 type;   /* interface type */
+	u32 controller_id;
+	u32 prog_fetch_lines_worst_case;
+};
+
+/**
+ * struct sde_wb_cfg - information of writeback blocks
+ * @id                 enum identifying this block
+ * @base               register offset of this block
+ * @features           bit mask identifying sub-blocks/features
+ * @sblk               sub-block information
+ * @format_list: Pointer to list of supported formats
+ * @vbif_idx           vbif identifier
+ * @xin_id             client interface identifier
+ * @clk_ctrl           clock control identifier
+ */
+struct sde_wb_cfg {
+	SDE_HW_BLK_INFO;
+	const struct sde_wb_sub_blocks *sblk;
+	const struct sde_format_extended *format_list;
+	u32 vbif_idx;
+	u32 xin_id;
+	enum sde_clk_ctrl_type clk_ctrl;
+};
+
+/**
+ * struct sde_vbif_dynamic_ot_cfg - dynamic OT setting
+ * @pps                pixels per second
+ * @ot_limit           OT limit to use up to specified pixel per second
+ */
+struct sde_vbif_dynamic_ot_cfg {
+	u64 pps;
+	u32 ot_limit;
+};
+
+/**
+ * struct sde_vbif_dynamic_ot_tbl - dynamic OT setting table
+ * @count              length of cfg
+ * @cfg                pointer to array of configuration settings with
+ *                     ascending requirements
+ */
+struct sde_vbif_dynamic_ot_tbl {
+	u32 count;
+	struct sde_vbif_dynamic_ot_cfg *cfg;
+};
+
+/**
+ * struct sde_vbif_cfg - information of VBIF blocks
+ * @id                 enum identifying this block
+ * @base               register offset of this block
+ * @features           bit mask identifying sub-blocks/features
+ * @ot_rd_limit        default OT read limit
+ * @ot_wr_limit        default OT write limit
+ * @xin_halt_timeout   maximum time (in usec) for xin to halt
+ * @dynamic_ot_rd_tbl  dynamic OT read configuration table
+ * @dynamic_ot_wr_tbl  dynamic OT write configuration table
+ */
+struct sde_vbif_cfg {
+	SDE_HW_BLK_INFO;
+	u32 default_ot_rd_limit;
+	u32 default_ot_wr_limit;
+	u32 xin_halt_timeout;
+	struct sde_vbif_dynamic_ot_tbl dynamic_ot_rd_tbl;
+	struct sde_vbif_dynamic_ot_tbl dynamic_ot_wr_tbl;
+};
+
+/**
+ * struct sde_perf_cfg - performance control settings
+ * @max_bw_low         low threshold of maximum bandwidth (kbps)
+ * @max_bw_high        high threshold of maximum bandwidth (kbps)
+ */
+struct sde_perf_cfg {
+	u32 max_bw_low;
+	u32 max_bw_high;
+};
+
+/**
+ * struct sde_vp_sub_blks - Virtual Plane sub-blocks
+ * @pipeid_list             list for hw pipe id
+ * @sspp_id                 SSPP ID, refer to enum sde_sspp.
+ */
+struct sde_vp_sub_blks {
+	struct list_head pipeid_list;
+	u32 sspp_id;
+};
+
+/**
+ * struct sde_vp_cfg - information of Virtual Plane SW blocks
+ * @id                 enum identifying this block
+ * @sub_blks           list head for virtual plane sub blocks
+ * @plane_type         plane type, such as primary, overlay or cursor
+ * @display_type       which display the plane is bound to, such as primary,
+ *                     secondary or tertiary
+ */
+struct sde_vp_cfg {
+	u32 id;
+	struct list_head sub_blks;
+	const char *plane_type;
+	const char *display_type;
+};
+
+/**
+ * struct sde_mdss_cfg - information of MDSS HW
+ * This is the main catalog data structure representing
+ * this HW version. Contains number of instances,
+ * register offsets, capabilities of the all MDSS HW sub-blocks.
+ *
+ * @max_sspp_linewidth max source pipe line width support.
+ * @max_mixer_width    max layer mixer line width support.
+ * @max_mixer_blendstages max layer mixer blend stages or
+ *                       supported z order
+ * @max_wb_linewidth   max writeback line width support.
+ * @highest_bank_bit   highest memory bit setting for tile buffers.
+ * @qseed_type         qseed2 or qseed3 support.
+ * @csc_type           csc or csc_10bit support.
+ * @has_src_split      source split feature status
+ * @has_cdp            Client driver prefetch feature status
+ * @has_hdr            HDR feature support
+ * @dma_formats        Supported formats for dma pipe
+ * @cursor_formats     Supported formats for cursor pipe
+ * @vig_formats        Supported formats for vig pipe
+ * @wb_formats         Supported formats for wb
+ */
+struct sde_mdss_cfg {
+	u32 hwversion;
+
+	u32 max_sspp_linewidth;
+	u32 max_mixer_width;
+	u32 max_mixer_blendstages;
+	u32 max_wb_linewidth;
+	u32 highest_bank_bit;
+	u32 qseed_type;
+	u32 csc_type;
+	bool has_src_split;
+	bool has_cdp;
+	bool has_hdr;
+	u32 mdss_count;
+	struct sde_mdss_base_cfg mdss[MAX_BLOCKS];
+
+	u32 mdp_count;
+	struct sde_mdp_cfg mdp[MAX_BLOCKS];
+
+	u32 ctl_count;
+	struct sde_ctl_cfg ctl[MAX_BLOCKS];
+
+	u32 sspp_count;
+	struct sde_sspp_cfg sspp[MAX_BLOCKS];
+
+	u32 mixer_count;
+	struct sde_lm_cfg mixer[MAX_BLOCKS];
+
+	u32 dspp_count;
+	struct sde_dspp_cfg dspp[MAX_BLOCKS];
+
+	u32 pingpong_count;
+	struct sde_pingpong_cfg pingpong[MAX_BLOCKS];
+
+	u32 cdm_count;
+	struct sde_cdm_cfg cdm[MAX_BLOCKS];
+
+	u32 intf_count;
+	struct sde_intf_cfg intf[MAX_BLOCKS];
+
+	u32 wb_count;
+	struct sde_wb_cfg wb[MAX_BLOCKS];
+
+	u32 vbif_count;
+	struct sde_vbif_cfg vbif[MAX_BLOCKS];
+	/* Add additional block data structures here */
+
+	struct sde_perf_cfg perf;
+
+	u32 vp_count;
+	struct sde_vp_cfg vp[MAX_BLOCKS];
+
+	struct sde_format_extended *dma_formats;
+	struct sde_format_extended *cursor_formats;
+	struct sde_format_extended *vig_formats;
+	struct sde_format_extended *wb_formats;
+};
+
+struct sde_mdss_hw_cfg_handler {
+	u32 major;
+	u32 minor;
+	struct sde_mdss_cfg *(*cfg_init)(u32);
+};
+
+/*
+ * Access Macros
+ */
+#define BLK_MDP(s) ((s)->mdp)
+#define BLK_CTL(s) ((s)->ctl)
+#define BLK_VIG(s) ((s)->vig)
+#define BLK_RGB(s) ((s)->rgb)
+#define BLK_DMA(s) ((s)->dma)
+#define BLK_CURSOR(s) ((s)->cursor)
+#define BLK_MIXER(s) ((s)->mixer)
+#define BLK_DSPP(s) ((s)->dspp)
+#define BLK_PINGPONG(s) ((s)->pingpong)
+#define BLK_CDM(s) ((s)->cdm)
+#define BLK_INTF(s) ((s)->intf)
+#define BLK_WB(s) ((s)->wb)
+#define BLK_AD(s) ((s)->ad)
+
+/**
+ * sde_hw_catalog_init - sde hardware catalog init API; parses dtsi properties
+ * and stores all parsed offsets and hardware capabilities in the config
+ * structure.
+ * @dev:          drm device node.
+ * @hw_rev:       caller must provide the hardware revision before parsing.
+ *
+ * Return: parsed sde config structure
+ */
+struct sde_mdss_cfg *sde_hw_catalog_init(struct drm_device *dev, u32 hw_rev);
+
+/**
+ * sde_hw_catalog_deinit - sde hardware catalog cleanup
+ * @sde_cfg:      pointer returned from init function
+ */
+void sde_hw_catalog_deinit(struct sde_mdss_cfg *sde_cfg);
+
+#endif /* _SDE_HW_CATALOG_H */
diff -Nruw linux-4.4.115-fbx/drivers/gpu/drm/msm/sde./sde_hw_cdm.c linux-4.4.115-fbx/drivers/gpu/drm/msm/sde/sde_hw_cdm.c
--- linux-4.4.115-fbx/drivers/gpu/drm/msm/sde./sde_hw_cdm.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/gpu/drm/msm/sde/sde_hw_cdm.c	2019-10-29 09:26:23.641203159 +0100
@@ -0,0 +1,309 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include "sde_hw_mdss.h"
+#include "sde_hwio.h"
+#include "sde_hw_catalog.h"
+#include "sde_hw_cdm.h"
+#include "sde_dbg.h"
+
+#define CDM_CSC_10_OPMODE                  0x000
+#define CDM_CSC_10_BASE                    0x004
+
+#define CDM_CDWN2_OP_MODE                  0x100
+#define CDM_CDWN2_CLAMP_OUT                0x104
+#define CDM_CDWN2_PARAMS_3D_0              0x108
+#define CDM_CDWN2_PARAMS_3D_1              0x10C
+#define CDM_CDWN2_COEFF_COSITE_H_0         0x110
+#define CDM_CDWN2_COEFF_COSITE_H_1         0x114
+#define CDM_CDWN2_COEFF_COSITE_H_2         0x118
+#define CDM_CDWN2_COEFF_OFFSITE_H_0        0x11C
+#define CDM_CDWN2_COEFF_OFFSITE_H_1        0x120
+#define CDM_CDWN2_COEFF_OFFSITE_H_2        0x124
+#define CDM_CDWN2_COEFF_COSITE_V           0x128
+#define CDM_CDWN2_COEFF_OFFSITE_V          0x12C
+#define CDM_CDWN2_OUT_SIZE                 0x130
+
+#define CDM_HDMI_PACK_OP_MODE              0x200
+#define CDM_CSC_10_MATRIX_COEFF_0          0x004
+
+/**
+ * Horizontal coefficients for cosite chroma downscale
+ * s13 representation of coefficients
+ */
+static u32 cosite_h_coeff[] = {0x00000016, 0x000001cc, 0x0100009e};
+
+/**
+ * Horizontal coefficients for offsite chroma downscale
+ */
+static u32 offsite_h_coeff[] = {0x000b0005, 0x01db01eb, 0x00e40046};
+
+/**
+ * Vertical coefficients for cosite chroma downscale
+ */
+static u32 cosite_v_coeff[] = {0x00080004};
+/**
+ * Vertical coefficients for offsite chroma downscale
+ */
+static u32 offsite_v_coeff[] = {0x00060002};
+
+/* Limited Range rgb2yuv coeff with clamp and bias values for CSC 10 module */
+static struct sde_csc_cfg rgb2yuv_cfg = {
+	{
+		0x0083, 0x0102, 0x0032,
+		0x1fb5, 0x1f6c, 0x00e1,
+		0x00e1, 0x1f45, 0x1fdc
+	},
+	{ 0x00, 0x00, 0x00 },
+	{ 0x0040, 0x0200, 0x0200 },
+	{ 0x000, 0x3ff, 0x000, 0x3ff, 0x000, 0x3ff },
+	{ 0x040, 0x3ac, 0x040, 0x3c0, 0x040, 0x3c0 },
+};
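+
+/*
+ * The initializer above fills sde_csc_cfg in field order (assuming the
+ * usual layout of that struct): 3x3 coefficient matrix, pre-bias vector,
+ * post-bias vector, then pre- and post-clamp min/max pairs per component.
+ * The post clamp of 0x040..0x3ac for luma and 0x040..0x3c0 for chroma is
+ * what makes this the limited-range variant.
+ */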
+
+static struct sde_cdm_cfg *_cdm_offset(enum sde_cdm cdm,
+		struct sde_mdss_cfg *m,
+		void __iomem *addr,
+		struct sde_hw_blk_reg_map *b)
+{
+	int i;
+
+	for (i = 0; i < m->cdm_count; i++) {
+		if (cdm == m->cdm[i].id) {
+			b->base_off = addr;
+			b->blk_off = m->cdm[i].base;
+			b->length = m->cdm[i].len;
+			b->hwversion = m->hwversion;
+			b->log_mask = SDE_DBG_MASK_CDM;
+			return &m->cdm[i];
+		}
+	}
+
+	return ERR_PTR(-EINVAL);
+}
+
+static int sde_hw_cdm_setup_csc_10bit(struct sde_hw_cdm *ctx,
+		struct sde_csc_cfg *data)
+{
+	sde_hw_csc_setup(&ctx->hw, CDM_CSC_10_MATRIX_COEFF_0, data, true);
+
+	return 0;
+}
+
+static int sde_hw_cdm_setup_cdwn(struct sde_hw_cdm *ctx,
+		struct sde_hw_cdm_cfg *cfg)
+{
+	struct sde_hw_blk_reg_map *c = &ctx->hw;
+	u32 opmode = 0;
+	u32 out_size = 0;
+
+	if (cfg->output_bit_depth == CDM_CDWN_OUTPUT_10BIT)
+		opmode &= ~BIT(7);
+	else
+		opmode |= BIT(7);
+
+	/* ENABLE DWNS_H bit */
+	opmode |= BIT(1);
+
+	switch (cfg->h_cdwn_type) {
+	case CDM_CDWN_DISABLE:
+		/* CLEAR METHOD_H field */
+		opmode &= ~(0x18);
+		/* CLEAR DWNS_H bit */
+		opmode &= ~BIT(1);
+		break;
+	case CDM_CDWN_PIXEL_DROP:
+		/* Clear METHOD_H field (pixel drop is 0) */
+		opmode &= ~(0x18);
+		break;
+	case CDM_CDWN_AVG:
+		/* Clear METHOD_H field (Average is 0x1) */
+		opmode &= ~(0x18);
+		opmode |= (0x1 << 0x3);
+		break;
+	case CDM_CDWN_COSITE:
+		/* Clear METHOD_H field (Average is 0x2) */
+		opmode &= ~(0x18);
+		opmode |= (0x2 << 0x3);
+		/* Co-site horizontal coefficients */
+		SDE_REG_WRITE(c, CDM_CDWN2_COEFF_COSITE_H_0,
+				cosite_h_coeff[0]);
+		SDE_REG_WRITE(c, CDM_CDWN2_COEFF_COSITE_H_1,
+				cosite_h_coeff[1]);
+		SDE_REG_WRITE(c, CDM_CDWN2_COEFF_COSITE_H_2,
+				cosite_h_coeff[2]);
+		break;
+	case CDM_CDWN_OFFSITE:
+		/* Clear METHOD_H field (Average is 0x3) */
+		opmode &= ~(0x18);
+		opmode |= (0x3 << 0x3);
+
+		/* Off-site horizontal coefficients */
+		SDE_REG_WRITE(c, CDM_CDWN2_COEFF_OFFSITE_H_0,
+				offsite_h_coeff[0]);
+		SDE_REG_WRITE(c, CDM_CDWN2_COEFF_OFFSITE_H_1,
+				offsite_h_coeff[1]);
+		SDE_REG_WRITE(c, CDM_CDWN2_COEFF_OFFSITE_H_2,
+				offsite_h_coeff[2]);
+		break;
+	default:
+		pr_err("%s invalid horz down sampling type\n", __func__);
+		return -EINVAL;
+	}
+
+	/* ENABLE DWNS_V bit */
+	opmode |= BIT(2);
+
+	switch (cfg->v_cdwn_type) {
+	case CDM_CDWN_DISABLE:
+		/* CLEAR METHOD_V field */
+		opmode &= ~(0x60);
+		/* CLEAR DWNS_V bit */
+		opmode &= ~BIT(2);
+		break;
+	case CDM_CDWN_PIXEL_DROP:
+		/* Clear METHOD_V field (pixel drop is 0) */
+		opmode &= ~(0x60);
+		break;
+	case CDM_CDWN_AVG:
+		/* Clear METHOD_V field (Average is 0x1) */
+		opmode &= ~(0x60);
+		opmode |= (0x1 << 0x5);
+		break;
+	case CDM_CDWN_COSITE:
+		/* Clear METHOD_V field (Average is 0x2) */
+		opmode &= ~(0x60);
+		opmode |= (0x2 << 0x5);
+		/* Co-site vertical coefficients */
+		SDE_REG_WRITE(c,
+				CDM_CDWN2_COEFF_COSITE_V,
+				cosite_v_coeff[0]);
+		break;
+	case CDM_CDWN_OFFSITE:
+		/* Clear METHOD_V field (Average is 0x3) */
+		opmode &= ~(0x60);
+		opmode |= (0x3 << 0x5);
+
+		/* Off-site vertical coefficients */
+		SDE_REG_WRITE(c,
+				CDM_CDWN2_COEFF_OFFSITE_V,
+				offsite_v_coeff[0]);
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	if (cfg->v_cdwn_type || cfg->h_cdwn_type)
+		opmode |= BIT(0); /* EN CDWN module */
+	else
+		opmode &= ~BIT(0);
+
+	out_size = (cfg->output_width & 0xFFFF) |
+		((cfg->output_height & 0xFFFF) << 16);
+	SDE_REG_WRITE(c, CDM_CDWN2_OUT_SIZE, out_size);
+	SDE_REG_WRITE(c, CDM_CDWN2_OP_MODE, opmode);
+	SDE_REG_WRITE(c, CDM_CDWN2_CLAMP_OUT,
+			((0x3FF << 16) | 0x0));
+
+	return 0;
+}
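+
+/*
+ * CDM_CDWN2_OP_MODE bit layout, as programmed by the function above:
+ *   BIT(0)     - CDWN module enable
+ *   BIT(1)     - horizontal downsample enable (DWNS_H)
+ *   BIT(2)     - vertical downsample enable (DWNS_V)
+ *   bits [4:3] - METHOD_H: 0 pixel drop, 1 average, 2 cosite, 3 offsite
+ *   bits [6:5] - METHOD_V: same encoding as METHOD_H
+ *   BIT(7)     - clear for 10-bit output, set otherwise
+ */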
+
+int sde_hw_cdm_enable(struct sde_hw_cdm *ctx,
+		struct sde_hw_cdm_cfg *cdm)
+{
+	struct sde_hw_blk_reg_map *c = &ctx->hw;
+	const struct sde_format *fmt = cdm->output_fmt;
+	struct cdm_output_cfg cdm_cfg = { 0 };
+	u32 opmode = 0;
+	u32 csc = 0;
+
+	if (!SDE_FORMAT_IS_YUV(fmt))
+		return -EINVAL;
+
+	if (cdm->output_type == CDM_CDWN_OUTPUT_HDMI) {
+		if (fmt->chroma_sample == SDE_CHROMA_H1V2)
+			return -EINVAL; /* unsupported format */
+		opmode = BIT(0);
+		opmode |= (fmt->chroma_sample << 1);
+		cdm_cfg.intf_en = true;
+	} else {
+		opmode = 0;
+		cdm_cfg.wb_en = true;
+	}
+
+	csc |= BIT(2);
+	csc &= ~BIT(1);
+	csc |= BIT(0);
+
+	if (ctx->hw_mdp && ctx->hw_mdp->ops.setup_cdm_output)
+		ctx->hw_mdp->ops.setup_cdm_output(ctx->hw_mdp, &cdm_cfg);
+
+	SDE_REG_WRITE(c, CDM_CSC_10_OPMODE, csc);
+	SDE_REG_WRITE(c, CDM_HDMI_PACK_OP_MODE, opmode);
+	return 0;
+}
+
+void sde_hw_cdm_disable(struct sde_hw_cdm *ctx)
+{
+	struct cdm_output_cfg cdm_cfg = { 0 };
+
+	if (ctx->hw_mdp && ctx->hw_mdp->ops.setup_cdm_output)
+		ctx->hw_mdp->ops.setup_cdm_output(ctx->hw_mdp, &cdm_cfg);
+}
+
+static void _setup_cdm_ops(struct sde_hw_cdm_ops *ops,
+	unsigned long features)
+{
+	ops->setup_csc_data = sde_hw_cdm_setup_csc_10bit;
+	ops->setup_cdwn = sde_hw_cdm_setup_cdwn;
+	ops->enable = sde_hw_cdm_enable;
+	ops->disable = sde_hw_cdm_disable;
+}
+
+struct sde_hw_cdm *sde_hw_cdm_init(enum sde_cdm idx,
+		void __iomem *addr,
+		struct sde_mdss_cfg *m,
+		struct sde_hw_mdp *hw_mdp)
+{
+	struct sde_hw_cdm *c;
+	struct sde_cdm_cfg *cfg;
+
+	c = kzalloc(sizeof(*c), GFP_KERNEL);
+	if (!c)
+		return ERR_PTR(-ENOMEM);
+
+	cfg = _cdm_offset(idx, m, addr, &c->hw);
+	if (IS_ERR_OR_NULL(cfg)) {
+		kfree(c);
+		return ERR_PTR(-EINVAL);
+	}
+
+	c->idx = idx;
+	c->cdm_hw_cap = cfg;
+	_setup_cdm_ops(&c->ops, c->cdm_hw_cap->features);
+	c->hw_mdp = hw_mdp;
+
+	/*
+	 * Perform any default initialization for the chroma down module:
+	 * set up the default csc coefficients
+	 */
+	sde_hw_cdm_setup_csc_10bit(c, &rgb2yuv_cfg);
+
+	sde_dbg_reg_register_dump_range(SDE_DBG_NAME, cfg->name, c->hw.blk_off,
+			c->hw.blk_off + c->hw.length, c->hw.xin_id);
+
+	return c;
+}
+
+void sde_hw_cdm_destroy(struct sde_hw_cdm *cdm)
+{
+	kfree(cdm);
+}
diff -Nruw linux-4.4.115-fbx/drivers/gpu/drm/msm/sde./sde_hw_cdm.h linux-4.4.115-fbx/drivers/gpu/drm/msm/sde/sde_hw_cdm.h
--- linux-4.4.115-fbx/drivers/gpu/drm/msm/sde./sde_hw_cdm.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/gpu/drm/msm/sde/sde_hw_cdm.h	2019-01-22 16:16:23.515246515 +0100
@@ -0,0 +1,128 @@
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _SDE_HW_CDM_H
+#define _SDE_HW_CDM_H
+
+#include "sde_hw_mdss.h"
+#include "sde_hw_top.h"
+
+struct sde_hw_cdm;
+
+struct sde_hw_cdm_cfg {
+	u32 output_width;
+	u32 output_height;
+	u32 output_bit_depth;
+	u32 h_cdwn_type;
+	u32 v_cdwn_type;
+	const struct sde_format *output_fmt;
+	u32 output_type;
+	int flags;
+};
+
+enum sde_hw_cdwn_type {
+	CDM_CDWN_DISABLE,
+	CDM_CDWN_PIXEL_DROP,
+	CDM_CDWN_AVG,
+	CDM_CDWN_COSITE,
+	CDM_CDWN_OFFSITE,
+};
+
+enum sde_hw_cdwn_output_type {
+	CDM_CDWN_OUTPUT_HDMI,
+	CDM_CDWN_OUTPUT_WB,
+};
+
+enum sde_hw_cdwn_output_bit_depth {
+	CDM_CDWN_OUTPUT_8BIT,
+	CDM_CDWN_OUTPUT_10BIT,
+};
+
+/**
+ * struct sde_hw_cdm_ops : Interface to the chroma down HW driver functions
+ *                         Assumption is these functions will be called after
+ *                         clocks are enabled
+ *  @setup_csc:            Programs the csc matrix
+ *  @setup_cdwn:           Sets up the chroma down sub module
+ *  @enable:               Enables the output to interface and programs the
+ *                         output packer
+ *  @disable:              Puts the cdm in bypass mode
+ */
+struct sde_hw_cdm_ops {
+	/**
+	 * Programs the CSC matrix for conversion from RGB space to YUV space.
+	 * Calling this function is optional, as the matrix is automatically
+	 * set during initialization; the user should call this to program a
+	 * matrix other than the default.
+	 * @cdm:          Pointer to the chroma down context structure
+	 * @data          Pointer to CSC configuration data
+	 * return:        0 if success; error code otherwise
+	 */
+	int (*setup_csc_data)(struct sde_hw_cdm *cdm,
+			struct sde_csc_cfg *data);
+
+	/**
+	 * Programs the Chroma downsample part.
+	 * @cdm         Pointer to chroma down context
+	 */
+	int (*setup_cdwn)(struct sde_hw_cdm *cdm,
+	struct sde_hw_cdm_cfg *cfg);
+
+	/**
+	 * Enable the CDM module
+	 * @cdm         Pointer to chroma down context
+	 */
+	int (*enable)(struct sde_hw_cdm *cdm,
+	struct sde_hw_cdm_cfg *cfg);
+
+	/**
+	 * Disable the CDM module
+	 * @cdm         Pointer to chroma down context
+	 */
+	void (*disable)(struct sde_hw_cdm *cdm);
+};
+
+struct sde_hw_cdm {
+	/* base */
+	struct sde_hw_blk_reg_map hw;
+
+	/* chroma down */
+	const struct sde_cdm_cfg   *cdm_hw_cap;
+	enum  sde_cdm  idx;
+
+	/* mdp top hw driver */
+	struct sde_hw_mdp *hw_mdp;
+
+	/* ops */
+	struct sde_hw_cdm_ops ops;
+};
+
+/**
+ * sde_hw_cdm_init - initializes the cdm hw driver object.
+ * should be called once before accessing every cdm.
+ * @idx:  cdm index for which driver object is required
+ * @addr: mapped register io address of MDP
+ * @m :   pointer to mdss catalog data
+ * @hw_mdp:  pointer to mdp top hw driver object
+ */
+struct sde_hw_cdm *sde_hw_cdm_init(enum sde_cdm idx,
+		void __iomem *addr,
+		struct sde_mdss_cfg *m,
+		struct sde_hw_mdp *hw_mdp);
+
+/**
+ * sde_hw_cdm_destroy - destroys CDM driver context
+ * @cdm:   pointer to CDM driver context
+ */
+void sde_hw_cdm_destroy(struct sde_hw_cdm *cdm);
+
+#endif /*_SDE_HW_CDM_H */
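
The CDM block above follows the same pattern as the rest of the SDE hardware layer: an init call resolves the block from the catalog and fills an ops table, and callers drive the hardware only through those ops. A minimal sketch of the intended call sequence; `cdm_idx`, `mmio`, `catalog`, `mdp_top` and the 4K/YUV settings here are illustrative assumptions, not part of the patch:

/* Sketch only: bring up a CDM for an HDMI YUV path (values are examples). */
struct sde_hw_cdm_cfg cfg = {
	.output_width     = 3840,
	.output_height    = 2160,
	.output_bit_depth = CDM_CDWN_OUTPUT_8BIT,
	.h_cdwn_type      = CDM_CDWN_COSITE,
	.v_cdwn_type      = CDM_CDWN_OFFSITE,
	.output_type      = CDM_CDWN_OUTPUT_HDMI,
};
struct sde_hw_cdm *cdm = sde_hw_cdm_init(cdm_idx, mmio, catalog, mdp_top);

if (!IS_ERR_OR_NULL(cdm)) {
	cdm->ops.setup_cdwn(cdm, &cfg);	/* program the chroma downsampler */
	cdm->ops.enable(cdm, &cfg);	/* route output, program the packer */
	/* ... on teardown ... */
	cdm->ops.disable(cdm);		/* back to bypass */
	sde_hw_cdm_destroy(cdm);
}
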
diff -Nruw linux-4.4.115/drivers/gpu/drm/msm/sde/sde_hw_color_processing.h linux-4.4.115-fbx/drivers/gpu/drm/msm/sde/sde_hw_color_processing.h
--- linux-4.4.115/drivers/gpu/drm/msm/sde/sde_hw_color_processing.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/gpu/drm/msm/sde/sde_hw_color_processing.h	2019-01-22 16:16:23.515246515 +0100
@@ -0,0 +1,18 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _SDE_HW_COLOR_PROCESSING_H
+#define _SDE_HW_COLOR_PROCESSING_H
+
+#include "sde_hw_color_processing_v1_7.h"
+
+#endif
diff -Nruw linux-4.4.115/drivers/gpu/drm/msm/sde/sde_hw_color_processing_v1_7.c linux-4.4.115-fbx/drivers/gpu/drm/msm/sde/sde_hw_color_processing_v1_7.c
--- linux-4.4.115/drivers/gpu/drm/msm/sde/sde_hw_color_processing_v1_7.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/gpu/drm/msm/sde/sde_hw_color_processing_v1_7.c	2019-10-29 09:26:23.641203159 +0100
@@ -0,0 +1,453 @@
+/* Copyright (c) 2016,2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <drm/msm_drm_pp.h>
+#include "sde_hw_color_processing_v1_7.h"
+
+#define PA_HUE_VIG_OFF		0x110
+#define PA_SAT_VIG_OFF		0x114
+#define PA_VAL_VIG_OFF		0x118
+#define PA_CONT_VIG_OFF		0x11C
+
+#define PA_HUE_DSPP_OFF		0x238
+#define PA_SAT_DSPP_OFF		0x23C
+#define PA_VAL_DSPP_OFF		0x240
+#define PA_CONT_DSPP_OFF	0x244
+
+#define PA_LUTV_DSPP_OFF	0x1400
+#define PA_LUT_SWAP_OFF		0x234
+
+#define PA_HUE_MASK		0xFFF
+#define PA_SAT_MASK		0xFFFF
+#define PA_VAL_MASK		0xFF
+#define PA_CONT_MASK		0xFF
+
+#define MEMCOL_PWL0_OFF		0x88
+#define MEMCOL_PWL0_MASK	0xFFFF07FF
+#define MEMCOL_PWL1_OFF		0x8C
+#define MEMCOL_PWL1_MASK	0xFFFFFFFF
+#define MEMCOL_HUE_REGION_OFF	0x90
+#define MEMCOL_HUE_REGION_MASK	0x7FF07FF
+#define MEMCOL_SAT_REGION_OFF	0x94
+#define MEMCOL_SAT_REGION_MASK	0xFFFFFF
+#define MEMCOL_VAL_REGION_OFF	0x98
+#define MEMCOL_VAL_REGION_MASK	0xFFFFFF
+#define MEMCOL_P0_LEN		0x14
+#define MEMCOL_P1_LEN		0x8
+#define MEMCOL_PWL2_OFF		0x218
+#define MEMCOL_PWL2_MASK	0xFFFFFFFF
+#define MEMCOL_BLEND_GAIN_OFF	0x21C
+#define MEMCOL_PWL_HOLD_OFF	0x214
+
+#define VIG_OP_PA_EN		BIT(4)
+#define VIG_OP_PA_SKIN_EN	BIT(5)
+#define VIG_OP_PA_FOL_EN	BIT(6)
+#define VIG_OP_PA_SKY_EN	BIT(7)
+#define VIG_OP_PA_HUE_EN	BIT(25)
+#define VIG_OP_PA_SAT_EN	BIT(26)
+#define VIG_OP_PA_VAL_EN	BIT(27)
+#define VIG_OP_PA_CONT_EN	BIT(28)
+
+#define DSPP_OP_SZ_VAL_EN	BIT(31)
+#define DSPP_OP_SZ_SAT_EN	BIT(30)
+#define DSPP_OP_SZ_HUE_EN	BIT(29)
+#define DSPP_OP_PA_HUE_EN	BIT(25)
+#define DSPP_OP_PA_SAT_EN	BIT(26)
+#define DSPP_OP_PA_VAL_EN	BIT(27)
+#define DSPP_OP_PA_CONT_EN	BIT(28)
+#define DSPP_OP_PA_EN		BIT(20)
+#define DSPP_OP_PA_LUTV_EN	BIT(19)
+#define DSPP_OP_PA_SKIN_EN	BIT(5)
+#define DSPP_OP_PA_FOL_EN	BIT(6)
+#define DSPP_OP_PA_SKY_EN	BIT(7)
+
+#define REG_MASK(n) ((BIT(n)) - 1)
+
+#define PA_VIG_DISABLE_REQUIRED(x) \
+			!((x) & (VIG_OP_PA_SKIN_EN | VIG_OP_PA_SKY_EN | \
+			VIG_OP_PA_FOL_EN | VIG_OP_PA_HUE_EN | \
+			VIG_OP_PA_SAT_EN | VIG_OP_PA_VAL_EN | \
+			VIG_OP_PA_CONT_EN))
+
+
+#define PA_DSPP_DISABLE_REQUIRED(x) \
+			!((x) & (DSPP_OP_PA_SKIN_EN | DSPP_OP_PA_SKY_EN | \
+			DSPP_OP_PA_FOL_EN | DSPP_OP_PA_HUE_EN | \
+			DSPP_OP_PA_SAT_EN | DSPP_OP_PA_VAL_EN | \
+			DSPP_OP_PA_CONT_EN | DSPP_OP_PA_LUTV_EN))
+
+#define DSPP_OP_PCC_ENABLE	BIT(0)
+#define PCC_OP_MODE_OFF		0
+#define PCC_CONST_COEFF_OFF	4
+#define PCC_R_COEFF_OFF		0x10
+#define PCC_G_COEFF_OFF		0x1C
+#define PCC_B_COEFF_OFF		0x28
+#define PCC_RG_COEFF_OFF	0x34
+#define PCC_RB_COEFF_OFF	0x40
+#define PCC_GB_COEFF_OFF	0x4C
+#define PCC_RGB_COEFF_OFF	0x58
+#define PCC_CONST_COEFF_MASK	0xFFFF
+#define PCC_COEFF_MASK		0x3FFFF
+
+#define SSPP	0
+#define DSPP	1
+
+static void __setup_pa_hue(struct sde_hw_blk_reg_map *hw,
+			const struct sde_pp_blk *blk, uint32_t hue,
+			int location)
+{
+	u32 base = blk->base;
+	u32 offset = (location == DSPP) ? PA_HUE_DSPP_OFF : PA_HUE_VIG_OFF;
+	u32 op_hue_en = (location == DSPP) ? DSPP_OP_PA_HUE_EN :
+					VIG_OP_PA_HUE_EN;
+	u32 op_pa_en = (location == DSPP) ? DSPP_OP_PA_EN : VIG_OP_PA_EN;
+	u32 disable_req;
+	u32 opmode;
+
+	SDE_REG_WRITE(hw, base + offset, hue & PA_HUE_MASK);
+
+	opmode = SDE_REG_READ(hw, base);
+
+	if (!hue) {
+		opmode &= ~op_hue_en;
+		disable_req = (location == DSPP) ?
+			PA_DSPP_DISABLE_REQUIRED(opmode) :
+			PA_VIG_DISABLE_REQUIRED(opmode);
+		if (disable_req)
+			opmode &= ~op_pa_en;
+	} else {
+		opmode |= op_hue_en | op_pa_en;
+	}
+
+	SDE_REG_WRITE(hw, base, opmode);
+}
+
+void sde_setup_pipe_pa_hue_v1_7(struct sde_hw_pipe *ctx, void *cfg)
+{
+	uint32_t hue = *((uint32_t *)cfg);
+
+	__setup_pa_hue(&ctx->hw, &ctx->cap->sblk->hsic_blk, hue, SSPP);
+}
+
+void sde_setup_dspp_pa_hue_v1_7(struct sde_hw_dspp *ctx, void *cfg)
+{
+	uint32_t hue = *((uint32_t *)cfg);
+
+	__setup_pa_hue(&ctx->hw, &ctx->cap->sblk->hsic, hue, DSPP);
+}
+
+static void __setup_pa_sat(struct sde_hw_blk_reg_map *hw,
+			const struct sde_pp_blk *blk, uint32_t sat,
+			int location)
+{
+	u32 base = blk->base;
+	u32 offset = (location == DSPP) ? PA_SAT_DSPP_OFF : PA_SAT_VIG_OFF;
+	u32 op_sat_en = (location == DSPP) ?
+			DSPP_OP_PA_SAT_EN : VIG_OP_PA_SAT_EN;
+	u32 op_pa_en = (location == DSPP) ? DSPP_OP_PA_EN : VIG_OP_PA_EN;
+	u32 disable_req;
+	u32 opmode;
+
+	SDE_REG_WRITE(hw, base + offset, sat & PA_SAT_MASK);
+
+	opmode = SDE_REG_READ(hw, base);
+
+	if (!sat) {
+		opmode &= ~op_sat_en;
+		disable_req = (location == DSPP) ?
+			PA_DSPP_DISABLE_REQUIRED(opmode) :
+			PA_VIG_DISABLE_REQUIRED(opmode);
+		if (disable_req)
+			opmode &= ~op_pa_en;
+	} else {
+		opmode |= op_sat_en | op_pa_en;
+	}
+
+	SDE_REG_WRITE(hw, base, opmode);
+}
+
+void sde_setup_pipe_pa_sat_v1_7(struct sde_hw_pipe *ctx, void *cfg)
+{
+	uint32_t sat = *((uint32_t *)cfg);
+
+	__setup_pa_sat(&ctx->hw, &ctx->cap->sblk->hsic_blk, sat, SSPP);
+}
+
+static void __setup_pa_val(struct sde_hw_blk_reg_map *hw,
+			const struct sde_pp_blk *blk, uint32_t value,
+			int location)
+{
+	u32 base = blk->base;
+	u32 offset = (location == DSPP) ? PA_VAL_DSPP_OFF : PA_VAL_VIG_OFF;
+	u32 op_val_en = (location == DSPP) ?
+			DSPP_OP_PA_VAL_EN : VIG_OP_PA_VAL_EN;
+	u32 op_pa_en = (location == DSPP) ? DSPP_OP_PA_EN : VIG_OP_PA_EN;
+	u32 disable_req;
+	u32 opmode;
+
+	SDE_REG_WRITE(hw, base + offset, value & PA_VAL_MASK);
+
+	opmode = SDE_REG_READ(hw, base);
+
+	if (!value) {
+		opmode &= ~op_val_en;
+		disable_req = (location == DSPP) ?
+			PA_DSPP_DISABLE_REQUIRED(opmode) :
+			PA_VIG_DISABLE_REQUIRED(opmode);
+		if (disable_req)
+			opmode &= ~op_pa_en;
+	} else {
+		opmode |= op_val_en | op_pa_en;
+	}
+
+	SDE_REG_WRITE(hw, base, opmode);
+}
+
+void sde_setup_pipe_pa_val_v1_7(struct sde_hw_pipe *ctx, void *cfg)
+{
+	uint32_t value = *((uint32_t *)cfg);
+
+	__setup_pa_val(&ctx->hw, &ctx->cap->sblk->hsic_blk, value, SSPP);
+}
+
+static void __setup_pa_cont(struct sde_hw_blk_reg_map *hw,
+			const struct sde_pp_blk *blk, uint32_t contrast,
+			int location)
+{
+	u32 base = blk->base;
+	u32 offset = (location == DSPP) ? PA_CONT_DSPP_OFF : PA_CONT_VIG_OFF;
+	u32 op_cont_en = (location == DSPP) ? DSPP_OP_PA_CONT_EN :
+					VIG_OP_PA_CONT_EN;
+	u32 op_pa_en = (location == DSPP) ? DSPP_OP_PA_EN : VIG_OP_PA_EN;
+	u32 disable_req;
+	u32 opmode;
+
+	SDE_REG_WRITE(hw, base + offset, contrast & PA_CONT_MASK);
+
+	opmode = SDE_REG_READ(hw, base);
+
+	if (!contrast) {
+		opmode &= ~op_cont_en;
+		disable_req = (location == DSPP) ?
+			PA_DSPP_DISABLE_REQUIRED(opmode) :
+			PA_VIG_DISABLE_REQUIRED(opmode);
+		if (disable_req)
+			opmode &= ~op_pa_en;
+	} else {
+		opmode |= op_cont_en | op_pa_en;
+	}
+
+	SDE_REG_WRITE(hw, base, opmode);
+}
+
+void sde_setup_pipe_pa_cont_v1_7(struct sde_hw_pipe *ctx, void *cfg)
+{
+	uint32_t contrast = *((uint32_t *)cfg);
+
+	__setup_pa_cont(&ctx->hw, &ctx->cap->sblk->hsic_blk, contrast, SSPP);
+}
+
+void sde_setup_pipe_pa_memcol_v1_7(struct sde_hw_pipe *ctx,
+				   enum sde_memcolor_type type,
+				   void *cfg)
+{
+	struct drm_msm_memcol *mc = cfg;
+	u32 base = ctx->cap->sblk->memcolor_blk.base;
+	u32 off, op, mc_en, hold = 0;
+	u32 mc_i = 0;
+
+	switch (type) {
+	case MEMCOLOR_SKIN:
+		mc_en = VIG_OP_PA_SKIN_EN;
+		mc_i = 0;
+		break;
+	case MEMCOLOR_SKY:
+		mc_en = VIG_OP_PA_SKY_EN;
+		mc_i = 1;
+		break;
+	case MEMCOLOR_FOLIAGE:
+		mc_en = VIG_OP_PA_FOL_EN;
+		mc_i = 2;
+		break;
+	default:
+		DRM_ERROR("Invalid memory color type %d\n", type);
+		return;
+	}
+
+	op = SDE_REG_READ(&ctx->hw, base);
+	if (!mc) {
+		op &= ~mc_en;
+		if (PA_VIG_DISABLE_REQUIRED(op))
+			op &= ~VIG_OP_PA_EN;
+		SDE_REG_WRITE(&ctx->hw, base, op);
+		return;
+	}
+
+	off = base + (mc_i * MEMCOL_P0_LEN);
+	SDE_REG_WRITE(&ctx->hw, (off + MEMCOL_PWL0_OFF),
+		      mc->color_adjust_p0 & MEMCOL_PWL0_MASK);
+	SDE_REG_WRITE(&ctx->hw, (off + MEMCOL_PWL1_OFF),
+		      mc->color_adjust_p1 & MEMCOL_PWL1_MASK);
+	SDE_REG_WRITE(&ctx->hw, (off + MEMCOL_HUE_REGION_OFF),
+		      mc->hue_region & MEMCOL_HUE_REGION_MASK);
+	SDE_REG_WRITE(&ctx->hw, (off + MEMCOL_SAT_REGION_OFF),
+		      mc->sat_region & MEMCOL_SAT_REGION_MASK);
+	SDE_REG_WRITE(&ctx->hw, (off + MEMCOL_VAL_REGION_OFF),
+		      mc->val_region & MEMCOL_VAL_REGION_MASK);
+
+	off = base + (mc_i * MEMCOL_P1_LEN);
+	SDE_REG_WRITE(&ctx->hw, (off + MEMCOL_PWL2_OFF),
+		      mc->color_adjust_p2 & MEMCOL_PWL2_MASK);
+	SDE_REG_WRITE(&ctx->hw, (off + MEMCOL_BLEND_GAIN_OFF), mc->blend_gain);
+
+	hold = SDE_REG_READ(&ctx->hw, off + MEMCOL_PWL_HOLD_OFF);
+	hold &= ~(0xF << (mc_i * 4));
+	hold |= ((mc->sat_hold & 0x3) << (mc_i * 4));
+	hold |= ((mc->val_hold & 0x3) << ((mc_i * 4) + 2));
+	SDE_REG_WRITE(&ctx->hw, (off + MEMCOL_PWL_HOLD_OFF), hold);
+
+	op |= VIG_OP_PA_EN | mc_en;
+	SDE_REG_WRITE(&ctx->hw, base, op);
+}
+
+void sde_setup_dspp_pcc_v1_7(struct sde_hw_dspp *ctx, void *cfg)
+{
+	struct sde_hw_cp_cfg *hw_cfg = cfg;
+	struct drm_msm_pcc *pcc;
+
+	if (!hw_cfg || (hw_cfg->len != sizeof(*pcc) && hw_cfg->payload)) {
+		DRM_ERROR("invalid params hw %pK payload %pK payloadsize %d "
+			  "exp size %zd\n",
+			   hw_cfg, ((hw_cfg) ? hw_cfg->payload : NULL),
+			   ((hw_cfg) ? hw_cfg->len : 0), sizeof(*pcc));
+		return;
+	}
+
+	/* Turn off feature */
+	if (!hw_cfg->payload) {
+		SDE_REG_WRITE(&ctx->hw, ctx->cap->sblk->pcc.base,
+			      PCC_OP_MODE_OFF);
+		return;
+	}
+	DRM_DEBUG_DRIVER("Enable PCC feature\n");
+	pcc = hw_cfg->payload;
+
+	SDE_REG_WRITE(&ctx->hw, ctx->cap->sblk->pcc.base + PCC_CONST_COEFF_OFF,
+				  pcc->r.c & PCC_CONST_COEFF_MASK);
+	SDE_REG_WRITE(&ctx->hw,
+		      ctx->cap->sblk->pcc.base + PCC_CONST_COEFF_OFF + 4,
+		      pcc->g.c & PCC_CONST_COEFF_MASK);
+	SDE_REG_WRITE(&ctx->hw,
+		      ctx->cap->sblk->pcc.base + PCC_CONST_COEFF_OFF + 8,
+		      pcc->b.c & PCC_CONST_COEFF_MASK);
+
+	SDE_REG_WRITE(&ctx->hw, ctx->cap->sblk->pcc.base + PCC_R_COEFF_OFF,
+				  pcc->r.r & PCC_COEFF_MASK);
+	SDE_REG_WRITE(&ctx->hw, ctx->cap->sblk->pcc.base + PCC_R_COEFF_OFF + 4,
+				  pcc->g.r & PCC_COEFF_MASK);
+	SDE_REG_WRITE(&ctx->hw, ctx->cap->sblk->pcc.base + PCC_R_COEFF_OFF + 8,
+				  pcc->b.r & PCC_COEFF_MASK);
+
+	SDE_REG_WRITE(&ctx->hw, ctx->cap->sblk->pcc.base + PCC_G_COEFF_OFF,
+				  pcc->r.g & PCC_COEFF_MASK);
+	SDE_REG_WRITE(&ctx->hw, ctx->cap->sblk->pcc.base + PCC_G_COEFF_OFF + 4,
+				  pcc->g.g & PCC_COEFF_MASK);
+	SDE_REG_WRITE(&ctx->hw, ctx->cap->sblk->pcc.base + PCC_G_COEFF_OFF + 8,
+				  pcc->b.g & PCC_COEFF_MASK);
+
+	SDE_REG_WRITE(&ctx->hw, ctx->cap->sblk->pcc.base + PCC_B_COEFF_OFF,
+				  pcc->r.b & PCC_COEFF_MASK);
+	SDE_REG_WRITE(&ctx->hw, ctx->cap->sblk->pcc.base + PCC_B_COEFF_OFF + 4,
+				  pcc->g.b & PCC_COEFF_MASK);
+	SDE_REG_WRITE(&ctx->hw, ctx->cap->sblk->pcc.base + PCC_B_COEFF_OFF + 8,
+				  pcc->b.b & PCC_COEFF_MASK);
+
+
+	SDE_REG_WRITE(&ctx->hw, ctx->cap->sblk->pcc.base + PCC_RG_COEFF_OFF,
+				  pcc->r.rg & PCC_COEFF_MASK);
+	SDE_REG_WRITE(&ctx->hw, ctx->cap->sblk->pcc.base + PCC_RG_COEFF_OFF + 4,
+				  pcc->g.rg & PCC_COEFF_MASK);
+	SDE_REG_WRITE(&ctx->hw, ctx->cap->sblk->pcc.base + PCC_RG_COEFF_OFF + 8,
+				  pcc->b.rg & PCC_COEFF_MASK);
+
+	SDE_REG_WRITE(&ctx->hw, ctx->cap->sblk->pcc.base + PCC_RB_COEFF_OFF,
+				  pcc->r.rb & PCC_COEFF_MASK);
+	SDE_REG_WRITE(&ctx->hw, ctx->cap->sblk->pcc.base + PCC_RB_COEFF_OFF + 4,
+				  pcc->g.rb & PCC_COEFF_MASK);
+	SDE_REG_WRITE(&ctx->hw, ctx->cap->sblk->pcc.base + PCC_RB_COEFF_OFF + 8,
+				  pcc->b.rb & PCC_COEFF_MASK);
+
+
+	SDE_REG_WRITE(&ctx->hw, ctx->cap->sblk->pcc.base + PCC_GB_COEFF_OFF,
+				  pcc->r.gb & PCC_COEFF_MASK);
+	SDE_REG_WRITE(&ctx->hw, ctx->cap->sblk->pcc.base + PCC_GB_COEFF_OFF + 4,
+				  pcc->g.gb & PCC_COEFF_MASK);
+	SDE_REG_WRITE(&ctx->hw, ctx->cap->sblk->pcc.base + PCC_GB_COEFF_OFF + 8,
+				  pcc->b.gb & PCC_COEFF_MASK);
+
+	SDE_REG_WRITE(&ctx->hw, ctx->cap->sblk->pcc.base + PCC_RGB_COEFF_OFF,
+				  pcc->r.rgb & PCC_COEFF_MASK);
+	SDE_REG_WRITE(&ctx->hw,
+		      ctx->cap->sblk->pcc.base + PCC_RGB_COEFF_OFF + 4,
+		      pcc->g.rgb & PCC_COEFF_MASK);
+	SDE_REG_WRITE(&ctx->hw,
+		      ctx->cap->sblk->pcc.base + PCC_RGB_COEFF_OFF + 8,
+		      pcc->b.rgb & PCC_COEFF_MASK);
+	SDE_REG_WRITE(&ctx->hw, ctx->cap->sblk->pcc.base, DSPP_OP_PCC_ENABLE);
+}
+
+void sde_setup_dspp_pa_vlut_v1_7(struct sde_hw_dspp *ctx, void *cfg)
+{
+	struct drm_msm_pa_vlut *payload = NULL;
+	struct sde_hw_cp_cfg *hw_cfg = cfg;
+	u32 base = ctx->cap->sblk->vlut.base;
+	u32 offset = base + PA_LUTV_DSPP_OFF;
+	u32 op_mode, tmp;
+	int i = 0, j = 0;
+
+	if (!hw_cfg || (hw_cfg->payload && hw_cfg->len !=
+			sizeof(struct drm_msm_pa_vlut))) {
+		DRM_ERROR("hw %pK payload %pK payloadsize %d exp size %zd\n",
+			  hw_cfg, ((hw_cfg) ? hw_cfg->payload : NULL),
+			  ((hw_cfg) ? hw_cfg->len : 0),
+			  sizeof(struct drm_msm_pa_vlut));
+		return;
+	}
+	op_mode = SDE_REG_READ(&ctx->hw, base);
+	if (!hw_cfg->payload) {
+		DRM_DEBUG_DRIVER("Disable vlut feature\n");
+		/*
+		 * In the PA_VLUT disable case, remove PA_VLUT enable bit(19)
+		 * first, then check whether any other PA sub-features are
+		 * enabled or not. If none of the sub-features are enabled,
+		 * remove the PA global enable bit(20).
+		 */
+		op_mode &= ~((u32)DSPP_OP_PA_LUTV_EN);
+		if (PA_DSPP_DISABLE_REQUIRED(op_mode))
+			op_mode &= ~((u32)DSPP_OP_PA_EN);
+		SDE_REG_WRITE(&ctx->hw, base, op_mode);
+		return;
+	}
+	payload = hw_cfg->payload;
+	DRM_DEBUG_DRIVER("Enable vlut feature flags %llx\n", payload->flags);
+	for (i = 0, j = 0; i < ARRAY_SIZE(payload->val); i += 2, j += 4) {
+		tmp = (payload->val[i] & REG_MASK(10)) |
+			((payload->val[i + 1] & REG_MASK(10)) << 16);
+		SDE_REG_WRITE(&ctx->hw, (offset + j), tmp);
+	}
+	SDE_REG_WRITE(&ctx->hw, (base + PA_LUT_SWAP_OFF), 1);
+	op_mode |= DSPP_OP_PA_EN | DSPP_OP_PA_LUTV_EN;
+	SDE_REG_WRITE(&ctx->hw, base, op_mode);
+}
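
The vLUT write loop at the end of sde_setup_dspp_pa_vlut_v1_7() packs two 10-bit LUT entries into each 32-bit register, the low entry in bits 9:0 and the high entry in bits 25:16, which is why `i` steps by two while the byte offset `j` steps by four. The same packing in isolation, as a self-contained sketch:

#include <stdint.h>

/* Pack two 10-bit vLUT entries the way the write loop above does. */
static uint32_t pack_vlut_pair(uint32_t lo, uint32_t hi)
{
	const uint32_t mask10 = (1u << 10) - 1;	/* same as REG_MASK(10) */

	return (lo & mask10) | ((hi & mask10) << 16);
}
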
diff -Nruw linux-4.4.115/drivers/gpu/drm/msm/sde/sde_hw_color_processing_v1_7.h linux-4.4.115-fbx/drivers/gpu/drm/msm/sde/sde_hw_color_processing_v1_7.h
--- linux-4.4.115/drivers/gpu/drm/msm/sde/sde_hw_color_processing_v1_7.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/gpu/drm/msm/sde/sde_hw_color_processing_v1_7.h	2019-01-22 16:16:23.515246515 +0100
@@ -0,0 +1,78 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _SDE_HW_COLOR_PROCESSING_V1_7_H
+#define _SDE_HW_COLOR_PROCESSING_V1_7_H
+
+#include "sde_hw_sspp.h"
+#include "sde_hw_dspp.h"
+
+/**
+ * sde_setup_pipe_pa_hue_v1_7 - setup SSPP hue feature in v1.7 hardware
+ * @ctx: Pointer to pipe context
+ * @cfg: Pointer to hue data
+ */
+void sde_setup_pipe_pa_hue_v1_7(struct sde_hw_pipe *ctx, void *cfg);
+
+/**
+ * sde_setup_pipe_pa_sat_v1_7 - setup SSPP saturation feature in v1.7 hardware
+ * @ctx: Pointer to pipe context
+ * @cfg: Pointer to saturation data
+ */
+void sde_setup_pipe_pa_sat_v1_7(struct sde_hw_pipe *ctx, void *cfg);
+
+/**
+ * sde_setup_pipe_pa_val_v1_7 - setup SSPP value feature in v1.7 hardware
+ * @ctx: Pointer to pipe context
+ * @cfg: Pointer to value data
+ */
+void sde_setup_pipe_pa_val_v1_7(struct sde_hw_pipe *ctx, void *cfg);
+
+/**
+ * sde_setup_pipe_pa_cont_v1_7 - setup SSPP contrast feature in v1.7 hardware
+ * @ctx: Pointer to pipe context
+ * @cfg: Pointer to contrast data
+ */
+void sde_setup_pipe_pa_cont_v1_7(struct sde_hw_pipe *ctx, void *cfg);
+
+/**
+ * sde_setup_pipe_pa_memcol_v1_7 - setup SSPP memory color in v1.7 hardware
+ * @ctx: Pointer to pipe context
+ * @type: Memory color type (Skin, sky, or foliage)
+ * @cfg: Pointer to memory color config data
+ */
+void sde_setup_pipe_pa_memcol_v1_7(struct sde_hw_pipe *ctx,
+				   enum sde_memcolor_type type,
+				   void *cfg);
+
+/**
+ * sde_setup_dspp_pcc_v1_7 - setup DSPP PCC feature in v1.7 hardware
+ * @ctx: Pointer to dspp context
+ * @cfg: Pointer to PCC data
+ */
+void sde_setup_dspp_pcc_v1_7(struct sde_hw_dspp *ctx, void *cfg);
+
+/**
+ * sde_setup_dspp_pa_hue_v1_7 - setup DSPP hue feature in v1.7 hardware
+ * @ctx: Pointer to DSPP context
+ * @cfg: Pointer to hue data
+ */
+void sde_setup_dspp_pa_hue_v1_7(struct sde_hw_dspp *ctx, void *cfg);
+
+/**
+ * sde_setup_dspp_pa_vlut_v1_7 - setup DSPP PA vLUT feature in v1.7 hardware
+ * @ctx: Pointer to DSPP context
+ * @cfg: Pointer to vLUT data
+ */
+void sde_setup_dspp_pa_vlut_v1_7(struct sde_hw_dspp *ctx, void *cfg);
+
+#endif
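
All of these hooks take their payload through an opaque `void *cfg`. For the scalar PA features (hue, saturation, value, contrast) the payload is a single `uint32_t`, and zero disables the feature, dropping the global PA enable as well once no PA sub-feature is left on. A hedged usage fragment; the initialized `struct sde_hw_pipe *pipe` and the hue value are assumptions standing in for the caller's SSPP setup:

uint32_t hue = 0x2bc;			/* illustrative non-zero hue */

sde_setup_pipe_pa_hue_v1_7(pipe, &hue);	/* programs PA_HUE, enables PA */

hue = 0;
sde_setup_pipe_pa_hue_v1_7(pipe, &hue);	/* clears hue; PA itself drops out
					   if no other sub-feature is on */
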
diff -Nruw linux-4.4.115/drivers/gpu/drm/msm/sde/sde_hw_ctl.c linux-4.4.115-fbx/drivers/gpu/drm/msm/sde/sde_hw_ctl.c
--- linux-4.4.115/drivers/gpu/drm/msm/sde/sde_hw_ctl.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/gpu/drm/msm/sde/sde_hw_ctl.c	2019-10-29 09:26:23.641203159 +0100
@@ -0,0 +1,516 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/delay.h>
+#include "sde_hwio.h"
+#include "sde_hw_ctl.h"
+#include "sde_dbg.h"
+
+#define   CTL_LAYER(lm)                 \
+	(((lm) == LM_5) ? (0x024) : (((lm) - LM_0) * 0x004))
+#define   CTL_LAYER_EXT(lm)             \
+	(0x40 + (((lm) - LM_0) * 0x004))
+#define   CTL_LAYER_EXT2(lm)             \
+	(0x70 + (((lm) - LM_0) * 0x004))
+#define   CTL_LAYER_EXT3(lm)             \
+	(0xA0 + (((lm) - LM_0) * 0x004))
+
+#define   CTL_TOP                       0x014
+#define   CTL_FLUSH                     0x018
+#define   CTL_START                     0x01C
+#define   CTL_SW_RESET                  0x030
+#define   CTL_LAYER_EXTN_OFFSET         0x40
+
+#define SDE_REG_RESET_TIMEOUT_COUNT    20
+
+static struct sde_ctl_cfg *_ctl_offset(enum sde_ctl ctl,
+		struct sde_mdss_cfg *m,
+		void __iomem *addr,
+		struct sde_hw_blk_reg_map *b)
+{
+	int i;
+
+	for (i = 0; i < m->ctl_count; i++) {
+		if (ctl == m->ctl[i].id) {
+			b->base_off = addr;
+			b->blk_off = m->ctl[i].base;
+			b->length = m->ctl[i].len;
+			b->hwversion = m->hwversion;
+			b->log_mask = SDE_DBG_MASK_CTL;
+			return &m->ctl[i];
+		}
+	}
+	return ERR_PTR(-EINVAL);
+}
+
+static int _mixer_stages(const struct sde_lm_cfg *mixer, int count,
+		enum sde_lm lm)
+{
+	int i;
+	int stages = -EINVAL;
+
+	for (i = 0; i < count; i++) {
+		if (lm == mixer[i].id) {
+			stages = mixer[i].sblk->maxblendstages;
+			break;
+		}
+	}
+
+	return stages;
+}
+
+static inline void sde_hw_ctl_trigger_start(struct sde_hw_ctl *ctx)
+{
+	SDE_REG_WRITE(&ctx->hw, CTL_START, 0x1);
+}
+
+static inline void sde_hw_ctl_clear_pending_flush(struct sde_hw_ctl *ctx)
+{
+	ctx->pending_flush_mask = 0x0;
+}
+
+static inline void sde_hw_ctl_update_pending_flush(struct sde_hw_ctl *ctx,
+		u32 flushbits)
+{
+	ctx->pending_flush_mask |= flushbits;
+}
+
+static u32 sde_hw_ctl_get_pending_flush(struct sde_hw_ctl *ctx)
+{
+	if (!ctx)
+		return 0x0;
+
+	return ctx->pending_flush_mask;
+}
+
+static inline void sde_hw_ctl_trigger_flush(struct sde_hw_ctl *ctx)
+{
+	SDE_REG_WRITE(&ctx->hw, CTL_FLUSH, ctx->pending_flush_mask);
+}
+
+static inline u32 sde_hw_ctl_get_flush_register(struct sde_hw_ctl *ctx)
+{
+	struct sde_hw_blk_reg_map *c = &ctx->hw;
+
+	return SDE_REG_READ(c, CTL_FLUSH);
+}
+
+static inline uint32_t sde_hw_ctl_get_bitmask_sspp(struct sde_hw_ctl *ctx,
+	enum sde_sspp sspp)
+{
+	uint32_t flushbits = 0;
+
+	switch (sspp) {
+	case SSPP_VIG0:
+		flushbits =  BIT(0);
+		break;
+	case SSPP_VIG1:
+		flushbits = BIT(1);
+		break;
+	case SSPP_VIG2:
+		flushbits = BIT(2);
+		break;
+	case SSPP_VIG3:
+		flushbits = BIT(18);
+		break;
+	case SSPP_RGB0:
+		flushbits = BIT(3);
+		break;
+	case SSPP_RGB1:
+		flushbits = BIT(4);
+		break;
+	case SSPP_RGB2:
+		flushbits = BIT(5);
+		break;
+	case SSPP_RGB3:
+		flushbits = BIT(19);
+		break;
+	case SSPP_DMA0:
+		flushbits = BIT(11);
+		break;
+	case SSPP_DMA1:
+		flushbits = BIT(12);
+		break;
+	case SSPP_DMA2:
+		flushbits = BIT(24);
+		break;
+	case SSPP_DMA3:
+		flushbits = BIT(25);
+		break;
+	case SSPP_CURSOR0:
+		flushbits = BIT(22);
+		break;
+	case SSPP_CURSOR1:
+		flushbits = BIT(23);
+		break;
+	default:
+		break;
+	}
+
+	return flushbits;
+}
+
+static inline uint32_t sde_hw_ctl_get_bitmask_mixer(struct sde_hw_ctl *ctx,
+	enum sde_lm lm)
+{
+	uint32_t flushbits = 0;
+
+	switch (lm) {
+	case LM_0:
+		flushbits = BIT(6);
+		break;
+	case LM_1:
+		flushbits = BIT(7);
+		break;
+	case LM_2:
+		flushbits = BIT(8);
+		break;
+	case LM_3:
+		flushbits = BIT(9);
+		break;
+	case LM_4:
+		flushbits = BIT(10);
+		break;
+	case LM_5:
+		flushbits = BIT(20);
+		break;
+	default:
+		return 0; /* unknown LM: nothing to flush */
+	}
+
+	flushbits |= BIT(17); /* CTL */
+
+	return flushbits;
+}
+
+static inline int sde_hw_ctl_get_bitmask_dspp(struct sde_hw_ctl *ctx,
+		u32 *flushbits, enum sde_dspp dspp)
+{
+	switch (dspp) {
+	case DSPP_0:
+		*flushbits |= BIT(13);
+		break;
+	case DSPP_1:
+		*flushbits |= BIT(14);
+		break;
+	default:
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static inline int sde_hw_ctl_get_bitmask_intf(struct sde_hw_ctl *ctx,
+		u32 *flushbits, enum sde_intf intf)
+{
+	switch (intf) {
+	case INTF_0:
+		*flushbits |= BIT(31);
+		break;
+	case INTF_1:
+		*flushbits |= BIT(30);
+		break;
+	case INTF_2:
+		*flushbits |= BIT(29);
+		break;
+	case INTF_3:
+		*flushbits |= BIT(28);
+		break;
+	default:
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static inline int sde_hw_ctl_get_bitmask_wb(struct sde_hw_ctl *ctx,
+		u32 *flushbits, enum sde_wb wb)
+{
+	switch (wb) {
+	case WB_0:
+	case WB_1:
+	case WB_2:
+		*flushbits |= BIT(16);
+		break;
+	default:
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static inline int sde_hw_ctl_get_bitmask_cdm(struct sde_hw_ctl *ctx,
+		u32 *flushbits, enum sde_cdm cdm)
+{
+	switch (cdm) {
+	case CDM_0:
+		*flushbits |= BIT(26);
+		break;
+	default:
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static u32 sde_hw_ctl_poll_reset_status(struct sde_hw_ctl *ctx, u32 count)
+{
+	struct sde_hw_blk_reg_map *c = &ctx->hw;
+	u32 status;
+
+	/* protect to do at least one iteration */
+	if (!count)
+		count = 1;
+
+	/*
+	 * mdp takes around 30us to finish resetting its ctl path;
+	 * polling again after 20-50us means the reset should be
+	 * observed complete on the first re-read
+	 */
+	do {
+		status = SDE_REG_READ(c, CTL_SW_RESET);
+		status &= 0x01;
+		if (status)
+			usleep_range(20, 50);
+	} while (status && --count > 0);
+
+	return status;
+}
+
+static int sde_hw_ctl_reset_control(struct sde_hw_ctl *ctx)
+{
+	struct sde_hw_blk_reg_map *c = &ctx->hw;
+
+	pr_debug("issuing hw ctl reset for ctl:%d\n", ctx->idx);
+	SDE_REG_WRITE(c, CTL_SW_RESET, 0x1);
+	if (sde_hw_ctl_poll_reset_status(ctx, SDE_REG_RESET_TIMEOUT_COUNT))
+		return -EINVAL;
+
+	return 0;
+}
+
+static int sde_hw_ctl_wait_reset_status(struct sde_hw_ctl *ctx)
+{
+	struct sde_hw_blk_reg_map *c = &ctx->hw;
+	u32 status;
+
+	status = SDE_REG_READ(c, CTL_SW_RESET);
+	status &= 0x01;
+	if (!status)
+		return 0;
+
+	pr_debug("hw ctl reset is set for ctl:%d\n", ctx->idx);
+	if (sde_hw_ctl_poll_reset_status(ctx, SDE_REG_RESET_TIMEOUT_COUNT)) {
+		pr_err("hw recovery is not complete for ctl:%d\n", ctx->idx);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static void sde_hw_ctl_clear_all_blendstages(struct sde_hw_ctl *ctx)
+{
+	struct sde_hw_blk_reg_map *c = &ctx->hw;
+	int i;
+
+	for (i = 0; i < ctx->mixer_count; i++) {
+		int mixer_id = ctx->mixer_hw_caps[i].id;
+
+		SDE_REG_WRITE(c, CTL_LAYER(mixer_id), 0);
+		SDE_REG_WRITE(c, CTL_LAYER_EXT(mixer_id), 0);
+		SDE_REG_WRITE(c, CTL_LAYER_EXT2(mixer_id), 0);
+		SDE_REG_WRITE(c, CTL_LAYER_EXT3(mixer_id), 0);
+	}
+}
+
+static void sde_hw_ctl_setup_blendstage(struct sde_hw_ctl *ctx,
+	enum sde_lm lm, struct sde_hw_stage_cfg *stage_cfg, u32 index)
+{
+	struct sde_hw_blk_reg_map *c = &ctx->hw;
+	u32 mixercfg, mixercfg_ext, mix, ext, mixercfg_ext2;
+	int i, j;
+	int stages;
+	int pipes_per_stage;
+
+	if (index >= CRTC_DUAL_MIXERS)
+		return;
+
+	stages = _mixer_stages(ctx->mixer_hw_caps, ctx->mixer_count, lm);
+	if (stages < 0)
+		return;
+
+	if (test_bit(SDE_MIXER_SOURCESPLIT,
+		&ctx->mixer_hw_caps->features))
+		pipes_per_stage = PIPES_PER_STAGE;
+	else
+		pipes_per_stage = 1;
+
+	mixercfg = BIT(24); /* always set BORDER_OUT */
+	mixercfg_ext = 0;
+	mixercfg_ext2 = 0;
+
+	for (i = 0; i <= stages; i++) {
+		/* overflow to ext register if 'i + 1 > 7' */
+		mix = (i + 1) & 0x7;
+		ext = i >= 7;
+
+		for (j = 0 ; j < pipes_per_stage; j++) {
+			switch (stage_cfg->stage[index][i][j]) {
+			case SSPP_VIG0:
+				mixercfg |= mix << 0;
+				mixercfg_ext |= ext << 0;
+				break;
+			case SSPP_VIG1:
+				mixercfg |= mix << 3;
+				mixercfg_ext |= ext << 2;
+				break;
+			case SSPP_VIG2:
+				mixercfg |= mix << 6;
+				mixercfg_ext |= ext << 4;
+				break;
+			case SSPP_VIG3:
+				mixercfg |= mix << 26;
+				mixercfg_ext |= ext << 6;
+				break;
+			case SSPP_RGB0:
+				mixercfg |= mix << 9;
+				mixercfg_ext |= ext << 8;
+				break;
+			case SSPP_RGB1:
+				mixercfg |= mix << 12;
+				mixercfg_ext |= ext << 10;
+				break;
+			case SSPP_RGB2:
+				mixercfg |= mix << 15;
+				mixercfg_ext |= ext << 12;
+				break;
+			case SSPP_RGB3:
+				mixercfg |= mix << 29;
+				mixercfg_ext |= ext << 14;
+				break;
+			case SSPP_DMA0:
+				mixercfg |= mix << 18;
+				mixercfg_ext |= ext << 16;
+				break;
+			case SSPP_DMA1:
+				mixercfg |= mix << 21;
+				mixercfg_ext |= ext << 18;
+				break;
+			case SSPP_DMA2:
+				mix = (i + 1) & 0xf;
+				mixercfg_ext2 |= mix << 0;
+				break;
+			case SSPP_DMA3:
+				mix = (i + 1) & 0xf;
+				mixercfg_ext2 |= mix << 4;
+				break;
+			case SSPP_CURSOR0:
+				mixercfg_ext |= ((i + 1) & 0xF) << 20;
+				break;
+			case SSPP_CURSOR1:
+				mixercfg_ext |= ((i + 1) & 0xF) << 26;
+				break;
+			default:
+				break;
+			}
+		}
+	}
+
+	SDE_REG_WRITE(c, CTL_LAYER(lm), mixercfg);
+	SDE_REG_WRITE(c, CTL_LAYER_EXT(lm), mixercfg_ext);
+	SDE_REG_WRITE(c, CTL_LAYER_EXT2(lm), mixercfg_ext2);
+}
+
+static void sde_hw_ctl_intf_cfg(struct sde_hw_ctl *ctx,
+		struct sde_hw_intf_cfg *cfg)
+{
+	struct sde_hw_blk_reg_map *c = &ctx->hw;
+	u32 intf_cfg = 0;
+
+	intf_cfg |= (cfg->intf & 0xF) << 4;
+
+	if (cfg->wb)
+		intf_cfg |= (cfg->wb & 0x3) + 2;
+
+	if (cfg->mode_3d) {
+		intf_cfg |= BIT(19);
+		intf_cfg |= (cfg->mode_3d - 0x1) << 20;
+	}
+
+	switch (cfg->intf_mode_sel) {
+	case SDE_CTL_MODE_SEL_VID:
+		intf_cfg &= ~BIT(17);
+		intf_cfg &= ~(0x3 << 15);
+		break;
+	case SDE_CTL_MODE_SEL_CMD:
+		intf_cfg |= BIT(17);
+		intf_cfg |= ((cfg->stream_sel & 0x3) << 15);
+		break;
+	default:
+		pr_err("unknown interface type %d\n", cfg->intf_mode_sel);
+		return;
+	}
+
+	SDE_REG_WRITE(c, CTL_TOP, intf_cfg);
+}
+
+static void _setup_ctl_ops(struct sde_hw_ctl_ops *ops,
+		unsigned long cap)
+{
+	ops->clear_pending_flush = sde_hw_ctl_clear_pending_flush;
+	ops->update_pending_flush = sde_hw_ctl_update_pending_flush;
+	ops->get_pending_flush = sde_hw_ctl_get_pending_flush;
+	ops->trigger_flush = sde_hw_ctl_trigger_flush;
+	ops->get_flush_register = sde_hw_ctl_get_flush_register;
+	ops->trigger_start = sde_hw_ctl_trigger_start;
+	ops->setup_intf_cfg = sde_hw_ctl_intf_cfg;
+	ops->reset = sde_hw_ctl_reset_control;
+	ops->wait_reset_status = sde_hw_ctl_wait_reset_status;
+	ops->clear_all_blendstages = sde_hw_ctl_clear_all_blendstages;
+	ops->setup_blendstage = sde_hw_ctl_setup_blendstage;
+	ops->get_bitmask_sspp = sde_hw_ctl_get_bitmask_sspp;
+	ops->get_bitmask_mixer = sde_hw_ctl_get_bitmask_mixer;
+	ops->get_bitmask_dspp = sde_hw_ctl_get_bitmask_dspp;
+	ops->get_bitmask_intf = sde_hw_ctl_get_bitmask_intf;
+	ops->get_bitmask_cdm = sde_hw_ctl_get_bitmask_cdm;
+	ops->get_bitmask_wb = sde_hw_ctl_get_bitmask_wb;
+}
+
+struct sde_hw_ctl *sde_hw_ctl_init(enum sde_ctl idx,
+		void __iomem *addr,
+		struct sde_mdss_cfg *m)
+{
+	struct sde_hw_ctl *c;
+	struct sde_ctl_cfg *cfg;
+
+	c = kzalloc(sizeof(*c), GFP_KERNEL);
+	if (!c)
+		return ERR_PTR(-ENOMEM);
+
+	cfg = _ctl_offset(idx, m, addr, &c->hw);
+	if (IS_ERR_OR_NULL(cfg)) {
+		kfree(c);
+		pr_err("failed to create sde_hw_ctl %d\n", idx);
+		return ERR_PTR(-EINVAL);
+	}
+
+	c->caps = cfg;
+	_setup_ctl_ops(&c->ops, c->caps->features);
+	c->idx = idx;
+	c->mixer_count = m->mixer_count;
+	c->mixer_hw_caps = m->mixer;
+
+	sde_dbg_reg_register_dump_range(SDE_DBG_NAME, cfg->name, c->hw.blk_off,
+			c->hw.blk_off + c->hw.length, c->hw.xin_id);
+
+	return c;
+}
+
+void sde_hw_ctl_destroy(struct sde_hw_ctl *ctx)
+{
+	kfree(ctx);
+}
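
Flushing in this CTL driver is a two-phase, software-cached protocol: callers collect per-block bits with the `get_bitmask_*` helpers and OR them into `pending_flush_mask`, then a single write pushes the mask to CTL_FLUSH and, for SW-controlled interfaces, CTL_START kicks the frame. A minimal sketch of one frame's sequence, assuming a `ctl` obtained from sde_hw_ctl_init() and example block ids:

u32 flush = 0;

ctl->ops.clear_pending_flush(ctl);

/* collect bits for the blocks touched this frame */
flush |= ctl->ops.get_bitmask_sspp(ctl, SSPP_VIG0);
flush |= ctl->ops.get_bitmask_mixer(ctl, LM_0);
ctl->ops.get_bitmask_intf(ctl, &flush, INTF_1);

ctl->ops.update_pending_flush(ctl, flush);
ctl->ops.trigger_flush(ctl);	/* write pending mask to CTL_FLUSH */
ctl->ops.trigger_start(ctl);	/* kick SW-controlled interfaces */
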
diff -Nruw linux-4.4.115/drivers/gpu/drm/msm/sde/sde_hw_ctl.h linux-4.4.115-fbx/drivers/gpu/drm/msm/sde/sde_hw_ctl.h
--- linux-4.4.115/drivers/gpu/drm/msm/sde/sde_hw_ctl.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/gpu/drm/msm/sde/sde_hw_ctl.h	2019-01-22 16:16:23.515246515 +0100
@@ -0,0 +1,204 @@
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _SDE_HW_CTL_H
+#define _SDE_HW_CTL_H
+
+#include "sde_hw_mdss.h"
+#include "sde_hw_util.h"
+#include "sde_hw_catalog.h"
+
+/**
+ * sde_ctl_mode_sel: Interface mode selection
+ * SDE_CTL_MODE_SEL_VID:    Video mode interface
+ * SDE_CTL_MODE_SEL_CMD:    Command mode interface
+ */
+enum sde_ctl_mode_sel {
+	SDE_CTL_MODE_SEL_VID = 0,
+	SDE_CTL_MODE_SEL_CMD
+};
+
+struct sde_hw_ctl;
+/**
+ * struct sde_hw_stage_cfg - blending stage cfg
+ * @stage: SSPP ids per mixer, per blend stage, per pipe slot
+ */
+struct sde_hw_stage_cfg {
+	enum sde_sspp stage[CRTC_DUAL_MIXERS][SDE_STAGE_MAX][PIPES_PER_STAGE];
+};
+
+/**
+ * struct sde_hw_intf_cfg - describes how the SDE writes data to the output interface
+ * @intf :                 Interface id
+ * @wb:                    Writeback id
+ * @mode_3d:               3d mux configuration
+ * @intf_mode_sel:         Interface mode, cmd / vid
+ * @stream_sel:            Stream selection for multi-stream interfaces
+ */
+struct sde_hw_intf_cfg {
+	enum sde_intf intf;
+	enum sde_wb wb;
+	enum sde_3d_blend_mode mode_3d;
+	enum sde_ctl_mode_sel intf_mode_sel;
+	int stream_sel;
+};
+
+/**
+ * struct sde_hw_ctl_ops - Interface to the ctl path Hw driver functions
+ * Assumption is these functions will be called after clocks are enabled
+ */
+struct sde_hw_ctl_ops {
+	/**
+	 * Kick off the hw operation for SW controlled interfaces
+	 * (DSI cmd mode and WB interfaces are SW controlled)
+	 * @ctx       : ctl path ctx pointer
+	 */
+	void (*trigger_start)(struct sde_hw_ctl *ctx);
+
+	/**
+	 * Clear the value of the cached pending_flush_mask
+	 * No effect on hardware
+	 * @ctx       : ctl path ctx pointer
+	 */
+	void (*clear_pending_flush)(struct sde_hw_ctl *ctx);
+
+	/**
+	 * Query the value of the cached pending_flush_mask
+	 * No effect on hardware
+	 * @ctx       : ctl path ctx pointer
+	 */
+	u32 (*get_pending_flush)(struct sde_hw_ctl *ctx);
+
+	/**
+	 * OR in the given flushbits to the cached pending_flush_mask
+	 * No effect on hardware
+	 * @ctx       : ctl path ctx pointer
+	 * @flushbits : module flushmask
+	 */
+	void (*update_pending_flush)(struct sde_hw_ctl *ctx,
+		u32 flushbits);
+
+	/**
+	 * Write the value of the pending_flush_mask to hardware
+	 * @ctx       : ctl path ctx pointer
+	 */
+	void (*trigger_flush)(struct sde_hw_ctl *ctx);
+
+	/**
+	 * Read the value of the flush register
+	 * @ctx       : ctl path ctx pointer
+	 * Return: value of the ctl flush register.
+	 */
+	u32 (*get_flush_register)(struct sde_hw_ctl *ctx);
+
+	/**
+	 * Setup ctl_path interface config
+	 * @ctx    : ctl path ctx pointer
+	 * @cfg    : interface config structure pointer
+	 */
+	void (*setup_intf_cfg)(struct sde_hw_ctl *ctx,
+		struct sde_hw_intf_cfg *cfg);
+
+	int (*reset)(struct sde_hw_ctl *c);
+
+	/*
+	 * wait_reset_status - checks ctl reset status
+	 * @ctx       : ctl path ctx pointer
+	 *
+	 * This function checks the ctl reset status bit.
+	 * If the reset bit is set, it keeps polling the status till the hw
+	 * reset is complete.
+	 * Returns: 0 on success or -error if reset incomplete within interval
+	 */
+	int (*wait_reset_status)(struct sde_hw_ctl *ctx);
+
+	uint32_t (*get_bitmask_sspp)(struct sde_hw_ctl *ctx,
+		enum sde_sspp blk);
+
+	uint32_t (*get_bitmask_mixer)(struct sde_hw_ctl *ctx,
+		enum sde_lm blk);
+
+	int (*get_bitmask_dspp)(struct sde_hw_ctl *ctx,
+		u32 *flushbits,
+		enum sde_dspp blk);
+
+	int (*get_bitmask_intf)(struct sde_hw_ctl *ctx,
+		u32 *flushbits,
+		enum sde_intf blk);
+
+	int (*get_bitmask_cdm)(struct sde_hw_ctl *ctx,
+		u32 *flushbits,
+		enum sde_cdm blk);
+
+	int (*get_bitmask_wb)(struct sde_hw_ctl *ctx,
+		u32 *flushbits,
+		enum sde_wb blk);
+
+	/**
+	 * Set all blend stages to disabled
+	 * @ctx       : ctl path ctx pointer
+	 */
+	void (*clear_all_blendstages)(struct sde_hw_ctl *ctx);
+
+	/**
+	 * Configure layer mixer to pipe configuration
+	 * @ctx       : ctl path ctx pointer
+	 * @lm        : layer mixer enumeration
+	 * @cfg       : blend stage configuration
+	 */
+	void (*setup_blendstage)(struct sde_hw_ctl *ctx,
+		enum sde_lm lm, struct sde_hw_stage_cfg *cfg, u32 index);
+};
+
+/**
+ * struct sde_hw_ctl : CTL PATH driver object
+ * @hw: block register map object
+ * @idx: control path index
+ * @caps: control path capabilities
+ * @mixer_count: number of mixers
+ * @mixer_hw_caps: mixer hardware capabilities
+ * @pending_flush_mask: storage for pending ctl_flush managed via ops
+ * @ops: operation list
+ */
+struct sde_hw_ctl {
+	/* base */
+	struct sde_hw_blk_reg_map hw;
+
+	/* ctl path */
+	int idx;
+	const struct sde_ctl_cfg *caps;
+	int mixer_count;
+	const struct sde_lm_cfg *mixer_hw_caps;
+	u32 pending_flush_mask;
+
+	/* ops */
+	struct sde_hw_ctl_ops ops;
+};
+
+/**
+ * sde_hw_ctl_init(): Initializes the ctl_path hw driver object.
+ * Should be called before accessing any ctl path register.
+ * @idx:  ctl_path index for which driver object is required
+ * @addr: mapped register io address of MDP
+ * @m :   pointer to mdss catalog data
+ */
+struct sde_hw_ctl *sde_hw_ctl_init(enum sde_ctl idx,
+		void __iomem *addr,
+		struct sde_mdss_cfg *m);
+
+/**
+ * sde_hw_ctl_destroy(): Destroys ctl driver context
+ * Should be called to free the context.
+ */
+void sde_hw_ctl_destroy(struct sde_hw_ctl *ctx);
+
+#endif /*_SDE_HW_CTL_H */
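
`sde_hw_stage_cfg` is a plain three-dimensional table: for each mixer of the CRTC, each blend stage, and each pipe slot within the stage, it names the SSPP feeding that slot (slots left zeroed are empty). A sketch of filling it for a single mixer with one pipe per stage, assuming a `ctl` from sde_hw_ctl_init() and example pipes:

struct sde_hw_stage_cfg stage_cfg;

memset(&stage_cfg, 0, sizeof(stage_cfg));

/* mixer 0: base layer at stage 0, one overlay at stage 1 */
stage_cfg.stage[0][0][0] = SSPP_RGB0;
stage_cfg.stage[0][1][0] = SSPP_VIG0;

ctl->ops.setup_blendstage(ctl, LM_0, &stage_cfg, 0);
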
diff -Nruw linux-4.4.115/drivers/gpu/drm/msm/sde/sde_hw_dspp.c linux-4.4.115-fbx/drivers/gpu/drm/msm/sde/sde_hw_dspp.c
--- linux-4.4.115/drivers/gpu/drm/msm/sde/sde_hw_dspp.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/gpu/drm/msm/sde/sde_hw_dspp.c	2019-10-29 09:26:23.641203159 +0100
@@ -0,0 +1,125 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#include <drm/msm_drm_pp.h>
+#include "sde_hw_mdss.h"
+#include "sde_hwio.h"
+#include "sde_hw_catalog.h"
+#include "sde_hw_dspp.h"
+#include "sde_hw_color_processing.h"
+#include "sde_dbg.h"
+
+static struct sde_dspp_cfg *_dspp_offset(enum sde_dspp dspp,
+		struct sde_mdss_cfg *m,
+		void __iomem *addr,
+		struct sde_hw_blk_reg_map *b)
+{
+	int i;
+
+	for (i = 0; i < m->dspp_count; i++) {
+		if (dspp == m->dspp[i].id) {
+			b->base_off = addr;
+			b->blk_off = m->dspp[i].base;
+			b->length = m->dspp[i].len;
+			b->hwversion = m->hwversion;
+			b->log_mask = SDE_DBG_MASK_DSPP;
+			return &m->dspp[i];
+		}
+	}
+
+	return ERR_PTR(-EINVAL);
+}
+
+void sde_dspp_setup_histogram(struct sde_hw_dspp *ctx, void *cfg)
+{
+}
+
+void sde_dspp_read_histogram(struct sde_hw_dspp *ctx, void *cfg)
+{
+}
+
+void sde_dspp_update_igc(struct sde_hw_dspp *ctx, void *cfg)
+{
+}
+
+void sde_dspp_setup_sharpening(struct sde_hw_dspp *ctx, void *cfg)
+{
+}
+
+void sde_dspp_setup_danger_safe(struct sde_hw_dspp *ctx, void *cfg)
+{
+}
+
+void sde_dspp_setup_dither(struct sde_hw_dspp *ctx, void *cfg)
+{
+}
+
+static void _setup_dspp_ops(struct sde_hw_dspp *c, unsigned long features)
+{
+	int i = 0;
+
+	for (i = 0; i < SDE_DSPP_MAX; i++) {
+		if (!test_bit(i, &features))
+			continue;
+		switch (i) {
+		case SDE_DSPP_PCC:
+			if (c->cap->sblk->pcc.version ==
+				(SDE_COLOR_PROCESS_VER(0x1, 0x7)))
+				c->ops.setup_pcc = sde_setup_dspp_pcc_v1_7;
+			break;
+		case SDE_DSPP_HSIC:
+			if (c->cap->sblk->hsic.version ==
+				(SDE_COLOR_PROCESS_VER(0x1, 0x7)))
+				c->ops.setup_hue = sde_setup_dspp_pa_hue_v1_7;
+			break;
+		case SDE_DSPP_VLUT:
+			if (c->cap->sblk->vlut.version ==
+				(SDE_COLOR_PROCESS_VER(0x1, 0x7))) {
+				c->ops.setup_vlut = sde_setup_dspp_pa_vlut_v1_7;
+			}
+			break;
+		default:
+			break;
+		}
+	}
+}
+
+struct sde_hw_dspp *sde_hw_dspp_init(enum sde_dspp idx,
+			void __iomem *addr,
+			struct sde_mdss_cfg *m)
+{
+	struct sde_hw_dspp *c;
+	struct sde_dspp_cfg *cfg;
+
+	c = kzalloc(sizeof(*c), GFP_KERNEL);
+	if (!c)
+		return ERR_PTR(-ENOMEM);
+
+	cfg = _dspp_offset(idx, m, addr, &c->hw);
+	if (IS_ERR_OR_NULL(cfg)) {
+		kfree(c);
+		return ERR_PTR(-EINVAL);
+	}
+
+	/* Assign ops */
+	c->idx = idx;
+	c->cap = cfg;
+	_setup_dspp_ops(c, c->cap->features);
+
+	sde_dbg_reg_register_dump_range(SDE_DBG_NAME, cfg->name, c->hw.blk_off,
+			c->hw.blk_off + c->hw.length, c->hw.xin_id);
+
+	return c;
+}
+
+void sde_hw_dspp_destroy(struct sde_hw_dspp *dspp)
+{
+	kfree(dspp);
+}
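
Because _setup_dspp_ops() only installs a hook when the catalog advertises the feature at a matching version, every entry in `dspp->ops` must be treated as optional by callers. A minimal guard sketch, assuming a `dspp` returned by sde_hw_dspp_init(), a `struct drm_msm_pcc pcc_data` filled in elsewhere, and that `struct sde_hw_cp_cfg` exposes just the `payload`/`len` pair used by the v1.7 handlers above:

struct sde_hw_cp_cfg hw_cfg = {
	.payload = &pcc_data,
	.len	 = sizeof(pcc_data),
};

if (dspp->ops.setup_pcc)	/* NULL when PCC is absent or mismatched */
	dspp->ops.setup_pcc(dspp, &hw_cfg);
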
diff -Nruw linux-4.4.115/drivers/gpu/drm/msm/sde/sde_hw_dspp.h linux-4.4.115-fbx/drivers/gpu/drm/msm/sde/sde_hw_dspp.h
--- linux-4.4.115/drivers/gpu/drm/msm/sde/sde_hw_dspp.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/gpu/drm/msm/sde/sde_hw_dspp.h	2019-01-22 16:16:23.515246515 +0100
@@ -0,0 +1,183 @@
+/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _SDE_HW_DSPP_H
+#define _SDE_HW_DSPP_H
+
+struct sde_hw_dspp;
+
+/**
+ * struct sde_hw_dspp_ops - interface to the dspp hardware driver functions
+ * Caller must call the init function to get the dspp context for each dspp
+ * Assumption is these functions will be called after clocks are enabled
+ */
+struct sde_hw_dspp_ops {
+	/**
+	 * setup_histogram - setup dspp histogram
+	 * @ctx: Pointer to dspp context
+	 * @cfg: Pointer to configuration
+	 */
+	void (*setup_histogram)(struct sde_hw_dspp *ctx, void *cfg);
+
+	/**
+	 * read_histogram - read dspp histogram
+	 * @ctx: Pointer to dspp context
+	 * @cfg: Pointer to configuration
+	 */
+	void (*read_histogram)(struct sde_hw_dspp *ctx, void *cfg);
+
+	/**
+	 * setup_igc - update dspp igc
+	 * @ctx: Pointer to dspp context
+	 * @cfg: Pointer to configuration
+	 */
+	void (*setup_igc)(struct sde_hw_dspp *ctx, void *cfg);
+
+	/**
+	 * setup_pa - setup dspp pa
+	 * @ctx: Pointer to dspp context
+	 * @cfg: Pointer to configuration
+	 */
+	void (*setup_pa)(struct sde_hw_dspp *dspp, void *cfg);
+
+	/**
+	 * setup_pcc - setup dspp pcc
+	 * @ctx: Pointer to dspp context
+	 * @cfg: Pointer to configuration
+	 */
+	void (*setup_pcc)(struct sde_hw_dspp *ctx, void *cfg);
+
+	/**
+	 * setup_sharpening - setup dspp sharpening
+	 * @ctx: Pointer to dspp context
+	 * @cfg: Pointer to configuration
+	 */
+	void (*setup_sharpening)(struct sde_hw_dspp *ctx, void *cfg);
+
+	/**
+	 * setup_pa_memcolor - setup dspp memcolor
+	 * @ctx: Pointer to dspp context
+	 * @cfg: Pointer to configuration
+	 */
+	void (*setup_pa_memcolor)(struct sde_hw_dspp *ctx, void *cfg);
+
+	/**
+	 * setup_sixzone - setup dspp six zone
+	 * @ctx: Pointer to dspp context
+	 * @cfg: Pointer to configuration
+	 */
+	void (*setup_sixzone)(struct sde_hw_dspp *dspp, void *cfg);
+
+	/**
+	 * setup_danger_safe - setup danger safe LUTS
+	 * @ctx: Pointer to dspp context
+	 * @cfg: Pointer to configuration
+	 */
+	void (*setup_danger_safe)(struct sde_hw_dspp *ctx, void *cfg);
+
+	/**
+	 * setup_dither - setup dspp dither
+	 * @ctx: Pointer to dspp context
+	 * @cfg: Pointer to configuration
+	 */
+	void (*setup_dither)(struct sde_hw_dspp *ctx, void *cfg);
+
+	/**
+	 * setup_hue - setup dspp PA hue
+	 * @ctx: Pointer to dspp context
+	 * @cfg: Pointer to configuration
+	 */
+	void (*setup_hue)(struct sde_hw_dspp *ctx, void *cfg);
+
+	/**
+	 * setup_sat - setup dspp PA saturation
+	 * @ctx: Pointer to dspp context
+	 * @cfg: Pointer to configuration
+	 */
+	void (*setup_sat)(struct sde_hw_dspp *ctx, void *cfg);
+
+	/**
+	 * setup_val - setup dspp PA value
+	 * @ctx: Pointer to dspp context
+	 * @cfg: Pointer to configuration
+	 */
+	void (*setup_val)(struct sde_hw_dspp *ctx, void *cfg);
+
+	/**
+	 * setup_cont - setup dspp PA contrast
+	 * @ctx: Pointer to dspp context
+	 * @cfg: Pointer to configuration
+	 */
+	void (*setup_cont)(struct sde_hw_dspp *ctx, void *cfg);
+
+	/**
+	 * setup_vlut - setup dspp PA VLUT
+	 * @ctx: Pointer to dspp context
+	 * @cfg: Pointer to configuration
+	 */
+	void (*setup_vlut)(struct sde_hw_dspp *ctx, void *cfg);
+
+	/**
+	 * setup_gc - update dspp gc
+	 * @ctx: Pointer to dspp context
+	 * @cfg: Pointer to configuration
+	 */
+	void (*setup_gc)(struct sde_hw_dspp *ctx, void *cfg);
+
+	/**
+	 * setup_gamut - update dspp gamut
+	 * @ctx: Pointer to dspp context
+	 * @cfg: Pointer to configuration
+	 */
+	void (*setup_gamut)(struct sde_hw_dspp *ctx, void *cfg);
+};
+
+/**
+ * struct sde_hw_dspp - dspp description
+ * @hw:           Block register map object
+ * @idx:          DSPP index
+ * @cap:          Pointer to DSPP capability description
+ * @ops:          Pointer to operations possible for this DSPP
+ */
+struct sde_hw_dspp {
+	/* base */
+	struct sde_hw_blk_reg_map hw;
+
+	/* dspp */
+	enum sde_dspp idx;
+	const struct sde_dspp_cfg *cap;
+
+	/* Ops */
+	struct sde_hw_dspp_ops ops;
+};
+
+/**
+ * sde_hw_dspp_init - initializes the dspp hw driver object.
+ * Should be called once before accessing each DSPP.
+ * @idx:  DSPP index for which driver object is required
+ * @addr: Mapped register io address of MDP
+ * @m:    Pointer to mdss catalog data
+ */
+struct sde_hw_dspp *sde_hw_dspp_init(enum sde_dspp idx,
+			void __iomem *addr,
+			struct sde_mdss_cfg *m);
+
+/**
+ * sde_hw_dspp_destroy(): Destroys DSPP driver context
+ * @dspp:   Pointer to DSPP driver context
+ */
+void sde_hw_dspp_destroy(struct sde_hw_dspp *dspp);
+
+#endif /*_SDE_HW_DSPP_H */
diff -Nruw linux-4.4.115/drivers/gpu/drm/msm/sde/sde_hw_interrupts.c linux-4.4.115-fbx/drivers/gpu/drm/msm/sde/sde_hw_interrupts.c
--- linux-4.4.115/drivers/gpu/drm/msm/sde/sde_hw_interrupts.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/gpu/drm/msm/sde/sde_hw_interrupts.c	2019-01-22 16:16:23.515246515 +0100
@@ -0,0 +1,986 @@
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/bitops.h>
+#include <linux/slab.h>
+
+#include "sde_kms.h"
+#include "sde_hw_interrupts.h"
+#include "sde_hw_util.h"
+#include "sde_hw_mdss.h"
+
+/**
+ * Register offsets in MDSS register file for the interrupt registers
+ * w.r.t. the MDSS base
+ */
+#define HW_INTR_STATUS			0x0010
+#define MDP_SSPP_TOP0_OFF		0x1000
+#define MDP_INTF_0_OFF			0x6B000
+#define MDP_INTF_1_OFF			0x6B800
+#define MDP_INTF_2_OFF			0x6C000
+#define MDP_INTF_3_OFF			0x6C800
+#define MDP_INTF_4_OFF			0x6D000
+
+/**
+ * WB interrupt status bit definitions
+ */
+#define SDE_INTR_WB_0_DONE BIT(0)
+#define SDE_INTR_WB_1_DONE BIT(1)
+#define SDE_INTR_WB_2_DONE BIT(4)
+
+/**
+ * WDOG timer interrupt status bit definitions
+ */
+#define SDE_INTR_WD_TIMER_0_DONE BIT(2)
+#define SDE_INTR_WD_TIMER_1_DONE BIT(3)
+#define SDE_INTR_WD_TIMER_2_DONE BIT(5)
+#define SDE_INTR_WD_TIMER_3_DONE BIT(6)
+#define SDE_INTR_WD_TIMER_4_DONE BIT(7)
+
+/**
+ * Pingpong interrupt status bit definitions
+ */
+#define SDE_INTR_PING_PONG_0_DONE BIT(8)
+#define SDE_INTR_PING_PONG_1_DONE BIT(9)
+#define SDE_INTR_PING_PONG_2_DONE BIT(10)
+#define SDE_INTR_PING_PONG_3_DONE BIT(11)
+#define SDE_INTR_PING_PONG_0_RD_PTR BIT(12)
+#define SDE_INTR_PING_PONG_1_RD_PTR BIT(13)
+#define SDE_INTR_PING_PONG_2_RD_PTR BIT(14)
+#define SDE_INTR_PING_PONG_3_RD_PTR BIT(15)
+#define SDE_INTR_PING_PONG_0_WR_PTR BIT(16)
+#define SDE_INTR_PING_PONG_1_WR_PTR BIT(17)
+#define SDE_INTR_PING_PONG_2_WR_PTR BIT(18)
+#define SDE_INTR_PING_PONG_3_WR_PTR BIT(19)
+#define SDE_INTR_PING_PONG_0_AUTOREFRESH_DONE BIT(20)
+#define SDE_INTR_PING_PONG_1_AUTOREFRESH_DONE BIT(21)
+#define SDE_INTR_PING_PONG_2_AUTOREFRESH_DONE BIT(22)
+#define SDE_INTR_PING_PONG_3_AUTOREFRESH_DONE BIT(23)
+
+/**
+ * Interface interrupt status bit definitions
+ */
+#define SDE_INTR_INTF_0_UNDERRUN BIT(24)
+#define SDE_INTR_INTF_1_UNDERRUN BIT(26)
+#define SDE_INTR_INTF_2_UNDERRUN BIT(28)
+#define SDE_INTR_INTF_3_UNDERRUN BIT(30)
+#define SDE_INTR_INTF_0_VSYNC BIT(25)
+#define SDE_INTR_INTF_1_VSYNC BIT(27)
+#define SDE_INTR_INTF_2_VSYNC BIT(29)
+#define SDE_INTR_INTF_3_VSYNC BIT(31)
+
+/**
+ * Pingpong Secondary interrupt status bit definitions
+ */
+#define SDE_INTR_PING_PONG_S0_AUTOREFRESH_DONE BIT(0)
+#define SDE_INTR_PING_PONG_S0_WR_PTR BIT(4)
+#define SDE_INTR_PING_PONG_S0_RD_PTR BIT(8)
+#define SDE_INTR_PING_PONG_S0_TEAR_DETECTED BIT(22)
+#define SDE_INTR_PING_PONG_S0_TE_DETECTED BIT(28)
+
+/**
+ * Pingpong TEAR detection interrupt status bit definitions
+ */
+#define SDE_INTR_PING_PONG_0_TEAR_DETECTED BIT(16)
+#define SDE_INTR_PING_PONG_1_TEAR_DETECTED BIT(17)
+#define SDE_INTR_PING_PONG_2_TEAR_DETECTED BIT(18)
+#define SDE_INTR_PING_PONG_3_TEAR_DETECTED BIT(19)
+
+/**
+ * Pingpong TE detection interrupt status bit definitions
+ */
+#define SDE_INTR_PING_PONG_0_TE_DETECTED BIT(24)
+#define SDE_INTR_PING_PONG_1_TE_DETECTED BIT(25)
+#define SDE_INTR_PING_PONG_2_TE_DETECTED BIT(26)
+#define SDE_INTR_PING_PONG_3_TE_DETECTED BIT(27)
+
+/**
+ * Concurrent WB overflow interrupt status bit definitions
+ */
+#define SDE_INTR_CWB_2_OVERFLOW BIT(14)
+#define SDE_INTR_CWB_3_OVERFLOW BIT(15)
+
+/**
+ * Histogram VIG done interrupt status bit definitions
+ */
+#define SDE_INTR_HIST_VIG_0_DONE BIT(0)
+#define SDE_INTR_HIST_VIG_1_DONE BIT(4)
+#define SDE_INTR_HIST_VIG_2_DONE BIT(8)
+#define SDE_INTR_HIST_VIG_3_DONE BIT(10)
+
+/**
+ * Histogram VIG reset Sequence done interrupt status bit definitions
+ */
+#define SDE_INTR_HIST_VIG_0_RSTSEQ_DONE BIT(1)
+#define SDE_INTR_HIST_VIG_1_RSTSEQ_DONE BIT(5)
+#define SDE_INTR_HIST_VIG_2_RSTSEQ_DONE BIT(9)
+#define SDE_INTR_HIST_VIG_3_RSTSEQ_DONE BIT(11)
+
+/**
+ * Histogram DSPP done interrupt status bit definitions
+ */
+#define SDE_INTR_HIST_DSPP_0_DONE BIT(12)
+#define SDE_INTR_HIST_DSPP_1_DONE BIT(16)
+#define SDE_INTR_HIST_DSPP_2_DONE BIT(20)
+#define SDE_INTR_HIST_DSPP_3_DONE BIT(22)
+
+/**
+ * Histogram DSPP reset Sequence done interrupt status bit definitions
+ */
+#define SDE_INTR_HIST_DSPP_0_RSTSEQ_DONE BIT(13)
+#define SDE_INTR_HIST_DSPP_1_RSTSEQ_DONE BIT(17)
+#define SDE_INTR_HIST_DSPP_2_RSTSEQ_DONE BIT(21)
+#define SDE_INTR_HIST_DSPP_3_RSTSEQ_DONE BIT(23)
+
+/**
+ * INTF interrupt status bit definitions
+ */
+#define SDE_INTR_VIDEO_INTO_STATIC BIT(0)
+#define SDE_INTR_VIDEO_OUTOF_STATIC BIT(1)
+#define SDE_INTR_DSICMD_0_INTO_STATIC BIT(2)
+#define SDE_INTR_DSICMD_0_OUTOF_STATIC BIT(3)
+#define SDE_INTR_DSICMD_1_INTO_STATIC BIT(4)
+#define SDE_INTR_DSICMD_1_OUTOF_STATIC BIT(5)
+#define SDE_INTR_DSICMD_2_INTO_STATIC BIT(6)
+#define SDE_INTR_DSICMD_2_OUTOF_STATIC BIT(7)
+#define SDE_INTR_PROG_LINE BIT(8)
+
+/**
+ * struct sde_intr_reg - one set of SDE interrupt clear/enable/status registers
+ * @clr_off:	offset to CLEAR reg
+ * @en_off:	offset to ENABLE reg
+ * @status_off:	offset to STATUS reg
+ */
+struct sde_intr_reg {
+	u32 clr_off;
+	u32 en_off;
+	u32 status_off;
+};
+
+/**
+ * struct sde_irq_type - maps each irq to its source interface/block
+ * @intr_type:		type of interrupt listed in sde_intr_type
+ * @instance_idx:	instance index of the associated HW block in SDE
+ * @irq_mask:		corresponding bit in the interrupt status reg
+ * @reg_idx:		which reg set to use
+ */
+struct sde_irq_type {
+	u32 intr_type;
+	u32 instance_idx;
+	u32 irq_mask;
+	u32 reg_idx;
+};
+
+/**
+ * List of SDE interrupt registers
+ */
+static const struct sde_intr_reg sde_intr_set[] = {
+	{
+		MDP_SSPP_TOP0_OFF+INTR_CLEAR,
+		MDP_SSPP_TOP0_OFF+INTR_EN,
+		MDP_SSPP_TOP0_OFF+INTR_STATUS
+	},
+	{
+		MDP_SSPP_TOP0_OFF+INTR2_CLEAR,
+		MDP_SSPP_TOP0_OFF+INTR2_EN,
+		MDP_SSPP_TOP0_OFF+INTR2_STATUS
+	},
+	{
+		MDP_SSPP_TOP0_OFF+HIST_INTR_CLEAR,
+		MDP_SSPP_TOP0_OFF+HIST_INTR_EN,
+		MDP_SSPP_TOP0_OFF+HIST_INTR_STATUS
+	},
+	{
+		MDP_INTF_0_OFF+INTF_INTR_CLEAR,
+		MDP_INTF_0_OFF+INTF_INTR_EN,
+		MDP_INTF_0_OFF+INTF_INTR_STATUS
+	},
+	{
+		MDP_INTF_1_OFF+INTF_INTR_CLEAR,
+		MDP_INTF_1_OFF+INTF_INTR_EN,
+		MDP_INTF_1_OFF+INTF_INTR_STATUS
+	},
+	{
+		MDP_INTF_2_OFF+INTF_INTR_CLEAR,
+		MDP_INTF_2_OFF+INTF_INTR_EN,
+		MDP_INTF_2_OFF+INTF_INTR_STATUS
+	},
+	{
+		MDP_INTF_3_OFF+INTF_INTR_CLEAR,
+		MDP_INTF_3_OFF+INTF_INTR_EN,
+		MDP_INTF_3_OFF+INTF_INTR_STATUS
+	},
+	{
+		MDP_INTF_4_OFF+INTF_INTR_CLEAR,
+		MDP_INTF_4_OFF+INTF_INTR_EN,
+		MDP_INTF_4_OFF+INTF_INTR_STATUS
+	}
+};
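
Each `sde_irq_map` entry below pairs a status bit with a `reg_idx` into `sde_intr_set`, so resolving an interrupt source is a linear scan of the map followed by one hop into the register table. A sketch of the lookup this layout implies; the helper name is hypothetical and it would sit after the table it scans:

/* Return the irq_idx matching (intr_type, instance_idx), or -EINVAL. */
static int sde_irq_idx_lookup(u32 intr_type, u32 instance_idx)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(sde_irq_map); i++)
		if (sde_irq_map[i].intr_type == intr_type &&
		    sde_irq_map[i].instance_idx == instance_idx)
			return i;

	return -EINVAL;
}
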
+
+/**
+ * IRQ mapping table - used to look up the irq_idx whose entry has a
+ *                     matching interrupt type and instance index.
+ */
+static const struct sde_irq_type sde_irq_map[] = {
+	/* BEGIN MAP_RANGE: 0-31, INTR */
+	/* irq_idx: 0-3 */
+	{ SDE_IRQ_TYPE_WB_ROT_COMP, WB_0, SDE_INTR_WB_0_DONE, 0},
+	{ SDE_IRQ_TYPE_WB_ROT_COMP, WB_1, SDE_INTR_WB_1_DONE, 0},
+	{ SDE_IRQ_TYPE_WD_TIMER, WD_TIMER_0, SDE_INTR_WD_TIMER_0_DONE, 0},
+	{ SDE_IRQ_TYPE_WD_TIMER, WD_TIMER_1, SDE_INTR_WD_TIMER_1_DONE, 0},
+	/* irq_idx: 4-7 */
+	{ SDE_IRQ_TYPE_WB_WFD_COMP, WB_2, SDE_INTR_WB_2_DONE, 0},
+	{ SDE_IRQ_TYPE_WD_TIMER, WD_TIMER_2, SDE_INTR_WD_TIMER_2_DONE, 0},
+	{ SDE_IRQ_TYPE_WD_TIMER, WD_TIMER_3, SDE_INTR_WD_TIMER_3_DONE, 0},
+	{ SDE_IRQ_TYPE_WD_TIMER, WD_TIMER_4, SDE_INTR_WD_TIMER_4_DONE, 0},
+	/* irq_idx: 8-11 */
+	{ SDE_IRQ_TYPE_PING_PONG_COMP, PINGPONG_0,
+		SDE_INTR_PING_PONG_0_DONE, 0},
+	{ SDE_IRQ_TYPE_PING_PONG_COMP, PINGPONG_1,
+		SDE_INTR_PING_PONG_1_DONE, 0},
+	{ SDE_IRQ_TYPE_PING_PONG_COMP, PINGPONG_2,
+		SDE_INTR_PING_PONG_2_DONE, 0},
+	{ SDE_IRQ_TYPE_PING_PONG_COMP, PINGPONG_3,
+		SDE_INTR_PING_PONG_3_DONE, 0},
+	/* irq_idx: 12-15 */
+	{ SDE_IRQ_TYPE_PING_PONG_RD_PTR, PINGPONG_0,
+		SDE_INTR_PING_PONG_0_RD_PTR, 0},
+	{ SDE_IRQ_TYPE_PING_PONG_RD_PTR, PINGPONG_1,
+		SDE_INTR_PING_PONG_1_RD_PTR, 0},
+	{ SDE_IRQ_TYPE_PING_PONG_RD_PTR, PINGPONG_2,
+		SDE_INTR_PING_PONG_2_RD_PTR, 0},
+	{ SDE_IRQ_TYPE_PING_PONG_RD_PTR, PINGPONG_3,
+		SDE_INTR_PING_PONG_3_RD_PTR, 0},
+	/* irq_idx: 16-19 */
+	{ SDE_IRQ_TYPE_PING_PONG_WR_PTR, PINGPONG_0,
+		SDE_INTR_PING_PONG_0_WR_PTR, 0},
+	{ SDE_IRQ_TYPE_PING_PONG_WR_PTR, PINGPONG_1,
+		SDE_INTR_PING_PONG_1_WR_PTR, 0},
+	{ SDE_IRQ_TYPE_PING_PONG_WR_PTR, PINGPONG_2,
+		SDE_INTR_PING_PONG_2_WR_PTR, 0},
+	{ SDE_IRQ_TYPE_PING_PONG_WR_PTR, PINGPONG_3,
+		SDE_INTR_PING_PONG_3_WR_PTR, 0},
+	/* irq_idx: 20-23 */
+	{ SDE_IRQ_TYPE_PING_PONG_AUTO_REF, PINGPONG_0,
+		SDE_INTR_PING_PONG_0_AUTOREFRESH_DONE, 0},
+	{ SDE_IRQ_TYPE_PING_PONG_AUTO_REF, PINGPONG_1,
+		SDE_INTR_PING_PONG_1_AUTOREFRESH_DONE, 0},
+	{ SDE_IRQ_TYPE_PING_PONG_AUTO_REF, PINGPONG_2,
+		SDE_INTR_PING_PONG_2_AUTOREFRESH_DONE, 0},
+	{ SDE_IRQ_TYPE_PING_PONG_AUTO_REF, PINGPONG_3,
+		SDE_INTR_PING_PONG_3_AUTOREFRESH_DONE, 0},
+	/* irq_idx: 24-27 */
+	{ SDE_IRQ_TYPE_INTF_UNDER_RUN, INTF_0, SDE_INTR_INTF_0_UNDERRUN, 0},
+	{ SDE_IRQ_TYPE_INTF_VSYNC, INTF_0, SDE_INTR_INTF_0_VSYNC, 0},
+	{ SDE_IRQ_TYPE_INTF_UNDER_RUN, INTF_1, SDE_INTR_INTF_1_UNDERRUN, 0},
+	{ SDE_IRQ_TYPE_INTF_VSYNC, INTF_1, SDE_INTR_INTF_1_VSYNC, 0},
+	/* irq_idx: 28-31 */
+	{ SDE_IRQ_TYPE_INTF_UNDER_RUN, INTF_2, SDE_INTR_INTF_2_UNDERRUN, 0},
+	{ SDE_IRQ_TYPE_INTF_VSYNC, INTF_2, SDE_INTR_INTF_2_VSYNC, 0},
+	{ SDE_IRQ_TYPE_INTF_UNDER_RUN, INTF_3, SDE_INTR_INTF_3_UNDERRUN, 0},
+	{ SDE_IRQ_TYPE_INTF_VSYNC, INTF_3, SDE_INTR_INTF_3_VSYNC, 0},
+
+	/* BEGIN MAP_RANGE: 32-63, INTR2 */
+	/* irq_idx: 32-35 */
+	{ SDE_IRQ_TYPE_PING_PONG_AUTO_REF, PINGPONG_S0,
+		SDE_INTR_PING_PONG_S0_AUTOREFRESH_DONE, 1},
+	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 1},
+	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 1},
+	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 1},
+	/* irq_idx: 36-39 */
+	{ SDE_IRQ_TYPE_PING_PONG_WR_PTR, PINGPONG_S0,
+		SDE_INTR_PING_PONG_S0_WR_PTR, 1},
+	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 1},
+	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 1},
+	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 1},
+	/* irq_idx: 40-43 */
+	{ SDE_IRQ_TYPE_PING_PONG_RD_PTR, PINGPONG_S0,
+		SDE_INTR_PING_PONG_S0_RD_PTR, 1},
+	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 1},
+	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 1},
+	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 1},
+	/* irq_idx: 44-47 */
+	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 1},
+	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 1},
+	{ SDE_IRQ_TYPE_CWB_OVERFLOW, CWB_2, SDE_INTR_CWB_2_OVERFLOW, 1},
+	{ SDE_IRQ_TYPE_CWB_OVERFLOW, CWB_3, SDE_INTR_CWB_3_OVERFLOW, 1},
+	/* irq_idx: 48-51 */
+	{ SDE_IRQ_TYPE_PING_PONG_TEAR_CHECK, PINGPONG_0,
+		SDE_INTR_PING_PONG_0_TEAR_DETECTED, 1},
+	{ SDE_IRQ_TYPE_PING_PONG_TEAR_CHECK, PINGPONG_1,
+		SDE_INTR_PING_PONG_1_TEAR_DETECTED, 1},
+	{ SDE_IRQ_TYPE_PING_PONG_TEAR_CHECK, PINGPONG_2,
+		SDE_INTR_PING_PONG_2_TEAR_DETECTED, 1},
+	{ SDE_IRQ_TYPE_PING_PONG_TEAR_CHECK, PINGPONG_3,
+		SDE_INTR_PING_PONG_3_TEAR_DETECTED, 1},
+	/* irq_idx: 52-55 */
+	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 1},
+	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 1},
+	{ SDE_IRQ_TYPE_PING_PONG_TEAR_CHECK, PINGPONG_S0,
+		SDE_INTR_PING_PONG_S0_TEAR_DETECTED, 1},
+	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 1},
+	/* irq_idx: 56-59 */
+	{ SDE_IRQ_TYPE_PING_PONG_TE_CHECK, PINGPONG_0,
+		SDE_INTR_PING_PONG_0_TE_DETECTED, 1},
+	{ SDE_IRQ_TYPE_PING_PONG_TE_CHECK, PINGPONG_1,
+		SDE_INTR_PING_PONG_1_TE_DETECTED, 1},
+	{ SDE_IRQ_TYPE_PING_PONG_TE_CHECK, PINGPONG_2,
+		SDE_INTR_PING_PONG_2_TE_DETECTED, 1},
+	{ SDE_IRQ_TYPE_PING_PONG_TE_CHECK, PINGPONG_3,
+		SDE_INTR_PING_PONG_3_TE_DETECTED, 1},
+	/* irq_idx: 60-63 */
+	{ SDE_IRQ_TYPE_PING_PONG_TE_CHECK, PINGPONG_S0,
+		SDE_INTR_PING_PONG_S0_TE_DETECTED, 1},
+	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 1},
+	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 1},
+	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 1},
+
+	/* BEGIN MAP_RANGE: 64-95 HIST */
+	/* irq_idx: 64-67 */
+	{ SDE_IRQ_TYPE_HIST_VIG_DONE, SSPP_VIG0, SDE_INTR_HIST_VIG_0_DONE, 2},
+	{ SDE_IRQ_TYPE_HIST_VIG_RSTSEQ, SSPP_VIG0,
+		SDE_INTR_HIST_VIG_0_RSTSEQ_DONE, 2},
+	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 2},
+	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 2},
+	/* irq_idx: 68-71 */
+	{ SDE_IRQ_TYPE_HIST_VIG_DONE, SSPP_VIG1, SDE_INTR_HIST_VIG_1_DONE, 2},
+	{ SDE_IRQ_TYPE_HIST_VIG_RSTSEQ, SSPP_VIG1,
+		SDE_INTR_HIST_VIG_1_RSTSEQ_DONE, 2},
+	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 2},
+	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 2},
+	/* irq_idx: 72-75 */
+	{ SDE_IRQ_TYPE_HIST_VIG_DONE, SSPP_VIG2, SDE_INTR_HIST_VIG_2_DONE, 2},
+	{ SDE_IRQ_TYPE_HIST_VIG_RSTSEQ, SSPP_VIG2,
+		SDE_INTR_HIST_VIG_2_RSTSEQ_DONE, 2},
+	{ SDE_IRQ_TYPE_HIST_VIG_DONE, SSPP_VIG3, SDE_INTR_HIST_VIG_3_DONE, 2},
+	{ SDE_IRQ_TYPE_HIST_VIG_RSTSEQ, SSPP_VIG3,
+		SDE_INTR_HIST_VIG_3_RSTSEQ_DONE, 2},
+	/* irq_idx: 76-79 */
+	{ SDE_IRQ_TYPE_HIST_DSPP_DONE, DSPP_0, SDE_INTR_HIST_DSPP_0_DONE, 2},
+	{ SDE_IRQ_TYPE_HIST_DSPP_RSTSEQ, DSPP_0,
+		SDE_INTR_HIST_DSPP_0_RSTSEQ_DONE, 2},
+	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 2},
+	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 2},
+	/* irq_idx: 80-83 */
+	{ SDE_IRQ_TYPE_HIST_DSPP_DONE, DSPP_1, SDE_INTR_HIST_DSPP_1_DONE, 2},
+	{ SDE_IRQ_TYPE_HIST_DSPP_RSTSEQ, DSPP_1,
+		SDE_INTR_HIST_DSPP_1_RSTSEQ_DONE, 2},
+	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 2},
+	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 2},
+	/* irq_idx: 84-87 */
+	{ SDE_IRQ_TYPE_HIST_DSPP_DONE, DSPP_2, SDE_INTR_HIST_DSPP_2_DONE, 2},
+	{ SDE_IRQ_TYPE_HIST_DSPP_RSTSEQ, DSPP_2,
+		SDE_INTR_HIST_DSPP_2_RSTSEQ_DONE, 2},
+	{ SDE_IRQ_TYPE_HIST_DSPP_DONE, DSPP_3, SDE_INTR_HIST_DSPP_3_DONE, 2},
+	{ SDE_IRQ_TYPE_HIST_DSPP_RSTSEQ, DSPP_3,
+		SDE_INTR_HIST_DSPP_3_RSTSEQ_DONE, 2},
+	/* irq_idx: 88-91 */
+	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 2},
+	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 2},
+	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 2},
+	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 2},
+	/* irq_idx: 92-95 */
+	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 2},
+	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 2},
+	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 2},
+	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 2},
+
+	/* BEGIN MAP_RANGE: 96-127 INTF_0_INTR */
+	/* irq_idx: 96-99 */
+	{ SDE_IRQ_TYPE_SFI_VIDEO_IN, INTF_0,
+		SDE_INTR_VIDEO_INTO_STATIC, 3},
+	{ SDE_IRQ_TYPE_SFI_VIDEO_OUT, INTF_0,
+		SDE_INTR_VIDEO_OUTOF_STATIC, 3},
+	{ SDE_IRQ_TYPE_SFI_CMD_0_IN, INTF_0,
+		SDE_INTR_DSICMD_0_INTO_STATIC, 3},
+	{ SDE_IRQ_TYPE_SFI_CMD_0_OUT, INTF_0,
+		SDE_INTR_DSICMD_0_OUTOF_STATIC, 3},
+	/* irq_idx: 100-103 */
+	{ SDE_IRQ_TYPE_SFI_CMD_1_IN, INTF_0,
+		SDE_INTR_DSICMD_1_INTO_STATIC, 3},
+	{ SDE_IRQ_TYPE_SFI_CMD_1_OUT, INTF_0,
+		SDE_INTR_DSICMD_1_OUTOF_STATIC, 3},
+	{ SDE_IRQ_TYPE_SFI_CMD_2_IN, INTF_0,
+		SDE_INTR_DSICMD_2_INTO_STATIC, 3},
+	{ SDE_IRQ_TYPE_SFI_CMD_2_OUT, INTF_0,
+		SDE_INTR_DSICMD_2_OUTOF_STATIC, 3},
+	/* irq_idx: 104-107 */
+	{ SDE_IRQ_TYPE_PROG_LINE, INTF_0, SDE_INTR_PROG_LINE, 3},
+	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 3},
+	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 3},
+	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 3},
+	/* irq_idx: 108-111 */
+	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 3},
+	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 3},
+	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 3},
+	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 3},
+	/* irq_idx: 112-115 */
+	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 3},
+	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 3},
+	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 3},
+	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 3},
+	/* irq_idx: 116-119 */
+	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 3},
+	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 3},
+	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 3},
+	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 3},
+	/* irq_idx: 120-123 */
+	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 3},
+	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 3},
+	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 3},
+	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 3},
+	/* irq_idx: 124-127 */
+	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 3},
+	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 3},
+	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 3},
+	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 3},
+
+	/* BEGIN MAP_RANGE: 128-159 INTF_1_INTR */
+	/* irq_idx: 128-131 */
+	{ SDE_IRQ_TYPE_SFI_VIDEO_IN, INTF_1,
+		SDE_INTR_VIDEO_INTO_STATIC, 4},
+	{ SDE_IRQ_TYPE_SFI_VIDEO_OUT, INTF_1,
+		SDE_INTR_VIDEO_OUTOF_STATIC, 4},
+	{ SDE_IRQ_TYPE_SFI_CMD_0_IN, INTF_1,
+		SDE_INTR_DSICMD_0_INTO_STATIC, 4},
+	{ SDE_IRQ_TYPE_SFI_CMD_0_OUT, INTF_1,
+		SDE_INTR_DSICMD_0_OUTOF_STATIC, 4},
+	/* irq_idx: 132-135 */
+	{ SDE_IRQ_TYPE_SFI_CMD_1_IN, INTF_1,
+		SDE_INTR_DSICMD_1_INTO_STATIC, 4},
+	{ SDE_IRQ_TYPE_SFI_CMD_1_OUT, INTF_1,
+		SDE_INTR_DSICMD_1_OUTOF_STATIC, 4},
+	{ SDE_IRQ_TYPE_SFI_CMD_2_IN, INTF_1,
+		SDE_INTR_DSICMD_2_INTO_STATIC, 4},
+	{ SDE_IRQ_TYPE_SFI_CMD_2_OUT, INTF_1,
+		SDE_INTR_DSICMD_2_OUTOF_STATIC, 4},
+	/* irq_idx: 136-139 */
+	{ SDE_IRQ_TYPE_PROG_LINE, INTF_1, SDE_INTR_PROG_LINE, 4},
+	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 4},
+	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 4},
+	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 4},
+	/* irq_idx: 140-143 */
+	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 4},
+	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 4},
+	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 4},
+	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 4},
+	/* irq_idx: 144-147 */
+	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 4},
+	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 4},
+	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 4},
+	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 4},
+	/* irq_idx: 148-151 */
+	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 4},
+	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 4},
+	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 4},
+	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 4},
+	/* irq_idx: 152-155 */
+	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 4},
+	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 4},
+	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 4},
+	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 4},
+	/* irq_idx: 156-159 */
+	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 4},
+	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 4},
+	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 4},
+	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 4},
+
+	/* BEGIN MAP_RANGE: 160-191 INTF_2_INTR */
+	/* irq_idx: 160-163 */
+	{ SDE_IRQ_TYPE_SFI_VIDEO_IN, INTF_2,
+		SDE_INTR_VIDEO_INTO_STATIC, 5},
+	{ SDE_IRQ_TYPE_SFI_VIDEO_OUT, INTF_2,
+		SDE_INTR_VIDEO_OUTOF_STATIC, 5},
+	{ SDE_IRQ_TYPE_SFI_CMD_0_IN, INTF_2,
+		SDE_INTR_DSICMD_0_INTO_STATIC, 5},
+	{ SDE_IRQ_TYPE_SFI_CMD_0_OUT, INTF_2,
+		SDE_INTR_DSICMD_0_OUTOF_STATIC, 5},
+	/* irq_idx: 164-167 */
+	{ SDE_IRQ_TYPE_SFI_CMD_1_IN, INTF_2,
+		SDE_INTR_DSICMD_1_INTO_STATIC, 5},
+	{ SDE_IRQ_TYPE_SFI_CMD_1_OUT, INTF_2,
+		SDE_INTR_DSICMD_1_OUTOF_STATIC, 5},
+	{ SDE_IRQ_TYPE_SFI_CMD_2_IN, INTF_2,
+		SDE_INTR_DSICMD_2_INTO_STATIC, 5},
+	{ SDE_IRQ_TYPE_SFI_CMD_2_OUT, INTF_2,
+		SDE_INTR_DSICMD_2_OUTOF_STATIC, 5},
+	/* irq_idx: 168-171 */
+	{ SDE_IRQ_TYPE_PROG_LINE, INTF_2, SDE_INTR_PROG_LINE, 5},
+	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 5},
+	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 5},
+	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 5},
+	/* irq_idx: 172-175 */
+	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 5},
+	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 5},
+	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 5},
+	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 5},
+	/* irq_idx: 176-179 */
+	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 5},
+	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 5},
+	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 5},
+	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 5},
+	/* irq_idx: 180-183 */
+	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 5},
+	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 5},
+	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 5},
+	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 5},
+	/* irq_idx: 184-187 */
+	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 5},
+	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 5},
+	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 5},
+	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 5},
+	/* irq_idx: 188-191 */
+	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 5},
+	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 5},
+	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 5},
+	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 5},
+
+	/* BEGIN MAP_RANGE: 192-223 INTF_3_INTR */
+	/* irq_idx: 192-195 */
+	{ SDE_IRQ_TYPE_SFI_VIDEO_IN, INTF_3,
+		SDE_INTR_VIDEO_INTO_STATIC, 6},
+	{ SDE_IRQ_TYPE_SFI_VIDEO_OUT, INTF_3,
+		SDE_INTR_VIDEO_OUTOF_STATIC, 6},
+	{ SDE_IRQ_TYPE_SFI_CMD_0_IN, INTF_3,
+		SDE_INTR_DSICMD_0_INTO_STATIC, 6},
+	{ SDE_IRQ_TYPE_SFI_CMD_0_OUT, INTF_3,
+		SDE_INTR_DSICMD_0_OUTOF_STATIC, 6},
+	/* irq_idx: 196-199 */
+	{ SDE_IRQ_TYPE_SFI_CMD_1_IN, INTF_3,
+		SDE_INTR_DSICMD_1_INTO_STATIC, 6},
+	{ SDE_IRQ_TYPE_SFI_CMD_1_OUT, INTF_3,
+		SDE_INTR_DSICMD_1_OUTOF_STATIC, 6},
+	{ SDE_IRQ_TYPE_SFI_CMD_2_IN, INTF_3,
+		SDE_INTR_DSICMD_2_INTO_STATIC, 6},
+	{ SDE_IRQ_TYPE_SFI_CMD_2_OUT, INTF_3,
+		SDE_INTR_DSICMD_2_OUTOF_STATIC, 6},
+	/* irq_idx: 200-203 */
+	{ SDE_IRQ_TYPE_PROG_LINE, INTF_3, SDE_INTR_PROG_LINE, 6},
+	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 6},
+	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 6},
+	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 6},
+	/* irq_idx: 204-207 */
+	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 6},
+	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 6},
+	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 6},
+	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 6},
+	/* irq_idx: 208-211 */
+	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 6},
+	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 6},
+	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 6},
+	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 6},
+	/* irq_idx: 212-215 */
+	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 6},
+	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 6},
+	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 6},
+	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 6},
+	/* irq_idx: 216-219 */
+	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 6},
+	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 6},
+	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 6},
+	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 6},
+	/* irq_idx: 220-223 */
+	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 6},
+	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 6},
+	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 6},
+	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 6},
+
+	/* BEGIN MAP_RANGE: 224-255 INTF_4_INTR */
+	/* irq_idx: 224-227 */
+	{ SDE_IRQ_TYPE_SFI_VIDEO_IN, INTF_4,
+		SDE_INTR_VIDEO_INTO_STATIC, 7},
+	{ SDE_IRQ_TYPE_SFI_VIDEO_OUT, INTF_4,
+		SDE_INTR_VIDEO_OUTOF_STATIC, 7},
+	{ SDE_IRQ_TYPE_SFI_CMD_0_IN, INTF_4,
+		SDE_INTR_DSICMD_0_INTO_STATIC, 7},
+	{ SDE_IRQ_TYPE_SFI_CMD_0_OUT, INTF_4,
+		SDE_INTR_DSICMD_0_OUTOF_STATIC, 7},
+	/* irq_idx: 228-231 */
+	{ SDE_IRQ_TYPE_SFI_CMD_1_IN, INTF_4,
+		SDE_INTR_DSICMD_1_INTO_STATIC, 7},
+	{ SDE_IRQ_TYPE_SFI_CMD_1_OUT, INTF_4,
+		SDE_INTR_DSICMD_1_OUTOF_STATIC, 7},
+	{ SDE_IRQ_TYPE_SFI_CMD_2_IN, INTF_4,
+		SDE_INTR_DSICMD_2_INTO_STATIC, 7},
+	{ SDE_IRQ_TYPE_SFI_CMD_2_OUT, INTF_4,
+		SDE_INTR_DSICMD_2_OUTOF_STATIC, 7},
+	/* irq_idx: 232-235 */
+	{ SDE_IRQ_TYPE_PROG_LINE, INTF_4, SDE_INTR_PROG_LINE, 7},
+	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 7},
+	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 7},
+	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 7},
+	/* irq_idx: 236-239 */
+	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 7},
+	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 7},
+	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 7},
+	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 7},
+	/* irq_idx: 240-243 */
+	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 7},
+	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 7},
+	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 7},
+	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 7},
+	/* irq_idx: 244-247 */
+	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 7},
+	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 7},
+	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 7},
+	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 7},
+	/* irq_idx: 248-251 */
+	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 7},
+	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 7},
+	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 7},
+	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 7},
+	/* irq_idx: 252-255 */
+	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 7},
+	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 7},
+	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 7},
+	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 7},
+};
+
+static int sde_hw_intr_irqidx_lookup(enum sde_intr_type intr_type,
+		u32 instance_idx)
+{
+	int i;
+
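+	/*
+	 * Linear scan of the static map; the first entry matching both
+	 * the interrupt type and the instance index wins.
+	 */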
+	for (i = 0; i < ARRAY_SIZE(sde_irq_map); i++) {
+		if (intr_type == sde_irq_map[i].intr_type &&
+			instance_idx == sde_irq_map[i].instance_idx)
+			return i;
+	}
+
+	pr_debug("IRQ lookup failed: intr_type=%d, instance_idx=%d\n",
+			intr_type, instance_idx);
+	return -EINVAL;
+}
+
+static void sde_hw_intr_set_mask(struct sde_hw_intr *intr, uint32_t reg_off,
+		uint32_t mask)
+{
+	SDE_REG_WRITE(&intr->hw, reg_off, mask);
+}
+
+static void sde_hw_intr_dispatch_irq(struct sde_hw_intr *intr,
+		void (*cbfunc)(void *, int),
+		void *arg)
+{
+	int reg_idx;
+	int irq_idx;
+	int start_idx;
+	int end_idx;
+	u32 irq_status;
+	unsigned long irq_flags;
+
+	/*
+	 * The caller saves the IRQ status registers before calling here
+	 * (see get_interrupt_statuses). Walk each saved status word and
+	 * find the matching irq lookup index.
+	 */
+	spin_lock_irqsave(&intr->status_lock, irq_flags);
+	for (reg_idx = 0; reg_idx < ARRAY_SIZE(sde_intr_set); reg_idx++) {
+		irq_status = intr->save_irq_status[reg_idx];
+
+		/*
+		 * Each interrupt register covers a fixed range of 32
+		 * indexes in sde_irq_map.
+		 */
+		start_idx = reg_idx * 32;
+		end_idx = start_idx + 32;
+
+		/*
+		 * Search for map entries matching the pending status bits;
+		 * start_idx and end_idx bound the search range within
+		 * sde_irq_map.
+		 */
+		for (irq_idx = start_idx;
+				(irq_idx < end_idx) && irq_status;
+				irq_idx++)
+			if ((irq_status & sde_irq_map[irq_idx].irq_mask) &&
+				(sde_irq_map[irq_idx].reg_idx == reg_idx)) {
+				/*
+				 * On an irq mask match, invoke the given
+				 * cbfunc; the callback is responsible for
+				 * clearing the interrupt status. If no
+				 * cbfunc is provided, clear the interrupt
+				 * here instead.
+				 */
+				if (cbfunc)
+					cbfunc(arg, irq_idx);
+				else
+					intr->ops.clear_interrupt_status(
+							intr, irq_idx);
+
+				/*
+				 * Once handled, drop the matching mask from
+				 * irq_status. When irq_status reaches zero,
+				 * the search can stop early.
+				 */
+				irq_status &= ~sde_irq_map[irq_idx].irq_mask;
+			}
+	}
+	spin_unlock_irqrestore(&intr->status_lock, irq_flags);
+}
+
+static int sde_hw_intr_enable_irq(struct sde_hw_intr *intr, int irq_idx)
+{
+	int reg_idx;
+	unsigned long irq_flags;
+	const struct sde_intr_reg *reg;
+	const struct sde_irq_type *irq;
+	const char *dbgstr = NULL;
+	uint32_t cache_irq_mask;
+
+	if (irq_idx < 0 || irq_idx >= ARRAY_SIZE(sde_irq_map)) {
+		pr_err("invalid IRQ index: [%d]\n", irq_idx);
+		return -EINVAL;
+	}
+
+	irq = &sde_irq_map[irq_idx];
+	reg_idx = irq->reg_idx;
+	reg = &sde_intr_set[reg_idx];
+
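+	/*
+	 * Update the cached enable mask under mask_lock so concurrent
+	 * enable/disable calls cannot clobber each other's register writes.
+	 */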
+	spin_lock_irqsave(&intr->mask_lock, irq_flags);
+	cache_irq_mask = intr->cache_irq_mask[reg_idx];
+	if (cache_irq_mask & irq->irq_mask) {
+		dbgstr = "SDE IRQ already set:";
+	} else {
+		dbgstr = "SDE IRQ enabled:";
+
+		cache_irq_mask |= irq->irq_mask;
+		/* Clear any pending interrupts */
+		SDE_REG_WRITE(&intr->hw, reg->clr_off, irq->irq_mask);
+		/* Enable interrupts with the new mask */
+		SDE_REG_WRITE(&intr->hw, reg->en_off, cache_irq_mask);
+
+		intr->cache_irq_mask[reg_idx] = cache_irq_mask;
+	}
+	spin_unlock_irqrestore(&intr->mask_lock, irq_flags);
+
+	pr_debug("%s MASK:0x%.8x, CACHE-MASK:0x%.8x\n", dbgstr,
+			irq->irq_mask, cache_irq_mask);
+
+	return 0;
+}
+
+static int sde_hw_intr_disable_irq(struct sde_hw_intr *intr, int irq_idx)
+{
+	int reg_idx;
+	unsigned long irq_flags;
+	const struct sde_intr_reg *reg;
+	const struct sde_irq_type *irq;
+	const char *dbgstr = NULL;
+	uint32_t cache_irq_mask;
+
+	if (irq_idx < 0 || irq_idx >= ARRAY_SIZE(sde_irq_map)) {
+		pr_err("invalid IRQ index: [%d]\n", irq_idx);
+		return -EINVAL;
+	}
+
+	irq = &sde_irq_map[irq_idx];
+	reg_idx = irq->reg_idx;
+	reg = &sde_intr_set[reg_idx];
+
+	spin_lock_irqsave(&intr->mask_lock, irq_flags);
+	cache_irq_mask = intr->cache_irq_mask[reg_idx];
+	if ((cache_irq_mask & irq->irq_mask) == 0) {
+		dbgstr = "SDE IRQ is already cleared:";
+	} else {
+		dbgstr = "SDE IRQ mask disable:";
+
+		cache_irq_mask &= ~irq->irq_mask;
+		/* Disable interrupts based on the new mask */
+		SDE_REG_WRITE(&intr->hw, reg->en_off, cache_irq_mask);
+		/* Clear any pending interrupts */
+		SDE_REG_WRITE(&intr->hw, reg->clr_off, irq->irq_mask);
+
+		intr->cache_irq_mask[reg_idx] = cache_irq_mask;
+	}
+	spin_unlock_irqrestore(&intr->mask_lock, irq_flags);
+
+	pr_debug("%s MASK:0x%.8x, CACHE-MASK:0x%.8x\n", dbgstr,
+			irq->irq_mask, cache_irq_mask);
+
+	return 0;
+}
+
+static int sde_hw_intr_clear_irqs(struct sde_hw_intr *intr)
+{
+	int i;
+
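+	/* Write all-ones to every CLEAR register to ack any pending IRQs */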
+	for (i = 0; i < ARRAY_SIZE(sde_intr_set); i++)
+		SDE_REG_WRITE(&intr->hw, sde_intr_set[i].clr_off, 0xffffffff);
+
+	return 0;
+}
+
+static int sde_hw_intr_disable_irqs(struct sde_hw_intr *intr)
+{
+	int i;
+
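+	/* Write a zero enable mask to every register to mask all IRQs */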
+	for (i = 0; i < ARRAY_SIZE(sde_intr_set); i++)
+		SDE_REG_WRITE(&intr->hw, sde_intr_set[i].en_off, 0x00000000);
+
+	return 0;
+}
+
+static int sde_hw_intr_get_valid_interrupts(struct sde_hw_intr *intr,
+		uint32_t *mask)
+{
+	*mask = IRQ_SOURCE_MDP | IRQ_SOURCE_DSI0 | IRQ_SOURCE_DSI1
+		| IRQ_SOURCE_HDMI | IRQ_SOURCE_EDP;
+	return 0;
+}
+
+static int sde_hw_intr_get_interrupt_sources(struct sde_hw_intr *intr,
+		uint32_t *sources)
+{
+	*sources = SDE_REG_READ(&intr->hw, HW_INTR_STATUS);
+	return 0;
+}
+
+static void sde_hw_intr_get_interrupt_statuses(struct sde_hw_intr *intr)
+{
+	int i;
+	u32 enable_mask;
+	unsigned long irq_flags;
+
+	spin_lock_irqsave(&intr->status_lock, irq_flags);
+	for (i = 0; i < ARRAY_SIZE(sde_intr_set); i++) {
+		/* Read interrupt status */
+		intr->save_irq_status[i] = SDE_REG_READ(&intr->hw,
+				sde_intr_set[i].status_off);
+
+		/* Read enable mask */
+		enable_mask = SDE_REG_READ(&intr->hw, sde_intr_set[i].en_off);
+
+		/* and clear the interrupt */
+		if (intr->save_irq_status[i])
+			SDE_REG_WRITE(&intr->hw, sde_intr_set[i].clr_off,
+					intr->save_irq_status[i]);
+
+		/* Finally update IRQ status based on enable mask */
+		intr->save_irq_status[i] &= enable_mask;
+	}
+	spin_unlock_irqrestore(&intr->status_lock, irq_flags);
+}
+
+static void sde_hw_intr_clear_interrupt_status(struct sde_hw_intr *intr,
+		int irq_idx)
+{
+	int reg_idx;
+	unsigned long irq_flags;
+
+	spin_lock_irqsave(&intr->mask_lock, irq_flags);
+
+	reg_idx = sde_irq_map[irq_idx].reg_idx;
+	SDE_REG_WRITE(&intr->hw, sde_intr_set[reg_idx].clr_off,
+			sde_irq_map[irq_idx].irq_mask);
+
+	spin_unlock_irqrestore(&intr->mask_lock, irq_flags);
+}
+
+static u32 sde_hw_intr_get_interrupt_status(struct sde_hw_intr *intr,
+		int irq_idx, bool clear)
+{
+	int reg_idx;
+	unsigned long irq_flags;
+	u32 intr_status;
+
+	spin_lock_irqsave(&intr->mask_lock, irq_flags);
+
+	reg_idx = sde_irq_map[irq_idx].reg_idx;
+	intr_status = SDE_REG_READ(&intr->hw,
+			sde_intr_set[reg_idx].status_off) &
+					sde_irq_map[irq_idx].irq_mask;
+	if (intr_status && clear)
+		SDE_REG_WRITE(&intr->hw, sde_intr_set[reg_idx].clr_off,
+				intr_status);
+
+	spin_unlock_irqrestore(&intr->mask_lock, irq_flags);
+
+	return intr_status;
+}
+
+static void __setup_intr_ops(struct sde_hw_intr_ops *ops)
+{
+	ops->set_mask = sde_hw_intr_set_mask;
+	ops->irq_idx_lookup = sde_hw_intr_irqidx_lookup;
+	ops->enable_irq = sde_hw_intr_enable_irq;
+	ops->disable_irq = sde_hw_intr_disable_irq;
+	ops->dispatch_irqs = sde_hw_intr_dispatch_irq;
+	ops->clear_all_irqs = sde_hw_intr_clear_irqs;
+	ops->disable_all_irqs = sde_hw_intr_disable_irqs;
+	ops->get_valid_interrupts = sde_hw_intr_get_valid_interrupts;
+	ops->get_interrupt_sources = sde_hw_intr_get_interrupt_sources;
+	ops->get_interrupt_statuses = sde_hw_intr_get_interrupt_statuses;
+	ops->clear_interrupt_status = sde_hw_intr_clear_interrupt_status;
+	ops->get_interrupt_status = sde_hw_intr_get_interrupt_status;
+}
+
+static struct sde_mdss_base_cfg *__intr_offset(struct sde_mdss_cfg *m,
+		void __iomem *addr, struct sde_hw_blk_reg_map *hw)
+{
+	if (m->mdp_count == 0)
+		return NULL;
+
+	hw->base_off = addr;
+	hw->blk_off = m->mdss[0].base;
+	hw->hwversion = m->hwversion;
+	return &m->mdss[0];
+}
+
+struct sde_hw_intr *sde_hw_intr_init(void __iomem *addr,
+		struct sde_mdss_cfg *m)
+{
+	struct sde_hw_intr *intr = kzalloc(sizeof(*intr), GFP_KERNEL);
+	struct sde_mdss_base_cfg *cfg;
+
+	if (!intr)
+		return ERR_PTR(-ENOMEM);
+
+	cfg = __intr_offset(m, addr, &intr->hw);
+	if (!cfg) {
+		kfree(intr);
+		return ERR_PTR(-EINVAL);
+	}
+	__setup_intr_ops(&intr->ops);
+
+	intr->irq_idx_tbl_size = ARRAY_SIZE(sde_irq_map);
+
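+	/*
+	 * One cached enable mask and one saved status word are kept per
+	 * interrupt register in sde_intr_set.
+	 */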
+	intr->cache_irq_mask = kcalloc(ARRAY_SIZE(sde_intr_set), sizeof(u32),
+			GFP_KERNEL);
+	if (intr->cache_irq_mask == NULL) {
+		kfree(intr);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	intr->save_irq_status = kcalloc(ARRAY_SIZE(sde_intr_set), sizeof(u32),
+			GFP_KERNEL);
+	if (intr->save_irq_status == NULL) {
+		kfree(intr->cache_irq_mask);
+		kfree(intr);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	spin_lock_init(&intr->mask_lock);
+	spin_lock_init(&intr->status_lock);
+
+	return intr;
+}
+
+void sde_hw_intr_destroy(struct sde_hw_intr *intr)
+{
+	if (intr) {
+		kfree(intr->cache_irq_mask);
+		kfree(intr->save_irq_status);
+		kfree(intr);
+	}
+}
+
diff -Nruw linux-4.4.115-fbx/drivers/gpu/drm/msm/sde./sde_hw_interrupts.h linux-4.4.115-fbx/drivers/gpu/drm/msm/sde/sde_hw_interrupts.h
--- linux-4.4.115-fbx/drivers/gpu/drm/msm/sde./sde_hw_interrupts.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/gpu/drm/msm/sde/sde_hw_interrupts.h	2019-01-22 16:16:23.515246515 +0100
@@ -0,0 +1,257 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _SDE_HW_INTERRUPTS_H
+#define _SDE_HW_INTERRUPTS_H
+
+#include <linux/types.h>
+
+#include "sde_hwio.h"
+#include "sde_hw_catalog.h"
+#include "sde_hw_util.h"
+#include "sde_hw_mdss.h"
+
+#define IRQ_SOURCE_MDP		BIT(0)
+#define IRQ_SOURCE_DSI0		BIT(4)
+#define IRQ_SOURCE_DSI1		BIT(5)
+#define IRQ_SOURCE_HDMI		BIT(8)
+#define IRQ_SOURCE_EDP		BIT(12)
+#define IRQ_SOURCE_MHL		BIT(16)
+
+/**
+ * sde_intr_type - HW Interrupt Type
+ * @SDE_IRQ_TYPE_WB_ROT_COMP:		WB rotator done
+ * @SDE_IRQ_TYPE_WB_WFD_COMP:		WB WFD done
+ * @SDE_IRQ_TYPE_PING_PONG_COMP:	PingPong done
+ * @SDE_IRQ_TYPE_PING_PONG_RD_PTR:	PingPong read pointer
+ * @SDE_IRQ_TYPE_PING_PONG_WR_PTR:	PingPong write pointer
+ * @SDE_IRQ_TYPE_PING_PONG_AUTO_REF:	PingPong auto refresh
+ * @SDE_IRQ_TYPE_PING_PONG_TEAR_CHECK:	PingPong Tear check
+ * @SDE_IRQ_TYPE_PING_PONG_TE_CHECK:	PingPong TE detection
+ * @SDE_IRQ_TYPE_INTF_UNDER_RUN:	INTF underrun
+ * @SDE_IRQ_TYPE_INTF_VSYNC:		INTF VSYNC
+ * @SDE_IRQ_TYPE_CWB_OVERFLOW:		Concurrent WB overflow
+ * @SDE_IRQ_TYPE_HIST_VIG_DONE:		VIG Histogram done
+ * @SDE_IRQ_TYPE_HIST_VIG_RSTSEQ:	VIG Histogram reset
+ * @SDE_IRQ_TYPE_HIST_DSPP_DONE:	DSPP Histogram done
+ * @SDE_IRQ_TYPE_HIST_DSPP_RSTSEQ:	DSPP Histogram reset
+ * @SDE_IRQ_TYPE_WD_TIMER:		Watchdog timer
+ * @SDE_IRQ_TYPE_SFI_VIDEO_IN:		Video static frame INTR into static
+ * @SDE_IRQ_TYPE_SFI_VIDEO_OUT:		Video static frame INTR out-of static
+ * @SDE_IRQ_TYPE_SFI_CMD_0_IN:		DSI CMD0 static frame INTR into static
+ * @SDE_IRQ_TYPE_SFI_CMD_0_OUT:		DSI CMD0 static frame INTR out-of static
+ * @SDE_IRQ_TYPE_SFI_CMD_1_IN:		DSI CMD1 static frame INTR into static
+ * @SDE_IRQ_TYPE_SFI_CMD_1_OUT:		DSI CMD1 static frame INTR out-of static
+ * @SDE_IRQ_TYPE_SFI_CMD_2_IN:		DSI CMD2 static frame INTR into static
+ * @SDE_IRQ_TYPE_SFI_CMD_2_OUT:		DSI CMD2 static frame INTR out-of static
+ * @SDE_IRQ_TYPE_PROG_LINE:		Programmable Line interrupt
+ * @SDE_IRQ_TYPE_RESERVED:		Reserved for expansion
+ */
+enum sde_intr_type {
+	SDE_IRQ_TYPE_WB_ROT_COMP,
+	SDE_IRQ_TYPE_WB_WFD_COMP,
+	SDE_IRQ_TYPE_PING_PONG_COMP,
+	SDE_IRQ_TYPE_PING_PONG_RD_PTR,
+	SDE_IRQ_TYPE_PING_PONG_WR_PTR,
+	SDE_IRQ_TYPE_PING_PONG_AUTO_REF,
+	SDE_IRQ_TYPE_PING_PONG_TEAR_CHECK,
+	SDE_IRQ_TYPE_PING_PONG_TE_CHECK,
+	SDE_IRQ_TYPE_INTF_UNDER_RUN,
+	SDE_IRQ_TYPE_INTF_VSYNC,
+	SDE_IRQ_TYPE_CWB_OVERFLOW,
+	SDE_IRQ_TYPE_HIST_VIG_DONE,
+	SDE_IRQ_TYPE_HIST_VIG_RSTSEQ,
+	SDE_IRQ_TYPE_HIST_DSPP_DONE,
+	SDE_IRQ_TYPE_HIST_DSPP_RSTSEQ,
+	SDE_IRQ_TYPE_WD_TIMER,
+	SDE_IRQ_TYPE_SFI_VIDEO_IN,
+	SDE_IRQ_TYPE_SFI_VIDEO_OUT,
+	SDE_IRQ_TYPE_SFI_CMD_0_IN,
+	SDE_IRQ_TYPE_SFI_CMD_0_OUT,
+	SDE_IRQ_TYPE_SFI_CMD_1_IN,
+	SDE_IRQ_TYPE_SFI_CMD_1_OUT,
+	SDE_IRQ_TYPE_SFI_CMD_2_IN,
+	SDE_IRQ_TYPE_SFI_CMD_2_OUT,
+	SDE_IRQ_TYPE_PROG_LINE,
+	SDE_IRQ_TYPE_RESERVED,
+};
+
+struct sde_hw_intr;
+
+/**
+ * Interrupt operations.
+ */
+struct sde_hw_intr_ops {
+	/**
+	 * set_mask - Programs the given interrupt register with the
+	 *            given interrupt mask. Register value will get overwritten.
+	 * @intr:	HW interrupt handle
+	 * @reg_off:	MDSS HW register offset
+	 * @irqmask:	IRQ mask value
+	 */
+	void (*set_mask)(
+			struct sde_hw_intr *intr,
+			uint32_t reg_off,
+			uint32_t irqmask);
+
+	/**
+	 * irq_idx_lookup - Looks up the IRQ index for the given HW interrupt
+	 *                 type and instance. Used by all irq-related ops.
+	 * @intr_type:		Interrupt type defined in sde_intr_type
+	 * @instance_idx:	HW interrupt block instance
+	 * @return:		irq_idx or -EINVAL for lookup fail
+	 */
+	int (*irq_idx_lookup)(
+			enum sde_intr_type intr_type,
+			u32 instance_idx);
+
+	/**
+	 * enable_irq - Enable IRQ based on lookup IRQ index
+	 * @intr:	HW interrupt handle
+	 * @irq_idx:	Lookup irq index return from irq_idx_lookup
+	 * @return:	0 for success, otherwise failure
+	 */
+	int (*enable_irq)(
+			struct sde_hw_intr *intr,
+			int irq_idx);
+
+	/**
+	 * disable_irq - Disable IRQ based on lookup IRQ index
+	 * @intr:	HW interrupt handle
+	 * @irq_idx:	Lookup irq index return from irq_idx_lookup
+	 * @return:	0 for success, otherwise failure
+	 */
+	int (*disable_irq)(
+			struct sde_hw_intr *intr,
+			int irq_idx);
+
+	/**
+	 * clear_all_irqs - Clears all the interrupts (i.e. acknowledges
+	 *                  any asserted IRQs). Useful during reset.
+	 * @intr:	HW interrupt handle
+	 * @return:	0 for success, otherwise failure
+	 */
+	int (*clear_all_irqs)(
+			struct sde_hw_intr *intr);
+
+	/**
+	 * disable_all_irqs - Disables all the interrupts. Useful during reset.
+	 * @intr:	HW interrupt handle
+	 * @return:	0 for success, otherwise failure
+	 */
+	int (*disable_all_irqs)(
+			struct sde_hw_intr *intr);
+
+	/**
+	 * dispatch_irqs - IRQ dispatcher will call the given callback
+	 *                 function when a matching interrupt status bit is
+	 *                 found in the irq mapping table.
+	 * @intr:	HW interrupt handle
+	 * @cbfunc:	Callback function pointer
+	 * @arg:	Argument to pass back during callback
+	 */
+	void (*dispatch_irqs)(
+			struct sde_hw_intr *intr,
+			void (*cbfunc)(void *arg, int irq_idx),
+			void *arg);
+
+	/**
+	 * get_interrupt_statuses - Reads and stores the value of every
+	 *                          interrupt status register that is
+	 *                          currently fired.
+	 * @intr:	HW interrupt handle
+	 */
+	void (*get_interrupt_statuses)(
+			struct sde_hw_intr *intr);
+
+	/**
+	 * clear_interrupt_status - Clears HW interrupt status based on given
+	 *                          lookup IRQ index.
+	 * @intr:	HW interrupt handle
+	 * @irq_idx:	Lookup irq index return from irq_idx_lookup
+	 */
+	void (*clear_interrupt_status)(
+			struct sde_hw_intr *intr,
+			int irq_idx);
+
+	/**
+	 * get_interrupt_status - Gets HW interrupt status, and clear if set,
+	 *                        based on given lookup IRQ index.
+	 * @intr:	HW interrupt handle
+	 * @irq_idx:	Lookup irq index return from irq_idx_lookup
+	 * @clear:	True to clear irq after read
+	 */
+	u32 (*get_interrupt_status)(
+			struct sde_hw_intr *intr,
+			int irq_idx,
+			bool clear);
+
+	/**
+	 * get_valid_interrupts - Gets a mask of all valid interrupt sources
+	 *                        within SDE. These are the status bits within
+	 *                        the interrupt registers that identify the
+	 *                        source of an IRQ, e.g. MDP, DSI or HDMI.
+	 * @intr:	HW interrupt handle
+	 * @mask:	Returning the interrupt source MASK
+	 * @return:	0 for success, otherwise failure
+	 */
+	int (*get_valid_interrupts)(
+			struct sde_hw_intr *intr,
+			uint32_t *mask);
+
+	/**
+	 * get_interrupt_sources - Gets the bitmask of the SDE interrupt
+	 *                         sources that are currently fired.
+	 * @intr:	HW interrupt handle
+	 * @sources:	Returning the SDE interrupt source status bit mask
+	 * @return:	0 for success, otherwise failure
+	 */
+	int (*get_interrupt_sources)(
+			struct sde_hw_intr *intr,
+			uint32_t *sources);
+};
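+
+/*
+ * Typical call flow (a sketch, not mandated by this header): the platform
+ * ISR reads the pending top-level sources via get_interrupt_sources(),
+ * latches and acks every status register with get_interrupt_statuses(),
+ * then dispatch_irqs() walks the saved status words and invokes the
+ * callback once per matching irq_idx.
+ */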
+
+/**
+ * struct sde_hw_intr: hw interrupts handling data structure
+ * @hw:               virtual address mapping
+ * @ops:              function pointer mapping for IRQ handling
+ * @cache_irq_mask:   array of IRQ enable masks reg storage created during init
+ * @save_irq_status:  array of IRQ status reg storage created during init
+ * @irq_idx_tbl_size: total number of irq_idx mapped in the hw_interrupts
+ * @mask_lock:        spinlock for accessing IRQ mask
+ * @status_lock:      spinlock for accessing IRQ status
+ */
+struct sde_hw_intr {
+	struct sde_hw_blk_reg_map hw;
+	struct sde_hw_intr_ops ops;
+	u32 *cache_irq_mask;
+	u32 *save_irq_status;
+	u32 irq_idx_tbl_size;
+	spinlock_t mask_lock;
+	spinlock_t status_lock;
+};
+
+/**
+ * sde_hw_intr_init(): Initializes the interrupts hw object
+ * @addr: mapped register io address of MDP
+ * @m :   pointer to mdss catalog data
+ */
+struct sde_hw_intr *sde_hw_intr_init(void __iomem *addr,
+		struct sde_mdss_cfg *m);
+
+/**
+ * sde_hw_intr_destroy(): Cleans up the interrupts hw object
+ * @intr: pointer to interrupts hw object
+ */
+void sde_hw_intr_destroy(struct sde_hw_intr *intr);
+#endif
diff -Nruw linux-4.4.115-fbx/drivers/gpu/drm/msm/sde./sde_hw_intf.c linux-4.4.115-fbx/drivers/gpu/drm/msm/sde/sde_hw_intf.c
--- linux-4.4.115-fbx/drivers/gpu/drm/msm/sde./sde_hw_intf.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/gpu/drm/msm/sde/sde_hw_intf.c	2019-10-29 09:26:23.641203159 +0100
@@ -0,0 +1,339 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include "sde_hwio.h"
+#include "sde_hw_catalog.h"
+#include "sde_hw_intf.h"
+#include "sde_dbg.h"
+
+#define INTF_TIMING_ENGINE_EN           0x000
+#define INTF_CONFIG                     0x004
+#define INTF_HSYNC_CTL                  0x008
+#define INTF_VSYNC_PERIOD_F0            0x00C
+#define INTF_VSYNC_PERIOD_F1            0x010
+#define INTF_VSYNC_PULSE_WIDTH_F0       0x014
+#define INTF_VSYNC_PULSE_WIDTH_F1       0x018
+#define INTF_DISPLAY_V_START_F0         0x01C
+#define INTF_DISPLAY_V_START_F1         0x020
+#define INTF_DISPLAY_V_END_F0           0x024
+#define INTF_DISPLAY_V_END_F1           0x028
+#define INTF_ACTIVE_V_START_F0          0x02C
+#define INTF_ACTIVE_V_START_F1          0x030
+#define INTF_ACTIVE_V_END_F0            0x034
+#define INTF_ACTIVE_V_END_F1            0x038
+#define INTF_DISPLAY_HCTL               0x03C
+#define INTF_ACTIVE_HCTL                0x040
+#define INTF_BORDER_COLOR               0x044
+#define INTF_UNDERFLOW_COLOR            0x048
+#define INTF_HSYNC_SKEW                 0x04C
+#define INTF_POLARITY_CTL               0x050
+#define INTF_TEST_CTL                   0x054
+#define INTF_TP_COLOR0                  0x058
+#define INTF_TP_COLOR1                  0x05C
+#define INTF_FRAME_LINE_COUNT_EN        0x0A8
+#define INTF_FRAME_COUNT                0x0AC
+#define INTF_LINE_COUNT                 0x0B0
+
+#define   INTF_DEFLICKER_CONFIG         0x0F0
+#define   INTF_DEFLICKER_STRNG_COEFF    0x0F4
+#define   INTF_DEFLICKER_WEAK_COEFF     0x0F8
+
+#define   INTF_DSI_CMD_MODE_TRIGGER_EN  0x084
+#define   INTF_PANEL_FORMAT             0x090
+#define   INTF_TPG_ENABLE               0x100
+#define   INTF_TPG_MAIN_CONTROL         0x104
+#define   INTF_TPG_VIDEO_CONFIG         0x108
+#define   INTF_TPG_COMPONENT_LIMITS     0x10C
+#define   INTF_TPG_RECTANGLE            0x110
+#define   INTF_TPG_INITIAL_VALUE        0x114
+#define   INTF_TPG_BLK_WHITE_PATTERN_FRAMES   0x118
+#define   INTF_TPG_RGB_MAPPING          0x11C
+#define   INTF_PROG_FETCH_START         0x170
+
+#define INTF_MISR_CTRL			0x180
+#define INTF_MISR_SIGNATURE		0x184
+
+#define MISR_FRAME_COUNT_MASK		0xFF
+#define MISR_CTRL_ENABLE		BIT(8)
+#define MISR_CTRL_STATUS		BIT(9)
+#define MISR_CTRL_STATUS_CLEAR		BIT(10)
+#define INTF_MISR_CTRL_FREE_RUN_MASK	BIT(31)
+
+static struct sde_intf_cfg *_intf_offset(enum sde_intf intf,
+		struct sde_mdss_cfg *m,
+		void __iomem *addr,
+		struct sde_hw_blk_reg_map *b)
+{
+	int i;
+
+	for (i = 0; i < m->intf_count; i++) {
+		if ((intf == m->intf[i].id) &&
+		    (m->intf[i].type != INTF_NONE)) {
+			b->base_off = addr;
+			b->blk_off = m->intf[i].base;
+			b->length = m->intf[i].len;
+			b->hwversion = m->hwversion;
+			b->log_mask = SDE_DBG_MASK_INTF;
+			return &m->intf[i];
+		}
+	}
+
+	return ERR_PTR(-EINVAL);
+}
+
+static void sde_hw_intf_setup_timing_engine(struct sde_hw_intf *ctx,
+		const struct intf_timing_params *p,
+		const struct sde_format *fmt)
+{
+	struct sde_hw_blk_reg_map *c = &ctx->hw;
+	u32 hsync_period, vsync_period;
+	u32 display_v_start, display_v_end;
+	u32 hsync_start_x, hsync_end_x;
+	u32 active_h_start, active_h_end;
+	u32 active_v_start, active_v_end;
+	u32 active_hctl, display_hctl, hsync_ctl;
+	u32 polarity_ctl, den_polarity, hsync_polarity, vsync_polarity;
+	u32 panel_format;
+	u32 intf_cfg;
+
+	/* read interface_cfg */
+	intf_cfg = SDE_REG_READ(c, INTF_CONFIG);
+	hsync_period = p->hsync_pulse_width + p->h_back_porch + p->width +
+	p->h_front_porch;
+	vsync_period = p->vsync_pulse_width + p->v_back_porch + p->height +
+	p->v_front_porch;
+
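+	/*
+	 * display_v_start/_end are in pixel-clock units: line counts are
+	 * scaled by hsync_period and offset by the programmed hsync skew.
+	 */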
+	display_v_start = ((p->vsync_pulse_width + p->v_back_porch) *
+	hsync_period) + p->hsync_skew;
+	display_v_end = ((vsync_period - p->v_front_porch) * hsync_period) +
+	p->hsync_skew - 1;
+
+	if (ctx->cap->type == INTF_EDP) {
+		display_v_start += p->hsync_pulse_width + p->h_back_porch;
+		display_v_end -= p->h_front_porch;
+	}
+
+	hsync_start_x = p->h_back_porch + p->hsync_pulse_width;
+	hsync_end_x = hsync_period - p->h_front_porch - 1;
+
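+	/*
+	 * Program an explicit active window only when the active size
+	 * differs from the panel timing; zero leaves it disabled.
+	 */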
+	if (p->width != p->xres) {
+		active_h_start = hsync_start_x;
+		active_h_end = active_h_start + p->xres - 1;
+	} else {
+		active_h_start = 0;
+		active_h_end = 0;
+	}
+
+	if (p->height != p->yres) {
+		active_v_start = display_v_start;
+		active_v_end = active_v_start + (p->yres * hsync_period) - 1;
+	} else {
+		active_v_start = 0;
+		active_v_end = 0;
+	}
+
+	if (active_h_end) {
+		active_hctl = (active_h_end << 16) | active_h_start;
+		intf_cfg |= BIT(29);	/* ACTIVE_H_ENABLE */
+	} else {
+		active_hctl = 0;
+	}
+
+	if (active_v_end)
+		intf_cfg |= BIT(30); /* ACTIVE_V_ENABLE */
+
+	hsync_ctl = (hsync_period << 16) | p->hsync_pulse_width;
+	display_hctl = (hsync_end_x << 16) | hsync_start_x;
+
+	den_polarity = 0;
+	hsync_polarity = p->hsync_polarity;
+	vsync_polarity = p->vsync_polarity;
+	polarity_ctl = (den_polarity << 2) | /*  DEN Polarity  */
+		(vsync_polarity << 1) | /* VSYNC Polarity */
+		(hsync_polarity << 0);  /* HSYNC Polarity */
+
+	if (!SDE_FORMAT_IS_YUV(fmt))
+		panel_format = (fmt->bits[C0_G_Y] |
+				(fmt->bits[C1_B_Cb] << 2) |
+				(fmt->bits[C2_R_Cr] << 4) |
+				(0x21 << 8));
+	else
+		/* Interface treats all the pixel data in RGB888 format */
+		panel_format = (COLOR_8BIT |
+				(COLOR_8BIT << 2) |
+				(COLOR_8BIT << 4) |
+				(0x21 << 8));
+
+	SDE_REG_WRITE(c, INTF_HSYNC_CTL, hsync_ctl);
+	SDE_REG_WRITE(c, INTF_VSYNC_PERIOD_F0, vsync_period * hsync_period);
+	SDE_REG_WRITE(c, INTF_VSYNC_PULSE_WIDTH_F0,
+			p->vsync_pulse_width * hsync_period);
+	SDE_REG_WRITE(c, INTF_DISPLAY_HCTL, display_hctl);
+	SDE_REG_WRITE(c, INTF_DISPLAY_V_START_F0, display_v_start);
+	SDE_REG_WRITE(c, INTF_DISPLAY_V_END_F0, display_v_end);
+	SDE_REG_WRITE(c, INTF_ACTIVE_HCTL,  active_hctl);
+	SDE_REG_WRITE(c, INTF_ACTIVE_V_START_F0, active_v_start);
+	SDE_REG_WRITE(c, INTF_ACTIVE_V_END_F0, active_v_end);
+	SDE_REG_WRITE(c, INTF_BORDER_COLOR, p->border_clr);
+	SDE_REG_WRITE(c, INTF_UNDERFLOW_COLOR, p->underflow_clr);
+	SDE_REG_WRITE(c, INTF_HSYNC_SKEW, p->hsync_skew);
+	SDE_REG_WRITE(c, INTF_POLARITY_CTL, polarity_ctl);
+	SDE_REG_WRITE(c, INTF_FRAME_LINE_COUNT_EN, 0x3);
+	SDE_REG_WRITE(c, INTF_CONFIG, intf_cfg);
+	SDE_REG_WRITE(c, INTF_PANEL_FORMAT, panel_format);
+}
+
+static void sde_hw_intf_enable_timing_engine(
+		struct sde_hw_intf *intf,
+		u8 enable)
+{
+	struct sde_hw_blk_reg_map *c = &intf->hw;
+	/* Note: Display interface select is handled in top block hw layer */
+	SDE_REG_WRITE(c, INTF_TIMING_ENGINE_EN, enable != 0);
+}
+
+static void sde_hw_intf_setup_prg_fetch(
+		struct sde_hw_intf *intf,
+		const struct intf_prog_fetch *fetch)
+{
+	struct sde_hw_blk_reg_map *c = &intf->hw;
+	int fetch_enable;
+
+	/*
+	 * Fetch should always start outside the active lines. If the fetch
+	 * is programmed within the active region, hardware behavior is
+	 * unknown.
+	 */
+
+	fetch_enable = SDE_REG_READ(c, INTF_CONFIG);
+	if (fetch->enable) {
+		fetch_enable |= BIT(31);
+		SDE_REG_WRITE(c, INTF_PROG_FETCH_START,
+				fetch->fetch_start);
+	} else {
+		fetch_enable &= ~BIT(31);
+	}
+
+	SDE_REG_WRITE(c, INTF_CONFIG, fetch_enable);
+}
+
+static void sde_hw_intf_get_status(
+		struct sde_hw_intf *intf,
+		struct intf_status *s)
+{
+	struct sde_hw_blk_reg_map *c = &intf->hw;
+
+	s->is_en = SDE_REG_READ(c, INTF_TIMING_ENGINE_EN);
+	if (s->is_en) {
+		s->frame_count = SDE_REG_READ(c, INTF_FRAME_COUNT);
+		s->line_count = SDE_REG_READ(c, INTF_LINE_COUNT);
+	} else {
+		s->line_count = 0;
+		s->frame_count = 0;
+	}
+}
+
+static void sde_hw_intf_set_misr(struct sde_hw_intf *intf,
+		struct sde_misr_params *misr_map)
+{
+	struct sde_hw_blk_reg_map *c = &intf->hw;
+	u32 config = 0;
+
+	if (!misr_map)
+		return;
+
+	SDE_REG_WRITE(c, INTF_MISR_CTRL, MISR_CTRL_STATUS_CLEAR);
+	/* ensure the status-clear write completes before reprogramming */
+	wmb();
+
+	if (misr_map->enable) {
+		config = (MISR_FRAME_COUNT_MASK & 1) |
+			(MISR_CTRL_ENABLE);
+
+		SDE_REG_WRITE(c, INTF_MISR_CTRL, config);
+	} else {
+		SDE_REG_WRITE(c, INTF_MISR_CTRL, 0);
+	}
+}
+
+static void sde_hw_intf_collect_misr(struct sde_hw_intf *intf,
+		struct sde_misr_params *misr_map)
+{
+	struct sde_hw_blk_reg_map *c = &intf->hw;
+
+	if (!misr_map)
+		return;
+
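+	/*
+	 * Store one CRC per frame until the batch fills, then auto-disable
+	 * collection once last_idx runs past SDE_CRC_BATCH_SIZE.
+	 */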
+	if (misr_map->enable) {
+		if (misr_map->last_idx < misr_map->frame_count &&
+			misr_map->last_idx < SDE_CRC_BATCH_SIZE)
+			misr_map->crc_value[misr_map->last_idx] =
+				SDE_REG_READ(c, INTF_MISR_SIGNATURE);
+	}
+
+	misr_map->enable =
+		misr_map->enable & (misr_map->last_idx <= SDE_CRC_BATCH_SIZE);
+
+	misr_map->last_idx++;
+}
+
+static void _setup_intf_ops(struct sde_hw_intf_ops *ops,
+		unsigned long cap)
+{
+	ops->setup_timing_gen = sde_hw_intf_setup_timing_engine;
+	ops->setup_prg_fetch  = sde_hw_intf_setup_prg_fetch;
+	ops->get_status = sde_hw_intf_get_status;
+	ops->enable_timing = sde_hw_intf_enable_timing_engine;
+	ops->setup_misr = sde_hw_intf_set_misr;
+	ops->collect_misr = sde_hw_intf_collect_misr;
+}
+
+struct sde_hw_intf *sde_hw_intf_init(enum sde_intf idx,
+		void __iomem *addr,
+		struct sde_mdss_cfg *m)
+{
+	struct sde_hw_intf *c;
+	struct sde_intf_cfg *cfg;
+
+	c = kzalloc(sizeof(*c), GFP_KERNEL);
+	if (!c)
+		return ERR_PTR(-ENOMEM);
+
+	cfg = _intf_offset(idx, m, addr, &c->hw);
+	if (IS_ERR_OR_NULL(cfg)) {
+		kfree(c);
+		pr_err("failed to create sde_hw_intf %d\n", idx);
+		return ERR_PTR(-EINVAL);
+	}
+
+	/* Assign ops */
+	c->idx = idx;
+	c->cap = cfg;
+	c->mdss = m;
+	_setup_intf_ops(&c->ops, c->cap->features);
+
+	sde_dbg_reg_register_dump_range(SDE_DBG_NAME, cfg->name, c->hw.blk_off,
+			c->hw.blk_off + c->hw.length, c->hw.xin_id);
+
+	return c;
+}
+
+void sde_hw_intf_destroy(struct sde_hw_intf *intf)
+{
+	kfree(intf);
+}
+
diff -Nruw linux-4.4.115-fbx/drivers/gpu/drm/msm/sde./sde_hw_intf.h linux-4.4.115-fbx/drivers/gpu/drm/msm/sde/sde_hw_intf.h
--- linux-4.4.115-fbx/drivers/gpu/drm/msm/sde./sde_hw_intf.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/gpu/drm/msm/sde/sde_hw_intf.h	2019-01-22 16:16:23.515246515 +0100
@@ -0,0 +1,133 @@
+/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _SDE_HW_INTF_H
+#define _SDE_HW_INTF_H
+
+#include "sde_hw_catalog.h"
+#include "sde_hw_mdss.h"
+#include "sde_hw_util.h"
+
+struct sde_hw_intf;
+
+/* Batch size of frames for collecting MISR data */
+#define SDE_CRC_BATCH_SIZE 16
+
+/**
+ * struct sde_misr_params : Interface for getting and setting MISR data
+ *  Assumption is this structure is accessed after clocks are enabled
+ * @enable: enables/disables MISR
+ * @frame_count: number of frames for which MISR is enabled
+ * @last_idx: number of frames for which MISR data has been collected
+ * @crc_value: stores the collected MISR data
+ */
+struct sde_misr_params {
+	bool enable;
+	u32 frame_count;
+	u32 last_idx;
+	u32 crc_value[SDE_CRC_BATCH_SIZE];
+};
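+
+/*
+ * Typical usage (a sketch based on the intf implementation in
+ * sde_hw_intf.c): set @enable and @frame_count, call setup_misr(),
+ * then call collect_misr() once per frame; collection stops once
+ * @last_idx runs past the batch size.
+ */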
+
+/* intf timing settings */
+struct intf_timing_params {
+	u32 width;		/* active width */
+	u32 height;		/* active height */
+	u32 xres;		/* Display panel width */
+	u32 yres;		/* Display panel height */
+
+	u32 h_back_porch;
+	u32 h_front_porch;
+	u32 v_back_porch;
+	u32 v_front_porch;
+	u32 hsync_pulse_width;
+	u32 vsync_pulse_width;
+	u32 hsync_polarity;
+	u32 vsync_polarity;
+	u32 border_clr;
+	u32 underflow_clr;
+	u32 hsync_skew;
+};
+
+struct intf_prog_fetch {
+	u8 enable;
+	/* vsync counter for the front porch pixel line */
+	u32 fetch_start;
+};
+
+struct intf_status {
+	u8 is_en;		/* interface timing engine is enabled or not */
+	u32 frame_count;	/* frame count since timing engine enabled */
+	u32 line_count;		/* current line count including blanking */
+};
+
+/**
+ * struct sde_hw_intf_ops : Interface to the interface Hw driver functions
+ *  Assumption is these functions will be called after clocks are enabled
+ * @setup_timing_gen: programs the timing engine
+ * @setup_prg_fetch: enables/disables the programmable fetch logic
+ * @enable_timing: enables/disables the timing engine
+ * @get_status: returns whether the timing engine is enabled or not
+ * @setup_misr: enables/disables MISR in the HW register
+ * @collect_misr: reads and stores MISR data from the HW register
+ */
+struct sde_hw_intf_ops {
+	void (*setup_timing_gen)(struct sde_hw_intf *intf,
+			const struct intf_timing_params *p,
+			const struct sde_format *fmt);
+
+	void (*setup_prg_fetch)(struct sde_hw_intf *intf,
+			const struct intf_prog_fetch *fetch);
+
+	void (*enable_timing)(struct sde_hw_intf *intf,
+			u8 enable);
+
+	void (*get_status)(struct sde_hw_intf *intf,
+			struct intf_status *status);
+
+	void (*setup_misr)(struct sde_hw_intf *intf,
+			struct sde_misr_params *misr_map);
+
+	void (*collect_misr)(struct sde_hw_intf *intf,
+			struct sde_misr_params *misr_map);
+};
+
+struct sde_hw_intf {
+	/* base */
+	struct sde_hw_blk_reg_map hw;
+
+	/* intf */
+	enum sde_intf idx;
+	const struct sde_intf_cfg *cap;
+	const struct sde_mdss_cfg *mdss;
+
+	/* ops */
+	struct sde_hw_intf_ops ops;
+};
+
+/**
+ * sde_hw_intf_init(): Initializes the intf driver for the passed
+ * interface idx.
+ * @idx:  interface index for which driver object is required
+ * @addr: mapped register io address of MDP
+ * @m :   pointer to mdss catalog data
+ */
+struct sde_hw_intf *sde_hw_intf_init(enum sde_intf idx,
+		void __iomem *addr,
+		struct sde_mdss_cfg *m);
+
+/**
+ * sde_hw_intf_destroy(): Destroys INTF driver context
+ * @intf:   Pointer to INTF driver context
+ */
+void sde_hw_intf_destroy(struct sde_hw_intf *intf);
+
+#endif /*_SDE_HW_INTF_H */
diff -Nruw linux-4.4.115-fbx/drivers/gpu/drm/msm/sde./sde_hwio.h linux-4.4.115-fbx/drivers/gpu/drm/msm/sde/sde_hwio.h
--- linux-4.4.115-fbx/drivers/gpu/drm/msm/sde./sde_hwio.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/gpu/drm/msm/sde/sde_hwio.h	2019-01-22 16:16:23.519246551 +0100
@@ -0,0 +1,59 @@
+/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _SDE_HWIO_H
+#define _SDE_HWIO_H
+
+#include "sde_hw_util.h"
+
+/**
+ * MDP TOP block register offsets, bit fields and defines
+ */
+#define DISP_INTF_SEL                   0x004
+#define INTR_EN                         0x010
+#define INTR_STATUS                     0x014
+#define INTR_CLEAR                      0x018
+#define INTR2_EN                        0x008
+#define INTR2_STATUS                    0x00c
+#define INTR2_CLEAR                     0x02c
+#define HIST_INTR_EN                    0x01c
+#define HIST_INTR_STATUS                0x020
+#define HIST_INTR_CLEAR                 0x024
+#define INTF_INTR_EN                    0x1C0
+#define INTF_INTR_STATUS                0x1C4
+#define INTF_INTR_CLEAR                 0x1C8
+#define SPLIT_DISPLAY_EN                0x2F4
+#define SPLIT_DISPLAY_UPPER_PIPE_CTRL   0x2F8
+#define DSPP_IGC_COLOR0_RAM_LUTN        0x300
+#define DSPP_IGC_COLOR1_RAM_LUTN        0x304
+#define DSPP_IGC_COLOR2_RAM_LUTN        0x308
+#define PPB0_CNTL                       0x330
+#define PPB0_CONFIG                     0x334
+#define PPB1_CNTL                       0x338
+#define PPB1_CONFIG                     0x33C
+#define HW_EVENTS_CTL                   0x37C
+#define CLK_CTRL3                       0x3A8
+#define CLK_STATUS3                     0x3AC
+#define CLK_CTRL4                       0x3B0
+#define CLK_STATUS4                     0x3B4
+#define CLK_CTRL5                       0x3B8
+#define CLK_STATUS5                     0x3BC
+#define CLK_CTRL7                       0x3D0
+#define CLK_STATUS7                     0x3D4
+#define SPLIT_DISPLAY_LOWER_PIPE_CTRL   0x3F0
+#define SPLIT_DISPLAY_TE_LINE_INTERVAL  0x3F4
+#define INTF_SW_RESET_MASK              0x3FC
+#define MDP_OUT_CTL_0                   0x410
+#define MDP_VSYNC_SEL                   0x414
+#define DCE_SEL                         0x450
+
+#endif /*_SDE_HWIO_H */
diff -Nruw linux-4.4.115-fbx/drivers/gpu/drm/msm/sde./sde_hw_lm.c linux-4.4.115-fbx/drivers/gpu/drm/msm/sde/sde_hw_lm.c
--- linux-4.4.115-fbx/drivers/gpu/drm/msm/sde./sde_hw_lm.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/gpu/drm/msm/sde/sde_hw_lm.c	2019-10-29 09:26:23.641203159 +0100
@@ -0,0 +1,209 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include "sde_hw_catalog.h"
+#include "sde_hwio.h"
+#include "sde_hw_lm.h"
+#include "sde_hw_mdss.h"
+#include "sde_dbg.h"
+
+#define LM_OP_MODE                        0x00
+#define LM_OUT_SIZE                       0x04
+#define LM_BORDER_COLOR_0                 0x08
+#define LM_BORDER_COLOR_1                 0x010
+
+/* These registers are offsets from mixer base + stage base */
+#define LM_BLEND0_OP                     0x00
+#define LM_BLEND0_CONST_ALPHA            0x04
+#define LM_BLEND0_FG_ALPHA               0x04
+#define LM_BLEND0_BG_ALPHA               0x08
+
+static struct sde_lm_cfg *_lm_offset(enum sde_lm mixer,
+		struct sde_mdss_cfg *m,
+		void __iomem *addr,
+		struct sde_hw_blk_reg_map *b)
+{
+	int i;
+
+	for (i = 0; i < m->mixer_count; i++) {
+		if (mixer == m->mixer[i].id) {
+			b->base_off = addr;
+			b->blk_off = m->mixer[i].base;
+			b->length = m->mixer[i].len;
+			b->hwversion = m->hwversion;
+			b->log_mask = SDE_DBG_MASK_LM;
+			return &m->mixer[i];
+		}
+	}
+
+	return ERR_PTR(-EINVAL);
+}
+
+/**
+ * _stage_offset(): returns the relative offset of the blend registers
+ * for the stage to be setup
+ * @ctx:   mixer ctx containing the mixer to be programmed
+ * @stage: stage index to setup
+ */
+static inline int _stage_offset(struct sde_hw_mixer *ctx, enum sde_stage stage)
+{
+	const struct sde_lm_sub_blks *sblk = ctx->cap->sblk;
+	int rc;
+
+	if (stage == SDE_STAGE_BASE)
+		rc = -EINVAL;
+	else if (stage <= sblk->maxblendstages)
+		rc = sblk->blendstage_base[stage - 1];
+	else
+		rc = -EINVAL;
+
+	return rc;
+}
+
+static void sde_hw_lm_setup_out(struct sde_hw_mixer *ctx,
+		struct sde_hw_mixer_cfg *mixer)
+{
+	struct sde_hw_blk_reg_map *c = &ctx->hw;
+	u32 outsize;
+	u32 op_mode;
+
+	op_mode = SDE_REG_READ(c, LM_OP_MODE);
+
+	outsize = mixer->out_height << 16 | mixer->out_width;
+	SDE_REG_WRITE(c, LM_OUT_SIZE, outsize);
+
+	/* SPLIT_LEFT_RIGHT */
+	if (mixer->right_mixer)
+		op_mode |= BIT(31);
+	else
+		op_mode &= ~BIT(31);
+	SDE_REG_WRITE(c, LM_OP_MODE, op_mode);
+}
+
+static void sde_hw_lm_setup_border_color(struct sde_hw_mixer *ctx,
+		struct sde_mdss_color *color,
+		u8 border_en)
+{
+	struct sde_hw_blk_reg_map *c = &ctx->hw;
+
+	if (border_en) {
+		SDE_REG_WRITE(c, LM_BORDER_COLOR_0,
+			(color->color_0 & 0xFFF) |
+			((color->color_1 & 0xFFF) << 0x10));
+		SDE_REG_WRITE(c, LM_BORDER_COLOR_1,
+			(color->color_2 & 0xFFF) |
+			((color->color_3 & 0xFFF) << 0x10));
+	}
+}
+
+static void sde_hw_lm_setup_blend_config_msmskunk(struct sde_hw_mixer *ctx,
+	u32 stage, u32 fg_alpha, u32 bg_alpha, u32 blend_op)
+{
+	struct sde_hw_blk_reg_map *c = &ctx->hw;
+	int stage_off;
+	u32 const_alpha;
+
+	if (stage == SDE_STAGE_BASE)
+		return;
+
+	stage_off = _stage_offset(ctx, stage);
+	if (WARN_ON(stage_off < 0))
+		return;
+
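+	/*
+	 * MSMSKUNK packs both constant alphas into a single register:
+	 * BG alpha in bits 7:0, FG alpha in bits 23:16.
+	 */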
+	const_alpha = (bg_alpha & 0xFF) | ((fg_alpha & 0xFF) << 16);
+	SDE_REG_WRITE(c, LM_BLEND0_CONST_ALPHA + stage_off, const_alpha);
+	SDE_REG_WRITE(c, LM_BLEND0_OP + stage_off, blend_op);
+}
+
+static void sde_hw_lm_setup_blend_config(struct sde_hw_mixer *ctx,
+	u32 stage, u32 fg_alpha, u32 bg_alpha, u32 blend_op)
+{
+	struct sde_hw_blk_reg_map *c = &ctx->hw;
+	int stage_off;
+
+	if (stage == SDE_STAGE_BASE)
+		return;
+
+	stage_off = _stage_offset(ctx, stage);
+	if (WARN_ON(stage_off < 0))
+		return;
+
+	SDE_REG_WRITE(c, LM_BLEND0_FG_ALPHA + stage_off, fg_alpha);
+	SDE_REG_WRITE(c, LM_BLEND0_BG_ALPHA + stage_off, bg_alpha);
+	SDE_REG_WRITE(c, LM_BLEND0_OP + stage_off, blend_op);
+}
+
+static void sde_hw_lm_setup_color3(struct sde_hw_mixer *ctx,
+	uint32_t mixer_op_mode)
+{
+	struct sde_hw_blk_reg_map *c = &ctx->hw;
+	int op_mode;
+
+	/* read the existing op_mode configuration */
+	op_mode = SDE_REG_READ(c, LM_OP_MODE);
+
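+	/*
+	 * Keep bits 31:30 of op_mode intact (BIT(31) is the split-mixer
+	 * flag set in sde_hw_lm_setup_out) and replace only the alpha
+	 * selection bits below them.
+	 */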
+	op_mode = (op_mode & (BIT(31) | BIT(30))) | mixer_op_mode;
+
+	SDE_REG_WRITE(c, LM_OP_MODE, op_mode);
+}
+
+static void sde_hw_lm_gc(struct sde_hw_mixer *mixer,
+			void *cfg)
+{
+}
+
+static void _setup_mixer_ops(struct sde_mdss_cfg *m,
+		struct sde_hw_lm_ops *ops,
+		unsigned long cap)
+{
+	ops->setup_mixer_out = sde_hw_lm_setup_out;
+	if (IS_MSMSKUNK_TARGET(m->hwversion))
+		ops->setup_blend_config = sde_hw_lm_setup_blend_config_msmskunk;
+	else
+		ops->setup_blend_config = sde_hw_lm_setup_blend_config;
+	ops->setup_alpha_out = sde_hw_lm_setup_color3;
+	ops->setup_border_color = sde_hw_lm_setup_border_color;
+	ops->setup_gc = sde_hw_lm_gc;
+}
+
+struct sde_hw_mixer *sde_hw_lm_init(enum sde_lm idx,
+		void __iomem *addr,
+		struct sde_mdss_cfg *m)
+{
+	struct sde_hw_mixer *c;
+	struct sde_lm_cfg *cfg;
+
+	c = kzalloc(sizeof(*c), GFP_KERNEL);
+	if (!c)
+		return ERR_PTR(-ENOMEM);
+
+	cfg = _lm_offset(idx, m, addr, &c->hw);
+	if (IS_ERR_OR_NULL(cfg)) {
+		kfree(c);
+		return ERR_PTR(-EINVAL);
+	}
+
+	/* Assign ops */
+	c->idx = idx;
+	c->cap = cfg;
+	_setup_mixer_ops(m, &c->ops, c->cap->features);
+
+	sde_dbg_reg_register_dump_range(SDE_DBG_NAME, cfg->name, c->hw.blk_off,
+			c->hw.blk_off + c->hw.length, c->hw.xin_id);
+
+	return c;
+}
+
+void sde_hw_lm_destroy(struct sde_hw_mixer *lm)
+{
+	kfree(lm);
+}
diff -Nruw linux-4.4.115-fbx/drivers/gpu/drm/msm/sde./sde_hw_lm.h linux-4.4.115-fbx/drivers/gpu/drm/msm/sde/sde_hw_lm.h
--- linux-4.4.115-fbx/drivers/gpu/drm/msm/sde./sde_hw_lm.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/gpu/drm/msm/sde/sde_hw_lm.h	2019-01-22 16:16:23.515246515 +0100
@@ -0,0 +1,102 @@
+/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _SDE_HW_LM_H
+#define _SDE_HW_LM_H
+
+#include "sde_hw_mdss.h"
+#include "sde_hw_util.h"
+
+struct sde_hw_mixer;
+
+struct sde_hw_mixer_cfg {
+	u32 out_width;
+	u32 out_height;
+	bool right_mixer;
+	int flags;
+};
+
+struct sde_hw_color3_cfg {
+	u8 keep_fg[SDE_STAGE_MAX];
+};
+
+/**
+ * struct sde_hw_lm_ops : Interface to the mixer Hw driver functions
+ *  Assumption is these functions will be called after clocks are enabled
+ */
+struct sde_hw_lm_ops {
+	/*
+	 * Sets up mixer output width and height
+	 * and border color if enabled
+	 */
+	void (*setup_mixer_out)(struct sde_hw_mixer *ctx,
+		struct sde_hw_mixer_cfg *cfg);
+
+	/*
+	 * Alpha blending configuration
+	 * for the specified stage
+	 */
+	void (*setup_blend_config)(struct sde_hw_mixer *ctx, uint32_t stage,
+		uint32_t fg_alpha, uint32_t bg_alpha, uint32_t blend_op);
+
+	/*
+	 * Alpha color component selection from either fg or bg
+	 */
+	void (*setup_alpha_out)(struct sde_hw_mixer *ctx, uint32_t mixer_op);
+
+	/**
+	 * setup_border_color : enable/disable border color
+	 */
+	void (*setup_border_color)(struct sde_hw_mixer *ctx,
+		struct sde_mdss_color *color,
+		u8 border_en);
+	/**
+	 * setup_gc : enable/disable gamma correction feature
+	 */
+	void (*setup_gc)(struct sde_hw_mixer *mixer,
+			void *cfg);
+
+};
+
+struct sde_hw_mixer {
+	/* base */
+	struct sde_hw_blk_reg_map hw;
+
+	/* lm */
+	enum sde_lm  idx;
+	const struct sde_lm_cfg   *cap;
+	const struct sde_mdp_cfg  *mdp;
+	const struct sde_ctl_cfg  *ctl;
+
+	/* ops */
+	struct sde_hw_lm_ops ops;
+};
+
+/**
+ * sde_hw_lm_init(): Initializes the mixer hw driver object.
+ * Should be called once before accessing each mixer.
+ * @idx:  mixer index for which driver object is required
+ * @addr: mapped register io address of MDP
+ * @m :   pointer to mdss catalog data
+ */
+struct sde_hw_mixer *sde_hw_lm_init(enum sde_lm idx,
+		void __iomem *addr,
+		struct sde_mdss_cfg *m);
+
+/**
+ * sde_hw_lm_destroy(): Destroys layer mixer driver context
+ * @lm:   Pointer to LM driver context
+ */
+void sde_hw_lm_destroy(struct sde_hw_mixer *lm);
+
+#endif /*_SDE_HW_LM_H */
diff -Nruw linux-4.4.115/drivers/gpu/drm/msm/sde/sde_hw_mdss.h linux-4.4.115-fbx/drivers/gpu/drm/msm/sde/sde_hw_mdss.h
--- linux-4.4.115/drivers/gpu/drm/msm/sde/sde_hw_mdss.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/gpu/drm/msm/sde/sde_hw_mdss.h	2019-10-29 09:26:23.641203159 +0100
@@ -0,0 +1,471 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _SDE_HW_MDSS_H
+#define _SDE_HW_MDSS_H
+
+#include <linux/kernel.h>
+#include <linux/err.h>
+
+#include "msm_drv.h"
+
+#define SDE_DBG_NAME			"sde"
+
+#define SDE_NONE                        0
+
+#ifndef SDE_CSC_MATRIX_COEFF_SIZE
+#define SDE_CSC_MATRIX_COEFF_SIZE	9
+#endif
+
+#ifndef SDE_CSC_CLAMP_SIZE
+#define SDE_CSC_CLAMP_SIZE		6
+#endif
+
+#ifndef SDE_CSC_BIAS_SIZE
+#define SDE_CSC_BIAS_SIZE		3
+#endif
+
+#ifndef SDE_MAX_PLANES
+#define SDE_MAX_PLANES			4
+#endif
+
+#define PIPES_PER_STAGE			2
+#ifndef SDE_MAX_DE_CURVES
+#define SDE_MAX_DE_CURVES		3
+#endif
+
+enum sde_format_flags {
+	SDE_FORMAT_FLAG_YUV_BIT,
+	SDE_FORMAT_FLAG_DX_BIT,
+	SDE_FORMAT_FLAG_COMPRESSED_BIT,
+	SDE_FORMAT_FLAG_BIT_MAX,
+};
+
+#define SDE_FORMAT_FLAG_YUV		BIT(SDE_FORMAT_FLAG_YUV_BIT)
+#define SDE_FORMAT_FLAG_DX		BIT(SDE_FORMAT_FLAG_DX_BIT)
+#define SDE_FORMAT_FLAG_COMPRESSED	BIT(SDE_FORMAT_FLAG_COMPRESSED_BIT)
+#define SDE_FORMAT_IS_YUV(X)		\
+	(test_bit(SDE_FORMAT_FLAG_YUV_BIT, (X)->flag))
+#define SDE_FORMAT_IS_DX(X)		\
+	(test_bit(SDE_FORMAT_FLAG_DX_BIT, (X)->flag))
+#define SDE_FORMAT_IS_LINEAR(X)		((X)->fetch_mode == SDE_FETCH_LINEAR)
+#define SDE_FORMAT_IS_TILE(X) \
+	(((X)->fetch_mode == SDE_FETCH_UBWC) && \
+			!test_bit(SDE_FORMAT_FLAG_COMPRESSED_BIT, (X)->flag))
+#define SDE_FORMAT_IS_UBWC(X) \
+	(((X)->fetch_mode == SDE_FETCH_UBWC) && \
+			test_bit(SDE_FORMAT_FLAG_COMPRESSED_BIT, (X)->flag))
+
+#define TO_S15D16(_x_) ((_x_) << 7)
+
+#define SDE_BLEND_FG_ALPHA_FG_CONST	(0 << 0)
+#define SDE_BLEND_FG_ALPHA_BG_CONST	(1 << 0)
+#define SDE_BLEND_FG_ALPHA_FG_PIXEL	(2 << 0)
+#define SDE_BLEND_FG_ALPHA_BG_PIXEL	(3 << 0)
+#define SDE_BLEND_FG_INV_ALPHA		(1 << 2)
+#define SDE_BLEND_FG_MOD_ALPHA		(1 << 3)
+#define SDE_BLEND_FG_INV_MOD_ALPHA	(1 << 4)
+#define SDE_BLEND_FG_TRANSP_EN		(1 << 5)
+#define SDE_BLEND_BG_ALPHA_FG_CONST	(0 << 8)
+#define SDE_BLEND_BG_ALPHA_BG_CONST	(1 << 8)
+#define SDE_BLEND_BG_ALPHA_FG_PIXEL	(2 << 8)
+#define SDE_BLEND_BG_ALPHA_BG_PIXEL	(3 << 8)
+#define SDE_BLEND_BG_INV_ALPHA		(1 << 10)
+#define SDE_BLEND_BG_MOD_ALPHA		(1 << 11)
+#define SDE_BLEND_BG_INV_MOD_ALPHA	(1 << 12)
+#define SDE_BLEND_BG_TRANSP_EN		(1 << 13)
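+
+/*
+ * Illustrative compositions (editorial sketch; the two macros below are
+ * hypothetical and not part of the original patch). "Coverage" blending
+ * scales the background by the inverse of the foreground pixel alpha,
+ * while premultiplied sources keep a constant foreground alpha because
+ * the fg pixels already carry their alpha.
+ */
+#define SDE_BLEND_OP_EXAMPLE_COVERAGE \
+	(SDE_BLEND_FG_ALPHA_FG_PIXEL | SDE_BLEND_BG_ALPHA_FG_PIXEL | \
+	 SDE_BLEND_BG_INV_ALPHA)
+#define SDE_BLEND_OP_EXAMPLE_PREMULTIPLIED \
+	(SDE_BLEND_FG_ALPHA_FG_CONST | SDE_BLEND_BG_ALPHA_FG_PIXEL | \
+	 SDE_BLEND_BG_INV_ALPHA)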
+
+enum sde_hw_blk_type {
+	SDE_HW_BLK_TOP = 0,
+	SDE_HW_BLK_SSPP,
+	SDE_HW_BLK_LM,
+	SDE_HW_BLK_DSPP,
+	SDE_HW_BLK_CTL,
+	SDE_HW_BLK_CDM,
+	SDE_HW_BLK_PINGPONG,
+	SDE_HW_BLK_INTF,
+	SDE_HW_BLK_WB,
+	SDE_HW_BLK_MAX,
+};
+
+enum sde_mdp {
+	MDP_TOP = 0x1,
+	MDP_MAX,
+};
+
+enum sde_sspp {
+	SSPP_NONE,
+	SSPP_VIG0,
+	SSPP_VIG1,
+	SSPP_VIG2,
+	SSPP_VIG3,
+	SSPP_RGB0,
+	SSPP_RGB1,
+	SSPP_RGB2,
+	SSPP_RGB3,
+	SSPP_DMA0,
+	SSPP_DMA1,
+	SSPP_DMA2,
+	SSPP_DMA3,
+	SSPP_CURSOR0,
+	SSPP_CURSOR1,
+	SSPP_MAX
+};
+
+enum sde_sspp_type {
+	SSPP_TYPE_VIG,
+	SSPP_TYPE_RGB,
+	SSPP_TYPE_DMA,
+	SSPP_TYPE_CURSOR,
+	SSPP_TYPE_MAX
+};
+
+enum sde_lm {
+	LM_0 = 1,
+	LM_1,
+	LM_2,
+	LM_3,
+	LM_4,
+	LM_5,
+	LM_6,
+	LM_MAX
+};
+
+enum sde_stage {
+	SDE_STAGE_BASE = 0,
+	SDE_STAGE_0,
+	SDE_STAGE_1,
+	SDE_STAGE_2,
+	SDE_STAGE_3,
+	SDE_STAGE_4,
+	SDE_STAGE_5,
+	SDE_STAGE_6,
+	SDE_STAGE_MAX
+};
+enum sde_dspp {
+	DSPP_0 = 1,
+	DSPP_1,
+	DSPP_2,
+	DSPP_3,
+	DSPP_MAX
+};
+
+enum sde_ctl {
+	CTL_0 = 1,
+	CTL_1,
+	CTL_2,
+	CTL_3,
+	CTL_4,
+	CTL_MAX
+};
+
+enum sde_cdm {
+	CDM_0 = 1,
+	CDM_1,
+	CDM_MAX
+};
+
+enum sde_pingpong {
+	PINGPONG_0 = 1,
+	PINGPONG_1,
+	PINGPONG_2,
+	PINGPONG_3,
+	PINGPONG_4,
+	PINGPONG_S0,
+	PINGPONG_MAX
+};
+
+enum sde_intf {
+	INTF_0 = 1,
+	INTF_1,
+	INTF_2,
+	INTF_3,
+	INTF_4,
+	INTF_5,
+	INTF_6,
+	INTF_MAX
+};
+
+enum sde_intf_type {
+	INTF_NONE = 0x0,
+	INTF_DSI = 0x1,
+	INTF_HDMI = 0x3,
+	INTF_LCDC = 0x5,
+	INTF_EDP = 0x9,
+	INTF_DP = 0xa,
+	INTF_TYPE_MAX,
+
+	/* virtual interfaces */
+	INTF_WB = 0x100,
+};
+
+enum sde_intf_mode {
+	INTF_MODE_NONE = 0,
+	INTF_MODE_CMD,
+	INTF_MODE_VIDEO,
+	INTF_MODE_WB_BLOCK,
+	INTF_MODE_WB_LINE,
+	INTF_MODE_MAX
+};
+
+enum sde_wb {
+	WB_0 = 1,
+	WB_1,
+	WB_2,
+	WB_3,
+	WB_MAX
+};
+
+enum sde_ad {
+	AD_0 = 0x1,
+	AD_1,
+	AD_MAX
+};
+
+enum sde_cwb {
+	CWB_0 = 0x1,
+	CWB_1,
+	CWB_2,
+	CWB_3,
+	CWB_MAX
+};
+
+enum sde_wd_timer {
+	WD_TIMER_0 = 0x1,
+	WD_TIMER_1,
+	WD_TIMER_2,
+	WD_TIMER_3,
+	WD_TIMER_4,
+	WD_TIMER_5,
+	WD_TIMER_MAX
+};
+
+enum sde_vbif {
+	VBIF_0,
+	VBIF_1,
+	VBIF_MAX,
+	VBIF_RT = VBIF_0,
+	VBIF_NRT = VBIF_1
+};
+
+enum sde_iommu_domain {
+	SDE_IOMMU_DOMAIN_UNSECURE,
+	SDE_IOMMU_DOMAIN_SECURE,
+	SDE_IOMMU_DOMAIN_MAX
+};
+
+/**
+ * SDE HW component order color map
+ */
+enum {
+	C0_G_Y = 0,
+	C1_B_Cb = 1,
+	C2_R_Cr = 2,
+	C3_ALPHA = 3
+};
+
+/**
+ * enum sde_plane_type - defines the color component pixel packing
+ * @SDE_PLANE_INTERLEAVED   : Color components in a single plane
+ * @SDE_PLANE_PLANAR        : Color components in separate planes
+ * @SDE_PLANE_PSEUDO_PLANAR : Chroma components interleaved in a separate plane
+ */
+enum sde_plane_type {
+	SDE_PLANE_INTERLEAVED,
+	SDE_PLANE_PLANAR,
+	SDE_PLANE_PSEUDO_PLANAR,
+};
+
+/**
+ * enum sde_chroma_samp_type - chroma sub-sampling type
+ * @SDE_CHROMA_RGB   : No chroma subsampling
+ * @SDE_CHROMA_H2V1  : Chroma pixels are horizontally subsampled
+ * @SDE_CHROMA_H1V2  : Chroma pixels are vertically subsampled
+ * @SDE_CHROMA_420   : 420 subsampling
+ */
+enum sde_chroma_samp_type {
+	SDE_CHROMA_RGB,
+	SDE_CHROMA_H2V1,
+	SDE_CHROMA_H1V2,
+	SDE_CHROMA_420
+};
+
+/**
+ * sde_fetch_type - defines how SDE HW fetches data
+ * @SDE_FETCH_LINEAR   : fetch is line by line
+ * @SDE_FETCH_TILE     : fetches data in Z order from a tile
+ * @SDE_FETCH_UBWC     : fetch and decompress data
+ */
+enum sde_fetch_type {
+	SDE_FETCH_LINEAR,
+	SDE_FETCH_TILE,
+	SDE_FETCH_UBWC
+};
+
+/**
+ * Enum values are chosen to fit the number of bits
+ * expected by the HW programming.
+ */
+enum {
+	COLOR_ALPHA_1BIT = 0,
+	COLOR_ALPHA_4BIT = 1,
+	COLOR_4BIT = 0,
+	COLOR_5BIT = 1, /* No 5-bit Alpha */
+	COLOR_6BIT = 2, /* 6-Bit Alpha also = 2 */
+	COLOR_8BIT = 3, /* 8-Bit Alpha also = 3 */
+};
+
+/**
+ * enum sde_3d_blend_mode
+ * Describes how the 3d data is blended
+ * @BLEND_3D_NONE      : 3d blending not enabled
+ * @BLEND_3D_FRAME_INT : Frame interleaving
+ * @BLEND_3D_H_ROW_INT : Horizontal row interleaving
+ * @BLEND_3D_V_ROW_INT : Vertical row interleaving
+ * @BLEND_3D_COL_INT   : Column interleaving
+ * @BLEND_3D_MAX       : Enum sentinel
+ */
+enum sde_3d_blend_mode {
+	BLEND_3D_NONE = 0,
+	BLEND_3D_FRAME_INT,
+	BLEND_3D_H_ROW_INT,
+	BLEND_3D_V_ROW_INT,
+	BLEND_3D_COL_INT,
+	BLEND_3D_MAX
+};
+
+enum sde_csc_type {
+	SDE_CSC_RGB2YUV_601L,
+	SDE_CSC_RGB2YUV_601FR,
+	SDE_CSC_RGB2YUV_709L,
+	SDE_CSC_RGB2YUV_709FR,
+	SDE_CSC_RGB2YUV_2020L,
+	SDE_CSC_RGB2YUV_2020FR,
+	SDE_MAX_CSC
+};
+
+/**
+ * struct sde_format - defines the format configuration which
+ * allows SDE HW to correctly fetch and decode the format
+ * @base: base msm_format structure containing fourcc code
+ * @fetch_planes: how the color components are packed in pixel format
+ * @element: element color ordering
+ * @bits: element bit widths
+ * @chroma_sample: chroma sub-sampling type
+ * @unpack_align_msb: unpack aligned, 0 to LSB, 1 to MSB
+ * @unpack_tight: 0 for loose, 1 for tight
+ * @unpack_count: 0 = 1 component, 1 = 2 components
+ * @bpp: bytes per pixel
+ * @alpha_enable: whether the format has an alpha channel
+ * @num_planes: number of planes (including meta data planes)
+ * @fetch_mode: linear, tiled, or ubwc hw fetch behavior
+ * @is_yuv: is format a yuv variant
+ * @flag: usage bit flags
+ * @tile_width: format tile width
+ * @tile_height: format tile height
+ */
+struct sde_format {
+	struct msm_format base;
+	enum sde_plane_type fetch_planes;
+	u8 element[SDE_MAX_PLANES];
+	u8 bits[SDE_MAX_PLANES];
+	enum sde_chroma_samp_type chroma_sample;
+	u8 unpack_align_msb;
+	u8 unpack_tight;
+	u8 unpack_count;
+	u8 bpp;
+	u8 alpha_enable;
+	u8 num_planes;
+	enum sde_fetch_type fetch_mode;
+	DECLARE_BITMAP(flag, SDE_FORMAT_FLAG_BIT_MAX);
+	u16 tile_width;
+	u16 tile_height;
+};
+#define to_sde_format(x) container_of(x, struct sde_format, base)
+
+/**
+ * struct sde_hw_fmt_layout - format information of the source pixel data
+ * @format: pixel format parameters
+ * @num_planes: number of planes (including meta data planes)
+ * @width: image width
+ * @height: image height
+ * @total_size: total size in bytes
+ * @plane_addr: address of each plane
+ * @plane_size: length of each plane
+ * @plane_pitch: pitch of each plane
+ */
+struct sde_hw_fmt_layout {
+	const struct sde_format *format;
+	uint32_t num_planes;
+	uint32_t width;
+	uint32_t height;
+	uint32_t total_size;
+	uint32_t plane_addr[SDE_MAX_PLANES];
+	uint32_t plane_size[SDE_MAX_PLANES];
+	uint32_t plane_pitch[SDE_MAX_PLANES];
+};
+
+struct sde_rect {
+	u16 x;
+	u16 y;
+	u16 w;
+	u16 h;
+};
+
+struct sde_csc_cfg {
+	/* matrix coefficients in S15.16 format */
+	uint32_t csc_mv[SDE_CSC_MATRIX_COEFF_SIZE];
+	uint32_t csc_pre_bv[SDE_CSC_BIAS_SIZE];
+	uint32_t csc_post_bv[SDE_CSC_BIAS_SIZE];
+	uint32_t csc_pre_lv[SDE_CSC_CLAMP_SIZE];
+	uint32_t csc_post_lv[SDE_CSC_CLAMP_SIZE];
+};
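+
+/*
+ * Illustrative identity CSC (editorial sketch, not part of the original
+ * patch; the clamp values assume a 10-bit pipeline). 1.0 in S15.16 is
+ * 1 << 16, which TO_S15D16 produces from the S4.9 value 0x200.
+ */
+static const struct sde_csc_cfg sde_csc_identity_example __maybe_unused = {
+	{
+		TO_S15D16(0x200), TO_S15D16(0x000), TO_S15D16(0x000),
+		TO_S15D16(0x000), TO_S15D16(0x200), TO_S15D16(0x000),
+		TO_S15D16(0x000), TO_S15D16(0x000), TO_S15D16(0x200),
+	},
+	{ 0x0, 0x0, 0x0 },
+	{ 0x0, 0x0, 0x0 },
+	{ 0x0, 0x3ff, 0x0, 0x3ff, 0x0, 0x3ff },
+	{ 0x0, 0x3ff, 0x0, 0x3ff, 0x0, 0x3ff },
+};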
+
+/**
+ * struct sde_mdss_color - mdss color description
+ * color 0 : green
+ * color 1 : blue
+ * color 2 : red
+ * color 3 : alpha
+ */
+struct sde_mdss_color {
+	u32 color_0;
+	u32 color_1;
+	u32 color_2;
+	u32 color_3;
+};
+
+/*
+ * Define bit masks for h/w logging.
+ */
+#define SDE_DBG_MASK_NONE     (1 << 0)
+#define SDE_DBG_MASK_CDM      (1 << 1)
+#define SDE_DBG_MASK_DSPP     (1 << 2)
+#define SDE_DBG_MASK_INTF     (1 << 3)
+#define SDE_DBG_MASK_LM       (1 << 4)
+#define SDE_DBG_MASK_CTL      (1 << 5)
+#define SDE_DBG_MASK_PINGPONG (1 << 6)
+#define SDE_DBG_MASK_SSPP     (1 << 7)
+#define SDE_DBG_MASK_WB       (1 << 8)
+#define SDE_DBG_MASK_TOP      (1 << 9)
+#define SDE_DBG_MASK_VBIF     (1 << 10)
+
+/**
+ * struct sde_hw_cp_cfg: hardware dspp/lm feature payload.
+ * @payload: Feature specific payload.
+ * @len: Length of the payload.
+ */
+struct sde_hw_cp_cfg {
+	void *payload;
+	u32 len;
+};
+
+#endif  /* _SDE_HW_MDSS_H */
diff -Nruw linux-4.4.115/drivers/gpu/drm/msm/sde/sde_hw_pingpong.c linux-4.4.115-fbx/drivers/gpu/drm/msm/sde/sde_hw_pingpong.c
--- linux-4.4.115/drivers/gpu/drm/msm/sde/sde_hw_pingpong.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/gpu/drm/msm/sde/sde_hw_pingpong.c	2019-10-29 09:26:23.641203159 +0100
@@ -0,0 +1,173 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include "sde_hw_mdss.h"
+#include "sde_hwio.h"
+#include "sde_hw_catalog.h"
+#include "sde_hw_pingpong.h"
+#include "sde_dbg.h"
+
+#define PP_TEAR_CHECK_EN                0x000
+#define PP_SYNC_CONFIG_VSYNC            0x004
+#define PP_SYNC_CONFIG_HEIGHT           0x008
+#define PP_SYNC_WRCOUNT                 0x00C
+#define PP_VSYNC_INIT_VAL               0x010
+#define PP_INT_COUNT_VAL                0x014
+#define PP_SYNC_THRESH                  0x018
+#define PP_START_POS                    0x01C
+#define PP_RD_PTR_IRQ                   0x020
+#define PP_WR_PTR_IRQ                   0x024
+#define PP_OUT_LINE_COUNT               0x028
+#define PP_LINE_COUNT                   0x02C
+#define PP_AUTOREFRESH_CONFIG           0x030
+
+#define PP_FBC_MODE                     0x034
+#define PP_FBC_BUDGET_CTL               0x038
+#define PP_FBC_LOSSY_MODE               0x03C
+#define PP_DSC_MODE                     0x0a0
+#define PP_DCE_DATA_IN_SWAP             0x0ac
+#define PP_DCE_DATA_OUT_SWAP            0x0c8
+
+static struct sde_pingpong_cfg *_pingpong_offset(enum sde_pingpong pp,
+		struct sde_mdss_cfg *m,
+		void __iomem *addr,
+		struct sde_hw_blk_reg_map *b)
+{
+	int i;
+
+	for (i = 0; i < m->pingpong_count; i++) {
+		if (pp == m->pingpong[i].id) {
+			b->base_off = addr;
+			b->blk_off = m->pingpong[i].base;
+			b->length = m->pingpong[i].len;
+			b->hwversion = m->hwversion;
+			b->log_mask = SDE_DBG_MASK_PINGPONG;
+			return &m->pingpong[i];
+		}
+	}
+
+	return ERR_PTR(-EINVAL);
+}
+
+static int sde_hw_pp_setup_te_config(struct sde_hw_pingpong *pp,
+		struct sde_hw_tear_check *te)
+{
+	struct sde_hw_blk_reg_map *c = &pp->hw;
+	int cfg;
+
+	cfg = BIT(19); /* VSYNC_COUNTER_EN */
+	if (te->hw_vsync_mode)
+		cfg |= BIT(20);
+
+	cfg |= te->vsync_count;
+
+	SDE_REG_WRITE(c, PP_SYNC_CONFIG_VSYNC, cfg);
+	SDE_REG_WRITE(c, PP_SYNC_CONFIG_HEIGHT, te->sync_cfg_height);
+	SDE_REG_WRITE(c, PP_VSYNC_INIT_VAL, te->vsync_init_val);
+	SDE_REG_WRITE(c, PP_RD_PTR_IRQ, te->rd_ptr_irq);
+	SDE_REG_WRITE(c, PP_START_POS, te->start_pos);
+	SDE_REG_WRITE(c, PP_SYNC_THRESH,
+			((te->sync_threshold_continue << 16) |
+			 te->sync_threshold_start));
+	SDE_REG_WRITE(c, PP_SYNC_WRCOUNT,
+			(te->start_pos + te->sync_threshold_start + 1));
+
+	return 0;
+}
+
+static int sde_hw_pp_setup_autorefresh_config(struct sde_hw_pingpong *pp,
+		struct sde_hw_autorefresh *cfg)
+{
+	struct sde_hw_blk_reg_map *c = &pp->hw;
+	u32 refresh_cfg;
+
+	if (cfg->enable)
+		refresh_cfg = BIT(31) | cfg->frame_count;
+	else
+		refresh_cfg = 0;
+
+	SDE_REG_WRITE(c, PP_AUTOREFRESH_CONFIG,
+			refresh_cfg);
+
+	return 0;
+}
+
+static int sde_hw_pp_setup_dsc_compression(struct sde_hw_pingpong *pp,
+		struct sde_hw_dsc_cfg *cfg)
+{
+	return 0;
+}
+
+static int sde_hw_pp_enable_te(struct sde_hw_pingpong *pp, bool enable)
+{
+	struct sde_hw_blk_reg_map *c = &pp->hw;
+
+	SDE_REG_WRITE(c, PP_TEAR_CHECK_EN, enable);
+	return 0;
+}
+
+static int sde_hw_pp_get_vsync_info(struct sde_hw_pingpong *pp,
+		struct sde_hw_pp_vsync_info *info)
+{
+	struct sde_hw_blk_reg_map *c = &pp->hw;
+	u32 val;
+
+	val = SDE_REG_READ(c, PP_VSYNC_INIT_VAL);
+	info->init_val = val & 0xffff;
+
+	val = SDE_REG_READ(c, PP_INT_COUNT_VAL);
+	info->vsync_count = (val & 0xffff0000) >> 16;
+	info->line_count = val & 0xffff;
+
+	return 0;
+}
+
+static void _setup_pingpong_ops(struct sde_hw_pingpong_ops *ops,
+		unsigned long cap)
+{
+	ops->setup_tearcheck = sde_hw_pp_setup_te_config;
+	ops->enable_tearcheck = sde_hw_pp_enable_te;
+	ops->get_vsync_info = sde_hw_pp_get_vsync_info;
+	ops->setup_autorefresh = sde_hw_pp_setup_autorefresh_config;
+	ops->setup_dsc = sde_hw_pp_setup_dsc_compression;
+}
+
+struct sde_hw_pingpong *sde_hw_pingpong_init(enum sde_pingpong idx,
+		void __iomem *addr,
+		struct sde_mdss_cfg *m)
+{
+	struct sde_hw_pingpong *c;
+	struct sde_pingpong_cfg *cfg;
+
+	c = kzalloc(sizeof(*c), GFP_KERNEL);
+	if (!c)
+		return ERR_PTR(-ENOMEM);
+
+	cfg = _pingpong_offset(idx, m, addr, &c->hw);
+	if (IS_ERR_OR_NULL(cfg)) {
+		kfree(c);
+		return ERR_PTR(-EINVAL);
+	}
+
+	c->idx = idx;
+	c->pingpong_hw_cap = cfg;
+	_setup_pingpong_ops(&c->ops, c->pingpong_hw_cap->features);
+
+	sde_dbg_reg_register_dump_range(SDE_DBG_NAME, cfg->name, c->hw.blk_off,
+			c->hw.blk_off + c->hw.length, c->hw.xin_id);
+
+	return c;
+}
+
+void sde_hw_pingpong_destroy(struct sde_hw_pingpong *pp)
+{
+	kfree(pp);
+}
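+
+/*
+ * Example usage (editorial sketch; the function and the values below are
+ * hypothetical): a command-mode encoder would program tear check first
+ * and only then enable it.
+ */
+static void __maybe_unused _sde_hw_pp_te_usage_example(
+		struct sde_hw_pingpong *pp)
+{
+	struct sde_hw_tear_check tc = {
+		.vsync_count = 145,		/* vsync clk / (fps * lines) */
+		.sync_cfg_height = 0xfff0,
+		.vsync_init_val = 0,
+		.sync_threshold_start = 4,
+		.sync_threshold_continue = 4,
+		.start_pos = 1920,
+		.rd_ptr_irq = 1921,
+		.hw_vsync_mode = 1,
+	};
+
+	if (pp->ops.setup_tearcheck)
+		pp->ops.setup_tearcheck(pp, &tc);
+	if (pp->ops.enable_tearcheck)
+		pp->ops.enable_tearcheck(pp, true);
+}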
diff -Nruw linux-4.4.115/drivers/gpu/drm/msm/sde/sde_hw_pingpong.h linux-4.4.115-fbx/drivers/gpu/drm/msm/sde/sde_hw_pingpong.h
--- linux-4.4.115/drivers/gpu/drm/msm/sde/sde_hw_pingpong.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/gpu/drm/msm/sde/sde_hw_pingpong.h	2019-01-22 16:16:23.515246515 +0100
@@ -0,0 +1,123 @@
+/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _SDE_HW_PINGPONG_H
+#define _SDE_HW_PINGPONG_H
+
+struct sde_hw_pingpong;
+
+struct sde_hw_tear_check {
+	/*
+	 * This is the ratio of the MDP vsync clock frequency (Hz) to the
+	 * refresh rate, divided by the number of lines
+	 */
+	u32 vsync_count;
+	u32 sync_cfg_height;
+	u32 vsync_init_val;
+	u32 sync_threshold_start;
+	u32 sync_threshold_continue;
+	u32 start_pos;
+	u32 rd_ptr_irq;
+	u8 hw_vsync_mode;
+};
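+
+/*
+ * Editorial example of the vsync_count relation documented above (the
+ * helper is hypothetical, not part of the original patch): with a
+ * 19.2 MHz vsync counter clock, a 60 fps panel and 2200 total lines,
+ * vsync_count = 19200000 / (60 * 2200).
+ */
+static inline u32 sde_hw_tc_vsync_count_example(u32 vsync_clk_hz, u32 fps,
+		u32 total_lines)
+{
+	return vsync_clk_hz / (fps * total_lines);
+}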
+
+struct sde_hw_autorefresh {
+	bool  enable;
+	u32 frame_count;
+};
+
+struct sde_hw_pp_vsync_info {
+	u32 init_val; /* value of rd pointer at vsync edge */
+	u32 vsync_count;    /* mdp clocks to complete one line */
+	u32 line_count;   /* current line count */
+};
+
+struct sde_hw_dsc_cfg {
+	u8 enable;
+};
+
+/**
+ * struct sde_hw_pingpong_ops : Interface to the pingpong HW driver functions
+ *  Assumption is these functions will be called after clocks are enabled
+ *  @setup_tearcheck : program the tear check configuration
+ *  @enable_tearcheck : enable/disable the tear check block
+ *  @get_vsync_info : retrieve the vsync and line counts
+ *  @setup_autorefresh : configure and enable autorefresh
+ *  @setup_dsc : program the DSC compression block
+ */
+struct sde_hw_pingpong_ops {
+	/**
+	 * enables vysnc generation and sets up init value of
+	 * read pointer and programs the tear check cofiguration
+	 */
+	int (*setup_tearcheck)(struct sde_hw_pingpong *pp,
+			struct sde_hw_tear_check *cfg);
+
+	/**
+	 * enables tear check block
+	 */
+	int (*enable_tearcheck)(struct sde_hw_pingpong *pp,
+			bool enable);
+
+	/**
+	 * provides the programmed and current
+	 * line_count
+	 */
+	int (*get_vsync_info)(struct sde_hw_pingpong *pp,
+			struct sde_hw_pp_vsync_info  *info);
+
+	/**
+	 * configure and enable the autorefresh config
+	 */
+	int (*setup_autorefresh)(struct sde_hw_pingpong *pp,
+			struct sde_hw_autorefresh *cfg);
+
+	/**
+	 * Program the dsc compression block
+	 */
+	int (*setup_dsc)(struct sde_hw_pingpong *pp,
+			struct sde_hw_dsc_cfg *cfg);
+};
+
+struct sde_hw_pingpong {
+	/* base */
+	struct sde_hw_blk_reg_map hw;
+
+	/* pingpong */
+	enum sde_pingpong idx;
+	const struct sde_pingpong_cfg *pingpong_hw_cap;
+
+	/* ops */
+	struct sde_hw_pingpong_ops ops;
+};
+
+/**
+ * sde_hw_pingpong_init - initializes the pingpong driver for the passed
+ *	pingpong idx.
+ * @idx:  Pingpong index for which driver object is required
+ * @addr: Mapped register io address of MDP
+ * @m:    Pointer to mdss catalog data
+ * Returns: Error code or allocated sde_hw_pingpong context
+ */
+struct sde_hw_pingpong *sde_hw_pingpong_init(enum sde_pingpong idx,
+		void __iomem *addr,
+		struct sde_mdss_cfg *m);
+
+/**
+ * sde_hw_pingpong_destroy - destroys pingpong driver context
+ *	should be called to free the context
+ * @pp:   Pointer to PP driver context returned by sde_hw_pingpong_init
+ */
+void sde_hw_pingpong_destroy(struct sde_hw_pingpong *pp);
+
+#endif /*_SDE_HW_PINGPONG_H */
diff -Nruw linux-4.4.115/drivers/gpu/drm/msm/sde/sde_hw_sspp.c linux-4.4.115-fbx/drivers/gpu/drm/msm/sde/sde_hw_sspp.c
--- linux-4.4.115/drivers/gpu/drm/msm/sde/sde_hw_sspp.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/gpu/drm/msm/sde/sde_hw_sspp.c	2019-10-29 09:26:23.641203159 +0100
@@ -0,0 +1,959 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include "sde_hwio.h"
+#include "sde_hw_catalog.h"
+#include "sde_hw_lm.h"
+#include "sde_hw_sspp.h"
+#include "sde_hw_color_processing.h"
+#include "sde_dbg.h"
+
+#define SDE_FETCH_CONFIG_RESET_VALUE   0x00000087
+
+/* SDE_SSPP_SRC */
+#define SSPP_SRC_SIZE                      0x00
+#define SSPP_SRC_XY                        0x08
+#define SSPP_OUT_SIZE                      0x0c
+#define SSPP_OUT_XY                        0x10
+#define SSPP_SRC0_ADDR                     0x14
+#define SSPP_SRC1_ADDR                     0x18
+#define SSPP_SRC2_ADDR                     0x1C
+#define SSPP_SRC3_ADDR                     0x20
+#define SSPP_SRC_YSTRIDE0                  0x24
+#define SSPP_SRC_YSTRIDE1                  0x28
+#define SSPP_SRC_FORMAT                    0x30
+#define SSPP_SRC_UNPACK_PATTERN            0x34
+#define SSPP_SRC_OP_MODE                   0x38
+#define MDSS_MDP_OP_DEINTERLACE            BIT(22)
+
+#define MDSS_MDP_OP_DEINTERLACE_ODD        BIT(23)
+#define MDSS_MDP_OP_IGC_ROM_1              BIT(18)
+#define MDSS_MDP_OP_IGC_ROM_0              BIT(17)
+#define MDSS_MDP_OP_IGC_EN                 BIT(16)
+#define MDSS_MDP_OP_FLIP_UD                BIT(14)
+#define MDSS_MDP_OP_FLIP_LR                BIT(13)
+#define MDSS_MDP_OP_BWC_EN                 BIT(0)
+#define MDSS_MDP_OP_PE_OVERRIDE            BIT(31)
+#define MDSS_MDP_OP_BWC_LOSSLESS           (0 << 1)
+#define MDSS_MDP_OP_BWC_Q_HIGH             (1 << 1)
+#define MDSS_MDP_OP_BWC_Q_MED              (2 << 1)
+
+#define SSPP_SRC_CONSTANT_COLOR            0x3c
+#define SSPP_FETCH_CONFIG                  0x048
+#define SSPP_DANGER_LUT                    0x60
+#define SSPP_SAFE_LUT                      0x64
+#define SSPP_CREQ_LUT                      0x68
+#define SSPP_QOS_CTRL                      0x6C
+#define SSPP_DECIMATION_CONFIG             0xB4
+#define SSPP_SRC_ADDR_SW_STATUS            0x70
+#define SSPP_SW_PIX_EXT_C0_LR              0x100
+#define SSPP_SW_PIX_EXT_C0_TB              0x104
+#define SSPP_SW_PIX_EXT_C0_REQ_PIXELS      0x108
+#define SSPP_SW_PIX_EXT_C1C2_LR            0x110
+#define SSPP_SW_PIX_EXT_C1C2_TB            0x114
+#define SSPP_SW_PIX_EXT_C1C2_REQ_PIXELS    0x118
+#define SSPP_SW_PIX_EXT_C3_LR              0x120
+#define SSPP_SW_PIX_EXT_C3_TB              0x124
+#define SSPP_SW_PIX_EXT_C3_REQ_PIXELS      0x128
+#define SSPP_UBWC_ERROR_STATUS             0x138
+#define SSPP_VIG_OP_MODE                   0x0
+#define SSPP_VIG_CSC_10_OP_MODE            0x0
+
+/* SSPP_QOS_CTRL */
+#define SSPP_QOS_CTRL_VBLANK_EN            BIT(16)
+#define SSPP_QOS_CTRL_DANGER_SAFE_EN       BIT(0)
+#define SSPP_QOS_CTRL_DANGER_VBLANK_MASK   0x3
+#define SSPP_QOS_CTRL_DANGER_VBLANK_OFF    4
+#define SSPP_QOS_CTRL_CREQ_VBLANK_MASK     0x3
+#define SSPP_QOS_CTRL_CREQ_VBLANK_OFF      20
+
+/* SDE_SSPP_SCALER_QSEED2 */
+#define SCALE_CONFIG                       0x04
+#define COMP0_3_PHASE_STEP_X               0x10
+#define COMP0_3_PHASE_STEP_Y               0x14
+#define COMP1_2_PHASE_STEP_X               0x18
+#define COMP1_2_PHASE_STEP_Y               0x1c
+#define COMP0_3_INIT_PHASE_X               0x20
+#define COMP0_3_INIT_PHASE_Y               0x24
+#define COMP1_2_INIT_PHASE_X               0x28
+#define COMP1_2_INIT_PHASE_Y               0x2C
+#define VIG_0_QSEED2_SHARP                 0x30
+
+/* SDE_SSPP_SCALER_QSEED3 */
+#define QSEED3_HW_VERSION                  0x00
+#define QSEED3_OP_MODE                     0x04
+#define QSEED3_RGB2Y_COEFF                 0x08
+#define QSEED3_PHASE_INIT                  0x0C
+#define QSEED3_PHASE_STEP_Y_H              0x10
+#define QSEED3_PHASE_STEP_Y_V              0x14
+#define QSEED3_PHASE_STEP_UV_H             0x18
+#define QSEED3_PHASE_STEP_UV_V             0x1C
+#define QSEED3_PRELOAD                     0x20
+#define QSEED3_DE_SHARPEN                  0x24
+#define QSEED3_DE_SHARPEN_CTL              0x28
+#define QSEED3_DE_SHAPE_CTL                0x2C
+#define QSEED3_DE_THRESHOLD                0x30
+#define QSEED3_DE_ADJUST_DATA_0            0x34
+#define QSEED3_DE_ADJUST_DATA_1            0x38
+#define QSEED3_DE_ADJUST_DATA_2            0x3C
+#define QSEED3_SRC_SIZE_Y_RGB_A            0x40
+#define QSEED3_SRC_SIZE_UV                 0x44
+#define QSEED3_DST_SIZE                    0x48
+#define QSEED3_COEF_LUT_CTRL               0x4C
+#define QSEED3_COEF_LUT_SWAP_BIT           0
+#define QSEED3_COEF_LUT_DIR_BIT            1
+#define QSEED3_COEF_LUT_Y_CIR_BIT          2
+#define QSEED3_COEF_LUT_UV_CIR_BIT         3
+#define QSEED3_COEF_LUT_Y_SEP_BIT          4
+#define QSEED3_COEF_LUT_UV_SEP_BIT         5
+#define QSEED3_BUFFER_CTRL                 0x50
+#define QSEED3_CLK_CTRL0                   0x54
+#define QSEED3_CLK_CTRL1                   0x58
+#define QSEED3_CLK_STATUS                  0x5C
+#define QSEED3_MISR_CTRL                   0x70
+#define QSEED3_MISR_SIGNATURE_0            0x74
+#define QSEED3_MISR_SIGNATURE_1            0x78
+#define QSEED3_PHASE_INIT_Y_H              0x90
+#define QSEED3_PHASE_INIT_Y_V              0x94
+#define QSEED3_PHASE_INIT_UV_H             0x98
+#define QSEED3_PHASE_INIT_UV_V             0x9C
+#define QSEED3_COEF_LUT                    0x100
+#define QSEED3_FILTERS                     5
+#define QSEED3_LUT_REGIONS                 4
+#define QSEED3_CIRCULAR_LUTS               9
+#define QSEED3_SEPARABLE_LUTS              10
+#define QSEED3_LUT_SIZE                    60
+#define QSEED3_ENABLE                      2
+#define QSEED3_DIR_LUT_SIZE                (200 * sizeof(u32))
+#define QSEED3_CIR_LUT_SIZE \
+	(QSEED3_LUT_SIZE * QSEED3_CIRCULAR_LUTS * sizeof(u32))
+#define QSEED3_SEP_LUT_SIZE \
+	(QSEED3_LUT_SIZE * QSEED3_SEPARABLE_LUTS * sizeof(u32))
+
+/*
+ * Definitions for VIG op modes
+ */
+#define VIG_OP_CSC_DST_DATAFMT BIT(19)
+#define VIG_OP_CSC_SRC_DATAFMT BIT(18)
+#define VIG_OP_CSC_EN          BIT(17)
+#define VIG_OP_MEM_PROT_CONT   BIT(15)
+#define VIG_OP_MEM_PROT_VAL    BIT(14)
+#define VIG_OP_MEM_PROT_SAT    BIT(13)
+#define VIG_OP_MEM_PROT_HUE    BIT(12)
+#define VIG_OP_HIST            BIT(8)
+#define VIG_OP_SKY_COL         BIT(7)
+#define VIG_OP_FOIL            BIT(6)
+#define VIG_OP_SKIN_COL        BIT(5)
+#define VIG_OP_PA_EN           BIT(4)
+#define VIG_OP_PA_SAT_ZERO_EXP BIT(2)
+#define VIG_OP_MEM_PROT_BLEND  BIT(1)
+
+/*
+ * Definitions for CSC 10 op modes
+ */
+#define VIG_CSC_10_SRC_DATAFMT BIT(1)
+#define VIG_CSC_10_EN          BIT(0)
+#define CSC_10BIT_OFFSET       4
+
+static inline int _sspp_subblk_offset(struct sde_hw_pipe *ctx,
+		int s_id,
+		u32 *idx)
+{
+	int rc = 0;
+	const struct sde_sspp_sub_blks *sblk;
+
+	if (!ctx)
+		return -EINVAL;
+
+	sblk = ctx->cap->sblk;
+
+	switch (s_id) {
+	case SDE_SSPP_SRC:
+		*idx = sblk->src_blk.base;
+		break;
+	case SDE_SSPP_SCALER_QSEED2:
+	case SDE_SSPP_SCALER_QSEED3:
+	case SDE_SSPP_SCALER_RGB:
+		*idx = sblk->scaler_blk.base;
+		break;
+	case SDE_SSPP_CSC:
+	case SDE_SSPP_CSC_10BIT:
+		*idx = sblk->csc_blk.base;
+		break;
+	case SDE_SSPP_HSIC:
+		*idx = sblk->hsic_blk.base;
+		break;
+	case SDE_SSPP_PCC:
+		*idx = sblk->pcc_blk.base;
+		break;
+	case SDE_SSPP_MEMCOLOR:
+		*idx = sblk->memcolor_blk.base;
+		break;
+	default:
+		rc = -EINVAL;
+	}
+
+	return rc;
+}
+
+static void _sspp_setup_opmode(struct sde_hw_pipe *ctx,
+		u32 mask, u8 en)
+{
+	u32 idx;
+	u32 opmode;
+
+	if (!test_bit(SDE_SSPP_SCALER_QSEED2, &ctx->cap->features) ||
+		_sspp_subblk_offset(ctx, SDE_SSPP_SCALER_QSEED2, &idx) ||
+		!test_bit(SDE_SSPP_CSC, &ctx->cap->features))
+		return;
+
+	opmode = SDE_REG_READ(&ctx->hw, SSPP_VIG_OP_MODE + idx);
+
+	if (en)
+		opmode |= mask;
+	else
+		opmode &= ~mask;
+
+	SDE_REG_WRITE(&ctx->hw, SSPP_VIG_OP_MODE + idx, opmode);
+}
+
+static void _sspp_setup_csc10_opmode(struct sde_hw_pipe *ctx,
+		u32 mask, u8 en)
+{
+	u32 idx;
+	u32 opmode;
+
+	if (_sspp_subblk_offset(ctx, SDE_SSPP_CSC_10BIT, &idx))
+		return;
+
+	opmode = SDE_REG_READ(&ctx->hw, SSPP_VIG_CSC_10_OP_MODE + idx);
+	if (en)
+		opmode |= mask;
+	else
+		opmode &= ~mask;
+
+	SDE_REG_WRITE(&ctx->hw, SSPP_VIG_CSC_10_OP_MODE + idx, opmode);
+}
+
+/**
+ * Setup source pixel format, flips and related fetch configuration
+ */
+static void sde_hw_sspp_setup_format(struct sde_hw_pipe *ctx,
+		const struct sde_format *fmt, u32 flags)
+{
+	struct sde_hw_blk_reg_map *c;
+	u32 chroma_samp, unpack, src_format;
+	u32 secure = 0;
+	u32 opmode = 0;
+	u32 idx;
+
+	if (_sspp_subblk_offset(ctx, SDE_SSPP_SRC, &idx) || !fmt)
+		return;
+
+	c = &ctx->hw;
+	opmode = SDE_REG_READ(c, SSPP_SRC_OP_MODE + idx);
+	opmode &= ~(MDSS_MDP_OP_FLIP_LR | MDSS_MDP_OP_FLIP_UD |
+			MDSS_MDP_OP_BWC_EN | MDSS_MDP_OP_PE_OVERRIDE);
+
+	if (flags & SDE_SSPP_SECURE_OVERLAY_SESSION)
+		secure = 0xF;
+
+	if (flags & SDE_SSPP_FLIP_LR)
+		opmode |= MDSS_MDP_OP_FLIP_LR;
+	if (flags & SDE_SSPP_FLIP_UD)
+		opmode |= MDSS_MDP_OP_FLIP_UD;
+
+	chroma_samp = fmt->chroma_sample;
+	if (flags & SDE_SSPP_SOURCE_ROTATED_90) {
+		if (chroma_samp == SDE_CHROMA_H2V1)
+			chroma_samp = SDE_CHROMA_H1V2;
+		else if (chroma_samp == SDE_CHROMA_H1V2)
+			chroma_samp = SDE_CHROMA_H2V1;
+	}
+
+	src_format = (chroma_samp << 23) | (fmt->fetch_planes << 19) |
+		(fmt->bits[C3_ALPHA] << 6) | (fmt->bits[C2_R_Cr] << 4) |
+		(fmt->bits[C1_B_Cb] << 2) | (fmt->bits[C0_G_Y] << 0);
+
+	if (flags & SDE_SSPP_ROT_90)
+		src_format |= BIT(11); /* ROT90 */
+
+	if (fmt->alpha_enable && fmt->fetch_planes == SDE_PLANE_INTERLEAVED)
+		src_format |= BIT(8); /* SRCC3_EN */
+
+	if (flags & SDE_SSPP_SOLID_FILL)
+		src_format |= BIT(22);
+
+	unpack = (fmt->element[3] << 24) | (fmt->element[2] << 16) |
+		(fmt->element[1] << 8) | (fmt->element[0] << 0);
+	src_format |= ((fmt->unpack_count - 1) << 12) |
+		(fmt->unpack_tight << 17) |
+		(fmt->unpack_align_msb << 18) |
+		((fmt->bpp - 1) << 9);
+
+	if (fmt->fetch_mode != SDE_FETCH_LINEAR) {
+		if (SDE_FORMAT_IS_UBWC(fmt))
+			opmode |= MDSS_MDP_OP_BWC_EN;
+		src_format |= (fmt->fetch_mode & 3) << 30; /* FRAME_FORMAT */
+		SDE_REG_WRITE(c, SSPP_FETCH_CONFIG,
+			SDE_FETCH_CONFIG_RESET_VALUE |
+			ctx->highest_bank_bit << 18);
+	}
+
+	opmode |= MDSS_MDP_OP_PE_OVERRIDE;
+
+	/* if this is YUV pixel format, enable CSC */
+	if (SDE_FORMAT_IS_YUV(fmt))
+		src_format |= BIT(15);
+
+	if (SDE_FORMAT_IS_DX(fmt))
+		src_format |= BIT(14);
+
+	/* update scaler opmode, if appropriate */
+	if (test_bit(SDE_SSPP_CSC, &ctx->cap->features))
+		_sspp_setup_opmode(ctx, VIG_OP_CSC_EN | VIG_OP_CSC_SRC_DATAFMT,
+			SDE_FORMAT_IS_YUV(fmt));
+	else if (test_bit(SDE_SSPP_CSC_10BIT, &ctx->cap->features))
+		_sspp_setup_csc10_opmode(ctx,
+			VIG_CSC_10_EN | VIG_CSC_10_SRC_DATAFMT,
+			SDE_FORMAT_IS_YUV(fmt));
+
+	SDE_REG_WRITE(c, SSPP_SRC_FORMAT + idx, src_format);
+	SDE_REG_WRITE(c, SSPP_SRC_UNPACK_PATTERN + idx, unpack);
+	SDE_REG_WRITE(c, SSPP_SRC_OP_MODE + idx, opmode);
+	SDE_REG_WRITE(c, SSPP_SRC_ADDR_SW_STATUS + idx, secure);
+
+	/* clear previous UBWC error */
+	SDE_REG_WRITE(c, SSPP_UBWC_ERROR_STATUS + idx, BIT(31));
+}
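+
+/*
+ * Worked example (editorial): for an interleaved RGBA8888 source with
+ * four 8-bit components (COLOR_8BIT each), tight LSB-aligned unpack,
+ * unpack_count = 4, bpp = 4 and alpha enabled, the packing above yields
+ * src_format = 0xff | BIT(8) | (3 << 9) | (3 << 12) | (1 << 17)
+ * = 0x237ff.
+ */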
+
+static void sde_hw_sspp_setup_pe_config(struct sde_hw_pipe *ctx,
+		struct sde_hw_pixel_ext *pe_ext)
+{
+	struct sde_hw_blk_reg_map *c;
+	u8 color;
+	u32 lr_pe[4], tb_pe[4], tot_req_pixels[4];
+	const u32 bytemask = 0xff;
+	const u32 shortmask = 0xffff;
+	u32 idx;
+
+	if (_sspp_subblk_offset(ctx, SDE_SSPP_SRC, &idx) || !pe_ext)
+		return;
+
+	c = &ctx->hw;
+
+	/* program SW pixel extension override for all color components */
+	for (color = 0; color < SDE_MAX_PLANES; color++) {
+		/* color 2 has the same set of registers as color 1 */
+		if (color == 2)
+			continue;
+
+		lr_pe[color] = ((pe_ext->right_ftch[color] & bytemask) << 24)|
+			((pe_ext->right_rpt[color] & bytemask) << 16)|
+			((pe_ext->left_ftch[color] & bytemask) << 8)|
+			(pe_ext->left_rpt[color] & bytemask);
+
+		tb_pe[color] = ((pe_ext->btm_ftch[color] & bytemask) << 24)|
+			((pe_ext->btm_rpt[color] & bytemask) << 16)|
+			((pe_ext->top_ftch[color] & bytemask) << 8)|
+			(pe_ext->top_rpt[color] & bytemask);
+
+		tot_req_pixels[color] = (((pe_ext->roi_h[color] +
+			pe_ext->num_ext_pxls_top[color] +
+			pe_ext->num_ext_pxls_btm[color]) & shortmask) << 16) |
+			((pe_ext->roi_w[color] +
+			pe_ext->num_ext_pxls_left[color] +
+			pe_ext->num_ext_pxls_right[color]) & shortmask);
+	}
+
+	/* color 0 */
+	SDE_REG_WRITE(c, SSPP_SW_PIX_EXT_C0_LR + idx, lr_pe[0]);
+	SDE_REG_WRITE(c, SSPP_SW_PIX_EXT_C0_TB + idx, tb_pe[0]);
+	SDE_REG_WRITE(c, SSPP_SW_PIX_EXT_C0_REQ_PIXELS + idx,
+			tot_req_pixels[0]);
+
+	/* color 1 and color 2 */
+	SDE_REG_WRITE(c, SSPP_SW_PIX_EXT_C1C2_LR + idx, lr_pe[1]);
+	SDE_REG_WRITE(c, SSPP_SW_PIX_EXT_C1C2_TB + idx, tb_pe[1]);
+	SDE_REG_WRITE(c, SSPP_SW_PIX_EXT_C1C2_REQ_PIXELS + idx,
+			tot_req_pixels[1]);
+
+	/* color 3 */
+	SDE_REG_WRITE(c, SSPP_SW_PIX_EXT_C3_LR + idx, lr_pe[3]);
+	SDE_REG_WRITE(c, SSPP_SW_PIX_EXT_C3_TB + idx, tb_pe[3]);
+	SDE_REG_WRITE(c, SSPP_SW_PIX_EXT_C3_REQ_PIXELS + idx,
+			tot_req_pixels[3]);
+}
+
+static void _sde_hw_sspp_setup_scaler(struct sde_hw_pipe *ctx,
+		struct sde_hw_pipe_cfg *sspp,
+		struct sde_hw_pixel_ext *pe,
+		void *scaler_cfg)
+{
+	struct sde_hw_blk_reg_map *c;
+	int config_h = 0x0;
+	int config_v = 0x0;
+	u32 idx;
+
+	(void)sspp;
+	(void)scaler_cfg;
+	if (_sspp_subblk_offset(ctx, SDE_SSPP_SCALER_QSEED2, &idx) || !pe)
+		return;
+
+	c = &ctx->hw;
+
+	/* enable scaler(s) if valid filter set */
+	if (pe->horz_filter[SDE_SSPP_COMP_0] < SDE_SCALE_FILTER_MAX)
+		config_h |= pe->horz_filter[SDE_SSPP_COMP_0] << 8;
+	if (pe->horz_filter[SDE_SSPP_COMP_1_2] < SDE_SCALE_FILTER_MAX)
+		config_h |= pe->horz_filter[SDE_SSPP_COMP_1_2] << 12;
+	if (pe->horz_filter[SDE_SSPP_COMP_3] < SDE_SCALE_FILTER_MAX)
+		config_h |= pe->horz_filter[SDE_SSPP_COMP_3] << 16;
+
+	if (config_h)
+		config_h |= BIT(0);
+
+	if (pe->vert_filter[SDE_SSPP_COMP_0] < SDE_SCALE_FILTER_MAX)
+		config_v |= pe->vert_filter[SDE_SSPP_COMP_0] << 10;
+	if (pe->vert_filter[SDE_SSPP_COMP_1_2] < SDE_SCALE_FILTER_MAX)
+		config_v |= pe->vert_filter[SDE_SSPP_COMP_1_2] << 14;
+	if (pe->vert_filter[SDE_SSPP_COMP_3] < SDE_SCALE_FILTER_MAX)
+		config_v |= pe->vert_filter[SDE_SSPP_COMP_3] << 18;
+
+	if (config_v)
+		config_v |= BIT(1);
+
+	SDE_REG_WRITE(c, SCALE_CONFIG + idx,  config_h | config_v);
+	SDE_REG_WRITE(c, COMP0_3_INIT_PHASE_X + idx,
+		pe->init_phase_x[SDE_SSPP_COMP_0]);
+	SDE_REG_WRITE(c, COMP0_3_INIT_PHASE_Y + idx,
+		pe->init_phase_y[SDE_SSPP_COMP_0]);
+	SDE_REG_WRITE(c, COMP0_3_PHASE_STEP_X + idx,
+		pe->phase_step_x[SDE_SSPP_COMP_0]);
+	SDE_REG_WRITE(c, COMP0_3_PHASE_STEP_Y + idx,
+		pe->phase_step_y[SDE_SSPP_COMP_0]);
+
+	SDE_REG_WRITE(c, COMP1_2_INIT_PHASE_X + idx,
+		pe->init_phase_x[SDE_SSPP_COMP_1_2]);
+	SDE_REG_WRITE(c, COMP1_2_INIT_PHASE_Y + idx,
+		pe->init_phase_y[SDE_SSPP_COMP_1_2]);
+	SDE_REG_WRITE(c, COMP1_2_PHASE_STEP_X + idx,
+		pe->phase_step_x[SDE_SSPP_COMP_1_2]);
+	SDE_REG_WRITE(c, COMP1_2_PHASE_STEP_Y + idx,
+		pe->phase_step_y[SDE_SSPP_COMP_1_2]);
+}
+
+static void _sde_hw_sspp_setup_scaler3_lut(struct sde_hw_pipe *ctx,
+		struct sde_hw_scaler3_cfg *scaler3_cfg)
+{
+	u32 idx;
+	int i, j, filter;
+	int config_lut = 0x0;
+	unsigned long lut_flags;
+	u32 lut_addr, lut_offset, lut_len;
+	u32 *lut[QSEED3_FILTERS] = {NULL, NULL, NULL, NULL, NULL};
+	static const uint32_t offset[QSEED3_FILTERS][QSEED3_LUT_REGIONS][2] = {
+		{{18, 0x000}, {12, 0x120}, {12, 0x1E0}, {8, 0x2A0} },
+		{{6, 0x320}, {3, 0x3E0}, {3, 0x440}, {3, 0x4A0} },
+		{{6, 0x500}, {3, 0x5c0}, {3, 0x620}, {3, 0x680} },
+		{{6, 0x380}, {3, 0x410}, {3, 0x470}, {3, 0x4d0} },
+		{{6, 0x560}, {3, 0x5f0}, {3, 0x650}, {3, 0x6b0} },
+	};
+
+	if (_sspp_subblk_offset(ctx, SDE_SSPP_SCALER_QSEED3, &idx) ||
+		!scaler3_cfg)
+		return;
+
+	lut_flags = (unsigned long) scaler3_cfg->lut_flag;
+	if (test_bit(QSEED3_COEF_LUT_DIR_BIT, &lut_flags) &&
+		(scaler3_cfg->dir_len == QSEED3_DIR_LUT_SIZE)) {
+		lut[0] = scaler3_cfg->dir_lut;
+		config_lut = 1;
+	}
+	if (test_bit(QSEED3_COEF_LUT_Y_CIR_BIT, &lut_flags) &&
+		(scaler3_cfg->y_rgb_cir_lut_idx < QSEED3_CIRCULAR_LUTS) &&
+		(scaler3_cfg->cir_len == QSEED3_CIR_LUT_SIZE)) {
+		lut[1] = scaler3_cfg->cir_lut +
+			scaler3_cfg->y_rgb_cir_lut_idx * QSEED3_LUT_SIZE;
+		config_lut = 1;
+	}
+	if (test_bit(QSEED3_COEF_LUT_UV_CIR_BIT, &lut_flags) &&
+		(scaler3_cfg->uv_cir_lut_idx < QSEED3_CIRCULAR_LUTS) &&
+		(scaler3_cfg->cir_len == QSEED3_CIR_LUT_SIZE)) {
+		lut[2] = scaler3_cfg->cir_lut +
+			scaler3_cfg->uv_cir_lut_idx * QSEED3_LUT_SIZE;
+		config_lut = 1;
+	}
+	if (test_bit(QSEED3_COEF_LUT_Y_SEP_BIT, &lut_flags) &&
+		(scaler3_cfg->y_rgb_sep_lut_idx < QSEED3_SEPARABLE_LUTS) &&
+		(scaler3_cfg->sep_len == QSEED3_SEP_LUT_SIZE)) {
+		lut[3] = scaler3_cfg->sep_lut +
+			scaler3_cfg->y_rgb_sep_lut_idx * QSEED3_LUT_SIZE;
+		config_lut = 1;
+	}
+	if (test_bit(QSEED3_COEF_LUT_UV_SEP_BIT, &lut_flags) &&
+		(scaler3_cfg->uv_sep_lut_idx < QSEED3_SEPARABLE_LUTS) &&
+		(scaler3_cfg->sep_len == QSEED3_SEP_LUT_SIZE)) {
+		lut[4] = scaler3_cfg->sep_lut +
+			scaler3_cfg->uv_sep_lut_idx * QSEED3_LUT_SIZE;
+		config_lut = 1;
+	}
+
+	if (config_lut) {
+		for (filter = 0; filter < QSEED3_FILTERS; filter++) {
+			if (!lut[filter])
+				continue;
+			lut_offset = 0;
+			for (i = 0; i < QSEED3_LUT_REGIONS; i++) {
+				lut_addr = QSEED3_COEF_LUT + idx
+					+ offset[filter][i][1];
+				lut_len = offset[filter][i][0] << 2;
+				for (j = 0; j < lut_len; j++) {
+					SDE_REG_WRITE(&ctx->hw,
+						lut_addr,
+						(lut[filter])[lut_offset++]);
+					lut_addr += 4;
+				}
+			}
+		}
+	}
+
+	if (test_bit(QSEED3_COEF_LUT_SWAP_BIT, &lut_flags))
+		SDE_REG_WRITE(&ctx->hw, QSEED3_COEF_LUT_CTRL + idx, BIT(0));
+}
+
+static void _sde_hw_sspp_setup_scaler3_de(struct sde_hw_pipe *ctx,
+		struct sde_hw_scaler3_de_cfg *de_cfg)
+{
+	u32 idx;
+	u32 sharp_lvl, sharp_ctl, shape_ctl, de_thr;
+	u32 adjust_a, adjust_b, adjust_c;
+	struct sde_hw_blk_reg_map *hw;
+
+	if (_sspp_subblk_offset(ctx, SDE_SSPP_SCALER_QSEED3, &idx) || !de_cfg)
+		return;
+
+	if (!de_cfg->enable)
+		return;
+
+	hw = &ctx->hw;
+	sharp_lvl = (de_cfg->sharpen_level1 & 0x1FF) |
+		((de_cfg->sharpen_level2 & 0x1FF) << 16);
+
+	sharp_ctl = ((de_cfg->limit & 0xF) << 9) |
+		((de_cfg->prec_shift & 0x7) << 13) |
+		((de_cfg->clip & 0x7) << 16);
+
+	shape_ctl = (de_cfg->thr_quiet & 0xFF) |
+		((de_cfg->thr_dieout & 0x3FF) << 16);
+
+	de_thr = (de_cfg->thr_low & 0x3FF) |
+		((de_cfg->thr_high & 0x3FF) << 16);
+
+	adjust_a = (de_cfg->adjust_a[0] & 0x3FF) |
+		((de_cfg->adjust_a[1] & 0x3FF) << 10) |
+		((de_cfg->adjust_a[2] & 0x3FF) << 20);
+
+	adjust_b = (de_cfg->adjust_b[0] & 0x3FF) |
+		((de_cfg->adjust_b[1] & 0x3FF) << 10) |
+		((de_cfg->adjust_b[2] & 0x3FF) << 20);
+
+	adjust_c = (de_cfg->adjust_c[0] & 0x3FF) |
+		((de_cfg->adjust_c[1] & 0x3FF) << 10) |
+		((de_cfg->adjust_c[2] & 0x3FF) << 20);
+
+	SDE_REG_WRITE(hw, QSEED3_DE_SHARPEN + idx, sharp_lvl);
+	SDE_REG_WRITE(hw, QSEED3_DE_SHARPEN_CTL + idx, sharp_ctl);
+	SDE_REG_WRITE(hw, QSEED3_DE_SHAPE_CTL + idx, shape_ctl);
+	SDE_REG_WRITE(hw, QSEED3_DE_THRESHOLD + idx, de_thr);
+	SDE_REG_WRITE(hw, QSEED3_DE_ADJUST_DATA_0 + idx, adjust_a);
+	SDE_REG_WRITE(hw, QSEED3_DE_ADJUST_DATA_1 + idx, adjust_b);
+	SDE_REG_WRITE(hw, QSEED3_DE_ADJUST_DATA_2 + idx, adjust_c);
+}
+
+static void _sde_hw_sspp_setup_scaler3(struct sde_hw_pipe *ctx,
+		struct sde_hw_pipe_cfg *sspp,
+		struct sde_hw_pixel_ext *pe,
+		void *scaler_cfg)
+{
+	u32 idx;
+	u32 op_mode = 0;
+	u32 phase_init, preload, src_y_rgb, src_uv, dst;
+	struct sde_hw_scaler3_cfg *scaler3_cfg = scaler_cfg;
+
+	(void)pe;
+	if (_sspp_subblk_offset(ctx, SDE_SSPP_SCALER_QSEED3, &idx) || !sspp
+		|| !scaler3_cfg || !ctx || !ctx->cap || !ctx->cap->sblk)
+		return;
+
+	if (!scaler3_cfg->enable)
+		goto end;
+
+	op_mode |= BIT(0);
+	op_mode |= (scaler3_cfg->y_rgb_filter_cfg & 0x3) << 16;
+
+	if (SDE_FORMAT_IS_YUV(sspp->layout.format)) {
+		op_mode |= BIT(12);
+		op_mode |= (scaler3_cfg->uv_filter_cfg & 0x3) << 24;
+	}
+
+	op_mode |= (scaler3_cfg->blend_cfg & 1) << 31;
+	op_mode |= (scaler3_cfg->dir_en) ? BIT(4) : 0;
+
+	preload =
+		((scaler3_cfg->preload_x[0] & 0x7F) << 0) |
+		((scaler3_cfg->preload_y[0] & 0x7F) << 8) |
+		((scaler3_cfg->preload_x[1] & 0x7F) << 16) |
+		((scaler3_cfg->preload_y[1] & 0x7F) << 24);
+
+	src_y_rgb = (scaler3_cfg->src_width[0] & 0x1FFFF) |
+		((scaler3_cfg->src_height[0] & 0x1FFFF) << 16);
+
+	src_uv = (scaler3_cfg->src_width[1] & 0x1FFFF) |
+		((scaler3_cfg->src_height[1] & 0x1FFFF) << 16);
+
+	dst = (scaler3_cfg->dst_width & 0x1FFFF) |
+		((scaler3_cfg->dst_height & 0x1FFFF) << 16);
+
+	if (scaler3_cfg->de.enable) {
+		_sde_hw_sspp_setup_scaler3_de(ctx, &scaler3_cfg->de);
+		op_mode |= BIT(8);
+	}
+
+	if (scaler3_cfg->lut_flag)
+		_sde_hw_sspp_setup_scaler3_lut(ctx, scaler3_cfg);
+
+	if (ctx->cap->sblk->scaler_blk.version == 0x1002) {
+		phase_init =
+			((scaler3_cfg->init_phase_x[0] & 0x3F) << 0) |
+			((scaler3_cfg->init_phase_y[0] & 0x3F) << 8) |
+			((scaler3_cfg->init_phase_x[1] & 0x3F) << 16) |
+			((scaler3_cfg->init_phase_y[1] & 0x3F) << 24);
+		SDE_REG_WRITE(&ctx->hw, QSEED3_PHASE_INIT + idx, phase_init);
+	} else {
+		SDE_REG_WRITE(&ctx->hw, QSEED3_PHASE_INIT_Y_H + idx,
+			scaler3_cfg->init_phase_x[0] & 0x1FFFFF);
+		SDE_REG_WRITE(&ctx->hw, QSEED3_PHASE_INIT_Y_V + idx,
+			scaler3_cfg->init_phase_y[0] & 0x1FFFFF);
+		SDE_REG_WRITE(&ctx->hw, QSEED3_PHASE_INIT_UV_H + idx,
+			scaler3_cfg->init_phase_x[1] & 0x1FFFFF);
+		SDE_REG_WRITE(&ctx->hw, QSEED3_PHASE_INIT_UV_V + idx,
+			scaler3_cfg->init_phase_y[1] & 0x1FFFFF);
+	}
+
+	SDE_REG_WRITE(&ctx->hw, QSEED3_PHASE_STEP_Y_H + idx,
+		scaler3_cfg->phase_step_x[0] & 0xFFFFFF);
+
+	SDE_REG_WRITE(&ctx->hw, QSEED3_PHASE_STEP_Y_V + idx,
+		scaler3_cfg->phase_step_y[0] & 0xFFFFFF);
+
+	SDE_REG_WRITE(&ctx->hw, QSEED3_PHASE_STEP_UV_H + idx,
+		scaler3_cfg->phase_step_x[1] & 0xFFFFFF);
+
+	SDE_REG_WRITE(&ctx->hw, QSEED3_PHASE_STEP_UV_V + idx,
+		scaler3_cfg->phase_step_y[1] & 0xFFFFFF);
+
+	SDE_REG_WRITE(&ctx->hw, QSEED3_PRELOAD + idx, preload);
+
+	SDE_REG_WRITE(&ctx->hw, QSEED3_SRC_SIZE_Y_RGB_A + idx, src_y_rgb);
+
+	SDE_REG_WRITE(&ctx->hw, QSEED3_SRC_SIZE_UV + idx, src_uv);
+
+	SDE_REG_WRITE(&ctx->hw, QSEED3_DST_SIZE + idx, dst);
+
+end:
+	if (!SDE_FORMAT_IS_DX(sspp->layout.format))
+		op_mode |= BIT(14);
+
+	if (sspp->layout.format->alpha_enable) {
+		op_mode |= BIT(10);
+		if (ctx->cap->sblk->scaler_blk.version == 0x1002)
+			op_mode |= (scaler3_cfg->alpha_filter_cfg & 0x1) << 30;
+		else
+			op_mode |= (scaler3_cfg->alpha_filter_cfg & 0x3) << 29;
+	}
+	SDE_REG_WRITE(&ctx->hw, QSEED3_OP_MODE + idx, op_mode);
+}
+
+/**
+ * sde_hw_sspp_setup_rects() - programs source/destination rectangles,
+ * strides and decimation for the pipe
+ */
+static void sde_hw_sspp_setup_rects(struct sde_hw_pipe *ctx,
+		struct sde_hw_pipe_cfg *cfg,
+		struct sde_hw_pixel_ext *pe_ext,
+		void *scale_cfg)
+{
+	struct sde_hw_blk_reg_map *c;
+	u32 src_size, src_xy, dst_size, dst_xy, ystride0, ystride1;
+	u32 decimation = 0;
+	u32 idx;
+
+	if (_sspp_subblk_offset(ctx, SDE_SSPP_SRC, &idx) || !cfg)
+		return;
+
+	c = &ctx->hw;
+
+	/* program pixel extension override */
+	if (pe_ext)
+		sde_hw_sspp_setup_pe_config(ctx, pe_ext);
+
+	/* src and dest rect programming */
+	src_xy = (cfg->src_rect.y << 16) | (cfg->src_rect.x);
+	src_size = (cfg->src_rect.h << 16) | (cfg->src_rect.w);
+	dst_xy = (cfg->dst_rect.y << 16) | (cfg->dst_rect.x);
+	dst_size = (cfg->dst_rect.h << 16) | (cfg->dst_rect.w);
+
+	ystride0 = (cfg->layout.plane_pitch[0]) |
+			(cfg->layout.plane_pitch[1] << 16);
+	ystride1 = (cfg->layout.plane_pitch[2]) |
+			(cfg->layout.plane_pitch[3] << 16);
+
+	/* program scaler and phase registers if the pipe supports scaling */
+	if (ctx->cap->features & SDE_SSPP_SCALER) {
+		/* program decimation */
+		decimation = ((1 << cfg->horz_decimation) - 1) << 8;
+		decimation |= ((1 << cfg->vert_decimation) - 1);
+		ctx->ops.setup_scaler(ctx, cfg, pe_ext, scale_cfg);
+	}
+
+	/* rectangle register programming */
+	SDE_REG_WRITE(c, SSPP_SRC_SIZE + idx, src_size);
+	SDE_REG_WRITE(c, SSPP_SRC_XY + idx, src_xy);
+	SDE_REG_WRITE(c, SSPP_OUT_SIZE + idx, dst_size);
+	SDE_REG_WRITE(c, SSPP_OUT_XY + idx, dst_xy);
+
+	SDE_REG_WRITE(c, SSPP_SRC_YSTRIDE0 + idx, ystride0);
+	SDE_REG_WRITE(c, SSPP_SRC_YSTRIDE1 + idx, ystride1);
+	SDE_REG_WRITE(c, SSPP_DECIMATION_CONFIG + idx, decimation);
+}
+
+static void sde_hw_sspp_setup_sourceaddress(struct sde_hw_pipe *ctx,
+		struct sde_hw_pipe_cfg *cfg)
+{
+	int i;
+	u32 idx;
+
+	if (_sspp_subblk_offset(ctx, SDE_SSPP_SRC, &idx))
+		return;
+
+	for (i = 0; i < ARRAY_SIZE(cfg->layout.plane_addr); i++)
+		SDE_REG_WRITE(&ctx->hw, SSPP_SRC0_ADDR + idx + i * 0x4,
+			cfg->layout.plane_addr[i]);
+}
+
+static void sde_hw_sspp_setup_csc(struct sde_hw_pipe *ctx,
+		struct sde_csc_cfg *data)
+{
+	u32 idx;
+	bool csc10 = false;
+
+	if (_sspp_subblk_offset(ctx, SDE_SSPP_CSC, &idx) || !data)
+		return;
+
+	if (test_bit(SDE_SSPP_CSC_10BIT, &ctx->cap->features)) {
+		idx += CSC_10BIT_OFFSET;
+		csc10 = true;
+	}
+
+	sde_hw_csc_setup(&ctx->hw, idx, data, csc10);
+}
+
+static void sde_hw_sspp_setup_sharpening(struct sde_hw_pipe *ctx,
+		struct sde_hw_sharp_cfg *cfg)
+{
+	struct sde_hw_blk_reg_map *c;
+	u32 idx;
+
+	if (_sspp_subblk_offset(ctx, SDE_SSPP_SCALER_QSEED2, &idx) || !cfg ||
+			!test_bit(SDE_SSPP_SCALER_QSEED2, &ctx->cap->features))
+		return;
+
+	c = &ctx->hw;
+
+	SDE_REG_WRITE(c, VIG_0_QSEED2_SHARP + idx, cfg->strength);
+	SDE_REG_WRITE(c, VIG_0_QSEED2_SHARP + idx + 0x4, cfg->edge_thr);
+	SDE_REG_WRITE(c, VIG_0_QSEED2_SHARP + idx + 0x8, cfg->smooth_thr);
+	SDE_REG_WRITE(c, VIG_0_QSEED2_SHARP + idx + 0xC, cfg->noise_thr);
+}
+
+static void sde_hw_sspp_setup_solidfill(struct sde_hw_pipe *ctx, u32 color)
+{
+	u32 idx;
+
+	if (_sspp_subblk_offset(ctx, SDE_SSPP_SRC, &idx))
+		return;
+
+	SDE_REG_WRITE(&ctx->hw, SSPP_SRC_CONSTANT_COLOR + idx, color);
+}
+
+static void sde_hw_sspp_setup_danger_safe_lut(struct sde_hw_pipe *ctx,
+		struct sde_hw_pipe_qos_cfg *cfg)
+{
+	u32 idx;
+
+	if (_sspp_subblk_offset(ctx, SDE_SSPP_SRC, &idx))
+		return;
+
+	SDE_REG_WRITE(&ctx->hw, SSPP_DANGER_LUT + idx, cfg->danger_lut);
+	SDE_REG_WRITE(&ctx->hw, SSPP_SAFE_LUT + idx, cfg->safe_lut);
+}
+
+static void sde_hw_sspp_setup_creq_lut(struct sde_hw_pipe *ctx,
+		struct sde_hw_pipe_qos_cfg *cfg)
+{
+	u32 idx;
+
+	if (_sspp_subblk_offset(ctx, SDE_SSPP_SRC, &idx))
+		return;
+
+	SDE_REG_WRITE(&ctx->hw, SSPP_CREQ_LUT + idx, cfg->creq_lut);
+}
+
+static void sde_hw_sspp_setup_qos_ctrl(struct sde_hw_pipe *ctx,
+		struct sde_hw_pipe_qos_cfg *cfg)
+{
+	u32 idx;
+	u32 qos_ctrl = 0;
+
+	if (_sspp_subblk_offset(ctx, SDE_SSPP_SRC, &idx))
+		return;
+
+	if (cfg->vblank_en) {
+		qos_ctrl |= ((cfg->creq_vblank &
+				SSPP_QOS_CTRL_CREQ_VBLANK_MASK) <<
+				SSPP_QOS_CTRL_CREQ_VBLANK_OFF);
+		qos_ctrl |= ((cfg->danger_vblank &
+				SSPP_QOS_CTRL_DANGER_VBLANK_MASK) <<
+				SSPP_QOS_CTRL_DANGER_VBLANK_OFF);
+		qos_ctrl |= SSPP_QOS_CTRL_VBLANK_EN;
+	}
+
+	if (cfg->danger_safe_en)
+		qos_ctrl |= SSPP_QOS_CTRL_DANGER_SAFE_EN;
+
+	SDE_REG_WRITE(&ctx->hw, SSPP_QOS_CTRL + idx, qos_ctrl);
+}
+
+static void _setup_layer_ops(struct sde_hw_pipe *c,
+		unsigned long features)
+{
+	if (test_bit(SDE_SSPP_SRC, &features)) {
+		c->ops.setup_format = sde_hw_sspp_setup_format;
+		c->ops.setup_rects = sde_hw_sspp_setup_rects;
+		c->ops.setup_sourceaddress = sde_hw_sspp_setup_sourceaddress;
+		c->ops.setup_solidfill = sde_hw_sspp_setup_solidfill;
+	}
+	if (test_bit(SDE_SSPP_QOS, &features)) {
+		c->ops.setup_danger_safe_lut =
+			sde_hw_sspp_setup_danger_safe_lut;
+		c->ops.setup_creq_lut = sde_hw_sspp_setup_creq_lut;
+		c->ops.setup_qos_ctrl = sde_hw_sspp_setup_qos_ctrl;
+	}
+
+	if (test_bit(SDE_SSPP_CSC, &features) ||
+		test_bit(SDE_SSPP_CSC_10BIT, &features))
+		c->ops.setup_csc = sde_hw_sspp_setup_csc;
+
+	if (test_bit(SDE_SSPP_SCALER_QSEED2, &features))
+		c->ops.setup_sharpening = sde_hw_sspp_setup_sharpening;
+
+	if (test_bit(SDE_SSPP_SCALER_QSEED3, &features))
+		c->ops.setup_scaler = _sde_hw_sspp_setup_scaler3;
+	else
+		c->ops.setup_scaler = _sde_hw_sspp_setup_scaler;
+
+	if (test_bit(SDE_SSPP_HSIC, &features)) {
+		/* TODO: add version based assignment here as inline or macro */
+		if (c->cap->sblk->hsic_blk.version ==
+			(SDE_COLOR_PROCESS_VER(0x1, 0x7))) {
+			c->ops.setup_pa_hue = sde_setup_pipe_pa_hue_v1_7;
+			c->ops.setup_pa_sat = sde_setup_pipe_pa_sat_v1_7;
+			c->ops.setup_pa_val = sde_setup_pipe_pa_val_v1_7;
+			c->ops.setup_pa_cont = sde_setup_pipe_pa_cont_v1_7;
+		}
+	}
+
+	if (test_bit(SDE_SSPP_MEMCOLOR, &features)) {
+		if (c->cap->sblk->memcolor_blk.version ==
+			(SDE_COLOR_PROCESS_VER(0x1, 0x7)))
+			c->ops.setup_pa_memcolor =
+				sde_setup_pipe_pa_memcol_v1_7;
+	}
+}
+
+static struct sde_sspp_cfg *_sspp_offset(enum sde_sspp sspp,
+		void __iomem *addr,
+		struct sde_mdss_cfg *catalog,
+		struct sde_hw_blk_reg_map *b)
+{
+	int i;
+
+	if ((sspp < SSPP_MAX) && catalog && addr && b) {
+		for (i = 0; i < catalog->sspp_count; i++) {
+			if (sspp == catalog->sspp[i].id) {
+				b->base_off = addr;
+				b->blk_off = catalog->sspp[i].base;
+				b->length = catalog->sspp[i].len;
+				b->hwversion = catalog->hwversion;
+				b->log_mask = SDE_DBG_MASK_SSPP;
+				return &catalog->sspp[i];
+			}
+		}
+	}
+
+	return ERR_PTR(-EINVAL);
+}
+
+struct sde_hw_pipe *sde_hw_sspp_init(enum sde_sspp idx,
+			void __iomem *addr,
+			struct sde_mdss_cfg *catalog)
+{
+	struct sde_hw_pipe *hw_pipe;
+	struct sde_sspp_cfg *cfg;
+
+	hw_pipe = kzalloc(sizeof(*hw_pipe), GFP_KERNEL);
+	if (!hw_pipe)
+		return ERR_PTR(-ENOMEM);
+
+	cfg = _sspp_offset(idx, addr, catalog, &hw_pipe->hw);
+	if (IS_ERR_OR_NULL(cfg)) {
+		kfree(hw_pipe);
+		return ERR_PTR(-EINVAL);
+	}
+
+	/* Assign ops */
+	hw_pipe->idx = idx;
+	hw_pipe->cap = cfg;
+	_setup_layer_ops(hw_pipe, hw_pipe->cap->features);
+	hw_pipe->highest_bank_bit = catalog->mdp[0].highest_bank_bit;
+
+	sde_dbg_reg_register_dump_range(SDE_DBG_NAME, cfg->name,
+			hw_pipe->hw.blk_off,
+			hw_pipe->hw.blk_off + hw_pipe->hw.length,
+			hw_pipe->hw.xin_id);
+
+	if (cfg->sblk->scaler_blk.len)
+		sde_dbg_reg_register_dump_range(SDE_DBG_NAME,
+			cfg->sblk->scaler_blk.name,
+			hw_pipe->hw.blk_off + cfg->sblk->scaler_blk.base,
+			hw_pipe->hw.blk_off + cfg->sblk->scaler_blk.base +
+				cfg->sblk->scaler_blk.len,
+			hw_pipe->hw.xin_id);
+
+	return hw_pipe;
+}
+
+void sde_hw_sspp_destroy(struct sde_hw_pipe *ctx)
+{
+	kfree(ctx);
+}
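+
+/*
+ * Example usage (editorial sketch; the function below is hypothetical):
+ * a plane commit typically programs the source address, then the format,
+ * then the rectangles through the resolved ops.
+ */
+static void __maybe_unused _sde_hw_sspp_usage_example(
+		struct sde_hw_pipe *pipe, struct sde_hw_pipe_cfg *cfg,
+		const struct sde_format *fmt)
+{
+	if (pipe->ops.setup_sourceaddress)
+		pipe->ops.setup_sourceaddress(pipe, cfg);
+	if (pipe->ops.setup_format)
+		pipe->ops.setup_format(pipe, fmt, 0);
+	/* pixel extension and scaler config may be NULL for 1:1 blits */
+	if (pipe->ops.setup_rects)
+		pipe->ops.setup_rects(pipe, cfg, NULL, NULL);
+}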
+
diff -Nruw linux-4.4.115/drivers/gpu/drm/msm/sde/sde_hw_sspp.h linux-4.4.115-fbx/drivers/gpu/drm/msm/sde/sde_hw_sspp.h
--- linux-4.4.115/drivers/gpu/drm/msm/sde/sde_hw_sspp.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/gpu/drm/msm/sde/sde_hw_sspp.h	2019-01-22 16:16:23.515246515 +0100
@@ -0,0 +1,479 @@
+/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _SDE_HW_SSPP_H
+#define _SDE_HW_SSPP_H
+
+#include "sde_hw_catalog.h"
+#include "sde_hw_mdss.h"
+#include "sde_hw_util.h"
+#include "sde_formats.h"
+#include "sde_color_processing.h"
+
+struct sde_hw_pipe;
+
+/**
+ * Flags
+ */
+#define SDE_SSPP_SECURE_OVERLAY_SESSION 0x1
+#define SDE_SSPP_FLIP_LR	 0x2
+#define SDE_SSPP_FLIP_UD	 0x4
+#define SDE_SSPP_SOURCE_ROTATED_90 0x8
+#define SDE_SSPP_ROT_90  0x10
+#define SDE_SSPP_SOLID_FILL 0x20
+
+/**
+ * Define all scaler feature bits in catalog
+ */
+#define SDE_SSPP_SCALER ((1UL << SDE_SSPP_SCALER_RGB) | \
+	(1UL << SDE_SSPP_SCALER_QSEED2) | \
+	(1UL << SDE_SSPP_SCALER_QSEED3))
+
+/**
+ * Component indices
+ */
+enum {
+	SDE_SSPP_COMP_0,
+	SDE_SSPP_COMP_1_2,
+	SDE_SSPP_COMP_2,
+	SDE_SSPP_COMP_3,
+
+	SDE_SSPP_COMP_MAX
+};
+
+enum {
+	SDE_FRAME_LINEAR,
+	SDE_FRAME_TILE_A4X,
+	SDE_FRAME_TILE_A5X,
+};
+
+enum sde_hw_filter {
+	SDE_SCALE_FILTER_NEAREST = 0,
+	SDE_SCALE_FILTER_BIL,
+	SDE_SCALE_FILTER_PCMN,
+	SDE_SCALE_FILTER_CA,
+	SDE_SCALE_FILTER_MAX
+};
+
+enum sde_hw_filter_alpa {
+	SDE_SCALE_ALPHA_PIXEL_REP,
+	SDE_SCALE_ALPHA_BIL
+};
+
+enum sde_hw_filter_yuv {
+	SDE_SCALE_2D_4X4,
+	SDE_SCALE_2D_CIR,
+	SDE_SCALE_1D_SEP,
+	SDE_SCALE_BIL
+};
+
+struct sde_hw_sharp_cfg {
+	u32 strength;
+	u32 edge_thr;
+	u32 smooth_thr;
+	u32 noise_thr;
+};
+
+struct sde_hw_pixel_ext {
+	/* scaling factors are enabled for this input layer */
+	uint8_t enable_pxl_ext;
+
+	int init_phase_x[SDE_MAX_PLANES];
+	int phase_step_x[SDE_MAX_PLANES];
+	int init_phase_y[SDE_MAX_PLANES];
+	int phase_step_y[SDE_MAX_PLANES];
+
+	/*
+	 * Number of pixels of extension in the left, right, top and bottom
+	 * directions for all color components. The value for each color
+	 * component should be the sum of fetch + repeat pixels.
+	 */
+	int num_ext_pxls_left[SDE_MAX_PLANES];
+	int num_ext_pxls_right[SDE_MAX_PLANES];
+	int num_ext_pxls_top[SDE_MAX_PLANES];
+	int num_ext_pxls_btm[SDE_MAX_PLANES];
+
+	/*
+	 * Number of pixels that need to be overfetched in the left, right,
+	 * top and bottom directions from the source image for scaling.
+	 */
+	int left_ftch[SDE_MAX_PLANES];
+	int right_ftch[SDE_MAX_PLANES];
+	int top_ftch[SDE_MAX_PLANES];
+	int btm_ftch[SDE_MAX_PLANES];
+
+	/*
+	 * Number of pixels that need to be repeated in the left, right, top
+	 * and bottom directions for scaling.
+	 */
+	int left_rpt[SDE_MAX_PLANES];
+	int right_rpt[SDE_MAX_PLANES];
+	int top_rpt[SDE_MAX_PLANES];
+	int btm_rpt[SDE_MAX_PLANES];
+
+	uint32_t roi_w[SDE_MAX_PLANES];
+	uint32_t roi_h[SDE_MAX_PLANES];
+
+	/*
+	 * Filter type to be used for scaling in horizontal and vertical
+	 * directions
+	 */
+	enum sde_hw_filter horz_filter[SDE_MAX_PLANES];
+	enum sde_hw_filter vert_filter[SDE_MAX_PLANES];
+};
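+
+/*
+ * Editorial example of the relationship documented above: if scaling
+ * needs 2 overfetched pixels and 1 repeated pixel on the left of color
+ * component 0, then left_ftch[0] = 2, left_rpt[0] = 1 and
+ * num_ext_pxls_left[0] = 3.
+ */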
+
+/**
+ * struct sde_hw_scaler3_de_cfg : QSEEDv3 detail enhancer configuration
+ * @enable:         detail enhancer enable/disable
+ * @sharpen_level1: sharpening strength for noise
+ * @sharpen_level2: sharpening strength for signal
+ * @ clip:          clip shift
+ * @ limit:         limit value
+ * @ thr_quiet:     quiet threshold
+ * @ thr_dieout:    dieout threshold
+ * @ thr_low:       low threshold
+ * @ thr_high:      high threshold
+ * @ prec_shift:    precision shift
+ * @ adjust_a:      A-coefficients for mapping curve
+ * @ adjust_b:      B-coefficients for mapping curve
+ * @ adjust_c:      C-coefficients for mapping curve
+ */
+struct sde_hw_scaler3_de_cfg {
+	u32 enable;
+	int16_t sharpen_level1;
+	int16_t sharpen_level2;
+	uint16_t clip;
+	uint16_t limit;
+	uint16_t thr_quiet;
+	uint16_t thr_dieout;
+	uint16_t thr_low;
+	uint16_t thr_high;
+	uint16_t prec_shift;
+	int16_t adjust_a[SDE_MAX_DE_CURVES];
+	int16_t adjust_b[SDE_MAX_DE_CURVES];
+	int16_t adjust_c[SDE_MAX_DE_CURVES];
+};
+
+/**
+ * struct sde_hw_scaler3_cfg : QSEEDv3 configuration
+ * @enable:            scaler enable
+ * @dir_en:            direction detection block enable
+ * @init_phase_x:      horizontal initial phase
+ * @phase_step_x:      horizontal phase step
+ * @init_phase_y:      vertical initial phase
+ * @phase_step_y:      vertical phase step
+ * @preload_x:         horizontal preload value
+ * @preload_y:         vertical preload value
+ * @src_width:         source width
+ * @src_height:        source height
+ * @dst_width:         destination width
+ * @dst_height:        destination height
+ * @y_rgb_filter_cfg:  y/rgb plane filter configuration
+ * @uv_filter_cfg:     uv plane filter configuration
+ * @alpha_filter_cfg:  alpha filter configuration
+ * @blend_cfg:         blend coefficients configuration
+ * @lut_flag:          scaler LUT update flags
+ *                     0x1 swap LUT bank
+ *                     0x2 update 2D filter LUT
+ *                     0x4 update y circular filter LUT
+ *                     0x8 update uv circular filter LUT
+ *                     0x10 update y separable filter LUT
+ *                     0x20 update uv separable filter LUT
+ * @dir_lut_idx:       2D filter LUT index
+ * @y_rgb_cir_lut_idx: y circular filter LUT index
+ * @uv_cir_lut_idx:    uv circular filter LUT index
+ * @y_rgb_sep_lut_idx: y separable filter LUT index
+ * @uv_sep_lut_idx:    uv separable filter LUT index
+ * @dir_lut:           pointer to 2D LUT
+ * @dir_len:           length of 2D LUT
+ * @cir_lut:           pointer to circular filter LUT
+ * @cir_len:           length of circular filter LUT
+ * @sep_lut:           pointer to separable filter LUT
+ * @sep_len:           length of separable filter LUT
+ * @de:                detail enhancer configuration
+ */
+struct sde_hw_scaler3_cfg {
+	u32 enable;
+	u32 dir_en;
+	int32_t init_phase_x[SDE_MAX_PLANES];
+	int32_t phase_step_x[SDE_MAX_PLANES];
+	int32_t init_phase_y[SDE_MAX_PLANES];
+	int32_t phase_step_y[SDE_MAX_PLANES];
+
+	u32 preload_x[SDE_MAX_PLANES];
+	u32 preload_y[SDE_MAX_PLANES];
+	u32 src_width[SDE_MAX_PLANES];
+	u32 src_height[SDE_MAX_PLANES];
+
+	u32 dst_width;
+	u32 dst_height;
+
+	u32 y_rgb_filter_cfg;
+	u32 uv_filter_cfg;
+	u32 alpha_filter_cfg;
+	u32 blend_cfg;
+
+	u32 lut_flag;
+	u32 dir_lut_idx;
+
+	u32 y_rgb_cir_lut_idx;
+	u32 uv_cir_lut_idx;
+	u32 y_rgb_sep_lut_idx;
+	u32 uv_sep_lut_idx;
+	u32 *dir_lut;
+	size_t dir_len;
+	u32 *cir_lut;
+	size_t cir_len;
+	u32 *sep_lut;
+	size_t sep_len;
+
+	/* detail enhancer settings */
+	struct sde_hw_scaler3_de_cfg de;
+};
+
+/**
+ * struct sde_hw_pipe_cfg : Pipe description
+ * @layout:    format layout information for programming buffer to hardware
+ * @src_rect:  src ROI; the caller accounts for operations such as
+ *             decimation, flip, etc. when programming this field
+ * @dst_rect:  destination ROI
+ * @horz_decimation: horizontal decimation factor (0, 2, 4, 8, 16)
+ * @vert_decimation: vertical decimation factor (0, 2, 4, 8, 16)
+ *             2:  read 1 line/pixel, drop 1 line/pixel
+ *             4:  read 1 line/pixel, drop 3 lines/pixels
+ *             8:  read 1 line/pixel, drop 7 lines/pixels
+ *             16: read 1 line/pixel, drop 15 lines/pixels
+ */
+struct sde_hw_pipe_cfg {
+	struct sde_hw_fmt_layout layout;
+	struct sde_rect src_rect;
+	struct sde_rect dst_rect;
+	u8 horz_decimation;
+	u8 vert_decimation;
+};
+
+/**
+ * struct sde_hw_pipe_qos_cfg : Source pipe QoS configuration
+ * @danger_lut: LUT for generating the danger level based on fill level
+ * @safe_lut: LUT for generating the safe level based on fill level
+ * @creq_lut: LUT for generating the creq level based on fill level
+ * @creq_vblank: creq value generated to vbif during vertical blanking
+ * @danger_vblank: danger value generated during vertical blanking
+ * @vblank_en: enable creq_vblank and danger_vblank during vblank
+ * @danger_safe_en: enable danger safe generation
+ */
+struct sde_hw_pipe_qos_cfg {
+	u32 danger_lut;
+	u32 safe_lut;
+	u32 creq_lut;
+	u32 creq_vblank;
+	u32 danger_vblank;
+	bool vblank_en;
+	bool danger_safe_en;
+};
+
+/**
+ * struct sde_hw_sspp_ops - interface to the SSPP hw driver functions
+ * The caller must call the init function to get a pipe context for each pipe.
+ * These functions are assumed to be called after clocks are enabled.
+ */
+struct sde_hw_sspp_ops {
+	/**
+	 * setup_format - setup pixel format, cropping rectangle and flip
+	 * @ctx: Pointer to pipe context
+	 * @fmt: Pointer to format description
+	 * @flags: Extra flags for format config
+	 */
+	void (*setup_format)(struct sde_hw_pipe *ctx,
+			const struct sde_format *fmt, u32 flags);
+
+	/**
+	 * setup_rects - setup pipe ROI rectangles
+	 * @ctx: Pointer to pipe context
+	 * @cfg: Pointer to pipe config structure
+	 * @pe_ext: Pointer to pixel ext settings
+	 * @scale_cfg: Pointer to scaler settings
+	 */
+	void (*setup_rects)(struct sde_hw_pipe *ctx,
+			struct sde_hw_pipe_cfg *cfg,
+			struct sde_hw_pixel_ext *pe_ext,
+			void *scale_cfg);
+
+	/**
+	 * setup_sourceaddress - setup pipe source addresses
+	 * @ctx: Pointer to pipe context
+	 * @cfg: Pointer to pipe config structure
+	 */
+	void (*setup_sourceaddress)(struct sde_hw_pipe *ctx,
+			struct sde_hw_pipe_cfg *cfg);
+
+	/**
+	 * setup_csc - setup color space conversion
+	 * @ctx: Pointer to pipe context
+	 * @data: Pointer to config structure
+	 */
+	void (*setup_csc)(struct sde_hw_pipe *ctx, struct sde_csc_cfg *data);
+
+	/**
+	 * setup_solidfill - enable/disable colorfill
+	 * @ctx: Pointer to pipe context
+	 * @color: Fill color value
+	 */
+	void (*setup_solidfill)(struct sde_hw_pipe *ctx, u32 color);
+
+	/**
+	 * setup_sharpening - setup sharpening
+	 * @ctx: Pointer to pipe context
+	 * @cfg: Pointer to config structure
+	 */
+	void (*setup_sharpening)(struct sde_hw_pipe *ctx,
+			struct sde_hw_sharp_cfg *cfg);
+
+	/**
+	 * setup_pa_hue(): Setup source hue adjustment
+	 * @ctx: Pointer to pipe context
+	 * @cfg: Pointer to hue data
+	 */
+	void (*setup_pa_hue)(struct sde_hw_pipe *ctx, void *cfg);
+
+	/**
+	 * setup_pa_sat(): Setup source saturation adjustment
+	 * @ctx: Pointer to pipe context
+	 * @cfg: Pointer to saturation data
+	 */
+	void (*setup_pa_sat)(struct sde_hw_pipe *ctx, void *cfg);
+
+	/**
+	 * setup_pa_val(): Setup source value adjustment
+	 * @ctx: Pointer to pipe context
+	 * @cfg: Pointer to value data
+	 */
+	void (*setup_pa_val)(struct sde_hw_pipe *ctx, void *cfg);
+
+	/**
+	 * setup_pa_cont(): Setup source contrast adjustment
+	 * @ctx: Pointer to pipe context
+	 * @cfg: Pointer to contrast data
+	 */
+	void (*setup_pa_cont)(struct sde_hw_pipe *ctx, void *cfg);
+
+	/**
+	 * setup_pa_memcolor - setup source color processing
+	 * @ctx: Pointer to pipe context
+	 * @type: Memcolor type (skin, sky or foliage)
+	 * @cfg: Pointer to memory color config data
+	 */
+	void (*setup_pa_memcolor)(struct sde_hw_pipe *ctx,
+			enum sde_memcolor_type type, void *cfg);
+
+	/**
+	 * setup_igc - setup inverse gamma correction
+	 * @ctx: Pointer to pipe context
+	 */
+	void (*setup_igc)(struct sde_hw_pipe *ctx);
+
+	/**
+	 * setup_danger_safe_lut - setup danger safe LUTs
+	 * @ctx: Pointer to pipe context
+	 * @cfg: Pointer to pipe QoS configuration
+	 */
+	void (*setup_danger_safe_lut)(struct sde_hw_pipe *ctx,
+			struct sde_hw_pipe_qos_cfg *cfg);
+
+	/**
+	 * setup_creq_lut - setup CREQ LUT
+	 * @ctx: Pointer to pipe context
+	 * @cfg: Pointer to pipe QoS configuration
+	 */
+	void (*setup_creq_lut)(struct sde_hw_pipe *ctx,
+			struct sde_hw_pipe_qos_cfg *cfg);
+
+	/**
+	 * setup_qos_ctrl - setup QoS control
+	 * @ctx: Pointer to pipe context
+	 * @cfg: Pointer to pipe QoS configuration
+	 */
+	void (*setup_qos_ctrl)(struct sde_hw_pipe *ctx,
+			struct sde_hw_pipe_qos_cfg *cfg);
+
+	/**
+	 * setup_histogram - setup histograms
+	 * @ctx: Pointer to pipe context
+	 * @cfg: Pointer to histogram configuration
+	 */
+	void (*setup_histogram)(struct sde_hw_pipe *ctx,
+			void *cfg);
+
+	/**
+	 * setup_scaler - setup scaler
+	 * @ctx: Pointer to pipe context
+	 * @pipe_cfg: Pointer to pipe configuration
+	 * @pe_cfg: Pointer to pixel extension configuration
+	 * @scaler_cfg: Pointer to scaler configuration
+	 */
+	void (*setup_scaler)(struct sde_hw_pipe *ctx,
+		struct sde_hw_pipe_cfg *pipe_cfg,
+		struct sde_hw_pixel_ext *pe_cfg,
+		void *scaler_cfg);
+};
+
+/**
+ * struct sde_hw_pipe - pipe description
+ * @hw:           block register map object (base/offset/length/version)
+ * @idx:          pipe index
+ * @cap:          pointer to pipe hw capabilities from the catalog; the pipe
+ *                type (VIG/DMA/RGB/CURSOR) determines which operations are
+ *                supported
+ * @highest_bank_bit: highest memory bank bit used
+ * @ops:          pointer to operations possible for this pipe
+ */
+struct sde_hw_pipe {
+	/* base */
+	struct sde_hw_blk_reg_map hw;
+
+	/* Pipe */
+	enum sde_sspp idx;
+	const struct sde_sspp_cfg *cap;
+	u32 highest_bank_bit;
+
+	/* Ops */
+	struct sde_hw_sspp_ops ops;
+};
+
+/**
+ * sde_hw_sspp_init - initializes the sspp hw driver object;
+ * should be called once per pipe before the pipe is accessed.
+ * @idx:  Pipe index for which driver object is required
+ * @addr: Mapped register io address of MDP
+ * @catalog: Pointer to mdss catalog data
+ */
+struct sde_hw_pipe *sde_hw_sspp_init(enum sde_sspp idx,
+			void __iomem *addr,
+			struct sde_mdss_cfg *catalog);
+
+/**
+ * sde_hw_sspp_destroy(): Destroys the SSPP driver context;
+ * should be called during hw pipe cleanup.
+ * @ctx:  Pointer to SSPP driver context returned by sde_hw_sspp_init
+ */
+void sde_hw_sspp_destroy(struct sde_hw_pipe *ctx);
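+
+/*
+ * Illustrative lifecycle sketch (hypothetical caller; assumes a mapped MDP
+ * io base @mmio and a parsed catalog @catalog):
+ *
+ *	struct sde_hw_pipe *pipe;
+ *
+ *	pipe = sde_hw_sspp_init(SSPP_VIG0, mmio, catalog);
+ *	if (IS_ERR_OR_NULL(pipe))
+ *		return;
+ *	if (pipe->ops.setup_format)
+ *		pipe->ops.setup_format(pipe, fmt, flags);
+ *	sde_hw_sspp_destroy(pipe);
+ *
+ * Ops may be populated conditionally based on catalog features, so each
+ * pointer is checked before use.
+ */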
+
+#endif /*_SDE_HW_SSPP_H */
+
diff -Nruw linux-4.4.115-fbx/drivers/gpu/drm/msm/sde./sde_hw_top.c linux-4.4.115-fbx/drivers/gpu/drm/msm/sde/sde_hw_top.c
--- linux-4.4.115-fbx/drivers/gpu/drm/msm/sde./sde_hw_top.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/gpu/drm/msm/sde/sde_hw_top.c	2019-10-29 09:26:23.641203159 +0100
@@ -0,0 +1,275 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include "sde_hwio.h"
+#include "sde_hw_catalog.h"
+#include "sde_hw_top.h"
+#include "sde_dbg.h"
+
+#define SSPP_SPARE                        0x28
+
+#define FLD_SPLIT_DISPLAY_CMD             BIT(1)
+#define FLD_SMART_PANEL_FREE_RUN          BIT(2)
+#define FLD_INTF_1_SW_TRG_MUX             BIT(4)
+#define FLD_INTF_2_SW_TRG_MUX             BIT(8)
+#define FLD_TE_LINE_INTER_WATERLEVEL_MASK 0xFFFF
+
+#define DANGER_STATUS                     0x360
+#define SAFE_STATUS                       0x364
+
+#define TE_LINE_INTERVAL                  0x3F4
+
+#define TRAFFIC_SHAPER_EN                 BIT(31)
+#define TRAFFIC_SHAPER_RD_CLIENT(num)     (0x030 + (num * 4))
+#define TRAFFIC_SHAPER_WR_CLIENT(num)     (0x060 + (num * 4))
+#define TRAFFIC_SHAPER_FIXPOINT_FACTOR    4
+
+static void sde_hw_setup_split_pipe(struct sde_hw_mdp *mdp,
+		struct split_pipe_cfg *cfg)
+{
+	struct sde_hw_blk_reg_map *c;
+	u32 upper_pipe = 0;
+	u32 lower_pipe = 0;
+
+	if (!mdp || !cfg)
+		return;
+
+	c = &mdp->hw;
+
+	/* The SPLIT registers are only for DSI interfaces */
+	if ((cfg->intf != INTF_1) && (cfg->intf != INTF_2))
+		return;
+
+	if (cfg->en) {
+		if (cfg->mode == INTF_MODE_CMD) {
+			lower_pipe = FLD_SPLIT_DISPLAY_CMD;
+			/* interface controlling sw trigger */
+			if (cfg->intf == INTF_2)
+				lower_pipe |= FLD_INTF_1_SW_TRG_MUX;
+			else
+				lower_pipe |= FLD_INTF_2_SW_TRG_MUX;
+
+			/* free run */
+			if (cfg->pp_split_slave != INTF_MAX)
+				lower_pipe = FLD_SMART_PANEL_FREE_RUN;
+
+			upper_pipe = lower_pipe;
+		} else {
+			if (cfg->intf == INTF_2) {
+				lower_pipe = FLD_INTF_1_SW_TRG_MUX;
+				upper_pipe = FLD_INTF_2_SW_TRG_MUX;
+			} else {
+				lower_pipe = FLD_INTF_2_SW_TRG_MUX;
+				upper_pipe = FLD_INTF_1_SW_TRG_MUX;
+			}
+		}
+	}
+
+	SDE_REG_WRITE(c, SSPP_SPARE, cfg->split_flush_en ? 0x1 : 0x0);
+	SDE_REG_WRITE(c, SPLIT_DISPLAY_LOWER_PIPE_CTRL, lower_pipe);
+	SDE_REG_WRITE(c, SPLIT_DISPLAY_UPPER_PIPE_CTRL, upper_pipe);
+	SDE_REG_WRITE(c, SPLIT_DISPLAY_EN, cfg->en & 0x1);
+}
+
+static void sde_hw_setup_pp_split(struct sde_hw_mdp *mdp,
+		struct split_pipe_cfg *cfg)
+{
+	u32 ppb_config = 0x0;
+	u32 ppb_control = 0x0;
+
+	if (!mdp || !cfg)
+		return;
+
+	if (cfg->en && cfg->pp_split_slave != INTF_MAX) {
+		ppb_config |= (cfg->pp_split_slave - INTF_0 + 1) << 20;
+		ppb_config |= BIT(16); /* split enable */
+		ppb_control = BIT(5); /* horz split*/
+	}
+	if (cfg->pp_split_index) {
+		SDE_REG_WRITE(&mdp->hw, PPB0_CONFIG, 0x0);
+		SDE_REG_WRITE(&mdp->hw, PPB0_CNTL, 0x0);
+		SDE_REG_WRITE(&mdp->hw, PPB1_CONFIG, ppb_config);
+		SDE_REG_WRITE(&mdp->hw, PPB1_CNTL, ppb_control);
+	} else {
+		SDE_REG_WRITE(&mdp->hw, PPB0_CONFIG, ppb_config);
+		SDE_REG_WRITE(&mdp->hw, PPB0_CNTL, ppb_control);
+		SDE_REG_WRITE(&mdp->hw, PPB1_CONFIG, 0x0);
+		SDE_REG_WRITE(&mdp->hw, PPB1_CNTL, 0x0);
+	}
+}
+
+static void sde_hw_setup_cdm_output(struct sde_hw_mdp *mdp,
+		struct cdm_output_cfg *cfg)
+{
+	struct sde_hw_blk_reg_map *c = &mdp->hw;
+	u32 out_ctl = 0;
+
+	if (cfg->wb_en)
+		out_ctl |= BIT(24);
+	else if (cfg->intf_en)
+		out_ctl |= BIT(19);
+
+	SDE_REG_WRITE(c, MDP_OUT_CTL_0, out_ctl);
+}
+
+static bool sde_hw_setup_clk_force_ctrl(struct sde_hw_mdp *mdp,
+		enum sde_clk_ctrl_type clk_ctrl, bool enable)
+{
+	struct sde_hw_blk_reg_map *c = &mdp->hw;
+	u32 reg_off, bit_off;
+	u32 reg_val, new_val;
+	bool clk_forced_on;
+
+	if (clk_ctrl <= SDE_CLK_CTRL_NONE || clk_ctrl >= SDE_CLK_CTRL_MAX)
+		return false;
+
+	reg_off = mdp->cap->clk_ctrls[clk_ctrl].reg_off;
+	bit_off = mdp->cap->clk_ctrls[clk_ctrl].bit_off;
+
+	reg_val = SDE_REG_READ(c, reg_off);
+
+	if (enable)
+		new_val = reg_val | BIT(bit_off);
+	else
+		new_val = reg_val & ~BIT(bit_off);
+
+	SDE_REG_WRITE(c, reg_off, new_val);
+
+	clk_forced_on = !(reg_val & BIT(bit_off));
+
+	return clk_forced_on;
+}
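+
+/*
+ * The boolean result above tells the caller whether this call is what
+ * forced the clock on. Illustrative (hypothetical caller):
+ *
+ *	bool forced_on;
+ *
+ *	forced_on = mdp->ops.setup_clk_force_ctrl(mdp, clk_ctrl, true);
+ *	(program the block that needs the clock)
+ *	if (forced_on)
+ *		mdp->ops.setup_clk_force_ctrl(mdp, clk_ctrl, false);
+ *
+ * i.e. the force-on is only released by the caller that actually set it.
+ */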
+
+static void sde_hw_get_danger_status(struct sde_hw_mdp *mdp,
+		struct sde_danger_safe_status *status)
+{
+	struct sde_hw_blk_reg_map *c = &mdp->hw;
+	u32 value;
+
+	value = SDE_REG_READ(c, DANGER_STATUS);
+	status->mdp = (value >> 0) & 0x3;
+	status->sspp[SSPP_VIG0] = (value >> 4) & 0x3;
+	status->sspp[SSPP_VIG1] = (value >> 6) & 0x3;
+	status->sspp[SSPP_VIG2] = (value >> 8) & 0x3;
+	status->sspp[SSPP_VIG3] = (value >> 10) & 0x3;
+	status->sspp[SSPP_RGB0] = (value >> 12) & 0x3;
+	status->sspp[SSPP_RGB1] = (value >> 14) & 0x3;
+	status->sspp[SSPP_RGB2] = (value >> 16) & 0x3;
+	status->sspp[SSPP_RGB3] = (value >> 18) & 0x3;
+	status->sspp[SSPP_DMA0] = (value >> 20) & 0x3;
+	status->sspp[SSPP_DMA1] = (value >> 22) & 0x3;
+	status->sspp[SSPP_DMA2] = (value >> 28) & 0x3;
+	status->sspp[SSPP_DMA3] = (value >> 30) & 0x3;
+	status->sspp[SSPP_CURSOR0] = (value >> 24) & 0x3;
+	status->sspp[SSPP_CURSOR1] = (value >> 26) & 0x3;
+	status->wb[WB_0] = 0;
+	status->wb[WB_1] = 0;
+	status->wb[WB_2] = (value >> 2) & 0x3;
+	status->wb[WB_3] = 0;
+}
+
+static void sde_hw_get_safe_status(struct sde_hw_mdp *mdp,
+		struct sde_danger_safe_status *status)
+{
+	struct sde_hw_blk_reg_map *c = &mdp->hw;
+	u32 value;
+
+	value = SDE_REG_READ(c, SAFE_STATUS);
+	status->mdp = (value >> 0) & 0x1;
+	status->sspp[SSPP_VIG0] = (value >> 4) & 0x1;
+	status->sspp[SSPP_VIG1] = (value >> 6) & 0x1;
+	status->sspp[SSPP_VIG2] = (value >> 8) & 0x1;
+	status->sspp[SSPP_VIG3] = (value >> 10) & 0x1;
+	status->sspp[SSPP_RGB0] = (value >> 12) & 0x1;
+	status->sspp[SSPP_RGB1] = (value >> 14) & 0x1;
+	status->sspp[SSPP_RGB2] = (value >> 16) & 0x1;
+	status->sspp[SSPP_RGB3] = (value >> 18) & 0x1;
+	status->sspp[SSPP_DMA0] = (value >> 20) & 0x1;
+	status->sspp[SSPP_DMA1] = (value >> 22) & 0x1;
+	status->sspp[SSPP_DMA2] = (value >> 28) & 0x1;
+	status->sspp[SSPP_DMA3] = (value >> 30) & 0x1;
+	status->sspp[SSPP_CURSOR0] = (value >> 24) & 0x1;
+	status->sspp[SSPP_CURSOR1] = (value >> 26) & 0x1;
+	status->wb[WB_0] = 0;
+	status->wb[WB_1] = 0;
+	status->wb[WB_2] = (value >> 2) & 0x1;
+	status->wb[WB_3] = 0;
+}
+
+static void _setup_mdp_ops(struct sde_hw_mdp_ops *ops,
+		unsigned long cap)
+{
+	ops->setup_split_pipe = sde_hw_setup_split_pipe;
+	ops->setup_pp_split = sde_hw_setup_pp_split;
+	ops->setup_cdm_output = sde_hw_setup_cdm_output;
+	ops->setup_clk_force_ctrl = sde_hw_setup_clk_force_ctrl;
+	ops->get_danger_status = sde_hw_get_danger_status;
+	ops->get_safe_status = sde_hw_get_safe_status;
+}
+
+static const struct sde_mdp_cfg *_top_offset(enum sde_mdp mdp,
+		const struct sde_mdss_cfg *m,
+		void __iomem *addr,
+		struct sde_hw_blk_reg_map *b)
+{
+	int i;
+
+	for (i = 0; i < m->mdp_count; i++) {
+		if (mdp == m->mdp[i].id) {
+			b->base_off = addr;
+			b->blk_off = m->mdp[i].base;
+			b->length = m->mdp[i].len;
+			b->hwversion = m->hwversion;
+			b->log_mask = SDE_DBG_MASK_TOP;
+			return &m->mdp[i];
+		}
+	}
+
+	return ERR_PTR(-EINVAL);
+}
+
+struct sde_hw_mdp *sde_hw_mdptop_init(enum sde_mdp idx,
+		void __iomem *addr,
+		const struct sde_mdss_cfg *m)
+{
+	struct sde_hw_mdp *mdp;
+	const struct sde_mdp_cfg *cfg;
+
+	mdp = kzalloc(sizeof(*mdp), GFP_KERNEL);
+	if (!mdp)
+		return ERR_PTR(-ENOMEM);
+
+	cfg = _top_offset(idx, m, addr, &mdp->hw);
+	if (IS_ERR_OR_NULL(cfg)) {
+		kfree(mdp);
+		return ERR_PTR(-EINVAL);
+	}
+
+	/* Assign ops */
+	mdp->idx = idx;
+	mdp->cap = cfg;
+	_setup_mdp_ops(&mdp->ops, mdp->cap->features);
+
+	sde_dbg_reg_register_dump_range(SDE_DBG_NAME, cfg->name,
+			mdp->hw.blk_off, mdp->hw.blk_off + mdp->hw.length,
+			mdp->hw.xin_id);
+	sde_dbg_set_sde_top_offset(mdp->hw.blk_off);
+
+	return mdp;
+}
+
+void sde_hw_mdp_destroy(struct sde_hw_mdp *mdp)
+{
+	kfree(mdp);
+}
+
diff -Nruw linux-4.4.115-fbx/drivers/gpu/drm/msm/sde./sde_hw_top.h linux-4.4.115-fbx/drivers/gpu/drm/msm/sde/sde_hw_top.h
--- linux-4.4.115-fbx/drivers/gpu/drm/msm/sde./sde_hw_top.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/gpu/drm/msm/sde/sde_hw_top.h	2019-01-22 16:16:23.515246515 +0100
@@ -0,0 +1,170 @@
+/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _SDE_HW_TOP_H
+#define _SDE_HW_TOP_H
+
+#include "sde_hw_catalog.h"
+#include "sde_hw_mdss.h"
+#include "sde_hw_util.h"
+
+struct sde_hw_mdp;
+
+/**
+ * struct traffic_shaper_cfg: traffic shaper configuration
+ * @en        : enable/disable traffic shaper
+ * @rd_client : true if read client; false if write client
+ * @client_id : client identifier
+ * @bpc_denom : denominator of byte per clk
+ * @bpc_numer : numerator of byte per clk
+ */
+struct traffic_shaper_cfg {
+	bool en;
+	bool rd_client;
+	u32 client_id;
+	u32 bpc_denom;
+	u64 bpc_numer;
+};
+
+/**
+ * struct split_pipe_cfg - pipe configuration for dual display panels
+ * @en        : Enable/disable dual pipe configuration
+ * @mode      : Panel interface mode
+ * @intf      : Interface id for main control path
+ * @pp_split_slave: Slave interface for ping-pong split, INTF_MAX to disable
+ * @pp_split_index: Ping-pong index for ping-pong split
+ * @split_flush_en: Allows both the paths to be flushed when master path is
+ *              flushed
+ */
+struct split_pipe_cfg {
+	bool en;
+	enum sde_intf_mode mode;
+	enum sde_intf intf;
+	enum sde_intf pp_split_slave;
+	u32 pp_split_index;
+	bool split_flush_en;
+};
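+
+/*
+ * Illustrative example (hypothetical values): a dual-DSI command mode
+ * panel driven from INTF_1, with ping-pong split disabled:
+ *
+ *	struct split_pipe_cfg cfg = {
+ *		.en = true,
+ *		.mode = INTF_MODE_CMD,
+ *		.intf = INTF_1,
+ *		.pp_split_slave = INTF_MAX,
+ *		.split_flush_en = true,
+ *	};
+ */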
+
+/**
+ * struct cdm_output_cfg: output configuration for cdm
+ * @wb_en     : enable/disable writeback output
+ * @intf_en   : enable/disable interface output
+ */
+struct cdm_output_cfg {
+	bool wb_en;
+	bool intf_en;
+};
+
+/**
+ * struct sde_danger_safe_status: danger and safe status signals
+ * @mdp: top level status
+ * @sspp: source pipe status
+ * @wb: writeback output status
+ */
+struct sde_danger_safe_status {
+	u8 mdp;
+	u8 sspp[SSPP_MAX];
+	u8 wb[WB_MAX];
+};
+
+/**
+ * struct sde_hw_mdp_ops - interface to the MDP TOP Hw driver functions
+ * Assumption is these functions will be called after clocks are enabled.
+ * @setup_split_pipe : Programs the pipe control registers
+ * @setup_pp_split : Programs the pp split control registers
+ * @setup_cdm_output : Programs cdm control
+ * @setup_traffic_shaper : Programs traffic shaper control
+ * @setup_clk_force_ctrl : Sets clock force control
+ * @get_danger_status : Reads the danger status signals
+ * @get_safe_status : Reads the safe status signals
+ */
+struct sde_hw_mdp_ops {
+	/** setup_split_pipe() : Registers are not double buffered; this
+	 * function should be called before timing control enable
+	 * @mdp  : mdp top context driver
+	 * @cfg  : upper and lower part of pipe configuration
+	 */
+	void (*setup_split_pipe)(struct sde_hw_mdp *mdp,
+			struct split_pipe_cfg *cfg);
+
+	/** setup_pp_split() : Configure pp split related registers
+	 * @mdp  : mdp top context driver
+	 * @cfg  : upper and lower part of pipe configuration
+	 */
+	void (*setup_pp_split)(struct sde_hw_mdp *mdp,
+			struct split_pipe_cfg *cfg);
+
+	/**
+	 * setup_cdm_output() : Setup selection control of the cdm data path
+	 * @mdp  : mdp top context driver
+	 * @cfg  : cdm output configuration
+	 */
+	void (*setup_cdm_output)(struct sde_hw_mdp *mdp,
+			struct cdm_output_cfg *cfg);
+
+	/**
+	 * setup_traffic_shaper() : Setup traffic shaper control
+	 * @mdp  : mdp top context driver
+	 * @cfg  : traffic shaper configuration
+	 */
+	void (*setup_traffic_shaper)(struct sde_hw_mdp *mdp,
+			struct traffic_shaper_cfg *cfg);
+
+	/**
+	 * setup_clk_force_ctrl - set clock force control
+	 * @mdp: mdp top context driver
+	 * @clk_ctrl: clock to be controlled
+	 * @enable: force on enable
+	 * @return: if the clock is forced-on by this function
+	 */
+	bool (*setup_clk_force_ctrl)(struct sde_hw_mdp *mdp,
+			enum sde_clk_ctrl_type clk_ctrl, bool enable);
+
+	/**
+	 * get_danger_status - get danger status
+	 * @mdp: mdp top context driver
+	 * @status: Pointer to danger safe status
+	 */
+	void (*get_danger_status)(struct sde_hw_mdp *mdp,
+			struct sde_danger_safe_status *status);
+
+	/**
+	 * get_safe_status - get safe status
+	 * @mdp: mdp top context driver
+	 * @status: Pointer to danger safe status
+	 */
+	void (*get_safe_status)(struct sde_hw_mdp *mdp,
+			struct sde_danger_safe_status *status);
+};
+
+struct sde_hw_mdp {
+	/* base */
+	struct sde_hw_blk_reg_map hw;
+
+	/* intf */
+	enum sde_mdp idx;
+	const struct sde_mdp_cfg *cap;
+
+	/* ops */
+	struct sde_hw_mdp_ops ops;
+};
+
+/**
+ * sde_hw_mdptop_init - initializes the MDP TOP driver for the passed idx
+ * @idx:  MDP TOP index for which driver object is required
+ * @addr: Mapped register io address of MDP
+ * @m:    Pointer to mdss catalog data
+ */
+struct sde_hw_mdp *sde_hw_mdptop_init(enum sde_mdp idx,
+		void __iomem *addr,
+		const struct sde_mdss_cfg *m);
+
+void sde_hw_mdp_destroy(struct sde_hw_mdp *mdp);
+
+#endif /*_SDE_HW_TOP_H */
diff -Nruw linux-4.4.115-fbx/drivers/gpu/drm/msm/sde./sde_hw_util.c linux-4.4.115-fbx/drivers/gpu/drm/msm/sde/sde_hw_util.c
--- linux-4.4.115-fbx/drivers/gpu/drm/msm/sde./sde_hw_util.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/gpu/drm/msm/sde/sde_hw_util.c	2019-01-22 16:16:23.519246551 +0100
@@ -0,0 +1,93 @@
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#define pr_fmt(fmt)	"[drm:%s:%d] " fmt, __func__, __LINE__
+#include "msm_drv.h"
+#include "sde_kms.h"
+#include "sde_hw_mdss.h"
+#include "sde_hw_util.h"
+
+/* file-static variable for debugfs access */
+static u32 sde_hw_util_log_mask = SDE_DBG_MASK_NONE;
+
+void sde_reg_write(struct sde_hw_blk_reg_map *c,
+		u32 reg_off,
+		u32 val,
+		const char *name)
+{
+	/* don't need to mutex protect this */
+	if (c->log_mask & sde_hw_util_log_mask)
+		SDE_DEBUG_DRIVER("[%s:0x%X] <= 0x%X\n",
+				name, c->blk_off + reg_off, val);
+	writel_relaxed(val, c->base_off + c->blk_off + reg_off);
+}
+
+int sde_reg_read(struct sde_hw_blk_reg_map *c, u32 reg_off)
+{
+	return readl_relaxed(c->base_off + c->blk_off + reg_off);
+}
+
+u32 *sde_hw_util_get_log_mask_ptr(void)
+{
+	return &sde_hw_util_log_mask;
+}
+
+void sde_hw_csc_setup(struct sde_hw_blk_reg_map *c,
+		u32 csc_reg_off,
+		struct sde_csc_cfg *data, bool csc10)
+{
+	static const u32 matrix_shift = 7;
+	u32 clamp_shift = csc10 ? 16 : 8;
+	u32 val;
+
+	/* matrix coeff - convert S15.16 to S4.9 */
+	val = ((data->csc_mv[0] >> matrix_shift) & 0x1FFF) |
+		(((data->csc_mv[1] >> matrix_shift) & 0x1FFF) << 16);
+	SDE_REG_WRITE(c, csc_reg_off, val);
+	val = ((data->csc_mv[2] >> matrix_shift) & 0x1FFF) |
+		(((data->csc_mv[3] >> matrix_shift) & 0x1FFF) << 16);
+	SDE_REG_WRITE(c, csc_reg_off + 0x4, val);
+	val = ((data->csc_mv[4] >> matrix_shift) & 0x1FFF) |
+		(((data->csc_mv[5] >> matrix_shift) & 0x1FFF) << 16);
+	SDE_REG_WRITE(c, csc_reg_off + 0x8, val);
+	val = ((data->csc_mv[6] >> matrix_shift) & 0x1FFF) |
+		(((data->csc_mv[7] >> matrix_shift) & 0x1FFF) << 16);
+	SDE_REG_WRITE(c, csc_reg_off + 0xc, val);
+	val = (data->csc_mv[8] >> matrix_shift) & 0x1FFF;
+	SDE_REG_WRITE(c, csc_reg_off + 0x10, val);
+
+	/* Pre clamp */
+	val = (data->csc_pre_lv[0] << clamp_shift) | data->csc_pre_lv[1];
+	SDE_REG_WRITE(c, csc_reg_off + 0x14, val);
+	val = (data->csc_pre_lv[2] << clamp_shift) | data->csc_pre_lv[3];
+	SDE_REG_WRITE(c, csc_reg_off + 0x18, val);
+	val = (data->csc_pre_lv[4] << clamp_shift) | data->csc_pre_lv[5];
+	SDE_REG_WRITE(c, csc_reg_off + 0x1c, val);
+
+	/* Post clamp */
+	val = (data->csc_post_lv[0] << clamp_shift) | data->csc_post_lv[1];
+	SDE_REG_WRITE(c, csc_reg_off + 0x20, val);
+	val = (data->csc_post_lv[2] << clamp_shift) | data->csc_post_lv[3];
+	SDE_REG_WRITE(c, csc_reg_off + 0x24, val);
+	val = (data->csc_post_lv[4] << clamp_shift) | data->csc_post_lv[5];
+	SDE_REG_WRITE(c, csc_reg_off + 0x28, val);
+
+	/* Pre-Bias */
+	SDE_REG_WRITE(c, csc_reg_off + 0x2c, data->csc_pre_bv[0]);
+	SDE_REG_WRITE(c, csc_reg_off + 0x30, data->csc_pre_bv[1]);
+	SDE_REG_WRITE(c, csc_reg_off + 0x34, data->csc_pre_bv[2]);
+
+	/* Post-Bias */
+	SDE_REG_WRITE(c, csc_reg_off + 0x38, data->csc_post_bv[0]);
+	SDE_REG_WRITE(c, csc_reg_off + 0x3c, data->csc_post_bv[1]);
+	SDE_REG_WRITE(c, csc_reg_off + 0x40, data->csc_post_bv[2]);
+}
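+
+/*
+ * Worked example of the S15.16 -> S4.9 coefficient conversion above: 1.0
+ * in S15.16 is 0x00010000; shifting right by matrix_shift (7) yields
+ * 0x200 (512 == 1.0 in S4.9), and the 0x1FFF mask keeps the 13 bits the
+ * hardware register expects.
+ */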
+
diff -Nruw linux-4.4.115-fbx/drivers/gpu/drm/msm/sde./sde_hw_util.h linux-4.4.115-fbx/drivers/gpu/drm/msm/sde/sde_hw_util.h
--- linux-4.4.115-fbx/drivers/gpu/drm/msm/sde./sde_hw_util.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/gpu/drm/msm/sde/sde_hw_util.h	2019-01-22 16:16:23.519246551 +0100
@@ -0,0 +1,57 @@
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _SDE_HW_UTIL_H
+#define _SDE_HW_UTIL_H
+
+#include <linux/io.h>
+#include <linux/slab.h>
+#include "sde_hw_mdss.h"
+
+/*
+ * This is the common struct maintained by each sub block
+ * for mapping the register offsets in this block to the
+ * absolute IO address
+ * @base_off:     mdp register mapped offset
+ * @blk_off:      pipe offset relative to mdss offset
+ * @length:       length of register block offset
+ * @xin_id:       xin id
+ * @hwversion:    mdss hw version number
+ * @log_mask:     log mask for this block
+ */
+struct sde_hw_blk_reg_map {
+	void __iomem *base_off;
+	u32 blk_off;
+	u32 length;
+	u32 xin_id;
+	u32 hwversion;
+	u32 log_mask;
+};
+
+u32 *sde_hw_util_get_log_mask_ptr(void);
+
+void sde_reg_write(struct sde_hw_blk_reg_map *c,
+		u32 reg_off,
+		u32 val,
+		const char *name);
+int sde_reg_read(struct sde_hw_blk_reg_map *c, u32 reg_off);
+
+#define SDE_REG_WRITE(c, off, val) sde_reg_write(c, off, val, #off)
+#define SDE_REG_READ(c, off) sde_reg_read(c, off)
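+
+/*
+ * SDE_REG_WRITE stringifies its offset argument for the debug log, e.g.
+ * (illustrative):
+ *
+ *	SDE_REG_WRITE(c, WB_OUT_SIZE, outsize);
+ *
+ * expands to sde_reg_write(c, WB_OUT_SIZE, outsize, "WB_OUT_SIZE"), so the
+ * register name appears in the log when the block's log_mask is enabled.
+ */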
+
+void *sde_hw_util_get_dir(void);
+
+void sde_hw_csc_setup(struct sde_hw_blk_reg_map *c,
+		u32 csc_reg_off,
+		struct sde_csc_cfg *data, bool csc10);
+
+#endif /* _SDE_HW_UTIL_H */
+
diff -Nruw linux-4.4.115-fbx/drivers/gpu/drm/msm/sde./sde_hw_vbif.c linux-4.4.115-fbx/drivers/gpu/drm/msm/sde/sde_hw_vbif.c
--- linux-4.4.115-fbx/drivers/gpu/drm/msm/sde./sde_hw_vbif.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/gpu/drm/msm/sde/sde_hw_vbif.c	2019-10-29 09:26:23.641203159 +0100
@@ -0,0 +1,169 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include "sde_hwio.h"
+#include "sde_hw_catalog.h"
+#include "sde_hw_vbif.h"
+#include "sde_dbg.h"
+
+#define VBIF_VERSION			0x0000
+#define VBIF_CLK_FORCE_CTRL0		0x0008
+#define VBIF_CLK_FORCE_CTRL1		0x000C
+#define VBIF_QOS_REMAP_00		0x0020
+#define VBIF_QOS_REMAP_01		0x0024
+#define VBIF_QOS_REMAP_10		0x0028
+#define VBIF_QOS_REMAP_11		0x002C
+#define VBIF_WRITE_GATHER_EN		0x00AC
+#define VBIF_IN_RD_LIM_CONF0		0x00B0
+#define VBIF_IN_RD_LIM_CONF1		0x00B4
+#define VBIF_IN_RD_LIM_CONF2		0x00B8
+#define VBIF_IN_WR_LIM_CONF0		0x00C0
+#define VBIF_IN_WR_LIM_CONF1		0x00C4
+#define VBIF_IN_WR_LIM_CONF2		0x00C8
+#define VBIF_OUT_RD_LIM_CONF0		0x00D0
+#define VBIF_OUT_WR_LIM_CONF0		0x00D4
+#define VBIF_XIN_HALT_CTRL0		0x0200
+#define VBIF_XIN_HALT_CTRL1		0x0204
+
+static void sde_hw_set_limit_conf(struct sde_hw_vbif *vbif,
+		u32 xin_id, bool rd, u32 limit)
+{
+	struct sde_hw_blk_reg_map *c = &vbif->hw;
+	u32 reg_val;
+	u32 reg_off;
+	u32 bit_off;
+
+	if (rd)
+		reg_off = VBIF_IN_RD_LIM_CONF0;
+	else
+		reg_off = VBIF_IN_WR_LIM_CONF0;
+
+	reg_off += (xin_id / 4) * 4;
+	bit_off = (xin_id % 4) * 8;
+	reg_val = SDE_REG_READ(c, reg_off);
+	reg_val &= ~(0xFF << bit_off);
+	reg_val |= (limit) << bit_off;
+	SDE_REG_WRITE(c, reg_off, reg_val);
+}
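+
+/*
+ * Each CONF register packs four 8-bit per-client limits. Worked example
+ * (illustrative): for xin_id == 6 with rd == false, reg_off becomes
+ * VBIF_IN_WR_LIM_CONF0 + (6 / 4) * 4 == VBIF_IN_WR_LIM_CONF1 and bit_off
+ * becomes (6 % 4) * 8 == 16, i.e. the third byte of that register.
+ */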
+
+static u32 sde_hw_get_limit_conf(struct sde_hw_vbif *vbif,
+		u32 xin_id, bool rd)
+{
+	struct sde_hw_blk_reg_map *c = &vbif->hw;
+	u32 reg_val;
+	u32 reg_off;
+	u32 bit_off;
+	u32 limit;
+
+	if (rd)
+		reg_off = VBIF_IN_RD_LIM_CONF0;
+	else
+		reg_off = VBIF_IN_WR_LIM_CONF0;
+
+	reg_off += (xin_id / 4) * 4;
+	bit_off = (xin_id % 4) * 8;
+	reg_val = SDE_REG_READ(c, reg_off);
+	limit = (reg_val >> bit_off) & 0xFF;
+
+	return limit;
+}
+
+static void sde_hw_set_halt_ctrl(struct sde_hw_vbif *vbif,
+		u32 xin_id, bool enable)
+{
+	struct sde_hw_blk_reg_map *c = &vbif->hw;
+	u32 reg_val;
+
+	reg_val = SDE_REG_READ(c, VBIF_XIN_HALT_CTRL0);
+
+	if (enable)
+		reg_val |= BIT(xin_id);
+	else
+		reg_val &= ~BIT(xin_id);
+
+	SDE_REG_WRITE(c, VBIF_XIN_HALT_CTRL0, reg_val);
+}
+
+static bool sde_hw_get_halt_ctrl(struct sde_hw_vbif *vbif,
+		u32 xin_id)
+{
+	struct sde_hw_blk_reg_map *c = &vbif->hw;
+	u32 reg_val;
+
+	reg_val = SDE_REG_READ(c, VBIF_XIN_HALT_CTRL1);
+
+	return !!(reg_val & BIT(xin_id));
+}
+
+static void _setup_vbif_ops(struct sde_hw_vbif_ops *ops,
+		unsigned long cap)
+{
+	ops->set_limit_conf = sde_hw_set_limit_conf;
+	ops->get_limit_conf = sde_hw_get_limit_conf;
+	ops->set_halt_ctrl = sde_hw_set_halt_ctrl;
+	ops->get_halt_ctrl = sde_hw_get_halt_ctrl;
+}
+
+static const struct sde_vbif_cfg *_top_offset(enum sde_vbif vbif,
+		const struct sde_mdss_cfg *m,
+		void __iomem *addr,
+		struct sde_hw_blk_reg_map *b)
+{
+	int i;
+
+	for (i = 0; i < m->vbif_count; i++) {
+		if (vbif == m->vbif[i].id) {
+			b->base_off = addr;
+			b->blk_off = m->vbif[i].base;
+			b->length = m->vbif[i].len;
+			b->hwversion = m->hwversion;
+			b->log_mask = SDE_DBG_MASK_VBIF;
+			return &m->vbif[i];
+		}
+	}
+
+	return ERR_PTR(-EINVAL);
+}
+
+struct sde_hw_vbif *sde_hw_vbif_init(enum sde_vbif idx,
+		void __iomem *addr,
+		const struct sde_mdss_cfg *m)
+{
+	struct sde_hw_vbif *c;
+	const struct sde_vbif_cfg *cfg;
+
+	c = kzalloc(sizeof(*c), GFP_KERNEL);
+	if (!c)
+		return ERR_PTR(-ENOMEM);
+
+	cfg = _top_offset(idx, m, addr, &c->hw);
+	if (IS_ERR_OR_NULL(cfg)) {
+		kfree(c);
+		return ERR_PTR(-EINVAL);
+	}
+
+	/* Assign ops */
+	c->idx = idx;
+	c->cap = cfg;
+	_setup_vbif_ops(&c->ops, c->cap->features);
+
+	/* no need to register sub-range in sde dbg, dump entire vbif io base */
+
+	return c;
+}
+
+void sde_hw_vbif_destroy(struct sde_hw_vbif *vbif)
+{
+	kfree(vbif);
+}
diff -Nruw linux-4.4.115-fbx/drivers/gpu/drm/msm/sde./sde_hw_vbif.h linux-4.4.115-fbx/drivers/gpu/drm/msm/sde/sde_hw_vbif.h
--- linux-4.4.115-fbx/drivers/gpu/drm/msm/sde./sde_hw_vbif.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/gpu/drm/msm/sde/sde_hw_vbif.h	2019-01-22 16:16:23.519246551 +0100
@@ -0,0 +1,90 @@
+/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _SDE_HW_VBIF_H
+#define _SDE_HW_VBIF_H
+
+#include "sde_hw_catalog.h"
+#include "sde_hw_mdss.h"
+#include "sde_hw_util.h"
+
+struct sde_hw_vbif;
+
+/**
+ * struct sde_hw_vbif_ops : Interface to the VBIF hardware driver functions
+ *  Assumption is these functions will be called after clocks are enabled
+ */
+struct sde_hw_vbif_ops {
+	/**
+	 * set_limit_conf - set transaction limit config
+	 * @vbif: vbif context driver
+	 * @xin_id: client interface identifier
+	 * @rd: true for read limit; false for write limit
+	 * @limit: outstanding transaction limit
+	 */
+	void (*set_limit_conf)(struct sde_hw_vbif *vbif,
+			u32 xin_id, bool rd, u32 limit);
+
+	/**
+	 * get_limit_conf - get transaction limit config
+	 * @vbif: vbif context driver
+	 * @xin_id: client interface identifier
+	 * @rd: true for read limit; false for write limit
+	 * @return: outstanding transaction limit
+	 */
+	u32 (*get_limit_conf)(struct sde_hw_vbif *vbif,
+			u32 xin_id, bool rd);
+
+	/**
+	 * set_halt_ctrl - set halt control
+	 * @vbif: vbif context driver
+	 * @xin_id: client interface identifier
+	 * @enable: halt control enable
+	 */
+	void (*set_halt_ctrl)(struct sde_hw_vbif *vbif,
+			u32 xin_id, bool enable);
+
+	/**
+	 * get_halt_ctrl - get halt control
+	 * @vbif: vbif context driver
+	 * @xin_id: client interface identifier
+	 * @return: halt control enable
+	 */
+	bool (*get_halt_ctrl)(struct sde_hw_vbif *vbif,
+			u32 xin_id);
+};
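+
+/*
+ * Illustrative halt sequence (hypothetical caller): request a halt on a
+ * client, poll until the hardware acknowledges it, then release:
+ *
+ *	vbif->ops.set_halt_ctrl(vbif, xin_id, true);
+ *	while (!vbif->ops.get_halt_ctrl(vbif, xin_id))
+ *		udelay(10);
+ *	vbif->ops.set_halt_ctrl(vbif, xin_id, false);
+ *
+ * A real caller would bound the poll with a timeout.
+ */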
+
+struct sde_hw_vbif {
+	/* base */
+	struct sde_hw_blk_reg_map hw;
+
+	/* vbif */
+	enum sde_vbif idx;
+	const struct sde_vbif_cfg *cap;
+
+	/* ops */
+	struct sde_hw_vbif_ops ops;
+};
+
+/**
+ * sde_hw_vbif_init - initializes the vbif driver for the passed vbif idx
+ * @idx:  VBIF index for which driver object is required
+ * @addr: Mapped register io address of MDSS
+ * @m:    Pointer to mdss catalog data
+ */
+struct sde_hw_vbif *sde_hw_vbif_init(enum sde_vbif idx,
+		void __iomem *addr,
+		const struct sde_mdss_cfg *m);
+
+void sde_hw_vbif_destroy(struct sde_hw_vbif *vbif);
+
+#endif /*_SDE_HW_VBIF_H */
diff -Nruw linux-4.4.115-fbx/drivers/gpu/drm/msm/sde./sde_hw_wb.c linux-4.4.115-fbx/drivers/gpu/drm/msm/sde/sde_hw_wb.c
--- linux-4.4.115-fbx/drivers/gpu/drm/msm/sde./sde_hw_wb.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/gpu/drm/msm/sde/sde_hw_wb.c	2019-10-29 09:26:23.641203159 +0100
@@ -0,0 +1,229 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include "sde_hw_mdss.h"
+#include "sde_hwio.h"
+#include "sde_hw_catalog.h"
+#include "sde_hw_wb.h"
+#include "sde_formats.h"
+#include "sde_dbg.h"
+
+#define WB_DST_FORMAT			0x000
+#define WB_DST_OP_MODE			0x004
+#define WB_DST_PACK_PATTERN		0x008
+#define WB_DST0_ADDR			0x00C
+#define WB_DST1_ADDR			0x010
+#define WB_DST2_ADDR			0x014
+#define WB_DST3_ADDR			0x018
+#define WB_DST_YSTRIDE0			0x01C
+#define WB_DST_YSTRIDE1			0x020
+#define WB_DST_DITHER_BITDEPTH		0x024
+#define WB_DST_MATRIX_ROW0		0x030
+#define WB_DST_MATRIX_ROW1		0x034
+#define WB_DST_MATRIX_ROW2		0x038
+#define WB_DST_MATRIX_ROW3		0x03C
+#define WB_DST_WRITE_CONFIG		0x048
+#define WB_ROTATION_DNSCALER		0x050
+#define WB_ROTATOR_PIPE_DOWNSCALER	0x054
+#define WB_N16_INIT_PHASE_X_C03		0x060
+#define WB_N16_INIT_PHASE_X_C12		0x064
+#define WB_N16_INIT_PHASE_Y_C03		0x068
+#define WB_N16_INIT_PHASE_Y_C12		0x06C
+#define WB_OUT_SIZE			0x074
+#define WB_ALPHA_X_VALUE		0x078
+#define WB_CSC_BASE			0x260
+#define WB_DST_ADDR_SW_STATUS		0x2B0
+#define WB_CDP_CTRL			0x2B4
+#define WB_OUT_IMAGE_SIZE		0x2C0
+#define WB_OUT_XY			0x2C4
+
+static struct sde_wb_cfg *_wb_offset(enum sde_wb wb,
+		struct sde_mdss_cfg *m,
+		void __iomem *addr,
+		struct sde_hw_blk_reg_map *b)
+{
+	int i;
+
+	for (i = 0; i < m->wb_count; i++) {
+		if (wb == m->wb[i].id) {
+			b->base_off = addr;
+			b->blk_off = m->wb[i].base;
+			b->length = m->wb[i].len;
+			b->hwversion = m->hwversion;
+			b->log_mask = SDE_DBG_MASK_WB;
+			return &m->wb[i];
+		}
+	}
+	return ERR_PTR(-EINVAL);
+}
+
+static void sde_hw_wb_setup_outaddress(struct sde_hw_wb *ctx,
+		struct sde_hw_wb_cfg *data)
+{
+	struct sde_hw_blk_reg_map *c = &ctx->hw;
+
+	SDE_REG_WRITE(c, WB_DST0_ADDR, data->dest.plane_addr[0]);
+	SDE_REG_WRITE(c, WB_DST1_ADDR, data->dest.plane_addr[1]);
+	SDE_REG_WRITE(c, WB_DST2_ADDR, data->dest.plane_addr[2]);
+	SDE_REG_WRITE(c, WB_DST3_ADDR, data->dest.plane_addr[3]);
+}
+
+static void sde_hw_wb_setup_format(struct sde_hw_wb *ctx,
+		struct sde_hw_wb_cfg *data)
+{
+	struct sde_hw_blk_reg_map *c = &ctx->hw;
+	const struct sde_format *fmt = data->dest.format;
+	u32 dst_format, pattern, ystride0, ystride1, outsize, chroma_samp;
+	u32 write_config = 0;
+	u32 opmode = 0;
+	u32 dst_addr_sw = 0;
+	u32 cdp_settings = 0x0;
+
+	chroma_samp = fmt->chroma_sample;
+
+	dst_format = (chroma_samp << 23) |
+			(fmt->fetch_planes << 19) |
+			(fmt->bits[C3_ALPHA] << 6) |
+			(fmt->bits[C2_R_Cr] << 4) |
+			(fmt->bits[C1_B_Cb] << 2) |
+			(fmt->bits[C0_G_Y] << 0);
+
+	if (fmt->bits[C3_ALPHA] || fmt->alpha_enable) {
+		dst_format |= BIT(8); /* DSTC3_EN */
+		if (!fmt->alpha_enable ||
+				!(ctx->caps->features & BIT(SDE_WB_PIPE_ALPHA)))
+			dst_format |= BIT(14); /* DST_ALPHA_X */
+	}
+
+	if (SDE_FORMAT_IS_YUV(fmt) &&
+			(ctx->caps->features & BIT(SDE_WB_YUV_CONFIG)))
+		dst_format |= BIT(15);
+
+	if (SDE_FORMAT_IS_DX(fmt))
+		dst_format |= BIT(21);
+
+	pattern = (fmt->element[3] << 24) |
+			(fmt->element[2] << 16) |
+			(fmt->element[1] << 8)  |
+			(fmt->element[0] << 0);
+
+	dst_format |= (fmt->unpack_align_msb << 18) |
+			(fmt->unpack_tight << 17) |
+			((fmt->unpack_count - 1) << 12) |
+			((fmt->bpp - 1) << 9);
+
+	ystride0 = data->dest.plane_pitch[0] |
+			(data->dest.plane_pitch[1] << 16);
+	ystride1 = data->dest.plane_pitch[2] |
+			(data->dest.plane_pitch[3] << 16);
+
+	if (data->roi.h && data->roi.w)
+		outsize = (data->roi.h << 16) | data->roi.w;
+	else
+		outsize = (data->dest.height << 16) | data->dest.width;
+
+	if (SDE_FORMAT_IS_UBWC(fmt)) {
+		opmode |= BIT(0);
+		dst_format |= BIT(31);
+		if (ctx->highest_bank_bit)
+			write_config |= (ctx->highest_bank_bit << 8);
+		if (fmt->base.pixel_format == DRM_FORMAT_RGB565)
+			write_config |= 0x8;
+	}
+
+	if (data->is_secure)
+		dst_addr_sw |= BIT(0);
+
+	SDE_REG_WRITE(c, WB_ALPHA_X_VALUE, 0xFF);
+	SDE_REG_WRITE(c, WB_DST_FORMAT, dst_format);
+	SDE_REG_WRITE(c, WB_DST_OP_MODE, opmode);
+	SDE_REG_WRITE(c, WB_DST_PACK_PATTERN, pattern);
+	SDE_REG_WRITE(c, WB_DST_YSTRIDE0, ystride0);
+	SDE_REG_WRITE(c, WB_DST_YSTRIDE1, ystride1);
+	SDE_REG_WRITE(c, WB_OUT_SIZE, outsize);
+	SDE_REG_WRITE(c, WB_DST_WRITE_CONFIG, write_config);
+	SDE_REG_WRITE(c, WB_DST_ADDR_SW_STATUS, dst_addr_sw);
+
+	/* Enable CDP */
+	cdp_settings = BIT(0);
+
+	if (!SDE_FORMAT_IS_LINEAR(fmt))
+		cdp_settings |= BIT(1);
+
+	/* Enable 64 transactions if line mode */
+	if (data->intf_mode == INTF_MODE_WB_LINE)
+		cdp_settings |= BIT(3);
+
+	SDE_REG_WRITE(c, WB_CDP_CTRL, cdp_settings);
+}
+
+static void sde_hw_wb_roi(struct sde_hw_wb *ctx, struct sde_hw_wb_cfg *wb)
+{
+	struct sde_hw_blk_reg_map *c = &ctx->hw;
+	u32 image_size, out_size, out_xy;
+
+	image_size = (wb->dest.height << 16) | wb->dest.width;
+	out_xy = (wb->roi.y << 16) | wb->roi.x;
+	out_size = (wb->roi.h << 16) | wb->roi.w;
+
+	SDE_REG_WRITE(c, WB_OUT_IMAGE_SIZE, image_size);
+	SDE_REG_WRITE(c, WB_OUT_XY, out_xy);
+	SDE_REG_WRITE(c, WB_OUT_SIZE, out_size);
+}
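+
+/*
+ * The ROI registers each pack two 16-bit fields. Worked example
+ * (illustrative): a 1920x1080 destination with a 1280x720 ROI at (0, 360)
+ * programs image_size = (1080 << 16) | 1920, out_xy = (360 << 16) | 0 and
+ * out_size = (720 << 16) | 1280.
+ */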
+
+static void _setup_wb_ops(struct sde_hw_wb_ops *ops,
+	unsigned long features)
+{
+	ops->setup_outaddress = sde_hw_wb_setup_outaddress;
+	ops->setup_outformat = sde_hw_wb_setup_format;
+
+	if (test_bit(SDE_WB_XY_ROI_OFFSET, &features))
+		ops->setup_roi = sde_hw_wb_roi;
+}
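+
+/*
+ * Ops gated on a catalog feature are left NULL when the feature bit is
+ * absent, so callers check before use (illustrative):
+ *
+ *	if (hw_wb->ops.setup_roi)
+ *		hw_wb->ops.setup_roi(hw_wb, &wb_cfg);
+ */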
+
+struct sde_hw_wb *sde_hw_wb_init(enum sde_wb idx,
+		void __iomem *addr,
+		struct sde_mdss_cfg *m,
+		struct sde_hw_mdp *hw_mdp)
+{
+	struct sde_hw_wb *c;
+	struct sde_wb_cfg *cfg;
+
+	c = kzalloc(sizeof(*c), GFP_KERNEL);
+	if (!c)
+		return ERR_PTR(-ENOMEM);
+
+	cfg = _wb_offset(idx, m, addr, &c->hw);
+	if (IS_ERR(cfg)) {
+		WARN(1, "Unable to find wb idx=%d\n", idx);
+		kfree(c);
+		return ERR_PTR(-EINVAL);
+	}
+
+	/* Assign ops */
+	c->idx = idx;
+	c->caps = cfg;
+	_setup_wb_ops(&c->ops, c->caps->features);
+	c->highest_bank_bit = m->mdp[0].highest_bank_bit;
+	c->hw_mdp = hw_mdp;
+
+	sde_dbg_reg_register_dump_range(SDE_DBG_NAME, cfg->name, c->hw.blk_off,
+			c->hw.blk_off + c->hw.length, c->hw.xin_id);
+
+	return c;
+}
+
+void sde_hw_wb_destroy(struct sde_hw_wb *hw_wb)
+{
+	kfree(hw_wb);
+}
diff -Nruw linux-4.4.115-fbx/drivers/gpu/drm/msm/sde./sde_hw_wb.h linux-4.4.115-fbx/drivers/gpu/drm/msm/sde/sde_hw_wb.h
--- linux-4.4.115-fbx/drivers/gpu/drm/msm/sde./sde_hw_wb.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/gpu/drm/msm/sde/sde_hw_wb.h	2019-01-22 16:16:23.519246551 +0100
@@ -0,0 +1,105 @@
+/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _SDE_HW_WB_H
+#define _SDE_HW_WB_H
+
+#include "sde_hw_catalog.h"
+#include "sde_hw_mdss.h"
+#include "sde_hw_top.h"
+#include "sde_hw_util.h"
+
+struct sde_hw_wb;
+
+struct sde_hw_wb_cfg {
+	struct sde_hw_fmt_layout dest;
+	enum sde_intf_mode intf_mode;
+	struct traffic_shaper_cfg ts_cfg;
+	struct sde_rect roi;
+	bool is_secure;
+};
+
+/**
+ * struct sde_hw_wb_ops : Interface to the wb hw driver functions
+ * Assumption is these functions will be called after clocks are enabled
+ */
+struct sde_hw_wb_ops {
+	void (*setup_csc_data)(struct sde_hw_wb *ctx,
+			struct sde_csc_cfg *data);
+
+	void (*setup_outaddress)(struct sde_hw_wb *ctx,
+		struct sde_hw_wb_cfg *wb);
+
+	void (*setup_outformat)(struct sde_hw_wb *ctx,
+		struct sde_hw_wb_cfg *wb);
+
+	void (*setup_rotator)(struct sde_hw_wb *ctx,
+		struct sde_hw_wb_cfg *wb);
+
+	void (*setup_dither)(struct sde_hw_wb *ctx,
+		struct sde_hw_wb_cfg *wb);
+
+	void (*setup_cdwn)(struct sde_hw_wb *ctx,
+		struct sde_hw_wb_cfg *wb);
+
+	void (*setup_trafficshaper)(struct sde_hw_wb *ctx,
+		struct sde_hw_wb_cfg *wb);
+
+	void (*setup_roi)(struct sde_hw_wb *ctx,
+		struct sde_hw_wb_cfg *wb);
+};
+
+/**
+ * struct sde_hw_wb : WB driver object
+ * @hw:               block register map object
+ * @idx:              wb path index
+ * @caps:             wb hw capabilities from the catalog
+ * @ops:              function pointers
+ * @highest_bank_bit: GPU highest memory bank bit used
+ * @hw_mdp:           MDP top level hardware block
+ */
+struct sde_hw_wb {
+	/* base */
+	struct sde_hw_blk_reg_map hw;
+
+	/* wb path */
+	int idx;
+	const struct sde_wb_cfg *caps;
+
+	/* ops */
+	struct sde_hw_wb_ops ops;
+
+	u32 highest_bank_bit;
+
+	struct sde_hw_mdp *hw_mdp;
+};
+
+/**
+ * sde_hw_wb_init(): Initializes and return writeback hw driver object.
+ * @idx:  wb_path index for which driver object is required
+ * @addr: mapped register io address of MDP
+ * @m:    pointer to mdss catalog data
+ * @hw_mdp: pointer to mdp top hw driver object
+ */
+struct sde_hw_wb *sde_hw_wb_init(enum sde_wb idx,
+		void __iomem *addr,
+		struct sde_mdss_cfg *m,
+		struct sde_hw_mdp *hw_mdp);
+
+/**
+ * sde_hw_wb_destroy(): Destroy writeback hw driver object.
+ * @hw_wb:  Pointer to writeback hw driver object
+ */
+void sde_hw_wb_destroy(struct sde_hw_wb *hw_wb);
+
+#endif /*_SDE_HW_WB_H */
diff -Nruw linux-4.4.115-fbx/drivers/gpu/drm/msm/sde./sde_irq.c linux-4.4.115-fbx/drivers/gpu/drm/msm/sde/sde_irq.c
--- linux-4.4.115-fbx/drivers/gpu/drm/msm/sde./sde_irq.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/gpu/drm/msm/sde/sde_irq.c	2019-01-22 16:16:23.519246551 +0100
@@ -0,0 +1,112 @@
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt)	"[drm:%s:%d] " fmt, __func__, __LINE__
+
+#include <linux/irqdomain.h>
+#include <linux/irq.h>
+#include <linux/kthread.h>
+
+#include "sde_irq.h"
+#include "sde_core_irq.h"
+
+static uint32_t g_sde_irq_status;
+
+irqreturn_t sde_irq(struct msm_kms *kms)
+{
+	struct sde_kms *sde_kms = to_sde_kms(kms);
+	u32 interrupts;
+
+	sde_kms->hw_intr->ops.get_interrupt_sources(sde_kms->hw_intr,
+			&interrupts);
+
+	/* store irq status in case of irq-storm debugging */
+	g_sde_irq_status = interrupts;
+
+	/* handle the MDP interrupt */
+	if (interrupts & IRQ_SOURCE_MDP) {
+		interrupts &= ~IRQ_SOURCE_MDP;
+		sde_core_irq(sde_kms);
+	}
+
+	/* route all other interrupts to external drivers */
+	while (interrupts) {
+		irq_hw_number_t hwirq = fls(interrupts) - 1;
+		unsigned int mapping;
+		int rc;
+
+		mapping = irq_find_mapping(sde_kms->irq_controller.domain,
+				hwirq);
+		if (mapping == 0) {
+			SDE_EVT32(hwirq, SDE_EVTLOG_ERROR);
+			goto error;
+		}
+
+		rc = generic_handle_irq(mapping);
+		if (rc < 0) {
+			SDE_EVT32(hwirq, mapping, rc, SDE_EVTLOG_ERROR);
+			goto error;
+		}
+
+		interrupts &= ~(1 << hwirq);
+	}
+
+	return IRQ_HANDLED;
+
+error:
+	/* bad situation, inform irq system, it may disable overall MDSS irq */
+	return IRQ_NONE;
+}
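+
+/*
+ * Worked example of the dispatch loop above (illustrative): with
+ * interrupts == 0x12 (bits 1 and 4 set), fls() - 1 yields hwirq 4 first;
+ * once that bit is cleared the loop handles hwirq 1, so pending sources
+ * are serviced from the highest bit down.
+ */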
+
+void sde_irq_preinstall(struct msm_kms *kms)
+{
+	struct sde_kms *sde_kms = to_sde_kms(kms);
+
+	if (!sde_kms->dev || !sde_kms->dev->dev) {
+		pr_err("invalid device handles\n");
+		return;
+	}
+
+	sde_core_irq_preinstall(sde_kms);
+}
+
+int sde_irq_postinstall(struct msm_kms *kms)
+{
+	struct sde_kms *sde_kms = to_sde_kms(kms);
+	int rc;
+
+	if (!kms) {
+		SDE_ERROR("invalid parameters\n");
+		return -EINVAL;
+	}
+
+	rc = sde_core_irq_postinstall(sde_kms);
+
+	return rc;
+}
+
+void sde_irq_uninstall(struct msm_kms *kms)
+{
+	struct sde_kms *sde_kms = to_sde_kms(kms);
+
+	if (!kms) {
+		SDE_ERROR("invalid parameters\n");
+		return;
+	}
+
+	sde_core_irq_uninstall(sde_kms);
+	sde_core_irq_domain_fini(sde_kms);
+}
diff -Nruw linux-4.4.115-fbx/drivers/gpu/drm/msm/sde./sde_irq.h linux-4.4.115-fbx/drivers/gpu/drm/msm/sde/sde_irq.h
--- linux-4.4.115-fbx/drivers/gpu/drm/msm/sde./sde_irq.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/gpu/drm/msm/sde/sde_irq.h	2019-01-22 16:16:23.519246551 +0100
@@ -0,0 +1,59 @@
+/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __SDE_IRQ_H__
+#define __SDE_IRQ_H__
+
+#include <linux/kernel.h>
+#include <linux/irqdomain.h>
+
+#include "msm_kms.h"
+
+/**
+ * sde_irq_controller - define MDSS level interrupt controller context
+ * @enabled_mask:	enable status of MDSS level interrupt
+ * @domain:		interrupt domain of this controller
+ */
+struct sde_irq_controller {
+	unsigned long enabled_mask;
+	struct irq_domain *domain;
+};
+
+/**
+ * sde_irq_preinstall - perform pre-installation of MDSS IRQ handler
+ * @kms:		pointer to kms context
+ * @return:		none
+ */
+void sde_irq_preinstall(struct msm_kms *kms);
+
+/**
+ * sde_irq_postinstall - perform post-installation of MDSS IRQ handler
+ * @kms:		pointer to kms context
+ * @return:		0 if success; error code otherwise
+ */
+int sde_irq_postinstall(struct msm_kms *kms);
+
+/**
+ * sde_irq_uninstall - uninstall MDSS IRQ handler
+ * @kms:		pointer to kms context
+ * @return:		none
+ */
+void sde_irq_uninstall(struct msm_kms *kms);
+
+/**
+ * sde_irq - MDSS level IRQ handler
+ * @kms:		pointer to kms context
+ * @return:		interrupt handling status
+ */
+irqreturn_t sde_irq(struct msm_kms *kms);
+
+#endif /* __SDE_IRQ_H__ */
diff -Nruw linux-4.4.115-fbx/drivers/gpu/drm/msm/sde./sde_kms.c linux-4.4.115-fbx/drivers/gpu/drm/msm/sde/sde_kms.c
--- linux-4.4.115-fbx/drivers/gpu/drm/msm/sde./sde_kms.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/gpu/drm/msm/sde/sde_kms.c	2019-10-29 09:26:23.645203198 +0100
@@ -0,0 +1,1461 @@
+/*
+ * Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#define pr_fmt(fmt)	"[drm:%s:%d] " fmt, __func__, __LINE__
+
+#include <drm/drm_crtc.h>
+#include <linux/debugfs.h>
+
+#include "msm_drv.h"
+#include "msm_mmu.h"
+
+#include "dsi_display.h"
+#include "dsi_drm.h"
+#include "sde_wb.h"
+#include "sde_hdmi.h"
+
+#include "sde_kms.h"
+#include "sde_core_irq.h"
+#include "sde_formats.h"
+#include "sde_hw_vbif.h"
+#include "sde_vbif.h"
+#include "sde_encoder.h"
+#include "sde_plane.h"
+#include "sde_crtc.h"
+
+#define CREATE_TRACE_POINTS
+#include "sde_trace.h"
+
+/**
+ * Controls size of event log buffer. Specified as a power of 2.
+ */
+#define SDE_EVTLOG_SIZE	1024
+
+/*
+ * To enable overall DRM driver logging
+ * # echo 0x2 > /sys/module/drm/parameters/debug
+ *
+ * To enable DRM driver h/w logging
+ * # echo <mask> > /sys/kernel/debug/dri/0/hw_log_mask
+ *
+ * See sde_hw_mdss.h for h/w logging mask definitions (search for SDE_DBG_MASK_)
+ */
+#define SDE_DEBUGFS_DIR "msm_sde"
+#define SDE_DEBUGFS_HWMASKNAME "hw_log_mask"
+
+/**
+ * sdecustom - enable certain driver customizations for sde clients
+ *	Enabling this modifies the standard DRM behavior slightly and assumes
+ *	that the clients have specific knowledge about the modifications that
+ *	are involved, so don't enable this unless you know what you're doing.
+ *
+ *	Parts of the driver that are affected by this setting may be located by
+ *	searching for invocations of the 'sde_is_custom_client()' function.
+ *
+ *	This is enabled by default.
+ */
+static bool sdecustom = true;
+module_param(sdecustom, bool, 0400);
+MODULE_PARM_DESC(sdecustom, "Enable customizations for sde clients");
+
+static int sde_kms_hw_init(struct msm_kms *kms);
+static int _sde_kms_mmu_destroy(struct sde_kms *sde_kms);
+
+bool sde_is_custom_client(void)
+{
+	return sdecustom;
+}
+
+#ifdef CONFIG_DEBUG_FS
+static int _sde_danger_signal_status(struct seq_file *s,
+		bool danger_status)
+{
+	struct sde_kms *kms = (struct sde_kms *)s->private;
+	struct msm_drm_private *priv;
+	struct sde_danger_safe_status status;
+	int i;
+
+	if (!kms || !kms->dev || !kms->dev->dev_private || !kms->hw_mdp) {
+		SDE_ERROR("invalid arg(s)\n");
+		return 0;
+	}
+
+	priv = kms->dev->dev_private;
+	memset(&status, 0, sizeof(struct sde_danger_safe_status));
+
+	sde_power_resource_enable(&priv->phandle, kms->core_client, true);
+	if (danger_status) {
+		seq_puts(s, "\nDanger signal status:\n");
+		if (kms->hw_mdp->ops.get_danger_status)
+			kms->hw_mdp->ops.get_danger_status(kms->hw_mdp,
+					&status);
+	} else {
+		seq_puts(s, "\nSafe signal status:\n");
+		if (kms->hw_mdp->ops.get_safe_status)
+			kms->hw_mdp->ops.get_safe_status(kms->hw_mdp,
+					&status);
+	}
+	sde_power_resource_enable(&priv->phandle, kms->core_client, false);
+
+	seq_printf(s, "MDP     :  0x%x\n", status.mdp);
+
+	for (i = SSPP_VIG0; i < SSPP_MAX; i++)
+		seq_printf(s, "SSPP%d   :  0x%x  \t", i - SSPP_VIG0,
+				status.sspp[i]);
+	seq_puts(s, "\n");
+
+	for (i = WB_0; i < WB_MAX; i++)
+		seq_printf(s, "WB%d     :  0x%x  \t", i - WB_0,
+				status.wb[i]);
+	seq_puts(s, "\n");
+
+	return 0;
+}
+
+#define DEFINE_SDE_DEBUGFS_SEQ_FOPS(__prefix)				\
+static int __prefix ## _open(struct inode *inode, struct file *file)	\
+{									\
+	return single_open(file, __prefix ## _show, inode->i_private);	\
+}									\
+static const struct file_operations __prefix ## _fops = {		\
+	.owner = THIS_MODULE,						\
+	.open = __prefix ## _open,					\
+	.release = single_release,					\
+	.read = seq_read,						\
+	.llseek = seq_lseek,						\
+}
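+
+/*
+ * The macro above generates the single_open() plumbing for an existing
+ * <prefix>_show function, e.g. (as used below):
+ *
+ *	DEFINE_SDE_DEBUGFS_SEQ_FOPS(sde_debugfs_danger_stats);
+ *
+ * defines sde_debugfs_danger_stats_open() and sde_debugfs_danger_stats_fops
+ * around sde_debugfs_danger_stats_show().
+ */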
+
+static int sde_debugfs_danger_stats_show(struct seq_file *s, void *v)
+{
+	return _sde_danger_signal_status(s, true);
+}
+DEFINE_SDE_DEBUGFS_SEQ_FOPS(sde_debugfs_danger_stats);
+
+static int sde_debugfs_safe_stats_show(struct seq_file *s, void *v)
+{
+	return _sde_danger_signal_status(s, false);
+}
+DEFINE_SDE_DEBUGFS_SEQ_FOPS(sde_debugfs_safe_stats);
+
+static void sde_debugfs_danger_destroy(struct sde_kms *sde_kms)
+{
+	debugfs_remove_recursive(sde_kms->debugfs_danger);
+	sde_kms->debugfs_danger = NULL;
+}
+
+static int sde_debugfs_danger_init(struct sde_kms *sde_kms,
+		struct dentry *parent)
+{
+	sde_kms->debugfs_danger = debugfs_create_dir("danger",
+			parent);
+	if (!sde_kms->debugfs_danger) {
+		SDE_ERROR("failed to create danger debugfs\n");
+		return -EINVAL;
+	}
+
+	debugfs_create_file("danger_status", 0644, sde_kms->debugfs_danger,
+			sde_kms, &sde_debugfs_danger_stats_fops);
+	debugfs_create_file("safe_status", 0644, sde_kms->debugfs_danger,
+			sde_kms, &sde_debugfs_safe_stats_fops);
+
+	return 0;
+}
+
+static int _sde_debugfs_show_regset32(struct seq_file *s, void *data)
+{
+	struct sde_debugfs_regset32 *regset;
+	struct sde_kms *sde_kms;
+	struct drm_device *dev;
+	struct msm_drm_private *priv;
+	void __iomem *base;
+	uint32_t i, addr;
+
+	if (!s || !s->private)
+		return 0;
+
+	regset = s->private;
+
+	sde_kms = regset->sde_kms;
+	if (!sde_kms || !sde_kms->mmio)
+		return 0;
+
+	dev = sde_kms->dev;
+	if (!dev)
+		return 0;
+
+	priv = dev->dev_private;
+	if (!priv)
+		return 0;
+
+	base = sde_kms->mmio + regset->offset;
+
+	/* insert padding spaces, if needed */
+	if (regset->offset & 0xF) {
+		seq_printf(s, "[%x]", regset->offset & ~0xF);
+		for (i = 0; i < (regset->offset & 0xF); i += 4)
+			seq_puts(s, "         ");
+	}
+
+	if (sde_power_resource_enable(&priv->phandle,
+				sde_kms->core_client, true)) {
+		seq_puts(s, "failed to enable sde clocks\n");
+		return 0;
+	}
+
+	/* main register output */
+	for (i = 0; i < regset->blk_len; i += 4) {
+		addr = regset->offset + i;
+		if ((addr & 0xF) == 0x0)
+			seq_printf(s, i ? "\n[%x]" : "[%x]", addr);
+		seq_printf(s, " %08x", readl_relaxed(base + i));
+	}
+	seq_puts(s, "\n");
+	sde_power_resource_enable(&priv->phandle, sde_kms->core_client, false);
+
+	return 0;
+}
+
+static int sde_debugfs_open_regset32(struct inode *inode,
+		struct file *file)
+{
+	return single_open(file, _sde_debugfs_show_regset32, inode->i_private);
+}
+
+static const struct file_operations sde_fops_regset32 = {
+	.open =		sde_debugfs_open_regset32,
+	.read =		seq_read,
+	.llseek =	seq_lseek,
+	.release =	single_release,
+};
+
+void sde_debugfs_setup_regset32(struct sde_debugfs_regset32 *regset,
+		uint32_t offset, uint32_t length, struct sde_kms *sde_kms)
+{
+	if (regset) {
+		regset->offset = offset;
+		regset->blk_len = length;
+		regset->sde_kms = sde_kms;
+	}
+}
+
+void *sde_debugfs_create_regset32(const char *name, umode_t mode,
+		void *parent, struct sde_debugfs_regset32 *regset)
+{
+	if (!name || !regset || !regset->sde_kms || !regset->blk_len)
+		return NULL;
+
+	/* make sure offset is a multiple of 4 */
+	regset->offset = round_down(regset->offset, 4);
+
+	return debugfs_create_file(name, mode, parent,
+			regset, &sde_fops_regset32);
+}
+
+void *sde_debugfs_get_root(struct sde_kms *sde_kms)
+{
+	return sde_kms ? sde_kms->debugfs_root : NULL;
+}
+
+static int _sde_debugfs_init(struct sde_kms *sde_kms)
+{
+	void *p;
+
+	p = sde_hw_util_get_log_mask_ptr();
+
+	if (!sde_kms || !p)
+		return -EINVAL;
+
+	if (sde_kms->dev && sde_kms->dev->primary)
+		sde_kms->debugfs_root = sde_kms->dev->primary->debugfs_root;
+	else
+		sde_kms->debugfs_root = debugfs_create_dir(SDE_DEBUGFS_DIR,
+				NULL);
+
+	/* allow debugfs_root to be NULL */
+	debugfs_create_x32(SDE_DEBUGFS_HWMASKNAME,
+			0644, sde_kms->debugfs_root, p);
+
+	/* create common folder for debug information */
+	sde_kms->debugfs_debug = debugfs_create_dir("debug",
+			sde_kms->debugfs_root);
+	if (!sde_kms->debugfs_debug)
+		SDE_ERROR("failed to create debugfs debug directory\n");
+
+	sde_debugfs_danger_init(sde_kms, sde_kms->debugfs_debug);
+	sde_debugfs_vbif_init(sde_kms, sde_kms->debugfs_debug);
+
+	return 0;
+}
+
+static void _sde_debugfs_destroy(struct sde_kms *sde_kms)
+{
+	/* don't need to NULL check debugfs_root */
+	if (sde_kms) {
+		sde_debugfs_vbif_destroy(sde_kms);
+		sde_debugfs_danger_destroy(sde_kms);
+		debugfs_remove_recursive(sde_kms->debugfs_debug);
+		sde_kms->debugfs_debug = NULL;
+		debugfs_remove_recursive(sde_kms->debugfs_root);
+		sde_kms->debugfs_root = NULL;
+	}
+}
+#else
+static int _sde_debugfs_init(struct sde_kms *sde_kms)
+{
+	return 0;
+}
+
+static void _sde_debugfs_destroy(struct sde_kms *sde_kms)
+{
+}
+#endif
+
+static int sde_kms_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
+{
+	return sde_crtc_vblank(crtc, true);
+}
+
+static void sde_kms_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
+{
+	sde_crtc_vblank(crtc, false);
+}
+
+static void sde_kms_prepare_commit(struct msm_kms *kms,
+		struct drm_atomic_state *state)
+{
+	struct sde_kms *sde_kms = to_sde_kms(kms);
+	struct drm_device *dev = sde_kms->dev;
+	struct msm_drm_private *priv = dev->dev_private;
+
+	if (sde_kms->splash_info.handoff)
+		sde_splash_clean_up_exit_lk(kms);
+
+	sde_power_resource_enable(&priv->phandle, sde_kms->core_client, true);
+}
+
+static void sde_kms_commit(struct msm_kms *kms,
+		struct drm_atomic_state *old_state)
+{
+	struct drm_crtc *crtc;
+	struct drm_crtc_state *old_crtc_state;
+	int i;
+
+	for_each_crtc_in_state(old_state, crtc, old_crtc_state, i) {
+		if (crtc->state->active) {
+			SDE_EVT32(DRMID(crtc));
+			sde_crtc_commit_kickoff(crtc);
+		}
+	}
+}
+
+static void sde_kms_complete_commit(struct msm_kms *kms,
+		struct drm_atomic_state *old_state)
+{
+	struct sde_kms *sde_kms = to_sde_kms(kms);
+	struct drm_device *dev = sde_kms->dev;
+	struct msm_drm_private *priv = dev->dev_private;
+	struct drm_crtc *crtc;
+	struct drm_crtc_state *old_crtc_state;
+	int i;
+
+	for_each_crtc_in_state(old_state, crtc, old_crtc_state, i)
+		sde_crtc_complete_commit(crtc, old_crtc_state);
+	sde_power_resource_enable(&priv->phandle, sde_kms->core_client, false);
+
+	SDE_EVT32(SDE_EVTLOG_FUNC_EXIT);
+}
+
+static void sde_kms_wait_for_commit_done(struct msm_kms *kms,
+		struct drm_crtc *crtc)
+{
+	struct drm_encoder *encoder;
+	struct drm_device *dev;
+	int ret;
+
+	if (!kms || !crtc || !crtc->state) {
+		SDE_ERROR("invalid params\n");
+		return;
+	}
+
+	dev = crtc->dev;
+
+	if (!crtc->state->enable) {
+		SDE_DEBUG("[crtc:%d] not enabled\n", crtc->base.id);
+		return;
+	}
+
+	if (!crtc->state->active) {
+		SDE_DEBUG("[crtc:%d] not active\n", crtc->base.id);
+		return;
+	}
+
+	ret = drm_crtc_vblank_get(crtc);
+	if (ret)
+		return;
+
+	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+		if (encoder->crtc != crtc)
+			continue;
+		/*
+		 * Wait post-flush if necessary to delay before plane_cleanup
+		 * For example, wait for vsync in case of video mode panels
+		 * This should be a no-op for command mode panels
+		 */
+		SDE_EVT32(DRMID(crtc));
+		ret = sde_encoder_wait_for_commit_done(encoder);
+		if (ret && ret != -EWOULDBLOCK) {
+			SDE_ERROR("wait for commit done returned %d\n", ret);
+			break;
+		}
+	}
+
+	drm_crtc_vblank_put(crtc);
+}
+
+static void sde_kms_prepare_fence(struct msm_kms *kms,
+		struct drm_atomic_state *old_state)
+{
+	struct drm_crtc *crtc;
+	struct drm_crtc_state *old_crtc_state;
+	int i, rc;
+
+	if (!kms || !old_state || !old_state->dev || !old_state->acquire_ctx) {
+		SDE_ERROR("invalid argument(s)\n");
+		return;
+	}
+
+retry:
+	/* attempt to acquire ww mutex for connection */
+	rc = drm_modeset_lock(&old_state->dev->mode_config.connection_mutex,
+			       old_state->acquire_ctx);
+
+	if (rc == -EDEADLK) {
+		drm_modeset_backoff(old_state->acquire_ctx);
+		goto retry;
+	}
+
+	/* old_state actually contains updated crtc pointers */
+	for_each_crtc_in_state(old_state, crtc, old_crtc_state, i)
+		sde_crtc_prepare_commit(crtc, old_crtc_state);
+}
+
+/**
+ * _sde_kms_get_displays - query for underlying display handles and cache them
+ * @sde_kms:    Pointer to sde kms structure
+ * Returns:     Zero on success
+ */
+static int _sde_kms_get_displays(struct sde_kms *sde_kms)
+{
+	int rc = -ENOMEM;
+
+	if (!sde_kms) {
+		SDE_ERROR("invalid sde kms\n");
+		return -EINVAL;
+	}
+
+	/* dsi */
+	sde_kms->dsi_displays = NULL;
+	sde_kms->dsi_display_count = dsi_display_get_num_of_displays();
+	if (sde_kms->dsi_display_count) {
+		sde_kms->dsi_displays = kcalloc(sde_kms->dsi_display_count,
+				sizeof(void *),
+				GFP_KERNEL);
+		if (!sde_kms->dsi_displays) {
+			SDE_ERROR("failed to allocate dsi displays\n");
+			goto exit_deinit_dsi;
+		}
+		sde_kms->dsi_display_count =
+			dsi_display_get_active_displays(sde_kms->dsi_displays,
+					sde_kms->dsi_display_count);
+	}
+
+	/* wb */
+	sde_kms->wb_displays = NULL;
+	sde_kms->wb_display_count = sde_wb_get_num_of_displays();
+	if (sde_kms->wb_display_count) {
+		sde_kms->wb_displays = kcalloc(sde_kms->wb_display_count,
+				sizeof(void *),
+				GFP_KERNEL);
+		if (!sde_kms->wb_displays) {
+			SDE_ERROR("failed to allocate wb displays\n");
+			goto exit_deinit_wb;
+		}
+		sde_kms->wb_display_count =
+			wb_display_get_displays(sde_kms->wb_displays,
+					sde_kms->wb_display_count);
+	}
+
+	/* hdmi */
+	sde_kms->hdmi_displays = NULL;
+	sde_kms->hdmi_display_count = sde_hdmi_get_num_of_displays();
+	SDE_DEBUG("hdmi display count=%d\n", sde_kms->hdmi_display_count);
+	if (sde_kms->hdmi_display_count) {
+		sde_kms->hdmi_displays = kcalloc(sde_kms->hdmi_display_count,
+				  sizeof(void *),
+				  GFP_KERNEL);
+		if (!sde_kms->hdmi_displays) {
+			SDE_ERROR("failed to allocate hdmi displays\n");
+			goto exit_deinit_hdmi;
+		}
+		sde_kms->hdmi_display_count =
+			sde_hdmi_get_displays(sde_kms->hdmi_displays,
+				sde_kms->hdmi_display_count);
+	}
+
+	return 0;
+
+exit_deinit_hdmi:
+	sde_kms->hdmi_display_count = 0;
+	sde_kms->hdmi_displays = NULL;
+
+exit_deinit_wb:
+	kfree(sde_kms->wb_displays);
+	sde_kms->wb_display_count = 0;
+	sde_kms->wb_displays = NULL;
+
+exit_deinit_dsi:
+	kfree(sde_kms->dsi_displays);
+	sde_kms->dsi_display_count = 0;
+	sde_kms->dsi_displays = NULL;
+	return rc;
+}
+
+/**
+ * _sde_kms_release_displays - release cache of underlying display handles
+ * @sde_kms:    Pointer to sde kms structure
+ */
+static void _sde_kms_release_displays(struct sde_kms *sde_kms)
+{
+	if (!sde_kms) {
+		SDE_ERROR("invalid sde kms\n");
+		return;
+	}
+	kfree(sde_kms->hdmi_displays);
+	sde_kms->hdmi_display_count = 0;
+	sde_kms->hdmi_displays = NULL;
+
+	kfree(sde_kms->wb_displays);
+	sde_kms->wb_displays = NULL;
+	sde_kms->wb_display_count = 0;
+
+	kfree(sde_kms->dsi_displays);
+	sde_kms->dsi_displays = NULL;
+	sde_kms->dsi_display_count = 0;
+}
+
+/**
+ * _sde_kms_setup_displays - create encoders, bridges and connectors
+ *                           for underlying displays
+ * @dev:        Pointer to drm device structure
+ * @priv:       Pointer to private drm device data
+ * @sde_kms:    Pointer to sde kms structure
+ * Returns:     Zero on success
+ */
+static int _sde_kms_setup_displays(struct drm_device *dev,
+		struct msm_drm_private *priv,
+		struct sde_kms *sde_kms)
+{
+	static const struct sde_connector_ops dsi_ops = {
+		.post_init =  dsi_conn_post_init,
+		.detect =     dsi_conn_detect,
+		.get_modes =  dsi_connector_get_modes,
+		.mode_valid = dsi_conn_mode_valid,
+		.get_info =   dsi_display_get_info,
+		.set_backlight = dsi_display_set_backlight
+	};
+	static const struct sde_connector_ops wb_ops = {
+		.post_init =    sde_wb_connector_post_init,
+		.detect =       sde_wb_connector_detect,
+		.get_modes =    sde_wb_connector_get_modes,
+		.set_property = sde_wb_connector_set_property,
+		.get_info =     sde_wb_get_info,
+	};
+	static const struct sde_connector_ops hdmi_ops = {
+		.pre_deinit = sde_hdmi_connector_pre_deinit,
+		.post_init =  sde_hdmi_connector_post_init,
+		.detect =     sde_hdmi_connector_detect,
+		.get_modes =  sde_hdmi_connector_get_modes,
+		.mode_valid = sde_hdmi_mode_valid,
+		.get_info =   sde_hdmi_get_info,
+		.set_property = sde_hdmi_set_property,
+		.get_property = sde_hdmi_get_property,
+		.pre_kickoff = sde_hdmi_pre_kickoff,
+		.mode_needs_full_range = sde_hdmi_mode_needs_full_range,
+		.get_csc_type = sde_hdmi_get_csc_type
+	};
+	struct msm_display_info info = {0};
+	struct drm_encoder *encoder;
+	void *display, *connector;
+	int i, max_encoders;
+	int rc = 0;
+	int connector_poll;
+
+	if (!dev || !priv || !sde_kms) {
+		SDE_ERROR("invalid argument(s)\n");
+		return -EINVAL;
+	}
+
+	max_encoders = sde_kms->dsi_display_count +
+		sde_kms->wb_display_count +
+		sde_kms->hdmi_display_count;
+
+	if (max_encoders > ARRAY_SIZE(priv->encoders)) {
+		max_encoders = ARRAY_SIZE(priv->encoders);
+		SDE_ERROR("capping number of displays to %d\n", max_encoders);
+	}
+
+	/* dsi */
+	for (i = 0; i < sde_kms->dsi_display_count &&
+		priv->num_encoders < max_encoders; ++i) {
+		display = sde_kms->dsi_displays[i];
+		encoder = NULL;
+
+		memset(&info, 0x0, sizeof(info));
+		rc = dsi_display_get_info(&info, display);
+		if (rc) {
+			SDE_ERROR("dsi get_info %d failed\n", i);
+			continue;
+		}
+
+		encoder = sde_encoder_init(dev, &info);
+		if (IS_ERR_OR_NULL(encoder)) {
+			SDE_ERROR("encoder init failed for dsi %d\n", i);
+			continue;
+		}
+
+		rc = dsi_display_drm_bridge_init(display, encoder);
+		if (rc) {
+			SDE_ERROR("dsi bridge %d init failed, %d\n", i, rc);
+			sde_encoder_destroy(encoder);
+			continue;
+		}
+
+		connector = sde_connector_init(dev,
+					encoder,
+					0,
+					display,
+					&dsi_ops,
+					DRM_CONNECTOR_POLL_HPD,
+					DRM_MODE_CONNECTOR_DSI);
+		if (connector) {
+			priv->encoders[priv->num_encoders++] = encoder;
+		} else {
+			SDE_ERROR("dsi %d connector init failed\n", i);
+			dsi_display_drm_bridge_deinit(display);
+			sde_encoder_destroy(encoder);
+		}
+	}
+
+	/* wb */
+	for (i = 0; i < sde_kms->wb_display_count &&
+		priv->num_encoders < max_encoders; ++i) {
+		display = sde_kms->wb_displays[i];
+		encoder = NULL;
+
+		memset(&info, 0x0, sizeof(info));
+		rc = sde_wb_get_info(&info, display);
+		if (rc) {
+			SDE_ERROR("wb get_info %d failed\n", i);
+			continue;
+		}
+
+		encoder = sde_encoder_init(dev, &info);
+		if (IS_ERR_OR_NULL(encoder)) {
+			SDE_ERROR("encoder init failed for wb %d\n", i);
+			continue;
+		}
+
+		rc = sde_wb_drm_init(display, encoder);
+		if (rc) {
+			SDE_ERROR("wb bridge %d init failed, %d\n", i, rc);
+			sde_encoder_destroy(encoder);
+			continue;
+		}
+
+		connector = sde_connector_init(dev,
+				encoder,
+				0,
+				display,
+				&wb_ops,
+				DRM_CONNECTOR_POLL_HPD,
+				DRM_MODE_CONNECTOR_VIRTUAL);
+		if (connector) {
+			priv->encoders[priv->num_encoders++] = encoder;
+		} else {
+			SDE_ERROR("wb %d connector init failed\n", i);
+			sde_wb_drm_deinit(display);
+			sde_encoder_destroy(encoder);
+		}
+	}
+
+	/* hdmi */
+	for (i = 0; i < sde_kms->hdmi_display_count &&
+		priv->num_encoders < max_encoders; ++i) {
+		display = sde_kms->hdmi_displays[i];
+		encoder = NULL;
+
+		memset(&info, 0x0, sizeof(info));
+		rc = sde_hdmi_dev_init(display);
+		if (rc) {
+			SDE_ERROR("hdmi dev_init %d failed\n", i);
+			continue;
+		}
+		rc = sde_hdmi_get_info(&info, display);
+		if (rc) {
+			SDE_ERROR("hdmi get_info %d failed\n", i);
+			sde_hdmi_dev_deinit(display);
+			continue;
+		}
+		if (info.capabilities & MSM_DISPLAY_CAP_HOT_PLUG)
+			connector_poll = DRM_CONNECTOR_POLL_HPD;
+		else
+			connector_poll = 0;
+		encoder = sde_encoder_init(dev, &info);
+		if (IS_ERR_OR_NULL(encoder)) {
+			SDE_ERROR("encoder init failed for hdmi %d\n", i);
+			continue;
+		}
+
+		rc = sde_hdmi_drm_init(display, encoder);
+		if (rc) {
+			SDE_ERROR("hdmi drm %d init failed, %d\n", i, rc);
+			sde_encoder_destroy(encoder);
+			continue;
+		}
+
+		connector = sde_connector_init(dev,
+					encoder,
+					0,
+					display,
+					&hdmi_ops,
+					connector_poll,
+					DRM_MODE_CONNECTOR_HDMIA);
+		if (connector) {
+			priv->encoders[priv->num_encoders++] = encoder;
+		} else {
+			SDE_ERROR("hdmi %d connector init failed\n", i);
+			sde_hdmi_dev_deinit(display);
+			sde_hdmi_drm_deinit(display);
+			sde_encoder_destroy(encoder);
+		}
+	}
+
+	return 0;
+}
+
+static void _sde_kms_drm_obj_destroy(struct sde_kms *sde_kms)
+{
+	struct msm_drm_private *priv;
+	int i;
+
+	if (!sde_kms) {
+		SDE_ERROR("invalid sde_kms\n");
+		return;
+	} else if (!sde_kms->dev) {
+		SDE_ERROR("invalid dev\n");
+		return;
+	} else if (!sde_kms->dev->dev_private) {
+		SDE_ERROR("invalid dev_private\n");
+		return;
+	}
+	priv = sde_kms->dev->dev_private;
+
+	for (i = 0; i < priv->num_crtcs; i++)
+		priv->crtcs[i]->funcs->destroy(priv->crtcs[i]);
+	priv->num_crtcs = 0;
+
+	for (i = 0; i < priv->num_planes; i++)
+		priv->planes[i]->funcs->destroy(priv->planes[i]);
+	priv->num_planes = 0;
+
+	for (i = 0; i < priv->num_connectors; i++)
+		priv->connectors[i]->funcs->destroy(priv->connectors[i]);
+	priv->num_connectors = 0;
+
+	for (i = 0; i < priv->num_encoders; i++)
+		priv->encoders[i]->funcs->destroy(priv->encoders[i]);
+	priv->num_encoders = 0;
+
+	_sde_kms_release_displays(sde_kms);
+}
+
+static inline int sde_get_crtc_id(const char *display_type)
+{
+	if (!strcmp(display_type, "primary"))
+		return 0;
+	else if (!strcmp(display_type, "secondary"))
+		return 1;
+	else
+		return 2;
+}
+
+static int _sde_kms_drm_obj_init(struct sde_kms *sde_kms)
+{
+	struct drm_device *dev;
+	struct drm_plane *primary_planes[MAX_PLANES], *plane;
+	struct drm_crtc *crtc;
+
+	struct msm_drm_private *priv;
+	struct sde_mdss_cfg *catalog;
+
+	int primary_planes_idx, i, ret;
+	int max_crtc_count, max_plane_count;
+
+	if (!sde_kms || !sde_kms->dev || !sde_kms->dev->dev) {
+		SDE_ERROR("invalid sde_kms\n");
+		return -EINVAL;
+	}
+
+	dev = sde_kms->dev;
+	priv = dev->dev_private;
+	catalog = sde_kms->catalog;
+
+	ret = sde_core_irq_domain_add(sde_kms);
+	if (ret)
+		goto fail_irq;
+	/*
+	 * Query for underlying display drivers, and create connectors,
+	 * bridges and encoders for them.
+	 */
+	if (!_sde_kms_get_displays(sde_kms))
+		(void)_sde_kms_setup_displays(dev, priv, sde_kms);
+
+	max_crtc_count = min(catalog->mixer_count, priv->num_encoders);
+
+	/* Create the planes */
+	primary_planes_idx = 0;
+	if (catalog->vp_count) {
+		max_plane_count = min_t(u32, catalog->vp_count, MAX_PLANES);
+
+		for (i = 0; i < max_plane_count; i++) {
+			bool primary = true;
+			int crtc_id =
+				sde_get_crtc_id(catalog->vp[i].display_type);
+
+			if (strcmp(catalog->vp[i].plane_type, "primary"))
+				primary = false;
+
+			plane = sde_plane_init(dev, catalog->vp[i].id,
+					primary, 1UL << crtc_id, true);
+			if (IS_ERR(plane)) {
+				SDE_ERROR("sde_plane_init failed\n");
+				ret = PTR_ERR(plane);
+				goto fail;
+			}
+			priv->planes[priv->num_planes++] = plane;
+
+			if (primary) {
+				primary_planes[crtc_id] = plane;
+				primary_planes_idx++;
+			}
+		}
+	} else {
+		max_plane_count = min_t(u32, catalog->sspp_count, MAX_PLANES);
+
+		for (i = 0; i < max_plane_count; i++) {
+			bool primary = true;
+
+			if (catalog->sspp[i].features & BIT(SDE_SSPP_CURSOR)
+				|| primary_planes_idx >= max_crtc_count)
+				primary = false;
+
+			plane = sde_plane_init(dev, catalog->sspp[i].id,
+					primary, (1UL << max_crtc_count) - 1,
+					false);
+			if (IS_ERR(plane)) {
+				SDE_ERROR("sde_plane_init failed\n");
+				ret = PTR_ERR(plane);
+				goto fail;
+			}
+			priv->planes[priv->num_planes++] = plane;
+
+			if (primary)
+				primary_planes[primary_planes_idx++] = plane;
+		}
+	}
+
+	max_crtc_count = min(max_crtc_count, primary_planes_idx);
+
+	/* Create one CRTC per encoder */
+	for (i = 0; i < max_crtc_count; i++) {
+		crtc = sde_crtc_init(dev, primary_planes[i]);
+		if (IS_ERR(crtc)) {
+			ret = PTR_ERR(crtc);
+			goto fail;
+		}
+		priv->crtcs[priv->num_crtcs++] = crtc;
+	}
+
+	if (sde_is_custom_client()) {
+		/* All CRTCs are compatible with all planes */
+		for (i = 0; i < priv->num_planes; i++)
+			priv->planes[i]->possible_crtcs =
+				(1 << priv->num_crtcs) - 1;
+	}
+
+	/* All CRTCs are compatible with all encoders */
+	for (i = 0; i < priv->num_encoders; i++)
+		priv->encoders[i]->possible_crtcs = (1 << priv->num_crtcs) - 1;
+
+	return 0;
+fail:
+	_sde_kms_drm_obj_destroy(sde_kms);
+fail_irq:
+	sde_core_irq_domain_fini(sde_kms);
+	return ret;
+}
+
+static int sde_kms_postinit(struct msm_kms *kms)
+{
+	struct sde_kms *sde_kms = to_sde_kms(kms);
+	struct drm_device *dev;
+
+	if (!sde_kms || !sde_kms->dev || !sde_kms->dev->dev) {
+		SDE_ERROR("invalid sde_kms\n");
+		return -EINVAL;
+	}
+
+	dev = sde_kms->dev;
+
+	/*
+	 * Allow vblank interrupt to be disabled by drm vblank timer.
+	 */
+	dev->vblank_disable_allowed = true;
+
+	return 0;
+}
+
+static long sde_kms_round_pixclk(struct msm_kms *kms, unsigned long rate,
+		struct drm_encoder *encoder)
+{
+	return rate;
+}
+
+static void _sde_kms_hw_destroy(struct sde_kms *sde_kms,
+		struct platform_device *pdev)
+{
+	struct drm_device *dev;
+	struct msm_drm_private *priv;
+	int i;
+
+	if (!sde_kms || !pdev)
+		return;
+
+	dev = sde_kms->dev;
+	if (!dev)
+		return;
+
+	priv = dev->dev_private;
+	if (!priv)
+		return;
+
+	if (sde_kms->hw_intr)
+		sde_hw_intr_destroy(sde_kms->hw_intr);
+	sde_kms->hw_intr = NULL;
+
+	_sde_kms_release_displays(sde_kms);
+
+	/* safe to call these more than once during shutdown */
+	_sde_debugfs_destroy(sde_kms);
+	_sde_kms_mmu_destroy(sde_kms);
+	sde_core_perf_destroy(&sde_kms->perf);
+
+	if (sde_kms->catalog) {
+		for (i = 0; i < sde_kms->catalog->vbif_count; i++) {
+			u32 vbif_idx = sde_kms->catalog->vbif[i].id;
+
+			if ((vbif_idx < VBIF_MAX) && sde_kms->hw_vbif[vbif_idx])
+				sde_hw_vbif_destroy(sde_kms->hw_vbif[vbif_idx]);
+		}
+	}
+
+	if (sde_kms->rm_init)
+		sde_rm_destroy(&sde_kms->rm);
+	sde_kms->rm_init = false;
+
+	if (sde_kms->catalog)
+		sde_hw_catalog_deinit(sde_kms->catalog);
+	sde_kms->catalog = NULL;
+
+	if (sde_kms->splash_info.handoff) {
+		if (sde_kms->core_client)
+			sde_splash_destroy(&sde_kms->splash_info,
+				&priv->phandle, sde_kms->core_client);
+	}
+
+	if (sde_kms->core_client)
+		sde_power_client_destroy(&priv->phandle,
+				sde_kms->core_client);
+	sde_kms->core_client = NULL;
+
+	if (sde_kms->vbif[VBIF_NRT])
+		msm_iounmap(pdev, sde_kms->vbif[VBIF_NRT]);
+	sde_kms->vbif[VBIF_NRT] = NULL;
+
+	if (sde_kms->vbif[VBIF_RT])
+		msm_iounmap(pdev, sde_kms->vbif[VBIF_RT]);
+	sde_kms->vbif[VBIF_RT] = NULL;
+
+	if (sde_kms->mmio)
+		msm_iounmap(pdev, sde_kms->mmio);
+	sde_kms->mmio = NULL;
+}
+
+static void sde_kms_destroy(struct msm_kms *kms)
+{
+	struct sde_kms *sde_kms;
+	struct drm_device *dev;
+
+	if (!kms) {
+		SDE_ERROR("invalid kms\n");
+		return;
+	}
+
+	sde_kms = to_sde_kms(kms);
+	dev = sde_kms->dev;
+	if (!dev) {
+		SDE_ERROR("invalid device\n");
+		return;
+	}
+
+	_sde_kms_hw_destroy(sde_kms, dev->platformdev);
+	kfree(sde_kms);
+}
+
+static void sde_kms_preclose(struct msm_kms *kms, struct drm_file *file)
+{
+	struct sde_kms *sde_kms = to_sde_kms(kms);
+	struct drm_device *dev = sde_kms->dev;
+	struct msm_drm_private *priv = dev->dev_private;
+	int i;
+
+	for (i = 0; i < priv->num_crtcs; i++)
+		sde_crtc_cancel_pending_flip(priv->crtcs[i], file);
+}
+
+static const struct msm_kms_funcs kms_funcs = {
+	.hw_init         = sde_kms_hw_init,
+	.postinit        = sde_kms_postinit,
+	.irq_preinstall  = sde_irq_preinstall,
+	.irq_postinstall = sde_irq_postinstall,
+	.irq_uninstall   = sde_irq_uninstall,
+	.irq             = sde_irq,
+	.preclose        = sde_kms_preclose,
+	.prepare_fence   = sde_kms_prepare_fence,
+	.prepare_commit  = sde_kms_prepare_commit,
+	.commit          = sde_kms_commit,
+	.complete_commit = sde_kms_complete_commit,
+	.wait_for_crtc_commit_done = sde_kms_wait_for_commit_done,
+	.enable_vblank   = sde_kms_enable_vblank,
+	.disable_vblank  = sde_kms_disable_vblank,
+	.check_modified_format = sde_format_check_modified_format,
+	.get_format      = sde_get_msm_format,
+	.round_pixclk    = sde_kms_round_pixclk,
+	.destroy         = sde_kms_destroy,
+};
+
+/* the caller needs to turn on the core clock before calling this */
+static inline void _sde_kms_core_hw_rev_init(struct sde_kms *sde_kms)
+{
+	sde_kms->core_rev = readl_relaxed(sde_kms->mmio + 0x0);
+}
+
+static int _sde_kms_mmu_destroy(struct sde_kms *sde_kms)
+{
+	struct msm_mmu *mmu;
+	int i;
+
+	for (i = ARRAY_SIZE(sde_kms->aspace) - 1; i >= 0; i--) {
+		if (!sde_kms->aspace[i])
+			continue;
+
+		mmu = sde_kms->aspace[i]->mmu;
+
+		mmu->funcs->detach(mmu);
+		msm_gem_address_space_put(sde_kms->aspace[i]);
+
+		sde_kms->aspace[i] = NULL;
+	}
+
+	return 0;
+}
+
+static int _sde_kms_mmu_init(struct sde_kms *sde_kms)
+{
+	struct msm_mmu *mmu;
+	int i, ret;
+
+	for (i = 0; i < MSM_SMMU_DOMAIN_MAX; i++) {
+		struct msm_gem_address_space *aspace;
+
+		mmu = msm_smmu_new(sde_kms->dev->dev, i);
+		if (IS_ERR(mmu)) {
+			/* MMU's can be optional depending on platform */
+			ret = PTR_ERR(mmu);
+			DRM_INFO("failed to init iommu id %d: rc: %d\n", i,
+					ret);
+			continue;
+		}
+
+		/*
+		 * Attaching the SMMU means the IOMMU HW starts working
+		 * immediately. However, the display HW in LK is still
+		 * accessing memory while the memory map is not done yet.
+		 * So first set the DOMAIN_ATTR_EARLY_MAP attribute to 1
+		 * to bypass stage 1 translation in the IOMMU HW.
+		 */
+		if ((i == MSM_SMMU_DOMAIN_UNSECURE) &&
+				sde_kms->splash_info.handoff) {
+			ret = mmu->funcs->set_property(mmu,
+					DOMAIN_ATTR_EARLY_MAP,
+					&sde_kms->splash_info.handoff);
+			if (ret) {
+				SDE_ERROR("failed to set map att: %d\n", ret);
+				mmu->funcs->destroy(mmu);
+				goto fail;
+			}
+		}
+
+		aspace = msm_gem_smmu_address_space_create(sde_kms->dev->dev,
+			mmu, "sde");
+		if (IS_ERR(aspace)) {
+			ret = PTR_ERR(aspace);
+			mmu->funcs->destroy(mmu);
+			goto fail;
+		}
+
+		sde_kms->aspace[i] = aspace;
+
+		ret = mmu->funcs->attach(mmu, NULL, 0);
+		if (ret) {
+			SDE_ERROR("failed to attach iommu %d: %d\n", i, ret);
+			msm_gem_address_space_put(aspace);
+			goto fail;
+		}
+
+		/*
+		 * It's now safe to map the physical memory block LK accesses.
+		 */
+		if ((i == MSM_SMMU_DOMAIN_UNSECURE) &&
+				sde_kms->splash_info.handoff) {
+			ret = sde_splash_smmu_map(sde_kms->dev, mmu,
+					&sde_kms->splash_info);
+			if (ret) {
+				SDE_ERROR("map rsv mem failed: %d\n", ret);
+				msm_gem_address_space_put(aspace);
+				goto fail;
+			}
+		}
+	}
+
+	return 0;
+fail:
+	_sde_kms_mmu_destroy(sde_kms);
+
+	return ret;
+}
+
+static void __iomem *_sde_kms_ioremap(struct platform_device *pdev,
+		const char *name, unsigned long *out_size)
+{
+	struct resource *res;
+	unsigned long size;
+	void __iomem *ptr;
+
+	if (out_size)
+		*out_size = 0;
+
+	if (name)
+		res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
+	else
+		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+	if (!res) {
+		/* availability depends on platform */
+		SDE_DEBUG("failed to get memory resource: %s\n", name);
+		return NULL;
+	}
+
+	size = resource_size(res);
+
+	ptr = devm_ioremap_nocache(&pdev->dev, res->start, size);
+	if (!ptr) {
+		SDE_ERROR("failed to ioremap: %s\n", name);
+		return NULL;
+	}
+
+	SDE_DEBUG("IO:region %s %pK %08lx\n", name, ptr, size);
+
+	if (out_size)
+		*out_size = size;
+
+	return ptr;
+}
+
+
+static int sde_kms_hw_init(struct msm_kms *kms)
+{
+	struct sde_kms *sde_kms;
+	struct drm_device *dev;
+	struct msm_drm_private *priv;
+	struct sde_splash_info *sinfo;
+	int i, rc = -EINVAL;
+
+	if (!kms) {
+		SDE_ERROR("invalid kms\n");
+		goto end;
+	}
+
+	sde_kms = to_sde_kms(kms);
+	dev = sde_kms->dev;
+	if (!dev || !dev->platformdev) {
+		SDE_ERROR("invalid device\n");
+		goto end;
+	}
+
+	priv = dev->dev_private;
+	if (!priv) {
+		SDE_ERROR("invalid private data\n");
+		goto end;
+	}
+
+	sde_kms->mmio = _sde_kms_ioremap(dev->platformdev, "mdp_phys",
+			&sde_kms->mmio_len);
+	if (!sde_kms->mmio) {
+		SDE_ERROR("mdp register memory map failed\n");
+		goto error;
+	}
+	DRM_INFO("mapped mdp address space @%pK\n", sde_kms->mmio);
+
+	rc = sde_dbg_reg_register_base(SDE_DBG_NAME, sde_kms->mmio,
+			sde_kms->mmio_len);
+	if (rc)
+		SDE_ERROR("dbg base register kms failed: %d\n", rc);
+
+	sde_kms->vbif[VBIF_RT] = _sde_kms_ioremap(dev->platformdev, "vbif_phys",
+			&sde_kms->vbif_len[VBIF_RT]);
+	if (!sde_kms->vbif[VBIF_RT]) {
+		SDE_ERROR("vbif register memory map failed\n");
+		goto error;
+	}
+
+	rc = sde_dbg_reg_register_base("vbif_rt", sde_kms->vbif[VBIF_RT],
+				sde_kms->vbif_len[VBIF_RT]);
+	if (rc)
+		SDE_ERROR("dbg base register vbif_rt failed: %d\n", rc);
+
+	sde_kms->vbif[VBIF_NRT] = _sde_kms_ioremap(dev->platformdev,
+			"vbif_nrt_phys", &sde_kms->vbif_len[VBIF_NRT]);
+	if (!sde_kms->vbif[VBIF_NRT]) {
+		SDE_DEBUG("VBIF NRT is not defined\n");
+	} else {
+		rc = sde_dbg_reg_register_base("vbif_nrt",
+				sde_kms->vbif[VBIF_NRT],
+				sde_kms->vbif_len[VBIF_NRT]);
+		if (rc)
+			SDE_ERROR("dbg base register vbif_nrt failed: %d\n",
+					rc);
+	}
+
+	sde_kms->core_client = sde_power_client_create(&priv->phandle, "core");
+	if (IS_ERR_OR_NULL(sde_kms->core_client)) {
+		rc = PTR_ERR(sde_kms->core_client);
+		SDE_ERROR("sde power client create failed: %d\n", rc);
+		sde_kms->core_client = NULL;
+		goto error;
+	}
+
+	rc = sde_power_resource_enable(&priv->phandle, sde_kms->core_client,
+		true);
+	if (rc) {
+		SDE_ERROR("resource enable failed: %d\n", rc);
+		goto error;
+	}
+
+	_sde_kms_core_hw_rev_init(sde_kms);
+
+	pr_info("sde hardware revision:0x%x\n", sde_kms->core_rev);
+
+	sde_kms->catalog = sde_hw_catalog_init(dev, sde_kms->core_rev);
+	if (IS_ERR_OR_NULL(sde_kms->catalog)) {
+		rc = PTR_ERR(sde_kms->catalog);
+		SDE_ERROR("catalog init failed: %d\n", rc);
+		sde_kms->catalog = NULL;
+		goto power_error;
+	}
+
+	sde_dbg_init_dbg_buses(sde_kms->core_rev);
+
+	rc = sde_rm_init(&sde_kms->rm, sde_kms->catalog, sde_kms->mmio,
+			sde_kms->dev);
+	if (rc) {
+		SDE_ERROR("rm init failed: %d\n", rc);
+		goto power_error;
+	}
+
+	sde_kms->rm_init = true;
+
+	sde_kms->hw_mdp = sde_rm_get_mdp(&sde_kms->rm);
+	if (IS_ERR_OR_NULL(sde_kms->hw_mdp)) {
+		rc = PTR_ERR(sde_kms->hw_mdp);
+		SDE_ERROR("failed to get hw_mdp: %d\n", rc);
+		sde_kms->hw_mdp = NULL;
+		goto power_error;
+	}
+
+	/*
+	 * Read the DISP_INTF_SEL register to check
+	 * whether early display is enabled in LK.
+	 */
+	rc = sde_splash_get_handoff_status(kms);
+	if (rc) {
+		SDE_ERROR("get early splash status failed: %d\n", rc);
+		goto power_error;
+	}
+
+	/*
+	 * When LK has enabled early display, sde_splash_parse_dt and
+	 * sde_splash_init must be called. The former parses the mandatory
+	 * memory node used by the splash feature. The latter first votes
+	 * for bandwidth, since the display hardware is already accessing
+	 * the AHB data bus (the device would reboot otherwise), and then
+	 * checks whether the memory is reserved.
+	 */
+	sinfo = &sde_kms->splash_info;
+	if (sinfo->handoff) {
+		rc = sde_splash_parse_dt(dev);
+		if (rc) {
+			SDE_ERROR("parse dt for splash info failed: %d\n", rc);
+			goto power_error;
+		}
+
+		sde_splash_init(&priv->phandle, kms);
+	}
+
+	for (i = 0; i < sde_kms->catalog->vbif_count; i++) {
+		u32 vbif_idx = sde_kms->catalog->vbif[i].id;
+
+		sde_kms->hw_vbif[vbif_idx] = sde_hw_vbif_init(vbif_idx,
+				sde_kms->vbif[vbif_idx], sde_kms->catalog);
+		if (IS_ERR_OR_NULL(sde_kms->hw_vbif[vbif_idx])) {
+			rc = PTR_ERR(sde_kms->hw_vbif[vbif_idx]);
+			SDE_ERROR("failed to init vbif %d: %d\n", vbif_idx, rc);
+			sde_kms->hw_vbif[vbif_idx] = NULL;
+			goto power_error;
+		}
+	}
+
+	/*
+	 * Now we need to read the HW catalog and initialize resources such as
+	 * clocks, regulators, GDSC/MMAGIC, ioremap the register ranges etc
+	 */
+	rc = _sde_kms_mmu_init(sde_kms);
+	if (rc) {
+		SDE_ERROR("sde_kms_mmu_init failed: %d\n", rc);
+		goto power_error;
+	}
+
+	/*
+	 * NOTE: Calling sde_debugfs_init here so that the drm_minor device for
+	 *       'primary' is already created.
+	 */
+	rc = _sde_debugfs_init(sde_kms);
+	if (rc) {
+		SDE_ERROR("sde_debugfs init failed: %d\n", rc);
+		goto power_error;
+	}
+
+	rc = sde_core_perf_init(&sde_kms->perf, dev, sde_kms->catalog,
+			&priv->phandle, priv->pclient, "core_clk_src",
+			sde_kms->debugfs_debug);
+	if (rc) {
+		SDE_ERROR("failed to init perf %d\n", rc);
+		goto perf_err;
+	}
+
+	sde_kms->hw_intr = sde_hw_intr_init(sde_kms->mmio, sde_kms->catalog);
+	if (IS_ERR_OR_NULL(sde_kms->hw_intr)) {
+		rc = PTR_ERR(sde_kms->hw_intr);
+		SDE_ERROR("hw_intr init failed: %d\n", rc);
+		sde_kms->hw_intr = NULL;
+		goto hw_intr_init_err;
+	}
+
+	/*
+	 * _sde_kms_drm_obj_init should create the DRM related objects
+	 * i.e. CRTCs, planes, encoders, connectors and so forth
+	 */
+	rc = _sde_kms_drm_obj_init(sde_kms);
+	if (rc) {
+		SDE_ERROR("modeset init failed: %d\n", rc);
+		goto drm_obj_init_err;
+	}
+
+	dev->mode_config.min_width = 0;
+	dev->mode_config.min_height = 0;
+
+	/*
+	 * max crtc width is equal to the max mixer width * 2, and the max
+	 * height is 4K
+	 */
+	dev->mode_config.max_width = sde_kms->catalog->max_mixer_width * 2;
+	dev->mode_config.max_height = 4096;
+
+	/*
+	 * Support format modifiers for compression etc.
+	 */
+	dev->mode_config.allow_fb_modifiers = true;
+
+	if (!sde_kms->splash_info.handoff)
+		sde_power_resource_enable(&priv->phandle,
+				sde_kms->core_client, false);
+
+	return 0;
+
+drm_obj_init_err:
+	sde_core_perf_destroy(&sde_kms->perf);
+hw_intr_init_err:
+perf_err:
+power_error:
+	sde_power_resource_enable(&priv->phandle, sde_kms->core_client, false);
+error:
+	_sde_kms_hw_destroy(sde_kms, dev->platformdev);
+end:
+	return rc;
+}
+
+struct msm_kms *sde_kms_init(struct drm_device *dev)
+{
+	struct msm_drm_private *priv;
+	struct sde_kms *sde_kms;
+
+	if (!dev || !dev->dev_private) {
+		SDE_ERROR("drm device node invalid\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	priv = dev->dev_private;
+
+	sde_kms = kzalloc(sizeof(*sde_kms), GFP_KERNEL);
+	if (!sde_kms) {
+		SDE_ERROR("failed to allocate sde kms\n");
+		return ERR_PTR(-ENOMEM);
+	}
+
+	msm_kms_init(&sde_kms->base, &kms_funcs);
+	sde_kms->dev = dev;
+
+	return &sde_kms->base;
+}
diff -Nruw linux-4.4.115-fbx/drivers/gpu/drm/msm/sde./sde_kms.h linux-4.4.115-fbx/drivers/gpu/drm/msm/sde/sde_kms.h
--- linux-4.4.115-fbx/drivers/gpu/drm/msm/sde./sde_kms.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/gpu/drm/msm/sde/sde_kms.h	2019-10-29 09:26:23.645203198 +0100
@@ -0,0 +1,422 @@
+/*
+ * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __SDE_KMS_H__
+#define __SDE_KMS_H__
+
+#include "msm_drv.h"
+#include "msm_kms.h"
+#include "msm_mmu.h"
+#include "msm_gem.h"
+#include "sde_dbg.h"
+#include "sde_hw_catalog.h"
+#include "sde_hw_ctl.h"
+#include "sde_hw_lm.h"
+#include "sde_hw_interrupts.h"
+#include "sde_hw_wb.h"
+#include "sde_hw_top.h"
+#include "sde_rm.h"
+#include "sde_power_handle.h"
+#include "sde_irq.h"
+#include "sde_core_perf.h"
+#include "sde_splash.h"
+
+#define DRMID(x) ((x) ? (x)->base.id : -1)
+
+/**
+ * SDE_DEBUG - macro for kms/plane/crtc/encoder/connector logs
+ * @fmt: Pointer to format string
+ */
+#define SDE_DEBUG(fmt, ...)                                                \
+	do {                                                               \
+		if (unlikely(drm_debug & DRM_UT_KMS))                      \
+			drm_ut_debug_printk(__func__, fmt, ##__VA_ARGS__); \
+		else                                                       \
+			pr_debug(fmt, ##__VA_ARGS__);                      \
+	} while (0)
+
+/**
+ * SDE_DEBUG_DRIVER - macro for hardware driver logging
+ * @fmt: Pointer to format string
+ */
+#define SDE_DEBUG_DRIVER(fmt, ...)                                         \
+	do {                                                               \
+		if (unlikely(drm_debug & DRM_UT_DRIVER))                   \
+			drm_ut_debug_printk(__func__, fmt, ##__VA_ARGS__); \
+		else                                                       \
+			pr_debug(fmt, ##__VA_ARGS__);                      \
+	} while (0)
+
+#define SDE_ERROR(fmt, ...) pr_err("[sde error]" fmt, ##__VA_ARGS__)
+
+#define POPULATE_RECT(rect, a, b, c, d, Q16_flag) \
+	do {						\
+		(rect)->x = (Q16_flag) ? (a) >> 16 : (a);    \
+		(rect)->y = (Q16_flag) ? (b) >> 16 : (b);    \
+		(rect)->w = (Q16_flag) ? (c) >> 16 : (c);    \
+		(rect)->h = (Q16_flag) ? (d) >> 16 : (d);    \
+	} while (0)
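+
+/*
+ * Example (illustrative): with Q16_flag set, Q16.16 fixed-point inputs
+ * are truncated to integer pixels, e.g.
+ *	POPULATE_RECT(&r, 10 << 16, 20 << 16, 640 << 16, 480 << 16, true);
+ * leaves r = { .x = 10, .y = 20, .w = 640, .h = 480 }.
+ */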
+
+#define CHECK_LAYER_BOUNDS(offset, size, max_size) \
+	(((size) > (max_size)) || ((offset) > ((max_size) - (size))))
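+
+/*
+ * Example (illustrative): CHECK_LAYER_BOUNDS(50, 100, 120) is true because
+ * offset + size (150) exceeds max_size (120); the subtraction form avoids
+ * overflow for large offsets.
+ */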
+
+/**
+ * ktime_compare_safe - compare two ktime structures
+ *	This macro is similar to the standard ktime_compare() function, but
+ *	attempts to also handle ktime overflows.
+ * @A: First ktime value
+ * @B: Second ktime value
+ * Returns: -1 if A < B, 0 if A == B, 1 if A > B
+ */
+#define ktime_compare_safe(A, B) \
+	ktime_compare(ktime_sub((A), (B)), ktime_set(0, 0))
+
+#define SDE_NAME_SIZE  12
+
+/*
+ * struct sde_irq_callback - IRQ callback handlers
+ * @list: list to callback
+ * @func: intr handler
+ * @arg: argument for the handler
+ */
+struct sde_irq_callback {
+	struct list_head list;
+	void (*func)(void *arg, int irq_idx);
+	void *arg;
+};
+
+/**
+ * struct sde_irq - IRQ structure containing callback registration info
+ * @total_irqs:   total number of irq_idx mapped from HW interrupts
+ * @irq_cb_tbl:   array of IRQ callback lists
+ * @enable_counts: array of IRQ enable counts
+ * @irq_counts:   array of IRQ trigger counts, used for statistics
+ * @cb_lock:      callback lock
+ * @debugfs_file: debugfs file for irq statistics
+ */
+struct sde_irq {
+	u32 total_irqs;
+	struct list_head *irq_cb_tbl;
+	atomic_t *enable_counts;
+	atomic_t *irq_counts;
+	spinlock_t cb_lock;
+	struct dentry *debugfs_file;
+};
+
+struct sde_kms {
+	struct msm_kms base;
+	struct drm_device *dev;
+	int core_rev;
+	struct sde_mdss_cfg *catalog;
+
+	struct msm_gem_address_space *aspace[MSM_SMMU_DOMAIN_MAX];
+	struct sde_power_client *core_client;
+
+	/* directory entry for debugfs */
+	void *debugfs_root;
+	struct dentry *debugfs_debug;
+	struct dentry *debugfs_danger;
+	struct dentry *debugfs_vbif;
+
+	/* io/register spaces: */
+	void __iomem *mmio, *vbif[VBIF_MAX];
+	unsigned long mmio_len, vbif_len[VBIF_MAX];
+
+	struct regulator *vdd;
+	struct regulator *mmagic;
+	struct regulator *venus;
+
+	struct sde_irq_controller irq_controller;
+
+	struct sde_hw_intr *hw_intr;
+	struct sde_irq irq_obj;
+
+	struct sde_core_perf perf;
+
+	struct sde_rm rm;
+	bool rm_init;
+
+	struct sde_hw_vbif *hw_vbif[VBIF_MAX];
+	struct sde_hw_mdp *hw_mdp;
+	int dsi_display_count;
+	void **dsi_displays;
+	int wb_display_count;
+	void **wb_displays;
+	bool has_danger_ctrl;
+	void **hdmi_displays;
+	int hdmi_display_count;
+
+	/* splash handoff structure */
+	struct sde_splash_info splash_info;
+};
+
+struct vsync_info {
+	u32 frame_count;
+	u32 line_count;
+};
+
+#define to_sde_kms(x) container_of(x, struct sde_kms, base)
+
+/**
+ * sde_is_custom_client - whether or not to enable non-standard customizations
+ *
+ * Return: Whether or not the 'sdecustom' module parameter was set on boot up
+ */
+bool sde_is_custom_client(void);
+
+/**
+ * Debugfs functions - extra helper functions for debugfs support
+ *
+ * Main debugfs documentation is located at,
+ *
+ * Documentation/filesystems/debugfs.txt
+ *
+ * @sde_debugfs_setup_regset32: Initialize data for sde_debugfs_create_regset32
+ * @sde_debugfs_create_regset32: Create 32-bit register dump file
+ * @sde_debugfs_get_root: Get root dentry for SDE_KMS's debugfs node
+ */
+
+/**
+ * Companion structure for sde_debugfs_create_regset32. Do not initialize the
+ * members of this structure explicitly; use sde_debugfs_setup_regset32 instead.
+ */
+struct sde_debugfs_regset32 {
+	uint32_t offset;
+	uint32_t blk_len;
+	struct sde_kms *sde_kms;
+};
+
+/**
+ * sde_debugfs_setup_regset32 - Initialize register block definition for debugfs
+ * This function is meant to initialize sde_debugfs_regset32 structures for use
+ * with sde_debugfs_create_regset32.
+ * @regset: opaque register definition structure
+ * @offset: sub-block offset
+ * @length: sub-block length, in bytes
+ * @sde_kms: pointer to sde kms structure
+ */
+void sde_debugfs_setup_regset32(struct sde_debugfs_regset32 *regset,
+		uint32_t offset, uint32_t length, struct sde_kms *sde_kms);
+
+/**
+ * sde_debugfs_create_regset32 - Create register read back file for debugfs
+ *
+ * This function is almost identical to the standard debugfs_create_regset32()
+ * function, with the main difference being that a list of register
+ * names/offsets do not need to be provided. The 'read' function simply outputs
+ * sequential register values over a specified range.
+ *
+ * Similar to the related debugfs_create_regset32 API, the structure pointed to
+ * by regset needs to persist for the lifetime of the created file. The calling
+ * code is responsible for initialization/management of this structure.
+ *
+ * The structure pointed to by regset is meant to be opaque. Please use
+ * sde_debugfs_setup_regset32 to initialize it.
+ *
+ * @name:   File name within debugfs
+ * @mode:   File mode within debugfs
+ * @parent: Parent directory entry within debugfs, can be NULL
+ * @regset: Pointer to persistent register block definition
+ *
+ * Return: dentry pointer for newly created file, use either debugfs_remove()
+ *         or debugfs_remove_recursive() (on a parent directory) to remove the
+ *         file
+ */
+void *sde_debugfs_create_regset32(const char *name, umode_t mode,
+		void *parent, struct sde_debugfs_regset32 *regset);
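+
+/*
+ * Usage sketch (illustrative only; the offset, length and file name are
+ * hypothetical). The regset must outlive the debugfs file, hence static:
+ *
+ *	static struct sde_debugfs_regset32 regset;
+ *
+ *	sde_debugfs_setup_regset32(&regset, 0x1000, 0x100, sde_kms);
+ *	sde_debugfs_create_regset32("ctl_regs", 0444,
+ *			sde_debugfs_get_root(sde_kms), &regset);
+ */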
+
+/**
+ * sde_debugfs_get_root - Return root directory entry for SDE's debugfs
+ *
+ * The return value should be passed as the 'parent' argument to subsequent
+ * debugfs create calls.
+ *
+ * @sde_kms: Pointer to SDE's KMS structure
+ *
+ * Return: dentry pointer for SDE's debugfs location
+ */
+void *sde_debugfs_get_root(struct sde_kms *sde_kms);
+
+/**
+ * SDE info management functions
+ * These functions/definitions allow for building up a 'sde_info' structure
+ * containing one or more "key=value\n" entries.
+ */
+#define SDE_KMS_INFO_MAX_SIZE	4096
+
+/**
+ * struct sde_kms_info - connector information structure container
+ * @data: Array of information character data
+ * @len: Current length of information data
+ * @staged_len: Temporary data buffer length, commit to
+ *              len using sde_kms_info_stop
+ * @start: Whether or not a partial data entry was just started
+ */
+struct sde_kms_info {
+	char data[SDE_KMS_INFO_MAX_SIZE];
+	uint32_t len;
+	uint32_t staged_len;
+	bool start;
+};
+
+/**
+ * SDE_KMS_INFO_DATA - Macro for accessing sde_kms_info data bytes
+ * @S: Pointer to sde_kms_info structure
+ * Returns: Pointer to byte data
+ */
+#define SDE_KMS_INFO_DATA(S)    ((S) ? ((struct sde_kms_info *)(S))->data : 0)
+
+/**
+ * SDE_KMS_INFO_DATALEN - Macro for accessing sde_kms_info data length
+ *			it adds an extra character length to count null.
+ * @S: Pointer to sde_kms_info structure
+ * Returns: Size of available byte data
+ */
+#define SDE_KMS_INFO_DATALEN(S) ((S) ? ((struct sde_kms_info *)(S))->len + 1 \
+							: 0)
+
+/**
+ * sde_kms_info_reset - reset sde_kms_info structure
+ * @info: Pointer to sde_kms_info structure
+ */
+void sde_kms_info_reset(struct sde_kms_info *info);
+
+/**
+ * sde_kms_info_add_keyint - add integer value to 'sde_kms_info'
+ * @info: Pointer to sde_kms_info structure
+ * @key: Pointer to key string
+ * @value: Signed 32-bit integer value
+ */
+void sde_kms_info_add_keyint(struct sde_kms_info *info,
+		const char *key,
+		int32_t value);
+
+/**
+ * sde_kms_info_add_keystr - add string value to 'sde_kms_info'
+ * @info: Pointer to sde_kms_info structure
+ * @key: Pointer to key string
+ * @value: Pointer to string value
+ */
+void sde_kms_info_add_keystr(struct sde_kms_info *info,
+		const char *key,
+		const char *value);
+
+/**
+ * sde_kms_info_start - begin adding key to 'sde_kms_info'
+ * Usage:
+ *      sde_kms_info_start(key)
+ *      sde_kms_info_append(val_1)
+ *      ...
+ *      sde_kms_info_append(val_n)
+ *      sde_kms_info_stop
+ * @info: Pointer to sde_kms_info structure
+ * @key: Pointer to key string
+ */
+void sde_kms_info_start(struct sde_kms_info *info,
+		const char *key);
+
+/**
+ * sde_kms_info_append - append value string to 'sde_kms_info'
+ * Usage:
+ *      sde_kms_info_start(key)
+ *      sde_kms_info_append(val_1)
+ *      ...
+ *      sde_kms_info_append(val_n)
+ *      sde_kms_info_stop
+ * @info: Pointer to sde_kms_info structure
+ * @str: Pointer to partial value string
+ */
+void sde_kms_info_append(struct sde_kms_info *info,
+		const char *str);
+
+/**
+ * sde_kms_info_append_format - append format code string to 'sde_kms_info'
+ * Usage:
+ *      sde_kms_info_start(key)
+ *      sde_kms_info_append_format(fourcc, modifier)
+ *      ...
+ *      sde_kms_info_stop
+ * @info: Pointer to sde_kms_info structure
+ * @pixel_format: FOURCC format code
+ * @modifier: 64-bit drm format modifier
+ */
+void sde_kms_info_append_format(struct sde_kms_info *info,
+		uint32_t pixel_format,
+		uint64_t modifier);
+
+/**
+ * sde_kms_info_stop - finish adding key to 'sde_kms_info'
+ * Usage:
+ *      sde_kms_info_start(key)
+ *      sde_kms_info_append(val_1)
+ *      ...
+ *      sde_kms_info_append(val_n)
+ *      sde_kms_info_stop
+ * @info: Pointer to sde_kms_info structure
+ */
+void sde_kms_info_stop(struct sde_kms_info *info);
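+
+/*
+ * Example (illustrative): the sequence
+ *
+ *	sde_kms_info_start(info, "rotation");
+ *	sde_kms_info_append(info, "90");
+ *	sde_kms_info_stop(info);
+ *
+ * commits "rotation=90\n" to info->data; sde_kms_info_append() concatenates
+ * directly, while sde_kms_info_append_format() space-separates entries.
+ */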
+
+/**
+ * sde_kms_rect_intersect - intersect two rectangles
+ * @r1: first rectangle
+ * @r2: scissor rectangle
+ * @result: result rectangle, all 0's on no intersection found
+ */
+void sde_kms_rect_intersect(const struct sde_rect *r1,
+		const struct sde_rect *r2,
+		struct sde_rect *result);
+
+/**
+ * sde_kms_rect_is_equal - compares two rects
+ * @r1: rect value to compare
+ * @r2: rect value to compare
+ *
+ * Returns true if the rects are equal, false otherwise.
+ */
+static inline bool sde_kms_rect_is_equal(struct sde_rect *r1,
+		struct sde_rect *r2)
+{
+	if ((!r1 && r2) || (r1 && !r2))
+		return false;
+
+	if (!r1 && !r2)
+		return true;
+
+	return r1->x == r2->x && r1->y == r2->y && r1->w == r2->w &&
+			r1->h == r2->h;
+}
+
+/**
+ * sde_kms_rect_is_null - returns true if the width or height of a rect is 0
+ * @r: rectangle to check for zero size
+ * Return: true if the width or height of the rectangle is 0
+ */
+static inline bool sde_kms_rect_is_null(const struct sde_rect *r)
+{
+	if (!r)
+		return true;
+
+	return (!r->w || !r->h);
+}
+
+/**
+ * Vblank enable/disable functions
+ */
+int sde_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc);
+void sde_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc);
+
+#endif /* __SDE_KMS_H__ */
diff -Nruw linux-4.4.115-fbx/drivers/gpu/drm/msm/sde./sde_kms_utils.c linux-4.4.115-fbx/drivers/gpu/drm/msm/sde/sde_kms_utils.c
--- linux-4.4.115-fbx/drivers/gpu/drm/msm/sde./sde_kms_utils.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/gpu/drm/msm/sde/sde_kms_utils.c	2019-01-22 16:16:23.519246551 +0100
@@ -0,0 +1,177 @@
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt)	"sde-kms_utils:[%s] " fmt, __func__
+
+#include "sde_kms.h"
+
+void sde_kms_info_reset(struct sde_kms_info *info)
+{
+	if (info) {
+		info->len = 0;
+		info->staged_len = 0;
+	}
+}
+
+void sde_kms_info_add_keyint(struct sde_kms_info *info,
+		const char *key,
+		int32_t value)
+{
+	uint32_t len;
+
+	if (info && key) {
+		len = snprintf(info->data + info->len,
+				SDE_KMS_INFO_MAX_SIZE - info->len,
+				"%s=%d\n",
+				key,
+				value);
+
+		/* check if snprintf truncated the string */
+		if ((info->len + len) < SDE_KMS_INFO_MAX_SIZE)
+			info->len += len;
+	}
+}
+
+void sde_kms_info_add_keystr(struct sde_kms_info *info,
+		const char *key,
+		const char *value)
+{
+	uint32_t len;
+
+	if (info && key && value) {
+		len = snprintf(info->data + info->len,
+				SDE_KMS_INFO_MAX_SIZE - info->len,
+				"%s=%s\n",
+				key,
+				value);
+
+		/* check if snprintf truncated the string */
+		if ((info->len + len) < SDE_KMS_INFO_MAX_SIZE)
+			info->len += len;
+	}
+}
+
+void sde_kms_info_start(struct sde_kms_info *info,
+		const char *key)
+{
+	uint32_t len;
+
+	if (info && key) {
+		len = snprintf(info->data + info->len,
+				SDE_KMS_INFO_MAX_SIZE - info->len,
+				"%s=",
+				key);
+
+		info->start = true;
+
+		/* check if snprintf truncated the string */
+		if ((info->len + len) < SDE_KMS_INFO_MAX_SIZE)
+			info->staged_len = info->len + len;
+	}
+}
+
+void sde_kms_info_append(struct sde_kms_info *info,
+		const char *str)
+{
+	uint32_t len;
+
+	if (info) {
+		len = snprintf(info->data + info->staged_len,
+				SDE_KMS_INFO_MAX_SIZE - info->staged_len,
+				"%s",
+				str);
+
+		/* check if snprintf truncated the string */
+		if ((info->staged_len + len) < SDE_KMS_INFO_MAX_SIZE) {
+			info->staged_len += len;
+			info->start = false;
+		}
+	}
+}
+
+void sde_kms_info_append_format(struct sde_kms_info *info,
+		uint32_t pixel_format,
+		uint64_t modifier)
+{
+	uint32_t len;
+
+	if (!info)
+		return;
+
+	if (modifier) {
+		len = snprintf(info->data + info->staged_len,
+				SDE_KMS_INFO_MAX_SIZE - info->staged_len,
+				info->start ?
+				"%c%c%c%c/%llX/%llX" : " %c%c%c%c/%llX/%llX",
+				(pixel_format >> 0) & 0xFF,
+				(pixel_format >> 8) & 0xFF,
+				(pixel_format >> 16) & 0xFF,
+				(pixel_format >> 24) & 0xFF,
+				(modifier >> 56) & 0xFF,
+				modifier & ((1ULL << 56) - 1));
+	} else {
+		len = snprintf(info->data + info->staged_len,
+				SDE_KMS_INFO_MAX_SIZE - info->staged_len,
+				info->start ?
+				"%c%c%c%c" : " %c%c%c%c",
+				(pixel_format >> 0) & 0xFF,
+				(pixel_format >> 8) & 0xFF,
+				(pixel_format >> 16) & 0xFF,
+				(pixel_format >> 24) & 0xFF);
+	}
+
+	/* check if snprintf truncated the string */
+	if ((info->staged_len + len) < SDE_KMS_INFO_MAX_SIZE) {
+		info->staged_len += len;
+		info->start = false;
+	}
+}
+
+void sde_kms_info_stop(struct sde_kms_info *info)
+{
+	uint32_t len;
+
+	if (info) {
+		/* insert final delimiter */
+		len = snprintf(info->data + info->staged_len,
+				SDE_KMS_INFO_MAX_SIZE - info->staged_len,
+				"\n");
+
+		/* check if snprintf truncated the string */
+		if ((info->staged_len + len) < SDE_KMS_INFO_MAX_SIZE)
+			info->len = info->staged_len + len;
+	}
+}
+
+void sde_kms_rect_intersect(const struct sde_rect *r1,
+		const struct sde_rect *r2,
+		struct sde_rect *result)
+{
+	int l, t, r, b;
+
+	if (!r1 || !r2 || !result)
+		return;
+
+	l = max(r1->x, r2->x);
+	t = max(r1->y, r2->y);
+	r = min((r1->x + r1->w), (r2->x + r2->w));
+	b = min((r1->y + r1->h), (r2->y + r2->h));
+
+	if (r < l || b < t) {
+		memset(result, 0, sizeof(*result));
+	} else {
+		result->x = l;
+		result->y = t;
+		result->w = r - l;
+		result->h = b - t;
+	}
+}
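+
+/*
+ * Worked example (illustrative): intersecting r1 = {0, 0, 100, 100} with
+ * the scissor r2 = {50, 50, 100, 100} yields {50, 50, 50, 50}; disjoint
+ * rectangles yield an all-zero result.
+ */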
diff -Nruw linux-4.4.115-fbx/drivers/gpu/drm/msm/sde./sde_plane.c linux-4.4.115-fbx/drivers/gpu/drm/msm/sde/sde_plane.c
--- linux-4.4.115-fbx/drivers/gpu/drm/msm/sde./sde_plane.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/gpu/drm/msm/sde/sde_plane.c	2019-10-29 09:26:23.645203198 +0100
@@ -0,0 +1,2878 @@
+/*
+ * Copyright (C) 2014-2017 The Linux Foundation. All rights reserved.
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#define pr_fmt(fmt)	"[drm:%s:%d] " fmt, __func__, __LINE__
+
+#include <linux/debugfs.h>
+#include <uapi/drm/sde_drm.h>
+#include <uapi/drm/msm_drm_pp.h>
+
+#include "msm_prop.h"
+
+#include "sde_kms.h"
+#include "sde_fence.h"
+#include "sde_formats.h"
+#include "sde_hw_sspp.h"
+#include "sde_trace.h"
+#include "sde_crtc.h"
+#include "sde_vbif.h"
+#include "sde_plane.h"
+#include "sde_color_processing.h"
+
+static bool suspend_blank = true;
+module_param(suspend_blank, bool, 0400);
+MODULE_PARM_DESC(suspend_blank,
+		"If set, active planes will force their outputs to black\n"
+		"by temporarily enabling the color fill when resuming from\n"
+		"a system suspend, instead of attempting to display the\n"
+		"last provided frame buffer.");
+
+#define SDE_DEBUG_PLANE(pl, fmt, ...) SDE_DEBUG("plane%d " fmt,\
+		(pl) ? (pl)->base.base.id : -1, ##__VA_ARGS__)
+
+#define SDE_ERROR_PLANE(pl, fmt, ...) SDE_ERROR("plane%d " fmt,\
+		(pl) ? (pl)->base.base.id : -1, ##__VA_ARGS__)
+
+#define DECIMATED_DIMENSION(dim, deci) (((dim) + ((1 << (deci)) - 1)) >> (deci))
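+/* e.g. DECIMATED_DIMENSION(1080, 1) == 540: the ceiling of dim / 2^deci */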
+#define PHASE_STEP_SHIFT	21
+#define PHASE_STEP_UNIT_SCALE   ((int) (1 << PHASE_STEP_SHIFT))
+#define PHASE_RESIDUAL		15
+
+#define SHARP_STRENGTH_DEFAULT	32
+#define SHARP_EDGE_THR_DEFAULT	112
+#define SHARP_SMOOTH_THR_DEFAULT	8
+#define SHARP_NOISE_THR_DEFAULT	2
+
+#define SDE_NAME_SIZE  12
+
+#define SDE_PLANE_COLOR_FILL_FLAG	BIT(31)
+
+/* dirty bits for update function */
+#define SDE_PLANE_DIRTY_RECTS	0x1
+#define SDE_PLANE_DIRTY_FORMAT	0x2
+#define SDE_PLANE_DIRTY_SHARPEN	0x4
+#define SDE_PLANE_DIRTY_ALL	0xFFFFFFFF
+
+#define SDE_QSEED3_DEFAULT_PRELOAD_H 0x4
+#define SDE_QSEED3_DEFAULT_PRELOAD_V 0x3
+
+/**
+ * enum sde_plane_qos - Different qos configurations for each pipe
+ *
+ * @SDE_PLANE_QOS_VBLANK_CTRL: Setup VBLANK qos for the pipe.
+ * @SDE_PLANE_QOS_VBLANK_AMORTIZE: Enables Amortization within pipe.
+ *	this configuration is mutually exclusive from VBLANK_CTRL.
+ * @SDE_PLANE_QOS_PANIC_CTRL: Setup panic for the pipe.
+ */
+enum sde_plane_qos {
+	SDE_PLANE_QOS_VBLANK_CTRL = BIT(0),
+	SDE_PLANE_QOS_VBLANK_AMORTIZE = BIT(1),
+	SDE_PLANE_QOS_PANIC_CTRL = BIT(2),
+};
+
+/*
+ * struct sde_phy_plane - physical plane structure
+ * @sde_plane: Points to virtual plane
+ * @phy_plane_list: list of hw pipe(physical plane)
+ * @index: index of physical plane (starts from 0, order from left to right)
+ * @features: capabilities from catalog
+ * @csc_cfg: Decoded user configuration for csc
+ * @csc_usr_ptr: Points to csc_cfg if valid user config available
+ * @csc_ptr: Points to sde_csc_cfg structure to use for current
+ */
+struct sde_phy_plane {
+	struct sde_plane *sde_plane;
+	struct list_head phy_plane_list;
+	enum sde_sspp pipe;
+	uint32_t index;
+
+	uint32_t features;
+	uint32_t nformats;
+	uint32_t formats[64];
+
+	struct sde_hw_pipe *pipe_hw;
+	struct sde_hw_pipe_cfg pipe_cfg;
+	struct sde_hw_sharp_cfg sharp_cfg;
+	struct sde_hw_scaler3_cfg *scaler3_cfg;
+	struct sde_hw_pipe_qos_cfg pipe_qos_cfg;
+	uint32_t color_fill;
+	bool is_rt_pipe;
+
+	struct sde_hw_pixel_ext pixel_ext;
+	bool pixel_ext_usr;
+
+	struct sde_csc_cfg csc_cfg;
+	struct sde_csc_cfg *csc_usr_ptr;
+	struct sde_csc_cfg *csc_ptr;
+
+	const struct sde_sspp_sub_blks *pipe_sblk;
+};
+
+/*
+ * struct sde_plane - local sde plane structure
+ */
+struct sde_plane {
+	struct drm_plane base;
+
+	struct msm_gem_address_space *aspace;
+	struct mutex lock;
+	bool is_error;
+	char pipe_name[SDE_NAME_SIZE];
+
+	struct list_head phy_plane_head;
+	u32 num_of_phy_planes;
+
+	struct msm_property_info property_info;
+	struct msm_property_data property_data[PLANE_PROP_COUNT];
+	struct drm_property_blob *blob_info;
+
+	/* debugfs related stuff */
+	struct dentry *debugfs_root;
+	struct sde_debugfs_regset32 debugfs_src;
+	struct sde_debugfs_regset32 debugfs_scaler;
+	struct sde_debugfs_regset32 debugfs_csc;
+	bool debugfs_default_scale;
+};
+
+#define to_sde_plane(x) container_of(x, struct sde_plane, base)
+
+static bool sde_plane_enabled(struct drm_plane_state *state)
+{
+	return state && state->fb && state->crtc;
+}
+
+static struct sde_kms *_sde_plane_get_kms(struct drm_plane *plane)
+{
+	struct msm_drm_private *priv;
+
+	if (!plane || !plane->dev)
+		return NULL;
+
+	priv = plane->dev->dev_private;
+	if (!priv)
+		return NULL;
+
+	return to_sde_kms(priv->kms);
+}
+
+/**
+ * _sde_plane_calc_fill_level - calculate fill level of the given source format
+ * @plane:		Pointer to drm plane
+ * @fmt:		Pointer to source buffer format
+ * @src_wdith:		width of source buffer
+ * Return: fill level corresponding to the source buffer/format or 0 if error
+ */
+static inline int _sde_plane_calc_fill_level(struct sde_phy_plane *pp,
+		const struct sde_format *fmt, u32 src_width)
+{
+	struct sde_plane *psde;
+	u32 fixed_buff_size;
+	u32 total_fl;
+
+	if (!pp || !fmt) {
+		SDE_ERROR("invalid arguments\n");
+		return 0;
+	}
+
+	psde = pp->sde_plane;
+	fixed_buff_size = pp->pipe_sblk->pixel_ram_size;
+
+	if (fmt->fetch_planes == SDE_PLANE_PSEUDO_PLANAR) {
+		if (fmt->chroma_sample == SDE_CHROMA_420) {
+			/* NV12 */
+			total_fl = (fixed_buff_size / 2) /
+				((src_width + 32) * fmt->bpp);
+		} else {
+			/* non NV12 */
+			total_fl = (fixed_buff_size) /
+				((src_width + 32) * fmt->bpp);
+		}
+	} else {
+		total_fl = (fixed_buff_size * 2) /
+			((src_width + 32) * fmt->bpp);
+	}
+
+	SDE_DEBUG("plane%u: pnum:%d fmt:%x w:%u fl:%u\n",
+			psde->base.base.id, pp->pipe - SSPP_VIG0,
+			fmt->base.pixel_format, src_width, total_fl);
+
+	return total_fl;
+}
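+
+/*
+ * Worked example with illustrative numbers: an NV12 source (pseudo
+ * planar, 420 subsampling) 1888 pixels wide with bpp 1 on a pipe with a
+ * hypothetical 50KB pixel RAM gives
+ * total_fl = (51200 / 2) / ((1888 + 32) * 1) = 25600 / 1920 = 13.
+ */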
+
+/**
+ * _sde_plane_get_qos_lut_linear - get linear LUT mapping
+ * @total_fl:		fill level
+ * Return: LUT setting corresponding to the fill level
+ */
+static inline u32 _sde_plane_get_qos_lut_linear(u32 total_fl)
+{
+	u32 qos_lut;
+
+	if (total_fl <= 4)
+		qos_lut = 0x1B;
+	else if (total_fl <= 5)
+		qos_lut = 0x5B;
+	else if (total_fl <= 6)
+		qos_lut = 0x15B;
+	else if (total_fl <= 7)
+		qos_lut = 0x55B;
+	else if (total_fl <= 8)
+		qos_lut = 0x155B;
+	else if (total_fl <= 9)
+		qos_lut = 0x555B;
+	else if (total_fl <= 10)
+		qos_lut = 0x1555B;
+	else if (total_fl <= 11)
+		qos_lut = 0x5555B;
+	else if (total_fl <= 12)
+		qos_lut = 0x15555B;
+	else
+		qos_lut = 0x55555B;
+
+	return qos_lut;
+}
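+
+/*
+ * The words above pack per-fill-level priority codes: each deeper fill
+ * level extends the pattern by one more 2-bit code, which is why the
+ * values grow nibble by nibble (0x1B, 0x5B, 0x15B, ... 0x55555B).
+ */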
+
+/**
+ * _sde_plane_get_qos_lut_macrotile - get macrotile LUT mapping
+ * @total_fl:		fill level
+ * Return: LUT setting corresponding to the fill level
+ */
+static inline u32 _sde_plane_get_qos_lut_macrotile(u32 total_fl)
+{
+	u32 qos_lut;
+
+	if (total_fl <= 10)
+		qos_lut = 0x1AAff;
+	else if (total_fl <= 11)
+		qos_lut = 0x5AAFF;
+	else if (total_fl <= 12)
+		qos_lut = 0x15AAFF;
+	else
+		qos_lut = 0x55AAFF;
+
+	return qos_lut;
+}
+
+/**
+ * _sde_plane_set_qos_lut - set QoS LUT of the given plane
+ * @pp:			Pointer to physical plane
+ * @fb:			Pointer to framebuffer associated with the given plane
+ */
+static void _sde_plane_set_qos_lut(struct sde_phy_plane *pp,
+		struct drm_framebuffer *fb)
+{
+	struct sde_plane *psde;
+	const struct sde_format *fmt = NULL;
+	u32 qos_lut;
+	u32 total_fl = 0;
+
+	if (!pp || !fb) {
+		SDE_ERROR("invalid arguments phy_plane %d fb %d\n",
+				pp != NULL, fb != NULL);
+		return;
+	}
+
+	psde = pp->sde_plane;
+
+	if (!pp->pipe_hw || !pp->pipe_sblk) {
+		SDE_ERROR("invalid arguments\n");
+		return;
+	} else if (!pp->pipe_hw->ops.setup_creq_lut) {
+		return;
+	}
+
+	if (!pp->is_rt_pipe) {
+		qos_lut = pp->pipe_sblk->creq_lut_nrt;
+	} else {
+		fmt = sde_get_sde_format_ext(
+				fb->pixel_format,
+				fb->modifier,
+				drm_format_num_planes(fb->pixel_format));
+		total_fl = _sde_plane_calc_fill_level(pp, fmt,
+				pp->pipe_cfg.src_rect.w);
+
+		if (SDE_FORMAT_IS_LINEAR(fmt))
+			qos_lut = _sde_plane_get_qos_lut_linear(total_fl);
+		else
+			qos_lut = _sde_plane_get_qos_lut_macrotile(total_fl);
+	}
+
+	pp->pipe_qos_cfg.creq_lut = qos_lut;
+
+	trace_sde_perf_set_qos_luts(pp->pipe - SSPP_VIG0,
+			(fmt) ? fmt->base.pixel_format : 0,
+			pp->is_rt_pipe, total_fl, qos_lut,
+			(fmt) ? SDE_FORMAT_IS_LINEAR(fmt) : 0);
+
+	SDE_DEBUG("plane%u: pnum:%d fmt:%x rt:%d fl:%u lut:0x%x\n",
+			psde->base.base.id,
+			pp->pipe - SSPP_VIG0,
+			(fmt) ? fmt->base.pixel_format : 0,
+			pp->is_rt_pipe, total_fl, qos_lut);
+
+	pp->pipe_hw->ops.setup_creq_lut(pp->pipe_hw, &pp->pipe_qos_cfg);
+}
+
+/**
+ * _sde_plane_set_danger_lut - set danger/safe LUT of the given plane
+ * @pp:			Pointer to physical plane
+ * @fb:			Pointer to framebuffer associated with the given plane
+ */
+static void _sde_plane_set_danger_lut(struct sde_phy_plane *pp,
+		struct drm_framebuffer *fb)
+{
+	struct sde_plane *psde;
+	const struct sde_format *fmt = NULL;
+	u32 danger_lut, safe_lut;
+
+	if (!pp || !fb) {
+		SDE_ERROR("invalid arguments\n");
+		return;
+	}
+
+	psde = pp->sde_plane;
+
+	if (!pp->pipe_hw || !pp->pipe_sblk) {
+		SDE_ERROR("invalid arguments\n");
+		return;
+	} else if (!pp->pipe_hw->ops.setup_danger_safe_lut) {
+		return;
+	}
+
+	if (!pp->is_rt_pipe) {
+		danger_lut = pp->pipe_sblk->danger_lut_nrt;
+		safe_lut = pp->pipe_sblk->safe_lut_nrt;
+	} else {
+		fmt = sde_get_sde_format_ext(
+				fb->pixel_format,
+				fb->modifier,
+				drm_format_num_planes(fb->pixel_format));
+
+		if (SDE_FORMAT_IS_LINEAR(fmt)) {
+			danger_lut = pp->pipe_sblk->danger_lut_linear;
+			safe_lut = pp->pipe_sblk->safe_lut_linear;
+		} else {
+			danger_lut = pp->pipe_sblk->danger_lut_tile;
+			safe_lut = pp->pipe_sblk->safe_lut_tile;
+		}
+	}
+
+	pp->pipe_qos_cfg.danger_lut = danger_lut;
+	pp->pipe_qos_cfg.safe_lut = safe_lut;
+
+	trace_sde_perf_set_danger_luts(pp->pipe - SSPP_VIG0,
+			(fmt) ? fmt->base.pixel_format : 0,
+			(fmt) ? fmt->fetch_mode : 0,
+			pp->pipe_qos_cfg.danger_lut,
+			pp->pipe_qos_cfg.safe_lut);
+
+	SDE_DEBUG("plane%u: pnum:%d fmt:%x mode:%d luts[0x%x, 0x%x]\n",
+		psde->base.base.id,
+		pp->pipe - SSPP_VIG0,
+		fmt ? fmt->base.pixel_format : 0,
+		fmt ? fmt->fetch_mode : -1,
+		pp->pipe_qos_cfg.danger_lut,
+		pp->pipe_qos_cfg.safe_lut);
+
+	pp->pipe_hw->ops.setup_danger_safe_lut(pp->pipe_hw,
+			&pp->pipe_qos_cfg);
+}
+
+/**
+ * _sde_plane_set_qos_ctrl - set QoS control of the given plane
+ * @pp:			Pointer to physical plane
+ * @enable:		true to enable QoS control
+ * @flags:		QoS control mode (enum sde_plane_qos)
+ */
+static void _sde_plane_set_qos_ctrl(struct sde_phy_plane *pp,
+	bool enable, u32 flags)
+{
+	struct sde_plane *psde;
+
+	if (!pp) {
+		SDE_ERROR("invalid arguments\n");
+		return;
+	}
+
+	psde = pp->sde_plane;
+
+	if (!pp->pipe_hw || !pp->pipe_sblk) {
+		SDE_ERROR("invalid arguments\n");
+		return;
+	} else if (!pp->pipe_hw->ops.setup_qos_ctrl) {
+		return;
+	}
+
+	if (flags & SDE_PLANE_QOS_VBLANK_CTRL) {
+		pp->pipe_qos_cfg.creq_vblank = pp->pipe_sblk->creq_vblank;
+		pp->pipe_qos_cfg.danger_vblank =
+				pp->pipe_sblk->danger_vblank;
+		pp->pipe_qos_cfg.vblank_en = enable;
+	}
+
+	if (flags & SDE_PLANE_QOS_VBLANK_AMORTIZE) {
+		/* this feature overrules previous VBLANK_CTRL */
+		pp->pipe_qos_cfg.vblank_en = false;
+		pp->pipe_qos_cfg.creq_vblank = 0; /* clear vblank bits */
+	}
+
+	if (flags & SDE_PLANE_QOS_PANIC_CTRL)
+		pp->pipe_qos_cfg.danger_safe_en = enable;
+
+	if (!pp->is_rt_pipe) {
+		pp->pipe_qos_cfg.vblank_en = false;
+		pp->pipe_qos_cfg.danger_safe_en = false;
+	}
+
+	SDE_DEBUG("plane%u: pnum:%d ds:%d vb:%d pri[0x%x, 0x%x] is_rt:%d\n",
+		psde->base.base.id,
+		pp->pipe - SSPP_VIG0,
+		pp->pipe_qos_cfg.danger_safe_en,
+		pp->pipe_qos_cfg.vblank_en,
+		pp->pipe_qos_cfg.creq_vblank,
+		pp->pipe_qos_cfg.danger_vblank,
+		pp->is_rt_pipe);
+
+	pp->pipe_hw->ops.setup_qos_ctrl(pp->pipe_hw,
+			&pp->pipe_qos_cfg);
+}
+
+static int sde_plane_danger_signal_ctrl(struct sde_phy_plane *pp, bool enable)
+{
+	struct sde_plane *psde;
+	struct msm_drm_private *priv;
+	struct sde_kms *sde_kms;
+
+	if (!pp) {
+		SDE_ERROR("invalid arguments\n");
+		return -EINVAL;
+	}
+	psde = pp->sde_plane;
+
+	if (!psde->base.dev) {
+		SDE_ERROR("invalid arguments\n");
+		return -EINVAL;
+	}
+
+	priv = psde->base.dev->dev_private;
+	if (!priv || !priv->kms) {
+		SDE_ERROR("invalid KMS reference\n");
+		return -EINVAL;
+	}
+
+	sde_kms = to_sde_kms(priv->kms);
+
+	if (!pp->is_rt_pipe)
+		goto end;
+
+	sde_power_resource_enable(&priv->phandle, sde_kms->core_client, true);
+
+	_sde_plane_set_qos_ctrl(pp, enable, SDE_PLANE_QOS_PANIC_CTRL);
+
+	sde_power_resource_enable(&priv->phandle, sde_kms->core_client, false);
+
+end:
+	return 0;
+}
+
+/**
+ * _sde_plane_set_ot_limit - set OT limit for the given plane
+ * @pp:			Pointer to physical plane
+ * @crtc:		Pointer to drm crtc
+ */
+static void _sde_plane_set_ot_limit(struct sde_phy_plane *pp,
+		struct drm_crtc *crtc)
+{
+	struct sde_plane *psde;
+	struct sde_vbif_set_ot_params ot_params;
+	struct msm_drm_private *priv;
+	struct sde_kms *sde_kms;
+
+	if (!pp || !crtc) {
+		SDE_ERROR("invalid arguments phy_plane %d crtc %d\n",
+				pp != NULL, crtc != NULL);
+		return;
+	}
+	psde = pp->sde_plane;
+	if (!psde->base.dev) {
+		SDE_ERROR("invalid DRM device\n");
+		return;
+	}
+
+	priv = psde->base.dev->dev_private;
+	if (!priv || !priv->kms) {
+		SDE_ERROR("invalid KMS reference\n");
+		return;
+	}
+
+	sde_kms = to_sde_kms(priv->kms);
+	if (!pp->pipe_hw) {
+		SDE_ERROR("invalid pipe reference\n");
+		return;
+	}
+
+	memset(&ot_params, 0, sizeof(ot_params));
+	ot_params.xin_id = pp->pipe_hw->cap->xin_id;
+	ot_params.num = pp->pipe_hw->idx - SSPP_NONE;
+	ot_params.width = pp->pipe_cfg.src_rect.w;
+	ot_params.height = pp->pipe_cfg.src_rect.h;
+	ot_params.is_wfd = !pp->is_rt_pipe;
+	ot_params.frame_rate = crtc->mode.vrefresh;
+	ot_params.vbif_idx = VBIF_RT;
+	ot_params.clk_ctrl = pp->pipe_hw->cap->clk_ctrl;
+	ot_params.rd = true;
+
+	sde_vbif_set_ot_limit(sde_kms, &ot_params);
+}
+
+/* helper to update a state's input fence pointer from the property */
+static void _sde_plane_set_input_fence(struct sde_plane *psde,
+		struct sde_plane_state *pstate, uint64_t fd)
+{
+	if (!psde || !pstate) {
+		SDE_ERROR("invalid arg(s), plane %d state %d\n",
+				psde != 0, pstate != 0);
+		return;
+	}
+
+	/* clear previous reference */
+	if (pstate->input_fence)
+		sde_sync_put(pstate->input_fence);
+
+	/* get fence pointer for later */
+	pstate->input_fence = sde_sync_get(fd);
+
+	SDE_DEBUG_PLANE(psde, "0x%llX\n", fd);
+}
+
+int sde_plane_wait_input_fence(struct drm_plane *plane, uint32_t wait_ms)
+{
+	struct sde_plane *psde;
+	struct sde_plane_state *pstate;
+	uint32_t prefix;
+	void *input_fence;
+	int ret = -EINVAL;
+
+	if (!plane) {
+		SDE_ERROR("invalid plane\n");
+	} else if (!plane->state) {
+		SDE_ERROR_PLANE(to_sde_plane(plane), "invalid state\n");
+	} else {
+		psde = to_sde_plane(plane);
+		pstate = to_sde_plane_state(plane->state);
+		input_fence = pstate->input_fence;
+
+		if (input_fence) {
+			prefix = sde_sync_get_name_prefix(input_fence);
+			ret = sde_sync_wait(input_fence, wait_ms);
+
+			SDE_EVT32(DRMID(plane), -ret, prefix);
+
+			switch (ret) {
+			case 0:
+				SDE_DEBUG_PLANE(psde, "signaled\n");
+				break;
+			case -ETIME:
+				SDE_ERROR_PLANE(psde, "%ums timeout on %08X\n",
+						wait_ms, prefix);
+				psde->is_error = true;
+				break;
+			default:
+				SDE_ERROR_PLANE(psde, "error %d on %08X\n",
+						ret, prefix);
+				psde->is_error = true;
+				break;
+			}
+		} else {
+			ret = 0;
+		}
+	}
+	return ret;
+}
+
+/**
+ * _sde_plane_get_aspace - get the address space based on the
+ *	fb_translation mode property
+ */
+static int _sde_plane_get_aspace(
+		struct sde_plane *psde,
+		struct sde_plane_state *pstate,
+		struct msm_gem_address_space **aspace)
+{
+	struct sde_kms *kms;
+	int mode;
+
+	if (!psde || !pstate || !aspace) {
+		SDE_ERROR("invalid parameters\n");
+		return -EINVAL;
+	}
+
+	kms = _sde_plane_get_kms(&psde->base);
+	if (!kms) {
+		SDE_ERROR("invalid kms\n");
+		return -EINVAL;
+	}
+
+	mode = sde_plane_get_property(pstate,
+			PLANE_PROP_FB_TRANSLATION_MODE);
+
+	switch (mode) {
+	case SDE_DRM_FB_NON_SEC:
+		*aspace = kms->aspace[MSM_SMMU_DOMAIN_UNSECURE];
+		if (!*aspace)
+			return -EINVAL;
+		break;
+	case SDE_DRM_FB_SEC:
+		*aspace = kms->aspace[MSM_SMMU_DOMAIN_SECURE];
+		if (!*aspace)
+			return -EINVAL;
+		break;
+	case SDE_DRM_FB_SEC_DIR_TRANS:
+	case SDE_DRM_FB_NON_SEC_DIR_TRANS:
+		*aspace = NULL;
+		break;
+	default:
+		SDE_ERROR("invalid fb_translation mode:%d\n", mode);
+		return -EFAULT;
+	}
+
+	return 0;
+}
+
+static inline void _sde_plane_set_scanout(struct sde_phy_plane *pp,
+		struct sde_plane_state *pstate,
+		struct sde_hw_pipe_cfg *pipe_cfg,
+		struct drm_framebuffer *fb)
+{
+	struct sde_plane *psde;
+	struct msm_gem_address_space *aspace = NULL;
+	int ret;
+
+	if (!pp || !pstate || !pipe_cfg || !fb) {
+		SDE_ERROR(
+			"invalid arg(s), phy_plane %d state %d cfg %d fb %d\n",
+			pp != 0, pstate != 0, pipe_cfg != 0, fb != 0);
+		return;
+	}
+
+	psde = pp->sde_plane;
+	if (!pp->pipe_hw) {
+		SDE_ERROR_PLANE(psde, "invalid pipe_hw\n");
+		return;
+	}
+
+	ret = _sde_plane_get_aspace(psde, pstate, &aspace);
+	if (ret) {
+		SDE_ERROR_PLANE(psde, "Failed to get aspace %d\n", ret);
+		return;
+	}
+
+	ret = sde_format_populate_layout(aspace, fb, &pipe_cfg->layout);
+	if (ret == -EAGAIN)
+		SDE_DEBUG_PLANE(psde, "not updating same src addrs\n");
+	else if (ret)
+		SDE_ERROR_PLANE(psde, "failed to get format layout, %d\n", ret);
+	else if (pp->pipe_hw && pp->pipe_hw->ops.setup_sourceaddress)
+		pp->pipe_hw->ops.setup_sourceaddress(pp->pipe_hw, pipe_cfg);
+}
+
+static int _sde_plane_setup_scaler3_lut(struct sde_phy_plane *pp,
+		struct sde_plane_state *pstate)
+{
+	struct sde_plane *psde = pp->sde_plane;
+	struct sde_hw_scaler3_cfg *cfg;
+	int ret = 0;
+
+	if (!pp || !pp->scaler3_cfg) {
+		SDE_ERROR("invalid args\n");
+		return -EINVAL;
+	} else if (!pstate) {
+		/* pstate is expected to be null on forced color fill */
+		SDE_DEBUG("null pstate\n");
+		return -EINVAL;
+	}
+
+	cfg = pp->scaler3_cfg;
+
+	cfg->dir_lut = msm_property_get_blob(
+			&psde->property_info,
+			pstate->property_blobs, &cfg->dir_len,
+			PLANE_PROP_SCALER_LUT_ED);
+	cfg->cir_lut = msm_property_get_blob(
+			&psde->property_info,
+			pstate->property_blobs, &cfg->cir_len,
+			PLANE_PROP_SCALER_LUT_CIR);
+	cfg->sep_lut = msm_property_get_blob(
+			&psde->property_info,
+			pstate->property_blobs, &cfg->sep_len,
+			PLANE_PROP_SCALER_LUT_SEP);
+	if (!cfg->dir_lut || !cfg->cir_lut || !cfg->sep_lut)
+		ret = -ENODATA;
+	return ret;
+}
+
+static void _sde_plane_setup_scaler3(struct sde_phy_plane *pp,
+		uint32_t src_w, uint32_t src_h, uint32_t dst_w, uint32_t dst_h,
+		struct sde_hw_scaler3_cfg *scale_cfg,
+		const struct sde_format *fmt,
+		uint32_t chroma_subsmpl_h, uint32_t chroma_subsmpl_v)
+{
+	uint32_t decimated, i;
+
+	if (!pp || !scale_cfg || !fmt || !chroma_subsmpl_h ||
+			!chroma_subsmpl_v) {
+		SDE_ERROR("psde %pK scale_cfg %pK fmt %pK smp_h %d smp_v %d\n"
+			, pp, scale_cfg, fmt, chroma_subsmpl_h,
+			chroma_subsmpl_v);
+		return;
+	}
+
+	memset(scale_cfg, 0, sizeof(*scale_cfg));
+	memset(&pp->pixel_ext, 0, sizeof(struct sde_hw_pixel_ext));
+
+	decimated = DECIMATED_DIMENSION(src_w,
+			pp->pipe_cfg.horz_decimation);
+	scale_cfg->phase_step_x[SDE_SSPP_COMP_0] =
+		mult_frac((1 << PHASE_STEP_SHIFT), decimated, dst_w);
+	decimated = DECIMATED_DIMENSION(src_h,
+			pp->pipe_cfg.vert_decimation);
+	scale_cfg->phase_step_y[SDE_SSPP_COMP_0] =
+		mult_frac((1 << PHASE_STEP_SHIFT), decimated, dst_h);
+
+	scale_cfg->phase_step_y[SDE_SSPP_COMP_1_2] =
+		scale_cfg->phase_step_y[SDE_SSPP_COMP_0] / chroma_subsmpl_v;
+	scale_cfg->phase_step_x[SDE_SSPP_COMP_1_2] =
+		scale_cfg->phase_step_x[SDE_SSPP_COMP_0] / chroma_subsmpl_h;
+
+	scale_cfg->phase_step_x[SDE_SSPP_COMP_2] =
+		scale_cfg->phase_step_x[SDE_SSPP_COMP_1_2];
+	scale_cfg->phase_step_y[SDE_SSPP_COMP_2] =
+		scale_cfg->phase_step_y[SDE_SSPP_COMP_1_2];
+
+	scale_cfg->phase_step_x[SDE_SSPP_COMP_3] =
+		scale_cfg->phase_step_x[SDE_SSPP_COMP_0];
+	scale_cfg->phase_step_y[SDE_SSPP_COMP_3] =
+		scale_cfg->phase_step_y[SDE_SSPP_COMP_0];
+
+	for (i = 0; i < SDE_MAX_PLANES; i++) {
+		scale_cfg->src_width[i] = DECIMATED_DIMENSION(src_w,
+				pp->pipe_cfg.horz_decimation);
+		scale_cfg->src_height[i] = DECIMATED_DIMENSION(src_h,
+				pp->pipe_cfg.vert_decimation);
+		if (SDE_FORMAT_IS_YUV(fmt))
+			scale_cfg->src_width[i] &= ~0x1;
+		if (i == SDE_SSPP_COMP_1_2 || i == SDE_SSPP_COMP_2) {
+			scale_cfg->src_width[i] /= chroma_subsmpl_h;
+			scale_cfg->src_height[i] /= chroma_subsmpl_v;
+		}
+		scale_cfg->preload_x[i] = SDE_QSEED3_DEFAULT_PRELOAD_H;
+		scale_cfg->preload_y[i] = SDE_QSEED3_DEFAULT_PRELOAD_V;
+		pp->pixel_ext.num_ext_pxls_top[i] =
+			scale_cfg->src_height[i];
+		pp->pixel_ext.num_ext_pxls_left[i] =
+			scale_cfg->src_width[i];
+	}
+	if (!(SDE_FORMAT_IS_YUV(fmt)) && (src_h == dst_h)
+		&& (src_w == dst_w))
+		return;
+
+	scale_cfg->dst_width = dst_w;
+	scale_cfg->dst_height = dst_h;
+	scale_cfg->y_rgb_filter_cfg = SDE_SCALE_BIL;
+	scale_cfg->uv_filter_cfg = SDE_SCALE_BIL;
+	scale_cfg->alpha_filter_cfg = SDE_SCALE_ALPHA_BIL;
+	scale_cfg->lut_flag = 0;
+	scale_cfg->blend_cfg = 1;
+	scale_cfg->enable = 1;
+}
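+
+/*
+ * Phase step example (assuming PHASE_STEP_SHIFT is 21, as defined for
+ * this driver): scaling src_w = 1920 down to dst_w = 960 gives
+ * phase_step_x = (1 << 21) * 1920 / 960 = 0x400000, i.e. 2.0 in the
+ * fixed-point representation.
+ */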
+
+/**
+ * _sde_plane_setup_scaler2 - determine default scaler phase steps/filter type
+ * @psde: Pointer to SDE plane object
+ * @src: Source size
+ * @dst: Destination size
+ * @phase_steps: Pointer to output array for phase steps
+ * @filter: Pointer to output array for filter type
+ * @fmt: Pointer to format definition
+ * @chroma_subsampling: Subsampling amount for chroma channel
+ *
+ * Returns: 0 on success
+ */
+static int _sde_plane_setup_scaler2(struct sde_plane *psde,
+		uint32_t src, uint32_t dst, uint32_t *phase_steps,
+		enum sde_hw_filter *filter, const struct sde_format *fmt,
+		uint32_t chroma_subsampling)
+{
+	if (!psde || !phase_steps || !filter || !fmt) {
+		SDE_ERROR(
+			"invalid arg(s), plane %d phase %d filter %d fmt %d\n",
+			psde != 0, phase_steps != 0, filter != 0, fmt != 0);
+		return -EINVAL;
+	}
+
+	/* calculate phase steps, leave init phase as zero */
+	phase_steps[SDE_SSPP_COMP_0] =
+		mult_frac(1 << PHASE_STEP_SHIFT, src, dst);
+	phase_steps[SDE_SSPP_COMP_1_2] =
+		phase_steps[SDE_SSPP_COMP_0] / chroma_subsampling;
+	phase_steps[SDE_SSPP_COMP_2] = phase_steps[SDE_SSPP_COMP_1_2];
+	phase_steps[SDE_SSPP_COMP_3] = phase_steps[SDE_SSPP_COMP_0];
+
+	/* calculate scaler config, if necessary */
+	if (SDE_FORMAT_IS_YUV(fmt) || src != dst) {
+		filter[SDE_SSPP_COMP_3] =
+			(src <= dst) ? SDE_SCALE_FILTER_BIL :
+			SDE_SCALE_FILTER_PCMN;
+
+		if (SDE_FORMAT_IS_YUV(fmt)) {
+			filter[SDE_SSPP_COMP_0] = SDE_SCALE_FILTER_CA;
+			filter[SDE_SSPP_COMP_1_2] = filter[SDE_SSPP_COMP_3];
+		} else {
+			filter[SDE_SSPP_COMP_0] = filter[SDE_SSPP_COMP_3];
+			filter[SDE_SSPP_COMP_1_2] =
+				SDE_SCALE_FILTER_NEAREST;
+		}
+	} else {
+		/* disable scaler */
+		filter[SDE_SSPP_COMP_0] = SDE_SCALE_FILTER_MAX;
+		filter[SDE_SSPP_COMP_1_2] = SDE_SCALE_FILTER_MAX;
+		filter[SDE_SSPP_COMP_3] = SDE_SCALE_FILTER_MAX;
+	}
+	return 0;
+}
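+
+/*
+ * Summary of the selection above: upscales use the bilinear filter and
+ * downscales PCMN; YUV luma additionally uses the content-adaptive (CA)
+ * filter while chroma follows the alpha component's choice. Unity RGB
+ * scaling disables filtering via SDE_SCALE_FILTER_MAX.
+ */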
+
+/**
+ * _sde_plane_setup_pixel_ext - determine default pixel extension values
+ * @psde: Pointer to SDE plane object
+ * @src: Source size
+ * @dst: Destination size
+ * @decimated_src: Source size after decimation, if any
+ * @phase_steps: Pointer to output array for phase steps
+ * @out_src: Output array for pixel extension values
+ * @out_edge1: Output array for pixel extension first edge
+ * @out_edge2: Output array for pixel extension second edge
+ * @filter: Pointer to array for filter type
+ * @fmt: Pointer to format definition
+ * @chroma_subsampling: Subsampling amount for chroma channel
+ * @post_compare: Whether to use the chroma subsampled source size for
+ *	comparisons
+ */
+static void _sde_plane_setup_pixel_ext(struct sde_plane *psde,
+		uint32_t src, uint32_t dst, uint32_t decimated_src,
+		uint32_t *phase_steps, uint32_t *out_src, int *out_edge1,
+		int *out_edge2, enum sde_hw_filter *filter,
+		const struct sde_format *fmt, uint32_t chroma_subsampling,
+		bool post_compare)
+{
+	int64_t edge1, edge2, caf;
+	uint32_t src_work;
+	int i, tmp;
+
+	if (psde && phase_steps && out_src && out_edge1 &&
+			out_edge2 && filter && fmt) {
+		/* handle CAF for YUV formats */
+		if (SDE_FORMAT_IS_YUV(fmt) && *filter == SDE_SCALE_FILTER_CA)
+			caf = PHASE_STEP_UNIT_SCALE;
+		else
+			caf = 0;
+
+		for (i = 0; i < SDE_MAX_PLANES; i++) {
+			src_work = decimated_src;
+			if (i == SDE_SSPP_COMP_1_2 || i == SDE_SSPP_COMP_2)
+				src_work /= chroma_subsampling;
+			if (post_compare)
+				src = src_work;
+			if (!SDE_FORMAT_IS_YUV(fmt) && (src == dst)) {
+				/* unity */
+				edge1 = 0;
+				edge2 = 0;
+			} else if (dst >= src) {
+				/* upscale */
+				edge1 = (1 << PHASE_RESIDUAL);
+				edge1 -= caf;
+				edge2 = (1 << PHASE_RESIDUAL);
+				edge2 += (dst - 1) * *(phase_steps + i);
+				edge2 -= (src_work - 1) * PHASE_STEP_UNIT_SCALE;
+				edge2 += caf;
+				edge2 = -(edge2);
+			} else {
+				/* downscale */
+				edge1 = 0;
+				edge2 = (dst - 1) * *(phase_steps + i);
+				edge2 -= (src_work - 1) * PHASE_STEP_UNIT_SCALE;
+				edge2 += *(phase_steps + i);
+				edge2 = -(edge2);
+			}
+
+			/* only enable CAF for luma plane */
+			caf = 0;
+
+			/* populate output arrays */
+			*(out_src + i) = src_work;
+
+			/* edge updates taken from __pxl_extn_helper */
+			if (edge1 >= 0) {
+				tmp = (uint32_t)edge1;
+				tmp >>= PHASE_STEP_SHIFT;
+				*(out_edge1 + i) = -tmp;
+			} else {
+				tmp = (uint32_t)(-edge1);
+				*(out_edge1 + i) =
+					(tmp + PHASE_STEP_UNIT_SCALE - 1) >>
+					PHASE_STEP_SHIFT;
+			}
+			if (edge2 >= 0) {
+				tmp = (uint32_t)edge2;
+				tmp >>= PHASE_STEP_SHIFT;
+				*(out_edge2 + i) = -tmp;
+			} else {
+				tmp = (uint32_t)(-edge2);
+				*(out_edge2 + i) =
+					(tmp + PHASE_STEP_UNIT_SCALE - 1) >>
+					PHASE_STEP_SHIFT;
+			}
+		}
+	}
+}
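+
+/*
+ * Edge example for the downscale branch (assuming PHASE_STEP_SHIFT 21,
+ * so PHASE_STEP_UNIT_SCALE == 1 << 21): a 1920 -> 960 downscale with
+ * phase step 2 << 21 yields edge2 = -((959 * 2 - 1919 + 2) << 21) =
+ * -(1 << 21), which the round-up in the negative branch converts to one
+ * extra fetched pixel on the second edge.
+ */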
+
+static inline void _sde_plane_setup_csc(struct sde_phy_plane *pp)
+{
+	static const struct sde_csc_cfg sde_csc_YUV2RGB_601L = {
+		{
+			/* S15.16 format */
+			0x00012A00, 0x00000000, 0x00019880,
+			0x00012A00, 0xFFFF9B80, 0xFFFF3000,
+			0x00012A00, 0x00020480, 0x00000000,
+		},
+		/* signed bias */
+		{ 0xfff0, 0xff80, 0xff80,},
+		{ 0x0, 0x0, 0x0,},
+		/* unsigned clamp */
+		{ 0x10, 0xeb, 0x10, 0xf0, 0x10, 0xf0,},
+		{ 0x00, 0xff, 0x00, 0xff, 0x00, 0xff,},
+	};
+	static const struct sde_csc_cfg sde_csc10_YUV2RGB_601L = {
+		{
+			/* S15.16 format */
+			0x00012A00, 0x00000000, 0x00019880,
+			0x00012A00, 0xFFFF9B80, 0xFFFF3000,
+			0x00012A00, 0x00020480, 0x00000000,
+			},
+		/* signed bias */
+		{ 0xffc0, 0xfe00, 0xfe00,},
+		{ 0x0, 0x0, 0x0,},
+		/* unsigned clamp */
+		{ 0x40, 0x3ac, 0x40, 0x3c0, 0x40, 0x3c0,},
+		{ 0x00, 0x3ff, 0x00, 0x3ff, 0x00, 0x3ff,},
+	};
+
+	struct sde_plane *psde;
+
+	if (!pp) {
+		SDE_ERROR("invalid plane\n");
+		return;
+	}
+	psde = pp->sde_plane;
+
+	/* revert to kernel default if override not available */
+	if (pp->csc_usr_ptr)
+		pp->csc_ptr = pp->csc_usr_ptr;
+	else if (BIT(SDE_SSPP_CSC_10BIT) & pp->features)
+		pp->csc_ptr = (struct sde_csc_cfg *)&sde_csc10_YUV2RGB_601L;
+	else
+		pp->csc_ptr = (struct sde_csc_cfg *)&sde_csc_YUV2RGB_601L;
+
+	SDE_DEBUG_PLANE(psde, "using 0x%X 0x%X 0x%X...\n",
+			pp->csc_ptr->csc_mv[0],
+			pp->csc_ptr->csc_mv[1],
+			pp->csc_ptr->csc_mv[2]);
+}
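+
+/*
+ * Decoding the S15.16 entries above: 0x00012A00 is 76288 / 65536, i.e.
+ * ~1.164, the BT.601 limited-range luma coefficient, and 0xFFFF9B80 is
+ * two's complement for ~-0.392, the Cb contribution to green.
+ */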
+
+static void sde_color_process_plane_setup(struct drm_plane *plane,
+	struct sde_phy_plane *pp)
+{
+	struct sde_plane *psde;
+	struct sde_plane_state *pstate;
+	uint32_t hue, saturation, value, contrast;
+	struct drm_msm_memcol *memcol = NULL;
+	size_t memcol_sz = 0;
+
+	psde = pp->sde_plane;
+	pstate = to_sde_plane_state(plane->state);
+
+	hue = (uint32_t) sde_plane_get_property(pstate, PLANE_PROP_HUE_ADJUST);
+	if (pp->pipe_hw->ops.setup_pa_hue)
+		pp->pipe_hw->ops.setup_pa_hue(pp->pipe_hw, &hue);
+	saturation = (uint32_t) sde_plane_get_property(pstate,
+		PLANE_PROP_SATURATION_ADJUST);
+	if (pp->pipe_hw->ops.setup_pa_sat)
+		pp->pipe_hw->ops.setup_pa_sat(pp->pipe_hw, &saturation);
+	value = (uint32_t) sde_plane_get_property(pstate,
+		PLANE_PROP_VALUE_ADJUST);
+	if (pp->pipe_hw->ops.setup_pa_val)
+		pp->pipe_hw->ops.setup_pa_val(pp->pipe_hw, &value);
+	contrast = (uint32_t) sde_plane_get_property(pstate,
+		PLANE_PROP_CONTRAST_ADJUST);
+	if (pp->pipe_hw->ops.setup_pa_cont)
+		pp->pipe_hw->ops.setup_pa_cont(pp->pipe_hw, &contrast);
+
+	if (pp->pipe_hw->ops.setup_pa_memcolor) {
+		/* Skin memory color setup */
+		memcol = msm_property_get_blob(&psde->property_info,
+					pstate->property_blobs,
+					&memcol_sz,
+					PLANE_PROP_SKIN_COLOR);
+		pp->pipe_hw->ops.setup_pa_memcolor(pp->pipe_hw,
+					MEMCOLOR_SKIN, memcol);
+
+		/* Sky memory color setup */
+		memcol = msm_property_get_blob(&psde->property_info,
+					pstate->property_blobs,
+					&memcol_sz,
+					PLANE_PROP_SKY_COLOR);
+		pp->pipe_hw->ops.setup_pa_memcolor(pp->pipe_hw,
+					MEMCOLOR_SKY, memcol);
+
+		/* Foliage memory color setup */
+		memcol = msm_property_get_blob(&psde->property_info,
+					pstate->property_blobs,
+					&memcol_sz,
+					PLANE_PROP_FOLIAGE_COLOR);
+		pp->pipe_hw->ops.setup_pa_memcolor(pp->pipe_hw,
+					MEMCOLOR_FOLIAGE, memcol);
+	}
+}
+
+static void _sde_plane_setup_scaler(struct sde_phy_plane *pp,
+		const struct sde_format *fmt,
+		struct sde_plane_state *pstate)
+{
+	struct sde_hw_pixel_ext *pe;
+	uint32_t chroma_subsmpl_h, chroma_subsmpl_v;
+	struct sde_plane *psde;
+
+	if (!pp || !fmt || !pstate || !pp->sde_plane) {
+		SDE_ERROR("invalid arg(s), phy_plane %d fmt %d\n",
+				pp != NULL, fmt != NULL);
+		return;
+	}
+	psde = pp->sde_plane;
+
+	pe = &(pp->pixel_ext);
+
+	pp->pipe_cfg.horz_decimation =
+		sde_plane_get_property(pstate, PLANE_PROP_H_DECIMATE);
+	pp->pipe_cfg.vert_decimation =
+		sde_plane_get_property(pstate, PLANE_PROP_V_DECIMATE);
+
+	/* don't chroma subsample if decimating */
+	chroma_subsmpl_h = pp->pipe_cfg.horz_decimation ? 1 :
+		drm_format_horz_chroma_subsampling(fmt->base.pixel_format);
+	chroma_subsmpl_v = pp->pipe_cfg.vert_decimation ? 1 :
+		drm_format_vert_chroma_subsampling(fmt->base.pixel_format);
+
+	/* update scaler */
+	if (pp->features & BIT(SDE_SSPP_SCALER_QSEED3)) {
+		int error;
+
+		error = _sde_plane_setup_scaler3_lut(pp, pstate);
+		if (error || !pp->pixel_ext_usr ||
+				psde->debugfs_default_scale) {
+			memset(pe, 0, sizeof(struct sde_hw_pixel_ext));
+			/* calculate default config for QSEED3 */
+			_sde_plane_setup_scaler3(pp,
+					pp->pipe_cfg.src_rect.w,
+					pp->pipe_cfg.src_rect.h,
+					pp->pipe_cfg.dst_rect.w,
+					pp->pipe_cfg.dst_rect.h,
+					pp->scaler3_cfg, fmt,
+					chroma_subsmpl_h, chroma_subsmpl_v);
+		}
+	} else if (!pp->pixel_ext_usr || !pstate ||
+			psde->debugfs_default_scale) {
+		uint32_t deci_dim, i;
+
+		/* calculate default configuration for QSEED2 */
+		memset(pe, 0, sizeof(struct sde_hw_pixel_ext));
+
+		SDE_DEBUG_PLANE(psde, "default config\n");
+		deci_dim = DECIMATED_DIMENSION(pp->pipe_cfg.src_rect.w,
+				pp->pipe_cfg.horz_decimation);
+		_sde_plane_setup_scaler2(psde,
+				deci_dim,
+				pp->pipe_cfg.dst_rect.w,
+				pe->phase_step_x,
+				pe->horz_filter, fmt, chroma_subsmpl_h);
+
+		if (SDE_FORMAT_IS_YUV(fmt))
+			deci_dim &= ~0x1;
+		_sde_plane_setup_pixel_ext(psde, pp->pipe_cfg.src_rect.w,
+				pp->pipe_cfg.dst_rect.w, deci_dim,
+				pe->phase_step_x,
+				pe->roi_w,
+				pe->num_ext_pxls_left,
+				pe->num_ext_pxls_right, pe->horz_filter, fmt,
+				chroma_subsmpl_h, 0);
+
+		deci_dim = DECIMATED_DIMENSION(pp->pipe_cfg.src_rect.h,
+				pp->pipe_cfg.vert_decimation);
+		_sde_plane_setup_scaler2(psde,
+				deci_dim,
+				pp->pipe_cfg.dst_rect.h,
+				pe->phase_step_y,
+				pe->vert_filter, fmt, chroma_subsmpl_v);
+		_sde_plane_setup_pixel_ext(psde, pp->pipe_cfg.src_rect.h,
+				pp->pipe_cfg.dst_rect.h, deci_dim,
+				pe->phase_step_y,
+				pe->roi_h,
+				pe->num_ext_pxls_top,
+				pe->num_ext_pxls_btm, pe->vert_filter, fmt,
+				chroma_subsmpl_v, 1);
+
+		for (i = 0; i < SDE_MAX_PLANES; i++) {
+			if (pe->num_ext_pxls_left[i] >= 0)
+				pe->left_rpt[i] = pe->num_ext_pxls_left[i];
+			else
+				pe->left_ftch[i] = pe->num_ext_pxls_left[i];
+
+			if (pe->num_ext_pxls_right[i] >= 0)
+				pe->right_rpt[i] = pe->num_ext_pxls_right[i];
+			else
+				pe->right_ftch[i] = pe->num_ext_pxls_right[i];
+
+			if (pe->num_ext_pxls_top[i] >= 0)
+				pe->top_rpt[i] = pe->num_ext_pxls_top[i];
+			else
+				pe->top_ftch[i] = pe->num_ext_pxls_top[i];
+
+			if (pe->num_ext_pxls_btm[i] >= 0)
+				pe->btm_rpt[i] = pe->num_ext_pxls_btm[i];
+			else
+				pe->btm_ftch[i] = pe->num_ext_pxls_btm[i];
+		}
+	}
+}
+
+/**
+ * _sde_plane_color_fill - enables color fill on plane
+ * @pp:     Pointer to physical plane
+ * @color:  RGB fill color value, [23..16] Blue, [15..8] Green, [7..0] Red
+ * @alpha:  8-bit fill alpha value, 255 selects 100% alpha
+ * Returns: 0 on success
+ */
+static int _sde_plane_color_fill(struct sde_phy_plane *pp,
+		uint32_t color, uint32_t alpha)
+{
+	const struct sde_format *fmt;
+
+	if (!pp) {
+		SDE_ERROR("invalid plane\n");
+		return -EINVAL;
+	}
+
+	if (!pp->pipe_hw) {
+		SDE_ERROR_PLANE(pp->sde_plane, "invalid plane h/w pointer\n");
+		return -EINVAL;
+	}
+
+	SDE_DEBUG_PLANE(pp->sde_plane, "\n");
+
+	/*
+	 * select fill format to match user property expectation,
+	 * h/w only supports RGB variants
+	 */
+	fmt = sde_get_sde_format(DRM_FORMAT_ABGR8888);
+
+	/* update sspp */
+	if (fmt && pp->pipe_hw->ops.setup_solidfill) {
+		pp->pipe_hw->ops.setup_solidfill(pp->pipe_hw,
+				(color & 0xFFFFFF) | ((alpha & 0xFF) << 24));
+
+		/* override scaler/decimation if solid fill */
+		pp->pipe_cfg.src_rect.x = 0;
+		pp->pipe_cfg.src_rect.y = 0;
+		pp->pipe_cfg.src_rect.w = pp->pipe_cfg.dst_rect.w;
+		pp->pipe_cfg.src_rect.h = pp->pipe_cfg.dst_rect.h;
+
+		_sde_plane_setup_scaler(pp, fmt, NULL);
+
+		if (pp->pipe_hw->ops.setup_format)
+			pp->pipe_hw->ops.setup_format(pp->pipe_hw,
+					fmt, SDE_SSPP_SOLID_FILL);
+
+		if (pp->pipe_hw->ops.setup_rects)
+			pp->pipe_hw->ops.setup_rects(pp->pipe_hw,
+					&pp->pipe_cfg, &pp->pixel_ext,
+					pp->scaler3_cfg);
+	}
+
+	return 0;
+}
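+
+/*
+ * Packing example: with the [23..16] Blue, [15..8] Green, [7..0] Red
+ * layout noted in the kernel-doc, an opaque pure red fill is
+ * _sde_plane_color_fill(pp, 0x0000FF, 0xFF), which writes 0xFF0000FF
+ * to the solid fill register.
+ */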
+
+static int _sde_plane_mode_set(struct drm_plane *plane,
+		struct drm_plane_state *state)
+{
+	uint32_t nplanes, src_flags = 0x0;
+	struct sde_plane *psde;
+	struct sde_plane_state *pstate;
+	const struct sde_format *fmt;
+	struct drm_crtc *crtc;
+	struct drm_framebuffer *fb;
+	struct sde_rect src, dst;
+	bool q16_data = true;
+	int idx;
+	struct sde_phy_plane *pp;
+	uint32_t num_of_phy_planes = 0;
+	int mode = 0;
+	uint32_t crtc_split_width;
+	bool is_across_mixer_boundary = false;
+
+	if (!plane) {
+		SDE_ERROR("invalid plane\n");
+		return -EINVAL;
+	} else if (!plane->state) {
+		SDE_ERROR("invalid plane state\n");
+		return -EINVAL;
+	}
+
+	psde = to_sde_plane(plane);
+	pstate = to_sde_plane_state(plane->state);
+
+	crtc = state->crtc;
+	crtc_split_width = get_crtc_split_width(crtc);
+	fb = state->fb;
+	if (!crtc || !fb) {
+		SDE_ERROR_PLANE(psde, "invalid crtc %d or fb %d\n",
+				crtc != 0, fb != 0);
+		return -EINVAL;
+	}
+	fmt = to_sde_format(msm_framebuffer_format(fb));
+	nplanes = fmt->num_planes;
+
+	/* determine what needs to be refreshed */
+	while ((idx = msm_property_pop_dirty(&psde->property_info)) >= 0) {
+		switch (idx) {
+		case PLANE_PROP_SCALER_V1:
+		case PLANE_PROP_SCALER_V2:
+		case PLANE_PROP_H_DECIMATE:
+		case PLANE_PROP_V_DECIMATE:
+		case PLANE_PROP_SRC_CONFIG:
+		case PLANE_PROP_ZPOS:
+			pstate->dirty |= SDE_PLANE_DIRTY_RECTS;
+			break;
+		case PLANE_PROP_CSC_V1:
+			pstate->dirty |= SDE_PLANE_DIRTY_FORMAT;
+			break;
+		case PLANE_PROP_COLOR_FILL:
+			/* potentially need to refresh everything */
+			pstate->dirty = SDE_PLANE_DIRTY_ALL;
+			break;
+		case PLANE_PROP_ROTATION:
+			pstate->dirty |= SDE_PLANE_DIRTY_FORMAT;
+			break;
+		case PLANE_PROP_INFO:
+		case PLANE_PROP_ALPHA:
+		case PLANE_PROP_INPUT_FENCE:
+		case PLANE_PROP_BLEND_OP:
+			/* no special action required */
+			break;
+		default:
+			/* unknown property, refresh everything */
+			pstate->dirty |= SDE_PLANE_DIRTY_ALL;
+			SDE_DEBUG("executing full mode set, prp_idx %d\n", idx);
+			break;
+		}
+	}
+
+	list_for_each_entry(pp, &psde->phy_plane_head, phy_plane_list) {
+		if (pstate->dirty & SDE_PLANE_DIRTY_RECTS)
+			memset(&(pp->pipe_cfg), 0,
+					sizeof(struct sde_hw_pipe_cfg));
+
+		_sde_plane_set_scanout(pp, pstate, &pp->pipe_cfg, fb);
+
+		pstate->pending = true;
+
+		pp->is_rt_pipe = sde_crtc_is_rt(crtc);
+		_sde_plane_set_qos_ctrl(pp, false, SDE_PLANE_QOS_PANIC_CTRL);
+	}
+
+	/* early out if nothing dirty */
+	if (!pstate->dirty)
+		return 0;
+
+	memset(&src, 0, sizeof(struct sde_rect));
+
+	/* update secure session flag */
+	mode = sde_plane_get_property(pstate,
+			PLANE_PROP_FB_TRANSLATION_MODE);
+	if ((mode == SDE_DRM_FB_SEC) ||
+			(mode == SDE_DRM_FB_SEC_DIR_TRANS))
+		src_flags |= SDE_SSPP_SECURE_OVERLAY_SESSION;
+
+	/* update roi config */
+	if (pstate->dirty & SDE_PLANE_DIRTY_RECTS) {
+		POPULATE_RECT(&src, state->src_x, state->src_y,
+			state->src_w, state->src_h, q16_data);
+		POPULATE_RECT(&dst, state->crtc_x, state->crtc_y,
+			state->crtc_w, state->crtc_h, !q16_data);
+
+		SDE_DEBUG_PLANE(psde,
+			"FB[%u] %u,%u,%ux%u->crtc%u %d,%d,%ux%u, %s ubwc %d\n",
+				fb->base.id, src.x, src.y, src.w, src.h,
+				crtc->base.id, dst.x, dst.y, dst.w, dst.h,
+				drm_get_format_name(fmt->base.pixel_format),
+				SDE_FORMAT_IS_UBWC(fmt));
+
+		if (sde_plane_get_property(pstate, PLANE_PROP_SRC_CONFIG) &
+			BIT(SDE_DRM_DEINTERLACE)) {
+			SDE_DEBUG_PLANE(psde, "deinterlace\n");
+			/* pp is stale after the loop above; walk all pipes */
+			list_for_each_entry(pp, &psde->phy_plane_head,
+					phy_plane_list)
+				for (idx = 0; idx < SDE_MAX_PLANES; ++idx)
+					pp->pipe_cfg.layout.plane_pitch[idx]
+						<<= 1;
+			src.h /= 2;
+			src.y = DIV_ROUND_UP(src.y, 2);
+			src.y &= ~0x1;
+		}
+	}
+
+	list_for_each_entry(pp, &psde->phy_plane_head, phy_plane_list)
+		num_of_phy_planes++;
+
+	/*
+	 * Use a single physical plane when the source fits within one
+	 * mixer's width and does not cross the split boundary.
+	 */
+	is_across_mixer_boundary = (plane->state->crtc_x < crtc_split_width) &&
+				(plane->state->crtc_x + plane->state->crtc_w >
+				crtc_split_width);
+	if (crtc_split_width >= (src.x + src.w) && !is_across_mixer_boundary)
+		num_of_phy_planes = 1;
+
+	if (num_of_phy_planes > 1) {
+		/* Adjust width for multi-pipe */
+		src.w /= num_of_phy_planes;
+		dst.w /= num_of_phy_planes;
+	}
+
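+	/*
+	 * Example: a 3840-wide source split across two physical planes is
+	 * programmed as two 1920-wide fetches, with the second pipe's
+	 * src/dst x offsets advanced by one slice width via pp->index below.
+	 */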
+	list_for_each_entry(pp, &psde->phy_plane_head, phy_plane_list) {
+		pp->pipe_cfg.src_rect = src;
+		pp->pipe_cfg.dst_rect = dst;
+		/*
+		 * Adjust offset for multi-pipe; offset the per-pipe rect so
+		 * the shared src/dst locals stay unmodified across iterations.
+		 */
+		if (num_of_phy_planes > 1) {
+			pp->pipe_cfg.src_rect.x += src.w * pp->index;
+			pp->pipe_cfg.dst_rect.x += dst.w * pp->index;
+		}
+
+		/* check for color fill */
+		pp->color_fill = (uint32_t)sde_plane_get_property(pstate,
+				PLANE_PROP_COLOR_FILL);
+		if (pp->color_fill & SDE_PLANE_COLOR_FILL_FLAG) {
+			/* skip remaining processing on color fill */
+			pstate->dirty = 0x0;
+		} else if (pp->pipe_hw->ops.setup_rects) {
+			_sde_plane_setup_scaler(pp, fmt, pstate);
+
+			pp->pipe_hw->ops.setup_rects(pp->pipe_hw,
+					&pp->pipe_cfg, &pp->pixel_ext,
+					pp->scaler3_cfg);
+		}
+
+		if (((pstate->dirty & SDE_PLANE_DIRTY_FORMAT) ||
+				(src_flags &
+				SDE_SSPP_SECURE_OVERLAY_SESSION)) &&
+				pp->pipe_hw->ops.setup_format) {
+			SDE_DEBUG_PLANE(psde, "rotation 0x%llX\n",
+				sde_plane_get_property(pstate,
+					PLANE_PROP_ROTATION));
+			if (sde_plane_get_property(pstate, PLANE_PROP_ROTATION)
+				& BIT(DRM_REFLECT_X))
+				src_flags |= SDE_SSPP_FLIP_LR;
+			if (sde_plane_get_property(pstate,
+				PLANE_PROP_ROTATION) & BIT(DRM_REFLECT_Y))
+				src_flags |= SDE_SSPP_FLIP_UD;
+
+			/* update format */
+			pp->pipe_hw->ops.setup_format(pp->pipe_hw,
+				fmt, src_flags);
+
+			/* update csc */
+			if (SDE_FORMAT_IS_YUV(fmt))
+				_sde_plane_setup_csc(pp);
+			else
+				pp->csc_ptr = NULL;
+		}
+
+		sde_color_process_plane_setup(plane, pp);
+
+		/* update sharpening */
+		if ((pstate->dirty & SDE_PLANE_DIRTY_SHARPEN) &&
+			pp->pipe_hw->ops.setup_sharpening) {
+			pp->sharp_cfg.strength = SHARP_STRENGTH_DEFAULT;
+			pp->sharp_cfg.edge_thr = SHARP_EDGE_THR_DEFAULT;
+			pp->sharp_cfg.smooth_thr = SHARP_SMOOTH_THR_DEFAULT;
+			pp->sharp_cfg.noise_thr = SHARP_NOISE_THR_DEFAULT;
+
+			pp->pipe_hw->ops.setup_sharpening(pp->pipe_hw,
+					&pp->sharp_cfg);
+		}
+
+		_sde_plane_set_qos_lut(pp, fb);
+		_sde_plane_set_danger_lut(pp, fb);
+
+		if (plane->type != DRM_PLANE_TYPE_CURSOR) {
+			_sde_plane_set_qos_ctrl(pp, true,
+				SDE_PLANE_QOS_PANIC_CTRL);
+			_sde_plane_set_ot_limit(pp, crtc);
+		}
+	}
+
+	/* clear dirty */
+	pstate->dirty = 0x0;
+
+	return 0;
+}
+
+static int sde_plane_prepare_fb(struct drm_plane *plane,
+		const struct drm_plane_state *new_state)
+{
+	struct drm_framebuffer *fb;
+	struct sde_plane *psde = plane ? to_sde_plane(plane) : NULL;
+	struct sde_plane_state *pstate;
+	int rc;
+
+	if (!psde || !new_state)
+		return -EINVAL;
+
+	fb = new_state->fb;
+	if (!fb)
+		return 0;
+
+	pstate = to_sde_plane_state(new_state);
+	rc = _sde_plane_get_aspace(psde, pstate, &psde->aspace);
+
+	if (rc) {
+		SDE_ERROR_PLANE(psde, "Failed to get aspace %d\n", rc);
+		return rc;
+	}
+
+	SDE_DEBUG_PLANE(psde, "FB[%u]\n", fb->base.id);
+	return msm_framebuffer_prepare(fb, psde->aspace);
+}
+
+static void sde_plane_cleanup_fb(struct drm_plane *plane,
+		const struct drm_plane_state *old_state)
+{
+	struct drm_framebuffer *fb = old_state ? old_state->fb : NULL;
+	struct sde_plane *psde = plane ? to_sde_plane(plane) : NULL;
+
+	if (!fb || !psde)
+		return;
+
+	SDE_DEBUG_PLANE(psde, "FB[%u]\n", fb->base.id);
+	msm_framebuffer_cleanup(fb, psde->aspace);
+}
+
+static void _sde_plane_atomic_check_mode_changed(struct sde_plane *psde,
+		struct drm_plane_state *state,
+		struct drm_plane_state *old_state)
+{
+	struct sde_plane_state *pstate = to_sde_plane_state(state);
+
+	/* no need to check it again */
+	if (pstate->dirty == SDE_PLANE_DIRTY_ALL)
+		return;
+
+	if (!sde_plane_enabled(state) || !sde_plane_enabled(old_state)
+			|| psde->is_error) {
+		SDE_DEBUG_PLANE(psde,
+			"enabling/disabling full modeset required\n");
+		pstate->dirty |= SDE_PLANE_DIRTY_ALL;
+	} else if (to_sde_plane_state(old_state)->pending) {
+		SDE_DEBUG_PLANE(psde, "still pending\n");
+		pstate->dirty |= SDE_PLANE_DIRTY_ALL;
+	} else if (state->src_w != old_state->src_w ||
+		   state->src_h != old_state->src_h ||
+		   state->src_x != old_state->src_x ||
+		   state->src_y != old_state->src_y) {
+		SDE_DEBUG_PLANE(psde, "src rect updated\n");
+		pstate->dirty |= SDE_PLANE_DIRTY_RECTS;
+	} else if (state->crtc_w != old_state->crtc_w ||
+		   state->crtc_h != old_state->crtc_h ||
+		   state->crtc_x != old_state->crtc_x ||
+		   state->crtc_y != old_state->crtc_y) {
+		SDE_DEBUG_PLANE(psde, "crtc rect updated\n");
+		pstate->dirty |= SDE_PLANE_DIRTY_RECTS;
+	}
+
+	if (!state->fb || !old_state->fb) {
+		SDE_DEBUG_PLANE(psde, "can't compare fb handles\n");
+	} else if (state->fb->pixel_format != old_state->fb->pixel_format) {
+		SDE_DEBUG_PLANE(psde, "format change\n");
+		pstate->dirty |= SDE_PLANE_DIRTY_FORMAT | SDE_PLANE_DIRTY_RECTS;
+	} else {
+		uint64_t *new_mods = state->fb->modifier;
+		uint64_t *old_mods = old_state->fb->modifier;
+		uint32_t *new_pitches = state->fb->pitches;
+		uint32_t *old_pitches = old_state->fb->pitches;
+		uint32_t *new_offset = state->fb->offsets;
+		uint32_t *old_offset = old_state->fb->offsets;
+		int i;
+
+		for (i = 0; i < ARRAY_SIZE(state->fb->modifier); i++) {
+			if (new_mods[i] != old_mods[i]) {
+				SDE_DEBUG_PLANE(psde,
+					"format modifiers change\"\
+					plane:%d new_mode:%llu old_mode:%llu\n",
+					i, new_mods[i], old_mods[i]);
+				pstate->dirty |= SDE_PLANE_DIRTY_FORMAT |
+					SDE_PLANE_DIRTY_RECTS;
+				break;
+			}
+		}
+		for (i = 0; i < ARRAY_SIZE(state->fb->pitches); i++) {
+			if (new_pitches[i] != old_pitches[i]) {
+				SDE_DEBUG_PLANE(psde,
+					"pitches change plane:%d\"\
+					old_pitches:%u new_pitches:%u\n",
+					i, old_pitches[i], new_pitches[i]);
+				pstate->dirty |= SDE_PLANE_DIRTY_RECTS;
+				break;
+			}
+		}
+		for (i = 0; i < ARRAY_SIZE(state->fb->offsets); i++) {
+			if (new_offset[i] != old_offset[i]) {
+				SDE_DEBUG_PLANE(psde,
+					"offset change plane:%d\"\
+					old_offset:%u new_offset:%u\n",
+					i, old_offset[i], new_offset[i]);
+				pstate->dirty |= SDE_PLANE_DIRTY_FORMAT |
+					SDE_PLANE_DIRTY_RECTS;
+				break;
+			}
+		}
+	}
+}
+
+static int sde_plane_atomic_check(struct drm_plane *plane,
+		struct drm_plane_state *state)
+{
+	int ret = 0;
+	struct sde_plane *psde;
+	struct sde_plane_state *pstate;
+	const struct sde_format *fmt;
+	struct sde_rect src, dst;
+	uint32_t deci_w, deci_h, src_deci_w, src_deci_h;
+	uint32_t max_upscale, max_downscale, min_src_size, max_linewidth;
+	bool q16_data = true;
+	struct sde_phy_plane *pp;
+	uint32_t num_of_phy_planes = 0;
+
+	if (!plane || !state) {
+		SDE_ERROR("invalid arg(s), plane %d state %d.\n",
+				plane != NULL, state != NULL);
+		ret = -EINVAL;
+		goto exit;
+	}
+
+	psde = to_sde_plane(plane);
+	pstate = to_sde_plane_state(state);
+
+	list_for_each_entry(pp, &psde->phy_plane_head, phy_plane_list)
+		num_of_phy_planes++;
+
+	deci_w = sde_plane_get_property(pstate, PLANE_PROP_H_DECIMATE);
+	deci_h = sde_plane_get_property(pstate, PLANE_PROP_V_DECIMATE);
+
+	/* src values are in Q16 fixed point, convert to integer */
+	POPULATE_RECT(&src, state->src_x, state->src_y, state->src_w,
+		state->src_h, q16_data);
+	POPULATE_RECT(&dst, state->crtc_x, state->crtc_y, state->crtc_w,
+		state->crtc_h, !q16_data);
+
+	src_deci_w = DECIMATED_DIMENSION(src.w, deci_w);
+	src_deci_h = DECIMATED_DIMENSION(src.h, deci_h);
+
+	SDE_DEBUG_PLANE(psde, "check %d -> %d\n",
+		sde_plane_enabled(plane->state), sde_plane_enabled(state));
+
+	if (!sde_plane_enabled(state))
+		goto modeset_update;
+
+	fmt = to_sde_format(msm_framebuffer_format(state->fb));
+
+	min_src_size = SDE_FORMAT_IS_YUV(fmt) ? 2 : 1;
+
+	list_for_each_entry(pp, &psde->phy_plane_head, phy_plane_list) {
+		if (!pp->pipe_sblk) {
+			SDE_ERROR("invalid plane catalog\n");
+			ret = -EINVAL;
+			goto exit;
+		}
+
+		max_upscale = pp->pipe_sblk->maxupscale;
+		max_downscale = pp->pipe_sblk->maxdwnscale;
+		max_linewidth = pp->pipe_sblk->maxlinewidth;
+
+		if (SDE_FORMAT_IS_YUV(fmt) &&
+			(!(pp->features & SDE_SSPP_SCALER) ||
+			 !(pp->features & (BIT(SDE_SSPP_CSC)
+			 | BIT(SDE_SSPP_CSC_10BIT))))) {
+			SDE_ERROR_PLANE(psde,
+					"plane doesn't have scaler/csc for yuv\n");
+			ret = -EINVAL;
+
+		/* check src bounds */
+		} else if (state->fb->width > MAX_IMG_WIDTH ||
+			state->fb->height > MAX_IMG_HEIGHT ||
+			src.w < min_src_size || src.h < min_src_size ||
+			CHECK_LAYER_BOUNDS(src.x, src.w, state->fb->width) ||
+			CHECK_LAYER_BOUNDS(src.y, src.h, state->fb->height)) {
+			SDE_ERROR_PLANE(psde, "invalid source %u, %u, %ux%u\n",
+				src.x, src.y, src.w, src.h);
+			ret = -E2BIG;
+
+		/* valid yuv image */
+		} else if (SDE_FORMAT_IS_YUV(fmt) && ((src.x & 0x1)
+					|| (src.y & 0x1) || (src.w & 0x1)
+					|| (src.h & 0x1))) {
+			SDE_ERROR_PLANE(psde, "invalid yuv source %u, %u,\"\
+				%ux%u\n", src.x, src.y, src.w, src.h);
+			ret = -EINVAL;
+
+		/* min dst support */
+		} else if (dst.w < 0x1 || dst.h < 0x1) {
+			SDE_ERROR_PLANE(psde, "invalid dest rect %u, %u,\"\
+				%ux%u\n", dst.x, dst.y, dst.w, dst.h);
+			ret = -EINVAL;
+
+		/* decimation validation */
+		} else if (deci_w || deci_h) {
+			if ((deci_w > pp->pipe_sblk->maxhdeciexp) ||
+				(deci_h > pp->pipe_sblk->maxvdeciexp)) {
+				SDE_ERROR_PLANE(psde,
+						"too much decimation requested\n");
+				ret = -EINVAL;
+			} else if (fmt->fetch_mode != SDE_FETCH_LINEAR) {
+				SDE_ERROR_PLANE(psde,
+						"decimation requires linear fetch\n");
+				ret = -EINVAL;
+			}
+
+		} else if (!(pp->features & SDE_SSPP_SCALER) &&
+			((src.w != dst.w) || (src.h != dst.h))) {
+			SDE_ERROR_PLANE(psde,
+				"pipe doesn't support scaling %ux%u->%ux%u\n",
+				src.w, src.h, dst.w, dst.h);
+			ret = -EINVAL;
+
+		/* check decimated source width */
+		} else if (src_deci_w > max_linewidth * num_of_phy_planes) {
+			SDE_ERROR_PLANE(psde,
+				"invalid src w:%u, deci w:%u, line w:%u, num_phy_planes:%u\n",
+				src.w, src_deci_w, max_linewidth,
+				num_of_phy_planes);
+			ret = -E2BIG;
+
+		/* check max scaler capability */
+		} else if (((src_deci_w * max_upscale) < dst.w) ||
+			((src_deci_h * max_upscale) < dst.h) ||
+			((dst.w * max_downscale) < src_deci_w) ||
+			((dst.h * max_downscale) < src_deci_h)) {
+			SDE_ERROR_PLANE(psde,
+				"too much scaling requested %ux%u->%ux%u\n",
+				src_deci_w, src_deci_h, dst.w, dst.h);
+			ret = -E2BIG;
+		}
+	}
+
+modeset_update:
+	if (!ret)
+		_sde_plane_atomic_check_mode_changed(psde, state, plane->state);
+exit:
+	return ret;
+}
+
+/**
+ * sde_plane_flush - final plane operations before commit flush
+ * @plane: Pointer to drm plane structure
+ */
+void sde_plane_flush(struct drm_plane *plane)
+{
+	struct sde_plane *psde;
+	struct sde_phy_plane *pp;
+
+	if (!plane) {
+		SDE_ERROR("invalid plane\n");
+		return;
+	}
+
+	psde = to_sde_plane(plane);
+
+	/*
+	 * These updates have to be done immediately before the plane flush
+	 * timing, and may not be moved to the atomic_update/mode_set functions.
+	 */
+	list_for_each_entry(pp, &psde->phy_plane_head, phy_plane_list) {
+		/* force white frame with 100% alpha pipe output on error */
+		if (psde->is_error)
+			_sde_plane_color_fill(pp, 0xFFFFFF, 0xFF);
+		else if (pp->color_fill & SDE_PLANE_COLOR_FILL_FLAG)
+			/* force 100% alpha */
+			_sde_plane_color_fill(pp, pp->color_fill, 0xFF);
+		else if (pp->pipe_hw && pp->csc_ptr &&
+					pp->pipe_hw->ops.setup_csc)
+			pp->pipe_hw->ops.setup_csc(pp->pipe_hw, pp->csc_ptr);
+	}
+
+	/* force black color fill on every pipe during suspend */
+	if (msm_is_suspend_state(plane->dev) && suspend_blank)
+		list_for_each_entry(pp, &psde->phy_plane_head, phy_plane_list)
+			_sde_plane_color_fill(pp, 0x0, 0x0);
+
+	/* flag h/w flush complete */
+	if (plane->state)
+		to_sde_plane_state(plane->state)->pending = false;
+}
+
+static void sde_plane_atomic_update(struct drm_plane *plane,
+				struct drm_plane_state *old_state)
+{
+	struct sde_plane *psde;
+	struct drm_plane_state *state;
+	struct sde_plane_state *pstate;
+
+	if (!plane) {
+		SDE_ERROR("invalid plane\n");
+		return;
+	} else if (!plane->state) {
+		SDE_ERROR("invalid plane state\n");
+		return;
+	}
+
+	psde = to_sde_plane(plane);
+	psde->is_error = false;
+	state = plane->state;
+	pstate = to_sde_plane_state(state);
+
+	SDE_DEBUG_PLANE(psde, "\n");
+
+	if (!sde_plane_enabled(state)) {
+		pstate->pending = true;
+	} else {
+		int ret;
+
+		ret = _sde_plane_mode_set(plane, state);
+		/* atomic_check should have ensured that this doesn't fail */
+		WARN_ON(ret < 0);
+	}
+}
+
+/* helper to install properties which are common to planes and crtcs */
+static void _sde_plane_install_properties(struct drm_plane *plane,
+	struct sde_mdss_cfg *catalog)
+{
+	static const struct drm_prop_enum_list e_blend_op[] = {
+		{SDE_DRM_BLEND_OP_NOT_DEFINED,    "not_defined"},
+		{SDE_DRM_BLEND_OP_OPAQUE,         "opaque"},
+		{SDE_DRM_BLEND_OP_PREMULTIPLIED,  "premultiplied"},
+		{SDE_DRM_BLEND_OP_COVERAGE,       "coverage"}
+	};
+	static const struct drm_prop_enum_list e_src_config[] = {
+		{SDE_DRM_DEINTERLACE, "deinterlace"}
+	};
+	static const struct drm_prop_enum_list e_fb_translation_mode[] = {
+		{SDE_DRM_FB_NON_SEC, "non_sec"},
+		{SDE_DRM_FB_SEC, "sec"},
+		{SDE_DRM_FB_NON_SEC_DIR_TRANS, "non_sec_direct_translation"},
+		{SDE_DRM_FB_SEC_DIR_TRANS, "sec_direct_translation"},
+	};
+	const struct sde_format_extended *format_list = NULL;
+	struct sde_kms_info *info;
+	struct sde_plane *psde = to_sde_plane(plane);
+	int zpos_max = 255;
+	int zpos_def = 0;
+	char feature_name[256];
+	struct sde_phy_plane *pp;
+	uint32_t features = 0xFFFFFFFF, nformats = 64;
+	u32 maxlinewidth = 0, maxupscale = 0, maxdwnscale = 0;
+	u32 maxhdeciexp = 0, maxvdeciexp = 0;
+
+	if (!plane || !psde) {
+		SDE_ERROR("invalid plane\n");
+		return;
+	}
+	list_for_each_entry(pp, &psde->phy_plane_head, phy_plane_list) {
+		if (!pp->pipe_hw || !pp->pipe_sblk) {
+			SDE_ERROR("invalid phy_plane, pipe_hw %d\"\
+				pipe_sblk %d\n", pp->pipe_hw != NULL,
+				pp->pipe_sblk != NULL);
+			return;
+		}
+	}
+	if (!catalog) {
+		SDE_ERROR("invalid catalog\n");
+		return;
+	}
+
+	list_for_each_entry(pp, &psde->phy_plane_head, phy_plane_list) {
+		/* Get common features for all pipes */
+		features &= pp->features;
+		if (nformats > pp->nformats) {
+			nformats = pp->nformats;
+			format_list = pp->pipe_sblk->format_list;
+		}
+		if (maxlinewidth < pp->pipe_sblk->maxlinewidth)
+			maxlinewidth = pp->pipe_sblk->maxlinewidth;
+		if (maxupscale < pp->pipe_sblk->maxupscale)
+			maxupscale = pp->pipe_sblk->maxupscale;
+		if (maxdwnscale < pp->pipe_sblk->maxdwnscale)
+			maxdwnscale = pp->pipe_sblk->maxdwnscale;
+		if (maxhdeciexp < pp->pipe_sblk->maxhdeciexp)
+			maxhdeciexp = pp->pipe_sblk->maxhdeciexp;
+		if (maxvdeciexp < pp->pipe_sblk->maxvdeciexp)
+			maxvdeciexp = pp->pipe_sblk->maxvdeciexp;
+	}
+
+	if (sde_is_custom_client()) {
+		if (catalog->mixer_count && catalog->mixer &&
+				catalog->mixer[0].sblk->maxblendstages) {
+			zpos_max = catalog->mixer[0].sblk->maxblendstages - 1;
+			if (zpos_max > SDE_STAGE_MAX - SDE_STAGE_0 - 1)
+				zpos_max = SDE_STAGE_MAX - SDE_STAGE_0 - 1;
+		}
+	} else if (plane->type != DRM_PLANE_TYPE_PRIMARY) {
+		/* reserve zpos == 0 for primary planes */
+		zpos_def = drm_plane_index(plane) + 1;
+	}
+
+	msm_property_install_range(&psde->property_info, "zpos",
+		0x0, 0, zpos_max, zpos_def, PLANE_PROP_ZPOS);
+
+	msm_property_install_range(&psde->property_info, "alpha",
+		0x0, 0, 255, 255, PLANE_PROP_ALPHA);
+
+	/* Linux default file descriptor range for each process */
+	msm_property_install_range(&psde->property_info, "input_fence",
+		0x0, 0, INR_OPEN_MAX, 0, PLANE_PROP_INPUT_FENCE);
+
+	list_for_each_entry(pp, &psde->phy_plane_head, phy_plane_list) {
+		if (pp->pipe_sblk->maxhdeciexp) {
+			msm_property_install_range(&psde->property_info,
+				"h_decimate", 0x0, 0,
+				pp->pipe_sblk->maxhdeciexp, 0,
+				PLANE_PROP_H_DECIMATE);
+		}
+
+		if (pp->pipe_sblk->maxvdeciexp) {
+			msm_property_install_range(&psde->property_info,
+				"v_decimate", 0x0, 0,
+				pp->pipe_sblk->maxvdeciexp, 0,
+				PLANE_PROP_V_DECIMATE);
+		}
+		break;
+	}
+
+	if (features & BIT(SDE_SSPP_SCALER_QSEED3)) {
+		msm_property_install_volatile_range(&psde->property_info,
+			"scaler_v2", 0x0, 0, ~0, 0, PLANE_PROP_SCALER_V2);
+		msm_property_install_blob(&psde->property_info, "lut_ed", 0,
+			PLANE_PROP_SCALER_LUT_ED);
+		msm_property_install_blob(&psde->property_info, "lut_cir", 0,
+			PLANE_PROP_SCALER_LUT_CIR);
+		msm_property_install_blob(&psde->property_info, "lut_sep", 0,
+			PLANE_PROP_SCALER_LUT_SEP);
+	} else if (features & SDE_SSPP_SCALER) {
+		msm_property_install_volatile_range(&psde->property_info,
+			"scaler_v1", 0x0, 0, ~0, 0, PLANE_PROP_SCALER_V1);
+	}
+
+	if (features & BIT(SDE_SSPP_CSC)) {
+		msm_property_install_volatile_range(&psde->property_info,
+			"csc_v1", 0x0, 0, ~0, 0, PLANE_PROP_CSC_V1);
+	}
+
+	if (features & BIT(SDE_SSPP_HSIC)) {
+		snprintf(feature_name, sizeof(feature_name), "%s%d",
+			"SDE_SSPP_HUE_V",
+			pp->pipe_sblk->hsic_blk.version >> 16);
+		msm_property_install_range(&psde->property_info,
+			feature_name, 0, 0, 0xFFFFFFFF, 0,
+			PLANE_PROP_HUE_ADJUST);
+		snprintf(feature_name, sizeof(feature_name), "%s%d",
+			"SDE_SSPP_SATURATION_V",
+			pp->pipe_sblk->hsic_blk.version >> 16);
+		msm_property_install_range(&psde->property_info,
+			feature_name, 0, 0, 0xFFFFFFFF, 0,
+			PLANE_PROP_SATURATION_ADJUST);
+		snprintf(feature_name, sizeof(feature_name), "%s%d",
+			"SDE_SSPP_VALUE_V",
+			pp->pipe_sblk->hsic_blk.version >> 16);
+		msm_property_install_range(&psde->property_info,
+			feature_name, 0, 0, 0xFFFFFFFF, 0,
+			PLANE_PROP_VALUE_ADJUST);
+		snprintf(feature_name, sizeof(feature_name), "%s%d",
+			"SDE_SSPP_CONTRAST_V",
+			pp->pipe_sblk->hsic_blk.version >> 16);
+		msm_property_install_range(&psde->property_info,
+			feature_name, 0, 0, 0xFFFFFFFF, 0,
+			PLANE_PROP_CONTRAST_ADJUST);
+	}
+
+	/* standard properties */
+	msm_property_install_rotation(&psde->property_info,
+		BIT(DRM_REFLECT_X) | BIT(DRM_REFLECT_Y), PLANE_PROP_ROTATION);
+
+	msm_property_install_enum(&psde->property_info, "blend_op", 0x0, 0,
+		e_blend_op, ARRAY_SIZE(e_blend_op), PLANE_PROP_BLEND_OP,
+		SDE_DRM_BLEND_OP_PREMULTIPLIED);
+
+	msm_property_install_enum(&psde->property_info, "src_config", 0x0, 1,
+		e_src_config, ARRAY_SIZE(e_src_config), PLANE_PROP_SRC_CONFIG,
+		0);
+
+	list_for_each_entry(pp, &psde->phy_plane_head, phy_plane_list) {
+		if (pp->pipe_hw->ops.setup_solidfill)
+			msm_property_install_range(&psde->property_info,
+				"color_fill", 0, 0, 0xFFFFFFFF, 0,
+				PLANE_PROP_COLOR_FILL);
+		break;
+	}
+
+	info = kzalloc(sizeof(struct sde_kms_info), GFP_KERNEL);
+	if (!info) {
+		SDE_ERROR("failed to allocate info memory\n");
+		return;
+	}
+
+	msm_property_install_blob(&psde->property_info, "capabilities",
+		DRM_MODE_PROP_IMMUTABLE, PLANE_PROP_INFO);
+	sde_kms_info_reset(info);
+
+	if (format_list) {
+		sde_kms_info_start(info, "pixel_formats");
+		while (format_list->fourcc_format) {
+			sde_kms_info_append_format(info,
+					format_list->fourcc_format,
+					format_list->modifier);
+			++format_list;
+		}
+		sde_kms_info_stop(info);
+	}
+
+	sde_kms_info_add_keyint(info, "max_linewidth", maxlinewidth);
+	sde_kms_info_add_keyint(info, "max_upscale", maxupscale);
+	sde_kms_info_add_keyint(info, "max_downscale", maxdwnscale);
+	sde_kms_info_add_keyint(info, "max_horizontal_deci", maxhdeciexp);
+	sde_kms_info_add_keyint(info, "max_vertical_deci", maxvdeciexp);
+	msm_property_set_blob(&psde->property_info, &psde->blob_info,
+			info->data, info->len, PLANE_PROP_INFO);
+
+	kfree(info);
+
+	if (features & BIT(SDE_SSPP_MEMCOLOR)) {
+		snprintf(feature_name, sizeof(feature_name), "%s%d",
+			"SDE_SSPP_SKIN_COLOR_V",
+			pp->pipe_sblk->memcolor_blk.version >> 16);
+		msm_property_install_blob(&psde->property_info, feature_name, 0,
+			PLANE_PROP_SKIN_COLOR);
+		snprintf(feature_name, sizeof(feature_name), "%s%d",
+			"SDE_SSPP_SKY_COLOR_V",
+			pp->pipe_sblk->memcolor_blk.version >> 16);
+		msm_property_install_blob(&psde->property_info, feature_name, 0,
+			PLANE_PROP_SKY_COLOR);
+		snprintf(feature_name, sizeof(feature_name), "%s%d",
+			"SDE_SSPP_FOLIAGE_COLOR_V",
+			pp->pipe_sblk->memcolor_blk.version >> 16);
+		msm_property_install_blob(&psde->property_info, feature_name, 0,
+			PLANE_PROP_FOLIAGE_COLOR);
+	}
+
+	msm_property_install_enum(&psde->property_info, "fb_translation_mode",
+			0x0,
+			0, e_fb_translation_mode,
+			ARRAY_SIZE(e_fb_translation_mode),
+			PLANE_PROP_FB_TRANSLATION_MODE, SDE_DRM_FB_NON_SEC);
+}
+
+static inline void _sde_plane_set_csc_v1(struct sde_phy_plane *pp,
+	void *usr_ptr)
+{
+	struct sde_drm_csc_v1 csc_v1;
+	struct sde_plane *psde;
+	int i;
+
+	if (!pp) {
+		SDE_ERROR("invalid phy_plane\n");
+		return;
+	}
+	psde = pp->sde_plane;
+
+	pp->csc_usr_ptr = NULL;
+	if (!usr_ptr) {
+		SDE_DEBUG_PLANE(psde, "csc data removed\n");
+		return;
+	}
+
+	if (copy_from_user(&csc_v1, usr_ptr, sizeof(csc_v1))) {
+		SDE_ERROR_PLANE(psde, "failed to copy csc data\n");
+		return;
+	}
+
+	/* populate from user space */
+	for (i = 0; i < SDE_CSC_MATRIX_COEFF_SIZE; ++i)
+		pp->csc_cfg.csc_mv[i] = csc_v1.ctm_coeff[i] >> 16;
+	for (i = 0; i < SDE_CSC_BIAS_SIZE; ++i) {
+		pp->csc_cfg.csc_pre_bv[i] = csc_v1.pre_bias[i];
+		pp->csc_cfg.csc_post_bv[i] = csc_v1.post_bias[i];
+	}
+	for (i = 0; i < SDE_CSC_CLAMP_SIZE; ++i) {
+		pp->csc_cfg.csc_pre_lv[i] = csc_v1.pre_clamp[i];
+		pp->csc_cfg.csc_post_lv[i] = csc_v1.post_clamp[i];
+	}
+	pp->csc_usr_ptr = &pp->csc_cfg;
+}
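+
+/*
+ * The ctm_coeff values arrive in S31.32 fixed point (per the csc_v1
+ * user API) and the >> 16 above narrows them to the hardware's S15.16;
+ * e.g. a user coefficient of 1.0 in S31.32 becomes 0x10000 (1.0 in
+ * S15.16).
+ */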
+
+static inline void _sde_plane_set_scaler_v1(struct sde_phy_plane *pp,
+	void *usr)
+{
+	struct sde_drm_scaler_v1 scale_v1;
+	struct sde_hw_pixel_ext *pe;
+	struct sde_plane *psde;
+	int i;
+
+	if (!pp) {
+		SDE_ERROR("invalid phy_plane\n");
+		return;
+	}
+	psde = pp->sde_plane;
+
+	pp->pixel_ext_usr = false;
+	if (!usr) {
+		SDE_DEBUG_PLANE(psde, "scale data removed\n");
+		return;
+	}
+
+	if (copy_from_user(&scale_v1, usr, sizeof(scale_v1))) {
+		SDE_ERROR_PLANE(psde, "failed to copy scale data\n");
+		return;
+	}
+
+	/* populate from user space */
+	pe = &(pp->pixel_ext);
+	memset(pe, 0, sizeof(struct sde_hw_pixel_ext));
+	for (i = 0; i < SDE_MAX_PLANES; i++) {
+		pe->init_phase_x[i] = scale_v1.init_phase_x[i];
+		pe->phase_step_x[i] = scale_v1.phase_step_x[i];
+		pe->init_phase_y[i] = scale_v1.init_phase_y[i];
+		pe->phase_step_y[i] = scale_v1.phase_step_y[i];
+
+		pe->horz_filter[i] = scale_v1.horz_filter[i];
+		pe->vert_filter[i] = scale_v1.vert_filter[i];
+	}
+	for (i = 0; i < SDE_MAX_PLANES; i++) {
+		pe->left_ftch[i] = scale_v1.pe.left_ftch[i];
+		pe->right_ftch[i] = scale_v1.pe.right_ftch[i];
+		pe->left_rpt[i] = scale_v1.pe.left_rpt[i];
+		pe->right_rpt[i] = scale_v1.pe.right_rpt[i];
+		pe->roi_w[i] = scale_v1.pe.num_ext_pxls_lr[i];
+
+		pe->top_ftch[i] = scale_v1.pe.top_ftch[i];
+		pe->btm_ftch[i] = scale_v1.pe.btm_ftch[i];
+		pe->top_rpt[i] = scale_v1.pe.top_rpt[i];
+		pe->btm_rpt[i] = scale_v1.pe.btm_rpt[i];
+		pe->roi_h[i] = scale_v1.pe.num_ext_pxls_tb[i];
+	}
+
+	pp->pixel_ext_usr = true;
+
+	SDE_DEBUG_PLANE(psde, "user property data copied\n");
+}
+
+static inline void _sde_plane_set_scaler_v2(struct sde_phy_plane *pp,
+		struct sde_plane_state *pstate, void *usr)
+{
+	struct sde_drm_scaler_v2 scale_v2;
+	struct sde_hw_pixel_ext *pe;
+	int i;
+	struct sde_hw_scaler3_cfg *cfg;
+	struct sde_plane *psde;
+
+	if (!pp) {
+		SDE_ERROR("invalid phy_plane\n");
+		return;
+	}
+	psde = pp->sde_plane;
+
+	cfg = pp->scaler3_cfg;
+	pp->pixel_ext_usr = false;
+	if (!usr) {
+		SDE_DEBUG_PLANE(psde, "scale data removed\n");
+		return;
+	}
+
+	if (copy_from_user(&scale_v2, usr, sizeof(scale_v2))) {
+		SDE_ERROR_PLANE(psde, "failed to copy scale data\n");
+		return;
+	}
+
+	/* detach/ignore user data if 'disabled' */
+	if (!scale_v2.enable) {
+		SDE_DEBUG_PLANE(psde, "scale data removed\n");
+		return;
+	}
+
+	/* populate from user space */
+	pe = &(pp->pixel_ext);
+	memset(pe, 0, sizeof(struct sde_hw_pixel_ext));
+	cfg->enable = scale_v2.enable;
+	cfg->dir_en = scale_v2.dir_en;
+	for (i = 0; i < SDE_MAX_PLANES; i++) {
+		cfg->init_phase_x[i] = scale_v2.init_phase_x[i];
+		cfg->phase_step_x[i] = scale_v2.phase_step_x[i];
+		cfg->init_phase_y[i] = scale_v2.init_phase_y[i];
+		cfg->phase_step_y[i] = scale_v2.phase_step_y[i];
+
+		cfg->preload_x[i] = scale_v2.preload_x[i];
+		cfg->preload_y[i] = scale_v2.preload_y[i];
+		cfg->src_width[i] = scale_v2.src_width[i];
+		cfg->src_height[i] = scale_v2.src_height[i];
+	}
+	cfg->dst_width = scale_v2.dst_width;
+	cfg->dst_height = scale_v2.dst_height;
+
+	cfg->y_rgb_filter_cfg = scale_v2.y_rgb_filter_cfg;
+	cfg->uv_filter_cfg = scale_v2.uv_filter_cfg;
+	cfg->alpha_filter_cfg = scale_v2.alpha_filter_cfg;
+	cfg->blend_cfg = scale_v2.blend_cfg;
+
+	cfg->lut_flag = scale_v2.lut_flag;
+	cfg->dir_lut_idx = scale_v2.dir_lut_idx;
+	cfg->y_rgb_cir_lut_idx = scale_v2.y_rgb_cir_lut_idx;
+	cfg->uv_cir_lut_idx = scale_v2.uv_cir_lut_idx;
+	cfg->y_rgb_sep_lut_idx = scale_v2.y_rgb_sep_lut_idx;
+	cfg->uv_sep_lut_idx = scale_v2.uv_sep_lut_idx;
+
+	cfg->de.enable = scale_v2.de.enable;
+	cfg->de.sharpen_level1 = scale_v2.de.sharpen_level1;
+	cfg->de.sharpen_level2 = scale_v2.de.sharpen_level2;
+	cfg->de.clip = scale_v2.de.clip;
+	cfg->de.limit = scale_v2.de.limit;
+	cfg->de.thr_quiet = scale_v2.de.thr_quiet;
+	cfg->de.thr_dieout = scale_v2.de.thr_dieout;
+	cfg->de.thr_low = scale_v2.de.thr_low;
+	cfg->de.thr_high = scale_v2.de.thr_high;
+	cfg->de.prec_shift = scale_v2.de.prec_shift;
+	for (i = 0; i < SDE_MAX_DE_CURVES; i++) {
+		cfg->de.adjust_a[i] = scale_v2.de.adjust_a[i];
+		cfg->de.adjust_b[i] = scale_v2.de.adjust_b[i];
+		cfg->de.adjust_c[i] = scale_v2.de.adjust_c[i];
+	}
+	for (i = 0; i < SDE_MAX_PLANES; i++) {
+		pe->left_ftch[i] = scale_v2.pe.left_ftch[i];
+		pe->right_ftch[i] = scale_v2.pe.right_ftch[i];
+		pe->left_rpt[i] = scale_v2.pe.left_rpt[i];
+		pe->right_rpt[i] = scale_v2.pe.right_rpt[i];
+		pe->roi_w[i] = scale_v2.pe.num_ext_pxls_lr[i];
+
+		pe->top_ftch[i] = scale_v2.pe.top_ftch[i];
+		pe->btm_ftch[i] = scale_v2.pe.btm_ftch[i];
+		pe->top_rpt[i] = scale_v2.pe.top_rpt[i];
+		pe->btm_rpt[i] = scale_v2.pe.btm_rpt[i];
+		pe->roi_h[i] = scale_v2.pe.num_ext_pxls_tb[i];
+	}
+	pp->pixel_ext_usr = true;
+
+	SDE_DEBUG_PLANE(psde, "user property data copied\n");
+}
+
+static int sde_plane_atomic_set_property(struct drm_plane *plane,
+		struct drm_plane_state *state, struct drm_property *property,
+		uint64_t val)
+{
+	struct sde_plane *psde = plane ? to_sde_plane(plane) : NULL;
+	struct sde_plane_state *pstate;
+	int idx, ret = -EINVAL;
+	struct sde_phy_plane *pp;
+
+	SDE_DEBUG_PLANE(psde, "\n");
+
+	if (!plane) {
+		SDE_ERROR("invalid plane\n");
+	} else if (!state) {
+		SDE_ERROR_PLANE(psde, "invalid state\n");
+	} else {
+		pstate = to_sde_plane_state(state);
+		ret = msm_property_atomic_set(&psde->property_info,
+				pstate->property_values, pstate->property_blobs,
+				property, val);
+		if (!ret) {
+			idx = msm_property_index(&psde->property_info,
+					property);
+			switch (idx) {
+			case PLANE_PROP_INPUT_FENCE:
+				_sde_plane_set_input_fence(psde, pstate, val);
+				break;
+			case PLANE_PROP_CSC_V1:
+				list_for_each_entry(pp, &psde->phy_plane_head,
+					phy_plane_list) {
+					_sde_plane_set_csc_v1(pp, (void *)val);
+				}
+				break;
+			case PLANE_PROP_SCALER_V1:
+				list_for_each_entry(pp, &psde->phy_plane_head,
+					phy_plane_list) {
+					_sde_plane_set_scaler_v1(pp,
+						(void *)val);
+				}
+				break;
+			case PLANE_PROP_SCALER_V2:
+				list_for_each_entry(pp, &psde->phy_plane_head,
+					phy_plane_list) {
+					_sde_plane_set_scaler_v2(pp, pstate,
+						(void *)val);
+				}
+				break;
+			default:
+				/* nothing to do */
+				break;
+			}
+		}
+	}
+
+	return ret;
+}
+
+static int sde_plane_set_property(struct drm_plane *plane,
+		struct drm_property *property, uint64_t val)
+{
+	SDE_DEBUG("\n");
+
+	return sde_plane_atomic_set_property(plane,
+			plane->state, property, val);
+}
+
+static int sde_plane_atomic_get_property(struct drm_plane *plane,
+		const struct drm_plane_state *state,
+		struct drm_property *property, uint64_t *val)
+{
+	struct sde_plane *psde = plane ? to_sde_plane(plane) : NULL;
+	struct sde_plane_state *pstate;
+	int ret = -EINVAL;
+
+	if (!plane) {
+		SDE_ERROR("invalid plane\n");
+	} else if (!state) {
+		SDE_ERROR("invalid state\n");
+	} else {
+		SDE_DEBUG_PLANE(psde, "\n");
+		pstate = to_sde_plane_state(state);
+		ret = msm_property_atomic_get(&psde->property_info,
+				pstate->property_values, pstate->property_blobs,
+				property, val);
+	}
+
+	return ret;
+}
+
+static void sde_plane_destroy(struct drm_plane *plane)
+{
+	struct sde_plane *psde = plane ? to_sde_plane(plane) : NULL;
+	struct sde_phy_plane *pp, *n;
+
+	SDE_DEBUG_PLANE(psde, "\n");
+
+	if (psde) {
+		list_for_each_entry(pp, &psde->phy_plane_head, phy_plane_list) {
+			_sde_plane_set_qos_ctrl(pp,
+				false, SDE_PLANE_QOS_PANIC_CTRL);
+		}
+		debugfs_remove_recursive(psde->debugfs_root);
+
+		if (psde->blob_info)
+			drm_property_unreference_blob(psde->blob_info);
+		msm_property_destroy(&psde->property_info);
+		mutex_destroy(&psde->lock);
+
+		drm_plane_helper_disable(plane);
+
+		/* this will destroy the states as well */
+		drm_plane_cleanup(plane);
+
+		list_for_each_entry_safe(pp, n,
+				&psde->phy_plane_head, phy_plane_list) {
+			if (pp->pipe_hw)
+				sde_hw_sspp_destroy(pp->pipe_hw);
+			list_del(&pp->phy_plane_list);
+			kfree(pp);
+		}
+
+		kfree(psde);
+	}
+}
+
+static void sde_plane_destroy_state(struct drm_plane *plane,
+		struct drm_plane_state *state)
+{
+	struct sde_plane *psde;
+	struct sde_plane_state *pstate;
+
+	if (!plane || !state) {
+		SDE_ERROR("invalid arg(s), plane %d state %d\n",
+				plane != NULL, state != NULL);
+		return;
+	}
+
+	psde = to_sde_plane(plane);
+	pstate = to_sde_plane_state(state);
+
+	SDE_DEBUG_PLANE(psde, "\n");
+
+	/* remove ref count for frame buffers */
+	if (state->fb)
+		drm_framebuffer_unreference(state->fb);
+
+	/* remove ref count for fence */
+	if (pstate->input_fence)
+		sde_sync_put(pstate->input_fence);
+
+	/* destroy value helper */
+	msm_property_destroy_state(&psde->property_info, pstate,
+			pstate->property_values, pstate->property_blobs);
+}
+
+static struct drm_plane_state *
+sde_plane_duplicate_state(struct drm_plane *plane)
+{
+	struct sde_plane *psde;
+	struct sde_plane_state *pstate;
+	struct sde_plane_state *old_state;
+	uint64_t input_fence_default;
+
+	if (!plane) {
+		SDE_ERROR("invalid plane\n");
+		return NULL;
+	} else if (!plane->state) {
+		SDE_ERROR("invalid plane state\n");
+		return NULL;
+	}
+
+	old_state = to_sde_plane_state(plane->state);
+	psde = to_sde_plane(plane);
+	pstate = msm_property_alloc_state(&psde->property_info);
+	if (!pstate) {
+		SDE_ERROR_PLANE(psde, "failed to allocate state\n");
+		return NULL;
+	}
+
+	SDE_DEBUG_PLANE(psde, "\n");
+
+	/* duplicate value helper */
+	msm_property_duplicate_state(&psde->property_info, old_state, pstate,
+			pstate->property_values, pstate->property_blobs);
+
+	/* add ref count for frame buffer */
+	if (pstate->base.fb)
+		drm_framebuffer_reference(pstate->base.fb);
+
+	/* clear out any input fence */
+	pstate->input_fence = NULL;
+	input_fence_default = msm_property_get_default(
+			&psde->property_info, PLANE_PROP_INPUT_FENCE);
+	msm_property_set_property(&psde->property_info, pstate->property_values,
+			PLANE_PROP_INPUT_FENCE, input_fence_default);
+
+	pstate->dirty = 0x0;
+	pstate->pending = false;
+
+	return &pstate->base;
+}
+
+static void sde_plane_reset(struct drm_plane *plane)
+{
+	struct sde_plane *psde;
+	struct sde_plane_state *pstate;
+
+	if (!plane) {
+		SDE_ERROR("invalid plane\n");
+		return;
+	}
+
+	psde = to_sde_plane(plane);
+	SDE_DEBUG_PLANE(psde, "\n");
+
+	/* remove previous state, if present */
+	if (plane->state) {
+		sde_plane_destroy_state(plane, plane->state);
+		plane->state = NULL;
+	}
+
+	pstate = msm_property_alloc_state(&psde->property_info);
+	if (!pstate) {
+		SDE_ERROR_PLANE(psde, "failed to allocate state\n");
+		return;
+	}
+
+	/* reset value helper */
+	msm_property_reset_state(&psde->property_info, pstate,
+			pstate->property_values, pstate->property_blobs);
+
+	pstate->base.plane = plane;
+
+	plane->state = &pstate->base;
+}
+
+static const struct drm_plane_funcs sde_plane_funcs = {
+		.update_plane = drm_atomic_helper_update_plane,
+		.disable_plane = drm_atomic_helper_disable_plane,
+		.destroy = sde_plane_destroy,
+		.set_property = sde_plane_set_property,
+		.atomic_set_property = sde_plane_atomic_set_property,
+		.atomic_get_property = sde_plane_atomic_get_property,
+		.reset = sde_plane_reset,
+		.atomic_duplicate_state = sde_plane_duplicate_state,
+		.atomic_destroy_state = sde_plane_destroy_state,
+};
+
+static const struct drm_plane_helper_funcs sde_plane_helper_funcs = {
+		.prepare_fb = sde_plane_prepare_fb,
+		.cleanup_fb = sde_plane_cleanup_fb,
+		.atomic_check = sde_plane_atomic_check,
+		.atomic_update = sde_plane_atomic_update,
+};
+
+enum sde_sspp sde_plane_pipe(struct drm_plane *plane, uint32_t index)
+{
+	struct sde_plane *sde_plane = to_sde_plane(plane);
+	struct sde_phy_plane *pp;
+	int i = 0;
+	enum sde_sspp default_sspp = SSPP_NONE;
+
+	list_for_each_entry(pp, &sde_plane->phy_plane_head, phy_plane_list) {
+		if (i == 0)
+			default_sspp = pp->pipe;
+		if (i == index)
+			return pp->pipe;
+		i++;
+	}
+
+	return default_sspp;
+}
+
+static ssize_t _sde_plane_danger_read(struct file *file,
+			char __user *buff, size_t count, loff_t *ppos)
+{
+	struct sde_kms *kms = file->private_data;
+	struct sde_mdss_cfg *cfg = kms->catalog;
+	int len = 0;
+	char buf[40] = {'\0'};
+
+	if (!cfg)
+		return -ENODEV;
+
+	if (*ppos)
+		return 0; /* the end */
+
+	len = snprintf(buf, sizeof(buf), "%d\n", !kms->has_danger_ctrl);
+	if (len < 0 || len >= sizeof(buf))
+		return 0;
+
+	if ((count < sizeof(buf)) || copy_to_user(buff, buf, len))
+		return -EFAULT;
+
+	*ppos += len;   /* increase offset */
+
+	return len;
+}
+
+static void _sde_plane_set_danger_state(struct sde_kms *kms, bool enable)
+{
+	struct drm_plane *plane;
+	struct sde_plane *psde;
+	struct sde_phy_plane *pp;
+
+	drm_for_each_plane(plane, kms->dev) {
+		if (plane->fb && plane->state) {
+			psde = to_sde_plane(plane);
+			list_for_each_entry(pp, &psde->phy_plane_head,
+				phy_plane_list) {
+				sde_plane_danger_signal_ctrl(pp, enable);
+			}
+			SDE_DEBUG("plane:%d img:%dx%d ",
+				plane->base.id, plane->fb->width,
+				plane->fb->height);
+			SDE_DEBUG("src[%d,%d,%d,%d] dst[%d,%d,%d,%d]\n",
+				plane->state->src_x >> 16,
+				plane->state->src_y >> 16,
+				plane->state->src_w >> 16,
+				plane->state->src_h >> 16,
+				plane->state->crtc_x, plane->state->crtc_y,
+				plane->state->crtc_w, plane->state->crtc_h);
+		} else {
+			SDE_DEBUG("Inactive plane:%d\n", plane->base.id);
+		}
+	}
+}
+
+static ssize_t _sde_plane_danger_write(struct file *file,
+			const char __user *user_buf, size_t count, loff_t *ppos)
+{
+	struct sde_kms *kms = file->private_data;
+	struct sde_mdss_cfg *cfg = kms->catalog;
+	int disable_panic;
+	char buf[10];
+
+	if (!cfg)
+		return -ENODEV;
+
+	if (count >= sizeof(buf))
+		return -EFAULT;
+
+	if (copy_from_user(buf, user_buf, count))
+		return -EFAULT;
+
+	buf[count] = 0;	/* end of string */
+
+	if (kstrtoint(buf, 0, &disable_panic))
+		return -EFAULT;
+
+	if (disable_panic) {
+		/* Disable panic signal for all active pipes */
+		SDE_DEBUG("Disabling danger:\n");
+		_sde_plane_set_danger_state(kms, false);
+		kms->has_danger_ctrl = false;
+	} else {
+		/* Enable panic signal for all active pipes */
+		SDE_DEBUG("Enabling danger:\n");
+		kms->has_danger_ctrl = true;
+		_sde_plane_set_danger_state(kms, true);
+	}
+
+	return count;
+}
+
+static const struct file_operations sde_plane_danger_enable = {
+	.open = simple_open,
+	.read = _sde_plane_danger_read,
+	.write = _sde_plane_danger_write,
+};
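+
+/*
+ * Illustrative debugfs usage from user space (the debugfs mount point and
+ * plane directory name depend on the system):
+ *	echo 1 > <debugfs>/<plane_name>/disable_danger	(disable danger/panic)
+ *	cat <debugfs>/<plane_name>/disable_danger	(query current state)
+ */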
+
+static void _sde_plane_init_debugfs(struct sde_plane *psde,
+	struct sde_kms *kms)
+{
+	const struct sde_sspp_sub_blks *sblk = NULL;
+	const struct sde_sspp_cfg *cfg = NULL;
+	struct sde_phy_plane *pp;
+
+	if (!psde || !kms) {
+		SDE_ERROR("invalid arg(s), psde %d kms %d\n",
+					psde != NULL, kms != NULL);
+		return;
+	}
+
+	/* create overall sub-directory for the pipe */
+	psde->debugfs_root = debugfs_create_dir(psde->pipe_name,
+				sde_debugfs_get_root(kms));
+	if (!psde->debugfs_root)
+		return;
+
+	list_for_each_entry(pp, &psde->phy_plane_head, phy_plane_list) {
+		debugfs_create_u32("pipe", S_IRUGO | S_IWUSR,
+				psde->debugfs_root, &pp->pipe);
+
+		if (!pp->pipe_hw || !pp->pipe_hw->cap ||
+			!pp->pipe_hw->cap->sblk)
+			continue;
+		cfg = pp->pipe_hw->cap;
+		sblk = cfg->sblk;
+
+		/* don't error check these */
+		debugfs_create_x32("features", S_IRUGO | S_IWUSR,
+				psde->debugfs_root, &pp->features);
+
+		/* add register dump support */
+		sde_debugfs_setup_regset32(&psde->debugfs_src,
+				sblk->src_blk.base + cfg->base,
+				sblk->src_blk.len,
+				kms);
+		sde_debugfs_create_regset32("src_blk", S_IRUGO,
+				psde->debugfs_root, &psde->debugfs_src);
+
+		sde_debugfs_setup_regset32(&psde->debugfs_scaler,
+				sblk->scaler_blk.base + cfg->base,
+				sblk->scaler_blk.len,
+				kms);
+		sde_debugfs_create_regset32("scaler_blk", S_IRUGO,
+				psde->debugfs_root,
+				&psde->debugfs_scaler);
+		debugfs_create_bool("default_scaling",
+				0644,
+				psde->debugfs_root,
+				&psde->debugfs_default_scale);
+
+		sde_debugfs_setup_regset32(&psde->debugfs_csc,
+				sblk->csc_blk.base + cfg->base,
+				sblk->csc_blk.len,
+				kms);
+		sde_debugfs_create_regset32("csc_blk", S_IRUGO,
+				psde->debugfs_root, &psde->debugfs_csc);
+
+		debugfs_create_u32("xin_id",
+				S_IRUGO,
+				psde->debugfs_root,
+				(u32 *) &cfg->xin_id);
+		debugfs_create_u32("clk_ctrl",
+				S_IRUGO,
+				psde->debugfs_root,
+				(u32 *) &cfg->clk_ctrl);
+		debugfs_create_x32("creq_vblank",
+				S_IRUGO | S_IWUSR,
+				psde->debugfs_root,
+				(u32 *) &sblk->creq_vblank);
+		debugfs_create_x32("danger_vblank",
+				S_IRUGO | S_IWUSR,
+				psde->debugfs_root,
+				(u32 *) &sblk->danger_vblank);
+
+		debugfs_create_file("disable_danger",
+				S_IRUGO | S_IWUSR,
+				psde->debugfs_root,
+				kms, &sde_plane_danger_enable);
+
+		break;
+	}
+}
+
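+/*
+ * _sde_init_phy_plane - attach one SSPP hardware pipe to the virtual plane,
+ *	caching its feature mask, catalog sub-block and supported formats
+ */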
+static int _sde_init_phy_plane(struct sde_kms *sde_kms,
+	struct sde_plane *psde,	uint32_t pipe, uint32_t index,
+	struct sde_phy_plane *pp)
+{
+	int rc = 0;
+
+	pp->pipe_hw = sde_rm_get_hw_by_id(&sde_kms->rm,
+		SDE_HW_BLK_SSPP, pipe);
+	if (!pp->pipe_hw) {
+		SDE_ERROR("Not found resource for id=%d\n", pipe);
+		rc = -EINVAL;
+		goto end;
+	} else if (!pp->pipe_hw->cap || !pp->pipe_hw->cap->sblk) {
+		SDE_ERROR("[%u]SSPP returned invalid cfg\n", pipe);
+		rc = -EINVAL;
+		goto end;
+	}
+
+	/* cache features mask for later */
+	pp->features = pp->pipe_hw->cap->features;
+	pp->pipe_sblk = pp->pipe_hw->cap->sblk;
+	if (!pp->pipe_sblk) {
+		SDE_ERROR("invalid sblk on pipe %d\n", pipe);
+		rc = -EINVAL;
+		goto end;
+	}
+
+	if (pp->features & BIT(SDE_SSPP_SCALER_QSEED3)) {
+		pp->scaler3_cfg = kzalloc(sizeof(struct sde_hw_scaler3_cfg),
+			GFP_KERNEL);
+		if (!pp->scaler3_cfg) {
+			SDE_ERROR("[%u]failed to allocate scale struct\n",
+				pipe);
+			rc = -ENOMEM;
+			goto end;
+		}
+	}
+
+	/* add plane to DRM framework */
+	pp->nformats = sde_populate_formats(
+				pp->pipe_sblk->format_list,
+				pp->formats,
+				NULL,
+				ARRAY_SIZE(pp->formats));
+
+	if (!pp->nformats) {
+		SDE_ERROR("[%u]no valid formats for plane\n", pipe);
+		if (pp->scaler3_cfg) {
+			kzfree(pp->scaler3_cfg);
+			pp->scaler3_cfg = NULL;
+		}
+
+		rc = -EINVAL;
+		goto end;
+	}
+
+	pp->sde_plane = psde;
+	pp->pipe = pipe;
+	pp->index = index;
+
+end:
+	return rc;
+}
+
+/* initialize plane */
+struct drm_plane *sde_plane_init(struct drm_device *dev,
+		uint32_t pipe, bool primary_plane,
+		unsigned long possible_crtcs, bool vp_enabled)
+{
+	struct drm_plane *plane = NULL;
+	struct sde_plane *psde;
+	struct sde_phy_plane *pp, *n;
+	struct msm_drm_private *priv;
+	struct sde_kms *kms;
+	enum drm_plane_type type;
+	int ret = -EINVAL;
+	struct sde_vp_cfg *vp;
+	struct sde_vp_sub_blks *vp_sub;
+	uint32_t features = 0xFFFFFFFF, nformats = 64, formats[64];
+	uint32_t index = 0;
+
+	if (!dev) {
+		SDE_ERROR("[%u]device is NULL\n", pipe);
+		goto exit;
+	}
+
+	priv = dev->dev_private;
+	if (!priv) {
+		SDE_ERROR("[%u]private data is NULL\n", pipe);
+		goto exit;
+	}
+
+	if (!priv->kms) {
+		SDE_ERROR("[%u]invalid KMS reference\n", pipe);
+		goto exit;
+	}
+	kms = to_sde_kms(priv->kms);
+
+	if (!kms->catalog) {
+		SDE_ERROR("[%u]invalid catalog reference\n", pipe);
+		goto exit;
+	}
+
+	/* create and zero local structure */
+	psde = kzalloc(sizeof(*psde), GFP_KERNEL);
+	if (!psde) {
+		SDE_ERROR("[%u]failed to allocate local plane struct\n", pipe);
+		ret = -ENOMEM;
+		goto exit;
+	}
+
+	/* cache local stuff for later */
+	plane = &psde->base;
+
+	INIT_LIST_HEAD(&psde->phy_plane_head);
+
+	/* initialize underlying h/w driver */
+	if (vp_enabled) {
+		vp = &(kms->catalog->vp[pipe]);
+		list_for_each_entry(vp_sub, &vp->sub_blks, pipeid_list) {
+			pp = kzalloc(sizeof(*pp), GFP_KERNEL);
+			if (!pp) {
+				SDE_ERROR("out of memory\n");
+				ret = -ENOMEM;
+				goto clean_plane;
+			}
+
+			ret = _sde_init_phy_plane(kms, psde, vp_sub->sspp_id,
+				index, pp);
+			if (ret) {
+				SDE_ERROR("_sde_init_phy_plane error vp=%d\n",
+					pipe);
+				kfree(pp);
+				ret = -EINVAL;
+				goto clean_plane;
+			}
+			/* Get common features for all pipes */
+			features &= pp->features;
+			if (nformats > pp->nformats) {
+				nformats = pp->nformats;
+				memcpy(formats, pp->formats,
+					sizeof(formats));
+			}
+			list_add_tail(&pp->phy_plane_list,
+							&psde->phy_plane_head);
+			index++;
+			psde->num_of_phy_planes++;
+		}
+	} else {
+		pp = kzalloc(sizeof(*pp), GFP_KERNEL);
+		if (!pp) {
+			SDE_ERROR("out of memory\n");
+			ret = -ENOMEM;
+			goto clean_plane;
+		}
+
+		ret = _sde_init_phy_plane(kms, psde, pipe, index, pp);
+		if (ret) {
+			SDE_ERROR("_sde_init_phy_plane error id=%d\n",
+				pipe);
+			kfree(pp);
+			ret = -EINVAL;
+			goto clean_plane;
+		}
+		features = pp->features;
+		nformats = pp->nformats;
+		memcpy(formats, pp->formats, sizeof(formats));
+		list_add_tail(&pp->phy_plane_list,
+						&psde->phy_plane_head);
+		psde->num_of_phy_planes++;
+	}
+
+	if (features & BIT(SDE_SSPP_CURSOR))
+		type = DRM_PLANE_TYPE_CURSOR;
+	else if (primary_plane)
+		type = DRM_PLANE_TYPE_PRIMARY;
+	else
+		type = DRM_PLANE_TYPE_OVERLAY;
+	ret = drm_universal_plane_init(dev, plane, possible_crtcs,
+			&sde_plane_funcs, formats, nformats, type);
+	if (ret)
+		goto clean_plane;
+
+	/* success! finalize initialization */
+	drm_plane_helper_add(plane, &sde_plane_helper_funcs);
+
+	msm_property_init(&psde->property_info, &plane->base, dev,
+			priv->plane_property, psde->property_data,
+			PLANE_PROP_COUNT, PLANE_PROP_BLOBCOUNT,
+			sizeof(struct sde_plane_state));
+
+	_sde_plane_install_properties(plane, kms->catalog);
+
+	/* save user friendly pipe name for later */
+	snprintf(psde->pipe_name, SDE_NAME_SIZE, "plane%u", plane->base.id);
+
+	mutex_init(&psde->lock);
+
+	_sde_plane_init_debugfs(psde, kms);
+
+	DRM_INFO("%s created for pipe %u\n", psde->pipe_name, pipe);
+	return plane;
+
+clean_plane:
+	if (psde) {
+		list_for_each_entry_safe(pp, n,
+			&psde->phy_plane_head, phy_plane_list) {
+			if (pp->pipe_hw)
+				sde_hw_sspp_destroy(pp->pipe_hw);
+
+			kfree(pp->scaler3_cfg);
+			list_del(&pp->phy_plane_list);
+			kfree(pp);
+		}
+		kfree(psde);
+	}
+
+exit:
+	return ERR_PTR(ret);
+}
diff -Nruw linux-4.4.115-fbx/drivers/gpu/drm/msm/sde./sde_plane.h linux-4.4.115-fbx/drivers/gpu/drm/msm/sde/sde_plane.h
--- linux-4.4.115-fbx/drivers/gpu/drm/msm/sde./sde_plane.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/gpu/drm/msm/sde/sde_plane.h	2019-01-22 16:16:23.523246588 +0100
@@ -0,0 +1,103 @@
+/*
+ * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _SDE_PLANE_H_
+#define _SDE_PLANE_H_
+
+#include <drm/drm_crtc.h>
+
+#include "msm_prop.h"
+#include "sde_hw_mdss.h"
+
+/**
+ * struct sde_plane_state: Define sde extension of drm plane state object
+ * @base:	base drm plane state object
+ * @property_values:	cached plane property values
+ * @property_blobs:	blob properties
+ * @input_fence:	dereferenced input fence pointer
+ * @stage:	assigned by crtc blender
+ * @dirty:	bitmask for which pipe h/w config functions need to be updated
+ * @pending:	whether the current update is still pending
+ */
+struct sde_plane_state {
+	struct drm_plane_state base;
+	uint64_t property_values[PLANE_PROP_COUNT];
+	struct drm_property_blob *property_blobs[PLANE_PROP_BLOBCOUNT];
+	void *input_fence;
+	enum sde_stage stage;
+	uint32_t dirty;
+	bool pending;
+};
+
+#define to_sde_plane_state(x) \
+	container_of(x, struct sde_plane_state, base)
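+/* e.g. struct sde_plane_state *pstate = to_sde_plane_state(plane->state); */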
+
+/**
+ * sde_plane_get_property - Query integer value of plane property
+ * @S: Pointer to plane state
+ * @X: Property index, from enum msm_mdp_plane_property
+ * Returns: Integer value of requested property
+ */
+#define sde_plane_get_property(S, X) \
+	((S) && ((X) < PLANE_PROP_COUNT) ? ((S)->property_values[(X)]) : 0)
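+
+/*
+ * Example (illustrative; any index from enum msm_mdp_plane_property works):
+ *	uint64_t fence_fd = sde_plane_get_property(pstate,
+ *			PLANE_PROP_INPUT_FENCE);
+ */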
+
+/**
+ * sde_plane_pipe - return sspp identifier for the given plane
+ * @plane:   Pointer to DRM plane object
+ * @index:   Plane index
+ * Returns: sspp identifier of the given plane
+ */
+enum sde_sspp sde_plane_pipe(struct drm_plane *plane, uint32_t index);
+
+/**
+ * sde_plane_flush - final plane operations before commit flush
+ * @plane: Pointer to drm plane structure
+ */
+void sde_plane_flush(struct drm_plane *plane);
+
+/**
+ * sde_plane_init - create new sde plane for the given pipe
+ * @dev:   Pointer to DRM device
+ * @pipe:  sde hardware pipe identifier
+ * @primary_plane: true if this pipe is the primary plane for its crtc
+ * @possible_crtcs: bitmask of crtcs that can be attached to the given pipe
+ * @vp_enabled:  Flag indicating whether virtual planes are enabled
+ */
+struct drm_plane *sde_plane_init(struct drm_device *dev,
+		uint32_t pipe, bool primary_plane,
+		unsigned long possible_crtcs, bool vp_enabled);
+
+/**
+ * sde_plane_wait_input_fence - wait for input fence object
+ * @plane:   Pointer to DRM plane object
+ * @wait_ms: Wait timeout value
+ * Returns: Zero on success
+ */
+int sde_plane_wait_input_fence(struct drm_plane *plane, uint32_t wait_ms);
+
+/**
+ * sde_plane_color_fill - enables color fill on plane
+ * @plane:  Pointer to DRM plane object
+ * @color:  RGB fill color value, [23..16] Blue, [15..8] Green, [7..0] Red
+ * @alpha:  8-bit fill alpha value, 255 selects 100% alpha
+ * Returns: 0 on success
+ */
+int sde_plane_color_fill(struct drm_plane *plane,
+		uint32_t color, uint32_t alpha);
+
+#endif /* _SDE_PLANE_H_ */
diff -Nruw linux-4.4.115-fbx/drivers/gpu/drm/msm/sde./sde_rm.c linux-4.4.115-fbx/drivers/gpu/drm/msm/sde/sde_rm.c
--- linux-4.4.115-fbx/drivers/gpu/drm/msm/sde./sde_rm.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/gpu/drm/msm/sde/sde_rm.c	2019-01-22 16:16:23.523246588 +0100
@@ -0,0 +1,1354 @@
+/*
+ * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt)	"[drm:%s] " fmt, __func__
+#include "sde_kms.h"
+#include "sde_hw_lm.h"
+#include "sde_hw_ctl.h"
+#include "sde_hw_cdm.h"
+#include "sde_hw_dspp.h"
+#include "sde_hw_pingpong.h"
+#include "sde_hw_intf.h"
+#include "sde_hw_wb.h"
+#include "sde_encoder.h"
+#include "sde_connector.h"
+#include "sde_hw_sspp.h"
+
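+/*
+ * A block is "reserved by other" when it carries an active reservation
+ * whose encoder id differs from that of the reservation being checked.
+ */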
+#define RESERVED_BY_OTHER(h, r) \
+	((h)->rsvp && ((h)->rsvp->enc_id != (r)->enc_id))
+
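+/*
+ * RM_RQ_*: test request bits in sde_rm_requirements.top_ctrl, populated
+ * from the connector's TOPOLOGY_CONTROL property.
+ */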
+#define RM_RQ_LOCK(r) ((r)->top_ctrl & BIT(SDE_RM_TOPCTL_RESERVE_LOCK))
+#define RM_RQ_CLEAR(r) ((r)->top_ctrl & BIT(SDE_RM_TOPCTL_RESERVE_CLEAR))
+#define RM_RQ_DSPP(r) ((r)->top_ctrl & BIT(SDE_RM_TOPCTL_DSPP))
+#define RM_RQ_PPSPLIT(r) ((r)->top_ctrl & BIT(SDE_RM_TOPCTL_PPSPLIT))
+#define RM_RQ_FORCE_TILING(r) ((r)->top_ctrl & BIT(SDE_RM_TOPCTL_FORCE_TILING))
+
+/**
+ * struct sde_rm_requirements - Reservation requirements parameter bundle
+ * @top_name:	DRM<->HW topology use case user is trying to enable
+ * @top_ctrl:	Topology control bitmask, from the TOPOLOGY_CONTROL property
+ * @num_lm:	Number of layer mixers needed in the use case
+ * @num_ctl:	Number of control paths needed in the use case
+ * @needs_split_display:	Whether a split-display capable CTL is needed
+ * @hw_res:	Hardware resources required as reported by the encoders
+ */
+struct sde_rm_requirements {
+	enum sde_rm_topology_name top_name;
+	uint64_t top_ctrl;
+	int num_lm;
+	int num_ctl;
+	bool needs_split_display;
+	struct sde_encoder_hw_resources hw_res;
+};
+
+/**
+ * struct sde_rm_rsvp - Use Case Reservation tagging structure
+ *	Used to tag HW blocks as reserved by a CRTC->Encoder->Connector chain
+ *	By tagging blocks rather than keeping lists of pointers to the HW
+ *	blocks used, we can avoid some list management, since we don't know
+ *	how many blocks of each type a given use case may require.
+ * @list:	List head for list of all reservations
+ * @seq:	Global RSVP sequence number for debugging, especially for
+ *		differentiating different allocations for the same encoder.
+ * @enc_id:	Reservations are tracked by Encoder DRM object ID.
+ *		CRTCs may be connected to multiple Encoders.
+ *		An encoder or connector id identifies the display path.
+ * @topology:	DRM<->HW topology use case
+ */
+struct sde_rm_rsvp {
+	struct list_head list;
+	uint32_t seq;
+	uint32_t enc_id;
+	enum sde_rm_topology_name topology;
+};
+
+/**
+ * struct sde_rm_hw_blk - hardware block tracking list member
+ * @list:	List head for list of all hardware blocks tracking items
+ * @rsvp:	Pointer to use case reservation if reserved by a client
+ * @rsvp_nxt:	Temporary pointer used during reservation to the incoming
+ *		request. Will be swapped into rsvp if proposal is accepted
+ * @type:	Type of hardware block this structure tracks
+ * @type_name:	Human-readable name of the block type, used in debug logs
+ * @id:		Hardware ID number, within its own space, i.e. LM_X
+ * @catalog:	Pointer to the hardware catalog entry for this block
+ * @hw:		Pointer to the hardware register access object for this block
+ */
+struct sde_rm_hw_blk {
+	struct list_head list;
+	struct sde_rm_rsvp *rsvp;
+	struct sde_rm_rsvp *rsvp_nxt;
+	enum sde_hw_blk_type type;
+	const char *type_name;
+	uint32_t id;
+	void *catalog;
+	void *hw;
+};
+
+/**
+ * sde_rm_dbg_rsvp_stage - enum of steps in making reservation for event logging
+ */
+enum sde_rm_dbg_rsvp_stage {
+	SDE_RM_STAGE_BEGIN,
+	SDE_RM_STAGE_AFTER_CLEAR,
+	SDE_RM_STAGE_AFTER_RSVPNEXT,
+	SDE_RM_STAGE_FINAL
+};
+
+static void _sde_rm_print_rsvps(
+		struct sde_rm *rm,
+		enum sde_rm_dbg_rsvp_stage stage)
+{
+	struct sde_rm_rsvp *rsvp;
+	struct sde_rm_hw_blk *blk;
+	enum sde_hw_blk_type type;
+
+	SDE_DEBUG("%d\n", stage);
+
+	list_for_each_entry(rsvp, &rm->rsvps, list) {
+		SDE_DEBUG("%d rsvp[s%ue%u] topology %d\n", stage, rsvp->seq,
+				rsvp->enc_id, rsvp->topology);
+		SDE_EVT32(stage, rsvp->seq, rsvp->enc_id, rsvp->topology);
+	}
+
+	for (type = 0; type < SDE_HW_BLK_MAX; type++) {
+		list_for_each_entry(blk, &rm->hw_blks[type], list) {
+			if (!blk->rsvp && !blk->rsvp_nxt)
+				continue;
+
+			SDE_DEBUG("%d rsvp[s%ue%u->s%ue%u] %s %d\n", stage,
+				(blk->rsvp) ? blk->rsvp->seq : 0,
+				(blk->rsvp) ? blk->rsvp->enc_id : 0,
+				(blk->rsvp_nxt) ? blk->rsvp_nxt->seq : 0,
+				(blk->rsvp_nxt) ? blk->rsvp_nxt->enc_id : 0,
+				blk->type_name, blk->id);
+
+			SDE_EVT32(stage,
+				(blk->rsvp) ? blk->rsvp->seq : 0,
+				(blk->rsvp) ? blk->rsvp->enc_id : 0,
+				(blk->rsvp_nxt) ? blk->rsvp_nxt->seq : 0,
+				(blk->rsvp_nxt) ? blk->rsvp_nxt->enc_id : 0,
+				blk->type, blk->id);
+		}
+	}
+}
+
+struct sde_hw_mdp *sde_rm_get_mdp(struct sde_rm *rm)
+{
+	return rm->hw_mdp;
+}
+
+void sde_rm_init_hw_iter(
+		struct sde_rm_hw_iter *iter,
+		uint32_t enc_id,
+		enum sde_hw_blk_type type)
+{
+	memset(iter, 0, sizeof(*iter));
+	iter->enc_id = enc_id;
+	iter->type = type;
+}
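+
+/*
+ * Typical iteration pattern (illustrative), mirroring the callers below:
+ *	struct sde_rm_hw_iter iter;
+ *
+ *	sde_rm_init_hw_iter(&iter, enc_id, SDE_HW_BLK_LM);
+ *	while (sde_rm_get_hw(rm, &iter))
+ *		use(iter.hw);
+ *
+ * An enc_id of zero iterates all blocks of the type, reserved or not.
+ */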
+
+static bool _sde_rm_get_hw_locked(struct sde_rm *rm, struct sde_rm_hw_iter *i)
+{
+	struct list_head *blk_list;
+
+	if (!rm || !i || i->type >= SDE_HW_BLK_MAX) {
+		SDE_ERROR("invalid rm or iterator\n");
+		return false;
+	}
+
+	i->hw = NULL;
+	blk_list = &rm->hw_blks[i->type];
+
+	if (i->blk && (&i->blk->list == blk_list)) {
+		SDE_ERROR("attempt to resume iteration past last\n");
+		return false;
+	}
+
+	i->blk = list_prepare_entry(i->blk, blk_list, list);
+
+	list_for_each_entry_continue(i->blk, blk_list, list) {
+		struct sde_rm_rsvp *rsvp = i->blk->rsvp;
+
+		if (i->blk->type != i->type) {
+			SDE_ERROR("found incorrect block type %d on %d list\n",
+					i->blk->type, i->type);
+			return false;
+		}
+
+		if ((i->enc_id == 0) || (rsvp && rsvp->enc_id == i->enc_id)) {
+			i->hw = i->blk->hw;
+			SDE_DEBUG("found type %d %s id %d for enc %d\n",
+					i->type, i->blk->type_name, i->blk->id,
+					i->enc_id);
+			return true;
+		}
+	}
+
+	SDE_DEBUG("no match, type %d for enc %d\n", i->type, i->enc_id);
+
+	return false;
+}
+
+bool sde_rm_get_hw(struct sde_rm *rm, struct sde_rm_hw_iter *i)
+{
+	bool ret;
+
+	mutex_lock(&rm->rm_lock);
+	ret = _sde_rm_get_hw_locked(rm, i);
+	mutex_unlock(&rm->rm_lock);
+
+	return ret;
+}
+
+static void *_sde_rm_get_hw_by_id_locked(
+		struct sde_rm *rm,
+		enum sde_hw_blk_type type,
+		int id)
+{
+	struct list_head *blk_list;
+	struct sde_rm_hw_blk *blk;
+	void *hw = NULL;
+
+	if (!rm || type >= SDE_HW_BLK_MAX) {
+		SDE_ERROR("invalid rm or type\n");
+		return hw;
+	}
+
+	blk_list = &rm->hw_blks[type];
+
+	list_for_each_entry(blk, blk_list, list) {
+		if (blk->id == id) {
+			hw = blk->hw;
+			SDE_DEBUG("found type %d %s id %d\n",
+					type, blk->type_name, blk->id);
+			return hw;
+		}
+	}
+
+	SDE_DEBUG("no match, type %d id=%d\n", type, id);
+
+	return hw;
+}
+
+void *sde_rm_get_hw_by_id(struct sde_rm *rm, enum sde_hw_blk_type type, int id)
+{
+	void *ret = NULL;
+
+	mutex_lock(&rm->rm_lock);
+	ret = _sde_rm_get_hw_by_id_locked(rm, type, id);
+	mutex_unlock(&rm->rm_lock);
+
+	return ret;
+}
+
+static void _sde_rm_hw_destroy(enum sde_hw_blk_type type, void *hw)
+{
+	switch (type) {
+	case SDE_HW_BLK_LM:
+		sde_hw_lm_destroy(hw);
+		break;
+	case SDE_HW_BLK_DSPP:
+		sde_hw_dspp_destroy(hw);
+		break;
+	case SDE_HW_BLK_CTL:
+		sde_hw_ctl_destroy(hw);
+		break;
+	case SDE_HW_BLK_CDM:
+		sde_hw_cdm_destroy(hw);
+		break;
+	case SDE_HW_BLK_PINGPONG:
+		sde_hw_pingpong_destroy(hw);
+		break;
+	case SDE_HW_BLK_INTF:
+		sde_hw_intf_destroy(hw);
+		break;
+	case SDE_HW_BLK_WB:
+		sde_hw_wb_destroy(hw);
+		break;
+	case SDE_HW_BLK_SSPP:
+		sde_hw_sspp_destroy(hw);
+		break;
+	case SDE_HW_BLK_TOP:
+		/* Top is a singleton, not managed in hw_blks list */
+	case SDE_HW_BLK_MAX:
+	default:
+		SDE_ERROR("unsupported block type %d\n", type);
+		break;
+	}
+}
+
+int sde_rm_destroy(struct sde_rm *rm)
+{
+	struct sde_rm_rsvp *rsvp_cur, *rsvp_nxt;
+	struct sde_rm_hw_blk *hw_cur, *hw_nxt;
+	enum sde_hw_blk_type type;
+
+	if (!rm) {
+		SDE_ERROR("invalid rm\n");
+		return -EINVAL;
+	}
+
+	list_for_each_entry_safe(rsvp_cur, rsvp_nxt, &rm->rsvps, list) {
+		list_del(&rsvp_cur->list);
+		kfree(rsvp_cur);
+	}
+
+	for (type = 0; type < SDE_HW_BLK_MAX; type++) {
+		list_for_each_entry_safe(hw_cur, hw_nxt, &rm->hw_blks[type],
+				list) {
+			list_del(&hw_cur->list);
+			_sde_rm_hw_destroy(hw_cur->type, hw_cur->hw);
+			kfree(hw_cur);
+		}
+	}
+
+	sde_hw_mdp_destroy(rm->hw_mdp);
+	rm->hw_mdp = NULL;
+
+	mutex_destroy(&rm->rm_lock);
+
+	return 0;
+}
+
+static int _sde_rm_hw_blk_create(
+		struct sde_rm *rm,
+		struct sde_mdss_cfg *cat,
+		void *mmio,
+		enum sde_hw_blk_type type,
+		uint32_t id,
+		void *hw_catalog_info)
+{
+	struct sde_rm_hw_blk *blk;
+	struct sde_hw_mdp *hw_mdp;
+	const char *name;
+	void *hw;
+
+	hw_mdp = rm->hw_mdp;
+
+	switch (type) {
+	case SDE_HW_BLK_LM:
+		hw = sde_hw_lm_init(id, mmio, cat);
+		name = "lm";
+		break;
+	case SDE_HW_BLK_DSPP:
+		hw = sde_hw_dspp_init(id, mmio, cat);
+		name = "dspp";
+		break;
+	case SDE_HW_BLK_CTL:
+		hw = sde_hw_ctl_init(id, mmio, cat);
+		name = "ctl";
+		break;
+	case SDE_HW_BLK_CDM:
+		hw = sde_hw_cdm_init(id, mmio, cat, hw_mdp);
+		name = "cdm";
+		break;
+	case SDE_HW_BLK_PINGPONG:
+		hw = sde_hw_pingpong_init(id, mmio, cat);
+		name = "pp";
+		break;
+	case SDE_HW_BLK_INTF:
+		hw = sde_hw_intf_init(id, mmio, cat);
+		name = "intf";
+		break;
+	case SDE_HW_BLK_WB:
+		hw = sde_hw_wb_init(id, mmio, cat, hw_mdp);
+		name = "wb";
+		break;
+	case SDE_HW_BLK_SSPP:
+		hw = sde_hw_sspp_init(id, (void __iomem *)mmio, cat);
+		name = "sspp";
+		break;
+	case SDE_HW_BLK_TOP:
+		/* Top is a singleton, not managed in hw_blks list */
+	case SDE_HW_BLK_MAX:
+	default:
+		SDE_ERROR("unsupported block type %d\n", type);
+		return -EINVAL;
+	}
+
+	if (IS_ERR_OR_NULL(hw)) {
+		SDE_ERROR("failed hw object creation: type %d, err %ld\n",
+				type, PTR_ERR(hw));
+		return -EFAULT;
+	}
+
+	blk = kzalloc(sizeof(*blk), GFP_KERNEL);
+	if (!blk) {
+		_sde_rm_hw_destroy(type, hw);
+		return -ENOMEM;
+	}
+
+	blk->type_name = name;
+	blk->type = type;
+	blk->id = id;
+	blk->catalog = hw_catalog_info;
+	blk->hw = hw;
+	list_add_tail(&blk->list, &rm->hw_blks[type]);
+
+	return 0;
+}
+
+int sde_rm_init(struct sde_rm *rm,
+		struct sde_mdss_cfg *cat,
+		void *mmio,
+		struct drm_device *dev)
+{
+	int rc, i;
+	enum sde_hw_blk_type type;
+
+	if (!rm || !cat || !mmio || !dev) {
+		SDE_ERROR("invalid kms\n");
+		return -EINVAL;
+	}
+
+	/* Clear, setup lists */
+	memset(rm, 0, sizeof(*rm));
+
+	mutex_init(&rm->rm_lock);
+
+	INIT_LIST_HEAD(&rm->rsvps);
+	for (type = 0; type < SDE_HW_BLK_MAX; type++)
+		INIT_LIST_HEAD(&rm->hw_blks[type]);
+
+	/* Some of the sub-blocks require an mdptop to be created */
+	rm->hw_mdp = sde_hw_mdptop_init(MDP_TOP, mmio, cat);
+	if (IS_ERR_OR_NULL(rm->hw_mdp)) {
+		rc = PTR_ERR(rm->hw_mdp);
+		rm->hw_mdp = NULL;
+		SDE_ERROR("failed: mdp hw not available\n");
+		goto fail;
+	}
+
+	for (i = 0; i < cat->sspp_count; i++) {
+		rc = _sde_rm_hw_blk_create(rm, cat, mmio, SDE_HW_BLK_SSPP,
+				cat->sspp[i].id, &cat->sspp[i]);
+		if (rc)
+			goto fail;
+	}
+
+	/* Interrogate HW catalog and create tracking items for hw blocks */
+	for (i = 0; i < cat->mixer_count; i++) {
+		struct sde_lm_cfg *lm = &cat->mixer[i];
+
+		if (lm->pingpong == PINGPONG_MAX) {
+			SDE_DEBUG("skip mixer %d without pingpong\n", lm->id);
+			continue;
+		}
+
+		rc = _sde_rm_hw_blk_create(rm, cat, mmio, SDE_HW_BLK_LM,
+				cat->mixer[i].id, &cat->mixer[i]);
+		if (rc) {
+			SDE_ERROR("failed: lm hw not available\n");
+			goto fail;
+		}
+
+		if (!rm->lm_max_width) {
+			rm->lm_max_width = lm->sblk->maxwidth;
+		} else if (rm->lm_max_width != lm->sblk->maxwidth) {
+			/*
+			 * Don't expect to have hw where lm max widths differ.
+			 * If found, take the min.
+			 */
+			SDE_ERROR("unsupported: lm maxwidth differs\n");
+			if (rm->lm_max_width > lm->sblk->maxwidth)
+				rm->lm_max_width = lm->sblk->maxwidth;
+		}
+	}
+
+	for (i = 0; i < cat->dspp_count; i++) {
+		rc = _sde_rm_hw_blk_create(rm, cat, mmio, SDE_HW_BLK_DSPP,
+				cat->dspp[i].id, &cat->dspp[i]);
+		if (rc) {
+			SDE_ERROR("failed: dspp hw not available\n");
+			goto fail;
+		}
+	}
+
+	for (i = 0; i < cat->pingpong_count; i++) {
+		rc = _sde_rm_hw_blk_create(rm, cat, mmio, SDE_HW_BLK_PINGPONG,
+				cat->pingpong[i].id, &cat->pingpong[i]);
+		if (rc) {
+			SDE_ERROR("failed: pp hw not available\n");
+			goto fail;
+		}
+	}
+
+	for (i = 0; i < cat->intf_count; i++) {
+		if (cat->intf[i].type == INTF_NONE) {
+			SDE_DEBUG("skip intf %d with type none\n", i);
+			continue;
+		}
+
+		rc = _sde_rm_hw_blk_create(rm, cat, mmio, SDE_HW_BLK_INTF,
+				cat->intf[i].id, &cat->intf[i]);
+		if (rc) {
+			SDE_ERROR("failed: intf hw not available\n");
+			goto fail;
+		}
+	}
+
+	for (i = 0; i < cat->wb_count; i++) {
+		rc = _sde_rm_hw_blk_create(rm, cat, mmio, SDE_HW_BLK_WB,
+				cat->wb[i].id, &cat->wb[i]);
+		if (rc) {
+			SDE_ERROR("failed: wb hw not available\n");
+			goto fail;
+		}
+	}
+
+	for (i = 0; i < cat->ctl_count; i++) {
+		rc = _sde_rm_hw_blk_create(rm, cat, mmio, SDE_HW_BLK_CTL,
+				cat->ctl[i].id, &cat->ctl[i]);
+		if (rc) {
+			SDE_ERROR("failed: ctl hw not available\n");
+			goto fail;
+		}
+	}
+
+	for (i = 0; i < cat->cdm_count; i++) {
+		rc = _sde_rm_hw_blk_create(rm, cat, mmio, SDE_HW_BLK_CDM,
+				cat->cdm[i].id, &cat->cdm[i]);
+		if (rc) {
+			SDE_ERROR("failed: cdm hw not available\n");
+			goto fail;
+		}
+	}
+
+	return 0;
+
+fail:
+	sde_rm_destroy(rm);
+
+	return rc;
+}
+
+/**
+ * _sde_rm_check_lm_and_get_connected_blks - check if the proposed layer mixer
+ *	meets the proposed use case requirements, including hardwired dependent
+ *	blocks such as pingpong and dspp.
+ * @rm: sde resource manager handle
+ * @rsvp: reservation currently being created
+ * @reqs: proposed use case requirements
+ * @lm: proposed layer mixer, function checks if lm, and all other hardwired
+ *      blocks connected to the lm (pp, dspp) are available and appropriate
+ * @dspp: output parameter, dspp block attached to the layer mixer.
+ *        NULL if dspp was not available, or not matching requirements.
+ * @pp: output parameter, pingpong block attached to the layer mixer.
+ *      NULL if pp was not available, or not matching requirements.
+ * @primary_lm: if non-null, this function checks if lm is a compatible peer
+ *              of primary_lm, as well as satisfying all other requirements
+ * @Return: true if lm matches all requirements, false otherwise
+ */
+static bool _sde_rm_check_lm_and_get_connected_blks(
+		struct sde_rm *rm,
+		struct sde_rm_rsvp *rsvp,
+		struct sde_rm_requirements *reqs,
+		struct sde_rm_hw_blk *lm,
+		struct sde_rm_hw_blk **dspp,
+		struct sde_rm_hw_blk **pp,
+		struct sde_rm_hw_blk *primary_lm)
+{
+	struct sde_lm_cfg *lm_cfg = (struct sde_lm_cfg *)lm->catalog;
+	struct sde_pingpong_cfg *pp_cfg;
+	struct sde_rm_hw_iter iter;
+
+	*dspp = NULL;
+	*pp = NULL;
+
+	SDE_DEBUG("check lm %d: dspp %d pp %d\n", lm_cfg->id, lm_cfg->dspp,
+			lm_cfg->pingpong);
+
+	/* Check if this layer mixer is a peer of the proposed primary LM */
+	if (primary_lm) {
+		struct sde_lm_cfg *prim_lm_cfg =
+				(struct sde_lm_cfg *)primary_lm->catalog;
+
+		if (!test_bit(lm_cfg->id, &prim_lm_cfg->lm_pair_mask)) {
+			SDE_DEBUG("lm %d not peer of lm %d\n", lm_cfg->id,
+					prim_lm_cfg->id);
+			return false;
+		}
+	}
+
+	/* Matches user requirements? */
+	if ((RM_RQ_DSPP(reqs) && lm_cfg->dspp == DSPP_MAX) ||
+			(!RM_RQ_DSPP(reqs) && lm_cfg->dspp != DSPP_MAX)) {
+		SDE_DEBUG("dspp req mismatch lm %d reqdspp %d, lm->dspp %d\n",
+				lm_cfg->id, (bool)(RM_RQ_DSPP(reqs)),
+				lm_cfg->dspp);
+		return false;
+	}
+
+	/* Already reserved? */
+	if (RESERVED_BY_OTHER(lm, rsvp)) {
+		SDE_DEBUG("lm %d already reserved\n", lm_cfg->id);
+		return false;
+	}
+
+	if (lm_cfg->dspp != DSPP_MAX) {
+		sde_rm_init_hw_iter(&iter, 0, SDE_HW_BLK_DSPP);
+		while (_sde_rm_get_hw_locked(rm, &iter)) {
+			if (iter.blk->id == lm_cfg->dspp) {
+				*dspp = iter.blk;
+				break;
+			}
+		}
+
+		if (!*dspp) {
+			SDE_DEBUG("lm %d failed to retrieve dspp %d\n", lm->id,
+					lm_cfg->dspp);
+			return false;
+		}
+
+		if (RESERVED_BY_OTHER(*dspp, rsvp)) {
+			SDE_DEBUG("lm %d dspp %d already reserved\n",
+					lm->id, (*dspp)->id);
+			return false;
+		}
+	}
+
+	sde_rm_init_hw_iter(&iter, 0, SDE_HW_BLK_PINGPONG);
+	while (_sde_rm_get_hw_locked(rm, &iter)) {
+		if (iter.blk->id == lm_cfg->pingpong) {
+			*pp = iter.blk;
+			break;
+		}
+	}
+
+	if (!*pp) {
+		SDE_ERROR("failed to get pp on lm %d\n", lm_cfg->pingpong);
+		return false;
+	}
+
+	if (RESERVED_BY_OTHER(*pp, rsvp)) {
+		SDE_DEBUG("lm %d pp %d already reserved\n", lm->id,
+				(*pp)->id);
+		*dspp = NULL;
+		return false;
+	}
+
+	pp_cfg = (struct sde_pingpong_cfg *)((*pp)->catalog);
+	if ((reqs->top_name == SDE_RM_TOPOLOGY_PPSPLIT) &&
+			!(test_bit(SDE_PINGPONG_SPLIT, &pp_cfg->features))) {
+		SDE_DEBUG("pp %d doesn't support ppsplit\n", pp_cfg->id);
+		*dspp = NULL;
+		return false;
+	}
+
+	return true;
+}
+
+static int _sde_rm_reserve_lms(
+		struct sde_rm *rm,
+		struct sde_rm_rsvp *rsvp,
+		struct sde_rm_requirements *reqs)
+{
+	struct sde_rm_hw_blk *lm[MAX_BLOCKS];
+	struct sde_rm_hw_blk *dspp[MAX_BLOCKS];
+	struct sde_rm_hw_blk *pp[MAX_BLOCKS];
+	struct sde_rm_hw_iter iter_i, iter_j;
+	int lm_count = 0;
+	int i, rc = 0;
+
+	if (!reqs->num_lm) {
+		SDE_ERROR("invalid number of lm: %d\n", reqs->num_lm);
+		return -EINVAL;
+	}
+
+	/* Find a primary mixer */
+	sde_rm_init_hw_iter(&iter_i, 0, SDE_HW_BLK_LM);
+	while (lm_count != reqs->num_lm &&
+			_sde_rm_get_hw_locked(rm, &iter_i)) {
+		memset(&lm, 0, sizeof(lm));
+		memset(&dspp, 0, sizeof(dspp));
+		memset(&pp, 0, sizeof(pp));
+
+		lm_count = 0;
+		lm[lm_count] = iter_i.blk;
+
+		if (!_sde_rm_check_lm_and_get_connected_blks(rm, rsvp, reqs,
+				lm[lm_count], &dspp[lm_count], &pp[lm_count],
+				NULL))
+			continue;
+
+		++lm_count;
+
+		/* Valid primary mixer found, find matching peers */
+		sde_rm_init_hw_iter(&iter_j, 0, SDE_HW_BLK_LM);
+
+		while (lm_count != reqs->num_lm &&
+				_sde_rm_get_hw_locked(rm, &iter_j)) {
+			if (iter_i.blk == iter_j.blk)
+				continue;
+
+			if (!_sde_rm_check_lm_and_get_connected_blks(rm, rsvp,
+					reqs, iter_j.blk, &dspp[lm_count],
+					&pp[lm_count], iter_i.blk))
+				continue;
+
+			lm[lm_count] = iter_j.blk;
+			++lm_count;
+		}
+	}
+
+	if (lm_count != reqs->num_lm) {
+		SDE_DEBUG("unable to find appropriate mixers\n");
+		return -ENAVAIL;
+	}
+
+	for (i = 0; i < ARRAY_SIZE(lm); i++) {
+		if (!lm[i])
+			break;
+
+		lm[i]->rsvp_nxt = rsvp;
+		pp[i]->rsvp_nxt = rsvp;
+		if (dspp[i])
+			dspp[i]->rsvp_nxt = rsvp;
+
+		SDE_EVT32(lm[i]->type, rsvp->enc_id, lm[i]->id, pp[i]->id,
+				dspp[i] ? dspp[i]->id : 0);
+	}
+
+	if (reqs->top_name == SDE_RM_TOPOLOGY_PPSPLIT) {
+		/* reserve a free PINGPONG_SLAVE block */
+		rc = -ENAVAIL;
+		sde_rm_init_hw_iter(&iter_i, 0, SDE_HW_BLK_PINGPONG);
+		while (_sde_rm_get_hw_locked(rm, &iter_i)) {
+			struct sde_pingpong_cfg *pp_cfg =
+				(struct sde_pingpong_cfg *)
+				(iter_i.blk->catalog);
+
+			if (!(test_bit(SDE_PINGPONG_SLAVE, &pp_cfg->features)))
+				continue;
+			if (RESERVED_BY_OTHER(iter_i.blk, rsvp))
+				continue;
+
+			iter_i.blk->rsvp_nxt = rsvp;
+			rc = 0;
+			break;
+		}
+	}
+
+	return rc;
+}
+
+static int _sde_rm_reserve_ctls(
+		struct sde_rm *rm,
+		struct sde_rm_rsvp *rsvp,
+		struct sde_rm_requirements *reqs)
+{
+	struct sde_rm_hw_blk *ctls[MAX_BLOCKS];
+	struct sde_rm_hw_iter iter;
+	int i = 0;
+
+	memset(&ctls, 0, sizeof(ctls));
+
+	sde_rm_init_hw_iter(&iter, 0, SDE_HW_BLK_CTL);
+	while (_sde_rm_get_hw_locked(rm, &iter)) {
+		unsigned long caps;
+		bool has_split_display, has_ppsplit;
+
+		if (RESERVED_BY_OTHER(iter.blk, rsvp))
+			continue;
+
+		caps = ((struct sde_ctl_cfg *)iter.blk->catalog)->features;
+		has_split_display = BIT(SDE_CTL_SPLIT_DISPLAY) & caps;
+		has_ppsplit = BIT(SDE_CTL_PINGPONG_SPLIT) & caps;
+
+		SDE_DEBUG("ctl %d caps 0x%lX\n", iter.blk->id, caps);
+
+		if (reqs->needs_split_display != has_split_display)
+			continue;
+
+		if (reqs->top_name == SDE_RM_TOPOLOGY_PPSPLIT && !has_ppsplit)
+			continue;
+
+		ctls[i] = iter.blk;
+		SDE_DEBUG("ctl %d match\n", iter.blk->id);
+
+		if (++i == reqs->num_ctl)
+			break;
+	}
+
+	if (i != reqs->num_ctl)
+		return -ENAVAIL;
+
+	for (i = 0; i < ARRAY_SIZE(ctls) && i < reqs->num_ctl; i++) {
+		ctls[i]->rsvp_nxt = rsvp;
+		SDE_EVT32(ctls[i]->type, rsvp->enc_id, ctls[i]->id);
+	}
+
+	return 0;
+}
+
+static int _sde_rm_reserve_cdm(
+		struct sde_rm *rm,
+		struct sde_rm_rsvp *rsvp,
+		uint32_t id,
+		enum sde_hw_blk_type type)
+{
+	struct sde_rm_hw_iter iter;
+	struct sde_cdm_cfg *cdm;
+
+	sde_rm_init_hw_iter(&iter, 0, SDE_HW_BLK_CDM);
+	while (_sde_rm_get_hw_locked(rm, &iter)) {
+		bool match = false;
+
+		if (RESERVED_BY_OTHER(iter.blk, rsvp))
+			continue;
+
+		cdm = (struct sde_cdm_cfg *)(iter.blk->catalog);
+
+		if (type == SDE_HW_BLK_INTF && id != INTF_MAX)
+			match = test_bit(id, &cdm->intf_connect);
+		else if (type == SDE_HW_BLK_WB && id != WB_MAX)
+			match = test_bit(id, &cdm->wb_connect);
+
+		SDE_DEBUG("type %d id %d, cdm intfs %lu wbs %lu match %d\n",
+				type, id, cdm->intf_connect, cdm->wb_connect,
+				match);
+
+		if (!match)
+			continue;
+
+		iter.blk->rsvp_nxt = rsvp;
+		SDE_EVT32(iter.blk->type, rsvp->enc_id, iter.blk->id);
+		break;
+	}
+
+	if (!iter.hw) {
+		SDE_ERROR("couldn't reserve cdm for type %d id %d\n", type, id);
+		return -ENAVAIL;
+	}
+
+	return 0;
+}
+
+static int _sde_rm_reserve_intf_or_wb(
+		struct sde_rm *rm,
+		struct sde_rm_rsvp *rsvp,
+		uint32_t id,
+		enum sde_hw_blk_type type,
+		bool needs_cdm)
+{
+	struct sde_rm_hw_iter iter;
+	int ret = 0;
+
+	/* Find the block entry in the rm, and note the reservation */
+	sde_rm_init_hw_iter(&iter, 0, type);
+	while (_sde_rm_get_hw_locked(rm, &iter)) {
+		if (iter.blk->id != id)
+			continue;
+
+		if (RESERVED_BY_OTHER(iter.blk, rsvp)) {
+			SDE_ERROR("type %d id %d already reserved\n", type, id);
+			return -ENAVAIL;
+		}
+
+		iter.blk->rsvp_nxt = rsvp;
+		SDE_EVT32(iter.blk->type, rsvp->enc_id, iter.blk->id);
+		break;
+	}
+
+	/* Shouldn't happen since wbs / intfs are fixed at probe */
+	if (!iter.hw) {
+		SDE_ERROR("couldn't find type %d id %d\n", type, id);
+		return -EINVAL;
+	}
+
+	/* Expected only one intf or wb will request cdm */
+	if (needs_cdm)
+		ret = _sde_rm_reserve_cdm(rm, rsvp, id, type);
+
+	return ret;
+}
+
+static int _sde_rm_reserve_intf_related_hw(
+		struct sde_rm *rm,
+		struct sde_rm_rsvp *rsvp,
+		struct sde_encoder_hw_resources *hw_res)
+{
+	int i, ret = 0;
+	u32 id;
+
+	for (i = 0; i < ARRAY_SIZE(hw_res->intfs); i++) {
+		if (hw_res->intfs[i] == INTF_MODE_NONE)
+			continue;
+		id = i + INTF_0;
+		ret = _sde_rm_reserve_intf_or_wb(rm, rsvp, id,
+				SDE_HW_BLK_INTF, hw_res->needs_cdm);
+		if (ret)
+			return ret;
+	}
+
+	for (i = 0; i < ARRAY_SIZE(hw_res->wbs); i++) {
+		if (hw_res->wbs[i] == INTF_MODE_NONE)
+			continue;
+		id = i + WB_0;
+		ret = _sde_rm_reserve_intf_or_wb(rm, rsvp, id,
+				SDE_HW_BLK_WB, hw_res->needs_cdm);
+		if (ret)
+			return ret;
+	}
+
+	return ret;
+}
+
+static int _sde_rm_make_next_rsvp(
+		struct sde_rm *rm,
+		struct drm_encoder *enc,
+		struct drm_crtc_state *crtc_state,
+		struct drm_connector_state *conn_state,
+		struct sde_rm_rsvp *rsvp,
+		struct sde_rm_requirements *reqs)
+{
+	int ret;
+
+	/* Create reservation info, tag reserved blocks with it as we go */
+	rsvp->seq = ++rm->rsvp_next_seq;
+	rsvp->enc_id = enc->base.id;
+	rsvp->topology = reqs->top_name;
+	list_add_tail(&rsvp->list, &rm->rsvps);
+
+	/*
+	 * Assign LMs and blocks whose usage is tied to them: DSPP & Pingpong.
+	 * Do assignment preferring to give away low-resource mixers first:
+	 * - Check mixers without DSPPs
+	 * - Only then allow to grab from mixers with DSPP capability
+	 */
+	ret = _sde_rm_reserve_lms(rm, rsvp, reqs);
+	if (ret && !RM_RQ_DSPP(reqs)) {
+		reqs->top_ctrl |= BIT(SDE_RM_TOPCTL_DSPP);
+		ret = _sde_rm_reserve_lms(rm, rsvp, reqs);
+	}
+
+	if (ret) {
+		SDE_ERROR("unable to find appropriate mixers\n");
+		return ret;
+	}
+
+	/*
+	 * Do assignment preferring to give away low-resource CTLs first:
+	 * - Check CTLs without split display capability
+	 * - Only then allow to grab from CTLs with split display capability
+	 */
+	ret = _sde_rm_reserve_ctls(rm, rsvp, reqs);
+	if (ret && !reqs->needs_split_display) {
+		reqs->needs_split_display = true;
+		ret = _sde_rm_reserve_ctls(rm, rsvp, reqs);
+	}
+	if (ret) {
+		SDE_ERROR("unable to find appropriate CTL\n");
+		return ret;
+	}
+
+	/* Assign INTFs, WBs, and blks whose usage is tied to them: CTL & CDM */
+	ret = _sde_rm_reserve_intf_related_hw(rm, rsvp, &reqs->hw_res);
+	if (ret)
+		return ret;
+
+	return ret;
+}
+
+static int _sde_rm_populate_requirements(
+		struct sde_rm *rm,
+		struct drm_encoder *enc,
+		struct drm_crtc_state *crtc_state,
+		struct drm_connector_state *conn_state,
+		struct sde_rm_requirements *reqs)
+{
+	const struct drm_display_mode *mode = &crtc_state->mode;
+
+	/*
+	 * DRM<->HW Topologies
+	 *
+	 * Name: SINGLEPIPE
+	 * Description: 1 LM, 1 PP, 1 INTF
+	 * Condition: 1 DRM Encoder w/ 1 Display Tile (Default)
+	 *
+	 * Name: DUALPIPE
+	 * Description: 2 LM, 2 PP, 2 INTF
+	 * Condition: 1 DRM Encoder w/ 2 Display Tiles
+	 *
+	 * Name: PPSPLIT
+	 * Description: 1 LM, 1 PP + 1 Slave PP, 2 INTF
+	 * Condition:
+	 *	1 DRM Encoder w/ 2 Display Tiles
+	 *	topology_control & SDE_TOPREQ_PPSPLIT
+	 *
+	 * Name: DUALPIPEMERGE
+	 * Description: 2 LM, 2 PP, 3DMux, 1 INTF
+	 * Condition:
+	 *	1 DRM Encoder w/ 1 Display Tile
+	 *	display_info.max_width >= layer_mixer.max_width
+	 *
+	 * Name: DUALPIPEMERGE
+	 * Description: 2 LM, 2 PP, 3DMux, 1 INTF
+	 * Condition:
+	 *	1 DRM Encoder w/ 1 Display Tile
+	 *	display_info.max_width <= layer_mixer.max_width
+	 *	topology_control & SDE_TOPREQ_FORCE_TILING
+	 */
+
+	memset(reqs, 0, sizeof(*reqs));
+
+	reqs->top_ctrl = sde_connector_get_property(conn_state,
+			CONNECTOR_PROP_TOPOLOGY_CONTROL);
+	sde_encoder_get_hw_resources(enc, &reqs->hw_res, conn_state);
+
+	/* Base assumption is LMs = h_tiles, conditions below may override */
+	reqs->num_lm = reqs->hw_res.display_num_of_h_tiles;
+
+	if (reqs->num_lm == 2) {
+		if (RM_RQ_PPSPLIT(reqs)) {
+			/* user requests serving dual display with 1 lm */
+			reqs->top_name = SDE_RM_TOPOLOGY_PPSPLIT;
+			reqs->num_lm = 1;
+			reqs->num_ctl = 1;
+			reqs->needs_split_display = true;
+		} else {
+			/* dual display, serve with 2 lms */
+			reqs->top_name = SDE_RM_TOPOLOGY_DUALPIPE;
+			reqs->num_ctl = 2;
+			reqs->needs_split_display = true;
+		}
+
+	} else if (reqs->num_lm == 1) {
+		if (mode->hdisplay > rm->lm_max_width) {
+			/* wide display, must split across 2 lm and merge */
+			reqs->top_name = SDE_RM_TOPOLOGY_DUALPIPEMERGE;
+			reqs->num_lm = 2;
+			reqs->num_ctl = 1;
+			reqs->needs_split_display = false;
+		} else if (RM_RQ_FORCE_TILING(reqs)) {
+			/* thin display, but user requests 2 lm and merge */
+			reqs->top_name = SDE_RM_TOPOLOGY_DUALPIPEMERGE;
+			reqs->num_lm = 2;
+			reqs->num_ctl = 1;
+			reqs->needs_split_display = false;
+		} else {
+			/* thin display, serve with only 1 lm */
+			reqs->top_name = SDE_RM_TOPOLOGY_SINGLEPIPE;
+			reqs->num_ctl = 1;
+			reqs->needs_split_display = false;
+		}
+
+	} else {
+		/* Currently no configurations with # LM > 2 */
+		SDE_ERROR("unsupported # of mixers %d\n", reqs->num_lm);
+		return -EINVAL;
+	}
+
+	SDE_DEBUG("top_ctrl 0x%llX num_h_tiles %d\n", reqs->top_ctrl,
+			reqs->hw_res.display_num_of_h_tiles);
+	SDE_DEBUG("display_max_width %d rm->lm_max_width %d\n",
+			mode->hdisplay, rm->lm_max_width);
+	SDE_DEBUG("num_lm %d num_ctl %d topology_name %d\n", reqs->num_lm,
+			reqs->num_ctl, reqs->top_name);
+	SDE_EVT32(mode->hdisplay, rm->lm_max_width, reqs->num_lm,
+			reqs->top_ctrl, reqs->top_name, reqs->num_ctl);
+
+	return 0;
+}
+
+static struct sde_rm_rsvp *_sde_rm_get_rsvp(
+		struct sde_rm *rm,
+		struct drm_encoder *enc)
+{
+	struct sde_rm_rsvp *i;
+
+	if (!rm || !enc) {
+		SDE_ERROR("invalid params\n");
+		return NULL;
+	}
+
+	if (list_empty(&rm->rsvps))
+		return NULL;
+
+	list_for_each_entry(i, &rm->rsvps, list)
+		if (i->enc_id == enc->base.id)
+			return i;
+
+	return NULL;
+}
+
+static struct drm_connector *_sde_rm_get_connector(
+		struct drm_encoder *enc)
+{
+	struct drm_connector *conn = NULL;
+	struct list_head *connector_list =
+			&enc->dev->mode_config.connector_list;
+
+	list_for_each_entry(conn, connector_list, head)
+		if (conn->encoder == enc)
+			return conn;
+
+	return NULL;
+}
+
+/**
+ * _sde_rm_release_rsvp - release a reservation and the resources it holds
+ * @rm:	KMS handle
+ * @rsvp:	RSVP pointer to release resources for
+ * @conn:	Connector associated with the reservation
+ */
+static void _sde_rm_release_rsvp(
+		struct sde_rm *rm,
+		struct sde_rm_rsvp *rsvp,
+		struct drm_connector *conn)
+{
+	struct sde_rm_rsvp *rsvp_c, *rsvp_n;
+	struct sde_rm_hw_blk *blk;
+	enum sde_hw_blk_type type;
+
+	if (!rsvp)
+		return;
+
+	SDE_DEBUG("rel rsvp %d enc %d\n", rsvp->seq, rsvp->enc_id);
+
+	list_for_each_entry_safe(rsvp_c, rsvp_n, &rm->rsvps, list) {
+		if (rsvp == rsvp_c) {
+			list_del(&rsvp_c->list);
+			break;
+		}
+	}
+
+	for (type = 0; type < SDE_HW_BLK_MAX; type++) {
+		list_for_each_entry(blk, &rm->hw_blks[type], list) {
+			if (blk->rsvp == rsvp) {
+				blk->rsvp = NULL;
+				SDE_DEBUG("rel rsvp %d enc %d %s %d\n",
+						rsvp->seq, rsvp->enc_id,
+						blk->type_name, blk->id);
+			}
+			if (blk->rsvp_nxt == rsvp) {
+				blk->rsvp_nxt = NULL;
+				SDE_DEBUG("rel rsvp_nxt %d enc %d %s %d\n",
+						rsvp->seq, rsvp->enc_id,
+						blk->type_name, blk->id);
+			}
+		}
+	}
+
+	kfree(rsvp);
+}
+
+void sde_rm_release(struct sde_rm *rm, struct drm_encoder *enc)
+{
+	struct sde_rm_rsvp *rsvp;
+	struct drm_connector *conn;
+	uint64_t top_ctrl;
+
+	if (!rm || !enc) {
+		SDE_ERROR("invalid params\n");
+		return;
+	}
+
+	mutex_lock(&rm->rm_lock);
+
+	rsvp = _sde_rm_get_rsvp(rm, enc);
+	if (!rsvp) {
+		SDE_ERROR("failed to find rsvp for enc %d\n", enc->base.id);
+		goto end;
+	}
+
+	conn = _sde_rm_get_connector(enc);
+	if (!conn) {
+		SDE_ERROR("failed to get connector for enc %d\n", enc->base.id);
+		goto end;
+	}
+
+	top_ctrl = sde_connector_get_property(conn->state,
+			CONNECTOR_PROP_TOPOLOGY_CONTROL);
+
+	if (top_ctrl & BIT(SDE_RM_TOPCTL_RESERVE_LOCK)) {
+		SDE_DEBUG("rsvp[s%de%d] not releasing locked resources\n",
+				rsvp->seq, rsvp->enc_id);
+	} else {
+		SDE_DEBUG("release rsvp[s%de%d]\n", rsvp->seq,
+				rsvp->enc_id);
+		_sde_rm_release_rsvp(rm, rsvp, conn);
+
+		(void) msm_property_set_property(
+				sde_connector_get_propinfo(conn),
+				sde_connector_get_property_values(conn->state),
+				CONNECTOR_PROP_TOPOLOGY_NAME,
+				SDE_RM_TOPOLOGY_UNKNOWN);
+	}
+
+end:
+	mutex_unlock(&rm->rm_lock);
+}
+
+static int _sde_rm_commit_rsvp(
+		struct sde_rm *rm,
+		struct sde_rm_rsvp *rsvp,
+		struct drm_connector_state *conn_state)
+{
+	struct sde_rm_hw_blk *blk;
+	enum sde_hw_blk_type type;
+	int ret = 0;
+
+	ret = msm_property_set_property(
+			sde_connector_get_propinfo(conn_state->connector),
+			sde_connector_get_property_values(conn_state),
+			CONNECTOR_PROP_TOPOLOGY_NAME,
+			rsvp->topology);
+	if (ret) {
+		SDE_ERROR("failed to set topology name property, ret %d\n",
+				ret);
+		_sde_rm_release_rsvp(rm, rsvp, conn_state->connector);
+		return ret;
+	}
+
+	/* Swap next rsvp to be the active */
+	for (type = 0; type < SDE_HW_BLK_MAX; type++) {
+		list_for_each_entry(blk, &rm->hw_blks[type], list) {
+			if (blk->rsvp_nxt) {
+				blk->rsvp = blk->rsvp_nxt;
+				blk->rsvp_nxt = NULL;
+			}
+		}
+	}
+
+	if (!ret) {
+		SDE_DEBUG("rsrv enc %d topology %d\n", rsvp->enc_id,
+				rsvp->topology);
+		SDE_EVT32(rsvp->enc_id, rsvp->topology);
+	}
+
+	return ret;
+}
+
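+/**
+ * sde_rm_check_property_topctl - validate a proposed TOPOLOGY_CONTROL value
+ * @val: requested topology control bitmask
+ * @Return: 0 on success, -EINVAL if mutually exclusive bits are both set
+ */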
+int sde_rm_check_property_topctl(uint64_t val)
+{
+	if ((BIT(SDE_RM_TOPCTL_FORCE_TILING) & val) &&
+			(BIT(SDE_RM_TOPCTL_PPSPLIT) & val)) {
+		SDE_ERROR("ppsplit & force_tiling are incompatible\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+int sde_rm_reserve(
+		struct sde_rm *rm,
+		struct drm_encoder *enc,
+		struct drm_crtc_state *crtc_state,
+		struct drm_connector_state *conn_state,
+		bool test_only)
+{
+	struct sde_rm_rsvp *rsvp_cur, *rsvp_nxt;
+	struct sde_rm_requirements reqs;
+	int ret;
+
+	if (!rm || !enc || !crtc_state || !conn_state) {
+		SDE_ERROR("invalid arguments\n");
+		return -EINVAL;
+	}
+
+	/* Check if this is just a page-flip */
+	if (!drm_atomic_crtc_needs_modeset(crtc_state))
+		return 0;
+
+	SDE_DEBUG("reserving hw for conn %d enc %d crtc %d test_only %d\n",
+			conn_state->connector->base.id, enc->base.id,
+			crtc_state->crtc->base.id, test_only);
+	SDE_EVT32(enc->base.id, conn_state->connector->base.id);
+
+	mutex_lock(&rm->rm_lock);
+
+	_sde_rm_print_rsvps(rm, SDE_RM_STAGE_BEGIN);
+
+	ret = _sde_rm_populate_requirements(rm, enc, crtc_state,
+			conn_state, &reqs);
+	if (ret) {
+		SDE_ERROR("failed to populate hw requirements\n");
+		goto end;
+	}
+
+	/*
+	 * We only support one active reservation per hw block. But to
+	 * implement transactional semantics for test-only, and to allow
+	 * failure while modifying an existing reservation, over the course
+	 * of this function we can have two reservations:
+	 * Current: the existing reservation.
+	 * Next: the proposed reservation, which may fail, or may be
+	 *       discarded if in test-only mode.
+	 * If the reservation succeeds and we are not in test-only mode,
+	 * the next reservation replaces the current one.
+	 */
+	rsvp_nxt = kzalloc(sizeof(*rsvp_nxt), GFP_KERNEL);
+	if (!rsvp_nxt) {
+		ret = -ENOMEM;
+		goto end;
+	}
+
+	rsvp_cur = _sde_rm_get_rsvp(rm, enc);
+
+	/*
+	 * User can request that we clear out any reservation during the
+	 * atomic_check phase by using this CLEAR bit
+	 */
+	if (rsvp_cur && test_only && RM_RQ_CLEAR(&reqs)) {
+		SDE_DEBUG("test_only & CLEAR: clear rsvp[s%de%d]\n",
+				rsvp_cur->seq, rsvp_cur->enc_id);
+		_sde_rm_release_rsvp(rm, rsvp_cur, conn_state->connector);
+		rsvp_cur = NULL;
+		_sde_rm_print_rsvps(rm, SDE_RM_STAGE_AFTER_CLEAR);
+		(void) msm_property_set_property(
+				sde_connector_get_propinfo(
+						conn_state->connector),
+				sde_connector_get_property_values(conn_state),
+				CONNECTOR_PROP_TOPOLOGY_NAME,
+				SDE_RM_TOPOLOGY_UNKNOWN);
+	}
+
+	/* Check the proposed reservation, store it in hw's "next" field */
+	ret = _sde_rm_make_next_rsvp(rm, enc, crtc_state, conn_state,
+			rsvp_nxt, &reqs);
+
+	_sde_rm_print_rsvps(rm, SDE_RM_STAGE_AFTER_RSVPNEXT);
+
+	if (ret) {
+		SDE_ERROR("failed to reserve hw resources: %d\n", ret);
+		_sde_rm_release_rsvp(rm, rsvp_nxt, conn_state->connector);
+	} else if (test_only && !RM_RQ_LOCK(&reqs)) {
+		/*
+		 * Normally, if test_only, test the reservation and then undo.
+		 * However, if the user requests LOCK, keep the reservation
+		 * made during the atomic_check phase.
+		 */
+		SDE_DEBUG("test_only: discard test rsvp[s%de%d]\n",
+				rsvp_nxt->seq, rsvp_nxt->enc_id);
+		_sde_rm_release_rsvp(rm, rsvp_nxt, conn_state->connector);
+	} else {
+		if (test_only && RM_RQ_LOCK(&reqs))
+			SDE_DEBUG("test_only & LOCK: lock rsvp[s%de%d]\n",
+					rsvp_nxt->seq, rsvp_nxt->enc_id);
+
+		_sde_rm_release_rsvp(rm, rsvp_cur, conn_state->connector);
+
+		ret = _sde_rm_commit_rsvp(rm, rsvp_nxt, conn_state);
+	}
+
+	_sde_rm_print_rsvps(rm, SDE_RM_STAGE_FINAL);
+
+end:
+	mutex_unlock(&rm->rm_lock);
+
+	return ret;
+}
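/*
 * Illustrative sketch (not part of the patch): the intended two-phase
 * calling pattern for sde_rm_reserve(). The wrapper names are hypothetical;
 * only sde_rm_reserve() and its parameters come from the code above.
 */
static int example_encoder_atomic_check(struct sde_rm *rm,
		struct drm_encoder *enc,
		struct drm_crtc_state *crtc_state,
		struct drm_connector_state *conn_state)
{
	/* test_only=true: rsvp_nxt is validated, then discarded
	 * (unless the connector requested RESERVE_LOCK)
	 */
	return sde_rm_reserve(rm, enc, crtc_state, conn_state, true);
}

static int example_encoder_commit(struct sde_rm *rm,
		struct drm_encoder *enc,
		struct drm_crtc_state *crtc_state,
		struct drm_connector_state *conn_state)
{
	/* test_only=false: the successful rsvp_nxt replaces rsvp_cur */
	return sde_rm_reserve(rm, enc, crtc_state, conn_state, false);
}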
diff -Nruw linux-4.4.115/drivers/gpu/drm/msm/sde/sde_rm.h linux-4.4.115-fbx/drivers/gpu/drm/msm/sde/sde_rm.h
--- linux-4.4.115/drivers/gpu/drm/msm/sde/sde_rm.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/gpu/drm/msm/sde/sde_rm.h	2019-01-22 16:16:23.523246588 +0100
@@ -0,0 +1,215 @@
+/*
+ * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __SDE_RM_H__
+#define __SDE_RM_H__
+
+#include <linux/list.h>
+
+#include "msm_kms.h"
+#include "sde_hw_top.h"
+
+/**
+ * enum sde_rm_topology_name - HW resource use case in use by connector
+ * @SDE_RM_TOPOLOGY_UNKNOWN: No topology in use currently
+ * @SDE_RM_TOPOLOGY_SINGLEPIPE: 1 LM, 1 PP, 1 INTF/WB
+ * @SDE_RM_TOPOLOGY_DUALPIPE: 2 LM, 2 PP, 2 INTF/WB
+ * @SDE_RM_TOPOLOGY_PPSPLIT: 1 LM, 2 PPs, 2 INTF/WB
+ * @SDE_RM_TOPOLOGY_DUALPIPEMERGE: 2 LM, 2 PP, 3DMux, 1 INTF/WB
+ */
+enum sde_rm_topology_name {
+	SDE_RM_TOPOLOGY_UNKNOWN = 0,
+	SDE_RM_TOPOLOGY_SINGLEPIPE,
+	SDE_RM_TOPOLOGY_DUALPIPE,
+	SDE_RM_TOPOLOGY_PPSPLIT,
+	SDE_RM_TOPOLOGY_DUALPIPEMERGE,
+};
+
+/**
+ * enum sde_rm_topology_control - user requested control of the reservation
+ * @SDE_RM_TOPCTL_RESERVE_LOCK: If set, in AtomicTest phase, after a successful
+ *                              test, reserve the resources for this display.
+ *                              Normal behavior would not impact the reservation
+ *                              list during the AtomicTest phase.
+ * @SDE_RM_TOPCTL_RESERVE_CLEAR: If set, in AtomicTest phase, before testing,
+ *                               release any reservation held by this display.
+ *                               Normal behavior would not impact the
+ *                               reservation list during the AtomicTest phase.
+ * @SDE_RM_TOPCTL_DSPP: Require layer mixers with DSPP capabilities
+ * @SDE_RM_TOPCTL_FORCE_TILING: Require kernel to split across multiple layer
+ *                              mixers, despite width fitting within capability
+ *                              of a single layer mixer.
+ * @SDE_RM_TOPCTL_PPSPLIT: Require kernel to use pingpong split pipe
+ *                         configuration instead of dual pipe.
+ */
+enum sde_rm_topology_control {
+	SDE_RM_TOPCTL_RESERVE_LOCK,
+	SDE_RM_TOPCTL_RESERVE_CLEAR,
+	SDE_RM_TOPCTL_DSPP,
+	SDE_RM_TOPCTL_FORCE_TILING,
+	SDE_RM_TOPCTL_PPSPLIT,
+};
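/*
 * Illustrative sketch (not part of the patch): the enum values above are
 * bit positions in the 64-bit TOPOLOGY_CONTROL connector property, so a
 * request is built with BIT() and validated by sde_rm_check_property_topctl(),
 * declared later in this header.
 */
static int example_build_topctl(void)
{
	uint64_t top_ctrl = BIT(SDE_RM_TOPCTL_RESERVE_LOCK) |
			BIT(SDE_RM_TOPCTL_DSPP);

	/* rejects incompatible combinations, e.g. PPSPLIT + FORCE_TILING */
	return sde_rm_check_property_topctl(top_ctrl);
}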
+
+/**
+ * struct sde_rm - SDE dynamic hardware resource manager
+ * @dev: device handle for event logging purposes
+ * @rsvps: list of hardware reservations by each crtc->encoder->connector
+ * @hw_blks: array of lists of hardware resources present in the system, one
+ *	list per type of hardware block
+ * @hw_mdp: hardware object for mdp_top
+ * @lm_max_width: cached layer mixer maximum width
+ * @rsvp_next_seq: sequence number for next reservation for debugging purposes
+ * @rm_lock: resource manager mutex
+ */
+struct sde_rm {
+	struct drm_device *dev;
+	struct list_head rsvps;
+	struct list_head hw_blks[SDE_HW_BLK_MAX];
+	struct sde_hw_mdp *hw_mdp;
+	uint32_t lm_max_width;
+	uint32_t rsvp_next_seq;
+	struct mutex rm_lock;
+};
+
+/**
+ *  struct sde_rm_hw_blk - resource manager internal structure
+ *	forward declaration for single iterator definition without void pointer
+ */
+struct sde_rm_hw_blk;
+
+/**
+ * struct sde_rm_hw_iter - iterator for use with sde_rm
+ * @hw: sde_hw object requested, or NULL on failure
+ * @blk: sde_rm internal block representation. Clients ignore. Used as iterator.
+ * @enc_id: DRM ID of Encoder client wishes to search for, or 0 for Any Encoder
+ * @type: Hardware Block Type client wishes to search for.
+ */
+struct sde_rm_hw_iter {
+	void *hw;
+	struct sde_rm_hw_blk *blk;
+	uint32_t enc_id;
+	enum sde_hw_blk_type type;
+};
+
+/**
+ * sde_rm_init - Read hardware catalog and create reservation tracking objects
+ *	for all HW blocks.
+ * @rm: SDE Resource Manager handle
+ * @cat: Pointer to hardware catalog
+ * @mmio: mapped register io address of MDP
+ * @dev: device handle for event logging purposes
+ * @Return: 0 on Success otherwise -ERROR
+ */
+int sde_rm_init(struct sde_rm *rm,
+		struct sde_mdss_cfg *cat,
+		void *mmio,
+		struct drm_device *dev);
+
+/**
+ * sde_rm_destroy - Free all memory allocated by sde_rm_init
+ * @rm: SDE Resource Manager handle
+ * @Return: 0 on Success otherwise -ERROR
+ */
+int sde_rm_destroy(struct sde_rm *rm);
+
+/**
+ * sde_rm_reserve - Given a CRTC->Encoder->Connector display chain, analyze
+ *	the intended use of the connection and the user requirements,
+ *	specified through the related topology control properties, and
+ *	reserve hardware blocks for that display chain.
+ *	HW blocks can then be accessed through sde_rm_get_* functions.
+ *	HW Reservations should be released via sde_rm_release_hw.
+ * @rm: SDE Resource Manager handle
+ * @drm_enc: DRM Encoder handle
+ * @crtc_state: Proposed Atomic DRM CRTC State handle
+ * @conn_state: Proposed Atomic DRM Connector State handle
+ * @test_only: Atomic-Test phase, discard results (unless property overrides)
+ * @Return: 0 on Success otherwise -ERROR
+ */
+int sde_rm_reserve(struct sde_rm *rm,
+		struct drm_encoder *drm_enc,
+		struct drm_crtc_state *crtc_state,
+		struct drm_connector_state *conn_state,
+		bool test_only);
+
+/**
+ * sde_rm_release - Given the encoder for the display chain, release any
+ *	HW blocks previously reserved for that use case.
+ * @rm: SDE Resource Manager handle
+ * @enc: DRM Encoder handle
+ */
+void sde_rm_release(struct sde_rm *rm, struct drm_encoder *enc);
+
+/**
+ * sde_rm_get_mdp - Retrieve HW block for MDP TOP.
+ *	This is never reserved, and is usable by any display.
+ * @rm: SDE Resource Manager handle
+ * @Return: Pointer to hw block or NULL
+ */
+struct sde_hw_mdp *sde_rm_get_mdp(struct sde_rm *rm);
+
+/**
+ * sde_rm_init_hw_iter - setup given iterator for new iteration over hw list
+ *	using sde_rm_get_hw
+ * @iter: iter object to initialize
+ * @enc_id: DRM ID of Encoder client wishes to search for, or 0 for Any Encoder
+ * @type: Hardware Block Type client wishes to search for.
+ */
+void sde_rm_init_hw_iter(
+		struct sde_rm_hw_iter *iter,
+		uint32_t enc_id,
+		enum sde_hw_blk_type type);
+
+/**
+ * sde_rm_get_hw - retrieve reserved hw object given encoder and hw type
+ *	Meant to do a single pass through the hardware list to iteratively
+ *	retrieve hardware blocks of a given type for a given encoder.
+ *	Initialize an iterator object.
+ *	Set hw block type of interest. Set encoder id of interest, 0 for any.
+ *	Function returns first hw of type for that encoder.
+ *	Subsequent calls will return the next reserved hw of that type in-order.
+ *	Iterator HW pointer will be null on failure to find hw.
+ * @rm: SDE Resource Manager handle
+ * @iter: iterator object
+ * @Return: true on match found, false on no match found
+ */
+bool sde_rm_get_hw(struct sde_rm *rm, struct sde_rm_hw_iter *iter);
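/*
 * Illustrative sketch (not part of the patch) of the iteration procedure
 * documented above. SDE_HW_BLK_LM and the cast of iter.hw to
 * struct sde_hw_mixer are assumptions based on the driver's conventions.
 */
static void example_iterate_mixers(struct sde_rm *rm, struct drm_encoder *enc)
{
	struct sde_rm_hw_iter iter;

	sde_rm_init_hw_iter(&iter, enc->base.id, SDE_HW_BLK_LM);
	while (sde_rm_get_hw(rm, &iter)) {
		struct sde_hw_mixer *lm = iter.hw;

		/* program each layer mixer reserved for this encoder */
		(void)lm;
	}
}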
+
+/**
+ * sde_rm_get_hw_by_id - retrieve hw object given hw type and hw id
+ *	Meant to do a single pass through the hardware list to iteratively
+ *	retrieve hardware blocks of a given type and id.
+ *	Function returns the hw resource pointer.
+ * @rm: SDE Resource Manager handle
+ * @type: hw type
+ * @id: hw id
+ * @Return: hw resource pointer on match found, NULL on no match found
+ */
+void *sde_rm_get_hw_by_id(struct sde_rm *rm, enum sde_hw_blk_type type, int id);
+
+/**
+ * sde_rm_check_property_topctl - validate property bitmask before it is set
+ * @val: user's proposed topology control bitmask
+ * @Return: 0 on success or error
+ */
+int sde_rm_check_property_topctl(uint64_t val);
+
+#endif /* __SDE_RM_H__ */
diff -Nruw linux-4.4.115/drivers/gpu/drm/msm/sde/sde_splash.c linux-4.4.115-fbx/drivers/gpu/drm/msm/sde/sde_splash.c
--- linux-4.4.115/drivers/gpu/drm/msm/sde/sde_splash.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/gpu/drm/msm/sde/sde_splash.c	2019-01-22 16:16:23.523246588 +0100
@@ -0,0 +1,682 @@
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/of_address.h>
+#include <linux/debugfs.h>
+#include <linux/memblock.h>
+
+#include "msm_drv.h"
+#include "msm_mmu.h"
+#include "sde_kms.h"
+#include "sde_hw_mdss.h"
+#include "sde_hw_util.h"
+#include "sde_hw_intf.h"
+#include "sde_hw_catalog.h"
+#include "dsi_display.h"
+
+#define MDP_SSPP_TOP0_OFF		0x1000
+#define DISP_INTF_SEL			0x004
+#define SPLIT_DISPLAY_EN		0x2F4
+
+/* scratch registers */
+#define SCRATCH_REGISTER_0		0x014
+#define SCRATCH_REGISTER_1		0x018
+#define SCRATCH_REGISTER_2		0x01C
+
+#define SDE_LK_RUNNING_VALUE		0xC001CAFE
+#define SDE_LK_SHUT_DOWN_VALUE		0xDEADDEAD
+#define SDE_LK_EXIT_VALUE		0xDEADBEEF
+
+#define SDE_LK_EXIT_MAX_LOOP		20
+
+static DEFINE_MUTEX(sde_splash_lock);
+
+/*
+ * To free memory reserved at bootup we cannot call the __init free
+ * functions, so we need to free this memory ourselves using the
+ * free_reserved_page() function.
+ */
+static void _sde_splash_free_bootup_memory_to_system(phys_addr_t phys,
+						size_t size)
+{
+	unsigned long pfn_start, pfn_end, pfn_idx;
+
+	memblock_free(phys, size);
+
+	pfn_start = phys >> PAGE_SHIFT;
+	pfn_end = (phys + size) >> PAGE_SHIFT;
+
+	for (pfn_idx = pfn_start; pfn_idx < pfn_end; pfn_idx++)
+		free_reserved_page(pfn_to_page(pfn_idx));
+}
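/*
 * Worked example for the loop above (values hypothetical): a 1 MB region
 * reserved at 0x83000000 with 4 KB pages spans pfns 0x83000 through 0x830ff,
 * so 256 pages are handed back to the page allocator once the memblock
 * reservation is dropped.
 */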
+
+static int _sde_splash_parse_dt_get_lk_pool_node(struct drm_device *dev,
+					struct sde_splash_info *sinfo)
+{
+	struct device_node *parent, *node;
+	struct resource r;
+	int ret = 0;
+
+	if (!sinfo)
+		return -EINVAL;
+
+	parent = of_find_node_by_path("/reserved-memory");
+	if (!parent)
+		return -EINVAL;
+
+	node = of_find_node_by_name(parent, "lk_pool");
+	if (!node) {
+		SDE_ERROR("mem reservation for lk_pool is not present\n");
+		ret = -EINVAL;
+		goto parent_node_err;
+	}
+
+	/* read the address range of the lk_pool node */
+	if (of_address_to_resource(node, 0, &r)) {
+		ret = -EINVAL;
+		goto child_node_err;
+	}
+
+	sinfo->lk_pool_paddr = (dma_addr_t)r.start;
+	sinfo->lk_pool_size = resource_size(&r);
+
+	DRM_INFO("lk_pool: addr:%pK, size:%pK\n",
+			(void *)sinfo->lk_pool_paddr,
+			(void *)sinfo->lk_pool_size);
+
+child_node_err:
+	of_node_put(node);
+
+parent_node_err:
+	of_node_put(parent);
+
+	return ret;
+}
+
+static int _sde_splash_parse_dt_get_display_node(struct drm_device *dev,
+					struct sde_splash_info *sinfo)
+{
+	unsigned long size = 0;
+	dma_addr_t start;
+	struct device_node *node;
+	int ret = 0, i = 0, len = 0;
+
+	/* get reserved memory for display module */
+	if (of_get_property(dev->dev->of_node, "contiguous-region", &len))
+		sinfo->splash_mem_num = len / sizeof(u32);
+	else
+		sinfo->splash_mem_num = 0;
+
+	sinfo->splash_mem_paddr =
+			kmalloc(sizeof(phys_addr_t) * sinfo->splash_mem_num,
+				GFP_KERNEL);
+	if (!sinfo->splash_mem_paddr) {
+		SDE_ERROR("alloc splash_mem_paddr failed\n");
+		return -ENOMEM;
+	}
+
+	sinfo->splash_mem_size =
+			kmalloc(sizeof(size_t) * sinfo->splash_mem_num,
+				GFP_KERNEL);
+	if (!sinfo->splash_mem_size) {
+		SDE_ERROR("alloc splash_mem_size failed\n");
+		goto error;
+	}
+
+	sinfo->obj = kzalloc(sizeof(struct drm_gem_object *) *
+				sinfo->splash_mem_num, GFP_KERNEL);
+	if (!sinfo->obj) {
+		SDE_ERROR("construct splash gem objects failed\n");
+		goto error;
+	}
+
+	for (i = 0; i < sinfo->splash_mem_num; i++) {
+		node = of_parse_phandle(dev->dev->of_node,
+					"contiguous-region", i);
+
+		if (node) {
+			struct resource r;
+
+			ret = of_address_to_resource(node, 0, &r);
+			if (ret)
+				return ret;
+
+			size = resource_size(&r);
+			start = (dma_addr_t)r.start;
+
+			sinfo->splash_mem_paddr[i] = start;
+			sinfo->splash_mem_size[i] = size;
+
+			DRM_INFO("blk: %d, addr:%pK, size:%pK\n",
+				i, (void *)sinfo->splash_mem_paddr[i],
+				(void *)sinfo->splash_mem_size[i]);
+
+			of_node_put(node);
+		}
+	}
+
+	return ret;
+
+error:
+	kfree(sinfo->splash_mem_paddr);
+	sinfo->splash_mem_paddr = NULL;
+
+	kfree(sinfo->splash_mem_size);
+	sinfo->splash_mem_size = NULL;
+
+	return -ENOMEM;
+}
+
+static bool _sde_splash_lk_check(struct sde_hw_intr *intr)
+{
+	return SDE_REG_READ(&intr->hw, SCRATCH_REGISTER_1) ==
+			SDE_LK_RUNNING_VALUE;
+}
+
+/**
+ * _sde_splash_notify_lk_exit.
+ *
+ * Function to monitor LK's status and tell it to exit.
+ */
+static void _sde_splash_notify_lk_exit(struct sde_hw_intr *intr)
+{
+	int i = 0;
+
+	/* first, write the exit signal to the scratch register */
+	SDE_REG_WRITE(&intr->hw, SCRATCH_REGISTER_1, SDE_LK_SHUT_DOWN_VALUE);
+
+	while ((SDE_LK_EXIT_VALUE !=
+		SDE_REG_READ(&intr->hw, SCRATCH_REGISTER_1)) &&
+					(++i < SDE_LK_EXIT_MAX_LOOP)) {
+		DRM_INFO("waiting for LK to exit\n");
+		msleep(20);
+	}
+
+	if (i == SDE_LK_EXIT_MAX_LOOP)
+		SDE_ERROR("timed out waiting for LK to exit\n");
+}
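/*
 * For reference: the handshake above gives LK roughly
 * SDE_LK_EXIT_MAX_LOOP * 20 ms = 400 ms to acknowledge the
 * SDE_LK_SHUT_DOWN_VALUE write by posting SDE_LK_EXIT_VALUE back into
 * SCRATCH_REGISTER_1 before the failure is logged.
 */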
+
+static int _sde_splash_gem_new(struct drm_device *dev,
+				struct sde_splash_info *sinfo)
+{
+	int i, ret;
+
+	for (i = 0; i < sinfo->splash_mem_num; i++) {
+		sinfo->obj[i] = msm_gem_new(dev,
+				sinfo->splash_mem_size[i], MSM_BO_UNCACHED);
+
+		if (IS_ERR(sinfo->obj[i])) {
+			ret = PTR_ERR(sinfo->obj[i]);
+			SDE_ERROR("failed to allocate gem, ret=%d\n", ret);
+			goto error;
+		}
+	}
+
+	return 0;
+
+error:
+	/* only free the gem objects that were successfully created */
+	sinfo->obj[i] = NULL;
+	while (--i >= 0) {
+		msm_gem_free_object(sinfo->obj[i]);
+		sinfo->obj[i] = NULL;
+	}
+
+	return ret;
+}
+
+static int _sde_splash_get_pages(struct drm_gem_object *obj, phys_addr_t phys)
+{
+	struct msm_gem_object *msm_obj = to_msm_bo(obj);
+	struct page **p;
+	dma_addr_t paddr;
+	int npages = obj->size >> PAGE_SHIFT;
+	int i;
+
+	p = drm_malloc_ab(npages, sizeof(struct page *));
+	if (!p)
+		return -ENOMEM;
+
+	paddr = phys;
+
+	for (i = 0; i < npages; i++) {
+		p[i] = phys_to_page(paddr);
+		paddr += PAGE_SIZE;
+	}
+
+	msm_obj->sgt = drm_prime_pages_to_sg(p, npages);
+	if (IS_ERR(msm_obj->sgt)) {
+		SDE_ERROR("failed to allocate sgt\n");
+		drm_free_large(p);
+		return -ENOMEM;
+	}
+
+	msm_obj->pages = p;
+
+	return 0;
+}
+
+static void _sde_splash_destroy_gem_object(struct msm_gem_object *msm_obj)
+{
+	if (msm_obj->pages) {
+		sg_free_table(msm_obj->sgt);
+		kfree(msm_obj->sgt);
+		drm_free_large(msm_obj->pages);
+		msm_obj->pages = NULL;
+	}
+}
+
+static void _sde_splash_destroy_splash_node(struct sde_splash_info *sinfo)
+{
+	kfree(sinfo->splash_mem_paddr);
+	sinfo->splash_mem_paddr = NULL;
+
+	kfree(sinfo->splash_mem_size);
+	sinfo->splash_mem_size = NULL;
+}
+
+static void _sde_splash_get_connector_ref_cnt(struct sde_splash_info *sinfo,
+					u32 *hdmi_cnt, u32 *dsi_cnt)
+{
+	mutex_lock(&sde_splash_lock);
+	*hdmi_cnt = sinfo->hdmi_connector_cnt;
+	*dsi_cnt = sinfo->dsi_connector_cnt;
+	mutex_unlock(&sde_splash_lock);
+}
+
+static int _sde_splash_free_resource(struct msm_mmu *mmu,
+		struct sde_splash_info *sinfo, enum splash_connector_type conn)
+{
+	struct msm_gem_object *msm_obj = to_msm_bo(sinfo->obj[conn]);
+
+	if (!msm_obj)
+		return -EINVAL;
+
+	if (mmu->funcs && mmu->funcs->unmap)
+		mmu->funcs->unmap(mmu, sinfo->splash_mem_paddr[conn],
+				msm_obj->sgt, NULL);
+
+	_sde_splash_free_bootup_memory_to_system(sinfo->splash_mem_paddr[conn],
+						sinfo->splash_mem_size[conn]);
+
+	_sde_splash_destroy_gem_object(msm_obj);
+
+	return 0;
+}
+
+__ref int sde_splash_init(struct sde_power_handle *phandle, struct msm_kms *kms)
+{
+	struct sde_kms *sde_kms;
+	struct sde_splash_info *sinfo;
+	int i = 0;
+
+	if (!phandle || !kms) {
+		SDE_ERROR("invalid phandle/kms\n");
+		return -EINVAL;
+	}
+
+	sde_kms = to_sde_kms(kms);
+	sinfo = &sde_kms->splash_info;
+
+	sinfo->dsi_connector_cnt = 0;
+	sinfo->hdmi_connector_cnt = 0;
+
+	sde_power_data_bus_bandwidth_ctrl(phandle,
+		sde_kms->core_client, true);
+
+	for (i = 0; i < sinfo->splash_mem_num; i++) {
+		if (!memblock_is_reserved(sinfo->splash_mem_paddr[i])) {
+			SDE_ERROR("failed to reserve memory\n");
+
+			/* withdraw the vote when failed. */
+			sde_power_data_bus_bandwidth_ctrl(phandle,
+					sde_kms->core_client, false);
+
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
+void sde_splash_destroy(struct sde_splash_info *sinfo,
+			struct sde_power_handle *phandle,
+			struct sde_power_client *pclient)
+{
+	struct msm_gem_object *msm_obj;
+	int i = 0;
+
+	if (!sinfo || !phandle || !pclient) {
+		SDE_ERROR("invalid sde_kms/phandle/pclient\n");
+		return;
+	}
+
+	for (i = 0; i < sinfo->splash_mem_num; i++) {
+		msm_obj = to_msm_bo(sinfo->obj[i]);
+
+		if (msm_obj)
+			_sde_splash_destroy_gem_object(msm_obj);
+	}
+
+	sde_power_data_bus_bandwidth_ctrl(phandle, pclient, false);
+
+	_sde_splash_destroy_splash_node(sinfo);
+}
+
+/*
+ * sde_splash_parse_dt.
+ * This function parses and reserves two kinds of memory node:
+ * first, the reserved memory for the display buffers;
+ * second, the memory node that LK's code and stack run on.
+ */
+int sde_splash_parse_dt(struct drm_device *dev)
+{
+	struct msm_drm_private *priv = dev->dev_private;
+	struct sde_kms *sde_kms;
+	struct sde_splash_info *sinfo;
+
+	if (!priv || !priv->kms) {
+		SDE_ERROR("Invalid kms\n");
+		return -EINVAL;
+	}
+
+	sde_kms = to_sde_kms(priv->kms);
+	sinfo = &sde_kms->splash_info;
+
+	if (_sde_splash_parse_dt_get_display_node(dev, sinfo)) {
+		SDE_ERROR("get display node failed\n");
+		return -EINVAL;
+	}
+
+	if (_sde_splash_parse_dt_get_lk_pool_node(dev, sinfo)) {
+		SDE_ERROR("get LK pool node failed\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+int sde_splash_get_handoff_status(struct msm_kms *kms)
+{
+	uint32_t intf_sel = 0;
+	uint32_t split_display = 0;
+	uint32_t num_of_display_on = 0;
+	uint32_t i = 0;
+	struct sde_kms *sde_kms = to_sde_kms(kms);
+	struct sde_rm *rm;
+	struct sde_hw_blk_reg_map *c;
+	struct sde_splash_info *sinfo;
+	struct sde_mdss_cfg *catalog;
+
+	sinfo = &sde_kms->splash_info;
+	if (!sinfo) {
+		SDE_ERROR("%s(%d): invalid splash info\n",
+				__func__, __LINE__);
+		return -EINVAL;
+	}
+
+	rm = &sde_kms->rm;
+
+	if (!rm || !rm->hw_mdp) {
+		SDE_ERROR("invalid rm.\n");
+		return -EINVAL;
+	}
+
+	c = &rm->hw_mdp->hw;
+	intf_sel = SDE_REG_READ(c, DISP_INTF_SEL);
+	split_display = SDE_REG_READ(c, SPLIT_DISPLAY_EN);
+
+	catalog = sde_kms->catalog;
+
+	if (intf_sel != 0) {
+		for (i = 0; i < catalog->intf_count; i++)
+			if ((intf_sel >> i*8) & 0x000000FF)
+				num_of_display_on++;
+
+		/*
+		 * For split display enabled - DSI0, DSI1 interfaces are
+		 * considered as single display. So decrement
+		 * 'num_of_display_on' by 1
+		 */
+		if (split_display)
+			num_of_display_on--;
+	}
+
+	if (num_of_display_on) {
+		sinfo->handoff = true;
+		sinfo->program_scratch_regs = true;
+		sinfo->lk_is_exited = false;
+	} else {
+		sinfo->handoff = false;
+		sinfo->program_scratch_regs = false;
+		sinfo->lk_is_exited = true;
+	}
+
+	return 0;
+}
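/*
 * Illustrative sketch (not part of the patch): DISP_INTF_SEL packs one
 * interface selector per byte, which is what the per-byte loop above counts.
 * The register value here is hypothetical.
 */
static u32 example_count_active_intf(void)
{
	u32 intf_sel = 0x00000201;	/* INTF_0 and INTF_1 driven */
	u32 i, num_on = 0;

	for (i = 0; i < 4; i++)
		if ((intf_sel >> (i * 8)) & 0xFF)
			num_on++;

	return num_on;	/* 2 for this value */
}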
+
+int sde_splash_smmu_map(struct drm_device *dev, struct msm_mmu *mmu,
+				struct sde_splash_info *sinfo)
+{
+	struct msm_gem_object *msm_obj;
+	int i = 0, ret = 0;
+
+	if (!mmu || !sinfo)
+		return -EINVAL;
+
+	/* first, construct drm_gem_objects for the splash memory */
+	if (_sde_splash_gem_new(dev, sinfo))
+		return -ENOMEM;
+
+	/* second, construct the sgt table for the smmu map call */
+	for (i = 0; i < sinfo->splash_mem_num; i++) {
+		if (_sde_splash_get_pages(sinfo->obj[i],
+				sinfo->splash_mem_paddr[i]))
+			return -ENOMEM;
+	}
+
+	for (i = 0; i < sinfo->splash_mem_num; i++) {
+		msm_obj = to_msm_bo(sinfo->obj[i]);
+
+		if (mmu->funcs && mmu->funcs->map) {
+			ret = mmu->funcs->map(mmu, sinfo->splash_mem_paddr[i],
+				msm_obj->sgt, IOMMU_READ | IOMMU_NOEXEC, NULL);
+
+			if (!ret) {
+				SDE_ERROR("Map blk %d @%pK failed.\n",
+					i, (void *)sinfo->splash_mem_paddr[i]);
+				return ret;
+			}
+		}
+	}
+
+	return ret ? 0 : -ENOMEM;
+}
+
+void sde_splash_setup_connector_count(struct sde_splash_info *sinfo,
+					int connector_type)
+{
+	switch (connector_type) {
+	case DRM_MODE_CONNECTOR_HDMIA:
+		sinfo->hdmi_connector_cnt++;
+		break;
+	case DRM_MODE_CONNECTOR_DSI:
+		sinfo->dsi_connector_cnt++;
+		break;
+	default:
+		SDE_ERROR("invalid connector_type %d\n", connector_type);
+	}
+}
+
+bool sde_splash_get_lk_complete_status(struct sde_splash_info *sinfo)
+{
+	bool ret = false;
+
+	mutex_lock(&sde_splash_lock);
+	ret = !sinfo->handoff && !sinfo->lk_is_exited;
+	mutex_unlock(&sde_splash_lock);
+
+	return ret;
+}
+
+int sde_splash_clean_up_free_resource(struct msm_kms *kms,
+				struct sde_power_handle *phandle,
+				int connector_type, void *display)
+{
+	struct sde_kms *sde_kms;
+	struct sde_splash_info *sinfo;
+	struct msm_mmu *mmu;
+	struct dsi_display *dsi_display = display;
+	int ret = 0;
+	u32 hdmi_conn_count = 0;
+	u32 dsi_conn_count = 0;
+	static const char *last_commit_display_type = "unknown";
+
+	if (!phandle || !kms) {
+		SDE_ERROR("invalid phandle/kms.\n");
+		return -EINVAL;
+	}
+
+	sde_kms = to_sde_kms(kms);
+	sinfo = &sde_kms->splash_info;
+	if (!sinfo) {
+		SDE_ERROR("%s(%d): invalid splash info\n", __func__, __LINE__);
+		return -EINVAL;
+	}
+
+	_sde_splash_get_connector_ref_cnt(sinfo, &hdmi_conn_count,
+						&dsi_conn_count);
+
+	mutex_lock(&sde_splash_lock);
+	if (hdmi_conn_count == 0 && dsi_conn_count == 0 &&
+					!sinfo->lk_is_exited) {
+		/*
+		 * When both the HDMI and DSI handoff have finished:
+		 * 1. Destroy the splash node objects.
+		 * 2. Release the memory that LK's stack runs on.
+		 * 3. Withdraw the AHB data bus bandwidth vote.
+		 */
+		DRM_INFO("HDMI and DSI resource handoff is completed\n");
+
+		sinfo->lk_is_exited = true;
+
+		_sde_splash_destroy_splash_node(sinfo);
+
+		_sde_splash_free_bootup_memory_to_system(sinfo->lk_pool_paddr,
+							sinfo->lk_pool_size);
+
+		sde_power_data_bus_bandwidth_ctrl(phandle,
+				sde_kms->core_client, false);
+
+		mutex_unlock(&sde_splash_lock);
+		return 0;
+	}
+
+	mmu = sde_kms->aspace[0]->mmu;
+
+	switch (connector_type) {
+	case DRM_MODE_CONNECTOR_HDMIA:
+		if (sinfo->hdmi_connector_cnt == 1) {
+			sinfo->hdmi_connector_cnt--;
+
+			ret = _sde_splash_free_resource(mmu,
+					sinfo, SPLASH_HDMI);
+		}
+		break;
+	case DRM_MODE_CONNECTOR_DSI:
+		/*
+		 * Commits arrive on two DSI connectors. Before releasing
+		 * the DSI resource, make sure consecutive commits have
+		 * landed on different DSIs, which proves the handoff has
+		 * finished on both. Freeing the memory any earlier would
+		 * race with DSI0 or DSI1 still reading from it.
+		 */
+		if (strcmp(dsi_display->display_type, "unknown") &&
+			strcmp(last_commit_display_type,
+					dsi_display->display_type)) {
+			if (sinfo->dsi_connector_cnt > 1)
+				sinfo->dsi_connector_cnt--;
+			else if (sinfo->dsi_connector_cnt == 1) {
+				ret = _sde_splash_free_resource(mmu,
+					sinfo, SPLASH_DSI);
+
+				sinfo->dsi_connector_cnt--;
+			}
+
+			last_commit_display_type = dsi_display->display_type;
+		}
+		break;
+	default:
+		ret = -EINVAL;
+		SDE_ERROR("%s: invalid connector_type %d\n",
+				__func__, connector_type);
+	}
+
+	mutex_unlock(&sde_splash_lock);
+
+	return ret;
+}
+
+/*
+ * This function will:
+ * 1. Notify LK to exit and wait until the exit is done.
+ * 2. Set DOMAIN_ATTR_EARLY_MAP to 1 to enable stage 1 translation in iommu.
+ */
+int sde_splash_clean_up_exit_lk(struct msm_kms *kms)
+{
+	struct sde_splash_info *sinfo;
+	struct msm_mmu *mmu;
+	struct sde_kms *sde_kms = to_sde_kms(kms);
+	int ret;
+
+	sinfo = &sde_kms->splash_info;
+
+	if (!sinfo) {
+		SDE_ERROR("%s(%d): invalid splash info\n", __func__, __LINE__);
+		return -EINVAL;
+	}
+
+	/* Monitor LK's status and tell it to exit. */
+	mutex_lock(&sde_splash_lock);
+	if (sinfo->program_scratch_regs) {
+		if (_sde_splash_lk_check(sde_kms->hw_intr))
+			_sde_splash_notify_lk_exit(sde_kms->hw_intr);
+
+		sinfo->handoff = false;
+		sinfo->program_scratch_regs = false;
+	}
+	mutex_unlock(&sde_splash_lock);
+
+	if (!sde_kms->aspace[0] || !sde_kms->aspace[0]->mmu) {
+		/* we do not return a failure value here, to ensure
+		 * the "lk_is_exited" flag is set.
+		 */
+		SDE_ERROR("invalid mmu\n");
+		WARN_ON(1);
+	} else {
+		mmu = sde_kms->aspace[0]->mmu;
+		/* After LK has exited, set early domain map attribute
+		 * to 1 to enable stage 1 translation in iommu driver.
+		 */
+		if (mmu->funcs && mmu->funcs->set_property) {
+			ret = mmu->funcs->set_property(mmu,
+				DOMAIN_ATTR_EARLY_MAP, &sinfo->handoff);
+
+			if (ret)
+				SDE_ERROR("set_property failed\n");
+		}
+	}
+
+	return 0;
+}
diff -Nruw linux-4.4.115/drivers/gpu/drm/msm/sde/sde_splash.h linux-4.4.115-fbx/drivers/gpu/drm/msm/sde/sde_splash.h
--- linux-4.4.115/drivers/gpu/drm/msm/sde/sde_splash.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/gpu/drm/msm/sde/sde_splash.h	2019-01-22 16:16:23.523246588 +0100
@@ -0,0 +1,132 @@
+/**
+ * Copyright (c) 2017 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#ifndef SDE_SPLASH_H_
+#define SDE_SPLASH_H_
+
+#include "msm_kms.h"
+#include "msm_mmu.h"
+
+enum splash_connector_type {
+	SPLASH_DSI = 0,
+	SPLASH_HDMI,
+};
+
+struct sde_splash_info {
+	/* handoff flag */
+	bool handoff;
+
+	/* flag of display scratch registers */
+	bool program_scratch_regs;
+
+	/* to indicate LK is totally exited */
+	bool lk_is_exited;
+
+	/* memory node used for display buffer */
+	uint32_t splash_mem_num;
+
+	/* physical address of memory node for display buffer */
+	phys_addr_t *splash_mem_paddr;
+
+	/* size of memory node */
+	size_t *splash_mem_size;
+
+	/* constructed gem objects for smmu mapping */
+	struct drm_gem_object **obj;
+
+	/* physical address of lk pool */
+	phys_addr_t lk_pool_paddr;
+
+	/* memory size of lk pool */
+	size_t lk_pool_size;
+
+	/* registered hdmi connector count */
+	uint32_t hdmi_connector_cnt;
+
+	/* registered dsi connector count */
+	uint32_t dsi_connector_cnt;
+};
+
+/* APIs for early splash handoff functions */
+
+/**
+ * sde_splash_get_handoff_status.
+ *
+ * This function reads the DISP_INTF_SEL register to get
+ * the status of early splash.
+ */
+int sde_splash_get_handoff_status(struct msm_kms *kms);
+
+/**
+ * sde_splash_init
+ *
+ * This function votes for bus bandwidth and verifies the reserved memory.
+ */
+int sde_splash_init(struct sde_power_handle *phandle, struct msm_kms *kms);
+
+/**
+ * sde_splash_setup_connector_count
+ *
+ * Count the registered DSI and HDMI connectors respectively.
+ */
+void sde_splash_setup_connector_count(struct sde_splash_info *sinfo,
+				int connector_type);
+
+/**
+ * sde_splash_clean_up_exit_lk.
+ *
+ * Tell LK to exit, and clean up the resource.
+ */
+int sde_splash_clean_up_exit_lk(struct msm_kms *kms);
+
+/**
+ * sde_splash_clean_up_free_resource.
+ *
+ * Free the HDMI or DSI resources according to the
+ * input connector_type.
+ */
+int sde_splash_clean_up_free_resource(struct msm_kms *kms,
+				struct sde_power_handle *phandle,
+				int connector_type, void *display);
+
+/**
+ * sde_splash_parse_dt.
+ *
+ * Parse reserved memory block from DT for early splash.
+ */
+int sde_splash_parse_dt(struct drm_device *dev);
+
+/**
+ * sde_splash_smmu_map.
+ *
+ * Map the physical memory that LK accessed into the iommu driver.
+ */
+int sde_splash_smmu_map(struct drm_device *dev, struct msm_mmu *mmu,
+			struct sde_splash_info *sinfo);
+
+/**
+ * sde_splash_destroy
+ *
+ * Destroy the resources in the failure case.
+ */
+void sde_splash_destroy(struct sde_splash_info *sinfo,
+			struct sde_power_handle *phandle,
+			struct sde_power_client *pclient);
+
+/**
+ * sde_splash_get_lk_complete_status
+ *
+ * Get LK's status to check if it has been stopped.
+ */
+bool sde_splash_get_lk_complete_status(struct sde_splash_info *sinfo);
+
+#endif
diff -Nruw linux-4.4.115/drivers/gpu/drm/msm/sde/sde_trace.h linux-4.4.115-fbx/drivers/gpu/drm/msm/sde/sde_trace.h
--- linux-4.4.115/drivers/gpu/drm/msm/sde/sde_trace.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/gpu/drm/msm/sde/sde_trace.h	2019-01-22 16:16:23.523246588 +0100
@@ -0,0 +1,211 @@
+/* Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#if !defined(_SDE_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
+#define _SDE_TRACE_H_
+
+#include <linux/stringify.h>
+#include <linux/types.h>
+#include <linux/tracepoint.h>
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM sde
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE sde_trace
+
+TRACE_EVENT(sde_perf_set_qos_luts,
+	TP_PROTO(u32 pnum, u32 fmt, bool rt, u32 fl,
+		u32 lut, bool linear),
+	TP_ARGS(pnum, fmt, rt, fl, lut, linear),
+	TP_STRUCT__entry(
+			__field(u32, pnum)
+			__field(u32, fmt)
+			__field(bool, rt)
+			__field(u32, fl)
+			__field(u32, lut)
+			__field(bool, linear)
+	),
+	TP_fast_assign(
+			__entry->pnum = pnum;
+			__entry->fmt = fmt;
+			__entry->rt = rt;
+			__entry->fl = fl;
+			__entry->lut = lut;
+			__entry->linear = linear;
+	),
+	TP_printk("pnum=%d fmt=%x rt=%d fl=%d lut=0x%x lin=%d",
+			__entry->pnum, __entry->fmt,
+			__entry->rt, __entry->fl,
+			__entry->lut, __entry->linear)
+);
+
+TRACE_EVENT(sde_perf_set_danger_luts,
+	TP_PROTO(u32 pnum, u32 fmt, u32 mode, u32 danger_lut,
+		u32 safe_lut),
+	TP_ARGS(pnum, fmt, mode, danger_lut, safe_lut),
+	TP_STRUCT__entry(
+			__field(u32, pnum)
+			__field(u32, fmt)
+			__field(u32, mode)
+			__field(u32, danger_lut)
+			__field(u32, safe_lut)
+	),
+	TP_fast_assign(
+			__entry->pnum = pnum;
+			__entry->fmt = fmt;
+			__entry->mode = mode;
+			__entry->danger_lut = danger_lut;
+			__entry->safe_lut = safe_lut;
+	),
+	TP_printk("pnum=%d fmt=%x mode=%d luts[0x%x, 0x%x]",
+			__entry->pnum, __entry->fmt,
+			__entry->mode, __entry->danger_lut,
+			__entry->safe_lut)
+);
+
+TRACE_EVENT(sde_perf_set_ot,
+	TP_PROTO(u32 pnum, u32 xin_id, u32 rd_lim, u32 vbif_idx),
+	TP_ARGS(pnum, xin_id, rd_lim, vbif_idx),
+	TP_STRUCT__entry(
+			__field(u32, pnum)
+			__field(u32, xin_id)
+			__field(u32, rd_lim)
+			__field(u32, vbif_idx)
+	),
+	TP_fast_assign(
+			__entry->pnum = pnum;
+			__entry->xin_id = xin_id;
+			__entry->rd_lim = rd_lim;
+			__entry->vbif_idx = vbif_idx;
+	),
+	TP_printk("pnum:%d xin_id:%d ot:%d vbif:%d",
+			__entry->pnum, __entry->xin_id, __entry->rd_lim,
+			__entry->vbif_idx)
+)
+
+TRACE_EVENT(sde_perf_update_bus,
+	TP_PROTO(int client, unsigned long long ab_quota,
+	unsigned long long ib_quota),
+	TP_ARGS(client, ab_quota, ib_quota),
+	TP_STRUCT__entry(
+			__field(int, client)
+			__field(u64, ab_quota)
+			__field(u64, ib_quota)
+	),
+	TP_fast_assign(
+			__entry->client = client;
+			__entry->ab_quota = ab_quota;
+			__entry->ib_quota = ib_quota;
+	),
+	TP_printk("Request client:%d ab=%llu ib=%llu",
+			__entry->client,
+			__entry->ab_quota,
+			__entry->ib_quota)
+)
+
+
+TRACE_EVENT(sde_cmd_release_bw,
+	TP_PROTO(u32 crtc_id),
+	TP_ARGS(crtc_id),
+	TP_STRUCT__entry(
+			__field(u32, crtc_id)
+	),
+	TP_fast_assign(
+			__entry->crtc_id = crtc_id;
+	),
+	TP_printk("crtc:%d", __entry->crtc_id)
+);
+
+TRACE_EVENT(sde_encoder_underrun,
+	TP_PROTO(u32 enc_id, u32 underrun_cnt),
+	TP_ARGS(enc_id, underrun_cnt),
+	TP_STRUCT__entry(
+			__field(u32, enc_id)
+			__field(u32, underrun_cnt)
+	),
+	TP_fast_assign(
+			__entry->enc_id = enc_id;
+			__entry->underrun_cnt = underrun_cnt;
+
+	),
+	TP_printk("enc:%d underrun_cnt:%d", __entry->enc_id,
+		__entry->underrun_cnt)
+);
+
+TRACE_EVENT(sde_mark_write,
+	TP_PROTO(int pid, const char *name, bool trace_begin),
+	TP_ARGS(pid, name, trace_begin),
+	TP_STRUCT__entry(
+			__field(int, pid)
+			__string(trace_name, name)
+			__field(bool, trace_begin)
+	),
+	TP_fast_assign(
+			__entry->pid = pid;
+			__assign_str(trace_name, name);
+			__entry->trace_begin = trace_begin;
+	),
+	TP_printk("%s|%d|%s", __entry->trace_begin ? "B" : "E",
+		__entry->pid, __get_str(trace_name))
+)
+
+TRACE_EVENT(sde_trace_counter,
+	TP_PROTO(int pid, char *name, int value),
+	TP_ARGS(pid, name, value),
+	TP_STRUCT__entry(
+			__field(int, pid)
+			__string(counter_name, name)
+			__field(int, value)
+	),
+	TP_fast_assign(
+			__entry->pid = current->tgid;
+			__assign_str(counter_name, name);
+			__entry->value = value;
+	),
+	TP_printk("%d|%s|%d", __entry->pid,
+			__get_str(counter_name), __entry->value)
+)
+
+TRACE_EVENT(sde_evtlog,
+	TP_PROTO(const char *tag, u32 tag_id, u64 value1, u64 value2),
+	TP_ARGS(tag, tag_id, value1, value2),
+	TP_STRUCT__entry(
+			__field(int, pid)
+			__string(evtlog_tag, tag)
+			__field(u32, tag_id)
+			__field(u64, value1)
+			__field(u64, value2)
+	),
+	TP_fast_assign(
+			__entry->pid = current->tgid;
+			__assign_str(evtlog_tag, tag);
+			__entry->tag_id = tag_id;
+			__entry->value1 = value1;
+			__entry->value2 = value2;
+	),
+	TP_printk("%d|%s:%d|%llu|%llu", __entry->pid, __get_str(evtlog_tag),
+			__entry->tag_id, __entry->value1, __entry->value2)
+)
+
+#define SDE_ATRACE_END(name) trace_sde_mark_write(current->tgid, name, 0)
+#define SDE_ATRACE_BEGIN(name) trace_sde_mark_write(current->tgid, name, 1)
+#define SDE_ATRACE_FUNC() SDE_ATRACE_BEGIN(__func__)
+
+#define SDE_ATRACE_INT(name, value) \
+	trace_sde_trace_counter(current->tgid, name, value)
+
+#endif /* _SDE_TRACE_H_ */
+
+/* This part must be outside protection */
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#include <trace/define_trace.h>
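/*
 * Illustrative sketch (not part of the patch): how the atrace-style helpers
 * above are typically used. The slice and counter names are hypothetical.
 */
static void example_traced_commit(void)
{
	SDE_ATRACE_BEGIN("example_commit");
	/* work done here shows up as a named slice in systrace */
	SDE_ATRACE_END("example_commit");

	/* emit a named counter value alongside the slices */
	SDE_ATRACE_INT("example_pending_frames", 1);
}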
diff -Nruw linux-4.4.115/drivers/gpu/drm/msm/sde/sde_vbif.c linux-4.4.115-fbx/drivers/gpu/drm/msm/sde/sde_vbif.c
--- linux-4.4.115/drivers/gpu/drm/msm/sde/sde_vbif.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/gpu/drm/msm/sde/sde_vbif.c	2019-01-22 16:16:23.523246588 +0100
@@ -0,0 +1,284 @@
+/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt)	"[drm:%s:%d] " fmt, __func__, __LINE__
+
+#include <linux/debugfs.h>
+
+#include "sde_vbif.h"
+#include "sde_hw_vbif.h"
+#include "sde_trace.h"
+
+/**
+ * _sde_vbif_wait_for_xin_halt - wait for the xin to halt
+ * @vbif:	Pointer to hardware vbif driver
+ * @xin_id:	Client interface identifier
+ * @return:	0 if success; error code otherwise
+ */
+static int _sde_vbif_wait_for_xin_halt(struct sde_hw_vbif *vbif, u32 xin_id)
+{
+	ktime_t timeout;
+	bool status;
+	int rc;
+
+	if (!vbif || !vbif->cap || !vbif->ops.get_halt_ctrl) {
+		SDE_ERROR("invalid arguments vbif %d\n", vbif != 0);
+		return -EINVAL;
+	}
+
+	timeout = ktime_add_us(ktime_get(), vbif->cap->xin_halt_timeout);
+	for (;;) {
+		status = vbif->ops.get_halt_ctrl(vbif, xin_id);
+		if (status)
+			break;
+		if (ktime_compare_safe(ktime_get(), timeout) > 0) {
+			status = vbif->ops.get_halt_ctrl(vbif, xin_id);
+			break;
+		}
+		usleep_range(501, 1000);
+	}
+
+	if (!status) {
+		rc = -ETIMEDOUT;
+		SDE_ERROR("VBIF %d client %d not halting. TIMEDOUT.\n",
+				vbif->idx - VBIF_0, xin_id);
+	} else {
+		rc = 0;
+		SDE_DEBUG("VBIF %d client %d is halted\n",
+				vbif->idx - VBIF_0, xin_id);
+	}
+
+	return rc;
+}
+
+/**
+ * _sde_vbif_apply_dynamic_ot_limit - determine OT based on usecase parameters
+ * @vbif:	Pointer to hardware vbif driver
+ * @ot_lim:	Pointer to OT limit to be modified
+ * @params:	Pointer to usecase parameters
+ */
+static void _sde_vbif_apply_dynamic_ot_limit(struct sde_hw_vbif *vbif,
+		u32 *ot_lim, struct sde_vbif_set_ot_params *params)
+{
+	u64 pps;
+	const struct sde_vbif_dynamic_ot_tbl *tbl;
+	u32 i;
+
+	if (!vbif || !(vbif->cap->features & BIT(SDE_VBIF_QOS_OTLIM)))
+		return;
+
+	/* Dynamic OT setting done only for WFD */
+	if (!params->is_wfd)
+		return;
+
+	pps = params->frame_rate;
+	pps *= params->width;
+	pps *= params->height;
+
+	tbl = params->rd ? &vbif->cap->dynamic_ot_rd_tbl :
+			&vbif->cap->dynamic_ot_wr_tbl;
+
+	for (i = 0; i < tbl->count; i++) {
+		if (pps <= tbl->cfg[i].pps) {
+			*ot_lim = tbl->cfg[i].ot_limit;
+			break;
+		}
+	}
+
+	SDE_DEBUG("vbif:%d xin:%d w:%d h:%d fps:%d pps:%llu ot:%u\n",
+			vbif->idx - VBIF_0, params->xin_id,
+			params->width, params->height, params->frame_rate,
+			pps, *ot_lim);
+}
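/*
 * Worked example for the computation above (illustrative numbers): a
 * 1920x1080 WFD session at 60 fps gives pps = 60 * 1920 * 1080 =
 * 124,416,000, and the first table entry whose pps threshold is >= that
 * value supplies the OT limit.
 */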
+
+/**
+ * _sde_vbif_get_ot_limit - get OT based on usecase & configuration parameters
+ * @vbif:	Pointer to hardware vbif driver
+ * @params:	Pointer to usecase parameters
+ * @return:	OT limit
+ */
+static u32 _sde_vbif_get_ot_limit(struct sde_hw_vbif *vbif,
+	struct sde_vbif_set_ot_params *params)
+{
+	u32 ot_lim = 0;
+	u32 val;
+
+	if (!vbif || !vbif->cap) {
+		SDE_ERROR("invalid arguments vbif %d\n", vbif != 0);
+		/* return type is u32, so 0 ("no OT config") instead of -EINVAL */
+		return 0;
+	}
+
+	if (vbif->cap->default_ot_wr_limit && !params->rd)
+		ot_lim = vbif->cap->default_ot_wr_limit;
+	else if (vbif->cap->default_ot_rd_limit && params->rd)
+		ot_lim = vbif->cap->default_ot_rd_limit;
+
+	/*
+	 * If default ot is not set from dt/catalog,
+	 * then do not configure it.
+	 */
+	if (ot_lim == 0)
+		goto exit;
+
+	/* Modify the limits if the target and the use case requires it */
+	_sde_vbif_apply_dynamic_ot_limit(vbif, &ot_lim, params);
+
+	if (vbif->ops.get_limit_conf) {
+		val = vbif->ops.get_limit_conf(vbif,
+				params->xin_id, params->rd);
+		if (val == ot_lim)
+			ot_lim = 0;
+	}
+
+exit:
+	SDE_DEBUG("vbif:%d xin:%d ot_lim:%d\n",
+			vbif->idx - VBIF_0, params->xin_id, ot_lim);
+	return ot_lim;
+}
+
+/**
+ * sde_vbif_set_ot_limit - set OT based on usecase & configuration parameters
+ * @sde_kms:	SDE handler
+ * @params:	Pointer to usecase parameters
+ *
+ * Note this function would block waiting for bus halt.
+ */
+void sde_vbif_set_ot_limit(struct sde_kms *sde_kms,
+		struct sde_vbif_set_ot_params *params)
+{
+	struct sde_hw_vbif *vbif = NULL;
+	struct sde_hw_mdp *mdp;
+	bool forced_on = false;
+	u32 ot_lim;
+	int ret, i;
+
+	if (!sde_kms) {
+		SDE_ERROR("invalid arguments\n");
+		return;
+	}
+	mdp = sde_kms->hw_mdp;
+
+	for (i = 0; i < ARRAY_SIZE(sde_kms->hw_vbif); i++) {
+		if (sde_kms->hw_vbif[i] &&
+				sde_kms->hw_vbif[i]->idx == params->vbif_idx)
+			vbif = sde_kms->hw_vbif[i];
+	}
+
+	if (!vbif || !mdp) {
+		SDE_DEBUG("invalid arguments vbif %d mdp %d\n",
+				vbif != 0, mdp != 0);
+		return;
+	}
+
+	if (!mdp->ops.setup_clk_force_ctrl ||
+			!vbif->ops.set_limit_conf ||
+			!vbif->ops.set_halt_ctrl)
+		return;
+
+	ot_lim = _sde_vbif_get_ot_limit(vbif, params) & 0xFF;
+
+	if (ot_lim == 0)
+		goto exit;
+
+	trace_sde_perf_set_ot(params->num, params->xin_id, ot_lim,
+		params->vbif_idx);
+
+	forced_on = mdp->ops.setup_clk_force_ctrl(mdp, params->clk_ctrl, true);
+
+	vbif->ops.set_limit_conf(vbif, params->xin_id, params->rd, ot_lim);
+
+	vbif->ops.set_halt_ctrl(vbif, params->xin_id, true);
+
+	ret = _sde_vbif_wait_for_xin_halt(vbif, params->xin_id);
+	if (ret)
+		SDE_EVT32(vbif->idx, params->xin_id);
+
+	vbif->ops.set_halt_ctrl(vbif, params->xin_id, false);
+
+	if (forced_on)
+		mdp->ops.setup_clk_force_ctrl(mdp, params->clk_ctrl, false);
+exit:
+	return;
+}
+
+#ifdef CONFIG_DEBUG_FS
+void sde_debugfs_vbif_destroy(struct sde_kms *sde_kms)
+{
+	debugfs_remove_recursive(sde_kms->debugfs_vbif);
+	sde_kms->debugfs_vbif = NULL;
+}
+
+int sde_debugfs_vbif_init(struct sde_kms *sde_kms, struct dentry *debugfs_root)
+{
+	char vbif_name[32];
+	struct dentry *debugfs_vbif;
+	int i, j;
+
+	sde_kms->debugfs_vbif = debugfs_create_dir("vbif",
+			sde_kms->debugfs_root);
+	if (!sde_kms->debugfs_vbif) {
+		SDE_ERROR("failed to create vbif debugfs\n");
+		return -EINVAL;
+	}
+
+	for (i = 0; i < sde_kms->catalog->vbif_count; i++) {
+		struct sde_vbif_cfg *vbif = &sde_kms->catalog->vbif[i];
+
+		snprintf(vbif_name, sizeof(vbif_name), "%d", vbif->id);
+
+		debugfs_vbif = debugfs_create_dir(vbif_name,
+				sde_kms->debugfs_vbif);
+
+		debugfs_create_u32("features", 0644, debugfs_vbif,
+			(u32 *)&vbif->features);
+
+		debugfs_create_u32("xin_halt_timeout", S_IRUGO, debugfs_vbif,
+			(u32 *)&vbif->xin_halt_timeout);
+
+		debugfs_create_u32("default_rd_ot_limit", S_IRUGO, debugfs_vbif,
+			(u32 *)&vbif->default_ot_rd_limit);
+
+		debugfs_create_u32("default_wr_ot_limit", S_IRUGO, debugfs_vbif,
+			(u32 *)&vbif->default_ot_wr_limit);
+
+		for (j = 0; j < vbif->dynamic_ot_rd_tbl.count; j++) {
+			struct sde_vbif_dynamic_ot_cfg *cfg =
+					&vbif->dynamic_ot_rd_tbl.cfg[j];
+
+			snprintf(vbif_name, sizeof(vbif_name),
+					"dynamic_ot_rd_%d_pps", j);
+			debugfs_create_u64(vbif_name, S_IRUGO, debugfs_vbif,
+					(u64 *)&cfg->pps);
+			snprintf(vbif_name, sizeof(vbif_name),
+					"dynamic_ot_rd_%d_ot_limit", j);
+			debugfs_create_u32(vbif_name, S_IRUGO, debugfs_vbif,
+					(u32 *)&cfg->ot_limit);
+		}
+
+		for (j = 0; j < vbif->dynamic_ot_wr_tbl.count; j++) {
+			struct sde_vbif_dynamic_ot_cfg *cfg =
+					&vbif->dynamic_ot_wr_tbl.cfg[j];
+
+			snprintf(vbif_name, sizeof(vbif_name),
+					"dynamic_ot_wr_%d_pps", j);
+			debugfs_create_u64(vbif_name, S_IRUGO, debugfs_vbif,
+					(u64 *)&cfg->pps);
+			snprintf(vbif_name, sizeof(vbif_name),
+					"dynamic_ot_wr_%d_ot_limit", j);
+			debugfs_create_u32(vbif_name, S_IRUGO, debugfs_vbif,
+					(u32 *)&cfg->ot_limit);
+		}
+	}
+
+	return 0;
+}
+#endif
diff -Nruw linux-4.4.115/drivers/gpu/drm/msm/sde/sde_vbif.h linux-4.4.115-fbx/drivers/gpu/drm/msm/sde/sde_vbif.h
--- linux-4.4.115/drivers/gpu/drm/msm/sde/sde_vbif.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/gpu/drm/msm/sde/sde_vbif.h	2019-01-22 16:16:23.523246588 +0100
@@ -0,0 +1,51 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __SDE_VBIF_H__
+#define __SDE_VBIF_H__
+
+#include "sde_kms.h"
+
+struct sde_vbif_set_ot_params {
+	u32 xin_id;
+	u32 num;
+	u32 width;
+	u32 height;
+	u32 frame_rate;
+	bool rd;
+	bool is_wfd;
+	u32 vbif_idx;
+	u32 clk_ctrl;
+};
+
+/**
+ * sde_vbif_set_ot_limit - set OT limit for vbif client
+ * @sde_kms:	SDE handler
+ * @params:	Pointer to OT configuration parameters
+ */
+void sde_vbif_set_ot_limit(struct sde_kms *sde_kms,
+		struct sde_vbif_set_ot_params *params);
+
+#ifdef CONFIG_DEBUG_FS
+int sde_debugfs_vbif_init(struct sde_kms *sde_kms, struct dentry *debugfs_root);
+void sde_debugfs_vbif_destroy(struct sde_kms *sde_kms);
+#else
+static inline int sde_debugfs_vbif_init(struct sde_kms *sde_kms,
+		struct dentry *debugfs_root)
+{
+	return 0;
+}
+static inline void sde_debugfs_vbif_destroy(struct sde_kms *sde_kms)
+{
+}
+#endif
+#endif /* __SDE_VBIF_H__ */
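/*
 * Illustrative sketch (not part of the patch): filling
 * struct sde_vbif_set_ot_params for a read client. All values are
 * hypothetical; real callers take xin_id and clk_ctrl from the hw catalog.
 */
static void example_set_ot(struct sde_kms *sde_kms)
{
	struct sde_vbif_set_ot_params params = {
		.xin_id = 0,
		.num = 0,		/* pipe number, used for tracing */
		.width = 1920,
		.height = 1080,
		.frame_rate = 60,
		.rd = true,		/* read (source) client */
		.is_wfd = true,		/* enables the dynamic OT table */
		.vbif_idx = VBIF_0,	/* assumed enum from sde_hw_mdss.h */
		.clk_ctrl = 0,
	};

	sde_vbif_set_ot_limit(sde_kms, &params);
}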
diff -Nruw linux-4.4.115/drivers/gpu/drm/msm/sde/sde_wb.c linux-4.4.115-fbx/drivers/gpu/drm/msm/sde/sde_wb.c
--- linux-4.4.115/drivers/gpu/drm/msm/sde/sde_wb.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/gpu/drm/msm/sde/sde_wb.c	2019-01-22 16:16:23.523246588 +0100
@@ -0,0 +1,745 @@
+/*
+ * Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt)	"[drm:%s:%d] " fmt, __func__, __LINE__
+
+#include "msm_kms.h"
+#include "sde_kms.h"
+#include "sde_wb.h"
+#include "sde_formats.h"
+
+/* maximum display mode resolution if not available from catalog */
+#define SDE_WB_MODE_MAX_WIDTH	4096
+#define SDE_WB_MODE_MAX_HEIGHT	4096
+
+/* Serialization lock for sde_wb_list */
+static DEFINE_MUTEX(sde_wb_list_lock);
+
+/* List of all writeback devices installed */
+static LIST_HEAD(sde_wb_list);
+
+/**
+ * sde_wb_is_format_valid - check if given format/modifier is supported
+ * @wb_dev:	Pointer to writeback device
+ * @pixel_format:	Fourcc pixel format
+ * @format_modifier:	Format modifier
+ * Returns:		true if valid; false otherwise
+ */
+static bool sde_wb_is_format_valid(struct sde_wb_device *wb_dev,
+		u32 pixel_format, u64 format_modifier)
+{
+	const struct sde_format_extended *fmts = wb_dev->wb_cfg->format_list;
+	int i;
+
+	if (!fmts)
+		return false;
+
+	for (i = 0; fmts[i].fourcc_format; i++)
+		if ((fmts[i].modifier == format_modifier) &&
+				(fmts[i].fourcc_format == pixel_format))
+			return true;
+
+	return false;
+}
+
+enum drm_connector_status
+sde_wb_connector_detect(struct drm_connector *connector,
+		bool force,
+		void *display)
+{
+	enum drm_connector_status rc = connector_status_unknown;
+
+	SDE_DEBUG("\n");
+
+	if (display)
+		rc = ((struct sde_wb_device *)display)->detect_status;
+
+	return rc;
+}
+
+int sde_wb_connector_get_modes(struct drm_connector *connector, void *display)
+{
+	struct sde_wb_device *wb_dev;
+	int num_modes = 0;
+
+	if (!connector || !display)
+		return 0;
+
+	wb_dev = display;
+
+	SDE_DEBUG("\n");
+
+	mutex_lock(&wb_dev->wb_lock);
+	if (wb_dev->count_modes && wb_dev->modes) {
+		struct drm_display_mode *mode;
+		int i, ret;
+
+		for (i = 0; i < wb_dev->count_modes; i++) {
+			mode = drm_mode_create(connector->dev);
+			if (!mode) {
+				SDE_ERROR("failed to create mode\n");
+				break;
+			}
+			ret = drm_mode_convert_umode(mode,
+					&wb_dev->modes[i]);
+			if (ret) {
+				SDE_ERROR("failed to convert mode %d\n", ret);
+				drm_mode_destroy(connector->dev, mode);
+				break;
+			}
+
+			drm_mode_probed_add(connector, mode);
+			num_modes++;
+		}
+	} else {
+		u32 max_width = (wb_dev->wb_cfg && wb_dev->wb_cfg->sblk) ?
+				wb_dev->wb_cfg->sblk->maxlinewidth :
+				SDE_WB_MODE_MAX_WIDTH;
+
+		num_modes = drm_add_modes_noedid(connector, max_width,
+				SDE_WB_MODE_MAX_HEIGHT);
+	}
+	mutex_unlock(&wb_dev->wb_lock);
+	return num_modes;
+}
+
+struct drm_framebuffer *
+sde_wb_connector_state_get_output_fb(struct drm_connector_state *state)
+{
+	if (!state || !state->connector ||
+		(state->connector->connector_type !=
+				DRM_MODE_CONNECTOR_VIRTUAL)) {
+		SDE_ERROR("invalid params\n");
+		return NULL;
+	}
+
+	SDE_DEBUG("\n");
+
+	return sde_connector_get_out_fb(state);
+}
+
+int sde_wb_connector_state_get_output_roi(struct drm_connector_state *state,
+		struct sde_rect *roi)
+{
+	if (!state || !roi || !state->connector ||
+		(state->connector->connector_type !=
+				DRM_MODE_CONNECTOR_VIRTUAL)) {
+		SDE_ERROR("invalid params\n");
+		return -EINVAL;
+	}
+
+	SDE_DEBUG("\n");
+
+	roi->x = sde_connector_get_property(state, CONNECTOR_PROP_DST_X);
+	roi->y = sde_connector_get_property(state, CONNECTOR_PROP_DST_Y);
+	roi->w = sde_connector_get_property(state, CONNECTOR_PROP_DST_W);
+	roi->h = sde_connector_get_property(state, CONNECTOR_PROP_DST_H);
+
+	return 0;
+}
+
+/**
+ * sde_wb_connector_set_modes - set writeback modes and connection status
+ * @wb_dev:	Pointer to write back device
+ * @count_modes:	Count of modes
+ * @modes:	Pointer to writeback mode requested
+ * @connected:	Connection status requested
+ * Returns:	0 if success; error code otherwise
+ */
+static
+int sde_wb_connector_set_modes(struct sde_wb_device *wb_dev,
+		u32 count_modes, struct drm_mode_modeinfo __user *modes,
+		bool connected)
+{
+	int ret = 0;
+
+	if (!wb_dev || !wb_dev->connector ||
+			(wb_dev->connector->connector_type !=
+			 DRM_MODE_CONNECTOR_VIRTUAL)) {
+		SDE_ERROR("invalid params\n");
+		return -EINVAL;
+	}
+
+	SDE_DEBUG("\n");
+
+	if (connected) {
+		SDE_DEBUG("connect\n");
+
+		if (wb_dev->modes) {
+			wb_dev->count_modes = 0;
+
+			kfree(wb_dev->modes);
+			wb_dev->modes = NULL;
+		}
+
+		if (count_modes && modes) {
+			wb_dev->modes = kcalloc(count_modes,
+					sizeof(struct drm_mode_modeinfo),
+					GFP_KERNEL);
+			if (!wb_dev->modes) {
+				SDE_ERROR("invalid params\n");
+				ret = -ENOMEM;
+				goto error;
+			}
+
+			if (copy_from_user(wb_dev->modes, modes,
+					count_modes *
+					sizeof(struct drm_mode_modeinfo))) {
+				SDE_ERROR("failed to copy modes\n");
+				kfree(wb_dev->modes);
+				wb_dev->modes = NULL;
+				ret = -EFAULT;
+				goto error;
+			}
+
+			wb_dev->count_modes = count_modes;
+		}
+
+		wb_dev->detect_status = connector_status_connected;
+	} else {
+		SDE_DEBUG("disconnect\n");
+
+		if (wb_dev->modes) {
+			wb_dev->count_modes = 0;
+
+			kfree(wb_dev->modes);
+			wb_dev->modes = NULL;
+		}
+
+		wb_dev->detect_status = connector_status_disconnected;
+	}
+
+error:
+	return ret;
+}
+
+int sde_wb_connector_set_property(struct drm_connector *connector,
+		struct drm_connector_state *state,
+		int property_index,
+		uint64_t value,
+		void *display)
+{
+	struct sde_wb_device *wb_dev = display;
+	struct drm_framebuffer *out_fb;
+	int rc = 0;
+
+	SDE_DEBUG("\n");
+
+	if (state && (property_index == CONNECTOR_PROP_OUT_FB)) {
+		const struct sde_format *sde_format;
+
+		out_fb = sde_connector_get_out_fb(state);
+		if (!out_fb)
+			goto done;
+
+		sde_format = sde_get_sde_format_ext(out_fb->pixel_format,
+				out_fb->modifier,
+				drm_format_num_planes(out_fb->pixel_format));
+		if (!sde_format) {
+			SDE_ERROR("failed to get sde format\n");
+			rc = -EINVAL;
+			goto done;
+		}
+
+		if (!sde_wb_is_format_valid(wb_dev, out_fb->pixel_format,
+				out_fb->modifier[0])) {
+			SDE_ERROR("unsupported writeback format 0x%x/0x%llx\n",
+					out_fb->pixel_format,
+					out_fb->modifier[0]);
+			rc = -EINVAL;
+			goto done;
+		}
+	}
+
+done:
+	return rc;
+}
+
+int sde_wb_get_info(struct msm_display_info *info, void *display)
+{
+	struct sde_wb_device *wb_dev = display;
+
+	if (!info || !wb_dev) {
+		pr_err("invalid params\n");
+		return -EINVAL;
+	}
+
+	info->intf_type = DRM_MODE_CONNECTOR_VIRTUAL;
+	info->num_of_h_tiles = 1;
+	info->h_tile_instance[0] = sde_wb_get_index(display);
+	info->is_connected = true;
+	info->capabilities = MSM_DISPLAY_CAP_HOT_PLUG | MSM_DISPLAY_CAP_EDID;
+	info->max_width = (wb_dev->wb_cfg && wb_dev->wb_cfg->sblk) ?
+			wb_dev->wb_cfg->sblk->maxlinewidth :
+			SDE_WB_MODE_MAX_WIDTH;
+	info->max_height = SDE_WB_MODE_MAX_HEIGHT;
+	info->compression = MSM_DISPLAY_COMPRESS_NONE;
+	return 0;
+}
+
+int sde_wb_connector_post_init(struct drm_connector *connector,
+		void *info,
+		void *display)
+{
+	struct sde_connector *c_conn;
+	struct sde_wb_device *wb_dev = display;
+	const struct sde_format_extended *format_list;
+
+	if (!connector || !info || !display || !wb_dev->wb_cfg) {
+		SDE_ERROR("invalid params\n");
+		return -EINVAL;
+	}
+
+	c_conn = to_sde_connector(connector);
+	wb_dev->connector = connector;
+	wb_dev->detect_status = connector_status_connected;
+	format_list = wb_dev->wb_cfg->format_list;
+
+	/*
+	 * Add extra connector properties
+	 */
+	msm_property_install_range(&c_conn->property_info, "FB_ID",
+			0x0, 0, ~0, ~0, CONNECTOR_PROP_OUT_FB);
+	msm_property_install_range(&c_conn->property_info, "DST_X",
+			0x0, 0, UINT_MAX, 0, CONNECTOR_PROP_DST_X);
+	msm_property_install_range(&c_conn->property_info, "DST_Y",
+			0x0, 0, UINT_MAX, 0, CONNECTOR_PROP_DST_Y);
+	msm_property_install_range(&c_conn->property_info, "DST_W",
+			0x0, 0, UINT_MAX, 0, CONNECTOR_PROP_DST_W);
+	msm_property_install_range(&c_conn->property_info, "DST_H",
+			0x0, 0, UINT_MAX, 0, CONNECTOR_PROP_DST_H);
+
+	/*
+	 * Populate info buffer
+	 */
+	if (format_list) {
+		sde_kms_info_start(info, "pixel_formats");
+		while (format_list->fourcc_format) {
+			sde_kms_info_append_format(info,
+					format_list->fourcc_format,
+					format_list->modifier);
+			++format_list;
+		}
+		sde_kms_info_stop(info);
+	}
+
+	sde_kms_info_add_keyint(info,
+			"wb_intf_index",
+			wb_dev->wb_idx - WB_0);
+
+	sde_kms_info_add_keyint(info,
+			"maxlinewidth",
+			wb_dev->wb_cfg->sblk->maxlinewidth);
+
+	sde_kms_info_start(info, "features");
+	if (wb_dev->wb_cfg && (wb_dev->wb_cfg->features & SDE_WB_UBWC_1_0))
+		sde_kms_info_append(info, "wb_ubwc");
+	sde_kms_info_stop(info);
+
+	return 0;
+}
+
+struct drm_framebuffer *sde_wb_get_output_fb(struct sde_wb_device *wb_dev)
+{
+	struct drm_framebuffer *fb;
+
+	if (!wb_dev || !wb_dev->connector) {
+		SDE_ERROR("invalid params\n");
+		return NULL;
+	}
+
+	SDE_DEBUG("\n");
+
+	mutex_lock(&wb_dev->wb_lock);
+	fb = sde_wb_connector_state_get_output_fb(wb_dev->connector->state);
+	mutex_unlock(&wb_dev->wb_lock);
+
+	return fb;
+}
+
+int sde_wb_get_output_roi(struct sde_wb_device *wb_dev, struct sde_rect *roi)
+{
+	int rc;
+
+	if (!wb_dev || !wb_dev->connector || !roi) {
+		SDE_ERROR("invalid params\n");
+		return -EINVAL;
+	}
+
+	SDE_DEBUG("\n");
+
+	mutex_lock(&wb_dev->wb_lock);
+	rc = sde_wb_connector_state_get_output_roi(
+			wb_dev->connector->state, roi);
+	mutex_unlock(&wb_dev->wb_lock);
+
+	return rc;
+}
+
+u32 sde_wb_get_num_of_displays(void)
+{
+	u32 count = 0;
+	struct sde_wb_device *wb_dev;
+
+	SDE_DEBUG("\n");
+
+	mutex_lock(&sde_wb_list_lock);
+	list_for_each_entry(wb_dev, &sde_wb_list, wb_list) {
+		count++;
+	}
+	mutex_unlock(&sde_wb_list_lock);
+
+	return count;
+}
+
+int wb_display_get_displays(void **display_array, u32 max_display_count)
+{
+	struct sde_wb_device *curr;
+	int i = 0;
+
+	SDE_DEBUG("\n");
+
+	if (!display_array || !max_display_count) {
+		if (!display_array)
+			SDE_ERROR("invalid param\n");
+		return 0;
+	}
+
+	mutex_lock(&sde_wb_list_lock);
+	list_for_each_entry(curr, &sde_wb_list, wb_list) {
+		if (i >= max_display_count)
+			break;
+		display_array[i++] = curr;
+	}
+	mutex_unlock(&sde_wb_list_lock);
+
+	return i;
+}
+
+int sde_wb_config(struct drm_device *drm_dev, void *data,
+				struct drm_file *file_priv)
+{
+	struct sde_drm_wb_cfg *config = data;
+	struct msm_drm_private *priv;
+	struct sde_wb_device *wb_dev = NULL;
+	struct sde_wb_device *curr;
+	struct drm_connector *connector;
+	uint32_t flags;
+	uint32_t connector_id;
+	uint32_t count_modes;
+	uint64_t modes;
+	int rc;
+
+	if (!drm_dev || !data) {
+		SDE_ERROR("invalid params\n");
+		return -EINVAL;
+	}
+
+	SDE_DEBUG("\n");
+
+	flags = config->flags;
+	connector_id = config->connector_id;
+	count_modes = config->count_modes;
+	modes = config->modes;
+
+	priv = drm_dev->dev_private;
+
+	connector = drm_connector_find(drm_dev, connector_id);
+	if (!connector) {
+		SDE_ERROR("failed to find connector\n");
+		rc = -ENOENT;
+		goto fail;
+	}
+
+	mutex_lock(&sde_wb_list_lock);
+	list_for_each_entry(curr, &sde_wb_list, wb_list) {
+		if (curr->connector == connector) {
+			wb_dev = curr;
+			break;
+		}
+	}
+	mutex_unlock(&sde_wb_list_lock);
+
+	if (!wb_dev) {
+		SDE_ERROR("failed to find wb device\n");
+		rc = -ENOENT;
+		goto fail;
+	}
+
+	mutex_lock(&wb_dev->wb_lock);
+
+	rc = sde_wb_connector_set_modes(wb_dev, count_modes,
+		(struct drm_mode_modeinfo __user *) (uintptr_t) modes,
+		(flags & SDE_DRM_WB_CFG_FLAGS_CONNECTED) ? true : false);
+
+	mutex_unlock(&wb_dev->wb_lock);
+	drm_helper_hpd_irq_event(drm_dev);
+fail:
+	return rc;
+}
+
+/**
+ * _sde_wb_dev_init - perform device initialization
+ * @wb_dev:	Pointer to writeback device
+ */
+static int _sde_wb_dev_init(struct sde_wb_device *wb_dev)
+{
+	int rc = 0;
+
+	if (!wb_dev) {
+		SDE_ERROR("invalid params\n");
+		return -EINVAL;
+	}
+
+	SDE_DEBUG("\n");
+
+	return rc;
+}
+
+/**
+ * _sde_wb_dev_deinit - perform device de-initialization
+ * @wb_dev:	Pointer to writeback device
+ */
+static int _sde_wb_dev_deinit(struct sde_wb_device *wb_dev)
+{
+	int rc = 0;
+
+	if (!wb_dev) {
+		SDE_ERROR("invalid params\n");
+		return -EINVAL;
+	}
+
+	SDE_DEBUG("\n");
+
+	return rc;
+}
+
+/**
+ * sde_wb_bind - bind writeback device with controlling device
+ * @dev:        Pointer to base of platform device
+ * @master:     Pointer to container of drm device
+ * @data:       Pointer to private data
+ * Returns:     Zero on success
+ */
+static int sde_wb_bind(struct device *dev, struct device *master, void *data)
+{
+	struct sde_wb_device *wb_dev;
+
+	if (!dev || !master) {
+		SDE_ERROR("invalid params\n");
+		return -EINVAL;
+	}
+
+	wb_dev = platform_get_drvdata(to_platform_device(dev));
+	if (!wb_dev) {
+		SDE_ERROR("invalid wb device\n");
+		return -EINVAL;
+	}
+
+	SDE_DEBUG("\n");
+
+	mutex_lock(&wb_dev->wb_lock);
+	wb_dev->drm_dev = dev_get_drvdata(master);
+	mutex_unlock(&wb_dev->wb_lock);
+
+	return 0;
+}
+
+/**
+ * sde_wb_unbind - unbind writeback from controlling device
+ * @dev:        Pointer to base of platform device
+ * @master:     Pointer to container of drm device
+ * @data:       Pointer to private data
+ */
+static void sde_wb_unbind(struct device *dev,
+		struct device *master, void *data)
+{
+	struct sde_wb_device *wb_dev;
+
+	if (!dev) {
+		SDE_ERROR("invalid params\n");
+		return;
+	}
+
+	wb_dev = platform_get_drvdata(to_platform_device(dev));
+	if (!wb_dev) {
+		SDE_ERROR("invalid wb device\n");
+		return;
+	}
+
+	SDE_DEBUG("\n");
+
+	mutex_lock(&wb_dev->wb_lock);
+	wb_dev->drm_dev = NULL;
+	mutex_unlock(&wb_dev->wb_lock);
+}
+
+static const struct component_ops sde_wb_comp_ops = {
+	.bind = sde_wb_bind,
+	.unbind = sde_wb_unbind,
+};
+
+/**
+ * sde_wb_drm_init - perform DRM initialization
+ * @wb_dev:	Pointer to writeback device
+ * @encoder:	Pointer to associated encoder
+ */
+int sde_wb_drm_init(struct sde_wb_device *wb_dev, struct drm_encoder *encoder)
+{
+	int rc = 0;
+
+	if (!wb_dev || !wb_dev->drm_dev || !encoder) {
+		SDE_ERROR("invalid params\n");
+		return -EINVAL;
+	}
+
+	SDE_DEBUG("\n");
+
+	mutex_lock(&wb_dev->wb_lock);
+
+	if (wb_dev->drm_dev->dev_private) {
+		struct msm_drm_private *priv = wb_dev->drm_dev->dev_private;
+		struct sde_kms *sde_kms = to_sde_kms(priv->kms);
+
+		if (wb_dev->index < sde_kms->catalog->wb_count) {
+			wb_dev->wb_idx = sde_kms->catalog->wb[wb_dev->index].id;
+			wb_dev->wb_cfg = &sde_kms->catalog->wb[wb_dev->index];
+		}
+	}
+
+	wb_dev->drm_dev = encoder->dev;
+	wb_dev->encoder = encoder;
+	mutex_unlock(&wb_dev->wb_lock);
+	return rc;
+}
+
+int sde_wb_drm_deinit(struct sde_wb_device *wb_dev)
+{
+	int rc = 0;
+
+	if (!wb_dev) {
+		SDE_ERROR("invalid params\n");
+		return -EINVAL;
+	}
+
+	SDE_DEBUG("\n");
+
+	return rc;
+}
+
+/**
+ * sde_wb_probe - load writeback module
+ * @pdev:	Pointer to platform device
+ */
+static int sde_wb_probe(struct platform_device *pdev)
+{
+	struct sde_wb_device *wb_dev;
+	int ret;
+
+	wb_dev = devm_kzalloc(&pdev->dev, sizeof(*wb_dev), GFP_KERNEL);
+	if (!wb_dev)
+		return -ENOMEM;
+
+	SDE_DEBUG("\n");
+
+	ret = of_property_read_u32(pdev->dev.of_node, "cell-index",
+			&wb_dev->index);
+	if (ret) {
+		SDE_DEBUG("cell index not set, default to 0\n");
+		wb_dev->index = 0;
+	}
+
+	wb_dev->name = of_get_property(pdev->dev.of_node, "label", NULL);
+	if (!wb_dev->name) {
+		SDE_DEBUG("label not set, default to unknown\n");
+		wb_dev->name = "unknown";
+	}
+
+	wb_dev->wb_idx = SDE_NONE;
+
+	mutex_init(&wb_dev->wb_lock);
+	platform_set_drvdata(pdev, wb_dev);
+
+	mutex_lock(&sde_wb_list_lock);
+	list_add(&wb_dev->wb_list, &sde_wb_list);
+	mutex_unlock(&sde_wb_list_lock);
+
+	ret = _sde_wb_dev_init(wb_dev);
+	if (!ret) {
+		ret = component_add(&pdev->dev, &sde_wb_comp_ops);
+		if (ret)
+			pr_err("component add failed\n");
+	}
+
+	return ret;
+}
+
+/**
+ * sde_wb_remove - unload writeback module
+ * @pdev:	Pointer to platform device
+ */
+static int sde_wb_remove(struct platform_device *pdev)
+{
+	struct sde_wb_device *wb_dev;
+	struct sde_wb_device *curr, *next;
+
+	wb_dev = platform_get_drvdata(pdev);
+	if (!wb_dev)
+		return 0;
+
+	SDE_DEBUG("\n");
+
+	(void)_sde_wb_dev_deinit(wb_dev);
+
+	mutex_lock(&sde_wb_list_lock);
+	list_for_each_entry_safe(curr, next, &sde_wb_list, wb_list) {
+		if (curr == wb_dev) {
+			list_del(&wb_dev->wb_list);
+			break;
+		}
+	}
+	mutex_unlock(&sde_wb_list_lock);
+
+	kfree(wb_dev->modes);
+	mutex_destroy(&wb_dev->wb_lock);
+
+	platform_set_drvdata(pdev, NULL);
+	devm_kfree(&pdev->dev, wb_dev);
+
+	return 0;
+}
+
+static const struct of_device_id dt_match[] = {
+	{ .compatible = "qcom,wb-display"},
+	{}
+};
+
+static struct platform_driver sde_wb_driver = {
+	.probe = sde_wb_probe,
+	.remove = sde_wb_remove,
+	.driver = {
+		.name = "sde_wb",
+		.of_match_table = dt_match,
+	},
+};
+
+static int __init sde_wb_register(void)
+{
+	return platform_driver_register(&sde_wb_driver);
+}
+
+static void __exit sde_wb_unregister(void)
+{
+	platform_driver_unregister(&sde_wb_driver);
+}
+
+module_init(sde_wb_register);
+module_exit(sde_wb_unregister);
diff -Nruw linux-4.4.115/drivers/gpu/drm/msm/sde/sde_wb.h linux-4.4.115-fbx/drivers/gpu/drm/msm/sde/sde_wb.h
--- linux-4.4.115/drivers/gpu/drm/msm/sde/sde_wb.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/gpu/drm/msm/sde/sde_wb.h	2019-01-22 16:16:23.523246588 +0100
@@ -0,0 +1,321 @@
+/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __SDE_WB_H__
+#define __SDE_WB_H__
+
+#include <linux/platform_device.h>
+
+#include "msm_kms.h"
+#include "sde_kms.h"
+#include "sde_connector.h"
+
+/**
+ * struct sde_wb_device - Writeback device context
+ * @drm_dev:		Pointer to controlling DRM device
+ * @index:		Index of hardware instance from device tree
+ * @wb_idx:		Writeback identifier of enum sde_wb
+ * @wb_cfg:		Writeback configuration catalog
+ * @name:		Name of writeback device from device tree
+ * @wb_list:		List of all writeback devices
+ * @wb_lock:		Serialization lock for writeback context structure
+ * @connector:		Connector associated with writeback device
+ * @encoder:		Encoder associated with writeback device
+ * @detect_status:	Current connection detection status
+ * @count_modes:	Length of writeback connector modes array
+ * @modes:		Writeback connector modes array
+ */
+struct sde_wb_device {
+	struct drm_device *drm_dev;
+
+	u32 index;
+	u32 wb_idx;
+	struct sde_wb_cfg *wb_cfg;
+	const char *name;
+
+	struct list_head wb_list;
+	struct mutex wb_lock;
+
+	struct drm_connector *connector;
+	struct drm_encoder *encoder;
+
+	enum drm_connector_status detect_status;
+	u32 count_modes;
+	struct drm_mode_modeinfo *modes;
+};
+
+/**
+ * sde_wb_get_index - get device index of the given writeback device
+ * @wb_dev:	Pointer to writeback device
+ * Returns:	Index of hardware instance
+ */
+static inline
+int sde_wb_get_index(struct sde_wb_device *wb_dev)
+{
+	return wb_dev ? wb_dev->index : -1;
+}
+
+#ifdef CONFIG_DRM_SDE_WB
+/**
+ * sde_wb_get_output_fb - get framebuffer in current atomic state
+ * @wb_dev:	Pointer to writeback device
+ * Returns:	Pointer to framebuffer
+ */
+struct drm_framebuffer *sde_wb_get_output_fb(struct sde_wb_device *wb_dev);
+
+/**
+ * sde_wb_get_output_roi - get region-of-interest in current atomic state
+ * @wb_dev:	Pointer to writeback device
+ * @roi:	Pointer to region of interest
+ * Returns:	0 if success; error code otherwise
+ */
+int sde_wb_get_output_roi(struct sde_wb_device *wb_dev, struct sde_rect *roi);
+
+/**
+ * sde_wb_get_num_of_displays - get total number of writeback devices
+ * Returns:	Number of writeback devices
+ */
+u32 sde_wb_get_num_of_displays(void);
+
+/**
+ * wb_display_get_displays - returns pointers for supported display devices
+ * @display_array: Pointer to display array to be filled
+ * @max_display_count: Size of display_array
+ * Returns: Number of display entries filled
+ */
+int wb_display_get_displays(void **display_array, u32 max_display_count);
+
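+/**
+ * sde_wb_set_active_state - set the active state of the writeback device
+ * @wb_dev:	Pointer to writeback device
+ * @is_active:	Whether the writeback device is active
+ */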
+void sde_wb_set_active_state(struct sde_wb_device *wb_dev, bool is_active);
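+
+/**
+ * sde_wb_is_active - query whether the writeback device is currently active
+ * @wb_dev:	Pointer to writeback device
+ * Returns:	True if active; false otherwise
+ */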
+bool sde_wb_is_active(struct sde_wb_device *wb_dev);
+
+/**
+ * sde_wb_drm_init - perform DRM initialization
+ * @wb_dev:	Pointer to writeback device
+ * @encoder:	Pointer to associated encoder
+ * Returns:	0 if success; error code otherwise
+ */
+int sde_wb_drm_init(struct sde_wb_device *wb_dev, struct drm_encoder *encoder);
+
+/**
+ * sde_wb_drm_deinit - perform DRM de-initialization
+ * @wb_dev:	Pointer to writeback device
+ * Returns:	0 if success; error code otherwise
+ */
+int sde_wb_drm_deinit(struct sde_wb_device *wb_dev);
+
+/**
+ * sde_wb_config - setup connection status and available drm modes of the
+ *			given writeback connector
+ * @drm_dev:	Pointer to DRM device
+ * @data:	Pointer to writeback configuration
+ * @file_priv:	Pointer to file private data
+ * Returns:	0 if success; error code otherwise
+ *
+ * This function will initiate hot-plug detection event.
+ */
+int sde_wb_config(struct drm_device *drm_dev, void *data,
+				struct drm_file *file_priv);
+
+/**
+ * sde_wb_connector_post_init - perform writeback specific initialization
+ * @connector: Pointer to drm connector structure
+ * @info: Pointer to connector info
+ * @display: Pointer to private display structure
+ * Returns: Zero on success
+ */
+int sde_wb_connector_post_init(struct drm_connector *connector,
+		void *info,
+		void *display);
+
+/**
+ * sde_wb_connector_detect - perform writeback connection status detection
+ * @connector:	Pointer to connector
+ * @force:	Indicate force detection
+ * @display:	Pointer to writeback device
+ * Returns:	connector status
+ */
+enum drm_connector_status
+sde_wb_connector_detect(struct drm_connector *connector,
+		bool force,
+		void *display);
+
+/**
+ * sde_wb_connector_get_modes - get display modes of connector
+ * @connector:	Pointer to connector
+ * @display:	Pointer to writeback device
+ * Returns:	Number of modes
+ *
+ * If display modes are not specified via the writeback configuration IOCTL,
+ * this function will install default EDID modes up to the maximum supported
+ * resolution.
+ */
+int sde_wb_connector_get_modes(struct drm_connector *connector, void *display);
+
+/**
+ * sde_wb_connector_set_property - set atomic connector property
+ * @connector: Pointer to drm connector structure
+ * @state: Pointer to drm connector state structure
+ * @property_index: DRM property index
+ * @value: Incoming property value
+ * @display: Pointer to private display structure
+ * Returns: Zero on success
+ */
+int sde_wb_connector_set_property(struct drm_connector *connector,
+		struct drm_connector_state *state,
+		int property_index,
+		uint64_t value,
+		void *display);
+
+/**
+ * sde_wb_get_info - retrieve writeback 'display' information
+ * @info: Pointer to display info structure
+ * @display: Pointer to private display structure
+ * Returns: Zero on success
+ */
+int sde_wb_get_info(struct msm_display_info *info, void *display);
+
+/**
+ * sde_wb_connector_get_wb - retrieve writeback device of the given connector
+ * @connector: Pointer to drm connector
+ * Returns: Pointer to writeback device on success; NULL otherwise
+ */
+static inline
+struct sde_wb_device *sde_wb_connector_get_wb(struct drm_connector *connector)
+{
+	if (!connector ||
+		(connector->connector_type != DRM_MODE_CONNECTOR_VIRTUAL)) {
+		SDE_ERROR("invalid params\n");
+		return NULL;
+	}
+
+	return sde_connector_get_display(connector);
+}
+
+/**
+ * sde_wb_connector_state_get_output_fb - get framebuffer of given state
+ * @state:	Pointer to connector state
+ * Returns:	Pointer to framebuffer
+ */
+struct drm_framebuffer *
+sde_wb_connector_state_get_output_fb(struct drm_connector_state *state);
+
+/**
+ * sde_wb_connector_state_get_output_roi - get roi from given atomic state
+ * @state:	Pointer to atomic state
+ * @roi:	Pointer to region of interest
+ * Returns:	0 if success; error code otherwise
+ */
+int sde_wb_connector_state_get_output_roi(struct drm_connector_state *state,
+		struct sde_rect *roi);
+
+#else
+static inline
+struct drm_framebuffer *sde_wb_get_output_fb(struct sde_wb_device *wb_dev)
+{
+	return NULL;
+}
+static inline
+int sde_wb_get_output_roi(struct sde_wb_device *wb_dev, struct sde_rect *roi)
+{
+	return 0;
+}
+static inline
+u32 sde_wb_get_num_of_displays(void)
+{
+	return 0;
+}
+static inline
+int wb_display_get_displays(void **display_array, u32 max_display_count)
+{
+	return 0;
+}
+static inline
+void sde_wb_set_active_state(struct sde_wb_device *wb_dev, bool is_active)
+{
+}
+static inline
+bool sde_wb_is_active(struct sde_wb_device *wb_dev)
+{
+	return false;
+}
+static inline
+int sde_wb_drm_init(struct sde_wb_device *wb_dev, struct drm_encoder *encoder)
+{
+	return 0;
+}
+static inline
+int sde_wb_drm_deinit(struct sde_wb_device *wb_dev)
+{
+	return 0;
+}
+static inline
+int sde_wb_config(struct drm_device *drm_dev, void *data,
+				struct drm_file *file_priv)
+{
+	return 0;
+}
+static inline
+int sde_wb_connector_post_init(struct drm_connector *connector,
+		void *info,
+		void *display)
+{
+	return 0;
+}
+static inline
+enum drm_connector_status
+sde_wb_connector_detect(struct drm_connector *connector,
+		bool force,
+		void *display)
+{
+	return connector_status_disconnected;
+}
+static inline
+int sde_wb_connector_get_modes(struct drm_connector *connector, void *display)
+{
+	return -EINVAL;
+}
+static inline
+int sde_wb_connector_set_property(struct drm_connector *connector,
+		struct drm_connector_state *state,
+		int property_index,
+		uint64_t value,
+		void *display)
+{
+	return 0;
+}
+static inline
+int sde_wb_get_info(struct msm_display_info *info, void *display)
+{
+	return 0;
+}
+static inline
+struct sde_wb_device *sde_wb_connector_get_wb(struct drm_connector *connector)
+{
+	return NULL;
+}
+
+static inline
+struct drm_framebuffer *
+sde_wb_connector_state_get_output_fb(struct drm_connector_state *state)
+{
+	return NULL;
+}
+
+static inline
+int sde_wb_connector_state_get_output_roi(struct drm_connector_state *state,
+		struct sde_rect *roi)
+{
+	return 0;
+}
+
+#endif
+#endif /* __SDE_WB_H__ */
+
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/gpu/drm/msm/sde_dbg.c	2019-04-24 19:28:47.284498086 +0200
@@ -0,0 +1,2323 @@
+/* Copyright (c) 2009-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt)	"[drm:%s:%d] " fmt, __func__, __LINE__
+
+#include <linux/delay.h>
+#include <linux/spinlock.h>
+#include <linux/ktime.h>
+#include <linux/debugfs.h>
+#include <linux/uaccess.h>
+#include <linux/dma-buf.h>
+#include <linux/slab.h>
+#include <linux/list_sort.h>
+
+#include "sde_dbg.h"
+#include "sde/sde_hw_catalog.h"
+
+#define SDE_DBG_BASE_MAX		10
+
+#define DEFAULT_PANIC		1
+#define DEFAULT_REGDUMP		SDE_DBG_DUMP_IN_MEM
+#define DEFAULT_DBGBUS_SDE	SDE_DBG_DUMP_IN_MEM
+#define DEFAULT_DBGBUS_VBIFRT	SDE_DBG_DUMP_IN_MEM
+#define DEFAULT_BASE_REG_CNT	0x100
+#define GROUP_BYTES		4
+#define ROW_BYTES		16
+#define RANGE_NAME_LEN		40
+#define REG_BASE_NAME_LEN	80
+
+#define DBGBUS_FLAGS_DSPP	BIT(0)
+#define DBGBUS_DSPP_STATUS	0x34C
+
+#define DBGBUS_NAME_SDE		"sde"
+#define DBGBUS_NAME_VBIF_RT	"vbif_rt"
+
+/* offsets from sde top address for the debug buses */
+#define DBGBUS_SSPP0	0x188
+#define DBGBUS_SSPP1	0x298
+#define DBGBUS_DSPP	0x348
+#define DBGBUS_PERIPH	0x418
+
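+/*
+ * Compose a debug bus test control word: block id from bit 4 upward,
+ * test point id in bits [3:1], BIT(0) as the test enable bit.
+ */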
+#define TEST_MASK(id, tp)	((id << 4) | (tp << 1) | BIT(0))
+
+/* following offsets are with respect to MDP VBIF base for DBG BUS access */
+#define MMSS_VBIF_CLKON			0x4
+#define MMSS_VBIF_TEST_BUS_OUT_CTRL	0x210
+#define MMSS_VBIF_TEST_BUS_OUT		0x230
+
+/* print debug ranges in groups of 4 u32s */
+#define REG_DUMP_ALIGN		16
+#define DBG_CTRL_STOP_FTRACE        BIT(0)
+#define DBG_CTRL_PANIC_UNDERRUN     BIT(1)
+#define DBG_CTRL_MAX                BIT(2)
+
+/**
+ * struct sde_dbg_reg_offset - tracking for start and end of region
+ * @start: start offset
+ * @end: end offset
+ */
+struct sde_dbg_reg_offset {
+	u32 start;
+	u32 end;
+};
+
+/**
+ * struct sde_dbg_reg_range - register dumping named sub-range
+ * @head: head of this node
+ * @reg_dump: address for the mem dump
+ * @range_name: name of this range
+ * @offset: offsets for range to dump
+ * @xin_id: client xin id
+ */
+struct sde_dbg_reg_range {
+	struct list_head head;
+	u32 *reg_dump;
+	char range_name[RANGE_NAME_LEN];
+	struct sde_dbg_reg_offset offset;
+	uint32_t xin_id;
+};
+
+/**
+ * struct sde_dbg_reg_base - register region base.
+ *	may have sub-ranges: sub-ranges are used for dumping
+ *	or may not have sub-ranges: dumping is base -> max_offset
+ * @reg_base_head: head of this node
+ * @sub_range_list: head to the list with dump ranges
+ * @name: register base name
+ * @base: base pointer
+ * @off: cached offset of region for manual register dumping
+ * @cnt: cached range of region for manual register dumping
+ * @max_offset: length of region
+ * @buf: buffer used for manual register dumping
+ * @buf_len:  buffer length used for manual register dumping
+ * @reg_dump: address for the mem dump if no ranges used
+ */
+struct sde_dbg_reg_base {
+	struct list_head reg_base_head;
+	struct list_head sub_range_list;
+	char name[REG_BASE_NAME_LEN];
+	void __iomem *base;
+	size_t off;
+	size_t cnt;
+	size_t max_offset;
+	char *buf;
+	size_t buf_len;
+	u32 *reg_dump;
+};
+
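+/**
+ * struct sde_debug_bus_entry - sde debug bus test point descriptor
+ * @wr_addr:	debug bus write (control) register offset
+ * @block_id:	hardware block id to select on the bus
+ * @test_id:	test point id within the selected block
+ */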
+struct sde_debug_bus_entry {
+	u32 wr_addr;
+	u32 block_id;
+	u32 test_id;
+};
+
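+/**
+ * struct vbif_debug_bus_entry - vbif debug bus test point group descriptor
+ * @disable_bus_addr:	register offset written with 0 to disable the other bus
+ * @block_bus_addr:	register offset for block select; test point at +0x4
+ * @bit_offset:		bit offset of the block select within the register
+ * @block_cnt:		number of blocks to iterate
+ * @test_pnt_start:	first test point index
+ * @test_pnt_cnt:	test point iteration end bound
+ */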
+struct vbif_debug_bus_entry {
+	u32 disable_bus_addr;
+	u32 block_bus_addr;
+	u32 bit_offset;
+	u32 block_cnt;
+	u32 test_pnt_start;
+	u32 test_pnt_cnt;
+};
+
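+/**
+ * struct sde_dbg_debug_bus_common - common debug bus state
+ * @name:	name of the matching register base for this bus
+ * @enable_mask:	dump destination mask, in-log and/or in-mem
+ * @include_in_deferred_work:	whether to dump this bus from deferred work
+ * @flags:	bus-specific flags, e.g. DBGBUS_FLAGS_DSPP
+ * @entries_size:	number of entries in the bus entry table
+ * @dumped_content:	memory dump location, allocated on first dump
+ */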
+struct sde_dbg_debug_bus_common {
+	char *name;
+	u32 enable_mask;
+	bool include_in_deferred_work;
+	u32 flags;
+	u32 entries_size;
+	u32 *dumped_content;
+};
+
+struct sde_dbg_sde_debug_bus {
+	struct sde_dbg_debug_bus_common cmn;
+	struct sde_debug_bus_entry *entries;
+	u32 top_blk_off;
+};
+
+struct sde_dbg_vbif_debug_bus {
+	struct sde_dbg_debug_bus_common cmn;
+	struct vbif_debug_bus_entry *entries;
+};
+
+/**
+ * struct sde_dbg_base - global sde debug base structure
+ * @evtlog: event log instance
+ * @reg_base_list: list of register dumping regions
+ * @root: base debugfs root
+ * @dev: device pointer
+ * @mutex: mutex to serialize register dumps and debugfs access
+ * @power_ctrl: callback structure for enabling power for reading hw registers
+ * @req_dump_blks: list of blocks requested for dumping
+ * @panic_on_err: whether to kernel panic after triggering dump via debugfs
+ * @dump_work: work struct for deferring register dump work to separate thread
+ * @work_panic: panic after dump if internal user passed "panic" special region
+ * @enable_reg_dump: whether to dump registers into memory, kernel log, or both
+ * @dbgbus_sde: debug bus structure for the sde
+ * @dbgbus_vbif_rt: debug bus structure for the realtime vbif
+ * @dump_all: dump all entries in register dump
+ */
+static struct sde_dbg_base {
+	struct sde_dbg_evtlog *evtlog;
+	struct list_head reg_base_list;
+	struct dentry *root;
+	struct device *dev;
+	struct mutex mutex;
+	struct sde_dbg_power_ctrl power_ctrl;
+
+	struct sde_dbg_reg_base *req_dump_blks[SDE_DBG_BASE_MAX];
+
+	u32 panic_on_err;
+	struct work_struct dump_work;
+	bool work_panic;
+	u32 enable_reg_dump;
+
+	struct sde_dbg_sde_debug_bus dbgbus_sde;
+	struct sde_dbg_vbif_debug_bus dbgbus_vbif_rt;
+	bool dump_all;
+	u32 debugfs_ctrl;
+} sde_dbg_base;
+
+/* sde_dbg_base_evtlog - global pointer to main sde event log for macro use */
+struct sde_dbg_evtlog *sde_dbg_base_evtlog;
+
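+/* msm8998 sde debug bus test points: { wr_addr, block_id, test_id } */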
+static struct sde_debug_bus_entry dbg_bus_sde_8998[] = {
+	/* Unpack 0 sspp 0 */
+	{ DBGBUS_SSPP0, 50, 2 },
+	{ DBGBUS_SSPP0, 60, 2 },
+	{ DBGBUS_SSPP0, 70, 2 },
+	{ DBGBUS_SSPP0, 85, 2 },
+
+	/* Unpack 0 sspp 1 */
+	{ DBGBUS_SSPP1, 50, 2 },
+	{ DBGBUS_SSPP1, 60, 2 },
+	{ DBGBUS_SSPP1, 70, 2 },
+	{ DBGBUS_SSPP1, 85, 2 },
+
+	/* scheduler */
+	{ DBGBUS_DSPP, 130, 0 },
+	{ DBGBUS_DSPP, 130, 1 },
+	{ DBGBUS_DSPP, 130, 2 },
+	{ DBGBUS_DSPP, 130, 3 },
+	{ DBGBUS_DSPP, 130, 4 },
+	{ DBGBUS_DSPP, 130, 5 },
+
+	/* qseed */
+	{ DBGBUS_SSPP0, 6, 0},
+	{ DBGBUS_SSPP0, 6, 1},
+	{ DBGBUS_SSPP0, 26, 0},
+	{ DBGBUS_SSPP0, 26, 1},
+	{ DBGBUS_SSPP1, 6, 0},
+	{ DBGBUS_SSPP1, 6, 1},
+	{ DBGBUS_SSPP1, 26, 0},
+	{ DBGBUS_SSPP1, 26, 1},
+
+	/* scale */
+	{ DBGBUS_SSPP0, 16, 0},
+	{ DBGBUS_SSPP0, 16, 1},
+	{ DBGBUS_SSPP0, 36, 0},
+	{ DBGBUS_SSPP0, 36, 1},
+	{ DBGBUS_SSPP1, 16, 0},
+	{ DBGBUS_SSPP1, 16, 1},
+	{ DBGBUS_SSPP1, 36, 0},
+	{ DBGBUS_SSPP1, 36, 1},
+
+	/* fetch sspp0 */
+
+	/* vig 0 */
+	{ DBGBUS_SSPP0, 0, 0 },
+	{ DBGBUS_SSPP0, 0, 1 },
+	{ DBGBUS_SSPP0, 0, 2 },
+	{ DBGBUS_SSPP0, 0, 3 },
+	{ DBGBUS_SSPP0, 0, 4 },
+	{ DBGBUS_SSPP0, 0, 5 },
+	{ DBGBUS_SSPP0, 0, 6 },
+	{ DBGBUS_SSPP0, 0, 7 },
+
+	{ DBGBUS_SSPP0, 1, 0 },
+	{ DBGBUS_SSPP0, 1, 1 },
+	{ DBGBUS_SSPP0, 1, 2 },
+	{ DBGBUS_SSPP0, 1, 3 },
+	{ DBGBUS_SSPP0, 1, 4 },
+	{ DBGBUS_SSPP0, 1, 5 },
+	{ DBGBUS_SSPP0, 1, 6 },
+	{ DBGBUS_SSPP0, 1, 7 },
+
+	{ DBGBUS_SSPP0, 2, 0 },
+	{ DBGBUS_SSPP0, 2, 1 },
+	{ DBGBUS_SSPP0, 2, 2 },
+	{ DBGBUS_SSPP0, 2, 3 },
+	{ DBGBUS_SSPP0, 2, 4 },
+	{ DBGBUS_SSPP0, 2, 5 },
+	{ DBGBUS_SSPP0, 2, 6 },
+	{ DBGBUS_SSPP0, 2, 7 },
+
+	{ DBGBUS_SSPP0, 4, 0 },
+	{ DBGBUS_SSPP0, 4, 1 },
+	{ DBGBUS_SSPP0, 4, 2 },
+	{ DBGBUS_SSPP0, 4, 3 },
+	{ DBGBUS_SSPP0, 4, 4 },
+	{ DBGBUS_SSPP0, 4, 5 },
+	{ DBGBUS_SSPP0, 4, 6 },
+	{ DBGBUS_SSPP0, 4, 7 },
+
+	{ DBGBUS_SSPP0, 5, 0 },
+	{ DBGBUS_SSPP0, 5, 1 },
+	{ DBGBUS_SSPP0, 5, 2 },
+	{ DBGBUS_SSPP0, 5, 3 },
+	{ DBGBUS_SSPP0, 5, 4 },
+	{ DBGBUS_SSPP0, 5, 5 },
+	{ DBGBUS_SSPP0, 5, 6 },
+	{ DBGBUS_SSPP0, 5, 7 },
+
+	/* vig 2 */
+	{ DBGBUS_SSPP0, 20, 0 },
+	{ DBGBUS_SSPP0, 20, 1 },
+	{ DBGBUS_SSPP0, 20, 2 },
+	{ DBGBUS_SSPP0, 20, 3 },
+	{ DBGBUS_SSPP0, 20, 4 },
+	{ DBGBUS_SSPP0, 20, 5 },
+	{ DBGBUS_SSPP0, 20, 6 },
+	{ DBGBUS_SSPP0, 20, 7 },
+
+	{ DBGBUS_SSPP0, 21, 0 },
+	{ DBGBUS_SSPP0, 21, 1 },
+	{ DBGBUS_SSPP0, 21, 2 },
+	{ DBGBUS_SSPP0, 21, 3 },
+	{ DBGBUS_SSPP0, 21, 4 },
+	{ DBGBUS_SSPP0, 21, 5 },
+	{ DBGBUS_SSPP0, 21, 6 },
+	{ DBGBUS_SSPP0, 21, 7 },
+
+	{ DBGBUS_SSPP0, 22, 0 },
+	{ DBGBUS_SSPP0, 22, 1 },
+	{ DBGBUS_SSPP0, 22, 2 },
+	{ DBGBUS_SSPP0, 22, 3 },
+	{ DBGBUS_SSPP0, 22, 4 },
+	{ DBGBUS_SSPP0, 22, 5 },
+	{ DBGBUS_SSPP0, 22, 6 },
+	{ DBGBUS_SSPP0, 22, 7 },
+
+	{ DBGBUS_SSPP0, 24, 0 },
+	{ DBGBUS_SSPP0, 24, 1 },
+	{ DBGBUS_SSPP0, 24, 2 },
+	{ DBGBUS_SSPP0, 24, 3 },
+	{ DBGBUS_SSPP0, 24, 4 },
+	{ DBGBUS_SSPP0, 24, 5 },
+	{ DBGBUS_SSPP0, 24, 6 },
+	{ DBGBUS_SSPP0, 24, 7 },
+
+	{ DBGBUS_SSPP0, 25, 0 },
+	{ DBGBUS_SSPP0, 25, 1 },
+	{ DBGBUS_SSPP0, 25, 2 },
+	{ DBGBUS_SSPP0, 25, 3 },
+	{ DBGBUS_SSPP0, 25, 4 },
+	{ DBGBUS_SSPP0, 25, 5 },
+	{ DBGBUS_SSPP0, 25, 6 },
+	{ DBGBUS_SSPP0, 25, 7 },
+
+	/* dma 2 */
+	{ DBGBUS_SSPP0, 30, 0 },
+	{ DBGBUS_SSPP0, 30, 1 },
+	{ DBGBUS_SSPP0, 30, 2 },
+	{ DBGBUS_SSPP0, 30, 3 },
+	{ DBGBUS_SSPP0, 30, 4 },
+	{ DBGBUS_SSPP0, 30, 5 },
+	{ DBGBUS_SSPP0, 30, 6 },
+	{ DBGBUS_SSPP0, 30, 7 },
+
+	{ DBGBUS_SSPP0, 31, 0 },
+	{ DBGBUS_SSPP0, 31, 1 },
+	{ DBGBUS_SSPP0, 31, 2 },
+	{ DBGBUS_SSPP0, 31, 3 },
+	{ DBGBUS_SSPP0, 31, 4 },
+	{ DBGBUS_SSPP0, 31, 5 },
+	{ DBGBUS_SSPP0, 31, 6 },
+	{ DBGBUS_SSPP0, 31, 7 },
+
+	{ DBGBUS_SSPP0, 32, 0 },
+	{ DBGBUS_SSPP0, 32, 1 },
+	{ DBGBUS_SSPP0, 32, 2 },
+	{ DBGBUS_SSPP0, 32, 3 },
+	{ DBGBUS_SSPP0, 32, 4 },
+	{ DBGBUS_SSPP0, 32, 5 },
+	{ DBGBUS_SSPP0, 32, 6 },
+	{ DBGBUS_SSPP0, 32, 7 },
+
+	{ DBGBUS_SSPP0, 33, 0 },
+	{ DBGBUS_SSPP0, 33, 1 },
+	{ DBGBUS_SSPP0, 33, 2 },
+	{ DBGBUS_SSPP0, 33, 3 },
+	{ DBGBUS_SSPP0, 33, 4 },
+	{ DBGBUS_SSPP0, 33, 5 },
+	{ DBGBUS_SSPP0, 33, 6 },
+	{ DBGBUS_SSPP0, 33, 7 },
+
+	{ DBGBUS_SSPP0, 34, 0 },
+	{ DBGBUS_SSPP0, 34, 1 },
+	{ DBGBUS_SSPP0, 34, 2 },
+	{ DBGBUS_SSPP0, 34, 3 },
+	{ DBGBUS_SSPP0, 34, 4 },
+	{ DBGBUS_SSPP0, 34, 5 },
+	{ DBGBUS_SSPP0, 34, 6 },
+	{ DBGBUS_SSPP0, 34, 7 },
+
+	{ DBGBUS_SSPP0, 35, 0 },
+	{ DBGBUS_SSPP0, 35, 1 },
+	{ DBGBUS_SSPP0, 35, 2 },
+	{ DBGBUS_SSPP0, 35, 3 },
+
+	/* dma 0 */
+	{ DBGBUS_SSPP0, 40, 0 },
+	{ DBGBUS_SSPP0, 40, 1 },
+	{ DBGBUS_SSPP0, 40, 2 },
+	{ DBGBUS_SSPP0, 40, 3 },
+	{ DBGBUS_SSPP0, 40, 4 },
+	{ DBGBUS_SSPP0, 40, 5 },
+	{ DBGBUS_SSPP0, 40, 6 },
+	{ DBGBUS_SSPP0, 40, 7 },
+
+	{ DBGBUS_SSPP0, 41, 0 },
+	{ DBGBUS_SSPP0, 41, 1 },
+	{ DBGBUS_SSPP0, 41, 2 },
+	{ DBGBUS_SSPP0, 41, 3 },
+	{ DBGBUS_SSPP0, 41, 4 },
+	{ DBGBUS_SSPP0, 41, 5 },
+	{ DBGBUS_SSPP0, 41, 6 },
+	{ DBGBUS_SSPP0, 41, 7 },
+
+	{ DBGBUS_SSPP0, 42, 0 },
+	{ DBGBUS_SSPP0, 42, 1 },
+	{ DBGBUS_SSPP0, 42, 2 },
+	{ DBGBUS_SSPP0, 42, 3 },
+	{ DBGBUS_SSPP0, 42, 4 },
+	{ DBGBUS_SSPP0, 42, 5 },
+	{ DBGBUS_SSPP0, 42, 6 },
+	{ DBGBUS_SSPP0, 42, 7 },
+
+	{ DBGBUS_SSPP0, 44, 0 },
+	{ DBGBUS_SSPP0, 44, 1 },
+	{ DBGBUS_SSPP0, 44, 2 },
+	{ DBGBUS_SSPP0, 44, 3 },
+	{ DBGBUS_SSPP0, 44, 4 },
+	{ DBGBUS_SSPP0, 44, 5 },
+	{ DBGBUS_SSPP0, 44, 6 },
+	{ DBGBUS_SSPP0, 44, 7 },
+
+	{ DBGBUS_SSPP0, 45, 0 },
+	{ DBGBUS_SSPP0, 45, 1 },
+	{ DBGBUS_SSPP0, 45, 2 },
+	{ DBGBUS_SSPP0, 45, 3 },
+	{ DBGBUS_SSPP0, 45, 4 },
+	{ DBGBUS_SSPP0, 45, 5 },
+	{ DBGBUS_SSPP0, 45, 6 },
+	{ DBGBUS_SSPP0, 45, 7 },
+
+	/* fetch sspp1 */
+	/* vig 1 */
+	{ DBGBUS_SSPP1, 0, 0 },
+	{ DBGBUS_SSPP1, 0, 1 },
+	{ DBGBUS_SSPP1, 0, 2 },
+	{ DBGBUS_SSPP1, 0, 3 },
+	{ DBGBUS_SSPP1, 0, 4 },
+	{ DBGBUS_SSPP1, 0, 5 },
+	{ DBGBUS_SSPP1, 0, 6 },
+	{ DBGBUS_SSPP1, 0, 7 },
+
+	{ DBGBUS_SSPP1, 1, 0 },
+	{ DBGBUS_SSPP1, 1, 1 },
+	{ DBGBUS_SSPP1, 1, 2 },
+	{ DBGBUS_SSPP1, 1, 3 },
+	{ DBGBUS_SSPP1, 1, 4 },
+	{ DBGBUS_SSPP1, 1, 5 },
+	{ DBGBUS_SSPP1, 1, 6 },
+	{ DBGBUS_SSPP1, 1, 7 },
+
+	{ DBGBUS_SSPP1, 2, 0 },
+	{ DBGBUS_SSPP1, 2, 1 },
+	{ DBGBUS_SSPP1, 2, 2 },
+	{ DBGBUS_SSPP1, 2, 3 },
+	{ DBGBUS_SSPP1, 2, 4 },
+	{ DBGBUS_SSPP1, 2, 5 },
+	{ DBGBUS_SSPP1, 2, 6 },
+	{ DBGBUS_SSPP1, 2, 7 },
+
+	{ DBGBUS_SSPP1, 4, 0 },
+	{ DBGBUS_SSPP1, 4, 1 },
+	{ DBGBUS_SSPP1, 4, 2 },
+	{ DBGBUS_SSPP1, 4, 3 },
+	{ DBGBUS_SSPP1, 4, 4 },
+	{ DBGBUS_SSPP1, 4, 5 },
+	{ DBGBUS_SSPP1, 4, 6 },
+	{ DBGBUS_SSPP1, 4, 7 },
+
+	{ DBGBUS_SSPP1, 5, 0 },
+	{ DBGBUS_SSPP1, 5, 1 },
+	{ DBGBUS_SSPP1, 5, 2 },
+	{ DBGBUS_SSPP1, 5, 3 },
+	{ DBGBUS_SSPP1, 5, 4 },
+	{ DBGBUS_SSPP1, 5, 5 },
+	{ DBGBUS_SSPP1, 5, 6 },
+	{ DBGBUS_SSPP1, 5, 7 },
+
+	/* vig 3 */
+	{ DBGBUS_SSPP1, 20, 0 },
+	{ DBGBUS_SSPP1, 20, 1 },
+	{ DBGBUS_SSPP1, 20, 2 },
+	{ DBGBUS_SSPP1, 20, 3 },
+	{ DBGBUS_SSPP1, 20, 4 },
+	{ DBGBUS_SSPP1, 20, 5 },
+	{ DBGBUS_SSPP1, 20, 6 },
+	{ DBGBUS_SSPP1, 20, 7 },
+
+	{ DBGBUS_SSPP1, 21, 0 },
+	{ DBGBUS_SSPP1, 21, 1 },
+	{ DBGBUS_SSPP1, 21, 2 },
+	{ DBGBUS_SSPP1, 21, 3 },
+	{ DBGBUS_SSPP1, 21, 4 },
+	{ DBGBUS_SSPP1, 21, 5 },
+	{ DBGBUS_SSPP1, 21, 6 },
+	{ DBGBUS_SSPP1, 21, 7 },
+
+	{ DBGBUS_SSPP1, 22, 0 },
+	{ DBGBUS_SSPP1, 22, 1 },
+	{ DBGBUS_SSPP1, 22, 2 },
+	{ DBGBUS_SSPP1, 22, 3 },
+	{ DBGBUS_SSPP1, 22, 4 },
+	{ DBGBUS_SSPP1, 22, 5 },
+	{ DBGBUS_SSPP1, 22, 6 },
+	{ DBGBUS_SSPP1, 22, 7 },
+
+	{ DBGBUS_SSPP1, 24, 0 },
+	{ DBGBUS_SSPP1, 24, 1 },
+	{ DBGBUS_SSPP1, 24, 2 },
+	{ DBGBUS_SSPP1, 24, 3 },
+	{ DBGBUS_SSPP1, 24, 4 },
+	{ DBGBUS_SSPP1, 24, 5 },
+	{ DBGBUS_SSPP1, 24, 6 },
+	{ DBGBUS_SSPP1, 24, 7 },
+
+	{ DBGBUS_SSPP1, 25, 0 },
+	{ DBGBUS_SSPP1, 25, 1 },
+	{ DBGBUS_SSPP1, 25, 2 },
+	{ DBGBUS_SSPP1, 25, 3 },
+	{ DBGBUS_SSPP1, 25, 4 },
+	{ DBGBUS_SSPP1, 25, 5 },
+	{ DBGBUS_SSPP1, 25, 6 },
+	{ DBGBUS_SSPP1, 25, 7 },
+
+	/* dma 3 */
+	{ DBGBUS_SSPP1, 30, 0 },
+	{ DBGBUS_SSPP1, 30, 1 },
+	{ DBGBUS_SSPP1, 30, 2 },
+	{ DBGBUS_SSPP1, 30, 3 },
+	{ DBGBUS_SSPP1, 30, 4 },
+	{ DBGBUS_SSPP1, 30, 5 },
+	{ DBGBUS_SSPP1, 30, 6 },
+	{ DBGBUS_SSPP1, 30, 7 },
+
+	{ DBGBUS_SSPP1, 31, 0 },
+	{ DBGBUS_SSPP1, 31, 1 },
+	{ DBGBUS_SSPP1, 31, 2 },
+	{ DBGBUS_SSPP1, 31, 3 },
+	{ DBGBUS_SSPP1, 31, 4 },
+	{ DBGBUS_SSPP1, 31, 5 },
+	{ DBGBUS_SSPP1, 31, 6 },
+	{ DBGBUS_SSPP1, 31, 7 },
+
+	{ DBGBUS_SSPP1, 32, 0 },
+	{ DBGBUS_SSPP1, 32, 1 },
+	{ DBGBUS_SSPP1, 32, 2 },
+	{ DBGBUS_SSPP1, 32, 3 },
+	{ DBGBUS_SSPP1, 32, 4 },
+	{ DBGBUS_SSPP1, 32, 5 },
+	{ DBGBUS_SSPP1, 32, 6 },
+	{ DBGBUS_SSPP1, 32, 7 },
+
+	{ DBGBUS_SSPP1, 33, 0 },
+	{ DBGBUS_SSPP1, 33, 1 },
+	{ DBGBUS_SSPP1, 33, 2 },
+	{ DBGBUS_SSPP1, 33, 3 },
+	{ DBGBUS_SSPP1, 33, 4 },
+	{ DBGBUS_SSPP1, 33, 5 },
+	{ DBGBUS_SSPP1, 33, 6 },
+	{ DBGBUS_SSPP1, 33, 7 },
+
+	{ DBGBUS_SSPP1, 34, 0 },
+	{ DBGBUS_SSPP1, 34, 1 },
+	{ DBGBUS_SSPP1, 34, 2 },
+	{ DBGBUS_SSPP1, 34, 3 },
+	{ DBGBUS_SSPP1, 34, 4 },
+	{ DBGBUS_SSPP1, 34, 5 },
+	{ DBGBUS_SSPP1, 34, 6 },
+	{ DBGBUS_SSPP1, 34, 7 },
+
+	{ DBGBUS_SSPP1, 35, 0 },
+	{ DBGBUS_SSPP1, 35, 1 },
+	{ DBGBUS_SSPP1, 35, 2 },
+
+	/* dma 1 */
+	{ DBGBUS_SSPP1, 40, 0 },
+	{ DBGBUS_SSPP1, 40, 1 },
+	{ DBGBUS_SSPP1, 40, 2 },
+	{ DBGBUS_SSPP1, 40, 3 },
+	{ DBGBUS_SSPP1, 40, 4 },
+	{ DBGBUS_SSPP1, 40, 5 },
+	{ DBGBUS_SSPP1, 40, 6 },
+	{ DBGBUS_SSPP1, 40, 7 },
+
+	{ DBGBUS_SSPP1, 41, 0 },
+	{ DBGBUS_SSPP1, 41, 1 },
+	{ DBGBUS_SSPP1, 41, 2 },
+	{ DBGBUS_SSPP1, 41, 3 },
+	{ DBGBUS_SSPP1, 41, 4 },
+	{ DBGBUS_SSPP1, 41, 5 },
+	{ DBGBUS_SSPP1, 41, 6 },
+	{ DBGBUS_SSPP1, 41, 7 },
+
+	{ DBGBUS_SSPP1, 42, 0 },
+	{ DBGBUS_SSPP1, 42, 1 },
+	{ DBGBUS_SSPP1, 42, 2 },
+	{ DBGBUS_SSPP1, 42, 3 },
+	{ DBGBUS_SSPP1, 42, 4 },
+	{ DBGBUS_SSPP1, 42, 5 },
+	{ DBGBUS_SSPP1, 42, 6 },
+	{ DBGBUS_SSPP1, 42, 7 },
+
+	{ DBGBUS_SSPP1, 44, 0 },
+	{ DBGBUS_SSPP1, 44, 1 },
+	{ DBGBUS_SSPP1, 44, 2 },
+	{ DBGBUS_SSPP1, 44, 3 },
+	{ DBGBUS_SSPP1, 44, 4 },
+	{ DBGBUS_SSPP1, 44, 5 },
+	{ DBGBUS_SSPP1, 44, 6 },
+	{ DBGBUS_SSPP1, 44, 7 },
+
+	{ DBGBUS_SSPP1, 45, 0 },
+	{ DBGBUS_SSPP1, 45, 1 },
+	{ DBGBUS_SSPP1, 45, 2 },
+	{ DBGBUS_SSPP1, 45, 3 },
+	{ DBGBUS_SSPP1, 45, 4 },
+	{ DBGBUS_SSPP1, 45, 5 },
+	{ DBGBUS_SSPP1, 45, 6 },
+	{ DBGBUS_SSPP1, 45, 7 },
+
+	/* cursor 1 */
+	{ DBGBUS_SSPP1, 80, 0 },
+	{ DBGBUS_SSPP1, 80, 1 },
+	{ DBGBUS_SSPP1, 80, 2 },
+	{ DBGBUS_SSPP1, 80, 3 },
+	{ DBGBUS_SSPP1, 80, 4 },
+	{ DBGBUS_SSPP1, 80, 5 },
+	{ DBGBUS_SSPP1, 80, 6 },
+	{ DBGBUS_SSPP1, 80, 7 },
+
+	{ DBGBUS_SSPP1, 81, 0 },
+	{ DBGBUS_SSPP1, 81, 1 },
+	{ DBGBUS_SSPP1, 81, 2 },
+	{ DBGBUS_SSPP1, 81, 3 },
+	{ DBGBUS_SSPP1, 81, 4 },
+	{ DBGBUS_SSPP1, 81, 5 },
+	{ DBGBUS_SSPP1, 81, 6 },
+	{ DBGBUS_SSPP1, 81, 7 },
+
+	{ DBGBUS_SSPP1, 82, 0 },
+	{ DBGBUS_SSPP1, 82, 1 },
+	{ DBGBUS_SSPP1, 82, 2 },
+	{ DBGBUS_SSPP1, 82, 3 },
+	{ DBGBUS_SSPP1, 82, 4 },
+	{ DBGBUS_SSPP1, 82, 5 },
+	{ DBGBUS_SSPP1, 82, 6 },
+	{ DBGBUS_SSPP1, 82, 7 },
+
+	{ DBGBUS_SSPP1, 83, 0 },
+	{ DBGBUS_SSPP1, 83, 1 },
+	{ DBGBUS_SSPP1, 83, 2 },
+	{ DBGBUS_SSPP1, 83, 3 },
+	{ DBGBUS_SSPP1, 83, 4 },
+	{ DBGBUS_SSPP1, 83, 5 },
+	{ DBGBUS_SSPP1, 83, 6 },
+	{ DBGBUS_SSPP1, 83, 7 },
+
+	{ DBGBUS_SSPP1, 84, 0 },
+	{ DBGBUS_SSPP1, 84, 1 },
+	{ DBGBUS_SSPP1, 84, 2 },
+	{ DBGBUS_SSPP1, 84, 3 },
+	{ DBGBUS_SSPP1, 84, 4 },
+	{ DBGBUS_SSPP1, 84, 5 },
+	{ DBGBUS_SSPP1, 84, 6 },
+	{ DBGBUS_SSPP1, 84, 7 },
+
+	/* dspp */
+	{ DBGBUS_DSPP, 13, 0 },
+	{ DBGBUS_DSPP, 19, 0 },
+	{ DBGBUS_DSPP, 14, 0 },
+	{ DBGBUS_DSPP, 14, 1 },
+	{ DBGBUS_DSPP, 14, 3 },
+	{ DBGBUS_DSPP, 20, 0 },
+	{ DBGBUS_DSPP, 20, 1 },
+	{ DBGBUS_DSPP, 20, 3 },
+
+	/* ppb_0 */
+	{ DBGBUS_DSPP, 31, 0 },
+	{ DBGBUS_DSPP, 33, 0 },
+	{ DBGBUS_DSPP, 35, 0 },
+	{ DBGBUS_DSPP, 42, 0 },
+
+	/* ppb_1 */
+	{ DBGBUS_DSPP, 32, 0 },
+	{ DBGBUS_DSPP, 34, 0 },
+	{ DBGBUS_DSPP, 36, 0 },
+	{ DBGBUS_DSPP, 43, 0 },
+
+	/* lm_lut */
+	{ DBGBUS_DSPP, 109, 0 },
+	{ DBGBUS_DSPP, 105, 0 },
+	{ DBGBUS_DSPP, 103, 0 },
+
+	/* tear-check */
+	{ DBGBUS_PERIPH, 63, 0 },
+	{ DBGBUS_PERIPH, 64, 0 },
+	{ DBGBUS_PERIPH, 65, 0 },
+	{ DBGBUS_PERIPH, 73, 0 },
+	{ DBGBUS_PERIPH, 74, 0 },
+
+	/* crossbar */
+	{ DBGBUS_DSPP, 0, 0},
+
+	/* rotator */
+	{ DBGBUS_DSPP, 9, 0},
+
+	/* blend */
+	/* LM0 */
+	{ DBGBUS_DSPP, 63, 0},
+	{ DBGBUS_DSPP, 63, 1},
+	{ DBGBUS_DSPP, 63, 2},
+	{ DBGBUS_DSPP, 63, 3},
+	{ DBGBUS_DSPP, 63, 4},
+	{ DBGBUS_DSPP, 63, 5},
+	{ DBGBUS_DSPP, 63, 6},
+	{ DBGBUS_DSPP, 63, 7},
+
+	{ DBGBUS_DSPP, 64, 0},
+	{ DBGBUS_DSPP, 64, 1},
+	{ DBGBUS_DSPP, 64, 2},
+	{ DBGBUS_DSPP, 64, 3},
+	{ DBGBUS_DSPP, 64, 4},
+	{ DBGBUS_DSPP, 64, 5},
+	{ DBGBUS_DSPP, 64, 6},
+	{ DBGBUS_DSPP, 64, 7},
+
+	{ DBGBUS_DSPP, 65, 0},
+	{ DBGBUS_DSPP, 65, 1},
+	{ DBGBUS_DSPP, 65, 2},
+	{ DBGBUS_DSPP, 65, 3},
+	{ DBGBUS_DSPP, 65, 4},
+	{ DBGBUS_DSPP, 65, 5},
+	{ DBGBUS_DSPP, 65, 6},
+	{ DBGBUS_DSPP, 65, 7},
+
+	{ DBGBUS_DSPP, 66, 0},
+	{ DBGBUS_DSPP, 66, 1},
+	{ DBGBUS_DSPP, 66, 2},
+	{ DBGBUS_DSPP, 66, 3},
+	{ DBGBUS_DSPP, 66, 4},
+	{ DBGBUS_DSPP, 66, 5},
+	{ DBGBUS_DSPP, 66, 6},
+	{ DBGBUS_DSPP, 66, 7},
+
+	{ DBGBUS_DSPP, 67, 0},
+	{ DBGBUS_DSPP, 67, 1},
+	{ DBGBUS_DSPP, 67, 2},
+	{ DBGBUS_DSPP, 67, 3},
+	{ DBGBUS_DSPP, 67, 4},
+	{ DBGBUS_DSPP, 67, 5},
+	{ DBGBUS_DSPP, 67, 6},
+	{ DBGBUS_DSPP, 67, 7},
+
+	{ DBGBUS_DSPP, 68, 0},
+	{ DBGBUS_DSPP, 68, 1},
+	{ DBGBUS_DSPP, 68, 2},
+	{ DBGBUS_DSPP, 68, 3},
+	{ DBGBUS_DSPP, 68, 4},
+	{ DBGBUS_DSPP, 68, 5},
+	{ DBGBUS_DSPP, 68, 6},
+	{ DBGBUS_DSPP, 68, 7},
+
+	{ DBGBUS_DSPP, 69, 0},
+	{ DBGBUS_DSPP, 69, 1},
+	{ DBGBUS_DSPP, 69, 2},
+	{ DBGBUS_DSPP, 69, 3},
+	{ DBGBUS_DSPP, 69, 4},
+	{ DBGBUS_DSPP, 69, 5},
+	{ DBGBUS_DSPP, 69, 6},
+	{ DBGBUS_DSPP, 69, 7},
+
+	/* LM1 */
+	{ DBGBUS_DSPP, 70, 0},
+	{ DBGBUS_DSPP, 70, 1},
+	{ DBGBUS_DSPP, 70, 2},
+	{ DBGBUS_DSPP, 70, 3},
+	{ DBGBUS_DSPP, 70, 4},
+	{ DBGBUS_DSPP, 70, 5},
+	{ DBGBUS_DSPP, 70, 6},
+	{ DBGBUS_DSPP, 70, 7},
+
+	{ DBGBUS_DSPP, 71, 0},
+	{ DBGBUS_DSPP, 71, 1},
+	{ DBGBUS_DSPP, 71, 2},
+	{ DBGBUS_DSPP, 71, 3},
+	{ DBGBUS_DSPP, 71, 4},
+	{ DBGBUS_DSPP, 71, 5},
+	{ DBGBUS_DSPP, 71, 6},
+	{ DBGBUS_DSPP, 71, 7},
+
+	{ DBGBUS_DSPP, 72, 0},
+	{ DBGBUS_DSPP, 72, 1},
+	{ DBGBUS_DSPP, 72, 2},
+	{ DBGBUS_DSPP, 72, 3},
+	{ DBGBUS_DSPP, 72, 4},
+	{ DBGBUS_DSPP, 72, 5},
+	{ DBGBUS_DSPP, 72, 6},
+	{ DBGBUS_DSPP, 72, 7},
+
+	{ DBGBUS_DSPP, 73, 0},
+	{ DBGBUS_DSPP, 73, 1},
+	{ DBGBUS_DSPP, 73, 2},
+	{ DBGBUS_DSPP, 73, 3},
+	{ DBGBUS_DSPP, 73, 4},
+	{ DBGBUS_DSPP, 73, 5},
+	{ DBGBUS_DSPP, 73, 6},
+	{ DBGBUS_DSPP, 73, 7},
+
+	{ DBGBUS_DSPP, 74, 0},
+	{ DBGBUS_DSPP, 74, 1},
+	{ DBGBUS_DSPP, 74, 2},
+	{ DBGBUS_DSPP, 74, 3},
+	{ DBGBUS_DSPP, 74, 4},
+	{ DBGBUS_DSPP, 74, 5},
+	{ DBGBUS_DSPP, 74, 6},
+	{ DBGBUS_DSPP, 74, 7},
+
+	{ DBGBUS_DSPP, 75, 0},
+	{ DBGBUS_DSPP, 75, 1},
+	{ DBGBUS_DSPP, 75, 2},
+	{ DBGBUS_DSPP, 75, 3},
+	{ DBGBUS_DSPP, 75, 4},
+	{ DBGBUS_DSPP, 75, 5},
+	{ DBGBUS_DSPP, 75, 6},
+	{ DBGBUS_DSPP, 75, 7},
+
+	{ DBGBUS_DSPP, 76, 0},
+	{ DBGBUS_DSPP, 76, 1},
+	{ DBGBUS_DSPP, 76, 2},
+	{ DBGBUS_DSPP, 76, 3},
+	{ DBGBUS_DSPP, 76, 4},
+	{ DBGBUS_DSPP, 76, 5},
+	{ DBGBUS_DSPP, 76, 6},
+	{ DBGBUS_DSPP, 76, 7},
+
+	/* LM2 */
+	{ DBGBUS_DSPP, 77, 0},
+	{ DBGBUS_DSPP, 77, 1},
+	{ DBGBUS_DSPP, 77, 2},
+	{ DBGBUS_DSPP, 77, 3},
+	{ DBGBUS_DSPP, 77, 4},
+	{ DBGBUS_DSPP, 77, 5},
+	{ DBGBUS_DSPP, 77, 6},
+	{ DBGBUS_DSPP, 77, 7},
+
+	{ DBGBUS_DSPP, 78, 0},
+	{ DBGBUS_DSPP, 78, 1},
+	{ DBGBUS_DSPP, 78, 2},
+	{ DBGBUS_DSPP, 78, 3},
+	{ DBGBUS_DSPP, 78, 4},
+	{ DBGBUS_DSPP, 78, 5},
+	{ DBGBUS_DSPP, 78, 6},
+	{ DBGBUS_DSPP, 78, 7},
+
+	{ DBGBUS_DSPP, 79, 0},
+	{ DBGBUS_DSPP, 79, 1},
+	{ DBGBUS_DSPP, 79, 2},
+	{ DBGBUS_DSPP, 79, 3},
+	{ DBGBUS_DSPP, 79, 4},
+	{ DBGBUS_DSPP, 79, 5},
+	{ DBGBUS_DSPP, 79, 6},
+	{ DBGBUS_DSPP, 79, 7},
+
+	{ DBGBUS_DSPP, 80, 0},
+	{ DBGBUS_DSPP, 80, 1},
+	{ DBGBUS_DSPP, 80, 2},
+	{ DBGBUS_DSPP, 80, 3},
+	{ DBGBUS_DSPP, 80, 4},
+	{ DBGBUS_DSPP, 80, 5},
+	{ DBGBUS_DSPP, 80, 6},
+	{ DBGBUS_DSPP, 80, 7},
+
+	{ DBGBUS_DSPP, 81, 0},
+	{ DBGBUS_DSPP, 81, 1},
+	{ DBGBUS_DSPP, 81, 2},
+	{ DBGBUS_DSPP, 81, 3},
+	{ DBGBUS_DSPP, 81, 4},
+	{ DBGBUS_DSPP, 81, 5},
+	{ DBGBUS_DSPP, 81, 6},
+	{ DBGBUS_DSPP, 81, 7},
+
+	{ DBGBUS_DSPP, 82, 0},
+	{ DBGBUS_DSPP, 82, 1},
+	{ DBGBUS_DSPP, 82, 2},
+	{ DBGBUS_DSPP, 82, 3},
+	{ DBGBUS_DSPP, 82, 4},
+	{ DBGBUS_DSPP, 82, 5},
+	{ DBGBUS_DSPP, 82, 6},
+	{ DBGBUS_DSPP, 82, 7},
+
+	{ DBGBUS_DSPP, 83, 0},
+	{ DBGBUS_DSPP, 83, 1},
+	{ DBGBUS_DSPP, 83, 2},
+	{ DBGBUS_DSPP, 83, 3},
+	{ DBGBUS_DSPP, 83, 4},
+	{ DBGBUS_DSPP, 83, 5},
+	{ DBGBUS_DSPP, 83, 6},
+	{ DBGBUS_DSPP, 83, 7},
+
+	/* csc */
+	{ DBGBUS_SSPP0, 7, 0},
+	{ DBGBUS_SSPP0, 7, 1},
+	{ DBGBUS_SSPP0, 27, 0},
+	{ DBGBUS_SSPP0, 27, 1},
+	{ DBGBUS_SSPP1, 7, 0},
+	{ DBGBUS_SSPP1, 7, 1},
+	{ DBGBUS_SSPP1, 27, 0},
+	{ DBGBUS_SSPP1, 27, 1},
+
+	/* pcc */
+	{ DBGBUS_SSPP0, 3,  3},
+	{ DBGBUS_SSPP0, 23, 3},
+	{ DBGBUS_SSPP0, 33, 3},
+	{ DBGBUS_SSPP0, 43, 3},
+	{ DBGBUS_SSPP1, 3,  3},
+	{ DBGBUS_SSPP1, 23, 3},
+	{ DBGBUS_SSPP1, 33, 3},
+	{ DBGBUS_SSPP1, 43, 3},
+
+	/* spa */
+	{ DBGBUS_SSPP0, 8,  0},
+	{ DBGBUS_SSPP0, 28, 0},
+	{ DBGBUS_SSPP1, 8,  0},
+	{ DBGBUS_SSPP1, 28, 0},
+	{ DBGBUS_DSPP, 13, 0},
+	{ DBGBUS_DSPP, 19, 0},
+
+	/* igc */
+	{ DBGBUS_SSPP0, 9,  0},
+	{ DBGBUS_SSPP0, 9,  1},
+	{ DBGBUS_SSPP0, 9,  3},
+	{ DBGBUS_SSPP0, 29, 0},
+	{ DBGBUS_SSPP0, 29, 1},
+	{ DBGBUS_SSPP0, 29, 3},
+	{ DBGBUS_SSPP0, 17, 0},
+	{ DBGBUS_SSPP0, 17, 1},
+	{ DBGBUS_SSPP0, 17, 3},
+	{ DBGBUS_SSPP0, 37, 0},
+	{ DBGBUS_SSPP0, 37, 1},
+	{ DBGBUS_SSPP0, 37, 3},
+	{ DBGBUS_SSPP0, 46, 0},
+	{ DBGBUS_SSPP0, 46, 1},
+	{ DBGBUS_SSPP0, 46, 3},
+
+	{ DBGBUS_SSPP1, 9,  0},
+	{ DBGBUS_SSPP1, 9,  1},
+	{ DBGBUS_SSPP1, 9,  3},
+	{ DBGBUS_SSPP1, 29, 0},
+	{ DBGBUS_SSPP1, 29, 1},
+	{ DBGBUS_SSPP1, 29, 3},
+	{ DBGBUS_SSPP1, 17, 0},
+	{ DBGBUS_SSPP1, 17, 1},
+	{ DBGBUS_SSPP1, 17, 3},
+	{ DBGBUS_SSPP1, 37, 0},
+	{ DBGBUS_SSPP1, 37, 1},
+	{ DBGBUS_SSPP1, 37, 3},
+	{ DBGBUS_SSPP1, 46, 0},
+	{ DBGBUS_SSPP1, 46, 1},
+	{ DBGBUS_SSPP1, 46, 3},
+
+	{ DBGBUS_DSPP, 14, 0},
+	{ DBGBUS_DSPP, 14, 1},
+	{ DBGBUS_DSPP, 14, 3},
+	{ DBGBUS_DSPP, 20, 0},
+	{ DBGBUS_DSPP, 20, 1},
+	{ DBGBUS_DSPP, 20, 3},
+
+	{ DBGBUS_PERIPH, 60, 0},
+};
+
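+/* msm8998 vbif debug bus test point groups */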
+static struct vbif_debug_bus_entry vbif_dbg_bus_msm8998[] = {
+	{0x214, 0x21c, 16, 2, 0x0, 0xd},     /* arb clients */
+	{0x214, 0x21c, 16, 2, 0x80, 0xc0},   /* arb clients */
+	{0x214, 0x21c, 16, 2, 0x100, 0x140}, /* arb clients */
+	{0x214, 0x21c, 0, 16, 0x0, 0xf},     /* xin blocks - axi side */
+	{0x214, 0x21c, 0, 16, 0x80, 0xa4},   /* xin blocks - axi side */
+	{0x214, 0x21c, 0, 15, 0x100, 0x124}, /* xin blocks - axi side */
+	{0x21c, 0x214, 0, 14, 0, 0xc}, /* xin blocks - clock side */
+};
+
+/**
+ * _sde_dbg_enable_power - use callback to turn power on for hw register access
+ * @enable: whether to turn power on or off
+ */
+static inline void _sde_dbg_enable_power(int enable)
+{
+	if (!sde_dbg_base.power_ctrl.enable_fn)
+		return;
+	sde_dbg_base.power_ctrl.enable_fn(
+			sde_dbg_base.power_ctrl.handle,
+			sde_dbg_base.power_ctrl.client,
+			enable);
+}
+
+/**
+ * _sde_dump_reg - helper function for dumping a register set's content
+ * @dump_name: register set name
+ * @reg_dump_flag: dumping flag controlling in-log/memory dump location
+ * @base_addr: starting address of io region for calculating offsets to print
+ * @addr: starting address offset for dumping
+ * @len_bytes: range of the register set
+ * @dump_mem: output buffer for memory dump location option
+ * @from_isr: whether being called from isr context
+ */
+static void _sde_dump_reg(const char *dump_name, u32 reg_dump_flag,
+		char __iomem *base_addr, char __iomem *addr, size_t len_bytes,
+		u32 **dump_mem, bool from_isr)
+{
+	u32 in_log, in_mem, len_align, len_padded;
+	u32 *dump_addr = NULL;
+	char __iomem *end_addr;
+	int i;
+
+	if (!len_bytes)
+		return;
+
+	in_log = (reg_dump_flag & SDE_DBG_DUMP_IN_LOG);
+	in_mem = (reg_dump_flag & SDE_DBG_DUMP_IN_MEM);
+
+	pr_debug("%s: reg_dump_flag=%d in_log=%d in_mem=%d\n",
+		dump_name, reg_dump_flag, in_log, in_mem);
+
+	if (!in_log && !in_mem)
+		return;
+
+	if (in_log)
+		dev_info(sde_dbg_base.dev, "%s: start_offset 0x%lx len 0x%zx\n",
+				dump_name, addr - base_addr, len_bytes);
+
+	len_align = (len_bytes + REG_DUMP_ALIGN - 1) / REG_DUMP_ALIGN;
+	len_padded = len_align * REG_DUMP_ALIGN;
+	end_addr = addr + len_bytes;
+
+	if (in_mem) {
+		if (dump_mem && !(*dump_mem)) {
+			phys_addr_t phys = 0;
+			*dump_mem = dma_alloc_coherent(sde_dbg_base.dev,
+					len_padded, &phys, GFP_KERNEL);
+		}
+
+		if (dump_mem && *dump_mem) {
+			dump_addr = *dump_mem;
+			dev_info(sde_dbg_base.dev,
+				"%s: start_addr:0x%pK len:0x%x reg_offset=0x%lx\n",
+				dump_name, dump_addr, len_padded,
+				addr - base_addr);
+		} else {
+			in_mem = 0;
+			pr_err("dump_mem: kzalloc fails!\n");
+		}
+	}
+
+	if (!from_isr)
+		_sde_dbg_enable_power(true);
+
+	for (i = 0; i < len_align; i++) {
+		u32 x0, x4, x8, xc;
+
+		x0 = (addr < end_addr) ? readl_relaxed(addr + 0x0) : 0;
+		x4 = (addr + 0x4 < end_addr) ? readl_relaxed(addr + 0x4) : 0;
+		x8 = (addr + 0x8 < end_addr) ? readl_relaxed(addr + 0x8) : 0;
+		xc = (addr + 0xc < end_addr) ? readl_relaxed(addr + 0xc) : 0;
+
+		if (in_log)
+			dev_info(sde_dbg_base.dev,
+					"0x%lx : %08x %08x %08x %08x\n",
+					addr - base_addr, x0, x4, x8, xc);
+
+		if (dump_addr) {
+			dump_addr[i * 4] = x0;
+			dump_addr[i * 4 + 1] = x4;
+			dump_addr[i * 4 + 2] = x8;
+			dump_addr[i * 4 + 3] = xc;
+		}
+
+		addr += REG_DUMP_ALIGN;
+	}
+
+	if (!from_isr)
+		_sde_dbg_enable_power(false);
+}
+
+/**
+ * _sde_dbg_get_dump_range - helper to retrieve dump length for a range node
+ * @range_node: range node to dump
+ * @max_offset: max offset of the register base
+ * @Return: length
+ */
+static u32 _sde_dbg_get_dump_range(struct sde_dbg_reg_offset *range_node,
+		size_t max_offset)
+{
+	u32 length = 0;
+
+	if ((range_node->start > range_node->end) ||
+		(range_node->end > max_offset) || (range_node->start == 0
+		&& range_node->end == 0)) {
+		length = max_offset;
+	} else {
+		length = range_node->end - range_node->start;
+	}
+
+	return length;
+}
+
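+/**
+ * _sde_dump_reg_range_cmp - list_sort comparator ordering ranges by start
+ * @priv: unused private data
+ * @a: first list node
+ * @b: second list node
+ * @Return: negative/zero/positive per start offset ordering
+ */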
+static int _sde_dump_reg_range_cmp(void *priv, struct list_head *a,
+		struct list_head *b)
+{
+	struct sde_dbg_reg_range *ar, *br;
+
+	if (!a || !b)
+		return 0;
+
+	ar = container_of(a, struct sde_dbg_reg_range, head);
+	br = container_of(b, struct sde_dbg_reg_range, head);
+
+	return ar->offset.start - br->offset.start;
+}
+
+/**
+ * _sde_dump_reg_by_ranges - dump ranges or full range of the register blk base
+ * @dbg: register blk base structure
+ * @reg_dump_flag: dump target, memory, kernel log, or both
+ */
+static void _sde_dump_reg_by_ranges(struct sde_dbg_reg_base *dbg,
+	u32 reg_dump_flag)
+{
+	char __iomem *addr;
+	size_t len;
+	struct sde_dbg_reg_range *range_node;
+
+	if (!dbg || !dbg->base) {
+		pr_err("dbg base is null!\n");
+		return;
+	}
+
+	dev_info(sde_dbg_base.dev, "%s:=========%s DUMP=========\n", __func__,
+			dbg->name);
+
+	/* If there is a list to dump the registers by ranges, use the ranges */
+	if (!list_empty(&dbg->sub_range_list)) {
+		/* sort the list by start address first */
+		list_sort(NULL, &dbg->sub_range_list, _sde_dump_reg_range_cmp);
+		list_for_each_entry(range_node, &dbg->sub_range_list, head) {
+			len = _sde_dbg_get_dump_range(&range_node->offset,
+				dbg->max_offset);
+			addr = dbg->base + range_node->offset.start;
+			pr_debug("%s: range_base=0x%pK start=0x%x end=0x%x\n",
+				range_node->range_name,
+				addr, range_node->offset.start,
+				range_node->offset.end);
+
+			_sde_dump_reg(range_node->range_name, reg_dump_flag,
+					dbg->base, addr, len,
+					&range_node->reg_dump, false);
+		}
+	} else {
+		/* If there is no list to dump ranges, dump all registers */
+		dev_info(sde_dbg_base.dev,
+				"Ranges not found, will dump full registers\n");
+		dev_info(sde_dbg_base.dev, "base:0x%pK len:0x%zx\n", dbg->base,
+				dbg->max_offset);
+		addr = dbg->base;
+		len = dbg->max_offset;
+		_sde_dump_reg(dbg->name, reg_dump_flag, dbg->base, addr, len,
+				&dbg->reg_dump, false);
+	}
+}
+
+/**
+ * _sde_dump_reg_by_blk - dump a named register base region
+ * @blk_name: register blk name
+ */
+static void _sde_dump_reg_by_blk(const char *blk_name)
+{
+	struct sde_dbg_base *dbg_base = &sde_dbg_base;
+	struct sde_dbg_reg_base *blk_base;
+
+	if (!dbg_base)
+		return;
+
+	list_for_each_entry(blk_base, &dbg_base->reg_base_list, reg_base_head) {
+		if (strlen(blk_base->name) &&
+			!strcmp(blk_base->name, blk_name)) {
+			_sde_dump_reg_by_ranges(blk_base,
+				dbg_base->enable_reg_dump);
+			break;
+		}
+	}
+}
+
+/**
+ * _sde_dump_reg_all - dump all register regions
+ */
+static void _sde_dump_reg_all(void)
+{
+	struct sde_dbg_base *dbg_base = &sde_dbg_base;
+	struct sde_dbg_reg_base *blk_base;
+
+	if (!dbg_base)
+		return;
+
+	list_for_each_entry(blk_base, &dbg_base->reg_base_list, reg_base_head)
+		if (strlen(blk_base->name))
+			_sde_dump_reg_by_blk(blk_base->name);
+}
+
+/**
+ * _sde_dump_get_blk_addr - retrieve register block address by name
+ * @blk_name: register blk name
+ * @Return: register blk base, or NULL
+ */
+static struct sde_dbg_reg_base *_sde_dump_get_blk_addr(const char *blk_name)
+{
+	struct sde_dbg_base *dbg_base = &sde_dbg_base;
+	struct sde_dbg_reg_base *blk_base;
+
+	list_for_each_entry(blk_base, &dbg_base->reg_base_list, reg_base_head)
+		if (strlen(blk_base->name) && !strcmp(blk_base->name, blk_name))
+			return blk_base;
+
+	return NULL;
+}
+
+static void _sde_dbg_dump_sde_dbg_bus(struct sde_dbg_sde_debug_bus *bus)
+{
+	bool in_log, in_mem;
+	u32 **dump_mem = NULL;
+	u32 *dump_addr = NULL;
+	u32 status = 0;
+	struct sde_debug_bus_entry *head;
+	phys_addr_t phys = 0;
+	int list_size;
+	int i;
+	u32 offset;
+	void __iomem *mem_base = NULL;
+	struct sde_dbg_reg_base *reg_base;
+
+	if (!bus || !bus->cmn.entries_size)
+		return;
+
+	list_for_each_entry(reg_base, &sde_dbg_base.reg_base_list,
+			reg_base_head)
+		if (strlen(reg_base->name) &&
+			!strcmp(reg_base->name, bus->cmn.name))
+			mem_base = reg_base->base + bus->top_blk_off;
+
+	if (!mem_base) {
+		pr_err("unable to find mem_base for %s\n", bus->cmn.name);
+		return;
+	}
+
+	dump_mem = &bus->cmn.dumped_content;
+
+	/* keep 4 u32 values (16 bytes) in memory for each bus entry */
+	list_size = (bus->cmn.entries_size * 4 * 4);
+
+	in_log = (bus->cmn.enable_mask & SDE_DBG_DUMP_IN_LOG);
+	in_mem = (bus->cmn.enable_mask & SDE_DBG_DUMP_IN_MEM);
+
+	if (!in_log && !in_mem)
+		return;
+
+	dev_info(sde_dbg_base.dev, "======== start %s dump =========\n",
+			bus->cmn.name);
+
+	if (in_mem) {
+		if (!(*dump_mem))
+			*dump_mem = dma_alloc_coherent(sde_dbg_base.dev,
+				list_size, &phys, GFP_KERNEL);
+
+		if (*dump_mem) {
+			dump_addr = *dump_mem;
+			dev_info(sde_dbg_base.dev,
+				"%s: start_addr:0x%pK len:0x%x\n",
+				__func__, dump_addr, list_size);
+		} else {
+			in_mem = false;
+			pr_err("dump_mem: allocation fails\n");
+		}
+	}
+
+	_sde_dbg_enable_power(true);
+	for (i = 0; i < bus->cmn.entries_size; i++) {
+		head = bus->entries + i;
+		writel_relaxed(TEST_MASK(head->block_id, head->test_id),
+				mem_base + head->wr_addr);
+		wmb(); /* make sure test bits were written */
+
+		if (bus->cmn.flags & DBGBUS_FLAGS_DSPP)
+			offset = DBGBUS_DSPP_STATUS;
+		else
+			offset = head->wr_addr + 0x4;
+
+		status = readl_relaxed(mem_base + offset);
+
+		if (in_log)
+			dev_info(sde_dbg_base.dev,
+					"waddr=0x%x blk=%d tst=%d val=0x%x\n",
+					head->wr_addr, head->block_id,
+					head->test_id, status);
+
+		if (dump_addr && in_mem) {
+			dump_addr[i*4]     = head->wr_addr;
+			dump_addr[i*4 + 1] = head->block_id;
+			dump_addr[i*4 + 2] = head->test_id;
+			dump_addr[i*4 + 3] = status;
+		}
+
+		/* Disable debug bus once we are done */
+		writel_relaxed(0, mem_base + head->wr_addr);
+
+	}
+	_sde_dbg_enable_power(false);
+
+	dev_info(sde_dbg_base.dev, "======== end %s dump =========\n",
+			bus->cmn.name);
+}
+
+static void _sde_dbg_dump_vbif_debug_bus_entry(
+		struct vbif_debug_bus_entry *head, void __iomem *mem_base,
+		u32 *dump_addr, bool in_log)
+{
+	int i, j;
+	u32 val;
+
+	if (!dump_addr && !in_log)
+		return;
+
+	for (i = 0; i < head->block_cnt; i++) {
+		writel_relaxed(1 << (i + head->bit_offset),
+				mem_base + head->block_bus_addr);
+		/* make sure that the current bus block is enabled */
+		wmb();
+		for (j = head->test_pnt_start; j < head->test_pnt_cnt; j++) {
+			writel_relaxed(j, mem_base + head->block_bus_addr + 4);
+			/* make sure that test point is enabled */
+			wmb();
+			val = readl_relaxed(mem_base + MMSS_VBIF_TEST_BUS_OUT);
+			if (dump_addr) {
+				*dump_addr++ = head->block_bus_addr;
+				*dump_addr++ = i;
+				*dump_addr++ = j;
+				*dump_addr++ = val;
+			}
+			if (in_log)
+				dev_info(sde_dbg_base.dev,
+					"testpoint:%x arb/xin id=%d index=%d val=0x%x\n",
+					head->block_bus_addr, i, j, val);
+		}
+	}
+}
+
+static void _sde_dbg_dump_vbif_dbg_bus(struct sde_dbg_vbif_debug_bus *bus)
+{
+	bool in_log, in_mem;
+	u32 **dump_mem = NULL;
+	u32 *dump_addr = NULL;
+	u32 value;
+	struct vbif_debug_bus_entry *head;
+	phys_addr_t phys = 0;
+	int i, list_size = 0;
+	void __iomem *mem_base = NULL;
+	struct vbif_debug_bus_entry *dbg_bus;
+	u32 bus_size;
+	struct sde_dbg_reg_base *reg_base;
+
+	if (!bus || !bus->cmn.entries_size)
+		return;
+
+	list_for_each_entry(reg_base, &sde_dbg_base.reg_base_list,
+			reg_base_head)
+		if (strlen(reg_base->name) &&
+			!strcmp(reg_base->name, bus->cmn.name))
+			mem_base = reg_base->base;
+
+	if (!mem_base) {
+		pr_err("unable to find mem_base for %s\n", bus->cmn.name);
+		return;
+	}
+
+	dbg_bus = bus->entries;
+	bus_size = bus->cmn.entries_size;
+	list_size = bus->cmn.entries_size;
+	dump_mem = &bus->cmn.dumped_content;
+
+	dev_info(sde_dbg_base.dev, "======== start %s dump =========\n",
+			bus->cmn.name);
+
+	if (!dump_mem || !dbg_bus || !bus_size || !list_size)
+		return;
+
+	/* allocate memory for each test point */
+	for (i = 0; i < bus_size; i++) {
+		head = dbg_bus + i;
+		list_size += (head->block_cnt * head->test_pnt_cnt);
+	}
+
+	/* 4 bytes * 4 entries for each test point */
+	list_size *= 16;
+
+	in_log = (bus->cmn.enable_mask & SDE_DBG_DUMP_IN_LOG);
+	in_mem = (bus->cmn.enable_mask & SDE_DBG_DUMP_IN_MEM);
+
+	if (!in_log && !in_mem)
+		return;
+
+	if (in_mem) {
+		if (!(*dump_mem))
+			*dump_mem = dma_alloc_coherent(sde_dbg_base.dev,
+				list_size, &phys, GFP_KERNEL);
+
+		if (*dump_mem) {
+			dump_addr = *dump_mem;
+			dev_info(sde_dbg_base.dev,
+				"%s: start_addr:0x%pK len:0x%x\n",
+				__func__, dump_addr, list_size);
+		} else {
+			in_mem = false;
+			pr_err("dump_mem: allocation fails\n");
+		}
+	}
+
+	_sde_dbg_enable_power(true);
+
+	value = readl_relaxed(mem_base + MMSS_VBIF_CLKON);
+	writel_relaxed(value | BIT(1), mem_base + MMSS_VBIF_CLKON);
+
+	/* make sure that vbif core is on */
+	wmb();
+
+	for (i = 0; i < bus_size; i++) {
+		head = dbg_bus + i;
+
+		writel_relaxed(0, mem_base + head->disable_bus_addr);
+		writel_relaxed(BIT(0), mem_base + MMSS_VBIF_TEST_BUS_OUT_CTRL);
+		/* make sure that other bus is off */
+		wmb();
+
+		_sde_dbg_dump_vbif_debug_bus_entry(head, mem_base, dump_addr,
+				in_log);
+		if (dump_addr)
+			dump_addr += (head->block_cnt * head->test_pnt_cnt * 4);
+	}
+
+	_sde_dbg_enable_power(false);
+
+	dev_info(sde_dbg_base.dev, "======== end %s dump =========\n",
+			bus->cmn.name);
+}
+
+/**
+ * _sde_dump_array - dump array of register bases
+ * @blk_arr: array of register base pointers
+ * @len: length of blk_arr
+ * @do_panic: whether to trigger a panic after dumping
+ * @name: string indicating origin of dump
+ * @dump_dbgbus_sde: whether to dump the sde debug bus
+ * @dump_dbgbus_vbif_rt: whether to dump the vbif rt debug bus
+ */
+static void _sde_dump_array(struct sde_dbg_reg_base *blk_arr[],
+	u32 len, bool do_panic, const char *name, bool dump_dbgbus_sde,
+	bool dump_dbgbus_vbif_rt, bool dump_all)
+{
+	int i;
+
+	mutex_lock(&sde_dbg_base.mutex);
+
+	for (i = 0; i < len; i++) {
+		if (blk_arr[i] != NULL)
+			_sde_dump_reg_by_ranges(blk_arr[i],
+				sde_dbg_base.enable_reg_dump);
+	}
+
+	if (dump_all)
+		sde_evtlog_dump_all(sde_dbg_base.evtlog);
+
+	if (dump_dbgbus_sde)
+		_sde_dbg_dump_sde_dbg_bus(&sde_dbg_base.dbgbus_sde);
+
+	if (dump_dbgbus_vbif_rt)
+		_sde_dbg_dump_vbif_dbg_bus(&sde_dbg_base.dbgbus_vbif_rt);
+
+	if (do_panic && sde_dbg_base.panic_on_err)
+		panic(name);
+
+	mutex_unlock(&sde_dbg_base.mutex);
+}
+
+/**
+ * _sde_dump_work - deferred dump work function
+ * @work: work structure
+ */
+static void _sde_dump_work(struct work_struct *work)
+{
+	_sde_dump_array(sde_dbg_base.req_dump_blks,
+		ARRAY_SIZE(sde_dbg_base.req_dump_blks),
+		sde_dbg_base.work_panic, "evtlog_workitem",
+		sde_dbg_base.dbgbus_sde.cmn.include_in_deferred_work,
+		sde_dbg_base.dbgbus_vbif_rt.cmn.include_in_deferred_work,
+		sde_dbg_base.dump_all);
+}
+
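+/**
+ * sde_dbg_dump - dump requested register blocks and debug buses
+ * @queue_work: whether to defer dumping to the dump work thread
+ * @name: string indicating origin of dump
+ * @...: NULL-terminated list of register block names; the special names
+ *	"all", "dbg_bus", "vbif_dbg_bus" and "panic" respectively request
+ *	a full event log dump, the sde debug bus, the realtime vbif debug
+ *	bus, and a kernel panic once dumping completes, e.g.
+ *	sde_dbg_dump(false, __func__, "dbg_bus", "panic", NULL);
+ */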
+void sde_dbg_dump(bool queue_work, const char *name, ...)
+{
+	int i, index = 0;
+	bool do_panic = false;
+	bool dump_dbgbus_sde = false;
+	bool dump_dbgbus_vbif_rt = false;
+	bool dump_all = false;
+	va_list args;
+	char *blk_name = NULL;
+	struct sde_dbg_reg_base *blk_base = NULL;
+	struct sde_dbg_reg_base **blk_arr;
+	u32 blk_len;
+
+	if (!sde_evtlog_is_enabled(sde_dbg_base.evtlog, SDE_EVTLOG_DEFAULT))
+		return;
+
+	if (queue_work && work_pending(&sde_dbg_base.dump_work))
+		return;
+
+	blk_arr = &sde_dbg_base.req_dump_blks[0];
+	blk_len = ARRAY_SIZE(sde_dbg_base.req_dump_blks);
+
+	memset(sde_dbg_base.req_dump_blks, 0,
+			sizeof(sde_dbg_base.req_dump_blks));
+	sde_dbg_base.dump_all = false;
+
+	va_start(args, name);
+	i = 0;
+	while ((blk_name = va_arg(args, char*))) {
+		if (i++ >= SDE_EVTLOG_MAX_DATA) {
+			pr_err("could not parse all dump arguments\n");
+			break;
+		}
+		if (IS_ERR_OR_NULL(blk_name))
+			break;
+
+		blk_base = _sde_dump_get_blk_addr(blk_name);
+		if (blk_base) {
+			if (index < blk_len) {
+				blk_arr[index] = blk_base;
+				index++;
+			} else {
+				pr_err("insufficient space to to dump %s\n",
+						blk_name);
+			}
+		}
+		if (!strcmp(blk_name, "all"))
+			dump_all = true;
+
+		if (!strcmp(blk_name, "dbg_bus"))
+			dump_dbgbus_sde = true;
+
+		if (!strcmp(blk_name, "vbif_dbg_bus"))
+			dump_dbgbus_vbif_rt = true;
+
+		if (!strcmp(blk_name, "panic"))
+			do_panic = true;
+	}
+	va_end(args);
+
+	if (queue_work) {
+		/* schedule work to dump later */
+		sde_dbg_base.work_panic = do_panic;
+		sde_dbg_base.dbgbus_sde.cmn.include_in_deferred_work =
+				dump_dbgbus_sde;
+		sde_dbg_base.dbgbus_vbif_rt.cmn.include_in_deferred_work =
+				dump_dbgbus_vbif_rt;
+		sde_dbg_base.dump_all = dump_all;
+		schedule_work(&sde_dbg_base.dump_work);
+	} else {
+		_sde_dump_array(blk_arr, blk_len, do_panic, name,
+				dump_dbgbus_sde, dump_dbgbus_vbif_rt, dump_all);
+	}
+}
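+
+/*
+ * Illustrative call; the block names depend on what was registered at
+ * probe time, so treat "sde" here as an example:
+ *
+ *	SDE_DBG_DUMP("sde", "dbg_bus", "vbif_dbg_bus", "panic");
+ *
+ * dumps the "sde" register base plus both debug buses, then panics if
+ * the panic debugfs knob is also set.
+ */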
+
+void sde_dbg_ctrl(const char *name, ...)
+{
+	int i = 0;
+	va_list args;
+	char *blk_name = NULL;
+
+	/* no debugfs controlled events are enabled, just return */
+	if (!sde_dbg_base.debugfs_ctrl)
+		return;
+
+	va_start(args, name);
+
+	while ((blk_name = va_arg(args, char *))) {
+		if (i++ >= SDE_EVTLOG_MAX_DATA) {
+			pr_err("could not parse all dbg arguments\n");
+			break;
+		}
+
+		if (IS_ERR_OR_NULL(blk_name))
+			break;
+
+		if (!strcmp(blk_name, "stop_ftrace") &&
+				sde_dbg_base.debugfs_ctrl &
+				DBG_CTRL_STOP_FTRACE) {
+			pr_debug("tracing off\n");
+			tracing_off();
+		}
+
+		if (!strcmp(blk_name, "panic_underrun") &&
+				sde_dbg_base.debugfs_ctrl &
+				DBG_CTRL_PANIC_UNDERRUN) {
+			pr_debug("panic underrun\n");
+			panic("underrun");
+		}
+	}
+
+}
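+
+/*
+ * Example (illustrative): request that tracing stop and that an underrun
+ * cause a panic; each only fires if its DBG_CTRL_* bit was first written
+ * to the dbg_ctrl debugfs file:
+ *
+ *	SDE_DBG_CTRL("stop_ftrace", "panic_underrun");
+ */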
+
+/**
+ * sde_dbg_debugfs_open - debugfs open handler for evtlog dump
+ * @inode: debugfs inode
+ * @file: file handle
+ */
+static int sde_dbg_debugfs_open(struct inode *inode, struct file *file)
+{
+	if (!inode || !file)
+		return -EINVAL;
+
+	/* non-seekable */
+	file->f_mode &= ~(FMODE_LSEEK | FMODE_PREAD | FMODE_PWRITE);
+	file->private_data = inode->i_private;
+	return 0;
+}
+
+/**
+ * sde_evtlog_dump_read - debugfs read handler for evtlog dump
+ * @file: file handler
+ * @buff: user buffer content for debugfs
+ * @count: size of user buffer
+ * @ppos: position offset of user buffer
+ */
+static ssize_t sde_evtlog_dump_read(struct file *file, char __user *buff,
+		size_t count, loff_t *ppos)
+{
+	ssize_t len = 0;
+	char evtlog_buf[SDE_EVTLOG_BUF_MAX];
+
+	if (!buff || !ppos)
+		return -EINVAL;
+
+	len = sde_evtlog_dump_to_buffer(sde_dbg_base.evtlog, evtlog_buf,
+			SDE_EVTLOG_BUF_MAX, true);
+	if (len < 0 || len > count) {
+		pr_err("len is more than user buffer size");
+		return 0;
+	}
+
+	if (copy_to_user(buff, evtlog_buf, len))
+		return -EFAULT;
+	*ppos += len;
+
+	return len;
+}
+
+/**
+ * sde_evtlog_dump_write - debugfs write handler for evtlog dump
+ * @file: file handler
+ * @user_buf: user buffer content from debugfs
+ * @count: size of user buffer
+ * @ppos: position offset of user buffer
+ */
+static ssize_t sde_evtlog_dump_write(struct file *file,
+	const char __user *user_buf, size_t count, loff_t *ppos)
+{
+	_sde_dump_reg_all();
+
+	sde_evtlog_dump_all(sde_dbg_base.evtlog);
+
+	_sde_dbg_dump_sde_dbg_bus(&sde_dbg_base.dbgbus_sde);
+	_sde_dbg_dump_vbif_dbg_bus(&sde_dbg_base.dbgbus_vbif_rt);
+
+	if (sde_dbg_base.panic_on_err)
+		panic("sde");
+
+	return count;
+}
+
+static const struct file_operations sde_evtlog_fops = {
+	.open = sde_dbg_debugfs_open,
+	.read = sde_evtlog_dump_read,
+	.write = sde_evtlog_dump_write,
+};
+
+/**
+ * sde_dbg_ctrl_read - debugfs read handler for debug ctrl read
+ * @file: file handler
+ * @buff: user buffer content for debugfs
+ * @count: size of user buffer
+ * @ppos: position offset of user buffer
+ */
+static ssize_t sde_dbg_ctrl_read(struct file *file, char __user *buff,
+		size_t count, loff_t *ppos)
+{
+	ssize_t len = 0;
+	char buf[24] = {'\0'};
+
+	if (!buff || !ppos)
+		return -EINVAL;
+
+	if (*ppos)
+		return 0;	/* the end */
+
+	len = snprintf(buf, sizeof(buf), "0x%x\n", sde_dbg_base.debugfs_ctrl);
+	pr_debug("%s: ctrl:0x%x len:0x%zx\n",
+		__func__, sde_dbg_base.debugfs_ctrl, len);
+
+	if ((count < sizeof(buf)) || copy_to_user(buff, buf, len)) {
+		pr_err("error copying the buffer! count:0x%zx\n", count);
+		return -EFAULT;
+	}
+
+	*ppos += len;	/* increase offset */
+	return len;
+}
+
+/**
+ * sde_dbg_ctrl_write - debugfs write handler for debug ctrl
+ * @file: file handler
+ * @user_buf: user buffer content from debugfs
+ * @count: size of user buffer
+ * @ppos: position offset of user buffer
+ */
+static ssize_t sde_dbg_ctrl_write(struct file *file,
+	const char __user *user_buf, size_t count, loff_t *ppos)
+{
+	u32 dbg_ctrl = 0;
+	char buf[24];
+
+	if (!file) {
+		pr_err("DbgDbg: %s: error no file --\n", __func__);
+		return -EINVAL;
+	}
+
+	if (count >= sizeof(buf))
+		return -EFAULT;
+
+	if (copy_from_user(buf, user_buf, count))
+		return -EFAULT;
+
+	buf[count] = 0; /* end of string */
+
+	if (kstrtouint(buf, 0, &dbg_ctrl)) {
+		pr_err("%s: error in the number of bytes\n", __func__);
+		return -EFAULT;
+	}
+
+	pr_debug("dbg_ctrl_read:0x%x\n", dbg_ctrl);
+	sde_dbg_base.debugfs_ctrl = dbg_ctrl;
+
+	return count;
+}
+
+static const struct file_operations sde_dbg_ctrl_fops = {
+	.open = sde_dbg_debugfs_open,
+	.read = sde_dbg_ctrl_read,
+	.write = sde_dbg_ctrl_write,
+};
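+
+/*
+ * From user space (path illustrative, and assuming DBG_CTRL_STOP_FTRACE
+ * is BIT(0) and DBG_CTRL_PANIC_UNDERRUN is BIT(1)):
+ *
+ *	echo 0x3 > /sys/kernel/debug/.../evt_dbg/dbg_ctrl
+ *
+ * enables both actions; reading the file returns the current mask.
+ */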
+
+void sde_dbg_init_dbg_buses(u32 hwversion)
+{
+	static struct sde_dbg_base *dbg = &sde_dbg_base;
+	char debug_name[80] = "";
+
+	memset(&dbg->dbgbus_sde, 0, sizeof(dbg->dbgbus_sde));
+	memset(&dbg->dbgbus_vbif_rt, 0, sizeof(dbg->dbgbus_vbif_rt));
+
+	switch (hwversion) {
+	case SDE_HW_VER_300:
+	case SDE_HW_VER_301:
+		dbg->dbgbus_sde.entries = dbg_bus_sde_8998;
+		dbg->dbgbus_sde.cmn.entries_size = ARRAY_SIZE(dbg_bus_sde_8998);
+		dbg->dbgbus_sde.cmn.flags = DBGBUS_FLAGS_DSPP;
+
+		dbg->dbgbus_vbif_rt.entries = vbif_dbg_bus_msm8998;
+		dbg->dbgbus_vbif_rt.cmn.entries_size =
+				ARRAY_SIZE(vbif_dbg_bus_msm8998);
+		break;
+	default:
+		pr_err("unsupported chipset id %u\n", hwversion);
+		break;
+	}
+
+	if (dbg->dbgbus_sde.entries) {
+		dbg->dbgbus_sde.cmn.name = DBGBUS_NAME_SDE;
+		snprintf(debug_name, sizeof(debug_name), "%s_dbgbus",
+				dbg->dbgbus_sde.cmn.name);
+		dbg->dbgbus_sde.cmn.enable_mask = DEFAULT_DBGBUS_SDE;
+		debugfs_create_u32(debug_name, 0600, dbg->root,
+				&dbg->dbgbus_sde.cmn.enable_mask);
+	}
+
+	if (dbg->dbgbus_vbif_rt.entries) {
+		dbg->dbgbus_vbif_rt.cmn.name = DBGBUS_NAME_VBIF_RT;
+		snprintf(debug_name, sizeof(debug_name), "%s_dbgbus",
+				dbg->dbgbus_vbif_rt.cmn.name);
+		dbg->dbgbus_vbif_rt.cmn.enable_mask = DEFAULT_DBGBUS_VBIFRT;
+		debugfs_create_u32(debug_name, 0600, dbg->root,
+				&dbg->dbgbus_vbif_rt.cmn.enable_mask);
+	}
+}
+
+int sde_dbg_init(struct dentry *debugfs_root, struct device *dev,
+		struct sde_dbg_power_ctrl *power_ctrl)
+{
+	int i;
+
+	mutex_init(&sde_dbg_base.mutex);
+	INIT_LIST_HEAD(&sde_dbg_base.reg_base_list);
+	sde_dbg_base.dev = dev;
+	sde_dbg_base.power_ctrl = *power_ctrl;
+
+	sde_dbg_base.evtlog = sde_evtlog_init();
+	if (IS_ERR_OR_NULL(sde_dbg_base.evtlog))
+		return PTR_ERR(sde_dbg_base.evtlog);
+
+	sde_dbg_base_evtlog = sde_dbg_base.evtlog;
+
+	sde_dbg_base.root = debugfs_create_dir("evt_dbg", debugfs_root);
+	if (IS_ERR_OR_NULL(sde_dbg_base.root)) {
+		pr_err("debugfs_create_dir fail, error %ld\n",
+		       PTR_ERR(sde_dbg_base.root));
+		sde_dbg_base.root = NULL;
+		return -ENODEV;
+	}
+
+	INIT_WORK(&sde_dbg_base.dump_work, _sde_dump_work);
+	sde_dbg_base.work_panic = false;
+
+	for (i = 0; i < SDE_EVTLOG_ENTRY; i++)
+		sde_dbg_base.evtlog->logs[i].counter = i;
+
+	debugfs_create_file("dbg_ctrl", 0600, sde_dbg_base.root, NULL,
+			&sde_dbg_ctrl_fops);
+	debugfs_create_file("dump", 0600, sde_dbg_base.root, NULL,
+						&sde_evtlog_fops);
+	debugfs_create_u32("enable", 0600, sde_dbg_base.root,
+			&(sde_dbg_base.evtlog->enable));
+	debugfs_create_u32("panic", 0600, sde_dbg_base.root,
+			&sde_dbg_base.panic_on_err);
+	debugfs_create_u32("reg_dump", 0600, sde_dbg_base.root,
+			&sde_dbg_base.enable_reg_dump);
+
+	sde_dbg_base.panic_on_err = DEFAULT_PANIC;
+	sde_dbg_base.enable_reg_dump = DEFAULT_REGDUMP;
+
+	pr_info("evtlog_status: enable:%d, panic:%d, dump:%d\n",
+		sde_dbg_base.evtlog->enable, sde_dbg_base.panic_on_err,
+		sde_dbg_base.enable_reg_dump);
+
+	return 0;
+}
+
+/**
+ * sde_dbg_destroy - destroy sde debug facilities
+ */
+void sde_dbg_destroy(void)
+{
+	debugfs_remove_recursive(sde_dbg_base.root);
+	sde_dbg_base.root = NULL;
+
+	sde_dbg_base_evtlog = NULL;
+	sde_evtlog_destroy(sde_dbg_base.evtlog);
+	sde_dbg_base.evtlog = NULL;
+	mutex_destroy(&sde_dbg_base.mutex);
+}
+
+/**
+ * sde_dbg_reg_base_release - release allocated reg dump file private data
+ * @inode: debugfs inode
+ * @file: file handle
+ * Return: 0 on success
+ */
+static int sde_dbg_reg_base_release(struct inode *inode, struct file *file)
+{
+	struct sde_dbg_reg_base *dbg;
+
+	if (!file)
+		return -EINVAL;
+
+	dbg = file->private_data;
+	if (!dbg)
+		return -ENODEV;
+
+	mutex_lock(&sde_dbg_base.mutex);
+	if (dbg && dbg->buf) {
+		kfree(dbg->buf);
+		dbg->buf_len = 0;
+		dbg->buf = NULL;
+	}
+	mutex_unlock(&sde_dbg_base.mutex);
+
+	return 0;
+}
+
+/**
+ * sde_dbg_reg_base_is_valid_range - verify if requested memory range is valid
+ * @off: address offset in bytes
+ * @cnt: memory size in bytes
+ * Return: true if valid; false otherwise
+ */
+static bool sde_dbg_reg_base_is_valid_range(u32 off, u32 cnt)
+{
+	static struct sde_dbg_base *dbg_base = &sde_dbg_base;
+	struct sde_dbg_reg_range *node;
+	struct sde_dbg_reg_base *base;
+
+	pr_debug("check offset=0x%x cnt=0x%x\n", off, cnt);
+
+	list_for_each_entry(base, &dbg_base->reg_base_list, reg_base_head) {
+		list_for_each_entry(node, &base->sub_range_list, head) {
+			pr_debug("%s: start=0x%x end=0x%x\n", node->range_name,
+					node->offset.start, node->offset.end);
+
+			if (node->offset.start <= off
+					&& off <= node->offset.end
+					&& off + cnt <= node->offset.end) {
+				pr_debug("valid range requested\n");
+				return true;
+			}
+		}
+	}
+
+	pr_err("invalid range requested\n");
+	return false;
+}
+
+/**
+ * sde_dbg_reg_base_offset_write - set new offset and len to debugfs reg base
+ * @file: file handler
+ * @user_buf: user buffer content from debugfs
+ * @count: size of user buffer
+ * @ppos: position offset of user buffer
+ */
+static ssize_t sde_dbg_reg_base_offset_write(struct file *file,
+		const char __user *user_buf, size_t count, loff_t *ppos)
+{
+	struct sde_dbg_reg_base *dbg;
+	u32 off = 0;
+	u32 cnt = DEFAULT_BASE_REG_CNT;
+	char buf[24];
+	ssize_t rc = count;
+
+	if (!file)
+		return -EINVAL;
+
+	dbg = file->private_data;
+	if (!dbg)
+		return -ENODEV;
+
+	if (count >= sizeof(buf))
+		return -EFAULT;
+
+	if (copy_from_user(buf, user_buf, count))
+		return -EFAULT;
+
+	buf[count] = 0;	/* end of string */
+
+	if (sscanf(buf, "%x %x", &off, &cnt) != 2)
+		return -EFAULT;
+
+	mutex_lock(&sde_dbg_base.mutex);
+	if (off > dbg->max_offset) {
+		rc = -EINVAL;
+		goto exit;
+	}
+
+	if (off % sizeof(u32)) {
+		rc = -EINVAL;
+		goto exit;
+	}
+
+	if (cnt > (dbg->max_offset - off))
+		cnt = dbg->max_offset - off;
+
+	if (cnt % sizeof(u32)) {
+		rc = -EINVAL;
+		goto exit;
+	}
+
+	if (cnt == 0) {
+		rc = -EINVAL;
+		goto exit;
+	}
+
+	if (!sde_dbg_reg_base_is_valid_range(off, cnt)) {
+		rc = -EINVAL;
+		goto exit;
+	}
+
+	dbg->off = off;
+	dbg->cnt = cnt;
+
+exit:
+	mutex_unlock(&sde_dbg_base.mutex);
+	pr_debug("offset=%x cnt=%x\n", off, cnt);
+
+	return rc;
+}
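+
+/*
+ * Usage sketch (path illustrative): select a 0x40-byte dump window at
+ * offset 0x100 within a base registered as "sde":
+ *
+ *	echo "0x100 0x40" > /sys/kernel/debug/.../evt_dbg/sde_off
+ */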
+
+/**
+ * sde_dbg_reg_base_offset_read - read current offset and len of register base
+ * @file: file handler
+ * @user_buf: user buffer content from debugfs
+ * @count: size of user buffer
+ * @ppos: position offset of user buffer
+ */
+static ssize_t sde_dbg_reg_base_offset_read(struct file *file,
+			char __user *buff, size_t count, loff_t *ppos)
+{
+	struct sde_dbg_reg_base *dbg;
+	int len = 0;
+	char buf[24] = {'\0'};
+
+	if (!file)
+		return -EINVAL;
+
+	dbg = file->private_data;
+	if (!dbg)
+		return -ENODEV;
+
+	if (!ppos)
+		return -EINVAL;
+
+	if (*ppos)
+		return 0;	/* the end */
+
+	mutex_lock(&sde_dbg_base.mutex);
+	if (dbg->off % sizeof(u32)) {
+		mutex_unlock(&sde_dbg_base.mutex);
+		return -EFAULT;
+	}
+
+	len = snprintf(buf, sizeof(buf), "0x%08zx %zx\n", dbg->off, dbg->cnt);
+	if (len < 0 || len >= sizeof(buf)) {
+		mutex_unlock(&sde_dbg_base.mutex);
+		return 0;
+	}
+
+	if ((count < sizeof(buf)) || copy_to_user(buff, buf, len)) {
+		mutex_unlock(&sde_dbg_base.mutex);
+		return -EFAULT;
+	}
+
+	*ppos += len;	/* increase offset */
+	mutex_unlock(&sde_dbg_base.mutex);
+
+	return len;
+}
+
+/**
+ * sde_dbg_reg_base_reg_write - write to reg base hw at offset a given value
+ * @file: file handler
+ * @user_buf: user buffer content from debugfs
+ * @count: size of user buffer
+ * @ppos: position offset of user buffer
+ */
+static ssize_t sde_dbg_reg_base_reg_write(struct file *file,
+		const char __user *user_buf, size_t count, loff_t *ppos)
+{
+	struct sde_dbg_reg_base *dbg;
+	size_t off;
+	u32 data, cnt;
+	char buf[24];
+
+	if (!file)
+		return -EINVAL;
+
+	dbg = file->private_data;
+	if (!dbg)
+		return -ENODEV;
+
+	if (count >= sizeof(buf))
+		return -EFAULT;
+
+	if (copy_from_user(buf, user_buf, count))
+		return -EFAULT;
+
+	buf[count] = 0;	/* end of string */
+
+	cnt = sscanf(buf, "%zx %x", &off, &data);
+
+	if (cnt < 2)
+		return -EFAULT;
+
+	mutex_lock(&sde_dbg_base.mutex);
+	if (off >= dbg->max_offset) {
+		mutex_unlock(&sde_dbg_base.mutex);
+		return -EFAULT;
+	}
+
+	_sde_dbg_enable_power(true);
+
+	writel_relaxed(data, dbg->base + off);
+
+	_sde_dbg_enable_power(false);
+
+	mutex_unlock(&sde_dbg_base.mutex);
+
+	pr_debug("addr=%zx data=%x\n", off, data);
+
+	return count;
+}
+
+/**
+ * sde_dbg_reg_base_reg_read - read len from reg base hw at current offset
+ * @file: file handler
+ * @user_buf: user buffer content from debugfs
+ * @count: size of user buffer
+ * @ppos: position offset of user buffer
+ */
+static ssize_t sde_dbg_reg_base_reg_read(struct file *file,
+			char __user *user_buf, size_t count, loff_t *ppos)
+{
+	struct sde_dbg_reg_base *dbg;
+	size_t len;
+
+	if (!file)
+		return -EINVAL;
+
+	dbg = file->private_data;
+	if (!dbg) {
+		pr_err("invalid handle\n");
+		return -ENODEV;
+	}
+
+	if (!ppos)
+		return -EINVAL;
+
+	mutex_lock(&sde_dbg_base.mutex);
+	if (!dbg->buf) {
+		char *hwbuf;
+		char dump_buf[64];
+		char __iomem *ioptr;
+		int cnt, tot;
+
+		dbg->buf_len = sizeof(dump_buf) *
+			DIV_ROUND_UP(dbg->cnt, ROW_BYTES);
+
+		if (dbg->buf_len % sizeof(u32))
+			return -EINVAL;
+
+		dbg->buf = kzalloc(dbg->buf_len, GFP_KERNEL);
+
+		if (!dbg->buf) {
+			mutex_unlock(&sde_dbg_base.mutex);
+			return -ENOMEM;
+		}
+
+		hwbuf = kzalloc(ROW_BYTES, GFP_KERNEL);
+		if (!hwbuf) {
+			kfree(dbg->buf);
+			mutex_unlock(&sde_dbg_base.mutex);
+			return -ENOMEM;
+		}
+
+		ioptr = dbg->base + dbg->off;
+		tot = 0;
+		_sde_dbg_enable_power(true);
+
+		for (cnt = dbg->cnt; cnt > 0; cnt -= ROW_BYTES) {
+			memcpy_fromio(hwbuf, ioptr, ROW_BYTES);
+			hex_dump_to_buffer(hwbuf,
+					   min(cnt, ROW_BYTES),
+					   ROW_BYTES, GROUP_BYTES, dump_buf,
+					   sizeof(dump_buf), false);
+			len = scnprintf(dbg->buf + tot, dbg->buf_len - tot,
+					"0x%08x: %s\n",
+					((int) (unsigned long) ioptr) -
+					((int) (unsigned long) dbg->base),
+					dump_buf);
+
+			ioptr += ROW_BYTES;
+			tot += len;
+			if (tot >= dbg->buf_len)
+				break;
+		}
+
+		_sde_dbg_enable_power(false);
+
+		dbg->buf_len = tot;
+		kfree(hwbuf);
+	}
+
+	if (*ppos >= dbg->buf_len) {
+		mutex_unlock(&sde_dbg_base.mutex);
+		return 0; /* done reading */
+	}
+
+	len = min(count, dbg->buf_len - (size_t) *ppos);
+	if (copy_to_user(user_buf, dbg->buf + *ppos, len)) {
+		mutex_unlock(&sde_dbg_base.mutex);
+		pr_err("failed to copy to user\n");
+		return -EFAULT;
+	}
+
+	*ppos += len; /* increase offset */
+	mutex_unlock(&sde_dbg_base.mutex);
+
+	return len;
+}
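+
+/*
+ * Reading the companion "<name>_reg" file then returns a hex dump of
+ * the selected window, one ROW_BYTES-wide row per line:
+ *
+ *	cat /sys/kernel/debug/.../evt_dbg/sde_reg
+ */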
+
+static const struct file_operations sde_off_fops = {
+	.open = sde_dbg_debugfs_open,
+	.release = sde_dbg_reg_base_release,
+	.read = sde_dbg_reg_base_offset_read,
+	.write = sde_dbg_reg_base_offset_write,
+};
+
+static const struct file_operations sde_reg_fops = {
+	.open = sde_dbg_debugfs_open,
+	.release = sde_dbg_reg_base_release,
+	.read = sde_dbg_reg_base_reg_read,
+	.write = sde_dbg_reg_base_reg_write,
+};
+
+int sde_dbg_reg_register_base(const char *name, void __iomem *base,
+		size_t max_offset)
+{
+	struct sde_dbg_base *dbg_base = &sde_dbg_base;
+	struct sde_dbg_reg_base *reg_base;
+	struct dentry *ent_off, *ent_reg;
+	char dn[80] = "";
+	int prefix_len = 0;
+
+	reg_base = kzalloc(sizeof(*reg_base), GFP_KERNEL);
+	if (!reg_base)
+		return -ENOMEM;
+
+	if (name)
+		strlcpy(reg_base->name, name, sizeof(reg_base->name));
+	reg_base->base = base;
+	reg_base->max_offset = max_offset;
+	reg_base->off = 0;
+	reg_base->cnt = DEFAULT_BASE_REG_CNT;
+	reg_base->reg_dump = NULL;
+
+	if (name)
+		prefix_len = snprintf(dn, sizeof(dn), "%s_", name);
+	strlcpy(dn + prefix_len, "off", sizeof(dn) - prefix_len);
+	ent_off = debugfs_create_file(dn, 0600, dbg_base->root, reg_base,
+			&sde_off_fops);
+	if (IS_ERR_OR_NULL(ent_off)) {
+		pr_err("debugfs_create_file: offset fail\n");
+		goto off_fail;
+	}
+
+	strlcpy(dn + prefix_len, "reg", sizeof(dn) - prefix_len);
+	ent_reg = debugfs_create_file(dn, 0600, dbg_base->root, reg_base,
+			&sde_reg_fops);
+	if (IS_ERR_OR_NULL(ent_reg)) {
+		pr_err("debugfs_create_file: reg fail\n");
+		goto reg_fail;
+	}
+
+	/* Initialize list to make sure check for null list will be valid */
+	INIT_LIST_HEAD(&reg_base->sub_range_list);
+
+	pr_debug("%s base: %pK max_offset 0x%zX\n", reg_base->name,
+			reg_base->base, reg_base->max_offset);
+
+	list_add(&reg_base->reg_base_head, &dbg_base->reg_base_list);
+
+	return 0;
+reg_fail:
+	debugfs_remove(ent_off);
+off_fail:
+	kfree(reg_base);
+	return -ENODEV;
+}
+
+void sde_dbg_reg_register_dump_range(const char *base_name,
+		const char *range_name, u32 offset_start, u32 offset_end,
+		uint32_t xin_id)
+{
+	struct sde_dbg_reg_base *reg_base;
+	struct sde_dbg_reg_range *range;
+
+	reg_base = _sde_dump_get_blk_addr(base_name);
+	if (!reg_base) {
+		pr_err("error: for range %s unable to locate base %s\n",
+				range_name, base_name);
+		return;
+	}
+
+	if (!range_name || strlen(range_name) == 0) {
+		pr_err("%pS: bad range name, base_name %s, offset_start 0x%X, end 0x%X\n",
+				__builtin_return_address(0), base_name,
+				offset_start, offset_end);
+		return;
+	}
+
+	if (offset_end - offset_start < REG_DUMP_ALIGN ||
+			offset_start > offset_end) {
+		pr_err("%pS: bad range, base_name %s, range_name %s, offset_start 0x%X, end 0x%X\n",
+				__builtin_return_address(0), base_name,
+				range_name, offset_start, offset_end);
+		return;
+	}
+
+	range = kzalloc(sizeof(*range), GFP_KERNEL);
+	if (!range)
+		return;
+
+	strlcpy(range->range_name, range_name, sizeof(range->range_name));
+	range->offset.start = offset_start;
+	range->offset.end = offset_end;
+	range->xin_id = xin_id;
+	list_add_tail(&range->head, &reg_base->sub_range_list);
+
+	pr_debug("base %s, range %s, start 0x%X, end 0x%X\n",
+			base_name, range->range_name,
+			range->offset.start, range->offset.end);
+}
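+
+/*
+ * Typical probe-time registration sequence; the names, base pointer and
+ * offsets below are hypothetical:
+ *
+ *	sde_dbg_reg_register_base("sde", sde_io_base, 0x20000);
+ *	sde_dbg_reg_register_dump_range("sde", "ctl_0", 0x1000, 0x1200, 0);
+ */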
+
+void sde_dbg_set_sde_top_offset(u32 blk_off)
+{
+	sde_dbg_base.dbgbus_sde.top_blk_off = blk_off;
+}
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/gpu/drm/msm/sde_dbg_evtlog.c	2019-01-22 16:16:23.523246588 +0100
@@ -0,0 +1,198 @@
+/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt)	"sde_dbg:[%s] " fmt, __func__
+
+#include <linux/delay.h>
+#include <linux/spinlock.h>
+#include <linux/ktime.h>
+#include <linux/debugfs.h>
+#include <linux/uaccess.h>
+#include <linux/dma-buf.h>
+#include <linux/slab.h>
+
+#include "sde_dbg.h"
+#include "sde_trace.h"
+
+bool sde_evtlog_is_enabled(struct sde_dbg_evtlog *evtlog, u32 flag)
+{
+	if (!evtlog)
+		return false;
+
+	return (flag & evtlog->enable) ||
+		(flag == SDE_EVTLOG_ALL && evtlog->enable);
+}
+
+void sde_evtlog_log(struct sde_dbg_evtlog *evtlog, const char *name, int line,
+		int flag, ...)
+{
+	unsigned long flags;
+	int i, val = 0;
+	va_list args;
+	struct sde_dbg_evtlog_log *log;
+
+	if (!evtlog)
+		return;
+
+	if (!sde_evtlog_is_enabled(evtlog, flag))
+		return;
+
+	spin_lock_irqsave(&evtlog->spin_lock, flags);
+	log = &evtlog->logs[evtlog->curr];
+	log->time = ktime_to_us(ktime_get());
+	log->name = name;
+	log->line = line;
+	log->data_cnt = 0;
+	log->pid = current->pid;
+
+	va_start(args, flag);
+	for (i = 0; i < SDE_EVTLOG_MAX_DATA; i++) {
+
+		val = va_arg(args, int);
+		if (val == SDE_EVTLOG_DATA_LIMITER)
+			break;
+
+		log->data[i] = val;
+	}
+	va_end(args);
+	log->data_cnt = i;
+	evtlog->curr = (evtlog->curr + 1) % SDE_EVTLOG_ENTRY;
+	evtlog->last++;
+
+	trace_sde_evtlog(name, line, i > 0 ? log->data[0] : 0,
+			i > 1 ? log->data[1] : 0);
+
+	spin_unlock_irqrestore(&evtlog->spin_lock, flags);
+}
+
+/* always dump the last entries which are not dumped yet */
+static bool _sde_evtlog_dump_calc_range(struct sde_dbg_evtlog *evtlog,
+	bool update_last_entry)
+{
+	bool need_dump = true;
+	unsigned long flags;
+
+	if (!evtlog)
+		return false;
+
+	spin_lock_irqsave(&evtlog->spin_lock, flags);
+
+	evtlog->first = evtlog->next;
+
+	if (update_last_entry)
+		evtlog->last_dump = evtlog->last;
+
+	if (evtlog->last_dump == evtlog->first) {
+		need_dump = false;
+		goto dump_exit;
+	}
+
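+	/*
+	 * first and last_dump are free-running counters; fold them back
+	 * into the ring range so last_dump stays ahead of first across
+	 * wraparound.
+	 */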
+	if (evtlog->last_dump < evtlog->first) {
+		evtlog->first %= SDE_EVTLOG_ENTRY;
+		if (evtlog->last_dump < evtlog->first)
+			evtlog->last_dump += SDE_EVTLOG_ENTRY;
+	}
+
+	if ((evtlog->last_dump - evtlog->first) > SDE_EVTLOG_PRINT_ENTRY) {
+		pr_info("evtlog skipping %d entries, last=%d\n",
+			evtlog->last_dump - evtlog->first -
+			SDE_EVTLOG_PRINT_ENTRY,
+			evtlog->last_dump - 1);
+		evtlog->first = evtlog->last_dump - SDE_EVTLOG_PRINT_ENTRY;
+	}
+	evtlog->next = evtlog->first + 1;
+
+dump_exit:
+	spin_unlock_irqrestore(&evtlog->spin_lock, flags);
+
+	return need_dump;
+}
+
+ssize_t sde_evtlog_dump_to_buffer(struct sde_dbg_evtlog *evtlog,
+		char *evtlog_buf, ssize_t evtlog_buf_size,
+		bool update_last_entry)
+{
+	int i;
+	ssize_t off = 0;
+	struct sde_dbg_evtlog_log *log, *prev_log;
+	unsigned long flags;
+
+	if (!evtlog || !evtlog_buf)
+		return 0;
+
+	/* update markers, exit if nothing to print */
+	if (!_sde_evtlog_dump_calc_range(evtlog, update_last_entry))
+		return 0;
+
+	spin_lock_irqsave(&evtlog->spin_lock, flags);
+
+	log = &evtlog->logs[evtlog->first % SDE_EVTLOG_ENTRY];
+
+	prev_log = &evtlog->logs[(evtlog->first - 1) %
+		SDE_EVTLOG_ENTRY];
+
+	off = snprintf((evtlog_buf + off), (evtlog_buf_size - off), "%s:%-4d",
+		log->name, log->line);
+
+	if (off < SDE_EVTLOG_BUF_ALIGN) {
+		memset((evtlog_buf + off), 0x20, (SDE_EVTLOG_BUF_ALIGN - off));
+		off = SDE_EVTLOG_BUF_ALIGN;
+	}
+
+	off += snprintf((evtlog_buf + off), (evtlog_buf_size - off),
+		"=>[%-8d:%-11llu:%9llu][%-4d]:", evtlog->first,
+		log->time, (log->time - prev_log->time), log->pid);
+
+	for (i = 0; i < log->data_cnt; i++)
+		off += snprintf((evtlog_buf + off), (evtlog_buf_size - off),
+			"%x ", log->data[i]);
+
+	off += snprintf((evtlog_buf + off), (evtlog_buf_size - off), "\n");
+
+	spin_unlock_irqrestore(&evtlog->spin_lock, flags);
+
+	return off;
+}
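+
+/*
+ * Each formatted entry looks roughly like (spacing illustrative):
+ *
+ *	<func>:<line>    =>[<index>:<time us>:<delta us>][<pid>]: <data...>
+ */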
+
+void sde_evtlog_dump_all(struct sde_dbg_evtlog *evtlog)
+{
+	char buf[SDE_EVTLOG_BUF_MAX];
+	bool update_last_entry = true;
+
+	if (!evtlog)
+		return;
+
+	while (sde_evtlog_dump_to_buffer(evtlog, buf, sizeof(buf),
+		update_last_entry)) {
+		pr_info("%s", buf);
+		update_last_entry = false;
+	}
+}
+
+struct sde_dbg_evtlog *sde_evtlog_init(void)
+{
+	struct sde_dbg_evtlog *evtlog;
+
+	evtlog = kzalloc(sizeof(*evtlog), GFP_KERNEL);
+	if (!evtlog)
+		return ERR_PTR(-ENOMEM);
+
+	spin_lock_init(&evtlog->spin_lock);
+	evtlog->enable = SDE_EVTLOG_DEFAULT_ENABLE;
+
+	return evtlog;
+}
+
+void sde_evtlog_destroy(struct sde_dbg_evtlog *evtlog)
+{
+	kfree(evtlog);
+}
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/gpu/drm/msm/sde_dbg.h	2019-01-22 16:16:23.523246588 +0100
@@ -0,0 +1,341 @@
+/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef SDE_DBG_H_
+#define SDE_DBG_H_
+
+#include <stdarg.h>
+#include <linux/debugfs.h>
+#include <linux/list.h>
+
+#define SDE_EVTLOG_DATA_LIMITER	(0xC0DEBEEF)
+#define SDE_EVTLOG_FUNC_ENTRY	0x1111
+#define SDE_EVTLOG_FUNC_EXIT	0x2222
+#define SDE_EVTLOG_ERROR	0xebad
+
+#define SDE_DBG_DUMP_DATA_LIMITER (NULL)
+
+enum sde_dbg_evtlog_flag {
+	SDE_EVTLOG_DEFAULT = BIT(0),
+	SDE_EVTLOG_IRQ = BIT(1),
+	SDE_EVTLOG_ALL = BIT(7)
+};
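+
+/*
+ * The evtlog "enable" debugfs file holds a mask of these flags, e.g.
+ * (path illustrative) to enable the DEFAULT and IRQ areas:
+ *
+ *	echo 0x3 > /sys/kernel/debug/.../evt_dbg/enable
+ */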
+
+enum sde_dbg_dump_flag {
+	SDE_DBG_DUMP_IN_LOG = BIT(0),
+	SDE_DBG_DUMP_IN_MEM = BIT(1),
+};
+
+#ifdef CONFIG_DRM_SDE_EVTLOG_DEBUG
+#define SDE_EVTLOG_DEFAULT_ENABLE 1
+#else
+#define SDE_EVTLOG_DEFAULT_ENABLE 0
+#endif
+
+/*
+ * evtlog will print this number of entries when it is called through
+ * sysfs node or panic. This prevents kernel log from evtlog message
+ * flood.
+ */
+#define SDE_EVTLOG_PRINT_ENTRY	256
+
+/*
+ * evtlog keeps this number of entries in memory for debug purpose. This
+ * number must be greater than print entry to prevent out of bound evtlog
+ * entry array access.
+ */
+#define SDE_EVTLOG_ENTRY	(SDE_EVTLOG_PRINT_ENTRY * 8)
+#define SDE_EVTLOG_MAX_DATA 15
+#define SDE_EVTLOG_BUF_MAX 512
+#define SDE_EVTLOG_BUF_ALIGN 32
+
+struct sde_dbg_power_ctrl {
+	void *handle;
+	void *client;
+	int (*enable_fn)(void *handle, void *client, bool enable);
+};
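+
+/*
+ * Sketch of how a caller might wire this up; phandle, pclient and
+ * my_enable_fn are hypothetical, driver-specific objects:
+ *
+ *	struct sde_dbg_power_ctrl pc = {
+ *		.handle = phandle,
+ *		.client = pclient,
+ *		.enable_fn = my_enable_fn,
+ *	};
+ *	sde_dbg_init(debugfs_root, dev, &pc);
+ */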
+
+struct sde_dbg_evtlog_log {
+	u32 counter;
+	s64 time;
+	const char *name;
+	int line;
+	u32 data[SDE_EVTLOG_MAX_DATA];
+	u32 data_cnt;
+	int pid;
+};
+
+struct sde_dbg_evtlog {
+	struct sde_dbg_evtlog_log logs[SDE_EVTLOG_ENTRY];
+	u32 first;
+	u32 last;
+	u32 last_dump;
+	u32 curr;
+	u32 next;
+	u32 enable;
+	spinlock_t spin_lock;
+};
+
+extern struct sde_dbg_evtlog *sde_dbg_base_evtlog;
+
+/**
+ * SDE_EVT32 - Write a list of 32bit values to the event log, default area
+ * ... - variable arguments
+ */
+#define SDE_EVT32(...) sde_evtlog_log(sde_dbg_base_evtlog, __func__, \
+		__LINE__, SDE_EVTLOG_DEFAULT, ##__VA_ARGS__, \
+		SDE_EVTLOG_DATA_LIMITER)
+
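+/*
+ * Example (illustrative): SDE_EVT32(crtc_id, flags) records up to
+ * SDE_EVTLOG_MAX_DATA values in the default area; the macro appends the
+ * SDE_EVTLOG_DATA_LIMITER terminator itself.
+ */
+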
+/**
+ * SDE_EVT32_IRQ - Write a list of 32bit values to the event log, IRQ area
+ * ... - variable arguments
+ */
+#define SDE_EVT32_IRQ(...) sde_evtlog_log(sde_dbg_base_evtlog, __func__, \
+		__LINE__, SDE_EVTLOG_IRQ, ##__VA_ARGS__, \
+		SDE_EVTLOG_DATA_LIMITER)
+
+/**
+ * SDE_DBG_DUMP - trigger dumping of all sde_dbg facilities
+ * @va_args:	list of named register dump ranges and regions to dump, as
+ *		registered previously through sde_dbg_reg_register_base and
+ *		sde_dbg_reg_register_dump_range.
+ *		Including the special name "panic" will trigger a panic after
+ *		the dumping work has completed.
+ */
+#define SDE_DBG_DUMP(...) sde_dbg_dump(false, __func__, ##__VA_ARGS__, \
+		SDE_DBG_DUMP_DATA_LIMITER)
+
+/**
+ * SDE_DBG_DUMP_WQ - trigger dumping of all sde_dbg facilities, queuing the work
+ * @va_args:	list of named register dump ranges and regions to dump, as
+ *		registered previously through sde_dbg_reg_register_base and
+ *		sde_dbg_reg_register_dump_range.
+ *		Including the special name "panic" will trigger a panic after
+ *		the dumping work has completed.
+ */
+#define SDE_DBG_DUMP_WQ(...) sde_dbg_dump(true, __func__, ##__VA_ARGS__, \
+		SDE_DBG_DUMP_DATA_LIMITER)
+
+/**
+ * SDE_DBG_CTRL - trigger named debug actions in the driver
+ * @va_args:	list of actions to trigger; each action must also be enabled
+ *		through the dbg_ctrl debugfs mask before it takes effect
+ */
+#define SDE_DBG_CTRL(...) sde_dbg_ctrl(__func__, ##__VA_ARGS__, \
+		SDE_DBG_DUMP_DATA_LIMITER)
+
+#if defined(CONFIG_DEBUG_FS)
+
+/**
+ * sde_evtlog_init - allocate a new event log object
+ * Returns:	evtlog or -ERROR
+ */
+struct sde_dbg_evtlog *sde_evtlog_init(void);
+
+/**
+ * sde_evtlog_destroy - destroy previously allocated event log
+ * @evtlog:	pointer to evtlog
+ * Returns:	none
+ */
+void sde_evtlog_destroy(struct sde_dbg_evtlog *evtlog);
+
+/**
+ * sde_evtlog_log - log an entry into the event log.
+ *	log collection may be enabled/disabled entirely via debugfs
+ *	log area collection may be filtered by user provided flags via debugfs.
+ * @evtlog:	pointer to evtlog
+ * @name:	function name of call site
+ * @line:	line number of call site
+ * @flag:	log area filter flag checked against user's debugfs request
+ * Returns:	none
+ */
+void sde_evtlog_log(struct sde_dbg_evtlog *evtlog, const char *name, int line,
+		int flag, ...);
+
+/**
+ * sde_evtlog_dump_all - print all entries in event log to kernel log
+ * @evtlog:	pointer to evtlog
+ * Returns:	none
+ */
+void sde_evtlog_dump_all(struct sde_dbg_evtlog *evtlog);
+
+/**
+ * sde_evtlog_is_enabled - check whether log collection is enabled for given
+ *	event log and log area flag
+ * @evtlog:	pointer to evtlog
+ * @flag:	log area filter flag
+ * Returns:	true if log collection is enabled for the given flag
+ */
+bool sde_evtlog_is_enabled(struct sde_dbg_evtlog *evtlog, u32 flag);
+
+/**
+ * sde_evtlog_dump_to_buffer - print content of event log to the given buffer
+ * @evtlog:		pointer to evtlog
+ * @evtlog_buf:		target buffer to print into
+ * @evtlog_buf_size:	size of target buffer
+ * @update_last_entry:	whether or not to stop at the most recent entry
+ * Returns:		number of bytes written to buffer
+ */
+ssize_t sde_evtlog_dump_to_buffer(struct sde_dbg_evtlog *evtlog,
+		char *evtlog_buf, ssize_t evtlog_buf_size,
+		bool update_last_entry);
+
+/**
+ * sde_dbg_init_dbg_buses - initialize debug bus dumping support for the chipset
+ * @hwversion:		Chipset revision
+ */
+void sde_dbg_init_dbg_buses(u32 hwversion);
+
+/**
+ * sde_dbg_init - initialize global sde debug facilities: evtlog, regdump
+ * @debugfs_root:	debugfs root in which to create sde debug entries
+ * @dev:		device handle
+ * @power_ctrl:		power control callback structure for enabling clocks
+ *			during register dumping
+ * Returns:		0 or -ERROR
+ */
+int sde_dbg_init(struct dentry *debugfs_root, struct device *dev,
+		struct sde_dbg_power_ctrl *power_ctrl);
+
+/**
+ * sde_dbg_destroy - destroy the global sde debug facilities
+ * Returns:	none
+ */
+void sde_dbg_destroy(void);
+
+/**
+ * sde_dbg_dump - trigger dumping of all sde_dbg facilities
+ * @queue_work:	whether to queue the dumping work to the work_struct
+ * @name:	string indicating origin of dump
+ * @va_args:	list of named register dump ranges and regions to dump, as
+ *		registered previously through sde_dbg_reg_register_base and
+ *		sde_dbg_reg_register_dump_range.
+ *		Including the special name "panic" will trigger a panic after
+ *		the dumping work has completed.
+ * Returns:	none
+ */
+void sde_dbg_dump(bool queue_work, const char *name, ...);
+
+/**
+ * sde_dbg_ctrl - trigger specific debug actions in the driver. Each action
+ *		must first be enabled through the dbg_ctrl debugfs entry,
+ *		otherwise the corresponding call is a no-op.
+ * @name:	string indicating origin of the call
+ * @va_args:	list of actions to trigger
+ * Returns:	none
+ */
+void sde_dbg_ctrl(const char *name, ...);
+
+/**
+ * sde_dbg_reg_register_base - register a hw register address section for later
+ *	dumping. call this before calling sde_dbg_reg_register_dump_range
+ *	to be able to specify sub-ranges within the base hw range.
+ * @name:	name of base region
+ * @base:	base pointer of region
+ * @max_offset:	length of region
+ * Returns:	0 or -ERROR
+ */
+int sde_dbg_reg_register_base(const char *name, void __iomem *base,
+		size_t max_offset);
+
+/**
+ * sde_dbg_reg_register_dump_range - register a hw register sub-region for
+ *	later register dumping associated with base specified by
+ *	sde_dbg_reg_register_base
+ * @base_name:		name of base region
+ * @range_name:		name of sub-range within base region
+ * @offset_start:	sub-range's start offset from base's base pointer
+ * @offset_end:		sub-range's end offset from base's base pointer
+ * @xin_id:		xin id
+ * Returns:		none
+ */
+void sde_dbg_reg_register_dump_range(const char *base_name,
+		const char *range_name, u32 offset_start, u32 offset_end,
+		uint32_t xin_id);
+
+/**
+ * sde_dbg_set_sde_top_offset - set the target specific offset from mdss base
+ *	address of the top registers. Used for accessing debug bus controls.
+ * @blk_off: offset from mdss base of the top block
+ */
+void sde_dbg_set_sde_top_offset(u32 blk_off);
+#else
+static inline struct sde_dbg_evtlog *sde_evtlog_init(void)
+{
+	return NULL;
+}
+
+static inline void sde_evtlog_destroy(struct sde_dbg_evtlog *evtlog)
+{
+}
+
+static inline void sde_evtlog_log(struct sde_dbg_evtlog *evtlog,
+		const char *name, int line, int flag, ...)
+{
+}
+
+static inline void sde_evtlog_dump_all(struct sde_dbg_evtlog *evtlog)
+{
+}
+
+static inline bool sde_evtlog_is_enabled(struct sde_dbg_evtlog *evtlog,
+		u32 flag)
+{
+	return false;
+}
+
+static inline ssize_t sde_evtlog_dump_to_buffer(struct sde_dbg_evtlog *evtlog,
+		char *evtlog_buf, ssize_t evtlog_buf_size,
+		bool update_last_entry)
+{
+	return 0;
+}
+
+static inline void sde_dbg_init_dbg_buses(u32 hwversion)
+{
+}
+
+static inline int sde_dbg_init(struct dentry *debugfs_root, struct device *dev,
+		struct sde_dbg_power_ctrl *power_ctrl)
+{
+	return 0;
+}
+
+static inline void sde_dbg_destroy(void)
+{
+}
+
+static inline void sde_dbg_dump(bool queue_work, const char *name, ...)
+{
+}
+
+static inline void sde_dbg_ctrl(const char *name, ...)
+{
+}
+
+static inline int sde_dbg_reg_register_base(const char *name,
+		void __iomem *base, size_t max_offset)
+{
+	return 0;
+}
+
+static inline void sde_dbg_reg_register_dump_range(const char *base_name,
+		const char *range_name, u32 offset_start, u32 offset_end,
+		uint32_t xin_id)
+{
+}
+
+static inline void sde_dbg_set_sde_top_offset(u32 blk_off)
+{
+}
+#endif /* defined(CONFIG_DEBUG_FS) */
+
+#endif /* SDE_DBG_H_ */
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/gpu/drm/msm/sde_edid_parser.c	2019-10-29 09:26:23.645203198 +0100
@@ -0,0 +1,631 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <drm/drm_edid.h>
+
+#include "sde_kms.h"
+#include "sde_edid_parser.h"
+
+/* TODO: copy from drm_edid.c and mdss_hdmi_edid.c. remove if using ELD */
+#define DBC_START_OFFSET 4
+#define EDID_DTD_LEN 18
+
+enum data_block_types {
+	RESERVED,
+	AUDIO_DATA_BLOCK,
+	VIDEO_DATA_BLOCK,
+	VENDOR_SPECIFIC_DATA_BLOCK,
+	SPEAKER_ALLOCATION_DATA_BLOCK,
+	VESA_DTC_DATA_BLOCK,
+	RESERVED2,
+	USE_EXTENDED_TAG
+};
+
+static u8 *sde_find_edid_extension(struct edid *edid, int ext_id)
+{
+	u8 *edid_ext = NULL;
+	int i;
+
+	/* No EDID or EDID extensions */
+	if (edid == NULL || edid->extensions == 0)
+		return NULL;
+
+	/* Find CEA extension */
+	for (i = 0; i < edid->extensions; i++) {
+		edid_ext = (u8 *)edid + EDID_LENGTH * (i + 1);
+		if (edid_ext[0] == ext_id)
+			break;
+	}
+
+	if (i == edid->extensions)
+		return NULL;
+
+	return edid_ext;
+}
+
+static u8 *sde_find_cea_extension(struct edid *edid)
+{
+	return sde_find_edid_extension(edid, SDE_CEA_EXT);
+}
+
+static int
+sde_cea_db_payload_len(const u8 *db)
+{
+	return db[0] & 0x1f;
+}
+
+static int
+sde_cea_db_tag(const u8 *db)
+{
+	return db[0] >> 5;
+}
+
+static int
+sde_cea_revision(const u8 *cea)
+{
+	return cea[1];
+}
+
+static int
+sde_cea_db_offsets(const u8 *cea, int *start, int *end)
+{
+	/* Data block offset in CEA extension block */
+	*start = 4;
+	*end = cea[2];
+	if (*end == 0)
+		*end = 127;
+	if (*end < 4 || *end > 127)
+		return -ERANGE;
+	return 0;
+}
+
+#define sde_for_each_cea_db(cea, i, start, end) \
+for ((i) = (start); \
+(i) < (end) && (i) + sde_cea_db_payload_len(&(cea)[(i)]) < (end); \
+(i) += sde_cea_db_payload_len(&(cea)[(i)]) + 1)
+
+static bool sde_cea_db_is_hdmi_hf_vsdb(const u8 *db)
+{
+	int hdmi_id;
+
+	if (sde_cea_db_tag(db) != VENDOR_SPECIFIC_DATA_BLOCK)
+		return false;
+
+	if (sde_cea_db_payload_len(db) < 7)
+		return false;
+
+	hdmi_id = db[1] | (db[2] << 8) | (db[3] << 16);
+
+	return hdmi_id == HDMI_IEEE_OUI_HF;
+}
+
+static u8 *sde_edid_find_extended_tag_block(struct edid *edid, int blk_id)
+{
+	u8 *db = NULL;
+	u8 *cea = NULL;
+
+	if (!edid) {
+		pr_err("%s: invalid input\n", __func__);
+		return NULL;
+	}
+
+	cea = sde_find_cea_extension(edid);
+
+	if (cea && sde_cea_revision(cea) >= 3) {
+		int i, start, end;
+
+		if (sde_cea_db_offsets(cea, &start, &end))
+			return NULL;
+
+		sde_for_each_cea_db(cea, i, start, end) {
+			db = &cea[i];
+			if ((sde_cea_db_tag(db) == SDE_EXTENDED_TAG) &&
+				(db[1] == blk_id))
+				return db;
+		}
+	}
+	return NULL;
+}
+
+static u8 *
+sde_edid_find_block(struct edid *edid, int blk_id)
+{
+	u8 *db = NULL;
+	u8 *cea = NULL;
+
+	if (!edid) {
+		pr_err("%s: invalid input\n", __func__);
+		return NULL;
+	}
+
+	cea = sde_find_cea_extension(edid);
+
+	if (cea && sde_cea_revision(cea) >= 3) {
+		int i, start, end;
+
+		if (sde_cea_db_offsets(cea, &start, &end))
+			return NULL;
+
+		sde_for_each_cea_db(cea, i, start, end) {
+			db = &cea[i];
+			if (sde_cea_db_tag(db) == blk_id)
+				return db;
+		}
+	}
+	return NULL;
+}
+
+static const u8 *_sde_edid_find_block(const u8 *in_buf, u32 start_offset,
+	u8 type, u8 *len)
+{
+	/* the start of data block collection, start of Video Data Block */
+	u32 offset = start_offset;
+	u32 dbc_offset = in_buf[2];
+
+	SDE_EDID_DEBUG("%s +", __func__);
+	/*
+	 * * edid buffer 1, byte 2 being 4 means no non-DTD/Data block
+	 *   collection present.
+	 * * edid buffer 1, byte 2 being 0 means no non-DTD/DATA block
+	 *   collection present and no DTD data present.
+	 */
+	if ((dbc_offset == 0) || (dbc_offset == 4)) {
+		SDE_ERROR("EDID: no DTD or non-DTD data present\n");
+		return NULL;
+	}
+
+	while (offset < dbc_offset) {
+		u8 block_len = in_buf[offset] & 0x1F;
+
+		if ((offset + block_len <= dbc_offset) &&
+		    (in_buf[offset] >> 5) == type) {
+			*len = block_len + 1;
+			SDE_EDID_DEBUG("block=%d found @ 0x%x w/ len=%d\n",
+				type, offset, block_len);
+
+			return in_buf + offset;
+		}
+		offset += 1 + block_len;
+	}
+
+	return NULL;
+}
+
+static void sde_edid_extract_vendor_id(struct sde_edid_ctrl *edid_ctrl)
+{
+	char *vendor_id;
+	u32 id_codes;
+
+	SDE_EDID_DEBUG("%s +", __func__);
+	if (!edid_ctrl) {
+		SDE_ERROR("%s: invalid input\n", __func__);
+		return;
+	}
+
+	vendor_id = edid_ctrl->vendor_id;
+	id_codes = ((u32)edid_ctrl->edid->mfg_id[0] << 8) +
+		edid_ctrl->edid->mfg_id[1];
+
+	vendor_id[0] = 'A' - 1 + ((id_codes >> 10) & 0x1F);
+	vendor_id[1] = 'A' - 1 + ((id_codes >> 5) & 0x1F);
+	vendor_id[2] = 'A' - 1 + (id_codes & 0x1F);
+	vendor_id[3] = 0;
+	SDE_EDID_DEBUG("vendor id is %s ", vendor_id);
+	SDE_EDID_DEBUG("%s -", __func__);
+}
+
+static void sde_edid_set_y420_support(struct drm_connector *connector,
+u32 video_format)
+{
+	u8 cea_mode = 0;
+	struct drm_display_mode *mode;
+	u32 mode_fmt_flags = 0;
+
+	/* Need to add Y420 support flag to the modes */
+	list_for_each_entry(mode, &connector->probed_modes, head) {
+		/* Cache the format flags before clearing */
+		mode_fmt_flags = mode->flags;
+		/* Clear the RGB/YUV format flags before calling upstream API */
+		mode->flags &= ~SDE_DRM_MODE_FLAG_FMT_MASK;
+		cea_mode = drm_match_cea_mode(mode);
+		/* Restore the format flags */
+		mode->flags = mode_fmt_flags;
+		if ((cea_mode != 0) && (cea_mode == video_format)) {
+			SDE_EDID_DEBUG("found match for %d\n", video_format);
+			mode->flags |= DRM_MODE_FLAG_SUPPORTS_YUV;
+		}
+	}
+}
+
+static void sde_edid_parse_Y420CMDB(
+struct drm_connector *connector, struct sde_edid_ctrl *edid_ctrl,
+const u8 *db)
+{
+	u8 cmdb_len = 0;
+	u8 svd_len = 0;
+	const u8 *svd = NULL;
+	u32 i = 0;
+	u32 video_format = 0;
+	u32 num_cmdb_svd = 0;
+	const u32 mult = 8;
+
+	if (!edid_ctrl) {
+		DEV_ERR("%s: edid_ctrl is NULL\n", __func__);
+		return;
+	}
+
+	if (!db) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return;
+	}
+	SDE_EDID_DEBUG("%s +\n", __func__);
+	cmdb_len = db[0] & 0x1f;
+
+	if (cmdb_len < 1)
+		return;
+
+	svd = sde_edid_find_block(edid_ctrl->edid, VIDEO_DATA_BLOCK);
+	if (svd) {
+		/* moving to the next byte as VIC info begins there */
+		svd_len = svd[0] & 0x1f;
+		++svd;
+	}
+
+	if (cmdb_len == 1)
+		num_cmdb_svd = svd_len;
+	else {
+		num_cmdb_svd = (cmdb_len - 1) * mult;
+		if (num_cmdb_svd > svd_len)
+			num_cmdb_svd = svd_len;
+	}
+
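+	/*
+	 * Worked example: cmdb_len == 3 yields up to 16 map bits, so SVD
+	 * index 10 is tested against db[3] bit 2 below.
+	 */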
+	for (i = 0; i < num_cmdb_svd; i++) {
+		video_format = *(svd + i) & 0x7F;
+		/*
+		 * If cmdb_len is 1, it means all SVDs support YUV
+		 * Else, we check each byte of the cmdb bitmap bitwise
+		 * and match those bits with the formats populated
+		 * during the parsing of the Video Data Blocks.
+		 * Refer to CTA 861-F section 7.5.11 YCBCR 4:2:0 Capability
+		 * Map Data Block for more details on this.
+		 */
+		if (cmdb_len == 1 || (db[2 + i / mult] & (1 << (i % mult))))
+			sde_edid_set_y420_support(connector, video_format);
+	}
+
+	SDE_EDID_DEBUG("%s -\n", __func__);
+
+}
+
+static void sde_edid_parse_Y420VDB(
+struct drm_connector *connector, struct sde_edid_ctrl *edid_ctrl,
+const u8 *db)
+{
+	u8 len = db[0] & 0x1f;
+	u32 i = 0;
+	u32 video_format = 0;
+
+	if (!edid_ctrl) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return;
+	}
+
+	SDE_EDID_DEBUG("%s +\n", __func__);
+
+	/* Offset to byte 3 */
+	db += 2;
+	for (i = 0; i < len - 1; i++) {
+		video_format = *(db + i) & 0x7F;
+		/*
+		 * mode was already added in get_modes()
+		 * only need to set the Y420 support flag
+		 */
+		sde_edid_set_y420_support(connector, video_format);
+	}
+	SDE_EDID_DEBUG("%s -", __func__);
+}
+
+static void sde_edid_set_mode_format(
+struct drm_connector *connector, struct sde_edid_ctrl *edid_ctrl)
+{
+	const u8 *db = NULL;
+	struct drm_display_mode *mode;
+
+	SDE_EDID_DEBUG("%s +\n", __func__);
+	/* Set YUV mode support flags for YCbcr420VDB */
+	db = sde_edid_find_extended_tag_block(edid_ctrl->edid,
+			Y420_VIDEO_DATA_BLOCK);
+	if (db)
+		sde_edid_parse_Y420VDB(connector, edid_ctrl, db);
+	else
+		SDE_EDID_DEBUG("YCbCr420 VDB is not present\n");
+
+	/* Set RGB supported on all modes where YUV is not set */
+	list_for_each_entry(mode, &connector->probed_modes, head) {
+		if (!(mode->flags & DRM_MODE_FLAG_SUPPORTS_YUV))
+			mode->flags |= DRM_MODE_FLAG_SUPPORTS_RGB;
+	}
+
+	db = sde_edid_find_extended_tag_block(edid_ctrl->edid,
+			Y420_CAPABILITY_MAP_DATA_BLOCK);
+	if (db)
+		sde_edid_parse_Y420CMDB(connector, edid_ctrl, db);
+	else
+		SDE_EDID_DEBUG("YCbCr420 CMDB is not present\n");
+
+	/*
+	 * As per HDMI 2.0 spec, a sink supporting any modes
+	 * requiring more than a 340MHz clock rate should support
+	 * SCDC as well. This is required because we need the SCDC
+	 * channel to set the TMDS clock ratio. However in cases
+	 * where the TV publishes such a mode in its list of modes
+	 * but does not have SCDC support as per HDMI HFVSDB block
+	 * remove RGB mode support from the flags. Currently, among
+	 * the modes without deep color support, only RGB modes such
+	 * as the 4K@60fps case require a clock of 340MHz or above.
+	 * All other modes shall be YUV.
+	 * Deep color case is handled separately while choosing the
+	 * best mode in the _sde_hdmi_choose_best_format API where
+	 * we enable deep color only if it satisfies both source and
+	 * sink requirements. However, that API assumes that at least
+	 * RGB mode is supported on the mode. Hence, it would be better
+	 * to remove the format support flags while parsing the EDID
+	 * itself if it doesn't satisfy the HDMI spec requirement.
+	 */
+
+	list_for_each_entry(mode, &connector->probed_modes, head) {
+		if ((mode->clock > MIN_SCRAMBLER_REQ_RATE) &&
+			!connector->scdc_present) {
+			mode->flags &= ~DRM_MODE_FLAG_SUPPORTS_RGB;
+		}
+	}
+
+	SDE_EDID_DEBUG("%s -\n", __func__);
+}
+
+static void _sde_edid_update_dc_modes(
+struct drm_connector *connector, struct sde_edid_ctrl *edid_ctrl)
+{
+	int i, start, end;
+	u8 *edid_ext, *hdmi;
+	struct drm_display_info *disp_info;
+	u32 hdmi_dc_yuv_modes = 0;
+
+	SDE_EDID_DEBUG("%s +\n", __func__);
+
+	if (!connector || !edid_ctrl) {
+		SDE_ERROR("invalid input\n");
+		return;
+	}
+
+	disp_info = &connector->display_info;
+
+	edid_ext = sde_find_cea_extension(edid_ctrl->edid);
+
+	if (!edid_ext) {
+		SDE_ERROR("no cea extension\n");
+		return;
+	}
+
+	if (sde_cea_db_offsets(edid_ext, &start, &end))
+		return;
+
+	sde_for_each_cea_db(edid_ext, i, start, end) {
+		if (sde_cea_db_is_hdmi_hf_vsdb(&edid_ext[i])) {
+
+			hdmi = &edid_ext[i];
+
+			if (sde_cea_db_payload_len(hdmi) < 7)
+				continue;
+
+			if (hdmi[7] & DRM_EDID_YCBCR420_DC_30) {
+				hdmi_dc_yuv_modes |= DRM_EDID_YCBCR420_DC_30;
+				SDE_EDID_DEBUG("Y420 30-bit supported\n");
+			}
+
+			if (hdmi[7] & DRM_EDID_YCBCR420_DC_36) {
+				hdmi_dc_yuv_modes |= DRM_EDID_YCBCR420_DC_36;
+				SDE_EDID_DEBUG("Y420 36-bit supported\n");
+			}
+
+			if (hdmi[7] & DRM_EDID_YCBCR420_DC_48) {
+				hdmi_dc_yuv_modes |= DRM_EDID_YCBCR420_DC_48;
+				SDE_EDID_DEBUG("Y420 48-bit supported\n");
+			}
+		}
+	}
+
+	disp_info->edid_hdmi_dc_modes |= hdmi_dc_yuv_modes;
+
+	SDE_EDID_DEBUG("%s -\n", __func__);
+}
+
+static void _sde_edid_extract_audio_data_blocks(
+	struct sde_edid_ctrl *edid_ctrl)
+{
+	u8 len = 0;
+	u8 adb_max = 0;
+	const u8 *adb = NULL;
+	u32 offset = DBC_START_OFFSET;
+	u8 *cea = NULL;
+
+	if (!edid_ctrl) {
+		SDE_ERROR("invalid edid_ctrl\n");
+		return;
+	}
+	SDE_EDID_DEBUG("%s +", __func__);
+	cea = sde_find_cea_extension(edid_ctrl->edid);
+	if (!cea) {
+		SDE_DEBUG("CEA extension not found\n");
+		return;
+	}
+
+	edid_ctrl->adb_size = 0;
+
+	memset(edid_ctrl->audio_data_block, 0,
+		sizeof(edid_ctrl->audio_data_block));
+
+	do {
+		len = 0;
+		adb = _sde_edid_find_block(cea, offset, AUDIO_DATA_BLOCK,
+			&len);
+
+		if ((adb == NULL) || (len > MAX_AUDIO_DATA_BLOCK_SIZE ||
+			adb_max >= MAX_NUMBER_ADB)) {
+			if (!edid_ctrl->adb_size) {
+				SDE_DEBUG("No/Invalid Audio Data Block\n");
+				return;
+			}
+
+			/*
+			 * Stop scanning: offset is not advanced on this
+			 * path, so continuing would rescan the same block
+			 * forever.
+			 */
+			break;
+		}
+
+		memcpy(edid_ctrl->audio_data_block + edid_ctrl->adb_size,
+			adb, len);
+		offset = (adb - cea) + len;
+
+		edid_ctrl->adb_size += len;
+		adb_max++;
+	} while (adb);
+	SDE_EDID_DEBUG("%s -", __func__);
+}
+
+static void _sde_edid_extract_speaker_allocation_data(
+	struct sde_edid_ctrl *edid_ctrl)
+{
+	u8 len = 0;
+	const u8 *sadb = NULL;
+	u8 *cea = NULL;
+
+	if (!edid_ctrl) {
+		SDE_ERROR("invalid edid_ctrl\n");
+		return;
+	}
+	SDE_EDID_DEBUG("%s +", __func__);
+	cea = sde_find_cea_extension(edid_ctrl->edid);
+	if (!cea) {
+		SDE_DEBUG("CEA extension not found\n");
+		return;
+	}
+
+	sadb = _sde_edid_find_block(cea, DBC_START_OFFSET,
+		SPEAKER_ALLOCATION_DATA_BLOCK, &len);
+	if ((sadb == NULL) || (len != MAX_SPKR_ALLOC_DATA_BLOCK_SIZE)) {
+		SDE_DEBUG("No/Invalid Speaker Allocation Data Block\n");
+		return;
+	}
+
+	memcpy(edid_ctrl->spkr_alloc_data_block, sadb, len);
+	edid_ctrl->sadb_size = len;
+
+	SDE_EDID_DEBUG("speaker alloc data SP byte = %08x %s%s%s%s%s%s%s\n",
+		sadb[1],
+		(sadb[1] & BIT(0)) ? "FL/FR," : "",
+		(sadb[1] & BIT(1)) ? "LFE," : "",
+		(sadb[1] & BIT(2)) ? "FC," : "",
+		(sadb[1] & BIT(3)) ? "RL/RR," : "",
+		(sadb[1] & BIT(4)) ? "RC," : "",
+		(sadb[1] & BIT(5)) ? "FLC/FRC," : "",
+		(sadb[1] & BIT(6)) ? "RLC/RRC," : "");
+	SDE_EDID_DEBUG("%s -", __func__);
+}
+
+struct sde_edid_ctrl *sde_edid_init(void)
+{
+	struct sde_edid_ctrl *edid_ctrl = NULL;
+
+	SDE_EDID_DEBUG("%s +\n", __func__);
+	edid_ctrl = kzalloc(sizeof(*edid_ctrl), GFP_KERNEL);
+	if (!edid_ctrl) {
+		SDE_ERROR("edid_ctrl alloc failed\n");
+		return NULL;
+	}
+	SDE_EDID_DEBUG("%s -\n", __func__);
+	return edid_ctrl;
+}
+
+void sde_free_edid(void **input)
+{
+	struct sde_edid_ctrl *edid_ctrl = (struct sde_edid_ctrl *)(*input);
+
+	SDE_EDID_DEBUG("%s +", __func__);
+	kfree(edid_ctrl->edid);
+	edid_ctrl->edid = NULL;
+}
+
+void sde_edid_deinit(void **input)
+{
+	struct sde_edid_ctrl *edid_ctrl = (struct sde_edid_ctrl *)(*input);
+
+	SDE_EDID_DEBUG("%s +", __func__);
+	sde_free_edid((void *)&edid_ctrl);
+	kfree(edid_ctrl);
+	SDE_EDID_DEBUG("%s -", __func__);
+}
+
+int _sde_edid_update_modes(struct drm_connector *connector,
+	void *input)
+{
+	int rc = 0;
+	struct sde_edid_ctrl *edid_ctrl = (struct sde_edid_ctrl *)(input);
+	struct drm_display_info *disp_info;
+
+	disp_info = &connector->display_info;
+
+	disp_info->edid_hdmi_dc_modes = 0;
+
+	SDE_EDID_DEBUG("%s +", __func__);
+	if (edid_ctrl->edid) {
+		drm_mode_connector_update_edid_property(connector,
+			edid_ctrl->edid);
+
+		rc = drm_add_edid_modes(connector, edid_ctrl->edid);
+		sde_edid_set_mode_format(connector, edid_ctrl);
+		_sde_edid_update_dc_modes(connector, edid_ctrl);
+		SDE_EDID_DEBUG("%s -", __func__);
+		return rc;
+	}
+
+	drm_mode_connector_update_edid_property(connector, NULL);
+	SDE_EDID_DEBUG("%s null edid -", __func__);
+	return rc;
+}
+
+bool sde_detect_hdmi_monitor(void *input)
+{
+	struct sde_edid_ctrl *edid_ctrl = (struct sde_edid_ctrl *)(input);
+
+	return drm_detect_hdmi_monitor(edid_ctrl->edid);
+}
+
+void sde_get_edid(struct drm_connector *connector,
+				  struct i2c_adapter *adapter, void **input)
+{
+	struct sde_edid_ctrl *edid_ctrl = (struct sde_edid_ctrl *)(*input);
+
+	edid_ctrl->edid = drm_get_edid(connector, adapter);
+	SDE_EDID_DEBUG("%s +\n", __func__);
+
+	if (!edid_ctrl->edid)
+		SDE_ERROR("EDID read failed\n");
+
+	if (edid_ctrl->edid) {
+		sde_edid_extract_vendor_id(edid_ctrl);
+		_sde_edid_extract_audio_data_blocks(edid_ctrl);
+		_sde_edid_extract_speaker_allocation_data(edid_ctrl);
+	}
+	SDE_EDID_DEBUG("%s -\n", __func__);
+}
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/gpu/drm/msm/sde_edid_parser.h	2019-01-22 16:16:23.523246588 +0100
@@ -0,0 +1,152 @@
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _SDE_EDID_PARSER_H_
+#define _SDE_EDID_PARSER_H_
+
+#include <linux/types.h>
+#include <linux/bitops.h>
+#include <linux/debugfs.h>
+#include <linux/of_device.h>
+#include <linux/i2c.h>
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_edid.h>
+
+
+#define MAX_NUMBER_ADB 5
+#define MAX_AUDIO_DATA_BLOCK_SIZE 31
+#define MAX_SPKR_ALLOC_DATA_BLOCK_SIZE 4
+#define EDID_VENDOR_ID_SIZE     4
+
+#define SDE_CEA_EXT    0x02
+#define SDE_EXTENDED_TAG 0x07
+
+#define MIN_SCRAMBLER_REQ_RATE 340000
+
+#define SDE_DRM_MODE_FLAG_FMT_MASK (0x3 << 20)
+
+enum extended_data_block_types {
+	VIDEO_CAPABILITY_DATA_BLOCK = 0x0,
+	VENDOR_SPECIFIC_VIDEO_DATA_BLOCK = 0x01,
+	HDMI_VIDEO_DATA_BLOCK = 0x04,
+	HDR_STATIC_METADATA_DATA_BLOCK = 0x06,
+	Y420_VIDEO_DATA_BLOCK = 0x0E,
+	VIDEO_FORMAT_PREFERENCE_DATA_BLOCK = 0x0D,
+	Y420_CAPABILITY_MAP_DATA_BLOCK = 0x0F,
+	VENDOR_SPECIFIC_AUDIO_DATA_BLOCK = 0x11,
+	INFOFRAME_DATA_BLOCK = 0x20,
+};
+
+#ifdef SDE_EDID_DEBUG_ENABLE
+#define SDE_EDID_DEBUG(fmt, args...)   SDE_ERROR(fmt, ##args)
+#else
+#define SDE_EDID_DEBUG(fmt, args...)   SDE_DEBUG(fmt, ##args)
+#endif
+
+/*
+ * struct hdmi_edid_hdr_data - HDR Static Metadata
+ * @eotf: Electro-Optical Transfer Function
+ * @metadata_type_one: Static Metadata Type 1 support
+ * @max_luminance: Desired Content Maximum Luminance
+ * @avg_luminance: Desired Content Frame-average Luminance
+ * @min_luminance: Desired Content Minimum Luminance
+ */
+struct sde_edid_hdr_data {
+	u32 eotf;
+	bool metadata_type_one;
+	u32 max_luminance;
+	u32 avg_luminance;
+	u32 min_luminance;
+};
+
+struct sde_edid_sink_caps {
+	u32 max_pclk_in_hz;
+	bool scdc_present;
+	bool scramble_support; /* scramble support for less than 340Mcsc */
+	bool read_req_support;
+	bool osd_disparity;
+	bool dual_view_support;
+	bool ind_view_support;
+};
+
+struct sde_edid_ctrl {
+	struct edid *edid;
+	u8 pt_scan_info;
+	u8 it_scan_info;
+	u8 ce_scan_info;
+	u8 audio_data_block[MAX_NUMBER_ADB * MAX_AUDIO_DATA_BLOCK_SIZE];
+	int adb_size;
+	u8 spkr_alloc_data_block[MAX_SPKR_ALLOC_DATA_BLOCK_SIZE];
+	int sadb_size;
+	bool hdr_supported;
+	char vendor_id[EDID_VENDOR_ID_SIZE];
+	struct sde_edid_sink_caps sink_caps;
+	struct sde_edid_hdr_data hdr_data;
+};
+
+/**
+ * sde_edid_init() - allocate and init an edid_ctrl structure.
+ * Return: handle to sde_edid_ctrl for the client, or NULL on failure.
+ */
+struct sde_edid_ctrl *sde_edid_init(void);
+
+/**
+ * sde_edid_deinit() - deinit edid structure.
+ * @edid_ctrl:     Handle to the edid_ctrl structure.
+ *
+ * Return: void.
+ */
+void sde_edid_deinit(void **edid_ctrl);
+
+/**
+ * sde_get_edid() - get edid info.
+ * @connector:   Handle to the drm_connector.
+ * @adapter:     handle to i2c adapter for DDC read
+ * @edid_ctrl:   Handle to the edid_ctrl structure.
+ *
+ * Return: void.
+ */
+void sde_get_edid(struct drm_connector *connector,
+struct i2c_adapter *adapter,
+void **edid_ctrl);
+
+/**
+ * sde_free_edid() - free edid structure.
+ * @edid_ctrl:     Handle to the edid_ctrl structure.
+ *
+ * Return: void.
+ */
+void sde_free_edid(void **edid_ctrl);
+
+/**
+ * sde_detect_hdmi_monitor() - detect HDMI mode.
+ * @edid_ctrl:     Handle to the edid_ctrl structure.
+ *
+ * Return: true if the sink is an HDMI monitor, false otherwise.
+ */
+bool sde_detect_hdmi_monitor(void *edid_ctrl);
+
+/**
+ * _sde_edid_update_modes() - populate EDID modes.
+ * @connector:     Handle to the drm_connector.
+ * @edid_ctrl:     Handle to the edid_ctrl structure.
+ *
+ * Return: number of modes added to the connector.
+ */
+int _sde_edid_update_modes(struct drm_connector *connector,
+							void *edid_ctrl);
+
+#endif /* _SDE_EDID_PARSER_H_ */
+
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/gpu/drm/msm/sde_hdcp_1x.c	2019-05-06 19:11:55.984227148 +0200
@@ -0,0 +1,1910 @@
+/* Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt)	"%s: " fmt, __func__
+
+#include <linux/io.h>
+#include <linux/types.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/stat.h>
+#include <linux/iopoll.h>
+#include <linux/hdcp_qseecom.h>
+#include "sde_hdcp.h"
+#include "sde_hdmi_util.h"
+#include "video/msm_hdmi_hdcp_mgr.h"
+
+#define SDE_HDCP_STATE_NAME (sde_hdcp_state_name(hdcp->hdcp_state))
+
+/* HDCP Keys state based on HDMI_HDCP_LINK0_STATUS:KEYS_STATE */
+#define HDCP_KEYS_STATE_NO_KEYS		0
+#define HDCP_KEYS_STATE_NOT_CHECKED	1
+#define HDCP_KEYS_STATE_CHECKING	2
+#define HDCP_KEYS_STATE_VALID		3
+#define HDCP_KEYS_STATE_AKSV_NOT_VALID	4
+#define HDCP_KEYS_STATE_CHKSUM_MISMATCH	5
+#define HDCP_KEYS_STATE_PROD_AKSV	6
+#define HDCP_KEYS_STATE_RESERVED	7
+
+#define TZ_HDCP_CMD_ID 0x00004401
+
+#define HDCP_INT_CLR (isr->auth_success_ack | isr->auth_fail_ack | \
+			isr->auth_fail_info_ack | isr->tx_req_ack | \
+			isr->encryption_ready_ack | \
+			isr->encryption_not_ready_ack | isr->tx_req_done_ack)
+
+#define HDCP_INT_EN (isr->auth_success_mask | isr->auth_fail_mask | \
+			isr->encryption_ready_mask | \
+			isr->encryption_not_ready_mask)
+
+#define HDCP_POLL_SLEEP_US   (20 * 1000)
+#define HDCP_POLL_TIMEOUT_US (HDCP_POLL_SLEEP_US * 100)
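+/* i.e. poll in 20 ms steps with an overall budget of 2 seconds */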
+
+#define sde_hdcp_1x_state(x) (hdcp->hdcp_state == x)
+
+struct sde_hdcp_sink_addr {
+	char *name;
+	u32 addr;
+	u32 len;
+};
+
+struct sde_hdcp_1x_reg_data {
+	u32 reg_id;
+	struct sde_hdcp_sink_addr *sink;
+};
+
+struct sde_hdcp_skaddr_map {
+	/* addresses to read from sink */
+	struct sde_hdcp_sink_addr bcaps;
+	struct sde_hdcp_sink_addr bksv;
+	struct sde_hdcp_sink_addr r0;
+	struct sde_hdcp_sink_addr bstatus;
+	struct sde_hdcp_sink_addr cp_irq_status;
+	struct sde_hdcp_sink_addr ksv_fifo;
+	struct sde_hdcp_sink_addr v_h0;
+	struct sde_hdcp_sink_addr v_h1;
+	struct sde_hdcp_sink_addr v_h2;
+	struct sde_hdcp_sink_addr v_h3;
+	struct sde_hdcp_sink_addr v_h4;
+
+	/* addresses to write to sink */
+	struct sde_hdcp_sink_addr an;
+	struct sde_hdcp_sink_addr aksv;
+	struct sde_hdcp_sink_addr ainfo;
+};
+
+struct sde_hdcp_int_set {
+	/* interrupt register */
+	u32 int_reg;
+
+	/* interrupt enable/disable masks */
+	u32 auth_success_mask;
+	u32 auth_fail_mask;
+	u32 encryption_ready_mask;
+	u32 encryption_not_ready_mask;
+	u32 tx_req_mask;
+	u32 tx_req_done_mask;
+
+	/* interrupt acknowledgment */
+	u32 auth_success_ack;
+	u32 auth_fail_ack;
+	u32 auth_fail_info_ack;
+	u32 encryption_ready_ack;
+	u32 encryption_not_ready_ack;
+	u32 tx_req_ack;
+	u32 tx_req_done_ack;
+
+	/* interrupt status */
+	u32 auth_success_int;
+	u32 auth_fail_int;
+	u32 encryption_ready;
+	u32 encryption_not_ready;
+	u32 tx_req_int;
+	u32 tx_req_done_int;
+};
+
+struct sde_hdcp_reg_set {
+	u32 status;
+	u32 keys_offset;
+	u32 r0_offset;
+	u32 v_offset;
+	u32 ctrl;
+	u32 aksv_lsb;
+	u32 aksv_msb;
+	u32 entropy_ctrl0;
+	u32 entropy_ctrl1;
+	u32 sec_sha_ctrl;
+	u32 sec_sha_data;
+	u32 sha_status;
+
+	u32 data2_0;
+	u32 data3;
+	u32 data4;
+	u32 data5;
+	u32 data6;
+
+	u32 sec_data0;
+	u32 sec_data1;
+	u32 sec_data7;
+	u32 sec_data8;
+	u32 sec_data9;
+	u32 sec_data10;
+	u32 sec_data11;
+	u32 sec_data12;
+
+	u32 reset;
+	u32 reset_bit;
+
+	u32 repeater;
+};
+
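+/*
+ * The positional initializers below must list values in the exact field
+ * order of struct sde_hdcp_reg_set, sde_hdcp_skaddr_map and
+ * sde_hdcp_int_set above.
+ */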
+#define HDCP_REG_SET_CLIENT_HDMI \
+	{HDMI_HDCP_LINK0_STATUS, 28, 24, 20, HDMI_HDCP_CTRL, \
+	 HDMI_HDCP_SW_LOWER_AKSV, HDMI_HDCP_SW_UPPER_AKSV, \
+	 HDMI_HDCP_ENTROPY_CTRL0, HDMI_HDCP_ENTROPY_CTRL1, \
+	 HDCP_SEC_TZ_HV_HLOS_HDCP_SHA_CTRL, \
+	 HDCP_SEC_TZ_HV_HLOS_HDCP_SHA_DATA, \
+	 HDMI_HDCP_SHA_STATUS, HDMI_HDCP_RCVPORT_DATA2_0, \
+	 HDMI_HDCP_RCVPORT_DATA3, HDMI_HDCP_RCVPORT_DATA4, \
+	 HDMI_HDCP_RCVPORT_DATA5, HDMI_HDCP_RCVPORT_DATA6, \
+	 HDCP_SEC_TZ_HV_HLOS_HDCP_RCVPORT_DATA0, \
+	 HDCP_SEC_TZ_HV_HLOS_HDCP_RCVPORT_DATA1, \
+	 HDCP_SEC_TZ_HV_HLOS_HDCP_RCVPORT_DATA7, \
+	 HDCP_SEC_TZ_HV_HLOS_HDCP_RCVPORT_DATA8, \
+	 HDCP_SEC_TZ_HV_HLOS_HDCP_RCVPORT_DATA9, \
+	 HDCP_SEC_TZ_HV_HLOS_HDCP_RCVPORT_DATA10, \
+	 HDCP_SEC_TZ_HV_HLOS_HDCP_RCVPORT_DATA11, \
+	 HDCP_SEC_TZ_HV_HLOS_HDCP_RCVPORT_DATA12, \
+	 HDMI_HDCP_RESET, BIT(0), BIT(6)}
+
+/* To do for DP */
+#define HDCP_REG_SET_CLIENT_DP \
+	{0}
+
+#define HDCP_HDMI_SINK_ADDR_MAP \
+	{{"bcaps", 0x40, 1}, {"bksv", 0x00, 5}, {"r0'", 0x08, 2}, \
+	 {"bstatus", 0x41, 2}, {"??", 0x0, 0}, {"ksv-fifo", 0x43, 0}, \
+	 {"v_h0", 0x20, 4}, {"v_h1", 0x24, 4}, {"v_h2", 0x28, 4}, \
+	 {"v_h3", 0x2c, 4}, {"v_h4", 0x30, 4}, {"an", 0x18, 8}, \
+	 {"aksv", 0x10, 5}, {"ainfo", 0x00, 0},}
+
+#define HDCP_DP_SINK_ADDR_MAP \
+	{{"bcaps", 0x68028, 1}, {"bksv", 0x68000, 5}, {"r0'", 0x68005, 2}, \
+	 {"binfo", 0x6802A, 2}, {"cp_irq_status", 0x68029, 1}, \
+	 {"ksv-fifo", 0x6802C, 0}, {"v_h0", 0x68014, 4}, {"v_h1", 0x68018, 4}, \
+	 {"v_h2", 0x6801C, 4}, {"v_h3", 0x68020, 4}, {"v_h4", 0x68024, 4}, \
+	 {"an", 0x6800C, 8}, {"aksv", 0x68007, 5}, {"ainfo", 0x6803B, 1} }
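+/*
+ * The HDMI offsets above are DDC offsets on the HDCP port at I2C slave
+ * address 0x74; the DP offsets are HDCP 1.3 DPCD addresses in the
+ * 0x68000 range.
+ */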
+
+#define HDCP_HDMI_INT_SET \
+	{HDMI_HDCP_INT_CTRL, \
+	 BIT(2), BIT(6), 0, 0, 0, 0, \
+	 BIT(1), BIT(5), BIT(7), 0, 0, 0, 0, \
+	 BIT(0), BIT(4), 0, 0, 0, 0}
+
+#define HDCP_DP_INT_SET \
+	{DP_INTR_STATUS2, \
+	 BIT(17), BIT(20), BIT(24), BIT(27), 0, 0, \
+	 BIT(16), BIT(19), BIT(21), BIT(23), BIT(26), 0, 0, \
+	 BIT(15), BIT(18), BIT(22), BIT(25), 0, 0}
+
+struct sde_hdcp_1x {
+	u8 bcaps;
+	u32 tp_msgid;
+	u32 an_0, an_1, aksv_0, aksv_1;
+	bool sink_r0_ready;
+	bool reauth;
+	bool ksv_ready;
+	enum sde_hdcp_states hdcp_state;
+	struct HDCP_V2V1_MSG_TOPOLOGY cached_tp;
+	struct HDCP_V2V1_MSG_TOPOLOGY current_tp;
+	struct delayed_work hdcp_auth_work;
+	struct completion r0_checked;
+	struct completion sink_r0_available;
+	struct sde_hdcp_init_data init_data;
+	struct sde_hdcp_ops *ops;
+	struct sde_hdcp_reg_set reg_set;
+	struct sde_hdcp_int_set int_set;
+	struct sde_hdcp_skaddr_map sink_addr;
+	struct workqueue_struct *workq;
+};
+
+const char *sde_hdcp_state_name(enum sde_hdcp_states hdcp_state)
+{
+	switch (hdcp_state) {
+	case HDCP_STATE_INACTIVE:	return "HDCP_STATE_INACTIVE";
+	case HDCP_STATE_AUTHENTICATING:	return "HDCP_STATE_AUTHENTICATING";
+	case HDCP_STATE_AUTHENTICATED:	return "HDCP_STATE_AUTHENTICATED";
+	case HDCP_STATE_AUTH_FAIL:	return "HDCP_STATE_AUTH_FAIL";
+	default:			return "???";
+	}
+}
+
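+/*
+ * Count the set bits in @array. Per the HDCP 1.x spec, a valid KSV
+ * contains exactly 20 ones (and 20 zeroes) in its 40 bits, which is
+ * what the callers verify.
+ */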
+static int sde_hdcp_1x_count_one(u8 *array, u8 len)
+{
+	int i, j, count = 0;
+
+	for (i = 0; i < len; i++)
+		for (j = 0; j < 8; j++)
+			count += (((array[i] >> j) & 0x1) ? 1 : 0);
+	return count;
+}
+
+static void reset_hdcp_ddc_failures(struct sde_hdcp_1x *hdcp)
+{
+	int hdcp_ddc_ctrl1_reg;
+	int hdcp_ddc_status;
+	int failure;
+	int nack0;
+	struct dss_io_data *io;
+
+	if (!hdcp || !hdcp->init_data.core_io) {
+		pr_err("invalid input\n");
+		return;
+	}
+
+	io = hdcp->init_data.core_io;
+
+	/* Check for any DDC transfer failures */
+	hdcp_ddc_status = DSS_REG_R(io, HDMI_HDCP_DDC_STATUS);
+	failure = (hdcp_ddc_status >> 16) & BIT(0);
+	nack0 = (hdcp_ddc_status >> 14) & BIT(0);
+	SDE_HDCP_DEBUG("%s: HDCP_DDC_STATUS=0x%x, FAIL=%d, NACK0=%d\n",
+		SDE_HDCP_STATE_NAME, hdcp_ddc_status, failure, nack0);
+
+	if (failure) {
+		/*
+		 * Indicates that the last HDCP HW DDC transfer failed.
+		 * This occurs when a transfer is attempted with HDCP DDC
+		 * disabled (HDCP_DDC_DISABLE=1) or the number of retries
+		 * matches HDCP_DDC_RETRY_CNT. A failure occurred, so
+		 * clear it.
+		 */
+		SDE_HDCP_DEBUG("%s: DDC failure HDCP_DDC_STATUS=0x%08x\n",
+			 SDE_HDCP_STATE_NAME, hdcp_ddc_status);
+
+		/* First, Disable DDC */
+		DSS_REG_W(io, HDMI_HDCP_DDC_CTRL_0, BIT(0));
+
+		/* ACK the Failure to Clear it */
+		hdcp_ddc_ctrl1_reg = DSS_REG_R(io, HDMI_HDCP_DDC_CTRL_1);
+		DSS_REG_W(io, HDMI_HDCP_DDC_CTRL_1,
+			hdcp_ddc_ctrl1_reg | BIT(0));
+
+		/* Check if the FAILURE got Cleared */
+		hdcp_ddc_status = DSS_REG_R(io, HDMI_HDCP_DDC_STATUS);
+		hdcp_ddc_status = (hdcp_ddc_status >> 16) & BIT(0);
+		if (hdcp_ddc_status == 0x0)
+			SDE_HDCP_DEBUG("%s: HDCP DDC Failure cleared\n",
+				SDE_HDCP_STATE_NAME);
+		else
+			SDE_ERROR("%s: Unable to clear HDCP DDC Failure",
+				SDE_HDCP_STATE_NAME);
+
+		/* Re-Enable HDCP DDC */
+		DSS_REG_W(io, HDMI_HDCP_DDC_CTRL_0, 0);
+	}
+
+	if (nack0) {
+		SDE_HDCP_DEBUG("%s: Before: HDMI_DDC_SW_STATUS=0x%08x\n",
+			SDE_HDCP_STATE_NAME, DSS_REG_R(io, HDMI_DDC_SW_STATUS));
+		/* Reset HDMI DDC software status */
+		DSS_REG_W_ND(io, HDMI_DDC_CTRL,
+			DSS_REG_R(io, HDMI_DDC_CTRL) | BIT(3));
+		msleep(20);
+		DSS_REG_W_ND(io, HDMI_DDC_CTRL,
+			DSS_REG_R(io, HDMI_DDC_CTRL) & ~(BIT(3)));
+
+		/* Reset HDMI DDC Controller */
+		DSS_REG_W_ND(io, HDMI_DDC_CTRL,
+			DSS_REG_R(io, HDMI_DDC_CTRL) | BIT(1));
+		msleep(20);
+		DSS_REG_W_ND(io, HDMI_DDC_CTRL,
+			DSS_REG_R(io, HDMI_DDC_CTRL) & ~BIT(1));
+		SDE_HDCP_DEBUG("%s: After: HDMI_DDC_SW_STATUS=0x%08x\n",
+			SDE_HDCP_STATE_NAME, DSS_REG_R(io, HDMI_DDC_SW_STATUS));
+	}
+
+	hdcp_ddc_status = DSS_REG_R(io, HDMI_HDCP_DDC_STATUS);
+
+	failure = (hdcp_ddc_status >> 16) & BIT(0);
+	nack0 = (hdcp_ddc_status >> 14) & BIT(0);
+	SDE_HDCP_DEBUG("%s: On Exit: HDCP_DDC_STATUS=0x%x, FAIL=%d, NACK0=%d\n",
+		SDE_HDCP_STATE_NAME, hdcp_ddc_status, failure, nack0);
+} /* reset_hdcp_ddc_failures */
+
+static void sde_hdcp_1x_hw_ddc_clean(struct sde_hdcp_1x *hdcp)
+{
+	struct dss_io_data *io = NULL;
+	u32 hdcp_ddc_status, ddc_hw_status;
+	u32 ddc_xfer_done, ddc_xfer_req;
+	u32 ddc_hw_req, ddc_hw_not_idle;
+	bool ddc_hw_not_ready, xfer_not_done, hw_not_done;
+	u32 timeout_count;
+
+	if (!hdcp || !hdcp->init_data.core_io) {
+		pr_err("invalid input\n");
+		return;
+	}
+
+	io = hdcp->init_data.core_io;
+	if (!io->base) {
+		pr_err("core io not initialized\n");
+		return;
+	}
+
+	/* Wait to be clean on DDC HW engine */
+	timeout_count = 100;
+	do {
+		hdcp_ddc_status = DSS_REG_R(io, HDMI_HDCP_DDC_STATUS);
+		ddc_xfer_req    = hdcp_ddc_status & BIT(4);
+		ddc_xfer_done   = hdcp_ddc_status & BIT(10);
+
+		ddc_hw_status   = DSS_REG_R(io, HDMI_DDC_HW_STATUS);
+		ddc_hw_req      = ddc_hw_status & BIT(16);
+		ddc_hw_not_idle = ddc_hw_status & (BIT(0) | BIT(1));
+
+		/* ddc transfer was requested but not completed */
+		xfer_not_done = ddc_xfer_req && !ddc_xfer_done;
+
+		/* ddc status is not idle or a hw request pending */
+		hw_not_done = ddc_hw_not_idle || ddc_hw_req;
+
+		ddc_hw_not_ready = xfer_not_done || hw_not_done;
+
+		SDE_HDCP_DEBUG("%s: timeout count(%d): ddc hw%sready\n",
+			SDE_HDCP_STATE_NAME, timeout_count,
+				ddc_hw_not_ready ? " not " : " ");
+		SDE_HDCP_DEBUG("hdcp_ddc_status[0x%x], ddc_hw_status[0x%x]\n",
+				hdcp_ddc_status, ddc_hw_status);
+		if (ddc_hw_not_ready)
+			msleep(20);
+	} while (ddc_hw_not_ready && --timeout_count);
+} /* hdcp_1x_hw_ddc_clean */
+
+static int sde_hdcp_1x_load_keys(void *input)
+{
+	int rc = 0;
+	bool use_sw_keys = false;
+	u32 reg_val;
+	u32 ksv_lsb_addr, ksv_msb_addr;
+	u32 aksv_lsb, aksv_msb;
+	u8 aksv[5];
+	struct dss_io_data *io;
+	struct dss_io_data *qfprom_io;
+	struct sde_hdcp_1x *hdcp = input;
+	struct sde_hdcp_reg_set *reg_set;
+
+	if (!hdcp || !hdcp->init_data.core_io ||
+		!hdcp->init_data.qfprom_io) {
+		pr_err("invalid input\n");
+		rc = -EINVAL;
+		goto end;
+	}
+
+	if (!sde_hdcp_1x_state(HDCP_STATE_INACTIVE) &&
+	    !sde_hdcp_1x_state(HDCP_STATE_AUTH_FAIL)) {
+		pr_err("%s: invalid state. returning\n",
+			SDE_HDCP_STATE_NAME);
+		rc = -EINVAL;
+		goto end;
+	}
+
+	io = hdcp->init_data.core_io;
+	qfprom_io = hdcp->init_data.qfprom_io;
+	reg_set = &hdcp->reg_set;
+
+	/* On compatible hardware, use SW keys */
+	reg_val = DSS_REG_R(qfprom_io, SEC_CTRL_HW_VERSION);
+	if (reg_val >= HDCP_SEL_MIN_SEC_VERSION) {
+		reg_val = DSS_REG_R(qfprom_io,
+			QFPROM_RAW_FEAT_CONFIG_ROW0_MSB +
+			QFPROM_RAW_VERSION_4);
+
+		if (!(reg_val & BIT(23)))
+			use_sw_keys = true;
+	}
+
+	if (use_sw_keys) {
+		if (hdcp1_set_keys(&aksv_msb, &aksv_lsb)) {
+			pr_err("setting hdcp SW keys failed\n");
+			rc = -EINVAL;
+			goto end;
+		}
+	} else {
+		/* Fetch aksv from QFPROM, this info should be public. */
+		ksv_lsb_addr = HDCP_KSV_LSB;
+		ksv_msb_addr = HDCP_KSV_MSB;
+
+		if (hdcp->init_data.sec_access) {
+			ksv_lsb_addr += HDCP_KSV_VERSION_4_OFFSET;
+			ksv_msb_addr += HDCP_KSV_VERSION_4_OFFSET;
+		}
+
+		aksv_lsb = DSS_REG_R(qfprom_io, ksv_lsb_addr);
+		aksv_msb = DSS_REG_R(qfprom_io, ksv_msb_addr);
+	}
+
+	SDE_HDCP_DEBUG("%s: AKSV=%02x%08x\n", SDE_HDCP_STATE_NAME,
+		aksv_msb, aksv_lsb);
+
+	aksv[0] =  aksv_lsb        & 0xFF;
+	aksv[1] = (aksv_lsb >> 8)  & 0xFF;
+	aksv[2] = (aksv_lsb >> 16) & 0xFF;
+	aksv[3] = (aksv_lsb >> 24) & 0xFF;
+	aksv[4] =  aksv_msb        & 0xFF;
+
+	/* check there are 20 ones in AKSV */
+	if (sde_hdcp_1x_count_one(aksv, 5) != 20) {
+		pr_err("AKSV bit count failed\n");
+		rc = -EINVAL;
+		goto end;
+	}
+
+	DSS_REG_W(io, reg_set->aksv_lsb, aksv_lsb);
+	DSS_REG_W(io, reg_set->aksv_msb, aksv_msb);
+
+	/* Setup seed values for random number An */
+	DSS_REG_W(io, reg_set->entropy_ctrl0, 0xB1FFB0FF);
+	DSS_REG_W(io, reg_set->entropy_ctrl1, 0xF00DFACE);
+
+	/* make sure hw is programmed */
+	wmb();
+
+	/* enable hdcp engine */
+	DSS_REG_W(io, reg_set->ctrl, 0x1);
+
+	hdcp->hdcp_state = HDCP_STATE_AUTHENTICATING;
+end:
+	return rc;
+}
+
+static int sde_hdcp_1x_read(struct sde_hdcp_1x *hdcp,
+			  struct sde_hdcp_sink_addr *sink,
+			  u8 *buf, bool realign)
+{
+	int rc = 0;
+	struct sde_hdmi_tx_ddc_data *ddc_data;
+	struct sde_hdmi_tx_ddc_ctrl *ddc_ctrl;
+
+	if (hdcp->init_data.client_id == HDCP_CLIENT_HDMI) {
+		reset_hdcp_ddc_failures(hdcp);
+
+		ddc_ctrl = hdcp->init_data.ddc_ctrl;
+		if (!ddc_ctrl) {
+			SDE_ERROR("invalid ddc ctrl\n");
+			return -EINVAL;
+		}
+		ddc_data = &ddc_ctrl->ddc_data;
+		memset(ddc_data, 0, sizeof(*ddc_data));
+		ddc_data->dev_addr = 0x74;
+		ddc_data->offset = sink->addr;
+		ddc_data->data_buf = buf;
+		ddc_data->data_len = sink->len;
+		ddc_data->request_len = sink->len;
+		ddc_data->retry = 5;
+		ddc_data->what = sink->name;
+		ddc_data->retry_align = realign;
+
+		rc = sde_hdmi_ddc_read((void *)hdcp->init_data.cb_data);
+		if (rc)
+			SDE_ERROR("%s: %s read failed\n",
+				SDE_HDCP_STATE_NAME, sink->name);
+	} else if (hdcp->init_data.client_id == HDCP_CLIENT_DP) {
+		/* To-do DP APIs go here */
+	}
+
+	return rc;
+}
+
+static int sde_hdcp_1x_write(struct sde_hdcp_1x *hdcp,
+			   struct sde_hdcp_sink_addr *sink, u8 *buf)
+{
+	int rc = 0;
+	struct sde_hdmi_tx_ddc_data *ddc_data;
+	struct sde_hdmi_tx_ddc_ctrl *ddc_ctrl;
+
+	if (hdcp->init_data.client_id == HDCP_CLIENT_HDMI) {
+		ddc_ctrl = hdcp->init_data.ddc_ctrl;
+		if (!ddc_ctrl) {
+			SDE_ERROR("invalid ddc ctrl\n");
+			return -EINVAL;
+		}
+		ddc_data = &ddc_ctrl->ddc_data;
+
+		memset(ddc_data, 0, sizeof(*ddc_data));
+
+		ddc_data->dev_addr = 0x74;
+		ddc_data->offset = sink->addr;
+		ddc_data->data_buf = buf;
+		ddc_data->data_len = sink->len;
+		ddc_data->what = sink->name;
+
+		rc = sde_hdmi_ddc_write((void *)hdcp->init_data.cb_data);
+		if (rc)
+			SDE_ERROR("%s: %s write failed\n",
+				   SDE_HDCP_STATE_NAME, sink->name);
+	} else if (hdcp->init_data.client_id == HDCP_CLIENT_DP) {
+		/* To-do DP APIs go here */
+	}
+
+	return rc;
+}
+
+static void sde_hdcp_1x_enable_interrupts(struct sde_hdcp_1x *hdcp)
+{
+	u32 intr_reg;
+	struct dss_io_data *io;
+	struct sde_hdcp_int_set *isr;
+
+	io = hdcp->init_data.core_io;
+	isr = &hdcp->int_set;
+
+	intr_reg = DSS_REG_R(io, isr->int_reg);
+
+	intr_reg |= HDCP_INT_CLR | HDCP_INT_EN;
+
+	DSS_REG_W(io, isr->int_reg, intr_reg);
+}
+
+static int sde_hdcp_1x_read_bcaps(struct sde_hdcp_1x *hdcp)
+{
+	int rc;
+	struct sde_hdcp_reg_set *reg_set = &hdcp->reg_set;
+	struct dss_io_data *hdcp_io  = hdcp->init_data.hdcp_io;
+
+	if (!sde_hdcp_1x_state(HDCP_STATE_AUTHENTICATING)) {
+		pr_err("invalid state\n");
+		return -EINVAL;
+	}
+
+	rc = sde_hdcp_1x_read(hdcp, &hdcp->sink_addr.bcaps,
+		&hdcp->bcaps, false);
+	if (IS_ERR_VALUE(rc)) {
+		pr_err("error reading bcaps\n");
+		goto error;
+	}
+
+	SDE_HDCP_DEBUG("bcaps read: 0x%x\n", hdcp->bcaps);
+
+	hdcp->current_tp.ds_type = hdcp->bcaps & reg_set->repeater ?
+			DS_REPEATER : DS_RECEIVER;
+
+	SDE_HDCP_DEBUG("ds: %s\n", hdcp->current_tp.ds_type == DS_REPEATER ?
+			"repeater" : "receiver");
+
+	/* Write BCAPS to the hardware */
+	DSS_REG_W(hdcp_io, reg_set->sec_data12, hdcp->bcaps);
+error:
+	return rc;
+}
+
+static int sde_hdcp_1x_wait_for_hw_ready(struct sde_hdcp_1x *hdcp)
+{
+	int rc;
+	u32 link0_status;
+	struct sde_hdcp_reg_set *reg_set = &hdcp->reg_set;
+	struct dss_io_data *io = hdcp->init_data.core_io;
+
+	if (!sde_hdcp_1x_state(HDCP_STATE_AUTHENTICATING)) {
+		pr_err("invalid state\n");
+		return -EINVAL;
+	}
+
+	/* Wait for HDCP keys to be checked and validated */
+	rc = readl_poll_timeout(io->base + reg_set->status, link0_status,
+				((link0_status >> reg_set->keys_offset) & 0x7)
+					== HDCP_KEYS_STATE_VALID ||
+				!sde_hdcp_1x_state(HDCP_STATE_AUTHENTICATING),
+				HDCP_POLL_SLEEP_US, HDCP_POLL_TIMEOUT_US);
+	if (IS_ERR_VALUE(rc)) {
+		pr_err("key not ready\n");
+		goto error;
+	}
+
+	/*
+	 * 1.1_Features is turned off by default.
+	 * No need to write AInfo since 1.1_Features is disabled.
+	 */
+	DSS_REG_W(io, reg_set->data4, 0);
+
+	/* Wait for An0 and An1 bit to be ready */
+	rc = readl_poll_timeout(io->base + reg_set->status, link0_status,
+				(link0_status & (BIT(8) | BIT(9))) ||
+				!sde_hdcp_1x_state(HDCP_STATE_AUTHENTICATING),
+				HDCP_POLL_SLEEP_US, HDCP_POLL_TIMEOUT_US);
+	if (IS_ERR_VALUE(rc)) {
+		pr_err("An not ready\n");
+		goto error;
+	}
+
+	/* As per hardware recommendations, wait before reading An */
+	msleep(20);
+error:
+	if (!sde_hdcp_1x_state(HDCP_STATE_AUTHENTICATING))
+		rc = -EINVAL;
+
+	return rc;
+}
+
+static int sde_hdcp_1x_send_an_aksv_to_sink(struct sde_hdcp_1x *hdcp)
+{
+	int rc;
+	u8 an[8], aksv[5];
+
+	if (!sde_hdcp_1x_state(HDCP_STATE_AUTHENTICATING)) {
+		pr_err("invalid state\n");
+		return -EINVAL;
+	}
+
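+	/* Serialize the 64-bit An value (an_1:an_0) LSB first for the sink */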
+	an[0] =  hdcp->an_0        & 0xFF;
+	an[1] = (hdcp->an_0 >> 8)  & 0xFF;
+	an[2] = (hdcp->an_0 >> 16) & 0xFF;
+	an[3] = (hdcp->an_0 >> 24) & 0xFF;
+	an[4] =  hdcp->an_1        & 0xFF;
+	an[5] = (hdcp->an_1 >> 8)  & 0xFF;
+	an[6] = (hdcp->an_1 >> 16) & 0xFF;
+	an[7] = (hdcp->an_1 >> 24) & 0xFF;
+
+	SDE_HDCP_DEBUG("an read: 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
+		an[7], an[6], an[5], an[4], an[3], an[2], an[1], an[0]);
+
+	rc = sde_hdcp_1x_write(hdcp, &hdcp->sink_addr.an, an);
+	if (IS_ERR_VALUE(rc)) {
+		pr_err("error writing an to sink\n");
+		goto error;
+	}
+
+	/* Copy An and AKSV to byte arrays for transmission */
+	aksv[0] =  hdcp->aksv_0        & 0xFF;
+	aksv[1] = (hdcp->aksv_0 >> 8)  & 0xFF;
+	aksv[2] = (hdcp->aksv_0 >> 16) & 0xFF;
+	aksv[3] = (hdcp->aksv_0 >> 24) & 0xFF;
+	aksv[4] =  hdcp->aksv_1        & 0xFF;
+
+	SDE_HDCP_DEBUG("aksv read: 0x%02x%02x%02x%02x%02x\n",
+		aksv[4], aksv[3], aksv[2], aksv[1], aksv[0]);
+
+	rc = sde_hdcp_1x_write(hdcp, &hdcp->sink_addr.aksv, aksv);
+	if (IS_ERR_VALUE(rc)) {
+		pr_err("error writing aksv to sink\n");
+		goto error;
+	}
+error:
+	return rc;
+}
+
+static int sde_hdcp_1x_read_an_aksv_from_hw(struct sde_hdcp_1x *hdcp)
+{
+	struct dss_io_data *io = hdcp->init_data.core_io;
+	struct sde_hdcp_reg_set *reg_set = &hdcp->reg_set;
+
+	if (!sde_hdcp_1x_state(HDCP_STATE_AUTHENTICATING)) {
+		pr_err("invalid state\n");
+		return -EINVAL;
+	}
+
+	hdcp->an_0 = DSS_REG_R(io, reg_set->data5);
+	if (hdcp->init_data.client_id == HDCP_CLIENT_DP) {
+		udelay(1);
+		hdcp->an_0 = DSS_REG_R(io, reg_set->data5);
+	}
+
+	hdcp->an_1 = DSS_REG_R(io, reg_set->data6);
+	if (hdcp->init_data.client_id == HDCP_CLIENT_DP) {
+		udelay(1);
+		hdcp->an_1 = DSS_REG_R(io, reg_set->data6);
+	}
+
+	/* Read AKSV */
+	hdcp->aksv_0 = DSS_REG_R(io, reg_set->data3);
+	hdcp->aksv_1 = DSS_REG_R(io, reg_set->data4);
+
+	return 0;
+}
+
+static int sde_hdcp_1x_get_bksv_from_sink(struct sde_hdcp_1x *hdcp)
+{
+	int rc;
+	u8 *bksv = hdcp->current_tp.bksv;
+	u32 link0_bksv_0, link0_bksv_1;
+	struct sde_hdcp_reg_set *reg_set = &hdcp->reg_set;
+	struct dss_io_data *hdcp_io  = hdcp->init_data.hdcp_io;
+
+	rc = sde_hdcp_1x_read(hdcp, &hdcp->sink_addr.bksv, bksv, false);
+	if (IS_ERR_VALUE(rc)) {
+		pr_err("error reading bksv from sink\n");
+		goto error;
+	}
+
+	SDE_HDCP_DEBUG("bksv read: 0x%02x%02x%02x%02x%02x\n",
+		bksv[4], bksv[3], bksv[2], bksv[1], bksv[0]);
+
+	/* check there are 20 ones in BKSV */
+	if (sde_hdcp_1x_count_one(bksv, 5) != 20) {
+		pr_err("%s: BKSV doesn't have 20 1's and 20 0's\n",
+			SDE_HDCP_STATE_NAME);
+		rc = -EINVAL;
+		goto error;
+	}
+
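+	/* Pack BKSV bytes 0..3 into one word and byte 4 into a second word */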
+	link0_bksv_0 = bksv[3];
+	link0_bksv_0 = (link0_bksv_0 << 8) | bksv[2];
+	link0_bksv_0 = (link0_bksv_0 << 8) | bksv[1];
+	link0_bksv_0 = (link0_bksv_0 << 8) | bksv[0];
+	link0_bksv_1 = bksv[4];
+
+	DSS_REG_W(hdcp_io, reg_set->sec_data0, link0_bksv_0);
+	DSS_REG_W(hdcp_io, reg_set->sec_data1, link0_bksv_1);
+error:
+	return rc;
+}
+
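+/*
+ * hdcp1_validate_receiver_ids() expects each 5-byte KSV with its byte
+ * order reversed relative to the wire format, hence the per-KSV swap
+ * below before the SRM check.
+ */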
+static u8 *sde_hdcp_1x_swap_byte_order(u8 *bksv_in, int num_dev)
+{
+	u8 *bksv_out;
+	u8 *tmp_out;
+	u8 *tmp_in;
+	int i, j;
+
+	/* Don't exceed max downstream devices */
+	if (num_dev > MAX_DEVICES_SUPPORTED) {
+		pr_err("invalid params\n");
+		return NULL;
+	}
+
+	bksv_out = kzalloc(RECV_ID_SIZE * num_dev, GFP_KERNEL);
+
+	if (!bksv_out)
+		return NULL;
+
+	SDE_HDCP_DEBUG("num_dev = %d\n", num_dev);
+
+	/* Store temporarily for return */
+	tmp_out = bksv_out;
+	tmp_in = bksv_in;
+
+	for (i = 0; i < num_dev; i++) {
+		for (j = 0; j < RECV_ID_SIZE; j++)
+			bksv_out[j] = tmp_in[RECV_ID_SIZE - j - 1];
+
+		/* Each KSV is 5 bytes long */
+		bksv_out += RECV_ID_SIZE;
+		tmp_in += RECV_ID_SIZE;
+	}
+
+	return tmp_out;
+}
+
+static int sde_hdcp_1x_revoked_rcv_chk(struct sde_hdcp_1x *hdcp)
+{
+	int rc = 0;
+	u8 *bksv = hdcp->current_tp.bksv;
+	u8 *bksv_out;
+	struct hdcp_srm_device_id_t *bksv_srm;
+
+	bksv_out = sde_hdcp_1x_swap_byte_order(bksv, 1);
+
+	if (!bksv_out) {
+		rc = -ENOMEM;
+		goto exit;
+	}
+
+	SDE_HDCP_DEBUG("bksv_out : 0x%02x%02x%02x%02x%02x\n",
+		bksv_out[4], bksv_out[3], bksv_out[2],
+		bksv_out[1], bksv_out[0]);
+
+	bksv_srm = (struct hdcp_srm_device_id_t *)bksv_out;
+	/*
+	 * Here we are checking only the receiver ID,
+	 * hence the device count is one.
+	 */
+	rc = hdcp1_validate_receiver_ids(bksv_srm, 1);
+
+	kfree(bksv_out);
+
+exit:
+	return rc;
+}
+
+static int sde_hdcp_1x_revoked_rpt_chk(struct sde_hdcp_1x *hdcp)
+{
+	int rc = 0;
+	int i;
+	u8 *bksv = hdcp->current_tp.ksv_list;
+	u8 *bksv_out;
+	struct hdcp_srm_device_id_t *bksv_srm;
+
+	for (i = 0; i < hdcp->sink_addr.ksv_fifo.len;
+		 i += RECV_ID_SIZE) {
+		SDE_HDCP_DEBUG("bksv : 0x%02x%02x%02x%02x%02x\n",
+			bksv[i + 4], bksv[i + 3], bksv[i + 2],
+			bksv[i + 1], bksv[i]);
+	}
+
+	bksv_out = sde_hdcp_1x_swap_byte_order(bksv,
+		hdcp->current_tp.dev_count);
+
+	if (!bksv_out) {
+		rc = -ENOMEM;
+		goto exit;
+	}
+
+	bksv_srm = (struct hdcp_srm_device_id_t *)bksv_out;
+	/* Here we are checking repeater ksv list */
+	rc = hdcp1_validate_receiver_ids(bksv_srm,
+			hdcp->current_tp.dev_count);
+
+	kfree(bksv_out);
+
+exit:
+	return rc;
+}
+
+static void sde_hdcp_1x_enable_sink_irq_hpd(struct sde_hdcp_1x *hdcp)
+{
+	int rc;
+	u8 const required_major = 1, required_minor = 2;
+	u8 sink_major = 0, sink_minor = 0;
+	u8 enable_hpd_irq = 0x1;
+	u16 version;
+
+	if (hdcp->init_data.client_id == HDCP_CLIENT_HDMI)
+		return;
+
+	version = *hdcp->init_data.version;
+	sink_major = (version >> 4) & 0x0f;
+	sink_minor = version & 0x0f;
+
+	if ((sink_minor < required_minor) || (sink_major < required_major) ||
+		(hdcp->current_tp.ds_type != DS_REPEATER)) {
+		pr_debug("sink irq hpd not enabled\n");
+		return;
+	}
+
+	rc = sde_hdcp_1x_write(hdcp, &hdcp->sink_addr.ainfo, &enable_hpd_irq);
+	if (IS_ERR_VALUE(rc))
+		SDE_HDCP_DEBUG("error writing ainfo to sink\n");
+}
+
+static int sde_hdcp_1x_verify_r0(struct sde_hdcp_1x *hdcp)
+{
+	int rc, r0_retry = 3;
+	u8 buf[2];
+	u32 link0_status, timeout_count;
+	u32 const r0_read_delay_us = 1;
+	u32 const r0_read_timeout_us = r0_read_delay_us * 10;
+	struct sde_hdcp_reg_set *reg_set = &hdcp->reg_set;
+	struct dss_io_data *io = hdcp->init_data.core_io;
+
+	if (!sde_hdcp_1x_state(HDCP_STATE_AUTHENTICATING)) {
+		pr_err("invalid state\n");
+		return -EINVAL;
+	}
+
+	/* Wait for HDCP R0 computation to be completed */
+	rc = readl_poll_timeout(io->base + reg_set->status, link0_status,
+				(link0_status & BIT(reg_set->r0_offset)) ||
+				!sde_hdcp_1x_state(HDCP_STATE_AUTHENTICATING),
+				HDCP_POLL_SLEEP_US, HDCP_POLL_TIMEOUT_US);
+	if (IS_ERR_VALUE(rc)) {
+		pr_err("R0 not ready\n");
+		goto error;
+	}
+
+	/*
+	 * HDCP Compliance Test case 1A-01:
+	 * Wait here at least 100ms before reading R0'
+	 */
+	if (hdcp->init_data.client_id == HDCP_CLIENT_HDMI) {
+		msleep(100);
+	} else {
+		if (!hdcp->sink_r0_ready) {
+			reinit_completion(&hdcp->sink_r0_available);
+			timeout_count = wait_for_completion_timeout(
+				&hdcp->sink_r0_available, HZ / 2);
+
+			if (hdcp->reauth) {
+				pr_err("sink R0 not ready\n");
+				rc = -EINVAL;
+				goto error;
+			}
+		}
+	}
+
+	do {
+		memset(buf, 0, sizeof(buf));
+
+		rc = sde_hdcp_1x_read(hdcp, &hdcp->sink_addr.r0,
+			buf, false);
+		if (IS_ERR_VALUE(rc)) {
+			pr_err("error reading R0' from sink\n");
+			goto error;
+		}
+
+		SDE_HDCP_DEBUG("sink R0' read: %02x%02x\n", buf[1], buf[0]);
+
+		DSS_REG_W(io, reg_set->data2_0, (((u32)buf[1]) << 8) | buf[0]);
+
+		rc = readl_poll_timeout(io->base + reg_set->status,
+			link0_status, (link0_status & BIT(12)) ||
+			!sde_hdcp_1x_state(HDCP_STATE_AUTHENTICATING),
+			r0_read_delay_us, r0_read_timeout_us);
+	} while (rc && --r0_retry);
+error:
+	if (!sde_hdcp_1x_state(HDCP_STATE_AUTHENTICATING))
+		rc = -EINVAL;
+
+	return rc;
+}
+
+static int sde_hdcp_1x_authentication_part1(struct sde_hdcp_1x *hdcp)
+{
+	int rc;
+
+	if (!sde_hdcp_1x_state(HDCP_STATE_AUTHENTICATING)) {
+		pr_err("invalid state\n");
+		return -EINVAL;
+	}
+
+	sde_hdcp_1x_enable_interrupts(hdcp);
+
+	rc = sde_hdcp_1x_read_bcaps(hdcp);
+	if (rc)
+		goto error;
+
+	rc = sde_hdcp_1x_wait_for_hw_ready(hdcp);
+	if (rc)
+		goto error;
+
+	rc = sde_hdcp_1x_read_an_aksv_from_hw(hdcp);
+	if (rc)
+		goto error;
+
+	rc = sde_hdcp_1x_get_bksv_from_sink(hdcp);
+	if (rc)
+		goto error;
+
+	rc = sde_hdcp_1x_revoked_rcv_chk(hdcp);
+	if (rc) {
+		rc = -SDE_HDCP_SRM_FAIL;
+		goto error;
+	}
+
+	rc = sde_hdcp_1x_send_an_aksv_to_sink(hdcp);
+	if (rc)
+		goto error;
+
+	sde_hdcp_1x_enable_sink_irq_hpd(hdcp);
+
+	rc = sde_hdcp_1x_verify_r0(hdcp);
+	if (rc)
+		goto error;
+
+	pr_info("SUCCESSFUL\n");
+
+	return 0;
+error:
+	pr_err("%s: FAILED\n", SDE_HDCP_STATE_NAME);
+
+	return rc;
+}
+
+static int sde_hdcp_1x_transfer_v_h(struct sde_hdcp_1x *hdcp)
+{
+	int rc = 0;
+	struct dss_io_data *io = hdcp->init_data.hdcp_io;
+	struct sde_hdcp_reg_set *reg_set = &hdcp->reg_set;
+	struct sde_hdcp_1x_reg_data reg_data[]  = {
+		{reg_set->sec_data7,  &hdcp->sink_addr.v_h0},
+		{reg_set->sec_data8,  &hdcp->sink_addr.v_h1},
+		{reg_set->sec_data9,  &hdcp->sink_addr.v_h2},
+		{reg_set->sec_data10, &hdcp->sink_addr.v_h3},
+		{reg_set->sec_data11, &hdcp->sink_addr.v_h4},
+	};
+	struct sde_hdcp_sink_addr sink = {"V", reg_data->sink->addr};
+	u32 size = ARRAY_SIZE(reg_data);
+	u8 buf[0xFF] = {0};
+	u32 i = 0, len = 0;
+
+	if (!sde_hdcp_1x_state(HDCP_STATE_AUTHENTICATING)) {
+		pr_err("invalid state\n");
+		return -EINVAL;
+	}
+
+	for (i = 0; i < size; i++) {
+		struct sde_hdcp_1x_reg_data *rd = reg_data + i;
+
+		len += rd->sink->len;
+	}
+
+	sink.len = len;
+
+	rc = sde_hdcp_1x_read(hdcp, &sink, buf, false);
+	if (IS_ERR_VALUE(rc)) {
+		pr_err("error reading %s\n", sink.name);
+		goto end;
+	}
+
+	for (i = 0; i < size; i++) {
+		struct sde_hdcp_1x_reg_data *rd = reg_data + i;
+		u32 v_data;
+
+		/* local copy renamed to avoid shadowing the reg_data[] table */
+		memcpy(&v_data, buf + (sizeof(u32) * i), sizeof(u32));
+		DSS_REG_W(io, rd->reg_id, v_data);
+	}
+end:
+	return rc;
+}
+
+static int sde_hdcp_1x_validate_downstream(struct sde_hdcp_1x *hdcp)
+{
+	int rc;
+	u8 buf[2] = {0, 0};
+	u8 device_count, depth;
+	u8 max_cascade_exceeded, max_devs_exceeded;
+	u16 bstatus;
+	struct sde_hdcp_reg_set *reg_set = &hdcp->reg_set;
+
+	if (!sde_hdcp_1x_state(HDCP_STATE_AUTHENTICATING)) {
+		pr_err("invalid state\n");
+		return -EINVAL;
+	}
+
+	rc = sde_hdcp_1x_read(hdcp, &hdcp->sink_addr.bstatus,
+			buf, false);
+	if (IS_ERR_VALUE(rc)) {
+		pr_err("error reading bstatus\n");
+		goto end;
+	}
+
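+	/*
+	 * Bstatus layout: bits [6:0] device count, bit 7 MAX_DEVS_EXCEEDED,
+	 * bits [10:8] repeater depth, bit 11 MAX_CASCADE_EXCEEDED.
+	 */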
+	bstatus = buf[1];
+	bstatus = (bstatus << 8) | buf[0];
+
+	device_count = bstatus & 0x7F;
+
+	SDE_HDCP_DEBUG("device count %d\n", device_count);
+
+	/* Cascaded repeater depth */
+	depth = (bstatus >> 8) & 0x7;
+	SDE_HDCP_DEBUG("depth %d\n", depth);
+
+	/*
+	 * HDCP Compliance 1B-05:
+	 * Check if no. of devices connected to repeater
+	 * exceed max_devices_connected from bit 7 of Bstatus.
+	 */
+	max_devs_exceeded = (bstatus & BIT(7)) >> 7;
+	if (max_devs_exceeded == 0x01) {
+		pr_err("no. of devs connected exceed max allowed\n");
+		rc = -EINVAL;
+		goto end;
+	}
+
+	/*
+	 * HDCP Compliance 1B-06:
+	 * Check if no. of cascade connected to repeater
+	 * exceed max_cascade_connected from bit 11 of Bstatus.
+	 */
+	max_cascade_exceeded = (bstatus & BIT(11)) >> 11;
+	if (max_cascade_exceeded == 0x01) {
+		pr_err("no. of cascade connections exceed max allowed\n");
+		rc = -EINVAL;
+		goto end;
+	}
+
+	/* Update topology information */
+	hdcp->current_tp.dev_count = device_count;
+	hdcp->current_tp.max_cascade_exceeded = max_cascade_exceeded;
+	hdcp->current_tp.max_dev_exceeded = max_devs_exceeded;
+	hdcp->current_tp.depth = depth;
+
+	DSS_REG_W(hdcp->init_data.hdcp_io,
+		  reg_set->sec_data12, hdcp->bcaps | (bstatus << 8));
+end:
+	return rc;
+}
+
+static int sde_hdcp_1x_read_ksv_fifo(struct sde_hdcp_1x *hdcp)
+{
+	u32 ksv_read_retry = 20, ksv_bytes;
+	int rc = 0;
+	u8 *ksv_fifo = hdcp->current_tp.ksv_list;
+
+	if (!sde_hdcp_1x_state(HDCP_STATE_AUTHENTICATING)) {
+		pr_err("invalid state\n");
+		return -EINVAL;
+	}
+
+	memset(ksv_fifo, 0, sizeof(hdcp->current_tp.ksv_list));
+
+	/* each KSV is 5 bytes long */
+	ksv_bytes = 5 * hdcp->current_tp.dev_count;
+	hdcp->sink_addr.ksv_fifo.len = ksv_bytes;
+
+	while (ksv_bytes && --ksv_read_retry) {
+		rc = sde_hdcp_1x_read(hdcp, &hdcp->sink_addr.ksv_fifo,
+				ksv_fifo, true);
+		if (IS_ERR_VALUE(rc))
+			pr_err("could not read ksv fifo (%d)\n",
+				ksv_read_retry);
+		else
+			break;
+	}
+
+	if (rc)
+		pr_err("error reading ksv_fifo\n");
+
+	return rc;
+}
+
+static int sde_hdcp_1x_write_ksv_fifo(struct sde_hdcp_1x *hdcp)
+{
+	int i, rc = 0;
+	u8 *ksv_fifo = hdcp->current_tp.ksv_list;
+	u32 ksv_bytes = hdcp->sink_addr.ksv_fifo.len;
+	struct dss_io_data *io = hdcp->init_data.core_io;
+	struct dss_io_data *sec_io = hdcp->init_data.hdcp_io;
+	struct sde_hdcp_reg_set *reg_set = &hdcp->reg_set;
+	u32 sha_status = 0, status;
+
+	if (!sde_hdcp_1x_state(HDCP_STATE_AUTHENTICATING)) {
+		pr_err("invalid state\n");
+		return -EINVAL;
+	}
+
+	/* reset SHA Controller */
+	DSS_REG_W(sec_io, reg_set->sec_sha_ctrl, 0x1);
+	DSS_REG_W(sec_io, reg_set->sec_sha_ctrl, 0x0);
+
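+	/*
+	 * Feed the KSV list to the SHA block one byte at a time: the data
+	 * register takes the byte in bits [23:16] and the DONE flag in
+	 * bit [0].
+	 */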
+	for (i = 0; i < ksv_bytes - 1; i++) {
+		/* Write KSV byte and do not set DONE bit[0] */
+		DSS_REG_W_ND(sec_io, reg_set->sec_sha_data, ksv_fifo[i] << 16);
+
+		/*
+		 * Once 64 bytes have been written, we need to poll for
+		 * HDCP_SHA_BLOCK_DONE before writing any further
+		 */
+		if (i && !((i + 1) % 64)) {
+			rc = readl_poll_timeout(io->base + reg_set->sha_status,
+				sha_status, (sha_status & BIT(0)) ||
+				!sde_hdcp_1x_state(HDCP_STATE_AUTHENTICATING),
+				HDCP_POLL_SLEEP_US, HDCP_POLL_TIMEOUT_US);
+			if (IS_ERR_VALUE(rc)) {
+				pr_err("block not done\n");
+				goto error;
+			}
+		}
+	}
+
+	/* Write 1 to DONE bit[0] */
+	DSS_REG_W_ND(sec_io, reg_set->sec_sha_data,
+		(ksv_fifo[ksv_bytes - 1] << 16) | 0x1);
+
+	/* Now wait for HDCP_SHA_COMP_DONE */
+	rc = readl_poll_timeout(io->base + reg_set->sha_status, sha_status,
+				(sha_status & BIT(4)) ||
+				!sde_hdcp_1x_state(HDCP_STATE_AUTHENTICATING),
+				HDCP_POLL_SLEEP_US, HDCP_POLL_TIMEOUT_US);
+	if (IS_ERR_VALUE(rc)) {
+		pr_err("V computation not done\n");
+		goto error;
+	}
+
+	/* Wait for V_MATCHES */
+	rc = readl_poll_timeout(io->base + reg_set->status, status,
+				(status & BIT(reg_set->v_offset)) ||
+				!sde_hdcp_1x_state(HDCP_STATE_AUTHENTICATING),
+				HDCP_POLL_SLEEP_US, HDCP_POLL_TIMEOUT_US);
+	if (IS_ERR_VALUE(rc)) {
+		pr_err("V mismatch\n");
+		rc = -EINVAL;
+	}
+error:
+	if (!sde_hdcp_1x_state(HDCP_STATE_AUTHENTICATING))
+		rc = -EINVAL;
+
+	return rc;
+}
+
+static int sde_hdcp_1x_wait_for_ksv_ready(struct sde_hdcp_1x *hdcp)
+{
+	int rc, timeout;
+
+	if (!sde_hdcp_1x_state(HDCP_STATE_AUTHENTICATING)) {
+		pr_err("invalid state\n");
+		return -EINVAL;
+	}
+
+	/*
+	 * Wait until the READY bit is set in BCAPS. As per the HDCP
+	 * specification, the maximum permitted time to check for the
+	 * READY bit is five seconds.
+	 */
+	rc = sde_hdcp_1x_read(hdcp, &hdcp->sink_addr.bcaps,
+		&hdcp->bcaps, false);
+	if (IS_ERR_VALUE(rc)) {
+		pr_err("error reading bcaps\n");
+		goto error;
+	}
+
+	if (hdcp->init_data.client_id == HDCP_CLIENT_HDMI) {
+		timeout = 50;
+
+		while (!(hdcp->bcaps & BIT(5)) && --timeout) {
+			rc = sde_hdcp_1x_read(hdcp,
+				&hdcp->sink_addr.bcaps,
+				&hdcp->bcaps, false);
+			if (IS_ERR_VALUE(rc) ||
+			   !sde_hdcp_1x_state(HDCP_STATE_AUTHENTICATING)) {
+				pr_err("error reading bcaps\n");
+				goto error;
+			}
+			msleep(100);
+		}
+	} else {
+		u8 cp_buf = 0;
+		struct sde_hdcp_sink_addr *sink =
+			&hdcp->sink_addr.cp_irq_status;
+
+		timeout = jiffies_to_msecs(jiffies);
+
+		while (1) {
+			rc = sde_hdcp_1x_read(hdcp, sink, &cp_buf, false);
+			if (rc)
+				goto error;
+
+			if (cp_buf & BIT(0))
+				break;
+
+			/* max timeout of 5 sec as per hdcp 1.x spec */
+			if (abs(timeout - jiffies_to_msecs(jiffies)) > 5000) {
+				timeout = 0;
+				break;
+			}
+
+			if (hdcp->ksv_ready || hdcp->reauth ||
+			    !sde_hdcp_1x_state(HDCP_STATE_AUTHENTICATING))
+				break;
+
+			/* re-read after a minimum delay */
+			msleep(20);
+		}
+	}
+
+	if (!timeout || hdcp->reauth ||
+	    !sde_hdcp_1x_state(HDCP_STATE_AUTHENTICATING)) {
+		pr_err("DS KSV not ready\n");
+		rc = -EINVAL;
+	} else {
+		hdcp->ksv_ready = true;
+	}
+error:
+	return rc;
+}
+
+static int sde_hdcp_1x_authentication_part2(struct sde_hdcp_1x *hdcp)
+{
+	int rc;
+	int v_retry = 3;
+
+	rc = sde_hdcp_1x_validate_downstream(hdcp);
+	if (rc)
+		goto error;
+
+	rc = sde_hdcp_1x_read_ksv_fifo(hdcp);
+	if (rc)
+		goto error;
+
+	rc = sde_hdcp_1x_revoked_rpt_chk(hdcp);
+	if (rc) {
+		rc = -SDE_HDCP_SRM_FAIL;
+		goto error;
+	}
+
+	do {
+		/*
+		 * Do not proceed further if no device is connected: if no
+		 * downstream devices are attached to the repeater, then
+		 * part II fails.
+		 */
+
+		if (!hdcp->current_tp.dev_count) {
+			rc = -EINVAL;
+			goto error;
+		}
+
+		rc = sde_hdcp_1x_transfer_v_h(hdcp);
+		if (rc)
+			goto error;
+
+		rc = sde_hdcp_1x_write_ksv_fifo(hdcp);
+	} while (--v_retry && rc);
+error:
+	if (rc) {
+		pr_err("%s: FAILED\n", SDE_HDCP_STATE_NAME);
+	} else {
+		hdcp->hdcp_state = HDCP_STATE_AUTHENTICATED;
+
+		pr_info("SUCCESSFUL\n");
+	}
+
+	return rc;
+}
+
+static void sde_hdcp_1x_cache_topology(struct sde_hdcp_1x *hdcp)
+{
+	if (!hdcp || !hdcp->init_data.core_io) {
+		pr_err("invalid input\n");
+		return;
+	}
+
+	memcpy((void *)&hdcp->cached_tp,
+		(void *) &hdcp->current_tp,
+		sizeof(hdcp->cached_tp));
+	hdcp1_cache_repeater_topology((void *)&hdcp->cached_tp);
+}
+
+static void sde_hdcp_1x_notify_topology(void)
+{
+	hdcp1_notify_topology();
+}
+
+static void sde_hdcp_1x_update_auth_status(struct sde_hdcp_1x *hdcp)
+{
+	if (sde_hdcp_1x_state(HDCP_STATE_AUTH_FAIL))
+		hdcp->init_data.avmute_sink(hdcp->init_data.cb_data);
+
+	if (sde_hdcp_1x_state(HDCP_STATE_AUTHENTICATED)) {
+		sde_hdcp_1x_cache_topology(hdcp);
+		sde_hdcp_1x_notify_topology();
+	}
+
+	if (hdcp->init_data.notify_status &&
+	    !sde_hdcp_1x_state(HDCP_STATE_INACTIVE)) {
+		hdcp->init_data.notify_status(
+			hdcp->init_data.cb_data,
+			hdcp->hdcp_state);
+	}
+}
+
+static void sde_hdcp_1x_auth_work(struct work_struct *work)
+{
+	int rc;
+	struct delayed_work *dw = to_delayed_work(work);
+	struct sde_hdcp_1x *hdcp = container_of(dw,
+		struct sde_hdcp_1x, hdcp_auth_work);
+	struct dss_io_data *io;
+
+	if (!hdcp) {
+		pr_err("invalid input\n");
+		return;
+	}
+
+	if (!sde_hdcp_1x_state(HDCP_STATE_AUTHENTICATING)) {
+		pr_err("invalid state\n");
+		return;
+	}
+
+	hdcp->sink_r0_ready = false;
+	hdcp->reauth = false;
+	hdcp->ksv_ready = false;
+
+	io = hdcp->init_data.core_io;
+	/* Enabling Software DDC for HDMI and REF timer for DP */
+	if (hdcp->init_data.client_id == HDCP_CLIENT_HDMI)
+		DSS_REG_W_ND(io, HDMI_DDC_ARBITRATION, DSS_REG_R(io,
+				HDMI_DDC_ARBITRATION) & ~(BIT(4)));
+	else if (hdcp->init_data.client_id == HDCP_CLIENT_DP) {
+		/* To do for DP */
+	}
+
+	/*
+	 * program hw to enable encryption as soon as
+	 * authentication is successful.
+	 */
+	hdcp1_set_enc(true);
+
+	rc = sde_hdcp_1x_authentication_part1(hdcp);
+	if (rc)
+		goto end;
+
+	if (hdcp->current_tp.ds_type == DS_REPEATER) {
+		rc = sde_hdcp_1x_wait_for_ksv_ready(hdcp);
+		if (rc)
+			goto end;
+	} else {
+		hdcp->hdcp_state = HDCP_STATE_AUTHENTICATED;
+		goto end;
+	}
+
+	hdcp->ksv_ready = false;
+
+	rc = sde_hdcp_1x_authentication_part2(hdcp);
+	if (rc)
+		goto end;
+
+end:
+	if (rc && !sde_hdcp_1x_state(HDCP_STATE_INACTIVE)) {
+		hdcp->hdcp_state = HDCP_STATE_AUTH_FAIL;
+		if (rc == -SDE_HDCP_SRM_FAIL)
+			hdcp->hdcp_state = HDCP_STATE_AUTH_FAIL_NOREAUTH;
+	}
+
+	/*
+	 * Disabling software DDC before going into part3 to make sure
+	 * there is no Arbitration between software and hardware for DDC
+	 */
+	if (hdcp->init_data.client_id == HDCP_CLIENT_HDMI)
+		DSS_REG_W_ND(io, HDMI_DDC_ARBITRATION, DSS_REG_R(io,
+				HDMI_DDC_ARBITRATION) | (BIT(4)));
+
+	sde_hdcp_1x_update_auth_status(hdcp);
+}
+
+static int sde_hdcp_1x_authenticate(void *input)
+{
+	struct sde_hdcp_1x *hdcp = (struct sde_hdcp_1x *)input;
+
+	if (!hdcp) {
+		pr_err("invalid input\n");
+		return -EINVAL;
+	}
+
+	flush_delayed_work(&hdcp->hdcp_auth_work);
+
+	if (!sde_hdcp_1x_state(HDCP_STATE_INACTIVE)) {
+		pr_err("invalid state\n");
+		return -EINVAL;
+	}
+
+	if (!sde_hdcp_1x_load_keys(input)) {
+		queue_delayed_work(hdcp->workq,
+			&hdcp->hdcp_auth_work, HZ/2);
+	} else {
+		hdcp->hdcp_state = HDCP_STATE_AUTH_FAIL;
+		sde_hdcp_1x_update_auth_status(hdcp);
+	}
+
+	return 0;
+} /* hdcp_1x_authenticate */
+
+static int sde_hdcp_1x_reauthenticate(void *input)
+{
+	struct sde_hdcp_1x *hdcp = (struct sde_hdcp_1x *)input;
+	struct dss_io_data *io;
+	struct sde_hdcp_reg_set *reg_set;
+	struct sde_hdcp_int_set *isr;
+	u32 hdmi_hw_version;
+	u32 reg;
+	int ret = 0;
+
+	if (!hdcp || !hdcp->init_data.core_io) {
+		pr_err("invalid input\n");
+		return -EINVAL;
+	}
+
+	io = hdcp->init_data.core_io;
+	reg_set = &hdcp->reg_set;
+	isr = &hdcp->int_set;
+
+	if (!sde_hdcp_1x_state(HDCP_STATE_AUTH_FAIL)) {
+		pr_err("invalid state\n");
+		return -EINVAL;
+	}
+
+	if (hdcp->init_data.client_id == HDCP_CLIENT_HDMI) {
+		hdmi_hw_version = DSS_REG_R(io, HDMI_VERSION);
+		if (hdmi_hw_version >= 0x30030000) {
+			DSS_REG_W(io, HDMI_CTRL_SW_RESET, BIT(1));
+			DSS_REG_W(io, HDMI_CTRL_SW_RESET, 0);
+		}
+
+		/* Wait to be clean on DDC HW engine */
+		sde_hdcp_1x_hw_ddc_clean(hdcp);
+	}
+
+	/* Disable HDCP interrupts */
+	DSS_REG_W(io, isr->int_reg, DSS_REG_R(io, isr->int_reg) & ~HDCP_INT_EN);
+
+	reg = DSS_REG_R(io, reg_set->reset);
+	DSS_REG_W(io, reg_set->reset, reg | reg_set->reset_bit);
+
+	/* Disable encryption and disable the HDCP block */
+	DSS_REG_W(io, reg_set->ctrl, 0);
+
+	DSS_REG_W(io, reg_set->reset, reg & ~reg_set->reset_bit);
+
+	hdcp->hdcp_state = HDCP_STATE_INACTIVE;
+	sde_hdcp_1x_authenticate(hdcp);
+
+	return ret;
+} /* hdcp_1x_reauthenticate */
+
+static void sde_hdcp_1x_off(void *input)
+{
+	struct sde_hdcp_1x *hdcp = (struct sde_hdcp_1x *)input;
+	struct dss_io_data *io;
+	struct sde_hdcp_reg_set *reg_set;
+	struct sde_hdcp_int_set *isr;
+	int rc = 0;
+	u32 reg;
+
+	if (!hdcp || !hdcp->init_data.core_io) {
+		pr_err("invalid input\n");
+		return;
+	}
+
+	io = hdcp->init_data.core_io;
+	reg_set = &hdcp->reg_set;
+	isr = &hdcp->int_set;
+
+	if (sde_hdcp_1x_state(HDCP_STATE_INACTIVE)) {
+		pr_err("invalid state\n");
+		return;
+	}
+
+	/*
+	 * Disable HDCP interrupts.
+	 * Also, need to set the state to inactive here so that any ongoing
+	 * reauth works will know that the HDCP session has been turned off.
+	 */
+	mutex_lock(hdcp->init_data.mutex);
+	DSS_REG_W(io, isr->int_reg,
+		DSS_REG_R(io, isr->int_reg) & ~HDCP_INT_EN);
+	hdcp->hdcp_state = HDCP_STATE_INACTIVE;
+	mutex_unlock(hdcp->init_data.mutex);
+
+	/* complete any wait pending */
+	complete_all(&hdcp->sink_r0_available);
+	complete_all(&hdcp->r0_checked);
+	/*
+	 * Cancel any pending auth/reauth attempts.
+	 * If one is ongoing, this will wait for it to finish.
+	 * No more reauthentication attempts will be scheduled since we
+	 * set the current state to inactive.
+	 */
+	rc = cancel_delayed_work_sync(&hdcp->hdcp_auth_work);
+	if (rc)
+		SDE_HDCP_DEBUG("%s: Deleted hdcp auth work\n",
+			SDE_HDCP_STATE_NAME);
+
+	hdcp1_set_enc(false);
+
+	reg = DSS_REG_R(io, reg_set->reset);
+	DSS_REG_W(io, reg_set->reset, reg | reg_set->reset_bit);
+
+	/* Disable encryption and disable the HDCP block */
+	DSS_REG_W(io, reg_set->ctrl, 0);
+
+	DSS_REG_W(io, reg_set->reset, reg & ~reg_set->reset_bit);
+
+	hdcp->sink_r0_ready = false;
+
+	SDE_HDCP_DEBUG("%s: HDCP: Off\n", SDE_HDCP_STATE_NAME);
+} /* hdcp_1x_off */
+
+static int sde_hdcp_1x_isr(void *input)
+{
+	struct sde_hdcp_1x *hdcp = (struct sde_hdcp_1x *)input;
+	int rc = 0;
+	struct dss_io_data *io;
+	u32 hdcp_int_val;
+	struct sde_hdcp_reg_set *reg_set;
+	struct sde_hdcp_int_set *isr;
+
+	if (!hdcp || !hdcp->init_data.core_io) {
+		pr_err("invalid input\n");
+		rc = -EINVAL;
+		goto error;
+	}
+
+	io = hdcp->init_data.core_io;
+	reg_set = &hdcp->reg_set;
+	isr = &hdcp->int_set;
+
+	hdcp_int_val = DSS_REG_R(io, isr->int_reg);
+
+	/* Ignore HDCP interrupts if HDCP is disabled */
+	if (sde_hdcp_1x_state(HDCP_STATE_INACTIVE)) {
+		DSS_REG_W(io, isr->int_reg, hdcp_int_val | HDCP_INT_CLR);
+		return 0;
+	}
+
+	if (hdcp_int_val & isr->auth_success_int) {
+		/* AUTH_SUCCESS_INT */
+		DSS_REG_W(io, isr->int_reg,
+			(hdcp_int_val | isr->auth_success_ack));
+		SDE_HDCP_DEBUG("%s: AUTH SUCCESS\n", SDE_HDCP_STATE_NAME);
+
+		if (sde_hdcp_1x_state(HDCP_STATE_AUTHENTICATING))
+			complete_all(&hdcp->r0_checked);
+	}
+
+	if (hdcp_int_val & isr->auth_fail_int) {
+		/* AUTH_FAIL_INT */
+		u32 link_status = DSS_REG_R(io, reg_set->status);
+
+		DSS_REG_W(io, isr->int_reg,
+			(hdcp_int_val | isr->auth_fail_ack));
+
+		SDE_HDCP_DEBUG("%s: AUTH FAIL, LINK0_STATUS=0x%08x\n",
+			SDE_HDCP_STATE_NAME, link_status);
+
+		/* Clear AUTH_FAIL_INFO as well */
+		DSS_REG_W(io, isr->int_reg,
+			(hdcp_int_val | isr->auth_fail_info_ack));
+
+		if (sde_hdcp_1x_state(HDCP_STATE_AUTHENTICATED)) {
+			hdcp->hdcp_state = HDCP_STATE_AUTH_FAIL;
+			sde_hdcp_1x_update_auth_status(hdcp);
+		} else if (sde_hdcp_1x_state(HDCP_STATE_AUTHENTICATING)) {
+			complete_all(&hdcp->r0_checked);
+		}
+	}
+
+	if (hdcp_int_val & isr->tx_req_int) {
+		/* DDC_XFER_REQ_INT */
+		DSS_REG_W(io, isr->int_reg,
+			(hdcp_int_val | isr->tx_req_ack));
+		SDE_HDCP_DEBUG("%s: DDC_XFER_REQ_INT received\n",
+			SDE_HDCP_STATE_NAME);
+	}
+
+	if (hdcp_int_val & isr->tx_req_done_int) {
+		/* DDC_XFER_DONE_INT */
+		DSS_REG_W(io, isr->int_reg,
+			(hdcp_int_val | isr->tx_req_done_ack));
+		SDE_HDCP_DEBUG("%s: DDC_XFER_DONE received\n",
+			SDE_HDCP_STATE_NAME);
+	}
+
+	if (hdcp_int_val & isr->encryption_ready) {
+		/* Encryption enabled */
+		DSS_REG_W(io, isr->int_reg,
+			(hdcp_int_val | isr->encryption_ready_ack));
+		SDE_HDCP_DEBUG("%s: encryption ready received\n",
+			SDE_HDCP_STATE_NAME);
+	}
+
+	if (hdcp_int_val & isr->encryption_not_ready) {
+		/* Encryption not enabled */
+		DSS_REG_W(io, isr->int_reg,
+			(hdcp_int_val | isr->encryption_not_ready_ack));
+		SDE_HDCP_DEBUG("%s: encryption not ready received\n",
+			SDE_HDCP_STATE_NAME);
+	}
+
+error:
+	return rc;
+}
+
+void sde_hdcp_1x_deinit(void *input)
+{
+	struct sde_hdcp_1x *hdcp = (struct sde_hdcp_1x *)input;
+
+	if (!hdcp) {
+		pr_err("invalid input\n");
+		return;
+	}
+
+	if (hdcp->workq)
+		destroy_workqueue(hdcp->workq);
+
+	hdcp1_client_unregister();
+	kfree(hdcp);
+} /* hdcp_1x_deinit */
+
+static void sde_hdcp_1x_update_client_reg_set(struct sde_hdcp_1x *hdcp)
+{
+	if (hdcp->init_data.client_id == HDCP_CLIENT_HDMI) {
+		struct sde_hdcp_reg_set reg_set = HDCP_REG_SET_CLIENT_HDMI;
+		struct sde_hdcp_skaddr_map sink_addr = HDCP_HDMI_SINK_ADDR_MAP;
+		struct sde_hdcp_int_set isr = HDCP_HDMI_INT_SET;
+
+		hdcp->reg_set = reg_set;
+		hdcp->sink_addr = sink_addr;
+		hdcp->int_set = isr;
+	} else if (hdcp->init_data.client_id == HDCP_CLIENT_DP) {
+		/* TO DO for DP
+		 * Will be filled later
+		 */
+	}
+}
+
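+/* DPCD 0x201 is DEVICE_SERVICE_IRQ_VECTOR; BIT(2) is its CP_IRQ flag */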
+static bool sde_hdcp_1x_is_cp_irq_raised(struct sde_hdcp_1x *hdcp)
+{
+	int ret;
+	u8 buf = 0;
+	struct sde_hdcp_sink_addr sink = {"irq", 0x201, 1};
+
+	ret = sde_hdcp_1x_read(hdcp, &sink, &buf, false);
+	if (IS_ERR_VALUE(ret))
+		pr_err("error reading irq_vector\n");
+
+	return buf & BIT(2) ? true : false;
+}
+
+static void sde_hdcp_1x_clear_cp_irq(struct sde_hdcp_1x *hdcp)
+{
+	int ret;
+	u8 buf = BIT(2);
+	struct sde_hdcp_sink_addr sink = {"irq", 0x201, 1};
+
+	ret = sde_hdcp_1x_write(hdcp, &sink, &buf);
+	if (IS_ERR_VALUE(ret))
+		pr_err("error clearing irq_vector\n");
+}
+
+static int sde_hdcp_1x_cp_irq(void *input)
+{
+	struct sde_hdcp_1x *hdcp = (struct sde_hdcp_1x *)input;
+	u8 buf = 0;
+	int ret;
+
+	if (!hdcp) {
+		pr_err("invalid input\n");
+		goto irq_not_handled;
+	}
+
+	if (!sde_hdcp_1x_is_cp_irq_raised(hdcp)) {
+		SDE_HDCP_DEBUG("cp_irq not raised\n");
+		goto irq_not_handled;
+	}
+
+	ret = sde_hdcp_1x_read(hdcp, &hdcp->sink_addr.cp_irq_status,
+			&buf, false);
+	if (IS_ERR_VALUE(ret)) {
+		pr_err("error reading cp_irq_status\n");
+		goto irq_not_handled;
+	}
+
+	if ((buf & BIT(2)) || (buf & BIT(3))) {
+		pr_err("%s\n",
+			buf & BIT(2) ? "LINK_INTEGRITY_FAILURE" :
+				"REAUTHENTICATION_REQUEST");
+
+		hdcp->reauth = true;
+
+		if (!sde_hdcp_1x_state(HDCP_STATE_INACTIVE))
+			hdcp->hdcp_state = HDCP_STATE_AUTH_FAIL;
+
+		complete_all(&hdcp->sink_r0_available);
+		sde_hdcp_1x_update_auth_status(hdcp);
+	} else if (buf & BIT(1)) {
+		SDE_HDCP_DEBUG("R0' AVAILABLE\n");
+		hdcp->sink_r0_ready = true;
+		complete_all(&hdcp->sink_r0_available);
+	} else if ((buf & BIT(0))) {
+		SDE_HDCP_DEBUG("KSVs READY\n");
+
+		hdcp->ksv_ready = true;
+	} else {
+		SDE_HDCP_DEBUG("spurious interrupt\n");
+	}
+
+	sde_hdcp_1x_clear_cp_irq(hdcp);
+	return 0;
+
+irq_not_handled:
+	return -EINVAL;
+}
+
+static void sde_hdcp_1x_srm_cb(void *input)
+{
+	struct sde_hdcp_1x *hdcp = (struct sde_hdcp_1x *)input;
+	int rc = 0;
+
+	if (!hdcp) {
+		pr_err("invalid input\n");
+		return;
+	}
+
+	rc = sde_hdcp_1x_revoked_rcv_chk(hdcp);
+
+	if (rc) {
+		pr_err("receiver failed SRM check\n");
+		goto fail_noreauth;
+	}
+
+	/* If its not a repeater we are done */
+	if (hdcp->current_tp.ds_type != DS_REPEATER)
+		return;
+
+	/* Check the repeater KSV against SRM */
+	rc = sde_hdcp_1x_revoked_rpt_chk(hdcp);
+	if (rc) {
+		pr_err("repeater failed SRM check\n");
+		goto fail_noreauth;
+	}
+
+	return;
+
+fail_noreauth:
+	/* No reauth in case of SRM failure */
+	hdcp->hdcp_state = HDCP_STATE_AUTH_FAIL_NOREAUTH;
+	sde_hdcp_1x_update_auth_status(hdcp);
+}
+
+void *sde_hdcp_1x_init(struct sde_hdcp_init_data *init_data)
+{
+	struct sde_hdcp_1x *hdcp = NULL;
+	char name[20];
+	static struct sde_hdcp_ops ops = {
+		.isr = sde_hdcp_1x_isr,
+		.cp_irq = sde_hdcp_1x_cp_irq,
+		.reauthenticate = sde_hdcp_1x_reauthenticate,
+		.authenticate = sde_hdcp_1x_authenticate,
+		.off = sde_hdcp_1x_off
+	};
+
+	static struct hdcp_client_ops client_ops = {
+		.srm_cb = sde_hdcp_1x_srm_cb,
+	};
+
+	if (!init_data || !init_data->core_io || !init_data->qfprom_io ||
+		!init_data->mutex || !init_data->notify_status ||
+		!init_data->workq || !init_data->cb_data ||
+		!init_data->avmute_sink) {
+		pr_err("invalid input\n");
+		goto error;
+	}
+
+	if (init_data->sec_access && !init_data->hdcp_io) {
+		pr_err("hdcp_io required\n");
+		goto error;
+	}
+
+	hdcp = kzalloc(sizeof(*hdcp), GFP_KERNEL);
+	if (!hdcp)
+		goto error;
+
+	hdcp->init_data = *init_data;
+	hdcp->ops = &ops;
+
+	snprintf(name, sizeof(name), "hdcp_1x_%d",
+		hdcp->init_data.client_id);
+
+	hdcp->workq = create_workqueue(name);
+	if (!hdcp->workq) {
+		pr_err("Error creating workqueue\n");
+		kfree(hdcp);
+		goto error;
+	}
+
+	sde_hdcp_1x_update_client_reg_set(hdcp);
+
+	INIT_DELAYED_WORK(&hdcp->hdcp_auth_work, sde_hdcp_1x_auth_work);
+
+	hdcp->hdcp_state = HDCP_STATE_INACTIVE;
+	init_completion(&hdcp->r0_checked);
+	init_completion(&hdcp->sink_r0_available);
+
+	/* Register client ctx and the srm_cb with hdcp lib */
+	hdcp1_client_register((void *)hdcp, &client_ops);
+	SDE_HDCP_DEBUG("HDCP module initialized. HDCP_STATE=%s\n",
+		SDE_HDCP_STATE_NAME);
+
+	return (void *)hdcp;
+
+error:
+	return NULL;
+} /* hdcp_1x_init */
+
+struct sde_hdcp_ops *sde_hdcp_1x_start(void *input)
+{
+	struct sde_hdcp_1x *hdcp = (struct sde_hdcp_1x *)input;
+
+	return hdcp ? hdcp->ops : NULL;
+}
+
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/gpu/drm/msm/sde_hdcp.h	2019-01-22 16:16:23.523246588 +0100
@@ -0,0 +1,88 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __SDE_HDCP_H__
+#define __SDE_HDCP_H__
+
+#include <soc/qcom/scm.h>
+
+#include <linux/types.h>
+#include <linux/bitops.h>
+#include <linux/debugfs.h>
+#include <linux/of_device.h>
+#include <linux/i2c.h>
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_edid.h>
+#include "hdmi.h"
+#include "sde_kms.h"
+#include "sde_hdmi_util.h"
+
+#ifdef SDE_HDCP_DEBUG_ENABLE
+#define SDE_HDCP_DEBUG(fmt, args...)   SDE_ERROR(fmt, ##args)
+#else
+#define SDE_HDCP_DEBUG(fmt, args...)   SDE_DEBUG(fmt, ##args)
+#endif
+
+#define SDE_HDCP_SRM_FAIL 29
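+/* positive error code; callers negate it (-SDE_HDCP_SRM_FAIL) on SRM failure */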
+
+enum sde_hdcp_client_id {
+	HDCP_CLIENT_HDMI,
+	HDCP_CLIENT_DP,
+};
+
+enum sde_hdcp_states {
+	HDCP_STATE_INACTIVE,
+	HDCP_STATE_AUTHENTICATING,
+	HDCP_STATE_AUTHENTICATED,
+	HDCP_STATE_AUTH_FAIL,
+	HDCP_STATE_AUTH_FAIL_NOREAUTH,
+	HDCP_STATE_AUTH_ENC_NONE,
+	HDCP_STATE_AUTH_ENC_1X,
+	HDCP_STATE_AUTH_ENC_2P2
+};
+
+struct sde_hdcp_init_data {
+	struct dss_io_data *core_io;
+	struct dss_io_data *qfprom_io;
+	struct dss_io_data *hdcp_io;
+	struct mutex *mutex;
+	struct workqueue_struct *workq;
+	void *cb_data;
+	void (*notify_status)(void *cb_data, enum sde_hdcp_states status);
+	void (*avmute_sink)(void *cb_data);
+	struct sde_hdmi_tx_ddc_ctrl *ddc_ctrl;
+	u8 sink_rx_status;
+	u16 *version;
+	u32 phy_addr;
+	u32 hdmi_tx_ver;
+	bool sec_access;
+	enum sde_hdcp_client_id client_id;
+};
+
+struct sde_hdcp_ops {
+	int (*isr)(void *ptr);
+	int (*cp_irq)(void *ptr);
+	int (*reauthenticate)(void *input);
+	int (*authenticate)(void *hdcp_ctrl);
+	bool (*feature_supported)(void *input);
+	void (*off)(void *hdcp_ctrl);
+};
+
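+/*
+ * Typical client sequence (sketch): sde_hdcp_1x_init() once at setup,
+ * sde_hdcp_1x_start() to fetch the ops table, ops->authenticate() on
+ * connect, ops->isr()/ops->cp_irq() from the interrupt path, ops->off()
+ * on disconnect, and sde_hdcp_1x_deinit() at teardown.
+ */
+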
+void *sde_hdcp_1x_init(struct sde_hdcp_init_data *init_data);
+void sde_hdcp_1x_deinit(void *input);
+struct sde_hdcp_ops *sde_hdcp_1x_start(void *input);
+void *sde_hdmi_hdcp2p2_init(struct sde_hdcp_init_data *init_data);
+void sde_hdmi_hdcp2p2_deinit(void *input);
+const char *sde_hdcp_state_name(enum sde_hdcp_states hdcp_state);
+struct sde_hdcp_ops *sde_hdmi_hdcp2p2_start(void *input);
+#endif /* __SDE_HDCP_H__ */
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/gpu/drm/msm/sde_io_util.c	2019-01-22 16:16:23.527246624 +0100
@@ -0,0 +1,502 @@
+/* Copyright (c) 2012-2015, 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/regulator/consumer.h>
+#include <linux/delay.h>
+#include <linux/sde_io_util.h>
+
+#define MAX_I2C_CMDS  16
+
+void dss_reg_w(struct dss_io_data *io, u32 offset, u32 value, u32 debug)
+{
+	u32 in_val;
+
+	if (!io || !io->base) {
+		DEV_ERR("%pS->%s: invalid input\n",
+			__builtin_return_address(0), __func__);
+		return;
+	}
+
+	if (offset > io->len) {
+		DEV_ERR("%pS->%s: offset out of range\n",
+			__builtin_return_address(0), __func__);
+		return;
+	}
+
+	writel_relaxed(value, io->base + offset);
+	if (debug) {
+		in_val = readl_relaxed(io->base + offset);
+		DEV_DBG("[%08x] => %08x [%08x]\n",
+			(u32)(unsigned long)(io->base + offset),
+			value, in_val);
+	}
+} /* dss_reg_w */
+EXPORT_SYMBOL(dss_reg_w);
+
+u32 dss_reg_r(struct dss_io_data *io, u32 offset, u32 debug)
+{
+	u32 value;
+
+	if (!io || !io->base) {
+		DEV_ERR("%pS->%s: invalid input\n",
+			__builtin_return_address(0), __func__);
+		return -EINVAL;
+	}
+
+	if (offset > io->len) {
+		DEV_ERR("%pS->%s: offset out of range\n",
+			__builtin_return_address(0), __func__);
+		return -EINVAL;
+	}
+
+	value = readl_relaxed(io->base + offset);
+	if (debug)
+		DEV_DBG("[%08x] <= %08x\n",
+			(u32)(unsigned long)(io->base + offset), value);
+
+	return value;
+} /* dss_reg_r */
+EXPORT_SYMBOL(dss_reg_r);
+
+void dss_reg_dump(void __iomem *base, u32 length, const char *prefix,
+	u32 debug)
+{
+	if (debug)
+		print_hex_dump(KERN_INFO, prefix, DUMP_PREFIX_OFFSET, 32, 4,
+			(void *)base, length, false);
+} /* dss_reg_dump */
+EXPORT_SYMBOL(dss_reg_dump);
+
+static struct resource *msm_dss_get_res_byname(struct platform_device *pdev,
+	unsigned int type, const char *name)
+{
+	struct resource *res = NULL;
+
+	res = platform_get_resource_byname(pdev, type, name);
+	if (!res)
+		DEV_ERR("%s: '%s' resource not found\n", __func__, name);
+
+	return res;
+} /* msm_dss_get_res_byname */
+EXPORT_SYMBOL(msm_dss_get_res_byname);
+
+int msm_dss_ioremap_byname(struct platform_device *pdev,
+	struct dss_io_data *io_data, const char *name)
+{
+	struct resource *res = NULL;
+
+	if (!pdev || !io_data) {
+		DEV_ERR("%pS->%s: invalid input\n",
+			__builtin_return_address(0), __func__);
+		return -EINVAL;
+	}
+
+	res = msm_dss_get_res_byname(pdev, IORESOURCE_MEM, name);
+	if (!res) {
+		DEV_ERR("%pS->%s: '%s' msm_dss_get_res_byname failed\n",
+			__builtin_return_address(0), __func__, name);
+		return -ENODEV;
+	}
+
+	io_data->len = (u32)resource_size(res);
+	io_data->base = ioremap(res->start, io_data->len);
+	if (!io_data->base) {
+		DEV_ERR("%pS->%s: '%s' ioremap failed\n",
+			__builtin_return_address(0), __func__, name);
+		return -EIO;
+	}
+
+	return 0;
+} /* msm_dss_ioremap_byname */
+EXPORT_SYMBOL(msm_dss_ioremap_byname);
+
+void msm_dss_iounmap(struct dss_io_data *io_data)
+{
+	if (!io_data) {
+		DEV_ERR("%pS->%s: invalid input\n",
+			__builtin_return_address(0), __func__);
+		return;
+	}
+
+	if (io_data->base) {
+		iounmap(io_data->base);
+		io_data->base = NULL;
+	}
+	io_data->len = 0;
+} /* msm_dss_iounmap */
+EXPORT_SYMBOL(msm_dss_iounmap);
+
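+/*
+ * With config set, acquire each regulator and program LDO voltage
+ * ranges; with config clear, release them in reverse order.
+ */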
+int msm_dss_config_vreg(struct device *dev, struct dss_vreg *in_vreg,
+	int num_vreg, int config)
+{
+	int i = 0, rc = 0;
+	struct dss_vreg *curr_vreg = NULL;
+	enum dss_vreg_type type;
+
+	if (!in_vreg || !num_vreg)
+		return rc;
+
+	if (config) {
+		for (i = 0; i < num_vreg; i++) {
+			curr_vreg = &in_vreg[i];
+			curr_vreg->vreg = regulator_get(dev,
+				curr_vreg->vreg_name);
+			rc = PTR_RET(curr_vreg->vreg);
+			if (rc) {
+				DEV_ERR("%pS->%s: %s get failed. rc=%d\n",
+					 __builtin_return_address(0), __func__,
+					 curr_vreg->vreg_name, rc);
+				curr_vreg->vreg = NULL;
+				goto vreg_get_fail;
+			}
+			type = (regulator_count_voltages(curr_vreg->vreg) > 0)
+					? DSS_REG_LDO : DSS_REG_VS;
+			if (type == DSS_REG_LDO) {
+				rc = regulator_set_voltage(
+					curr_vreg->vreg,
+					curr_vreg->min_voltage,
+					curr_vreg->max_voltage);
+				if (rc < 0) {
+					DEV_ERR("%pS->%s: %s set vltg fail\n",
+						__builtin_return_address(0),
+						__func__,
+						curr_vreg->vreg_name);
+					goto vreg_set_voltage_fail;
+				}
+			}
+		}
+	} else {
+		for (i = num_vreg-1; i >= 0; i--) {
+			curr_vreg = &in_vreg[i];
+			if (curr_vreg->vreg) {
+				type = (regulator_count_voltages(
+					curr_vreg->vreg) > 0)
+					? DSS_REG_LDO : DSS_REG_VS;
+				if (type == DSS_REG_LDO) {
+					regulator_set_voltage(curr_vreg->vreg,
+						0, curr_vreg->max_voltage);
+				}
+				regulator_put(curr_vreg->vreg);
+				curr_vreg->vreg = NULL;
+			}
+		}
+	}
+	return 0;
+
+vreg_unconfig:
+	if (type == DSS_REG_LDO)
+		regulator_set_load(curr_vreg->vreg, 0);
+
+vreg_set_voltage_fail:
+	regulator_put(curr_vreg->vreg);
+	curr_vreg->vreg = NULL;
+
+vreg_get_fail:
+	for (i--; i >= 0; i--) {
+		curr_vreg = &in_vreg[i];
+		type = (regulator_count_voltages(curr_vreg->vreg) > 0)
+			? DSS_REG_LDO : DSS_REG_VS;
+		goto vreg_unconfig;
+	}
+	return rc;
+} /* msm_dss_config_vreg */
+EXPORT_SYMBOL(msm_dss_config_vreg);
+
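+/*
+ * Enable path, per regulator: optional pre_on_sleep, set enable_load,
+ * enable, optional post_on_sleep. The disable path and the error
+ * unwinding run in reverse order with disable_load applied.
+ */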
+int msm_dss_enable_vreg(struct dss_vreg *in_vreg, int num_vreg, int enable)
+{
+	int i = 0, rc = 0;
+	bool need_sleep;
+
+	if (enable) {
+		for (i = 0; i < num_vreg; i++) {
+			rc = PTR_RET(in_vreg[i].vreg);
+			if (rc) {
+				DEV_ERR("%pS->%s: %s regulator error. rc=%d\n",
+					__builtin_return_address(0), __func__,
+					in_vreg[i].vreg_name, rc);
+				goto vreg_set_opt_mode_fail;
+			}
+			need_sleep = !regulator_is_enabled(in_vreg[i].vreg);
+			if (in_vreg[i].pre_on_sleep && need_sleep)
+				usleep_range(in_vreg[i].pre_on_sleep * 1000,
+					in_vreg[i].pre_on_sleep * 1000);
+			rc = regulator_set_load(in_vreg[i].vreg,
+				in_vreg[i].enable_load);
+			if (rc < 0) {
+				DEV_ERR("%pS->%s: %s set opt m fail\n",
+					__builtin_return_address(0), __func__,
+					in_vreg[i].vreg_name);
+				goto vreg_set_opt_mode_fail;
+			}
+			rc = regulator_enable(in_vreg[i].vreg);
+			if (in_vreg[i].post_on_sleep && need_sleep)
+				usleep_range(in_vreg[i].post_on_sleep * 1000,
+					in_vreg[i].post_on_sleep * 1000);
+			if (rc < 0) {
+				DEV_ERR("%pS->%s: %s enable failed\n",
+					__builtin_return_address(0), __func__,
+					in_vreg[i].vreg_name);
+				goto disable_vreg;
+			}
+		}
+	} else {
+		for (i = num_vreg-1; i >= 0; i--) {
+			if (in_vreg[i].pre_off_sleep)
+				usleep_range(in_vreg[i].pre_off_sleep * 1000,
+					in_vreg[i].pre_off_sleep * 1000);
+			regulator_set_load(in_vreg[i].vreg,
+				in_vreg[i].disable_load);
+			regulator_disable(in_vreg[i].vreg);
+			if (in_vreg[i].post_off_sleep)
+				usleep_range(in_vreg[i].post_off_sleep * 1000,
+					in_vreg[i].post_off_sleep * 1000);
+		}
+	}
+	return rc;
+
+disable_vreg:
+	regulator_set_load(in_vreg[i].vreg, in_vreg[i].disable_load);
+
+vreg_set_opt_mode_fail:
+	for (i--; i >= 0; i--) {
+		if (in_vreg[i].pre_off_sleep)
+			usleep_range(in_vreg[i].pre_off_sleep * 1000,
+				in_vreg[i].pre_off_sleep * 1000);
+		regulator_set_load(in_vreg[i].vreg,
+			in_vreg[i].disable_load);
+		regulator_disable(in_vreg[i].vreg);
+		if (in_vreg[i].post_off_sleep)
+			usleep_range(in_vreg[i].post_off_sleep * 1000,
+				in_vreg[i].post_off_sleep * 1000);
+	}
+
+	return rc;
+} /* msm_dss_enable_vreg */
+EXPORT_SYMBOL(msm_dss_enable_vreg);
+
+int msm_dss_enable_gpio(struct dss_gpio *in_gpio, int num_gpio, int enable)
+{
+	int i = 0, rc = 0;
+
+	if (enable) {
+		for (i = 0; i < num_gpio; i++) {
+			DEV_DBG("%pS->%s: %s enable\n",
+				__builtin_return_address(0), __func__,
+				in_gpio[i].gpio_name);
+
+			rc = gpio_request(in_gpio[i].gpio,
+				in_gpio[i].gpio_name);
+			if (rc < 0) {
+				DEV_ERR("%pS->%s: %s enable failed\n",
+					__builtin_return_address(0), __func__,
+					in_gpio[i].gpio_name);
+				goto disable_gpio;
+			}
+			gpio_set_value(in_gpio[i].gpio, in_gpio[i].value);
+		}
+	} else {
+		for (i = num_gpio-1; i >= 0; i--) {
+			DEV_DBG("%pS->%s: %s disable\n",
+				__builtin_return_address(0), __func__,
+				in_gpio[i].gpio_name);
+			if (in_gpio[i].gpio)
+				gpio_free(in_gpio[i].gpio);
+		}
+	}
+	return rc;
+
+disable_gpio:
+	for (i--; i >= 0; i--)
+		if (in_gpio[i].gpio)
+			gpio_free(in_gpio[i].gpio);
+
+	return rc;
+} /* msm_dss_enable_gpio */
+EXPORT_SYMBOL(msm_dss_enable_gpio);
+
+void msm_dss_put_clk(struct dss_clk *clk_arry, int num_clk)
+{
+	int i;
+
+	for (i = num_clk - 1; i >= 0; i--) {
+		if (clk_arry[i].clk)
+			clk_put(clk_arry[i].clk);
+		clk_arry[i].clk = NULL;
+	}
+} /* msm_dss_put_clk */
+EXPORT_SYMBOL(msm_dss_put_clk);
+
+int msm_dss_get_clk(struct device *dev, struct dss_clk *clk_arry, int num_clk)
+{
+	int i, rc = 0;
+
+	for (i = 0; i < num_clk; i++) {
+		clk_arry[i].clk = clk_get(dev, clk_arry[i].clk_name);
+		rc = PTR_RET(clk_arry[i].clk);
+		if (rc) {
+			DEV_ERR("%pS->%s: '%s' get failed. rc=%d\n",
+				__builtin_return_address(0), __func__,
+				clk_arry[i].clk_name, rc);
+			goto error;
+		}
+	}
+
+	return rc;
+
+error:
+	msm_dss_put_clk(clk_arry, num_clk);
+
+	return rc;
+} /* msm_dss_get_clk */
+EXPORT_SYMBOL(msm_dss_get_clk);
+
+int msm_dss_clk_set_rate(struct dss_clk *clk_arry, int num_clk)
+{
+	int i, rc = 0;
+
+	for (i = 0; i < num_clk; i++) {
+		if (clk_arry[i].clk) {
+			if (clk_arry[i].type != DSS_CLK_AHB) {
+				DEV_DBG("%pS->%s: '%s' rate %ld\n",
+					__builtin_return_address(0), __func__,
+					clk_arry[i].clk_name,
+					clk_arry[i].rate);
+				rc = clk_set_rate(clk_arry[i].clk,
+					clk_arry[i].rate);
+				if (rc) {
+					DEV_ERR("%pS->%s: %s failed. rc=%d\n",
+						__builtin_return_address(0),
+						__func__,
+						clk_arry[i].clk_name, rc);
+					break;
+				}
+			}
+		} else {
+			DEV_ERR("%pS->%s: '%s' is not available\n",
+				__builtin_return_address(0), __func__,
+				clk_arry[i].clk_name);
+			rc = -EPERM;
+			break;
+		}
+	}
+
+	return rc;
+} /* msm_dss_clk_set_rate */
+EXPORT_SYMBOL(msm_dss_clk_set_rate);
+
+int msm_dss_enable_clk(struct dss_clk *clk_arry, int num_clk, int enable)
+{
+	int i, rc = 0;
+
+	if (enable) {
+		for (i = 0; i < num_clk; i++) {
+			DEV_DBG("%pS->%s: enable '%s'\n",
+				__builtin_return_address(0), __func__,
+				clk_arry[i].clk_name);
+			if (clk_arry[i].clk) {
+				rc = clk_prepare_enable(clk_arry[i].clk);
+				if (rc)
+					DEV_ERR("%pS->%s: %s en fail. rc=%d\n",
+						__builtin_return_address(0),
+						__func__,
+						clk_arry[i].clk_name, rc);
+			} else {
+				DEV_ERR("%pS->%s: '%s' is not available\n",
+					__builtin_return_address(0), __func__,
+					clk_arry[i].clk_name);
+				rc = -EPERM;
+			}
+
+			if (rc) {
+				/* unwind: disable the clocks enabled so far */
+				msm_dss_enable_clk(clk_arry, i, false);
+				break;
+			}
+		}
+	} else {
+		for (i = num_clk - 1; i >= 0; i--) {
+			DEV_DBG("%pS->%s: disable '%s'\n",
+				__builtin_return_address(0), __func__,
+				clk_arry[i].clk_name);
+
+			if (clk_arry[i].clk)
+				clk_disable_unprepare(clk_arry[i].clk);
+			else
+				DEV_ERR("%pS->%s: '%s' is not available\n",
+					__builtin_return_address(0), __func__,
+					clk_arry[i].clk_name);
+		}
+	}
+
+	return rc;
+} /* msm_dss_enable_clk */
+EXPORT_SYMBOL(msm_dss_enable_clk);
+
+
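+/*
+ * i2c helpers: slave_addr is passed as an 8-bit (write) address and
+ * shifted down to the 7-bit address expected by i2c_transfer(). Note
+ * that on success sde_i2c_byte_read() returns 0 while
+ * sde_i2c_byte_write() returns the number of transferred messages.
+ */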
+int sde_i2c_byte_read(struct i2c_client *client, uint8_t slave_addr,
+			uint8_t reg_offset, uint8_t *read_buf)
+{
+	struct i2c_msg msgs[2];
+	int ret = -1;
+
+	pr_debug("%s: reading from slave_addr=[%x] and offset=[%x]\n",
+		 __func__, slave_addr, reg_offset);
+
+	msgs[0].addr = slave_addr >> 1;
+	msgs[0].flags = 0;
+	msgs[0].buf = &reg_offset;
+	msgs[0].len = 1;
+
+	msgs[1].addr = slave_addr >> 1;
+	msgs[1].flags = I2C_M_RD;
+	msgs[1].buf = read_buf;
+	msgs[1].len = 1;
+
+	ret = i2c_transfer(client->adapter, msgs, 2);
+	if (ret < 1) {
+		pr_err("%s: I2C READ FAILED=[%d]\n", __func__, ret);
+		return -EACCES;
+	}
+	pr_debug("%s: i2c buf is [%x]\n", __func__, *read_buf);
+	return 0;
+}
+EXPORT_SYMBOL(sde_i2c_byte_read);
+
+int sde_i2c_byte_write(struct i2c_client *client, uint8_t slave_addr,
+			uint8_t reg_offset, uint8_t *value)
+{
+	struct i2c_msg msgs[1];
+	uint8_t data[2];
+	int status = -EACCES;
+
+	pr_debug("%s: writing to slave_addr=[%x] at offset=[%x]\n",
+		 __func__, slave_addr, reg_offset);
+
+	data[0] = reg_offset;
+	data[1] = *value;
+
+	msgs[0].addr = slave_addr >> 1;
+	msgs[0].flags = 0;
+	msgs[0].len = 2;
+	msgs[0].buf = data;
+
+	status = i2c_transfer(client->adapter, msgs, 1);
+	if (status < 1) {
+		pr_err("%s: I2C WRITE FAILED=[%d]\n", __func__, status);
+		return -EACCES;
+	}
+	pr_debug("%s: I2C write status=%x\n", __func__, status);
+	return status;
+}
+EXPORT_SYMBOL(sde_i2c_byte_write);
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/gpu/drm/msm/sde_power_handle.c	2019-01-22 16:16:23.527246624 +0100
@@ -0,0 +1,925 @@
+/* Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt)	"[drm:%s:%d]: " fmt, __func__, __LINE__
+
+#include <linux/clk.h>
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <linux/string.h>
+#include <linux/of_address.h>
+#include <linux/slab.h>
+#include <linux/mutex.h>
+#include <linux/of_platform.h>
+
+#include <linux/msm-bus.h>
+#include <linux/msm-bus-board.h>
+#include <linux/sde_io_util.h>
+
+#include "sde_power_handle.h"
+#include "sde_trace.h"
+
+struct sde_power_client *sde_power_client_create(
+	struct sde_power_handle *phandle, char *client_name)
+{
+	struct sde_power_client *client;
+	static u32 id;
+
+	if (!client_name || !phandle) {
+		pr_err("client name is null or invalid power data\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	client = kzalloc(sizeof(struct sde_power_client), GFP_KERNEL);
+	if (!client)
+		return ERR_PTR(-ENOMEM);
+
+	mutex_lock(&phandle->phandle_lock);
+	strlcpy(client->name, client_name, MAX_CLIENT_NAME_LEN);
+	client->usecase_ndx = VOTE_INDEX_DISABLE;
+	client->id = id;
+	pr_debug("client %s created:%pK id :%d\n", client_name,
+		client, id);
+	id++;
+	list_add(&client->list, &phandle->power_client_clist);
+	mutex_unlock(&phandle->phandle_lock);
+
+	return client;
+}
+
+void sde_power_client_destroy(struct sde_power_handle *phandle,
+	struct sde_power_client *client)
+{
+	if (!client || !phandle) {
+		pr_err("invalid client or power handle\n");
+	} else {
+		pr_debug("bus vote client %s destroyed:%pK id:%u\n",
+			client->name, client, client->id);
+		mutex_lock(&phandle->phandle_lock);
+		list_del_init(&client->list);
+		mutex_unlock(&phandle->phandle_lock);
+		kfree(client);
+	}
+}
+
+static int sde_power_parse_dt_supply(struct platform_device *pdev,
+				struct dss_module_power *mp)
+{
+	int i = 0, rc = 0;
+	u32 tmp = 0;
+	struct device_node *of_node = NULL, *supply_root_node = NULL;
+	struct device_node *supply_node = NULL;
+
+	if (!pdev || !mp) {
+		pr_err("invalid input param pdev:%pK mp:%pK\n", pdev, mp);
+		return -EINVAL;
+	}
+
+	of_node = pdev->dev.of_node;
+
+	mp->num_vreg = 0;
+	supply_root_node = of_get_child_by_name(of_node,
+						"qcom,platform-supply-entries");
+	if (!supply_root_node) {
+		pr_debug("no supply entry present\n");
+		return rc;
+	}
+
+	for_each_child_of_node(supply_root_node, supply_node)
+		mp->num_vreg++;
+
+	if (mp->num_vreg == 0) {
+		pr_debug("no vreg\n");
+		return rc;
+	}
+
+	pr_debug("vreg found. count=%d\n", mp->num_vreg);
+	mp->vreg_config = devm_kzalloc(&pdev->dev, sizeof(struct dss_vreg) *
+						mp->num_vreg, GFP_KERNEL);
+	if (!mp->vreg_config) {
+		rc = -ENOMEM;
+		return rc;
+	}
+
+	for_each_child_of_node(supply_root_node, supply_node) {
+
+		const char *st = NULL;
+
+		rc = of_property_read_string(supply_node,
+						"qcom,supply-name", &st);
+		if (rc) {
+			pr_err("error reading name. rc=%d\n", rc);
+			goto error;
+		}
+
+		strlcpy(mp->vreg_config[i].vreg_name, st,
+					sizeof(mp->vreg_config[i].vreg_name));
+
+		rc = of_property_read_u32(supply_node,
+					"qcom,supply-min-voltage", &tmp);
+		if (rc) {
+			pr_err("error reading min volt. rc=%d\n", rc);
+			goto error;
+		}
+		mp->vreg_config[i].min_voltage = tmp;
+
+		rc = of_property_read_u32(supply_node,
+					"qcom,supply-max-voltage", &tmp);
+		if (rc) {
+			pr_err("error reading max volt. rc=%d\n", rc);
+			goto error;
+		}
+		mp->vreg_config[i].max_voltage = tmp;
+
+		rc = of_property_read_u32(supply_node,
+					"qcom,supply-enable-load", &tmp);
+		if (rc) {
+			pr_err("error reading enable load. rc=%d\n", rc);
+			goto error;
+		}
+		mp->vreg_config[i].enable_load = tmp;
+
+		rc = of_property_read_u32(supply_node,
+					"qcom,supply-disable-load", &tmp);
+		if (rc) {
+			pr_err("error reading disable load. rc=%d\n", rc);
+			goto error;
+		}
+		mp->vreg_config[i].disable_load = tmp;
+
+		rc = of_property_read_u32(supply_node,
+					"qcom,supply-pre-on-sleep", &tmp);
+		if (rc)
+			pr_debug("error reading supply pre sleep value. rc=%d\n",
+							rc);
+
+		mp->vreg_config[i].pre_on_sleep = (!rc ? tmp : 0);
+
+		rc = of_property_read_u32(supply_node,
+					"qcom,supply-pre-off-sleep", &tmp);
+		if (rc)
+			pr_debug("error reading supply pre sleep value. rc=%d\n",
+							rc);
+
+		mp->vreg_config[i].pre_off_sleep = (!rc ? tmp : 0);
+
+		rc = of_property_read_u32(supply_node,
+					"qcom,supply-post-on-sleep", &tmp);
+		if (rc)
+			pr_debug("error reading supply post sleep value. rc=%d\n",
+							rc);
+
+		mp->vreg_config[i].post_on_sleep = (!rc ? tmp : 0);
+
+		rc = of_property_read_u32(supply_node,
+					"qcom,supply-post-off-sleep", &tmp);
+		if (rc)
+			pr_debug("error reading supply post sleep value. rc=%d\n",
+							rc);
+
+		mp->vreg_config[i].post_off_sleep = (!rc ? tmp : 0);
+
+		pr_debug("%s min=%d, max=%d, enable=%d, disable=%d, preonsleep=%d, postonsleep=%d, preoffsleep=%d, postoffsleep=%d\n",
+					mp->vreg_config[i].vreg_name,
+					mp->vreg_config[i].min_voltage,
+					mp->vreg_config[i].max_voltage,
+					mp->vreg_config[i].enable_load,
+					mp->vreg_config[i].disable_load,
+					mp->vreg_config[i].pre_on_sleep,
+					mp->vreg_config[i].post_on_sleep,
+					mp->vreg_config[i].pre_off_sleep,
+					mp->vreg_config[i].post_off_sleep);
+		++i;
+
+		rc = 0;
+	}
+
+	return rc;
+
+error:
+	if (mp->vreg_config) {
+		devm_kfree(&pdev->dev, mp->vreg_config);
+		mp->vreg_config = NULL;
+		mp->num_vreg = 0;
+	}
+
+	return rc;
+}
+
+static int sde_power_parse_dt_clock(struct platform_device *pdev,
+					struct dss_module_power *mp)
+{
+	u32 i = 0;
+	int rc = 0;
+	const char *clock_name;
+	u32 clock_rate = 0;
+	u32 clock_max_rate = 0;
+	int num_clk = 0;
+
+	if (!pdev || !mp) {
+		pr_err("invalid input param pdev:%pK mp:%pK\n", pdev, mp);
+		return -EINVAL;
+	}
+
+	mp->num_clk = 0;
+	num_clk = of_property_count_strings(pdev->dev.of_node,
+							"clock-names");
+	if (num_clk <= 0) {
+		pr_debug("clocks are not defined\n");
+		goto clk_err;
+	}
+
+	mp->num_clk = num_clk;
+	mp->clk_config = devm_kzalloc(&pdev->dev,
+			sizeof(struct dss_clk) * num_clk, GFP_KERNEL);
+	if (!mp->clk_config) {
+		rc = -ENOMEM;
+		mp->num_clk = 0;
+		goto clk_err;
+	}
+
+	for (i = 0; i < num_clk; i++) {
+		of_property_read_string_index(pdev->dev.of_node, "clock-names",
+							i, &clock_name);
+		strlcpy(mp->clk_config[i].clk_name, clock_name,
+				sizeof(mp->clk_config[i].clk_name));
+
+		of_property_read_u32_index(pdev->dev.of_node, "clock-rate",
+							i, &clock_rate);
+		mp->clk_config[i].rate = clock_rate;
+
+		if (!clock_rate)
+			mp->clk_config[i].type = DSS_CLK_AHB;
+		else
+			mp->clk_config[i].type = DSS_CLK_PCLK;
+
+		clock_max_rate = 0;
+		of_property_read_u32_index(pdev->dev.of_node, "clock-max-rate",
+							i, &clock_max_rate);
+		mp->clk_config[i].max_rate = clock_max_rate;
+	}
+
+clk_err:
+	return rc;
+}
+
+#ifdef CONFIG_QCOM_BUS_SCALING
+
+#define MAX_AXI_PORT_COUNT 3
+
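+/*
+ * Distribute the aggregated AB/IB quotas over the AXI ports: when NRT
+ * ports exist, RT and NRT bandwidth are split over their respective
+ * ports; otherwise the total is spread evenly over all ports. The
+ * vote is skipped when nothing changed since the last request.
+ */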
+static int _sde_power_data_bus_set_quota(
+		struct sde_power_data_bus_handle *pdbus,
+		u64 ab_quota_rt, u64 ab_quota_nrt,
+		u64 ib_quota_rt, u64 ib_quota_nrt)
+{
+	int new_uc_idx;
+	u64 ab_quota[MAX_AXI_PORT_COUNT] = {0};
+	u64 ib_quota[MAX_AXI_PORT_COUNT] = {0};
+	int rc;
+
+	if (pdbus->data_bus_hdl < 1) {
+		pr_err("invalid bus handle %d\n", pdbus->data_bus_hdl);
+		return -EINVAL;
+	}
+
+	if (!ab_quota_rt && !ab_quota_nrt && !ib_quota_rt && !ib_quota_nrt)  {
+		new_uc_idx = 0;
+	} else {
+		int i;
+		struct msm_bus_vectors *vect = NULL;
+		struct msm_bus_scale_pdata *bw_table =
+			pdbus->data_bus_scale_table;
+		u32 nrt_axi_port_cnt = pdbus->nrt_axi_port_cnt;
+		u32 total_axi_port_cnt = pdbus->axi_port_cnt;
+		u32 rt_axi_port_cnt = total_axi_port_cnt - nrt_axi_port_cnt;
+		int match_cnt = 0;
+
+		if (!bw_table || !total_axi_port_cnt ||
+		    total_axi_port_cnt > MAX_AXI_PORT_COUNT) {
+			pr_err("invalid input\n");
+			return -EINVAL;
+		}
+
+		if (pdbus->bus_channels) {
+			ib_quota_rt = div_u64(ib_quota_rt,
+						pdbus->bus_channels);
+			ib_quota_nrt = div_u64(ib_quota_nrt,
+						pdbus->bus_channels);
+		}
+
+		if (nrt_axi_port_cnt) {
+
+			ab_quota_rt = div_u64(ab_quota_rt, rt_axi_port_cnt);
+			ab_quota_nrt = div_u64(ab_quota_nrt, nrt_axi_port_cnt);
+
+			for (i = 0; i < total_axi_port_cnt; i++) {
+				if (i < rt_axi_port_cnt) {
+					ab_quota[i] = ab_quota_rt;
+					ib_quota[i] = ib_quota_rt;
+				} else {
+					ab_quota[i] = ab_quota_nrt;
+					ib_quota[i] = ib_quota_nrt;
+				}
+			}
+		} else {
+			ab_quota[0] = div_u64(ab_quota_rt + ab_quota_nrt,
+					total_axi_port_cnt);
+			ib_quota[0] = ib_quota_rt + ib_quota_nrt;
+
+			for (i = 1; i < total_axi_port_cnt; i++) {
+				ab_quota[i] = ab_quota[0];
+				ib_quota[i] = ib_quota[0];
+			}
+		}
+
+		for (i = 0; i < total_axi_port_cnt; i++) {
+			vect = &bw_table->usecase
+				[pdbus->curr_bw_uc_idx].vectors[i];
+			/* avoid performing updates for small changes */
+			if ((ab_quota[i] == vect->ab) &&
+				(ib_quota[i] == vect->ib))
+				match_cnt++;
+		}
+
+		if (match_cnt == total_axi_port_cnt) {
+			pr_debug("skip BW vote\n");
+			return 0;
+		}
+
+		new_uc_idx = (pdbus->curr_bw_uc_idx %
+			(bw_table->num_usecases - 1)) + 1;
+
+		for (i = 0; i < total_axi_port_cnt; i++) {
+			vect = &bw_table->usecase[new_uc_idx].vectors[i];
+			vect->ab = ab_quota[i];
+			vect->ib = ib_quota[i];
+
+			pr_debug("uc_idx=%d %s path idx=%d ab=%llu ib=%llu\n",
+				new_uc_idx, (i < rt_axi_port_cnt) ? "rt" : "nrt"
+				, i, vect->ab, vect->ib);
+		}
+	}
+	pdbus->curr_bw_uc_idx = new_uc_idx;
+	pdbus->ao_bw_uc_idx = new_uc_idx;
+
+	if ((pdbus->bus_ref_cnt == 0) && pdbus->curr_bw_uc_idx) {
+		rc = 0;
+	} else { /* vote BW if bus_ref_cnt > 0 or uc_idx is zero */
+		SDE_ATRACE_BEGIN("msm_bus_scale_req");
+		rc = msm_bus_scale_client_update_request(pdbus->data_bus_hdl,
+			new_uc_idx);
+		SDE_ATRACE_END("msm_bus_scale_req");
+	}
+	return rc;
+}
+
+int sde_power_data_bus_set_quota(struct sde_power_handle *phandle,
+		struct sde_power_client *pclient,
+		int bus_client, u64 ab_quota, u64 ib_quota)
+{
+	int rc = 0;
+	int i;
+	u64 total_ab_rt = 0, total_ib_rt = 0;
+	u64 total_ab_nrt = 0, total_ib_nrt = 0;
+	struct sde_power_client *client;
+
+	if (!phandle || !pclient ||
+			bus_client >= SDE_POWER_HANDLE_DATA_BUS_CLIENT_MAX) {
+		pr_err("invalid parameters\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&phandle->phandle_lock);
+
+	pclient->ab[bus_client] = ab_quota;
+	pclient->ib[bus_client] = ib_quota;
+	trace_sde_perf_update_bus(bus_client, ab_quota, ib_quota);
+
+	list_for_each_entry(client, &phandle->power_client_clist, list) {
+		for (i = 0; i < SDE_POWER_HANDLE_DATA_BUS_CLIENT_MAX; i++) {
+			if (i == SDE_POWER_HANDLE_DATA_BUS_CLIENT_NRT) {
+				total_ab_nrt += client->ab[i];
+				total_ib_nrt += client->ib[i];
+			} else {
+				total_ab_rt += client->ab[i];
+				total_ib_rt = max(total_ib_rt, client->ib[i]);
+			}
+		}
+	}
+
+	rc = _sde_power_data_bus_set_quota(&phandle->data_bus_handle,
+			total_ab_rt, total_ab_nrt,
+			total_ib_rt, total_ib_nrt);
+
+	mutex_unlock(&phandle->phandle_lock);
+
+	return rc;
+}
+
+static void sde_power_data_bus_unregister(
+		struct sde_power_data_bus_handle *pdbus)
+{
+	if (pdbus->data_bus_hdl) {
+		msm_bus_scale_unregister_client(pdbus->data_bus_hdl);
+		pdbus->data_bus_hdl = 0;
+	}
+}
+
+static int sde_power_data_bus_parse(struct platform_device *pdev,
+	struct sde_power_data_bus_handle *pdbus)
+{
+	struct device_node *node;
+	int rc = 0;
+	int paths;
+
+	pdbus->bus_channels = 1;
+	rc = of_property_read_u32(pdev->dev.of_node,
+		"qcom,sde-dram-channels", &pdbus->bus_channels);
+	if (rc) {
+		pr_debug("number of channels property not specified\n");
+		rc = 0;
+	}
+
+	pdbus->nrt_axi_port_cnt = 0;
+	rc = of_property_read_u32(pdev->dev.of_node,
+			"qcom,sde-num-nrt-paths",
+			&pdbus->nrt_axi_port_cnt);
+	if (rc) {
+		pr_debug("number of axi port property not specified\n");
+		rc = 0;
+	}
+
+	node = of_get_child_by_name(pdev->dev.of_node, "qcom,sde-data-bus");
+	if (node) {
+		rc = of_property_read_u32(node,
+				"qcom,msm-bus,num-paths", &paths);
+		if (rc) {
+			pr_err("Error. qcom,msm-bus,num-paths not found\n");
+			return rc;
+		}
+		pdbus->axi_port_cnt = paths;
+
+		pdbus->data_bus_scale_table =
+				msm_bus_pdata_from_node(pdev, node);
+		if (IS_ERR_OR_NULL(pdbus->data_bus_scale_table)) {
+			pr_err("data bus scale table parsing failed\n");
+			rc = PTR_ERR(pdbus->data_bus_scale_table) ?: -EINVAL;
+			goto end;
+		}
+		pdbus->data_bus_hdl = msm_bus_scale_register_client(
+				pdbus->data_bus_scale_table);
+		if (!pdbus->data_bus_hdl) {
+			pr_err("data_bus_client register failed\n");
+			rc = -EINVAL;
+			goto end;
+		}
+		pr_debug("register data_bus_hdl=%x\n", pdbus->data_bus_hdl);
+
+		/*
+		 * Following call will not result in actual vote rather update
+		 * the current index and ab/ib value. When continuous splash
+		 * is enabled, actual vote will happen when splash handoff is
+		 * done.
+		 */
+		return _sde_power_data_bus_set_quota(pdbus,
+				SDE_POWER_HANDLE_DATA_BUS_AB_QUOTA,
+				SDE_POWER_HANDLE_DATA_BUS_AB_QUOTA,
+				SDE_POWER_HANDLE_DATA_BUS_IB_QUOTA,
+				SDE_POWER_HANDLE_DATA_BUS_IB_QUOTA);
+	}
+
+end:
+	return rc;
+}
+
+static int sde_power_reg_bus_parse(struct platform_device *pdev,
+	struct sde_power_handle *phandle)
+{
+	struct device_node *node;
+	struct msm_bus_scale_pdata *bus_scale_table;
+	int rc = 0;
+
+	node = of_get_child_by_name(pdev->dev.of_node, "qcom,sde-reg-bus");
+	if (node) {
+		bus_scale_table = msm_bus_pdata_from_node(pdev, node);
+		if (IS_ERR_OR_NULL(bus_scale_table)) {
+			pr_err("reg bus handle parsing failed\n");
+			rc = PTR_ERR(bus_scale_table) ?: -EINVAL;
+			goto end;
+		}
+		phandle->reg_bus_hdl = msm_bus_scale_register_client(
+			      bus_scale_table);
+		if (!phandle->reg_bus_hdl) {
+			pr_err("reg_bus_client register failed\n");
+			rc = -EINVAL;
+			goto end;
+		}
+		pr_debug("register reg_bus_hdl=%x\n", phandle->reg_bus_hdl);
+	}
+
+end:
+	return rc;
+}
+
+static void sde_power_reg_bus_unregister(u32 reg_bus_hdl)
+{
+	if (reg_bus_hdl)
+		msm_bus_scale_unregister_client(reg_bus_hdl);
+}
+
+static int sde_power_reg_bus_update(u32 reg_bus_hdl, u32 usecase_ndx)
+{
+	int rc = 0;
+
+	if (reg_bus_hdl)
+		rc = msm_bus_scale_client_update_request(reg_bus_hdl,
+								usecase_ndx);
+	if (rc)
+		pr_err("failed to set reg bus vote rc=%d\n", rc);
+
+	return rc;
+}
+#else
+static int sde_power_data_bus_parse(struct platform_device *pdev,
+	struct sde_power_data_bus_handle *pdbus)
+{
+	return 0;
+}
+
+static void sde_power_data_bus_unregister(
+		struct sde_power_data_bus_handle *pdbus)
+{
+}
+
+int sde_power_data_bus_set_quota(struct sde_power_handle *phandle,
+		struct sde_power_client *pclient,
+		int bus_client, u64 ab_quota, u64 ib_quota)
+{
+	return 0;
+}
+
+static int sde_power_reg_bus_parse(struct platform_device *pdev,
+	struct sde_power_handle *phandle)
+{
+	return 0;
+}
+
+static void sde_power_reg_bus_unregister(u32 reg_bus_hdl)
+{
+}
+
+static int sde_power_reg_bus_update(u32 reg_bus_hdl, u32 usecase_ndx)
+{
+	return 0;
+}
+#endif
+
+void sde_power_data_bus_bandwidth_ctrl(struct sde_power_handle *phandle,
+		struct sde_power_client *pclient, int enable)
+{
+	struct sde_power_data_bus_handle *pdbus;
+	int changed = 0;
+
+	if (!phandle || !pclient) {
+		pr_err("invalid power/client handle\n");
+		return;
+	}
+
+	pdbus = &phandle->data_bus_handle;
+
+	mutex_lock(&phandle->phandle_lock);
+	if (enable) {
+		if (pdbus->bus_ref_cnt == 0)
+			changed++;
+		pdbus->bus_ref_cnt++;
+	} else {
+		if (pdbus->bus_ref_cnt) {
+			pdbus->bus_ref_cnt--;
+			if (pdbus->bus_ref_cnt == 0)
+				changed++;
+		} else {
+			pr_debug("Can not be turned off\n");
+		}
+	}
+
+	pr_debug("%pS: task:%s bw_cnt=%d changed=%d enable=%d\n",
+		__builtin_return_address(0), current->group_leader->comm,
+		pdbus->bus_ref_cnt, changed, enable);
+
+	if (changed) {
+		SDE_ATRACE_INT("data_bus_ctrl", enable);
+
+		if (!enable) {
+			if (!pdbus->handoff_pending) {
+				msm_bus_scale_client_update_request(
+						pdbus->data_bus_hdl, 0);
+				pdbus->ao_bw_uc_idx = 0;
+			}
+		} else {
+			msm_bus_scale_client_update_request(
+					pdbus->data_bus_hdl,
+					pdbus->curr_bw_uc_idx);
+		}
+	}
+
+	mutex_unlock(&phandle->phandle_lock);
+}
+
+int sde_power_resource_init(struct platform_device *pdev,
+	struct sde_power_handle *phandle)
+{
+	int rc = 0;
+	struct dss_module_power *mp;
+
+	if (!phandle || !pdev) {
+		pr_err("invalid input param\n");
+		rc = -EINVAL;
+		goto end;
+	}
+	mp = &phandle->mp;
+	phandle->dev = &pdev->dev;
+
+	rc = sde_power_parse_dt_clock(pdev, mp);
+	if (rc) {
+		pr_err("device clock parsing failed\n");
+		goto end;
+	}
+
+	rc = sde_power_parse_dt_supply(pdev, mp);
+	if (rc) {
+		pr_err("device vreg supply parsing failed\n");
+		goto parse_vreg_err;
+	}
+
+	rc = msm_dss_config_vreg(&pdev->dev,
+				mp->vreg_config, mp->num_vreg, 1);
+	if (rc) {
+		pr_err("vreg config failed rc=%d\n", rc);
+		goto vreg_err;
+	}
+
+	rc = msm_dss_get_clk(&pdev->dev, mp->clk_config, mp->num_clk);
+	if (rc) {
+		pr_err("clock get failed rc=%d\n", rc);
+		goto clk_err;
+	}
+
+	rc = msm_dss_clk_set_rate(mp->clk_config, mp->num_clk);
+	if (rc) {
+		pr_err("clock set rate failed rc=%d\n", rc);
+		goto bus_err;
+	}
+
+	rc = sde_power_reg_bus_parse(pdev, phandle);
+	if (rc) {
+		pr_err("register bus parse failed rc=%d\n", rc);
+		goto bus_err;
+	}
+
+	rc = sde_power_data_bus_parse(pdev, &phandle->data_bus_handle);
+	if (rc) {
+		pr_err("data bus parse failed rc=%d\n", rc);
+		goto data_bus_err;
+	}
+
+	INIT_LIST_HEAD(&phandle->power_client_clist);
+	mutex_init(&phandle->phandle_lock);
+
+	return rc;
+
+data_bus_err:
+	sde_power_reg_bus_unregister(phandle->reg_bus_hdl);
+bus_err:
+	msm_dss_put_clk(mp->clk_config, mp->num_clk);
+clk_err:
+	msm_dss_config_vreg(&pdev->dev, mp->vreg_config, mp->num_vreg, 0);
+vreg_err:
+	devm_kfree(&pdev->dev, mp->vreg_config);
+	mp->num_vreg = 0;
+parse_vreg_err:
+	devm_kfree(&pdev->dev, mp->clk_config);
+	mp->num_clk = 0;
+end:
+	return rc;
+}
+
+void sde_power_resource_deinit(struct platform_device *pdev,
+	struct sde_power_handle *phandle)
+{
+	struct dss_module_power *mp;
+
+	if (!phandle || !pdev) {
+		pr_err("invalid input param\n");
+		return;
+	}
+	mp = &phandle->mp;
+
+	sde_power_data_bus_unregister(&phandle->data_bus_handle);
+
+	sde_power_reg_bus_unregister(phandle->reg_bus_hdl);
+
+	msm_dss_put_clk(mp->clk_config, mp->num_clk);
+
+	msm_dss_config_vreg(&pdev->dev, mp->vreg_config, mp->num_vreg, 0);
+
+	if (mp->clk_config)
+		devm_kfree(&pdev->dev, mp->clk_config);
+
+	if (mp->vreg_config)
+		devm_kfree(&pdev->dev, mp->vreg_config);
+
+	mp->num_vreg = 0;
+	mp->num_clk = 0;
+}
+
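+/*
+ * Enable/disable aggregates the votes of all registered clients and
+ * applies the highest usecase index. Resources are brought up in the
+ * order vregs -> reg bus vote -> clocks, and torn down in the
+ * opposite order.
+ */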
+int sde_power_resource_enable(struct sde_power_handle *phandle,
+	struct sde_power_client *pclient, bool enable)
+{
+	int rc = 0;
+	bool changed = false;
+	u32 max_usecase_ndx = VOTE_INDEX_DISABLE, prev_usecase_ndx;
+	struct sde_power_client *client;
+	struct dss_module_power *mp;
+
+	if (!phandle || !pclient) {
+		pr_err("invalid input argument\n");
+		return -EINVAL;
+	}
+
+	mp = &phandle->mp;
+
+	mutex_lock(&phandle->phandle_lock);
+	if (enable)
+		pclient->refcount++;
+	else if (pclient->refcount)
+		pclient->refcount--;
+
+	if (pclient->refcount)
+		pclient->usecase_ndx = VOTE_INDEX_LOW;
+	else
+		pclient->usecase_ndx = VOTE_INDEX_DISABLE;
+
+	list_for_each_entry(client, &phandle->power_client_clist, list) {
+		if (client->usecase_ndx < VOTE_INDEX_MAX &&
+		    client->usecase_ndx > max_usecase_ndx)
+			max_usecase_ndx = client->usecase_ndx;
+	}
+
+	if (phandle->current_usecase_ndx != max_usecase_ndx) {
+		changed = true;
+		prev_usecase_ndx = phandle->current_usecase_ndx;
+		phandle->current_usecase_ndx = max_usecase_ndx;
+	}
+
+	pr_debug("%pS: changed=%d current idx=%d request client %s id:%u enable:%d refcount:%d\n",
+		__builtin_return_address(0), changed, max_usecase_ndx,
+		pclient->name, pclient->id, enable, pclient->refcount);
+
+	if (!changed)
+		goto end;
+
+	if (enable) {
+		rc = msm_dss_enable_vreg(mp->vreg_config, mp->num_vreg, enable);
+		if (rc) {
+			pr_err("failed to enable vregs rc=%d\n", rc);
+			goto vreg_err;
+		}
+
+		rc = sde_power_reg_bus_update(phandle->reg_bus_hdl,
+							max_usecase_ndx);
+		if (rc) {
+			pr_err("failed to set reg bus vote rc=%d\n", rc);
+			goto reg_bus_hdl_err;
+		}
+
+		rc = msm_dss_enable_clk(mp->clk_config, mp->num_clk, enable);
+		if (rc) {
+			pr_err("clock enable failed rc:%d\n", rc);
+			goto clk_err;
+		}
+	} else {
+		msm_dss_enable_clk(mp->clk_config, mp->num_clk, enable);
+
+		sde_power_reg_bus_update(phandle->reg_bus_hdl,
+							max_usecase_ndx);
+
+		msm_dss_enable_vreg(mp->vreg_config, mp->num_vreg, enable);
+	}
+
+end:
+	mutex_unlock(&phandle->phandle_lock);
+	return rc;
+
+clk_err:
+	sde_power_reg_bus_update(phandle->reg_bus_hdl, prev_usecase_ndx);
+reg_bus_hdl_err:
+	msm_dss_enable_vreg(mp->vreg_config, mp->num_vreg, 0);
+vreg_err:
+	phandle->current_usecase_ndx = prev_usecase_ndx;
+	mutex_unlock(&phandle->phandle_lock);
+	return rc;
+}
+
+int sde_power_clk_set_rate(struct sde_power_handle *phandle, char *clock_name,
+	u64 rate)
+{
+	int i, rc = -EINVAL;
+	struct dss_module_power *mp;
+
+	if (!phandle) {
+		pr_err("invalid input power handle\n");
+		return -EINVAL;
+	}
+	mp = &phandle->mp;
+
+	for (i = 0; i < mp->num_clk; i++) {
+		if (!strcmp(mp->clk_config[i].clk_name, clock_name)) {
+			if (mp->clk_config[i].max_rate &&
+					(rate > mp->clk_config[i].max_rate))
+				rate = mp->clk_config[i].max_rate;
+
+			mp->clk_config[i].rate = rate;
+			rc = msm_dss_clk_set_rate(mp->clk_config, mp->num_clk);
+			break;
+		}
+	}
+
+	return rc;
+}
+
+u64 sde_power_clk_get_rate(struct sde_power_handle *phandle, char *clock_name)
+{
+	int i;
+	struct dss_module_power *mp;
+	u64 rate = -EINVAL;
+
+	if (!phandle) {
+		pr_err("invalid input power handle\n");
+		return -EINVAL;
+	}
+	mp = &phandle->mp;
+
+	for (i = 0; i < mp->num_clk; i++) {
+		if (!strcmp(mp->clk_config[i].clk_name, clock_name)) {
+			rate = clk_get_rate(mp->clk_config[i].clk);
+			break;
+		}
+	}
+
+	return rate;
+}
+
+u64 sde_power_clk_get_max_rate(struct sde_power_handle *phandle,
+		char *clock_name)
+{
+	int i;
+	struct dss_module_power *mp;
+	u64 rate = 0;
+
+	if (!phandle) {
+		pr_err("invalid input power handle\n");
+		return 0;
+	}
+	mp = &phandle->mp;
+
+	for (i = 0; i < mp->num_clk; i++) {
+		if (!strcmp(mp->clk_config[i].clk_name, clock_name)) {
+			rate = mp->clk_config[i].max_rate;
+			break;
+		}
+	}
+
+	return rate;
+}
+
+struct clk *sde_power_clk_get_clk(struct sde_power_handle *phandle,
+		char *clock_name)
+{
+	int i;
+	struct dss_module_power *mp;
+	struct clk *clk = NULL;
+
+	if (!phandle) {
+		pr_err("invalid input power handle\n");
+		return NULL;
+	}
+	mp = &phandle->mp;
+
+	for (i = 0; i < mp->num_clk; i++) {
+		if (!strcmp(mp->clk_config[i].clk_name, clock_name)) {
+			clk = mp->clk_config[i].clk;
+			break;
+		}
+	}
+
+	return clk;
+}
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/gpu/drm/msm/sde_power_handle.h	2019-01-22 16:16:23.527246624 +0100
@@ -0,0 +1,229 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _SDE_POWER_HANDLE_H_
+#define _SDE_POWER_HANDLE_H_
+
+#define MAX_CLIENT_NAME_LEN 128
+
+#define SDE_POWER_HANDLE_DATA_BUS_IB_QUOTA 2000000000
+#define SDE_POWER_HANDLE_DATA_BUS_AB_QUOTA 2000000000
+
+/**
+ * mdss_bus_vote_type: register bus vote type
+ * VOTE_INDEX_DISABLE: removes the client vote
+ * VOTE_INDEX_LOW: keeps the lowest vote for register bus
+ * VOTE_INDEX_MAX: invalid
+ */
+enum mdss_bus_vote_type {
+	VOTE_INDEX_DISABLE,
+	VOTE_INDEX_LOW,
+	VOTE_INDEX_MAX,
+};
+
+/**
+ * enum sde_power_handle_data_bus_client - type of axi bus clients
+ * @SDE_POWER_HANDLE_DATA_BUS_CLIENT_RT: core real-time bus client
+ * @SDE_POWER_HANDLE_DATA_BUS_CLIENT_NRT: core non-real-time bus client
+ * @SDE_POWER_HANDLE_DATA_BUS_CLIENT_MAX: maximum number of bus client type
+ */
+enum sde_power_handle_data_bus_client {
+	SDE_POWER_HANDLE_DATA_BUS_CLIENT_RT,
+	SDE_POWER_HANDLE_DATA_BUS_CLIENT_NRT,
+	SDE_POWER_HANDLE_DATA_BUS_CLIENT_MAX
+};
+
+/**
+ * struct sde_power_client: stores the power client for sde driver
+ * @name:	name of the client
+ * @usecase_ndx: current reg bus vote type
+ * @refcount:	current refcount if multiple modules are using the
+ *              same client for enable/disable. Power module will
+ *              aggregate the refcount and vote accordingly for this
+ *              client.
+ * @id:		assigned during create. helps for debugging.
+ * @list:	list to attach power handle master list
+ * @ab:         arbitrated bandwidth for each bus client
+ * @ib:         instantaneous bandwidth for each bus client
+ */
+struct sde_power_client {
+	char name[MAX_CLIENT_NAME_LEN];
+	short usecase_ndx;
+	short refcount;
+	u32 id;
+	struct list_head list;
+	u64 ab[SDE_POWER_HANDLE_DATA_BUS_CLIENT_MAX];
+	u64 ib[SDE_POWER_HANDLE_DATA_BUS_CLIENT_MAX];
+};
+
+/**
+ * struct sde_power_data_handle: power handle struct for data bus
+ * @data_bus_scale_table: pointer to bus scaling table
+ * @data_bus_hdl: current data bus handle
+ * @axi_port_cnt: total number of axi ports
+ * @nrt_axi_port_cnt: number of nrt axi ports
+ * @bus_channels: number of memory bus channels
+ * @curr_bw_uc_idx: current use case index of data bus
+ * @ao_bw_uc_idx: active only use case index of data bus
+ * @bus_ref_cnt: reference count of data bus enable request
+ * @handoff_pending: True to indicate if bootloader hand-over is pending
+ */
+struct sde_power_data_bus_handle {
+	struct msm_bus_scale_pdata *data_bus_scale_table;
+	u32 data_bus_hdl;
+	u32 axi_port_cnt;
+	u32 nrt_axi_port_cnt;
+	u32 bus_channels;
+	u32 curr_bw_uc_idx;
+	u32 ao_bw_uc_idx;
+	u32 bus_ref_cnt;
+	int handoff_pending;
+};
+
+/**
+ * struct sde_power_handle: power handle main struct
+ * @mp:		module power for clock and regulator
+ * @power_client_clist: master list to store all clients
+ * @phandle_lock: lock to synchronize the enable/disable
+ * @dev: pointer to device structure
+ * @current_usecase_ndx: current usecase index
+ * @reg_bus_hdl: current register bus handle
+ * @data_bus_handle: context structure for data bus control
+ */
+struct sde_power_handle {
+	struct dss_module_power mp;
+	struct list_head power_client_clist;
+	struct mutex phandle_lock;
+	struct device *dev;
+	u32 current_usecase_ndx;
+	u32 reg_bus_hdl;
+	struct sde_power_data_bus_handle data_bus_handle;
+};
+
+/**
+ * sde_power_resource_init() - initializes the sde power handle
+ * @pdev:   platform device to search the power resources
+ * @pdata:  power handle to store the power resources
+ *
+ * Return: error code.
+ */
+int sde_power_resource_init(struct platform_device *pdev,
+	struct sde_power_handle *pdata);
+
+/**
+ * sde_power_resource_deinit() - release the sde power handle
+ * @pdev:   platform device for power resources
+ * @pdata:  power handle containing the resources
+ *
+ * Return: none.
+ */
+void sde_power_resource_deinit(struct platform_device *pdev,
+	struct sde_power_handle *pdata);
+
+/**
+ * sde_power_client_create() - create the client on power handle
+ * @pdata:  power handle containing the resources
+ * @client_name: new client name for registration
+ *
+ * Return: pointer to the new client, or ERR_PTR on failure.
+ */
+struct sde_power_client *sde_power_client_create(struct sde_power_handle *pdata,
+	char *client_name);
+
+/**
+ * sde_power_client_destroy() - destroy the client on power handle
+ * @phandle:  power handle containing the resources
+ * @client: client to destroy
+ *
+ * Return: none
+ */
+void sde_power_client_destroy(struct sde_power_handle *phandle,
+	struct sde_power_client *client);
+
+/**
+ * sde_power_resource_enable() - enable/disable the power resources
+ * @pdata:  power handle containing the resources
+ * @client: client information to enable/disable its vote
+ * @enable: boolean request for enable/disable
+ *
+ * Return: error code.
+ */
+int sde_power_resource_enable(struct sde_power_handle *pdata,
+	struct sde_power_client *pclient, bool enable);
+
+/**
+ * sde_power_clk_set_rate() - set the clock rate
+ * @pdata:  power handle containing the resources
+ * @clock_name: clock name which needs rate update.
+ * @rate:       Requested rate.
+ *
+ * Return: error code.
+ */
+int sde_power_clk_set_rate(struct sde_power_handle *pdata, char *clock_name,
+	u64 rate);
+
+/**
+ * sde_power_clk_get_rate() - get the clock rate
+ * @pdata:  power handle containing the resources
+ * @clock_name: clock name to get the rate
+ *
+ * Return: current clock rate
+ */
+u64 sde_power_clk_get_rate(struct sde_power_handle *pdata, char *clock_name);
+
+/**
+ * sde_power_clk_get_max_rate() - get the maximum clock rate
+ * @pdata:  power handle containing the resources
+ * @clock_name: clock name to get the max rate.
+ *
+ * Return: maximum clock rate or 0 if not found.
+ */
+u64 sde_power_clk_get_max_rate(struct sde_power_handle *pdata,
+		char *clock_name);
+
+/**
+ * sde_power_clk_get_clk() - get the clock
+ * @pdata:  power handle containing the resources
+ * @clock_name: clock name to get the clk pointer.
+ *
+ * Return: Pointer to clock
+ */
+struct clk *sde_power_clk_get_clk(struct sde_power_handle *phandle,
+		char *clock_name);
+
+/**
+ * sde_power_data_bus_set_quota() - set data bus quota for power client
+ * @phandle:  power handle containing the resources
+ * @client: client information to set quota
+ * @bus_client: real-time or non-real-time bus client
+ * @ab_quota: arbitrated bus bandwidth
+ * @ib_quota: instantaneous bus bandwidth
+ *
+ * Return: zero if success, or error code otherwise
+ */
+int sde_power_data_bus_set_quota(struct sde_power_handle *phandle,
+		struct sde_power_client *pclient,
+		int bus_client, u64 ab_quota, u64 ib_quota);
+
+/**
+ * sde_power_data_bus_bandwidth_ctrl() - control data bus bandwidth enable
+ * @phandle:  power handle containing the resources
+ * @client: client information to bandwidth control
+ * @enable: true to enable bandwidth for data bus
+ *
+ * Return: none
+ */
+void sde_power_data_bus_bandwidth_ctrl(struct sde_power_handle *phandle,
+		struct sde_power_client *pclient, int enable);
+
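+/*
+ * Typical call sequence (illustrative sketch only, with an arbitrary
+ * client name):
+ *
+ *	sde_power_resource_init(pdev, &phandle);
+ *	client = sde_power_client_create(&phandle, "my_client");
+ *	sde_power_resource_enable(&phandle, client, true);
+ *	...
+ *	sde_power_resource_enable(&phandle, client, false);
+ *	sde_power_client_destroy(&phandle, client);
+ *	sde_power_resource_deinit(pdev, &phandle);
+ */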
+#endif /* _SDE_POWER_HANDLE_H_ */
diff -Nruw linux-4.4.115-fbx/drivers/gpu/drm/msm-hyp./Kconfig linux-4.4.115-fbx/drivers/gpu/drm/msm-hyp/Kconfig
--- linux-4.4.115-fbx/drivers/gpu/drm/msm-hyp./Kconfig	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/gpu/drm/msm-hyp/Kconfig	2019-10-29 09:26:23.621202963 +0100
@@ -0,0 +1,15 @@
+#
+# Drm MSM hypervisor configuration
+#
+# This driver provides support for user-space DRM masters
+#
+config DRM_MSM_HYP
+	tristate "MSM DRM HYP"
+	depends on DRM
+	depends on MSM_GVM_QUIN
+	depends on OF
+	default y
+	help
+	  DRM/KMS driver for MSM/Snapdragon in guest VM mode. This driver
+	  registers with the DRM framework to create the /dev/dri/card0 path
+	  and issues events to user-space listeners.
diff -Nruw linux-4.4.115-fbx/drivers/gpu/drm/msm-hyp./Makefile linux-4.4.115-fbx/drivers/gpu/drm/msm-hyp/Makefile
--- linux-4.4.115-fbx/drivers/gpu/drm/msm-hyp./Makefile	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/gpu/drm/msm-hyp/Makefile	2019-01-22 16:16:23.479246189 +0100
@@ -0,0 +1,4 @@
+ccflags-y := -Iinclude/drm
+
+obj-y := \
+	msm_drv_hyp.o
diff -Nruw linux-4.4.115-fbx/drivers/gpu/msm./Kconfig linux-4.4.115-fbx/drivers/gpu/msm/Kconfig
--- linux-4.4.115-fbx/drivers/gpu/msm./Kconfig	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/gpu/msm/Kconfig	2019-01-22 16:16:23.711248290 +0100
@@ -0,0 +1,47 @@
+config QCOM_KGSL
+	tristate "Qualcomm 3D Graphics driver"
+	default n
+	depends on ARCH_QCOM
+	select GENERIC_ALLOCATOR
+	select FW_LOADER
+	select PM_DEVFREQ
+	select DEVFREQ_GOV_SIMPLE_ONDEMAND
+	select DEVFREQ_GOV_PERFORMANCE
+	select DEVFREQ_GOV_QCOM_ADRENO_TZ
+	select DEVFREQ_GOV_QCOM_GPUBW_MON
+	select ONESHOT_SYNC if SYNC
+	---help---
+	  3D graphics driver for the Adreno family of GPUs from Qualcomm.
+	  Required to use hardware accelerated OpenGL and compute on Qualcomm
+	  targets.
+
+config QCOM_KGSL_CFF_DUMP
+	bool "Enable KGSL Common File Format (CFF) Dump Feature [Use with caution]"
+	default n
+	depends on QCOM_KGSL
+	select RELAY
+	---help---
+	  This is an analysis and diagnostic feature only. It should only be
+	  turned on during KGSL GPU diagnostics, as it slows down KGSL
+	  performance significantly, hence *do not use in production builds*.
+	  When enabled, CFF Dump is on at boot. It can be turned off at runtime
+	  via 'echo 0 > /d/kgsl/cff_dump'.  The log can be captured via
+	  /d/kgsl-cff/cpu[0|1].
+
+config QCOM_KGSL_CFF_DUMP_NO_CONTEXT_MEM_DUMP
+	bool "When selected will disable KGSL CFF Dump for context switches"
+	default n
+	depends on QCOM_KGSL_CFF_DUMP
+	---help---
+	  Dumping all the memory for every context switch can produce very
+	  large log files; turn this feature on to reduce their size.
+
+config QCOM_ADRENO_DEFAULT_GOVERNOR
+	string "devfreq governor for the adreno core"
+	default "msm-adreno-tz" if DEVFREQ_GOV_QCOM_ADRENO_TZ
+	default "simple_ondemand"
+	depends on QCOM_KGSL
+
+config QCOM_KGSL_IOMMU
+	bool
+	default y if QCOM_KGSL && (MSM_IOMMU || ARM_SMMU)
diff -Nruw linux-4.4.115-fbx/drivers/gpu/msm./Makefile linux-4.4.115-fbx/drivers/gpu/msm/Makefile
--- linux-4.4.115-fbx/drivers/gpu/msm./Makefile	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/gpu/msm/Makefile	2019-01-22 16:16:23.711248290 +0100
@@ -0,0 +1,55 @@
+ccflags-y := -Idrivers/staging/android
+
+msm_kgsl_core-y = \
+	kgsl.o \
+	kgsl_trace.o \
+	kgsl_drawobj.o \
+	kgsl_ioctl.o \
+	kgsl_sharedmem.o \
+	kgsl_pwrctrl.o \
+	kgsl_pwrscale.o \
+	kgsl_mmu.o \
+	kgsl_snapshot.o \
+	kgsl_events.o \
+	kgsl_pool.o
+
+CFLAGS_kgsl_trace.o = -I$(src)
+
+msm_kgsl_core-$(CONFIG_QCOM_KGSL_IOMMU) += kgsl_iommu.o
+msm_kgsl_core-$(CONFIG_DEBUG_FS) += kgsl_debugfs.o
+msm_kgsl_core-$(CONFIG_QCOM_KGSL_CFF_DUMP) += kgsl_cffdump.o
+msm_kgsl_core-$(CONFIG_SYNC) += kgsl_sync.o
+msm_kgsl_core-$(CONFIG_COMPAT) += kgsl_compat.o
+
+msm_adreno-y += \
+	adreno_ioctl.o \
+	adreno_ringbuffer.o \
+	adreno_drawctxt.o \
+	adreno_dispatch.o \
+	adreno_snapshot.o \
+	adreno_coresight.o \
+	adreno_trace.o \
+	adreno_a3xx.o \
+	adreno_a4xx.o \
+	adreno_a5xx.o \
+	adreno_a3xx_snapshot.o \
+	adreno_a4xx_snapshot.o \
+	adreno_a5xx_snapshot.o \
+	adreno_a4xx_preempt.o \
+	adreno_a5xx_preempt.o \
+	adreno_sysfs.o \
+	adreno.o \
+	adreno_cp_parser.o \
+	adreno_perfcounter.o
+
+CFLAGS_adreno_trace.o = -I$(src)
+
+msm_adreno-$(CONFIG_QCOM_KGSL_IOMMU) += adreno_iommu.o
+msm_adreno-$(CONFIG_DEBUG_FS) += adreno_debugfs.o adreno_profile.o
+msm_adreno-$(CONFIG_COMPAT) += adreno_compat.o
+
+msm_kgsl_core-objs = $(msm_kgsl_core-y)
+msm_adreno-objs = $(msm_adreno-y)
+
+obj-$(CONFIG_QCOM_KGSL) += msm_kgsl_core.o
+obj-$(CONFIG_QCOM_KGSL) += msm_adreno.o
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/hid/hid-fbx-remote-audio.c	2019-01-22 16:16:23.735248507 +0100
@@ -0,0 +1,925 @@
+#include <linux/input.h>
+#include <linux/slab.h>
+#include <linux/hid.h>
+#include <linux/module.h>
+#include <linux/vmalloc.h>
+
+#include <sound/initval.h>
+#include <sound/core.h>
+#include <sound/pcm.h>
+
+#include "hid-ids.h"
+
+/*
+ * report ids for the Freebox BT remote
+ */
+enum {
+	REMOTE_REPORT_KEYS = 1,
+	REMOTE_REPORT_AUDIO_DATA,
+	REMOTE_REPORT_AUDIO_ENABLE,
+	REMOTE_REPORT_AUDIO_CONFIG,
+	REMOTE_REPORT_BATTERY = 0x20,
+};
+
+/*
+ * alsa period size (in frames). This is used as a jitter buffer: we
+ * don't start raising period-elapsed interrupts until we have at
+ * least a period worth of data from BT.
+ *
+ * make sure the rate is a multiple of it (8000 % PERIOD_SIZE == 0)
+ */
+#define PERIOD_SIZE		800
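+/* at 8 kHz, one period thus corresponds to 100 ms of audio */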
+
+/*
+ * alsa buffer size (in frames)
+ */
+#define BUFFER_SIZE		(PERIOD_SIZE * 8)
+
+#define REPEAT_LAST_FRAME
+
+#define TMP_SAMPLE_COUNT_MAX 200
+
+/*
+ * BT data internal ring buffer size
+ */
+#define BT_RING_SIZE		(8000 * 2 * sizeof (s16))
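+/* i.e. two seconds worth of 8 kHz mono s16 samples */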
+
+
+struct adpcm_state {
+	s16	reference;
+	u8	index;
+};
+
+struct ring_buffer {
+	u8		*data;
+	unsigned int	size;
+	u8		*start;
+	u8		*end;
+};
+
+struct fbx_remote {
+	/* report used to enable/disable audio */
+	struct hid_report *enable_report;
+
+	/* connect time */
+	unsigned long connect_time;
+
+	/* alsa device, registered at probe time */
+	struct snd_card *card;
+	struct snd_pcm *pcm;
+
+	/* timer used to report period interrupts */
+	struct timer_list pcm_timer;
+	bool buffering;
+
+	/* virtual hardware pointer in dma buffer */
+	snd_pcm_uframes_t pcm_cur_pos;
+
+	/* r->substream is made visible when it is safe to push data
+	 * into it; only the BT data handler uses it */
+	spinlock_t pcm_lock;
+	struct snd_pcm_substream *substream;
+
+	/* link to associated hid dev */
+	struct hid_device *hdev;
+
+	/* temp buffer to do ADPCM => PCM */
+	struct ring_buffer bt_pcm_ring;
+	s16 bt_pcm_tmp_data[TMP_SAMPLE_COUNT_MAX];
+	s16 last_seqno;
+};
+
+/*
+ * ring buffer helpers: start/end wrap within data[]. One byte is
+ * always kept free so that start == end unambiguously means "empty"
+ * (hence the "- 1" in ring_room() and ring_linear_room()).
+ */
+static int ring_init(struct ring_buffer *rb, size_t size)
+{
+	rb->data = vmalloc(size);
+	if (!rb->data)
+		return 1;
+
+	rb->start = rb->end = rb->data;
+	rb->size = size;
+
+	return 0;
+}
+
+/*
+ * release ring buffer memory
+ */
+static void ring_release(struct ring_buffer *rb)
+{
+	vfree(rb->data);
+	rb->data = NULL;
+}
+
+/*
+ * empty ring buffer
+ */
+static void ring_discard(struct ring_buffer *rb)
+{
+	rb->start = rb->end = rb->data;
+}
+
+/*
+ * return amount of data in ring buffer
+ */
+static unsigned int ring_data_size(const struct ring_buffer *rb)
+{
+	if (rb->start > rb->end)
+		return rb->size - (rb->start - rb->end);
+	else
+		return rb->end - rb->start;
+}
+
+/*
+ * return room available in ring buffer
+ */
+static unsigned int ring_room(const struct ring_buffer *rb)
+{
+	if (rb->start > rb->end)
+		return rb->start - rb->end - 1;
+	else
+		return rb->size - (rb->end - rb->start) - 1;
+}
+
+/*
+ * return amount of linear room in ring buffer
+ */
+static unsigned int ring_linear_room(const struct ring_buffer *rb)
+{
+	unsigned int room;
+
+	if (rb->start > rb->end)
+		return rb->start - rb->end - 1;
+
+	room = rb->data + rb->size - rb->end;
+	if (rb->start == rb->data)
+		room--;
+
+	return room;
+}
+
+/*
+ * add data to ring buffer
+ */
+static void ring_put(struct ring_buffer *rb, unsigned int size)
+{
+	rb->end += size;
+	if (rb->end >= rb->data + rb->size)
+		rb->end -= rb->size;
+}
+
+/*
+ * append data to ring buffer tail
+ */
+static void ring_append(struct ring_buffer *rb, const void *buf,
+			unsigned int size)
+{
+	size_t todo, done;
+
+	todo = ring_linear_room(rb);
+	if (todo > size)
+		todo = size;
+
+	memcpy(rb->end, buf, todo);
+
+	done = todo;
+	buf += done;
+	size -= done;
+
+	if (size) {
+		todo = rb->start - rb->data;
+		if (todo > size)
+			todo = size;
+
+		memcpy(rb->data, buf, todo);
+		done += todo;
+	}
+
+	ring_put(rb, done);
+}
+
+static void ring_fill(struct ring_buffer *rb, u8 data, unsigned int size)
+{
+	size_t todo, done;
+
+	todo = ring_linear_room(rb);
+	if (todo > size)
+		todo = size;
+
+	memset(rb->end, data, todo);
+
+	done = todo;
+	size -= done;
+
+	if (size) {
+		todo = rb->start - rb->data;
+		if (todo > size)
+			todo = size;
+
+		memset(rb->data, data, todo);
+		done += todo;
+	}
+
+	ring_put(rb, done);
+}
+
+/*
+ * remove data from ring buffer head
+ */
+static void ring_pull(struct ring_buffer *rb, unsigned int size)
+{
+	rb->start += size;
+	if (rb->start >= rb->data + rb->size)
+		rb->start -= rb->size;
+}
+
+/*
+ * copy data from ring buffer head into user buffer; returns the
+ * number of bytes copied, or -EFAULT
+ */
+static ssize_t ring_extract(struct ring_buffer *rb, void __user *buf,
+			    unsigned int size)
+{
+	unsigned int todo, done;
+	int ret;
+
+	if (rb->end > rb->start)
+		todo = rb->end - rb->start;
+	else
+		todo = rb->data + rb->size - rb->start;
+
+	if (todo > size)
+		todo = size;
+
+	ret = copy_to_user(buf, rb->start, todo);
+	if (ret)
+		return -EFAULT;
+
+	done = todo;
+	buf += done;
+	size -= done;
+
+	if (!size || rb->end > rb->start) {
+		ring_pull(rb, done);
+		return done;
+	}
+
+	todo = rb->end - rb->data;
+	if (todo > size)
+		todo = size;
+
+	if (copy_to_user(buf, rb->data, todo))
+		return -EFAULT;
+
+	done += todo;
+
+	ring_pull(rb, done);
+	return done;
+}
+
+/*
+ * IMA ADPCM decoding: standard index adjustment and step-size tables
+ */
+static const s8 index_table[] = {
+	-1, -1, -1, -1, 2, 4, 6, 8,
+	-1, -1, -1, -1, 2, 4, 6, 8,
+};
+
+static const s16 step_size_table[] = {
+	7, 8, 9, 10, 11, 12, 13, 14, 16, 17, 19, 21, 23, 25,
+	28, 31, 34, 37, 41, 45, 50, 55, 60, 66, 73, 80, 88, 97, 107, 118,
+	130, 143, 157, 173, 190, 209, 230, 253, 279, 307, 337, 371, 408, 449,
+	494, 544, 598, 658, 724, 796, 876, 963, 1060, 1166, 1282, 1411, 1552,
+	1707, 1878, 2066, 2272, 2499, 2749, 3024, 3327, 3660, 4026, 4428,
+	4871, 5358, 5894, 6484, 7132, 7845, 8630, 9493, 10442, 11487, 12635,
+	13899, 15289, 16818, 18500, 20350, 22385, 24623, 27086, 29794, 32767
+};
+
+static void adpcm_state_set(struct adpcm_state *state,
+			s16 reference, s8 index)
+{
+	state->reference = reference;
+	state->index = index;
+}
+
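+/*
+ * decode one 4-bit IMA ADPCM nibble: bit 3 is the sign, bits 2..0
+ * scale the current step size. The reconstructed difference is
+ * step * (b2 + b1/2 + b0/4 + 1/8), added to or subtracted from the
+ * previous sample, and the step index is adjusted via index_table.
+ */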
+static s16 adpcm_sample_decode(struct adpcm_state *state, uint8_t coded)
+{
+	s32 step_size = step_size_table[state->index];
+	s32 difference = 0, next;
+
+	if (coded & 4)
+		difference += step_size;
+	if (coded & 2)
+		difference += step_size >> 1;
+	if (coded & 1)
+		difference += step_size >> 2;
+	difference += step_size >> 3;
+
+	/* next sample calculation */
+
+	if (coded & 8)
+		next = state->reference - difference;
+	else
+		next = state->reference + difference;
+
+	if (next < -32767)
+		next = -32767;
+	if (next > 32767)
+		next = 32767;
+
+	state->reference = next;
+
+	/* next predictor (step index) calculation */
+
+	next = state->index + index_table[coded];
+	if (next < 0)
+		next = 0;
+	else if (next >= ARRAY_SIZE(step_size_table))
+		next = ARRAY_SIZE(step_size_table) - 1;
+	state->index = next;
+
+	return state->reference;
+}
+
+static void adpcm_decode(struct adpcm_state *state, s16 *output,
+			 const u8 *input, size_t sample_count)
+{
+	unsigned int i;
+
+	for (i = 0; i < sample_count; i += 2) {
+		output[i] = adpcm_sample_decode(state, input[i / 2] >> 4);
+		output[i + 1] = adpcm_sample_decode(state, input[i / 2] & 0xf);
+	}
+}
+
+/*
+ * send the HID audio_enable command
+ */
+static int hid_set_audio_enabled(struct fbx_remote *r, bool en)
+{
+	struct hid_report *rep = r->enable_report;
+	int ret;
+	uint8_t val[1];
+
+	hid_dbg(r->hdev, "hid_set_audio_enabled: %d\n", en);
+	val[0] = en ? 1 : 0;
+
+	ret = hid_hw_raw_request(r->hdev, REMOTE_REPORT_AUDIO_ENABLE,
+		val, 1, HID_FEATURE_REPORT, HID_REQ_SET_REPORT);
+
+	hid_dbg(r->hdev, "hid_hw_raw_request ret: %d\n", ret);
+	if (ret < 0) {
+		hid_err(r->hdev, "hid_set_audio_enabled %d failed: %d\n", en, ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+/*
+ * (re)arm the timer used to report period interrupts
+ */
+static void timer_arm(struct fbx_remote *r, bool no_delay)
+{
+	r->pcm_timer.expires = jiffies;
+	if (!no_delay)
+		r->pcm_timer.expires += HZ / (8000 / PERIOD_SIZE);
+	add_timer(&r->pcm_timer);
+}
+
+/*
+ * stop the timer used to report period interrupts
+ */
+static void timer_stop(struct fbx_remote *r)
+{
+	del_timer_sync(&r->pcm_timer);
+}
+
+/*
+ * called on incoming ADPCM data from BT: decode to PCM, conceal
+ * missed frames based on the sequence number and queue the samples
+ * into the ring buffer
+ */
+static int handle_adpcm_data(struct fbx_remote *r,
+			s16 reference, u8 index,
+			const u8 *adpcm, snd_pcm_sframes_t sample_count,
+			u8 seqno, u8 seqno_mask)
+{
+	struct snd_pcm_runtime *runtime;
+	struct adpcm_state adpcm_state;
+	snd_pcm_sframes_t avail;
+	s16 *pcm = r->bt_pcm_tmp_data;
+	s16 repeated = 0;
+
+	spin_lock_bh(&r->pcm_lock);
+
+	if (!r->substream)
+		goto out_unlock;
+
+	runtime = r->substream->runtime;
+
+	if (sample_count > ARRAY_SIZE(r->bt_pcm_tmp_data))
+		sample_count = ARRAY_SIZE(r->bt_pcm_tmp_data);
+
+	adpcm_state_set(&adpcm_state, reference, index);
+	adpcm_decode(&adpcm_state, pcm, adpcm, sample_count);
+
+	seqno &= seqno_mask;
+
+	/* Reinit seqno on first packet */
+	if (r->last_seqno == -1)
+		r->last_seqno = (seqno - 1) & seqno_mask;
+
+	do {
+		/* Consider no missed frames if ring is full */
+		if (ring_room(&r->bt_pcm_ring) < sample_count * sizeof(s16)) {
+			hid_warn(r->hdev, "overflow, cannot write audio data\n");
+			r->last_seqno = seqno;
+			break;
+		}
+
+		r->last_seqno = (r->last_seqno + 1) & seqno_mask;
+		repeated++;
+
+#ifndef REPEAT_LAST_FRAME
+		if (r->last_seqno != seqno)
+			ring_fill(&r->bt_pcm_ring, 0, sample_count * sizeof(s16));
+		else
+#endif
+			ring_append(&r->bt_pcm_ring, pcm, sample_count * sizeof(s16));
+	} while (r->last_seqno != seqno);
+
+	if (repeated > 1)
+		hid_dbg(r->hdev, "%d frames repeated %d times from seq %d\n",
+			(int)sample_count, repeated, seqno);
+
+	/* schedule first period interrupt asap when we have enough data */
+	avail = ring_data_size(&r->bt_pcm_ring) / sizeof(s16);
+	if (r->buffering && avail >= runtime->period_size) {
+		timer_arm(r, false);
+		r->buffering = false;
+	}
+
+out_unlock:
+	spin_unlock_bh(&r->pcm_lock);
+	return 0;
+}
+
+/*
+ * callback of timer used to report period interrupts
+ */
+static void remote_pcm_timer_callback(unsigned long data)
+{
+	struct snd_pcm_substream *substream = (struct snd_pcm_substream *)data;
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	struct fbx_remote *r = snd_pcm_substream_chip(substream);
+
+	timer_arm(r, false);
+
+	r->pcm_cur_pos += runtime->period_size;
+	if (r->pcm_cur_pos >= runtime->buffer_size)
+		r->pcm_cur_pos -= runtime->buffer_size;
+
+	snd_pcm_period_elapsed(r->substream);
+}
+
+/*
+ * dequeue decoded samples from the BT ring buffer into the ALSA
+ * buffer, zero-padding when fewer samples are available than asked
+ */
+static int remote_pcm_copy(struct snd_pcm_substream *substream,
+			   int channel, snd_pcm_uframes_t pos,
+			   void __user *dst, snd_pcm_uframes_t count)
+{
+	struct fbx_remote *r = snd_pcm_substream_chip(substream);
+	snd_pcm_uframes_t to_copy, avail;
+	int ret;
+
+	/* dequeue requested data from BT ring buffer */
+	avail = ring_data_size(&r->bt_pcm_ring) / sizeof (s16);
+	to_copy = count;
+	if (to_copy > avail)
+		to_copy = avail;
+
+	ret = ring_extract(&r->bt_pcm_ring, dst, to_copy * sizeof (s16));
+	if (ret < 0)
+		return ret;
+
+	if (count > to_copy) {
+		/* zero pad */
+		if (clear_user(dst + to_copy * sizeof (s16),
+			       (count - to_copy) * sizeof (s16)))
+			return -EFAULT;
+	}
+
+	return 0;
+}
+
+/*
+ * reset the buffering state and ask the remote to start streaming
+ */
+static int remote_pcm_prepare(struct snd_pcm_substream *substream)
+{
+	struct fbx_remote *r = snd_pcm_substream_chip(substream);
+
+	/* stop timer if it was started */
+	timer_stop(r);
+
+	/* reset virtual pointer */
+	ring_discard(&r->bt_pcm_ring);
+	r->pcm_cur_pos = 0;
+
+	/* go back into buffering state */
+	r->buffering = true;
+	r->last_seqno = -1;
+
+	/* queue report to enable audio on remote */
+	hid_set_audio_enabled(r, true);
+
+	return 0;
+}
+
+/*
+ * report the virtual hardware pointer position
+ */
+static snd_pcm_uframes_t
+remote_pcm_pointer(struct snd_pcm_substream *substream)
+{
+	struct fbx_remote *r = snd_pcm_substream_chip(substream);
+	return r->pcm_cur_pos;
+}
+
+/*
+ * publish/hide the substream for the BT data rx handler
+ */
+static int remote_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
+{
+	struct fbx_remote *r = snd_pcm_substream_chip(substream);
+
+	switch (cmd) {
+	case SNDRV_PCM_TRIGGER_START:
+	case SNDRV_PCM_TRIGGER_RESUME:
+		/* make substream visible to BT data rx handler */
+		spin_lock(&r->pcm_lock);
+		r->substream = substream;
+		spin_unlock(&r->pcm_lock);
+		return 0;
+
+	case SNDRV_PCM_TRIGGER_STOP:
+	case SNDRV_PCM_TRIGGER_SUSPEND:
+		timer_stop(r);
+		spin_lock(&r->pcm_lock);
+		r->substream = NULL;
+		spin_unlock(&r->pcm_lock);
+		return 0;
+	default:
+		return -EINVAL;
+	}
+}
+
+/*
+ * alsa pcm hardware caps: 8 kHz mono s16, so one frame is two bytes
+ * (hence the "* 2" byte sizes below)
+ */
+static struct snd_pcm_hardware remote_playback_hw = {
+	.info = (SNDRV_PCM_INFO_MMAP |
+		 SNDRV_PCM_INFO_BLOCK_TRANSFER |
+		 SNDRV_PCM_INFO_INTERLEAVED |
+		 SNDRV_PCM_INFO_MMAP_VALID),
+	.formats = SNDRV_PCM_FMTBIT_S16,
+	.rates = SNDRV_PCM_RATE_8000,
+	.rate_min = 8000,
+	.rate_max = 8000,
+	.channels_min =	1,
+	.channels_max =	1,
+	.buffer_bytes_max = BUFFER_SIZE * 2,
+	.period_bytes_min = PERIOD_SIZE * 2,
+	.period_bytes_max = PERIOD_SIZE * 2,
+	.periods_min = 2,
+	.periods_max = BUFFER_SIZE / PERIOD_SIZE,
+};
+
+/*
+ * assign hardware caps and set up the period timer
+ */
+static int remote_pcm_open(struct snd_pcm_substream *substream)
+{
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	struct fbx_remote *r = snd_pcm_substream_chip(substream);
+
+	/* assign default hardware ops */
+	runtime->hw = remote_playback_hw;
+
+	/* prepare timer used to dequeue */
+	init_timer(&r->pcm_timer);
+	r->pcm_timer.data = (unsigned long)substream;
+	r->pcm_timer.function = remote_pcm_timer_callback;
+
+	return 0;
+}
+
+/*
+ * nothing to do on hw_free
+ */
+static int remote_pcm_hw_free(struct snd_pcm_substream *substream)
+{
+	return 0;
+}
+
+/*
+ * nothing to do on hw_params
+ */
+static int remote_pcm_hw_params(struct snd_pcm_substream *substream,
+				struct snd_pcm_hw_params *params)
+{
+	return 0;
+}
+
+/*
+ * stop streaming and tell the remote to disable audio
+ */
+static int remote_pcm_close(struct snd_pcm_substream *substream)
+{
+	struct fbx_remote *r = snd_pcm_substream_chip(substream);
+
+	spin_lock_bh(&r->pcm_lock);
+	r->substream = NULL;
+	spin_unlock_bh(&r->pcm_lock);
+	ring_discard(&r->bt_pcm_ring);
+	timer_stop(r);
+	hid_set_audio_enabled(r, false);
+
+	return 0;
+}
+
+/*
+ * alsa PCM device callbacks
+ */
+static const struct snd_pcm_ops snd_remote_capture_ops = {
+	.open		= remote_pcm_open,
+	.close		= remote_pcm_close,
+	.ioctl		= snd_pcm_lib_ioctl,
+	.hw_params	= remote_pcm_hw_params,
+	.hw_free	= remote_pcm_hw_free,
+	.prepare	= remote_pcm_prepare,
+	.trigger	= remote_pcm_trigger,
+	.pointer	= remote_pcm_pointer,
+	.copy		= remote_pcm_copy,
+};
+
+/*
+ * create and register an alsa card exposing the remote control
+ * microphone as a capture device
+ */
+static int remote_alsa_register(struct fbx_remote *r)
+{
+	struct snd_card *card;
+	struct snd_pcm *pcm;
+	int ret;
+
+	/* Setup sound card */
+	ret = snd_card_new(&r->hdev->dev,
+			   SNDRV_DEFAULT_IDX1,
+			   "fbxremote",
+			   THIS_MODULE, 0, &card);
+	if (ret < 0) {
+		hid_err(r->hdev, "failed to create sound card: %d\n", ret);
+		return ret;
+	}
+
+	snprintf(card->driver, sizeof (card->driver),
+		 "fbxremote");
+	snprintf(card->shortname, sizeof (card->shortname),
+		 "Freebox BLE remote");
+	snprintf(card->longname, sizeof (card->longname),
+		 "Freebox BLE remote");
+
+	/* allocate new pcm device */
+	ret = snd_pcm_new(card, "fbxremote-mic", 0, 0, 1, &pcm);
+	if (ret) {
+		hid_err(r->hdev, "snd_pcm_new failed %d\n", ret);
+		snd_card_free(card);
+		return ret;
+	}
+
+	snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE,
+			&snd_remote_capture_ops);
+
+	snprintf(pcm->name, sizeof (pcm->name),
+		 "Remote microphone");
+
+	/* set before registration: pcm callbacks read this pointer */
+	pcm->private_data = r;
+
+	ret = snd_card_register(card);
+	if (ret) {
+		hid_err(r->hdev, "failed to register card: %d\n", ret);
+		snd_card_free(card);
+		return ret;
+	}
+
+	r->card = card;
+	r->pcm = pcm;
+	return 0;
+}
+
+/*
+ * hid stack callback before parsing report
+ */
+static int fbx_remote_raw_event(struct hid_device *hdev,
+				struct hid_report *report,
+				u8 *data, int size)
+{
+	struct fbx_remote *r = hid_get_drvdata(hdev);
+
+	if (!r)
+		return 0;
+
+	if (report->id != REMOTE_REPORT_AUDIO_DATA) {
+		/* let stack process it */
+		return 0;
+	}
+
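+	/*
+	 * Audio reports carry a 16-bit little-endian sequence number in
+	 * data[2..3] and the compressed payload from data[5] onwards; the
+	 * report size selects between the TI fragmented and Fbx
+	 * non-fragmented framings decoded by handle_adpcm_data().
+	 */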
+	switch (size) {
+	case 101:
+		/* Ti fragmented mode */
+		handle_adpcm_data(r, ((u16)data[3] << 8) | data[2], data[4],
+				  data + 5, 192, data[1] >> 3, 0x1f);
+		break;
+
+	case 21:
+		/* Fbx non-fragmented mode */
+		handle_adpcm_data(r, ((u16)data[3] << 8) | data[2], data[4],
+				  data + 5, 32, data[1], 0x7f);
+		break;
+	}
+
+	return 1;
+}
+
+/*
+ * hid stack callback after parsing report
+ */
+static int fbx_remote_event(struct hid_device *hdev, struct hid_field *field,
+			    struct hid_usage *usage, __s32 value)
+{
+	struct fbx_remote *r = hid_get_drvdata(hdev);
+
+	if (!r)
+		return 0;
+
+	if (!(hdev->claimed & HID_CLAIMED_INPUT))
+		return 0;
+
+	/* ignore input events during the first 200ms after connection */
+	if (!time_after(jiffies, r->connect_time + HZ / 5)) {
+		hid_info(hdev, "force ignore event type %d code %d\n",
+			 usage->type, usage->code);
+		return 1;
+	}
+
+	return 0;
+}
+
+/*
+ * locate the HID report used to enable/disable audio streaming on
+ * remote
+ */
+static struct hid_report *find_enable_report(struct hid_device *hdev)
+{
+	const struct hid_report_enum *renum;
+	struct hid_report *report;
+
+	renum = &hdev->report_enum[HID_FEATURE_REPORT];
+	report = renum->report_id_hash[REMOTE_REPORT_AUDIO_ENABLE];
+	if (!report) {
+		hid_err(hdev, "audio-enable report id not found\n");
+		return NULL;
+	}
+
+	if (report->maxfield < 1 ||
+	    report->field[0]->report_count < 1 ||
+	    report->field[0]->report_size < 8) {
+		hid_err(hdev, "audio-enable report count/size invalid\n");
+		return NULL;
+	}
+
+	return report;
+}
+
+/*
+ * probe method called by kernel on hid device registration
+ */
+static int fbx_probe(struct hid_device *hdev, const struct hid_device_id *id)
+{
+	struct fbx_remote *r;
+	struct hid_report *report;
+	int error;
+
+	error = hid_parse(hdev);
+	if (error) {
+		hid_err(hdev, "parse failed\n");
+		return error;
+	}
+
+	error = hid_hw_start(hdev, HID_CONNECT_DEFAULT & ~HID_CONNECT_FF);
+	if (error) {
+		hid_err(hdev, "hw start failed\n");
+		return error;
+	}
+
+	report = find_enable_report(hdev);
+	if (!report) {
+		/* let generic driver handle it */
+		hid_info(hdev, "HID descriptor invalid, falling back to generic HID\n");
+		return 0;
+	}
+
+	r = kzalloc(sizeof (*r), GFP_KERNEL);
+	if (!r) {
+		hid_hw_stop(hdev);
+		return -ENOMEM;
+	}
+
+	if (ring_init(&r->bt_pcm_ring, BT_RING_SIZE)) {
+		hid_hw_stop(hdev);
+		kfree(r);
+		return -ENOMEM;
+	}
+
+	spin_lock_init(&r->pcm_lock);
+	r->enable_report = report;
+	r->hdev = hdev;
+	r->connect_time = jiffies;
+
+	if (remote_alsa_register(r)) {
+		hid_err(hdev, "failed to register alsa driver\n");
+		/* ignore */
+	}
+
+	hid_set_drvdata(hdev, r);
+	hid_info(hdev, "remote control ready\n");
+
+	return 0;
+}
+
+/*
+ * release method called by kernel when HID device is removed
+ */
+static void fbx_remove(struct hid_device *hdev)
+{
+	struct fbx_remote *r = hid_get_drvdata(hdev);
+
+	if (r && r->card)
+		snd_card_free(r->card);
+
+	hid_hw_close(hdev);
+	hid_hw_stop(hdev);
+	hid_set_drvdata(hdev, NULL);
+
+	if (r) {
+		ring_release(&r->bt_pcm_ring);
+		kfree(r);
+	}
+}
+
+static int fbx_input_mapping(struct hid_device *hdev, struct hid_input *hi,
+		struct hid_field *field, struct hid_usage *usage,
+		unsigned long **bit, int *max)
+{
+	/* ignore custom report ids that confuse the Android inputflinger */
+	if (usage->hid & 0xFF000000)
+		return -1;
+	return 0;
+}
+
+static int fbx_input_mapped(struct hid_device *hdev, struct hid_input *hi,
+		struct hid_field *field, struct hid_usage *usage,
+		unsigned long **bit, int *max)
+{
+	clear_bit(BTN_MISC, *bit);
+	return 0;
+}
+
+static const struct hid_device_id fbx_devices[] = {
+	{ HID_BLUETOOTH_DEVICE(0x10eb, 0x0023) },
+	{ HID_BLUETOOTH_DEVICE(0x10eb, 0x0024) },
+	{ }
+};
+
+MODULE_DEVICE_TABLE(hid, fbx_devices);
+
+static struct hid_driver fbx_driver = {
+	.name		= "fbx-remote-audio",
+	.id_table	= fbx_devices,
+	.input_mapping	= fbx_input_mapping,
+	.input_mapped	= fbx_input_mapped,
+	.raw_event	= fbx_remote_raw_event,
+	.event		= fbx_remote_event,
+	.probe		= fbx_probe,
+	.remove		= fbx_remove,
+};
+
+module_hid_driver(fbx_driver);
+
+MODULE_AUTHOR("Maxime Bizon");
+MODULE_DESCRIPTION("Audio over HID on Freebox BLE remote");
+MODULE_LICENSE("GPL");
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/hwmon/qpnp-adc-common.c	2019-10-29 09:26:23.721203941 +0100
@@ -0,0 +1,2094 @@
+/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/mutex.h>
+#include <linux/types.h>
+#include <linux/hwmon.h>
+#include <linux/module.h>
+#include <linux/debugfs.h>
+#include <linux/spmi.h>
+#include <linux/platform_device.h>
+#include <linux/of_irq.h>
+#include <linux/interrupt.h>
+#include <linux/completion.h>
+#include <linux/qpnp/qpnp-adc.h>
+
+#define KELVINMIL_DEGMIL	273160
+#define QPNP_VADC_LDO_VOLTAGE_MIN	1800000
+#define QPNP_VADC_LDO_VOLTAGE_MAX	1800000
+#define QPNP_VADC_OK_VOLTAGE_MIN	1000000
+#define QPNP_VADC_OK_VOLTAGE_MAX	1000000
+#define PMI_CHG_SCALE_1		-138890
+#define PMI_CHG_SCALE_2		391750000000
+#define QPNP_VADC_HC_VREF_CODE		0x4000
+#define QPNP_VADC_HC_VDD_REFERENCE_MV	1875
+/* Clamp negative ADC code to 0 */
+#define QPNP_VADC_HC_MAX_CODE		0x7FFF
+
+/*
+ * Units for temperature below (on the x axis) are 0.1 degC as required
+ * by the battery driver. Note that the resolution used to compute the
+ * table was degC to millivolts. To limit the size of the table for the
+ * temperature range below, the result is linearly interpolated and
+ * provided to the battery driver in the 0.1 degC units its framework
+ * expects. A true resolution of 0.1 degC would make the table below
+ * roughly ten times larger.
+ */
+static const struct qpnp_vadc_map_pt adcmap_btm_threshold[] = {
+	{-300,	1642},
+	{-200,	1544},
+	{-100,	1414},
+	{0,	1260},
+	{10,	1244},
+	{20,	1228},
+	{30,	1212},
+	{40,	1195},
+	{50,	1179},
+	{60,	1162},
+	{70,	1146},
+	{80,	1129},
+	{90,	1113},
+	{100,	1097},
+	{110,	1080},
+	{120,	1064},
+	{130,	1048},
+	{140,	1032},
+	{150,	1016},
+	{160,	1000},
+	{170,	985},
+	{180,	969},
+	{190,	954},
+	{200,	939},
+	{210,	924},
+	{220,	909},
+	{230,	894},
+	{240,	880},
+	{250,	866},
+	{260,	852},
+	{270,	838},
+	{280,	824},
+	{290,	811},
+	{300,	798},
+	{310,	785},
+	{320,	773},
+	{330,	760},
+	{340,	748},
+	{350,	736},
+	{360,	725},
+	{370,	713},
+	{380,	702},
+	{390,	691},
+	{400,	681},
+	{410,	670},
+	{420,	660},
+	{430,	650},
+	{440,	640},
+	{450,	631},
+	{460,	622},
+	{470,	613},
+	{480,	604},
+	{490,	595},
+	{500,	587},
+	{510,	579},
+	{520,	571},
+	{530,	563},
+	{540,	556},
+	{550,	548},
+	{560,	541},
+	{570,	534},
+	{580,	527},
+	{590,	521},
+	{600,	514},
+	{610,	508},
+	{620,	502},
+	{630,	496},
+	{640,	490},
+	{650,	485},
+	{660,	281},
+	{670,	274},
+	{680,	267},
+	{690,	260},
+	{700,	254},
+	{710,	247},
+	{720,	241},
+	{730,	235},
+	{740,	229},
+	{750,	224},
+	{760,	218},
+	{770,	213},
+	{780,	208},
+	{790,	203}
+};
+
+static const struct qpnp_vadc_map_pt adcmap_qrd_btm_threshold[] = {
+	{-200,	1540},
+	{-180,	1517},
+	{-160,	1492},
+	{-140,	1467},
+	{-120,	1440},
+	{-100,	1412},
+	{-80,	1383},
+	{-60,	1353},
+	{-40,	1323},
+	{-20,	1292},
+	{0,	1260},
+	{20,	1228},
+	{40,	1196},
+	{60,	1163},
+	{80,	1131},
+	{100,	1098},
+	{120,	1066},
+	{140,	1034},
+	{160,	1002},
+	{180,	971},
+	{200,	941},
+	{220,	911},
+	{240,	882},
+	{260,	854},
+	{280,	826},
+	{300,	800},
+	{320,	774},
+	{340,	749},
+	{360,	726},
+	{380,	703},
+	{400,	681},
+	{420,	660},
+	{440,	640},
+	{460,	621},
+	{480,	602},
+	{500,	585},
+	{520,	568},
+	{540,	552},
+	{560,	537},
+	{580,	523},
+	{600,	510},
+	{620,	497},
+	{640,	485},
+	{660,	473},
+	{680,	462},
+	{700,	452},
+	{720,	442},
+	{740,	433},
+	{760,	424},
+	{780,	416},
+	{800,	408},
+};
+
+static const struct qpnp_vadc_map_pt adcmap_qrd_skuaa_btm_threshold[] = {
+	{-200,	1476},
+	{-180,	1450},
+	{-160,	1422},
+	{-140,	1394},
+	{-120,	1365},
+	{-100,	1336},
+	{-80,	1306},
+	{-60,	1276},
+	{-40,	1246},
+	{-20,	1216},
+	{0,	1185},
+	{20,	1155},
+	{40,	1126},
+	{60,	1096},
+	{80,	1068},
+	{100,	1040},
+	{120,	1012},
+	{140,	986},
+	{160,	960},
+	{180,	935},
+	{200,	911},
+	{220,	888},
+	{240,	866},
+	{260,	844},
+	{280,	824},
+	{300,	805},
+	{320,	786},
+	{340,	769},
+	{360,	752},
+	{380,	737},
+	{400,	722},
+	{420,	707},
+	{440,	694},
+	{460,	681},
+	{480,	669},
+	{500,	658},
+	{520,	648},
+	{540,	637},
+	{560,	628},
+	{580,	619},
+	{600,	611},
+	{620,	603},
+	{640,	595},
+	{660,	588},
+	{680,	582},
+	{700,	575},
+	{720,	569},
+	{740,	564},
+	{760,	559},
+	{780,	554},
+	{800,	549},
+};
+
+static const struct qpnp_vadc_map_pt adcmap_qrd_skug_btm_threshold[] = {
+	{-200,	1338},
+	{-180,	1307},
+	{-160,	1276},
+	{-140,	1244},
+	{-120,	1213},
+	{-100,	1182},
+	{-80,	1151},
+	{-60,	1121},
+	{-40,	1092},
+	{-20,	1063},
+	{0,	1035},
+	{20,	1008},
+	{40,	982},
+	{60,	957},
+	{80,	933},
+	{100,	910},
+	{120,	889},
+	{140,	868},
+	{160,	848},
+	{180,	830},
+	{200,	812},
+	{220,	795},
+	{240,	780},
+	{260,	765},
+	{280,	751},
+	{300,	738},
+	{320,	726},
+	{340,	714},
+	{360,	704},
+	{380,	694},
+	{400,	684},
+	{420,	675},
+	{440,	667},
+	{460,	659},
+	{480,	652},
+	{500,	645},
+	{520,	639},
+	{540,	633},
+	{560,	627},
+	{580,	622},
+	{600,	617},
+	{620,	613},
+	{640,	608},
+	{660,	604},
+	{680,	600},
+	{700,	597},
+	{720,	593},
+	{740,	590},
+	{760,	587},
+	{780,	585},
+	{800,	582},
+};
+
+static const struct qpnp_vadc_map_pt adcmap_qrd_skuh_btm_threshold[] = {
+	{-200,	1531},
+	{-180,	1508},
+	{-160,	1483},
+	{-140,	1458},
+	{-120,	1432},
+	{-100,	1404},
+	{-80,	1377},
+	{-60,	1348},
+	{-40,	1319},
+	{-20,	1290},
+	{0,	1260},
+	{20,	1230},
+	{40,	1200},
+	{60,	1171},
+	{80,	1141},
+	{100,	1112},
+	{120,	1083},
+	{140,	1055},
+	{160,	1027},
+	{180,	1000},
+	{200,	973},
+	{220,	948},
+	{240,	923},
+	{260,	899},
+	{280,	876},
+	{300,	854},
+	{320,	832},
+	{340,	812},
+	{360,	792},
+	{380,	774},
+	{400,	756},
+	{420,	739},
+	{440,	723},
+	{460,	707},
+	{480,	692},
+	{500,	679},
+	{520,	665},
+	{540,	653},
+	{560,	641},
+	{580,	630},
+	{600,	619},
+	{620,	609},
+	{640,	600},
+	{660,	591},
+	{680,	583},
+	{700,	575},
+	{720,	567},
+	{740,	560},
+	{760,	553},
+	{780,	547},
+	{800,	541},
+	{820,	535},
+	{840,	530},
+	{860,	524},
+	{880,	520},
+};
+
+static const struct qpnp_vadc_map_pt adcmap_qrd_skut1_btm_threshold[] = {
+	{-400,	1759},
+	{-350,	1742},
+	{-300,	1720},
+	{-250,	1691},
+	{-200,	1654},
+	{-150,	1619},
+	{-100,	1556},
+	{-50,	1493},
+	{0,	1422},
+	{50,	1345},
+	{100,	1264},
+	{150,	1180},
+	{200,	1097},
+	{250,	1017},
+	{300,	942},
+	{350,	873},
+	{400,	810},
+	{450,	754},
+	{500,	706},
+	{550,	664},
+	{600,	627},
+	{650,	596},
+	{700,	570},
+	{750,	547},
+	{800,	528},
+	{850,	512},
+	{900,	499},
+	{950,	487},
+	{1000,	477},
+};
+
+/* Voltage to temperature */
+static const struct qpnp_vadc_map_pt adcmap_100k_104ef_104fb[] = {
+	{1758,	-40},
+	{1742,	-35},
+	{1719,	-30},
+	{1691,	-25},
+	{1654,	-20},
+	{1608,	-15},
+	{1551,	-10},
+	{1483,	-5},
+	{1404,	0},
+	{1315,	5},
+	{1218,	10},
+	{1114,	15},
+	{1007,	20},
+	{900,	25},
+	{795,	30},
+	{696,	35},
+	{605,	40},
+	{522,	45},
+	{448,	50},
+	{383,	55},
+	{327,	60},
+	{278,	65},
+	{237,	70},
+	{202,	75},
+	{172,	80},
+	{146,	85},
+	{125,	90},
+	{107,	95},
+	{92,	100},
+	{79,	105},
+	{68,	110},
+	{59,	115},
+	{51,	120},
+	{44,	125}
+};
+
+/* Voltage to temperature */
+static const struct qpnp_vadc_map_pt adcmap_150k_104ef_104fb[] = {
+	{1738,	-40},
+	{1714,	-35},
+	{1682,	-30},
+	{1641,	-25},
+	{1589,	-20},
+	{1526,	-15},
+	{1451,	-10},
+	{1363,	-5},
+	{1266,	0},
+	{1159,	5},
+	{1048,	10},
+	{936,	15},
+	{825,	20},
+	{720,	25},
+	{622,	30},
+	{533,	35},
+	{454,	40},
+	{385,	45},
+	{326,	50},
+	{275,	55},
+	{232,	60},
+	{195,	65},
+	{165,	70},
+	{139,	75},
+	{118,	80},
+	{100,	85},
+	{85,	90},
+	{73,	95},
+	{62,	100},
+	{53,	105},
+	{46,	110},
+	{40,	115},
+	{34,	120},
+	{30,	125}
+};
+
+static const struct qpnp_vadc_map_pt adcmap_smb_batt_therm[] = {
+	{-300,	1625},
+	{-200,	1515},
+	{-100,	1368},
+	{0,	1192},
+	{10,	1173},
+	{20,	1154},
+	{30,	1135},
+	{40,	1116},
+	{50,	1097},
+	{60,	1078},
+	{70,	1059},
+	{80,	1040},
+	{90,	1020},
+	{100,	1001},
+	{110,	982},
+	{120,	963},
+	{130,	944},
+	{140,	925},
+	{150,	907},
+	{160,	888},
+	{170,	870},
+	{180,	851},
+	{190,	833},
+	{200,	815},
+	{210,	797},
+	{220,	780},
+	{230,	762},
+	{240,	745},
+	{250,	728},
+	{260,	711},
+	{270,	695},
+	{280,	679},
+	{290,	663},
+	{300,	647},
+	{310,	632},
+	{320,	616},
+	{330,	602},
+	{340,	587},
+	{350,	573},
+	{360,	559},
+	{370,	545},
+	{380,	531},
+	{390,	518},
+	{400,	505},
+	{410,	492},
+	{420,	480},
+	{430,	465},
+	{440,	456},
+	{450,	445},
+	{460,	433},
+	{470,	422},
+	{480,	412},
+	{490,	401},
+	{500,	391},
+	{510,	381},
+	{520,	371},
+	{530,	362},
+	{540,	352},
+	{550,	343},
+	{560,	335},
+	{570,	326},
+	{580,	318},
+	{590,	309},
+	{600,	302},
+	{610,	294},
+	{620,	286},
+	{630,	279},
+	{640,	272},
+	{650,	265},
+	{660,	258},
+	{670,	252},
+	{680,	245},
+	{690,	239},
+	{700,	233},
+	{710,	227},
+	{720,	221},
+	{730,	216},
+	{740,	211},
+	{750,	205},
+	{760,	200},
+	{770,	195},
+	{780,	190},
+	{790,	186}
+};
+
+/* Voltage to temperature */
+static const struct qpnp_vadc_map_pt adcmap_ncp03wf683[] = {
+	{1742,	-40},
+	{1718,	-35},
+	{1687,	-30},
+	{1647,	-25},
+	{1596,	-20},
+	{1534,	-15},
+	{1459,	-10},
+	{1372,	-5},
+	{1275,	0},
+	{1169,	5},
+	{1058,	10},
+	{945,	15},
+	{834,	20},
+	{729,	25},
+	{630,	30},
+	{541,	35},
+	{461,	40},
+	{392,	45},
+	{332,	50},
+	{280,	55},
+	{236,	60},
+	{199,	65},
+	{169,	70},
+	{142,	75},
+	{121,	80},
+	{102,	85},
+	{87,	90},
+	{74,	95},
+	{64,	100},
+	{55,	105},
+	{47,	110},
+	{40,	115},
+	{35,	120},
+	{30,	125}
+};
+
+/*
+ * Voltage to temperature table for 100k pull up for NTCG104EF104 with
+ * 1.875V reference.
+ */
+static const struct qpnp_vadc_map_pt adcmap_100k_104ef_104fb_1875_vref[] = {
+	{ 1831,	-40 },
+	{ 1814,	-35 },
+	{ 1791,	-30 },
+	{ 1761,	-25 },
+	{ 1723,	-20 },
+	{ 1675,	-15 },
+	{ 1616,	-10 },
+	{ 1545,	-5 },
+	{ 1463,	0 },
+	{ 1370,	5 },
+	{ 1268,	10 },
+	{ 1160,	15 },
+	{ 1049,	20 },
+	{ 937,	25 },
+	{ 828,	30 },
+	{ 726,	35 },
+	{ 630,	40 },
+	{ 544,	45 },
+	{ 467,	50 },
+	{ 399,	55 },
+	{ 340,	60 },
+	{ 290,	65 },
+	{ 247,	70 },
+	{ 209,	75 },
+	{ 179,	80 },
+	{ 153,	85 },
+	{ 130,	90 },
+	{ 112,	95 },
+	{ 96,	100 },
+	{ 82,	105 },
+	{ 71,	110 },
+	{ 62,	115 },
+	{ 53,	120 },
+	{ 46,	125 },
+};
+
+static int32_t qpnp_adc_map_voltage_temp(const struct qpnp_vadc_map_pt *pts,
+		uint32_t tablesize, int32_t input, int64_t *output)
+{
+	bool descending = true;
+	uint32_t i = 0;
+
+	if (pts == NULL)
+		return -EINVAL;
+
+	/* Check if table is descending or ascending */
+	if (tablesize > 1) {
+		if (pts[0].x < pts[1].x)
+			descending = false;
+	}
+
+	while (i < tablesize) {
+		if (descending && (pts[i].x < input)) {
+			/*
+			 * Table entry is less than the measured value
+			 * and the table is descending, stop.
+			 */
+			break;
+		} else if (!descending && (pts[i].x > input)) {
+			/*
+			 * Table entry is greater than the measured
+			 * value and the table is ascending, stop.
+			 */
+			break;
+		} else {
+			i++;
+		}
+	}
+
+	if (i == 0) {
+		*output = pts[0].y;
+	} else if (i == tablesize) {
+		*output = pts[tablesize-1].y;
+	} else {
+		/*
+		 * Result lies between entries i-1 and i;
+		 * interpolate linearly.
+		 */
+		*output = (((int32_t) ((pts[i].y - pts[i-1].y) *
+			(input - pts[i-1].x)) /
+			(pts[i].x - pts[i-1].x)) +
+			pts[i-1].y);
+	}
+
+	return 0;
+}
+
+static int32_t qpnp_adc_map_temp_voltage(const struct qpnp_vadc_map_pt *pts,
+		uint32_t tablesize, int32_t input, int64_t *output)
+{
+	bool descending = true;
+	uint32_t i = 0;
+
+	if (pts == NULL)
+		return -EINVAL;
+
+	/* Check if table is descending or ascending */
+	if (tablesize > 1) {
+		if (pts[0].y < pts[1].y)
+			descending = false;
+	}
+
+	while (i < tablesize) {
+		if (descending && (pts[i].y < input)) {
+			/*
+			 * Table entry is less than the measured value
+			 * and the table is descending, stop.
+			 */
+			break;
+		} else if (!descending && (pts[i].y > input)) {
+			/*
+			 * Table entry is greater than the measured
+			 * value and the table is ascending, stop.
+			 */
+			break;
+		} else {
+			i++;
+		}
+	}
+
+	if (i == 0) {
+		*output = pts[0].x;
+	} else if (i == tablesize) {
+		*output = pts[tablesize-1].x;
+	} else {
+		/*
+		 * Result lies between entries i-1 and i;
+		 * interpolate linearly.
+		 */
+		*output = (((int32_t) ((pts[i].x - pts[i-1].x) *
+			(input - pts[i-1].y)) /
+			(pts[i].y - pts[i-1].y)) +
+			pts[i-1].x);
+	}
+
+	return 0;
+}
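+
+/*
+ * Both lookup helpers above do the same piecewise-linear interpolation:
+ * for an input between adjacent table points (x0, y0) and (x1, y1),
+ *
+ *	y = y0 + (y1 - y0) * (x - x0) / (x1 - x0)
+ *
+ * evaluated in integer arithmetic, so the quotient truncates toward zero.
+ */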
+
+static void qpnp_adc_scale_with_calib_param(int32_t adc_code,
+		const struct qpnp_adc_properties *adc_properties,
+		const struct qpnp_vadc_chan_properties *chan_properties,
+		int64_t *scale_voltage)
+{
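+	/*
+	 * Convert the raw code to a voltage using the calibration graph:
+	 * V = (code - adc_gnd) * dx / dy, with the graph's dx step added
+	 * back for absolute calibration and negative results clamped to 0.
+	 */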
+	*scale_voltage = (adc_code -
+		chan_properties->adc_graph[chan_properties->calib_type].adc_gnd)
+		* chan_properties->adc_graph[chan_properties->calib_type].dx;
+	*scale_voltage = div64_s64(*scale_voltage,
+		chan_properties->adc_graph[chan_properties->calib_type].dy);
+
+	if (chan_properties->calib_type == CALIB_ABSOLUTE)
+		*scale_voltage +=
+		chan_properties->adc_graph[chan_properties->calib_type].dx;
+
+	if (*scale_voltage < 0)
+		*scale_voltage = 0;
+}
+
+int32_t qpnp_adc_scale_pmic_therm(struct qpnp_vadc_chip *vadc,
+		int32_t adc_code,
+		const struct qpnp_adc_properties *adc_properties,
+		const struct qpnp_vadc_chan_properties *chan_properties,
+		struct qpnp_vadc_result *adc_chan_result)
+{
+	int64_t pmic_voltage = 0;
+
+	if (!chan_properties || !chan_properties->offset_gain_numerator ||
+		!chan_properties->offset_gain_denominator || !adc_properties
+		|| !adc_chan_result)
+		return -EINVAL;
+
+	if (adc_properties->adc_hc) {
+		/* (ADC code * vref_vadc (1.875V)) / 0x4000 */
+		if (adc_code > QPNP_VADC_HC_MAX_CODE)
+			adc_code = 0;
+		pmic_voltage = (int64_t) adc_code;
+		pmic_voltage *= (int64_t) (adc_properties->adc_vdd_reference
+							* 1000);
+		pmic_voltage = div64_s64(pmic_voltage,
+					QPNP_VADC_HC_VREF_CODE);
+	} else {
+		if (!chan_properties->adc_graph[CALIB_ABSOLUTE].dy)
+			return -EINVAL;
+		qpnp_adc_scale_with_calib_param(adc_code, adc_properties,
+					chan_properties, &pmic_voltage);
+	}
+
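+	/*
+	 * The PMIC thermal sensor outputs 2 mV/K: undoing the channel
+	 * prescale and dividing by 2 yields milli-Kelvin, and subtracting
+	 * 273160 (273.16 K) below converts that to millidegrees Celsius.
+	 */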
+	if (pmic_voltage > 0) {
+		/* 2mV/K */
+		adc_chan_result->measurement = pmic_voltage*
+			chan_properties->offset_gain_denominator;
+
+		do_div(adc_chan_result->measurement,
+			chan_properties->offset_gain_numerator * 2);
+	} else {
+		adc_chan_result->measurement = 0;
+	}
+
+	/* Change to .001 deg C */
+	adc_chan_result->measurement -= KELVINMIL_DEGMIL;
+	adc_chan_result->physical = (int32_t) adc_chan_result->measurement;
+
+	return 0;
+}
+EXPORT_SYMBOL(qpnp_adc_scale_pmic_therm);
+
+int32_t qpnp_adc_scale_millidegc_pmic_voltage_thr(struct qpnp_vadc_chip *chip,
+		struct qpnp_adc_tm_btm_param *param,
+		uint32_t *low_threshold, uint32_t *high_threshold)
+{
+	struct qpnp_vadc_linear_graph btm_param;
+	int64_t low_output = 0, high_output = 0;
+	int rc = 0, sign = 0;
+
+	/* Convert to Kelvin and account for voltage to be written as 2mV/K */
+	low_output = (param->low_temp + KELVINMIL_DEGMIL) * 2;
+	/* Convert to Kelvin and account for voltage to be written as 2mV/K */
+	high_output = (param->high_temp + KELVINMIL_DEGMIL) * 2;
+
+	if (param->adc_tm_hc) {
+		low_output *= QPNP_VADC_HC_VREF_CODE;
+		do_div(low_output, (QPNP_VADC_HC_VDD_REFERENCE_MV * 1000));
+		high_output *= QPNP_VADC_HC_VREF_CODE;
+		do_div(high_output, (QPNP_VADC_HC_VDD_REFERENCE_MV * 1000));
+	} else {
+		rc = qpnp_get_vadc_gain_and_offset(chip, &btm_param,
+							CALIB_ABSOLUTE);
+		if (rc < 0) {
+			pr_err("Could not acquire gain and offset\n");
+			return rc;
+		}
+
+		/* Convert to voltage threshold */
+		low_output = (low_output - QPNP_ADC_625_UV) * btm_param.dy;
+		if (low_output < 0) {
+			sign = 1;
+			low_output = -low_output;
+		}
+		do_div(low_output, QPNP_ADC_625_UV);
+		if (sign)
+			low_output = -low_output;
+		low_output += btm_param.adc_gnd;
+
+		sign = 0;
+		/* Convert to voltage threshold */
+		high_output = (high_output - QPNP_ADC_625_UV) * btm_param.dy;
+		if (high_output < 0) {
+			sign = 1;
+			high_output = -high_output;
+		}
+		do_div(high_output, QPNP_ADC_625_UV);
+		if (sign)
+			high_output = -high_output;
+		high_output += btm_param.adc_gnd;
+	}
+
+	*low_threshold = (uint32_t) low_output;
+	*high_threshold = (uint32_t) high_output;
+
+	pr_debug("high_temp:%d, low_temp:%d\n", param->high_temp,
+				param->low_temp);
+	pr_debug("adc_code_high:%x, adc_code_low:%x\n", *high_threshold,
+				*low_threshold);
+
+	return 0;
+}
+EXPORT_SYMBOL(qpnp_adc_scale_millidegc_pmic_voltage_thr);
+
+/*
+ * Scales the ADC code to degC using the mapping
+ * table for the XO thermistor.
+ */
+int32_t qpnp_adc_tdkntcg_therm(struct qpnp_vadc_chip *chip,
+		int32_t adc_code,
+		const struct qpnp_adc_properties *adc_properties,
+		const struct qpnp_vadc_chan_properties *chan_properties,
+		struct qpnp_vadc_result *adc_chan_result)
+{
+	int64_t xo_thm_voltage = 0;
+
+	if (!chan_properties || !chan_properties->offset_gain_numerator ||
+		!chan_properties->offset_gain_denominator || !adc_properties
+		|| !adc_chan_result)
+		return -EINVAL;
+
+	if (adc_properties->adc_hc) {
+		/* (ADC code * vref_vadc (1.875V) * 1000) / (0x4000 * 1000) */
+		if (adc_code > QPNP_VADC_HC_MAX_CODE)
+			adc_code = 0;
+		xo_thm_voltage = (int64_t) adc_code;
+		xo_thm_voltage *= (int64_t) (adc_properties->adc_vdd_reference
+							* 1000);
+		xo_thm_voltage = div64_s64(xo_thm_voltage,
+					QPNP_VADC_HC_VREF_CODE * 1000);
+		qpnp_adc_map_voltage_temp(adcmap_100k_104ef_104fb_1875_vref,
+			ARRAY_SIZE(adcmap_100k_104ef_104fb_1875_vref),
+			xo_thm_voltage, &adc_chan_result->physical);
+	} else {
+		qpnp_adc_scale_with_calib_param(adc_code,
+			adc_properties, chan_properties, &xo_thm_voltage);
+
+		if (chan_properties->calib_type == CALIB_ABSOLUTE)
+			do_div(xo_thm_voltage, 1000);
+
+		qpnp_adc_map_voltage_temp(adcmap_100k_104ef_104fb,
+			ARRAY_SIZE(adcmap_100k_104ef_104fb),
+			xo_thm_voltage, &adc_chan_result->physical);
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(qpnp_adc_tdkntcg_therm);
+
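+/*
+ * The batt_therm scalers below all follow the same pattern: convert the
+ * raw code to a voltage with the channel calibration, then map that
+ * voltage to temperature (0.1 degC units) through a board-specific NTC
+ * lookup table.
+ */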
+int32_t qpnp_adc_scale_batt_therm(struct qpnp_vadc_chip *chip,
+		int32_t adc_code,
+		const struct qpnp_adc_properties *adc_properties,
+		const struct qpnp_vadc_chan_properties *chan_properties,
+		struct qpnp_vadc_result *adc_chan_result)
+{
+	int64_t bat_voltage = 0;
+
+	qpnp_adc_scale_with_calib_param(adc_code,
+			adc_properties, chan_properties, &bat_voltage);
+
+	adc_chan_result->measurement = bat_voltage;
+
+	return qpnp_adc_map_temp_voltage(
+			adcmap_btm_threshold,
+			ARRAY_SIZE(adcmap_btm_threshold),
+			bat_voltage,
+			&adc_chan_result->physical);
+}
+EXPORT_SYMBOL(qpnp_adc_scale_batt_therm);
+
+int32_t qpnp_adc_scale_qrd_batt_therm(struct qpnp_vadc_chip *chip,
+		int32_t adc_code,
+		const struct qpnp_adc_properties *adc_properties,
+		const struct qpnp_vadc_chan_properties *chan_properties,
+		struct qpnp_vadc_result *adc_chan_result)
+{
+	int64_t bat_voltage = 0;
+
+	qpnp_adc_scale_with_calib_param(adc_code,
+			adc_properties, chan_properties, &bat_voltage);
+
+	adc_chan_result->measurement = bat_voltage;
+
+	return qpnp_adc_map_temp_voltage(
+			adcmap_qrd_btm_threshold,
+			ARRAY_SIZE(adcmap_qrd_btm_threshold),
+			bat_voltage,
+			&adc_chan_result->physical);
+}
+EXPORT_SYMBOL(qpnp_adc_scale_qrd_batt_therm);
+
+int32_t qpnp_adc_scale_qrd_skuaa_batt_therm(struct qpnp_vadc_chip *chip,
+		int32_t adc_code,
+		const struct qpnp_adc_properties *adc_properties,
+		const struct qpnp_vadc_chan_properties *chan_properties,
+		struct qpnp_vadc_result *adc_chan_result)
+{
+	int64_t bat_voltage = 0;
+
+	qpnp_adc_scale_with_calib_param(adc_code,
+			adc_properties, chan_properties, &bat_voltage);
+
+	adc_chan_result->measurement = bat_voltage;
+
+	return qpnp_adc_map_temp_voltage(
+			adcmap_qrd_skuaa_btm_threshold,
+			ARRAY_SIZE(adcmap_qrd_skuaa_btm_threshold),
+			bat_voltage,
+			&adc_chan_result->physical);
+}
+EXPORT_SYMBOL(qpnp_adc_scale_qrd_skuaa_batt_therm);
+
+int32_t qpnp_adc_scale_qrd_skug_batt_therm(struct qpnp_vadc_chip *chip,
+		int32_t adc_code,
+		const struct qpnp_adc_properties *adc_properties,
+		const struct qpnp_vadc_chan_properties *chan_properties,
+		struct qpnp_vadc_result *adc_chan_result)
+{
+	int64_t bat_voltage = 0;
+
+	qpnp_adc_scale_with_calib_param(adc_code,
+			adc_properties, chan_properties, &bat_voltage);
+	adc_chan_result->measurement = bat_voltage;
+
+	return qpnp_adc_map_temp_voltage(
+			adcmap_qrd_skug_btm_threshold,
+			ARRAY_SIZE(adcmap_qrd_skug_btm_threshold),
+			bat_voltage,
+			&adc_chan_result->physical);
+}
+EXPORT_SYMBOL(qpnp_adc_scale_qrd_skug_batt_therm);
+
+int32_t qpnp_adc_scale_qrd_skuh_batt_therm(struct qpnp_vadc_chip *chip,
+		int32_t adc_code,
+		const struct qpnp_adc_properties *adc_properties,
+		const struct qpnp_vadc_chan_properties *chan_properties,
+		struct qpnp_vadc_result *adc_chan_result)
+{
+	int64_t bat_voltage = 0;
+
+	qpnp_adc_scale_with_calib_param(adc_code,
+			adc_properties, chan_properties, &bat_voltage);
+
+	return qpnp_adc_map_temp_voltage(
+			adcmap_qrd_skuh_btm_threshold,
+			ARRAY_SIZE(adcmap_qrd_skuh_btm_threshold),
+			bat_voltage,
+			&adc_chan_result->physical);
+}
+EXPORT_SYMBOL(qpnp_adc_scale_qrd_skuh_batt_therm);
+
+int32_t qpnp_adc_scale_qrd_skut1_batt_therm(struct qpnp_vadc_chip *chip,
+		int32_t adc_code,
+		const struct qpnp_adc_properties *adc_properties,
+		const struct qpnp_vadc_chan_properties *chan_properties,
+		struct qpnp_vadc_result *adc_chan_result)
+{
+	int64_t bat_voltage = 0;
+
+	qpnp_adc_scale_with_calib_param(adc_code,
+			adc_properties, chan_properties, &bat_voltage);
+
+	return qpnp_adc_map_temp_voltage(
+			adcmap_qrd_skut1_btm_threshold,
+			ARRAY_SIZE(adcmap_qrd_skut1_btm_threshold),
+			bat_voltage,
+			&adc_chan_result->physical);
+}
+EXPORT_SYMBOL(qpnp_adc_scale_qrd_skut1_batt_therm);
+
+int32_t qpnp_adc_scale_smb_batt_therm(struct qpnp_vadc_chip *chip,
+		int32_t adc_code,
+		const struct qpnp_adc_properties *adc_properties,
+		const struct qpnp_vadc_chan_properties *chan_properties,
+		struct qpnp_vadc_result *adc_chan_result)
+{
+	int64_t bat_voltage = 0;
+
+	qpnp_adc_scale_with_calib_param(adc_code,
+			adc_properties, chan_properties, &bat_voltage);
+
+	return qpnp_adc_map_temp_voltage(
+			adcmap_smb_batt_therm,
+			ARRAY_SIZE(adcmap_smb_batt_therm),
+			bat_voltage,
+			&adc_chan_result->physical);
+}
+EXPORT_SYMBOL(qpnp_adc_scale_smb_batt_therm);
+
+int32_t qpnp_adc_scale_therm_pu1(struct qpnp_vadc_chip *chip,
+		int32_t adc_code,
+		const struct qpnp_adc_properties *adc_properties,
+		const struct qpnp_vadc_chan_properties *chan_properties,
+		struct qpnp_vadc_result *adc_chan_result)
+{
+	int64_t therm_voltage = 0;
+
+	qpnp_adc_scale_with_calib_param(adc_code,
+			adc_properties, chan_properties, &therm_voltage);
+
+	qpnp_adc_map_voltage_temp(adcmap_150k_104ef_104fb,
+		ARRAY_SIZE(adcmap_150k_104ef_104fb),
+		therm_voltage, &adc_chan_result->physical);
+
+	return 0;
+}
+EXPORT_SYMBOL(qpnp_adc_scale_therm_pu1);
+
+int32_t qpnp_adc_scale_therm_pu2(struct qpnp_vadc_chip *chip,
+		int32_t adc_code,
+		const struct qpnp_adc_properties *adc_properties,
+		const struct qpnp_vadc_chan_properties *chan_properties,
+		struct qpnp_vadc_result *adc_chan_result)
+{
+	int64_t therm_voltage = 0;
+
+	if (!chan_properties || !chan_properties->offset_gain_numerator ||
+		!chan_properties->offset_gain_denominator || !adc_properties)
+		return -EINVAL;
+
+	if (adc_properties->adc_hc) {
+		/* (ADC code * vref_vadc (1.875V) * 1000) / (0x4000 * 1000) */
+		if (adc_code > QPNP_VADC_HC_MAX_CODE)
+			adc_code = 0;
+		therm_voltage = (int64_t) adc_code;
+		therm_voltage *= (int64_t) (adc_properties->adc_vdd_reference
+							* 1000);
+		therm_voltage = div64_s64(therm_voltage,
+					(QPNP_VADC_HC_VREF_CODE * 1000));
+
+		qpnp_adc_map_voltage_temp(adcmap_100k_104ef_104fb_1875_vref,
+			ARRAY_SIZE(adcmap_100k_104ef_104fb_1875_vref),
+			therm_voltage, &adc_chan_result->physical);
+	} else {
+		qpnp_adc_scale_with_calib_param(adc_code,
+			adc_properties, chan_properties, &therm_voltage);
+
+		if (chan_properties->calib_type == CALIB_ABSOLUTE)
+			do_div(therm_voltage, 1000);
+
+		qpnp_adc_map_voltage_temp(adcmap_100k_104ef_104fb,
+			ARRAY_SIZE(adcmap_100k_104ef_104fb),
+			therm_voltage, &adc_chan_result->physical);
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(qpnp_adc_scale_therm_pu2);
+
+int32_t qpnp_adc_tm_scale_voltage_therm_pu2(struct qpnp_vadc_chip *chip,
+		const struct qpnp_adc_properties *adc_properties,
+					uint32_t reg, int64_t *result)
+{
+	int64_t adc_voltage = 0;
+	struct qpnp_vadc_linear_graph param1;
+	int negative_offset = 0;
+
+	if (adc_properties->adc_hc) {
+		/* (ADC code * vref_vadc (1.875V)) / 0x4000 */
+		if (reg > QPNP_VADC_HC_MAX_CODE)
+			reg = 0;
+		adc_voltage = (int64_t) reg;
+		adc_voltage *= QPNP_VADC_HC_VDD_REFERENCE_MV;
+		adc_voltage = div64_s64(adc_voltage,
+					QPNP_VADC_HC_VREF_CODE);
+		qpnp_adc_map_voltage_temp(adcmap_100k_104ef_104fb_1875_vref,
+			ARRAY_SIZE(adcmap_100k_104ef_104fb_1875_vref),
+			adc_voltage, result);
+	} else {
+		qpnp_get_vadc_gain_and_offset(chip, &param1, CALIB_RATIOMETRIC);
+
+		adc_voltage = (reg - param1.adc_gnd) * param1.adc_vref;
+		if (adc_voltage < 0) {
+			negative_offset = 1;
+			adc_voltage = -adc_voltage;
+		}
+
+		do_div(adc_voltage, param1.dy);
+
+		qpnp_adc_map_voltage_temp(adcmap_100k_104ef_104fb,
+			ARRAY_SIZE(adcmap_100k_104ef_104fb),
+			adc_voltage, result);
+		if (negative_offset)
+			adc_voltage = -adc_voltage;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(qpnp_adc_tm_scale_voltage_therm_pu2);
+
+int32_t qpnp_adc_tm_scale_therm_voltage_pu2(struct qpnp_vadc_chip *chip,
+			const struct qpnp_adc_properties *adc_properties,
+				struct qpnp_adc_tm_config *param)
+{
+	struct qpnp_vadc_linear_graph param1;
+	int rc;
+
+	if (adc_properties->adc_hc) {
+		rc = qpnp_adc_map_temp_voltage(
+			adcmap_100k_104ef_104fb_1875_vref,
+			ARRAY_SIZE(adcmap_100k_104ef_104fb_1875_vref),
+			param->low_thr_temp, &param->low_thr_voltage);
+		if (rc)
+			return rc;
+		param->low_thr_voltage *= QPNP_VADC_HC_VREF_CODE;
+		do_div(param->low_thr_voltage, QPNP_VADC_HC_VDD_REFERENCE_MV);
+
+		rc = qpnp_adc_map_temp_voltage(
+			adcmap_100k_104ef_104fb_1875_vref,
+			ARRAY_SIZE(adcmap_100k_104ef_104fb_1875_vref),
+			param->high_thr_temp, &param->high_thr_voltage);
+		if (rc)
+			return rc;
+		param->high_thr_voltage *= QPNP_VADC_HC_VREF_CODE;
+		do_div(param->high_thr_voltage, QPNP_VADC_HC_VDD_REFERENCE_MV);
+	} else {
+		qpnp_get_vadc_gain_and_offset(chip, &param1, CALIB_RATIOMETRIC);
+
+		rc = qpnp_adc_map_temp_voltage(adcmap_100k_104ef_104fb,
+			ARRAY_SIZE(adcmap_100k_104ef_104fb),
+			param->low_thr_temp, &param->low_thr_voltage);
+		if (rc)
+			return rc;
+
+		param->low_thr_voltage *= param1.dy;
+		do_div(param->low_thr_voltage, param1.adc_vref);
+		param->low_thr_voltage += param1.adc_gnd;
+
+		rc = qpnp_adc_map_temp_voltage(adcmap_100k_104ef_104fb,
+			ARRAY_SIZE(adcmap_100k_104ef_104fb),
+			param->high_thr_temp, &param->high_thr_voltage);
+		if (rc)
+			return rc;
+
+		param->high_thr_voltage *= param1.dy;
+		do_div(param->high_thr_voltage, param1.adc_vref);
+		param->high_thr_voltage += param1.adc_gnd;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(qpnp_adc_tm_scale_therm_voltage_pu2);
+
+int32_t qpnp_adc_scale_therm_ncp03(struct qpnp_vadc_chip *chip,
+		int32_t adc_code,
+		const struct qpnp_adc_properties *adc_properties,
+		const struct qpnp_vadc_chan_properties *chan_properties,
+		struct qpnp_vadc_result *adc_chan_result)
+{
+	int64_t therm_voltage = 0;
+
+	qpnp_adc_scale_with_calib_param(adc_code,
+			adc_properties, chan_properties, &therm_voltage);
+
+	qpnp_adc_map_voltage_temp(adcmap_ncp03wf683,
+		ARRAY_SIZE(adcmap_ncp03wf683),
+		therm_voltage, &adc_chan_result->physical);
+
+	return 0;
+}
+EXPORT_SYMBOL(qpnp_adc_scale_therm_ncp03);
+
+int32_t qpnp_adc_scale_batt_id(struct qpnp_vadc_chip *chip,
+		int32_t adc_code,
+		const struct qpnp_adc_properties *adc_properties,
+		const struct qpnp_vadc_chan_properties *chan_properties,
+		struct qpnp_vadc_result *adc_chan_result)
+{
+	int64_t batt_id_voltage = 0;
+
+	qpnp_adc_scale_with_calib_param(adc_code,
+			adc_properties, chan_properties, &batt_id_voltage);
+
+	adc_chan_result->measurement = batt_id_voltage;
+	adc_chan_result->physical = adc_chan_result->measurement;
+
+	return 0;
+}
+EXPORT_SYMBOL(qpnp_adc_scale_batt_id);
+
+int32_t qpnp_adc_scale_default(struct qpnp_vadc_chip *vadc,
+		int32_t adc_code,
+		const struct qpnp_adc_properties *adc_properties,
+		const struct qpnp_vadc_chan_properties *chan_properties,
+		struct qpnp_vadc_result *adc_chan_result)
+{
+	int64_t scale_voltage = 0;
+
+	if (!chan_properties || !chan_properties->offset_gain_numerator ||
+		!chan_properties->offset_gain_denominator || !adc_properties
+		|| !adc_chan_result)
+		return -EINVAL;
+
+	if (adc_properties->adc_hc) {
+		/* (ADC code * vref_vadc (1.875V)) / 0x4000 */
+		if (adc_code > QPNP_VADC_HC_MAX_CODE)
+			adc_code = 0;
+		scale_voltage = (int64_t) adc_code;
+		scale_voltage *= (adc_properties->adc_vdd_reference * 1000);
+		scale_voltage = div64_s64(scale_voltage,
+						QPNP_VADC_HC_VREF_CODE);
+	} else {
+		qpnp_adc_scale_with_calib_param(adc_code, adc_properties,
+					chan_properties, &scale_voltage);
+		if (chan_properties->calib_type != CALIB_ABSOLUTE)
+			scale_voltage *= 1000;
+	}
+
+	scale_voltage *= chan_properties->offset_gain_denominator;
+	scale_voltage = div64_s64(scale_voltage,
+				chan_properties->offset_gain_numerator);
+	adc_chan_result->measurement = scale_voltage;
+	/*
+	 * Note: adc_chan_result->measurement is in the unit of
+	 * adc_properties.adc_reference. For generic channel processing,
+	 * channel measurement is a scale/ratio relative to the adc
+	 * reference input
+	 */
+	adc_chan_result->physical = adc_chan_result->measurement;
+
+	return 0;
+}
+EXPORT_SYMBOL(qpnp_adc_scale_default);
+
+int32_t qpnp_adc_usb_scaler(struct qpnp_vadc_chip *chip,
+		struct qpnp_adc_tm_btm_param *param,
+		uint32_t *low_threshold, uint32_t *high_threshold)
+{
+	struct qpnp_vadc_linear_graph usb_param;
+
+	qpnp_get_vadc_gain_and_offset(chip, &usb_param, CALIB_RATIOMETRIC);
+
+	*low_threshold = param->low_thr * usb_param.dy;
+	do_div(*low_threshold, usb_param.adc_vref);
+	*low_threshold += usb_param.adc_gnd;
+
+	*high_threshold = param->high_thr * usb_param.dy;
+	do_div(*high_threshold, usb_param.adc_vref);
+	*high_threshold += usb_param.adc_gnd;
+
+	pr_debug("high_volt:%d, low_volt:%d\n", param->high_thr,
+				param->low_thr);
+	return 0;
+}
+EXPORT_SYMBOL(qpnp_adc_usb_scaler);
+
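+/*
+ * Reverse scaling for absolute calibration: a threshold voltage V (uV)
+ * is converted to an ADC code as roughly
+ *
+ *	code = (V / gain_den - 625uV) * dy * gain_num / 625uV + adc_gnd
+ *
+ * with the sign handled manually around do_div(), which only accepts a
+ * non-negative dividend.
+ */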
+int32_t qpnp_adc_absolute_rthr(struct qpnp_vadc_chip *chip,
+		struct qpnp_adc_tm_btm_param *param,
+		uint32_t *low_threshold, uint32_t *high_threshold)
+{
+	struct qpnp_vadc_linear_graph vbatt_param;
+	int rc = 0, sign = 0;
+	int64_t low_thr = 0, high_thr = 0;
+
+	if (param->adc_tm_hc) {
+		low_thr = (param->low_thr/param->gain_den);
+		low_thr *= param->gain_num;
+		low_thr *= QPNP_VADC_HC_VREF_CODE;
+		do_div(low_thr, (QPNP_VADC_HC_VDD_REFERENCE_MV * 1000));
+		*low_threshold = low_thr;
+
+		high_thr = (param->high_thr/param->gain_den);
+		high_thr *= param->gain_num;
+		high_thr *= QPNP_VADC_HC_VREF_CODE;
+		do_div(high_thr, (QPNP_VADC_HC_VDD_REFERENCE_MV * 1000));
+		*high_threshold = high_thr;
+	} else {
+		rc = qpnp_get_vadc_gain_and_offset(chip, &vbatt_param,
+							CALIB_ABSOLUTE);
+		if (rc < 0)
+			return rc;
+
+		low_thr = (((param->low_thr/param->gain_den) -
+				QPNP_ADC_625_UV) * vbatt_param.dy);
+		if (low_thr < 0) {
+			sign = 1;
+			low_thr = -low_thr;
+		}
+		low_thr = low_thr * param->gain_num;
+		do_div(low_thr, QPNP_ADC_625_UV);
+		if (sign)
+			low_thr = -low_thr;
+		*low_threshold = low_thr + vbatt_param.adc_gnd;
+
+		sign = 0;
+		high_thr = (((param->high_thr/param->gain_den) -
+				QPNP_ADC_625_UV) * vbatt_param.dy);
+		if (high_thr < 0) {
+			sign = 1;
+			high_thr = -high_thr;
+		}
+		high_thr = high_thr * param->gain_num;
+		do_div(high_thr, QPNP_ADC_625_UV);
+		if (sign)
+			high_thr = -high_thr;
+		*high_threshold = high_thr + vbatt_param.adc_gnd;
+	}
+
+	pr_debug("high_volt:%d, low_volt:%d\n", param->high_thr,
+				param->low_thr);
+	pr_debug("adc_code_high:%x, adc_code_low:%x\n", *high_threshold,
+				*low_threshold);
+	return 0;
+}
+EXPORT_SYMBOL(qpnp_adc_absolute_rthr);
+
+int32_t qpnp_adc_vbatt_rscaler(struct qpnp_vadc_chip *chip,
+		struct qpnp_adc_tm_btm_param *param,
+		uint32_t *low_threshold, uint32_t *high_threshold)
+{
+	return qpnp_adc_absolute_rthr(chip, param, low_threshold,
+							high_threshold);
+}
+EXPORT_SYMBOL(qpnp_adc_vbatt_rscaler);
+
+int32_t qpnp_vadc_absolute_rthr(struct qpnp_vadc_chip *chip,
+		const struct qpnp_vadc_chan_properties *chan_prop,
+		struct qpnp_adc_tm_btm_param *param,
+		uint32_t *low_threshold, uint32_t *high_threshold)
+{
+	struct qpnp_vadc_linear_graph vbatt_param;
+	int rc = 0, sign = 0;
+	int64_t low_thr = 0, high_thr = 0;
+
+	if (!chan_prop || !chan_prop->offset_gain_numerator ||
+		!chan_prop->offset_gain_denominator)
+		return -EINVAL;
+
+	rc = qpnp_get_vadc_gain_and_offset(chip, &vbatt_param, CALIB_ABSOLUTE);
+	if (rc < 0)
+		return rc;
+
+	low_thr = (((param->low_thr)/(int)chan_prop->offset_gain_denominator
+					- QPNP_ADC_625_UV) * vbatt_param.dy);
+	if (low_thr < 0) {
+		sign = 1;
+		low_thr = -low_thr;
+	}
+	low_thr = low_thr * chan_prop->offset_gain_numerator;
+	do_div(low_thr, QPNP_ADC_625_UV);
+	if (sign)
+		low_thr = -low_thr;
+	*low_threshold = low_thr + vbatt_param.adc_gnd;
+
+	sign = 0;
+	high_thr = (((param->high_thr)/(int)chan_prop->offset_gain_denominator
+					- QPNP_ADC_625_UV) * vbatt_param.dy);
+	if (high_thr < 0) {
+		sign = 1;
+		high_thr = -high_thr;
+	}
+	high_thr = high_thr * chan_prop->offset_gain_numerator;
+	do_div(high_thr, QPNP_ADC_625_UV);
+	if (sign)
+		high_thr = -high_thr;
+	*high_threshold = high_thr + vbatt_param.adc_gnd;
+
+	pr_debug("high_volt:%d, low_volt:%d\n", param->high_thr,
+				param->low_thr);
+	pr_debug("adc_code_high:%x, adc_code_low:%x\n", *high_threshold,
+				*low_threshold);
+	return 0;
+}
+EXPORT_SYMBOL(qpnp_vadc_absolute_rthr);
+
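+/*
+ * BTM threshold scalers: look up the thermistor voltage for each
+ * temperature limit, then convert it to a ratiometric code with
+ *
+ *	code = V * dy / adc_vref + adc_gnd
+ *
+ * Since the NTC voltage falls as temperature rises, the low and high
+ * thresholds swap on output.
+ */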
+int32_t qpnp_adc_btm_scaler(struct qpnp_vadc_chip *chip,
+		struct qpnp_adc_tm_btm_param *param,
+		uint32_t *low_threshold, uint32_t *high_threshold)
+{
+	struct qpnp_vadc_linear_graph btm_param;
+	int64_t low_output = 0, high_output = 0;
+	int rc = 0;
+
+	if (param->adc_tm_hc) {
+		pr_err("Update scaling for VADC_TM_HC\n");
+		return -EINVAL;
+	}
+
+	qpnp_get_vadc_gain_and_offset(chip, &btm_param, CALIB_RATIOMETRIC);
+
+	pr_debug("warm_temp:%d and cool_temp:%d\n", param->high_temp,
+				param->low_temp);
+	rc = qpnp_adc_map_voltage_temp(
+		adcmap_btm_threshold,
+		ARRAY_SIZE(adcmap_btm_threshold),
+		(param->low_temp),
+		&low_output);
+	if (rc) {
+		pr_debug("low_temp mapping failed with %d\n", rc);
+		return rc;
+	}
+
+	pr_debug("low_output:%lld\n", low_output);
+	low_output *= btm_param.dy;
+	do_div(low_output, btm_param.adc_vref);
+	low_output += btm_param.adc_gnd;
+
+	rc = qpnp_adc_map_voltage_temp(
+		adcmap_btm_threshold,
+		ARRAY_SIZE(adcmap_btm_threshold),
+		(param->high_temp),
+		&high_output);
+	if (rc) {
+		pr_debug("high temp mapping failed with %d\n", rc);
+		return rc;
+	}
+
+	pr_debug("high_output:%lld\n", high_output);
+	high_output *= btm_param.dy;
+	do_div(high_output, btm_param.adc_vref);
+	high_output += btm_param.adc_gnd;
+
+	/* btm low temperature corresponds to high voltage threshold */
+	*low_threshold = high_output;
+	/* btm high temperature corresponds to low voltage threshold */
+	*high_threshold = low_output;
+
+	pr_debug("high_volt:%d, low_volt:%d\n", *high_threshold,
+				*low_threshold);
+	return 0;
+}
+EXPORT_SYMBOL(qpnp_adc_btm_scaler);
+
+int32_t qpnp_adc_qrd_skuh_btm_scaler(struct qpnp_vadc_chip *chip,
+		struct qpnp_adc_tm_btm_param *param,
+		uint32_t *low_threshold, uint32_t *high_threshold)
+{
+	struct qpnp_vadc_linear_graph btm_param;
+	int64_t low_output = 0, high_output = 0;
+	int rc = 0;
+
+	if (param->adc_tm_hc) {
+		pr_err("Update scaling for VADC_TM_HC\n");
+		return -EINVAL;
+	}
+
+	qpnp_get_vadc_gain_and_offset(chip, &btm_param, CALIB_RATIOMETRIC);
+
+	pr_debug("warm_temp:%d and cool_temp:%d\n", param->high_temp,
+				param->low_temp);
+	rc = qpnp_adc_map_voltage_temp(
+		adcmap_qrd_skuh_btm_threshold,
+		ARRAY_SIZE(adcmap_qrd_skuh_btm_threshold),
+		(param->low_temp),
+		&low_output);
+	if (rc) {
+		pr_debug("low_temp mapping failed with %d\n", rc);
+		return rc;
+	}
+
+	pr_debug("low_output:%lld\n", low_output);
+	low_output *= btm_param.dy;
+	do_div(low_output, btm_param.adc_vref);
+	low_output += btm_param.adc_gnd;
+
+	rc = qpnp_adc_map_voltage_temp(
+		adcmap_qrd_skuh_btm_threshold,
+		ARRAY_SIZE(adcmap_qrd_skuh_btm_threshold),
+		(param->high_temp),
+		&high_output);
+	if (rc) {
+		pr_debug("high temp mapping failed with %d\n", rc);
+		return rc;
+	}
+
+	pr_debug("high_output:%lld\n", high_output);
+	high_output *= btm_param.dy;
+	do_div(high_output, btm_param.adc_vref);
+	high_output += btm_param.adc_gnd;
+
+	/* btm low temperature corresponds to high voltage threshold */
+	*low_threshold = high_output;
+	/* btm high temperature corresponds to low voltage threshold */
+	*high_threshold = low_output;
+
+	pr_debug("high_volt:%d, low_volt:%d\n", *high_threshold,
+				*low_threshold);
+	return 0;
+}
+EXPORT_SYMBOL(qpnp_adc_qrd_skuh_btm_scaler);
+
+int32_t qpnp_adc_qrd_skut1_btm_scaler(struct qpnp_vadc_chip *chip,
+		struct qpnp_adc_tm_btm_param *param,
+		uint32_t *low_threshold, uint32_t *high_threshold)
+{
+	struct qpnp_vadc_linear_graph btm_param;
+	int64_t low_output = 0, high_output = 0;
+	int rc = 0;
+
+	if (param->adc_tm_hc) {
+		pr_err("Update scaling for VADC_TM_HC\n");
+		return -EINVAL;
+	}
+
+	qpnp_get_vadc_gain_and_offset(chip, &btm_param, CALIB_RATIOMETRIC);
+
+	pr_debug("warm_temp:%d and cool_temp:%d\n", param->high_temp,
+				param->low_temp);
+	rc = qpnp_adc_map_voltage_temp(
+		adcmap_qrd_skut1_btm_threshold,
+		ARRAY_SIZE(adcmap_qrd_skut1_btm_threshold),
+		(param->low_temp),
+		&low_output);
+	if (rc) {
+		pr_debug("low_temp mapping failed with %d\n", rc);
+		return rc;
+	}
+
+	pr_debug("low_output:%lld\n", low_output);
+	low_output *= btm_param.dy;
+	do_div(low_output, btm_param.adc_vref);
+	low_output += btm_param.adc_gnd;
+
+	rc = qpnp_adc_map_voltage_temp(
+		adcmap_qrd_skut1_btm_threshold,
+		ARRAY_SIZE(adcmap_qrd_skut1_btm_threshold),
+		(param->high_temp),
+		&high_output);
+	if (rc) {
+		pr_debug("high temp mapping failed with %d\n", rc);
+		return rc;
+	}
+
+	pr_debug("high_output:%lld\n", high_output);
+	high_output *= btm_param.dy;
+	do_div(high_output, btm_param.adc_vref);
+	high_output += btm_param.adc_gnd;
+
+	/* btm low temperature corresponds to high voltage threshold */
+	*low_threshold = high_output;
+	/* btm high temperature corresponds to low voltage threshold */
+	*high_threshold = low_output;
+
+	pr_debug("high_volt:%d, low_volt:%d\n", *high_threshold,
+				*low_threshold);
+	return 0;
+}
+EXPORT_SYMBOL(qpnp_adc_qrd_skut1_btm_scaler);
+
+int32_t qpnp_adc_smb_btm_rscaler(struct qpnp_vadc_chip *chip,
+		struct qpnp_adc_tm_btm_param *param,
+		uint32_t *low_threshold, uint32_t *high_threshold)
+{
+	struct qpnp_vadc_linear_graph btm_param;
+	int64_t low_output = 0, high_output = 0;
+	int rc = 0;
+
+	if (param->adc_tm_hc) {
+		pr_err("Update scaling for VADC_TM_HC\n");
+		return -EINVAL;
+	}
+
+	qpnp_get_vadc_gain_and_offset(chip, &btm_param, CALIB_RATIOMETRIC);
+
+	pr_debug("warm_temp:%d and cool_temp:%d\n", param->high_temp,
+				param->low_temp);
+	rc = qpnp_adc_map_voltage_temp(
+		adcmap_smb_batt_therm,
+		ARRAY_SIZE(adcmap_smb_batt_therm),
+		(param->low_temp),
+		&low_output);
+	if (rc) {
+		pr_debug("low_temp mapping failed with %d\n", rc);
+		return rc;
+	}
+
+	pr_debug("low_output:%lld\n", low_output);
+	low_output *= btm_param.dy;
+	do_div(low_output, btm_param.adc_vref);
+	low_output += btm_param.adc_gnd;
+
+	rc = qpnp_adc_map_voltage_temp(
+		adcmap_smb_batt_therm,
+		ARRAY_SIZE(adcmap_smb_batt_therm),
+		(param->high_temp),
+		&high_output);
+	if (rc) {
+		pr_debug("high temp mapping failed with %d\n", rc);
+		return rc;
+	}
+
+	pr_debug("high_output:%lld\n", high_output);
+	high_output *= btm_param.dy;
+	do_div(high_output, btm_param.adc_vref);
+	high_output += btm_param.adc_gnd;
+
+	/* btm low temperature corresponds to high voltage threshold */
+	*low_threshold = high_output;
+	/* btm high temperature corresponds to low voltage threshold */
+	*high_threshold = low_output;
+
+	pr_debug("high_volt:%d, low_volt:%d\n", *high_threshold,
+				*low_threshold);
+	return 0;
+}
+EXPORT_SYMBOL(qpnp_adc_smb_btm_rscaler);
+
+int32_t qpnp_adc_scale_pmi_chg_temp(struct qpnp_vadc_chip *vadc,
+		int32_t adc_code,
+		const struct qpnp_adc_properties *adc_properties,
+		const struct qpnp_vadc_chan_properties *chan_properties,
+		struct qpnp_vadc_result *adc_chan_result)
+{
+	int rc = 0;
+
+	rc = qpnp_adc_scale_default(vadc, adc_code, adc_properties,
+			chan_properties, adc_chan_result);
+	if (rc < 0)
+		return rc;
+
+	pr_debug("raw_code:%x, v_adc:%lld\n", adc_code,
+						adc_chan_result->physical);
+	adc_chan_result->physical = (int64_t) ((PMI_CHG_SCALE_1) *
+					(adc_chan_result->physical * 2));
+	adc_chan_result->physical = (int64_t) (adc_chan_result->physical +
+							PMI_CHG_SCALE_2);
+	adc_chan_result->physical = div64_s64(adc_chan_result->physical,
+								1000000);
+
+	return 0;
+}
+EXPORT_SYMBOL(qpnp_adc_scale_pmi_chg_temp);
+
+int32_t qpnp_adc_enable_voltage(struct qpnp_adc_drv *adc)
+{
+	int rc = 0;
+
+	if (adc->hkadc_ldo) {
+		rc = regulator_enable(adc->hkadc_ldo);
+		if (rc < 0) {
+			pr_err("Failed to enable hkadc ldo\n");
+			return rc;
+		}
+	}
+
+	if (adc->hkadc_ldo_ok) {
+		rc = regulator_enable(adc->hkadc_ldo_ok);
+		if (rc < 0) {
+			pr_err("Failed to enable hkadc ok signal\n");
+			return rc;
+		}
+	}
+
+	return rc;
+}
+EXPORT_SYMBOL(qpnp_adc_enable_voltage);
+
+void qpnp_adc_disable_voltage(struct qpnp_adc_drv *adc)
+{
+	if (adc->hkadc_ldo)
+		regulator_disable(adc->hkadc_ldo);
+
+	if (adc->hkadc_ldo_ok)
+		regulator_disable(adc->hkadc_ldo_ok);
+}
+EXPORT_SYMBOL(qpnp_adc_disable_voltage);
+
+void qpnp_adc_free_voltage_resource(struct qpnp_adc_drv *adc)
+{
+	if (adc->hkadc_ldo)
+		regulator_put(adc->hkadc_ldo);
+
+	if (adc->hkadc_ldo_ok)
+		regulator_put(adc->hkadc_ldo_ok);
+}
+EXPORT_SYMBOL(qpnp_adc_free_voltage_resource);
+
+int qpnp_adc_get_revid_version(struct device *dev)
+{
+	struct pmic_revid_data *revid_data;
+	struct device_node *revid_dev_node;
+
+	revid_dev_node = of_parse_phandle(dev->of_node,
+						"qcom,pmic-revid", 0);
+	if (!revid_dev_node) {
+		pr_debug("Missing qcom,pmic-revid property\n");
+		return -EINVAL;
+	}
+
+	revid_data = get_revid_data(revid_dev_node);
+	if (IS_ERR(revid_data)) {
+		pr_debug("revid error rc = %ld\n", PTR_ERR(revid_data));
+		return -EINVAL;
+	}
+
+	if ((revid_data->rev1 == PM8941_V3P1_REV1) &&
+		(revid_data->rev2 == PM8941_V3P1_REV2) &&
+		(revid_data->rev3 == PM8941_V3P1_REV3) &&
+		(revid_data->rev4 == PM8941_V3P1_REV4) &&
+		(revid_data->pmic_subtype == PM8941_SUBTYPE))
+			return QPNP_REV_ID_8941_3_1;
+	else if ((revid_data->rev1 == PM8941_V3P0_REV1) &&
+		(revid_data->rev2 == PM8941_V3P0_REV2) &&
+		(revid_data->rev3 == PM8941_V3P0_REV3) &&
+		(revid_data->rev4 == PM8941_V3P0_REV4) &&
+		(revid_data->pmic_subtype == PM8941_SUBTYPE))
+			return QPNP_REV_ID_8941_3_0;
+	else if ((revid_data->rev1 == PM8941_V2P0_REV1) &&
+		(revid_data->rev2 == PM8941_V2P0_REV2) &&
+		(revid_data->rev3 == PM8941_V2P0_REV3) &&
+		(revid_data->rev4 == PM8941_V2P0_REV4) &&
+		(revid_data->pmic_subtype == PM8941_SUBTYPE))
+			return QPNP_REV_ID_8941_2_0;
+	else if ((revid_data->rev1 == PM8226_V2P2_REV1) &&
+		(revid_data->rev2 == PM8226_V2P2_REV2) &&
+		(revid_data->rev3 == PM8226_V2P2_REV3) &&
+		(revid_data->rev4 == PM8226_V2P2_REV4) &&
+		(revid_data->pmic_subtype == PM8226_SUBTYPE))
+			return QPNP_REV_ID_8026_2_2;
+	else if ((revid_data->rev1 == PM8226_V2P1_REV1) &&
+		(revid_data->rev2 == PM8226_V2P1_REV2) &&
+		(revid_data->rev3 == PM8226_V2P1_REV3) &&
+		(revid_data->rev4 == PM8226_V2P1_REV4) &&
+		(revid_data->pmic_subtype == PM8226_SUBTYPE))
+			return QPNP_REV_ID_8026_2_1;
+	else if ((revid_data->rev1 == PM8226_V2P0_REV1) &&
+		(revid_data->rev2 == PM8226_V2P0_REV2) &&
+		(revid_data->rev3 == PM8226_V2P0_REV3) &&
+		(revid_data->rev4 == PM8226_V2P0_REV4) &&
+		(revid_data->pmic_subtype == PM8226_SUBTYPE))
+			return QPNP_REV_ID_8026_2_0;
+	else if ((revid_data->rev1 == PM8226_V1P0_REV1) &&
+		(revid_data->rev2 == PM8226_V1P0_REV2) &&
+		(revid_data->rev3 == PM8226_V1P0_REV3) &&
+		(revid_data->rev4 == PM8226_V1P0_REV4) &&
+		(revid_data->pmic_subtype == PM8226_SUBTYPE))
+			return QPNP_REV_ID_8026_1_0;
+	else if ((revid_data->rev1 == PM8110_V1P0_REV1) &&
+		(revid_data->rev2 == PM8110_V1P0_REV2) &&
+		(revid_data->rev3 == PM8110_V1P0_REV3) &&
+		(revid_data->rev4 == PM8110_V1P0_REV4) &&
+		(revid_data->pmic_subtype == PM8110_SUBTYPE))
+			return QPNP_REV_ID_8110_1_0;
+	else if ((revid_data->rev1 == PM8110_V2P0_REV1) &&
+		(revid_data->rev2 == PM8110_V2P0_REV2) &&
+		(revid_data->rev3 == PM8110_V2P0_REV3) &&
+		(revid_data->rev4 == PM8110_V2P0_REV4) &&
+		(revid_data->pmic_subtype == PM8110_SUBTYPE))
+			return QPNP_REV_ID_8110_2_0;
+	else if ((revid_data->rev1 == PM8916_V1P0_REV1) &&
+		(revid_data->rev2 == PM8916_V1P0_REV2) &&
+		(revid_data->rev3 == PM8916_V1P0_REV3) &&
+		(revid_data->rev4 == PM8916_V1P0_REV4) &&
+		(revid_data->pmic_subtype == PM8916_SUBTYPE))
+			return QPNP_REV_ID_8916_1_0;
+	else if ((revid_data->rev1 == PM8916_V1P1_REV1) &&
+		(revid_data->rev2 == PM8916_V1P1_REV2) &&
+		(revid_data->rev3 == PM8916_V1P1_REV3) &&
+		(revid_data->rev4 == PM8916_V1P1_REV4) &&
+		(revid_data->pmic_subtype == PM8916_SUBTYPE))
+			return QPNP_REV_ID_8916_1_1;
+	else if ((revid_data->rev1 == PM8916_V2P0_REV1) &&
+		(revid_data->rev2 == PM8916_V2P0_REV2) &&
+		(revid_data->rev3 == PM8916_V2P0_REV3) &&
+		(revid_data->rev4 == PM8916_V2P0_REV4) &&
+		(revid_data->pmic_subtype == PM8916_SUBTYPE))
+			return QPNP_REV_ID_8916_2_0;
+	else if ((revid_data->rev1 == PM8909_V1P0_REV1) &&
+		(revid_data->rev2 == PM8909_V1P0_REV2) &&
+		(revid_data->rev3 == PM8909_V1P0_REV3) &&
+		(revid_data->rev4 == PM8909_V1P0_REV4) &&
+		(revid_data->pmic_subtype == PM8909_SUBTYPE))
+			return QPNP_REV_ID_8909_1_0;
+	else if ((revid_data->rev1 == PM8909_V1P1_REV1) &&
+		(revid_data->rev2 == PM8909_V1P1_REV2) &&
+		(revid_data->rev3 == PM8909_V1P1_REV3) &&
+		(revid_data->rev4 == PM8909_V1P1_REV4) &&
+		(revid_data->pmic_subtype == PM8909_SUBTYPE))
+			return QPNP_REV_ID_8909_1_1;
+	else if ((revid_data->rev4 == PM8950_V1P0_REV4) &&
+		(revid_data->pmic_subtype == PM8950_SUBTYPE))
+			return QPNP_REV_ID_PM8950_1_0;
+	else
+		return -EINVAL;
+}
+EXPORT_SYMBOL(qpnp_adc_get_revid_version);
+
+int32_t qpnp_adc_get_devicetree_data(struct platform_device *pdev,
+			struct qpnp_adc_drv *adc_qpnp)
+{
+	struct device_node *node = pdev->dev.of_node;
+	unsigned int base;
+	struct device_node *child;
+	struct qpnp_adc_amux *adc_channel_list;
+	struct qpnp_adc_properties *adc_prop;
+	struct qpnp_adc_amux_properties *amux_prop;
+	int count_adc_channel_list = 0, decimation = 0, rc = 0, i = 0;
+	int decimation_tm_hc = 0, fast_avg_setup_tm_hc = 0, cal_val_hc = 0;
+	bool adc_hc;
+
+	if (!node)
+		return -EINVAL;
+
+	for_each_child_of_node(node, child)
+		count_adc_channel_list++;
+
+	if (!count_adc_channel_list) {
+		pr_err("No channel listing\n");
+		return -EINVAL;
+	}
+
+	adc_qpnp->pdev = pdev;
+
+	adc_prop = devm_kzalloc(&pdev->dev,
+				sizeof(struct qpnp_adc_properties),
+					GFP_KERNEL);
+	if (!adc_prop)
+		return -ENOMEM;
+
+	adc_channel_list = devm_kzalloc(&pdev->dev,
+		((sizeof(struct qpnp_adc_amux)) * count_adc_channel_list),
+				GFP_KERNEL);
+	if (!adc_channel_list)
+		return -ENOMEM;
+
+	amux_prop = devm_kzalloc(&pdev->dev,
+		sizeof(struct qpnp_adc_amux_properties) +
+		sizeof(struct qpnp_vadc_chan_properties), GFP_KERNEL);
+	if (!amux_prop) {
+		dev_err(&pdev->dev, "Unable to allocate memory\n");
+		return -ENOMEM;
+	}
+
+	adc_qpnp->adc_channels = adc_channel_list;
+	adc_qpnp->amux_prop = amux_prop;
+	adc_hc = adc_qpnp->adc_hc;
+	adc_prop->adc_hc = adc_hc;
+
+	if (of_device_is_compatible(node, "qcom,qpnp-adc-tm-hc")) {
+		rc = of_property_read_u32(node, "qcom,decimation",
+						&decimation_tm_hc);
+		if (rc) {
+			pr_err("Invalid decimation property\n");
+			return -EINVAL;
+		}
+
+		rc = of_property_read_u32(node,
+			"qcom,fast-avg-setup", &fast_avg_setup_tm_hc);
+		if (rc) {
+			pr_err("Invalid fast average setup with %d\n", rc);
+			return -EINVAL;
+		}
+
+		if ((fast_avg_setup_tm_hc) > ADC_FAST_AVG_SAMPLE_16) {
+			pr_err("Max average support is 2^16\n");
+			return -EINVAL;
+		}
+	}
+
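+	/*
+	 * Each child node describes one AMUX channel. A hypothetical
+	 * example of the properties parsed below (values are purely
+	 * illustrative):
+	 *
+	 *	chan@8 {
+	 *		label = "die_temp";
+	 *		reg = <8>;
+	 *		qcom,decimation = <0>;
+	 *		qcom,pre-div-channel-scaling = <0>;
+	 *		qcom,calibration-type = "absolute";
+	 *		qcom,scale-function = <3>;
+	 *		qcom,hw-settle-time = <0>;
+	 *		qcom,fast-avg-setup = <0>;
+	 *	};
+	 */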
+	for_each_child_of_node(node, child) {
+		int channel_num, scaling = 0, post_scaling = 0;
+		int fast_avg_setup, calib_type = 0, rc, hw_settle_time = 0;
+		const char *calibration_param, *channel_name;
+
+		channel_name = of_get_property(child,
+				"label", NULL) ? : child->name;
+		if (!channel_name) {
+			pr_err("Invalid channel name\n");
+			return -EINVAL;
+		}
+
+		rc = of_property_read_u32(child, "reg", &channel_num);
+		if (rc) {
+			pr_err("Invalid channel num\n");
+			return -EINVAL;
+		}
+
+		if (!of_device_is_compatible(node, "qcom,qpnp-iadc")) {
+			rc = of_property_read_u32(child,
+				"qcom,hw-settle-time", &hw_settle_time);
+			if (rc) {
+				pr_err("Invalid channel hw settle time property\n");
+				return -EINVAL;
+			}
+			rc = of_property_read_u32(child,
+				"qcom,pre-div-channel-scaling", &scaling);
+			if (rc) {
+				pr_err("Invalid channel scaling property\n");
+				return -EINVAL;
+			}
+			rc = of_property_read_u32(child,
+				"qcom,scale-function", &post_scaling);
+			if (rc) {
+				pr_err("Invalid channel post scaling property\n");
+				return -EINVAL;
+			}
+			rc = of_property_read_string(child,
+				"qcom,calibration-type", &calibration_param);
+			if (rc) {
+				pr_err("Invalid calibration type\n");
+				return -EINVAL;
+			}
+
+			if (!strcmp(calibration_param, "absolute")) {
+				if (adc_hc)
+					calib_type = ADC_HC_ABS_CAL;
+				else
+					calib_type = CALIB_ABSOLUTE;
+			} else if (!strcmp(calibration_param, "ratiometric")) {
+				if (adc_hc)
+					calib_type = ADC_HC_RATIO_CAL;
+				else
+					calib_type = CALIB_RATIOMETRIC;
+			} else if (!strcmp(calibration_param, "no_cal")) {
+				if (adc_hc)
+					calib_type = ADC_HC_NO_CAL;
+				else {
+					pr_err("%s: Invalid calibration property\n",
+						__func__);
+					return -EINVAL;
+				}
+			} else {
+				pr_err("%s: Invalid calibration property\n",
+						__func__);
+				return -EINVAL;
+			}
+		}
+
+		/* ADC_TM_HC fast avg setting is common across channels */
+		if (!of_device_is_compatible(node, "qcom,qpnp-adc-tm-hc")) {
+			rc = of_property_read_u32(child,
+				"qcom,fast-avg-setup", &fast_avg_setup);
+			if (rc) {
+				pr_err("Invalid channel fast average setup\n");
+				return -EINVAL;
+			}
+		} else {
+			fast_avg_setup = fast_avg_setup_tm_hc;
+		}
+
+		/* ADC_TM_HC decimation setting is common across channels */
+		if (!of_device_is_compatible(node, "qcom,qpnp-adc-tm-hc")) {
+			rc = of_property_read_u32(child,
+				"qcom,decimation", &decimation);
+			if (rc) {
+				pr_err("Invalid decimation\n");
+				return -EINVAL;
+			}
+		} else {
+			decimation = decimation_tm_hc;
+		}
+
+		if (of_device_is_compatible(node, "qcom,qpnp-vadc-hc")) {
+			rc = of_property_read_u32(child, "qcom,cal-val",
+							&cal_val_hc);
+			if (rc) {
+				pr_debug("Use calibration value from timer\n");
+				adc_channel_list[i].cal_val = ADC_TIMER_CAL;
+			} else {
+				adc_channel_list[i].cal_val = cal_val_hc;
+			}
+		}
+
+		/* Individual channel properties */
+		adc_channel_list[i].name = (char *)channel_name;
+		adc_channel_list[i].channel_num = channel_num;
+		adc_channel_list[i].adc_decimation = decimation;
+		adc_channel_list[i].fast_avg_setup = fast_avg_setup;
+		if (!of_device_is_compatible(node, "qcom,qpnp-iadc")) {
+			adc_channel_list[i].chan_path_prescaling = scaling;
+			adc_channel_list[i].adc_scale_fn = post_scaling;
+			adc_channel_list[i].hw_settle_time = hw_settle_time;
+			adc_channel_list[i].calib_type = calib_type;
+		}
+		i++;
+	}
+
+	/* Get the ADC VDD reference voltage and ADC bit resolution */
+	rc = of_property_read_u32(node, "qcom,adc-vdd-reference",
+			&adc_prop->adc_vdd_reference);
+	if (rc) {
+		pr_err("Invalid adc vdd reference property\n");
+		return -EINVAL;
+	}
+	rc = of_property_read_u32(node, "qcom,adc-bit-resolution",
+			&adc_prop->bitresolution);
+	if (rc) {
+		pr_err("Invalid adc bit resolution property\n");
+		return -EINVAL;
+	}
+	adc_qpnp->adc_prop = adc_prop;
+
+	/* Get the peripheral address */
+	rc = of_property_read_u32(pdev->dev.of_node, "reg", &base);
+	if (rc < 0) {
+		dev_err(&pdev->dev,
+			"Couldn't find reg in node = %s rc = %d\n",
+			pdev->dev.of_node->full_name, rc);
+		return rc;
+	}
+
+	adc_qpnp->slave = to_spmi_device(pdev->dev.parent)->usid;
+	adc_qpnp->offset = base;
+
+	/* Register the ADC peripheral interrupt */
+	adc_qpnp->adc_irq_eoc = platform_get_irq_byname(pdev,
+							"eoc-int-en-set");
+	if (adc_qpnp->adc_irq_eoc < 0) {
+		pr_err("Invalid irq\n");
+		return -ENXIO;
+	}
+
+	init_completion(&adc_qpnp->adc_rslt_completion);
+
+	if (of_get_property(node, "hkadc_ldo-supply", NULL)) {
+		adc_qpnp->hkadc_ldo = regulator_get(&pdev->dev, "hkadc_ldo");
+		if (IS_ERR(adc_qpnp->hkadc_ldo)) {
+			pr_err("hkadc_ldo-supply node not found\n");
+			return -EINVAL;
+		}
+
+		rc = regulator_set_voltage(adc_qpnp->hkadc_ldo,
+				QPNP_VADC_LDO_VOLTAGE_MIN,
+				QPNP_VADC_LDO_VOLTAGE_MAX);
+		if (rc < 0) {
+			pr_err("setting voltage for hkadc_ldo failed\n");
+			return rc;
+		}
+
+		rc = regulator_set_load(adc_qpnp->hkadc_ldo, 100000);
+		if (rc < 0) {
+			pr_err("hkadc_ldo optimum mode failed with %d\n", rc);
+			return rc;
+		}
+	}
+
+	if (of_get_property(node, "hkadc_ok-supply", NULL)) {
+		adc_qpnp->hkadc_ldo_ok = regulator_get(&pdev->dev,
+				"hkadc_ok");
+		if (IS_ERR(adc_qpnp->hkadc_ldo_ok)) {
+			pr_err("hkadc_ok node not found\n");
+			return -EINVAL;
+		}
+
+		rc = regulator_set_voltage(adc_qpnp->hkadc_ldo_ok,
+				QPNP_VADC_OK_VOLTAGE_MIN,
+				QPNP_VADC_OK_VOLTAGE_MAX);
+		if (rc < 0) {
+			pr_err("setting voltage for hkadc-ldo-ok failed\n");
+			return rc;
+		}
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(qpnp_adc_get_devicetree_data);
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/hwmon/qpnp-adc-voltage.c	2019-01-22 16:16:23.795249051 +0100
@@ -0,0 +1,2909 @@
+/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/kernel.h>
+#include <linux/regmap.h>
+#include <linux/of.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/mutex.h>
+#include <linux/types.h>
+#include <linux/hwmon.h>
+#include <linux/module.h>
+#include <linux/debugfs.h>
+#include <linux/spmi.h>
+#include <linux/platform_device.h>
+#include <linux/of_irq.h>
+#include <linux/interrupt.h>
+#include <linux/completion.h>
+#include <linux/hwmon-sysfs.h>
+#include <linux/qpnp/qpnp-adc.h>
+#include <linux/power_supply.h>
+#include <linux/thermal.h>
+
+/* QPNP VADC register definitions */
+#define QPNP_VADC_REVISION1				0x0
+#define QPNP_VADC_REVISION2				0x1
+#define QPNP_VADC_REVISION3				0x2
+#define QPNP_VADC_REVISION4				0x3
+#define QPNP_VADC_PERPH_TYPE				0x4
+#define QPNP_VADC_PERH_SUBTYPE				0x5
+
+#define QPNP_VADC_SUPPORTED_REVISION2			1
+
+#define QPNP_VADC_STATUS1					0x8
+#define QPNP_VADC_STATUS1_OP_MODE				4
+#define QPNP_VADC_STATUS1_MEAS_INTERVAL_EN_STS			BIT(2)
+#define QPNP_VADC_STATUS1_REQ_STS				BIT(1)
+#define QPNP_VADC_STATUS1_EOC					BIT(0)
+#define QPNP_VADC_STATUS1_REQ_STS_EOC_MASK			0x3
+#define QPNP_VADC_STATUS1_OP_MODE_MASK				0x18
+#define QPNP_VADC_MEAS_INT_MODE					0x2
+#define QPNP_VADC_MEAS_INT_MODE_MASK				0x10
+
+#define QPNP_VADC_STATUS2					0x9
+#define QPNP_VADC_STATUS2_CONV_SEQ_STATE				6
+#define QPNP_VADC_STATUS2_FIFO_NOT_EMPTY_FLAG			BIT(1)
+#define QPNP_VADC_STATUS2_CONV_SEQ_TIMEOUT_STS			BIT(0)
+#define QPNP_VADC_STATUS2_CONV_SEQ_STATE_SHIFT			4
+#define QPNP_VADC_CONV_TIMEOUT_ERR				2
+
+#define QPNP_VADC_MODE_CTL					0x40
+#define QPNP_VADC_OP_MODE_SHIFT					3
+#define QPNP_VADC_VREF_XO_THM_FORCE				BIT(2)
+#define QPNP_VADC_AMUX_TRIM_EN					BIT(1)
+#define QPNP_VADC_TRIM_EN					BIT(0)
+#define QPNP_VADC_EN_CTL1					0x46
+#define QPNP_VADC_EN						BIT(7)
+#define QPNP_VADC_CH_SEL_CTL					0x48
+#define QPNP_VADC_DIG_PARAM					0x50
+#define QPNP_VADC_DIG_DEC_RATIO_SEL_SHIFT			3
+#define QPNP_VADC_HW_SETTLE_DELAY				0x51
+#define QPNP_VADC_CONV_REQ					0x52
+#define QPNP_VADC_CONV_REQ_SET					BIT(7)
+#define QPNP_VADC_CONV_SEQ_CTL					0x54
+#define QPNP_VADC_CONV_SEQ_HOLDOFF_SHIFT				4
+#define QPNP_VADC_CONV_SEQ_TRIG_CTL				0x55
+#define QPNP_VADC_MEAS_INTERVAL_CTL				0x57
+#define QPNP_VADC_MEAS_INTERVAL_OP_CTL				0x59
+#define QPNP_VADC_MEAS_INTERVAL_OP_SET				BIT(7)
+
+#define QPNP_VADC_CONV_SEQ_FALLING_EDGE				0x0
+#define QPNP_VADC_CONV_SEQ_RISING_EDGE				0x1
+#define QPNP_VADC_CONV_SEQ_EDGE_SHIFT				7
+#define QPNP_VADC_FAST_AVG_CTL					0x5a
+
+#define QPNP_VADC_LOW_THR_LSB					0x5c
+#define QPNP_VADC_LOW_THR_MSB					0x5d
+#define QPNP_VADC_HIGH_THR_LSB					0x5e
+#define QPNP_VADC_HIGH_THR_MSB					0x5f
+#define QPNP_VADC_ACCESS					0xd0
+#define QPNP_VADC_ACCESS_DATA					0xa5
+#define QPNP_VADC_PERH_RESET_CTL3				0xda
+#define QPNP_FOLLOW_OTST2_RB					BIT(3)
+#define QPNP_FOLLOW_WARM_RB					BIT(2)
+#define QPNP_FOLLOW_SHUTDOWN1_RB				BIT(1)
+#define QPNP_FOLLOW_SHUTDOWN2_RB				BIT(0)
+
+#define QPNP_INT_TEST_VAL					0xE1
+
+#define QPNP_VADC_DATA0						0x60
+#define QPNP_VADC_DATA1						0x61
+#define QPNP_VADC_CONV_TIME_MIN					1000
+#define QPNP_VADC_CONV_TIME_MAX					1100
+#define QPNP_ADC_COMPLETION_TIMEOUT				HZ
+#define QPNP_VADC_ERR_COUNT					20
+#define QPNP_OP_MODE_SHIFT					3
+
+#define QPNP_VADC_THR_LSB_MASK(val)				(val & 0xff)
+#define QPNP_VADC_THR_MSB_MASK(val)			((val & 0xff00) >> 8)
+#define QPNP_MIN_TIME						2000
+#define QPNP_MAX_TIME						2000
+#define QPNP_RETRY						100
+#define QPNP_VADC_ABSOLUTE_RECALIB_OFFSET			8
+#define QPNP_VADC_RATIOMETRIC_RECALIB_OFFSET			12
+#define QPNP_VADC_RECALIB_MAXCNT				10
+#define QPNP_VADC_OFFSET_DUMP					8
+#define QPNP_VADC_REG_DUMP					14
+
+/* QPNP VADC refreshed register set */
+#define QPNP_VADC_HC1_STATUS1					0x8
+
+#define QPNP_VADC_HC1_DATA_HOLD_CTL				0x3f
+#define QPNP_VADC_HC1_DATA_HOLD_CTL_FIELD			BIT(1)
+
+#define QPNP_VADC_HC1_ADC_DIG_PARAM				0x42
+#define QPNP_VADC_HC1_CAL_VAL					BIT(6)
+#define QPNP_VADC_HC1_CAL_VAL_SHIFT				6
+#define QPNP_VADC_HC1_CAL_SEL_MASK				0x30
+#define QPNP_VADC_HC1_CAL_SEL_SHIFT				4
+#define QPNP_VADC_HC1_DEC_RATIO_SEL				0xc
+#define QPNP_VADC_HC1_DEC_RATIO_SHIFT				2
+#define QPNP_VADC_HC1_FAST_AVG_CTL				0x43
+#define QPNP_VADC_HC1_FAST_AVG_SAMPLES_MASK			0x7
+#define QPNP_VADC_HC1_ADC_CH_SEL_CTL				0x44
+#define QPNP_VADC_HC1_DELAY_CTL					0x45
+#define QPNP_VADC_HC1_DELAY_CTL_MASK				0xf
+#define QPNP_VADC_MC1_EN_CTL1					0x46
+#define QPNP_VADC_HC1_ADC_EN					BIT(7)
+#define QPNP_VADC_MC1_CONV_REQ					0x47
+#define QPNP_VADC_HC1_CONV_REQ_START				BIT(7)
+
+#define QPNP_VADC_HC1_VBAT_MIN_THR0				0x48
+#define QPNP_VADC_HC1_VBAT_MIN_THR1				0x49
+
+#define QPNP_VADC_HC1_DATA0					0x50
+#define QPNP_VADC_HC1_DATA1					0x51
+#define QPNP_VADC_HC1_DATA_CHECK_USR				0x8000
+
+#define QPNP_VADC_HC1_VBAT_MIN_DATA0				0x52
+#define QPNP_VADC_MC1_VBAT_MIN_DATA1				0x53
+
+/*
+ * Conversion time varies from 213us to 6827us depending on the decimation
+ * ratio, clock rate and fast-average sample count, assuming no other
+ * measurement is queued.
+ */
+#define QPNP_VADC_HC1_CONV_TIME_MIN_US				213
+#define QPNP_VADC_HC1_CONV_TIME_MAX_US				214
+#define QPNP_VADC_HC1_ERR_COUNT					1600
+
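+/*
+ * State for measurement-interval (recurring) mode:
+ * @meas_int_mode: an interval measurement is currently programmed
+ * @meas_int_request_in_queue: an interval request was pre-empted by a
+ *	one-shot read and must be re-queued when that read completes
+ * @vadc_meas_int_enable: measurement-interval support is enabled
+ * @param: BTM thresholds and client notification callback
+ * @vadc_meas_amux: AMUX channel used for the interval measurement
+ */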
+struct qpnp_vadc_mode_state {
+	bool				meas_int_mode;
+	bool				meas_int_request_in_queue;
+	bool				vadc_meas_int_enable;
+	struct qpnp_adc_tm_btm_param	*param;
+	struct qpnp_adc_amux		vadc_meas_amux;
+};
+
+struct qpnp_vadc_thermal_data {
+	bool thermal_node;
+	int thermal_chan;
+	enum qpnp_vadc_channels vadc_channel;
+	struct thermal_zone_device *tz_dev;
+	struct qpnp_vadc_chip *vadc_dev;
+};
+
+struct qpnp_vadc_chip {
+	struct device			*dev;
+	struct qpnp_adc_drv		*adc;
+	struct list_head		list;
+	struct device			*vadc_hwmon;
+	bool				vadc_init_calib;
+	int				max_channels_available;
+	bool				vadc_iadc_sync_lock;
+	u8				id;
+	struct work_struct		trigger_completion_work;
+	bool				vadc_poll_eoc;
+	bool				vadc_recalib_check;
+	u8				revision_ana_minor;
+	u8				revision_dig_major;
+	struct work_struct		trigger_high_thr_work;
+	struct work_struct		trigger_low_thr_work;
+	struct qpnp_vadc_mode_state	*state_copy;
+	struct qpnp_vadc_thermal_data	*vadc_therm_chan;
+	struct power_supply		*vadc_chg_vote;
+	bool				vadc_hc;
+	int				vadc_debug_count;
+	struct sensor_device_attribute	sens_attr[0];
+};
+
+LIST_HEAD(qpnp_vadc_device_list);
+
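+/*
+ * Post-scaling dispatch table, indexed by each channel's adc_scale_fn
+ * (the "qcom,scale-function" device tree value).
+ */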
+static struct qpnp_vadc_scale_fn vadc_scale_fn[] = {
+	[SCALE_DEFAULT] = {qpnp_adc_scale_default},
+	[SCALE_BATT_THERM] = {qpnp_adc_scale_batt_therm},
+	[SCALE_PMIC_THERM] = {qpnp_adc_scale_pmic_therm},
+	[SCALE_XOTHERM] = {qpnp_adc_tdkntcg_therm},
+	[SCALE_THERM_100K_PULLUP] = {qpnp_adc_scale_therm_pu2},
+	[SCALE_THERM_150K_PULLUP] = {qpnp_adc_scale_therm_pu1},
+	[SCALE_QRD_BATT_THERM] = {qpnp_adc_scale_qrd_batt_therm},
+	[SCALE_QRD_SKUAA_BATT_THERM] = {qpnp_adc_scale_qrd_skuaa_batt_therm},
+	[SCALE_SMB_BATT_THERM] = {qpnp_adc_scale_smb_batt_therm},
+	[SCALE_QRD_SKUG_BATT_THERM] = {qpnp_adc_scale_qrd_skug_batt_therm},
+	[SCALE_QRD_SKUH_BATT_THERM] = {qpnp_adc_scale_qrd_skuh_batt_therm},
+	[SCALE_NCP_03WF683_THERM] = {qpnp_adc_scale_therm_ncp03},
+	[SCALE_QRD_SKUT1_BATT_THERM] = {qpnp_adc_scale_qrd_skut1_batt_therm},
+	[SCALE_PMI_CHG_TEMP] = {qpnp_adc_scale_pmi_chg_temp},
+};
+
+static struct qpnp_vadc_rscale_fn adc_vadc_rscale_fn[] = {
+	[SCALE_RVADC_ABSOLUTE] = {qpnp_vadc_absolute_rthr},
+};
+
+static int32_t qpnp_vadc_calib_device(struct qpnp_vadc_chip *vadc);
+
+static int32_t qpnp_vadc_read_reg(struct qpnp_vadc_chip *vadc, int16_t reg,
+						u8 *data, int len)
+{
+	int rc;
+
+	rc = regmap_bulk_read(vadc->adc->regmap,
+		(vadc->adc->offset + reg), data, len);
+	if (rc < 0) {
+		pr_err("qpnp adc read reg %d failed with %d\n", reg, rc);
+		return rc;
+	}
+
+	return 0;
+}
+
+static int32_t qpnp_vadc_write_reg(struct qpnp_vadc_chip *vadc, int16_t reg,
+						u8 *buf, int len)
+{
+	int rc;
+
+	rc = regmap_bulk_write(vadc->adc->regmap,
+		(vadc->adc->offset + reg), buf, len);
+	if (rc < 0) {
+		pr_err("qpnp adc write reg %d failed with %d\n", reg, rc);
+		return rc;
+	}
+
+	return 0;
+}
+
+static int qpnp_vadc_is_valid(struct qpnp_vadc_chip *vadc)
+{
+	struct qpnp_vadc_chip *vadc_chip = NULL;
+
+	list_for_each_entry(vadc_chip, &qpnp_vadc_device_list, list)
+		if (vadc == vadc_chip)
+			return 0;
+
+	return -EINVAL;
+}
+
+static int32_t qpnp_vadc_warm_rst_configure(struct qpnp_vadc_chip *vadc)
+{
+	int rc = 0;
+	u8 data = 0, buf = 0;
+
+	buf = QPNP_VADC_ACCESS_DATA;
+	rc = qpnp_vadc_write_reg(vadc, QPNP_VADC_ACCESS, &buf, 1);
+	if (rc < 0) {
+		pr_err("VADC write access failed\n");
+		return rc;
+	}
+
+	rc = qpnp_vadc_read_reg(vadc, QPNP_VADC_PERH_RESET_CTL3, &data, 1);
+	if (rc < 0) {
+		pr_err("VADC perh reset ctl3 read failed\n");
+		return rc;
+	}
+
+	buf = QPNP_VADC_ACCESS_DATA;
+	rc = qpnp_vadc_write_reg(vadc, QPNP_VADC_ACCESS, &buf, 1);
+	if (rc < 0) {
+		pr_err("VADC write access failed\n");
+		return rc;
+	}
+
+	data |= QPNP_FOLLOW_WARM_RB;
+
+	rc = qpnp_vadc_write_reg(vadc, QPNP_VADC_PERH_RESET_CTL3, &data, 1);
+	if (rc < 0) {
+		pr_err("VADC perh reset ctl3 write failed\n");
+		return rc;
+	}
+
+	return 0;
+}
+
+static int32_t qpnp_vadc_mode_select(struct qpnp_vadc_chip *vadc, u8 mode_ctl)
+{
+	int rc;
+
+	mode_ctl |= (QPNP_VADC_TRIM_EN | QPNP_VADC_AMUX_TRIM_EN);
+
+	rc = qpnp_vadc_write_reg(vadc, QPNP_VADC_MODE_CTL, &mode_ctl, 1);
+	if (rc < 0)
+		pr_err("vadc write mode selection err:%d\n", rc);
+
+	return rc;
+}
+
+static int32_t qpnp_vadc_enable(struct qpnp_vadc_chip *vadc, bool state)
+{
+	int rc = 0;
+	u8 data = 0;
+
+	data = QPNP_VADC_EN;
+	if (state) {
+		if (vadc->adc->hkadc_ldo && vadc->adc->hkadc_ldo_ok) {
+			rc = qpnp_adc_enable_voltage(vadc->adc);
+			if (rc) {
+				pr_err("failed enabling VADC LDO\n");
+				return rc;
+			}
+		}
+
+		rc = qpnp_vadc_write_reg(vadc, QPNP_VADC_EN_CTL1, &data, 1);
+		if (rc < 0) {
+			pr_err("VADC enable failed\n");
+			return rc;
+		}
+	} else {
+		data = (~data & QPNP_VADC_EN);
+		rc = qpnp_vadc_write_reg(vadc, QPNP_VADC_EN_CTL1, &data, 1);
+		if (rc < 0) {
+			pr_err("VADC disable failed\n");
+			return rc;
+		}
+
+		if (vadc->adc->hkadc_ldo && vadc->adc->hkadc_ldo_ok)
+			qpnp_adc_disable_voltage(vadc->adc);
+	}
+
+	return 0;
+}
+
+static int32_t qpnp_vadc_status_debug(struct qpnp_vadc_chip *vadc)
+{
+	int rc = 0, i = 0;
+	u8 buf[8], offset = 0;
+
+	if (vadc->vadc_debug_count < 3) {
+		for (i = 0; i < QPNP_VADC_REG_DUMP; i++) {
+			rc = qpnp_vadc_read_reg(vadc, offset, buf, 8);
+			if (rc) {
+				pr_err("debug register dump failed\n");
+				return rc;
+			}
+			offset += QPNP_VADC_OFFSET_DUMP;
+			pr_err("row%d: 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n",
+				i, buf[0], buf[1], buf[2], buf[3], buf[4],
+				buf[5], buf[6], buf[7]);
+		}
+	} else
+		pr_debug("VADC peripheral dump already printed\n");
+
+	vadc->vadc_debug_count++;
+
+	rc = qpnp_vadc_enable(vadc, false);
+	if (rc < 0) {
+		pr_err("VADC disable failed with %d\n", rc);
+		return rc;
+	}
+
+	return 0;
+}
+
+static int qpnp_vadc_hc_check_conversion_status(struct qpnp_vadc_chip *vadc)
+{
+	int rc = 0, count = 0;
+	u8 status1 = 0;
+
+	while (status1 != QPNP_VADC_STATUS1_EOC) {
+		rc = qpnp_vadc_read_reg(vadc, QPNP_VADC_STATUS1, &status1, 1);
+		if (rc < 0)
+			return rc;
+		status1 &= QPNP_VADC_STATUS1_REQ_STS_EOC_MASK;
+		if (status1 == QPNP_VADC_STATUS1_EOC)
+			break;
+		usleep_range(QPNP_VADC_HC1_CONV_TIME_MIN_US,
+				QPNP_VADC_HC1_CONV_TIME_MAX_US);
+		count++;
+		if (count > QPNP_VADC_HC1_ERR_COUNT) {
+			pr_err("retry error exceeded\n");
+			rc = qpnp_vadc_status_debug(vadc);
+			if (rc < 0)
+				pr_err("VADC disable failed with %d\n", rc);
+			return -EINVAL;
+		}
+	}
+
+	return rc;
+}
+
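+/*
+ * Read a result from the refreshed (HC) peripheral: assert the data hold
+ * bit so DATA0/DATA1 form one coherent 16-bit sample, read both bytes,
+ * reject the invalid-reading marker (0x8000), then disable the ADC and
+ * release the hold bit.
+ */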
+static int qpnp_vadc_hc_read_data(struct qpnp_vadc_chip *vadc, int *data)
+{
+	int rc = 0;
+	u8 buf = 0, rslt_lsb = 0, rslt_msb = 0;
+
+	/* Set hold bit */
+	rc = qpnp_vadc_read_reg(vadc, QPNP_VADC_HC1_DATA_HOLD_CTL, &buf, 1);
+	if (rc) {
+		pr_err("data hold ctl read failed\n");
+		return rc;
+	}
+	buf |= QPNP_VADC_HC1_DATA_HOLD_CTL_FIELD;
+	rc = qpnp_vadc_write_reg(vadc, QPNP_VADC_HC1_DATA_HOLD_CTL, &buf, 1);
+	if (rc) {
+		pr_err("data hold ctl write failed\n");
+		return rc;
+	}
+
+	rc = qpnp_vadc_read_reg(vadc, QPNP_VADC_HC1_DATA0, &rslt_lsb, 1);
+	if (rc < 0) {
+		pr_err("qpnp adc result read failed for data0\n");
+		return rc;
+	}
+
+	rc = qpnp_vadc_read_reg(vadc, QPNP_VADC_HC1_DATA1, &rslt_msb, 1);
+	if (rc < 0) {
+		pr_err("qpnp adc result read failed for data1\n");
+		return rc;
+	}
+
+	*data = (rslt_msb << 8) | rslt_lsb;
+
+	if (*data == QPNP_VADC_HC1_DATA_CHECK_USR) {
+		pr_err("Invalid data :0x%x\n", *data);
+		return -EINVAL;
+	}
+
+	rc = qpnp_vadc_enable(vadc, false);
+	if (rc) {
+		pr_err("VADC disable failed\n");
+		return rc;
+	}
+
+	/* De-assert hold bit */
+	buf &= ~QPNP_VADC_HC1_DATA_HOLD_CTL_FIELD;
+	rc = qpnp_vadc_write_reg(vadc, QPNP_VADC_HC1_DATA_HOLD_CTL, &buf, 1);
+	if (rc)
+		pr_err("de-asserting hold bit failed\n");
+
+	return rc;
+}
+
+static void qpnp_vadc_hc_update_adc_dig_param(struct qpnp_vadc_chip *vadc,
+			struct qpnp_adc_amux_properties *amux_prop, u8 *data)
+{
+	/* Update CAL value */
+	*data &= ~QPNP_VADC_HC1_CAL_VAL;
+	*data |= (amux_prop->cal_val << QPNP_VADC_HC1_CAL_VAL_SHIFT);
+
+	/* Update CAL select */
+	*data &= ~QPNP_VADC_HC1_CAL_SEL_MASK;
+	*data |= (amux_prop->calib_type << QPNP_VADC_HC1_CAL_SEL_SHIFT);
+
+	/* Update Decimation ratio select */
+	*data &= ~QPNP_VADC_HC1_DEC_RATIO_SEL;
+	*data |= (amux_prop->decimation << QPNP_VADC_HC1_DEC_RATIO_SHIFT);
+
+	pr_debug("VADC_DIG_PARAM value:0x%x\n", *data);
+}
+
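+/*
+ * Program a one-shot conversion on the HC peripheral. The six registers
+ * at 0x42..0x47 are read, updated and written back as a single block so
+ * the conversion request (last byte) lands together with its parameters.
+ */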
+static int qpnp_vadc_hc_configure(struct qpnp_vadc_chip *vadc,
+				struct qpnp_adc_amux_properties *amux_prop)
+{
+	int rc = 0;
+	u8 buf[6];
+
+	/* Read registers 0x42 through 0x47 */
+	rc = qpnp_vadc_read_reg(vadc, QPNP_VADC_HC1_ADC_DIG_PARAM, buf, 6);
+	if (rc < 0) {
+		pr_err("qpnp adc configure block read failed\n");
+		return rc;
+	}
+
+	/* ADC Digital param selection */
+	qpnp_vadc_hc_update_adc_dig_param(vadc, amux_prop, &buf[0]);
+
+	/* Update fast average sample value */
+	buf[1] &= (u8) ~QPNP_VADC_HC1_FAST_AVG_SAMPLES_MASK;
+	buf[1] |= amux_prop->fast_avg_setup;
+
+	/* Select ADC channel */
+	buf[2] = amux_prop->amux_channel;
+
+	/* Select hw settle delay for the channel */
+	buf[3] &= (u8) ~QPNP_VADC_HC1_DELAY_CTL_MASK;
+	buf[3] |= amux_prop->hw_settle_time;
+
+	/* Select ADC enable */
+	buf[4] |= QPNP_VADC_HC1_ADC_EN;
+
+	/* Select CONV request */
+	buf[5] |= QPNP_VADC_HC1_CONV_REQ_START;
+
+	if (!vadc->vadc_poll_eoc)
+		reinit_completion(&vadc->adc->adc_rslt_completion);
+
+	pr_debug("dig:0x%x, fast_avg:0x%x, channel:0x%x, hw_settle:0x%x\n",
+		buf[0], buf[1], buf[2], buf[3]);
+
+	/* Block register write from 0x42 through 0x47 */
+	rc = qpnp_vadc_write_reg(vadc, QPNP_VADC_HC1_ADC_DIG_PARAM, buf, 6);
+	if (rc < 0) {
+		pr_err("qpnp adc block register configure failed\n");
+		return rc;
+	}
+
+	return 0;
+}
+
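+/*
+ * One-shot HC read: look the channel up in the device tree table, run the
+ * one-time calibration if needed, configure and start the conversion,
+ * wait for end of conversion (polled or IRQ driven), then apply the
+ * channel's prescaling ratio and post-scaling function to the raw code.
+ */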
+int32_t qpnp_vadc_hc_read(struct qpnp_vadc_chip *vadc,
+				enum qpnp_vadc_channels channel,
+				struct qpnp_vadc_result *result)
+{
+	int rc = 0, scale_type, amux_prescaling, dt_index = 0, calib_type = 0;
+	struct qpnp_adc_amux_properties amux_prop;
+
+	if (qpnp_vadc_is_valid(vadc))
+		return -EPROBE_DEFER;
+
+	mutex_lock(&vadc->adc->adc_lock);
+
+	while ((vadc->adc->adc_channels[dt_index].channel_num
+		!= channel) && (dt_index < vadc->max_channels_available))
+		dt_index++;
+
+	if (dt_index >= vadc->max_channels_available) {
+		pr_err("not a valid VADC channel:%d\n", channel);
+		rc = -EINVAL;
+		goto fail_unlock;
+	}
+
+	if (!vadc->vadc_init_calib) {
+		rc = qpnp_vadc_calib_device(vadc);
+		if (rc) {
+			pr_err("Calibration failed\n");
+			goto fail_unlock;
+		} else {
+			vadc->vadc_init_calib = true;
+		}
+	}
+
+	calib_type = vadc->adc->adc_channels[dt_index].calib_type;
+	if (calib_type >= ADC_HC_CAL_SEL_NONE) {
+		pr_err("not a valid calib_type\n");
+		rc = -EINVAL;
+		goto fail_unlock;
+	}
+
+	amux_prop.decimation =
+			vadc->adc->adc_channels[dt_index].adc_decimation;
+	amux_prop.calib_type = vadc->adc->adc_channels[dt_index].calib_type;
+	amux_prop.cal_val = vadc->adc->adc_channels[dt_index].cal_val;
+	amux_prop.fast_avg_setup =
+			vadc->adc->adc_channels[dt_index].fast_avg_setup;
+	amux_prop.amux_channel = channel;
+	amux_prop.hw_settle_time =
+			vadc->adc->adc_channels[dt_index].hw_settle_time;
+
+	rc = qpnp_vadc_hc_configure(vadc, &amux_prop);
+	if (rc < 0) {
+		pr_err("Configuring VADC channel failed with %d\n", rc);
+		goto fail_unlock;
+	}
+
+	if (vadc->vadc_poll_eoc) {
+		rc = qpnp_vadc_hc_check_conversion_status(vadc);
+		if (rc < 0) {
+			pr_err("polling mode conversion failed\n");
+			goto fail_unlock;
+		}
+	} else {
+		rc = wait_for_completion_timeout(
+					&vadc->adc->adc_rslt_completion,
+					QPNP_ADC_COMPLETION_TIMEOUT);
+		if (!rc) {
+			rc = qpnp_vadc_hc_check_conversion_status(vadc);
+			if (rc < 0) {
+				pr_err("interrupt mode conversion failed\n");
+				goto fail_unlock;
+			}
+			pr_debug("End of conversion status set\n");
+		}
+	}
+
+	rc = qpnp_vadc_hc_read_data(vadc, &result->adc_code);
+	if (rc) {
+		pr_err("qpnp vadc read adc code failed with %d\n", rc);
+		goto fail_unlock;
+	}
+
+	amux_prescaling =
+		vadc->adc->adc_channels[dt_index].chan_path_prescaling;
+
+	if (amux_prescaling >= PATH_SCALING_NONE) {
+		rc = -EINVAL;
+		goto fail_unlock;
+	}
+
+	vadc->adc->amux_prop->chan_prop->offset_gain_numerator =
+		qpnp_vadc_amux_scaling_ratio[amux_prescaling].num;
+	vadc->adc->amux_prop->chan_prop->offset_gain_denominator =
+		 qpnp_vadc_amux_scaling_ratio[amux_prescaling].den;
+
+	scale_type = vadc->adc->adc_channels[dt_index].adc_scale_fn;
+	if (scale_type >= SCALE_NONE) {
+		rc = -EBADF;
+		goto fail_unlock;
+	}
+
+	/* Note: Scaling functions for VADC_HC do not need offset/gain */
+	vadc_scale_fn[scale_type].chan(vadc, result->adc_code,
+		vadc->adc->adc_prop, vadc->adc->amux_prop->chan_prop, result);
+
+	pr_debug("channel=0x%x, adc_code=0x%x adc_result=%lld\n",
+			channel, result->adc_code, result->physical);
+
+fail_unlock:
+	mutex_unlock(&vadc->adc->adc_lock);
+
+	return rc;
+}
+EXPORT_SYMBOL(qpnp_vadc_hc_read);
+
+static int32_t qpnp_vadc_configure(struct qpnp_vadc_chip *vadc,
+			struct qpnp_adc_amux_properties *chan_prop)
+{
+	u8 decimation = 0, conv_sequence = 0, conv_sequence_trig = 0;
+	u8 mode_ctrl = 0, meas_int_op_ctl_data = 0, buf = 0;
+	int rc = 0;
+
+	/* Mode selection */
+	mode_ctrl |= ((chan_prop->mode_sel << QPNP_VADC_OP_MODE_SHIFT) |
+			(QPNP_VADC_TRIM_EN | QPNP_VADC_AMUX_TRIM_EN));
+	rc = qpnp_vadc_write_reg(vadc, QPNP_VADC_MODE_CTL, &mode_ctrl, 1);
+	if (rc < 0) {
+		pr_err("Mode configure write error\n");
+		return rc;
+	}
+
+	/* Channel selection */
+	buf = chan_prop->amux_channel;
+	rc = qpnp_vadc_write_reg(vadc, QPNP_VADC_CH_SEL_CTL, &buf, 1);
+	if (rc < 0) {
+		pr_err("Channel configure error\n");
+		return rc;
+	}
+
+	/* Digital parameter setup */
+	decimation = chan_prop->decimation <<
+				QPNP_VADC_DIG_DEC_RATIO_SEL_SHIFT;
+	rc = qpnp_vadc_write_reg(vadc, QPNP_VADC_DIG_PARAM, &decimation, 1);
+	if (rc < 0) {
+		pr_err("Digital parameter configure write error\n");
+		return rc;
+	}
+
+	/* HW settling time delay */
+	buf = chan_prop->hw_settle_time;
+	rc = qpnp_vadc_write_reg(vadc, QPNP_VADC_HW_SETTLE_DELAY, &buf, 1);
+	if (rc < 0) {
+		pr_err("HW settling time setup error\n");
+		return rc;
+	}
+
+	pr_debug("mode:%d, channel:%d, decimation:%d, hw_settle:%d\n",
+		mode_ctrl, chan_prop->amux_channel, decimation,
+					chan_prop->hw_settle_time);
+
+	if (chan_prop->mode_sel == (ADC_OP_NORMAL_MODE <<
+					QPNP_VADC_OP_MODE_SHIFT)) {
+		/* Normal measurement mode */
+		buf = chan_prop->fast_avg_setup;
+		rc = qpnp_vadc_write_reg(vadc, QPNP_VADC_FAST_AVG_CTL,
+								&buf, 1);
+		if (rc < 0) {
+			pr_err("Fast averaging configure error\n");
+			return rc;
+		}
+		/* Ensure MEAS_INTERVAL_OP_CTL is set to 0 */
+		rc = qpnp_vadc_write_reg(vadc, QPNP_VADC_MEAS_INTERVAL_OP_CTL,
+						&meas_int_op_ctl_data, 1);
+		if (rc < 0) {
+			pr_err("Measurement interval OP configure error\n");
+			return rc;
+		}
+	} else if (chan_prop->mode_sel == (ADC_OP_CONVERSION_SEQUENCER <<
+					QPNP_VADC_OP_MODE_SHIFT)) {
+		/* Conversion sequence mode */
+		conv_sequence = ((ADC_SEQ_HOLD_100US <<
+				QPNP_VADC_CONV_SEQ_HOLDOFF_SHIFT) |
+				ADC_CONV_SEQ_TIMEOUT_5MS);
+		rc = qpnp_vadc_write_reg(vadc, QPNP_VADC_CONV_SEQ_CTL,
+							&conv_sequence, 1);
+		if (rc < 0) {
+			pr_err("Conversion sequence error\n");
+			return rc;
+		}
+
+		conv_sequence_trig = ((QPNP_VADC_CONV_SEQ_RISING_EDGE <<
+				QPNP_VADC_CONV_SEQ_EDGE_SHIFT) |
+				chan_prop->trigger_channel);
+		rc = qpnp_vadc_write_reg(vadc, QPNP_VADC_CONV_SEQ_TRIG_CTL,
+							&conv_sequence_trig, 1);
+		if (rc < 0) {
+			pr_err("Conversion trigger error\n");
+			return rc;
+		}
+	} else if (chan_prop->mode_sel == ADC_OP_MEASUREMENT_INTERVAL) {
+		buf = QPNP_VADC_MEAS_INTERVAL_OP_SET;
+		rc = qpnp_vadc_write_reg(vadc, QPNP_VADC_MEAS_INTERVAL_OP_CTL,
+					&buf, 1);
+		if (rc < 0) {
+			pr_err("Measurement interval OP configure error\n");
+			return rc;
+		}
+	}
+
+	if (!vadc->vadc_poll_eoc)
+		reinit_completion(&vadc->adc->adc_rslt_completion);
+
+	rc = qpnp_vadc_enable(vadc, true);
+	if (rc)
+		return rc;
+
+	if (!vadc->vadc_iadc_sync_lock) {
+		/* Request conversion */
+		buf = QPNP_VADC_CONV_REQ_SET;
+		rc = qpnp_vadc_write_reg(vadc, QPNP_VADC_CONV_REQ, &buf, 1);
+		if (rc < 0) {
+			pr_err("Request conversion failed\n");
+			return rc;
+		}
+	}
+
+	return 0;
+}
+
+static int32_t qpnp_vadc_read_conversion_result(struct qpnp_vadc_chip *vadc,
+								int32_t *data)
+{
+	uint8_t rslt_lsb, rslt_msb;
+	int rc = 0, status = 0;
+
+	status = qpnp_vadc_read_reg(vadc, QPNP_VADC_DATA0, &rslt_lsb, 1);
+	if (status < 0) {
+		pr_err("qpnp adc result read failed for data0\n");
+		goto fail;
+	}
+
+	status = qpnp_vadc_read_reg(vadc, QPNP_VADC_DATA1, &rslt_msb, 1);
+	if (status < 0) {
+		pr_err("qpnp adc result read failed for data1\n");
+		goto fail;
+	}
+
+	*data = (rslt_msb << 8) | rslt_lsb;
+
+fail:
+	rc = qpnp_vadc_enable(vadc, false);
+	if (rc)
+		return rc;
+
+	return status;
+}
+
+static int32_t qpnp_vadc_read_status(struct qpnp_vadc_chip *vadc, int mode_sel)
+{
+	u8 status1, status2, status2_conv_seq_state;
+	u8 status_err = QPNP_VADC_CONV_TIMEOUT_ERR;
+	int rc;
+
+	switch (mode_sel) {
+	case (ADC_OP_CONVERSION_SEQUENCER << QPNP_VADC_OP_MODE_SHIFT):
+		rc = qpnp_vadc_read_reg(vadc, QPNP_VADC_STATUS1, &status1, 1);
+		if (rc) {
+			pr_err("qpnp_vadc status1 read failed\n");
+			return rc;
+		}
+
+		rc = qpnp_vadc_read_reg(vadc, QPNP_VADC_STATUS2, &status2, 1);
+		if (rc) {
+			pr_err("qpnp_vadc status2 read failed\n");
+			return rc;
+		}
+
+		if (!(status2 & ~QPNP_VADC_STATUS2_CONV_SEQ_TIMEOUT_STS) &&
+			(status1 & (~QPNP_VADC_STATUS1_REQ_STS |
+						QPNP_VADC_STATUS1_EOC))) {
+			rc = status_err;
+			return rc;
+		}
+
+		status2_conv_seq_state = status2 >>
+					QPNP_VADC_STATUS2_CONV_SEQ_STATE_SHIFT;
+		if (status2_conv_seq_state != ADC_CONV_SEQ_IDLE) {
+			pr_err("qpnp vadc seq error with status %d\n",
+						status2);
+			rc = -EINVAL;
+			return rc;
+		}
+	}
+
+	return 0;
+}
+
+static void qpnp_vadc_work(struct work_struct *work)
+{
+	struct qpnp_vadc_chip *vadc = container_of(work,
+			struct qpnp_vadc_chip, trigger_completion_work);
+
+	if (qpnp_vadc_is_valid(vadc) < 0)
+		return;
+
+	complete(&vadc->adc->adc_rslt_completion);
+}
+
+static void qpnp_vadc_low_thr_fn(struct work_struct *work)
+{
+	struct qpnp_vadc_chip *vadc = container_of(work,
+			struct qpnp_vadc_chip, trigger_low_thr_work);
+
+	vadc->state_copy->meas_int_mode = false;
+	vadc->state_copy->meas_int_request_in_queue = false;
+	vadc->state_copy->param->threshold_notification(
+			ADC_TM_LOW_STATE,
+			vadc->state_copy->param->btm_ctx);
+}
+
+static void qpnp_vadc_high_thr_fn(struct work_struct *work)
+{
+	struct qpnp_vadc_chip *vadc = container_of(work,
+			struct qpnp_vadc_chip, trigger_high_thr_work);
+
+	vadc->state_copy->meas_int_mode = false;
+	vadc->state_copy->meas_int_request_in_queue = false;
+	vadc->state_copy->param->threshold_notification(
+			ADC_TM_HIGH_STATE,
+			vadc->state_copy->param->btm_ctx);
+}
+
+static irqreturn_t qpnp_vadc_isr(int irq, void *dev_id)
+{
+	struct qpnp_vadc_chip *vadc = dev_id;
+
+	schedule_work(&vadc->trigger_completion_work);
+
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t qpnp_vadc_low_thr_isr(int irq, void *data)
+{
+	struct qpnp_vadc_chip *vadc = data;
+	u8 mode_ctl = 0, mode = 0;
+	int rc = 0;
+
+	rc = qpnp_vadc_read_reg(vadc, QPNP_VADC_MODE_CTL, &mode, 1);
+	if (rc < 0) {
+		pr_err("mode ctl register read failed with %d\n", rc);
+		return IRQ_HANDLED;
+	}
+
+	if (!(mode & QPNP_VADC_MEAS_INT_MODE_MASK)) {
+		pr_debug("Spurious VADC threshold 0x%x\n", mode);
+		return IRQ_HANDLED;
+	}
+
+	mode_ctl = ADC_OP_NORMAL_MODE;
+	/* Set measurement in single measurement mode */
+	qpnp_vadc_mode_select(vadc, mode_ctl);
+	qpnp_vadc_enable(vadc, false);
+	schedule_work(&vadc->trigger_low_thr_work);
+
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t qpnp_vadc_high_thr_isr(int irq, void *data)
+{
+	struct qpnp_vadc_chip *vadc = data;
+	u8 mode_ctl = 0, mode = 0;
+	int rc = 0;
+
+	rc = qpnp_vadc_read_reg(vadc, QPNP_VADC_MODE_CTL, &mode, 1);
+	if (rc < 0) {
+		pr_err("mode ctl register read failed with %d\n", rc);
+		return IRQ_HANDLED;
+	}
+
+	if (!(mode & QPNP_VADC_MEAS_INT_MODE_MASK)) {
+		pr_debug("Spurious VADC threshold 0x%x\n", mode);
+		return IRQ_HANDLED;
+	}
+
+	mode_ctl = ADC_OP_NORMAL_MODE;
+	/* Set measurement in single measurement mode */
+	qpnp_vadc_mode_select(vadc, mode_ctl);
+	qpnp_vadc_enable(vadc, false);
+	schedule_work(&vadc->trigger_high_thr_work);
+
+	return IRQ_HANDLED;
+}
+
+static int32_t qpnp_vadc_version_check(struct qpnp_vadc_chip *dev)
+{
+	uint8_t revision;
+	int rc;
+
+	rc = qpnp_vadc_read_reg(dev, QPNP_VADC_REVISION2, &revision, 1);
+	if (rc < 0) {
+		pr_err("qpnp adc result read failed with %d\n", rc);
+		return rc;
+	}
+
+	if (revision < QPNP_VADC_SUPPORTED_REVISION2) {
+		pr_err("VADC Version not supported\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int32_t
+	qpnp_vadc_channel_post_scaling_calib_check(struct qpnp_vadc_chip *vadc,
+								int channel)
+{
+	int version, rc = 0;
+
+	version = qpnp_adc_get_revid_version(vadc->dev);
+
+	if (version == QPNP_REV_ID_PM8950_1_0) {
+		if ((channel == LR_MUX7_HW_ID) ||
+			(channel == P_MUX2_1_1) ||
+			(channel == LR_MUX3_XO_THERM) ||
+			(channel == LR_MUX3_BUF_XO_THERM_BUF) ||
+			(channel == P_MUX4_1_1)) {
+			vadc->adc->amux_prop->chan_prop->calib_type =
+								CALIB_ABSOLUTE;
+			return rc;
+		}
+	}
+
+	return -EINVAL;
+}
+
+#define QPNP_VBAT_COEFF_1	3000
+#define QPNP_VBAT_COEFF_2	45810000
+#define QPNP_VBAT_COEFF_3	100000
+#define QPNP_VBAT_COEFF_4	3500
+#define QPNP_VBAT_COEFF_5	80000000
+#define QPNP_VBAT_COEFF_6	4400
+#define QPNP_VBAT_COEFF_7	32200000
+#define QPNP_VBAT_COEFF_8	3880
+#define QPNP_VBAT_COEFF_9	5770
+#define QPNP_VBAT_COEFF_10	3660
+#define QPNP_VBAT_COEFF_11	5320
+#define QPNP_VBAT_COEFF_12	8060000
+#define QPNP_VBAT_COEFF_13	102640000
+#define QPNP_VBAT_COEFF_14	22220000
+#define QPNP_VBAT_COEFF_15	83060000
+#define QPNP_VBAT_COEFF_16	2810
+#define QPNP_VBAT_COEFF_17	5260
+#define QPNP_VBAT_COEFF_18	8027
+#define QPNP_VBAT_COEFF_19	2347
+#define QPNP_VBAT_COEFF_20	6043
+#define QPNP_VBAT_COEFF_21	1914
+#define QPNP_VBAT_OFFSET_SMIC	9446
+#define QPNP_VBAT_OFFSET_GF	9441
+#define QPNP_OCV_OFFSET_SMIC	4596
+#define QPNP_OCV_OFFSET_GF	5896
+#define QPNP_VBAT_COEFF_22	6800
+#define QPNP_VBAT_COEFF_23	3500
+#define QPNP_VBAT_COEFF_24	4360
+#define QPNP_VBAT_COEFF_25	8060
+#define QPNP_VBAT_COEFF_26	7895
+#define QPNP_VBAT_COEFF_27	5658
+#define QPNP_VBAT_COEFF_28	5760
+#define QPNP_VBAT_COEFF_29	7900
+#define QPNP_VBAT_COEFF_30	5660
+#define QPNP_VBAT_COEFF_31	3620
+#define QPNP_VBAT_COEFF_32	1230
+#define QPNP_VBAT_COEFF_33	5760
+#define QPNP_VBAT_COEFF_34	4080
+#define QPNP_VBAT_COEFF_35	7000
+#define QPNP_VBAT_COEFF_36	3040
+#define QPNP_VBAT_COEFF_37	3850
+#define QPNP_VBAT_COEFF_38	5000
+#define QPNP_VBAT_COEFF_39	2610
+#define QPNP_VBAT_COEFF_40	4190
+#define QPNP_VBAT_COEFF_41	5800
+#define QPNP_VBAT_COEFF_42	2620
+#define QPNP_VBAT_COEFF_43	4030
+#define QPNP_VBAT_COEFF_44	3230
+#define QPNP_VBAT_COEFF_45	3450
+#define QPNP_VBAT_COEFF_46	2120
+#define QPNP_VBAT_COEFF_47	3560
+#define QPNP_VBAT_COEFF_48	2190
+#define QPNP_VBAT_COEFF_49	4180
+#define QPNP_VBAT_COEFF_50	27800000
+#define QPNP_VBAT_COEFF_51	5110
+#define QPNP_VBAT_COEFF_52	34444000
+
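+/*
+ * Compensate a power-on OCV reading for die temperature. The
+ * revision/fab-specific temp_var, divided by QPNP_VBAT_COEFF_3, acts as a
+ * parts-per-million gain correction:
+ *	result = result * (1000000 + temp_var / COEFF_3) / 1000000
+ * with some revisions also subtracting a fixed offset.
+ */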
+static int32_t qpnp_ocv_comp(int64_t *result,
+			struct qpnp_vadc_chip *vadc, int64_t die_temp)
+{
+	int64_t temp_var = 0, offset = 0;
+	int64_t old = *result;
+	int version;
+
+	version = qpnp_adc_get_revid_version(vadc->dev);
+	if (version == -EINVAL)
+		return 0;
+
+	if (version == QPNP_REV_ID_8026_2_2) {
+		if (die_temp > 25000)
+			return 0;
+	}
+
+	switch (version) {
+	case QPNP_REV_ID_8941_3_1:
+		switch (vadc->id) {
+		case COMP_ID_TSMC:
+			 temp_var = ((die_temp - 25000) *
+			(-QPNP_VBAT_COEFF_4));
+			break;
+		default:
+		case COMP_ID_GF:
+			temp_var = ((die_temp - 25000) *
+			(-QPNP_VBAT_COEFF_1));
+			break;
+		}
+		break;
+	case QPNP_REV_ID_8026_1_0:
+		switch (vadc->id) {
+		case COMP_ID_TSMC:
+			temp_var = (((die_temp *
+			(-QPNP_VBAT_COEFF_10))
+			- QPNP_VBAT_COEFF_14));
+			break;
+		default:
+		case COMP_ID_GF:
+			temp_var = (((die_temp *
+			(-QPNP_VBAT_COEFF_8))
+			+ QPNP_VBAT_COEFF_12));
+			break;
+		}
+		break;
+	case QPNP_REV_ID_8026_2_0:
+	case QPNP_REV_ID_8026_2_1:
+		switch (vadc->id) {
+		case COMP_ID_TSMC:
+			temp_var = ((die_temp - 25000) *
+			(-QPNP_VBAT_COEFF_10));
+			break;
+		default:
+		case COMP_ID_GF:
+			temp_var = ((die_temp - 25000) *
+			(-QPNP_VBAT_COEFF_8));
+			break;
+		}
+		break;
+	case QPNP_REV_ID_8026_2_2:
+		switch (vadc->id) {
+		case COMP_ID_TSMC:
+			*result -= QPNP_VBAT_COEFF_22;
+			temp_var = (die_temp - 25000) *
+					QPNP_VBAT_COEFF_24;
+			break;
+		default:
+		case COMP_ID_GF:
+			*result -= QPNP_VBAT_COEFF_22;
+			temp_var = (die_temp - 25000) *
+					QPNP_VBAT_COEFF_25;
+			break;
+		}
+		break;
+	case QPNP_REV_ID_8110_2_0:
+		switch (vadc->id) {
+		case COMP_ID_SMIC:
+			*result -= QPNP_OCV_OFFSET_SMIC;
+			if (die_temp < 25000)
+				temp_var = QPNP_VBAT_COEFF_18;
+			else
+				temp_var = QPNP_VBAT_COEFF_19;
+			temp_var = (die_temp - 25000) * temp_var;
+			break;
+		default:
+		case COMP_ID_GF:
+			*result -= QPNP_OCV_OFFSET_GF;
+			if (die_temp < 25000)
+				temp_var = QPNP_VBAT_COEFF_20;
+			else
+				temp_var = QPNP_VBAT_COEFF_21;
+			temp_var = (die_temp - 25000) * temp_var;
+			break;
+		}
+		break;
+	case QPNP_REV_ID_8916_1_0:
+		switch (vadc->id) {
+		case COMP_ID_SMIC:
+			if (die_temp < 25000)
+				temp_var = QPNP_VBAT_COEFF_26;
+			else
+				temp_var = QPNP_VBAT_COEFF_27;
+			temp_var = (die_temp - 25000) * temp_var;
+			break;
+		default:
+		case COMP_ID_GF:
+			offset = QPNP_OCV_OFFSET_GF;
+			if (die_temp < 25000)
+				temp_var = QPNP_VBAT_COEFF_26;
+			else
+				temp_var = QPNP_VBAT_COEFF_27;
+			temp_var = (die_temp - 25000) * temp_var;
+			break;
+		}
+		break;
+	case QPNP_REV_ID_8916_1_1:
+		switch (vadc->id) {
+		/* FAB_ID is zero */
+		case COMP_ID_GF:
+			if (die_temp < 25000)
+				temp_var = QPNP_VBAT_COEFF_29;
+			else
+				temp_var = QPNP_VBAT_COEFF_30;
+			temp_var = (die_temp - 25000) * temp_var;
+			break;
+		/* FAB_ID is non-zero */
+		default:
+			if (die_temp < 25000)
+				temp_var = QPNP_VBAT_COEFF_31;
+			else
+				temp_var = (-QPNP_VBAT_COEFF_32);
+			temp_var = (die_temp - 25000) * temp_var;
+			break;
+		}
+		break;
+	case QPNP_REV_ID_8916_2_0:
+		switch (vadc->id) {
+		case COMP_ID_SMIC:
+			offset = (-QPNP_VBAT_COEFF_38);
+			if (die_temp < 0)
+				temp_var = die_temp * QPNP_VBAT_COEFF_36;
+			else if (die_temp > 40000)
+				temp_var = ((die_temp - 40000) *
+						(-QPNP_VBAT_COEFF_37));
+			break;
+		case COMP_ID_TSMC:
+			if (die_temp < 10000)
+				temp_var = ((die_temp - 10000) *
+						QPNP_VBAT_COEFF_41);
+			else if (die_temp > 50000)
+				temp_var = ((die_temp - 50000) *
+						(-QPNP_VBAT_COEFF_42));
+			break;
+		default:
+		case COMP_ID_GF:
+			if (die_temp < 20000)
+				temp_var = ((die_temp - 20000) *
+						QPNP_VBAT_COEFF_45);
+			else if (die_temp > 40000)
+				temp_var = ((die_temp - 40000) *
+						(-QPNP_VBAT_COEFF_46));
+			break;
+		}
+		break;
+	case QPNP_REV_ID_8909_1_0:
+		switch (vadc->id) {
+		case COMP_ID_SMIC:
+			temp_var = (-QPNP_VBAT_COEFF_50);
+			break;
+		}
+		break;
+	case QPNP_REV_ID_8909_1_1:
+		switch (vadc->id) {
+		case COMP_ID_SMIC:
+			temp_var = (QPNP_VBAT_COEFF_52);
+			break;
+		}
+		break;
+	default:
+		temp_var = 0;
+		break;
+	}
+
+	temp_var = div64_s64(temp_var, QPNP_VBAT_COEFF_3);
+
+	temp_var = 1000000 + temp_var;
+
+	*result = *result * temp_var;
+
+	if (offset)
+		*result -= offset;
+
+	*result = div64_s64(*result, 1000000);
+	pr_debug("%lld compensated into %lld\n", old, *result);
+
+	return 0;
+}
+
+static int32_t qpnp_vbat_sns_comp(int64_t *result,
+			struct qpnp_vadc_chip *vadc, int64_t die_temp)
+{
+	int64_t temp_var = 0, offset = 0;
+	int64_t old = *result;
+	int version;
+
+	version = qpnp_adc_get_revid_version(vadc->dev);
+	if (version == -EINVAL)
+		return 0;
+
+	if (version != QPNP_REV_ID_8941_3_1) {
+		/* min(die_temp_c, 60_degC) */
+		if (die_temp > 60000)
+			die_temp = 60000;
+	}
+
+	switch (version) {
+	case QPNP_REV_ID_8941_3_1:
+		switch (vadc->id) {
+		case COMP_ID_TSMC:
+			temp_var = ((die_temp - 25000) *
+			(-QPNP_VBAT_COEFF_1));
+			break;
+		default:
+		case COMP_ID_GF:
+			/* min(die_temp_c, 60_degC) */
+			if (die_temp > 60000)
+				die_temp = 60000;
+			temp_var = ((die_temp - 25000) *
+			(-QPNP_VBAT_COEFF_1));
+			break;
+		}
+		break;
+	case QPNP_REV_ID_8026_1_0:
+		switch (vadc->id) {
+		case COMP_ID_TSMC:
+			temp_var = (((die_temp *
+			(-QPNP_VBAT_COEFF_11))
+			+ QPNP_VBAT_COEFF_15));
+			break;
+		default:
+		case COMP_ID_GF:
+			temp_var = (((die_temp *
+			(-QPNP_VBAT_COEFF_9))
+			+ QPNP_VBAT_COEFF_13));
+			break;
+		}
+		break;
+	case QPNP_REV_ID_8026_2_0:
+	case QPNP_REV_ID_8026_2_1:
+		switch (vadc->id) {
+		case COMP_ID_TSMC:
+			temp_var = ((die_temp - 25000) *
+			(-QPNP_VBAT_COEFF_11));
+			break;
+		default:
+		case COMP_ID_GF:
+			temp_var = ((die_temp - 25000) *
+			(-QPNP_VBAT_COEFF_9));
+			break;
+		}
+		break;
+	case QPNP_REV_ID_8026_2_2:
+		switch (vadc->id) {
+		case COMP_ID_TSMC:
+			*result -= QPNP_VBAT_COEFF_23;
+			temp_var = 0;
+			break;
+		default:
+		case COMP_ID_GF:
+			*result -= QPNP_VBAT_COEFF_23;
+			temp_var = 0;
+			break;
+		}
+		break;
+	case QPNP_REV_ID_8110_2_0:
+		switch (vadc->id) {
+		case COMP_ID_SMIC:
+			*result -= QPNP_VBAT_OFFSET_SMIC;
+			temp_var = ((die_temp - 25000) *
+			(QPNP_VBAT_COEFF_17));
+			break;
+		default:
+		case COMP_ID_GF:
+			*result -= QPNP_VBAT_OFFSET_GF;
+			temp_var = ((die_temp - 25000) *
+			(QPNP_VBAT_COEFF_16));
+			break;
+		}
+		break;
+	case QPNP_REV_ID_8916_1_0:
+		switch (vadc->id) {
+		case COMP_ID_SMIC:
+			temp_var = ((die_temp - 25000) *
+			(QPNP_VBAT_COEFF_28));
+			break;
+		default:
+		case COMP_ID_GF:
+			temp_var = ((die_temp - 25000) *
+			(QPNP_VBAT_COEFF_28));
+			break;
+		}
+		break;
+	case QPNP_REV_ID_8916_1_1:
+		switch (vadc->id) {
+		/* FAB_ID is zero */
+		case COMP_ID_GF:
+			temp_var = ((die_temp - 25000) *
+			(QPNP_VBAT_COEFF_33));
+			break;
+		/* FAB_ID is non-zero */
+		default:
+			offset = QPNP_VBAT_COEFF_35;
+			if (die_temp > 50000) {
+				temp_var = ((die_temp - 25000) *
+				(QPNP_VBAT_COEFF_34));
+			}
+			break;
+		}
+		break;
+	case QPNP_REV_ID_8916_2_0:
+		switch (vadc->id) {
+		case COMP_ID_SMIC:
+			if (die_temp < 0) {
+				temp_var = (die_temp *
+					QPNP_VBAT_COEFF_39);
+			} else if (die_temp > 40000) {
+				temp_var = ((die_temp - 40000) *
+				(-QPNP_VBAT_COEFF_40));
+			}
+			break;
+		case COMP_ID_TSMC:
+			if (die_temp < 10000)
+				temp_var = ((die_temp - 10000) *
+					QPNP_VBAT_COEFF_43);
+			else if (die_temp > 50000)
+				temp_var = ((die_temp - 50000) *
+						(-QPNP_VBAT_COEFF_44));
+			break;
+		default:
+		case COMP_ID_GF:
+			if (die_temp < 20000)
+				temp_var = ((die_temp - 20000) *
+					QPNP_VBAT_COEFF_47);
+			else if (die_temp > 40000)
+				temp_var = ((die_temp - 40000) *
+						(-QPNP_VBAT_COEFF_48));
+			break;
+		}
+		break;
+	case QPNP_REV_ID_8909_1_0:
+		switch (vadc->id) {
+		case COMP_ID_SMIC:
+			if (die_temp < 30000)
+				temp_var = (-QPNP_VBAT_COEFF_50);
+			else if (die_temp > 30000)
+				temp_var = (((die_temp - 30000) *
+					(-QPNP_VBAT_COEFF_49)) +
+					(-QPNP_VBAT_COEFF_50));
+			break;
+		}
+		break;
+	case QPNP_REV_ID_8909_1_1:
+		switch (vadc->id) {
+		case COMP_ID_SMIC:
+			if (die_temp < 30000)
+				temp_var = (QPNP_VBAT_COEFF_52);
+			else if (die_temp > 30000)
+				temp_var = (((die_temp - 30000) *
+					(-QPNP_VBAT_COEFF_51)) +
+					(QPNP_VBAT_COEFF_52));
+			break;
+		}
+		break;
+	default:
+		temp_var = 0;
+		break;
+	}
+
+	temp_var = div64_s64(temp_var, QPNP_VBAT_COEFF_3);
+
+	temp_var = 1000000 + temp_var;
+
+	*result = *result * temp_var;
+
+	if (offset)
+		*result -= offset;
+
+	*result = div64_s64(*result, 1000000);
+	pr_debug("%lld compensated into %lld\n", old, *result);
+
+	return 0;
+}
+
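+/*
+ * Measure DIE_TEMP, then apply either the power-on OCV or the VBAT_SNS
+ * die-temperature compensation to @result depending on @is_pon_ocv.
+ */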
+int32_t qpnp_vbat_sns_comp_result(struct qpnp_vadc_chip *vadc,
+					int64_t *result, bool is_pon_ocv)
+{
+	struct qpnp_vadc_result die_temp_result;
+	int rc = 0;
+
+	rc = qpnp_vadc_is_valid(vadc);
+	if (rc < 0)
+		return rc;
+
+	rc = qpnp_vadc_conv_seq_request(vadc, ADC_SEQ_NONE,
+			DIE_TEMP, &die_temp_result);
+	if (rc < 0) {
+		pr_err("Error reading die_temp\n");
+		return rc;
+	}
+
+	pr_debug("die-temp = %lld\n", die_temp_result.physical);
+
+	if (is_pon_ocv)
+		rc = qpnp_ocv_comp(result, vadc, die_temp_result.physical);
+	else
+		rc = qpnp_vbat_sns_comp(result, vadc,
+				die_temp_result.physical);
+
+	if (rc < 0)
+		pr_err("Error with vbat compensation\n");
+
+	return rc;
+}
+EXPORT_SYMBOL(qpnp_vbat_sns_comp_result);
+
+static void qpnp_vadc_625mv_channel_sel(struct qpnp_vadc_chip *vadc,
+				uint32_t *ref_channel_sel)
+{
+	uint32_t dt_index = 0;
+
+	/* Check if the buffered 625mV channel exists */
+	while ((vadc->adc->adc_channels[dt_index].channel_num
+		!= SPARE1) && (dt_index < vadc->max_channels_available))
+		dt_index++;
+
+	if (dt_index >= vadc->max_channels_available) {
+		pr_debug("Use default 625mV ref channel\n");
+		*ref_channel_sel = REF_625MV;
+	} else {
+		pr_debug("Use buffered 625mV ref channel\n");
+		*ref_channel_sel = SPARE1;
+	}
+}
+
+int32_t qpnp_vadc_calib_vref(struct qpnp_vadc_chip *vadc,
+					enum qpnp_adc_calib_type calib_type,
+					int *calib_data)
+{
+	struct qpnp_adc_amux_properties conv;
+	int rc, count = 0, calib_read = 0;
+	u8 status1 = 0;
+
+	if (vadc->vadc_hc) {
+		if (calib_type == ADC_HC_ABS_CAL)
+			conv.amux_channel = VADC_CALIB_VREF_1P25;
+		else if (calib_type == CALIB_RATIOMETRIC)
+			conv.amux_channel = VADC_CALIB_VREF;
+	} else {
+		if (calib_type == CALIB_ABSOLUTE)
+			conv.amux_channel = REF_125V;
+		else if (calib_type == CALIB_RATIOMETRIC)
+			conv.amux_channel = VDD_VADC;
+	}
+
+	conv.decimation = DECIMATION_TYPE2;
+	conv.mode_sel = ADC_OP_NORMAL_MODE << QPNP_VADC_OP_MODE_SHIFT;
+	conv.hw_settle_time = ADC_CHANNEL_HW_SETTLE_DELAY_0US;
+	conv.fast_avg_setup = ADC_FAST_AVG_SAMPLE_1;
+	conv.cal_val = calib_type;
+
+	if (vadc->vadc_hc) {
+		rc = qpnp_vadc_hc_configure(vadc, &conv);
+		if (rc) {
+			pr_err("qpnp_vadc configure failed with %d\n", rc);
+			goto calib_fail;
+		}
+	} else {
+		rc = qpnp_vadc_configure(vadc, &conv);
+		if (rc) {
+			pr_err("qpnp_vadc configure failed with %d\n", rc);
+			goto calib_fail;
+		}
+	}
+
+	while (status1 != QPNP_VADC_STATUS1_EOC) {
+		rc = qpnp_vadc_read_reg(vadc, QPNP_VADC_STATUS1, &status1, 1);
+		if (rc < 0)
+			return rc;
+		status1 &= QPNP_VADC_STATUS1_REQ_STS_EOC_MASK;
+		usleep_range(QPNP_VADC_CONV_TIME_MIN,
+				QPNP_VADC_CONV_TIME_MAX);
+		count++;
+		if (count > QPNP_VADC_ERR_COUNT) {
+			rc = -ENODEV;
+			goto calib_fail;
+		}
+	}
+
+	if (vadc->vadc_hc) {
+		rc = qpnp_vadc_hc_read_data(vadc, &calib_read);
+		if (rc) {
+			pr_err("qpnp vadc read adc code failed with %d\n", rc);
+			goto calib_fail;
+		}
+	} else {
+		rc = qpnp_vadc_read_conversion_result(vadc, &calib_read);
+		if (rc) {
+			pr_err("qpnp adc read adc failed with %d\n", rc);
+			goto calib_fail;
+		}
+	}
+
+	*calib_data = calib_read;
+calib_fail:
+	return rc;
+}
+
+int32_t qpnp_vadc_calib_gnd(struct qpnp_vadc_chip *vadc,
+					enum qpnp_adc_calib_type calib_type,
+					int *calib_data)
+{
+	struct qpnp_adc_amux_properties conv;
+	int rc, count = 0, calib_read = 0;
+	u8 status1 = 0;
+	uint32_t ref_channel_sel = 0;
+
+	if (vadc->vadc_hc) {
+		conv.amux_channel = VADC_VREF_GND;
+	} else {
+		if (calib_type == CALIB_ABSOLUTE) {
+			qpnp_vadc_625mv_channel_sel(vadc, &ref_channel_sel);
+			conv.amux_channel = ref_channel_sel;
+		} else if (calib_type == CALIB_RATIOMETRIC)
+			conv.amux_channel = GND_REF;
+	}
+
+	conv.decimation = DECIMATION_TYPE2;
+	conv.mode_sel = ADC_OP_NORMAL_MODE << QPNP_VADC_OP_MODE_SHIFT;
+	conv.hw_settle_time = ADC_CHANNEL_HW_SETTLE_DELAY_0US;
+	conv.fast_avg_setup = ADC_FAST_AVG_SAMPLE_1;
+	conv.cal_val = calib_type;
+
+	if (vadc->vadc_hc) {
+		rc = qpnp_vadc_hc_configure(vadc, &conv);
+		if (rc) {
+			pr_err("qpnp_vadc configure failed with %d\n", rc);
+			goto calib_fail;
+		}
+	} else {
+		rc = qpnp_vadc_configure(vadc, &conv);
+		if (rc) {
+			pr_err("qpnp_vadc configure failed with %d\n", rc);
+			goto calib_fail;
+		}
+	}
+
+	while (status1 != QPNP_VADC_STATUS1_EOC) {
+		rc = qpnp_vadc_read_reg(vadc, QPNP_VADC_STATUS1, &status1, 1);
+		if (rc < 0)
+			return rc;
+		status1 &= QPNP_VADC_STATUS1_REQ_STS_EOC_MASK;
+		usleep_range(QPNP_VADC_CONV_TIME_MIN,
+				QPNP_VADC_CONV_TIME_MAX);
+		count++;
+		if (count > QPNP_VADC_ERR_COUNT) {
+			rc = -ENODEV;
+			goto calib_fail;
+		}
+	}
+
+	if (vadc->vadc_hc) {
+		rc = qpnp_vadc_hc_read_data(vadc, &calib_read);
+		if (rc) {
+			pr_err("qpnp vadc read adc code failed with %d\n", rc);
+			goto calib_fail;
+		}
+	} else {
+		rc = qpnp_vadc_read_conversion_result(vadc, &calib_read);
+		if (rc) {
+			pr_err("qpnp adc read adc failed with %d\n", rc);
+			goto calib_fail;
+		}
+	}
+	*calib_data = calib_read;
+calib_fail:
+	return rc;
+}
+
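+/*
+ * Two-point absolute calibration: sample the 1.25V reference and a low
+ * reference (true ground on HC, the 625mV channel otherwise) and record
+ * the code delta (dy) against the known input delta (dx) for later
+ * code-to-microvolt conversion. The same is then done ratiometrically
+ * against VDD and ground.
+ */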
+static int32_t qpnp_vadc_calib_device(struct qpnp_vadc_chip *vadc)
+{
+	int rc, calib_read_1 = 0, calib_read_2 = 0;
+	enum qpnp_adc_calib_type calib_type;
+
+	if (vadc->vadc_hc)
+		calib_type = ADC_HC_ABS_CAL;
+	else
+		calib_type = CALIB_ABSOLUTE;
+
+	rc = qpnp_vadc_calib_vref(vadc, calib_type, &calib_read_1);
+	if (rc) {
+		pr_err("qpnp adc absolute vref calib failed with %d\n", rc);
+		goto calib_fail;
+	}
+	rc = qpnp_vadc_calib_gnd(vadc, calib_type, &calib_read_2);
+	if (rc) {
+		pr_err("qpnp adc absolute gnd calib failed with %d\n", rc);
+		goto calib_fail;
+	}
+	pr_debug("absolute reference raw: 1.25V:0x%x, 625mV/GND:0x%x\n",
+				calib_read_1, calib_read_2);
+
+	if (calib_read_1 == calib_read_2) {
+		pr_err("absolute reference raw: 1.25V:0x%x 625mV/GND:0x%x\n",
+			calib_read_1, calib_read_2);
+		rc = -EINVAL;
+		goto calib_fail;
+	}
+
+	vadc->adc->amux_prop->chan_prop->adc_graph[calib_type].dy =
+				(calib_read_1 - calib_read_2);
+
+	if (calib_type == CALIB_ABSOLUTE)
+		vadc->adc->amux_prop->chan_prop->adc_graph[calib_type].dx
+						= QPNP_ADC_625_UV;
+	else if (calib_type == ADC_HC_ABS_CAL)
+		vadc->adc->amux_prop->chan_prop->adc_graph[calib_type].dx
+						= QPNP_ADC_1P25_UV;
+	vadc->adc->amux_prop->chan_prop->adc_graph[calib_type].adc_vref =
+					calib_read_1;
+	vadc->adc->amux_prop->chan_prop->adc_graph[calib_type].adc_gnd =
+					calib_read_2;
+
+	calib_read_1 = 0;
+	calib_read_2 = 0;
+	rc = qpnp_vadc_calib_vref(vadc, CALIB_RATIOMETRIC, &calib_read_1);
+	if (rc) {
+		pr_err("qpnp adc ratiometric vref calib failed with %d\n", rc);
+		goto calib_fail;
+	}
+	rc = qpnp_vadc_calib_gnd(vadc, CALIB_RATIOMETRIC, &calib_read_2);
+	if (rc) {
+		pr_err("qpnp adc ratiometric gnd calib failed with %d\n", rc);
+		goto calib_fail;
+	}
+	pr_debug("ratiometric reference raw: VDD:0x%x GND:0x%x\n",
+				calib_read_1, calib_read_2);
+
+	if (calib_read_1 == calib_read_2) {
+		pr_err("ratiometric reference raw: VDD:0x%x GND:0x%x\n",
+				calib_read_1, calib_read_2);
+		rc = -EINVAL;
+		goto calib_fail;
+	}
+
+	vadc->adc->amux_prop->chan_prop->adc_graph[CALIB_RATIOMETRIC].dy =
+					(calib_read_1 - calib_read_2);
+	vadc->adc->amux_prop->chan_prop->adc_graph[CALIB_RATIOMETRIC].dx =
+					vadc->adc->adc_prop->adc_vdd_reference;
+	vadc->adc->amux_prop->chan_prop->adc_graph[CALIB_RATIOMETRIC].adc_vref
+					= calib_read_1;
+	vadc->adc->amux_prop->chan_prop->adc_graph[CALIB_RATIOMETRIC].adc_gnd
+					= calib_read_2;
+
+calib_fail:
+	return rc;
+}
+
+int32_t qpnp_get_vadc_gain_and_offset(struct qpnp_vadc_chip *vadc,
+				struct qpnp_vadc_linear_graph *param,
+				enum qpnp_adc_calib_type calib_type)
+{
+	int rc = 0;
+	struct qpnp_vadc_result result;
+
+	rc = qpnp_vadc_is_valid(vadc);
+	if (rc < 0)
+		return rc;
+
+	if (!vadc->vadc_init_calib) {
+		if (vadc->vadc_hc) {
+			rc = qpnp_vadc_hc_read(vadc, VADC_CALIB_VREF_1P25,
+								&result);
+			if (rc) {
+				pr_debug("vadc read failed with rc = %d\n", rc);
+				return rc;
+			}
+		} else {
+			rc = qpnp_vadc_read(vadc, REF_125V, &result);
+			if (rc) {
+				pr_debug("vadc read failed with rc = %d\n", rc);
+				return rc;
+			}
+		}
+	}
+
+	switch (calib_type) {
+	case CALIB_RATIOMETRIC:
+	param->dy =
+	vadc->adc->amux_prop->chan_prop->adc_graph[CALIB_RATIOMETRIC].dy;
+	param->dx =
+	vadc->adc->amux_prop->chan_prop->adc_graph[CALIB_RATIOMETRIC].dx;
+	param->adc_vref = vadc->adc->adc_prop->adc_vdd_reference;
+	param->adc_gnd =
+	vadc->adc->amux_prop->chan_prop->adc_graph[CALIB_RATIOMETRIC].adc_gnd;
+	break;
+	case CALIB_ABSOLUTE:
+	case ADC_HC_ABS_CAL:
+	param->dy =
+	vadc->adc->amux_prop->chan_prop->adc_graph[calib_type].dy;
+	param->dx =
+	vadc->adc->amux_prop->chan_prop->adc_graph[calib_type].dx;
+	param->adc_vref = vadc->adc->adc_prop->adc_vdd_reference;
+	param->adc_gnd =
+	vadc->adc->amux_prop->chan_prop->adc_graph[calib_type].adc_gnd;
+	break;
+	default:
+		rc = -EINVAL;
+	}
+
+	return rc;
+}
+EXPORT_SYMBOL(qpnp_get_vadc_gain_and_offset);
+
+static int32_t qpnp_vadc_wait_for_req_sts_check(struct qpnp_vadc_chip *vadc)
+{
+	u8 status1 = 0;
+	int rc, count = 0;
+
+	/* Re-enable the peripheral */
+	rc = qpnp_vadc_enable(vadc, true);
+	if (rc) {
+		pr_err("vadc re-enable peripheral failed with %d\n", rc);
+		return rc;
+	}
+
+	/* The VADC_TM bank needs to be disabled for new conversion request */
+	rc = qpnp_vadc_read_reg(vadc, QPNP_VADC_STATUS1, &status1, 1);
+	if (rc) {
+		pr_err("vadc read status1 failed with %d\n", rc);
+		return rc;
+	}
+
+	/* Disable the bank if a conversion is occurring */
+	while ((status1 & QPNP_VADC_STATUS1_REQ_STS) && (count < QPNP_RETRY)) {
+		/* Wait time is based on the optimum sampling rate
+		 * and adding enough time buffer to account for ADC conversions
+		 * occurring on different peripheral banks
+		 */
+		usleep_range(QPNP_MIN_TIME, QPNP_MAX_TIME);
+		rc = qpnp_vadc_read_reg(vadc, QPNP_VADC_STATUS1, &status1, 1);
+		if (rc < 0) {
+			pr_err("vadc disable failed with %d\n", rc);
+			return rc;
+		}
+		count++;
+	}
+
+	if (count >= QPNP_RETRY)
+		pr_err("QPNP vadc status req bit did not fall low!!\n");
+
+	rc = qpnp_vadc_read_reg(vadc, QPNP_VADC_STATUS1, &status1, 1);
+
+	/* Disable the peripheral */
+	rc = qpnp_vadc_enable(vadc, false);
+	if (rc < 0)
+		pr_err("vadc peripheral disable failed with %d\n", rc);
+
+	return rc;
+}
+
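+/*
+ * A one-shot read pre-empts any programmed measurement interval: drop
+ * back to normal mode, disable the bank and remember to re-queue the
+ * interval once the read completes; conversely, a queued interval is
+ * reprogrammed here after the one-shot read has finished.
+ */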
+static int32_t qpnp_vadc_manage_meas_int_requests(struct qpnp_vadc_chip *chip)
+{
+	struct qpnp_vadc_chip *vadc = dev_get_drvdata(chip->dev);
+	int rc = 0, dt_index = 0;
+	u8 mode_ctl = 0;
+
+	pr_debug("meas_int_mode:0x%x, mode_ctl:0x%x\n",
+		vadc->state_copy->meas_int_mode, mode_ctl);
+
+	if (vadc->state_copy->meas_int_mode) {
+		pr_debug("meas interval in progress. Proceed to disable it\n");
+		/* measurement interval in progress. Proceed to disable it */
+		mode_ctl = ADC_OP_NORMAL_MODE;
+		rc = qpnp_vadc_mode_select(vadc, mode_ctl);
+		if (rc < 0) {
+			pr_err("NORM mode select failed with %d\n", rc);
+			return rc;
+		}
+
+		/* Disable bank */
+		rc = qpnp_vadc_enable(vadc, false);
+		if (rc) {
+			pr_err("Disable bank failed with %d\n", rc);
+			return rc;
+		}
+
+		/* Check if a conversion is in progress */
+		rc = qpnp_vadc_wait_for_req_sts_check(vadc);
+		if (rc < 0) {
+			pr_err("req_sts check failed with %d\n", rc);
+			return rc;
+		}
+
+		vadc->state_copy->meas_int_mode = false;
+		vadc->state_copy->meas_int_request_in_queue = true;
+	} else if (vadc->state_copy->meas_int_request_in_queue) {
+		/* put the meas interval back in queue */
+		pr_debug("put meas interval back in queue\n");
+		vadc->adc->amux_prop->amux_channel =
+				vadc->state_copy->vadc_meas_amux.channel_num;
+		while ((vadc->adc->adc_channels[dt_index].channel_num
+			!= vadc->adc->amux_prop->amux_channel) &&
+			(dt_index < vadc->max_channels_available))
+			dt_index++;
+		if (dt_index >= vadc->max_channels_available) {
+			pr_err("not a valid VADC channel\n");
+			rc = -EINVAL;
+			return rc;
+		}
+
+		vadc->adc->amux_prop->decimation =
+			vadc->adc->adc_channels[dt_index].adc_decimation;
+		vadc->adc->amux_prop->hw_settle_time =
+			vadc->adc->adc_channels[dt_index].hw_settle_time;
+		vadc->adc->amux_prop->fast_avg_setup =
+			vadc->adc->adc_channels[dt_index].fast_avg_setup;
+		vadc->adc->amux_prop->mode_sel = ADC_OP_MEASUREMENT_INTERVAL;
+		rc = qpnp_vadc_configure(vadc, vadc->adc->amux_prop);
+		if (rc) {
+			pr_err("vadc configure failed with %d\n", rc);
+			return rc;
+		}
+
+		vadc->state_copy->meas_int_mode = true;
+		vadc->state_copy->meas_int_request_in_queue = false;
+	}
+	dev_set_drvdata(vadc->dev, vadc);
+
+	return 0;
+}
+
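+/*
+ * Consumer lookup: resolve the "qcom,<name>-vadc" phandle in the client's
+ * device tree node and return the matching registered VADC chip, or
+ * -EPROBE_DEFER if that chip has not probed yet.
+ */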
+struct qpnp_vadc_chip *qpnp_get_vadc(struct device *dev, const char *name)
+{
+	struct qpnp_vadc_chip *vadc;
+	struct device_node *node = NULL;
+	char prop_name[QPNP_MAX_PROP_NAME_LEN];
+
+	snprintf(prop_name, QPNP_MAX_PROP_NAME_LEN, "qcom,%s-vadc", name);
+
+	node = of_parse_phandle(dev->of_node, prop_name, 0);
+	if (node == NULL)
+		return ERR_PTR(-ENODEV);
+
+	list_for_each_entry(vadc, &qpnp_vadc_device_list, list)
+		if (vadc->adc->pdev->dev.of_node == node)
+			return vadc;
+	return ERR_PTR(-EPROBE_DEFER);
+}
+EXPORT_SYMBOL(qpnp_get_vadc);
+
+int32_t qpnp_vadc_conv_seq_request(struct qpnp_vadc_chip *vadc,
+				enum qpnp_vadc_trigger trigger_channel,
+					enum qpnp_vadc_channels channel,
+					struct qpnp_vadc_result *result)
+{
+	int rc = 0, scale_type, amux_prescaling, dt_index = 0, calib_type = 0;
+	uint32_t ref_channel, count = 0, local_idx = 0;
+	int32_t vref_calib = 0, gnd_calib = 0, new_vref_calib = 0, offset = 0;
+	int32_t calib_offset = 0;
+	u8 status1 = 0;
+
+	if (qpnp_vadc_is_valid(vadc))
+		return -EPROBE_DEFER;
+
+	mutex_lock(&vadc->adc->adc_lock);
+
+	if (vadc->state_copy->vadc_meas_int_enable)
+		qpnp_vadc_manage_meas_int_requests(vadc);
+
+	if (channel == REF_625MV) {
+		qpnp_vadc_625mv_channel_sel(vadc, &ref_channel);
+		channel = ref_channel;
+	}
+
+	vadc->adc->amux_prop->amux_channel = channel;
+
+	while ((vadc->adc->adc_channels[dt_index].channel_num
+		!= channel) && (dt_index < vadc->max_channels_available))
+		dt_index++;
+
+	if (dt_index >= vadc->max_channels_available) {
+		pr_err("not a valid VADC channel\n");
+		rc = -EINVAL;
+		goto fail_unlock;
+	}
+
+	calib_type = vadc->adc->adc_channels[dt_index].calib_type;
+	if (calib_type >= CALIB_NONE) {
+		pr_err("not a valid calib_type\n");
+		rc = -EINVAL;
+		goto fail_unlock;
+	}
+	calib_offset = (calib_type == CALIB_ABSOLUTE) ?
+		QPNP_VADC_ABSOLUTE_RECALIB_OFFSET :
+		QPNP_VADC_RATIOMETRIC_RECALIB_OFFSET;
+	rc = qpnp_vadc_version_check(vadc);
+	if (rc)
+		goto fail_unlock;
+	if (vadc->vadc_recalib_check) {
+		rc = qpnp_vadc_calib_vref(vadc, calib_type, &vref_calib);
+		if (rc) {
+			pr_err("Calibration failed\n");
+			goto fail_unlock;
+		}
+	} else if (!vadc->vadc_init_calib) {
+		rc = qpnp_vadc_calib_device(vadc);
+		if (rc) {
+			pr_err("Calibration failed\n");
+			goto fail_unlock;
+		} else {
+			vadc->vadc_init_calib = true;
+		}
+	}
+
+recalibrate:
+	status1 = 0;
+	vadc->adc->amux_prop->decimation =
+			vadc->adc->adc_channels[dt_index].adc_decimation;
+	vadc->adc->amux_prop->hw_settle_time =
+			vadc->adc->adc_channels[dt_index].hw_settle_time;
+	vadc->adc->amux_prop->fast_avg_setup =
+			vadc->adc->adc_channels[dt_index].fast_avg_setup;
+
+	if (trigger_channel < ADC_SEQ_NONE)
+		vadc->adc->amux_prop->mode_sel = (ADC_OP_CONVERSION_SEQUENCER
+						<< QPNP_VADC_OP_MODE_SHIFT);
+	else if (trigger_channel == ADC_SEQ_NONE)
+		vadc->adc->amux_prop->mode_sel = (ADC_OP_NORMAL_MODE
+						<< QPNP_VADC_OP_MODE_SHIFT);
+	else {
+		pr_err("Invalid trigger channel:%d\n", trigger_channel);
+		rc = -EINVAL;
+		goto fail_unlock;
+	}
+
+	vadc->adc->amux_prop->trigger_channel = trigger_channel;
+
+	rc = qpnp_vadc_configure(vadc, vadc->adc->amux_prop);
+	if (rc) {
+		pr_err("qpnp vadc configure failed with %d\n", rc);
+		goto fail_unlock;
+	}
+
+	if (vadc->vadc_poll_eoc) {
+		while (status1 != QPNP_VADC_STATUS1_EOC) {
+			rc = qpnp_vadc_read_reg(vadc, QPNP_VADC_STATUS1,
+							&status1, 1);
+			if (rc < 0)
+				goto fail_unlock;
+			status1 &= QPNP_VADC_STATUS1_REQ_STS_EOC_MASK;
+			if (status1 == QPNP_VADC_STATUS1_EOC)
+				break;
+			usleep_range(QPNP_VADC_CONV_TIME_MIN,
+					QPNP_VADC_CONV_TIME_MAX);
+			count++;
+			if (count > QPNP_VADC_ERR_COUNT) {
+				pr_err("retry error exceeded\n");
+				rc = qpnp_vadc_status_debug(vadc);
+				if (rc < 0)
+					pr_err("VADC disable failed\n");
+				rc = -EINVAL;
+				goto fail_unlock;
+			}
+		}
+	} else {
+		rc = wait_for_completion_timeout(
+					&vadc->adc->adc_rslt_completion,
+					QPNP_ADC_COMPLETION_TIMEOUT);
+		if (!rc) {
+			rc = qpnp_vadc_read_reg(vadc, QPNP_VADC_STATUS1,
+							&status1, 1);
+			if (rc < 0)
+				goto fail_unlock;
+			status1 &= QPNP_VADC_STATUS1_REQ_STS_EOC_MASK;
+			if (status1 == QPNP_VADC_STATUS1_EOC)
+				pr_debug("End of conversion status set\n");
+			else {
+				rc = qpnp_vadc_status_debug(vadc);
+				if (rc < 0)
+					pr_err("VADC disable failed\n");
+				rc = -EINVAL;
+				goto fail_unlock;
+			}
+		}
+	}
+
+	if (trigger_channel < ADC_SEQ_NONE) {
+		rc = qpnp_vadc_read_status(vadc,
+					vadc->adc->amux_prop->mode_sel);
+		if (rc)
+			pr_debug("Conversion sequence timed out - %d\n", rc);
+	}
+
+	rc = qpnp_vadc_read_conversion_result(vadc, &result->adc_code);
+	if (rc) {
+		pr_err("qpnp vadc read adc code failed with %d\n", rc);
+		goto fail_unlock;
+	}
+
+	if (vadc->vadc_recalib_check) {
+		rc = qpnp_vadc_calib_gnd(vadc, calib_type, &gnd_calib);
+		if (rc) {
+			pr_err("Calibration failed\n");
+			goto fail_unlock;
+		}
+		rc = qpnp_vadc_calib_vref(vadc, calib_type, &new_vref_calib);
+		if (rc < 0) {
+			pr_err("qpnp vadc calib read failed with %d\n", rc);
+			goto fail_unlock;
+		}
+
+		if (local_idx >= QPNP_VADC_RECALIB_MAXCNT) {
+			pr_err("invalid recalib count=%d\n", local_idx);
+			rc = -EINVAL;
+			goto fail_unlock;
+		}
+		pr_debug(
+			"chan=%d, calib=%s, vref_calib=0x%x, gnd_calib=0x%x, new_vref_calib=0x%x\n",
+			channel,
+			((calib_type == CALIB_ABSOLUTE) ?
+			"ABSOLUTE" : "RATIOMETRIC"),
+			vref_calib, gnd_calib, new_vref_calib);
+
+		offset = (new_vref_calib - vref_calib);
+		if (offset < 0)
+			offset = -offset;
+		if (offset <= calib_offset) {
+			pr_debug(
+				"qpnp vadc recalibration not required, offset:%d\n",
+								offset);
+			local_idx = 0;
+			vadc->adc->amux_prop->chan_prop->adc_graph[calib_type].dy =
+							(vref_calib - gnd_calib);
+			vadc->adc->amux_prop->chan_prop->adc_graph[calib_type].dx =
+				(calib_type == CALIB_ABSOLUTE) ? QPNP_ADC_625_UV :
+						vadc->adc->adc_prop->adc_vdd_reference;
+			vadc->adc->amux_prop->chan_prop->adc_graph[calib_type].adc_vref
+								= vref_calib;
+			vadc->adc->amux_prop->chan_prop->adc_graph[calib_type].adc_gnd
+								= gnd_calib;
+		} else {
+			vref_calib = new_vref_calib;
+			local_idx = local_idx + 1;
+			if (local_idx >= QPNP_VADC_RECALIB_MAXCNT) {
+				pr_err(
+				"qpnp_vadc recalibration failed, count=%d\n",
+								local_idx);
+			} else {
+				pr_debug(
+				"qpnp vadc recalibration requested, offset:%d\n",
+								offset);
+				offset = 0;
+				goto recalibrate;
+			}
+		}
+	}
+
+	amux_prescaling =
+		vadc->adc->adc_channels[dt_index].chan_path_prescaling;
+
+	if (amux_prescaling >= PATH_SCALING_NONE) {
+		rc = -EINVAL;
+		goto fail_unlock;
+	}
+
+	vadc->adc->amux_prop->chan_prop->offset_gain_numerator =
+		qpnp_vadc_amux_scaling_ratio[amux_prescaling].num;
+	vadc->adc->amux_prop->chan_prop->offset_gain_denominator =
+		 qpnp_vadc_amux_scaling_ratio[amux_prescaling].den;
+	vadc->adc->amux_prop->chan_prop->calib_type =
+		vadc->adc->adc_channels[dt_index].calib_type;
+
+	scale_type = vadc->adc->adc_channels[dt_index].adc_scale_fn;
+	if (scale_type >= SCALE_NONE) {
+		rc = -EBADF;
+		goto fail_unlock;
+	}
+
+	if ((qpnp_vadc_channel_post_scaling_calib_check(vadc, channel)) < 0)
+		pr_debug("Post scaling calib type not updated\n");
+
+	vadc_scale_fn[scale_type].chan(vadc, result->adc_code,
+		vadc->adc->adc_prop, vadc->adc->amux_prop->chan_prop, result);
+
+	pr_debug("channel=%d, adc_code=%d adc_result=%lld\n",
+			channel, result->adc_code, result->physical);
+
+fail_unlock:
+	if (vadc->state_copy->vadc_meas_int_enable)
+		qpnp_vadc_manage_meas_int_requests(vadc);
+
+	mutex_unlock(&vadc->adc->adc_lock);
+
+	return rc;
+}
+EXPORT_SYMBOL(qpnp_vadc_conv_seq_request);
+
+int32_t qpnp_vadc_read(struct qpnp_vadc_chip *vadc,
+				enum qpnp_vadc_channels channel,
+				struct qpnp_vadc_result *result)
+{
+	struct qpnp_vadc_result die_temp_result;
+	int rc = 0;
+	enum power_supply_property prop;
+	union power_supply_propval ret = {0, };
+
+	if (vadc->vadc_hc) {
+		rc = qpnp_vadc_hc_read(vadc, channel, result);
+		if (rc < 0) {
+			pr_err("Error reading vadc_hc channel %d\n", channel);
+			return rc;
+		}
+
+		return 0;
+	}
+
+	if (channel == VBAT_SNS) {
+		rc = qpnp_vadc_conv_seq_request(vadc, ADC_SEQ_NONE,
+				channel, result);
+		if (rc < 0) {
+			pr_err("Error reading vbatt\n");
+			return rc;
+		}
+
+		rc = qpnp_vadc_conv_seq_request(vadc, ADC_SEQ_NONE,
+				DIE_TEMP, &die_temp_result);
+		if (rc < 0) {
+			pr_err("Error reading die_temp\n");
+			return rc;
+		}
+
+		rc = qpnp_vbat_sns_comp(&result->physical, vadc,
+						die_temp_result.physical);
+		if (rc < 0)
+			pr_err("Error with vbat compensation\n");
+
+		return 0;
+	} else if (channel == SPARE2) {
+		/* chg temp channel */
+		if (!vadc->vadc_chg_vote) {
+			vadc->vadc_chg_vote =
+				power_supply_get_by_name("battery");
+			if (!vadc->vadc_chg_vote) {
+				pr_err("no vadc_chg_vote found\n");
+				return -EINVAL;
+			}
+		}
+
+		prop = POWER_SUPPLY_PROP_FORCE_TLIM;
+		ret.intval = 1;
+
+		rc = power_supply_set_property(vadc->vadc_chg_vote,
+								prop, &ret);
+		if (rc) {
+			pr_err("error enabling the charger circuitry vote\n");
+			return rc;
+		}
+
+		rc = qpnp_vadc_conv_seq_request(vadc, ADC_SEQ_NONE,
+				channel, result);
+		if (rc < 0)
+			pr_err("Error reading chg temp channel\n");
+
+		ret.intval = 0;
+		rc = power_supply_set_property(vadc->vadc_chg_vote,
+								prop, &ret);
+		if (rc) {
+			pr_err("error disabling the charger circuitry vote\n");
+			return rc;
+		}
+
+		return 0;
+	} else
+		return qpnp_vadc_conv_seq_request(vadc, ADC_SEQ_NONE,
+				channel, result);
+}
+EXPORT_SYMBOL(qpnp_vadc_read);
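+
+/*
+ * Usage sketch (illustrative only): a one-shot reading of a channel. The
+ * unit of result.physical depends on the channel's scale function.
+ *
+ *	struct qpnp_vadc_result result;
+ *	int rc = qpnp_vadc_read(chip, VBAT_SNS, &result);
+ *
+ *	if (!rc)
+ *		pr_debug("vbat: %lld (raw 0x%x)\n",
+ *			 result.physical, result.adc_code);
+ */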
+
+static void qpnp_vadc_lock(struct qpnp_vadc_chip *vadc)
+{
+	mutex_lock(&vadc->adc->adc_lock);
+}
+
+static void qpnp_vadc_unlock(struct qpnp_vadc_chip *vadc)
+{
+	mutex_unlock(&vadc->adc->adc_lock);
+}
+
+int32_t qpnp_vadc_iadc_sync_request(struct qpnp_vadc_chip *vadc,
+				enum qpnp_vadc_channels channel)
+{
+	int rc = 0, dt_index = 0, calib_type = 0;
+
+	if (qpnp_vadc_is_valid(vadc))
+		return -EPROBE_DEFER;
+
+	qpnp_vadc_lock(vadc);
+
+	vadc->adc->amux_prop->amux_channel = channel;
+
+	while ((vadc->adc->adc_channels[dt_index].channel_num
+		!= channel) && (dt_index < vadc->max_channels_available))
+		dt_index++;
+
+	if (dt_index >= vadc->max_channels_available) {
+		pr_err("not a valid VADC channel\n");
+		rc = -EINVAL;
+		goto fail;
+	}
+
+	calib_type = vadc->adc->adc_channels[dt_index].calib_type;
+	if (!vadc->vadc_init_calib) {
+		rc = qpnp_vadc_version_check(vadc);
+		if (rc)
+			goto fail;
+
+		rc = qpnp_vadc_calib_device(vadc);
+		if (rc) {
+			pr_err("Calibration failed\n");
+			goto fail;
+		} else
+			vadc->vadc_init_calib = true;
+	}
+
+	vadc->adc->amux_prop->decimation =
+			vadc->adc->adc_channels[dt_index].adc_decimation;
+	vadc->adc->amux_prop->hw_settle_time =
+			vadc->adc->adc_channels[dt_index].hw_settle_time;
+	vadc->adc->amux_prop->fast_avg_setup =
+			vadc->adc->adc_channels[dt_index].fast_avg_setup;
+	vadc->adc->amux_prop->mode_sel = (ADC_OP_NORMAL_MODE
+					<< QPNP_VADC_OP_MODE_SHIFT);
+	vadc->vadc_iadc_sync_lock = true;
+
+	rc = qpnp_vadc_configure(vadc, vadc->adc->amux_prop);
+	if (rc) {
+		pr_err("qpnp vadc configure failed with %d\n", rc);
+		goto fail;
+	}
+
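+	/*
+	 * Success: adc_lock is intentionally left held here; it is released
+	 * by qpnp_vadc_iadc_sync_complete_request().
+	 */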
+	return rc;
+fail:
+	vadc->vadc_iadc_sync_lock = false;
+	qpnp_vadc_unlock(vadc);
+	return rc;
+}
+EXPORT_SYMBOL(qpnp_vadc_iadc_sync_request);
+
+int32_t qpnp_vadc_iadc_sync_complete_request(struct qpnp_vadc_chip *vadc,
+					enum qpnp_vadc_channels channel,
+						struct qpnp_vadc_result *result)
+{
+	int rc = 0, scale_type, amux_prescaling, dt_index = 0;
+
+	vadc->adc->amux_prop->amux_channel = channel;
+
+	while ((vadc->adc->adc_channels[dt_index].channel_num
+		!= channel) && (dt_index < vadc->max_channels_available))
+		dt_index++;
+
+	rc = qpnp_vadc_read_conversion_result(vadc, &result->adc_code);
+	if (rc) {
+		pr_err("qpnp vadc read adc code failed with %d\n", rc);
+		goto fail;
+	}
+
+	amux_prescaling =
+		vadc->adc->adc_channels[dt_index].chan_path_prescaling;
+
+	if (amux_prescaling >= PATH_SCALING_NONE) {
+		rc = -EINVAL;
+		goto fail;
+	}
+
+	vadc->adc->amux_prop->chan_prop->offset_gain_numerator =
+		qpnp_vadc_amux_scaling_ratio[amux_prescaling].num;
+	vadc->adc->amux_prop->chan_prop->offset_gain_denominator =
+		 qpnp_vadc_amux_scaling_ratio[amux_prescaling].den;
+
+	scale_type = vadc->adc->adc_channels[dt_index].adc_scale_fn;
+	if (scale_type >= SCALE_NONE) {
+		rc = -EBADF;
+		goto fail;
+	}
+
+	vadc_scale_fn[scale_type].chan(vadc, result->adc_code,
+		vadc->adc->adc_prop, vadc->adc->amux_prop->chan_prop, result);
+
+fail:
+	vadc->vadc_iadc_sync_lock = false;
+	qpnp_vadc_unlock(vadc);
+	return rc;
+}
+EXPORT_SYMBOL(qpnp_vadc_iadc_sync_complete_request);
+
+static int32_t qpnp_vadc_thr_update(struct qpnp_vadc_chip *vadc,
+					int32_t high_thr, int32_t low_thr)
+{
+	int rc = 0;
+	u8 buf = 0;
+
+	pr_debug("client requested high:%d and low:%d\n",
+		high_thr, low_thr);
+
+	buf = QPNP_VADC_THR_LSB_MASK(low_thr);
+	rc = qpnp_vadc_write_reg(vadc, QPNP_VADC_LOW_THR_LSB, &buf, 1);
+	if (rc < 0) {
+		pr_err("low threshold lsb setting failed, err:%d\n", rc);
+		return rc;
+	}
+
+	buf = QPNP_VADC_THR_MSB_MASK(low_thr);
+	rc = qpnp_vadc_write_reg(vadc, QPNP_VADC_LOW_THR_MSB, &buf, 1);
+	if (rc < 0) {
+		pr_err("low threshold msb setting failed, err:%d\n", rc);
+		return rc;
+	}
+
+	buf = QPNP_VADC_THR_LSB_MASK(high_thr);
+	rc = qpnp_vadc_write_reg(vadc, QPNP_VADC_HIGH_THR_LSB, &buf, 1);
+	if (rc < 0) {
+		pr_err("high threshold lsb setting failed, err:%d\n", rc);
+		return rc;
+	}
+
+	buf = QPNP_VADC_THR_MSB_MASK(high_thr);
+	rc = qpnp_vadc_write_reg(vadc, QPNP_VADC_HIGH_THR_MSB, &buf, 1);
+	if (rc < 0) {
+		pr_err("high threshold msb setting failed, err:%d\n", rc);
+		return rc;
+	}
+
+	return rc;
+}
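+
+/*
+ * Illustration (assuming QPNP_VADC_THR_LSB_MASK/_MSB_MASK extract the low
+ * and high byte of the threshold code): a high_thr of 0x1234 is written as
+ * 0x34 to QPNP_VADC_HIGH_THR_LSB and 0x12 to QPNP_VADC_HIGH_THR_MSB.
+ */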
+
+int32_t qpnp_vadc_channel_monitor(struct qpnp_vadc_chip *chip,
+					struct qpnp_adc_tm_btm_param *param)
+{
+	uint32_t channel, scale_type = 0;
+	uint32_t low_thr = 0, high_thr = 0;
+	int rc = 0, idx = 0, amux_prescaling = 0;
+	struct qpnp_vadc_chip *vadc = dev_get_drvdata(chip->dev);
+	u8 buf = 0;
+
+	if (qpnp_vadc_is_valid(vadc))
+		return -EPROBE_DEFER;
+
+	if (!vadc->state_copy->vadc_meas_int_enable) {
+		pr_err("Recurring measurement interval not available\n");
+		return -EINVAL;
+	}
+
+	if (param->threshold_notification == NULL) {
+		pr_debug("No notification callback for high/low thresholds\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&vadc->adc->adc_lock);
+
+	channel = param->channel;
+	while (idx < vadc->max_channels_available) {
+		if (vadc->adc->adc_channels[idx].channel_num == channel)
+			break;
+		else
+			idx++;
+	}
+
+	if (idx >= vadc->max_channels_available) {
+		pr_err("not a valid VADC channel\n");
+		rc = -EINVAL;
+		goto fail_unlock;
+	}
+
+	scale_type = vadc->adc->adc_channels[idx].adc_scale_fn;
+	if (scale_type >= SCALE_RVADC_SCALE_NONE) {
+		rc = -EBADF;
+		goto fail_unlock;
+	}
+
+	amux_prescaling =
+		vadc->adc->adc_channels[idx].chan_path_prescaling;
+
+	if (amux_prescaling >= PATH_SCALING_NONE) {
+		rc = -EINVAL;
+		goto fail_unlock;
+	}
+
+	vadc->adc->amux_prop->chan_prop->offset_gain_numerator =
+		qpnp_vadc_amux_scaling_ratio[amux_prescaling].num;
+	vadc->adc->amux_prop->chan_prop->offset_gain_denominator =
+		 qpnp_vadc_amux_scaling_ratio[amux_prescaling].den;
+	vadc->adc->amux_prop->chan_prop->calib_type =
+		vadc->adc->adc_channels[idx].calib_type;
+
+	pr_debug("channel:%d, scale_type:%d, dt_idx:%d\n",
+					channel, scale_type, idx);
+	vadc->adc->amux_prop->amux_channel = channel;
+	vadc->adc->amux_prop->decimation =
+			vadc->adc->adc_channels[idx].adc_decimation;
+	vadc->adc->amux_prop->hw_settle_time =
+			vadc->adc->adc_channels[idx].hw_settle_time;
+	vadc->adc->amux_prop->fast_avg_setup =
+			vadc->adc->adc_channels[idx].fast_avg_setup;
+	vadc->adc->amux_prop->mode_sel = ADC_OP_MEASUREMENT_INTERVAL;
+	adc_vadc_rscale_fn[scale_type].chan(vadc,
+			vadc->adc->amux_prop->chan_prop, param,
+			&low_thr, &high_thr);
+
+	if (param->timer_interval >= ADC_MEAS1_INTERVAL_NONE) {
+		pr_err("Invalid timer interval :%d\n", param->timer_interval);
+		goto fail_unlock;
+	}
+
+	buf = param->timer_interval;
+	rc = qpnp_vadc_write_reg(vadc, QPNP_VADC_MEAS_INTERVAL_CTL, &buf, 1);
+	if (rc) {
+		pr_err("vadc meas timer failed with %d\n", rc);
+		goto fail_unlock;
+	}
+
+	rc = qpnp_vadc_thr_update(vadc, high_thr, low_thr);
+	if (rc) {
+		pr_err("vadc thr update failed with %d\n", rc);
+		goto fail_unlock;
+	}
+
+	rc = qpnp_vadc_configure(vadc, vadc->adc->amux_prop);
+	if (rc) {
+		pr_err("vadc configure failed with %d\n", rc);
+		goto fail_unlock;
+	}
+
+	vadc->state_copy->meas_int_mode = true;
+	vadc->state_copy->param = param;
+	vadc->state_copy->vadc_meas_amux.channel_num = channel;
+	vadc->state_copy->vadc_meas_amux.adc_decimation =
+				vadc->adc->amux_prop->decimation;
+	vadc->state_copy->vadc_meas_amux.hw_settle_time =
+				vadc->adc->amux_prop->hw_settle_time;
+	vadc->state_copy->vadc_meas_amux.fast_avg_setup =
+				vadc->adc->amux_prop->fast_avg_setup;
+	vadc->state_copy->meas_int_request_in_queue = false;
+	dev_set_drvdata(vadc->dev, vadc);
+
+fail_unlock:
+	mutex_unlock(&vadc->adc->adc_lock);
+
+	return rc;
+}
+EXPORT_SYMBOL(qpnp_vadc_channel_monitor);
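+
+/*
+ * Usage sketch (illustrative only; shows just the fields consumed above,
+ * my_notify is a hypothetical callback matching the threshold_notification
+ * type of struct qpnp_adc_tm_btm_param):
+ *
+ *	struct qpnp_adc_tm_btm_param param = {
+ *		.channel = <vadc channel>,
+ *		.timer_interval = <an ADC_MEAS1_INTERVAL_* value>,
+ *		.threshold_notification = my_notify,
+ *	};
+ *
+ *	rc = qpnp_vadc_channel_monitor(chip, &param);
+ */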
+
+int32_t qpnp_vadc_end_channel_monitor(struct qpnp_vadc_chip *chip)
+{
+	struct qpnp_vadc_chip *vadc = dev_get_drvdata(chip->dev);
+	u8 mode_ctl = 0;
+
+	if (qpnp_vadc_is_valid(vadc))
+		return -EPROBE_DEFER;
+
+	if (!vadc->state_copy->vadc_meas_int_enable) {
+		pr_err("Recurring measurement interval not available\n");
+		return -EINVAL;
+	}
+
+	vadc->state_copy->meas_int_mode = false;
+	vadc->state_copy->meas_int_request_in_queue = false;
+	dev_set_drvdata(vadc->dev, vadc);
+	mode_ctl = ADC_OP_NORMAL_MODE;
+	/* Set measurement in single measurement mode */
+	qpnp_vadc_mode_select(vadc, mode_ctl);
+	qpnp_vadc_enable(vadc, false);
+
+	return 0;
+}
+EXPORT_SYMBOL(qpnp_vadc_end_channel_monitor);
+
+static ssize_t qpnp_adc_show(struct device *dev,
+			struct device_attribute *devattr, char *buf)
+{
+	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+	struct qpnp_vadc_chip *vadc = dev_get_drvdata(dev);
+	struct qpnp_vadc_result result;
+	int rc = -1;
+
+	rc = qpnp_vadc_read(vadc, attr->index, &result);
+
+	if (rc) {
+		pr_err("VADC read error with %d\n", rc);
+		return 0;
+	}
+
+	return snprintf(buf, QPNP_ADC_HWMON_NAME_LENGTH,
+		"Result:%lld Raw:%x\n", result.physical, result.adc_code);
+}
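+
+/*
+ * Note: each channel listed in the device tree gets a sysfs attribute named
+ * after the channel (see qpnp_vadc_init_hwmon() below); reading it triggers
+ * qpnp_vadc_read() and returns a line of the form
+ * "Result:<physical> Raw:<adc_code>".
+ */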
+
+static struct sensor_device_attribute qpnp_adc_attr =
+	SENSOR_ATTR(NULL, S_IRUGO, qpnp_adc_show, NULL, 0);
+
+static int32_t qpnp_vadc_init_hwmon(struct qpnp_vadc_chip *vadc,
+					struct platform_device *pdev)
+{
+	struct device_node *child;
+	struct device_node *node = pdev->dev.of_node;
+	int rc = 0, i = 0, channel;
+
+	for_each_child_of_node(node, child) {
+		channel = vadc->adc->adc_channels[i].channel_num;
+		qpnp_adc_attr.index = vadc->adc->adc_channels[i].channel_num;
+		qpnp_adc_attr.dev_attr.attr.name =
+						vadc->adc->adc_channels[i].name;
+		memcpy(&vadc->sens_attr[i], &qpnp_adc_attr,
+						sizeof(qpnp_adc_attr));
+		sysfs_attr_init(&vadc->sens_attr[i].dev_attr.attr);
+		rc = device_create_file(&pdev->dev,
+				&vadc->sens_attr[i].dev_attr);
+		if (rc) {
+			dev_err(&pdev->dev,
+				"device_create_file failed for dev %s\n",
+				vadc->adc->adc_channels[i].name);
+			goto hwmon_err_sens;
+		}
+		i++;
+	}
+
+	return 0;
+hwmon_err_sens:
+	pr_err("Init HWMON failed for qpnp_adc with %d\n", rc);
+	return rc;
+}
+
+static int qpnp_vadc_get_temp(struct thermal_zone_device *thermal,
+			     int *temp)
+{
+	struct qpnp_vadc_thermal_data *vadc_therm = thermal->devdata;
+	struct qpnp_vadc_chip *vadc = vadc_therm->vadc_dev;
+	struct qpnp_vadc_result result;
+	int rc = 0;
+
+	rc = qpnp_vadc_read(vadc,
+				vadc_therm->vadc_channel, &result);
+	if (rc) {
+		if (rc != -EPROBE_DEFER)
+			pr_err("VADC read error with %d\n", rc);
+		return rc;
+	}
+
+	*temp = result.physical;
+
+	return rc;
+}
+
+static struct thermal_zone_device_ops qpnp_vadc_thermal_ops = {
+	.get_temp = qpnp_vadc_get_temp,
+};
+
+static int32_t qpnp_vadc_init_thermal(struct qpnp_vadc_chip *vadc,
+					struct platform_device *pdev)
+{
+	struct device_node *child;
+	struct device_node *node = pdev->dev.of_node;
+	int rc = 0, i = 0;
+	bool thermal_node = false;
+
+	if (node == NULL) {
+		rc = -EINVAL;
+		goto thermal_err_sens;
+	}
+	for_each_child_of_node(node, child) {
+		char name[QPNP_THERMALNODE_NAME_LENGTH];
+
+		vadc->vadc_therm_chan[i].vadc_channel =
+			vadc->adc->adc_channels[i].channel_num;
+		vadc->vadc_therm_chan[i].thermal_chan = i;
+		thermal_node = of_property_read_bool(child,
+					"qcom,vadc-thermal-node");
+		if (thermal_node) {
+			/* Register with the thermal zone */
+			vadc->vadc_therm_chan[i].thermal_node = true;
+			snprintf(name, sizeof(name), "%s",
+				vadc->adc->adc_channels[i].name);
+			vadc->vadc_therm_chan[i].vadc_dev = vadc;
+			vadc->vadc_therm_chan[i].tz_dev =
+				thermal_zone_device_register(name,
+				0, 0, &vadc->vadc_therm_chan[i],
+				&qpnp_vadc_thermal_ops, NULL, 0, 0);
+			if (IS_ERR(vadc->vadc_therm_chan[i].tz_dev)) {
+				pr_err("thermal device register failed\n");
+				rc = PTR_ERR(vadc->vadc_therm_chan[i].tz_dev);
+				goto thermal_err_sens;
+			}
+		}
+		i++;
+		thermal_node = false;
+	}
+	return 0;
+thermal_err_sens:
+	pr_err("Init thermal failed for qpnp_adc with %d\n", rc);
+	return rc;
+}
+
+static const struct of_device_id qpnp_vadc_match_table[] = {
+	{	.compatible = "qcom,qpnp-vadc",
+	},
+	{	.compatible = "qcom,qpnp-vadc-hc",
+	},
+	{}
+};
+
+static int qpnp_vadc_probe(struct platform_device *pdev)
+{
+	struct qpnp_vadc_chip *vadc;
+	struct qpnp_adc_drv *adc_qpnp;
+	struct qpnp_vadc_thermal_data *adc_thermal;
+	struct device_node *node = pdev->dev.of_node;
+	struct device_node *child;
+	const struct of_device_id *id;
+	int rc, count_adc_channel_list = 0, i = 0;
+	u8 fab_id = 0;
+
+	for_each_child_of_node(node, child)
+		count_adc_channel_list++;
+
+	if (!count_adc_channel_list) {
+		pr_err("No channel listing\n");
+		return -EINVAL;
+	}
+
+	id = of_match_node(qpnp_vadc_match_table, node);
+	if (id == NULL) {
+		pr_err("qpnp_vadc_match of_node prop not present\n");
+		return -ENODEV;
+	}
+
+	vadc = devm_kzalloc(&pdev->dev, sizeof(struct qpnp_vadc_chip) +
+		(sizeof(struct sensor_device_attribute) *
+				count_adc_channel_list), GFP_KERNEL);
+	if (!vadc) {
+		dev_err(&pdev->dev, "Unable to allocate memory\n");
+		return -ENOMEM;
+	}
+
+	vadc->dev = &(pdev->dev);
+	adc_qpnp = devm_kzalloc(&pdev->dev, sizeof(struct qpnp_adc_drv),
+			GFP_KERNEL);
+	if (!adc_qpnp)
+		return -ENOMEM;
+
+	adc_qpnp->regmap = dev_get_regmap(pdev->dev.parent, NULL);
+	if (!adc_qpnp->regmap) {
+		dev_err(&pdev->dev, "Couldn't get parent's regmap\n");
+		return -EINVAL;
+	}
+
+	vadc->state_copy = devm_kzalloc(&pdev->dev,
+			sizeof(struct qpnp_vadc_mode_state), GFP_KERNEL);
+	if (!vadc->state_copy)
+		return -ENOMEM;
+
+	vadc->adc = adc_qpnp;
+	adc_thermal = devm_kzalloc(&pdev->dev,
+			(sizeof(struct qpnp_vadc_thermal_data) *
+				count_adc_channel_list), GFP_KERNEL);
+	if (!adc_thermal) {
+		dev_err(&pdev->dev, "Unable to allocate memory\n");
+		return -ENOMEM;
+	}
+
+	vadc->vadc_therm_chan = adc_thermal;
+	if (!strcmp(id->compatible, "qcom,qpnp-vadc-hc")) {
+		vadc->vadc_hc = true;
+		vadc->adc->adc_hc = true;
+	}
+
+	rc = qpnp_adc_get_devicetree_data(pdev, vadc->adc);
+	if (rc) {
+		dev_err(&pdev->dev, "failed to read device tree\n");
+		return rc;
+	}
+	mutex_init(&vadc->adc->adc_lock);
+
+	rc = qpnp_vadc_init_hwmon(vadc, pdev);
+	if (rc) {
+		dev_err(&pdev->dev, "failed to initialize qpnp hwmon adc\n");
+		return rc;
+	}
+	vadc->vadc_hwmon = hwmon_device_register(&vadc->adc->pdev->dev);
+	rc = qpnp_vadc_init_thermal(vadc, pdev);
+	if (rc) {
+		dev_err(&pdev->dev, "failed to initialize qpnp thermal adc\n");
+		return rc;
+	}
+	vadc->vadc_init_calib = false;
+	vadc->max_channels_available = count_adc_channel_list;
+	rc = qpnp_vadc_read_reg(vadc, QPNP_INT_TEST_VAL, &fab_id, 1);
+	if (rc < 0) {
+		pr_err("qpnp adc comp id failed with %d\n", rc);
+		goto err_setup;
+	}
+	vadc->id = fab_id;
+	pr_debug("fab_id = %d\n", fab_id);
+
+	rc = qpnp_vadc_read_reg(vadc, QPNP_VADC_REVISION2,
+				&vadc->revision_dig_major, 1);
+	if (rc < 0) {
+		pr_err("qpnp adc dig_major rev read failed with %d\n", rc);
+		goto err_setup;
+	}
+
+	rc = qpnp_vadc_read_reg(vadc, QPNP_VADC_REVISION3,
+				&vadc->revision_ana_minor, 1);
+	if (rc < 0) {
+		pr_err("qpnp adc ana_minor rev read failed with %d\n", rc);
+		goto err_setup;
+	}
+
+	rc = qpnp_vadc_warm_rst_configure(vadc);
+	if (rc < 0) {
+		pr_err("Setting perp reset on warm reset failed %d\n", rc);
+		goto err_setup;
+	}
+
+	INIT_WORK(&vadc->trigger_completion_work, qpnp_vadc_work);
+
+	vadc->vadc_recalib_check = of_property_read_bool(node,
+						"qcom,vadc-recalib-check");
+
+	vadc->vadc_poll_eoc = of_property_read_bool(node,
+						"qcom,vadc-poll-eoc");
+	if (!vadc->vadc_poll_eoc) {
+		rc = devm_request_irq(&pdev->dev, vadc->adc->adc_irq_eoc,
+				qpnp_vadc_isr, IRQF_TRIGGER_RISING,
+				"qpnp_vadc_interrupt", vadc);
+		if (rc) {
+			dev_err(&pdev->dev,
+			"failed to request adc irq with error %d\n", rc);
+			goto err_setup;
+		} else {
+			enable_irq_wake(vadc->adc->adc_irq_eoc);
+		}
+	} else
+		device_init_wakeup(vadc->dev, 1);
+
+	vadc->state_copy->vadc_meas_int_enable = of_property_read_bool(node,
+						"qcom,vadc-meas-int-mode");
+	if (vadc->state_copy->vadc_meas_int_enable) {
+		vadc->adc->adc_high_thr_irq = platform_get_irq_byname(pdev,
+								      "high-thr-en-set");
+		if (vadc->adc->adc_high_thr_irq < 0) {
+			pr_err("Invalid irq\n");
+			rc = -ENXIO;
+			goto err_setup;
+		}
+
+		vadc->adc->adc_low_thr_irq = platform_get_irq_byname(pdev,
+								     "low-thr-en-set");
+		if (vadc->adc->adc_low_thr_irq < 0) {
+			pr_err("Invalid irq\n");
+			rc = -ENXIO;
+			goto err_setup;
+		}
+
+		rc = devm_request_irq(&pdev->dev, vadc->adc->adc_high_thr_irq,
+					qpnp_vadc_high_thr_isr,
+			IRQF_TRIGGER_RISING, "qpnp_vadc_high_interrupt", vadc);
+		if (rc) {
+			dev_err(&pdev->dev, "failed to request adc irq\n");
+			goto err_setup;
+		} else {
+			enable_irq_wake(vadc->adc->adc_high_thr_irq);
+		}
+
+		rc = devm_request_irq(&pdev->dev, vadc->adc->adc_low_thr_irq,
+					qpnp_vadc_low_thr_isr,
+			IRQF_TRIGGER_RISING, "qpnp_vadc_low_interrupt", vadc);
+		if (rc) {
+			dev_err(&pdev->dev, "failed to request adc irq\n");
+			goto err_setup;
+		} else {
+			enable_irq_wake(vadc->adc->adc_low_thr_irq);
+		}
+		INIT_WORK(&vadc->trigger_high_thr_work,
+						qpnp_vadc_high_thr_fn);
+		INIT_WORK(&vadc->trigger_low_thr_work, qpnp_vadc_low_thr_fn);
+	}
+
+	vadc->vadc_iadc_sync_lock = false;
+	dev_set_drvdata(&pdev->dev, vadc);
+	list_add(&vadc->list, &qpnp_vadc_device_list);
+
+	return 0;
+
+err_setup:
+	for_each_child_of_node(node, child) {
+		device_remove_file(&pdev->dev, &vadc->sens_attr[i].dev_attr);
+		if (vadc->vadc_therm_chan[i].thermal_node)
+			thermal_zone_device_unregister(
+					vadc->vadc_therm_chan[i].tz_dev);
+		i++;
+	}
+	hwmon_device_unregister(vadc->vadc_hwmon);
+
+	return rc;
+}
+
+static int qpnp_vadc_remove(struct platform_device *pdev)
+{
+	struct qpnp_vadc_chip *vadc = dev_get_drvdata(&pdev->dev);
+	struct device_node *node = pdev->dev.of_node;
+	struct device_node *child;
+	int i = 0;
+
+	for_each_child_of_node(node, child) {
+		device_remove_file(&pdev->dev, &vadc->sens_attr[i].dev_attr);
+		if (vadc->vadc_therm_chan[i].thermal_node)
+			thermal_zone_device_unregister(
+					vadc->vadc_therm_chan[i].tz_dev);
+		i++;
+	}
+	hwmon_device_unregister(vadc->vadc_hwmon);
+	list_del(&vadc->list);
+	if (vadc->adc->hkadc_ldo && vadc->adc->hkadc_ldo_ok)
+		qpnp_adc_free_voltage_resource(vadc->adc);
+	dev_set_drvdata(&pdev->dev, NULL);
+
+	return 0;
+}
+
+static int qpnp_vadc_suspend_noirq(struct device *dev)
+{
+	struct qpnp_vadc_chip *vadc = dev_get_drvdata(dev);
+	u8 status = 0;
+
+	qpnp_vadc_read_reg(vadc, QPNP_VADC_STATUS1, &status, 1);
+	if (((status & QPNP_VADC_STATUS1_OP_MODE_MASK) >>
+		QPNP_VADC_OP_MODE_SHIFT) == QPNP_VADC_MEAS_INT_MODE) {
+		pr_debug("Meas interval in progress\n");
+	} else if (vadc->vadc_poll_eoc) {
+		status &= QPNP_VADC_STATUS1_REQ_STS_EOC_MASK;
+		pr_debug("vadc conversion status=%d\n", status);
+		if (status != QPNP_VADC_STATUS1_EOC) {
+			pr_err(
+				"Aborting suspend, adc conversion requested while suspending\n");
+			return -EBUSY;
+		}
+	}
+
+	return 0;
+}
+
+static const struct dev_pm_ops qpnp_vadc_pm_ops = {
+	.suspend_noirq	= qpnp_vadc_suspend_noirq,
+};
+
+static struct platform_driver qpnp_vadc_driver = {
+	.driver		= {
+		.name		= "qcom,qpnp-vadc",
+		.of_match_table	= qpnp_vadc_match_table,
+		.pm		= &qpnp_vadc_pm_ops,
+	},
+	.probe		= qpnp_vadc_probe,
+	.remove		= qpnp_vadc_remove,
+};
+
+static int __init qpnp_vadc_init(void)
+{
+	return platform_driver_register(&qpnp_vadc_driver);
+}
+module_init(qpnp_vadc_init);
+
+static void __exit qpnp_vadc_exit(void)
+{
+	platform_driver_unregister(&qpnp_vadc_driver);
+}
+module_exit(qpnp_vadc_exit);
+
+MODULE_DESCRIPTION("QPNP PMIC Voltage ADC driver");
+MODULE_LICENSE("GPL v2");
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/hwspinlock/msm_remote_spinlock.c	2019-01-22 16:16:23.807249159 +0100
@@ -0,0 +1,557 @@
+/* Copyright (c) 2008-2009, 2011-2015 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/delay.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/msm_remote_spinlock.h>
+#include <linux/slab.h>
+
+#include <soc/qcom/smem.h>
+
+/**
+ * The local processor (APPS) is PID 0, but because 0 is reserved for an empty
+ * lock, the value PID + 1 is used as the APPS token when writing to the lock.
+ */
+#define SPINLOCK_TOKEN_APPS 1
+
+static int is_hw_lock_type;
+static DEFINE_MUTEX(ops_init_lock);
+
+struct spinlock_ops {
+	void (*lock)(raw_remote_spinlock_t *lock);
+	void (*unlock)(raw_remote_spinlock_t *lock);
+	int (*trylock)(raw_remote_spinlock_t *lock);
+	int (*release)(raw_remote_spinlock_t *lock, uint32_t pid);
+	int (*owner)(raw_remote_spinlock_t *lock);
+	void (*lock_rlock_id)(raw_remote_spinlock_t *lock, uint32_t tid);
+	void (*unlock_rlock)(raw_remote_spinlock_t *lock);
+	int (*get_hw_spinlocks_element)(raw_remote_spinlock_t *lock);
+};
+
+static struct spinlock_ops current_ops;
+
+static int remote_spinlock_init_address(int id, _remote_spinlock_t *lock);
+
+/* ldrex implementation ----------------------------------------------------- */
+static char *ldrex_compatible_string = "qcom,ipc-spinlock-ldrex";
+
+#ifdef CONFIG_ARM
+static void __raw_remote_ex_spin_lock(raw_remote_spinlock_t *lock)
+{
+	unsigned long tmp;
+
+	__asm__ __volatile__(
+"1:     ldrex   %0, [%1]\n"
+"       teq     %0, #0\n"
+"       strexeq %0, %2, [%1]\n"
+"       teqeq   %0, #0\n"
+"       bne     1b"
+	: "=&r" (tmp)
+	: "r" (&lock->lock), "r" (SPINLOCK_TOKEN_APPS)
+	: "cc");
+
+	smp_mb();
+}
+
+static int __raw_remote_ex_spin_trylock(raw_remote_spinlock_t *lock)
+{
+	unsigned long tmp;
+
+	__asm__ __volatile__(
+"       ldrex   %0, [%1]\n"
+"       teq     %0, #0\n"
+"       strexeq %0, %2, [%1]\n"
+	: "=&r" (tmp)
+	: "r" (&lock->lock), "r" (SPINLOCK_TOKEN_APPS)
+	: "cc");
+
+	if (tmp == 0) {
+		smp_mb();
+		return 1;
+	}
+	return 0;
+}
+
+static void __raw_remote_ex_spin_unlock(raw_remote_spinlock_t *lock)
+{
+	int lock_owner;
+
+	smp_mb();
+	lock_owner = readl_relaxed(&lock->lock);
+	if (lock_owner != SPINLOCK_TOKEN_APPS) {
+		pr_err("%s: spinlock not owned by Apps (actual owner is %d)\n",
+				__func__, lock_owner);
+	}
+
+	__asm__ __volatile__(
+"       str     %1, [%0]\n"
+	:
+	: "r" (&lock->lock), "r" (0)
+	: "cc");
+}
+#else
+static void __raw_remote_ex_spin_lock(raw_remote_spinlock_t *lock)
+{
+}
+
+static int __raw_remote_ex_spin_trylock(raw_remote_spinlock_t *lock)
+{
+	return 0;
+}
+
+static void __raw_remote_ex_spin_unlock(raw_remote_spinlock_t *lock)
+{
+}
+#endif /* CONFIG_ARM */
+/* end ldrex implementation ------------------------------------------------- */
+
+/* sfpb implementation ------------------------------------------------------ */
+static uint32_t lock_count;
+static phys_addr_t reg_base;
+static uint32_t reg_size;
+static uint32_t lock_offset; /* offset into the hardware block before lock 0 */
+static uint32_t lock_size;
+
+static void *hw_mutex_reg_base;
+static DEFINE_MUTEX(hw_map_init_lock);
+static int *hw_spinlocks;
+
+static char *sfpb_compatible_string = "qcom,ipc-spinlock-sfpb";
+
+static int init_hw_mutex(struct device_node *node)
+{
+	struct resource r;
+	int rc;
+
+	rc = of_address_to_resource(node, 0, &r);
+	if (rc)
+		BUG();
+
+	rc = of_property_read_u32(node, "qcom,num-locks", &lock_count);
+	if (rc)
+		BUG();
+
+	reg_base = r.start;
+	reg_size = (uint32_t)(resource_size(&r));
+	lock_offset = 0;
+	lock_size = reg_size / lock_count;
+
+	return 0;
+}
+
+static void find_and_init_hw_mutex(void)
+{
+	struct device_node *node;
+
+	node = of_find_compatible_node(NULL, NULL, sfpb_compatible_string);
+	BUG_ON(node == NULL);
+	init_hw_mutex(node);
+	hw_mutex_reg_base = ioremap(reg_base, reg_size);
+	BUG_ON(hw_mutex_reg_base == NULL);
+	hw_spinlocks = kzalloc(sizeof(int) * lock_count, GFP_KERNEL);
+	BUG_ON(hw_spinlocks == NULL);
+}
+
+static int remote_spinlock_init_address_hw(int id, _remote_spinlock_t *lock)
+{
+	/*
+	 * Optimistic locking.  Init only needs to be done once by the first
+	 * caller.  After that, serializing inits between different callers
+	 * is unnecessary.  The second check after the lock ensures init
+	 * wasn't previously completed by someone else before the lock could
+	 * be grabbed.
+	 */
+	if (!hw_mutex_reg_base) {
+		mutex_lock(&hw_map_init_lock);
+		if (!hw_mutex_reg_base)
+			find_and_init_hw_mutex();
+		mutex_unlock(&hw_map_init_lock);
+	}
+
+	if (id >= lock_count)
+		return -EINVAL;
+
+	*lock = hw_mutex_reg_base + lock_offset + id * lock_size;
+	return 0;
+}
+
+static unsigned int remote_spinlock_get_lock_id(raw_remote_spinlock_t *lock)
+{
+	unsigned int id;
+
+	BUG_ON((uintptr_t)lock < (uintptr_t)hw_mutex_reg_base);
+	BUG_ON(((uintptr_t)lock - (uintptr_t)hw_mutex_reg_base) < lock_offset);
+
+	id = (unsigned int)((uintptr_t)lock - (uintptr_t)hw_mutex_reg_base -
+			lock_offset) / lock_size;
+	BUG_ON(id >= lock_count);
+	return id;
+}
+
+static void __raw_remote_sfpb_spin_lock(raw_remote_spinlock_t *lock)
+{
+	int owner;
+	unsigned int id = remote_spinlock_get_lock_id(lock);
+
+	/*
+	 * Wait for other local processor task to release spinlock if it
+	 * already has the remote spinlock locked.  This can only happen in
+	 * test cases since the local spinlock will prevent this when using the
+	 * public APIs.
+	 */
+	while (readl_relaxed(lock) == SPINLOCK_TOKEN_APPS)
+		;
+
+	/* acquire remote spinlock */
+	do {
+		writel_relaxed(SPINLOCK_TOKEN_APPS, lock);
+		smp_mb();
+		owner = readl_relaxed(lock);
+		hw_spinlocks[id] = owner;
+	} while (owner != SPINLOCK_TOKEN_APPS);
+}
+
+static int __raw_remote_sfpb_spin_trylock(raw_remote_spinlock_t *lock)
+{
+	int owner;
+	unsigned int id = remote_spinlock_get_lock_id(lock);
+	/*
+	 * If the local processor owns the spinlock, return failure.  This can
+	 * only happen in test cases since the local spinlock will prevent this
+	 * when using the public APIs.
+	 */
+	if (readl_relaxed(lock) == SPINLOCK_TOKEN_APPS)
+		return 0;
+
+	writel_relaxed(SPINLOCK_TOKEN_APPS, lock);
+	smp_mb();
+	owner = readl_relaxed(lock);
+	hw_spinlocks[id] = owner;
+	return owner == SPINLOCK_TOKEN_APPS;
+}
+
+static void __raw_remote_sfpb_spin_unlock(raw_remote_spinlock_t *lock)
+{
+	int lock_owner;
+
+	lock_owner = readl_relaxed(lock);
+	if (lock_owner != SPINLOCK_TOKEN_APPS) {
+		pr_err("%s: spinlock not owned by Apps (actual owner is %d)\n",
+				__func__, lock_owner);
+	}
+
+	writel_relaxed(0, lock);
+	smp_mb();
+}
+
+static void __raw_remote_sfpb_spin_lock_rlock_id(raw_remote_spinlock_t *lock,
+						 uint32_t tid)
+{
+	if (unlikely(!tid)) {
+		pr_err("%s: unsupported rlock tid=0\n", __func__);
+		BUG();
+	}
+
+	do {
+		writel_relaxed(tid, lock);
+		smp_mb();
+	} while (readl_relaxed(lock) != tid);
+}
+
+static void __raw_remote_sfpb_spin_unlock_rlock(raw_remote_spinlock_t *lock)
+{
+	writel_relaxed(0, lock);
+	smp_mb();
+}
+
+static int __raw_remote_sfpb_get_hw_spinlocks_element(
+		raw_remote_spinlock_t *lock)
+{
+	return hw_spinlocks[remote_spinlock_get_lock_id(lock)];
+}
+
+/* end sfpb implementation -------------------------------------------------- */
+
+/* common spinlock API ------------------------------------------------------ */
+/**
+ * Release spinlock if it is owned by @pid.
+ *
+ * This is only to be used for situations where the processor owning
+ * the spinlock has crashed and the spinlock must be released.
+ *
+ * @lock: lock structure
+ * @pid: processor ID of processor to release
+ */
+static int __raw_remote_gen_spin_release(raw_remote_spinlock_t *lock,
+		uint32_t pid)
+{
+	int ret = 1;
+
+	/*
+	 * Since 0 is reserved for an empty lock and the PIDs start at 0, the
+	 * value PID + 1 is written to the lock.
+	 */
+	if (readl_relaxed(&lock->lock) == (pid + 1)) {
+		writel_relaxed(0, &lock->lock);
+		wmb();
+		ret = 0;
+	}
+	return ret;
+}
+
+/**
+ * Return owner of the spinlock.
+ *
+ * @lock: pointer to lock structure
+ * @returns: >= 0 owned PID; < 0 for error case
+ *
+ * Used for testing.  PID's are assumed to be 31 bits or less.
+ */
+static int __raw_remote_gen_spin_owner(raw_remote_spinlock_t *lock)
+{
+	int owner;
+	rmb();
+
+	owner = readl_relaxed(&lock->lock);
+	if (owner)
+		return owner - 1;
+	else
+		return -ENODEV;
+}
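+
+/*
+ * Worked example of the PID + 1 token encoding (illustrative): a raw lock
+ * word of 0 means unlocked (owner reports -ENODEV), while a lock word of 3
+ * means the processor with PID 2 holds the lock, and
+ * __raw_remote_gen_spin_release(lock, 2) clears it.
+ */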
+
+static int dt_node_is_valid(const struct device_node *node)
+{
+	const char *status;
+	int statlen;
+
+	status = of_get_property(node, "status", &statlen);
+	if (status == NULL)
+		return 1;
+
+	if (statlen > 0) {
+		if (!strcmp(status, "okay") || !strcmp(status, "ok"))
+			return 1;
+	}
+
+	return 0;
+}
+
+static void initialize_ops(void)
+{
+	struct device_node *node;
+
+	/*
+	 * of_find_compatible_node() returns a valid pointer even if
+	 * the status property is "disabled", so the validity needs
+	 * to be checked
+	 */
+	node = of_find_compatible_node(NULL, NULL, sfpb_compatible_string);
+	if (node && dt_node_is_valid(node)) {
+		current_ops.lock = __raw_remote_sfpb_spin_lock;
+		current_ops.unlock = __raw_remote_sfpb_spin_unlock;
+		current_ops.trylock = __raw_remote_sfpb_spin_trylock;
+		current_ops.release = __raw_remote_gen_spin_release;
+		current_ops.owner = __raw_remote_gen_spin_owner;
+		current_ops.lock_rlock_id =
+				__raw_remote_sfpb_spin_lock_rlock_id;
+		current_ops.unlock_rlock = __raw_remote_sfpb_spin_unlock_rlock;
+		current_ops.get_hw_spinlocks_element =
+			__raw_remote_sfpb_get_hw_spinlocks_element;
+		is_hw_lock_type = 1;
+		return;
+	}
+
+	node = of_find_compatible_node(NULL, NULL, ldrex_compatible_string);
+	if (node && dt_node_is_valid(node)) {
+		current_ops.lock = __raw_remote_ex_spin_lock;
+		current_ops.unlock = __raw_remote_ex_spin_unlock;
+		current_ops.trylock = __raw_remote_ex_spin_trylock;
+		current_ops.release = __raw_remote_gen_spin_release;
+		current_ops.owner = __raw_remote_gen_spin_owner;
+		is_hw_lock_type = 0;
+		return;
+	}
+
+	current_ops.lock = __raw_remote_ex_spin_lock;
+	current_ops.unlock = __raw_remote_ex_spin_unlock;
+	current_ops.trylock = __raw_remote_ex_spin_trylock;
+	current_ops.release = __raw_remote_gen_spin_release;
+	current_ops.owner = __raw_remote_gen_spin_owner;
+	is_hw_lock_type = 0;
+	pr_warn("Falling back to LDREX remote spinlock implementation\n");
+}
+
+/**
+ * Release all spinlocks owned by @pid.
+ *
+ * This is only to be used for situations where the processor owning
+ * spinlocks has crashed and the spinlocks must be released.
+ *
+ * @pid - processor ID of processor to release
+ */
+static void remote_spin_release_all_locks(uint32_t pid, int count)
+{
+	int n;
+	_remote_spinlock_t lock;
+
+	if (pid >= REMOTE_SPINLOCK_NUM_PID) {
+		pr_err("%s: Unsupported PID %d\n", __func__, pid);
+		return;
+	}
+
+	for (n = 0; n < count; ++n) {
+		if (remote_spinlock_init_address(n, &lock) == 0)
+			_remote_spin_release(&lock, pid);
+	}
+}
+
+void _remote_spin_release_all(uint32_t pid)
+{
+	remote_spin_release_all_locks(pid, lock_count);
+}
+
+#define SMEM_SPINLOCK_COUNT 8
+#define SMEM_SPINLOCK_ARRAY_SIZE (SMEM_SPINLOCK_COUNT * sizeof(uint32_t))
+
+static int remote_spinlock_init_address_smem(int id, _remote_spinlock_t *lock)
+{
+	_remote_spinlock_t spinlock_start;
+
+	if (id >= SMEM_SPINLOCK_COUNT)
+		return -EINVAL;
+
+	spinlock_start = smem_find(SMEM_SPINLOCK_ARRAY,
+				    SMEM_SPINLOCK_ARRAY_SIZE,
+				    0,
+				    SMEM_ANY_HOST_FLAG);
+	if (spinlock_start == NULL)
+		return -ENXIO;
+
+	*lock = spinlock_start + id;
+
+	lock_count = SMEM_SPINLOCK_COUNT;
+
+	return 0;
+}
+
+static int remote_spinlock_init_address(int id, _remote_spinlock_t *lock)
+{
+	if (is_hw_lock_type)
+		return remote_spinlock_init_address_hw(id, lock);
+	else
+		return remote_spinlock_init_address_smem(id, lock);
+}
+
+int _remote_spin_lock_init(remote_spinlock_id_t id, _remote_spinlock_t *lock)
+{
+	BUG_ON(id == NULL);
+
+	/*
+	 * Optimistic locking.  Init only needs to be done once by the first
+	 * caller.  After that, serializing inits between different callers
+	 * is unnecessary.  The second check after the lock ensures init
+	 * wasn't previously completed by someone else before the lock could
+	 * be grabbed.
+	 */
+	if (!current_ops.lock) {
+		mutex_lock(&ops_init_lock);
+		if (!current_ops.lock)
+			initialize_ops();
+		mutex_unlock(&ops_init_lock);
+	}
+
+	if (id[0] == 'S' && id[1] == ':') {
+		/* Single-digit lock ID follows "S:" */
+		BUG_ON(id[3] != '\0');
+
+		return remote_spinlock_init_address((((uint8_t)id[2])-'0'),
+			lock);
+	} else {
+		return -EINVAL;
+	}
+}
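+
+/*
+ * Usage sketch (illustrative only): lock IDs are strings of the form "S:<n>"
+ * with a single decimal digit <n>, e.g.:
+ *
+ *	_remote_spinlock_t lock;
+ *
+ *	if (!_remote_spin_lock_init("S:7", &lock))
+ *		_remote_spin_lock(&lock);
+ */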
+
+/*
+ * lock comes in as a pointer to a pointer to the lock location, so it must
+ * be dereferenced and cast to the right type for the actual lock
+ * implementation functions
+ */
+void _remote_spin_lock(_remote_spinlock_t *lock)
+{
+	if (unlikely(!current_ops.lock))
+		BUG();
+	current_ops.lock((raw_remote_spinlock_t *)(*lock));
+}
+EXPORT_SYMBOL(_remote_spin_lock);
+
+void _remote_spin_unlock(_remote_spinlock_t *lock)
+{
+	if (unlikely(!current_ops.unlock))
+		BUG();
+	current_ops.unlock((raw_remote_spinlock_t *)(*lock));
+}
+EXPORT_SYMBOL(_remote_spin_unlock);
+
+int _remote_spin_trylock(_remote_spinlock_t *lock)
+{
+	if (unlikely(!current_ops.trylock))
+		BUG();
+	return current_ops.trylock((raw_remote_spinlock_t *)(*lock));
+}
+EXPORT_SYMBOL(_remote_spin_trylock);
+
+int _remote_spin_release(_remote_spinlock_t *lock, uint32_t pid)
+{
+	if (unlikely(!current_ops.release))
+		BUG();
+	return current_ops.release((raw_remote_spinlock_t *)(*lock), pid);
+}
+EXPORT_SYMBOL(_remote_spin_release);
+
+int _remote_spin_owner(_remote_spinlock_t *lock)
+{
+	if (unlikely(!current_ops.owner))
+		BUG();
+	return current_ops.owner((raw_remote_spinlock_t *)(*lock));
+}
+EXPORT_SYMBOL(_remote_spin_owner);
+
+void _remote_spin_lock_rlock_id(_remote_spinlock_t *lock, uint32_t tid)
+{
+	if (unlikely(!current_ops.lock_rlock_id))
+		BUG();
+	current_ops.lock_rlock_id((raw_remote_spinlock_t *)(*lock), tid);
+}
+EXPORT_SYMBOL(_remote_spin_lock_rlock_id);
+
+void _remote_spin_unlock_rlock(_remote_spinlock_t *lock)
+{
+	if (unlikely(!current_ops.unlock_rlock))
+		BUG();
+	current_ops.unlock_rlock((raw_remote_spinlock_t *)(*lock));
+}
+EXPORT_SYMBOL(_remote_spin_unlock_rlock);
+
+int _remote_spin_get_hw_spinlocks_element(_remote_spinlock_t *lock)
+{
+	return current_ops.get_hw_spinlocks_element(
+			(raw_remote_spinlock_t *)(*lock));
+}
+EXPORT_SYMBOL(_remote_spin_get_hw_spinlocks_element);
+
+/* end common spinlock API -------------------------------------------------- */
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/i2c/busses/i2c-msm-v2.c	2019-10-29 09:26:23.733204059 +0100
@@ -0,0 +1,3050 @@
+/* Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+/*
+ * I2C controller driver for Qualcomm Technologies Inc platforms
+ */
+
+#define pr_fmt(fmt) "#%d " fmt "\n", __LINE__
+
+#include <linux/module.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/mutex.h>
+#include <linux/timer.h>
+#include <linux/time.h>
+#include <linux/slab.h>
+#include <linux/pm_runtime.h>
+#include <linux/dma-mapping.h>
+#include <linux/i2c.h>
+#include <linux/of.h>
+#include <linux/msm-sps.h>
+#include <linux/msm-bus.h>
+#include <linux/msm-bus-board.h>
+#include <linux/i2c/i2c-msm-v2.h>
+
+#ifdef DEBUG
+static const enum msm_i2_debug_level DEFAULT_DBG_LVL = MSM_DBG;
+#else
+static const enum msm_i2_debug_level DEFAULT_DBG_LVL = MSM_ERR;
+#endif
+
+/* Forward declarations */
+static bool i2c_msm_xfer_next_buf(struct i2c_msm_ctrl *ctrl);
+static int i2c_msm_xfer_wait_for_completion(struct i2c_msm_ctrl *ctrl,
+						struct completion *complete);
+static int  i2c_msm_pm_resume(struct device *dev);
+static void i2c_msm_pm_suspend(struct device *dev);
+static void i2c_msm_clk_path_init(struct i2c_msm_ctrl *ctrl);
+static void i2c_msm_pm_pinctrl_state(struct i2c_msm_ctrl *ctrl,
+						bool runtime_active);
+
+/* string table for enum i2c_msm_xfer_mode_id */
+const char * const i2c_msm_mode_str_tbl[] = {
+	"FIFO", "BLOCK", "DMA", "None",
+};
+
+static const u32 i2c_msm_fifo_block_sz_tbl[] = {16, 16, 32, 0};
+
+/* from enum i2c_msm_xfer_mode_id to qup_io_modes register values */
+static const u32 i2c_msm_mode_to_reg_tbl[] = {
+	0x0, /* map I2C_MSM_XFER_MODE_FIFO -> binary 00 */
+	0x1, /* map I2C_MSM_XFER_MODE_BLOCK -> binary 01 */
+	0x3  /* map I2C_MSM_XFER_MODE_DMA -> binary 11 */
+};
+
+const char *i2c_msm_err_str_table[] = {
+	[I2C_MSM_NO_ERR]     = "NONE",
+	[I2C_MSM_ERR_NACK]   = "NACK: slave not responding, ensure it is powered",
+	[I2C_MSM_ERR_ARB_LOST] = "ARB_LOST",
+	[I2C_MSM_ERR_BUS_ERR] = "BUS ERROR:noisy bus/unexpected start/stop tag",
+	[I2C_MSM_ERR_TIMEOUT]  = "TIMEOUT_ERROR",
+	[I2C_MSM_ERR_CORE_CLK] = "CLOCK OFF: Check Core Clock",
+	[I2C_MSM_ERR_OVR_UNDR_RUN] = "OVER_UNDER_RUN_ERROR",
+};
+
+static void i2c_msm_dbg_dump_diag(struct i2c_msm_ctrl *ctrl,
+				bool use_param_vals, u32 status, u32 qup_op)
+{
+	struct i2c_msm_xfer *xfer = &ctrl->xfer;
+	const char *str = i2c_msm_err_str_table[xfer->err];
+	char buf[I2C_MSM_REG_2_STR_BUF_SZ];
+
+	if (!use_param_vals) {
+		void __iomem        *base = ctrl->rsrcs.base;
+		status = readl_relaxed(base + QUP_I2C_STATUS);
+		qup_op = readl_relaxed(base + QUP_OPERATIONAL);
+	}
+
+	if (xfer->err == I2C_MSM_ERR_TIMEOUT) {
+		/*
+		 * if we are not the bus master or SDA/SCL is low then it may be
+		 * that slave is pulling the lines low. Otherwise it is likely a
+		 * GPIO issue
+		 */
+		if (!(status & QUP_BUS_MASTER))
+			snprintf(buf, I2C_MSM_REG_2_STR_BUF_SZ,
+				"%s(val:%dmsec) misconfigured GPIO or slave pulling bus line(s) low\n",
+				str, jiffies_to_msecs(xfer->timeout));
+		 else
+			snprintf(buf, I2C_MSM_REG_2_STR_BUF_SZ,
+			"%s(val:%dmsec)", str, jiffies_to_msecs(xfer->timeout));
+
+		str = buf;
+	}
+
+	/* dump xfer details */
+	dev_printk(xfer->err == I2C_MSM_ERR_NACK ? KERN_DEBUG : KERN_INFO, ctrl->dev,
+		"%s: msgs(n:%d cur:%d %s) bc(rx:%zu tx:%zu) mode:%s slv_addr:0x%0x MSTR_STS:0x%08x OPER:0x%08x\n",
+		str, xfer->msg_cnt, xfer->cur_buf.msg_idx,
+		xfer->cur_buf.is_rx ? "rx" : "tx", xfer->rx_cnt, xfer->tx_cnt,
+		i2c_msm_mode_str_tbl[xfer->mode_id], xfer->msgs->addr,
+		status, qup_op);
+}
+
+static u32 i2c_msm_reg_io_modes_out_blk_sz(u32 qup_io_modes)
+{
+	return i2c_msm_fifo_block_sz_tbl[qup_io_modes & 0x3];
+}
+
+static u32 i2c_msm_reg_io_modes_in_blk_sz(u32 qup_io_modes)
+{
+	return i2c_msm_fifo_block_sz_tbl[BITS_AT(qup_io_modes, 5, 2)];
+}
+
+static const u32 i2c_msm_fifo_sz_table[] = {2, 4, 8, 16};
+
+static void i2c_msm_qup_fifo_calc_size(struct i2c_msm_ctrl *ctrl)
+{
+	u32 reg_data, output_fifo_size, input_fifo_size;
+	struct i2c_msm_xfer_mode_fifo *fifo = &ctrl->xfer.fifo;
+
+	/* Read the FIFO sizes only once; they are hard-wired and never change */
+	if (fifo->input_fifo_sz && fifo->output_fifo_sz)
+		return;
+
+	reg_data = readl_relaxed(ctrl->rsrcs.base + QUP_IO_MODES);
+	output_fifo_size  = BITS_AT(reg_data, 2, 2);
+	input_fifo_size   = BITS_AT(reg_data, 7, 2);
+
+	fifo->input_fifo_sz = i2c_msm_reg_io_modes_in_blk_sz(reg_data) *
+					i2c_msm_fifo_sz_table[input_fifo_size];
+	fifo->output_fifo_sz = i2c_msm_reg_io_modes_out_blk_sz(reg_data) *
+					i2c_msm_fifo_sz_table[output_fifo_size];
+
+	i2c_msm_dbg(ctrl, MSM_PROF, "QUP input-sz:%zu, output-sz:%zu",
+			fifo->input_fifo_sz, fifo->output_fifo_sz);
+}
+
+/*
+ * i2c_msm_tag_byte: accessor for tag as four bytes array
+ */
+static u8 *i2c_msm_tag_byte(struct i2c_msm_tag *tag, int byte_n)
+{
+	return ((u8 *)tag) + byte_n;
+}
+
+/*
+ * i2c_msm_buf_to_ptr: translates a xfer buf to a pointer into the i2c_msg data
+ */
+static u8 *i2c_msm_buf_to_ptr(struct i2c_msm_xfer_buf *buf)
+{
+	struct i2c_msm_xfer *xfer =
+				container_of(buf, struct i2c_msm_xfer, cur_buf);
+	struct i2c_msg *msg = xfer->msgs + buf->msg_idx;
+	return msg->buf + buf->byte_idx;
+}
+
+/*
+ * tag_lookup_table[is_new_addr][is_last][is_rx]
+ * @is_new_addr Is start tag required? (which requires two more bytes.)
+ * @is_last     Use the XXXXX_N_STOP tag variant
+ * @is_rx       READ/WRITE
+ */
+static const struct i2c_msm_tag tag_lookup_table[2][2][2] = {
+	{{{QUP_TAG2_DATA_WRITE                                   , 2},
+	   {QUP_TAG2_DATA_READ                                   , 2} },
+	/* last buffer */
+	  {{QUP_TAG2_DATA_WRITE_N_STOP                            , 2},
+	   {QUP_TAG2_DATA_READ_N_STOP                             , 2} } } ,
+	/* new addr */
+	 {{{QUP_TAG2_START | (QUP_TAG2_DATA_WRITE           << 16), 4},
+	   {QUP_TAG2_START | (QUP_TAG2_DATA_READ            << 16), 4} },
+	/* last buffer + new addr */
+	  {{QUP_TAG2_START | (QUP_TAG2_DATA_WRITE_N_STOP    << 16), 4},
+	   {QUP_TAG2_START | (QUP_TAG2_DATA_READ_N_STOP     << 16), 4} } },
+};
+
+/*
+ * i2c_msm_tag_create: format a qup tag ver2
+ */
+static struct i2c_msm_tag i2c_msm_tag_create(bool is_new_addr, bool is_last_buf,
+					bool is_rx, u8 buf_len, u8 slave_addr)
+{
+	struct i2c_msm_tag tag;
+	/* Normalize booleans to 1 or 0 */
+	is_new_addr = is_new_addr ? 1 : 0;
+	is_last_buf = is_last_buf ? 1 : 0;
+	is_rx = is_rx ? 1 : 0;
+
+	tag = tag_lookup_table[is_new_addr][is_last_buf][is_rx];
+	/* fill in the non-const value: the address and the length */
+	if (tag.len == I2C_MSM_TAG2_MAX_LEN) {
+		*i2c_msm_tag_byte(&tag, 1) = slave_addr;
+		*i2c_msm_tag_byte(&tag, 3) = buf_len;
+	} else {
+		*i2c_msm_tag_byte(&tag, 1) = buf_len;
+	}
+
+	return tag;
+}
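+
+/*
+ * Worked example (illustrative, assuming a little-endian layout of tag.val):
+ * i2c_msm_tag_create(true, true, true, 4, 0x52) selects the "new addr + last
+ * buffer + rx" entry, QUP_TAG2_START | (QUP_TAG2_DATA_READ_N_STOP << 16)
+ * with len 4, then patches byte 1 with the slave address (0x52) and byte 3
+ * with the buffer length (4).
+ */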
+
+static int
+i2c_msm_qup_state_wait_valid(struct i2c_msm_ctrl *ctrl,
+			enum i2c_msm_qup_state state, bool only_valid)
+{
+	u32 status;
+	void __iomem  *base     = ctrl->rsrcs.base;
+	int ret      = 0;
+	int read_cnt = 0;
+
+	do {
+		status = readl_relaxed(base + QUP_STATE);
+		++read_cnt;
+
+		/*
+		 * If only valid bit needs to be checked, requested state is
+		 * 'don't care'
+		 */
+		if (status & QUP_STATE_VALID) {
+			if (only_valid)
+				goto poll_valid_end;
+			else if ((state & QUP_I2C_MAST_GEN) &&
+					(status & QUP_I2C_MAST_GEN))
+				goto poll_valid_end;
+			else if ((status & QUP_STATE_MASK) == state)
+				goto poll_valid_end;
+		}
+
+		/*
+		 * Sleep for 1-1.5 ms every 100 iterations and break once the
+		 * count crosses 1500; this allows roughly 15-22 msec for the
+		 * core to reach a valid state.
+		 */
+		if (!(read_cnt % 100))
+			usleep_range(1000, 1500);
+	} while (read_cnt <= 1500);
+
+	ret = -ETIMEDOUT;
+	dev_err(ctrl->dev,
+		"error timeout on polling for valid state. check core_clk\n");
+
+poll_valid_end:
+	if (!only_valid)
+		i2c_msm_prof_evnt_add(ctrl, MSM_DBG, I2C_MSM_VALID_END,
+				/* aggregate ret and state */
+				(((-ret) & 0xff) | ((state & 0xf) << 16)),
+				read_cnt, status);
+
+	return ret;
+}
+
+static int i2c_msm_qup_state_set(struct i2c_msm_ctrl *ctrl,
+						enum i2c_msm_qup_state state)
+{
+	if (i2c_msm_qup_state_wait_valid(ctrl, 0, true))
+		return -EIO;
+
+	writel_relaxed(state, ctrl->rsrcs.base + QUP_STATE);
+
+	if (i2c_msm_qup_state_wait_valid(ctrl, state, false))
+		return -EIO;
+
+	return 0;
+}
+
+static int i2c_msm_qup_sw_reset(struct i2c_msm_ctrl *ctrl)
+{
+	int ret;
+
+	writel_relaxed(1, ctrl->rsrcs.base + QUP_SW_RESET);
+	/*
+	 * Ensure that the QUP reset-state write completes before polling for
+	 * the reset state to become valid.
+	 */
+	wmb();
+	ret = i2c_msm_qup_state_wait_valid(ctrl, QUP_STATE_RESET, false);
+	if (ret) {
+		if (atomic_read(&ctrl->xfer.is_active))
+			ctrl->xfer.err = I2C_MSM_ERR_CORE_CLK;
+		dev_err(ctrl->dev, "error on issuing QUP software-reset\n");
+	}
+	return ret;
+}
+
+/*
+ * i2c_msm_qup_xfer_init_reset_state: setup QUP registers for the next run state
+ * @pre QUP must be in reset state.
+ * @pre xfer->mode_id is set to the chosen transfer state
+ * @post update values in QUP_MX_*_COUNT, QUP_CONFIG, QUP_IO_MODES,
+ *       and QUP_OPERATIONAL_MASK registers
+ */
+static void
+i2c_msm_qup_xfer_init_reset_state(struct i2c_msm_ctrl *ctrl)
+{
+	struct i2c_msm_xfer *xfer = &ctrl->xfer;
+	void __iomem * const base = ctrl->rsrcs.base;
+	u32  mx_rd_cnt     = 0;
+	u32  mx_wr_cnt     = 0;
+	u32  mx_in_cnt     = 0;
+	u32  mx_out_cnt    = 0;
+	u32  no_input      = 0;
+	u32  no_output     = 0;
+	u32  input_mode    = i2c_msm_mode_to_reg_tbl[xfer->mode_id] << 12;
+	u32  output_mode   = i2c_msm_mode_to_reg_tbl[xfer->mode_id] << 10;
+	u32  config_reg;
+	u32  io_modes_reg;
+	u32  op_mask;
+	u32  rx_cnt = 0;
+	u32  tx_cnt = 0;
+	/*
+	 * DMA mode:
+	 * 1. QUP_MX_*_COUNT must be zero in all cases.
+	 * 2. both QUP_NO_INPUT and QUP_NO_OUPUT are unset.
+	 * FIFO mode:
+	 * 1. QUP_MX_INPUT_COUNT and QUP_MX_OUTPUT_COUNT are zero
+	 * 2. QUP_MX_READ_COUNT and QUP_MX_WRITE_COUNT reflect true count
+	 * 3. QUP_NO_INPUT and QUP_NO_OUPUT are set according to counts
+	 */
+	if (xfer->mode_id != I2C_MSM_XFER_MODE_DMA) {
+		rx_cnt   = xfer->rx_cnt + xfer->rx_ovrhd_cnt;
+		tx_cnt   = xfer->tx_cnt + xfer->tx_ovrhd_cnt;
+		no_input = rx_cnt  ? 0 : QUP_NO_INPUT;
+
+		switch (xfer->mode_id) {
+		case I2C_MSM_XFER_MODE_FIFO:
+			mx_rd_cnt  = rx_cnt;
+			mx_wr_cnt  = tx_cnt;
+			break;
+		case I2C_MSM_XFER_MODE_BLOCK:
+			mx_in_cnt  = rx_cnt;
+			mx_out_cnt = tx_cnt;
+			break;
+		default:
+			break;
+		}
+	}
+
+	/* init DMA/BLOCK modes counter */
+	writel_relaxed(mx_in_cnt,  base + QUP_MX_INPUT_COUNT);
+	writel_relaxed(mx_out_cnt, base + QUP_MX_OUTPUT_COUNT);
+
+	/* init FIFO mode counters */
+	writel_relaxed(mx_rd_cnt, base + QUP_MX_READ_COUNT);
+	writel_relaxed(mx_wr_cnt, base + QUP_MX_WRITE_COUNT);
+
+	/*
+	 * Set QUP mini-core to I2C tags ver-2
+	 * sets NO_INPUT / NO_OUTPUT as needed
+	 */
+	config_reg = readl_relaxed(base + QUP_CONFIG);
+	config_reg &=
+	      ~(QUP_NO_INPUT | QUP_NO_OUPUT | QUP_N_MASK | QUP_MINI_CORE_MASK);
+	config_reg |= (no_input | no_output | QUP_N_VAL |
+							QUP_MINI_CORE_I2C_VAL);
+	writel_relaxed(config_reg, base + QUP_CONFIG);
+
+	/*
+	 * Turn on packing/unpacking and set the input/output transfer-mode
+	 * bits as needed
+	 */
+	io_modes_reg = readl_relaxed(base + QUP_IO_MODES);
+	io_modes_reg &=
+	   ~(QUP_INPUT_MODE | QUP_OUTPUT_MODE | QUP_PACK_EN | QUP_UNPACK_EN
+	     | QUP_OUTPUT_BIT_SHIFT_EN);
+	io_modes_reg |=
+	   (input_mode | output_mode | QUP_PACK_EN | QUP_UNPACK_EN);
+	writel_relaxed(io_modes_reg, base + QUP_IO_MODES);
+
+	/*
+	 * mask the INPUT and OUTPUT service flags to prevent IRQs on FIFO
+	 * status changes during DMA-mode transfers
+	 */
+	op_mask = (xfer->mode_id == I2C_MSM_XFER_MODE_DMA) ?
+		    (QUP_INPUT_SERVICE_MASK | QUP_OUTPUT_SERVICE_MASK) : 0;
+	writel_relaxed(op_mask, base + QUP_OPERATIONAL_MASK);
+	/* Ensure that QUP configuration is written before leaving this func */
+	wmb();
+}
+
+/*
+ * i2c_msm_clk_div_fld:
+ * @clk_freq_out output clock frequency
+ * @fs_div fs divider value
+ * @ht_div high time divider value
+ */
+struct i2c_msm_clk_div_fld {
+	u32                clk_freq_out;
+	u8                 fs_div;
+	u8                 ht_div;
+};
+
+/*
+ * divider values as per HW Designers
+ *
+ * formula is:
+ *
+ * f_sck = cxo_freq / (fs_div + ht_div + 6)
+ *
+ * where cxo_freq = 19.2 Mhz.
+ *
+ * the previously hardcoded configs seem to have fs_div at 2x ht_div.
+ *
+ * the minimum frequency seems to be just south of 50 kHz, but not much below.
+ *
+ * See page 851-852 of MSM8998 Hardware Register Description.
+ */
+static struct i2c_msm_clk_div_fld i2c_msm_clk_div_map[] = {
+	{KHz(37), 255, 255},
+	{KHz(50), 189, 189},
+	{KHz(100), 93, 93},
+	{KHz(400),  28, 14},
+	{KHz(1000),  8,  5},
+};
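+
+/*
+ * Sanity check of the table against the formula above (cxo_freq = 19.2 MHz,
+ * all values in kHz):
+ *	400: 19200 / (28 + 14 + 6)   = 19200 / 48  = 400
+ *	100: 19200 / (93 + 93 + 6)   = 19200 / 192 = 100
+ *	 37: 19200 / (255 + 255 + 6) = 19200 / 516 ~= 37.2
+ */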
+
+/*
+ * @return zero on success
+ * @fs_div when zero use value from table above, otherwise use given value
+ * @ht_div when zero use value from table above, otherwise use given value
+ *
+ * Format the value to be configured into the clock divider register. This
+ * register is configured every time the core is moved from reset to run state.
+ */
+static int i2c_msm_set_mstr_clk_ctl(struct i2c_msm_ctrl *ctrl, int fs_div,
+			int ht_div, int noise_rjct_scl, int noise_rjct_sda)
+{
+	int ret = 0;
+	int i;
+	u32 reg_val = 0;
+	struct i2c_msm_clk_div_fld *itr = i2c_msm_clk_div_map;
+
+	/* set noise rejection values for scl and sda */
+	reg_val = I2C_MSM_SCL_NOISE_REJECTION(reg_val, noise_rjct_scl);
+	reg_val = I2C_MSM_SDA_NOISE_REJECTION(reg_val, noise_rjct_sda);
+
+	/*
+	 * find the matching freq and set divider values, unless they are
+	 * forced via the parameter list
+	 */
+	for (i = 0; i < ARRAY_SIZE(i2c_msm_clk_div_map); ++i, ++itr) {
+		if (ctrl->rsrcs.clk_freq_out == itr->clk_freq_out) {
+			if (!fs_div)
+				fs_div = itr->fs_div;
+			if (!ht_div)
+				ht_div = itr->ht_div;
+			break;
+		}
+	}
+	if (!fs_div) {
+		dev_err(ctrl->dev, "For non-standard clock freq:%dKHz\n"
+		"clk divider value fs_div should be supplied by the client\n"
+		"through the device tree\n", (ctrl->rsrcs.clk_freq_out / 1000));
+		return -EINVAL;
+	}
+
+	/* format values in clk-ctl cache */
+	ctrl->mstr_clk_ctl = (reg_val & (~0xff07ff)) | ((ht_div & 0xff) << 16)
+							|(fs_div & 0xff);
+
+	return ret;
+}
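+
+/*
+ * Layout of the mstr_clk_ctl value composed above: fs_div occupies bits
+ * [7:0] and ht_div bits [23:16], while the noise-rejection bits filled in
+ * by the I2C_MSM_*_NOISE_REJECTION() macros presumably live in the 0x0700
+ * region of the 0xff07ff mask. For example, the 400 kHz table entry
+ * (fs_div = 28, ht_div = 14) yields 0x000e001c before the noise-rejection
+ * bits are OR'ed in.
+ */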
+
+/*
+ * i2c_msm_qup_xfer_init_run_state: set qup regs which must be set *after* reset
+ */
+static void i2c_msm_qup_xfer_init_run_state(struct i2c_msm_ctrl *ctrl)
+{
+	void __iomem *base = ctrl->rsrcs.base;
+	writel_relaxed(ctrl->mstr_clk_ctl, base + QUP_I2C_MASTER_CLK_CTL);
+
+	/* Ensure that QUP configuration is written before leaving this func */
+	wmb();
+
+	if (ctrl->dbgfs.dbg_lvl == MSM_DBG) {
+		dev_info(ctrl->dev,
+			"QUP state after programming for next transfers\n");
+		i2c_msm_dbg_qup_reg_dump(ctrl);
+	}
+}
+
+static void i2c_msm_fifo_wr_word(struct i2c_msm_ctrl *ctrl, u32 data)
+{
+	writel_relaxed(data, ctrl->rsrcs.base + QUP_OUT_FIFO_BASE);
+	i2c_msm_dbg(ctrl, MSM_DBG, "OUT-FIFO:0x%08x", data);
+}
+
+static u32 i2c_msm_fifo_rd_word(struct i2c_msm_ctrl *ctrl, u32 *data)
+{
+	u32 val;
+
+	val = readl_relaxed(ctrl->rsrcs.base + QUP_IN_FIFO_BASE);
+	i2c_msm_dbg(ctrl, MSM_DBG, "IN-FIFO :0x%08x", val);
+
+	if (data)
+		*data = val;
+
+	return val;
+}
+
+/*
+ * i2c_msm_fifo_wr_buf_flush: write the partially filled out_buf word to FIFO
+ */
+static void i2c_msm_fifo_wr_buf_flush(struct i2c_msm_ctrl *ctrl)
+{
+	struct i2c_msm_xfer_mode_fifo *fifo = &ctrl->xfer.fifo;
+	u32 *word;
+
+	if (!fifo->out_buf_idx)
+		return;
+
+	word = (u32 *) fifo->out_buf;
+	i2c_msm_fifo_wr_word(ctrl, *word);
+	fifo->out_buf_idx = 0;
+	*word = 0;
+}
+
+/*
+ * i2c_msm_fifo_wr_buf:
+ *
+ * @len buf size (in bytes)
+ * @return number of bytes from buf which have been processed (written to
+ *         FIFO or kept in out buffer and will be written later)
+ */
+static size_t
+i2c_msm_fifo_wr_buf(struct i2c_msm_ctrl *ctrl, u8 *buf, size_t len)
+{
+	struct i2c_msm_xfer_mode_fifo *fifo = &ctrl->xfer.fifo;
+	int i;
+
+	for (i = 0 ; i < len; ++i, ++buf) {
+
+		fifo->out_buf[fifo->out_buf_idx] = *buf;
+		++fifo->out_buf_idx;
+
+		if (fifo->out_buf_idx == 4) {
+			u32 *word = (u32 *) fifo->out_buf;
+
+			i2c_msm_fifo_wr_word(ctrl, *word);
+			fifo->out_buf_idx = 0;
+			*word = 0;
+		}
+	}
+	return i;
+}
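+
+/*
+ * Example of the packing above, assuming a little-endian core: writing
+ * six bytes b0..b5 emits one 32-bit FIFO word (b0 in the least
+ * significant byte) as soon as b3 arrives, while b4 and b5 stay cached
+ * in out_buf until i2c_msm_fifo_wr_buf_flush() writes the partially
+ * filled (zero-padded) word.
+ */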
+
+static size_t i2c_msm_fifo_xfer_wr_tag(struct i2c_msm_ctrl *ctrl)
+{
+	struct i2c_msm_xfer_buf *buf = &ctrl->xfer.cur_buf;
+	size_t len = 0;
+
+	if (ctrl->dbgfs.dbg_lvl >= MSM_DBG) {
+		char str[I2C_MSM_REG_2_STR_BUF_SZ];
+		dev_info(ctrl->dev, "tag.val:0x%llx tag.len:%d %s\n",
+			buf->out_tag.val, buf->out_tag.len,
+			i2c_msm_dbg_tag_to_str(&buf->out_tag, str,
+								sizeof(str)));
+	}
+
+	if (buf->out_tag.len) {
+		len = i2c_msm_fifo_wr_buf(ctrl, (u8 *) &buf->out_tag.val,
+							buf->out_tag.len);
+
+		if (len < buf->out_tag.len)
+			goto done;
+
+		buf->out_tag = (struct i2c_msm_tag) {0};
+	}
+done:
+	return len;
+}
+
+/*
+ * i2c_msm_fifo_read: reads up to fifo size into user's buf
+ */
+static void i2c_msm_fifo_read_xfer_buf(struct i2c_msm_ctrl *ctrl)
+{
+	struct i2c_msm_xfer_buf *buf = &ctrl->xfer.cur_buf;
+	struct i2c_msg          *msg = ctrl->xfer.msgs + buf->msg_idx;
+	u8 *p_tag_val   = (u8 *) &buf->in_tag.val;
+	int buf_need_bc = msg->len - buf->byte_idx;
+	u8  word[4];
+	int copy_bc;
+	int word_idx;
+	int word_bc;
+
+	if (!buf->is_rx)
+		return;
+
+	while (buf_need_bc || buf->in_tag.len) {
+		i2c_msm_fifo_rd_word(ctrl, (u32 *) word);
+		word_bc  = sizeof(word);
+		word_idx = 0;
+
+		/*
+		 * copy bytes from fifo word to tag.
+		 * @note buf->in_tag.len (max 2 bytes) < word_bc (4 bytes)
+		 */
+		if (buf->in_tag.len) {
+			copy_bc = min_t(int, word_bc, buf->in_tag.len);
+
+			memcpy(p_tag_val + buf->in_tag.len, word, copy_bc);
+
+			word_idx        += copy_bc;
+			word_bc         -= copy_bc;
+			buf->in_tag.len -= copy_bc;
+
+			if ((ctrl->dbgfs.dbg_lvl >= MSM_DBG) &&
+							!buf->in_tag.len) {
+				char str[64];
+				dev_info(ctrl->dev, "%s\n",
+					i2c_msm_dbg_tag_to_str(&buf->in_tag,
+							str, sizeof(str)));
+			}
+		}
+
+		/* copy bytes from fifo word to user's buffer */
+		copy_bc = min_t(int, word_bc, buf_need_bc);
+		memcpy(msg->buf + buf->byte_idx, word + word_idx, copy_bc);
+
+		buf->byte_idx += copy_bc;
+		buf_need_bc   -= copy_bc;
+	}
+}
+
+/*
+ * i2c_msm_fifo_write_xfer_buf: write xfer.cur_buf (user's-buf + tag) to fifo
+ */
+static void i2c_msm_fifo_write_xfer_buf(struct i2c_msm_ctrl *ctrl)
+{
+	struct i2c_msm_xfer_buf *buf  = &ctrl->xfer.cur_buf;
+	size_t len;
+	size_t tag_len;
+
+	tag_len = buf->out_tag.len;
+	len = i2c_msm_fifo_xfer_wr_tag(ctrl);
+	if (len < tag_len) {
+		dev_err(ctrl->dev, "error on writing tag to out FIFO\n");
+		return;
+	}
+
+	if (!buf->is_rx) {
+		if (ctrl->dbgfs.dbg_lvl >= MSM_DBG) {
+			char str[I2C_MSM_REG_2_STR_BUF_SZ];
+			int  offset = 0;
+			u8  *p      = i2c_msm_buf_to_ptr(buf);
+			int  i;
+
+			for (i = 0 ; i < len; ++i, ++p)
+				offset += snprintf(str + offset,
+						   sizeof(str) - offset,
+						   "0x%x ", *p);
+			dev_info(ctrl->dev, "data: %s\n", str);
+		}
+
+		len = i2c_msm_fifo_wr_buf(ctrl, i2c_msm_buf_to_ptr(buf),
+						buf->len);
+		if (len < buf->len)
+			dev_err(ctrl->dev, "error on xfering buf with FIFO\n");
+	}
+}
+
+/*
+ * i2c_msm_fifo_xfer_process:
+ *
+ * @pre    transfer size is less than or equal to fifo size.
+ * @pre    QUP in run state/pause
+ * @return zero on success
+ */
+static int i2c_msm_fifo_xfer_process(struct i2c_msm_ctrl *ctrl)
+{
+	struct i2c_msm_xfer_buf first_buf = ctrl->xfer.cur_buf;
+	int ret;
+
+	/* load fifo while in pause state to avoid race conditions */
+	ret = i2c_msm_qup_state_set(ctrl, QUP_STATE_PAUSE);
+	if (ret < 0)
+		return ret;
+
+	/* write all that goes to output fifo */
+	while (i2c_msm_xfer_next_buf(ctrl))
+		i2c_msm_fifo_write_xfer_buf(ctrl);
+
+	i2c_msm_fifo_wr_buf_flush(ctrl);
+
+	ctrl->xfer.cur_buf = first_buf;
+
+	ret = i2c_msm_qup_state_set(ctrl, QUP_STATE_RUN);
+	if (ret < 0)
+		return ret;
+
+	/* wait for input done interrupt */
+	ret = i2c_msm_xfer_wait_for_completion(ctrl, &ctrl->xfer.complete);
+	if (ret < 0)
+		return ret;
+
+	/* read all from input fifo */
+	while (i2c_msm_xfer_next_buf(ctrl))
+		i2c_msm_fifo_read_xfer_buf(ctrl);
+
+	return 0;
+}
+
+/*
+ * i2c_msm_fifo_xfer: process transfer using fifo mode
+ */
+static int i2c_msm_fifo_xfer(struct i2c_msm_ctrl *ctrl)
+{
+	int ret;
+
+	i2c_msm_dbg(ctrl, MSM_DBG, "Starting FIFO transfer");
+
+	ret = i2c_msm_qup_state_set(ctrl, QUP_STATE_RESET);
+	if (ret < 0)
+		return ret;
+
+	/* program qup registers */
+	i2c_msm_qup_xfer_init_reset_state(ctrl);
+
+	ret = i2c_msm_qup_state_set(ctrl, QUP_STATE_RUN);
+	if (ret < 0)
+		return ret;
+
+	/* program qup registers which must be set *after* reset */
+	i2c_msm_qup_xfer_init_run_state(ctrl);
+
+	ret = i2c_msm_fifo_xfer_process(ctrl);
+
+	return ret;
+}
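+
+/*
+ * The QUP state sequence driven above for a FIFO-mode transfer:
+ * RESET (program counters/config) -> RUN (program clock dividers) ->
+ * PAUSE (preload tags + tx data into the output FIFO) -> RUN ->
+ * wait for the completion interrupt -> drain the input FIFO.
+ */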
+
+/*
+ * i2c_msm_blk_init_struct: Allocate memory and initialize blk structure
+ *
+ * @return 0 on success or error code
+ */
+static int i2c_msm_blk_init_struct(struct i2c_msm_ctrl *ctrl)
+{
+	u32 reg_data = readl_relaxed(ctrl->rsrcs.base + QUP_IO_MODES);
+	int ret;
+	struct i2c_msm_xfer_mode_blk *blk = &ctrl->xfer.blk;
+
+	blk->in_blk_sz  = i2c_msm_reg_io_modes_in_blk_sz(reg_data);
+	blk->out_blk_sz = i2c_msm_reg_io_modes_out_blk_sz(reg_data);
+
+	blk->tx_cache = kmalloc(blk->out_blk_sz, GFP_KERNEL);
+	if (!blk->tx_cache) {
+		dev_err(ctrl->dev,
+		"error on allocating memory for block tx_cache. malloc(size:%zu)\n",
+		 blk->out_blk_sz);
+		ret = -ENOMEM;
+		goto out_buf_err;
+	}
+
+	blk->rx_cache = kmalloc(blk->in_blk_sz, GFP_KERNEL);
+	if (!blk->rx_cache) {
+		dev_err(ctrl->dev,
+		"error on allocating memory for block rx_cache. malloc(size:%zu)\n",
+		 blk->in_blk_sz);
+		ret = -ENOMEM;
+		goto in_buf_err;
+	}
+
+	blk->is_init = true;
+	return 0;
+
+in_buf_err:
+	kfree(blk->tx_cache);
+out_buf_err:
+	return ret;
+}
+
+/*
+ * i2c_msm_blk_wr_flush: flushes internal cached block to FIFO
+ *
+ * @return 0 on success or error code
+ */
+static int i2c_msm_blk_wr_flush(struct i2c_msm_ctrl *ctrl)
+{
+	int byte_num;
+	int ret = 0;
+	struct i2c_msm_xfer_mode_blk *blk = &ctrl->xfer.blk;
+	u32 *buf_u32_ptr;
+
+	if (!blk->tx_cache_idx)
+		return 0;
+
+	/* if no blocks available, wait for interrupt */
+	ret = i2c_msm_xfer_wait_for_completion(ctrl, &blk->wait_tx_blk);
+	if (ret)
+		return ret;
+
+	/*
+	 * pause the controller until we finish loading the block in order to
+	 * avoid race conditions
+	 */
+	ret = i2c_msm_qup_state_set(ctrl, QUP_STATE_PAUSE);
+	if (ret < 0)
+		return ret;
+	i2c_msm_dbg(ctrl, MSM_DBG, "OUT-BLK:%*phC", blk->tx_cache_idx,
+							blk->tx_cache);
+
+	for (byte_num = 0; byte_num < blk->tx_cache_idx;
+						byte_num += sizeof(u32)) {
+		buf_u32_ptr = (u32 *) (blk->tx_cache + byte_num);
+		writel_relaxed(*buf_u32_ptr,
+					ctrl->rsrcs.base + QUP_OUT_FIFO_BASE);
+		*buf_u32_ptr = 0;
+	}
+
+	/* now cache is empty */
+	blk->tx_cache_idx = 0;
+	ret = i2c_msm_qup_state_set(ctrl, QUP_STATE_RUN);
+	if (ret < 0)
+		return ret;
+
+	return ret;
+}
+
+/*
+ * i2c_msm_blk_wr_buf:
+ *
+ * @len buf size (in bytes)
+ * @return number of bytes from buf which have been processed (written to
+ *         FIFO or kept in out buffer and will be written later)
+ */
+static int
+i2c_msm_blk_wr_buf(struct i2c_msm_ctrl *ctrl, const u8 *buf, int len)
+{
+	struct i2c_msm_xfer_mode_blk *blk = &ctrl->xfer.blk;
+	int byte_num;
+	int ret = 0;
+
+	for (byte_num = 0; byte_num < len; ++byte_num, ++buf) {
+		blk->tx_cache[blk->tx_cache_idx] = *buf;
+		++blk->tx_cache_idx;
+
+		/* flush cached buffer to HW FIFO when full */
+		if (blk->tx_cache_idx == blk->out_blk_sz) {
+			ret = i2c_msm_blk_wr_flush(ctrl);
+			if (ret)
+				return ret;
+		}
+	}
+	return byte_num;
+}
+
+/*
+ * i2c_msm_blk_xfer_wr_tag: buffered writing the tag of current buf
+ * @return zero on success
+ */
+static int i2c_msm_blk_xfer_wr_tag(struct i2c_msm_ctrl *ctrl)
+{
+	struct i2c_msm_xfer_buf *buf = &ctrl->xfer.cur_buf;
+	int len = 0;
+
+	if (!buf->out_tag.len)
+		return 0;
+
+	len = i2c_msm_blk_wr_buf(ctrl, (u8 *) &buf->out_tag.val,
+							buf->out_tag.len);
+	if (len != buf->out_tag.len)
+		return -EFAULT;
+
+	buf->out_tag = (struct i2c_msm_tag) {0};
+	return 0;
+}
+
+/*
+ * i2c_msm_blk_wr_xfer_buf: writes ctrl->xfer.cur_buf to HW
+ *
+ * @return zero on success
+ */
+static int i2c_msm_blk_wr_xfer_buf(struct i2c_msm_ctrl *ctrl)
+{
+	struct i2c_msm_xfer_buf *buf  = &ctrl->xfer.cur_buf;
+	int len;
+	int ret;
+	ret = i2c_msm_blk_xfer_wr_tag(ctrl);
+	if (ret)
+		return ret;
+
+	len = i2c_msm_blk_wr_buf(ctrl, i2c_msm_buf_to_ptr(buf), buf->len);
+	if (len < buf->len)
+		return -EFAULT;
+
+	buf->byte_idx += len;
+	return 0;
+}
+
+/*
+ * i2c_msm_blk_rd_blk: read a block from HW FIFO to internal cache
+ *
+ * @return number of bytes read or negative error value
+ * @need_bc number of bytes that we need
+ *
+ * uses internal counter to keep track of number of available blocks. When
+ * zero, waits for interrupt.
+ */
+static int i2c_msm_blk_rd_blk(struct i2c_msm_ctrl *ctrl, int need_bc)
+{
+	int byte_num;
+	int ret = 0;
+	struct i2c_msm_xfer_mode_blk *blk = &ctrl->xfer.blk;
+	u32 *cache_ptr = (u32 *) blk->rx_cache;
+	int read_bc    = min_t(int, blk->in_blk_sz, need_bc);
+
+	/* wait for the block-available interrupt */
+	ret = i2c_msm_xfer_wait_for_completion(ctrl, &blk->wait_rx_blk);
+	if (ret)
+		return ret;
+
+	/* Read block from HW to cache */
+	for (byte_num = 0; byte_num < blk->in_blk_sz;
+					byte_num += sizeof(u32)) {
+		if (byte_num < read_bc) {
+			*cache_ptr = readl_relaxed(ctrl->rsrcs.base +
+							QUP_IN_FIFO_BASE);
+			++cache_ptr;
+		}
+	}
+	blk->rx_cache_idx = 0;
+	return read_bc;
+}
+
+/*
+ * i2c_msm_blk_rd_xfer_buf: fill in ctrl->xfer.cur_buf from HW
+ *
+ * @return zero on success
+ */
+static int i2c_msm_blk_rd_xfer_buf(struct i2c_msm_ctrl *ctrl)
+{
+	struct i2c_msm_xfer_mode_blk *blk = &ctrl->xfer.blk;
+	struct i2c_msm_xfer_buf *buf      = &ctrl->xfer.cur_buf;
+	struct i2c_msg *msg               = ctrl->xfer.msgs + buf->msg_idx;
+	int    copy_bc;         /* number of bytes to copy to user's buffer */
+	int    cache_avail_bc;
+	int    ret = 0;
+
+	/* write tag to out FIFO */
+	ret = i2c_msm_blk_xfer_wr_tag(ctrl);
+	if (ret)
+		return ret;
+	i2c_msm_blk_wr_flush(ctrl);
+
+	while (buf->len || buf->in_tag.len) {
+		cache_avail_bc = i2c_msm_blk_rd_blk(ctrl,
+						buf->len + buf->in_tag.len);
+
+		i2c_msm_dbg(ctrl, MSM_DBG, "IN-BLK:%*phC\n", cache_avail_bc,
+					blk->rx_cache + blk->rx_cache_idx);
+
+		if (cache_avail_bc < 0)
+			return cache_avail_bc;
+
+		/* discard tag from input FIFO */
+		if (buf->in_tag.len) {
+			int discard_bc = min_t(int, cache_avail_bc,
+							buf->in_tag.len);
+			blk->rx_cache_idx += discard_bc;
+			buf->in_tag.len   -= discard_bc;
+			cache_avail_bc    -= discard_bc;
+		}
+
+		/* copy bytes from cached block to user's buffer */
+		copy_bc = min_t(int, cache_avail_bc, buf->len);
+		memcpy(msg->buf + buf->byte_idx,
+			blk->rx_cache + blk->rx_cache_idx, copy_bc);
+
+		blk->rx_cache_idx += copy_bc;
+		buf->len          -= copy_bc;
+		buf->byte_idx     += copy_bc;
+	}
+	return ret;
+}
+
+/*
+ * i2c_msm_blk_xfer: process transfer using block mode
+ */
+static int i2c_msm_blk_xfer(struct i2c_msm_ctrl *ctrl)
+{
+	int ret = 0;
+	struct i2c_msm_xfer_buf      *buf = &ctrl->xfer.cur_buf;
+	struct i2c_msm_xfer_mode_blk *blk = &ctrl->xfer.blk;
+
+	if (!blk->is_init) {
+		ret = i2c_msm_blk_init_struct(ctrl);
+		if (!blk->is_init)
+			return ret;
+	}
+
+	init_completion(&blk->wait_rx_blk);
+	init_completion(&blk->wait_tx_blk);
+
+	/* tx_cnt > 0 always */
+	blk->complete_mask = QUP_MAX_OUTPUT_DONE_FLAG;
+	if (ctrl->xfer.rx_cnt)
+		blk->complete_mask |= QUP_MAX_INPUT_DONE_FLAG;
+
+	/* initialize block mode for new transfer */
+	blk->tx_cache_idx = 0;
+	blk->rx_cache_idx = 0;
+
+	ret = i2c_msm_qup_state_set(ctrl, QUP_STATE_RESET);
+	if (ret < 0)
+		return ret;
+
+	/* program qup registers */
+	i2c_msm_qup_xfer_init_reset_state(ctrl);
+
+	ret = i2c_msm_qup_state_set(ctrl, QUP_STATE_RUN);
+	if (ret < 0)
+		return ret;
+
+	/* program qup registers which must be set *after* reset */
+	i2c_msm_qup_xfer_init_run_state(ctrl);
+
+	while (i2c_msm_xfer_next_buf(ctrl)) {
+		if (buf->is_rx) {
+			ret = i2c_msm_blk_rd_xfer_buf(ctrl);
+			if (ret)
+				return ret;
+			/*
+			 * SW workaround: wait for an extra interrupt from
+			 * hardware for the last block of a block-mode read
+			 */
+			if (buf->is_last) {
+				ret = i2c_msm_xfer_wait_for_completion(ctrl,
+							&blk->wait_rx_blk);
+				if (!ret)
+					complete(&ctrl->xfer.complete);
+			}
+		} else {
+			ret = i2c_msm_blk_wr_xfer_buf(ctrl);
+			if (ret)
+				return ret;
+		}
+	}
+	i2c_msm_blk_wr_flush(ctrl);
+	return i2c_msm_xfer_wait_for_completion(ctrl, &ctrl->xfer.complete);
+}
+
+/*
+ * i2c_msm_dma_xfer_prepare: map DMA buffers, and create tags.
+ * @return zero on success or negative error value
+ */
+static int i2c_msm_dma_xfer_prepare(struct i2c_msm_ctrl *ctrl)
+{
+	struct i2c_msm_xfer_mode_dma *dma  = &ctrl->xfer.dma;
+	struct i2c_msm_xfer_buf      *buf  = &ctrl->xfer.cur_buf;
+	struct i2c_msm_dma_chan      *tx = &dma->chan[I2C_MSM_DMA_TX];
+	struct i2c_msm_dma_chan      *rx = &dma->chan[I2C_MSM_DMA_RX];
+	struct i2c_msm_dma_buf *dma_buf;
+	int                     rem_buf_cnt = I2C_MSM_DMA_DESC_ARR_SIZ;
+	struct i2c_msg         *cur_msg;
+	enum dma_data_direction buf_dma_dirctn;
+	struct i2c_msm_dma_mem  data;
+	u8        *tag_arr_itr_vrtl_addr;
+	dma_addr_t tag_arr_itr_phy_addr;
+
+	tx->desc_cnt_cur    = 0;
+	rx->desc_cnt_cur    = 0;
+	dma->buf_arr_cnt      = 0;
+	dma_buf               = dma->buf_arr;
+	tag_arr_itr_vrtl_addr = ((u8 *) dma->tag_arr.vrtl_addr);
+	tag_arr_itr_phy_addr  = dma->tag_arr.phy_addr;
+
+	for (; i2c_msm_xfer_next_buf(ctrl) && rem_buf_cnt;
+		++dma_buf,
+		tag_arr_itr_phy_addr  += sizeof(dma_addr_t),
+		tag_arr_itr_vrtl_addr += sizeof(dma_addr_t)) {
+
+		/* dma-map the client's message */
+		cur_msg        = ctrl->xfer.msgs + buf->msg_idx;
+		data.vrtl_addr = cur_msg->buf + buf->byte_idx;
+		if (buf->is_rx) {
+			buf_dma_dirctn  = DMA_FROM_DEVICE;
+			rx->desc_cnt_cur += 2; /* msg + tag */
+			tx->desc_cnt_cur += 1; /* tag */
+		} else {
+			buf_dma_dirctn  = DMA_TO_DEVICE;
+			tx->desc_cnt_cur += 2; /* msg + tag */
+		}
+
+		/* for last buffer in a transfer msg */
+		if (buf->is_last) {
+			/* add overhead byte cnt for tags specific to DMA mode */
+			ctrl->xfer.rx_ovrhd_cnt += 2; /* EOT+FLUSH_STOP tags*/
+			ctrl->xfer.tx_ovrhd_cnt += 2; /* EOT+FLUSH_STOP tags */
+
+			/*
+			 * increment rx desc cnt to read off the tags, and
+			 * increment tx desc cnt to queue the EOT+FLUSH_STOP
+			 * tags
+			 */
+			tx->desc_cnt_cur++;
+			rx->desc_cnt_cur++;
+		}
+
+		if ((rx->desc_cnt_cur >= I2C_MSM_DMA_RX_SZ) ||
+		    (tx->desc_cnt_cur >= I2C_MSM_DMA_TX_SZ))
+			return -ENOMEM;
+
+		data.phy_addr = dma_map_single(ctrl->dev, data.vrtl_addr,
+						buf->len, buf_dma_dirctn);
+
+		if (dma_mapping_error(ctrl->dev, data.phy_addr)) {
+			dev_err(ctrl->dev,
+			  "error DMA mapping DMA buffers, err:%lld buf_vrtl:0x%p data_len:%d dma_dir:%s\n",
+			  (u64) data.phy_addr, data.vrtl_addr, buf->len,
+			  ((buf_dma_dirctn == DMA_FROM_DEVICE)
+				? "DMA_FROM_DEVICE" : "DMA_TO_DEVICE"));
+			return -EFAULT;
+		}
+
+		/* copy 8 bytes. Only tag.len bytes will be used */
+		*((u64 *)tag_arr_itr_vrtl_addr) =  buf->out_tag.val;
+
+		i2c_msm_dbg(ctrl, MSM_DBG,
+			"vrtl:0x%p phy:0x%llx val:0x%llx sizeof(dma_addr_t):%zu",
+			tag_arr_itr_vrtl_addr, (u64) tag_arr_itr_phy_addr,
+			*((u64 *)tag_arr_itr_vrtl_addr), sizeof(dma_addr_t));
+
+		/*
+		 * create dma buf, in the dma buf arr, based on the buf created
+		 * by i2c_msm_xfer_next_buf()
+		 */
+		*dma_buf = (struct i2c_msm_dma_buf) {
+			.ptr      = data,
+			.len      = buf->len,
+			.dma_dir  = buf_dma_dirctn,
+			.is_rx    = buf->is_rx,
+			.is_last  = buf->is_last,
+			.tag      = (struct i2c_msm_dma_tag) {
+				.buf = tag_arr_itr_phy_addr,
+				.len = buf->out_tag.len,
+			},
+		};
+		++dma->buf_arr_cnt;
+		--rem_buf_cnt;
+	}
+	return 0;
+}
+
+/*
+ * i2c_msm_dma_xfer_unprepare: DMA-unmap the transfer buffers.
+ */
+static void i2c_msm_dma_xfer_unprepare(struct i2c_msm_ctrl *ctrl)
+{
+	int i;
+	struct i2c_msm_dma_buf *buf_itr = ctrl->xfer.dma.buf_arr;
+
+	for (i = 0 ; i < ctrl->xfer.dma.buf_arr_cnt ; ++i, ++buf_itr)
+		dma_unmap_single(ctrl->dev, buf_itr->ptr.phy_addr, buf_itr->len,
+							buf_itr->dma_dir);
+}
+
+static void i2c_msm_dma_callback_tx_complete(void *dma_async_param)
+{
+	struct i2c_msm_ctrl *ctrl = dma_async_param;
+
+	complete(&ctrl->xfer.complete);
+}
+
+static void i2c_msm_dma_callback_rx_complete(void *dma_async_param)
+{
+	struct i2c_msm_ctrl *ctrl = dma_async_param;
+
+	complete(&ctrl->xfer.rx_complete);
+}
+
+/*
+ * i2c_msm_dma_xfer_process: Queue transfers to DMA
+ * @pre 1)QUP is in run state. 2) i2c_msm_dma_xfer_prepare() was called.
+ * @return zero on success or negative error value
+ */
+static int i2c_msm_dma_xfer_process(struct i2c_msm_ctrl *ctrl)
+{
+	struct i2c_msm_xfer_mode_dma *dma = &ctrl->xfer.dma;
+	struct i2c_msm_dma_chan *tx       = &dma->chan[I2C_MSM_DMA_TX];
+	struct i2c_msm_dma_chan *rx       = &dma->chan[I2C_MSM_DMA_RX];
+	struct scatterlist *sg_rx         = NULL;
+	struct scatterlist *sg_rx_itr     = NULL;
+	struct scatterlist *sg_tx         = NULL;
+	struct scatterlist *sg_tx_itr     = NULL;
+	struct dma_async_tx_descriptor     *dma_desc_rx;
+	struct dma_async_tx_descriptor     *dma_desc_tx;
+	struct i2c_msm_dma_buf             *buf_itr;
+	int  i;
+	int  ret = 0;
+
+	i2c_msm_dbg(ctrl, MSM_DBG, "Going to enqueue %zu buffers in DMA",
+							dma->buf_arr_cnt);
+
+	/* Set the QUP State to pause while DMA completes the txn */
+	ret = i2c_msm_qup_state_set(ctrl, QUP_STATE_PAUSE);
+	if (ret) {
+		dev_err(ctrl->dev, "transition to pause state failed before DMA transaction: %d\n",
+									ret);
+		return ret;
+	}
+
+	sg_tx = kzalloc(sizeof(struct scatterlist) * tx->desc_cnt_cur,
+								GFP_KERNEL);
+	if (!sg_tx) {
+		ret = -ENOMEM;
+		goto dma_xfer_end;
+	}
+	sg_init_table(sg_tx, tx->desc_cnt_cur);
+	sg_tx_itr = sg_tx;
+
+	sg_rx = kzalloc(sizeof(struct scatterlist) * rx->desc_cnt_cur,
+								GFP_KERNEL);
+	if (!sg_rx) {
+		ret = -ENOMEM;
+		goto dma_xfer_end;
+	}
+	sg_init_table(sg_rx, rx->desc_cnt_cur);
+	sg_rx_itr = sg_rx;
+
+	buf_itr = dma->buf_arr;
+
+	for (i = 0; i < dma->buf_arr_cnt ; ++i, ++buf_itr) {
+		/* Queue tag */
+		sg_dma_address(sg_tx_itr) = buf_itr->tag.buf;
+		sg_dma_len(sg_tx_itr) = buf_itr->tag.len;
+		++sg_tx_itr;
+
+		/*
+		 * read off tag + len bytes (don't care) from the input FIFO
+		 * on a read transfer
+		 */
+		if (buf_itr->is_rx) {
+			/* get rid of the input tag */
+			sg_dma_address(sg_rx_itr) =
+					ctrl->xfer.dma.input_tag.phy_addr;
+			sg_dma_len(sg_rx_itr)     = QUP_BUF_OVERHD_BC;
+			++sg_rx_itr;
+
+			/* queue data buffer */
+			sg_dma_address(sg_rx_itr) = buf_itr->ptr.phy_addr;
+			sg_dma_len(sg_rx_itr)     = buf_itr->len;
+			++sg_rx_itr;
+		} else {
+			sg_dma_address(sg_tx_itr) = buf_itr->ptr.phy_addr;
+			sg_dma_len(sg_tx_itr)     = buf_itr->len;
+			++sg_tx_itr;
+		}
+	}
+
+	/* this tag will be copied to rx fifo */
+	sg_dma_address(sg_tx_itr) = dma->eot_n_flush_stop_tags.phy_addr;
+	sg_dma_len(sg_tx_itr)     = QUP_BUF_OVERHD_BC;
+	++sg_tx_itr;
+
+	/*
+	 * Reading the tag off the input fifo has side effects and
+	 * it is mandatory for getting the DMA's interrupt.
+	 */
+	sg_dma_address(sg_rx_itr) = ctrl->xfer.dma.input_tag.phy_addr;
+	sg_dma_len(sg_rx_itr)     = QUP_BUF_OVERHD_BC;
+	++sg_rx_itr;
+
+	/*
+	 * We only want a single BAM interrupt per transfer, and we always
+	 * add a flush-stop i2c tag as the last tx sg entry. Since the dma
+	 * driver puts the supplied BAM flags only on the last BAM descriptor,
+	 * the flush stop will always be the one which generates that interrupt
+	 * and invokes the callback.
+	 */
+	dma_desc_tx = dmaengine_prep_slave_sg(tx->dma_chan,
+						sg_tx,
+						sg_tx_itr - sg_tx,
+						tx->dir,
+						(SPS_IOVEC_FLAG_EOT |
+							SPS_IOVEC_FLAG_NWD));
+	if (IS_ERR_OR_NULL(dma_desc_tx)) {
+		dev_err(ctrl->dev, "error dmaengine_prep_slave_sg tx:%ld\n",
+							PTR_ERR(dma_desc_tx));
+		ret = dma_desc_tx ? PTR_ERR(dma_desc_tx) : -ENOMEM;
+		goto dma_xfer_end;
+	}
+
+	/* callback defined for tx dma desc */
+	dma_desc_tx->callback       = i2c_msm_dma_callback_tx_complete;
+	dma_desc_tx->callback_param = ctrl;
+	dmaengine_submit(dma_desc_tx);
+	dma_async_issue_pending(tx->dma_chan);
+
+	/* queue the rx dma desc */
+	dma_desc_rx = dmaengine_prep_slave_sg(rx->dma_chan, sg_rx,
+					sg_rx_itr - sg_rx, rx->dir,
+					(SPS_IOVEC_FLAG_EOT |
+							SPS_IOVEC_FLAG_NWD));
+	if (IS_ERR_OR_NULL(dma_desc_rx)) {
+		dev_err(ctrl->dev,
+			"error dmaengine_prep_slave_sg rx:%ld\n",
+						PTR_ERR(dma_desc_rx));
+		ret = dma_desc_rx ? PTR_ERR(dma_desc_rx) : -ENOMEM;
+		goto dma_xfer_end;
+	}
+
+	dma_desc_rx->callback       = i2c_msm_dma_callback_rx_complete;
+	dma_desc_rx->callback_param = ctrl;
+	dmaengine_submit(dma_desc_rx);
+	dma_async_issue_pending(rx->dma_chan);
+
+	/* Set the QUP state back to run; the transaction starts now */
+	ret = i2c_msm_qup_state_set(ctrl, QUP_STATE_RUN);
+	if (ret) {
+		dev_err(ctrl->dev, "transition to run state failed before DMA transaction: %d\n",
+									ret);
+		goto dma_xfer_end;
+	}
+
+	ret = i2c_msm_xfer_wait_for_completion(ctrl, &ctrl->xfer.complete);
+	if (!ret && ctrl->xfer.rx_cnt)
+		ret = i2c_msm_xfer_wait_for_completion(ctrl,
+						&ctrl->xfer.rx_complete);
+
+dma_xfer_end:
+	/* free scatter-gather lists */
+	kfree(sg_tx);
+	kfree(sg_rx);
+
+	return ret;
+}
+
+static void i2c_msm_dma_free_channels(struct i2c_msm_ctrl *ctrl)
+{
+	int i;
+	for (i = 0; i < I2C_MSM_DMA_CNT; ++i) {
+		struct i2c_msm_dma_chan *chan = &ctrl->xfer.dma.chan[i];
+		if (!chan->is_init)
+			continue;
+
+		dma_release_channel(chan->dma_chan);
+		chan->is_init  = false;
+		chan->dma_chan = NULL;
+	}
+	if (ctrl->xfer.dma.state > I2C_MSM_DMA_INIT_CORE)
+		ctrl->xfer.dma.state = I2C_MSM_DMA_INIT_CORE;
+}
+
+static const char * const i2c_msm_dma_chan_name[] = {"tx", "rx"};
+
+static int i2c_msm_dmaengine_dir[] = {
+	DMA_MEM_TO_DEV, DMA_DEV_TO_MEM
+};
+
+static int i2c_msm_dma_init_channels(struct i2c_msm_ctrl *ctrl)
+{
+	int ret = 0;
+	int i;
+	/* Iterate over the dma channels to initialize them */
+	for (i = 0; i < I2C_MSM_DMA_CNT; ++i) {
+		struct dma_slave_config cfg = {0};
+		struct i2c_msm_dma_chan *chan = &ctrl->xfer.dma.chan[i];
+		if (chan->is_init)
+			continue;
+
+		chan->name     = i2c_msm_dma_chan_name[i];
+		chan->dma_chan = dma_request_slave_channel(ctrl->dev,
+								chan->name);
+		if (!chan->dma_chan) {
+			dev_err(ctrl->dev,
+				"error dma_request_slave_channel(dev:%s chan:%s)\n",
+				dev_name(ctrl->dev), chan->name);
+			/* free the channels if allocated before */
+			i2c_msm_dma_free_channels(ctrl);
+			return -ENODEV;
+		}
+
+		chan->dir = cfg.direction = i2c_msm_dmaengine_dir[i];
+		ret = dmaengine_slave_config(chan->dma_chan, &cfg);
+		if (ret) {
+			dev_err(ctrl->dev,
+			"error:%d dmaengine_slave_config(chan:%s)\n",
+						ret, chan->name);
+			dma_release_channel(chan->dma_chan);
+			chan->dma_chan = NULL;
+			i2c_msm_dma_free_channels(ctrl);
+			return ret;
+		}
+		chan->is_init = true;
+	}
+	ctrl->xfer.dma.state = I2C_MSM_DMA_INIT_CHAN;
+	return 0;
+}
+
+static void i2c_msm_dma_teardown(struct i2c_msm_ctrl *ctrl)
+{
+	struct i2c_msm_xfer_mode_dma *dma = &ctrl->xfer.dma;
+
+	i2c_msm_dma_free_channels(ctrl);
+
+	if (dma->state > I2C_MSM_DMA_INIT_NONE)
+		dma_free_coherent(ctrl->dev, I2C_MSM_DMA_TAG_MEM_SZ,
+				  dma->input_tag.vrtl_addr,
+				  dma->input_tag.phy_addr);
+
+	dma->state = I2C_MSM_DMA_INIT_NONE;
+}
+
+static int i2c_msm_dma_init(struct i2c_msm_ctrl *ctrl)
+{
+	struct i2c_msm_xfer_mode_dma *dma = &ctrl->xfer.dma;
+	u8             *tags_space_virt_addr;
+	dma_addr_t      tags_space_phy_addr;
+
+	/* check if DMA core is initialized */
+	if (dma->state > I2C_MSM_DMA_INIT_NONE)
+		goto dma_core_is_init;
+
+	/*
+	 * allocate dma memory for input_tag + eot_n_flush_stop_tags + tag_arr
+	 * for more see: I2C_MSM_DMA_TAG_MEM_SZ definition
+	 */
+	tags_space_virt_addr = dma_alloc_coherent(
+						ctrl->dev,
+						I2C_MSM_DMA_TAG_MEM_SZ,
+						&tags_space_phy_addr,
+						GFP_KERNEL);
+	if (!tags_space_virt_addr) {
+		dev_err(ctrl->dev,
+		  "error alloc %d bytes of DMAable memory for DMA tags space\n",
+		  I2C_MSM_DMA_TAG_MEM_SZ);
+		return -ENOMEM;
+	}
+
+	/*
+	 * set the dma-tags virtual and physical addresses:
+	 * 1) the first tag space is for the input (throw away) tag
+	 */
+	dma->input_tag.vrtl_addr  = tags_space_virt_addr;
+	dma->input_tag.phy_addr   = tags_space_phy_addr;
+
+	/* 2) second tag space is for eot_flush_stop tag which is const value */
+	tags_space_virt_addr += I2C_MSM_TAG2_MAX_LEN;
+	tags_space_phy_addr  += I2C_MSM_TAG2_MAX_LEN;
+	dma->eot_n_flush_stop_tags.vrtl_addr = tags_space_virt_addr;
+	dma->eot_n_flush_stop_tags.phy_addr  = tags_space_phy_addr;
+
+	/* set eot_n_flush_stop_tags value */
+	*((u16 *) dma->eot_n_flush_stop_tags.vrtl_addr) =
+				QUP_TAG2_INPUT_EOT | (QUP_TAG2_FLUSH_STOP << 8);
+
+	/* 3) all other tag spaces are used for transfer tags */
+	tags_space_virt_addr  += I2C_MSM_TAG2_MAX_LEN;
+	tags_space_phy_addr   += I2C_MSM_TAG2_MAX_LEN;
+	dma->tag_arr.vrtl_addr = tags_space_virt_addr;
+	dma->tag_arr.phy_addr  = tags_space_phy_addr;
+
+	dma->state = I2C_MSM_DMA_INIT_CORE;
+
+dma_core_is_init:
+	return i2c_msm_dma_init_channels(ctrl);
+}
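+
+/*
+ * Layout of the coherent tag memory carved up above:
+ *   offset 0                        : input_tag (throw-away rx tag)
+ *   offset I2C_MSM_TAG2_MAX_LEN     : eot_n_flush_stop_tags (const u16)
+ *   offset 2 * I2C_MSM_TAG2_MAX_LEN : tag_arr (per-buffer output tags)
+ */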
+
+static int i2c_msm_dma_xfer(struct i2c_msm_ctrl *ctrl)
+{
+	int ret;
+	ret = i2c_msm_dma_init(ctrl);
+	if (ret) {
+		dev_err(ctrl->dev, "DMA Init Failed: %d\n", ret);
+		return ret;
+	}
+
+	/* dma map user's buffers and create tags */
+	ret = i2c_msm_dma_xfer_prepare(ctrl);
+	if (ret < 0) {
+		dev_err(ctrl->dev, "error on i2c_msm_dma_xfer_prepare():%d\n",
+									ret);
+		goto err_dma_xfer;
+	}
+
+	ret = i2c_msm_qup_state_set(ctrl, QUP_STATE_RESET);
+	if (ret < 0)
+		goto err_dma_xfer;
+
+	/* program qup registers */
+	i2c_msm_qup_xfer_init_reset_state(ctrl);
+
+	ret = i2c_msm_qup_state_set(ctrl, QUP_STATE_RUN);
+	if (ret < 0)
+		goto err_dma_xfer;
+
+	/* program qup registers which must be set *after* reset */
+	i2c_msm_qup_xfer_init_run_state(ctrl);
+
+	/* enqueue transfer buffers */
+	ret = i2c_msm_dma_xfer_process(ctrl);
+	if (ret)
+		dev_err(ctrl->dev,
+			"error i2c_msm_dma_xfer_process(n_bufs:%zu):%d\n",
+			ctrl->xfer.dma.buf_arr_cnt, ret);
+
+err_dma_xfer:
+	i2c_msm_dma_xfer_unprepare(ctrl);
+	return ret;
+}
+
+/*
+ * i2c_msm_qup_slv_holds_bus: true when a slave holds the SDA line low
+ */
+static bool i2c_msm_qup_slv_holds_bus(struct i2c_msm_ctrl *ctrl)
+{
+	u32 status = readl_relaxed(ctrl->rsrcs.base + QUP_I2C_STATUS);
+
+	bool slv_holds_bus =	!(status & QUP_I2C_SDA) &&
+				(status & QUP_BUS_ACTIVE) &&
+				!(status & QUP_BUS_MASTER);
+	if (slv_holds_bus)
+		dev_info(ctrl->dev,
+			"bus lines held low by a slave detected\n");
+
+	return slv_holds_bus;
+}
+
+/*
+ * i2c_msm_qup_poll_bus_active_unset: poll until QUP_BUS_ACTIVE is unset
+ *
+ * @return zero when bus inactive, or nonzero on timeout.
+ *
+ * Loops reading QUP_I2C_STATUS until the bus is inactive or the timeout is
+ * reached. Used to avoid a race condition due to the gap between the QUP
+ * completion interrupt and the QUP issuing the stop signal on the bus.
+ */
+static int i2c_msm_qup_poll_bus_active_unset(struct i2c_msm_ctrl *ctrl)
+{
+	void __iomem *base    = ctrl->rsrcs.base;
+	ulong timeout = jiffies + msecs_to_jiffies(I2C_MSM_MAX_POLL_MSEC);
+	int    ret      = 0;
+	size_t read_cnt = 0;
+
+	do {
+		if (!(readl_relaxed(base + QUP_I2C_STATUS) & QUP_BUS_ACTIVE))
+			goto poll_active_end;
+		++read_cnt;
+	} while (time_before_eq(jiffies, timeout));
+
+	ret = -EBUSY;
+
+poll_active_end:
+	/* second logged value is time-left before timeout or zero if expired */
+	i2c_msm_prof_evnt_add(ctrl, MSM_DBG, I2C_MSM_ACTV_END,
+				ret, (ret ? 0 : (timeout - jiffies)), read_cnt);
+
+	return ret;
+}
+
+static void i2c_msm_clk_path_vote(struct i2c_msm_ctrl *ctrl)
+{
+	i2c_msm_clk_path_init(ctrl);
+
+	if (ctrl->rsrcs.clk_path_vote.client_hdl)
+		msm_bus_scale_client_update_request(
+					ctrl->rsrcs.clk_path_vote.client_hdl,
+					I2C_MSM_CLK_PATH_RESUME_VEC);
+}
+
+static void i2c_msm_clk_path_unvote(struct i2c_msm_ctrl *ctrl)
+{
+	if (ctrl->rsrcs.clk_path_vote.client_hdl)
+		msm_bus_scale_client_update_request(
+					ctrl->rsrcs.clk_path_vote.client_hdl,
+					I2C_MSM_CLK_PATH_SUSPEND_VEC);
+}
+
+static void i2c_msm_clk_path_teardown(struct i2c_msm_ctrl *ctrl)
+{
+	if (ctrl->rsrcs.clk_path_vote.client_hdl) {
+		msm_bus_scale_unregister_client(
+					ctrl->rsrcs.clk_path_vote.client_hdl);
+		ctrl->rsrcs.clk_path_vote.client_hdl = 0;
+	}
+}
+
+/*
+ * i2c_msm_clk_path_init_structs: internal impl detail of i2c_msm_clk_path_init
+ *
+ * allocates and initializes the bus scaling vectors.
+ */
+static int i2c_msm_clk_path_init_structs(struct i2c_msm_ctrl *ctrl)
+{
+	struct msm_bus_vectors *paths    = NULL;
+	struct msm_bus_paths   *usecases = NULL;
+
+	i2c_msm_dbg(ctrl, MSM_PROF, "initializes path clock voting structs");
+
+	paths = devm_kzalloc(ctrl->dev, sizeof(*paths) * 2, GFP_KERNEL);
+	if (!paths) {
+		dev_err(ctrl->dev,
+			"error msm_bus_paths.paths memory allocation failed\n");
+		return -ENOMEM;
+	}
+
+	usecases = devm_kzalloc(ctrl->dev, sizeof(*usecases) * 2, GFP_KERNEL);
+	if (!usecases) {
+		dev_err(ctrl->dev,
+		"error  msm_bus_scale_pdata.usecases memory allocation failed\n");
+		goto path_init_err;
+	}
+
+	ctrl->rsrcs.clk_path_vote.pdata = devm_kzalloc(ctrl->dev,
+				       sizeof(*ctrl->rsrcs.clk_path_vote.pdata),
+				       GFP_KERNEL);
+	if (!ctrl->rsrcs.clk_path_vote.pdata) {
+		dev_err(ctrl->dev,
+			"error  msm_bus_scale_pdata memory allocation failed\n");
+		goto path_init_err;
+	}
+
+	paths[I2C_MSM_CLK_PATH_SUSPEND_VEC] = (struct msm_bus_vectors) {
+		.src = ctrl->rsrcs.clk_path_vote.mstr_id,
+		.dst = MSM_BUS_SLAVE_EBI_CH0,
+		.ab  = 0,
+		.ib  = 0,
+	};
+
+	paths[I2C_MSM_CLK_PATH_RESUME_VEC]  = (struct msm_bus_vectors) {
+		.src = ctrl->rsrcs.clk_path_vote.mstr_id,
+		.dst = MSM_BUS_SLAVE_EBI_CH0,
+		.ab  = I2C_MSM_CLK_PATH_AVRG_BW(ctrl),
+		.ib  = I2C_MSM_CLK_PATH_BRST_BW(ctrl),
+	};
+
+	usecases[I2C_MSM_CLK_PATH_SUSPEND_VEC] = (struct msm_bus_paths) {
+		.num_paths = 1,
+		.vectors   = &paths[I2C_MSM_CLK_PATH_SUSPEND_VEC],
+	};
+
+	usecases[I2C_MSM_CLK_PATH_RESUME_VEC] = (struct msm_bus_paths) {
+		.num_paths = 1,
+		.vectors   = &paths[I2C_MSM_CLK_PATH_RESUME_VEC],
+	};
+
+	*ctrl->rsrcs.clk_path_vote.pdata = (struct msm_bus_scale_pdata) {
+		.usecase      = usecases,
+		.num_usecases = 2,
+		.name         = dev_name(ctrl->dev),
+	};
+
+	return 0;
+
+path_init_err:
+	devm_kfree(ctrl->dev, paths);
+	devm_kfree(ctrl->dev, usecases);
+	devm_kfree(ctrl->dev, ctrl->rsrcs.clk_path_vote.pdata);
+	ctrl->rsrcs.clk_path_vote.pdata = NULL;
+	return -ENOMEM;
+}
+
+/*
+ * i2c_msm_clk_path_postponed_register: reg with bus-scaling after it is probed
+ *
+ * @return zero on success
+ *
+ * Workaround: the i2c driver may be probed before the bus scaling driver.
+ * Calling msm_bus_scale_register_client() will fail if the bus scaling driver
+ * is not ready yet. Thus, this function should be called not from probe but
+ * from a later context. It may also be called more than once before
+ * registration succeeds, in which case only one error message is logged. At
+ * boot time all clocks are on, so earlier i2c transactions should succeed.
+ */
+static int i2c_msm_clk_path_postponed_register(struct i2c_msm_ctrl *ctrl)
+{
+	ctrl->rsrcs.clk_path_vote.client_hdl =
+		msm_bus_scale_register_client(ctrl->rsrcs.clk_path_vote.pdata);
+
+	if (ctrl->rsrcs.clk_path_vote.client_hdl) {
+		if (ctrl->rsrcs.clk_path_vote.reg_err) {
+			/* log a success message if an error msg was logged */
+			ctrl->rsrcs.clk_path_vote.reg_err = false;
+			dev_err(ctrl->dev,
+				"msm_bus_scale_register_client(mstr-id:%d):0x%x (ok)",
+				ctrl->rsrcs.clk_path_vote.mstr_id,
+				ctrl->rsrcs.clk_path_vote.client_hdl);
+		}
+	} else {
+		/* guard to log only one error on multiple failure */
+		if (!ctrl->rsrcs.clk_path_vote.reg_err) {
+			ctrl->rsrcs.clk_path_vote.reg_err = true;
+
+			dev_info(ctrl->dev,
+				"msm_bus_scale_register_client(mstr-id:%d):0 (not a problem)",
+				ctrl->rsrcs.clk_path_vote.mstr_id);
+		}
+	}
+
+	return ctrl->rsrcs.clk_path_vote.client_hdl ? 0 : -EAGAIN;
+}
+
+static void i2c_msm_clk_path_init(struct i2c_msm_ctrl *ctrl)
+{
+	/*
+	 * bail out if path voting is disabled (mstr_id == 0) or if it is
+	 * already registered (client_hdl != 0)
+	 */
+	if (!ctrl->rsrcs.clk_path_vote.mstr_id ||
+		ctrl->rsrcs.clk_path_vote.client_hdl)
+		return;
+
+	/* if fail once then try no more */
+	if (!ctrl->rsrcs.clk_path_vote.pdata &&
+					i2c_msm_clk_path_init_structs(ctrl)) {
+		ctrl->rsrcs.clk_path_vote.mstr_id = 0;
+		return;
+	}
+
+	/* on failure try again later */
+	if (i2c_msm_clk_path_postponed_register(ctrl))
+		return;
+}
+
+/*
+ * i2c_msm_qup_isr: QUP interrupt service routine
+ */
+static irqreturn_t i2c_msm_qup_isr(int irq, void *devid)
+{
+	struct i2c_msm_ctrl *ctrl = devid;
+	void __iomem        *base = ctrl->rsrcs.base;
+	struct i2c_msm_xfer *xfer = &ctrl->xfer;
+	struct i2c_msm_xfer_mode_blk *blk = &ctrl->xfer.blk;
+	u32  err_flags  = 0;
+	u32  clr_flds   = 0;
+	bool log_event       = false;
+	bool signal_complete = false;
+	bool need_wmb        = false;
+
+	i2c_msm_prof_evnt_add(ctrl, MSM_PROF, I2C_MSM_IRQ_BGN, irq, 0, 0);
+
+	if (!atomic_read(&ctrl->xfer.is_active)) {
+		dev_info(ctrl->dev, "irq:%d when no active transfer\n", irq);
+		return IRQ_HANDLED;
+	}
+
+	ctrl->i2c_sts_reg  = readl_relaxed(base + QUP_I2C_STATUS);
+	err_flags	   = readl_relaxed(base + QUP_ERROR_FLAGS);
+	ctrl->qup_op_reg   = readl_relaxed(base + QUP_OPERATIONAL);
+
+	if (ctrl->i2c_sts_reg & QUP_MSTR_STTS_ERR_MASK) {
+		signal_complete = true;
+		log_event       = true;
+		/*
+		 * If more than one error is set here, the last one sticks,
+		 * so the order in which the errors are set matters.
+		 */
+		if (ctrl->i2c_sts_reg & QUP_ARB_LOST)
+			ctrl->xfer.err = I2C_MSM_ERR_ARB_LOST;
+
+		if (ctrl->i2c_sts_reg & QUP_BUS_ERROR)
+			ctrl->xfer.err = I2C_MSM_ERR_BUS_ERR;
+
+		if (ctrl->i2c_sts_reg & QUP_PACKET_NACKED)
+			ctrl->xfer.err = I2C_MSM_ERR_NACK;
+	}
+
+	/* check for FIFO over/under runs error */
+	if (err_flags & QUP_ERR_FLGS_MASK)
+		ctrl->xfer.err = I2C_MSM_ERR_OVR_UNDR_RUN;
+
+	/* Dump the register values before reset the core */
+	if (ctrl->xfer.err && ctrl->dbgfs.dbg_lvl >= MSM_DBG)
+		i2c_msm_dbg_qup_reg_dump(ctrl);
+
+	/* clear interrupts fields */
+	clr_flds = ctrl->i2c_sts_reg & QUP_MSTR_STTS_ERR_MASK;
+	if (clr_flds) {
+		writel_relaxed(clr_flds, base + QUP_I2C_STATUS);
+		need_wmb = true;
+	}
+
+	clr_flds = err_flags & QUP_ERR_FLGS_MASK;
+	if (clr_flds) {
+		writel_relaxed(clr_flds,  base + QUP_ERROR_FLAGS);
+		need_wmb = true;
+	}
+
+	clr_flds = ctrl->qup_op_reg &
+			(QUP_OUTPUT_SERVICE_FLAG |
+			QUP_INPUT_SERVICE_FLAG);
+	if (clr_flds) {
+		writel_relaxed(clr_flds, base + QUP_OPERATIONAL);
+		need_wmb = true;
+	}
+
+	if (need_wmb)
+		/*
+		 * flush writes that clear the interrupt flags before changing
+		 * state to reset.
+		 */
+		wmb();
+
+	/* Reset and bail out on error */
+	if (ctrl->xfer.err) {
+		/* Flush for the tags in case of an error and DMA Mode*/
+		if (ctrl->xfer.mode_id == I2C_MSM_XFER_MODE_DMA) {
+			writel_relaxed(QUP_I2C_FLUSH, ctrl->rsrcs.base
+								+ QUP_STATE);
+			/*
+			 * Ensure that QUP_I2C_FLUSH is written before
+			 * State reset
+			 */
+			wmb();
+		}
+
+		/*
+		 * HW workaround: when the interrupt is level triggered, more
+		 * than one interrupt may fire in error cases. Thus we
+		 * change the QUP core state to Reset immediately in the
+		 * ISR to ward off the next interrupt.
+		 */
+		writel_relaxed(QUP_STATE_RESET, ctrl->rsrcs.base + QUP_STATE);
+
+		signal_complete = true;
+		log_event       = true;
+		goto isr_end;
+	}
+
+	/* handle data completion */
+	if (xfer->mode_id == I2C_MSM_XFER_MODE_BLOCK) {
+		/* block ready for writing */
+		if (ctrl->qup_op_reg & QUP_OUTPUT_SERVICE_FLAG) {
+			log_event = true;
+			if (ctrl->qup_op_reg & QUP_OUT_BLOCK_WRITE_REQ)
+				complete(&blk->wait_tx_blk);
+
+			if ((ctrl->qup_op_reg & blk->complete_mask)
+					== blk->complete_mask) {
+				log_event       = true;
+				signal_complete = true;
+			}
+		}
+		/* block ready for reading */
+		if (ctrl->qup_op_reg & QUP_INPUT_SERVICE_FLAG) {
+			log_event = true;
+			complete(&blk->wait_rx_blk);
+		}
+	} else {
+		/* for FIFO/DMA Mode*/
+		if (ctrl->qup_op_reg & QUP_MAX_INPUT_DONE_FLAG) {
+			log_event = true;
+			/*
+			 * If last transaction is an input then the entire
+			 * transfer is done
+			 */
+			if (ctrl->xfer.last_is_rx)
+				signal_complete = true;
+		}
+		/*
+		 * Ideally, we would like to check QUP_MAX_OUTPUT_DONE_FLAG.
+		 * However, QUP_MAX_OUTPUT_DONE_FLAG lags behind
+		 * QUP_OUTPUT_SERVICE_FLAG, and the only reason for
+		 * QUP_OUTPUT_SERVICE_FLAG to be set in FIFO mode is the
+		 * QUP_MAX_OUTPUT_DONE_FLAG condition. The code therefore
+		 * checks QUP_OUTPUT_SERVICE_FLAG here and assumes
+		 * QUP_MAX_OUTPUT_DONE_FLAG is implied.
+		 */
+		if (ctrl->qup_op_reg & (QUP_OUTPUT_SERVICE_FLAG |
+						QUP_MAX_OUTPUT_DONE_FLAG)) {
+			log_event = true;
+			/*
+			 * If last transaction is an output then the
+			 * entire transfer is done
+			 */
+			if (!ctrl->xfer.last_is_rx)
+				signal_complete = true;
+		}
+	}
+
+isr_end:
+	if (log_event || (ctrl->dbgfs.dbg_lvl >= MSM_DBG))
+		i2c_msm_prof_evnt_add(ctrl, MSM_PROF,
+					I2C_MSM_IRQ_END,
+					ctrl->i2c_sts_reg, ctrl->qup_op_reg,
+					err_flags);
+
+	if (signal_complete)
+		complete(&ctrl->xfer.complete);
+
+	return IRQ_HANDLED;
+}
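+
+/*
+ * ISR decision summary: any error bit records xfer.err, resets the core
+ * (flushing first in DMA mode) and completes the transfer. Otherwise, in
+ * block mode the OUTPUT/INPUT service flags complete the per-block tx/rx
+ * waiters, while in FIFO/DMA mode input-done or output-done completes the
+ * transfer, depending on whether the last transaction is a read.
+ */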
+
+static void i2x_msm_blk_free_cache(struct i2c_msm_ctrl *ctrl)
+{
+	kfree(ctrl->xfer.blk.tx_cache);
+	kfree(ctrl->xfer.blk.rx_cache);
+}
+
+static void i2c_msm_qup_init(struct i2c_msm_ctrl *ctrl)
+{
+	u32 state;
+	void __iomem *base = ctrl->rsrcs.base;
+
+	i2c_msm_prof_evnt_add(ctrl, MSM_PROF, I2C_MSM_PROF_RESET, 0, 0, 0);
+
+	i2c_msm_qup_sw_reset(ctrl);
+	i2c_msm_qup_state_set(ctrl, QUP_STATE_RESET);
+
+	writel_relaxed(QUP_N_VAL | QUP_MINI_CORE_I2C_VAL, base + QUP_CONFIG);
+
+	writel_relaxed(QUP_OUTPUT_OVER_RUN_ERR_EN | QUP_INPUT_UNDER_RUN_ERR_EN
+		     | QUP_OUTPUT_UNDER_RUN_ERR_EN | QUP_INPUT_OVER_RUN_ERR_EN,
+					base + QUP_ERROR_FLAGS_EN);
+
+	writel_relaxed(QUP_INPUT_SERVICE_MASK | QUP_OUTPUT_SERVICE_MASK,
+					base + QUP_OPERATIONAL_MASK);
+
+	writel_relaxed(QUP_EN_VERSION_TWO_TAG, base + QUP_I2C_MASTER_CONFIG);
+
+	i2c_msm_qup_fifo_calc_size(ctrl);
+	/*
+	 * Ensure that the QUP configuration is written and that the fifo
+	 * size is read before leaving this function
+	 */
+	mb();
+
+	state = readl_relaxed(base + QUP_STATE);
+
+	if (!(state & QUP_I2C_MAST_GEN))
+		dev_err(ctrl->dev,
+			"error on verifying HW support (I2C_MAST_GEN=0)\n");
+}
+
+/*
+ * qup_i2c_try_recover_bus_busy: issue QUP bus clear command
+ */
+static int qup_i2c_try_recover_bus_busy(struct i2c_msm_ctrl *ctrl)
+{
+	int ret;
+	ulong min_sleep_usec;
+
+	/* call i2c_msm_qup_init() to set core in idle state */
+	i2c_msm_qup_init(ctrl);
+
+	/* must be in run state for bus clear */
+	ret = i2c_msm_qup_state_set(ctrl, QUP_STATE_RUN);
+	if (ret < 0) {
+		dev_err(ctrl->dev, "error: bus clear fail to set run state\n");
+		return ret;
+	}
+
+	/*
+	 * call i2c_msm_qup_xfer_init_run_state() to set clock dividers.
+	 * the dividers are necessary for bus clear.
+	 */
+	i2c_msm_qup_xfer_init_run_state(ctrl);
+
+	writel_relaxed(0x1, ctrl->rsrcs.base + QUP_I2C_MASTER_BUS_CLR);
+
+	/*
+	 * wait for the recovery (9 clock pulse cycles) to complete.
+	 * min_time = 9 clocks * 10 (1000% margin)
+	 * max_time = 10 * min_time
+	 */
+	min_sleep_usec =
+	  max_t(ulong, (9 * 10 * USEC_PER_SEC) / ctrl->rsrcs.clk_freq_out, 100);
+
+	usleep_range(min_sleep_usec, min_sleep_usec * 10);
+	return ret;
+}
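+
+/*
+ * Worked example of the sleep bounds above: at clk_freq_out = 400 kHz,
+ * min_sleep_usec = max(9 * 10 * 1000000 / 400000, 100) = 225 us, so the
+ * sleep range is 225..2250 us; at 100 kHz it is 900..9000 us.
+ */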
+
+static int qup_i2c_recover_bus_busy(struct i2c_msm_ctrl *ctrl)
+{
+	u32 bus_clr, bus_active, status;
+	int retry = 0;
+
+	dev_info(ctrl->dev, "Executing bus recovery procedure (9 clk pulse)\n");
+
+	do {
+		qup_i2c_try_recover_bus_busy(ctrl);
+		bus_clr    = readl_relaxed(ctrl->rsrcs.base +
+							QUP_I2C_MASTER_BUS_CLR);
+		status     = readl_relaxed(ctrl->rsrcs.base + QUP_I2C_STATUS);
+		bus_active = status & I2C_STATUS_BUS_ACTIVE;
+		if (++retry >= I2C_QUP_MAX_BUS_RECOVERY_RETRY)
+			break;
+	} while (bus_clr || bus_active);
+
+	dev_info(ctrl->dev, "Bus recovery %s after %d retries\n",
+		(bus_clr || bus_active) ? "fail" : "success", retry);
+	return 0;
+}
+
+static int i2c_msm_qup_post_xfer(struct i2c_msm_ctrl *ctrl, int err)
+{
+	/* poll until bus is released */
+	if (err || i2c_msm_qup_poll_bus_active_unset(ctrl)) {
+		if ((ctrl->xfer.err == I2C_MSM_ERR_ARB_LOST) ||
+		    (ctrl->xfer.err == I2C_MSM_ERR_BUS_ERR)  ||
+		    (ctrl->xfer.err == I2C_MSM_ERR_TIMEOUT)) {
+			if (err || i2c_msm_qup_slv_holds_bus(ctrl))
+				qup_i2c_recover_bus_busy(ctrl);
+
+			/* do not generalize the error to EIO if it's already set */
+			if (!err)
+				err = -EIO;
+		}
+	}
+
+	/*
+	 * Disable the IRQ before changing to the reset state to avoid
+	 * spurious interrupts.
+	 */
+	disable_irq(ctrl->rsrcs.irq);
+
+	/* flush dma data and reset the qup core on a timeout error.
+	 * other error cases are handled by the ISR
+	 */
+	if (ctrl->xfer.err & I2C_MSM_ERR_TIMEOUT) {
+		/* Flush for the DMA registers */
+		if (ctrl->xfer.mode_id == I2C_MSM_XFER_MODE_DMA)
+			writel_relaxed(QUP_I2C_FLUSH, ctrl->rsrcs.base
+								+ QUP_STATE);
+
+		/* reset the qup core */
+		i2c_msm_qup_state_set(ctrl, QUP_STATE_RESET);
+		err = -ETIMEDOUT;
+	} else if (ctrl->xfer.err == I2C_MSM_ERR_NACK) {
+		err = -ENOTCONN;
+	}
+
+	return err;
+}
+
+static enum i2c_msm_xfer_mode_id
+i2c_msm_qup_choose_mode(struct i2c_msm_ctrl *ctrl)
+{
+	struct i2c_msm_xfer_mode_fifo *fifo = &ctrl->xfer.fifo;
+	struct i2c_msm_xfer           *xfer = &ctrl->xfer;
+	size_t rx_cnt_sum = xfer->rx_cnt + xfer->rx_ovrhd_cnt;
+	size_t tx_cnt_sum = xfer->tx_cnt + xfer->tx_ovrhd_cnt;
+
+	if (ctrl->dbgfs.force_xfer_mode != I2C_MSM_XFER_MODE_NONE)
+		return ctrl->dbgfs.force_xfer_mode;
+
+	if (((rx_cnt_sum < fifo->input_fifo_sz) &&
+		(tx_cnt_sum < fifo->output_fifo_sz)))
+		return I2C_MSM_XFER_MODE_FIFO;
+
+	if (ctrl->rsrcs.disable_dma)
+		return I2C_MSM_XFER_MODE_BLOCK;
+
+	return I2C_MSM_XFER_MODE_DMA;
+}
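+
+/*
+ * In other words: a debugfs-forced mode always wins; otherwise FIFO mode
+ * is picked whenever payload plus tag overhead fits within both HW FIFOs,
+ * and larger transfers fall back to block mode when DMA is disabled in DT,
+ * or DMA mode otherwise.
+ */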
+
+/*
+ * i2c_msm_xfer_calc_timeout: calc maximum xfer time in jiffies
+ *
+ * Basically timeout = (bit_count / frequency) * safety_coefficient.
+ * The safety-coefficient also accounts for debugging delay (mostly from
+ * printk() calls).
+ */
+static void i2c_msm_xfer_calc_timeout(struct i2c_msm_ctrl *ctrl)
+{
+	size_t byte_cnt = ctrl->xfer.rx_cnt + ctrl->xfer.tx_cnt;
+	size_t bit_cnt  = byte_cnt * 9;
+	size_t bit_usec = (bit_cnt * USEC_PER_SEC) / ctrl->rsrcs.clk_freq_out;
+	size_t loging_ovrhd_coef = ctrl->dbgfs.dbg_lvl + 1;
+	size_t safety_coef   = I2C_MSM_TIMEOUT_SAFTY_COEF * loging_ovrhd_coef;
+	size_t xfer_max_usec = (bit_usec * safety_coef) +
+						I2C_MSM_TIMEOUT_MIN_USEC;
+
+	ctrl->xfer.timeout = usecs_to_jiffies(xfer_max_usec);
+}
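+
+/*
+ * Worked example: a 100 byte transfer at 100 kHz gives bit_cnt = 900
+ * (9 clocks per byte, including the ACK bit), i.e. bit_usec = 9000 us.
+ * That is then scaled by I2C_MSM_TIMEOUT_SAFTY_COEF (times dbg_lvl + 1)
+ * and padded with I2C_MSM_TIMEOUT_MIN_USEC; both constants are defined
+ * elsewhere in this file.
+ */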
+
+static int i2c_msm_xfer_wait_for_completion(struct i2c_msm_ctrl *ctrl,
+						struct completion *complete)
+{
+	struct i2c_msm_xfer *xfer = &ctrl->xfer;
+	long  time_left;
+	int   ret = 0;
+
+	time_left = wait_for_completion_timeout(complete,
+						xfer->timeout);
+	if (!time_left) {
+		xfer->err = I2C_MSM_ERR_TIMEOUT;
+		i2c_msm_dbg_dump_diag(ctrl, false, 0, 0);
+		ret = -EIO;
+		i2c_msm_prof_evnt_add(ctrl, MSM_ERR, I2C_MSM_COMPLT_FL,
+					xfer->timeout, time_left, 0);
+	} else {
+		/* return an error if one detected by ISR */
+		if (ctrl->xfer.err ||
+				(ctrl->dbgfs.dbg_lvl >= MSM_DBG)) {
+			i2c_msm_dbg_dump_diag(ctrl, true,
+					ctrl->i2c_sts_reg, ctrl->qup_op_reg);
+			ret = -(xfer->err);
+		}
+		i2c_msm_prof_evnt_add(ctrl, MSM_DBG, I2C_MSM_COMPLT_OK,
+					xfer->timeout, time_left, 0);
+	}
+
+	return ret;
+}
+
+static u16 i2c_msm_slv_rd_wr_addr(u16 slv_addr, bool is_rx)
+{
+	return (slv_addr << 1) | (is_rx ? 0x1 : 0x0);
+}
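+
+/*
+ * E.g. the 7-bit address 0x50 becomes 0xa0 on the wire for a write and
+ * 0xa1 for a read (address shifted left, R/W bit in bit 0).
+ */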
+
+/*
+ * @return true when the current transfer's buffer points to the last message
+ *    of the user's request.
+ */
+static bool i2c_msm_xfer_msg_is_last(struct i2c_msm_ctrl *ctrl)
+{
+	return ctrl->xfer.cur_buf.msg_idx >= (ctrl->xfer.msg_cnt - 1);
+}
+
+/*
+ * @return true when the current transfer's buffer points to the last
+ *    transferable buffer (size =< QUP_MAX_BUF_SZ) of the last message of the
+ *    user's request.
+ */
+static bool i2c_msm_xfer_buf_is_last(struct i2c_msm_ctrl *ctrl)
+{
+	struct i2c_msm_xfer_buf *cur_buf = &ctrl->xfer.cur_buf;
+	struct i2c_msg *cur_msg = ctrl->xfer.msgs + cur_buf->msg_idx;
+
+	return i2c_msm_xfer_msg_is_last(ctrl) &&
+		((cur_buf->byte_idx + QUP_MAX_BUF_SZ) >= cur_msg->len);
+}
+
+static void i2c_msm_xfer_create_cur_tag(struct i2c_msm_ctrl *ctrl,
+								bool start_req)
+{
+	struct i2c_msm_xfer_buf *cur_buf = &ctrl->xfer.cur_buf;
+
+	cur_buf->out_tag = i2c_msm_tag_create(start_req, cur_buf->is_last,
+					cur_buf->is_rx, cur_buf->len,
+					cur_buf->slv_addr);
+
+	cur_buf->in_tag.len = cur_buf->is_rx ? QUP_BUF_OVERHD_BC : 0;
+}
+
+/*
+ * i2c_msm_xfer_next_buf: support cases when msg.len > 256 bytes
+ *
+ * @return true when next buffer exist, or false when no such buffer
+ */
+static bool i2c_msm_xfer_next_buf(struct i2c_msm_ctrl *ctrl)
+{
+	struct i2c_msm_xfer_buf *cur_buf = &ctrl->xfer.cur_buf;
+	struct i2c_msg          *cur_msg = ctrl->xfer.msgs + cur_buf->msg_idx;
+	int bc_rem = cur_msg->len - cur_buf->end_idx;
+
+	if (cur_buf->is_init && cur_buf->end_idx && bc_rem) {
+		/* not the first buffer in a message */
+
+		cur_buf->byte_idx  = cur_buf->end_idx;
+		cur_buf->is_last   = i2c_msm_xfer_buf_is_last(ctrl);
+		cur_buf->len       = min_t(int, bc_rem, QUP_MAX_BUF_SZ);
+		cur_buf->end_idx  += cur_buf->len;
+
+		/* No Start is required if it is not a first buffer in msg */
+		i2c_msm_xfer_create_cur_tag(ctrl, false);
+	} else {
+		/* first buffer in a new message */
+		if (cur_buf->is_init) {
+			if (i2c_msm_xfer_msg_is_last(ctrl)) {
+				return false;
+			} else {
+				++cur_buf->msg_idx;
+				++cur_msg;
+			}
+		} else {
+			cur_buf->is_init = true;
+		}
+		cur_buf->byte_idx  = 0;
+		cur_buf->is_last   = i2c_msm_xfer_buf_is_last(ctrl);
+		cur_buf->len       = min_t(int, cur_msg->len, QUP_MAX_BUF_SZ);
+		cur_buf->is_rx     = (cur_msg->flags & I2C_M_RD);
+		cur_buf->end_idx   = cur_buf->len;
+		cur_buf->slv_addr  = i2c_msm_slv_rd_wr_addr(cur_msg->addr,
+								cur_buf->is_rx);
+		i2c_msm_xfer_create_cur_tag(ctrl, true);
+	}
+	i2c_msm_prof_evnt_add(ctrl, MSM_DBG, I2C_MSM_NEXT_BUF, cur_buf->msg_idx,
+							cur_buf->byte_idx, 0);
+	return true;
+}
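+
+/*
+ * Example, assuming QUP_MAX_BUF_SZ is 256 (per the comment above): a
+ * single 600 byte read is carved into buffers of 256, 256 and 88 bytes;
+ * only the first carries a start request in its tag, and the last one
+ * is flagged is_last.
+ */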
+
+static void i2c_msm_pm_clk_unprepare(struct i2c_msm_ctrl *ctrl)
+{
+	clk_unprepare(ctrl->rsrcs.core_clk);
+	clk_unprepare(ctrl->rsrcs.iface_clk);
+}
+
+static int i2c_msm_pm_clk_prepare(struct i2c_msm_ctrl *ctrl)
+{
+	int ret;
+	ret = clk_prepare(ctrl->rsrcs.iface_clk);
+	if (ret) {
+		dev_err(ctrl->dev,
+			"error on clk_prepare(iface_clk):%d\n", ret);
+		return ret;
+	}
+
+	ret = clk_prepare(ctrl->rsrcs.core_clk);
+	if (ret) {
+		clk_unprepare(ctrl->rsrcs.iface_clk);
+		dev_err(ctrl->dev,
+			"error clk_prepare(core_clk):%d\n", ret);
+	}
+	return ret;
+}
+
+static void i2c_msm_pm_clk_disable(struct i2c_msm_ctrl *ctrl)
+{
+	clk_disable(ctrl->rsrcs.core_clk);
+	clk_disable(ctrl->rsrcs.iface_clk);
+}
+
+static int i2c_msm_pm_clk_enable(struct i2c_msm_ctrl *ctrl)
+{
+	int ret;
+
+	ret = clk_enable(ctrl->rsrcs.iface_clk);
+	if (ret) {
+		dev_err(ctrl->dev,
+			"error on clk_enable(iface_clk):%d\n", ret);
+		i2c_msm_pm_clk_unprepare(ctrl);
+		return ret;
+	}
+	ret = clk_enable(ctrl->rsrcs.core_clk);
+	if (ret) {
+		clk_disable(ctrl->rsrcs.iface_clk);
+		i2c_msm_pm_clk_unprepare(ctrl);
+		dev_err(ctrl->dev,
+			"error clk_enable(core_clk):%d\n", ret);
+	}
+	return ret;
+}
+
+static int i2c_msm_pm_xfer_start(struct i2c_msm_ctrl *ctrl)
+{
+	int ret;
+	mutex_lock(&ctrl->xfer.mtx);
+
+	i2c_msm_pm_pinctrl_state(ctrl, true);
+	pm_runtime_get_sync(ctrl->dev);
+	/*
+	 * if the runtime PM callback was not invoked (when both runtime-pm
+	 * and system-pm are in transition concurrently)
+	 */
+	if (ctrl->pwr_state != I2C_MSM_PM_RT_ACTIVE) {
+		dev_info(ctrl->dev, "Runtime PM-callback was not invoked.\n");
+		i2c_msm_pm_resume(ctrl->dev);
+	}
+
+	ret = i2c_msm_pm_clk_enable(ctrl);
+	if (ret) {
+		mutex_unlock(&ctrl->xfer.mtx);
+		return ret;
+	}
+	i2c_msm_qup_init(ctrl);
+
+	/* Set xfer to active state (effectively enabling our ISR) */
+	atomic_set(&ctrl->xfer.is_active, 1);
+
+	enable_irq(ctrl->rsrcs.irq);
+	return 0;
+}
+
+static void i2c_msm_pm_xfer_end(struct i2c_msm_ctrl *ctrl)
+{
+	atomic_set(&ctrl->xfer.is_active, 0);
+
+	/*
+	 * DMA resources are freed due to the multi-EE use case:
+	 * other EEs can potentially use the DMA resources within
+	 * the same runtime PM vote.
+	 */
+	if (ctrl->xfer.mode_id == I2C_MSM_XFER_MODE_DMA)
+		i2c_msm_dma_free_channels(ctrl);
+
+	i2c_msm_pm_clk_disable(ctrl);
+
+	if (!pm_runtime_enabled(ctrl->dev))
+		i2c_msm_pm_suspend(ctrl->dev);
+
+	pm_runtime_mark_last_busy(ctrl->dev);
+	pm_runtime_put_autosuspend(ctrl->dev);
+	i2c_msm_pm_pinctrl_state(ctrl, false);
+	mutex_unlock(&ctrl->xfer.mtx);
+}
+
+/*
+ * i2c_msm_xfer_scan: initial input scan
+ */
+static void i2c_msm_xfer_scan(struct i2c_msm_ctrl *ctrl)
+{
+	struct i2c_msm_xfer     *xfer      = &ctrl->xfer;
+	struct i2c_msm_xfer_buf *cur_buf   = &xfer->cur_buf;
+
+	while (i2c_msm_xfer_next_buf(ctrl)) {
+
+		if (cur_buf->is_rx)
+			xfer->rx_cnt += cur_buf->len;
+		else
+			xfer->tx_cnt += cur_buf->len;
+
+		xfer->rx_ovrhd_cnt += cur_buf->in_tag.len;
+		xfer->tx_ovrhd_cnt += cur_buf->out_tag.len;
+
+		if (i2c_msm_xfer_msg_is_last(ctrl))
+			xfer->last_is_rx = cur_buf->is_rx;
+	}
+	xfer->cur_buf = (struct i2c_msm_xfer_buf){0};
+}
+
+static int
+i2c_msm_frmwrk_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
+{
+	int ret = 0;
+	struct i2c_msm_ctrl      *ctrl = i2c_get_adapdata(adap);
+	struct i2c_msm_xfer      *xfer = &ctrl->xfer;
+
+	if (num < 1) {
+		dev_err(ctrl->dev,
+		"error on number of msgs(%d) received\n", num);
+		return -EINVAL;
+	}
+
+	if (IS_ERR_OR_NULL(msgs)) {
+		dev_err(ctrl->dev, "error on msgs: invalid pointer\n");
+		return msgs ? PTR_ERR(msgs) : -EINVAL;
+	}
+
+	/* if system is suspended just bail out */
+	if (ctrl->pwr_state == I2C_MSM_PM_SYS_SUSPENDED) {
+		dev_err(ctrl->dev,
+				"slave:0x%x is calling xfer when system is suspended\n",
+				msgs->addr);
+		return -EIO;
+	}
+
+	ret = i2c_msm_pm_xfer_start(ctrl);
+	if (ret)
+		return ret;
+
+	/* init xfer */
+	xfer->msgs         = msgs;
+	xfer->msg_cnt      = num;
+	xfer->mode_id      = I2C_MSM_XFER_MODE_NONE;
+	xfer->err          = 0;
+	xfer->rx_cnt       = 0;
+	xfer->tx_cnt       = 0;
+	xfer->rx_ovrhd_cnt = 0;
+	xfer->tx_ovrhd_cnt = 0;
+	atomic_set(&xfer->event_cnt, 0);
+	init_completion(&xfer->complete);
+	init_completion(&xfer->rx_complete);
+
+	xfer->cur_buf.is_init = false;
+	xfer->cur_buf.msg_idx = 0;
+
+	i2c_msm_prof_evnt_add(ctrl, MSM_PROF, I2C_MSM_XFER_BEG, num,
+								msgs->addr, 0);
+
+	i2c_msm_xfer_scan(ctrl);
+	i2c_msm_xfer_calc_timeout(ctrl);
+	xfer->mode_id = i2c_msm_qup_choose_mode(ctrl);
+
+	dev_dbg(ctrl->dev, "xfer() mode:%d msg_cnt:%d rx_cnt:%zu tx_cnt:%zu\n",
+		xfer->mode_id, xfer->msg_cnt, xfer->rx_cnt, xfer->tx_cnt);
+
+	switch (xfer->mode_id) {
+	case I2C_MSM_XFER_MODE_FIFO:
+		ret = i2c_msm_fifo_xfer(ctrl);
+		break;
+	case I2C_MSM_XFER_MODE_BLOCK:
+		ret = i2c_msm_blk_xfer(ctrl);
+		break;
+	case I2C_MSM_XFER_MODE_DMA:
+		ret = i2c_msm_dma_xfer(ctrl);
+		break;
+	default:
+		ret = -EINTR;
+	}
+
+	i2c_msm_prof_evnt_add(ctrl, MSM_PROF, I2C_MSM_SCAN_SUM,
+		((xfer->rx_cnt & 0xff) | ((xfer->rx_ovrhd_cnt & 0xff) << 16)),
+		((xfer->tx_cnt & 0xff) | ((xfer->tx_ovrhd_cnt & 0xff) << 16)),
+		((ctrl->xfer.timeout & 0xfff) | ((xfer->mode_id & 0xf) << 24)));
+
+	ret = i2c_msm_qup_post_xfer(ctrl, ret);
+	/* on success, return the number of messages sent (which is index + 1) */
+	if (!ret)
+		ret = xfer->cur_buf.msg_idx + 1;
+
+	i2c_msm_prof_evnt_add(ctrl, MSM_PROF, I2C_MSM_XFER_END, ret, xfer->err,
+						xfer->cur_buf.msg_idx + 1);
+	/* process and dump profiling data */
+	if (xfer->err || (ctrl->dbgfs.dbg_lvl >= MSM_PROF))
+		i2c_msm_prof_evnt_dump(ctrl);
+
+	i2c_msm_pm_xfer_end(ctrl);
+	return ret;
+}
+
+enum i2c_msm_dt_entry_status {
+	DT_REQ,  /* Required:  fail if missing */
+	DT_SGST, /* Suggested: warn if missing */
+	DT_OPT,  /* Optional:  don't warn if missing */
+};
+
+enum i2c_msm_dt_entry_type {
+	DT_U32,
+	DT_BOOL,
+	DT_ID,   /* of_alias_get_id() */
+};
+
+struct i2c_msm_dt_to_pdata_map {
+	const char                  *dt_name;
+	void                        *ptr_data;
+	enum i2c_msm_dt_entry_status status;
+	enum i2c_msm_dt_entry_type   type;
+	int                          default_val;
+};
+
+static int i2c_msm_dt_to_pdata_populate(struct i2c_msm_ctrl *ctrl,
+					struct platform_device *pdev,
+					struct i2c_msm_dt_to_pdata_map *itr)
+{
+	int  ret, err = 0;
+	struct device_node *node = pdev->dev.of_node;
+
+	for (; itr->dt_name ; ++itr) {
+		switch (itr->type) {
+		case DT_U32:
+			ret = of_property_read_u32(node, itr->dt_name,
+							 (u32 *) itr->ptr_data);
+			break;
+		case DT_BOOL:
+			*((bool *) itr->ptr_data) =
+				of_property_read_bool(node, itr->dt_name);
+			ret = 0;
+			break;
+		case DT_ID:
+			ret = of_alias_get_id(node, itr->dt_name);
+			if (ret >= 0) {
+				*((int *) itr->ptr_data) = ret;
+				ret = 0;
+			}
+			break;
+		default:
+			dev_err(ctrl->dev,
+				"error %d is of unknown DT entry type\n",
+				itr->type);
+			ret = -EBADE;
+		}
+
+		i2c_msm_dbg(ctrl, MSM_PROF, "DT entry ret:%d name:%s val:%d",
+				ret, itr->dt_name, *((int *)itr->ptr_data));
+
+		if (ret) {
+			*((int *)itr->ptr_data) = itr->default_val;
+
+			if (itr->status < DT_OPT) {
+				dev_err(ctrl->dev,
+					"error Missing '%s' DT entry\n",
+					itr->dt_name);
+
+				/* cont on err to dump all missing entries */
+				if (itr->status == DT_REQ && !err)
+					err = ret;
+			}
+		}
+	}
+
+	return err;
+}
+
+
+/*
+ * i2c_msm_rsrcs_process_dt: copy data from DT to platform data
+ * @return zero on success or negative error code
+ */
+static int i2c_msm_rsrcs_process_dt(struct i2c_msm_ctrl *ctrl,
+					struct platform_device *pdev)
+{
+	u32 fs_clk_div, ht_clk_div, noise_rjct_scl, noise_rjct_sda;
+	int ret;
+
+	struct i2c_msm_dt_to_pdata_map map[] = {
+	{"i2c",				&pdev->id,	DT_REQ,  DT_ID,  -1},
+	{"qcom,clk-freq-out",		&ctrl->rsrcs.clk_freq_out,
+							DT_REQ,  DT_U32,  0},
+	{"qcom,clk-freq-in",		&ctrl->rsrcs.clk_freq_in,
+							DT_REQ,  DT_U32,  0},
+	{"qcom,disable-dma",		&(ctrl->rsrcs.disable_dma),
+							DT_OPT,  DT_BOOL, 0},
+	{"qcom,master-id",		&(ctrl->rsrcs.clk_path_vote.mstr_id),
+							DT_SGST, DT_U32,  0},
+	{"qcom,noise-rjct-scl",		&noise_rjct_scl,
+							DT_OPT,  DT_U32,  0},
+	{"qcom,noise-rjct-sda",		&noise_rjct_sda,
+							DT_OPT,  DT_U32,  0},
+	{"qcom,high-time-clk-div",	&ht_clk_div,
+							DT_OPT,  DT_U32,  0},
+	{"qcom,fs-clk-div",		&fs_clk_div,
+							DT_OPT,  DT_U32,  0},
+	{NULL,  NULL,					0,       0,       0},
+	};
+
+	ret = i2c_msm_dt_to_pdata_populate(ctrl, pdev, map);
+	if (ret)
+		return ret;
+
+	/* set divider and noise reject values */
+	return i2c_msm_set_mstr_clk_ctl(ctrl, fs_clk_div, ht_clk_div,
+						noise_rjct_scl, noise_rjct_sda);
+}
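+
+/*
+ * A minimal sketch of a DT node consumed by the map above. The label,
+ * alias, and property values here are illustrative assumptions, not
+ * copied from a shipping device tree:
+ *
+ *	aliases { i2c6 = &blsp1_qup6; };
+ *
+ *	blsp1_qup6: i2c@... {
+ *		compatible = "qcom,i2c-msm-v2";
+ *		qcom,clk-freq-out = <400000>;	// 400 kHz on the bus
+ *		qcom,clk-freq-in = <19200000>;	// 19.2 MHz core clock
+ *		qcom,master-id = <86>;
+ *		qcom,disable-dma;
+ *	};
+ */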
+
+/*
+ * i2c_msm_rsrcs_mem_init: reads pdata request region and ioremap it
+ * @return zero on success or negative error code
+ */
+static int i2c_msm_rsrcs_mem_init(struct platform_device *pdev,
+						struct i2c_msm_ctrl *ctrl)
+{
+	struct resource *mem_region;
+
+	ctrl->rsrcs.mem = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+							"qup_phys_addr");
+	if (!ctrl->rsrcs.mem) {
+		dev_err(ctrl->dev, "error Missing 'qup_phys_addr' resource\n");
+		return -ENODEV;
+	}
+
+	mem_region = request_mem_region(ctrl->rsrcs.mem->start,
+					resource_size(ctrl->rsrcs.mem),
+					pdev->name);
+	if (!mem_region) {
+		dev_err(ctrl->dev,
+			"QUP physical memory region already claimed\n");
+		return -EBUSY;
+	}
+
+	ctrl->rsrcs.base = devm_ioremap(ctrl->dev, ctrl->rsrcs.mem->start,
+				   resource_size(ctrl->rsrcs.mem));
+	if (!ctrl->rsrcs.base) {
+		dev_err(ctrl->dev,
+			"error failed ioremap(base:0x%llx size:0x%llx\n)",
+			(u64) ctrl->rsrcs.mem->start,
+			(u64) resource_size(ctrl->rsrcs.mem));
+		release_mem_region(ctrl->rsrcs.mem->start,
+						resource_size(ctrl->rsrcs.mem));
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+static void i2c_msm_rsrcs_mem_teardown(struct i2c_msm_ctrl *ctrl)
+{
+	release_mem_region(ctrl->rsrcs.mem->start,
+						resource_size(ctrl->rsrcs.mem));
+}
+
+/*
+ * i2c_msm_rsrcs_irq_init: finds irq num in pdata and requests it
+ * @return zero on success or negative error code
+ */
+static int i2c_msm_rsrcs_irq_init(struct platform_device *pdev,
+						struct i2c_msm_ctrl *ctrl)
+{
+	int ret, irq;
+
+	irq = platform_get_irq_byname(pdev, "qup_irq");
+	if (irq < 0) {
+		dev_err(ctrl->dev, "error reading irq resource\n");
+		return irq;
+	}
+
+	ret = request_irq(irq, i2c_msm_qup_isr, IRQF_TRIGGER_HIGH,
+						"i2c-msm-v2-irq", ctrl);
+	if (ret) {
+		dev_err(ctrl->dev, "error request_irq(irq_num:%d ) ret:%d\n",
+								irq, ret);
+		return ret;
+	}
+
+	disable_irq(irq);
+	ctrl->rsrcs.irq = irq;
+	return 0;
+}
+
+static void i2c_msm_rsrcs_irq_teardown(struct i2c_msm_ctrl *ctrl)
+{
+	free_irq(ctrl->rsrcs.irq, ctrl);
+}
+
+static struct pinctrl_state *
+i2c_msm_rsrcs_gpio_get_state(struct i2c_msm_ctrl *ctrl, const char *name)
+{
+	struct pinctrl_state *pin_state
+			= pinctrl_lookup_state(ctrl->rsrcs.pinctrl, name);
+
+	if (IS_ERR_OR_NULL(pin_state))
+		dev_info(ctrl->dev, "note pinctrl_lookup_state(%s) err:%ld\n",
+						name, PTR_ERR(pin_state));
+	return pin_state;
+}
+
+/*
+ * i2c_msm_rsrcs_gpio_pinctrl_init: initializes the pinctrl for i2c gpios
+ *
+ * @pre platform data must be initialized
+ */
+static int i2c_msm_rsrcs_gpio_pinctrl_init(struct i2c_msm_ctrl *ctrl)
+{
+	ctrl->rsrcs.pinctrl = devm_pinctrl_get(ctrl->dev);
+	if (IS_ERR_OR_NULL(ctrl->rsrcs.pinctrl)) {
+		dev_err(ctrl->dev, "error devm_pinctrl_get() failed err:%ld\n",
+				PTR_ERR(ctrl->rsrcs.pinctrl));
+		return PTR_ERR(ctrl->rsrcs.pinctrl);
+	}
+
+	ctrl->rsrcs.gpio_state_active =
+		i2c_msm_rsrcs_gpio_get_state(ctrl, I2C_MSM_PINCTRL_ACTIVE);
+
+	ctrl->rsrcs.gpio_state_suspend =
+		i2c_msm_rsrcs_gpio_get_state(ctrl, I2C_MSM_PINCTRL_SUSPEND);
+
+	return 0;
+}
+
+static void i2c_msm_pm_pinctrl_state(struct i2c_msm_ctrl *ctrl,
+				bool runtime_active)
+{
+	struct pinctrl_state *pins_state;
+	const char           *pins_state_name;
+
+	if (runtime_active) {
+		pins_state      = ctrl->rsrcs.gpio_state_active;
+		pins_state_name = I2C_MSM_PINCTRL_ACTIVE;
+	} else {
+		pins_state      = ctrl->rsrcs.gpio_state_suspend;
+		pins_state_name = I2C_MSM_PINCTRL_SUSPEND;
+	}
+
+	if (!IS_ERR_OR_NULL(pins_state)) {
+		int ret = pinctrl_select_state(ctrl->rsrcs.pinctrl, pins_state);
+		if (ret)
+			dev_err(ctrl->dev,
+			"error pinctrl_select_state(%s) err:%d\n",
+			pins_state_name, ret);
+	} else {
+		dev_err(ctrl->dev,
+			"error pinctrl state-name:'%s' is not configured\n",
+			pins_state_name);
+	}
+}
+
+/*
+ * i2c_msm_rsrcs_clk_init: get clocks and set rate
+ *
+ * @return zero on success or negative error code
+ */
+static int i2c_msm_rsrcs_clk_init(struct i2c_msm_ctrl *ctrl)
+{
+	int ret = 0;
+
+	if ((ctrl->rsrcs.clk_freq_out <= 0) ||
+	    (ctrl->rsrcs.clk_freq_out > I2C_MSM_CLK_FAST_PLUS_FREQ)) {
+		dev_err(ctrl->dev,
+			"error clock frequency %dKHZ is not supported\n",
+			(ctrl->rsrcs.clk_freq_out / 1000));
+		return -EIO;
+	}
+
+	ctrl->rsrcs.core_clk = clk_get(ctrl->dev, "core_clk");
+	if (IS_ERR(ctrl->rsrcs.core_clk)) {
+		ret = PTR_ERR(ctrl->rsrcs.core_clk);
+		dev_err(ctrl->dev, "error on clk_get(core_clk):%d\n", ret);
+		return ret;
+	}
+
+	ret = clk_set_rate(ctrl->rsrcs.core_clk, ctrl->rsrcs.clk_freq_in);
+	if (ret) {
+		dev_err(ctrl->dev, "error on clk_set_rate(core_clk, %dKHz):%d\n",
+					(ctrl->rsrcs.clk_freq_in / 1000), ret);
+		goto err_set_rate;
+	}
+
+	ctrl->rsrcs.iface_clk = clk_get(ctrl->dev, "iface_clk");
+	if (IS_ERR(ctrl->rsrcs.iface_clk)) {
+		ret = PTR_ERR(ctrl->rsrcs.iface_clk);
+		dev_err(ctrl->dev, "error on clk_get(iface_clk):%d\n", ret);
+		goto err_set_rate;
+	}
+
+	return 0;
+
+err_set_rate:
+	clk_put(ctrl->rsrcs.core_clk);
+	ctrl->rsrcs.core_clk = NULL;
+	return ret;
+}
+
+static void i2c_msm_rsrcs_clk_teardown(struct i2c_msm_ctrl *ctrl)
+{
+	clk_put(ctrl->rsrcs.core_clk);
+	clk_put(ctrl->rsrcs.iface_clk);
+	i2c_msm_clk_path_teardown(ctrl);
+}
+
+static void i2c_msm_pm_suspend(struct device *dev)
+{
+	struct i2c_msm_ctrl *ctrl = dev_get_drvdata(dev);
+
+	if (ctrl->pwr_state == I2C_MSM_PM_RT_SUSPENDED) {
+		dev_err(ctrl->dev, "attempt to suspend when suspended\n");
+		return;
+	}
+	i2c_msm_dbg(ctrl, MSM_DBG, "suspending...");
+	i2c_msm_pm_clk_unprepare(ctrl);
+	i2c_msm_clk_path_unvote(ctrl);
+
+	/*
+	 * We implement system and runtime suspend the same way. However, it
+	 * is important to distinguish between them when servicing a transfer
+	 * request. If a transfer request arrives while we are runtime
+	 * suspended, we simply wake up and service it. But if a request
+	 * arrives while the system is suspending, we must bail out on it.
+	 * This is why, once we have marked that we are in system suspend,
+	 * we do not override that state with runtime suspend.
+	 */
+	if (ctrl->pwr_state != I2C_MSM_PM_SYS_SUSPENDED)
+		ctrl->pwr_state = I2C_MSM_PM_RT_SUSPENDED;
+}
+
+static int i2c_msm_pm_resume(struct device *dev)
+{
+	struct i2c_msm_ctrl *ctrl = dev_get_drvdata(dev);
+
+	if (ctrl->pwr_state == I2C_MSM_PM_RT_ACTIVE)
+		return 0;
+
+	i2c_msm_dbg(ctrl, MSM_DBG, "resuming...");
+
+	i2c_msm_clk_path_vote(ctrl);
+	i2c_msm_pm_clk_prepare(ctrl);
+	ctrl->pwr_state = I2C_MSM_PM_RT_ACTIVE;
+	return 0;
+}
+
+#ifdef CONFIG_PM
+/*
+ * i2c_msm_pm_sys_suspend_noirq: system power management callback
+ */
+static int i2c_msm_pm_sys_suspend_noirq(struct device *dev)
+{
+	int ret = 0;
+	struct i2c_msm_ctrl *ctrl = dev_get_drvdata(dev);
+	enum i2c_msm_power_state prev_state = ctrl->pwr_state;
+	i2c_msm_dbg(ctrl, MSM_DBG, "pm_sys_noirq: suspending...");
+
+	/* Acquire mutex to ensure current transaction is over */
+	mutex_lock(&ctrl->xfer.mtx);
+	ctrl->pwr_state = I2C_MSM_PM_SYS_SUSPENDED;
+	mutex_unlock(&ctrl->xfer.mtx);
+	i2c_msm_dbg(ctrl, MSM_DBG, "pm_sys_noirq: suspending...");
+
+	if (prev_state == I2C_MSM_PM_RT_ACTIVE) {
+		i2c_msm_pm_suspend(dev);
+		/*
+		 * Synchronize runtime-pm and system-pm states:
+		 * at this point we are already suspended. However, the
+		 * runtime-PM framework still thinks that we are active.
+		 * The three calls below let the runtime-PM know that we are
+		 * suspended already without re-invoking the suspend callback
+		 */
+		pm_runtime_disable(dev);
+		pm_runtime_set_suspended(dev);
+		pm_runtime_enable(dev);
+	}
+
+	return ret;
+}
+
+/*
+ * i2c_msm_pm_sys_resume: system power management callback
+ * shifts the controller's power state from system suspend to runtime suspend
+ */
+static int i2c_msm_pm_sys_resume_noirq(struct device *dev)
+{
+	struct i2c_msm_ctrl *ctrl = dev_get_drvdata(dev);
+	i2c_msm_dbg(ctrl, MSM_DBG, "pm_sys_noirq: resuming...");
+	mutex_lock(&ctrl->xfer.mtx);
+	ctrl->pwr_state = I2C_MSM_PM_RT_SUSPENDED;
+	mutex_unlock(&ctrl->xfer.mtx);
+	return 0;
+}
+#endif
+
+#ifdef CONFIG_PM
+static void i2c_msm_pm_rt_init(struct device *dev)
+{
+	pm_runtime_set_suspended(dev);
+	pm_runtime_set_autosuspend_delay(dev, (MSEC_PER_SEC >> 2));
+	pm_runtime_use_autosuspend(dev);
+	pm_runtime_enable(dev);
+}
+
+/*
+ * i2c_msm_pm_rt_suspend: runtime power management callback
+ */
+static int i2c_msm_pm_rt_suspend(struct device *dev)
+{
+	struct i2c_msm_ctrl *ctrl = dev_get_drvdata(dev);
+
+	i2c_msm_dbg(ctrl, MSM_DBG, "pm_runtime: suspending...");
+	i2c_msm_pm_suspend(dev);
+	return 0;
+}
+
+/*
+ * i2c_msm_pm_rt_resume: runtime power management callback
+ */
+static int i2c_msm_pm_rt_resume(struct device *dev)
+{
+	struct i2c_msm_ctrl *ctrl = dev_get_drvdata(dev);
+
+	i2c_msm_dbg(ctrl, MSM_DBG, "pm_runtime: resuming...");
+	return i2c_msm_pm_resume(dev);
+}
+
+#else
+static void i2c_msm_pm_rt_init(struct device *dev) {}
+#define i2c_msm_pm_rt_suspend NULL
+#define i2c_msm_pm_rt_resume NULL
+#endif
+
+static const struct dev_pm_ops i2c_msm_pm_ops = {
+#ifdef CONFIG_PM_SLEEP
+	.suspend_noirq		= i2c_msm_pm_sys_suspend_noirq,
+	.resume_noirq		= i2c_msm_pm_sys_resume_noirq,
+#endif
+	SET_RUNTIME_PM_OPS(i2c_msm_pm_rt_suspend,
+			   i2c_msm_pm_rt_resume,
+			   NULL)
+};
+
+static u32 i2c_msm_frmwrk_func(struct i2c_adapter *adap)
+{
+	return I2C_FUNC_I2C | (I2C_FUNC_SMBUS_EMUL & ~I2C_FUNC_SMBUS_QUICK);
+}
+
+static const struct i2c_algorithm i2c_msm_frmwrk_algrtm = {
+	.master_xfer	= i2c_msm_frmwrk_xfer,
+	.functionality	= i2c_msm_frmwrk_func,
+};
+
+static const char * const i2c_msm_adapter_name = "MSM-I2C-v2-adapter";
+
+static int i2c_msm_frmwrk_reg(struct platform_device *pdev,
+						struct i2c_msm_ctrl *ctrl)
+{
+	int ret;
+
+	i2c_set_adapdata(&ctrl->adapter, ctrl);
+	ctrl->adapter.algo = &i2c_msm_frmwrk_algrtm;
+	strlcpy(ctrl->adapter.name, i2c_msm_adapter_name,
+						sizeof(ctrl->adapter.name));
+
+	ctrl->adapter.nr = pdev->id;
+	ctrl->adapter.dev.parent = &pdev->dev;
+	ctrl->adapter.dev.of_node = pdev->dev.of_node;
+	ret = i2c_add_numbered_adapter(&ctrl->adapter);
+	if (ret) {
+		dev_err(ctrl->dev, "error i2c_add_adapter failed\n");
+		return ret;
+	}
+
+	return ret;
+}
+
+static void i2c_msm_frmwrk_unreg(struct i2c_msm_ctrl *ctrl)
+{
+	i2c_del_adapter(&ctrl->adapter);
+}
+
+static int i2c_msm_probe(struct platform_device *pdev)
+{
+	struct i2c_msm_ctrl *ctrl;
+	int ret = 0;
+
+	dev_info(&pdev->dev, "probing driver i2c-msm-v2\n");
+
+	ctrl = devm_kzalloc(&pdev->dev, sizeof(*ctrl), GFP_KERNEL);
+	if (!ctrl)
+		return -ENOMEM;
+	ctrl->dev = &pdev->dev;
+	platform_set_drvdata(pdev, ctrl);
+	ctrl->dbgfs.dbg_lvl         = DEFAULT_DBG_LVL;
+	ctrl->dbgfs.force_xfer_mode = I2C_MSM_XFER_MODE_NONE;
+	mutex_init(&ctrl->xfer.mtx);
+	ctrl->pwr_state = I2C_MSM_PM_RT_SUSPENDED;
+
+	if (!pdev->dev.of_node) {
+		dev_err(&pdev->dev, "error: null device-tree node");
+		return -EBADE;
+	}
+
+	ret = i2c_msm_rsrcs_process_dt(ctrl, pdev);
+	if (ret) {
+		dev_err(ctrl->dev, "error in process device tree node");
+		return ret;
+	}
+
+	ret = i2c_msm_rsrcs_mem_init(pdev, ctrl);
+	if (ret)
+		goto mem_err;
+
+	ret = i2c_msm_rsrcs_clk_init(ctrl);
+	if (ret)
+		goto clk_err;
+
+	/* vote for clock to enable reading the version number off the HW */
+	i2c_msm_clk_path_vote(ctrl);
+
+	ret = i2c_msm_pm_clk_prepare(ctrl);
+	if (ret)
+		goto clk_err;
+
+	ret = i2c_msm_pm_clk_enable(ctrl);
+	if (ret) {
+		i2c_msm_pm_clk_unprepare(ctrl);
+		goto clk_err;
+	}
+
+	/*
+	 * reset the core before registering for interrupts. This solves an
+	 * interrupt storm issue when the bootloader leaves a pending interrupt.
+	 */
+	ret = i2c_msm_qup_sw_reset(ctrl);
+	if (ret)
+		dev_err(ctrl->dev, "error error on qup software reset\n");
+
+	i2c_msm_pm_clk_disable(ctrl);
+	i2c_msm_pm_clk_unprepare(ctrl);
+	i2c_msm_clk_path_unvote(ctrl);
+
+	ret = i2c_msm_rsrcs_gpio_pinctrl_init(ctrl);
+	if (ret)
+		goto err_no_pinctrl;
+
+	i2c_msm_pm_rt_init(ctrl->dev);
+
+	ret = i2c_msm_rsrcs_irq_init(pdev, ctrl);
+	if (ret)
+		goto irq_err;
+
+	i2c_msm_dbgfs_init(ctrl);
+
+	ret = i2c_msm_frmwrk_reg(pdev, ctrl);
+	if (ret)
+		goto reg_err;
+
+	i2c_msm_dbg(ctrl, MSM_PROF, "probe() completed with success");
+	return 0;
+
+reg_err:
+	i2c_msm_dbgfs_teardown(ctrl);
+	i2c_msm_rsrcs_irq_teardown(ctrl);
+irq_err:
+	i2x_msm_blk_free_cache(ctrl);
+err_no_pinctrl:
+	i2c_msm_rsrcs_clk_teardown(ctrl);
+clk_err:
+	i2c_msm_rsrcs_mem_teardown(ctrl);
+mem_err:
+	dev_err(ctrl->dev, "error probe() failed with err:%d\n", ret);
+	devm_kfree(&pdev->dev, ctrl);
+	return ret;
+}
+
+static int i2c_msm_remove(struct platform_device *pdev)
+{
+	struct i2c_msm_ctrl *ctrl = platform_get_drvdata(pdev);
+
+	/* Grab mutex to ensure ongoing transaction is over */
+	mutex_lock(&ctrl->xfer.mtx);
+	ctrl->pwr_state = I2C_MSM_PM_SYS_SUSPENDED;
+	pm_runtime_disable(ctrl->dev);
+	/* no one can issue a transfer after the next line */
+	i2c_msm_frmwrk_unreg(ctrl);
+	mutex_unlock(&ctrl->xfer.mtx);
+	mutex_destroy(&ctrl->xfer.mtx);
+
+	i2c_msm_dma_teardown(ctrl);
+	i2c_msm_dbgfs_teardown(ctrl);
+	i2c_msm_rsrcs_irq_teardown(ctrl);
+	i2c_msm_rsrcs_clk_teardown(ctrl);
+	i2c_msm_rsrcs_mem_teardown(ctrl);
+	i2x_msm_blk_free_cache(ctrl);
+	return 0;
+}
+
+static const struct of_device_id i2c_msm_dt_match[] = {
+	{
+		.compatible = "qcom,i2c-msm-v2",
+	},
+	{}
+};
+
+static struct platform_driver i2c_msm_driver = {
+	.probe  = i2c_msm_probe,
+	.remove = i2c_msm_remove,
+	.driver = {
+		.name           = "i2c-msm-v2",
+		.owner          = THIS_MODULE,
+		.pm             = &i2c_msm_pm_ops,
+		.of_match_table = i2c_msm_dt_match,
+	},
+};
+
+static int i2c_msm_init(void)
+{
+	return platform_driver_register(&i2c_msm_driver);
+}
+subsys_initcall(i2c_msm_init);
+
+static void i2c_msm_exit(void)
+{
+	platform_driver_unregister(&i2c_msm_driver);
+}
+module_exit(i2c_msm_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:i2c-msm-v2");
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/iio/adc/qcom-rradc.c	2019-01-22 16:16:23.879249811 +0100
@@ -0,0 +1,1206 @@
+/*
+ * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "RRADC: %s: " fmt, __func__
+
+#include <linux/iio/iio.h>
+#include <linux/kernel.h>
+#include <linux/math64.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/delay.h>
+#include <linux/qpnp/qpnp-revid.h>
+#include <linux/power_supply.h>
+
+#define FG_ADC_RR_EN_CTL			0x46
+#define FG_ADC_RR_SKIN_TEMP_LSB			0x50
+#define FG_ADC_RR_SKIN_TEMP_MSB			0x51
+#define FG_ADC_RR_RR_ADC_CTL			0x52
+#define FG_ADC_RR_ADC_CTL_CONTINUOUS_SEL_MASK	0x8
+#define FG_ADC_RR_ADC_CTL_CONTINUOUS_SEL	BIT(3)
+#define FG_ADC_RR_ADC_LOG			0x53
+#define FG_ADC_RR_ADC_LOG_CLR_CTRL		BIT(0)
+
+#define FG_ADC_RR_FAKE_BATT_LOW_LSB		0x58
+#define FG_ADC_RR_FAKE_BATT_LOW_MSB		0x59
+#define FG_ADC_RR_FAKE_BATT_HIGH_LSB		0x5A
+#define FG_ADC_RR_FAKE_BATT_HIGH_MSB		0x5B
+
+#define FG_ADC_RR_BATT_ID_CTRL			0x60
+#define FG_ADC_RR_BATT_ID_CTRL_CHANNEL_CONV	BIT(0)
+#define FG_ADC_RR_BATT_ID_TRIGGER		0x61
+#define FG_ADC_RR_BATT_ID_TRIGGER_CTL		BIT(0)
+#define FG_ADC_RR_BATT_ID_STS			0x62
+#define FG_ADC_RR_BATT_ID_CFG			0x63
+#define FG_ADC_RR_BATT_ID_5_LSB			0x66
+#define FG_ADC_RR_BATT_ID_5_MSB			0x67
+#define FG_ADC_RR_BATT_ID_15_LSB		0x68
+#define FG_ADC_RR_BATT_ID_15_MSB		0x69
+#define FG_ADC_RR_BATT_ID_150_LSB		0x6A
+#define FG_ADC_RR_BATT_ID_150_MSB		0x6B
+
+#define FG_ADC_RR_BATT_THERM_CTRL		0x70
+#define FG_ADC_RR_BATT_THERM_TRIGGER		0x71
+#define FG_ADC_RR_BATT_THERM_STS		0x72
+#define FG_ADC_RR_BATT_THERM_CFG		0x73
+#define FG_ADC_RR_BATT_THERM_LSB		0x74
+#define FG_ADC_RR_BATT_THERM_MSB		0x75
+#define FG_ADC_RR_BATT_THERM_FREQ		0x76
+
+#define FG_ADC_RR_AUX_THERM_CTRL		0x80
+#define FG_ADC_RR_AUX_THERM_TRIGGER		0x81
+#define FG_ADC_RR_AUX_THERM_STS			0x82
+#define FG_ADC_RR_AUX_THERM_CFG			0x83
+#define FG_ADC_RR_AUX_THERM_LSB			0x84
+#define FG_ADC_RR_AUX_THERM_MSB			0x85
+
+#define FG_ADC_RR_SKIN_HOT			0x86
+#define FG_ADC_RR_SKIN_TOO_HOT			0x87
+
+#define FG_ADC_RR_AUX_THERM_C1			0x88
+#define FG_ADC_RR_AUX_THERM_C2			0x89
+#define FG_ADC_RR_AUX_THERM_C3			0x8A
+#define FG_ADC_RR_AUX_THERM_HALF_RANGE		0x8B
+
+#define FG_ADC_RR_USB_IN_V_CTRL			0x90
+#define FG_ADC_RR_USB_IN_V_TRIGGER		0x91
+#define FG_ADC_RR_USB_IN_V_EVERY_CYCLE_MASK	0x80
+#define FG_ADC_RR_USB_IN_V_EVERY_CYCLE		BIT(7)
+#define FG_ADC_RR_USB_IN_V_STS			0x92
+#define FG_ADC_RR_USB_IN_V_LSB			0x94
+#define FG_ADC_RR_USB_IN_V_MSB			0x95
+#define FG_ADC_RR_USB_IN_I_CTRL			0x98
+#define FG_ADC_RR_USB_IN_I_TRIGGER		0x99
+#define FG_ADC_RR_USB_IN_I_STS			0x9A
+#define FG_ADC_RR_USB_IN_I_LSB			0x9C
+#define FG_ADC_RR_USB_IN_I_MSB			0x9D
+
+#define FG_ADC_RR_DC_IN_V_CTRL			0xA0
+#define FG_ADC_RR_DC_IN_V_TRIGGER		0xA1
+#define FG_ADC_RR_DC_IN_V_STS			0xA2
+#define FG_ADC_RR_DC_IN_V_LSB			0xA4
+#define FG_ADC_RR_DC_IN_V_MSB			0xA5
+#define FG_ADC_RR_DC_IN_I_CTRL			0xA8
+#define FG_ADC_RR_DC_IN_I_TRIGGER		0xA9
+#define FG_ADC_RR_DC_IN_I_STS			0xAA
+#define FG_ADC_RR_DC_IN_I_LSB			0xAC
+#define FG_ADC_RR_DC_IN_I_MSB			0xAD
+
+#define FG_ADC_RR_PMI_DIE_TEMP_CTRL		0xB0
+#define FG_ADC_RR_PMI_DIE_TEMP_TRIGGER		0xB1
+#define FG_ADC_RR_PMI_DIE_TEMP_STS		0xB2
+#define FG_ADC_RR_PMI_DIE_TEMP_CFG		0xB3
+#define FG_ADC_RR_PMI_DIE_TEMP_LSB		0xB4
+#define FG_ADC_RR_PMI_DIE_TEMP_MSB		0xB5
+
+#define FG_ADC_RR_CHARGER_TEMP_CTRL		0xB8
+#define FG_ADC_RR_CHARGER_TEMP_TRIGGER		0xB9
+#define FG_ADC_RR_CHARGER_TEMP_STS		0xBA
+#define FG_ADC_RR_CHARGER_TEMP_CFG		0xBB
+#define FG_ADC_RR_CHARGER_TEMP_LSB		0xBC
+#define FG_ADC_RR_CHARGER_TEMP_MSB		0xBD
+#define FG_ADC_RR_CHARGER_HOT			0xBE
+#define FG_ADC_RR_CHARGER_TOO_HOT		0xBF
+
+#define FG_ADC_RR_GPIO_CTRL			0xC0
+#define FG_ADC_RR_GPIO_TRIGGER			0xC1
+#define FG_ADC_RR_GPIO_STS			0xC2
+#define FG_ADC_RR_GPIO_LSB			0xC4
+#define FG_ADC_RR_GPIO_MSB			0xC5
+
+#define FG_ADC_RR_ATEST_CTRL			0xC8
+#define FG_ADC_RR_ATEST_TRIGGER			0xC9
+#define FG_ADC_RR_ATEST_STS			0xCA
+#define FG_ADC_RR_ATEST_LSB			0xCC
+#define FG_ADC_RR_ATEST_MSB			0xCD
+#define FG_ADC_RR_SEC_ACCESS			0xD0
+
+#define FG_ADC_RR_PERPH_RESET_CTL2		0xD9
+#define FG_ADC_RR_PERPH_RESET_CTL3		0xDA
+#define FG_ADC_RR_PERPH_RESET_CTL4		0xDB
+#define FG_ADC_RR_INT_TEST1			0xE0
+#define FG_ADC_RR_INT_TEST_VAL			0xE1
+
+#define FG_ADC_RR_TM_TRIGGER_CTRLS		0xE2
+#define FG_ADC_RR_TM_ADC_CTRLS			0xE3
+#define FG_ADC_RR_TM_CNL_CTRL			0xE4
+#define FG_ADC_RR_TM_BATT_ID_CTRL		0xE5
+#define FG_ADC_RR_TM_THERM_CTRL			0xE6
+#define FG_ADC_RR_TM_CONV_STS			0xE7
+#define FG_ADC_RR_TM_ADC_READ_LSB		0xE8
+#define FG_ADC_RR_TM_ADC_READ_MSB		0xE9
+#define FG_ADC_RR_TM_ATEST_MUX_1		0xEA
+#define FG_ADC_RR_TM_ATEST_MUX_2		0xEB
+#define FG_ADC_RR_TM_REFERENCES			0xED
+#define FG_ADC_RR_TM_MISC_CTL			0xEE
+#define FG_ADC_RR_TM_RR_CTRL			0xEF
+
+#define FG_ADC_RR_BATT_ID_5_MA			5
+#define FG_ADC_RR_BATT_ID_15_MA			15
+#define FG_ADC_RR_BATT_ID_150_MA		150
+#define FG_ADC_RR_BATT_ID_RANGE			820
+
+#define FG_ADC_BITS				10
+#define FG_MAX_ADC_READINGS			(1 << FG_ADC_BITS)
+#define FG_ADC_RR_FS_VOLTAGE_MV			2500
+
+/* BATT_THERM 0.25K/LSB */
+#define FG_ADC_RR_BATT_THERM_LSB_K		4
+
+#define FG_ADC_RR_TEMP_FS_VOLTAGE_NUM		5000000
+#define FG_ADC_RR_TEMP_FS_VOLTAGE_DEN		3
+#define FG_ADC_RR_DIE_TEMP_OFFSET		601400
+#define FG_ADC_RR_DIE_TEMP_SLOPE		2
+#define FG_ADC_RR_DIE_TEMP_OFFSET_MILLI_DEGC	25000
+
+#define FG_ADC_RR_CHG_TEMP_GF_OFFSET_UV		1303168
+#define FG_ADC_RR_CHG_TEMP_GF_SLOPE_UV_PER_C	3784
+#define FG_ADC_RR_CHG_TEMP_SMIC_OFFSET_UV	1338433
+#define FG_ADC_RR_CHG_TEMP_SMIC_SLOPE_UV_PER_C	3655
+#define FG_ADC_RR_CHG_TEMP_660_GF_OFFSET_UV	1309001
+#define FG_RR_CHG_TEMP_660_GF_SLOPE_UV_PER_C	3403
+#define FG_ADC_RR_CHG_TEMP_660_SMIC_OFFSET_UV	1295898
+#define FG_RR_CHG_TEMP_660_SMIC_SLOPE_UV_PER_C	3596
+#define FG_ADC_RR_CHG_TEMP_660_MGNA_OFFSET_UV	1314779
+#define FG_RR_CHG_TEMP_660_MGNA_SLOPE_UV_PER_C	3496
+#define FG_ADC_RR_CHG_TEMP_OFFSET_MILLI_DEGC	25000
+#define FG_ADC_RR_CHG_THRESHOLD_SCALE		4
+
+#define FG_ADC_RR_VOLT_INPUT_FACTOR		8
+#define FG_ADC_RR_CURR_INPUT_FACTOR		2000
+#define FG_ADC_RR_CURR_USBIN_INPUT_FACTOR_MIL	1886
+#define FG_ADC_RR_CURR_USBIN_660_FACTOR_MIL	9
+#define FG_ADC_RR_CURR_USBIN_660_UV_VAL	579500
+
+#define FG_ADC_SCALE_MILLI_FACTOR		1000
+#define FG_ADC_KELVINMIL_CELSIUSMIL		273150
+
+#define FG_ADC_RR_GPIO_FS_RANGE			5000
+#define FG_RR_ADC_COHERENT_CHECK_RETRY		5
+#define FG_RR_ADC_MAX_CONTINUOUS_BUFFER_LEN	16
+#define FG_RR_ADC_STS_CHANNEL_READING_MASK	0x3
+#define FG_RR_ADC_STS_CHANNEL_STS		0x2
+
+#define FG_RR_CONV_CONTINUOUS_TIME_MIN_MS	50
+#define FG_RR_CONV_MAX_RETRY_CNT		50
+#define FG_RR_TP_REV_VERSION1		21
+#define FG_RR_TP_REV_VERSION2		29
+#define FG_RR_TP_REV_VERSION3		32
+
+/*
+ * The channel number is not a physical index into the hardware; rather,
+ * it is an index into the list of supported channels, used to select
+ * the respective channel properties, such as how the result is scaled.
+ * Add any additional channels supported by the RR ADC before RR_ADC_MAX.
+ */
+enum rradc_channel_id {
+	RR_ADC_BATT_ID = 0,
+	RR_ADC_BATT_THERM,
+	RR_ADC_SKIN_TEMP,
+	RR_ADC_USBIN_I,
+	RR_ADC_USBIN_V,
+	RR_ADC_DCIN_I,
+	RR_ADC_DCIN_V,
+	RR_ADC_DIE_TEMP,
+	RR_ADC_CHG_TEMP,
+	RR_ADC_GPIO,
+	RR_ADC_CHG_HOT_TEMP,
+	RR_ADC_CHG_TOO_HOT_TEMP,
+	RR_ADC_SKIN_HOT_TEMP,
+	RR_ADC_SKIN_TOO_HOT_TEMP,
+	RR_ADC_MAX
+};
+
+struct rradc_chip {
+	struct device			*dev;
+	struct mutex			lock;
+	struct regmap			*regmap;
+	u16				base;
+	struct iio_chan_spec		*iio_chans;
+	unsigned int			nchannels;
+	struct rradc_chan_prop		*chan_props;
+	struct device_node		*revid_dev_node;
+	struct pmic_revid_data		*pmic_fab_id;
+	int volt;
+	struct power_supply		*usb_trig;
+};
+
+struct rradc_channels {
+	const char			*datasheet_name;
+	enum iio_chan_type		type;
+	long				info_mask;
+	u8				lsb;
+	u8				msb;
+	u8				sts;
+	int (*scale)(struct rradc_chip *chip, struct rradc_chan_prop *prop,
+					u16 adc_code, int *result);
+};
+
+struct rradc_chan_prop {
+	enum rradc_channel_id		channel;
+	uint32_t			channel_data;
+	int (*scale)(struct rradc_chip *chip, struct rradc_chan_prop *prop,
+					u16 adc_code, int *result);
+};
+
+static int rradc_masked_write(struct rradc_chip *rr_adc, u16 offset, u8 mask,
+						u8 val)
+{
+	int rc;
+
+	rc = regmap_update_bits(rr_adc->regmap, rr_adc->base + offset,
+								mask, val);
+	if (rc) {
+		pr_err("spmi write failed: addr=%03X, rc=%d\n", offset, rc);
+		return rc;
+	}
+
+	return rc;
+}
+
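+/*
+ * Bulk-read @len bytes at @offset twice and retry (up to
+ * FG_RR_ADC_COHERENT_CHECK_RETRY times) until both reads agree. The RR
+ * ADC updates its result registers asynchronously, so a single read may
+ * observe a partially updated LSB/MSB pair; two matching back-to-back
+ * reads indicate a coherent sample.
+ */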
+static int rradc_read(struct rradc_chip *rr_adc, u16 offset, u8 *data, int len)
+{
+	int rc = 0, retry_cnt = 0, i = 0;
+	u8 data_check[FG_RR_ADC_MAX_CONTINUOUS_BUFFER_LEN];
+	bool coherent_err = false;
+
+	if (len > FG_RR_ADC_MAX_CONTINUOUS_BUFFER_LEN) {
+		pr_err("Increase the buffer length\n");
+		return -EINVAL;
+	}
+
+	while (retry_cnt < FG_RR_ADC_COHERENT_CHECK_RETRY) {
+		rc = regmap_bulk_read(rr_adc->regmap, rr_adc->base + offset,
+							data, len);
+		if (rc < 0) {
+			pr_err("rr_adc reg 0x%x failed :%d\n", offset, rc);
+			return rc;
+		}
+
+		rc = regmap_bulk_read(rr_adc->regmap, rr_adc->base + offset,
+							data_check, len);
+		if (rc < 0) {
+			pr_err("rr_adc reg 0x%x failed :%d\n", offset, rc);
+			return rc;
+		}
+
+		for (i = 0; i < len; i++) {
+			if (data[i] != data_check[i])
+				coherent_err = true;
+		}
+
+		if (coherent_err) {
+			retry_cnt++;
+			coherent_err = false;
+			pr_debug("retry_cnt:%d\n", retry_cnt);
+		} else {
+			break;
+		}
+	}
+
+	if (retry_cnt == FG_RR_ADC_COHERENT_CHECK_RETRY)
+		pr_err("Retry exceeded for coherrency check\n");
+
+	return rc;
+}
+
+static int rradc_post_process_batt_id(struct rradc_chip *chip,
+			struct rradc_chan_prop *prop, u16 adc_code,
+			int *result_ohms)
+{
+	uint32_t current_value;
+	int64_t r_id;
+
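+	/*
+	 * R_id = code * 2500 mV / (1024 * I_mA), then scaled to ohms.
+	 * Illustrative example: adc_code = 205 on the 5 mA range gives
+	 * 205 * 2500 / (1024 * 5) = 100, i.e. 100000 ohms (100 kohm).
+	 */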
+	current_value = prop->channel_data;
+	r_id = ((int64_t)adc_code * FG_ADC_RR_FS_VOLTAGE_MV);
+	r_id = div64_s64(r_id, (FG_MAX_ADC_READINGS * current_value));
+	*result_ohms = (r_id * FG_ADC_SCALE_MILLI_FACTOR);
+
+	return 0;
+}
+
+static int rradc_post_process_therm(struct rradc_chip *chip,
+			struct rradc_chan_prop *prop, u16 adc_code,
+			int *result_millidegc)
+{
+	int64_t temp;
+
+	/* K = code/4 */
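+	/*
+	 * Illustrative example: adc_code = 1200 reads as
+	 * 1200 * 1000 / 4 = 300000 mK, i.e. 300000 - 273150 = 26850 mdegC.
+	 */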
+	temp = ((int64_t)adc_code * FG_ADC_SCALE_MILLI_FACTOR);
+	temp = div64_s64(temp, FG_ADC_RR_BATT_THERM_LSB_K);
+	*result_millidegc = temp - FG_ADC_KELVINMIL_CELSIUSMIL;
+
+	return 0;
+}
+
+static int rradc_post_process_volt(struct rradc_chip *chip,
+			struct rradc_chan_prop *prop, u16 adc_code,
+			int *result_uv)
+{
+	int64_t uv = 0;
+
+	/* 8x input attenuation; 2.5V ADC full scale */
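+	/*
+	 * Illustrative example: mid-scale adc_code = 512 gives
+	 * 512 * 8 * 2500 * 1000 / 1024 = 10000000 uV, i.e. 10 V at the input.
+	 */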
+	uv = ((int64_t)adc_code * FG_ADC_RR_VOLT_INPUT_FACTOR);
+	uv *= (FG_ADC_RR_FS_VOLTAGE_MV * FG_ADC_SCALE_MILLI_FACTOR);
+	uv = div64_s64(uv, FG_MAX_ADC_READINGS);
+	*result_uv = uv;
+
+	return 0;
+}
+
+static int rradc_post_process_usbin_curr(struct rradc_chip *chip,
+			struct rradc_chan_prop *prop, u16 adc_code,
+			int *result_ua)
+{
+	int64_t ua = 0, scale = 0;
+
+	if (!prop)
+		return -EINVAL;
+	if (chip->revid_dev_node) {
+		switch (chip->pmic_fab_id->pmic_subtype) {
+		case PM660_SUBTYPE:
+			if (((chip->pmic_fab_id->tp_rev
+				>= FG_RR_TP_REV_VERSION1)
+			&& (chip->pmic_fab_id->tp_rev
+				<= FG_RR_TP_REV_VERSION2))
+			|| (chip->pmic_fab_id->tp_rev
+				>= FG_RR_TP_REV_VERSION3)) {
+				chip->volt = div64_s64(chip->volt, 1000);
+				chip->volt = chip->volt *
+					FG_ADC_RR_CURR_USBIN_660_FACTOR_MIL;
+				chip->volt = FG_ADC_RR_CURR_USBIN_660_UV_VAL -
+					(chip->volt);
+				chip->volt = div64_s64(1000000000, chip->volt);
+				scale = chip->volt;
+			} else
+				scale = FG_ADC_RR_CURR_USBIN_INPUT_FACTOR_MIL;
+			break;
+		case PMI8998_SUBTYPE:
+			scale = FG_ADC_RR_CURR_USBIN_INPUT_FACTOR_MIL;
+			break;
+		default:
+			pr_err("No PMIC subtype found\n");
+			return -EINVAL;
+		}
+	}
+
+	/* scale * V/A; 2.5V ADC full scale */
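+	/*
+	 * Illustrative numbers: on PMI8998, scale = 1886, so full-scale
+	 * adc_code = 1023 gives 1023 * 1886 * 2500000 / 1024000 ~= 4.71 A.
+	 * On the newer PM660 parts above, scale is derived from the measured
+	 * USBIN voltage: roughly 10^9 / (579500 - 9 * V_usbin_mV), which is
+	 * ~1871 at 5 V.
+	 */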
+	ua = ((int64_t)adc_code * scale);
+	ua *= (FG_ADC_RR_FS_VOLTAGE_MV * FG_ADC_SCALE_MILLI_FACTOR);
+	ua = div64_s64(ua, (FG_MAX_ADC_READINGS * 1000));
+	*result_ua = ua;
+
+	return 0;
+}
+
+static int rradc_post_process_dcin_curr(struct rradc_chip *chip,
+			struct rradc_chan_prop *prop, u16 adc_code,
+			int *result_ua)
+{
+	int64_t ua = 0;
+
+	if (!prop)
+		return -EINVAL;
+
+	/* 0.5 V/A; 2.5V ADC full scale */
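+	/*
+	 * Illustrative full-scale check: adc_code = 1023 gives
+	 * 1023 * 2000 * 2500000 / 1024000 ~= 4995117 uA, i.e. ~5 A.
+	 */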
+	ua = ((int64_t)adc_code * FG_ADC_RR_CURR_INPUT_FACTOR);
+	ua *= (FG_ADC_RR_FS_VOLTAGE_MV * FG_ADC_SCALE_MILLI_FACTOR);
+	ua = div64_s64(ua, (FG_MAX_ADC_READINGS * 1000));
+	*result_ua = ua;
+
+	return 0;
+}
+
+static int rradc_post_process_die_temp(struct rradc_chip *chip,
+			struct rradc_chan_prop *prop, u16 adc_code,
+			int *result_millidegc)
+{
+	int64_t temp = 0;
+
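+	/*
+	 * temp_uv = code * 5000000 / (3 * 1024); 601400 uV corresponds to
+	 * 25 degC at 2 uV per mdegC. Illustrative example: adc_code = 370
+	 * gives 602213 uV -> (602213 - 601400) / 2 + 25000 = 25406 mdegC.
+	 */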
+	temp = ((int64_t)adc_code * FG_ADC_RR_TEMP_FS_VOLTAGE_NUM);
+	temp = div64_s64(temp, (FG_ADC_RR_TEMP_FS_VOLTAGE_DEN *
+					FG_MAX_ADC_READINGS));
+	temp -= FG_ADC_RR_DIE_TEMP_OFFSET;
+	temp = div64_s64(temp, FG_ADC_RR_DIE_TEMP_SLOPE);
+	temp += FG_ADC_RR_DIE_TEMP_OFFSET_MILLI_DEGC;
+	*result_millidegc = temp;
+
+	return 0;
+}
+
+static int rradc_get_660_fab_coeff(struct rradc_chip *chip,
+		int64_t *offset, int64_t *slope)
+{
+	switch (chip->pmic_fab_id->fab_id) {
+	case PM660_FAB_ID_GF:
+		*offset = FG_ADC_RR_CHG_TEMP_660_GF_OFFSET_UV;
+		*slope = FG_RR_CHG_TEMP_660_GF_SLOPE_UV_PER_C;
+		break;
+	case PM660_FAB_ID_TSMC:
+		*offset = FG_ADC_RR_CHG_TEMP_660_SMIC_OFFSET_UV;
+		*slope = FG_RR_CHG_TEMP_660_SMIC_SLOPE_UV_PER_C;
+		break;
+	default:
+		*offset = FG_ADC_RR_CHG_TEMP_660_MGNA_OFFSET_UV;
+		*slope = FG_RR_CHG_TEMP_660_MGNA_SLOPE_UV_PER_C;
+	}
+
+	return 0;
+}
+
+static int rradc_get_8998_fab_coeff(struct rradc_chip *chip,
+		int64_t *offset, int64_t *slope)
+{
+	switch (chip->pmic_fab_id->fab_id) {
+	case PMI8998_FAB_ID_GF:
+		*offset = FG_ADC_RR_CHG_TEMP_GF_OFFSET_UV;
+		*slope = FG_ADC_RR_CHG_TEMP_GF_SLOPE_UV_PER_C;
+		break;
+	case PMI8998_FAB_ID_SMIC:
+		*offset = FG_ADC_RR_CHG_TEMP_SMIC_OFFSET_UV;
+		*slope = FG_ADC_RR_CHG_TEMP_SMIC_SLOPE_UV_PER_C;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int rradc_post_process_chg_temp_hot(struct rradc_chip *chip,
+			struct rradc_chan_prop *prop, u16 adc_code,
+			int *result_millidegc)
+{
+	int64_t uv = 0, offset = 0, slope = 0;
+	int rc = 0;
+
+	if (chip->revid_dev_node) {
+		switch (chip->pmic_fab_id->pmic_subtype) {
+		case PM660_SUBTYPE:
+			rc = rradc_get_660_fab_coeff(chip, &offset, &slope);
+			if (rc < 0) {
+				pr_err("Unable to get fab id coefficients\n");
+				return -EINVAL;
+			}
+			break;
+		case PMI8998_SUBTYPE:
+			rc = rradc_get_8998_fab_coeff(chip, &offset, &slope);
+			if (rc < 0) {
+				pr_err("Unable to get fab id coefficients\n");
+				return -EINVAL;
+			}
+			break;
+		default:
+			pr_err("No PMIC subtype found\n");
+			return -EINVAL;
+		}
+	} else {
+		pr_err("No temperature scaling coefficients\n");
+		return -EINVAL;
+	}
+
+	uv = (int64_t) adc_code * FG_ADC_RR_CHG_THRESHOLD_SCALE;
+	uv = uv * FG_ADC_RR_TEMP_FS_VOLTAGE_NUM;
+	uv = div64_s64(uv, (FG_ADC_RR_TEMP_FS_VOLTAGE_DEN *
+					FG_MAX_ADC_READINGS));
+	uv = offset - uv;
+	uv = div64_s64((uv * FG_ADC_SCALE_MILLI_FACTOR), slope);
+	uv = uv + FG_ADC_RR_CHG_TEMP_OFFSET_MILLI_DEGC;
+	*result_millidegc = uv;
+
+	return 0;
+}
+
+static int rradc_post_process_skin_temp_hot(struct rradc_chip *chip,
+			struct rradc_chan_prop *prop, u16 adc_code,
+			int *result_millidegc)
+{
+	int64_t temp = 0;
+
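+	/* T(degC) = code / 2 - 30; e.g. adc_code = 110 -> 25000 mdegC. */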
+	temp = (int64_t) adc_code;
+	temp = div64_s64(temp, 2);
+	temp = temp - 30;
+	temp *= FG_ADC_SCALE_MILLI_FACTOR;
+	*result_millidegc = temp;
+
+	return 0;
+}
+
+static int rradc_post_process_chg_temp(struct rradc_chip *chip,
+			struct rradc_chan_prop *prop, u16 adc_code,
+			int *result_millidegc)
+{
+	int64_t uv = 0, offset = 0, slope = 0;
+	int rc = 0;
+
+	if (chip->revid_dev_node) {
+		switch (chip->pmic_fab_id->pmic_subtype) {
+		case PM660_SUBTYPE:
+			rc = rradc_get_660_fab_coeff(chip, &offset, &slope);
+			if (rc < 0) {
+				pr_err("Unable to get fab id coefficients\n");
+				return -EINVAL;
+			}
+			break;
+		case PMI8998_SUBTYPE:
+			rc = rradc_get_8998_fab_coeff(chip, &offset, &slope);
+			if (rc < 0) {
+				pr_err("Unable to get fab id coefficients\n");
+				return -EINVAL;
+			}
+			break;
+		default:
+			pr_err("No PMIC subtype found\n");
+			return -EINVAL;
+		}
+	} else {
+		pr_err("No temperature scaling coefficients\n");
+		return -EINVAL;
+	}
+
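+	/*
+	 * Illustrative example with the PMI8998 GF coefficients (offset
+	 * 1303168 uV, slope 3784 uV/degC): adc_code = 700 -> uv = 1139322,
+	 * so (1303168 - 1139322) * 1000 / 3784 + 25000 = 68299 mdegC.
+	 */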
+	uv = ((int64_t) adc_code * FG_ADC_RR_TEMP_FS_VOLTAGE_NUM);
+	uv = div64_s64(uv, (FG_ADC_RR_TEMP_FS_VOLTAGE_DEN *
+					FG_MAX_ADC_READINGS));
+	uv = offset - uv;
+	uv = div64_s64((uv * FG_ADC_SCALE_MILLI_FACTOR), slope);
+	uv += FG_ADC_RR_CHG_TEMP_OFFSET_MILLI_DEGC;
+	*result_millidegc = uv;
+
+	return 0;
+}
+
+static int rradc_post_process_gpio(struct rradc_chip *chip,
+			struct rradc_chan_prop *prop, u16 adc_code,
+			int *result_mv)
+{
+	int64_t mv = 0;
+
+	/* 5V ADC full scale, 10 bit */
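+	/* e.g. mid-scale adc_code = 512 -> 512 * 5000 / 1024 = 2500 mV */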
+	mv = ((int64_t)adc_code * FG_ADC_RR_GPIO_FS_RANGE);
+	mv = div64_s64(mv, FG_MAX_ADC_READINGS);
+	*result_mv = mv;
+
+	return 0;
+}
+
+#define RR_ADC_CHAN(_dname, _type, _mask, _scale, _lsb, _msb, _sts)	\
+	{								\
+		.datasheet_name = (_dname),				\
+		.type = _type,						\
+		.info_mask = _mask,					\
+		.scale = _scale,					\
+		.lsb = _lsb,						\
+		.msb = _msb,						\
+		.sts = _sts,						\
+	},								\
+
+#define RR_ADC_CHAN_TEMP(_dname, _scale, mask, _lsb, _msb, _sts)	\
+	RR_ADC_CHAN(_dname, IIO_TEMP,					\
+		mask,							\
+		_scale, _lsb, _msb, _sts)				\
+
+#define RR_ADC_CHAN_VOLT(_dname, _scale, _lsb, _msb, _sts)		\
+	RR_ADC_CHAN(_dname, IIO_VOLTAGE,				\
+		  BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_PROCESSED),\
+		  _scale, _lsb, _msb, _sts)				\
+
+#define RR_ADC_CHAN_CURRENT(_dname, _scale, _lsb, _msb, _sts)		\
+	RR_ADC_CHAN(_dname, IIO_CURRENT,				\
+		  BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_PROCESSED),\
+		  _scale, _lsb, _msb, _sts)				\
+
+#define RR_ADC_CHAN_RESISTANCE(_dname, _scale, _lsb, _msb, _sts)	\
+	RR_ADC_CHAN(_dname, IIO_RESISTANCE,				\
+		  BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_PROCESSED),\
+		  _scale, _lsb, _msb, _sts)				\
+
+static const struct rradc_channels rradc_chans[] = {
+	RR_ADC_CHAN_RESISTANCE("batt_id", rradc_post_process_batt_id,
+			FG_ADC_RR_BATT_ID_5_LSB, FG_ADC_RR_BATT_ID_5_MSB,
+			FG_ADC_RR_BATT_ID_STS)
+	RR_ADC_CHAN_TEMP("batt_therm", &rradc_post_process_therm,
+			BIT(IIO_CHAN_INFO_RAW),
+			FG_ADC_RR_BATT_THERM_LSB, FG_ADC_RR_BATT_THERM_MSB,
+			FG_ADC_RR_BATT_THERM_STS)
+	RR_ADC_CHAN_TEMP("skin_temp", &rradc_post_process_therm,
+			BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_PROCESSED),
+			FG_ADC_RR_SKIN_TEMP_LSB, FG_ADC_RR_SKIN_TEMP_MSB,
+			FG_ADC_RR_AUX_THERM_STS)
+	RR_ADC_CHAN_CURRENT("usbin_i", &rradc_post_process_usbin_curr,
+			FG_ADC_RR_USB_IN_I_LSB, FG_ADC_RR_USB_IN_I_MSB,
+			FG_ADC_RR_USB_IN_I_STS)
+	RR_ADC_CHAN_VOLT("usbin_v", &rradc_post_process_volt,
+			FG_ADC_RR_USB_IN_V_LSB, FG_ADC_RR_USB_IN_V_MSB,
+			FG_ADC_RR_USB_IN_V_STS)
+	RR_ADC_CHAN_CURRENT("dcin_i", &rradc_post_process_dcin_curr,
+			FG_ADC_RR_DC_IN_I_LSB, FG_ADC_RR_DC_IN_I_MSB,
+			FG_ADC_RR_DC_IN_I_STS)
+	RR_ADC_CHAN_VOLT("dcin_v", &rradc_post_process_volt,
+			FG_ADC_RR_DC_IN_V_LSB, FG_ADC_RR_DC_IN_V_MSB,
+			FG_ADC_RR_DC_IN_V_STS)
+	RR_ADC_CHAN_TEMP("die_temp", &rradc_post_process_die_temp,
+			BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_PROCESSED),
+			FG_ADC_RR_PMI_DIE_TEMP_LSB, FG_ADC_RR_PMI_DIE_TEMP_MSB,
+			FG_ADC_RR_PMI_DIE_TEMP_STS)
+	RR_ADC_CHAN_TEMP("chg_temp", &rradc_post_process_chg_temp,
+			BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_PROCESSED),
+			FG_ADC_RR_CHARGER_TEMP_LSB, FG_ADC_RR_CHARGER_TEMP_MSB,
+			FG_ADC_RR_CHARGER_TEMP_STS)
+	RR_ADC_CHAN_VOLT("gpio", &rradc_post_process_gpio,
+			FG_ADC_RR_GPIO_LSB, FG_ADC_RR_GPIO_MSB,
+			FG_ADC_RR_GPIO_STS)
+	RR_ADC_CHAN_TEMP("chg_temp_hot", &rradc_post_process_chg_temp_hot,
+			BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_PROCESSED),
+			FG_ADC_RR_CHARGER_HOT, FG_ADC_RR_CHARGER_HOT,
+			FG_ADC_RR_CHARGER_TEMP_STS)
+	RR_ADC_CHAN_TEMP("chg_temp_too_hot", &rradc_post_process_chg_temp_hot,
+			BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_PROCESSED),
+			FG_ADC_RR_CHARGER_TOO_HOT, FG_ADC_RR_CHARGER_TOO_HOT,
+			FG_ADC_RR_CHARGER_TEMP_STS)
+	RR_ADC_CHAN_TEMP("skin_temp_hot", &rradc_post_process_skin_temp_hot,
+			BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_PROCESSED),
+			FG_ADC_RR_SKIN_HOT, FG_ADC_RR_SKIN_HOT,
+			FG_ADC_RR_AUX_THERM_STS)
+	RR_ADC_CHAN_TEMP("skin_temp_too_hot", &rradc_post_process_skin_temp_hot,
+			BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_PROCESSED),
+			FG_ADC_RR_SKIN_TOO_HOT, FG_ADC_RR_SKIN_TOO_HOT,
+			FG_ADC_RR_AUX_THERM_STS)
+};
+
+static int rradc_enable_continuous_mode(struct rradc_chip *chip)
+{
+	int rc = 0;
+
+	/* Clear channel log */
+	rc = rradc_masked_write(chip, FG_ADC_RR_ADC_LOG,
+			FG_ADC_RR_ADC_LOG_CLR_CTRL,
+			FG_ADC_RR_ADC_LOG_CLR_CTRL);
+	if (rc < 0) {
+		pr_err("log ctrl update to clear failed:%d\n", rc);
+		return rc;
+	}
+
+	rc = rradc_masked_write(chip, FG_ADC_RR_ADC_LOG,
+		FG_ADC_RR_ADC_LOG_CLR_CTRL, 0);
+	if (rc < 0) {
+		pr_err("log ctrl update to not clear failed:%d\n", rc);
+		return rc;
+	}
+
+	/* Switch to continuous mode */
+	rc = rradc_masked_write(chip, FG_ADC_RR_RR_ADC_CTL,
+		FG_ADC_RR_ADC_CTL_CONTINUOUS_SEL_MASK,
+		FG_ADC_RR_ADC_CTL_CONTINUOUS_SEL);
+	if (rc < 0) {
+		pr_err("Update to continuous mode failed:%d\n", rc);
+		return rc;
+	}
+
+	return rc;
+}
+
+static int rradc_disable_continuous_mode(struct rradc_chip *chip)
+{
+	int rc = 0;
+
+	/* Switch to non-continuous mode */
+	rc = rradc_masked_write(chip, FG_ADC_RR_RR_ADC_CTL,
+			FG_ADC_RR_ADC_CTL_CONTINUOUS_SEL_MASK, 0);
+	if (rc < 0) {
+		pr_err("Update to non-continuous mode failed:%d\n", rc);
+		return rc;
+	}
+
+	return rc;
+}
+
+static bool rradc_is_usb_present(struct rradc_chip *chip)
+{
+	union power_supply_propval pval;
+	int rc;
+	bool usb_present = false;
+
+	if (!chip->usb_trig) {
+		pr_debug("USB property not present\n");
+		return usb_present;
+	}
+
+	rc = power_supply_get_property(chip->usb_trig,
+			POWER_SUPPLY_PROP_PRESENT, &pval);
+	usb_present = (rc < 0) ? 0 : pval.intval;
+
+	return usb_present;
+}
+
+static int rradc_check_status_ready_with_retry(struct rradc_chip *chip,
+		struct rradc_chan_prop *prop, u8 *buf, u16 status)
+{
+	int rc = 0, retry_cnt = 0, mask = 0;
+
+	switch (prop->channel) {
+	case RR_ADC_BATT_ID:
+		/* BATT_ID STS bit does not get set initially */
+		mask = FG_RR_ADC_STS_CHANNEL_STS;
+		break;
+	default:
+		mask = FG_RR_ADC_STS_CHANNEL_READING_MASK;
+		break;
+	}
+
+	while (((buf[0] & mask) != mask) &&
+			(retry_cnt < FG_RR_CONV_MAX_RETRY_CNT)) {
+		pr_debug("%s is not ready; nothing to read:0x%x\n",
+			rradc_chans[prop->channel].datasheet_name, buf[0]);
+
+		if (((prop->channel == RR_ADC_CHG_TEMP) ||
+			(prop->channel == RR_ADC_SKIN_TEMP) ||
+			(prop->channel == RR_ADC_USBIN_I) ||
+			(prop->channel == RR_ADC_DIE_TEMP)) &&
+					((!rradc_is_usb_present(chip)))) {
+			pr_debug("USB not present for %d\n", prop->channel);
+			rc = -ENODATA;
+			break;
+		}
+
+		msleep(FG_RR_CONV_CONTINUOUS_TIME_MIN_MS);
+		retry_cnt++;
+		rc = rradc_read(chip, status, buf, 1);
+		if (rc < 0) {
+			pr_err("status read failed:%d\n", rc);
+			return rc;
+		}
+	}
+
+	if (retry_cnt >= FG_RR_CONV_MAX_RETRY_CNT)
+		rc = -ENODATA;
+
+	return rc;
+}
+
+static int rradc_read_channel_with_continuous_mode(struct rradc_chip *chip,
+			struct rradc_chan_prop *prop, u8 *buf)
+{
+	int rc = 0, ret = 0;
+	u16 status = 0;
+
+	rc = rradc_enable_continuous_mode(chip);
+	if (rc < 0) {
+		pr_err("Failed to switch to continuous mode\n");
+		return rc;
+	}
+
+	status = rradc_chans[prop->channel].sts;
+	rc = rradc_read(chip, status, buf, 1);
+	if (rc < 0) {
+		pr_err("status read failed:%d\n", rc);
+		ret = rc;
+		goto disable;
+	}
+
+	rc = rradc_check_status_ready_with_retry(chip, prop,
+						buf, status);
+	if (rc < 0) {
+		pr_err("Status read failed:%d\n", rc);
+		ret = rc;
+	}
+
+disable:
+	rc = rradc_disable_continuous_mode(chip);
+	if (rc < 0) {
+		pr_err("Failed to switch to non continuous mode\n");
+		ret = rc;
+	}
+
+	return ret;
+}
+
+static int rradc_enable_batt_id_channel(struct rradc_chip *chip, bool enable)
+{
+	int rc = 0;
+
+	if (enable) {
+		rc = rradc_masked_write(chip, FG_ADC_RR_BATT_ID_CTRL,
+				FG_ADC_RR_BATT_ID_CTRL_CHANNEL_CONV,
+				FG_ADC_RR_BATT_ID_CTRL_CHANNEL_CONV);
+		if (rc < 0) {
+			pr_err("Enabling BATT ID channel failed:%d\n", rc);
+			return rc;
+		}
+	} else {
+		rc = rradc_masked_write(chip, FG_ADC_RR_BATT_ID_CTRL,
+				FG_ADC_RR_BATT_ID_CTRL_CHANNEL_CONV, 0);
+		if (rc < 0) {
+			pr_err("Disabling BATT ID channel failed:%d\n", rc);
+			return rc;
+		}
+	}
+
+	return rc;
+}
+
+static int rradc_do_batt_id_conversion(struct rradc_chip *chip,
+		struct rradc_chan_prop *prop, u16 *data, u8 *buf)
+{
+	int rc = 0, ret = 0;
+
+	rc = rradc_enable_batt_id_channel(chip, true);
+	if (rc < 0) {
+		pr_err("Enabling BATT ID channel failed:%d\n", rc);
+		return rc;
+	}
+
+	rc = rradc_masked_write(chip, FG_ADC_RR_BATT_ID_TRIGGER,
+				FG_ADC_RR_BATT_ID_TRIGGER_CTL,
+				FG_ADC_RR_BATT_ID_TRIGGER_CTL);
+	if (rc < 0) {
+		pr_err("BATT_ID trigger set failed:%d\n", rc);
+		ret = rc;
+		rc = rradc_enable_batt_id_channel(chip, false);
+		if (rc < 0)
+			pr_err("Disabling BATT ID channel failed:%d\n", rc);
+		return ret;
+	}
+
+	rc = rradc_read_channel_with_continuous_mode(chip, prop, buf);
+	if (rc < 0) {
+		pr_err("Error reading in continuous mode:%d\n", rc);
+		ret = rc;
+	}
+
+	rc = rradc_masked_write(chip, FG_ADC_RR_BATT_ID_TRIGGER,
+			FG_ADC_RR_BATT_ID_TRIGGER_CTL, 0);
+	if (rc < 0) {
+		pr_err("BATT_ID trigger re-set failed:%d\n", rc);
+		ret = rc;
+	}
+
+	rc = rradc_enable_batt_id_channel(chip, false);
+	if (rc < 0) {
+		pr_err("Disabling BATT ID channel failed:%d\n", rc);
+		ret = rc;
+	}
+
+	return ret;
+}
+
+static int rradc_do_conversion(struct rradc_chip *chip,
+			struct rradc_chan_prop *prop, u16 *data)
+{
+	int rc = 0, bytes_to_read = 0;
+	u8 buf[6];
+	u16 offset = 0, batt_id_5 = 0, batt_id_15 = 0, batt_id_150 = 0;
+	u16 status = 0;
+
+	mutex_lock(&chip->lock);
+
+	switch (prop->channel) {
+	case RR_ADC_BATT_ID:
+		rc = rradc_do_batt_id_conversion(chip, prop, data, buf);
+		if (rc < 0) {
+			pr_err("Battery ID conversion failed:%d\n", rc);
+			goto fail;
+		}
+		break;
+	case RR_ADC_USBIN_V:
+		/* Force conversion every cycle */
+		rc = rradc_masked_write(chip, FG_ADC_RR_USB_IN_V_TRIGGER,
+				FG_ADC_RR_USB_IN_V_EVERY_CYCLE_MASK,
+				FG_ADC_RR_USB_IN_V_EVERY_CYCLE);
+		if (rc < 0) {
+			pr_err("Force every cycle update failed:%d\n", rc);
+			goto fail;
+		}
+
+		rc = rradc_read_channel_with_continuous_mode(chip, prop, buf);
+		if (rc < 0) {
+			pr_err("Error reading in continuous mode:%d\n", rc);
+			goto fail;
+		}
+
+		/* Restore usb_in trigger */
+		rc = rradc_masked_write(chip, FG_ADC_RR_USB_IN_V_TRIGGER,
+				FG_ADC_RR_USB_IN_V_EVERY_CYCLE_MASK, 0);
+		if (rc < 0) {
+			pr_err("Restore every cycle update failed:%d\n", rc);
+			goto fail;
+		}
+		break;
+	case RR_ADC_CHG_HOT_TEMP:
+	case RR_ADC_CHG_TOO_HOT_TEMP:
+	case RR_ADC_SKIN_HOT_TEMP:
+	case RR_ADC_SKIN_TOO_HOT_TEMP:
+		pr_debug("Read only the data registers\n");
+		break;
+	default:
+		status = rradc_chans[prop->channel].sts;
+		rc = rradc_read(chip, status, buf, 1);
+		if (rc < 0) {
+			pr_err("status read failed:%d\n", rc);
+			goto fail;
+		}
+
+		rc = rradc_check_status_ready_with_retry(chip, prop,
+						buf, status);
+		if (rc < 0) {
+			pr_debug("Status read failed:%d\n", rc);
+			rc = -ENODATA;
+			goto fail;
+		}
+		break;
+	}
+
+	offset = rradc_chans[prop->channel].lsb;
+	if (prop->channel == RR_ADC_BATT_ID)
+		bytes_to_read = 6;
+	else if ((prop->channel == RR_ADC_CHG_HOT_TEMP) ||
+		(prop->channel == RR_ADC_CHG_TOO_HOT_TEMP) ||
+		(prop->channel == RR_ADC_SKIN_HOT_TEMP) ||
+		(prop->channel == RR_ADC_SKIN_TOO_HOT_TEMP))
+		bytes_to_read = 1;
+	else
+		bytes_to_read = 2;
+
+	buf[0] = 0;
+	rc = rradc_read(chip, offset, buf, bytes_to_read);
+	if (rc) {
+		pr_err("read data failed\n");
+		goto fail;
+	}
+
+	if (prop->channel == RR_ADC_BATT_ID) {
+		batt_id_150 = (buf[5] << 8) | buf[4];
+		batt_id_15 = (buf[3] << 8) | buf[2];
+		batt_id_5 = (buf[1] << 8) | buf[0];
+		if ((!batt_id_150) && (!batt_id_15) && (!batt_id_5)) {
+			pr_err("Invalid batt_id values with all zeros\n");
+			rc = -EINVAL;
+			goto fail;
+		}
+
+		if (batt_id_150 <= FG_ADC_RR_BATT_ID_RANGE) {
+			pr_debug("Batt_id_150 is chosen\n");
+			*data = batt_id_150;
+			prop->channel_data = FG_ADC_RR_BATT_ID_150_MA;
+		} else if (batt_id_15 <= FG_ADC_RR_BATT_ID_RANGE) {
+			pr_debug("Batt_id_15 is chosen\n");
+			*data = batt_id_15;
+			prop->channel_data = FG_ADC_RR_BATT_ID_15_MA;
+		} else {
+			pr_debug("Batt_id_5 is chosen\n");
+			*data = batt_id_5;
+			prop->channel_data = FG_ADC_RR_BATT_ID_5_MA;
+		}
+	} else if ((prop->channel == RR_ADC_CHG_HOT_TEMP) ||
+		(prop->channel == RR_ADC_CHG_TOO_HOT_TEMP) ||
+		(prop->channel == RR_ADC_SKIN_HOT_TEMP) ||
+		(prop->channel == RR_ADC_SKIN_TOO_HOT_TEMP)) {
+		*data = buf[0];
+	} else {
+		*data = (buf[1] << 8) | buf[0];
+	}
+fail:
+	mutex_unlock(&chip->lock);
+
+	return rc;
+}
+
+static int rradc_read_raw(struct iio_dev *indio_dev,
+			 struct iio_chan_spec const *chan, int *val, int *val2,
+			 long mask)
+{
+	struct rradc_chip *chip = iio_priv(indio_dev);
+	struct rradc_chan_prop *prop;
+	u16 adc_code;
+	int rc = 0;
+
+	if (chan->address >= RR_ADC_MAX) {
+		pr_err("Invalid channel index:%ld\n", chan->address);
+		return -EINVAL;
+	}
+
+	switch (mask) {
+	case IIO_CHAN_INFO_PROCESSED:
+		if (((chip->pmic_fab_id->tp_rev
+				>= FG_RR_TP_REV_VERSION1)
+		&& (chip->pmic_fab_id->tp_rev
+				<= FG_RR_TP_REV_VERSION2))
+		|| (chip->pmic_fab_id->tp_rev
+				>= FG_RR_TP_REV_VERSION3)) {
+			if (chan->address == RR_ADC_USBIN_I) {
+				prop = &chip->chan_props[RR_ADC_USBIN_V];
+				rc = rradc_do_conversion(chip, prop, &adc_code);
+				if (rc)
+					break;
+				prop->scale(chip, prop, adc_code, &chip->volt);
+			}
+		}
+
+		prop = &chip->chan_props[chan->address];
+		rc = rradc_do_conversion(chip, prop, &adc_code);
+		if (rc)
+			break;
+
+		prop->scale(chip, prop, adc_code, val);
+
+		return IIO_VAL_INT;
+	case IIO_CHAN_INFO_RAW:
+		prop = &chip->chan_props[chan->address];
+		rc = rradc_do_conversion(chip, prop, &adc_code);
+		if (rc)
+			break;
+
+		*val = (int) adc_code;
+
+		return IIO_VAL_INT;
+	default:
+		rc = -EINVAL;
+		break;
+	}
+
+	return rc;
+}
+
+static const struct iio_info rradc_info = {
+	.read_raw	= &rradc_read_raw,
+	.driver_module	= THIS_MODULE,
+};
+
+static int rradc_get_dt_data(struct rradc_chip *chip, struct device_node *node)
+{
+	const struct rradc_channels *rradc_chan;
+	struct iio_chan_spec *iio_chan;
+	unsigned int i = 0, base;
+	int rc = 0;
+	struct rradc_chan_prop prop;
+
+	chip->nchannels = RR_ADC_MAX;
+	chip->iio_chans = devm_kcalloc(chip->dev, chip->nchannels,
+				       sizeof(*chip->iio_chans), GFP_KERNEL);
+	if (!chip->iio_chans)
+		return -ENOMEM;
+
+	chip->chan_props = devm_kcalloc(chip->dev, chip->nchannels,
+				       sizeof(*chip->chan_props), GFP_KERNEL);
+	if (!chip->chan_props)
+		return -ENOMEM;
+
+	/* Get the peripheral address */
+	rc = of_property_read_u32(node, "reg", &base);
+	if (rc < 0) {
+		dev_err(chip->dev,
+			"Couldn't find reg in node = %s rc = %d\n",
+			node->name, rc);
+		return rc;
+	}
+
+	chip->base = base;
+	chip->revid_dev_node = of_parse_phandle(node, "qcom,pmic-revid", 0);
+	if (chip->revid_dev_node) {
+		chip->pmic_fab_id = get_revid_data(chip->revid_dev_node);
+		if (IS_ERR(chip->pmic_fab_id)) {
+			rc = PTR_ERR(chip->pmic_fab_id);
+			if (rc != -EPROBE_DEFER)
+				pr_err("Unable to get pmic_revid rc=%d\n", rc);
+			return rc;
+		}
+
+		if (chip->pmic_fab_id->fab_id == -EINVAL) {
+			rc = chip->pmic_fab_id->fab_id;
+			pr_debug("Unable to read fabid rc=%d\n", rc);
+		}
+	}
+
+	iio_chan = chip->iio_chans;
+
+	for (i = 0; i < RR_ADC_MAX; i++) {
+		prop.channel = i;
+		prop.scale = rradc_chans[i].scale;
+		/* Private channel data used for selecting batt_id */
+		prop.channel_data = 0;
+		chip->chan_props[i] = prop;
+
+		rradc_chan = &rradc_chans[i];
+
+		iio_chan->channel = prop.channel;
+		iio_chan->datasheet_name = rradc_chan->datasheet_name;
+		iio_chan->extend_name = rradc_chan->datasheet_name;
+		iio_chan->info_mask_separate = rradc_chan->info_mask;
+		iio_chan->type = rradc_chan->type;
+		iio_chan->address = i;
+		iio_chan++;
+	}
+
+	return 0;
+}
+
+static int rradc_probe(struct platform_device *pdev)
+{
+	struct device_node *node = pdev->dev.of_node;
+	struct device *dev = &pdev->dev;
+	struct iio_dev *indio_dev;
+	struct rradc_chip *chip;
+	int rc = 0;
+
+	indio_dev = devm_iio_device_alloc(dev, sizeof(*chip));
+	if (!indio_dev)
+		return -ENOMEM;
+
+	chip = iio_priv(indio_dev);
+	chip->regmap = dev_get_regmap(pdev->dev.parent, NULL);
+	if (!chip->regmap) {
+		dev_err(&pdev->dev, "Couldn't get parent's regmap\n");
+		return -EINVAL;
+	}
+
+	chip->dev = dev;
+	mutex_init(&chip->lock);
+
+	rc = rradc_get_dt_data(chip, node);
+	if (rc)
+		return rc;
+
+	indio_dev->dev.parent = dev;
+	indio_dev->dev.of_node = node;
+	indio_dev->name = pdev->name;
+	indio_dev->modes = INDIO_DIRECT_MODE;
+	indio_dev->info = &rradc_info;
+	indio_dev->channels = chip->iio_chans;
+	indio_dev->num_channels = chip->nchannels;
+
+	chip->usb_trig = power_supply_get_by_name("usb");
+	if (!chip->usb_trig)
+		pr_debug("Error obtaining usb power supply\n");
+
+	return devm_iio_device_register(dev, indio_dev);
+}
+
+static const struct of_device_id rradc_match_table[] = {
+	{ .compatible = "qcom,rradc" },
+	{ }
+};
+MODULE_DEVICE_TABLE(of, rradc_match_table);
+
+static struct platform_driver rradc_driver = {
+	.driver		= {
+		.name		= "qcom-rradc",
+		.of_match_table	= rradc_match_table,
+	},
+	.probe = rradc_probe,
+};
+module_platform_driver(rradc_driver);
+
+MODULE_DESCRIPTION("QPNP PMIC RR ADC driver");
+MODULE_LICENSE("GPL v2");
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/iio/adc/qcom-tadc.c	2019-01-22 16:16:23.879249811 +0100
@@ -0,0 +1,1324 @@
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "TADC: %s: " fmt, __func__
+
+#include <linux/iio/iio.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/power_supply.h>
+#include <linux/pmic-voter.h>
+
+#define USB_PRESENT_VOTER			"USB_PRESENT_VOTER"
+#define SLEEP_VOTER				"SLEEP_VOTER"
+#define SHUTDOWN_VOTER				"SHUTDOWN_VOTER"
+#define TADC_REVISION1_REG			0x00
+#define TADC_REVISION2_REG			0x01
+#define TADC_REVISION3_REG			0x02
+#define TADC_REVISION4_REG			0x03
+#define TADC_PERPH_TYPE_REG			0x04
+#define TADC_PERPH_SUBTYPE_REG			0x05
+
+/* TADC register definitions */
+#define TADC_SW_CH_CONV_REG(chip)		(chip->tadc_base + 0x06)
+#define TADC_MBG_ERR_REG(chip)			(chip->tadc_base + 0x07)
+#define TADC_EN_CTL_REG(chip)			(chip->tadc_base + 0x46)
+#define TADC_CONV_REQ_REG(chip)			(chip->tadc_base + 0x51)
+#define TADC_HWTRIG_CONV_CH_EN_REG(chip)	(chip->tadc_base + 0x52)
+#define TADC_HW_SETTLE_DELAY_REG(chip)		(chip->tadc_base + 0x53)
+#define TADC_LONG_HW_SETTLE_DLY_EN_REG(chip)	(chip->tadc_base + 0x54)
+#define TADC_LONG_HW_SETTLE_DLY_REG(chip)	(chip->tadc_base + 0x55)
+#define TADC_ADC_BUF_CH_REG(chip)		(chip->tadc_base + 0x56)
+#define TADC_ADC_AAF_CH_REG(chip)		(chip->tadc_base + 0x57)
+#define TADC_ADC_DATA_RDBK_REG(chip)		(chip->tadc_base + 0x58)
+#define TADC_CH1_ADC_LO_REG(chip)		(chip->tadc_base + 0x60)
+#define TADC_CH1_ADC_HI_REG(chip)		(chip->tadc_base + 0x61)
+#define TADC_CH2_ADC_LO_REG(chip)		(chip->tadc_base + 0x62)
+#define TADC_CH2_ADC_HI_REG(chip)		(chip->tadc_base + 0x63)
+#define TADC_CH3_ADC_LO_REG(chip)		(chip->tadc_base + 0x64)
+#define TADC_CH3_ADC_HI_REG(chip)		(chip->tadc_base + 0x65)
+#define TADC_CH4_ADC_LO_REG(chip)		(chip->tadc_base + 0x66)
+#define TADC_CH4_ADC_HI_REG(chip)		(chip->tadc_base + 0x67)
+#define TADC_CH5_ADC_LO_REG(chip)		(chip->tadc_base + 0x68)
+#define TADC_CH5_ADC_HI_REG(chip)		(chip->tadc_base + 0x69)
+#define TADC_CH6_ADC_LO_REG(chip)		(chip->tadc_base + 0x70)
+#define TADC_CH6_ADC_HI_REG(chip)		(chip->tadc_base + 0x71)
+#define TADC_CH7_ADC_LO_REG(chip)		(chip->tadc_base + 0x72)
+#define TADC_CH7_ADC_HI_REG(chip)		(chip->tadc_base + 0x73)
+#define TADC_CH8_ADC_LO_REG(chip)		(chip->tadc_base + 0x74)
+#define TADC_CH8_ADC_HI_REG(chip)		(chip->tadc_base + 0x75)
+#define TADC_ADC_DIRECT_TST(chip)		(chip->tadc_base + 0xE7)
+
+/* TADC_CMP register definitions */
+#define TADC_CMP_THR1_CMP_REG(chip)		(chip->tadc_cmp_base + 0x51)
+#define TADC_CMP_THR1_CH1_CMP_LO_REG(chip)	(chip->tadc_cmp_base + 0x52)
+#define TADC_CMP_THR1_CH1_CMP_HI_REG(chip)	(chip->tadc_cmp_base + 0x53)
+#define TADC_CMP_THR1_CH2_CMP_LO_REG(chip)	(chip->tadc_cmp_base + 0x54)
+#define TADC_CMP_THR1_CH2_CMP_HI_REG(chip)	(chip->tadc_cmp_base + 0x55)
+#define TADC_CMP_THR1_CH3_CMP_LO_REG(chip)	(chip->tadc_cmp_base + 0x56)
+#define TADC_CMP_THR1_CH3_CMP_HI_REG(chip)	(chip->tadc_cmp_base + 0x57)
+#define TADC_CMP_THR2_CMP_REG(chip)		(chip->tadc_cmp_base + 0x67)
+#define TADC_CMP_THR2_CH1_CMP_LO_REG(chip)	(chip->tadc_cmp_base + 0x68)
+#define TADC_CMP_THR2_CH1_CMP_HI_REG(chip)	(chip->tadc_cmp_base + 0x69)
+#define TADC_CMP_THR2_CH2_CMP_LO_REG(chip)	(chip->tadc_cmp_base + 0x6A)
+#define TADC_CMP_THR2_CH2_CMP_HI_REG(chip)	(chip->tadc_cmp_base + 0x6B)
+#define TADC_CMP_THR2_CH3_CMP_LO_REG(chip)	(chip->tadc_cmp_base + 0x6C)
+#define TADC_CMP_THR2_CH3_CMP_HI_REG(chip)	(chip->tadc_cmp_base + 0x6D)
+#define TADC_CMP_THR3_CMP_REG(chip)		(chip->tadc_cmp_base + 0x7D)
+#define TADC_CMP_THR3_CH1_CMP_LO_REG(chip)	(chip->tadc_cmp_base + 0x7E)
+#define TADC_CMP_THR3_CH1_CMP_HI_REG(chip)	(chip->tadc_cmp_base + 0x7F)
+#define TADC_CMP_THR3_CH2_CMP_LO_REG(chip)	(chip->tadc_cmp_base + 0x80)
+#define TADC_CMP_THR3_CH2_CMP_HI_REG(chip)	(chip->tadc_cmp_base + 0x81)
+#define TADC_CMP_THR3_CH3_CMP_LO_REG(chip)	(chip->tadc_cmp_base + 0x82)
+#define TADC_CMP_THR3_CH3_CMP_HI_REG(chip)	(chip->tadc_cmp_base + 0x83)
+#define TADC_CMP_THR4_CMP_REG(chip)		(chip->tadc_cmp_base + 0x93)
+#define TADC_CMP_THR4_CH1_CMP_LO_REG(chip)	(chip->tadc_cmp_base + 0x94)
+#define TADC_CMP_THR4_CH1_CMP_HI_REG(chip)	(chip->tadc_cmp_base + 0x95)
+#define TADC_CMP_THR1_CH1_HYST_REG(chip)	(chip->tadc_cmp_base + 0xB0)
+#define TADC_CMP_THR2_CH1_HYST_REG(chip)	(chip->tadc_cmp_base + 0xB1)
+#define TADC_CMP_THR3_CH1_HYST_REG(chip)	(chip->tadc_cmp_base + 0xB2)
+#define TADC_CMP_THR4_CH1_HYST_REG(chip)	(chip->tadc_cmp_base + 0xB3)
+
+/* 10 bits of resolution */
+#define TADC_RESOLUTION			1024
+/* number of hardware channels */
+#define TADC_NUM_CH			8
+
+enum tadc_chan_id {
+	TADC_THERM1 = 0,
+	TADC_THERM2,
+	TADC_DIE_TEMP,
+	TADC_BATT_I,
+	TADC_BATT_V,
+	TADC_INPUT_I,
+	TADC_INPUT_V,
+	TADC_OTG_I,
+	/* virtual channels */
+	TADC_BATT_P,
+	TADC_INPUT_P,
+	TADC_THERM1_THR1,
+	TADC_THERM1_THR2,
+	TADC_THERM1_THR3,
+	TADC_THERM1_THR4,
+	TADC_THERM2_THR1,
+	TADC_THERM2_THR2,
+	TADC_THERM2_THR3,
+	TADC_DIE_TEMP_THR1,
+	TADC_DIE_TEMP_THR2,
+	TADC_DIE_TEMP_THR3,
+	TADC_CHAN_ID_MAX,
+};
+
+#define TADC_CHAN(_name, _type, _channel, _info_mask)	\
+{							\
+	.type			= _type,		\
+	.channel		= _channel,		\
+	.info_mask_separate	= _info_mask,		\
+	.extend_name		= _name,		\
+}
+
+#define TADC_THERM_CHAN(_name, _channel)		\
+TADC_CHAN(_name, IIO_TEMP, _channel,			\
+	BIT(IIO_CHAN_INFO_RAW) |			\
+	BIT(IIO_CHAN_INFO_PROCESSED))
+
+#define TADC_TEMP_CHAN(_name, _channel)			\
+TADC_CHAN(_name, IIO_TEMP, _channel,			\
+	BIT(IIO_CHAN_INFO_RAW) |			\
+	BIT(IIO_CHAN_INFO_PROCESSED) |			\
+	BIT(IIO_CHAN_INFO_SCALE) |			\
+	BIT(IIO_CHAN_INFO_OFFSET))
+
+#define TADC_CURRENT_CHAN(_name, _channel)		\
+TADC_CHAN(_name, IIO_CURRENT, _channel,			\
+	BIT(IIO_CHAN_INFO_RAW) |			\
+	BIT(IIO_CHAN_INFO_PROCESSED) |			\
+	BIT(IIO_CHAN_INFO_SCALE))
+
+
+#define TADC_VOLTAGE_CHAN(_name, _channel)		\
+TADC_CHAN(_name, IIO_VOLTAGE, _channel,			\
+	BIT(IIO_CHAN_INFO_RAW) |			\
+	BIT(IIO_CHAN_INFO_PROCESSED) |			\
+	BIT(IIO_CHAN_INFO_SCALE))
+
+#define TADC_POWER_CHAN(_name, _channel)		\
+TADC_CHAN(_name, IIO_POWER, _channel,			\
+	BIT(IIO_CHAN_INFO_PROCESSED))
+
+static const struct iio_chan_spec tadc_iio_chans[] = {
+	[TADC_THERM1]		= TADC_THERM_CHAN(
+					"batt", TADC_THERM1),
+	[TADC_THERM2]		= TADC_THERM_CHAN(
+					"skin", TADC_THERM2),
+	[TADC_DIE_TEMP]		= TADC_TEMP_CHAN(
+					"die", TADC_DIE_TEMP),
+	[TADC_BATT_I]		= TADC_CURRENT_CHAN(
+					"batt", TADC_BATT_I),
+	[TADC_BATT_V]		= TADC_VOLTAGE_CHAN(
+					"batt", TADC_BATT_V),
+	[TADC_INPUT_I]		= TADC_CURRENT_CHAN(
+					"input", TADC_INPUT_I),
+	[TADC_INPUT_V]		= TADC_VOLTAGE_CHAN(
+					"input", TADC_INPUT_V),
+	[TADC_OTG_I]		= TADC_CURRENT_CHAN(
+					"otg", TADC_OTG_I),
+	[TADC_BATT_P]		= TADC_POWER_CHAN(
+					"batt", TADC_BATT_P),
+	[TADC_INPUT_P]		= TADC_POWER_CHAN(
+					"input", TADC_INPUT_P),
+	[TADC_THERM1_THR1]	= TADC_THERM_CHAN(
+					"batt_warm", TADC_THERM1_THR1),
+	[TADC_THERM1_THR2]	= TADC_THERM_CHAN(
+					"batt_cool", TADC_THERM1_THR2),
+	[TADC_THERM1_THR3]	= TADC_THERM_CHAN(
+					"batt_cold", TADC_THERM1_THR3),
+	[TADC_THERM1_THR4]	= TADC_THERM_CHAN(
+					"batt_hot", TADC_THERM1_THR4),
+	[TADC_THERM2_THR1]	= TADC_THERM_CHAN(
+					"skin_lb", TADC_THERM2_THR1),
+	[TADC_THERM2_THR2]	= TADC_THERM_CHAN(
+					"skin_ub", TADC_THERM2_THR2),
+	[TADC_THERM2_THR3]	= TADC_THERM_CHAN(
+					"skin_rst", TADC_THERM2_THR3),
+	[TADC_DIE_TEMP_THR1]	= TADC_THERM_CHAN(
+					"die_lb", TADC_DIE_TEMP_THR1),
+	[TADC_DIE_TEMP_THR2]	= TADC_THERM_CHAN(
+					"die_ub", TADC_DIE_TEMP_THR2),
+	[TADC_DIE_TEMP_THR3]	= TADC_THERM_CHAN(
+					"die_rst", TADC_DIE_TEMP_THR3),
+};
+
+struct tadc_therm_thr {
+	int	addr_lo;
+	int	addr_hi;
+};
+
+struct tadc_chan_data {
+	s32			scale;
+	s32			offset;
+	u32			rbias;
+	const struct tadc_pt	*table;
+	size_t			tablesize;
+	struct tadc_therm_thr	thr[4];
+};
+
+struct tadc_chip {
+	struct device		*dev;
+	struct regmap		*regmap;
+	u32			tadc_base;
+	u32			tadc_cmp_base;
+	struct tadc_chan_data	chans[TADC_NUM_CH];
+	struct completion	eoc_complete;
+	struct mutex		write_lock;
+	struct mutex		conv_lock;
+	struct power_supply	*usb_psy;
+	struct votable		*tadc_disable_votable;
+	struct work_struct	status_change_work;
+	struct notifier_block	nb;
+	u8			hwtrig_conv;
+};
+
+struct tadc_pt {
+	s32 x;
+	s32 y;
+};
+
+/*
+ * Thermistor tables are generated by the B-parameter equation, which is a
+ * simplified version of the Steinhart-Hart equation.
+ *
+ * (1 / T) = (1 / T0) + (1 / B) * ln(R / R0)
+ *
+ * Where R0 is the resistance at temperature T0, and T0 is typically room
+ * temperature (25C).
+ */
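+
+/*
+ * Worked example (illustrative): R = R0 * exp(B * (1/T - 1/T0)). For the
+ * table below, B = 3450 K, R0 = 68000 ohms and T0 = 298.15 K (25C), so at
+ * T = 273.15 K (0C):
+ *   R = 68000 * exp(3450 * (1/273.15 - 1/298.15)) ~= 196090 ohms
+ * which is the { 196090, 0 } entry.
+ */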
+static const struct tadc_pt tadc_therm_3450b_68k[] = {
+	{ 4151,		120000 },
+	{ 4648,		115000 },
+	{ 5220,		110000 },
+	{ 5880,		105000 },
+	{ 6644,		100000 },
+	{ 7533,		95000 },
+	{ 8571,		90000 },
+	{ 9786,		85000 },
+	{ 11216,	80000 },
+	{ 12906,	75000 },
+	{ 14910,	70000 },
+	{ 17300,	65000 },
+	{ 20163,	60000 },
+	{ 23609,	55000 },
+	{ 27780,	50000 },
+	{ 32855,	45000 },
+	{ 39065,	40000 },
+	{ 46712,	35000 },
+	{ 56185,	30000 },
+	{ 68000,	25000 },
+	{ 82837,	20000 },
+	{ 101604,	15000 },
+	{ 125525,	10000 },
+	{ 156261,	5000 },
+	{ 196090,	0 },
+	{ 248163,	-5000 },
+	{ 316887,	-10000 },
+	{ 408493,	-15000 },
+	{ 531889,	-20000 },
+	{ 699966,	-25000 },
+	{ 931618,	-30000 },
+	{ 1254910,	-35000 },
+	{ 1712127,	-40000 },
+};
+
+static bool tadc_is_reg_locked(struct tadc_chip *chip, u16 reg)
+{
+	if ((reg & 0xFF00) == chip->tadc_cmp_base)
+		return true;
+
+	if (reg >= TADC_HWTRIG_CONV_CH_EN_REG(chip))
+		return true;
+
+	return false;
+}
+
+static int tadc_read(struct tadc_chip *chip, u16 reg, u8 *val, size_t count)
+{
+	int rc = 0;
+
+	rc = regmap_bulk_read(chip->regmap, reg, val, count);
+	if (rc < 0)
+		pr_err("Couldn't read 0x%04x rc=%d\n", reg, rc);
+
+	return rc;
+}
+
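+/*
+ * Writes to protected registers must be preceded by an unlock: writing
+ * 0xA5 to offset 0xD0 of the same peripheral (which looks like the usual
+ * PMIC SEC_ACCESS sequence) arms the hardware for one secured write.
+ */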
+static int tadc_write(struct tadc_chip *chip, u16 reg, u8 data)
+{
+	int rc = 0;
+
+	mutex_lock(&chip->write_lock);
+	if (tadc_is_reg_locked(chip, reg)) {
+		rc = regmap_write(chip->regmap, (reg & 0xFF00) | 0xD0, 0xA5);
+		if (rc < 0) {
+			pr_err("Couldn't unlock secure register rc=%d\n", rc);
+			goto unlock;
+		}
+	}
+
+	rc = regmap_write(chip->regmap, reg, data);
+	if (rc < 0) {
+		pr_err("Couldn't write 0x%02x to 0x%04x rc=%d\n",
+								data, reg, rc);
+		goto unlock;
+	}
+
+unlock:
+	mutex_unlock(&chip->write_lock);
+	return rc;
+}
+
+static int tadc_bulk_write(struct tadc_chip *chip, u16 reg, u8 *data,
+								size_t count)
+{
+	int rc = 0, i;
+
+	mutex_lock(&chip->write_lock);
+	for (i = 0; i < count; ++i, ++reg) {
+		if (tadc_is_reg_locked(chip, reg)) {
+			rc = regmap_write(chip->regmap,
+						(reg & 0xFF00) | 0xD0, 0xA5);
+			if (rc < 0) {
+				pr_err("Couldn't unlock secure register rc=%d\n",
+								rc);
+				goto unlock;
+			}
+		}
+
+		rc = regmap_write(chip->regmap, reg, data[i]);
+		if (rc < 0) {
+			pr_err("Couldn't write 0x%02x to 0x%04x rc=%d\n",
+							data[i], reg, rc);
+			goto unlock;
+		}
+	}
+
+unlock:
+	mutex_unlock(&chip->write_lock);
+	return rc;
+}
+
+static int tadc_masked_write(struct tadc_chip *chip, u16 reg, u8 mask, u8 data)
+{
+	int rc = 0;
+
+	mutex_lock(&chip->write_lock);
+	if (tadc_is_reg_locked(chip, reg)) {
+		rc = regmap_write(chip->regmap, (reg & 0xFF00) | 0xD0, 0xA5);
+		if (rc < 0) {
+			pr_err("Couldn't unlock secure register rc=%d\n", rc);
+			goto unlock;
+		}
+	}
+
+	rc = regmap_update_bits(chip->regmap, reg, mask, data);
+
+unlock:
+	mutex_unlock(&chip->write_lock);
+	return rc;
+}
+
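+/*
+ * Linearly interpolate 'input' against a table of (x, y) points. With
+ * inv == false the table is searched by x and the interpolated y is
+ * returned; with inv == true the roles of x and y are swapped. The table
+ * may be ascending or descending in either column; inputs outside its
+ * range are clamped to the first or last entry.
+ */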
+static int tadc_lerp(const struct tadc_pt *pts, size_t size, bool inv,
+							s32 input, s32 *output)
+{
+	int i;
+	s64 temp;
+	bool ascending;
+
+	if (pts == NULL) {
+		pr_err("Table is NULL\n");
+		return -EINVAL;
+	}
+
+	if (size < 1) {
+		pr_err("Table has no entries\n");
+		return -ENOENT;
+	}
+
+	if (size == 1) {
+		*output = inv ? pts[0].x : pts[0].y;
+		return 0;
+	}
+
+	ascending = inv ? (pts[0].y < pts[1].y) : (pts[0].x < pts[1].x);
+	if (ascending ? (input <= (inv ? pts[0].y : pts[0].x)) :
+			(input >= (inv ? pts[0].y : pts[0].x))) {
+		*output = inv ? pts[0].x : pts[0].y;
+		return 0;
+	}
+
+	if (ascending ? (input >= (inv ? pts[size - 1].y : pts[size - 1].x)) :
+			(input <= (inv ? pts[size - 1].y : pts[size - 1].x))) {
+		*output = inv ? pts[size - 1].x : pts[size - 1].y;
+		return 0;
+	}
+
+	for (i = 1; i < size; i++)
+		if (ascending ? (input <= (inv ? pts[i].y : pts[i].x)) :
+				(input >= (inv ? pts[i].y : pts[i].x)))
+			break;
+
+	if (inv) {
+		temp = (s64)(pts[i].x - pts[i - 1].x) *
+						(s64)(input - pts[i - 1].y);
+		temp = div_s64(temp, pts[i].y - pts[i - 1].y);
+		*output = temp + pts[i - 1].x;
+	} else {
+		temp = (s64)(pts[i].y - pts[i - 1].y) *
+						(s64)(input - pts[i - 1].x);
+		temp = div_s64(temp, pts[i].x - pts[i - 1].x);
+		*output = temp + pts[i - 1].y;
+	}
+
+	return 0;
+}
+
+/*
+ * Process the result of a thermistor reading.
+ *
+ * The voltage input to the ADC is a result of a voltage divider circuit.
+ * Vout = (Rtherm / (Rbias + Rtherm)) * Vbias
+ *
+ * The ADC value is based on the output voltage of the voltage divider, and the
+ * bias voltage.
+ * ADC = (Vin * 1024) / Vbias
+ *
+ * Combine these equations and solve for Rtherm
+ * Rtherm = (ADC * Rbias) / (1024 - ADC)
+ */
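+/*
+ * Worked example (illustrative): with Rbias = 68000 ohms and a mid-scale
+ * reading of ADC = 512, Rtherm = (512 * 68000) / (1024 - 512) = 68000 ohms,
+ * which the 3450B/68k table above maps to 25000 mdegC (25C).
+ */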
+static int tadc_get_processed_therm(const struct tadc_chan_data *chan_data,
+							s16 adc, s32 *result)
+{
+	s32 rtherm;
+
+	rtherm = div_s64((s64)adc * chan_data->rbias, TADC_RESOLUTION - adc);
+	return tadc_lerp(chan_data->table, chan_data->tablesize, false, rtherm,
+									result);
+}
+
+static int tadc_get_raw_therm(const struct tadc_chan_data *chan_data,
+							int mdegc, int *result)
+{
+	int rc;
+	s32 rtherm;
+
+	rc = tadc_lerp(chan_data->table, chan_data->tablesize, true, mdegc,
+								&rtherm);
+	if (rc < 0) {
+		pr_err("Couldn't interpolate %d\n rc=%d", mdegc, rc);
+		return rc;
+	}
+
+	*result = div64_s64((s64)rtherm * TADC_RESOLUTION,
+						(s64)chan_data->rbias + rtherm);
+	return 0;
+}
+
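+/*
+ * Channel results are 11-bit two's-complement values stored low byte
+ * first across two registers; sign_extend32(..., 10) treats bit 10 as
+ * the sign bit.
+ */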
+static int tadc_read_channel(struct tadc_chip *chip, u16 address, int *adc)
+{
+	u8 val[2];
+	int rc;
+
+	rc = tadc_read(chip, address, val, ARRAY_SIZE(val));
+	if (rc < 0) {
+		pr_err("Couldn't read channel rc=%d\n", rc);
+		return rc;
+	}
+
+	/* the 10th bit is the sign bit for all channels */
+	*adc = sign_extend32(val[0] | val[1] << BITS_PER_BYTE, 10);
+	return rc;
+}
+
+static int tadc_write_channel(struct tadc_chip *chip, u16 address, int adc)
+{
+	u8 val[2];
+	int rc;
+
+	/* the 10th bit is the sign bit for all channels */
+	adc = sign_extend32(adc, 10);
+	val[0] = (u8)adc;
+	val[1] = (u8)(adc >> BITS_PER_BYTE);
+	rc = tadc_bulk_write(chip, address, val, 2);
+	if (rc < 0) {
+		pr_err("Couldn't write to channel rc=%d\n", rc);
+		return rc;
+	}
+
+	return rc;
+}
+
+#define CONVERSION_TIMEOUT_MS 100
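+/*
+ * Request a software-triggered conversion of 'channels' (a bitmask) and
+ * collect the results of all hardware channels. Returns -ENODATA while
+ * the TADC is disabled by the vote. If an MBG error is latched
+ * (presumably a bandgap fault), the block is toggled off and back on
+ * before converting. The eoc interrupt completes eoc_complete; on
+ * timeout the conversion status register is checked once more in case
+ * completion raced with the timeout.
+ */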
+static int tadc_do_conversion(struct tadc_chip *chip, u8 channels, s16 *adc)
+{
+	unsigned long timeout, timeleft;
+	u8 val[TADC_NUM_CH * 2];
+	int rc = 0, i;
+
+	mutex_lock(&chip->conv_lock);
+	rc = tadc_read(chip, TADC_MBG_ERR_REG(chip), val, 1);
+	if (rc < 0) {
+		pr_err("Couldn't read mbg error status rc=%d\n", rc);
+		goto unlock;
+	}
+
+	reinit_completion(&chip->eoc_complete);
+
+	if (get_effective_result(chip->tadc_disable_votable)) {
+		/* leave it back in completed state */
+		complete_all(&chip->eoc_complete);
+		rc = -ENODATA;
+		goto unlock;
+	}
+
+	if (val[0] != 0) {
+		tadc_write(chip, TADC_EN_CTL_REG(chip), 0);
+		tadc_write(chip, TADC_EN_CTL_REG(chip), 0x80);
+	}
+
+	rc = tadc_write(chip, TADC_CONV_REQ_REG(chip), channels);
+	if (rc < 0) {
+		pr_err("Couldn't write conversion request rc=%d\n", rc);
+		goto unlock;
+	}
+
+	timeout = msecs_to_jiffies(CONVERSION_TIMEOUT_MS);
+	timeleft = wait_for_completion_timeout(&chip->eoc_complete, timeout);
+
+	if (timeleft == 0) {
+		rc = tadc_read(chip, TADC_SW_CH_CONV_REG(chip), val, 1);
+		if (rc < 0) {
+			pr_err("Couldn't read conversion status rc=%d\n", rc);
+			goto unlock;
+		}
+
+		/*
+		 * check one last time if the channel we are requesting
+		 * has completed conversion
+		 */
+		if (val[0] != channels) {
+			rc = -ETIMEDOUT;
+			goto unlock;
+		}
+	}
+
+	rc = tadc_read(chip, TADC_CH1_ADC_LO_REG(chip), val, ARRAY_SIZE(val));
+	if (rc < 0) {
+		pr_err("Couldn't read adc channels rc=%d\n", rc);
+		goto unlock;
+	}
+
+	for (i = 0; i < TADC_NUM_CH; i++)
+		adc[i] = (s16)(val[i * 2] | (u16)val[i * 2 + 1] << 8);
+
+	pr_debug("Conversion time for channels 0x%x = %dms\n", channels,
+			jiffies_to_msecs(timeout - timeleft));
+
+unlock:
+	mutex_unlock(&chip->conv_lock);
+	return rc;
+}
+
+static int tadc_read_raw(struct iio_dev *indio_dev,
+		struct iio_chan_spec const *chan, int *val, int *val2,
+		long mask)
+{
+	struct tadc_chip *chip = iio_priv(indio_dev);
+	struct tadc_chan_data *chan_data = NULL;
+	int rc, offset = 0, scale, scale2, scale_type;
+	s16 adc[TADC_NUM_CH];
+
+	switch (chan->channel) {
+	case TADC_THERM1_THR1:
+	case TADC_THERM1_THR2:
+	case TADC_THERM1_THR3:
+	case TADC_THERM1_THR4:
+		chan_data = &chip->chans[TADC_THERM1];
+		break;
+	case TADC_THERM2_THR1:
+	case TADC_THERM2_THR2:
+	case TADC_THERM2_THR3:
+		chan_data = &chip->chans[TADC_THERM2];
+		break;
+	case TADC_DIE_TEMP_THR1:
+	case TADC_DIE_TEMP_THR2:
+	case TADC_DIE_TEMP_THR3:
+		chan_data = &chip->chans[TADC_DIE_TEMP];
+		break;
+	default:
+		if (chan->channel >= ARRAY_SIZE(chip->chans)) {
+			pr_err("Channel %d is out of bounds\n", chan->channel);
+			return -EINVAL;
+		}
+
+		chan_data = &chip->chans[chan->channel];
+		break;
+	}
+
+	if (!chan_data)
+		return -EINVAL;
+
+	switch (mask) {
+	case IIO_CHAN_INFO_RAW:
+		switch (chan->channel) {
+		case TADC_THERM1_THR1:
+		case TADC_THERM2_THR1:
+		case TADC_DIE_TEMP_THR1:
+			rc = tadc_read_channel(chip,
+					chan_data->thr[0].addr_lo, val);
+			break;
+		case TADC_THERM1_THR2:
+		case TADC_THERM2_THR2:
+		case TADC_DIE_TEMP_THR2:
+			rc = tadc_read_channel(chip,
+					chan_data->thr[1].addr_lo, val);
+			break;
+		case TADC_THERM1_THR3:
+		case TADC_THERM2_THR3:
+		case TADC_DIE_TEMP_THR3:
+			rc = tadc_read_channel(chip,
+					chan_data->thr[2].addr_lo, val);
+			break;
+		case TADC_THERM1_THR4:
+			rc = tadc_read_channel(chip,
+					chan_data->thr[3].addr_lo, val);
+			break;
+		default:
+			rc = tadc_do_conversion(chip, BIT(chan->channel), adc);
+			if (rc < 0) {
+				if (rc != -ENODATA)
+					pr_err("Couldn't read battery current and voltage channels rc=%d\n",
+									rc);
+				return rc;
+			}
+			*val = adc[chan->channel];
+			break;
+		}
+
+		if (rc < 0 && rc != -ENODATA) {
+			pr_err("Couldn't read channel %d\n", chan->channel);
+			return rc;
+		}
+
+		return IIO_VAL_INT;
+	case IIO_CHAN_INFO_PROCESSED:
+		switch (chan->channel) {
+		case TADC_THERM1:
+		case TADC_THERM2:
+		case TADC_THERM1_THR1:
+		case TADC_THERM1_THR2:
+		case TADC_THERM1_THR3:
+		case TADC_THERM1_THR4:
+		case TADC_THERM2_THR1:
+		case TADC_THERM2_THR2:
+		case TADC_THERM2_THR3:
+			rc = tadc_read_raw(indio_dev, chan, val, NULL,
+							IIO_CHAN_INFO_RAW);
+			if (rc < 0)
+				return rc;
+
+			rc = tadc_get_processed_therm(chan_data, *val, val);
+			if (rc < 0) {
+				pr_err("Couldn't process 0x%04x from channel %d rc=%d\n",
+						*val, chan->channel, rc);
+				return rc;
+			}
+			break;
+		case TADC_BATT_P:
+			rc = tadc_do_conversion(chip,
+				BIT(TADC_BATT_I) | BIT(TADC_BATT_V), adc);
+			if (rc < 0 && rc != -ENODATA) {
+				pr_err("Couldn't read battery current and voltage channels rc=%d\n",
+									rc);
+				return rc;
+			}
+
+			*val = adc[TADC_BATT_I] * adc[TADC_BATT_V];
+			break;
+		case TADC_INPUT_P:
+			rc = tadc_do_conversion(chip,
+				BIT(TADC_INPUT_I) | BIT(TADC_INPUT_V), adc);
+			if (rc < 0 && rc != -ENODATA) {
+				pr_err("Couldn't read input current and voltage channels rc=%d\n",
+									rc);
+				return rc;
+			}
+
+			*val = adc[TADC_INPUT_I] * adc[TADC_INPUT_V];
+			break;
+		default:
+			rc = tadc_read_raw(indio_dev, chan, val, NULL,
+							IIO_CHAN_INFO_RAW);
+			if (rc < 0)
+				return rc;
+
+			/* offset is optional */
+			rc = tadc_read_raw(indio_dev, chan, &offset, NULL,
+							IIO_CHAN_INFO_OFFSET);
+			if (rc < 0)
+				return rc;
+
+			scale_type = tadc_read_raw(indio_dev, chan,
+					&scale, &scale2, IIO_CHAN_INFO_SCALE);
+			switch (scale_type) {
+			case IIO_VAL_INT:
+				*val = *val * scale + offset;
+				break;
+			case IIO_VAL_FRACTIONAL:
+				*val = div_s64((s64)*val * scale + offset,
+									scale2);
+				break;
+			default:
+				return -EINVAL;
+			}
+			break;
+		}
+
+		return IIO_VAL_INT;
+	case IIO_CHAN_INFO_SCALE:
+		switch (chan->channel) {
+		case TADC_DIE_TEMP:
+		case TADC_DIE_TEMP_THR1:
+		case TADC_DIE_TEMP_THR2:
+		case TADC_DIE_TEMP_THR3:
+			*val = chan_data->scale;
+			return IIO_VAL_INT;
+		case TADC_BATT_I:
+		case TADC_BATT_V:
+		case TADC_INPUT_I:
+		case TADC_INPUT_V:
+		case TADC_OTG_I:
+			*val = chan_data->scale;
+			*val2 = TADC_RESOLUTION;
+			return IIO_VAL_FRACTIONAL;
+		}
+
+		return -EINVAL;
+	case IIO_CHAN_INFO_OFFSET:
+		*val = chan_data->offset;
+		return IIO_VAL_INT;
+	}
+
+	return -EINVAL;
+}
+
+static int tadc_write_raw(struct iio_dev *indio_dev,
+		struct iio_chan_spec const *chan, int val, int val2,
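+/*
+ * The map tables translate the raw Gray-coded state read from the GPIOs
+ * into a sequential position, so that adjacent positions always differ
+ * by a single bit on the wire. The inline comments list the raw codes in
+ * position order.
+ */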
+		long mask)
+{
+	struct tadc_chip *chip = iio_priv(indio_dev);
+	const struct tadc_chan_data *chan_data;
+	int rc, raw;
+	s32 rem;
+
+	switch (chan->channel) {
+	case TADC_THERM1_THR1:
+	case TADC_THERM1_THR2:
+	case TADC_THERM1_THR3:
+	case TADC_THERM1_THR4:
+		chan_data = &chip->chans[TADC_THERM1];
+		break;
+	case TADC_THERM2_THR1:
+	case TADC_THERM2_THR2:
+	case TADC_THERM2_THR3:
+		chan_data = &chip->chans[TADC_THERM2];
+		break;
+	case TADC_DIE_TEMP_THR1:
+	case TADC_DIE_TEMP_THR2:
+	case TADC_DIE_TEMP_THR3:
+		chan_data = &chip->chans[TADC_DIE_TEMP];
+		break;
+	default:
+		if (chan->channel >= ARRAY_SIZE(chip->chans)) {
+			pr_err("Channel %d is out of bounds\n", chan->channel);
+			return -EINVAL;
+		}
+
+		chan_data = &chip->chans[chan->channel];
+		break;
+	}
+
+	if (!chan_data)
+		return -EINVAL;
+
+	switch (mask) {
+	case IIO_CHAN_INFO_PROCESSED:
+		switch (chan->channel) {
+		case TADC_THERM1_THR1:
+		case TADC_THERM1_THR2:
+		case TADC_THERM1_THR3:
+		case TADC_THERM1_THR4:
+		case TADC_THERM2_THR1:
+		case TADC_THERM2_THR2:
+		case TADC_THERM2_THR3:
+			rc = tadc_get_raw_therm(chan_data, val, &raw);
+			if (rc < 0) {
+				pr_err("Couldn't get raw value rc=%d\n", rc);
+				return rc;
+			}
+			break;
+		case TADC_DIE_TEMP_THR1:
+		case TADC_DIE_TEMP_THR2:
+		case TADC_DIE_TEMP_THR3:
+			/* DIV_ROUND_CLOSEST does not like negative numbers */
+			raw = div_s64_rem(val - chan_data->offset,
+							chan_data->scale, &rem);
+			if (abs(rem) >= abs(chan_data->scale / 2))
+				raw++;
+			break;
+		default:
+			return -EINVAL;
+		}
+
+		rc = tadc_write_raw(indio_dev, chan, raw, 0,
+							IIO_CHAN_INFO_RAW);
+		if (rc < 0) {
+			pr_err("Couldn't write raw rc=%d\n", rc);
+			return rc;
+		}
+
+		break;
+	case IIO_CHAN_INFO_RAW:
+		switch (chan->channel) {
+		case TADC_THERM1_THR1:
+		case TADC_THERM2_THR1:
+		case TADC_DIE_TEMP_THR1:
+			rc = tadc_write_channel(chip,
+					chan_data->thr[0].addr_lo, val);
+			break;
+		case TADC_THERM1_THR2:
+		case TADC_THERM2_THR2:
+		case TADC_DIE_TEMP_THR2:
+			rc = tadc_write_channel(chip,
+					chan_data->thr[1].addr_lo, val);
+			break;
+		case TADC_THERM1_THR3:
+		case TADC_THERM2_THR3:
+		case TADC_DIE_TEMP_THR3:
+			rc = tadc_write_channel(chip,
+					chan_data->thr[2].addr_lo, val);
+			break;
+		case TADC_THERM1_THR4:
+			rc = tadc_write_channel(chip,
+					chan_data->thr[3].addr_lo, val);
+			break;
+		default:
+			return -EINVAL;
+		}
+
+		if (rc < 0) {
+			pr_err("Couldn't write channel %d\n", chan->channel);
+			return rc;
+		}
+
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static irqreturn_t handle_eoc(int irq, void *dev_id)
+{
+	struct tadc_chip *chip = dev_id;
+
+	complete_all(&chip->eoc_complete);
+	return IRQ_HANDLED;
+}
+
+static int tadc_disable_vote_callback(struct votable *votable,
+			void *data, int disable, const char *client)
+{
+	struct tadc_chip *chip = data;
+	int rc;
+	int timeout;
+	unsigned long timeleft;
+
+	if (disable) {
+		timeout = msecs_to_jiffies(CONVERSION_TIMEOUT_MS);
+		timeleft = wait_for_completion_timeout(&chip->eoc_complete,
+				timeout);
+		if (timeleft == 0)
+			pr_err("Timed out waiting for eoc, disabling hw conversions regardless\n");
+
+		rc = tadc_read(chip, TADC_HWTRIG_CONV_CH_EN_REG(chip),
+							&chip->hwtrig_conv, 1);
+		if (rc < 0) {
+			pr_err("Couldn't save hw conversions rc=%d\n", rc);
+			return rc;
+		}
+		rc = tadc_write(chip, TADC_HWTRIG_CONV_CH_EN_REG(chip), 0x00);
+		if (rc < 0) {
+			pr_err("Couldn't disable hw conversions rc=%d\n", rc);
+			return rc;
+		}
+		rc = tadc_write(chip, TADC_ADC_DIRECT_TST(chip), 0x80);
+		if (rc < 0) {
+			pr_err("Couldn't enable direct test mode rc=%d\n", rc);
+			return rc;
+		}
+	} else {
+		rc = tadc_write(chip, TADC_ADC_DIRECT_TST(chip), 0x00);
+		if (rc < 0) {
+			pr_err("Couldn't disable direct test mode rc=%d\n", rc);
+			return rc;
+		}
+		rc = tadc_write(chip, TADC_HWTRIG_CONV_CH_EN_REG(chip),
+							chip->hwtrig_conv);
+		if (rc < 0) {
+			pr_err("Couldn't restore hw conversions rc=%d\n", rc);
+			return rc;
+		}
+	}
+
+	pr_debug("client: %s disable: %d\n", client, disable);
+	return 0;
+}
+
+static void status_change_work(struct work_struct *work)
+{
+	struct tadc_chip *chip = container_of(work,
+			struct tadc_chip, status_change_work);
+	union power_supply_propval pval = {0, };
+	int rc;
+
+	if (!chip->usb_psy)
+		chip->usb_psy = power_supply_get_by_name("usb");
+
+	if (!chip->usb_psy) {
+		/* treat usb as not present */
+		vote(chip->tadc_disable_votable, USB_PRESENT_VOTER, true, 0);
+		return;
+	}
+
+	rc = power_supply_get_property(chip->usb_psy,
+		       POWER_SUPPLY_PROP_PRESENT, &pval);
+	if (rc < 0) {
+		pr_err("Couldn't get present status rc=%d\n", rc);
+		/* treat usb as not present */
+		vote(chip->tadc_disable_votable, USB_PRESENT_VOTER, true, 0);
+		return;
+	}
+
+	/* disable if usb is not present */
+	vote(chip->tadc_disable_votable, USB_PRESENT_VOTER, !pval.intval, 0);
+}
+
+static int tadc_notifier_call(struct notifier_block *nb,
+		unsigned long ev, void *v)
+{
+	struct power_supply *psy = v;
+	struct tadc_chip *chip = container_of(nb, struct tadc_chip, nb);
+
+	if (ev != PSY_EVENT_PROP_CHANGED)
+		return NOTIFY_OK;
+
+	if ((strcmp(psy->desc->name, "usb") == 0))
+		schedule_work(&chip->status_change_work);
+
+	return NOTIFY_OK;
+}
+
+static int tadc_register_notifier(struct tadc_chip *chip)
+{
+	int rc;
+
+	chip->nb.notifier_call = tadc_notifier_call;
+	rc = power_supply_reg_notifier(&chip->nb);
+	if (rc < 0) {
+		pr_err("Couldn't register psy notifier rc = %d\n", rc);
+		return rc;
+	}
+
+	return 0;
+}
+
+static int tadc_suspend(struct device *dev)
+{
+	struct tadc_chip *chip = dev_get_drvdata(dev);
+
+	vote(chip->tadc_disable_votable, SLEEP_VOTER, true, 0);
+	return 0;
+}
+
+static int tadc_resume(struct device *dev)
+{
+	struct tadc_chip *chip = dev_get_drvdata(dev);
+
+	vote(chip->tadc_disable_votable, SLEEP_VOTER, false, 0);
+	return 0;
+}
+
+static int tadc_set_therm_table(struct tadc_chan_data *chan_data, u32 beta,
+				u32 rtherm)
+{
+	if (beta == 3450 && rtherm == 68000) {
+		chan_data->table = tadc_therm_3450b_68k;
+		chan_data->tablesize = ARRAY_SIZE(tadc_therm_3450b_68k);
+		return 0;
+	}
+
+	return -ENOENT;
+}
+
+static int tadc_parse_dt(struct tadc_chip *chip)
+{
+	struct device_node *child, *node;
+	struct tadc_chan_data *chan_data;
+	u32 chan_id, rtherm, beta;
+	int rc = 0;
+
+	node = chip->dev->of_node;
+	for_each_available_child_of_node(node, child) {
+		rc = of_property_read_u32(child, "reg", &chan_id);
+		if (rc < 0) {
+			pr_err("Couldn't find channel for %s rc=%d",
+							child->name, rc);
+			return rc;
+		}
+
+		if (chan_id > TADC_NUM_CH - 1) {
+			pr_err("Channel %d is out of range [0, %d]\n",
+						chan_id, TADC_NUM_CH - 1);
+			return -EINVAL;
+		}
+
+		chan_data = &chip->chans[chan_id];
+		if (chan_id == TADC_THERM1 || chan_id == TADC_THERM2) {
+			rc = of_property_read_u32(child,
+					"qcom,rbias", &chan_data->rbias);
+			if (rc < 0) {
+				pr_err("Couldn't read qcom,rbias rc=%d\n", rc);
+				return rc;
+			}
+
+			rc = of_property_read_u32(child,
+					"qcom,beta-coefficient", &beta);
+			if (rc < 0) {
+				pr_err("Couldn't read qcom,beta-coefficient rc=%d\n",
+									rc);
+				return rc;
+			}
+
+			rc = of_property_read_u32(child,
+					"qcom,rtherm-at-25degc", &rtherm);
+			if (rc < 0) {
+				pr_err("Couldn't read qcom,rtherm-at-25degc rc=%d\n",
+					rc);
+				return rc;
+			}
+
+			rc = tadc_set_therm_table(chan_data, beta, rtherm);
+			if (rc < 0) {
+				pr_err("Couldn't set therm table rc=%d\n", rc);
+				return rc;
+			}
+		} else {
+			rc = of_property_read_s32(child, "qcom,scale",
+							&chan_data->scale);
+			if (rc < 0) {
+				pr_err("Couldn't read scale rc=%d\n", rc);
+				return rc;
+			}
+
+			of_property_read_s32(child, "qcom,offset",
+							&chan_data->offset);
+		}
+	}
+
+	return rc;
+}
+
+static int tadc_init_hw(struct tadc_chip *chip)
+{
+	int rc;
+
+	chip->chans[TADC_THERM1].thr[0].addr_lo =
+					TADC_CMP_THR1_CH1_CMP_LO_REG(chip);
+	chip->chans[TADC_THERM1].thr[0].addr_hi =
+					TADC_CMP_THR1_CH1_CMP_HI_REG(chip);
+	chip->chans[TADC_THERM1].thr[1].addr_lo =
+					TADC_CMP_THR2_CH1_CMP_LO_REG(chip);
+	chip->chans[TADC_THERM1].thr[1].addr_hi =
+					TADC_CMP_THR2_CH1_CMP_HI_REG(chip);
+	chip->chans[TADC_THERM1].thr[2].addr_lo =
+					TADC_CMP_THR3_CH1_CMP_LO_REG(chip);
+	chip->chans[TADC_THERM1].thr[2].addr_hi =
+					TADC_CMP_THR3_CH1_CMP_HI_REG(chip);
+	chip->chans[TADC_THERM1].thr[3].addr_lo =
+					TADC_CMP_THR4_CH1_CMP_LO_REG(chip);
+	chip->chans[TADC_THERM1].thr[3].addr_hi =
+					TADC_CMP_THR4_CH1_CMP_HI_REG(chip);
+
+	chip->chans[TADC_THERM2].thr[0].addr_lo =
+					TADC_CMP_THR1_CH2_CMP_LO_REG(chip);
+	chip->chans[TADC_THERM2].thr[0].addr_hi =
+					TADC_CMP_THR1_CH2_CMP_HI_REG(chip);
+	chip->chans[TADC_THERM2].thr[1].addr_lo =
+					TADC_CMP_THR2_CH2_CMP_LO_REG(chip);
+	chip->chans[TADC_THERM2].thr[1].addr_hi =
+					TADC_CMP_THR2_CH2_CMP_HI_REG(chip);
+	chip->chans[TADC_THERM2].thr[2].addr_lo =
+					TADC_CMP_THR3_CH2_CMP_LO_REG(chip);
+	chip->chans[TADC_THERM2].thr[2].addr_hi =
+					TADC_CMP_THR3_CH2_CMP_HI_REG(chip);
+
+	chip->chans[TADC_DIE_TEMP].thr[0].addr_lo =
+					TADC_CMP_THR1_CH3_CMP_LO_REG(chip);
+	chip->chans[TADC_DIE_TEMP].thr[0].addr_hi =
+					TADC_CMP_THR1_CH3_CMP_HI_REG(chip);
+	chip->chans[TADC_DIE_TEMP].thr[1].addr_lo =
+					TADC_CMP_THR2_CH3_CMP_LO_REG(chip);
+	chip->chans[TADC_DIE_TEMP].thr[1].addr_hi =
+					TADC_CMP_THR2_CH3_CMP_HI_REG(chip);
+	chip->chans[TADC_DIE_TEMP].thr[2].addr_lo =
+					TADC_CMP_THR3_CH3_CMP_LO_REG(chip);
+	chip->chans[TADC_DIE_TEMP].thr[2].addr_hi =
+					TADC_CMP_THR3_CH3_CMP_HI_REG(chip);
+
+	rc = tadc_write(chip, TADC_CMP_THR1_CMP_REG(chip), 0);
+	if (rc < 0) {
+		pr_err("Couldn't enable hardware triggers rc=%d\n", rc);
+		return rc;
+	}
+
+	rc = tadc_write(chip, TADC_CMP_THR2_CMP_REG(chip), 0);
+	if (rc < 0) {
+		pr_err("Couldn't enable hardware triggers rc=%d\n", rc);
+		return rc;
+	}
+
+	rc = tadc_write(chip, TADC_CMP_THR3_CMP_REG(chip), 0);
+	if (rc < 0) {
+		pr_err("Couldn't enable hardware triggers rc=%d\n", rc);
+		return rc;
+	}
+
+	/* enable connector and die temp hardware triggers */
+	rc = tadc_masked_write(chip, TADC_HWTRIG_CONV_CH_EN_REG(chip),
+					BIT(TADC_THERM2) | BIT(TADC_DIE_TEMP),
+					BIT(TADC_THERM2) | BIT(TADC_DIE_TEMP));
+	if (rc < 0) {
+		pr_err("Couldn't enable hardware triggers rc=%d\n", rc);
+		return rc;
+	}
+
+	/* save hw triggered conversion configuration */
+	rc = tadc_read(chip, TADC_HWTRIG_CONV_CH_EN_REG(chip),
+							&chip->hwtrig_conv, 1);
+	if (rc < 0) {
+		pr_err("Couldn't save hw conversions rc=%d\n", rc);
+		return rc;
+	}
+
+	return 0;
+}
+
+static const struct iio_info tadc_info = {
+	.read_raw		= &tadc_read_raw,
+	.write_raw		= &tadc_write_raw,
+	.driver_module		= THIS_MODULE,
+};
+
+static int tadc_probe(struct platform_device *pdev)
+{
+	struct device_node *node = pdev->dev.of_node;
+	struct iio_dev *indio_dev;
+	struct tadc_chip *chip;
+	int rc, irq;
+
+	indio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(*chip));
+	if (!indio_dev)
+		return -ENOMEM;
+
+	chip = iio_priv(indio_dev);
+	chip->dev = &pdev->dev;
+	init_completion(&chip->eoc_complete);
+
+	/*
+	 * set the completion in "completed" state so disable of the tadc
+	 * can progress
+	 */
+	complete_all(&chip->eoc_complete);
+
+	rc = of_property_read_u32(node, "reg", &chip->tadc_base);
+	if (rc < 0) {
+		pr_err("Couldn't read base address rc=%d\n", rc);
+		return rc;
+	}
+	chip->tadc_cmp_base = chip->tadc_base + 0x100;
+
+	mutex_init(&chip->write_lock);
+	mutex_init(&chip->conv_lock);
+	INIT_WORK(&chip->status_change_work, status_change_work);
+	chip->regmap = dev_get_regmap(chip->dev->parent, NULL);
+	if (!chip->regmap) {
+		pr_err("Couldn't get regmap\n");
+		return -ENODEV;
+	}
+
+	rc = tadc_parse_dt(chip);
+	if (rc < 0) {
+		pr_err("Couldn't parse device tree rc=%d\n", rc);
+		return rc;
+	}
+
+	rc = tadc_init_hw(chip);
+	if (rc < 0) {
+		pr_err("Couldn't initialize hardware rc=%d\n", rc);
+		return rc;
+	}
+
+	chip->tadc_disable_votable = create_votable("SMB_TADC_DISABLE",
+					VOTE_SET_ANY,
+					tadc_disable_vote_callback,
+					chip);
+	if (IS_ERR(chip->tadc_disable_votable)) {
+		rc = PTR_ERR(chip->tadc_disable_votable);
+		return rc;
+	}
+	/* assume usb is not present */
+	vote(chip->tadc_disable_votable, USB_PRESENT_VOTER, true, 0);
+	vote(chip->tadc_disable_votable, SHUTDOWN_VOTER, false, 0);
+	vote(chip->tadc_disable_votable, SLEEP_VOTER, false, 0);
+
+	rc = tadc_register_notifier(chip);
+	if (rc < 0) {
+		pr_err("Couldn't register notifier=%d\n", rc);
+		goto destroy_votable;
+	}
+
+	irq = of_irq_get_byname(node, "eoc");
+	if (irq < 0) {
+		pr_err("Couldn't get eoc irq rc=%d\n", irq);
+		rc = irq;
+		goto destroy_votable;
+	}
+
+	rc = devm_request_threaded_irq(chip->dev, irq, NULL, handle_eoc,
+						IRQF_ONESHOT, "eoc", chip);
+	if (rc < 0) {
+		pr_err("Couldn't request irq %d rc=%d\n", irq, rc);
+		goto destroy_votable;
+	}
+
+	indio_dev->dev.parent = chip->dev;
+	indio_dev->name = pdev->name;
+	indio_dev->modes = INDIO_DIRECT_MODE;
+	indio_dev->info = &tadc_info;
+	indio_dev->channels = tadc_iio_chans;
+	indio_dev->num_channels = ARRAY_SIZE(tadc_iio_chans);
+
+	rc = devm_iio_device_register(chip->dev, indio_dev);
+	if (rc < 0) {
+		pr_err("Couldn't register IIO device rc=%d\n", rc);
+		goto destroy_votable;
+	}
+
+	platform_set_drvdata(pdev, chip);
+	return 0;
+
+destroy_votable:
+	destroy_votable(chip->tadc_disable_votable);
+	return rc;
+}
+
+static int tadc_remove(struct platform_device *pdev)
+{
+	struct tadc_chip *chip = platform_get_drvdata(pdev);
+
+	destroy_votable(chip->tadc_disable_votable);
+	return 0;
+}
+
+static void tadc_shutdown(struct platform_device *pdev)
+{
+	struct tadc_chip *chip = platform_get_drvdata(pdev);
+
+	vote(chip->tadc_disable_votable, SHUTDOWN_VOTER, true, 0);
+}
+
+static const struct dev_pm_ops tadc_pm_ops = {
+	.resume		= tadc_resume,
+	.suspend	= tadc_suspend,
+};
+
+static const struct of_device_id tadc_match_table[] = {
+	{ .compatible = "qcom,tadc" },
+	{ }
+};
+MODULE_DEVICE_TABLE(of, tadc_match_table);
+
+static struct platform_driver tadc_driver = {
+	.driver		= {
+		.name		= "qcom-tadc",
+		.of_match_table	= tadc_match_table,
+		.pm		= &tadc_pm_ops,
+	},
+	.probe		= tadc_probe,
+	.remove		= tadc_remove,
+	.shutdown	= tadc_shutdown,
+};
+module_platform_driver(tadc_driver);
+
+MODULE_DESCRIPTION("Qualcomm Technologies Inc. TADC driver");
+MODULE_LICENSE("GPL v2");
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/input/keycombo.c	2019-01-22 16:16:23.991250826 +0100
@@ -0,0 +1,261 @@
+/* drivers/input/keycombo.c
+ *
+ * Copyright (C) 2014 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/input.h>
+#include <linux/keycombo.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/reboot.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+
+struct keycombo_state {
+	struct input_handler input_handler;
+	unsigned long keybit[BITS_TO_LONGS(KEY_CNT)];
+	unsigned long upbit[BITS_TO_LONGS(KEY_CNT)];
+	unsigned long key[BITS_TO_LONGS(KEY_CNT)];
+	spinlock_t lock;
+	struct  workqueue_struct *wq;
+	int key_down_target;
+	int key_down;
+	int key_up;
+	struct delayed_work key_down_work;
+	int delay;
+	struct work_struct key_up_work;
+	void (*key_up_fn)(void *);
+	void (*key_down_fn)(void *);
+	void *priv;
+	int key_is_down;
+	struct wakeup_source combo_held_wake_source;
+	struct wakeup_source combo_up_wake_source;
+};
+
+static void do_key_down(struct work_struct *work)
+{
+	struct delayed_work *dwork = container_of(work, struct delayed_work,
+									work);
+	struct keycombo_state *state = container_of(dwork,
+					struct keycombo_state, key_down_work);
+	if (state->key_down_fn)
+		state->key_down_fn(state->priv);
+}
+
+static void do_key_up(struct work_struct *work)
+{
+	struct keycombo_state *state = container_of(work, struct keycombo_state,
+								key_up_work);
+	if (state->key_up_fn)
+		state->key_up_fn(state->priv);
+	__pm_relax(&state->combo_up_wake_source);
+}
+
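+/*
+ * Track the combo state: key_down counts currently pressed keys from the
+ * keys_down set, key_up counts pressed keys that must stay released. Once
+ * every keys_down key is held and no keys_up key is pressed, the key-down
+ * work is scheduled after 'delay'; releasing any key before it runs
+ * cancels it, or queues the key-up work if it has already started.
+ */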
+static void keycombo_event(struct input_handle *handle, unsigned int type,
+		unsigned int code, int value)
+{
+	unsigned long flags;
+	struct keycombo_state *state = handle->private;
+
+	if (type != EV_KEY)
+		return;
+
+	if (code >= KEY_MAX)
+		return;
+
+	if (!test_bit(code, state->keybit))
+		return;
+
+	spin_lock_irqsave(&state->lock, flags);
+	if (!test_bit(code, state->key) == !value)
+		goto done;
+	__change_bit(code, state->key);
+	if (test_bit(code, state->upbit)) {
+		if (value)
+			state->key_up++;
+		else
+			state->key_up--;
+	} else {
+		if (value)
+			state->key_down++;
+		else
+			state->key_down--;
+	}
+	if (state->key_down == state->key_down_target && state->key_up == 0) {
+		__pm_stay_awake(&state->combo_held_wake_source);
+		state->key_is_down = 1;
+		if (!queue_delayed_work(state->wq, &state->key_down_work,
+								state->delay))
+			pr_debug("Key down work already queued!\n");
+	} else if (state->key_is_down) {
+		if (!cancel_delayed_work(&state->key_down_work)) {
+			__pm_stay_awake(&state->combo_up_wake_source);
+			queue_work(state->wq, &state->key_up_work);
+		}
+		__pm_relax(&state->combo_held_wake_source);
+		state->key_is_down = 0;
+	}
+done:
+	spin_unlock_irqrestore(&state->lock, flags);
+}
+
+static int keycombo_connect(struct input_handler *handler,
+		struct input_dev *dev,
+		const struct input_device_id *id)
+{
+	int i;
+	int ret;
+	struct input_handle *handle;
+	struct keycombo_state *state =
+		container_of(handler, struct keycombo_state, input_handler);
+	for (i = 0; i < KEY_MAX; i++) {
+		if (test_bit(i, state->keybit) && test_bit(i, dev->keybit))
+			break;
+	}
+	if (i == KEY_MAX)
+		return -ENODEV;
+
+	handle = kzalloc(sizeof(*handle), GFP_KERNEL);
+	if (!handle)
+		return -ENOMEM;
+
+	handle->dev = dev;
+	handle->handler = handler;
+	handle->name = KEYCOMBO_NAME;
+	handle->private = state;
+
+	ret = input_register_handle(handle);
+	if (ret)
+		goto err_input_register_handle;
+
+	ret = input_open_device(handle);
+	if (ret)
+		goto err_input_open_device;
+
+	return 0;
+
+err_input_open_device:
+	input_unregister_handle(handle);
+err_input_register_handle:
+	kfree(handle);
+	return ret;
+}
+
+static void keycombo_disconnect(struct input_handle *handle)
+{
+	input_close_device(handle);
+	input_unregister_handle(handle);
+	kfree(handle);
+}
+
+static const struct input_device_id keycombo_ids[] = {
+		{
+				.flags = INPUT_DEVICE_ID_MATCH_EVBIT,
+				.evbit = { BIT_MASK(EV_KEY) },
+		},
+		{ },
+};
+MODULE_DEVICE_TABLE(input, keycombo_ids);
+
+static int keycombo_probe(struct platform_device *pdev)
+{
+	int ret;
+	int key, *keyp;
+	struct keycombo_state *state;
+	struct keycombo_platform_data *pdata = pdev->dev.platform_data;
+
+	if (!pdata)
+		return -EINVAL;
+
+	state = kzalloc(sizeof(*state), GFP_KERNEL);
+	if (!state)
+		return -ENOMEM;
+
+	spin_lock_init(&state->lock);
+	keyp = pdata->keys_down;
+	while ((key = *keyp++)) {
+		if (key >= KEY_MAX)
+			continue;
+		state->key_down_target++;
+		__set_bit(key, state->keybit);
+	}
+	if (pdata->keys_up) {
+		keyp = pdata->keys_up;
+		while ((key = *keyp++)) {
+			if (key >= KEY_MAX)
+				continue;
+			__set_bit(key, state->keybit);
+			__set_bit(key, state->upbit);
+		}
+	}
+
+	state->wq = alloc_ordered_workqueue("keycombo", 0);
+	if (!state->wq) {
+		kfree(state);
+		return -ENOMEM;
+	}
+
+	state->priv = pdata->priv;
+
+	if (pdata->key_down_fn)
+		state->key_down_fn = pdata->key_down_fn;
+	INIT_DELAYED_WORK(&state->key_down_work, do_key_down);
+
+	if (pdata->key_up_fn)
+		state->key_up_fn = pdata->key_up_fn;
+	INIT_WORK(&state->key_up_work, do_key_up);
+
+	wakeup_source_init(&state->combo_held_wake_source, "key combo");
+	wakeup_source_init(&state->combo_up_wake_source, "key combo up");
+	state->delay = msecs_to_jiffies(pdata->key_down_delay);
+
+	state->input_handler.event = keycombo_event;
+	state->input_handler.connect = keycombo_connect;
+	state->input_handler.disconnect = keycombo_disconnect;
+	state->input_handler.name = KEYCOMBO_NAME;
+	state->input_handler.id_table = keycombo_ids;
+	ret = input_register_handler(&state->input_handler);
+	if (ret) {
+		destroy_workqueue(state->wq);
+		kfree(state);
+		return ret;
+	}
+	platform_set_drvdata(pdev, state);
+	return 0;
+}
+
+int keycombo_remove(struct platform_device *pdev)
+{
+	struct keycombo_state *state = platform_get_drvdata(pdev);
+	input_unregister_handler(&state->input_handler);
+	destroy_workqueue(state->wq);
+	kfree(state);
+	return 0;
+}
+
+struct platform_driver keycombo_driver = {
+		.driver.name = KEYCOMBO_NAME,
+		.probe = keycombo_probe,
+		.remove = keycombo_remove,
+};
+
+static int __init keycombo_init(void)
+{
+	return platform_driver_register(&keycombo_driver);
+}
+
+static void __exit keycombo_exit(void)
+{
+	return platform_driver_unregister(&keycombo_driver);
+}
+
+module_init(keycombo_init);
+module_exit(keycombo_exit);
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/input/keyreset.c	2019-01-22 16:16:23.991250826 +0100
@@ -0,0 +1,144 @@
+/* drivers/input/keyreset.c
+ *
+ * Copyright (C) 2014 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/input.h>
+#include <linux/keyreset.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/reboot.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/syscalls.h>
+#include <linux/keycombo.h>
+
+struct keyreset_state {
+	int restart_requested;
+	int (*reset_fn)(void);
+	struct platform_device *pdev_child;
+	struct work_struct restart_work;
+};
+
+static void do_restart(struct work_struct *unused)
+{
+	orderly_reboot();
+}
+
+static void do_reset_fn(void *priv)
+{
+	struct keyreset_state *state = priv;
+	if (state->restart_requested)
+		panic("keyboard reset failed, %d", state->restart_requested);
+	if (state->reset_fn) {
+		state->restart_requested = state->reset_fn();
+	} else {
+		pr_info("keyboard reset\n");
+		schedule_work(&state->restart_work);
+		state->restart_requested = 1;
+	}
+}
+
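+/*
+ * keyreset is a thin wrapper around keycombo: it creates a keycombo child
+ * device whose key-down handler triggers an orderly reboot (or the
+ * board-specific reset_fn), so all key tracking is delegated to keycombo.
+ */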
+static int keyreset_probe(struct platform_device *pdev)
+{
+	int ret = -ENOMEM;
+	struct keycombo_platform_data *pdata_child;
+	struct keyreset_platform_data *pdata = pdev->dev.platform_data;
+	int up_size = 0, down_size = 0, size;
+	int key, *keyp;
+	struct keyreset_state *state;
+
+	if (!pdata)
+		return -EINVAL;
+	state = devm_kzalloc(&pdev->dev, sizeof(*state), GFP_KERNEL);
+	if (!state)
+		return -ENOMEM;
+
+	state->pdev_child = platform_device_alloc(KEYCOMBO_NAME,
+							PLATFORM_DEVID_AUTO);
+	if (!state->pdev_child)
+		return -ENOMEM;
+	state->pdev_child->dev.parent = &pdev->dev;
+	INIT_WORK(&state->restart_work, do_restart);
+
+	keyp = pdata->keys_down;
+	while ((key = *keyp++)) {
+		if (key >= KEY_MAX)
+			continue;
+		down_size++;
+	}
+	if (pdata->keys_up) {
+		keyp = pdata->keys_up;
+		while ((key = *keyp++)) {
+			if (key >= KEY_MAX)
+				continue;
+			up_size++;
+		}
+	}
+	size = sizeof(struct keycombo_platform_data)
+			+ sizeof(int) * (down_size + 1);
+	pdata_child = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
+	if (!pdata_child)
+		goto error;
+	memcpy(pdata_child->keys_down, pdata->keys_down,
+						sizeof(int) * down_size);
+	if (up_size > 0) {
+		pdata_child->keys_up = devm_kzalloc(&pdev->dev, up_size + 1,
+								GFP_KERNEL);
+		if (!pdata_child->keys_up)
+			goto error;
+		memcpy(pdata_child->keys_up, pdata->keys_up,
+							sizeof(int) * up_size);
+		if (!pdata_child->keys_up)
+			goto error;
+	}
+	state->reset_fn = pdata->reset_fn;
+	pdata_child->key_down_fn = do_reset_fn;
+	pdata_child->priv = state;
+	pdata_child->key_down_delay = pdata->key_down_delay;
+	ret = platform_device_add_data(state->pdev_child, pdata_child, size);
+	if (ret)
+		goto error;
+	platform_set_drvdata(pdev, state);
+	return platform_device_add(state->pdev_child);
+error:
+	platform_device_put(state->pdev_child);
+	return ret;
+}
+
+int keyreset_remove(struct platform_device *pdev)
+{
+	struct keyreset_state *state = platform_get_drvdata(pdev);
+	platform_device_put(state->pdev_child);
+	return 0;
+}
+
+struct platform_driver keyreset_driver = {
+	.driver.name = KEYRESET_NAME,
+	.probe = keyreset_probe,
+	.remove = keyreset_remove,
+};
+
+static int __init keyreset_init(void)
+{
+	return platform_driver_register(&keyreset_driver);
+}
+
+static void __exit keyreset_exit(void)
+{
+	return platform_driver_unregister(&keyreset_driver);
+}
+
+module_init(keyreset_init);
+module_exit(keyreset_exit);
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/input/misc/gpio_axis.c	2019-01-22 16:16:23.995250862 +0100
@@ -0,0 +1,192 @@
+/* drivers/input/misc/gpio_axis.c
+ *
+ * Copyright (C) 2007 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/gpio.h>
+#include <linux/gpio_event.h>
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+
+struct gpio_axis_state {
+	struct gpio_event_input_devs *input_devs;
+	struct gpio_event_axis_info *info;
+	uint32_t pos;
+};
+
+uint16_t gpio_axis_4bit_gray_map_table[] = {
+	[0x0] = 0x0, [0x1] = 0x1, /* 0000 0001 */
+	[0x3] = 0x2, [0x2] = 0x3, /* 0011 0010 */
+	[0x6] = 0x4, [0x7] = 0x5, /* 0110 0111 */
+	[0x5] = 0x6, [0x4] = 0x7, /* 0101 0100 */
+	[0xc] = 0x8, [0xd] = 0x9, /* 1100 1101 */
+	[0xf] = 0xa, [0xe] = 0xb, /* 1111 1110 */
+	[0xa] = 0xc, [0xb] = 0xd, /* 1010 1011 */
+	[0x9] = 0xe, [0x8] = 0xf, /* 1001 1000 */
+};
+uint16_t gpio_axis_4bit_gray_map(struct gpio_event_axis_info *info, uint16_t in)
+{
+	return gpio_axis_4bit_gray_map_table[in];
+}
+
+uint16_t gpio_axis_5bit_singletrack_map_table[] = {
+	[0x10] = 0x00, [0x14] = 0x01, [0x1c] = 0x02, /*     10000 10100 11100 */
+	[0x1e] = 0x03, [0x1a] = 0x04, [0x18] = 0x05, /*     11110 11010 11000 */
+	[0x08] = 0x06, [0x0a] = 0x07, [0x0e] = 0x08, /*    01000 01010 01110  */
+	[0x0f] = 0x09, [0x0d] = 0x0a, [0x0c] = 0x0b, /*    01111 01101 01100  */
+	[0x04] = 0x0c, [0x05] = 0x0d, [0x07] = 0x0e, /*   00100 00101 00111   */
+	[0x17] = 0x0f, [0x16] = 0x10, [0x06] = 0x11, /*   10111 10110 00110   */
+	[0x02] = 0x12, [0x12] = 0x13, [0x13] = 0x14, /*  00010 10010 10011    */
+	[0x1b] = 0x15, [0x0b] = 0x16, [0x03] = 0x17, /*  11011 01011 00011    */
+	[0x01] = 0x18, [0x09] = 0x19, [0x19] = 0x1a, /* 00001 01001 11001     */
+	[0x1d] = 0x1b, [0x15] = 0x1c, [0x11] = 0x1d, /* 11101 10101 10001     */
+};
+uint16_t gpio_axis_5bit_singletrack_map(
+	struct gpio_event_axis_info *info, uint16_t in)
+{
+	return gpio_axis_5bit_singletrack_map_table[in];
+}
+
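+/*
+ * Read the current position from the GPIOs and optionally report it. For
+ * EV_REL axes the shortest signed distance modulo decoded_size is
+ * reported: e.g. with decoded_size = 16, moving from position 15 to 1
+ * yields change = +2, while an exact half-turn is ambiguous and reported
+ * as 0.
+ */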
+static void gpio_event_update_axis(struct gpio_axis_state *as, int report)
+{
+	struct gpio_event_axis_info *ai = as->info;
+	int i;
+	int change;
+	uint16_t state = 0;
+	uint16_t pos;
+	uint16_t old_pos = as->pos;
+	for (i = ai->count - 1; i >= 0; i--)
+		state = (state << 1) | gpio_get_value(ai->gpio[i]);
+	pos = ai->map(ai, state);
+	if (ai->flags & GPIOEAF_PRINT_RAW)
+		pr_info("axis %d-%d raw %x, pos %d -> %d\n",
+			ai->type, ai->code, state, old_pos, pos);
+	if (report && pos != old_pos) {
+		if (ai->type == EV_REL) {
+			change = (ai->decoded_size + pos - old_pos) %
+				  ai->decoded_size;
+			if (change > ai->decoded_size / 2)
+				change -= ai->decoded_size;
+			if (change == ai->decoded_size / 2) {
+				if (ai->flags & GPIOEAF_PRINT_EVENT)
+					pr_info("axis %d-%d unknown direction, "
+						"pos %d -> %d\n", ai->type,
+						ai->code, old_pos, pos);
+				change = 0; /* no closest direction */
+			}
+			if (ai->flags & GPIOEAF_PRINT_EVENT)
+				pr_info("axis %d-%d change %d\n",
+					ai->type, ai->code, change);
+			input_report_rel(as->input_devs->dev[ai->dev],
+						ai->code, change);
+		} else {
+			if (ai->flags & GPIOEAF_PRINT_EVENT)
+				pr_info("axis %d-%d now %d\n",
+					ai->type, ai->code, pos);
+			input_event(as->input_devs->dev[ai->dev],
+					ai->type, ai->code, pos);
+		}
+		input_sync(as->input_devs->dev[ai->dev]);
+	}
+	as->pos = pos;
+}
+
+static irqreturn_t gpio_axis_irq_handler(int irq, void *dev_id)
+{
+	struct gpio_axis_state *as = dev_id;
+	gpio_event_update_axis(as, 1);
+	return IRQ_HANDLED;
+}
+
+int gpio_event_axis_func(struct gpio_event_input_devs *input_devs,
+			 struct gpio_event_info *info, void **data, int func)
+{
+	int ret;
+	int i;
+	int irq;
+	struct gpio_event_axis_info *ai;
+	struct gpio_axis_state *as;
+
+	ai = container_of(info, struct gpio_event_axis_info, info);
+	if (func == GPIO_EVENT_FUNC_SUSPEND) {
+		for (i = 0; i < ai->count; i++)
+			disable_irq(gpio_to_irq(ai->gpio[i]));
+		return 0;
+	}
+	if (func == GPIO_EVENT_FUNC_RESUME) {
+		for (i = 0; i < ai->count; i++)
+			enable_irq(gpio_to_irq(ai->gpio[i]));
+		return 0;
+	}
+
+	if (func == GPIO_EVENT_FUNC_INIT) {
+		*data = as = kmalloc(sizeof(*as), GFP_KERNEL);
+		if (as == NULL) {
+			ret = -ENOMEM;
+			goto err_alloc_axis_state_failed;
+		}
+		as->input_devs = input_devs;
+		as->info = ai;
+		if (ai->dev >= input_devs->count) {
+			pr_err("gpio_event_axis: bad device index %d >= %d "
+				"for %d:%d\n", ai->dev, input_devs->count,
+				ai->type, ai->code);
+			ret = -EINVAL;
+			goto err_bad_device_index;
+		}
+
+		input_set_capability(input_devs->dev[ai->dev],
+				     ai->type, ai->code);
+		if (ai->type == EV_ABS) {
+			input_set_abs_params(input_devs->dev[ai->dev], ai->code,
+					     0, ai->decoded_size - 1, 0, 0);
+		}
+		for (i = 0; i < ai->count; i++) {
+			ret = gpio_request(ai->gpio[i], "gpio_event_axis");
+			if (ret < 0)
+				goto err_request_gpio_failed;
+			ret = gpio_direction_input(ai->gpio[i]);
+			if (ret < 0)
+				goto err_gpio_direction_input_failed;
+			ret = irq = gpio_to_irq(ai->gpio[i]);
+			if (ret < 0)
+				goto err_get_irq_num_failed;
+			ret = request_irq(irq, gpio_axis_irq_handler,
+					  IRQF_TRIGGER_RISING |
+					  IRQF_TRIGGER_FALLING,
+					  "gpio_event_axis", as);
+			if (ret < 0)
+				goto err_request_irq_failed;
+		}
+		gpio_event_update_axis(as, 0);
+		return 0;
+	}
+
+	ret = 0;
+	as = *data;
+	for (i = ai->count - 1; i >= 0; i--) {
+		free_irq(gpio_to_irq(ai->gpio[i]), as);
+err_request_irq_failed:
+err_get_irq_num_failed:
+err_gpio_direction_input_failed:
+		gpio_free(ai->gpio[i]);
+err_request_gpio_failed:
+		;
+	}
+err_bad_device_index:
+	kfree(as);
+	*data = NULL;
+err_alloc_axis_state_failed:
+	return ret;
+}
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/input/misc/gpio_event.c	2019-01-22 16:16:23.995250862 +0100
@@ -0,0 +1,228 @@
+/* drivers/input/misc/gpio_event.c
+ *
+ * Copyright (C) 2007 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/input.h>
+#include <linux/gpio_event.h>
+#include <linux/hrtimer.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+struct gpio_event {
+	struct gpio_event_input_devs *input_devs;
+	const struct gpio_event_platform_data *info;
+	void *state[0];
+};
+
+static int gpio_input_event(
+	struct input_dev *dev, unsigned int type, unsigned int code, int value)
+{
+	int i;
+	int devnr;
+	int ret = 0;
+	int tmp_ret;
+	struct gpio_event_info **ii;
+	struct gpio_event *ip = input_get_drvdata(dev);
+
+	for (devnr = 0; devnr < ip->input_devs->count; devnr++)
+		if (ip->input_devs->dev[devnr] == dev)
+			break;
+	if (devnr == ip->input_devs->count) {
+		pr_err("gpio_input_event: unknown device %p\n", dev);
+		return -EIO;
+	}
+
+	for (i = 0, ii = ip->info->info; i < ip->info->info_count; i++, ii++) {
+		if ((*ii)->event) {
+			tmp_ret = (*ii)->event(ip->input_devs, *ii,
+						&ip->state[i],
+						devnr, type, code, value);
+			if (tmp_ret)
+				ret = tmp_ret;
+		}
+	}
+	return ret;
+}
+
+static int gpio_event_call_all_func(struct gpio_event *ip, int func)
+{
+	int i;
+	int ret;
+	struct gpio_event_info **ii;
+
+	if (func == GPIO_EVENT_FUNC_INIT || func == GPIO_EVENT_FUNC_RESUME) {
+		ii = ip->info->info;
+		for (i = 0; i < ip->info->info_count; i++, ii++) {
+			if ((*ii)->func == NULL) {
+				ret = -ENODEV;
+				pr_err("gpio_event_probe: Incomplete pdata, "
+					"no function\n");
+				goto err_no_func;
+			}
+			if (func == GPIO_EVENT_FUNC_RESUME && (*ii)->no_suspend)
+				continue;
+			ret = (*ii)->func(ip->input_devs, *ii, &ip->state[i],
+					  func);
+			if (ret) {
+				pr_err("gpio_event_probe: function failed\n");
+				goto err_func_failed;
+			}
+		}
+		return 0;
+	}
+
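+	/*
+	 * Teardown / error-unwind path: walk the info array backwards and
+	 * call the complementary function. func & ~1 maps
+	 * GPIO_EVENT_FUNC_INIT to GPIO_EVENT_FUNC_UNINIT and
+	 * GPIO_EVENT_FUNC_RESUME to GPIO_EVENT_FUNC_SUSPEND (assuming the
+	 * usual gpio_event.h encoding where the "do" value is the "undo"
+	 * value plus one).
+	 */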
+	ret = 0;
+	i = ip->info->info_count;
+	ii = ip->info->info + i;
+	while (i > 0) {
+		i--;
+		ii--;
+		if ((func & ~1) == GPIO_EVENT_FUNC_SUSPEND && (*ii)->no_suspend)
+			continue;
+		(*ii)->func(ip->input_devs, *ii, &ip->state[i], func & ~1);
+err_func_failed:
+err_no_func:
+		;
+	}
+	return ret;
+}
+
+static void __maybe_unused gpio_event_suspend(struct gpio_event *ip)
+{
+	gpio_event_call_all_func(ip, GPIO_EVENT_FUNC_SUSPEND);
+	if (ip->info->power)
+		ip->info->power(ip->info, 0);
+}
+
+static void __maybe_unused gpio_event_resume(struct gpio_event *ip)
+{
+	if (ip->info->power)
+		ip->info->power(ip->info, 1);
+	gpio_event_call_all_func(ip, GPIO_EVENT_FUNC_RESUME);
+}
+
+static int gpio_event_probe(struct platform_device *pdev)
+{
+	int err;
+	struct gpio_event *ip;
+	struct gpio_event_platform_data *event_info;
+	int dev_count = 1;
+	int i;
+	int registered = 0;
+
+	event_info = pdev->dev.platform_data;
+	if (event_info == NULL) {
+		pr_err("gpio_event_probe: No pdata\n");
+		return -ENODEV;
+	}
+	if ((!event_info->name && !event_info->names[0]) ||
+	    !event_info->info || !event_info->info_count) {
+		pr_err("gpio_event_probe: Incomplete pdata\n");
+		return -ENODEV;
+	}
+	if (!event_info->name)
+		while (event_info->names[dev_count])
+			dev_count++;
+	ip = kzalloc(sizeof(*ip) +
+		     sizeof(ip->state[0]) * event_info->info_count +
+		     sizeof(*ip->input_devs) +
+		     sizeof(ip->input_devs->dev[0]) * dev_count, GFP_KERNEL);
+	if (ip == NULL) {
+		err = -ENOMEM;
+		pr_err("gpio_event_probe: Failed to allocate private data\n");
+		goto err_kp_alloc_failed;
+	}
+	ip->input_devs = (void*)&ip->state[event_info->info_count];
+	platform_set_drvdata(pdev, ip);
+
+	for (i = 0; i < dev_count; i++) {
+		struct input_dev *input_dev = input_allocate_device();
+		if (input_dev == NULL) {
+			err = -ENOMEM;
+			pr_err("gpio_event_probe: "
+				"Failed to allocate input device\n");
+			goto err_input_dev_alloc_failed;
+		}
+		input_set_drvdata(input_dev, ip);
+		input_dev->name = event_info->name ?
+					event_info->name : event_info->names[i];
+		input_dev->event = gpio_input_event;
+		ip->input_devs->dev[i] = input_dev;
+	}
+	ip->input_devs->count = dev_count;
+	ip->info = event_info;
+	if (event_info->power)
+		ip->info->power(ip->info, 1);
+
+	err = gpio_event_call_all_func(ip, GPIO_EVENT_FUNC_INIT);
+	if (err)
+		goto err_call_all_func_failed;
+
+	for (i = 0; i < dev_count; i++) {
+		err = input_register_device(ip->input_devs->dev[i]);
+		if (err) {
+			pr_err("gpio_event_probe: Unable to register %s "
+				"input device\n", ip->input_devs->dev[i]->name);
+			goto err_input_register_device_failed;
+		}
+		registered++;
+	}
+
+	return 0;
+
+err_input_register_device_failed:
+	gpio_event_call_all_func(ip, GPIO_EVENT_FUNC_UNINIT);
+err_call_all_func_failed:
+	if (event_info->power)
+		ip->info->power(ip->info, 0);
+	for (i = 0; i < registered; i++)
+		input_unregister_device(ip->input_devs->dev[i]);
+	for (i = dev_count - 1; i >= registered; i--) {
+		input_free_device(ip->input_devs->dev[i]);
+err_input_dev_alloc_failed:
+		;
+	}
+	kfree(ip);
+err_kp_alloc_failed:
+	return err;
+}
+
+static int gpio_event_remove(struct platform_device *pdev)
+{
+	struct gpio_event *ip = platform_get_drvdata(pdev);
+	int i;
+
+	gpio_event_call_all_func(ip, GPIO_EVENT_FUNC_UNINIT);
+	if (ip->info->power)
+		ip->info->power(ip->info, 0);
+	for (i = 0; i < ip->input_devs->count; i++)
+		input_unregister_device(ip->input_devs->dev[i]);
+	kfree(ip);
+	return 0;
+}
+
+static struct platform_driver gpio_event_driver = {
+	.probe		= gpio_event_probe,
+	.remove		= gpio_event_remove,
+	.driver		= {
+		.name	= GPIO_EVENT_DEV_NAME,
+	},
+};
+
+module_platform_driver(gpio_event_driver);
+
+MODULE_DESCRIPTION("GPIO Event Driver");
+MODULE_LICENSE("GPL");
+
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/input/misc/gpio_input.c	2019-01-22 16:16:23.995250862 +0100
@@ -0,0 +1,390 @@
+/* drivers/input/misc/gpio_input.c
+ *
+ * Copyright (C) 2007 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/gpio.h>
+#include <linux/gpio_event.h>
+#include <linux/hrtimer.h>
+#include <linux/input.h>
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+#include <linux/pm_wakeup.h>
+
+enum {
+	DEBOUNCE_UNSTABLE     = BIT(0),	/* Got irq, while debouncing */
+	DEBOUNCE_PRESSED      = BIT(1),
+	DEBOUNCE_NOTPRESSED   = BIT(2),
+	DEBOUNCE_WAIT_IRQ     = BIT(3),	/* Stable irq state */
+	DEBOUNCE_POLL         = BIT(4),	/* Stable polling state */
+
+	DEBOUNCE_UNKNOWN =
+		DEBOUNCE_PRESSED | DEBOUNCE_NOTPRESSED,
+};
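+
+/*
+ * Debounce flow: a stable key sits in DEBOUNCE_WAIT_IRQ (or DEBOUNCE_POLL
+ * when polling). An edge moves it to DEBOUNCE_UNKNOWN and starts the
+ * timer; the timer must then observe the same level on two consecutive
+ * ticks before the event is reported and the key returns to a stable
+ * state. An interrupt arriving mid-debounce marks the key
+ * DEBOUNCE_UNSTABLE and the debounce starts over.
+ */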
+
+struct gpio_key_state {
+	struct gpio_input_state *ds;
+	uint8_t debounce;
+};
+
+struct gpio_input_state {
+	struct gpio_event_input_devs *input_devs;
+	const struct gpio_event_input_info *info;
+	struct hrtimer timer;
+	int use_irq;
+	int debounce_count;
+	spinlock_t irq_lock;
+	struct wakeup_source *ws;
+	struct gpio_key_state key_state[0];
+};
+
+static enum hrtimer_restart gpio_event_input_timer_func(struct hrtimer *timer)
+{
+	int i;
+	int pressed;
+	struct gpio_input_state *ds =
+		container_of(timer, struct gpio_input_state, timer);
+	unsigned gpio_flags = ds->info->flags;
+	unsigned npolarity;
+	int nkeys = ds->info->keymap_size;
+	const struct gpio_event_direct_entry *key_entry;
+	struct gpio_key_state *key_state;
+	unsigned long irqflags;
+	uint8_t debounce;
+	bool sync_needed;
+
+	key_entry = ds->info->keymap;
+	key_state = ds->key_state;
+	sync_needed = false;
+	spin_lock_irqsave(&ds->irq_lock, irqflags);
+	for (i = 0; i < nkeys; i++, key_entry++, key_state++) {
+		debounce = key_state->debounce;
+		if (debounce & DEBOUNCE_WAIT_IRQ)
+			continue;
+		if (key_state->debounce & DEBOUNCE_UNSTABLE) {
+			debounce = key_state->debounce = DEBOUNCE_UNKNOWN;
+			enable_irq(gpio_to_irq(key_entry->gpio));
+			if (gpio_flags & GPIOEDF_PRINT_KEY_UNSTABLE)
+				pr_info("gpio_keys_scan_keys: key %x-%x, %d "
+					"(%d) continue debounce\n",
+					ds->info->type, key_entry->code,
+					i, key_entry->gpio);
+		}
+		npolarity = !(gpio_flags & GPIOEDF_ACTIVE_HIGH);
+		pressed = gpio_get_value(key_entry->gpio) ^ npolarity;
+		if (debounce & DEBOUNCE_POLL) {
+			if (pressed == !(debounce & DEBOUNCE_PRESSED)) {
+				ds->debounce_count++;
+				key_state->debounce = DEBOUNCE_UNKNOWN;
+				if (gpio_flags & GPIOEDF_PRINT_KEY_DEBOUNCE)
+					pr_info("gpio_keys_scan_keys: key %x-"
+						"%x, %d (%d) start debounce\n",
+						ds->info->type, key_entry->code,
+						i, key_entry->gpio);
+			}
+			continue;
+		}
+		if (pressed && (debounce & DEBOUNCE_NOTPRESSED)) {
+			if (gpio_flags & GPIOEDF_PRINT_KEY_DEBOUNCE)
+				pr_info("gpio_keys_scan_keys: key %x-%x, %d "
+					"(%d) debounce pressed 1\n",
+					ds->info->type, key_entry->code,
+					i, key_entry->gpio);
+			key_state->debounce = DEBOUNCE_PRESSED;
+			continue;
+		}
+		if (!pressed && (debounce & DEBOUNCE_PRESSED)) {
+			if (gpio_flags & GPIOEDF_PRINT_KEY_DEBOUNCE)
+				pr_info("gpio_keys_scan_keys: key %x-%x, %d "
+					"(%d) debounce pressed 0\n",
+					ds->info->type, key_entry->code,
+					i, key_entry->gpio);
+			key_state->debounce = DEBOUNCE_NOTPRESSED;
+			continue;
+		}
+		/* key is stable */
+		ds->debounce_count--;
+		if (ds->use_irq)
+			key_state->debounce |= DEBOUNCE_WAIT_IRQ;
+		else
+			key_state->debounce |= DEBOUNCE_POLL;
+		if (gpio_flags & GPIOEDF_PRINT_KEYS)
+			pr_info("gpio_keys_scan_keys: key %x-%x, %d (%d) "
+				"changed to %d\n", ds->info->type,
+				key_entry->code, i, key_entry->gpio, pressed);
+		input_event(ds->input_devs->dev[key_entry->dev], ds->info->type,
+			    key_entry->code, pressed);
+		sync_needed = true;
+	}
+	if (sync_needed) {
+		for (i = 0; i < ds->input_devs->count; i++)
+			input_sync(ds->input_devs->dev[i]);
+	}
+
+	if (ds->debounce_count)
+		hrtimer_start(timer, ds->info->debounce_time, HRTIMER_MODE_REL);
+	else if (!ds->use_irq)
+		hrtimer_start(timer, ds->info->poll_time, HRTIMER_MODE_REL);
+	else
+		__pm_relax(ds->ws);
+
+	spin_unlock_irqrestore(&ds->irq_lock, irqflags);
+
+	return HRTIMER_NORESTART;
+}
+
+static irqreturn_t gpio_event_input_irq_handler(int irq, void *dev_id)
+{
+	struct gpio_key_state *ks = dev_id;
+	struct gpio_input_state *ds = ks->ds;
+	int keymap_index = ks - ds->key_state;
+	const struct gpio_event_direct_entry *key_entry;
+	unsigned long irqflags;
+	int pressed;
+
+	if (!ds->use_irq)
+		return IRQ_HANDLED;
+
+	key_entry = &ds->info->keymap[keymap_index];
+
+	if (ds->info->debounce_time.tv64) {
+		spin_lock_irqsave(&ds->irq_lock, irqflags);
+		if (ks->debounce & DEBOUNCE_WAIT_IRQ) {
+			ks->debounce = DEBOUNCE_UNKNOWN;
+			if (ds->debounce_count++ == 0) {
+				__pm_stay_awake(ds->ws);
+				hrtimer_start(
+					&ds->timer, ds->info->debounce_time,
+					HRTIMER_MODE_REL);
+			}
+			if (ds->info->flags & GPIOEDF_PRINT_KEY_DEBOUNCE)
+				pr_info("gpio_event_input_irq_handler: "
+					"key %x-%x, %d (%d) start debounce\n",
+					ds->info->type, key_entry->code,
+					keymap_index, key_entry->gpio);
+		} else {
+			disable_irq_nosync(irq);
+			ks->debounce = DEBOUNCE_UNSTABLE;
+		}
+		spin_unlock_irqrestore(&ds->irq_lock, irqflags);
+	} else {
+		pressed = gpio_get_value(key_entry->gpio) ^
+			!(ds->info->flags & GPIOEDF_ACTIVE_HIGH);
+		if (ds->info->flags & GPIOEDF_PRINT_KEYS)
+			pr_info("gpio_event_input_irq_handler: key %x-%x, %d "
+				"(%d) changed to %d\n",
+				ds->info->type, key_entry->code, keymap_index,
+				key_entry->gpio, pressed);
+		input_event(ds->input_devs->dev[key_entry->dev], ds->info->type,
+			    key_entry->code, pressed);
+		input_sync(ds->input_devs->dev[key_entry->dev]);
+	}
+	return IRQ_HANDLED;
+}
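+
+/*
+ * Note on the two paths above: with a non-zero debounce_time the IRQ
+ * handler never reports a key itself; it only re-arms the debounce state
+ * machine and lets gpio_event_input_timer_func() sample the line. Only
+ * with debouncing disabled is the level read and reported directly from
+ * interrupt context.
+ */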
+
+static int gpio_event_input_request_irqs(struct gpio_input_state *ds)
+{
+	int i;
+	int err;
+	unsigned int irq;
+	unsigned long req_flags = IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING;
+
+	for (i = 0; i < ds->info->keymap_size; i++) {
+		err = irq = gpio_to_irq(ds->info->keymap[i].gpio);
+		if (err < 0)
+			goto err_gpio_get_irq_num_failed;
+		err = request_irq(irq, gpio_event_input_irq_handler,
+				  req_flags, "gpio_keys", &ds->key_state[i]);
+		if (err) {
+			pr_err("gpio_event_input_request_irqs: request_irq "
+				"failed for input %d, irq %d\n",
+				ds->info->keymap[i].gpio, irq);
+			goto err_request_irq_failed;
+		}
+		if (ds->info->info.no_suspend) {
+			err = enable_irq_wake(irq);
+			if (err) {
+				pr_err("gpio_event_input_request_irqs: "
+					"enable_irq_wake failed for input %d, "
+					"irq %d\n",
+					ds->info->keymap[i].gpio, irq);
+				goto err_enable_irq_wake_failed;
+			}
+		}
+	}
+	return 0;
+
+	for (i = ds->info->keymap_size - 1; i >= 0; i--) {
+		irq = gpio_to_irq(ds->info->keymap[i].gpio);
+		if (ds->info->info.no_suspend)
+			disable_irq_wake(irq);
+err_enable_irq_wake_failed:
+		free_irq(irq, &ds->key_state[i]);
+err_request_irq_failed:
+err_gpio_get_irq_num_failed:
+		;
+	}
+	return err;
+}
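+
+/*
+ * The loop after "return 0" above is reachable only through the error
+ * labels inside it: on a failure for keymap entry i, execution jumps
+ * into the loop body (the loop initializer is skipped, so i keeps its
+ * value) and the decrementing loop then releases whatever was acquired
+ * for the preceding entries.
+ */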
+
+int gpio_event_input_func(struct gpio_event_input_devs *input_devs,
+			struct gpio_event_info *info, void **data, int func)
+{
+	int ret;
+	int i;
+	unsigned long irqflags;
+	struct gpio_event_input_info *di;
+	struct gpio_input_state *ds = *data;
+	char *wlname;
+
+	di = container_of(info, struct gpio_event_input_info, info);
+
+	if (func == GPIO_EVENT_FUNC_SUSPEND) {
+		if (ds->use_irq)
+			for (i = 0; i < di->keymap_size; i++)
+				disable_irq(gpio_to_irq(di->keymap[i].gpio));
+		hrtimer_cancel(&ds->timer);
+		return 0;
+	}
+	if (func == GPIO_EVENT_FUNC_RESUME) {
+		spin_lock_irqsave(&ds->irq_lock, irqflags);
+		if (ds->use_irq)
+			for (i = 0; i < di->keymap_size; i++)
+				enable_irq(gpio_to_irq(di->keymap[i].gpio));
+		hrtimer_start(&ds->timer, ktime_set(0, 0), HRTIMER_MODE_REL);
+		spin_unlock_irqrestore(&ds->irq_lock, irqflags);
+		return 0;
+	}
+
+	if (func == GPIO_EVENT_FUNC_INIT) {
+		if (ktime_to_ns(di->poll_time) <= 0)
+			di->poll_time = ktime_set(0, 20 * NSEC_PER_MSEC);
+
+		*data = ds = kzalloc(sizeof(*ds) + sizeof(ds->key_state[0]) *
+					di->keymap_size, GFP_KERNEL);
+		if (ds == NULL) {
+			ret = -ENOMEM;
+			pr_err("gpio_event_input_func: "
+				"Failed to allocate private data\n");
+			goto err_ds_alloc_failed;
+		}
+		ds->debounce_count = di->keymap_size;
+		ds->input_devs = input_devs;
+		ds->info = di;
+		wlname = kasprintf(GFP_KERNEL, "gpio_input:%s%s",
+				   input_devs->dev[0]->name,
+				   (input_devs->count > 1) ? "..." : "");
+
+		ds->ws = wakeup_source_register(wlname);
+		kfree(wlname);
+		if (!ds->ws) {
+			ret = -ENOMEM;
+			pr_err("gpio_event_input_func: "
+				"Failed to allocate wakeup source\n");
+			goto err_ws_failed;
+		}
+
+		spin_lock_init(&ds->irq_lock);
+
+		for (i = 0; i < di->keymap_size; i++) {
+			int dev = di->keymap[i].dev;
+			if (dev >= input_devs->count) {
+				pr_err("gpio_event_input_func: bad device "
+					"index %d >= %d for key code %d\n",
+					dev, input_devs->count,
+					di->keymap[i].code);
+				ret = -EINVAL;
+				goto err_bad_keymap;
+			}
+			input_set_capability(input_devs->dev[dev], di->type,
+					     di->keymap[i].code);
+			ds->key_state[i].ds = ds;
+			ds->key_state[i].debounce = DEBOUNCE_UNKNOWN;
+		}
+
+		for (i = 0; i < di->keymap_size; i++) {
+			ret = gpio_request(di->keymap[i].gpio, "gpio_kp_in");
+			if (ret) {
+				pr_err("gpio_event_input_func: gpio_request "
+					"failed for %d\n", di->keymap[i].gpio);
+				goto err_gpio_request_failed;
+			}
+			ret = gpio_direction_input(di->keymap[i].gpio);
+			if (ret) {
+				pr_err("gpio_event_input_func: "
+					"gpio_direction_input failed for %d\n",
+					di->keymap[i].gpio);
+				goto err_gpio_configure_failed;
+			}
+		}
+
+		ret = gpio_event_input_request_irqs(ds);
+
+		spin_lock_irqsave(&ds->irq_lock, irqflags);
+		ds->use_irq = ret == 0;
+
+		pr_info("GPIO Input Driver: Start gpio inputs for %s%s in %s "
+			"mode\n", input_devs->dev[0]->name,
+			(input_devs->count > 1) ? "..." : "",
+			ret == 0 ? "interrupt" : "polling");
+
+		hrtimer_init(&ds->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+		ds->timer.function = gpio_event_input_timer_func;
+		hrtimer_start(&ds->timer, ktime_set(0, 0), HRTIMER_MODE_REL);
+		spin_unlock_irqrestore(&ds->irq_lock, irqflags);
+		return 0;
+	}
+
+	ret = 0;
+	spin_lock_irqsave(&ds->irq_lock, irqflags);
+	hrtimer_cancel(&ds->timer);
+	if (ds->use_irq) {
+		for (i = di->keymap_size - 1; i >= 0; i--) {
+			int irq = gpio_to_irq(di->keymap[i].gpio);
+			if (ds->info->info.no_suspend)
+				disable_irq_wake(irq);
+			free_irq(irq, &ds->key_state[i]);
+		}
+	}
+	spin_unlock_irqrestore(&ds->irq_lock, irqflags);
+
+	for (i = di->keymap_size - 1; i >= 0; i--) {
+err_gpio_configure_failed:
+		gpio_free(di->keymap[i].gpio);
+err_gpio_request_failed:
+		;
+	}
+err_bad_keymap:
+	wakeup_source_unregister(ds->ws);
+err_ws_failed:
+	kfree(ds);
+err_ds_alloc_failed:
+	return ret;
+}
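+
+/*
+ * A minimal usage sketch (illustrative only: the GPIO numbers, key codes
+ * and names below are assumptions for a hypothetical board file, not
+ * part of this driver). Board code registers keys with this driver by
+ * pointing info.func at gpio_event_input_func:
+ */
+#if 0	/* example board glue, not built */
+static struct gpio_event_direct_entry example_keymap[] = {
+	{ .gpio = 17, .code = KEY_VOLUMEUP },
+	{ .gpio = 18, .code = KEY_VOLUMEDOWN },
+};
+
+static struct gpio_event_input_info example_input_info = {
+	.info.func = gpio_event_input_func,
+	.info.no_suspend = true,
+	.type = EV_KEY,
+	.flags = GPIOEDF_PRINT_KEYS,
+	.debounce_time.tv64 = 20 * NSEC_PER_MSEC,
+	.keymap = example_keymap,
+	.keymap_size = ARRAY_SIZE(example_keymap),
+};
+#endif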
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/input/misc/gpio_matrix.c	2019-01-22 16:16:23.995250862 +0100
@@ -0,0 +1,441 @@
+/* drivers/input/misc/gpio_matrix.c
+ *
+ * Copyright (C) 2007 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/gpio.h>
+#include <linux/gpio_event.h>
+#include <linux/hrtimer.h>
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+#include <linux/wakelock.h>
+
+struct gpio_kp {
+	struct gpio_event_input_devs *input_devs;
+	struct gpio_event_matrix_info *keypad_info;
+	struct hrtimer timer;
+	struct wake_lock wake_lock;
+	int current_output;
+	unsigned int use_irq:1;
+	unsigned int key_state_changed:1;
+	unsigned int last_key_state_changed:1;
+	unsigned int some_keys_pressed:2;
+	unsigned int disabled_irq:1;
+	unsigned long keys_pressed[0];
+};
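+
+/*
+ * keys_pressed is a bitmap with one bit per matrix position; the bit for
+ * output line "out" and input line "in" lives at index
+ * out * ninputs + in, the indexing used throughout this file.
+ */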
+
+static void clear_phantom_key(struct gpio_kp *kp, int out, int in)
+{
+	struct gpio_event_matrix_info *mi = kp->keypad_info;
+	int key_index = out * mi->ninputs + in;
+	unsigned short keyentry = mi->keymap[key_index];
+	unsigned short keycode = keyentry & MATRIX_KEY_MASK;
+	unsigned short dev = keyentry >> MATRIX_CODE_BITS;
+
+	if (!test_bit(keycode, kp->input_devs->dev[dev]->key)) {
+		if (mi->flags & GPIOKPF_PRINT_PHANTOM_KEYS)
+			pr_info("gpiomatrix: phantom key %x, %d-%d (%d-%d) "
+				"cleared\n", keycode, out, in,
+				mi->output_gpios[out], mi->input_gpios[in]);
+		__clear_bit(key_index, kp->keys_pressed);
+	} else {
+		if (mi->flags & GPIOKPF_PRINT_PHANTOM_KEYS)
+			pr_info("gpiomatrix: phantom key %x, %d-%d (%d-%d) "
+				"not cleared\n", keycode, out, in,
+				mi->output_gpios[out], mi->input_gpios[in]);
+	}
+}
+
+static int restore_keys_for_input(struct gpio_kp *kp, int out, int in)
+{
+	int rv = 0;
+	int key_index;
+
+	key_index = out * kp->keypad_info->ninputs + in;
+	while (out < kp->keypad_info->noutputs) {
+		if (test_bit(key_index, kp->keys_pressed)) {
+			rv = 1;
+			clear_phantom_key(kp, out, in);
+		}
+		key_index += kp->keypad_info->ninputs;
+		out++;
+	}
+	return rv;
+}
+
+static void remove_phantom_keys(struct gpio_kp *kp)
+{
+	int out, in, inp;
+	int key_index;
+
+	if (kp->some_keys_pressed < 3)
+		return;
+
+	for (out = 0; out < kp->keypad_info->noutputs; out++) {
+		inp = -1;
+		key_index = out * kp->keypad_info->ninputs;
+		for (in = 0; in < kp->keypad_info->ninputs; in++, key_index++) {
+			if (test_bit(key_index, kp->keys_pressed)) {
+				if (inp == -1) {
+					inp = in;
+					continue;
+				}
+				if (inp >= 0) {
+					if (!restore_keys_for_input(kp, out + 1,
+									inp))
+						break;
+					clear_phantom_key(kp, out, inp);
+					inp = -2;
+				}
+				restore_keys_for_input(kp, out, in);
+			}
+		}
+	}
+}
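+
+/*
+ * Ghost-key background for the pass above: with three keys closed on two
+ * rows and two columns of a diode-less matrix, a fourth key reads as
+ * pressed. E.g. pressing (out0,in0), (out0,in1) and (out1,in0) makes
+ * (out1,in1) appear pressed as well. The scan looks for output rows with
+ * two or more active inputs and, on a conflict with another row, clears
+ * the involved keys that the input core has not already confirmed.
+ */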
+
+static void report_key(struct gpio_kp *kp, int key_index, int out, int in)
+{
+	struct gpio_event_matrix_info *mi = kp->keypad_info;
+	int pressed = test_bit(key_index, kp->keys_pressed);
+	unsigned short keyentry = mi->keymap[key_index];
+	unsigned short keycode = keyentry & MATRIX_KEY_MASK;
+	unsigned short dev = keyentry >> MATRIX_CODE_BITS;
+
+	if (pressed != test_bit(keycode, kp->input_devs->dev[dev]->key)) {
+		if (keycode == KEY_RESERVED) {
+			if (mi->flags & GPIOKPF_PRINT_UNMAPPED_KEYS)
+				pr_info("gpiomatrix: unmapped key, %d-%d "
+					"(%d-%d) changed to %d\n",
+					out, in, mi->output_gpios[out],
+					mi->input_gpios[in], pressed);
+		} else {
+			if (mi->flags & GPIOKPF_PRINT_MAPPED_KEYS)
+				pr_info("gpiomatrix: key %x, %d-%d (%d-%d) "
+					"changed to %d\n", keycode,
+					out, in, mi->output_gpios[out],
+					mi->input_gpios[in], pressed);
+			input_report_key(kp->input_devs->dev[dev], keycode, pressed);
+		}
+	}
+}
+
+static void report_sync(struct gpio_kp *kp)
+{
+	int i;
+
+	for (i = 0; i < kp->input_devs->count; i++)
+		input_sync(kp->input_devs->dev[i]);
+}
+
+static enum hrtimer_restart gpio_keypad_timer_func(struct hrtimer *timer)
+{
+	int out, in;
+	int key_index;
+	int gpio;
+	struct gpio_kp *kp = container_of(timer, struct gpio_kp, timer);
+	struct gpio_event_matrix_info *mi = kp->keypad_info;
+	unsigned gpio_keypad_flags = mi->flags;
+	unsigned polarity = !!(gpio_keypad_flags & GPIOKPF_ACTIVE_HIGH);
+
+	out = kp->current_output;
+	if (out == mi->noutputs) {
+		out = 0;
+		kp->last_key_state_changed = kp->key_state_changed;
+		kp->key_state_changed = 0;
+		kp->some_keys_pressed = 0;
+	} else {
+		key_index = out * mi->ninputs;
+		for (in = 0; in < mi->ninputs; in++, key_index++) {
+			gpio = mi->input_gpios[in];
+			if (gpio_get_value(gpio) ^ !polarity) {
+				if (kp->some_keys_pressed < 3)
+					kp->some_keys_pressed++;
+				kp->key_state_changed |= !__test_and_set_bit(
+						key_index, kp->keys_pressed);
+			} else
+				kp->key_state_changed |= __test_and_clear_bit(
+						key_index, kp->keys_pressed);
+		}
+		gpio = mi->output_gpios[out];
+		if (gpio_keypad_flags & GPIOKPF_DRIVE_INACTIVE)
+			gpio_set_value(gpio, !polarity);
+		else
+			gpio_direction_input(gpio);
+		out++;
+	}
+	kp->current_output = out;
+	if (out < mi->noutputs) {
+		gpio = mi->output_gpios[out];
+		if (gpio_keypad_flags & GPIOKPF_DRIVE_INACTIVE)
+			gpio_set_value(gpio, polarity);
+		else
+			gpio_direction_output(gpio, polarity);
+		hrtimer_start(timer, mi->settle_time, HRTIMER_MODE_REL);
+		return HRTIMER_NORESTART;
+	}
+	if (gpio_keypad_flags & GPIOKPF_DEBOUNCE) {
+		if (kp->key_state_changed) {
+			hrtimer_start(&kp->timer, mi->debounce_delay,
+				      HRTIMER_MODE_REL);
+			return HRTIMER_NORESTART;
+		}
+		kp->key_state_changed = kp->last_key_state_changed;
+	}
+	if (kp->key_state_changed) {
+		if (gpio_keypad_flags & GPIOKPF_REMOVE_SOME_PHANTOM_KEYS)
+			remove_phantom_keys(kp);
+		key_index = 0;
+		for (out = 0; out < mi->noutputs; out++)
+			for (in = 0; in < mi->ninputs; in++, key_index++)
+				report_key(kp, key_index, out, in);
+		report_sync(kp);
+	}
+	if (!kp->use_irq || kp->some_keys_pressed) {
+		hrtimer_start(timer, mi->poll_time, HRTIMER_MODE_REL);
+		return HRTIMER_NORESTART;
+	}
+
+	/* No keys are pressed, re-enable the input interrupts */
+	for (out = 0; out < mi->noutputs; out++) {
+		if (gpio_keypad_flags & GPIOKPF_DRIVE_INACTIVE)
+			gpio_set_value(mi->output_gpios[out], polarity);
+		else
+			gpio_direction_output(mi->output_gpios[out], polarity);
+	}
+	for (in = 0; in < mi->ninputs; in++)
+		enable_irq(gpio_to_irq(mi->input_gpios[in]));
+	wake_unlock(&kp->wake_lock);
+	return HRTIMER_NORESTART;
+}
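+
+/*
+ * Scan cycle driven by the timer above: one output line is driven
+ * active, the timer re-arms for settle_time, and on the next expiry all
+ * input lines are sampled before advancing to the next output. After the
+ * last output the accumulated bitmap is optionally debounced, phantom
+ * keys are removed and changes are reported; the keypad then either
+ * polls again after poll_time, or, when no key is down and IRQs are in
+ * use, re-enables the input interrupts and drops the wake lock.
+ */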
+
+static irqreturn_t gpio_keypad_irq_handler(int irq_in, void *dev_id)
+{
+	int i;
+	struct gpio_kp *kp = dev_id;
+	struct gpio_event_matrix_info *mi = kp->keypad_info;
+	unsigned gpio_keypad_flags = mi->flags;
+
+	if (!kp->use_irq) {
+		/* ignore interrupt while registering the handler */
+		kp->disabled_irq = 1;
+		disable_irq_nosync(irq_in);
+		return IRQ_HANDLED;
+	}
+
+	for (i = 0; i < mi->ninputs; i++)
+		disable_irq_nosync(gpio_to_irq(mi->input_gpios[i]));
+	for (i = 0; i < mi->noutputs; i++) {
+		if (gpio_keypad_flags & GPIOKPF_DRIVE_INACTIVE)
+			gpio_set_value(mi->output_gpios[i],
+				!(gpio_keypad_flags & GPIOKPF_ACTIVE_HIGH));
+		else
+			gpio_direction_input(mi->output_gpios[i]);
+	}
+	wake_lock(&kp->wake_lock);
+	hrtimer_start(&kp->timer, ktime_set(0, 0), HRTIMER_MODE_REL);
+	return IRQ_HANDLED;
+}
+
+static int gpio_keypad_request_irqs(struct gpio_kp *kp)
+{
+	int i;
+	int err;
+	unsigned int irq;
+	unsigned long request_flags;
+	struct gpio_event_matrix_info *mi = kp->keypad_info;
+
+	switch (mi->flags & (GPIOKPF_ACTIVE_HIGH|GPIOKPF_LEVEL_TRIGGERED_IRQ)) {
+	default:
+		request_flags = IRQF_TRIGGER_FALLING;
+		break;
+	case GPIOKPF_ACTIVE_HIGH:
+		request_flags = IRQF_TRIGGER_RISING;
+		break;
+	case GPIOKPF_LEVEL_TRIGGERED_IRQ:
+		request_flags = IRQF_TRIGGER_LOW;
+		break;
+	case GPIOKPF_LEVEL_TRIGGERED_IRQ | GPIOKPF_ACTIVE_HIGH:
+		request_flags = IRQF_TRIGGER_HIGH;
+		break;
+	}
+
+	for (i = 0; i < mi->ninputs; i++) {
+		err = irq = gpio_to_irq(mi->input_gpios[i]);
+		if (err < 0)
+			goto err_gpio_get_irq_num_failed;
+		err = request_irq(irq, gpio_keypad_irq_handler, request_flags,
+				  "gpio_kp", kp);
+		if (err) {
+			pr_err("gpiomatrix: request_irq failed for input %d, "
+				"irq %d\n", mi->input_gpios[i], irq);
+			goto err_request_irq_failed;
+		}
+		err = enable_irq_wake(irq);
+		if (err) {
+			pr_err("gpiomatrix: set_irq_wake failed for input %d, "
+				"irq %d\n", mi->input_gpios[i], irq);
+		}
+		disable_irq(irq);
+		if (kp->disabled_irq) {
+			kp->disabled_irq = 0;
+			enable_irq(irq);
+		}
+	}
+	return 0;
+
+	for (i = mi->ninputs - 1; i >= 0; i--) {
+		free_irq(gpio_to_irq(mi->input_gpios[i]), kp);
+err_request_irq_failed:
+err_gpio_get_irq_num_failed:
+		;
+	}
+	return err;
+}
+
+int gpio_event_matrix_func(struct gpio_event_input_devs *input_devs,
+	struct gpio_event_info *info, void **data, int func)
+{
+	int i;
+	int err;
+	int key_count;
+	struct gpio_kp *kp;
+	struct gpio_event_matrix_info *mi;
+
+	mi = container_of(info, struct gpio_event_matrix_info, info);
+	if (func == GPIO_EVENT_FUNC_SUSPEND || func == GPIO_EVENT_FUNC_RESUME) {
+		/* TODO: disable scanning */
+		return 0;
+	}
+
+	if (func == GPIO_EVENT_FUNC_INIT) {
+		if (mi->keymap == NULL ||
+		   mi->input_gpios == NULL ||
+		   mi->output_gpios == NULL) {
+			err = -ENODEV;
+			pr_err("gpiomatrix: Incomplete pdata\n");
+			goto err_invalid_platform_data;
+		}
+		key_count = mi->ninputs * mi->noutputs;
+
+		*data = kp = kzalloc(sizeof(*kp) + sizeof(kp->keys_pressed[0]) *
+				     BITS_TO_LONGS(key_count), GFP_KERNEL);
+		if (kp == NULL) {
+			err = -ENOMEM;
+			pr_err("gpiomatrix: Failed to allocate private data\n");
+			goto err_kp_alloc_failed;
+		}
+		kp->input_devs = input_devs;
+		kp->keypad_info = mi;
+		for (i = 0; i < key_count; i++) {
+			unsigned short keyentry = mi->keymap[i];
+			unsigned short keycode = keyentry & MATRIX_KEY_MASK;
+			unsigned short dev = keyentry >> MATRIX_CODE_BITS;
+			if (dev >= input_devs->count) {
+				pr_err("gpiomatrix: bad device index %d >= "
+					"%d for key code %d\n",
+					dev, input_devs->count, keycode);
+				err = -EINVAL;
+				goto err_bad_keymap;
+			}
+			if (keycode && keycode <= KEY_MAX)
+				input_set_capability(input_devs->dev[dev],
+							EV_KEY, keycode);
+		}
+
+		for (i = 0; i < mi->noutputs; i++) {
+			err = gpio_request(mi->output_gpios[i], "gpio_kp_out");
+			if (err) {
+				pr_err("gpiomatrix: gpio_request failed for "
+					"output %d\n", mi->output_gpios[i]);
+				goto err_request_output_gpio_failed;
+			}
+			if (gpio_cansleep(mi->output_gpios[i])) {
+				pr_err("gpiomatrix: unsupported output gpio %d,"
+					" can sleep\n", mi->output_gpios[i]);
+				err = -EINVAL;
+				goto err_output_gpio_configure_failed;
+			}
+			if (mi->flags & GPIOKPF_DRIVE_INACTIVE)
+				err = gpio_direction_output(mi->output_gpios[i],
+					!(mi->flags & GPIOKPF_ACTIVE_HIGH));
+			else
+				err = gpio_direction_input(mi->output_gpios[i]);
+			if (err) {
+				pr_err("gpiomatrix: gpio_configure failed for "
+					"output %d\n", mi->output_gpios[i]);
+				goto err_output_gpio_configure_failed;
+			}
+		}
+		for (i = 0; i < mi->ninputs; i++) {
+			err = gpio_request(mi->input_gpios[i], "gpio_kp_in");
+			if (err) {
+				pr_err("gpiomatrix: gpio_request failed for "
+					"input %d\n", mi->input_gpios[i]);
+				goto err_request_input_gpio_failed;
+			}
+			err = gpio_direction_input(mi->input_gpios[i]);
+			if (err) {
+				pr_err("gpiomatrix: gpio_direction_input failed"
+					" for input %d\n", mi->input_gpios[i]);
+				goto err_gpio_direction_input_failed;
+			}
+		}
+		kp->current_output = mi->noutputs;
+		kp->key_state_changed = 1;
+
+		hrtimer_init(&kp->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+		kp->timer.function = gpio_keypad_timer_func;
+		wake_lock_init(&kp->wake_lock, WAKE_LOCK_SUSPEND, "gpio_kp");
+		err = gpio_keypad_request_irqs(kp);
+		kp->use_irq = err == 0;
+
+		pr_info("GPIO Matrix Keypad Driver: Start keypad matrix for "
+			"%s%s in %s mode\n", input_devs->dev[0]->name,
+			(input_devs->count > 1) ? "..." : "",
+			kp->use_irq ? "interrupt" : "polling");
+
+		if (kp->use_irq)
+			wake_lock(&kp->wake_lock);
+		hrtimer_start(&kp->timer, ktime_set(0, 0), HRTIMER_MODE_REL);
+
+		return 0;
+	}
+
+	err = 0;
+	kp = *data;
+
+	if (kp->use_irq)
+		for (i = mi->ninputs - 1; i >= 0; i--)
+			free_irq(gpio_to_irq(mi->input_gpios[i]), kp);
+
+	hrtimer_cancel(&kp->timer);
+	wake_lock_destroy(&kp->wake_lock);
+	for (i = mi->ninputs - 1; i >= 0; i--) {
+err_gpio_direction_input_failed:
+		gpio_free(mi->input_gpios[i]);
+err_request_input_gpio_failed:
+		;
+	}
+	for (i = mi->noutputs - 1; i >= 0; i--) {
+err_output_gpio_configure_failed:
+		gpio_free(mi->output_gpios[i]);
+err_request_output_gpio_failed:
+		;
+	}
+err_bad_keymap:
+	kfree(kp);
+err_kp_alloc_failed:
+err_invalid_platform_data:
+	return err;
+}
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/input/misc/gpio_output.c	2019-01-22 16:16:23.995250862 +0100
@@ -0,0 +1,97 @@
+/* drivers/input/misc/gpio_output.c
+ *
+ * Copyright (C) 2007 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/gpio.h>
+#include <linux/gpio_event.h>
+
+int gpio_event_output_event(
+	struct gpio_event_input_devs *input_devs, struct gpio_event_info *info,
+	void **data, unsigned int dev, unsigned int type,
+	unsigned int code, int value)
+{
+	int i;
+	struct gpio_event_output_info *oi;
+	oi = container_of(info, struct gpio_event_output_info, info);
+	if (type != oi->type)
+		return 0;
+	if (!(oi->flags & GPIOEDF_ACTIVE_HIGH))
+		value = !value;
+	for (i = 0; i < oi->keymap_size; i++)
+		if (dev == oi->keymap[i].dev && code == oi->keymap[i].code)
+			gpio_set_value(oi->keymap[i].gpio, value);
+	return 0;
+}
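+
+/*
+ * This event hook mirrors input events (typically EV_LED or EV_SND
+ * codes) onto GPIOs: every keymap entry matching the (dev, code) pair is
+ * driven to the requested value, inverted first for active-low lines.
+ */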
+
+int gpio_event_output_func(
+	struct gpio_event_input_devs *input_devs, struct gpio_event_info *info,
+	void **data, int func)
+{
+	int ret;
+	int i;
+	struct gpio_event_output_info *oi;
+	oi = container_of(info, struct gpio_event_output_info, info);
+
+	if (func == GPIO_EVENT_FUNC_SUSPEND || func == GPIO_EVENT_FUNC_RESUME)
+		return 0;
+
+	if (func == GPIO_EVENT_FUNC_INIT) {
+		int output_level = !(oi->flags & GPIOEDF_ACTIVE_HIGH);
+
+		for (i = 0; i < oi->keymap_size; i++) {
+			int dev = oi->keymap[i].dev;
+			if (dev >= input_devs->count) {
+				pr_err("gpio_event_output_func: bad device "
+					"index %d >= %d for key code %d\n",
+					dev, input_devs->count,
+					oi->keymap[i].code);
+				ret = -EINVAL;
+				goto err_bad_keymap;
+			}
+			input_set_capability(input_devs->dev[dev], oi->type,
+					     oi->keymap[i].code);
+		}
+
+		for (i = 0; i < oi->keymap_size; i++) {
+			ret = gpio_request(oi->keymap[i].gpio,
+					   "gpio_event_output");
+			if (ret) {
+				pr_err("gpio_event_output_func: gpio_request "
+					"failed for %d\n", oi->keymap[i].gpio);
+				goto err_gpio_request_failed;
+			}
+			ret = gpio_direction_output(oi->keymap[i].gpio,
+						    output_level);
+			if (ret) {
+				pr_err("gpio_event_output_func: "
+					"gpio_direction_output failed for %d\n",
+					oi->keymap[i].gpio);
+				goto err_gpio_direction_output_failed;
+			}
+		}
+		return 0;
+	}
+
+	ret = 0;
+	for (i = oi->keymap_size - 1; i >= 0; i--) {
+err_gpio_direction_output_failed:
+		gpio_free(oi->keymap[i].gpio);
+err_gpio_request_failed:
+		;
+	}
+err_bad_keymap:
+	return ret;
+}
+
diff -Nruw linux-4.4.115-fbx/drivers/input/misc/ots_pat9125./Kconfig linux-4.4.115-fbx/drivers/input/misc/ots_pat9125/Kconfig
--- linux-4.4.115-fbx/drivers/input/misc/ots_pat9125./Kconfig	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/input/misc/ots_pat9125/Kconfig	2019-01-22 16:16:23.999250898 +0100
@@ -0,0 +1,14 @@
+#
+# PixArt OTS switch driver configuration
+#
+
+config INPUT_PIXART_OTS_PAT9125_SWITCH
+	tristate "PixArt PAT9125 Rotating Switch driver"
+	depends on INPUT && I2C && GPIOLIB
+	help
+	  Say Y to enable support for the PixArt OTS PAT9125
+	  rotating switch driver.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called ots_pat9125.
+	  If unsure, say N.
diff -Nruw linux-4.4.115-fbx/drivers/input/misc/ots_pat9125./Makefile linux-4.4.115-fbx/drivers/input/misc/ots_pat9125/Makefile
--- linux-4.4.115-fbx/drivers/input/misc/ots_pat9125./Makefile	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/input/misc/ots_pat9125/Makefile	2019-01-22 16:16:23.999250898 +0100
@@ -0,0 +1,7 @@
+#
+# Makefile for the PixArt OTS switch driver.
+#
+
+# Each configuration option enables a list of files.
+
+obj-$(CONFIG_INPUT_PIXART_OTS_PAT9125_SWITCH) += pat9125_linux_driver.o pixart_ots.o
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/input/misc/qpnp-power-on.c	2019-01-22 16:16:23.999250898 +0100
@@ -0,0 +1,2413 @@
+/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/debugfs.h>
+#include <linux/kernel.h>
+#include <linux/regmap.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/spmi.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/interrupt.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/input.h>
+#include <linux/log2.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+#include <linux/regulator/of_regulator.h>
+#include <linux/input/qpnp-power-on.h>
+#include <linux/power_supply.h>
+
+#define PMIC_VER_8941           0x01
+#define PMIC_VERSION_REG        0x0105
+#define PMIC_VERSION_REV4_REG   0x0103
+
+#define PMIC8941_V1_REV4	0x01
+#define PMIC8941_V2_REV4	0x02
+#define PON_PRIMARY		0x01
+#define PON_SECONDARY		0x02
+#define PON_1REG		0x03
+#define PON_GEN2_PRIMARY	0x04
+#define PON_GEN2_SECONDARY	0x05
+
+#define PON_OFFSET(subtype, offset_gen1, offset_gen2) \
+	(((subtype == PON_PRIMARY) || \
+	(subtype == PON_SECONDARY) || \
+	(subtype == PON_1REG)) ? offset_gen1 : offset_gen2)
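+
+/*
+ * PON_OFFSET() picks between the gen1 and gen2 register maps based on
+ * the PON peripheral subtype. As a worked example, QPNP_PON_REASON1()
+ * below resolves to base + 0x8 on PON_PRIMARY/PON_SECONDARY/PON_1REG
+ * parts and to base + 0xC0 on PON_GEN2_* parts.
+ */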
+
+/* Common PNP defines */
+#define QPNP_PON_REVISION2(pon)			((pon)->base + 0x01)
+#define QPNP_PON_PERPH_SUBTYPE(pon)		((pon)->base + 0x05)
+
+/* PON common register addresses */
+#define QPNP_PON_RT_STS(pon)			((pon)->base + 0x10)
+#define QPNP_PON_PULL_CTL(pon)			((pon)->base + 0x70)
+#define QPNP_PON_DBC_CTL(pon)			((pon)->base + 0x71)
+
+/* PON/RESET sources register addresses */
+#define QPNP_PON_REASON1(pon) \
+	((pon)->base + PON_OFFSET((pon)->subtype, 0x8, 0xC0))
+#define QPNP_PON_WARM_RESET_REASON1(pon) \
+	((pon)->base + PON_OFFSET((pon)->subtype, 0xA, 0xC2))
+#define QPNP_POFF_REASON1(pon) \
+	((pon)->base + PON_OFFSET((pon)->subtype, 0xC, 0xC5))
+#define QPNP_PON_WARM_RESET_REASON2(pon)	((pon)->base + 0xB)
+#define QPNP_PON_OFF_REASON(pon)		((pon)->base + 0xC7)
+#define QPNP_FAULT_REASON1(pon)			((pon)->base + 0xC8)
+#define QPNP_S3_RESET_REASON(pon)		((pon)->base + 0xCA)
+#define QPNP_PON_KPDPWR_S1_TIMER(pon)		((pon)->base + 0x40)
+#define QPNP_PON_KPDPWR_S2_TIMER(pon)		((pon)->base + 0x41)
+#define QPNP_PON_KPDPWR_S2_CNTL(pon)		((pon)->base + 0x42)
+#define QPNP_PON_KPDPWR_S2_CNTL2(pon)		((pon)->base + 0x43)
+#define QPNP_PON_RESIN_S1_TIMER(pon)		((pon)->base + 0x44)
+#define QPNP_PON_RESIN_S2_TIMER(pon)		((pon)->base + 0x45)
+#define QPNP_PON_RESIN_S2_CNTL(pon)		((pon)->base + 0x46)
+#define QPNP_PON_RESIN_S2_CNTL2(pon)		((pon)->base + 0x47)
+#define QPNP_PON_KPDPWR_RESIN_S1_TIMER(pon)	((pon)->base + 0x48)
+#define QPNP_PON_KPDPWR_RESIN_S2_TIMER(pon)	((pon)->base + 0x49)
+#define QPNP_PON_KPDPWR_RESIN_S2_CNTL(pon)	((pon)->base + 0x4A)
+#define QPNP_PON_KPDPWR_RESIN_S2_CNTL2(pon)	((pon)->base + 0x4B)
+#define QPNP_PON_PS_HOLD_RST_CTL(pon)		((pon)->base + 0x5A)
+#define QPNP_PON_PS_HOLD_RST_CTL2(pon)		((pon)->base + 0x5B)
+#define QPNP_PON_WD_RST_S2_CTL(pon)		((pon)->base + 0x56)
+#define QPNP_PON_WD_RST_S2_CTL2(pon)		((pon)->base + 0x57)
+#define QPNP_PON_S3_SRC(pon)			((pon)->base + 0x74)
+#define QPNP_PON_S3_DBC_CTL(pon)		((pon)->base + 0x75)
+#define QPNP_PON_SMPL_CTL(pon)			((pon)->base + 0x7F)
+#define QPNP_PON_TRIGGER_EN(pon)		((pon)->base + 0x80)
+#define QPNP_PON_XVDD_RB_SPARE(pon)		((pon)->base + 0x8E)
+#define QPNP_PON_SOFT_RB_SPARE(pon)		((pon)->base + 0x8F)
+#define QPNP_PON_SEC_ACCESS(pon)		((pon)->base + 0xD0)
+
+#define QPNP_PON_SEC_UNLOCK			0xA5
+
+#define QPNP_PON_WARM_RESET_TFT			BIT(4)
+
+#define QPNP_PON_RESIN_PULL_UP			BIT(0)
+#define QPNP_PON_KPDPWR_PULL_UP			BIT(1)
+#define QPNP_PON_CBLPWR_PULL_UP			BIT(2)
+#define QPNP_PON_FAULT_PULL_UP			BIT(4)
+#define QPNP_PON_S2_CNTL_EN			BIT(7)
+#define QPNP_PON_S2_RESET_ENABLE		BIT(7)
+#define QPNP_PON_DELAY_BIT_SHIFT		6
+#define QPNP_PON_GEN2_DELAY_BIT_SHIFT		14
+
+#define QPNP_PON_S1_TIMER_MASK			(0xF)
+#define QPNP_PON_S2_TIMER_MASK			(0x7)
+#define QPNP_PON_S2_CNTL_TYPE_MASK		(0xF)
+
+#define QPNP_PON_DBC_DELAY_MASK(pon) \
+		PON_OFFSET((pon)->subtype, 0x7, 0xF)
+
+#define QPNP_PON_KPDPWR_N_SET			BIT(0)
+#define QPNP_PON_RESIN_N_SET			BIT(1)
+#define QPNP_PON_CBLPWR_N_SET			BIT(2)
+#define QPNP_PON_RESIN_BARK_N_SET		BIT(4)
+#define QPNP_PON_KPDPWR_RESIN_BARK_N_SET	BIT(5)
+
+#define QPNP_PON_WD_EN				BIT(7)
+#define QPNP_PON_RESET_EN			BIT(7)
+#define QPNP_PON_POWER_OFF_MASK			0xF
+#define QPNP_GEN2_POFF_SEQ			BIT(7)
+#define QPNP_GEN2_FAULT_SEQ			BIT(6)
+#define QPNP_GEN2_S3_RESET_SEQ			BIT(5)
+
+#define QPNP_PON_S3_SRC_KPDPWR			0
+#define QPNP_PON_S3_SRC_RESIN			1
+#define QPNP_PON_S3_SRC_KPDPWR_AND_RESIN	2
+#define QPNP_PON_S3_SRC_KPDPWR_OR_RESIN		3
+#define QPNP_PON_S3_SRC_MASK			0x3
+#define QPNP_PON_HARD_RESET_MASK		GENMASK(7, 5)
+
+#define QPNP_PON_UVLO_DLOAD_EN			BIT(7)
+#define QPNP_PON_SMPL_EN			BIT(7)
+
+/* Ranges */
+#define QPNP_PON_S1_TIMER_MAX			10256
+#define QPNP_PON_S2_TIMER_MAX			2000
+#define QPNP_PON_S3_TIMER_SECS_MAX		128
+#define QPNP_PON_S3_DBC_DELAY_MASK		0x07
+#define QPNP_PON_RESET_TYPE_MAX			0xF
+#define PON_S1_COUNT_MAX			0xF
+#define QPNP_PON_MIN_DBC_US			(USEC_PER_SEC / 64)
+#define QPNP_PON_MAX_DBC_US			(USEC_PER_SEC * 2)
+#define QPNP_PON_GEN2_MIN_DBC_US		62
+#define QPNP_PON_GEN2_MAX_DBC_US		(USEC_PER_SEC / 4)
+
+#define QPNP_KEY_STATUS_DELAY			msecs_to_jiffies(250)
+
+#define QPNP_PON_BUFFER_SIZE			9
+
+#define QPNP_POFF_REASON_UVLO			13
+
+enum qpnp_pon_version {
+	QPNP_PON_GEN1_V1,
+	QPNP_PON_GEN1_V2,
+	QPNP_PON_GEN2,
+};
+
+enum pon_type {
+	PON_KPDPWR,
+	PON_RESIN,
+	PON_CBLPWR,
+	PON_KPDPWR_RESIN,
+};
+
+struct qpnp_pon_config {
+	u32 pon_type;
+	u32 support_reset;
+	u32 key_code;
+	u32 s1_timer;
+	u32 s2_timer;
+	u32 s2_type;
+	u32 pull_up;
+	u32 state_irq;
+	u32 bark_irq;
+	u16 s2_cntl_addr;
+	u16 s2_cntl2_addr;
+	bool old_state;
+	bool use_bark;
+	bool config_reset;
+};
+
+struct pon_regulator {
+	struct qpnp_pon		*pon;
+	struct regulator_dev	*rdev;
+	struct regulator_desc	rdesc;
+	u32			addr;
+	u32			bit;
+	bool			enabled;
+};
+
+struct qpnp_pon {
+	struct platform_device	*pdev;
+	struct regmap		*regmap;
+	struct input_dev	*pon_input;
+	struct qpnp_pon_config	*pon_cfg;
+	struct pon_regulator	*pon_reg_cfg;
+	struct list_head	list;
+	struct delayed_work	bark_work;
+	struct dentry		*debugfs;
+	int			pon_trigger_reason;
+	int			pon_power_off_reason;
+	int			num_pon_reg;
+	int			num_pon_config;
+	u32			dbc_time_us;
+	u32			uvlo;
+	int			warm_reset_poff_type;
+	int			hard_reset_poff_type;
+	int			shutdown_poff_type;
+	u16			base;
+	u8			subtype;
+	u8			pon_ver;
+	u8			warm_reset_reason1;
+	u8			warm_reset_reason2;
+	bool			is_spon;
+	bool			store_hard_reset_reason;
+	bool			kpdpwr_dbc_enable;
+	ktime_t			kpdpwr_last_release_time;
+};
+
+static int pon_ship_mode_en;
+module_param_named(
+	ship_mode_en, pon_ship_mode_en, int, 0600
+);
+
+static struct qpnp_pon *sys_reset_dev;
+static DEFINE_SPINLOCK(spon_list_slock);
+static LIST_HEAD(spon_dev_list);
+
+static u32 s1_delay[PON_S1_COUNT_MAX + 1] = {
+	0, 32, 56, 80, 138, 184, 272, 408, 608, 904, 1352, 2048,
+	3072, 4480, 6720, 10256
+};
+
+static const char * const qpnp_pon_reason[] = {
+	[0] = "Triggered from Hard Reset",
+	[1] = "Triggered from SMPL (sudden momentary power loss)",
+	[2] = "Triggered from RTC (RTC alarm expiry)",
+	[3] = "Triggered from DC (DC charger insertion)",
+	[4] = "Triggered from USB (USB charger insertion)",
+	[5] = "Triggered from PON1 (secondary PMIC)",
+	[6] = "Triggered from CBL (external power supply)",
+	[7] = "Triggered from KPD (power key press)",
+};
+
+#define POFF_REASON_FAULT_OFFSET	16
+#define POFF_REASON_S3_RESET_OFFSET	32
+static const char * const qpnp_poff_reason[] = {
+	/* QPNP_PON_GEN1 POFF reasons */
+	[0] = "Triggered from SOFT (Software)",
+	[1] = "Triggered from PS_HOLD (PS_HOLD/MSM controlled shutdown)",
+	[2] = "Triggered from PMIC_WD (PMIC watchdog)",
+	[3] = "Triggered from GP1 (Keypad_Reset1)",
+	[4] = "Triggered from GP2 (Keypad_Reset2)",
+	[5] = "Triggered from KPDPWR_AND_RESIN (Simultaneous power key and reset line)",
+	[6] = "Triggered from RESIN_N (Reset line/Volume Down Key)",
+	[7] = "Triggered from KPDPWR_N (Long Power Key hold)",
+	[8] = "N/A",
+	[9] = "N/A",
+	[10] = "N/A",
+	[11] = "Triggered from CHARGER (Charger ENUM_TIMER, BOOT_DONE)",
+	[12] = "Triggered from TFT (Thermal Fault Tolerance)",
+	[13] = "Triggered from UVLO (Under Voltage Lock Out)",
+	[14] = "Triggered from OTST3 (Overtemp)",
+	[15] = "Triggered from STAGE3 (Stage 3 reset)",
+
+	/* QPNP_PON_GEN2 FAULT reasons */
+	[16] = "Triggered from GP_FAULT0",
+	[17] = "Triggered from GP_FAULT1",
+	[18] = "Triggered from GP_FAULT2",
+	[19] = "Triggered from GP_FAULT3",
+	[20] = "Triggered from MBG_FAULT",
+	[21] = "Triggered from OVLO (Over Voltage Lock Out)",
+	[22] = "Triggered from UVLO (Under Voltage Lock Out)",
+	[23] = "Triggered from AVDD_RB",
+	[24] = "N/A",
+	[25] = "N/A",
+	[26] = "N/A",
+	[27] = "Triggered from FAULT_FAULT_N",
+	[28] = "Triggered from FAULT_PBS_WATCHDOG_TO",
+	[29] = "Triggered from FAULT_PBS_NACK",
+	[30] = "Triggered from FAULT_RESTART_PON",
+	[31] = "Triggered from OTST3 (Overtemp)",
+
+	/* QPNP_PON_GEN2 S3_RESET reasons */
+	[32] = "N/A",
+	[33] = "N/A",
+	[34] = "N/A",
+	[35] = "N/A",
+	[36] = "Triggered from S3_RESET_FAULT_N",
+	[37] = "Triggered from S3_RESET_PBS_WATCHDOG_TO",
+	[38] = "Triggered from S3_RESET_PBS_NACK",
+	[39] = "Triggered from S3_RESET_KPDPWR_ANDOR_RESIN (power key and/or reset line)",
+};
+
+static int
+qpnp_pon_masked_write(struct qpnp_pon *pon, u16 addr, u8 mask, u8 val)
+{
+	int rc;
+
+	rc = regmap_update_bits(pon->regmap, addr, mask, val);
+	if (rc)
+		dev_err(&pon->pdev->dev,
+			"Unable to regmap_update_bits to addr=%hx, rc(%d)\n",
+			addr, rc);
+	return rc;
+}
+
+static bool is_pon_gen1(struct qpnp_pon *pon)
+{
+	return pon->subtype == PON_PRIMARY ||
+			pon->subtype == PON_SECONDARY;
+}
+
+static bool is_pon_gen2(struct qpnp_pon *pon)
+{
+	return pon->subtype == PON_GEN2_PRIMARY ||
+			pon->subtype == PON_GEN2_SECONDARY;
+}
+
+/**
+ * qpnp_pon_set_restart_reason - Store the device restart reason in a PMIC
+ * spare register.
+ *
+ * Returns 0 if the PMIC feature is not available or if the restart reason
+ * was stored successfully.
+ * Returns < 0 on error.
+ *
+ * The reason is only written when the driver is configured to store hard
+ * reset reasons; otherwise this function returns 0 immediately.
+ */
+int qpnp_pon_set_restart_reason(enum pon_restart_reason reason)
+{
+	int rc = 0;
+	struct qpnp_pon *pon = sys_reset_dev;
+
+	if (!pon)
+		return 0;
+
+	if (!pon->store_hard_reset_reason)
+		return 0;
+
+	if (is_pon_gen2(pon))
+		rc = qpnp_pon_masked_write(pon, QPNP_PON_SOFT_RB_SPARE(pon),
+					   GENMASK(7, 1), (reason << 1));
+	else
+		rc = qpnp_pon_masked_write(pon, QPNP_PON_SOFT_RB_SPARE(pon),
+					   GENMASK(7, 2), (reason << 2));
+
+	if (rc)
+		dev_err(&pon->pdev->dev,
+				"Unable to write to addr=%x, rc(%d)\n",
+				QPNP_PON_SOFT_RB_SPARE(pon), rc);
+	return rc;
+}
+EXPORT_SYMBOL(qpnp_pon_set_restart_reason);
+
+/*
+ * qpnp_pon_check_hard_reset_stored - Check whether the PMIC is configured
+ * to store the hard reset reason.
+ *
+ * Returns true if the reset reason can be stored, false otherwise.
+ */
+bool qpnp_pon_check_hard_reset_stored(void)
+{
+	struct qpnp_pon *pon = sys_reset_dev;
+
+	if (!pon)
+		return false;
+
+	return pon->store_hard_reset_reason;
+}
+EXPORT_SYMBOL(qpnp_pon_check_hard_reset_stored);
+
+static int qpnp_pon_set_dbc(struct qpnp_pon *pon, u32 delay)
+{
+	int rc = 0;
+	u32 val;
+
+	if (delay == pon->dbc_time_us)
+		goto out;
+
+	if (pon->pon_input)
+		mutex_lock(&pon->pon_input->mutex);
+
+	if (is_pon_gen2(pon)) {
+		if (delay < QPNP_PON_GEN2_MIN_DBC_US)
+			delay = QPNP_PON_GEN2_MIN_DBC_US;
+		else if (delay > QPNP_PON_GEN2_MAX_DBC_US)
+			delay = QPNP_PON_GEN2_MAX_DBC_US;
+		val = (delay << QPNP_PON_GEN2_DELAY_BIT_SHIFT) / USEC_PER_SEC;
+	} else {
+		if (delay < QPNP_PON_MIN_DBC_US)
+			delay = QPNP_PON_MIN_DBC_US;
+		else if (delay > QPNP_PON_MAX_DBC_US)
+			delay = QPNP_PON_MAX_DBC_US;
+		val = (delay << QPNP_PON_DELAY_BIT_SHIFT) / USEC_PER_SEC;
+	}
+
+	val = ilog2(val);
+	rc = qpnp_pon_masked_write(pon, QPNP_PON_DBC_CTL(pon),
+					QPNP_PON_DBC_DELAY_MASK(pon), val);
+	if (rc) {
+		dev_err(&pon->pdev->dev, "Unable to set PON debounce\n");
+		goto unlock;
+	}
+
+	pon->dbc_time_us = delay;
+
+unlock:
+	if (pon->pon_input)
+		mutex_unlock(&pon->pon_input->mutex);
+out:
+	return rc;
+}
+
+static int qpnp_pon_get_dbc(struct qpnp_pon *pon, u32 *delay)
+{
+	int rc;
+	unsigned int val;
+
+	rc = regmap_read(pon->regmap, QPNP_PON_DBC_CTL(pon), &val);
+	if (rc) {
+		pr_err("Unable to read pon_dbc_ctl rc=%d\n", rc);
+		return rc;
+	}
+	val &= QPNP_PON_DBC_DELAY_MASK(pon);
+
+	if (is_pon_gen2(pon))
+		*delay = USEC_PER_SEC /
+			(1 << (QPNP_PON_GEN2_DELAY_BIT_SHIFT - val));
+	else
+		*delay = USEC_PER_SEC /
+			(1 << (QPNP_PON_DELAY_BIT_SHIFT - val));
+
+	return rc;
+}
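+
+/*
+ * Debounce encoding round-trip, with gen1 numbers as a worked example:
+ * requesting 500000 us stores val = ilog2((500000 << 6) / USEC_PER_SEC)
+ * = ilog2(32) = 5, and reading back yields USEC_PER_SEC / (1 << (6 - 5))
+ * = 500000 us. The delay is thus quantized to USEC_PER_SEC / 2^n steps
+ * within the MIN/MAX bounds defined above.
+ */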
+
+static ssize_t qpnp_pon_dbc_show(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	struct qpnp_pon *pon = dev_get_drvdata(dev);
+
+	return snprintf(buf, QPNP_PON_BUFFER_SIZE, "%d\n", pon->dbc_time_us);
+}
+
+static ssize_t qpnp_pon_dbc_store(struct device *dev,
+				struct device_attribute *attr,
+				const char *buf, size_t size)
+{
+	struct qpnp_pon *pon = dev_get_drvdata(dev);
+	u32 value;
+	int rc;
+
+	if (size > QPNP_PON_BUFFER_SIZE)
+		return -EINVAL;
+
+	rc = kstrtou32(buf, 10, &value);
+	if (rc)
+		return rc;
+
+	rc = qpnp_pon_set_dbc(pon, value);
+	if (rc < 0)
+		return rc;
+
+	return size;
+}
+
+static DEVICE_ATTR(debounce_us, 0664, qpnp_pon_dbc_show, qpnp_pon_dbc_store);
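+
+/*
+ * The attribute above exposes the debounce time in sysfs. Assuming the
+ * usual platform-device path (board specific, elided here):
+ *
+ *   echo 500000 > /sys/devices/.../debounce_us
+ *
+ * goes through qpnp_pon_dbc_store() and rewrites QPNP_PON_DBC_CTL via
+ * qpnp_pon_set_dbc().
+ */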
+
+static int qpnp_pon_reset_config(struct qpnp_pon *pon,
+		enum pon_power_off_type type)
+{
+	int rc;
+	u16 rst_en_reg;
+
+	if (pon->pon_ver == QPNP_PON_GEN1_V1)
+		rst_en_reg = QPNP_PON_PS_HOLD_RST_CTL(pon);
+	else
+		rst_en_reg = QPNP_PON_PS_HOLD_RST_CTL2(pon);
+
+	/*
+	 * Based on the power-off type set for this PON device through the
+	 * device tree, remap the type that is configured into PS_HOLD_RST_CTL.
+	 */
+	switch (type) {
+	case PON_POWER_OFF_WARM_RESET:
+		if (pon->warm_reset_poff_type != -EINVAL)
+			type = pon->warm_reset_poff_type;
+		break;
+	case PON_POWER_OFF_HARD_RESET:
+		if (pon->hard_reset_poff_type != -EINVAL)
+			type = pon->hard_reset_poff_type;
+		break;
+	case PON_POWER_OFF_SHUTDOWN:
+		if (pon->shutdown_poff_type != -EINVAL)
+			type = pon->shutdown_poff_type;
+		break;
+	default:
+		break;
+	}
+
+	rc = qpnp_pon_masked_write(pon, rst_en_reg, QPNP_PON_RESET_EN, 0);
+	if (rc)
+		dev_err(&pon->pdev->dev,
+			"Unable to write to addr=%hx, rc(%d)\n",
+			rst_en_reg, rc);
+
+	/*
+	 * We need 10 sleep clock cycles here. But since the clock is
+	 * internally generated, we need to add 50% tolerance to be
+	 * conservative.
+	 */
+	udelay(500);
+
+	rc = qpnp_pon_masked_write(pon, QPNP_PON_PS_HOLD_RST_CTL(pon),
+				   QPNP_PON_POWER_OFF_MASK, type);
+	if (rc)
+		dev_err(&pon->pdev->dev,
+			"Unable to write to addr=%x, rc(%d)\n",
+				QPNP_PON_PS_HOLD_RST_CTL(pon), rc);
+
+	rc = qpnp_pon_masked_write(pon, rst_en_reg, QPNP_PON_RESET_EN,
+						    QPNP_PON_RESET_EN);
+	if (rc)
+		dev_err(&pon->pdev->dev,
+			"Unable to write to addr=%hx, rc(%d)\n",
+			rst_en_reg, rc);
+
+	dev_dbg(&pon->pdev->dev, "power off type = 0x%02X\n", type);
+	return rc;
+}
+
+/**
+ * qpnp_pon_system_pwr_off - Configure system-reset PMIC for shutdown or reset
+ * @type: Determines the type of power off to perform - shutdown, reset, etc
+ *
+ * This function supports configurations with multiple PMICs: when secondary
+ * PMICs are present, their PON blocks are configured for the requested power
+ * off type as well. Once the system-reset and secondary PMICs are configured,
+ * the MSM can drop PS_HOLD to activate the specified configuration. Note that
+ * this function may be called from atomic context, as in the panic notifier
+ * path, and thus must not rely on function calls that may sleep.
+ */
+int qpnp_pon_system_pwr_off(enum pon_power_off_type type)
+{
+	int rc = 0;
+	struct qpnp_pon *pon = sys_reset_dev;
+	struct qpnp_pon *tmp;
+	struct power_supply *batt_psy;
+	union power_supply_propval val;
+	unsigned long flags;
+
+	if (!pon)
+		return -ENODEV;
+
+	rc = qpnp_pon_reset_config(pon, type);
+	if (rc) {
+		dev_err(&pon->pdev->dev,
+			"Error configuring main PON rc: %d\n",
+			rc);
+		return rc;
+	}
+
+	/*
+	 * Check if a secondary PON device needs to be configured. If it
+	 * is available, configure that also as per the requested power off
+	 * type
+	 */
+	spin_lock_irqsave(&spon_list_slock, flags);
+	if (list_empty(&spon_dev_list))
+		goto out;
+
+	list_for_each_entry_safe(pon, tmp, &spon_dev_list, list) {
+		dev_emerg(&pon->pdev->dev,
+				"PMIC@SID%d: configuring PON for reset\n",
+				to_spmi_device(pon->pdev->dev.parent)->usid);
+		rc = qpnp_pon_reset_config(pon, type);
+		if (rc) {
+			dev_err(&pon->pdev->dev,
+				"Error configuring secondary PON rc: %d\n",
+				rc);
+			goto out;
+		}
+	}
+	/* Set ship mode here if it has been requested */
+	if (!!pon_ship_mode_en) {
+		batt_psy = power_supply_get_by_name("battery");
+		if (batt_psy) {
+			pr_debug("Set ship mode!\n");
+			val.intval = 1;
+			rc = power_supply_set_property(batt_psy,
+					POWER_SUPPLY_PROP_SET_SHIP_MODE, &val);
+			if (rc)
+				dev_err(&pon->pdev->dev,
+						"Set ship-mode failed\n");
+		}
+	}
+out:
+	spin_unlock_irqrestore(&spon_list_slock, flags);
+	return rc;
+}
+EXPORT_SYMBOL(qpnp_pon_system_pwr_off);
+
+/**
+ * qpnp_pon_is_warm_reset - Checks if the PMIC went through a warm reset.
+ *
+ * Returns > 0 for a warm reset, 0 for a non-warm reset, < 0 on error.
+ *
+ * Note that this function will only return the warm vs not-warm reset status
+ * of the PMIC that is configured as the system-reset device.
+ */
+int qpnp_pon_is_warm_reset(void)
+{
+	struct qpnp_pon *pon = sys_reset_dev;
+
+	if (!pon)
+		return -EPROBE_DEFER;
+
+	if (is_pon_gen1(pon) || pon->subtype == PON_1REG)
+		return pon->warm_reset_reason1
+			|| (pon->warm_reset_reason2 & QPNP_PON_WARM_RESET_TFT);
+	else
+		return pon->warm_reset_reason1;
+}
+EXPORT_SYMBOL(qpnp_pon_is_warm_reset);
+
+/**
+ * qpnp_pon_wd_config - Enable or disable the PON watchdog.
+ * @enable: true to enable, false to disable the PON watchdog
+ *
+ * Returns 0 on success, < 0 on error.
+ */
+int qpnp_pon_wd_config(bool enable)
+{
+	struct qpnp_pon *pon = sys_reset_dev;
+	int rc = 0;
+
+	if (!pon)
+		return -EPROBE_DEFER;
+
+	rc = qpnp_pon_masked_write(pon, QPNP_PON_WD_RST_S2_CTL2(pon),
+			QPNP_PON_WD_EN, enable ? QPNP_PON_WD_EN : 0);
+	if (rc)
+		dev_err(&pon->pdev->dev,
+				"Unable to write to addr=%x, rc(%d)\n",
+				QPNP_PON_WD_RST_S2_CTL2(pon), rc);
+
+	return rc;
+}
+EXPORT_SYMBOL(qpnp_pon_wd_config);
+
+static int qpnp_pon_get_trigger_config(enum pon_trigger_source pon_src,
+							bool *enabled)
+{
+	struct qpnp_pon *pon = sys_reset_dev;
+	int rc;
+	u16 addr;
+	int val;
+	u8 mask;
+
+	if (!pon)
+		return -ENODEV;
+
+	if (pon_src < PON_SMPL || pon_src > PON_KPDPWR_N) {
+		dev_err(&pon->pdev->dev, "Invalid PON source\n");
+		return -EINVAL;
+	}
+
+	addr = QPNP_PON_TRIGGER_EN(pon);
+	mask = BIT(pon_src);
+	if (is_pon_gen2(pon) && pon_src == PON_SMPL) {
+		addr = QPNP_PON_SMPL_CTL(pon);
+		mask = QPNP_PON_SMPL_EN;
+	}
+
+	rc = regmap_read(pon->regmap, addr, &val);
+	if (rc)
+		dev_err(&pon->pdev->dev,
+			"Unable to read from addr=%hx, rc(%d)\n",
+			addr, rc);
+	else
+		*enabled = !!(val & mask);
+
+	return rc;
+}
+
+/**
+ * qpnp_pon_trigger_config - Configures (enable/disable) the PON trigger source
+ * @pon_src: PON source to be configured
+ * @enable: to enable or disable the PON trigger
+ *
+ * This function configures the power-on trigger capability of a
+ * PON source. If a specific PON trigger is disabled it cannot act
+ * as a power-on source to the PMIC.
+ */
+int qpnp_pon_trigger_config(enum pon_trigger_source pon_src, bool enable)
+{
+	struct qpnp_pon *pon = sys_reset_dev;
+	int rc;
+
+	if (!pon)
+		return -EPROBE_DEFER;
+
+	if (pon_src < PON_SMPL || pon_src > PON_KPDPWR_N) {
+		dev_err(&pon->pdev->dev, "Invalid PON source\n");
+		return -EINVAL;
+	}
+
+	if (is_pon_gen2(pon) && pon_src == PON_SMPL) {
+		rc = qpnp_pon_masked_write(pon, QPNP_PON_SMPL_CTL(pon),
+			QPNP_PON_SMPL_EN, enable ? QPNP_PON_SMPL_EN : 0);
+		if (rc)
+			dev_err(&pon->pdev->dev,
+				"Unable to write to addr=%x, rc(%d)\n",
+				QPNP_PON_SMPL_CTL(pon), rc);
+	} else {
+		rc = qpnp_pon_masked_write(pon, QPNP_PON_TRIGGER_EN(pon),
+				BIT(pon_src), enable ? BIT(pon_src) : 0);
+		if (rc)
+			dev_err(&pon->pdev->dev,
+				"Unable to write to addr=%x, rc(%d)\n",
+				QPNP_PON_TRIGGER_EN(pon), rc);
+	}
+
+	return rc;
+}
+EXPORT_SYMBOL(qpnp_pon_trigger_config);
+
+/*
+ * This function stores the PMIC warm reset reason register values. It also
+ * clears these registers if the qcom,clear-warm-reset device tree property
+ * is specified.
+ */
+static int qpnp_pon_store_and_clear_warm_reset(struct qpnp_pon *pon)
+{
+	int rc;
+	u8 reg = 0;
+	uint val;
+
+	rc = regmap_read(pon->regmap, QPNP_PON_WARM_RESET_REASON1(pon),
+			 &val);
+	if (rc) {
+		dev_err(&pon->pdev->dev, "Unable to read addr=%x, rc(%d)\n",
+			QPNP_PON_WARM_RESET_REASON1(pon), rc);
+		return rc;
+	}
+	pon->warm_reset_reason1 = (u8)val;
+
+	if (is_pon_gen1(pon) || pon->subtype == PON_1REG) {
+		rc = regmap_read(pon->regmap, QPNP_PON_WARM_RESET_REASON2(pon),
+				 &val);
+		if (rc) {
+			dev_err(&pon->pdev->dev,
+				"Unable to read addr=%x, rc(%d)\n",
+				QPNP_PON_WARM_RESET_REASON2(pon), rc);
+			return rc;
+		}
+		pon->warm_reset_reason2 = (u8)val;
+	}
+
+	if (of_property_read_bool(pon->pdev->dev.of_node,
+					"qcom,clear-warm-reset")) {
+		rc = regmap_write(pon->regmap,
+				  QPNP_PON_WARM_RESET_REASON1(pon), reg);
+		if (rc)
+			dev_err(&pon->pdev->dev,
+				"Unable to write to addr=%hx, rc(%d)\n",
+				QPNP_PON_WARM_RESET_REASON1(pon), rc);
+	}
+
+	return 0;
+}
+
+static struct qpnp_pon_config *
+qpnp_get_cfg(struct qpnp_pon *pon, u32 pon_type)
+{
+	int i;
+
+	for (i = 0; i < pon->num_pon_config; i++) {
+		if (pon_type == pon->pon_cfg[i].pon_type)
+			return  &pon->pon_cfg[i];
+	}
+
+	return NULL;
+}
+
+static int
+qpnp_pon_input_dispatch(struct qpnp_pon *pon, u32 pon_type)
+{
+	int rc;
+	struct qpnp_pon_config *cfg = NULL;
+	u8  pon_rt_bit = 0;
+	u32 key_status;
+	uint pon_rt_sts;
+	u64 elapsed_us;
+
+	cfg = qpnp_get_cfg(pon, pon_type);
+	if (!cfg)
+		return -EINVAL;
+
+	/* Check if key reporting is supported */
+	if (!cfg->key_code)
+		return 0;
+
+	if (pon->kpdpwr_dbc_enable && cfg->pon_type == PON_KPDPWR) {
+		elapsed_us = ktime_us_delta(ktime_get(),
+				pon->kpdpwr_last_release_time);
+		if (elapsed_us < pon->dbc_time_us) {
+			pr_debug("Ignoring kpdpwr event - within debounce time\n");
+			return 0;
+		}
+	}
+
+	/* check the RT status to get the current status of the line */
+	rc = regmap_read(pon->regmap, QPNP_PON_RT_STS(pon), &pon_rt_sts);
+	if (rc) {
+		dev_err(&pon->pdev->dev, "Unable to read PON RT status\n");
+		return rc;
+	}
+
+	switch (cfg->pon_type) {
+	case PON_KPDPWR:
+		pon_rt_bit = QPNP_PON_KPDPWR_N_SET;
+		break;
+	case PON_RESIN:
+		pon_rt_bit = QPNP_PON_RESIN_N_SET;
+		break;
+	case PON_CBLPWR:
+		pon_rt_bit = QPNP_PON_CBLPWR_N_SET;
+		break;
+	case PON_KPDPWR_RESIN:
+		pon_rt_bit = QPNP_PON_KPDPWR_RESIN_BARK_N_SET;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	pr_debug("PMIC input: code=%d, sts=0x%hhx\n",
+					cfg->key_code, pon_rt_sts);
+	key_status = pon_rt_sts & pon_rt_bit;
+
+	if (pon->kpdpwr_dbc_enable && cfg->pon_type == PON_KPDPWR) {
+		if (!key_status)
+			pon->kpdpwr_last_release_time = ktime_get();
+	}
+
+	/*
+	 * simulate press event in case release event occurred
+	 * without a press event
+	 */
+	if (!cfg->old_state && !key_status) {
+		input_report_key(pon->pon_input, cfg->key_code, 1);
+		input_sync(pon->pon_input);
+	}
+
+	input_report_key(pon->pon_input, cfg->key_code, key_status);
+	input_sync(pon->pon_input);
+
+	cfg->old_state = !!key_status;
+
+	return 0;
+}
+
+static irqreturn_t qpnp_kpdpwr_irq(int irq, void *_pon)
+{
+	int rc;
+	struct qpnp_pon *pon = _pon;
+
+	rc = qpnp_pon_input_dispatch(pon, PON_KPDPWR);
+	if (rc)
+		dev_err(&pon->pdev->dev, "Unable to send input event\n");
+
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t qpnp_kpdpwr_bark_irq(int irq, void *_pon)
+{
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t qpnp_resin_irq(int irq, void *_pon)
+{
+	int rc;
+	struct qpnp_pon *pon = _pon;
+
+	rc = qpnp_pon_input_dispatch(pon, PON_RESIN);
+	if (rc)
+		dev_err(&pon->pdev->dev, "Unable to send input event\n");
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t qpnp_kpdpwr_resin_bark_irq(int irq, void *_pon)
+{
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t qpnp_cblpwr_irq(int irq, void *_pon)
+{
+	int rc;
+	struct qpnp_pon *pon = _pon;
+
+	rc = qpnp_pon_input_dispatch(pon, PON_CBLPWR);
+	if (rc)
+		dev_err(&pon->pdev->dev, "Unable to send input event\n");
+
+	return IRQ_HANDLED;
+}
+
+static void print_pon_reg(struct qpnp_pon *pon, u16 offset)
+{
+	int rc;
+	u16 addr;
+	uint reg;
+
+	addr = pon->base + offset;
+	rc = regmap_read(pon->regmap, addr, &reg);
+	if (rc)
+		dev_emerg(&pon->pdev->dev,
+				"Unable to read reg at 0x%04hx\n", addr);
+	else
+		dev_emerg(&pon->pdev->dev, "reg@0x%04hx: %02hhx\n", addr, reg);
+}
+
+#define PON_PBL_STATUS			0x7
+#define PON_PON_REASON1(subtype)	PON_OFFSET(subtype, 0x8, 0xC0)
+#define PON_PON_REASON2			0x9
+#define PON_WARM_RESET_REASON1(subtype)	PON_OFFSET(subtype, 0xA, 0xC2)
+#define PON_WARM_RESET_REASON2		0xB
+#define PON_POFF_REASON1(subtype)	PON_OFFSET(subtype, 0xC, 0xC5)
+#define PON_POFF_REASON2		0xD
+#define PON_SOFT_RESET_REASON1(subtype)	PON_OFFSET(subtype, 0xE, 0xCB)
+#define PON_SOFT_RESET_REASON2		0xF
+#define PON_FAULT_REASON1		0xC8
+#define PON_FAULT_REASON2		0xC9
+#define PON_PMIC_WD_RESET_S1_TIMER	0x54
+#define PON_PMIC_WD_RESET_S2_TIMER	0x55
+static irqreturn_t qpnp_pmic_wd_bark_irq(int irq, void *_pon)
+{
+	struct qpnp_pon *pon = _pon;
+
+	print_pon_reg(pon, PON_PBL_STATUS);
+	print_pon_reg(pon, PON_PON_REASON1(pon->subtype));
+	print_pon_reg(pon, PON_WARM_RESET_REASON1(pon->subtype));
+	print_pon_reg(pon, PON_SOFT_RESET_REASON1(pon->subtype));
+	print_pon_reg(pon, PON_POFF_REASON1(pon->subtype));
+	if (is_pon_gen1(pon) || pon->subtype == PON_1REG) {
+		print_pon_reg(pon, PON_PON_REASON2);
+		print_pon_reg(pon, PON_WARM_RESET_REASON2);
+		print_pon_reg(pon, PON_POFF_REASON2);
+		print_pon_reg(pon, PON_SOFT_RESET_REASON2);
+	} else {
+		print_pon_reg(pon, PON_FAULT_REASON1);
+		print_pon_reg(pon, PON_FAULT_REASON2);
+	}
+	print_pon_reg(pon, PON_PMIC_WD_RESET_S1_TIMER);
+	print_pon_reg(pon, PON_PMIC_WD_RESET_S2_TIMER);
+	panic("PMIC Watch dog triggered");
+
+	return IRQ_HANDLED;
+}
+
+static void bark_work_func(struct work_struct *work)
+{
+	int rc;
+	uint pon_rt_sts = 0;
+	struct qpnp_pon_config *cfg;
+	struct qpnp_pon *pon =
+		container_of(work, struct qpnp_pon, bark_work.work);
+
+	cfg = qpnp_get_cfg(pon, PON_RESIN);
+	if (!cfg) {
+		dev_err(&pon->pdev->dev, "Invalid config pointer\n");
+		goto err_return;
+	}
+
+	/* enable reset */
+	rc = qpnp_pon_masked_write(pon, cfg->s2_cntl2_addr,
+				QPNP_PON_S2_CNTL_EN, QPNP_PON_S2_CNTL_EN);
+	if (rc) {
+		dev_err(&pon->pdev->dev, "Unable to configure S2 enable\n");
+		goto err_return;
+	}
+	/* bark RT status update delay */
+	msleep(100);
+	/* read the bark RT status */
+	rc = regmap_read(pon->regmap, QPNP_PON_RT_STS(pon), &pon_rt_sts);
+	if (rc) {
+		dev_err(&pon->pdev->dev, "Unable to read PON RT status\n");
+		goto err_return;
+	}
+
+	if (!(pon_rt_sts & QPNP_PON_RESIN_BARK_N_SET)) {
+		/* report the key event and enable the bark IRQ */
+		input_report_key(pon->pon_input, cfg->key_code, 0);
+		input_sync(pon->pon_input);
+		enable_irq(cfg->bark_irq);
+	} else {
+		/* disable reset */
+		rc = qpnp_pon_masked_write(pon, cfg->s2_cntl2_addr,
+				QPNP_PON_S2_CNTL_EN, 0);
+		if (rc) {
+			dev_err(&pon->pdev->dev,
+				"Unable to configure S2 enable\n");
+			goto err_return;
+		}
+		/* re-arm the work */
+		schedule_delayed_work(&pon->bark_work, QPNP_KEY_STATUS_DELAY);
+	}
+
+err_return:
+	return;
+}
+
+static irqreturn_t qpnp_resin_bark_irq(int irq, void *_pon)
+{
+	int rc;
+	struct qpnp_pon *pon = _pon;
+	struct qpnp_pon_config *cfg;
+
+	/* disable the bark interrupt */
+	disable_irq_nosync(irq);
+
+	cfg = qpnp_get_cfg(pon, PON_RESIN);
+	if (!cfg) {
+		dev_err(&pon->pdev->dev, "Invalid config pointer\n");
+		goto err_exit;
+	}
+
+	/* disable reset */
+	rc = qpnp_pon_masked_write(pon, cfg->s2_cntl2_addr,
+					QPNP_PON_S2_CNTL_EN, 0);
+	if (rc) {
+		dev_err(&pon->pdev->dev, "Unable to configure S2 enable\n");
+		goto err_exit;
+	}
+
+	/* report the key event */
+	input_report_key(pon->pon_input, cfg->key_code, 1);
+	input_sync(pon->pon_input);
+	/* schedule work to check the bark status for key-release */
+	schedule_delayed_work(&pon->bark_work, QPNP_KEY_STATUS_DELAY);
+err_exit:
+	return IRQ_HANDLED;
+}
+
+static int
+qpnp_config_pull(struct qpnp_pon *pon, struct qpnp_pon_config *cfg)
+{
+	int rc;
+	u8 pull_bit;
+
+	switch (cfg->pon_type) {
+	case PON_KPDPWR:
+		pull_bit = QPNP_PON_KPDPWR_PULL_UP;
+		break;
+	case PON_RESIN:
+		pull_bit = QPNP_PON_RESIN_PULL_UP;
+		break;
+	case PON_CBLPWR:
+		pull_bit = QPNP_PON_CBLPWR_PULL_UP;
+		break;
+	case PON_KPDPWR_RESIN:
+		pull_bit = QPNP_PON_KPDPWR_PULL_UP | QPNP_PON_RESIN_PULL_UP;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	rc = qpnp_pon_masked_write(pon, QPNP_PON_PULL_CTL(pon),
+				pull_bit, cfg->pull_up ? pull_bit : 0);
+	if (rc)
+		dev_err(&pon->pdev->dev, "Unable to config pull-up\n");
+
+	return rc;
+}
+
+static int
+qpnp_config_reset(struct qpnp_pon *pon, struct qpnp_pon_config *cfg)
+{
+	int rc;
+	u8 i;
+	u16 s1_timer_addr, s2_timer_addr;
+
+	switch (cfg->pon_type) {
+	case PON_KPDPWR:
+		s1_timer_addr = QPNP_PON_KPDPWR_S1_TIMER(pon);
+		s2_timer_addr = QPNP_PON_KPDPWR_S2_TIMER(pon);
+		break;
+	case PON_RESIN:
+		s1_timer_addr = QPNP_PON_RESIN_S1_TIMER(pon);
+		s2_timer_addr = QPNP_PON_RESIN_S2_TIMER(pon);
+		break;
+	case PON_KPDPWR_RESIN:
+		s1_timer_addr = QPNP_PON_KPDPWR_RESIN_S1_TIMER(pon);
+		s2_timer_addr = QPNP_PON_KPDPWR_RESIN_S2_TIMER(pon);
+		break;
+	default:
+		return -EINVAL;
+	}
+	/* disable S2 reset */
+	rc = qpnp_pon_masked_write(pon, cfg->s2_cntl2_addr,
+				QPNP_PON_S2_CNTL_EN, 0);
+	if (rc) {
+		dev_err(&pon->pdev->dev, "Unable to configure S2 enable\n");
+		return rc;
+	}
+
+	usleep_range(100, 120);
+
+	/* configure s1 timer, s2 timer and reset type */
+	for (i = 0; i < PON_S1_COUNT_MAX + 1; i++) {
+		if (cfg->s1_timer <= s1_delay[i])
+			break;
+	}
+	rc = qpnp_pon_masked_write(pon, s1_timer_addr,
+				QPNP_PON_S1_TIMER_MASK, i);
+	if (rc) {
+		dev_err(&pon->pdev->dev, "Unable to configure S1 timer\n");
+		return rc;
+	}
+
+	i = 0;
+	if (cfg->s2_timer) {
+		i = cfg->s2_timer / 10;
+		i = ilog2(i + 1);
+	}
+
+	rc = qpnp_pon_masked_write(pon, s2_timer_addr,
+				QPNP_PON_S2_TIMER_MASK, i);
+	if (rc) {
+		dev_err(&pon->pdev->dev, "Unable to configure S2 timer\n");
+		return rc;
+	}
+
+	rc = qpnp_pon_masked_write(pon, cfg->s2_cntl_addr,
+				QPNP_PON_S2_CNTL_TYPE_MASK, (u8)cfg->s2_type);
+	if (rc) {
+		dev_err(&pon->pdev->dev,
+			"Unable to configure S2 reset type\n");
+		return rc;
+	}
+
+	/* enable S2 reset */
+	rc = qpnp_pon_masked_write(pon, cfg->s2_cntl2_addr,
+				QPNP_PON_S2_CNTL_EN, QPNP_PON_S2_CNTL_EN);
+	if (rc) {
+		dev_err(&pon->pdev->dev, "Unable to configure S2 enable\n");
+		return rc;
+	}
+
+	return 0;
+}
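+
+/*
+ * Timer encodings used above, as worked examples: the S1 timer selects
+ * the first s1_delay[] entry that is >= the requested value, so a
+ * 6000 ms request maps to index 14 (6720 ms). The S2 timer is encoded as
+ * ilog2(s2_timer / 10 + 1), so a 2000 ms request gives ilog2(201) = 7.
+ */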
+
+static int
+qpnp_pon_request_irqs(struct qpnp_pon *pon, struct qpnp_pon_config *cfg)
+{
+	int rc = 0;
+
+	switch (cfg->pon_type) {
+	case PON_KPDPWR:
+		rc = devm_request_irq(&pon->pdev->dev, cfg->state_irq,
+							qpnp_kpdpwr_irq,
+				IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
+						"qpnp_kpdpwr_status", pon);
+		if (rc < 0) {
+			dev_err(&pon->pdev->dev, "Can't request IRQ %d\n",
+							cfg->state_irq);
+			return rc;
+		}
+		if (cfg->use_bark) {
+			rc = devm_request_irq(&pon->pdev->dev, cfg->bark_irq,
+						qpnp_kpdpwr_bark_irq,
+						IRQF_TRIGGER_RISING,
+						"qpnp_kpdpwr_bark", pon);
+			if (rc < 0) {
+				dev_err(&pon->pdev->dev,
+					"Can't request IRQ %d\n",
+						cfg->bark_irq);
+				return rc;
+			}
+		}
+		break;
+	case PON_RESIN:
+		rc = devm_request_irq(&pon->pdev->dev, cfg->state_irq,
+							qpnp_resin_irq,
+				IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
+						"qpnp_resin_status", pon);
+		if (rc < 0) {
+			dev_err(&pon->pdev->dev, "Can't request IRQ %d\n",
+							cfg->state_irq);
+			return rc;
+		}
+		if (cfg->use_bark) {
+			rc = devm_request_irq(&pon->pdev->dev, cfg->bark_irq,
+						qpnp_resin_bark_irq,
+						IRQF_TRIGGER_RISING,
+						"qpnp_resin_bark", pon);
+			if (rc < 0) {
+				dev_err(&pon->pdev->dev,
+					"Can't request IRQ %d\n",
+						cfg->bark_irq);
+				return rc;
+			}
+		}
+		break;
+	case PON_CBLPWR:
+		rc = devm_request_irq(&pon->pdev->dev, cfg->state_irq,
+							qpnp_cblpwr_irq,
+				IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
+					"qpnp_cblpwr_status", pon);
+		if (rc < 0) {
+			dev_err(&pon->pdev->dev, "Can't request IRQ %d\n",
+							cfg->state_irq);
+			return rc;
+		}
+		break;
+	case PON_KPDPWR_RESIN:
+		if (cfg->use_bark) {
+			rc = devm_request_irq(&pon->pdev->dev, cfg->bark_irq,
+					qpnp_kpdpwr_resin_bark_irq,
+					IRQF_TRIGGER_RISING,
+					"qpnp_kpdpwr_resin_bark", pon);
+			if (rc < 0) {
+				dev_err(&pon->pdev->dev,
+					"Can't request IRQ %d\n",
+						cfg->bark_irq);
+				return rc;
+			}
+		}
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	/* mark the interrupts as wakeup capable if a key code is mapped */
+	if (cfg->key_code) {
+		enable_irq_wake(cfg->state_irq);
+		/* special handling for RESIN due to a hardware bug */
+		if (cfg->pon_type == PON_RESIN && cfg->support_reset)
+			enable_irq_wake(cfg->bark_irq);
+	}
+
+	return rc;
+}
+
+static int
+qpnp_pon_config_input(struct qpnp_pon *pon, struct qpnp_pon_config *cfg)
+{
+	if (!pon->pon_input) {
+		pon->pon_input = input_allocate_device();
+		if (!pon->pon_input) {
+			dev_err(&pon->pdev->dev,
+				"Can't allocate pon input device\n");
+			return -ENOMEM;
+		}
+		pon->pon_input->name = "qpnp_pon";
+		pon->pon_input->phys = "qpnp_pon/input0";
+	}
+
+	input_set_capability(pon->pon_input, EV_KEY, cfg->key_code);
+
+	return 0;
+}
+
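+/*
+ * Illustrative DT child node (example values, not taken from this patch)
+ * of the kind parsed by qpnp_pon_config_init() below:
+ *
+ *	qcom,pon_1 {
+ *		qcom,pon-type = <0>;		(PON_KPDPWR)
+ *		qcom,pull-up = <1>;
+ *		linux,code = <116>;		(KEY_POWER)
+ *		qcom,support-reset = <1>;
+ *		qcom,s1-timer = <10256>;	(debounce, ms)
+ *		qcom,s2-timer = <2000>;		(bark-to-reset, ms)
+ *		qcom,s2-type = <1>;		(warm reset)
+ *	};
+ */
+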
+static int qpnp_pon_config_init(struct qpnp_pon *pon)
+{
+	int rc = 0, i = 0, pmic_wd_bark_irq;
+	struct device_node *pp = NULL;
+	struct qpnp_pon_config *cfg;
+	uint pmic_type;
+	uint revid_rev4;
+
+	if (!pon->num_pon_config) {
+		dev_dbg(&pon->pdev->dev, "num_pon_config: %d\n",
+			pon->num_pon_config);
+		return 0;
+	}
+
+	/* iterate through the list of pon configs */
+	for_each_available_child_of_node(pon->pdev->dev.of_node, pp) {
+		if (!of_find_property(pp, "qcom,pon-type", NULL))
+			continue;
+
+		cfg = &pon->pon_cfg[i++];
+
+		rc = of_property_read_u32(pp, "qcom,pon-type", &cfg->pon_type);
+		if (rc) {
+			dev_err(&pon->pdev->dev, "PON type not specified\n");
+			return rc;
+		}
+
+		switch (cfg->pon_type) {
+		case PON_KPDPWR:
+			cfg->state_irq = platform_get_irq_byname(pon->pdev,
+								 "kpdpwr");
+			if (cfg->state_irq < 0) {
+				dev_err(&pon->pdev->dev,
+					"Unable to get kpdpwr irq\n");
+				return cfg->state_irq;
+			}
+
+			rc = of_property_read_u32(pp, "qcom,support-reset",
+							&cfg->support_reset);
+
+			if (rc) {
+				if (rc == -EINVAL) {
+					dev_dbg(&pon->pdev->dev,
+						"'qcom,support-reset' DT property doesn't exist\n");
+				} else {
+					dev_err(&pon->pdev->dev,
+						"Unable to read 'qcom,support-reset'\n");
+					return rc;
+				}
+			} else {
+				cfg->config_reset = true;
+			}
+
+			cfg->use_bark = of_property_read_bool(pp,
+							"qcom,use-bark");
+			if (cfg->use_bark) {
+				cfg->bark_irq
+					= platform_get_irq_byname(pon->pdev,
+								"kpdpwr-bark");
+				if (cfg->bark_irq < 0) {
+					dev_err(&pon->pdev->dev,
+					"Unable to get kpdpwr-bark irq\n");
+					return cfg->bark_irq;
+				}
+			}
+
+			/*
+			 * If the value read from REVISION2 register is 0x00,
+			 * then there is a single register to control s2 reset.
+			 * Otherwise there are separate registers for s2 reset
+			 * type and s2 reset enable.
+			 */
+			if (pon->pon_ver == QPNP_PON_GEN1_V1) {
+				cfg->s2_cntl_addr = cfg->s2_cntl2_addr =
+					QPNP_PON_KPDPWR_S2_CNTL(pon);
+			} else {
+				cfg->s2_cntl_addr =
+					QPNP_PON_KPDPWR_S2_CNTL(pon);
+				cfg->s2_cntl2_addr =
+					QPNP_PON_KPDPWR_S2_CNTL2(pon);
+			}
+
+			break;
+		case PON_RESIN:
+			cfg->state_irq = platform_get_irq_byname(pon->pdev,
+								 "resin");
+			if (cfg->state_irq < 0) {
+				dev_err(&pon->pdev->dev,
+					"Unable to get resin irq\n");
+				return cfg->state_irq;
+			}
+
+			rc = of_property_read_u32(pp, "qcom,support-reset",
+							&cfg->support_reset);
+
+			if (rc) {
+				if (rc == -EINVAL) {
+					dev_dbg(&pon->pdev->dev,
+						"'qcom,support-reset' DT property doesn't exist\n");
+				} else {
+					dev_err(&pon->pdev->dev,
+						"Unable to read 'qcom,support-reset'\n");
+					return rc;
+				}
+			} else {
+				cfg->config_reset = true;
+			}
+
+			cfg->use_bark = of_property_read_bool(pp,
+							"qcom,use-bark");
+
+			rc = regmap_read(pon->regmap, PMIC_VERSION_REG,
+					 &pmic_type);
+
+			if (rc) {
+				dev_err(&pon->pdev->dev,
+					"Unable to read PMIC type\n");
+				return rc;
+			}
+
+			if (pmic_type == PMIC_VER_8941) {
+
+				rc = regmap_read(pon->regmap,
+						 PMIC_VERSION_REV4_REG,
+						 &revid_rev4);
+
+				if (rc) {
+					dev_err(&pon->pdev->dev,
+					"Unable to read PMIC revision ID\n");
+					return rc;
+				}
+
+				/*
+				 * PM8941 V3 and later do not have the hardware
+				 * bug, so bark support is not required from
+				 * PMIC version 3.0 onwards.
+				 */
+				if (!(revid_rev4 == PMIC8941_V1_REV4 ||
+					revid_rev4 == PMIC8941_V2_REV4)) {
+					cfg->support_reset = false;
+					cfg->use_bark = false;
+				}
+			}
+
+			if (cfg->use_bark) {
+				cfg->bark_irq
+					= platform_get_irq_byname(pon->pdev,
+								"resin-bark");
+				if (cfg->bark_irq < 0) {
+					dev_err(&pon->pdev->dev,
+					"Unable to get resin-bark irq\n");
+					return cfg->bark_irq;
+				}
+			}
+
+			if (pon->pon_ver == QPNP_PON_GEN1_V1) {
+				cfg->s2_cntl_addr = cfg->s2_cntl2_addr =
+					QPNP_PON_RESIN_S2_CNTL(pon);
+			} else {
+				cfg->s2_cntl_addr =
+					QPNP_PON_RESIN_S2_CNTL(pon);
+				cfg->s2_cntl2_addr =
+					QPNP_PON_RESIN_S2_CNTL2(pon);
+			}
+
+			break;
+		case PON_CBLPWR:
+			cfg->state_irq = platform_get_irq_byname(pon->pdev,
+								 "cblpwr");
+			if (cfg->state_irq < 0) {
+				dev_err(&pon->pdev->dev,
+						"Unable to get cblpwr irq\n");
+				return cfg->state_irq;
+			}
+			break;
+		case PON_KPDPWR_RESIN:
+			rc = of_property_read_u32(pp, "qcom,support-reset",
+							&cfg->support_reset);
+
+			if (rc) {
+				if (rc == -EINVAL) {
+					dev_dbg(&pon->pdev->dev,
+						"'qcom,support-reset' DT property doesn't exist\n");
+				} else {
+					dev_err(&pon->pdev->dev,
+						"Unable to read 'qcom,support-reset'\n");
+					return rc;
+				}
+			} else {
+				cfg->config_reset = true;
+			}
+
+			cfg->use_bark = of_property_read_bool(pp,
+							"qcom,use-bark");
+			if (cfg->use_bark) {
+				cfg->bark_irq
+					= platform_get_irq_byname(pon->pdev,
+								"kpdpwr-resin-bark");
+				if (cfg->bark_irq < 0) {
+					dev_err(&pon->pdev->dev,
+					"Unable to get kpdpwr-resin-bark irq\n");
+					return cfg->bark_irq;
+				}
+			}
+
+			if (pon->pon_ver == QPNP_PON_GEN1_V1) {
+				cfg->s2_cntl_addr = cfg->s2_cntl2_addr =
+				QPNP_PON_KPDPWR_RESIN_S2_CNTL(pon);
+			} else {
+				cfg->s2_cntl_addr =
+				QPNP_PON_KPDPWR_RESIN_S2_CNTL(pon);
+				cfg->s2_cntl2_addr =
+				QPNP_PON_KPDPWR_RESIN_S2_CNTL2(pon);
+			}
+
+			break;
+		default:
+			dev_err(&pon->pdev->dev, "PON RESET %d not supported\n",
+								cfg->pon_type);
+			return -EINVAL;
+		}
+
+		if (cfg->support_reset) {
+			/*
+			 * Get the reset parameters (bark debounce time and
+			 * reset debounce time) for the reset line.
+			 */
+			rc = of_property_read_u32(pp, "qcom,s1-timer",
+							&cfg->s1_timer);
+			if (rc) {
+				dev_err(&pon->pdev->dev,
+					"Unable to read s1-timer\n");
+				return rc;
+			}
+			if (cfg->s1_timer > QPNP_PON_S1_TIMER_MAX) {
+				dev_err(&pon->pdev->dev,
+					"Incorrect S1 debounce time\n");
+				return -EINVAL;
+			}
+			rc = of_property_read_u32(pp, "qcom,s2-timer",
+							&cfg->s2_timer);
+			if (rc) {
+				dev_err(&pon->pdev->dev,
+					"Unable to read s2-timer\n");
+				return rc;
+			}
+			if (cfg->s2_timer > QPNP_PON_S2_TIMER_MAX) {
+				dev_err(&pon->pdev->dev,
+					"Incorrect S2 debounce time\n");
+				return -EINVAL;
+			}
+			rc = of_property_read_u32(pp, "qcom,s2-type",
+							&cfg->s2_type);
+			if (rc) {
+				dev_err(&pon->pdev->dev,
+					"Unable to read s2-type\n");
+				return rc;
+			}
+			if (cfg->s2_type > QPNP_PON_RESET_TYPE_MAX) {
+				dev_err(&pon->pdev->dev,
+					"Incorrect reset type specified\n");
+				return -EINVAL;
+			}
+		}
+		/*
+		 * Get the standard-key parameters. This might not be
+		 * specified if there is no key mapping on the reset line.
+		 */
+		rc = of_property_read_u32(pp, "linux,code", &cfg->key_code);
+		if (rc && rc != -EINVAL) {
+			dev_err(&pon->pdev->dev, "Unable to read key-code\n");
+			return rc;
+		}
+		/* Register key configuration */
+		if (cfg->key_code) {
+			rc = qpnp_pon_config_input(pon, cfg);
+			if (rc < 0)
+				return rc;
+		}
+		/* get the pull-up configuration */
+		rc = of_property_read_u32(pp, "qcom,pull-up", &cfg->pull_up);
+		if (rc && rc != -EINVAL) {
+			dev_err(&pon->pdev->dev, "Unable to read pull-up\n");
+			return rc;
+		}
+	}
+
+	pmic_wd_bark_irq = platform_get_irq_byname(pon->pdev, "pmic-wd-bark");
+	/* request the pmic-wd-bark irq only if it is defined */
+	if (pmic_wd_bark_irq >= 0) {
+		rc = devm_request_irq(&pon->pdev->dev, pmic_wd_bark_irq,
+					qpnp_pmic_wd_bark_irq,
+					IRQF_TRIGGER_RISING,
+					"qpnp_pmic_wd_bark", pon);
+		if (rc < 0) {
+			dev_err(&pon->pdev->dev,
+				"Can't request IRQ %d\n",
+					pmic_wd_bark_irq);
+			goto free_input_dev;
+		}
+	}
+
+	/* register the input device */
+	if (pon->pon_input) {
+		rc = input_register_device(pon->pon_input);
+		if (rc) {
+			dev_err(&pon->pdev->dev,
+				"Can't register pon key: %d\n", rc);
+			goto free_input_dev;
+		}
+	}
+
+	for (i = 0; i < pon->num_pon_config; i++) {
+		cfg = &pon->pon_cfg[i];
+		/* Configure the pull-up */
+		rc = qpnp_config_pull(pon, cfg);
+		if (rc) {
+			dev_err(&pon->pdev->dev, "Unable to config pull-up\n");
+			goto unreg_input_dev;
+		}
+
+		if (cfg->config_reset) {
+			/* Configure the reset-configuration */
+			if (cfg->support_reset) {
+				rc = qpnp_config_reset(pon, cfg);
+				if (rc) {
+					dev_err(&pon->pdev->dev,
+						"Unable to config pon reset\n");
+					goto unreg_input_dev;
+				}
+			} else {
+				if (cfg->pon_type != PON_CBLPWR) {
+					/* disable S2 reset */
+					rc = qpnp_pon_masked_write(pon,
+						cfg->s2_cntl2_addr,
+						QPNP_PON_S2_CNTL_EN, 0);
+					if (rc) {
+						dev_err(&pon->pdev->dev,
+							"Unable to disable S2 reset\n");
+						goto unreg_input_dev;
+					}
+				}
+			}
+		}
+
+		rc = qpnp_pon_request_irqs(pon, cfg);
+		if (rc) {
+			dev_err(&pon->pdev->dev, "Unable to request IRQs\n");
+			goto unreg_input_dev;
+		}
+	}
+
+	device_init_wakeup(&pon->pdev->dev, 1);
+
+	return rc;
+
+unreg_input_dev:
+	if (pon->pon_input) {
+		input_unregister_device(pon->pon_input);
+		/* unregister drops our reference; don't also free below */
+		pon->pon_input = NULL;
+	}
+free_input_dev:
+	if (pon->pon_input)
+		input_free_device(pon->pon_input);
+	return rc;
+}
+
+static int pon_spare_regulator_enable(struct regulator_dev *rdev)
+{
+	int rc = 0;
+	u8 value;
+	struct pon_regulator *pon_reg = rdev_get_drvdata(rdev);
+
+	pr_debug("reg %s enable addr: %x bit: %d\n", rdev->desc->name,
+		pon_reg->addr, pon_reg->bit);
+
+	value = BIT(pon_reg->bit) & 0xFF;
+	rc = qpnp_pon_masked_write(pon_reg->pon, pon_reg->pon->base +
+				pon_reg->addr, value, value);
+	if (rc)
+		dev_err(&pon_reg->pon->pdev->dev, "Unable to write to %x\n",
+			pon_reg->pon->base + pon_reg->addr);
+	else
+		pon_reg->enabled = true;
+	return rc;
+}
+
+static int pon_spare_regulator_disable(struct regulator_dev *rdev)
+{
+	int rc = 0;
+	u8 mask;
+	struct pon_regulator *pon_reg = rdev_get_drvdata(rdev);
+
+	pr_debug("reg %s disable addr: %x bit: %d\n", rdev->desc->name,
+		pon_reg->addr, pon_reg->bit);
+
+	mask = BIT(pon_reg->bit) & 0xFF;
+	rc = qpnp_pon_masked_write(pon_reg->pon, pon_reg->pon->base +
+				pon_reg->addr, mask, 0);
+	if (rc)
+		dev_err(&pon_reg->pon->pdev->dev, "Unable to write to %x\n",
+			pon_reg->pon->base + pon_reg->addr);
+	else
+		pon_reg->enabled = false;
+	return rc;
+}
+
+static int pon_spare_regulator_is_enable(struct regulator_dev *rdev)
+{
+	struct pon_regulator *pon_reg = rdev_get_drvdata(rdev);
+
+	return pon_reg->enabled;
+}
+
+static const struct regulator_ops pon_spare_reg_ops = {
+	.enable		= pon_spare_regulator_enable,
+	.disable	= pon_spare_regulator_disable,
+	.is_enabled	= pon_spare_regulator_is_enable,
+};
+
+static int pon_regulator_init(struct qpnp_pon *pon)
+{
+	int rc = 0, i = 0;
+	struct regulator_init_data *init_data;
+	struct regulator_config reg_cfg = {};
+	struct device_node *node = NULL;
+	struct device *dev = &pon->pdev->dev;
+	struct pon_regulator *pon_reg;
+
+	if (!pon->num_pon_reg)
+		return 0;
+
+	pon->pon_reg_cfg = devm_kcalloc(dev, pon->num_pon_reg,
+					sizeof(*(pon->pon_reg_cfg)),
+					GFP_KERNEL);
+
+	if (!pon->pon_reg_cfg)
+		return -ENOMEM;
+
+	for_each_available_child_of_node(dev->of_node, node) {
+		if (!of_find_property(node, "regulator-name", NULL))
+			continue;
+
+		pon_reg = &pon->pon_reg_cfg[i++];
+		pon_reg->pon = pon;
+
+		rc = of_property_read_u32(node, "qcom,pon-spare-reg-addr",
+			&pon_reg->addr);
+		if (rc) {
+			dev_err(dev, "Unable to read address for regulator, rc=%d\n",
+				rc);
+			return rc;
+		}
+
+		rc = of_property_read_u32(node, "qcom,pon-spare-reg-bit",
+			&pon_reg->bit);
+		if (rc) {
+			dev_err(dev, "Unable to read bit for regulator, rc=%d\n",
+				rc);
+			return rc;
+		}
+
+		init_data = of_get_regulator_init_data(dev, node,
+				&pon_reg->rdesc);
+		if (!init_data) {
+			dev_err(dev, "regulator init data is missing\n");
+			return -EINVAL;
+		}
+		init_data->constraints.valid_ops_mask
+			|= REGULATOR_CHANGE_STATUS;
+
+		if (!init_data->constraints.name) {
+			dev_err(dev, "regulator-name is missing\n");
+			return -EINVAL;
+		}
+
+		pon_reg->rdesc.owner = THIS_MODULE;
+		pon_reg->rdesc.type = REGULATOR_VOLTAGE;
+		pon_reg->rdesc.ops = &pon_spare_reg_ops;
+		pon_reg->rdesc.name = init_data->constraints.name;
+
+		reg_cfg.dev = dev;
+		reg_cfg.init_data = init_data;
+		reg_cfg.driver_data = pon_reg;
+		reg_cfg.of_node = node;
+
+		pon_reg->rdev = regulator_register(&pon_reg->rdesc, &reg_cfg);
+		if (IS_ERR(pon_reg->rdev)) {
+			rc = PTR_ERR(pon_reg->rdev);
+			pon_reg->rdev = NULL;
+			if (rc != -EPROBE_DEFER)
+				dev_err(dev, "regulator_register failed, rc=%d\n",
+					rc);
+			return rc;
+		}
+	}
+	return rc;
+}
+
+static bool smpl_en;
+
+static int qpnp_pon_smpl_en_get(char *buf, const struct kernel_param *kp)
+{
+	bool enabled = false;
+	int rc;
+
+	rc = qpnp_pon_get_trigger_config(PON_SMPL, &enabled);
+	if (rc < 0)
+		return rc;
+
+	return snprintf(buf, QPNP_PON_BUFFER_SIZE, "%d", enabled);
+}
+
+static int qpnp_pon_smpl_en_set(const char *val,
+					const struct kernel_param *kp)
+{
+	int rc;
+
+	rc = param_set_bool(val, kp);
+	if (rc < 0) {
+		pr_err("Unable to set smpl_en rc=%d\n", rc);
+		return rc;
+	}
+
+	rc = qpnp_pon_trigger_config(PON_SMPL, *(bool *)kp->arg);
+	return rc;
+}
+
+static const struct kernel_param_ops smpl_en_ops = {
+	.set = qpnp_pon_smpl_en_set,
+	.get = qpnp_pon_smpl_en_get,
+};
+
+module_param_cb(smpl_en, &smpl_en_ops, &smpl_en, 0644);
+
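+/*
+ * Example: when this driver is built in, the parameter is exposed at
+ * /sys/module/qpnp_power_on/parameters/smpl_en and can be toggled at
+ * runtime to enable or disable the SMPL (sudden momentary power loss)
+ * trigger.
+ */
+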
+static bool dload_on_uvlo;
+
+static int qpnp_pon_debugfs_uvlo_dload_get(char *buf,
+		const struct kernel_param *kp)
+{
+	struct qpnp_pon *pon = sys_reset_dev;
+	int rc = 0;
+	uint reg;
+
+	if (!pon)
+		return -ENODEV;
+
+	rc = regmap_read(pon->regmap, QPNP_PON_XVDD_RB_SPARE(pon), &reg);
+	if (rc) {
+		dev_err(&pon->pdev->dev,
+			"Unable to read addr=%x, rc(%d)\n",
+			QPNP_PON_XVDD_RB_SPARE(pon), rc);
+		return rc;
+	}
+
+	return snprintf(buf, PAGE_SIZE, "%d",
+			!!(QPNP_PON_UVLO_DLOAD_EN & reg));
+}
+
+static int qpnp_pon_debugfs_uvlo_dload_set(const char *val,
+		const struct kernel_param *kp)
+{
+	struct qpnp_pon *pon = sys_reset_dev;
+	int rc = 0;
+	uint reg;
+
+	if (!pon)
+		return -ENODEV;
+
+	rc = param_set_bool(val, kp);
+	if (rc) {
+		pr_err("Unable to set bms_reset: %d\n", rc);
+		return rc;
+	}
+
+	rc = regmap_read(pon->regmap, QPNP_PON_XVDD_RB_SPARE(pon), &reg);
+	if (rc) {
+		dev_err(&pon->pdev->dev,
+			"Unable to read addr=%x, rc(%d)\n",
+			QPNP_PON_XVDD_RB_SPARE(pon), rc);
+		return rc;
+	}
+
+	reg &= ~QPNP_PON_UVLO_DLOAD_EN;
+	if (*(bool *)kp->arg)
+		reg |= QPNP_PON_UVLO_DLOAD_EN;
+
+	rc = regmap_write(pon->regmap, QPNP_PON_XVDD_RB_SPARE(pon), reg);
+	if (rc) {
+		dev_err(&pon->pdev->dev,
+			"Unable to write to addr=%hx, rc(%d)\n",
+				QPNP_PON_XVDD_RB_SPARE(pon), rc);
+		return rc;
+	}
+
+	return 0;
+}
+
+static const struct kernel_param_ops dload_on_uvlo_ops = {
+	.set = qpnp_pon_debugfs_uvlo_dload_set,
+	.get = qpnp_pon_debugfs_uvlo_dload_get,
+};
+
+module_param_cb(dload_on_uvlo, &dload_on_uvlo_ops, &dload_on_uvlo, 0644);
+
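+/*
+ * Like smpl_en above, dload_on_uvlo is exposed under
+ * /sys/module/qpnp_power_on/parameters/ and selects whether an
+ * under-voltage lockout (UVLO) event arms the download-mode bit in the
+ * XVDD spare register.
+ */
+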
+#if defined(CONFIG_DEBUG_FS)
+
+static int qpnp_pon_debugfs_uvlo_get(void *data, u64 *val)
+{
+	struct qpnp_pon *pon = (struct qpnp_pon *) data;
+
+	*val = pon->uvlo;
+
+	return 0;
+}
+
+static int qpnp_pon_debugfs_uvlo_set(void *data, u64 val)
+{
+	struct qpnp_pon *pon = (struct qpnp_pon *) data;
+
+	if (pon->pon_trigger_reason == PON_SMPL ||
+		pon->pon_power_off_reason == QPNP_POFF_REASON_UVLO)
+		panic("UVLO occurred\n");
+	pon->uvlo = val;
+
+	return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(qpnp_pon_debugfs_uvlo_fops, qpnp_pon_debugfs_uvlo_get,
+			qpnp_pon_debugfs_uvlo_set, "0x%02llx\n");
+
+static void qpnp_pon_debugfs_init(struct platform_device *pdev)
+{
+	struct qpnp_pon *pon = dev_get_drvdata(&pdev->dev);
+	struct dentry *ent;
+
+	pon->debugfs = debugfs_create_dir(dev_name(&pdev->dev), NULL);
+	if (!pon->debugfs) {
+		dev_err(&pon->pdev->dev,
+			"Unable to create debugfs directory\n");
+	} else {
+		ent = debugfs_create_file("uvlo_panic", 0644,
+				pon->debugfs, pon, &qpnp_pon_debugfs_uvlo_fops);
+		if (!ent)
+			dev_err(&pon->pdev->dev,
+				"Unable to create uvlo_panic debugfs file.\n");
+	}
+}
+
+static void qpnp_pon_debugfs_remove(struct platform_device *pdev)
+{
+	struct qpnp_pon *pon = dev_get_drvdata(&pdev->dev);
+
+	debugfs_remove_recursive(pon->debugfs);
+}
+
+#else
+
+static void qpnp_pon_debugfs_init(struct platform_device *pdev)
+{}
+
+static void qpnp_pon_debugfs_remove(struct platform_device *pdev)
+{}
+#endif
+
+static int read_gen2_pon_off_reason(struct qpnp_pon *pon, u16 *reason,
+					int *reason_index_offset)
+{
+	int rc;
+	uint reg, reg2;
+	u8 buf[2];
+
+	rc = regmap_read(pon->regmap,
+			QPNP_PON_OFF_REASON(pon),
+			 &reg);
+	if (rc) {
+		dev_err(&pon->pdev->dev, "Unable to read PON_OFF_REASON reg rc:%d\n",
+			rc);
+		return rc;
+	}
+
+	if (reg & QPNP_GEN2_POFF_SEQ) {
+		rc = regmap_read(pon->regmap,
+				QPNP_POFF_REASON1(pon),
+				&reg2);
+		if (rc) {
+			dev_err(&pon->pdev->dev, "Unable to read POFF_REASON1 reg rc:%d\n",
+				rc);
+			return rc;
+		}
+		*reason = (u8)reg2;
+		*reason_index_offset = 0;
+	} else if (reg & QPNP_GEN2_FAULT_SEQ) {
+		rc = regmap_bulk_read(pon->regmap,
+				QPNP_FAULT_REASON1(pon),
+				buf, 2);
+		if (rc) {
+			dev_err(&pon->pdev->dev, "Unable to read FAULT_REASON regs rc:%d\n",
+				rc);
+			return rc;
+		}
+		*reason = buf[0] | (buf[1] << 8);
+		*reason_index_offset = POFF_REASON_FAULT_OFFSET;
+	} else if (reg & QPNP_GEN2_S3_RESET_SEQ) {
+		rc = regmap_read(pon->regmap,
+				QPNP_S3_RESET_REASON(pon),
+				&reg2);
+		if (rc) {
+			dev_err(&pon->pdev->dev, "Unable to read S3_RESET_REASON reg rc:%d\n",
+				rc);
+			return rc;
+		}
+		*reason = (u8)reg2;
+		*reason_index_offset = POFF_REASON_S3_RESET_OFFSET;
+	}
+
+	return 0;
+}
+
+static int qpnp_pon_probe(struct platform_device *pdev)
+{
+	struct qpnp_pon *pon;
+	unsigned int base;
+	struct device_node *node = NULL;
+	u32 delay = 0, s3_debounce = 0;
+	int rc, sys_reset, index;
+	int reason_index_offset = 0;
+	u8 buf[2];
+	uint pon_sts = 0;
+	u16 poff_sts = 0;
+	const char *s3_src;
+	u8 s3_src_reg;
+	unsigned long flags;
+	uint temp = 0;
+
+	pon = devm_kzalloc(&pdev->dev, sizeof(struct qpnp_pon), GFP_KERNEL);
+	if (!pon)
+		return -ENOMEM;
+
+	pon->regmap = dev_get_regmap(pdev->dev.parent, NULL);
+	if (!pon->regmap) {
+		dev_err(&pdev->dev, "Couldn't get parent's regmap\n");
+		return -EINVAL;
+	}
+
+	sys_reset = of_property_read_bool(pdev->dev.of_node,
+						"qcom,system-reset");
+	if (sys_reset && sys_reset_dev) {
+		dev_err(&pdev->dev,
+			"qcom,system-reset property can only be specified for one device on the system\n");
+		return -EINVAL;
+	} else if (sys_reset) {
+		sys_reset_dev = pon;
+	}
+
+	pon->pdev = pdev;
+
+	rc = of_property_read_u32(pdev->dev.of_node, "reg", &base);
+	if (rc < 0) {
+		dev_err(&pdev->dev,
+			"Couldn't find reg in node = %s rc = %d\n",
+			pdev->dev.of_node->full_name, rc);
+		return rc;
+	}
+	pon->base = base;
+
+	/* get the total number of pon configurations */
+	for_each_available_child_of_node(pdev->dev.of_node, node) {
+		if (of_find_property(node, "regulator-name", NULL)) {
+			pon->num_pon_reg++;
+		} else if (of_find_property(node, "qcom,pon-type", NULL)) {
+			pon->num_pon_config++;
+		} else {
+			pr_err("Unknown sub-node\n");
+			return -EINVAL;
+		}
+	}
+
+	pr_debug("PON@SID %d: num_pon_config: %d num_pon_reg: %d\n",
+		to_spmi_device(pon->pdev->dev.parent)->usid,
+		pon->num_pon_config, pon->num_pon_reg);
+
+	rc = pon_regulator_init(pon);
+	if (rc) {
+		dev_err(&pdev->dev, "Error in pon_regulator_init rc: %d\n",
+			rc);
+		return rc;
+	}
+
+	if (!pon->num_pon_config) {
+		/* no PON configuration specified; nothing to register */
+		dev_info(&pdev->dev, "No PON config. specified\n");
+	} else {
+		pon->pon_cfg = devm_kcalloc(&pdev->dev, pon->num_pon_config,
+				sizeof(struct qpnp_pon_config), GFP_KERNEL);
+		if (!pon->pon_cfg)
+			return -ENOMEM;
+	}
+
+	/* Read PON_PERPH_SUBTYPE register to get PON type */
+	rc = regmap_read(pon->regmap,
+				QPNP_PON_PERPH_SUBTYPE(pon),
+				&temp);
+	if (rc) {
+		dev_err(&pon->pdev->dev,
+			"Unable to read PON_PERPH_SUBTYPE register rc: %d\n",
+			rc);
+		return rc;
+	}
+	pon->subtype = temp;
+
+	/* Read the PON revision to determine the register layout */
+	rc = regmap_read(pon->regmap,
+			QPNP_PON_REVISION2(pon), &temp);
+	if (rc) {
+		dev_err(&pon->pdev->dev,
+			"Unable to read addr=%x, rc(%d)\n",
+			QPNP_PON_REVISION2(pon), rc);
+		return rc;
+	}
+
+	pon->pon_ver = temp;
+	if (is_pon_gen1(pon)) {
+		if (pon->pon_ver == 0)
+			pon->pon_ver = QPNP_PON_GEN1_V1;
+		else
+			pon->pon_ver = QPNP_PON_GEN1_V2;
+	} else if (is_pon_gen2(pon)) {
+		pon->pon_ver = QPNP_PON_GEN2;
+	} else if (pon->subtype == PON_1REG) {
+		pon->pon_ver = QPNP_PON_GEN1_V2;
+	} else {
+		dev_err(&pon->pdev->dev,
+			"Invalid PON_PERPH_SUBTYPE value %x\n",
+			pon->subtype);
+		return -EINVAL;
+	}
+
+	pr_debug("%s: pon_subtype=%x, pon_version=%x\n", __func__,
+			pon->subtype, pon->pon_ver);
+
+	rc = qpnp_pon_store_and_clear_warm_reset(pon);
+	if (rc) {
+		dev_err(&pon->pdev->dev,
+			"Unable to store/clear WARM_RESET_REASONx registers rc: %d\n",
+			rc);
+		return rc;
+	}
+
+	/* PON reason */
+	rc = regmap_read(pon->regmap, QPNP_PON_REASON1(pon), &pon_sts);
+	if (rc) {
+		dev_err(&pon->pdev->dev,
+			"Unable to read PON_REASON1 reg rc: %d\n",
+			rc);
+		return rc;
+	}
+
+	if (sys_reset)
+		boot_reason = ffs(pon_sts);
+
+	index = ffs(pon_sts) - 1;
+	cold_boot = !qpnp_pon_is_warm_reset();
+	if (index >= ARRAY_SIZE(qpnp_pon_reason) || index < 0) {
+		dev_info(&pon->pdev->dev,
+			"PMIC@SID%d Power-on reason: Unknown and '%s' boot\n",
+			to_spmi_device(pon->pdev->dev.parent)->usid,
+			 cold_boot ? "cold" : "warm");
+	} else {
+		pon->pon_trigger_reason = index;
+		dev_info(&pon->pdev->dev,
+			"PMIC@SID%d Power-on reason: %s and '%s' boot\n",
+			to_spmi_device(pon->pdev->dev.parent)->usid,
+			 qpnp_pon_reason[index],
+			cold_boot ? "cold" : "warm");
+	}
+
+	/* POFF reason */
+	if (!is_pon_gen1(pon) && pon->subtype != PON_1REG) {
+		rc = read_gen2_pon_off_reason(pon, &poff_sts,
+						&reason_index_offset);
+		if (rc)
+			return rc;
+	} else {
+		rc = regmap_bulk_read(pon->regmap, QPNP_POFF_REASON1(pon),
+			buf, 2);
+		if (rc) {
+			dev_err(&pon->pdev->dev, "Unable to read POFF_REASON regs rc:%d\n",
+				rc);
+			return rc;
+		}
+		poff_sts = buf[0] | (buf[1] << 8);
+	}
+	index = ffs(poff_sts) - 1 + reason_index_offset;
+	if (index >= ARRAY_SIZE(qpnp_poff_reason) || index < 0) {
+		dev_info(&pon->pdev->dev,
+				"PMIC@SID%d: Unknown power-off reason\n",
+				to_spmi_device(pon->pdev->dev.parent)->usid);
+	} else {
+		pon->pon_power_off_reason = index;
+		dev_info(&pon->pdev->dev,
+				"PMIC@SID%d: Power-off reason: %s\n",
+				to_spmi_device(pon->pdev->dev.parent)->usid,
+				qpnp_poff_reason[index]);
+	}
+
+	if (pon->pon_trigger_reason == PON_SMPL ||
+		pon->pon_power_off_reason == QPNP_POFF_REASON_UVLO) {
+		if (of_property_read_bool(pdev->dev.of_node,
+						"qcom,uvlo-panic"))
+			panic("UVLO occurred");
+	}
+
+	/* program s3 debounce */
+	rc = of_property_read_u32(pon->pdev->dev.of_node,
+				"qcom,s3-debounce", &s3_debounce);
+	if (rc) {
+		if (rc != -EINVAL) {
+			dev_err(&pon->pdev->dev,
+				"Unable to read s3 debounce rc:%d\n",
+				rc);
+			return rc;
+		}
+	} else {
+		if (s3_debounce > QPNP_PON_S3_TIMER_SECS_MAX) {
+			dev_info(&pon->pdev->dev,
+				"Exceeded S3 max value, set it to max\n");
+			s3_debounce = QPNP_PON_S3_TIMER_SECS_MAX;
+		}
+
+		/* 0 is a special value to indicate instant s3 reset */
+		if (s3_debounce != 0)
+			s3_debounce = ilog2(s3_debounce);
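+		/*
+		 * The register encodes the delay as a power-of-two number
+		 * of seconds (e.g. 8s maps to code 3 via ilog2); values
+		 * that are not a power of two are rounded down.
+		 */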
+
+		/* s3 debounce is SEC_ACCESS register */
+		rc = qpnp_pon_masked_write(pon, QPNP_PON_SEC_ACCESS(pon),
+					0xFF, QPNP_PON_SEC_UNLOCK);
+		if (rc) {
+			dev_err(&pdev->dev, "Unable to do SEC_ACCESS rc:%d\n",
+				rc);
+			return rc;
+		}
+
+		rc = qpnp_pon_masked_write(pon, QPNP_PON_S3_DBC_CTL(pon),
+				QPNP_PON_S3_DBC_DELAY_MASK, s3_debounce);
+		if (rc) {
+			dev_err(&pdev->dev,
+				"Unable to set S3 debounce rc:%d\n",
+				rc);
+			return rc;
+		}
+	}
+
+	/* program s3 source */
+	s3_src = "kpdpwr-and-resin";
+	rc = of_property_read_string(pon->pdev->dev.of_node,
+				"qcom,s3-src", &s3_src);
+	if (rc && rc != -EINVAL) {
+		dev_err(&pon->pdev->dev, "Unable to read s3 source rc: %d\n",
+			rc);
+		return rc;
+	}
+
+	if (!strcmp(s3_src, "kpdpwr"))
+		s3_src_reg = QPNP_PON_S3_SRC_KPDPWR;
+	else if (!strcmp(s3_src, "resin"))
+		s3_src_reg = QPNP_PON_S3_SRC_RESIN;
+	else if (!strcmp(s3_src, "kpdpwr-or-resin"))
+		s3_src_reg = QPNP_PON_S3_SRC_KPDPWR_OR_RESIN;
+	else /* default combination */
+		s3_src_reg = QPNP_PON_S3_SRC_KPDPWR_AND_RESIN;
+
+	/*
+	 * S3 source is a write-once register. If it has already been
+	 * configured by the bootloader, this write will have no effect.
+	 */
+	rc = qpnp_pon_masked_write(pon, QPNP_PON_S3_SRC(pon),
+			QPNP_PON_S3_SRC_MASK, s3_src_reg);
+	if (rc) {
+		dev_err(&pdev->dev, "Unable to program s3 source rc: %d\n",
+			rc);
+		return rc;
+	}
+
+	dev_set_drvdata(&pdev->dev, pon);
+
+	INIT_DELAYED_WORK(&pon->bark_work, bark_work_func);
+
+	/* register the PON configurations */
+	rc = qpnp_pon_config_init(pon);
+	if (rc) {
+		dev_err(&pdev->dev,
+			"Unable to initialize PON configurations rc: %d\n", rc);
+		return rc;
+	}
+
+	rc = of_property_read_u32(pon->pdev->dev.of_node,
+				"qcom,pon-dbc-delay", &delay);
+	if (rc) {
+		if (rc != -EINVAL) {
+			dev_err(&pdev->dev,
+				"Unable to read debounce delay rc: %d\n", rc);
+			return rc;
+		}
+	} else {
+		rc = qpnp_pon_set_dbc(pon, delay);
+		if (rc) {
+			dev_err(&pdev->dev,
+				"Unable to set PON debounce delay rc=%d\n", rc);
+			return rc;
+		}
+	}
+	rc = qpnp_pon_get_dbc(pon, &pon->dbc_time_us);
+	if (rc) {
+		dev_err(&pdev->dev,
+			"Unable to get PON debounce delay rc=%d\n", rc);
+		return rc;
+	}
+
+	pon->kpdpwr_dbc_enable = of_property_read_bool(pon->pdev->dev.of_node,
+					"qcom,kpdpwr-sw-debounce");
+
+	rc = of_property_read_u32(pon->pdev->dev.of_node,
+				"qcom,warm-reset-poweroff-type",
+				&pon->warm_reset_poff_type);
+	if (rc) {
+		if (rc != -EINVAL) {
+			dev_err(&pdev->dev, "Unable to read warm reset poweroff type rc: %d\n",
+				rc);
+			return rc;
+		}
+		pon->warm_reset_poff_type = -EINVAL;
+	} else if (pon->warm_reset_poff_type <= PON_POWER_OFF_RESERVED ||
+			pon->warm_reset_poff_type >= PON_POWER_OFF_MAX_TYPE) {
+		dev_err(&pdev->dev, "Invalid warm-reset-poweroff-type\n");
+		pon->warm_reset_poff_type = -EINVAL;
+	}
+
+	rc = of_property_read_u32(pon->pdev->dev.of_node,
+				"qcom,hard-reset-poweroff-type",
+				&pon->hard_reset_poff_type);
+	if (rc) {
+		if (rc != -EINVAL) {
+			dev_err(&pdev->dev, "Unable to read hard reset poweroff type rc: %d\n",
+				rc);
+			return rc;
+		}
+		pon->hard_reset_poff_type = -EINVAL;
+	} else if (pon->hard_reset_poff_type <= PON_POWER_OFF_RESERVED ||
+			pon->hard_reset_poff_type >= PON_POWER_OFF_MAX_TYPE) {
+		dev_err(&pdev->dev, "Invalid hard-reset-poweroff-type\n");
+		pon->hard_reset_poff_type = -EINVAL;
+	}
+
+	rc = of_property_read_u32(pon->pdev->dev.of_node,
+				"qcom,shutdown-poweroff-type",
+				&pon->shutdown_poff_type);
+	if (rc) {
+		if (rc != -EINVAL) {
+			dev_err(&pdev->dev, "Unable to read shutdown poweroff type rc: %d\n",
+				rc);
+			return rc;
+		}
+		pon->shutdown_poff_type = -EINVAL;
+	} else if (pon->shutdown_poff_type <= PON_POWER_OFF_RESERVED ||
+			pon->shutdown_poff_type >= PON_POWER_OFF_MAX_TYPE) {
+		dev_err(&pdev->dev, "Invalid shutdown-poweroff-type\n");
+		pon->shutdown_poff_type = -EINVAL;
+	}
+
+	rc = device_create_file(&pdev->dev, &dev_attr_debounce_us);
+	if (rc) {
+		dev_err(&pdev->dev, "sysfs file creation failed rc: %d\n", rc);
+		return rc;
+	}
+
+	if (of_property_read_bool(pdev->dev.of_node,
+					"qcom,secondary-pon-reset")) {
+		if (sys_reset) {
+			dev_err(&pdev->dev,
+				"qcom,system-reset property shouldn't be used along with qcom,secondary-pon-reset property\n");
+			return -EINVAL;
+		}
+		spin_lock_irqsave(&spon_list_slock, flags);
+		list_add(&pon->list, &spon_dev_list);
+		spin_unlock_irqrestore(&spon_list_slock, flags);
+		pon->is_spon = true;
+	}
+
+	/* configure whether to store the hard reset reason */
+	pon->store_hard_reset_reason = of_property_read_bool(pdev->dev.of_node,
+					"qcom,store-hard-reset-reason");
+
+	qpnp_pon_debugfs_init(pdev);
+	return 0;
+}
+
+static int qpnp_pon_remove(struct platform_device *pdev)
+{
+	struct qpnp_pon *pon = dev_get_drvdata(&pdev->dev);
+	unsigned long flags;
+
+	device_remove_file(&pdev->dev, &dev_attr_debounce_us);
+
+	cancel_delayed_work_sync(&pon->bark_work);
+
+	if (pon->pon_input)
+		input_unregister_device(pon->pon_input);
+	qpnp_pon_debugfs_remove(pdev);
+	if (pon->is_spon) {
+		spin_lock_irqsave(&spon_list_slock, flags);
+		list_del(&pon->list);
+		spin_unlock_irqrestore(&spon_list_slock, flags);
+	}
+	return 0;
+}
+
+static const struct of_device_id spmi_match_table[] = {
+	{ .compatible = "qcom,qpnp-power-on", },
+	{}
+};
+
+static struct platform_driver qpnp_pon_driver = {
+	.driver		= {
+		.name		= "qcom,qpnp-power-on",
+		.of_match_table	= spmi_match_table,
+	},
+	.probe		= qpnp_pon_probe,
+	.remove		= qpnp_pon_remove,
+};
+
+static int __init qpnp_pon_init(void)
+{
+	return platform_driver_register(&qpnp_pon_driver);
+}
+subsys_initcall(qpnp_pon_init);
+
+static void __exit qpnp_pon_exit(void)
+{
+	platform_driver_unregister(&qpnp_pon_driver);
+}
+module_exit(qpnp_pon_exit);
+
+MODULE_DESCRIPTION("QPNP PMIC POWER-ON driver");
+MODULE_LICENSE("GPL v2");
diff -Nruw linux-4.4.115-fbx/drivers/input/misc/vl53L0./Makefile linux-4.4.115-fbx/drivers/input/misc/vl53L0/Makefile
--- linux-4.4.115-fbx/drivers/input/misc/vl53L0./Makefile	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/input/misc/vl53L0/Makefile	2019-01-22 16:16:23.999250898 +0100
@@ -0,0 +1,25 @@
+#
+# Makefile for the vl53L0 drivers.
+#
+
+# Each configuration option enables a list of files.
+#FEATURE_USE_CCI := false
+FEATURE_USE_CCI := true
+
+ifeq ($(FEATURE_USE_CCI), true)
+ccflags-y	+= -Idrivers/input/misc/vl53L0/inc -DCAMERA_CCI
+else
+ccflags-y	+= -Idrivers/input/misc/vl53L0/inc
+endif
+
+ccflags-y += -Idrivers/media/platform/msm/camera_v2/sensor/io
+ccflags-y += -Idrivers/media/platform/msm/camera_v2
+ccflags-y += -Idrivers/media/platform/msm/camera_v2/common
+ccflags-y += -Idrivers/media/platform/msm/camera_v2/sensor/cci
+obj-$(CONFIG_INPUT_STMVL53L0)			+= stmvl53l0.o
+stmvl53l0-objs := stmvl53l0_module.o stmvl53l0_module-i2c.o \
+	stmvl53l0_module-cci.o src/vl53l0_api_calibration.o \
+	src/vl53l0_api_core.o src/vl53l0_api_histogram.o \
+	src/vl53l0_api_ranging.o src/vl53l0_api_strings.o \
+	src/vl53l0_api.o src/vl53l0_platform.o src/vl53l0_i2c_platform.o \
+	src/vl53l0_port_i2c.o src/vl53l010_api.o src/vl53l010_tuning.o
diff -Nruw linux-4.4.115-fbx/drivers/input/touchscreen/st./fts_lib/Makefile linux-4.4.115-fbx/drivers/input/touchscreen/st/fts_lib/Makefile
--- linux-4.4.115-fbx/drivers/input/touchscreen/st./fts_lib/Makefile	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/input/touchscreen/st/fts_lib/Makefile	2019-01-22 16:16:24.039251260 +0100
@@ -0,0 +1,7 @@
+#
+# Makefile for the FTS touchscreen driver.
+#
+
+obj-$(CONFIG_TOUCHSCREEN_ST_I2C) += ftsCompensation.o \
+	ftsCrossCompile.o ftsError.o ftsFrame.o ftsIO.o ftsTest.o \
+	ftsTime.o ftsTool.o ftsFlash.o ftsGesture.o
diff -Nruw linux-4.4.115-fbx/drivers/input/touchscreen/st./Kconfig linux-4.4.115-fbx/drivers/input/touchscreen/st/Kconfig
--- linux-4.4.115-fbx/drivers/input/touchscreen/st./Kconfig	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/input/touchscreen/st/Kconfig	2019-01-22 16:16:24.039251260 +0100
@@ -0,0 +1,9 @@
+#
+# STMicroelectronics touchscreen driver configuration
+#
+
+config TOUCHSCREEN_ST_I2C
+	tristate "STMicroelectronics i2c touchscreen"
+	depends on TOUCHSCREEN_ST
+	help
+	  This enables support for STMicroelectronics I2C-based touch panels.
diff -Nruw linux-4.4.115-fbx/drivers/input/touchscreen/st./Makefile linux-4.4.115-fbx/drivers/input/touchscreen/st/Makefile
--- linux-4.4.115-fbx/drivers/input/touchscreen/st./Makefile	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/input/touchscreen/st/Makefile	2019-01-22 16:16:24.039251260 +0100
@@ -0,0 +1,5 @@
+#
+# Makefile for the STMicroelectronics touchscreen driver.
+#
+
+obj-$(CONFIG_TOUCHSCREEN_ST_I2C) += fts.o fts_gui.o fts_driver_test.o fts_lib/
diff -Nruw linux-4.4.115-fbx/drivers/input/touchscreen/synaptics_dsx./Kconfig linux-4.4.115-fbx/drivers/input/touchscreen/synaptics_dsx/Kconfig
--- linux-4.4.115-fbx/drivers/input/touchscreen/synaptics_dsx./Kconfig	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/input/touchscreen/synaptics_dsx/Kconfig	2019-01-22 16:16:24.043251297 +0100
@@ -0,0 +1,64 @@
+#
+# Synaptics DSX touchscreen driver configuration
+#
+menuconfig TOUCHSCREEN_SYNAPTICS_DSX_v21
+	bool "Synaptics DSX touchscreen"
+	default y
+	help
+	  Say Y here if you have a Synaptics DSX touchscreen connected
+	  to your system.
+
+	  If unsure, say N.
+
+if TOUCHSCREEN_SYNAPTICS_DSX_v21
+
+choice
+	default TOUCHSCREEN_SYNAPTICS_DSX_I2C_v21
+	prompt "Synaptics DSX touchscreen bus interface"
+
+config TOUCHSCREEN_SYNAPTICS_DSX_I2C_v21
+	bool "I2C"
+	depends on I2C
+	help
+	  Say Y here if you have a Synaptics DSX touchscreen interfaced
+	  to the host processor over I2C
+
+	  If unsure, say N.
+
+	  This module uses the services of DSX CORE
+
+config TOUCHSCREEN_SYNAPTICS_DSX_SPI_v21
+	bool "SPI"
+	depends on SPI_MASTER
+	help
+	  Say Y here if you have a Synaptics DSX touchscreen interfaced
+	  to the host processor over SPI
+
+	  If unsure, say N.
+
+	  This module uses the services of DSX CORE
+endchoice
+
+config TOUCHSCREEN_SYNAPTICS_DSX_CORE_v21
+	tristate "Synaptics DSX core driver module"
+	depends on TOUCHSCREEN_SYNAPTICS_DSX_I2C_v21 || TOUCHSCREEN_SYNAPTICS_DSX_SPI_v21
+	help
+	  Say Y here to enable basic touch reporting functionalities.
+
+	  If unsure, say N.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called synaptics_dsx_core.
+
+config TOUCHSCREEN_SYNAPTICS_DSX_FW_UPDATE_v21
+	tristate "Synaptics DSX touchscreen firmware update module"
+	depends on TOUCHSCREEN_SYNAPTICS_DSX_CORE_v21
+	help
+	  Say Y here to enable support for carrying out firmware update.
+
+	  If unsure, say N.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called synaptics_dsx_fw_update.
+
+endif
diff -Nruw linux-4.4.115-fbx/drivers/input/touchscreen/synaptics_dsx./Makefile linux-4.4.115-fbx/drivers/input/touchscreen/synaptics_dsx/Makefile
--- linux-4.4.115-fbx/drivers/input/touchscreen/synaptics_dsx./Makefile	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/input/touchscreen/synaptics_dsx/Makefile	2019-01-22 16:16:24.043251297 +0100
@@ -0,0 +1,10 @@
+#
+# Makefile for the Synaptics DSX touchscreen driver.
+#
+
+# Each configuration option enables a list of files.
+
+obj-$(CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_I2C_v21) += synaptics_dsx_i2c.o
+obj-$(CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_SPI_v21) += synaptics_dsx_spi.o
+obj-$(CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_CORE_v21) += synaptics_dsx_core.o
+obj-$(CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_FW_UPDATE_v21) += synaptics_dsx_fw_update.o
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/iommu/dma-mapping-fast.c	2019-10-29 09:26:23.813204842 +0100
@@ -0,0 +1,864 @@
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/dma-contiguous.h>
+#include <linux/dma-mapping.h>
+#include <linux/dma-mapping-fast.h>
+#include <linux/io-pgtable-fast.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <asm/cacheflush.h>
+#include <asm/dma-iommu.h>
+
+/* some redundant definitions... :( TODO: move to io-pgtable-fast.h */
+#define FAST_PAGE_SHIFT		12
+#define FAST_PAGE_SIZE (1UL << FAST_PAGE_SHIFT)
+#define FAST_PAGE_MASK (~(FAST_PAGE_SIZE - 1))
+#define FAST_PTE_ADDR_MASK		((av8l_fast_iopte)0xfffffffff000)
+#define FAST_MAIR_ATTR_IDX_CACHE	1
+#define FAST_PTE_ATTRINDX_SHIFT		2
+#define FAST_PTE_ATTRINDX_MASK		0x7
+#define FAST_PTE_SH_SHIFT		8
+#define FAST_PTE_SH_MASK	   (((av8l_fast_iopte)0x3) << FAST_PTE_SH_SHIFT)
+#define FAST_PTE_SH_OS             (((av8l_fast_iopte)2) << FAST_PTE_SH_SHIFT)
+#define FAST_PTE_SH_IS             (((av8l_fast_iopte)3) << FAST_PTE_SH_SHIFT)
+
+static pgprot_t __get_dma_pgprot(struct dma_attrs *attrs, pgprot_t prot,
+				 bool coherent)
+{
+	if (dma_get_attr(DMA_ATTR_STRONGLY_ORDERED, attrs))
+		return pgprot_noncached(prot);
+	else if (!coherent || dma_get_attr(DMA_ATTR_WRITE_COMBINE, attrs))
+		return pgprot_writecombine(prot);
+	return prot;
+}
+
+static int __get_iommu_pgprot(struct dma_attrs *attrs, int prot,
+			      bool coherent)
+{
+	if (!dma_get_attr(DMA_ATTR_EXEC_MAPPING, attrs))
+		prot |= IOMMU_NOEXEC;
+	if (dma_get_attr(DMA_ATTR_STRONGLY_ORDERED, attrs))
+		prot |= IOMMU_DEVICE;
+	if (coherent)
+		prot |= IOMMU_CACHE;
+
+	return prot;
+}
+
+static void fast_dmac_clean_range(struct dma_fast_smmu_mapping *mapping,
+				  void *start, void *end)
+{
+	if (!mapping->is_smmu_pt_coherent)
+		dmac_clean_range(start, end);
+}
+
+static bool __fast_is_pte_coherent(av8l_fast_iopte *ptep)
+{
+	int attr_idx = (*ptep & (FAST_PTE_ATTRINDX_MASK <<
+			FAST_PTE_ATTRINDX_SHIFT)) >>
+			FAST_PTE_ATTRINDX_SHIFT;
+
+	if ((attr_idx == FAST_MAIR_ATTR_IDX_CACHE) &&
+		(((*ptep & FAST_PTE_SH_MASK) == FAST_PTE_SH_IS) ||
+		  (*ptep & FAST_PTE_SH_MASK) == FAST_PTE_SH_OS))
+		return true;
+
+	return false;
+}
+
+static bool is_dma_coherent(struct device *dev, struct dma_attrs *attrs)
+{
+	bool is_coherent;
+
+	if (dma_get_attr(DMA_ATTR_FORCE_COHERENT, attrs))
+		is_coherent = true;
+	else if (dma_get_attr(DMA_ATTR_FORCE_NON_COHERENT, attrs))
+		is_coherent = false;
+	else if (is_device_dma_coherent(dev))
+		is_coherent = true;
+	else
+		is_coherent = false;
+
+	return is_coherent;
+}
+
+/*
+ * Checks if the allocated range (ending at @end) covered the upcoming
+ * stale bit.  We don't need to know exactly where the range starts since
+ * we already know where the candidate search range started.  If, starting
+ * from the beginning of the candidate search range, we had to step over
+ * (or landed directly on top of) the upcoming stale bit, then we return
+ * true.
+ *
+ * Due to wrapping, there are two scenarios we'll need to check: (1) if the
+ * range [search_start, upcoming_stale] spans 0 (i.e. search_start >
+ * upcoming_stale), and, (2) if the range: [search_start, upcoming_stale]
+ * does *not* span 0 (i.e. search_start <= upcoming_stale).  And for each
+ * of those two scenarios we need to handle three cases: (1) the range
+ * ended without wrapping, (2) the range wrapped but stopped short of
+ * the stale bit, and (3) the range wrapped and reached or passed it.
+ */
+static bool __bit_covered_stale(unsigned long upcoming_stale,
+				unsigned long search_start,
+				unsigned long end)
+{
+	if (search_start > upcoming_stale) {
+		if (end >= search_start) {
+			/*
+			 * We started searching above upcoming_stale and we
+			 * didn't wrap, so we couldn't have crossed
+			 * upcoming_stale.
+			 */
+			return false;
+		}
+		/*
+		 * We wrapped. Did we cross (or land on top of)
+		 * upcoming_stale?
+		 */
+		return end >= upcoming_stale;
+	}
+
+	if (search_start <= upcoming_stale) {
+		if (end >= search_start) {
+			/*
+			 * We didn't wrap.  Did we cross (or land on top
+			 * of) upcoming_stale?
+			 */
+			return end >= upcoming_stale;
+		}
+		/*
+		 * We wrapped. So we must have crossed upcoming_stale
+		 * (since we started searching below it).
+		 */
+		return true;
+	}
+
+	/* we should have covered all logical combinations... */
+	WARN_ON(1);
+	return true;
+}
+
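+/*
+ * Worked example (hypothetical numbers): with upcoming_stale = 3, a
+ * search that began at bit 12 and an allocation that ended at bit 5
+ * must have wrapped and stepped over bit 3, so the function above
+ * returns true; the same search ending at bit 14 never wrapped, so it
+ * returns false.
+ */
+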
+static dma_addr_t __fast_smmu_alloc_iova(struct dma_fast_smmu_mapping *mapping,
+					 struct dma_attrs *attrs,
+					 size_t size)
+{
+	unsigned long bit, prev_search_start, nbits = size >> FAST_PAGE_SHIFT;
+	unsigned long align = (1 << get_order(size)) - 1;
+
+	bit = bitmap_find_next_zero_area(
+		mapping->bitmap, mapping->num_4k_pages, mapping->next_start,
+		nbits, align);
+	if (unlikely(bit > mapping->num_4k_pages)) {
+		/* try wrapping */
+		mapping->next_start = 0; /* TODO: SHOULD I REALLY DO THIS?!? */
+		bit = bitmap_find_next_zero_area(
+			mapping->bitmap, mapping->num_4k_pages, 0, nbits,
+			align);
+		if (unlikely(bit > mapping->num_4k_pages))
+			return DMA_ERROR_CODE;
+	}
+
+	bitmap_set(mapping->bitmap, bit, nbits);
+	prev_search_start = mapping->next_start;
+	mapping->next_start = bit + nbits;
+	if (unlikely(mapping->next_start >= mapping->num_4k_pages))
+		mapping->next_start = 0;
+
+	/*
+	 * If we just re-allocated a VA whose TLB hasn't been invalidated
+	 * since it was last used and unmapped, we need to invalidate it
+	 * here.  We actually invalidate the entire TLB so that we don't
+	 * have to invalidate the TLB again until we wrap back around.
+	 */
+	if (mapping->have_stale_tlbs &&
+	    __bit_covered_stale(mapping->upcoming_stale_bit,
+				prev_search_start,
+				bit + nbits - 1)) {
+		bool skip_sync = dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs);
+
+		iommu_tlbiall(mapping->domain);
+		mapping->have_stale_tlbs = false;
+		av8l_fast_clear_stale_ptes(mapping->pgtbl_pmds,
+				mapping->domain->geometry.aperture_start,
+				mapping->base,
+				mapping->base + mapping->size - 1,
+				skip_sync);
+	}
+
+	return (bit << FAST_PAGE_SHIFT) + mapping->base;
+}
+
+/*
+ * Checks whether the candidate bit will be allocated sooner than the
+ * current upcoming stale bit.  We can say candidate will be upcoming
+ * sooner than the current upcoming stale bit if it lies between the
+ * starting bit of the next search range and the upcoming stale bit
+ * (allowing for wrap-around).
+ *
+ * Stated differently, we're checking the relative ordering of three
+ * unsigned numbers.  So we need to check all 6 (i.e. 3!) permutations,
+ * namely:
+ *
+ *     0 |---A---B---C---| TOP (Case 1)
+ *     0 |---A---C---B---| TOP (Case 2)
+ *     0 |---B---A---C---| TOP (Case 3)
+ *     0 |---B---C---A---| TOP (Case 4)
+ *     0 |---C---A---B---| TOP (Case 5)
+ *     0 |---C---B---A---| TOP (Case 6)
+ *
+ * Note that since we're allowing numbers to wrap, the following three
+ * scenarios are all equivalent for Case 1:
+ *
+ *     0 |---A---B---C---| TOP
+ *     0 |---C---A---B---| TOP (C has wrapped. This is Case 5.)
+ *     0 |---B---C---A---| TOP (C and B have wrapped. This is Case 4.)
+ *
+ * In any of these cases, if we start searching from A, we will find B
+ * before we find C.
+ *
+ * We can also find two equivalent cases for Case 2:
+ *
+ *     0 |---A---C---B---| TOP
+ *     0 |---B---A---C---| TOP (B has wrapped. This is Case 3.)
+ *     0 |---C---B---A---| TOP (B and C have wrapped. This is Case 6.)
+ *
+ * In any of these cases, if we start searching from A, we will find C
+ * before we find B.
+ */
+static bool __bit_is_sooner(unsigned long candidate,
+			    struct dma_fast_smmu_mapping *mapping)
+{
+	unsigned long A = mapping->next_start;
+	unsigned long B = candidate;
+	unsigned long C = mapping->upcoming_stale_bit;
+
+	if ((A < B && B < C) ||	/* Case 1 */
+	    (C < A && A < B) ||	/* Case 5 */
+	    (B < C && C < A))	/* Case 4 */
+		return true;
+
+	if ((A < C && C < B) ||	/* Case 2 */
+	    (B < A && A < C) ||	/* Case 3 */
+	    (C < B && B < A))	/* Case 6 */
+		return false;
+
+	/*
+	 * For simplicity, we've been ignoring the possibility of any of
+	 * our three numbers being equal.  Handle those cases here; they
+	 * shouldn't happen very often.
+	 */
+
+	/*
+	 * If candidate is the next bit to be searched then it's definitely
+	 * sooner.
+	 */
+	if (A == B)
+		return true;
+
+	/*
+	 * If candidate is the next upcoming stale bit we'll return false
+	 * to avoid doing `upcoming = candidate' in the caller (which would
+	 * be useless since they're already equal)
+	 */
+	if (B == C)
+		return false;
+
+	/*
+	 * If next start is the upcoming stale bit then candidate can't
+	 * possibly be sooner.  The "soonest" bit is already selected.
+	 */
+	if (A == C)
+		return false;
+
+	/* We should have covered all logical combinations. */
+	WARN(1, "Well, that's awkward. A=%ld, B=%ld, C=%ld\n", A, B, C);
+	return true;
+}
+
+static void __fast_smmu_free_iova(struct dma_fast_smmu_mapping *mapping,
+				  dma_addr_t iova, size_t size)
+{
+	unsigned long start_bit = (iova - mapping->base) >> FAST_PAGE_SHIFT;
+	unsigned long nbits = size >> FAST_PAGE_SHIFT;
+
+	/*
+	 * We don't invalidate TLBs on unmap.  We invalidate TLBs on map
+	 * when we're about to re-allocate a VA that was previously
+	 * unmapped but hasn't yet been invalidated.  So we need to keep
+	 * track of which bit is the closest to being re-allocated here.
+	 */
+	if (__bit_is_sooner(start_bit, mapping))
+		mapping->upcoming_stale_bit = start_bit;
+
+	bitmap_clear(mapping->bitmap, start_bit, nbits);
+	mapping->have_stale_tlbs = true;
+}
+
+
+static void __fast_dma_page_cpu_to_dev(struct page *page, unsigned long off,
+				       size_t size, enum dma_data_direction dir)
+{
+	__dma_map_area(page_address(page) + off, size, dir);
+}
+
+static void __fast_dma_page_dev_to_cpu(struct page *page, unsigned long off,
+				       size_t size, enum dma_data_direction dir)
+{
+	__dma_unmap_area(page_address(page) + off, size, dir);
+
+	/*
+	 * Mark the D-cache clean for this page to avoid extra flushing.
+	 */
+	if (dir != DMA_TO_DEVICE && off == 0 && size >= PAGE_SIZE)
+		set_bit(PG_dcache_clean, &page->flags);
+}
+
+static int __fast_dma_direction_to_prot(enum dma_data_direction dir)
+{
+	switch (dir) {
+	case DMA_BIDIRECTIONAL:
+		return IOMMU_READ | IOMMU_WRITE;
+	case DMA_TO_DEVICE:
+		return IOMMU_READ;
+	case DMA_FROM_DEVICE:
+		return IOMMU_WRITE;
+	default:
+		return 0;
+	}
+}
+
+static dma_addr_t fast_smmu_map_page(struct device *dev, struct page *page,
+				   unsigned long offset, size_t size,
+				   enum dma_data_direction dir,
+				   struct dma_attrs *attrs)
+{
+	struct dma_fast_smmu_mapping *mapping = dev->archdata.mapping->fast;
+	dma_addr_t iova;
+	unsigned long flags;
+	av8l_fast_iopte *pmd;
+	phys_addr_t phys_plus_off = page_to_phys(page) + offset;
+	phys_addr_t phys_to_map = round_down(phys_plus_off, FAST_PAGE_SIZE);
+	unsigned long offset_from_phys_to_map = phys_plus_off & ~FAST_PAGE_MASK;
+	size_t len = ALIGN(size + offset_from_phys_to_map, FAST_PAGE_SIZE);
+	int nptes = len >> FAST_PAGE_SHIFT;
+	bool skip_sync = dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs);
+	int prot = __fast_dma_direction_to_prot(dir);
+	bool is_coherent = is_dma_coherent(dev, attrs);
+
+	prot = __get_iommu_pgprot(attrs, prot, is_coherent);
+
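+	/*
+	 * Illustrative numbers: mapping 0x100 bytes at phys 0x10000880
+	 * rounds phys_to_map down to 0x10000000, pads len up to 0x1000
+	 * (one pte) and returns the allocated IOVA plus the 0x880 offset.
+	 */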
+	if (!skip_sync && !is_coherent)
+		__fast_dma_page_cpu_to_dev(phys_to_page(phys_to_map),
+					   offset_from_phys_to_map, size, dir);
+
+	spin_lock_irqsave(&mapping->lock, flags);
+
+	iova = __fast_smmu_alloc_iova(mapping, attrs, len);
+
+	if (unlikely(iova == DMA_ERROR_CODE))
+		goto fail;
+
+	pmd = iopte_pmd_offset(mapping->pgtbl_pmds,
+		mapping->domain->geometry.aperture_start, iova);
+
+	if (unlikely(av8l_fast_map_public(pmd, phys_to_map, len, prot)))
+		goto fail_free_iova;
+
+	fast_dmac_clean_range(mapping, pmd, pmd + nptes);
+
+	spin_unlock_irqrestore(&mapping->lock, flags);
+	return iova + offset_from_phys_to_map;
+
+fail_free_iova:
+	__fast_smmu_free_iova(mapping, iova, len);
+fail:
+	spin_unlock_irqrestore(&mapping->lock, flags);
+	return DMA_ERROR_CODE;
+}
+
+static void fast_smmu_unmap_page(struct device *dev, dma_addr_t iova,
+			       size_t size, enum dma_data_direction dir,
+			       struct dma_attrs *attrs)
+{
+	struct dma_fast_smmu_mapping *mapping = dev->archdata.mapping->fast;
+	unsigned long flags;
+	av8l_fast_iopte *pmd = iopte_pmd_offset(mapping->pgtbl_pmds,
+				mapping->domain->geometry.aperture_start,
+				iova);
+	unsigned long offset = iova & ~FAST_PAGE_MASK;
+	size_t len = ALIGN(size + offset, FAST_PAGE_SIZE);
+	int nptes = len >> FAST_PAGE_SHIFT;
+	struct page *page = phys_to_page((*pmd & FAST_PTE_ADDR_MASK));
+	bool skip_sync = dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs);
+	bool is_coherent = is_dma_coherent(dev, attrs);
+
+	if (!skip_sync && !is_coherent)
+		__fast_dma_page_dev_to_cpu(page, offset, size, dir);
+
+	spin_lock_irqsave(&mapping->lock, flags);
+	av8l_fast_unmap_public(pmd, len);
+	fast_dmac_clean_range(mapping, pmd, pmd + nptes);
+	__fast_smmu_free_iova(mapping, iova, len);
+	spin_unlock_irqrestore(&mapping->lock, flags);
+}
+
+static void fast_smmu_sync_single_for_cpu(struct device *dev,
+		dma_addr_t iova, size_t size, enum dma_data_direction dir)
+{
+	struct dma_fast_smmu_mapping *mapping = dev->archdata.mapping->fast;
+	av8l_fast_iopte *pmd = iopte_pmd_offset(mapping->pgtbl_pmds,
+				mapping->domain->geometry.aperture_start,
+				iova);
+	unsigned long offset = iova & ~FAST_PAGE_MASK;
+	struct page *page = phys_to_page((*pmd & FAST_PTE_ADDR_MASK));
+
+	if (!__fast_is_pte_coherent(pmd))
+		__fast_dma_page_dev_to_cpu(page, offset, size, dir);
+}
+
+static void fast_smmu_sync_single_for_device(struct device *dev,
+		dma_addr_t iova, size_t size, enum dma_data_direction dir)
+{
+	struct dma_fast_smmu_mapping *mapping = dev->archdata.mapping->fast;
+	av8l_fast_iopte *pmd = iopte_pmd_offset(mapping->pgtbl_pmds,
+				mapping->domain->geometry.aperture_start,
+				iova);
+	unsigned long offset = iova & ~FAST_PAGE_MASK;
+	struct page *page = phys_to_page((*pmd & FAST_PTE_ADDR_MASK));
+
+	if (!__fast_is_pte_coherent(pmd))
+		__fast_dma_page_cpu_to_dev(page, offset, size, dir);
+}
+
+static int fast_smmu_map_sg(struct device *dev, struct scatterlist *sg,
+			    int nents, enum dma_data_direction dir,
+			    struct dma_attrs *attrs)
+{
+	return -EINVAL;
+}
+
+static void fast_smmu_unmap_sg(struct device *dev,
+			       struct scatterlist *sg, int nents,
+			       enum dma_data_direction dir,
+			       struct dma_attrs *attrs)
+{
+	WARN_ON_ONCE(1);
+}
+
+static void fast_smmu_sync_sg_for_cpu(struct device *dev,
+		struct scatterlist *sg, int nents, enum dma_data_direction dir)
+{
+	WARN_ON_ONCE(1);
+}
+
+static void fast_smmu_sync_sg_for_device(struct device *dev,
+		struct scatterlist *sg, int nents, enum dma_data_direction dir)
+{
+	WARN_ON_ONCE(1);
+}
+
+static void __fast_smmu_free_pages(struct page **pages, int count)
+{
+	while (count--)
+		__free_page(pages[count]);
+	kvfree(pages);
+}
+
+static struct page **__fast_smmu_alloc_pages(unsigned int count, gfp_t gfp)
+{
+	struct page **pages;
+	unsigned int i = 0, array_size = count * sizeof(*pages);
+
+	if (array_size <= PAGE_SIZE)
+		pages = kzalloc(array_size, GFP_KERNEL);
+	else
+		pages = vzalloc(array_size);
+	if (!pages)
+		return NULL;
+
+	/* IOMMU can map any pages, so highmem can also be used here */
+	gfp |= __GFP_NOWARN | __GFP_HIGHMEM;
+
+	for (i = 0; i < count; ++i) {
+		struct page *page = alloc_page(gfp);
+
+		if (!page) {
+			__fast_smmu_free_pages(pages, i);
+			return NULL;
+		}
+		pages[i] = page;
+	}
+	return pages;
+}
+
+static void *fast_smmu_alloc(struct device *dev, size_t size,
+			     dma_addr_t *handle, gfp_t gfp,
+			     struct dma_attrs *attrs)
+{
+	struct dma_fast_smmu_mapping *mapping = dev->archdata.mapping->fast;
+	struct sg_table sgt;
+	dma_addr_t dma_addr, iova_iter;
+	void *addr;
+	av8l_fast_iopte *ptep;
+	unsigned long flags;
+	struct sg_mapping_iter miter;
+	unsigned int count = ALIGN(size, SZ_4K) >> PAGE_SHIFT;
+	int prot = IOMMU_READ | IOMMU_WRITE; /* TODO: extract from attrs */
+	bool is_coherent = is_dma_coherent(dev, attrs);
+	pgprot_t remap_prot = __get_dma_pgprot(attrs, PAGE_KERNEL, is_coherent);
+	struct page **pages;
+
+	prot = __get_iommu_pgprot(attrs, prot, is_coherent);
+
+	*handle = DMA_ERROR_CODE;
+
+	pages = __fast_smmu_alloc_pages(count, gfp);
+	if (!pages) {
+		dev_err(dev, "no pages\n");
+		return NULL;
+	}
+
+	size = ALIGN(size, SZ_4K);
+	if (sg_alloc_table_from_pages(&sgt, pages, count, 0, size, gfp)) {
+		dev_err(dev, "no sg table\n");
+		goto out_free_pages;
+	}
+
+	if (!is_coherent) {
+		/*
+		 * The CPU-centric flushing implied by SG_MITER_TO_SG isn't
+		 * sufficient here, so skip it by using the "wrong" direction.
+		 */
+		sg_miter_start(&miter, sgt.sgl, sgt.orig_nents,
+			       SG_MITER_FROM_SG);
+		while (sg_miter_next(&miter))
+			__dma_flush_range(miter.addr,
+					  miter.addr + miter.length);
+		sg_miter_stop(&miter);
+	}
+
+	spin_lock_irqsave(&mapping->lock, flags);
+	dma_addr = __fast_smmu_alloc_iova(mapping, attrs, size);
+	if (dma_addr == DMA_ERROR_CODE) {
+		dev_err(dev, "no iova\n");
+		spin_unlock_irqrestore(&mapping->lock, flags);
+		goto out_free_sg;
+	}
+	iova_iter = dma_addr;
+	sg_miter_start(&miter, sgt.sgl, sgt.orig_nents,
+		       SG_MITER_FROM_SG | SG_MITER_ATOMIC);
+	while (sg_miter_next(&miter)) {
+		int nptes = miter.length >> FAST_PAGE_SHIFT;
+
+		ptep = iopte_pmd_offset(mapping->pgtbl_pmds,
+			mapping->domain->geometry.aperture_start,
+			iova_iter);
+		if (unlikely(av8l_fast_map_public(
+				     ptep, page_to_phys(miter.page),
+				     miter.length, prot))) {
+			dev_err(dev, "no map public\n");
+			/* stop the atomic iterator before bailing out */
+			sg_miter_stop(&miter);
+			/* TODO: unwind previously successful mappings */
+			goto out_free_iova;
+		}
+		fast_dmac_clean_range(mapping, ptep, ptep + nptes);
+		iova_iter += miter.length;
+	}
+	sg_miter_stop(&miter);
+	spin_unlock_irqrestore(&mapping->lock, flags);
+
+	addr = dma_common_pages_remap(pages, size, VM_USERMAP, remap_prot,
+				      __builtin_return_address(0));
+	if (!addr) {
+		dev_err(dev, "no common pages\n");
+		goto out_unmap;
+	}
+
+	*handle = dma_addr;
+	sg_free_table(&sgt);
+	return addr;
+
+out_unmap:
+	/* need to take the lock again for page tables and iova */
+	spin_lock_irqsave(&mapping->lock, flags);
+	ptep = iopte_pmd_offset(mapping->pgtbl_pmds,
+		mapping->domain->geometry.aperture_start,
+		dma_addr);
+	av8l_fast_unmap_public(ptep, size);
+	fast_dmac_clean_range(mapping, ptep, ptep + count);
+out_free_iova:
+	__fast_smmu_free_iova(mapping, dma_addr, size);
+	spin_unlock_irqrestore(&mapping->lock, flags);
+out_free_sg:
+	sg_free_table(&sgt);
+out_free_pages:
+	__fast_smmu_free_pages(pages, count);
+	return NULL;
+}
+
+static void fast_smmu_free(struct device *dev, size_t size,
+			   void *vaddr, dma_addr_t dma_handle,
+			   struct dma_attrs *attrs)
+{
+	struct dma_fast_smmu_mapping *mapping = dev->archdata.mapping->fast;
+	struct vm_struct *area;
+	struct page **pages;
+	size_t count = ALIGN(size, SZ_4K) >> FAST_PAGE_SHIFT;
+	av8l_fast_iopte *ptep;
+	unsigned long flags;
+
+	size = ALIGN(size, SZ_4K);
+
+	area = find_vm_area(vaddr);
+	if (WARN_ON_ONCE(!area))
+		return;
+
+	pages = area->pages;
+	dma_common_free_remap(vaddr, size, VM_USERMAP, false);
+	ptep = iopte_pmd_offset(mapping->pgtbl_pmds,
+		mapping->domain->geometry.aperture_start, dma_handle);
+	spin_lock_irqsave(&mapping->lock, flags);
+	av8l_fast_unmap_public(ptep, size);
+	fast_dmac_clean_range(mapping, ptep, ptep + count);
+	__fast_smmu_free_iova(mapping, dma_handle, size);
+	spin_unlock_irqrestore(&mapping->lock, flags);
+	__fast_smmu_free_pages(pages, count);
+}
+
+static int fast_smmu_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
+				void *cpu_addr, dma_addr_t dma_addr,
+				size_t size, struct dma_attrs *attrs)
+{
+	struct vm_struct *area;
+	unsigned long uaddr = vma->vm_start;
+	struct page **pages;
+	int i, nr_pages, ret = 0;
+	bool coherent = is_dma_coherent(dev, attrs);
+
+	vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot,
+					     coherent);
+	area = find_vm_area(cpu_addr);
+	if (!area)
+		return -EINVAL;
+
+	pages = area->pages;
+	nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
+	for (i = vma->vm_pgoff; i < nr_pages && uaddr < vma->vm_end; i++) {
+		ret = vm_insert_page(vma, uaddr, pages[i]);
+		if (ret)
+			break;
+		uaddr += PAGE_SIZE;
+	}
+
+	return ret;
+}
+
+static int fast_smmu_dma_supported(struct device *dev, u64 mask)
+{
+	return mask <= 0xffffffff;
+}
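+
+/*
+ * Consequence of the 32-bit IOVA space (illustrative): with this helper
+ * installed, dma_set_mask(dev, DMA_BIT_MASK(32)) succeeds while
+ * dma_set_mask(dev, DMA_BIT_MASK(64)) is rejected, since every fast
+ * mapping lives below 4GB.
+ */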
+
+static int fast_smmu_mapping_error(struct device *dev,
+				   dma_addr_t dma_addr)
+{
+	return dma_addr == DMA_ERROR_CODE;
+}
+
+static void __fast_smmu_mapped_over_stale(struct dma_fast_smmu_mapping *fast,
+					  void *data)
+{
+	av8l_fast_iopte *ptep = data;
+	dma_addr_t iova;
+	unsigned long bitmap_idx;
+
+	bitmap_idx = (unsigned long)(ptep - fast->pgtbl_pmds);
+	iova = bitmap_idx << FAST_PAGE_SHIFT;
+	dev_err(fast->dev, "Mapped over stale tlb at %pa\n", &iova);
+	dev_err(fast->dev, "bitmap (failure at idx %lu):\n", bitmap_idx);
+	dev_err(fast->dev, "ptep: %p pmds: %p diff: %lu\n", ptep,
+		fast->pgtbl_pmds, bitmap_idx);
+	print_hex_dump(KERN_ERR, "bmap: ", DUMP_PREFIX_ADDRESS,
+		       32, 8, fast->bitmap, fast->bitmap_size, false);
+}
+
+static int fast_smmu_notify(struct notifier_block *self,
+			    unsigned long action, void *data)
+{
+	struct dma_fast_smmu_mapping *fast = container_of(
+		self, struct dma_fast_smmu_mapping, notifier);
+
+	switch (action) {
+	case MAPPED_OVER_STALE_TLB:
+		__fast_smmu_mapped_over_stale(fast, data);
+		return NOTIFY_OK;
+	default:
+		WARN(1, "Unhandled notifier action");
+		return NOTIFY_DONE;
+	}
+}
+
+static const struct dma_map_ops fast_smmu_dma_ops = {
+	.alloc = fast_smmu_alloc,
+	.free = fast_smmu_free,
+	.mmap = fast_smmu_mmap_attrs,
+	.map_page = fast_smmu_map_page,
+	.unmap_page = fast_smmu_unmap_page,
+	.sync_single_for_cpu = fast_smmu_sync_single_for_cpu,
+	.sync_single_for_device = fast_smmu_sync_single_for_device,
+	.map_sg = fast_smmu_map_sg,
+	.unmap_sg = fast_smmu_unmap_sg,
+	.sync_sg_for_cpu = fast_smmu_sync_sg_for_cpu,
+	.sync_sg_for_device = fast_smmu_sync_sg_for_device,
+	.dma_supported = fast_smmu_dma_supported,
+	.mapping_error = fast_smmu_mapping_error,
+};
+
+/**
+ * __fast_smmu_create_mapping_sized
+ * @base: bottom of the VA range
+ * @size: size of the VA range in bytes
+ *
+ * Creates a mapping structure which holds information about used/unused IO
+ * address ranges, which is required to perform mapping with IOMMU aware
+ * functions. The only VA range supported is [0, 4GB].
+ *
+ * The client device needs to be attached to the mapping with the
+ * fast_smmu_attach_device() function.
+ */
+static struct dma_fast_smmu_mapping *__fast_smmu_create_mapping_sized(
+	dma_addr_t base, u64 size)
+{
+	struct dma_fast_smmu_mapping *fast;
+
+	fast = kzalloc(sizeof(struct dma_fast_smmu_mapping), GFP_KERNEL);
+	if (!fast)
+		goto err;
+
+	fast->base = base;
+	fast->size = size;
+	fast->num_4k_pages = size >> FAST_PAGE_SHIFT;
+	fast->bitmap_size = BITS_TO_LONGS(fast->num_4k_pages) * sizeof(long);
+
+	fast->bitmap = kzalloc(fast->bitmap_size, GFP_KERNEL | __GFP_NOWARN |
+								__GFP_NORETRY);
+	if (!fast->bitmap)
+		fast->bitmap = vzalloc(fast->bitmap_size);
+
+	if (!fast->bitmap)
+		goto err2;
+
+	spin_lock_init(&fast->lock);
+
+	return fast;
+err2:
+	kfree(fast);
+err:
+	return ERR_PTR(-ENOMEM);
+}
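+
+/*
+ * Sizing note (illustrative): for the maximum supported 4GB range,
+ * num_4k_pages = 4GB >> 12 = 1M, so the bitmap occupies
+ * BITS_TO_LONGS(1M) * sizeof(long) = 128KB on a 64-bit kernel; an order-5
+ * allocation that may well fail under fragmentation, hence the vzalloc()
+ * fallback above.
+ */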
+
+/**
+ * fast_smmu_attach_device
+ * @dev: valid struct device pointer
+ * @mapping: io address space mapping structure (returned from
+ *	fast_smmu_create_mapping)
+ *
+ * Attaches the specified io address space mapping to the provided device.
+ * This replaces the dma operations (dma_map_ops pointer) with the
+ * IOMMU aware version. More than one client may be attached to
+ * the same io address space mapping.
+ */
+int fast_smmu_attach_device(struct device *dev,
+			    struct dma_iommu_mapping *mapping)
+{
+	int atomic_domain = 1;
+	struct iommu_domain *domain = mapping->domain;
+	struct iommu_pgtbl_info info;
+	u64 size = (u64)mapping->bits << PAGE_SHIFT;
+	struct iommu_domain_geometry geometry;
+
+	if (mapping->base + size > (SZ_1G * 4ULL))
+		return -EINVAL;
+
+	if (iommu_domain_set_attr(domain, DOMAIN_ATTR_ATOMIC,
+				  &atomic_domain))
+		return -EINVAL;
+
+	mapping->fast = __fast_smmu_create_mapping_sized(mapping->base, size);
+	if (IS_ERR(mapping->fast))
+		return -ENOMEM;
+	mapping->fast->domain = domain;
+	mapping->fast->dev = dev;
+
+	geometry.aperture_start = mapping->base;
+	geometry.aperture_end = mapping->base + size - 1;
+	if (iommu_domain_set_attr(domain, DOMAIN_ATTR_GEOMETRY,
+				  &geometry))
+		goto release_mapping;
+
+	if (iommu_attach_device(domain, dev))
+		goto release_mapping;
+
+	if (iommu_domain_get_attr(domain, DOMAIN_ATTR_PGTBL_INFO,
+				  &info)) {
+		dev_err(dev, "Couldn't get page table info\n");
+		fast_smmu_detach_device(dev, mapping);
+		return -EINVAL;
+	}
+	mapping->fast->pgtbl_pmds = info.pmds;
+
+	if (iommu_domain_get_attr(domain, DOMAIN_ATTR_PAGE_TABLE_IS_COHERENT,
+				  &mapping->fast->is_smmu_pt_coherent)) {
+		fast_smmu_detach_device(dev, mapping);
+		return -EINVAL;
+	}
+
+	mapping->fast->notifier.notifier_call = fast_smmu_notify;
+	av8l_register_notify(&mapping->fast->notifier);
+
+	dev->archdata.mapping = mapping;
+	set_dma_ops(dev, &fast_smmu_dma_ops);
+
+	return 0;
+
+release_mapping:
+	/* not attached yet: just free the fast mapping state */
+	kvfree(mapping->fast->bitmap);
+	kfree(mapping->fast);
+	return -EINVAL;
+}
+EXPORT_SYMBOL(fast_smmu_attach_device);
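+
+/*
+ * Usage sketch (illustrative, error handling omitted): clients are expected
+ * to reach this function via the arm_iommu_* helpers once DOMAIN_ATTR_FAST
+ * is set on the domain, as the self-tests in iommu-debug.c do:
+ *
+ *	struct dma_iommu_mapping *mapping;
+ *	int fast = 1;
+ *
+ *	mapping = arm_iommu_create_mapping(&platform_bus_type, 0,
+ *					   SZ_1G * 4ULL);
+ *	iommu_domain_set_attr(mapping->domain, DOMAIN_ATTR_FAST, &fast);
+ *	arm_iommu_attach_device(dev, mapping);
+ */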
+
+/**
+ * fast_smmu_detach_device
+ * @dev: valid struct device pointer
+ *
+ * Detaches the provided device from a previously attached mapping and
+ * clears the dma operations (dma_map_ops pointer).
+ */
+void fast_smmu_detach_device(struct device *dev,
+			     struct dma_iommu_mapping *mapping)
+{
+	iommu_detach_device(mapping->domain, dev);
+	dev->archdata.mapping = NULL;
+	set_dma_ops(dev, NULL);
+
+	kvfree(mapping->fast->bitmap);
+	kfree(mapping->fast);
+}
+EXPORT_SYMBOL(fast_smmu_detach_device);
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/iommu/iommu-debug.c	2019-10-29 09:26:23.817204881 +0100
@@ -0,0 +1,2365 @@
+/*
+ * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) "iommu-debug: %s: " fmt, __func__
+
+#include <linux/debugfs.h>
+#include <linux/device.h>
+#include <linux/iommu.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/dma-contiguous.h>
+#include <soc/qcom/secure_buffer.h>
+#include <linux/qcom_iommu.h>
+#include <linux/module.h>
+#include <linux/dma-mapping.h>
+#include <asm/cacheflush.h>
+#include <asm/dma-iommu.h>
+#include "iommu-debug.h"
+
+#if defined(CONFIG_IOMMU_DEBUG_TRACKING) || defined(CONFIG_IOMMU_TESTS)
+
+static const char *iommu_debug_attr_to_string(enum iommu_attr attr)
+{
+	switch (attr) {
+	case DOMAIN_ATTR_GEOMETRY:
+		return "DOMAIN_ATTR_GEOMETRY";
+	case DOMAIN_ATTR_PAGING:
+		return "DOMAIN_ATTR_PAGING";
+	case DOMAIN_ATTR_WINDOWS:
+		return "DOMAIN_ATTR_WINDOWS";
+	case DOMAIN_ATTR_FSL_PAMU_STASH:
+		return "DOMAIN_ATTR_FSL_PAMU_STASH";
+	case DOMAIN_ATTR_FSL_PAMU_ENABLE:
+		return "DOMAIN_ATTR_FSL_PAMU_ENABLE";
+	case DOMAIN_ATTR_FSL_PAMUV1:
+		return "DOMAIN_ATTR_FSL_PAMUV1";
+	case DOMAIN_ATTR_NESTING:
+		return "DOMAIN_ATTR_NESTING";
+	case DOMAIN_ATTR_PT_BASE_ADDR:
+		return "DOMAIN_ATTR_PT_BASE_ADDR";
+	case DOMAIN_ATTR_SECURE_VMID:
+		return "DOMAIN_ATTR_SECURE_VMID";
+	case DOMAIN_ATTR_ATOMIC:
+		return "DOMAIN_ATTR_ATOMIC";
+	case DOMAIN_ATTR_CONTEXT_BANK:
+		return "DOMAIN_ATTR_CONTEXT_BANK";
+	case DOMAIN_ATTR_TTBR0:
+		return "DOMAIN_ATTR_TTBR0";
+	case DOMAIN_ATTR_CONTEXTIDR:
+		return "DOMAIN_ATTR_CONTEXTIDR";
+	case DOMAIN_ATTR_PROCID:
+		return "DOMAIN_ATTR_PROCID";
+	case DOMAIN_ATTR_DYNAMIC:
+		return "DOMAIN_ATTR_DYNAMIC";
+	case DOMAIN_ATTR_NON_FATAL_FAULTS:
+		return "DOMAIN_ATTR_NON_FATAL_FAULTS";
+	case DOMAIN_ATTR_S1_BYPASS:
+		return "DOMAIN_ATTR_S1_BYPASS";
+	case DOMAIN_ATTR_FAST:
+		return "DOMAIN_ATTR_FAST";
+	case DOMAIN_ATTR_EARLY_MAP:
+		return "DOMAIN_ATTR_EARLY_MAP";
+	case DOMAIN_ATTR_CB_STALL_DISABLE:
+		return "DOMAIN_ATTR_CB_STALL_DISABLE";
+	default:
+		return "Unknown attr!";
+	}
+}
+#endif
+
+#ifdef CONFIG_IOMMU_DEBUG_TRACKING
+
+static DEFINE_MUTEX(iommu_debug_attachments_lock);
+static LIST_HEAD(iommu_debug_attachments);
+
+/*
+ * Each group may have more than one domain; each domain may
+ * have only one group.
+ * Used by debug tools to display the name of the device(s) associated
+ * with a particular domain.
+ */
+struct iommu_debug_attachment {
+	struct iommu_domain *domain;
+	struct iommu_group *group;
+	struct list_head list;
+};
+
+void iommu_debug_attach_device(struct iommu_domain *domain,
+			       struct device *dev)
+{
+	struct iommu_debug_attachment *attach;
+	struct iommu_group *group;
+
+	group = iommu_group_get(dev);
+	if (!group)
+		return;
+
+	attach = kzalloc(sizeof(*attach), GFP_KERNEL);
+	if (!attach)
+		return;
+
+	attach->domain = domain;
+	attach->group = group;
+	INIT_LIST_HEAD(&attach->list);
+
+	mutex_lock(&iommu_debug_attachments_lock);
+	list_add(&attach->list, &iommu_debug_attachments);
+	mutex_unlock(&iommu_debug_attachments_lock);
+}
+
+void iommu_debug_domain_remove(struct iommu_domain *domain)
+{
+	struct iommu_debug_attachment *it, *tmp;
+
+	mutex_lock(&iommu_debug_attachments_lock);
+	list_for_each_entry_safe(it, tmp, &iommu_debug_attachments, list) {
+		if (it->domain != domain)
+			continue;
+		list_del(&it->list);
+		iommu_group_put(it->group);
+		kfree(it);
+	}
+
+	mutex_unlock(&iommu_debug_attachments_lock);
+}
+
+#endif
+
+#ifdef CONFIG_IOMMU_TESTS
+
+#ifdef CONFIG_64BIT
+
+#define kstrtoux kstrtou64
+#define kstrtox_from_user kstrtoull_from_user
+#define kstrtosize_t kstrtoul
+
+#else
+
+#define kstrtoux kstrtou32
+#define kstrtox_from_user kstrtouint_from_user
+#define kstrtosize_t kstrtouint
+
+#endif
+
+static LIST_HEAD(iommu_debug_devices);
+static struct dentry *debugfs_tests_dir;
+static u32 iters_per_op = 1;
+static void *virt_addr;
+
+struct iommu_debug_device {
+	struct device *dev;
+	struct iommu_domain *domain;
+	u64 iova;
+	u64 phys;
+	size_t len;
+	struct list_head list;
+	struct mutex clk_lock;
+	unsigned int clk_count;
+};
+
+static int iommu_debug_build_phoney_sg_table(struct device *dev,
+					     struct sg_table *table,
+					     unsigned long total_size,
+					     unsigned long chunk_size)
+{
+	unsigned long nents = total_size / chunk_size;
+	struct scatterlist *sg;
+	int i;
+	struct page *page;
+
+	BUG_ON(!IS_ALIGNED(total_size, PAGE_SIZE));
+	BUG_ON(!IS_ALIGNED(total_size, chunk_size));
+	BUG_ON(sg_alloc_table(table, nents, GFP_KERNEL));
+	page = alloc_pages(GFP_KERNEL, get_order(chunk_size));
+	if (!page)
+		goto free_table;
+
+	/* all the same page... why not. */
+	for_each_sg(table->sgl, sg, table->nents, i)
+		sg_set_page(sg, page, chunk_size, 0);
+
+	return 0;
+
+free_table:
+	sg_free_table(table);
+	return -ENOMEM;
+}
+
+static void iommu_debug_destroy_phoney_sg_table(struct device *dev,
+						struct sg_table *table,
+						unsigned long chunk_size)
+{
+	__free_pages(sg_page(table->sgl), get_order(chunk_size));
+	sg_free_table(table);
+}
+
+static const char * const _size_to_string(unsigned long size)
+{
+	switch (size) {
+	case SZ_4K:
+		return "4K";
+	case SZ_8K:
+		return "8K";
+	case SZ_16K:
+		return "16K";
+	case SZ_64K:
+		return "64K";
+	case SZ_2M:
+		return "2M";
+	case SZ_1M * 12:
+		return "12M";
+	case SZ_1M * 20:
+		return "20M";
+	}
+	return "unknown size, please add to _size_to_string";
+}
+
+static int nr_iters_set(void *data, u64 val)
+{
+	if (!val)
+		val = 1;
+	if (val > 10000)
+		val = 10000;
+	*(u32 *)data = val;
+	return 0;
+}
+
+static int nr_iters_get(void *data, u64 *val)
+{
+	*val = *(u32 *)data;
+	return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(iommu_debug_nr_iters_ops,
+			nr_iters_get, nr_iters_set, "%llu\n");
+
+static void iommu_debug_device_profiling(struct seq_file *s, struct device *dev,
+					 enum iommu_attr attrs[],
+					 void *attr_values[], int nattrs,
+					 const size_t sizes[])
+{
+	int i;
+	const size_t *sz;
+	struct iommu_domain *domain;
+	struct bus_type *bus;
+	unsigned long iova = 0x10000;
+	phys_addr_t paddr = 0xa000;
+
+	bus = msm_iommu_get_bus(dev);
+	if (!bus)
+		return;
+
+	domain = iommu_domain_alloc(bus);
+	if (!domain) {
+		seq_puts(s, "Couldn't allocate domain\n");
+		return;
+	}
+
+	seq_puts(s, "Domain attributes: [ ");
+	for (i = 0; i < nattrs; ++i) {
+		/* not all attrs are ints, but this will get us by for now */
+		seq_printf(s, "%s=%d ", iommu_debug_attr_to_string(attrs[i]),
+			   *((int *)attr_values[i]));
+	}
+	seq_puts(s, "]\n");
+	for (i = 0; i < nattrs; ++i) {
+		if (iommu_domain_set_attr(domain, attrs[i], attr_values[i])) {
+			seq_printf(s, "Couldn't set %d to the value at %p\n",
+				 attrs[i], attr_values[i]);
+			goto out_domain_free;
+		}
+	}
+
+	if (iommu_attach_device(domain, dev)) {
+		seq_puts(s,
+			 "Couldn't attach new domain to device. Is it already attached?\n");
+		goto out_domain_free;
+	}
+
+	seq_printf(s, "(average over %d iterations)\n", iters_per_op);
+	seq_printf(s, "%8s %19s %16s\n", "size", "iommu_map", "iommu_unmap");
+	for (sz = sizes; *sz; ++sz) {
+		size_t size = *sz;
+		size_t unmapped;
+		u64 map_elapsed_ns = 0, unmap_elapsed_ns = 0;
+		u64 map_elapsed_us = 0, unmap_elapsed_us = 0;
+		u32 map_elapsed_rem = 0, unmap_elapsed_rem = 0;
+		struct timespec tbefore, tafter, diff;
+		int i;
+
+		for (i = 0; i < iters_per_op; ++i) {
+			getnstimeofday(&tbefore);
+			if (iommu_map(domain, iova, paddr, size,
+				      IOMMU_READ | IOMMU_WRITE)) {
+				seq_puts(s, "Failed to map\n");
+				continue;
+			}
+			getnstimeofday(&tafter);
+			diff = timespec_sub(tafter, tbefore);
+			map_elapsed_ns += timespec_to_ns(&diff);
+
+			getnstimeofday(&tbefore);
+			unmapped = iommu_unmap(domain, iova, size);
+			if (unmapped != size) {
+				seq_printf(s,
+					   "Only unmapped %zx instead of %zx\n",
+					   unmapped, size);
+				continue;
+			}
+			getnstimeofday(&tafter);
+			diff = timespec_sub(tafter, tbefore);
+			unmap_elapsed_ns += timespec_to_ns(&diff);
+		}
+
+		map_elapsed_ns = div_u64_rem(map_elapsed_ns, iters_per_op,
+				&map_elapsed_rem);
+		unmap_elapsed_ns = div_u64_rem(unmap_elapsed_ns, iters_per_op,
+				&unmap_elapsed_rem);
+
+		map_elapsed_us = div_u64_rem(map_elapsed_ns, 1000,
+						&map_elapsed_rem);
+		unmap_elapsed_us = div_u64_rem(unmap_elapsed_ns, 1000,
+						&unmap_elapsed_rem);
+
+		seq_printf(s, "%8s %12lld.%03d us %9lld.%03d us\n",
+			_size_to_string(size),
+			map_elapsed_us, map_elapsed_rem,
+			unmap_elapsed_us, unmap_elapsed_rem);
+	}
+
+	seq_putc(s, '\n');
+	seq_printf(s, "%8s %19s %16s\n", "size", "iommu_map_sg", "iommu_unmap");
+	for (sz = sizes; *sz; ++sz) {
+		size_t size = *sz;
+		size_t unmapped;
+		u64 map_elapsed_ns = 0, unmap_elapsed_ns = 0;
+		u64 map_elapsed_us = 0, unmap_elapsed_us = 0;
+		u32 map_elapsed_rem = 0, unmap_elapsed_rem = 0;
+		struct timespec tbefore, tafter, diff;
+		struct sg_table table;
+		unsigned long chunk_size = SZ_4K;
+		int i;
+
+		if (iommu_debug_build_phoney_sg_table(dev, &table, size,
+						      chunk_size)) {
+			seq_puts(s,
+				"couldn't build phoney sg table! bailing...\n");
+			goto out_detach;
+		}
+
+		for (i = 0; i < iters_per_op; ++i) {
+			getnstimeofday(&tbefore);
+			if (iommu_map_sg(domain, iova, table.sgl, table.nents,
+					 IOMMU_READ | IOMMU_WRITE) != size) {
+				seq_puts(s, "Failed to map_sg\n");
+				goto next;
+			}
+			getnstimeofday(&tafter);
+			diff = timespec_sub(tafter, tbefore);
+			map_elapsed_ns += timespec_to_ns(&diff);
+
+			getnstimeofday(&tbefore);
+			unmapped = iommu_unmap(domain, iova, size);
+			if (unmapped != size) {
+				seq_printf(s,
+					   "Only unmapped %zx instead of %zx\n",
+					   unmapped, size);
+				goto next;
+			}
+			getnstimeofday(&tafter);
+			diff = timespec_sub(tafter, tbefore);
+			unmap_elapsed_ns += timespec_to_ns(&diff);
+		}
+
+		map_elapsed_ns = div_u64_rem(map_elapsed_ns, iters_per_op,
+				&map_elapsed_rem);
+		unmap_elapsed_ns = div_u64_rem(unmap_elapsed_ns, iters_per_op,
+				&unmap_elapsed_rem);
+
+		map_elapsed_us = div_u64_rem(map_elapsed_ns, 1000,
+						&map_elapsed_rem);
+		unmap_elapsed_us = div_u64_rem(unmap_elapsed_ns, 1000,
+						&unmap_elapsed_rem);
+
+		seq_printf(s, "%8s %12lld.%03d us %9lld.%03d us\n",
+			_size_to_string(size),
+			map_elapsed_us, map_elapsed_rem,
+			unmap_elapsed_us, unmap_elapsed_rem);
+
+next:
+		iommu_debug_destroy_phoney_sg_table(dev, &table, chunk_size);
+	}
+
+out_detach:
+	iommu_detach_device(domain, dev);
+out_domain_free:
+	iommu_domain_free(domain);
+}
+
+static int iommu_debug_profiling_show(struct seq_file *s, void *ignored)
+{
+	struct iommu_debug_device *ddev = s->private;
+	const size_t sizes[] = { SZ_4K, SZ_64K, SZ_2M, SZ_1M * 12,
+					SZ_1M * 20, 0 };
+	enum iommu_attr attrs[] = {
+		DOMAIN_ATTR_ATOMIC,
+	};
+	int atomic = 1;
+	void *attr_values[] = { &atomic };
+
+	iommu_debug_device_profiling(s, ddev->dev, attrs, attr_values,
+				     ARRAY_SIZE(attrs), sizes);
+
+	return 0;
+}
+
+static int iommu_debug_profiling_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, iommu_debug_profiling_show, inode->i_private);
+}
+
+static const struct file_operations iommu_debug_profiling_fops = {
+	.open	 = iommu_debug_profiling_open,
+	.read	 = seq_read,
+	.llseek	 = seq_lseek,
+	.release = single_release,
+};
+
+static int iommu_debug_secure_profiling_show(struct seq_file *s, void *ignored)
+{
+	struct iommu_debug_device *ddev = s->private;
+	const size_t sizes[] = { SZ_4K, SZ_64K, SZ_2M, SZ_1M * 12,
+					SZ_1M * 20, 0 };
+
+	enum iommu_attr attrs[] = {
+		DOMAIN_ATTR_ATOMIC,
+		DOMAIN_ATTR_SECURE_VMID,
+	};
+	int one = 1, secure_vmid = VMID_CP_PIXEL;
+	void *attr_values[] = { &one, &secure_vmid };
+
+	iommu_debug_device_profiling(s, ddev->dev, attrs, attr_values,
+				     ARRAY_SIZE(attrs), sizes);
+
+	return 0;
+}
+
+static int iommu_debug_secure_profiling_open(struct inode *inode,
+					     struct file *file)
+{
+	return single_open(file, iommu_debug_secure_profiling_show,
+			   inode->i_private);
+}
+
+static const struct file_operations iommu_debug_secure_profiling_fops = {
+	.open	 = iommu_debug_secure_profiling_open,
+	.read	 = seq_read,
+	.llseek	 = seq_lseek,
+	.release = single_release,
+};
+
+static int iommu_debug_profiling_fast_show(struct seq_file *s, void *ignored)
+{
+	struct iommu_debug_device *ddev = s->private;
+	size_t sizes[] = {SZ_4K, SZ_8K, SZ_16K, SZ_64K, 0};
+	enum iommu_attr attrs[] = {
+		DOMAIN_ATTR_FAST,
+		DOMAIN_ATTR_ATOMIC,
+		DOMAIN_ATTR_GEOMETRY,
+	};
+	int one = 1;
+	struct iommu_domain_geometry geometry = {0, 0, 0};
+	void *attr_values[] = { &one, &one, &geometry};
+
+	geometry.aperture_end = (dma_addr_t)(SZ_1G * 4ULL - 1);
+
+	iommu_debug_device_profiling(s, ddev->dev, attrs, attr_values,
+				     ARRAY_SIZE(attrs), sizes);
+
+	return 0;
+}
+
+static int iommu_debug_profiling_fast_open(struct inode *inode,
+					   struct file *file)
+{
+	return single_open(file, iommu_debug_profiling_fast_show,
+			   inode->i_private);
+}
+
+static const struct file_operations iommu_debug_profiling_fast_fops = {
+	.open	 = iommu_debug_profiling_fast_open,
+	.read	 = seq_read,
+	.llseek	 = seq_lseek,
+	.release = single_release,
+};
+
+static int iommu_debug_profiling_fast_dma_api_show(struct seq_file *s,
+						 void *ignored)
+{
+	int i, experiment;
+	struct iommu_debug_device *ddev = s->private;
+	struct device *dev = ddev->dev;
+	u64 map_elapsed_ns[10], unmap_elapsed_ns[10];
+	struct dma_iommu_mapping *mapping;
+	dma_addr_t dma_addr;
+	void *virt;
+	int fast = 1;
+	const char * const extra_labels[] = {
+		"not coherent",
+		"coherent",
+	};
+	struct dma_attrs coherent_attrs;
+	struct dma_attrs *extra_attrs[] = {
+		NULL,
+		&coherent_attrs,
+	};
+
+	init_dma_attrs(&coherent_attrs);
+	dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &coherent_attrs);
+
+	virt = kmalloc(1518, GFP_KERNEL);
+	if (!virt)
+		goto out;
+
+	mapping = arm_iommu_create_mapping(&platform_bus_type, 0, SZ_1G * 4ULL);
+	if (!mapping) {
+		seq_puts(s, "arm_iommu_create_mapping failed\n");
+		goto out_kfree;
+	}
+
+	if (iommu_domain_set_attr(mapping->domain, DOMAIN_ATTR_FAST, &fast)) {
+		seq_puts(s, "iommu_domain_set_attr failed\n");
+		goto out_release_mapping;
+	}
+
+	if (arm_iommu_attach_device(dev, mapping)) {
+		seq_puts(s, "arm_iommu_attach_device failed\n");
+		goto out_release_mapping;
+	}
+
+	if (iommu_enable_config_clocks(mapping->domain)) {
+		seq_puts(s, "Couldn't enable clocks\n");
+		goto out_detach;
+	}
+	for (experiment = 0; experiment < 2; ++experiment) {
+		size_t map_avg = 0, unmap_avg = 0;
+
+		for (i = 0; i < 10; ++i) {
+			struct timespec tbefore, tafter, diff;
+			u64 ns;
+
+			getnstimeofday(&tbefore);
+			dma_addr = dma_map_single_attrs(
+				dev, virt, SZ_4K, DMA_TO_DEVICE,
+				extra_attrs[experiment]);
+			getnstimeofday(&tafter);
+			diff = timespec_sub(tafter, tbefore);
+			ns = timespec_to_ns(&diff);
+			if (dma_mapping_error(dev, dma_addr)) {
+				seq_puts(s, "dma_map_single failed\n");
+				goto out_disable_config_clocks;
+			}
+			map_elapsed_ns[i] = ns;
+
+			getnstimeofday(&tbefore);
+			dma_unmap_single_attrs(
+				dev, dma_addr, SZ_4K, DMA_TO_DEVICE,
+				extra_attrs[experiment]);
+			getnstimeofday(&tafter);
+			diff = timespec_sub(tafter, tbefore);
+			ns = timespec_to_ns(&diff);
+			unmap_elapsed_ns[i] = ns;
+		}
+
+		seq_printf(s, "%13s %24s (ns): [", extra_labels[experiment],
+			   "dma_map_single_attrs");
+		for (i = 0; i < 10; ++i) {
+			map_avg += map_elapsed_ns[i];
+			seq_printf(s, "%5llu%s", map_elapsed_ns[i],
+				   i < 9 ? ", " : "");
+		}
+		map_avg /= 10;
+		seq_printf(s, "] (avg: %zu)\n", map_avg);
+
+		seq_printf(s, "%13s %24s (ns): [", extra_labels[experiment],
+			   "dma_unmap_single_attrs");
+		for (i = 0; i < 10; ++i) {
+			unmap_avg += unmap_elapsed_ns[i];
+			seq_printf(s, "%5llu%s", unmap_elapsed_ns[i],
+				   i < 9 ? ", " : "");
+		}
+		unmap_avg /= 10;
+		seq_printf(s, "] (avg: %zu)\n", unmap_avg);
+	}
+
+out_disable_config_clocks:
+	iommu_disable_config_clocks(mapping->domain);
+out_detach:
+	arm_iommu_detach_device(dev);
+out_release_mapping:
+	arm_iommu_release_mapping(mapping);
+out_kfree:
+	kfree(virt);
+out:
+	return 0;
+}
+
+static int iommu_debug_profiling_fast_dma_api_open(struct inode *inode,
+						 struct file *file)
+{
+	return single_open(file, iommu_debug_profiling_fast_dma_api_show,
+			   inode->i_private);
+}
+
+static const struct file_operations iommu_debug_profiling_fast_dma_api_fops = {
+	.open	 = iommu_debug_profiling_fast_dma_api_open,
+	.read	 = seq_read,
+	.llseek	 = seq_lseek,
+	.release = single_release,
+};
+
+static int __tlb_stress_sweep(struct device *dev, struct seq_file *s)
+{
+	int i, ret = 0;
+	u64 iova;
+	const u64  max = SZ_1G * 4ULL - 1;
+	void *virt;
+	dma_addr_t dma_addr;
+
+	/*
+	 * we'll be doing 4K and 8K mappings.  Need to own an entire 8K
+	 * chunk that we can work with.
+	 */
+	virt = (void *)__get_free_pages(GFP_KERNEL, get_order(SZ_8K));
+	if (!virt)
+		return -ENOMEM;
+
+	/* fill the whole 4GB space */
+	for (iova = 0, i = 0; iova < max; iova += SZ_8K, ++i) {
+		dma_addr = dma_map_single(dev, virt, SZ_8K, DMA_TO_DEVICE);
+		if (dma_addr == DMA_ERROR_CODE) {
+			dev_err(dev, "Failed map on iter %d\n", i);
+			ret = -EINVAL;
+			goto out;
+		}
+	}
+
+	if (dma_map_single(dev, virt, SZ_4K, DMA_TO_DEVICE) != DMA_ERROR_CODE) {
+		dev_err(dev,
+			"dma_map_single unexpectedly succeeded (VA space should have been exhausted)\n");
+		ret = -EINVAL;
+		goto out;
+	}
+
+	/*
+	 * free up 4K at the very beginning, then leave one 4K mapping,
+	 * then free up 8K.  This will result in the next 8K map to skip
+	 * over the 4K hole and take the 8K one.
+	 */
+	dma_unmap_single(dev, 0, SZ_4K, DMA_TO_DEVICE);
+	dma_unmap_single(dev, SZ_8K, SZ_4K, DMA_TO_DEVICE);
+	dma_unmap_single(dev, SZ_8K + SZ_4K, SZ_4K, DMA_TO_DEVICE);
+
+	/* remap 8K */
+	dma_addr = dma_map_single(dev, virt, SZ_8K, DMA_TO_DEVICE);
+	if (dma_addr != SZ_8K) {
+		dma_addr_t expected = SZ_8K;
+
+		dev_err(dev, "Unexpected dma_addr. got: %pa expected: %pa\n",
+			&dma_addr, &expected);
+		ret = -EINVAL;
+		goto out;
+	}
+
+	/*
+	 * now remap 4K.  We should get the first 4K chunk that was skipped
+	 * over during the previous 8K map.  If we missed a TLB invalidate
+	 * at that point this should explode.
+	 */
+	dma_addr = dma_map_single(dev, virt, SZ_4K, DMA_TO_DEVICE);
+	if (dma_addr != 0) {
+		dma_addr_t expected = 0;
+
+		dev_err(dev, "Unexpected dma_addr. got: %pa expected: %pa\n",
+			&dma_addr, &expected);
+		ret = -EINVAL;
+		goto out;
+	}
+
+	if (dma_map_single(dev, virt, SZ_4K, DMA_TO_DEVICE) != DMA_ERROR_CODE) {
+		dev_err(dev,
+			"dma_map_single unexpectedly succeeded after remaps (VA space should have been exhausted)\n");
+		ret = -EINVAL;
+		goto out;
+	}
+
+	/* we're all full again. unmap everything. */
+	for (iova = 0; iova < max; iova += SZ_8K)
+		dma_unmap_single(dev, (dma_addr_t)iova, SZ_8K, DMA_TO_DEVICE);
+
+out:
+	free_pages((unsigned long)virt, get_order(SZ_8K));
+	return ret;
+}
+
+struct fib_state {
+	unsigned long cur;
+	unsigned long prev;
+};
+
+static void fib_init(struct fib_state *f)
+{
+	f->cur = f->prev = 1;
+}
+
+static unsigned long get_next_fib(struct fib_state *f)
+{
+	unsigned long next = f->cur + f->prev;
+
+	f->prev = f->cur;
+	f->cur = next;
+	return next;
+}
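+
+/*
+ * Example: starting from cur = prev = 1, successive calls return
+ * 2, 3, 5, 8, 13, 21, ...; __rand_va_sweep() below multiplies each value
+ * by the mapping size to pick the "random" iovas it unmaps.
+ */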
+
+/*
+ * Not actually random.  Just testing the fibs (and max - the fibs).
+ */
+static int __rand_va_sweep(struct device *dev, struct seq_file *s,
+			   const size_t size)
+{
+	u64 iova;
+	const u64 max = SZ_1G * 4ULL - 1;
+	int i, remapped, unmapped, ret = 0;
+	void *virt;
+	dma_addr_t dma_addr, dma_addr2;
+	struct fib_state fib;
+
+	virt = (void *)__get_free_pages(GFP_KERNEL, get_order(size));
+	if (!virt) {
+		if (size > SZ_8K) {
+			dev_err(dev,
+				"Failed to allocate %s of memory, which is a lot. Skipping test for this size\n",
+				_size_to_string(size));
+			return 0;
+		}
+		return -ENOMEM;
+	}
+
+	/* fill the whole 4GB space */
+	for (iova = 0, i = 0; iova < max; iova += size, ++i) {
+		dma_addr = dma_map_single(dev, virt, size, DMA_TO_DEVICE);
+		if (dma_addr == DMA_ERROR_CODE) {
+			dev_err(dev, "Failed map on iter %d\n", i);
+			ret = -EINVAL;
+			goto out;
+		}
+	}
+
+	/* now unmap "random" iovas */
+	unmapped = 0;
+	fib_init(&fib);
+	for (iova = get_next_fib(&fib) * size;
+	     iova < max - size;
+	     iova = (u64)get_next_fib(&fib) * size) {
+		dma_addr = (dma_addr_t)(iova);
+		dma_addr2 = (dma_addr_t)((max + 1) - size - iova);
+		if (dma_addr == dma_addr2) {
+			WARN(1,
+			"%s test needs update! The random number sequence is folding in on itself and should be changed.\n",
+			__func__);
+			return -EINVAL;
+		}
+		dma_unmap_single(dev, dma_addr, size, DMA_TO_DEVICE);
+		dma_unmap_single(dev, dma_addr2, size, DMA_TO_DEVICE);
+		unmapped += 2;
+	}
+
+	/* and map until everything fills back up */
+	for (remapped = 0; ; ++remapped) {
+		dma_addr = dma_map_single(dev, virt, size, DMA_TO_DEVICE);
+		if (dma_addr == DMA_ERROR_CODE)
+			break;
+	}
+
+	if (unmapped != remapped) {
+		dev_err(dev,
+			"Unexpected random remap count! Unmapped %d but remapped %d\n",
+			unmapped, remapped);
+		ret = -EINVAL;
+	}
+
+	for (iova = 0; iova < max; iova += size)
+		dma_unmap_single(dev, (dma_addr_t)iova, size, DMA_TO_DEVICE);
+
+out:
+	free_pages((unsigned long)virt, get_order(size));
+	return ret;
+}
+
+static int __check_mapping(struct device *dev, struct iommu_domain *domain,
+			   dma_addr_t iova, phys_addr_t expected)
+{
+	phys_addr_t res = iommu_iova_to_phys_hard(domain, iova);
+	phys_addr_t res2 = iommu_iova_to_phys(domain, iova);
+
+	WARN(res != res2, "hard/soft iova_to_phys fns don't agree...");
+
+	if (res != expected) {
+		dev_err_ratelimited(dev,
+				    "Bad translation for %pa! Expected: %pa Got: %pa\n",
+				    &iova, &expected, &res);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int __full_va_sweep(struct device *dev, struct seq_file *s,
+			   const size_t size, struct iommu_domain *domain)
+{
+	u64 iova;
+	dma_addr_t dma_addr;
+	void *virt;
+	phys_addr_t phys;
+	const u64 max = SZ_1G * 4ULL - 1;
+	int ret = 0, i;
+
+	virt = (void *)__get_free_pages(GFP_KERNEL, get_order(size));
+	if (!virt) {
+		if (size > SZ_8K) {
+			dev_err(dev,
+				"Failed to allocate %s of memory, which is a lot. Skipping test for this size\n",
+				_size_to_string(size));
+			return 0;
+		}
+		return -ENOMEM;
+	}
+	phys = virt_to_phys(virt);
+
+	for (iova = 0, i = 0; iova < max; iova += size, ++i) {
+		unsigned long expected = iova;
+
+		dma_addr = dma_map_single(dev, virt, size, DMA_TO_DEVICE);
+		if (dma_addr != expected) {
+			dev_err_ratelimited(dev,
+					    "Unexpected iova on iter %d (expected: 0x%lx got: 0x%lx)\n",
+					    i, expected,
+					    (unsigned long)dma_addr);
+			ret = -EINVAL;
+			goto out;
+		}
+	}
+
+	if (domain) {
+		/* check every mapping from 0..6M */
+		for (iova = 0, i = 0; iova < SZ_2M * 3; iova += size, ++i) {
+			phys_addr_t expected = phys;
+
+			if (__check_mapping(dev, domain, iova, expected)) {
+				dev_err(dev, "iter: %d\n", i);
+				ret = -EINVAL;
+				goto out;
+			}
+		}
+		/* and from 4G..4G-6M */
+		for (iova = 0, i = 0; iova < SZ_2M * 3; iova += size, ++i) {
+			phys_addr_t expected = phys;
+			unsigned long theiova = ((SZ_1G * 4ULL) - size) - iova;
+
+			if (__check_mapping(dev, domain, theiova, expected)) {
+				dev_err(dev, "iter: %d\n", i);
+				ret = -EINVAL;
+				goto out;
+			}
+		}
+	}
+
+	/* at this point, our VA space should be full */
+	dma_addr = dma_map_single(dev, virt, size, DMA_TO_DEVICE);
+	if (dma_addr != DMA_ERROR_CODE) {
+		dev_err_ratelimited(dev,
+				    "dma_map_single succeeded when it should have failed. Got iova: 0x%lx\n",
+				    (unsigned long)dma_addr);
+		ret = -EINVAL;
+	}
+
+out:
+	for (iova = 0; iova < max; iova += size)
+		dma_unmap_single(dev, (dma_addr_t)iova, size, DMA_TO_DEVICE);
+
+	free_pages((unsigned long)virt, get_order(size));
+	return ret;
+}
+
+#define ds_printf(d, s, fmt, ...) ({				\
+			dev_err(d, fmt, ##__VA_ARGS__);		\
+			seq_printf(s, fmt, ##__VA_ARGS__);	\
+		})
+
+static int __functional_dma_api_va_test(struct device *dev, struct seq_file *s,
+				     struct iommu_domain *domain, void *priv)
+{
+	int i, j, ret = 0;
+	size_t *sz, *sizes = priv;
+
+	for (j = 0; j < 1; ++j) {
+		for (sz = sizes; *sz; ++sz) {
+			for (i = 0; i < 2; ++i) {
+				ds_printf(dev, s, "Full VA sweep @%s %d",
+					       _size_to_string(*sz), i);
+				if (__full_va_sweep(dev, s, *sz, domain)) {
+					ds_printf(dev, s, "  -> FAILED\n");
+					ret = -EINVAL;
+				} else {
+					ds_printf(dev, s, "  -> SUCCEEDED\n");
+				}
+			}
+		}
+	}
+
+	ds_printf(dev, s, "bonus map:");
+	if (__full_va_sweep(dev, s, SZ_4K, domain)) {
+		ds_printf(dev, s, "  -> FAILED\n");
+		ret = -EINVAL;
+	} else {
+		ds_printf(dev, s, "  -> SUCCEEDED\n");
+	}
+
+	for (sz = sizes; *sz; ++sz) {
+		for (i = 0; i < 2; ++i) {
+			ds_printf(dev, s, "Rand VA sweep @%s %d",
+				   _size_to_string(*sz), i);
+			if (__rand_va_sweep(dev, s, *sz)) {
+				ds_printf(dev, s, "  -> FAILED\n");
+				ret = -EINVAL;
+			} else {
+				ds_printf(dev, s, "  -> SUCCEEDED\n");
+			}
+		}
+	}
+
+	ds_printf(dev, s, "TLB stress sweep");
+	if (__tlb_stress_sweep(dev, s)) {
+		ds_printf(dev, s, "  -> FAILED\n");
+		ret = -EINVAL;
+	} else {
+		ds_printf(dev, s, "  -> SUCCEEDED\n");
+	}
+
+	ds_printf(dev, s, "second bonus map:");
+	if (__full_va_sweep(dev, s, SZ_4K, domain)) {
+		ds_printf(dev, s, "  -> FAILED\n");
+		ret = -EINVAL;
+	} else {
+		ds_printf(dev, s, "  -> SUCCEEDED\n");
+	}
+
+	return ret;
+}
+
+static int __functional_dma_api_alloc_test(struct device *dev,
+					   struct seq_file *s,
+					   struct iommu_domain *domain,
+					   void *ignored)
+{
+	size_t size = SZ_1K * 742;
+	int ret = 0;
+	u8 *data;
+	dma_addr_t iova;
+
+	/* Make sure we can allocate and use a buffer */
+	ds_printf(dev, s, "Allocating coherent buffer");
+	data = dma_alloc_coherent(dev, size, &iova, GFP_KERNEL);
+	if (!data) {
+		ds_printf(dev, s, "  -> FAILED\n");
+		ret = -EINVAL;
+	} else {
+		int i;
+
+		ds_printf(dev, s, "  -> SUCCEEDED\n");
+		ds_printf(dev, s, "Using coherent buffer");
+		for (i = 0; i < 742; ++i) {
+			int ind = SZ_1K * i;
+			u8 *p = data + ind;
+			u8 val = i % 255;
+
+			memset(data, 0xa5, size);
+			*p = val;
+			(*p)++;
+			if ((*p) != val + 1) {
+				ds_printf(dev, s,
+					  "  -> FAILED on iter %d since %d != %d\n",
+					  i, *p, val + 1);
+				ret = -EINVAL;
+			}
+		}
+		if (!ret)
+			ds_printf(dev, s, "  -> SUCCEEDED\n");
+		dma_free_coherent(dev, size, data, iova);
+	}
+
+	return ret;
+}
+
+static int __functional_dma_api_basic_test(struct device *dev,
+					   struct seq_file *s,
+					   struct iommu_domain *domain,
+					   void *ignored)
+{
+	size_t size = 1518;
+	int i, j, ret = 0;
+	u8 *data;
+	dma_addr_t iova;
+	phys_addr_t pa, pa2;
+
+	ds_printf(dev, s, "Basic DMA API test");
+	/* Make sure we can allocate and use a buffer */
+	for (i = 0; i < 1000; ++i) {
+		data = kmalloc(size, GFP_KERNEL);
+		if (!data) {
+			ds_printf(dev, s, "  -> FAILED\n");
+			ret = -EINVAL;
+			goto out;
+		}
+		memset(data, 0xa5, size);
+		iova = dma_map_single(dev, data, size, DMA_TO_DEVICE);
+		pa = iommu_iova_to_phys(domain, iova);
+		pa2 = iommu_iova_to_phys_hard(domain, iova);
+		if (pa != pa2) {
+			dev_err(dev,
+				"iova_to_phys doesn't match iova_to_phys_hard: %pa != %pa\n",
+				&pa, &pa2);
+			ret = -EINVAL;
+			goto out;
+		}
+		pa2 = virt_to_phys(data);
+		if (pa != pa2) {
+			dev_err(dev,
+				"iova_to_phys doesn't match virt_to_phys: %pa != %pa\n",
+				&pa, &pa2);
+			ret = -EINVAL;
+			goto out;
+		}
+		dma_unmap_single(dev, iova, size, DMA_TO_DEVICE);
+		for (j = 0; j < size; ++j) {
+			if (data[j] != 0xa5) {
+				dev_err(dev, "data[%d] != 0xa5 (got 0x%x)\n",
+					j, data[j]);
+				ret = -EINVAL;
+				goto out;
+			}
+		}
+		kfree(data);
+	}
+
+out:
+	if (ret)
+		ds_printf(dev, s, "  -> FAILED\n");
+	else
+		ds_printf(dev, s, "  -> SUCCEEDED\n");
+
+	return ret;
+}
+
+/* Creates a fresh fast mapping and applies @fn to it */
+static int __apply_to_new_mapping(struct seq_file *s,
+				    int (*fn)(struct device *dev,
+					      struct seq_file *s,
+					      struct iommu_domain *domain,
+					      void *priv),
+				    void *priv)
+{
+	struct dma_iommu_mapping *mapping;
+	struct iommu_debug_device *ddev = s->private;
+	struct device *dev = ddev->dev;
+	int ret = -EINVAL, fast = 1;
+	phys_addr_t pt_phys;
+
+	mapping = arm_iommu_create_mapping(&platform_bus_type, 0,
+						(SZ_1G * 4ULL));
+	if (!mapping)
+		goto out;
+
+	if (iommu_domain_set_attr(mapping->domain, DOMAIN_ATTR_FAST, &fast)) {
+		seq_puts(s, "iommu_domain_set_attr failed\n");
+		goto out_release_mapping;
+	}
+
+	if (arm_iommu_attach_device(dev, mapping))
+		goto out_release_mapping;
+
+	if (iommu_domain_get_attr(mapping->domain, DOMAIN_ATTR_PT_BASE_ADDR,
+				  &pt_phys)) {
+		ds_printf(dev, s, "Couldn't get page table base address\n");
+		goto out_release_mapping;
+	}
+
+	dev_err(dev, "testing with pgtables at %pa\n", &pt_phys);
+	if (iommu_enable_config_clocks(mapping->domain)) {
+		ds_printf(dev, s, "Couldn't enable clocks\n");
+		goto out_release_mapping;
+	}
+	ret = fn(dev, s, mapping->domain, priv);
+	iommu_disable_config_clocks(mapping->domain);
+
+	arm_iommu_detach_device(dev);
+out_release_mapping:
+	arm_iommu_release_mapping(mapping);
+out:
+	seq_printf(s, "%s\n", ret ? "FAIL" : "SUCCESS");
+	return 0;
+}
+
+static int iommu_debug_functional_fast_dma_api_show(struct seq_file *s,
+						    void *ignored)
+{
+	size_t sizes[] = {SZ_4K, SZ_8K, SZ_16K, SZ_64K, 0};
+	int ret = 0;
+
+	ret |= __apply_to_new_mapping(s, __functional_dma_api_alloc_test, NULL);
+	ret |= __apply_to_new_mapping(s, __functional_dma_api_basic_test, NULL);
+	ret |= __apply_to_new_mapping(s, __functional_dma_api_va_test, sizes);
+	return ret;
+}
+
+static int iommu_debug_functional_fast_dma_api_open(struct inode *inode,
+						    struct file *file)
+{
+	return single_open(file, iommu_debug_functional_fast_dma_api_show,
+			   inode->i_private);
+}
+
+static const struct file_operations iommu_debug_functional_fast_dma_api_fops = {
+	.open	 = iommu_debug_functional_fast_dma_api_open,
+	.read	 = seq_read,
+	.llseek	 = seq_lseek,
+	.release = single_release,
+};
+
+static int iommu_debug_functional_arm_dma_api_show(struct seq_file *s,
+						   void *ignored)
+{
+	struct dma_iommu_mapping *mapping;
+	struct iommu_debug_device *ddev = s->private;
+	struct device *dev = ddev->dev;
+	size_t sizes[] = {SZ_4K, SZ_64K, SZ_2M, SZ_1M * 12, 0};
+	int ret = -EINVAL;
+
+	/* Cover the entire 32-bit IOVA space: size = 4GB - 1 */
+	mapping = arm_iommu_create_mapping(&platform_bus_type, 0,
+						(SZ_1G * 4ULL - 1));
+	if (!mapping)
+		goto out;
+
+	if (arm_iommu_attach_device(dev, mapping))
+		goto out_release_mapping;
+
+	ret = __functional_dma_api_alloc_test(dev, s, mapping->domain, sizes);
+	ret |= __functional_dma_api_basic_test(dev, s, mapping->domain, sizes);
+
+	arm_iommu_detach_device(dev);
+out_release_mapping:
+	arm_iommu_release_mapping(mapping);
+out:
+	seq_printf(s, "%s\n", ret ? "FAIL" : "SUCCESS");
+	return 0;
+}
+
+static int iommu_debug_functional_arm_dma_api_open(struct inode *inode,
+						   struct file *file)
+{
+	return single_open(file, iommu_debug_functional_arm_dma_api_show,
+			   inode->i_private);
+}
+
+static const struct file_operations iommu_debug_functional_arm_dma_api_fops = {
+	.open	 = iommu_debug_functional_arm_dma_api_open,
+	.read	 = seq_read,
+	.llseek	 = seq_lseek,
+	.release = single_release,
+};
+
+static int iommu_debug_attach_do_attach(struct iommu_debug_device *ddev,
+					int val, bool is_secure)
+{
+	struct bus_type *bus;
+
+	bus = msm_iommu_get_bus(ddev->dev);
+	if (!bus)
+		return -EINVAL;
+
+	ddev->domain = iommu_domain_alloc(bus);
+	if (!ddev->domain) {
+		pr_err("Couldn't allocate domain\n");
+		return -ENOMEM;
+	}
+
+	val = VMID_CP_CAMERA;
+	if (is_secure && iommu_domain_set_attr(ddev->domain,
+					       DOMAIN_ATTR_SECURE_VMID,
+					       &val)) {
+		pr_err("Couldn't set secure vmid to %d\n", val);
+		goto out_domain_free;
+	}
+
+	if (iommu_attach_device(ddev->domain, ddev->dev)) {
+		pr_err("Couldn't attach new domain to device. Is it already attached?\n");
+		goto out_domain_free;
+	}
+
+	return 0;
+
+out_domain_free:
+	iommu_domain_free(ddev->domain);
+	ddev->domain = NULL;
+	return -EIO;
+}
+
+static ssize_t __iommu_debug_dma_attach_write(struct file *file,
+					  const char __user *ubuf,
+					  size_t count, loff_t *offset)
+{
+	struct iommu_debug_device *ddev = file->private_data;
+	struct device *dev = ddev->dev;
+	struct dma_iommu_mapping *dma_mapping;
+	ssize_t retval = -EINVAL;
+	int val;
+
+	if (kstrtoint_from_user(ubuf, count, 0, &val)) {
+		pr_err("Invalid format. Expected a hex or decimal integer\n");
+		retval = -EFAULT;
+		goto out;
+	}
+
+	if (val) {
+		if (dev->archdata.mapping)
+			if (dev->archdata.mapping->domain) {
+				pr_err("Already attached.\n");
+				retval = -EINVAL;
+				goto out;
+			}
+		if (WARN(dev->archdata.iommu,
+			"Attachment tracking out of sync with device\n")) {
+			retval = -EINVAL;
+			goto out;
+		}
+
+		dma_mapping = arm_iommu_create_mapping(&platform_bus_type, 0,
+				(SZ_1G * 4ULL));
+
+		if (!dma_mapping)
+			goto out;
+
+		if (arm_iommu_attach_device(dev, dma_mapping))
+			goto out_release_mapping;
+		pr_err("Attached\n");
+	} else {
+		if (!dev->archdata.mapping) {
+			pr_err("No mapping. Did you already attach?\n");
+			retval = -EINVAL;
+			goto out;
+		}
+		if (!dev->archdata.mapping->domain) {
+			pr_err("No domain. Did you already attach?\n");
+			retval = -EINVAL;
+			goto out;
+		}
+		arm_iommu_detach_device(dev);
+		arm_iommu_release_mapping(dev->archdata.mapping);
+		pr_err("Detached\n");
+	}
+	retval = count;
+	return retval;
+
+out_release_mapping:
+	arm_iommu_release_mapping(dma_mapping);
+out:
+	return retval;
+}
+
+static ssize_t __iommu_debug_attach_write(struct file *file,
+					  const char __user *ubuf,
+					  size_t count, loff_t *offset,
+					  bool is_secure)
+{
+	struct iommu_debug_device *ddev = file->private_data;
+	ssize_t retval;
+	int val;
+
+	if (kstrtoint_from_user(ubuf, count, 0, &val)) {
+		pr_err("Invalid format. Expected a hex or decimal integer\n");
+		retval = -EFAULT;
+		goto out;
+	}
+
+	if (val) {
+		if (ddev->domain) {
+			pr_err("Already attached.\n");
+			retval = -EINVAL;
+			goto out;
+		}
+		if (WARN(ddev->dev->archdata.iommu,
+			 "Attachment tracking out of sync with device\n")) {
+			retval = -EINVAL;
+			goto out;
+		}
+		if (iommu_debug_attach_do_attach(ddev, val, is_secure)) {
+			retval = -EIO;
+			goto out;
+		}
+		pr_err("Attached\n");
+	} else {
+		if (!ddev->domain) {
+			pr_err("No domain. Did you already attach?\n");
+			retval = -EINVAL;
+			goto out;
+		}
+		iommu_detach_device(ddev->domain, ddev->dev);
+		iommu_domain_free(ddev->domain);
+		ddev->domain = NULL;
+		pr_err("Detached\n");
+	}
+
+	retval = count;
+out:
+	return retval;
+}
+
+static ssize_t iommu_debug_dma_attach_write(struct file *file,
+					  const char __user *ubuf,
+					  size_t count, loff_t *offset)
+{
+	return __iommu_debug_dma_attach_write(file, ubuf, count, offset);
+}
+
+static ssize_t iommu_debug_dma_attach_read(struct file *file, char __user *ubuf,
+				       size_t count, loff_t *offset)
+{
+	struct iommu_debug_device *ddev = file->private_data;
+	struct device *dev = ddev->dev;
+	char c[2];
+
+	if (*offset)
+		return 0;
+
+	if (!dev->archdata.mapping)
+		c[0] = '0';
+	else
+		c[0] = dev->archdata.mapping->domain ? '1' : '0';
+
+	c[1] = '\n';
+	if (copy_to_user(ubuf, &c, 2)) {
+		pr_err("copy_to_user failed\n");
+		return -EFAULT;
+	}
+	*offset = 1;		/* non-zero means we're done */
+
+	return 2;
+}
+
+static const struct file_operations iommu_debug_dma_attach_fops = {
+	.open	= simple_open,
+	.write	= iommu_debug_dma_attach_write,
+	.read	= iommu_debug_dma_attach_read,
+};
+
+static ssize_t iommu_debug_virt_addr_read(struct file *file, char __user *ubuf,
+				     size_t count, loff_t *offset)
+{
+	char buf[100];
+	ssize_t retval;
+	size_t buflen;
+
+	if (*offset)
+		return 0;
+
+	memset(buf, 0, 100);
+
+	if (!virt_addr)
+		strlcpy(buf, "FAIL\n", 100);
+	else
+		snprintf(buf, 100, "0x%pK\n", virt_addr);
+
+	buflen = strlen(buf);
+	if (copy_to_user(ubuf, buf, buflen)) {
+		pr_err("Couldn't copy_to_user\n");
+		retval = -EFAULT;
+	} else {
+		*offset = 1;	/* non-zero means we're done */
+		retval = buflen;
+	}
+
+	return retval;
+}
+
+static const struct file_operations iommu_debug_virt_addr_fops = {
+	.open	= simple_open,
+	.read	= iommu_debug_virt_addr_read,
+};
+
+static ssize_t iommu_debug_attach_write(struct file *file,
+					  const char __user *ubuf,
+					  size_t count, loff_t *offset)
+{
+	return __iommu_debug_attach_write(file, ubuf, count, offset,
+					  false);
+}
+
+static ssize_t iommu_debug_attach_read(struct file *file, char __user *ubuf,
+				       size_t count, loff_t *offset)
+{
+	struct iommu_debug_device *ddev = file->private_data;
+	char c[2];
+
+	if (*offset)
+		return 0;
+
+	c[0] = ddev->domain ? '1' : '0';
+	c[1] = '\n';
+	if (copy_to_user(ubuf, &c, 2)) {
+		pr_err("copy_to_user failed\n");
+		return -EFAULT;
+	}
+	*offset = 1;		/* non-zero means we're done */
+
+	return 2;
+}
+
+static const struct file_operations iommu_debug_attach_fops = {
+	.open	= simple_open,
+	.write	= iommu_debug_attach_write,
+	.read	= iommu_debug_attach_read,
+};
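+
+/*
+ * Example usage (the debugfs path is an assumption; the entry is created
+ * under debugfs_tests_dir elsewhere in this file):
+ *
+ *	echo 1 > /sys/kernel/debug/.../attach	# allocate domain and attach
+ *	cat /sys/kernel/debug/.../attach	# prints 1 while attached
+ *	echo 0 > /sys/kernel/debug/.../attach	# detach and free the domain
+ */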
+
+static ssize_t iommu_debug_attach_write_secure(struct file *file,
+					       const char __user *ubuf,
+					       size_t count, loff_t *offset)
+{
+	return __iommu_debug_attach_write(file, ubuf, count, offset,
+					  true);
+}
+
+static const struct file_operations iommu_debug_secure_attach_fops = {
+	.open	= simple_open,
+	.write	= iommu_debug_attach_write_secure,
+	.read	= iommu_debug_attach_read,
+};
+
+static ssize_t iommu_debug_pte_write(struct file *file,
+				      const char __user *ubuf,
+				      size_t count, loff_t *offset)
+{
+	struct iommu_debug_device *ddev = file->private_data;
+	dma_addr_t iova;
+
+	if (kstrtox_from_user(ubuf, count, 0, &iova)) {
+		pr_err("Invalid format for iova\n");
+		ddev->iova = 0;
+		return -EINVAL;
+	}
+
+	ddev->iova = iova;
+	pr_err("Saved iova=%pa for future PTE commands\n", &iova);
+	return count;
+}
+
+
+static ssize_t iommu_debug_pte_read(struct file *file, char __user *ubuf,
+				     size_t count, loff_t *offset)
+{
+	struct iommu_debug_device *ddev = file->private_data;
+	struct device *dev = ddev->dev;
+	uint64_t pte;
+	char buf[100];
+	ssize_t retval;
+	size_t buflen;
+
+	if (kptr_restrict != 0) {
+		pr_err("kptr_restrict needs to be disabled.\n");
+		return -EPERM;
+	}
+	if (!dev->archdata.mapping) {
+		pr_err("No mapping. Did you already attach?\n");
+		return -EINVAL;
+	}
+	if (!dev->archdata.mapping->domain) {
+		pr_err("No domain. Did you already attach?\n");
+		return -EINVAL;
+	}
+
+	if (*offset)
+		return 0;
+
+	memset(buf, 0, 100);
+
+	pte = iommu_iova_to_pte(dev->archdata.mapping->domain,
+			ddev->iova);
+
+	if (!pte)
+		strlcpy(buf, "FAIL\n", 100);
+	else
+		snprintf(buf, 100, "pte=%016llx\n", pte);
+
+	buflen = strlen(buf);
+	if (copy_to_user(ubuf, buf, buflen)) {
+		pr_err("Couldn't copy_to_user\n");
+		retval = -EFAULT;
+	} else {
+		*offset = 1;	/* non-zero means we're done */
+		retval = buflen;
+	}
+
+	return retval;
+}
+
+static const struct file_operations iommu_debug_pte_fops = {
+	.open	= simple_open,
+	.write	= iommu_debug_pte_write,
+	.read	= iommu_debug_pte_read,
+};
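+
+/*
+ * Example usage (debugfs path hedged as above): write an iova, then read
+ * back its PTE. Requires kptr_restrict == 0 and an attached mapping.
+ *
+ *	echo 0x10000 > /sys/kernel/debug/.../pte
+ *	cat /sys/kernel/debug/.../pte		# prints pte=<16 hex digits>
+ */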
+
+static ssize_t iommu_debug_atos_write(struct file *file,
+				      const char __user *ubuf,
+				      size_t count, loff_t *offset)
+{
+	struct iommu_debug_device *ddev = file->private_data;
+	dma_addr_t iova;
+
+	if (kstrtox_from_user(ubuf, count, 0, &iova)) {
+		pr_err("Invalid format for iova\n");
+		ddev->iova = 0;
+		return -EINVAL;
+	}
+
+	ddev->iova = iova;
+	pr_err("Saved iova=%pa for future ATOS commands\n", &iova);
+	return count;
+}
+
+static ssize_t iommu_debug_atos_read(struct file *file, char __user *ubuf,
+				     size_t count, loff_t *offset)
+{
+	struct iommu_debug_device *ddev = file->private_data;
+	phys_addr_t phys;
+	char buf[100];
+	ssize_t retval;
+	size_t buflen;
+
+	if (kptr_restrict != 0) {
+		pr_err("kptr_restrict needs to be disabled.\n");
+		return -EPERM;
+	}
+	if (!ddev->domain) {
+		pr_err("No domain. Did you already attach?\n");
+		return -EINVAL;
+	}
+
+	if (*offset)
+		return 0;
+
+	memset(buf, 0, 100);
+
+	phys = iommu_iova_to_phys_hard(ddev->domain, ddev->iova);
+	if (!phys) {
+		strlcpy(buf, "FAIL\n", 100);
+		phys = iommu_iova_to_phys(ddev->domain, ddev->iova);
+		dev_err(ddev->dev, "ATOS for %pa failed. Software walk returned: %pa\n",
+			&ddev->iova, &phys);
+	} else {
+		snprintf(buf, 100, "%pa\n", &phys);
+	}
+
+	buflen = strlen(buf);
+	if (copy_to_user(ubuf, buf, buflen)) {
+		pr_err("Couldn't copy_to_user\n");
+		retval = -EFAULT;
+	} else {
+		*offset = 1;	/* non-zero means we're done */
+		retval = buflen;
+	}
+
+	return retval;
+}
+
+static const struct file_operations iommu_debug_atos_fops = {
+	.open	= simple_open,
+	.write	= iommu_debug_atos_write,
+	.read	= iommu_debug_atos_read,
+};
+
+static ssize_t iommu_debug_dma_atos_read(struct file *file, char __user *ubuf,
+				     size_t count, loff_t *offset)
+{
+	struct iommu_debug_device *ddev = file->private_data;
+	struct device *dev = ddev->dev;
+	phys_addr_t phys;
+	char buf[100];
+	ssize_t retval;
+	size_t buflen;
+
+	if (kptr_restrict != 0) {
+		pr_err("kptr_restrict needs to be disabled.\n");
+		return -EPERM;
+	}
+	if (!dev->archdata.mapping) {
+		pr_err("No mapping. Did you already attach?\n");
+		return -EINVAL;
+	}
+	if (!dev->archdata.mapping->domain) {
+		pr_err("No domain. Did you already attach?\n");
+		return -EINVAL;
+	}
+
+	if (*offset)
+		return 0;
+
+	memset(buf, 0, 100);
+
+	phys = iommu_iova_to_phys_hard(dev->archdata.mapping->domain,
+			ddev->iova);
+	if (!phys)
+		strlcpy(buf, "FAIL\n", 100);
+	else
+		snprintf(buf, 100, "%pa\n", &phys);
+
+	buflen = strlen(buf);
+	if (copy_to_user(ubuf, buf, buflen)) {
+		pr_err("Couldn't copy_to_user\n");
+		retval = -EFAULT;
+	} else {
+		*offset = 1;	/* non-zero means we're done */
+		retval = buflen;
+	}
+
+	return retval;
+}
+
+static const struct file_operations iommu_debug_dma_atos_fops = {
+	.open	= simple_open,
+	.write	= iommu_debug_atos_write,
+	.read	= iommu_debug_dma_atos_read,
+};
+
+static ssize_t iommu_debug_map_write(struct file *file, const char __user *ubuf,
+				     size_t count, loff_t *offset)
+{
+	ssize_t retval = -EINVAL;
+	int ret;
+	char *comma1, *comma2, *comma3;
+	char buf[100];
+	dma_addr_t iova;
+	phys_addr_t phys;
+	size_t size;
+	int prot;
+	struct iommu_debug_device *ddev = file->private_data;
+
+	if (count >= 100) {
+		pr_err("Value too large\n");
+		return -EINVAL;
+	}
+
+	if (!ddev->domain) {
+		pr_err("No domain. Did you already attach?\n");
+		return -EINVAL;
+	}
+
+	memset(buf, 0, 100);
+
+	if (copy_from_user(buf, ubuf, count)) {
+		pr_err("Couldn't copy from user\n");
+		retval = -EFAULT;
+		goto out;
+	}
+
+	comma1 = strnchr(buf, count, ',');
+	if (!comma1)
+		goto invalid_format;
+
+	comma2 = strnchr(comma1 + 1, count, ',');
+	if (!comma2)
+		goto invalid_format;
+
+	comma3 = strnchr(comma2 + 1, count, ',');
+	if (!comma3)
+		goto invalid_format;
+
+	/* split up the words */
+	*comma1 = *comma2 = *comma3 = '\0';
+
+	if (kstrtoux(buf, 0, &iova))
+		goto invalid_format;
+
+	if (kstrtoux(comma1 + 1, 0, &phys))
+		goto invalid_format;
+
+	if (kstrtosize_t(comma2 + 1, 0, &size))
+		goto invalid_format;
+
+	if (kstrtoint(comma3 + 1, 0, &prot))
+		goto invalid_format;
+
+	ret = iommu_map(ddev->domain, iova, phys, size, prot);
+	if (ret) {
+		pr_err("iommu_map failed with %d\n", ret);
+		retval = -EIO;
+		goto out;
+	}
+
+	retval = count;
+	pr_err("Mapped %pa to %pa (len=0x%zx, prot=0x%x)\n",
+	       &iova, &phys, size, prot);
+out:
+	return retval;
+
+invalid_format:
+	pr_err("Invalid format. Expected: iova,phys,len,prot where `prot' is the bitwise OR of IOMMU_READ, IOMMU_WRITE, etc.\n");
+	return retval;
+}
+
+static const struct file_operations iommu_debug_map_fops = {
+	.open	= simple_open,
+	.write	= iommu_debug_map_write,
+};
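+
+/*
+ * Example usage (debugfs path hedged as above): map iova 0x10000 to phys
+ * 0xa000 with length 4K and prot = IOMMU_READ | IOMMU_WRITE (0x3):
+ *
+ *	echo "0x10000,0xa000,0x1000,0x3" > /sys/kernel/debug/.../map
+ */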
+
+static ssize_t iommu_debug_dma_map_write(struct file *file,
+		const char __user *ubuf, size_t count, loff_t *offset)
+{
+	ssize_t retval = -EINVAL;
+	char *comma1, *comma2;
+	char buf[100];
+	unsigned long addr;
+	void *v_addr;
+	dma_addr_t iova;
+	size_t size;
+	unsigned int attr;
+	struct dma_attrs coherent_attr;
+	struct dma_attrs *dma_attrs = &coherent_attr;
+	struct iommu_debug_device *ddev = file->private_data;
+	struct device *dev = ddev->dev;
+
+	init_dma_attrs(dma_attrs);
+
+	if (count >= 100) {
+		pr_err("Value too large\n");
+		return -EINVAL;
+	}
+
+	if (!dev->archdata.mapping) {
+		pr_err("No mapping. Did you already attach?\n");
+		retval = -EINVAL;
+		goto out;
+	}
+	if (!dev->archdata.mapping->domain) {
+		pr_err("No domain. Did you already attach?\n");
+		retval = -EINVAL;
+		goto out;
+	}
+
+	memset(buf, 0, 100);
+
+	if (copy_from_user(buf, ubuf, count)) {
+		pr_err("Couldn't copy from user\n");
+		retval = -EFAULT;
+		goto out;
+	}
+
+	comma1 = strnchr(buf, count, ',');
+	if (!comma1)
+		goto invalid_format;
+
+	comma2 = strnchr(comma1 + 1, count, ',');
+	if (!comma2)
+		goto invalid_format;
+
+	*comma1 = *comma2 = '\0';
+
+	if (kstrtoul(buf, 0, &addr))
+		goto invalid_format;
+	v_addr = (void *)addr;
+
+	if (kstrtosize_t(comma1 + 1, 0, &size))
+		goto invalid_format;
+
+	if (kstrtouint(comma2 + 1, 0, &attr))
+		goto invalid_format;
+
+	if (v_addr < virt_addr || v_addr > (virt_addr + SZ_1M - 1))
+		goto invalid_addr;
+
+	if (attr == 0)
+		dma_attrs = NULL;
+	else if (attr == 1)
+		dma_set_attr(DMA_ATTR_FORCE_COHERENT, dma_attrs);
+	else if (attr == 2)
+		dma_set_attr(DMA_ATTR_FORCE_NON_COHERENT, dma_attrs);
+	else
+		goto invalid_format;
+
+	iova = dma_map_single_attrs(dev, v_addr, size,
+					DMA_TO_DEVICE, dma_attrs);
+
+	if (dma_mapping_error(dev, iova)) {
+		pr_err("Failed to perform dma_map_single\n");
+		retval = -EINVAL;
+		goto out;
+	}
+
+	retval = count;
+	pr_err("Mapped 0x%p to %pa (len=0x%zx)\n",
+			v_addr, &iova, size);
+	ddev->iova = iova;
+	pr_err("Saved iova=%pa for future PTE commands\n", &iova);
+out:
+	return retval;
+
+invalid_format:
+	pr_err("Invalid format. Expected: addr,len,dma attr where 'dma attr' is\n0: normal mapping\n1: force coherent\n2: force non-coherent\n");
+	return retval;
+
+invalid_addr:
+	pr_err("Invalid addr given! Address should be within 1MB size from start addr returned by doing 'cat virt_addr'.\n");
+	return retval;
+}
+
+static ssize_t iommu_debug_dma_map_read(struct file *file, char __user *ubuf,
+	     size_t count, loff_t *offset)
+{
+	struct iommu_debug_device *ddev = file->private_data;
+	struct device *dev = ddev->dev;
+	char buf[100];
+	ssize_t retval;
+	size_t buflen;
+	dma_addr_t iova;
+
+	if (!dev->archdata.mapping) {
+		pr_err("No mapping. Did you already attach?\n");
+		return -EINVAL;
+	}
+	if (!dev->archdata.mapping->domain) {
+		pr_err("No domain. Did you already attach?\n");
+		return -EINVAL;
+	}
+
+	if (*offset)
+		return 0;
+
+	memset(buf, 0, 100);
+
+	iova = ddev->iova;
+	snprintf(buf, 100, "%pa\n", &iova);
+
+	buflen = strlen(buf);
+	if (copy_to_user(ubuf, buf, buflen)) {
+		pr_err("Couldn't copy_to_user\n");
+		retval = -EFAULT;
+	} else {
+		*offset = 1;	/* non-zero means we're done */
+		retval = buflen;
+	}
+
+	return retval;
+}
+
+static const struct file_operations iommu_debug_dma_map_fops = {
+	.open	= simple_open,
+	.write	= iommu_debug_dma_map_write,
+	.read	= iommu_debug_dma_map_read,
+};
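+
+/*
+ * Example usage (debugfs paths hedged as above): the address written must
+ * lie within the 1MB region whose start the virt_addr file reports; attr 0
+ * is a normal mapping, 1 forces coherent, 2 forces non-coherent.
+ *
+ *	VA=$(cat /sys/kernel/debug/.../virt_addr)
+ *	echo "$VA,0x1000,0" > /sys/kernel/debug/.../dma_map
+ *	cat /sys/kernel/debug/.../dma_map	# prints the iova obtained
+ */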
+
+static ssize_t iommu_debug_unmap_write(struct file *file,
+				       const char __user *ubuf,
+				       size_t count, loff_t *offset)
+{
+	ssize_t retval = 0;
+	char *comma1;
+	char buf[100];
+	dma_addr_t iova;
+	size_t size;
+	size_t unmapped;
+	struct iommu_debug_device *ddev = file->private_data;
+
+	if (count >= 100) {
+		pr_err("Value too large\n");
+		return -EINVAL;
+	}
+
+	if (!ddev->domain) {
+		pr_err("No domain. Did you already attach?\n");
+		return -EINVAL;
+	}
+
+	memset(buf, 0, 100);
+
+	if (copy_from_user(buf, ubuf, count)) {
+		pr_err("Couldn't copy from user\n");
+		retval = -EFAULT;
+		goto out;
+	}
+
+	comma1 = strnchr(buf, count, ',');
+	if (!comma1)
+		goto invalid_format;
+
+	/* split up the words */
+	*comma1 = '\0';
+
+	if (kstrtoux(buf, 0, &iova))
+		goto invalid_format;
+
+	if (kstrtosize_t(comma1 + 1, 0, &size))
+		goto invalid_format;
+
+	unmapped = iommu_unmap(ddev->domain, iova, size);
+	if (unmapped != size) {
+		pr_err("iommu_unmap failed. Expected to unmap: 0x%zx, unmapped: 0x%zx",
+		       size, unmapped);
+		return -EIO;
+	}
+
+	retval = count;
+	pr_err("Unmapped %pa (len=0x%zx)\n", &iova, size);
+out:
+	return retval;
+
+invalid_format:
+	pr_err("Invalid format. Expected: iova,len\n");
+	return retval;
+}
+
+static const struct file_operations iommu_debug_unmap_fops = {
+	.open	= simple_open,
+	.write	= iommu_debug_unmap_write,
+};
+
+static ssize_t iommu_debug_dma_unmap_write(struct file *file,
+				       const char __user *ubuf,
+				       size_t count, loff_t *offset)
+{
+	ssize_t retval = 0;
+	char *comma1, *comma2;
+	char buf[100];
+	size_t size;
+	unsigned int attr;
+	dma_addr_t iova;
+	struct dma_attrs coherent_attr;
+	struct dma_attrs *dma_attrs = &coherent_attr;
+	struct iommu_debug_device *ddev = file->private_data;
+	struct device *dev = ddev->dev;
+
+	init_dma_attrs(dma_attrs);
+
+	if (count >= 100) {
+		pr_err("Value too large\n");
+		return -EINVAL;
+	}
+
+	if (!dev->archdata.mapping) {
+		pr_err("No mapping. Did you already attach?\n");
+		retval = -EINVAL;
+		goto out;
+	}
+	if (!dev->archdata.mapping->domain) {
+		pr_err("No domain. Did you already attach?\n");
+		retval = -EINVAL;
+		goto out;
+	}
+
+	memset(buf, 0, 100);
+
+	if (copy_from_user(buf, ubuf, count)) {
+		pr_err("Couldn't copy from user\n");
+		retval = -EFAULT;
+		goto out;
+	}
+
+	comma1 = strnchr(buf, count, ',');
+	if (!comma1)
+		goto invalid_format;
+
+	comma2 = strnchr(comma1 + 1, count, ',');
+	if (!comma2)
+		goto invalid_format;
+
+	*comma1 = *comma2 = '\0';
+
+	if (kstrtoux(buf, 0, &iova))
+		goto invalid_format;
+
+	if (kstrtosize_t(comma1 + 1, 0, &size))
+		goto invalid_format;
+
+	if (kstrtouint(comma2 + 1, 0, &attr))
+		goto invalid_format;
+
+	if (attr == 0)
+		dma_attrs = NULL;
+	else if (attr == 1)
+		dma_set_attr(DMA_ATTR_FORCE_COHERENT, dma_attrs);
+	else if (attr == 2)
+		dma_set_attr(DMA_ATTR_FORCE_NON_COHERENT, dma_attrs);
+	else
+		goto invalid_format;
+
+	dma_unmap_single_attrs(dev, iova, size, DMA_TO_DEVICE, dma_attrs);
+
+	retval = count;
+	pr_err("Unmapped %pa (len=0x%zx)\n", &iova, size);
+out:
+	return retval;
+
+invalid_format:
+	pr_err("Invalid format. Expected: iova,len, dma attr\n");
+	return retval;
+}
+
+static const struct file_operations iommu_debug_dma_unmap_fops = {
+	.open	= simple_open,
+	.write	= iommu_debug_dma_unmap_write,
+};
+
+static ssize_t iommu_debug_config_clocks_write(struct file *file,
+					       const char __user *ubuf,
+					       size_t count, loff_t *offset)
+{
+	char buf;
+	struct iommu_debug_device *ddev = file->private_data;
+	struct device *dev = ddev->dev;
+
+	/* we're expecting a single character plus (optionally) a newline */
+	if (count > 2) {
+		dev_err(dev, "Invalid value\n");
+		return -EINVAL;
+	}
+
+	if (!ddev->domain) {
+		dev_err(dev, "No domain. Did you already attach?\n");
+		return -EINVAL;
+	}
+
+	if (copy_from_user(&buf, ubuf, 1)) {
+		dev_err(dev, "Couldn't copy from user\n");
+		return -EFAULT;
+	}
+
+	mutex_lock(&ddev->clk_lock);
+	switch (buf) {
+	case '0':
+		if (ddev->clk_count == 0) {
+			dev_err(dev, "Config clocks already disabled\n");
+			break;
+		}
+
+		if (--ddev->clk_count > 0)
+			break;
+
+		dev_err(dev, "Disabling config clocks\n");
+		iommu_disable_config_clocks(ddev->domain);
+		break;
+	case '1':
+		if (ddev->clk_count++ > 0)
+			break;
+
+		dev_err(dev, "Enabling config clocks\n");
+		if (iommu_enable_config_clocks(ddev->domain))
+			dev_err(dev, "Failed!\n");
+		break;
+	default:
+		dev_err(dev, "Invalid value. Should be 0 or 1.\n");
+		mutex_unlock(&ddev->clk_lock);
+		return -EINVAL;
+	}
+	mutex_unlock(&ddev->clk_lock);
+
+	return count;
+}
+
+static const struct file_operations iommu_debug_config_clocks_fops = {
+	.open	= simple_open,
+	.write	= iommu_debug_config_clocks_write,
+};
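+
+/*
+ * Example (editor's note): config-clock votes are reference counted by the
+ * write handler above, so enables and disables must be balanced:
+ *
+ *	echo 1 > .../config_clocks	# first vote, clocks enabled
+ *	echo 1 > .../config_clocks	# second vote, hardware unchanged
+ *	echo 0 > .../config_clocks	# drop one vote, clocks stay on
+ *	echo 0 > .../config_clocks	# last vote dropped, clocks disabled
+ */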
+
+/*
+ * The following will only work for drivers that implement the generic
+ * device tree bindings described in
+ * Documentation/devicetree/bindings/iommu/iommu.txt
+ */
+static int snarf_iommu_devices(struct device *dev, const char *name)
+{
+	struct iommu_debug_device *ddev;
+	struct dentry *dir;
+
+	if (IS_ERR_OR_NULL(dev))
+		return -EINVAL;
+
+	ddev = kzalloc(sizeof(*ddev), GFP_KERNEL);
+	if (!ddev)
+		return -ENOMEM;
+	mutex_init(&ddev->clk_lock);
+	ddev->dev = dev;
+	dir = debugfs_create_dir(name, debugfs_tests_dir);
+	if (!dir) {
+		pr_err("Couldn't create iommu/devices/%s debugfs dir\n",
+		       name);
+		goto err;
+	}
+
+	if (!debugfs_create_file("nr_iters", S_IRUSR, dir, &iters_per_op,
+				&iommu_debug_nr_iters_ops)) {
+		pr_err("Couldn't create iommu/devices/%s/nr_iters debugfs file\n",
+		       name);
+		goto err_rmdir;
+	}
+
+	if (!debugfs_create_file("virt_addr", S_IRUSR, dir, ddev,
+				&iommu_debug_virt_addr_fops)) {
+		pr_err("Couldn't create iommu/devices/%s/virt_addr debugfs file\n",
+		       name);
+		goto err_rmdir;
+	}
+
+	if (!debugfs_create_file("profiling", S_IRUSR, dir, ddev,
+				 &iommu_debug_profiling_fops)) {
+		pr_err("Couldn't create iommu/devices/%s/profiling debugfs file\n",
+		       name);
+		goto err_rmdir;
+	}
+
+	if (!debugfs_create_file("secure_profiling", S_IRUSR, dir, ddev,
+				 &iommu_debug_secure_profiling_fops)) {
+		pr_err("Couldn't create iommu/devices/%s/secure_profiling debugfs file\n",
+		       name);
+		goto err_rmdir;
+	}
+
+	if (!debugfs_create_file("profiling_fast", S_IRUSR, dir, ddev,
+				 &iommu_debug_profiling_fast_fops)) {
+		pr_err("Couldn't create iommu/devices/%s/profiling_fast debugfs file\n",
+		       name);
+		goto err_rmdir;
+	}
+
+	if (!debugfs_create_file("profiling_fast_dma_api", S_IRUSR, dir, ddev,
+				 &iommu_debug_profiling_fast_dma_api_fops)) {
+		pr_err("Couldn't create iommu/devices/%s/profiling_fast_dma_api debugfs file\n",
+		       name);
+		goto err_rmdir;
+	}
+
+	if (!debugfs_create_file("functional_fast_dma_api", S_IRUSR, dir, ddev,
+				 &iommu_debug_functional_fast_dma_api_fops)) {
+		pr_err("Couldn't create iommu/devices/%s/functional_fast_dma_api debugfs file\n",
+		       name);
+		goto err_rmdir;
+	}
+
+	if (!debugfs_create_file("functional_arm_dma_api", S_IRUSR, dir, ddev,
+				 &iommu_debug_functional_arm_dma_api_fops)) {
+		pr_err("Couldn't create iommu/devices/%s/functional_arm_dma_api debugfs file\n",
+		       name);
+		goto err_rmdir;
+	}
+
+	if (!debugfs_create_file("dma_attach", S_IRUSR, dir, ddev,
+				 &iommu_debug_dma_attach_fops)) {
+		pr_err("Couldn't create iommu/devices/%s/dma_attach debugfs file\n",
+		       name);
+		goto err_rmdir;
+	}
+
+	if (!debugfs_create_file("attach", S_IRUSR, dir, ddev,
+				 &iommu_debug_attach_fops)) {
+		pr_err("Couldn't create iommu/devices/%s/attach debugfs file\n",
+		       name);
+		goto err_rmdir;
+	}
+
+	if (!debugfs_create_file("secure_attach", S_IRUSR, dir, ddev,
+				 &iommu_debug_secure_attach_fops)) {
+		pr_err("Couldn't create iommu/devices/%s/secure_attach debugfs file\n",
+		       name);
+		goto err_rmdir;
+	}
+
+	if (!debugfs_create_file("atos", S_IWUSR, dir, ddev,
+				 &iommu_debug_atos_fops)) {
+		pr_err("Couldn't create iommu/devices/%s/atos debugfs file\n",
+		       name);
+		goto err_rmdir;
+	}
+
+	if (!debugfs_create_file("dma_atos", S_IWUSR, dir, ddev,
+				 &iommu_debug_dma_atos_fops)) {
+		pr_err("Couldn't create iommu/devices/%s/dma_atos debugfs file\n",
+		       name);
+		goto err_rmdir;
+	}
+
+	if (!debugfs_create_file("map", S_IWUSR, dir, ddev,
+				 &iommu_debug_map_fops)) {
+		pr_err("Couldn't create iommu/devices/%s/map debugfs file\n",
+		       name);
+		goto err_rmdir;
+	}
+
+	if (!debugfs_create_file("dma_map", S_IWUSR, dir, ddev,
+					 &iommu_debug_dma_map_fops)) {
+		pr_err("Couldn't create iommu/devices/%s/dma_map debugfs file\n",
+			       name);
+			goto err_rmdir;
+	}
+
+	if (!debugfs_create_file("unmap", S_IWUSR, dir, ddev,
+				 &iommu_debug_unmap_fops)) {
+		pr_err("Couldn't create iommu/devices/%s/unmap debugfs file\n",
+		       name);
+		goto err_rmdir;
+	}
+
+	if (!debugfs_create_file("dma_unmap", S_IWUSR, dir, ddev,
+					 &iommu_debug_dma_unmap_fops)) {
+		pr_err("Couldn't create iommu/devices/%s/dma_unmap debugfs file\n",
+			       name);
+			goto err_rmdir;
+	}
+
+	if (!debugfs_create_file("pte", S_IWUSR, dir, ddev,
+			&iommu_debug_pte_fops)) {
+		pr_err("Couldn't create iommu/devices/%s/pte debugfs file\n",
+				name);
+		goto err_rmdir;
+	}
+
+	if (!debugfs_create_file("config_clocks", S_IWUSR, dir, ddev,
+				 &iommu_debug_config_clocks_fops)) {
+		pr_err("Couldn't create iommu/devices/%s/config_clocks debugfs file\n",
+		       name);
+		goto err_rmdir;
+	}
+
+	list_add(&ddev->list, &iommu_debug_devices);
+	return 0;
+
+err_rmdir:
+	debugfs_remove_recursive(dir);
+err:
+	kfree(ddev);
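+	/* return 0 (not an error) so device enumeration continues past us */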
+	return 0;
+}
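+
+/*
+ * Editor's sketch (not part of the original patch): the long run of
+ * debugfs_create_file() calls above could be table driven, e.g.:
+ *
+ *	static const struct {
+ *		const char *name;
+ *		umode_t mode;
+ *		const struct file_operations *fops;
+ *	} iommu_debug_files[] = {
+ *		{ "attach",	S_IRUSR, &iommu_debug_attach_fops },
+ *		{ "map",	S_IWUSR, &iommu_debug_map_fops },
+ *		...
+ *	};
+ *
+ *	for (i = 0; i < ARRAY_SIZE(iommu_debug_files); i++)
+ *		if (!debugfs_create_file(iommu_debug_files[i].name,
+ *					 iommu_debug_files[i].mode, dir, ddev,
+ *					 iommu_debug_files[i].fops))
+ *			goto err_rmdir;
+ *
+ * ("nr_iters" would need special-casing, since it passes &iters_per_op
+ * rather than ddev as the private data.)
+ */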
+
+static int pass_iommu_devices(struct device *dev, void *ignored)
+{
+	if (!of_device_is_compatible(dev->of_node, "iommu-debug-test"))
+		return 0;
+
+	if (!of_find_property(dev->of_node, "iommus", NULL))
+		return 0;
+
+	return snarf_iommu_devices(dev, dev_name(dev));
+}
+
+static int iommu_debug_populate_devices(void)
+{
+	int ret;
+	struct device_node *np;
+	const char *cb_name;
+
+	for_each_compatible_node(np, NULL, "qcom,msm-smmu-v2-ctx") {
+		if (!of_device_is_compatible(np, "iommu-debug-test"))
+			continue;
+
+		ret = of_property_read_string(np, "label", &cb_name);
+		if (ret)
+			return ret;
+
+		ret = snarf_iommu_devices(msm_iommu_get_ctx(cb_name), cb_name);
+		if (ret)
+			return ret;
+	}
+
+	return bus_for_each_dev(&platform_bus_type, NULL, NULL,
+			pass_iommu_devices);
+}
+
+static int iommu_debug_init_tests(void)
+{
+	debugfs_tests_dir = debugfs_create_dir("tests",
+					       iommu_debugfs_top);
+	if (!debugfs_tests_dir) {
+		pr_err("Couldn't create iommu/tests debugfs directory\n");
+		return -ENODEV;
+	}
+
+	virt_addr = kzalloc(SZ_1M, GFP_KERNEL);
+	if (!virt_addr) {
+		debugfs_remove_recursive(debugfs_tests_dir);
+		return -ENOMEM;
+	}
+
+	return iommu_debug_populate_devices();
+}
+
+static void iommu_debug_destroy_tests(void)
+{
+	debugfs_remove_recursive(debugfs_tests_dir);
+}
+#else
+static inline int iommu_debug_init_tests(void) { return 0; }
+static inline void iommu_debug_destroy_tests(void) { }
+#endif
+
+/*
+ * This isn't really a "driver", we just need something in the device tree
+ * so that our tests can run without any client drivers, and our tests rely
+ * on parsing the device tree for nodes with the `iommus' property.
+ */
+static int iommu_debug_pass(struct platform_device *pdev)
+{
+	return 0;
+}
+
+static const struct of_device_id iommu_debug_of_match[] = {
+	{ .compatible = "iommu-debug-test" },
+	{ },
+};
+
+static struct platform_driver iommu_debug_driver = {
+	.probe = iommu_debug_pass,
+	.remove = iommu_debug_pass,
+	.driver = {
+		.name = "iommu-debug",
+		.of_match_table = iommu_debug_of_match,
+	},
+};
+
+static int iommu_debug_init(void)
+{
+	if (iommu_debug_init_tests())
+		return -ENODEV;
+
+	return platform_driver_register(&iommu_debug_driver);
+}
+
+static void iommu_debug_exit(void)
+{
+	platform_driver_unregister(&iommu_debug_driver);
+	iommu_debug_destroy_tests();
+}
+
+module_init(iommu_debug_init);
+module_exit(iommu_debug_exit);
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/iommu/iommu-debug.h	2019-01-22 16:16:24.063251478 +0100
@@ -0,0 +1,34 @@
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef IOMMU_DEBUG_H
+#define IOMMU_DEBUG_H
+
+struct iommu_domain;
+struct device;
+
+#ifdef CONFIG_IOMMU_DEBUG_TRACKING
+
+void iommu_debug_attach_device(struct iommu_domain *domain, struct device *dev);
+void iommu_debug_domain_remove(struct iommu_domain *domain);
+
+#else  /* !CONFIG_IOMMU_DEBUG_TRACKING */
+
+static inline void iommu_debug_attach_device(struct iommu_domain *domain,
+					     struct device *dev)
+{
+}
+
+static inline void iommu_debug_domain_remove(struct iommu_domain *domain)
+{
+}
+
+#endif  /* CONFIG_IOMMU_DEBUG_TRACKING */
+
+#endif /* IOMMU_DEBUG_H */
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/iommu/io-pgtable-fast.c	2019-01-22 16:16:24.063251478 +0100
@@ -0,0 +1,751 @@
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt)	"io-pgtable-fast: " fmt
+
+#include <linux/iommu.h>
+#include <linux/kernel.h>
+#include <linux/scatterlist.h>
+#include <linux/sizes.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/io-pgtable-fast.h>
+#include <linux/mm.h>
+#include <asm/cacheflush.h>
+#include <linux/vmalloc.h>
+
+#include "io-pgtable.h"
+
+#define AV8L_FAST_MAX_ADDR_BITS		48
+
+/* Struct accessors */
+#define iof_pgtable_to_data(x)						\
+	container_of((x), struct av8l_fast_io_pgtable, iop)
+
+#define iof_pgtable_ops_to_pgtable(x)					\
+	container_of((x), struct io_pgtable, ops)
+
+#define iof_pgtable_ops_to_data(x)					\
+	iof_pgtable_to_data(iof_pgtable_ops_to_pgtable(x))
+
+struct av8l_fast_io_pgtable {
+	struct io_pgtable	  iop;
+	av8l_fast_iopte		 *pgd;
+	av8l_fast_iopte		 *puds[4];
+	av8l_fast_iopte		 *pmds;
+	struct page		**pages; /* page table memory */
+	int			nr_pages;
+	dma_addr_t		base;
+	dma_addr_t		end;
+};
+
+/* Page table bits */
+#define AV8L_FAST_PTE_TYPE_SHIFT	0
+#define AV8L_FAST_PTE_TYPE_MASK		0x3
+
+#define AV8L_FAST_PTE_TYPE_BLOCK	1
+#define AV8L_FAST_PTE_TYPE_TABLE	3
+#define AV8L_FAST_PTE_TYPE_PAGE		3
+
+#define AV8L_FAST_PTE_NSTABLE		(((av8l_fast_iopte)1) << 63)
+#define AV8L_FAST_PTE_XN		(((av8l_fast_iopte)3) << 53)
+#define AV8L_FAST_PTE_AF		(((av8l_fast_iopte)1) << 10)
+#define AV8L_FAST_PTE_SH_NS		(((av8l_fast_iopte)0) << 8)
+#define AV8L_FAST_PTE_SH_OS		(((av8l_fast_iopte)2) << 8)
+#define AV8L_FAST_PTE_SH_IS		(((av8l_fast_iopte)3) << 8)
+#define AV8L_FAST_PTE_NS		(((av8l_fast_iopte)1) << 5)
+#define AV8L_FAST_PTE_VALID		(((av8l_fast_iopte)1) << 0)
+
+#define AV8L_FAST_PTE_ATTR_LO_MASK	(((av8l_fast_iopte)0x3ff) << 2)
+/* Ignore the contiguous bit for block splitting */
+#define AV8L_FAST_PTE_ATTR_HI_MASK	(((av8l_fast_iopte)6) << 52)
+#define AV8L_FAST_PTE_ATTR_MASK		(AV8L_FAST_PTE_ATTR_LO_MASK |	\
+					 AV8L_FAST_PTE_ATTR_HI_MASK)
+#define AV8L_FAST_PTE_ADDR_MASK		((av8l_fast_iopte)0xfffffffff000)
+
+
+/* Stage-1 PTE */
+#define AV8L_FAST_PTE_AP_PRIV_RW	(((av8l_fast_iopte)0) << 6)
+#define AV8L_FAST_PTE_AP_RW		(((av8l_fast_iopte)1) << 6)
+#define AV8L_FAST_PTE_AP_PRIV_RO	(((av8l_fast_iopte)2) << 6)
+#define AV8L_FAST_PTE_AP_RO		(((av8l_fast_iopte)3) << 6)
+#define AV8L_FAST_PTE_ATTRINDX_SHIFT	2
+#define AV8L_FAST_PTE_nG		(((av8l_fast_iopte)1) << 11)
+
+/* Stage-2 PTE */
+#define AV8L_FAST_PTE_HAP_FAULT		(((av8l_fast_iopte)0) << 6)
+#define AV8L_FAST_PTE_HAP_READ		(((av8l_fast_iopte)1) << 6)
+#define AV8L_FAST_PTE_HAP_WRITE		(((av8l_fast_iopte)2) << 6)
+#define AV8L_FAST_PTE_MEMATTR_OIWB	(((av8l_fast_iopte)0xf) << 2)
+#define AV8L_FAST_PTE_MEMATTR_NC	(((av8l_fast_iopte)0x5) << 2)
+#define AV8L_FAST_PTE_MEMATTR_DEV	(((av8l_fast_iopte)0x1) << 2)
+
+/* Register bits */
+#define ARM_32_LPAE_TCR_EAE		(1 << 31)
+#define ARM_64_LPAE_S2_TCR_RES1		(1 << 31)
+
+#define AV8L_FAST_TCR_TG0_4K		(0 << 14)
+#define AV8L_FAST_TCR_TG0_64K		(1 << 14)
+#define AV8L_FAST_TCR_TG0_16K		(2 << 14)
+
+#define AV8L_FAST_TCR_SH0_SHIFT		12
+#define AV8L_FAST_TCR_SH0_MASK		0x3
+#define AV8L_FAST_TCR_SH_NS		0
+#define AV8L_FAST_TCR_SH_OS		2
+#define AV8L_FAST_TCR_SH_IS		3
+
+#define AV8L_FAST_TCR_ORGN0_SHIFT	10
+#define AV8L_FAST_TCR_IRGN0_SHIFT	8
+#define AV8L_FAST_TCR_RGN_MASK		0x3
+#define AV8L_FAST_TCR_RGN_NC		0
+#define AV8L_FAST_TCR_RGN_WBWA		1
+#define AV8L_FAST_TCR_RGN_WT		2
+#define AV8L_FAST_TCR_RGN_WB		3
+
+#define AV8L_FAST_TCR_SL0_SHIFT		6
+#define AV8L_FAST_TCR_SL0_MASK		0x3
+
+#define AV8L_FAST_TCR_T0SZ_SHIFT	0
+#define AV8L_FAST_TCR_SZ_MASK		0xf
+
+#define AV8L_FAST_TCR_PS_SHIFT		16
+#define AV8L_FAST_TCR_PS_MASK		0x7
+
+#define AV8L_FAST_TCR_IPS_SHIFT		32
+#define AV8L_FAST_TCR_IPS_MASK		0x7
+
+#define AV8L_FAST_TCR_PS_32_BIT		0x0ULL
+#define AV8L_FAST_TCR_PS_36_BIT		0x1ULL
+#define AV8L_FAST_TCR_PS_40_BIT		0x2ULL
+#define AV8L_FAST_TCR_PS_42_BIT		0x3ULL
+#define AV8L_FAST_TCR_PS_44_BIT		0x4ULL
+#define AV8L_FAST_TCR_PS_48_BIT		0x5ULL
+
+#define AV8L_FAST_TCR_EPD1_SHIFT	23
+#define AV8L_FAST_TCR_EPD1_FAULT	1
+
+#define AV8L_FAST_TCR_SEP_SHIFT		(15 + 32)
+#define AV8L_FAST_TCR_SEP_UPSTREAM	7ULL
+
+#define AV8L_FAST_MAIR_ATTR_SHIFT(n)	((n) << 3)
+#define AV8L_FAST_MAIR_ATTR_MASK	0xff
+#define AV8L_FAST_MAIR_ATTR_DEVICE	0x04
+#define AV8L_FAST_MAIR_ATTR_NC		0x44
+#define AV8L_FAST_MAIR_ATTR_WBRWA	0xff
+#define AV8L_FAST_MAIR_ATTR_IDX_NC	0
+#define AV8L_FAST_MAIR_ATTR_IDX_CACHE	1
+#define AV8L_FAST_MAIR_ATTR_IDX_DEV	2
+
+#define AV8L_FAST_PAGE_SHIFT		12
+
+
+#ifdef CONFIG_IOMMU_IO_PGTABLE_FAST_PROVE_TLB
+
+#include <asm/cacheflush.h>
+#include <linux/notifier.h>
+
+static ATOMIC_NOTIFIER_HEAD(av8l_notifier_list);
+
+void av8l_register_notify(struct notifier_block *nb)
+{
+	atomic_notifier_chain_register(&av8l_notifier_list, nb);
+}
+EXPORT_SYMBOL(av8l_register_notify);
+
+static void __av8l_check_for_stale_tlb(av8l_fast_iopte *ptep)
+{
+	if (unlikely(*ptep)) {
+		atomic_notifier_call_chain(
+			&av8l_notifier_list, MAPPED_OVER_STALE_TLB,
+			(void *) ptep);
+		pr_err("Tried to map over a non-vacant pte: 0x%llx @ %p\n",
+		       *ptep, ptep);
+		pr_err("Nearby memory:\n");
+		print_hex_dump(KERN_ERR, "pgtbl: ", DUMP_PREFIX_ADDRESS,
+			       32, 8, ptep - 16, 32 * sizeof(*ptep), false);
+	}
+}
+
+void av8l_fast_clear_stale_ptes(av8l_fast_iopte *pmds, u64 base,
+		u64 start, u64 end, bool skip_sync)
+{
+	int i;
+	av8l_fast_iopte *pmdp = iopte_pmd_offset(pmds, base, start);
+
+	for (i = start >> AV8L_FAST_PAGE_SHIFT;
+			i <= (end >> AV8L_FAST_PAGE_SHIFT); ++i) {
+		if (!(*pmdp & AV8L_FAST_PTE_VALID)) {
+			*pmdp = 0;
+			if (!skip_sync)
+				dmac_clean_range(pmdp, pmdp + 1);
+		}
+		pmdp++;
+	}
+}
+#else
+static void __av8l_check_for_stale_tlb(av8l_fast_iopte *ptep)
+{
+}
+#endif
+
+/* caller must take care of cache maintenance on *ptep */
+int av8l_fast_map_public(av8l_fast_iopte *ptep, phys_addr_t paddr, size_t size,
+			 int prot)
+{
+	int i, nptes = size >> AV8L_FAST_PAGE_SHIFT;
+	av8l_fast_iopte pte = AV8L_FAST_PTE_XN
+		| AV8L_FAST_PTE_TYPE_PAGE
+		| AV8L_FAST_PTE_AF
+		| AV8L_FAST_PTE_nG
+		| AV8L_FAST_PTE_SH_OS;
+
+	if (prot & IOMMU_DEVICE)
+		pte |= (AV8L_FAST_MAIR_ATTR_IDX_DEV
+			<< AV8L_FAST_PTE_ATTRINDX_SHIFT);
+	else if (prot & IOMMU_CACHE)
+		pte |= (AV8L_FAST_MAIR_ATTR_IDX_CACHE
+			<< AV8L_FAST_PTE_ATTRINDX_SHIFT);
+
+	if (!(prot & IOMMU_WRITE))
+		pte |= AV8L_FAST_PTE_AP_RO;
+	else
+		pte |= AV8L_FAST_PTE_AP_RW;
+
+	paddr &= AV8L_FAST_PTE_ADDR_MASK;
+	for (i = 0; i < nptes; i++, paddr += SZ_4K) {
+		__av8l_check_for_stale_tlb(ptep + i);
+		*(ptep + i) = pte | paddr;
+	}
+
+	return 0;
+}
+
+static int av8l_fast_map(struct io_pgtable_ops *ops, unsigned long iova,
+			 phys_addr_t paddr, size_t size, int prot)
+{
+	struct av8l_fast_io_pgtable *data = iof_pgtable_ops_to_data(ops);
+	av8l_fast_iopte *ptep = iopte_pmd_offset(data->pmds, data->base, iova);
+	unsigned long nptes = size >> AV8L_FAST_PAGE_SHIFT;
+
+	av8l_fast_map_public(ptep, paddr, size, prot);
+	dmac_clean_range(ptep, ptep + nptes);
+
+	return 0;
+}
+
+static void __av8l_fast_unmap(av8l_fast_iopte *ptep, size_t size,
+			      bool need_stale_tlb_tracking)
+{
+	unsigned long nptes = size >> AV8L_FAST_PAGE_SHIFT;
+	int val = need_stale_tlb_tracking
+		? AV8L_FAST_PTE_UNMAPPED_NEED_TLBI
+		: 0;
+
+	memset(ptep, val, sizeof(*ptep) * nptes);
+}
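+
+/*
+ * Editor's note: memset() replicates @val into every byte of each 8-byte
+ * PTE, so the code above relies on AV8L_FAST_PTE_UNMAPPED_NEED_TLBI
+ * (defined in linux/io-pgtable-fast.h, not shown in this hunk) being a
+ * one-byte pattern whose replication still reads as a non-zero but
+ * non-VALID entry.
+ */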
+
+/* caller must take care of cache maintenance on *ptep */
+void av8l_fast_unmap_public(av8l_fast_iopte *ptep, size_t size)
+{
+	__av8l_fast_unmap(ptep, size, true);
+}
+
+static size_t av8l_fast_unmap(struct io_pgtable_ops *ops, unsigned long iova,
+			      size_t size)
+{
+	struct av8l_fast_io_pgtable *data = iof_pgtable_ops_to_data(ops);
+	struct io_pgtable *iop = &data->iop;
+	av8l_fast_iopte *ptep = iopte_pmd_offset(data->pmds, data->base, iova);
+	unsigned long nptes = size >> AV8L_FAST_PAGE_SHIFT;
+
+	__av8l_fast_unmap(ptep, size, false);
+	dmac_clean_range(ptep, ptep + nptes);
+	iop->cfg.tlb->tlb_flush_all(iop->cookie);
+
+	return size;
+}
+
+#if defined(CONFIG_ARM64)
+#define FAST_PGDNDX(va) (((va) & 0x7fc0000000) >> 27)
+#elif defined(CONFIG_ARM)
+#define FAST_PGDNDX(va) (((va) & 0xc0000000) >> 27)
+#endif
+
+static phys_addr_t av8l_fast_iova_to_phys(struct io_pgtable_ops *ops,
+					  unsigned long iova)
+{
+	struct av8l_fast_io_pgtable *data = iof_pgtable_ops_to_data(ops);
+	av8l_fast_iopte pte, *pgdp, *pudp, *pmdp;
+	unsigned long pgd;
+	phys_addr_t phys;
+	const unsigned long pts = AV8L_FAST_PTE_TYPE_SHIFT;
+	const unsigned long ptm = AV8L_FAST_PTE_TYPE_MASK;
+	const unsigned long ptt = AV8L_FAST_PTE_TYPE_TABLE;
+	const unsigned long ptp = AV8L_FAST_PTE_TYPE_PAGE;
+	const av8l_fast_iopte am = AV8L_FAST_PTE_ADDR_MASK;
+
+	/* TODO: clean up some of these magic numbers... */
+
+	pgd = (unsigned long)data->pgd | FAST_PGDNDX(iova);
+	pgdp = (av8l_fast_iopte *)pgd;
+
+	pte = *pgdp;
+	if (((pte >> pts) & ptm) != ptt)
+		return 0;
+	pudp = phys_to_virt((pte & am) | ((iova & 0x3fe00000) >> 18));
+
+	pte = *pudp;
+	if (((pte >> pts) & ptm) != ptt)
+		return 0;
+	pmdp = phys_to_virt((pte & am) | ((iova & 0x1ff000) >> 9));
+
+	pte = *pmdp;
+	if (((pte >> pts) & ptm) != ptp)
+		return 0;
+	phys = pte & am;
+
+	return phys | (iova & 0xfff);
+}
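+
+/*
+ * Editor's note on the magic numbers above (cf. the TODO), taking the
+ * arm64 FAST_PGDNDX variant: each table index is pre-scaled by
+ * sizeof(av8l_fast_iopte) == 8, so rather than extracting an index and
+ * multiplying by 8, every shift is reduced by 3. IOVA bits [38:30] >> 27
+ * give a byte offset into the pgd, bits [29:21] >> 18 into the pud,
+ * bits [20:12] >> 9 into the pmd, and bits [11:0] are the page offset
+ * merged in at the end.
+ */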
+
+static int av8l_fast_map_sg(struct io_pgtable_ops *ops, unsigned long iova,
+			    struct scatterlist *sg, unsigned int nents,
+			    int prot, size_t *size)
+{
+	return -ENODEV;
+}
+
+static struct av8l_fast_io_pgtable *
+av8l_fast_alloc_pgtable_data(struct io_pgtable_cfg *cfg)
+{
+	struct av8l_fast_io_pgtable *data;
+
+	data = kmalloc(sizeof(*data), GFP_KERNEL);
+	if (!data)
+		return NULL;
+
+	data->iop.ops = (struct io_pgtable_ops) {
+		.map		= av8l_fast_map,
+		.map_sg		= av8l_fast_map_sg,
+		.unmap		= av8l_fast_unmap,
+		.iova_to_phys	= av8l_fast_iova_to_phys,
+	};
+
+	return data;
+}
+
+/*
+ * We need max 1 page for the pgd, 4 pages for puds (1GB VA per pud page) and
+ * 2048 pages for pmds (each pud page contains 512 table entries, each
+ * pointing to a pmd).
+ */
+#define NUM_PGD_PAGES 1
+#define NUM_PUD_PAGES 4
+#define NUM_PMD_PAGES 2048
+#define NUM_PGTBL_PAGES (NUM_PGD_PAGES + NUM_PUD_PAGES + NUM_PMD_PAGES)
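+
+/*
+ * Worked example (editor's note): a 4GB VA space at 4K granularity needs
+ * 4GB / 4K = 2^20 leaf PTEs. At 8 bytes per PTE that is 8MB of last-level
+ * tables, i.e. 8MB / 4K = 2048 pmd pages. Each pud page holds 512 table
+ * entries, so pointing at 2048 pmds takes 2048 / 512 = 4 pud pages, all of
+ * which fit behind the first 4 entries of a single pgd page.
+ */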
+
+/* undefine arch-specific definitions which depend on the page table format */
+#undef pud_index
+#undef pud_mask
+#undef pud_next
+#undef pmd_index
+#undef pmd_mask
+#undef pmd_next
+
+#define pud_index(addr)		(((addr) >> 30) & 0x3)
+#define pud_mask(addr)		((addr) & ~((1UL << 30) - 1))
+#define pud_next(addr, end)					\
+({	unsigned long __boundary = pud_mask(addr + (1UL << 30));\
+	(__boundary - 1 < (end) - 1) ? __boundary : (end);	\
+})
+
+#define pmd_index(addr)		(((addr) >> 21) & 0x1ff)
+#define pmd_mask(addr)		((addr) & ~((1UL << 21) - 1))
+#define pmd_next(addr, end)					\
+({	unsigned long __boundary = pmd_mask(addr + (1UL << 21));\
+	(__boundary - 1 < (end) - 1) ? __boundary : (end);	\
+})
+
+static int
+av8l_fast_prepopulate_pgtables(struct av8l_fast_io_pgtable *data,
+			       struct io_pgtable_cfg *cfg, void *cookie)
+{
+	int i, j, pg = 0;
+	struct page **pages, *page;
+	dma_addr_t base = cfg->iova_base;
+	dma_addr_t end = cfg->iova_end;
+	dma_addr_t pud, pmd;
+	int pmd_pg_index;
+
+	pages = kmalloc(sizeof(*pages) * NUM_PGTBL_PAGES,
+			GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY);
+
+	if (!pages)
+		pages = vmalloc(sizeof(*pages) * NUM_PGTBL_PAGES);
+
+	if (!pages)
+		return -ENOMEM;
+
+	page = alloc_page(GFP_KERNEL | __GFP_ZERO);
+	if (!page)
+		goto err_free_pages_arr;
+	pages[pg++] = page;
+	data->pgd = page_address(page);
+
+	/*
+	 * We need max 2048 entries at level 2 to map 4GB of VA space. A page
+	 * can hold 512 entries, so we need max 4 pages.
+	 */
+	for (i = pud_index(base), pud = base; pud < end;
+			++i, pud = pud_next(pud, end)) {
+		av8l_fast_iopte pte, *ptep;
+
+		page = alloc_page(GFP_KERNEL | __GFP_ZERO);
+		if (!page)
+			goto err_free_pages;
+		pages[pg++] = page;
+		data->puds[i] = page_address(page);
+		pte = page_to_phys(page) | AV8L_FAST_PTE_TYPE_TABLE;
+		ptep = ((av8l_fast_iopte *)data->pgd) + i;
+		*ptep = pte;
+	}
+	dmac_clean_range(data->pgd, data->pgd + 4);
+
+	/*
+	 * We have max 4 puds, each of which can point to 512 pmds, so we'll
+	 * have max 2048 pmds, each of which can hold 512 ptes, for a grand
+	 * total of 2048*512=1048576 PTEs.
+	 */
+	pmd_pg_index = pg;
+	for (i = pud_index(base), pud = base; pud < end;
+			++i, pud = pud_next(pud, end)) {
+		for (j = pmd_index(pud), pmd = pud; pmd < pud_next(pud, end);
+				++j, pmd = pmd_next(pmd, end)) {
+			av8l_fast_iopte pte, *pudp;
+			void *addr;
+
+			page = alloc_page(GFP_KERNEL | __GFP_ZERO);
+			if (!page)
+				goto err_free_pages;
+			pages[pg++] = page;
+
+			addr = page_address(page);
+			dmac_clean_range(addr, addr + SZ_4K);
+
+			pte = page_to_phys(page) | AV8L_FAST_PTE_TYPE_TABLE;
+			pudp = data->puds[i] + j;
+			*pudp = pte;
+		}
+		dmac_clean_range(data->puds[i], data->puds[i] + 512);
+	}
+
+	/*
+	 * We map the pmds into a virtually contiguous space so that we
+	 * don't have to traverse the first two levels of the page tables
+	 * to find the appropriate pud.  Instead, it will be a simple
+	 * offset from the virtual base of the pmds.
+	 */
+	data->pmds = vmap(&pages[pmd_pg_index], pg - pmd_pg_index,
+			  VM_IOREMAP, PAGE_KERNEL);
+	if (!data->pmds)
+		goto err_free_pages;
+
+	data->pages = pages;
+	data->nr_pages = pg;
+	data->base = base;
+	data->end = end;
+	return 0;
+
+err_free_pages:
+	for (i = 0; i < pg; ++i)
+		__free_page(pages[i]);
+err_free_pages_arr:
+	kvfree(pages);
+	return -ENOMEM;
+}
+
+static struct io_pgtable *
+av8l_fast_alloc_pgtable(struct io_pgtable_cfg *cfg, void *cookie)
+{
+	u64 reg;
+	struct av8l_fast_io_pgtable *data =
+		av8l_fast_alloc_pgtable_data(cfg);
+
+	if (!data)
+		return NULL;
+
+	/* restrict according to the fast map requirements */
+	cfg->ias = 32;
+	cfg->pgsize_bitmap = SZ_4K;
+
+	/* TCR */
+	if (cfg->quirks & IO_PGTABLE_QUIRK_PAGE_TABLE_COHERENT)
+		reg = (AV8L_FAST_TCR_SH_OS << AV8L_FAST_TCR_SH0_SHIFT) |
+			(AV8L_FAST_TCR_RGN_WBWA << AV8L_FAST_TCR_IRGN0_SHIFT) |
+			(AV8L_FAST_TCR_RGN_WBWA << AV8L_FAST_TCR_ORGN0_SHIFT);
+	else
+		reg = (AV8L_FAST_TCR_SH_OS << AV8L_FAST_TCR_SH0_SHIFT) |
+			(AV8L_FAST_TCR_RGN_NC << AV8L_FAST_TCR_IRGN0_SHIFT) |
+			(AV8L_FAST_TCR_RGN_NC << AV8L_FAST_TCR_ORGN0_SHIFT);
+
+	reg |= AV8L_FAST_TCR_TG0_4K;
+
+	switch (cfg->oas) {
+	case 32:
+		reg |= (AV8L_FAST_TCR_PS_32_BIT << AV8L_FAST_TCR_IPS_SHIFT);
+		break;
+	case 36:
+		reg |= (AV8L_FAST_TCR_PS_36_BIT << AV8L_FAST_TCR_IPS_SHIFT);
+		break;
+	case 40:
+		reg |= (AV8L_FAST_TCR_PS_40_BIT << AV8L_FAST_TCR_IPS_SHIFT);
+		break;
+	case 42:
+		reg |= (AV8L_FAST_TCR_PS_42_BIT << AV8L_FAST_TCR_IPS_SHIFT);
+		break;
+	case 44:
+		reg |= (AV8L_FAST_TCR_PS_44_BIT << AV8L_FAST_TCR_IPS_SHIFT);
+		break;
+	case 48:
+		reg |= (AV8L_FAST_TCR_PS_48_BIT << AV8L_FAST_TCR_IPS_SHIFT);
+		break;
+	default:
+		goto out_free_data;
+	}
+
+	reg |= (64ULL - cfg->ias) << AV8L_FAST_TCR_T0SZ_SHIFT;
+	reg |= AV8L_FAST_TCR_EPD1_FAULT << AV8L_FAST_TCR_EPD1_SHIFT;
+#if defined(CONFIG_ARM)
+	reg |= ARM_32_LPAE_TCR_EAE;
+#endif
+	reg |= AV8L_FAST_TCR_SEP_UPSTREAM << AV8L_FAST_TCR_SEP_SHIFT;
+	cfg->av8l_fast_cfg.tcr = reg;
+
+	/* MAIRs */
+	reg = (AV8L_FAST_MAIR_ATTR_NC
+	       << AV8L_FAST_MAIR_ATTR_SHIFT(AV8L_FAST_MAIR_ATTR_IDX_NC)) |
+	      (AV8L_FAST_MAIR_ATTR_WBRWA
+	       << AV8L_FAST_MAIR_ATTR_SHIFT(AV8L_FAST_MAIR_ATTR_IDX_CACHE)) |
+	      (AV8L_FAST_MAIR_ATTR_DEVICE
+	       << AV8L_FAST_MAIR_ATTR_SHIFT(AV8L_FAST_MAIR_ATTR_IDX_DEV));
+
+	cfg->av8l_fast_cfg.mair[0] = reg;
+	cfg->av8l_fast_cfg.mair[1] = 0;
+
+	/* Allocate all page table memory! */
+	if (av8l_fast_prepopulate_pgtables(data, cfg, cookie))
+		goto out_free_data;
+
+	cfg->av8l_fast_cfg.pmds = data->pmds;
+
+	/* TTBRs */
+	cfg->av8l_fast_cfg.ttbr[0] = virt_to_phys(data->pgd);
+	cfg->av8l_fast_cfg.ttbr[1] = 0;
+	return &data->iop;
+
+out_free_data:
+	kfree(data);
+	return NULL;
+}
+
+static void av8l_fast_free_pgtable(struct io_pgtable *iop)
+{
+	int i;
+	struct av8l_fast_io_pgtable *data = iof_pgtable_to_data(iop);
+
+	vunmap(data->pmds);
+	for (i = 0; i < data->nr_pages; ++i)
+		__free_page(data->pages[i]);
+	kvfree(data->pages);
+	kfree(data);
+}
+
+struct io_pgtable_init_fns io_pgtable_av8l_fast_init_fns = {
+	.alloc	= av8l_fast_alloc_pgtable,
+	.free	= av8l_fast_free_pgtable,
+};
+
+
+#ifdef CONFIG_IOMMU_IO_PGTABLE_FAST_SELFTEST
+
+#include <linux/dma-contiguous.h>
+
+static struct io_pgtable_cfg *cfg_cookie;
+
+static void dummy_tlb_flush_all(void *cookie)
+{
+	WARN_ON(cookie != cfg_cookie);
+}
+
+static void dummy_tlb_add_flush(unsigned long iova, size_t size, bool leaf,
+				void *cookie)
+{
+	WARN_ON(cookie != cfg_cookie);
+	WARN_ON(!(size & cfg_cookie->pgsize_bitmap));
+}
+
+static void dummy_tlb_sync(void *cookie)
+{
+	WARN_ON(cookie != cfg_cookie);
+}
+
+static struct iommu_gather_ops dummy_tlb_ops __initdata = {
+	.tlb_flush_all	= dummy_tlb_flush_all,
+	.tlb_add_flush	= dummy_tlb_add_flush,
+	.tlb_sync	= dummy_tlb_sync,
+};
+
+/*
+ * Returns true if the iova range is successfully mapped to the contiguous
+ * phys range in ops.
+ */
+static bool av8l_fast_range_has_specific_mapping(struct io_pgtable_ops *ops,
+						 const unsigned long iova_start,
+						 const phys_addr_t phys_start,
+						 const size_t size)
+{
+	u64 iova = iova_start;
+	phys_addr_t phys = phys_start;
+
+	while (iova < (iova_start + size)) {
+		/* + 42 just to make sure offsetting is working */
+		if (ops->iova_to_phys(ops, iova + 42) != (phys + 42))
+			return false;
+		iova += SZ_4K;
+		phys += SZ_4K;
+	}
+	return true;
+}
+
+static int __init av8l_fast_positive_testing(void)
+{
+	int failed = 0;
+	u64 iova;
+	struct io_pgtable_ops *ops;
+	struct io_pgtable_cfg cfg;
+	struct av8l_fast_io_pgtable *data;
+	av8l_fast_iopte *pmds;
+	u64 max = SZ_1G * 4ULL - 1;
+	u64 base = 0;
+
+	cfg = (struct io_pgtable_cfg) {
+		.quirks = 0,
+		.tlb = &dummy_tlb_ops,
+		.ias = 32,
+		.oas = 32,
+		.pgsize_bitmap = SZ_4K,
+		.iova_base = base,
+		.iova_end = max,
+	};
+
+	cfg_cookie = &cfg;
+	ops = alloc_io_pgtable_ops(ARM_V8L_FAST, &cfg, &cfg);
+
+	if (WARN_ON(!ops))
+		return 1;
+
+	data = iof_pgtable_ops_to_data(ops);
+	pmds = data->pmds;
+
+	/* map the entire 4GB VA space with 4K map calls */
+	for (iova = base; iova < max; iova += SZ_4K) {
+		if (WARN_ON(ops->map(ops, iova, iova, SZ_4K, IOMMU_READ))) {
+			failed++;
+			continue;
+		}
+	}
+	if (WARN_ON(!av8l_fast_range_has_specific_mapping(ops, base,
+					base, max - base)))
+		failed++;
+
+	/* unmap it all */
+	for (iova = base; iova < max; iova += SZ_4K) {
+		if (WARN_ON(ops->unmap(ops, iova, SZ_4K) != SZ_4K))
+			failed++;
+	}
+
+	/* sweep up TLB proving PTEs */
+	av8l_fast_clear_stale_ptes(pmds, base, base, max, false);
+
+	/* map the entire 4GB VA space with 8K map calls */
+	for (iova = base; iova < max; iova += SZ_8K) {
+		if (WARN_ON(ops->map(ops, iova, iova, SZ_8K, IOMMU_READ))) {
+			failed++;
+			continue;
+		}
+	}
+
+	if (WARN_ON(!av8l_fast_range_has_specific_mapping(ops, base,
+					base, max - base)))
+		failed++;
+
+	/* unmap it all with 8K unmap calls */
+	for (iova = base; iova < max; iova += SZ_8K) {
+		if (WARN_ON(ops->unmap(ops, iova, SZ_8K) != SZ_8K))
+			failed++;
+	}
+
+	/* sweep up TLB proving PTEs */
+	av8l_fast_clear_stale_ptes(pmds, base, base, max, false);
+
+	/* map the entire 4GB VA space with 16K map calls */
+	for (iova = base; iova < max; iova += SZ_16K) {
+		if (WARN_ON(ops->map(ops, iova, iova, SZ_16K, IOMMU_READ))) {
+			failed++;
+			continue;
+		}
+	}
+
+	if (WARN_ON(!av8l_fast_range_has_specific_mapping(ops, base,
+					base, max - base)))
+		failed++;
+
+	/* unmap it all */
+	for (iova = base; iova < max; iova += SZ_16K) {
+		if (WARN_ON(ops->unmap(ops, iova, SZ_16K) != SZ_16K))
+			failed++;
+	}
+
+	/* sweep up TLB proving PTEs */
+	av8l_fast_clear_stale_ptes(pmds, base, base, max, false);
+
+	/* map the entire 4GB VA space with 64K map calls */
+	for (iova = base; iova < max; iova += SZ_64K) {
+		if (WARN_ON(ops->map(ops, iova, iova, SZ_64K, IOMMU_READ))) {
+			failed++;
+			continue;
+		}
+	}
+
+	if (WARN_ON(!av8l_fast_range_has_specific_mapping(ops, base,
+					base, max - base)))
+		failed++;
+
+	/* unmap it all at once */
+	if (WARN_ON(ops->unmap(ops, base, max - base) != (max - base)))
+		failed++;
+
+	free_io_pgtable_ops(ops);
+	return failed;
+}
+
+static int __init av8l_fast_do_selftests(void)
+{
+	int failed = 0;
+
+	failed += av8l_fast_positive_testing();
+
+	pr_err("selftest: completed with %d failures\n", failed);
+
+	return 0;
+}
+subsys_initcall(av8l_fast_do_selftests);
+#endif
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/iommu/msm_dma_iommu_mapping.c	2019-01-22 16:16:24.063251478 +0100
@@ -0,0 +1,423 @@
+/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/kref.h>
+#include <linux/slab.h>
+#include <linux/rbtree.h>
+#include <linux/mutex.h>
+#include <linux/err.h>
+#include <asm/barrier.h>
+
+#include <linux/msm_dma_iommu_mapping.h>
+
+/**
+ * struct msm_iommu_map - represents a mapping of an ion buffer to an iommu
+ * @lnode - list node to exist in the buffer's list of iommu mappings
+ * @dev - Device this is mapped to. Used as key
+ * @sgl - The scatterlist for this mapping
+ * @nents - Number of entries in sgl
+ * @dir - The direction for the unmap.
+ * @meta - Backpointer to the meta this mapping belongs to.
+ * @ref - for reference counting this mapping
+ *
+ * Represents a mapping of one dma_buf buffer to a particular device
+ * and address range. There may exist other mappings of this buffer in
+ * different devices. All mappings will have the same cacheability and security.
+ */
+struct msm_iommu_map {
+	struct list_head lnode;
+	struct rb_node node;
+	struct device *dev;
+	struct scatterlist sgl;
+	unsigned int nents;
+	enum dma_data_direction dir;
+	struct msm_iommu_meta *meta;
+	struct kref ref;
+};
+
+struct msm_iommu_meta {
+	struct rb_node node;
+	struct list_head iommu_maps;
+	struct kref ref;
+	struct mutex lock;
+	void *buffer;
+};
+
+static struct rb_root iommu_root;
+static DEFINE_MUTEX(msm_iommu_map_mutex);
+
+static void msm_iommu_meta_add(struct msm_iommu_meta *meta)
+{
+	struct rb_root *root = &iommu_root;
+	struct rb_node **p = &root->rb_node;
+	struct rb_node *parent = NULL;
+	struct msm_iommu_meta *entry;
+
+	while (*p) {
+		parent = *p;
+		entry = rb_entry(parent, struct msm_iommu_meta, node);
+
+		if (meta->buffer < entry->buffer) {
+			p = &(*p)->rb_left;
+		} else if (meta->buffer > entry->buffer) {
+			p = &(*p)->rb_right;
+		} else {
+			pr_err("%s: dma_buf %p already exists\n", __func__,
+				entry->buffer);
+			BUG();
+		}
+	}
+
+	rb_link_node(&meta->node, parent, p);
+	rb_insert_color(&meta->node, root);
+}
+
+static struct msm_iommu_meta *msm_iommu_meta_lookup(void *buffer)
+{
+	struct rb_root *root = &iommu_root;
+	struct rb_node **p = &root->rb_node;
+	struct rb_node *parent = NULL;
+	struct msm_iommu_meta *entry = NULL;
+
+	while (*p) {
+		parent = *p;
+		entry = rb_entry(parent, struct msm_iommu_meta, node);
+
+		if (buffer < entry->buffer)
+			p = &(*p)->rb_left;
+		else if (buffer > entry->buffer)
+			p = &(*p)->rb_right;
+		else
+			return entry;
+	}
+
+	return NULL;
+}
+
+static void msm_iommu_add(struct msm_iommu_meta *meta,
+			  struct msm_iommu_map *iommu)
+{
+	struct msm_iommu_map *entry;
+
+	list_for_each_entry(entry, &meta->iommu_maps, lnode) {
+		if (entry->dev == iommu->dev) {
+			pr_err("%s: dma_buf %p already has mapping to device %p\n",
+				__func__, meta->buffer, iommu->dev);
+			BUG();
+		}
+	}
+	INIT_LIST_HEAD(&iommu->lnode);
+	list_add(&iommu->lnode, &meta->iommu_maps);
+}
+
+
+static struct msm_iommu_map *msm_iommu_lookup(struct msm_iommu_meta *meta,
+					      struct device *dev)
+{
+	struct msm_iommu_map *entry;
+
+	list_for_each_entry(entry, &meta->iommu_maps, lnode) {
+		if (entry->dev == dev)
+			return entry;
+	}
+
+	return NULL;
+}
+
+static struct msm_iommu_meta *msm_iommu_meta_create(struct dma_buf *dma_buf)
+{
+	struct msm_iommu_meta *meta;
+
+	meta = kzalloc(sizeof(*meta), GFP_KERNEL);
+
+	if (!meta)
+		return ERR_PTR(-ENOMEM);
+
+	INIT_LIST_HEAD(&meta->iommu_maps);
+	meta->buffer = dma_buf->priv;
+	kref_init(&meta->ref);
+	mutex_init(&meta->lock);
+	msm_iommu_meta_add(meta);
+
+	return meta;
+}
+
+static void msm_iommu_meta_put(struct msm_iommu_meta *meta);
+
+static inline int __msm_dma_map_sg(struct device *dev, struct scatterlist *sg,
+				   int nents, enum dma_data_direction dir,
+				   struct dma_buf *dma_buf,
+				   struct dma_attrs *attrs)
+{
+	struct msm_iommu_map *iommu_map;
+	struct msm_iommu_meta *iommu_meta = NULL;
+	int ret = 0;
+	bool extra_meta_ref_taken = false;
+	int late_unmap = !dma_get_attr(DMA_ATTR_NO_DELAYED_UNMAP, attrs);
+
+	mutex_lock(&msm_iommu_map_mutex);
+	iommu_meta = msm_iommu_meta_lookup(dma_buf->priv);
+
+	if (!iommu_meta) {
+		iommu_meta = msm_iommu_meta_create(dma_buf);
+
+		if (IS_ERR(iommu_meta)) {
+			mutex_unlock(&msm_iommu_map_mutex);
+			ret = PTR_ERR(iommu_meta);
+			goto out;
+		}
+		if (late_unmap) {
+			kref_get(&iommu_meta->ref);
+			extra_meta_ref_taken = true;
+		}
+	} else {
+		kref_get(&iommu_meta->ref);
+	}
+
+	mutex_unlock(&msm_iommu_map_mutex);
+
+	mutex_lock(&iommu_meta->lock);
+	iommu_map = msm_iommu_lookup(iommu_meta, dev);
+	if (!iommu_map) {
+		iommu_map = kmalloc(sizeof(*iommu_map), GFP_ATOMIC);
+
+		if (!iommu_map) {
+			ret = -ENOMEM;
+			goto out_unlock;
+		}
+
+		ret = dma_map_sg_attrs(dev, sg, nents, dir, attrs);
+		if (ret != nents) {
+			kfree(iommu_map);
+			goto out_unlock;
+		}
+
+		kref_init(&iommu_map->ref);
+		if (late_unmap)
+			kref_get(&iommu_map->ref);
+		iommu_map->meta = iommu_meta;
+		iommu_map->sgl.dma_address = sg->dma_address;
+		iommu_map->sgl.dma_length = sg->dma_length;
+		/* record nents/dir for the eventual dma_unmap_sg() on release */
+		iommu_map->nents = nents;
+		iommu_map->dir = dir;
+		iommu_map->dev = dev;
+		msm_iommu_add(iommu_meta, iommu_map);
+
+	} else {
+		sg->dma_address = iommu_map->sgl.dma_address;
+		sg->dma_length = iommu_map->sgl.dma_length;
+
+		kref_get(&iommu_map->ref);
+		if (is_device_dma_coherent(dev))
+			/*
+			 * Ensure all outstanding changes for coherent
+			 * buffers are applied to the cache before any
+			 * DMA occurs.
+			 */
+			dmb(ish);
+		ret = nents;
+	}
+	mutex_unlock(&iommu_meta->lock);
+	return ret;
+
+out_unlock:
+	mutex_unlock(&iommu_meta->lock);
+out:
+	if (!IS_ERR(iommu_meta)) {
+		if (extra_meta_ref_taken)
+			msm_iommu_meta_put(iommu_meta);
+		msm_iommu_meta_put(iommu_meta);
+	}
+	return ret;
+}
+
+/*
+ * We do not take a reference to the dma_buf here. Clients are expected to
+ * hold a reference to the dma_buf until they are done with mapping and
+ * unmapping.
+ */
+int msm_dma_map_sg_attrs(struct device *dev, struct scatterlist *sg, int nents,
+		   enum dma_data_direction dir, struct dma_buf *dma_buf,
+		   struct dma_attrs *attrs)
+{
+	int ret;
+
+	if (IS_ERR_OR_NULL(dev)) {
+		pr_err("%s: dev pointer is invalid\n", __func__);
+		return -EINVAL;
+	}
+
+	if (IS_ERR_OR_NULL(sg)) {
+		pr_err("%s: sg table pointer is invalid\n", __func__);
+		return -EINVAL;
+	}
+
+	if (IS_ERR_OR_NULL(dma_buf)) {
+		pr_err("%s: dma_buf pointer is invalid\n", __func__);
+		return -EINVAL;
+	}
+
+	ret = __msm_dma_map_sg(dev, sg, nents, dir, dma_buf, attrs);
+
+	return ret;
+}
+EXPORT_SYMBOL(msm_dma_map_sg_attrs);
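+
+/*
+ * Example client usage (editor's sketch; "sgt" and "dmabuf" are
+ * illustrative, error handling is elided):
+ *
+ *	struct dma_attrs attrs;
+ *
+ *	init_dma_attrs(&attrs);
+ *	dma_set_attr(DMA_ATTR_NO_DELAYED_UNMAP, &attrs);
+ *	nents = msm_dma_map_sg_attrs(dev, sgt->sgl, sgt->nents,
+ *				     DMA_TO_DEVICE, dmabuf, &attrs);
+ *	...
+ *	msm_dma_unmap_sg(dev, sgt->sgl, sgt->nents, DMA_TO_DEVICE, dmabuf);
+ *
+ * With DMA_ATTR_NO_DELAYED_UNMAP set, no extra references are taken, so the
+ * msm_dma_unmap_sg() call drops the mapping immediately instead of keeping
+ * it cached until the buffer is freed.
+ */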
+
+static void msm_iommu_meta_destroy(struct kref *kref)
+{
+	struct msm_iommu_meta *meta = container_of(kref, struct msm_iommu_meta,
+						ref);
+
+	if (!list_empty(&meta->iommu_maps)) {
+		WARN(1, "%s: DMA Buffer %p being destroyed with outstanding iommu mappins!\n", __func__,
+			meta->buffer);
+	}
+	rb_erase(&meta->node, &iommu_root);
+	kfree(meta);
+}
+
+static void msm_iommu_meta_put(struct msm_iommu_meta *meta)
+{
+	/*
+	 * Need to lock here to prevent race against map/unmap
+	 */
+	mutex_lock(&msm_iommu_map_mutex);
+	kref_put(&meta->ref, msm_iommu_meta_destroy);
+	mutex_unlock(&msm_iommu_map_mutex);
+}
+
+static void msm_iommu_map_release(struct kref *kref)
+{
+	struct msm_iommu_map *map = container_of(kref, struct msm_iommu_map,
+						ref);
+
+	list_del(&map->lnode);
+	dma_unmap_sg(map->dev, &map->sgl, map->nents, map->dir);
+	kfree(map);
+}
+
+void msm_dma_unmap_sg(struct device *dev, struct scatterlist *sgl, int nents,
+		      enum dma_data_direction dir, struct dma_buf *dma_buf)
+{
+	struct msm_iommu_map *iommu_map;
+	struct msm_iommu_meta *meta;
+
+	mutex_lock(&msm_iommu_map_mutex);
+	meta = msm_iommu_meta_lookup(dma_buf->priv);
+	if (!meta) {
+		WARN(1, "%s: (%p) was never mapped\n", __func__, dma_buf);
+		mutex_unlock(&msm_iommu_map_mutex);
+		goto out;
+	}
+	mutex_unlock(&msm_iommu_map_mutex);
+
+	mutex_lock(&meta->lock);
+	iommu_map = msm_iommu_lookup(meta, dev);
+
+	if (!iommu_map) {
+		WARN(1, "%s: (%p) was never mapped for device  %p\n", __func__,
+				dma_buf, dev);
+		mutex_unlock(&meta->lock);
+		goto out;
+	}
+
+	/*
+	 * Save the direction for later use when we actually unmap.
+	 * It is not used right now, but if we move to the coherent mapping
+	 * API in the future we may want to call the appropriate unmap API
+	 * with it when the client asks to unmap.
+	 */
+	iommu_map->dir = dir;
+
+	kref_put(&iommu_map->ref, msm_iommu_map_release);
+	mutex_unlock(&meta->lock);
+
+	msm_iommu_meta_put(meta);
+
+out:
+	return;
+}
+EXPORT_SYMBOL(msm_dma_unmap_sg);
+
+int msm_dma_unmap_all_for_dev(struct device *dev)
+{
+	int ret = 0;
+	struct msm_iommu_meta *meta;
+	struct rb_root *root;
+	struct rb_node *meta_node;
+
+	mutex_lock(&msm_iommu_map_mutex);
+	root = &iommu_root;
+	meta_node = rb_first(root);
+	while (meta_node) {
+		struct msm_iommu_map *iommu_map;
+		struct msm_iommu_map *iommu_map_next;
+
+		meta = rb_entry(meta_node, struct msm_iommu_meta, node);
+		mutex_lock(&meta->lock);
+		list_for_each_entry_safe(iommu_map, iommu_map_next,
+					 &meta->iommu_maps, lnode) {
+			if (iommu_map->dev == dev &&
+			    !kref_put(&iommu_map->ref, msm_iommu_map_release))
+				ret = -EINVAL;
+		}
+
+		mutex_unlock(&meta->lock);
+		meta_node = rb_next(meta_node);
+	}
+	mutex_unlock(&msm_iommu_map_mutex);
+
+	return ret;
+}
+EXPORT_SYMBOL(msm_dma_unmap_all_for_dev);
+
+/*
+ * Only to be called by ION code when a buffer is freed
+ */
+void msm_dma_buf_freed(void *buffer)
+{
+	struct msm_iommu_map *iommu_map;
+	struct msm_iommu_map *iommu_map_next;
+	struct msm_iommu_meta *meta;
+
+	mutex_lock(&msm_iommu_map_mutex);
+	meta = msm_iommu_meta_lookup(buffer);
+	if (!meta) {
+		/* Already unmapped (assuming no late unmapping) */
+		mutex_unlock(&msm_iommu_map_mutex);
+		goto out;
+	}
+	mutex_unlock(&msm_iommu_map_mutex);
+
+	mutex_lock(&meta->lock);
+
+	list_for_each_entry_safe(iommu_map, iommu_map_next, &meta->iommu_maps,
+				 lnode)
+		kref_put(&iommu_map->ref, msm_iommu_map_release);
+
+	if (!list_empty(&meta->iommu_maps)) {
+		WARN(1, "%s: DMA Buffer %p being destroyed with outstanding iommu mappins!\n", __func__,
+			meta->buffer);
+	}
+
+	INIT_LIST_HEAD(&meta->iommu_maps);
+	mutex_unlock(&meta->lock);
+
+	msm_iommu_meta_put(meta);
+out:
+	return;
+}
+
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/irqchip/msm_show_resume_irq.c	2019-01-22 16:16:24.075251586 +0100
@@ -0,0 +1,22 @@
+/* Copyright (c) 2011, 2014-2016 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+
+int msm_show_resume_irq_mask;
+
+module_param_named(
+	debug_mask, msm_show_resume_irq_mask, int, S_IRUGO | S_IWUSR | S_IWGRP
+);
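+
+/*
+ * Editor's note: with these permissions the mask is also writable at
+ * runtime, e.g. through
+ * /sys/module/msm_show_resume_irq/parameters/debug_mask (assuming the file
+ * is built in under that module name); irqchip code that reports which
+ * interrupt woke the system is expected to test this mask.
+ */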
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/leds/leds-qpnp.c	2019-01-22 16:16:24.147252238 +0100
@@ -0,0 +1,4274 @@
+/* Copyright (c) 2012-2015, 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/regmap.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/leds.h>
+#include <linux/err.h>
+#include <linux/spinlock.h>
+#include <linux/of_platform.h>
+#include <linux/of_device.h>
+#include <linux/spmi.h>
+#include <linux/platform_device.h>
+#include <linux/qpnp/pwm.h>
+#include <linux/workqueue.h>
+#include <linux/delay.h>
+#include <linux/regulator/consumer.h>
+#include <linux/delay.h>
+
+#define WLED_MOD_EN_REG(base, n)	(base + 0x60 + n*0x10)
+#define WLED_IDAC_DLY_REG(base, n)	(WLED_MOD_EN_REG(base, n) + 0x01)
+#define WLED_FULL_SCALE_REG(base, n)	(WLED_IDAC_DLY_REG(base, n) + 0x01)
+#define WLED_MOD_SRC_SEL_REG(base, n)	(WLED_FULL_SCALE_REG(base, n) + 0x01)
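+
+/*
+ * Worked example (editor's note): for string n = 2 the per-string block
+ * resolves to WLED_MOD_EN_REG = base + 0x80 through WLED_MOD_SRC_SEL_REG =
+ * base + 0x83, matching the "LED3" offsets in wled_debug_regs[] below.
+ */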
+
+/* wled control registers */
+#define WLED_OVP_INT_STATUS(base)		(base + 0x10)
+#define WLED_BRIGHTNESS_CNTL_LSB(base, n)	(base + 0x40 + 2*n)
+#define WLED_BRIGHTNESS_CNTL_MSB(base, n)	(base + 0x41 + 2*n)
+#define WLED_MOD_CTRL_REG(base)			(base + 0x46)
+#define WLED_SYNC_REG(base)			(base + 0x47)
+#define WLED_FDBCK_CTRL_REG(base)		(base + 0x48)
+#define WLED_SWITCHING_FREQ_REG(base)		(base + 0x4C)
+#define WLED_OVP_CFG_REG(base)			(base + 0x4D)
+#define WLED_BOOST_LIMIT_REG(base)		(base + 0x4E)
+#define WLED_CURR_SINK_REG(base)		(base + 0x4F)
+#define WLED_HIGH_POLE_CAP_REG(base)		(base + 0x58)
+#define WLED_CURR_SINK_MASK		0xE0
+#define WLED_CURR_SINK_SHFT		0x05
+#define WLED_DISABLE_ALL_SINKS		0x00
+#define WLED_DISABLE_1_2_SINKS		0x80
+#define WLED_SWITCH_FREQ_MASK		0x0F
+#define WLED_OVP_VAL_MASK		0x03
+#define WLED_OVP_INT_MASK		0x02
+#define WLED_OVP_VAL_BIT_SHFT		0x00
+#define WLED_BOOST_LIMIT_MASK		0x07
+#define WLED_BOOST_LIMIT_BIT_SHFT	0x00
+#define WLED_BOOST_ON			0x80
+#define WLED_BOOST_OFF			0x00
+#define WLED_EN_MASK			0x80
+#define WLED_NO_MASK			0x00
+#define WLED_CP_SELECT_MAX		0x03
+#define WLED_CP_SELECT_MASK		0x02
+#define WLED_USE_EXT_GEN_MOD_SRC	0x01
+#define WLED_CTL_DLY_STEP		200
+#define WLED_CTL_DLY_MAX		1400
+#define WLED_MAX_CURR			25
+#define WLED_NO_CURRENT			0x00
+#define WLED_OVP_DELAY			1000
+#define WLED_OVP_DELAY_INT		200
+#define WLED_OVP_DELAY_LOOP		100
+#define WLED_MSB_MASK			0x0F
+#define WLED_MAX_CURR_MASK		0x1F
+#define WLED_OP_FDBCK_MASK		0x07
+#define WLED_OP_FDBCK_BIT_SHFT		0x00
+#define WLED_OP_FDBCK_DEFAULT		0x00
+
+#define WLED_SET_ILIM_CODE		0x01
+
+#define WLED_MAX_LEVEL			4095
+#define WLED_8_BIT_MASK			0xFF
+#define WLED_4_BIT_MASK			0x0F
+#define WLED_8_BIT_SHFT			0x08
+#define WLED_MAX_DUTY_CYCLE		0xFFF
+
+#define WLED_SYNC_VAL			0x07
+#define WLED_SYNC_RESET_VAL		0x00
+
+#define PMIC_VER_8026			0x04
+#define PMIC_VER_8941			0x01
+#define PMIC_VERSION_REG		0x0105
+
+#define WLED_DEFAULT_STRINGS		0x01
+#define WLED_THREE_STRINGS		0x03
+#define WLED_MAX_TRIES			5
+#define WLED_DEFAULT_OVP_VAL		0x02
+#define WLED_BOOST_LIM_DEFAULT		0x03
+#define WLED_CP_SEL_DEFAULT		0x00
+#define WLED_CTRL_DLY_DEFAULT		0x00
+#define WLED_SWITCH_FREQ_DEFAULT	0x0B
+
+#define FLASH_SAFETY_TIMER(base)	(base + 0x40)
+#define FLASH_MAX_CURR(base)		(base + 0x41)
+#define FLASH_LED_0_CURR(base)		(base + 0x42)
+#define FLASH_LED_1_CURR(base)		(base + 0x43)
+#define FLASH_CLAMP_CURR(base)		(base + 0x44)
+#define FLASH_LED_TMR_CTRL(base)	(base + 0x48)
+#define FLASH_HEADROOM(base)		(base + 0x4A)
+#define FLASH_STARTUP_DELAY(base)	(base + 0x4B)
+#define FLASH_MASK_ENABLE(base)		(base + 0x4C)
+#define FLASH_VREG_OK_FORCE(base)	(base + 0x4F)
+#define FLASH_ENABLE_CONTROL(base)	(base + 0x46)
+#define FLASH_LED_STROBE_CTRL(base)	(base + 0x47)
+#define FLASH_WATCHDOG_TMR(base)	(base + 0x49)
+#define FLASH_FAULT_DETECT(base)	(base + 0x51)
+#define FLASH_PERIPHERAL_SUBTYPE(base)	(base + 0x05)
+#define FLASH_CURRENT_RAMP(base)	(base + 0x54)
+
+#define FLASH_MAX_LEVEL			0x4F
+#define TORCH_MAX_LEVEL			0x0F
+#define	FLASH_NO_MASK			0x00
+
+#define FLASH_MASK_1			0x20
+#define FLASH_MASK_REG_MASK		0xE0
+#define FLASH_HEADROOM_MASK		0x03
+#define FLASH_SAFETY_TIMER_MASK		0x7F
+#define FLASH_CURRENT_MASK		0xFF
+#define FLASH_MAX_CURRENT_MASK		0x7F
+#define FLASH_TMR_MASK			0x03
+#define FLASH_TMR_WATCHDOG		0x03
+#define FLASH_TMR_SAFETY		0x00
+#define FLASH_FAULT_DETECT_MASK		0X80
+#define FLASH_HW_VREG_OK		0x40
+#define FLASH_SW_VREG_OK                0x80
+#define FLASH_VREG_MASK			0xC0
+#define FLASH_STARTUP_DLY_MASK		0x02
+#define FLASH_CURRENT_RAMP_MASK		0xBF
+
+#define FLASH_ENABLE_ALL		0xE0
+#define FLASH_ENABLE_MODULE		0x80
+#define FLASH_ENABLE_MODULE_MASK	0x80
+#define FLASH_DISABLE_ALL		0x00
+#define FLASH_ENABLE_MASK		0xE0
+#define FLASH_ENABLE_LED_0		0xC0
+#define FLASH_ENABLE_LED_1		0xA0
+#define FLASH_INIT_MASK			0xE0
+#define	FLASH_SELFCHECK_ENABLE		0x80
+#define FLASH_WATCHDOG_MASK		0x1F
+#define FLASH_RAMP_STEP_27US		0xBF
+
+#define FLASH_HW_SW_STROBE_SEL_MASK	0x04
+#define FLASH_STROBE_MASK		0xC7
+#define FLASH_LED_0_OUTPUT		0x80
+#define FLASH_LED_1_OUTPUT		0x40
+#define FLASH_TORCH_OUTPUT		0xC0
+
+#define FLASH_CURRENT_PRGM_MIN		1
+#define FLASH_CURRENT_PRGM_SHIFT	1
+#define FLASH_CURRENT_MAX		0x4F
+#define FLASH_CURRENT_TORCH		0x07
+
+#define FLASH_DURATION_200ms		0x13
+#define TORCH_DURATION_12s		0x0A
+#define FLASH_CLAMP_200mA		0x0F
+
+#define FLASH_SUBTYPE_DUAL		0x01
+#define FLASH_SUBTYPE_SINGLE		0x02
+
+#define FLASH_RAMP_UP_DELAY_US		1000
+#define FLASH_RAMP_DN_DELAY_US		2160
+
+#define LED_TRIGGER_DEFAULT		"none"
+
+#define RGB_LED_SRC_SEL(base)		(base + 0x45)
+#define RGB_LED_EN_CTL(base)		(base + 0x46)
+#define RGB_LED_ATC_CTL(base)		(base + 0x47)
+
+#define RGB_MAX_LEVEL			LED_FULL
+#define RGB_LED_ENABLE_RED		0x80
+#define RGB_LED_ENABLE_GREEN		0x40
+#define RGB_LED_ENABLE_BLUE		0x20
+#define RGB_LED_SOURCE_VPH_PWR		0x01
+#define RGB_LED_ENABLE_MASK		0xE0
+#define RGB_LED_SRC_MASK		0x03
+#define QPNP_LED_PWM_FLAGS	(PM_PWM_LUT_LOOP | PM_PWM_LUT_RAMP_UP)
+#define QPNP_LUT_RAMP_STEP_DEFAULT	255
+#define	PWM_LUT_MAX_SIZE		63
+#define	PWM_GPLED_LUT_MAX_SIZE		31
+#define RGB_LED_DISABLE			0x00
+
+#define MPP_MAX_LEVEL			LED_FULL
+#define LED_MPP_MODE_CTRL(base)		(base + 0x40)
+#define LED_MPP_VIN_CTRL(base)		(base + 0x41)
+#define LED_MPP_EN_CTRL(base)		(base + 0x46)
+#define LED_MPP_SINK_CTRL(base)		(base + 0x4C)
+
+#define LED_MPP_CURRENT_MIN		5
+#define LED_MPP_CURRENT_MAX		40
+#define LED_MPP_VIN_CTRL_DEFAULT	0
+#define LED_MPP_CURRENT_PER_SETTING	5
+#define LED_MPP_SOURCE_SEL_DEFAULT	LED_MPP_MODE_ENABLE
+
+#define LED_MPP_SINK_MASK		0x07
+#define LED_MPP_MODE_MASK		0x7F
+#define LED_MPP_VIN_MASK		0x03
+#define LED_MPP_EN_MASK			0x80
+#define LED_MPP_SRC_MASK		0x0F
+#define LED_MPP_MODE_CTRL_MASK		0x70
+
+#define LED_MPP_MODE_SINK		(0x06 << 4)
+#define LED_MPP_MODE_ENABLE		0x01
+#define LED_MPP_MODE_OUTPUT		0x10
+#define LED_MPP_MODE_DISABLE		0x00
+#define LED_MPP_EN_ENABLE		0x80
+#define LED_MPP_EN_DISABLE		0x00
+
+#define MPP_SOURCE_DTEST1		0x08
+
+#define GPIO_MAX_LEVEL			LED_FULL
+#define LED_GPIO_MODE_CTRL(base)	(base + 0x40)
+#define LED_GPIO_VIN_CTRL(base)		(base + 0x41)
+#define LED_GPIO_EN_CTRL(base)		(base + 0x46)
+
+#define LED_GPIO_VIN_CTRL_DEFAULT	0
+#define LED_GPIO_SOURCE_SEL_DEFAULT	LED_GPIO_MODE_ENABLE
+
+#define LED_GPIO_MODE_MASK		0x3F
+#define LED_GPIO_VIN_MASK		0x0F
+#define LED_GPIO_EN_MASK		0x80
+#define LED_GPIO_SRC_MASK		0x0F
+#define LED_GPIO_MODE_CTRL_MASK		0x30
+
+#define LED_GPIO_MODE_ENABLE	0x01
+#define LED_GPIO_MODE_DISABLE	0x00
+#define LED_GPIO_MODE_OUTPUT		0x10
+#define LED_GPIO_EN_ENABLE		0x80
+#define LED_GPIO_EN_DISABLE		0x00
+
+#define KPDBL_MAX_LEVEL			LED_FULL
+#define KPDBL_ROW_SRC_SEL(base)		(base + 0x40)
+#define KPDBL_ENABLE(base)		(base + 0x46)
+#define KPDBL_ROW_SRC(base)		(base + 0xE5)
+
+#define KPDBL_ROW_SRC_SEL_VAL_MASK	0x0F
+#define KPDBL_ROW_SCAN_EN_MASK		0x80
+#define KPDBL_ROW_SCAN_VAL_MASK		0x0F
+#define KPDBL_ROW_SCAN_EN_SHIFT		7
+#define KPDBL_MODULE_EN			0x80
+#define KPDBL_MODULE_DIS		0x00
+#define KPDBL_MODULE_EN_MASK		0x80
+#define NUM_KPDBL_LEDS			4
+#define KPDBL_MASTER_BIT_INDEX		0
+
+/**
+ * enum qpnp_leds - QPNP supported led ids
+ * @QPNP_ID_WLED - White led backlight
+ */
+enum qpnp_leds {
+	QPNP_ID_WLED = 0,
+	QPNP_ID_FLASH1_LED0,
+	QPNP_ID_FLASH1_LED1,
+	QPNP_ID_RGB_RED,
+	QPNP_ID_RGB_GREEN,
+	QPNP_ID_RGB_BLUE,
+	QPNP_ID_LED_MPP,
+	QPNP_ID_KPDBL,
+	QPNP_ID_LED_GPIO,
+	QPNP_ID_MAX,
+};
+
+/* current boost limit */
+enum wled_current_boost_limit {
+	WLED_CURR_LIMIT_105mA,
+	WLED_CURR_LIMIT_385mA,
+	WLED_CURR_LIMIT_525mA,
+	WLED_CURR_LIMIT_805mA,
+	WLED_CURR_LIMIT_980mA,
+	WLED_CURR_LIMIT_1260mA,
+	WLED_CURR_LIMIT_1400mA,
+	WLED_CURR_LIMIT_1680mA,
+};
+
+/* over voltage protection threshold */
+enum wled_ovp_threshold {
+	WLED_OVP_35V,
+	WLED_OVP_32V,
+	WLED_OVP_29V,
+	WLED_OVP_27V,
+};
+
+enum flash_headroom {
+	HEADROOM_250mV = 0,
+	HEADROOM_300mV,
+	HEADROOM_400mV,
+	HEADROOM_500mV,
+};
+
+enum flash_startup_dly {
+	DELAY_10us = 0,
+	DELAY_32us,
+	DELAY_64us,
+	DELAY_128us,
+};
+
+enum led_mode {
+	PWM_MODE = 0,
+	LPG_MODE,
+	MANUAL_MODE,
+};
+
+static u8 wled_debug_regs[] = {
+	/* brightness registers */
+	0x40, 0x41, 0x42, 0x43, 0x44, 0x45,
+	/* common registers */
+	0x46, 0x47, 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f,
+	0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59,
+	/* LED1 */
+	0x60, 0x61, 0x62, 0x63, 0x66,
+	/* LED2 */
+	0x70, 0x71, 0x72, 0x73, 0x76,
+	/* LED3 */
+	0x80, 0x81, 0x82, 0x83, 0x86,
+};
+
+static u8 flash_debug_regs[] = {
+	0x40, 0x41, 0x42, 0x43, 0x44, 0x48, 0x49, 0x4b, 0x4c,
+	0x4f, 0x46, 0x47,
+};
+
+static u8 rgb_pwm_debug_regs[] = {
+	0x45, 0x46, 0x47,
+};
+
+static u8 mpp_debug_regs[] = {
+	0x40, 0x41, 0x42, 0x45, 0x46, 0x4c,
+};
+
+static u8 kpdbl_debug_regs[] = {
+	0x40, 0x46, 0xb1, 0xb3, 0xb4, 0xe5,
+};
+
+static u8 gpio_debug_regs[] = {
+	0x40, 0x41, 0x42, 0x45, 0x46,
+};
+
+/**
+ *  pwm_config_data - pwm configuration data
+ *  @lut_params - lut parameters to be used by pwm driver
+ *  @pwm_dev - pwm device
+ *  @pwm_period_us - period for pwm, in us
+ *  @duty_cycles - duty cycles for the pwm
+ *  @old_duty_pcts - storage for duty pcts that may need to be reused
+ *  @mode - mode the led operates in
+ *  @default_mode - default mode of LED as set in device tree
+ *  @pwm_enabled - whether the pwm is currently enabled
+ *  @use_blink - use blink sysfs entry
+ *  @blinking - device is currently blinking w/LPG mode
+ */
+struct pwm_config_data {
+	struct lut_params	lut_params;
+	struct pwm_device	*pwm_dev;
+	u32			pwm_period_us;
+	struct pwm_duty_cycles	*duty_cycles;
+	int	*old_duty_pcts;
+	u8	mode;
+	u8	default_mode;
+	bool	pwm_enabled;
+	bool use_blink;
+	bool blinking;
+};
+
+/**
+ *  wled_config_data - wled configuration data
+ *  @num_strings - number of wled strings to be configured
+ *  @num_physical_strings - physical number of strings supported
+ *  @ovp_val - over voltage protection threshold
+ *  @boost_curr_lim - boost current limit
+ *  @cp_select - high pole capacitance
+ *  @ctrl_delay_us - delay in activation of led
+ *  @switch_freq - switching frequency setting
+ *  @op_fdbck - selection of output as feedback for the boost
+ *  @pmic_version - version of the attached pmic
+ *  @dig_mod_gen_en - digital module generator
+ *  @cs_out_en - current sink output enable
+ */
+struct wled_config_data {
+	u8	num_strings;
+	u8	num_physical_strings;
+	u8	ovp_val;
+	u8	boost_curr_lim;
+	u8	cp_select;
+	u8	ctrl_delay_us;
+	u8	switch_freq;
+	u8	op_fdbck;
+	u8	pmic_version;
+	bool	dig_mod_gen_en;
+	bool	cs_out_en;
+};
+
+/**
+ *  struct mpp_config_data - mpp configuration data
+ *  @pwm_cfg - device pwm configuration
+ *  @current_setting - current setting, 5ma-40ma in 5ma increments
+ *  @source_sel - source selection
+ *  @mode_ctrl - mode control
+ *  @vin_ctrl - input control
+ *  @min_brightness - minimum brightness supported
+ *  @pwm_mode - pwm mode in use
+ *  @max_uV - maximum regulator voltage
+ *  @min_uV - minimum regulator voltage
+ *  @mpp_reg - regulator to power mpp based LED
+ *  @enable - flag indicating LED on or off
+ */
+struct mpp_config_data {
+	struct pwm_config_data	*pwm_cfg;
+	u8	current_setting;
+	u8	source_sel;
+	u8	mode_ctrl;
+	u8	vin_ctrl;
+	u8	min_brightness;
+	u8 pwm_mode;
+	u32	max_uV;
+	u32	min_uV;
+	struct regulator *mpp_reg;
+	bool	enable;
+};
+
+/**
+ *  struct flash_config_data - flash configuration data
+ *  @current_prgm - current to be programmed, scaled by max level
+ *  @clamp_curr - clamp current to use
+ *  @headroom - headroom value to use
+ *  @duration - duration of the flash
+ *  @enable_module - enable address for particular flash
+ *  @trigger_flash - trigger flash
+ *  @startup_dly - startup delay for flash
+ *  @strobe_type - select between sw and hw strobe
+ *  @peripheral_subtype - module peripheral subtype
+ *  @current_addr - address to write for current
+ *  @second_addr - address of secondary flash to be written
+ *  @safety_timer - enable safety timer or watchdog timer
+ *  @torch_enable - enable flash LED torch mode
+ *  @flash_reg_get - flash regulator attached or not
+ *  @flash_wa_reg_get - workaround regulator attached or not
+ *  @flash_on - flash status, on or off
+ *  @torch_on - torch status, on or off
+ *  @vreg_ok - specifies strobe type, sw or hw
+ *  @no_smbb_support - specifies if smbb boost is not required and there is a
+ *  single regulator for both flash and torch
+ *  @flash_boost_reg - boost regulator for flash
+ *  @torch_boost_reg - boost regulator for torch
+ *  @flash_wa_reg - flash regulator for wa
+ */
+struct flash_config_data {
+	u8	current_prgm;
+	u8	clamp_curr;
+	u8	headroom;
+	u8	duration;
+	u8	enable_module;
+	u8	trigger_flash;
+	u8	startup_dly;
+	u8	strobe_type;
+	u8	peripheral_subtype;
+	u16	current_addr;
+	u16	second_addr;
+	bool	safety_timer;
+	bool	torch_enable;
+	bool	flash_reg_get;
+	bool    flash_wa_reg_get;
+	bool	flash_on;
+	bool	torch_on;
+	bool	vreg_ok;
+	bool    no_smbb_support;
+	struct regulator *flash_boost_reg;
+	struct regulator *torch_boost_reg;
+	struct regulator *flash_wa_reg;
+};
+
+/**
+ *  struct kpdbl_config_data - kpdbl configuration data
+ *  @pwm_cfg - device pwm configuration
+ *  @row_id - row id of the led
+ *  @row_src_vbst - 0 for vph_pwr and 1 for vbst
+ *  @row_src_en - enable row source
+ *  @always_on - always on row
+ *  @lut_params - lut parameters to be used by pwm driver
+ *  @duty_cycles - duty cycles for lut
+ *  @pwm_mode - pwm mode in use
+ */
+struct kpdbl_config_data {
+	struct pwm_config_data	*pwm_cfg;
+	u32	row_id;
+	bool	row_src_vbst;
+	bool	row_src_en;
+	bool	always_on;
+	struct pwm_duty_cycles  *duty_cycles;
+	struct lut_params	lut_params;
+	u8	pwm_mode;
+};
+
+/**
+ *  struct rgb_config_data - rgb configuration data
+ *  @pwm_cfg - device pwm configuration
+ *  @enable - bits to enable led
+ */
+struct rgb_config_data {
+	struct pwm_config_data	*pwm_cfg;
+	u8	enable;
+};
+
+/**
+ *  struct gpio_config_data - gpio configuration data
+ *  @source_sel - source selection
+ *  @mode_ctrl - mode control
+ *  @vin_ctrl - input control
+ *  @enable - flag indicating LED on or off
+ */
+struct gpio_config_data {
+	u8	source_sel;
+	u8	mode_ctrl;
+	u8	vin_ctrl;
+	bool	enable;
+};
+
+/**
+ * struct qpnp_led_data - internal led data structure
+ * @cdev - led class device
+ * @pdev - platform device the led belongs to
+ * @regmap - regmap used for register access
+ * @dwork - delayed work for turning off the LED
+ * @workqueue - dedicated workqueue to handle concurrency
+ * @work - workqueue for led
+ * @id - led index
+ * @base - base register given in device tree
+ * @reg - cached value of led register
+ * @num_leds - number of leds in the module
+ * @lock - to protect the transactions
+ * @wled_cfg - wled configuration data
+ * @flash_cfg - flash configuration data
+ * @kpdbl_cfg - kpdbl configuration data
+ * @rgb_cfg - rgb configuration data
+ * @mpp_cfg - mpp configuration data
+ * @gpio_cfg - gpio configuration data
+ * @max_current - maximum current supported by LED
+ * @default_on - true: default state max, false: default state 0
+ * @in_order_command_processing - process brightness requests in order
+ * @turn_off_delay_ms - number of msec before turning off the LED
+ */
+struct qpnp_led_data {
+	struct led_classdev		cdev;
+	struct platform_device		*pdev;
+	struct regmap			*regmap;
+	struct delayed_work		dwork;
+	struct workqueue_struct		*workqueue;
+	struct work_struct		work;
+	int				id;
+	u16				base;
+	u8				reg;
+	u8				num_leds;
+	struct mutex			lock;
+	struct wled_config_data		*wled_cfg;
+	struct flash_config_data	*flash_cfg;
+	struct kpdbl_config_data	*kpdbl_cfg;
+	struct rgb_config_data		*rgb_cfg;
+	struct mpp_config_data		*mpp_cfg;
+	struct gpio_config_data		*gpio_cfg;
+	int				max_current;
+	bool				default_on;
+	bool				in_order_command_processing;
+	int				turn_off_delay_ms;
+};
+
+static DEFINE_MUTEX(flash_lock);
+static struct pwm_device *kpdbl_master;
+static u32 kpdbl_master_period_us;
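+/*
+ * Bit KPDBL_MASTER_BIT_INDEX of kpdbl_leds_in_use tracks the GPLED1
+ * "master" row, which some platforms require to be enabled whenever any
+ * other KPDBL row is lit.
+ */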
+DECLARE_BITMAP(kpdbl_leds_in_use, NUM_KPDBL_LEDS);
+static bool is_kpdbl_master_turn_on;
+
+static int
+qpnp_led_masked_write(struct qpnp_led_data *led, u16 addr, u8 mask, u8 val)
+{
+	int rc;
+
+	rc = regmap_update_bits(led->regmap, addr, mask, val);
+	if (rc)
+		dev_err(&led->pdev->dev,
+			"regmap_update_bits failed, addr=%x, rc(%d)\n",
+			addr, rc);
+	return rc;
+}
+
+static void qpnp_dump_regs(struct qpnp_led_data *led, u8 regs[], u8 array_size)
+{
+	int i;
+	u8 val;
+
+	pr_debug("===== %s LED register dump start =====\n", led->cdev.name);
+	for (i = 0; i < array_size; i++) {
+		regmap_bulk_read(led->regmap, led->base + regs[i], &val,
+				 sizeof(val));
+		pr_debug("%s: 0x%x = 0x%x\n", led->cdev.name,
+					led->base + regs[i], val);
+	}
+	pr_debug("===== %s LED register dump end =====\n", led->cdev.name);
+}
+
+static int qpnp_wled_sync(struct qpnp_led_data *led)
+{
+	int rc;
+	u8 val;
+
+	/* toggle the sync register to latch the new brightness values */
+	val = WLED_SYNC_VAL;
+	rc = regmap_write(led->regmap, WLED_SYNC_REG(led->base), val);
+	if (rc) {
+		dev_err(&led->pdev->dev,
+				"WLED set sync reg failed(%d)\n", rc);
+		return rc;
+	}
+
+	val = WLED_SYNC_RESET_VAL;
+	rc = regmap_write(led->regmap, WLED_SYNC_REG(led->base), val);
+	if (rc) {
+		dev_err(&led->pdev->dev,
+				"WLED reset sync reg failed(%d)\n", rc);
+		return rc;
+	}
+	return 0;
+}
+
+static int qpnp_wled_set(struct qpnp_led_data *led)
+{
+	int rc, duty, level, tries = 0;
+	u8 val, i, num_wled_strings;
+	uint sink_val, ilim_val, ovp_val;
+
+	num_wled_strings = led->wled_cfg->num_strings;
+
+	level = led->cdev.brightness;
+
+	if (level > WLED_MAX_LEVEL)
+		level = WLED_MAX_LEVEL;
+	if (level == 0) {
+		for (i = 0; i < num_wled_strings; i++) {
+			rc = qpnp_led_masked_write(led,
+				WLED_FULL_SCALE_REG(led->base, i),
+				WLED_MAX_CURR_MASK, WLED_NO_CURRENT);
+			if (rc) {
+				dev_err(&led->pdev->dev,
+					"Write max current failure (%d)\n",
+					rc);
+				return rc;
+			}
+		}
+
+		rc = qpnp_wled_sync(led);
+		if (rc) {
+			dev_err(&led->pdev->dev,
+				"WLED sync failed(%d)\n", rc);
+			return rc;
+		}
+
+		rc = regmap_read(led->regmap, WLED_CURR_SINK_REG(led->base),
+				 &sink_val);
+		if (rc) {
+			dev_err(&led->pdev->dev,
+				"WLED read sink reg failed(%d)\n", rc);
+			return rc;
+		}
+
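+		/*
+		 * Powering down: the sink disable sequence differs per PMIC
+		 * revision. The current sink register was cached above so it
+		 * can be restored once the boost converter is turned off.
+		 */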
+		if (led->wled_cfg->pmic_version == PMIC_VER_8026) {
+			val = WLED_DISABLE_ALL_SINKS;
+			rc = regmap_write(led->regmap,
+					  WLED_CURR_SINK_REG(led->base), val);
+			if (rc) {
+				dev_err(&led->pdev->dev,
+					"WLED write sink reg failed(%d)\n", rc);
+				return rc;
+			}
+
+			usleep_range(WLED_OVP_DELAY, WLED_OVP_DELAY + 10);
+		} else if (led->wled_cfg->pmic_version == PMIC_VER_8941) {
+			if (led->wled_cfg->num_physical_strings <=
+					WLED_THREE_STRINGS) {
+				val = WLED_DISABLE_1_2_SINKS;
+				rc = regmap_write(led->regmap,
+						  WLED_CURR_SINK_REG(led->base),
+						  val);
+				if (rc) {
+					dev_err(&led->pdev->dev,
+						"WLED write sink reg failed");
+					return rc;
+				}
+
+				rc = regmap_read(led->regmap,
+					 WLED_BOOST_LIMIT_REG(led->base),
+					 &ilim_val);
+				if (rc) {
+					dev_err(&led->pdev->dev,
+						"Unable to read boost reg");
+					return rc;
+				}
+				val = WLED_SET_ILIM_CODE;
+				rc = regmap_write(led->regmap,
+					  WLED_BOOST_LIMIT_REG(led->base),
+					  val);
+				if (rc) {
+					dev_err(&led->pdev->dev,
+						"WLED write boost limit reg failed");
+					return rc;
+				}
+				usleep_range(WLED_OVP_DELAY,
+					     WLED_OVP_DELAY + 10);
+			} else {
+				val = WLED_DISABLE_ALL_SINKS;
+				rc = regmap_write(led->regmap,
+						  WLED_CURR_SINK_REG(led->base),
+						  val);
+				if (rc) {
+					dev_err(&led->pdev->dev,
+						"WLED write sink reg failed");
+					return rc;
+				}
+
+				msleep(WLED_OVP_DELAY_INT);
+				while (tries < WLED_MAX_TRIES) {
+					rc = regmap_read(led->regmap,
+						 WLED_OVP_INT_STATUS(led->base),
+						 &ovp_val);
+					if (rc)
+						dev_err(&led->pdev->dev,
+						"Unable to read OVP status reg");
+					else if (ovp_val & WLED_OVP_INT_MASK)
+						break;
+
+					msleep(WLED_OVP_DELAY_LOOP);
+					tries++;
+				}
+				usleep_range(WLED_OVP_DELAY,
+					     WLED_OVP_DELAY + 10);
+			}
+		}
+
+		val = WLED_BOOST_OFF;
+		rc = regmap_write(led->regmap, WLED_MOD_CTRL_REG(led->base),
+				  val);
+		if (rc) {
+			dev_err(&led->pdev->dev,
+				"WLED write ctrl reg failed(%d)\n", rc);
+			return rc;
+		}
+
+		for (i = 0; i < num_wled_strings; i++) {
+			rc = qpnp_led_masked_write(led,
+				WLED_FULL_SCALE_REG(led->base, i),
+				WLED_MAX_CURR_MASK, (u8)led->max_current);
+			if (rc) {
+				dev_err(&led->pdev->dev,
+					"Write max current failure (%d)\n",
+					rc);
+				return rc;
+			}
+		}
+
+		rc = qpnp_wled_sync(led);
+		if (rc) {
+			dev_err(&led->pdev->dev,
+				"WLED sync failed(%d)\n", rc);
+			return rc;
+		}
+
+		if (led->wled_cfg->pmic_version == PMIC_VER_8941) {
+			if (led->wled_cfg->num_physical_strings <=
+					WLED_THREE_STRINGS) {
+				rc = regmap_write(led->regmap,
+					  WLED_BOOST_LIMIT_REG(led->base),
+					  ilim_val);
+				if (rc) {
+					dev_err(&led->pdev->dev,
+						"WLED write boost limit reg failed");
+					return rc;
+				}
+			} else {
+				/* restore OVP to original value */
+				rc = regmap_write(led->regmap,
+						  WLED_OVP_CFG_REG(led->base),
+						  led->wled_cfg->ovp_val);
+				if (rc) {
+					dev_err(&led->pdev->dev,
+						"WLED write OVP reg failed");
+					return rc;
+				}
+			}
+		}
+
+		/* re-enable all sinks */
+		rc = regmap_write(led->regmap, WLED_CURR_SINK_REG(led->base),
+				  sink_val);
+		if (rc) {
+			dev_err(&led->pdev->dev,
+				"WLED write sink reg failed(%d)\n", rc);
+			return rc;
+		}
+
+	} else {
+		val = WLED_BOOST_ON;
+		rc = regmap_write(led->regmap, WLED_MOD_CTRL_REG(led->base),
+				  val);
+		if (rc) {
+			dev_err(&led->pdev->dev,
+				"WLED write ctrl reg failed(%d)\n", rc);
+			return rc;
+		}
+	}
+
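+	/*
+	 * Scale brightness into the duty-cycle range; the value is split
+	 * into a 4-bit MSB and an 8-bit LSB when written below.
+	 */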
+	duty = (WLED_MAX_DUTY_CYCLE * level) / WLED_MAX_LEVEL;
+
+	/* program brightness control registers */
+	for (i = 0; i < num_wled_strings; i++) {
+		rc = qpnp_led_masked_write(led,
+			WLED_BRIGHTNESS_CNTL_MSB(led->base, i), WLED_MSB_MASK,
+			(duty >> WLED_8_BIT_SHFT) & WLED_4_BIT_MASK);
+		if (rc) {
+			dev_err(&led->pdev->dev,
+				"WLED set brightness MSB failed(%d)\n", rc);
+			return rc;
+		}
+		val = duty & WLED_8_BIT_MASK;
+		rc = regmap_write(led->regmap,
+				  WLED_BRIGHTNESS_CNTL_LSB(led->base, i), val);
+		if (rc) {
+			dev_err(&led->pdev->dev,
+				"WLED set brightness LSB failed(%d)\n", rc);
+			return rc;
+		}
+	}
+
+	rc = qpnp_wled_sync(led);
+	if (rc) {
+		dev_err(&led->pdev->dev, "WLED sync failed(%d)\n", rc);
+		return rc;
+	}
+	return 0;
+}
+
+static int qpnp_mpp_set(struct qpnp_led_data *led)
+{
+	int rc;
+	u8 val;
+	int duty_us, duty_ns, period_us;
+
+	if (led->cdev.brightness) {
+		if (led->mpp_cfg->mpp_reg && !led->mpp_cfg->enable) {
+			rc = regulator_set_voltage(led->mpp_cfg->mpp_reg,
+					led->mpp_cfg->min_uV,
+					led->mpp_cfg->max_uV);
+			if (rc) {
+				dev_err(&led->pdev->dev,
+					"Regulator voltage set failed rc=%d\n",
+									rc);
+				return rc;
+			}
+
+			rc = regulator_enable(led->mpp_cfg->mpp_reg);
+			if (rc) {
+				dev_err(&led->pdev->dev,
+					"Regulator enable failed(%d)\n", rc);
+				goto err_reg_enable;
+			}
+		}
+
+		led->mpp_cfg->enable = true;
+
+		if (led->cdev.brightness < led->mpp_cfg->min_brightness) {
+			dev_warn(&led->pdev->dev, "brightness is less than supported, set to minimum supported\n");
+			led->cdev.brightness = led->mpp_cfg->min_brightness;
+		}
+
+		if (led->mpp_cfg->pwm_mode != MANUAL_MODE) {
+			if (!led->mpp_cfg->pwm_cfg->blinking) {
+				led->mpp_cfg->pwm_cfg->mode =
+					led->mpp_cfg->pwm_cfg->default_mode;
+				led->mpp_cfg->pwm_mode =
+					led->mpp_cfg->pwm_cfg->default_mode;
+			}
+		}
+		if (led->mpp_cfg->pwm_mode == PWM_MODE) {
+			/* config pwm for brightness scaling */
+			rc = pwm_change_mode(led->mpp_cfg->pwm_cfg->pwm_dev,
+					PM_PWM_MODE_PWM);
+			if (rc < 0) {
+				dev_err(&led->pdev->dev,
+					"Failed to set PWM mode, rc = %d\n",
+					rc);
+				return rc;
+			}
+			period_us = led->mpp_cfg->pwm_cfg->pwm_period_us;
+			if (period_us > INT_MAX / NSEC_PER_USEC) {
+				duty_us = (period_us * led->cdev.brightness) /
+					LED_FULL;
+				rc = pwm_config_us(
+					led->mpp_cfg->pwm_cfg->pwm_dev,
+					duty_us,
+					period_us);
+			} else {
+				duty_ns = ((period_us * NSEC_PER_USEC) /
+					LED_FULL) * led->cdev.brightness;
+				rc = pwm_config(
+					led->mpp_cfg->pwm_cfg->pwm_dev,
+					duty_ns,
+					period_us * NSEC_PER_USEC);
+			}
+			if (rc < 0) {
+				dev_err(&led->pdev->dev, "Failed to configure pwm for new values\n");
+				goto err_mpp_reg_write;
+			}
+		}
+
+		if (led->mpp_cfg->pwm_mode != MANUAL_MODE) {
+			pwm_enable(led->mpp_cfg->pwm_cfg->pwm_dev);
+			led->mpp_cfg->pwm_cfg->pwm_enabled = 1;
+		} else {
+			if (led->cdev.brightness < LED_MPP_CURRENT_MIN)
+				led->cdev.brightness = LED_MPP_CURRENT_MIN;
+			else {
+				/*
+				 * The PMIC supports LED currents from 5mA to
+				 * 40mA in steps of 5mA. Round the requested
+				 * brightness down to the nearest supported
+				 * step, e.g. 23 becomes 20.
+				 */
+				led->cdev.brightness /= LED_MPP_CURRENT_MIN;
+				led->cdev.brightness *= LED_MPP_CURRENT_MIN;
+			}
+
+			val = (led->cdev.brightness / LED_MPP_CURRENT_MIN) - 1;
+
+			rc = qpnp_led_masked_write(led,
+					LED_MPP_SINK_CTRL(led->base),
+					LED_MPP_SINK_MASK, val);
+			if (rc) {
+				dev_err(&led->pdev->dev,
+					"Failed to write sink control reg\n");
+				goto err_mpp_reg_write;
+			}
+		}
+
+		val = (led->mpp_cfg->source_sel & LED_MPP_SRC_MASK) |
+			(led->mpp_cfg->mode_ctrl & LED_MPP_MODE_CTRL_MASK);
+
+		rc = qpnp_led_masked_write(led,
+			LED_MPP_MODE_CTRL(led->base), LED_MPP_MODE_MASK,
+			val);
+		if (rc) {
+			dev_err(&led->pdev->dev,
+					"Failed to write led mode reg\n");
+			goto err_mpp_reg_write;
+		}
+
+		rc = qpnp_led_masked_write(led,
+				LED_MPP_EN_CTRL(led->base), LED_MPP_EN_MASK,
+				LED_MPP_EN_ENABLE);
+		if (rc) {
+			dev_err(&led->pdev->dev, "Failed to write led enable reg\n");
+			goto err_mpp_reg_write;
+		}
+	} else {
+		if (led->mpp_cfg->pwm_mode != MANUAL_MODE) {
+			led->mpp_cfg->pwm_cfg->mode =
+				led->mpp_cfg->pwm_cfg->default_mode;
+			led->mpp_cfg->pwm_mode =
+				led->mpp_cfg->pwm_cfg->default_mode;
+			pwm_disable(led->mpp_cfg->pwm_cfg->pwm_dev);
+			led->mpp_cfg->pwm_cfg->pwm_enabled = 0;
+		}
+		rc = qpnp_led_masked_write(led,
+					LED_MPP_MODE_CTRL(led->base),
+					LED_MPP_MODE_MASK,
+					LED_MPP_MODE_DISABLE);
+		if (rc) {
+			dev_err(&led->pdev->dev,
+					"Failed to write led mode reg\n");
+			goto err_mpp_reg_write;
+		}
+
+		rc = qpnp_led_masked_write(led,
+					LED_MPP_EN_CTRL(led->base),
+					LED_MPP_EN_MASK,
+					LED_MPP_EN_DISABLE);
+		if (rc) {
+			dev_err(&led->pdev->dev,
+					"Failed to write led enable reg\n");
+			goto err_mpp_reg_write;
+		}
+
+		if (led->mpp_cfg->mpp_reg && led->mpp_cfg->enable) {
+			rc = regulator_disable(led->mpp_cfg->mpp_reg);
+			if (rc) {
+				dev_err(&led->pdev->dev,
+					"MPP regulator disable failed(%d)\n",
+					rc);
+				return rc;
+			}
+
+			rc = regulator_set_voltage(led->mpp_cfg->mpp_reg,
+						0, led->mpp_cfg->max_uV);
+			if (rc) {
+				dev_err(&led->pdev->dev,
+					"MPP regulator voltage set failed(%d)\n",
+					rc);
+				return rc;
+			}
+		}
+
+		led->mpp_cfg->enable = false;
+	}
+
+	if (led->mpp_cfg->pwm_mode != MANUAL_MODE)
+		led->mpp_cfg->pwm_cfg->blinking = false;
+	qpnp_dump_regs(led, mpp_debug_regs, ARRAY_SIZE(mpp_debug_regs));
+
+	return 0;
+
+err_mpp_reg_write:
+	if (led->mpp_cfg->mpp_reg)
+		regulator_disable(led->mpp_cfg->mpp_reg);
+err_reg_enable:
+	if (led->mpp_cfg->mpp_reg)
+		regulator_set_voltage(led->mpp_cfg->mpp_reg, 0,
+							led->mpp_cfg->max_uV);
+	led->mpp_cfg->enable = false;
+
+	return rc;
+}
+
+static int qpnp_gpio_set(struct qpnp_led_data *led)
+{
+	int rc, val;
+
+	if (led->cdev.brightness) {
+		val = (led->gpio_cfg->source_sel & LED_GPIO_SRC_MASK) |
+			(led->gpio_cfg->mode_ctrl & LED_GPIO_MODE_CTRL_MASK);
+
+		rc = qpnp_led_masked_write(led,
+			 LED_GPIO_MODE_CTRL(led->base),
+			 LED_GPIO_MODE_MASK,
+			 val);
+		if (rc) {
+			dev_err(&led->pdev->dev,
+					"Failed to write led mode reg\n");
+			goto err_gpio_reg_write;
+		}
+
+		rc = qpnp_led_masked_write(led,
+			 LED_GPIO_EN_CTRL(led->base),
+			 LED_GPIO_EN_MASK,
+			 LED_GPIO_EN_ENABLE);
+		if (rc) {
+			dev_err(&led->pdev->dev,
+					"Failed to write led enable reg\n");
+			goto err_gpio_reg_write;
+		}
+
+		led->gpio_cfg->enable = true;
+	} else {
+		rc = qpnp_led_masked_write(led,
+				LED_GPIO_MODE_CTRL(led->base),
+				LED_GPIO_MODE_MASK,
+				LED_GPIO_MODE_DISABLE);
+		if (rc) {
+			dev_err(&led->pdev->dev,
+					"Failed to write led mode reg\n");
+			goto err_gpio_reg_write;
+		}
+
+		rc = qpnp_led_masked_write(led,
+				LED_GPIO_EN_CTRL(led->base),
+				LED_GPIO_EN_MASK,
+				LED_GPIO_EN_DISABLE);
+		if (rc) {
+			dev_err(&led->pdev->dev,
+					"Failed to write led enable reg\n");
+			goto err_gpio_reg_write;
+		}
+
+		led->gpio_cfg->enable = false;
+	}
+
+	qpnp_dump_regs(led, gpio_debug_regs, ARRAY_SIZE(gpio_debug_regs));
+
+	return 0;
+
+err_gpio_reg_write:
+	led->gpio_cfg->enable = false;
+
+	return rc;
+}
+
+static int qpnp_flash_regulator_operate(struct qpnp_led_data *led, bool on)
+{
+	int rc, i;
+	struct qpnp_led_data *led_array;
+	bool regulator_on = false;
+
+	led_array = dev_get_drvdata(&led->pdev->dev);
+	if (!led_array) {
+		dev_err(&led->pdev->dev, "Unable to get LED array\n");
+		return -EINVAL;
+	}
+
+	for (i = 0; i < led->num_leds; i++)
+		regulator_on |= led_array[i].flash_cfg->flash_on;
+
+	if (!on)
+		goto regulator_turn_off;
+
+	if (!regulator_on && !led->flash_cfg->flash_on) {
+		for (i = 0; i < led->num_leds; i++) {
+			if (led_array[i].flash_cfg->flash_reg_get) {
+				if (led_array[i].flash_cfg->flash_wa_reg_get) {
+					rc = regulator_enable(
+						led_array[i].flash_cfg->
+							flash_wa_reg);
+					if (rc) {
+						dev_err(&led->pdev->dev, "Flash wa regulator enable failed(%d)\n",
+							rc);
+						return rc;
+					}
+				}
+
+				rc = regulator_enable(
+				       led_array[i].flash_cfg->flash_boost_reg);
+				if (rc) {
+					if (led_array[i].flash_cfg->
+							flash_wa_reg_get)
+						/*
+						 * Disable flash wa regulator
+						 * when flash boost regulator
+						 * enable fails
+						 */
+						regulator_disable(
+							led_array[i].flash_cfg->
+								flash_wa_reg);
+					dev_err(&led->pdev->dev, "Flash boost regulator enable failed(%d)\n",
+						rc);
+					return rc;
+				}
+				led->flash_cfg->flash_on = true;
+			}
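+			/*
+			 * The boost supply is shared between the flash LEDs,
+			 * so one enable pass is enough; exit after the first
+			 * entry.
+			 */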
+			break;
+		}
+	}
+
+	return 0;
+
+regulator_turn_off:
+	if (regulator_on && led->flash_cfg->flash_on) {
+		for (i = 0; i < led->num_leds; i++) {
+			if (led_array[i].flash_cfg->flash_reg_get) {
+				rc = qpnp_led_masked_write(led,
+					FLASH_ENABLE_CONTROL(led->base),
+					FLASH_ENABLE_MASK,
+					FLASH_DISABLE_ALL);
+				if (rc) {
+					dev_err(&led->pdev->dev,
+						"Enable reg write failed(%d)\n",
+						rc);
+				}
+
+				rc = regulator_disable(
+				       led_array[i].flash_cfg->flash_boost_reg);
+				if (rc) {
+					dev_err(&led->pdev->dev, "Flash boost regulator disable failed(%d)\n",
+						rc);
+					return rc;
+				}
+				if (led_array[i].flash_cfg->flash_wa_reg_get) {
+					rc = regulator_disable(
+						led_array[i].flash_cfg->
+							flash_wa_reg);
+					if (rc) {
+						dev_err(&led->pdev->dev, "Flash_wa regulator disable failed(%d)\n",
+							rc);
+						return rc;
+					}
+				}
+				led->flash_cfg->flash_on = false;
+			}
+			break;
+		}
+	}
+
+	return 0;
+}
+
+static int qpnp_torch_regulator_operate(struct qpnp_led_data *led, bool on)
+{
+	int rc;
+
+	if (!on)
+		goto regulator_turn_off;
+
+	if (!led->flash_cfg->torch_on) {
+		rc = regulator_enable(led->flash_cfg->torch_boost_reg);
+		if (rc) {
+			dev_err(&led->pdev->dev,
+				"Regulator enable failed(%d)\n", rc);
+			return rc;
+		}
+		led->flash_cfg->torch_on = true;
+	}
+	return 0;
+
+regulator_turn_off:
+	if (led->flash_cfg->torch_on) {
+		rc = qpnp_led_masked_write(led,	FLASH_ENABLE_CONTROL(led->base),
+				FLASH_ENABLE_MODULE_MASK, FLASH_DISABLE_ALL);
+		if (rc) {
+			dev_err(&led->pdev->dev,
+				"Enable reg write failed(%d)\n", rc);
+		}
+
+		rc = regulator_disable(led->flash_cfg->torch_boost_reg);
+		if (rc) {
+			dev_err(&led->pdev->dev,
+				"Regulator disable failed(%d)\n", rc);
+			return rc;
+		}
+		led->flash_cfg->torch_on = false;
+	}
+	return 0;
+}
+
+static int qpnp_flash_set(struct qpnp_led_data *led)
+{
+	int rc = 0, error;
+	int val = led->cdev.brightness;
+
+	if (led->flash_cfg->torch_enable)
+		led->flash_cfg->current_prgm =
+			(val * TORCH_MAX_LEVEL / led->max_current);
+	else
+		led->flash_cfg->current_prgm =
+			(val * FLASH_MAX_LEVEL / led->max_current);
+
+	/* Set led current */
+	if (val > 0) {
+		if (led->flash_cfg->torch_enable) {
+			if (led->flash_cfg->peripheral_subtype ==
+							FLASH_SUBTYPE_DUAL) {
+				if (!led->flash_cfg->no_smbb_support)
+					rc = qpnp_torch_regulator_operate(led,
+									true);
+				else
+					rc = qpnp_flash_regulator_operate(led,
+									true);
+				if (rc) {
+					dev_err(&led->pdev->dev,
+					"Torch regulator operate failed(%d)\n",
+					rc);
+					return rc;
+				}
+			} else if (led->flash_cfg->peripheral_subtype ==
+							FLASH_SUBTYPE_SINGLE) {
+				rc = qpnp_flash_regulator_operate(led, true);
+				if (rc) {
+					dev_err(&led->pdev->dev,
+					"Flash regulator operate failed(%d)\n",
+					rc);
+					goto error_flash_set;
+				}
+			}
+
+			rc = qpnp_led_masked_write(led,
+				FLASH_MAX_CURR(led->base),
+				FLASH_CURRENT_MASK,
+				TORCH_MAX_LEVEL);
+			if (rc) {
+				dev_err(&led->pdev->dev,
+					"Max current reg write failed(%d)\n",
+					rc);
+				goto error_reg_write;
+			}
+
+			rc = qpnp_led_masked_write(led,
+				FLASH_LED_TMR_CTRL(led->base),
+				FLASH_TMR_MASK,
+				FLASH_TMR_WATCHDOG);
+			if (rc) {
+				dev_err(&led->pdev->dev,
+					"Timer control reg write failed(%d)\n",
+					rc);
+				goto error_reg_write;
+			}
+
+			rc = qpnp_led_masked_write(led,
+				led->flash_cfg->current_addr,
+				FLASH_CURRENT_MASK,
+				led->flash_cfg->current_prgm);
+			if (rc) {
+				dev_err(&led->pdev->dev,
+					"Current reg write failed(%d)\n", rc);
+				goto error_reg_write;
+			}
+
+			rc = qpnp_led_masked_write(led,
+				led->flash_cfg->second_addr,
+				FLASH_CURRENT_MASK,
+				led->flash_cfg->current_prgm);
+			if (rc) {
+				dev_err(&led->pdev->dev,
+					"2nd Current reg write failed(%d)\n",
+					rc);
+				goto error_reg_write;
+			}
+
+			rc = qpnp_led_masked_write(led,
+				FLASH_WATCHDOG_TMR(led->base),
+				FLASH_WATCHDOG_MASK,
+				led->flash_cfg->duration);
+			if (rc) {
+				dev_err(&led->pdev->dev,
+					"Watchdog timer reg write failed(%d)\n",
+					rc);
+				goto error_reg_write;
+			}
+
+			rc = qpnp_led_masked_write(led,
+				FLASH_ENABLE_CONTROL(led->base),
+				FLASH_ENABLE_MASK,
+				led->flash_cfg->enable_module);
+			if (rc) {
+				dev_err(&led->pdev->dev,
+					"Enable reg write failed(%d)\n",
+					rc);
+				goto error_reg_write;
+			}
+
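+			/* 0 selects the SW strobe, 1 the HW strobe */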
+			if (!led->flash_cfg->strobe_type)
+				led->flash_cfg->trigger_flash &=
+						~FLASH_HW_SW_STROBE_SEL_MASK;
+			else
+				led->flash_cfg->trigger_flash |=
+						FLASH_HW_SW_STROBE_SEL_MASK;
+
+			rc = qpnp_led_masked_write(led,
+				FLASH_LED_STROBE_CTRL(led->base),
+				led->flash_cfg->trigger_flash,
+				led->flash_cfg->trigger_flash);
+			if (rc) {
+				dev_err(&led->pdev->dev,
+					"LED %d strobe reg write failed(%d)\n",
+					led->id, rc);
+				goto error_reg_write;
+			}
+		} else {
+			rc = qpnp_flash_regulator_operate(led, true);
+			if (rc) {
+				dev_err(&led->pdev->dev,
+					"Flash regulator operate failed(%d)\n",
+					rc);
+				goto error_flash_set;
+			}
+
+			rc = qpnp_led_masked_write(led,
+				FLASH_LED_TMR_CTRL(led->base),
+				FLASH_TMR_MASK,
+				FLASH_TMR_SAFETY);
+			if (rc) {
+				dev_err(&led->pdev->dev,
+					"Timer control reg write failed(%d)\n",
+					rc);
+				goto error_reg_write;
+			}
+
+			/* Set flash safety timer */
+			rc = qpnp_led_masked_write(led,
+				FLASH_SAFETY_TIMER(led->base),
+				FLASH_SAFETY_TIMER_MASK,
+				led->flash_cfg->duration);
+			if (rc) {
+				dev_err(&led->pdev->dev,
+					"Safety timer reg write failed(%d)\n",
+					rc);
+				goto error_flash_set;
+			}
+
+			/* Set max current */
+			rc = qpnp_led_masked_write(led,
+				FLASH_MAX_CURR(led->base), FLASH_CURRENT_MASK,
+				FLASH_MAX_LEVEL);
+			if (rc) {
+				dev_err(&led->pdev->dev,
+					"Max current reg write failed(%d)\n",
+					rc);
+				goto error_flash_set;
+			}
+
+			/* Set clamp current */
+			rc = qpnp_led_masked_write(led,
+				FLASH_CLAMP_CURR(led->base),
+				FLASH_CURRENT_MASK,
+				led->flash_cfg->clamp_curr);
+			if (rc) {
+				dev_err(&led->pdev->dev,
+					"Clamp current reg write failed(%d)\n",
+					rc);
+				goto error_flash_set;
+			}
+
+			rc = qpnp_led_masked_write(led,
+				led->flash_cfg->current_addr,
+				FLASH_CURRENT_MASK,
+				led->flash_cfg->current_prgm);
+			if (rc) {
+				dev_err(&led->pdev->dev,
+					"Current reg write failed(%d)\n", rc);
+				goto error_flash_set;
+			}
+
+			rc = qpnp_led_masked_write(led,
+				FLASH_ENABLE_CONTROL(led->base),
+				led->flash_cfg->enable_module,
+				led->flash_cfg->enable_module);
+			if (rc) {
+				dev_err(&led->pdev->dev,
+					"Enable reg write failed(%d)\n", rc);
+				goto error_flash_set;
+			}
+
+			/*
+			 * Add a 1ms delay to let the charger reach a
+			 * stable state
+			 */
+			usleep_range(FLASH_RAMP_UP_DELAY_US,
+				     FLASH_RAMP_UP_DELAY_US + 10);
+
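+			/* 0 selects the SW strobe, 1 the HW strobe */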
+			if (!led->flash_cfg->strobe_type)
+				led->flash_cfg->trigger_flash &=
+						~FLASH_HW_SW_STROBE_SEL_MASK;
+			else
+				led->flash_cfg->trigger_flash |=
+						FLASH_HW_SW_STROBE_SEL_MASK;
+
+			rc = qpnp_led_masked_write(led,
+				FLASH_LED_STROBE_CTRL(led->base),
+				led->flash_cfg->trigger_flash,
+				led->flash_cfg->trigger_flash);
+			if (rc) {
+				dev_err(&led->pdev->dev,
+				"LED %d strobe reg write failed(%d)\n",
+				led->id, rc);
+				goto error_flash_set;
+			}
+		}
+	} else {
+		rc = qpnp_led_masked_write(led,
+			FLASH_LED_STROBE_CTRL(led->base),
+			led->flash_cfg->trigger_flash,
+			FLASH_DISABLE_ALL);
+		if (rc) {
+			dev_err(&led->pdev->dev,
+				"LED %d flash write failed(%d)\n", led->id, rc);
+			if (led->flash_cfg->torch_enable)
+				goto error_torch_set;
+			else
+				goto error_flash_set;
+		}
+
+		if (led->flash_cfg->torch_enable) {
+			if (led->flash_cfg->peripheral_subtype ==
+							FLASH_SUBTYPE_DUAL) {
+				if (!led->flash_cfg->no_smbb_support)
+					rc = qpnp_torch_regulator_operate(led,
+									false);
+				else
+					rc = qpnp_flash_regulator_operate(led,
+									false);
+				if (rc) {
+					dev_err(&led->pdev->dev,
+						"Torch regulator operate failed(%d)\n",
+						rc);
+					return rc;
+				}
+			} else if (led->flash_cfg->peripheral_subtype ==
+							FLASH_SUBTYPE_SINGLE) {
+				rc = qpnp_flash_regulator_operate(led, false);
+				if (rc) {
+					dev_err(&led->pdev->dev,
+						"Flash regulator operate failed(%d)\n",
+						rc);
+					return rc;
+				}
+			}
+		} else {
+			/*
+			 * Disable the module only after ramp down completes,
+			 * for stable behavior.
+			 */
+			usleep_range(FLASH_RAMP_UP_DELAY_US,
+				     FLASH_RAMP_UP_DELAY_US + 10);
+
+			rc = qpnp_led_masked_write(led,
+				FLASH_ENABLE_CONTROL(led->base),
+				led->flash_cfg->enable_module &
+				~FLASH_ENABLE_MODULE_MASK,
+				FLASH_DISABLE_ALL);
+			if (rc) {
+				dev_err(&led->pdev->dev,
+					"Enable reg write failed(%d)\n", rc);
+				if (led->flash_cfg->torch_enable)
+					goto error_torch_set;
+				else
+					goto error_flash_set;
+			}
+
+			rc = qpnp_flash_regulator_operate(led, false);
+			if (rc) {
+				dev_err(&led->pdev->dev,
+					"Flash regulator operate failed(%d)\n",
+					rc);
+				return rc;
+			}
+		}
+	}
+
+	qpnp_dump_regs(led, flash_debug_regs, ARRAY_SIZE(flash_debug_regs));
+
+	return 0;
+
+error_reg_write:
+	if (led->flash_cfg->peripheral_subtype == FLASH_SUBTYPE_SINGLE)
+		goto error_flash_set;
+
+error_torch_set:
+	if (!led->flash_cfg->no_smbb_support)
+		error = qpnp_torch_regulator_operate(led, false);
+	else
+		error = qpnp_flash_regulator_operate(led, false);
+	if (error) {
+		dev_err(&led->pdev->dev,
+			"Torch regulator operate failed(%d)\n", rc);
+		return error;
+	}
+	return rc;
+
+error_flash_set:
+	error = qpnp_flash_regulator_operate(led, false);
+	if (error) {
+		dev_err(&led->pdev->dev,
+			"Flash regulator operate failed(%d)\n", rc);
+		return error;
+	}
+	return rc;
+}
+
+static int qpnp_kpdbl_set(struct qpnp_led_data *led)
+{
+	int rc;
+	int duty_us, duty_ns, period_us;
+
+	if (led->cdev.brightness) {
+		if (!led->kpdbl_cfg->pwm_cfg->blinking)
+			led->kpdbl_cfg->pwm_cfg->mode =
+				led->kpdbl_cfg->pwm_cfg->default_mode;
+
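+		/* the first KPDBL consumer powers on the whole module */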
+		if (bitmap_empty(kpdbl_leds_in_use, NUM_KPDBL_LEDS)) {
+			rc = qpnp_led_masked_write(led, KPDBL_ENABLE(led->base),
+					KPDBL_MODULE_EN_MASK, KPDBL_MODULE_EN);
+			if (rc) {
+				dev_err(&led->pdev->dev,
+					"Enable reg write failed(%d)\n", rc);
+				return rc;
+			}
+		}
+
+		/*
+		 * On some platforms, the GPLED1 channel should always be
+		 * enabled for the other GPLEDs 2/3/4 to glow. Before enabling
+		 * GPLED 2/3/4, first check if GPLED1 is already enabled. If
+		 * the GPLED1 channel is not enabled, then enable it, but with
+		 * 0 brightness.
+		 */
+		if (!led->kpdbl_cfg->always_on &&
+			!test_bit(KPDBL_MASTER_BIT_INDEX, kpdbl_leds_in_use) &&
+						kpdbl_master) {
+			rc = pwm_config_us(kpdbl_master, 0,
+					kpdbl_master_period_us);
+			if (rc < 0) {
+				dev_err(&led->pdev->dev,
+					"pwm config failed\n");
+				return rc;
+			}
+
+			rc = pwm_enable(kpdbl_master);
+			if (rc < 0) {
+				dev_err(&led->pdev->dev,
+					"pwm enable failed\n");
+				return rc;
+			}
+			set_bit(KPDBL_MASTER_BIT_INDEX,
+						kpdbl_leds_in_use);
+		}
+
+		if (led->kpdbl_cfg->pwm_cfg->mode == PWM_MODE) {
+			rc = pwm_change_mode(led->kpdbl_cfg->pwm_cfg->pwm_dev,
+					PM_PWM_MODE_PWM);
+			if (rc < 0) {
+				dev_err(&led->pdev->dev,
+					"Failed to set PWM mode, rc = %d\n",
+					rc);
+				return rc;
+			}
+			period_us = led->kpdbl_cfg->pwm_cfg->pwm_period_us;
+			if (period_us > INT_MAX / NSEC_PER_USEC) {
+				duty_us = (period_us * led->cdev.brightness) /
+					KPDBL_MAX_LEVEL;
+				rc = pwm_config_us(
+					led->kpdbl_cfg->pwm_cfg->pwm_dev,
+					duty_us,
+					period_us);
+			} else {
+				duty_ns = ((period_us * NSEC_PER_USEC) /
+					KPDBL_MAX_LEVEL) * led->cdev.brightness;
+				rc = pwm_config(
+					led->kpdbl_cfg->pwm_cfg->pwm_dev,
+					duty_ns,
+					period_us * NSEC_PER_USEC);
+			}
+			if (rc < 0) {
+				dev_err(&led->pdev->dev,
+					"pwm config failed\n");
+				return rc;
+			}
+		}
+
+		rc = pwm_enable(led->kpdbl_cfg->pwm_cfg->pwm_dev);
+		if (rc < 0) {
+			dev_err(&led->pdev->dev, "pwm enable failed\n");
+			return rc;
+		}
+		led->kpdbl_cfg->pwm_cfg->pwm_enabled = 1;
+		set_bit(led->kpdbl_cfg->row_id, kpdbl_leds_in_use);
+
+		/*
+		 * is_kpdbl_master_turn_on is set to true when the GPLED1
+		 * channel is enabled and has a valid brightness value.
+		 */
+		if (led->kpdbl_cfg->always_on)
+			is_kpdbl_master_turn_on = true;
+
+	} else {
+		led->kpdbl_cfg->pwm_cfg->mode =
+			led->kpdbl_cfg->pwm_cfg->default_mode;
+
+		/*
+		 * Before disabling GPLED1, check if any other GPLED 2/3/4 is
+		 * on. If so, keep the GPLED1 channel enabled, but with 0
+		 * brightness.
+		 */
+		if (led->kpdbl_cfg->always_on) {
+			if (bitmap_weight(kpdbl_leds_in_use,
+						NUM_KPDBL_LEDS) > 1) {
+				rc = pwm_config_us(
+					led->kpdbl_cfg->pwm_cfg->pwm_dev, 0,
+					led->kpdbl_cfg->pwm_cfg->pwm_period_us);
+				if (rc < 0) {
+					dev_err(&led->pdev->dev,
+						"pwm config failed\n");
+					return rc;
+				}
+
+				rc = pwm_enable(led->kpdbl_cfg->pwm_cfg->
+							pwm_dev);
+				if (rc < 0) {
+					dev_err(&led->pdev->dev,
+						"pwm enable failed\n");
+					return rc;
+				}
+				led->kpdbl_cfg->pwm_cfg->pwm_enabled = 1;
+			} else {
+				if (kpdbl_master) {
+					pwm_disable(kpdbl_master);
+					clear_bit(KPDBL_MASTER_BIT_INDEX,
+						kpdbl_leds_in_use);
+					rc = qpnp_led_masked_write(
+						led, KPDBL_ENABLE(led->base),
+						KPDBL_MODULE_EN_MASK,
+						KPDBL_MODULE_DIS);
+					if (rc) {
+						dev_err(&led->pdev->dev, "Failed to write led enable reg\n");
+						return rc;
+					}
+				}
+			}
+			is_kpdbl_master_turn_on = false;
+		} else {
+			pwm_disable(led->kpdbl_cfg->pwm_cfg->pwm_dev);
+			led->kpdbl_cfg->pwm_cfg->pwm_enabled = 0;
+			clear_bit(led->kpdbl_cfg->row_id, kpdbl_leds_in_use);
+			if (bitmap_weight(kpdbl_leds_in_use,
+				NUM_KPDBL_LEDS) == 1 && kpdbl_master &&
+						!is_kpdbl_master_turn_on) {
+				pwm_disable(kpdbl_master);
+				clear_bit(KPDBL_MASTER_BIT_INDEX,
+					kpdbl_leds_in_use);
+				rc = qpnp_led_masked_write(
+					led, KPDBL_ENABLE(led->base),
+					KPDBL_MODULE_EN_MASK, KPDBL_MODULE_DIS);
+				if (rc) {
+					dev_err(&led->pdev->dev,
+					"Failed to write led enable reg\n");
+					return rc;
+				}
+				is_kpdbl_master_turn_on = false;
+			}
+		}
+	}
+
+	led->kpdbl_cfg->pwm_cfg->blinking = false;
+
+	qpnp_dump_regs(led, kpdbl_debug_regs, ARRAY_SIZE(kpdbl_debug_regs));
+
+	return 0;
+}
+
+static int qpnp_rgb_set(struct qpnp_led_data *led)
+{
+	int rc;
+	int duty_us, duty_ns, period_us;
+
+	if (led->cdev.brightness) {
+		if (!led->rgb_cfg->pwm_cfg->blinking)
+			led->rgb_cfg->pwm_cfg->mode =
+				led->rgb_cfg->pwm_cfg->default_mode;
+		if (led->rgb_cfg->pwm_cfg->mode == PWM_MODE) {
+			rc = pwm_change_mode(led->rgb_cfg->pwm_cfg->pwm_dev,
+					PM_PWM_MODE_PWM);
+			if (rc < 0) {
+				dev_err(&led->pdev->dev,
+					"Failed to set PWM mode, rc = %d\n",
+					rc);
+				return rc;
+			}
+			period_us = led->rgb_cfg->pwm_cfg->pwm_period_us;
+			if (period_us > INT_MAX / NSEC_PER_USEC) {
+				duty_us = (period_us * led->cdev.brightness) /
+					LED_FULL;
+				rc = pwm_config_us(
+					led->rgb_cfg->pwm_cfg->pwm_dev,
+					duty_us,
+					period_us);
+			} else {
+				duty_ns = ((period_us * NSEC_PER_USEC) /
+					LED_FULL) * led->cdev.brightness;
+				rc = pwm_config(
+					led->rgb_cfg->pwm_cfg->pwm_dev,
+					duty_ns,
+					period_us * NSEC_PER_USEC);
+			}
+			if (rc < 0) {
+				dev_err(&led->pdev->dev,
+					"pwm config failed\n");
+				return rc;
+			}
+		}
+		rc = qpnp_led_masked_write(led,
+			RGB_LED_EN_CTL(led->base),
+			led->rgb_cfg->enable, led->rgb_cfg->enable);
+		if (rc) {
+			dev_err(&led->pdev->dev,
+				"Failed to write led enable reg\n");
+			return rc;
+		}
+		if (!led->rgb_cfg->pwm_cfg->pwm_enabled) {
+			pwm_enable(led->rgb_cfg->pwm_cfg->pwm_dev);
+			led->rgb_cfg->pwm_cfg->pwm_enabled = 1;
+		}
+	} else {
+		led->rgb_cfg->pwm_cfg->mode =
+			led->rgb_cfg->pwm_cfg->default_mode;
+		if (led->rgb_cfg->pwm_cfg->pwm_enabled) {
+			pwm_disable(led->rgb_cfg->pwm_cfg->pwm_dev);
+			led->rgb_cfg->pwm_cfg->pwm_enabled = 0;
+		}
+		rc = qpnp_led_masked_write(led,
+			RGB_LED_EN_CTL(led->base),
+			led->rgb_cfg->enable, RGB_LED_DISABLE);
+		if (rc) {
+			dev_err(&led->pdev->dev,
+				"Failed to write led enable reg\n");
+			return rc;
+		}
+	}
+
+	led->rgb_cfg->pwm_cfg->blinking = false;
+	qpnp_dump_regs(led, rgb_pwm_debug_regs, ARRAY_SIZE(rgb_pwm_debug_regs));
+
+	return 0;
+}
+
+static void qpnp_led_set(struct led_classdev *led_cdev,
+				enum led_brightness value)
+{
+	struct qpnp_led_data *led;
+
+	led = container_of(led_cdev, struct qpnp_led_data, cdev);
+	if (value < LED_OFF) {
+		dev_err(&led->pdev->dev, "Invalid brightness value\n");
+		return;
+	}
+
+	if (value > led->cdev.max_brightness)
+		value = led->cdev.max_brightness;
+
+	led->cdev.brightness = value;
+	if (led->in_order_command_processing)
+		queue_work(led->workqueue, &led->work);
+	else
+		schedule_work(&led->work);
+}
+
+static void __qpnp_led_work(struct qpnp_led_data *led,
+				enum led_brightness value)
+{
+	int rc;
+
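+	/*
+	 * Both flash channels share one hardware module, so they are
+	 * serialized with the global flash_lock; every other LED type uses
+	 * its own per-led mutex.
+	 */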
+	if (led->id == QPNP_ID_FLASH1_LED0 || led->id == QPNP_ID_FLASH1_LED1)
+		mutex_lock(&flash_lock);
+	else
+		mutex_lock(&led->lock);
+
+	switch (led->id) {
+	case QPNP_ID_WLED:
+		rc = qpnp_wled_set(led);
+		if (rc < 0)
+			dev_err(&led->pdev->dev,
+				"WLED set brightness failed (%d)\n", rc);
+		break;
+	case QPNP_ID_FLASH1_LED0:
+	case QPNP_ID_FLASH1_LED1:
+		rc = qpnp_flash_set(led);
+		if (rc < 0)
+			dev_err(&led->pdev->dev,
+				"FLASH set brightness failed (%d)\n", rc);
+		break;
+	case QPNP_ID_RGB_RED:
+	case QPNP_ID_RGB_GREEN:
+	case QPNP_ID_RGB_BLUE:
+		rc = qpnp_rgb_set(led);
+		if (rc < 0)
+			dev_err(&led->pdev->dev,
+				"RGB set brightness failed (%d)\n", rc);
+		break;
+	case QPNP_ID_LED_MPP:
+		rc = qpnp_mpp_set(led);
+		if (rc < 0)
+			dev_err(&led->pdev->dev,
+					"MPP set brightness failed (%d)\n", rc);
+		break;
+	case QPNP_ID_LED_GPIO:
+		rc = qpnp_gpio_set(led);
+		if (rc < 0)
+			dev_err(&led->pdev->dev,
+					"GPIO set brightness failed (%d)\n",
+					rc);
+		break;
+	case QPNP_ID_KPDBL:
+		rc = qpnp_kpdbl_set(led);
+		if (rc < 0)
+			dev_err(&led->pdev->dev,
+				"KPDBL set brightness failed (%d)\n", rc);
+		break;
+	default:
+		dev_err(&led->pdev->dev, "Invalid LED(%d)\n", led->id);
+		break;
+	}
+	if (led->id == QPNP_ID_FLASH1_LED0 || led->id == QPNP_ID_FLASH1_LED1)
+		mutex_unlock(&flash_lock);
+	else
+		mutex_unlock(&led->lock);
+}
+
+static void qpnp_led_work(struct work_struct *work)
+{
+	struct qpnp_led_data *led = container_of(work,
+					struct qpnp_led_data, work);
+
+	__qpnp_led_work(led, led->cdev.brightness);
+}
+
+static int qpnp_led_set_max_brightness(struct qpnp_led_data *led)
+{
+	switch (led->id) {
+	case QPNP_ID_WLED:
+		led->cdev.max_brightness = WLED_MAX_LEVEL;
+		break;
+	case QPNP_ID_FLASH1_LED0:
+	case QPNP_ID_FLASH1_LED1:
+		led->cdev.max_brightness = led->max_current;
+		break;
+	case QPNP_ID_RGB_RED:
+	case QPNP_ID_RGB_GREEN:
+	case QPNP_ID_RGB_BLUE:
+		led->cdev.max_brightness = RGB_MAX_LEVEL;
+		break;
+	case QPNP_ID_LED_MPP:
+		if (led->mpp_cfg->pwm_mode == MANUAL_MODE)
+			led->cdev.max_brightness = led->max_current;
+		else
+			led->cdev.max_brightness = MPP_MAX_LEVEL;
+		break;
+	case QPNP_ID_LED_GPIO:
+		led->cdev.max_brightness = led->max_current;
+		break;
+	case QPNP_ID_KPDBL:
+		led->cdev.max_brightness = KPDBL_MAX_LEVEL;
+		break;
+	default:
+		dev_err(&led->pdev->dev, "Invalid LED(%d)\n", led->id);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static enum led_brightness qpnp_led_get(struct led_classdev *led_cdev)
+{
+	struct qpnp_led_data *led;
+
+	led = container_of(led_cdev, struct qpnp_led_data, cdev);
+
+	return led->cdev.brightness;
+}
+
+static void qpnp_led_turn_off_delayed(struct work_struct *work)
+{
+	struct delayed_work *dwork = to_delayed_work(work);
+	struct qpnp_led_data *led
+		= container_of(dwork, struct qpnp_led_data, dwork);
+
+	led->cdev.brightness = LED_OFF;
+	qpnp_led_set(&led->cdev, led->cdev.brightness);
+}
+
+static void qpnp_led_turn_off(struct qpnp_led_data *led)
+{
+	INIT_DELAYED_WORK(&led->dwork, qpnp_led_turn_off_delayed);
+	schedule_delayed_work(&led->dwork,
+		msecs_to_jiffies(led->turn_off_delay_ms));
+}
+
+static int qpnp_wled_init(struct qpnp_led_data *led)
+{
+	int rc, i;
+	u8 num_wled_strings, val = 0;
+
+	num_wled_strings = led->wled_cfg->num_strings;
+
+	/* verify ranges */
+	if (led->wled_cfg->ovp_val > WLED_OVP_27V) {
+		dev_err(&led->pdev->dev, "Invalid ovp value\n");
+		return -EINVAL;
+	}
+
+	if (led->wled_cfg->boost_curr_lim > WLED_CURR_LIMIT_1680mA) {
+		dev_err(&led->pdev->dev, "Invalid boost current limit\n");
+		return -EINVAL;
+	}
+
+	if (led->wled_cfg->cp_select > WLED_CP_SELECT_MAX) {
+		dev_err(&led->pdev->dev, "Invalid pole capacitance\n");
+		return -EINVAL;
+	}
+
+	if (led->max_current > WLED_MAX_CURR) {
+		dev_err(&led->pdev->dev, "Invalid max current\n");
+		return -EINVAL;
+	}
+
+	if ((led->wled_cfg->ctrl_delay_us % WLED_CTL_DLY_STEP) ||
+		(led->wled_cfg->ctrl_delay_us > WLED_CTL_DLY_MAX)) {
+		dev_err(&led->pdev->dev, "Invalid control delay\n");
+		return -EINVAL;
+	}
+
+	/* program over voltage protection threshold */
+	rc = qpnp_led_masked_write(led, WLED_OVP_CFG_REG(led->base),
+		WLED_OVP_VAL_MASK,
+		(led->wled_cfg->ovp_val << WLED_OVP_VAL_BIT_SHFT));
+	if (rc) {
+		dev_err(&led->pdev->dev,
+				"WLED OVP reg write failed(%d)\n", rc);
+		return rc;
+	}
+
+	/* program current boost limit */
+	rc = qpnp_led_masked_write(led, WLED_BOOST_LIMIT_REG(led->base),
+		WLED_BOOST_LIMIT_MASK, led->wled_cfg->boost_curr_lim);
+	if (rc) {
+		dev_err(&led->pdev->dev,
+				"WLED boost limit reg write failed(%d)\n", rc);
+		return rc;
+	}
+
+	/* program output feedback */
+	rc = qpnp_led_masked_write(led, WLED_FDBCK_CTRL_REG(led->base),
+		WLED_OP_FDBCK_MASK,
+		(led->wled_cfg->op_fdbck << WLED_OP_FDBCK_BIT_SHFT));
+	if (rc) {
+		dev_err(&led->pdev->dev,
+				"WLED fdbck ctrl reg write failed(%d)\n", rc);
+		return rc;
+	}
+
+	/* program switch frequency */
+	rc = qpnp_led_masked_write(led,
+		WLED_SWITCHING_FREQ_REG(led->base),
+		WLED_SWITCH_FREQ_MASK, led->wled_cfg->switch_freq);
+	if (rc) {
+		dev_err(&led->pdev->dev,
+			"WLED switch freq reg write failed(%d)\n", rc);
+		return rc;
+	}
+
+	/* program current sink: enable one sink output per configured string */
+	if (led->wled_cfg->cs_out_en) {
+		for (i = 0; i < led->wled_cfg->num_strings; i++)
+			val |= 1 << i;
+		rc = qpnp_led_masked_write(led, WLED_CURR_SINK_REG(led->base),
+			WLED_CURR_SINK_MASK, (val << WLED_CURR_SINK_SHFT));
+		if (rc) {
+			dev_err(&led->pdev->dev,
+				"WLED curr sink reg write failed(%d)\n", rc);
+			return rc;
+		}
+	}
+
+	/* program high pole capacitance */
+	rc = qpnp_led_masked_write(led, WLED_HIGH_POLE_CAP_REG(led->base),
+		WLED_CP_SELECT_MASK, led->wled_cfg->cp_select);
+	if (rc) {
+		dev_err(&led->pdev->dev,
+				"WLED pole cap reg write failed(%d)\n", rc);
+		return rc;
+	}
+
+	/* program modulator, current mod src and cabc */
+	for (i = 0; i < num_wled_strings; i++) {
+		rc = qpnp_led_masked_write(led, WLED_MOD_EN_REG(led->base, i),
+			WLED_NO_MASK, WLED_EN_MASK);
+		if (rc) {
+			dev_err(&led->pdev->dev,
+				"WLED mod enable reg write failed(%d)\n", rc);
+			return rc;
+		}
+
+		if (led->wled_cfg->dig_mod_gen_en) {
+			rc = qpnp_led_masked_write(led,
+				WLED_MOD_SRC_SEL_REG(led->base, i),
+				WLED_NO_MASK, WLED_USE_EXT_GEN_MOD_SRC);
+			if (rc) {
+				dev_err(&led->pdev->dev,
+				"WLED dig mod en reg write failed(%d)\n", rc);
+			}
+		}
+
+		rc = qpnp_led_masked_write(led,
+			WLED_FULL_SCALE_REG(led->base, i), WLED_MAX_CURR_MASK,
+			(u8)led->max_current);
+		if (rc) {
+			dev_err(&led->pdev->dev,
+				"WLED max current reg write failed(%d)\n", rc);
+			return rc;
+		}
+
+	}
+
+	/* Reset WLED enable register */
+	rc = qpnp_led_masked_write(led, WLED_MOD_CTRL_REG(led->base),
+		WLED_8_BIT_MASK, WLED_BOOST_OFF);
+	if (rc) {
+		dev_err(&led->pdev->dev,
+			"WLED write ctrl reg failed(%d)\n", rc);
+		return rc;
+	}
+
+	/* dump wled registers */
+	qpnp_dump_regs(led, wled_debug_regs, ARRAY_SIZE(wled_debug_regs));
+
+	return 0;
+}
+
+static ssize_t led_mode_store(struct device *dev,
+			struct device_attribute *attr,
+			const char *buf, size_t count)
+{
+	struct qpnp_led_data *led;
+	unsigned long state;
+	struct led_classdev *led_cdev = dev_get_drvdata(dev);
+	ssize_t ret = -EINVAL;
+
+	ret = kstrtoul(buf, 10, &state);
+	if (ret)
+		return ret;
+
+	led = container_of(led_cdev, struct qpnp_led_data, cdev);
+
+	/* '1' to enable torch mode; '0' to switch to flash mode */
+	if (state == 1)
+		led->flash_cfg->torch_enable = true;
+	else
+		led->flash_cfg->torch_enable = false;
+
+	return count;
+}
+
+static ssize_t led_strobe_type_store(struct device *dev,
+			struct device_attribute *attr,
+			const char *buf, size_t count)
+{
+	struct qpnp_led_data *led;
+	unsigned long state;
+	struct led_classdev *led_cdev = dev_get_drvdata(dev);
+	ssize_t ret = -EINVAL;
+
+	ret = kstrtoul(buf, 10, &state);
+	if (ret)
+		return ret;
+
+	led = container_of(led_cdev, struct qpnp_led_data, cdev);
+
+	/* '0' for sw strobe; '1' for hw strobe */
+	if (state == 1)
+		led->flash_cfg->strobe_type = 1;
+	else
+		led->flash_cfg->strobe_type = 0;
+
+	return count;
+}
+
+static int qpnp_pwm_init(struct pwm_config_data *pwm_cfg,
+					struct platform_device *pdev,
+					const char *name)
+{
+	int rc, start_idx, idx_len, lut_max_size;
+
+	if (pwm_cfg->pwm_dev) {
+		if (pwm_cfg->mode == LPG_MODE) {
+			start_idx =
+			pwm_cfg->duty_cycles->start_idx;
+			idx_len =
+			pwm_cfg->duty_cycles->num_duty_pcts;
+
+			if (strnstr(name, "kpdbl", sizeof("kpdbl")))
+				lut_max_size = PWM_GPLED_LUT_MAX_SIZE;
+			else
+				lut_max_size = PWM_LUT_MAX_SIZE;
+
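+			/*
+			 * Reject patterns whose [start_idx, start_idx +
+			 * idx_len) window falls outside the LUT.
+			 */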
+			if (idx_len >= lut_max_size && start_idx) {
+				dev_err(&pdev->dev,
+					"Wrong LUT size or index\n");
+				return -EINVAL;
+			}
+
+			if ((start_idx + idx_len) > lut_max_size) {
+				dev_err(&pdev->dev, "Exceed LUT limit\n");
+				return -EINVAL;
+			}
+			rc = pwm_lut_config(pwm_cfg->pwm_dev,
+				pwm_cfg->pwm_period_us,
+				pwm_cfg->duty_cycles->duty_pcts,
+				pwm_cfg->lut_params);
+			if (rc < 0) {
+				dev_err(&pdev->dev, "Failed to configure pwm LUT\n");
+				return rc;
+			}
+			rc = pwm_change_mode(pwm_cfg->pwm_dev, PM_PWM_MODE_LPG);
+			if (rc < 0) {
+				dev_err(&pdev->dev, "Failed to set LPG mode\n");
+				return rc;
+			}
+		}
+	} else {
+		dev_err(&pdev->dev, "Invalid PWM device\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static ssize_t pwm_us_store(struct device *dev,
+	struct device_attribute *attr,
+	const char *buf, size_t count)
+{
+	struct qpnp_led_data *led;
+	u32 pwm_us;
+	struct led_classdev *led_cdev = dev_get_drvdata(dev);
+	ssize_t ret;
+	u32 previous_pwm_us;
+	struct pwm_config_data *pwm_cfg;
+
+	led = container_of(led_cdev, struct qpnp_led_data, cdev);
+
+	ret = kstrtou32(buf, 10, &pwm_us);
+	if (ret)
+		return ret;
+
+	switch (led->id) {
+	case QPNP_ID_LED_MPP:
+		pwm_cfg = led->mpp_cfg->pwm_cfg;
+		break;
+	case QPNP_ID_RGB_RED:
+	case QPNP_ID_RGB_GREEN:
+	case QPNP_ID_RGB_BLUE:
+		pwm_cfg = led->rgb_cfg->pwm_cfg;
+		break;
+	case QPNP_ID_KPDBL:
+		pwm_cfg = led->kpdbl_cfg->pwm_cfg;
+		break;
+	default:
+		dev_err(&led->pdev->dev, "Invalid LED id type for pwm_us\n");
+		return -EINVAL;
+	}
+
+	if (pwm_cfg->mode == LPG_MODE)
+		pwm_cfg->blinking = true;
+
+	previous_pwm_us = pwm_cfg->pwm_period_us;
+
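+	/* apply the new period; on failure the old value is restored below */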
+	pwm_cfg->pwm_period_us = pwm_us;
+	if (pwm_cfg->pwm_enabled) {
+		pwm_disable(pwm_cfg->pwm_dev);
+		pwm_cfg->pwm_enabled = 0;
+	}
+	ret = qpnp_pwm_init(pwm_cfg, led->pdev, led->cdev.name);
+	if (ret) {
+		pwm_cfg->pwm_period_us = previous_pwm_us;
+		if (pwm_cfg->pwm_enabled) {
+			pwm_disable(pwm_cfg->pwm_dev);
+			pwm_cfg->pwm_enabled = 0;
+		}
+		qpnp_pwm_init(pwm_cfg, led->pdev, led->cdev.name);
+		qpnp_led_set(&led->cdev, led->cdev.brightness);
+		dev_err(&led->pdev->dev,
+			"Failed to initialize pwm with new pwm_us value\n");
+		return ret;
+	}
+	qpnp_led_set(&led->cdev, led->cdev.brightness);
+	return count;
+}
+
+static ssize_t pause_lo_store(struct device *dev,
+	struct device_attribute *attr,
+	const char *buf, size_t count)
+{
+	struct qpnp_led_data *led;
+	u32 pause_lo;
+	struct led_classdev *led_cdev = dev_get_drvdata(dev);
+	ssize_t ret;
+	u32 previous_pause_lo;
+	struct pwm_config_data *pwm_cfg;
+
+	ret = kstrtou32(buf, 10, &pause_lo);
+	if (ret)
+		return ret;
+	led = container_of(led_cdev, struct qpnp_led_data, cdev);
+
+	switch (led->id) {
+	case QPNP_ID_LED_MPP:
+		pwm_cfg = led->mpp_cfg->pwm_cfg;
+		break;
+	case QPNP_ID_RGB_RED:
+	case QPNP_ID_RGB_GREEN:
+	case QPNP_ID_RGB_BLUE:
+		pwm_cfg = led->rgb_cfg->pwm_cfg;
+		break;
+	case QPNP_ID_KPDBL:
+		pwm_cfg = led->kpdbl_cfg->pwm_cfg;
+		break;
+	default:
+		dev_err(&led->pdev->dev,
+			"Invalid LED id type for pause lo\n");
+		return -EINVAL;
+	}
+
+	if (pwm_cfg->mode == LPG_MODE)
+		pwm_cfg->blinking = true;
+
+	previous_pause_lo = pwm_cfg->lut_params.lut_pause_lo;
+
+	if (pwm_cfg->pwm_enabled) {
+		pwm_disable(pwm_cfg->pwm_dev);
+		pwm_cfg->pwm_enabled = 0;
+	}
+	pwm_cfg->lut_params.lut_pause_lo = pause_lo;
+	ret = qpnp_pwm_init(pwm_cfg, led->pdev, led->cdev.name);
+	if (ret) {
+		pwm_cfg->lut_params.lut_pause_lo = previous_pause_lo;
+		if (pwm_cfg->pwm_enabled) {
+			pwm_disable(pwm_cfg->pwm_dev);
+			pwm_cfg->pwm_enabled = 0;
+		}
+		qpnp_pwm_init(pwm_cfg, led->pdev, led->cdev.name);
+		qpnp_led_set(&led->cdev, led->cdev.brightness);
+		dev_err(&led->pdev->dev,
+			"Failed to initialize pwm with new pause lo value\n");
+		return ret;
+	}
+	qpnp_led_set(&led->cdev, led->cdev.brightness);
+	return count;
+}
+
+static ssize_t pause_hi_store(struct device *dev,
+	struct device_attribute *attr,
+	const char *buf, size_t count)
+{
+	struct qpnp_led_data *led;
+	u32 pause_hi;
+	struct led_classdev *led_cdev = dev_get_drvdata(dev);
+	ssize_t ret;
+	u32 previous_pause_hi;
+	struct pwm_config_data *pwm_cfg;
+
+	ret = kstrtou32(buf, 10, &pause_hi);
+	if (ret)
+		return ret;
+	led = container_of(led_cdev, struct qpnp_led_data, cdev);
+
+	switch (led->id) {
+	case QPNP_ID_LED_MPP:
+		pwm_cfg = led->mpp_cfg->pwm_cfg;
+		break;
+	case QPNP_ID_RGB_RED:
+	case QPNP_ID_RGB_GREEN:
+	case QPNP_ID_RGB_BLUE:
+		pwm_cfg = led->rgb_cfg->pwm_cfg;
+		break;
+	case QPNP_ID_KPDBL:
+		pwm_cfg = led->kpdbl_cfg->pwm_cfg;
+		break;
+	default:
+		dev_err(&led->pdev->dev,
+			"Invalid LED id type for pause hi\n");
+		return -EINVAL;
+	}
+
+	if (pwm_cfg->mode == LPG_MODE)
+		pwm_cfg->blinking = true;
+
+	previous_pause_hi = pwm_cfg->lut_params.lut_pause_hi;
+
+	if (pwm_cfg->pwm_enabled) {
+		pwm_disable(pwm_cfg->pwm_dev);
+		pwm_cfg->pwm_enabled = 0;
+	}
+	pwm_cfg->lut_params.lut_pause_hi = pause_hi;
+	ret = qpnp_pwm_init(pwm_cfg, led->pdev, led->cdev.name);
+	if (ret) {
+		pwm_cfg->lut_params.lut_pause_hi = previous_pause_hi;
+		if (pwm_cfg->pwm_enabled) {
+			pwm_disable(pwm_cfg->pwm_dev);
+			pwm_cfg->pwm_enabled = 0;
+		}
+		qpnp_pwm_init(pwm_cfg, led->pdev, led->cdev.name);
+		qpnp_led_set(&led->cdev, led->cdev.brightness);
+		dev_err(&led->pdev->dev,
+			"Failed to initialize pwm with new pause hi value\n");
+		return ret;
+	}
+	qpnp_led_set(&led->cdev, led->cdev.brightness);
+	return count;
+}
+
+static ssize_t start_idx_store(struct device *dev,
+	struct device_attribute *attr,
+	const char *buf, size_t count)
+{
+	struct qpnp_led_data *led;
+	u32 start_idx;
+	struct led_classdev *led_cdev = dev_get_drvdata(dev);
+	ssize_t ret;
+	u32 previous_start_idx;
+	struct pwm_config_data *pwm_cfg;
+
+	ret = kstrtou32(buf, 10, &start_idx);
+	if (ret)
+		return ret;
+	led = container_of(led_cdev, struct qpnp_led_data, cdev);
+
+	switch (led->id) {
+	case QPNP_ID_LED_MPP:
+		pwm_cfg = led->mpp_cfg->pwm_cfg;
+		break;
+	case QPNP_ID_RGB_RED:
+	case QPNP_ID_RGB_GREEN:
+	case QPNP_ID_RGB_BLUE:
+		pwm_cfg = led->rgb_cfg->pwm_cfg;
+		break;
+	case QPNP_ID_KPDBL:
+		pwm_cfg = led->kpdbl_cfg->pwm_cfg;
+		break;
+	default:
+		dev_err(&led->pdev->dev,
+			"Invalid LED id type for start idx\n");
+		return -EINVAL;
+	}
+
+	if (pwm_cfg->mode == LPG_MODE)
+		pwm_cfg->blinking = true;
+
+	previous_start_idx = pwm_cfg->duty_cycles->start_idx;
+	pwm_cfg->duty_cycles->start_idx = start_idx;
+	pwm_cfg->lut_params.start_idx = pwm_cfg->duty_cycles->start_idx;
+	if (pwm_cfg->pwm_enabled) {
+		pwm_disable(pwm_cfg->pwm_dev);
+		pwm_cfg->pwm_enabled = 0;
+	}
+	ret = qpnp_pwm_init(pwm_cfg, led->pdev, led->cdev.name);
+	if (ret) {
+		pwm_cfg->duty_cycles->start_idx = previous_start_idx;
+		pwm_cfg->lut_params.start_idx = pwm_cfg->duty_cycles->start_idx;
+		if (pwm_cfg->pwm_enabled) {
+			pwm_disable(pwm_cfg->pwm_dev);
+			pwm_cfg->pwm_enabled = 0;
+		}
+		qpnp_pwm_init(pwm_cfg, led->pdev, led->cdev.name);
+		qpnp_led_set(&led->cdev, led->cdev.brightness);
+		dev_err(&led->pdev->dev,
+			"Failed to initialize pwm with new start idx value\n");
+		return ret;
+	}
+	qpnp_led_set(&led->cdev, led->cdev.brightness);
+	return count;
+}
+
+static ssize_t ramp_step_ms_store(struct device *dev,
+	struct device_attribute *attr,
+	const char *buf, size_t count)
+{
+	struct qpnp_led_data *led;
+	u32 ramp_step_ms;
+	struct led_classdev *led_cdev = dev_get_drvdata(dev);
+	ssize_t ret;
+	u32 previous_ramp_step_ms;
+	struct pwm_config_data *pwm_cfg;
+
+	ret = kstrtou32(buf, 10, &ramp_step_ms);
+	if (ret)
+		return ret;
+	led = container_of(led_cdev, struct qpnp_led_data, cdev);
+
+	switch (led->id) {
+	case QPNP_ID_LED_MPP:
+		pwm_cfg = led->mpp_cfg->pwm_cfg;
+		break;
+	case QPNP_ID_RGB_RED:
+	case QPNP_ID_RGB_GREEN:
+	case QPNP_ID_RGB_BLUE:
+		pwm_cfg = led->rgb_cfg->pwm_cfg;
+		break;
+	case QPNP_ID_KPDBL:
+		pwm_cfg = led->kpdbl_cfg->pwm_cfg;
+		break;
+	default:
+		dev_err(&led->pdev->dev,
+			"Invalid LED id type for ramp step\n");
+		return -EINVAL;
+	}
+
+	if (pwm_cfg->mode == LPG_MODE)
+		pwm_cfg->blinking = true;
+
+	previous_ramp_step_ms = pwm_cfg->lut_params.ramp_step_ms;
+
+	if (pwm_cfg->pwm_enabled) {
+		pwm_disable(pwm_cfg->pwm_dev);
+		pwm_cfg->pwm_enabled = 0;
+	}
+	pwm_cfg->lut_params.ramp_step_ms = ramp_step_ms;
+	ret = qpnp_pwm_init(pwm_cfg, led->pdev, led->cdev.name);
+	if (ret) {
+		pwm_cfg->lut_params.ramp_step_ms = previous_ramp_step_ms;
+		if (pwm_cfg->pwm_enabled) {
+			pwm_disable(pwm_cfg->pwm_dev);
+			pwm_cfg->pwm_enabled = 0;
+		}
+		qpnp_pwm_init(pwm_cfg, led->pdev, led->cdev.name);
+		qpnp_led_set(&led->cdev, led->cdev.brightness);
+		dev_err(&led->pdev->dev,
+			"Failed to initialize pwm with new ramp step value\n");
+		return ret;
+	}
+	qpnp_led_set(&led->cdev, led->cdev.brightness);
+	return count;
+}
+
+static ssize_t lut_flags_store(struct device *dev,
+	struct device_attribute *attr,
+	const char *buf, size_t count)
+{
+	struct qpnp_led_data *led;
+	u32 lut_flags;
+	struct led_classdev *led_cdev = dev_get_drvdata(dev);
+	ssize_t ret;
+	u32 previous_lut_flags;
+	struct pwm_config_data *pwm_cfg;
+
+	ret = kstrtou32(buf, 10, &lut_flags);
+	if (ret)
+		return ret;
+	led = container_of(led_cdev, struct qpnp_led_data, cdev);
+
+	switch (led->id) {
+	case QPNP_ID_LED_MPP:
+		pwm_cfg = led->mpp_cfg->pwm_cfg;
+		break;
+	case QPNP_ID_RGB_RED:
+	case QPNP_ID_RGB_GREEN:
+	case QPNP_ID_RGB_BLUE:
+		pwm_cfg = led->rgb_cfg->pwm_cfg;
+		break;
+	case QPNP_ID_KPDBL:
+		pwm_cfg = led->kpdbl_cfg->pwm_cfg;
+		break;
+	default:
+		dev_err(&led->pdev->dev,
+			"Invalid LED id type for lut flags\n");
+		return -EINVAL;
+	}
+
+	if (pwm_cfg->mode == LPG_MODE)
+		pwm_cfg->blinking = true;
+
+	previous_lut_flags = pwm_cfg->lut_params.flags;
+
+	if (pwm_cfg->pwm_enabled) {
+		pwm_disable(pwm_cfg->pwm_dev);
+		pwm_cfg->pwm_enabled = 0;
+	}
+	pwm_cfg->lut_params.flags = lut_flags;
+	ret = qpnp_pwm_init(pwm_cfg, led->pdev, led->cdev.name);
+	if (ret) {
+		pwm_cfg->lut_params.flags = previous_lut_flags;
+		if (pwm_cfg->pwm_enabled) {
+			pwm_disable(pwm_cfg->pwm_dev);
+			pwm_cfg->pwm_enabled = 0;
+		}
+		qpnp_pwm_init(pwm_cfg, led->pdev, led->cdev.name);
+		qpnp_led_set(&led->cdev, led->cdev.brightness);
+		dev_err(&led->pdev->dev,
+			"Failed to initialize pwm with new lut flags value\n");
+		return ret;
+	}
+	qpnp_led_set(&led->cdev, led->cdev.brightness);
+	return count;
+}
+
+static ssize_t duty_pcts_store(struct device *dev,
+	struct device_attribute *attr,
+	const char *buf, size_t count)
+{
+	struct qpnp_led_data *led;
+	int num_duty_pcts = 0;
+	struct led_classdev *led_cdev = dev_get_drvdata(dev);
+	char *buffer;
+	ssize_t ret;
+	int i = 0;
+	int max_duty_pcts;
+	struct pwm_config_data *pwm_cfg;
+	u32 previous_num_duty_pcts;
+	int value;
+	int *previous_duty_pcts;
+
+	led = container_of(led_cdev, struct qpnp_led_data, cdev);
+
+	switch (led->id) {
+	case QPNP_ID_LED_MPP:
+		pwm_cfg = led->mpp_cfg->pwm_cfg;
+		max_duty_pcts = PWM_LUT_MAX_SIZE;
+		break;
+	case QPNP_ID_RGB_RED:
+	case QPNP_ID_RGB_GREEN:
+	case QPNP_ID_RGB_BLUE:
+		pwm_cfg = led->rgb_cfg->pwm_cfg;
+		max_duty_pcts = PWM_LUT_MAX_SIZE;
+		break;
+	case QPNP_ID_KPDBL:
+		pwm_cfg = led->kpdbl_cfg->pwm_cfg;
+		max_duty_pcts = PWM_GPLED_LUT_MAX_SIZE;
+		break;
+	default:
+		dev_err(&led->pdev->dev,
+			"Invalid LED id type for duty pcts\n");
+		return -EINVAL;
+	}
+
+	if (pwm_cfg->mode == LPG_MODE)
+		pwm_cfg->blinking = true;
+
+	buffer = (char *)buf;
+
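+	/*
+	 * Parse a comma-separated list of duty-cycle percentages, e.g.
+	 * "0,25,50,75,100". Each sscanf() pass consumes one value and
+	 * rewrites the remainder back into the same buffer.
+	 */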
+	for (i = 0; i < max_duty_pcts; i++) {
+		if (buffer == NULL)
+			break;
+		ret = sscanf((const char *)buffer, "%u,%s", &value, buffer);
+		pwm_cfg->old_duty_pcts[i] = value;
+		num_duty_pcts++;
+		if (ret <= 1)
+			break;
+	}
+
+	if (num_duty_pcts >= max_duty_pcts) {
+		dev_err(&led->pdev->dev,
+			"Number of duty pcts given exceeds max (%d)\n",
+			max_duty_pcts);
+		return -EINVAL;
+	}
+
+	previous_num_duty_pcts = pwm_cfg->duty_cycles->num_duty_pcts;
+	previous_duty_pcts = pwm_cfg->duty_cycles->duty_pcts;
+
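+	/*
+	 * Swap in the new duty table, keeping the old one so a failed
+	 * PWM re-init can be rolled back below.
+	 */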
+	pwm_cfg->duty_cycles->num_duty_pcts = num_duty_pcts;
+	pwm_cfg->duty_cycles->duty_pcts = pwm_cfg->old_duty_pcts;
+	pwm_cfg->old_duty_pcts = previous_duty_pcts;
+	pwm_cfg->lut_params.idx_len = pwm_cfg->duty_cycles->num_duty_pcts;
+
+	if (pwm_cfg->pwm_enabled) {
+		pwm_disable(pwm_cfg->pwm_dev);
+		pwm_cfg->pwm_enabled = 0;
+	}
+
+	ret = qpnp_pwm_init(pwm_cfg, led->pdev, led->cdev.name);
+	if (ret)
+		goto restore;
+
+	qpnp_led_set(&led->cdev, led->cdev.brightness);
+	return count;
+
+restore:
+	dev_err(&led->pdev->dev,
+		"Failed to initialize pwm with new duty pcts value\n");
+	pwm_cfg->duty_cycles->num_duty_pcts = previous_num_duty_pcts;
+	pwm_cfg->old_duty_pcts = pwm_cfg->duty_cycles->duty_pcts;
+	pwm_cfg->duty_cycles->duty_pcts = previous_duty_pcts;
+	pwm_cfg->lut_params.idx_len = pwm_cfg->duty_cycles->num_duty_pcts;
+	if (pwm_cfg->pwm_enabled) {
+		pwm_disable(pwm_cfg->pwm_dev);
+		pwm_cfg->pwm_enabled = 0;
+	}
+	qpnp_pwm_init(pwm_cfg, led->pdev, led->cdev.name);
+	qpnp_led_set(&led->cdev, led->cdev.brightness);
+	return ret;
+}
+
+static void led_blink(struct qpnp_led_data *led,
+			struct pwm_config_data *pwm_cfg)
+{
+	int rc;
+
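+	/* Let any queued brightness work finish before reconfiguring the PWM */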
+	flush_work(&led->work);
+	mutex_lock(&led->lock);
+	if (pwm_cfg->use_blink) {
+		if (led->cdev.brightness) {
+			pwm_cfg->blinking = true;
+			if (led->id == QPNP_ID_LED_MPP)
+				led->mpp_cfg->pwm_mode = LPG_MODE;
+			else if (led->id == QPNP_ID_KPDBL)
+				led->kpdbl_cfg->pwm_mode = LPG_MODE;
+			pwm_cfg->mode = LPG_MODE;
+		} else {
+			pwm_cfg->blinking = false;
+			pwm_cfg->mode = pwm_cfg->default_mode;
+			if (led->id == QPNP_ID_LED_MPP)
+				led->mpp_cfg->pwm_mode = pwm_cfg->default_mode;
+			else if (led->id == QPNP_ID_KPDBL)
+				led->kpdbl_cfg->pwm_mode =
+						pwm_cfg->default_mode;
+		}
+		if (pwm_cfg->pwm_enabled) {
+			pwm_disable(pwm_cfg->pwm_dev);
+			pwm_cfg->pwm_enabled = 0;
+		}
+		qpnp_pwm_init(pwm_cfg, led->pdev, led->cdev.name);
+		if (led->id == QPNP_ID_RGB_RED || led->id == QPNP_ID_RGB_GREEN
+				|| led->id == QPNP_ID_RGB_BLUE) {
+			rc = qpnp_rgb_set(led);
+			if (rc < 0)
+				dev_err(&led->pdev->dev,
+				"RGB set brightness failed (%d)\n", rc);
+		} else if (led->id == QPNP_ID_LED_MPP) {
+			rc = qpnp_mpp_set(led);
+			if (rc < 0)
+				dev_err(&led->pdev->dev,
+				"MPP set brightness failed (%d)\n", rc);
+		} else if (led->id == QPNP_ID_KPDBL) {
+			rc = qpnp_kpdbl_set(led);
+			if (rc < 0)
+				dev_err(&led->pdev->dev,
+				"KPDBL set brightness failed (%d)\n", rc);
+		}
+	}
+	mutex_unlock(&led->lock);
+}
+
+static ssize_t blink_store(struct device *dev,
+	struct device_attribute *attr,
+	const char *buf, size_t count)
+{
+	struct qpnp_led_data *led;
+	unsigned long blinking;
+	struct led_classdev *led_cdev = dev_get_drvdata(dev);
+	ssize_t ret;
+
+	ret = kstrtoul(buf, 10, &blinking);
+	if (ret)
+		return ret;
+	led = container_of(led_cdev, struct qpnp_led_data, cdev);
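+	/* Nonzero input enables blinking at full brightness; zero disables it */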
+	led->cdev.brightness = blinking ? led->cdev.max_brightness : 0;
+
+	switch (led->id) {
+	case QPNP_ID_LED_MPP:
+		led_blink(led, led->mpp_cfg->pwm_cfg);
+		break;
+	case QPNP_ID_RGB_RED:
+	case QPNP_ID_RGB_GREEN:
+	case QPNP_ID_RGB_BLUE:
+		led_blink(led, led->rgb_cfg->pwm_cfg);
+		break;
+	case QPNP_ID_KPDBL:
+		led_blink(led, led->kpdbl_cfg->pwm_cfg);
+		break;
+	default:
+		dev_err(&led->pdev->dev, "Invalid LED id type for blink\n");
+		return -EINVAL;
+	}
+	return count;
+}
+
+static DEVICE_ATTR(led_mode, 0664, NULL, led_mode_store);
+static DEVICE_ATTR(strobe, 0664, NULL, led_strobe_type_store);
+static DEVICE_ATTR(pwm_us, 0664, NULL, pwm_us_store);
+static DEVICE_ATTR(pause_lo, 0664, NULL, pause_lo_store);
+static DEVICE_ATTR(pause_hi, 0664, NULL, pause_hi_store);
+static DEVICE_ATTR(start_idx, 0664, NULL, start_idx_store);
+static DEVICE_ATTR(ramp_step_ms, 0664, NULL, ramp_step_ms_store);
+static DEVICE_ATTR(lut_flags, 0664, NULL, lut_flags_store);
+static DEVICE_ATTR(duty_pcts, 0664, NULL, duty_pcts_store);
+static DEVICE_ATTR(blink, 0664, NULL, blink_store);
+
+static struct attribute *led_attrs[] = {
+	&dev_attr_led_mode.attr,
+	&dev_attr_strobe.attr,
+	NULL
+};
+
+static const struct attribute_group led_attr_group = {
+	.attrs = led_attrs,
+};
+
+static struct attribute *pwm_attrs[] = {
+	&dev_attr_pwm_us.attr,
+	NULL
+};
+
+static struct attribute *lpg_attrs[] = {
+	&dev_attr_pause_lo.attr,
+	&dev_attr_pause_hi.attr,
+	&dev_attr_start_idx.attr,
+	&dev_attr_ramp_step_ms.attr,
+	&dev_attr_lut_flags.attr,
+	&dev_attr_duty_pcts.attr,
+	NULL
+};
+
+static struct attribute *blink_attrs[] = {
+	&dev_attr_blink.attr,
+	NULL
+};
+
+static const struct attribute_group pwm_attr_group = {
+	.attrs = pwm_attrs,
+};
+
+static const struct attribute_group lpg_attr_group = {
+	.attrs = lpg_attrs,
+};
+
+static const struct attribute_group blink_attr_group = {
+	.attrs = blink_attrs,
+};
+
+static int qpnp_flash_init(struct qpnp_led_data *led)
+{
+	int rc;
+
+	led->flash_cfg->flash_on = false;
+
+	rc = qpnp_led_masked_write(led,
+		FLASH_LED_STROBE_CTRL(led->base),
+		FLASH_STROBE_MASK, FLASH_DISABLE_ALL);
+	if (rc) {
+		dev_err(&led->pdev->dev,
+			"LED %d flash write failed(%d)\n", led->id, rc);
+		return rc;
+	}
+
+	/* Disable flash LED module */
+	rc = qpnp_led_masked_write(led, FLASH_ENABLE_CONTROL(led->base),
+		FLASH_ENABLE_MASK, FLASH_DISABLE_ALL);
+	if (rc) {
+		dev_err(&led->pdev->dev, "Enable reg write failed(%d)\n", rc);
+		return rc;
+	}
+
+	if (led->flash_cfg->torch_enable)
+		return 0;
+
+	/* Set headroom */
+	rc = qpnp_led_masked_write(led, FLASH_HEADROOM(led->base),
+		FLASH_HEADROOM_MASK, led->flash_cfg->headroom);
+	if (rc) {
+		dev_err(&led->pdev->dev,
+			"Headroom reg write failed(%d)\n", rc);
+		return rc;
+	}
+
+	/* Set startup delay */
+	rc = qpnp_led_masked_write(led,
+		FLASH_STARTUP_DELAY(led->base), FLASH_STARTUP_DLY_MASK,
+		led->flash_cfg->startup_dly);
+	if (rc) {
+		dev_err(&led->pdev->dev,
+			"Startup delay reg write failed(%d)\n", rc);
+		return rc;
+	}
+
+	/* Set timer control - safety or watchdog */
+	if (led->flash_cfg->safety_timer) {
+		rc = qpnp_led_masked_write(led,
+			FLASH_LED_TMR_CTRL(led->base),
+			FLASH_TMR_MASK, FLASH_TMR_SAFETY);
+		if (rc) {
+			dev_err(&led->pdev->dev,
+				"LED timer ctrl reg write failed(%d)\n",
+				rc);
+			return rc;
+		}
+	}
+
+	/* Set Vreg force */
+	if (led->flash_cfg->vreg_ok)
+		rc = qpnp_led_masked_write(led,	FLASH_VREG_OK_FORCE(led->base),
+			FLASH_VREG_MASK, FLASH_SW_VREG_OK);
+	else
+		rc = qpnp_led_masked_write(led, FLASH_VREG_OK_FORCE(led->base),
+			FLASH_VREG_MASK, FLASH_HW_VREG_OK);
+
+	if (rc) {
+		dev_err(&led->pdev->dev,
+			"Vreg OK reg write failed(%d)\n", rc);
+		return rc;
+	}
+
+	/* Set self fault check */
+	rc = qpnp_led_masked_write(led, FLASH_FAULT_DETECT(led->base),
+		FLASH_FAULT_DETECT_MASK, FLASH_SELFCHECK_ENABLE);
+	if (rc) {
+		dev_err(&led->pdev->dev,
+			"Fault detect reg write failed(%d)\n", rc);
+		return rc;
+	}
+
+	/* Set mask enable */
+	rc = qpnp_led_masked_write(led, FLASH_MASK_ENABLE(led->base),
+		FLASH_MASK_REG_MASK, FLASH_MASK_1);
+	if (rc) {
+		dev_err(&led->pdev->dev,
+			"Mask enable reg write failed(%d)\n", rc);
+		return rc;
+	}
+
+	/* Set current ramp */
+	rc = qpnp_led_masked_write(led, FLASH_CURRENT_RAMP(led->base),
+		FLASH_CURRENT_RAMP_MASK, FLASH_RAMP_STEP_27US);
+	if (rc) {
+		dev_err(&led->pdev->dev,
+			"Current ramp reg write failed(%d)\n", rc);
+		return rc;
+	}
+
+	led->flash_cfg->strobe_type = 0;
+
+	/* dump flash registers */
+	qpnp_dump_regs(led, flash_debug_regs, ARRAY_SIZE(flash_debug_regs));
+
+	return 0;
+}
+
+static int qpnp_kpdbl_init(struct qpnp_led_data *led)
+{
+	int rc;
+	uint val;
+
+	/* select row source - vbst or vph */
+	rc = regmap_read(led->regmap, KPDBL_ROW_SRC_SEL(led->base), &val);
+	if (rc) {
+		dev_err(&led->pdev->dev,
+			"Unable to read from addr=%x, rc(%d)\n",
+			KPDBL_ROW_SRC_SEL(led->base), rc);
+		return rc;
+	}
+
+	if (led->kpdbl_cfg->row_src_vbst)
+		val |= 1 << led->kpdbl_cfg->row_id;
+	else
+		val &= ~(1 << led->kpdbl_cfg->row_id);
+
+	rc = regmap_write(led->regmap, KPDBL_ROW_SRC_SEL(led->base), val);
+	if (rc) {
+		dev_err(&led->pdev->dev,
+			"Unable to write to addr=%x, rc(%d)\n",
+			KPDBL_ROW_SRC_SEL(led->base), rc);
+		return rc;
+	}
+
+	/* row source enable */
+	rc = regmap_read(led->regmap, KPDBL_ROW_SRC(led->base), &val);
+	if (rc) {
+		dev_err(&led->pdev->dev,
+			"Unable to read from addr=%x, rc(%d)\n",
+			KPDBL_ROW_SRC(led->base), rc);
+		return rc;
+	}
+
+	if (led->kpdbl_cfg->row_src_en)
+		val |= KPDBL_ROW_SCAN_EN_MASK | (1 << led->kpdbl_cfg->row_id);
+	else
+		val &= ~(1 << led->kpdbl_cfg->row_id);
+
+	rc = regmap_write(led->regmap, KPDBL_ROW_SRC(led->base), val);
+	if (rc) {
+		dev_err(&led->pdev->dev,
+			"Unable to write to addr=%x, rc(%d)\n",
+			KPDBL_ROW_SRC(led->base), rc);
+		return rc;
+	}
+
+	/* enable module */
+	rc = qpnp_led_masked_write(led, KPDBL_ENABLE(led->base),
+		KPDBL_MODULE_EN_MASK, KPDBL_MODULE_EN);
+	if (rc) {
+		dev_err(&led->pdev->dev,
+			"Enable module write failed(%d)\n", rc);
+		return rc;
+	}
+
+	rc = qpnp_pwm_init(led->kpdbl_cfg->pwm_cfg, led->pdev,
+				led->cdev.name);
+	if (rc) {
+		dev_err(&led->pdev->dev, "Failed to initialize pwm\n");
+		return rc;
+	}
+
+	if (led->kpdbl_cfg->always_on) {
+		kpdbl_master = led->kpdbl_cfg->pwm_cfg->pwm_dev;
+		kpdbl_master_period_us = led->kpdbl_cfg->pwm_cfg->pwm_period_us;
+	}
+
+	/* dump kpdbl registers */
+	qpnp_dump_regs(led, kpdbl_debug_regs, ARRAY_SIZE(kpdbl_debug_regs));
+
+	return 0;
+}
+
+static int qpnp_rgb_init(struct qpnp_led_data *led)
+{
+	int rc;
+
+	rc = qpnp_led_masked_write(led, RGB_LED_SRC_SEL(led->base),
+		RGB_LED_SRC_MASK, RGB_LED_SOURCE_VPH_PWR);
+	if (rc) {
+		dev_err(&led->pdev->dev,
+			"Failed to write led source select register\n");
+		return rc;
+	}
+
+	rc = qpnp_pwm_init(led->rgb_cfg->pwm_cfg, led->pdev, led->cdev.name);
+	if (rc) {
+		dev_err(&led->pdev->dev, "Failed to initialize pwm\n");
+		return rc;
+	}
+	/* Initialize led for use in auto trickle charging mode */
+	rc = qpnp_led_masked_write(led, RGB_LED_ATC_CTL(led->base),
+		led->rgb_cfg->enable, led->rgb_cfg->enable);
+	if (rc) {
+		dev_err(&led->pdev->dev,
+			"Failed to write led ATC control register\n");
+		return rc;
+	}
+
+	return 0;
+}
+
+static int qpnp_mpp_init(struct qpnp_led_data *led)
+{
+	int rc;
+	int val;
+
+	if (led->max_current < LED_MPP_CURRENT_MIN ||
+		led->max_current > LED_MPP_CURRENT_MAX) {
+		dev_err(&led->pdev->dev,
+			"max current for mpp is not valid\n");
+		return -EINVAL;
+	}
+
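+	/*
+	 * Sink current code is zero-based in steps of
+	 * LED_MPP_CURRENT_PER_SETTING mA; e.g. with a 5 mA step, a 5 mA
+	 * setting maps to code 0.
+	 */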
+	val = (led->mpp_cfg->current_setting / LED_MPP_CURRENT_PER_SETTING) - 1;
+
+	if (val < 0)
+		val = 0;
+
+	rc = qpnp_led_masked_write(led, LED_MPP_VIN_CTRL(led->base),
+		LED_MPP_VIN_MASK, led->mpp_cfg->vin_ctrl);
+	if (rc) {
+		dev_err(&led->pdev->dev,
+			"Failed to write led vin control reg\n");
+		return rc;
+	}
+
+	rc = qpnp_led_masked_write(led, LED_MPP_SINK_CTRL(led->base),
+		LED_MPP_SINK_MASK, val);
+	if (rc) {
+		dev_err(&led->pdev->dev,
+			"Failed to write sink control reg\n");
+		return rc;
+	}
+
+	if (led->mpp_cfg->pwm_mode != MANUAL_MODE) {
+		rc = qpnp_pwm_init(led->mpp_cfg->pwm_cfg, led->pdev,
+					led->cdev.name);
+		if (rc) {
+			dev_err(&led->pdev->dev,
+				"Failed to initialize pwm\n");
+			return rc;
+		}
+	}
+
+	return 0;
+}
+
+static int qpnp_gpio_init(struct qpnp_led_data *led)
+{
+	int rc;
+
+	rc = qpnp_led_masked_write(led, LED_GPIO_VIN_CTRL(led->base),
+		LED_GPIO_VIN_MASK, led->gpio_cfg->vin_ctrl);
+	if (rc) {
+		dev_err(&led->pdev->dev,
+			"Failed to write led vin control reg\n");
+		return rc;
+	}
+
+	return 0;
+}
+
+static int qpnp_led_initialize(struct qpnp_led_data *led)
+{
+	int rc = 0;
+
+	switch (led->id) {
+	case QPNP_ID_WLED:
+		rc = qpnp_wled_init(led);
+		if (rc)
+			dev_err(&led->pdev->dev,
+				"WLED initialize failed(%d)\n", rc);
+		break;
+	case QPNP_ID_FLASH1_LED0:
+	case QPNP_ID_FLASH1_LED1:
+		rc = qpnp_flash_init(led);
+		if (rc)
+			dev_err(&led->pdev->dev,
+				"FLASH initialize failed(%d)\n", rc);
+		break;
+	case QPNP_ID_RGB_RED:
+	case QPNP_ID_RGB_GREEN:
+	case QPNP_ID_RGB_BLUE:
+		rc = qpnp_rgb_init(led);
+		if (rc)
+			dev_err(&led->pdev->dev,
+				"RGB initialize failed(%d)\n", rc);
+		break;
+	case QPNP_ID_LED_MPP:
+		rc = qpnp_mpp_init(led);
+		if (rc)
+			dev_err(&led->pdev->dev,
+				"MPP initialize failed(%d)\n", rc);
+		break;
+	case QPNP_ID_LED_GPIO:
+		rc = qpnp_gpio_init(led);
+		if (rc)
+			dev_err(&led->pdev->dev,
+				"GPIO initialize failed(%d)\n", rc);
+		break;
+	case QPNP_ID_KPDBL:
+		rc = qpnp_kpdbl_init(led);
+		if (rc)
+			dev_err(&led->pdev->dev,
+				"KPDBL initialize failed(%d)\n", rc);
+		break;
+	default:
+		dev_err(&led->pdev->dev, "Invalid LED(%d)\n", led->id);
+		return -EINVAL;
+	}
+
+	return rc;
+}
+
+static int qpnp_get_common_configs(struct qpnp_led_data *led,
+				struct device_node *node)
+{
+	int rc;
+	u32 val;
+	const char *temp_string;
+
+	led->cdev.default_trigger = LED_TRIGGER_DEFAULT;
+	rc = of_property_read_string(node, "linux,default-trigger",
+		&temp_string);
+	if (!rc)
+		led->cdev.default_trigger = temp_string;
+	else if (rc != -EINVAL)
+		return rc;
+
+	led->default_on = false;
+	rc = of_property_read_string(node, "qcom,default-state",
+		&temp_string);
+	if (!rc) {
+		if (strcmp(temp_string, "on") == 0)
+			led->default_on = true;
+	} else if (rc != -EINVAL)
+		return rc;
+
+	led->turn_off_delay_ms = 0;
+	rc = of_property_read_u32(node, "qcom,turn-off-delay-ms", &val);
+	if (!rc)
+		led->turn_off_delay_ms = val;
+	else if (rc != -EINVAL)
+		return rc;
+
+	return 0;
+}
+
+/*
+ * Handlers for alternative sources of platform_data
+ */
+static int qpnp_get_config_wled(struct qpnp_led_data *led,
+				struct device_node *node)
+{
+	u32 val;
+	uint tmp;
+	int rc;
+
+	led->wled_cfg = devm_kzalloc(&led->pdev->dev,
+				sizeof(struct wled_config_data), GFP_KERNEL);
+	if (!led->wled_cfg)
+		return -ENOMEM;
+
+	rc = regmap_read(led->regmap, PMIC_VERSION_REG, &tmp);
+	if (rc) {
+		dev_err(&led->pdev->dev,
+			"Unable to read pmic ver, rc(%d)\n", rc);
+		return rc;
+	}
+	led->wled_cfg->pmic_version = (u8)tmp;
+
+	led->wled_cfg->num_strings = WLED_DEFAULT_STRINGS;
+	rc = of_property_read_u32(node, "qcom,num-strings", &val);
+	if (!rc)
+		led->wled_cfg->num_strings = (u8) val;
+	else if (rc != -EINVAL)
+		return rc;
+
+	led->wled_cfg->num_physical_strings = led->wled_cfg->num_strings;
+	rc = of_property_read_u32(node, "qcom,num-physical-strings", &val);
+	if (!rc)
+		led->wled_cfg->num_physical_strings = (u8) val;
+	else if (rc != -EINVAL)
+		return rc;
+
+	led->wled_cfg->ovp_val = WLED_DEFAULT_OVP_VAL;
+	rc = of_property_read_u32(node, "qcom,ovp-val", &val);
+	if (!rc)
+		led->wled_cfg->ovp_val = (u8) val;
+	else if (rc != -EINVAL)
+		return rc;
+
+	led->wled_cfg->boost_curr_lim = WLED_BOOST_LIM_DEFAULT;
+	rc = of_property_read_u32(node, "qcom,boost-curr-lim", &val);
+	if (!rc)
+		led->wled_cfg->boost_curr_lim = (u8) val;
+	else if (rc != -EINVAL)
+		return rc;
+
+	led->wled_cfg->cp_select = WLED_CP_SEL_DEFAULT;
+	rc = of_property_read_u32(node, "qcom,cp-sel", &val);
+	if (!rc)
+		led->wled_cfg->cp_select = (u8) val;
+	else if (rc != -EINVAL)
+		return rc;
+
+	led->wled_cfg->ctrl_delay_us = WLED_CTRL_DLY_DEFAULT;
+	rc = of_property_read_u32(node, "qcom,ctrl-delay-us", &val);
+	if (!rc)
+		led->wled_cfg->ctrl_delay_us = (u8) val;
+	else if (rc != -EINVAL)
+		return rc;
+
+	led->wled_cfg->op_fdbck = WLED_OP_FDBCK_DEFAULT;
+	rc = of_property_read_u32(node, "qcom,op-fdbck", &val);
+	if (!rc)
+		led->wled_cfg->op_fdbck = (u8) val;
+	else if (rc != -EINVAL)
+		return rc;
+
+	led->wled_cfg->switch_freq = WLED_SWITCH_FREQ_DEFAULT;
+	rc = of_property_read_u32(node, "qcom,switch-freq", &val);
+	if (!rc)
+		led->wled_cfg->switch_freq = (u8) val;
+	else if (rc != -EINVAL)
+		return rc;
+
+	led->wled_cfg->dig_mod_gen_en =
+		of_property_read_bool(node, "qcom,dig-mod-gen-en");
+
+	led->wled_cfg->cs_out_en =
+		of_property_read_bool(node, "qcom,cs-out-en");
+
+	return 0;
+}
+
+static int qpnp_get_config_flash(struct qpnp_led_data *led,
+				struct device_node *node, bool *reg_set)
+{
+	int rc;
+	u32 val;
+	uint tmp;
+
+	led->flash_cfg = devm_kzalloc(&led->pdev->dev,
+				sizeof(struct flash_config_data), GFP_KERNEL);
+	if (!led->flash_cfg)
+		return -ENOMEM;
+
+	rc = regmap_read(led->regmap, FLASH_PERIPHERAL_SUBTYPE(led->base),
+			&tmp);
+	if (rc) {
+		dev_err(&led->pdev->dev,
+			"Unable to read from addr=%x, rc(%d)\n",
+			FLASH_PERIPHERAL_SUBTYPE(led->base), rc);
+		return rc;
+	}
+	led->flash_cfg->peripheral_subtype = (u8)tmp;
+
+	led->flash_cfg->torch_enable =
+		of_property_read_bool(node, "qcom,torch-enable");
+
+	led->flash_cfg->no_smbb_support =
+		of_property_read_bool(node, "qcom,no-smbb-support");
+
+	if (of_find_property(of_get_parent(node), "flash-wa-supply",
+					NULL) && (!*reg_set)) {
+		led->flash_cfg->flash_wa_reg =
+			devm_regulator_get(&led->pdev->dev, "flash-wa");
+		if (IS_ERR_OR_NULL(led->flash_cfg->flash_wa_reg)) {
+			rc = PTR_ERR(led->flash_cfg->flash_wa_reg);
+			if (rc != -EPROBE_DEFER) {
+				dev_err(&led->pdev->dev,
+					"Flash wa regulator get failed(%d)\n",
+					rc);
+			}
+		} else {
+			led->flash_cfg->flash_wa_reg_get = true;
+		}
+	}
+
+	if (led->id == QPNP_ID_FLASH1_LED0) {
+		led->flash_cfg->enable_module = FLASH_ENABLE_LED_0;
+		led->flash_cfg->current_addr = FLASH_LED_0_CURR(led->base);
+		led->flash_cfg->trigger_flash = FLASH_LED_0_OUTPUT;
+		if (!*reg_set) {
+			led->flash_cfg->flash_boost_reg =
+				regulator_get(&led->pdev->dev,
+							"flash-boost");
+			if (IS_ERR(led->flash_cfg->flash_boost_reg)) {
+				rc = PTR_ERR(led->flash_cfg->flash_boost_reg);
+				dev_err(&led->pdev->dev,
+					"Regulator get failed(%d)\n", rc);
+				goto error_get_flash_reg;
+			}
+			led->flash_cfg->flash_reg_get = true;
+			*reg_set = true;
+		} else
+			led->flash_cfg->flash_reg_get = false;
+
+		if (led->flash_cfg->torch_enable) {
+			led->flash_cfg->second_addr =
+						FLASH_LED_1_CURR(led->base);
+		}
+	} else if (led->id == QPNP_ID_FLASH1_LED1) {
+		led->flash_cfg->enable_module = FLASH_ENABLE_LED_1;
+		led->flash_cfg->current_addr = FLASH_LED_1_CURR(led->base);
+		led->flash_cfg->trigger_flash = FLASH_LED_1_OUTPUT;
+		if (!*reg_set) {
+			led->flash_cfg->flash_boost_reg =
+					regulator_get(&led->pdev->dev,
+								"flash-boost");
+			if (IS_ERR(led->flash_cfg->flash_boost_reg)) {
+				rc = PTR_ERR(led->flash_cfg->flash_boost_reg);
+				dev_err(&led->pdev->dev,
+					"Regulator get failed(%d)\n", rc);
+				goto error_get_flash_reg;
+			}
+			led->flash_cfg->flash_reg_get = true;
+			*reg_set = true;
+		} else
+			led->flash_cfg->flash_reg_get = false;
+
+		if (led->flash_cfg->torch_enable) {
+			led->flash_cfg->second_addr =
+						FLASH_LED_0_CURR(led->base);
+		}
+	} else {
+		dev_err(&led->pdev->dev, "Unknown flash LED name given\n");
+		return -EINVAL;
+	}
+
+	if (led->flash_cfg->torch_enable) {
+		if (of_find_property(of_get_parent(node), "torch-boost-supply",
+									NULL)) {
+			if (!led->flash_cfg->no_smbb_support) {
+				led->flash_cfg->torch_boost_reg =
+					regulator_get(&led->pdev->dev,
+								"torch-boost");
+				if (IS_ERR(led->flash_cfg->torch_boost_reg)) {
+					rc = PTR_ERR(led->flash_cfg->
+							torch_boost_reg);
+					dev_err(&led->pdev->dev,
+					"Torch regulator get failed(%d)\n", rc);
+					goto error_get_torch_reg;
+				}
+			}
+			led->flash_cfg->enable_module = FLASH_ENABLE_MODULE;
+		} else
+			led->flash_cfg->enable_module = FLASH_ENABLE_ALL;
+		led->flash_cfg->trigger_flash = FLASH_TORCH_OUTPUT;
+
+		rc = of_property_read_u32(node, "qcom,duration", &val);
+		if (!rc)
+			led->flash_cfg->duration = ((u8) val) - 2;
+		else if (rc == -EINVAL)
+			led->flash_cfg->duration = TORCH_DURATION_12s;
+		else {
+			if (led->flash_cfg->peripheral_subtype ==
+							FLASH_SUBTYPE_SINGLE)
+				goto error_get_flash_reg;
+			else if (led->flash_cfg->peripheral_subtype ==
+							FLASH_SUBTYPE_DUAL)
+				goto error_get_torch_reg;
+		}
+
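+		/*
+		 * Scale the requested torch current (mA) linearly into the
+		 * TORCH_MAX_LEVEL register range relative to this LED's
+		 * maximum current.
+		 */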
+		rc = of_property_read_u32(node, "qcom,current", &val);
+		if (!rc)
+			led->flash_cfg->current_prgm = (val *
+				TORCH_MAX_LEVEL / led->max_current);
+		else {
+			if (led->flash_cfg->peripheral_subtype ==
+							FLASH_SUBTYPE_SINGLE)
+				goto error_get_flash_reg;
+			goto error_get_torch_reg;
+		}
+
+		return 0;
+	}
+
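+	/*
+	 * Flash safety timer duration: the register value appears to encode
+	 * 10 ms steps starting at 10 ms, hence (ms - 10) / 10.
+	 */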
+	rc = of_property_read_u32(node, "qcom,duration", &val);
+	if (!rc)
+		led->flash_cfg->duration = (u8)((val - 10) / 10);
+	else if (rc == -EINVAL)
+		led->flash_cfg->duration = FLASH_DURATION_200ms;
+	else
+		goto error_get_flash_reg;
+
+	rc = of_property_read_u32(node, "qcom,current", &val);
+	if (!rc)
+		led->flash_cfg->current_prgm = val * FLASH_MAX_LEVEL
+						/ led->max_current;
+	else
+		goto error_get_flash_reg;
+
+	rc = of_property_read_u32(node, "qcom,headroom", &val);
+	if (!rc)
+		led->flash_cfg->headroom = (u8) val;
+	else if (rc == -EINVAL)
+		led->flash_cfg->headroom = HEADROOM_500mV;
+	else
+		goto error_get_flash_reg;
+
+	rc = of_property_read_u32(node, "qcom,clamp-curr", &val);
+	if (!rc)
+		led->flash_cfg->clamp_curr = (val *
+				FLASH_MAX_LEVEL / led->max_current);
+	else if (rc == -EINVAL)
+		led->flash_cfg->clamp_curr = FLASH_CLAMP_200mA;
+	else
+		goto error_get_flash_reg;
+
+	rc = of_property_read_u32(node, "qcom,startup-dly", &val);
+	if (!rc)
+		led->flash_cfg->startup_dly = (u8) val;
+	else if (rc == -EINVAL)
+		led->flash_cfg->startup_dly = DELAY_128us;
+	else
+		goto error_get_flash_reg;
+
+	led->flash_cfg->safety_timer =
+		of_property_read_bool(node, "qcom,safety-timer");
+
+	led->flash_cfg->vreg_ok =
+		of_property_read_bool(node, "qcom,sw_vreg_ok");
+
+	return 0;
+
+error_get_torch_reg:
+	/* torch_boost_reg is only acquired when SMBB is supported */
+	if (!led->flash_cfg->no_smbb_support)
+		regulator_put(led->flash_cfg->torch_boost_reg);
+
+error_get_flash_reg:
+	regulator_put(led->flash_cfg->flash_boost_reg);
+	return rc;
+}
+
+static int qpnp_get_config_pwm(struct pwm_config_data *pwm_cfg,
+				struct platform_device *pdev,
+				struct device_node *node)
+{
+	struct property *prop;
+	int rc, i, lut_max_size;
+	u32 val;
+	u8 *temp_cfg;
+	const char *led_label;
+
+	pwm_cfg->pwm_dev = of_pwm_get(node, NULL);
+
+	if (IS_ERR(pwm_cfg->pwm_dev)) {
+		rc = PTR_ERR(pwm_cfg->pwm_dev);
+		dev_err(&pdev->dev, "Cannot get PWM device rc:(%d)\n", rc);
+		pwm_cfg->pwm_dev = NULL;
+		return rc;
+	}
+
+	if (pwm_cfg->mode != MANUAL_MODE) {
+		rc = of_property_read_u32(node, "qcom,pwm-us", &val);
+		if (!rc)
+			pwm_cfg->pwm_period_us = val;
+		else
+			return rc;
+	}
+
+	pwm_cfg->use_blink =
+		of_property_read_bool(node, "qcom,use-blink");
+
+	if (pwm_cfg->mode == LPG_MODE || pwm_cfg->use_blink) {
+		pwm_cfg->duty_cycles =
+			devm_kzalloc(&pdev->dev,
+			sizeof(struct pwm_duty_cycles), GFP_KERNEL);
+		if (!pwm_cfg->duty_cycles) {
+			dev_err(&pdev->dev, "Unable to allocate memory\n");
+			rc = -ENOMEM;
+			goto bad_lpg_params;
+		}
+
+		prop = of_find_property(node, "qcom,duty-pcts",
+			&pwm_cfg->duty_cycles->num_duty_pcts);
+		if (!prop) {
+			dev_err(&pdev->dev, "Looking up property node qcom,duty-pcts failed\n");
+			rc = -ENODEV;
+			goto bad_lpg_params;
+		} else if (!pwm_cfg->duty_cycles->num_duty_pcts) {
+			dev_err(&pdev->dev, "Invalid length of duty pcts\n");
+			rc = -EINVAL;
+			goto bad_lpg_params;
+		}
+
+		rc = of_property_read_string(node, "label", &led_label);
+
+		if (rc < 0) {
+			dev_err(&pdev->dev,
+				"Failure reading label, rc = %d\n", rc);
+			return rc;
+		}
+
+		if (strcmp(led_label, "kpdbl") == 0)
+			lut_max_size = PWM_GPLED_LUT_MAX_SIZE;
+		else
+			lut_max_size = PWM_LUT_MAX_SIZE;
+
+		pwm_cfg->duty_cycles->duty_pcts =
+			devm_kzalloc(&pdev->dev,
+			sizeof(int) * lut_max_size,
+			GFP_KERNEL);
+		if (!pwm_cfg->duty_cycles->duty_pcts) {
+			dev_err(&pdev->dev, "Unable to allocate memory\n");
+			rc = -ENOMEM;
+			goto bad_lpg_params;
+		}
+
+		pwm_cfg->old_duty_pcts =
+			devm_kzalloc(&pdev->dev,
+			sizeof(int) * lut_max_size,
+			GFP_KERNEL);
+		if (!pwm_cfg->old_duty_pcts) {
+			dev_err(&pdev->dev, "Unable to allocate memory\n");
+			rc = -ENOMEM;
+			goto bad_lpg_params;
+		}
+
+		temp_cfg = devm_kzalloc(&pdev->dev,
+				pwm_cfg->duty_cycles->num_duty_pcts *
+				sizeof(u8), GFP_KERNEL);
+		if (!temp_cfg) {
+			dev_err(&pdev->dev, "Failed to allocate memory for duty pcts\n");
+			rc = -ENOMEM;
+			goto bad_lpg_params;
+		}
+
+		memcpy(temp_cfg, prop->value,
+			pwm_cfg->duty_cycles->num_duty_pcts);
+
+		for (i = 0; i < pwm_cfg->duty_cycles->num_duty_pcts; i++)
+			pwm_cfg->duty_cycles->duty_pcts[i] =
+				(int) temp_cfg[i];
+
+		rc = of_property_read_u32(node, "qcom,start-idx", &val);
+		if (!rc) {
+			pwm_cfg->lut_params.start_idx = val;
+			pwm_cfg->duty_cycles->start_idx = val;
+		} else
+			goto bad_lpg_params;
+
+		pwm_cfg->lut_params.lut_pause_hi = 0;
+		rc = of_property_read_u32(node, "qcom,pause-hi", &val);
+		if (!rc)
+			pwm_cfg->lut_params.lut_pause_hi = val;
+		else if (rc != -EINVAL)
+			goto bad_lpg_params;
+
+		pwm_cfg->lut_params.lut_pause_lo = 0;
+		rc = of_property_read_u32(node, "qcom,pause-lo", &val);
+		if (!rc)
+			pwm_cfg->lut_params.lut_pause_lo = val;
+		else if (rc != -EINVAL)
+			goto bad_lpg_params;
+
+		pwm_cfg->lut_params.ramp_step_ms =
+				QPNP_LUT_RAMP_STEP_DEFAULT;
+		rc = of_property_read_u32(node, "qcom,ramp-step-ms", &val);
+		if (!rc)
+			pwm_cfg->lut_params.ramp_step_ms = val;
+		else if (rc != -EINVAL)
+			goto bad_lpg_params;
+
+		pwm_cfg->lut_params.flags = QPNP_LED_PWM_FLAGS;
+		rc = of_property_read_u32(node, "qcom,lut-flags", &val);
+		if (!rc)
+			pwm_cfg->lut_params.flags = (u8) val;
+		else if (rc != -EINVAL)
+			goto bad_lpg_params;
+
+		pwm_cfg->lut_params.idx_len =
+			pwm_cfg->duty_cycles->num_duty_pcts;
+	}
+	return 0;
+
+bad_lpg_params:
+	pwm_cfg->use_blink = false;
+	if (pwm_cfg->mode == PWM_MODE) {
+		dev_err(&pdev->dev, "LPG parameters not set for blink mode, defaulting to PWM mode\n");
+		return 0;
+	}
+	return rc;
+}
+
+static int qpnp_led_get_mode(const char *mode)
+{
+	if (strcmp(mode, "manual") == 0)
+		return MANUAL_MODE;
+	else if (strcmp(mode, "pwm") == 0)
+		return PWM_MODE;
+	else if (strcmp(mode, "lpg") == 0)
+		return LPG_MODE;
+	else
+		return -EINVAL;
+}
+
+static int qpnp_get_config_kpdbl(struct qpnp_led_data *led,
+				struct device_node *node)
+{
+	int rc;
+	u32 val;
+	int led_mode;
+	const char *mode;
+
+	led->kpdbl_cfg = devm_kzalloc(&led->pdev->dev,
+				sizeof(struct kpdbl_config_data), GFP_KERNEL);
+	if (!led->kpdbl_cfg)
+		return -ENOMEM;
+
+	rc = of_property_read_string(node, "qcom,mode", &mode);
+	if (!rc) {
+		led_mode = qpnp_led_get_mode(mode);
+		if ((led_mode == MANUAL_MODE) || (led_mode == -EINVAL)) {
+			dev_err(&led->pdev->dev, "Selected mode not supported for kpdbl.\n");
+			return -EINVAL;
+		}
+		led->kpdbl_cfg->pwm_cfg = devm_kzalloc(&led->pdev->dev,
+					sizeof(struct pwm_config_data),
+					GFP_KERNEL);
+		if (!led->kpdbl_cfg->pwm_cfg)
+			return -ENOMEM;
+
+		led->kpdbl_cfg->pwm_cfg->mode = led_mode;
+		led->kpdbl_cfg->pwm_cfg->default_mode = led_mode;
+	} else {
+		return rc;
+	}
+
+	rc = qpnp_get_config_pwm(led->kpdbl_cfg->pwm_cfg, led->pdev, node);
+	if (rc < 0) {
+		if (led->kpdbl_cfg->pwm_cfg->pwm_dev)
+			pwm_put(led->kpdbl_cfg->pwm_cfg->pwm_dev);
+		return rc;
+	}
+
+	rc = of_property_read_u32(node, "qcom,row-id", &val);
+	if (!rc)
+		led->kpdbl_cfg->row_id = val;
+	else
+		return rc;
+
+	led->kpdbl_cfg->row_src_vbst =
+			of_property_read_bool(node, "qcom,row-src-vbst");
+
+	led->kpdbl_cfg->row_src_en =
+			of_property_read_bool(node, "qcom,row-src-en");
+
+	led->kpdbl_cfg->always_on =
+			of_property_read_bool(node, "qcom,always-on");
+
+	return 0;
+}
+
+static int qpnp_get_config_rgb(struct qpnp_led_data *led,
+				struct device_node *node)
+{
+	int rc;
+	int led_mode;
+	const char *mode;
+
+	led->rgb_cfg = devm_kzalloc(&led->pdev->dev,
+				sizeof(struct rgb_config_data), GFP_KERNEL);
+	if (!led->rgb_cfg)
+		return -ENOMEM;
+
+	if (led->id == QPNP_ID_RGB_RED)
+		led->rgb_cfg->enable = RGB_LED_ENABLE_RED;
+	else if (led->id == QPNP_ID_RGB_GREEN)
+		led->rgb_cfg->enable = RGB_LED_ENABLE_GREEN;
+	else if (led->id == QPNP_ID_RGB_BLUE)
+		led->rgb_cfg->enable = RGB_LED_ENABLE_BLUE;
+	else
+		return -EINVAL;
+
+	rc = of_property_read_string(node, "qcom,mode", &mode);
+	if (!rc) {
+		led_mode = qpnp_led_get_mode(mode);
+		if ((led_mode == MANUAL_MODE) || (led_mode == -EINVAL)) {
+			dev_err(&led->pdev->dev, "Selected mode not supported for rgb\n");
+			return -EINVAL;
+		}
+		led->rgb_cfg->pwm_cfg = devm_kzalloc(&led->pdev->dev,
+					sizeof(struct pwm_config_data),
+					GFP_KERNEL);
+		if (!led->rgb_cfg->pwm_cfg) {
+			dev_err(&led->pdev->dev,
+				"Unable to allocate memory\n");
+			return -ENOMEM;
+		}
+		led->rgb_cfg->pwm_cfg->mode = led_mode;
+		led->rgb_cfg->pwm_cfg->default_mode = led_mode;
+	} else {
+		return rc;
+	}
+
+	rc = qpnp_get_config_pwm(led->rgb_cfg->pwm_cfg, led->pdev, node);
+	if (rc < 0) {
+		if (led->rgb_cfg->pwm_cfg->pwm_dev)
+			pwm_put(led->rgb_cfg->pwm_cfg->pwm_dev);
+		return rc;
+	}
+
+	return 0;
+}
+
+static int qpnp_get_config_mpp(struct qpnp_led_data *led,
+		struct device_node *node)
+{
+	int rc;
+	u32 val;
+	int led_mode;
+	const char *mode;
+
+	led->mpp_cfg = devm_kzalloc(&led->pdev->dev,
+			sizeof(struct mpp_config_data), GFP_KERNEL);
+	if (!led->mpp_cfg)
+		return -ENOMEM;
+
+	if (of_find_property(of_get_parent(node), "mpp-power-supply", NULL)) {
+		led->mpp_cfg->mpp_reg =
+				regulator_get(&led->pdev->dev,
+							"mpp-power");
+		if (IS_ERR(led->mpp_cfg->mpp_reg)) {
+			rc = PTR_ERR(led->mpp_cfg->mpp_reg);
+			dev_err(&led->pdev->dev,
+				"MPP regulator get failed(%d)\n", rc);
+			return rc;
+		}
+	}
+
+	if (led->mpp_cfg->mpp_reg) {
+		rc = of_property_read_u32(of_get_parent(node),
+					"qcom,mpp-power-max-voltage", &val);
+		if (!rc)
+			led->mpp_cfg->max_uV = val;
+		else
+			goto err_config_mpp;
+
+		rc = of_property_read_u32(of_get_parent(node),
+					"qcom,mpp-power-min-voltage", &val);
+		if (!rc)
+			led->mpp_cfg->min_uV = val;
+		else
+			goto err_config_mpp;
+	} else {
+		rc = of_property_read_u32(of_get_parent(node),
+					"qcom,mpp-power-max-voltage", &val);
+		if (!rc)
+			dev_warn(&led->pdev->dev, "No regulator specified\n");
+
+		rc = of_property_read_u32(of_get_parent(node),
+					"qcom,mpp-power-min-voltage", &val);
+		if (!rc)
+			dev_warn(&led->pdev->dev, "No regulator specified\n");
+	}
+
+	led->mpp_cfg->current_setting = LED_MPP_CURRENT_MIN;
+	rc = of_property_read_u32(node, "qcom,current-setting", &val);
+	if (!rc) {
+		if (val < LED_MPP_CURRENT_MIN)
+			led->mpp_cfg->current_setting = LED_MPP_CURRENT_MIN;
+		else if (val > LED_MPP_CURRENT_MAX)
+			led->mpp_cfg->current_setting = LED_MPP_CURRENT_MAX;
+		else
+			led->mpp_cfg->current_setting = (u8) val;
+	} else if (rc != -EINVAL)
+		goto err_config_mpp;
+
+	led->mpp_cfg->source_sel = LED_MPP_SOURCE_SEL_DEFAULT;
+	rc = of_property_read_u32(node, "qcom,source-sel", &val);
+	if (!rc)
+		led->mpp_cfg->source_sel = (u8) val;
+	else if (rc != -EINVAL)
+		goto err_config_mpp;
+
+	led->mpp_cfg->mode_ctrl = LED_MPP_MODE_SINK;
+	rc = of_property_read_u32(node, "qcom,mode-ctrl", &val);
+	if (!rc)
+		led->mpp_cfg->mode_ctrl = (u8) val;
+	else if (rc != -EINVAL)
+		goto err_config_mpp;
+
+	led->mpp_cfg->vin_ctrl = LED_MPP_VIN_CTRL_DEFAULT;
+	rc = of_property_read_u32(node, "qcom,vin-ctrl", &val);
+	if (!rc)
+		led->mpp_cfg->vin_ctrl = (u8) val;
+	else if (rc != -EINVAL)
+		goto err_config_mpp;
+
+	led->mpp_cfg->min_brightness = 0;
+	rc = of_property_read_u32(node, "qcom,min-brightness", &val);
+	if (!rc)
+		led->mpp_cfg->min_brightness = (u8) val;
+	else if (rc != -EINVAL)
+		goto err_config_mpp;
+
+	rc = of_property_read_string(node, "qcom,mode", &mode);
+	if (!rc) {
+		led_mode = qpnp_led_get_mode(mode);
+		led->mpp_cfg->pwm_mode = led_mode;
+		if (led_mode == MANUAL_MODE)
+			return MANUAL_MODE;
+		else if (led_mode == -EINVAL) {
+			dev_err(&led->pdev->dev, "Selected mode not supported for mpp\n");
+			rc = -EINVAL;
+			goto err_config_mpp;
+		}
+		led->mpp_cfg->pwm_cfg = devm_kzalloc(&led->pdev->dev,
+					sizeof(struct pwm_config_data),
+					GFP_KERNEL);
+		if (!led->mpp_cfg->pwm_cfg) {
+			dev_err(&led->pdev->dev,
+				"Unable to allocate memory\n");
+			rc = -ENOMEM;
+			goto err_config_mpp;
+		}
+		led->mpp_cfg->pwm_cfg->mode = led_mode;
+		led->mpp_cfg->pwm_cfg->default_mode = led_mode;
+	} else {
+		return rc;
+	}
+
+	rc = qpnp_get_config_pwm(led->mpp_cfg->pwm_cfg, led->pdev, node);
+	if (rc < 0) {
+		if (led->mpp_cfg->pwm_cfg && led->mpp_cfg->pwm_cfg->pwm_dev)
+			pwm_put(led->mpp_cfg->pwm_cfg->pwm_dev);
+		goto err_config_mpp;
+	}
+
+	return 0;
+
+err_config_mpp:
+	if (led->mpp_cfg->mpp_reg)
+		regulator_put(led->mpp_cfg->mpp_reg);
+	return rc;
+}
+
+static int qpnp_get_config_gpio(struct qpnp_led_data *led,
+		struct device_node *node)
+{
+	int rc;
+	u32 val;
+
+	led->gpio_cfg = devm_kzalloc(&led->pdev->dev,
+			sizeof(struct gpio_config_data), GFP_KERNEL);
+	if (!led->gpio_cfg)
+		return -ENOMEM;
+
+	led->gpio_cfg->source_sel = LED_GPIO_SOURCE_SEL_DEFAULT;
+	rc = of_property_read_u32(node, "qcom,source-sel", &val);
+	if (!rc)
+		led->gpio_cfg->source_sel = (u8) val;
+	else if (rc != -EINVAL)
+		goto err_config_gpio;
+
+	led->gpio_cfg->mode_ctrl = LED_GPIO_MODE_OUTPUT;
+	rc = of_property_read_u32(node, "qcom,mode-ctrl", &val);
+	if (!rc)
+		led->gpio_cfg->mode_ctrl = (u8) val;
+	else if (rc != -EINVAL)
+		goto err_config_gpio;
+
+	led->gpio_cfg->vin_ctrl = LED_GPIO_VIN_CTRL_DEFAULT;
+	rc = of_property_read_u32(node, "qcom,vin-ctrl", &val);
+	if (!rc)
+		led->gpio_cfg->vin_ctrl = (u8) val;
+	else if (rc != -EINVAL)
+		goto err_config_gpio;
+
+	return 0;
+
+err_config_gpio:
+	return rc;
+}
+
+static int qpnp_leds_probe(struct platform_device *pdev)
+{
+	struct qpnp_led_data *led, *led_array;
+	unsigned int base;
+	struct device_node *node, *temp;
+	int rc, i, num_leds = 0, parsed_leds = 0;
+	const char *led_label;
+	bool regulator_probe = false;
+
+	node = pdev->dev.of_node;
+	if (node == NULL)
+		return -ENODEV;
+
+	temp = NULL;
+	while ((temp = of_get_next_child(node, temp)))
+		num_leds++;
+
+	if (!num_leds)
+		return -ECHILD;
+
+	led_array = devm_kcalloc(&pdev->dev, num_leds, sizeof(*led_array),
+				GFP_KERNEL);
+	if (!led_array)
+		return -ENOMEM;
+
+	for_each_child_of_node(node, temp) {
+		led = &led_array[parsed_leds];
+		led->num_leds = num_leds;
+		led->regmap = dev_get_regmap(pdev->dev.parent, NULL);
+		if (!led->regmap) {
+			dev_err(&pdev->dev, "Couldn't get parent's regmap\n");
+			return -EINVAL;
+		}
+		led->pdev = pdev;
+
+		rc = of_property_read_u32(pdev->dev.of_node, "reg", &base);
+		if (rc < 0) {
+			dev_err(&pdev->dev,
+				"Couldn't find reg in node = %s rc = %d\n",
+				pdev->dev.of_node->full_name, rc);
+			goto fail_id_check;
+		}
+		led->base = base;
+
+		rc = of_property_read_string(temp, "label", &led_label);
+		if (rc < 0) {
+			dev_err(&led->pdev->dev,
+				"Failure reading label, rc = %d\n", rc);
+			goto fail_id_check;
+		}
+
+		rc = of_property_read_string(temp, "linux,name",
+			&led->cdev.name);
+		if (rc < 0) {
+			dev_err(&led->pdev->dev,
+				"Failure reading led name, rc = %d\n", rc);
+			goto fail_id_check;
+		}
+
+		rc = of_property_read_u32(temp, "qcom,max-current",
+			&led->max_current);
+		if (rc < 0) {
+			dev_err(&led->pdev->dev,
+				"Failure reading max_current, rc = %d\n", rc);
+			goto fail_id_check;
+		}
+
+		rc = of_property_read_u32(temp, "qcom,id", &led->id);
+		if (rc < 0) {
+			dev_err(&led->pdev->dev,
+				"Failure reading led id, rc = %d\n", rc);
+			goto fail_id_check;
+		}
+
+		rc = qpnp_get_common_configs(led, temp);
+		if (rc) {
+			dev_err(&led->pdev->dev, "Failure reading common led configuration, rc = %d\n",
+				rc);
+			goto fail_id_check;
+		}
+
+		led->cdev.brightness_set    = qpnp_led_set;
+		led->cdev.brightness_get    = qpnp_led_get;
+
+		if (strcmp(led_label, "wled") == 0) {
+			rc = qpnp_get_config_wled(led, temp);
+			if (rc < 0) {
+				dev_err(&led->pdev->dev,
+					"Unable to read wled config data\n");
+				goto fail_id_check;
+			}
+		} else if (strcmp(led_label, "flash") == 0) {
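+			/*
+			 * If the controller node has no flash-boost-supply
+			 * property, mark the regulator as already handled so
+			 * qpnp_get_config_flash() skips the flash-boost
+			 * lookup.
+			 */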
+			if (!of_find_property(node, "flash-boost-supply", NULL))
+				regulator_probe = true;
+			rc = qpnp_get_config_flash(led, temp, &regulator_probe);
+			if (rc < 0) {
+				dev_err(&led->pdev->dev,
+					"Unable to read flash config data\n");
+				goto fail_id_check;
+			}
+		} else if (strcmp(led_label, "rgb") == 0) {
+			rc = qpnp_get_config_rgb(led, temp);
+			if (rc < 0) {
+				dev_err(&led->pdev->dev,
+					"Unable to read rgb config data\n");
+				goto fail_id_check;
+			}
+		} else if (strcmp(led_label, "mpp") == 0) {
+			rc = qpnp_get_config_mpp(led, temp);
+			if (rc < 0) {
+				dev_err(&led->pdev->dev,
+						"Unable to read mpp config data\n");
+				goto fail_id_check;
+			}
+		} else if (strcmp(led_label, "gpio") == 0) {
+			rc = qpnp_get_config_gpio(led, temp);
+			if (rc < 0) {
+				dev_err(&led->pdev->dev,
+						"Unable to read gpio config data\n");
+				goto fail_id_check;
+			}
+		} else if (strcmp(led_label, "kpdbl") == 0) {
+			bitmap_zero(kpdbl_leds_in_use, NUM_KPDBL_LEDS);
+			is_kpdbl_master_turn_on = false;
+			rc = qpnp_get_config_kpdbl(led, temp);
+			if (rc < 0) {
+				dev_err(&led->pdev->dev,
+					"Unable to read kpdbl config data\n");
+				goto fail_id_check;
+			}
+		} else {
+			dev_err(&led->pdev->dev, "No LED matching label\n");
+			rc = -EINVAL;
+			goto fail_id_check;
+		}
+
+		if (led->id != QPNP_ID_FLASH1_LED0 &&
+					led->id != QPNP_ID_FLASH1_LED1)
+			mutex_init(&led->lock);
+
+		led->in_order_command_processing = of_property_read_bool
+				(temp, "qcom,in-order-command-processing");
+
+		if (led->in_order_command_processing) {
+			/*
+			 * The command order from user space needs to be
+			 * maintained; use an ordered workqueue to prevent
+			 * concurrent processing.
+			 */
+			led->workqueue = alloc_ordered_workqueue
+							("led_workqueue", 0);
+			if (!led->workqueue) {
+				rc = -ENOMEM;
+				goto fail_id_check;
+			}
+		}
+
+		INIT_WORK(&led->work, qpnp_led_work);
+
+		rc = qpnp_led_initialize(led);
+		if (rc < 0)
+			goto fail_id_check;
+
+		rc = qpnp_led_set_max_brightness(led);
+		if (rc < 0)
+			goto fail_id_check;
+
+		rc = led_classdev_register(&pdev->dev, &led->cdev);
+		if (rc) {
+			dev_err(&pdev->dev,
+				"unable to register led %d, rc=%d\n",
+				led->id, rc);
+			goto fail_id_check;
+		}
+
+		if (led->id == QPNP_ID_FLASH1_LED0 ||
+			led->id == QPNP_ID_FLASH1_LED1) {
+			rc = sysfs_create_group(&led->cdev.dev->kobj,
+							&led_attr_group);
+			if (rc)
+				goto fail_id_check;
+
+		}
+
+		if (led->id == QPNP_ID_LED_MPP) {
+			if (!led->mpp_cfg->pwm_cfg)
+				break;
+			if (led->mpp_cfg->pwm_cfg->mode == PWM_MODE) {
+				rc = sysfs_create_group(&led->cdev.dev->kobj,
+					&pwm_attr_group);
+				if (rc)
+					goto fail_id_check;
+			}
+			if (led->mpp_cfg->pwm_cfg->use_blink) {
+				rc = sysfs_create_group(&led->cdev.dev->kobj,
+					&blink_attr_group);
+				if (rc)
+					goto fail_id_check;
+
+				rc = sysfs_create_group(&led->cdev.dev->kobj,
+					&lpg_attr_group);
+				if (rc)
+					goto fail_id_check;
+			} else if (led->mpp_cfg->pwm_cfg->mode == LPG_MODE) {
+				rc = sysfs_create_group(&led->cdev.dev->kobj,
+					&lpg_attr_group);
+				if (rc)
+					goto fail_id_check;
+			}
+		} else if ((led->id == QPNP_ID_RGB_RED) ||
+			(led->id == QPNP_ID_RGB_GREEN) ||
+			(led->id == QPNP_ID_RGB_BLUE)) {
+			if (led->rgb_cfg->pwm_cfg->mode == PWM_MODE) {
+				rc = sysfs_create_group(&led->cdev.dev->kobj,
+					&pwm_attr_group);
+				if (rc)
+					goto fail_id_check;
+			}
+			if (led->rgb_cfg->pwm_cfg->use_blink) {
+				rc = sysfs_create_group(&led->cdev.dev->kobj,
+					&blink_attr_group);
+				if (rc)
+					goto fail_id_check;
+
+				rc = sysfs_create_group(&led->cdev.dev->kobj,
+					&lpg_attr_group);
+				if (rc)
+					goto fail_id_check;
+			} else if (led->rgb_cfg->pwm_cfg->mode == LPG_MODE) {
+				rc = sysfs_create_group(&led->cdev.dev->kobj,
+					&lpg_attr_group);
+				if (rc)
+					goto fail_id_check;
+			}
+		} else if (led->id == QPNP_ID_KPDBL) {
+			if (led->kpdbl_cfg->pwm_cfg->mode == PWM_MODE) {
+				rc = sysfs_create_group(&led->cdev.dev->kobj,
+					&pwm_attr_group);
+				if (rc)
+					goto fail_id_check;
+			}
+			if (led->kpdbl_cfg->pwm_cfg->use_blink) {
+				rc = sysfs_create_group(&led->cdev.dev->kobj,
+					&blink_attr_group);
+				if (rc)
+					goto fail_id_check;
+
+				rc = sysfs_create_group(&led->cdev.dev->kobj,
+					&lpg_attr_group);
+				if (rc)
+					goto fail_id_check;
+			} else if (led->kpdbl_cfg->pwm_cfg->mode == LPG_MODE) {
+				rc = sysfs_create_group(&led->cdev.dev->kobj,
+					&lpg_attr_group);
+				if (rc)
+					goto fail_id_check;
+			}
+		}
+
+		/* configure default state */
+		if (led->default_on) {
+			led->cdev.brightness = led->cdev.max_brightness;
+			__qpnp_led_work(led, led->cdev.brightness);
+			if (led->turn_off_delay_ms > 0)
+				qpnp_led_turn_off(led);
+		} else
+			led->cdev.brightness = LED_OFF;
+
+		parsed_leds++;
+	}
+	dev_set_drvdata(&pdev->dev, led_array);
+	return 0;
+
+fail_id_check:
+	for (i = 0; i < parsed_leds; i++) {
+		if (led_array[i].id != QPNP_ID_FLASH1_LED0 &&
+				led_array[i].id != QPNP_ID_FLASH1_LED1)
+			mutex_destroy(&led_array[i].lock);
+		if (led_array[i].in_order_command_processing)
+			destroy_workqueue(led_array[i].workqueue);
+		led_classdev_unregister(&led_array[i].cdev);
+	}
+
+	return rc;
+}
+
+static int qpnp_leds_remove(struct platform_device *pdev)
+{
+	struct qpnp_led_data *led_array = dev_get_drvdata(&pdev->dev);
+	int i, parsed_leds = led_array->num_leds;
+
+	for (i = 0; i < parsed_leds; i++) {
+		cancel_work_sync(&led_array[i].work);
+		if (led_array[i].id != QPNP_ID_FLASH1_LED0 &&
+				led_array[i].id != QPNP_ID_FLASH1_LED1)
+			mutex_destroy(&led_array[i].lock);
+
+		if (led_array[i].in_order_command_processing)
+			destroy_workqueue(led_array[i].workqueue);
+		led_classdev_unregister(&led_array[i].cdev);
+		switch (led_array[i].id) {
+		case QPNP_ID_WLED:
+			break;
+		case QPNP_ID_FLASH1_LED0:
+		case QPNP_ID_FLASH1_LED1:
+			if (led_array[i].flash_cfg->flash_reg_get)
+				regulator_put(
+				       led_array[i].flash_cfg->flash_boost_reg);
+			if (led_array[i].flash_cfg->torch_enable)
+				if (!led_array[i].flash_cfg->no_smbb_support)
+					regulator_put(led_array[i].
+					flash_cfg->torch_boost_reg);
+			sysfs_remove_group(&led_array[i].cdev.dev->kobj,
+							&led_attr_group);
+			break;
+		case QPNP_ID_RGB_RED:
+		case QPNP_ID_RGB_GREEN:
+		case QPNP_ID_RGB_BLUE:
+			if (led_array[i].rgb_cfg->pwm_cfg->mode == PWM_MODE)
+				sysfs_remove_group(&led_array[i].cdev.dev->kobj,
+							&pwm_attr_group);
+			if (led_array[i].rgb_cfg->pwm_cfg->use_blink) {
+				sysfs_remove_group(&led_array[i].cdev.dev->kobj,
+							&blink_attr_group);
+				sysfs_remove_group(&led_array[i].cdev.dev->kobj,
+							&lpg_attr_group);
+			} else if (led_array[i].rgb_cfg->pwm_cfg->mode
+				   == LPG_MODE)
+				sysfs_remove_group(&led_array[i].cdev.dev->kobj,
+							&lpg_attr_group);
+			break;
+		case QPNP_ID_LED_MPP:
+			if (!led_array[i].mpp_cfg->pwm_cfg)
+				break;
+			if (led_array[i].mpp_cfg->pwm_cfg->mode == PWM_MODE)
+				sysfs_remove_group(&led_array[i].cdev.dev->kobj,
+							&pwm_attr_group);
+			if (led_array[i].mpp_cfg->pwm_cfg->use_blink) {
+				sysfs_remove_group(&led_array[i].cdev.dev->kobj,
+							&blink_attr_group);
+				sysfs_remove_group(&led_array[i].cdev.dev->kobj,
+							&lpg_attr_group);
+			} else if (led_array[i].mpp_cfg->pwm_cfg->mode
+				   == LPG_MODE)
+				sysfs_remove_group(&led_array[i].cdev.dev->kobj,
+							&lpg_attr_group);
+			if (led_array[i].mpp_cfg->mpp_reg)
+				regulator_put(led_array[i].mpp_cfg->mpp_reg);
+			break;
+		case QPNP_ID_KPDBL:
+			if (led_array[i].kpdbl_cfg->pwm_cfg->mode == PWM_MODE)
+				sysfs_remove_group(&led_array[i].cdev.dev->kobj,
+							&pwm_attr_group);
+			if (led_array[i].kpdbl_cfg->pwm_cfg->use_blink) {
+				sysfs_remove_group(&led_array[i].cdev.dev->kobj,
+							&blink_attr_group);
+				sysfs_remove_group(&led_array[i].cdev.dev->kobj,
+							&lpg_attr_group);
+			} else if (led_array[i].kpdbl_cfg->pwm_cfg->mode
+				   == LPG_MODE)
+				sysfs_remove_group(&led_array[i].cdev.dev->kobj,
+							&lpg_attr_group);
+			break;
+		default:
+			dev_err(&led_array->pdev->dev,
+					"Invalid LED(%d)\n",
+					led_array[i].id);
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
+#ifdef CONFIG_OF
+static const struct of_device_id spmi_match_table[] = {
+	{ .compatible = "qcom,leds-qpnp",},
+	{ },
+};
+#else
+#define spmi_match_table NULL
+#endif
+
+static struct platform_driver qpnp_leds_driver = {
+	.driver		= {
+		.name		= "qcom,leds-qpnp",
+		.of_match_table	= spmi_match_table,
+	},
+	.probe		= qpnp_leds_probe,
+	.remove		= qpnp_leds_remove,
+};
+
+static int __init qpnp_led_init(void)
+{
+	return platform_driver_register(&qpnp_leds_driver);
+}
+module_init(qpnp_led_init);
+
+static void __exit qpnp_led_exit(void)
+{
+	platform_driver_unregister(&qpnp_leds_driver);
+}
+module_exit(qpnp_led_exit);
+
+MODULE_DESCRIPTION("QPNP LEDs driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("leds:leds-qpnp");
+
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/leds/leds-qpnp-flash-v2.c	2019-10-29 09:26:23.845205155 +0100
@@ -0,0 +1,2478 @@
+/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt)	"flashv2: %s: " fmt, __func__
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/of_gpio.h>
+#include <linux/gpio.h>
+#include <linux/regmap.h>
+#include <linux/power_supply.h>
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/regulator/consumer.h>
+#include <linux/leds-qpnp-flash.h>
+#include <linux/leds-qpnp-flash-v2.h>
+#include <linux/qpnp/qpnp-revid.h>
+#include <linux/log2.h>
+#include "leds.h"
+
+#define	FLASH_LED_REG_LED_STATUS1(base)		(base + 0x08)
+#define	FLASH_LED_REG_LED_STATUS2(base)		(base + 0x09)
+#define	FLASH_LED_REG_INT_RT_STS(base)		(base + 0x10)
+#define	FLASH_LED_REG_SAFETY_TMR(base)		(base + 0x40)
+#define	FLASH_LED_REG_TGR_CURRENT(base)		(base + 0x43)
+#define	FLASH_LED_REG_MOD_CTRL(base)		(base + 0x46)
+#define	FLASH_LED_REG_IRES(base)		(base + 0x47)
+#define	FLASH_LED_REG_STROBE_CFG(base)		(base + 0x48)
+#define	FLASH_LED_REG_STROBE_CTRL(base)		(base + 0x49)
+#define	FLASH_LED_EN_LED_CTRL(base)		(base + 0x4C)
+#define	FLASH_LED_REG_HDRM_PRGM(base)		(base + 0x4D)
+#define	FLASH_LED_REG_HDRM_AUTO_MODE_CTRL(base)	(base + 0x50)
+#define	FLASH_LED_REG_WARMUP_DELAY(base)	(base + 0x51)
+#define	FLASH_LED_REG_ISC_DELAY(base)		(base + 0x52)
+#define	FLASH_LED_REG_THERMAL_RMP_DN_RATE(base)	(base + 0x55)
+#define	FLASH_LED_REG_THERMAL_THRSH1(base)	(base + 0x56)
+#define	FLASH_LED_REG_THERMAL_THRSH2(base)	(base + 0x57)
+#define	FLASH_LED_REG_THERMAL_THRSH3(base)	(base + 0x58)
+#define	FLASH_LED_REG_THERMAL_HYSTERESIS(base)	(base + 0x59)
+#define	FLASH_LED_REG_THERMAL_DEBOUNCE(base)	(base + 0x5A)
+#define	FLASH_LED_REG_VPH_DROOP_THRESHOLD(base)	(base + 0x61)
+#define	FLASH_LED_REG_VPH_DROOP_DEBOUNCE(base)	(base + 0x62)
+#define	FLASH_LED_REG_ILED_GRT_THRSH(base)	(base + 0x67)
+#define	FLASH_LED_REG_LED1N2_ICLAMP_LOW(base)	(base + 0x68)
+#define	FLASH_LED_REG_LED1N2_ICLAMP_MID(base)	(base + 0x69)
+#define	FLASH_LED_REG_LED3_ICLAMP_LOW(base)	(base + 0x6A)
+#define	FLASH_LED_REG_LED3_ICLAMP_MID(base)	(base + 0x6B)
+#define	FLASH_LED_REG_MITIGATION_SEL(base)	(base + 0x6E)
+#define	FLASH_LED_REG_MITIGATION_SW(base)	(base + 0x6F)
+#define	FLASH_LED_REG_LMH_LEVEL(base)		(base + 0x70)
+#define	FLASH_LED_REG_MULTI_STROBE_CTRL(base)	(base + 0x71)
+#define	FLASH_LED_REG_LPG_INPUT_CTRL(base)	(base + 0x72)
+#define	FLASH_LED_REG_CURRENT_DERATE_EN(base)	(base + 0x76)
+
+#define	FLASH_LED_HDRM_VOL_MASK			GENMASK(7, 4)
+#define	FLASH_LED_CURRENT_MASK			GENMASK(6, 0)
+#define	FLASH_LED_STROBE_MASK			GENMASK(1, 0)
+#define	FLASH_HW_STROBE_MASK			GENMASK(2, 0)
+#define	FLASH_LED_ISC_WARMUP_DELAY_MASK		GENMASK(1, 0)
+#define	FLASH_LED_CURRENT_DERATE_EN_MASK	GENMASK(2, 0)
+#define	FLASH_LED_VPH_DROOP_DEBOUNCE_MASK	GENMASK(1, 0)
+#define	FLASH_LED_CHGR_MITIGATION_SEL_MASK	GENMASK(5, 4)
+#define	FLASH_LED_LMH_MITIGATION_SEL_MASK	GENMASK(1, 0)
+#define	FLASH_LED_ILED_GRT_THRSH_MASK		GENMASK(5, 0)
+#define	FLASH_LED_LMH_LEVEL_MASK		GENMASK(1, 0)
+#define	FLASH_LED_VPH_DROOP_HYSTERESIS_MASK	GENMASK(5, 4)
+#define	FLASH_LED_VPH_DROOP_THRESHOLD_MASK	GENMASK(2, 0)
+#define	FLASH_LED_THERMAL_HYSTERESIS_MASK	GENMASK(1, 0)
+#define	FLASH_LED_THERMAL_DEBOUNCE_MASK		GENMASK(1, 0)
+#define	FLASH_LED_THERMAL_THRSH_MASK		GENMASK(2, 0)
+#define	FLASH_LED_MOD_CTRL_MASK			BIT(7)
+#define	FLASH_LED_HW_SW_STROBE_SEL_BIT		BIT(2)
+#define	FLASH_LED_VPH_DROOP_FAULT_MASK		BIT(4)
+#define	FLASH_LED_LMH_MITIGATION_EN_MASK	BIT(0)
+#define	FLASH_LED_CHGR_MITIGATION_EN_MASK	BIT(4)
+#define	THERMAL_OTST1_RAMP_CTRL_MASK		BIT(7)
+#define	THERMAL_OTST1_RAMP_CTRL_SHIFT		7
+#define	THERMAL_DERATE_SLOW_SHIFT		4
+#define	THERMAL_DERATE_SLOW_MASK		GENMASK(6, 4)
+#define	THERMAL_DERATE_FAST_MASK		GENMASK(2, 0)
+#define	LED1N2_FLASH_ONCE_ONLY_BIT		BIT(0)
+#define	LED3_FLASH_ONCE_ONLY_BIT		BIT(1)
+#define	LPG_INPUT_SEL_BIT			BIT(0)
+
+#define	VPH_DROOP_DEBOUNCE_US_TO_VAL(val_us)	(val_us / 8)
+#define	VPH_DROOP_HYST_MV_TO_VAL(val_mv)	(val_mv / 25)
+#define	VPH_DROOP_THRESH_VAL_TO_UV(val)		((val + 25) * 100000)
+#define	MITIGATION_THRSH_MA_TO_VAL(val_ma)	(val_ma / 100)
+#define	THERMAL_HYST_TEMP_TO_VAL(val, divisor)	(val / divisor)
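+
+/*
+ * Examples: VPH_DROOP_THRESH_VAL_TO_UV(5) = (5 + 25) * 100000 = 3000000 uV
+ * (the 3.0 V default), and MITIGATION_THRSH_MA_TO_VAL(1000) = 10 = 0xA (the
+ * default charger mitigation threshold).
+ */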
+
+#define	FLASH_LED_ISC_WARMUP_DELAY_SHIFT	6
+#define	FLASH_LED_WARMUP_DELAY_DEFAULT		2
+#define	FLASH_LED_ISC_DELAY_DEFAULT		3
+#define	FLASH_LED_VPH_DROOP_DEBOUNCE_DEFAULT	2
+#define	FLASH_LED_VPH_DROOP_HYST_SHIFT		4
+#define	FLASH_LED_VPH_DROOP_HYST_DEFAULT	2
+#define	FLASH_LED_VPH_DROOP_THRESH_DEFAULT	5
+#define	FLASH_LED_DEBOUNCE_MAX			3
+#define	FLASH_LED_HYSTERESIS_MAX		3
+#define	FLASH_LED_VPH_DROOP_THRESH_MAX		7
+#define	THERMAL_DERATE_SLOW_MAX			314592
+#define	THERMAL_DERATE_FAST_MAX			512
+#define	THERMAL_DEBOUNCE_TIME_MAX		64
+#define	THERMAL_DERATE_HYSTERESIS_MAX		3
+#define	FLASH_LED_THERMAL_THRSH_MIN		3
+#define	FLASH_LED_THERMAL_THRSH_MAX		7
+#define	FLASH_LED_THERMAL_OTST_LEVELS		3
+#define	FLASH_LED_VLED_MAX_DEFAULT_UV		3500000
+#define	FLASH_LED_IBATT_OCP_THRESH_DEFAULT_UA	4500000
+#define	FLASH_LED_RPARA_DEFAULT_UOHM		0
+#define	FLASH_LED_SAFETY_TMR_ENABLE		BIT(7)
+#define	FLASH_LED_LMH_LEVEL_DEFAULT		0
+#define	FLASH_LED_LMH_MITIGATION_ENABLE		1
+#define	FLASH_LED_LMH_MITIGATION_DISABLE	0
+#define	FLASH_LED_CHGR_MITIGATION_ENABLE	BIT(4)
+#define	FLASH_LED_CHGR_MITIGATION_DISABLE	0
+#define	FLASH_LED_LMH_MITIGATION_SEL_DEFAULT	2
+#define	FLASH_LED_MITIGATION_SEL_MAX		2
+#define	FLASH_LED_CHGR_MITIGATION_SEL_SHIFT	4
+#define	FLASH_LED_CHGR_MITIGATION_THRSH_DEFAULT	0xA
+#define	FLASH_LED_CHGR_MITIGATION_THRSH_MAX	0x1F
+#define	FLASH_LED_LMH_OCV_THRESH_DEFAULT_UV	3700000
+#define	FLASH_LED_LMH_RBATT_THRESH_DEFAULT_UOHM	400000
+#define	FLASH_LED_IRES_BASE			3
+#define	FLASH_LED_IRES_DIVISOR			2500
+#define	FLASH_LED_IRES_MIN_UA			5000
+#define	FLASH_LED_IRES_DEFAULT_UA		12500
+#define	FLASH_LED_IRES_DEFAULT_VAL		0x00
+#define	FLASH_LED_HDRM_VOL_SHIFT		4
+#define	FLASH_LED_HDRM_VOL_DEFAULT_MV		0x80
+#define	FLASH_LED_HDRM_VOL_HI_LO_WIN_DEFAULT_MV	0x04
+#define	FLASH_LED_HDRM_VOL_BASE_MV		125
+#define	FLASH_LED_HDRM_VOL_STEP_MV		25
+#define	FLASH_LED_STROBE_CFG_DEFAULT		0x00
+#define	FLASH_LED_HW_STROBE_OPTION_1		0x00
+#define	FLASH_LED_HW_STROBE_OPTION_2		0x01
+#define	FLASH_LED_HW_STROBE_OPTION_3		0x02
+#define	FLASH_LED_ENABLE			BIT(0)
+#define	FLASH_LED_MOD_ENABLE			BIT(7)
+#define	FLASH_LED_DISABLE			0x00
+#define	FLASH_LED_SAFETY_TMR_DISABLED		0x13
+#define	FLASH_LED_MAX_TOTAL_CURRENT_MA		3750
+#define	FLASH_LED_IRES5P0_MAX_CURR_MA		640
+#define	FLASH_LED_IRES7P5_MAX_CURR_MA		960
+#define	FLASH_LED_IRES10P0_MAX_CURR_MA		1280
+#define	FLASH_LED_IRES12P5_MAX_CURR_MA		1600
+#define	MAX_IRES_LEVELS				4
+
+/* notifier call chain for flash-led irqs */
+static ATOMIC_NOTIFIER_HEAD(irq_notifier_list);
+
+enum flash_charger_mitigation {
+	FLASH_DISABLE_CHARGER_MITIGATION,
+	FLASH_HW_CHARGER_MITIGATION_BY_ILED_THRSHLD,
+	FLASH_SW_CHARGER_MITIGATION,
+};
+
+enum flash_led_type {
+	FLASH_LED_TYPE_FLASH,
+	FLASH_LED_TYPE_TORCH,
+};
+
+enum {
+	LED1 = 0,
+	LED2,
+	LED3,
+};
+
+enum strobe_type {
+	SW_STROBE = 0,
+	HW_STROBE,
+	LPG_STROBE,
+};
+
+/*
+ * Configurations for each individual LED
+ */
+struct flash_node_data {
+	struct platform_device		*pdev;
+	struct led_classdev		cdev;
+	struct pinctrl			*strobe_pinctrl;
+	struct pinctrl_state		*hw_strobe_state_active;
+	struct pinctrl_state		*hw_strobe_state_suspend;
+	int				hw_strobe_gpio;
+	int				ires_ua;
+	int				default_ires_ua;
+	int				max_current;
+	int				current_ma;
+	int				prev_current_ma;
+	u8				duration;
+	u8				id;
+	u8				type;
+	u8				ires_idx;
+	u8				default_ires_idx;
+	u8				hdrm_val;
+	u8				current_reg_val;
+	u8				strobe_ctrl;
+	u8				strobe_sel;
+	bool				led_on;
+};
+
+struct flash_switch_data {
+	struct platform_device		*pdev;
+	struct regulator		*vreg;
+	struct pinctrl			*led_en_pinctrl;
+	struct pinctrl_state		*gpio_state_active;
+	struct pinctrl_state		*gpio_state_suspend;
+	struct led_classdev		cdev;
+	int				led_mask;
+	bool				regulator_on;
+	bool				enabled;
+};
+
+/*
+ * Flash LED configuration read from device tree
+ */
+struct flash_led_platform_data {
+	struct pmic_revid_data	*pmic_rev_id;
+	int			*thermal_derate_current;
+	int			all_ramp_up_done_irq;
+	int			all_ramp_down_done_irq;
+	int			led_fault_irq;
+	int			ibatt_ocp_threshold_ua;
+	int			vled_max_uv;
+	int			rpara_uohm;
+	int			lmh_rbatt_threshold_uohm;
+	int			lmh_ocv_threshold_uv;
+	int			thermal_derate_slow;
+	int			thermal_derate_fast;
+	int			thermal_hysteresis;
+	int			thermal_debounce;
+	int			thermal_thrsh1;
+	int			thermal_thrsh2;
+	int			thermal_thrsh3;
+	int			hw_strobe_option;
+	u32			led1n2_iclamp_low_ma;
+	u32			led1n2_iclamp_mid_ma;
+	u32			led3_iclamp_low_ma;
+	u32			led3_iclamp_mid_ma;
+	u8			isc_delay;
+	u8			warmup_delay;
+	u8			current_derate_en_cfg;
+	u8			vph_droop_threshold;
+	u8			vph_droop_hysteresis;
+	u8			vph_droop_debounce;
+	u8			lmh_mitigation_sel;
+	u8			chgr_mitigation_sel;
+	u8			lmh_level;
+	u8			iled_thrsh_val;
+	bool			hdrm_auto_mode_en;
+	bool			thermal_derate_en;
+	bool			otst_ramp_bkup_en;
+};
+
+/*
+ * Flash LED data structure containing flash LED attributes
+ */
+struct qpnp_flash_led {
+	struct flash_led_platform_data	*pdata;
+	struct platform_device		*pdev;
+	struct regmap			*regmap;
+	struct flash_node_data		*fnode;
+	struct flash_switch_data	*snode;
+	struct power_supply		*bms_psy;
+	struct notifier_block		nb;
+	spinlock_t			lock;
+	int				num_fnodes;
+	int				num_snodes;
+	int				enable;
+	int				total_current_ma;
+	u16				base;
+	bool				trigger_lmh;
+	bool				trigger_chgr;
+};
+
+static int thermal_derate_slow_table[] = {
+	128, 256, 512, 1024, 2048, 4096, 8192, 314592,
+};
+
+static int thermal_derate_fast_table[] = {
+	32, 64, 96, 128, 256, 384, 512,
+};
+
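+/* Over-temperature (OTST) trip thresholds, likely in degrees C */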
+static int otst1_threshold_table[] = {
+	85, 79, 73, 67, 109, 103, 97, 91,
+};
+
+static int otst2_threshold_table[] = {
+	110, 104, 98, 92, 134, 128, 122, 116,
+};
+
+static int otst3_threshold_table[] = {
+	125, 119, 113, 107, 149, 143, 137, 131,
+};
+
+static int max_ires_curr_ma_table[MAX_IRES_LEVELS] = {
+	FLASH_LED_IRES12P5_MAX_CURR_MA, FLASH_LED_IRES10P0_MAX_CURR_MA,
+	FLASH_LED_IRES7P5_MAX_CURR_MA, FLASH_LED_IRES5P0_MAX_CURR_MA
+};
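+/*
+ * Note: this table is indexed by ires_idx, where index 0 is the coarsest
+ * 12.5 mA resolution and index 3 the finest 5 mA resolution. Each
+ * resolution spans 128 register steps, so the ceiling is simply
+ * 128 * resolution (e.g. 128 * 12.5 mA = 1600 mA, 128 * 10 mA = 1280 mA).
+ */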
+
+static inline int get_current_reg_code(int target_curr_ma, int ires_ua)
+{
+	if (!ires_ua || !target_curr_ma || (target_curr_ma < (ires_ua / 1000)))
+		return 0;
+
+	return DIV_ROUND_UP(target_curr_ma * 1000, ires_ua) - 1;
+}
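+/*
+ * Worked example of the encoding above: programming 1000 mA at the
+ * 12.5 mA resolution (ires_ua = 12500) yields
+ * DIV_ROUND_UP(1000 * 1000, 12500) - 1 = 80 - 1 = 79 (0x4F); code 0
+ * already drives one LSB worth of current, hence the "- 1".
+ */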
+
+static int qpnp_flash_led_read(struct qpnp_flash_led *led, u16 addr, u8 *data)
+{
+	int rc;
+	uint val;
+
+	rc = regmap_read(led->regmap, addr, &val);
+	if (rc < 0) {
+		pr_err("Unable to read from 0x%04X rc = %d\n", addr, rc);
+		return rc;
+	}
+
+	pr_debug("Read 0x%02X from addr 0x%04X\n", val, addr);
+	*data = (u8)val;
+	return 0;
+}
+
+static int qpnp_flash_led_write(struct qpnp_flash_led *led, u16 addr, u8 data)
+{
+	int rc;
+
+	rc = regmap_write(led->regmap, addr, data);
+	if (rc < 0) {
+		pr_err("Unable to write to 0x%04X rc = %d\n", addr, rc);
+		return rc;
+	}
+
+	pr_debug("Wrote 0x%02X to addr 0x%04X\n", data, addr);
+	return 0;
+}
+
+static int
+qpnp_flash_led_masked_read(struct qpnp_flash_led *led, u16 addr, u8 mask,
+								u8 *val)
+{
+	int rc;
+
+	rc = qpnp_flash_led_read(led, addr, val);
+	if (rc < 0)
+		return rc;
+
+	*val &= mask;
+	return rc;
+}
+
+static int
+qpnp_flash_led_masked_write(struct qpnp_flash_led *led, u16 addr, u8 mask,
+								u8 val)
+{
+	int rc;
+
+	rc = regmap_update_bits(led->regmap, addr, mask, val);
+	if (rc < 0)
+		pr_err("Unable to update bits from 0x%04X, rc = %d\n", addr,
+			rc);
+	else
+		pr_debug("Wrote 0x%02X to addr 0x%04X\n", val, addr);
+
+	return rc;
+}
+
+static enum led_brightness
+qpnp_flash_led_brightness_get(struct led_classdev *led_cdev)
+{
+	return led_cdev->brightness;
+}
+
+static int qpnp_flash_led_init_settings(struct qpnp_flash_led *led)
+{
+	int rc, i, addr_offset;
+	u8 val = 0, mask, strobe_mask = 0, strobe_ctrl;
+
+	for (i = 0; i < led->num_fnodes; i++) {
+		addr_offset = led->fnode[i].id;
+		rc = qpnp_flash_led_write(led,
+			FLASH_LED_REG_HDRM_PRGM(led->base + addr_offset),
+			led->fnode[i].hdrm_val);
+		if (rc < 0)
+			return rc;
+
+		val |= 0x1 << led->fnode[i].id;
+
+		if (led->fnode[i].strobe_sel == HW_STROBE) {
+			if (led->fnode[i].id == LED3)
+				strobe_mask |= LED3_FLASH_ONCE_ONLY_BIT;
+			else
+				strobe_mask |= LED1N2_FLASH_ONCE_ONLY_BIT;
+		}
+
+		if (led->fnode[i].id == LED3 &&
+				led->fnode[i].strobe_sel == LPG_STROBE)
+			strobe_mask |= LED3_FLASH_ONCE_ONLY_BIT;
+		/*
+		 * As per the hardware recommendation, to use LED2/LED3 in HW
+		 * strobe mode, LED1 should be set to HW strobe mode as well.
+		 */
+		if (led->fnode[i].strobe_sel == HW_STROBE &&
+		      (led->fnode[i].id == LED2 || led->fnode[i].id == LED3)) {
+			mask = FLASH_HW_STROBE_MASK;
+			addr_offset = led->fnode[LED1].id;
+			/*
+			 * HW_STROBE: enable, TRIGGER: level,
+			 * POLARITY: active high
+			 */
+			strobe_ctrl = BIT(2) | BIT(0);
+			rc = qpnp_flash_led_masked_write(led,
+				FLASH_LED_REG_STROBE_CTRL(
+				led->base + addr_offset),
+				mask, strobe_ctrl);
+			if (rc < 0)
+				return rc;
+		}
+	}
+
+	rc = qpnp_flash_led_masked_write(led,
+		FLASH_LED_REG_MULTI_STROBE_CTRL(led->base),
+		strobe_mask, 0);
+	if (rc < 0)
+		return rc;
+
+	if (led->fnode[LED3].strobe_sel == LPG_STROBE) {
+		rc = qpnp_flash_led_masked_write(led,
+			FLASH_LED_REG_LPG_INPUT_CTRL(led->base),
+			LPG_INPUT_SEL_BIT, LPG_INPUT_SEL_BIT);
+		if (rc < 0)
+			return rc;
+	}
+
+	rc = qpnp_flash_led_write(led,
+				FLASH_LED_REG_HDRM_AUTO_MODE_CTRL(led->base),
+				val);
+	if (rc < 0)
+		return rc;
+
+	rc = qpnp_flash_led_masked_write(led,
+			FLASH_LED_REG_ISC_DELAY(led->base),
+			FLASH_LED_ISC_WARMUP_DELAY_MASK,
+			led->pdata->isc_delay);
+	if (rc < 0)
+		return rc;
+
+	rc = qpnp_flash_led_masked_write(led,
+			FLASH_LED_REG_WARMUP_DELAY(led->base),
+			FLASH_LED_ISC_WARMUP_DELAY_MASK,
+			led->pdata->warmup_delay);
+	if (rc < 0)
+		return rc;
+
+	rc = qpnp_flash_led_masked_write(led,
+			FLASH_LED_REG_CURRENT_DERATE_EN(led->base),
+			FLASH_LED_CURRENT_DERATE_EN_MASK,
+			led->pdata->current_derate_en_cfg);
+	if (rc < 0)
+		return rc;
+
+	val = (led->pdata->otst_ramp_bkup_en << THERMAL_OTST1_RAMP_CTRL_SHIFT);
+	mask = THERMAL_OTST1_RAMP_CTRL_MASK;
+	if (led->pdata->thermal_derate_slow >= 0) {
+		val |= (led->pdata->thermal_derate_slow <<
+				THERMAL_DERATE_SLOW_SHIFT);
+		mask |= THERMAL_DERATE_SLOW_MASK;
+	}
+
+	if (led->pdata->thermal_derate_fast >= 0) {
+		val |= led->pdata->thermal_derate_fast;
+		mask |= THERMAL_DERATE_FAST_MASK;
+	}
+
+	rc = qpnp_flash_led_masked_write(led,
+			FLASH_LED_REG_THERMAL_RMP_DN_RATE(led->base),
+			mask, val);
+	if (rc < 0)
+		return rc;
+
+	if (led->pdata->thermal_debounce >= 0) {
+		rc = qpnp_flash_led_masked_write(led,
+				FLASH_LED_REG_THERMAL_DEBOUNCE(led->base),
+				FLASH_LED_THERMAL_DEBOUNCE_MASK,
+				led->pdata->thermal_debounce);
+		if (rc < 0)
+			return rc;
+	}
+
+	if (led->pdata->thermal_hysteresis >= 0) {
+		rc = qpnp_flash_led_masked_write(led,
+				FLASH_LED_REG_THERMAL_HYSTERESIS(led->base),
+				FLASH_LED_THERMAL_HYSTERESIS_MASK,
+				led->pdata->thermal_hysteresis);
+		if (rc < 0)
+			return rc;
+	}
+
+	if (led->pdata->thermal_thrsh1 >= 0) {
+		rc = qpnp_flash_led_masked_write(led,
+				FLASH_LED_REG_THERMAL_THRSH1(led->base),
+				FLASH_LED_THERMAL_THRSH_MASK,
+				led->pdata->thermal_thrsh1);
+		if (rc < 0)
+			return rc;
+	}
+
+	if (led->pdata->thermal_thrsh2 >= 0) {
+		rc = qpnp_flash_led_masked_write(led,
+				FLASH_LED_REG_THERMAL_THRSH2(led->base),
+				FLASH_LED_THERMAL_THRSH_MASK,
+				led->pdata->thermal_thrsh2);
+		if (rc < 0)
+			return rc;
+	}
+
+	if (led->pdata->thermal_thrsh3 >= 0) {
+		rc = qpnp_flash_led_masked_write(led,
+				FLASH_LED_REG_THERMAL_THRSH3(led->base),
+				FLASH_LED_THERMAL_THRSH_MASK,
+				led->pdata->thermal_thrsh3);
+		if (rc < 0)
+			return rc;
+	}
+
+	rc = qpnp_flash_led_masked_write(led,
+			FLASH_LED_REG_VPH_DROOP_DEBOUNCE(led->base),
+			FLASH_LED_VPH_DROOP_DEBOUNCE_MASK,
+			led->pdata->vph_droop_debounce);
+	if (rc < 0)
+		return rc;
+
+	rc = qpnp_flash_led_masked_write(led,
+			FLASH_LED_REG_VPH_DROOP_THRESHOLD(led->base),
+			FLASH_LED_VPH_DROOP_THRESHOLD_MASK,
+			led->pdata->vph_droop_threshold);
+	if (rc < 0)
+		return rc;
+
+	rc = qpnp_flash_led_masked_write(led,
+			FLASH_LED_REG_VPH_DROOP_THRESHOLD(led->base),
+			FLASH_LED_VPH_DROOP_HYSTERESIS_MASK,
+			led->pdata->vph_droop_hysteresis);
+	if (rc < 0)
+		return rc;
+
+	rc = qpnp_flash_led_masked_write(led,
+			FLASH_LED_REG_MITIGATION_SEL(led->base),
+			FLASH_LED_LMH_MITIGATION_SEL_MASK,
+			led->pdata->lmh_mitigation_sel);
+	if (rc < 0)
+		return rc;
+
+	val = led->pdata->chgr_mitigation_sel
+				<< FLASH_LED_CHGR_MITIGATION_SEL_SHIFT;
+	rc = qpnp_flash_led_masked_write(led,
+			FLASH_LED_REG_MITIGATION_SEL(led->base),
+			FLASH_LED_CHGR_MITIGATION_SEL_MASK,
+			val);
+	if (rc < 0)
+		return rc;
+
+	rc = qpnp_flash_led_masked_write(led,
+			FLASH_LED_REG_LMH_LEVEL(led->base),
+			FLASH_LED_LMH_LEVEL_MASK,
+			led->pdata->lmh_level);
+	if (rc < 0)
+		return rc;
+
+	rc = qpnp_flash_led_masked_write(led,
+			FLASH_LED_REG_ILED_GRT_THRSH(led->base),
+			FLASH_LED_ILED_GRT_THRSH_MASK,
+			led->pdata->iled_thrsh_val);
+	if (rc < 0)
+		return rc;
+
+	if (led->pdata->led1n2_iclamp_low_ma) {
+		val = get_current_reg_code(led->pdata->led1n2_iclamp_low_ma,
+						led->fnode[LED1].ires_ua);
+		rc = qpnp_flash_led_masked_write(led,
+				FLASH_LED_REG_LED1N2_ICLAMP_LOW(led->base),
+				FLASH_LED_CURRENT_MASK, val);
+		if (rc < 0)
+			return rc;
+	}
+
+	if (led->pdata->led1n2_iclamp_mid_ma) {
+		val = get_current_reg_code(led->pdata->led1n2_iclamp_mid_ma,
+						led->fnode[LED1].ires_ua);
+		rc = qpnp_flash_led_masked_write(led,
+				FLASH_LED_REG_LED1N2_ICLAMP_MID(led->base),
+				FLASH_LED_CURRENT_MASK, val);
+		if (rc < 0)
+			return rc;
+	}
+
+	if (led->pdata->led3_iclamp_low_ma) {
+		val = get_current_reg_code(led->pdata->led3_iclamp_low_ma,
+						led->fnode[LED3].ires_ua);
+		rc = qpnp_flash_led_masked_write(led,
+				FLASH_LED_REG_LED3_ICLAMP_LOW(led->base),
+				FLASH_LED_CURRENT_MASK, val);
+		if (rc < 0)
+			return rc;
+	}
+
+	if (led->pdata->led3_iclamp_mid_ma) {
+		val = get_current_reg_code(led->pdata->led3_iclamp_mid_ma,
+						led->fnode[LED3].ires_ua);
+		rc = qpnp_flash_led_masked_write(led,
+				FLASH_LED_REG_LED3_ICLAMP_MID(led->base),
+				FLASH_LED_CURRENT_MASK, val);
+		if (rc < 0)
+			return rc;
+	}
+
+	if (led->pdata->hw_strobe_option > 0) {
+		rc = qpnp_flash_led_masked_write(led,
+				FLASH_LED_REG_STROBE_CFG(led->base),
+				FLASH_LED_STROBE_MASK,
+				led->pdata->hw_strobe_option);
+		if (rc < 0)
+			return rc;
+	}
+
+	return 0;
+}
+
+static int qpnp_flash_led_hw_strobe_enable(struct flash_node_data *fnode,
+						int hw_strobe_option, bool on)
+{
+	int rc = 0;
+
+	/*
+	 * If the LED controlled by this fnode is not GPIO controlled
+	 * for the given strobe option, there is nothing to toggle.
+	 */
+	if (hw_strobe_option == FLASH_LED_HW_STROBE_OPTION_1)
+		return 0;
+	else if (hw_strobe_option == FLASH_LED_HW_STROBE_OPTION_2
+						&& fnode->id != LED3)
+		return 0;
+	else if (hw_strobe_option == FLASH_LED_HW_STROBE_OPTION_3
+						&& fnode->id == LED1)
+		return 0;
+
+	if (gpio_is_valid(fnode->hw_strobe_gpio)) {
+		gpio_set_value(fnode->hw_strobe_gpio, on ? 1 : 0);
+	} else if (fnode->strobe_pinctrl && fnode->hw_strobe_state_active &&
+					fnode->hw_strobe_state_suspend) {
+		rc = pinctrl_select_state(fnode->strobe_pinctrl,
+			on ? fnode->hw_strobe_state_active :
+			fnode->hw_strobe_state_suspend);
+		if (rc < 0) {
+			pr_err("failed to change hw strobe pin state\n");
+			return rc;
+		}
+	}
+
+	return rc;
+}
+
+static int qpnp_flash_led_regulator_enable(struct qpnp_flash_led *led,
+				struct flash_switch_data *snode, bool on)
+{
+	int rc = 0;
+
+	if (!snode || !snode->vreg)
+		return 0;
+
+	if (snode->regulator_on == on)
+		return 0;
+
+	if (on)
+		rc = regulator_enable(snode->vreg);
+	else
+		rc = regulator_disable(snode->vreg);
+
+	if (rc < 0) {
+		pr_err("regulator_%s failed, rc=%d\n",
+			on ? "enable" : "disable", rc);
+		return rc;
+	}
+
+	snode->regulator_on = on;
+	return 0;
+}
+
+static int get_property_from_fg(struct qpnp_flash_led *led,
+		enum power_supply_property prop, int *val)
+{
+	int rc;
+	union power_supply_propval pval = {0, };
+
+	if (!led->bms_psy) {
+		pr_err("no bms psy found\n");
+		return -EINVAL;
+	}
+
+	rc = power_supply_get_property(led->bms_psy, prop, &pval);
+	if (rc) {
+		pr_err("bms psy doesn't support reading prop %d rc = %d\n",
+			prop, rc);
+		return rc;
+	}
+
+	*val = pval.intval;
+	return rc;
+}
+
+#define VOLTAGE_HDRM_DEFAULT_MV	350
+static int qpnp_flash_led_get_voltage_headroom(struct qpnp_flash_led *led)
+{
+	int i, voltage_hdrm_mv = 0, voltage_hdrm_max = 0;
+
+	for (i = 0; i < led->num_fnodes; i++) {
+		if (led->fnode[i].led_on) {
+			if (led->fnode[i].id < 2) {
+				if (led->fnode[i].current_ma < 750)
+					voltage_hdrm_mv = 125;
+				else if (led->fnode[i].current_ma < 1000)
+					voltage_hdrm_mv = 175;
+				else if (led->fnode[i].current_ma < 1250)
+					voltage_hdrm_mv = 250;
+				else
+					voltage_hdrm_mv = 350;
+			} else {
+				if (led->fnode[i].current_ma < 375)
+					voltage_hdrm_mv = 125;
+				else if (led->fnode[i].current_ma < 500)
+					voltage_hdrm_mv = 175;
+				else if (led->fnode[i].current_ma < 625)
+					voltage_hdrm_mv = 250;
+				else
+					voltage_hdrm_mv = 350;
+			}
+
+			voltage_hdrm_max = max(voltage_hdrm_max,
+						voltage_hdrm_mv);
+		}
+	}
+
+	if (!voltage_hdrm_max)
+		return VOLTAGE_HDRM_DEFAULT_MV;
+
+	return voltage_hdrm_max;
+}
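+/*
+ * Headroom ladder used above, summarized (threshold in mA -> headroom in
+ * mV): LED1/LED2: <750 -> 125, <1000 -> 175, <1250 -> 250, else 350;
+ * LED3: <375 -> 125, <500 -> 175, <625 -> 250, else 350. The largest
+ * headroom among the LEDs that are on wins.
+ */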
+
+#define UCONV			1000000LL
+#define MCONV			1000LL
+#define FLASH_VDIP_MARGIN	50000
+#define BOB_EFFICIENCY		900LL
+#define VIN_FLASH_MIN_UV	3300000LL
+static int qpnp_flash_led_calc_max_current(struct qpnp_flash_led *led,
+					int *max_current)
+{
+	int ocv_uv = 0, rbatt_uohm = 0, ibat_now = 0, voltage_hdrm_mv = 0;
+	int rc = 0;
+	int64_t ibat_flash_ua, avail_flash_ua, avail_flash_power_fw;
+	int64_t ibat_safe_ua, vin_flash_uv, vph_flash_uv, vph_flash_vdip;
+
+	/* RESISTANCE = esr_uohm + rslow_uohm */
+	rc = get_property_from_fg(led, POWER_SUPPLY_PROP_RESISTANCE,
+			&rbatt_uohm);
+	if (rc < 0) {
+		pr_err("bms psy does not support resistance, rc=%d\n", rc);
+		return rc;
+	}
+
+	/* If no battery is connected, return max possible flash current */
+	if (!rbatt_uohm) {
+		*max_current = FLASH_LED_MAX_TOTAL_CURRENT_MA;
+		return 0;
+	}
+
+	rc = get_property_from_fg(led, POWER_SUPPLY_PROP_VOLTAGE_OCV, &ocv_uv);
+	if (rc < 0) {
+		pr_err("bms psy does not support OCV, rc=%d\n", rc);
+		return rc;
+	}
+
+	rc = get_property_from_fg(led, POWER_SUPPLY_PROP_CURRENT_NOW,
+			&ibat_now);
+	if (rc < 0) {
+		pr_err("bms psy does not support current, rc=%d\n", rc);
+		return rc;
+	}
+
+	rbatt_uohm += led->pdata->rpara_uohm;
+	voltage_hdrm_mv = qpnp_flash_led_get_voltage_headroom(led);
+	vph_flash_vdip =
+		VPH_DROOP_THRESH_VAL_TO_UV(led->pdata->vph_droop_threshold)
+							+ FLASH_VDIP_MARGIN;
+
+	/* Check if LMH_MITIGATION needs to be triggered */
+	if (!led->trigger_lmh && (ocv_uv < led->pdata->lmh_ocv_threshold_uv ||
+			rbatt_uohm > led->pdata->lmh_rbatt_threshold_uohm)) {
+		led->trigger_lmh = true;
+		rc = qpnp_flash_led_masked_write(led,
+				FLASH_LED_REG_MITIGATION_SW(led->base),
+				FLASH_LED_LMH_MITIGATION_EN_MASK,
+				FLASH_LED_LMH_MITIGATION_ENABLE);
+		if (rc < 0) {
+			pr_err("trigger lmh mitigation failed, rc=%d\n", rc);
+			return rc;
+		}
+
+		/* Wait for LMH mitigation to take effect */
+		udelay(100);
+
+		return qpnp_flash_led_calc_max_current(led, max_current);
+	}
+
+	/*
+	 * Calculate the maximum current that can be pulled out of the
+	 * battery before the battery voltage dips below a safe threshold.
+	 */
+	ibat_safe_ua = div_s64((ocv_uv - vph_flash_vdip) * UCONV,
+				rbatt_uohm);
+
+	if (ibat_safe_ua <= led->pdata->ibatt_ocp_threshold_ua) {
+		/*
+		 * If the calculated current is below the OCP threshold, then
+		 * use it as the possible flash current.
+		 */
+		ibat_flash_ua = ibat_safe_ua - ibat_now;
+		vph_flash_uv = vph_flash_vdip;
+	} else {
+		/*
+		 * If the calculated current is above the OCP threshold, use
+		 * the OCP threshold instead; any higher current would trip
+		 * the battery OCP.
+		 */
+		ibat_flash_ua = led->pdata->ibatt_ocp_threshold_ua - ibat_now;
+		vph_flash_uv = ocv_uv - div64_s64((int64_t)rbatt_uohm
+				* led->pdata->ibatt_ocp_threshold_ua, UCONV);
+	}
+	/* Calculate the input voltage of the flash module. */
+	vin_flash_uv = max((led->pdata->vled_max_uv +
+				(voltage_hdrm_mv * MCONV)), VIN_FLASH_MIN_UV);
+	/* Calculate the available power for the flash module. */
+	avail_flash_power_fw = BOB_EFFICIENCY * vph_flash_uv * ibat_flash_ua;
+	/*
+	 * Calculate the amount of current the flash module can draw before
+	 * collapsing the battery: available power / flash input voltage.
+	 */
+	avail_flash_ua = div64_s64(avail_flash_power_fw, vin_flash_uv * MCONV);
+	pr_debug("avail_iflash=%lld, ocv=%d, ibat=%d, rbatt=%d, trigger_lmh=%d\n",
+		avail_flash_ua, ocv_uv, ibat_now, rbatt_uohm, led->trigger_lmh);
+	*max_current = min(FLASH_LED_MAX_TOTAL_CURRENT_MA,
+			(int)(div64_s64(avail_flash_ua, MCONV)));
+	return 0;
+}
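+/*
+ * Worked example of the model above (illustrative numbers only): with
+ * ocv = 3.8 V, rbatt = 150 mohm, vph_flash_vdip = 3.05 V and no load
+ * current, ibat_safe = (3800000 - 3050000) * UCONV / 150000 = 5 A.
+ * If the OCP threshold is lower, say 4.5 A, the OCP branch is taken and
+ * vph_flash_uv = 3800000 - (150000 * 4500000) / UCONV = 3.125 V. The
+ * budget is then scaled by the 90% (900/1000) BOB efficiency and divided
+ * by the flash input voltage to get the permissible flash current.
+ */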
+
+static int qpnp_flash_led_calc_thermal_current_lim(struct qpnp_flash_led *led,
+						int *thermal_current_lim)
+{
+	int rc;
+	u8 thermal_thrsh1, thermal_thrsh2, thermal_thrsh3, otst_status;
+
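+	/*
+	 * The OTST status bits in LED_STATUS2 only assert once the die
+	 * temperature exceeds the programmed THERMAL_THRSHx thresholds.
+	 * Temporarily dropping all three thresholds to their minimum turns
+	 * the status register into a coarse thermometer: the asserted
+	 * stages index the derated current limit table. The original
+	 * thresholds are restored before returning.
+	 */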
+	/* Store THERMAL_THRSHx register values */
+	rc = qpnp_flash_led_masked_read(led,
+			FLASH_LED_REG_THERMAL_THRSH1(led->base),
+			FLASH_LED_THERMAL_THRSH_MASK,
+			&thermal_thrsh1);
+	if (rc < 0)
+		return rc;
+
+	rc = qpnp_flash_led_masked_read(led,
+			FLASH_LED_REG_THERMAL_THRSH2(led->base),
+			FLASH_LED_THERMAL_THRSH_MASK,
+			&thermal_thrsh2);
+	if (rc < 0)
+		return rc;
+
+	rc = qpnp_flash_led_masked_read(led,
+			FLASH_LED_REG_THERMAL_THRSH3(led->base),
+			FLASH_LED_THERMAL_THRSH_MASK,
+			&thermal_thrsh3);
+	if (rc < 0)
+		return rc;
+
+	/* Lower THERMAL_THRSHx thresholds to minimum */
+	rc = qpnp_flash_led_masked_write(led,
+			FLASH_LED_REG_THERMAL_THRSH1(led->base),
+			FLASH_LED_THERMAL_THRSH_MASK,
+			FLASH_LED_THERMAL_THRSH_MIN);
+	if (rc < 0)
+		return rc;
+
+	rc = qpnp_flash_led_masked_write(led,
+			FLASH_LED_REG_THERMAL_THRSH2(led->base),
+			FLASH_LED_THERMAL_THRSH_MASK,
+			FLASH_LED_THERMAL_THRSH_MIN);
+	if (rc < 0)
+		return rc;
+
+	rc = qpnp_flash_led_masked_write(led,
+			FLASH_LED_REG_THERMAL_THRSH3(led->base),
+			FLASH_LED_THERMAL_THRSH_MASK,
+			FLASH_LED_THERMAL_THRSH_MIN);
+	if (rc < 0)
+		return rc;
+
+	/* Check THERMAL_OTST status */
+	rc = qpnp_flash_led_read(led,
+			FLASH_LED_REG_LED_STATUS2(led->base),
+			&otst_status);
+	if (rc < 0)
+		return rc;
+
+	/* Look up current limit based on THERMAL_OTST status */
+	if (otst_status)
+		*thermal_current_lim =
+			led->pdata->thermal_derate_current[otst_status >> 1];
+
+	/* Restore THERMAL_THRSHx registers to their original values */
+	rc = qpnp_flash_led_masked_write(led,
+			FLASH_LED_REG_THERMAL_THRSH1(led->base),
+			FLASH_LED_THERMAL_THRSH_MASK,
+			thermal_thrsh1);
+	if (rc < 0)
+		return rc;
+
+	rc = qpnp_flash_led_masked_write(led,
+			FLASH_LED_REG_THERMAL_THRSH2(led->base),
+			FLASH_LED_THERMAL_THRSH_MASK,
+			thermal_thrsh2);
+	if (rc < 0)
+		return rc;
+
+	rc = qpnp_flash_led_masked_write(led,
+			FLASH_LED_REG_THERMAL_THRSH3(led->base),
+			FLASH_LED_THERMAL_THRSH_MASK,
+			thermal_thrsh3);
+	if (rc < 0)
+		return rc;
+
+	return 0;
+}
+
+static int qpnp_flash_led_get_max_avail_current(struct qpnp_flash_led *led,
+						int *max_avail_current)
+{
+	int thermal_current_lim = 0, rc;
+
+	led->trigger_lmh = false;
+	rc = qpnp_flash_led_calc_max_current(led, max_avail_current);
+	if (rc < 0) {
+		pr_err("Couldn't calculate max_avail_current, rc=%d\n", rc);
+		return rc;
+	}
+
+	if (led->pdata->thermal_derate_en) {
+		rc = qpnp_flash_led_calc_thermal_current_lim(led,
+			&thermal_current_lim);
+		if (rc < 0) {
+			pr_err("Couldn't calculate thermal_current_lim, rc=%d\n",
+				rc);
+			return rc;
+		}
+	}
+
+	if (thermal_current_lim)
+		*max_avail_current = min(*max_avail_current,
+					thermal_current_lim);
+
+	return 0;
+}
+
+static void qpnp_flash_led_aggregate_max_current(struct flash_node_data *fnode)
+{
+	struct qpnp_flash_led *led = dev_get_drvdata(&fnode->pdev->dev);
+
+	if (fnode->current_ma)
+		led->total_current_ma += fnode->current_ma
+						- fnode->prev_current_ma;
+	else
+		led->total_current_ma -= fnode->prev_current_ma;
+
+	fnode->prev_current_ma = fnode->current_ma;
+}
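+/*
+ * Example of the delta accounting above: if LED1 was previously set to
+ * 500 mA and is now set to 750 mA, total_current_ma grows by 250 mA;
+ * setting it back to 0 subtracts the full previous 500 mA. This keeps a
+ * running sum across all flash nodes without rescanning them.
+ */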
+
+static void qpnp_flash_led_node_set(struct flash_node_data *fnode, int value)
+{
+	int i = 0;
+	int prgm_current_ma = value;
+	int min_ma = fnode->ires_ua / 1000;
+	struct qpnp_flash_led *led = dev_get_drvdata(&fnode->pdev->dev);
+
+	if (value <= 0)
+		prgm_current_ma = 0;
+	else if (value < min_ma)
+		prgm_current_ma = min_ma;
+
+	fnode->ires_idx = fnode->default_ires_idx;
+	fnode->ires_ua = fnode->default_ires_ua;
+
+	prgm_current_ma = min(prgm_current_ma, fnode->max_current);
+	if (prgm_current_ma > max_ires_curr_ma_table[fnode->ires_idx]) {
+		/* find the matching ires */
+		for (i = MAX_IRES_LEVELS - 1; i >= 0; i--) {
+			if (prgm_current_ma <= max_ires_curr_ma_table[i]) {
+				fnode->ires_idx = i;
+				fnode->ires_ua = FLASH_LED_IRES_MIN_UA +
+				      (FLASH_LED_IRES_BASE - fnode->ires_idx) *
+				      FLASH_LED_IRES_DIVISOR;
+				break;
+			}
+		}
+	}
+	fnode->current_ma = prgm_current_ma;
+	fnode->cdev.brightness = prgm_current_ma;
+	fnode->current_reg_val = get_current_reg_code(prgm_current_ma,
+					fnode->ires_ua);
+	fnode->led_on = prgm_current_ma != 0;
+
+	if (led->pdata->chgr_mitigation_sel == FLASH_SW_CHARGER_MITIGATION) {
+		qpnp_flash_led_aggregate_max_current(fnode);
+		led->trigger_chgr = false;
+		if (led->total_current_ma >= 1000)
+			led->trigger_chgr = true;
+	}
+}
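+/*
+ * IRES selection sketch, assuming the resolution constants defined
+ * earlier in this file (5000 uA minimum, 2500 uA step, base index 3):
+ * with a 5 mA default resolution (ires_idx 3, 640 mA ceiling), a request
+ * for 700 mA exceeds the ceiling, so the loop walks up from the finest
+ * resolution and settles on index 2 (7.5 mA, 960 mA ceiling), giving
+ * ires_ua = 5000 + (3 - 2) * 2500 = 7500.
+ */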
+
+static int qpnp_flash_led_switch_disable(struct flash_switch_data *snode)
+{
+	struct qpnp_flash_led *led = dev_get_drvdata(&snode->pdev->dev);
+	int i, rc, addr_offset;
+
+	rc = qpnp_flash_led_masked_write(led,
+				FLASH_LED_EN_LED_CTRL(led->base),
+				snode->led_mask, FLASH_LED_DISABLE);
+	if (rc < 0)
+		return rc;
+
+	if (led->trigger_lmh) {
+		rc = qpnp_flash_led_masked_write(led,
+				FLASH_LED_REG_MITIGATION_SW(led->base),
+				FLASH_LED_LMH_MITIGATION_EN_MASK,
+				FLASH_LED_LMH_MITIGATION_DISABLE);
+		if (rc < 0) {
+			pr_err("disable lmh mitigation failed, rc=%d\n", rc);
+			return rc;
+		}
+	}
+
+	if (!led->trigger_chgr) {
+		rc = qpnp_flash_led_masked_write(led,
+				FLASH_LED_REG_MITIGATION_SW(led->base),
+				FLASH_LED_CHGR_MITIGATION_EN_MASK,
+				FLASH_LED_CHGR_MITIGATION_DISABLE);
+		if (rc < 0) {
+			pr_err("disable chgr mitigation failed, rc=%d\n", rc);
+			return rc;
+		}
+	}
+
+	led->enable--;
+	if (led->enable == 0) {
+		rc = qpnp_flash_led_masked_write(led,
+				FLASH_LED_REG_MOD_CTRL(led->base),
+				FLASH_LED_MOD_CTRL_MASK, FLASH_LED_DISABLE);
+		if (rc < 0)
+			return rc;
+	}
+
+	for (i = 0; i < led->num_fnodes; i++) {
+		if (!led->fnode[i].led_on ||
+				!(snode->led_mask & BIT(led->fnode[i].id)))
+			continue;
+
+		addr_offset = led->fnode[i].id;
+		rc = qpnp_flash_led_masked_write(led,
+			FLASH_LED_REG_TGR_CURRENT(led->base + addr_offset),
+			FLASH_LED_CURRENT_MASK, 0);
+		if (rc < 0)
+			return rc;
+
+		led->fnode[i].led_on = false;
+
+		if (led->fnode[i].strobe_sel == HW_STROBE) {
+			rc = qpnp_flash_led_hw_strobe_enable(&led->fnode[i],
+					led->pdata->hw_strobe_option, false);
+			if (rc < 0) {
+				pr_err("Unable to disable hw strobe, rc=%d\n",
+					rc);
+				return rc;
+			}
+		}
+	}
+
+	if (snode->led_en_pinctrl) {
+		pr_debug("Selecting suspend state for %s\n", snode->cdev.name);
+		rc = pinctrl_select_state(snode->led_en_pinctrl,
+				snode->gpio_state_suspend);
+		if (rc < 0) {
+			pr_err("failed to select pinctrl suspend state rc=%d\n",
+				rc);
+			return rc;
+		}
+	}
+
+	snode->enabled = false;
+	return 0;
+}
+
+static int qpnp_flash_led_switch_set(struct flash_switch_data *snode, bool on)
+{
+	struct qpnp_flash_led *led = dev_get_drvdata(&snode->pdev->dev);
+	int rc, i, addr_offset;
+	u8 val, mask;
+
+	if (snode->enabled == on) {
+		pr_debug("Switch node is already %s!\n",
+			on ? "enabled" : "disabled");
+		return 0;
+	}
+
+	if (!on) {
+		rc = qpnp_flash_led_switch_disable(snode);
+		return rc;
+	}
+
+	/* Iterate over all active leds for this switch node */
+	val = 0;
+	for (i = 0; i < led->num_fnodes; i++)
+		if (led->fnode[i].led_on &&
+				snode->led_mask & BIT(led->fnode[i].id))
+			val |= led->fnode[i].ires_idx << (led->fnode[i].id * 2);
+
+	rc = qpnp_flash_led_masked_write(led, FLASH_LED_REG_IRES(led->base),
+						FLASH_LED_CURRENT_MASK, val);
+	if (rc < 0)
+		return rc;
+
+	val = 0;
+	for (i = 0; i < led->num_fnodes; i++) {
+		if (!led->fnode[i].led_on ||
+				!(snode->led_mask & BIT(led->fnode[i].id)))
+			continue;
+
+		addr_offset = led->fnode[i].id;
+		if (led->fnode[i].strobe_sel == SW_STROBE)
+			mask = FLASH_LED_HW_SW_STROBE_SEL_BIT;
+		else
+			mask = FLASH_HW_STROBE_MASK;
+		rc = qpnp_flash_led_masked_write(led,
+			FLASH_LED_REG_STROBE_CTRL(led->base + addr_offset),
+			mask, led->fnode[i].strobe_ctrl);
+		if (rc < 0)
+			return rc;
+
+		rc = qpnp_flash_led_masked_write(led,
+			FLASH_LED_REG_TGR_CURRENT(led->base + addr_offset),
+			FLASH_LED_CURRENT_MASK, led->fnode[i].current_reg_val);
+		if (rc < 0)
+			return rc;
+
+		rc = qpnp_flash_led_write(led,
+			FLASH_LED_REG_SAFETY_TMR(led->base + addr_offset),
+			led->fnode[i].duration);
+		if (rc < 0)
+			return rc;
+
+		val |= FLASH_LED_ENABLE << led->fnode[i].id;
+
+		if (led->fnode[i].strobe_sel == HW_STROBE) {
+			rc = qpnp_flash_led_hw_strobe_enable(&led->fnode[i],
+					led->pdata->hw_strobe_option, true);
+			if (rc < 0) {
+				pr_err("Unable to enable hw strobe rc=%d\n",
+					rc);
+				return rc;
+			}
+		}
+	}
+
+	if (snode->led_en_pinctrl) {
+		pr_debug("Selecting active state for %s\n", snode->cdev.name);
+		rc = pinctrl_select_state(snode->led_en_pinctrl,
+				snode->gpio_state_active);
+		if (rc < 0) {
+			pr_err("failed to select pinctrl active state rc=%d\n",
+				rc);
+			return rc;
+		}
+	}
+
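+	/*
+	 * led->enable reference-counts the active switch nodes: the module
+	 * control register is only touched on the 0 -> 1 transition here
+	 * and on the 1 -> 0 transition in the disable path, so overlapping
+	 * switches do not glitch the module enable.
+	 */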
+	if (led->enable == 0) {
+		rc = qpnp_flash_led_masked_write(led,
+				FLASH_LED_REG_MOD_CTRL(led->base),
+				FLASH_LED_MOD_CTRL_MASK, FLASH_LED_MOD_ENABLE);
+		if (rc < 0)
+			return rc;
+	}
+	led->enable++;
+
+	if (led->trigger_lmh) {
+		rc = qpnp_flash_led_masked_write(led,
+				FLASH_LED_REG_MITIGATION_SW(led->base),
+				FLASH_LED_LMH_MITIGATION_EN_MASK,
+				FLASH_LED_LMH_MITIGATION_ENABLE);
+		if (rc < 0) {
+			pr_err("trigger lmh mitigation failed, rc=%d\n", rc);
+			return rc;
+		}
+		/* Wait for LMH mitigation to take effect */
+		udelay(500);
+	}
+
+	if (led->trigger_chgr) {
+		rc = qpnp_flash_led_masked_write(led,
+				FLASH_LED_REG_MITIGATION_SW(led->base),
+				FLASH_LED_CHGR_MITIGATION_EN_MASK,
+				FLASH_LED_CHGR_MITIGATION_ENABLE);
+		if (rc < 0) {
+			pr_err("trigger chgr mitigation failed, rc=%d\n", rc);
+			return rc;
+		}
+	}
+
+	rc = qpnp_flash_led_masked_write(led,
+					FLASH_LED_EN_LED_CTRL(led->base),
+					snode->led_mask, val);
+	if (rc < 0)
+		return rc;
+
+	snode->enabled = true;
+	return 0;
+}
+
+int qpnp_flash_led_prepare(struct led_trigger *trig, int options,
+					int *max_current)
+{
+	struct led_classdev *led_cdev;
+	struct flash_switch_data *snode;
+	struct qpnp_flash_led *led;
+	int rc;
+
+	if (!trig) {
+		pr_err("Invalid led_trigger provided\n");
+		return -EINVAL;
+	}
+
+	led_cdev = trigger_to_lcdev(trig);
+	if (!led_cdev) {
+		pr_err("Invalid led_cdev in trigger %s\n", trig->name);
+		return -EINVAL;
+	}
+
+	snode = container_of(led_cdev, struct flash_switch_data, cdev);
+	led = dev_get_drvdata(&snode->pdev->dev);
+
+	if (!(options & FLASH_LED_PREPARE_OPTIONS_MASK)) {
+		pr_err("Invalid options %d\n", options);
+		return -EINVAL;
+	}
+
+	if (options & ENABLE_REGULATOR) {
+		rc = qpnp_flash_led_regulator_enable(led, snode, true);
+		if (rc < 0) {
+			pr_err("enable regulator failed, rc=%d\n", rc);
+			return rc;
+		}
+	}
+
+	if (options & DISABLE_REGULATOR) {
+		rc = qpnp_flash_led_regulator_enable(led, snode, false);
+		if (rc < 0) {
+			pr_err("disable regulator failed, rc=%d\n", rc);
+			return rc;
+		}
+	}
+
+	if (options & QUERY_MAX_CURRENT) {
+		rc = qpnp_flash_led_get_max_avail_current(led, max_current);
+		if (rc < 0) {
+			pr_err("query max current failed, rc=%d\n", rc);
+			return rc;
+		}
+	}
+
+	return 0;
+}
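+/*
+ * Illustrative caller sketch (the flow, not a defined client): a consumer
+ * holding the switch node's trigger would typically do
+ *
+ *	rc = qpnp_flash_led_prepare(trig, ENABLE_REGULATOR, NULL);
+ *	rc = qpnp_flash_led_prepare(trig, QUERY_MAX_CURRENT, &max_ma);
+ *	... strobe the flash while staying within max_ma ...
+ *	rc = qpnp_flash_led_prepare(trig, DISABLE_REGULATOR, NULL);
+ */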
+
+static void qpnp_flash_led_brightness_set(struct led_classdev *led_cdev,
+						enum led_brightness value)
+{
+	struct flash_node_data *fnode = NULL;
+	struct flash_switch_data *snode = NULL;
+	struct qpnp_flash_led *led = NULL;
+	int rc;
+
+	/*
+	 * strncmp() must be used here since a prefix comparison is required
+	 * in order to support names like led:switch_0 and led:flash_1.
+	 */
+	if (!strncmp(led_cdev->name, "led:switch", strlen("led:switch"))) {
+		snode = container_of(led_cdev, struct flash_switch_data, cdev);
+		led = dev_get_drvdata(&snode->pdev->dev);
+	} else if (!strncmp(led_cdev->name, "led:flash", strlen("led:flash")) ||
+			!strncmp(led_cdev->name, "led:torch",
+						strlen("led:torch"))) {
+		fnode = container_of(led_cdev, struct flash_node_data, cdev);
+		led = dev_get_drvdata(&fnode->pdev->dev);
+	}
+
+	if (!led) {
+		pr_err("Failed to get flash driver data\n");
+		return;
+	}
+
+	spin_lock(&led->lock);
+	if (snode) {
+		rc = qpnp_flash_led_switch_set(snode, value > 0);
+		if (rc < 0)
+			pr_err("Failed to set flash LED switch rc=%d\n", rc);
+	} else if (fnode) {
+		qpnp_flash_led_node_set(fnode, value);
+	}
+
+	spin_unlock(&led->lock);
+}
+
+/* sysfs show function for the max_current attribute */
+static ssize_t qpnp_flash_led_max_current_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	int rc, max_current = 0;
+	struct flash_switch_data *snode;
+	struct qpnp_flash_led *led;
+	struct led_classdev *led_cdev = dev_get_drvdata(dev);
+
+	snode = container_of(led_cdev, struct flash_switch_data, cdev);
+	led = dev_get_drvdata(&snode->pdev->dev);
+
+	rc = qpnp_flash_led_get_max_avail_current(led, &max_current);
+	if (rc < 0)
+		pr_err("query max current failed, rc=%d\n", rc);
+
+	return snprintf(buf, PAGE_SIZE, "%d\n", max_current);
+}
+
+/* sysfs attributes exported by flash_led */
+static struct device_attribute qpnp_flash_led_attrs[] = {
+	__ATTR(max_current, 0664, qpnp_flash_led_max_current_show, NULL),
+};
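+/*
+ * Userspace usage sketch (path assumes a switch node named
+ * "led:switch_0"): reading
+ *	/sys/class/leds/led:switch_0/max_current
+ * runs a fresh battery-model query and returns the limit in mA.
+ */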
+
+static int flash_led_psy_notifier_call(struct notifier_block *nb,
+		unsigned long ev, void *v)
+{
+	struct power_supply *psy = v;
+	struct qpnp_flash_led *led =
+			container_of(nb, struct qpnp_flash_led, nb);
+
+	if (ev != PSY_EVENT_PROP_CHANGED)
+		return NOTIFY_OK;
+
+	if (!strcmp(psy->desc->name, "bms")) {
+		led->bms_psy = power_supply_get_by_name("bms");
+		if (!led->bms_psy)
+			pr_err("Failed to get bms power_supply\n");
+		else
+			power_supply_unreg_notifier(&led->nb);
+	}
+
+	return NOTIFY_OK;
+}
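+/*
+ * The "bms" power supply may probe after this driver. The notifier above
+ * is a one-shot hook: it grabs the handle on the first property-change
+ * event reported by "bms" and then unregisters itself.
+ */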
+
+static int flash_led_psy_register_notifier(struct qpnp_flash_led *led)
+{
+	int rc;
+
+	led->nb.notifier_call = flash_led_psy_notifier_call;
+	rc = power_supply_reg_notifier(&led->nb);
+	if (rc < 0) {
+		pr_err("Couldn't register psy notifier, rc = %d\n", rc);
+		return rc;
+	}
+
+	return 0;
+}
+
+/* irq handler */
+static irqreturn_t qpnp_flash_led_irq_handler(int irq, void *_led)
+{
+	struct qpnp_flash_led *led = _led;
+	enum flash_led_irq_type irq_type = INVALID_IRQ;
+	int rc;
+	u8 irq_status, led_status1, led_status2;
+
+	pr_debug("irq received, irq=%d\n", irq);
+
+	rc = qpnp_flash_led_read(led,
+			FLASH_LED_REG_INT_RT_STS(led->base), &irq_status);
+	if (rc < 0) {
+		pr_err("Failed to read interrupt status reg, rc=%d\n", rc);
+		goto exit;
+	}
+
+	if (irq == led->pdata->all_ramp_up_done_irq)
+		irq_type = ALL_RAMP_UP_DONE_IRQ;
+	else if (irq == led->pdata->all_ramp_down_done_irq)
+		irq_type = ALL_RAMP_DOWN_DONE_IRQ;
+	else if (irq == led->pdata->led_fault_irq)
+		irq_type = LED_FAULT_IRQ;
+
+	if (irq_type == ALL_RAMP_UP_DONE_IRQ)
+		atomic_notifier_call_chain(&irq_notifier_list,
+						irq_type, NULL);
+
+	if (irq_type == LED_FAULT_IRQ) {
+		rc = qpnp_flash_led_read(led,
+			FLASH_LED_REG_LED_STATUS1(led->base), &led_status1);
+		if (rc < 0) {
+			pr_err("Failed to read led_status1 reg, rc=%d\n", rc);
+			goto exit;
+		}
+
+		rc = qpnp_flash_led_read(led,
+			FLASH_LED_REG_LED_STATUS2(led->base), &led_status2);
+		if (rc < 0) {
+			pr_err("Failed to read led_status2 reg, rc=%d\n", rc);
+			goto exit;
+		}
+
+		if (led_status1)
+			pr_emerg("led short/open fault detected! led_status1=%x\n",
+				led_status1);
+
+		if (led_status2 & FLASH_LED_VPH_DROOP_FAULT_MASK)
+			pr_emerg("led vph_droop fault detected!\n");
+	}
+
+	pr_debug("irq handled, irq_type=%x, irq_status=%x\n", irq_type,
+		irq_status);
+
+exit:
+	return IRQ_HANDLED;
+}
+
+int qpnp_flash_led_register_irq_notifier(struct notifier_block *nb)
+{
+	return atomic_notifier_chain_register(&irq_notifier_list, nb);
+}
+
+int qpnp_flash_led_unregister_irq_notifier(struct notifier_block *nb)
+{
+	return atomic_notifier_chain_unregister(&irq_notifier_list, nb);
+}
+
+static inline u8 get_safety_timer_code(u32 duration_ms)
+{
+	if (!duration_ms)
+		return 0;
+
+	return (duration_ms / 10) - 1;
+}
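+/*
+ * Example: a 1280 ms safety timeout encodes as (1280 / 10) - 1 = 127;
+ * the hardware timer counts in 10 ms steps with code 0 meaning 10 ms.
+ */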
+
+static inline u8 get_vph_droop_thresh_code(u32 val_mv)
+{
+	if (!val_mv)
+		return 0;
+
+	return (val_mv / 100) - 25;
+}
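+/*
+ * Example: the droop threshold encodes in 100 mV steps offset from
+ * 2500 mV, so 2900 mV -> (2900 / 100) - 25 = 4 and 3200 mV -> 7.
+ */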
+
+static int qpnp_flash_led_parse_each_led_dt(struct qpnp_flash_led *led,
+			struct flash_node_data *fnode, struct device_node *node)
+{
+	const char *temp_string;
+	int rc, min_ma;
+	u32 val;
+	bool hw_strobe = false, edge_trigger = false, active_high = false;
+
+	fnode->pdev = led->pdev;
+	fnode->cdev.brightness_set = qpnp_flash_led_brightness_set;
+	fnode->cdev.brightness_get = qpnp_flash_led_brightness_get;
+
+	rc = of_property_read_string(node, "qcom,led-name", &fnode->cdev.name);
+	if (rc < 0) {
+		pr_err("Unable to read flash LED names\n");
+		return rc;
+	}
+
+	rc = of_property_read_string(node, "label", &temp_string);
+	if (!rc) {
+		if (!strcmp(temp_string, "flash")) {
+			fnode->type = FLASH_LED_TYPE_FLASH;
+		} else if (!strcmp(temp_string, "torch")) {
+			fnode->type = FLASH_LED_TYPE_TORCH;
+		} else {
+			pr_err("Wrong flash LED type\n");
+			return rc;
+		}
+	} else {
+		pr_err("Unable to read flash LED label\n");
+		return rc;
+	}
+
+	rc = of_property_read_u32(node, "qcom,id", &val);
+	if (!rc) {
+		fnode->id = (u8)val;
+	} else {
+		pr_err("Unable to read flash LED ID\n");
+		return rc;
+	}
+
+	rc = of_property_read_string(node, "qcom,default-led-trigger",
+						&fnode->cdev.default_trigger);
+	if (rc < 0) {
+		pr_err("Unable to read trigger name\n");
+		return rc;
+	}
+
+	fnode->default_ires_ua = fnode->ires_ua = FLASH_LED_IRES_DEFAULT_UA;
+	fnode->default_ires_idx = fnode->ires_idx = FLASH_LED_IRES_DEFAULT_VAL;
+	rc = of_property_read_u32(node, "qcom,ires-ua", &val);
+	if (!rc) {
+		fnode->default_ires_ua = fnode->ires_ua = val;
+		fnode->default_ires_idx = fnode->ires_idx =
+			FLASH_LED_IRES_BASE - (val - FLASH_LED_IRES_MIN_UA) /
+			FLASH_LED_IRES_DIVISOR;
+	} else if (rc != -EINVAL) {
+		pr_err("Unable to read current resolution rc=%d\n", rc);
+		return rc;
+	}
+
+	min_ma = fnode->ires_ua / 1000;
+	rc = of_property_read_u32(node, "qcom,max-current", &val);
+	if (!rc) {
+		if (val < min_ma)
+			val = min_ma;
+		fnode->max_current = val;
+		fnode->cdev.max_brightness = val;
+	} else {
+		pr_err("Unable to read max current, rc=%d\n", rc);
+		return rc;
+	}
+
+	rc = of_property_read_u32(node, "qcom,current-ma", &val);
+	if (!rc) {
+		if (val < min_ma || val > fnode->max_current)
+			pr_warn("Invalid operational current specified, capping it\n");
+		if (val < min_ma)
+			val = min_ma;
+		if (val > fnode->max_current)
+			val = fnode->max_current;
+		fnode->current_ma = val;
+		fnode->cdev.brightness = val;
+	} else if (rc != -EINVAL) {
+		pr_err("Unable to read operational current, rc=%d\n", rc);
+		return rc;
+	}
+
+	fnode->duration = FLASH_LED_SAFETY_TMR_DISABLED;
+	rc = of_property_read_u32(node, "qcom,duration-ms", &val);
+	if (!rc) {
+		fnode->duration = get_safety_timer_code(val);
+		if (fnode->duration)
+			fnode->duration |= FLASH_LED_SAFETY_TMR_ENABLE;
+	} else if (rc == -EINVAL) {
+		if (fnode->type == FLASH_LED_TYPE_FLASH) {
+			pr_err("Timer duration is required for flash LED\n");
+			return rc;
+		}
+	} else {
+		pr_err("Unable to read timer duration\n");
+		return rc;
+	}
+
+	fnode->hdrm_val = FLASH_LED_HDRM_VOL_DEFAULT_MV;
+	rc = of_property_read_u32(node, "qcom,hdrm-voltage-mv", &val);
+	if (!rc) {
+		val = (val - FLASH_LED_HDRM_VOL_BASE_MV) /
+						FLASH_LED_HDRM_VOL_STEP_MV;
+		fnode->hdrm_val = (val << FLASH_LED_HDRM_VOL_SHIFT) &
+							FLASH_LED_HDRM_VOL_MASK;
+	} else if (rc != -EINVAL) {
+		pr_err("Unable to read headroom voltage\n");
+		return rc;
+	}
+
+	rc = of_property_read_u32(node, "qcom,hdrm-vol-hi-lo-win-mv", &val);
+	if (!rc) {
+		fnode->hdrm_val |= (val / FLASH_LED_HDRM_VOL_STEP_MV) &
+						~FLASH_LED_HDRM_VOL_MASK;
+	} else if (rc == -EINVAL) {
+		fnode->hdrm_val |= FLASH_LED_HDRM_VOL_HI_LO_WIN_DEFAULT_MV;
+	} else {
+		pr_err("Unable to read hdrm hi-lo window voltage\n");
+		return rc;
+	}
+
+	fnode->strobe_sel = SW_STROBE;
+	rc = of_property_read_u32(node, "qcom,strobe-sel", &val);
+	if (rc < 0) {
+		if (rc != -EINVAL) {
+			pr_err("Unable to read qcom,strobe-sel property\n");
+			return rc;
+		}
+	} else {
+		if (val > LPG_STROBE) {
+			pr_err("Incorrect strobe selection specified %d\n",
+				val);
+			return -EINVAL;
+		}
+		fnode->strobe_sel = (u8)val;
+	}
+
+	/*
+	 * LPG strobe is allowed only on LED3, and the HW strobe option must
+	 * be option 2 or 3.
+	 */
+	if (fnode->strobe_sel == LPG_STROBE) {
+		if (led->pdata->hw_strobe_option ==
+				FLASH_LED_HW_STROBE_OPTION_1) {
+			pr_err("Incorrect strobe option for LPG strobe\n");
+			return -EINVAL;
+		}
+		if (fnode->id != LED3) {
+			pr_err("Incorrect LED chosen for LPG strobe\n");
+			return -EINVAL;
+		}
+	}
+
+	if (fnode->strobe_sel == HW_STROBE) {
+		edge_trigger = of_property_read_bool(node,
+						"qcom,hw-strobe-edge-trigger");
+		active_high = !of_property_read_bool(node,
+						"qcom,hw-strobe-active-low");
+		hw_strobe = true;
+	} else if (fnode->strobe_sel == LPG_STROBE) {
+		/* LPG strobe requires level trigger and active high */
+		edge_trigger = false;
+		active_high = true;
+		hw_strobe = true;
+	}
+	fnode->strobe_ctrl = (hw_strobe << 2) | (edge_trigger << 1) |
+				active_high;
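+	/*
+	 * Resulting STROBE_CTRL layout: bit 2 selects HW strobe, bit 1
+	 * edge (vs. level) trigger, bit 0 active-high polarity. E.g. HW
+	 * strobe, level triggered, active high -> 0b101.
+	 */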
+
+	rc = led_classdev_register(&led->pdev->dev, &fnode->cdev);
+	if (rc < 0) {
+		pr_err("Unable to register led node %d\n", fnode->id);
+		return rc;
+	}
+
+	fnode->cdev.dev->of_node = node;
+	fnode->strobe_pinctrl = devm_pinctrl_get(fnode->cdev.dev);
+	if (IS_ERR_OR_NULL(fnode->strobe_pinctrl)) {
+		pr_debug("No pinctrl defined for %s, err=%ld\n",
+			fnode->cdev.name, PTR_ERR(fnode->strobe_pinctrl));
+		fnode->strobe_pinctrl = NULL;
+	}
+
+	if (fnode->strobe_sel == HW_STROBE) {
+		if (of_find_property(node, "qcom,hw-strobe-gpio", NULL)) {
+			fnode->hw_strobe_gpio = of_get_named_gpio(node,
+						"qcom,hw-strobe-gpio", 0);
+			if (fnode->hw_strobe_gpio < 0) {
+				pr_err("Invalid gpio specified\n");
+				return fnode->hw_strobe_gpio;
+			}
+			gpio_direction_output(fnode->hw_strobe_gpio, 0);
+		} else if (fnode->strobe_pinctrl) {
+			fnode->hw_strobe_gpio = -1;
+			fnode->hw_strobe_state_active =
+				pinctrl_lookup_state(fnode->strobe_pinctrl,
+							"strobe_enable");
+			if (IS_ERR_OR_NULL(fnode->hw_strobe_state_active)) {
+				pr_err("No active pin for hardware strobe, rc=%ld\n",
+					PTR_ERR(fnode->hw_strobe_state_active));
+				fnode->hw_strobe_state_active = NULL;
+			}
+
+			fnode->hw_strobe_state_suspend =
+				pinctrl_lookup_state(fnode->strobe_pinctrl,
+							"strobe_disable");
+			if (IS_ERR_OR_NULL(fnode->hw_strobe_state_suspend)) {
+				pr_err("No suspend pin for hardware strobe, rc=%ld\n",
+					PTR_ERR(fnode->hw_strobe_state_suspend)
+					);
+				fnode->hw_strobe_state_suspend = NULL;
+			}
+		}
+	}
+
+	return 0;
+}
+
+static int qpnp_flash_led_parse_and_register_switch(struct qpnp_flash_led *led,
+						struct flash_switch_data *snode,
+						struct device_node *node)
+{
+	int rc = 0, num;
+	char reg_name[16], reg_sup_name[16];
+
+	rc = of_property_read_string(node, "qcom,led-name", &snode->cdev.name);
+	if (rc < 0) {
+		pr_err("Failed to read switch node name, rc=%d\n", rc);
+		return rc;
+	}
+
+	rc = sscanf(snode->cdev.name, "led:switch_%d", &num);
+	if (!rc) {
+		pr_err("No index found in switch device name\n");
+		return -EINVAL;
+	}
+
+	rc = of_property_read_string(node, "qcom,default-led-trigger",
+					&snode->cdev.default_trigger);
+	if (rc < 0) {
+		pr_err("Unable to read trigger name, rc=%d\n", rc);
+		return rc;
+	}
+
+	rc = of_property_read_u32(node, "qcom,led-mask", &snode->led_mask);
+	if (rc < 0) {
+		pr_err("Unable to read led mask rc=%d\n", rc);
+		return rc;
+	}
+
+	if (snode->led_mask < 1 || snode->led_mask > 7) {
+		pr_err("Invalid value for led-mask\n");
+		return -EINVAL;
+	}
+
+	scnprintf(reg_name, sizeof(reg_name), "switch%d-supply", num);
+	if (of_find_property(led->pdev->dev.of_node, reg_name, NULL)) {
+		scnprintf(reg_sup_name, sizeof(reg_sup_name), "switch%d", num);
+		snode->vreg = devm_regulator_get(&led->pdev->dev, reg_sup_name);
+		if (IS_ERR_OR_NULL(snode->vreg)) {
+			rc = PTR_ERR(snode->vreg);
+			if (rc != -EPROBE_DEFER)
+				pr_err("Failed to get regulator, rc=%d\n", rc);
+			snode->vreg = NULL;
+			return rc;
+		}
+	}
+
+	snode->pdev = led->pdev;
+	snode->cdev.brightness_set = qpnp_flash_led_brightness_set;
+	snode->cdev.brightness_get = qpnp_flash_led_brightness_get;
+	snode->cdev.flags |= LED_KEEP_TRIGGER;
+	rc = led_classdev_register(&led->pdev->dev, &snode->cdev);
+	if (rc < 0) {
+		pr_err("Unable to register led switch node\n");
+		return rc;
+	}
+
+	snode->cdev.dev->of_node = node;
+
+	snode->led_en_pinctrl = devm_pinctrl_get(snode->cdev.dev);
+	if (IS_ERR_OR_NULL(snode->led_en_pinctrl)) {
+		pr_debug("No pinctrl defined for %s, err=%ld\n",
+			snode->cdev.name, PTR_ERR(snode->led_en_pinctrl));
+		snode->led_en_pinctrl = NULL;
+	}
+
+	if (snode->led_en_pinctrl) {
+		snode->gpio_state_active =
+			pinctrl_lookup_state(snode->led_en_pinctrl,
+						"led_enable");
+		if (IS_ERR_OR_NULL(snode->gpio_state_active)) {
+			pr_err("Cannot lookup LED active state\n");
+			devm_pinctrl_put(snode->led_en_pinctrl);
+			snode->led_en_pinctrl = NULL;
+			return PTR_ERR(snode->gpio_state_active);
+		}
+
+		snode->gpio_state_suspend =
+			pinctrl_lookup_state(snode->led_en_pinctrl,
+						"led_disable");
+		if (IS_ERR_OR_NULL(snode->gpio_state_suspend)) {
+			pr_err("Cannot lookup LED disable state\n");
+			devm_pinctrl_put(snode->led_en_pinctrl);
+			snode->led_en_pinctrl = NULL;
+			return PTR_ERR(snode->gpio_state_suspend);
+		}
+	}
+
+	return 0;
+}
+
+static int get_code_from_table(int *table, int len, int value)
+{
+	int i;
+
+	for (i = 0; i < len; i++) {
+		if (value == table[i])
+			break;
+	}
+
+	if (i == len) {
+		pr_err("Couldn't find %d from table\n", value);
+		return -ENODATA;
+	}
+
+	return i;
+}
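+/*
+ * Example: looking up 109 in otst1_threshold_table returns index 4, the
+ * code programmed into THERMAL_THRSH1; a value not in the table fails
+ * with -ENODATA rather than being rounded to a neighbor.
+ */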
+
+static int qpnp_flash_led_parse_common_dt(struct qpnp_flash_led *led,
+						struct device_node *node)
+{
+	struct device_node *revid_node;
+	int rc;
+	u32 val;
+	bool short_circuit_det, open_circuit_det, vph_droop_det;
+
+	revid_node = of_parse_phandle(node, "qcom,pmic-revid", 0);
+	if (!revid_node) {
+		pr_err("Missing qcom,pmic-revid property - driver failed\n");
+		return -EINVAL;
+	}
+
+	led->pdata->pmic_rev_id = get_revid_data(revid_node);
+	if (IS_ERR_OR_NULL(led->pdata->pmic_rev_id)) {
+		pr_err("Unable to get pmic_revid rc=%ld\n",
+			PTR_ERR(led->pdata->pmic_rev_id));
+		/*
+		 * The revid peripheral must be registered; any failure here
+		 * only indicates that the rev-id module has not probed yet.
+		 */
+		return -EPROBE_DEFER;
+	}
+
+	pr_debug("PMIC subtype %d Digital major %d\n",
+		led->pdata->pmic_rev_id->pmic_subtype,
+		led->pdata->pmic_rev_id->rev4);
+
+	led->pdata->hdrm_auto_mode_en = of_property_read_bool(node,
+							"qcom,hdrm-auto-mode");
+
+	led->pdata->isc_delay = FLASH_LED_ISC_DELAY_DEFAULT;
+	rc = of_property_read_u32(node, "qcom,isc-delay-us", &val);
+	if (!rc) {
+		led->pdata->isc_delay =
+				val >> FLASH_LED_ISC_WARMUP_DELAY_SHIFT;
+	} else if (rc != -EINVAL) {
+		pr_err("Unable to read ISC delay, rc=%d\n", rc);
+		return rc;
+	}
+
+	led->pdata->warmup_delay = FLASH_LED_WARMUP_DELAY_DEFAULT;
+	rc = of_property_read_u32(node, "qcom,warmup-delay-us", &val);
+	if (!rc) {
+		led->pdata->warmup_delay =
+				val >> FLASH_LED_ISC_WARMUP_DELAY_SHIFT;
+	} else if (rc != -EINVAL) {
+		pr_err("Unable to read WARMUP delay, rc=%d\n", rc);
+		return rc;
+	}
+
+	short_circuit_det =
+		of_property_read_bool(node, "qcom,short-circuit-det");
+	open_circuit_det = of_property_read_bool(node, "qcom,open-circuit-det");
+	vph_droop_det = of_property_read_bool(node, "qcom,vph-droop-det");
+	led->pdata->current_derate_en_cfg = (vph_droop_det << 2) |
+				(open_circuit_det << 1) | short_circuit_det;
+
+	led->pdata->thermal_derate_en =
+		of_property_read_bool(node, "qcom,thermal-derate-en");
+
+	if (led->pdata->thermal_derate_en) {
+		led->pdata->thermal_derate_current =
+			devm_kcalloc(&led->pdev->dev,
+					FLASH_LED_THERMAL_OTST_LEVELS,
+					sizeof(int), GFP_KERNEL);
+		if (!led->pdata->thermal_derate_current)
+			return -ENOMEM;
+
+		rc = of_property_read_u32_array(node,
+					"qcom,thermal-derate-current",
+					led->pdata->thermal_derate_current,
+					FLASH_LED_THERMAL_OTST_LEVELS);
+		if (rc < 0) {
+			pr_err("Unable to read thermal current limits, rc=%d\n",
+				rc);
+			return rc;
+		}
+	}
+
+	led->pdata->otst_ramp_bkup_en =
+		!of_property_read_bool(node, "qcom,otst-ramp-back-up-dis");
+
+	led->pdata->thermal_derate_slow = -EINVAL;
+	rc = of_property_read_u32(node, "qcom,thermal-derate-slow", &val);
+	if (!rc) {
+		if (val > THERMAL_DERATE_SLOW_MAX) {
+			pr_err("Invalid thermal_derate_slow %d\n", val);
+			return -EINVAL;
+		}
+
+		led->pdata->thermal_derate_slow =
+			get_code_from_table(thermal_derate_slow_table,
+				ARRAY_SIZE(thermal_derate_slow_table), val);
+	} else if (rc != -EINVAL) {
+		pr_err("Unable to read thermal derate slow, rc=%d\n", rc);
+		return rc;
+	}
+
+	led->pdata->thermal_derate_fast = -EINVAL;
+	rc = of_property_read_u32(node, "qcom,thermal-derate-fast", &val);
+	if (!rc) {
+		if (val > THERMAL_DERATE_FAST_MAX) {
+			pr_err("Invalid thermal_derate_fast %d\n", val);
+			return -EINVAL;
+		}
+
+		led->pdata->thermal_derate_fast =
+			get_code_from_table(thermal_derate_fast_table,
+				ARRAY_SIZE(thermal_derate_fast_table), val);
+	} else if (rc != -EINVAL) {
+		pr_err("Unable to read thermal derate fast, rc=%d\n", rc);
+		return rc;
+	}
+
+	led->pdata->thermal_debounce = -EINVAL;
+	rc = of_property_read_u32(node, "qcom,thermal-debounce", &val);
+	if (!rc) {
+		if (val > THERMAL_DEBOUNCE_TIME_MAX) {
+			pr_err("Invalid thermal_debounce %d\n", val);
+			return -EINVAL;
+		}
+
+		if (val < 16)
+			led->pdata->thermal_debounce = 0;
+		else
+			led->pdata->thermal_debounce = ilog2(val) - 3;
+	} else if (rc != -EINVAL) {
+		pr_err("Unable to read thermal debounce, rc=%d\n", rc);
+		return rc;
+	}
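+	/*
+	 * Debounce mapping sketch: values below 16 (in the property's time
+	 * unit) use code 0; above that the code grows logarithmically,
+	 * e.g. 16 -> ilog2(16) - 3 = 1, 32 -> 2, 64 -> 3.
+	 */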
+
+	led->pdata->thermal_hysteresis = -EINVAL;
+	rc = of_property_read_u32(node, "qcom,thermal-hysteresis", &val);
+	if (!rc) {
+		if (led->pdata->pmic_rev_id->pmic_subtype == PM660L_SUBTYPE)
+			val = THERMAL_HYST_TEMP_TO_VAL(val, 20);
+		else
+			val = THERMAL_HYST_TEMP_TO_VAL(val, 15);
+
+		if (val > THERMAL_DERATE_HYSTERESIS_MAX) {
+			pr_err("Invalid thermal_derate_hysteresis %d\n", val);
+			return -EINVAL;
+		}
+
+		led->pdata->thermal_hysteresis = val;
+	} else if (rc != -EINVAL) {
+		pr_err("Unable to read thermal hysteresis, rc=%d\n", rc);
+		return rc;
+	}
+
+	led->pdata->thermal_thrsh1 = -EINVAL;
+	rc = of_property_read_u32(node, "qcom,thermal-thrsh1", &val);
+	if (!rc) {
+		led->pdata->thermal_thrsh1 =
+			get_code_from_table(otst1_threshold_table,
+				ARRAY_SIZE(otst1_threshold_table), val);
+	} else if (rc != -EINVAL) {
+		pr_err("Unable to read thermal thrsh1, rc=%d\n", rc);
+		return rc;
+	}
+
+	led->pdata->thermal_thrsh2 = -EINVAL;
+	rc = of_property_read_u32(node, "qcom,thermal-thrsh2", &val);
+	if (!rc) {
+		led->pdata->thermal_thrsh2 =
+			get_code_from_table(otst2_threshold_table,
+				ARRAY_SIZE(otst2_threshold_table), val);
+	} else if (rc != -EINVAL) {
+		pr_err("Unable to read thermal thrsh2, rc=%d\n", rc);
+		return rc;
+	}
+
+	led->pdata->thermal_thrsh3 = -EINVAL;
+	rc = of_property_read_u32(node, "qcom,thermal-thrsh3", &val);
+	if (!rc) {
+		led->pdata->thermal_thrsh3 =
+			get_code_from_table(otst3_threshold_table,
+				ARRAY_SIZE(otst3_threshold_table), val);
+	} else if (rc != -EINVAL) {
+		pr_err("Unable to read thermal thrsh3, rc=%d\n", rc);
+		return rc;
+	}
+
+	led->pdata->vph_droop_debounce = FLASH_LED_VPH_DROOP_DEBOUNCE_DEFAULT;
+	rc = of_property_read_u32(node, "qcom,vph-droop-debounce-us", &val);
+	if (!rc) {
+		led->pdata->vph_droop_debounce =
+			VPH_DROOP_DEBOUNCE_US_TO_VAL(val);
+	} else if (rc != -EINVAL) {
+		pr_err("Unable to read VPH droop debounce, rc=%d\n", rc);
+		return rc;
+	}
+
+	if (led->pdata->vph_droop_debounce > FLASH_LED_DEBOUNCE_MAX) {
+		pr_err("Invalid VPH droop debounce specified\n");
+		return -EINVAL;
+	}
+
+	led->pdata->vph_droop_threshold = FLASH_LED_VPH_DROOP_THRESH_DEFAULT;
+	rc = of_property_read_u32(node, "qcom,vph-droop-threshold-mv", &val);
+	if (!rc) {
+		led->pdata->vph_droop_threshold =
+			get_vph_droop_thresh_code(val);
+	} else if (rc != -EINVAL) {
+		pr_err("Unable to read VPH droop threshold, rc=%d\n", rc);
+		return rc;
+	}
+
+	if (led->pdata->vph_droop_threshold > FLASH_LED_VPH_DROOP_THRESH_MAX) {
+		pr_err("Invalid VPH droop threshold specified\n");
+		return -EINVAL;
+	}
+
+	led->pdata->vph_droop_hysteresis =
+			FLASH_LED_VPH_DROOP_HYST_DEFAULT;
+	rc = of_property_read_u32(node, "qcom,vph-droop-hysteresis-mv", &val);
+	if (!rc) {
+		led->pdata->vph_droop_hysteresis =
+			VPH_DROOP_HYST_MV_TO_VAL(val);
+	} else if (rc != -EINVAL) {
+		pr_err("Unable to read VPH droop hysteresis, rc=%d\n", rc);
+		return rc;
+	}
+
+	if (led->pdata->vph_droop_hysteresis > FLASH_LED_HYSTERESIS_MAX) {
+		pr_err("Invalid VPH droop hysteresis specified\n");
+		return -EINVAL;
+	}
+
+	led->pdata->vph_droop_hysteresis <<= FLASH_LED_VPH_DROOP_HYST_SHIFT;
+
+	led->pdata->hw_strobe_option = -EINVAL;
+	rc = of_property_read_u32(node, "qcom,hw-strobe-option", &val);
+	if (!rc) {
+		led->pdata->hw_strobe_option = val;
+	} else if (rc != -EINVAL) {
+		pr_err("Unable to parse hw strobe option, rc=%d\n", rc);
+		return rc;
+	}
+
+	rc = of_property_read_u32(node, "qcom,led1n2-iclamp-low-ma", &val);
+	if (!rc) {
+		led->pdata->led1n2_iclamp_low_ma = val;
+	} else if (rc != -EINVAL) {
+		pr_err("Unable to read led1n2_iclamp_low current, rc=%d\n", rc);
+		return rc;
+	}
+
+	rc = of_property_read_u32(node, "qcom,led1n2-iclamp-mid-ma", &val);
+	if (!rc) {
+		led->pdata->led1n2_iclamp_mid_ma = val;
+	} else if (rc != -EINVAL) {
+		pr_err("Unable to read led1n2_iclamp_mid current, rc=%d\n", rc);
+		return rc;
+	}
+
+	rc = of_property_read_u32(node, "qcom,led3-iclamp-low-ma", &val);
+	if (!rc) {
+		led->pdata->led3_iclamp_low_ma = val;
+	} else if (rc != -EINVAL) {
+		pr_err("Unable to read led3_iclamp_low current, rc=%d\n", rc);
+		return rc;
+	}
+
+	rc = of_property_read_u32(node, "qcom,led3-iclamp-mid-ma", &val);
+	if (!rc) {
+		led->pdata->led3_iclamp_mid_ma = val;
+	} else if (rc != -EINVAL) {
+		pr_err("Unable to read led3_iclamp_mid current, rc=%d\n", rc);
+		return rc;
+	}
+
+	led->pdata->vled_max_uv = FLASH_LED_VLED_MAX_DEFAULT_UV;
+	rc = of_property_read_u32(node, "qcom,vled-max-uv", &val);
+	if (!rc) {
+		led->pdata->vled_max_uv = val;
+	} else if (rc != -EINVAL) {
+		pr_err("Unable to parse vled_max voltage, rc=%d\n", rc);
+		return rc;
+	}
+
+	led->pdata->ibatt_ocp_threshold_ua =
+		FLASH_LED_IBATT_OCP_THRESH_DEFAULT_UA;
+	rc = of_property_read_u32(node, "qcom,ibatt-ocp-threshold-ua", &val);
+	if (!rc) {
+		led->pdata->ibatt_ocp_threshold_ua = val;
+	} else if (rc != -EINVAL) {
+		pr_err("Unable to parse ibatt_ocp threshold, rc=%d\n", rc);
+		return rc;
+	}
+
+	led->pdata->rpara_uohm = FLASH_LED_RPARA_DEFAULT_UOHM;
+	rc = of_property_read_u32(node, "qcom,rparasitic-uohm", &val);
+	if (!rc) {
+		led->pdata->rpara_uohm = val;
+	} else if (rc != -EINVAL) {
+		pr_err("Unable to parse rparasitic, rc=%d\n", rc);
+		return rc;
+	}
+
+	led->pdata->lmh_ocv_threshold_uv =
+		FLASH_LED_LMH_OCV_THRESH_DEFAULT_UV;
+	rc = of_property_read_u32(node, "qcom,lmh-ocv-threshold-uv", &val);
+	if (!rc) {
+		led->pdata->lmh_ocv_threshold_uv = val;
+	} else if (rc != -EINVAL) {
+		pr_err("Unable to parse lmh ocv threshold, rc=%d\n", rc);
+		return rc;
+	}
+
+	led->pdata->lmh_rbatt_threshold_uohm =
+		FLASH_LED_LMH_RBATT_THRESH_DEFAULT_UOHM;
+	rc = of_property_read_u32(node, "qcom,lmh-rbatt-threshold-uohm", &val);
+	if (!rc) {
+		led->pdata->lmh_rbatt_threshold_uohm = val;
+	} else if (rc != -EINVAL) {
+		pr_err("Unable to parse lmh rbatt threshold, rc=%d\n", rc);
+		return rc;
+	}
+
+	led->pdata->lmh_level = FLASH_LED_LMH_LEVEL_DEFAULT;
+	rc = of_property_read_u32(node, "qcom,lmh-level", &val);
+	if (!rc) {
+		led->pdata->lmh_level = val;
+	} else if (rc != -EINVAL) {
+		pr_err("Unable to parse lmh_level, rc=%d\n", rc);
+		return rc;
+	}
+
+	led->pdata->lmh_mitigation_sel = FLASH_LED_LMH_MITIGATION_SEL_DEFAULT;
+	rc = of_property_read_u32(node, "qcom,lmh-mitigation-sel", &val);
+	if (!rc) {
+		led->pdata->lmh_mitigation_sel = val;
+	} else if (rc != -EINVAL) {
+		pr_err("Unable to parse lmh_mitigation_sel, rc=%d\n", rc);
+		return rc;
+	}
+
+	if (led->pdata->lmh_mitigation_sel > FLASH_LED_MITIGATION_SEL_MAX) {
+		pr_err("Invalid lmh_mitigation_sel specified\n");
+		return -EINVAL;
+	}
+
+	led->pdata->chgr_mitigation_sel = FLASH_SW_CHARGER_MITIGATION;
+	rc = of_property_read_u32(node, "qcom,chgr-mitigation-sel", &val);
+	if (!rc) {
+		led->pdata->chgr_mitigation_sel = val;
+	} else if (rc != -EINVAL) {
+		pr_err("Unable to parse chgr_mitigation_sel, rc=%d\n", rc);
+		return rc;
+	}
+
+	if (led->pdata->chgr_mitigation_sel > FLASH_LED_MITIGATION_SEL_MAX) {
+		pr_err("Invalid chgr_mitigation_sel specified\n");
+		return -EINVAL;
+	}
+
+	led->pdata->iled_thrsh_val = FLASH_LED_CHGR_MITIGATION_THRSH_DEFAULT;
+	rc = of_property_read_u32(node, "qcom,iled-thrsh-ma", &val);
+	if (!rc) {
+		led->pdata->iled_thrsh_val = MITIGATION_THRSH_MA_TO_VAL(val);
+	} else if (rc != -EINVAL) {
+		pr_err("Unable to parse iled_thrsh_val, rc=%d\n", rc);
+		return rc;
+	}
+
+	if (led->pdata->iled_thrsh_val > FLASH_LED_CHGR_MITIGATION_THRSH_MAX) {
+		pr_err("Invalid iled_thrsh_val specified\n");
+		return -EINVAL;
+	}
+
+	led->pdata->all_ramp_up_done_irq =
+		of_irq_get_byname(node, "all-ramp-up-done-irq");
+	if (led->pdata->all_ramp_up_done_irq < 0)
+		pr_debug("all-ramp-up-done-irq not used\n");
+
+	led->pdata->all_ramp_down_done_irq =
+		of_irq_get_byname(node, "all-ramp-down-done-irq");
+	if (led->pdata->all_ramp_down_done_irq < 0)
+		pr_debug("all-ramp-down-done-irq not used\n");
+
+	led->pdata->led_fault_irq =
+		of_irq_get_byname(node, "led-fault-irq");
+	if (led->pdata->led_fault_irq < 0)
+		pr_debug("led-fault-irq not used\n");
+
+	return 0;
+}
+
+static int qpnp_flash_led_probe(struct platform_device *pdev)
+{
+	struct qpnp_flash_led *led;
+	struct device_node *node, *temp;
+	const char *temp_string;
+	unsigned int base;
+	int rc, i = 0, j = 0;
+
+	node = pdev->dev.of_node;
+	if (!node) {
+		pr_err("No flash LED nodes defined\n");
+		return -ENODEV;
+	}
+
+	rc = of_property_read_u32(node, "reg", &base);
+	if (rc < 0) {
+		pr_err("Couldn't find reg in node %s, rc = %d\n",
+			node->full_name, rc);
+		return rc;
+	}
+
+	led = devm_kzalloc(&pdev->dev, sizeof(struct qpnp_flash_led),
+								GFP_KERNEL);
+	if (!led)
+		return -ENOMEM;
+
+	led->regmap = dev_get_regmap(pdev->dev.parent, NULL);
+	if (!led->regmap) {
+		pr_err("Couldn't get parent's regmap\n");
+		return -EINVAL;
+	}
+
+	led->base = base;
+	led->pdev = pdev;
+	led->pdata = devm_kzalloc(&pdev->dev,
+			sizeof(struct flash_led_platform_data), GFP_KERNEL);
+	if (!led->pdata)
+		return -ENOMEM;
+
+	rc = qpnp_flash_led_parse_common_dt(led, node);
+	if (rc < 0) {
+		pr_err("Failed to parse common flash LED device tree\n");
+		return rc;
+	}
+
+	for_each_available_child_of_node(node, temp) {
+		rc = of_property_read_string(temp, "label", &temp_string);
+		if (rc < 0) {
+			pr_err("Failed to parse label, rc=%d\n", rc);
+			return rc;
+		}
+
+		if (!strcmp("switch", temp_string)) {
+			led->num_snodes++;
+		} else if (!strcmp("flash", temp_string) ||
+				!strcmp("torch", temp_string)) {
+			led->num_fnodes++;
+		} else {
+			pr_err("Invalid label for led node\n");
+			return -EINVAL;
+		}
+	}
+
+	if (!led->num_fnodes) {
+		pr_err("No LED nodes defined\n");
+		return -ECHILD;
+	}
+
+	led->fnode = devm_kcalloc(&pdev->dev, led->num_fnodes,
+				sizeof(*led->fnode),
+				GFP_KERNEL);
+	if (!led->fnode)
+		return -ENOMEM;
+
+	led->snode = devm_kcalloc(&pdev->dev, led->num_snodes,
+				sizeof(*led->snode),
+				GFP_KERNEL);
+	if (!led->snode)
+		return -ENOMEM;
+
+	temp = NULL;
+	i = 0;
+	j = 0;
+	for_each_available_child_of_node(node, temp) {
+		rc = of_property_read_string(temp, "label", &temp_string);
+		if (rc < 0) {
+			pr_err("Failed to parse label, rc=%d\n", rc);
+			return rc;
+		}
+
+		if (!strcmp("flash", temp_string) ||
+				!strcmp("torch", temp_string)) {
+			rc = qpnp_flash_led_parse_each_led_dt(led,
+					&led->fnode[i], temp);
+			if (rc < 0) {
+				pr_err("Unable to parse flash node %d rc=%d\n",
+					i, rc);
+				goto error_led_register;
+			}
+			i++;
+		}
+
+		if (!strcmp("switch", temp_string)) {
+			rc = qpnp_flash_led_parse_and_register_switch(led,
+					&led->snode[j], temp);
+			if (rc < 0) {
+				pr_err("Unable to parse and register switch node, rc=%d\n",
+					rc);
+				goto error_switch_register;
+			}
+			j++;
+		}
+	}
+
+	/* setup irqs */
+	if (led->pdata->all_ramp_up_done_irq >= 0) {
+		rc = devm_request_threaded_irq(&led->pdev->dev,
+			led->pdata->all_ramp_up_done_irq,
+			NULL, qpnp_flash_led_irq_handler,
+			IRQF_ONESHOT,
+			"qpnp_flash_led_all_ramp_up_done_irq", led);
+		if (rc < 0) {
+			pr_err("Unable to request all_ramp_up_done(%d) IRQ(err:%d)\n",
+				led->pdata->all_ramp_up_done_irq, rc);
+			goto error_switch_register;
+		}
+	}
+
+	if (led->pdata->all_ramp_down_done_irq >= 0) {
+		rc = devm_request_threaded_irq(&led->pdev->dev,
+			led->pdata->all_ramp_down_done_irq,
+			NULL, qpnp_flash_led_irq_handler,
+			IRQF_ONESHOT,
+			"qpnp_flash_led_all_ramp_down_done_irq", led);
+		if (rc < 0) {
+			pr_err("Unable to request all_ramp_down_done(%d) IRQ(err:%d)\n",
+				led->pdata->all_ramp_down_done_irq, rc);
+			goto error_switch_register;
+		}
+	}
+
+	if (led->pdata->led_fault_irq >= 0) {
+		rc = devm_request_threaded_irq(&led->pdev->dev,
+			led->pdata->led_fault_irq,
+			NULL, qpnp_flash_led_irq_handler,
+			IRQF_ONESHOT,
+			"qpnp_flash_led_fault_irq", led);
+		if (rc < 0) {
+			pr_err("Unable to request led_fault(%d) IRQ(err:%d)\n",
+				led->pdata->led_fault_irq, rc);
+			goto error_switch_register;
+		}
+	}
+
+	led->bms_psy = power_supply_get_by_name("bms");
+	if (!led->bms_psy) {
+		rc = flash_led_psy_register_notifier(led);
+		if (rc < 0) {
+			pr_err("Couldn't register psy notifier, rc = %d\n", rc);
+			goto error_switch_register;
+		}
+	}
+
+	rc = qpnp_flash_led_init_settings(led);
+	if (rc < 0) {
+		pr_err("Failed to initialize flash LED, rc=%d\n", rc);
+		goto unreg_notifier;
+	}
+
+	for (i = 0; i < led->num_snodes; i++) {
+		for (j = 0; j < ARRAY_SIZE(qpnp_flash_led_attrs); j++) {
+			rc = sysfs_create_file(&led->snode[i].cdev.dev->kobj,
+					&qpnp_flash_led_attrs[j].attr);
+			if (rc < 0) {
+				pr_err("sysfs creation failed, rc=%d\n", rc);
+				goto sysfs_fail;
+			}
+		}
+	}
+
+	spin_lock_init(&led->lock);
+
+	dev_set_drvdata(&pdev->dev, led);
+
+	return 0;
+
+sysfs_fail:
+	for (--j; j >= 0; j--)
+		sysfs_remove_file(&led->snode[i].cdev.dev->kobj,
+				&qpnp_flash_led_attrs[j].attr);
+
+	for (--i; i >= 0; i--) {
+		for (j = 0; j < ARRAY_SIZE(qpnp_flash_led_attrs); j++)
+			sysfs_remove_file(&led->snode[i].cdev.dev->kobj,
+					&qpnp_flash_led_attrs[j].attr);
+	}
+
+	i = led->num_snodes;
+unreg_notifier:
+	power_supply_unreg_notifier(&led->nb);
+error_switch_register:
+	while (i > 0)
+		led_classdev_unregister(&led->snode[--i].cdev);
+	i = led->num_fnodes;
+error_led_register:
+	while (i > 0)
+		led_classdev_unregister(&led->fnode[--i].cdev);
+
+	return rc;
+}
+
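+/*
+ * Illustrative (hypothetical) device tree layout that this probe
+ * expects: a parent node carrying "reg" plus one child node per LED,
+ * each labeled "flash", "torch" or "switch". Node names and addresses
+ * are assumptions for the example; only "compatible", "reg" and
+ * "label" are taken from the code above.
+ *
+ *	qcom,leds@d300 {
+ *		compatible = "qcom,qpnp-flash-led-v2";
+ *		reg = <0xd300 0x100>;
+ *
+ *		pm_flash0: qcom,flash_0 {
+ *			label = "flash";
+ *		};
+ *
+ *		pm_switch0: qcom,led_switch_0 {
+ *			label = "switch";
+ *		};
+ *	};
+ */
+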
+static int qpnp_flash_led_remove(struct platform_device *pdev)
+{
+	struct qpnp_flash_led *led = dev_get_drvdata(&pdev->dev);
+	int i, j;
+
+	for (i = 0; i < led->num_snodes; i++) {
+		for (j = 0; j < ARRAY_SIZE(qpnp_flash_led_attrs); j++)
+			sysfs_remove_file(&led->snode[i].cdev.dev->kobj,
+					&qpnp_flash_led_attrs[j].attr);
+
+		if (led->snode[i].regulator_on)
+			qpnp_flash_led_regulator_enable(led,
+					&led->snode[i], false);
+	}
+
+	while (i > 0)
+		led_classdev_unregister(&led->snode[--i].cdev);
+
+	i = led->num_fnodes;
+	while (i > 0)
+		led_classdev_unregister(&led->fnode[--i].cdev);
+
+	power_supply_unreg_notifier(&led->nb);
+	return 0;
+}
+
+const struct of_device_id qpnp_flash_led_match_table[] = {
+	{ .compatible = "qcom,qpnp-flash-led-v2",},
+	{ },
+};
+
+static struct platform_driver qpnp_flash_led_driver = {
+	.driver		= {
+		.name = "qcom,qpnp-flash-led-v2",
+		.of_match_table = qpnp_flash_led_match_table,
+	},
+	.probe		= qpnp_flash_led_probe,
+	.remove		= qpnp_flash_led_remove,
+};
+
+static int __init qpnp_flash_led_init(void)
+{
+	return platform_driver_register(&qpnp_flash_led_driver);
+}
+late_initcall(qpnp_flash_led_init);
+
+static void __exit qpnp_flash_led_exit(void)
+{
+	platform_driver_unregister(&qpnp_flash_led_driver);
+}
+module_exit(qpnp_flash_led_exit);
+
+MODULE_DESCRIPTION("QPNP Flash LED driver v2");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("leds:leds-qpnp-flash-v2");
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/leds/leds-qpnp-wled.c	2019-01-22 16:16:24.147252238 +0100
@@ -0,0 +1,2784 @@
+/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/regmap.h>
+#include <linux/errno.h>
+#include <linux/leds.h>
+#include <linux/slab.h>
+#include <linux/of_device.h>
+#include <linux/of_address.h>
+#include <linux/spmi.h>
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/err.h>
+#include <linux/delay.h>
+#include <linux/leds-qpnp-wled.h>
+#include <linux/qpnp/qpnp-revid.h>
+
+/* base addresses */
+#define QPNP_WLED_CTRL_BASE		"qpnp-wled-ctrl-base"
+#define QPNP_WLED_SINK_BASE		"qpnp-wled-sink-base"
+
+/* ctrl registers */
+#define QPNP_WLED_FAULT_STATUS(b)	(b + 0x08)
+#define QPNP_WLED_INT_RT_STS(b)		(b + 0x10)
+#define QPNP_WLED_EN_REG(b)		(b + 0x46)
+#define QPNP_WLED_FDBK_OP_REG(b)	(b + 0x48)
+#define QPNP_WLED_VREF_REG(b)		(b + 0x49)
+#define QPNP_WLED_BOOST_DUTY_REG(b)	(b + 0x4B)
+#define QPNP_WLED_SWITCH_FREQ_REG(b)	(b + 0x4C)
+#define QPNP_WLED_OVP_REG(b)		(b + 0x4D)
+#define QPNP_WLED_ILIM_REG(b)		(b + 0x4E)
+#define QPNP_WLED_AMOLED_VOUT_REG(b)	(b + 0x4F)
+#define QPNP_WLED_SOFTSTART_RAMP_DLY(b) (b + 0x53)
+#define QPNP_WLED_VLOOP_COMP_RES_REG(b)	(b + 0x55)
+#define QPNP_WLED_VLOOP_COMP_GM_REG(b)	(b + 0x56)
+#define QPNP_WLED_EN_PSM_REG(b)		(b + 0x5A)
+#define QPNP_WLED_PSM_CTRL_REG(b)	(b + 0x5B)
+#define QPNP_WLED_LCD_AUTO_PFM_REG(b)	(b + 0x5C)
+#define QPNP_WLED_SC_PRO_REG(b)		(b + 0x5E)
+#define QPNP_WLED_SWIRE_AVDD_REG(b)	(b + 0x5F)
+#define QPNP_WLED_CTRL_SPARE_REG(b)	(b + 0xDF)
+#define QPNP_WLED_TEST1_REG(b)		(b + 0xE2)
+#define QPNP_WLED_TEST4_REG(b)		(b + 0xE5)
+#define QPNP_WLED_REF_7P7_TRIM_REG(b)	(b + 0xF2)
+
+#define QPNP_WLED_7P7_TRIM_MASK		GENMASK(3, 0)
+#define QPNP_WLED_EN_MASK		0x7F
+#define QPNP_WLED_EN_SHIFT		7
+#define QPNP_WLED_FDBK_OP_MASK		0xF8
+#define QPNP_WLED_VREF_MASK		GENMASK(3, 0)
+
+#define QPNP_WLED_VLOOP_COMP_RES_MASK			0xF0
+#define QPNP_WLED_VLOOP_COMP_RES_OVERWRITE		0x80
+#define QPNP_WLED_LOOP_COMP_RES_STEP_KOHM		20
+#define QPNP_WLED_LOOP_COMP_RES_MIN_KOHM		20
+#define QPNP_WLED_LOOP_COMP_RES_MAX_KOHM		320
+#define QPNP_WLED_VLOOP_COMP_GM_MASK			GENMASK(3, 0)
+#define QPNP_WLED_VLOOP_COMP_GM_OVERWRITE		0x80
+#define QPNP_WLED_VLOOP_COMP_AUTO_GM_EN			BIT(6)
+#define QPNP_WLED_VLOOP_COMP_AUTO_GM_THRESH_MASK	GENMASK(5, 4)
+#define QPNP_WLED_VLOOP_COMP_AUTO_GM_THRESH_SHIFT	4
+#define QPNP_WLED_LOOP_EA_GM_DFLT_AMOLED_PMI8994	0x03
+#define QPNP_WLED_LOOP_GM_DFLT_AMOLED_PMI8998		0x09
+#define QPNP_WLED_LOOP_GM_DFLT_WLED			0x09
+#define QPNP_WLED_LOOP_EA_GM_MIN			0x0
+#define QPNP_WLED_LOOP_EA_GM_MAX			0xF
+#define QPNP_WLED_LOOP_AUTO_GM_THRESH_MAX		3
+#define QPNP_WLED_LOOP_AUTO_GM_DFLT_THRESH		1
+#define QPNP_WLED_VREF_PSM_MASK				0xF8
+#define QPNP_WLED_VREF_PSM_STEP_MV			50
+#define QPNP_WLED_VREF_PSM_MIN_MV			400
+#define QPNP_WLED_VREF_PSM_MAX_MV			750
+#define QPNP_WLED_VREF_PSM_DFLT_AMOLED_MV		450
+#define QPNP_WLED_PSM_OVERWRITE_BIT			BIT(7)
+#define QPNP_WLED_LCD_AUTO_PFM_DFLT_THRESH		1
+#define QPNP_WLED_LCD_AUTO_PFM_THRESH_MAX		0xF
+#define QPNP_WLED_LCD_AUTO_PFM_EN_SHIFT			7
+#define QPNP_WLED_LCD_AUTO_PFM_EN_BIT			BIT(7)
+#define QPNP_WLED_LCD_AUTO_PFM_THRESH_MASK		GENMASK(3, 0)
+#define QPNP_WLED_EN_PSM_BIT				BIT(7)
+
+#define QPNP_WLED_ILIM_MASK		GENMASK(2, 0)
+#define QPNP_WLED_ILIM_OVERWRITE	BIT(7)
+#define PMI8994_WLED_ILIM_MIN_MA	105
+#define PMI8994_WLED_ILIM_MAX_MA	1980
+#define PMI8994_WLED_DFLT_ILIM_MA	980
+#define PMI8994_AMOLED_DFLT_ILIM_MA	385
+#define PMI8998_WLED_ILIM_MAX_MA	1500
+#define PMI8998_WLED_DFLT_ILIM_MA	970
+#define PMI8998_AMOLED_DFLT_ILIM_MA	620
+#define QPNP_WLED_BOOST_DUTY_MASK	0xFC
+#define QPNP_WLED_BOOST_DUTY_STEP_NS	52
+#define QPNP_WLED_BOOST_DUTY_MIN_NS	26
+#define QPNP_WLED_BOOST_DUTY_MAX_NS	156
+#define QPNP_WLED_DEF_BOOST_DUTY_NS	104
+#define QPNP_WLED_SWITCH_FREQ_MASK	GENMASK(3, 0)
+#define QPNP_WLED_SWITCH_FREQ_OVERWRITE BIT(7)
+#define QPNP_WLED_OVP_MASK		GENMASK(1, 0)
+#define QPNP_WLED_TEST4_EN_DEB_BYPASS_ILIM_BIT	BIT(6)
+#define QPNP_WLED_TEST4_EN_SH_FOR_SS_BIT	BIT(5)
+#define QPNP_WLED_TEST4_EN_CLAMP_BIT		BIT(4)
+#define QPNP_WLED_TEST4_EN_SOFT_START_BIT	BIT(1)
+#define QPNP_WLED_TEST4_EN_VREF_UP			\
+		(QPNP_WLED_TEST4_EN_SH_FOR_SS_BIT |	\
+		QPNP_WLED_TEST4_EN_CLAMP_BIT |		\
+		QPNP_WLED_TEST4_EN_SOFT_START_BIT)
+#define QPNP_WLED_TEST4_EN_IIND_UP	0x1
+#define QPNP_WLED_ILIM_FAULT_BIT	BIT(0)
+#define QPNP_WLED_OVP_FAULT_BIT		BIT(1)
+#define QPNP_WLED_SC_FAULT_BIT		BIT(2)
+#define QPNP_WLED_OVP_FLT_RT_STS_BIT	BIT(1)
+
+/* QPNP_WLED_SOFTSTART_RAMP_DLY */
+#define SOFTSTART_OVERWRITE_BIT		BIT(7)
+#define SOFTSTART_RAMP_DELAY_MASK	GENMASK(2, 0)
+
+/* sink registers */
+#define QPNP_WLED_CURR_SINK_REG(b)	(b + 0x46)
+#define QPNP_WLED_SYNC_REG(b)		(b + 0x47)
+#define QPNP_WLED_MOD_REG(b)		(b + 0x4A)
+#define QPNP_WLED_HYB_THRES_REG(b)	(b + 0x4B)
+#define QPNP_WLED_MOD_EN_REG(b, n)	(b + 0x50 + (n * 0x10))
+#define QPNP_WLED_SYNC_DLY_REG(b, n)	(QPNP_WLED_MOD_EN_REG(b, n) + 0x01)
+#define QPNP_WLED_FS_CURR_REG(b, n)	(QPNP_WLED_MOD_EN_REG(b, n) + 0x02)
+#define QPNP_WLED_CABC_REG(b, n)	(QPNP_WLED_MOD_EN_REG(b, n) + 0x06)
+#define QPNP_WLED_BRIGHT_LSB_REG(b, n)	(QPNP_WLED_MOD_EN_REG(b, n) + 0x07)
+#define QPNP_WLED_BRIGHT_MSB_REG(b, n)	(QPNP_WLED_MOD_EN_REG(b, n) + 0x08)
+#define QPNP_WLED_SINK_TEST5_REG(b)	(b + 0xE6)
+
+#define QPNP_WLED_MOD_FREQ_1200_KHZ	1200
+#define QPNP_WLED_MOD_FREQ_2400_KHZ	2400
+#define QPNP_WLED_MOD_FREQ_9600_KHZ	9600
+#define QPNP_WLED_MOD_FREQ_19200_KHZ	19200
+#define QPNP_WLED_MOD_FREQ_MASK		0x3F
+#define QPNP_WLED_MOD_FREQ_SHIFT	6
+#define QPNP_WLED_ACC_CLK_FREQ_MASK	0xE7
+#define QPNP_WLED_ACC_CLK_FREQ_SHIFT	3
+#define QPNP_WLED_PHASE_STAG_MASK	0xDF
+#define QPNP_WLED_PHASE_STAG_SHIFT	5
+#define QPNP_WLED_DIM_RES_MASK		0xFD
+#define QPNP_WLED_DIM_RES_SHIFT		1
+#define QPNP_WLED_DIM_HYB_MASK		0xFB
+#define QPNP_WLED_DIM_HYB_SHIFT		2
+#define QPNP_WLED_DIM_ANA_MASK		0xFE
+#define QPNP_WLED_HYB_THRES_MASK	0xF8
+#define QPNP_WLED_HYB_THRES_MIN		78
+#define QPNP_WLED_DEF_HYB_THRES		625
+#define QPNP_WLED_HYB_THRES_MAX		10000
+#define QPNP_WLED_MOD_EN_MASK		0x7F
+#define QPNP_WLED_MOD_EN_SHFT		7
+#define QPNP_WLED_MOD_EN		1
+#define QPNP_WLED_GATE_DRV_MASK		0xFE
+#define QPNP_WLED_SYNC_DLY_MASK		GENMASK(2, 0)
+#define QPNP_WLED_SYNC_DLY_MIN_US	0
+#define QPNP_WLED_SYNC_DLY_MAX_US	1400
+#define QPNP_WLED_SYNC_DLY_STEP_US	200
+#define QPNP_WLED_DEF_SYNC_DLY_US	400
+#define QPNP_WLED_FS_CURR_MASK		GENMASK(3, 0)
+#define QPNP_WLED_FS_CURR_MIN_UA	0
+#define QPNP_WLED_FS_CURR_MAX_UA	30000
+#define QPNP_WLED_FS_CURR_STEP_UA	2500
+#define QPNP_WLED_CABC_MASK		0x80
+#define QPNP_WLED_CABC_SHIFT		7
+#define QPNP_WLED_CURR_SINK_SHIFT	4
+#define QPNP_WLED_CURR_SINK_MASK	GENMASK(7, 4)
+#define QPNP_WLED_BRIGHT_LSB_MASK	0xFF
+#define QPNP_WLED_BRIGHT_MSB_SHIFT	8
+#define QPNP_WLED_BRIGHT_MSB_MASK	0x0F
+#define QPNP_WLED_SYNC			0x0F
+#define QPNP_WLED_SYNC_RESET		0x00
+
+#define QPNP_WLED_SINK_TEST5_HYB	0x14
+#define QPNP_WLED_SINK_TEST5_DIG	0x1E
+#define QPNP_WLED_SINK_TEST5_HVG_PULL_STR_BIT	BIT(3)
+
+#define QPNP_WLED_SWITCH_FREQ_800_KHZ_CODE	0x0B
+#define QPNP_WLED_SWITCH_FREQ_1600_KHZ_CODE	0x05
+
+#define QPNP_WLED_DISP_SEL_REG(b)	(b + 0x44)
+#define QPNP_WLED_MODULE_RDY_REG(b)	(b + 0x45)
+#define QPNP_WLED_MODULE_EN_REG(b)	(b + 0x46)
+#define QPNP_WLED_MODULE_RDY_MASK	0x7F
+#define QPNP_WLED_MODULE_RDY_SHIFT	7
+#define QPNP_WLED_MODULE_EN_MASK	BIT(7)
+#define QPNP_WLED_MODULE_EN_SHIFT	7
+#define QPNP_WLED_DISP_SEL_MASK		0x7F
+#define QPNP_WLED_DISP_SEL_SHIFT	7
+#define QPNP_WLED_EN_SC_DEB_CYCLES_MASK	0x79
+#define QPNP_WLED_EN_DEB_CYCLES_MASK	0xF9
+#define QPNP_WLED_EN_SC_SHIFT		7
+#define QPNP_WLED_SC_PRO_EN_DSCHGR	0x8
+#define QPNP_WLED_SC_DEB_CYCLES_MIN     2
+#define QPNP_WLED_SC_DEB_CYCLES_MAX     16
+#define QPNP_WLED_SC_DEB_CYCLES_SUB     2
+#define QPNP_WLED_SC_DEB_CYCLES_DFLT    4
+#define QPNP_WLED_EXT_FET_DTEST2	0x09
+
+#define QPNP_WLED_SEC_ACCESS_REG(b)    (b + 0xD0)
+#define QPNP_WLED_SEC_UNLOCK           0xA5
+
+#define NUM_DDIC_CODES			256
+#define QPNP_WLED_MAX_STRINGS		4
+#define QPNP_PM660_WLED_MAX_STRINGS	3
+#define WLED_MAX_LEVEL_4095		4095
+#define QPNP_WLED_RAMP_DLY_MS		20
+#define QPNP_WLED_TRIGGER_NONE		"none"
+#define QPNP_WLED_STR_SIZE		20
+#define QPNP_WLED_MIN_MSLEEP		20
+#define QPNP_WLED_SC_DLY_MS		20
+#define QPNP_WLED_SOFT_START_DLY_US	10000
+
+#define NUM_SUPPORTED_AVDD_VOLTAGES	6
+#define QPNP_WLED_DFLT_AVDD_MV		7600
+#define QPNP_WLED_AVDD_MIN_MV		5650
+#define QPNP_WLED_AVDD_MAX_MV		7900
+#define QPNP_WLED_AVDD_STEP_MV		150
+#define QPNP_WLED_AVDD_MIN_TRIM_VAL	0x0
+#define QPNP_WLED_AVDD_MAX_TRIM_VAL	0xF
+#define QPNP_WLED_AVDD_SEL_SPMI_BIT	BIT(7)
+#define QPNP_WLED_AVDD_SET_BIT		BIT(4)
+
+#define NUM_SUPPORTED_OVP_THRESHOLDS	4
+#define NUM_SUPPORTED_ILIM_THRESHOLDS	8
+
+#define QPNP_WLED_AVDD_MV_TO_REG(val) \
+		((val - QPNP_WLED_AVDD_MIN_MV) / QPNP_WLED_AVDD_STEP_MV)
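+
+/*
+ * Worked example for the macro above: the default target of
+ * QPNP_WLED_DFLT_AVDD_MV (7600 mV) maps to (7600 - 5650) / 150 = 13
+ * (0xD), so the 5650..7900 mV range in 150 mV steps spans register
+ * codes 0x0..0xF.
+ */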
+
+/* output feedback mode */
+enum qpnp_wled_fdbk_op {
+	QPNP_WLED_FDBK_AUTO,
+	QPNP_WLED_FDBK_WLED1,
+	QPNP_WLED_FDBK_WLED2,
+	QPNP_WLED_FDBK_WLED3,
+	QPNP_WLED_FDBK_WLED4,
+};
+
+/* dimming modes */
+enum qpnp_wled_dim_mode {
+	QPNP_WLED_DIM_ANALOG,
+	QPNP_WLED_DIM_DIGITAL,
+	QPNP_WLED_DIM_HYBRID,
+};
+
+/* wled ctrl debug registers */
+static u8 qpnp_wled_ctrl_dbg_regs[] = {
+	0x44, 0x46, 0x48, 0x49, 0x4b, 0x4c, 0x4d, 0x4e, 0x50, 0x51, 0x52, 0x53,
+	0x54, 0x55, 0x56, 0x57, 0x58, 0x5a, 0x5b, 0x5d, 0x5e, 0xe2
+};
+
+/* wled sink debug registers */
+static u8 qpnp_wled_sink_dbg_regs[] = {
+	0x46, 0x47, 0x48, 0x4a, 0x4b,
+	0x50, 0x51, 0x52, 0x53,	0x56, 0x57, 0x58,
+	0x60, 0x61, 0x62, 0x63,	0x66, 0x67, 0x68,
+	0x70, 0x71, 0x72, 0x73,	0x76, 0x77, 0x78,
+	0x80, 0x81, 0x82, 0x83,	0x86, 0x87, 0x88,
+	0xe6,
+};
+
+static int qpnp_wled_avdd_target_voltages[NUM_SUPPORTED_AVDD_VOLTAGES] = {
+	7900, 7600, 7300, 6400, 6100, 5800,
+};
+
+static u8 qpnp_wled_ovp_reg_settings[NUM_SUPPORTED_AVDD_VOLTAGES] = {
+	0x0, 0x0, 0x1, 0x2, 0x2, 0x3,
+};
+
+static int qpnp_wled_avdd_trim_adjustments[NUM_SUPPORTED_AVDD_VOLTAGES] = {
+	3, 0, -2, 7, 3, 3,
+};
+
+static int qpnp_wled_ovp_thresholds_pmi8994[NUM_SUPPORTED_OVP_THRESHOLDS] = {
+	31000, 29500, 19400, 17800,
+};
+
+static int qpnp_wled_ovp_thresholds_pmi8998[NUM_SUPPORTED_OVP_THRESHOLDS] = {
+	31100, 29600, 19600, 18100,
+};
+
+static int qpnp_wled_ilim_settings_pmi8994[NUM_SUPPORTED_ILIM_THRESHOLDS] = {
+	105, 385, 660, 980, 1150, 1420, 1700, 1980,
+};
+
+static int qpnp_wled_ilim_settings_pmi8998[NUM_SUPPORTED_ILIM_THRESHOLDS] = {
+	105, 280, 450, 620, 970, 1150, 1300, 1500,
+};
+
+struct wled_vref_setting {
+	u32 min_uv;
+	u32 max_uv;
+	u32 step_uv;
+	u32 default_uv;
+};
+
+static struct wled_vref_setting vref_setting_pmi8994 = {
+	300000, 675000, 25000, 350000,
+};
+static struct wled_vref_setting vref_setting_pmi8998 = {
+	60000, 397500, 22500, 127500,
+};
+
+/**
+ *  qpnp_wled - wled data structure
+ *  @ cdev - led class device
+ *  @ pdev - platform device
+ *  @ work - worker for led operation
+ *  @ wq - workqueue for setting brightness level
+ *  @ lock - mutex lock for exclusive access
+ *  @ fdbk_op - output feedback mode
+ *  @ dim_mode - dimming mode
+ *  @ ovp_irq - over voltage protection irq
+ *  @ sc_irq - short circuit irq
+ *  @ sc_cnt - short circuit irq count
+ *  @ avdd_target_voltage_mv - target voltage for AVDD module in mV
+ *  @ ctrl_base - base address for wled ctrl
+ *  @ sink_base - base address for wled sink
+ *  @ mod_freq_khz - modulator frequency in kHz
+ *  @ hyb_thres - threshold for hybrid dimming
+ *  @ sync_dly_us - sync delay in us
+ *  @ vref_uv - ref voltage in uv
+ *  @ vref_psm_mv - ref psm voltage in mv
+ *  @ loop_comp_res_kohm - control to select the compensation resistor
+ *  @ loop_ea_gm - control to select the gm for the gm stage in control loop
+ *  @ sc_deb_cycles - debounce time for short circuit detection
+ *  @ switch_freq_khz - switching frequency in kHz
+ *  @ ovp_mv - over voltage protection in mv
+ *  @ ilim_ma - current limiter in ma
+ *  @ boost_duty_ns - boost duty cycle in ns
+ *  @ fs_curr_ua - full scale current in ua
+ *  @ ramp_ms - delay between ramp steps in ms
+ *  @ ramp_step - ramp step size
+ *  @ cons_sync_write_delay_us - delay between two consecutive writes to SYNC
+ *  @ auto_calibration_ovp_count - OVP fault irq count to run auto calibration
+ *  @ max_strings - Number of strings supported in WLED peripheral
+ *  @ prev_level - Previous brightness level
+ *  @ brt_map_table - Brightness map table
+ *  @ strings - supported list of strings
+ *  @ num_strings - number of strings
+ *  @ loop_auto_gm_thresh - the clamping level for auto gm
+ *  @ lcd_auto_pfm_thresh - the threshold for lcd auto pfm mode
+ *  @ loop_auto_gm_en - select if auto gm is enabled
+ *  @ lcd_auto_pfm_en - select if auto pfm is enabled in lcd mode
+ *  @ lcd_psm_ctrl - select if psm needs to be controlled in lcd mode
+ *  @ avdd_mode_spmi - enable avdd programming via spmi
+ *  @ en_9b_dim_res - enable or disable 9bit dimming
+ *  @ en_phase_stag - enable or disable phase staggering
+ *  @ en_cabc - enable or disable cabc
+ *  @ disp_type_amoled - type of display: LCD/AMOLED
+ *  @ en_ext_pfet_sc_pro - enable sc protection on external pfet
+ *  @ prev_state - previous state of WLED
+ *  @ stepper_en - Flag to enable stepper algorithm
+ *  @ ovp_irq_disabled - OVP interrupt disable status
+ *  @ auto_calib_enabled - Flag to enable auto calibration feature
+ *  @ auto_calib_done - Flag to indicate auto calibration is done
+ *  @ module_dis_perm - Flag to keep module permanently disabled
+ *  @ start_ovp_fault_time - Time when the OVP fault first occurred
+ */
+struct qpnp_wled {
+	struct led_classdev	cdev;
+	struct platform_device	*pdev;
+	struct regmap		*regmap;
+	struct pmic_revid_data	*pmic_rev_id;
+	struct work_struct	work;
+	struct workqueue_struct *wq;
+	struct mutex		lock;
+	struct mutex		bus_lock;
+	enum qpnp_wled_fdbk_op	fdbk_op;
+	enum qpnp_wled_dim_mode	dim_mode;
+	int			ovp_irq;
+	int			sc_irq;
+	u32			sc_cnt;
+	u32			avdd_target_voltage_mv;
+	u16			ctrl_base;
+	u16			sink_base;
+	u16			mod_freq_khz;
+	u16			hyb_thres;
+	u16			sync_dly_us;
+	u32			vref_uv;
+	u16			vref_psm_mv;
+	u16			loop_comp_res_kohm;
+	u16			loop_ea_gm;
+	u16			sc_deb_cycles;
+	u16			switch_freq_khz;
+	u16			ovp_mv;
+	u16			ilim_ma;
+	u16			boost_duty_ns;
+	u16			fs_curr_ua;
+	u16			ramp_ms;
+	u16			ramp_step;
+	u16			cons_sync_write_delay_us;
+	u16			auto_calibration_ovp_count;
+	u16			max_strings;
+	u16			prev_level;
+	u16			*brt_map_table;
+	u8			strings[QPNP_WLED_MAX_STRINGS];
+	u8			num_strings;
+	u8			loop_auto_gm_thresh;
+	u8			lcd_auto_pfm_thresh;
+	bool			loop_auto_gm_en;
+	bool			lcd_auto_pfm_en;
+	bool			lcd_psm_ctrl;
+	bool			avdd_mode_spmi;
+	bool			en_9b_dim_res;
+	bool			en_phase_stag;
+	bool			en_cabc;
+	bool			disp_type_amoled;
+	bool			en_ext_pfet_sc_pro;
+	bool			prev_state;
+	bool			stepper_en;
+	bool			ovp_irq_disabled;
+	bool			auto_calib_enabled;
+	bool			auto_calib_done;
+	bool			module_dis_perm;
+	ktime_t			start_ovp_fault_time;
+};
+
+static int qpnp_wled_step_delay_us = 52000;
+module_param_named(
+	total_step_delay_us, qpnp_wled_step_delay_us, int, 0600
+);
+
+static int qpnp_wled_step_size_threshold = 3;
+module_param_named(
+	step_size_threshold, qpnp_wled_step_size_threshold, int, 0600
+);
+
+static int qpnp_wled_step_delay_gain = 2;
+module_param_named(
+	step_delay_gain, qpnp_wled_step_delay_gain, int, 0600
+);
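+
+/*
+ * These three knobs are writable at runtime (mode 0600). Assuming the
+ * module ends up named leds_qpnp_wled (an assumption; the name depends
+ * on the build), they would appear under e.g.
+ * /sys/module/leds_qpnp_wled/parameters/total_step_delay_us.
+ */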
+
+/* helper to read a pmic register */
+static int qpnp_wled_read_reg(struct qpnp_wled *wled, u16 addr, u8 *data)
+{
+	int rc;
+	uint val;
+
+	rc = regmap_read(wled->regmap, addr, &val);
+	if (rc < 0) {
+		dev_err(&wled->pdev->dev,
+			"Error reading address: %x(%d)\n", addr, rc);
+		return rc;
+	}
+
+	*data = (u8)val;
+	return 0;
+}
+
+/* helper to write a pmic register */
+static int qpnp_wled_write_reg(struct qpnp_wled *wled, u16 addr, u8 data)
+{
+	int rc;
+
+	mutex_lock(&wled->bus_lock);
+	rc = regmap_write(wled->regmap, addr, data);
+	if (rc < 0) {
+		dev_err(&wled->pdev->dev, "Error writing address: %x(%d)\n",
+			addr, rc);
+		goto out;
+	}
+
+	dev_dbg(&wled->pdev->dev, "wrote: WLED_0x%x = 0x%x\n", addr, data);
+out:
+	mutex_unlock(&wled->bus_lock);
+	return rc;
+}
+
+static int qpnp_wled_masked_write_reg(struct qpnp_wled *wled, u16 addr,
+					u8 mask, u8 data)
+{
+	int rc;
+
+	mutex_lock(&wled->bus_lock);
+	rc = regmap_update_bits(wled->regmap, addr, mask, data);
+	if (rc < 0) {
+		dev_err(&wled->pdev->dev, "Error writing address: %x(%d)\n",
+			addr, rc);
+		goto out;
+	}
+
+	dev_dbg(&wled->pdev->dev, "wrote: WLED_0x%x = 0x%x\n", addr, data);
+out:
+	mutex_unlock(&wled->bus_lock);
+	return rc;
+}
+
+static int qpnp_wled_sec_write_reg(struct qpnp_wled *wled, u16 addr, u8 data)
+{
+	int rc;
+	u8 reg = QPNP_WLED_SEC_UNLOCK;
+	u16 base_addr = addr & 0xFF00;
+
+	mutex_lock(&wled->bus_lock);
+	rc = regmap_write(wled->regmap, QPNP_WLED_SEC_ACCESS_REG(base_addr),
+			reg);
+	if (rc < 0) {
+		dev_err(&wled->pdev->dev, "Error writing address: %x(%d)\n",
+			QPNP_WLED_SEC_ACCESS_REG(base_addr), rc);
+		goto out;
+	}
+
+	rc = regmap_write(wled->regmap, addr, data);
+	if (rc < 0) {
+		dev_err(&wled->pdev->dev, "Error writing address: %x(%d)\n",
+			addr, rc);
+		goto out;
+	}
+
+	dev_dbg(&wled->pdev->dev, "wrote: WLED_0x%x = 0x%x\n", addr, data);
+out:
+	mutex_unlock(&wled->bus_lock);
+	return rc;
+}
+
+static int qpnp_wled_swire_avdd_config(struct qpnp_wled *wled)
+{
+	int rc;
+	u8 val;
+
+	if (wled->pmic_rev_id->pmic_subtype != PMI8998_SUBTYPE &&
+		wled->pmic_rev_id->pmic_subtype != PM660L_SUBTYPE)
+		return 0;
+
+	if (!wled->disp_type_amoled || wled->avdd_mode_spmi)
+		return 0;
+
+	val = QPNP_WLED_AVDD_MV_TO_REG(wled->avdd_target_voltage_mv);
+	rc = qpnp_wled_write_reg(wled,
+			QPNP_WLED_SWIRE_AVDD_REG(wled->ctrl_base), val);
+	return rc;
+}
+
+static int qpnp_wled_sync_reg_toggle(struct qpnp_wled *wled)
+{
+	int rc;
+	u8 reg;
+
+	/* sync */
+	reg = QPNP_WLED_SYNC;
+	rc = qpnp_wled_write_reg(wled, QPNP_WLED_SYNC_REG(wled->sink_base),
+			reg);
+	if (rc < 0)
+		return rc;
+
+	if (wled->cons_sync_write_delay_us)
+		usleep_range(wled->cons_sync_write_delay_us,
+				wled->cons_sync_write_delay_us + 1);
+
+	reg = QPNP_WLED_SYNC_RESET;
+	rc = qpnp_wled_write_reg(wled, QPNP_WLED_SYNC_REG(wled->sink_base),
+			reg);
+	if (rc < 0)
+		return rc;
+
+	return 0;
+}
+
+/* set wled to a level of brightness */
+static int qpnp_wled_set_level(struct qpnp_wled *wled, int level)
+{
+	int i, rc;
+	u8 reg;
+	u16 low_limit = WLED_MAX_LEVEL_4095 * 4 / 1000;
+
+	/* WLED's lower limit of operation is 0.4% */
+	if (level > 0 && level < low_limit)
+		level = low_limit;
+
+	/* set brightness registers */
+	for (i = 0; i < wled->max_strings; i++) {
+		reg = level & QPNP_WLED_BRIGHT_LSB_MASK;
+		rc = qpnp_wled_write_reg(wled,
+				QPNP_WLED_BRIGHT_LSB_REG(wled->sink_base,
+					wled->strings[i]), reg);
+		if (rc < 0)
+			return rc;
+
+		reg = level >> QPNP_WLED_BRIGHT_MSB_SHIFT;
+		reg = reg & QPNP_WLED_BRIGHT_MSB_MASK;
+		rc = qpnp_wled_write_reg(wled,
+				QPNP_WLED_BRIGHT_MSB_REG(wled->sink_base,
+					wled->strings[i]), reg);
+		if (rc < 0)
+			return rc;
+	}
+
+	rc = qpnp_wled_sync_reg_toggle(wled);
+	if (rc < 0) {
+		dev_err(&wled->pdev->dev, "Failed to toggle sync reg %d\n", rc);
+		return rc;
+	}
+
+	pr_debug("level:%d\n", level);
+	return 0;
+}
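+
+/*
+ * Worked example for qpnp_wled_set_level() above: a 12-bit level of
+ * 0x800 (2048) is split per string as
+ *
+ *	lsb = 0x800 & QPNP_WLED_BRIGHT_LSB_MASK;	 -> 0x00
+ *	msb = (0x800 >> QPNP_WLED_BRIGHT_MSB_SHIFT)
+ *			& QPNP_WLED_BRIGHT_MSB_MASK;	 -> 0x08
+ *
+ * and the subsequent SYNC toggle latches the LSB/MSB pair into the
+ * modulator in one step.
+ */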
+
+static int qpnp_wled_set_map_level(struct qpnp_wled *wled, int level)
+{
+	int rc, i;
+
+	if (level < wled->prev_level) {
+		for (i = wled->prev_level; i >= level; i--) {
+			rc = qpnp_wled_set_level(wled, wled->brt_map_table[i]);
+			if (rc < 0) {
+				pr_err("set brightness level failed, rc:%d\n",
+					rc);
+				return rc;
+			}
+		}
+	} else if (level > wled->prev_level) {
+		for (i = wled->prev_level; i <= level; i++) {
+			rc = qpnp_wled_set_level(wled, wled->brt_map_table[i]);
+			if (rc < 0) {
+				pr_err("set brightness level failed, rc:%d\n",
+					rc);
+				return rc;
+			}
+		}
+	}
+
+	return 0;
+}
+
+static int qpnp_wled_set_step_level(struct qpnp_wled *wled, int new_level)
+{
+	int rc, i, num_steps, delay_us;
+	u16 level, start_level, end_level, step_size;
+	bool level_inc = false;
+
+	level = wled->prev_level;
+	start_level = wled->brt_map_table[level];
+	end_level = wled->brt_map_table[new_level];
+	level_inc = (new_level > level);
+
+	num_steps = abs(start_level - end_level);
+	if (!num_steps)
+		return 0;
+
+	delay_us = qpnp_wled_step_delay_us / num_steps;
+	pr_debug("level goes from [%d %d] num_steps: %d, delay: %d\n",
+		start_level, end_level, num_steps, delay_us);
+
+	if (delay_us < 500) {
+		step_size = 1000 / delay_us;
+		num_steps = num_steps / step_size;
+		delay_us = 1000;
+	} else {
+		if (num_steps < qpnp_wled_step_size_threshold)
+			delay_us *= qpnp_wled_step_delay_gain;
+
+		step_size = 1;
+	}
+
+	i = start_level;
+	while (num_steps--) {
+		if (level_inc)
+			i += step_size;
+		else
+			i -= step_size;
+
+		rc = qpnp_wled_set_level(wled, i);
+		if (rc < 0)
+			return rc;
+
+		if (delay_us > 0) {
+			if (delay_us < 20000)
+				usleep_range(delay_us, delay_us + 1);
+			else
+				msleep(delay_us / USEC_PER_MSEC);
+		}
+	}
+
+	if (i != end_level) {
+		i = end_level;
+		rc = qpnp_wled_set_level(wled, i);
+		if (rc < 0)
+			return rc;
+	}
+
+	return 0;
+}
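+
+/*
+ * Numeric sketch of the stepping math above, using the default
+ * qpnp_wled_step_delay_us of 52000: stepping from mapped level 100 to
+ * 612 gives num_steps = 512 and delay_us = 52000 / 512 = 101. As that
+ * is below 500 us, the loop coarsens to step_size = 1000 / 101 = 9,
+ * num_steps = 512 / 9 = 56 with a fixed 1000 us delay, and the final
+ * write to end_level covers the remainder.
+ */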
+
+static int qpnp_wled_psm_config(struct qpnp_wled *wled, bool enable)
+{
+	int rc;
+
+	if (!wled->lcd_psm_ctrl)
+		return 0;
+
+	rc = qpnp_wled_masked_write_reg(wled,
+			QPNP_WLED_EN_PSM_REG(wled->ctrl_base),
+			QPNP_WLED_EN_PSM_BIT,
+			enable ? QPNP_WLED_EN_PSM_BIT : 0);
+	if (rc < 0)
+		return rc;
+
+	rc = qpnp_wled_masked_write_reg(wled,
+			QPNP_WLED_PSM_CTRL_REG(wled->ctrl_base),
+			QPNP_WLED_PSM_OVERWRITE_BIT,
+			enable ? QPNP_WLED_PSM_OVERWRITE_BIT : 0);
+	if (rc < 0)
+		return rc;
+
+	return 0;
+}
+
+static int qpnp_wled_module_en(struct qpnp_wled *wled,
+				u16 base_addr, bool state)
+{
+	int rc;
+
+	if (wled->module_dis_perm)
+		return 0;
+
+	rc = qpnp_wled_masked_write_reg(wled,
+			QPNP_WLED_MODULE_EN_REG(base_addr),
+			QPNP_WLED_MODULE_EN_MASK,
+			state << QPNP_WLED_MODULE_EN_SHIFT);
+	if (rc < 0)
+		return rc;
+
+	/*
+	 * Wait for at least 10ms before enabling OVP fault interrupt after
+	 * enabling the module so that soft start is completed. Also, this
+	 * delay can be used to control PSM during enable when required. Keep
+	 * OVP interrupt disabled when the module is disabled.
+	 */
+	if (state) {
+		usleep_range(QPNP_WLED_SOFT_START_DLY_US,
+				QPNP_WLED_SOFT_START_DLY_US + 1000);
+		rc = qpnp_wled_psm_config(wled, false);
+		if (rc < 0)
+			return rc;
+
+		if (wled->ovp_irq > 0 && wled->ovp_irq_disabled) {
+			enable_irq(wled->ovp_irq);
+			wled->ovp_irq_disabled = false;
+		}
+	} else {
+		if (wled->ovp_irq > 0 && !wled->ovp_irq_disabled) {
+			disable_irq(wled->ovp_irq);
+			wled->ovp_irq_disabled = true;
+		}
+
+		rc = qpnp_wled_psm_config(wled, true);
+		if (rc < 0)
+			return rc;
+	}
+
+	return 0;
+}
+
+/* sysfs store function for ramp */
+static ssize_t qpnp_wled_ramp_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct qpnp_wled *wled = dev_get_drvdata(dev);
+	int i, rc;
+
+	mutex_lock(&wled->lock);
+
+	if (!wled->cdev.brightness) {
+		rc = qpnp_wled_module_en(wled, wled->ctrl_base, true);
+		if (rc) {
+			dev_err(&wled->pdev->dev, "wled enable failed\n");
+			goto unlock_mutex;
+		}
+	}
+
+	/* ramp up */
+	for (i = 0; i <= wled->cdev.max_brightness;) {
+		rc = qpnp_wled_set_level(wled, i);
+		if (rc) {
+			dev_err(&wled->pdev->dev, "wled set level failed\n");
+			goto restore_brightness;
+		}
+
+		if (wled->ramp_ms < QPNP_WLED_MIN_MSLEEP)
+			usleep_range(wled->ramp_ms * USEC_PER_MSEC,
+					wled->ramp_ms * USEC_PER_MSEC);
+		else
+			msleep(wled->ramp_ms);
+
+		if (i == wled->cdev.max_brightness)
+			break;
+
+		i += wled->ramp_step;
+		if (i > wled->cdev.max_brightness)
+			i = wled->cdev.max_brightness;
+	}
+
+	/* ramp down */
+	for (i = wled->cdev.max_brightness; i >= 0;) {
+		rc = qpnp_wled_set_level(wled, i);
+		if (rc) {
+			dev_err(&wled->pdev->dev, "wled set level failed\n");
+			goto restore_brightness;
+		}
+
+		if (wled->ramp_ms < QPNP_WLED_MIN_MSLEEP)
+			usleep_range(wled->ramp_ms * USEC_PER_MSEC,
+					wled->ramp_ms * USEC_PER_MSEC);
+		else
+			msleep(wled->ramp_ms);
+
+		if (i == 0)
+			break;
+
+		i -= wled->ramp_step;
+		if (i < 0)
+			i = 0;
+	}
+
+	dev_info(&wled->pdev->dev, "wled ramp complete\n");
+
+restore_brightness:
+	/* restore the old brightness */
+	qpnp_wled_set_level(wled, wled->cdev.brightness);
+	if (!wled->cdev.brightness) {
+		rc = qpnp_wled_module_en(wled, wled->ctrl_base, false);
+		if (rc)
+			dev_err(&wled->pdev->dev, "wled enable failed\n");
+	}
+unlock_mutex:
+	mutex_unlock(&wled->lock);
+
+	return count;
+}
+
+static int qpnp_wled_dump_regs(struct qpnp_wled *wled, u16 base_addr,
+				u8 dbg_regs[], u8 size, char *label,
+				int count, char *buf)
+{
+	int i, rc;
+	u8 reg;
+
+	for (i = 0; i < size; i++) {
+		rc = qpnp_wled_read_reg(wled, base_addr + dbg_regs[i], &reg);
+		if (rc < 0)
+			return rc;
+
+		count += snprintf(buf + count, PAGE_SIZE - count,
+				"%s: REG_0x%x = 0x%x\n", label,
+				base_addr + dbg_regs[i], reg);
+
+		if (count >= PAGE_SIZE)
+			return PAGE_SIZE - 1;
+	}
+
+	return count;
+}
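+
+/*
+ * Sample of the text produced by the helper above (the base address
+ * and register values are made up for illustration):
+ *
+ *	wled_ctrl: REG_0xd844 = 0x0
+ *	wled_ctrl: REG_0xd846 = 0x80
+ */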
+
+/* sysfs show function for debug registers */
+static ssize_t qpnp_wled_dump_regs_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct qpnp_wled *wled = dev_get_drvdata(dev);
+	int count = 0;
+
+	count = qpnp_wled_dump_regs(wled, wled->ctrl_base,
+			qpnp_wled_ctrl_dbg_regs,
+			ARRAY_SIZE(qpnp_wled_ctrl_dbg_regs),
+			"wled_ctrl", count, buf);
+
+	if (count < 0 || count == PAGE_SIZE - 1)
+		return count;
+
+	count = qpnp_wled_dump_regs(wled, wled->sink_base,
+			qpnp_wled_sink_dbg_regs,
+			ARRAY_SIZE(qpnp_wled_sink_dbg_regs),
+			"wled_sink", count, buf);
+
+	return count;
+}
+
+/* sysfs show function for ramp delay in each step */
+static ssize_t qpnp_wled_ramp_ms_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct qpnp_wled *wled = dev_get_drvdata(dev);
+
+	return snprintf(buf, PAGE_SIZE, "%d\n", wled->ramp_ms);
+}
+
+/* sysfs store function for ramp delay in each step */
+static ssize_t qpnp_wled_ramp_ms_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct qpnp_wled *wled = dev_get_drvdata(dev);
+	int data, rc;
+
+	rc = kstrtoint(buf, 10, &data);
+	if (rc)
+		return rc;
+
+	wled->ramp_ms = data;
+	return count;
+}
+
+/* sysfs show function for ramp step */
+static ssize_t qpnp_wled_ramp_step_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct qpnp_wled *wled = dev_get_drvdata(dev);
+
+	return snprintf(buf, PAGE_SIZE, "%d\n", wled->ramp_step);
+}
+
+/* sysfs store function for ramp step */
+static ssize_t qpnp_wled_ramp_step_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct qpnp_wled *wled = dev_get_drvdata(dev);
+	int data, rc;
+
+	rc = kstrtoint(buf, 10, &data);
+	if (rc)
+		return rc;
+
+	wled->ramp_step = data;
+	return count;
+}
+
+/* sysfs show function for dim mode */
+static ssize_t qpnp_wled_dim_mode_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct qpnp_wled *wled = dev_get_drvdata(dev);
+	char *str;
+
+	if (wled->dim_mode == QPNP_WLED_DIM_ANALOG)
+		str = "analog";
+	else if (wled->dim_mode == QPNP_WLED_DIM_DIGITAL)
+		str = "digital";
+	else
+		str = "hybrid";
+
+	return snprintf(buf, PAGE_SIZE, "%s\n", str);
+}
+
+/* sysfs store function for dim mode*/
+static ssize_t qpnp_wled_dim_mode_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct qpnp_wled *wled = dev_get_drvdata(dev);
+	int rc, temp;
+	u8 reg;
+
+	/* sysfs_streq() ignores the trailing newline userspace appends */
+	if (sysfs_streq(buf, "analog"))
+		temp = QPNP_WLED_DIM_ANALOG;
+	else if (sysfs_streq(buf, "digital"))
+		temp = QPNP_WLED_DIM_DIGITAL;
+	else
+		temp = QPNP_WLED_DIM_HYBRID;
+
+	if (temp == wled->dim_mode)
+		return count;
+
+	rc = qpnp_wled_read_reg(wled, QPNP_WLED_MOD_REG(wled->sink_base), &reg);
+	if (rc < 0)
+		return rc;
+
+	if (temp == QPNP_WLED_DIM_HYBRID) {
+		reg &= QPNP_WLED_DIM_HYB_MASK;
+		reg |= (1 << QPNP_WLED_DIM_HYB_SHIFT);
+	} else {
+		reg &= QPNP_WLED_DIM_HYB_MASK;
+		reg |= (0 << QPNP_WLED_DIM_HYB_SHIFT);
+		reg &= QPNP_WLED_DIM_ANA_MASK;
+		reg |= temp;
+	}
+
+	rc = qpnp_wled_write_reg(wled, QPNP_WLED_MOD_REG(wled->sink_base), reg);
+	if (rc)
+		return rc;
+
+	wled->dim_mode = temp;
+
+	return count;
+}
+
+/* sysfs show function for full scale current in ua*/
+static ssize_t qpnp_wled_fs_curr_ua_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct qpnp_wled *wled = dev_get_drvdata(dev);
+
+	return snprintf(buf, PAGE_SIZE, "%d\n", wled->fs_curr_ua);
+}
+
+/* sysfs store function for full scale current in ua*/
+static ssize_t qpnp_wled_fs_curr_ua_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct qpnp_wled *wled = dev_get_drvdata(dev);
+	int data, i, rc;
+	u8 reg;
+
+	rc = kstrtoint(buf, 10, &data);
+	if (rc)
+		return rc;
+
+	for (i = 0; i < wled->max_strings; i++) {
+		if (data < QPNP_WLED_FS_CURR_MIN_UA)
+			data = QPNP_WLED_FS_CURR_MIN_UA;
+		else if (data > QPNP_WLED_FS_CURR_MAX_UA)
+			data = QPNP_WLED_FS_CURR_MAX_UA;
+
+		reg = data / QPNP_WLED_FS_CURR_STEP_UA;
+		rc = qpnp_wled_masked_write_reg(wled,
+			QPNP_WLED_FS_CURR_REG(wled->sink_base, i),
+			QPNP_WLED_FS_CURR_MASK, reg);
+		if (rc < 0)
+			return rc;
+	}
+
+	wled->fs_curr_ua = data;
+
+	rc = qpnp_wled_sync_reg_toggle(wled);
+	if (rc < 0) {
+		dev_err(&wled->pdev->dev, "Failed to toggle sync reg %d\n", rc);
+		return rc;
+	}
+
+	return count;
+}
+
+/* sysfs attributes exported by wled */
+static struct device_attribute qpnp_wled_attrs[] = {
+	__ATTR(dump_regs, 0664, qpnp_wled_dump_regs_show, NULL),
+	__ATTR(dim_mode, 0664, qpnp_wled_dim_mode_show,
+		qpnp_wled_dim_mode_store),
+	__ATTR(fs_curr_ua, 0664, qpnp_wled_fs_curr_ua_show,
+		qpnp_wled_fs_curr_ua_store),
+	__ATTR(start_ramp, 0664, NULL, qpnp_wled_ramp_store),
+	__ATTR(ramp_ms, 0664, qpnp_wled_ramp_ms_show, qpnp_wled_ramp_ms_store),
+	__ATTR(ramp_step, 0664, qpnp_wled_ramp_step_show,
+		qpnp_wled_ramp_step_store),
+};
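+
+/*
+ * Usage sketch for the attributes above ("<wled>" stands for the led
+ * classdev name, which is set from device tree elsewhere and is an
+ * assumption here):
+ *
+ *	cat /sys/class/leds/<wled>/dim_mode
+ *	echo digital > /sys/class/leds/<wled>/dim_mode
+ *	echo 10000 > /sys/class/leds/<wled>/fs_curr_ua
+ *	echo 1 > /sys/class/leds/<wled>/start_ramp
+ */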
+
+/* worker for setting wled brightness */
+static void qpnp_wled_work(struct work_struct *work)
+{
+	struct qpnp_wled *wled;
+	int level, level_255, rc;
+
+	wled = container_of(work, struct qpnp_wled, work);
+
+	mutex_lock(&wled->lock);
+	level = wled->cdev.brightness;
+
+	if (wled->brt_map_table) {
+		/*
+		 * Scale the 12-bit level down to an 8-bit index, then look
+		 * up the mapped 12-bit value in the brightness map table.
+		 */
+		level_255 = DIV_ROUND_CLOSEST(level, 16);
+		if (level_255 > 255)
+			level_255 = 255;
+
+		pr_debug("level: %d level_255: %d\n", level, level_255);
+		if (wled->stepper_en)
+			rc = qpnp_wled_set_step_level(wled, level_255);
+		else
+			rc = qpnp_wled_set_map_level(wled, level_255);
+		if (rc) {
+			dev_err(&wled->pdev->dev, "wled set level failed\n");
+			goto unlock_mutex;
+		}
+		wled->prev_level = level_255;
+	} else if (level) {
+		rc = qpnp_wled_set_level(wled, level);
+		if (rc) {
+			dev_err(&wled->pdev->dev, "wled set level failed\n");
+			goto unlock_mutex;
+		}
+	}
+
+	if (!!level != wled->prev_state) {
+		if (!!level) {
+			/*
+			 * For AMOLED display in pmi8998, SWIRE_AVDD_DEFAULT has
+			 * to be reconfigured every time the module is enabled.
+			 */
+			rc = qpnp_wled_swire_avdd_config(wled);
+			if (rc < 0) {
+				pr_err("Write to SWIRE_AVDD_DEFAULT register failed rc:%d\n",
+					rc);
+				goto unlock_mutex;
+			}
+		}
+
+		rc = qpnp_wled_module_en(wled, wled->ctrl_base, !!level);
+		if (rc) {
+			dev_err(&wled->pdev->dev, "wled %sable failed\n",
+						level ? "en" : "dis");
+			goto unlock_mutex;
+		}
+	}
+
+	wled->prev_state = !!level;
+unlock_mutex:
+	mutex_unlock(&wled->lock);
+}
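+
+/*
+ * Worked example for the 12-bit to 8-bit mapping above: a requested
+ * brightness of 4095 gives DIV_ROUND_CLOSEST(4095, 16) = 256, which
+ * the clamp reduces to 255 so the value stays within the 0..255 index
+ * range of the brightness map table.
+ */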
+
+/* get api registered with led classdev for wled brightness */
+static enum led_brightness qpnp_wled_get(struct led_classdev *led_cdev)
+{
+	struct qpnp_wled *wled;
+
+	wled = container_of(led_cdev, struct qpnp_wled, cdev);
+
+	return wled->cdev.brightness;
+}
+
+/* set api registered with led classdev for wled brightness */
+static void qpnp_wled_set(struct led_classdev *led_cdev,
+				enum led_brightness level)
+{
+	struct qpnp_wled *wled;
+
+	wled = container_of(led_cdev, struct qpnp_wled, cdev);
+
+	if (level < LED_OFF)
+		level = LED_OFF;
+	else if (level > wled->cdev.max_brightness)
+		level = wled->cdev.max_brightness;
+
+	wled->cdev.brightness = level;
+	queue_work(wled->wq, &wled->work);
+}
+
+static int qpnp_wled_set_disp(struct qpnp_wled *wled, u16 base_addr)
+{
+	int rc;
+	u8 reg;
+
+	/* display type */
+	rc = qpnp_wled_read_reg(wled, QPNP_WLED_DISP_SEL_REG(base_addr), &reg);
+	if (rc < 0)
+		return rc;
+
+	reg &= QPNP_WLED_DISP_SEL_MASK;
+	reg |= (wled->disp_type_amoled << QPNP_WLED_DISP_SEL_SHIFT);
+
+	rc = qpnp_wled_sec_write_reg(wled, QPNP_WLED_DISP_SEL_REG(base_addr),
+			reg);
+	if (rc)
+		return rc;
+
+	if (wled->disp_type_amoled) {
+		/* Configure the PSM CTRL register for AMOLED */
+		if (wled->vref_psm_mv < QPNP_WLED_VREF_PSM_MIN_MV)
+			wled->vref_psm_mv = QPNP_WLED_VREF_PSM_MIN_MV;
+		else if (wled->vref_psm_mv > QPNP_WLED_VREF_PSM_MAX_MV)
+			wled->vref_psm_mv = QPNP_WLED_VREF_PSM_MAX_MV;
+
+		rc = qpnp_wled_read_reg(wled,
+				QPNP_WLED_PSM_CTRL_REG(wled->ctrl_base), &reg);
+		if (rc < 0)
+			return rc;
+
+		reg &= QPNP_WLED_VREF_PSM_MASK;
+		reg |= ((wled->vref_psm_mv - QPNP_WLED_VREF_PSM_MIN_MV)/
+			QPNP_WLED_VREF_PSM_STEP_MV);
+		reg |= QPNP_WLED_PSM_OVERWRITE_BIT;
+		rc = qpnp_wled_write_reg(wled,
+				QPNP_WLED_PSM_CTRL_REG(wled->ctrl_base), reg);
+		if (rc)
+			return rc;
+
+		/* Configure the VLOOP COMP RES register for AMOLED */
+		if (wled->loop_comp_res_kohm < QPNP_WLED_LOOP_COMP_RES_MIN_KOHM)
+			wled->loop_comp_res_kohm =
+					QPNP_WLED_LOOP_COMP_RES_MIN_KOHM;
+		else if (wled->loop_comp_res_kohm >
+					QPNP_WLED_LOOP_COMP_RES_MAX_KOHM)
+			wled->loop_comp_res_kohm =
+					QPNP_WLED_LOOP_COMP_RES_MAX_KOHM;
+
+		rc = qpnp_wled_read_reg(wled,
+				QPNP_WLED_VLOOP_COMP_RES_REG(wled->ctrl_base),
+				&reg);
+		if (rc < 0)
+			return rc;
+
+		reg &= QPNP_WLED_VLOOP_COMP_RES_MASK;
+		reg |= ((wled->loop_comp_res_kohm -
+				 QPNP_WLED_LOOP_COMP_RES_MIN_KOHM)/
+				 QPNP_WLED_LOOP_COMP_RES_STEP_KOHM);
+		reg |= QPNP_WLED_VLOOP_COMP_RES_OVERWRITE;
+		rc = qpnp_wled_write_reg(wled,
+				QPNP_WLED_VLOOP_COMP_RES_REG(wled->ctrl_base),
+				reg);
+		if (rc)
+			return rc;
+
+		/* Configure the CTRL TEST4 register for AMOLED */
+		rc = qpnp_wled_read_reg(wled,
+				QPNP_WLED_TEST4_REG(wled->ctrl_base), &reg);
+		if (rc < 0)
+			return rc;
+
+		reg |= QPNP_WLED_TEST4_EN_IIND_UP;
+		rc = qpnp_wled_sec_write_reg(wled,
+				QPNP_WLED_TEST4_REG(base_addr), reg);
+		if (rc)
+			return rc;
+	} else {
+		/*
+		 * enable VREF_UP to avoid false ovp on low brightness for LCD
+		 */
+		reg = QPNP_WLED_TEST4_EN_VREF_UP
+				| QPNP_WLED_TEST4_EN_DEB_BYPASS_ILIM_BIT;
+		rc = qpnp_wled_sec_write_reg(wled,
+				QPNP_WLED_TEST4_REG(base_addr), reg);
+		if (rc)
+			return rc;
+	}
+
+	return 0;
+}
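+
+/*
+ * Worked example for the PSM_CTRL update above: with the AMOLED
+ * default vref_psm_mv of 450, (450 - 400) / 50 = 1 is ORed into the
+ * low bits along with QPNP_WLED_PSM_OVERWRITE_BIT, while the bits
+ * preserved by QPNP_WLED_VREF_PSM_MASK retain their read-back values.
+ */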
+
+#define AUTO_CALIB_BRIGHTNESS		200
+static int wled_auto_calibrate(struct qpnp_wled *wled)
+{
+	int rc = 0, i;
+	u8 reg = 0, sink_config = 0, sink_test = 0, sink_valid = 0, int_sts;
+
+	/* read configured sink configuration */
+	rc = qpnp_wled_read_reg(wled,
+		QPNP_WLED_CURR_SINK_REG(wled->sink_base), &sink_config);
+	if (rc < 0) {
+		pr_err("Failed to read SINK configuration rc=%d\n", rc);
+		goto failed_calib;
+	}
+
+	/* disable the module before starting calibration */
+	rc = qpnp_wled_masked_write_reg(wled,
+			QPNP_WLED_MODULE_EN_REG(wled->ctrl_base),
+			QPNP_WLED_MODULE_EN_MASK, 0);
+	if (rc < 0) {
+		pr_err("Failed to disable WLED module rc=%d\n", rc);
+		goto failed_calib;
+	}
+
+	/* set low brightness across all sinks */
+	rc = qpnp_wled_set_level(wled, AUTO_CALIB_BRIGHTNESS);
+	if (rc < 0) {
+		pr_err("Failed to set brightness for calibration rc=%d\n", rc);
+		goto failed_calib;
+	}
+
+	if (wled->en_cabc) {
+		for (i = 0; i < wled->max_strings; i++) {
+			reg = 0;
+			rc = qpnp_wled_masked_write_reg(wled,
+				QPNP_WLED_CABC_REG(wled->sink_base, i),
+				QPNP_WLED_CABC_MASK, reg);
+			if (rc < 0)
+				goto failed_calib;
+		}
+	}
+
+	/* disable all sinks */
+	rc = qpnp_wled_write_reg(wled,
+		 QPNP_WLED_CURR_SINK_REG(wled->sink_base), 0);
+	if (rc < 0) {
+		pr_err("Failed to disable all sinks rc=%d\n", rc);
+		goto failed_calib;
+	}
+
+	/* iterate through the strings one by one */
+	for (i = 0; i < wled->max_strings; i++) {
+		sink_test = 1 << (QPNP_WLED_CURR_SINK_SHIFT + i);
+
+		/* Enable feedback control */
+		rc = qpnp_wled_write_reg(wled,
+			QPNP_WLED_FDBK_OP_REG(wled->ctrl_base),
+			i + 1);
+		if (rc < 0) {
+			pr_err("Failed to enable feedback for SINK %d rc = %d\n",
+						i + 1, rc);
+			goto failed_calib;
+		}
+
+		/* enable the sink */
+		rc = qpnp_wled_write_reg(wled,
+			QPNP_WLED_CURR_SINK_REG(wled->sink_base), sink_test);
+		if (rc < 0) {
+			pr_err("Failed to configure SINK %d rc=%d\n",
+						i + 1, rc);
+			goto failed_calib;
+		}
+
+		/* Enable the module */
+		rc = qpnp_wled_masked_write_reg(wled,
+			QPNP_WLED_MODULE_EN_REG(wled->ctrl_base),
+			QPNP_WLED_MODULE_EN_MASK, QPNP_WLED_MODULE_EN_MASK);
+		if (rc < 0) {
+			pr_err("Failed to enable WLED module rc=%d\n", rc);
+			goto failed_calib;
+		}
+
+		/* delay for WLED soft-start */
+		usleep_range(QPNP_WLED_SOFT_START_DLY_US,
+				QPNP_WLED_SOFT_START_DLY_US + 1000);
+
+		rc = qpnp_wled_read_reg(wled,
+			QPNP_WLED_INT_RT_STS(wled->ctrl_base), &int_sts);
+		if (rc < 0) {
+			pr_err("Error in reading WLED_INT_RT_STS rc=%d\n", rc);
+			goto failed_calib;
+		}
+
+		if (int_sts & QPNP_WLED_OVP_FAULT_BIT)
+			pr_debug("WLED OVP fault detected with SINK %d\n",
+						i + 1);
+		else
+			sink_valid |= sink_test;
+
+		/* Disable the module */
+		rc = qpnp_wled_masked_write_reg(wled,
+			QPNP_WLED_MODULE_EN_REG(wled->ctrl_base),
+			QPNP_WLED_MODULE_EN_MASK, 0);
+		if (rc < 0) {
+			pr_err("Failed to disable WLED module rc=%d\n", rc);
+			goto failed_calib;
+		}
+	}
+
+	if (sink_valid == sink_config) {
+		pr_debug("WLED auto-calibration complete, default sink-config=%x OK!\n",
+						sink_config);
+	} else {
+		pr_warn("Invalid WLED default sink config=%x changing it to=%x\n",
+						sink_config, sink_valid);
+		sink_config = sink_valid;
+	}
+
+	if (!sink_config) {
+		pr_warn("No valid WLED sinks found\n");
+		wled->module_dis_perm = true;
+		goto failed_calib;
+	}
+
+	/* write the new sink configuration */
+	rc = qpnp_wled_write_reg(wled,
+			QPNP_WLED_CURR_SINK_REG(wled->sink_base), sink_config);
+	if (rc < 0) {
+		pr_err("Failed to reconfigure the default sink rc=%d\n", rc);
+		goto failed_calib;
+	}
+
+	/* MODULATOR_EN setting for valid sinks */
+	for (i = 0; i < wled->max_strings; i++) {
+		if (wled->en_cabc) {
+			reg = 1 << QPNP_WLED_CABC_SHIFT;
+			rc = qpnp_wled_masked_write_reg(wled,
+				QPNP_WLED_CABC_REG(wled->sink_base, i),
+				QPNP_WLED_CABC_MASK, reg);
+			if (rc < 0)
+				goto failed_calib;
+		}
+
+		if (sink_config & (1 << (QPNP_WLED_CURR_SINK_SHIFT + i)))
+			reg = (QPNP_WLED_MOD_EN << QPNP_WLED_MOD_EN_SHFT);
+		else
+			reg = 0x0; /* disable modulator_en for unused sink */
+
+		if (wled->dim_mode == QPNP_WLED_DIM_HYBRID)
+			reg &= QPNP_WLED_GATE_DRV_MASK;
+		else
+			reg |= ~QPNP_WLED_GATE_DRV_MASK;
+
+		rc = qpnp_wled_write_reg(wled,
+			QPNP_WLED_MOD_EN_REG(wled->sink_base, i), reg);
+		if (rc < 0) {
+			pr_err("Failed to configure MODULATOR_EN rc=%d\n", rc);
+			goto failed_calib;
+		}
+	}
+
+	/* restore the feedback setting */
+	rc = qpnp_wled_write_reg(wled,
+			QPNP_WLED_FDBK_OP_REG(wled->ctrl_base),
+			wled->fdbk_op);
+	if (rc < 0) {
+		pr_err("Failed to restore feedback setting rc=%d\n", rc);
+		goto failed_calib;
+	}
+
+	/* restore brightness */
+	rc = qpnp_wled_set_level(wled, !wled->cdev.brightness ?
+			AUTO_CALIB_BRIGHTNESS : wled->cdev.brightness);
+	if (rc < 0) {
+		pr_err("Failed to set brightness after calibration rc=%d\n",
+						rc);
+		goto failed_calib;
+	}
+
+	rc = qpnp_wled_masked_write_reg(wled,
+			QPNP_WLED_MODULE_EN_REG(wled->ctrl_base),
+			QPNP_WLED_MODULE_EN_MASK,
+			QPNP_WLED_MODULE_EN_MASK);
+	if (rc < 0) {
+		pr_err("Failed to enable WLED module rc=%d\n", rc);
+		goto failed_calib;
+	}
+
+	/* delay for WLED soft-start */
+	usleep_range(QPNP_WLED_SOFT_START_DLY_US,
+			QPNP_WLED_SOFT_START_DLY_US + 1000);
+
+failed_calib:
+	return rc;
+}
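+
+/*
+ * Example of the per-string probe above: for string i = 2, sink_test =
+ * 1 << (QPNP_WLED_CURR_SINK_SHIFT + 2) = 0x40, so only that CURR_SINK
+ * bit is enabled for the trial; if no OVP fault is latched after
+ * soft-start, 0x40 is accumulated into sink_valid and ends up in the
+ * final sink configuration.
+ */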
+
+#define WLED_AUTO_CAL_OVP_COUNT		5
+#define WLED_AUTO_CAL_CNT_DLY_US	1000000	/* 1 second */
+static bool qpnp_wled_auto_cal_required(struct qpnp_wled *wled)
+{
+	s64 elapsed_time_us;
+
+	/*
+	 * Check whether the OVP fault is an occasional one or is firing
+	 * continuously; only the latter qualifies for an auto-calibration
+	 * check.
+	 */
+	if (!wled->auto_calibration_ovp_count) {
+		wled->start_ovp_fault_time = ktime_get();
+		wled->auto_calibration_ovp_count++;
+	} else {
+		elapsed_time_us = ktime_us_delta(ktime_get(),
+				wled->start_ovp_fault_time);
+		if (elapsed_time_us > WLED_AUTO_CAL_CNT_DLY_US)
+			wled->auto_calibration_ovp_count = 0;
+		else
+			wled->auto_calibration_ovp_count++;
+
+		if (wled->auto_calibration_ovp_count >=
+				WLED_AUTO_CAL_OVP_COUNT) {
+			wled->auto_calibration_ovp_count = 0;
+			return true;
+		}
+	}
+
+	return false;
+}
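+
+/*
+ * Example of the counting policy above: five OVP faults landing within
+ * one second (WLED_AUTO_CAL_CNT_DLY_US) of the first push the count to
+ * WLED_AUTO_CAL_OVP_COUNT and the function returns true, while a gap
+ * longer than one second between faults resets the count to zero.
+ */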
+
+static int qpnp_wled_auto_calibrate_at_init(struct qpnp_wled *wled)
+{
+	int rc;
+	u8 fault_status = 0, rt_status = 0;
+
+	if (!wled->auto_calib_enabled)
+		return 0;
+
+	rc = qpnp_wled_read_reg(wled,
+			QPNP_WLED_INT_RT_STS(wled->ctrl_base), &rt_status);
+	if (rc < 0)
+		pr_err("Failed to read RT status rc=%d\n", rc);
+
+	rc = qpnp_wled_read_reg(wled,
+			QPNP_WLED_FAULT_STATUS(wled->ctrl_base), &fault_status);
+	if (rc < 0)
+		pr_err("Failed to read fault status rc=%d\n", rc);
+
+	if ((rt_status & QPNP_WLED_OVP_FLT_RT_STS_BIT) ||
+			(fault_status & QPNP_WLED_OVP_FAULT_BIT)) {
+		mutex_lock(&wled->lock);
+		rc = wled_auto_calibrate(wled);
+		if (rc < 0)
+			pr_err("Failed auto-calibration rc=%d\n", rc);
+		else
+			wled->auto_calib_done = true;
+		mutex_unlock(&wled->lock);
+	}
+
+	return rc;
+}
+
+/* ovp irq handler */
+static irqreturn_t qpnp_wled_ovp_irq_handler(int irq, void *_wled)
+{
+	struct qpnp_wled *wled = _wled;
+	int rc;
+	u8 fault_sts, int_sts;
+
+	rc = qpnp_wled_read_reg(wled,
+			QPNP_WLED_INT_RT_STS(wled->ctrl_base), &int_sts);
+	if (rc < 0) {
+		pr_err("Error in reading WLED_INT_RT_STS rc=%d\n", rc);
+		return IRQ_HANDLED;
+	}
+
+	rc = qpnp_wled_read_reg(wled,
+			QPNP_WLED_FAULT_STATUS(wled->ctrl_base), &fault_sts);
+	if (rc < 0) {
+		pr_err("Error in reading WLED_FAULT_STATUS rc=%d\n", rc);
+		return IRQ_HANDLED;
+	}
+
+	if (fault_sts & (QPNP_WLED_OVP_FAULT_BIT | QPNP_WLED_ILIM_FAULT_BIT))
+		pr_err("WLED OVP fault detected, int_sts=%x fault_sts= %x\n",
+			int_sts, fault_sts);
+
+	if (fault_sts & QPNP_WLED_OVP_FAULT_BIT) {
+		if (wled->auto_calib_enabled && !wled->auto_calib_done) {
+			if (qpnp_wled_auto_cal_required(wled)) {
+				mutex_lock(&wled->lock);
+				if (wled->ovp_irq > 0 &&
+						!wled->ovp_irq_disabled) {
+					disable_irq_nosync(wled->ovp_irq);
+					wled->ovp_irq_disabled = true;
+				}
+
+				rc = wled_auto_calibrate(wled);
+				if (rc < 0)
+					pr_err("Failed auto-calibration rc=%d\n",
+								rc);
+				else
+					wled->auto_calib_done = true;
+
+				if (wled->ovp_irq > 0 &&
+						wled->ovp_irq_disabled) {
+					enable_irq(wled->ovp_irq);
+					wled->ovp_irq_disabled = false;
+				}
+				mutex_unlock(&wled->lock);
+			}
+		}
+	}
+
+	return IRQ_HANDLED;
+}
+
+/* short circuit irq handler */
+static irqreturn_t qpnp_wled_sc_irq_handler(int irq, void *_wled)
+{
+	struct qpnp_wled *wled = _wled;
+	int rc;
+	u8 val;
+
+	rc = qpnp_wled_read_reg(wled,
+			QPNP_WLED_FAULT_STATUS(wled->ctrl_base), &val);
+	if (rc < 0) {
+		pr_err("Error in reading WLED_FAULT_STATUS rc=%d\n", rc);
+		return IRQ_HANDLED;
+	}
+
+	pr_err("WLED short circuit detected %d times fault_status=%x\n",
+		++wled->sc_cnt, val);
+	mutex_lock(&wled->lock);
+	qpnp_wled_module_en(wled, wled->ctrl_base, false);
+	msleep(QPNP_WLED_SC_DLY_MS);
+	qpnp_wled_module_en(wled, wled->ctrl_base, true);
+	mutex_unlock(&wled->lock);
+
+	return IRQ_HANDLED;
+}
+
+static bool is_avdd_trim_adjustment_required(struct qpnp_wled *wled)
+{
+	int rc;
+	u8 reg = 0;
+
+	/*
+	 * AVDD trim adjustment is not required for pmi8998/pm660l and not
+	 * supported for pmi8994.
+	 */
+	if (wled->pmic_rev_id->pmic_subtype == PMI8998_SUBTYPE ||
+		wled->pmic_rev_id->pmic_subtype == PM660L_SUBTYPE ||
+		wled->pmic_rev_id->pmic_subtype == PMI8994_SUBTYPE)
+		return false;
+
+	/*
+	 * Configure TRIM_REG only if disp_type_amoled and it has
+	 * not already been programmed by bootloader.
+	 */
+	if (!wled->disp_type_amoled)
+		return false;
+
+	rc = qpnp_wled_read_reg(wled,
+			QPNP_WLED_CTRL_SPARE_REG(wled->ctrl_base), &reg);
+	if (rc < 0)
+		return false;
+
+	return !(reg & QPNP_WLED_AVDD_SET_BIT);
+}
+
+static int qpnp_wled_gm_config(struct qpnp_wled *wled)
+{
+	int rc;
+	u8 mask = 0, reg = 0;
+
+	/* Configure the LOOP COMP GM register */
+	if ((wled->pmic_rev_id->pmic_subtype == PMI8998_SUBTYPE ||
+			wled->pmic_rev_id->pmic_subtype == PM660L_SUBTYPE)) {
+		if (wled->disp_type_amoled) {
+			reg = 0;
+			mask |= QPNP_WLED_VLOOP_COMP_AUTO_GM_EN |
+				QPNP_WLED_VLOOP_COMP_AUTO_GM_THRESH_MASK;
+		} else {
+			if (wled->loop_auto_gm_en)
+				reg |= QPNP_WLED_VLOOP_COMP_AUTO_GM_EN;
+
+			if (wled->loop_auto_gm_thresh >
+					QPNP_WLED_LOOP_AUTO_GM_THRESH_MAX)
+				wled->loop_auto_gm_thresh =
+					QPNP_WLED_LOOP_AUTO_GM_THRESH_MAX;
+
+			reg |= wled->loop_auto_gm_thresh <<
+				QPNP_WLED_VLOOP_COMP_AUTO_GM_THRESH_SHIFT;
+			mask |= QPNP_WLED_VLOOP_COMP_AUTO_GM_EN |
+				QPNP_WLED_VLOOP_COMP_AUTO_GM_THRESH_MASK;
+		}
+	}
+
+	if (wled->loop_ea_gm < QPNP_WLED_LOOP_EA_GM_MIN)
+		wled->loop_ea_gm = QPNP_WLED_LOOP_EA_GM_MIN;
+	else if (wled->loop_ea_gm > QPNP_WLED_LOOP_EA_GM_MAX)
+		wled->loop_ea_gm = QPNP_WLED_LOOP_EA_GM_MAX;
+
+	reg |= wled->loop_ea_gm | QPNP_WLED_VLOOP_COMP_GM_OVERWRITE;
+	mask |= QPNP_WLED_VLOOP_COMP_GM_MASK |
+		QPNP_WLED_VLOOP_COMP_GM_OVERWRITE;
+
+	rc = qpnp_wled_masked_write_reg(wled,
+			QPNP_WLED_VLOOP_COMP_GM_REG(wled->ctrl_base), mask,
+			reg);
+	if (rc)
+		pr_err("write VLOOP_COMP_GM_REG failed, rc=%d]\n", rc);
+
+	return rc;
+}
+
+static int qpnp_wled_ovp_config(struct qpnp_wled *wled)
+{
+	int rc, i, *ovp_table;
+	u8 reg;
+
+	/*
+	 * Configure the OVP register based on ovp_mv only if display type is
+	 * not AMOLED.
+	 */
+	if (wled->disp_type_amoled)
+		return 0;
+
+	if (wled->pmic_rev_id->pmic_subtype == PMI8998_SUBTYPE ||
+		wled->pmic_rev_id->pmic_subtype == PM660L_SUBTYPE)
+		ovp_table = qpnp_wled_ovp_thresholds_pmi8998;
+	else
+		ovp_table = qpnp_wled_ovp_thresholds_pmi8994;
+
+	for (i = 0; i < NUM_SUPPORTED_OVP_THRESHOLDS; i++) {
+		if (wled->ovp_mv == ovp_table[i])
+			break;
+	}
+
+	if (i == NUM_SUPPORTED_OVP_THRESHOLDS) {
+		dev_err(&wled->pdev->dev,
+			"Invalid ovp threshold specified in device tree\n");
+		return -EINVAL;
+	}
+
+	reg = i & QPNP_WLED_OVP_MASK;
+	rc = qpnp_wled_masked_write_reg(wled,
+			QPNP_WLED_OVP_REG(wled->ctrl_base),
+			QPNP_WLED_OVP_MASK, reg);
+	if (rc)
+		return rc;
+
+	return 0;
+}
+
+static int qpnp_wled_avdd_trim_config(struct qpnp_wled *wled)
+{
+	int rc, i;
+	u8 reg;
+
+	for (i = 0; i < NUM_SUPPORTED_AVDD_VOLTAGES; i++) {
+		if (wled->avdd_target_voltage_mv ==
+				qpnp_wled_avdd_target_voltages[i])
+			break;
+	}
+
+	if (i == NUM_SUPPORTED_AVDD_VOLTAGES) {
+		dev_err(&wled->pdev->dev,
+			"Invalid avdd target voltage specified in device tree\n");
+		return -EINVAL;
+	}
+
+	/* Update WLED_OVP register based on desired target voltage */
+	reg = qpnp_wled_ovp_reg_settings[i];
+	rc = qpnp_wled_masked_write_reg(wled,
+			QPNP_WLED_OVP_REG(wled->ctrl_base),
+			QPNP_WLED_OVP_MASK, reg);
+	if (rc)
+		return rc;
+
+	/* Update WLED_TRIM register based on desired target voltage */
+	rc = qpnp_wled_read_reg(wled,
+			QPNP_WLED_REF_7P7_TRIM_REG(wled->ctrl_base), &reg);
+	if (rc)
+		return rc;
+
+	reg += qpnp_wled_avdd_trim_adjustments[i];
+	if ((s8)reg < QPNP_WLED_AVDD_MIN_TRIM_VAL ||
+			(s8)reg > QPNP_WLED_AVDD_MAX_TRIM_VAL) {
+		dev_dbg(&wled->pdev->dev,
+			 "adjusted trim %d is not within range, capping it\n",
+			 (s8)reg);
+		if ((s8)reg < QPNP_WLED_AVDD_MIN_TRIM_VAL)
+			reg = QPNP_WLED_AVDD_MIN_TRIM_VAL;
+		else
+			reg = QPNP_WLED_AVDD_MAX_TRIM_VAL;
+	}
+
+	reg &= QPNP_WLED_7P7_TRIM_MASK;
+	rc = qpnp_wled_sec_write_reg(wled,
+			QPNP_WLED_REF_7P7_TRIM_REG(wled->ctrl_base), reg);
+	if (rc < 0)
+		dev_err(&wled->pdev->dev, "Write to 7P7_TRIM register failed, rc=%d\n",
+			rc);
+	return rc;
+}
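+
+/*
+ * Worked example using the tables above: a 7300 mV target selects
+ * index 2, so OVP is written with qpnp_wled_ovp_reg_settings[2] = 0x1
+ * and the read-back 7P7 trim code is adjusted by
+ * qpnp_wled_avdd_trim_adjustments[2] = -2 before being clamped to the
+ * 0x0..0xF trim range and rewritten.
+ */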
+
+static int qpnp_wled_avdd_mode_config(struct qpnp_wled *wled)
+{
+	int rc;
+	u8 reg = 0;
+
+	/*
+	 * At present, configuring the mode to SPMI/SWIRE for controlling
+	 * AVDD voltage is available only in pmi8998/pm660l.
+	 */
+	if (wled->pmic_rev_id->pmic_subtype != PMI8998_SUBTYPE &&
+		wled->pmic_rev_id->pmic_subtype != PM660L_SUBTYPE)
+		return 0;
+
+	/* AMOLED_VOUT should be configured for AMOLED */
+	if (!wled->disp_type_amoled)
+		return 0;
+
+	/* Configure avdd register */
+	if (wled->avdd_target_voltage_mv > QPNP_WLED_AVDD_MAX_MV) {
+		dev_dbg(&wled->pdev->dev, "Capping avdd target voltage to %d\n",
+			QPNP_WLED_AVDD_MAX_MV);
+		wled->avdd_target_voltage_mv = QPNP_WLED_AVDD_MAX_MV;
+	} else if (wled->avdd_target_voltage_mv < QPNP_WLED_AVDD_MIN_MV) {
+		dev_info(&wled->pdev->dev, "Capping avdd target voltage to %d\n",
+			QPNP_WLED_AVDD_MIN_MV);
+		wled->avdd_target_voltage_mv = QPNP_WLED_AVDD_MIN_MV;
+	}
+
+	if (wled->avdd_mode_spmi) {
+		reg = QPNP_WLED_AVDD_MV_TO_REG(wled->avdd_target_voltage_mv);
+		reg |= QPNP_WLED_AVDD_SEL_SPMI_BIT;
+		rc = qpnp_wled_write_reg(wled,
+				QPNP_WLED_AMOLED_VOUT_REG(wled->ctrl_base),
+				reg);
+		if (rc < 0)
+			pr_err("Write to AMOLED_VOUT register failed, rc=%d\n",
+				rc);
+	} else {
+		rc = qpnp_wled_swire_avdd_config(wled);
+		if (rc < 0)
+			pr_err("Write to SWIRE_AVDD_DEFAULT register failed rc:%d\n",
+				rc);
+	}
+
+	return rc;
+}
+
+static int qpnp_wled_ilim_config(struct qpnp_wled *wled)
+{
+	int rc, i, *ilim_table;
+	u8 reg;
+
+	if (wled->ilim_ma < PMI8994_WLED_ILIM_MIN_MA)
+		wled->ilim_ma = PMI8994_WLED_ILIM_MIN_MA;
+
+	if (wled->pmic_rev_id->pmic_subtype == PMI8998_SUBTYPE ||
+		wled->pmic_rev_id->pmic_subtype == PM660L_SUBTYPE) {
+		ilim_table = qpnp_wled_ilim_settings_pmi8998;
+		if (wled->ilim_ma > PMI8998_WLED_ILIM_MAX_MA)
+			wled->ilim_ma = PMI8998_WLED_ILIM_MAX_MA;
+	} else {
+		ilim_table = qpnp_wled_ilim_settings_pmi8994;
+		if (wled->ilim_ma > PMI8994_WLED_ILIM_MAX_MA)
+			wled->ilim_ma = PMI8994_WLED_ILIM_MAX_MA;
+	}
+
+	for (i = 0; i < NUM_SUPPORTED_ILIM_THRESHOLDS; i++) {
+		if (wled->ilim_ma == ilim_table[i])
+			break;
+	}
+
+	if (i == NUM_SUPPORTED_ILIM_THRESHOLDS) {
+		dev_err(&wled->pdev->dev,
+			"Invalid ilim threshold specified in device tree\n");
+		return -EINVAL;
+	}
+
+	reg = (i & QPNP_WLED_ILIM_MASK) | QPNP_WLED_ILIM_OVERWRITE;
+	rc = qpnp_wled_masked_write_reg(wled,
+			QPNP_WLED_ILIM_REG(wled->ctrl_base),
+			QPNP_WLED_ILIM_MASK | QPNP_WLED_ILIM_OVERWRITE, reg);
+	if (rc < 0)
+		dev_err(&wled->pdev->dev, "Write to ILIM register failed, rc=%d\n",
+			rc);
+	return rc;
+}
+
+static int qpnp_wled_vref_config(struct qpnp_wled *wled)
+{
+	struct wled_vref_setting vref_setting;
+	int rc;
+	u8 reg = 0;
+
+	if (wled->pmic_rev_id->pmic_subtype == PMI8998_SUBTYPE ||
+			wled->pmic_rev_id->pmic_subtype == PM660L_SUBTYPE)
+		vref_setting = vref_setting_pmi8998;
+	else
+		vref_setting = vref_setting_pmi8994;
+
+	if (wled->vref_uv < vref_setting.min_uv)
+		wled->vref_uv = vref_setting.min_uv;
+	else if (wled->vref_uv > vref_setting.max_uv)
+		wled->vref_uv = vref_setting.max_uv;
+
+	reg |= DIV_ROUND_CLOSEST(wled->vref_uv - vref_setting.min_uv,
+					vref_setting.step_uv);
+
+	rc = qpnp_wled_masked_write_reg(wled,
+			QPNP_WLED_VREF_REG(wled->ctrl_base),
+			QPNP_WLED_VREF_MASK, reg);
+	if (rc)
+		pr_err("Write VREF_REG failed, rc=%d\n", rc);
+
+	return rc;
+}
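+
+/*
+ * Worked example for the VREF code above: on pmi8998, the default
+ * vref_uv of 127500 gives DIV_ROUND_CLOSEST(127500 - 60000, 22500) = 3,
+ * programmed through QPNP_WLED_VREF_MASK (GENMASK(3, 0)).
+ */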
+
+/* Configure WLED registers */
+static int qpnp_wled_config(struct qpnp_wled *wled)
+{
+	int rc, i, temp;
+	u8 reg = 0, sink_en = 0, mask;
+
+	/* Configure display type */
+	rc = qpnp_wled_set_disp(wled, wled->ctrl_base);
+	if (rc < 0)
+		return rc;
+
+	/* Configure the FEEDBACK OUTPUT register */
+	rc = qpnp_wled_read_reg(wled, QPNP_WLED_FDBK_OP_REG(wled->ctrl_base),
+			&reg);
+	if (rc < 0)
+		return rc;
+	reg &= QPNP_WLED_FDBK_OP_MASK;
+	reg |= wled->fdbk_op;
+	rc = qpnp_wled_write_reg(wled, QPNP_WLED_FDBK_OP_REG(wled->ctrl_base),
+			reg);
+	if (rc)
+		return rc;
+
+	/* Configure the VREF register */
+	rc = qpnp_wled_vref_config(wled);
+	if (rc < 0) {
+		pr_err("Error in configuring wled vref, rc=%d\n", rc);
+		return rc;
+	}
+
+	/* Configure VLOOP_COMP_GM register */
+	rc = qpnp_wled_gm_config(wled);
+	if (rc < 0) {
+		pr_err("Error in configureing wled gm, rc=%d\n", rc);
+		return rc;
+	}
+
+	/* Configure the ILIM register */
+	rc = qpnp_wled_ilim_config(wled);
+	if (rc < 0) {
+		pr_err("Error in configuring wled ilim, rc=%d\n", rc);
+		return rc;
+	}
+
+	/* Configure auto PFM mode for LCD mode only */
+	if ((wled->pmic_rev_id->pmic_subtype == PMI8998_SUBTYPE ||
+		wled->pmic_rev_id->pmic_subtype == PM660L_SUBTYPE)
+		&& !wled->disp_type_amoled) {
+		reg = 0;
+		reg |= wled->lcd_auto_pfm_thresh;
+		reg |= wled->lcd_auto_pfm_en <<
+			QPNP_WLED_LCD_AUTO_PFM_EN_SHIFT;
+		rc = qpnp_wled_masked_write_reg(wled,
+				QPNP_WLED_LCD_AUTO_PFM_REG(wled->ctrl_base),
+				QPNP_WLED_LCD_AUTO_PFM_EN_BIT |
+				QPNP_WLED_LCD_AUTO_PFM_THRESH_MASK, reg);
+		if (rc < 0) {
+			pr_err("Write LCD_AUTO_PFM failed, rc=%d\n", rc);
+			return rc;
+		}
+	}
+
+	/* Configure the soft start ramp delay: for AMOLED - 0, for LCD - 2 */
+	reg = (wled->disp_type_amoled) ? 0 : 2;
+	mask = SOFTSTART_RAMP_DELAY_MASK;
+	if ((wled->pmic_rev_id->pmic_subtype == PMI8998_SUBTYPE ||
+		wled->pmic_rev_id->pmic_subtype == PM660L_SUBTYPE)
+		&& wled->disp_type_amoled) {
+		reg |= SOFTSTART_OVERWRITE_BIT;
+		mask |= SOFTSTART_OVERWRITE_BIT;
+	}
+
+	rc = qpnp_wled_masked_write_reg(wled,
+			QPNP_WLED_SOFTSTART_RAMP_DLY(wled->ctrl_base),
+			mask, reg);
+	if (rc)
+		return rc;
+
+	/* Configure the MAX BOOST DUTY register */
+	if (wled->boost_duty_ns < QPNP_WLED_BOOST_DUTY_MIN_NS)
+		wled->boost_duty_ns = QPNP_WLED_BOOST_DUTY_MIN_NS;
+	else if (wled->boost_duty_ns > QPNP_WLED_BOOST_DUTY_MAX_NS)
+		wled->boost_duty_ns = QPNP_WLED_BOOST_DUTY_MAX_NS;
+
+	rc = qpnp_wled_read_reg(wled, QPNP_WLED_BOOST_DUTY_REG(wled->ctrl_base),
+			&reg);
+	if (rc < 0)
+		return rc;
+	reg &= QPNP_WLED_BOOST_DUTY_MASK;
+	reg |= (wled->boost_duty_ns / QPNP_WLED_BOOST_DUTY_STEP_NS);
+	rc = qpnp_wled_write_reg(wled,
+			QPNP_WLED_BOOST_DUTY_REG(wled->ctrl_base), reg);
+	if (rc)
+		return rc;
+
+	/* Configure the SWITCHING FREQ register */
+	if (wled->switch_freq_khz == 1600)
+		reg = QPNP_WLED_SWITCH_FREQ_1600_KHZ_CODE;
+	else
+		reg = QPNP_WLED_SWITCH_FREQ_800_KHZ_CODE;
+
+	/*
+	 * Do not set the overwrite bit when switching frequency is selected
+	 * for AMOLED. This register is in logic reset block which can cause
+	 * the value to be overwritten during module enable/disable.
+	 */
+	mask = QPNP_WLED_SWITCH_FREQ_MASK | QPNP_WLED_SWITCH_FREQ_OVERWRITE;
+	if (!wled->disp_type_amoled)
+		reg |= QPNP_WLED_SWITCH_FREQ_OVERWRITE;
+
+	rc = qpnp_wled_masked_write_reg(wled,
+			QPNP_WLED_SWITCH_FREQ_REG(wled->ctrl_base), mask, reg);
+	if (rc < 0)
+		return rc;
+
+	rc = qpnp_wled_ovp_config(wled);
+	if (rc < 0) {
+		pr_err("Error in configuring OVP threshold, rc=%d\n", rc);
+		return rc;
+	}
+
+	if (is_avdd_trim_adjustment_required(wled)) {
+		rc = qpnp_wled_avdd_trim_config(wled);
+		if (rc < 0)
+			return rc;
+	}
+
+	rc = qpnp_wled_avdd_mode_config(wled);
+	if (rc < 0)
+		return rc;
+
+	/* Configure the MODULATION register */
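+	/*
+	 * Round the requested modulation frequency up to the nearest
+	 * supported rate (1200/2400/9600/19200 kHz); anything above the
+	 * maximum falls back to the 9600 kHz default.
+	 */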
+	if (wled->mod_freq_khz <= QPNP_WLED_MOD_FREQ_1200_KHZ) {
+		wled->mod_freq_khz = QPNP_WLED_MOD_FREQ_1200_KHZ;
+		temp = 3;
+	} else if (wled->mod_freq_khz <= QPNP_WLED_MOD_FREQ_2400_KHZ) {
+		wled->mod_freq_khz = QPNP_WLED_MOD_FREQ_2400_KHZ;
+		temp = 2;
+	} else if (wled->mod_freq_khz <= QPNP_WLED_MOD_FREQ_9600_KHZ) {
+		wled->mod_freq_khz = QPNP_WLED_MOD_FREQ_9600_KHZ;
+		temp = 1;
+	} else if (wled->mod_freq_khz <= QPNP_WLED_MOD_FREQ_19200_KHZ) {
+		wled->mod_freq_khz = QPNP_WLED_MOD_FREQ_19200_KHZ;
+		temp = 0;
+	} else {
+		wled->mod_freq_khz = QPNP_WLED_MOD_FREQ_9600_KHZ;
+		temp = 1;
+	}
+
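+	/*
+	 * Assemble the MOD register: the modulator clock code, phase
+	 * staggering, accumulator clock code, dimming resolution and
+	 * dimming mode are all packed into this one byte.
+	 */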
+	rc = qpnp_wled_read_reg(wled, QPNP_WLED_MOD_REG(wled->sink_base), &reg);
+	if (rc < 0)
+		return rc;
+	reg &= QPNP_WLED_MOD_FREQ_MASK;
+	reg |= (temp << QPNP_WLED_MOD_FREQ_SHIFT);
+
+	reg &= QPNP_WLED_PHASE_STAG_MASK;
+	reg |= (wled->en_phase_stag << QPNP_WLED_PHASE_STAG_SHIFT);
+
+	reg &= QPNP_WLED_ACC_CLK_FREQ_MASK;
+	reg |= (temp << QPNP_WLED_ACC_CLK_FREQ_SHIFT);
+
+	reg &= QPNP_WLED_DIM_RES_MASK;
+	reg |= (wled->en_9b_dim_res << QPNP_WLED_DIM_RES_SHIFT);
+
+	if (wled->dim_mode == QPNP_WLED_DIM_HYBRID) {
+		reg &= QPNP_WLED_DIM_HYB_MASK;
+		reg |= (1 << QPNP_WLED_DIM_HYB_SHIFT);
+	} else {
+		reg &= QPNP_WLED_DIM_HYB_MASK;
+		reg |= (0 << QPNP_WLED_DIM_HYB_SHIFT);
+		reg &= QPNP_WLED_DIM_ANA_MASK;
+		reg |= wled->dim_mode;
+	}
+
+	rc = qpnp_wled_write_reg(wled, QPNP_WLED_MOD_REG(wled->sink_base), reg);
+	if (rc)
+		return rc;
+
+	/* Configure the HYBRID THRESHOLD register */
+	if (wled->hyb_thres < QPNP_WLED_HYB_THRES_MIN)
+		wled->hyb_thres = QPNP_WLED_HYB_THRES_MIN;
+	else if (wled->hyb_thres > QPNP_WLED_HYB_THRES_MAX)
+		wled->hyb_thres = QPNP_WLED_HYB_THRES_MAX;
+
+	rc = qpnp_wled_read_reg(wled, QPNP_WLED_HYB_THRES_REG(wled->sink_base),
+			&reg);
+	if (rc < 0)
+		return rc;
+	reg &= QPNP_WLED_HYB_THRES_MASK;
+	temp = fls(wled->hyb_thres / QPNP_WLED_HYB_THRES_MIN) - 1;
+	reg |= temp;
+	rc = qpnp_wled_write_reg(wled, QPNP_WLED_HYB_THRES_REG(wled->sink_base),
+			reg);
+	if (rc)
+		return rc;
+
+	/* Configure TEST5 register */
+	if (wled->dim_mode == QPNP_WLED_DIM_DIGITAL) {
+		reg = QPNP_WLED_SINK_TEST5_DIG;
+	} else {
+		reg = QPNP_WLED_SINK_TEST5_HYB;
+		if (wled->pmic_rev_id->pmic_subtype == PM660L_SUBTYPE)
+			reg |= QPNP_WLED_SINK_TEST5_HVG_PULL_STR_BIT;
+	}
+
+	rc = qpnp_wled_sec_write_reg(wled,
+			QPNP_WLED_SINK_TEST5_REG(wled->sink_base), reg);
+	if (rc)
+		return rc;
+
+	/* disable all current sinks and enable selected strings */
+	reg = 0x00;
+	rc = qpnp_wled_write_reg(wled, QPNP_WLED_CURR_SINK_REG(wled->sink_base),
+			reg);
+	if (rc)
+		return rc;
+
+	for (i = 0; i < wled->max_strings; i++) {
+		/* SYNC DELAY */
+		if (wled->sync_dly_us > QPNP_WLED_SYNC_DLY_MAX_US)
+			wled->sync_dly_us = QPNP_WLED_SYNC_DLY_MAX_US;
+
+		reg = wled->sync_dly_us / QPNP_WLED_SYNC_DLY_STEP_US;
+		mask = QPNP_WLED_SYNC_DLY_MASK;
+		rc = qpnp_wled_masked_write_reg(wled,
+			QPNP_WLED_SYNC_DLY_REG(wled->sink_base, i),
+			mask, reg);
+		if (rc < 0)
+			return rc;
+
+		/* FULL SCALE CURRENT */
+		if (wled->fs_curr_ua > QPNP_WLED_FS_CURR_MAX_UA)
+			wled->fs_curr_ua = QPNP_WLED_FS_CURR_MAX_UA;
+
+		reg = wled->fs_curr_ua / QPNP_WLED_FS_CURR_STEP_UA;
+		mask = QPNP_WLED_FS_CURR_MASK;
+		rc = qpnp_wled_masked_write_reg(wled,
+			QPNP_WLED_FS_CURR_REG(wled->sink_base, i),
+			mask, reg);
+		if (rc < 0)
+			return rc;
+
+		/* CABC */
+		reg = wled->en_cabc ? (1 << QPNP_WLED_CABC_SHIFT) : 0;
+		mask = QPNP_WLED_CABC_MASK;
+		rc = qpnp_wled_masked_write_reg(wled,
+			QPNP_WLED_CABC_REG(wled->sink_base, i),
+			mask, reg);
+		if (rc < 0)
+			return rc;
+	}
+
+	/* Settings specific to valid sinks */
+	for (i = 0; i < wled->num_strings; i++) {
+		if (wled->strings[i] >= wled->max_strings) {
+			dev_err(&wled->pdev->dev, "Invalid string number\n");
+			return -EINVAL;
+		}
+		/* MODULATOR */
+		rc = qpnp_wled_read_reg(wled,
+			QPNP_WLED_MOD_EN_REG(wled->sink_base, i), &reg);
+		if (rc < 0)
+			return rc;
+		reg &= QPNP_WLED_MOD_EN_MASK;
+		reg |= (QPNP_WLED_MOD_EN << QPNP_WLED_MOD_EN_SHFT);
+
+		if (wled->dim_mode == QPNP_WLED_DIM_HYBRID)
+			reg &= QPNP_WLED_GATE_DRV_MASK;
+		else
+			reg |= ~QPNP_WLED_GATE_DRV_MASK;
+
+		rc = qpnp_wled_write_reg(wled,
+			QPNP_WLED_MOD_EN_REG(wled->sink_base, i), reg);
+		if (rc)
+			return rc;
+
+		/* SINK EN */
+		temp = wled->strings[i] + QPNP_WLED_CURR_SINK_SHIFT;
+		sink_en |= (1 << temp);
+	}
+	mask = QPNP_WLED_CURR_SINK_MASK;
+	rc = qpnp_wled_masked_write_reg(wled,
+		QPNP_WLED_CURR_SINK_REG(wled->sink_base),
+		mask, sink_en);
+	if (rc < 0) {
+		dev_err(&wled->pdev->dev,
+			"Failed to enable WLED sink config rc = %d\n", rc);
+		return rc;
+	}
+
+	rc = qpnp_wled_sync_reg_toggle(wled);
+	if (rc < 0) {
+		dev_err(&wled->pdev->dev, "Failed to toggle sync reg %d\n", rc);
+		return rc;
+	}
+
+	rc = qpnp_wled_auto_calibrate_at_init(wled);
+	if (rc < 0)
+		pr_err("Failed to auto-calibrate at init rc=%d\n", rc);
+
+	/* setup ovp and sc irqs */
+	if (wled->ovp_irq >= 0) {
+		rc = devm_request_threaded_irq(&wled->pdev->dev, wled->ovp_irq,
+				NULL, qpnp_wled_ovp_irq_handler, IRQF_ONESHOT,
+				"qpnp_wled_ovp_irq", wled);
+		if (rc < 0) {
+			dev_err(&wled->pdev->dev,
+				"Unable to request ovp(%d) IRQ(err:%d)\n",
+				wled->ovp_irq, rc);
+			return rc;
+		}
+		rc = qpnp_wled_read_reg(wled,
+				QPNP_WLED_MODULE_EN_REG(wled->ctrl_base), &reg);
+		/* disable the OVP irq only if the module is not enabled */
+		if (!rc && !(reg & QPNP_WLED_MODULE_EN_MASK)) {
+			disable_irq(wled->ovp_irq);
+			wled->ovp_irq_disabled = true;
+		}
+	}
+
+	if (wled->sc_irq >= 0) {
+		wled->sc_cnt = 0;
+		rc = devm_request_threaded_irq(&wled->pdev->dev, wled->sc_irq,
+				NULL, qpnp_wled_sc_irq_handler, IRQF_ONESHOT,
+				"qpnp_wled_sc_irq", wled);
+		if (rc < 0) {
+			dev_err(&wled->pdev->dev,
+				"Unable to request sc(%d) IRQ(err:%d)\n",
+				wled->sc_irq, rc);
+			return rc;
+		}
+
+		rc = qpnp_wled_read_reg(wled,
+				QPNP_WLED_SC_PRO_REG(wled->ctrl_base), &reg);
+		if (rc < 0)
+			return rc;
+		reg &= QPNP_WLED_EN_SC_DEB_CYCLES_MASK;
+		reg |= 1 << QPNP_WLED_EN_SC_SHIFT;
+
+		if (wled->sc_deb_cycles < QPNP_WLED_SC_DEB_CYCLES_MIN)
+			wled->sc_deb_cycles = QPNP_WLED_SC_DEB_CYCLES_MIN;
+		else if (wled->sc_deb_cycles > QPNP_WLED_SC_DEB_CYCLES_MAX)
+			wled->sc_deb_cycles = QPNP_WLED_SC_DEB_CYCLES_MAX;
+		temp = fls(wled->sc_deb_cycles) - QPNP_WLED_SC_DEB_CYCLES_SUB;
+		reg |= (temp << 1);
+
+		if (wled->disp_type_amoled)
+			reg |= QPNP_WLED_SC_PRO_EN_DSCHGR;
+
+		rc = qpnp_wled_write_reg(wled,
+				QPNP_WLED_SC_PRO_REG(wled->ctrl_base), reg);
+		if (rc)
+			return rc;
+
+		if (wled->en_ext_pfet_sc_pro) {
+			reg = QPNP_WLED_EXT_FET_DTEST2;
+			rc = qpnp_wled_sec_write_reg(wled,
+					QPNP_WLED_TEST1_REG(wled->ctrl_base),
+					reg);
+			if (rc)
+				return rc;
+		}
+	} else {
+		rc = qpnp_wled_read_reg(wled,
+				QPNP_WLED_SC_PRO_REG(wled->ctrl_base), &reg);
+		if (rc < 0)
+			return rc;
+		reg &= QPNP_WLED_EN_DEB_CYCLES_MASK;
+
+		if (wled->sc_deb_cycles < QPNP_WLED_SC_DEB_CYCLES_MIN)
+			wled->sc_deb_cycles = QPNP_WLED_SC_DEB_CYCLES_MIN;
+		else if (wled->sc_deb_cycles > QPNP_WLED_SC_DEB_CYCLES_MAX)
+			wled->sc_deb_cycles = QPNP_WLED_SC_DEB_CYCLES_MAX;
+		temp = fls(wled->sc_deb_cycles) - QPNP_WLED_SC_DEB_CYCLES_SUB;
+		reg |= (temp << 1);
+
+		rc = qpnp_wled_write_reg(wled,
+				QPNP_WLED_SC_PRO_REG(wled->ctrl_base), reg);
+		if (rc)
+			return rc;
+	}
+
+	return 0;
+}
+
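+/*
+ * Illustrative devicetree snippet built from the properties parsed below;
+ * the node name and all values are hypothetical, not taken from a shipping
+ * board file:
+ *
+ *	qcom,leds@d800 {
+ *		linux,name = "wled";
+ *		qcom,fdbk-output = "auto";
+ *		qcom,dim-mode = "hybrid";
+ *		qcom,ilim-ma = <970>;
+ *		qcom,ovp-mv = <29600>;
+ *		qcom,switch-freq-khz = <800>;
+ *		qcom,fs-curr-ua = <25000>;
+ *		qcom,led-strings-list = [00 01 02 03];
+ *	};
+ */
+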
+/* parse wled dtsi parameters */
+static int qpnp_wled_parse_dt(struct qpnp_wled *wled)
+{
+	struct platform_device *pdev = wled->pdev;
+	struct property *prop;
+	const char *temp_str;
+	u32 temp_val;
+	int rc, i, size;
+	u8 *strings;
+
+	wled->cdev.name = "wled";
+	rc = of_property_read_string(pdev->dev.of_node,
+			"linux,name", &wled->cdev.name);
+	if (rc && (rc != -EINVAL)) {
+		dev_err(&pdev->dev, "Unable to read led name\n");
+		return rc;
+	}
+
+	wled->cdev.default_trigger = QPNP_WLED_TRIGGER_NONE;
+	rc = of_property_read_string(pdev->dev.of_node, "linux,default-trigger",
+					&wled->cdev.default_trigger);
+	if (rc && (rc != -EINVAL)) {
+		dev_err(&pdev->dev, "Unable to read led trigger\n");
+		return rc;
+	}
+
+	if (of_find_property(pdev->dev.of_node, "qcom,wled-brightness-map",
+			NULL)) {
+		size = of_property_count_elems_of_size(pdev->dev.of_node,
+				"qcom,wled-brightness-map", sizeof(u16));
+		if (size != NUM_DDIC_CODES) {
+			pr_err("Invalid WLED brightness map size:%d\n", size);
+			return -EINVAL;
+		}
+
+		wled->brt_map_table = devm_kcalloc(&pdev->dev, NUM_DDIC_CODES,
+						sizeof(u16), GFP_KERNEL);
+		if (!wled->brt_map_table)
+			return -ENOMEM;
+
+		rc = of_property_read_u16_array(pdev->dev.of_node,
+			"qcom,wled-brightness-map", wled->brt_map_table,
+			NUM_DDIC_CODES);
+		if (rc < 0) {
+			pr_err("Error in reading WLED brightness map, rc=%d\n",
+				rc);
+			return rc;
+		}
+
+		for (i = 0; i < NUM_DDIC_CODES; i++) {
+			if (wled->brt_map_table[i] > WLED_MAX_LEVEL_4095) {
+				pr_err("WLED brightness map not in range\n");
+				return -EDOM;
+			}
+
+			if ((i > 0) && wled->brt_map_table[i]
+						< wled->brt_map_table[i - 1]) {
+				pr_err("WLED brightness map not in ascending order\n");
+				return -EDOM;
+			}
+		}
+	}
+
+	wled->stepper_en = of_property_read_bool(pdev->dev.of_node,
+				"qcom,wled-stepper-en");
+	wled->disp_type_amoled = of_property_read_bool(pdev->dev.of_node,
+				"qcom,disp-type-amoled");
+	if (wled->disp_type_amoled) {
+		wled->vref_psm_mv = QPNP_WLED_VREF_PSM_DFLT_AMOLED_MV;
+		rc = of_property_read_u32(pdev->dev.of_node,
+				"qcom,vref-psm-mv", &temp_val);
+		if (!rc) {
+			wled->vref_psm_mv = temp_val;
+		} else if (rc != -EINVAL) {
+			dev_err(&pdev->dev, "Unable to read vref-psm\n");
+			return rc;
+		}
+
+		wled->loop_comp_res_kohm = 320;
+		if (wled->pmic_rev_id->pmic_subtype == PMI8998_SUBTYPE ||
+			wled->pmic_rev_id->pmic_subtype == PM660L_SUBTYPE)
+			wled->loop_comp_res_kohm = 300;
+
+		rc = of_property_read_u32(pdev->dev.of_node,
+				"qcom,loop-comp-res-kohm", &temp_val);
+		if (!rc) {
+			wled->loop_comp_res_kohm = temp_val;
+		} else if (rc != -EINVAL) {
+			dev_err(&pdev->dev, "Unable to read loop-comp-res-kohm\n");
+			return rc;
+		}
+
+		wled->avdd_mode_spmi = of_property_read_bool(pdev->dev.of_node,
+				"qcom,avdd-mode-spmi");
+
+		wled->avdd_target_voltage_mv = QPNP_WLED_DFLT_AVDD_MV;
+		rc = of_property_read_u32(pdev->dev.of_node,
+				"qcom,avdd-target-voltage-mv", &temp_val);
+		if (!rc) {
+			wled->avdd_target_voltage_mv = temp_val;
+		} else if (rc != -EINVAL) {
+			dev_err(&pdev->dev, "Unable to read avdd target voltage\n");
+			return rc;
+		}
+	}
+
+	if (wled->disp_type_amoled) {
+		if (wled->pmic_rev_id->pmic_subtype == PMI8998_SUBTYPE ||
+			wled->pmic_rev_id->pmic_subtype == PM660L_SUBTYPE)
+			wled->loop_ea_gm =
+				QPNP_WLED_LOOP_GM_DFLT_AMOLED_PMI8998;
+		else
+			wled->loop_ea_gm =
+				QPNP_WLED_LOOP_EA_GM_DFLT_AMOLED_PMI8994;
+	} else {
+		wled->loop_ea_gm = QPNP_WLED_LOOP_GM_DFLT_WLED;
+	}
+
+	rc = of_property_read_u32(pdev->dev.of_node,
+			"qcom,loop-ea-gm", &temp_val);
+	if (!rc) {
+		wled->loop_ea_gm = temp_val;
+	} else if (rc != -EINVAL) {
+		dev_err(&pdev->dev, "Unable to read loop-ea-gm\n");
+		return rc;
+	}
+
+	if (wled->pmic_rev_id->pmic_subtype == PMI8998_SUBTYPE ||
+		wled->pmic_rev_id->pmic_subtype == PM660L_SUBTYPE) {
+		wled->loop_auto_gm_en =
+			of_property_read_bool(pdev->dev.of_node,
+					"qcom,loop-auto-gm-en");
+		wled->loop_auto_gm_thresh = QPNP_WLED_LOOP_AUTO_GM_DFLT_THRESH;
+		rc = of_property_read_u8(pdev->dev.of_node,
+				"qcom,loop-auto-gm-thresh",
+				&wled->loop_auto_gm_thresh);
+		if (rc && rc != -EINVAL) {
+			dev_err(&pdev->dev,
+				"Unable to read loop-auto-gm-thresh\n");
+			return rc;
+		}
+	}
+
+	if (wled->pmic_rev_id->pmic_subtype == PMI8998_SUBTYPE ||
+		wled->pmic_rev_id->pmic_subtype == PM660L_SUBTYPE) {
+		if (wled->pmic_rev_id->pmic_subtype == PMI8998_SUBTYPE &&
+				wled->pmic_rev_id->rev4 == PMI8998_V2P0_REV4)
+			wled->lcd_auto_pfm_en = false;
+		else
+			wled->lcd_auto_pfm_en = true;
+
+		wled->lcd_auto_pfm_thresh = QPNP_WLED_LCD_AUTO_PFM_DFLT_THRESH;
+		rc = of_property_read_u8(pdev->dev.of_node,
+				"qcom,lcd-auto-pfm-thresh",
+				&wled->lcd_auto_pfm_thresh);
+		if (rc && rc != -EINVAL) {
+			dev_err(&pdev->dev,
+				"Unable to read lcd-auto-pfm-thresh\n");
+			return rc;
+		}
+
+		if (wled->lcd_auto_pfm_thresh >
+				QPNP_WLED_LCD_AUTO_PFM_THRESH_MAX)
+			wled->lcd_auto_pfm_thresh =
+				QPNP_WLED_LCD_AUTO_PFM_THRESH_MAX;
+	}
+
+	wled->sc_deb_cycles = QPNP_WLED_SC_DEB_CYCLES_DFLT;
+	rc = of_property_read_u32(pdev->dev.of_node,
+			"qcom,sc-deb-cycles", &temp_val);
+	if (!rc) {
+		wled->sc_deb_cycles = temp_val;
+	} else if (rc != -EINVAL) {
+		dev_err(&pdev->dev, "Unable to read sc debounce cycles\n");
+		return rc;
+	}
+
+	wled->fdbk_op = QPNP_WLED_FDBK_AUTO;
+	rc = of_property_read_string(pdev->dev.of_node,
+			"qcom,fdbk-output", &temp_str);
+	if (!rc) {
+		if (strcmp(temp_str, "wled1") == 0)
+			wled->fdbk_op = QPNP_WLED_FDBK_WLED1;
+		else if (strcmp(temp_str, "wled2") == 0)
+			wled->fdbk_op = QPNP_WLED_FDBK_WLED2;
+		else if (strcmp(temp_str, "wled3") == 0)
+			wled->fdbk_op = QPNP_WLED_FDBK_WLED3;
+		else if (strcmp(temp_str, "wled4") == 0)
+			wled->fdbk_op = QPNP_WLED_FDBK_WLED4;
+		else
+			wled->fdbk_op = QPNP_WLED_FDBK_AUTO;
+	} else if (rc != -EINVAL) {
+		dev_err(&pdev->dev, "Unable to read feedback output\n");
+		return rc;
+	}
+
+	if (wled->pmic_rev_id->pmic_subtype == PMI8998_SUBTYPE ||
+			wled->pmic_rev_id->pmic_subtype == PM660L_SUBTYPE)
+		wled->vref_uv = vref_setting_pmi8998.default_uv;
+	else
+		wled->vref_uv = vref_setting_pmi8994.default_uv;
+	rc = of_property_read_u32(pdev->dev.of_node,
+			"qcom,vref-uv", &temp_val);
+	if (!rc) {
+		wled->vref_uv = temp_val;
+	} else if (rc != -EINVAL) {
+		dev_err(&pdev->dev, "Unable to read vref\n");
+		return rc;
+	}
+
+	wled->switch_freq_khz = wled->disp_type_amoled ? 1600 : 800;
+	rc = of_property_read_u32(pdev->dev.of_node,
+			"qcom,switch-freq-khz", &temp_val);
+	if (!rc) {
+		wled->switch_freq_khz = temp_val;
+	} else if (rc != -EINVAL) {
+		dev_err(&pdev->dev, "Unable to read switch freq\n");
+		return rc;
+	}
+
+	if (wled->pmic_rev_id->pmic_subtype == PMI8998_SUBTYPE ||
+		wled->pmic_rev_id->pmic_subtype == PM660L_SUBTYPE)
+		wled->ovp_mv = 29600;
+	else
+		wled->ovp_mv = 29500;
+	rc = of_property_read_u32(pdev->dev.of_node,
+			"qcom,ovp-mv", &temp_val);
+	if (!rc) {
+		wled->ovp_mv = temp_val;
+	} else if (rc != -EINVAL) {
+		dev_err(&pdev->dev, "Unable to read ovp\n");
+		return rc;
+	}
+
+	if (wled->pmic_rev_id->pmic_subtype == PMI8998_SUBTYPE ||
+		wled->pmic_rev_id->pmic_subtype == PM660L_SUBTYPE) {
+		if (wled->disp_type_amoled)
+			wled->ilim_ma = PMI8998_AMOLED_DFLT_ILIM_MA;
+		else
+			wled->ilim_ma = PMI8998_WLED_DFLT_ILIM_MA;
+	} else {
+		if (wled->disp_type_amoled)
+			wled->ilim_ma = PMI8994_AMOLED_DFLT_ILIM_MA;
+		else
+			wled->ilim_ma = PMI8994_WLED_DFLT_ILIM_MA;
+	}
+
+	rc = of_property_read_u32(pdev->dev.of_node,
+			"qcom,ilim-ma", &temp_val);
+	if (!rc) {
+		wled->ilim_ma = temp_val;
+	} else if (rc != -EINVAL) {
+		dev_err(&pdev->dev, "Unable to read ilim\n");
+		return rc;
+	}
+
+	wled->boost_duty_ns = QPNP_WLED_DEF_BOOST_DUTY_NS;
+	rc = of_property_read_u32(pdev->dev.of_node,
+			"qcom,boost-duty-ns", &temp_val);
+	if (!rc) {
+		wled->boost_duty_ns = temp_val;
+	} else if (rc != -EINVAL) {
+		dev_err(&pdev->dev, "Unable to read boost duty\n");
+		return rc;
+	}
+
+	wled->mod_freq_khz = QPNP_WLED_MOD_FREQ_9600_KHZ;
+	rc = of_property_read_u32(pdev->dev.of_node,
+			"qcom,mod-freq-khz", &temp_val);
+	if (!rc) {
+		wled->mod_freq_khz = temp_val;
+	} else if (rc != -EINVAL) {
+		dev_err(&pdev->dev, "Unable to read modulation freq\n");
+		return rc;
+	}
+
+	wled->dim_mode = QPNP_WLED_DIM_HYBRID;
+	rc = of_property_read_string(pdev->dev.of_node,
+			"qcom,dim-mode", &temp_str);
+	if (!rc) {
+		if (strcmp(temp_str, "analog") == 0)
+			wled->dim_mode = QPNP_WLED_DIM_ANALOG;
+		else if (strcmp(temp_str, "digital") == 0)
+			wled->dim_mode = QPNP_WLED_DIM_DIGITAL;
+		else
+			wled->dim_mode = QPNP_WLED_DIM_HYBRID;
+	} else if (rc != -EINVAL) {
+		dev_err(&pdev->dev, "Unable to read dim mode\n");
+		return rc;
+	}
+
+	if (wled->dim_mode == QPNP_WLED_DIM_HYBRID) {
+		wled->hyb_thres = QPNP_WLED_DEF_HYB_THRES;
+		rc = of_property_read_u32(pdev->dev.of_node,
+				"qcom,hyb-thres", &temp_val);
+		if (!rc) {
+			wled->hyb_thres = temp_val;
+		} else if (rc != -EINVAL) {
+			dev_err(&pdev->dev, "Unable to read hyb threshold\n");
+			return rc;
+		}
+	}
+
+	wled->sync_dly_us = QPNP_WLED_DEF_SYNC_DLY_US;
+	rc = of_property_read_u32(pdev->dev.of_node,
+			"qcom,sync-dly-us", &temp_val);
+	if (!rc) {
+		wled->sync_dly_us = temp_val;
+	} else if (rc != -EINVAL) {
+		dev_err(&pdev->dev, "Unable to read sync delay\n");
+		return rc;
+	}
+
+	wled->fs_curr_ua = QPNP_WLED_FS_CURR_MAX_UA;
+	rc = of_property_read_u32(pdev->dev.of_node,
+			"qcom,fs-curr-ua", &temp_val);
+	if (!rc) {
+		wled->fs_curr_ua = temp_val;
+	} else if (rc != -EINVAL) {
+		dev_err(&pdev->dev, "Unable to read full scale current\n");
+		return rc;
+	}
+
+	wled->cons_sync_write_delay_us = 0;
+	rc = of_property_read_u32(pdev->dev.of_node,
+			"qcom,cons-sync-write-delay-us", &temp_val);
+	if (!rc)
+		wled->cons_sync_write_delay_us = temp_val;
+
+	wled->en_9b_dim_res = of_property_read_bool(pdev->dev.of_node,
+			"qcom,en-9b-dim-res");
+	wled->en_phase_stag = of_property_read_bool(pdev->dev.of_node,
+			"qcom,en-phase-stag");
+	wled->en_cabc = of_property_read_bool(pdev->dev.of_node,
+			"qcom,en-cabc");
+
+	if (wled->pmic_rev_id->pmic_subtype == PM660L_SUBTYPE)
+		wled->max_strings = QPNP_PM660_WLED_MAX_STRINGS;
+	else
+		wled->max_strings = QPNP_WLED_MAX_STRINGS;
+
+	prop = of_find_property(pdev->dev.of_node,
+			"qcom,led-strings-list", &temp_val);
+	if (!prop || !temp_val || temp_val > QPNP_WLED_MAX_STRINGS) {
+		dev_err(&pdev->dev, "Invalid strings info, use default");
+		wled->num_strings = wled->max_strings;
+		for (i = 0; i < wled->num_strings; i++)
+			wled->strings[i] = i;
+	} else {
+		wled->num_strings = temp_val;
+		strings = prop->value;
+		for (i = 0; i < wled->num_strings; ++i)
+			wled->strings[i] = strings[i];
+	}
+
+	wled->ovp_irq = platform_get_irq_byname(pdev, "ovp-irq");
+	if (wled->ovp_irq < 0)
+		dev_dbg(&pdev->dev, "ovp irq is not used\n");
+
+	wled->sc_irq = platform_get_irq_byname(pdev, "sc-irq");
+	if (wled->sc_irq < 0)
+		dev_dbg(&pdev->dev, "sc irq is not used\n");
+
+	wled->en_ext_pfet_sc_pro = of_property_read_bool(pdev->dev.of_node,
+					"qcom,en-ext-pfet-sc-pro");
+
+	wled->lcd_psm_ctrl = of_property_read_bool(pdev->dev.of_node,
+				"qcom,lcd-psm-ctrl");
+
+	wled->auto_calib_enabled = of_property_read_bool(pdev->dev.of_node,
+					"qcom,auto-calibration-enable");
+	return 0;
+}
+
+static int qpnp_wled_probe(struct platform_device *pdev)
+{
+	struct qpnp_wled *wled;
+	struct device_node *revid_node;
+	int rc = 0, i;
+	const __be32 *prop;
+
+	wled = devm_kzalloc(&pdev->dev, sizeof(*wled), GFP_KERNEL);
+	if (!wled)
+		return -ENOMEM;
+
+	wled->regmap = dev_get_regmap(pdev->dev.parent, NULL);
+	if (!wled->regmap) {
+		dev_err(&pdev->dev, "Couldn't get parent's regmap\n");
+		return -EINVAL;
+	}
+
+	wled->pdev = pdev;
+
+	revid_node = of_parse_phandle(pdev->dev.of_node, "qcom,pmic-revid", 0);
+	if (!revid_node) {
+		pr_err("Missing qcom,pmic-revid property - driver failed\n");
+		return -EINVAL;
+	}
+
+	wled->pmic_rev_id = get_revid_data(revid_node);
+	of_node_put(revid_node);
+	if (IS_ERR_OR_NULL(wled->pmic_rev_id)) {
+		pr_err("Unable to get pmic_revid rc=%ld\n",
+			PTR_ERR(wled->pmic_rev_id));
+		/*
+		 * the revid peripheral must be registered, any failure
+		 * here only indicates that the rev-id module has not
+		 * probed yet.
+		 */
+		return -EPROBE_DEFER;
+	}
+
+	pr_debug("PMIC subtype %d Digital major %d\n",
+		wled->pmic_rev_id->pmic_subtype, wled->pmic_rev_id->rev4);
+
+	wled->wq = alloc_ordered_workqueue("qpnp_wled_wq", WQ_HIGHPRI);
+	if (!wled->wq) {
+		pr_err("Unable to alloc workqueue for WLED\n");
+		return -ENOMEM;
+	}
+
+	prop = of_get_address_by_name(pdev->dev.of_node, QPNP_WLED_SINK_BASE,
+			NULL, NULL);
+	if (!prop) {
+		dev_err(&pdev->dev, "Couldn't find sink's addr\n");
+		return -EINVAL;
+	}
+	wled->sink_base = be32_to_cpu(*prop);
+
+	prop = of_get_address_by_name(pdev->dev.of_node, QPNP_WLED_CTRL_BASE,
+			NULL, NULL);
+	if (!prop) {
+		dev_err(&pdev->dev, "Couldn't find ctrl's addr\n");
+		return -EINVAL;
+	}
+	wled->ctrl_base = be32_to_cpu(*prop);
+
+	dev_set_drvdata(&pdev->dev, wled);
+
+	rc = qpnp_wled_parse_dt(wled);
+	if (rc) {
+		dev_err(&pdev->dev, "DT parsing failed\n");
+		return rc;
+	}
+
+	mutex_init(&wled->bus_lock);
+	mutex_init(&wled->lock);
+	rc = qpnp_wled_config(wled);
+	if (rc) {
+		dev_err(&pdev->dev, "wled config failed\n");
+		return rc;
+	}
+
+	INIT_WORK(&wled->work, qpnp_wled_work);
+	wled->ramp_ms = QPNP_WLED_RAMP_DLY_MS;
+	wled->ramp_step = 1;
+
+	wled->cdev.brightness_set = qpnp_wled_set;
+	wled->cdev.brightness_get = qpnp_wled_get;
+
+	wled->cdev.max_brightness = WLED_MAX_LEVEL_4095;
+
+	rc = led_classdev_register(&pdev->dev, &wled->cdev);
+	if (rc) {
+		dev_err(&pdev->dev, "wled registration failed(%d)\n", rc);
+		goto wled_register_fail;
+	}
+
+	for (i = 0; i < ARRAY_SIZE(qpnp_wled_attrs); i++) {
+		rc = sysfs_create_file(&wled->cdev.dev->kobj,
+				&qpnp_wled_attrs[i].attr);
+		if (rc < 0) {
+			dev_err(&pdev->dev, "sysfs creation failed\n");
+			goto sysfs_fail;
+		}
+	}
+
+	return 0;
+
+sysfs_fail:
+	for (i--; i >= 0; i--)
+		sysfs_remove_file(&wled->cdev.dev->kobj,
+				&qpnp_wled_attrs[i].attr);
+	led_classdev_unregister(&wled->cdev);
+wled_register_fail:
+	cancel_work_sync(&wled->work);
+	destroy_workqueue(wled->wq);
+	mutex_destroy(&wled->lock);
+	return rc;
+}
+
+static int qpnp_wled_remove(struct platform_device *pdev)
+{
+	struct qpnp_wled *wled = dev_get_drvdata(&pdev->dev);
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(qpnp_wled_attrs); i++)
+		sysfs_remove_file(&wled->cdev.dev->kobj,
+				&qpnp_wled_attrs[i].attr);
+
+	led_classdev_unregister(&wled->cdev);
+	cancel_work_sync(&wled->work);
+	destroy_workqueue(wled->wq);
+	mutex_destroy(&wled->lock);
+
+	return 0;
+}
+
+static const struct of_device_id spmi_match_table[] = {
+	{ .compatible = "qcom,qpnp-wled",},
+	{ },
+};
+
+static struct platform_driver qpnp_wled_driver = {
+	.driver		= {
+		.name		= "qcom,qpnp-wled",
+		.of_match_table	= spmi_match_table,
+	},
+	.probe		= qpnp_wled_probe,
+	.remove		= qpnp_wled_remove,
+};
+
+static int __init qpnp_wled_init(void)
+{
+	return platform_driver_register(&qpnp_wled_driver);
+}
+module_init(qpnp_wled_init);
+
+static void __exit qpnp_wled_exit(void)
+{
+	platform_driver_unregister(&qpnp_wled_driver);
+}
+module_exit(qpnp_wled_exit);
+
+MODULE_DESCRIPTION("QPNP WLED driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("leds:leds-qpnp-wled");
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/md/dm-req-crypt.c	2019-01-22 16:16:24.183252564 +0100
@@ -0,0 +1,1365 @@
+/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/completion.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/bio.h>
+#include <linux/blkdev.h>
+#include <linux/mempool.h>
+#include <linux/slab.h>
+#include <linux/crypto.h>
+#include <linux/qcrypto.h>
+#include <linux/workqueue.h>
+#include <linux/backing-dev.h>
+#include <linux/atomic.h>
+#include <linux/scatterlist.h>
+#include <linux/device-mapper.h>
+#include <linux/printk.h>
+
+#include <asm/page.h>
+#include <asm/unaligned.h>
+#include <crypto/scatterwalk.h>
+#include <crypto/hash.h>
+#include <crypto/md5.h>
+#include <crypto/algapi.h>
+#include <crypto/ice.h>
+
+#define DM_MSG_PREFIX "req-crypt"
+
+#define MAX_SG_LIST	1024
+#define REQ_DM_512_KB (512*1024)
+#define MAX_ENCRYPTION_BUFFERS 1
+#define MIN_IOS 256
+#define MIN_POOL_PAGES 32
+#define KEY_SIZE_XTS 32
+#define AES_XTS_IV_LEN 16
+#define MAX_MSM_ICE_KEY_LUT_SIZE 32
+#define SECTOR_SIZE 512
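+/*
+ * Minimum number of bytes per engine before a read request is split and
+ * decrypted in parallel across engines; see req_cryptd_crypt_read_convert()
+ */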
+#define MIN_CRYPTO_TRANSFER_SIZE (4 * 1024)
+
+#define DM_REQ_CRYPT_ERROR -1
+#define DM_REQ_CRYPT_ERROR_AFTER_PAGE_MALLOC -2
+
+/*
+ * ENCRYPTION_MODE_CRYPTO means dm-req-crypt would invoke crypto operations
+ * for all of the requests. Crypto operations are performed by crypto engine
+ * plugged with Linux Kernel Crypto APIs
+ */
+#define DM_REQ_CRYPT_ENCRYPTION_MODE_CRYPTO 0
+/*
+ * ENCRYPTION_MODE_TRANSPARENT means dm-req-crypt would not invoke crypto
+ * operations for any of the requests. Data would be encrypted or decrypted
+ * using Inline Crypto Engine(ICE) embedded in storage hardware
+ */
+#define DM_REQ_CRYPT_ENCRYPTION_MODE_TRANSPARENT 1
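+
+/*
+ * A sketch of how the mode gates the data path (see req_crypt_map() and
+ * req_crypt_endio() below): in TRANSPARENT mode bios are tagged
+ * BIO_INLINECRYPT and remapped untouched, while in CRYPTO mode writes are
+ * bounced through the req_cryptd worker queue for encryption before
+ * dispatch and reads are decrypted from the endio path.
+ */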
+
+#define DM_REQ_CRYPT_QUEUE_SIZE 256
+
+struct req_crypt_result {
+	struct completion completion;
+	int err;
+};
+
+#define FDE_KEY_ID	0
+#define PFE_KEY_ID	1
+
+static struct dm_dev *dev;
+static struct kmem_cache *_req_crypt_io_pool;
+static struct kmem_cache *_req_dm_scatterlist_pool;
+static sector_t start_sector_orig;
+static struct workqueue_struct *req_crypt_queue;
+static struct workqueue_struct *req_crypt_split_io_queue;
+static mempool_t *req_io_pool;
+static mempool_t *req_page_pool;
+static mempool_t *req_scatterlist_pool;
+static bool is_fde_enabled;
+static struct crypto_ablkcipher *tfm;
+static unsigned int encryption_mode;
+static struct ice_crypto_setting *ice_settings;
+
+unsigned int num_engines;
+unsigned int num_engines_fde, fde_cursor;
+unsigned int num_engines_pfe, pfe_cursor;
+struct crypto_engine_entry *fde_eng, *pfe_eng;
+DEFINE_MUTEX(engine_list_mutex);
+
+struct req_dm_crypt_io {
+	struct ice_crypto_setting ice_settings;
+	struct work_struct work;
+	struct request *cloned_request;
+	int error;
+	atomic_t pending;
+	struct timespec start_time;
+	bool should_encrypt;
+	bool should_decrypt;
+	u32 key_id;
+};
+
+struct req_dm_split_req_io {
+	struct work_struct work;
+	struct scatterlist *req_split_sg_read;
+	struct req_crypt_result result;
+	struct crypto_engine_entry *engine;
+	u8 IV[AES_XTS_IV_LEN];
+	int size;
+	struct request *clone;
+};
+
+#ifdef CONFIG_FIPS_ENABLE
+static struct qcrypto_func_set dm_qcrypto_func;
+#else
+static struct qcrypto_func_set dm_qcrypto_func = {
+		qcrypto_cipher_set_device_hw,
+		qcrypto_cipher_set_flag,
+		qcrypto_get_num_engines,
+		qcrypto_get_engine_list
+};
+#endif
+static void req_crypt_cipher_complete
+		(struct crypto_async_request *req, int err);
+static void req_cryptd_split_req_queue_cb
+		(struct work_struct *work);
+static void req_cryptd_split_req_queue
+		(struct req_dm_split_req_io *io);
+static void req_crypt_split_io_complete
+		(struct req_crypt_result *res, int err);
+
+static bool req_crypt_should_encrypt(struct req_dm_crypt_io *req)
+{
+	int ret = 0;
+	bool should_encrypt = false;
+	struct bio *bio = NULL;
+	bool is_encrypted = false;
+	bool is_inplace = false;
+
+	if (!req || !req->cloned_request || !req->cloned_request->bio)
+		return false;
+
+	if (encryption_mode == DM_REQ_CRYPT_ENCRYPTION_MODE_TRANSPARENT)
+		return false;
+	bio = req->cloned_request->bio;
+
+	/* req->key_id = key_id; @todo support more than 1 pfe key */
+	if ((ret == 0) && (is_encrypted || is_inplace)) {
+		should_encrypt = true;
+		req->key_id = PFE_KEY_ID;
+	} else if (is_fde_enabled) {
+		should_encrypt = true;
+		req->key_id = FDE_KEY_ID;
+	}
+
+	return should_encrypt;
+}
+
+static bool req_crypt_should_deccrypt(struct req_dm_crypt_io *req)
+{
+	int ret = 0;
+	bool should_deccrypt = false;
+	struct bio *bio = NULL;
+	bool is_encrypted = false;
+	bool is_inplace = false;
+
+	if (!req || !req->cloned_request || !req->cloned_request->bio)
+		return false;
+	if (encryption_mode == DM_REQ_CRYPT_ENCRYPTION_MODE_TRANSPARENT)
+		return false;
+
+	bio = req->cloned_request->bio;
+
+	/* req->key_id = key_id; @todo support more than 1 pfe key */
+	if ((ret == 0) && (is_encrypted && !is_inplace)) {
+		should_deccrypt = true;
+		req->key_id = PFE_KEY_ID;
+	} else if (is_fde_enabled) {
+		should_deccrypt = true;
+		req->key_id = FDE_KEY_ID;
+	}
+
+	return should_deccrypt;
+}
+
+static void req_crypt_inc_pending(struct req_dm_crypt_io *io)
+{
+	atomic_inc(&io->pending);
+}
+
+static void req_crypt_dec_pending_encrypt(struct req_dm_crypt_io *io)
+{
+	int error = 0;
+	struct request *clone = NULL;
+
+	if (io) {
+		error = io->error;
+		if (io->cloned_request) {
+			clone = io->cloned_request;
+		} else {
+			DMERR("%s io->cloned_request is NULL\n",
+								__func__);
+			/*
+			 * If Clone is NULL we cannot do anything,
+			 * this should never happen
+			 */
+			BUG();
+		}
+	} else {
+		DMERR("%s io is NULL\n", __func__);
+		/*
+		 * If Clone is NULL we cannot do anything,
+		 * this should never happen
+		 */
+		BUG();
+	}
+
+	atomic_dec(&io->pending);
+
+	if (error < 0) {
+		dm_kill_unmapped_request(clone, error);
+		mempool_free(io, req_io_pool);
+	} else
+		dm_dispatch_request(clone);
+}
+
+static void req_crypt_dec_pending_decrypt(struct req_dm_crypt_io *io)
+{
+	int error = 0;
+	struct request *clone = NULL;
+
+	if (io) {
+		error = io->error;
+		if (io->cloned_request) {
+			clone = io->cloned_request;
+		} else {
+			DMERR("%s io->cloned_request is NULL\n",
+								__func__);
+			/*
+			 * If Clone is NULL we cannot do anything,
+			 * this should never happen
+			 */
+			BUG();
+		}
+	} else {
+		DMERR("%s io is NULL\n",
+							__func__);
+		/*
+		 * If Clone is NULL we cannot do anything,
+		 * this should never happen
+		 */
+		BUG();
+	}
+
+	/* Should never get here if io or Clone is NULL */
+	dm_end_request(clone, error);
+	atomic_dec(&io->pending);
+	mempool_free(io, req_io_pool);
+}
+
+/*
+ * The callback that will be called by the worker queue to perform Decryption
+ * for reads and use the dm function to complete the bios and requests.
+ */
+static void req_cryptd_crypt_read_convert(struct req_dm_crypt_io *io)
+{
+	struct request *clone = NULL;
+	int error = DM_REQ_CRYPT_ERROR;
+	int total_sg_len = 0, total_bytes_in_req = 0, temp_size = 0, i = 0;
+	struct scatterlist *sg = NULL;
+	struct scatterlist *req_sg_read = NULL;
+
+	unsigned int engine_list_total = 0;
+	struct crypto_engine_entry *curr_engine_list = NULL;
+	bool split_transfers = false;
+	sector_t tempiv;
+	struct req_dm_split_req_io *split_io = NULL;
+
+	if (io) {
+		error = io->error;
+		if (io->cloned_request) {
+			clone = io->cloned_request;
+		} else {
+			DMERR("%s io->cloned_request is NULL\n",
+								__func__);
+			error = DM_REQ_CRYPT_ERROR;
+			goto submit_request;
+		}
+	} else {
+		DMERR("%s io is NULL\n",
+							__func__);
+		error = DM_REQ_CRYPT_ERROR;
+		goto submit_request;
+	}
+
+	req_crypt_inc_pending(io);
+
+	mutex_lock(&engine_list_mutex);
+
+	engine_list_total = (io->key_id == FDE_KEY_ID ? num_engines_fde :
+						   (io->key_id == PFE_KEY_ID ?
+							num_engines_pfe : 0));
+
+	curr_engine_list = (io->key_id == FDE_KEY_ID ? fde_eng :
+						   (io->key_id == PFE_KEY_ID ?
+							pfe_eng : NULL));
+
+	mutex_unlock(&engine_list_mutex);
+
+	req_sg_read = (struct scatterlist *)mempool_alloc(req_scatterlist_pool,
+								GFP_KERNEL);
+	if (!req_sg_read) {
+		DMERR("%s req_sg_read allocation failed\n",
+						__func__);
+		error = DM_REQ_CRYPT_ERROR;
+		goto ablkcipher_req_alloc_failure;
+	}
+	memset(req_sg_read, 0, sizeof(struct scatterlist) * MAX_SG_LIST);
+
+	total_sg_len = blk_rq_map_sg_no_cluster(clone->q, clone, req_sg_read);
+	if ((total_sg_len <= 0) || (total_sg_len > MAX_SG_LIST)) {
+		DMERR("%s Request Error%d", __func__, total_sg_len);
+		error = DM_REQ_CRYPT_ERROR;
+		goto ablkcipher_req_alloc_failure;
+	}
+
+	total_bytes_in_req = clone->__data_len;
+	if (total_bytes_in_req > REQ_DM_512_KB) {
+		DMERR("%s total_bytes_in_req > 512 MB %d",
+				__func__, total_bytes_in_req);
+		error = DM_REQ_CRYPT_ERROR;
+		goto ablkcipher_req_alloc_failure;
+	}
+
+
+	if ((clone->__data_len >= (MIN_CRYPTO_TRANSFER_SIZE *
+		engine_list_total))
+		&& (engine_list_total > 1))
+		split_transfers = 1;
+
+	if (split_transfers) {
+		split_io = kzalloc(sizeof(struct req_dm_split_req_io)
+				* engine_list_total, GFP_KERNEL);
+		if (!split_io) {
+			DMERR("%s split_io allocation failed\n", __func__);
+			error = DM_REQ_CRYPT_ERROR;
+			goto ablkcipher_req_alloc_failure;
+		}
+
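+		/*
+		 * Distribute the clone's scatterlist across the engines:
+		 * each of the first (N - 1) slices accumulates sg entries
+		 * until it holds roughly 1/N of the total bytes, and the
+		 * last slice takes the remainder. Each slice's IV is the
+		 * starting sector of that slice.
+		 */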
+		split_io[0].req_split_sg_read = sg = req_sg_read;
+		split_io[engine_list_total - 1].size = total_bytes_in_req;
+		for (i = 0; i < (engine_list_total); i++) {
+			while ((sg) && i < (engine_list_total - 1)) {
+				split_io[i].size += sg->length;
+				split_io[engine_list_total - 1].size -=
+						sg->length;
+				if (split_io[i].size >=
+						(total_bytes_in_req /
+							engine_list_total)) {
+					split_io[i + 1].req_split_sg_read =
+							sg_next(sg);
+					sg_mark_end(sg);
+					break;
+				}
+				sg = sg_next(sg);
+			}
+			split_io[i].engine = &curr_engine_list[i];
+			init_completion(&split_io[i].result.completion);
+			memset(&split_io[i].IV, 0, AES_XTS_IV_LEN);
+			tempiv = clone->__sector + (temp_size / SECTOR_SIZE);
+			memcpy(&split_io[i].IV, &tempiv, sizeof(sector_t));
+			temp_size +=  split_io[i].size;
+			split_io[i].clone = clone;
+			req_cryptd_split_req_queue(&split_io[i]);
+		}
+	} else {
+		split_io = kzalloc(sizeof(struct req_dm_split_req_io),
+				GFP_KERNEL);
+		if (!split_io) {
+			DMERR("%s split_io allocation failed\n", __func__);
+			error = DM_REQ_CRYPT_ERROR;
+			goto ablkcipher_req_alloc_failure;
+		}
+		split_io->engine = &curr_engine_list[0];
+		init_completion(&split_io->result.completion);
+		memcpy(split_io->IV, &clone->__sector, sizeof(sector_t));
+		split_io->req_split_sg_read = req_sg_read;
+		split_io->size = total_bytes_in_req;
+		split_io->clone = clone;
+		req_cryptd_split_req_queue(split_io);
+	}
+
+	if (!split_transfers) {
+		wait_for_completion_interruptible(&split_io->result.completion);
+		if (split_io->result.err) {
+			DMERR("%s error = %d for request\n",
+				 __func__, split_io->result.err);
+			error = DM_REQ_CRYPT_ERROR;
+			goto ablkcipher_req_alloc_failure;
+		}
+	} else {
+		for (i = 0; i < (engine_list_total); i++) {
+			wait_for_completion_interruptible(
+					&split_io[i].result.completion);
+			if (split_io[i].result.err) {
+				DMERR("%s error = %d for %dst request\n",
+					 __func__, split_io[i].result.err, i);
+				error = DM_REQ_CRYPT_ERROR;
+				goto ablkcipher_req_alloc_failure;
+			}
+		}
+	}
+	error = 0;
+ablkcipher_req_alloc_failure:
+
+	mempool_free(req_sg_read, req_scatterlist_pool);
+	kfree(split_io);
+submit_request:
+	if (io)
+		io->error = error;
+	req_crypt_dec_pending_decrypt(io);
+}
+
+/*
+ * This callback is called by the worker queue to perform non-decrypt reads
+ * and use the dm function to complete the bios and requests.
+ */
+static void req_cryptd_crypt_read_plain(struct req_dm_crypt_io *io)
+{
+	struct request *clone = NULL;
+	int error = 0;
+
+	if (!io || !io->cloned_request) {
+		DMERR("%s io is invalid\n", __func__);
+		BUG(); /* should not happen */
+	}
+
+	clone = io->cloned_request;
+
+	dm_end_request(clone, error);
+	mempool_free(io, req_io_pool);
+}
+
+/*
+ * The callback that will be called by the worker queue to perform Encryption
+ * for writes and submit the request using the elevator.
+ */
+static void req_cryptd_crypt_write_convert(struct req_dm_crypt_io *io)
+{
+	struct request *clone = NULL;
+	struct bio *bio_src = NULL;
+	unsigned int total_sg_len_req_in = 0, total_sg_len_req_out = 0,
+		total_bytes_in_req = 0, error = DM_MAPIO_REMAPPED, rc = 0;
+	struct req_iterator iter;
+	struct req_iterator iter1;
+	struct ablkcipher_request *req = NULL;
+	struct req_crypt_result result;
+	struct bio_vec bvec;
+	struct scatterlist *req_sg_in = NULL;
+	struct scatterlist *req_sg_out = NULL;
+	int copy_bio_sector_to_req = 0;
+	gfp_t gfp_mask = GFP_NOIO | __GFP_HIGHMEM;
+	struct page *page = NULL;
+	u8 IV[AES_XTS_IV_LEN];
+	int remaining_size = 0, err = 0;
+	struct crypto_engine_entry engine;
+	unsigned int engine_list_total = 0;
+	struct crypto_engine_entry *curr_engine_list = NULL;
+	unsigned int *engine_cursor = NULL;
+
+
+	if (io) {
+		if (io->cloned_request) {
+			clone = io->cloned_request;
+		} else {
+			DMERR("%s io->cloned_request is NULL\n",
+								__func__);
+			error = DM_REQ_CRYPT_ERROR;
+			goto submit_request;
+		}
+	} else {
+		DMERR("%s io is NULL\n",
+							__func__);
+		error = DM_REQ_CRYPT_ERROR;
+		goto submit_request;
+	}
+
+	req_crypt_inc_pending(io);
+
+	req = ablkcipher_request_alloc(tfm, GFP_KERNEL);
+	if (!req) {
+		DMERR("%s ablkcipher request allocation failed\n",
+					__func__);
+		error = DM_REQ_CRYPT_ERROR;
+		goto ablkcipher_req_alloc_failure;
+	}
+
+	ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+				req_crypt_cipher_complete, &result);
+
+	mutex_lock(&engine_list_mutex);
+	engine_list_total = (io->key_id == FDE_KEY_ID ? num_engines_fde :
+						   (io->key_id == PFE_KEY_ID ?
+							num_engines_pfe : 0));
+
+	curr_engine_list = (io->key_id == FDE_KEY_ID ? fde_eng :
+						(io->key_id == PFE_KEY_ID ?
+						pfe_eng : NULL));
+
+	engine_cursor = (io->key_id == FDE_KEY_ID ? &fde_cursor :
+					(io->key_id == PFE_KEY_ID ? &pfe_cursor
+					: NULL));
+	if ((engine_list_total < 1) || (NULL == curr_engine_list)
+	   || (NULL == engine_cursor)) {
+		DMERR("%s Unknown Key ID!\n",
+						   __func__);
+		error = DM_REQ_CRYPT_ERROR;
+		mutex_unlock(&engine_list_mutex);
+		goto ablkcipher_req_alloc_failure;
+	}
+
+	engine = curr_engine_list[*engine_cursor];
+	(*engine_cursor)++;
+	(*engine_cursor) %= engine_list_total;
+
+	err = (dm_qcrypto_func.cipher_set)(req, engine.ce_device,
+				   engine.hw_instance);
+	if (err) {
+		DMERR("%s qcrypto_cipher_set_device_hw failed with err %d\n",
+				__func__, err);
+		mutex_unlock(&engine_list_mutex);
+		goto ablkcipher_req_alloc_failure;
+	}
+	mutex_unlock(&engine_list_mutex);
+
+	init_completion(&result.completion);
+
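+	/*
+	 * QCRYPTO_CTX_USE_PIPE_KEY selects the key material already
+	 * provisioned in the crypto engine's pipe, hence setkey() below
+	 * can be passed a NULL key buffer.
+	 */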
+	(dm_qcrypto_func.cipher_flag)(req,
+		QCRYPTO_CTX_USE_PIPE_KEY | QCRYPTO_CTX_XTS_DU_SIZE_512B);
+	crypto_ablkcipher_clear_flags(tfm, ~0);
+	crypto_ablkcipher_setkey(tfm, NULL, KEY_SIZE_XTS);
+
+	req_sg_in = (struct scatterlist *)mempool_alloc(req_scatterlist_pool,
+								GFP_KERNEL);
+	if (!req_sg_in) {
+		DMERR("%s req_sg_in allocation failed\n",
+					__func__);
+		error = DM_REQ_CRYPT_ERROR;
+		goto ablkcipher_req_alloc_failure;
+	}
+	memset(req_sg_in, 0, sizeof(struct scatterlist) * MAX_SG_LIST);
+
+	req_sg_out = (struct scatterlist *)mempool_alloc(req_scatterlist_pool,
+								GFP_KERNEL);
+	if (!req_sg_out) {
+		DMERR("%s req_sg_out allocation failed\n",
+					__func__);
+		error = DM_REQ_CRYPT_ERROR;
+		goto ablkcipher_req_alloc_failure;
+	}
+	memset(req_sg_out, 0, sizeof(struct scatterlist) * MAX_SG_LIST);
+
+	total_sg_len_req_in = blk_rq_map_sg(clone->q, clone, req_sg_in);
+	if ((total_sg_len_req_in <= 0) ||
+			(total_sg_len_req_in > MAX_SG_LIST)) {
+		DMERR("%s Request Error%d", __func__, total_sg_len_req_in);
+		error = DM_REQ_CRYPT_ERROR;
+		goto ablkcipher_req_alloc_failure;
+	}
+
+	total_bytes_in_req = clone->__data_len;
+	if (total_bytes_in_req > REQ_DM_512_KB) {
+		DMERR("%s total_bytes_in_req > 512 MB %d",
+				__func__, total_bytes_in_req);
+		error = DM_REQ_CRYPT_ERROR;
+		goto ablkcipher_req_alloc_failure;
+	}
+
+	rq_for_each_segment(bvec, clone, iter) {
+		if (bvec.bv_len > remaining_size) {
+			page = NULL;
+			while (page == NULL) {
+				page = mempool_alloc(req_page_pool, gfp_mask);
+				if (!page) {
+					DMERR("%s Crypt page alloc failed",
+							__func__);
+					congestion_wait(BLK_RW_ASYNC, HZ/100);
+				}
+			}
+
+			bvec.bv_page = page;
+			bvec.bv_offset = 0;
+			remaining_size = PAGE_SIZE - bvec.bv_len;
+			if (remaining_size < 0)
+				BUG();
+		} else {
+			bvec.bv_page = page;
+			bvec.bv_offset = PAGE_SIZE - remaining_size;
+			remaining_size = remaining_size - bvec.bv_len;
+		}
+	}
+
+	total_sg_len_req_out = blk_rq_map_sg(clone->q, clone, req_sg_out);
+	if ((total_sg_len_req_out <= 0) ||
+			(total_sg_len_req_out > MAX_SG_LIST)) {
+		DMERR("%s Request Error %d", __func__, total_sg_len_req_out);
+		error = DM_REQ_CRYPT_ERROR_AFTER_PAGE_MALLOC;
+		goto ablkcipher_req_alloc_failure;
+	}
+
+	memset(IV, 0, AES_XTS_IV_LEN);
+	memcpy(IV, &clone->__sector, sizeof(sector_t));
+
+	ablkcipher_request_set_crypt(req, req_sg_in, req_sg_out,
+			total_bytes_in_req, (void *) IV);
+
+	rc = crypto_ablkcipher_encrypt(req);
+
+	switch (rc) {
+	case 0:
+		break;
+
+	case -EBUSY:
+		/*
+		 * Make this a synchronous request by waiting for the
+		 * in-progress operation to complete as well
+		 */
+	case -EINPROGRESS:
+		wait_for_completion_interruptible(&result.completion);
+		if (result.err) {
+			DMERR("%s error = %d encrypting the request\n",
+				 __func__, result.err);
+			error = DM_REQ_CRYPT_ERROR_AFTER_PAGE_MALLOC;
+			goto ablkcipher_req_alloc_failure;
+		}
+		break;
+
+	default:
+		error = DM_REQ_CRYPT_ERROR_AFTER_PAGE_MALLOC;
+		goto ablkcipher_req_alloc_failure;
+	}
+
+	__rq_for_each_bio(bio_src, clone) {
+		if (copy_bio_sector_to_req == 0)
+			copy_bio_sector_to_req++;
+		blk_queue_bounce(clone->q, &bio_src);
+	}
+
+	/*
+	 * Recalculate the phy_segments as we allocate new pages
+	 * This is used by storage driver to fill the sg list.
+	 */
+	blk_recalc_rq_segments(clone);
+
+ablkcipher_req_alloc_failure:
+	if (req)
+		ablkcipher_request_free(req);
+
+	if (error == DM_REQ_CRYPT_ERROR_AFTER_PAGE_MALLOC) {
+		rq_for_each_segment(bvec, clone, iter1) {
+			if (bvec.bv_offset == 0) {
+				mempool_free(bvec.bv_page, req_page_pool);
+				bvec.bv_page = NULL;
+			} else
+				bvec.bv_page = NULL;
+		}
+	}
+
+	mempool_free(req_sg_in, req_scatterlist_pool);
+	mempool_free(req_sg_out, req_scatterlist_pool);
+submit_request:
+	if (io)
+		io->error = error;
+	req_crypt_dec_pending_encrypt(io);
+}
+
+/*
+ * This callback is called by the worker queue to perform non-encrypted writes
+ * and submit the request using the elevator.
+ */
+static void req_cryptd_crypt_write_plain(struct req_dm_crypt_io *io)
+{
+	struct request *clone = NULL;
+
+	if (!io || !io->cloned_request) {
+		DMERR("%s io is invalid\n", __func__);
+		BUG(); /* should not happen */
+	}
+
+	clone = io->cloned_request;
+	io->error = 0;
+	dm_dispatch_request(clone);
+}
+
+/* Queue callback function that will get triggered */
+static void req_cryptd_crypt(struct work_struct *work)
+{
+	struct req_dm_crypt_io *io =
+			container_of(work, struct req_dm_crypt_io, work);
+
+	if (rq_data_dir(io->cloned_request) == WRITE) {
+		if (io->should_encrypt)
+			req_cryptd_crypt_write_convert(io);
+		else
+			req_cryptd_crypt_write_plain(io);
+	} else if (rq_data_dir(io->cloned_request) == READ) {
+		if (io->should_decrypt)
+			req_cryptd_crypt_read_convert(io);
+		else
+			req_cryptd_crypt_read_plain(io);
+	} else {
+		DMERR("%s received non-write request for Clone 0x%p\n",
+				__func__, io->cloned_request);
+	}
+}
+
+static void req_cryptd_split_req_queue_cb(struct work_struct *work)
+{
+	struct req_dm_split_req_io *io =
+			container_of(work, struct req_dm_split_req_io, work);
+	struct ablkcipher_request *req = NULL;
+	struct req_crypt_result result;
+	int err = 0;
+	struct crypto_engine_entry *engine = NULL;
+
+	if ((!io) || (!io->req_split_sg_read) || (!io->engine)) {
+		DMERR("%s Input invalid\n",
+			 __func__);
+		err = DM_REQ_CRYPT_ERROR;
+		/* If io is not populated this should not be called */
+		BUG();
+	}
+	req = ablkcipher_request_alloc(tfm, GFP_KERNEL);
+	if (!req) {
+		DMERR("%s ablkcipher request allocation failed\n", __func__);
+		err = DM_REQ_CRYPT_ERROR;
+		goto ablkcipher_req_alloc_failure;
+	}
+
+	ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+					req_crypt_cipher_complete, &result);
+
+	engine = io->engine;
+
+	err = (dm_qcrypto_func.cipher_set)(req, engine->ce_device,
+			engine->hw_instance);
+	if (err) {
+		DMERR("%s qcrypto_cipher_set_device_hw failed with err %d\n",
+				__func__, err);
+		goto ablkcipher_req_alloc_failure;
+	}
+	init_completion(&result.completion);
+	(dm_qcrypto_func.cipher_flag)(req,
+		QCRYPTO_CTX_USE_PIPE_KEY | QCRYPTO_CTX_XTS_DU_SIZE_512B);
+
+	crypto_ablkcipher_clear_flags(tfm, ~0);
+	crypto_ablkcipher_setkey(tfm, NULL, KEY_SIZE_XTS);
+
+	ablkcipher_request_set_crypt(req, io->req_split_sg_read,
+			io->req_split_sg_read, io->size, (void *) io->IV);
+
+	err = crypto_ablkcipher_decrypt(req);
+	switch (err) {
+	case 0:
+		break;
+
+	case -EBUSY:
+		/*
+		 * Make this a synchronous request by waiting for the
+		 * in-progress operation to complete as well
+		 */
+	case -EINPROGRESS:
+		wait_for_completion_io(&result.completion);
+		if (result.err) {
+			DMERR("%s error = %d encrypting the request\n",
+				 __func__, result.err);
+			err = DM_REQ_CRYPT_ERROR;
+			goto ablkcipher_req_alloc_failure;
+		}
+		break;
+
+	default:
+		err = DM_REQ_CRYPT_ERROR;
+		goto ablkcipher_req_alloc_failure;
+	}
+	err = 0;
+ablkcipher_req_alloc_failure:
+	if (req)
+		ablkcipher_request_free(req);
+
+	req_crypt_split_io_complete(&io->result, err);
+}
+
+static void req_cryptd_split_req_queue(struct req_dm_split_req_io *io)
+{
+	INIT_WORK(&io->work, req_cryptd_split_req_queue_cb);
+	queue_work(req_crypt_split_io_queue, &io->work);
+}
+
+static void req_cryptd_queue_crypt(struct req_dm_crypt_io *io)
+{
+	INIT_WORK(&io->work, req_cryptd_crypt);
+	queue_work(req_crypt_queue, &io->work);
+}
+
+/*
+ * Cipher complete callback, this is triggered by the Linux crypto api once
+ * the operation is done. This signals the waiting thread that the crypto
+ * operation is complete.
+ */
+static void req_crypt_cipher_complete(struct crypto_async_request *req, int err)
+{
+	struct req_crypt_result *res = req->data;
+
+	if (err == -EINPROGRESS)
+		return;
+
+	res->err = err;
+	complete(&res->completion);
+}
+
+static void req_crypt_split_io_complete(struct req_crypt_result *res, int err)
+{
+	if (err == -EINPROGRESS)
+		return;
+
+	res->err = err;
+	complete(&res->completion);
+}
+/*
+ * If bio->bi_dev is a partition, remap the location
+ */
+static inline void req_crypt_blk_partition_remap(struct bio *bio)
+{
+	struct block_device *bdev = bio->bi_bdev;
+
+	if (bio_sectors(bio) && bdev != bdev->bd_contains) {
+		struct hd_struct *p = bdev->bd_part;
+		/*
+		 * Check for integer overflow, should never happen.
+		 */
+		if (p->start_sect > (UINT_MAX - bio->bi_iter.bi_sector))
+			BUG();
+
+		bio->bi_iter.bi_sector += p->start_sect;
+		bio->bi_bdev = bdev->bd_contains;
+	}
+}
+
+/*
+ * The endio function is called from ksoftirqd context (atomic).
+ * For write operations the new pages created from the mempool
+ * are freed and returned. For read operations decryption is
+ * required; since this is called in an atomic context, the
+ * request is sent to a worker queue to complete decryption and
+ * free the request once done.
+ */
+static int req_crypt_endio(struct dm_target *ti, struct request *clone,
+			    int error, union map_info *map_context)
+{
+	int err = 0;
+	struct req_iterator iter1;
+	struct bio_vec bvec;
+	struct req_dm_crypt_io *req_io = map_context->ptr;
+
+	/* If it is for ICE, free up req_io and return */
+	if (encryption_mode == DM_REQ_CRYPT_ENCRYPTION_MODE_TRANSPARENT) {
+		mempool_free(req_io, req_io_pool);
+		err = error;
+		goto submit_request;
+	}
+
+	if (rq_data_dir(clone) == WRITE) {
+		rq_for_each_segment(bvec, clone, iter1) {
+			if (req_io->should_encrypt && bvec.bv_offset == 0) {
+				mempool_free(bvec.bv_page, req_page_pool);
+				bvec.bv_page = NULL;
+			} else
+				bvec.bv_page = NULL;
+		}
+		mempool_free(req_io, req_io_pool);
+		goto submit_request;
+	} else if (rq_data_dir(clone) == READ) {
+		req_io->error = error;
+		req_cryptd_queue_crypt(req_io);
+		err = DM_ENDIO_INCOMPLETE;
+		goto submit_request;
+	}
+
+submit_request:
+	return err;
+}
+
+/*
+ * This function is called with interrupts disabled.
+ * It remaps the clone for the underlying device. If it is a write
+ * request, it calls into the worker queue to encrypt the data and
+ * submits the request directly using the elevator. For a read request
+ * no pre-processing is required; the request is returned to dm once
+ * mapping is done.
+ */
+static int req_crypt_map(struct dm_target *ti, struct request *clone,
+			 union map_info *map_context)
+{
+	struct req_dm_crypt_io *req_io = NULL;
+	int error = DM_REQ_CRYPT_ERROR, copy_bio_sector_to_req = 0;
+	struct bio *bio_src = NULL;
+	gfp_t gfp_flag = GFP_KERNEL;
+
+	if (in_interrupt() || irqs_disabled())
+		gfp_flag = GFP_NOWAIT;
+
+	req_io = mempool_alloc(req_io_pool, gfp_flag);
+	if (!req_io) {
+		WARN_ON(1);
+		error = DM_REQ_CRYPT_ERROR;
+		goto submit_request;
+	}
+
+	/* Save the clone in the req_io, the callback to the worker
+	 * queue will get the req_io
+	 */
+	req_io->cloned_request = clone;
+	map_context->ptr = req_io;
+	atomic_set(&req_io->pending, 0);
+
+	if (rq_data_dir(clone) == WRITE)
+		req_io->should_encrypt = req_crypt_should_encrypt(req_io);
+	if (rq_data_dir(clone) == READ)
+		req_io->should_decrypt = req_crypt_should_deccrypt(req_io);
+
+	/* Get the queue of the underlying original device */
+	clone->q = bdev_get_queue(dev->bdev);
+	clone->rq_disk = dev->bdev->bd_disk;
+
+	__rq_for_each_bio(bio_src, clone) {
+		bio_src->bi_bdev = dev->bdev;
+		/* The way req-dm currently works is that the underlying
+		 * device driver completes the request by calling into the
+		 * block layer. The block layer completes the bios (clones)
+		 * and then the cloned request. This is undesirable for
+		 * req-dm-crypt, hence the BIO_DONTFREE flag: it ensures
+		 * that the block layer does not complete the cloned bios
+		 * before completing the request. When the crypt endio is
+		 * called, post-processing is done and then the dm layer
+		 * completes the bios (clones) and frees them.
+		 */
+		if (encryption_mode == DM_REQ_CRYPT_ENCRYPTION_MODE_TRANSPARENT)
+			bio_src->bi_flags |= 1 << BIO_INLINECRYPT;
+		else
+			bio_src->bi_flags |= 1 << BIO_DONTFREE;
+
+		/*
+		 * If this device has partitions, remap block n
+		 * of partition p to block n+start(p) of the disk.
+		 */
+		req_crypt_blk_partition_remap(bio_src);
+		if (copy_bio_sector_to_req == 0) {
+			clone->__sector = bio_src->bi_iter.bi_sector;
+			copy_bio_sector_to_req++;
+		}
+		blk_queue_bounce(clone->q, &bio_src);
+	}
+
+	if (encryption_mode == DM_REQ_CRYPT_ENCRYPTION_MODE_TRANSPARENT) {
+		/* Set all crypto parameters for inline crypto engine */
+		memcpy(&req_io->ice_settings, ice_settings,
+					sizeof(struct ice_crypto_setting));
+	} else {
+		/* ICE checks for key_index, which could be >= 0. If a chip
+		 * has both ICE and GPCE and wants to use GPCE, there could
+		 * be an issue. The storage driver sends all requests to the
+		 * ICE driver; if it sees key_index as 0, it would assume the
+		 * request is for ICE while it is not. Hence set an invalid
+		 * key index by default.
+		 */
+		req_io->ice_settings.key_index = -1;
+	}
+
+	if (rq_data_dir(clone) == READ ||
+		encryption_mode == DM_REQ_CRYPT_ENCRYPTION_MODE_TRANSPARENT) {
+		error = DM_MAPIO_REMAPPED;
+		goto submit_request;
+	} else if (rq_data_dir(clone) == WRITE) {
+		req_cryptd_queue_crypt(req_io);
+		error = DM_MAPIO_SUBMITTED;
+		goto submit_request;
+	}
+
+submit_request:
+	return error;
+}
+
+static void deconfigure_qcrypto(void)
+{
+	if (req_page_pool) {
+		mempool_destroy(req_page_pool);
+		req_page_pool = NULL;
+	}
+
+	if (req_scatterlist_pool) {
+		mempool_destroy(req_scatterlist_pool);
+		req_scatterlist_pool = NULL;
+	}
+
+	if (req_crypt_split_io_queue) {
+		destroy_workqueue(req_crypt_split_io_queue);
+		req_crypt_split_io_queue = NULL;
+	}
+	if (req_crypt_queue) {
+		destroy_workqueue(req_crypt_queue);
+		req_crypt_queue = NULL;
+	}
+
+	kmem_cache_destroy(_req_dm_scatterlist_pool);
+
+	mutex_lock(&engine_list_mutex);
+	kfree(pfe_eng);
+	pfe_eng = NULL;
+	kfree(fde_eng);
+	fde_eng = NULL;
+	mutex_unlock(&engine_list_mutex);
+
+	if (tfm) {
+		crypto_free_ablkcipher(tfm);
+		tfm = NULL;
+	}
+}
+
+static void req_crypt_dtr(struct dm_target *ti)
+{
+	DMDEBUG("dm-req-crypt Destructor.\n");
+
+	mempool_destroy(req_io_pool);
+	req_io_pool = NULL;
+
+	if (encryption_mode == DM_REQ_CRYPT_ENCRYPTION_MODE_TRANSPARENT) {
+		kfree(ice_settings);
+		ice_settings = NULL;
+	} else {
+		deconfigure_qcrypto();
+	}
+
+	if (_req_crypt_io_pool)
+		kmem_cache_destroy(_req_crypt_io_pool);
+
+	if (dev) {
+		dm_put_device(ti, dev);
+		dev = NULL;
+	}
+}
+
+static int configure_qcrypto(void)
+{
+	struct crypto_engine_entry *eng_list = NULL;
+	struct block_device *bdev = NULL;
+	int err = DM_REQ_CRYPT_ERROR, i;
+	struct request_queue *q = NULL;
+
+	bdev = dev->bdev;
+	q = bdev_get_queue(bdev);
+	blk_queue_max_hw_sectors(q, DM_REQ_CRYPT_QUEUE_SIZE);
+
+	/* Allocate the crypto alloc blk cipher and keep the handle */
+	tfm = crypto_alloc_ablkcipher("qcom-xts(aes)", 0, 0);
+	if (IS_ERR(tfm)) {
+		DMERR("%s ablkcipher tfm allocation failed : error\n",
+						 __func__);
+		tfm = NULL;
+		goto exit_err;
+	}
+
+	num_engines_fde = num_engines_pfe = 0;
+
+	mutex_lock(&engine_list_mutex);
+	num_engines = (dm_qcrypto_func.get_num_engines)();
+	if (!num_engines) {
+		DMERR("%s qcrypto_get_num_engines failed\n",
+					__func__);
+		err = DM_REQ_CRYPT_ERROR;
+		mutex_unlock(&engine_list_mutex);
+		goto exit_err;
+	}
+
+	eng_list = kcalloc(num_engines, sizeof(*eng_list), GFP_KERNEL);
+	if (eng_list == NULL) {
+		DMERR("%s engine list allocation failed\n", __func__);
+		err = DM_REQ_CRYPT_ERROR;
+		mutex_unlock(&engine_list_mutex);
+		goto exit_err;
+	}
+
+	(dm_qcrypto_func.get_engine_list)(num_engines, eng_list);
+
+	for (i = 0; i < num_engines; i++) {
+		if (eng_list[i].ce_device == FDE_KEY_ID)
+			num_engines_fde++;
+		if (eng_list[i].ce_device == PFE_KEY_ID)
+			num_engines_pfe++;
+	}
+
+	fde_eng = kcalloc(num_engines_fde, sizeof(*fde_eng), GFP_KERNEL);
+	if (fde_eng == NULL) {
+		DMERR("%s fde engine list allocation failed\n", __func__);
+		mutex_unlock(&engine_list_mutex);
+		goto exit_err;
+	}
+
+	pfe_eng = kcalloc(num_engines_pfe, sizeof(*pfe_eng), GFP_KERNEL);
+	if (pfe_eng == NULL) {
+		DMERR("%s pfe engine list allocation failed\n", __func__);
+		mutex_unlock(&engine_list_mutex);
+		goto exit_err;
+	}
+
+	fde_cursor = 0;
+	pfe_cursor = 0;
+
+	for (i = 0; i < num_engines; i++) {
+		if (eng_list[i].ce_device == FDE_KEY_ID)
+			fde_eng[fde_cursor++] = eng_list[i];
+		if (eng_list[i].ce_device == PFE_KEY_ID)
+			pfe_eng[pfe_cursor++] = eng_list[i];
+	}
+
+	fde_cursor = 0;
+	pfe_cursor = 0;
+	mutex_unlock(&engine_list_mutex);
+
+	_req_dm_scatterlist_pool = kmem_cache_create("req_dm_scatterlist",
+				sizeof(struct scatterlist) * MAX_SG_LIST,
+				 __alignof__(struct scatterlist), 0, NULL);
+	if (!_req_dm_scatterlist_pool)
+		goto exit_err;
+
+	req_crypt_queue = alloc_workqueue("req_cryptd",
+					WQ_UNBOUND |
+					WQ_CPU_INTENSIVE |
+					WQ_MEM_RECLAIM,
+					0);
+	if (!req_crypt_queue) {
+		DMERR("%s req_crypt_queue not allocated\n", __func__);
+		goto exit_err;
+	}
+
+	req_crypt_split_io_queue = alloc_workqueue("req_crypt_split",
+					WQ_UNBOUND |
+					WQ_CPU_INTENSIVE |
+					WQ_MEM_RECLAIM,
+					0);
+	if (!req_crypt_split_io_queue) {
+		DMERR("%s req_crypt_split_io_queue not allocated\n", __func__);
+		goto exit_err;
+	}
+	req_scatterlist_pool = mempool_create_slab_pool(MIN_IOS,
+					_req_dm_scatterlist_pool);
+	if (!req_scatterlist_pool) {
+		DMERR("%s req_scatterlist_pool is not allocated\n", __func__);
+		err = -ENOMEM;
+		goto exit_err;
+	}
+
+	req_page_pool = mempool_create_page_pool(MIN_POOL_PAGES, 0);
+	if (!req_page_pool) {
+		DMERR("%s req_page_pool not allocated\n", __func__);
+		goto exit_err;
+	}
+
+	err = 0;
+
+exit_err:
+	kfree(eng_list);
+	return err;
+}
+
+/*
+ * Construct an encryption mapping:
+ * <cipher> <key> <iv_offset> <dev_path> <start>
+ */
+static int req_crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
+{
+	int err = DM_REQ_CRYPT_ERROR;
+	unsigned long long tmpll;
+	char dummy;
+	int ret;
+
+	DMDEBUG("dm-req-crypt Constructor.\n");
+
+	if (argc < 5) {
+		DMERR(" %s Not enough args\n", __func__);
+		err = DM_REQ_CRYPT_ERROR;
+		goto ctr_exit;
+	}
+
+	if (argv[3]) {
+		if (dm_get_device(ti, argv[3],
+				dm_table_get_mode(ti->table), &dev)) {
+			DMERR(" %s Device Lookup failed\n", __func__);
+			err = DM_REQ_CRYPT_ERROR;
+			goto ctr_exit;
+		}
+	} else {
+		DMERR(" %s Arg[3] invalid\n", __func__);
+		err = DM_REQ_CRYPT_ERROR;
+		goto ctr_exit;
+	}
+
+	if (argv[4]) {
+		if (sscanf(argv[4], "%llu%c", &tmpll, &dummy) != 1) {
+			DMERR("%s Invalid device sector\n", __func__);
+			err = DM_REQ_CRYPT_ERROR;
+			goto ctr_exit;
+		}
+	} else {
+		DMERR(" %s Arg[4] invalid\n", __func__);
+		err = DM_REQ_CRYPT_ERROR;
+		goto ctr_exit;
+	}
+	start_sector_orig = tmpll;
+
+	/* Allow backward compatibility */
+	if (argc >= 6) {
+		if (argv[5]) {
+			if (!strcmp(argv[5], "fde_enabled"))
+				is_fde_enabled = true;
+			else
+				is_fde_enabled = false;
+		} else {
+			DMERR(" %s Arg[5] invalid\n", __func__);
+			err = DM_REQ_CRYPT_ERROR;
+			goto ctr_exit;
+		}
+	} else {
+		DMERR(" %s Arg[5] missing, set FDE enabled.\n", __func__);
+		is_fde_enabled = true; /* backward compatible */
+	}
+
+	_req_crypt_io_pool = KMEM_CACHE(req_dm_crypt_io, 0);
+	if (!_req_crypt_io_pool) {
+		err = DM_REQ_CRYPT_ERROR;
+		goto ctr_exit;
+	}
+
+	encryption_mode = DM_REQ_CRYPT_ENCRYPTION_MODE_CRYPTO;
+	if (argc >= 7 && argv[6]) {
+		if (!strcmp(argv[6], "ice"))
+			encryption_mode =
+				DM_REQ_CRYPT_ENCRYPTION_MODE_TRANSPARENT;
+	}
+
+	if (encryption_mode == DM_REQ_CRYPT_ENCRYPTION_MODE_TRANSPARENT) {
+		/* configure ICE settings */
+		ice_settings =
+			kzalloc(sizeof(struct ice_crypto_setting), GFP_KERNEL);
+		if (!ice_settings) {
+			err = -ENOMEM;
+			goto ctr_exit;
+		}
+		ice_settings->key_size = ICE_CRYPTO_KEY_SIZE_128;
+		ice_settings->algo_mode = ICE_CRYPTO_ALGO_MODE_AES_XTS;
+		ice_settings->key_mode = ICE_CRYPTO_USE_LUT_SW_KEY;
+		/* key_index is u16, so only the upper bound needs checking */
+		if (kstrtou16(argv[1], 0, &ice_settings->key_index) ||
+			ice_settings->key_index > MAX_MSM_ICE_KEY_LUT_SIZE) {
+			DMERR("%s Err: key index %d received for ICE\n",
+				__func__, ice_settings->key_index);
+			err = DM_REQ_CRYPT_ERROR;
+			goto ctr_exit;
+		}
+	} else {
+		ret = configure_qcrypto();
+		if (ret) {
+			DMERR("%s failed to configure qcrypto\n", __func__);
+			err = ret;
+			goto ctr_exit;
+		}
+	}
+
+	req_io_pool = mempool_create_slab_pool(MIN_IOS, _req_crypt_io_pool);
+	if (!req_io_pool) {
+		DMERR("%s req_io_pool not allocated\n", __func__);
+		err = -ENOMEM;
+		goto ctr_exit;
+	}
+
+	/*
+	 * If underlying device supports flush/discard, mapped target
+	 * should also allow it
+	 */
+	ti->num_flush_bios = 1;
+	ti->num_discard_bios = 1;
+
+	err = 0;
+	DMINFO("%s: Mapping block_device %s to dm-req-crypt ok!\n",
+	       __func__, argv[3]);
+ctr_exit:
+	if (err)
+		req_crypt_dtr(ti);
+
+	return err;
+}
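+
+/*
+ * Illustrative only (the device path and sizes below are hypothetical).
+ * With the argument layout parsed above,
+ * <cipher> <key> <iv_offset> <dev_path> <start> [fde_enabled] [ice],
+ * a dmsetup table line for this target could look like:
+ *
+ *   0 4194304 req-crypt aes-xts 0 0 /dev/mmcblk0p25 0 fde_enabled ice
+ *
+ * In transparent (ICE) mode the <key> field, argv[1], is reused as the
+ * key LUT index passed to kstrtou16() above.
+ */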
+
+static int req_crypt_iterate_devices(struct dm_target *ti,
+				 iterate_devices_callout_fn fn, void *data)
+{
+	return fn(ti, dev, start_sector_orig, ti->len, data);
+}
+
+void set_qcrypto_func_dm(void *dev,
+			void *flag,
+			void *engines,
+			void *engine_list)
+{
+	dm_qcrypto_func.cipher_set  = dev;
+	dm_qcrypto_func.cipher_flag = flag;
+	dm_qcrypto_func.get_num_engines = engines;
+	dm_qcrypto_func.get_engine_list = engine_list;
+}
+EXPORT_SYMBOL(set_qcrypto_func_dm);
+
+static struct target_type req_crypt_target = {
+	.name   = "req-crypt",
+	.version = {1, 0, 0},
+	.module = THIS_MODULE,
+	.ctr    = req_crypt_ctr,
+	.dtr    = req_crypt_dtr,
+	.map_rq = req_crypt_map,
+	.rq_end_io = req_crypt_endio,
+	.iterate_devices = req_crypt_iterate_devices,
+};
+
+static int __init req_dm_crypt_init(void)
+{
+	int r;
+
+	r = dm_register_target(&req_crypt_target);
+	if (r < 0) {
+		DMERR("register failed %d", r);
+		return r;
+	}
+
+	DMINFO("dm-req-crypt successfully initalized.\n");
+
+	return r;
+}
+
+static void __exit req_dm_crypt_exit(void)
+{
+	dm_unregister_target(&req_crypt_target);
+}
+
+module_init(req_dm_crypt_init);
+module_exit(req_dm_crypt_exit);
+
+MODULE_DESCRIPTION(DM_NAME " target for request based transparent encryption / decryption");
+MODULE_LICENSE("GPL v2");
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/md/dm-verity-fec.c	2019-01-22 16:16:24.187252601 +0100
@@ -0,0 +1,870 @@
+/*
+ * Copyright (C) 2015 Google, Inc.
+ *
+ * Author: Sami Tolvanen <samitolvanen@google.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ */
+
+#include "dm-verity-fec.h"
+#include <linux/math64.h>
+#include <linux/sysfs.h>
+
+#define DM_MSG_PREFIX	"verity-fec"
+
+/*
+ * If error correction has been configured, returns true.
+ */
+bool verity_fec_is_enabled(struct dm_verity *v)
+{
+	return v->fec && v->fec->dev;
+}
+
+/*
+ * Return a pointer to dm_verity_fec_io after dm_verity_io and its variable
+ * length fields.
+ */
+static inline struct dm_verity_fec_io *fec_io(struct dm_verity_io *io)
+{
+	return (struct dm_verity_fec_io *) verity_io_digest_end(io->v, io);
+}
+
+/*
+ * Return an interleaved offset for a byte in RS block.
+ */
+static inline u64 fec_interleave(struct dm_verity *v, u64 offset)
+{
+	u32 mod;
+
+	mod = do_div(offset, v->fec->rsn);
+	return offset + mod * (v->fec->rounds << v->data_dev_block_bits);
+}
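+
+/*
+ * Worked example for fec_interleave(), assuming rsn = 239, rounds = 1024
+ * and 4 KiB blocks (data_dev_block_bits = 12): offset = 479 gives
+ * mod = 479 % 239 = 1 and offset / rsn = 2, so the result is
+ * 2 + 1 * (1024 << 12) = 4194306. Consecutive bytes of a codeword are
+ * therefore spread (rounds << data_dev_block_bits) apart on the device.
+ */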
+
+/*
+ * Decode an RS block using Reed-Solomon.
+ */
+static int fec_decode_rs8(struct dm_verity *v, struct dm_verity_fec_io *fio,
+			  u8 *data, u8 *fec, int neras)
+{
+	int i;
+	uint16_t par[DM_VERITY_FEC_RSM - DM_VERITY_FEC_MIN_RSN];
+
+	for (i = 0; i < v->fec->roots; i++)
+		par[i] = fec[i];
+
+	return decode_rs8(fio->rs, data, par, v->fec->rsn, NULL, neras,
+			  fio->erasures, 0, NULL);
+}
+
+/*
+ * Read error-correcting codes for the requested RS block. Returns a pointer
+ * to the data block. Caller is responsible for releasing buf.
+ */
+static u8 *fec_read_parity(struct dm_verity *v, u64 rsb, int index,
+			   unsigned *offset, struct dm_buffer **buf)
+{
+	u64 position, block;
+	u8 *res;
+
+	position = (index + rsb) * v->fec->roots;
+	block = position >> v->data_dev_block_bits;
+	*offset = (unsigned)(position - (block << v->data_dev_block_bits));
+
+	res = dm_bufio_read(v->fec->bufio, v->fec->start + block, buf);
+	if (unlikely(IS_ERR(res))) {
+		DMERR("%s: FEC %llu: parity read failed (block %llu): %ld",
+		      v->data_dev->name, (unsigned long long)rsb,
+		      (unsigned long long)(v->fec->start + block),
+		      PTR_ERR(res));
+		*buf = NULL;
+	}
+
+	return res;
+}
+
+/* Loop over each preallocated buffer slot. */
+#define fec_for_each_prealloc_buffer(__i) \
+	for (__i = 0; __i < DM_VERITY_FEC_BUF_PREALLOC; __i++)
+
+/* Loop over each extra buffer slot. */
+#define fec_for_each_extra_buffer(io, __i) \
+	for (__i = DM_VERITY_FEC_BUF_PREALLOC; __i < DM_VERITY_FEC_BUF_MAX; __i++)
+
+/* Loop over each allocated buffer. */
+#define fec_for_each_buffer(io, __i) \
+	for (__i = 0; __i < (io)->nbufs; __i++)
+
+/* Loop over each RS block in each allocated buffer. */
+#define fec_for_each_buffer_rs_block(io, __i, __j) \
+	fec_for_each_buffer(io, __i) \
+		for (__j = 0; __j < 1 << DM_VERITY_FEC_BUF_RS_BITS; __j++)
+
+/*
+ * Return a pointer to the current RS block when called inside
+ * fec_for_each_buffer_rs_block.
+ */
+static inline u8 *fec_buffer_rs_block(struct dm_verity *v,
+				      struct dm_verity_fec_io *fio,
+				      unsigned i, unsigned j)
+{
+	return &fio->bufs[i][j * v->fec->rsn];
+}
+
+/*
+ * Return an index to the current RS block when called inside
+ * fec_for_each_buffer_rs_block.
+ */
+static inline unsigned fec_buffer_rs_index(unsigned i, unsigned j)
+{
+	return (i << DM_VERITY_FEC_BUF_RS_BITS) + j;
+}
+
+/*
+ * Decode all RS blocks from buffers and copy corrected bytes into fio->output
+ * starting from block_offset.
+ */
+static int fec_decode_bufs(struct dm_verity *v, struct dm_verity_fec_io *fio,
+			   u64 rsb, int byte_index, unsigned block_offset,
+			   int neras)
+{
+	int r, corrected = 0, res;
+	struct dm_buffer *buf;
+	unsigned n, i, offset;
+	u8 *par, *block;
+
+	par = fec_read_parity(v, rsb, block_offset, &offset, &buf);
+	if (IS_ERR(par))
+		return PTR_ERR(par);
+
+	/*
+	 * Decode the RS blocks we have in bufs. Each RS block results in
+	 * one corrected target byte and consumes fec->roots parity bytes.
+	 */
+	fec_for_each_buffer_rs_block(fio, n, i) {
+		block = fec_buffer_rs_block(v, fio, n, i);
+		res = fec_decode_rs8(v, fio, block, &par[offset], neras);
+		if (res < 0) {
+			dm_bufio_release(buf);
+
+			r = res;
+			goto error;
+		}
+
+		corrected += res;
+		fio->output[block_offset] = block[byte_index];
+
+		block_offset++;
+		if (block_offset >= 1 << v->data_dev_block_bits)
+			goto done;
+
+		/* read the next block when we run out of parity bytes */
+		offset += v->fec->roots;
+		if (offset >= 1 << v->data_dev_block_bits) {
+			dm_bufio_release(buf);
+
+			par = fec_read_parity(v, rsb, block_offset, &offset, &buf);
+			if (unlikely(IS_ERR(par)))
+				return PTR_ERR(par);
+		}
+	}
+done:
+	r = corrected;
+error:
+	if (r < 0 && neras)
+		DMERR_LIMIT("%s: FEC %llu: failed to correct: %d",
+			    v->data_dev->name, (unsigned long long)rsb, r);
+	else if (r > 0) {
+		DMWARN_LIMIT("%s: FEC %llu: corrected %d errors",
+			     v->data_dev->name, (unsigned long long)rsb, r);
+		atomic_add_unless(&v->fec->corrected, 1, INT_MAX);
+	}
+
+	return r;
+}
+
+/*
+ * Locate data block erasures using verity hashes.
+ */
+static int fec_is_erasure(struct dm_verity *v, struct dm_verity_io *io,
+			  u8 *want_digest, u8 *data)
+{
+	if (unlikely(verity_hash(v, verity_io_hash_desc(v, io),
+				 data, 1 << v->data_dev_block_bits,
+				 verity_io_real_digest(v, io))))
+		return 0;
+
+	return memcmp(verity_io_real_digest(v, io), want_digest,
+		      v->digest_size) != 0;
+}
+
+/*
+ * Read data blocks that are part of the RS block and deinterleave as much as
+ * fits into buffers. Check for erasure locations if @neras is non-NULL.
+ */
+static int fec_read_bufs(struct dm_verity *v, struct dm_verity_io *io,
+			 u64 rsb, u64 target, unsigned block_offset,
+			 int *neras)
+{
+	bool is_zero;
+	int i, j, target_index = -1;
+	struct dm_buffer *buf;
+	struct dm_bufio_client *bufio;
+	struct dm_verity_fec_io *fio = fec_io(io);
+	u64 block, ileaved;
+	u8 *bbuf, *rs_block;
+	u8 want_digest[v->digest_size];
+	unsigned n, k;
+
+	if (neras)
+		*neras = 0;
+
+	/*
+	 * read each of the rsn data blocks that are part of the RS block, and
+	 * interleave contents to available bufs
+	 */
+	for (i = 0; i < v->fec->rsn; i++) {
+		ileaved = fec_interleave(v, rsb * v->fec->rsn + i);
+
+		/*
+		 * target is the data block we want to correct, target_index is
+		 * the index of this block within the rsn RS blocks
+		 */
+		if (ileaved == target)
+			target_index = i;
+
+		block = ileaved >> v->data_dev_block_bits;
+		bufio = v->fec->data_bufio;
+
+		if (block >= v->data_blocks) {
+			block -= v->data_blocks;
+
+			/*
+			 * blocks outside the area were assumed to contain
+			 * zeros when encoding data was generated
+			 */
+			if (unlikely(block >= v->fec->hash_blocks))
+				continue;
+
+			block += v->hash_start;
+			bufio = v->bufio;
+		}
+
+		bbuf = dm_bufio_read(bufio, block, &buf);
+		if (unlikely(IS_ERR(bbuf))) {
+			DMWARN_LIMIT("%s: FEC %llu: read failed (%llu): %ld",
+				     v->data_dev->name,
+				     (unsigned long long)rsb,
+				     (unsigned long long)block, PTR_ERR(bbuf));
+
+			/* assume the block is corrupted */
+			if (neras && *neras <= v->fec->roots)
+				fio->erasures[(*neras)++] = i;
+
+			continue;
+		}
+
+		/* locate erasures if the block is on the data device */
+		if (bufio == v->fec->data_bufio &&
+		    verity_hash_for_block(v, io, block, want_digest,
+					  &is_zero) == 0) {
+			/* skip known zero blocks entirely */
+			if (is_zero)
+				continue;
+
+			/*
+			 * skip if we have already found the theoretical
+			 * maximum number (i.e. fec->roots) of erasures
+			 */
+			if (neras && *neras <= v->fec->roots &&
+			    fec_is_erasure(v, io, want_digest, bbuf))
+				fio->erasures[(*neras)++] = i;
+		}
+
+		/*
+		 * deinterleave and copy the bytes that fit into bufs,
+		 * starting from block_offset
+		 */
+		fec_for_each_buffer_rs_block(fio, n, j) {
+			k = fec_buffer_rs_index(n, j) + block_offset;
+
+			if (k >= 1 << v->data_dev_block_bits)
+				goto done;
+
+			rs_block = fec_buffer_rs_block(v, fio, n, j);
+			rs_block[i] = bbuf[k];
+		}
+done:
+		dm_bufio_release(buf);
+	}
+
+	return target_index;
+}
+
+/*
+ * Allocate RS control structure and FEC buffers from preallocated mempools,
+ * and attempt to allocate as many extra buffers as available.
+ */
+static int fec_alloc_bufs(struct dm_verity *v, struct dm_verity_fec_io *fio)
+{
+	unsigned n;
+
+	if (!fio->rs) {
+		fio->rs = mempool_alloc(v->fec->rs_pool, 0);
+		if (unlikely(!fio->rs)) {
+			DMERR("failed to allocate RS");
+			return -ENOMEM;
+		}
+	}
+
+	fec_for_each_prealloc_buffer(n) {
+		if (fio->bufs[n])
+			continue;
+
+		fio->bufs[n] = mempool_alloc(v->fec->prealloc_pool, GFP_NOIO);
+		if (unlikely(!fio->bufs[n])) {
+			DMERR("failed to allocate FEC buffer");
+			return -ENOMEM;
+		}
+	}
+
+	/* try to allocate the maximum number of buffers */
+	fec_for_each_extra_buffer(fio, n) {
+		if (fio->bufs[n])
+			continue;
+
+		fio->bufs[n] = mempool_alloc(v->fec->extra_pool, GFP_NOIO);
+		/* we can manage with even one buffer if necessary */
+		if (unlikely(!fio->bufs[n]))
+			break;
+	}
+	fio->nbufs = n;
+
+	if (!fio->output) {
+		fio->output = mempool_alloc(v->fec->output_pool, GFP_NOIO);
+
+		if (!fio->output) {
+			DMERR("failed to allocate FEC page");
+			return -ENOMEM;
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * Initialize buffers and clear erasures. fec_read_bufs() assumes buffers are
+ * zeroed before deinterleaving.
+ */
+static void fec_init_bufs(struct dm_verity *v, struct dm_verity_fec_io *fio)
+{
+	unsigned n;
+
+	fec_for_each_buffer(fio, n)
+		memset(fio->bufs[n], 0, v->fec->rsn << DM_VERITY_FEC_BUF_RS_BITS);
+
+	memset(fio->erasures, 0, sizeof(fio->erasures));
+}
+
+/*
+ * Decode all RS blocks in a single data block and return the target block
+ * (indicated by @offset) in fio->output. If @use_erasures is non-zero, uses
+ * hashes to locate erasures.
+ */
+static int fec_decode_rsb(struct dm_verity *v, struct dm_verity_io *io,
+			  struct dm_verity_fec_io *fio, u64 rsb, u64 offset,
+			  bool use_erasures)
+{
+	int r, neras = 0;
+	unsigned pos;
+
+	r = fec_alloc_bufs(v, fio);
+	if (unlikely(r < 0))
+		return r;
+
+	for (pos = 0; pos < 1 << v->data_dev_block_bits; ) {
+		fec_init_bufs(v, fio);
+
+		r = fec_read_bufs(v, io, rsb, offset, pos,
+				  use_erasures ? &neras : NULL);
+		if (unlikely(r < 0))
+			return r;
+
+		r = fec_decode_bufs(v, fio, rsb, r, pos, neras);
+		if (r < 0)
+			return r;
+
+		pos += fio->nbufs << DM_VERITY_FEC_BUF_RS_BITS;
+	}
+
+	/* Always re-validate the corrected block against the expected hash */
+	r = verity_hash(v, verity_io_hash_desc(v, io), fio->output,
+			1 << v->data_dev_block_bits,
+			verity_io_real_digest(v, io));
+	if (unlikely(r < 0))
+		return r;
+
+	if (memcmp(verity_io_real_digest(v, io), verity_io_want_digest(v, io),
+		   v->digest_size)) {
+		DMERR_LIMIT("%s: FEC %llu: failed to correct (%d erasures)",
+			    v->data_dev->name, (unsigned long long)rsb, neras);
+		return -EILSEQ;
+	}
+
+	return 0;
+}
+
+static int fec_bv_copy(struct dm_verity *v, struct dm_verity_io *io, u8 *data,
+		       size_t len)
+{
+	struct dm_verity_fec_io *fio = fec_io(io);
+
+	memcpy(data, &fio->output[fio->output_pos], len);
+	fio->output_pos += len;
+
+	return 0;
+}
+
+/*
+ * Correct errors in a block. Copies corrected block to dest if non-NULL,
+ * otherwise to a bio_vec starting from iter.
+ */
+int verity_fec_decode(struct dm_verity *v, struct dm_verity_io *io,
+		      enum verity_block_type type, sector_t block, u8 *dest,
+		      struct bvec_iter *iter)
+{
+	int r;
+	struct dm_verity_fec_io *fio = fec_io(io);
+	u64 offset, res, rsb;
+
+	if (!verity_fec_is_enabled(v))
+		return -EOPNOTSUPP;
+
+	if (fio->level >= DM_VERITY_FEC_MAX_RECURSION) {
+		DMWARN_LIMIT("%s: FEC: recursion too deep", v->data_dev->name);
+		return -EIO;
+	}
+
+	fio->level++;
+
+	if (type == DM_VERITY_BLOCK_TYPE_METADATA)
+		block += v->data_blocks;
+
+	/*
+	 * For RS(M, N), the continuous FEC data is divided into blocks of N
+	 * bytes. Since block size may not be divisible by N, the last block
+	 * is zero padded when decoding.
+	 *
+	 * Each byte of the block is covered by a different RS(M, N) code,
+	 * and each code is interleaved over N blocks to make it less likely
+	 * that bursty corruption will leave us in unrecoverable state.
+	 */
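+	/*
+	 * Worked example (hypothetical geometry): with rounds = 1024 and
+	 * 4 KiB blocks, the interleaver period rounds << data_dev_block_bits
+	 * is 4194304; block 5000 gives offset = 20480000, res = 4, and
+	 * rsb = 20480000 - 4 * 4194304 = 3702784, i.e. rsb is offset modulo
+	 * the period.
+	 */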
+
+	offset = block << v->data_dev_block_bits;
+	res = div64_u64(offset, v->fec->rounds << v->data_dev_block_bits);
+
+	/*
+	 * The base RS block we can feed to the interleaver to find out all
+	 * blocks required for decoding.
+	 */
+	rsb = offset - res * (v->fec->rounds << v->data_dev_block_bits);
+
+	/*
+	 * Locating erasures is slow, so attempt to recover the block without
+	 * them first. Do a second attempt with erasures if the corruption is
+	 * bad enough.
+	 */
+	r = fec_decode_rsb(v, io, fio, rsb, offset, false);
+	if (r < 0) {
+		r = fec_decode_rsb(v, io, fio, rsb, offset, true);
+		if (r < 0)
+			goto done;
+	}
+
+	if (dest)
+		memcpy(dest, fio->output, 1 << v->data_dev_block_bits);
+	else if (iter) {
+		fio->output_pos = 0;
+		r = verity_for_bv_block(v, io, iter, fec_bv_copy);
+	}
+
+done:
+	fio->level--;
+	return r;
+}
+
+/*
+ * Clean up per-bio data.
+ */
+void verity_fec_finish_io(struct dm_verity_io *io)
+{
+	unsigned n;
+	struct dm_verity_fec *f = io->v->fec;
+	struct dm_verity_fec_io *fio = fec_io(io);
+
+	if (!verity_fec_is_enabled(io->v))
+		return;
+
+	mempool_free(fio->rs, f->rs_pool);
+
+	fec_for_each_prealloc_buffer(n)
+		mempool_free(fio->bufs[n], f->prealloc_pool);
+
+	fec_for_each_extra_buffer(fio, n)
+		mempool_free(fio->bufs[n], f->extra_pool);
+
+	mempool_free(fio->output, f->output_pool);
+}
+
+/*
+ * Initialize per-bio data.
+ */
+void verity_fec_init_io(struct dm_verity_io *io)
+{
+	struct dm_verity_fec_io *fio = fec_io(io);
+
+	if (!verity_fec_is_enabled(io->v))
+		return;
+
+	fio->rs = NULL;
+	memset(fio->bufs, 0, sizeof(fio->bufs));
+	fio->nbufs = 0;
+	fio->output = NULL;
+	fio->level = 0;
+}
+
+/*
+ * Append feature arguments and values to the status table.
+ */
+unsigned verity_fec_status_table(struct dm_verity *v, unsigned sz,
+				 char *result, unsigned maxlen)
+{
+	if (!verity_fec_is_enabled(v))
+		return sz;
+
+	DMEMIT(" " DM_VERITY_OPT_FEC_DEV " %s "
+	       DM_VERITY_OPT_FEC_BLOCKS " %llu "
+	       DM_VERITY_OPT_FEC_START " %llu "
+	       DM_VERITY_OPT_FEC_ROOTS " %d",
+	       v->fec->dev->name,
+	       (unsigned long long)v->fec->blocks,
+	       (unsigned long long)v->fec->start,
+	       v->fec->roots);
+
+	return sz;
+}
+
+void verity_fec_dtr(struct dm_verity *v)
+{
+	struct dm_verity_fec *f = v->fec;
+	struct kobject *kobj = &f->kobj_holder.kobj;
+
+	if (!verity_fec_is_enabled(v))
+		goto out;
+
+	mempool_destroy(f->rs_pool);
+	mempool_destroy(f->prealloc_pool);
+	mempool_destroy(f->extra_pool);
+	kmem_cache_destroy(f->cache);
+
+	if (f->data_bufio)
+		dm_bufio_client_destroy(f->data_bufio);
+	if (f->bufio)
+		dm_bufio_client_destroy(f->bufio);
+
+	if (f->dev)
+		dm_put_device(v->ti, f->dev);
+
+	if (kobj->state_initialized) {
+		kobject_put(kobj);
+		wait_for_completion(dm_get_completion_from_kobject(kobj));
+	}
+
+out:
+	kfree(f);
+	v->fec = NULL;
+}
+
+static void *fec_rs_alloc(gfp_t gfp_mask, void *pool_data)
+{
+	struct dm_verity *v = (struct dm_verity *)pool_data;
+
+	return init_rs(8, 0x11d, 0, 1, v->fec->roots);
+}
+
+static void fec_rs_free(void *element, void *pool_data)
+{
+	struct rs_control *rs = (struct rs_control *)element;
+
+	if (rs)
+		free_rs(rs);
+}
+
+bool verity_is_fec_opt_arg(const char *arg_name)
+{
+	return (!strcasecmp(arg_name, DM_VERITY_OPT_FEC_DEV) ||
+		!strcasecmp(arg_name, DM_VERITY_OPT_FEC_BLOCKS) ||
+		!strcasecmp(arg_name, DM_VERITY_OPT_FEC_START) ||
+		!strcasecmp(arg_name, DM_VERITY_OPT_FEC_ROOTS));
+}
+
+int verity_fec_parse_opt_args(struct dm_arg_set *as, struct dm_verity *v,
+			      unsigned *argc, const char *arg_name)
+{
+	int r;
+	struct dm_target *ti = v->ti;
+	const char *arg_value;
+	unsigned long long num_ll;
+	unsigned char num_c;
+	char dummy;
+
+	if (!*argc) {
+		ti->error = "FEC feature arguments require a value";
+		return -EINVAL;
+	}
+
+	arg_value = dm_shift_arg(as);
+	(*argc)--;
+
+	if (!strcasecmp(arg_name, DM_VERITY_OPT_FEC_DEV)) {
+		r = dm_get_device(ti, arg_value, FMODE_READ, &v->fec->dev);
+		if (r) {
+			ti->error = "FEC device lookup failed";
+			return r;
+		}
+
+	} else if (!strcasecmp(arg_name, DM_VERITY_OPT_FEC_BLOCKS)) {
+		if (sscanf(arg_value, "%llu%c", &num_ll, &dummy) != 1 ||
+		    ((sector_t)(num_ll << (v->data_dev_block_bits - SECTOR_SHIFT))
+		     >> (v->data_dev_block_bits - SECTOR_SHIFT) != num_ll)) {
+			ti->error = "Invalid " DM_VERITY_OPT_FEC_BLOCKS;
+			return -EINVAL;
+		}
+		v->fec->blocks = num_ll;
+
+	} else if (!strcasecmp(arg_name, DM_VERITY_OPT_FEC_START)) {
+		if (sscanf(arg_value, "%llu%c", &num_ll, &dummy) != 1 ||
+		    ((sector_t)(num_ll << (v->data_dev_block_bits - SECTOR_SHIFT)) >>
+		     (v->data_dev_block_bits - SECTOR_SHIFT) != num_ll)) {
+			ti->error = "Invalid " DM_VERITY_OPT_FEC_START;
+			return -EINVAL;
+		}
+		v->fec->start = num_ll;
+
+	} else if (!strcasecmp(arg_name, DM_VERITY_OPT_FEC_ROOTS)) {
+		if (sscanf(arg_value, "%hhu%c", &num_c, &dummy) != 1 || !num_c ||
+		    num_c < (DM_VERITY_FEC_RSM - DM_VERITY_FEC_MAX_RSN) ||
+		    num_c > (DM_VERITY_FEC_RSM - DM_VERITY_FEC_MIN_RSN)) {
+			ti->error = "Invalid " DM_VERITY_OPT_FEC_ROOTS;
+			return -EINVAL;
+		}
+		v->fec->roots = num_c;
+
+	} else {
+		ti->error = "Unrecognized verity FEC feature request";
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static ssize_t corrected_show(struct kobject *kobj, struct kobj_attribute *attr,
+			      char *buf)
+{
+	struct dm_verity_fec *f = container_of(kobj, struct dm_verity_fec,
+					       kobj_holder.kobj);
+
+	return sprintf(buf, "%d\n", atomic_read(&f->corrected));
+}
+
+static struct kobj_attribute attr_corrected = __ATTR_RO(corrected);
+
+static struct attribute *fec_attrs[] = {
+	&attr_corrected.attr,
+	NULL
+};
+
+static struct kobj_type fec_ktype = {
+	.sysfs_ops = &kobj_sysfs_ops,
+	.default_attrs = fec_attrs,
+	.release = dm_kobject_release
+};
+
+/*
+ * Allocate dm_verity_fec for v->fec. Must be called before verity_fec_ctr.
+ */
+int verity_fec_ctr_alloc(struct dm_verity *v)
+{
+	struct dm_verity_fec *f;
+
+	f = kzalloc(sizeof(struct dm_verity_fec), GFP_KERNEL);
+	if (!f) {
+		v->ti->error = "Cannot allocate FEC structure";
+		return -ENOMEM;
+	}
+	v->fec = f;
+
+	return 0;
+}
+
+/*
+ * Validate arguments and preallocate memory. Must be called after arguments
+ * have been parsed using verity_fec_parse_opt_args.
+ */
+int verity_fec_ctr(struct dm_verity *v)
+{
+	int r;
+	struct dm_verity_fec *f = v->fec;
+	struct dm_target *ti = v->ti;
+	struct mapped_device *md = dm_table_get_md(ti->table);
+	u64 hash_blocks;
+
+	if (!verity_fec_is_enabled(v)) {
+		verity_fec_dtr(v);
+		return 0;
+	}
+
+	/* Create a kobject and sysfs attributes */
+	init_completion(&f->kobj_holder.completion);
+
+	r = kobject_init_and_add(&f->kobj_holder.kobj, &fec_ktype,
+				 &disk_to_dev(dm_disk(md))->kobj, "%s", "fec");
+	if (r) {
+		ti->error = "Cannot create kobject";
+		return r;
+	}
+
+	/*
+	 * FEC is computed over data blocks, possible metadata, and
+	 * hash blocks. In other words, FEC covers a total of fec_blocks
+	 * blocks consisting of the following:
+	 *
+	 *  data blocks | hash blocks | metadata (optional)
+	 *
+	 * We allow metadata after hash blocks to support a use case
+	 * where all data is stored on the same device and FEC covers
+	 * the entire area.
+	 *
+	 * If metadata is included, we require it to be available on the
+	 * hash device after the hash blocks.
+	 */
+
+	hash_blocks = v->hash_blocks - v->hash_start;
+
+	/*
+	 * Require matching block sizes for data and hash devices for
+	 * simplicity.
+	 */
+	if (v->data_dev_block_bits != v->hash_dev_block_bits) {
+		ti->error = "Block sizes must match to use FEC";
+		return -EINVAL;
+	}
+
+	if (!f->roots) {
+		ti->error = "Missing " DM_VERITY_OPT_FEC_ROOTS;
+		return -EINVAL;
+	}
+	f->rsn = DM_VERITY_FEC_RSM - f->roots;
+
+	if (!f->blocks) {
+		ti->error = "Missing " DM_VERITY_OPT_FEC_BLOCKS;
+		return -EINVAL;
+	}
+
+	f->rounds = f->blocks;
+	if (sector_div(f->rounds, f->rsn))
+		f->rounds++;
+
+	/*
+	 * Due to optional metadata, f->blocks can be larger than
+	 * data_blocks and hash_blocks combined.
+	 */
+	if (f->blocks < v->data_blocks + hash_blocks || !f->rounds) {
+		ti->error = "Invalid " DM_VERITY_OPT_FEC_BLOCKS;
+		return -EINVAL;
+	}
+
+	/*
+	 * Metadata is accessed through the hash device, so we require
+	 * it to be large enough.
+	 */
+	f->hash_blocks = f->blocks - v->data_blocks;
+	if (dm_bufio_get_device_size(v->bufio) < f->hash_blocks) {
+		ti->error = "Hash device is too small for "
+			DM_VERITY_OPT_FEC_BLOCKS;
+		return -E2BIG;
+	}
+
+	f->bufio = dm_bufio_client_create(f->dev->bdev,
+					  1 << v->data_dev_block_bits,
+					  1, 0, NULL, NULL);
+	if (IS_ERR(f->bufio)) {
+		ti->error = "Cannot initialize FEC bufio client";
+		return PTR_ERR(f->bufio);
+	}
+
+	if (dm_bufio_get_device_size(f->bufio) <
+	    ((f->start + f->rounds * f->roots) >> v->data_dev_block_bits)) {
+		ti->error = "FEC device is too small";
+		return -E2BIG;
+	}
+
+	f->data_bufio = dm_bufio_client_create(v->data_dev->bdev,
+					       1 << v->data_dev_block_bits,
+					       1, 0, NULL, NULL);
+	if (IS_ERR(f->data_bufio)) {
+		ti->error = "Cannot initialize FEC data bufio client";
+		return PTR_ERR(f->data_bufio);
+	}
+
+	if (dm_bufio_get_device_size(f->data_bufio) < v->data_blocks) {
+		ti->error = "Data device is too small";
+		return -E2BIG;
+	}
+
+	/* Preallocate an rs_control structure for each worker thread */
+	f->rs_pool = mempool_create(num_online_cpus(), fec_rs_alloc,
+				    fec_rs_free, (void *) v);
+	if (!f->rs_pool) {
+		ti->error = "Cannot allocate RS pool";
+		return -ENOMEM;
+	}
+
+	f->cache = kmem_cache_create("dm_verity_fec_buffers",
+				     f->rsn << DM_VERITY_FEC_BUF_RS_BITS,
+				     0, 0, NULL);
+	if (!f->cache) {
+		ti->error = "Cannot create FEC buffer cache";
+		return -ENOMEM;
+	}
+
+	/* Preallocate DM_VERITY_FEC_BUF_PREALLOC buffers for each thread */
+	f->prealloc_pool = mempool_create_slab_pool(num_online_cpus() *
+						    DM_VERITY_FEC_BUF_PREALLOC,
+						    f->cache);
+	if (!f->prealloc_pool) {
+		ti->error = "Cannot allocate FEC buffer prealloc pool";
+		return -ENOMEM;
+	}
+
+	f->extra_pool = mempool_create_slab_pool(0, f->cache);
+	if (!f->extra_pool) {
+		ti->error = "Cannot allocate FEC buffer extra pool";
+		return -ENOMEM;
+	}
+
+	/* Preallocate an output buffer for each thread */
+	f->output_pool = mempool_create_kmalloc_pool(num_online_cpus(),
+						     1 << v->data_dev_block_bits);
+	if (!f->output_pool) {
+		ti->error = "Cannot allocate FEC output pool";
+		return -ENOMEM;
+	}
+
+	/* Reserve space for our per-bio data */
+	ti->per_bio_data_size += sizeof(struct dm_verity_fec_io);
+
+	return 0;
+}
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/md/dm-verity-fec.h	2019-01-22 16:16:24.187252601 +0100
@@ -0,0 +1,159 @@
+/*
+ * Copyright (C) 2015 Google, Inc.
+ *
+ * Author: Sami Tolvanen <samitolvanen@google.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ */
+
+#ifndef DM_VERITY_FEC_H
+#define DM_VERITY_FEC_H
+
+#include "dm.h"
+#include "dm-verity.h"
+#include <linux/rslib.h>
+
+/* Reed-Solomon(M, N) parameters */
+#define DM_VERITY_FEC_RSM		255
+#define DM_VERITY_FEC_MAX_RSN		253
+#define DM_VERITY_FEC_MIN_RSN		231	/* ~10% space overhead */
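+/*
+ * E.g. fec_roots = 16 yields RS(255, 239): each codeword carries 239 data
+ * bytes and 16 parity bytes, enough to repair up to 8 unknown errors or
+ * 16 known erasures per RS block.
+ */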
+
+/* buffers for deinterleaving and decoding */
+#define DM_VERITY_FEC_BUF_PREALLOC	1	/* buffers to preallocate */
+#define DM_VERITY_FEC_BUF_RS_BITS	4	/* 1 << RS blocks per buffer */
+/* we need buffers for at most 1 << block size RS blocks */
+#define DM_VERITY_FEC_BUF_MAX \
+	(1 << (PAGE_SHIFT - DM_VERITY_FEC_BUF_RS_BITS))
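+/*
+ * Example with 4 KiB pages (PAGE_SHIFT = 12): DM_VERITY_FEC_BUF_MAX is
+ * 1 << (12 - 4) = 256 buffers of 16 RS blocks each, i.e. the 4096 RS
+ * blocks needed to rebuild every byte of one 4 KiB data block.
+ */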
+
+/* maximum recursion level for verity_fec_decode */
+#define DM_VERITY_FEC_MAX_RECURSION	4
+
+#define DM_VERITY_OPT_FEC_DEV		"use_fec_from_device"
+#define DM_VERITY_OPT_FEC_BLOCKS	"fec_blocks"
+#define DM_VERITY_OPT_FEC_START		"fec_start"
+#define DM_VERITY_OPT_FEC_ROOTS		"fec_roots"
+
+/* configuration */
+struct dm_verity_fec {
+	struct dm_dev *dev;	/* parity data device */
+	struct dm_bufio_client *data_bufio;	/* for data dev access */
+	struct dm_bufio_client *bufio;		/* for parity data access */
+	sector_t start;		/* parity data start in blocks */
+	sector_t blocks;	/* number of blocks covered */
+	sector_t rounds;	/* number of interleaving rounds */
+	sector_t hash_blocks;	/* blocks covered after v->hash_start */
+	unsigned char roots;	/* number of parity bytes, M-N of RS(M, N) */
+	unsigned char rsn;	/* N of RS(M, N) */
+	mempool_t *rs_pool;	/* mempool for fio->rs */
+	mempool_t *prealloc_pool;	/* mempool for preallocated buffers */
+	mempool_t *extra_pool;	/* mempool for extra buffers */
+	mempool_t *output_pool;	/* mempool for output */
+	struct kmem_cache *cache;	/* cache for buffers */
+	atomic_t corrected;		/* corrected errors */
+	struct dm_kobject_holder kobj_holder;	/* for sysfs attributes */
+};
+
+/* per-bio data */
+struct dm_verity_fec_io {
+	struct rs_control *rs;	/* Reed-Solomon state */
+	int erasures[DM_VERITY_FEC_MAX_RSN];	/* erasures for decode_rs8 */
+	u8 *bufs[DM_VERITY_FEC_BUF_MAX];	/* bufs for deinterleaving */
+	unsigned nbufs;		/* number of buffers allocated */
+	u8 *output;		/* buffer for corrected output */
+	size_t output_pos;
+	unsigned level;		/* recursion level */
+};
+
+#ifdef CONFIG_DM_VERITY_FEC
+
+/* each feature parameter requires a value */
+#define DM_VERITY_OPTS_FEC	8
+
+extern bool verity_fec_is_enabled(struct dm_verity *v);
+
+extern int verity_fec_decode(struct dm_verity *v, struct dm_verity_io *io,
+			     enum verity_block_type type, sector_t block,
+			     u8 *dest, struct bvec_iter *iter);
+
+extern unsigned verity_fec_status_table(struct dm_verity *v, unsigned sz,
+					char *result, unsigned maxlen);
+
+extern void verity_fec_finish_io(struct dm_verity_io *io);
+extern void verity_fec_init_io(struct dm_verity_io *io);
+
+extern bool verity_is_fec_opt_arg(const char *arg_name);
+extern int verity_fec_parse_opt_args(struct dm_arg_set *as,
+				     struct dm_verity *v, unsigned *argc,
+				     const char *arg_name);
+
+extern void verity_fec_dtr(struct dm_verity *v);
+
+extern int verity_fec_ctr_alloc(struct dm_verity *v);
+extern int verity_fec_ctr(struct dm_verity *v);
+
+#else /* !CONFIG_DM_VERITY_FEC */
+
+#define DM_VERITY_OPTS_FEC	0
+
+static inline bool verity_fec_is_enabled(struct dm_verity *v)
+{
+	return false;
+}
+
+static inline int verity_fec_decode(struct dm_verity *v,
+				    struct dm_verity_io *io,
+				    enum verity_block_type type,
+				    sector_t block, u8 *dest,
+				    struct bvec_iter *iter)
+{
+	return -EOPNOTSUPP;
+}
+
+static inline unsigned verity_fec_status_table(struct dm_verity *v,
+					       unsigned sz, char *result,
+					       unsigned maxlen)
+{
+	return sz;
+}
+
+static inline void verity_fec_finish_io(struct dm_verity_io *io)
+{
+}
+
+static inline void verity_fec_init_io(struct dm_verity_io *io)
+{
+}
+
+static inline bool verity_is_fec_opt_arg(const char *arg_name)
+{
+	return false;
+}
+
+static inline int verity_fec_parse_opt_args(struct dm_arg_set *as,
+					    struct dm_verity *v,
+					    unsigned *argc,
+					    const char *arg_name)
+{
+	return -EINVAL;
+}
+
+static inline void verity_fec_dtr(struct dm_verity *v)
+{
+}
+
+static inline int verity_fec_ctr_alloc(struct dm_verity *v)
+{
+	return 0;
+}
+
+static inline int verity_fec_ctr(struct dm_verity *v)
+{
+	return 0;
+}
+
+#endif /* CONFIG_DM_VERITY_FEC */
+
+#endif /* DM_VERITY_FEC_H */
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/md/dm-verity.h	2019-10-29 09:26:23.865205351 +0100
@@ -0,0 +1,140 @@
+/*
+ * Copyright (C) 2012 Red Hat, Inc.
+ * Copyright (C) 2015 Google, Inc.
+ *
+ * Author: Mikulas Patocka <mpatocka@redhat.com>
+ *
+ * Based on Chromium dm-verity driver (C) 2011 The Chromium OS Authors
+ *
+ * This file is released under the GPLv2.
+ */
+
+#ifndef DM_VERITY_H
+#define DM_VERITY_H
+
+#include "dm-bufio.h"
+#include <linux/device-mapper.h>
+#include <crypto/hash.h>
+
+#define DM_VERITY_MAX_LEVELS		63
+
+enum verity_mode {
+	DM_VERITY_MODE_EIO,
+	DM_VERITY_MODE_LOGGING,
+	DM_VERITY_MODE_RESTART
+};
+
+enum verity_block_type {
+	DM_VERITY_BLOCK_TYPE_DATA,
+	DM_VERITY_BLOCK_TYPE_METADATA
+};
+
+struct dm_verity_fec;
+
+struct dm_verity {
+	struct dm_dev *data_dev;
+	struct dm_dev *hash_dev;
+	struct dm_target *ti;
+	struct dm_bufio_client *bufio;
+	char *alg_name;
+	struct crypto_shash *tfm;
+	u8 *root_digest;	/* digest of the root block */
+	u8 *salt;		/* salt: its size is salt_size */
+	u8 *zero_digest;	/* digest for a zero block */
+	unsigned salt_size;
+	sector_t data_start;	/* data offset in 512-byte sectors */
+	sector_t hash_start;	/* hash start in blocks */
+	sector_t data_blocks;	/* the number of data blocks */
+	sector_t hash_blocks;	/* the number of hash blocks */
+	unsigned char data_dev_block_bits;	/* log2(data blocksize) */
+	unsigned char hash_dev_block_bits;	/* log2(hash blocksize) */
+	unsigned char hash_per_block_bits;	/* log2(hashes in hash block) */
+	unsigned char levels;	/* the number of tree levels */
+	unsigned char version;
+	unsigned digest_size;	/* digest size for the current hash algorithm */
+	unsigned shash_descsize;/* the size of temporary space for crypto */
+	int hash_failed;	/* set to 1 if hash of any block failed */
+	enum verity_mode mode;	/* mode for handling verification errors */
+	unsigned corrupted_errs;/* Number of errors for corrupted blocks */
+
+	struct workqueue_struct *verify_wq;
+
+	/* starting blocks for each tree level. 0 is the lowest level. */
+	sector_t hash_level_block[DM_VERITY_MAX_LEVELS];
+
+	struct dm_verity_fec *fec;	/* forward error correction */
+};
+
+struct dm_verity_io {
+	struct dm_verity *v;
+
+	/* original value of bio->bi_end_io */
+	bio_end_io_t *orig_bi_end_io;
+
+	sector_t block;
+	unsigned n_blocks;
+
+	struct bvec_iter iter;
+
+	struct work_struct work;
+
+	/*
+	 * Three variably-sized fields follow this struct:
+	 *
+	 * u8 hash_desc[v->shash_descsize];
+	 * u8 real_digest[v->digest_size];
+	 * u8 want_digest[v->digest_size];
+	 *
+	 * To access them use: verity_io_hash_desc(), verity_io_real_digest()
+	 * and verity_io_want_digest().
+	 */
+};
+
+static inline struct shash_desc *verity_io_hash_desc(struct dm_verity *v,
+						     struct dm_verity_io *io)
+{
+	return (struct shash_desc *)(io + 1);
+}
+
+static inline u8 *verity_io_real_digest(struct dm_verity *v,
+					struct dm_verity_io *io)
+{
+	return (u8 *)(io + 1) + v->shash_descsize;
+}
+
+static inline u8 *verity_io_want_digest(struct dm_verity *v,
+					struct dm_verity_io *io)
+{
+	return (u8 *)(io + 1) + v->shash_descsize + v->digest_size;
+}
+
+static inline u8 *verity_io_digest_end(struct dm_verity *v,
+				       struct dm_verity_io *io)
+{
+	return verity_io_want_digest(v, io) + v->digest_size;
+}
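+
+/*
+ * Per-bio layout sketch (sizes illustrative, e.g. SHA-256 with
+ * digest_size = 32):
+ *
+ *   struct dm_verity_io | hash_desc (shash_descsize bytes)
+ *   | real_digest (32) | want_digest (32)
+ *   | struct dm_verity_fec_io (only when FEC is enabled)
+ *
+ * verity_io_digest_end() points just past want_digest, which is where
+ * dm-verity-fec.c places its per-bio dm_verity_fec_io.
+ */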
+
+extern int verity_for_bv_block(struct dm_verity *v, struct dm_verity_io *io,
+			       struct bvec_iter *iter,
+			       int (*process)(struct dm_verity *v,
+					      struct dm_verity_io *io,
+					      u8 *data, size_t len));
+
+extern int verity_hash(struct dm_verity *v, struct shash_desc *desc,
+		       const u8 *data, size_t len, u8 *digest);
+
+extern int verity_hash_for_block(struct dm_verity *v, struct dm_verity_io *io,
+				 sector_t block, u8 *digest, bool *is_zero);
+
+extern void verity_status(struct dm_target *ti, status_type_t type,
+			unsigned status_flags, char *result, unsigned maxlen);
+extern int verity_prepare_ioctl(struct dm_target *ti,
+		struct block_device **bdev, fmode_t *mode);
+extern int verity_iterate_devices(struct dm_target *ti,
+				iterate_devices_callout_fn fn, void *data);
+extern void verity_io_hints(struct dm_target *ti, struct queue_limits *limits);
+extern void verity_dtr(struct dm_target *ti);
+extern int verity_ctr(struct dm_target *ti, unsigned argc, char **argv);
+extern int verity_map(struct dm_target *ti, struct bio *bio);
+extern void dm_verity_avb_error_handler(void);
+#endif /* DM_VERITY_H */
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/md/dm-verity-target.c	2019-10-29 09:26:23.865205351 +0100
@@ -0,0 +1,1111 @@
+/*
+ * Copyright (C) 2012 Red Hat, Inc.
+ *
+ * Author: Mikulas Patocka <mpatocka@redhat.com>
+ *
+ * Based on Chromium dm-verity driver (C) 2011 The Chromium OS Authors
+ *
+ * This file is released under the GPLv2.
+ *
+ * In the file "/sys/module/dm_verity/parameters/prefetch_cluster" you can set
+ * the default prefetch value. Data are read in "prefetch_cluster" chunks from the
+ * hash device. Setting this greatly improves performance when data and hash
+ * are on the same disk on different partitions on devices with poor random
+ * access behavior.
+ */
+
+#include "dm-verity.h"
+#include "dm-verity-fec.h"
+
+#include <linux/module.h>
+#include <linux/reboot.h>
+
+#define DM_MSG_PREFIX			"verity"
+
+#define DM_VERITY_ENV_LENGTH		42
+#define DM_VERITY_ENV_VAR_NAME		"DM_VERITY_ERR_BLOCK_NR"
+
+#define DM_VERITY_DEFAULT_PREFETCH_SIZE	262144
+
+#define DM_VERITY_MAX_CORRUPTED_ERRS	100
+
+#define DM_VERITY_OPT_LOGGING		"ignore_corruption"
+#define DM_VERITY_OPT_RESTART		"restart_on_corruption"
+#define DM_VERITY_OPT_IGN_ZEROES	"ignore_zero_blocks"
+
+#define DM_VERITY_OPTS_MAX		(2 + DM_VERITY_OPTS_FEC)
+
+static unsigned dm_verity_prefetch_cluster = DM_VERITY_DEFAULT_PREFETCH_SIZE;
+
+module_param_named(prefetch_cluster, dm_verity_prefetch_cluster, uint, S_IRUGO | S_IWUSR);
+
+struct dm_verity_prefetch_work {
+	struct work_struct work;
+	struct dm_verity *v;
+	sector_t block;
+	unsigned n_blocks;
+};
+
+/*
+ * Auxiliary structure appended to each dm-bufio buffer. If the value
+ * hash_verified is nonzero, hash of the block has been verified.
+ *
+ * The variable hash_verified is set to 0 when allocating the buffer, then
+ * it can be changed to 1 and it is never reset to 0 again.
+ *
+ * There is no lock around this value; a race condition can at worst cause
+ * multiple processes to verify the hash of the same buffer simultaneously
+ * and write 1 to hash_verified simultaneously.
+ * This condition is harmless, so we don't need locking.
+ */
+struct buffer_aux {
+	int hash_verified;
+};
+
+/*
+ * Initialize struct buffer_aux for a freshly created buffer.
+ */
+static void dm_bufio_alloc_callback(struct dm_buffer *buf)
+{
+	struct buffer_aux *aux = dm_bufio_get_aux_data(buf);
+
+	aux->hash_verified = 0;
+}
+
+/*
+ * Translate input sector number to the sector number on the target device.
+ */
+static sector_t verity_map_sector(struct dm_verity *v, sector_t bi_sector)
+{
+	return v->data_start + dm_target_offset(v->ti, bi_sector);
+}
+
+/*
+ * Return hash position of a specified block at a specified tree level
+ * (0 is the lowest level).
+ * The lowest "hash_per_block_bits"-bits of the result denote hash position
+ * inside a hash block. The remaining bits denote location of the hash block.
+ */
+static sector_t verity_position_at_level(struct dm_verity *v, sector_t block,
+					 int level)
+{
+	return block >> (level * v->hash_per_block_bits);
+}
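+
+/*
+ * Worked example, assuming 32-byte digests in 4 KiB hash blocks
+ * (128 hashes per block, hash_per_block_bits = 7): for data block
+ * 1000000, level 0 yields position 1000000, level 1 yields
+ * 1000000 >> 7 = 7812, and level 2 yields 1000000 >> 14 = 61; the low
+ * 7 bits of each position select the hash within its block, the rest
+ * select the hash block itself.
+ */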
+
+/*
+ * Wrapper for crypto_shash_init, which handles verity salting.
+ */
+static int verity_hash_init(struct dm_verity *v, struct shash_desc *desc)
+{
+	int r;
+
+	desc->tfm = v->tfm;
+	desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
+
+	r = crypto_shash_init(desc);
+
+	if (unlikely(r < 0)) {
+		DMERR("crypto_shash_init failed: %d", r);
+		return r;
+	}
+
+	if (likely(v->version >= 1)) {
+		r = crypto_shash_update(desc, v->salt, v->salt_size);
+
+		if (unlikely(r < 0)) {
+			DMERR("crypto_shash_update failed: %d", r);
+			return r;
+		}
+	}
+
+	return 0;
+}
+
+static int verity_hash_update(struct dm_verity *v, struct shash_desc *desc,
+			      const u8 *data, size_t len)
+{
+	int r = crypto_shash_update(desc, data, len);
+
+	if (unlikely(r < 0))
+		DMERR("crypto_shash_update failed: %d", r);
+
+	return r;
+}
+
+static int verity_hash_final(struct dm_verity *v, struct shash_desc *desc,
+			     u8 *digest)
+{
+	int r;
+
+	if (unlikely(!v->version)) {
+		r = crypto_shash_update(desc, v->salt, v->salt_size);
+
+		if (r < 0) {
+			DMERR("crypto_shash_update failed: %d", r);
+			return r;
+		}
+	}
+
+	r = crypto_shash_final(desc, digest);
+
+	if (unlikely(r < 0))
+		DMERR("crypto_shash_final failed: %d", r);
+
+	return r;
+}
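+
+/*
+ * Salt placement differs by format version: version 1 (and later)
+ * prepends the salt in verity_hash_init(), while the legacy version 0
+ * appends it here in verity_hash_final(); the two wrappers above encode
+ * exactly that difference.
+ */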
+
+int verity_hash(struct dm_verity *v, struct shash_desc *desc,
+		const u8 *data, size_t len, u8 *digest)
+{
+	int r;
+
+	r = verity_hash_init(v, desc);
+	if (unlikely(r < 0))
+		return r;
+
+	r = verity_hash_update(v, desc, data, len);
+	if (unlikely(r < 0))
+		return r;
+
+	return verity_hash_final(v, desc, digest);
+}
+
+static void verity_hash_at_level(struct dm_verity *v, sector_t block, int level,
+				 sector_t *hash_block, unsigned *offset)
+{
+	sector_t position = verity_position_at_level(v, block, level);
+	unsigned idx;
+
+	*hash_block = v->hash_level_block[level] + (position >> v->hash_per_block_bits);
+
+	if (!offset)
+		return;
+
+	idx = position & ((1 << v->hash_per_block_bits) - 1);
+	if (!v->version)
+		*offset = idx * v->digest_size;
+	else
+		*offset = idx << (v->hash_dev_block_bits - v->hash_per_block_bits);
+}
+
+/*
+ * Handle verification errors.
+ */
+static int verity_handle_err(struct dm_verity *v, enum verity_block_type type,
+			     unsigned long long block)
+{
+	char verity_env[DM_VERITY_ENV_LENGTH];
+	char *envp[] = { verity_env, NULL };
+	const char *type_str = "";
+	struct mapped_device *md = dm_table_get_md(v->ti->table);
+
+	/* Corruption should be visible in device status in all modes */
+	v->hash_failed = 1;
+
+	if (v->corrupted_errs >= DM_VERITY_MAX_CORRUPTED_ERRS)
+		goto out;
+
+	v->corrupted_errs++;
+
+	switch (type) {
+	case DM_VERITY_BLOCK_TYPE_DATA:
+		type_str = "data";
+		break;
+	case DM_VERITY_BLOCK_TYPE_METADATA:
+		type_str = "metadata";
+		break;
+	default:
+		BUG();
+	}
+
+	DMERR("%s: %s block %llu is corrupted", v->data_dev->name, type_str,
+		block);
+
+	if (v->corrupted_errs == DM_VERITY_MAX_CORRUPTED_ERRS)
+		DMERR("%s: reached maximum errors", v->data_dev->name);
+
+	snprintf(verity_env, DM_VERITY_ENV_LENGTH, "%s=%d,%llu",
+		DM_VERITY_ENV_VAR_NAME, type, block);
+
+	kobject_uevent_env(&disk_to_dev(dm_disk(md))->kobj, KOBJ_CHANGE, envp);
+
+out:
+	if (v->mode == DM_VERITY_MODE_LOGGING)
+		return 0;
+
+	if (v->mode == DM_VERITY_MODE_RESTART) {
+#ifdef CONFIG_DM_VERITY_AVB
+		dm_verity_avb_error_handler();
+#endif
+		kernel_restart("dm-verity device corrupted");
+	}
+
+	return 1;
+}
+
+/*
+ * Verify hash of a metadata block pertaining to the specified data block
+ * ("block" argument) at a specified level ("level" argument).
+ *
+ * On successful return, verity_io_want_digest(v, io) contains the hash value
+ * for a lower tree level or for the data block (if we're at the lowest level).
+ *
+ * If "skip_unverified" is true, unverified buffer is skipped and 1 is returned.
+ * If "skip_unverified" is false, unverified buffer is hashed and verified
+ * against current value of verity_io_want_digest(v, io).
+ */
+static int verity_verify_level(struct dm_verity *v, struct dm_verity_io *io,
+			       sector_t block, int level, bool skip_unverified,
+			       u8 *want_digest)
+{
+	struct dm_buffer *buf;
+	struct buffer_aux *aux;
+	u8 *data;
+	int r;
+	sector_t hash_block;
+	unsigned offset;
+
+	verity_hash_at_level(v, block, level, &hash_block, &offset);
+
+	data = dm_bufio_read(v->bufio, hash_block, &buf);
+	if (IS_ERR(data))
+		return PTR_ERR(data);
+
+	aux = dm_bufio_get_aux_data(buf);
+
+	if (!aux->hash_verified) {
+		if (skip_unverified) {
+			r = 1;
+			goto release_ret_r;
+		}
+
+		r = verity_hash(v, verity_io_hash_desc(v, io),
+				data, 1 << v->hash_dev_block_bits,
+				verity_io_real_digest(v, io));
+		if (unlikely(r < 0))
+			goto release_ret_r;
+
+		if (likely(memcmp(verity_io_real_digest(v, io), want_digest,
+				  v->digest_size) == 0))
+			aux->hash_verified = 1;
+		else if (verity_fec_decode(v, io,
+					   DM_VERITY_BLOCK_TYPE_METADATA,
+					   hash_block, data, NULL) == 0)
+			aux->hash_verified = 1;
+		else if (verity_handle_err(v,
+					   DM_VERITY_BLOCK_TYPE_METADATA,
+					   hash_block)) {
+			r = -EIO;
+			goto release_ret_r;
+		}
+	}
+
+	data += offset;
+	memcpy(want_digest, data, v->digest_size);
+	r = 0;
+
+release_ret_r:
+	dm_bufio_release(buf);
+	return r;
+}
+
+/*
+ * Find a hash for a given block, write it to digest and verify the integrity
+ * of the hash tree if necessary.
+ */
+int verity_hash_for_block(struct dm_verity *v, struct dm_verity_io *io,
+			  sector_t block, u8 *digest, bool *is_zero)
+{
+	int r = 0, i;
+
+	if (likely(v->levels)) {
+		/*
+		 * First, we try to get the requested hash for
+		 * the current block. If the hash block itself is
+		 * verified, zero is returned. If it isn't, this
+		 * function returns 1 and we fall back to whole
+		 * chain verification.
+		 */
+		r = verity_verify_level(v, io, block, 0, true, digest);
+		if (likely(r <= 0))
+			goto out;
+	}
+
+	memcpy(digest, v->root_digest, v->digest_size);
+
+	for (i = v->levels - 1; i >= 0; i--) {
+		r = verity_verify_level(v, io, block, i, false, digest);
+		if (unlikely(r))
+			goto out;
+	}
+out:
+	if (!r && v->zero_digest)
+		*is_zero = !memcmp(v->zero_digest, digest, v->digest_size);
+	else
+		*is_zero = false;
+
+	return r;
+}
+
+/*
+ * Calls function process for 1 << v->data_dev_block_bits bytes in the bio_vec
+ * starting from iter.
+ */
+int verity_for_bv_block(struct dm_verity *v, struct dm_verity_io *io,
+			struct bvec_iter *iter,
+			int (*process)(struct dm_verity *v,
+				       struct dm_verity_io *io, u8 *data,
+				       size_t len))
+{
+	unsigned todo = 1 << v->data_dev_block_bits;
+	struct bio *bio = dm_bio_from_per_bio_data(io, v->ti->per_bio_data_size);
+
+	do {
+		int r;
+		u8 *page;
+		unsigned len;
+		struct bio_vec bv = bio_iter_iovec(bio, *iter);
+
+		page = kmap_atomic(bv.bv_page);
+		len = bv.bv_len;
+
+		if (likely(len >= todo))
+			len = todo;
+
+		r = process(v, io, page + bv.bv_offset, len);
+		kunmap_atomic(page);
+
+		if (r < 0)
+			return r;
+
+		bio_advance_iter(bio, iter, len);
+		todo -= len;
+	} while (todo);
+
+	return 0;
+}
+
+static int verity_bv_hash_update(struct dm_verity *v, struct dm_verity_io *io,
+				 u8 *data, size_t len)
+{
+	return verity_hash_update(v, verity_io_hash_desc(v, io), data, len);
+}
+
+static int verity_bv_zero(struct dm_verity *v, struct dm_verity_io *io,
+			  u8 *data, size_t len)
+{
+	memset(data, 0, len);
+	return 0;
+}
+
+/*
+ * Verify one "dm_verity_io" structure.
+ */
+static int verity_verify_io(struct dm_verity_io *io)
+{
+	bool is_zero;
+	struct dm_verity *v = io->v;
+	struct bvec_iter start;
+	unsigned b;
+
+	for (b = 0; b < io->n_blocks; b++) {
+		int r;
+		struct shash_desc *desc = verity_io_hash_desc(v, io);
+
+		r = verity_hash_for_block(v, io, io->block + b,
+					  verity_io_want_digest(v, io),
+					  &is_zero);
+		if (unlikely(r < 0))
+			return r;
+
+		if (is_zero) {
+			/*
+			 * If we expect a zero block, don't validate, just
+			 * return zeros.
+			 */
+			r = verity_for_bv_block(v, io, &io->iter,
+						verity_bv_zero);
+			if (unlikely(r < 0))
+				return r;
+
+			continue;
+		}
+
+		r = verity_hash_init(v, desc);
+		if (unlikely(r < 0))
+			return r;
+
+		start = io->iter;
+		r = verity_for_bv_block(v, io, &io->iter, verity_bv_hash_update);
+		if (unlikely(r < 0))
+			return r;
+
+		r = verity_hash_final(v, desc, verity_io_real_digest(v, io));
+		if (unlikely(r < 0))
+			return r;
+
+		if (likely(memcmp(verity_io_real_digest(v, io),
+				  verity_io_want_digest(v, io), v->digest_size) == 0))
+			continue;
+		else if (verity_fec_decode(v, io, DM_VERITY_BLOCK_TYPE_DATA,
+					   io->block + b, NULL, &start) == 0)
+			continue;
+		else if (verity_handle_err(v, DM_VERITY_BLOCK_TYPE_DATA,
+					   io->block + b))
+			return -EIO;
+	}
+
+	return 0;
+}
+
+/*
+ * End one "io" structure with a given error.
+ */
+static void verity_finish_io(struct dm_verity_io *io, int error)
+{
+	struct dm_verity *v = io->v;
+	struct bio *bio = dm_bio_from_per_bio_data(io, v->ti->per_bio_data_size);
+
+	bio->bi_end_io = io->orig_bi_end_io;
+	bio->bi_error = error;
+
+	verity_fec_finish_io(io);
+
+	bio_endio(bio);
+}
+
+static void verity_work(struct work_struct *w)
+{
+	struct dm_verity_io *io = container_of(w, struct dm_verity_io, work);
+
+	verity_finish_io(io, verity_verify_io(io));
+}
+
+static void verity_end_io(struct bio *bio)
+{
+	struct dm_verity_io *io = bio->bi_private;
+
+	if (bio->bi_error && !verity_fec_is_enabled(io->v)) {
+		verity_finish_io(io, bio->bi_error);
+		return;
+	}
+
+	INIT_WORK(&io->work, verity_work);
+	queue_work(io->v->verify_wq, &io->work);
+}
+
+/*
+ * Prefetch buffers for the specified io.
+ * The root buffer is not prefetched, it is assumed that it will be cached
+ * all the time.
+ */
+static void verity_prefetch_io(struct work_struct *work)
+{
+	struct dm_verity_prefetch_work *pw =
+		container_of(work, struct dm_verity_prefetch_work, work);
+	struct dm_verity *v = pw->v;
+	int i;
+	sector_t prefetch_size;
+
+	for (i = v->levels - 2; i >= 0; i--) {
+		sector_t hash_block_start;
+		sector_t hash_block_end;
+		verity_hash_at_level(v, pw->block, i, &hash_block_start, NULL);
+		verity_hash_at_level(v, pw->block + pw->n_blocks - 1, i, &hash_block_end, NULL);
+		if (!i) {
+			unsigned cluster = ACCESS_ONCE(dm_verity_prefetch_cluster);
+
+			cluster >>= v->data_dev_block_bits;
+			if (unlikely(!cluster))
+				goto no_prefetch_cluster;
+
+			if (unlikely(cluster & (cluster - 1)))
+				cluster = 1 << __fls(cluster);
+
+			hash_block_start &= ~(sector_t)(cluster - 1);
+			hash_block_end |= cluster - 1;
+			if (unlikely(hash_block_end >= v->hash_blocks))
+				hash_block_end = v->hash_blocks - 1;
+		}
+no_prefetch_cluster:
+		/* For eMMC, it is more efficient to send a bigger read. */
+		prefetch_size = max((sector_t)CONFIG_DM_VERITY_HASH_PREFETCH_MIN_SIZE,
+			hash_block_end - hash_block_start + 1);
+		if ((hash_block_start + prefetch_size) >= (v->hash_start + v->hash_blocks)) {
+			prefetch_size = hash_block_end - hash_block_start + 1;
+		}
+		dm_bufio_prefetch(v->bufio, hash_block_start,
+				  prefetch_size);
+	}
+
+	kfree(pw);
+}
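+
+/*
+ * Cluster example: with the default prefetch_cluster of 262144 bytes and
+ * 4 KiB blocks, cluster = 64, so the level-0 range above is widened to
+ * hash_block_start & ~63 .. hash_block_end | 63 (whole aligned 64-block
+ * runs) before being clamped to the end of the hash area.
+ */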
+
+static void verity_submit_prefetch(struct dm_verity *v, struct dm_verity_io *io)
+{
+	struct dm_verity_prefetch_work *pw;
+
+	pw = kmalloc(sizeof(struct dm_verity_prefetch_work),
+		GFP_NOIO | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN);
+
+	if (!pw)
+		return;
+
+	INIT_WORK(&pw->work, verity_prefetch_io);
+	pw->v = v;
+	pw->block = io->block;
+	pw->n_blocks = io->n_blocks;
+	queue_work(v->verify_wq, &pw->work);
+}
+
+/*
+ * Bio map function. It allocates dm_verity_io structure and bio vector and
+ * fills them. Then it issues prefetches and the I/O.
+ */
+int verity_map(struct dm_target *ti, struct bio *bio)
+{
+	struct dm_verity *v = ti->private;
+	struct dm_verity_io *io;
+
+	bio->bi_bdev = v->data_dev->bdev;
+	bio->bi_iter.bi_sector = verity_map_sector(v, bio->bi_iter.bi_sector);
+
+	if (((unsigned)bio->bi_iter.bi_sector | bio_sectors(bio)) &
+	    ((1 << (v->data_dev_block_bits - SECTOR_SHIFT)) - 1)) {
+		DMERR_LIMIT("unaligned io");
+		return -EIO;
+	}
+
+	if (bio_end_sector(bio) >>
+	    (v->data_dev_block_bits - SECTOR_SHIFT) > v->data_blocks) {
+		DMERR_LIMIT("io out of range");
+		return -EIO;
+	}
+
+	if (bio_data_dir(bio) == WRITE)
+		return -EIO;
+
+	io = dm_per_bio_data(bio, ti->per_bio_data_size);
+	io->v = v;
+	io->orig_bi_end_io = bio->bi_end_io;
+	io->block = bio->bi_iter.bi_sector >> (v->data_dev_block_bits - SECTOR_SHIFT);
+	io->n_blocks = bio->bi_iter.bi_size >> v->data_dev_block_bits;
+
+	bio->bi_end_io = verity_end_io;
+	bio->bi_private = io;
+	io->iter = bio->bi_iter;
+
+	verity_fec_init_io(io);
+
+	verity_submit_prefetch(v, io);
+
+	generic_make_request(bio);
+
+	return DM_MAPIO_SUBMITTED;
+}
+EXPORT_SYMBOL_GPL(verity_map);
+
+/*
+ * Status: V (valid) or C (corruption found)
+ */
+void verity_status(struct dm_target *ti, status_type_t type,
+			  unsigned status_flags, char *result, unsigned maxlen)
+{
+	struct dm_verity *v = ti->private;
+	unsigned args = 0;
+	unsigned sz = 0;
+	unsigned x;
+
+	switch (type) {
+	case STATUSTYPE_INFO:
+		DMEMIT("%c", v->hash_failed ? 'C' : 'V');
+		break;
+	case STATUSTYPE_TABLE:
+		DMEMIT("%u %s %s %u %u %llu %llu %s ",
+			v->version,
+			v->data_dev->name,
+			v->hash_dev->name,
+			1 << v->data_dev_block_bits,
+			1 << v->hash_dev_block_bits,
+			(unsigned long long)v->data_blocks,
+			(unsigned long long)v->hash_start,
+			v->alg_name
+			);
+		for (x = 0; x < v->digest_size; x++)
+			DMEMIT("%02x", v->root_digest[x]);
+		DMEMIT(" ");
+		if (!v->salt_size)
+			DMEMIT("-");
+		else
+			for (x = 0; x < v->salt_size; x++)
+				DMEMIT("%02x", v->salt[x]);
+		if (v->mode != DM_VERITY_MODE_EIO)
+			args++;
+		if (verity_fec_is_enabled(v))
+			args += DM_VERITY_OPTS_FEC;
+		if (v->zero_digest)
+			args++;
+		if (!args)
+			return;
+		DMEMIT(" %u", args);
+		if (v->mode != DM_VERITY_MODE_EIO) {
+			DMEMIT(" ");
+			switch (v->mode) {
+			case DM_VERITY_MODE_LOGGING:
+				DMEMIT(DM_VERITY_OPT_LOGGING);
+				break;
+			case DM_VERITY_MODE_RESTART:
+				DMEMIT(DM_VERITY_OPT_RESTART);
+				break;
+			default:
+				BUG();
+			}
+		}
+		if (v->zero_digest)
+			DMEMIT(" " DM_VERITY_OPT_IGN_ZEROES);
+		sz = verity_fec_status_table(v, sz, result, maxlen);
+		break;
+	}
+}
+EXPORT_SYMBOL_GPL(verity_status);
+
+int verity_prepare_ioctl(struct dm_target *ti,
+		struct block_device **bdev, fmode_t *mode)
+{
+	struct dm_verity *v = ti->private;
+
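+	/*
+	 * Returning 1 tells the DM core that the target maps only part
+	 * of the data device, so ioctls need extra validation before
+	 * they may be passed through to it.
+	 */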
+	*bdev = v->data_dev->bdev;
+
+	if (v->data_start ||
+	    ti->len != i_size_read(v->data_dev->bdev->bd_inode) >> SECTOR_SHIFT)
+		return 1;
+	return 0;
+}
+EXPORT_SYMBOL_GPL(verity_prepare_ioctl);
+
+int verity_iterate_devices(struct dm_target *ti,
+				  iterate_devices_callout_fn fn, void *data)
+{
+	struct dm_verity *v = ti->private;
+
+	return fn(ti, v->data_dev, v->data_start, ti->len, data);
+}
+EXPORT_SYMBOL_GPL(verity_iterate_devices);
+
+void verity_io_hints(struct dm_target *ti, struct queue_limits *limits)
+{
+	struct dm_verity *v = ti->private;
+
+	if (limits->logical_block_size < 1 << v->data_dev_block_bits)
+		limits->logical_block_size = 1 << v->data_dev_block_bits;
+
+	if (limits->physical_block_size < 1 << v->data_dev_block_bits)
+		limits->physical_block_size = 1 << v->data_dev_block_bits;
+
+	blk_limits_io_min(limits, limits->logical_block_size);
+}
+EXPORT_SYMBOL_GPL(verity_io_hints);
+
+void verity_dtr(struct dm_target *ti)
+{
+	struct dm_verity *v = ti->private;
+
+	if (v->verify_wq)
+		destroy_workqueue(v->verify_wq);
+
+	if (v->bufio)
+		dm_bufio_client_destroy(v->bufio);
+
+	kfree(v->salt);
+	kfree(v->root_digest);
+	kfree(v->zero_digest);
+
+	if (v->tfm)
+		crypto_free_shash(v->tfm);
+
+	kfree(v->alg_name);
+
+	if (v->hash_dev)
+		dm_put_device(ti, v->hash_dev);
+
+	if (v->data_dev)
+		dm_put_device(ti, v->data_dev);
+
+	verity_fec_dtr(v);
+
+	kfree(v);
+}
+EXPORT_SYMBOL_GPL(verity_dtr);
+
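+/*
+ * Precompute the digest of an all-zeroes data block.  With the
+ * "ignore_zero_blocks" option, a block whose expected digest equals
+ * this value is skipped during verification and returned as zeroes.
+ */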
+static int verity_alloc_zero_digest(struct dm_verity *v)
+{
+	int r = -ENOMEM;
+	struct shash_desc *desc;
+	u8 *zero_data;
+
+	v->zero_digest = kmalloc(v->digest_size, GFP_KERNEL);
+
+	if (!v->zero_digest)
+		return r;
+
+	desc = kmalloc(v->shash_descsize, GFP_KERNEL);
+
+	if (!desc)
+		return r; /* verity_dtr will free zero_digest */
+
+	zero_data = kzalloc(1 << v->data_dev_block_bits, GFP_KERNEL);
+
+	if (!zero_data)
+		goto out;
+
+	r = verity_hash(v, desc, zero_data, 1 << v->data_dev_block_bits,
+			v->zero_digest);
+
+out:
+	kfree(desc);
+	kfree(zero_data);
+
+	return r;
+}
+
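+/*
+ * Parse the optional feature arguments that may follow the ten
+ * mandatory target parameters: the corruption-handling mode
+ * (DM_VERITY_OPT_LOGGING or DM_VERITY_OPT_RESTART), zero-block
+ * handling (DM_VERITY_OPT_IGN_ZEROES) and the FEC arguments, which
+ * are delegated to verity_fec_parse_opt_args().
+ */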
+static int verity_parse_opt_args(struct dm_arg_set *as, struct dm_verity *v)
+{
+	int r;
+	unsigned argc;
+	struct dm_target *ti = v->ti;
+	const char *arg_name;
+
+	static struct dm_arg _args[] = {
+		{0, DM_VERITY_OPTS_MAX, "Invalid number of feature args"},
+	};
+
+	r = dm_read_arg_group(_args, as, &argc, &ti->error);
+	if (r)
+		return -EINVAL;
+
+	if (!argc)
+		return 0;
+
+	do {
+		arg_name = dm_shift_arg(as);
+		argc--;
+
+		if (!strcasecmp(arg_name, DM_VERITY_OPT_LOGGING)) {
+			v->mode = DM_VERITY_MODE_LOGGING;
+			continue;
+
+		} else if (!strcasecmp(arg_name, DM_VERITY_OPT_RESTART)) {
+			v->mode = DM_VERITY_MODE_RESTART;
+			continue;
+
+		} else if (!strcasecmp(arg_name, DM_VERITY_OPT_IGN_ZEROES)) {
+			r = verity_alloc_zero_digest(v);
+			if (r) {
+				ti->error = "Cannot allocate zero digest";
+				return r;
+			}
+			continue;
+
+		} else if (verity_is_fec_opt_arg(arg_name)) {
+			r = verity_fec_parse_opt_args(as, v, &argc, arg_name);
+			if (r)
+				return r;
+			continue;
+		}
+
+		ti->error = "Unrecognized verity feature request";
+		return -EINVAL;
+	} while (argc && !r);
+
+	return r;
+}
+
+/*
+ * Target parameters:
+ *	<version>	The current format is version 1.
+ *			Version 0 is compatible with the original Chromium OS releases.
+ *	<data device>
+ *	<hash device>
+ *	<data block size>
+ *	<hash block size>
+ *	<the number of data blocks>
+ *	<hash start block>
+ *	<algorithm>
+ *	<digest>
+ *	<salt>		Hex string or "-" if no salt.
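+ *
+ * Example of a full dmsetup table line built from these parameters
+ * (hypothetical devices and placeholder digest/salt, shown only to
+ * illustrate the argument order; 262144 data blocks of 4096 bytes
+ * = 2097152 sectors):
+ *	0 2097152 verity 1 /dev/sda1 /dev/sda2 4096 4096 262144 1
+ *		sha256 <root digest hex> <salt hex>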
+ */
+int verity_ctr(struct dm_target *ti, unsigned argc, char **argv)
+{
+	struct dm_verity *v;
+	struct dm_arg_set as;
+	unsigned int num;
+	unsigned long long num_ll;
+	int r;
+	int i;
+	sector_t hash_position;
+	char dummy;
+
+	v = kzalloc(sizeof(struct dm_verity), GFP_KERNEL);
+	if (!v) {
+		ti->error = "Cannot allocate verity structure";
+		return -ENOMEM;
+	}
+	ti->private = v;
+	v->ti = ti;
+
+	r = verity_fec_ctr_alloc(v);
+	if (r)
+		goto bad;
+
+	if ((dm_table_get_mode(ti->table) & ~FMODE_READ)) {
+		ti->error = "Device must be readonly";
+		r = -EINVAL;
+		goto bad;
+	}
+
+	if (argc < 10) {
+		ti->error = "Not enough arguments";
+		r = -EINVAL;
+		goto bad;
+	}
+
+	if (sscanf(argv[0], "%u%c", &num, &dummy) != 1 ||
+	    num > 1) {
+		ti->error = "Invalid version";
+		r = -EINVAL;
+		goto bad;
+	}
+	v->version = num;
+
+	r = dm_get_device(ti, argv[1], FMODE_READ, &v->data_dev);
+	if (r) {
+		ti->error = "Data device lookup failed";
+		goto bad;
+	}
+
+	r = dm_get_device(ti, argv[2], FMODE_READ, &v->hash_dev);
+	if (r) {
+		ti->error = "Hash device lookup failed";
+		goto bad;
+	}
+
+	if (sscanf(argv[3], "%u%c", &num, &dummy) != 1 ||
+	    !num || (num & (num - 1)) ||
+	    num < bdev_logical_block_size(v->data_dev->bdev) ||
+	    num > PAGE_SIZE) {
+		ti->error = "Invalid data device block size";
+		r = -EINVAL;
+		goto bad;
+	}
+	v->data_dev_block_bits = __ffs(num);
+
+	if (sscanf(argv[4], "%u%c", &num, &dummy) != 1 ||
+	    !num || (num & (num - 1)) ||
+	    num < bdev_logical_block_size(v->hash_dev->bdev) ||
+	    num > INT_MAX) {
+		ti->error = "Invalid hash device block size";
+		r = -EINVAL;
+		goto bad;
+	}
+	v->hash_dev_block_bits = __ffs(num);
+
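+	/*
+	 * The shift-up/shift-down round trip rejects a block count whose
+	 * sector equivalent would overflow sector_t.
+	 */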
+	if (sscanf(argv[5], "%llu%c", &num_ll, &dummy) != 1 ||
+	    (sector_t)(num_ll << (v->data_dev_block_bits - SECTOR_SHIFT))
+	    >> (v->data_dev_block_bits - SECTOR_SHIFT) != num_ll) {
+		ti->error = "Invalid data blocks";
+		r = -EINVAL;
+		goto bad;
+	}
+	v->data_blocks = num_ll;
+
+	if (ti->len > (v->data_blocks << (v->data_dev_block_bits - SECTOR_SHIFT))) {
+		ti->error = "Data device is too small";
+		r = -EINVAL;
+		goto bad;
+	}
+
+	if (sscanf(argv[6], "%llu%c", &num_ll, &dummy) != 1 ||
+	    (sector_t)(num_ll << (v->hash_dev_block_bits - SECTOR_SHIFT))
+	    >> (v->hash_dev_block_bits - SECTOR_SHIFT) != num_ll) {
+		ti->error = "Invalid hash start";
+		r = -EINVAL;
+		goto bad;
+	}
+	v->hash_start = num_ll;
+
+	v->alg_name = kstrdup(argv[7], GFP_KERNEL);
+	if (!v->alg_name) {
+		ti->error = "Cannot allocate algorithm name";
+		r = -ENOMEM;
+		goto bad;
+	}
+
+	v->tfm = crypto_alloc_shash(v->alg_name, 0, 0);
+	if (IS_ERR(v->tfm)) {
+		ti->error = "Cannot initialize hash function";
+		r = PTR_ERR(v->tfm);
+		v->tfm = NULL;
+		goto bad;
+	}
+	v->digest_size = crypto_shash_digestsize(v->tfm);
+	if ((1 << v->hash_dev_block_bits) < v->digest_size * 2) {
+		ti->error = "Digest size too big";
+		r = -EINVAL;
+		goto bad;
+	}
+	v->shash_descsize =
+		sizeof(struct shash_desc) + crypto_shash_descsize(v->tfm);
+
+	v->root_digest = kmalloc(v->digest_size, GFP_KERNEL);
+	if (!v->root_digest) {
+		ti->error = "Cannot allocate root digest";
+		r = -ENOMEM;
+		goto bad;
+	}
+	if (strlen(argv[8]) != v->digest_size * 2 ||
+	    hex2bin(v->root_digest, argv[8], v->digest_size)) {
+		ti->error = "Invalid root digest";
+		r = -EINVAL;
+		goto bad;
+	}
+
+	if (strcmp(argv[9], "-")) {
+		v->salt_size = strlen(argv[9]) / 2;
+		v->salt = kmalloc(v->salt_size, GFP_KERNEL);
+		if (!v->salt) {
+			ti->error = "Cannot allocate salt";
+			r = -ENOMEM;
+			goto bad;
+		}
+		if (strlen(argv[9]) != v->salt_size * 2 ||
+		    hex2bin(v->salt, argv[9], v->salt_size)) {
+			ti->error = "Invalid salt";
+			r = -EINVAL;
+			goto bad;
+		}
+	}
+
+	argv += 10;
+	argc -= 10;
+
+	/* Optional parameters */
+	if (argc) {
+		as.argc = argc;
+		as.argv = argv;
+
+		r = verity_parse_opt_args(&as, v);
+		if (r < 0)
+			goto bad;
+	}
+
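+	/*
+	 * Digests per hash block, as a power of two.  For example (an
+	 * illustrative configuration, not one fixed by this patch),
+	 * 4096-byte hash blocks with SHA-256 hold 128 digests, so
+	 * hash_per_block_bits is 7 and each tree level fans out by 128.
+	 */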
+	v->hash_per_block_bits =
+		__fls((1 << v->hash_dev_block_bits) / v->digest_size);
+
+	v->levels = 0;
+	if (v->data_blocks)
+		while (v->hash_per_block_bits * v->levels < 64 &&
+		       (unsigned long long)(v->data_blocks - 1) >>
+		       (v->hash_per_block_bits * v->levels))
+			v->levels++;
+
+	if (v->levels > DM_VERITY_MAX_LEVELS) {
+		ti->error = "Too many tree levels";
+		r = -E2BIG;
+		goto bad;
+	}
+
+	hash_position = v->hash_start;
+	for (i = v->levels - 1; i >= 0; i--) {
+		sector_t s;
+		v->hash_level_block[i] = hash_position;
+		s = (v->data_blocks + ((sector_t)1 << ((i + 1) * v->hash_per_block_bits)) - 1)
+					>> ((i + 1) * v->hash_per_block_bits);
+		if (hash_position + s < hash_position) {
+			ti->error = "Hash device offset overflow";
+			r = -E2BIG;
+			goto bad;
+		}
+		hash_position += s;
+	}
+	v->hash_blocks = hash_position;
+
+	v->bufio = dm_bufio_client_create(v->hash_dev->bdev,
+		1 << v->hash_dev_block_bits, 1, sizeof(struct buffer_aux),
+		dm_bufio_alloc_callback, NULL);
+	if (IS_ERR(v->bufio)) {
+		ti->error = "Cannot initialize dm-bufio";
+		r = PTR_ERR(v->bufio);
+		v->bufio = NULL;
+		goto bad;
+	}
+
+	if (dm_bufio_get_device_size(v->bufio) < v->hash_blocks) {
+		ti->error = "Hash device is too small";
+		r = -E2BIG;
+		goto bad;
+	}
+
+	/* WQ_UNBOUND greatly improves performance when running on ramdisk */
+	v->verify_wq = alloc_workqueue("kverityd", WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM | WQ_UNBOUND, num_online_cpus());
+	if (!v->verify_wq) {
+		ti->error = "Cannot allocate workqueue";
+		r = -ENOMEM;
+		goto bad;
+	}
+
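+	/*
+	 * Per-bio data holds the shash descriptor plus room for two
+	 * digests: the one computed from the data block and the expected
+	 * one read from the hash tree.
+	 */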
+	ti->per_bio_data_size = sizeof(struct dm_verity_io) +
+				v->shash_descsize + v->digest_size * 2;
+
+	r = verity_fec_ctr(v);
+	if (r)
+		goto bad;
+
+	ti->per_bio_data_size = roundup(ti->per_bio_data_size,
+					__alignof__(struct dm_verity_io));
+
+	return 0;
+
+bad:
+	verity_dtr(ti);
+
+	return r;
+}
+EXPORT_SYMBOL_GPL(verity_ctr);
+
+static struct target_type verity_target = {
+	.name		= "verity",
+	.version	= {1, 3, 0},
+	.module		= THIS_MODULE,
+	.ctr		= verity_ctr,
+	.dtr		= verity_dtr,
+	.map		= verity_map,
+	.status		= verity_status,
+	.prepare_ioctl	= verity_prepare_ioctl,
+	.iterate_devices = verity_iterate_devices,
+	.io_hints	= verity_io_hints,
+};
+
+static int __init dm_verity_init(void)
+{
+	int r;
+
+	r = dm_register_target(&verity_target);
+	if (r < 0)
+		DMERR("register failed %d", r);
+
+	return r;
+}
+
+static void __exit dm_verity_exit(void)
+{
+	dm_unregister_target(&verity_target);
+}
+
+module_init(dm_verity_init);
+module_exit(dm_verity_exit);
+
+MODULE_AUTHOR("Mikulas Patocka <mpatocka@redhat.com>");
+MODULE_AUTHOR("Mandeep Baines <msb@chromium.org>");
+MODULE_AUTHOR("Will Drewry <wad@chromium.org>");
+MODULE_DESCRIPTION(DM_NAME " target for transparent disk integrity checking");
+MODULE_LICENSE("GPL");
diff -Nruw linux-4.4.115/drivers/media/cec/Kconfig linux-4.4.115-fbx/drivers/media/cec/Kconfig
--- linux-4.4.115/drivers/media/cec/Kconfig	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/media/cec/Kconfig	2019-10-29 09:26:23.885205546 +0100
@@ -0,0 +1,6 @@
+config MEDIA_CEC_RC
+	bool "HDMI CEC RC integration"
+	depends on CEC_CORE && RC_CORE
+	depends on CEC_CORE=m || RC_CORE=y
+	---help---
+	  Pass on CEC remote control messages to the RC framework.
diff -Nruw linux-4.4.115/drivers/media/cec/Makefile linux-4.4.115-fbx/drivers/media/cec/Makefile
--- linux-4.4.115/drivers/media/cec/Makefile	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/media/cec/Makefile	2019-10-29 09:26:23.885205546 +0100
@@ -0,0 +1,7 @@
+cec-objs := cec-core.o cec-adap.o cec-api.o cec-edid.o
+
+ifeq ($(CONFIG_CEC_NOTIFIER),y)
+  cec-objs += cec-notifier.o
+endif
+
+obj-$(CONFIG_CEC_CORE) += cec.o
diff -Nruw linux-4.4.115/drivers/media/platform/msm/ais/camera/Makefile linux-4.4.115-fbx/drivers/media/platform/msm/ais/camera/Makefile
--- linux-4.4.115/drivers/media/platform/msm/ais/camera/Makefile	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/media/platform/msm/ais/camera/Makefile	2019-01-22 16:16:24.403254557 +0100
@@ -0,0 +1,3 @@
+ccflags-y += -Idrivers/media/platform/msm/ais
+ccflags-y += -Idrivers/media/platform/msm/ais/msm_vb2
+obj-$(CONFIG_MSM_AIS) += camera.o
diff -Nruw linux-4.4.115/drivers/media/platform/msm/ais/common/Makefile linux-4.4.115-fbx/drivers/media/platform/msm/ais/common/Makefile
--- linux-4.4.115/drivers/media/platform/msm/ais/common/Makefile	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/media/platform/msm/ais/common/Makefile	2019-10-29 09:26:23.929205977 +0100
@@ -0,0 +1,2 @@
+ccflags-y += -Idrivers/media/platform/msm/ais/
+obj-$(CONFIG_MSM_AIS) += msm_camera_io_util.o cam_smmu_api.o cam_hw_ops.o cam_soc_api.o
diff -Nruw linux-4.4.115/drivers/media/platform/msm/ais/fd/Makefile linux-4.4.115-fbx/drivers/media/platform/msm/ais/fd/Makefile
--- linux-4.4.115/drivers/media/platform/msm/ais/fd/Makefile	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/media/platform/msm/ais/fd/Makefile	2019-01-22 16:16:24.407254593 +0100
@@ -0,0 +1,8 @@
+GCC_VERSION      := $(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-version.sh $(CROSS_COMPILE)gcc)
+ccflags-y += -Idrivers/media/video/msm
+ccflags-y += -Idrivers/media/platform/msm/ais/common
+ccflags-y += -Idrivers/media/platform/msm/ais
+ccflags-y += -Idrivers/media/platform/msm/ais/pproc/cpp
+ccflags-y += -Idrivers/media/platform/msm/ais/msm_buf_mgr/
+
+obj-$(CONFIG_MSM_AIS_FD) += msm_fd_dev.o msm_fd_hw.o
diff -Nruw linux-4.4.115/drivers/media/platform/msm/ais/isp/Makefile linux-4.4.115-fbx/drivers/media/platform/msm/ais/isp/Makefile
--- linux-4.4.115/drivers/media/platform/msm/ais/isp/Makefile	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/media/platform/msm/ais/isp/Makefile	2019-01-22 16:16:24.407254593 +0100
@@ -0,0 +1,5 @@
+ccflags-y += -Idrivers/media/platform/msm/ais
+ccflags-y += -Idrivers/media/platform/msm/ais/sensor/io
+ccflags-y += -Idrivers/media/platform/msm/ais/common/
+obj-$(CONFIG_MSM_AIS) += msm_buf_mgr.o msm_isp_util.o msm_isp_axi_util.o msm_isp_stats_util.o
+obj-$(CONFIG_MSM_AIS) += msm_isp47.o msm_isp.o
diff -Nruw linux-4.4.115/drivers/media/platform/msm/ais/ispif/Makefile linux-4.4.115-fbx/drivers/media/platform/msm/ais/ispif/Makefile
--- linux-4.4.115/drivers/media/platform/msm/ais/ispif/Makefile	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/media/platform/msm/ais/ispif/Makefile	2019-01-22 16:16:24.415254665 +0100
@@ -0,0 +1,4 @@
+ccflags-y += -Idrivers/media/platform/msm/ais
+ccflags-y += -Idrivers/media/platform/msm/ais/common
+ccflags-y += -Idrivers/media/platform/msm/ais/sensor/io
+obj-$(CONFIG_MSM_AIS) += msm_ispif.o
diff -Nruw linux-4.4.115/drivers/media/platform/msm/ais/jpeg_10/Makefile linux-4.4.115-fbx/drivers/media/platform/msm/ais/jpeg_10/Makefile
--- linux-4.4.115/drivers/media/platform/msm/ais/jpeg_10/Makefile	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/media/platform/msm/ais/jpeg_10/Makefile	2019-01-22 16:16:24.415254665 +0100
@@ -0,0 +1,7 @@
+GCC_VERSION      := $(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-version.sh $(CROSS_COMPILE)gcc)
+
+ccflags-y += -Idrivers/media/platform/msm/ais/jpeg_10
+ccflags-y += -Idrivers/media/platform/msm/ais/sensor/io
+ccflags-y += -Idrivers/media/platform/msm/ais/common
+
+obj-$(CONFIG_MSM_AIS_JPEG) += msm_jpeg_dev.o msm_jpeg_sync.o msm_jpeg_core.o msm_jpeg_hw.o msm_jpeg_platform.o
diff -Nruw linux-4.4.115/drivers/media/platform/msm/ais/jpeg_dma/Makefile linux-4.4.115-fbx/drivers/media/platform/msm/ais/jpeg_dma/Makefile
--- linux-4.4.115/drivers/media/platform/msm/ais/jpeg_dma/Makefile	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/media/platform/msm/ais/jpeg_dma/Makefile	2019-01-22 16:16:24.419254701 +0100
@@ -0,0 +1,4 @@
+GCC_VERSION      := $(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-version.sh $(CROSS_COMPILE)gcc)
+ccflags-y += -Idrivers/media/video/msm
+ccflags-y += -Idrivers/media/platform/msm/ais/common
+obj-$(CONFIG_MSM_AIS_JPEGDMA) += msm_jpeg_dma_dev.o msm_jpeg_dma_hw.o
diff -Nruw linux-4.4.115/drivers/media/platform/msm/ais/Kconfig linux-4.4.115-fbx/drivers/media/platform/msm/ais/Kconfig
--- linux-4.4.115/drivers/media/platform/msm/ais/Kconfig	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/media/platform/msm/ais/Kconfig	2019-01-22 16:16:24.403254557 +0100
@@ -0,0 +1,85 @@
+menuconfig MSM_AIS
+	bool "QTI MSM Automotive Imaging Subsystem"
+	  depends on ARCH_QCOM && VIDEO_V4L2 && I2C
+	---help---
+	  Say Y here to enable MSM AIS.
+
+config MSM_AIS_DEBUG
+	bool "QTI MSM AIS debugging with printk"
+	  depends on MSM_AIS
+	  default n
+	---help---
+	  Enable printk() debugging for MSM AIS.
+	  Enabling AIS debug will affect performance.
+	  This feature is only applicable to
+	  Automotive platforms.
+
+config MSM_AIS_CAMERA_SENSOR
+	bool "QTI MSM camera sensor support"
+	  depends on MSM_AIS
+	  select NEW_LEDS
+	  select LEDS_CLASS
+	---help---
+	  This flag enables support for camera sensors.
+	  The sensor driver is capable of providing real-time
+	  data for camera support. The driver supports the V4L2
+	  subdev APIs.
+
+config MSM_AIS_CPP
+	bool "QTI MSM Camera Post Processing Engine support"
+	  depends on MSM_AIS
+	---help---
+	  Enable support for the Camera Post-processing Engine.
+	  The post-processing engine is capable of scaling
+	  and cropping images. The driver supports the V4L2
+	  subdev APIs.
+
+config MSM_AIS_EEPROM
+	bool "QTI MSM Camera ROM Interface for Calibration support"
+	  depends on MSM_AIS
+	---help---
+	  Enable support for the ROM interface for calibration.
+	  Provides an interface for reading the calibration data,
+	  and also provides support for writing data in the case of flash ROM.
+	  Currently supports the I2C, CCI and SPI protocols.
+
+config MSM_AIS_JPEG
+	bool "QTI MSM Jpeg Encoder Engine support"
+	  depends on MSM_AIS
+	---help---
+	  Enable support for the JPEG encoder/decoder
+	  engine on 8974.
+	  This module serves as the common driver
+	  for the JPEG 1.0 encoder and decoder.
+
+config MSM_AIS_FD
+	bool "QTI MSM FD face detection engine support"
+	  depends on MSM_AIS
+	---help---
+	  Enables support for the MSM FD face detection engine.
+	  The MSM face detection library
+	  drives the face detection
+	  hardware block.
+
+config MSM_AIS_JPEGDMA
+	bool "QTI MSM Jpeg dma"
+	  depends on MSM_AIS
+	  select V4L2_MEM2MEM_DEV
+	---help---
+	  Enable support for the JPEG DMA engine.
+	  The JPEG DMA engine provides
+	  hardware-accelerated JPEG decoding.
+	  This feature is currently not supported on
+	  Automotive platforms.
+
+config MSM_AIS_SEC_CCI_TA_NAME
+	string "Name of TA to handle Secure CCI transactions"
+	  depends on MSM_AIS_CCI
+	default "seccamdemo64"
+
+config MSM_AIS_SEC_CCI_DEBUG
+	bool "QTI MSM Secure CCI Relay Debug"
+	  depends on MSM_AIS_CCI
+	---help---
+	  Enables simulation of a secure camera for Secure CCI Relay
+	  debugging.
diff -Nruw linux-4.4.115/drivers/media/platform/msm/ais/Makefile linux-4.4.115-fbx/drivers/media/platform/msm/ais/Makefile
--- linux-4.4.115/drivers/media/platform/msm/ais/Makefile	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/media/platform/msm/ais/Makefile	2019-10-29 09:26:23.925205938 +0100
@@ -0,0 +1,25 @@
+ccflags-y += -Idrivers/media/platform/msm/ais
+ccflags-y += -Idrivers/media/platform/msm/ais/sensor
+ccflags-y += -Idrivers/media/platform/msm/ais/codecs
+ccflags-y += -Idrivers/media/platform/msm/ais/isps
+ccflags-y += -Idrivers/media/platform/msm/ais/pproc
+ccflags-y += -Idrivers/media/platform/msm/ais/msm_vb2
+ccflags-y += -Idrivers/media/platform/msm/ais/camera
+ccflags-y += -Idrivers/media/platform/msm/ais/jpeg_10
+ccflags-y += -Idrivers/media/platform/msm/ais/jpeg_dma
+ccflags-y += -Idrivers/media/platform/msm/ais/fd
+ccflags-y += -Idrivers/media/platform/msm/ais/common
+
+obj-$(CONFIG_MSM_AIS) += common/
+obj-$(CONFIG_MSM_AIS) += msm.o
+obj-$(CONFIG_MSM_AIS) += camera/
+obj-$(CONFIG_MSM_AIS) += msm_vb2/
+obj-$(CONFIG_MSM_AIS) += sensor/
+obj-$(CONFIG_MSM_AIS) += pproc/
+obj-$(CONFIG_MSM_AIS) += isp/
+obj-$(CONFIG_MSM_AIS) += ispif/
+obj-$(CONFIG_MSM_AIS_JPEG) += jpeg_10/
+obj-$(CONFIG_MSM_AIS_JPEGDMA) += jpeg_dma/
+obj-$(CONFIG_MSM_AIS) += msm_buf_mgr/
+obj-$(CONFIG_MSM_AIS) += msm_ais_mgr/
+obj-$(CONFIG_MSM_AIS_FD) += fd/
diff -Nruw linux-4.4.115/drivers/media/platform/msm/ais/msm_ais_mgr/Makefile linux-4.4.115-fbx/drivers/media/platform/msm/ais/msm_ais_mgr/Makefile
--- linux-4.4.115/drivers/media/platform/msm/ais/msm_ais_mgr/Makefile	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/media/platform/msm/ais/msm_ais_mgr/Makefile	2019-10-29 09:26:23.937206055 +0100
@@ -0,0 +1,5 @@
+ccflags-y += -Idrivers/media/platform/msm/ais
+ccflags-y += -Idrivers/media/platform/msm/ais/common
+ccflags-y += -Idrivers/media/platform/msm/ais/sensor/io
+ccflags-y += -Idrivers/media/platform/msm/ais/sensor/cci
+obj-$(CONFIG_MSM_AIS) += msm_ais_mgr.o
diff -Nruw linux-4.4.115/drivers/media/platform/msm/ais/msm_buf_mgr/Makefile linux-4.4.115-fbx/drivers/media/platform/msm/ais/msm_buf_mgr/Makefile
--- linux-4.4.115/drivers/media/platform/msm/ais/msm_buf_mgr/Makefile	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/media/platform/msm/ais/msm_buf_mgr/Makefile	2019-01-22 16:16:24.423254738 +0100
@@ -0,0 +1,2 @@
+ccflags-y += -Idrivers/media/platform/msm/ais
+obj-$(CONFIG_MSM_AIS) += msm_generic_buf_mgr.o
diff -Nruw linux-4.4.115/drivers/media/platform/msm/ais/msm_vb2/Makefile linux-4.4.115-fbx/drivers/media/platform/msm/ais/msm_vb2/Makefile
--- linux-4.4.115/drivers/media/platform/msm/ais/msm_vb2/Makefile	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/media/platform/msm/ais/msm_vb2/Makefile	2019-01-22 16:16:24.423254738 +0100
@@ -0,0 +1,3 @@
+ccflags-y += -Idrivers/media/platform/msm/ais
+ccflags-y += -Idrivers/media/platform/msm/ais/msm_vb2
+obj-$(CONFIG_MSM_AIS) += msm_vb2.o
diff -Nruw linux-4.4.115/drivers/media/platform/msm/ais/pproc/cpp/Makefile linux-4.4.115-fbx/drivers/media/platform/msm/ais/pproc/cpp/Makefile
--- linux-4.4.115/drivers/media/platform/msm/ais/pproc/cpp/Makefile	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/media/platform/msm/ais/pproc/cpp/Makefile	2019-01-22 16:16:24.423254738 +0100
@@ -0,0 +1,6 @@
+ccflags-y += -Idrivers/media/platform/msm/ais
+ccflags-y += -Idrivers/media/platform/msm/ais/isp/
+ccflags-y += -Idrivers/media/platform/msm/ais/sensor/io
+ccflags-y += -Idrivers/media/platform/msm/ais/common
+ccflags-y += -Idrivers/media/platform/msm/ais/msm_buf_mgr/
+obj-$(CONFIG_MSM_AIS_CPP) += msm_cpp_soc.o msm_cpp.o
diff -Nruw linux-4.4.115/drivers/media/platform/msm/ais/pproc/Makefile linux-4.4.115-fbx/drivers/media/platform/msm/ais/pproc/Makefile
--- linux-4.4.115/drivers/media/platform/msm/ais/pproc/Makefile	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/media/platform/msm/ais/pproc/Makefile	2019-01-22 16:16:24.423254738 +0100
@@ -0,0 +1 @@
+obj-$(CONFIG_MSM_AIS_CPP) += cpp/
diff -Nruw linux-4.4.115/drivers/media/platform/msm/ais/sensor/actuator/Makefile linux-4.4.115-fbx/drivers/media/platform/msm/ais/sensor/actuator/Makefile
--- linux-4.4.115/drivers/media/platform/msm/ais/sensor/actuator/Makefile	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/media/platform/msm/ais/sensor/actuator/Makefile	2019-01-22 16:16:24.423254738 +0100
@@ -0,0 +1,5 @@
+ccflags-y += -Idrivers/media/platform/msm/ais
+ccflags-y += -Idrivers/media/platform/msm/ais/common
+ccflags-y += -Idrivers/media/platform/msm/ais/sensor/io
+ccflags-y += -Idrivers/media/platform/msm/ais/sensor/cci
+obj-$(CONFIG_MSM_AIS) += msm_actuator.o
diff -Nruw linux-4.4.115/drivers/media/platform/msm/ais/sensor/cci/Makefile linux-4.4.115-fbx/drivers/media/platform/msm/ais/sensor/cci/Makefile
--- linux-4.4.115/drivers/media/platform/msm/ais/sensor/cci/Makefile	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/media/platform/msm/ais/sensor/cci/Makefile	2019-01-22 16:16:24.423254738 +0100
@@ -0,0 +1,6 @@
+ccflags-y += -Idrivers/media/platform/msm/ais
+ccflags-y += -Idrivers/media/platform/msm/ais/common
+ccflags-y += -Idrivers/media/platform/msm/ais/sensor/io
+ccflags-y += -Idrivers/media/platform/msm/ais/sensor/cci
+obj-$(CONFIG_MSM_AIS) += msm_cci.o
+obj-$(CONFIG_MSM_AIS) += msm_early_cam.o
diff -Nruw linux-4.4.115/drivers/media/platform/msm/ais/sensor/csid/Makefile linux-4.4.115-fbx/drivers/media/platform/msm/ais/sensor/csid/Makefile
--- linux-4.4.115/drivers/media/platform/msm/ais/sensor/csid/Makefile	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/media/platform/msm/ais/sensor/csid/Makefile	2019-01-22 16:16:24.427254774 +0100
@@ -0,0 +1,4 @@
+ccflags-y += -Idrivers/media/platform/msm/ais
+ccflags-y += -Idrivers/media/platform/msm/ais/common
+ccflags-y += -Idrivers/media/platform/msm/ais/sensor/io
+obj-$(CONFIG_MSM_AIS) += msm_csid.o
diff -Nruw linux-4.4.115/drivers/media/platform/msm/ais/sensor/csiphy/Makefile linux-4.4.115-fbx/drivers/media/platform/msm/ais/sensor/csiphy/Makefile
--- linux-4.4.115/drivers/media/platform/msm/ais/sensor/csiphy/Makefile	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/media/platform/msm/ais/sensor/csiphy/Makefile	2019-01-22 16:16:24.427254774 +0100
@@ -0,0 +1,4 @@
+ccflags-y += -Idrivers/media/platform/msm/ais
+ccflags-y += -Idrivers/media/platform/msm/ais/common
+ccflags-y += -Idrivers/media/platform/msm/ais/sensor/io
+obj-$(CONFIG_MSM_AIS) += msm_csiphy.o
diff -Nruw linux-4.4.115/drivers/media/platform/msm/ais/sensor/eeprom/Makefile linux-4.4.115-fbx/drivers/media/platform/msm/ais/sensor/eeprom/Makefile
--- linux-4.4.115/drivers/media/platform/msm/ais/sensor/eeprom/Makefile	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/media/platform/msm/ais/sensor/eeprom/Makefile	2019-01-22 16:16:24.427254774 +0100
@@ -0,0 +1,5 @@
+ccflags-y += -Idrivers/media/platform/msm/ais
+ccflags-y += -Idrivers/media/platform/msm/ais/common
+ccflags-y += -Idrivers/media/platform/msm/ais/sensor/io
+ccflags-y += -Idrivers/media/platform/msm/ais/sensor/cci
+obj-$(CONFIG_MSM_AIS_EEPROM) += msm_eeprom.o
diff -Nruw linux-4.4.115/drivers/media/platform/msm/ais/sensor/flash/Makefile linux-4.4.115-fbx/drivers/media/platform/msm/ais/sensor/flash/Makefile
--- linux-4.4.115/drivers/media/platform/msm/ais/sensor/flash/Makefile	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/media/platform/msm/ais/sensor/flash/Makefile	2019-01-22 16:16:24.427254774 +0100
@@ -0,0 +1,5 @@
+ccflags-y += -Idrivers/media/platform/msm/ais/sensor/cci
+ccflags-y += -Idrivers/media/platform/msm/ais
+ccflags-y += -Idrivers/media/platform/msm/ais/common
+ccflags-y += -Idrivers/media/platform/msm/ais/sensor/io
+obj-$(CONFIG_MSM_AIS) += msm_flash.o
diff -Nruw linux-4.4.115/drivers/media/platform/msm/ais/sensor/io/Makefile linux-4.4.115-fbx/drivers/media/platform/msm/ais/sensor/io/Makefile
--- linux-4.4.115/drivers/media/platform/msm/ais/sensor/io/Makefile	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/media/platform/msm/ais/sensor/io/Makefile	2019-01-22 16:16:24.427254774 +0100
@@ -0,0 +1,6 @@
+ccflags-y += -Idrivers/media/platform/msm/ais/
+ccflags-y += -Idrivers/media/platform/msm/ais/common
+ccflags-y += -Idrivers/media/platform/msm/ais/sensor
+ccflags-y += -Idrivers/media/platform/msm/ais/sensor/cci
+ccflags-y += -Idrivers/misc/
+obj-$(CONFIG_MSM_AIS)   += msm_camera_cci_i2c.o msm_camera_qup_i2c.o msm_camera_spi.o msm_camera_dt_util.o msm_camera_tz_i2c.o
diff -Nruw linux-4.4.115/drivers/media/platform/msm/ais/sensor/ir_cut/Makefile linux-4.4.115-fbx/drivers/media/platform/msm/ais/sensor/ir_cut/Makefile
--- linux-4.4.115/drivers/media/platform/msm/ais/sensor/ir_cut/Makefile	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/media/platform/msm/ais/sensor/ir_cut/Makefile	2019-01-22 16:16:24.431254810 +0100
@@ -0,0 +1,4 @@
+ccflags-y += -Idrivers/media/platform/msm/ais
+ccflags-y += -Idrivers/media/platform/msm/ais/common
+ccflags-y += -Idrivers/media/platform/msm/ais/sensor/io
+obj-$(CONFIG_MSM_AIS) += msm_ir_cut.o
diff -Nruw linux-4.4.115/drivers/media/platform/msm/ais/sensor/ir_led/Makefile linux-4.4.115-fbx/drivers/media/platform/msm/ais/sensor/ir_led/Makefile
--- linux-4.4.115/drivers/media/platform/msm/ais/sensor/ir_led/Makefile	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/media/platform/msm/ais/sensor/ir_led/Makefile	2019-01-22 16:16:24.431254810 +0100
@@ -0,0 +1,4 @@
+ccflags-y += -Idrivers/media/platform/msm/ais
+ccflags-y += -Idrivers/media/platform/msm/ais/common
+ccflags-y += -Idrivers/media/platform/msm/ais/sensor/io
+obj-$(CONFIG_MSM_AIS) += msm_ir_led.o
diff -Nruw linux-4.4.115/drivers/media/platform/msm/ais/sensor/Makefile linux-4.4.115-fbx/drivers/media/platform/msm/ais/sensor/Makefile
--- linux-4.4.115/drivers/media/platform/msm/ais/sensor/Makefile	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/media/platform/msm/ais/sensor/Makefile	2019-01-22 16:16:24.423254738 +0100
@@ -0,0 +1,8 @@
+ccflags-y += -Idrivers/media/platform/msm/ais
+ccflags-y += -Idrivers/media/platform/msm/ais/common
+ccflags-y += -Idrivers/media/platform/msm/ais/msm_vb2
+ccflags-y += -Idrivers/media/platform/msm/ais/camera
+ccflags-y += -Idrivers/media/platform/msm/ais/sensor/io
+ccflags-y += -Idrivers/media/platform/msm/ais/sensor/cci
+obj-$(CONFIG_MSM_AIS) += cci/ io/ csiphy/ csid/ actuator/ eeprom/ ois/ flash/ ir_led/ ir_cut/
+obj-$(CONFIG_MSM_AIS_CAMERA_SENSOR) += msm_sensor_init.o msm_sensor_driver.o msm_sensor.o
diff -Nruw linux-4.4.115/drivers/media/platform/msm/ais/sensor/ois/Makefile linux-4.4.115-fbx/drivers/media/platform/msm/ais/sensor/ois/Makefile
--- linux-4.4.115/drivers/media/platform/msm/ais/sensor/ois/Makefile	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/media/platform/msm/ais/sensor/ois/Makefile	2019-01-22 16:16:24.431254810 +0100
@@ -0,0 +1,5 @@
+ccflags-y += -Idrivers/media/platform/msm/ais
+ccflags-y += -Idrivers/media/platform/msm/ais/common
+ccflags-y += -Idrivers/media/platform/msm/ais/sensor/io
+ccflags-y += -Idrivers/media/platform/msm/ais/sensor/cci
+obj-$(CONFIG_MSM_AIS) += msm_ois.o
diff -Nruw linux-4.4.115/drivers/media/platform/msm/camera_v2/camera/Makefile linux-4.4.115-fbx/drivers/media/platform/msm/camera_v2/camera/Makefile
--- linux-4.4.115/drivers/media/platform/msm/camera_v2/camera/Makefile	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/media/platform/msm/camera_v2/camera/Makefile	2019-01-22 16:16:24.431254810 +0100
@@ -0,0 +1,3 @@
+ccflags-y += -Idrivers/media/platform/msm/camera_v2
+ccflags-y += -Idrivers/media/platform/msm/camera_v2/msm_vb2
+obj-$(CONFIG_MSMB_CAMERA) += camera.o
diff -Nruw linux-4.4.115/drivers/media/platform/msm/camera_v2/common/Makefile linux-4.4.115-fbx/drivers/media/platform/msm/camera_v2/common/Makefile
--- linux-4.4.115/drivers/media/platform/msm/camera_v2/common/Makefile	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/media/platform/msm/camera_v2/common/Makefile	2019-10-29 09:26:23.945206134 +0100
@@ -0,0 +1,3 @@
+ccflags-y += -Idrivers/media/platform/msm/camera_v2/
+ccflags-y += -Idrivers/misc/
+obj-$(CONFIG_MSMB_CAMERA) += msm_camera_io_util.o cam_smmu_api.o cam_hw_ops.o cam_soc_api.o msm_camera_tz_util.o
diff -Nruw linux-4.4.115/drivers/media/platform/msm/camera_v2/fd/Makefile linux-4.4.115-fbx/drivers/media/platform/msm/camera_v2/fd/Makefile
--- linux-4.4.115/drivers/media/platform/msm/camera_v2/fd/Makefile	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/media/platform/msm/camera_v2/fd/Makefile	2019-01-22 16:16:24.435254846 +0100
@@ -0,0 +1,8 @@
+GCC_VERSION      := $(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-version.sh $(CROSS_COMPILE)gcc)
+ccflags-y += -Idrivers/media/video/msm
+ccflags-y += -Idrivers/media/platform/msm/camera_v2/common
+ccflags-y += -Idrivers/media/platform/msm/camera_v2
+ccflags-y += -Idrivers/media/platform/msm/camera_v2/pproc/cpp
+ccflags-y += -Idrivers/media/platform/msm/camera_v2/msm_buf_mgr/
+
+obj-$(CONFIG_MSM_FD) += msm_fd_dev.o msm_fd_hw.o
diff -Nruw linux-4.4.115/drivers/media/platform/msm/camera_v2/isp/Makefile linux-4.4.115-fbx/drivers/media/platform/msm/camera_v2/isp/Makefile
--- linux-4.4.115/drivers/media/platform/msm/camera_v2/isp/Makefile	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/media/platform/msm/camera_v2/isp/Makefile	2019-01-22 16:16:24.435254846 +0100
@@ -0,0 +1,7 @@
+ccflags-y += -Idrivers/media/platform/msm/camera_v2
+ccflags-y += -Idrivers/media/platform/msm/camera_v2/sensor/io
+ccflags-y += -Idrivers/media/platform/msm/camera_v2/common/
+obj-$(CONFIG_MSMB_CAMERA) += msm_buf_mgr.o msm_isp_util.o msm_isp_axi_util.o msm_isp_stats_util.o
+obj-$(CONFIG_MSMB_CAMERA) += msm_isp48.o msm_isp47.o msm_isp46.o msm_isp44.o msm_isp40.o msm_isp.o
+
+CFLAGS_msm_isp_util.o = -I$(src)
diff -Nruw linux-4.4.115/drivers/media/platform/msm/camera_v2/ispif/Makefile linux-4.4.115-fbx/drivers/media/platform/msm/camera_v2/ispif/Makefile
--- linux-4.4.115/drivers/media/platform/msm/camera_v2/ispif/Makefile	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/media/platform/msm/camera_v2/ispif/Makefile	2019-01-22 16:16:24.443254919 +0100
@@ -0,0 +1,4 @@
+ccflags-y += -Idrivers/media/platform/msm/camera_v2
+ccflags-y += -Idrivers/media/platform/msm/camera_v2/common
+ccflags-y += -Idrivers/media/platform/msm/camera_v2/sensor/io
+obj-$(CONFIG_MSM_CSID) += msm_ispif.o
diff -Nruw linux-4.4.115/drivers/media/platform/msm/camera_v2/jpeg_10/Makefile linux-4.4.115-fbx/drivers/media/platform/msm/camera_v2/jpeg_10/Makefile
--- linux-4.4.115/drivers/media/platform/msm/camera_v2/jpeg_10/Makefile	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/media/platform/msm/camera_v2/jpeg_10/Makefile	2019-01-22 16:16:24.443254919 +0100
@@ -0,0 +1,7 @@
+GCC_VERSION      := $(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-version.sh $(CROSS_COMPILE)gcc)
+
+ccflags-y += -Idrivers/media/platform/msm/camera_v2/jpeg_10
+ccflags-y += -Idrivers/media/platform/msm/camera_v2/sensor/io
+ccflags-y += -Idrivers/media/platform/msm/camera_v2/common
+
+obj-$(CONFIG_MSMB_JPEG) += msm_jpeg_dev.o msm_jpeg_sync.o msm_jpeg_core.o msm_jpeg_hw.o msm_jpeg_platform.o
diff -Nruw linux-4.4.115/drivers/media/platform/msm/camera_v2/jpeg_dma/Makefile linux-4.4.115-fbx/drivers/media/platform/msm/camera_v2/jpeg_dma/Makefile
--- linux-4.4.115/drivers/media/platform/msm/camera_v2/jpeg_dma/Makefile	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/media/platform/msm/camera_v2/jpeg_dma/Makefile	2019-01-22 16:16:24.443254919 +0100
@@ -0,0 +1,4 @@
+GCC_VERSION      := $(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-version.sh $(CROSS_COMPILE)gcc)
+ccflags-y += -Idrivers/media/video/msm
+ccflags-y += -Idrivers/media/platform/msm/camera_v2/common
+obj-$(CONFIG_MSM_JPEGDMA) += msm_jpeg_dma_dev.o msm_jpeg_dma_hw.o
diff -Nruw linux-4.4.115/drivers/media/platform/msm/camera_v2/Kconfig linux-4.4.115-fbx/drivers/media/platform/msm/camera_v2/Kconfig
--- linux-4.4.115/drivers/media/platform/msm/camera_v2/Kconfig	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/media/platform/msm/camera_v2/Kconfig	2019-01-22 16:16:24.431254810 +0100
@@ -0,0 +1,255 @@
+config MSM_CAMERA_SENSOR
+	    bool "QTI MSM camera sensor support"
+	    depends on MSMB_CAMERA
+	    select NEW_LEDS
+	    select LEDS_CLASS
+        ---help---
+          This flag enables support for camera sensors.
+          The sensor driver is capable of providing real-time
+          data for camera support. The driver supports the V4L2
+          subdev APIs.
+
+config MSM_CPP
+        bool "QTI MSM Camera Post Processing Engine support"
+        depends on MSMB_CAMERA
+        ---help---
+          Enable support for the Camera Post-processing Engine.
+          The post-processing engine is capable of scaling
+          and cropping images. The driver supports the V4L2 subdev
+          APIs.
+
+config MSM_CCI
+        bool "QTI MSM Camera Control Interface support"
+        depends on MSMB_CAMERA
+        ---help---
+          Enable support for Camera Control Interface driver only
+          for those platforms that have hardware support. This driver
+          is responsible for handling I2C read and write on the I2C
+          bus. It is also responsible for synchronization with
+          GPIO and data frames.
+
+config MSM_CSI20_HEADER
+        bool "QTI MSM CSI 2.0 Header"
+        depends on MSMB_CAMERA
+        ---help---
+          Enable support for CSI drivers to include the 2.0
+          header. This header has register macros, their
+          values and bit masks for register configuration bits.
+          This config macro is required for targets based on the 8960,
+          8930 and 8064 platforms.
+
+config MSM_CSI22_HEADER
+        bool "QTI MSM CSI 2.2 Header"
+        depends on MSMB_CAMERA
+        ---help---
+          Enable support for CSI drivers to include the 2.2
+          header. This header has register macros, their
+          values and bit masks for register configuration bits.
+          This config macro is required for targets based on the 8610
+          platform.
+
+config MSM_CSI30_HEADER
+        bool "QTI MSM CSI 3.0 Header"
+        depends on MSMB_CAMERA
+        ---help---
+          Enable support for CSI drivers to include the 3.0
+          header. This header has register macros, their
+          values and bit masks for register configuration bits.
+          This config macro is required for targets based on
+          8064 platforms.
+
+config MSM_CSI31_HEADER
+        bool "QTI MSM CSI 3.1 Header"
+        depends on MSMB_CAMERA
+        ---help---
+          Enable support for CSI drivers to include the 3.1
+          header. This header has register macros, their
+          values and bit masks for register configuration bits.
+          This config macro is required for targets based on the
+          APQ8084 platform.
+
+config MSM_CSIPHY
+        bool "QTI MSM Camera Serial Interface Physical receiver support"
+        depends on MSMB_CAMERA
+        ---help---
+          Enable support for Camera Serial Interface
+          Physical receiver. It deserializes packets and
+          supports detection of packet start and stop
+          signalling.
+
+config MSM_CSID
+        bool "QTI MSM Camera Serial Interface decoder support"
+        depends on MSMB_CAMERA
+        ---help---
+          Enable support for Camera Serial Interface decoder.
+          It supports lane merging and decoding of packets
+          based on cid which is mapped to a virtual channel
+          and datatype.
+
+config MSM_EEPROM
+        bool "QTI MSM Camera ROM Interface for Calibration support"
+        depends on MSMB_CAMERA
+        ---help---
+          Enable support for the ROM interface for calibration.
+          Provides an interface for reading the calibration data,
+          and also provides support for writing data in the case of flash ROM.
+          Currently supports the I2C, CCI and SPI protocols.
+
+config MSM_ISPIF
+        bool "QTI MSM Image Signal Processing interface support"
+        depends on MSMB_CAMERA
+        ---help---
+          Enable support for the Image Signal Processing interface module.
+          This module acts as a crossbar between the CSID and the VFE. Output
+          of any CID of the CSID can be routed to any of the pixel or raw
+          data interfaces in the VFE.
+
+config MSM_ISPIF_V1
+        bool "QTI MSM Image Signal Processing interface support"
+        depends on MSMB_CAMERA
+        ---help---
+          Enable support for the Image Signal Processing interface module.
+          This module acts as a crossbar between the CSID and the VFE. Output
+          of any CID of MSM_CSI22_HEADER can be routed to any of the pixel
+          or raw data interfaces in the VFE.
+
+config MSM_ISPIF_V2
+        bool "QTI MSM Image Signal Processing interface support"
+        depends on MSMB_CAMERA
+        ---help---
+          Enable support for the Image Signal Processing interface module.
+          This module acts as a crossbar between the CSID and the VFE. Output
+          of any CID of the CSID can be routed to any of the pixel
+          or raw data interfaces in the VFE.
+
+config IMX134
+	bool "Sensor IMX134 (BAYER 8M)"
+	depends on MSMB_CAMERA
+	---help---
+		Sony 8 MP Bayer Sensor with auto focus, uses
+		4 mipi lanes full resolution @30fps and
+		HFR @60fps and @120fps,
+		Video HDR support.
+
+config IMX132
+	bool "Sensor IMX132 (BAYER 2M)"
+	depends on MSMB_CAMERA
+	---help---
+		Sony 2 MP Bayer Sensor with auto focus, uses
+		2 mipi lanes, preview config = 1920 x 1080 at 30 fps,
+		snapshot config = 1920 x 1080 at 30 fps,
+		Video HDR support.
+
+config OV9724
+	bool "Sensor OV9724 (BAYER 2M)"
+	depends on MSMB_CAMERA
+	---help---
+		OmniVision 2 MP Bayer Sensor, supports 2 mipi lanes,
+		preview and snapshot config at 1280*720 at 30 fps,
+		hfr video at 60, 90 and 120 fps. This sensor driver does
+		not support auto focus.
+
+config OV5648
+	bool "Sensor OV5648 (BAYER 5M)"
+	depends on MSMB_CAMERA
+	---help---
+		OmniVision 5 MP Bayer Sensor, only use 1 mipi lane,
+		preview set to 1296*972 at 30 fps,
+		snapshot set to 2592*1944 at 12 fps,
+		This sensor driver does not support auto focus.
+
+config GC0339
+	bool "Sensor GC0339 (BAYER .3M)"
+	depends on MSMB_CAMERA
+	---help---
+		gc0339 is a Galaxycore .3 MP Bayer Sensor.
+		It supports 1 or 2 mipi lanes.
+		Preview and snapshot resolution shall be 640*480 at 30 fps,
+		It does not support auto focus.
+
+config OV8825
+	bool "OmniVision OV8825 (BAYER 8MP)"
+	depends on MSMB_CAMERA
+	---help---
+		OmniVision 8 MP Bayer sensor with auto focus. Uses
+		2 mipi lanes, preview config = 1632*1224 30 fps,
+		snapshot config = 3264 * 2448 at 18 fps.
+		2 lanes max fps is 18, 4 lanes max fps is 24.
+
+config OV8865
+	bool "OmniVision OV8865 (BAYER 8MP)"
+	depends on MSMB_CAMERA
+	---help---
+		OmniVision 8 MP Bayer sensor with auto focus. Uses
+		4 mipi lanes, preview config = 1632*1224 30 fps,
+		snapshot config = 3264 * 2448 at 30 fps.
+		Max fps is 30fps at 3264 * 2448, 60fps at 1632 * 1224
+
+config s5k4e1
+	bool "Sensor s5k4e1 (BAYER 5MP)"
+	depends on MSMB_CAMERA
+	---help---
+		Samsung 5 MP Bayer Sensor. It uses 2 mipi lanes,
+		supports 720P preview at 30 fps
+		and QSXGA snapshot at 15 fps.
+		This sensor driver does not support auto focus.
+
+config OV12830
+	bool "OmniVision OV12830 (BAYER 12MP)"
+	depends on MSMB_CAMERA
+	---help---
+		OmniVision 12.8 MP Bayer sensor with auto focus. Uses
+		4 mipi lanes, preview config = 2112 * 1500 at 30 fps,
+		snapshot config = 4224 * 3000 at 15 fps.
+		2 lanes max fps is 18, 4 lanes max fps is 24.
+
+config MSM_V4L2_VIDEO_OVERLAY_DEVICE
+	tristate "QTI MSM V4l2 video overlay device"
+	---help---
+	  Enables support for the MSM V4L2 video
+	  overlay driver. This allows video rendering
+	  apps to render overlaid video using Video4Linux2
+	  APIs, by using the /dev/videoX device.
+
+config MSMB_JPEG
+	tristate "QTI MSM Jpeg Encoder Engine support"
+        depends on MSMB_CAMERA && (ARCH_MSM8974 || ARCH_MSM8226 || ARCH_APQ8084 || ARCH_MSM8916 || ARCH_QCOM)
+	---help---
+	  Enable support for the JPEG encoder/decoder
+	  engine on 8974.
+	  This module serves as the common driver
+	  for the JPEG 1.0 encoder and decoder.
+
+config MSM_GEMINI
+	tristate "QTI MSM Gemini JPEG engine support"
+	depends on MSMB_CAMERA && (ARCH_MSM7X30 || ARCH_MSM8X60 || ARCH_MSM8960)
+	---help---
+	  Enables support for the Gemini JPEG encoder
+	  Engine for 8x60, 7x30 and 8960.
+	  This module serves as the driver
+	  for JPEG encoding functionality.
+
+config MSM_FD
+	 tristate "QTI MSM FD face detection engine support"
+	 depends on MSMB_CAMERA
+	 ---help---
+	    Enables support for the MSM FD face detection engine.
+
+config MSM_JPEGDMA
+	tristate "QTI MSM Jpeg dma"
+        depends on MSMB_CAMERA
+        select V4L2_MEM2MEM_DEV
+	---help---
+	  Enable support for the JPEG DMA engine.
+
+config MSM_SEC_CCI_TA_NAME
+	string "Name of TA to handle Secure CCI transactions"
+	depends on MSM_CCI
+	default "seccamdemo64"
+
+config MSM_SEC_CCI_DEBUG
+	bool "QTI MSM Secure CCI Relay Debug"
+	depends on MSM_CCI
+	---help---
+	  Enables simulation of a secure camera for Secure CCI Relay
+	  debugging.
diff -Nruw linux-4.4.115/drivers/media/platform/msm/camera_v2/Makefile linux-4.4.115-fbx/drivers/media/platform/msm/camera_v2/Makefile
--- linux-4.4.115/drivers/media/platform/msm/camera_v2/Makefile	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/media/platform/msm/camera_v2/Makefile	2019-01-22 16:16:24.431254810 +0100
@@ -0,0 +1,24 @@
+ccflags-y += -Idrivers/media/platform/msm/camera_v2
+ccflags-y += -Idrivers/media/platform/msm/camera_v2/sensor
+ccflags-y += -Idrivers/media/platform/msm/camera_v2/codecs
+ccflags-y += -Idrivers/media/platform/msm/camera_v2/isps
+ccflags-y += -Idrivers/media/platform/msm/camera_v2/pproc
+ccflags-y += -Idrivers/media/platform/msm/camera_v2/msm_vb2
+ccflags-y += -Idrivers/media/platform/msm/camera_v2/camera
+ccflags-y += -Idrivers/media/platform/msm/camera_v2/jpeg_10
+ccflags-y += -Idrivers/media/platform/msm/camera_v2/jpeg_dma
+ccflags-y += -Idrivers/media/platform/msm/camera_v2/fd
+ccflags-y += -Idrivers/media/platform/msm/camera_v2/common
+
+obj-$(CONFIG_MSMB_CAMERA) += common/
+obj-$(CONFIG_MSMB_CAMERA) += msm.o
+obj-$(CONFIG_MSMB_CAMERA) += camera/
+obj-$(CONFIG_MSMB_CAMERA) += msm_vb2/
+obj-$(CONFIG_MSMB_CAMERA) += sensor/
+obj-$(CONFIG_MSMB_CAMERA) += pproc/
+obj-$(CONFIG_MSMB_CAMERA) += isp/
+obj-$(CONFIG_MSMB_CAMERA) += ispif/
+obj-$(CONFIG_MSMB_JPEG) += jpeg_10/
+obj-$(CONFIG_MSM_JPEGDMA) += jpeg_dma/
+obj-$(CONFIG_MSMB_CAMERA) += msm_buf_mgr/
+obj-$(CONFIG_MSM_FD) += fd/
diff -Nruw linux-4.4.115/drivers/media/platform/msm/camera_v2/msm_buf_mgr/Makefile linux-4.4.115-fbx/drivers/media/platform/msm/camera_v2/msm_buf_mgr/Makefile
--- linux-4.4.115/drivers/media/platform/msm/camera_v2/msm_buf_mgr/Makefile	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/media/platform/msm/camera_v2/msm_buf_mgr/Makefile	2019-01-22 16:16:24.443254919 +0100
@@ -0,0 +1,2 @@
+ccflags-y += -Idrivers/media/platform/msm/camera_v2
+obj-$(CONFIG_MSMB_CAMERA) += msm_generic_buf_mgr.o
diff -Nruw linux-4.4.115/drivers/media/platform/msm/camera_v2/msm_vb2/Makefile linux-4.4.115-fbx/drivers/media/platform/msm/camera_v2/msm_vb2/Makefile
--- linux-4.4.115/drivers/media/platform/msm/camera_v2/msm_vb2/Makefile	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/media/platform/msm/camera_v2/msm_vb2/Makefile	2019-01-22 16:16:24.443254919 +0100
@@ -0,0 +1,3 @@
+ccflags-y += -Idrivers/media/platform/msm/camera_v2
+ccflags-y += -Idrivers/media/platform/msm/camera_v2/msm_vb2
+obj-$(CONFIG_MSMB_CAMERA) += msm_vb2.o
diff -Nruw linux-4.4.115/drivers/media/platform/msm/camera_v2/pproc/cpp/Makefile linux-4.4.115-fbx/drivers/media/platform/msm/camera_v2/pproc/cpp/Makefile
--- linux-4.4.115/drivers/media/platform/msm/camera_v2/pproc/cpp/Makefile	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/media/platform/msm/camera_v2/pproc/cpp/Makefile	2019-01-22 16:16:24.447254955 +0100
@@ -0,0 +1,6 @@
+ccflags-y += -Idrivers/media/platform/msm/camera_v2
+ccflags-y += -Idrivers/media/platform/msm/camera_v2/isp/
+ccflags-y += -Idrivers/media/platform/msm/camera_v2/sensor/io
+ccflags-y += -Idrivers/media/platform/msm/camera_v2/common
+ccflags-y += -Idrivers/media/platform/msm/camera_v2/msm_buf_mgr/
+obj-$(CONFIG_MSM_CPP) += msm_cpp_soc.o msm_cpp.o
diff -Nruw linux-4.4.115/drivers/media/platform/msm/camera_v2/pproc/Makefile linux-4.4.115-fbx/drivers/media/platform/msm/camera_v2/pproc/Makefile
--- linux-4.4.115/drivers/media/platform/msm/camera_v2/pproc/Makefile	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/media/platform/msm/camera_v2/pproc/Makefile	2019-01-22 16:16:24.447254955 +0100
@@ -0,0 +1 @@
+obj-$(CONFIG_MSMB_CAMERA) += cpp/
diff -Nruw linux-4.4.115/drivers/media/platform/msm/camera_v2/sensor/actuator/Makefile linux-4.4.115-fbx/drivers/media/platform/msm/camera_v2/sensor/actuator/Makefile
--- linux-4.4.115/drivers/media/platform/msm/camera_v2/sensor/actuator/Makefile	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/media/platform/msm/camera_v2/sensor/actuator/Makefile	2019-01-22 16:16:24.447254955 +0100
@@ -0,0 +1,5 @@
+ccflags-y += -Idrivers/media/platform/msm/camera_v2
+ccflags-y += -Idrivers/media/platform/msm/camera_v2/common
+ccflags-y += -Idrivers/media/platform/msm/camera_v2/sensor/io
+ccflags-y += -Idrivers/media/platform/msm/camera_v2/sensor/cci
+obj-$(CONFIG_MSMB_CAMERA) += msm_actuator.o
diff -Nruw linux-4.4.115/drivers/media/platform/msm/camera_v2/sensor/cci/Makefile linux-4.4.115-fbx/drivers/media/platform/msm/camera_v2/sensor/cci/Makefile
--- linux-4.4.115/drivers/media/platform/msm/camera_v2/sensor/cci/Makefile	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/media/platform/msm/camera_v2/sensor/cci/Makefile	2019-01-22 16:16:24.447254955 +0100
@@ -0,0 +1,4 @@
+ccflags-y += -Idrivers/media/platform/msm/camera_v2
+ccflags-y += -Idrivers/media/platform/msm/camera_v2/common
+ccflags-y += -Idrivers/media/platform/msm/camera_v2/sensor/io
+obj-$(CONFIG_MSM_CCI) += msm_cci.o
diff -Nruw linux-4.4.115/drivers/media/platform/msm/camera_v2/sensor/csid/Makefile linux-4.4.115-fbx/drivers/media/platform/msm/camera_v2/sensor/csid/Makefile
--- linux-4.4.115/drivers/media/platform/msm/camera_v2/sensor/csid/Makefile	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/media/platform/msm/camera_v2/sensor/csid/Makefile	2019-01-22 16:16:24.447254955 +0100
@@ -0,0 +1,4 @@
+ccflags-y += -Idrivers/media/platform/msm/camera_v2
+ccflags-y += -Idrivers/media/platform/msm/camera_v2/common
+ccflags-y += -Idrivers/media/platform/msm/camera_v2/sensor/io
+obj-$(CONFIG_MSM_CSID) += msm_csid.o
diff -Nruw linux-4.4.115/drivers/media/platform/msm/camera_v2/sensor/csiphy/Makefile linux-4.4.115-fbx/drivers/media/platform/msm/camera_v2/sensor/csiphy/Makefile
--- linux-4.4.115/drivers/media/platform/msm/camera_v2/sensor/csiphy/Makefile	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/media/platform/msm/camera_v2/sensor/csiphy/Makefile	2019-01-22 16:16:24.447254955 +0100
@@ -0,0 +1,4 @@
+ccflags-y += -Idrivers/media/platform/msm/camera_v2
+ccflags-y += -Idrivers/media/platform/msm/camera_v2/common
+ccflags-y += -Idrivers/media/platform/msm/camera_v2/sensor/io
+obj-$(CONFIG_MSM_CSIPHY) += msm_csiphy.o
diff -Nruw linux-4.4.115/drivers/media/platform/msm/camera_v2/sensor/eeprom/Makefile linux-4.4.115-fbx/drivers/media/platform/msm/camera_v2/sensor/eeprom/Makefile
--- linux-4.4.115/drivers/media/platform/msm/camera_v2/sensor/eeprom/Makefile	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/media/platform/msm/camera_v2/sensor/eeprom/Makefile	2019-01-22 16:16:24.451254991 +0100
@@ -0,0 +1,5 @@
+ccflags-y += -Idrivers/media/platform/msm/camera_v2
+ccflags-y += -Idrivers/media/platform/msm/camera_v2/common
+ccflags-y += -Idrivers/media/platform/msm/camera_v2/sensor/io
+ccflags-y += -Idrivers/media/platform/msm/camera_v2/sensor/cci
+obj-$(CONFIG_MSM_EEPROM) += msm_eeprom.o
diff -Nruw linux-4.4.115/drivers/media/platform/msm/camera_v2/sensor/flash/Makefile linux-4.4.115-fbx/drivers/media/platform/msm/camera_v2/sensor/flash/Makefile
--- linux-4.4.115/drivers/media/platform/msm/camera_v2/sensor/flash/Makefile	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/media/platform/msm/camera_v2/sensor/flash/Makefile	2019-01-22 16:16:24.451254991 +0100
@@ -0,0 +1,5 @@
+ccflags-y += -Idrivers/media/platform/msm/camera_v2/sensor/cci
+ccflags-y += -Idrivers/media/platform/msm/camera_v2
+ccflags-y += -Idrivers/media/platform/msm/camera_v2/common
+ccflags-y += -Idrivers/media/platform/msm/camera_v2/sensor/io
+obj-$(CONFIG_MSMB_CAMERA) += msm_flash.o
diff -Nruw linux-4.4.115/drivers/media/platform/msm/camera_v2/sensor/io/Makefile linux-4.4.115-fbx/drivers/media/platform/msm/camera_v2/sensor/io/Makefile
--- linux-4.4.115/drivers/media/platform/msm/camera_v2/sensor/io/Makefile	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/media/platform/msm/camera_v2/sensor/io/Makefile	2019-01-22 16:16:24.451254991 +0100
@@ -0,0 +1,7 @@
+ccflags-y += -Idrivers/media/platform/msm/camera_v2/
+ccflags-y += -Idrivers/media/platform/msm/camera_v2/common
+ccflags-y += -Idrivers/media/platform/msm/camera_v2/sensor
+ccflags-y += -Idrivers/media/platform/msm/camera_v2/sensor/cci
+ccflags-y += -Idrivers/media/platform/msm/camera_v2/sensor/io
+ccflags-y += -Idrivers/misc/
+obj-$(CONFIG_MSMB_CAMERA)   += msm_camera_cci_i2c.o msm_camera_qup_i2c.o msm_camera_spi.o msm_camera_dt_util.o msm_camera_tz_i2c.o
diff -Nruw linux-4.4.115/drivers/media/platform/msm/camera_v2/sensor/ir_cut/Makefile linux-4.4.115-fbx/drivers/media/platform/msm/camera_v2/sensor/ir_cut/Makefile
--- linux-4.4.115/drivers/media/platform/msm/camera_v2/sensor/ir_cut/Makefile	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/media/platform/msm/camera_v2/sensor/ir_cut/Makefile	2019-01-22 16:16:24.451254991 +0100
@@ -0,0 +1,4 @@
+ccflags-y += -Idrivers/media/platform/msm/camera_v2
+ccflags-y += -Idrivers/media/platform/msm/camera_v2/common
+ccflags-y += -Idrivers/media/platform/msm/camera_v2/sensor/io
+obj-$(CONFIG_MSMB_CAMERA) += msm_ir_cut.o
diff -Nruw linux-4.4.115/drivers/media/platform/msm/camera_v2/sensor/ir_led/Makefile linux-4.4.115-fbx/drivers/media/platform/msm/camera_v2/sensor/ir_led/Makefile
--- linux-4.4.115/drivers/media/platform/msm/camera_v2/sensor/ir_led/Makefile	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/media/platform/msm/camera_v2/sensor/ir_led/Makefile	2019-01-22 16:16:24.451254991 +0100
@@ -0,0 +1,4 @@
+ccflags-y += -Idrivers/media/platform/msm/camera_v2
+ccflags-y += -Idrivers/media/platform/msm/camera_v2/common
+ccflags-y += -Idrivers/media/platform/msm/camera_v2/sensor/io
+obj-$(CONFIG_MSMB_CAMERA) += msm_ir_led.o
diff -Nruw linux-4.4.115/drivers/media/platform/msm/camera_v2/sensor/laser_led/Makefile linux-4.4.115-fbx/drivers/media/platform/msm/camera_v2/sensor/laser_led/Makefile
--- linux-4.4.115/drivers/media/platform/msm/camera_v2/sensor/laser_led/Makefile	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/media/platform/msm/camera_v2/sensor/laser_led/Makefile	2019-01-22 16:16:24.451254991 +0100
@@ -0,0 +1,5 @@
+ccflags-y += -Idrivers/media/platform/msm/camera_v2
+ccflags-y += -Idrivers/media/platform/msm/camera_v2/common
+ccflags-y += -Idrivers/media/platform/msm/camera_v2/sensor/io
+ccflags-y += -Idrivers/media/platform/msm/camera_v2/sensor/cci
+obj-$(CONFIG_MSMB_CAMERA) += msm_laser_led.o
diff -Nruw linux-4.4.115/drivers/media/platform/msm/camera_v2/sensor/Makefile linux-4.4.115-fbx/drivers/media/platform/msm/camera_v2/sensor/Makefile
--- linux-4.4.115/drivers/media/platform/msm/camera_v2/sensor/Makefile	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/media/platform/msm/camera_v2/sensor/Makefile	2019-01-22 16:16:24.447254955 +0100
@@ -0,0 +1,9 @@
+ccflags-y += -Idrivers/media/platform/msm/camera_v2
+ccflags-y += -Idrivers/media/platform/msm/camera_v2/common
+ccflags-y += -Idrivers/media/platform/msm/camera_v2/msm_vb2
+ccflags-y += -Idrivers/media/platform/msm/camera_v2/camera
+ccflags-y += -Idrivers/media/platform/msm/camera_v2/sensor/io
+ccflags-y += -Idrivers/media/platform/msm/camera_v2/sensor/cci
+obj-$(CONFIG_MSMB_CAMERA) += cci/ io/ csiphy/ csid/ actuator/ eeprom/ ois/ flash/ ir_led/ ir_cut/
+obj-$(CONFIG_MSMB_CAMERA) += laser_led/
+obj-$(CONFIG_MSM_CAMERA_SENSOR) += msm_sensor_init.o msm_sensor_driver.o msm_sensor.o
diff -Nruw linux-4.4.115/drivers/media/platform/msm/camera_v2/sensor/ois/Makefile linux-4.4.115-fbx/drivers/media/platform/msm/camera_v2/sensor/ois/Makefile
--- linux-4.4.115/drivers/media/platform/msm/camera_v2/sensor/ois/Makefile	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/media/platform/msm/camera_v2/sensor/ois/Makefile	2019-01-22 16:16:24.451254991 +0100
@@ -0,0 +1,5 @@
+ccflags-y += -Idrivers/media/platform/msm/camera_v2
+ccflags-y += -Idrivers/media/platform/msm/camera_v2/common
+ccflags-y += -Idrivers/media/platform/msm/camera_v2/sensor/io
+ccflags-y += -Idrivers/media/platform/msm/camera_v2/sensor/cci
+obj-$(CONFIG_MSMB_CAMERA) += msm_ois.o
diff -Nruw linux-4.4.115/drivers/media/platform/msm/Kconfig linux-4.4.115-fbx/drivers/media/platform/msm/Kconfig
--- linux-4.4.115/drivers/media/platform/msm/Kconfig	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/media/platform/msm/Kconfig	2019-01-22 16:16:24.403254557 +0100
@@ -0,0 +1,45 @@
+#
+# MSM camera configuration
+#
+
+comment "QTI MSM Camera And Video & AIS"
+
+menuconfig MSM_CAMERA
+	bool "QTI MSM camera and video capture support"
+	depends on ARCH_QCOM && VIDEO_V4L2 && I2C
+	---help---
+	  Say Y here to enable selecting the video adapters for
+	  QTI MSM camera and video capture drivers. Enabling this
+	  adds support for the camera driver stack including sensor, ISP
+	  and postprocessing drivers for legacy chipsets.
+
+config MSM_CAMERA_DEBUG
+	bool "QTI MSM camera debugging with printk"
+	depends on MSM_CAMERA
+	default n
+	---help---
+	  Enable printk() debugging for the MSM camera driver
+
+menuconfig MSMB_CAMERA
+	bool "QTI MSM camera and video capture 2.0 support"
+	depends on ARCH_QCOM && VIDEO_V4L2 && I2C
+	---help---
+	  Say Y here to enable selecting the video adapters for
+	  QTI MSM camera and video capture 2.0. Enabling this
+	  adds support for the camera driver stack including sensor, ISP
+	  and postprocessing drivers.
+
+config MSMB_CAMERA_DEBUG
+	bool "QTI MSM camera 2.0 debugging with printk"
+	depends on MSMB_CAMERA
+	---help---
+	  Enable printk() debugging for the MSM camera 2.0 driver
+
+if MSMB_CAMERA
+source "drivers/media/platform/msm/camera_v2/Kconfig"
+endif # MSMB_CAMERA
+
+source "drivers/media/platform/msm/vidc/Kconfig"
+source "drivers/media/platform/msm/sde/Kconfig"
+source "drivers/media/platform/msm/ais/Kconfig"
+source "drivers/media/platform/msm/tspp/Kconfig"
diff -Nruw linux-4.4.115/drivers/media/platform/msm/Makefile linux-4.4.115-fbx/drivers/media/platform/msm/Makefile
--- linux-4.4.115/drivers/media/platform/msm/Makefile	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/media/platform/msm/Makefile	2019-01-22 16:16:24.403254557 +0100
@@ -0,0 +1,9 @@
+#
+# Makefile for the QCOM specific video device drivers
+# based on V4L2.
+#
+obj-$(CONFIG_MSMB_CAMERA) += camera_v2/
+obj-$(CONFIG_MSM_VIDC_V4L2) += vidc/
+obj-y += sde/
+obj-y += tspp/
+obj-$(CONFIG_MSM_AIS) += ais/
diff -Nruw linux-4.4.115/drivers/media/platform/msm/sde/cec/Makefile linux-4.4.115-fbx/drivers/media/platform/msm/sde/cec/Makefile
--- linux-4.4.115/drivers/media/platform/msm/sde/cec/Makefile	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/media/platform/msm/sde/cec/Makefile	2019-01-22 16:16:24.451254991 +0100
@@ -0,0 +1,3 @@
+obj-y := \
+	sde_hdmi_cec.o \
+	sde_hdmi_cec_util.o
diff -Nruw linux-4.4.115/drivers/media/platform/msm/sde/cec/sde_hdmi_cec.c linux-4.4.115-fbx/drivers/media/platform/msm/sde/cec/sde_hdmi_cec.c
--- linux-4.4.115/drivers/media/platform/msm/sde/cec/sde_hdmi_cec.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/media/platform/msm/sde/cec/sde_hdmi_cec.c	2019-06-20 12:29:44.969525999 +0200
@@ -0,0 +1,509 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/clk.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/hdmi-cec/hdmi-cec.h>
+
+#include "sde_hdmi_cec_util.h"
+
+#define CEC_NAME "sde-hdmi-cec"
+
+/* CEC Register Definition */
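+/*
+ * Each *_INT status bit below is paired with its enable (*_MASK) bit;
+ * the IRQ handler acknowledges pending interrupts by writing the status
+ * bits back to HDMI_CEC_INT (see sde_hdmi_cec_irq_handler below).
+ */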
+#define HDMI_CEC_INT_TX_DONE		BIT(0)
+#define HDMI_CEC_INT_TX_DONE_MASK	BIT(1)
+#define HDMI_CEC_INT_TX_ERROR		BIT(2)
+#define HDMI_CEC_INT_TX_ERROR_MASK	BIT(3)
+#define HDMI_CEC_INT_MONITOR		BIT(4)
+#define HDMI_CEC_INT_MONITOR_MASK	BIT(5)
+#define HDMI_CEC_INT_RX_DONE		BIT(6)
+#define HDMI_CEC_INT_RX_DONE_MASK	BIT(7)
+
+#define HDMI_CEC_INT_STATE \
+	(HDMI_CEC_INT_TX_DONE | \
+	 HDMI_CEC_INT_TX_ERROR | \
+	 HDMI_CEC_INT_MONITOR | \
+	 HDMI_CEC_INT_RX_DONE)
+
+#define HDMI_CEC_INT_MASK \
+	(HDMI_CEC_INT_TX_DONE_MASK | \
+	 HDMI_CEC_INT_TX_ERROR_MASK | \
+	 HDMI_CEC_INT_MONITOR_MASK | \
+	 HDMI_CEC_INT_RX_DONE_MASK)
+
+#define HDMI_CEC_TX_ERR_NONE		0
+#define HDMI_CEC_TX_ERR_NACK		1
+#define HDMI_CEC_TX_ERR_ARB_LOSS	2
+#define HDMI_CEC_TX_ERR_MAX_RETRIES	3
+
+#define HDMI_CEC_MIN_HW_VERSION		0x30000001
+
+#define HDMI_CEC_WR_RANGE                (0x000002DC)
+#define HDMI_CEC_RD_RANGE                (0x000002E0)
+#define HDMI_VERSION                     (0x000002E4)
+#define HDMI_CEC_CTRL                    (0x0000028C)
+#define HDMI_CEC_WR_DATA                 (0x00000290)
+#define HDMI_CEC_RETRANSMIT              (0x00000294)
+#define HDMI_CEC_STATUS                  (0x00000298)
+#define HDMI_CEC_INT                     (0x0000029C)
+#define HDMI_CEC_ADDR                    (0x000002A0)
+#define HDMI_CEC_TIME                    (0x000002A4)
+#define HDMI_CEC_REFTIMER                (0x000002A8)
+#define HDMI_CEC_RD_DATA                 (0x000002AC)
+#define HDMI_CEC_RD_FILTER               (0x000002B0)
+#define HDMI_CEC_COMPL_CTL               (0x00000360)
+#define HDMI_CEC_RD_START_RANGE          (0x00000364)
+#define HDMI_CEC_RD_TOTAL_RANGE          (0x00000368)
+#define HDMI_CEC_RD_ERR_RESP_LO          (0x0000036C)
+#define HDMI_CEC_WR_CHECK_CONFIG         (0x00000370)
+
+enum cec_irq_status {
+	CEC_IRQ_FRAME_WR_DONE = 1 << 0,
+	CEC_IRQ_FRAME_RD_DONE = 1 << 1,
+};
+
+struct sde_hdmi_cec {
+	struct cec_adapter *adap;
+	struct device *dev;
+	struct cec_hw_resource hw_res;
+	int irq;
+	u32 tx_status;
+	u32 tx_retransmits;
+	enum cec_irq_status irq_status;
+};
+
+static inline struct sde_hdmi_cec *adapter_priv(struct cec_adapter *adapter)
+{
+	return *(struct sde_hdmi_cec **)cec_adapter_priv(adapter);
+}
+
+static int sde_hdmi_cec_set_logical_address(struct cec_adapter *adap,
+					    const u8 logical_addr)
+{
+	struct sde_hdmi_cec *cec = adapter_priv(adap);
+	struct cec_hw_resource *hw = &cec->hw_res;
+
+	CEC_REG_WRITE(hw, HDMI_CEC_ADDR, logical_addr & 0xF);
+
+	return 0;
+}
+
+static int sde_hdmi_cec_send(struct cec_adapter *adap, u16 expire_ms,
+			     u8 attempts, const u8 *data, const u8 len)
+{
+	struct sde_hdmi_cec *cec = adapter_priv(adap);
+	struct cec_hw_resource *hw = &cec->hw_res;
+	u32 frame_type;
+	u8 retransmits;
+	int i;
+	u32 line_check_retry = 10;
+
+	/* toggle cec in order to flush out bad hw state, if any */
+	CEC_REG_WRITE(hw, HDMI_CEC_CTRL, 0);
+	CEC_REG_WRITE(hw, HDMI_CEC_CTRL, 1);
+
+	/* make sure state is cleared */
+	wmb();
+
+	retransmits = attempts ? (attempts - 1) : 0;
+
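+	/* program the retry count (bits [7:4]) and enable retransmit (BIT(0)) */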
+	CEC_REG_WRITE(hw, HDMI_CEC_RETRANSMIT, (retransmits << 4) | BIT(0));
+
+	/* set message data */
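+	/* the low nibble of the CEC header byte is the destination address;
+	 * 0xF addresses a broadcast frame
+	 */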
+	if ((data[0] & 0xf) == 0xf)
+		frame_type = 1;
+	else
+		frame_type = 0;
+
+	for (i = 0; i < len; i++)
+		CEC_REG_WRITE(hw, HDMI_CEC_WR_DATA,
+			(data[i] << 8) | frame_type);
+
+	/* check line status */
+	while ((CEC_REG_READ(hw, HDMI_CEC_STATUS) & BIT(0)) &&
+		line_check_retry) {
+		line_check_retry--;
+		pr_debug("CEC line is busy(%d)\n", line_check_retry);
+		schedule();
+	}
+
+	if (!line_check_retry && (CEC_REG_READ(hw, HDMI_CEC_STATUS) & BIT(0))) {
+		pr_err("CEC line is busy. Retry failed\n");
+		return -EBUSY;
+	}
+
+	cec->tx_retransmits = retransmits;
+
+	/* start transmission */
+	CEC_REG_WRITE(hw, HDMI_CEC_CTRL, BIT(0) | BIT(1) |
+		((len & 0x1F) << 4) | BIT(9));
+
+	pr_debug("tx %*ph", len, data);
+
+	return 0;
+}
+
+static int sde_hdmi_cec_set_rx_mode(struct cec_adapter *adap,
+				    enum cec_rx_mode rx_mode)
+{
+	struct sde_hdmi_cec *cec = adapter_priv(adap);
+	struct cec_hw_resource *hw = &cec->hw_res;
+	u32 intr;
+
+	switch (rx_mode) {
+	case CEC_RX_MODE_DISABLED:
+		intr = CEC_REG_READ(hw, HDMI_CEC_INT);
+		intr &= HDMI_CEC_INT_MASK;
+		intr &= ~HDMI_CEC_INT_RX_DONE_MASK;
+		intr |= HDMI_CEC_INT_RX_DONE;
+		CEC_REG_WRITE(hw, HDMI_CEC_INT, intr);
+		break;
+	case CEC_RX_MODE_DEFAULT:
+		intr = CEC_REG_READ(hw, HDMI_CEC_INT);
+		intr &= HDMI_CEC_INT_MASK;
+		intr |= HDMI_CEC_INT_RX_DONE_MASK;
+		CEC_REG_WRITE(hw, HDMI_CEC_INT, intr);
+		break;
+	default:
+		return -ENOTTY;
+	}
+
+	return 0;
+}
+
+static int sde_hdmi_cec_reset(struct cec_adapter *adap)
+{
+	struct sde_hdmi_cec *cec = adapter_priv(adap);
+	struct cec_hw_resource *hw = &cec->hw_res;
+
+	CEC_REG_WRITE(hw, HDMI_CEC_CTRL, 0);
+	CEC_REG_WRITE(hw, HDMI_CEC_CTRL, 1);
+
+	return 0;
+}
+
+static int sde_hdmi_cec_attach(struct cec_adapter *adap)
+{
+	struct sde_hdmi_cec *cec = adapter_priv(adap);
+	struct cec_hw_resource *hw = &cec->hw_res;
+	u32 hdmi_hw_version;
+
+	pm_runtime_get_sync(cec->dev);
+
+	hdmi_hw_version = CEC_REG_READ(hw, HDMI_VERSION);
+	if (hdmi_hw_version < HDMI_CEC_MIN_HW_VERSION) {
+		pr_err("CEC version %x is not supported.\n",
+		       hdmi_hw_version);
+		/* drop the runtime PM reference taken above */
+		pm_runtime_put(cec->dev);
+		return -ENXIO;
+	}
+
+	/*
+	 * Set the reference timer counter interval to 50 us
+	 * (19.2 MHz * 50 us = 960 cycles = 0x3C0).
+	 */
+	CEC_REG_WRITE(hw, HDMI_CEC_REFTIMER, 0x3C0 | BIT(16));
+
+	CEC_REG_WRITE(hw, HDMI_CEC_WR_CHECK_CONFIG, 0x4);
+	CEC_REG_WRITE(hw, HDMI_CEC_RD_RANGE, 0x30AB9888);
+	CEC_REG_WRITE(hw, HDMI_CEC_WR_RANGE, 0x888AA888);
+	CEC_REG_WRITE(hw, HDMI_CEC_RD_START_RANGE, 0x88888888);
+	CEC_REG_WRITE(hw, HDMI_CEC_RD_TOTAL_RANGE, 0x99);
+	CEC_REG_WRITE(hw, HDMI_CEC_RD_ERR_RESP_LO, 0x4A);
+	CEC_REG_WRITE(hw, HDMI_CEC_COMPL_CTL, 0xF);
+	CEC_REG_WRITE(hw, HDMI_CEC_RD_FILTER, BIT(0) | (0x7FF << 4));
+	CEC_REG_WRITE(hw, HDMI_CEC_TIME, BIT(0) | ((7 * 0x30) << 7));
+
+	/* Clear pending interrupts, enable tx interrupts */
+	CEC_REG_WRITE(hw, HDMI_CEC_INT, HDMI_CEC_INT_STATE |
+		      HDMI_CEC_INT_TX_DONE_MASK |
+		      HDMI_CEC_INT_TX_ERROR_MASK);
+
+	/* Enable Engine */
+	CEC_REG_WRITE(hw, HDMI_CEC_CTRL, BIT(0));
+
+	pr_debug("adapter attached\n");
+
+	return 0;
+}
+
+static int sde_hdmi_cec_detach(struct cec_adapter *adap)
+{
+	struct sde_hdmi_cec *cec = adapter_priv(adap);
+	struct cec_hw_resource *hw = &cec->hw_res;
+
+	/* Disable Engine */
+	CEC_REG_WRITE(hw, HDMI_CEC_CTRL, 0);
+
+	/* Disable CEC interrupts */
+	CEC_REG_WRITE(hw, HDMI_CEC_INT, 0);
+
+	pm_runtime_put(cec->dev);
+
+	pr_debug("adapter detached\n");
+
+	return 0;
+}
+
+static int sde_hdmi_cec_set_detached_config(struct cec_adapter *adap,
+					    const struct cec_detached_config *cfg)
+{
+	return 0;
+}
+
+static void sde_hdmi_cec_handle_rx_done(struct sde_hdmi_cec *cec)
+{
+	struct cec_hw_resource *hw = &cec->hw_res;
+	u8 msg[CEC_MAX_MSG_LEN];
+	u32 data, len, i;
+
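+	/* the first HDMI_CEC_RD_DATA read returns the header byte in bits
+	 * [7:0] and the frame length in bits [12:8]; each subsequent read
+	 * pops one payload byte
+	 */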
+	data = CEC_REG_READ(hw, HDMI_CEC_RD_DATA);
+	len = (data >> 8) & 0x1f;
+
+	if (len < 1 || len > CEC_MAX_MSG_LEN) {
+		pr_err("invalid message size %d\n", len);
+		return;
+	}
+
+	msg[0] = data & 0xff;
+	for (i = 1; i < len; i++)
+		msg[i] = CEC_REG_READ(hw, HDMI_CEC_RD_DATA) & 0xff;
+
+	pr_debug("rx %*ph\n", len, msg);
+
+	adapter_rx_done(cec->adap, msg, len, 1,
+			CEC_RX_F_ACKED | CEC_RX_F_COMPLETE);
+}
+
+static void sde_hdmi_cec_handle_tx_done(struct sde_hdmi_cec *cec, u32 status)
+{
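+	/* HDMI_CEC_STATUS: tx error code in bits [7:4], frame done in BIT(3) */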
+	u32 error = (status & GENMASK(7, 4)) >> 4;
+	u32 frame_done = status & BIT(3);
+	u32 retransmits = 0;
+	u8 tx_flags;
+
+	switch (error) {
+	case HDMI_CEC_TX_ERR_NONE:
+		pr_debug("tx done\n");
+		tx_flags = 0;
+		break;
+	case HDMI_CEC_TX_ERR_NACK:
+		pr_debug("tx nack\n");
+		tx_flags = CEC_TX_F_NACK;
+		break;
+	case HDMI_CEC_TX_ERR_ARB_LOSS:
+		pr_debug("tx arb lost\n");
+		tx_flags = CEC_TX_F_ARBITRATION_LOST;
+		break;
+	case HDMI_CEC_TX_ERR_MAX_RETRIES:
+		pr_debug("tx max retries %d\n", cec->tx_retransmits);
+		tx_flags = CEC_TX_F_MAX_RETRIES | CEC_TX_F_NACK;
+		/*
+		 * The retransmit count is reset after all retransmits
+		 * have been attempted, so use a cached value
+		 */
+		retransmits = cec->tx_retransmits;
+		break;
+	default:
+		pr_debug("tx unknown error\n");
+		tx_flags = CEC_TX_F_UNKNOWN_ERROR;
+		break;
+	}
+
+	adapter_tx_done(cec->adap, frame_done, tx_flags,
+			retransmits + 1);
+}
+
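+/*
+ * The hard IRQ handler (sde_hdmi_cec_irq_handler, below) latches the
+ * interrupt status and wakes this thread, which then calls into the
+ * CEC framework from sleepable context.
+ */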
+static irqreturn_t sde_hdmi_cec_irq_handler_thread(int irq, void *priv)
+{
+	struct sde_hdmi_cec *cec = priv;
+
+	if (cec->irq_status & CEC_IRQ_FRAME_WR_DONE)
+		sde_hdmi_cec_handle_tx_done(cec, cec->tx_status);
+
+	if (cec->irq_status & CEC_IRQ_FRAME_RD_DONE)
+		sde_hdmi_cec_handle_rx_done(cec);
+
+	cec->irq_status = 0;
+	cec->tx_status = 0;
+
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t sde_hdmi_cec_irq_handler(int irq, void *priv)
+{
+	struct sde_hdmi_cec *cec = priv;
+	struct cec_hw_resource *hw = &cec->hw_res;
+	u32 data = CEC_REG_READ(hw, HDMI_CEC_INT);
+
+	if (((data & HDMI_CEC_INT_TX_DONE) &&
+	     (data & HDMI_CEC_INT_TX_DONE_MASK)) ||
+	    ((data & HDMI_CEC_INT_TX_ERROR) &&
+	     (data & HDMI_CEC_INT_TX_ERROR_MASK))) {
+		cec->tx_status = CEC_REG_READ(hw, HDMI_CEC_STATUS);
+		cec->irq_status |= CEC_IRQ_FRAME_WR_DONE;
+	}
+
+	if ((data & HDMI_CEC_INT_RX_DONE) &&
+	    (data & HDMI_CEC_INT_RX_DONE_MASK))
+		cec->irq_status |= CEC_IRQ_FRAME_RD_DONE;
+
+	CEC_REG_WRITE(hw, HDMI_CEC_INT, data);
+
+	return cec->irq_status ? IRQ_WAKE_THREAD : IRQ_HANDLED;
+}
+
+static const struct cec_adapter_ops sde_hdmi_cec_adap_ops = {
+	.set_logical_address    = sde_hdmi_cec_set_logical_address,
+	.send                   = sde_hdmi_cec_send,
+	.reset                  = sde_hdmi_cec_reset,
+	.set_rx_mode            = sde_hdmi_cec_set_rx_mode,
+	.attach                 = sde_hdmi_cec_attach,
+	.detach                 = sde_hdmi_cec_detach,
+	.set_detached_config    = sde_hdmi_cec_set_detached_config,
+};
+
+static int sde_hdmi_cec_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct cec_adapter *adapter;
+	struct sde_hdmi_cec *cec;
+	struct device_node *np;
+	struct platform_device *hdmi_dev;
+	struct sde_hdmi_cec **priv;
+	int ret;
+
+	cec = devm_kzalloc(dev, sizeof(*cec), GFP_KERNEL);
+	if (!cec)
+		return -ENOMEM;
+
+	cec->dev = dev;
+
+	np = of_parse_phandle(pdev->dev.of_node, "qcom,hdmi-dev", 0);
+	if (!np) {
+		pr_err("failed to find hdmi node in device tree\n");
+		return -ENODEV;
+	}
+	hdmi_dev = of_find_device_by_node(np);
+	of_node_put(np);
+	if (!hdmi_dev)
+		return -EPROBE_DEFER;
+
+	cec->irq = of_irq_get(dev->of_node, 0);
+	if (cec->irq < 0) {
+		pr_err("failed to get irq\n");
+		return cec->irq;
+	}
+
+	ret = devm_request_threaded_irq(dev, cec->irq, sde_hdmi_cec_irq_handler,
+					sde_hdmi_cec_irq_handler_thread, 0,
+					pdev->name, cec);
+	if (ret)
+		return ret;
+
+	ret = sde_hdmi_cec_init_resource(pdev, &cec->hw_res);
+	if (ret)
+		return ret;
+
+	adapter = alloc_cec_adapter(sizeof(cec));
+	if (!adapter)
+		return -ENOMEM;
+
+	adapter->driver_name = "sde-hdmi-cec";
+	adapter->ops = &sde_hdmi_cec_adap_ops;
+	adapter->module = THIS_MODULE;
+	adapter->flags = CEC_HW_HAS_RX_FILTER;
+
+	priv = cec_adapter_priv(adapter);
+	*priv = cec;
+
+	ret = register_cec_adapter(adapter, &pdev->dev);
+	if (ret)
+		goto err_del_adap;
+
+	cec->adap = adapter;
+
+	platform_set_drvdata(pdev, cec);
+
+	pm_runtime_enable(dev);
+
+	return ret;
+
+err_del_adap:
+	free_cec_adapter(adapter);
+	return ret;
+}
+
+static int sde_hdmi_cec_remove(struct platform_device *pdev)
+{
+	struct sde_hdmi_cec *cec = platform_get_drvdata(pdev);
+
+	pm_runtime_disable(&pdev->dev);
+
+	unregister_cec_adapter(cec->adap);
+	free_cec_adapter(cec->adap);
+
+	devm_free_irq(&pdev->dev, cec->irq, cec);
+	sde_hdmi_cec_deinit_resource(pdev, &cec->hw_res);
+
+	return 0;
+}
+
+static int __maybe_unused sde_hdmi_cec_runtime_suspend(struct device *dev)
+{
+	struct sde_hdmi_cec *cec = dev_get_drvdata(dev);
+	struct cec_hw_resource *hw = &cec->hw_res;
+
+	return sde_hdmi_cec_enable_power(hw, false);
+}
+
+static int __maybe_unused sde_hdmi_cec_runtime_resume(struct device *dev)
+{
+	struct sde_hdmi_cec *cec = dev_get_drvdata(dev);
+	struct cec_hw_resource *hw = &cec->hw_res;
+
+	return sde_hdmi_cec_enable_power(hw, true);
+}
+
+static const struct dev_pm_ops sde_hdmi_cec_pm_ops = {
+	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+				pm_runtime_force_resume)
+	SET_RUNTIME_PM_OPS(sde_hdmi_cec_runtime_suspend,
+		sde_hdmi_cec_runtime_resume, NULL)
+};
+
+static const struct of_device_id sde_hdmi_cec_match[] = {
+	{
+		.compatible = "qcom,hdmi-cec",
+	},
+	{},
+};
+MODULE_DEVICE_TABLE(of, sde_hdmi_cec_match);
+
+static struct platform_driver sde_hdmi_cec_pdrv = {
+	.probe = sde_hdmi_cec_probe,
+	.remove = sde_hdmi_cec_remove,
+	.driver = {
+		.name = CEC_NAME,
+		.of_match_table = sde_hdmi_cec_match,
+		.pm = &sde_hdmi_cec_pm_ops,
+	},
+};
+
+module_platform_driver(sde_hdmi_cec_pdrv);
+MODULE_DESCRIPTION("MSM SDE HDMI CEC driver");
diff -Nruw linux-4.4.115/drivers/media/platform/msm/sde/cec/sde_hdmi_cec_util.c linux-4.4.115-fbx/drivers/media/platform/msm/sde/cec/sde_hdmi_cec_util.c
--- linux-4.4.115/drivers/media/platform/msm/sde/cec/sde_hdmi_cec_util.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/media/platform/msm/sde/cec/sde_hdmi_cec_util.c	2019-01-22 16:16:24.455255027 +0100
@@ -0,0 +1,743 @@
+/* Copyright (c) 2012, 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/regulator/consumer.h>
+#include <linux/delay.h>
+
+#include "sde_hdmi_cec_util.h"
+
+void sde_hdmi_cec_reg_w(struct cec_io_data *io,
+	u32 offset, u32 value, bool debug)
+{
+	u32 in_val;
+
+	if (!io || !io->base) {
+		pr_err("invalid input\n");
+		return;
+	}
+
+	if (offset >= io->len) {
+		pr_err("offset out of range\n");
+		return;
+	}
+
+	writel_relaxed(value, io->base + offset);
+	if (debug) {
+		in_val = readl_relaxed(io->base + offset);
+		pr_debug("[%08x] => %08x [%08x]\n",
+			(u32)(unsigned long)(io->base + offset),
+			value, in_val);
+	}
+}
+
+u32 sde_hdmi_cec_reg_r(struct cec_io_data *io, u32 offset, bool debug)
+{
+	u32 value;
+
+	if (!io || !io->base) {
+		pr_err("invalid input\n");
+		return -EINVAL;
+	}
+
+	if (offset >= io->len) {
+		pr_err("offset out of range\n");
+		return -EINVAL;
+	}
+
+	value = readl_relaxed(io->base + offset);
+	if (debug)
+		pr_debug("[%08x] <= %08x\n",
+			(u32)(unsigned long)(io->base + offset), value);
+
+	return value;
+}
+
+void sde_hdmi_cec_reg_dump(void __iomem *base, u32 length, const char *prefix,
+	bool debug)
+{
+	if (debug)
+		print_hex_dump(KERN_INFO, prefix, DUMP_PREFIX_OFFSET, 32, 4,
+			__io_virt(base), length, false);
+}
+
+static int sde_hdmi_cec_config_vreg(struct device *dev,
+	struct cec_vreg *in_vreg, int num_vreg, bool config)
+{
+	int i = 0, rc = 0;
+	struct cec_vreg *curr_vreg = NULL;
+	enum cec_vreg_type type;
+
+	if (!in_vreg || !num_vreg)
+		return rc;
+
+	if (config) {
+		for (i = 0; i < num_vreg; i++) {
+			curr_vreg = &in_vreg[i];
+			curr_vreg->vreg = regulator_get(dev,
+				curr_vreg->vreg_name);
+			rc = PTR_RET(curr_vreg->vreg);
+			if (rc) {
+				pr_err("%s get failed. rc=%d\n",
+					 curr_vreg->vreg_name, rc);
+				curr_vreg->vreg = NULL;
+				goto vreg_get_fail;
+			}
+			type = (regulator_count_voltages(curr_vreg->vreg) > 0)
+					? CEC_REG_LDO : CEC_REG_VS;
+			if (type == CEC_REG_LDO) {
+				rc = regulator_set_voltage(
+					curr_vreg->vreg,
+					curr_vreg->min_voltage,
+					curr_vreg->max_voltage);
+				if (rc < 0) {
+					pr_err("%s set vltg fail\n",
+						curr_vreg->vreg_name);
+					goto vreg_set_voltage_fail;
+				}
+			}
+		}
+	} else {
+		for (i = num_vreg-1; i >= 0; i--) {
+			curr_vreg = &in_vreg[i];
+			if (curr_vreg->vreg) {
+				type = (regulator_count_voltages(
+					curr_vreg->vreg) > 0)
+					? CEC_REG_LDO : CEC_REG_VS;
+				if (type == CEC_REG_LDO) {
+					regulator_set_voltage(curr_vreg->vreg,
+						0, curr_vreg->max_voltage);
+				}
+				regulator_put(curr_vreg->vreg);
+				curr_vreg->vreg = NULL;
+			}
+		}
+	}
+	return 0;
+
+vreg_unconfig:
+	if (type == CEC_REG_LDO)
+		regulator_set_load(curr_vreg->vreg, 0);
+
+vreg_set_voltage_fail:
+	regulator_put(curr_vreg->vreg);
+	curr_vreg->vreg = NULL;
+
+vreg_get_fail:
+	for (i--; i >= 0; i--) {
+		curr_vreg = &in_vreg[i];
+		type = (regulator_count_voltages(curr_vreg->vreg) > 0)
+			? CEC_REG_LDO : CEC_REG_VS;
+		goto vreg_unconfig;
+	}
+	return rc;
+}
+
+static int sde_hdmi_cec_enable_vreg(struct cec_hw_resource *hw, int enable)
+{
+	int i = 0, rc = 0;
+	bool need_sleep;
+	struct cec_vreg *in_vreg = hw->vreg_config;
+	int num_vreg = hw->num_vreg;
+
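+	/* power up in list order; power down and unwind errors in reverse */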
+	if (enable) {
+		for (i = 0; i < num_vreg; i++) {
+			rc = PTR_RET(in_vreg[i].vreg);
+			if (rc) {
+				pr_err("%s regulator error. rc=%d\n",
+					in_vreg[i].vreg_name, rc);
+				goto vreg_set_opt_mode_fail;
+			}
+			need_sleep = !regulator_is_enabled(in_vreg[i].vreg);
+			if (in_vreg[i].pre_on_sleep && need_sleep)
+				usleep_range(in_vreg[i].pre_on_sleep * 1000,
+					in_vreg[i].pre_on_sleep * 1000);
+			rc = regulator_set_load(in_vreg[i].vreg,
+				in_vreg[i].enable_load);
+			if (rc < 0) {
+				pr_err("%s set opt m fail\n",
+					in_vreg[i].vreg_name);
+				goto vreg_set_opt_mode_fail;
+			}
+			rc = regulator_enable(in_vreg[i].vreg);
+			if (in_vreg[i].post_on_sleep && need_sleep)
+				usleep_range(in_vreg[i].post_on_sleep * 1000,
+					in_vreg[i].post_on_sleep * 1000);
+			if (rc < 0) {
+				pr_err("%s enable failed\n",
+					in_vreg[i].vreg_name);
+				goto disable_vreg;
+			}
+		}
+	} else {
+		for (i = num_vreg-1; i >= 0; i--) {
+			if (in_vreg[i].pre_off_sleep)
+				usleep_range(in_vreg[i].pre_off_sleep * 1000,
+					in_vreg[i].pre_off_sleep * 1000);
+			regulator_set_load(in_vreg[i].vreg,
+				in_vreg[i].disable_load);
+			regulator_disable(in_vreg[i].vreg);
+			if (in_vreg[i].post_off_sleep)
+				usleep_range(in_vreg[i].post_off_sleep * 1000,
+					in_vreg[i].post_off_sleep * 1000);
+		}
+	}
+	return rc;
+
+disable_vreg:
+	regulator_set_load(in_vreg[i].vreg, in_vreg[i].disable_load);
+
+vreg_set_opt_mode_fail:
+	for (i--; i >= 0; i--) {
+		if (in_vreg[i].pre_off_sleep)
+			usleep_range(in_vreg[i].pre_off_sleep * 1000,
+				in_vreg[i].pre_off_sleep * 1000);
+		regulator_set_load(in_vreg[i].vreg,
+			in_vreg[i].disable_load);
+		regulator_disable(in_vreg[i].vreg);
+		if (in_vreg[i].post_off_sleep)
+			usleep_range(in_vreg[i].post_off_sleep * 1000,
+				in_vreg[i].post_off_sleep * 1000);
+	}
+
+	return rc;
+}
+
+static void sde_hdmi_cec_put_clk(struct cec_clk *clk_arry, int num_clk)
+{
+	int i;
+
+	for (i = num_clk - 1; i >= 0; i--) {
+		if (clk_arry[i].clk)
+			clk_put(clk_arry[i].clk);
+		clk_arry[i].clk = NULL;
+	}
+}
+
+static int sde_hdmi_cec_get_clk(struct device *dev,
+	struct cec_clk *clk_arry, int num_clk)
+{
+	int i, rc = 0;
+
+	for (i = 0; i < num_clk; i++) {
+		clk_arry[i].clk = clk_get(dev, clk_arry[i].clk_name);
+		rc = PTR_RET(clk_arry[i].clk);
+		if (rc) {
+			pr_err("'%s' get failed. rc=%d\n",
+				clk_arry[i].clk_name, rc);
+			goto error;
+		}
+	}
+
+	return rc;
+
+error:
+	sde_hdmi_cec_put_clk(clk_arry, num_clk);
+
+	return rc;
+}
+
+static int sde_hdmi_cec_enable_clk(struct cec_hw_resource *hw, int enable)
+{
+	int i, rc = 0;
+	struct cec_clk *clk_arry = hw->clk_config;
+	int num_clk = hw->num_clk;
+
+	if (enable) {
+		for (i = 0; i < num_clk; i++) {
+			pr_debug("enable %s\n", clk_arry[i].clk_name);
+			if (clk_arry[i].clk) {
+				rc = clk_prepare_enable(clk_arry[i].clk);
+				if (rc)
+					pr_err("%s enable fail. rc=%d\n",
+						clk_arry[i].clk_name, rc);
+			} else {
+				pr_err("%s is not available\n",
+					clk_arry[i].clk_name);
+				rc = -EPERM;
+			}
+		}
+	} else {
+		for (i = num_clk - 1; i >= 0; i--) {
+			pr_debug("disable %s\n", clk_arry[i].clk_name);
+
+			if (clk_arry[i].clk)
+				clk_disable_unprepare(clk_arry[i].clk);
+			else
+				pr_err("%s is not available\n",
+					clk_arry[i].clk_name);
+		}
+	}
+
+	return rc;
+}
+
+static int sde_hdmi_cec_pinctrl_enable(struct cec_hw_resource *hw,
+	bool enable)
+{
+	struct pinctrl_state *pin_state = NULL;
+	int rc = 0;
+
+	if (!hw) {
+		pr_err("invalid input param hw:%pK\n", hw);
+		return -EINVAL;
+	}
+
+	pr_debug("set cec pinctrl state %d\n", enable);
+
+	pin_state = enable ? hw->pin_res.state_active : hw->pin_res.state_sleep;
+
+	if (!IS_ERR_OR_NULL(hw->pin_res.pinctrl))
+		rc = pinctrl_select_state(hw->pin_res.pinctrl,
+			pin_state);
+	else
+		pr_err("pinstate not found\n");
+
+	return rc;
+}
+
+static void sde_hdmi_cec_put_dt_clock(struct platform_device *pdev,
+					struct cec_hw_resource *hw)
+{
+	if (!pdev || !hw) {
+		pr_err("invalid input param pdev:%pK hw:%pK\n", pdev, hw);
+		return;
+	}
+
+	if (hw->clk_config) {
+		sde_hdmi_cec_put_clk(hw->clk_config, hw->num_clk);
+		devm_kfree(&pdev->dev, hw->clk_config);
+		hw->clk_config = NULL;
+	}
+	hw->num_clk = 0;
+
+	pr_debug("put dt clock\n");
+}
+
+static int sde_hdmi_cec_get_dt_clock(struct platform_device *pdev,
+					struct cec_hw_resource *hw)
+{
+	int i = 0;
+	int num_clk = 0;
+	const char *clock_name;
+	int rc = 0;
+
+	if (!pdev || !hw) {
+		pr_err("invalid input param pdev:%pK hw:%pK\n", pdev, hw);
+		return -EINVAL;
+	}
+
+	hw->num_clk = 0;
+	num_clk = of_property_count_strings(pdev->dev.of_node, "clock-names");
+	if (num_clk <= 0) {
+		pr_debug("clocks are not defined\n");
+		return 0;
+	}
+
+	hw->num_clk = num_clk;
+	hw->clk_config = devm_kzalloc(&pdev->dev,
+			sizeof(struct cec_clk) * num_clk, GFP_KERNEL);
+	if (!hw->clk_config) {
+		hw->num_clk = 0;
+		return -ENOMEM;
+	}
+
+	for (i = 0; i < num_clk; i++) {
+		of_property_read_string_index(pdev->dev.of_node, "clock-names",
+							i, &clock_name);
+		strlcpy(hw->clk_config[i].clk_name, clock_name,
+				sizeof(hw->clk_config[i].clk_name));
+	}
+
+	rc = sde_hdmi_cec_get_clk(&pdev->dev, hw->clk_config, hw->num_clk);
+	if (rc) {
+		sde_hdmi_cec_put_dt_clock(pdev, hw);
+		return rc;
+	}
+
+	pr_debug("get dt clock\n");
+
+	return 0;
+}
+
+static int sde_hdmi_cec_get_dt_supply(struct platform_device *pdev,
+				struct cec_hw_resource *hw)
+{
+	int i = 0, rc = 0;
+	u32 tmp = 0;
+	struct device_node *of_node = NULL, *supply_root_node = NULL;
+	struct device_node *supply_node = NULL;
+
+	if (!pdev || !hw) {
+		pr_err("invalid input param pdev:%pK hw:%pK\n", pdev, hw);
+		return -EINVAL;
+	}
+
+	of_node = pdev->dev.of_node;
+
+	hw->num_vreg = 0;
+	supply_root_node = of_get_child_by_name(of_node,
+						"qcom,platform-supply-entries");
+	if (!supply_root_node) {
+		pr_debug("no supply entry present\n");
+		return rc;
+	}
+
+	hw->num_vreg = of_get_available_child_count(supply_root_node);
+	if (hw->num_vreg == 0) {
+		pr_debug("no vreg present\n");
+		return rc;
+	}
+
+	pr_debug("vreg found. count=%d\n", hw->num_vreg);
+	hw->vreg_config = devm_kzalloc(&pdev->dev, sizeof(struct cec_vreg) *
+						hw->num_vreg, GFP_KERNEL);
+	if (!hw->vreg_config) {
+		rc = -ENOMEM;
+		return rc;
+	}
+
+	for_each_available_child_of_node(supply_root_node, supply_node) {
+		const char *st = NULL;
+
+		rc = of_property_read_string(supply_node,
+						"qcom,supply-name", &st);
+		if (rc) {
+			pr_err("error reading name. rc=%d\n", rc);
+			goto error;
+		}
+
+		strlcpy(hw->vreg_config[i].vreg_name, st,
+					sizeof(hw->vreg_config[i].vreg_name));
+
+		rc = of_property_read_u32(supply_node,
+					"qcom,supply-min-voltage", &tmp);
+		if (rc) {
+			pr_err("error reading min volt. rc=%d\n", rc);
+			goto error;
+		}
+		hw->vreg_config[i].min_voltage = tmp;
+
+		rc = of_property_read_u32(supply_node,
+					"qcom,supply-max-voltage", &tmp);
+		if (rc) {
+			pr_err("error reading max volt. rc=%d\n", rc);
+			goto error;
+		}
+		hw->vreg_config[i].max_voltage = tmp;
+
+		rc = of_property_read_u32(supply_node,
+					"qcom,supply-enable-load", &tmp);
+		if (rc) {
+			pr_err("error reading enable load. rc=%d\n", rc);
+			goto error;
+		}
+		hw->vreg_config[i].enable_load = tmp;
+
+		rc = of_property_read_u32(supply_node,
+					"qcom,supply-disable-load", &tmp);
+		if (rc) {
+			pr_err("error reading disable load. rc=%d\n", rc);
+			goto error;
+		}
+		hw->vreg_config[i].disable_load = tmp;
+
+		rc = of_property_read_u32(supply_node,
+					"qcom,supply-pre-on-sleep", &tmp);
+		if (rc)
+			pr_debug("no supply pre sleep value. rc=%d\n", rc);
+
+		hw->vreg_config[i].pre_on_sleep = (!rc ? tmp : 0);
+
+		rc = of_property_read_u32(supply_node,
+					"qcom,supply-pre-off-sleep", &tmp);
+		if (rc)
+			pr_debug("no supply pre sleep value. rc=%d\n", rc);
+
+		hw->vreg_config[i].pre_off_sleep = (!rc ? tmp : 0);
+
+		rc = of_property_read_u32(supply_node,
+					"qcom,supply-post-on-sleep", &tmp);
+		if (rc)
+			pr_debug("no supply post sleep value. rc=%d\n", rc);
+
+		hw->vreg_config[i].post_on_sleep = (!rc ? tmp : 0);
+
+		rc = of_property_read_u32(supply_node,
+					"qcom,supply-post-off-sleep", &tmp);
+		if (rc)
+			pr_debug("no supply post sleep value. rc=%d\n", rc);
+
+		hw->vreg_config[i].post_off_sleep = (!rc ? tmp : 0);
+
+		pr_debug("%s min=%d, max=%d, enable=%d, disable=%d, preonsleep=%d, postonsleep=%d, preoffsleep=%d, postoffsleep=%d\n",
+					hw->vreg_config[i].vreg_name,
+					hw->vreg_config[i].min_voltage,
+					hw->vreg_config[i].max_voltage,
+					hw->vreg_config[i].enable_load,
+					hw->vreg_config[i].disable_load,
+					hw->vreg_config[i].pre_on_sleep,
+					hw->vreg_config[i].post_on_sleep,
+					hw->vreg_config[i].pre_off_sleep,
+					hw->vreg_config[i].post_off_sleep);
+		++i;
+
+		rc = 0;
+	}
+
+	rc = sde_hdmi_cec_config_vreg(&pdev->dev,
+		hw->vreg_config, hw->num_vreg, true);
+	if (rc)
+		goto error;
+
+	pr_debug("get dt supply\n");
+
+	return rc;
+
+error:
+	if (hw->vreg_config) {
+		devm_kfree(&pdev->dev, hw->vreg_config);
+		hw->vreg_config = NULL;
+		hw->num_vreg = 0;
+	}
+
+	return rc;
+}
+
+static void sde_hdmi_cec_put_dt_supply(struct platform_device *pdev,
+				struct cec_hw_resource *hw)
+{
+	if (!pdev || !hw) {
+		pr_err("invalid input param pdev:%pK hw:%pK\n", pdev, hw);
+		return;
+	}
+
+	sde_hdmi_cec_config_vreg(&pdev->dev,
+		hw->vreg_config, hw->num_vreg, false);
+
+	if (hw->vreg_config) {
+		devm_kfree(&pdev->dev, hw->vreg_config);
+		hw->vreg_config = NULL;
+	}
+	hw->num_vreg = 0;
+
+	pr_debug("put dt supply\n");
+}
+
+static int sde_hdmi_cec_get_dt_pinres(struct platform_device *pdev,
+				struct cec_hw_resource *hw)
+{
+	if (!pdev || !hw) {
+		pr_err("invalid input param pdev:%pK hw:%pK\n", pdev, hw);
+		return -EINVAL;
+	}
+
+	hw->pin_res.pinctrl = devm_pinctrl_get(&pdev->dev);
+	if (IS_ERR_OR_NULL(hw->pin_res.pinctrl)) {
+		pr_err("failed to get pinctrl\n");
+		/* PTR_ERR(NULL) would be 0; map a NULL handle to an error */
+		return hw->pin_res.pinctrl ?
+			PTR_ERR(hw->pin_res.pinctrl) : -ENODEV;
+	}
+
+	hw->pin_res.state_active =
+		pinctrl_lookup_state(hw->pin_res.pinctrl, "cec_active");
+	if (IS_ERR_OR_NULL(hw->pin_res.state_active))
+		pr_debug("cannot get active pinstate\n");
+
+	hw->pin_res.state_sleep =
+		pinctrl_lookup_state(hw->pin_res.pinctrl, "cec_sleep");
+	if (IS_ERR_OR_NULL(hw->pin_res.state_sleep))
+		pr_debug("cannot get sleep pinstate\n");
+
+	pr_debug("get dt pinres data\n");
+
+	return 0;
+}
+
+static void sde_hdmi_cec_put_dt_pinres(struct platform_device *pdev,
+				struct cec_hw_resource *hw)
+{
+	if (!pdev || !hw) {
+		pr_err("invalid input param pdev:%pK hw:%pK\n", pdev, hw);
+		return;
+	}
+
+	if (!IS_ERR_OR_NULL(hw->pin_res.pinctrl))
+		devm_pinctrl_put(hw->pin_res.pinctrl);
+}
+
+static void sde_hdmi_cec_deinit_power(struct platform_device *pdev,
+	struct cec_hw_resource *hw)
+{
+	if (!pdev || !hw) {
+		pr_err("invalid input param pdev:%pK hw:%pK\n", pdev, hw);
+		return;
+	}
+
+	sde_hdmi_cec_put_dt_supply(pdev, hw);
+	sde_hdmi_cec_put_dt_clock(pdev, hw);
+	sde_hdmi_cec_put_dt_pinres(pdev, hw);
+
+	pr_debug("put dt power data\n");
+}
+
+static int sde_hdmi_cec_init_power(struct platform_device *pdev,
+	struct cec_hw_resource *hw)
+{
+	int rc = 0;
+
+	if (!pdev || !hw) {
+		pr_err("invalid input param pdev:%pK hw:%pK\n", pdev, hw);
+		return -EINVAL;
+	}
+
+	/* VREG */
+	rc = sde_hdmi_cec_get_dt_supply(pdev, hw);
+	if (rc) {
+		pr_err("get_dt_supply failed. rc=%d\n", rc);
+		goto error;
+	}
+
+	/* Clock */
+	rc = sde_hdmi_cec_get_dt_clock(pdev, hw);
+	if (rc) {
+		pr_err("get_dt_clock failed. rc=%d\n", rc);
+		goto error;
+	}
+
+	/* Pinctrl */
+	rc = sde_hdmi_cec_get_dt_pinres(pdev, hw);
+	if (rc) {
+		pr_err("get_dt_pinres failed. rc=%d\n", rc);
+		goto error;
+	}
+
+	pr_debug("get dt power data\n");
+
+	return rc;
+
+error:
+	sde_hdmi_cec_deinit_power(pdev, hw);
+	return rc;
+}
+
+static int sde_hdmi_cec_init_io(struct platform_device *pdev,
+	struct cec_hw_resource *hw)
+{
+	struct resource *res = NULL;
+	struct cec_io_data *io_data = NULL;
+	const char *reg_name;
+
+	if (!pdev || !hw) {
+		pr_err("invalid input\n");
+		return -EINVAL;
+	}
+
+	if (of_property_read_string(pdev->dev.of_node, "reg-names",
+			&reg_name)) {
+		pr_err("cec reg not defined\n");
+		return -ENODEV;
+	}
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, reg_name);
+	if (!res) {
+		pr_err("%s get_res_byname failed\n", reg_name);
+		return -ENODEV;
+	}
+
+	io_data = &hw->io_res;
+	io_data->len = (u32)resource_size(res);
+	io_data->base = ioremap(res->start, io_data->len);
+	if (!io_data->base) {
+		pr_err("%s ioremap failed\n", reg_name);
+		return -EIO;
+	}
+
+	return 0;
+}
+
+static void sde_hdmi_cec_deinit_io(struct platform_device *pdev,
+	struct cec_hw_resource *hw)
+{
+	struct cec_io_data *io_data = NULL;
+
+	if (!pdev || !hw) {
+		pr_err("invalid input\n");
+		return;
+	}
+
+	io_data = &hw->io_res;
+
+	if (io_data->base) {
+		iounmap(io_data->base);
+		io_data->base = NULL;
+	}
+	io_data->len = 0;
+}
+
+int sde_hdmi_cec_init_resource(struct platform_device *pdev,
+	struct cec_hw_resource *hw)
+{
+	int rc = 0;
+
+	/* power */
+	rc = sde_hdmi_cec_init_power(pdev, hw);
+	if (rc)
+		return rc;
+
+	/* io */
+	rc = sde_hdmi_cec_init_io(pdev, hw);
+	if (rc)
+		goto io_error;
+
+	pr_debug("cec init resource\n");
+
+	return rc;
+
+io_error:
+	sde_hdmi_cec_deinit_power(pdev, hw);
+	return rc;
+}
+
+void sde_hdmi_cec_deinit_resource(struct platform_device *pdev,
+	struct cec_hw_resource *hw)
+{
+	sde_hdmi_cec_deinit_power(pdev, hw);
+	sde_hdmi_cec_deinit_io(pdev, hw);
+
+	pr_debug("cec deinit resource\n");
+}
+
+int sde_hdmi_cec_enable_power(struct cec_hw_resource *hw, bool enable)
+{
+	int rc = 0;
+
+	rc = sde_hdmi_cec_enable_vreg(hw, enable);
+	if (rc)
+		return rc;
+
+	rc = sde_hdmi_cec_pinctrl_enable(hw, enable);
+	if (rc)
+		return rc;
+
+	rc = sde_hdmi_cec_enable_clk(hw, enable);
+	if (rc)
+		return rc;
+
+	pr_debug("cec power enable = %d\n", enable);
+
+	return rc;
+}
diff -Nruw linux-4.4.115/drivers/media/platform/msm/sde/cec/sde_hdmi_cec_util.h linux-4.4.115-fbx/drivers/media/platform/msm/sde/cec/sde_hdmi_cec_util.h
--- linux-4.4.115/drivers/media/platform/msm/sde/cec/sde_hdmi_cec_util.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/media/platform/msm/sde/cec/sde_hdmi_cec_util.h	2019-01-22 16:16:24.455255027 +0100
@@ -0,0 +1,93 @@
+/* Copyright (c) 2012, 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __SDE_HDMI_CEC_UTIL_H__
+#define __SDE_HDMI_CEC_UTIL_H__
+
+#include <linux/gpio.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/types.h>
+
+#ifdef DEBUG
+#define CEC_REG_WRITE(hw, off, val) \
+	sde_hdmi_cec_reg_w(&(hw)->io_res, (off), (val), true)
+#define CEC_REG_READ(hw, off) \
+	sde_hdmi_cec_reg_r(&(hw)->io_res, (off), true)
+#else
+#define CEC_REG_WRITE(hw, off, val) \
+	sde_hdmi_cec_reg_w(&(hw)->io_res, (off), (val), false)
+#define CEC_REG_READ(hw, off) \
+	sde_hdmi_cec_reg_r(&(hw)->io_res, (off), false)
+#endif
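+
+/*
+ * Building with DEBUG defined makes every CEC register access trace its
+ * address and value through the pr_debug() path in
+ * sde_hdmi_cec_reg_w()/sde_hdmi_cec_reg_r().
+ */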
+
+struct cec_io_data {
+	u32 len;
+	void __iomem *base;
+};
+
+enum cec_vreg_type {
+	CEC_REG_LDO,
+	CEC_REG_VS,
+};
+
+struct cec_vreg {
+	struct regulator *vreg; /* vreg handle */
+	char vreg_name[32];
+	int min_voltage;
+	int max_voltage;
+	int enable_load;
+	int disable_load;
+	int pre_on_sleep;
+	int post_on_sleep;
+	int pre_off_sleep;
+	int post_off_sleep;
+};
+
+struct cec_clk {
+	struct clk *clk; /* clk handle */
+	char clk_name[32];
+};
+
+struct cec_pin_res {
+	struct pinctrl *pinctrl;
+	struct pinctrl_state *state_active;
+	struct pinctrl_state *state_sleep;
+};
+
+struct cec_hw_resource {
+	/* power */
+	unsigned int num_vreg;
+	struct cec_vreg *vreg_config;
+	unsigned int num_clk;
+	struct cec_clk *clk_config;
+	struct cec_pin_res pin_res;
+
+	/* io */
+	struct cec_io_data io_res;
+};
+
+void sde_hdmi_cec_reg_w(struct cec_io_data *io,
+	u32 offset, u32 value, bool debug);
+u32 sde_hdmi_cec_reg_r(struct cec_io_data *io, u32 offset, bool debug);
+void sde_hdmi_cec_reg_dump(void __iomem *base, u32 length, const char *prefix,
+	bool debug);
+
+int sde_hdmi_cec_init_resource(struct platform_device *pdev,
+	struct cec_hw_resource *hw);
+void sde_hdmi_cec_deinit_resource(struct platform_device *pdev,
+	struct cec_hw_resource *hw);
+int sde_hdmi_cec_enable_power(struct cec_hw_resource *hw, bool enable);
+
+#endif /* __SDE_HDMI_CEC_UTIL_H__ */
+
diff -Nruw linux-4.4.115/drivers/media/platform/msm/sde/Kconfig linux-4.4.115-fbx/drivers/media/platform/msm/sde/Kconfig
--- linux-4.4.115/drivers/media/platform/msm/sde/Kconfig	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/media/platform/msm/sde/Kconfig	2019-05-31 20:58:54.918934932 +0200
@@ -0,0 +1,27 @@
+config MSM_SDE_ROTATOR
+	bool "QTI V4L2 based SDE Rotator"
+	depends on ARCH_QCOM && VIDEO_V4L2
+	select V4L2_MEM2MEM_DEV
+	select VIDEOBUF2_CORE
+	select SW_SYNC if SYNC
+	---help---
+	  Enable support for the V4L2-based SDE rotator driver.
+
+config MSM_SDE_ROTATOR_EVTLOG_DEBUG
+	depends on MSM_SDE_ROTATOR
+	bool "Enable sde rotator debugging"
+	---help---
+	  SDE rotator debugging provides support for rotator debug features:
+	  dumping rotator registers on driver errors, panicking the driver
+	  on fatal errors, and logging some rotator driver messages into an
+	  internal buffer (which avoids logging overhead).
+
+config MSM_SDE_HDMI_CEC
+	bool "QTI SDE HDMI CEC Driver"
+	depends on DRM_SDE_HDMI
+	select HDMI_CEC
+	---help---
+	  The HDMI CEC driver provides support to enable HDMI CEC features
+	  which allows various audiovisual products to communicate using HDMI
+	  CEC links. CEC is a protocol defined in the HDMI spec which
+	  consists of both low-level and high-level protocol definitions.
diff -Nruw linux-4.4.115/drivers/media/platform/msm/sde/Makefile linux-4.4.115-fbx/drivers/media/platform/msm/sde/Makefile
--- linux-4.4.115/drivers/media/platform/msm/sde/Makefile	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/media/platform/msm/sde/Makefile	2019-01-22 16:16:24.451254991 +0100
@@ -0,0 +1,2 @@
+obj-$(CONFIG_MSM_SDE_ROTATOR)	    += rotator/
+obj-$(CONFIG_MSM_SDE_HDMI_CEC)      += cec/
diff -Nruw linux-4.4.115/drivers/media/platform/msm/sde/rotator/Makefile linux-4.4.115-fbx/drivers/media/platform/msm/sde/rotator/Makefile
--- linux-4.4.115/drivers/media/platform/msm/sde/rotator/Makefile	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/media/platform/msm/sde/rotator/Makefile	2019-01-22 16:16:24.455255027 +0100
@@ -0,0 +1,27 @@
+ccflags-y += -I$(src) -Idrivers/staging/android
+
+obj-y := \
+		sde_rotator_dev.o \
+		sde_rotator_core.o \
+		sde_rotator_base.o \
+		sde_rotator_formats.o \
+		sde_rotator_util.o \
+		sde_rotator_io_util.o \
+		sde_rotator_smmu.o
+
+obj-y += \
+		sde_rotator_r1_wb.o \
+		sde_rotator_r1_pipe.o \
+		sde_rotator_r1_ctl.o \
+		sde_rotator_r1.o
+
+obj-y += \
+		sde_rotator_r3.o
+
+obj-$(CONFIG_SYNC) += \
+		sde_rotator_sync.o
+
+obj-$(CONFIG_DEBUG_FS) += \
+		sde_rotator_debug.o \
+		sde_rotator_r1_debug.o \
+		sde_rotator_r3_debug.o
diff -Nruw linux-4.4.115/drivers/media/platform/msm/tspp/Kconfig linux-4.4.115-fbx/drivers/media/platform/msm/tspp/Kconfig
--- linux-4.4.115/drivers/media/platform/msm/tspp/Kconfig	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/media/platform/msm/tspp/Kconfig	2019-01-22 16:16:24.459255064 +0100
@@ -0,0 +1,12 @@
+config DVB_MSM_TSPP
+	depends on ARCH_QCOM
+	depends on PINCTRL && DVB_CORE && I2C
+	select DVB_SI2168 if MEDIA_SUBDRV_AUTOSELECT
+	select MEDIA_TUNER_SI2157 if MEDIA_SUBDRV_AUTOSELECT
+	tristate "MSM TSPP (Transport Stream Packet Processor) DVB Support"
+	---help---
+	  This adds support for DVB front-end cards connected to TS inputs
+	  of the MSM TSPP (Transport Stream Packet Processor).
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called msm_tspp.
diff -Nruw linux-4.4.115/drivers/media/platform/msm/tspp/Makefile linux-4.4.115-fbx/drivers/media/platform/msm/tspp/Makefile
--- linux-4.4.115/drivers/media/platform/msm/tspp/Makefile	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/media/platform/msm/tspp/Makefile	2019-01-22 16:16:24.459255064 +0100
@@ -0,0 +1,7 @@
+obj-$(CONFIG_DVB_MSM_TSPP) += msm_tspp.o
+
+msm_tspp-y += tspp-core.o tspp-dvb.o
+
+ccflags-y += -Idrivers/media/dvb-core/
+ccflags-y += -Idrivers/media/dvb-frontends
+ccflags-y += -Idrivers/media/tuners
diff -Nruw linux-4.4.115/drivers/media/platform/msm/tspp/mpq_dvb_debug.h linux-4.4.115-fbx/drivers/media/platform/msm/tspp/mpq_dvb_debug.h
--- linux-4.4.115/drivers/media/platform/msm/tspp/mpq_dvb_debug.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/media/platform/msm/tspp/mpq_dvb_debug.h	2019-01-22 16:16:24.459255064 +0100
@@ -0,0 +1,41 @@
+/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _MPQ_DVB_DEBUG_H
+#define _MPQ_DVB_DEBUG_H
+
+/* Enable this line if you want to output debug printouts */
+#define MPG_DVB_DEBUG_ENABLE
+
+#undef MPQ_DVB_DBG_PRINT		/* undef it, just in case */
+
+#ifdef MPG_DVB_DEBUG_ENABLE
+#define MPQ_DVB_ERR_PRINT(fmt, args...) pr_err(fmt, ## args)
+#define MPQ_DVB_WARN_PRINT(fmt, args...) pr_warn(fmt, ## args)
+#define MPQ_DVB_NOTICE_PRINT(fmt, args...) pr_notice(fmt, ## args)
+#define MPQ_DVB_DBG_PRINT(fmt, args...) pr_debug(fmt, ## args)
+#else  /* MPG_DVB_DEBUG_ENABLE */
+#define MPQ_DVB_ERR_PRINT(fmt, args...)
+#define MPQ_DVB_WARN_PRINT(fmt, args...)
+#define MPQ_DVB_NOTICE_PRINT(fmt, args...)
+#define MPQ_DVB_DBG_PRINT(fmt, args...)
+#endif /* MPG_DVB_DEBUG_ENABLE */
+
+
+/*
+ * The following can be used to disable specific printout
+ * by adding a letter to the end of MPQ_DVB_DBG_PRINT
+ */
+#undef MPQ_DVB_DBG_PRINTT
+#define MPQ_DVB_DBG_PRINTT(fmt, args...)
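+/*
+ * Example: renaming a noisy call site from MPQ_DVB_DBG_PRINT(...) to
+ * MPQ_DVB_DBG_PRINTT(...) compiles it out while keeping the statement.
+ */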
+
+#endif /* _MPQ_DVB_DEBUG_H */
diff -Nruw linux-4.4.115/drivers/media/platform/msm/tspp/tspp-core.c linux-4.4.115-fbx/drivers/media/platform/msm/tspp/tspp-core.c
--- linux-4.4.115/drivers/media/platform/msm/tspp/tspp-core.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/media/platform/msm/tspp/tspp-core.c	2019-01-22 16:16:24.459255064 +0100
@@ -0,0 +1,3294 @@
+/* Copyright (c) 2011-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>        /* Just for modules */
+#include <linux/kernel.h>        /* Only for KERN_INFO */
+#include <linux/err.h>           /* Error macros */
+#include <linux/list.h>          /* Linked list */
+#include <linux/cdev.h>
+#include <linux/init.h>          /* Needed for the macros */
+#include <linux/io.h>            /* IO macros */
+#include <linux/device.h>        /* Device drivers need this */
+#include <linux/sched.h>         /* Externally defined globals */
+#include <linux/pm_runtime.h>    /* Runtime power management */
+#include <linux/fs.h>
+#include <linux/uaccess.h>       /* copy_to_user */
+#include <linux/slab.h>          /* kfree, kzalloc */
+#include <linux/ioport.h>        /* XXX_ mem_region */
+#include <linux/dma-mapping.h>   /* dma_XXX */
+#include <linux/dmapool.h>       /* DMA pools */
+#include <linux/delay.h>         /* msleep */
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/poll.h>          /* poll() file op */
+#include <linux/wait.h>          /* wait() macros, sleeping */
+#include <linux/bitops.h>        /* BIT() macro */
+#include <linux/regulator/consumer.h>
+#include <linux/regulator/rpm-smd-regulator.h>
+#include <linux/msm-sps.h>            /* BAM stuff */
+#include <linux/wakelock.h>      /* Locking functions */
+#include <linux/timer.h>         /* Timer services */
+#include <linux/jiffies.h>       /* Jiffies counter */
+#include <linux/debugfs.h>
+#include <linux/i2c.h>
+#include <linux/of.h>
+#include <linux/of_gpio.h>
+#include <linux/string.h>
+#include <linux/msm-bus.h>
+#include <linux/interrupt.h>	/* tasklet */
+#include <asm/arch_timer.h> /* Timer */
+#include <linux/avtimer_kernel.h> /* Timer */
+
+#include "tspp-core.h"
+#include "tspp-dvb.h"
+
+/*
+ * General defines
+ */
+#define TSPP_TSIF_INSTANCES            2
+#define TSPP_GPIOS_PER_TSIF            4
+#define TSPP_FILTER_TABLES             3
+#define TSPP_MAX_DEVICES               1
+#define TSPP_NUM_CHANNELS              16
+#define TSPP_NUM_PRIORITIES            16
+#define TSPP_NUM_KEYS                  8
+#define INVALID_CHANNEL                0xFFFFFFFF
+#define TSPP_BAM_DEFAULT_IPC_LOGLVL    2
+/*
+ * BAM descriptor FIFO size (in number of descriptors).
+ * Max number of descriptors allowed by SPS which is 8K-1.
+ */
+#define TSPP_SPS_DESCRIPTOR_COUNT      (8 * 1024 - 1)
+#define TSPP_PACKET_LENGTH             188
+#define TSPP_MIN_BUFFER_SIZE           (TSPP_PACKET_LENGTH)
+
+/* Max descriptor buffer size allowed by SPS */
+#define TSPP_MAX_BUFFER_SIZE           (32 * 1024 - 1)
+
+/*
+ * Returns whether to use DMA pool for TSPP output buffers.
+ * For buffers smaller than page size, using DMA pool
+ * provides better memory utilization as dma_alloc_coherent
+ * allocates minimum of page size.
+ */
+#define TSPP_USE_DMA_POOL(buff_size)   ((buff_size) < PAGE_SIZE)
+
+/*
+ * Max allowed TSPP buffers/descriptors.
+ * If SPS desc FIFO holds X descriptors, we can queue up to X-1 descriptors.
+ */
+#define TSPP_NUM_BUFFERS               (TSPP_SPS_DESCRIPTOR_COUNT - 1)
+#define TSPP_TSIF_DEFAULT_TIME_LIMIT   60
+#define SPS_DESCRIPTOR_SIZE            8
+#define MIN_ACCEPTABLE_BUFFER_COUNT    2
+#define TSPP_DEBUG(msg...)
+
+/*
+ * TSIF register offsets
+ */
+#define TSIF_STS_CTL_OFF               (0x0)
+#define TSIF_TIME_LIMIT_OFF            (0x4)
+#define TSIF_CLK_REF_OFF               (0x8)
+#define TSIF_LPBK_FLAGS_OFF            (0xc)
+#define TSIF_LPBK_DATA_OFF            (0x10)
+#define TSIF_TEST_CTL_OFF             (0x14)
+#define TSIF_TEST_MODE_OFF            (0x18)
+#define TSIF_TEST_RESET_OFF           (0x1c)
+#define TSIF_TEST_EXPORT_OFF          (0x20)
+#define TSIF_TEST_CURRENT_OFF         (0x24)
+#define TSIF_TTS_CTL_OFF	      (0x38)
+
+#define TSIF_DATA_PORT_OFF            (0x100)
+
+/* bits for TSIF_STS_CTL register */
+#define TSIF_STS_CTL_EN_IRQ       BIT(28)
+#define TSIF_STS_CTL_PACK_AVAIL   BIT(27)
+#define TSIF_STS_CTL_1ST_PACKET   BIT(26)
+#define TSIF_STS_CTL_OVERFLOW     BIT(25)
+#define TSIF_STS_CTL_LOST_SYNC    BIT(24)
+#define TSIF_STS_CTL_TIMEOUT      BIT(23)
+#define TSIF_STS_CTL_INV_SYNC     BIT(21)
+#define TSIF_STS_CTL_INV_NULL     BIT(20)
+#define TSIF_STS_CTL_INV_ERROR    BIT(19)
+#define TSIF_STS_CTL_INV_ENABLE   BIT(18)
+#define TSIF_STS_CTL_INV_DATA     BIT(17)
+#define TSIF_STS_CTL_INV_CLOCK    BIT(16)
+#define TSIF_STS_CTL_SPARE        BIT(15)
+#define TSIF_STS_CTL_EN_NULL      BIT(11)
+#define TSIF_STS_CTL_EN_ERROR     BIT(10)
+#define TSIF_STS_CTL_LAST_BIT     BIT(9)
+#define TSIF_STS_CTL_EN_TIME_LIM  BIT(8)
+#define TSIF_STS_CTL_EN_TCR       BIT(7)
+#define TSIF_STS_CTL_TEST_MODE    BIT(6)
+#define TSIF_STS_CTL_MODE_2       BIT(5)
+#define TSIF_STS_CTL_EN_DM        BIT(4)
+#define TSIF_STS_CTL_STOP         BIT(3)
+#define TSIF_STS_CTL_START        BIT(0)
+
+/* bits for TSIF_TTS_CTRL register */
+#define TSIF_TTS_CTL_TTS_ENDIANNESS	BIT(4)
+#define TSIF_TTS_CTL_TTS_SOURCE		BIT(3)
+#define TSIF_TTS_CTL_TTS_LENGTH_1	BIT(1)
+#define TSIF_TTS_CTL_TTS_LENGTH_0	BIT(0)
+
+/*
+ * TSPP register offsets
+ */
+#define TSPP_RST			0x00
+#define TSPP_CLK_CONTROL		0x04
+#define TSPP_CONFIG			0x08
+#define TSPP_CONTROL			0x0C
+#define TSPP_PS_DISABLE			0x10
+#define TSPP_MSG_IRQ_STATUS		0x14
+#define TSPP_MSG_IRQ_MASK		0x18
+#define TSPP_IRQ_STATUS			0x1C
+#define TSPP_IRQ_MASK			0x20
+#define TSPP_IRQ_CLEAR			0x24
+#define TSPP_PIPE_ERROR_STATUS(_n)	(0x28 + (_n << 2))
+#define TSPP_STATUS			0x68
+#define TSPP_CURR_TSP_HEADER		0x6C
+#define TSPP_CURR_PID_FILTER		0x70
+#define TSPP_SYSTEM_KEY(_n)		(0x74 + (_n << 2))
+#define TSPP_CBC_INIT_VAL(_n)		(0x94 + (_n << 2))
+#define TSPP_DATA_KEY_RESET		0x9C
+#define TSPP_KEY_VALID			0xA0
+#define TSPP_KEY_ERROR			0xA4
+#define TSPP_TEST_CTRL			0xA8
+#define TSPP_VERSION			0xAC
+#define TSPP_GENERICS			0xB0
+#define TSPP_NOP			0xB4
+
+/*
+ * Register bit definitions
+ */
+/* TSPP_RST */
+#define TSPP_RST_RESET                    BIT(0)
+
+/* TSPP_CLK_CONTROL	*/
+#define TSPP_CLK_CONTROL_FORCE_CRYPTO     BIT(9)
+#define TSPP_CLK_CONTROL_FORCE_PES_PL     BIT(8)
+#define TSPP_CLK_CONTROL_FORCE_PES_AF     BIT(7)
+#define TSPP_CLK_CONTROL_FORCE_RAW_CTRL   BIT(6)
+#define TSPP_CLK_CONTROL_FORCE_PERF_CNT   BIT(5)
+#define TSPP_CLK_CONTROL_FORCE_CTX_SEARCH BIT(4)
+#define TSPP_CLK_CONTROL_FORCE_TSP_PROC   BIT(3)
+#define TSPP_CLK_CONTROL_FORCE_CONS_AHB2MEM BIT(2)
+#define TSPP_CLK_CONTROL_FORCE_TS_AHB2MEM BIT(1)
+#define TSPP_CLK_CONTROL_SET_CLKON        BIT(0)
+
+/* TSPP_CONFIG	*/
+#define TSPP_CONFIG_SET_PACKET_LENGTH(_a, _b) (_a = (_a & 0xF0) | \
+((_b & 0xF) << 8))
+#define TSPP_CONFIG_GET_PACKET_LENGTH(_a) ((_a >> 8) & 0xF)
+#define TSPP_CONFIG_DUP_WITH_DISC_EN		BIT(7)
+#define TSPP_CONFIG_PES_SYNC_ERROR_MASK   BIT(6)
+#define TSPP_CONFIG_PS_LEN_ERR_MASK       BIT(5)
+#define TSPP_CONFIG_PS_CONT_ERR_UNSP_MASK BIT(4)
+#define TSPP_CONFIG_PS_CONT_ERR_MASK      BIT(3)
+#define TSPP_CONFIG_PS_DUP_TSP_MASK       BIT(2)
+#define TSPP_CONFIG_TSP_ERR_IND_MASK      BIT(1)
+#define TSPP_CONFIG_TSP_SYNC_ERR_MASK     BIT(0)
+
+/* TSPP_CONTROL */
+#define TSPP_CONTROL_PID_FILTER_LOCK      BIT(5)
+#define TSPP_CONTROL_FORCE_KEY_CALC       BIT(4)
+#define TSPP_CONTROL_TSP_CONS_SRC_DIS     BIT(3)
+#define TSPP_CONTROL_TSP_TSIF1_SRC_DIS    BIT(2)
+#define TSPP_CONTROL_TSP_TSIF0_SRC_DIS    BIT(1)
+#define TSPP_CONTROL_PERF_COUNT_INIT      BIT(0)
+
+/* TSPP_MSG_IRQ_STATUS + TSPP_MSG_IRQ_MASK */
+#define TSPP_MSG_TSPP_IRQ                 BIT(2)
+#define TSPP_MSG_TSIF_1_IRQ               BIT(1)
+#define TSPP_MSG_TSIF_0_IRQ               BIT(0)
+
+/* TSPP_IRQ_STATUS + TSPP_IRQ_MASK + TSPP_IRQ_CLEAR */
+#define TSPP_IRQ_STATUS_TSP_RD_CMPL		BIT(19)
+#define TSPP_IRQ_STATUS_KEY_ERROR		BIT(18)
+#define TSPP_IRQ_STATUS_KEY_SWITCHED_BAD	BIT(17)
+#define TSPP_IRQ_STATUS_KEY_SWITCHED		BIT(16)
+#define TSPP_IRQ_STATUS_PS_BROKEN(_n)		BIT((_n))
+
+/* TSPP_PIPE_ERROR_STATUS */
+#define TSPP_PIPE_PES_SYNC_ERROR		BIT(3)
+#define TSPP_PIPE_PS_LENGTH_ERROR		BIT(2)
+#define TSPP_PIPE_PS_CONTINUITY_ERROR		BIT(1)
+#define TSPP_PIP_PS_LOST_START			BIT(0)
+
+/* TSPP_STATUS			*/
+#define TSPP_STATUS_TSP_PKT_AVAIL		BIT(10)
+#define TSPP_STATUS_TSIF1_DM_REQ		BIT(6)
+#define TSPP_STATUS_TSIF0_DM_REQ		BIT(2)
+#define TSPP_CURR_FILTER_TABLE			BIT(0)
+
+/* TSPP_GENERICS		*/
+#define TSPP_GENERICS_CRYPTO_GEN		BIT(12)
+#define TSPP_GENERICS_MAX_CONS_PIPES		BIT(7)
+#define TSPP_GENERICS_MAX_PIPES			BIT(2)
+#define TSPP_GENERICS_TSIF_1_GEN		BIT(1)
+#define TSPP_GENERICS_TSIF_0_GEN		BIT(0)
+
+/*
+ * TSPP memory regions
+ */
+#define TSPP_PID_FILTER_TABLE0      0x800
+#define TSPP_PID_FILTER_TABLE1      0x880
+#define TSPP_PID_FILTER_TABLE2      0x900
+#define TSPP_GLOBAL_PERFORMANCE     0x980 /* see tspp_global_performance */
+#define TSPP_PIPE_CONTEXT           0x990 /* see tspp_pipe_context */
+#define TSPP_PIPE_PERFORMANCE       0x998 /* see tspp_pipe_performance */
+#define TSPP_TSP_BUFF_WORD(_n)      (0xC10 + (_n << 2))
+#define TSPP_DATA_KEY               0xCD0
+
+struct debugfs_entry {
+	const char *name;
+	mode_t mode;
+	int offset;
+};
+
+static const struct debugfs_entry debugfs_tsif_regs[] = {
+	{"sts_ctl",             S_IRUGO | S_IWUSR, TSIF_STS_CTL_OFF},
+	{"time_limit",          S_IRUGO | S_IWUSR, TSIF_TIME_LIMIT_OFF},
+	{"clk_ref",             S_IRUGO | S_IWUSR, TSIF_CLK_REF_OFF},
+	{"lpbk_flags",          S_IRUGO | S_IWUSR, TSIF_LPBK_FLAGS_OFF},
+	{"lpbk_data",           S_IRUGO | S_IWUSR, TSIF_LPBK_DATA_OFF},
+	{"test_ctl",            S_IRUGO | S_IWUSR, TSIF_TEST_CTL_OFF},
+	{"test_mode",           S_IRUGO | S_IWUSR, TSIF_TEST_MODE_OFF},
+	{"test_reset",                    S_IWUSR, TSIF_TEST_RESET_OFF},
+	{"test_export",         S_IRUGO | S_IWUSR, TSIF_TEST_EXPORT_OFF},
+	{"test_current",        S_IRUGO,           TSIF_TEST_CURRENT_OFF},
+	{"data_port",           S_IRUSR,           TSIF_DATA_PORT_OFF},
+	{"tts_source",          S_IRUSR | S_IWUSR, TSIF_TTS_CTL_OFF},
+};
+
+static const struct debugfs_entry debugfs_tspp_regs[] = {
+	{"rst",                 S_IRUGO | S_IWUSR, TSPP_RST},
+	{"clk_control",         S_IRUGO | S_IWUSR, TSPP_CLK_CONTROL},
+	{"config",              S_IRUGO | S_IWUSR, TSPP_CONFIG},
+	{"control",             S_IRUGO | S_IWUSR, TSPP_CONTROL},
+	{"ps_disable",          S_IRUGO | S_IWUSR, TSPP_PS_DISABLE},
+	{"msg_irq_status",      S_IRUGO | S_IWUSR, TSPP_MSG_IRQ_STATUS},
+	{"msg_irq_mask",        S_IRUGO | S_IWUSR, TSPP_MSG_IRQ_MASK},
+	{"irq_status",          S_IRUGO | S_IWUSR, TSPP_IRQ_STATUS},
+	{"irq_mask",            S_IRUGO | S_IWUSR, TSPP_IRQ_MASK},
+	{"irq_clear",           S_IRUGO | S_IWUSR, TSPP_IRQ_CLEAR},
+	/* {"pipe_error_status",S_IRUGO | S_IWUSR, TSPP_PIPE_ERROR_STATUS}, */
+	{"status",              S_IRUGO | S_IWUSR, TSPP_STATUS},
+	{"curr_tsp_header",     S_IRUGO | S_IWUSR, TSPP_CURR_TSP_HEADER},
+	{"curr_pid_filter",     S_IRUGO | S_IWUSR, TSPP_CURR_PID_FILTER},
+	/* {"system_key",       S_IRUGO | S_IWUSR, TSPP_SYSTEM_KEY}, */
+	/* {"cbc_init_val",     S_IRUGO | S_IWUSR, TSPP_CBC_INIT_VAL}, */
+	{"data_key_reset",      S_IRUGO | S_IWUSR, TSPP_DATA_KEY_RESET},
+	{"key_valid",           S_IRUGO | S_IWUSR, TSPP_KEY_VALID},
+	{"key_error",           S_IRUGO | S_IWUSR, TSPP_KEY_ERROR},
+	{"test_ctrl",           S_IRUGO | S_IWUSR, TSPP_TEST_CTRL},
+	{"version",             S_IRUGO | S_IWUSR, TSPP_VERSION},
+	{"generics",            S_IRUGO | S_IWUSR, TSPP_GENERICS},
+	{"pid_filter_table0",   S_IRUGO | S_IWUSR, TSPP_PID_FILTER_TABLE0},
+	{"pid_filter_table1",   S_IRUGO | S_IWUSR, TSPP_PID_FILTER_TABLE1},
+	{"pid_filter_table2",   S_IRUGO | S_IWUSR, TSPP_PID_FILTER_TABLE2},
+	{"tsp_total_num",       S_IRUGO | S_IWUSR, TSPP_GLOBAL_PERFORMANCE},
+	{"tsp_ignored_num",     S_IRUGO | S_IWUSR, TSPP_GLOBAL_PERFORMANCE + 4},
+	{"tsp_err_ind_num",     S_IRUGO | S_IWUSR, TSPP_GLOBAL_PERFORMANCE + 8},
+	{"tsp_sync_err_num",   S_IRUGO | S_IWUSR, TSPP_GLOBAL_PERFORMANCE + 16},
+	{"pipe_context",        S_IRUGO | S_IWUSR, TSPP_PIPE_CONTEXT},
+	{"pipe_performance",    S_IRUGO | S_IWUSR, TSPP_PIPE_PERFORMANCE},
+	{"data_key",            S_IRUGO | S_IWUSR, TSPP_DATA_KEY}
+};
+
+struct tspp_pid_filter {
+	u32 filter;			/* see FILTER_ macros */
+	u32 config;			/* see FILTER_ macros */
+};
+
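+/*
+ * Accessor summary: in .filter, the PID mask sits in bits [12:0], the
+ * PID in bits [25:13] and the pipe process in bits [31:30]; in .config,
+ * the pipe number sits in bits [3:0] and the key number in bits [10:8].
+ */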
+/* tsp_info */
+#define FILTER_HEADER_ERROR_MASK          BIT(7)
+#define FILTER_TRANS_END_DISABLE          BIT(6)
+#define FILTER_DEC_ON_ERROR_EN            BIT(5)
+#define FILTER_DECRYPT                    BIT(4)
+#define FILTER_HAS_ENCRYPTION(_p)         (_p->config & FILTER_DECRYPT)
+#define FILTER_GET_PIPE_NUMBER0(_p)       (_p->config & 0xF)
+#define FILTER_SET_PIPE_NUMBER0(_p, _b)   (_p->config = \
+			(_p->config & ~0xF) | (_b & 0xF))
+#define FILTER_GET_PIPE_PROCESS0(_p)      ((_p->filter >> 30) & 0x3)
+#define FILTER_SET_PIPE_PROCESS0(_p, _b)  (_p->filter = \
+			(_p->filter & ~(0x3<<30)) | ((_b & 0x3) << 30))
+#define FILTER_GET_PIPE_PID(_p)           ((_p->filter >> 13) & 0x1FFF)
+#define FILTER_SET_PIPE_PID(_p, _b)       (_p->filter = \
+			(_p->filter & ~(0x1FFF<<13)) | ((_b & 0x1FFF) << 13))
+#define FILTER_GET_PID_MASK(_p)           (_p->filter & 0x1FFF)
+#define FILTER_SET_PID_MASK(_p, _b)       (_p->filter = \
+			(_p->filter & ~0x1FFF) | (_b & 0x1FFF))
+#define FILTER_GET_PIPE_PROCESS1(_p)      ((_p->config >> 30) & 0x3)
+#define FILTER_SET_PIPE_PROCESS1(_p, _b)  (_p->config = \
+			(_p->config & ~(0x3<<30)) | ((_b & 0x3) << 30))
+#define FILTER_GET_KEY_NUMBER(_p)         ((_p->config >> 8) & 0x7)
+#define FILTER_SET_KEY_NUMBER(_p, _b)     (_p->config = \
+			(_p->config & ~(0x7<<8)) | ((_b & 0x7) << 8))
+
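+/*
+ * Illustrative sketch (not part of the driver): the accessor macros above
+ * are composed on a zeroed entry. Routing PID 0x100 (exact match, all 13
+ * PID bits compared) to pipe 3 in RAW mode would look roughly like:
+ *
+ *	struct tspp_pid_filter f = { 0 };
+ *
+ *	FILTER_SET_PIPE_PID((&f), 0x100);
+ *	FILTER_SET_PID_MASK((&f), 0x1FFF);
+ *	FILTER_SET_PIPE_NUMBER0((&f), 3);
+ *	FILTER_SET_PIPE_PROCESS0((&f), TSPP_MODE_RAW);
+ *
+ * tspp_add_filter() below builds its table entries the same way.
+ */
+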
+struct tspp_global_performance_regs {
+	u32 tsp_total;
+	u32 tsp_ignored;
+	u32 tsp_error;
+	u32 tsp_sync;
+};
+
+struct tspp_pipe_context_regs {
+	u16 pes_bytes_left;
+	u16 count;
+	u32 tsif_suffix;
+} __packed;
+#define CONTEXT_GET_STATE(_a)					(_a & 0x3)
+#define CONTEXT_UNSPEC_LENGTH					BIT(11)
+#define CONTEXT_GET_CONT_COUNT(_a)			((_a >> 12) & 0xF)
+
+#define MSEC_TO_JIFFIES(msec)			((msec) * HZ / 1000)
+
+struct tspp_pipe_performance_regs {
+	u32 tsp_total;
+	u32 ps_duplicate_tsp;
+	u32 tsp_no_payload;
+	u32 tsp_broken_ps;
+	u32 ps_total_num;
+	u32 ps_continuity_error;
+	u32 ps_length_error;
+	u32 pes_sync_error;
+};
+
+struct tspp_tsif_device {
+	void __iomem *base;
+	u32 time_limit;
+	u32 ref_count;
+	enum tspp_tsif_mode mode;
+	int clock_inverse;
+	int data_inverse;
+	int sync_inverse;
+	int enable_inverse;
+	u32 tsif_irq;
+
+	int reset_gpio;
+	struct i2c_adapter *i2c_adapter;
+	struct i2c_client *i2c_client;
+
+	/* debugfs */
+	struct dentry *dent_tsif;
+	struct dentry *debugfs_tsif_regs[ARRAY_SIZE(debugfs_tsif_regs)];
+	u32 stat_rx;
+	u32 stat_overflow;
+	u32 stat_lost_sync;
+	u32 stat_timeout;
+	enum tsif_tts_source tts_source;
+	u32 lpass_timer_enable;
+};
+
+enum tspp_buf_state {
+	TSPP_BUF_STATE_EMPTY,	/* buffer has been allocated, but not waiting */
+	TSPP_BUF_STATE_WAITING, /* buffer is waiting to be filled */
+	TSPP_BUF_STATE_DATA,    /* buffer is not empty and can be read */
+	TSPP_BUF_STATE_LOCKED   /* buffer is being read by a client */
+};
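+
+/*
+ * Buffer lifecycle (summary): EMPTY -> WAITING (queued to the BAM by
+ * tspp_queue_buffer()) -> DATA (completion observed in the tasklet) ->
+ * LOCKED (handed to a client by tspp_get_buffer()) -> WAITING again once
+ * the client calls tspp_release_buffer().
+ */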
+
+struct tspp_mem_buffer {
+	struct tspp_mem_buffer *next;
+	struct sps_mem_buffer sps;
+	struct tspp_data_descriptor desc; /* buffer descriptor for kernel api */
+	enum tspp_buf_state state;
+	size_t filled;          /* how much data this buffer is holding */
+	int read_index;         /* where to start reading data from */
+};
+
+/* this represents each char device 'channel' */
+struct tspp_channel {
+	struct tspp_device *pdev; /* can use container_of instead? */
+	struct sps_pipe *pipe;
+	struct sps_connect config;
+	struct sps_register_event event;
+	struct tspp_mem_buffer *data;    /* list of buffers */
+	struct tspp_mem_buffer *read;    /* first buffer ready to be read */
+	struct tspp_mem_buffer *waiting; /* first outstanding transfer */
+	struct tspp_mem_buffer *locked;  /* buffer currently being read */
+	wait_queue_head_t in_queue; /* set when data is received */
+	u32 id;           /* channel id (0-15) */
+	int used;         /* is this channel in use? */
+	int key;          /* which encryption key index is used */
+	u32 buffer_size;  /* size of the sps transfer buffers */
+	u32 max_buffers;  /* how many buffers should be allocated */
+	u32 buffer_count; /* how many buffers are actually allocated */
+	u32 filter_count; /* how many filters have been added to this channel */
+	u32 int_freq;     /* generate interrupts every x descriptors */
+	enum tspp_source src;
+	enum tspp_mode mode;
+	tspp_notifier *notifier; /* used only with kernel api */
+	void *notify_data;       /* data to be passed with the notifier */
+	u32 expiration_period_ms; /* notification on partially filled buffers */
+	struct timer_list expiration_timer;
+	struct dma_pool *dma_pool;
+	tspp_memfree *memfree;   /* user defined memory free function */
+	void *user_info; /* user cookie passed to memory alloc/free function */
+};
+
+struct tspp_pid_filter_table {
+	struct tspp_pid_filter filter[TSPP_NUM_PRIORITIES];
+};
+
+struct tspp_key_entry {
+	u32 even_lsb;
+	u32 even_msb;
+	u32 odd_lsb;
+	u32 odd_msb;
+};
+
+struct tspp_key_table {
+	struct tspp_key_entry entry[TSPP_NUM_KEYS];
+};
+
+struct tspp_pinctrl {
+	struct pinctrl *pinctrl;
+
+	struct pinctrl_state *disabled;
+	struct pinctrl_state *tsif0_mode1;
+	struct pinctrl_state *tsif0_mode2;
+	struct pinctrl_state *tsif1_mode1;
+	struct pinctrl_state *tsif1_mode2;
+	struct pinctrl_state *dual_mode1;
+	struct pinctrl_state *dual_mode2;
+
+	bool tsif0_active;
+	bool tsif1_active;
+};
+
+/* this represents the actual hardware device */
+struct tspp_device {
+	struct list_head devlist; /* list of all devices */
+	struct platform_device *pdev;
+	void __iomem *base;
+	uint32_t tsif_bus_client;
+	unsigned int tspp_irq;
+	unsigned int bam_irq;
+	unsigned long bam_handle;
+	struct sps_bam_props bam_props;
+	struct wakeup_source ws;
+	spinlock_t spinlock;
+	struct tasklet_struct tlet;
+	struct tspp_tsif_device tsif[TSPP_TSIF_INSTANCES];
+	/* clocks */
+	struct clk *tsif_pclk;
+	struct clk *tsif_ref_clk;
+	/* regulators */
+	struct regulator *tsif_vreg;
+	/* data */
+	struct tspp_pid_filter_table *filters[TSPP_FILTER_TABLES];
+	struct tspp_channel channels[TSPP_NUM_CHANNELS];
+	struct tspp_key_table *tspp_key_table;
+	struct tspp_global_performance_regs *tspp_global_performance;
+	struct tspp_pipe_context_regs *tspp_pipe_context;
+	struct tspp_pipe_performance_regs *tspp_pipe_performance;
+	bool req_irqs;
+	/* pinctrl */
+	struct mutex mutex;
+	struct tspp_pinctrl pinctrl;
+	unsigned int tts_source; /* timestamp source type: LPASS timer or TCR */
+
+	struct dentry *dent;
+	struct dentry *debugfs_regs[ARRAY_SIZE(debugfs_tspp_regs)];
+};
+
+static int tspp_key_entry;
+static u32 channel_id;  /* next channel id number to assign */
+
+static LIST_HEAD(tspp_devices);
+
+/*** IRQ ***/
+static irqreturn_t tspp_isr(int irq, void *dev)
+{
+	struct tspp_device *device = dev;
+	u32 status, mask;
+	u32 data;
+
+	status = readl_relaxed(device->base + TSPP_IRQ_STATUS);
+	mask = readl_relaxed(device->base + TSPP_IRQ_MASK);
+	status &= mask;
+
+	if (!status) {
+		dev_warn(&device->pdev->dev, "Spurious interrupt");
+		return IRQ_NONE;
+	}
+
+	/* if (status & TSPP_IRQ_STATUS_TSP_RD_CMPL) */
+
+	if (status & TSPP_IRQ_STATUS_KEY_ERROR) {
+		/* read the key error info */
+		data = readl_relaxed(device->base + TSPP_KEY_ERROR);
+		dev_info(&device->pdev->dev, "key error 0x%x", data);
+	}
+	if (status & TSPP_IRQ_STATUS_KEY_SWITCHED_BAD) {
+		data = readl_relaxed(device->base + TSPP_KEY_VALID);
+		dev_info(&device->pdev->dev, "key invalidated: 0x%x", data);
+	}
+	if (status & TSPP_IRQ_STATUS_KEY_SWITCHED)
+		dev_info(&device->pdev->dev, "key switched");
+
+	if (status & 0xffff)
+		dev_info(&device->pdev->dev, "broken pipe %i", status & 0xffff);
+
+	writel_relaxed(status, device->base + TSPP_IRQ_CLEAR);
+
+	/*
+	 * Before returning IRQ_HANDLED to the generic interrupt handling
+	 * framework, make sure all operations, including the clearing of
+	 * interrupt status registers in the hardware, have completed.
+	 * A barrier after clearing the interrupt status register is thus
+	 * required to guarantee that the register has really been cleared
+	 * by the time we return from this handler.
+	 */
+	wmb();
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t tsif_isr(int irq, void *dev)
+{
+	struct tspp_tsif_device *tsif_device = dev;
+	u32 sts_ctl = ioread32(tsif_device->base + TSIF_STS_CTL_OFF);
+
+	if (!(sts_ctl & (TSIF_STS_CTL_PACK_AVAIL |
+			 TSIF_STS_CTL_OVERFLOW |
+			 TSIF_STS_CTL_LOST_SYNC |
+			 TSIF_STS_CTL_TIMEOUT)))
+		return IRQ_NONE;
+
+	if (sts_ctl & TSIF_STS_CTL_OVERFLOW)
+		tsif_device->stat_overflow++;
+
+	if (sts_ctl & TSIF_STS_CTL_LOST_SYNC)
+		tsif_device->stat_lost_sync++;
+
+	if (sts_ctl & TSIF_STS_CTL_TIMEOUT)
+		tsif_device->stat_timeout++;
+
+	iowrite32(sts_ctl, tsif_device->base + TSIF_STS_CTL_OFF);
+
+	/*
+	 * Before returning IRQ_HANDLED to the generic interrupt handling
+	 * framework, make sure all operations, including the clearing of
+	 * interrupt status registers in the hardware, have completed.
+	 * A barrier after clearing the interrupt status register is thus
+	 * required to guarantee that the register has really been cleared
+	 * by the time we return from this handler.
+	 */
+	wmb();
+	return IRQ_HANDLED;
+}
+
+/*** callbacks ***/
+static void tspp_sps_complete_cb(struct sps_event_notify *notify)
+{
+	struct tspp_device *pdev;
+
+	if (!notify || !notify->user)
+		return;
+
+	pdev = notify->user;
+	tasklet_schedule(&pdev->tlet);
+}
+
+static void tspp_expiration_timer(unsigned long data)
+{
+	struct tspp_device *pdev = (struct tspp_device *)data;
+
+	if (pdev)
+		tasklet_schedule(&pdev->tlet);
+}
+
+/*** tasklet ***/
+static void tspp_sps_complete_tlet(unsigned long data)
+{
+	int i;
+	int complete;
+	unsigned long flags;
+	struct sps_iovec iovec;
+	struct tspp_channel *channel;
+	struct tspp_device *device = (struct tspp_device *)data;
+
+	spin_lock_irqsave(&device->spinlock, flags);
+
+	for (i = 0; i < TSPP_NUM_CHANNELS; i++) {
+		complete = 0;
+		channel = &device->channels[i];
+
+		if (!channel->used || !channel->waiting)
+			continue;
+
+		/* stop the expiration timer */
+		if (channel->expiration_period_ms)
+			del_timer(&channel->expiration_timer);
+
+		/* get completions */
+		while (channel->waiting->state == TSPP_BUF_STATE_WAITING) {
+			if (sps_get_iovec(channel->pipe, &iovec) != 0) {
+				pr_err("tspp: Error in iovec on channel %i",
+					channel->id);
+				break;
+			}
+			if (iovec.size == 0)
+				break;
+
+			if (DESC_FULL_ADDR(iovec.flags, iovec.addr)
+			    != channel->waiting->sps.phys_base)
+				pr_err("tspp: buffer mismatch %pa",
+					&channel->waiting->sps.phys_base);
+
+			complete = 1;
+			channel->waiting->state = TSPP_BUF_STATE_DATA;
+			channel->waiting->filled = iovec.size;
+			channel->waiting->read_index = 0;
+
+			if (channel->src == TSPP_SOURCE_TSIF0)
+				device->tsif[0].stat_rx++;
+			else if (channel->src == TSPP_SOURCE_TSIF1)
+				device->tsif[1].stat_rx++;
+
+			/* update the pointers */
+			channel->waiting = channel->waiting->next;
+		}
+
+		/* wake any waiting processes */
+		if (complete) {
+			wake_up_interruptible(&channel->in_queue);
+
+			/* call notifiers */
+			if (channel->notifier)
+				channel->notifier(channel->id,
+					channel->notify_data);
+		}
+
+		/* restart expiration timer */
+		if (channel->expiration_period_ms)
+			mod_timer(&channel->expiration_timer,
+				jiffies +
+				MSEC_TO_JIFFIES(
+					channel->expiration_period_ms));
+	}
+
+	spin_unlock_irqrestore(&device->spinlock, flags);
+}
+
+static int tspp_config_gpios(struct tspp_device *device,
+				enum tspp_source source,
+				int enable)
+{
+	int ret;
+	struct pinctrl_state *s;
+	struct tspp_pinctrl *p = &device->pinctrl;
+	bool mode2;
+
+	/*
+	 * TSIF devices are handled separately; however, changes to the
+	 * pinctrl state must be protected against races.
+	 */
+	if (mutex_lock_interruptible(&device->mutex))
+		return -ERESTARTSYS;
+
+	switch (source) {
+	case TSPP_SOURCE_TSIF0:
+		mode2 = device->tsif[0].mode == TSPP_TSIF_MODE_2;
+		if (enable == p->tsif1_active) {
+			if (enable)
+				/* Both tsif enabled */
+				s = mode2 ? p->dual_mode2 : p->dual_mode1;
+			else
+				/* Both tsif disabled */
+				s = p->disabled;
+		} else if (enable) {
+			/* Only tsif0 is enabled */
+			s = mode2 ? p->tsif0_mode2 : p->tsif0_mode1;
+		} else {
+			/* Only tsif1 is enabled */
+			s = mode2 ? p->tsif1_mode2 : p->tsif1_mode1;
+		}
+
+		if (!s) {
+			pr_err("%s: missing pinctrl for source %d\n",
+			       __func__, source);
+			mutex_unlock(&device->mutex);
+			return -EINVAL;
+		}
+
+		ret = pinctrl_select_state(p->pinctrl, s);
+		if (!ret)
+			p->tsif0_active = enable;
+		break;
+	case TSPP_SOURCE_TSIF1:
+		mode2 = device->tsif[1].mode == TSPP_TSIF_MODE_2;
+		if (enable == p->tsif0_active) {
+			if (enable)
+				/* Both tsif enabled */
+				s = mode2 ? p->dual_mode2 : p->dual_mode1;
+			else
+				/* Both tsif disabled */
+				s = p->disabled;
+		} else if (enable) {
+			/* Only tsif1 is enabled */
+			s = mode2 ? p->tsif1_mode2 : p->tsif1_mode1;
+		} else {
+			/* Only tsif0 is enabled */
+			s = mode2 ? p->tsif0_mode2 : p->tsif0_mode1;
+		}
+
+		if (!s) {
+			pr_err("%s: missing pinctrl for source %d\n",
+			       __func__, source);
+			mutex_unlock(&device->mutex);
+			return -EINVAL;
+		}
+
+		ret = pinctrl_select_state(p->pinctrl, s);
+		if (!ret)
+			p->tsif1_active = enable;
+		break;
+	default:
+		pr_err("%s: invalid source %d\n", __func__, source);
+		mutex_unlock(&device->mutex);
+		return -EINVAL;
+	}
+
+	if (ret)
+		pr_err("%s: failed to change pinctrl state, ret=%d\n",
+			__func__, ret);
+
+	mutex_unlock(&device->mutex);
+	return ret;
+}
+
+static int tspp_get_pinctrl(struct tspp_device *device)
+{
+	struct pinctrl *pinctrl;
+	struct pinctrl_state *state;
+
+	pinctrl = devm_pinctrl_get(&device->pdev->dev);
+	if (IS_ERR(pinctrl)) {
+		pr_err("%s: Unable to get pinctrl handle\n", __func__);
+		return -EINVAL;
+	}
+	device->pinctrl.pinctrl = pinctrl;
+
+	state = pinctrl_lookup_state(pinctrl, "disabled");
+	if (IS_ERR(state)) {
+		pr_err("%s: Unable to find state %s\n",
+			__func__, "disabled");
+		return -EINVAL;
+	}
+	device->pinctrl.disabled = state;
+
+	state = pinctrl_lookup_state(pinctrl, "tsif0-mode1");
+	device->pinctrl.tsif0_mode1 = state;
+
+	state = pinctrl_lookup_state(pinctrl, "tsif0-mode2");
+	device->pinctrl.tsif0_mode2 = state;
+
+	state = pinctrl_lookup_state(pinctrl, "tsif1-mode1");
+	device->pinctrl.tsif1_mode1 = state;
+
+	state = pinctrl_lookup_state(pinctrl, "tsif1-mode2");
+	device->pinctrl.tsif1_mode2 = state;
+
+	state = pinctrl_lookup_state(pinctrl, "dual-tsif-mode1");
+	device->pinctrl.dual_mode1 = state;
+
+	state = pinctrl_lookup_state(pinctrl, "dual-tsif-mode2");
+	device->pinctrl.dual_mode2 = state;
+
+	device->pinctrl.tsif0_active = false;
+	device->pinctrl.tsif1_active = false;
+
+	return 0;
+}
+
+
+/*** Clock functions ***/
+static int tspp_clock_start(struct tspp_device *device)
+{
+	int rc;
+
+	if (device == NULL) {
+		pr_err("tspp: Can't start clocks, invalid device\n");
+		return -EINVAL;
+	}
+
+	if (device->tsif_bus_client) {
+		rc = msm_bus_scale_client_update_request(
+					device->tsif_bus_client, 1);
+		if (rc) {
+			pr_err("tspp: Can't enable bus\n");
+			return -EBUSY;
+		}
+	}
+
+	if (device->tsif_vreg) {
+		rc = regulator_set_voltage(device->tsif_vreg,
+					RPM_REGULATOR_CORNER_SUPER_TURBO,
+					RPM_REGULATOR_CORNER_SUPER_TURBO);
+		if (rc) {
+			pr_err("Unable to set CX voltage.\n");
+			if (device->tsif_bus_client)
+				msm_bus_scale_client_update_request(
+					device->tsif_bus_client, 0);
+			return rc;
+		}
+	}
+
+	if (device->tsif_pclk && clk_prepare_enable(device->tsif_pclk) != 0) {
+		pr_err("tspp: Can't start pclk");
+
+		if (device->tsif_vreg) {
+			regulator_set_voltage(device->tsif_vreg,
+					RPM_REGULATOR_CORNER_NONE,
+					RPM_REGULATOR_CORNER_SUPER_TURBO);
+		}
+
+		if (device->tsif_bus_client)
+			msm_bus_scale_client_update_request(
+				device->tsif_bus_client, 0);
+		return -EBUSY;
+	}
+
+	if (device->tsif_ref_clk &&
+		clk_prepare_enable(device->tsif_ref_clk) != 0) {
+		pr_err("tspp: Can't start ref clk");
+		clk_disable_unprepare(device->tsif_pclk);
+		if (device->tsif_vreg) {
+			regulator_set_voltage(device->tsif_vreg,
+					RPM_REGULATOR_CORNER_NONE,
+					RPM_REGULATOR_CORNER_SUPER_TURBO);
+		}
+
+		if (device->tsif_bus_client)
+			msm_bus_scale_client_update_request(
+				device->tsif_bus_client, 0);
+		return -EBUSY;
+	}
+
+	return 0;
+}
+
+static void tspp_clock_stop(struct tspp_device *device)
+{
+	int rc;
+
+	if (device == NULL) {
+		pr_err("tspp: Can't stop clocks, invalid device\n");
+		return;
+	}
+
+	if (device->tsif_pclk)
+		clk_disable_unprepare(device->tsif_pclk);
+
+	if (device->tsif_ref_clk)
+		clk_disable_unprepare(device->tsif_ref_clk);
+
+	if (device->tsif_vreg) {
+		rc = regulator_set_voltage(device->tsif_vreg,
+					RPM_REGULATOR_CORNER_NONE,
+					RPM_REGULATOR_CORNER_SUPER_TURBO);
+		if (rc)
+			pr_err("Unable to set CX voltage.\n");
+	}
+
+	if (device->tsif_bus_client) {
+		rc = msm_bus_scale_client_update_request(
+					device->tsif_bus_client, 0);
+		if (rc)
+			pr_err("tspp: Can't disable bus\n");
+	}
+}
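+
+/*
+ * Note: tspp_clock_start() brings resources up in the order bus vote ->
+ * CX regulator -> pclk -> ref clk, and each failure path unwinds exactly
+ * the resources acquired so far; tspp_clock_stop() above releases the
+ * same four resources.
+ */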
+
+/*** TSIF functions ***/
+static int tspp_start_tsif(struct tspp_tsif_device *tsif_device)
+{
+	int start_hardware = 0;
+	u32 ctl;
+	u32 tts_ctl;
+	int retval;
+
+	if (tsif_device->ref_count == 0) {
+		start_hardware = 1;
+	} else if (tsif_device->ref_count > 0) {
+		ctl = readl_relaxed(tsif_device->base + TSIF_STS_CTL_OFF);
+		if ((ctl & TSIF_STS_CTL_START) != 1) {
+			/* this hardware should already be running */
+			pr_warn("tspp: tsif hw not started but ref count > 0");
+			start_hardware = 1;
+		}
+	}
+
+	if (start_hardware) {
+		ctl = TSIF_STS_CTL_EN_IRQ |
+				TSIF_STS_CTL_EN_DM |
+				TSIF_STS_CTL_PACK_AVAIL |
+				TSIF_STS_CTL_OVERFLOW |
+				TSIF_STS_CTL_LOST_SYNC;
+
+		if (tsif_device->clock_inverse)
+			ctl |= TSIF_STS_CTL_INV_CLOCK;
+
+		if (tsif_device->data_inverse)
+			ctl |= TSIF_STS_CTL_INV_DATA;
+
+		if (tsif_device->sync_inverse)
+			ctl |= TSIF_STS_CTL_INV_SYNC;
+
+		if (tsif_device->enable_inverse)
+			ctl |= TSIF_STS_CTL_INV_ENABLE;
+
+		switch (tsif_device->mode) {
+		case TSPP_TSIF_MODE_LOOPBACK:
+			ctl |= TSIF_STS_CTL_EN_NULL |
+					TSIF_STS_CTL_EN_ERROR |
+					TSIF_STS_CTL_TEST_MODE;
+			break;
+		case TSPP_TSIF_MODE_1:
+			ctl |= TSIF_STS_CTL_EN_TIME_LIM;
+			if (tsif_device->tts_source != TSIF_TTS_LPASS_TIMER)
+				ctl |= TSIF_STS_CTL_EN_TCR;
+			break;
+		case TSPP_TSIF_MODE_2:
+			ctl |= TSIF_STS_CTL_EN_TIME_LIM |
+					TSIF_STS_CTL_MODE_2;
+			if (tsif_device->tts_source != TSIF_TTS_LPASS_TIMER)
+				ctl |= TSIF_STS_CTL_EN_TCR;
+			break;
+		default:
+			pr_warn("tspp: unknown tsif mode 0x%x",
+				tsif_device->mode);
+		}
+		/* Set up 4-byte timestamping from the LPASS timer */
+		if (tsif_device->tts_source == TSIF_TTS_LPASS_TIMER) {
+			if (tsif_device->lpass_timer_enable == 0) {
+				retval = avcs_core_open();
+				if (retval < 0) {
+					pr_warn("tspp: avcs open fail:%d\n",
+						retval);
+					return retval;
+				}
+				retval = avcs_core_disable_power_collapse(1);
+				if (retval  < 0) {
+					pr_warn("tspp: avcs power enable:%d\n",
+						retval);
+					return retval;
+				}
+				tsif_device->lpass_timer_enable = 1;
+			}
+
+			tts_ctl = 0;
+			/* Set LPASS Timer TTS source */
+			tts_ctl |= TSIF_TTS_CTL_TTS_SOURCE;
+			/* Set 4 byte TTS */
+			tts_ctl |= TSIF_TTS_CTL_TTS_LENGTH_0;
+
+			writel_relaxed(tts_ctl, tsif_device->base +
+				       TSIF_TTS_CTL_OFF);
+			/* write TTS control register */
+			wmb();
+			tts_ctl	= readl_relaxed(tsif_device->base +
+						TSIF_TTS_CTL_OFF);
+		}
+
+		writel_relaxed(ctl, tsif_device->base + TSIF_STS_CTL_OFF);
+		/* write Status control register */
+		wmb();
+		writel_relaxed(tsif_device->time_limit,
+			  tsif_device->base + TSIF_TIME_LIMIT_OFF);
+		/* assure register configuration is done before starting TSIF */
+		wmb();
+		writel_relaxed(ctl | TSIF_STS_CTL_START,
+			  tsif_device->base + TSIF_STS_CTL_OFF);
+		/* assure TSIF start configuration */
+		wmb();
+	}
+
+	ctl = readl_relaxed(tsif_device->base + TSIF_STS_CTL_OFF);
+	if (!(ctl & TSIF_STS_CTL_START))
+		return -EBUSY;
+
+	tsif_device->ref_count++;
+	return 0;
+}
+
+static void tspp_stop_tsif(struct tspp_tsif_device *tsif_device)
+{
+	if (tsif_device->ref_count == 0) {
+		if (tsif_device->lpass_timer_enable == 1) {
+			if (avcs_core_disable_power_collapse(0) == 0)
+				tsif_device->lpass_timer_enable = 0;
+		}
+		return;
+	}
+
+	tsif_device->ref_count--;
+
+	if (tsif_device->ref_count == 0) {
+		writel_relaxed(TSIF_STS_CTL_STOP,
+			tsif_device->base + TSIF_STS_CTL_OFF);
+		/* assure TSIF stop configuration */
+		wmb();
+	}
+}
+
+/*** local TSPP functions ***/
+static int tspp_channels_in_use(struct tspp_device *pdev)
+{
+	int i;
+	int count = 0;
+
+	for (i = 0; i < TSPP_NUM_CHANNELS; i++)
+		count += (pdev->channels[i].used ? 1 : 0);
+
+	return count;
+}
+
+static struct tspp_device *tspp_find_by_id(int id)
+{
+	struct tspp_device *dev;
+
+	list_for_each_entry(dev, &tspp_devices, devlist) {
+		if (dev->pdev->id == id)
+			return dev;
+	}
+	return NULL;
+}
+
+static int tspp_get_key_entry(void)
+{
+	int i;
+
+	for (i = 0; i < TSPP_NUM_KEYS; i++) {
+		if (!(tspp_key_entry & (1 << i))) {
+			tspp_key_entry |= (1 << i);
+			return i;
+		}
+	}
+	return 1 < TSPP_NUM_KEYS;
+}
+
+static void tspp_free_key_entry(int entry)
+{
+	if (entry > TSPP_NUM_KEYS) {
+		pr_err("tspp_free_key_entry: index out of bounds");
+		return;
+	}
+
+	tspp_key_entry &= ~(1 << entry);
+}
+
+static int tspp_alloc_buffer(u32 channel_id, struct tspp_data_descriptor *desc,
+	u32 size, struct dma_pool *dma_pool, tspp_allocator *alloc, void *user)
+{
+	if (size < TSPP_MIN_BUFFER_SIZE ||
+		size > TSPP_MAX_BUFFER_SIZE) {
+		pr_err("tspp: bad buffer size %i", size);
+		return -ENOMEM;
+	}
+
+	if (alloc) {
+		TSPP_DEBUG("tspp using alloc function");
+		desc->virt_base = alloc(channel_id, size,
+			&desc->phys_base, user);
+	} else {
+		if (!dma_pool)
+			desc->virt_base = dma_alloc_coherent(NULL, size,
+				&desc->phys_base, GFP_KERNEL);
+		else
+			desc->virt_base = dma_pool_alloc(dma_pool, GFP_KERNEL,
+				&desc->phys_base);
+	}
+
+	/* check the result of the user allocator and of the DMA paths alike */
+	if (!desc->virt_base) {
+		pr_err("tspp: buffer allocation failed %u\n", size);
+		return -ENOMEM;
+	}
+
+	desc->size = size;
+	return 0;
+}
+
+static int tspp_queue_buffer(struct tspp_channel *channel,
+	struct tspp_mem_buffer *buffer)
+{
+	int rc;
+	u32 flags = 0;
+
+	/* make sure the interrupt frequency is valid */
+	if (channel->int_freq < 1)
+		channel->int_freq = 1;
+
+	/* generate interrupt according to requested frequency */
+	if (buffer->desc.id % channel->int_freq == channel->int_freq-1)
+		flags = SPS_IOVEC_FLAG_INT;
+
+	/* start the transfer */
+	rc = sps_transfer_one(channel->pipe,
+		buffer->sps.phys_base,
+		buffer->sps.size,
+		flags ? channel->pdev : NULL,
+		flags);
+	if (rc < 0)
+		return rc;
+
+	buffer->state = TSPP_BUF_STATE_WAITING;
+
+	return 0;
+}
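+
+/*
+ * Note on int_freq (illustrative): with int_freq == 4, the descriptors
+ * with ids 3, 7, 11, ... are submitted with SPS_IOVEC_FLAG_INT set, so
+ * the BAM interrupts on every fourth completed transfer and the
+ * completion tasklet drains all finished descriptors in one pass.
+ */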
+
+static int tspp_global_reset(struct tspp_device *pdev)
+{
+	u32 i, val;
+
+	/* stop all TSIFs */
+	for (i = 0; i < TSPP_TSIF_INSTANCES; i++) {
+		pdev->tsif[i].ref_count = 1; /* allows stopping hw */
+		tspp_stop_tsif(&pdev->tsif[i]); /* will reset ref_count to 0 */
+		pdev->tsif[i].time_limit = TSPP_TSIF_DEFAULT_TIME_LIMIT;
+		pdev->tsif[i].clock_inverse = 0;
+		pdev->tsif[i].data_inverse = 0;
+		pdev->tsif[i].sync_inverse = 0;
+		pdev->tsif[i].enable_inverse = 0;
+		pdev->tsif[i].lpass_timer_enable = 0;
+	}
+	writel_relaxed(TSPP_RST_RESET, pdev->base + TSPP_RST);
+	/* assure state is reset before continuing with configuration */
+	wmb();
+
+	/* TSPP tables */
+	for (i = 0; i < TSPP_FILTER_TABLES; i++)
+		memset_io(pdev->filters[i],
+			0, sizeof(struct tspp_pid_filter_table));
+
+	/* disable all filters */
+	val = (2 << TSPP_NUM_CHANNELS) - 1;
+	writel_relaxed(val, pdev->base + TSPP_PS_DISABLE);
+
+	/* TSPP registers */
+	val = readl_relaxed(pdev->base + TSPP_CONTROL);
+	writel_relaxed(val | TSPP_CLK_CONTROL_FORCE_PERF_CNT,
+		pdev->base + TSPP_CONTROL);
+	/* assure tspp performance count clock is set to 0 */
+	wmb();
+	memset_io(pdev->tspp_global_performance, 0,
+		sizeof(struct tspp_global_performance_regs));
+	memset_io(pdev->tspp_pipe_context, 0,
+		sizeof(struct tspp_pipe_context_regs));
+	memset_io(pdev->tspp_pipe_performance, 0,
+		sizeof(struct tspp_pipe_performance_regs));
+	/* assure tspp pipe context registers are set to 0 */
+	wmb();
+	writel_relaxed(val & ~TSPP_CLK_CONTROL_FORCE_PERF_CNT,
+		pdev->base + TSPP_CONTROL);
+	/* assure tspp performance count clock  is reset */
+	wmb();
+
+	val = readl_relaxed(pdev->base + TSPP_CONFIG);
+	val &= ~(TSPP_CONFIG_PS_LEN_ERR_MASK |
+			TSPP_CONFIG_PS_CONT_ERR_UNSP_MASK |
+			TSPP_CONFIG_PS_CONT_ERR_MASK);
+	TSPP_CONFIG_SET_PACKET_LENGTH(val, TSPP_PACKET_LENGTH);
+	writel_relaxed(val, pdev->base + TSPP_CONFIG);
+	writel_relaxed(0x0007ffff, pdev->base + TSPP_IRQ_MASK);
+	writel_relaxed(0x000fffff, pdev->base + TSPP_IRQ_CLEAR);
+	writel_relaxed(0, pdev->base + TSPP_RST);
+	/* assure tspp reset clear */
+	wmb();
+
+	tspp_key_entry = 0;
+
+	return 0;
+}
+
+static void tspp_channel_init(struct tspp_channel *channel,
+	struct tspp_device *pdev)
+{
+	channel->pdev = pdev;
+	channel->data = NULL;
+	channel->read = NULL;
+	channel->waiting = NULL;
+	channel->locked = NULL;
+	channel->id = channel_id++;
+	channel->used = 0;
+	channel->buffer_size = TSPP_MIN_BUFFER_SIZE;
+	channel->max_buffers = TSPP_NUM_BUFFERS;
+	channel->buffer_count = 0;
+	channel->filter_count = 0;
+	channel->int_freq = 1;
+	channel->src = TSPP_SOURCE_NONE;
+	channel->mode = TSPP_MODE_DISABLED;
+	channel->notifier = NULL;
+	channel->notify_data = NULL;
+	channel->expiration_period_ms = 0;
+	channel->memfree = NULL;
+	channel->user_info = NULL;
+	init_waitqueue_head(&channel->in_queue);
+}
+
+static void tspp_set_tsif_mode(struct tspp_channel *channel,
+	enum tspp_tsif_mode mode)
+{
+	int index;
+
+	switch (channel->src) {
+	case TSPP_SOURCE_TSIF0:
+		index = 0;
+		break;
+	case TSPP_SOURCE_TSIF1:
+		index = 1;
+		break;
+	default:
+		pr_warn("tspp: can't set mode for non-tsif source %d",
+			channel->src);
+		return;
+	}
+	channel->pdev->tsif[index].mode = mode;
+}
+
+static void tspp_set_signal_inversion(struct tspp_channel *channel,
+					int clock_inverse, int data_inverse,
+					int sync_inverse, int enable_inverse)
+{
+	int index;
+
+	switch (channel->src) {
+	case TSPP_SOURCE_TSIF0:
+		index = 0;
+		break;
+	case TSPP_SOURCE_TSIF1:
+		index = 1;
+		break;
+	default:
+		return;
+	}
+	channel->pdev->tsif[index].clock_inverse = clock_inverse;
+	channel->pdev->tsif[index].data_inverse = data_inverse;
+	channel->pdev->tsif[index].sync_inverse = sync_inverse;
+	channel->pdev->tsif[index].enable_inverse = enable_inverse;
+}
+
+static int tspp_is_buffer_size_aligned(u32 size, enum tspp_mode mode)
+{
+	u32 alignment;
+
+	switch (mode) {
+	case TSPP_MODE_RAW:
+		/* must be a multiple of 192 */
+		alignment = (TSPP_PACKET_LENGTH + 4);
+		if (size % alignment)
+			return 0;
+		return 1;
+
+	case TSPP_MODE_RAW_NO_SUFFIX:
+		/* must be a multiple of 188 */
+		alignment = TSPP_PACKET_LENGTH;
+		if (size % alignment)
+			return 0;
+		return 1;
+
+	case TSPP_MODE_DISABLED:
+	case TSPP_MODE_PES:
+	default:
+		/* no alignment requirement */
+		return 1;
+	}
+
+}
+
+static u32 tspp_align_buffer_size_by_mode(u32 size, enum tspp_mode mode)
+{
+	u32 new_size;
+	u32 alignment;
+
+	switch (mode) {
+	case TSPP_MODE_RAW:
+		/* must be a multiple of 192 */
+		alignment = (TSPP_PACKET_LENGTH + 4);
+		break;
+
+	case TSPP_MODE_RAW_NO_SUFFIX:
+		/* must be a multiple of 188 */
+		alignment = TSPP_PACKET_LENGTH;
+		break;
+
+	case TSPP_MODE_DISABLED:
+	case TSPP_MODE_PES:
+	default:
+		/* no alignment requirement - give the user what they ask for */
+		alignment = 1;
+		break;
+	}
+	/* align up */
+	new_size = (((size + alignment - 1) / alignment) * alignment);
+	return new_size;
+}
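+
+/*
+ * Worked example (assuming TSPP_PACKET_LENGTH == 188): in RAW mode the
+ * alignment is 192, so a requested size of 1000 is rounded up to
+ * ((1000 + 191) / 192) * 192 = 1152, i.e. room for exactly six of the
+ * 192-byte timestamp-suffixed packets.
+ */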
+
+static void tspp_destroy_buffers(u32 channel_id, struct tspp_channel *channel)
+{
+	int i;
+	struct tspp_mem_buffer *pbuf, *temp;
+
+	pbuf = channel->data;
+	for (i = 0; i < channel->buffer_count; i++) {
+		if (pbuf->desc.phys_base) {
+			if (channel->memfree) {
+				channel->memfree(channel_id,
+					pbuf->desc.size,
+					pbuf->desc.virt_base,
+					pbuf->desc.phys_base,
+					channel->user_info);
+			} else {
+				if (!channel->dma_pool)
+					dma_free_coherent(
+						&channel->pdev->pdev->dev,
+						pbuf->desc.size,
+						pbuf->desc.virt_base,
+						pbuf->desc.phys_base);
+				else
+					dma_pool_free(channel->dma_pool,
+						pbuf->desc.virt_base,
+						pbuf->desc.phys_base);
+			}
+			pbuf->desc.phys_base = 0;
+		}
+		pbuf->desc.virt_base = 0;
+		pbuf->state = TSPP_BUF_STATE_EMPTY;
+		temp = pbuf;
+		pbuf = pbuf->next;
+		kfree(temp);
+	}
+}
+
+static int msm_tspp_req_irqs(struct tspp_device *device)
+{
+	int rc;
+	int i;
+	int j;
+
+	rc = request_irq(device->tspp_irq, tspp_isr, IRQF_SHARED,
+		dev_name(&device->pdev->dev), device);
+	if (rc) {
+		dev_err(&device->pdev->dev,
+			"failed to request TSPP IRQ %d : %d",
+			device->tspp_irq, rc);
+		return rc;
+	}
+
+	for (i = 0; i < TSPP_TSIF_INSTANCES; i++) {
+		rc = request_irq(device->tsif[i].tsif_irq,
+			tsif_isr, IRQF_SHARED, dev_name(&device->pdev->dev),
+			&device->tsif[i]);
+		if (rc) {
+			dev_err(&device->pdev->dev,
+				"failed to request TSIF%d IRQ: %d",
+				i, rc);
+			goto failed;
+		}
+	}
+	device->req_irqs = true;
+	return 0;
+
+failed:
+	free_irq(device->tspp_irq, device);
+	for (j = 0; j < i; j++)
+		free_irq(device->tsif[j].tsif_irq, device);
+
+	return rc;
+}
+
+static inline void msm_tspp_free_irqs(struct tspp_device *device)
+{
+	int i;
+
+	for (i = 0; i < TSPP_TSIF_INSTANCES; i++) {
+		if (device->tsif[i].tsif_irq)
+			free_irq(device->tsif[i].tsif_irq,  &device->tsif[i]);
+	}
+
+	if (device->tspp_irq)
+		free_irq(device->tspp_irq, device);
+	device->req_irqs = false;
+}
+
+/*** TSPP API functions ***/
+
+/**
+ * tspp_open_stream - open a TSPP stream for use.
+ *
+ * @dev: TSPP device (up to TSPP_MAX_DEVICES)
+ * @channel_id: Channel ID number (up to TSPP_NUM_CHANNELS)
+ * @source: stream source parameters.
+ *
+ * Return: error status
+ *
+ */
+int tspp_open_stream(u32 dev, u32 channel_id,
+			struct tspp_select_source *source)
+{
+	u32 val;
+	int rc;
+	struct tspp_device *pdev;
+	struct tspp_channel *channel;
+	bool req_irqs = false;
+
+	TSPP_DEBUG("tspp_open_stream %i %i %i %i",
+		dev, channel_id, source->source, source->mode);
+
+	if (dev >= TSPP_MAX_DEVICES) {
+		pr_err("tspp: device id out of range");
+		return -ENODEV;
+	}
+
+	if (channel_id >= TSPP_NUM_CHANNELS) {
+		pr_err("tspp: channel id out of range");
+		return -ECHRNG;
+	}
+
+	pdev = tspp_find_by_id(dev);
+	if (!pdev) {
+		pr_err("tspp_str: can't find device %i", dev);
+		return -ENODEV;
+	}
+	channel = &pdev->channels[channel_id];
+	channel->src = source->source;
+	tspp_set_tsif_mode(channel, source->mode);
+	tspp_set_signal_inversion(channel, source->clk_inverse,
+			source->data_inverse, source->sync_inverse,
+			source->enable_inverse);
+
+	/* Request IRQ resources on first open */
+	if (!pdev->req_irqs && (source->source == TSPP_SOURCE_TSIF0 ||
+		source->source == TSPP_SOURCE_TSIF1)) {
+		rc = msm_tspp_req_irqs(pdev);
+		if (rc) {
+			pr_err("tspp: error requesting irqs\n");
+			return rc;
+		}
+		req_irqs = true;
+	}
+
+	switch (source->source) {
+	case TSPP_SOURCE_TSIF0:
+		if (tspp_config_gpios(pdev, channel->src, 1) != 0) {
+			rc = -EBUSY;
+			pr_err("tspp: error enabling tsif0 GPIOs\n");
+			goto free_irq;
+		}
+		/* make sure TSIF0 is running & enabled */
+		if (tspp_start_tsif(&pdev->tsif[0]) != 0) {
+			rc = -EBUSY;
+			pr_err("tspp: error starting tsif0");
+			goto free_irq;
+		}
+		if (pdev->tsif[0].ref_count == 1) {
+			val = readl_relaxed(pdev->base + TSPP_CONTROL);
+			writel_relaxed(val & ~TSPP_CONTROL_TSP_TSIF0_SRC_DIS,
+				pdev->base + TSPP_CONTROL);
+			/* Assure BAM TS PKT packet processing is enabled */
+			wmb();
+		}
+		break;
+	case TSPP_SOURCE_TSIF1:
+		if (tspp_config_gpios(pdev, channel->src, 1) != 0) {
+			rc = -EBUSY;
+			pr_err("tspp: error enabling tsif1 GPIOs\n");
+			goto free_irq;
+		}
+		/* make sure TSIF1 is running & enabled */
+		if (tspp_start_tsif(&pdev->tsif[1]) != 0) {
+			rc = -EBUSY;
+			pr_err("tspp: error starting tsif1");
+			goto free_irq;
+		}
+		if (pdev->tsif[1].ref_count == 1) {
+			val = readl_relaxed(pdev->base + TSPP_CONTROL);
+			writel_relaxed(val & ~TSPP_CONTROL_TSP_TSIF1_SRC_DIS,
+				pdev->base + TSPP_CONTROL);
+			/* Assure BAM TS PKT packet processing is enabled */
+			wmb();
+		}
+		break;
+	case TSPP_SOURCE_MEM:
+		break;
+	default:
+		pr_err("tspp: channel %i invalid source %i",
+			channel->id, source->source);
+		return -EBUSY;
+	}
+
+	return 0;
+
+free_irq:
+	/* Free IRQs only if they were requested when opening this stream */
+	if (req_irqs)
+		msm_tspp_free_irqs(pdev);
+	return rc;
+}
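+
+/*
+ * Usage sketch (illustrative, not part of the driver): a kernel client
+ * normally opens the channel first (tspp_open_channel(), below) and then
+ * selects its stream source, e.g. for device 0, channel 0:
+ *
+ *	struct tspp_select_source src = {
+ *		.source = TSPP_SOURCE_TSIF0,
+ *		.mode = TSPP_TSIF_MODE_2,
+ *	};
+ *
+ *	if (tspp_open_stream(0, 0, &src) == 0) {
+ *		... consume data ...
+ *		tspp_close_stream(0, 0);
+ *	}
+ */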
+
+/**
+ * tspp_close_stream - close a TSPP stream.
+ *
+ * @dev: TSPP device (up to TSPP_MAX_DEVICES)
+ * @channel_id: Channel ID number (up to TSPP_NUM_CHANNELS)
+ *
+ * Return: error status
+ *
+ */
+int tspp_close_stream(u32 dev, u32 channel_id)
+{
+	u32 val;
+	u32 prev_ref_count = 0;
+	struct tspp_device *pdev;
+	struct tspp_channel *channel;
+
+	if (channel_id >= TSPP_NUM_CHANNELS) {
+		pr_err("tspp: channel id out of range");
+		return -ECHRNG;
+	}
+	pdev = tspp_find_by_id(dev);
+	if (!pdev) {
+		pr_err("tspp_cs: can't find device %i", dev);
+		return -EBUSY;
+	}
+	channel = &pdev->channels[channel_id];
+
+	switch (channel->src) {
+	case TSPP_SOURCE_TSIF0:
+		prev_ref_count = pdev->tsif[0].ref_count;
+		tspp_stop_tsif(&pdev->tsif[0]);
+		if (tspp_config_gpios(pdev, channel->src, 0) != 0)
+			pr_err("tspp: error disabling tsif0 GPIOs\n");
+
+		if (prev_ref_count == 1) {
+			val = readl_relaxed(pdev->base + TSPP_CONTROL);
+			writel_relaxed(val | TSPP_CONTROL_TSP_TSIF0_SRC_DIS,
+				pdev->base + TSPP_CONTROL);
+			/* Assure BAM TS PKT packet processing is disabled */
+			wmb();
+		}
+		break;
+	case TSPP_SOURCE_TSIF1:
+		prev_ref_count = pdev->tsif[1].ref_count;
+		tspp_stop_tsif(&pdev->tsif[1]);
+		if (tspp_config_gpios(pdev, channel->src, 0) != 0)
+			pr_err("tspp: error disabling tsif0 GPIOs\n");
+
+		if (prev_ref_count == 1) {
+			val = readl_relaxed(pdev->base + TSPP_CONTROL);
+			writel_relaxed(val | TSPP_CONTROL_TSP_TSIF1_SRC_DIS,
+				pdev->base + TSPP_CONTROL);
+			/* Assure BAM TS PKT packet processing is disabled */
+			wmb();
+		}
+		break;
+	case TSPP_SOURCE_MEM:
+		break;
+	case TSPP_SOURCE_NONE:
+		break;
+	}
+
+	channel->src = TSPP_SOURCE_NONE;
+
+	/* Free requested interrupts to save power */
+	if ((pdev->tsif[0].ref_count + pdev->tsif[1].ref_count) == 0 &&
+		prev_ref_count)
+		msm_tspp_free_irqs(pdev);
+
+	return 0;
+}
+
+static int tspp_init_sps_device(struct tspp_device *dev)
+{
+	int ret;
+
+	ret = sps_register_bam_device(&dev->bam_props, &dev->bam_handle);
+	if (ret) {
+		pr_err("tspp: failed to register bam device, err-%d\n", ret);
+		return ret;
+	}
+
+	ret = sps_device_reset(dev->bam_handle);
+	if (ret) {
+		sps_deregister_bam_device(dev->bam_handle);
+		pr_err("tspp: error resetting bam device, err=%d\n", ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+/**
+ * tspp_open_channel - open a TSPP channel.
+ *
+ * @dev: TSPP device (up to TSPP_MAX_DEVICES)
+ * @channel_id: Channel ID number (up to TSPP_NUM_CHANNELS)
+ *
+ * Return: error status
+ *
+ */
+int tspp_open_channel(u32 dev, u32 channel_id)
+{
+	int rc = 0;
+	struct sps_connect *config;
+	struct sps_register_event *event;
+	struct tspp_channel *channel;
+	struct tspp_device *pdev;
+
+	if (channel_id >= TSPP_NUM_CHANNELS) {
+		pr_err("tspp: channel id out of range");
+		return -ECHRNG;
+	}
+	pdev = tspp_find_by_id(dev);
+	if (!pdev) {
+		pr_err("tspp_oc: can't find device %i", dev);
+		return -ENODEV;
+	}
+	channel = &pdev->channels[channel_id];
+
+	if (channel->used) {
+		pr_err("tspp channel already in use");
+		return -EBUSY;
+	}
+
+	config = &channel->config;
+	event = &channel->event;
+
+	/* start the clocks if needed */
+	if (tspp_channels_in_use(pdev) == 0) {
+		rc = tspp_clock_start(pdev);
+		if (rc)
+			return rc;
+
+		if (pdev->bam_handle == SPS_DEV_HANDLE_INVALID) {
+			rc = tspp_init_sps_device(pdev);
+			if (rc) {
+				pr_err("tspp: failed to init sps device, err=%d\n",
+					rc);
+				tspp_clock_stop(pdev);
+				return rc;
+			}
+		}
+
+		__pm_stay_awake(&pdev->ws);
+	}
+
+	/* mark it as used */
+	channel->used = 1;
+
+	/* start the bam  */
+	channel->pipe = sps_alloc_endpoint();
+	if (channel->pipe == 0) {
+		pr_err("tspp: error allocating endpoint");
+		rc = -ENOMEM;
+		goto err_sps_alloc;
+	}
+
+	/* get default configuration */
+	sps_get_config(channel->pipe, config);
+
+	config->source = pdev->bam_handle;
+	config->destination = SPS_DEV_HANDLE_MEM;
+	config->mode = SPS_MODE_SRC;
+	config->options =
+		SPS_O_AUTO_ENABLE | /* connection is auto-enabled */
+		SPS_O_STREAMING | /* streaming mode */
+		SPS_O_DESC_DONE | /* interrupt on end of descriptor */
+		SPS_O_ACK_TRANSFERS | /* must use sps_get_iovec() */
+		SPS_O_HYBRID; /* Read actual descriptors in sps_get_iovec() */
+	config->src_pipe_index = channel->id;
+	config->desc.size =
+		TSPP_SPS_DESCRIPTOR_COUNT * SPS_DESCRIPTOR_SIZE;
+	config->desc.base = dma_alloc_coherent(&pdev->pdev->dev,
+						config->desc.size,
+						&config->desc.phys_base,
+						GFP_KERNEL);
+	if (config->desc.base == 0) {
+		pr_err("tspp: error allocating sps descriptors");
+		rc = -ENOMEM;
+		goto err_desc_alloc;
+	}
+
+	memset(config->desc.base, 0, config->desc.size);
+
+	rc = sps_connect(channel->pipe, config);
+	if (rc) {
+		pr_err("tspp: error connecting bam");
+		goto err_connect;
+	}
+
+	event->mode = SPS_TRIGGER_CALLBACK;
+	event->options = SPS_O_DESC_DONE;
+	event->callback = tspp_sps_complete_cb;
+	event->xfer_done = NULL;
+	event->user = pdev;
+
+	rc = sps_register_event(channel->pipe, event);
+	if (rc) {
+		pr_err("tspp: error registering event");
+		goto err_event;
+	}
+
+	init_timer(&channel->expiration_timer);
+	channel->expiration_timer.function = tspp_expiration_timer;
+	channel->expiration_timer.data = (unsigned long)pdev;
+	channel->expiration_timer.expires = 0xffffffffL;
+
+	rc = pm_runtime_get(&pdev->pdev->dev);
+	if (rc < 0) {
+		dev_err(&pdev->pdev->dev,
+			"Runtime PM: Unable to wake up tspp device, rc = %d",
+			rc);
+	}
+	return 0;
+
+err_event:
+	sps_disconnect(channel->pipe);
+err_connect:
+	dma_free_coherent(&pdev->pdev->dev, config->desc.size,
+		config->desc.base, config->desc.phys_base);
+err_desc_alloc:
+	sps_free_endpoint(channel->pipe);
+err_sps_alloc:
+	channel->used = 0;
+	return rc;
+}
+
+/**
+ * tspp_close_channel - close a TSPP channel.
+ *
+ * @dev: TSPP device (up to TSPP_MAX_DEVICES)
+ * @channel_id: Channel ID number (up to TSPP_NUM_CHANNELS)
+ *
+ * Return: error status
+ *
+ */
+int tspp_close_channel(u32 dev, u32 channel_id)
+{
+	int i;
+	int id;
+	int table_idx;
+	u32 val;
+	unsigned long flags;
+
+	struct sps_connect *config;
+	struct tspp_device *pdev;
+	struct tspp_channel *channel;
+
+	if (channel_id >= TSPP_NUM_CHANNELS) {
+		pr_err("tspp: channel id out of range");
+		return -ECHRNG;
+	}
+	pdev = tspp_find_by_id(dev);
+	if (!pdev) {
+		pr_err("tspp_close: can't find device %i", dev);
+		return -ENODEV;
+	}
+	channel = &pdev->channels[channel_id];
+
+	/* if the channel is not used, we are done */
+	if (!channel->used)
+		return 0;
+
+	/*
+	 * Need to protect access to used and waiting fields, as they are
+	 * used by the tasklet which is invoked from interrupt context
+	 */
+	spin_lock_irqsave(&pdev->spinlock, flags);
+	channel->used = 0;
+	channel->waiting = NULL;
+	spin_unlock_irqrestore(&pdev->spinlock, flags);
+
+	if (channel->expiration_period_ms)
+		del_timer(&channel->expiration_timer);
+
+	channel->notifier = NULL;
+	channel->notify_data = NULL;
+	channel->expiration_period_ms = 0;
+
+	config = &channel->config;
+	pdev = channel->pdev;
+
+	/* disable pipe (channel) */
+	val = readl_relaxed(pdev->base + TSPP_PS_DISABLE);
+	writel_relaxed(val | channel->id, pdev->base + TSPP_PS_DISABLE);
+	/* Assure PS_DISABLE register is set */
+	wmb();
+
+	/* unregister all filters for this channel */
+	for (table_idx = 0; table_idx < TSPP_FILTER_TABLES; table_idx++) {
+		for (i = 0; i < TSPP_NUM_PRIORITIES; i++) {
+			struct tspp_pid_filter *filter =
+				&pdev->filters[table_idx]->filter[i];
+			id = FILTER_GET_PIPE_NUMBER0(filter);
+			if (id == channel->id) {
+				if (FILTER_HAS_ENCRYPTION(filter))
+					tspp_free_key_entry(
+						FILTER_GET_KEY_NUMBER(filter));
+				filter->config = 0;
+				filter->filter = 0;
+			}
+		}
+	}
+	channel->filter_count = 0;
+
+	/* disconnect the bam */
+	if (sps_disconnect(channel->pipe) != 0)
+		pr_warn("tspp: Error freeing sps endpoint (%i)", channel->id);
+
+	/* destroy the buffers */
+	dma_free_coherent(&pdev->pdev->dev, config->desc.size,
+		config->desc.base, config->desc.phys_base);
+
+	sps_free_endpoint(channel->pipe);
+
+	tspp_destroy_buffers(channel_id, channel);
+
+	dma_pool_destroy(channel->dma_pool);
+	channel->dma_pool = NULL;
+
+	channel->src = TSPP_SOURCE_NONE;
+	channel->mode = TSPP_MODE_DISABLED;
+	channel->memfree = NULL;
+	channel->user_info = NULL;
+	channel->buffer_count = 0;
+	channel->data = NULL;
+	channel->read = NULL;
+	channel->locked = NULL;
+
+	if (tspp_channels_in_use(pdev) == 0) {
+		sps_deregister_bam_device(pdev->bam_handle);
+		pdev->bam_handle = SPS_DEV_HANDLE_INVALID;
+
+		__pm_relax(&pdev->ws);
+		tspp_clock_stop(pdev);
+	}
+
+	pm_runtime_put(&pdev->pdev->dev);
+
+	return 0;
+}
+
+/**
+ * tspp_get_ref_clk_counter - return the TSIF clock reference (TCR) counter.
+ *
+ * @dev: TSPP device (up to TSPP_MAX_DEVICES)
+ * @source: The TSIF source from which the counter should be read
+ * @tcr_counter: the value of TCR counter
+ *
+ * Return: error status
+ *
+ * TCR increments at a rate equal to 27 MHz/256 = 105.47 kHz.
+ * If the source is neither TSIF0 nor TSIF1, a counter value of 0 is
+ * returned.
+ */
+int tspp_get_ref_clk_counter(u32 dev, enum tspp_source source, u32 *tcr_counter)
+{
+	struct tspp_device *pdev;
+	struct tspp_tsif_device *tsif_device;
+
+	if (!tcr_counter)
+		return -EINVAL;
+
+	pdev = tspp_find_by_id(dev);
+	if (!pdev) {
+		pr_err("tspp_get_ref_clk_counter: can't find device %i\n", dev);
+		return -ENODEV;
+	}
+
+	switch (source) {
+	case TSPP_SOURCE_TSIF0:
+		tsif_device = &pdev->tsif[0];
+		break;
+
+	case TSPP_SOURCE_TSIF1:
+		tsif_device = &pdev->tsif[1];
+		break;
+
+	default:
+		tsif_device = NULL;
+		break;
+	}
+
+	if (tsif_device && tsif_device->ref_count)
+		*tcr_counter = ioread32(tsif_device->base + TSIF_CLK_REF_OFF);
+	else
+		*tcr_counter = 0;
+
+	return 0;
+}
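+
+/*
+ * Worked example (illustrative): at 27 MHz / 256 = 105468.75 Hz, two TCR
+ * samples taken one second apart differ by roughly 105469 ticks, so an
+ * elapsed time in milliseconds can be recovered as
+ *
+ *	delta_ms = (u64)(tcr_end - tcr_start) * 256 / 27000;
+ *
+ * where the unsigned subtraction tolerates a single counter wrap-around.
+ */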
+
+/**
+ * tspp_get_lpass_time_counter - return the LPASS timer counter value.
+ *
+ * @dev: TSPP device (up to TSPP_MAX_DEVICES)
+ * @source: The TSIF source from which the counter should be read
+ * @lpass_time_counter: the value of the LPASS timer counter
+ *
+ * Return: error status
+ *
+ * If the source is neither TSIF0 nor TSIF1, a counter value of 0 is
+ * returned.
+ */
+int tspp_get_lpass_time_counter(u32 dev, enum tspp_source source,
+			u64 *lpass_time_counter)
+{
+	struct tspp_device *pdev;
+	struct tspp_tsif_device *tsif_device;
+
+	if (!lpass_time_counter)
+		return -EINVAL;
+
+	pdev = tspp_find_by_id(dev);
+	if (!pdev) {
+		pr_err("tspp_get_lpass_time_counter: can't find device %i\n",
+		       dev);
+		return -ENODEV;
+	}
+
+	switch (source) {
+	case TSPP_SOURCE_TSIF0:
+		tsif_device = &pdev->tsif[0];
+		break;
+
+	case TSPP_SOURCE_TSIF1:
+		tsif_device = &pdev->tsif[1];
+		break;
+
+	default:
+		tsif_device = NULL;
+		break;
+	}
+
+	if (tsif_device && tsif_device->ref_count) {
+		if (avcs_core_query_timer(lpass_time_counter) < 0) {
+			pr_err("tspp_get_lpass_time_counter: read error\n");
+			*lpass_time_counter = 0;
+			return -ENETRESET;
+		}
+	} else {
+		*lpass_time_counter = 0;
+	}
+
+	return 0;
+}
+
+/**
+ * tspp_get_tts_source - Return the TTS source value.
+ *
+ * @dev: TSPP device (up to TSPP_MAX_DEVICES)
+ * @tts_source: updated TTS source type
+ *
+ * Return: error status
+ *
+ */
+int tspp_get_tts_source(u32 dev, int *tts_source)
+{
+	struct tspp_device *pdev;
+
+	if (tts_source == NULL)
+		return -EINVAL;
+
+	pdev = tspp_find_by_id(dev);
+	if (!pdev) {
+		pr_err("tspp_get_tts_source: can't find device %i\n",
+		       dev);
+		return -ENODEV;
+	}
+
+	*tts_source = pdev->tts_source;
+
+	return 0;
+}
+
+/**
+ * tspp_add_filter - add a TSPP filter to a channel.
+ *
+ * @dev: TSPP device (up to TSPP_MAX_DEVICES)
+ * @channel_id: Channel ID number (up to TSPP_NUM_CHANNELS)
+ * @filter: TSPP filter parameters
+ *
+ * Return: error status
+ *
+ */
+int tspp_add_filter(u32 dev, u32 channel_id,
+	struct tspp_filter *filter)
+{
+	int i, rc;
+	int other_channel;
+	int entry;
+	u32 val, pid, enabled;
+	struct tspp_device *pdev;
+	struct tspp_pid_filter p;
+	struct tspp_channel *channel;
+
+	TSPP_DEBUG("tspp: add filter");
+	if (channel_id >= TSPP_NUM_CHANNELS) {
+		pr_err("tspp: channel id out of range");
+		return -ECHRNG;
+	}
+	pdev = tspp_find_by_id(dev);
+	if (!pdev) {
+		pr_err("tspp_add: can't find device %i", dev);
+		return -ENODEV;
+	}
+
+	channel = &pdev->channels[channel_id];
+
+	if (filter->source > TSPP_SOURCE_MEM) {
+		pr_err("tspp invalid source");
+		return -ENOSR;
+	}
+
+	if (filter->priority >= TSPP_NUM_PRIORITIES) {
+		pr_err("tspp invalid filter priority");
+		return -ENOSR;
+	}
+
+	channel->mode = filter->mode;
+	/*
+	 * if buffers are already allocated, verify they fulfil
+	 * the alignment requirements.
+	 */
+	if ((channel->buffer_count > 0) &&
+	   (!tspp_is_buffer_size_aligned(channel->buffer_size, channel->mode)))
+		pr_warn("tspp: buffers allocated with incorrect alignment\n");
+
+	if (filter->mode == TSPP_MODE_PES) {
+		for (i = 0; i < TSPP_NUM_PRIORITIES; i++) {
+			struct tspp_pid_filter *tspp_filter =
+				&pdev->filters[channel->src]->filter[i];
+			pid = FILTER_GET_PIPE_PID((tspp_filter));
+			enabled = FILTER_GET_PIPE_PROCESS0(tspp_filter);
+			if (enabled && (pid == filter->pid)) {
+				other_channel =
+					FILTER_GET_PIPE_NUMBER0(tspp_filter);
+				pr_err("tspp: pid 0x%x already in use by channel %i",
+					filter->pid, other_channel);
+				return -EBADSLT;
+			}
+		}
+	}
+
+	/* make sure this priority is not already in use */
+	enabled = FILTER_GET_PIPE_PROCESS0(
+		(&(pdev->filters[channel->src]->filter[filter->priority])));
+	if (enabled) {
+		pr_err("tspp: filter priority %i source %i is already enabled\n",
+			filter->priority, channel->src);
+		return -ENOSR;
+	}
+
+	if (channel->mode == TSPP_MODE_PES) {
+		/*
+		 * if we are already processing in PES mode, disable pipe
+		 * (channel) and filter to be updated
+		 */
+		val = readl_relaxed(pdev->base + TSPP_PS_DISABLE);
+		writel_relaxed(val | (1 << channel->id),
+			pdev->base + TSPP_PS_DISABLE);
+		/* Assure PS_DISABLE register is set */
+		wmb();
+	}
+
+	/* update entry */
+	p.filter = 0;
+	p.config = FILTER_TRANS_END_DISABLE;
+	FILTER_SET_PIPE_PROCESS0((&p), filter->mode);
+	FILTER_SET_PIPE_PID((&p), filter->pid);
+	FILTER_SET_PID_MASK((&p), filter->mask);
+	FILTER_SET_PIPE_NUMBER0((&p), channel->id);
+	FILTER_SET_PIPE_PROCESS1((&p), TSPP_MODE_DISABLED);
+	if (filter->decrypt) {
+		entry = tspp_get_key_entry();
+		if (entry == -1) {
+			pr_err("tspp: no more keys available!");
+		} else {
+			p.config |= FILTER_DECRYPT;
+			FILTER_SET_KEY_NUMBER((&p), entry);
+		}
+	}
+
+	pdev->filters[channel->src]->
+		filter[filter->priority].config = p.config;
+	pdev->filters[channel->src]->
+		filter[filter->priority].filter = p.filter;
+
+	/*
+	 * allocate buffers if needed (i.e. if the user has not already called
+	 * tspp_allocate_buffers() explicitly).
+	 */
+	if (channel->buffer_count == 0) {
+		channel->buffer_size =
+		tspp_align_buffer_size_by_mode(channel->buffer_size,
+							channel->mode);
+		rc = tspp_allocate_buffers(dev, channel->id,
+					channel->max_buffers,
+					channel->buffer_size,
+					channel->int_freq, NULL, NULL, NULL);
+		if (rc != 0) {
+			pr_err("tspp: tspp_allocate_buffers failed\n");
+			return rc;
+		}
+	}
+
+	/* reenable pipe */
+	val = readl_relaxed(pdev->base + TSPP_PS_DISABLE);
+	writel_relaxed(val & ~(1 << channel->id), pdev->base + TSPP_PS_DISABLE);
+	/* Assure PS_DISABLE register is reset */
+	wmb();
+	val = readl_relaxed(pdev->base + TSPP_PS_DISABLE);
+
+	channel->filter_count++;
+
+	return 0;
+}
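+
+/*
+ * End-to-end sketch (illustrative, not part of the driver): capturing
+ * RAW-mode packets for one PID on device 0, channel 0, builds on the
+ * earlier source-selection sketch:
+ *
+ *	struct tspp_filter flt = {
+ *		.priority = 0,
+ *		.pid = 0x100,
+ *		.mask = 0x1FFF,
+ *		.mode = TSPP_MODE_RAW,
+ *		.source = TSPP_SOURCE_TSIF0,
+ *		.decrypt = 0,
+ *	};
+ *
+ *	tspp_open_channel(0, 0);
+ *	tspp_open_stream(0, 0, &src);
+ *	tspp_add_filter(0, 0, &flt);
+ *
+ * with src initialized as shown after tspp_open_stream() above.
+ */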
+
+/**
+ * tspp_remove_filter - remove a TSPP filter from a channel.
+ *
+ * @dev: TSPP device (up to TSPP_MAX_DEVICES)
+ * @channel_id: Channel ID number (up to TSPP_NUM_CHANNELS)
+ * @filter: TSPP filter parameters
+ *
+ * Return: error status
+ *
+ */
+int tspp_remove_filter(u32 dev, u32 channel_id,
+	struct tspp_filter *filter)
+{
+	int entry;
+	u32 val;
+	struct tspp_device *pdev;
+	int src;
+	struct tspp_pid_filter *tspp_filter;
+	struct tspp_channel *channel;
+
+	if (channel_id >= TSPP_NUM_CHANNELS) {
+		pr_err("tspp: channel id out of range");
+		return -ECHRNG;
+	}
+	if (!filter) {
+		pr_err("tspp: NULL filter pointer");
+		return -EINVAL;
+	}
+	pdev = tspp_find_by_id(dev);
+	if (!pdev) {
+		pr_err("tspp_remove: can't find device %i", dev);
+		return -ENODEV;
+	}
+	if (filter->priority >= TSPP_NUM_PRIORITIES) {
+		pr_err("tspp invalid filter priority");
+		return -ENOSR;
+	}
+	channel = &pdev->channels[channel_id];
+
+	src = channel->src;
+	if ((src == TSPP_SOURCE_TSIF0) || (src == TSPP_SOURCE_TSIF1))
+		tspp_filter = &(pdev->filters[src]->filter[filter->priority]);
+	else {
+		pr_err("tspp_remove: wrong source type %d", src);
+		return -EINVAL;
+	}
+
+	/* disable pipe (channel) */
+	val = readl_relaxed(pdev->base + TSPP_PS_DISABLE);
+	writel_relaxed(val | channel->id, pdev->base + TSPP_PS_DISABLE);
+	/* Assure PS_DISABLE register is set */
+	wmb();
+
+	/* update data keys */
+	if (tspp_filter->config & FILTER_DECRYPT) {
+		entry = FILTER_GET_KEY_NUMBER(tspp_filter);
+		tspp_free_key_entry(entry);
+	}
+
+	/* update pid table */
+	tspp_filter->config = 0;
+	tspp_filter->filter = 0;
+
+	channel->filter_count--;
+
+	/* reenable pipe */
+	val = readl_relaxed(pdev->base + TSPP_PS_DISABLE);
+	writel_relaxed(val & ~(1 << channel->id),
+		pdev->base + TSPP_PS_DISABLE);
+	/* Assure PS_DISABLE register is reset */
+	wmb();
+	val = readl_relaxed(pdev->base + TSPP_PS_DISABLE);
+
+	return 0;
+}
+
+/**
+ * tspp_set_key - set TSPP key in key table.
+ *
+ * @dev: TSPP device (up to TSPP_MAX_DEVICES)
+ * @channel_id: Channel ID number (up to TSPP_NUM_CHANNELS)
+ * @key: TSPP key parameters
+ *
+ * Return: error status
+ *
+ */
+int tspp_set_key(u32 dev, u32 channel_id, struct tspp_key *key)
+{
+	int i;
+	int id;
+	int key_index;
+	int data;
+	struct tspp_channel *channel;
+	struct tspp_device *pdev;
+
+	if (channel_id >= TSPP_NUM_CHANNELS) {
+		pr_err("tspp: channel id out of range");
+		return -ECHRNG;
+	}
+	pdev = tspp_find_by_id(dev);
+	if (!pdev) {
+		pr_err("tspp_set: can't find device %i", dev);
+		return -ENODEV;
+	}
+	channel = &pdev->channels[channel_id];
+
+	/* read the key index used by this channel */
+	for (i = 0; i < TSPP_NUM_PRIORITIES; i++) {
+		struct tspp_pid_filter *tspp_filter =
+			&(pdev->filters[channel->src]->filter[i]);
+		id = FILTER_GET_PIPE_NUMBER0(tspp_filter);
+		if (id == channel->id) {
+			if (FILTER_HAS_ENCRYPTION(tspp_filter)) {
+				key_index = FILTER_GET_KEY_NUMBER(tspp_filter);
+				break;
+			}
+		}
+	}
+	if (i == TSPP_NUM_PRIORITIES) {
+		pr_err("tspp: no encryption on this channel");
+		return -ENOKEY;
+	}
+
+	if (key->parity == TSPP_KEY_PARITY_EVEN) {
+		pdev->tspp_key_table->entry[key_index].even_lsb = key->lsb;
+		pdev->tspp_key_table->entry[key_index].even_msb = key->msb;
+	} else {
+		pdev->tspp_key_table->entry[key_index].odd_lsb = key->lsb;
+		pdev->tspp_key_table->entry[key_index].odd_msb = key->msb;
+	}
+	data = readl_relaxed(channel->pdev->base + TSPP_KEY_VALID);
+
+	return 0;
+}
+
+/**
+ * tspp_register_notification - register TSPP channel notification function.
+ *
+ * @dev: TSPP device (up to TSPP_MAX_DEVICES)
+ * @channel_id: Channel ID number (up to TSPP_NUM_CHANNELS)
+ * @notify: notification function
+ * @userdata: user data to pass to notification function
+ * @timer_ms: notification timer period (in ms) for partially filled buffers
+ *
+ * Return: error status
+ *
+ */
+int tspp_register_notification(u32 dev, u32 channel_id,
+	tspp_notifier *notify, void *userdata, u32 timer_ms)
+{
+	struct tspp_channel *channel;
+	struct tspp_device *pdev;
+
+	if (channel_id >= TSPP_NUM_CHANNELS) {
+		pr_err("tspp: channel id out of range");
+		return -ECHRNG;
+	}
+	pdev = tspp_find_by_id(dev);
+	if (!pdev) {
+		pr_err("tspp_reg: can't find device %i", dev);
+		return -ENODEV;
+	}
+	channel = &pdev->channels[channel_id];
+	channel->notifier = notify;
+	channel->notify_data = userdata;
+	channel->expiration_period_ms = timer_ms;
+
+	return 0;
+}
+
+/**
+ * tspp_unregister_notification - unregister TSPP channel notification function.
+ *
+ * @dev: TSPP device (up to TSPP_MAX_DEVICES)
+ * @channel_id: Channel ID number (up to TSPP_NUM_CHANNELS)
+ *
+ * Return: error status
+ *
+ */
+int tspp_unregister_notification(u32 dev, u32 channel_id)
+{
+	struct tspp_channel *channel;
+	struct tspp_device *pdev;
+
+	if (channel_id >= TSPP_NUM_CHANNELS) {
+		pr_err("tspp: channel id out of range");
+		return -ECHRNG;
+	}
+	pdev = tspp_find_by_id(dev);
+	if (!pdev) {
+		pr_err("tspp_unreg: can't find device %i", dev);
+		return -ENODEV;
+	}
+	channel = &pdev->channels[channel_id];
+	channel->notifier = NULL;
+	channel->notify_data = 0;
+	return 0;
+}
+
+/**
+ * tspp_get_buffer - get TSPP data buffer.
+ *
+ * @dev: TSPP device (up to TSPP_MAX_DEVICES)
+ * @channel_id: Channel ID number (up to TSPP_NUM_CHANNELS)
+ *
+ * Return: pointer to a filled data buffer descriptor, or NULL on failure
+ *
+ */
+const struct tspp_data_descriptor *tspp_get_buffer(u32 dev, u32 channel_id)
+{
+	struct tspp_mem_buffer *buffer;
+	struct tspp_channel *channel;
+	struct tspp_device *pdev;
+	unsigned long flags;
+
+	if (channel_id >= TSPP_NUM_CHANNELS) {
+		pr_err("tspp: channel id out of range");
+		return NULL;
+	}
+	pdev = tspp_find_by_id(dev);
+	if (!pdev) {
+		pr_err("tspp_get: can't find device %i", dev);
+		return NULL;
+	}
+
+	spin_lock_irqsave(&pdev->spinlock, flags);
+
+	channel = &pdev->channels[channel_id];
+
+	if (!channel->read) {
+		spin_unlock_irqrestore(&pdev->spinlock, flags);
+		pr_warn("tspp: no buffer to get on channel %i!",
+			channel->id);
+		return NULL;
+	}
+
+	buffer = channel->read;
+	/* see if we have any buffers ready to read */
+	if (buffer->state != TSPP_BUF_STATE_DATA) {
+		spin_unlock_irqrestore(&pdev->spinlock, flags);
+		return NULL;
+	}
+
+	if (buffer->state == TSPP_BUF_STATE_DATA) {
+		/* mark the buffer as busy */
+		buffer->state = TSPP_BUF_STATE_LOCKED;
+
+		/* increment the pointer along the list */
+		channel->read = channel->read->next;
+	}
+
+	spin_unlock_irqrestore(&pdev->spinlock, flags);
+
+	return &buffer->desc;
+}
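+
+/*
+ * Consumption sketch (illustrative, not part of the driver): a client
+ * notifier typically drains the channel by alternating the two calls,
+ * e.g. for device 0, channel 0:
+ *
+ *	const struct tspp_data_descriptor *d;
+ *
+ *	while ((d = tspp_get_buffer(0, 0)) != NULL) {
+ *		consume(d->virt_base, d->size);
+ *		tspp_release_buffer(0, 0, d->id);
+ *	}
+ *
+ * where consume() stands in for the client's own handling.
+ */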
+
+/**
+ * tspp_release_buffer - release TSPP data buffer back to TSPP.
+ *
+ * @dev: TSPP device (up to TSPP_MAX_DEVICES)
+ * @channel_id: Channel ID number (up to TSPP_NUM_CHANNELS)
+ * @descriptor_id: buffer descriptor ID
+ *
+ * Return: error status
+ *
+ */
+int tspp_release_buffer(u32 dev, u32 channel_id, u32 descriptor_id)
+{
+	int i, found = 0;
+	struct tspp_mem_buffer *buffer;
+	struct tspp_channel *channel;
+	struct tspp_device *pdev;
+	unsigned long flags;
+
+	if (channel_id >= TSPP_NUM_CHANNELS) {
+		pr_err("tspp: channel id out of range");
+		return -ECHRNG;
+	}
+	pdev = tspp_find_by_id(dev);
+	if (!pdev) {
+		pr_err("tspp: can't find device %i", dev);
+		return -ENODEV;
+	}
+
+	spin_lock_irqsave(&pdev->spinlock, flags);
+
+	channel = &pdev->channels[channel_id];
+
+	if (descriptor_id > channel->buffer_count)
+		pr_warn("tspp: desc id looks weird 0x%08x", descriptor_id);
+
+	/* find the correct descriptor */
+	buffer = channel->locked;
+	for (i = 0; i < channel->buffer_count; i++) {
+		if (buffer->desc.id == descriptor_id) {
+			found = 1;
+			break;
+		}
+		buffer = buffer->next;
+	}
+	channel->locked = channel->locked->next;
+
+	if (!found) {
+		spin_unlock_irqrestore(&pdev->spinlock, flags);
+		pr_err("tspp: cant find desc %i", descriptor_id);
+		return -EINVAL;
+	}
+
+	/* make sure the buffer is in the expected state */
+	if (buffer->state != TSPP_BUF_STATE_LOCKED) {
+		spin_unlock_irqrestore(&pdev->spinlock, flags);
+		pr_err("tspp: buffer %i not locked", descriptor_id);
+		return -EINVAL;
+	}
+	/* unlock the buffer and requeue it */
+	buffer->state = TSPP_BUF_STATE_WAITING;
+
+	if (tspp_queue_buffer(channel, buffer))
+		pr_warn("tspp: can't requeue buffer");
+
+	spin_unlock_irqrestore(&pdev->spinlock, flags);
+
+	return 0;
+}
+
+/**
+ * tspp_allocate_buffers - allocate TSPP data buffers.
+ *
+ * @dev: TSPP device (up to TSPP_MAX_DEVICES)
+ * @channel_id: Channel ID number (up to TSPP_NUM_CHANNELS)
+ * @count: number of buffers to allocate
+ * @size: size of each buffer to allocate
+ * @int_freq: interrupt frequency
+ * @alloc: user defined memory allocator function. Pass NULL for default.
+ * @memfree: user defined memory free function. Pass NULL for default.
+ * @user: user data to pass to the memory allocator/free function
+ *
+ * Return  0 on success, or a negative error code
+ *
+ * The user can optionally call this function explicitly to allocate the TSPP
+ * data buffers. Alternatively, if the user did not call this function, it
+ * is called implicitly by tspp_add_filter().
+ */
+int tspp_allocate_buffers(u32 dev, u32 channel_id, u32 count, u32 size,
+			u32 int_freq, tspp_allocator *alloc,
+			tspp_memfree *memfree, void *user)
+{
+	struct tspp_channel *channel;
+	struct tspp_device *pdev;
+	struct tspp_mem_buffer *last = NULL;
+
+	TSPP_DEBUG("tspp_allocate_buffers");
+
+	if (channel_id >= TSPP_NUM_CHANNELS) {
+		pr_err("%s: channel id out of range", __func__);
+		return -ECHRNG;
+	}
+
+	pdev = tspp_find_by_id(dev);
+	if (!pdev) {
+		pr_err("%s: can't find device %i", __func__, dev);
+		return -ENODEV;
+	}
+
+	if (count < MIN_ACCEPTABLE_BUFFER_COUNT) {
+		pr_err("%s: tspp requires a minimum of %i buffers\n",
+			__func__, MIN_ACCEPTABLE_BUFFER_COUNT);
+		return -EINVAL;
+	}
+
+	if (count > TSPP_NUM_BUFFERS) {
+		pr_err("%s: tspp supports at most %i buffers\n",
+			__func__, TSPP_NUM_BUFFERS);
+		return -EINVAL;
+	}
+
+	channel = &pdev->channels[channel_id];
+
+	/* allow buffer allocation only if there was no previous buffer
+	 * allocation for this channel.
+	 */
+	if (channel->buffer_count > 0) {
+		pr_err("%s: buffers already allocated for channel %u",
+			__func__, channel_id);
+		return -EINVAL;
+	}
+
+	channel->max_buffers = count;
+
+	/* set up interrupt frequency */
+	if (int_freq > channel->max_buffers) {
+		int_freq = channel->max_buffers;
+		pr_warn("%s: setting interrupt frequency to %u\n",
+			__func__, int_freq);
+	}
+	channel->int_freq = int_freq;
+	/*
+	 * it is the responsibility of the caller of tspp_allocate_buffers(),
+	 * whether that is the user or the driver, to make sure the size
+	 * parameter is compatible with the channel mode.
+	 */
+	channel->buffer_size = size;
+
+	/* save user defined memory free function for later use */
+	channel->memfree = memfree;
+	channel->user_info = user;
+
+	/*
+	 * For small buffers, create a DMA pool so that memory
+	 * is not wasted through dma_alloc_coherent.
+	 */
+	if (TSPP_USE_DMA_POOL(channel->buffer_size)) {
+		channel->dma_pool = dma_pool_create("tspp",
+			&pdev->pdev->dev, channel->buffer_size, 0, 0);
+		if (!channel->dma_pool) {
+			pr_err("%s: Can't allocate memory pool\n", __func__);
+			return -ENOMEM;
+		}
+	} else {
+		channel->dma_pool = NULL;
+	}
+
+	for (channel->buffer_count = 0;
+		channel->buffer_count < channel->max_buffers;
+		channel->buffer_count++) {
+
+		/* allocate the descriptor */
+		struct tspp_mem_buffer *desc =
+			kmalloc(sizeof(*desc), GFP_KERNEL);
+		if (!desc) {
+			pr_warn("%s: Can't allocate desc %i",
+				__func__, channel->buffer_count);
+			break;
+		}
+
+		desc->desc.id = channel->buffer_count;
+		/* allocate the buffer */
+		if (tspp_alloc_buffer(channel_id, &desc->desc,
+			channel->buffer_size, channel->dma_pool,
+			alloc, user) != 0) {
+			kfree(desc);
+			pr_warn("%s: Can't allocate buffer %i",
+				__func__, channel->buffer_count);
+			break;
+		}
+
+		/* add the descriptor to the circular list */
+		desc->filled = 0;
+		desc->read_index = 0;
+		if (!channel->data)
+			channel->data = desc;
+		else if (last)
+			last->next = desc;
+		last = desc;
+		desc->next = channel->data;
+
+		/* prepare the sps descriptor */
+		desc->sps.phys_base = desc->desc.phys_base;
+		desc->sps.base = desc->desc.virt_base;
+		desc->sps.size = desc->desc.size;
+
+		/* start the transfer */
+		if (tspp_queue_buffer(channel, desc))
+			pr_err("%s: can't queue buffer %i",
+				__func__, desc->desc.id);
+	}
+
+	if (channel->buffer_count < channel->max_buffers) {
+		/*
+		 * we failed to allocate the requested number of buffers.
+		 * we don't allow a partial success, so need to clean up here.
+		 */
+		tspp_destroy_buffers(channel_id, channel);
+		channel->buffer_count = 0;
+
+		dma_pool_destroy(channel->dma_pool);
+		channel->dma_pool = NULL;
+		return -ENOMEM;
+	}
+
+	channel->waiting = channel->data;
+	channel->read = channel->data;
+	channel->locked = channel->data;
+
+	/* Now that buffers are scheduled to HW, kick data expiration timer */
+	if (channel->expiration_period_ms)
+		mod_timer(&channel->expiration_timer,
+			jiffies +
+			MSEC_TO_JIFFIES(
+				channel->expiration_period_ms));
+
+	return 0;
+}
+
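+/*
+ * Illustrative sketch of the buffer lifecycle exposed by the three
+ * functions above (the device/channel ids, the buffer geometry and the
+ * consume() helper are hypothetical, not part of this driver):
+ *
+ *	const struct tspp_data_descriptor *desc;
+ *
+ *	if (tspp_allocate_buffers(0, 0, MIN_ACCEPTABLE_BUFFER_COUNT,
+ *				  192 * 170, 1, NULL, NULL, NULL))
+ *		return;
+ *	while ((desc = tspp_get_buffer(0, 0)) != NULL) {
+ *		consume(desc->virt_base, desc->size);
+ *		tspp_release_buffer(0, 0, desc->id);
+ *	}
+ */
+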
+/*** debugfs ***/
+static int debugfs_iomem_x32_set(void *data, u64 val)
+{
+	int rc;
+	int clock_started = 0;
+	struct tspp_device *pdev;
+
+	pdev = tspp_find_by_id(0);
+	if (!pdev) {
+		pr_err("%s: can't find device 0\n", __func__);
+		return 0;
+	}
+
+	if (tspp_channels_in_use(pdev) == 0) {
+		rc = tspp_clock_start(pdev);
+		if (rc) {
+			pr_err("%s: tspp_clock_start failed %d\n",
+				__func__, rc);
+			return 0;
+		}
+		clock_started = 1;
+	}
+
+	writel_relaxed(val, data);
+	/* Assure register write */
+	wmb();
+
+	if (clock_started)
+		tspp_clock_stop(pdev);
+	return 0;
+}
+
+static int debugfs_iomem_x32_get(void *data, u64 *val)
+{
+	int rc;
+	int clock_started = 0;
+	struct tspp_device *pdev;
+
+	pdev = tspp_find_by_id(0);
+	if (!pdev) {
+		pr_err("%s: can't find device 0\n", __func__);
+		*val = 0;
+		return 0;
+	}
+
+	if (tspp_channels_in_use(pdev) == 0) {
+		rc = tspp_clock_start(pdev);
+		if (rc) {
+			pr_err("%s: tspp_clock_start failed %d\n",
+				__func__, rc);
+			*val = 0;
+			return 0;
+		}
+		clock_started = 1;
+	}
+
+	*val = readl_relaxed(data);
+
+	if (clock_started)
+		tspp_clock_stop(pdev);
+	return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(fops_iomem_x32, debugfs_iomem_x32_get,
+			debugfs_iomem_x32_set, "0x%08llx");
+
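+/*
+ * Each debugfs file bound to fops_iomem_x32 reads or writes a single
+ * 32-bit TSPP register; the get/set helpers above start the TSPP clocks
+ * on demand when no channel currently keeps them running.
+ */
+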
+static void tsif_debugfs_init(struct tspp_tsif_device *tsif_device,
+	int instance)
+{
+	char name[10];
+
+	snprintf(name, sizeof(name), "tsif%i", instance);
+	tsif_device->dent_tsif = debugfs_create_dir(name, NULL);
+	if (tsif_device->dent_tsif) {
+		int i;
+		void __iomem *base = tsif_device->base;
+
+		for (i = 0; i < ARRAY_SIZE(debugfs_tsif_regs); i++) {
+			tsif_device->debugfs_tsif_regs[i] =
+			   debugfs_create_file(
+				debugfs_tsif_regs[i].name,
+				debugfs_tsif_regs[i].mode,
+				tsif_device->dent_tsif,
+				base + debugfs_tsif_regs[i].offset,
+				&fops_iomem_x32);
+		}
+
+		debugfs_create_u32(
+			"stat_rx_chunks",
+			S_IRUGO | S_IWUSR | S_IWGRP,
+			tsif_device->dent_tsif,
+			&tsif_device->stat_rx);
+
+		debugfs_create_u32(
+			"stat_overflow",
+			S_IRUGO | S_IWUSR | S_IWGRP,
+			tsif_device->dent_tsif,
+			&tsif_device->stat_overflow);
+
+		debugfs_create_u32(
+			"stat_lost_sync",
+			S_IRUGO | S_IWUSR | S_IWGRP,
+			tsif_device->dent_tsif,
+			&tsif_device->stat_lost_sync);
+
+		debugfs_create_u32(
+			"stat_timeout",
+			S_IRUGO | S_IWUSR | S_IWGRP,
+			tsif_device->dent_tsif,
+			&tsif_device->stat_timeout);
+	}
+}
+
+static void tsif_debugfs_exit(struct tspp_tsif_device *tsif_device)
+{
+	int i;
+
+	debugfs_remove_recursive(tsif_device->dent_tsif);
+	tsif_device->dent_tsif = NULL;
+	for (i = 0; i < ARRAY_SIZE(debugfs_tsif_regs); i++)
+		tsif_device->debugfs_tsif_regs[i] = NULL;
+}
+
+static void tspp_debugfs_init(struct tspp_device *device, int instance)
+{
+	char name[10];
+
+	snprintf(name, sizeof(name), "tspp%i", instance);
+	device->dent = debugfs_create_dir(name, NULL);
+	if (device->dent) {
+		int i;
+		void __iomem *base = device->base;
+
+		for (i = 0; i < ARRAY_SIZE(debugfs_tspp_regs); i++)
+			device->debugfs_regs[i] =
+			   debugfs_create_file(
+				debugfs_tspp_regs[i].name,
+				debugfs_tspp_regs[i].mode,
+				device->dent,
+				base + debugfs_tspp_regs[i].offset,
+				&fops_iomem_x32);
+	}
+}
+
+static void tspp_debugfs_exit(struct tspp_device *device)
+{
+	int i;
+
+	debugfs_remove_recursive(device->dent);
+	for (i = 0; i < ARRAY_SIZE(debugfs_tspp_regs); i++)
+		device->debugfs_regs[i] = NULL;
+}
+
+static int msm_tspp_map_irqs(struct platform_device *pdev,
+				struct tspp_device *device)
+{
+	int rc;
+
+	/* get IRQ numbers from platform information */
+
+	/* map TSPP IRQ */
+	rc = platform_get_irq_byname(pdev, "TSIF_TSPP_IRQ");
+	if (rc > 0) {
+		device->tspp_irq = rc;
+	} else {
+		dev_err(&pdev->dev, "failed to get TSPP IRQ");
+		return -EINVAL;
+	}
+
+	/* map TSIF IRQs */
+	rc = platform_get_irq_byname(pdev, "TSIF0_IRQ");
+	if (rc > 0) {
+		device->tsif[0].tsif_irq = rc;
+	} else {
+		dev_err(&pdev->dev, "failed to get TSIF0 IRQ");
+		return -EINVAL;
+	}
+
+	rc = platform_get_irq_byname(pdev, "TSIF1_IRQ");
+	if (rc > 0) {
+		device->tsif[1].tsif_irq = rc;
+	} else {
+		dev_err(&pdev->dev, "failed to get TSIF1 IRQ");
+		return -EINVAL;
+	}
+
+	/* map BAM IRQ */
+	rc = platform_get_irq_byname(pdev, "TSIF_BAM_IRQ");
+	if (rc > 0) {
+		device->bam_irq = rc;
+	} else {
+		dev_err(&pdev->dev, "failed to get TSPP BAM IRQ");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int msm_tspp_probe(struct platform_device *pdev)
+{
+	int rc = -ENODEV;
+	u32 version;
+	u32 i;
+	struct tspp_device *device;
+	struct device_node *child;
+	struct resource *mem_tsif0;
+	struct resource *mem_tsif1;
+	struct resource *mem_tspp;
+	struct resource *mem_bam;
+	struct msm_bus_scale_pdata *tspp_bus_pdata = NULL;
+	unsigned long rate;
+
+	if (pdev->dev.of_node) {
+		/* ID is always 0 since there is only 1 instance of TSPP */
+		pdev->id = 0;
+		tspp_bus_pdata = msm_bus_cl_get_pdata(pdev);
+	} else {
+		/* must have device tree data */
+		pr_err("tspp: Device tree data not available\n");
+		rc = -EINVAL;
+		goto out;
+	}
+
+	/* OK, we will use this device */
+	device = kzalloc(sizeof(struct tspp_device), GFP_KERNEL);
+	if (!device) {
+		rc = -ENOMEM;
+		goto out;
+	}
+
+	/* set up references */
+	device->pdev = pdev;
+	platform_set_drvdata(pdev, device);
+
+	/* setup pin control */
+	rc = tspp_get_pinctrl(device);
+	if (rc) {
+		pr_err("tspp: failed to get pin control data, rc=%d\n", rc);
+		goto err_pinctrl;
+	}
+
+	/* register bus client */
+	if (tspp_bus_pdata) {
+		device->tsif_bus_client =
+			msm_bus_scale_register_client(tspp_bus_pdata);
+		if (!device->tsif_bus_client)
+			pr_err("tspp: Unable to register bus client\n");
+	} else {
+		device->tsif_bus_client = 0;
+	}
+
+	/* map regulators */
+	device->tsif_vreg = devm_regulator_get_optional(&pdev->dev, "vdd_cx");
+	if (IS_ERR(device->tsif_vreg)) {
+		rc = PTR_ERR(device->tsif_vreg);
+		device->tsif_vreg = NULL;
+		if (rc == -ENODEV) {
+			pr_notice("%s: vdd_cx regulator will not be used\n",
+				__func__);
+		} else {
+			dev_err(&pdev->dev,
+				"failed to get CX regulator, err=%d\n", rc);
+			goto err_regulator;
+		}
+	} else {
+		/* Set an initial voltage and enable the regulator */
+		rc = regulator_set_voltage(device->tsif_vreg,
+					RPM_REGULATOR_CORNER_NONE,
+					RPM_REGULATOR_CORNER_SUPER_TURBO);
+		if (rc) {
+			dev_err(&pdev->dev, "Unable to set CX voltage.\n");
+			goto err_regulator;
+		}
+
+		rc = regulator_enable(device->tsif_vreg);
+		if (rc) {
+			dev_err(&pdev->dev, "Unable to enable CX regulator.\n");
+			goto err_regulator;
+		}
+	}
+
+	/* map clocks */
+	device->tsif_pclk = clk_get(&pdev->dev, "iface_clk");
+	if (IS_ERR(device->tsif_pclk)) {
+		rc = PTR_ERR(device->tsif_pclk);
+		device->tsif_pclk = NULL;
+		goto err_pclock;
+	}
+
+	device->tsif_ref_clk = clk_get(&pdev->dev, "ref_clk");
+	if (IS_ERR(device->tsif_ref_clk)) {
+		rc = PTR_ERR(device->tsif_ref_clk);
+		device->tsif_ref_clk = NULL;
+		goto err_refclock;
+	}
+	rate = clk_round_rate(device->tsif_ref_clk, 1);
+	rc = clk_set_rate(device->tsif_ref_clk, rate);
+	if (rc)
+		goto err_res_tsif0;
+
+	/* map I/O memory */
+	mem_tsif0 = platform_get_resource_byname(pdev,
+				IORESOURCE_MEM, "MSM_TSIF0_PHYS");
+	if (!mem_tsif0) {
+		pr_err("tspp: Missing tsif0 MEM resource\n");
+		rc = -ENXIO;
+		goto err_res_tsif0;
+	}
+	device->tsif[0].base = ioremap(mem_tsif0->start,
+		resource_size(mem_tsif0));
+	if (!device->tsif[0].base) {
+		pr_err("tspp: ioremap failed\n");
+		rc = -ENOMEM;
+		goto err_map_tsif0;
+	}
+
+	mem_tsif1 = platform_get_resource_byname(pdev,
+				IORESOURCE_MEM, "MSM_TSIF1_PHYS");
+	if (!mem_tsif1) {
+		dev_err(&pdev->dev, "Missing tsif1 MEM resource\n");
+		rc = -ENXIO;
+		goto err_res_tsif1;
+	}
+	device->tsif[1].base = ioremap(mem_tsif1->start,
+		resource_size(mem_tsif1));
+	if (!device->tsif[1].base) {
+		dev_err(&pdev->dev, "ioremap failed");
+		rc = -ENOMEM;
+		goto err_map_tsif1;
+	}
+
+	mem_tspp = platform_get_resource_byname(pdev,
+				IORESOURCE_MEM, "MSM_TSPP_PHYS");
+	if (!mem_tspp) {
+		dev_err(&pdev->dev, "Missing MEM resource");
+		rc = -ENXIO;
+		goto err_res_dev;
+	}
+	device->base = ioremap(mem_tspp->start, resource_size(mem_tspp));
+	if (!device->base) {
+		dev_err(&pdev->dev, "ioremap failed");
+		rc = -ENOMEM;
+		goto err_map_dev;
+	}
+
+	mem_bam = platform_get_resource_byname(pdev,
+				IORESOURCE_MEM, "MSM_TSPP_BAM_PHYS");
+	if (!mem_bam) {
+		pr_err("tspp: Missing bam MEM resource");
+		rc = -ENXIO;
+		goto err_res_bam;
+	}
+	memset(&device->bam_props, 0, sizeof(device->bam_props));
+	device->bam_props.phys_addr = mem_bam->start;
+	device->bam_props.virt_addr = ioremap(mem_bam->start,
+		resource_size(mem_bam));
+	if (!device->bam_props.virt_addr) {
+		dev_err(&pdev->dev, "ioremap failed");
+		rc = -ENOMEM;
+		goto err_map_bam;
+	}
+
+	rc = msm_tspp_map_irqs(pdev, device);
+	if (rc)
+		goto err_irq;
+	device->req_irqs = false;
+
+	/* Check whether AV timer time stamps are enabled */
+	if (!of_property_read_u32(pdev->dev.of_node, "qcom,lpass-timer-tts",
+				  &device->tts_source)) {
+		if (device->tts_source == 1)
+			device->tts_source = TSIF_TTS_LPASS_TIMER;
+		else
+			device->tts_source = TSIF_TTS_TCR;
+	} else {
+		device->tts_source = TSIF_TTS_TCR;
+	}
+
+	for (i = 0; i < TSPP_TSIF_INSTANCES; i++)
+		device->tsif[i].tts_source = device->tts_source;
+
+	/* Parse TSIF properties */
+	for_each_child_of_node(pdev->dev.of_node, child) {
+		struct tspp_tsif_device *tsif;
+		struct device_node *i2c_bus;
+		int tsin;
+
+		rc = of_property_read_u32(child, "tsin-num", &tsin);
+		if (rc) {
+			dev_err(&pdev->dev, "No tsin-num found\n");
+			goto err_tsif;
+		}
+
+		if (tsin >= TSPP_TSIF_INSTANCES) {
+			dev_err(&pdev->dev, "tsin-num %d is not supported\n",
+				tsin);
+			rc = -EINVAL;
+			goto err_tsif;
+		}
+
+		tsif = &device->tsif[tsin];
+		if (tsif->i2c_adapter) {
+			dev_err(&pdev->dev, "tsin %d already declared\n", tsin);
+			rc = -EINVAL;
+			goto err_tsif;
+		}
+
+		i2c_bus = of_parse_phandle(child, "i2c-bus", 0);
+		if (!i2c_bus) {
+			dev_err(&pdev->dev, "No i2c-bus found\n");
+			rc = -ENODEV;
+			goto err_tsif;
+		}
+
+		tsif->i2c_adapter = of_find_i2c_adapter_by_node(i2c_bus);
+		of_node_put(i2c_bus);
+		if (!tsif->i2c_adapter) {
+			dev_err(&pdev->dev, "No i2c adapter found\n");
+			rc = -ENODEV;
+			goto err_tsif;
+		}
+
+		tsif->reset_gpio = of_get_named_gpio(child, "reset-gpios", 0);
+
+		if (!gpio_is_valid(tsif->reset_gpio)) {
+			dev_err(&pdev->dev, "reset gpio not valid\n");
+			rc = -ENODEV;
+			goto err_tsif;
+		}
+
+		rc = devm_gpio_request_one(&pdev->dev, tsif->reset_gpio,
+					   GPIOF_OUT_INIT_LOW, "fe reset");
+		if (rc && rc != -EBUSY) {
+			dev_err(&pdev->dev, "Can't request reset gpio\n");
+			goto err_tsif;
+		}
+
+		if (!rc) {
+			gpio_direction_output(tsif->reset_gpio, 0);
+			usleep_range(3500, 5000);
+			gpio_direction_output(tsif->reset_gpio, 1);
+			usleep_range(3500, 5000);
+		}
+	}
+
+	/* power management */
+	pm_runtime_set_active(&pdev->dev);
+	pm_runtime_enable(&pdev->dev);
+	tspp_debugfs_init(device, 0);
+
+	for (i = 0; i < TSPP_TSIF_INSTANCES; i++)
+		tsif_debugfs_init(&device->tsif[i], i);
+
+	wakeup_source_init(&device->ws, dev_name(&pdev->dev));
+
+	/* set up pointers to ram-based 'registers' */
+	device->filters[0] = device->base + TSPP_PID_FILTER_TABLE0;
+	device->filters[1] = device->base + TSPP_PID_FILTER_TABLE1;
+	device->filters[2] = device->base + TSPP_PID_FILTER_TABLE2;
+	device->tspp_key_table = device->base + TSPP_DATA_KEY;
+	device->tspp_global_performance =
+		device->base + TSPP_GLOBAL_PERFORMANCE;
+	device->tspp_pipe_context =
+		device->base + TSPP_PIPE_CONTEXT;
+	device->tspp_pipe_performance =
+		device->base + TSPP_PIPE_PERFORMANCE;
+
+	device->bam_props.summing_threshold = 0x10;
+	device->bam_props.irq = device->bam_irq;
+	device->bam_props.manage = SPS_BAM_MGR_LOCAL;
+	/*add SPS BAM log level*/
+	device->bam_props.ipc_loglevel = TSPP_BAM_DEFAULT_IPC_LOGLVL;
+
+	rc = tspp_clock_start(device);
+	if (rc) {
+		dev_err(&pdev->dev, "Can't start clocks");
+		goto err_clock;
+	}
+
+	device->bam_handle = SPS_DEV_HANDLE_INVALID;
+
+	spin_lock_init(&device->spinlock);
+	mutex_init(&device->mutex);
+	tasklet_init(&device->tlet, tspp_sps_complete_tlet,
+			(unsigned long)device);
+
+	/* initialize everything to a known state */
+	tspp_global_reset(device);
+
+	version = readl_relaxed(device->base + TSPP_VERSION);
+	/*
+	 * TSPP version can be bits [7:0] or alternatively,
+	 * TSPP major version is bits [31:28].
+	 */
+	if ((version != 0x1) && (((version >> 28) & 0xF) != 0x1))
+		pr_warn("tspp: unrecognized hw version 0x%x", version);
+
+	/* initialize the channels */
+	for (i = 0; i < TSPP_NUM_CHANNELS; i++)
+		tspp_channel_init(&(device->channels[i]), device);
+
+	/* stop the clocks for power savings */
+	tspp_clock_stop(device);
+
+	/* everything is ok, so add the device to the list */
+	list_add_tail(&(device->devlist), &tspp_devices);
+
+	rc = mpq_dmx_tspp_plugin_init(&pdev->dev, device->tsif[0].i2c_adapter);
+	if (rc)
+		goto err_clock;
+
+	return 0;
+
+err_clock:
+	tspp_debugfs_exit(device);
+	for (i = 0; i < TSPP_TSIF_INSTANCES; i++)
+		tsif_debugfs_exit(&device->tsif[i]);
+	pm_runtime_disable(&pdev->dev);
+err_tsif:
+	for (i = 0; i < TSPP_TSIF_INSTANCES; i++) {
+		if (device->tsif[i].i2c_adapter)
+			i2c_put_adapter(device->tsif[i].i2c_adapter);
+	}
+err_irq:
+	iounmap(device->bam_props.virt_addr);
+err_map_bam:
+err_res_bam:
+	iounmap(device->base);
+err_map_dev:
+err_res_dev:
+	iounmap(device->tsif[1].base);
+err_map_tsif1:
+err_res_tsif1:
+	iounmap(device->tsif[0].base);
+err_map_tsif0:
+err_res_tsif0:
+	if (device->tsif_ref_clk)
+		clk_put(device->tsif_ref_clk);
+err_refclock:
+	if (device->tsif_pclk)
+		clk_put(device->tsif_pclk);
+err_pclock:
+	if (device->tsif_vreg)
+		regulator_disable(device->tsif_vreg);
+err_regulator:
+	if (device->tsif_bus_client)
+		msm_bus_scale_unregister_client(device->tsif_bus_client);
+err_pinctrl:
+	kfree(device);
+
+out:
+	return rc;
+}
+
+static int msm_tspp_remove(struct platform_device *pdev)
+{
+	u32 i;
+
+	struct tspp_device *device = platform_get_drvdata(pdev);
+
+	mpq_dmx_tspp_plugin_exit();
+
+	/* free the buffers and delete the channels */
+	for (i = 0; i < TSPP_NUM_CHANNELS; i++)
+		tspp_close_channel(device->pdev->id, i);
+
+	for (i = 0; i < TSPP_TSIF_INSTANCES; i++) {
+		tsif_debugfs_exit(&device->tsif[i]);
+		if (device->tsif[i].i2c_adapter)
+			i2c_put_adapter(device->tsif[i].i2c_adapter);
+	}
+
+	mutex_destroy(&device->mutex);
+
+	if (device->tsif_bus_client)
+		msm_bus_scale_unregister_client(device->tsif_bus_client);
+
+	wakeup_source_trash(&device->ws);
+	if (device->req_irqs)
+		msm_tspp_free_irqs(device);
+
+	iounmap(device->bam_props.virt_addr);
+	iounmap(device->base);
+	for (i = 0; i < TSPP_TSIF_INSTANCES; i++)
+		iounmap(device->tsif[i].base);
+
+	if (device->tsif_ref_clk)
+		clk_put(device->tsif_ref_clk);
+
+	if (device->tsif_pclk)
+		clk_put(device->tsif_pclk);
+
+	if (device->tsif_vreg)
+		regulator_disable(device->tsif_vreg);
+
+	pm_runtime_disable(&pdev->dev);
+
+	kfree(device);
+
+	return 0;
+}
+
+/*** power management ***/
+
+static int tspp_runtime_suspend(struct device *dev)
+{
+	dev_dbg(dev, "pm_runtime: suspending...");
+	return 0;
+}
+
+static int tspp_runtime_resume(struct device *dev)
+{
+	dev_dbg(dev, "pm_runtime: resuming...");
+	return 0;
+}
+
+static const struct dev_pm_ops tspp_dev_pm_ops = {
+	.runtime_suspend = tspp_runtime_suspend,
+	.runtime_resume = tspp_runtime_resume,
+};
+
+static const struct of_device_id msm_match_table[] = {
+	{.compatible = "qcom,msm_tspp"},
+	{ /* sentinel */ }
+};
+
+static struct platform_driver msm_tspp_driver = {
+	.probe          = msm_tspp_probe,
+	.remove         = msm_tspp_remove,
+	.driver         = {
+		.name   = "msm_tspp",
+		.pm     = &tspp_dev_pm_ops,
+		.of_match_table = msm_match_table,
+	},
+};
+
+module_platform_driver(msm_tspp_driver);
+
+MODULE_DESCRIPTION("MSM TSPP DVB Driver");
+MODULE_LICENSE("GPL v2");
diff -Nruw linux-4.4.115-fbx/drivers/media/platform/msm./tspp/tspp-core.h linux-4.4.115-fbx/drivers/media/platform/msm/tspp/tspp-core.h
--- linux-4.4.115-fbx/drivers/media/platform/msm./tspp/tspp-core.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/media/platform/msm/tspp/tspp-core.h	2019-01-22 16:16:24.459255064 +0100
@@ -0,0 +1,108 @@
+/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _TSPP_CORE_H_
+#define _TSPP_CORE_H_
+
+struct tspp_data_descriptor {
+	void *virt_base;   /* logical address of the actual data */
+	phys_addr_t phys_base; /* physical address of the actual data */
+	u32 size;          /* size of buffer in bytes */
+	int id;            /* unique identifier */
+	void *user;        /* user-defined data */
+};
+
+enum tspp_key_parity {
+	TSPP_KEY_PARITY_EVEN,
+	TSPP_KEY_PARITY_ODD
+};
+
+struct tspp_key {
+	enum tspp_key_parity parity;
+	int lsb;
+	int msb;
+};
+
+enum tspp_source {
+	TSPP_SOURCE_TSIF0,
+	TSPP_SOURCE_TSIF1,
+	TSPP_SOURCE_MEM,
+	TSPP_SOURCE_NONE = -1
+};
+
+enum tspp_mode {
+	TSPP_MODE_DISABLED,
+	TSPP_MODE_PES,
+	TSPP_MODE_RAW,
+	TSPP_MODE_RAW_NO_SUFFIX
+};
+
+enum tspp_tsif_mode {
+	TSPP_TSIF_MODE_LOOPBACK, /* loopback mode */
+	TSPP_TSIF_MODE_1,        /* without sync */
+	TSPP_TSIF_MODE_2         /* with sync signal */
+};
+
+struct tspp_filter {
+	int pid;
+	int mask;
+	enum tspp_mode mode;
+	unsigned int priority;	/* 0 - 15 */
+	int decrypt;
+	enum tspp_source source;
+};
+
+struct tspp_select_source {
+	enum tspp_source source;
+	enum tspp_tsif_mode mode;
+	int clk_inverse;
+	int data_inverse;
+	int sync_inverse;
+	int enable_inverse;
+};
+
+enum tsif_tts_source {
+	TSIF_TTS_TCR = 0,	/* Time stamps from TCR counter */
+	TSIF_TTS_LPASS_TIMER	/* Time stamps from AV/Qtimer timer */
+};
+
+typedef void (tspp_notifier)(int channel_id, void *user);
+typedef void* (tspp_allocator)(int channel_id, u32 size,
+	phys_addr_t *phys_base, void *user);
+typedef void (tspp_memfree)(int channel_id, u32 size,
+	void *virt_base, phys_addr_t phys_base, void *user);
+
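+/*
+ * Illustrative sketch of a client-supplied allocator pair matching the
+ * typedefs above (my_pool_alloc/my_pool_free are hypothetical helpers):
+ *
+ *	static void *my_alloc(int channel_id, u32 size,
+ *			      phys_addr_t *phys_base, void *user)
+ *	{
+ *		return my_pool_alloc(user, size, phys_base);
+ *	}
+ *
+ *	static void my_free(int channel_id, u32 size, void *virt_base,
+ *			    phys_addr_t phys_base, void *user)
+ *	{
+ *		my_pool_free(user, virt_base, phys_base, size);
+ *	}
+ *
+ * Both may be passed (with a pool pointer as @user) to
+ * tspp_allocate_buffers() below; NULL selects the default allocator.
+ */
+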
+/* Kernel API functions */
+int tspp_open_stream(u32 dev, u32 channel_id,
+			struct tspp_select_source *source);
+int tspp_close_stream(u32 dev, u32 channel_id);
+int tspp_open_channel(u32 dev, u32 channel_id);
+int tspp_close_channel(u32 dev, u32 channel_id);
+int tspp_get_ref_clk_counter(u32 dev,
+	enum tspp_source source, u32 *tcr_counter);
+int tspp_add_filter(u32 dev, u32 channel_id, struct tspp_filter *filter);
+int tspp_remove_filter(u32 dev, u32 channel_id,	struct tspp_filter *filter);
+int tspp_set_key(u32 dev, u32 channel_id, struct tspp_key *key);
+int tspp_register_notification(u32 dev, u32 channel_id, tspp_notifier *notify,
+	void *data, u32 timer_ms);
+int tspp_unregister_notification(u32 dev, u32 channel_id);
+const struct tspp_data_descriptor *tspp_get_buffer(u32 dev, u32 channel_id);
+int tspp_release_buffer(u32 dev, u32 channel_id, u32 descriptor_id);
+int tspp_allocate_buffers(u32 dev, u32 channel_id, u32 count,
+	u32 size, u32 int_freq, tspp_allocator *alloc,
+	tspp_memfree *memfree, void *user);
+
+int tspp_get_tts_source(u32 dev, int *tts_source);
+int tspp_get_lpass_time_counter(u32 dev, enum tspp_source source,
+			u64 *lpass_time_counter);
+
+#endif /* _TSPP_CORE_H_ */
diff -Nruw linux-4.4.115-fbx/drivers/media/platform/msm./tspp/tspp-dvb.c linux-4.4.115-fbx/drivers/media/platform/msm/tspp/tspp-dvb.c
--- linux-4.4.115-fbx/drivers/media/platform/msm./tspp/tspp-dvb.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/media/platform/msm/tspp/tspp-dvb.c	2019-01-22 16:16:24.459255064 +0100
@@ -0,0 +1,1671 @@
+/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/kthread.h>
+#include <linux/vmalloc.h>
+#include "dvb_demux.h"
+#include "dmxdev.h"
+#include "mpq_dvb_debug.h"
+
+#include "tspp-core.h"
+
+#include "si2168.h"
+#include "si2157.h"
+
+#define TSIF_COUNT			2
+
+/* TSIF alias name length */
+#define TSIF_NAME_LENGTH		20
+
+/* Max number of PID filters */
+#define TSPP_MAX_PID_FILTER_NUM		128
+
+/* Max number of user-defined HW PID filters */
+#define TSPP_MAX_HW_PID_FILTER_NUM	15
+
+/* HW index of the last entry in the TSPP HW filter table */
+#define TSPP_LAST_HW_FILTER_INDEX	15
+
+/* Number of filters required to accept all packets except NULL packets */
+#define TSPP_BLOCK_NULLS_FILTERS_NUM	13
+
+/* Max number of section filters */
+#define TSPP_MAX_SECTION_FILTER_NUM	128
+
+/* For each TSIF we use a single pipe holding the data after PID filtering */
+#define TSPP_CHANNEL			0
+
+/* the channel_id passed to the TSPP driver, based on TSIF number and type */
+#define TSPP_CHANNEL_ID(tsif, ch)		(((tsif) << 1) + (ch))
+#define TSPP_GET_TSIF_NUM(ch_id)		((ch_id) >> 1)
+
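+/*
+ * e.g. with the single TSPP_CHANNEL (0) pipe per TSIF:
+ * TSPP_CHANNEL_ID(0, 0) == 0 for TSIF0 and TSPP_CHANNEL_ID(1, 0) == 2
+ * for TSIF1.
+ */
+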
+/* PID filter mask matching all 13 PID bits */
+#define TSPP_PID_MASK			0x1FFF
+
+/* dvb-demux defines pid 0x2000 as full capture pid */
+#define TSPP_PASS_THROUGH_PID		0x2000
+
+/* NULL packets pid */
+#define TSPP_NULL_PACKETS_PID		0x1FFF
+
+#define TSPP_RAW_TTS_SIZE		192
+#define TSPP_RAW_SIZE			188
+
+#define MAX_BAM_DESCRIPTOR_SIZE	(32 * 1024 - 1)
+
+#define MAX_BAM_DESCRIPTOR_COUNT	(8 * 1024 - 2)
+
+#define TSPP_BUFFER_SIZE		(500 * 1024) /* 500KB */
+
+#define TSPP_DEFAULT_DESCRIPTOR_SIZE	(TSPP_RAW_TTS_SIZE)
+
+#define TSPP_BUFFER_COUNT(buffer_size)	\
+	((buffer_size) / tspp_desc_size)
+
+/* How often TSPP notifies the demux that new packets were received:
+ * every tspp_notification_size descriptors. Using the max descriptor
+ * size (170 packets) and assuming a 20MBit/sec stream, there would be
+ * about 82 such notifications per second.
+ */
+#define TSPP_NOTIFICATION_SIZE(desc_size)		\
+	(MAX_BAM_DESCRIPTOR_SIZE / (desc_size))
+
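+/*
+ * Worked numbers (assuming the default 192-byte TTS descriptor):
+ * TSPP_NOTIFICATION_SIZE(192) = (32 * 1024 - 1) / 192 = 170, so the
+ * demux is notified once per 170 descriptors, i.e. once per
+ * 170 * 192 = 32640 bytes of stream data.
+ */
+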
+/* Channel timeout in msec */
+#define TSPP_CHANNEL_TIMEOUT			100
+
+/* module parameters for load time configuration */
+static int tspp_out_buffer_size = TSPP_BUFFER_SIZE;
+static int tspp_desc_size = TSPP_DEFAULT_DESCRIPTOR_SIZE;
+static int tspp_notification_size =
+	TSPP_NOTIFICATION_SIZE(TSPP_DEFAULT_DESCRIPTOR_SIZE);
+static int tspp_channel_timeout = TSPP_CHANNEL_TIMEOUT;
+
+module_param(tspp_out_buffer_size, int, S_IRUGO | S_IWUSR);
+module_param(tspp_desc_size, int, S_IRUGO | S_IWUSR);
+module_param(tspp_notification_size, int, S_IRUGO | S_IWUSR);
+module_param(tspp_channel_timeout, int, S_IRUGO | S_IWUSR);
+
+/* TSIF operation mode: 1 = TSIF_MODE_1,  2 = TSIF_MODE_2, 3 = TSIF_LOOPBACK */
+static int tsif_mode = 2;
+module_param(tsif_mode, int, S_IRUGO | S_IWUSR);
+
+/* Inverse TSIF clock signal */
+static int clock_inv;
+module_param(clock_inv, int, S_IRUGO | S_IWUSR);
+
+struct mpq_demux {
+	struct dvb_demux demux;
+	struct dmxdev dmxdev;
+	dmx_source_t source;
+	int ts_packet_timestamp_source;
+};
+
+static struct batfish_dvb_adapter {
+	struct device *dev;
+	struct i2c_adapter *i2c_adapter;
+	struct dvb_adapter adapter;
+	struct dvb_frontend *fe;
+	struct dmx_frontend frontend;
+	struct mpq_demux demux;
+	struct i2c_client *i2c_client_demod;
+	struct i2c_client *i2c_client_tuner;
+} batfish_dvb_adapter;
+
+/* The following structure holds singleton information
+ * required for the dmx implementation on top of TSPP.
+ */
+static struct
+{
+	/* Information for each TSIF input processing */
+	struct {
+		/*
+		 * TSPP pipe holding all TS packets after PID filtering.
+		 * The following is reference count for number of feeds
+		 * allocated on that pipe.
+		 */
+		int channel_ref;
+
+		/* Counter for data notifications on the pipe */
+		atomic_t data_cnt;
+
+		/* flag to indicate control operation is in progress */
+		atomic_t control_op;
+
+		/* Number of buffers */
+		u32 buffer_count;
+
+		/*
+		 * Holds PIDs of allocated filters along with
+		 * how many feeds are opened on the same PID. For
+		 * TSPP HW filters, holds also the filter table index.
+		 * When pid == -1, the entry is free.
+		 */
+		struct {
+			int pid;
+			int ref_count;
+			int hw_index;
+		} filters[TSPP_MAX_PID_FILTER_NUM];
+
+		/* Indicates available/allocated filter table indexes */
+		int hw_indexes[TSPP_MAX_HW_PID_FILTER_NUM];
+
+		/* Number of currently allocated PID filters */
+		u16 current_filter_count;
+
+		/*
+		 * Flag to indicate whether the user added a filter to accept
+		 * NULL packets (PID = 0x1FFF)
+		 */
+		int pass_nulls_flag;
+
+		/*
+		 * Flag to indicate whether the user added a filter to accept
+		 * all packets (PID = 0x2000)
+		 */
+		int pass_all_flag;
+
+		/*
+		 * Flag to indicate whether the filter that accepts
+		 * all packets has already been added and is
+		 * currently enabled
+		 */
+		int accept_all_filter_exists_flag;
+
+		/* Thread processing TS packets from TSPP */
+		struct task_struct *thread;
+		wait_queue_head_t wait_queue;
+
+		/* TSIF alias */
+		char name[TSIF_NAME_LENGTH];
+
+		/* Pointer to the demux connected to this TSIF */
+		struct mpq_demux *mpq_demux;
+
+		/* Mutex protecting the data-structure */
+		struct mutex mutex;
+	} tsif[TSIF_COUNT];
+} mpq_dmx_tspp_info;
+
+/**
+ * Returns a free HW filter index that can be used.
+ *
+ * @tsif: The TSIF to allocate filter from
+ *
+ * Return  HW filter index or -ENOMEM if no filters available
+ */
+static int mpq_tspp_allocate_hw_filter_index(int tsif)
+{
+	int i;
+
+	for (i = 0; i < TSPP_MAX_HW_PID_FILTER_NUM; i++) {
+		if (mpq_dmx_tspp_info.tsif[tsif].hw_indexes[i] == 0) {
+			mpq_dmx_tspp_info.tsif[tsif].hw_indexes[i] = 1;
+			return i;
+		}
+	}
+
+	return -ENOMEM;
+}
+
+/**
+ * Releases a HW filter index for future reuse.
+ *
+ * @tsif: The TSIF from which the filter should be released
+ * @hw_index: The HW index to release
+ *
+ */
+static inline void mpq_tspp_release_hw_filter_index(int tsif, int hw_index)
+{
+	if ((hw_index >= 0) && (hw_index < TSPP_MAX_HW_PID_FILTER_NUM))
+		mpq_dmx_tspp_info.tsif[tsif].hw_indexes[hw_index] = 0;
+}
+
+
+/**
+ * Returns a free filter slot that can be used.
+ *
+ * @tsif: The TSIF to allocate filter from
+ *
+ * Return  filter index or -ENOMEM if no filters available
+ */
+static int mpq_tspp_get_free_filter_slot(int tsif)
+{
+	int slot;
+
+	for (slot = 0; slot < TSPP_MAX_PID_FILTER_NUM; slot++)
+		if (mpq_dmx_tspp_info.tsif[tsif].filters[slot].pid == -1)
+			return slot;
+
+	return -ENOMEM;
+}
+
+/**
+ * Returns filter index of specific pid.
+ *
+ * @tsif: The TSIF to which the pid is allocated
+ * @pid: The pid to search for
+ *
+ * Return  filter index, or -EINVAL if the pid has no filter allocated
+ */
+static int mpq_tspp_get_filter_slot(int tsif, int pid)
+{
+	int slot;
+
+	for (slot = 0; slot < TSPP_MAX_PID_FILTER_NUM; slot++)
+		if (mpq_dmx_tspp_info.tsif[tsif].filters[slot].pid == pid)
+			return slot;
+
+	return -EINVAL;
+}
+
+/**
+ * mpq_dmx_tspp_swfilter_desc - helper function
+ *
+ * Takes a tspp buffer descriptor and sends it to the SW filter for demuxing,
+ * one TS packet at a time.
+ *
+ * @mpq_demux: mpq demux object
+ * @tspp_data_desc: tspp buffer descriptor
+ */
+static inline void mpq_dmx_tspp_swfilter_desc(struct mpq_demux *mpq_demux,
+	const struct tspp_data_descriptor *tspp_data_desc)
+{
+	u32 notif_size;
+	int i;
+
+	notif_size = tspp_data_desc->size / TSPP_RAW_TTS_SIZE;
+	for (i = 0; i < notif_size; i++)
+		dvb_dmx_swfilter_packets(&mpq_demux->demux,
+			((u8 *)tspp_data_desc->virt_base) +
+			i * TSPP_RAW_TTS_SIZE, 1);
+}
+
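+/*
+ * Note (assuming the default 192-byte descriptor configured above): each
+ * TSPP_RAW_TTS_SIZE chunk is one 188-byte TS packet plus its 4-byte TTS
+ * suffix, so notif_size above is simply the packet count of the buffer.
+ */
+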
+/**
+ * Demux thread function handling data from specific TSIF.
+ *
+ * @arg: TSIF number
+ */
+static int mpq_dmx_tspp_thread(void *arg)
+{
+	int tsif = (int)(uintptr_t)arg;
+	struct mpq_demux *mpq_demux;
+	const struct tspp_data_descriptor *tspp_data_desc;
+	atomic_t *data_cnt;
+	int channel_id;
+	int ref_count;
+	int ret;
+
+	do {
+		ret = wait_event_interruptible(
+			mpq_dmx_tspp_info.tsif[tsif].wait_queue,
+			(atomic_read(&mpq_dmx_tspp_info.tsif[tsif].data_cnt) &&
+			!atomic_read(&mpq_dmx_tspp_info.tsif[tsif].control_op))
+			|| kthread_should_stop());
+
+		if ((ret < 0) || kthread_should_stop()) {
+			MPQ_DVB_ERR_PRINT("%s: exit\n", __func__);
+			break;
+		}
+
+		/* Lock against the TSPP filters data-structure */
+		if (mutex_lock_interruptible(
+			&mpq_dmx_tspp_info.tsif[tsif].mutex))
+			return -ERESTARTSYS;
+
+		channel_id = TSPP_CHANNEL_ID(tsif, TSPP_CHANNEL);
+
+		ref_count = mpq_dmx_tspp_info.tsif[tsif].channel_ref;
+		data_cnt = &mpq_dmx_tspp_info.tsif[tsif].data_cnt;
+
+		/* Make sure channel is still active */
+		if (ref_count == 0) {
+			mutex_unlock(&mpq_dmx_tspp_info.tsif[tsif].mutex);
+			continue;
+		}
+
+		atomic_dec(data_cnt);
+
+		mpq_demux = mpq_dmx_tspp_info.tsif[tsif].mpq_demux;
+
+		/*
+		 * Go through all filled descriptors
+		 * and perform demuxing on them
+		 */
+		do {
+			if (atomic_read(&mpq_dmx_tspp_info.tsif[tsif].
+					control_op)) {
+				/* restore for next iteration */
+				atomic_inc(data_cnt);
+				break;
+			}
+			tspp_data_desc = tspp_get_buffer(0, channel_id);
+			if (!tspp_data_desc)
+				break;
+
+			mpq_dmx_tspp_swfilter_desc(mpq_demux,
+				tspp_data_desc);
+			/*
+			 * Notify TSPP that the buffer
+			 * is no longer needed
+			 */
+			tspp_release_buffer(0, channel_id,
+				tspp_data_desc->id);
+		} while (1);
+
+		mutex_unlock(&mpq_dmx_tspp_info.tsif[tsif].mutex);
+	} while (1);
+
+	return 0;
+}
+
+/**
+ * Callback function from TSPP when new data is ready.
+ *
+ * @channel_id: Channel with new TS packets
+ * @user: user-data holding TSIF number
+ */
+static void mpq_tspp_callback(int channel_id, void *user)
+{
+	int tsif = (int)(uintptr_t)user;
+
+	atomic_inc(&mpq_dmx_tspp_info.tsif[tsif].data_cnt);
+	wake_up(&mpq_dmx_tspp_info.tsif[tsif].wait_queue);
+}
+
+/**
+ * Add a filter to accept all packets as the last entry
+ * of the TSPP HW filter table.
+ *
+ * @channel_id: Channel ID number.
+ * @source: TSPP source.
+ *
+ * Return  error status
+ */
+static int mpq_tspp_add_accept_all_filter(int channel_id,
+				enum tspp_source source)
+{
+	struct tspp_filter tspp_filter;
+	int tsif = TSPP_GET_TSIF_NUM(channel_id);
+	int ret;
+
+	MPQ_DVB_DBG_PRINT("%s: executed, channel id = %d, source = %d\n",
+		__func__, channel_id, source);
+
+	if (mpq_dmx_tspp_info.tsif[tsif].accept_all_filter_exists_flag) {
+		MPQ_DVB_DBG_PRINT("%s: accept all filter already exists\n",
+				__func__);
+		return 0;
+	}
+
+	/* This filter will be the last entry in the table */
+	tspp_filter.priority = TSPP_LAST_HW_FILTER_INDEX;
+	/* Pass all pids - set mask to 0 */
+	tspp_filter.pid = 0;
+	tspp_filter.mask = 0;
+	/*
+	 * Include TTS in RAW packets. If you change this to
+	 * TSPP_MODE_RAW_NO_SUFFIX you must also change TSPP_RAW_TTS_SIZE
+	 * accordingly.
+	 */
+	tspp_filter.mode = TSPP_MODE_RAW;
+	tspp_filter.source = source;
+	tspp_filter.decrypt = 0;
+
+	ret = tspp_add_filter(0, channel_id, &tspp_filter);
+	if (!ret) {
+		mpq_dmx_tspp_info.tsif[tsif].accept_all_filter_exists_flag = 1;
+		MPQ_DVB_DBG_PRINT(
+				"%s: accept all filter added successfully\n",
+				__func__);
+	}
+
+	return ret;
+}
+
+/**
+ * Remove the filter that accepts all packets from the last entry
+ * of the TSPP HW filter table.
+ *
+ * @channel_id: Channel ID number.
+ * @source: TSPP source.
+ *
+ * Return  error status
+ */
+static int mpq_tspp_remove_accept_all_filter(int channel_id,
+				enum tspp_source source)
+{
+	struct tspp_filter tspp_filter;
+	int tsif = TSPP_GET_TSIF_NUM(channel_id);
+	int ret;
+
+	MPQ_DVB_DBG_PRINT("%s: executed, channel id = %d, source = %d\n",
+		__func__, channel_id, source);
+
+	if (mpq_dmx_tspp_info.tsif[tsif].accept_all_filter_exists_flag == 0) {
+		MPQ_DVB_DBG_PRINT("%s: accept all filter doesn't exist\n",
+				__func__);
+		return 0;
+	}
+
+	tspp_filter.priority = TSPP_LAST_HW_FILTER_INDEX;
+
+	ret = tspp_remove_filter(0, channel_id, &tspp_filter);
+	if (!ret) {
+		mpq_dmx_tspp_info.tsif[tsif].accept_all_filter_exists_flag = 0;
+		MPQ_DVB_DBG_PRINT(
+			"%s: accept all filter removed successfully\n",
+			__func__);
+	}
+
+	return ret;
+}
+
+/**
+ * Add filters designed to accept all packets except NULL packets, i.e.
+ * packets with PID = 0x1FFF.
+ * This function is called after user-defined filters were removed,
+ * so it assumes that the first 13 HW filters in the TSPP filter
+ * table are free for use.
+ *
+ * @channel_id: Channel ID number.
+ * @source: TSPP source.
+ *
+ * Return  0 on success, -1 otherwise
+ */
+static int mpq_tspp_add_null_blocking_filters(int channel_id,
+				enum tspp_source source)
+{
+	struct tspp_filter tspp_filter;
+	int ret = 0;
+	int i, j;
+	u16 full_pid_mask = 0x1FFF;
+	u8 mask_shift;
+	u8 pid_shift;
+	int tsif = TSPP_GET_TSIF_NUM(channel_id);
+
+	MPQ_DVB_DBG_PRINT("%s: executed, channel id = %d, source = %d\n",
+		__func__, channel_id, source);
+
+	/*
+	 * Add a total of 13 filters that will accept packets with
+	 * every PID other than 0x1FFF, which is the NULL PID.
+	 *
+	 * Filter 0: accept all PIDs with bit 12 clear, i.e.
+	 * PID = 0x0000 .. 0x0FFF (4096 PIDs in total):
+	 * Mask = 0x1000, PID = 0x0000.
+	 *
+	 * Filter 12: Accept PID 0x1FFE:
+	 * Mask = 0x1FFF, PID = 0x1FFE.
+	 *
+	 * In general: For N = 0 .. 12,
+	 * Filter <N>: accept all PIDs with the <N> MSBits set and the
+	 * next bit down clear.
+	 * Filter <N> Mask = N+1 MSBits set, others clear.
+	 * Filter <N> PID = <N> MSBits set, others clear.
+	 */
+
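+	/*
+	 * For example, filter 1 (N = 1) gets Mask = 0x1800 and
+	 * PID = 0x1000, accepting the 2048 PIDs 0x1000 .. 0x17FF.
+	 */
+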
+	/*
+	 * Include TTS in RAW packets. If you change this to
+	 * TSPP_MODE_RAW_NO_SUFFIX you must also change TSPP_RAW_TTS_SIZE
+	 * accordingly.
+	 */
+	tspp_filter.mode = TSPP_MODE_RAW;
+	tspp_filter.source = source;
+	tspp_filter.decrypt = 0;
+
+	for (i = 0; i < TSPP_BLOCK_NULLS_FILTERS_NUM; i++) {
+		tspp_filter.priority = mpq_tspp_allocate_hw_filter_index(tsif);
+		if (tspp_filter.priority != i) {
+			MPQ_DVB_ERR_PRINT(
+				"%s: got unexpected HW index %d, expected %d\n",
+				__func__, tspp_filter.priority, i);
+			ret = -1;
+			break;
+		}
+		mask_shift = (TSPP_BLOCK_NULLS_FILTERS_NUM - 1 - i);
+		pid_shift = (TSPP_BLOCK_NULLS_FILTERS_NUM - i);
+		tspp_filter.mask =
+			((full_pid_mask >> mask_shift) << mask_shift);
+		tspp_filter.pid = ((full_pid_mask >> pid_shift) << pid_shift);
+
+		if (tspp_add_filter(0, channel_id, &tspp_filter)) {
+			ret = -1;
+			break;
+		}
+	}
+
+	if (ret) {
+		/* cleanup on failure */
+		for (j = 0; j < i; j++) {
+			tspp_filter.priority = j;
+			mpq_tspp_release_hw_filter_index(tsif, j);
+			tspp_remove_filter(0, channel_id, &tspp_filter);
+		}
+	} else {
+		MPQ_DVB_DBG_PRINT(
+			"%s: NULL blocking filters added successfully\n",
+			__func__);
+	}
+
+	return ret;
+}
+
+/**
+ * Remove filters designed to accept all packets except NULL packets, i.e.
+ * packets with PID = 0x1FFF.
+ *
+ * @channel_id: Channel ID number.
+ *
+ * @source: TSPP source.
+ *
+ * Return  0 on success, -1 otherwise
+ */
+static int mpq_tspp_remove_null_blocking_filters(int channel_id,
+				enum tspp_source source)
+{
+	struct tspp_filter tspp_filter;
+	int tsif = TSPP_GET_TSIF_NUM(channel_id);
+	int ret = 0;
+	int i;
+
+	MPQ_DVB_DBG_PRINT("%s: executed, channel id = %d, source = %d\n",
+		__func__, channel_id, source);
+
+	for (i = 0; i < TSPP_BLOCK_NULLS_FILTERS_NUM; i++) {
+		tspp_filter.priority = i;
+		if (tspp_remove_filter(0, channel_id, &tspp_filter)) {
+			MPQ_DVB_ERR_PRINT("%s: failed to remove filter %d\n",
+				__func__, i);
+			ret = -1;
+		}
+
+		mpq_tspp_release_hw_filter_index(tsif, i);
+	}
+
+	return ret;
+}
+
+/**
+ * Add all current user-defined filters (up to 15) as HW filters
+ *
+ * @channel_id: Channel ID number.
+ *
+ * @source: TSPP source.
+ *
+ * Return  0 on success, -1 otherwise
+ */
+static int mpq_tspp_add_all_user_filters(int channel_id,
+				enum tspp_source source)
+{
+	struct tspp_filter tspp_filter;
+	int tsif = TSPP_GET_TSIF_NUM(channel_id);
+	int slot;
+	u16 added_count = 0;
+	u16 total_filters_count = 0;
+
+	MPQ_DVB_DBG_PRINT("%s: executed\n", __func__);
+
+	/*
+	 * Include TTS in RAW packets. If you change this to
+	 * TSPP_MODE_RAW_NO_SUFFIX you must also change TSPP_RAW_TTS_SIZE
+	 * accordingly.
+	 */
+	tspp_filter.mode = TSPP_MODE_RAW;
+	tspp_filter.source = source;
+	tspp_filter.decrypt = 0;
+
+	for (slot = 0; slot < TSPP_MAX_PID_FILTER_NUM; slot++) {
+		if (mpq_dmx_tspp_info.tsif[tsif].filters[slot].pid == -1)
+			continue;
+
+		/*
+		 * count total number of user filters to verify that it is
+		 * exactly TSPP_MAX_HW_PID_FILTER_NUM as expected.
+		 */
+		total_filters_count++;
+
+		if (added_count >= TSPP_MAX_HW_PID_FILTER_NUM)
+			continue;
+
+		tspp_filter.priority = mpq_tspp_allocate_hw_filter_index(tsif);
+
+		if (mpq_dmx_tspp_info.tsif[tsif].filters[slot].pid ==
+				TSPP_PASS_THROUGH_PID) {
+			/* pass all pids */
+			tspp_filter.pid = 0;
+			tspp_filter.mask = 0;
+		} else {
+			tspp_filter.pid =
+				mpq_dmx_tspp_info.tsif[tsif].filters[slot].pid;
+			tspp_filter.mask = TSPP_PID_MASK;
+		}
+
+		MPQ_DVB_DBG_PRINT(
+			"%s: adding HW filter, PID = %d, mask = 0x%X, index = %d\n",
+				__func__, tspp_filter.pid, tspp_filter.mask,
+				tspp_filter.priority);
+
+		if (!tspp_add_filter(0, channel_id, &tspp_filter)) {
+			mpq_dmx_tspp_info.tsif[tsif].filters[slot].hw_index =
+				tspp_filter.priority;
+			added_count++;
+		} else {
+			MPQ_DVB_ERR_PRINT("%s: tspp_add_filter failed\n",
+						__func__);
+		}
+	}
+
+	if ((added_count != TSPP_MAX_HW_PID_FILTER_NUM) ||
+		(added_count != total_filters_count))
+		return -EINVAL;
+
+	return 0;
+}
+
+/**
+ * Remove all user-defined HW filters
+ *
+ * @channel_id: Channel ID number.
+ *
+ * @source: TSPP source.
+ *
+ * Return  0 on success, -1 otherwise
+ */
+static int mpq_tspp_remove_all_user_filters(int channel_id,
+				enum tspp_source source)
+{
+	struct tspp_filter tspp_filter;
+	int ret = 0;
+	int tsif = TSPP_GET_TSIF_NUM(channel_id);
+	int i;
+
+	MPQ_DVB_DBG_PRINT("%s: executed\n", __func__);
+
+	for (i = 0; i < TSPP_MAX_HW_PID_FILTER_NUM; i++) {
+		tspp_filter.priority = i;
+		MPQ_DVB_DBG_PRINT("%s: Removing HW filter %d\n",
+			__func__, tspp_filter.priority);
+		if (tspp_remove_filter(0, channel_id, &tspp_filter))
+			ret = -1;
+
+		mpq_tspp_release_hw_filter_index(tsif, i);
+		mpq_dmx_tspp_info.tsif[tsif].filters[i].hw_index = -1;
+	}
+
+	return ret;
+}
+
+/**
+ * Configure TSPP channel to filter the PID of new feed.
+ *
+ * @feed: The feed to configure the channel with
+ *
+ * Return  error status
+ *
+ * The function checks if the new PID can be added to an already
+ * allocated channel, if not, a new channel is allocated and configured.
+ */
+static int mpq_tspp_dmx_add_channel(struct dvb_demux_feed *feed)
+{
+	struct mpq_demux *mpq_demux = feed->demux->priv;
+	struct tspp_select_source tspp_source;
+	struct tspp_filter tspp_filter;
+	int tsif;
+	int ret = 0;
+	int slot;
+	int channel_id;
+	int *channel_ref_count;
+	u32 buffer_size;
+	int restore_user_filters = 0;
+	int remove_accept_all_filter = 0;
+	int remove_null_blocking_filters = 0;
+
+	tspp_source.clk_inverse = clock_inv;
+	tspp_source.data_inverse = 0;
+	tspp_source.sync_inverse = 0;
+	tspp_source.enable_inverse = 0;
+
+	MPQ_DVB_DBG_PRINT("%s: executed, PID = %d\n", __func__, feed->pid);
+
+	switch (tsif_mode) {
+	case 1:
+		tspp_source.mode = TSPP_TSIF_MODE_1;
+		break;
+	case 2:
+		tspp_source.mode = TSPP_TSIF_MODE_2;
+		break;
+	default:
+		tspp_source.mode = TSPP_TSIF_MODE_LOOPBACK;
+		break;
+	}
+
+	/* determine the TSIF we are reading from */
+	if (mpq_demux->source == DMX_SOURCE_FRONT0) {
+		tsif = 0;
+		tspp_source.source = TSPP_SOURCE_TSIF0;
+	} else if (mpq_demux->source == DMX_SOURCE_FRONT1) {
+		tsif = 1;
+		tspp_source.source = TSPP_SOURCE_TSIF1;
+	} else {
+		/* invalid source */
+		MPQ_DVB_ERR_PRINT(
+			"%s: invalid input source (%d)\n",
+			__func__,
+			mpq_demux->source);
+
+		return -EINVAL;
+	}
+
+	atomic_inc(&mpq_dmx_tspp_info.tsif[tsif].control_op);
+	if (mutex_lock_interruptible(&mpq_dmx_tspp_info.tsif[tsif].mutex)) {
+		atomic_dec(&mpq_dmx_tspp_info.tsif[tsif].control_op);
+		return -ERESTARTSYS;
+	}
+
+	/*
+	 * It is possible that this PID was already requested before.
+	 * This can happen if we play and record the same PES, or a PCR
+	 * piggybacked on a video packet.
+	 */
+	slot = mpq_tspp_get_filter_slot(tsif, feed->pid);
+	if (slot >= 0) {
+		/* PID already configured */
+		mpq_dmx_tspp_info.tsif[tsif].filters[slot].ref_count++;
+		goto out;
+	}
+
+
+	channel_id = TSPP_CHANNEL_ID(tsif, TSPP_CHANNEL);
+	channel_ref_count = &mpq_dmx_tspp_info.tsif[tsif].channel_ref;
+
+	/*
+	 * Recalculate 'tspp_notification_size' and buffer count in case
+	 * 'tspp_desc_size' or 'tspp_out_buffer_size' parameters have changed.
+	 */
+	buffer_size = tspp_desc_size;
+	tspp_notification_size = TSPP_NOTIFICATION_SIZE(tspp_desc_size);
+	mpq_dmx_tspp_info.tsif[tsif].buffer_count =
+			TSPP_BUFFER_COUNT(tspp_out_buffer_size);
+	if (mpq_dmx_tspp_info.tsif[tsif].buffer_count >
+			MAX_BAM_DESCRIPTOR_COUNT)
+		mpq_dmx_tspp_info.tsif[tsif].buffer_count =
+			MAX_BAM_DESCRIPTOR_COUNT;
+
+	/* check if required TSPP pipe is already allocated or not */
+	if (*channel_ref_count == 0) {
+		ret = tspp_open_channel(0, channel_id);
+		if (ret < 0) {
+			MPQ_DVB_ERR_PRINT(
+				"%s: tspp_open_channel(%d) failed (%d)\n",
+				__func__,
+				channel_id,
+				ret);
+
+			goto out;
+		}
+
+		/* set TSPP source */
+		ret = tspp_open_stream(0, channel_id, &tspp_source);
+		if (ret < 0) {
+			MPQ_DVB_ERR_PRINT(
+				"%s: tspp_select_source(%d,%d) failed (%d)\n",
+				__func__,
+				channel_id,
+				tspp_source.source,
+				ret);
+
+			goto add_channel_close_ch;
+		}
+
+		/* register notification on TS packets */
+		tspp_register_notification(0,
+					   channel_id,
+					   mpq_tspp_callback,
+					   (void *)(uintptr_t)tsif,
+					   tspp_channel_timeout);
+
+		/*
+		 * Allocate the data buffers from contiguous memory so we
+		 * can have a big notification size with the smallest
+		 * descriptor, and still provide TZ with a single big buffer
+		 * based on the notification size. Passing NULL selects the
+		 * driver's default allocator.
+		 */
+		ret = tspp_allocate_buffers(0, channel_id,
+			   mpq_dmx_tspp_info.tsif[tsif].buffer_count,
+			   buffer_size, tspp_notification_size,
+			   NULL, NULL, NULL);
+		if (ret < 0) {
+			MPQ_DVB_ERR_PRINT(
+				"%s: tspp_allocate_buffers(%d) failed (%d)\n",
+				__func__,
+				channel_id,
+				ret);
+
+			goto add_channel_unregister_notif;
+		}
+
+		mpq_dmx_tspp_info.tsif[tsif].mpq_demux = mpq_demux;
+	}
+
+	/* add new PID to the existing pipe */
+	slot = mpq_tspp_get_free_filter_slot(tsif);
+	if (slot < 0) {
+		MPQ_DVB_ERR_PRINT(
+			"%s: mpq_tspp_get_free_filter_slot(%d) failed\n",
+			__func__, tsif);
+
+		goto add_channel_unregister_notif;
+	}
+
+	if (feed->pid == TSPP_PASS_THROUGH_PID)
+		mpq_dmx_tspp_info.tsif[tsif].pass_all_flag = 1;
+	else if (feed->pid == TSPP_NULL_PACKETS_PID)
+		mpq_dmx_tspp_info.tsif[tsif].pass_nulls_flag = 1;
+
+	mpq_dmx_tspp_info.tsif[tsif].filters[slot].pid = feed->pid;
+	mpq_dmx_tspp_info.tsif[tsif].filters[slot].ref_count++;
+
+	tspp_filter.priority = -1;
+
+	if (mpq_dmx_tspp_info.tsif[tsif].current_filter_count <
+					TSPP_MAX_HW_PID_FILTER_NUM) {
+		/* HW filtering mode */
+		tspp_filter.priority = mpq_tspp_allocate_hw_filter_index(tsif);
+		if (tspp_filter.priority < 0)
+			goto add_channel_free_filter_slot;
+
+		if (feed->pid == TSPP_PASS_THROUGH_PID) {
+			/* pass all pids */
+			tspp_filter.pid = 0;
+			tspp_filter.mask = 0;
+		} else {
+			tspp_filter.pid = feed->pid;
+			tspp_filter.mask = TSPP_PID_MASK;
+		}
+
+		/*
+		 * Include TTS in RAW packets. If you change this to
+		 * TSPP_MODE_RAW_NO_SUFFIX you must also change
+		 * TSPP_RAW_TTS_SIZE accordingly.
+		 */
+		tspp_filter.mode = TSPP_MODE_RAW;
+		tspp_filter.source = tspp_source.source;
+		tspp_filter.decrypt = 0;
+		ret = tspp_add_filter(0, channel_id, &tspp_filter);
+		if (ret < 0) {
+			MPQ_DVB_ERR_PRINT(
+				"%s: tspp_add_filter(%d) failed (%d)\n",
+				__func__,
+				channel_id,
+				ret);
+
+			goto add_channel_free_filter_slot;
+		}
+		mpq_dmx_tspp_info.tsif[tsif].filters[slot].hw_index =
+			tspp_filter.priority;
+
+		MPQ_DVB_DBG_PRINT(
+			"%s: HW filtering mode: added TSPP HW filter, PID = %d, mask = 0x%X, index = %d\n",
+			__func__, tspp_filter.pid, tspp_filter.mask,
+			tspp_filter.priority);
+	} else if (mpq_dmx_tspp_info.tsif[tsif].current_filter_count ==
+					TSPP_MAX_HW_PID_FILTER_NUM) {
+		/* Crossing the threshold - from HW to SW filtering mode */
+
+		/* Add a temporary filter to accept all packets */
+		ret = mpq_tspp_add_accept_all_filter(channel_id,
+					tspp_source.source);
+		if (ret < 0) {
+			MPQ_DVB_ERR_PRINT(
+				"%s: mpq_tspp_add_accept_all_filter(%d, %d) failed\n",
+				__func__, channel_id, tspp_source.source);
+
+			goto add_channel_free_filter_slot;
+		}
+
+		/* Remove all existing user filters */
+		ret = mpq_tspp_remove_all_user_filters(channel_id,
+					tspp_source.source);
+		if (ret < 0) {
+			MPQ_DVB_ERR_PRINT(
+				"%s: mpq_tspp_remove_all_user_filters(%d, %d) failed\n",
+				__func__, channel_id, tspp_source.source);
+
+			restore_user_filters = 1;
+			remove_accept_all_filter = 1;
+
+			goto add_channel_free_filter_slot;
+		}
+
+		/* Add HW filters to block NULL packets */
+		ret = mpq_tspp_add_null_blocking_filters(channel_id,
+					tspp_source.source);
+		if (ret < 0) {
+			MPQ_DVB_ERR_PRINT(
+				"%s: mpq_tspp_add_null_blocking_filters(%d, %d) failed\n",
+				__func__, channel_id, tspp_source.source);
+
+			restore_user_filters = 1;
+			remove_accept_all_filter = 1;
+
+			goto add_channel_free_filter_slot;
+		}
+
+		/* Remove the filter that accepts all packets, if necessary */
+		if ((mpq_dmx_tspp_info.tsif[tsif].pass_all_flag == 0) &&
+			(mpq_dmx_tspp_info.tsif[tsif].pass_nulls_flag == 0)) {
+
+			ret = mpq_tspp_remove_accept_all_filter(channel_id,
+						tspp_source.source);
+			if (ret < 0) {
+				MPQ_DVB_ERR_PRINT(
+					"%s: mpq_tspp_remove_accept_all_filter(%d, %d) failed\n",
+					__func__, channel_id,
+					tspp_source.source);
+
+				remove_null_blocking_filters = 1;
+				restore_user_filters = 1;
+				remove_accept_all_filter = 1;
+
+				goto add_channel_free_filter_slot;
+			}
+		}
+	} else {
+		/* Already working in SW filtering mode */
+		if (mpq_dmx_tspp_info.tsif[tsif].pass_all_flag ||
+			mpq_dmx_tspp_info.tsif[tsif].pass_nulls_flag) {
+
+			ret = mpq_tspp_add_accept_all_filter(channel_id,
+						tspp_source.source);
+			if (ret < 0) {
+				MPQ_DVB_ERR_PRINT(
+					"%s: mpq_tspp_add_accept_all_filter(%d, %d) failed\n",
+					__func__, channel_id,
+					tspp_source.source);
+
+				goto add_channel_free_filter_slot;
+			}
+		}
+	}
+
+	(*channel_ref_count)++;
+	mpq_dmx_tspp_info.tsif[tsif].current_filter_count++;
+
+	MPQ_DVB_DBG_PRINT("%s: success, current_filter_count = %d\n",
+		__func__, mpq_dmx_tspp_info.tsif[tsif].current_filter_count);
+
+	goto out;
+
+add_channel_free_filter_slot:
+	/* restore internal database state */
+	mpq_dmx_tspp_info.tsif[tsif].filters[slot].pid = -1;
+	mpq_dmx_tspp_info.tsif[tsif].filters[slot].ref_count--;
+
+	/* release HW index if we allocated one */
+	if (tspp_filter.priority >= 0) {
+		mpq_dmx_tspp_info.tsif[tsif].filters[slot].hw_index = -1;
+		mpq_tspp_release_hw_filter_index(tsif, tspp_filter.priority);
+	}
+
+	/* restore HW filter table state if necessary */
+	if (remove_null_blocking_filters)
+		mpq_tspp_remove_null_blocking_filters(channel_id,
+						tspp_source.source);
+
+	if (restore_user_filters)
+		mpq_tspp_add_all_user_filters(channel_id, tspp_source.source);
+
+	if (remove_accept_all_filter)
+		mpq_tspp_remove_accept_all_filter(channel_id,
+						tspp_source.source);
+
+	/* restore flags. we can only get here if we changed the flags. */
+	if (feed->pid == TSPP_PASS_THROUGH_PID)
+		mpq_dmx_tspp_info.tsif[tsif].pass_all_flag = 0;
+	else if (feed->pid == TSPP_NULL_PACKETS_PID)
+		mpq_dmx_tspp_info.tsif[tsif].pass_nulls_flag = 0;
+
+add_channel_unregister_notif:
+	if (*channel_ref_count == 0) {
+		tspp_unregister_notification(0, channel_id);
+		tspp_close_stream(0, channel_id);
+	}
+add_channel_close_ch:
+	if (*channel_ref_count == 0)
+		tspp_close_channel(0, channel_id);
+out:
+	mutex_unlock(&mpq_dmx_tspp_info.tsif[tsif].mutex);
+	atomic_dec(&mpq_dmx_tspp_info.tsif[tsif].control_op);
+	return ret;
+}
+
+/**
+ * Removes filter from TSPP.
+ *
+ * @feed: The feed to remove
+ *
+ * Return  error status
+ *
+ * The function checks if this is the only PID allocated within
+ * the channel, if so, the channel is closed as well.
+ */
+static int mpq_tspp_dmx_remove_channel(struct dvb_demux_feed *feed)
+{
+	int tsif;
+	int ret = 0;
+	int channel_id;
+	int slot;
+	atomic_t *data_cnt;
+	int *channel_ref_count;
+	enum tspp_source tspp_source;
+	struct tspp_filter tspp_filter;
+	struct mpq_demux *mpq_demux = feed->demux->priv;
+	int restore_null_blocking_filters = 0;
+	int remove_accept_all_filter = 0;
+	int remove_user_filters = 0;
+	int accept_all_filter_existed = 0;
+
+	MPQ_DVB_DBG_PRINT("%s: executed, PID = %d\n", __func__, feed->pid);
+
+	/* determine the TSIF we are reading from */
+	if (mpq_demux->source == DMX_SOURCE_FRONT0) {
+		tsif = 0;
+		tspp_source = TSPP_SOURCE_TSIF0;
+	} else if (mpq_demux->source == DMX_SOURCE_FRONT1) {
+		tsif = 1;
+		tspp_source = TSPP_SOURCE_TSIF1;
+	} else {
+		/* invalid source */
+		MPQ_DVB_ERR_PRINT(
+			"%s: invalid input source (%d)\n",
+			__func__,
+			mpq_demux->source);
+
+		return -EINVAL;
+	}
+
+	atomic_inc(&mpq_dmx_tspp_info.tsif[tsif].control_op);
+	if (mutex_lock_interruptible(&mpq_dmx_tspp_info.tsif[tsif].mutex)) {
+		atomic_dec(&mpq_dmx_tspp_info.tsif[tsif].control_op);
+		return -ERESTARTSYS;
+	}
+
+	channel_id = TSPP_CHANNEL_ID(tsif, TSPP_CHANNEL);
+	channel_ref_count = &mpq_dmx_tspp_info.tsif[tsif].channel_ref;
+	data_cnt = &mpq_dmx_tspp_info.tsif[tsif].data_cnt;
+
+	/* check if required TSPP pipe is already allocated or not */
+	if (*channel_ref_count == 0) {
+		/* invalid feed provided as the channel is not allocated */
+		MPQ_DVB_ERR_PRINT(
+			"%s: invalid feed (%d)\n",
+			__func__,
+			channel_id);
+
+		ret = -EINVAL;
+		goto out;
+	}
+
+	slot = mpq_tspp_get_filter_slot(tsif, feed->pid);
+
+	if (slot < 0) {
+		/* invalid feed provided as it has no filter allocated */
+		MPQ_DVB_ERR_PRINT(
+			"%s: mpq_tspp_get_filter_slot failed (%d,%d)\n",
+			__func__,
+			feed->pid,
+			tsif);
+
+		ret = -EINVAL;
+		goto out;
+	}
+
+	/* since filter was found, ref_count > 0 so it's ok to decrement it */
+	mpq_dmx_tspp_info.tsif[tsif].filters[slot].ref_count--;
+
+	if (mpq_dmx_tspp_info.tsif[tsif].filters[slot].ref_count) {
+		/*
+		 * there are still references to this pid, do not
+		 * remove the filter yet
+		 */
+		goto out;
+	}
+
+	if (feed->pid == TSPP_PASS_THROUGH_PID)
+		mpq_dmx_tspp_info.tsif[tsif].pass_all_flag = 0;
+	else if (feed->pid == TSPP_NULL_PACKETS_PID)
+		mpq_dmx_tspp_info.tsif[tsif].pass_nulls_flag = 0;
+
+	mpq_dmx_tspp_info.tsif[tsif].filters[slot].pid = -1;
+
+	if (mpq_dmx_tspp_info.tsif[tsif].current_filter_count <=
+					TSPP_MAX_HW_PID_FILTER_NUM) {
+		/* staying in HW filtering mode */
+		tspp_filter.priority =
+			mpq_dmx_tspp_info.tsif[tsif].filters[slot].hw_index;
+		ret = tspp_remove_filter(0, channel_id, &tspp_filter);
+		if (ret < 0) {
+			MPQ_DVB_ERR_PRINT(
+				"%s: tspp_remove_filter failed (%d,%d)\n",
+				__func__,
+				channel_id,
+				tspp_filter.priority);
+
+			goto remove_channel_failed_restore_count;
+		}
+		mpq_tspp_release_hw_filter_index(tsif, tspp_filter.priority);
+		mpq_dmx_tspp_info.tsif[tsif].filters[slot].hw_index = -1;
+
+		MPQ_DVB_DBG_PRINT(
+			"%s: HW filtering mode: Removed TSPP HW filter, PID = %d, index = %d\n",
+			__func__, feed->pid, tspp_filter.priority);
+	} else if (mpq_dmx_tspp_info.tsif[tsif].current_filter_count ==
+					(TSPP_MAX_HW_PID_FILTER_NUM + 1)) {
+		/* Crossing the threshold - from SW to HW filtering mode */
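+		/*
+		 * The switch back to HW filtering is done under a temporary
+		 * accept-all filter so that packets are not lost while the
+		 * filter set is rebuilt: add accept-all, drop the
+		 * null-blocking filters, install the per-PID HW filters,
+		 * then remove the accept-all filter.  On failure, the
+		 * previous state is restored below.
+		 */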
+
+		accept_all_filter_existed =
+			mpq_dmx_tspp_info.tsif[tsif].
+				accept_all_filter_exists_flag;
+
+		/* Add a temporary filter to accept all packets */
+		ret = mpq_tspp_add_accept_all_filter(channel_id,
+					tspp_source);
+		if (ret < 0) {
+			MPQ_DVB_ERR_PRINT(
+				"%s: mpq_tspp_add_accept_all_filter(%d, %d) failed\n",
+				__func__, channel_id, tspp_source);
+
+			goto remove_channel_failed_restore_count;
+		}
+
+		ret = mpq_tspp_remove_null_blocking_filters(channel_id,
+					tspp_source);
+		if (ret < 0) {
+			MPQ_DVB_ERR_PRINT(
+				"%s: mpq_tspp_remove_null_blocking_filters(%d, %d) failed\n",
+				__func__, channel_id, tspp_source);
+
+			restore_null_blocking_filters = 1;
+			if (!accept_all_filter_existed)
+				remove_accept_all_filter = 1;
+
+			goto remove_channel_failed_restore_count;
+		}
+
+		ret = mpq_tspp_add_all_user_filters(channel_id,
+					tspp_source);
+		if (ret < 0) {
+			MPQ_DVB_ERR_PRINT(
+				"%s: mpq_tspp_add_all_user_filters(%d, %d) failed\n",
+				__func__, channel_id, tspp_source);
+
+			remove_user_filters = 1;
+			restore_null_blocking_filters = 1;
+			if (!accept_all_filter_existed)
+				remove_accept_all_filter = 1;
+
+			goto remove_channel_failed_restore_count;
+		}
+
+		ret = mpq_tspp_remove_accept_all_filter(channel_id,
+					tspp_source);
+		if (ret < 0) {
+			MPQ_DVB_ERR_PRINT(
+				"%s: mpq_tspp_remove_accept_all_filter(%d, %d) failed\n",
+				__func__, channel_id, tspp_source);
+
+			remove_user_filters = 1;
+			restore_null_blocking_filters = 1;
+			if (!accept_all_filter_existed)
+				remove_accept_all_filter = 1;
+
+			goto remove_channel_failed_restore_count;
+		}
+	} else {
+		/* staying in SW filtering mode */
+		if ((mpq_dmx_tspp_info.tsif[tsif].pass_all_flag == 0) &&
+			(mpq_dmx_tspp_info.tsif[tsif].pass_nulls_flag == 0)) {
+
+			ret = mpq_tspp_remove_accept_all_filter(channel_id,
+						tspp_source);
+			if (ret < 0) {
+				MPQ_DVB_ERR_PRINT(
+					"%s: mpq_tspp_remove_accept_all_filter(%d, %d) failed\n",
+					__func__, channel_id,
+					tspp_source);
+
+				goto remove_channel_failed_restore_count;
+			}
+		}
+	}
+
+	mpq_dmx_tspp_info.tsif[tsif].current_filter_count--;
+	(*channel_ref_count)--;
+
+	MPQ_DVB_DBG_PRINT("%s: success, current_filter_count = %d\n",
+		__func__, mpq_dmx_tspp_info.tsif[tsif].current_filter_count);
+
+	if (*channel_ref_count == 0) {
+		/* channel is not used any more, release it */
+		tspp_unregister_notification(0, channel_id);
+		tspp_close_stream(0, channel_id);
+		tspp_close_channel(0, channel_id);
+		atomic_set(data_cnt, 0);
+	}
+
+	goto out;
+
+remove_channel_failed_restore_count:
+	/* restore internal database state */
+	mpq_dmx_tspp_info.tsif[tsif].filters[slot].pid = feed->pid;
+	mpq_dmx_tspp_info.tsif[tsif].filters[slot].ref_count++;
+
+	if (remove_user_filters)
+		mpq_tspp_remove_all_user_filters(channel_id, tspp_source);
+
+	if (restore_null_blocking_filters)
+		mpq_tspp_add_null_blocking_filters(channel_id, tspp_source);
+
+	if (remove_accept_all_filter)
+		mpq_tspp_remove_accept_all_filter(channel_id, tspp_source);
+
+	/* restore flags. we can only get here if we changed the flags. */
+	if (feed->pid == TSPP_PASS_THROUGH_PID)
+		mpq_dmx_tspp_info.tsif[tsif].pass_all_flag = 1;
+	else if (feed->pid == TSPP_NULL_PACKETS_PID)
+		mpq_dmx_tspp_info.tsif[tsif].pass_nulls_flag = 1;
+
+out:
+	mutex_unlock(&mpq_dmx_tspp_info.tsif[tsif].mutex);
+	atomic_dec(&mpq_dmx_tspp_info.tsif[tsif].control_op);
+	return ret;
+}
+
+static int mpq_tspp_dmx_start_filtering(struct dvb_demux_feed *feed)
+{
+	int ret;
+	struct mpq_demux *mpq_demux = feed->demux->priv;
+
+	MPQ_DVB_DBG_PRINT(
+		"%s(pid=%d) executed\n",
+		__func__,
+		feed->pid);
+
+	if (mpq_demux == NULL) {
+		MPQ_DVB_ERR_PRINT(
+			"%s: invalid mpq_demux handle\n",
+			__func__);
+
+		return -EINVAL;
+	}
+
+	if (mpq_demux->source < DMX_SOURCE_DVR0) {
+		/* source from TSPP, need to configure tspp pipe */
+		ret = mpq_tspp_dmx_add_channel(feed);
+
+		if (ret < 0) {
+			MPQ_DVB_DBG_PRINT(
+				"%s: mpq_tspp_dmx_add_channel failed(%d)\n",
+				__func__,
+				ret);
+			return ret;
+		}
+	}
+
+	/*
+	 * Always feed sections/PES starting from a new one and
+	 * do not partially transfer data from an older one
+	 */
+	feed->pusi_seen = 0;
+
+	return 0;
+}
+
+static int mpq_tspp_dmx_stop_filtering(struct dvb_demux_feed *feed)
+{
+	int ret = 0;
+	struct mpq_demux *mpq_demux = feed->demux->priv;
+
+	MPQ_DVB_DBG_PRINT("%s(%d) executed\n", __func__, feed->pid);
+
+	if (mpq_demux->source < DMX_SOURCE_DVR0) {
+		/* source from TSPP, need to configure tspp pipe */
+		ret = mpq_tspp_dmx_remove_channel(feed);
+	}
+
+	return ret;
+}
+
+/**
+ * Reads TSIF STC from TSPP
+ *
+ * @demux: demux device
+ * @num: STC number. 0 for TSIF0 and 1 for TSIF1.
+ * @stc: STC value
+ * @base: divisor to get 90KHz value
+ *
+ * Return: error code
+ */
+static int mpq_tspp_dmx_get_stc(struct dmx_demux *demux, unsigned int num,
+		u64 *stc, unsigned int *base)
+{
+	enum tspp_source source;
+	u32 tcr_counter;
+	u64 avtimer_stc = 0;
+	int tts_source = 0;
+
+	if (!demux || !stc || !base)
+		return -EINVAL;
+
+	if (num == 0)
+		source = TSPP_SOURCE_TSIF0;
+	else if (num == 1)
+		source = TSPP_SOURCE_TSIF1;
+	else
+		return -EINVAL;
+
+	if (tspp_get_tts_source(0, &tts_source) < 0)
+		tts_source = TSIF_TTS_TCR;
+
+	if (tts_source != TSIF_TTS_LPASS_TIMER) {
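+		/*
+		 * Per the conversion below, the TCR counter is taken to run
+		 * at 27MHz/256: scaling by 256 gives a 27MHz STC and
+		 * dividing by *base (300) yields the 90KHz MPEG timebase.
+		 */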
+		tspp_get_ref_clk_counter(0, source, &tcr_counter);
+		*stc = ((u64)tcr_counter) * 256; /* conversion to 27MHz */
+		*base = 300; /* divisor to get 90KHz clock from stc value */
+	} else {
+		if (tspp_get_lpass_time_counter(0, source, &avtimer_stc) < 0)
+			return -EINVAL;
+		*stc = avtimer_stc;
+	}
+	return 0;
+}
+
+static int mpq_tspp_dmx_init(
+			struct dvb_adapter *mpq_adapter,
+			struct mpq_demux *mpq_demux)
+{
+	int result;
+
+	/* Set the kernel-demux object capabilities */
+	mpq_demux->demux.dmx.capabilities =
+		DMX_TS_FILTERING			|
+		DMX_SECTION_FILTERING;
+
+	/* Set dvb-demux "virtual" function pointers */
+	mpq_demux->demux.priv = (void *)mpq_demux;
+	mpq_demux->demux.filternum = TSPP_MAX_SECTION_FILTER_NUM;
+	mpq_demux->demux.feednum = 128;
+	mpq_demux->demux.start_feed = mpq_tspp_dmx_start_filtering;
+	mpq_demux->demux.stop_feed = mpq_tspp_dmx_stop_filtering;
+
+	/* Initialize dvb_demux object */
+	result = dvb_dmx_init(&mpq_demux->demux);
+	if (result < 0) {
+		MPQ_DVB_ERR_PRINT("%s: dvb_dmx_init failed\n", __func__);
+		goto init_failed;
+	}
+
+	/* Now initialize the dmx-dev object */
+	mpq_demux->dmxdev.filternum = 128;
+	mpq_demux->dmxdev.demux = &mpq_demux->demux.dmx;
+	mpq_demux->dmxdev.capabilities = 0;
+	mpq_demux->dmxdev.demux->get_stc = mpq_tspp_dmx_get_stc;
+
+	result = dvb_dmxdev_init(&mpq_demux->dmxdev, mpq_adapter);
+	if (result < 0) {
+		MPQ_DVB_ERR_PRINT("%s: dvb_dmxdev_init failed (errno=%d)\n",
+						  __func__,
+						  result);
+		goto init_failed_dmx_release;
+	}
+
+	/* Get the TSIF TTS info */
+	if (tspp_get_tts_source(0, &mpq_demux->ts_packet_timestamp_source) < 0)
+		mpq_demux->ts_packet_timestamp_source = TSIF_TTS_TCR;
+
+	return 0;
+
+init_failed_dmx_release:
+	dvb_dmx_release(&mpq_demux->demux);
+init_failed:
+	return result;
+}
+
+static void mpq_tspp_dmx_release(struct mpq_demux *mpq_demux)
+{
+	dvb_dmxdev_release(&mpq_demux->dmxdev);
+	dvb_dmx_release(&mpq_demux->demux);
+}
+
+static int
+batfish_attach(struct batfish_dvb_adapter *adapter)
+{
+	struct i2c_adapter *i2c_adapter;
+	struct i2c_client *client_demod;
+	struct i2c_client *client_tuner;
+	struct i2c_board_info info;
+	struct si2168_config si2168_config;
+	struct si2157_config si2157_config;
+
+	memset(&si2168_config, 0, sizeof(si2168_config));
+	si2168_config.fe = &adapter->fe;
+	si2168_config.i2c_adapter = &i2c_adapter;
+	si2168_config.ts_mode = SI2168_TS_SERIAL;
+	si2168_config.ts_clock_inv = clock_inv;
+
+	memset(&info, 0, sizeof(info));
+	strlcpy(info.type, "si2168", I2C_NAME_SIZE);
+	info.addr = 0x64;
+	info.platform_data = &si2168_config;
+	request_module(info.type);
+	client_demod = i2c_new_device(adapter->i2c_adapter, &info);
+	if (client_demod == NULL || client_demod->dev.driver == NULL)
+		goto fail_demod_device;
+
+	if (!try_module_get(client_demod->dev.driver->owner))
+		goto fail_demod_module;
+
+	memset(&si2157_config, 0, sizeof(si2157_config));
+	si2157_config.fe = adapter->fe;
+
+	memset(&info, 0, sizeof(info));
+	strlcpy(info.type, "si2141", I2C_NAME_SIZE);
+	info.addr = 0x60;
+	info.platform_data = &si2157_config;
+	request_module("si2157");
+	client_tuner = i2c_new_device(i2c_adapter, &info);
+	if (client_tuner == NULL || client_tuner->dev.driver == NULL)
+		goto fail_tuner_device;
+
+	if (!try_module_get(client_tuner->dev.driver->owner))
+		goto fail_tuner_module;
+
+	adapter->i2c_client_demod = client_demod;
+	adapter->i2c_client_tuner = client_tuner;
+	return 0;
+
+fail_tuner_module:
+	i2c_unregister_device(client_tuner);
+fail_tuner_device:
+	module_put(client_demod->dev.driver->owner);
+fail_demod_module:
+	i2c_unregister_device(client_demod);
+fail_demod_device:
+	return -ENODEV;
+}
+
+static void
+batfish_detach(struct batfish_dvb_adapter *adapter)
+{
+	struct i2c_client *client;
+
+	client = adapter->i2c_client_tuner;
+	if (client) {
+		module_put(client->dev.driver->owner);
+		i2c_unregister_device(client);
+	}
+
+	client = adapter->i2c_client_demod;
+	if (client) {
+		module_put(client->dev.driver->owner);
+		i2c_unregister_device(client);
+	}
+}
+
+static int
+batfish_init(struct batfish_dvb_adapter *adapter)
+{
+	short int ids[] = { -1 };
+	int ret;
+
+	ret = dvb_register_adapter(&adapter->adapter, "MSM TSPP",
+				   THIS_MODULE, adapter->dev, ids);
+	if (ret < 0)
+		return ret;
+
+	ret = mpq_tspp_dmx_init(&adapter->adapter, &adapter->demux);
+	if (ret < 0)
+		goto init_unregister_adapter;
+
+	adapter->frontend.source = DMX_FRONTEND_0;
+
+	ret = adapter->demux.demux.dmx.add_frontend(&adapter->demux.demux.dmx,
+						    &adapter->frontend);
+	if (ret < 0)
+		goto init_release_tspp;
+
+	ret = adapter->demux.demux.dmx.connect_frontend(&adapter->demux.demux.dmx,
+							&adapter->frontend);
+	if (ret < 0)
+		goto init_remove_frontend;
+
+	ret = batfish_attach(adapter);
+	if (ret < 0)
+		goto init_remove_frontend;
+
+	ret = dvb_register_frontend(&adapter->adapter, adapter->fe);
+	if (ret < 0)
+		goto init_detach;
+
+	return 0;
+
+init_detach:
+	batfish_detach(adapter);
+init_remove_frontend:
+	adapter->demux.demux.dmx.remove_frontend(&adapter->demux.demux.dmx,
+						 &adapter->frontend);
+init_release_tspp:
+	mpq_tspp_dmx_release(&adapter->demux);
+init_unregister_adapter:
+	dvb_unregister_adapter(&adapter->adapter);
+	return ret;
+}
+
+static void
+batfish_deinit(struct batfish_dvb_adapter *adapter)
+{
+	dvb_unregister_frontend(adapter->fe);
+
+	adapter->demux.demux.dmx.remove_frontend(&adapter->demux.demux.dmx,
+						 &adapter->frontend);
+
+	mpq_tspp_dmx_release(&adapter->demux);
+
+	dvb_unregister_adapter(&adapter->adapter);
+
+	batfish_detach(adapter);
+}
+
+int mpq_dmx_tspp_plugin_init(struct device *dev, struct i2c_adapter *adap)
+{
+	int i;
+	int j;
+
+	for (i = 0; i < TSIF_COUNT; i++) {
+		mpq_dmx_tspp_info.tsif[i].channel_ref = 0;
+		atomic_set(&mpq_dmx_tspp_info.tsif[i].data_cnt, 0);
+		atomic_set(&mpq_dmx_tspp_info.tsif[i].control_op, 0);
+
+		for (j = 0; j < TSPP_MAX_PID_FILTER_NUM; j++) {
+			mpq_dmx_tspp_info.tsif[i].filters[j].pid = -1;
+			mpq_dmx_tspp_info.tsif[i].filters[j].ref_count = 0;
+			mpq_dmx_tspp_info.tsif[i].filters[j].hw_index = -1;
+		}
+
+		for (j = 0; j < TSPP_MAX_HW_PID_FILTER_NUM; j++)
+			mpq_dmx_tspp_info.tsif[i].hw_indexes[j] = 0;
+
+		mpq_dmx_tspp_info.tsif[i].current_filter_count = 0;
+		mpq_dmx_tspp_info.tsif[i].pass_nulls_flag = 0;
+		mpq_dmx_tspp_info.tsif[i].pass_all_flag = 0;
+		mpq_dmx_tspp_info.tsif[i].accept_all_filter_exists_flag = 0;
+
+		snprintf(mpq_dmx_tspp_info.tsif[i].name,
+				TSIF_NAME_LENGTH,
+				"dmx_tsif%d",
+				i);
+
+		init_waitqueue_head(&mpq_dmx_tspp_info.tsif[i].wait_queue);
+		mpq_dmx_tspp_info.tsif[i].thread =
+			kthread_run(
+				mpq_dmx_tspp_thread, (void *)(uintptr_t)i,
+				mpq_dmx_tspp_info.tsif[i].name);
+
+		if (IS_ERR(mpq_dmx_tspp_info.tsif[i].thread)) {
+			for (j = 0; j < i; j++) {
+				kthread_stop(mpq_dmx_tspp_info.tsif[j].thread);
+				mutex_destroy(&mpq_dmx_tspp_info.tsif[j].mutex);
+			}
+
+			MPQ_DVB_ERR_PRINT(
+				"%s: kthread_run failed\n",
+				__func__);
+
+			return -ENOMEM;
+		}
+
+		mutex_init(&mpq_dmx_tspp_info.tsif[i].mutex);
+	}
+
+	batfish_dvb_adapter.dev = dev;
+	batfish_dvb_adapter.i2c_adapter = adap;
+
+	return batfish_init(&batfish_dvb_adapter);
+}
+
+void mpq_dmx_tspp_plugin_exit(void)
+{
+	int i;
+
+	batfish_deinit(&batfish_dvb_adapter);
+
+	for (i = 0; i < TSIF_COUNT; i++) {
+		mutex_lock(&mpq_dmx_tspp_info.tsif[i].mutex);
+
+		/*
+		 * Note: tspp_close_channel will also free the TSPP buffers,
+		 * even those we allocated ourselves, by calling our free
+		 * callback.
+		 */
+		if (mpq_dmx_tspp_info.tsif[i].channel_ref) {
+			tspp_unregister_notification(0,
+				TSPP_CHANNEL_ID(i, TSPP_CHANNEL));
+			tspp_close_channel(0,
+				TSPP_CHANNEL_ID(i, TSPP_CHANNEL));
+		}
+
+		mutex_unlock(&mpq_dmx_tspp_info.tsif[i].mutex);
+		kthread_stop(mpq_dmx_tspp_info.tsif[i].thread);
+		mutex_destroy(&mpq_dmx_tspp_info.tsif[i].mutex);
+	}
+}
diff -Nruw linux-4.4.115-fbx/drivers/media/platform/msm./tspp/tspp-dvb.h linux-4.4.115-fbx/drivers/media/platform/msm/tspp/tspp-dvb.h
--- linux-4.4.115-fbx/drivers/media/platform/msm./tspp/tspp-dvb.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/media/platform/msm/tspp/tspp-dvb.h	2019-01-22 16:16:24.459255064 +0100
@@ -0,0 +1,7 @@
+#ifndef TSPP_DVB_H_
+# define TSPP_DVB_H_
+
+int mpq_dmx_tspp_plugin_init(struct device *dev, struct i2c_adapter *adap);
+void mpq_dmx_tspp_plugin_exit(void);
+
+#endif /* TSPP_DVB_H_ */
diff -Nruw linux-4.4.115-fbx/drivers/media/platform/msm./vidc/governors/fixedpoint.h linux-4.4.115-fbx/drivers/media/platform/msm/vidc/governors/fixedpoint.h
--- linux-4.4.115-fbx/drivers/media/platform/msm./vidc/governors/fixedpoint.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/media/platform/msm/vidc/governors/fixedpoint.h	2019-01-22 16:16:24.463255100 +0100
@@ -0,0 +1,72 @@
+/* Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifdef _FIXP_ARITH_H
+#error "This implementation is meant to override fixp-arith.h, don't use both"
+#endif
+
+#ifndef __FP_H__
+#define __FP_H__
+
+/*
+ * This would normally be typedef'ed, but checkpatch doesn't like typedef.
+ * It should ideally be typedef'ed to intmax_t, but that doesn't seem to
+ * be available in the kernel.
+ */
+#define fp_t size_t
+
+/* (Arbitrarily) make the low 25% of the bits the fractional bits */
+#define FP_FRACTIONAL_BITS ((sizeof(fp_t) * 8) / 4)
+
+#define FP(__i, __f_n, __f_d) \
+	((((fp_t)(__i)) << FP_FRACTIONAL_BITS) + \
+	(((__f_n) << FP_FRACTIONAL_BITS) / (__f_d)))
+
+#define FP_INT(__i) FP(__i, 0, 1)
+#define FP_ONE FP_INT(1)
+#define FP_ZERO FP_INT(0)
+
+static inline size_t fp_frac_base(void)
+{
+	return GENMASK(FP_FRACTIONAL_BITS - 1, 0);
+}
+
+static inline size_t fp_frac(fp_t a)
+{
+	return a & GENMASK(FP_FRACTIONAL_BITS - 1, 0);
+}
+
+static inline size_t fp_int(fp_t a)
+{
+	return a >> FP_FRACTIONAL_BITS;
+}
+
+static inline size_t fp_round(fp_t a)
+{
+	/* is the fractional part >= frac_max / 2? */
+	bool round_up = fp_frac(a) >= fp_frac_base() / 2;
+
+	return fp_int(a) + round_up;
+}
+
+static inline fp_t fp_mult(fp_t a, fp_t b)
+{
+	return (a * b) >> FP_FRACTIONAL_BITS;
+}
+
+static inline fp_t fp_div(fp_t a, fp_t b)
+{
+	return (a << FP_FRACTIONAL_BITS) / b;
+}
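+
+/*
+ * Worked example, assuming a 64-bit size_t (i.e. 16 fractional bits):
+ *   FP(1, 50, 100) == (1 << 16) + (50 << 16) / 100 == 0x18000   (1.5)
+ *   fp_int()  -> 1,  fp_frac() -> 0x8000,  fp_round() -> 2
+ *   fp_mult(FP_INT(2), FP(1, 50, 100)) == FP_INT(3)
+ *   fp_div(FP_INT(3), FP_INT(2)) == FP(1, 50, 100)
+ * Note that fp_mult() can overflow when the raw product exceeds fp_t;
+ * the governors below are assumed to keep operands small enough.
+ */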
+
+#endif
diff -Nruw linux-4.4.115-fbx/drivers/media/platform/msm./vidc/governors/Kconfig linux-4.4.115-fbx/drivers/media/platform/msm/vidc/governors/Kconfig
--- linux-4.4.115-fbx/drivers/media/platform/msm./vidc/governors/Kconfig	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/media/platform/msm/vidc/governors/Kconfig	2019-01-22 16:16:24.463255100 +0100
@@ -0,0 +1,6 @@
+menuconfig MSM_VIDC_GOVERNORS
+	tristate "Clock and bandwidth governors for QTI MSM V4L2 based video driver"
+	depends on MSM_VIDC_V4L2 && PM_DEVFREQ
+	help
+	Chooses a set of devfreq governors aimed at providing accurate bandwidth
+	or clock frequency values for MSM V4L2 video driver.
diff -Nruw linux-4.4.115-fbx/drivers/media/platform/msm./vidc/governors/Makefile linux-4.4.115-fbx/drivers/media/platform/msm/vidc/governors/Makefile
--- linux-4.4.115-fbx/drivers/media/platform/msm./vidc/governors/Makefile	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/media/platform/msm/vidc/governors/Makefile	2019-01-22 16:16:24.463255100 +0100
@@ -0,0 +1,9 @@
+ccflags-y := -I$(srctree)/drivers/devfreq/ \
+	-I$(srctree)/drivers/media/platform/msm/vidc/ \
+        -I$(srctree)/drivers/media/platform/msm/vidc/governors/
+
+msm-vidc-dyn-gov-objs := msm_vidc_dyn_gov.o
+
+msm-vidc-table-gov-objs := msm_vidc_table_gov.o
+
+obj-$(CONFIG_MSM_VIDC_GOVERNORS) := msm-vidc-dyn-gov.o msm-vidc-table-gov.o
diff -Nruw linux-4.4.115-fbx/drivers/media/platform/msm./vidc/governors/msm_vidc_dyn_gov.c linux-4.4.115-fbx/drivers/media/platform/msm/vidc/governors/msm_vidc_dyn_gov.c
--- linux-4.4.115-fbx/drivers/media/platform/msm./vidc/governors/msm_vidc_dyn_gov.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/media/platform/msm/vidc/governors/msm_vidc_dyn_gov.c	2019-01-22 16:16:24.463255100 +0100
@@ -0,0 +1,1153 @@
+/* Copyright (c) 2015, 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include "governor.h"
+#include "fixedpoint.h"
+#include "msm_vidc_internal.h"
+#include "msm_vidc_debug.h"
+#include "vidc_hfi_api.h"
+
+static bool debug;
+module_param(debug, bool, 0644);
+
+enum governor_mode {
+	GOVERNOR_DDR,
+	GOVERNOR_VMEM,
+	GOVERNOR_VMEM_PLUS,
+};
+
+struct governor {
+	enum governor_mode mode;
+	struct devfreq_governor devfreq_gov;
+};
+
+enum scenario {
+	SCENARIO_WORST,
+	SCENARIO_SUSTAINED_WORST,
+	SCENARIO_AVERAGE,
+	SCENARIO_MAX,
+};
+
+/*
+ * Minimum dimensions that the governor is willing to calculate
+ * bandwidth for.  Anything smaller is clamped up, i.e. bandwidth(0, 0) ==
+ * bandwidth(BASELINE_DIMENSIONS.width, BASELINE_DIMENSIONS.height)
+ */
+const struct {
+	int height, width;
+} BASELINE_DIMENSIONS = {
+	.width = 1280,
+	.height = 720,
+};
+
+/*
+ * These are hardcoded AB values that the governor votes for in certain
+ * situations, where a certain bus frequency is desired.  It isn't exactly
+ * scalable since different platforms have different bus widths, but we'll
+ * deal with that in the future.
+ */
+const unsigned long NOMINAL_BW_MBPS = 6000 /* ideally 320 MHz */,
+	SVS_BW_MBPS = 2000 /* ideally 100 MHz */;
+
+/* convert Mbps to kbps/bps (the "b" can be bits or bytes based on context) */
+#define kbps(__mbps) ((__mbps) * 1000)
+#define bps(__mbps) (kbps(__mbps) * 1000)
+
+#define GENERATE_SCENARIO_PROFILE(__average, __worst) {                        \
+	[SCENARIO_AVERAGE] = (__average),                                      \
+	[SCENARIO_WORST] =  (__worst),                                         \
+	[SCENARIO_SUSTAINED_WORST] = (__worst),                                \
+}
+
+#define GENERATE_COMPRESSION_PROFILE(__bpp, __average, __worst) {              \
+	.bpp = __bpp,                                                          \
+	.ratio = GENERATE_SCENARIO_PROFILE(__average, __worst),                \
+}
+
+/*
+ * The below table is a structural representation of the following table:
+ *  Resolution |    Bitrate |              Compression Ratio          |
+ * ............|............|.........................................|
+ * Width Height|Average High|Avg_8bpc Worst_8bpc Avg_10bpc Worst_10bpc|
+ *  1280    720|      7   14|    1.69       1.28      1.49        1.23|
+ *  1920   1080|     20   40|    1.69       1.28      1.49        1.23|
+ *  2560   1440|     32   64|     2.2       1.26      1.97        1.22|
+ *  3840   2160|     42   84|     2.2       1.26      1.97        1.22|
+ *  4096   2160|     44   88|     2.2       1.26      1.97        1.22|
+ *  4096   2304|     48   96|     2.2       1.26      1.97        1.22|
+ */
+#define COMPRESSION_RATIO_MAX 2
+static struct lut {
+	int frame_size; /* width x height */
+	unsigned long bitrate[SCENARIO_MAX];
+	struct {
+		int bpp;
+		fp_t ratio[SCENARIO_MAX];
+	} compression_ratio[COMPRESSION_RATIO_MAX];
+} const LUT[] = {
+	{
+		.frame_size = 1280 * 720,
+		.bitrate = GENERATE_SCENARIO_PROFILE(7, 14),
+		.compression_ratio = {
+			GENERATE_COMPRESSION_PROFILE(8,
+					FP(1, 69, 100),
+					FP(1, 28, 100)),
+			GENERATE_COMPRESSION_PROFILE(10,
+					FP(1, 49, 100),
+					FP(1, 23, 100)),
+		}
+	},
+	{
+		.frame_size = 1920 * 1088,
+		.bitrate = GENERATE_SCENARIO_PROFILE(20, 40),
+		.compression_ratio = {
+			GENERATE_COMPRESSION_PROFILE(8,
+					FP(1, 69, 100),
+					FP(1, 28, 100)),
+			GENERATE_COMPRESSION_PROFILE(10,
+					FP(1, 49, 100),
+					FP(1, 23, 100)),
+		}
+	},
+	{
+		.frame_size = 2560 * 1440,
+		.bitrate = GENERATE_SCENARIO_PROFILE(32, 64),
+		.compression_ratio = {
+			GENERATE_COMPRESSION_PROFILE(8,
+					FP(2, 20, 100),
+					FP(1, 26, 100)),
+			GENERATE_COMPRESSION_PROFILE(10,
+					FP(1, 97, 100),
+					FP(1, 22, 100)),
+		}
+	},
+	{
+		.frame_size = 3840 * 2160,
+		.bitrate = GENERATE_SCENARIO_PROFILE(42, 84),
+		.compression_ratio = {
+			GENERATE_COMPRESSION_PROFILE(8,
+					FP(2, 20, 100),
+					FP(1, 26, 100)),
+			GENERATE_COMPRESSION_PROFILE(10,
+					FP(1, 97, 100),
+					FP(1, 22, 100)),
+		}
+	},
+	{
+		.frame_size = 4096 * 2160,
+		.bitrate = GENERATE_SCENARIO_PROFILE(44, 88),
+		.compression_ratio = {
+			GENERATE_COMPRESSION_PROFILE(8,
+					FP(2, 20, 100),
+					FP(1, 26, 100)),
+			GENERATE_COMPRESSION_PROFILE(10,
+					FP(1, 97, 100),
+					FP(1, 22, 100)),
+		}
+	},
+	{
+		.frame_size = 4096 * 2304,
+		.bitrate = GENERATE_SCENARIO_PROFILE(48, 96),
+		.compression_ratio = {
+			GENERATE_COMPRESSION_PROFILE(8,
+					FP(2, 20, 100),
+					FP(1, 26, 100)),
+			GENERATE_COMPRESSION_PROFILE(10,
+					FP(1, 97, 100),
+					FP(1, 22, 100)),
+		}
+	},
+};
+
+static struct lut const *__lut(int width, int height)
+{
+	int frame_size = height * width, c = 0;
+
+	do {
+		if (LUT[c].frame_size >= frame_size)
+			return &LUT[c];
+	} while (++c < ARRAY_SIZE(LUT));
+
+	return &LUT[ARRAY_SIZE(LUT) - 1];
+}
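+
+/*
+ * e.g. __lut(1920, 1080) selects the 1920x1088 entry: the loop returns
+ * the first row whose frame_size covers the requested area, and anything
+ * larger than 4096x2304 falls back to the last row.
+ */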
+
+static fp_t __compression_ratio(struct lut const *entry, int bpp,
+		enum scenario s)
+{
+	int c = 0;
+
+	for (c = 0; c < COMPRESSION_RATIO_MAX; ++c) {
+		if (entry->compression_ratio[c].bpp == bpp)
+			return entry->compression_ratio[c].ratio[s];
+	}
+
+	WARN(true, "Shouldn't be here, LUT possibly corrupted?\n");
+	return FP_ZERO; /* impossible */
+}
+
+#define DUMP_HEADER_MAGIC 0xdeadbeef
+#define DUMP_FP_FMT "%FP" /* special format for fp_t */
+struct dump {
+	char *key;
+	char *format;
+	size_t val;
+};
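+
+/*
+ * Usage sketch: an entry whose val is DUMP_HEADER_MAGIC prints its key as
+ * a section header, DUMP_FP_FMT entries are decomposed into integer and
+ * fractional parts, and everything else goes through its printf format:
+ *
+ *	struct dump d[] = {
+ *		{"PARAMS", "", DUMP_HEADER_MAGIC},
+ *		{"width", "%d", 1920},
+ *		{"ratio", DUMP_FP_FMT, FP(1, 28, 100)},
+ *	};
+ *	__dump(d, ARRAY_SIZE(d));
+ */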
+
+static void __dump(struct dump dump[], int len)
+{
+	int c = 0;
+
+	for (c = 0; c < len; ++c) {
+		char format_line[128] = "", formatted_line[128] = "";
+
+		if (dump[c].val == DUMP_HEADER_MAGIC) {
+			snprintf(formatted_line, sizeof(formatted_line), "%s\n",
+					dump[c].key);
+		} else {
+			bool fp_format = !strcmp(dump[c].format, DUMP_FP_FMT);
+
+			if (!fp_format) {
+				snprintf(format_line, sizeof(format_line),
+						"    %-35s: %s\n", dump[c].key,
+						dump[c].format);
+				snprintf(formatted_line, sizeof(formatted_line),
+						format_line, dump[c].val);
+			} else {
+				size_t integer_part, fractional_part;
+
+				integer_part = fp_int(dump[c].val);
+				fractional_part = fp_frac(dump[c].val);
+				snprintf(formatted_line, sizeof(formatted_line),
+						"    %-35s: %zd + %zd/%zd\n",
+						dump[c].key, integer_part,
+						fractional_part,
+						fp_frac_base());
+			}
+		}
+
+		dprintk(VIDC_DBG, "%s", formatted_line);
+	}
+}
+
+static unsigned long __calculate_vpe(struct vidc_bus_vote_data *d,
+		enum governor_mode gm)
+{
+	return 0;
+}
+
+static bool __ubwc(enum hal_uncompressed_format f)
+{
+	switch (f) {
+	case HAL_COLOR_FORMAT_NV12_UBWC:
+	case HAL_COLOR_FORMAT_NV12_TP10_UBWC:
+		return true;
+	default:
+		return false;
+	}
+}
+
+static int __bpp(enum hal_uncompressed_format f)
+{
+	switch (f) {
+	case HAL_COLOR_FORMAT_NV12:
+	case HAL_COLOR_FORMAT_NV21:
+	case HAL_COLOR_FORMAT_NV12_UBWC:
+		return 8;
+	case HAL_COLOR_FORMAT_NV12_TP10_UBWC:
+		return 10;
+	default:
+		dprintk(VIDC_ERR,
+				"What's this?  We don't support this colorformat (%x)",
+				f);
+		return INT_MAX;
+	}
+}
+
+static unsigned long __calculate_vmem_plus_ab(struct vidc_bus_vote_data *d)
+{
+	unsigned long i = 0, vmem_plus = 0;
+
+	if (!d->imem_ab_tbl || !d->imem_ab_tbl_size) {
+		vmem_plus = 1; /* Vote for the min ab value */
+		goto exit;
+	}
+
+	/* Pick up vmem frequency based on venus core frequency */
+	for (i = 0; i < d->imem_ab_tbl_size; i++) {
+		if (d->imem_ab_tbl[i].core_freq == d->core_freq) {
+			vmem_plus = d->imem_ab_tbl[i].imem_ab;
+			break;
+		}
+	}
+
+	/*
+	 * In case we get an unsupported freq, throw a warning
+	 * and set ab to the minimum value.
+	 */
+	if (!vmem_plus) {
+		vmem_plus = 1;
+		dprintk(VIDC_WARN,
+			"could not calculate vmem ab value due to core freq mismatch\n");
+		WARN_ON(VIDC_DBG_WARN_ENABLE);
+	}
+
+exit:
+	return vmem_plus;
+}
+
+static unsigned long __calculate_decoder(struct vidc_bus_vote_data *d,
+		enum governor_mode gm)
+{
+	/*
+	 * XXX: Don't fool around with any of the hardcoded numbers unless you
+	 * know /exactly/ what you're doing.  Many of these numbers are
+	 * measured heuristics and hardcoded numbers taken from the firmware.
+	 */
+	/* Decoder parameters */
+	enum scenario scenario;
+	int width, height, lcu_size, dpb_bpp, opb_bpp, fps;
+	bool unified_dpb_opb, dpb_compression_enabled, opb_compression_enabled;
+	fp_t dpb_opb_scaling_ratio, dpb_compression_factor,
+		opb_compression_factor, qsmmu_bw_overhead_factor;
+	int vmem_size; /* in kB */
+
+	/* Derived parameters */
+	int lcu_per_frame, tnbr_per_lcu_10bpc, tnbr_per_lcu_8bpc, tnbr_per_lcu,
+		colocated_bytes_per_lcu, vmem_line_buffer, vmem_chroma_cache,
+		vmem_luma_cache, vmem_chroma_luma_cache;
+	unsigned long bitrate;
+	fp_t bins_to_bit_factor, dpb_write_factor, ten_bpc_packing_factor,
+		ten_bpc_bpp_factor, vsp_read_factor, vsp_write_factor,
+		ocmem_usage_lcu_factor, ref_ocmem_bw_factor_read,
+		ref_ocmem_bw_factor_write, bw_for_1x_8bpc, dpb_bw_for_1x,
+		motion_vector_complexity, row_cache_penalty, opb_bw;
+
+	/* Output parameters */
+	struct {
+		fp_t vsp_read, vsp_write, collocated_read, collocated_write,
+			line_buffer_read, line_buffer_write, recon_read,
+			recon_write, opb_read, opb_write, dpb_read, dpb_write,
+			total;
+	} ddr, vmem;
+
+	unsigned long ret = 0;
+
+	/* Decoder parameters setup */
+	scenario = SCENARIO_WORST;
+
+	width = max(d->width, BASELINE_DIMENSIONS.width);
+	height = max(d->height, BASELINE_DIMENSIONS.height);
+
+	lcu_size = 32;
+
+	dpb_bpp = d->num_formats >= 1 ? __bpp(d->color_formats[0]) : INT_MAX;
+	opb_bpp = d->num_formats >= 2 ? __bpp(d->color_formats[1]) : dpb_bpp;
+
+	fps = d->fps;
+
+	unified_dpb_opb = d->num_formats == 1;
+
+	dpb_opb_scaling_ratio = FP_ONE;
+
+	dpb_compression_enabled = d->num_formats >= 1 &&
+		__ubwc(d->color_formats[0]);
+	opb_compression_enabled = d->num_formats >= 2 &&
+		__ubwc(d->color_formats[1]);
+
+	dpb_compression_factor = !dpb_compression_enabled ? FP_ONE :
+		__compression_ratio(__lut(width, height), dpb_bpp, scenario);
+
+	opb_compression_factor = !opb_compression_enabled ? FP_ONE :
+		__compression_ratio(__lut(width, height), opb_bpp, scenario);
+
+	vmem_size = 512; /* in kB */
+
+	/* Derived parameters setup */
+	lcu_per_frame = DIV_ROUND_UP(width, lcu_size) *
+		DIV_ROUND_UP(height, lcu_size);
+
+	bitrate = __lut(width, height)->bitrate[scenario];
+
+	bins_to_bit_factor = FP(1, 60, 100);
+
+	dpb_write_factor = scenario == SCENARIO_AVERAGE ?
+		FP_ONE : FP(1, 5, 100);
+
+	ten_bpc_packing_factor = FP(1, 67, 1000);
+	ten_bpc_bpp_factor = FP(1, 1, 4);
+
+	vsp_read_factor = bins_to_bit_factor + FP_INT(2);
+	vsp_write_factor = bins_to_bit_factor;
+
+	tnbr_per_lcu_10bpc = lcu_size == 16 ? 384 + 192 :
+				lcu_size == 32 ? 640 + 256 :
+						1280 + 384;
+	tnbr_per_lcu_8bpc = lcu_size == 16 ? 256 + 192 :
+				lcu_size == 32 ? 512 + 256 :
+						1024 + 384;
+	tnbr_per_lcu = dpb_bpp == 10 ? tnbr_per_lcu_10bpc : tnbr_per_lcu_8bpc;
+
+	colocated_bytes_per_lcu = lcu_size == 16 ? 16 :
+				lcu_size == 32 ? 64 : 256;
+
+	ocmem_usage_lcu_factor = lcu_size == 16 ? FP(1, 8, 10) :
+				lcu_size == 32 ? FP(1, 2, 10) :
+						FP_ONE;
+	ref_ocmem_bw_factor_read = vmem_size < 296 ? FP_ZERO :
+				vmem_size < 648 ? FP(0, 1, 4) :
+						FP(0, 55, 100);
+	ref_ocmem_bw_factor_write = vmem_size < 296 ? FP_ZERO :
+				vmem_size < 648 ? FP(0, 7, 10) :
+						FP(1, 4, 10);
+
+	/* Prelim b/w calculation */
+	bw_for_1x_8bpc = fp_mult(FP_INT(width * height * fps),
+			fp_mult(FP(1, 50, 100), dpb_write_factor));
+	bw_for_1x_8bpc = fp_div(bw_for_1x_8bpc, FP_INT(bps(1)));
+
+	dpb_bw_for_1x = dpb_bpp == 8 ? bw_for_1x_8bpc :
+		fp_mult(bw_for_1x_8bpc, fp_mult(ten_bpc_packing_factor,
+					ten_bpc_bpp_factor));
+	/* VMEM adjustments */
+	vmem_line_buffer = tnbr_per_lcu * DIV_ROUND_UP(width, lcu_size) / 1024;
+	vmem_chroma_cache = dpb_bpp == 10 ? 176 : 128;
+	vmem_luma_cache = dpb_bpp == 10 ? 353 : 256;
+	vmem_chroma_luma_cache = vmem_chroma_cache + vmem_luma_cache;
+
+	motion_vector_complexity = scenario == SCENARIO_AVERAGE ?
+		FP(2, 66, 100) : FP_INT(4);
+
+	row_cache_penalty = FP_ZERO;
+	if (vmem_size < vmem_line_buffer + vmem_chroma_cache)
+		row_cache_penalty = fp_mult(FP(0, 5, 100),
+				motion_vector_complexity);
+	else if (vmem_size < vmem_line_buffer + vmem_luma_cache)
+		row_cache_penalty = fp_mult(FP(0, 7, 100),
+				motion_vector_complexity);
+	else if (vmem_size < vmem_line_buffer + vmem_chroma_cache
+			+ vmem_luma_cache)
+		row_cache_penalty = fp_mult(FP(0, 3, 100),
+				motion_vector_complexity);
+	else
+		row_cache_penalty = FP_ZERO;
+
+	opb_bw = unified_dpb_opb ? FP_ZERO :
+		fp_div(fp_div(bw_for_1x_8bpc, dpb_opb_scaling_ratio),
+				opb_compression_factor);
+
+	/* B/W breakdown on a per buffer type basis for VMEM */
+	vmem.vsp_read = FP_ZERO;
+	vmem.vsp_write = FP_ZERO;
+
+	vmem.collocated_read = FP_ZERO;
+	vmem.collocated_write = FP_ZERO;
+
+	vmem.line_buffer_read = FP_INT(tnbr_per_lcu *
+			lcu_per_frame * fps / bps(1));
+	vmem.line_buffer_write = vmem.line_buffer_read;
+
+	vmem.recon_read = FP_ZERO;
+	vmem.recon_write = FP_ZERO;
+
+	vmem.opb_read = FP_ZERO;
+	vmem.opb_write = FP_ZERO;
+
+	vmem.dpb_read = fp_mult(ocmem_usage_lcu_factor, fp_mult(
+					ref_ocmem_bw_factor_read,
+					dpb_bw_for_1x));
+	vmem.dpb_write = fp_mult(ocmem_usage_lcu_factor, fp_mult(
+					ref_ocmem_bw_factor_write,
+					dpb_bw_for_1x));
+
+	vmem.total = vmem.vsp_read + vmem.vsp_write +
+		vmem.collocated_read + vmem.collocated_write +
+		vmem.line_buffer_read + vmem.line_buffer_write +
+		vmem.recon_read + vmem.recon_write +
+		vmem.opb_read + vmem.opb_write +
+		vmem.dpb_read + vmem.dpb_write;
+
+	/*
+	 * Attempt to force VMEM to a certain frequency for 4K
+	 */
+	if (width * height * fps >= 3840 * 2160 * 60)
+		vmem.total = FP_INT(NOMINAL_BW_MBPS);
+	else if (width * height * fps >= 3840 * 2160 * 30)
+		vmem.total = FP_INT(SVS_BW_MBPS);
+
+	/* ........................................ for DDR */
+	ddr.vsp_read = fp_div(fp_mult(FP_INT(bitrate),
+				vsp_read_factor), FP_INT(8));
+	ddr.vsp_write = fp_div(fp_mult(FP_INT(bitrate),
+				vsp_write_factor), FP_INT(8));
+
+	ddr.collocated_read = FP_INT(lcu_per_frame *
+			colocated_bytes_per_lcu * fps / bps(1));
+	ddr.collocated_write = FP_INT(lcu_per_frame *
+			colocated_bytes_per_lcu * fps / bps(1));
+
+	ddr.line_buffer_read = vmem_size ? FP_ZERO : vmem.line_buffer_read;
+	ddr.line_buffer_write = vmem_size ? FP_ZERO : vmem.line_buffer_write;
+
+	ddr.recon_read = FP_ZERO;
+	ddr.recon_write = fp_div(dpb_bw_for_1x, dpb_compression_factor);
+
+	ddr.opb_read = FP_ZERO;
+	ddr.opb_write = opb_bw;
+
+	ddr.dpb_read = fp_div(fp_mult(dpb_bw_for_1x,
+				motion_vector_complexity + row_cache_penalty),
+			dpb_compression_factor);
+	ddr.dpb_write = FP_ZERO;
+
+	ddr.total = ddr.vsp_read + ddr.vsp_write +
+		ddr.collocated_read + ddr.collocated_write +
+		ddr.line_buffer_read + ddr.line_buffer_write +
+		ddr.recon_read + ddr.recon_write +
+		ddr.opb_read + ddr.opb_write +
+		ddr.dpb_read + ddr.dpb_write;
+
+	qsmmu_bw_overhead_factor = FP(1, 3, 100);
+	ddr.total = fp_mult(ddr.total, qsmmu_bw_overhead_factor);
+
+	/* Dump all the variables for easier debugging */
+	if (debug) {
+		struct dump dump[] = {
+		{"DECODER PARAMETERS", "", DUMP_HEADER_MAGIC},
+		{"content", "%d", scenario},
+		{"LCU size", "%d", lcu_size},
+		{"DPB bitdepth", "%d", dpb_bpp},
+		{"frame rate", "%d", fps},
+		{"DPB/OPB unified", "%d", unified_dpb_opb},
+		{"DPB/OPB downscaling ratio", DUMP_FP_FMT,
+			dpb_opb_scaling_ratio},
+		{"DPB compression", "%d", dpb_compression_enabled},
+		{"OPB compression", "%d", opb_compression_enabled},
+		{"DPB compression factor", DUMP_FP_FMT,
+			dpb_compression_factor},
+		{"OPB compression factor", DUMP_FP_FMT,
+			opb_compression_factor},
+		{"VMEM size", "%dkB", vmem_size},
+		{"frame width", "%d", width},
+		{"frame height", "%d", height},
+
+		{"DERIVED PARAMETERS (1)", "", DUMP_HEADER_MAGIC},
+		{"LCUs/frame", "%d", lcu_per_frame},
+		{"bitrate (Mbit/sec)", "%lu", bitrate},
+		{"bins to bit factor", DUMP_FP_FMT, bins_to_bit_factor},
+		{"DPB write factor", DUMP_FP_FMT, dpb_write_factor},
+		{"10bpc packing factor", DUMP_FP_FMT,
+			ten_bpc_packing_factor},
+		{"10bpc,BPP factor", DUMP_FP_FMT, ten_bpc_bpp_factor},
+		{"VSP read factor", DUMP_FP_FMT, vsp_read_factor},
+		{"VSP write factor", DUMP_FP_FMT, vsp_write_factor},
+		{"TNBR/LCU_10bpc", "%d", tnbr_per_lcu_10bpc},
+		{"TNBR/LCU_8bpc", "%d", tnbr_per_lcu_8bpc},
+		{"TNBR/LCU", "%d", tnbr_per_lcu},
+		{"colocated bytes/LCU", "%d", colocated_bytes_per_lcu},
+		{"OCMEM usage LCU factor", DUMP_FP_FMT,
+			ocmem_usage_lcu_factor},
+		{"ref OCMEM b/w factor (read)", DUMP_FP_FMT,
+			ref_ocmem_bw_factor_read},
+		{"ref OCMEM b/w factor (write)", DUMP_FP_FMT,
+			ref_ocmem_bw_factor_write},
+		{"B/W for 1x (NV12 8bpc)", DUMP_FP_FMT, bw_for_1x_8bpc},
+		{"DPB B/W For 1x (NV12)", DUMP_FP_FMT, dpb_bw_for_1x},
+
+		{"VMEM", "", DUMP_HEADER_MAGIC},
+		{"line buffer", "%d", vmem_line_buffer},
+		{"chroma cache", "%d", vmem_chroma_cache},
+		{"luma cache", "%d", vmem_luma_cache},
+		{"luma & chroma cache", "%d", vmem_chroma_luma_cache},
+
+		{"DERIVED PARAMETERS (2)", "", DUMP_HEADER_MAGIC},
+		{"MV complexity", DUMP_FP_FMT, motion_vector_complexity},
+		{"row cache penalty", DUMP_FP_FMT, row_cache_penalty},
+		{"OPB B/W (single instance)", DUMP_FP_FMT, opb_bw},
+
+		{"INTERMEDIATE DDR B/W", "", DUMP_HEADER_MAGIC},
+		{"VSP read", DUMP_FP_FMT, ddr.vsp_read},
+		{"VSP write", DUMP_FP_FMT, ddr.vsp_write},
+		{"collocated read", DUMP_FP_FMT, ddr.collocated_read},
+		{"collocated write", DUMP_FP_FMT, ddr.collocated_write},
+		{"line buffer read", DUMP_FP_FMT, ddr.line_buffer_read},
+		{"line buffer write", DUMP_FP_FMT, ddr.line_buffer_write},
+		{"recon read", DUMP_FP_FMT, ddr.recon_read},
+		{"recon write", DUMP_FP_FMT, ddr.recon_write},
+		{"OPB read", DUMP_FP_FMT, ddr.opb_read},
+		{"OPB write", DUMP_FP_FMT, ddr.opb_write},
+		{"DPB read", DUMP_FP_FMT, ddr.dpb_read},
+		{"DPB write", DUMP_FP_FMT, ddr.dpb_write},
+
+		{"INTERMEDIATE VMEM B/W", "", DUMP_HEADER_MAGIC},
+		{"VSP read", DUMP_FP_FMT, vmem.vsp_read},
+		{"VSP write", DUMP_FP_FMT, vmem.vsp_write},
+		{"collocated read", DUMP_FP_FMT, vmem.collocated_read},
+		{"collocated write", DUMP_FP_FMT, vmem.collocated_write},
+		{"line buffer read", DUMP_FP_FMT, vmem.line_buffer_read},
+		{"line buffer write", DUMP_FP_FMT, vmem.line_buffer_write},
+		{"recon read", DUMP_FP_FMT, vmem.recon_read},
+		{"recon write", DUMP_FP_FMT, vmem.recon_write},
+		{"OPB read", DUMP_FP_FMT, vmem.opb_read},
+		{"OPB write", DUMP_FP_FMT, vmem.opb_write},
+		{"DPB read", DUMP_FP_FMT, vmem.dpb_read},
+		{"DPB write", DUMP_FP_FMT, vmem.dpb_write},
+		};
+		__dump(dump, ARRAY_SIZE(dump));
+	}
+
+	switch (gm) {
+	case GOVERNOR_DDR:
+		ret = kbps(fp_round(ddr.total));
+		break;
+	case GOVERNOR_VMEM:
+		ret = kbps(fp_round(vmem.total));
+		break;
+	case GOVERNOR_VMEM_PLUS:
+		ret = __calculate_vmem_plus_ab(d);
+		break;
+	default:
+		dprintk(VIDC_ERR, "%s - Unknown governor\n", __func__);
+	}
+
+	return ret;
+}
+
+static unsigned long __calculate_encoder(struct vidc_bus_vote_data *d,
+		enum governor_mode gm)
+{
+	/*
+	 * XXX: Don't fool around with any of the hardcoded numbers unless you
+	 * know /exactly/ what you're doing.  Many of these numbers are
+	 * measured heuristics and hardcoded numbers taken from the firmware.
+	 */
+	/* Encoder Parameters */
+	enum scenario scenario, bitrate_scenario;
+	enum hal_video_codec standard;
+	int width, height, fps, vmem_size;
+	enum hal_uncompressed_format dpb_color_format;
+	enum hal_uncompressed_format original_color_format;
+	bool dpb_compression_enabled, original_compression_enabled,
+		two_stage_encoding, low_power, rotation, cropping_or_scaling;
+	fp_t dpb_compression_factor, original_compression_factor,
+		qsmmu_bw_overhead_factor;
+	bool b_frames_enabled;
+
+	/* Derived Parameters */
+	int lcu_size;
+	enum gop {
+		GOP_IBBP,
+		GOP_IPPP,
+	} gop;
+	unsigned long bitrate;
+	fp_t bins_to_bit_factor, chroma_luma_factor_dpb, one_frame_bw_dpb,
+		 chroma_luma_factor_original, one_frame_bw_original,
+		 line_buffer_size_per_lcu, line_buffer_size, line_buffer_bw,
+		 original_vmem_requirement, bw_increase_p, bw_increase_b;
+	int collocated_mv_per_lcu, max_transaction_size,
+		search_window_size_vertical_p, search_window_factor_p,
+		search_window_factor_bw_p, vmem_size_p, available_vmem_p,
+		search_window_size_vertical_b, search_window_factor_b,
+		search_window_factor_bw_b, vmem_size_b, available_vmem_b;
+
+	/* Output parameters */
+	struct {
+		fp_t vsp_read, vsp_write, collocated_read, collocated_write,
+			line_buffer_read, line_buffer_write, original_read,
+			original_write, dpb_read, dpb_write, total;
+	} ddr, vmem;
+
+	unsigned long ret = 0;
+
+	/* Encoder Parameters setup */
+	scenario = SCENARIO_WORST;
+
+	standard = d->codec;
+	width = max(d->width, BASELINE_DIMENSIONS.width);
+	height = max(d->height, BASELINE_DIMENSIONS.height);
+
+	dpb_color_format = HAL_COLOR_FORMAT_NV12_UBWC;
+	original_color_format = d->num_formats >= 1 ?
+		d->color_formats[0] : HAL_UNUSED_COLOR;
+
+	fps = d->fps;
+	bitrate_scenario = SCENARIO_WORST;
+
+	dpb_compression_enabled = __ubwc(dpb_color_format);
+	original_compression_enabled = __ubwc(original_color_format);
+
+	two_stage_encoding = false;
+	low_power = d->power_mode == VIDC_POWER_LOW;
+	b_frames_enabled = false;
+
+	dpb_compression_factor = !dpb_compression_enabled ? FP_ONE :
+		__compression_ratio(__lut(width, height),
+				__bpp(dpb_color_format), scenario);
+	original_compression_factor = !original_compression_enabled ? FP_ONE :
+		__compression_ratio(__lut(width, height),
+				__bpp(original_color_format), scenario);
+
+	rotation = false;
+	cropping_or_scaling = false;
+	vmem_size = 512; /* in kB */
+
+	/* Derived Parameters */
+	lcu_size = 16;
+	gop = b_frames_enabled ? GOP_IBBP : GOP_IPPP;
+	bitrate = __lut(width, height)->bitrate[bitrate_scenario];
+	bins_to_bit_factor = FP(1, 6, 10);
+
+	/*
+	 * FIXME: Minor color format related hack: a lot of the derived params
+	 * depend on the YUV bitdepth as a variable.  However, we don't have
+	 * appropriate enums defined yet (hence no support).  As a result omit
+	 * a lot of the checks (which should look like the snippet below) in
+	 * favour of hardcoding.
+	 *      dpb_color_format == YUV420 ? 0.5 :
+	 *      dpb_color_format == YUV422 ? 1.0 : 2.0
+	 * Similar hacks are annotated inline in code with the string "CF hack"
+	 * for documentation purposes.
+	 */
+	chroma_luma_factor_dpb = FP(0, 1, 2);
+	one_frame_bw_dpb = fp_mult(FP_ONE + chroma_luma_factor_dpb,
+			fp_div(FP_INT(width * height * fps),
+				FP_INT(1000 * 1000)));
+
+	chroma_luma_factor_original = FP(0, 1, 2); /* XXX: CF hack */
+	one_frame_bw_original = fp_mult(FP_ONE + chroma_luma_factor_original,
+			fp_div(FP_INT(width * height * fps),
+				FP_INT(1000 * 1000)));
+
+	line_buffer_size_per_lcu = FP_ZERO;
+	if (lcu_size == 16)
+		line_buffer_size_per_lcu = FP_INT(128) + fp_mult(FP_INT(256),
+					FP_ONE /*XXX: CF hack */);
+	else
+		line_buffer_size_per_lcu = FP_INT(192) + fp_mult(FP_INT(512),
+					FP_ONE /*XXX: CF hack */);
+
+	line_buffer_size = fp_div(
+			fp_mult(FP_INT(width / lcu_size),
+				line_buffer_size_per_lcu),
+			FP_INT(1024));
+	line_buffer_bw = fp_mult(line_buffer_size,
+			fp_div(FP_INT((height / lcu_size /
+				(two_stage_encoding ? 2 : 1) - 1) * fps),
+				FP_INT(1000)));
+
+	collocated_mv_per_lcu = lcu_size == 16 ? 16 : 64;
+	max_transaction_size = 256;
+
+	original_vmem_requirement = FP_INT(3 *
+			(two_stage_encoding ? 2 : 1) * lcu_size);
+	original_vmem_requirement = fp_mult(original_vmem_requirement,
+			(FP_ONE + chroma_luma_factor_original));
+	original_vmem_requirement += FP_INT((cropping_or_scaling ? 3 : 0) * 2);
+	original_vmem_requirement = fp_mult(original_vmem_requirement,
+			FP_INT(max_transaction_size));
+	original_vmem_requirement = fp_div(original_vmem_requirement,
+			FP_INT(1024));
+
+	search_window_size_vertical_p = low_power ? 32 :
+					b_frames_enabled ? 80 :
+					width > 2048 ? 64 : 48;
+	search_window_factor_p = search_window_size_vertical_p * 2 / lcu_size;
+	search_window_factor_bw_p = !two_stage_encoding ?
+		search_window_size_vertical_p * 2 / lcu_size + 1 :
+		(search_window_size_vertical_p * 2 / lcu_size + 2) / 2;
+	vmem_size_p = (search_window_factor_p * width + 128 * 2) *
+		lcu_size / 2 / 1024; /* XXX: CF hack */
+	bw_increase_p = fp_mult(one_frame_bw_dpb,
+			FP_INT(search_window_factor_bw_p - 1) / 3);
+	available_vmem_p = min_t(int, 3, (vmem_size - fp_int(line_buffer_size) -
+			fp_int(original_vmem_requirement)) / vmem_size_p);
+
+	search_window_size_vertical_b = 48;
+	search_window_factor_b = search_window_size_vertical_b * 2 / lcu_size;
+	search_window_factor_bw_b = !two_stage_encoding ?
+		search_window_size_vertical_b * 2 / lcu_size + 1 :
+		(search_window_size_vertical_b * 2 / lcu_size + 2) / 2;
+	vmem_size_b = (search_window_factor_b * width + 128 * 2) * lcu_size /
+		2 / 1024;
+	bw_increase_b = fp_mult(one_frame_bw_dpb,
+			FP_INT((search_window_factor_bw_b - 1) / 3));
+	available_vmem_b = min_t(int, 6, (vmem_size - fp_int(line_buffer_size) -
+			fp_int(original_vmem_requirement)) / vmem_size_b);
+
+	/* Output parameters for DDR */
+	ddr.vsp_read = fp_mult(fp_div(FP_INT(bitrate), FP_INT(8)),
+			bins_to_bit_factor);
+	ddr.vsp_write = ddr.vsp_read + fp_div(FP_INT(bitrate), FP_INT(8));
+
+	ddr.collocated_read = fp_div(FP_INT(DIV_ROUND_UP(width, lcu_size) *
+			DIV_ROUND_UP(height, lcu_size) *
+			collocated_mv_per_lcu * fps), FP_INT(1000 * 1000));
+	ddr.collocated_write = ddr.collocated_read;
+
+	ddr.line_buffer_read = (FP_INT(vmem_size) >= line_buffer_size +
+		original_vmem_requirement) ? FP_ZERO : line_buffer_bw;
+	ddr.line_buffer_write = ddr.line_buffer_read;
+
+	ddr.original_read = fp_div(one_frame_bw_original,
+			original_compression_factor);
+	ddr.original_write = FP_ZERO;
+
+	ddr.dpb_read = FP_ZERO;
+	if (gop == GOP_IPPP) {
+		ddr.dpb_read = one_frame_bw_dpb + fp_mult(bw_increase_p,
+			FP_INT(3 - available_vmem_p));
+	} else if (scenario == SCENARIO_WORST ||
+			scenario == SCENARIO_SUSTAINED_WORST) {
+		ddr.dpb_read = fp_mult(one_frame_bw_dpb, FP_INT(2));
+		ddr.dpb_read += fp_mult(FP_INT(6 - available_vmem_b),
+				bw_increase_b);
+	} else {
+		fp_t part_p, part_b;
+
+		part_p = one_frame_bw_dpb + fp_mult(bw_increase_p,
+				FP_INT(3 - available_vmem_p));
+		part_p = fp_div(part_p, FP_INT(3));
+
+		part_b = fp_mult(one_frame_bw_dpb, FP_INT(2)) +
+			fp_mult(FP_INT(6 - available_vmem_b), bw_increase_b);
+		part_b = fp_mult(part_b, FP(0, 2, 3));
+
+		ddr.dpb_read = part_p + part_b;
+	}
+
+	ddr.dpb_read = fp_div(ddr.dpb_read, dpb_compression_factor);
+	ddr.dpb_write = fp_div(one_frame_bw_dpb, dpb_compression_factor);
+
+	ddr.total = ddr.vsp_read + ddr.vsp_write +
+		ddr.collocated_read + ddr.collocated_write +
+		ddr.line_buffer_read + ddr.line_buffer_write +
+		ddr.original_read + ddr.original_write +
+		ddr.dpb_read + ddr.dpb_write;
+
+	qsmmu_bw_overhead_factor = FP(1, 3, 100);
+	ddr.total = fp_mult(ddr.total, qsmmu_bw_overhead_factor);
+
+	/* ................. for VMEM */
+	vmem.vsp_read = FP_ZERO;
+	vmem.vsp_write = FP_ZERO;
+
+	vmem.collocated_read = FP_ZERO;
+	vmem.collocated_write = FP_ZERO;
+
+	vmem.line_buffer_read = line_buffer_bw - ddr.line_buffer_read;
+	vmem.line_buffer_write = vmem.line_buffer_read;
+
+	vmem.original_read = FP_INT(vmem_size) >= original_vmem_requirement ?
+		ddr.original_read : FP_ZERO;
+	vmem.original_write = vmem.original_read;
+
+	vmem.dpb_read = FP_ZERO;
+	if (gop == GOP_IPPP) {
+		fp_t temp = fp_mult(one_frame_bw_dpb,
+			FP_INT(search_window_factor_bw_p * available_vmem_p));
+		temp = fp_div(temp, FP_INT(3));
+
+		vmem.dpb_read = temp;
+	} else if (scenario != SCENARIO_AVERAGE) {
+		fp_t temp = fp_mult(one_frame_bw_dpb, FP_INT(2));
+
+		temp = fp_mult(temp, FP_INT(search_window_factor_bw_b *
+					available_vmem_b));
+		temp = fp_div(temp, FP_INT(6));
+
+		vmem.dpb_read = temp;
+	} else {
+		fp_t part_p, part_b;
+
+		part_p = fp_mult(one_frame_bw_dpb, FP_INT(
+					search_window_factor_bw_p *
+					available_vmem_p));
+		part_p = fp_div(part_p, FP_INT(3 * 3));
+
+		part_b = fp_mult(one_frame_bw_dpb, FP_INT(2 *
+					search_window_factor_bw_b *
+					available_vmem_b));
+		part_b = fp_div(part_b, FP_INT(6));
+		part_b = fp_mult(part_b, FP(0, 2, 3));
+
+		vmem.dpb_read = part_p + part_b;
+	}
+
+	vmem.dpb_write = FP_ZERO;
+	if (gop == GOP_IPPP) {
+		fp_t temp = fp_mult(one_frame_bw_dpb,
+				FP_INT(available_vmem_p));
+		temp = fp_div(temp, FP_INT(3));
+
+		vmem.dpb_write = temp;
+	} else if (scenario != SCENARIO_AVERAGE) {
+		fp_t temp = fp_mult(one_frame_bw_dpb,
+				FP_INT(2 * available_vmem_b));
+		temp = fp_div(temp, FP_INT(6));
+
+		vmem.dpb_write = temp;
+	} else {
+		fp_t part_b, part_p;
+
+		part_p = fp_mult(one_frame_bw_dpb, FP_INT(available_vmem_p));
+		part_p = fp_div(part_p, FP_INT(9));
+
+		part_b = fp_mult(one_frame_bw_dpb, FP_INT(
+					2 * available_vmem_b));
+		part_b = fp_div(part_b, FP_INT(6));
+		part_b = fp_mult(part_b, FP(0, 2, 3));
+
+		vmem.dpb_write = part_p + part_b;
+	}
+
+	vmem.total = vmem.vsp_read + vmem.vsp_write +
+		vmem.collocated_read + vmem.collocated_write +
+		vmem.line_buffer_read + vmem.line_buffer_write +
+		vmem.original_read + vmem.original_write +
+		vmem.dpb_read + vmem.dpb_write;
+
+	/*
+	 * When in low power mode, attempt to force the VMEM clocks a certain
+	 * frequency that DCVS would prefer
+	 */
+	if (width * height >= 3840 * 2160 && low_power)
+		vmem.total = FP_INT(NOMINAL_BW_MBPS);
+
+	if (debug) {
+		struct dump dump[] = {
+		{"ENCODER PARAMETERS", "", DUMP_HEADER_MAGIC},
+		{"scenario", "%d", scenario},
+		{"standard", "%#x", standard},
+		{"width", "%d", width},
+		{"height", "%d", height},
+		{"DPB format", "%#x", dpb_color_format},
+		{"original frame format", "%#x", original_color_format},
+		{"fps", "%d", fps},
+		{"target bitrate", "%d", bitrate_scenario},
+		{"DPB compression enable", "%d", dpb_compression_enabled},
+		{"original compression enable", "%d",
+			original_compression_enabled},
+		{"two stage encoding", "%d", two_stage_encoding},
+		{"low power mode", "%d", low_power},
+		{"DPB compression factor", DUMP_FP_FMT,
+			dpb_compression_factor},
+		{"original compression factor", DUMP_FP_FMT,
+			original_compression_factor},
+		{"rotation", "%d", rotation},
+		{"cropping or scaling", "%d", cropping_or_scaling},
+		{"VMEM size (KB)", "%d", vmem_size},
+
+		{"DERIVED PARAMETERS", "", DUMP_HEADER_MAGIC},
+		{"LCU size", "%d", lcu_size},
+		{"GOP pattern", "%d", gop},
+		{"bitrate (Mbit/sec)", "%lu", bitrate},
+		{"bins to bit factor", DUMP_FP_FMT, bins_to_bit_factor},
+		{"B-frames enabled", "%d", b_frames_enabled},
+		{"search window size vertical (B)", "%d",
+			search_window_size_vertical_b},
+		{"search window factor (B)", "%d", search_window_factor_b},
+		{"search window factor BW (B)", "%d",
+			search_window_factor_bw_b},
+		{"VMEM size (B)", "%d", vmem_size_b},
+		{"bw increase (MB/s) (B)", DUMP_FP_FMT, bw_increase_b},
+		{"available VMEM (B)", "%d", available_vmem_b},
+		{"search window size vertical (P)", "%d",
+			search_window_size_vertical_p},
+		{"search window factor (P)", "%d", search_window_factor_p},
+		{"search window factor BW (P)", "%d",
+			search_window_factor_bw_p},
+		{"VMEM size (P)", "%d", vmem_size_p},
+		{"bw increase (MB/s) (P)", DUMP_FP_FMT, bw_increase_p},
+		{"available VMEM (P)", "%d", available_vmem_p},
+		{"chroma/luma factor DPB", DUMP_FP_FMT,
+			chroma_luma_factor_dpb},
+		{"one frame BW DPB (MB/s)", DUMP_FP_FMT, one_frame_bw_dpb},
+		{"chroma/Luma factor original", DUMP_FP_FMT,
+			chroma_luma_factor_original},
+		{"one frame BW original (MB/s)", DUMP_FP_FMT,
+			one_frame_bw_original},
+		{"line buffer size per LCU", DUMP_FP_FMT,
+			line_buffer_size_per_lcu},
+		{"line buffer size (KB)", DUMP_FP_FMT, line_buffer_size},
+		{"line buffer BW (MB/s)", DUMP_FP_FMT, line_buffer_bw},
+		{"collocated MVs per LCU", "%d", collocated_mv_per_lcu},
+		{"original VMEM requirement (KB)", DUMP_FP_FMT,
+			original_vmem_requirement},
+
+		{"INTERMEDIATE B/W DDR", "", DUMP_HEADER_MAGIC},
+		{"VSP read", DUMP_FP_FMT, ddr.vsp_read},
+		{"VSP write", DUMP_FP_FMT, ddr.vsp_write},
+		{"collocated read", DUMP_FP_FMT, ddr.collocated_read},
+		{"collocated write", DUMP_FP_FMT, ddr.collocated_write},
+		{"line buffer read", DUMP_FP_FMT, ddr.line_buffer_read},
+		{"line buffer write", DUMP_FP_FMT, ddr.line_buffer_write},
+		{"original read", DUMP_FP_FMT, ddr.original_read},
+		{"original write", DUMP_FP_FMT, ddr.original_write},
+		{"DPB read", DUMP_FP_FMT, ddr.dpb_read},
+		{"DPB write", DUMP_FP_FMT, ddr.dpb_write},
+
+		{"INTERMEDIATE B/W VMEM", "", DUMP_HEADER_MAGIC},
+		{"VSP read", DUMP_FP_FMT, vmem.vsp_read},
+		{"VSP write", DUMP_FP_FMT, vmem.vsp_write},
+		{"collocated read", DUMP_FP_FMT, vmem.collocated_read},
+		{"collocated write", DUMP_FP_FMT, vmem.collocated_write},
+		{"line buffer read", DUMP_FP_FMT, vmem.line_buffer_read},
+		{"line buffer write", DUMP_FP_FMT, vmem.line_buffer_write},
+		{"original read", DUMP_FP_FMT, vmem.original_read},
+		{"original write", DUMP_FP_FMT, vmem.original_write},
+		{"DPB read", DUMP_FP_FMT, vmem.dpb_read},
+		{"DPB write", DUMP_FP_FMT, vmem.dpb_write},
+		};
+		__dump(dump, ARRAY_SIZE(dump));
+	}
+
+	switch (gm) {
+	case GOVERNOR_DDR:
+		ret = kbps(fp_round(ddr.total));
+		break;
+	case GOVERNOR_VMEM:
+		ret = kbps(fp_round(vmem.total));
+		break;
+	case GOVERNOR_VMEM_PLUS:
+		ret = __calculate_vmem_plus_ab(d);
+		break;
+	default:
+		dprintk(VIDC_ERR, "%s - Unknown governor\n", __func__);
+	}
+
+	return ret;
+}
+
+static unsigned long __calculate(struct vidc_bus_vote_data *d,
+		enum governor_mode gm)
+{
+	unsigned long (*calc[])(struct vidc_bus_vote_data *,
+			enum governor_mode) = {
+		[HAL_VIDEO_DOMAIN_VPE] = __calculate_vpe,
+		[HAL_VIDEO_DOMAIN_ENCODER] = __calculate_encoder,
+		[HAL_VIDEO_DOMAIN_DECODER] = __calculate_decoder,
+	};
+
+	return calc[d->domain](d, gm);
+}
+
+static int __get_target_freq(struct devfreq *dev, unsigned long *freq,
+		u32 *flag)
+{
+	unsigned long ab_kbps = 0, c = 0;
+	struct devfreq_dev_status stats = {0};
+	struct msm_vidc_gov_data *vidc_data = NULL;
+	struct governor *gov = NULL;
+
+	if (!dev || !freq || !flag)
+		return -EINVAL;
+
+	gov = container_of(dev->governor,
+			struct governor, devfreq_gov);
+	dev->profile->get_dev_status(dev->dev.parent, &stats);
+	vidc_data = (struct msm_vidc_gov_data *)stats.private_data;
+
+	for (c = 0; c < vidc_data->data_count; ++c) {
+		if (vidc_data->data[c].power_mode == VIDC_POWER_TURBO) {
+			*freq = INT_MAX;
+			goto exit;
+		}
+	}
+
+	for (c = 0; c < vidc_data->data_count; ++c)
+		ab_kbps += __calculate(&vidc_data->data[c], gov->mode);
+
+	*freq = clamp(ab_kbps, dev->min_freq, dev->max_freq ?: UINT_MAX);
+exit:
+	return 0;
+}
+
+static int __event_handler(struct devfreq *devfreq, unsigned int event,
+		void *data)
+{
+	int rc = 0;
+
+	if (!devfreq)
+		return -EINVAL;
+
+	switch (event) {
+	case DEVFREQ_GOV_START:
+	case DEVFREQ_GOV_RESUME:
+		mutex_lock(&devfreq->lock);
+		rc = update_devfreq(devfreq);
+		mutex_unlock(&devfreq->lock);
+		break;
+	}
+
+	return rc;
+}
+
+static struct governor governors[] = {
+	{
+		.mode = GOVERNOR_DDR,
+		.devfreq_gov = {
+			.name = "msm-vidc-ddr",
+			.get_target_freq = __get_target_freq,
+			.event_handler = __event_handler,
+		},
+	},
+	{
+		.mode = GOVERNOR_VMEM,
+		.devfreq_gov = {
+			.name = "msm-vidc-vmem",
+			.get_target_freq = __get_target_freq,
+			.event_handler = __event_handler,
+		},
+	},
+	{
+		.mode = GOVERNOR_VMEM_PLUS,
+		.devfreq_gov = {
+			.name = "msm-vidc-vmem+",
+			.get_target_freq = __get_target_freq,
+			.event_handler = __event_handler,
+		},
+	},
+};
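+
+/*
+ * A devfreq device opts into one of these governors by name; sketch (the
+ * exact call site lives in the video driver, which is assumed to pass the
+ * governor name for its bus devfreq devices):
+ *
+ *	devfreq = devfreq_add_device(dev, &profile, "msm-vidc-ddr", NULL);
+ */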
+
+static int __init msm_vidc_bw_gov_init(void)
+{
+	int c = 0, rc = 0;
+
+	for (c = 0; c < ARRAY_SIZE(governors); ++c) {
+		dprintk(VIDC_DBG, "Adding governor %s\n",
+				governors[c].devfreq_gov.name);
+
+		rc = devfreq_add_governor(&governors[c].devfreq_gov);
+		if (rc) {
+			dprintk(VIDC_ERR, "Error adding governor %s: %d\n",
+				governors[c].devfreq_gov.name, rc);
+			break;
+		}
+	}
+
+	return rc;
+}
+module_init(msm_vidc_bw_gov_init);
+
+static void __exit msm_vidc_bw_gov_exit(void)
+{
+	int c = 0;
+
+	for (c = 0; c < ARRAY_SIZE(governors); ++c) {
+		dprintk(VIDC_DBG, "Removing governor %s\n",
+				governors[c].devfreq_gov.name);
+		devfreq_remove_governor(&governors[c].devfreq_gov);
+	}
+}
+module_exit(msm_vidc_bw_gov_exit);
+MODULE_LICENSE("GPL v2");
diff -Nruw linux-4.4.115-fbx/drivers/media/platform/msm./vidc/governors/msm_vidc_table_gov.c linux-4.4.115-fbx/drivers/media/platform/msm/vidc/governors/msm_vidc_table_gov.c
--- linux-4.4.115-fbx/drivers/media/platform/msm./vidc/governors/msm_vidc_table_gov.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/media/platform/msm/vidc/governors/msm_vidc_table_gov.c	2019-01-22 16:16:24.463255100 +0100
@@ -0,0 +1,384 @@
+/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/string.h>
+#include "governor.h"
+#include "msm_vidc_debug.h"
+#include "msm_vidc_res_parse.h"
+#include "msm_vidc_internal.h"
+#include "venus_hfi.h"
+
+enum bus_profile {
+	VIDC_BUS_PROFILE_NORMAL			= BIT(0),
+	VIDC_BUS_PROFILE_LOW			= BIT(1),
+	VIDC_BUS_PROFILE_UBWC			= BIT(2),
+	VIDC_BUS_PROFILE_UBWC_10_BIT		= BIT(3),
+};
+
+struct bus_profile_entry {
+	struct {
+		u32 load, freq;
+	} *bus_table;
+	u32 bus_table_size;
+	u32 codec_mask;
+	enum bus_profile profile;
+};
+
+struct msm_vidc_bus_table_gov {
+	struct bus_profile_entry *bus_prof_entries;
+	u32 count;
+	struct devfreq_governor devfreq_gov;
+};
+
+static int __get_bus_freq(struct msm_vidc_bus_table_gov *gov,
+		struct vidc_bus_vote_data *data,
+		enum bus_profile profile)
+{
+	int i = 0, load = 0, freq = 0;
+	enum vidc_vote_data_session sess_type = 0;
+	struct bus_profile_entry *entry = NULL;
+	bool found = false;
+
+	load = NUM_MBS_PER_SEC(data->width, data->height, data->fps);
+	sess_type = VIDC_VOTE_DATA_SESSION_VAL(data->codec, data->domain);
+
+	/* check if appropriate bus profile is present */
+	for (i = 0; i < gov->count; i++) {
+		entry = &gov->bus_prof_entries[i];
+		if (!entry->bus_table || !entry->bus_table_size)
+			continue;
+		if (!venus_hfi_is_session_supported(
+				entry->codec_mask, sess_type))
+			continue;
+		if (entry->profile == profile) {
+			found = true;
+			break;
+		}
+	}
+
+	if (found) {
+		/* loop over bus table and select frequency */
+		for (i = entry->bus_table_size - 1; i >= 0; --i) {
+			/* load is arranged in descending order */
+			freq = entry->bus_table[i].freq;
+			if (load <= entry->bus_table[i].load)
+				break;
+		}
+	}
+
+	return freq;
+}
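/*
 * A self-contained sketch (table values invented) of the selection loop
 * in __get_bus_freq() above: entries are sorted by descending load, and
 * the backwards scan keeps the lowest frequency whose load threshold
 * still covers the requested load; if nothing fits, the largest entry
 * is returned.
 */
#include <stdio.h>

struct bus_entry { unsigned int load, freq; };

static unsigned int pick_freq(const struct bus_entry *tbl, int n,
			      unsigned int load)
{
	unsigned int freq = 0;
	int i;

	for (i = n - 1; i >= 0; --i) {
		freq = tbl[i].freq;	/* remember the entry just scanned */
		if (load <= tbl[i].load)
			break;		/* smallest entry that fits */
	}
	return freq;
}

int main(void)
{
	const struct bus_entry tbl[] = {
		{ 979200, 1044000 }, { 489600, 522000 }, { 244800, 261000 },
	};

	/* a load of 300000 fits under 489600, so 522000 is chosen */
	printf("%u\n", pick_freq(tbl, 3, 300000));
	return 0;
}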
+
+static int msm_vidc_table_get_target_freq(struct devfreq *dev,
+		unsigned long *frequency, u32 *flag)
+{
+	struct devfreq_dev_status status = {0};
+	struct msm_vidc_gov_data *vidc_data = NULL;
+	struct msm_vidc_bus_table_gov *gov = NULL;
+	enum bus_profile profile = 0;
+	int i = 0;
+
+	if (!dev || !frequency || !flag) {
+		dprintk(VIDC_ERR, "%s: Invalid params %pK, %pK, %pK\n",
+			__func__, dev, frequency, flag);
+		return -EINVAL;
+	}
+
+	gov = container_of(dev->governor,
+			struct msm_vidc_bus_table_gov, devfreq_gov);
+	if (!gov) {
+		dprintk(VIDC_ERR, "%s: governor not found\n", __func__);
+		return -EINVAL;
+	}
+
+	dev->profile->get_dev_status(dev->dev.parent, &status);
+	vidc_data = (struct msm_vidc_gov_data *)status.private_data;
+
+	*frequency = 0;
+	for (i = 0; i < vidc_data->data_count; i++) {
+		struct vidc_bus_vote_data *data = &vidc_data->data[i];
+		int freq = 0;
+
+		if (data->power_mode == VIDC_POWER_TURBO) {
+			dprintk(VIDC_DBG, "bus: found turbo session[%d] %#x\n",
+				i, VIDC_VOTE_DATA_SESSION_VAL(data->codec,
+					data->domain));
+			*frequency = INT_MAX;
+			goto exit;
+		}
+
+		profile = VIDC_BUS_PROFILE_NORMAL;
+		if (data->color_formats[0] == HAL_COLOR_FORMAT_NV12_UBWC)
+			profile = VIDC_BUS_PROFILE_UBWC;
+		else if (data->color_formats[0] ==
+					HAL_COLOR_FORMAT_NV12_TP10_UBWC)
+			profile = VIDC_BUS_PROFILE_UBWC_10_BIT;
+
+		freq = __get_bus_freq(gov, data, profile);
+		/*
+		 * choose the frequency from the normal profile
+		 * if no profile-specific frequency was found.
+		 */
+		if (!freq) {
+			dprintk(VIDC_WARN,
+				"appropriate bus table not found, voting with Normal Profile\n");
+			freq = __get_bus_freq(gov, data,
+				VIDC_BUS_PROFILE_NORMAL);
+		}
+
+		*frequency += (unsigned long)freq;
+
+		dprintk(VIDC_DBG,
+			"session[%d] %#x: wxh %dx%d, fps %d, bus_profile %#x, freq %d, total_freq %ld KBps\n",
+			i, VIDC_VOTE_DATA_SESSION_VAL(
+			data->codec, data->domain), data->width,
+			data->height, data->fps, profile,
+			freq, *frequency);
+	}
+exit:
+	return 0;
+}
+
+int msm_vidc_table_event_handler(struct devfreq *devfreq,
+		unsigned int event, void *data)
+{
+	int rc = 0;
+
+	if (!devfreq) {
+		dprintk(VIDC_ERR, "%s: NULL devfreq\n", __func__);
+		return -EINVAL;
+	}
+
+	switch (event) {
+	case DEVFREQ_GOV_START:
+	case DEVFREQ_GOV_RESUME:
+		mutex_lock(&devfreq->lock);
+		rc = update_devfreq(devfreq);
+		mutex_unlock(&devfreq->lock);
+		break;
+	}
+
+	return rc;
+}
+
+static int msm_vidc_free_bus_table(struct platform_device *pdev,
+		struct msm_vidc_bus_table_gov *data)
+{
+	int rc = 0, i = 0;
+
+	if (!pdev || !data) {
+		dprintk(VIDC_ERR, "%s: invalid args %pK %pK\n",
+			__func__, pdev, data);
+		return -EINVAL;
+	}
+
+	for (i = 0; i < data->count; i++)
+		data->bus_prof_entries[i].bus_table = NULL;
+
+	data->bus_prof_entries = NULL;
+	data->count = 0;
+
+	return rc;
+}
+
+static int msm_vidc_load_bus_table(struct platform_device *pdev,
+		struct msm_vidc_bus_table_gov *data)
+{
+	int rc = 0, i = 0, j = 0;
+	const char *name = NULL;
+	struct bus_profile_entry *entry = NULL;
+	struct device_node *parent_node = NULL;
+	struct device_node *child_node = NULL;
+
+	if (!pdev || !data) {
+		dprintk(VIDC_ERR, "%s: invalid args %pK %pK\n",
+			__func__, pdev, data);
+		return -EINVAL;
+	}
+
+	of_property_read_string(pdev->dev.of_node, "name", &name);
+	if (strlen(name) > ARRAY_SIZE(data->devfreq_gov.name) - 1) {
+		dprintk(VIDC_ERR,
+			"%s: name is too long, max should be %zu chars\n",
+			__func__, ARRAY_SIZE(data->devfreq_gov.name) - 1);
+		return -EINVAL;
+	}
+
+	strlcpy((char *)data->devfreq_gov.name, name,
+			ARRAY_SIZE(data->devfreq_gov.name));
+	data->devfreq_gov.get_target_freq = msm_vidc_table_get_target_freq;
+	data->devfreq_gov.event_handler = msm_vidc_table_event_handler;
+
+	parent_node = of_find_node_by_name(pdev->dev.of_node,
+			"qcom,bus-freq-table");
+	if (!parent_node) {
+		dprintk(VIDC_DBG, "Node qcom,bus-freq-table not found.\n");
+		return 0;
+	}
+
+	data->count = of_get_child_count(parent_node);
+	if (!data->count) {
+		dprintk(VIDC_DBG, "No child nodes in qcom,bus-freq-table\n");
+		return 0;
+	}
+
+	data->bus_prof_entries = devm_kzalloc(&pdev->dev,
+			sizeof(*data->bus_prof_entries) * data->count,
+			GFP_KERNEL);
+	if (!data->bus_prof_entries) {
+		dprintk(VIDC_DBG, "no memory to allocate bus_prof_entries\n");
+		return -ENOMEM;
+	}
+
+	for_each_child_of_node(parent_node, child_node) {
+
+		if (i >= data->count) {
+			dprintk(VIDC_ERR,
+				"qcom,bus-freq-table: invalid child node %d, max is %d\n",
+				i, data->count);
+			break;
+		}
+		entry = &data->bus_prof_entries[i];
+
+		if (of_find_property(child_node, "qcom,codec-mask", NULL)) {
+			rc = of_property_read_u32(child_node,
+					"qcom,codec-mask", &entry->codec_mask);
+			if (rc) {
+				dprintk(VIDC_ERR,
+					"qcom,codec-mask not found\n");
+				break;
+			}
+		}
+
+		if (of_find_property(child_node, "qcom,low-power-mode", NULL))
+			entry->profile = VIDC_BUS_PROFILE_LOW;
+		else if (of_find_property(child_node, "qcom,ubwc-mode", NULL))
+			entry->profile = VIDC_BUS_PROFILE_UBWC;
+		else if (of_find_property(child_node, "qcom,ubwc-10bit", NULL))
+			entry->profile = VIDC_BUS_PROFILE_UBWC_10_BIT;
+		else
+			entry->profile = VIDC_BUS_PROFILE_NORMAL;
+
+		if (of_find_property(child_node,
+					"qcom,load-busfreq-tbl", NULL)) {
+			rc = msm_vidc_load_u32_table(pdev, child_node,
+						"qcom,load-busfreq-tbl",
+						sizeof(*entry->bus_table),
+						(u32 **)&entry->bus_table,
+						&entry->bus_table_size);
+			if (rc) {
+				dprintk(VIDC_ERR,
+					"qcom,load-busfreq-tbl failed\n");
+				break;
+			}
+		} else {
+			entry->bus_table = NULL;
+			entry->bus_table_size = 0;
+		}
+
+		dprintk(VIDC_DBG,
+			"qcom,load-busfreq-tbl: size %d, codec_mask %#x, profile %#x\n",
+			entry->bus_table_size, entry->codec_mask,
+			entry->profile);
+		for (j = 0; j < entry->bus_table_size; j++)
+			dprintk(VIDC_DBG, "   load %8d freq %8d\n",
+				entry->bus_table[j].load,
+				entry->bus_table[j].freq);
+
+		i++;
+	}
+
+	return rc;
+}
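/*
 * A hypothetical device-tree fragment that msm_vidc_load_bus_table()
 * would accept. Property names come from the parser above; the node
 * name (which becomes the devfreq governor name and must fit its name
 * field), the codec mask and the load/frequency pairs are invented:
 *
 *	msm-vidc-ddr-custom {
 *		compatible = "qcom,msm-vidc,governor,table";
 *		qcom,bus-freq-table {
 *			ubwc-profile {
 *				qcom,codec-mask = <0xffffffff>;
 *				qcom,ubwc-mode;
 *				qcom,load-busfreq-tbl =
 *					<979200 1044000>,
 *					<489600  522000>,
 *					<244800  261000>;
 *			};
 *		};
 *	};
 */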
+
+static int msm_vidc_bus_table_probe(struct platform_device *pdev)
+{
+	int rc = 0;
+	struct msm_vidc_bus_table_gov *gov = NULL;
+
+	dprintk(VIDC_DBG, "%s\n", __func__);
+
+	gov = devm_kzalloc(&pdev->dev, sizeof(*gov), GFP_KERNEL);
+	if (!gov) {
+		dprintk(VIDC_ERR, "%s: allocation failed\n", __func__);
+		return -ENOMEM;
+	}
+
+	platform_set_drvdata(pdev, gov);
+
+	rc = msm_vidc_load_bus_table(pdev, gov);
+	if (rc)
+		return rc;
+
+	rc = devfreq_add_governor(&gov->devfreq_gov);
+	if (rc)
+		dprintk(VIDC_ERR, "%s: add governor failed\n", __func__);
+
+	return rc;
+}
+
+static int msm_vidc_bus_table_remove(struct platform_device *pdev)
+{
+	int rc = 0;
+	struct msm_vidc_bus_table_gov *gov = NULL;
+
+	dprintk(VIDC_DBG, "%s\n", __func__);
+
+	gov = platform_get_drvdata(pdev);
+	if (IS_ERR_OR_NULL(gov))
+		return PTR_ERR(gov);
+
+	rc = msm_vidc_free_bus_table(pdev, gov);
+	if (rc)
+		dprintk(VIDC_WARN, "%s: free bus table failed\n", __func__);
+
+	rc = devfreq_remove_governor(&gov->devfreq_gov);
+
+	return rc;
+}
+
+static const struct of_device_id device_id[] = {
+	{.compatible = "qcom,msm-vidc,governor,table"},
+	{}
+};
+
+static struct platform_driver msm_vidc_bus_table_driver = {
+	.probe = msm_vidc_bus_table_probe,
+	.remove = msm_vidc_bus_table_remove,
+	.driver = {
+		.name = "msm_vidc_bus_table_governor",
+		.owner = THIS_MODULE,
+		.of_match_table = device_id,
+	},
+};
+
+static int __init msm_vidc_bus_table_init(void)
+{
+	dprintk(VIDC_DBG, "%s\n", __func__);
+
+	return platform_driver_register(&msm_vidc_bus_table_driver);
+}
+
+module_init(msm_vidc_bus_table_init);
+
+static void __exit msm_vidc_bus_table_exit(void)
+{
+	dprintk(VIDC_DBG, "%s\n", __func__);
+	platform_driver_unregister(&msm_vidc_bus_table_driver);
+}
+
+module_exit(msm_vidc_bus_table_exit);
+MODULE_LICENSE("GPL v2");
diff -Nruw linux-4.4.115/drivers/media/platform/msm/vidc/hfi_packetization.c linux-4.4.115-fbx/drivers/media/platform/msm/vidc/hfi_packetization.c
--- linux-4.4.115/drivers/media/platform/msm/vidc/hfi_packetization.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/media/platform/msm/vidc/hfi_packetization.c	2019-01-22 16:16:24.463255100 +0100
@@ -0,0 +1,2519 @@
+/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/errno.h>
+#include <linux/log2.h>
+#include <linux/hash.h>
+#include "hfi_packetization.h"
+#include "msm_vidc_debug.h"
+
+/* Set up look-up tables to convert HAL_* to HFI_*.
+ *
+ * The tables below mostly take advantage of the fact that most
+ * HAL_* types are defined bitwise. So if we index them normally
+ * when declaring the tables, we end up with huge arrays with wasted
+ * space.  So before indexing them, we apply log2 to use a more
+ * sensible index.
+ */
+static int profile_table[] = {
+	[ilog2(HAL_H264_PROFILE_BASELINE)] = HFI_H264_PROFILE_BASELINE,
+	[ilog2(HAL_H264_PROFILE_MAIN)] = HFI_H264_PROFILE_MAIN,
+	[ilog2(HAL_H264_PROFILE_HIGH)] = HFI_H264_PROFILE_HIGH,
+	[ilog2(HAL_H264_PROFILE_CONSTRAINED_BASE)] =
+		HFI_H264_PROFILE_CONSTRAINED_BASE,
+	[ilog2(HAL_H264_PROFILE_CONSTRAINED_HIGH)] =
+		HFI_H264_PROFILE_CONSTRAINED_HIGH,
+	[ilog2(HAL_VPX_PROFILE_VERSION_1)] = HFI_VPX_PROFILE_VERSION_1,
+	[ilog2(HAL_MVC_PROFILE_STEREO_HIGH)] = HFI_H264_PROFILE_STEREO_HIGH,
+};
+
+static int entropy_mode[] = {
+	[ilog2(HAL_H264_ENTROPY_CAVLC)] = HFI_H264_ENTROPY_CAVLC,
+	[ilog2(HAL_H264_ENTROPY_CABAC)] = HFI_H264_ENTROPY_CABAC,
+};
+
+static int cabac_model[] = {
+	[ilog2(HAL_H264_CABAC_MODEL_0)] = HFI_H264_CABAC_MODEL_0,
+	[ilog2(HAL_H264_CABAC_MODEL_1)] = HFI_H264_CABAC_MODEL_1,
+	[ilog2(HAL_H264_CABAC_MODEL_2)] = HFI_H264_CABAC_MODEL_2,
+};
+
+static int statistics_mode[] = {
+	[ilog2(HAL_STATISTICS_MODE_DEFAULT)] = HFI_STATISTICS_MODE_DEFAULT,
+	[ilog2(HAL_STATISTICS_MODE_1)] = HFI_STATISTICS_MODE_1,
+	[ilog2(HAL_STATISTICS_MODE_2)] = HFI_STATISTICS_MODE_2,
+	[ilog2(HAL_STATISTICS_MODE_3)] = HFI_STATISTICS_MODE_3,
+};
+
+static int color_format[] = {
+	[ilog2(HAL_COLOR_FORMAT_MONOCHROME)] = HFI_COLOR_FORMAT_MONOCHROME,
+	[ilog2(HAL_COLOR_FORMAT_NV12)] = HFI_COLOR_FORMAT_NV12,
+	[ilog2(HAL_COLOR_FORMAT_NV21)] = HFI_COLOR_FORMAT_NV21,
+	[ilog2(HAL_COLOR_FORMAT_NV12_4x4TILE)] = HFI_COLOR_FORMAT_NV12_4x4TILE,
+	[ilog2(HAL_COLOR_FORMAT_NV21_4x4TILE)] = HFI_COLOR_FORMAT_NV21_4x4TILE,
+	[ilog2(HAL_COLOR_FORMAT_YUYV)] = HFI_COLOR_FORMAT_YUYV,
+	[ilog2(HAL_COLOR_FORMAT_YVYU)] = HFI_COLOR_FORMAT_YVYU,
+	[ilog2(HAL_COLOR_FORMAT_UYVY)] = HFI_COLOR_FORMAT_UYVY,
+	[ilog2(HAL_COLOR_FORMAT_VYUY)] = HFI_COLOR_FORMAT_VYUY,
+	[ilog2(HAL_COLOR_FORMAT_RGB565)] = HFI_COLOR_FORMAT_RGB565,
+	[ilog2(HAL_COLOR_FORMAT_BGR565)] = HFI_COLOR_FORMAT_BGR565,
+	[ilog2(HAL_COLOR_FORMAT_RGB888)] = HFI_COLOR_FORMAT_RGB888,
+	[ilog2(HAL_COLOR_FORMAT_BGR888)] = HFI_COLOR_FORMAT_BGR888,
+	[ilog2(HAL_COLOR_FORMAT_RGBA8888)] = HFI_COLOR_FORMAT_RGBA8888,
+	/* UBWC Color formats*/
+	[ilog2(HAL_COLOR_FORMAT_NV12_UBWC)] =  HFI_COLOR_FORMAT_NV12_UBWC,
+	[ilog2(HAL_COLOR_FORMAT_NV12_TP10_UBWC)] =
+			HFI_COLOR_FORMAT_YUV420_TP10_UBWC,
+	[ilog2(HAL_COLOR_FORMAT_RGBA8888_UBWC)] =
+			HFI_COLOR_FORMAT_RGBA8888_UBWC,
+};
+
+static int nal_type[] = {
+	[ilog2(HAL_NAL_FORMAT_STARTCODES)] = HFI_NAL_FORMAT_STARTCODES,
+	[ilog2(HAL_NAL_FORMAT_ONE_NAL_PER_BUFFER)] =
+		HFI_NAL_FORMAT_ONE_NAL_PER_BUFFER,
+	[ilog2(HAL_NAL_FORMAT_ONE_BYTE_LENGTH)] =
+		HFI_NAL_FORMAT_ONE_BYTE_LENGTH,
+	[ilog2(HAL_NAL_FORMAT_TWO_BYTE_LENGTH)] =
+		HFI_NAL_FORMAT_TWO_BYTE_LENGTH,
+	[ilog2(HAL_NAL_FORMAT_FOUR_BYTE_LENGTH)] =
+		HFI_NAL_FORMAT_FOUR_BYTE_LENGTH,
+};
+
+static inline int hal_to_hfi_type(int property, int hal_type)
+{
+	if (hal_type <= 0 || roundup_pow_of_two(hal_type) != hal_type) {
+		/* Not a power of 2, so it's not going
+		 * to be in any of the tables anyway */
+		return -EINVAL;
+	}
+
+	if (hal_type)
+		hal_type = ilog2(hal_type);
+
+	switch (property) {
+	case HAL_PARAM_PROFILE_LEVEL_CURRENT:
+		return (hal_type >= ARRAY_SIZE(profile_table)) ?
+			-ENOTSUPP : profile_table[hal_type];
+	case HAL_PARAM_VENC_H264_ENTROPY_CONTROL:
+		return (hal_type >= ARRAY_SIZE(entropy_mode)) ?
+			-ENOTSUPP : entropy_mode[hal_type];
+	case HAL_PARAM_VENC_H264_ENTROPY_CABAC_MODEL:
+		return (hal_type >= ARRAY_SIZE(cabac_model)) ?
+			-ENOTSUPP : cabac_model[hal_type];
+	case HAL_PARAM_UNCOMPRESSED_FORMAT_SELECT:
+		return (hal_type >= ARRAY_SIZE(color_format)) ?
+			-ENOTSUPP : color_format[hal_type];
+	case HAL_PARAM_NAL_STREAM_FORMAT_SELECT:
+		return (hal_type >= ARRAY_SIZE(nal_type)) ?
+			-ENOTSUPP : nal_type[hal_type];
+	case HAL_PARAM_VENC_MBI_STATISTICS_MODE:
+		return (hal_type >= ARRAY_SIZE(statistics_mode)) ?
+			-ENOTSUPP : statistics_mode[hal_type];
+	default:
+		return -ENOTSUPP;
+	}
+}
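/*
 * A standalone sketch of the ilog2 table compaction described at the top
 * of this file: power-of-two flag values are mapped to dense array
 * indices instead of indexing by the raw flag value. Flag values and
 * table contents are invented for illustration.
 */
#include <stdio.h>

enum { FLAG_A = 1 << 0, FLAG_B = 1 << 4, FLAG_C = 1 << 9 };

static const int table[] = {
	[0] = 100,	/* ilog2(FLAG_A) == 0 */
	[4] = 200,	/* ilog2(FLAG_B) == 4 */
	[9] = 300,	/* ilog2(FLAG_C) == 9 */
};

static int lookup(unsigned int flag)
{
	/* reject 0 and non-powers-of-two, as hal_to_hfi_type() does */
	if (!flag || (flag & (flag - 1)))
		return -1;
	return table[__builtin_ctz(flag)];	/* ctz == ilog2 for pow2 */
}

int main(void)
{
	/* prints "200 -1 -1": FLAG_B resolves, 3 and 0 are rejected */
	printf("%d %d %d\n", lookup(FLAG_B), lookup(3), lookup(0));
	return 0;
}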
+
+u32 get_hfi_layout(enum hal_buffer_layout_type hal_buf_layout)
+{
+	u32 hfi_layout;
+
+	switch (hal_buf_layout) {
+	case HAL_BUFFER_LAYOUT_TOP_BOTTOM:
+		hfi_layout = HFI_MVC_BUFFER_LAYOUT_TOP_BOTTOM;
+		break;
+	case HAL_BUFFER_LAYOUT_SEQ:
+		hfi_layout = HFI_MVC_BUFFER_LAYOUT_SEQ;
+		break;
+	default:
+		dprintk(VIDC_ERR, "Invalid buffer layout: %#x\n",
+			hal_buf_layout);
+		hfi_layout = HFI_MVC_BUFFER_LAYOUT_SEQ;
+		break;
+	}
+	return hfi_layout;
+}
+
+enum hal_domain vidc_get_hal_domain(u32 hfi_domain)
+{
+	enum hal_domain hal_domain = 0;
+
+	switch (hfi_domain) {
+	case HFI_VIDEO_DOMAIN_VPE:
+		hal_domain = HAL_VIDEO_DOMAIN_VPE;
+		break;
+	case HFI_VIDEO_DOMAIN_ENCODER:
+		hal_domain = HAL_VIDEO_DOMAIN_ENCODER;
+		break;
+	case HFI_VIDEO_DOMAIN_DECODER:
+		hal_domain = HAL_VIDEO_DOMAIN_DECODER;
+		break;
+	default:
+		dprintk(VIDC_ERR, "%s: invalid domain %x\n",
+			__func__, hfi_domain);
+		hal_domain = 0;
+		break;
+	}
+	return hal_domain;
+}
+
+enum hal_video_codec vidc_get_hal_codec(u32 hfi_codec)
+{
+	enum hal_video_codec hal_codec = 0;
+
+	switch (hfi_codec) {
+	case HFI_VIDEO_CODEC_H264:
+		hal_codec = HAL_VIDEO_CODEC_H264;
+		break;
+	case HFI_VIDEO_CODEC_H263:
+		hal_codec = HAL_VIDEO_CODEC_H263;
+		break;
+	case HFI_VIDEO_CODEC_MPEG1:
+		hal_codec = HAL_VIDEO_CODEC_MPEG1;
+		break;
+	case HFI_VIDEO_CODEC_MPEG2:
+		hal_codec = HAL_VIDEO_CODEC_MPEG2;
+		break;
+	case HFI_VIDEO_CODEC_MPEG4:
+		hal_codec = HAL_VIDEO_CODEC_MPEG4;
+		break;
+	case HFI_VIDEO_CODEC_DIVX_311:
+		hal_codec = HAL_VIDEO_CODEC_DIVX_311;
+		break;
+	case HFI_VIDEO_CODEC_DIVX:
+		hal_codec = HAL_VIDEO_CODEC_DIVX;
+		break;
+	case HFI_VIDEO_CODEC_VC1:
+		hal_codec = HAL_VIDEO_CODEC_VC1;
+		break;
+	case HFI_VIDEO_CODEC_SPARK:
+		hal_codec = HAL_VIDEO_CODEC_SPARK;
+		break;
+	case HFI_VIDEO_CODEC_VP8:
+		hal_codec = HAL_VIDEO_CODEC_VP8;
+		break;
+	case HFI_VIDEO_CODEC_HEVC:
+		hal_codec = HAL_VIDEO_CODEC_HEVC;
+		break;
+	case HFI_VIDEO_CODEC_VP9:
+		hal_codec = HAL_VIDEO_CODEC_VP9;
+		break;
+	case HFI_VIDEO_CODEC_HEVC_HYBRID:
+		hal_codec = HAL_VIDEO_CODEC_HEVC_HYBRID;
+		break;
+	default:
+		dprintk(VIDC_INFO, "%s: invalid codec 0x%x\n",
+			__func__, hfi_codec);
+		hal_codec = 0;
+		break;
+	}
+	return hal_codec;
+}
+
+u32 vidc_get_hfi_domain(enum hal_domain hal_domain)
+{
+	u32 hfi_domain;
+
+	switch (hal_domain) {
+	case HAL_VIDEO_DOMAIN_VPE:
+		hfi_domain = HFI_VIDEO_DOMAIN_VPE;
+		break;
+	case HAL_VIDEO_DOMAIN_ENCODER:
+		hfi_domain = HFI_VIDEO_DOMAIN_ENCODER;
+		break;
+	case HAL_VIDEO_DOMAIN_DECODER:
+		hfi_domain = HFI_VIDEO_DOMAIN_DECODER;
+		break;
+	default:
+		dprintk(VIDC_ERR, "%s: invalid domain 0x%x\n",
+			__func__, hal_domain);
+		hfi_domain = 0;
+		break;
+	}
+	return hfi_domain;
+}
+
+u32 vidc_get_hfi_codec(enum hal_video_codec hal_codec)
+{
+	u32 hfi_codec = 0;
+
+	switch (hal_codec) {
+	case HAL_VIDEO_CODEC_MVC:
+	case HAL_VIDEO_CODEC_H264:
+		hfi_codec = HFI_VIDEO_CODEC_H264;
+		break;
+	case HAL_VIDEO_CODEC_H263:
+		hfi_codec = HFI_VIDEO_CODEC_H263;
+		break;
+	case HAL_VIDEO_CODEC_MPEG1:
+		hfi_codec = HFI_VIDEO_CODEC_MPEG1;
+		break;
+	case HAL_VIDEO_CODEC_MPEG2:
+		hfi_codec = HFI_VIDEO_CODEC_MPEG2;
+		break;
+	case HAL_VIDEO_CODEC_MPEG4:
+		hfi_codec = HFI_VIDEO_CODEC_MPEG4;
+		break;
+	case HAL_VIDEO_CODEC_DIVX_311:
+		hfi_codec = HFI_VIDEO_CODEC_DIVX_311;
+		break;
+	case HAL_VIDEO_CODEC_DIVX:
+		hfi_codec = HFI_VIDEO_CODEC_DIVX;
+		break;
+	case HAL_VIDEO_CODEC_VC1:
+		hfi_codec = HFI_VIDEO_CODEC_VC1;
+		break;
+	case HAL_VIDEO_CODEC_SPARK:
+		hfi_codec = HFI_VIDEO_CODEC_SPARK;
+		break;
+	case HAL_VIDEO_CODEC_VP8:
+		hfi_codec = HFI_VIDEO_CODEC_VP8;
+		break;
+	case HAL_VIDEO_CODEC_HEVC:
+		hfi_codec = HFI_VIDEO_CODEC_HEVC;
+		break;
+	case HAL_VIDEO_CODEC_VP9:
+		hfi_codec = HFI_VIDEO_CODEC_VP9;
+		break;
+	case HAL_VIDEO_CODEC_HEVC_HYBRID:
+		hfi_codec = HFI_VIDEO_CODEC_HEVC_HYBRID;
+		break;
+	default:
+		dprintk(VIDC_INFO, "%s: invalid codec 0x%x\n",
+			__func__, hal_codec);
+		hfi_codec = 0;
+		break;
+	}
+	return hfi_codec;
+}
+
+static void create_pkt_enable(void *pkt, u32 type, bool enable)
+{
+	u32 *pkt_header = pkt;
+	u32 *pkt_type = &pkt_header[0];
+	struct hfi_enable *hfi_enable = (struct hfi_enable *)&pkt_header[1];
+
+	*pkt_type = type;
+	hfi_enable->enable = enable;
+}
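/*
 * A minimal sketch of the property layout used by create_pkt_enable()
 * and the set-property packets below: word 0 carries the property id,
 * and the payload starts at word 1. The id value and struct name here
 * are invented for illustration.
 */
#include <stdint.h>
#include <stdio.h>

struct enable_payload { uint32_t enable; };

static void pack_enable(uint32_t *words, uint32_t prop_id, int on)
{
	words[0] = prop_id;			/* property id word */
	((struct enable_payload *)&words[1])->enable = !!on;
}

int main(void)
{
	uint32_t buf[2];

	pack_enable(buf, 0x1234, 1);	/* hypothetical property id */
	printf("%#x %u\n", (unsigned)buf[0], (unsigned)buf[1]);
	return 0;
}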
+
+int create_pkt_cmd_sys_init(struct hfi_cmd_sys_init_packet *pkt,
+			   u32 arch_type)
+{
+	int rc = 0;
+	if (!pkt)
+		return -EINVAL;
+
+	pkt->packet_type = HFI_CMD_SYS_INIT;
+	pkt->size = sizeof(struct hfi_cmd_sys_init_packet);
+	pkt->arch_type = arch_type;
+	return rc;
+}
+
+int create_pkt_cmd_sys_pc_prep(struct hfi_cmd_sys_pc_prep_packet *pkt)
+{
+	int rc = 0;
+	if (!pkt)
+		return -EINVAL;
+
+	pkt->packet_type = HFI_CMD_SYS_PC_PREP;
+	pkt->size = sizeof(struct hfi_cmd_sys_pc_prep_packet);
+	return rc;
+}
+
+int create_pkt_cmd_sys_idle_indicator(
+	struct hfi_cmd_sys_set_property_packet *pkt,
+	u32 enable)
+{
+	struct hfi_enable *hfi;
+	if (!pkt)
+		return -EINVAL;
+
+	pkt->size = sizeof(struct hfi_cmd_sys_set_property_packet) +
+		sizeof(struct hfi_enable) + sizeof(u32);
+	pkt->packet_type = HFI_CMD_SYS_SET_PROPERTY;
+	pkt->num_properties = 1;
+	pkt->rg_property_data[0] = HFI_PROPERTY_SYS_IDLE_INDICATOR;
+	hfi = (struct hfi_enable *) &pkt->rg_property_data[1];
+	hfi->enable = enable;
+	return 0;
+}
+
+int create_pkt_cmd_sys_debug_config(
+	struct hfi_cmd_sys_set_property_packet *pkt,
+	u32 mode)
+{
+	struct hfi_debug_config *hfi;
+	if (!pkt)
+		return -EINVAL;
+
+	pkt->size = sizeof(struct hfi_cmd_sys_set_property_packet) +
+		sizeof(struct hfi_debug_config) + sizeof(u32);
+	pkt->packet_type = HFI_CMD_SYS_SET_PROPERTY;
+	pkt->num_properties = 1;
+	pkt->rg_property_data[0] = HFI_PROPERTY_SYS_DEBUG_CONFIG;
+	hfi = (struct hfi_debug_config *) &pkt->rg_property_data[1];
+	hfi->debug_config = mode;
+	hfi->debug_mode = HFI_DEBUG_MODE_QUEUE;
+	if (msm_vidc_fw_debug_mode
+			<= (HFI_DEBUG_MODE_QUEUE | HFI_DEBUG_MODE_QDSS))
+		hfi->debug_mode = msm_vidc_fw_debug_mode;
+	return 0;
+}
+
+int create_pkt_cmd_sys_coverage_config(
+	struct hfi_cmd_sys_set_property_packet *pkt,
+	u32 mode)
+{
+	if (!pkt) {
+		dprintk(VIDC_ERR, "In %s(), No input packet\n", __func__);
+		return -EINVAL;
+	}
+
+	pkt->size = sizeof(struct hfi_cmd_sys_set_property_packet) +
+		sizeof(u32);
+	pkt->packet_type = HFI_CMD_SYS_SET_PROPERTY;
+	pkt->num_properties = 1;
+	pkt->rg_property_data[0] = HFI_PROPERTY_SYS_CONFIG_COVERAGE;
+	pkt->rg_property_data[1] = mode;
+	dprintk(VIDC_DBG, "Firmware coverage mode %d\n",
+			pkt->rg_property_data[1]);
+	return 0;
+}
+
+int create_pkt_cmd_sys_set_resource(
+		struct hfi_cmd_sys_set_resource_packet *pkt,
+		struct vidc_resource_hdr *resource_hdr,
+		void *resource_value)
+{
+	int rc = 0;
+	if (!pkt || !resource_hdr || !resource_value)
+		return -EINVAL;
+
+	pkt->packet_type = HFI_CMD_SYS_SET_RESOURCE;
+	pkt->size = sizeof(struct hfi_cmd_sys_set_resource_packet);
+	pkt->resource_handle = hash32_ptr(resource_hdr->resource_handle);
+
+	switch (resource_hdr->resource_id) {
+	case VIDC_RESOURCE_OCMEM:
+	case VIDC_RESOURCE_VMEM:
+	{
+		struct hfi_resource_ocmem *hfioc_mem =
+			(struct hfi_resource_ocmem *)
+			&pkt->rg_resource_data[0];
+
+		phys_addr_t imem_addr = (phys_addr_t)resource_value;
+
+		pkt->resource_type = HFI_RESOURCE_OCMEM;
+		pkt->size += sizeof(struct hfi_resource_ocmem) - sizeof(u32);
+		hfioc_mem->size = (u32)resource_hdr->size;
+		hfioc_mem->mem = imem_addr;
+		break;
+	}
+	default:
+		dprintk(VIDC_ERR, "Invalid resource_id %d\n",
+					resource_hdr->resource_id);
+		rc = -ENOTSUPP;
+	}
+
+	return rc;
+}
+
+int create_pkt_cmd_sys_release_resource(
+		struct hfi_cmd_sys_release_resource_packet *pkt,
+		struct vidc_resource_hdr *resource_hdr)
+{
+	int rc = 0;
+	if (!pkt)
+		return -EINVAL;
+
+	pkt->size = sizeof(struct hfi_cmd_sys_release_resource_packet);
+	pkt->packet_type = HFI_CMD_SYS_RELEASE_RESOURCE;
+	pkt->resource_handle = hash32_ptr(resource_hdr->resource_handle);
+
+	switch (resource_hdr->resource_id) {
+	case VIDC_RESOURCE_OCMEM:
+	case VIDC_RESOURCE_VMEM:
+		pkt->resource_type = HFI_RESOURCE_OCMEM;
+		break;
+	default:
+		dprintk(VIDC_ERR, "Invalid resource_id %d\n",
+					resource_hdr->resource_id);
+		rc = -ENOTSUPP;
+	}
+
+	return rc;
+}
+
+int create_pkt_cmd_sys_ping(struct hfi_cmd_sys_ping_packet *pkt)
+{
+	int rc = 0;
+	if (!pkt)
+		return -EINVAL;
+
+	pkt->size = sizeof(struct hfi_cmd_sys_ping_packet);
+	pkt->packet_type = HFI_CMD_SYS_PING;
+
+	return rc;
+}
+
+inline int create_pkt_cmd_sys_session_init(
+		struct hfi_cmd_sys_session_init_packet *pkt,
+		struct hal_session *session,
+		u32 session_domain, u32 session_codec)
+{
+	int rc = 0;
+	if (!pkt)
+		return -EINVAL;
+
+	pkt->size = sizeof(struct hfi_cmd_sys_session_init_packet);
+	pkt->packet_type = HFI_CMD_SYS_SESSION_INIT;
+	pkt->session_id = hash32_ptr(session);
+	pkt->session_domain = vidc_get_hfi_domain(session_domain);
+	pkt->session_codec = vidc_get_hfi_codec(session_codec);
+	if (!pkt->session_codec)
+		return -EINVAL;
+
+	return rc;
+}
+
+int create_pkt_cmd_session_cmd(struct vidc_hal_session_cmd_pkt *pkt,
+			int pkt_type, struct hal_session *session)
+{
+	int rc = 0;
+	if (!pkt)
+		return -EINVAL;
+
+	/*
+	 * Legacy packetization should skip sending any 3xx specific session
+	 * cmds. Add 3xx specific packetization to the switch case below.
+	 */
+	switch (pkt_type) {
+	case HFI_CMD_SESSION_CONTINUE:
+		dprintk(VIDC_INFO,
+			"%s - skip sending %x for legacy hfi\n",
+			__func__, pkt_type);
+		return -EPERM;
+	default:
+		break;
+	}
+
+	pkt->size = sizeof(struct vidc_hal_session_cmd_pkt);
+	pkt->packet_type = pkt_type;
+	pkt->session_id = hash32_ptr(session);
+
+	return rc;
+}
+
+int create_3x_pkt_cmd_session_cmd(struct vidc_hal_session_cmd_pkt *pkt,
+			int pkt_type, struct hal_session *session)
+{
+	int rc = 0;
+	if (!pkt)
+		return -EINVAL;
+
+	pkt->size = sizeof(struct vidc_hal_session_cmd_pkt);
+	pkt->packet_type = pkt_type;
+	pkt->session_id = hash32_ptr(session);
+
+	return rc;
+}
+
+int create_pkt_cmd_sys_power_control(
+	struct hfi_cmd_sys_set_property_packet *pkt, u32 enable)
+{
+	struct hfi_enable *hfi;
+	if (!pkt) {
+		dprintk(VIDC_ERR, "No input packet\n");
+		return -EINVAL;
+	}
+
+	pkt->size = sizeof(struct hfi_cmd_sys_set_property_packet) +
+		sizeof(struct hfi_enable) + sizeof(u32);
+	pkt->packet_type = HFI_CMD_SYS_SET_PROPERTY;
+	pkt->num_properties = 1;
+	pkt->rg_property_data[0] = HFI_PROPERTY_SYS_CODEC_POWER_PLANE_CTRL;
+	hfi = (struct hfi_enable *) &pkt->rg_property_data[1];
+	hfi->enable = enable;
+	return 0;
+}
+
+static u32 get_hfi_buffer(int hal_buffer)
+{
+	u32 buffer;
+	switch (hal_buffer) {
+	case HAL_BUFFER_INPUT:
+		buffer = HFI_BUFFER_INPUT;
+		break;
+	case HAL_BUFFER_OUTPUT:
+		buffer = HFI_BUFFER_OUTPUT;
+		break;
+	case HAL_BUFFER_OUTPUT2:
+		buffer = HFI_BUFFER_OUTPUT2;
+		break;
+	case HAL_BUFFER_EXTRADATA_INPUT:
+		buffer = HFI_BUFFER_EXTRADATA_INPUT;
+		break;
+	case HAL_BUFFER_EXTRADATA_OUTPUT:
+		buffer = HFI_BUFFER_EXTRADATA_OUTPUT;
+		break;
+	case HAL_BUFFER_EXTRADATA_OUTPUT2:
+		buffer = HFI_BUFFER_EXTRADATA_OUTPUT2;
+		break;
+	case HAL_BUFFER_INTERNAL_SCRATCH:
+		buffer = HFI_BUFFER_INTERNAL_SCRATCH;
+		break;
+	case HAL_BUFFER_INTERNAL_SCRATCH_1:
+		buffer = HFI_BUFFER_INTERNAL_SCRATCH_1;
+		break;
+	case HAL_BUFFER_INTERNAL_SCRATCH_2:
+		buffer = HFI_BUFFER_INTERNAL_SCRATCH_2;
+		break;
+	case HAL_BUFFER_INTERNAL_PERSIST:
+		buffer = HFI_BUFFER_INTERNAL_PERSIST;
+		break;
+	case HAL_BUFFER_INTERNAL_PERSIST_1:
+		buffer = HFI_BUFFER_INTERNAL_PERSIST_1;
+		break;
+	default:
+		dprintk(VIDC_ERR, "Invalid buffer: %#x\n",
+				hal_buffer);
+		buffer = 0;
+		break;
+	}
+	return buffer;
+}
+
+static int get_hfi_extradata_index(enum hal_extradata_id index)
+{
+	int ret = 0;
+	switch (index) {
+	case HAL_EXTRADATA_MB_QUANTIZATION:
+		ret = HFI_PROPERTY_PARAM_VDEC_MB_QUANTIZATION;
+		break;
+	case HAL_EXTRADATA_INTERLACE_VIDEO:
+		ret = HFI_PROPERTY_PARAM_VDEC_INTERLACE_VIDEO_EXTRADATA;
+		break;
+	case HAL_EXTRADATA_VC1_FRAMEDISP:
+		ret = HFI_PROPERTY_PARAM_VDEC_VC1_FRAMEDISP_EXTRADATA;
+		break;
+	case HAL_EXTRADATA_VC1_SEQDISP:
+		ret = HFI_PROPERTY_PARAM_VDEC_VC1_SEQDISP_EXTRADATA;
+		break;
+	case HAL_EXTRADATA_TIMESTAMP:
+		ret = HFI_PROPERTY_PARAM_VDEC_TIMESTAMP_EXTRADATA;
+		break;
+	case HAL_EXTRADATA_S3D_FRAME_PACKING:
+		ret = HFI_PROPERTY_PARAM_S3D_FRAME_PACKING_EXTRADATA;
+		break;
+	case HAL_EXTRADATA_FRAME_RATE:
+		ret = HFI_PROPERTY_PARAM_VDEC_FRAME_RATE_EXTRADATA;
+		break;
+	case HAL_EXTRADATA_PANSCAN_WINDOW:
+		ret = HFI_PROPERTY_PARAM_VDEC_PANSCAN_WNDW_EXTRADATA;
+		break;
+	case HAL_EXTRADATA_RECOVERY_POINT_SEI:
+		ret = HFI_PROPERTY_PARAM_VDEC_RECOVERY_POINT_SEI_EXTRADATA;
+		break;
+	case HAL_EXTRADATA_MULTISLICE_INFO:
+		ret = HFI_PROPERTY_PARAM_VENC_MULTI_SLICE_INFO;
+		break;
+	case HAL_EXTRADATA_NUM_CONCEALED_MB:
+		ret = HFI_PROPERTY_PARAM_VDEC_NUM_CONCEALED_MB;
+		break;
+	case HAL_EXTRADATA_ASPECT_RATIO:
+	case HAL_EXTRADATA_INPUT_CROP:
+	case HAL_EXTRADATA_DIGITAL_ZOOM:
+	case HAL_EXTRADATA_OUTPUT_CROP:
+		ret = HFI_PROPERTY_PARAM_INDEX_EXTRADATA;
+		break;
+	case HAL_EXTRADATA_MPEG2_SEQDISP:
+		ret = HFI_PROPERTY_PARAM_VDEC_MPEG2_SEQDISP_EXTRADATA;
+		break;
+	case HAL_EXTRADATA_STREAM_USERDATA:
+		ret = HFI_PROPERTY_PARAM_VDEC_STREAM_USERDATA_EXTRADATA;
+		break;
+	case HAL_EXTRADATA_FRAME_QP:
+		ret = HFI_PROPERTY_PARAM_VDEC_FRAME_QP_EXTRADATA;
+		break;
+	case HAL_EXTRADATA_FRAME_BITS_INFO:
+		ret = HFI_PROPERTY_PARAM_VDEC_FRAME_BITS_INFO_EXTRADATA;
+		break;
+	case HAL_EXTRADATA_LTR_INFO:
+		ret = HFI_PROPERTY_PARAM_VENC_LTR_INFO;
+		break;
+	case HAL_EXTRADATA_METADATA_MBI:
+		ret = HFI_PROPERTY_PARAM_VENC_MBI_DUMPING;
+		break;
+	case HAL_EXTRADATA_VQZIP_SEI:
+		ret = HFI_PROPERTY_PARAM_VDEC_VQZIP_SEI_EXTRADATA;
+		break;
+	case HAL_EXTRADATA_YUV_STATS:
+		ret = HFI_PROPERTY_PARAM_VENC_YUVSTAT_INFO_EXTRADATA;
+		break;
+	case HAL_EXTRADATA_ROI_QP:
+		ret = HFI_PROPERTY_PARAM_VENC_ROI_QP_EXTRADATA;
+		break;
+	case HAL_EXTRADATA_MASTERING_DISPLAY_COLOUR_SEI:
+		ret =
+		HFI_PROPERTY_PARAM_VDEC_MASTERING_DISPLAY_COLOUR_SEI_EXTRADATA;
+		break;
+	case HAL_EXTRADATA_CONTENT_LIGHT_LEVEL_SEI:
+		ret = HFI_PROPERTY_PARAM_VDEC_CONTENT_LIGHT_LEVEL_SEI_EXTRADATA;
+		break;
+	case HAL_EXTRADATA_PQ_INFO:
+		ret = HFI_PROPERTY_PARAM_VENC_OVERRIDE_QP_EXTRADATA;
+		break;
+	case HAL_EXTRADATA_VUI_DISPLAY_INFO:
+		ret = HFI_PROPERTY_PARAM_VUI_DISPLAY_INFO_EXTRADATA;
+		break;
+	case HAL_EXTRADATA_VPX_COLORSPACE:
+		ret = HFI_PROPERTY_PARAM_VDEC_VPX_COLORSPACE_EXTRADATA;
+		break;
+	default:
+		dprintk(VIDC_WARN, "Extradata index not found: %d\n", index);
+		break;
+	}
+	return ret;
+}
+
+static int get_hfi_extradata_id(enum hal_extradata_id index)
+{
+	int ret = 0;
+	switch (index) {
+	case HAL_EXTRADATA_ASPECT_RATIO:
+		ret = MSM_VIDC_EXTRADATA_ASPECT_RATIO;
+		break;
+	case HAL_EXTRADATA_INPUT_CROP:
+		ret = MSM_VIDC_EXTRADATA_INPUT_CROP;
+		break;
+	case HAL_EXTRADATA_DIGITAL_ZOOM:
+		ret = MSM_VIDC_EXTRADATA_DIGITAL_ZOOM;
+		break;
+	case HAL_EXTRADATA_OUTPUT_CROP:
+		ret = MSM_VIDC_EXTRADATA_OUTPUT_CROP;
+		break;
+	default:
+		ret = get_hfi_extradata_index(index);
+		break;
+	}
+	return ret;
+}
+
+static u32 get_hfi_buf_mode(enum buffer_mode_type hal_buf_mode)
+{
+	u32 buf_mode;
+	switch (hal_buf_mode) {
+	case HAL_BUFFER_MODE_STATIC:
+		buf_mode = HFI_BUFFER_MODE_STATIC;
+		break;
+	case HAL_BUFFER_MODE_RING:
+		buf_mode = HFI_BUFFER_MODE_RING;
+		break;
+	case HAL_BUFFER_MODE_DYNAMIC:
+		buf_mode = HFI_BUFFER_MODE_DYNAMIC;
+		break;
+	default:
+		dprintk(VIDC_ERR, "Invalid buffer mode: %#x\n",
+				hal_buf_mode);
+		buf_mode = 0;
+		break;
+	}
+	return buf_mode;
+}
+
+static u32 get_hfi_ltr_mode(enum ltr_mode ltr_mode_type)
+{
+	u32 ltrmode;
+	switch (ltr_mode_type) {
+	case HAL_LTR_MODE_DISABLE:
+		ltrmode = HFI_LTR_MODE_DISABLE;
+		break;
+	case HAL_LTR_MODE_MANUAL:
+		ltrmode = HFI_LTR_MODE_MANUAL;
+		break;
+	case HAL_LTR_MODE_PERIODIC:
+		ltrmode = HFI_LTR_MODE_PERIODIC;
+		break;
+	default:
+		dprintk(VIDC_ERR, "Invalid ltr mode: %#x\n",
+			ltr_mode_type);
+		ltrmode = HFI_LTR_MODE_DISABLE;
+		break;
+	}
+	return ltrmode;
+}
+
+int create_pkt_cmd_session_set_buffers(
+		struct hfi_cmd_session_set_buffers_packet *pkt,
+		struct hal_session *session,
+		struct vidc_buffer_addr_info *buffer_info)
+{
+	int rc = 0;
+	int i = 0;
+	if (!pkt || !session)
+		return -EINVAL;
+
+	pkt->packet_type = HFI_CMD_SESSION_SET_BUFFERS;
+	pkt->session_id = hash32_ptr(session);
+	pkt->buffer_size = buffer_info->buffer_size;
+	pkt->min_buffer_size = buffer_info->buffer_size;
+	pkt->num_buffers = buffer_info->num_buffers;
+
+	if (buffer_info->buffer_type == HAL_BUFFER_OUTPUT ||
+		buffer_info->buffer_type == HAL_BUFFER_OUTPUT2) {
+		struct hfi_buffer_info *buff;
+		pkt->extra_data_size = buffer_info->extradata_size;
+		pkt->size = sizeof(struct hfi_cmd_session_set_buffers_packet) -
+				sizeof(u32) + (buffer_info->num_buffers *
+				sizeof(struct hfi_buffer_info));
+		buff = (struct hfi_buffer_info *) pkt->rg_buffer_info;
+		for (i = 0; i < pkt->num_buffers; i++) {
+			buff->buffer_addr =
+				(u32)buffer_info->align_device_addr;
+			buff->extra_data_addr =
+				(u32)buffer_info->extradata_addr;
+		}
+	} else {
+		pkt->extra_data_size = 0;
+		pkt->size = sizeof(struct hfi_cmd_session_set_buffers_packet) +
+			((buffer_info->num_buffers - 1) * sizeof(u32));
+		for (i = 0; i < pkt->num_buffers; i++) {
+			pkt->rg_buffer_info[i] =
+				(u32)buffer_info->align_device_addr;
+		}
+	}
+
+	pkt->buffer_type = get_hfi_buffer(buffer_info->buffer_type);
+	if (!pkt->buffer_type)
+		return -EINVAL;
+
+	return rc;
+}
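/*
 * A sketch of the size arithmetic used above: the packet struct declares
 * a one-element trailing array, so the wire size is sizeof(packet) minus
 * that placeholder word plus the real payload. Struct layout and names
 * are invented for illustration.
 */
#include <stdint.h>
#include <stdio.h>

struct buffer_info { uint32_t addr, extra; };

struct set_buffers_pkt {
	uint32_t size, type, session_id;
	uint32_t rg_buffer_info[1];	/* placeholder for the payload */
};

int main(void)
{
	unsigned int n = 4;

	/* 3 header words + 4 two-word entries = 44 bytes on the wire */
	printf("%zu\n", sizeof(struct set_buffers_pkt) - sizeof(uint32_t)
			+ n * sizeof(struct buffer_info));
	return 0;
}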
+
+int create_pkt_cmd_session_release_buffers(
+		struct hfi_cmd_session_release_buffer_packet *pkt,
+		struct hal_session *session,
+		struct vidc_buffer_addr_info *buffer_info)
+{
+	int rc = 0;
+	int i = 0;
+	if (!pkt || !session)
+		return -EINVAL;
+
+	pkt->packet_type = HFI_CMD_SESSION_RELEASE_BUFFERS;
+	pkt->session_id = hash32_ptr(session);
+	pkt->buffer_size = buffer_info->buffer_size;
+	pkt->num_buffers = buffer_info->num_buffers;
+
+	if (buffer_info->buffer_type == HAL_BUFFER_OUTPUT ||
+		buffer_info->buffer_type == HAL_BUFFER_OUTPUT2) {
+		struct hfi_buffer_info *buff;
+		buff = (struct hfi_buffer_info *) pkt->rg_buffer_info;
+		for (i = 0; i < pkt->num_buffers; i++) {
+			buff->buffer_addr =
+				(u32)buffer_info->align_device_addr;
+			buff->extra_data_addr =
+				(u32)buffer_info->extradata_addr;
+		}
+		pkt->size = sizeof(struct hfi_cmd_session_set_buffers_packet) -
+				sizeof(u32) + (buffer_info->num_buffers *
+				sizeof(struct hfi_buffer_info));
+	} else {
+		for (i = 0; i < pkt->num_buffers; i++) {
+			pkt->rg_buffer_info[i] =
+				(u32)buffer_info->align_device_addr;
+		}
+		pkt->extra_data_size = 0;
+		pkt->size = sizeof(struct hfi_cmd_session_set_buffers_packet) +
+			((buffer_info->num_buffers - 1) * sizeof(u32));
+	}
+	pkt->response_req = buffer_info->response_required;
+	pkt->buffer_type = get_hfi_buffer(buffer_info->buffer_type);
+	if (!pkt->buffer_type)
+		return -EINVAL;
+	return rc;
+}
+
+int create_pkt_cmd_session_etb_decoder(
+	struct hfi_cmd_session_empty_buffer_compressed_packet *pkt,
+	struct hal_session *session, struct vidc_frame_data *input_frame)
+{
+	int rc = 0;
+	if (!pkt || !session)
+		return -EINVAL;
+
+	pkt->size =
+		sizeof(struct hfi_cmd_session_empty_buffer_compressed_packet);
+	pkt->packet_type = HFI_CMD_SESSION_EMPTY_BUFFER;
+	pkt->session_id = hash32_ptr(session);
+	pkt->time_stamp_hi = upper_32_bits(input_frame->timestamp);
+	pkt->time_stamp_lo = lower_32_bits(input_frame->timestamp);
+	pkt->flags = input_frame->flags;
+	pkt->mark_target = input_frame->mark_target;
+	pkt->mark_data = input_frame->mark_data;
+	pkt->offset = input_frame->offset;
+	pkt->alloc_len = input_frame->alloc_len;
+	pkt->filled_len = input_frame->filled_len;
+	pkt->input_tag = input_frame->clnt_data;
+	pkt->packet_buffer = (u32)input_frame->device_addr;
+
+	trace_msm_v4l2_vidc_buffer_event_start("ETB",
+		input_frame->device_addr, input_frame->timestamp,
+		input_frame->alloc_len, input_frame->filled_len,
+		input_frame->offset);
+
+	if (!pkt->packet_buffer)
+		rc = -EINVAL;
+	return rc;
+}
+
+int create_pkt_cmd_session_etb_encoder(
+	struct hfi_cmd_session_empty_buffer_uncompressed_plane0_packet *pkt,
+	struct hal_session *session, struct vidc_frame_data *input_frame)
+{
+	int rc = 0;
+	if (!pkt || !session)
+		return -EINVAL;
+
+	pkt->size = sizeof(struct
+		hfi_cmd_session_empty_buffer_uncompressed_plane0_packet);
+	pkt->packet_type = HFI_CMD_SESSION_EMPTY_BUFFER;
+	pkt->session_id = hash32_ptr(session);
+	pkt->view_id = 0;
+	pkt->time_stamp_hi = upper_32_bits(input_frame->timestamp);
+	pkt->time_stamp_lo = lower_32_bits(input_frame->timestamp);
+	pkt->flags = input_frame->flags;
+	pkt->mark_target = input_frame->mark_target;
+	pkt->mark_data = input_frame->mark_data;
+	pkt->offset = input_frame->offset;
+	pkt->alloc_len = input_frame->alloc_len;
+	pkt->filled_len = input_frame->filled_len;
+	pkt->input_tag = input_frame->clnt_data;
+	pkt->packet_buffer = (u32)input_frame->device_addr;
+	pkt->extra_data_buffer = (u32)input_frame->extradata_addr;
+
+	trace_msm_v4l2_vidc_buffer_event_start("ETB",
+		input_frame->device_addr, input_frame->timestamp,
+		input_frame->alloc_len, input_frame->filled_len,
+		input_frame->offset);
+
+	if (!pkt->packet_buffer)
+		rc = -EINVAL;
+	return rc;
+}
+
+int create_pkt_cmd_session_ftb(struct hfi_cmd_session_fill_buffer_packet *pkt,
+		struct hal_session *session,
+		struct vidc_frame_data *output_frame)
+{
+	int rc = 0;
+	if (!pkt || !session || !output_frame)
+		return -EINVAL;
+
+	pkt->size = sizeof(struct hfi_cmd_session_fill_buffer_packet);
+	pkt->packet_type = HFI_CMD_SESSION_FILL_BUFFER;
+	pkt->session_id = hash32_ptr(session);
+
+	if (output_frame->buffer_type == HAL_BUFFER_OUTPUT)
+		pkt->stream_id = 0;
+	else if (output_frame->buffer_type == HAL_BUFFER_OUTPUT2)
+		pkt->stream_id = 1;
+
+	if (!output_frame->device_addr)
+		return -EINVAL;
+
+	pkt->packet_buffer = (u32)output_frame->device_addr;
+	pkt->extra_data_buffer = (u32)output_frame->extradata_addr;
+	pkt->alloc_len = output_frame->alloc_len;
+	pkt->filled_len = output_frame->filled_len;
+	pkt->offset = output_frame->offset;
+	pkt->rgData[0] = output_frame->extradata_size;
+
+	trace_msm_v4l2_vidc_buffer_event_start("FTB",
+		output_frame->device_addr, output_frame->timestamp,
+		output_frame->alloc_len, output_frame->filled_len,
+		output_frame->offset);
+	dprintk(VIDC_DBG, "### Q OUTPUT BUFFER ###: %d, %d, %d\n",
+			pkt->alloc_len, pkt->filled_len, pkt->offset);
+
+	return rc;
+}
+
+int create_pkt_cmd_session_parse_seq_header(
+		struct hfi_cmd_session_parse_sequence_header_packet *pkt,
+		struct hal_session *session, struct vidc_seq_hdr *seq_hdr)
+{
+	int rc = 0;
+	if (!pkt || !session || !seq_hdr)
+		return -EINVAL;
+
+	pkt->size = sizeof(struct hfi_cmd_session_parse_sequence_header_packet);
+	pkt->packet_type = HFI_CMD_SESSION_PARSE_SEQUENCE_HEADER;
+	pkt->session_id = hash32_ptr(session);
+	pkt->header_len = seq_hdr->seq_hdr_len;
+	if (!seq_hdr->seq_hdr)
+		return -EINVAL;
+	pkt->packet_buffer = (u32)seq_hdr->seq_hdr;
+	return rc;
+}
+
+int create_pkt_cmd_session_get_seq_hdr(
+		struct hfi_cmd_session_get_sequence_header_packet *pkt,
+		struct hal_session *session, struct vidc_seq_hdr *seq_hdr)
+{
+	int rc = 0;
+
+	if (!pkt || !session || !seq_hdr)
+		return -EINVAL;
+
+	pkt->size = sizeof(struct hfi_cmd_session_get_sequence_header_packet);
+	pkt->packet_type = HFI_CMD_SESSION_GET_SEQUENCE_HEADER;
+	pkt->session_id = hash32_ptr(session);
+	pkt->buffer_len = seq_hdr->seq_hdr_len;
+	if (!seq_hdr->seq_hdr)
+		return -EINVAL;
+	pkt->packet_buffer = (u32)seq_hdr->seq_hdr;
+	return rc;
+}
+
+int create_pkt_cmd_session_get_buf_req(
+		struct hfi_cmd_session_get_property_packet *pkt,
+		struct hal_session *session)
+{
+	int rc = 0;
+
+	if (!pkt || !session)
+		return -EINVAL;
+
+	pkt->size = sizeof(struct hfi_cmd_session_get_property_packet);
+	pkt->packet_type = HFI_CMD_SESSION_GET_PROPERTY;
+	pkt->session_id = hash32_ptr(session);
+	pkt->num_properties = 1;
+	pkt->rg_property_data[0] = HFI_PROPERTY_CONFIG_BUFFER_REQUIREMENTS;
+
+	return rc;
+}
+
+int create_pkt_cmd_session_flush(struct hfi_cmd_session_flush_packet *pkt,
+			struct hal_session *session, enum hal_flush flush_mode)
+{
+	int rc = 0;
+	if (!pkt || !session)
+		return -EINVAL;
+
+	pkt->size = sizeof(struct hfi_cmd_session_flush_packet);
+	pkt->packet_type = HFI_CMD_SESSION_FLUSH;
+	pkt->session_id = hash32_ptr(session);
+	switch (flush_mode) {
+	case HAL_FLUSH_INPUT:
+		pkt->flush_type = HFI_FLUSH_INPUT;
+		break;
+	case HAL_FLUSH_OUTPUT:
+		pkt->flush_type = HFI_FLUSH_OUTPUT;
+		break;
+	case HAL_FLUSH_ALL:
+		pkt->flush_type = HFI_FLUSH_ALL;
+		break;
+	default:
+		dprintk(VIDC_ERR, "Invalid flush mode: %#x\n", flush_mode);
+		return -EINVAL;
+	}
+	return rc;
+}
+
+int create_pkt_cmd_session_get_property(
+		struct hfi_cmd_session_get_property_packet *pkt,
+		struct hal_session *session, enum hal_property ptype)
+{
+	int rc = 0;
+	if (!pkt || !session) {
+		dprintk(VIDC_ERR, "%s Invalid parameters\n", __func__);
+		return -EINVAL;
+	}
+	pkt->size = sizeof(struct hfi_cmd_session_get_property_packet);
+	pkt->packet_type = HFI_CMD_SESSION_GET_PROPERTY;
+	pkt->session_id = hash32_ptr(session);
+	pkt->num_properties = 1;
+	switch (ptype) {
+	case HAL_PARAM_PROFILE_LEVEL_CURRENT:
+		pkt->rg_property_data[0] =
+			HFI_PROPERTY_PARAM_PROFILE_LEVEL_CURRENT;
+		break;
+	default:
+		dprintk(VIDC_ERR, "%s cmd:%#x not supported\n", __func__,
+			ptype);
+		rc = -EINVAL;
+		break;
+	}
+	return rc;
+}
+
+int create_3x_pkt_cmd_session_get_property(
+		struct hfi_cmd_session_get_property_packet *pkt,
+		struct hal_session *session, enum hal_property ptype)
+{
+	int rc = 0;
+
+	if (!pkt || !session) {
+		dprintk(VIDC_ERR, "%s Invalid parameters\n", __func__);
+		return -EINVAL;
+	}
+	pkt->size = sizeof(struct hfi_cmd_session_get_property_packet);
+	pkt->packet_type = HFI_CMD_SESSION_GET_PROPERTY;
+	pkt->session_id = hash32_ptr(session);
+	pkt->num_properties = 1;
+	switch (ptype) {
+	case HAL_CONFIG_VDEC_ENTROPY:
+		pkt->rg_property_data[0] = HFI_PROPERTY_CONFIG_VDEC_ENTROPY;
+		break;
+	default:
+		rc = create_pkt_cmd_session_get_property(pkt,
+				session, ptype);
+	}
+	return rc;
+}
+
+int create_pkt_cmd_session_set_property(
+		struct hfi_cmd_session_set_property_packet *pkt,
+		struct hal_session *session,
+		enum hal_property ptype, void *pdata)
+{
+	int rc = 0;
+	if (!pkt || !session)
+		return -EINVAL;
+
+	pkt->size = sizeof(struct hfi_cmd_session_set_property_packet);
+	pkt->packet_type = HFI_CMD_SESSION_SET_PROPERTY;
+	pkt->session_id = hash32_ptr(session);
+	pkt->num_properties = 1;
+
+	switch (ptype) {
+	case HAL_CONFIG_FRAME_RATE:
+	{
+		u32 buffer_type;
+		struct hfi_frame_rate *hfi;
+		struct hal_frame_rate *prop = (struct hal_frame_rate *) pdata;
+
+		pkt->rg_property_data[0] = HFI_PROPERTY_CONFIG_FRAME_RATE;
+		hfi = (struct hfi_frame_rate *) &pkt->rg_property_data[1];
+		buffer_type = get_hfi_buffer(prop->buffer_type);
+		if (buffer_type)
+			hfi->buffer_type = buffer_type;
+		else
+			return -EINVAL;
+
+		hfi->frame_rate = prop->frame_rate;
+		pkt->size += sizeof(u32) + sizeof(struct hfi_frame_rate);
+		break;
+	}
+	case HAL_PARAM_UNCOMPRESSED_FORMAT_SELECT:
+	{
+		u32 buffer_type;
+		struct hfi_uncompressed_format_select *hfi;
+		struct hal_uncompressed_format_select *prop =
+			(struct hal_uncompressed_format_select *) pdata;
+
+		pkt->rg_property_data[0] =
+			HFI_PROPERTY_PARAM_UNCOMPRESSED_FORMAT_SELECT;
+
+		hfi = (struct hfi_uncompressed_format_select *)
+					&pkt->rg_property_data[1];
+		buffer_type = get_hfi_buffer(prop->buffer_type);
+		if (buffer_type)
+			hfi->buffer_type = buffer_type;
+		else
+			return -EINVAL;
+		hfi->format = hal_to_hfi_type(
+				HAL_PARAM_UNCOMPRESSED_FORMAT_SELECT,
+				prop->format);
+		pkt->size += sizeof(u32) +
+			sizeof(struct hfi_uncompressed_format_select);
+		break;
+	}
+	case HAL_PARAM_UNCOMPRESSED_PLANE_ACTUAL_CONSTRAINTS_INFO:
+		break;
+	case HAL_PARAM_UNCOMPRESSED_PLANE_ACTUAL_INFO:
+		break;
+	case HAL_PARAM_EXTRA_DATA_HEADER_CONFIG:
+		break;
+	case HAL_PARAM_FRAME_SIZE:
+	{
+		struct hfi_frame_size *hfi;
+		struct hal_frame_size *prop = (struct hal_frame_size *) pdata;
+		u32 buffer_type;
+
+		pkt->rg_property_data[0] = HFI_PROPERTY_PARAM_FRAME_SIZE;
+		hfi = (struct hfi_frame_size *) &pkt->rg_property_data[1];
+		buffer_type = get_hfi_buffer(prop->buffer_type);
+		if (buffer_type)
+			hfi->buffer_type = buffer_type;
+		else
+			return -EINVAL;
+
+		hfi->height = prop->height;
+		hfi->width = prop->width;
+		pkt->size += sizeof(u32) + sizeof(struct hfi_frame_size);
+		break;
+	}
+	case HAL_CONFIG_REALTIME:
+	{
+		create_pkt_enable(pkt->rg_property_data,
+			HFI_PROPERTY_CONFIG_REALTIME,
+			(((struct hal_enable *) pdata)->enable));
+		pkt->size += sizeof(u32) * 2;
+		break;
+	}
+	case HAL_PARAM_BUFFER_COUNT_ACTUAL:
+	{
+		struct hfi_buffer_count_actual *hfi;
+		struct hal_buffer_count_actual *prop =
+			(struct hal_buffer_count_actual *) pdata;
+		u32 buffer_type;
+		pkt->rg_property_data[0] =
+			HFI_PROPERTY_PARAM_BUFFER_COUNT_ACTUAL;
+		hfi = (struct hfi_buffer_count_actual *)
+			&pkt->rg_property_data[1];
+		hfi->buffer_count_actual = prop->buffer_count_actual;
+
+		buffer_type = get_hfi_buffer(prop->buffer_type);
+		if (buffer_type)
+			hfi->buffer_type = buffer_type;
+		else
+			return -EINVAL;
+
+		pkt->size += sizeof(u32) + sizeof(struct
+				hfi_buffer_count_actual);
+
+		break;
+	}
+	case HAL_PARAM_NAL_STREAM_FORMAT_SELECT:
+	{
+		struct hfi_nal_stream_format_select *hfi;
+		struct hal_nal_stream_format_select *prop =
+			(struct hal_nal_stream_format_select *)pdata;
+		pkt->rg_property_data[0] =
+			HFI_PROPERTY_PARAM_NAL_STREAM_FORMAT_SELECT;
+		hfi = (struct hfi_nal_stream_format_select *)
+			&pkt->rg_property_data[1];
+		dprintk(VIDC_DBG, "data is: %d\n",
+				prop->nal_stream_format_select);
+		hfi->nal_stream_format_select = hal_to_hfi_type(
+				HAL_PARAM_NAL_STREAM_FORMAT_SELECT,
+				prop->nal_stream_format_select);
+		pkt->size += sizeof(u32) +
+			sizeof(struct hfi_nal_stream_format_select);
+		break;
+	}
+	case HAL_PARAM_VDEC_OUTPUT_ORDER:
+	{
+		int *data = (int *) pdata;
+		pkt->rg_property_data[0] =
+				HFI_PROPERTY_PARAM_VDEC_OUTPUT_ORDER;
+		switch (*data) {
+		case HAL_OUTPUT_ORDER_DECODE:
+			pkt->rg_property_data[1] = HFI_OUTPUT_ORDER_DECODE;
+			break;
+		case HAL_OUTPUT_ORDER_DISPLAY:
+			pkt->rg_property_data[1] = HFI_OUTPUT_ORDER_DISPLAY;
+			break;
+		default:
+			dprintk(VIDC_ERR, "invalid output order: %#x\n",
+						  *data);
+			break;
+		}
+		pkt->size += sizeof(u32) * 2;
+		break;
+	}
+	case HAL_PARAM_VDEC_PICTURE_TYPE_DECODE:
+	{
+		struct hfi_enable_picture *hfi;
+		pkt->rg_property_data[0] =
+			HFI_PROPERTY_PARAM_VDEC_PICTURE_TYPE_DECODE;
+		hfi = (struct hfi_enable_picture *) &pkt->rg_property_data[1];
+		hfi->picture_type =
+			((struct hfi_enable_picture *)pdata)->picture_type;
+		pkt->size += sizeof(u32) * 2;
+		break;
+	}
+	case HAL_PARAM_VDEC_OUTPUT2_KEEP_ASPECT_RATIO:
+	{
+		create_pkt_enable(pkt->rg_property_data,
+			HFI_PROPERTY_PARAM_VDEC_OUTPUT2_KEEP_ASPECT_RATIO,
+			((struct hal_enable *)pdata)->enable);
+		pkt->size += sizeof(u32) * 2;
+		break;
+	}
+	case HAL_CONFIG_VDEC_POST_LOOP_DEBLOCKER:
+	{
+		create_pkt_enable(pkt->rg_property_data,
+			HFI_PROPERTY_CONFIG_VDEC_POST_LOOP_DEBLOCKER,
+			((struct hal_enable *)pdata)->enable);
+		pkt->size += sizeof(u32) * 2;
+		break;
+	}
+	case HAL_PARAM_VDEC_MULTI_STREAM:
+	{
+		struct hfi_multi_stream *hfi;
+		struct hal_multi_stream *prop =
+			(struct hal_multi_stream *) pdata;
+		u32 buffer_type;
+		pkt->rg_property_data[0] =
+			HFI_PROPERTY_PARAM_VDEC_MULTI_STREAM;
+		hfi = (struct hfi_multi_stream *) &pkt->rg_property_data[1];
+
+		buffer_type = get_hfi_buffer(prop->buffer_type);
+		if (buffer_type)
+			hfi->buffer_type = buffer_type;
+		else
+			return -EINVAL;
+		hfi->enable = prop->enable;
+		hfi->width = prop->width;
+		hfi->height = prop->height;
+		pkt->size += sizeof(u32) + sizeof(struct hfi_multi_stream);
+		break;
+	}
+	case HAL_PARAM_VDEC_DISPLAY_PICTURE_BUFFER_COUNT:
+	{
+		struct hfi_display_picture_buffer_count *hfi;
+		struct hal_display_picture_buffer_count *prop =
+			(struct hal_display_picture_buffer_count *) pdata;
+		pkt->rg_property_data[0] =
+			HFI_PROPERTY_PARAM_VDEC_DISPLAY_PICTURE_BUFFER_COUNT;
+		hfi = (struct hfi_display_picture_buffer_count *)
+			&pkt->rg_property_data[1];
+		hfi->count = prop->count;
+		hfi->enable = prop->enable;
+		pkt->size += sizeof(u32) +
+			sizeof(struct hfi_display_picture_buffer_count);
+		break;
+	}
+	case HAL_PARAM_DIVX_FORMAT:
+	{
+		int *data = pdata;
+		pkt->rg_property_data[0] = HFI_PROPERTY_PARAM_DIVX_FORMAT;
+		switch (*data) {
+		case HAL_DIVX_FORMAT_4:
+			pkt->rg_property_data[1] = HFI_DIVX_FORMAT_4;
+			break;
+		case HAL_DIVX_FORMAT_5:
+			pkt->rg_property_data[1] = HFI_DIVX_FORMAT_5;
+			break;
+		case HAL_DIVX_FORMAT_6:
+			pkt->rg_property_data[1] = HFI_DIVX_FORMAT_6;
+			break;
+		default:
+			dprintk(VIDC_ERR, "Invalid divx format: %#x\n", *data);
+			break;
+		}
+		pkt->size += sizeof(u32) * 2;
+		break;
+	}
+	case HAL_CONFIG_VDEC_MB_ERROR_MAP_REPORTING:
+	{
+		create_pkt_enable(pkt->rg_property_data,
+			HFI_PROPERTY_CONFIG_VDEC_MB_ERROR_MAP_REPORTING,
+			((struct hal_enable *)pdata)->enable);
+		pkt->size += sizeof(u32) * 2;
+		break;
+	}
+	case HAL_PARAM_VDEC_CONTINUE_DATA_TRANSFER:
+	{
+		create_pkt_enable(pkt->rg_property_data,
+			HFI_PROPERTY_PARAM_VDEC_CONTINUE_DATA_TRANSFER,
+			((struct hal_enable *)pdata)->enable);
+		pkt->size += sizeof(u32) * 2;
+		break;
+	}
+	case HAL_PARAM_VDEC_SYNC_FRAME_DECODE:
+	{
+		create_pkt_enable(pkt->rg_property_data,
+			HFI_PROPERTY_PARAM_VDEC_THUMBNAIL_MODE,
+			((struct hal_enable *)pdata)->enable);
+		pkt->size += sizeof(u32) * 2;
+		break;
+	}
+	case HAL_PARAM_VENC_SYNC_FRAME_SEQUENCE_HEADER:
+	{
+		create_pkt_enable(pkt->rg_property_data,
+			HFI_PROPERTY_CONFIG_VENC_SYNC_FRAME_SEQUENCE_HEADER,
+			((struct hal_enable *)pdata)->enable);
+		pkt->size += sizeof(u32) * 2;
+		break;
+	}
+	case HAL_CONFIG_VENC_REQUEST_IFRAME:
+		pkt->rg_property_data[0] =
+			HFI_PROPERTY_CONFIG_VENC_REQUEST_SYNC_FRAME;
+		pkt->size += sizeof(u32);
+		break;
+	case HAL_PARAM_VENC_MPEG4_SHORT_HEADER:
+		break;
+	case HAL_PARAM_VENC_MPEG4_AC_PREDICTION:
+		break;
+	case HAL_CONFIG_VENC_TARGET_BITRATE:
+	{
+		struct hfi_bitrate *hfi;
+		pkt->rg_property_data[0] =
+			HFI_PROPERTY_CONFIG_VENC_TARGET_BITRATE;
+		hfi = (struct hfi_bitrate *) &pkt->rg_property_data[1];
+		hfi->bit_rate = ((struct hal_bitrate *)pdata)->bit_rate;
+		hfi->layer_id = ((struct hal_bitrate *)pdata)->layer_id;
+		pkt->size += sizeof(u32) + sizeof(struct hfi_bitrate);
+		break;
+	}
+	case HAL_CONFIG_VENC_MAX_BITRATE:
+	{
+		struct hfi_bitrate *hfi;
+
+		pkt->rg_property_data[0] =
+			HFI_PROPERTY_CONFIG_VENC_MAX_BITRATE;
+		hfi = (struct hfi_bitrate *) &pkt->rg_property_data[1];
+		hfi->bit_rate = ((struct hal_bitrate *)pdata)->bit_rate;
+		hfi->layer_id = ((struct hal_bitrate *)pdata)->layer_id;
+
+		pkt->size += sizeof(u32) + sizeof(struct hfi_bitrate);
+		break;
+	}
+	case HAL_PARAM_PROFILE_LEVEL_CURRENT:
+	{
+		struct hfi_profile_level *hfi;
+		struct hal_profile_level *prop =
+			(struct hal_profile_level *) pdata;
+
+		pkt->rg_property_data[0] =
+			HFI_PROPERTY_PARAM_PROFILE_LEVEL_CURRENT;
+		hfi = (struct hfi_profile_level *)
+			&pkt->rg_property_data[1];
+		hfi->level = prop->level;
+		hfi->profile = hal_to_hfi_type(HAL_PARAM_PROFILE_LEVEL_CURRENT,
+				prop->profile);
+		if (hfi->profile <= 0) {
+			hfi->profile = HFI_H264_PROFILE_HIGH;
+			dprintk(VIDC_WARN,
+					"Profile %d not supported, falling back to high\n",
+					prop->profile);
+		}
+
+		if (!hfi->level) {
+			hfi->level = 1;
+			dprintk(VIDC_WARN,
+					"Level %d not supported, falling back to level 1\n",
+					prop->level);
+		}
+
+		pkt->size += sizeof(u32) + sizeof(struct hfi_profile_level);
+		break;
+	}
+	case HAL_PARAM_VENC_H264_ENTROPY_CONTROL:
+	{
+		struct hfi_h264_entropy_control *hfi;
+		struct hal_h264_entropy_control *prop =
+			(struct hal_h264_entropy_control *) pdata;
+		pkt->rg_property_data[0] =
+			HFI_PROPERTY_PARAM_VENC_H264_ENTROPY_CONTROL;
+		hfi = (struct hfi_h264_entropy_control *)
+			&pkt->rg_property_data[1];
+		hfi->entropy_mode = hal_to_hfi_type(
+		   HAL_PARAM_VENC_H264_ENTROPY_CONTROL,
+		   prop->entropy_mode);
+		if (hfi->entropy_mode == HAL_H264_ENTROPY_CABAC)
+			hfi->cabac_model = hal_to_hfi_type(
+			   HAL_PARAM_VENC_H264_ENTROPY_CABAC_MODEL,
+			   prop->cabac_model);
+		pkt->size += sizeof(u32) + sizeof(
+			struct hfi_h264_entropy_control);
+		break;
+	}
+	case HAL_PARAM_VENC_RATE_CONTROL:
+	{
+		u32 *rc;
+		pkt->rg_property_data[0] =
+			HFI_PROPERTY_PARAM_VENC_RATE_CONTROL;
+		rc = (u32 *)pdata;
+		switch ((enum hal_rate_control) *rc) {
+		case HAL_RATE_CONTROL_OFF:
+			pkt->rg_property_data[1] = HFI_RATE_CONTROL_OFF;
+			break;
+		case HAL_RATE_CONTROL_CBR_CFR:
+			pkt->rg_property_data[1] = HFI_RATE_CONTROL_CBR_CFR;
+			break;
+		case HAL_RATE_CONTROL_CBR_VFR:
+			pkt->rg_property_data[1] = HFI_RATE_CONTROL_CBR_VFR;
+			break;
+		case HAL_RATE_CONTROL_VBR_CFR:
+			pkt->rg_property_data[1] = HFI_RATE_CONTROL_VBR_CFR;
+			break;
+		case HAL_RATE_CONTROL_VBR_VFR:
+			pkt->rg_property_data[1] = HFI_RATE_CONTROL_VBR_VFR;
+			break;
+		case HAL_RATE_CONTROL_MBR_CFR:
+			pkt->rg_property_data[1] = HFI_RATE_CONTROL_MBR_CFR;
+			break;
+		case HAL_RATE_CONTROL_MBR_VFR:
+			pkt->rg_property_data[1] = HFI_RATE_CONTROL_MBR_VFR;
+			break;
+		default:
+			dprintk(VIDC_ERR,
+					"Invalid Rate control setting: %pK\n",
+					pdata);
+			break;
+		}
+		pkt->size += sizeof(u32) * 2;
+		break;
+	}
+	case HAL_PARAM_VENC_MPEG4_TIME_RESOLUTION:
+	{
+		struct hfi_mpeg4_time_resolution *hfi;
+		pkt->rg_property_data[0] =
+			HFI_PROPERTY_PARAM_VENC_MPEG4_TIME_RESOLUTION;
+		hfi = (struct hfi_mpeg4_time_resolution *)
+			&pkt->rg_property_data[1];
+		hfi->time_increment_resolution =
+			((struct hal_mpeg4_time_resolution *)pdata)->
+					time_increment_resolution;
+		pkt->size += sizeof(u32) * 2;
+		break;
+	}
+	case HAL_PARAM_VENC_MPEG4_HEADER_EXTENSION:
+	{
+		struct hfi_mpeg4_header_extension *hfi;
+		pkt->rg_property_data[0] =
+			HFI_PROPERTY_PARAM_VENC_MPEG4_HEADER_EXTENSION;
+		hfi = (struct hfi_mpeg4_header_extension *)
+			&pkt->rg_property_data[1];
+		hfi->header_extension = (u32)(unsigned long) pdata;
+		pkt->size += sizeof(u32) * 2;
+		break;
+	}
+	case HAL_PARAM_VENC_H264_DEBLOCK_CONTROL:
+	{
+		struct hfi_h264_db_control *hfi;
+		struct hal_h264_db_control *prop =
+			(struct hal_h264_db_control *) pdata;
+		pkt->rg_property_data[0] =
+			HFI_PROPERTY_PARAM_VENC_H264_DEBLOCK_CONTROL;
+		hfi = (struct hfi_h264_db_control *) &pkt->rg_property_data[1];
+		switch (prop->mode) {
+		case HAL_H264_DB_MODE_DISABLE:
+			hfi->mode = HFI_H264_DB_MODE_DISABLE;
+			break;
+		case HAL_H264_DB_MODE_SKIP_SLICE_BOUNDARY:
+			hfi->mode = HFI_H264_DB_MODE_SKIP_SLICE_BOUNDARY;
+			break;
+		case HAL_H264_DB_MODE_ALL_BOUNDARY:
+			hfi->mode = HFI_H264_DB_MODE_ALL_BOUNDARY;
+			break;
+		default:
+			dprintk(VIDC_ERR, "Invalid deblocking mode: %#x\n",
+						  prop->mode);
+			break;
+		}
+		hfi->slice_alpha_offset = prop->slice_alpha_offset;
+		hfi->slice_beta_offset = prop->slice_beta_offset;
+		pkt->size += sizeof(u32) +
+			sizeof(struct hfi_h264_db_control);
+		break;
+	}
+	case HAL_PARAM_VENC_SESSION_QP:
+	{
+		struct hfi_quantization *hfi;
+		struct hal_quantization *hal_quant =
+			(struct hal_quantization *) pdata;
+		pkt->rg_property_data[0] =
+			HFI_PROPERTY_PARAM_VENC_SESSION_QP;
+		hfi = (struct hfi_quantization *) &pkt->rg_property_data[1];
+		hfi->qp_i = hal_quant->qpi;
+		hfi->qp_p = hal_quant->qpp;
+		hfi->qp_b = hal_quant->qpb;
+		hfi->layer_id = hal_quant->layer_id;
+		pkt->size += sizeof(u32) + sizeof(struct hfi_quantization);
+		break;
+	}
+	case HAL_PARAM_VENC_SESSION_QP_RANGE:
+	{
+		struct hfi_quantization_range *hfi;
+		struct hfi_quantization_range *hal_range =
+			(struct hfi_quantization_range *) pdata;
+		u32 min_qp, max_qp;
+
+		pkt->rg_property_data[0] =
+			HFI_PROPERTY_PARAM_VENC_SESSION_QP_RANGE;
+		hfi = (struct hfi_quantization_range *)
+				&pkt->rg_property_data[1];
+
+		min_qp = hal_range->min_qp;
+		max_qp = hal_range->max_qp;
+
+		/* We'll be packing in the qp, so make sure we
+		 * won't be losing data when masking */
+		if (min_qp > 0xff || max_qp > 0xff) {
+			dprintk(VIDC_ERR, "qp value out of range\n");
+			rc = -ERANGE;
+			break;
+		}
+
+		/* When creating the packet, pack the qp value as
+		 * 0xiippbb, where ii = qp range for I-frames,
+		 * pp = qp range for P-frames, etc. */
+		hfi->min_qp = min_qp | min_qp << 8 | min_qp << 16;
+		hfi->max_qp = max_qp | max_qp << 8 | max_qp << 16;
+		hfi->layer_id = hal_range->layer_id;
+
+		pkt->size += sizeof(u32) +
+			sizeof(struct hfi_quantization_range);
+		break;
+	}
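/*
 * A worked example of the 0xiippbb packing described in the case above:
 * the same 8-bit qp bound is replicated into the I-, P- and B-frame
 * byte lanes. Sample qp values are invented.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t pack_qp(uint32_t qp)
{
	return qp | qp << 8 | qp << 16;	/* 0x00iippbb, ii == pp == bb */
}

int main(void)
{
	/* min_qp = 10 -> 0xa0a0a, max_qp = 51 -> 0x333333 */
	printf("%#x %#x\n", (unsigned)pack_qp(10), (unsigned)pack_qp(51));
	return 0;
}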
+	case HAL_PARAM_VENC_SESSION_QP_RANGE_PACKED:
+	{
+		struct hfi_quantization_range *hfi;
+		struct hfi_quantization_range *hal_range =
+			(struct hfi_quantization_range *) pdata;
+
+		pkt->rg_property_data[0] =
+			HFI_PROPERTY_PARAM_VENC_SESSION_QP_RANGE;
+		hfi = (struct hfi_quantization_range *)
+				&pkt->rg_property_data[1];
+
+		hfi->min_qp = hal_range->min_qp;
+		hfi->max_qp = hal_range->max_qp;
+		hfi->layer_id = hal_range->layer_id;
+
+		pkt->size += sizeof(u32) +
+			sizeof(struct hfi_quantization_range);
+		break;
+	}
+	case HAL_PARAM_VENC_SEARCH_RANGE:
+	{
+		struct hfi_vc1e_perf_cfg_type *hfi;
+		struct hal_vc1e_perf_cfg_type *hal_mv_searchrange =
+			(struct hal_vc1e_perf_cfg_type *) pdata;
+		pkt->rg_property_data[0] =
+			HFI_PROPERTY_PARAM_VENC_VC1_PERF_CFG;
+		hfi = (struct hfi_vc1e_perf_cfg_type *)
+				&pkt->rg_property_data[1];
+		hfi->search_range_x_subsampled[0] =
+			hal_mv_searchrange->i_frame.x_subsampled;
+		hfi->search_range_x_subsampled[1] =
+			hal_mv_searchrange->p_frame.x_subsampled;
+		hfi->search_range_x_subsampled[2] =
+			hal_mv_searchrange->b_frame.x_subsampled;
+		hfi->search_range_y_subsampled[0] =
+			hal_mv_searchrange->i_frame.y_subsampled;
+		hfi->search_range_y_subsampled[1] =
+			hal_mv_searchrange->p_frame.y_subsampled;
+		hfi->search_range_y_subsampled[2] =
+			hal_mv_searchrange->b_frame.y_subsampled;
+		pkt->size += sizeof(u32) +
+			sizeof(struct hfi_vc1e_perf_cfg_type);
+		break;
+	}
+	case HAL_PARAM_VENC_MAX_NUM_B_FRAMES:
+	{
+		struct hfi_max_num_b_frames *hfi;
+		pkt->rg_property_data[0] =
+			HFI_PROPERTY_PARAM_VENC_MAX_NUM_B_FRAMES;
+		hfi = (struct hfi_max_num_b_frames *) &pkt->rg_property_data[1];
+		memcpy(hfi, (struct hfi_max_num_b_frames *) pdata,
+				sizeof(struct hfi_max_num_b_frames));
+		pkt->size += sizeof(u32) + sizeof(struct hfi_max_num_b_frames);
+		break;
+	}
+	case HAL_CONFIG_VENC_INTRA_PERIOD:
+	{
+		struct hfi_intra_period *hfi;
+		pkt->rg_property_data[0] =
+			HFI_PROPERTY_CONFIG_VENC_INTRA_PERIOD;
+		hfi = (struct hfi_intra_period *) &pkt->rg_property_data[1];
+		memcpy(hfi, (struct hfi_intra_period *) pdata,
+				sizeof(struct hfi_intra_period));
+		pkt->size += sizeof(u32) + sizeof(struct hfi_intra_period);
+		break;
+	}
+	case HAL_CONFIG_VENC_IDR_PERIOD:
+	{
+		struct hfi_idr_period *hfi;
+		pkt->rg_property_data[0] = HFI_PROPERTY_CONFIG_VENC_IDR_PERIOD;
+		hfi = (struct hfi_idr_period *) &pkt->rg_property_data[1];
+		hfi->idr_period = ((struct hfi_idr_period *) pdata)->idr_period;
+		pkt->size += sizeof(u32) * 2;
+		break;
+	}
+	case HAL_PARAM_VDEC_CONCEAL_COLOR:
+	{
+		struct hfi_conceal_color *hfi;
+		pkt->rg_property_data[0] =
+			HFI_PROPERTY_PARAM_VDEC_CONCEAL_COLOR;
+		hfi = (struct hfi_conceal_color *) &pkt->rg_property_data[1];
+		if (hfi)
+			hfi->conceal_color = ((struct hfi_conceal_color *)
+					pdata)->conceal_color;
+		pkt->size += sizeof(u32) * 2;
+		break;
+	}
+	case HAL_CONFIG_VPE_OPERATIONS:
+	{
+		struct hfi_operations_type *hfi;
+		struct hal_operations *prop =
+			(struct hal_operations *) pdata;
+		pkt->rg_property_data[0] = HFI_PROPERTY_CONFIG_VPE_OPERATIONS;
+		hfi = (struct hfi_operations_type *) &pkt->rg_property_data[1];
+		switch (prop->rotate) {
+		case HAL_ROTATE_NONE:
+			hfi->rotation = HFI_ROTATE_NONE;
+			break;
+		case HAL_ROTATE_90:
+			hfi->rotation = HFI_ROTATE_90;
+			break;
+		case HAL_ROTATE_180:
+			hfi->rotation = HFI_ROTATE_180;
+			break;
+		case HAL_ROTATE_270:
+			hfi->rotation = HFI_ROTATE_270;
+			break;
+		default:
+			dprintk(VIDC_ERR, "Invalid rotation setting: %#x\n",
+				prop->rotate);
+			rc = -EINVAL;
+			break;
+		}
+		switch (prop->flip) {
+		case HAL_FLIP_NONE:
+			hfi->flip = HFI_FLIP_NONE;
+			break;
+		case HAL_FLIP_HORIZONTAL:
+			hfi->flip = HFI_FLIP_HORIZONTAL;
+			break;
+		case HAL_FLIP_VERTICAL:
+			hfi->flip = HFI_FLIP_VERTICAL;
+			break;
+		default:
+			dprintk(VIDC_ERR, "Invalid flip setting: %#x\n",
+				prop->flip);
+			rc = -EINVAL;
+			break;
+		}
+		pkt->size += sizeof(u32) + sizeof(struct hfi_operations_type);
+		break;
+	}
+	case HAL_PARAM_VENC_INTRA_REFRESH:
+	{
+		struct hfi_intra_refresh *hfi;
+		struct hal_intra_refresh *prop =
+			(struct hal_intra_refresh *) pdata;
+		pkt->rg_property_data[0] =
+			HFI_PROPERTY_PARAM_VENC_INTRA_REFRESH;
+		hfi = (struct hfi_intra_refresh *) &pkt->rg_property_data[1];
+		switch (prop->mode) {
+		case HAL_INTRA_REFRESH_NONE:
+			hfi->mode = HFI_INTRA_REFRESH_NONE;
+			break;
+		case HAL_INTRA_REFRESH_ADAPTIVE:
+			hfi->mode = HFI_INTRA_REFRESH_ADAPTIVE;
+			break;
+		case HAL_INTRA_REFRESH_CYCLIC:
+			hfi->mode = HFI_INTRA_REFRESH_CYCLIC;
+			break;
+		case HAL_INTRA_REFRESH_CYCLIC_ADAPTIVE:
+			hfi->mode = HFI_INTRA_REFRESH_CYCLIC_ADAPTIVE;
+			break;
+		case HAL_INTRA_REFRESH_RANDOM:
+			hfi->mode = HFI_INTRA_REFRESH_RANDOM;
+			break;
+		default:
+			dprintk(VIDC_ERR,
+					"Invalid intra refresh setting: %#x\n",
+					prop->mode);
+			break;
+		}
+		hfi->air_mbs = prop->air_mbs;
+		hfi->air_ref = prop->air_ref;
+		hfi->cir_mbs = prop->cir_mbs;
+		pkt->size += sizeof(u32) + sizeof(struct hfi_intra_refresh);
+		break;
+	}
+	case HAL_PARAM_VENC_MULTI_SLICE_CONTROL:
+	{
+		struct hfi_multi_slice_control *hfi;
+		struct hal_multi_slice_control *prop =
+			(struct hal_multi_slice_control *) pdata;
+		pkt->rg_property_data[0] =
+			HFI_PROPERTY_PARAM_VENC_MULTI_SLICE_CONTROL;
+		hfi = (struct hfi_multi_slice_control *)
+			&pkt->rg_property_data[1];
+		switch (prop->multi_slice) {
+		case HAL_MULTI_SLICE_OFF:
+			hfi->multi_slice = HFI_MULTI_SLICE_OFF;
+			break;
+		case HAL_MULTI_SLICE_GOB:
+			hfi->multi_slice = HFI_MULTI_SLICE_GOB;
+			break;
+		case HAL_MULTI_SLICE_BY_MB_COUNT:
+			hfi->multi_slice = HFI_MULTI_SLICE_BY_MB_COUNT;
+			break;
+		case HAL_MULTI_SLICE_BY_BYTE_COUNT:
+			hfi->multi_slice = HFI_MULTI_SLICE_BY_BYTE_COUNT;
+			break;
+		default:
+			dprintk(VIDC_ERR, "Invalid slice settings: %#x\n",
+				prop->multi_slice);
+			break;
+		}
+		hfi->slice_size = prop->slice_size;
+		pkt->size += sizeof(u32) +
+			sizeof(struct hfi_multi_slice_control);
+		break;
+	}
+	case HAL_PARAM_INDEX_EXTRADATA:
+	{
+		struct hfi_index_extradata_config *hfi;
+		struct hal_extradata_enable *extra = pdata;
+		int id = 0;
+		pkt->rg_property_data[0] =
+			get_hfi_extradata_index(extra->index);
+		hfi = (struct hfi_index_extradata_config *)
+			&pkt->rg_property_data[1];
+		hfi->enable = extra->enable;
+		id = get_hfi_extradata_id(extra->index);
+		if (id) {
+			hfi->index_extra_data_id = id;
+		} else {
+			dprintk(VIDC_WARN,
+				"Failed to find extradata id for index %d\n",
+				extra->index);
+			rc = -EINVAL;
+		}
+		pkt->size += sizeof(u32) +
+			sizeof(struct hfi_index_extradata_config);
+		break;
+	}
+	case HAL_PARAM_VENC_SLICE_DELIVERY_MODE:
+	{
+		create_pkt_enable(pkt->rg_property_data,
+				HFI_PROPERTY_PARAM_VENC_SLICE_DELIVERY_MODE,
+				((struct hal_enable *)pdata)->enable);
+		pkt->size += sizeof(u32) + sizeof(struct hfi_enable);
+		break;
+	}
+	case HAL_PARAM_VENC_H264_VUI_TIMING_INFO:
+	{
+		struct hfi_h264_vui_timing_info *hfi;
+		struct hal_h264_vui_timing_info *timing_info = pdata;
+
+		pkt->rg_property_data[0] =
+			HFI_PROPERTY_PARAM_VENC_H264_VUI_TIMING_INFO;
+
+		hfi = (struct hfi_h264_vui_timing_info *)
+			&pkt->rg_property_data[1];
+		hfi->enable = timing_info->enable;
+		hfi->fixed_frame_rate = timing_info->fixed_frame_rate;
+		hfi->time_scale = timing_info->time_scale;
+
+		pkt->size += sizeof(u32) +
+			sizeof(struct hfi_h264_vui_timing_info);
+		break;
+	}
+	case HAL_CONFIG_VPE_DEINTERLACE:
+	{
+		create_pkt_enable(pkt->rg_property_data,
+				HFI_PROPERTY_CONFIG_VPE_DEINTERLACE,
+				((struct hal_enable *)pdata)->enable);
+		pkt->size += sizeof(u32) + sizeof(struct hfi_enable);
+		break;
+	}
+	case HAL_PARAM_VENC_GENERATE_AUDNAL:
+	{
+		create_pkt_enable(pkt->rg_property_data,
+				HFI_PROPERTY_PARAM_VENC_GENERATE_AUDNAL,
+				((struct hal_enable *)pdata)->enable);
+		pkt->size += sizeof(u32) + sizeof(struct hfi_enable);
+		break;
+	}
+	case HAL_PARAM_BUFFER_ALLOC_MODE:
+	{
+		u32 buffer_type;
+		u32 buffer_mode;
+		struct hfi_buffer_alloc_mode *hfi;
+		struct hal_buffer_alloc_mode *alloc_info = pdata;
+		pkt->rg_property_data[0] =
+			HFI_PROPERTY_PARAM_BUFFER_ALLOC_MODE;
+		hfi = (struct hfi_buffer_alloc_mode *)
+			&pkt->rg_property_data[1];
+		buffer_type = get_hfi_buffer(alloc_info->buffer_type);
+		if (buffer_type)
+			hfi->buffer_type = buffer_type;
+		else
+			return -EINVAL;
+		buffer_mode = get_hfi_buf_mode(alloc_info->buffer_mode);
+		if (buffer_mode)
+			hfi->buffer_mode = buffer_mode;
+		else
+			return -EINVAL;
+		pkt->size += sizeof(u32) + sizeof(struct hfi_buffer_alloc_mode);
+		break;
+	}
+	case HAL_PARAM_VDEC_FRAME_ASSEMBLY:
+	{
+		create_pkt_enable(pkt->rg_property_data,
+				HFI_PROPERTY_PARAM_VDEC_FRAME_ASSEMBLY,
+				((struct hal_enable *)pdata)->enable);
+		pkt->size += sizeof(u32) + sizeof(struct hfi_enable);
+		break;
+	}
+	case HAL_PARAM_VENC_H264_VUI_BITSTREAM_RESTRC:
+	{
+		create_pkt_enable(pkt->rg_property_data,
+			HFI_PROPERTY_PARAM_VENC_H264_VUI_BITSTREAM_RESTRC,
+			((struct hal_enable *)pdata)->enable);
+		pkt->size += sizeof(u32) + sizeof(struct hfi_enable);
+		break;
+	}
+	case HAL_PARAM_VENC_PRESERVE_TEXT_QUALITY:
+	{
+		create_pkt_enable(pkt->rg_property_data,
+				HFI_PROPERTY_PARAM_VENC_PRESERVE_TEXT_QUALITY,
+				((struct hal_enable *)pdata)->enable);
+		pkt->size += sizeof(u32) + sizeof(struct hfi_enable);
+		break;
+	}
+	case HAL_PARAM_VDEC_SCS_THRESHOLD:
+	{
+		struct hfi_scs_threshold *hfi;
+		pkt->rg_property_data[0] =
+			HFI_PROPERTY_PARAM_VDEC_SCS_THRESHOLD;
+		hfi = (struct hfi_scs_threshold *) &pkt->rg_property_data[1];
+		hfi->threshold_value =
+			((struct hal_scs_threshold *) pdata)->threshold_value;
+		pkt->size += sizeof(u32) + sizeof(struct hfi_scs_threshold);
+		break;
+	}
+	case HAL_PARAM_MVC_BUFFER_LAYOUT:
+	{
+		struct hfi_mvc_buffer_layout_descp_type *hfi;
+		struct hal_mvc_buffer_layout *layout_info = pdata;
+		pkt->rg_property_data[0] = HFI_PROPERTY_PARAM_MVC_BUFFER_LAYOUT;
+		hfi = (struct hfi_mvc_buffer_layout_descp_type *)
+			&pkt->rg_property_data[1];
+		hfi->layout_type = get_hfi_layout(layout_info->layout_type);
+		hfi->bright_view_first = layout_info->bright_view_first;
+		hfi->ngap = layout_info->ngap;
+		pkt->size += sizeof(u32) +
+			sizeof(struct hfi_mvc_buffer_layout_descp_type);
+		break;
+	}
+	case HAL_PARAM_VENC_LTRMODE:
+	{
+		struct hfi_ltr_mode *hfi;
+		struct hal_ltr_mode *hal = pdata;
+		pkt->rg_property_data[0] =
+			HFI_PROPERTY_PARAM_VENC_LTRMODE;
+		hfi = (struct hfi_ltr_mode *) &pkt->rg_property_data[1];
+		hfi->ltr_mode = get_hfi_ltr_mode(hal->mode);
+		hfi->ltr_count = hal->count;
+		hfi->trust_mode = hal->trust_mode;
+		pkt->size += sizeof(u32) + sizeof(struct hfi_ltr_mode);
+		break;
+	}
+	case HAL_CONFIG_VENC_USELTRFRAME:
+	{
+		struct hfi_ltr_use *hfi;
+		struct hal_ltr_use *hal = pdata;
+		pkt->rg_property_data[0] =
+			HFI_PROPERTY_CONFIG_VENC_USELTRFRAME;
+		hfi = (struct hfi_ltr_use *) &pkt->rg_property_data[1];
+		hfi->frames = hal->frames;
+		hfi->ref_ltr = hal->ref_ltr;
+		hfi->use_constrnt = hal->use_constraint;
+		pkt->size += sizeof(u32) + sizeof(struct hfi_ltr_use);
+		break;
+	}
+	case HAL_CONFIG_VENC_MARKLTRFRAME:
+	{
+		struct hfi_ltr_mark *hfi;
+		struct hal_ltr_mark *hal = pdata;
+		pkt->rg_property_data[0] =
+			HFI_PROPERTY_CONFIG_VENC_MARKLTRFRAME;
+		hfi = (struct hfi_ltr_mark *) &pkt->rg_property_data[1];
+		hfi->mark_frame = hal->mark_frame;
+		pkt->size += sizeof(u32) + sizeof(struct hfi_ltr_mark);
+		break;
+	}
+	case HAL_PARAM_VENC_HIER_P_MAX_ENH_LAYERS:
+	{
+		pkt->rg_property_data[0] =
+			HFI_PROPERTY_PARAM_VENC_HIER_P_MAX_NUM_ENH_LAYER;
+		pkt->rg_property_data[1] = *(u32 *)pdata;
+		pkt->size += sizeof(u32) * 2;
+		break;
+	}
+	case HAL_CONFIG_VENC_HIER_P_NUM_FRAMES:
+	{
+		pkt->rg_property_data[0] =
+			HFI_PROPERTY_CONFIG_VENC_HIER_P_ENH_LAYER;
+		pkt->rg_property_data[1] = *(u32 *)pdata;
+		pkt->size += sizeof(u32) * 2;
+		break;
+	}
+	case HAL_PARAM_VENC_DISABLE_RC_TIMESTAMP:
+	{
+		create_pkt_enable(pkt->rg_property_data,
+				HFI_PROPERTY_PARAM_VENC_DISABLE_RC_TIMESTAMP,
+				((struct hal_enable *)pdata)->enable);
+		pkt->size += sizeof(u32) + sizeof(struct hfi_enable);
+		break;
+	}
+	case HAL_PARAM_VENC_ENABLE_INITIAL_QP:
+	{
+		struct hfi_initial_quantization *hfi;
+		struct hal_initial_quantization *quant = pdata;
+		pkt->rg_property_data[0] =
+			HFI_PROPERTY_PARAM_VENC_INITIAL_QP;
+		hfi = (struct hfi_initial_quantization *)
+			&pkt->rg_property_data[1];
+		hfi->init_qp_enable = quant->init_qp_enable;
+		hfi->qp_i = quant->qpi;
+		hfi->qp_p = quant->qpp;
+		hfi->qp_b = quant->qpb;
+		pkt->size += sizeof(u32) +
+			sizeof(struct hfi_initial_quantization);
+		break;
+	}
+	case HAL_PARAM_VPE_COLOR_SPACE_CONVERSION:
+	{
+		struct hfi_vpe_color_space_conversion *hfi = NULL;
+		struct hal_vpe_color_space_conversion *hal = pdata;
+		pkt->rg_property_data[0] =
+				HFI_PROPERTY_PARAM_VPE_COLOR_SPACE_CONVERSION;
+		hfi = (struct hfi_vpe_color_space_conversion *)
+			&pkt->rg_property_data[1];
+		memcpy(hfi->csc_matrix, hal->csc_matrix,
+				sizeof(hfi->csc_matrix));
+		memcpy(hfi->csc_bias, hal->csc_bias, sizeof(hfi->csc_bias));
+		memcpy(hfi->csc_limit, hal->csc_limit, sizeof(hfi->csc_limit));
+		pkt->size += sizeof(u32) +
+				sizeof(struct hfi_vpe_color_space_conversion);
+		break;
+	}
+	case HAL_PARAM_VENC_VPX_ERROR_RESILIENCE_MODE:
+	{
+		create_pkt_enable(pkt->rg_property_data,
+			HFI_PROPERTY_PARAM_VENC_VPX_ERROR_RESILIENCE_MODE,
+			((struct hal_enable *)pdata)->enable);
+		pkt->size += sizeof(u32) + sizeof(struct hfi_enable);
+		break;
+	}
+	case HAL_PARAM_VENC_H264_NAL_SVC_EXT:
+	{
+		create_pkt_enable(pkt->rg_property_data,
+			HFI_PROPERTY_PARAM_VENC_H264_NAL_SVC_EXT,
+			((struct hal_enable *)pdata)->enable);
+		pkt->size += sizeof(u32) + sizeof(struct hfi_enable);
+		break;
+	}
+	case HAL_CONFIG_VENC_PERF_MODE:
+	{
+		u32 hfi_perf_mode = 0;
+		enum hal_perf_mode hal_perf_mode = *(enum hal_perf_mode *)pdata;
+
+		switch (hal_perf_mode) {
+		case HAL_PERF_MODE_POWER_SAVE:
+			hfi_perf_mode = HFI_VENC_PERFMODE_POWER_SAVE;
+			break;
+		case HAL_PERF_MODE_POWER_MAX_QUALITY:
+			hfi_perf_mode = HFI_VENC_PERFMODE_MAX_QUALITY;
+			break;
+		default:
+			return -ENOTSUPP;
+		}
+
+		pkt->rg_property_data[0] = HFI_PROPERTY_CONFIG_VENC_PERF_MODE;
+		pkt->rg_property_data[1] = hfi_perf_mode;
+		pkt->size += sizeof(u32) * 2;
+		break;
+	}
+	case HAL_PARAM_VENC_HIER_B_MAX_ENH_LAYERS:
+	{
+		pkt->rg_property_data[0] =
+			HFI_PROPERTY_PARAM_VENC_HIER_B_MAX_NUM_ENH_LAYER;
+		pkt->rg_property_data[1] = *(u32 *)pdata;
+		pkt->size += sizeof(u32) * 2;
+		break;
+	}
+	case HAL_PARAM_VDEC_NON_SECURE_OUTPUT2:
+	{
+		create_pkt_enable(pkt->rg_property_data,
+				HFI_PROPERTY_PARAM_VDEC_NONCP_OUTPUT2,
+				((struct hal_enable *)pdata)->enable);
+		pkt->size += sizeof(u32) + sizeof(struct hfi_enable);
+		break;
+	}
+	case HAL_PARAM_VENC_HIER_P_HYBRID_MODE:
+	{
+		pkt->rg_property_data[0] =
+			HFI_PROPERTY_PARAM_VENC_HIER_P_HYBRID_MODE;
+		pkt->rg_property_data[1] =
+			((struct hfi_hybrid_hierp *)pdata)->layers;
+		pkt->size += sizeof(u32) +
+			sizeof(struct hfi_hybrid_hierp);
+		break;
+	}
+	case HAL_PARAM_VENC_MBI_STATISTICS_MODE:
+	{
+		pkt->rg_property_data[0] =
+			HFI_PROPERTY_PARAM_VENC_MBI_DUMPING;
+		pkt->rg_property_data[1] = hal_to_hfi_type(
+			HAL_PARAM_VENC_MBI_STATISTICS_MODE,
+				*(u32 *)pdata);
+		pkt->size += sizeof(u32) * 2;
+		break;
+	}
+	case HAL_CONFIG_VENC_FRAME_QP:
+	{
+		pkt->rg_property_data[0] =
+			HFI_PROPERTY_CONFIG_VENC_FRAME_QP;
+		pkt->rg_property_data[1] = *(u32 *)pdata;
+		pkt->size += sizeof(u32) * 2;
+		break;
+	}
+	case HAL_CONFIG_VENC_BASELAYER_PRIORITYID:
+	{
+		pkt->rg_property_data[0] =
+			HFI_PROPERTY_CONFIG_VENC_BASELAYER_PRIORITYID;
+		pkt->rg_property_data[1] = *(u32 *)pdata;
+		pkt->size += sizeof(u32) * 2;
+		break;
+	}
+	case HAL_PROPERTY_PARAM_VENC_ASPECT_RATIO:
+	{
+		struct hfi_aspect_ratio *hfi = NULL;
+		struct hal_aspect_ratio *hal = pdata;
+
+		pkt->rg_property_data[0] =
+			HFI_PROPERTY_PARAM_VENC_ASPECT_RATIO;
+		hfi = (struct hfi_aspect_ratio *)
+			&pkt->rg_property_data[1];
+		memcpy(hfi, hal,
+			sizeof(struct hfi_aspect_ratio));
+		pkt->size += sizeof(u32) +
+				sizeof(struct hfi_aspect_ratio);
+		break;
+	}
+	case HAL_PARAM_VENC_BITRATE_TYPE:
+	{
+		create_pkt_enable(pkt->rg_property_data,
+			HFI_PROPERTY_PARAM_VENC_BITRATE_TYPE,
+			((struct hal_enable *)pdata)->enable);
+		pkt->size += sizeof(u32) + sizeof(struct hfi_enable);
+		break;
+	}
+	case HAL_PARAM_VENC_CONSTRAINED_INTRA_PRED:
+	{
+		create_pkt_enable(pkt->rg_property_data,
+			HFI_PROPERTY_PARAM_VENC_CONSTRAINED_INTRA_PRED,
+			((struct hal_enable *)pdata)->enable);
+		pkt->size += sizeof(u32) + sizeof(struct hfi_enable);
+		break;
+	}
+	case HAL_PARAM_VENC_H264_TRANSFORM_8x8:
+	{
+		create_pkt_enable(pkt->rg_property_data,
+			HFI_PROPERTY_PARAM_VENC_H264_8X8_TRANSFORM,
+			((struct hal_enable *)pdata)->enable);
+		pkt->size += sizeof(u32) + sizeof(struct hfi_enable);
+		break;
+	}
+	case HAL_PARAM_VENC_VIDEO_SIGNAL_INFO:
+	{
+		struct hal_video_signal_info *hal = pdata;
+		struct hfi_video_signal_metadata *signal_info =
+			(struct hfi_video_signal_metadata *)
+			&pkt->rg_property_data[1];
+
+		signal_info->enable = true;
+		signal_info->video_format = MSM_VIDC_NTSC;
+		signal_info->video_full_range = hal->full_range;
+		signal_info->color_description = MSM_VIDC_COLOR_DESC_PRESENT;
+		signal_info->color_primaries = hal->color_space;
+		signal_info->transfer_characteristics = hal->transfer_chars;
+		signal_info->matrix_coeffs = hal->matrix_coeffs;
+
+		pkt->rg_property_data[0] =
+			HFI_PROPERTY_PARAM_VENC_VIDEO_SIGNAL_INFO;
+		pkt->size += sizeof(u32) + sizeof(*signal_info);
+		break;
+	}
+	case HAL_PARAM_VENC_IFRAMESIZE_TYPE:
+	{
+		enum hal_iframesize_type hal =
+			*(enum hal_iframesize_type *)pdata;
+		struct hfi_iframe_size *hfi = (struct hfi_iframe_size *)
+			&pkt->rg_property_data[1];
+
+		switch (hal) {
+		case HAL_IFRAMESIZE_TYPE_DEFAULT:
+			hfi->type = HFI_IFRAME_SIZE_DEFAULT;
+			break;
+		case HAL_IFRAMESIZE_TYPE_MEDIUM:
+			hfi->type = HFI_IFRAME_SIZE_MEDIUM;
+			break;
+		case HAL_IFRAMESIZE_TYPE_HUGE:
+			hfi->type = HFI_IFRAME_SIZE_HIGH;
+			break;
+		case HAL_IFRAMESIZE_TYPE_UNLIMITED:
+			hfi->type = HFI_IFRAME_SIZE_UNLIMITED;
+			break;
+		default:
+			return -ENOTSUPP;
+		}
+		pkt->rg_property_data[0] = HFI_PROPERTY_PARAM_VENC_IFRAMESIZE;
+		pkt->size += sizeof(u32) + sizeof(struct hfi_iframe_size);
+		break;
+	}
+	case HAL_PARAM_VENC_SEND_OUTPUT_FOR_SKIPPED_FRAME:
+	{
+		create_pkt_enable(pkt->rg_property_data,
+			HFI_PROPERTY_PARAM_VENC_SEND_OUTPUT_FOR_SKIPPED_FRAMES,
+			((struct hal_enable *)pdata)->enable);
+		pkt->size += sizeof(u32) + sizeof(struct hfi_enable);
+		break;
+	}
+
+	/* FOLLOWING PROPERTIES ARE NOT IMPLEMENTED IN CORE YET */
+	case HAL_CONFIG_BUFFER_REQUIREMENTS:
+	case HAL_CONFIG_PRIORITY:
+	case HAL_CONFIG_BATCH_INFO:
+	case HAL_PARAM_METADATA_PASS_THROUGH:
+	case HAL_SYS_IDLE_INDICATOR:
+	case HAL_PARAM_UNCOMPRESSED_FORMAT_SUPPORTED:
+	case HAL_PARAM_INTERLACE_FORMAT_SUPPORTED:
+	case HAL_PARAM_CHROMA_SITE:
+	case HAL_PARAM_PROPERTIES_SUPPORTED:
+	case HAL_PARAM_PROFILE_LEVEL_SUPPORTED:
+	case HAL_PARAM_CAPABILITY_SUPPORTED:
+	case HAL_PARAM_NAL_STREAM_FORMAT_SUPPORTED:
+	case HAL_PARAM_MULTI_VIEW_FORMAT:
+	case HAL_PARAM_MAX_SEQUENCE_HEADER_SIZE:
+	case HAL_PARAM_CODEC_SUPPORTED:
+	case HAL_PARAM_VDEC_MULTI_VIEW_SELECT:
+	case HAL_PARAM_VDEC_MB_QUANTIZATION:
+	case HAL_PARAM_VDEC_NUM_CONCEALED_MB:
+	case HAL_PARAM_VDEC_H264_ENTROPY_SWITCHING:
+	case HAL_PARAM_VENC_MPEG4_DATA_PARTITIONING:
+	case HAL_CONFIG_BUFFER_COUNT_ACTUAL:
+	case HAL_CONFIG_VDEC_MULTI_STREAM:
+	case HAL_PARAM_VENC_MULTI_SLICE_INFO:
+	case HAL_CONFIG_VENC_TIMESTAMP_SCALE:
+	case HAL_PARAM_BUFFER_SIZE_MINIMUM:
+	default:
+		dprintk(VIDC_ERR, "DEFAULT: Calling %#x\n", ptype);
+		rc = -ENOTSUPP;
+		break;
+	}
+	return rc;
+}
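
Every case in the switch above follows one wire layout: the HFI property id goes in rg_property_data[0], the payload struct is overlaid at rg_property_data[1], and pkt->size grows by one u32 for the id plus the payload size. A hedged sketch of that accounting; demo_pkt and demo_payload are hypothetical stand-ins, not HFI structures:

	#include <stdint.h>
	#include <string.h>

	struct demo_payload {
		uint32_t a;
		uint32_t b;
	};

	struct demo_pkt {
		uint32_t size;		/* running byte count of the packet */
		uint32_t packet_type;
		uint32_t rg_property_data[64];
	};

	static void demo_set_property(struct demo_pkt *pkt, uint32_t prop_id,
				      const struct demo_payload *src)
	{
		struct demo_payload *dst;

		pkt->rg_property_data[0] = prop_id;	/* id word first ... */
		dst = (struct demo_payload *)&pkt->rg_property_data[1];
		memcpy(dst, src, sizeof(*dst));		/* ... payload right after */
		pkt->size += sizeof(uint32_t) + sizeof(struct demo_payload);
	}

	int main(void)
	{
		struct demo_pkt pkt = { .size = 3 * sizeof(uint32_t) };
		struct demo_payload payload = { .a = 1, .b = 2 };

		demo_set_property(&pkt, 0x2001, &payload);
		return pkt.size == 3 * sizeof(uint32_t) +
		       sizeof(uint32_t) + sizeof(payload) ? 0 : 1;
	}
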
+
+static int get_hfi_ssr_type(enum hal_ssr_trigger_type type)
+{
+	int rc = HFI_TEST_SSR_HW_WDOG_IRQ;
+	switch (type) {
+	case SSR_ERR_FATAL:
+		rc = HFI_TEST_SSR_SW_ERR_FATAL;
+		break;
+	case SSR_SW_DIV_BY_ZERO:
+		rc = HFI_TEST_SSR_SW_DIV_BY_ZERO;
+		break;
+	case SSR_HW_WDOG_IRQ:
+		rc = HFI_TEST_SSR_HW_WDOG_IRQ;
+		break;
+	default:
+		dprintk(VIDC_WARN,
+			"SSR trigger type not recognized, using WDOG.\n");
+	}
+	return rc;
+}
+
+int create_pkt_ssr_cmd(enum hal_ssr_trigger_type type,
+		struct hfi_cmd_sys_test_ssr_packet *pkt)
+{
+	if (!pkt) {
+		dprintk(VIDC_ERR, "Invalid params, device: %pK\n", pkt);
+		return -EINVAL;
+	}
+	pkt->size = sizeof(struct hfi_cmd_sys_test_ssr_packet);
+	pkt->packet_type = HFI_CMD_SYS_TEST_SSR;
+	pkt->trigger_type = get_hfi_ssr_type(type);
+	return 0;
+}
+
+int create_pkt_cmd_sys_image_version(
+		struct hfi_cmd_sys_get_property_packet *pkt)
+{
+	if (!pkt) {
+		dprintk(VIDC_ERR, "%s invalid param :%pK\n", __func__, pkt);
+		return -EINVAL;
+	}
+	pkt->size = sizeof(struct hfi_cmd_sys_get_property_packet);
+	pkt->packet_type = HFI_CMD_SYS_GET_PROPERTY;
+	pkt->num_properties = 1;
+	pkt->rg_property_data[0] = HFI_PROPERTY_SYS_IMAGE_VERSION;
+	return 0;
+}
+
+static int create_3x_pkt_cmd_session_set_property(
+		struct hfi_cmd_session_set_property_packet *pkt,
+		struct hal_session *session,
+		enum hal_property ptype, void *pdata)
+{
+	int rc = 0;
+
+	if (!pkt || !session || !pdata)
+		return -EINVAL;
+
+	pkt->size = sizeof(struct hfi_cmd_session_set_property_packet);
+	pkt->packet_type = HFI_CMD_SESSION_SET_PROPERTY;
+	pkt->session_id = hash32_ptr(session);
+	pkt->num_properties = 1;
+
+	/*
+	 * Any session set property which is different in 3XX packetization
+	 * should be added as a new case below. All unchanged session set
+	 * properties will be handled in the default case.
+	 */
+	switch (ptype) {
+	case HAL_PARAM_VDEC_MULTI_STREAM:
+	{
+		u32 buffer_type;
+		struct hfi_3x_multi_stream *hfi;
+		struct hal_multi_stream *prop =
+			(struct hal_multi_stream *) pdata;
+		pkt->rg_property_data[0] =
+			HFI_PROPERTY_PARAM_VDEC_MULTI_STREAM;
+		hfi = (struct hfi_3x_multi_stream *) &pkt->rg_property_data[1];
+
+		buffer_type = get_hfi_buffer(prop->buffer_type);
+		if (buffer_type)
+			hfi->buffer_type = buffer_type;
+		else
+			return -EINVAL;
+		hfi->enable = prop->enable;
+		pkt->size += sizeof(u32) + sizeof(struct hfi_3x_multi_stream);
+		break;
+	}
+	case HAL_PARAM_VENC_INTRA_REFRESH:
+	{
+		struct hfi_3x_intra_refresh *hfi;
+		struct hal_intra_refresh *prop =
+			(struct hal_intra_refresh *) pdata;
+		pkt->rg_property_data[0] =
+			HFI_PROPERTY_PARAM_VENC_INTRA_REFRESH;
+		hfi = (struct hfi_3x_intra_refresh *) &pkt->rg_property_data[1];
+		hfi->mbs = 0;
+		switch (prop->mode) {
+		case HAL_INTRA_REFRESH_NONE:
+			hfi->mode = HFI_INTRA_REFRESH_NONE;
+			break;
+		case HAL_INTRA_REFRESH_ADAPTIVE:
+			hfi->mode = HFI_INTRA_REFRESH_ADAPTIVE;
+			hfi->mbs = prop->air_mbs;
+			break;
+		case HAL_INTRA_REFRESH_CYCLIC:
+			hfi->mode = HFI_INTRA_REFRESH_CYCLIC;
+			hfi->mbs = prop->cir_mbs;
+			break;
+		case HAL_INTRA_REFRESH_CYCLIC_ADAPTIVE:
+			hfi->mode = HFI_INTRA_REFRESH_CYCLIC_ADAPTIVE;
+			hfi->mbs = prop->air_mbs;
+			break;
+		case HAL_INTRA_REFRESH_RANDOM:
+			hfi->mode = HFI_INTRA_REFRESH_RANDOM;
+			hfi->mbs = prop->air_mbs;
+			break;
+		default:
+			dprintk(VIDC_ERR,
+				"Invalid intra refresh setting: %d\n",
+				prop->mode);
+			break;
+		}
+		pkt->size += sizeof(u32) + sizeof(struct hfi_3x_intra_refresh);
+		break;
+	}
+	case HAL_PARAM_SYNC_BASED_INTERRUPT:
+	{
+		create_pkt_enable(pkt->rg_property_data,
+				HFI_PROPERTY_PARAM_SYNC_BASED_INTERRUPT,
+				((struct hal_enable *)pdata)->enable);
+		pkt->size += sizeof(u32) + sizeof(struct hfi_enable);
+		break;
+	}
+	case HAL_PARAM_VENC_VQZIP_SEI:
+	{
+		create_pkt_enable(pkt->rg_property_data,
+				HFI_PROPERTY_PARAM_VENC_VQZIP_SEI_TYPE,
+				((struct hal_enable *)pdata)->enable);
+		pkt->size += sizeof(u32) + sizeof(struct hfi_enable);
+		break;
+	}
+	/* Deprecated param on Venus 3xx */
+	case HAL_PARAM_VDEC_CONTINUE_DATA_TRANSFER:
+	{
+		rc = -ENOTSUPP;
+		break;
+	}
+	case HAL_PARAM_BUFFER_SIZE_MINIMUM:
+	{
+		struct hfi_buffer_size_minimum *hfi;
+		struct hal_buffer_size_minimum *prop =
+			(struct hal_buffer_size_minimum *) pdata;
+		u32 buffer_type;
+
+		pkt->rg_property_data[0] =
+			HFI_PROPERTY_PARAM_BUFFER_SIZE_MINIMUM;
+
+		hfi = (struct hfi_buffer_size_minimum *)
+			&pkt->rg_property_data[1];
+		hfi->buffer_size = prop->buffer_size;
+
+		buffer_type = get_hfi_buffer(prop->buffer_type);
+		if (buffer_type)
+			hfi->buffer_type = buffer_type;
+		else
+			return -EINVAL;
+
+		pkt->size += sizeof(u32) +
+			sizeof(struct hfi_buffer_size_minimum);
+		break;
+	}
+	case HAL_PARAM_VENC_H264_PIC_ORDER_CNT:
+	{
+		pkt->rg_property_data[0] =
+			HFI_PROPERTY_PARAM_VENC_H264_PICORDER_CNT_TYPE;
+		pkt->rg_property_data[1] = *(u32 *)pdata;
+		pkt->size += sizeof(u32) * 2;
+		break;
+	}
+	case HAL_PARAM_VENC_LOW_LATENCY:
+	{
+		struct hfi_enable *hfi;
+
+		pkt->rg_property_data[0] =
+			HFI_PROPERTY_PARAM_VENC_LOW_LATENCY_MODE;
+		hfi = (struct hfi_enable *) &pkt->rg_property_data[1];
+		hfi->enable = ((struct hal_enable *) pdata)->enable;
+		pkt->size += sizeof(u32) * 2;
+		break;
+	}
+	case HAL_CONFIG_VENC_BLUR_RESOLUTION:
+	{
+		struct hfi_frame_size *hfi;
+		struct hal_frame_size *prop = (struct hal_frame_size *) pdata;
+		u32 buffer_type;
+
+		pkt->rg_property_data[0] =
+			HFI_PROPERTY_CONFIG_VENC_BLUR_FRAME_SIZE;
+		hfi = (struct hfi_frame_size *) &pkt->rg_property_data[1];
+		buffer_type = get_hfi_buffer(prop->buffer_type);
+		if (buffer_type)
+			hfi->buffer_type = buffer_type;
+		else
+			return -EINVAL;
+
+		hfi->height = prop->height;
+		hfi->width = prop->width;
+		pkt->size += sizeof(u32) + sizeof(struct hfi_frame_size);
+		break;
+	}
+	default:
+		rc = create_pkt_cmd_session_set_property(pkt,
+				session, ptype, pdata);
+	}
+	return rc;
+}
+
+int create_pkt_cmd_session_sync_process(
+		struct hfi_cmd_session_sync_process_packet *pkt,
+		struct hal_session *session)
+{
+	if (!pkt || !session)
+		return -EINVAL;
+
+	*pkt = (struct hfi_cmd_session_sync_process_packet) {0};
+	pkt->size = sizeof(*pkt);
+	pkt->packet_type = HFI_CMD_SESSION_SYNC;
+	pkt->session_id = hash32_ptr(session);
+	pkt->sync_id = 0;
+
+	return 0;
+}
+
+static struct hfi_packetization_ops hfi_default = {
+	.sys_init = create_pkt_cmd_sys_init,
+	.sys_pc_prep = create_pkt_cmd_sys_pc_prep,
+	.sys_idle_indicator = create_pkt_cmd_sys_idle_indicator,
+	.sys_power_control = create_pkt_cmd_sys_power_control,
+	.sys_set_resource = create_pkt_cmd_sys_set_resource,
+	.sys_debug_config = create_pkt_cmd_sys_debug_config,
+	.sys_coverage_config = create_pkt_cmd_sys_coverage_config,
+	.sys_release_resource = create_pkt_cmd_sys_release_resource,
+	.sys_ping = create_pkt_cmd_sys_ping,
+	.sys_image_version = create_pkt_cmd_sys_image_version,
+	.ssr_cmd = create_pkt_ssr_cmd,
+	.session_init = create_pkt_cmd_sys_session_init,
+	.session_cmd = create_pkt_cmd_session_cmd,
+	.session_set_buffers = create_pkt_cmd_session_set_buffers,
+	.session_release_buffers = create_pkt_cmd_session_release_buffers,
+	.session_etb_decoder = create_pkt_cmd_session_etb_decoder,
+	.session_etb_encoder = create_pkt_cmd_session_etb_encoder,
+	.session_ftb = create_pkt_cmd_session_ftb,
+	.session_parse_seq_header = create_pkt_cmd_session_parse_seq_header,
+	.session_get_seq_hdr = create_pkt_cmd_session_get_seq_hdr,
+	.session_get_buf_req = create_pkt_cmd_session_get_buf_req,
+	.session_flush = create_pkt_cmd_session_flush,
+	.session_get_property = create_pkt_cmd_session_get_property,
+	.session_set_property = create_pkt_cmd_session_set_property,
+};
+
+struct hfi_packetization_ops *get_venus_3x_ops(void)
+{
+	static struct hfi_packetization_ops hfi_venus_3x;
+
+	hfi_venus_3x = hfi_default;
+
+	/* Override new HFI functions for HFI_PACKETIZATION_3XX here. */
+	hfi_venus_3x.session_set_property =
+		create_3x_pkt_cmd_session_set_property;
+	hfi_venus_3x.session_get_property =
+		create_3x_pkt_cmd_session_get_property;
+	hfi_venus_3x.session_cmd = create_3x_pkt_cmd_session_cmd;
+	hfi_venus_3x.session_sync_process =
+		create_pkt_cmd_session_sync_process;
+
+	return &hfi_venus_3x;
+}
+
+struct hfi_packetization_ops *hfi_get_pkt_ops_handle(
+			enum hfi_packetization_type type)
+{
+	dprintk(VIDC_DBG, "%s selected\n",
+		type == HFI_PACKETIZATION_LEGACY ? "legacy packetization" :
+		type == HFI_PACKETIZATION_3XX ? "3xx packetization" :
+		"Unknown hfi");
+
+	switch (type) {
+	case HFI_PACKETIZATION_LEGACY:
+		return &hfi_default;
+	case HFI_PACKETIZATION_3XX:
+		return get_venus_3x_ops();
+	}
+
+	return NULL;
+}
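
get_venus_3x_ops() above uses a copy-then-override pattern: start from the legacy table, replace only the entries whose packetization changed. A toy mirror of that pattern; toy_ops and the functions below are hypothetical names, not driver code:

	#include <stdio.h>

	struct toy_ops {
		const char *(*set_property)(void);
		const char *(*get_property)(void);
	};

	static const char *legacy_set(void) { return "legacy set_property"; }
	static const char *legacy_get(void) { return "legacy get_property"; }
	static const char *v3x_set(void)    { return "3xx set_property"; }

	static struct toy_ops toy_default = {
		.set_property = legacy_set,
		.get_property = legacy_get,
	};

	static struct toy_ops *get_toy_3x_ops(void)
	{
		static struct toy_ops toy_3x;

		toy_3x = toy_default;		/* start from the legacy table */
		toy_3x.set_property = v3x_set;	/* override only what changed */
		return &toy_3x;
	}

	int main(void)
	{
		struct toy_ops *ops = get_toy_3x_ops();

		printf("%s\n", ops->set_property());	/* 3xx variant */
		printf("%s\n", ops->get_property());	/* legacy fallback */
		return 0;
	}
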
diff -Nruw linux-4.4.115-fbx/drivers/media/platform/msm./vidc/hfi_packetization.h linux-4.4.115-fbx/drivers/media/platform/msm/vidc/hfi_packetization.h
--- linux-4.4.115-fbx/drivers/media/platform/msm./vidc/hfi_packetization.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/media/platform/msm/vidc/hfi_packetization.h	2019-01-22 16:16:24.463255100 +0100
@@ -0,0 +1,102 @@
+/* Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef __HFI_PACKETIZATION__
+#define __HFI_PACKETIZATION__
+
+#include <linux/types.h>
+#include "vidc_hfi_helper.h"
+#include "vidc_hfi.h"
+#include "vidc_hfi_api.h"
+
+#define call_hfi_pkt_op(q, op, args...)			\
+	(((q) && (q)->pkt_ops && (q)->pkt_ops->op) ?	\
+	((q)->pkt_ops->op(args)) : 0)
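
The macro above short-circuits to 0 when the ops table or the requested op is missing, so callers can dispatch without open-coded NULL checks. A self-contained demonstration of the same guarded-dispatch shape (toy_ops/toy_dev are hypothetical; the named variadic `args...` is the GNU C extension the kernel itself relies on):

	#include <stdio.h>
	#include <stddef.h>

	struct toy_ops {
		int (*ping)(int arg);
	};

	struct toy_dev {
		struct toy_ops *pkt_ops;
	};

	#define call_toy_op(q, op, args...)			\
		(((q) && (q)->pkt_ops && (q)->pkt_ops->op) ?	\
		((q)->pkt_ops->op(args)) : 0)

	static int toy_ping(int arg)
	{
		return arg + 1;
	}

	int main(void)
	{
		struct toy_ops ops = { .ping = toy_ping };
		struct toy_dev dev = { .pkt_ops = &ops };
		struct toy_dev bare = { .pkt_ops = NULL };

		/* dispatches normally ... */
		printf("ping -> %d\n", call_toy_op(&dev, ping, 41));
		/* ... and degrades to 0 instead of dereferencing NULL */
		printf("missing ops -> %d\n", call_toy_op(&bare, ping, 41));
		return 0;
	}
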
+
+enum hfi_packetization_type {
+	HFI_PACKETIZATION_LEGACY,
+	HFI_PACKETIZATION_3XX,
+};
+
+struct hfi_packetization_ops {
+	int (*sys_init)(struct hfi_cmd_sys_init_packet *pkt, u32 arch_type);
+	int (*sys_pc_prep)(struct hfi_cmd_sys_pc_prep_packet *pkt);
+	int (*sys_idle_indicator)(struct hfi_cmd_sys_set_property_packet *pkt,
+		u32 enable);
+	int (*sys_power_control)(struct hfi_cmd_sys_set_property_packet *pkt,
+		u32 enable);
+	int (*sys_set_resource)(
+		struct hfi_cmd_sys_set_resource_packet *pkt,
+		struct vidc_resource_hdr *resource_hdr,
+		void *resource_value);
+	int (*sys_debug_config)(struct hfi_cmd_sys_set_property_packet *pkt,
+			u32 mode);
+	int (*sys_coverage_config)(struct hfi_cmd_sys_set_property_packet *pkt,
+			u32 mode);
+	int (*sys_release_resource)(
+		struct hfi_cmd_sys_release_resource_packet *pkt,
+		struct vidc_resource_hdr *resource_hdr);
+	int (*sys_ping)(struct hfi_cmd_sys_ping_packet *pkt);
+	int (*sys_image_version)(struct hfi_cmd_sys_get_property_packet *pkt);
+	int (*ssr_cmd)(enum hal_ssr_trigger_type type,
+		struct hfi_cmd_sys_test_ssr_packet *pkt);
+	int (*session_init)(
+		struct hfi_cmd_sys_session_init_packet *pkt,
+		struct hal_session *session,
+		u32 session_domain, u32 session_codec);
+	int (*session_cmd)(struct vidc_hal_session_cmd_pkt *pkt,
+		int pkt_type, struct hal_session *session);
+	int (*session_set_buffers)(
+		struct hfi_cmd_session_set_buffers_packet *pkt,
+		struct hal_session *session,
+		struct vidc_buffer_addr_info *buffer_info);
+	int (*session_release_buffers)(
+		struct hfi_cmd_session_release_buffer_packet *pkt,
+		struct hal_session *session,
+		struct vidc_buffer_addr_info *buffer_info);
+	int (*session_etb_decoder)(
+		struct hfi_cmd_session_empty_buffer_compressed_packet *pkt,
+		struct hal_session *session,
+		struct vidc_frame_data *input_frame);
+	int (*session_etb_encoder)(
+		struct hfi_cmd_session_empty_buffer_uncompressed_plane0_packet
+		*pkt, struct hal_session *session,
+		struct vidc_frame_data *input_frame);
+	int (*session_ftb)(struct hfi_cmd_session_fill_buffer_packet *pkt,
+		struct hal_session *session,
+		struct vidc_frame_data *output_frame);
+	int (*session_parse_seq_header)(
+		struct hfi_cmd_session_parse_sequence_header_packet *pkt,
+		struct hal_session *session, struct vidc_seq_hdr *seq_hdr);
+	int (*session_get_seq_hdr)(
+		struct hfi_cmd_session_get_sequence_header_packet *pkt,
+		struct hal_session *session, struct vidc_seq_hdr *seq_hdr);
+	int (*session_get_buf_req)(
+		struct hfi_cmd_session_get_property_packet *pkt,
+		struct hal_session *session);
+	int (*session_flush)(struct hfi_cmd_session_flush_packet *pkt,
+		struct hal_session *session, enum hal_flush flush_mode);
+	int (*session_get_property)(
+		struct hfi_cmd_session_get_property_packet *pkt,
+		struct hal_session *session, enum hal_property ptype);
+	int (*session_set_property)(
+		struct hfi_cmd_session_set_property_packet *pkt,
+		struct hal_session *session,
+		enum hal_property ptype, void *pdata);
+	int (*session_sync_process)(
+		struct hfi_cmd_session_sync_process_packet *pkt,
+		struct hal_session *session);
+};
+
+struct hfi_packetization_ops *hfi_get_pkt_ops_handle(
+		enum hfi_packetization_type);
+#endif /* __HFI_PACKETIZATION__ */
diff -Nruw linux-4.4.115-fbx/drivers/media/platform/msm./vidc/hfi_response_handler.c linux-4.4.115-fbx/drivers/media/platform/msm/vidc/hfi_response_handler.c
--- linux-4.4.115-fbx/drivers/media/platform/msm./vidc/hfi_response_handler.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/media/platform/msm/vidc/hfi_response_handler.c	2019-10-29 09:26:23.953206212 +0100
@@ -0,0 +1,1959 @@
+/* Copyright (c) 2012-2016,2019 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/bitops.h>
+#include <linux/slab.h>
+#include <linux/list.h>
+#include <linux/interrupt.h>
+#include <linux/hash.h>
+#include <soc/qcom/smem.h>
+#include "vidc_hfi_helper.h"
+#include "vidc_hfi_io.h"
+#include "msm_vidc_debug.h"
+#include "vidc_hfi.h"
+
+static enum vidc_status hfi_parse_init_done_properties(
+		struct msm_vidc_capability *capability,
+		u32 num_sessions, u8 *data_ptr, u32 num_properties,
+		u32 rem_bytes, u32 codec, u32 domain);
+
+static enum vidc_status hfi_map_err_status(u32 hfi_err)
+{
+	enum vidc_status vidc_err;
+	switch (hfi_err) {
+	case HFI_ERR_NONE:
+	case HFI_ERR_SESSION_SAME_STATE_OPERATION:
+		vidc_err = VIDC_ERR_NONE;
+		break;
+	case HFI_ERR_SYS_FATAL:
+		vidc_err = VIDC_ERR_HW_FATAL;
+		break;
+	case HFI_ERR_SYS_VERSION_MISMATCH:
+	case HFI_ERR_SYS_INVALID_PARAMETER:
+	case HFI_ERR_SYS_SESSION_ID_OUT_OF_RANGE:
+	case HFI_ERR_SESSION_INVALID_PARAMETER:
+	case HFI_ERR_SESSION_INVALID_SESSION_ID:
+	case HFI_ERR_SESSION_INVALID_STREAM_ID:
+		vidc_err = VIDC_ERR_BAD_PARAM;
+		break;
+	case HFI_ERR_SYS_INSUFFICIENT_RESOURCES:
+	case HFI_ERR_SYS_UNSUPPORTED_DOMAIN:
+	case HFI_ERR_SYS_UNSUPPORTED_CODEC:
+	case HFI_ERR_SESSION_UNSUPPORTED_PROPERTY:
+	case HFI_ERR_SESSION_UNSUPPORTED_SETTING:
+	case HFI_ERR_SESSION_INSUFFICIENT_RESOURCES:
+	case HFI_ERR_SESSION_UNSUPPORTED_STREAM:
+		vidc_err = VIDC_ERR_NOT_SUPPORTED;
+		break;
+	case HFI_ERR_SYS_MAX_SESSIONS_REACHED:
+		vidc_err = VIDC_ERR_MAX_CLIENTS;
+		break;
+	case HFI_ERR_SYS_SESSION_IN_USE:
+		vidc_err = VIDC_ERR_CLIENT_PRESENT;
+		break;
+	case HFI_ERR_SESSION_FATAL:
+		vidc_err = VIDC_ERR_CLIENT_FATAL;
+		break;
+	case HFI_ERR_SESSION_BAD_POINTER:
+		vidc_err = VIDC_ERR_BAD_PARAM;
+		break;
+	case HFI_ERR_SESSION_INCORRECT_STATE_OPERATION:
+		vidc_err = VIDC_ERR_BAD_STATE;
+		break;
+	case HFI_ERR_SESSION_STREAM_CORRUPT:
+	case HFI_ERR_SESSION_STREAM_CORRUPT_OUTPUT_STALLED:
+		vidc_err = VIDC_ERR_BITSTREAM_ERR;
+		break;
+	case HFI_ERR_SESSION_SYNC_FRAME_NOT_DETECTED:
+		vidc_err = VIDC_ERR_IFRAME_EXPECTED;
+		break;
+	case HFI_ERR_SESSION_START_CODE_NOT_FOUND:
+		vidc_err = VIDC_ERR_START_CODE_NOT_FOUND;
+		break;
+	case HFI_ERR_SESSION_EMPTY_BUFFER_DONE_OUTPUT_PENDING:
+	default:
+		vidc_err = VIDC_ERR_FAIL;
+		break;
+	}
+	return vidc_err;
+}
+
+static enum msm_vidc_pixel_depth get_hal_pixel_depth(u32 hfi_bit_depth)
+{
+	switch (hfi_bit_depth) {
+	case HFI_BITDEPTH_8: return MSM_VIDC_BIT_DEPTH_8;
+	case HFI_BITDEPTH_9:
+	case HFI_BITDEPTH_10: return MSM_VIDC_BIT_DEPTH_10;
+	}
+	dprintk(VIDC_ERR, "Unsupported bit depth: %d\n", hfi_bit_depth);
+	return MSM_VIDC_BIT_DEPTH_UNSUPPORTED;
+}
+
+static int hfi_process_sess_evt_seq_changed(u32 device_id,
+		struct hfi_msg_event_notify_packet *pkt,
+		struct msm_vidc_cb_info *info)
+{
+	struct msm_vidc_cb_event event_notify = {0};
+	int num_properties_changed;
+	struct hfi_frame_size *frame_sz;
+	struct hfi_profile_level *profile_level;
+	struct hfi_bit_depth *pixel_depth;
+	struct hfi_pic_struct *pic_struct;
+	u8 *data_ptr;
+	int prop_id;
+	enum msm_vidc_pixel_depth luma_bit_depth, chroma_bit_depth;
+	struct hfi_colour_space *colour_info;
+
+	if (sizeof(struct hfi_msg_event_notify_packet) > pkt->size) {
+		dprintk(VIDC_ERR,
+				"%s: bad_pkt_size\n", __func__);
+		return -E2BIG;
+	}
+
+	event_notify.device_id = device_id;
+	event_notify.session_id = (void *)(uintptr_t)pkt->session_id;
+	event_notify.status = VIDC_ERR_NONE;
+	num_properties_changed = pkt->event_data2;
+	switch (pkt->event_data1) {
+	case HFI_EVENT_DATA_SEQUENCE_CHANGED_SUFFICIENT_BUFFER_RESOURCES:
+		event_notify.hal_event_type =
+			HAL_EVENT_SEQ_CHANGED_SUFFICIENT_RESOURCES;
+		break;
+	case HFI_EVENT_DATA_SEQUENCE_CHANGED_INSUFFICIENT_BUFFER_RESOURCES:
+		event_notify.hal_event_type =
+			HAL_EVENT_SEQ_CHANGED_INSUFFICIENT_RESOURCES;
+		break;
+	default:
+		break;
+	}
+
+	if (num_properties_changed) {
+		data_ptr = (u8 *) &pkt->rg_ext_event_data[0];
+		do {
+			prop_id = (int) *((u32 *)data_ptr);
+			switch (prop_id) {
+			case HFI_PROPERTY_PARAM_FRAME_SIZE:
+				data_ptr = data_ptr + sizeof(u32);
+				frame_sz =
+					(struct hfi_frame_size *) data_ptr;
+				event_notify.width = frame_sz->width;
+				event_notify.height = frame_sz->height;
+				dprintk(VIDC_DBG, "height: %d width: %d\n",
+					frame_sz->height, frame_sz->width);
+				data_ptr +=
+					sizeof(struct hfi_frame_size);
+				break;
+			case HFI_PROPERTY_PARAM_PROFILE_LEVEL_CURRENT:
+				data_ptr = data_ptr + sizeof(u32);
+				profile_level =
+					(struct hfi_profile_level *) data_ptr;
+				dprintk(VIDC_DBG, "profile: %d level: %d\n",
+					profile_level->profile,
+					profile_level->level);
+				data_ptr +=
+					sizeof(struct hfi_profile_level);
+				break;
+			case HFI_PROPERTY_PARAM_VDEC_PIXEL_BITDEPTH:
+				data_ptr = data_ptr + sizeof(u32);
+				pixel_depth = (struct hfi_bit_depth *) data_ptr;
+				/*
+				 * Luma and chroma can have different bitdepths.
+				 * Driver should rely on luma and chroma
+				 * bitdepth for determining output bitdepth
+				 * type.
+				 *
+				 * pixel_depth->bit_depth will include luma
+				 * bitdepth info in bits 0..15 and chroma
+				 * bitdepth in bits 16..31.
+				 */
+				luma_bit_depth = get_hal_pixel_depth(
+					pixel_depth->bit_depth &
+					GENMASK(15, 0));
+				chroma_bit_depth = get_hal_pixel_depth(
+					(pixel_depth->bit_depth &
+					GENMASK(31, 16)) >> 16);
+				if (luma_bit_depth == MSM_VIDC_BIT_DEPTH_10 ||
+					chroma_bit_depth ==
+						MSM_VIDC_BIT_DEPTH_10)
+					event_notify.bit_depth =
+						MSM_VIDC_BIT_DEPTH_10;
+				else
+					event_notify.bit_depth = luma_bit_depth;
+				dprintk(VIDC_DBG,
+					"bitdepth(%d), luma_bit_depth(%d), chroma_bit_depth(%d)\n",
+					event_notify.bit_depth, luma_bit_depth,
+					chroma_bit_depth);
+				data_ptr += sizeof(struct hfi_bit_depth);
+				break;
+			case HFI_PROPERTY_PARAM_VDEC_PIC_STRUCT:
+				data_ptr = data_ptr + sizeof(u32);
+				pic_struct = (struct hfi_pic_struct *) data_ptr;
+				event_notify.pic_struct =
+					pic_struct->progressive_only;
+				dprintk(VIDC_DBG,
+					"Progressive only flag: %d\n",
+						pic_struct->progressive_only);
+				data_ptr +=
+					sizeof(struct hfi_pic_struct);
+				break;
+			case HFI_PROPERTY_PARAM_VDEC_COLOUR_SPACE:
+				data_ptr = data_ptr + sizeof(u32);
+				colour_info =
+					(struct hfi_colour_space *) data_ptr;
+				event_notify.colour_space =
+					colour_info->colour_space;
+				dprintk(VIDC_DBG,
+					"Colour space value is: %d\n",
+						colour_info->colour_space);
+				data_ptr +=
+					sizeof(struct hfi_colour_space);
+				break;
+			default:
+				dprintk(VIDC_ERR,
+					"%s cmd: %#x not supported\n",
+					__func__, prop_id);
+				break;
+			}
+			num_properties_changed--;
+		} while (num_properties_changed > 0);
+	}
+
+	*info = (struct msm_vidc_cb_info) {
+		.response_type =  HAL_SESSION_EVENT_CHANGE,
+		.response.event = event_notify,
+	};
+
+	return 0;
+}
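
The bit_depth word decoded above carries luma depth in bits 0..15 and chroma depth in bits 16..31, and the session is promoted to 10-bit if either plane needs it. A standalone check of that extraction, using a userspace stand-in for the kernel's GENMASK():

	#include <stdint.h>
	#include <stdio.h>

	/* Userspace stand-in for the kernel's GENMASK(h, l). */
	#define GENMASK_U32(h, l) \
		(((~(uint32_t)0) << (l)) & (~(uint32_t)0 >> (31 - (h))))

	int main(void)
	{
		/* e.g. firmware reporting 8-bit luma, 10-bit chroma */
		uint32_t bit_depth = (10u << 16) | 8u;
		uint32_t luma = bit_depth & GENMASK_U32(15, 0);
		uint32_t chroma = (bit_depth & GENMASK_U32(31, 16)) >> 16;

		printf("luma=%u chroma=%u -> session is %s\n", luma, chroma,
		       (luma == 10 || chroma == 10) ? "10-bit" : "8-bit");
		return 0;
	}
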
+
+static int hfi_process_evt_release_buffer_ref(u32 device_id,
+		struct hfi_msg_event_notify_packet *pkt,
+		struct msm_vidc_cb_info *info)
+{
+	struct msm_vidc_cb_event event_notify = {0};
+	struct hfi_msg_release_buffer_ref_event_packet *data;
+
+	dprintk(VIDC_DBG,
+			"RECEIVED: EVENT_NOTIFY - release_buffer_reference\n");
+	if (sizeof(struct hfi_msg_event_notify_packet) > pkt->size) {
+		dprintk(VIDC_ERR,
+				"%s: bad_pkt_size\n", __func__);
+		return -E2BIG;
+	}
+
+	data = (struct hfi_msg_release_buffer_ref_event_packet *)
+				pkt->rg_ext_event_data;
+
+	event_notify.device_id = device_id;
+	event_notify.session_id = (void *)(uintptr_t)pkt->session_id;
+	event_notify.status = VIDC_ERR_NONE;
+	event_notify.hal_event_type = HAL_EVENT_RELEASE_BUFFER_REFERENCE;
+	event_notify.packet_buffer = data->packet_buffer;
+	event_notify.extra_data_buffer = data->extra_data_buffer;
+
+	*info = (struct msm_vidc_cb_info) {
+		.response_type =  HAL_SESSION_EVENT_CHANGE,
+		.response.event = event_notify,
+	};
+
+	return 0;
+}
+
+static int hfi_process_sys_error(u32 device_id, struct msm_vidc_cb_info *info)
+{
+	struct msm_vidc_cb_cmd_done cmd_done = {0};
+	cmd_done.device_id = device_id;
+
+	*info = (struct msm_vidc_cb_info) {
+		.response_type =  HAL_SYS_ERROR,
+		.response.cmd = cmd_done,
+	};
+
+	return 0;
+}
+
+static int hfi_process_session_error(u32 device_id,
+		struct hfi_msg_event_notify_packet *pkt,
+		struct msm_vidc_cb_info *info)
+{
+	struct msm_vidc_cb_cmd_done cmd_done = {0};
+	cmd_done.device_id = device_id;
+	cmd_done.session_id = (void *)(uintptr_t)pkt->session_id;
+	cmd_done.status = hfi_map_err_status(pkt->event_data1);
+	dprintk(VIDC_INFO, "Received: SESSION_ERROR with event id : %d\n",
+		pkt->event_data1);
+	switch (pkt->event_data1) {
+	case HFI_ERR_SESSION_INVALID_SCALE_FACTOR:
+	case HFI_ERR_SESSION_UNSUPPORT_BUFFERTYPE:
+	case HFI_ERR_SESSION_UNSUPPORTED_SETTING:
+	case HFI_ERR_SESSION_UPSCALE_NOT_SUPPORTED:
+		cmd_done.status = VIDC_ERR_NONE;
+		dprintk(VIDC_INFO, "Non Fatal: HFI_EVENT_SESSION_ERROR\n");
+		*info = (struct msm_vidc_cb_info) {
+			.response_type =  HAL_RESPONSE_UNUSED,
+			.response.cmd = cmd_done,
+		};
+		return 0;
+	default:
+		dprintk(VIDC_ERR, "HFI_EVENT_SESSION_ERROR\n");
+		*info = (struct msm_vidc_cb_info) {
+			.response_type =  HAL_SESSION_ERROR,
+			.response.cmd = cmd_done,
+		};
+		return 0;
+	}
+}
+
+static int hfi_process_event_notify(u32 device_id,
+		struct hfi_msg_event_notify_packet *pkt,
+		struct msm_vidc_cb_info *info)
+{
+	dprintk(VIDC_DBG, "Received: EVENT_NOTIFY\n");
+
+	if (pkt->size < sizeof(struct hfi_msg_event_notify_packet)) {
+		dprintk(VIDC_ERR, "Invalid Params\n");
+		return -E2BIG;
+	}
+
+	switch (pkt->event_id) {
+	case HFI_EVENT_SYS_ERROR:
+		dprintk(VIDC_ERR, "HFI_EVENT_SYS_ERROR: %d, %#x\n",
+			pkt->event_data1, pkt->event_data2);
+		return hfi_process_sys_error(device_id, info);
+	case HFI_EVENT_SESSION_ERROR:
+		dprintk(VIDC_INFO, "HFI_EVENT_SESSION_ERROR[%#x]\n",
+				pkt->session_id);
+		return hfi_process_session_error(device_id, pkt, info);
+
+	case HFI_EVENT_SESSION_SEQUENCE_CHANGED:
+		dprintk(VIDC_INFO, "HFI_EVENT_SESSION_SEQUENCE_CHANGED[%#x]\n",
+			pkt->session_id);
+		return hfi_process_sess_evt_seq_changed(device_id, pkt, info);
+
+	case HFI_EVENT_RELEASE_BUFFER_REFERENCE:
+		dprintk(VIDC_INFO, "HFI_EVENT_RELEASE_BUFFER_REFERENCE[%#x]\n",
+			pkt->session_id);
+		return hfi_process_evt_release_buffer_ref(device_id, pkt, info);
+
+	case HFI_EVENT_SESSION_PROPERTY_CHANGED:
+	default:
+		*info = (struct msm_vidc_cb_info) {
+			.response_type =  HAL_RESPONSE_UNUSED,
+		};
+
+		return 0;
+	}
+}
+
+static int hfi_process_sys_init_done(u32 device_id,
+		struct hfi_msg_sys_init_done_packet *pkt,
+		struct msm_vidc_cb_info *info)
+{
+	struct msm_vidc_cb_cmd_done cmd_done = {0};
+	enum vidc_status status = VIDC_ERR_NONE;
+
+	dprintk(VIDC_DBG, "RECEIVED: SYS_INIT_DONE\n");
+	if (sizeof(struct hfi_msg_sys_init_done_packet) > pkt->size) {
+		dprintk(VIDC_ERR, "%s: bad_pkt_size: %d\n", __func__,
+				pkt->size);
+		return -E2BIG;
+	}
+	if (!pkt->num_properties) {
+		dprintk(VIDC_ERR,
+				"%s: no properties\n", __func__);
+		status = VIDC_ERR_FAIL;
+		goto err_no_prop;
+	}
+
+	status = hfi_map_err_status(pkt->error_type);
+	if (status) {
+		dprintk(VIDC_ERR, "%s: status %#x\n",
+			__func__, status);
+		goto err_no_prop;
+	}
+
+err_no_prop:
+	cmd_done.device_id = device_id;
+	cmd_done.session_id = NULL;
+	cmd_done.status = (u32)status;
+	cmd_done.size = sizeof(struct vidc_hal_sys_init_done);
+	*info = (struct msm_vidc_cb_info) {
+		.response_type =  HAL_SYS_INIT_DONE,
+		.response.cmd = cmd_done,
+	};
+	return 0;
+}
+
+static int hfi_process_sys_rel_resource_done(u32 device_id,
+		struct hfi_msg_sys_release_resource_done_packet *pkt,
+		struct msm_vidc_cb_info *info)
+{
+	struct msm_vidc_cb_cmd_done cmd_done = {0};
+	enum vidc_status status = VIDC_ERR_NONE;
+	u32 pkt_size;
+	dprintk(VIDC_DBG, "RECEIVED: SYS_RELEASE_RESOURCE_DONE\n");
+	pkt_size = sizeof(struct hfi_msg_sys_release_resource_done_packet);
+	if (pkt_size > pkt->size) {
+		dprintk(VIDC_ERR,
+			"hal_process_sys_rel_resource_done: bad size: %d\n",
+			pkt->size);
+		return -E2BIG;
+	}
+
+	status = hfi_map_err_status(pkt->error_type);
+	cmd_done.device_id = device_id;
+	cmd_done.session_id = NULL;
+	cmd_done.status = (u32) status;
+	cmd_done.size = 0;
+
+	*info = (struct msm_vidc_cb_info) {
+		.response_type =  HAL_SYS_RELEASE_RESOURCE_DONE,
+		.response.cmd = cmd_done,
+	};
+
+	return 0;
+}
+
+enum hal_capability get_hal_cap_type(u32 capability_type)
+{
+	enum hal_capability hal_cap = 0;
+
+	switch (capability_type) {
+	case HFI_CAPABILITY_FRAME_WIDTH:
+		hal_cap = HAL_CAPABILITY_FRAME_WIDTH;
+		break;
+	case HFI_CAPABILITY_FRAME_HEIGHT:
+		hal_cap = HAL_CAPABILITY_FRAME_HEIGHT;
+		break;
+	case HFI_CAPABILITY_MBS_PER_FRAME:
+		hal_cap = HAL_CAPABILITY_MBS_PER_FRAME;
+		break;
+	case HFI_CAPABILITY_MBS_PER_SECOND:
+		hal_cap = HAL_CAPABILITY_MBS_PER_SECOND;
+		break;
+	case HFI_CAPABILITY_FRAMERATE:
+		hal_cap = HAL_CAPABILITY_FRAMERATE;
+		break;
+	case HFI_CAPABILITY_SCALE_X:
+		hal_cap = HAL_CAPABILITY_SCALE_X;
+		break;
+	case HFI_CAPABILITY_SCALE_Y:
+		hal_cap = HAL_CAPABILITY_SCALE_Y;
+		break;
+	case HFI_CAPABILITY_BITRATE:
+		hal_cap = HAL_CAPABILITY_BITRATE;
+		break;
+	case HFI_CAPABILITY_BFRAME:
+		hal_cap = HAL_CAPABILITY_BFRAME;
+		break;
+	case HFI_CAPABILITY_PEAKBITRATE:
+		hal_cap = HAL_CAPABILITY_PEAKBITRATE;
+		break;
+	case HFI_CAPABILITY_HIER_P_NUM_ENH_LAYERS:
+		hal_cap = HAL_CAPABILITY_HIER_P_NUM_ENH_LAYERS;
+		break;
+	case HFI_CAPABILITY_ENC_LTR_COUNT:
+		hal_cap = HAL_CAPABILITY_ENC_LTR_COUNT;
+		break;
+	case HFI_CAPABILITY_CP_OUTPUT2_THRESH:
+		hal_cap = HAL_CAPABILITY_SECURE_OUTPUT2_THRESHOLD;
+		break;
+	case HFI_CAPABILITY_HIER_B_NUM_ENH_LAYERS:
+		hal_cap = HAL_CAPABILITY_HIER_B_NUM_ENH_LAYERS;
+		break;
+	case HFI_CAPABILITY_LCU_SIZE:
+		hal_cap = HAL_CAPABILITY_LCU_SIZE;
+		break;
+	case HFI_CAPABILITY_HIER_P_HYBRID_NUM_ENH_LAYERS:
+		hal_cap = HAL_CAPABILITY_HIER_P_HYBRID_NUM_ENH_LAYERS;
+		break;
+	case HFI_CAPABILITY_MBS_PER_SECOND_POWERSAVE:
+		hal_cap = HAL_CAPABILITY_MBS_PER_SECOND_POWER_SAVE;
+		break;
+	default:
+		dprintk(VIDC_DBG, "%s: unknown capablity %#x\n",
+			__func__, capability_type);
+		break;
+	}
+
+	return hal_cap;
+}
+
+static inline void copy_cap_prop(
+		struct hfi_capability_supported *in,
+		struct msm_vidc_capability *capability)
+{
+	struct hal_capability_supported *out = NULL;
+
+	if (!in || !capability) {
+		dprintk(VIDC_ERR, "%s Invalid input parameters\n",
+			__func__);
+		return;
+	}
+
+	switch (in->capability_type) {
+	case HFI_CAPABILITY_FRAME_WIDTH:
+		out = &capability->width;
+		break;
+	case HFI_CAPABILITY_FRAME_HEIGHT:
+		out = &capability->height;
+		break;
+	case HFI_CAPABILITY_MBS_PER_FRAME:
+		out = &capability->mbs_per_frame;
+		break;
+	case HFI_CAPABILITY_MBS_PER_SECOND:
+		out = &capability->mbs_per_sec;
+		break;
+	case HFI_CAPABILITY_FRAMERATE:
+		out = &capability->frame_rate;
+		break;
+	case HFI_CAPABILITY_SCALE_X:
+		out = &capability->scale_x;
+		break;
+	case HFI_CAPABILITY_SCALE_Y:
+		out = &capability->scale_y;
+		break;
+	case HFI_CAPABILITY_BITRATE:
+		out = &capability->bitrate;
+		break;
+	case HFI_CAPABILITY_BFRAME:
+		out = &capability->bframe;
+		break;
+	case HFI_CAPABILITY_PEAKBITRATE:
+		out = &capability->peakbitrate;
+		break;
+	case HFI_CAPABILITY_HIER_P_NUM_ENH_LAYERS:
+		out = &capability->hier_p;
+		break;
+	case HFI_CAPABILITY_ENC_LTR_COUNT:
+		out = &capability->ltr_count;
+		break;
+	case HFI_CAPABILITY_CP_OUTPUT2_THRESH:
+		out = &capability->secure_output2_threshold;
+		break;
+	case HFI_CAPABILITY_HIER_B_NUM_ENH_LAYERS:
+		out = &capability->hier_b;
+		break;
+	case HFI_CAPABILITY_LCU_SIZE:
+		out = &capability->lcu_size;
+		break;
+	case HFI_CAPABILITY_HIER_P_HYBRID_NUM_ENH_LAYERS:
+		out = &capability->hier_p_hybrid;
+		break;
+	case HFI_CAPABILITY_MBS_PER_SECOND_POWERSAVE:
+		out = &capability->mbs_per_sec_power_save;
+		break;
+	default:
+		dprintk(VIDC_DBG, "%s: unknown capablity %#x\n",
+			__func__, in->capability_type);
+		break;
+	}
+
+	if (out) {
+		out->capability_type = get_hal_cap_type(in->capability_type);
+		out->min = in->min;
+		out->max = in->max;
+		out->step_size = in->step_size;
+	}
+
+	return;
+}
+
+static int hfi_fill_codec_info(u8 *data_ptr,
+		struct vidc_hal_sys_init_done *sys_init_done)
+{
+	u32 i;
+	u32 codecs = 0, codec_count = 0, size = 0;
+	struct msm_vidc_capability *capability;
+	u32 prop_id = *((u32 *)data_ptr);
+	u8 *orig_data_ptr = data_ptr;
+
+	if (prop_id == HFI_PROPERTY_PARAM_CODEC_SUPPORTED) {
+		struct hfi_codec_supported *prop;
+
+		data_ptr = data_ptr + sizeof(u32);
+		prop = (struct hfi_codec_supported *) data_ptr;
+		sys_init_done->dec_codec_supported =
+			prop->decoder_codec_supported;
+		sys_init_done->enc_codec_supported =
+			prop->encoder_codec_supported;
+		size = sizeof(struct hfi_codec_supported) + sizeof(u32);
+	} else {
+		dprintk(VIDC_WARN,
+			"%s: prop_id %#x, expected codec_supported property\n",
+			__func__, prop_id);
+	}
+
+	codecs = sys_init_done->dec_codec_supported;
+	for (i = 0; i < 8 * sizeof(codecs); i++) {
+		if ((1 << i) & codecs) {
+			capability =
+				&sys_init_done->capabilities[codec_count++];
+			capability->codec =
+				vidc_get_hal_codec((1 << i) & codecs);
+			capability->domain =
+				vidc_get_hal_domain(HFI_VIDEO_DOMAIN_DECODER);
+			if (codec_count == VIDC_MAX_DECODE_SESSIONS) {
+				dprintk(VIDC_ERR,
+					"Max supported decoder sessions reached");
+				break;
+			}
+		}
+	}
+	codecs = sys_init_done->enc_codec_supported;
+	for (i = 0; i < 8 * sizeof(codecs); i++) {
+		if ((1 << i) & codecs) {
+			capability =
+				&sys_init_done->capabilities[codec_count++];
+			capability->codec =
+				vidc_get_hal_codec((1 << i) & codecs);
+			capability->domain =
+				vidc_get_hal_domain(HFI_VIDEO_DOMAIN_ENCODER);
+			if (codec_count == VIDC_MAX_SESSIONS) {
+				dprintk(VIDC_ERR,
+					"Max supported sessions reached");
+				break;
+			}
+		}
+	}
+	sys_init_done->codec_count = codec_count;
+
+	prop_id = *((u32 *)(orig_data_ptr + size));
+	if (prop_id == HFI_PROPERTY_PARAM_MAX_SESSIONS_SUPPORTED) {
+		struct hfi_max_sessions_supported *prop =
+			(struct hfi_max_sessions_supported *)
+			(orig_data_ptr + size + sizeof(u32));
+
+		sys_init_done->max_sessions_supported = prop->max_sessions;
+		size += sizeof(struct hfi_max_sessions_supported) + sizeof(u32);
+		dprintk(VIDC_DBG, "max_sessions_supported %d\n",
+				prop->max_sessions);
+	}
+	return size;
+}
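
hfi_fill_codec_info() walks the decoder and encoder masks one set bit at a time, allocating one capability slot per supported codec. The walk reduces to the following sketch, where a printf stands in for vidc_get_hal_codec() and the capability bookkeeping:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint32_t codecs = 0x5;	/* example mask: bits 0 and 2 set */
		unsigned int i, count = 0;

		for (i = 0; i < 8 * sizeof(codecs); i++) {
			if ((1u << i) & codecs) {
				/* the driver maps this bit to a hal codec here */
				printf("session %u <- codec bit %u (%#x)\n",
				       count++, i, 1u << i);
			}
		}
		return 0;
	}
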
+
+enum vidc_status hfi_process_session_init_done_prop_read(
+		struct hfi_msg_sys_session_init_done_packet *pkt,
+		struct vidc_hal_session_init_done *session_init_done)
+{
+	enum vidc_status status = VIDC_ERR_NONE;
+	struct msm_vidc_capability *capability = NULL;
+	u32 rem_bytes, num_properties;
+	u8 *data_ptr;
+
+	rem_bytes = pkt->size -
+		sizeof(struct hfi_msg_sys_session_init_done_packet) +
+		sizeof(u32);
+	if (!rem_bytes) {
+		dprintk(VIDC_ERR, "%s: invalid property info\n", __func__);
+		return VIDC_ERR_FAIL;
+	}
+
+	status = hfi_map_err_status(pkt->error_type);
+	if (status) {
+		dprintk(VIDC_ERR, "%s: error status 0x%x\n", __func__, status);
+		return status;
+	}
+
+	data_ptr = (u8 *)&pkt->rg_property_data[0];
+	num_properties = pkt->num_properties;
+
+	capability = &session_init_done->capability;
+	status = hfi_parse_init_done_properties(
+			capability, 1, data_ptr, num_properties, rem_bytes,
+			vidc_get_hfi_codec(capability->codec),
+			vidc_get_hfi_domain(capability->domain));
+	if (status) {
+		dprintk(VIDC_ERR, "%s: parse status 0x%x\n", __func__, status);
+		return status;
+	}
+
+	return status;
+}
+
+static int copy_caps_to_sessions(struct hfi_capability_supported *cap,
+		u32 num_caps, struct msm_vidc_capability *capabilities,
+		u32 num_sessions, u32 codecs, u32 domain)
+{
+	u32 i = 0, j = 0;
+	struct msm_vidc_capability *capability;
+	u32 sess_codec;
+	u32 sess_domain;
+
+	/*
+	 * iterate over num_sessions and copy all the capabilities
+	 * to matching sessions.
+	 */
+	for (i = 0; i < num_sessions; i++) {
+		sess_codec = 0;
+		sess_domain = 0;
+		capability = &capabilities[i];
+
+		if (capability->codec)
+			sess_codec =
+				vidc_get_hfi_codec(capability->codec);
+		if (capability->domain)
+			sess_domain =
+				vidc_get_hfi_domain(capability->domain);
+
+		if (!(sess_codec & codecs && sess_domain & domain))
+			continue;
+
+		for (j = 0; j < num_caps; j++)
+			copy_cap_prop(&cap[j], capability);
+	}
+
+	return 0;
+}
+
+static int copy_alloc_mode_to_sessions(
+		struct hfi_buffer_alloc_mode_supported *prop,
+		struct msm_vidc_capability *capabilities,
+		u32 num_sessions, u32 codecs, u32 domain)
+{
+	u32 i = 0, j = 0;
+	struct msm_vidc_capability *capability;
+	u32 sess_codec;
+	u32 sess_domain;
+
+	/*
+	 * iterate over num_sessions and copy all the entries
+	 * to matching sessions.
+	 */
+	for (i = 0; i < num_sessions; i++) {
+		sess_codec = 0;
+		sess_domain = 0;
+		capability = &capabilities[i];
+
+		if (capability->codec)
+			sess_codec =
+				vidc_get_hfi_codec(capability->codec);
+		if (capability->domain)
+			sess_domain =
+				vidc_get_hfi_domain(capability->domain);
+
+		if (!(sess_codec & codecs && sess_domain & domain))
+			continue;
+
+		for (j = 0; j < prop->num_entries; j++) {
+			if (prop->buffer_type == HFI_BUFFER_OUTPUT ||
+				prop->buffer_type == HFI_BUFFER_OUTPUT2) {
+				switch (prop->rg_data[j]) {
+				case HFI_BUFFER_MODE_STATIC:
+					capability->alloc_mode_out |=
+						HAL_BUFFER_MODE_STATIC;
+					break;
+				case HFI_BUFFER_MODE_RING:
+					capability->alloc_mode_out |=
+						HAL_BUFFER_MODE_RING;
+					break;
+				case HFI_BUFFER_MODE_DYNAMIC:
+					capability->alloc_mode_out |=
+						HAL_BUFFER_MODE_DYNAMIC;
+					break;
+				}
+			} else if (prop->buffer_type == HFI_BUFFER_INPUT) {
+				switch (prop->rg_data[j]) {
+				case HFI_BUFFER_MODE_STATIC:
+					capability->alloc_mode_in |=
+						HAL_BUFFER_MODE_STATIC;
+					break;
+				case HFI_BUFFER_MODE_RING:
+					capability->alloc_mode_in |=
+						HAL_BUFFER_MODE_RING;
+					break;
+				case HFI_BUFFER_MODE_DYNAMIC:
+					capability->alloc_mode_in |=
+						HAL_BUFFER_MODE_DYNAMIC;
+					break;
+				}
+			}
+		}
+	}
+
+	return 0;
+}
+
+static enum vidc_status hfi_parse_init_done_properties(
+		struct msm_vidc_capability *capabilities,
+		u32 num_sessions, u8 *data_ptr, u32 num_properties,
+		u32 rem_bytes, u32 codecs, u32 domain)
+{
+	enum vidc_status status = VIDC_ERR_NONE;
+	u32 prop_id, next_offset;
+
+	while (status == VIDC_ERR_NONE && num_properties &&
+			rem_bytes >= sizeof(u32)) {
+
+		prop_id = *((u32 *)data_ptr);
+		next_offset = sizeof(u32);
+
+		switch (prop_id) {
+		case HFI_PROPERTY_PARAM_CODEC_MASK_SUPPORTED:
+		{
+			struct hfi_codec_mask_supported *prop =
+				(struct hfi_codec_mask_supported *)
+				(data_ptr + next_offset);
+
+			codecs = prop->codecs;
+			domain = prop->video_domains;
+			next_offset += sizeof(struct hfi_codec_mask_supported);
+			num_properties--;
+			break;
+		}
+		case HFI_PROPERTY_PARAM_CAPABILITY_SUPPORTED:
+		{
+			struct hfi_capability_supported_info *prop =
+				(struct hfi_capability_supported_info *)
+				(data_ptr + next_offset);
+
+			if ((rem_bytes - next_offset) < prop->num_capabilities *
+				sizeof(struct hfi_capability_supported)) {
+				status = VIDC_ERR_BAD_PARAM;
+				break;
+			}
+			next_offset += sizeof(u32) +
+				prop->num_capabilities *
+				sizeof(struct hfi_capability_supported);
+
+			copy_caps_to_sessions(&prop->rg_data[0],
+					prop->num_capabilities,
+					capabilities, num_sessions,
+					codecs, domain);
+			num_properties--;
+			break;
+		}
+		case HFI_PROPERTY_PARAM_UNCOMPRESSED_FORMAT_SUPPORTED:
+		{
+			struct hfi_uncompressed_format_supported *prop =
+				(struct hfi_uncompressed_format_supported *)
+				(data_ptr + next_offset);
+			u32 num_format_entries;
+			char *fmt_ptr;
+			struct hfi_uncompressed_plane_info *plane_info;
+
+			if ((rem_bytes - next_offset) < sizeof(*prop)) {
+				status = VIDC_ERR_BAD_PARAM;
+				break;
+			}
+			num_format_entries = prop->format_entries;
+			next_offset = sizeof(*prop);
+			fmt_ptr = (char *)&prop->rg_format_info[0];
+
+			while (num_format_entries) {
+				u32 bytes_to_skip;
+				plane_info =
+				(struct hfi_uncompressed_plane_info *) fmt_ptr;
+
+				if ((rem_bytes - next_offset) <
+						sizeof(*plane_info)) {
+					status = VIDC_ERR_BAD_PARAM;
+					break;
+				}
+				bytes_to_skip = sizeof(*plane_info) -
+					sizeof(struct
+					hfi_uncompressed_plane_constraints) +
+					plane_info->num_planes *
+					sizeof(struct
+					hfi_uncompressed_plane_constraints);
+
+				fmt_ptr += bytes_to_skip;
+				next_offset += bytes_to_skip;
+				num_format_entries--;
+			}
+			num_properties--;
+			break;
+		}
+		case HFI_PROPERTY_PARAM_PROPERTIES_SUPPORTED:
+		{
+			struct hfi_properties_supported *prop =
+				(struct hfi_properties_supported *)
+				(data_ptr + next_offset);
+			next_offset += sizeof(*prop) - sizeof(u32)
+				+ prop->num_properties * sizeof(u32);
+			num_properties--;
+			break;
+		}
+		case HFI_PROPERTY_PARAM_PROFILE_LEVEL_SUPPORTED:
+		{
+			struct msm_vidc_capability capability;
+			char *ptr = NULL;
+			u32 count = 0;
+			u32 prof_count = 0;
+			struct hfi_profile_level *prof_level;
+			struct hfi_profile_level_supported *prop =
+				(struct hfi_profile_level_supported *)
+				(data_ptr + next_offset);
+
+			ptr = (char *) &prop->rg_profile_level[0];
+			prof_count = prop->profile_count;
+			next_offset += sizeof(u32);
+
+			if (prof_count > MAX_PROFILE_COUNT) {
+				prof_count = MAX_PROFILE_COUNT;
+				dprintk(VIDC_WARN,
+					"prop count exceeds max profile count\n");
+				break;
+			}
+			while (prof_count) {
+				prof_level = (struct hfi_profile_level *)ptr;
+				capability.profile_level.profile_level[count]
+					.profile = prof_level->profile;
+				capability.profile_level.profile_level[count]
+					.level = prof_level->level;
+				prof_count--;
+				count++;
+				ptr += sizeof(struct hfi_profile_level);
+				next_offset += sizeof(struct hfi_profile_level);
+			}
+			num_properties--;
+			break;
+		}
+		case HFI_PROPERTY_PARAM_INTERLACE_FORMAT_SUPPORTED:
+		{
+			next_offset +=
+				sizeof(struct hfi_interlace_format_supported);
+			num_properties--;
+			break;
+		}
+		case HFI_PROPERTY_PARAM_NAL_STREAM_FORMAT_SUPPORTED:
+		{
+			next_offset +=
+				sizeof(struct hfi_nal_stream_format_supported);
+			num_properties--;
+			break;
+		}
+		case HFI_PROPERTY_PARAM_NAL_STREAM_FORMAT_SELECT:
+		{
+			next_offset += sizeof(u32);
+			num_properties--;
+			break;
+		}
+		case HFI_PROPERTY_PARAM_MAX_SEQUENCE_HEADER_SIZE:
+		{
+			next_offset += sizeof(u32);
+			num_properties--;
+			break;
+		}
+		case HFI_PROPERTY_PARAM_VENC_INTRA_REFRESH:
+		{
+			next_offset +=
+				sizeof(struct hfi_intra_refresh);
+			num_properties--;
+			break;
+		}
+		case HFI_PROPERTY_PARAM_BUFFER_ALLOC_MODE_SUPPORTED:
+		{
+			struct hfi_buffer_alloc_mode_supported *prop =
+				(struct hfi_buffer_alloc_mode_supported *)
+				(data_ptr + next_offset);
+
+			if (prop->num_entries >= 32) {
+				dprintk(VIDC_ERR,
+					"%s - num_entries: %d from f/w seems suspect\n",
+					__func__, prop->num_entries);
+				break;
+			}
+			next_offset +=
+				sizeof(struct hfi_buffer_alloc_mode_supported) -
+				sizeof(u32) + prop->num_entries * sizeof(u32);
+
+			copy_alloc_mode_to_sessions(prop,
+					capabilities, num_sessions,
+					codecs, domain);
+
+			num_properties--;
+			break;
+		}
+		default:
+			dprintk(VIDC_DBG,
+				"%s: default case - data_ptr %pK, prop_id 0x%x\n",
+				__func__, data_ptr, prop_id);
+			break;
+		}
+		rem_bytes -= next_offset;
+		data_ptr += next_offset;
+	}
+
+	return status;
+}
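+
+/*
+ * The parser above walks a type-length-value style payload: each pass
+ * reads one u32 property id, adds the size of the matching payload to
+ * next_offset, then advances data_ptr and shrinks rem_bytes by that
+ * amount. The assumed wire layout (illustrative):
+ *
+ *	[u32 prop_id][payload][u32 prop_id][payload]...
+ */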
+
+enum vidc_status hfi_process_sys_init_done_prop_read(
+	struct hfi_msg_sys_init_done_packet *pkt,
+	struct vidc_hal_sys_init_done *sys_init_done)
+{
+	enum vidc_status status = VIDC_ERR_NONE;
+	u32 rem_bytes, bytes_read, num_properties;
+	u8 *data_ptr;
+	u32 codecs = 0, domain = 0;
+
+	if (!pkt || !sys_init_done) {
+		dprintk(VIDC_ERR,
+			"hfi_msg_sys_init_done: Invalid input\n");
+		return VIDC_ERR_FAIL;
+	}
+
+	rem_bytes = pkt->size - sizeof(struct
+			hfi_msg_sys_init_done_packet) + sizeof(u32);
+
+	if (!rem_bytes) {
+		dprintk(VIDC_ERR,
+			"hfi_msg_sys_init_done: missing_prop_info\n");
+		return VIDC_ERR_FAIL;
+	}
+
+	status = hfi_map_err_status(pkt->error_type);
+	if (status) {
+		dprintk(VIDC_ERR, "%s: status %#x\n", __func__, status);
+		return status;
+	}
+
+	data_ptr = (u8 *) &pkt->rg_property_data[0];
+	num_properties = pkt->num_properties;
+	dprintk(VIDC_DBG,
+		"%s: data_start %pK, num_properties %#x\n",
+		__func__, data_ptr, num_properties);
+	if (!num_properties) {
+		sys_init_done->capabilities = NULL;
+		dprintk(VIDC_DBG,
+			"Venus didn't set any properties in SYS_INIT_DONE");
+		return status;
+	}
+	bytes_read = hfi_fill_codec_info(data_ptr, sys_init_done);
+	data_ptr += bytes_read;
+	rem_bytes -= bytes_read;
+	num_properties--;
+
+	status = hfi_parse_init_done_properties(
+			sys_init_done->capabilities,
+			VIDC_MAX_SESSIONS, data_ptr, num_properties,
+			rem_bytes, codecs, domain);
+	if (status) {
+		dprintk(VIDC_ERR, "%s: parse status %#x\n",
+			__func__, status);
+		return status;
+	}
+
+	return status;
+}
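+
+/*
+ * The rem_bytes computation above adds one u32 back, presumably because
+ * the packet struct declares a one-element rg_property_data[] array
+ * whose first u32 is really payload, i.e.:
+ *
+ *	rem_bytes = pkt->size - sizeof(header) + sizeof(u32)
+ */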
+
+static void hfi_process_sess_get_prop_dec_entropy(
+	struct hfi_msg_session_property_info_packet *prop,
+	enum hal_h264_entropy *entropy)
+{
+	u32 req_bytes, hfi_entropy;
+
+	req_bytes = prop->size - sizeof(
+			struct hfi_msg_session_property_info_packet);
+
+	if (!req_bytes || req_bytes % sizeof(hfi_entropy)) {
+		dprintk(VIDC_ERR, "%s: bad packet: %d\n", __func__, req_bytes);
+		return;
+	}
+
+	hfi_entropy = prop->rg_property_data[1];
+	*entropy =
+		hfi_entropy == HFI_H264_ENTROPY_CAVLC ? HAL_H264_ENTROPY_CAVLC :
+		hfi_entropy == HFI_H264_ENTROPY_CABAC ? HAL_H264_ENTROPY_CABAC :
+							HAL_UNUSED_ENTROPY;
+}
+
+static void hfi_process_sess_get_prop_profile_level(
+	struct hfi_msg_session_property_info_packet *prop,
+	struct hfi_profile_level *profile_level)
+{
+	struct hfi_profile_level *hfi_profile_level;
+	u32 req_bytes;
+
+	dprintk(VIDC_DBG, "Entered %s\n", __func__);
+	if (!prop) {
+		dprintk(VIDC_ERR,
+			"hal_process_sess_get_profile_level: bad_prop: %pK\n",
+			prop);
+		return;
+	}
+	req_bytes = prop->size - sizeof(
+			struct hfi_msg_session_property_info_packet);
+
+	if (!req_bytes || req_bytes % sizeof(struct hfi_profile_level)) {
+		dprintk(VIDC_ERR,
+			"hal_process_sess_get_profile_level: bad_pkt: %d\n",
+			req_bytes);
+		return;
+	}
+	hfi_profile_level = (struct hfi_profile_level *)
+				&prop->rg_property_data[1];
+	profile_level->profile = hfi_profile_level->profile;
+	profile_level->level = hfi_profile_level->level;
+	dprintk(VIDC_DBG, "%s profile: %d level: %d\n",
+		__func__, profile_level->profile,
+		profile_level->level);
+}
+
+static void hfi_process_sess_get_prop_buf_req(
+	struct hfi_msg_session_property_info_packet *prop,
+	struct buffer_requirements *buffreq)
+{
+	struct hfi_buffer_requirements *hfi_buf_req;
+	u32 req_bytes;
+
+	if (!prop) {
+		dprintk(VIDC_ERR,
+			"hal_process_sess_get_prop_buf_req: bad_prop: %pK\n",
+			prop);
+		return;
+	}
+
+	req_bytes = prop->size - sizeof(
+			struct hfi_msg_session_property_info_packet);
+	if (!req_bytes || req_bytes % sizeof(struct hfi_buffer_requirements) ||
+		!prop->rg_property_data[1]) {
+		dprintk(VIDC_ERR,
+			"hal_process_sess_get_prop_buf_req: bad_pkt: %d\n",
+			req_bytes);
+		return;
+	}
+
+	hfi_buf_req = (struct hfi_buffer_requirements *)
+		&prop->rg_property_data[1];
+
+	if (!hfi_buf_req) {
+		dprintk(VIDC_ERR, "%s - invalid buffer req pointer\n",
+			__func__);
+		return;
+	}
+
+	while (req_bytes) {
+		if (hfi_buf_req->buffer_size &&
+			hfi_buf_req->buffer_count_min >
+			hfi_buf_req->buffer_count_actual)
+			dprintk(VIDC_WARN,
+				"Bad buffer requirements for %#x: min %d, actual %d\n",
+				hfi_buf_req->buffer_type,
+				hfi_buf_req->buffer_count_min,
+				hfi_buf_req->buffer_count_actual);
+
+		dprintk(VIDC_DBG, "got buffer requirements for: %d\n",
+					hfi_buf_req->buffer_type);
+		switch (hfi_buf_req->buffer_type) {
+		case HFI_BUFFER_INPUT:
+			memcpy(&buffreq->buffer[0], hfi_buf_req,
+				sizeof(struct hfi_buffer_requirements));
+			buffreq->buffer[0].buffer_type = HAL_BUFFER_INPUT;
+			break;
+		case HFI_BUFFER_OUTPUT:
+			memcpy(&buffreq->buffer[1], hfi_buf_req,
+			sizeof(struct hfi_buffer_requirements));
+			buffreq->buffer[1].buffer_type = HAL_BUFFER_OUTPUT;
+			break;
+		case HFI_BUFFER_OUTPUT2:
+			memcpy(&buffreq->buffer[2], hfi_buf_req,
+				sizeof(struct hfi_buffer_requirements));
+			buffreq->buffer[2].buffer_type = HAL_BUFFER_OUTPUT2;
+			break;
+		case HFI_BUFFER_EXTRADATA_INPUT:
+			memcpy(&buffreq->buffer[3], hfi_buf_req,
+				sizeof(struct hfi_buffer_requirements));
+			buffreq->buffer[3].buffer_type =
+				HAL_BUFFER_EXTRADATA_INPUT;
+			break;
+		case HFI_BUFFER_EXTRADATA_OUTPUT:
+			memcpy(&buffreq->buffer[4], hfi_buf_req,
+				sizeof(struct hfi_buffer_requirements));
+			buffreq->buffer[4].buffer_type =
+				HAL_BUFFER_EXTRADATA_OUTPUT;
+			break;
+		case HFI_BUFFER_EXTRADATA_OUTPUT2:
+			memcpy(&buffreq->buffer[5], hfi_buf_req,
+				sizeof(struct hfi_buffer_requirements));
+			buffreq->buffer[5].buffer_type =
+				HAL_BUFFER_EXTRADATA_OUTPUT2;
+			break;
+		case HFI_BUFFER_INTERNAL_SCRATCH:
+			memcpy(&buffreq->buffer[6], hfi_buf_req,
+			sizeof(struct hfi_buffer_requirements));
+			buffreq->buffer[6].buffer_type =
+				HAL_BUFFER_INTERNAL_SCRATCH;
+			break;
+		case HFI_BUFFER_INTERNAL_SCRATCH_1:
+			memcpy(&buffreq->buffer[7], hfi_buf_req,
+				sizeof(struct hfi_buffer_requirements));
+			buffreq->buffer[7].buffer_type =
+				HAL_BUFFER_INTERNAL_SCRATCH_1;
+			break;
+		case HFI_BUFFER_INTERNAL_SCRATCH_2:
+			memcpy(&buffreq->buffer[8], hfi_buf_req,
+				sizeof(struct hfi_buffer_requirements));
+			buffreq->buffer[8].buffer_type =
+				HAL_BUFFER_INTERNAL_SCRATCH_2;
+			break;
+		case HFI_BUFFER_INTERNAL_PERSIST:
+			memcpy(&buffreq->buffer[9], hfi_buf_req,
+			sizeof(struct hfi_buffer_requirements));
+			buffreq->buffer[9].buffer_type =
+				HAL_BUFFER_INTERNAL_PERSIST;
+			break;
+		case HFI_BUFFER_INTERNAL_PERSIST_1:
+			memcpy(&buffreq->buffer[10], hfi_buf_req,
+				sizeof(struct hfi_buffer_requirements));
+			buffreq->buffer[10].buffer_type =
+				HAL_BUFFER_INTERNAL_PERSIST_1;
+			break;
+		default:
+			dprintk(VIDC_ERR,
+			"hal_process_sess_get_prop_buf_req: bad_buffer_type: %d\n",
+			hfi_buf_req->buffer_type);
+			break;
+		}
+		req_bytes -= sizeof(struct hfi_buffer_requirements);
+		hfi_buf_req++;
+	}
+}
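+
+/*
+ * The switch above flattens HFI buffer types into fixed slots of
+ * buffreq->buffer[]: 0 = INPUT, 1 = OUTPUT, 2 = OUTPUT2,
+ * 3-5 = EXTRADATA_{INPUT,OUTPUT,OUTPUT2}, 6-8 = INTERNAL_SCRATCH{,_1,_2}
+ * and 9-10 = INTERNAL_PERSIST{,_1}.
+ */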
+
+static int hfi_process_session_prop_info(u32 device_id,
+		struct hfi_msg_session_property_info_packet *pkt,
+		struct msm_vidc_cb_info *info)
+{
+	struct msm_vidc_cb_cmd_done cmd_done = {0};
+	struct hfi_profile_level profile_level = {0};
+	enum hal_h264_entropy entropy = {0};
+	struct buffer_requirements buff_req = { { {0} } };
+
+	dprintk(VIDC_DBG, "Received SESSION_PROPERTY_INFO[%#x]\n",
+			pkt->session_id);
+
+	if (pkt->size < sizeof(struct hfi_msg_session_property_info_packet)) {
+		dprintk(VIDC_ERR,
+				"hal_process_session_prop_info: bad_pkt_size\n");
+		return -E2BIG;
+	} else if (!pkt->num_properties) {
+		dprintk(VIDC_ERR,
+			"hal_process_session_prop_info: no_properties\n");
+		return -EINVAL;
+	}
+
+	switch (pkt->rg_property_data[0]) {
+	case HFI_PROPERTY_CONFIG_BUFFER_REQUIREMENTS:
+		hfi_process_sess_get_prop_buf_req(pkt, &buff_req);
+		cmd_done.device_id = device_id;
+		cmd_done.session_id = (void *)(uintptr_t)pkt->session_id;
+		cmd_done.status = VIDC_ERR_NONE;
+		cmd_done.data.property.buf_req = buff_req;
+		cmd_done.size = sizeof(buff_req);
+
+		*info = (struct msm_vidc_cb_info) {
+			.response_type =  HAL_SESSION_PROPERTY_INFO,
+			.response.cmd = cmd_done,
+		};
+
+		return 0;
+	case HFI_PROPERTY_PARAM_PROFILE_LEVEL_CURRENT:
+		hfi_process_sess_get_prop_profile_level(pkt, &profile_level);
+		cmd_done.device_id = device_id;
+		cmd_done.session_id = (void *)(uintptr_t)pkt->session_id;
+		cmd_done.status = VIDC_ERR_NONE;
+		cmd_done.data.property.profile_level =
+			(struct hal_profile_level) {
+				.profile = profile_level.profile,
+				.level = profile_level.level,
+			};
+		cmd_done.size = sizeof(struct hal_profile_level);
+
+		*info = (struct msm_vidc_cb_info) {
+			.response_type =  HAL_SESSION_PROPERTY_INFO,
+			.response.cmd = cmd_done,
+		};
+		return 0;
+	case HFI_PROPERTY_CONFIG_VDEC_ENTROPY:
+		hfi_process_sess_get_prop_dec_entropy(pkt, &entropy);
+		cmd_done.device_id = device_id;
+		cmd_done.session_id = (void *)(uintptr_t)pkt->session_id;
+		cmd_done.status = VIDC_ERR_NONE;
+		cmd_done.data.property.h264_entropy = entropy;
+		cmd_done.size = sizeof(enum hal_h264_entropy);
+
+		*info = (struct msm_vidc_cb_info) {
+			.response_type =  HAL_SESSION_PROPERTY_INFO,
+			.response.cmd = cmd_done,
+		};
+		return 0;
+	default:
+		dprintk(VIDC_DBG,
+				"hal_process_session_prop_info: unknown_prop_id: %x\n",
+				pkt->rg_property_data[0]);
+		return -ENOTSUPP;
+	}
+}
+
+static int hfi_process_session_init_done(u32 device_id,
+		struct hfi_msg_sys_session_init_done_packet *pkt,
+		struct msm_vidc_cb_info *info)
+{
+	struct msm_vidc_cb_cmd_done cmd_done = {0};
+	struct vidc_hal_session_init_done session_init_done = { {0} };
+
+	dprintk(VIDC_DBG, "RECEIVED: SESSION_INIT_DONE[%x]\n", pkt->session_id);
+
+	if (sizeof(struct hfi_msg_sys_session_init_done_packet) > pkt->size) {
+		dprintk(VIDC_ERR,
+				"hal_process_session_init_done: bad_pkt_size\n");
+		return -E2BIG;
+	}
+
+	cmd_done.device_id = device_id;
+	cmd_done.session_id = (void *)(uintptr_t)pkt->session_id;
+	cmd_done.status = hfi_map_err_status(pkt->error_type);
+	if (!cmd_done.status) {
+		cmd_done.status = hfi_process_session_init_done_prop_read(
+			pkt, &session_init_done);
+	}
+
+	cmd_done.data.session_init_done = session_init_done;
+	cmd_done.size = sizeof(struct vidc_hal_session_init_done);
+
+	*info = (struct msm_vidc_cb_info) {
+		.response_type =  HAL_SESSION_INIT_DONE,
+		.response.cmd = cmd_done,
+	};
+
+	return 0;
+}
+
+static int hfi_process_session_load_res_done(u32 device_id,
+		struct hfi_msg_session_load_resources_done_packet *pkt,
+		struct msm_vidc_cb_info *info)
+{
+	struct msm_vidc_cb_cmd_done cmd_done = {0};
+	dprintk(VIDC_DBG, "RECEIVED: SESSION_LOAD_RESOURCES_DONE[%#x]\n",
+		pkt->session_id);
+
+	if (sizeof(struct hfi_msg_session_load_resources_done_packet) !=
+		pkt->size) {
+		dprintk(VIDC_ERR,
+				"hal_process_session_load_res_done: bad packet size: %d\n",
+				pkt->size);
+		return -E2BIG;
+	}
+
+	cmd_done.device_id = device_id;
+	cmd_done.session_id = (void *)(uintptr_t)pkt->session_id;
+	cmd_done.status = hfi_map_err_status(pkt->error_type);
+	cmd_done.size = 0;
+
+	*info = (struct msm_vidc_cb_info) {
+		.response_type =  HAL_SESSION_LOAD_RESOURCE_DONE,
+		.response.cmd = cmd_done,
+	};
+
+	return 0;
+}
+
+static int hfi_process_session_flush_done(u32 device_id,
+		struct hfi_msg_session_flush_done_packet *pkt,
+		struct msm_vidc_cb_info *info)
+{
+	struct msm_vidc_cb_cmd_done cmd_done = {0};
+
+	dprintk(VIDC_DBG, "RECEIVED: SESSION_FLUSH_DONE[%#x]\n",
+			pkt->session_id);
+
+	if (sizeof(struct hfi_msg_session_flush_done_packet) != pkt->size) {
+		dprintk(VIDC_ERR,
+				"hal_process_session_flush_done: bad packet size: %d\n",
+				pkt->size);
+		return -E2BIG;
+	}
+
+	cmd_done.device_id = device_id;
+	cmd_done.session_id = (void *)(uintptr_t)pkt->session_id;
+	cmd_done.status = hfi_map_err_status(pkt->error_type);
+	cmd_done.size = sizeof(u32);
+
+	switch (pkt->flush_type) {
+	case HFI_FLUSH_OUTPUT:
+		cmd_done.data.flush_type = HAL_FLUSH_OUTPUT;
+		break;
+	case HFI_FLUSH_INPUT:
+		cmd_done.data.flush_type = HAL_FLUSH_INPUT;
+		break;
+	case HFI_FLUSH_ALL:
+		cmd_done.data.flush_type = HAL_FLUSH_ALL;
+		break;
+	default:
+		dprintk(VIDC_ERR,
+				"%s: invalid flush type!", __func__);
+		return -EINVAL;
+	}
+
+	*info = (struct msm_vidc_cb_info) {
+		.response_type =  HAL_SESSION_FLUSH_DONE,
+		.response.cmd = cmd_done,
+	};
+
+	return 0;
+}
+
+static int hfi_process_session_etb_done(u32 device_id,
+		struct hfi_msg_session_empty_buffer_done_packet *pkt,
+		struct msm_vidc_cb_info *info)
+{
+	struct msm_vidc_cb_data_done data_done = {0};
+	struct hfi_picture_type *hfi_picture_type = NULL;
+
+	dprintk(VIDC_DBG, "RECEIVED: SESSION_ETB_DONE[%#x]\n", pkt->session_id);
+
+	if (!pkt || pkt->size <
+		sizeof(struct hfi_msg_session_empty_buffer_done_packet)) {
+		dprintk(VIDC_ERR,
+				"hal_process_session_etb_done: bad_pkt_size\n");
+		return -E2BIG;
+	}
+
+	data_done.device_id = device_id;
+	data_done.session_id = (void *)(uintptr_t)pkt->session_id;
+	data_done.status = hfi_map_err_status(pkt->error_type);
+	data_done.size = sizeof(struct msm_vidc_cb_data_done);
+	data_done.clnt_data = pkt->input_tag;
+	data_done.input_done.offset = pkt->offset;
+	data_done.input_done.filled_len = pkt->filled_len;
+	data_done.input_done.packet_buffer =
+		(ion_phys_addr_t)pkt->packet_buffer;
+	data_done.input_done.extra_data_buffer =
+		(ion_phys_addr_t)pkt->extra_data_buffer;
+	data_done.input_done.status =
+		hfi_map_err_status(pkt->error_type);
+	hfi_picture_type = (struct hfi_picture_type *)&pkt->rgData[0];
+	if (hfi_picture_type->is_sync_frame) {
+		if (hfi_picture_type->picture_type)
+			data_done.input_done.flags =
+				hfi_picture_type->picture_type;
+		else
+			dprintk(VIDC_DBG,
+				"Non-Sync frame sent for H264/HEVC\n");
+	}
+
+	trace_msm_v4l2_vidc_buffer_event_end("ETB",
+		(u32)pkt->packet_buffer, -1, -1,
+		pkt->filled_len, pkt->offset);
+
+	*info = (struct msm_vidc_cb_info) {
+		.response_type =  HAL_SESSION_ETB_DONE,
+		.response.data = data_done,
+	};
+
+	return 0;
+}
+
+static int hfi_process_session_ftb_done(
+		u32 device_id, struct vidc_hal_msg_pkt_hdr *msg_hdr,
+		struct msm_vidc_cb_info *info)
+{
+	struct msm_vidc_cb_data_done data_done = {0};
+	bool is_decoder = false, is_encoder = false;
+
+	if (!msg_hdr) {
+		dprintk(VIDC_ERR, "Invalid Params\n");
+		return -EINVAL;
+	}
+
+	is_encoder = msg_hdr->size == sizeof(struct
+			hfi_msg_session_fill_buffer_done_compressed_packet) + 4;
+	is_decoder = msg_hdr->size == sizeof(struct
+			hfi_msg_session_fbd_uncompressed_plane0_packet) + 4;
+
+	if (!(is_encoder ^ is_decoder)) {
+		dprintk(VIDC_ERR, "Ambiguous packet (%#x) received (size %d)\n",
+				msg_hdr->packet, msg_hdr->size);
+		return -EBADHANDLE;
+	}
+
+	if (is_encoder) {
+		struct hfi_msg_session_fill_buffer_done_compressed_packet *pkt =
+		(struct hfi_msg_session_fill_buffer_done_compressed_packet *)
+		msg_hdr;
+		dprintk(VIDC_DBG, "RECEIVED: SESSION_FTB_DONE[%#x]\n",
+				pkt->session_id);
+		if (sizeof(struct
+			hfi_msg_session_fill_buffer_done_compressed_packet)
+			> pkt->size) {
+			dprintk(VIDC_ERR,
+				"hal_process_session_ftb_done: bad_pkt_size\n");
+			return -E2BIG;
+		} else if (pkt->error_type != HFI_ERR_NONE) {
+			dprintk(VIDC_ERR,
+				"got buffer back with error %x\n",
+				pkt->error_type);
+			/* Proceed with the FBD */
+		}
+
+		data_done.device_id = device_id;
+		data_done.session_id = (void *)(uintptr_t)pkt->session_id;
+		data_done.status = hfi_map_err_status(pkt->error_type);
+		data_done.size = sizeof(struct msm_vidc_cb_data_done);
+		data_done.clnt_data = 0;
+
+		data_done.output_done.timestamp_hi = pkt->time_stamp_hi;
+		data_done.output_done.timestamp_lo = pkt->time_stamp_lo;
+		data_done.output_done.flags1 = pkt->flags;
+		data_done.output_done.mark_target = pkt->mark_target;
+		data_done.output_done.mark_data = pkt->mark_data;
+		data_done.output_done.stats = pkt->stats;
+		data_done.output_done.offset1 = pkt->offset;
+		data_done.output_done.alloc_len1 = pkt->alloc_len;
+		data_done.output_done.filled_len1 = pkt->filled_len;
+		data_done.output_done.picture_type = pkt->picture_type;
+		data_done.output_done.packet_buffer1 =
+			(ion_phys_addr_t)pkt->packet_buffer;
+		data_done.output_done.extra_data_buffer =
+			(ion_phys_addr_t)pkt->extra_data_buffer;
+		data_done.output_done.buffer_type = HAL_BUFFER_OUTPUT;
+	} else /* if (is_decoder) */ {
+		struct hfi_msg_session_fbd_uncompressed_plane0_packet *pkt =
+		(struct	hfi_msg_session_fbd_uncompressed_plane0_packet *)
+		msg_hdr;
+
+		dprintk(VIDC_DBG, "RECEIVED: SESSION_FTB_DONE[%#x]\n",
+				pkt->session_id);
+		if (sizeof(
+			struct hfi_msg_session_fbd_uncompressed_plane0_packet) >
+			pkt->size) {
+			dprintk(VIDC_ERR,
+					"hal_process_session_ftb_done: bad_pkt_size\n");
+			return -E2BIG;
+		}
+
+		data_done.device_id = device_id;
+		data_done.session_id = (void *)(uintptr_t)pkt->session_id;
+		data_done.status = hfi_map_err_status(pkt->error_type);
+		data_done.size = sizeof(struct msm_vidc_cb_data_done);
+		data_done.clnt_data = 0;
+
+		data_done.output_done.stream_id = pkt->stream_id;
+		data_done.output_done.view_id = pkt->view_id;
+		data_done.output_done.timestamp_hi = pkt->time_stamp_hi;
+		data_done.output_done.timestamp_lo = pkt->time_stamp_lo;
+		data_done.output_done.flags1 = pkt->flags;
+		data_done.output_done.mark_target = pkt->mark_target;
+		data_done.output_done.mark_data = pkt->mark_data;
+		data_done.output_done.stats = pkt->stats;
+		data_done.output_done.alloc_len1 = pkt->alloc_len;
+		data_done.output_done.filled_len1 = pkt->filled_len;
+		data_done.output_done.offset1 = pkt->offset;
+		data_done.output_done.frame_width = pkt->frame_width;
+		data_done.output_done.frame_height = pkt->frame_height;
+		data_done.output_done.start_x_coord = pkt->start_x_coord;
+		data_done.output_done.start_y_coord = pkt->start_y_coord;
+		data_done.output_done.input_tag1 = pkt->input_tag;
+		data_done.output_done.picture_type = pkt->picture_type;
+		data_done.output_done.packet_buffer1 = pkt->packet_buffer;
+		data_done.output_done.extra_data_buffer =
+			pkt->extra_data_buffer;
+
+		if (!pkt->stream_id)
+			data_done.output_done.buffer_type = HAL_BUFFER_OUTPUT;
+		else if (pkt->stream_id == 1)
+			data_done.output_done.buffer_type = HAL_BUFFER_OUTPUT2;
+	}
+
+	trace_msm_v4l2_vidc_buffer_event_end("FTB",
+		(u32)data_done.output_done.packet_buffer1,
+		(((u64)data_done.output_done.timestamp_hi) << 32)
+		+ ((u64)data_done.output_done.timestamp_lo),
+		data_done.output_done.alloc_len1,
+		data_done.output_done.filled_len1,
+		data_done.output_done.offset1);
+
+	*info = (struct msm_vidc_cb_info) {
+		.response_type =  HAL_SESSION_FTB_DONE,
+		.response.data = data_done,
+	};
+
+	return 0;
+}
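+
+/*
+ * FTB done packets carry no explicit direction flag, so the handler
+ * above infers encoder vs. decoder purely from the packet size, and the
+ * XOR check rejects packets whose size matches both layouts or neither.
+ * The "+ 4" presumably accounts for one trailing u32 of per-packet data
+ * beyond the fixed header.
+ */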
+
+static int hfi_process_session_start_done(u32 device_id,
+		struct hfi_msg_session_start_done_packet *pkt,
+		struct msm_vidc_cb_info *info)
+{
+	struct msm_vidc_cb_cmd_done cmd_done = {0};
+
+	dprintk(VIDC_DBG, "RECEIVED: SESSION_START_DONE[%#x]\n",
+			pkt->session_id);
+
+	if (!pkt || pkt->size !=
+		sizeof(struct hfi_msg_session_start_done_packet)) {
+		dprintk(VIDC_ERR, "%s: bad packet/packet size\n",
+			__func__);
+		return -E2BIG;
+	}
+
+	cmd_done.device_id = device_id;
+	cmd_done.session_id = (void *)(uintptr_t)pkt->session_id;
+	cmd_done.status = hfi_map_err_status(pkt->error_type);
+	cmd_done.size = 0;
+
+	*info = (struct msm_vidc_cb_info) {
+		.response_type =  HAL_SESSION_START_DONE,
+		.response.cmd = cmd_done,
+	};
+	return 0;
+}
+
+static int hfi_process_session_stop_done(u32 device_id,
+		struct hfi_msg_session_stop_done_packet *pkt,
+		struct msm_vidc_cb_info *info)
+{
+	struct msm_vidc_cb_cmd_done cmd_done = {0};
+
+	dprintk(VIDC_DBG, "RECEIVED: SESSION_STOP_DONE[%#x]\n",
+			pkt->session_id);
+
+	if (!pkt || pkt->size !=
+		sizeof(struct hfi_msg_session_stop_done_packet)) {
+		dprintk(VIDC_ERR, "%s: bad packet/packet size\n",
+			__func__);
+		return -E2BIG;
+	}
+
+	cmd_done.device_id = device_id;
+	cmd_done.session_id = (void *)(uintptr_t)pkt->session_id;
+	cmd_done.status = hfi_map_err_status(pkt->error_type);
+	cmd_done.size = 0;
+
+	*info = (struct msm_vidc_cb_info) {
+		.response_type =  HAL_SESSION_STOP_DONE,
+		.response.cmd = cmd_done,
+	};
+
+	return 0;
+}
+
+static int hfi_process_session_rel_res_done(u32 device_id,
+		struct hfi_msg_session_release_resources_done_packet *pkt,
+		struct msm_vidc_cb_info *info)
+{
+	struct msm_vidc_cb_cmd_done cmd_done = {0};
+
+	dprintk(VIDC_DBG, "RECEIVED: SESSION_RELEASE_RESOURCES_DONE[%#x]\n",
+		pkt->session_id);
+
+	if (!pkt || pkt->size !=
+		sizeof(struct hfi_msg_session_release_resources_done_packet)) {
+		dprintk(VIDC_ERR, "%s: bad packet/packet size\n",
+			__func__);
+		return -E2BIG;
+	}
+
+	cmd_done.device_id = device_id;
+	cmd_done.session_id = (void *)(uintptr_t)pkt->session_id;
+	cmd_done.status = hfi_map_err_status(pkt->error_type);
+	cmd_done.size = 0;
+
+	*info = (struct msm_vidc_cb_info) {
+		.response_type =  HAL_SESSION_RELEASE_RESOURCE_DONE,
+		.response.cmd = cmd_done,
+	};
+
+	return 0;
+}
+
+static int hfi_process_session_rel_buf_done(u32 device_id,
+		struct hfi_msg_session_release_buffers_done_packet *pkt,
+		struct msm_vidc_cb_info *info)
+{
+	struct msm_vidc_cb_cmd_done cmd_done = {0};
+
+	if (!pkt || pkt->size <
+		sizeof(struct hfi_msg_session_release_buffers_done_packet)) {
+		dprintk(VIDC_ERR, "bad packet/packet size %d\n",
+			pkt ? pkt->size : 0);
+		return -E2BIG;
+	}
+	dprintk(VIDC_DBG, "RECEIVED:SESSION_RELEASE_BUFFER_DONE[%#x]\n",
+			pkt->session_id);
+
+	cmd_done.device_id = device_id;
+	cmd_done.size = sizeof(struct msm_vidc_cb_cmd_done);
+	cmd_done.session_id = (void *)(uintptr_t)pkt->session_id;
+	cmd_done.status = hfi_map_err_status(pkt->error_type);
+	if (pkt->rg_buffer_info) {
+		cmd_done.data.buffer_info =
+			*(struct hal_buffer_info *)pkt->rg_buffer_info;
+		cmd_done.size = sizeof(struct hal_buffer_info);
+	} else {
+		dprintk(VIDC_ERR, "invalid payload in rel_buff_done\n");
+	}
+
+	*info = (struct msm_vidc_cb_info) {
+		.response_type =  HAL_SESSION_RELEASE_BUFFER_DONE,
+		.response.cmd = cmd_done,
+	};
+
+	return 0;
+}
+
+static int hfi_process_session_end_done(u32 device_id,
+		struct hfi_msg_sys_session_end_done_packet *pkt,
+		struct msm_vidc_cb_info *info)
+{
+	struct msm_vidc_cb_cmd_done cmd_done = {0};
+
+	dprintk(VIDC_DBG, "RECEIVED: SESSION_END_DONE[%#x]\n", pkt->session_id);
+
+	if (!pkt || pkt->size !=
+		sizeof(struct hfi_msg_sys_session_end_done_packet)) {
+		dprintk(VIDC_ERR, "%s: bad packet/packet size\n", __func__);
+		return -E2BIG;
+	}
+
+	cmd_done.device_id = device_id;
+	cmd_done.session_id = (void *)(uintptr_t)pkt->session_id;
+	cmd_done.status = hfi_map_err_status(pkt->error_type);
+	cmd_done.size = 0;
+
+	*info = (struct msm_vidc_cb_info) {
+		.response_type =  HAL_SESSION_END_DONE,
+		.response.cmd = cmd_done,
+	};
+
+	return 0;
+}
+
+static int hfi_process_session_abort_done(u32 device_id,
+	struct hfi_msg_sys_session_abort_done_packet *pkt,
+	struct msm_vidc_cb_info *info)
+{
+	struct msm_vidc_cb_cmd_done cmd_done = {0};
+
+	dprintk(VIDC_DBG, "RECEIVED: SESSION_ABORT_DONE[%#x]\n",
+			pkt->session_id);
+
+	if (!pkt || pkt->size !=
+		sizeof(struct hfi_msg_sys_session_abort_done_packet)) {
+		dprintk(VIDC_ERR, "%s: bad packet/packet size: %d\n",
+				__func__, pkt ? pkt->size : 0);
+		return -E2BIG;
+	}
+	cmd_done.device_id = device_id;
+	cmd_done.session_id = (void *)(uintptr_t)pkt->session_id;
+	cmd_done.status = hfi_map_err_status(pkt->error_type);
+	cmd_done.size = 0;
+
+	*info = (struct msm_vidc_cb_info) {
+		.response_type =  HAL_SESSION_ABORT_DONE,
+		.response.cmd = cmd_done,
+	};
+
+	return 0;
+}
+
+static int hfi_process_session_get_seq_hdr_done(
+		u32 device_id,
+		struct hfi_msg_session_get_sequence_header_done_packet *pkt,
+		struct msm_vidc_cb_info *info)
+{
+	struct msm_vidc_cb_data_done data_done = {0};
+
+	if (!pkt || pkt->size !=
+		sizeof(struct
+		hfi_msg_session_get_sequence_header_done_packet)) {
+		dprintk(VIDC_ERR, "%s: bad packet/packet size\n",
+			__func__);
+		return -E2BIG;
+	}
+
+	dprintk(VIDC_DBG, "RECEIVED:SESSION_GET_SEQ_HDR_DONE[%#x]\n",
+			pkt->session_id);
+
+	data_done.device_id = device_id;
+	data_done.size = sizeof(struct msm_vidc_cb_data_done);
+	data_done.session_id = (void *)(uintptr_t)pkt->session_id;
+	data_done.status = hfi_map_err_status(pkt->error_type);
+	data_done.output_done.packet_buffer1 =
+		(ion_phys_addr_t)pkt->sequence_header;
+	data_done.output_done.filled_len1 = pkt->header_len;
+	dprintk(VIDC_INFO, "seq_hdr: %#x, Length: %d\n",
+			pkt->sequence_header, pkt->header_len);
+
+	*info = (struct msm_vidc_cb_info) {
+		.response_type =  HAL_SESSION_GET_SEQ_HDR_DONE,
+		.response.data = data_done,
+	};
+
+	return 0;
+}
+
+static void hfi_process_sys_get_prop_image_version(
+		struct hfi_msg_sys_property_info_packet *pkt)
+{
+	int i = 0;
+	u32 smem_block_size = 0;
+	u8 *smem_table_ptr;
+	char version[256];
+	const u32 version_string_size = 128;
+	const u32 smem_image_index_venus = 14 * 128;
+	u8 *str_image_version;
+	int req_bytes;
+
+	req_bytes = pkt->size - sizeof(*pkt);
+	if (req_bytes < version_string_size ||
+			!pkt->rg_property_data[1] ||
+			pkt->num_properties > 1) {
+		dprintk(VIDC_ERR,
+				"hfi_process_sys_get_prop_image_version: bad_pkt: %d\n",
+				req_bytes);
+		return;
+	}
+	str_image_version = (u8 *)&pkt->rg_property_data[1];
+	/*
+	 * The version string returned by firmware includes null
+	 * characters at the start and in between. Replace the null
+	 * characters with space, to print the version info.
+	 */
+	for (i = 0; i < version_string_size; i++) {
+		if (str_image_version[i] != '\0')
+			version[i] = str_image_version[i];
+		else
+			version[i] = ' ';
+	}
+	version[i] = '\0';
+	dprintk(VIDC_DBG, "F/W version: %s\n", version);
+
+	smem_table_ptr = smem_get_entry(SMEM_IMAGE_VERSION_TABLE,
+			&smem_block_size, 0, SMEM_ANY_HOST_FLAG);
+	if ((smem_image_index_venus + version_string_size) <= smem_block_size &&
+			smem_table_ptr)
+		memcpy(smem_table_ptr + smem_image_index_venus,
+				str_image_version, version_string_size);
+}
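+
+/*
+ * Besides logging, the raw version string is copied into the SMEM image
+ * version table; smem_image_index_venus = 14 * 128 presumably selects
+ * Venus' slot (entry 14) in a table of fixed 128-byte entries.
+ */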
+
+static int hfi_process_sys_property_info(u32 device_id,
+		struct hfi_msg_sys_property_info_packet *pkt,
+		struct msm_vidc_cb_info *info)
+{
+	if (!pkt) {
+		dprintk(VIDC_ERR, "%s: invalid param\n", __func__);
+		return -EINVAL;
+	} else if (pkt->size < sizeof(*pkt)) {
+		dprintk(VIDC_ERR,
+				"hfi_process_sys_property_info: bad_pkt_size\n");
+		return -E2BIG;
+	} else if (!pkt->num_properties) {
+		dprintk(VIDC_ERR,
+				"hfi_process_sys_property_info: no_properties\n");
+		return -EINVAL;
+	}
+
+	switch (pkt->rg_property_data[0]) {
+	case HFI_PROPERTY_SYS_IMAGE_VERSION:
+		hfi_process_sys_get_prop_image_version(pkt);
+
+		*info = (struct msm_vidc_cb_info) {
+			.response_type =  HAL_RESPONSE_UNUSED,
+		};
+		return 0;
+	default:
+		dprintk(VIDC_DBG,
+				"hfi_process_sys_property_info: unknown_prop_id: %x\n",
+				pkt->rg_property_data[0]);
+		return -ENOTSUPP;
+	}
+
+}
+
+static int hfi_process_ignore(u32 device_id,
+		struct vidc_hal_msg_pkt_hdr *msg_hdr,
+		struct msm_vidc_cb_info *info)
+{
+	*info = (struct msm_vidc_cb_info) {
+		.response_type =  HAL_RESPONSE_UNUSED,
+	};
+
+	return 0;
+}
+
+int hfi_process_msg_packet(u32 device_id, struct vidc_hal_msg_pkt_hdr *msg_hdr,
+		struct msm_vidc_cb_info *info)
+{
+	typedef int (*pkt_func_def)(u32, void *, struct msm_vidc_cb_info *info);
+	pkt_func_def pkt_func = NULL;
+
+	if (!info || !msg_hdr || msg_hdr->size < VIDC_IFACEQ_MIN_PKT_SIZE) {
+		dprintk(VIDC_ERR, "%s: bad packet/packet size\n",
+			__func__);
+		return -EINVAL;
+	}
+
+	dprintk(VIDC_DBG, "Parse response %#x\n", msg_hdr->packet);
+	switch (msg_hdr->packet) {
+	case HFI_MSG_EVENT_NOTIFY:
+		pkt_func = (pkt_func_def)hfi_process_event_notify;
+		break;
+	case  HFI_MSG_SYS_INIT_DONE:
+		pkt_func = (pkt_func_def)hfi_process_sys_init_done;
+		break;
+	case HFI_MSG_SYS_SESSION_INIT_DONE:
+		pkt_func = (pkt_func_def)hfi_process_session_init_done;
+		break;
+	case HFI_MSG_SYS_PROPERTY_INFO:
+		pkt_func = (pkt_func_def)hfi_process_sys_property_info;
+		break;
+	case HFI_MSG_SYS_SESSION_END_DONE:
+		pkt_func = (pkt_func_def)hfi_process_session_end_done;
+		break;
+	case HFI_MSG_SESSION_LOAD_RESOURCES_DONE:
+		pkt_func = (pkt_func_def)hfi_process_session_load_res_done;
+		break;
+	case HFI_MSG_SESSION_START_DONE:
+		pkt_func = (pkt_func_def)hfi_process_session_start_done;
+		break;
+	case HFI_MSG_SESSION_STOP_DONE:
+		pkt_func = (pkt_func_def)hfi_process_session_stop_done;
+		break;
+	case HFI_MSG_SESSION_EMPTY_BUFFER_DONE:
+		pkt_func = (pkt_func_def)hfi_process_session_etb_done;
+		break;
+	case HFI_MSG_SESSION_FILL_BUFFER_DONE:
+		pkt_func = (pkt_func_def)hfi_process_session_ftb_done;
+		break;
+	case HFI_MSG_SESSION_FLUSH_DONE:
+		pkt_func = (pkt_func_def)hfi_process_session_flush_done;
+		break;
+	case HFI_MSG_SESSION_PROPERTY_INFO:
+		pkt_func = (pkt_func_def)hfi_process_session_prop_info;
+		break;
+	case HFI_MSG_SESSION_RELEASE_RESOURCES_DONE:
+		pkt_func = (pkt_func_def)hfi_process_session_rel_res_done;
+		break;
+	case HFI_MSG_SYS_RELEASE_RESOURCE:
+		pkt_func = (pkt_func_def)hfi_process_sys_rel_resource_done;
+		break;
+	case HFI_MSG_SESSION_GET_SEQUENCE_HEADER_DONE:
+		pkt_func = (pkt_func_def) hfi_process_session_get_seq_hdr_done;
+		break;
+	case HFI_MSG_SESSION_RELEASE_BUFFERS_DONE:
+		pkt_func = (pkt_func_def)hfi_process_session_rel_buf_done;
+		break;
+	case HFI_MSG_SYS_SESSION_ABORT_DONE:
+		pkt_func = (pkt_func_def)hfi_process_session_abort_done;
+		break;
+	case HFI_MSG_SESSION_SYNC_DONE:
+		pkt_func = (pkt_func_def)hfi_process_ignore;
+		break;
+	default:
+		dprintk(VIDC_DBG, "Unable to parse message: %#x\n",
+				msg_hdr->packet);
+		break;
+	}
+
+	return pkt_func ? pkt_func(device_id, msg_hdr, info) : -ENOTSUPP;
+}
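+
+/*
+ * All handlers share the (device_id, packet, info) shape, which is why
+ * the dispatcher above can erase the packet type behind pkt_func_def;
+ * each handler then re-validates its own packet size. A hypothetical
+ * caller in the response path would look like:
+ *
+ *	struct msm_vidc_cb_info info;
+ *
+ *	if (!hfi_process_msg_packet(device_id, msg_hdr, &info))
+ *		forward info.response_type and info.response to the core
+ */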
diff -Nruw linux-4.4.115-fbx/drivers/media/platform/msm./vidc/Kconfig linux-4.4.115-fbx/drivers/media/platform/msm/vidc/Kconfig
--- linux-4.4.115-fbx/drivers/media/platform/msm./vidc/Kconfig	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/media/platform/msm/vidc/Kconfig	2019-01-22 16:16:24.459255064 +0100
@@ -0,0 +1,11 @@
+#
+# VIDEO CORE
+#
+
+menuconfig MSM_VIDC_V4L2
+	tristate "Qualcomm MSM V4L2 based video driver"
+	depends on ARCH_QCOM && VIDEO_V4L2
+	select VIDEOBUF2_CORE
+
+source "drivers/media/platform/msm/vidc/vmem/Kconfig"
+source "drivers/media/platform/msm/vidc/governors/Kconfig"
diff -Nruw linux-4.4.115-fbx/drivers/media/platform/msm./vidc/Makefile linux-4.4.115-fbx/drivers/media/platform/msm/vidc/Makefile
--- linux-4.4.115-fbx/drivers/media/platform/msm./vidc/Makefile	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/media/platform/msm/vidc/Makefile	2019-01-22 16:16:24.459255064 +0100
@@ -0,0 +1,22 @@
+ccflags-y += -I$(srctree)/drivers/media/platform/msm/vidc/
+
+msm-vidc-objs := msm_v4l2_vidc.o \
+                                msm_vidc_common.o \
+                                msm_vidc.o \
+                                msm_vdec.o \
+                                msm_venc.o \
+                                msm_smem.o \
+                                msm_vidc_debug.o \
+                                msm_vidc_res_parse.o \
+                                venus_hfi.o \
+                                hfi_response_handler.o \
+                                hfi_packetization.o \
+                                vidc_hfi.o \
+                                venus_boot.o \
+                                msm_vidc_dcvs.o
+
+obj-$(CONFIG_MSM_VIDC_V4L2) := msm-vidc.o
+
+obj-$(CONFIG_MSM_VIDC_V4L2) += governors/
+
+obj-$(CONFIG_MSM_VIDC_VMEM) += vmem/
diff -Nruw linux-4.4.115-fbx/drivers/media/platform/msm./vidc/msm_smem.c linux-4.4.115-fbx/drivers/media/platform/msm/vidc/msm_smem.c
--- linux-4.4.115-fbx/drivers/media/platform/msm./vidc/msm_smem.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/media/platform/msm/vidc/msm_smem.c	2019-01-22 16:16:24.463255100 +0100
@@ -0,0 +1,711 @@
+/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <asm/dma-iommu.h>
+#include <linux/dma-attrs.h>
+#include <linux/dma-buf.h>
+#include <linux/dma-direction.h>
+#include <linux/iommu.h>
+#include <linux/msm_dma_iommu_mapping.h>
+#include <linux/msm_ion.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include "media/msm_vidc.h"
+#include "msm_vidc_debug.h"
+#include "msm_vidc_resources.h"
+
+struct smem_client {
+	int mem_type;
+	void *clnt;
+	struct msm_vidc_platform_resources *res;
+	enum session_type session_type;
+};
+
+static int get_device_address(struct smem_client *smem_client,
+		struct ion_handle *hndl, unsigned long align,
+		ion_phys_addr_t *iova, unsigned long *buffer_size,
+		unsigned long flags, enum hal_buffer buffer_type,
+		struct dma_mapping_info *mapping_info)
+{
+	int rc = 0;
+	struct ion_client *clnt = NULL;
+	struct dma_buf *buf = NULL;
+	struct dma_buf_attachment *attach;
+	struct sg_table *table = NULL;
+	struct context_bank_info *cb = NULL;
+
+	if (!iova || !buffer_size || !hndl || !smem_client || !mapping_info) {
+		dprintk(VIDC_ERR, "Invalid params: %pK, %pK, %pK, %pK\n",
+				smem_client, hndl, iova, buffer_size);
+		return -EINVAL;
+	}
+
+	clnt = smem_client->clnt;
+	if (!clnt) {
+		dprintk(VIDC_ERR, "Invalid client\n");
+		return -EINVAL;
+	}
+
+	if (is_iommu_present(smem_client->res)) {
+		cb = msm_smem_get_context_bank(smem_client, flags & SMEM_SECURE,
+				buffer_type);
+		if (!cb) {
+			dprintk(VIDC_ERR,
+				"%s: Failed to get context bank device\n",
+				 __func__);
+			rc = -EIO;
+			goto mem_map_failed;
+		}
+
+		/* Convert an Ion handle to a dma buf */
+		buf = ion_share_dma_buf(clnt, hndl);
+		if (IS_ERR_OR_NULL(buf)) {
+			rc = PTR_ERR(buf) ?: -ENOMEM;
+			dprintk(VIDC_ERR, "Share ION buf to DMA failed\n");
+			goto mem_map_failed;
+		}
+
+		/* Check if the dmabuf size matches expected size */
+		if (buf->size < *buffer_size) {
+			rc = -EINVAL;
+			dprintk(VIDC_ERR,
+				"Size mismatch! Dmabuf size: %zu Expected Size: %lu",
+				buf->size, *buffer_size);
+			goto mem_buf_size_mismatch;
+		}
+		/* Prepare a dma buf for dma on the given device */
+		attach = dma_buf_attach(buf, cb->dev);
+		if (IS_ERR_OR_NULL(attach)) {
+			rc = PTR_ERR(attach) ?: -ENOMEM;
+			dprintk(VIDC_ERR, "Failed to attach dmabuf\n");
+			goto mem_buf_attach_failed;
+		}
+
+		/* Get the scatterlist for the given attachment */
+		table = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
+		if (IS_ERR_OR_NULL(table)) {
+			rc = PTR_ERR(table) ?: -ENOMEM;
+			dprintk(VIDC_ERR, "Failed to map table\n");
+			goto mem_map_table_failed;
+		}
+
+		/* debug traces need to be updated later */
+		trace_msm_smem_buffer_iommu_op_start("MAP", 0, 0,
+			align, *iova, *buffer_size);
+
+		/* Map a scatterlist into an SMMU */
+		rc = msm_dma_map_sg_lazy(cb->dev, table->sgl, table->nents,
+				DMA_BIDIRECTIONAL, buf);
+		if (rc != table->nents) {
+			dprintk(VIDC_ERR,
+				"Mapping failed with rc(%d), expected rc(%d)\n",
+				rc, table->nents);
+			rc = -ENOMEM;
+			goto mem_map_sg_failed;
+		}
+		if (table->sgl) {
+			dprintk(VIDC_DBG,
+				"%s: CB : %s, DMA buf: %pK, device: %pK, attach: %pK, table: %pK, table sgl: %pK, rc: %d, dma_address: %pa\n",
+				__func__, cb->name, buf, cb->dev, attach,
+				table, table->sgl, rc,
+				&table->sgl->dma_address);
+
+			*iova = table->sgl->dma_address;
+			*buffer_size = table->sgl->dma_length;
+		} else {
+			dprintk(VIDC_ERR, "sgl is NULL\n");
+			rc = -ENOMEM;
+			goto mem_map_sg_failed;
+		}
+
+		mapping_info->dev = cb->dev;
+		mapping_info->mapping = cb->mapping;
+		mapping_info->table = table;
+		mapping_info->attach = attach;
+		mapping_info->buf = buf;
+
+		trace_msm_smem_buffer_iommu_op_end("MAP", 0, 0,
+			align, *iova, *buffer_size);
+	} else {
+		dprintk(VIDC_DBG, "Using physical memory address\n");
+		rc = ion_phys(clnt, hndl, iova, (size_t *)buffer_size);
+		if (rc) {
+			dprintk(VIDC_ERR, "ion memory map failed - %d\n", rc);
+			goto mem_map_failed;
+		}
+	}
+
+	dprintk(VIDC_DBG, "mapped ion handle %pK to %pa\n", hndl, iova);
+	return 0;
+mem_map_sg_failed:
+	dma_buf_unmap_attachment(attach, table, DMA_BIDIRECTIONAL);
+mem_map_table_failed:
+	dma_buf_detach(buf, attach);
+mem_buf_size_mismatch:
+mem_buf_attach_failed:
+	dma_buf_put(buf);
+mem_map_failed:
+	return rc;
+}
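+
+/*
+ * Summary of the IOMMU path above (the non-IOMMU path simply calls
+ * ion_phys()): ion handle -> dma_buf via ion_share_dma_buf() -> device
+ * attachment via dma_buf_attach() -> sg_table via
+ * dma_buf_map_attachment() -> SMMU mapping via msm_dma_map_sg_lazy();
+ * the device address handed to the firmware is table->sgl->dma_address.
+ */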
+
+static void put_device_address(struct smem_client *smem_client,
+	struct ion_handle *hndl, u32 flags,
+	struct dma_mapping_info *mapping_info,
+	enum hal_buffer buffer_type)
+{
+	struct ion_client *clnt = NULL;
+
+	if (!hndl || !smem_client || !mapping_info) {
+		dprintk(VIDC_WARN, "Invalid params: %pK, %pK\n",
+				smem_client, hndl);
+		return;
+	}
+
+	if (!mapping_info->dev || !mapping_info->table ||
+		!mapping_info->buf || !mapping_info->attach) {
+		dprintk(VIDC_WARN, "Invalid mapping info\n");
+		return;
+	}
+
+	clnt = smem_client->clnt;
+	if (!clnt) {
+		dprintk(VIDC_WARN, "Invalid client\n");
+		return;
+	}
+	if (is_iommu_present(smem_client->res)) {
+		dprintk(VIDC_DBG,
+			"Calling dma_unmap_sg - device: %pK, address: %pa, buf: %pK, table: %pK, attach: %pK\n",
+			mapping_info->dev,
+			&mapping_info->table->sgl->dma_address,
+			mapping_info->buf, mapping_info->table,
+			mapping_info->attach);
+
+		trace_msm_smem_buffer_iommu_op_start("UNMAP", 0, 0, 0, 0, 0);
+		msm_dma_unmap_sg(mapping_info->dev, mapping_info->table->sgl,
+			mapping_info->table->nents, DMA_BIDIRECTIONAL,
+			mapping_info->buf);
+		dma_buf_unmap_attachment(mapping_info->attach,
+			mapping_info->table, DMA_BIDIRECTIONAL);
+		dma_buf_detach(mapping_info->buf, mapping_info->attach);
+		dma_buf_put(mapping_info->buf);
+		trace_msm_smem_buffer_iommu_op_end("UNMAP", 0, 0, 0, 0, 0);
+	}
+}
+
+static int ion_user_to_kernel(struct smem_client *client, int fd, u32 size,
+		struct msm_smem *mem, enum hal_buffer buffer_type)
+{
+	struct ion_handle *hndl;
+	ion_phys_addr_t iova = 0;
+	unsigned long buffer_size = size;
+	int rc = 0;
+	unsigned long align = SZ_4K;
+	unsigned long ion_flags = 0;
+
+	hndl = ion_import_dma_buf(client->clnt, fd);
+	dprintk(VIDC_DBG, "%s ion handle: %pK\n", __func__, hndl);
+	if (IS_ERR_OR_NULL(hndl)) {
+		dprintk(VIDC_ERR, "Failed to get handle: %pK, %d, %d, %pK\n",
+				client, fd, size, hndl);
+		rc = -ENOMEM;
+		goto fail_import_fd;
+	}
+
+	mem->kvaddr = NULL;
+	rc = ion_handle_get_flags(client->clnt, hndl, &ion_flags);
+	if (rc) {
+		dprintk(VIDC_ERR, "Failed to get ion flags: %d\n", rc);
+		goto fail_device_address;
+	}
+
+	mem->buffer_type = buffer_type;
+	if (ion_flags & ION_FLAG_CACHED)
+		mem->flags |= SMEM_CACHED;
+
+	if (ion_flags & ION_FLAG_SECURE)
+		mem->flags |= SMEM_SECURE;
+
+	rc = get_device_address(client, hndl, align, &iova, &buffer_size,
+				mem->flags, buffer_type, &mem->mapping_info);
+	if (rc) {
+		dprintk(VIDC_ERR, "Failed to get device address: %d\n", rc);
+		goto fail_device_address;
+	}
+
+	mem->mem_type = client->mem_type;
+	mem->smem_priv = hndl;
+	mem->device_addr = iova;
+	mem->size = buffer_size;
+	if ((u32)mem->device_addr != iova) {
+		dprintk(VIDC_ERR, "iova(%pa) truncated to %#x",
+			&iova, (u32)mem->device_addr);
+		goto fail_device_address;
+	}
+	dprintk(VIDC_DBG,
+		"%s: ion_handle = %pK, fd = %d, device_addr = %pa, size = %zx, kvaddr = %pK, buffer_type = %d, flags = %#lx\n",
+		__func__, mem->smem_priv, fd, &mem->device_addr, mem->size,
+		mem->kvaddr, mem->buffer_type, mem->flags);
+	return rc;
+fail_device_address:
+	ion_free(client->clnt, hndl);
+fail_import_fd:
+	return rc;
+}
+
+static int get_secure_flag_for_buffer_type(
+		struct smem_client *client, enum hal_buffer buffer_type)
+{
+
+	if (!client) {
+		dprintk(VIDC_ERR, "%s - invalid params\n", __func__);
+		return -EINVAL;
+	}
+
+	switch (buffer_type) {
+	case HAL_BUFFER_INPUT:
+		if (client->session_type == MSM_VIDC_ENCODER)
+			return ION_FLAG_CP_PIXEL;
+		else
+			return ION_FLAG_CP_BITSTREAM;
+	case HAL_BUFFER_OUTPUT:
+	case HAL_BUFFER_OUTPUT2:
+		if (client->session_type == MSM_VIDC_ENCODER)
+			return ION_FLAG_CP_BITSTREAM;
+		else
+			return ION_FLAG_CP_PIXEL;
+	case HAL_BUFFER_INTERNAL_SCRATCH:
+		return ION_FLAG_CP_BITSTREAM;
+	case HAL_BUFFER_INTERNAL_SCRATCH_1:
+		return ION_FLAG_CP_NON_PIXEL;
+	case HAL_BUFFER_INTERNAL_SCRATCH_2:
+		return ION_FLAG_CP_PIXEL;
+	case HAL_BUFFER_INTERNAL_PERSIST:
+		return ION_FLAG_CP_BITSTREAM;
+	case HAL_BUFFER_INTERNAL_PERSIST_1:
+		return ION_FLAG_CP_NON_PIXEL;
+	default:
+		WARN(1, "No matching secure flag for buffer type : %x\n",
+				buffer_type);
+		return -EINVAL;
+	}
+}
+
+static int alloc_ion_mem(struct smem_client *client, size_t size, u32 align,
+	u32 flags, enum hal_buffer buffer_type, struct msm_smem *mem,
+	int map_kernel)
+{
+	struct ion_handle *hndl;
+	ion_phys_addr_t iova = 0;
+	unsigned long buffer_size = 0;
+	unsigned long heap_mask = 0;
+	int rc = 0;
+	int ion_flags = 0;
+
+	align = ALIGN(align, SZ_4K);
+	size = ALIGN(size, SZ_4K);
+
+	if (is_iommu_present(client->res)) {
+		heap_mask = ION_HEAP(ION_IOMMU_HEAP_ID);
+	} else {
+		dprintk(VIDC_DBG,
+			"allocate shared memory from adsp heap size %zx align %d\n",
+			size, align);
+		heap_mask = ION_HEAP(ION_ADSP_HEAP_ID);
+	}
+
+	if (flags & SMEM_CACHED)
+		ion_flags |= ION_FLAG_CACHED;
+
+	if (flags & SMEM_SECURE) {
+		int secure_flag =
+			get_secure_flag_for_buffer_type(client, buffer_type);
+		if (secure_flag < 0) {
+			rc = secure_flag;
+			goto fail_shared_mem_alloc;
+		}
+
+		ion_flags |= ION_FLAG_SECURE | secure_flag;
+		heap_mask = ION_HEAP(ION_SECURE_HEAP_ID);
+
+		if (client->res->slave_side_cp) {
+			heap_mask = ION_HEAP(ION_CP_MM_HEAP_ID);
+			size = ALIGN(size, SZ_1M);
+			align = ALIGN(align, SZ_1M);
+		}
+	}
+
+	trace_msm_smem_buffer_ion_op_start("ALLOC", (u32)buffer_type,
+		heap_mask, size, align, flags, map_kernel);
+	hndl = ion_alloc(client->clnt, size, align, heap_mask, ion_flags);
+	if (IS_ERR_OR_NULL(hndl)) {
+		dprintk(VIDC_ERR,
+		"Failed to allocate shared memory = %pK, %zx, %d, %#x\n",
+		client, size, align, flags);
+		rc = -ENOMEM;
+		goto fail_shared_mem_alloc;
+	}
+	trace_msm_smem_buffer_ion_op_end("ALLOC", (u32)buffer_type,
+		heap_mask, size, align, flags, map_kernel);
+	mem->mem_type = client->mem_type;
+	mem->smem_priv = hndl;
+	mem->flags = flags;
+	mem->buffer_type = buffer_type;
+	if (map_kernel) {
+		mem->kvaddr = ion_map_kernel(client->clnt, hndl);
+		if (IS_ERR_OR_NULL(mem->kvaddr)) {
+			dprintk(VIDC_ERR,
+				"Failed to map shared mem in kernel\n");
+			rc = -EIO;
+			goto fail_map;
+		}
+	} else {
+		mem->kvaddr = NULL;
+	}
+
+	rc = get_device_address(client, hndl, align, &iova, &buffer_size,
+				flags, buffer_type, &mem->mapping_info);
+	if (rc) {
+		dprintk(VIDC_ERR, "Failed to get device address: %d\n",
+			rc);
+		goto fail_device_address;
+	}
+	mem->device_addr = iova;
+	if ((u32)mem->device_addr != iova) {
+		dprintk(VIDC_ERR, "iova(%pa) truncated to %#x",
+			&iova, (u32)mem->device_addr);
+		goto fail_device_address;
+	}
+	mem->size = size;
+	dprintk(VIDC_DBG,
+		"%s: ion_handle = %pK, device_addr = %pa, size = %#zx, kvaddr = %pK, buffer_type = %#x, flags = %#lx\n",
+		__func__, mem->smem_priv, &mem->device_addr,
+		mem->size, mem->kvaddr, mem->buffer_type, mem->flags);
+	return rc;
+fail_device_address:
+	if (mem->kvaddr)
+		ion_unmap_kernel(client->clnt, hndl);
+fail_map:
+	ion_free(client->clnt, hndl);
+fail_shared_mem_alloc:
+	return rc;
+}
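+
+/*
+ * Heap selection above, in order of precedence: secure allocations use
+ * the secure heap (or the CP_MM heap with 1 MB size/alignment on
+ * slave-side content-protection targets); otherwise the IOMMU heap when
+ * an SMMU is present, else the ADSP heap on the physical-address path.
+ */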
+
+static void free_ion_mem(struct smem_client *client, struct msm_smem *mem)
+{
+	dprintk(VIDC_DBG,
+		"%s: ion_handle = %pK, device_addr = %pa, size = %#zx, kvaddr = %pK, buffer_type = %#x\n",
+		__func__, mem->smem_priv, &mem->device_addr,
+		mem->size, mem->kvaddr, mem->buffer_type);
+
+	if (mem->device_addr)
+		put_device_address(client, mem->smem_priv, mem->flags,
+			&mem->mapping_info, mem->buffer_type);
+
+	if (mem->kvaddr)
+		ion_unmap_kernel(client->clnt, mem->smem_priv);
+	if (mem->smem_priv) {
+		trace_msm_smem_buffer_ion_op_start("FREE",
+				(u32)mem->buffer_type, -1, mem->size, -1,
+				mem->flags, -1);
+		dprintk(VIDC_DBG,
+			"%s: Freeing handle %pK, client: %pK\n",
+			__func__, mem->smem_priv, client->clnt);
+		ion_free(client->clnt, mem->smem_priv);
+		trace_msm_smem_buffer_ion_op_end("FREE", (u32)mem->buffer_type,
+			-1, mem->size, -1, mem->flags, -1);
+	}
+}
+
+static void *ion_new_client(void)
+{
+	struct ion_client *client = NULL;
+	client = msm_ion_client_create("video_client");
+	if (!client)
+		dprintk(VIDC_ERR, "Failed to create smem client\n");
+	return client;
+};
+
+static void ion_delete_client(struct smem_client *client)
+{
+	ion_client_destroy(client->clnt);
+}
+
+struct msm_smem *msm_smem_user_to_kernel(void *clt, int fd, u32 size,
+		enum hal_buffer buffer_type)
+{
+	struct smem_client *client = clt;
+	int rc = 0;
+	struct msm_smem *mem;
+	if (fd < 0) {
+		dprintk(VIDC_ERR, "Invalid fd: %d\n", fd);
+		return NULL;
+	}
+	mem = kzalloc(sizeof(*mem), GFP_KERNEL);
+	if (!mem) {
+		dprintk(VIDC_ERR, "Failed to allocte shared mem\n");
+		return NULL;
+	}
+	switch (client->mem_type) {
+	case SMEM_ION:
+		rc = ion_user_to_kernel(clt, fd, size, mem, buffer_type);
+		break;
+	default:
+		dprintk(VIDC_ERR, "Mem type not supported\n");
+		rc = -EINVAL;
+		break;
+	}
+	if (rc) {
+		dprintk(VIDC_ERR, "Failed to allocate shared memory\n");
+		kfree(mem);
+		mem = NULL;
+	}
+	return mem;
+}
+
+bool msm_smem_compare_buffers(void *clt, int fd, void *priv)
+{
+	struct smem_client *client = clt;
+	struct ion_handle *handle = NULL;
+	bool ret = false;
+
+	if (!clt || !priv) {
+		dprintk(VIDC_ERR, "Invalid params: %pK, %pK\n",
+			clt, priv);
+		return false;
+	}
+	handle = ion_import_dma_buf(client->clnt, fd);
+	ret = handle == priv;
+	if (!IS_ERR_OR_NULL(handle))
+		ion_free(client->clnt, handle);
+	return ret;
+}
+
+static int ion_cache_operations(struct smem_client *client,
+	struct msm_smem *mem, enum smem_cache_ops cache_op,
+	int size)
+{
+	unsigned long ionflag = 0;
+	int rc = 0;
+	int msm_cache_ops = 0;
+	int op_size = 0;
+	if (!mem || !client) {
+		dprintk(VIDC_ERR, "Invalid params: %pK, %pK\n",
+			mem, client);
+		return -EINVAL;
+	}
+	rc = ion_handle_get_flags(client->clnt,	mem->smem_priv,
+		&ionflag);
+	if (rc) {
+		dprintk(VIDC_ERR,
+			"ion_handle_get_flags failed: %d\n", rc);
+		goto cache_op_failed;
+	}
+	if (ION_IS_CACHED(ionflag)) {
+		switch (cache_op) {
+		case SMEM_CACHE_CLEAN:
+			msm_cache_ops = ION_IOC_CLEAN_CACHES;
+			break;
+		case SMEM_CACHE_INVALIDATE:
+			msm_cache_ops = ION_IOC_INV_CACHES;
+			break;
+		case SMEM_CACHE_CLEAN_INVALIDATE:
+			msm_cache_ops = ION_IOC_CLEAN_INV_CACHES;
+			break;
+		default:
+			dprintk(VIDC_ERR, "cache operation not supported\n");
+			rc = -EINVAL;
+			goto cache_op_failed;
+		}
+		if (size <= 0)
+			op_size = mem->size;
+		else
+			op_size = mem->size < size ? mem->size : size;
+
+		rc = msm_ion_do_cache_offset_op(client->clnt,
+				(struct ion_handle *)mem->smem_priv,
+				0, mem->offset,
+				(unsigned long)op_size, msm_cache_ops);
+		if (rc) {
+			dprintk(VIDC_ERR,
+					"cache operation failed %d\n", rc);
+			goto cache_op_failed;
+		}
+	}
+cache_op_failed:
+	return rc;
+}
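+
+/*
+ * Illustrative use before handing a CPU-written buffer to the firmware
+ * (size <= 0 falls back to operating on the whole allocation):
+ *
+ *	msm_smem_cache_operations(client, mem, SMEM_CACHE_CLEAN, 0);
+ */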
+
+int msm_smem_cache_operations(void *clt, struct msm_smem *mem,
+		enum smem_cache_ops cache_op, int size)
+{
+	struct smem_client *client = clt;
+	int rc = 0;
+	if (!client) {
+		dprintk(VIDC_ERR, "Invalid params: %pK\n",
+			client);
+		return -EINVAL;
+	}
+	switch (client->mem_type) {
+	case SMEM_ION:
+		rc = ion_cache_operations(client, mem, cache_op, size);
+		if (rc)
+			dprintk(VIDC_ERR,
+			"Failed cache operations: %d\n", rc);
+		break;
+	default:
+		dprintk(VIDC_ERR, "Mem type not supported\n");
+		break;
+	}
+	return rc;
+}
+
+void *msm_smem_new_client(enum smem_type mtype,
+		void *platform_resources, enum session_type stype)
+{
+	struct smem_client *client = NULL;
+	void *clnt = NULL;
+	struct msm_vidc_platform_resources *res = platform_resources;
+	switch (mtype) {
+	case SMEM_ION:
+		clnt = ion_new_client();
+		break;
+	default:
+		dprintk(VIDC_ERR, "Mem type not supported\n");
+		break;
+	}
+	if (clnt) {
+		client = kzalloc(sizeof(*client), GFP_KERNEL);
+		if (client) {
+			client->mem_type = mtype;
+			client->clnt = clnt;
+			client->res = res;
+			client->session_type = stype;
+		}
+	} else {
+		dprintk(VIDC_ERR, "Failed to create new client: mtype = %d\n",
+			mtype);
+	}
+	return client;
+}
+
+struct msm_smem *msm_smem_alloc(void *clt, size_t size, u32 align, u32 flags,
+		enum hal_buffer buffer_type, int map_kernel)
+{
+	struct smem_client *client;
+	int rc = 0;
+	struct msm_smem *mem;
+	client = clt;
+	if (!client) {
+		dprintk(VIDC_ERR, "Invalid  client passed\n");
+		return NULL;
+	}
+	if (!size) {
+		dprintk(VIDC_ERR, "No need to allocate memory of size: %zx\n",
+			size);
+		return NULL;
+	}
+	mem = kzalloc(sizeof(*mem), GFP_KERNEL);
+	if (!mem) {
+		dprintk(VIDC_ERR, "Failed to allocate shared mem\n");
+		return NULL;
+	}
+	switch (client->mem_type) {
+	case SMEM_ION:
+		rc = alloc_ion_mem(client, size, align, flags, buffer_type,
+					mem, map_kernel);
+		break;
+	default:
+		dprintk(VIDC_ERR, "Mem type not supported\n");
+		rc = -EINVAL;
+		break;
+	}
+	if (rc) {
+		dprintk(VIDC_ERR, "Failed to allocate shared memory\n");
+		kfree(mem);
+		mem = NULL;
+	}
+	return mem;
+}
+
+void msm_smem_free(void *clt, struct msm_smem *mem)
+{
+	struct smem_client *client = clt;
+	if (!client || !mem) {
+		dprintk(VIDC_ERR, "Invalid  client/handle passed\n");
+		return;
+	}
+	switch (client->mem_type) {
+	case SMEM_ION:
+		free_ion_mem(client, mem);
+		break;
+	default:
+		dprintk(VIDC_ERR, "Mem type not supported\n");
+		break;
+	}
+	kfree(mem);
+};
+
+void msm_smem_delete_client(void *clt)
+{
+	struct smem_client *client = clt;
+	if (!client) {
+		dprintk(VIDC_ERR, "Invalid  client passed\n");
+		return;
+	}
+	switch (client->mem_type) {
+	case SMEM_ION:
+		ion_delete_client(client);
+		break;
+	default:
+		dprintk(VIDC_ERR, "Mem type not supported\n");
+		break;
+	}
+	kfree(client);
+}
+
+struct context_bank_info *msm_smem_get_context_bank(void *clt,
+			bool is_secure, enum hal_buffer buffer_type)
+{
+	struct smem_client *client = clt;
+	struct context_bank_info *cb = NULL, *match = NULL;
+
+	if (!clt) {
+		dprintk(VIDC_ERR, "%s - invalid params\n", __func__);
+		return NULL;
+	}
+
+	/*
+	 * HAL_BUFFER_INPUT is mapped directly to the bitstream CB in DT
+	 * because the buffer-type structure was originally designed for
+	 * the decoder only. For the encoder, input should map to the
+	 * pixel CB instead, so swap the buffer types in this local scope.
+	 */
+	if (is_secure && client->session_type == MSM_VIDC_ENCODER) {
+		if (buffer_type == HAL_BUFFER_INPUT)
+			buffer_type = HAL_BUFFER_OUTPUT;
+		else if (buffer_type == HAL_BUFFER_OUTPUT)
+			buffer_type = HAL_BUFFER_INPUT;
+	}
+
+	list_for_each_entry(cb, &client->res->context_banks, list) {
+		if (cb->is_secure == is_secure &&
+				cb->buffer_type & buffer_type) {
+			match = cb;
+			dprintk(VIDC_DBG,
+				"context bank found for CB : %s, device: %pK mapping: %pK\n",
+				match->name, match->dev, match->mapping);
+			break;
+		}
+	}
+
+	return match;
+}
diff -Nruw linux-4.4.115-fbx/drivers/media/platform/msm./vidc/msm_v4l2_vidc.c linux-4.4.115-fbx/drivers/media/platform/msm/vidc/msm_v4l2_vidc.c
--- linux-4.4.115-fbx/drivers/media/platform/msm./vidc/msm_v4l2_vidc.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/media/platform/msm/vidc/msm_v4l2_vidc.c	2019-10-29 09:26:23.957206251 +0100
@@ -0,0 +1,799 @@
+/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/debugfs.h>
+#include <linux/dma-mapping.h>
+#include <linux/init.h>
+#include <linux/ioctl.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/version.h>
+#include <linux/io.h>
+#include <media/msm_vidc.h>
+#include "msm_vidc_common.h"
+#include "msm_vidc_debug.h"
+#include "msm_vidc_internal.h"
+#include "msm_vidc_res_parse.h"
+#include "msm_vidc_resources.h"
+#include "venus_boot.h"
+#include "vidc_hfi_api.h"
+
+#define BASE_DEVICE_NUMBER 32
+
+struct msm_vidc_drv *vidc_driver;
+
+uint32_t msm_vidc_pwr_collapse_delay = 3000;
+
+static inline struct msm_vidc_inst *get_vidc_inst(struct file *filp, void *fh)
+{
+	return container_of(filp->private_data,
+					struct msm_vidc_inst, event_handler);
+}
+
+static int msm_v4l2_open(struct file *filp)
+{
+	struct video_device *vdev = video_devdata(filp);
+	struct msm_video_device *vid_dev =
+		container_of(vdev, struct msm_video_device, vdev);
+	struct msm_vidc_core *core = video_drvdata(filp);
+	struct msm_vidc_inst *vidc_inst;
+
+	trace_msm_v4l2_vidc_open_start("msm_v4l2_open start");
+	vidc_inst = msm_vidc_open(core->id, vid_dev->type);
+	if (!vidc_inst) {
+		dprintk(VIDC_ERR,
+			"Failed to create video instance, core: %d, type = %d\n",
+			core->id, vid_dev->type);
+		return -ENOMEM;
+	}
+	clear_bit(V4L2_FL_USES_V4L2_FH, &vdev->flags);
+	filp->private_data = &(vidc_inst->event_handler);
+	trace_msm_v4l2_vidc_open_end("msm_v4l2_open end");
+	return 0;
+}
+
+static int msm_v4l2_close(struct file *filp)
+{
+	int rc = 0;
+	struct msm_vidc_inst *vidc_inst;
+
+	trace_msm_v4l2_vidc_close_start("msm_v4l2_close start");
+	vidc_inst = get_vidc_inst(filp, NULL);
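+	/* Release any capture-port buffers still held before closing. */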
+	rc = msm_vidc_release_buffers(vidc_inst,
+			V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
+	if (rc)
+		dprintk(VIDC_WARN,
+			"Failed in %s for release output buffers\n", __func__);
+
+	rc = msm_vidc_close(vidc_inst);
+	trace_msm_v4l2_vidc_close_end("msm_v4l2_close end");
+	return rc;
+}
+
+static int msm_v4l2_querycap(struct file *filp, void *fh,
+			struct v4l2_capability *cap)
+{
+	struct msm_vidc_inst *vidc_inst = get_vidc_inst(filp, fh);
+	return msm_vidc_querycap((void *)vidc_inst, cap);
+}
+
+int msm_v4l2_enum_fmt(struct file *file, void *fh,
+					struct v4l2_fmtdesc *f)
+{
+	struct msm_vidc_inst *vidc_inst = get_vidc_inst(file, fh);
+	return msm_vidc_enum_fmt((void *)vidc_inst, f);
+}
+
+int msm_v4l2_s_fmt(struct file *file, void *fh,
+					struct v4l2_format *f)
+{
+	struct msm_vidc_inst *vidc_inst = get_vidc_inst(file, fh);
+	return msm_vidc_s_fmt((void *)vidc_inst, f);
+}
+
+int msm_v4l2_g_fmt(struct file *file, void *fh,
+					struct v4l2_format *f)
+{
+	struct msm_vidc_inst *vidc_inst = get_vidc_inst(file, fh);
+	return msm_vidc_g_fmt((void *)vidc_inst, f);
+}
+
+int msm_v4l2_s_ctrl(struct file *file, void *fh,
+					struct v4l2_control *a)
+{
+	struct msm_vidc_inst *vidc_inst = get_vidc_inst(file, fh);
+	return msm_vidc_s_ctrl((void *)vidc_inst, a);
+}
+
+int msm_v4l2_g_ctrl(struct file *file, void *fh,
+					struct v4l2_control *a)
+{
+	struct msm_vidc_inst *vidc_inst = get_vidc_inst(file, fh);
+	return msm_vidc_g_ctrl((void *)vidc_inst, a);
+}
+
+int msm_v4l2_s_ext_ctrl(struct file *file, void *fh,
+					struct v4l2_ext_controls *a)
+{
+	struct msm_vidc_inst *vidc_inst = get_vidc_inst(file, fh);
+	return msm_vidc_s_ext_ctrl((void *)vidc_inst, a);
+}
+
+int msm_v4l2_reqbufs(struct file *file, void *fh,
+				struct v4l2_requestbuffers *b)
+{
+	struct msm_vidc_inst *vidc_inst = get_vidc_inst(file, fh);
+	return msm_vidc_reqbufs((void *)vidc_inst, b);
+}
+
+int msm_v4l2_prepare_buf(struct file *file, void *fh,
+				struct v4l2_buffer *b)
+{
+	return msm_vidc_prepare_buf(get_vidc_inst(file, fh), b);
+}
+
+int msm_v4l2_qbuf(struct file *file, void *fh,
+				struct v4l2_buffer *b)
+{
+	return msm_vidc_qbuf(get_vidc_inst(file, fh), b);
+}
+
+int msm_v4l2_dqbuf(struct file *file, void *fh,
+				struct v4l2_buffer *b)
+{
+	return msm_vidc_dqbuf(get_vidc_inst(file, fh), b);
+}
+
+int msm_v4l2_streamon(struct file *file, void *fh,
+				enum v4l2_buf_type i)
+{
+	struct msm_vidc_inst *vidc_inst = get_vidc_inst(file, fh);
+	return msm_vidc_streamon((void *)vidc_inst, i);
+}
+
+int msm_v4l2_streamoff(struct file *file, void *fh,
+				enum v4l2_buf_type i)
+{
+	struct msm_vidc_inst *vidc_inst = get_vidc_inst(file, fh);
+	return msm_vidc_streamoff((void *)vidc_inst, i);
+}
+
+static int msm_v4l2_subscribe_event(struct v4l2_fh *fh,
+				const struct v4l2_event_subscription *sub)
+{
+	struct msm_vidc_inst *vidc_inst = container_of(fh,
+			struct msm_vidc_inst, event_handler);
+	return msm_vidc_subscribe_event((void *)vidc_inst, sub);
+}
+
+static int msm_v4l2_unsubscribe_event(struct v4l2_fh *fh,
+				const struct v4l2_event_subscription *sub)
+{
+	struct msm_vidc_inst *vidc_inst = container_of(fh,
+			struct msm_vidc_inst, event_handler);
+	return msm_vidc_unsubscribe_event((void *)vidc_inst, sub);
+}
+
+static int msm_v4l2_decoder_cmd(struct file *file, void *fh,
+				struct v4l2_decoder_cmd *dec)
+{
+	struct msm_vidc_inst *vidc_inst = get_vidc_inst(file, fh);
+
+	return msm_vidc_comm_cmd((void *)vidc_inst, (union msm_v4l2_cmd *)dec);
+}
+
+static int msm_v4l2_encoder_cmd(struct file *file, void *fh,
+				struct v4l2_encoder_cmd *enc)
+{
+	struct msm_vidc_inst *vidc_inst = get_vidc_inst(file, fh);
+
+	return msm_vidc_comm_cmd((void *)vidc_inst, (union msm_v4l2_cmd *)enc);
+}
+static int msm_v4l2_s_parm(struct file *file, void *fh,
+			struct v4l2_streamparm *a)
+{
+	struct msm_vidc_inst *vidc_inst = get_vidc_inst(file, fh);
+	return msm_vidc_comm_s_parm(vidc_inst, a);
+}
+static int msm_v4l2_g_parm(struct file *file, void *fh,
+		struct v4l2_streamparm *a)
+{
+	return 0;
+}
+
+static int msm_v4l2_enum_framesizes(struct file *file, void *fh,
+				struct v4l2_frmsizeenum *fsize)
+{
+	struct msm_vidc_inst *vidc_inst = get_vidc_inst(file, fh);
+	return msm_vidc_enum_framesizes((void *)vidc_inst, fsize);
+}
+
+static int msm_v4l2_queryctrl(struct file *file, void *fh,
+	struct v4l2_queryctrl *ctrl)
+{
+	struct msm_vidc_inst *vidc_inst = get_vidc_inst(file, fh);
+
+	return msm_vidc_query_ctrl((void *)vidc_inst, ctrl);
+}
+
+static int msm_v4l2_query_ext_ctrl(struct file *file, void *fh,
+	struct v4l2_query_ext_ctrl *ctrl)
+{
+	struct msm_vidc_inst *vidc_inst = get_vidc_inst(file, fh);
+
+	return msm_vidc_query_ext_ctrl((void *)vidc_inst, ctrl);
+}
+
+static const struct v4l2_ioctl_ops msm_v4l2_ioctl_ops = {
+	.vidioc_querycap = msm_v4l2_querycap,
+	.vidioc_enum_fmt_vid_cap_mplane = msm_v4l2_enum_fmt,
+	.vidioc_enum_fmt_vid_out_mplane = msm_v4l2_enum_fmt,
+	.vidioc_s_fmt_vid_cap_mplane = msm_v4l2_s_fmt,
+	.vidioc_s_fmt_vid_out_mplane = msm_v4l2_s_fmt,
+	.vidioc_g_fmt_vid_cap_mplane = msm_v4l2_g_fmt,
+	.vidioc_g_fmt_vid_out_mplane = msm_v4l2_g_fmt,
+	.vidioc_reqbufs = msm_v4l2_reqbufs,
+	.vidioc_prepare_buf = msm_v4l2_prepare_buf,
+	.vidioc_qbuf = msm_v4l2_qbuf,
+	.vidioc_dqbuf = msm_v4l2_dqbuf,
+	.vidioc_streamon = msm_v4l2_streamon,
+	.vidioc_streamoff = msm_v4l2_streamoff,
+	.vidioc_s_ctrl = msm_v4l2_s_ctrl,
+	.vidioc_g_ctrl = msm_v4l2_g_ctrl,
+	.vidioc_queryctrl = msm_v4l2_queryctrl,
+	.vidioc_query_ext_ctrl = msm_v4l2_query_ext_ctrl,
+	.vidioc_s_ext_ctrls = msm_v4l2_s_ext_ctrl,
+	.vidioc_subscribe_event = msm_v4l2_subscribe_event,
+	.vidioc_unsubscribe_event = msm_v4l2_unsubscribe_event,
+	.vidioc_decoder_cmd = msm_v4l2_decoder_cmd,
+	.vidioc_encoder_cmd = msm_v4l2_encoder_cmd,
+	.vidioc_s_parm = msm_v4l2_s_parm,
+	.vidioc_g_parm = msm_v4l2_g_parm,
+	.vidioc_enum_framesizes = msm_v4l2_enum_framesizes,
+};
+
+static const struct v4l2_ioctl_ops msm_v4l2_enc_ioctl_ops = {
+};
+
+static unsigned int msm_v4l2_poll(struct file *filp,
+	struct poll_table_struct *pt)
+{
+	struct msm_vidc_inst *vidc_inst = get_vidc_inst(filp, NULL);
+	return msm_vidc_poll((void *)vidc_inst, filp, pt);
+}
+
+static const struct v4l2_file_operations msm_v4l2_vidc_fops = {
+	.owner = THIS_MODULE,
+	.open = msm_v4l2_open,
+	.release = msm_v4l2_close,
+	.unlocked_ioctl = video_ioctl2,
+	.poll = msm_v4l2_poll,
+};
+
+void msm_vidc_release_video_device(struct video_device *pvdev)
+{
+}
+
+static int read_platform_resources(struct msm_vidc_core *core,
+		struct platform_device *pdev)
+{
+	if (!core || !pdev) {
+		dprintk(VIDC_ERR, "%s: Invalid params %pK %pK\n",
+			__func__, core, pdev);
+		return -EINVAL;
+	}
+
+	core->hfi_type = VIDC_HFI_VENUS;
+	core->resources.pdev = pdev;
+	if (pdev->dev.of_node) {
+		/* Target supports DT, parse from it */
+		return read_platform_resources_from_dt(&core->resources);
+	} else {
+		dprintk(VIDC_ERR, "pdev node is NULL\n");
+		return -EINVAL;
+	}
+}
+
+static int msm_vidc_initialize_core(struct platform_device *pdev,
+				struct msm_vidc_core *core)
+{
+	int i = 0;
+	int rc = 0;
+	if (!core)
+		return -EINVAL;
+	rc = read_platform_resources(core, pdev);
+	if (rc) {
+		dprintk(VIDC_ERR, "Failed to get platform resources\n");
+		return rc;
+	}
+
+	INIT_LIST_HEAD(&core->instances);
+	mutex_init(&core->lock);
+
+	core->state = VIDC_CORE_UNINIT;
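+	/* One completion per system-level HFI message. */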
+	for (i = SYS_MSG_INDEX(SYS_MSG_START);
+		i <= SYS_MSG_INDEX(SYS_MSG_END); i++) {
+		init_completion(&core->completions[i]);
+	}
+
+	msm_comm_sort_ctrl();
+	INIT_DELAYED_WORK(&core->fw_unload_work, msm_vidc_fw_unload_handler);
+	return rc;
+}
+
+static ssize_t msm_vidc_link_name_show(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	struct msm_vidc_core *core = dev_get_drvdata(dev);
+
+	if (!core)
+		return 0;
+	if (dev == &core->vdev[MSM_VIDC_DECODER].vdev.dev)
+		return snprintf(buf, PAGE_SIZE, "venus_dec");
+	if (dev == &core->vdev[MSM_VIDC_ENCODER].vdev.dev)
+		return snprintf(buf, PAGE_SIZE, "venus_enc");
+	return 0;
+}
+
+static DEVICE_ATTR(link_name, 0444, msm_vidc_link_name_show, NULL);
+
+static ssize_t store_pwr_collapse_delay(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	unsigned long val = 0;
+	int rc = 0;
+	rc = kstrtoul(buf, 0, &val);
+	if (rc)
+		return rc;
+	else if (!val)
+		return -EINVAL;
+	msm_vidc_pwr_collapse_delay = val;
+	return count;
+}
+
+static ssize_t show_pwr_collapse_delay(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%u\n", msm_vidc_pwr_collapse_delay);
+}
+
+static DEVICE_ATTR(pwr_collapse_delay, 0644, show_pwr_collapse_delay,
+		store_pwr_collapse_delay);
+
+static ssize_t show_thermal_level(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%d\n", vidc_driver->thermal_level);
+}
+
+static ssize_t store_thermal_level(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	int rc = 0, val = 0;
+
+	rc = kstrtoint(buf, 0, &val);
+	if (rc || val < 0) {
+		dprintk(VIDC_WARN,
+			"Invalid thermal level value: %s\n", buf);
+		return -EINVAL;
+	}
+	dprintk(VIDC_DBG, "Thermal level old %d new %d\n",
+			vidc_driver->thermal_level, val);
+
+	if (val == vidc_driver->thermal_level)
+		return count;
+	vidc_driver->thermal_level = val;
+
+	msm_comm_handle_thermal_event();
+	return count;
+}
+
+static DEVICE_ATTR(thermal_level, S_IRUGO | S_IWUSR, show_thermal_level,
+		store_thermal_level);
+
+static ssize_t show_platform_version(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	return scnprintf(buf, PAGE_SIZE, "%d",
+			vidc_driver->platform_version);
+}
+
+static ssize_t store_platform_version(struct device *dev,
+		struct device_attribute *attr, const char *buf,
+		size_t count)
+{
+	dprintk(VIDC_WARN, "store platform version is not allowed\n");
+	return count;
+}
+
+static DEVICE_ATTR(platform_version, S_IRUGO, show_platform_version,
+		store_platform_version);
+
+static struct attribute *msm_vidc_core_attrs[] = {
+		&dev_attr_pwr_collapse_delay.attr,
+		&dev_attr_thermal_level.attr,
+		&dev_attr_platform_version.attr,
+		NULL
+};
+
+static struct attribute_group msm_vidc_core_attr_group = {
+		.attrs = msm_vidc_core_attrs,
+};
+
+static const struct of_device_id msm_vidc_dt_match[] = {
+	{.compatible = "qcom,msm-vidc"},
+	{.compatible = "qcom,msm-vidc,context-bank"},
+	{.compatible = "qcom,msm-vidc,bus"},
+	{}
+};
+
+static int msm_vidc_probe_vidc_device(struct platform_device *pdev)
+{
+	int rc = 0;
+	void __iomem *base;
+	struct resource *res;
+	struct msm_vidc_core *core;
+	struct device *dev;
+	int nr = BASE_DEVICE_NUMBER;
+
+	if (!vidc_driver) {
+		dprintk(VIDC_ERR, "Invalid vidc driver\n");
+		return -EINVAL;
+	}
+
+	core = kzalloc(sizeof(*core), GFP_KERNEL);
+	if (!core)
+		return -ENOMEM;
+
+	dev_set_drvdata(&pdev->dev, core);
+	rc = msm_vidc_initialize_core(pdev, core);
+	if (rc) {
+		dprintk(VIDC_ERR, "Failed to init core\n");
+		goto err_core_init;
+	}
+	rc = sysfs_create_group(&pdev->dev.kobj, &msm_vidc_core_attr_group);
+	if (rc) {
+		dprintk(VIDC_ERR,
+				"Failed to create attributes\n");
+		goto err_core_init;
+	}
+
+	core->id = MSM_VIDC_CORE_VENUS;
+
+	rc = v4l2_device_register(&pdev->dev, &core->v4l2_dev);
+	if (rc) {
+		dprintk(VIDC_ERR, "Failed to register v4l2 device\n");
+		goto err_v4l2_register;
+	}
+
+	/* setup the decoder device */
+	core->vdev[MSM_VIDC_DECODER].vdev.release =
+		msm_vidc_release_video_device;
+	core->vdev[MSM_VIDC_DECODER].vdev.fops = &msm_v4l2_vidc_fops;
+	core->vdev[MSM_VIDC_DECODER].vdev.ioctl_ops = &msm_v4l2_ioctl_ops;
+	core->vdev[MSM_VIDC_DECODER].vdev.vfl_dir = VFL_DIR_M2M;
+	core->vdev[MSM_VIDC_DECODER].type = MSM_VIDC_DECODER;
+	core->vdev[MSM_VIDC_DECODER].vdev.v4l2_dev = &core->v4l2_dev;
+	rc = video_register_device(&core->vdev[MSM_VIDC_DECODER].vdev,
+					VFL_TYPE_GRABBER, nr);
+	if (rc) {
+		dprintk(VIDC_ERR, "Failed to register video decoder device");
+		goto err_dec_register;
+	}
+
+	video_set_drvdata(&core->vdev[MSM_VIDC_DECODER].vdev, core);
+	dev = &core->vdev[MSM_VIDC_DECODER].vdev.dev;
+	rc = device_create_file(dev, &dev_attr_link_name);
+	if (rc) {
+		dprintk(VIDC_ERR,
+				"Failed to create link name sysfs for decoder");
+		goto err_dec_attr_link_name;
+	}
+
+	/* setup the encoder device */
+	core->vdev[MSM_VIDC_ENCODER].vdev.release =
+		msm_vidc_release_video_device;
+	core->vdev[MSM_VIDC_ENCODER].vdev.fops = &msm_v4l2_vidc_fops;
+	core->vdev[MSM_VIDC_ENCODER].vdev.ioctl_ops = &msm_v4l2_ioctl_ops;
+	core->vdev[MSM_VIDC_ENCODER].vdev.vfl_dir = VFL_DIR_M2M;
+	core->vdev[MSM_VIDC_ENCODER].type = MSM_VIDC_ENCODER;
+	core->vdev[MSM_VIDC_ENCODER].vdev.v4l2_dev = &core->v4l2_dev;
+	rc = video_register_device(&core->vdev[MSM_VIDC_ENCODER].vdev,
+				VFL_TYPE_GRABBER, nr + 1);
+	if (rc) {
+		dprintk(VIDC_ERR, "Failed to register video encoder device");
+		goto err_enc_register;
+	}
+
+	video_set_drvdata(&core->vdev[MSM_VIDC_ENCODER].vdev, core);
+	dev = &core->vdev[MSM_VIDC_ENCODER].vdev.dev;
+	rc = device_create_file(dev, &dev_attr_link_name);
+	if (rc) {
+		dprintk(VIDC_ERR,
+				"Failed to create link name sysfs for encoder");
+		goto err_enc_attr_link_name;
+	}
+
+	/* finish setting up the 'core' */
+	mutex_lock(&vidc_driver->lock);
+	if (vidc_driver->num_cores + 1 > MSM_VIDC_CORES_MAX) {
+		mutex_unlock(&vidc_driver->lock);
+		dprintk(VIDC_ERR, "Maximum cores already exist, core_no = %d\n",
+				vidc_driver->num_cores);
+		goto err_cores_exceeded;
+	}
+	vidc_driver->num_cores++;
+	mutex_unlock(&vidc_driver->lock);
+
+	core->device = vidc_hfi_initialize(core->hfi_type, core->id,
+				&core->resources, &handle_cmd_response);
+	if (IS_ERR_OR_NULL(core->device)) {
+		mutex_lock(&vidc_driver->lock);
+		vidc_driver->num_cores--;
+		mutex_unlock(&vidc_driver->lock);
+
+		rc = PTR_ERR(core->device) ?: -EBADHANDLE;
+		if (rc != -EPROBE_DEFER)
+			dprintk(VIDC_ERR, "Failed to create HFI device\n");
+		else
+			dprintk(VIDC_DBG, "msm_vidc: request probe defer\n");
+		goto err_cores_exceeded;
+	}
+
+	mutex_lock(&vidc_driver->lock);
+	list_add_tail(&core->list, &vidc_driver->cores);
+	mutex_unlock(&vidc_driver->lock);
+
+	core->debugfs_root = msm_vidc_debugfs_init_core(
+		core, vidc_driver->debugfs_root);
+
+	vidc_driver->platform_version = 0;
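+	/* Optionally read the platform version from an efuse register. */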
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "efuse");
+	if (!res) {
+		dprintk(VIDC_DBG, "failed to get efuse resource\n");
+	} else {
+		base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
+		if (!base) {
+			dprintk(VIDC_ERR,
+				"failed efuse ioremap: res->start %#x, size %d\n",
+				(u32)res->start, (u32)resource_size(res));
+		} else {
+			u32 efuse = 0;
+			struct platform_version_table *pf_ver_tbl =
+				core->resources.pf_ver_tbl;
+
+			efuse = readl_relaxed(base);
+			vidc_driver->platform_version =
+				(efuse & pf_ver_tbl->version_mask) >>
+				pf_ver_tbl->version_shift;
+			dprintk(VIDC_DBG,
+				"efuse 0x%x, platform version 0x%x\n",
+				efuse, vidc_driver->platform_version);
+
+			devm_iounmap(&pdev->dev, base);
+		}
+	}
+
+	dprintk(VIDC_DBG, "populating sub devices\n");
+	/*
+	 * Trigger probe for each sub-device i.e. qcom,msm-vidc,context-bank.
+	 * When msm_vidc_probe is called for each sub-device, parse the
+	 * context-bank details and store it in core->resources.context_banks
+	 * list.
+	 */
+	rc = of_platform_populate(pdev->dev.of_node, msm_vidc_dt_match, NULL,
+			&pdev->dev);
+	if (rc) {
+		dprintk(VIDC_ERR, "Failed to trigger probe for sub-devices\n");
+		goto err_fail_sub_device_probe;
+	}
+
+	return rc;
+
+err_fail_sub_device_probe:
+	vidc_hfi_deinitialize(core->hfi_type, core->device);
+err_cores_exceeded:
+	device_remove_file(&core->vdev[MSM_VIDC_ENCODER].vdev.dev,
+			&dev_attr_link_name);
+err_enc_attr_link_name:
+	video_unregister_device(&core->vdev[MSM_VIDC_ENCODER].vdev);
+err_enc_register:
+	device_remove_file(&core->vdev[MSM_VIDC_DECODER].vdev.dev,
+			&dev_attr_link_name);
+err_dec_attr_link_name:
+	video_unregister_device(&core->vdev[MSM_VIDC_DECODER].vdev);
+err_dec_register:
+	v4l2_device_unregister(&core->v4l2_dev);
+err_v4l2_register:
+	sysfs_remove_group(&pdev->dev.kobj, &msm_vidc_core_attr_group);
+err_core_init:
+	dev_set_drvdata(&pdev->dev, NULL);
+	kfree(core);
+	return rc;
+}
+
+static int msm_vidc_probe_context_bank(struct platform_device *pdev)
+{
+	return read_context_bank_resources_from_dt(pdev);
+}
+
+static int msm_vidc_probe_bus(struct platform_device *pdev)
+{
+	return read_bus_resources_from_dt(pdev);
+}
+
+static int msm_vidc_probe(struct platform_device *pdev)
+{
+	/*
+	 * Sub devices probe will be triggered by of_platform_populate() towards
+	 * the end of the probe function after msm-vidc device probe is
+	 * completed. Return immediately after completing sub-device probe.
+	 */
+	if (of_device_is_compatible(pdev->dev.of_node, "qcom,msm-vidc")) {
+		return msm_vidc_probe_vidc_device(pdev);
+	} else if (of_device_is_compatible(pdev->dev.of_node,
+		"qcom,msm-vidc,bus")) {
+		return msm_vidc_probe_bus(pdev);
+	} else if (of_device_is_compatible(pdev->dev.of_node,
+		"qcom,msm-vidc,context-bank")) {
+		return msm_vidc_probe_context_bank(pdev);
+	} else {
+		/* How did we end up here? */
+		BUG();
+		return -EINVAL;
+	}
+}
+
+static int msm_vidc_remove(struct platform_device *pdev)
+{
+	int rc = 0;
+	struct msm_vidc_core *core;
+
+	if (!pdev) {
+		dprintk(VIDC_ERR, "%s invalid input %pK", __func__, pdev);
+		return -EINVAL;
+	}
+
+	core = dev_get_drvdata(&pdev->dev);
+	if (!core) {
+		dprintk(VIDC_ERR, "%s invalid core", __func__);
+		return -EINVAL;
+	}
+
+	if (core->resources.use_non_secure_pil)
+		venus_boot_deinit();
+
+	vidc_hfi_deinitialize(core->hfi_type, core->device);
+	device_remove_file(&core->vdev[MSM_VIDC_ENCODER].vdev.dev,
+				&dev_attr_link_name);
+	video_unregister_device(&core->vdev[MSM_VIDC_ENCODER].vdev);
+	device_remove_file(&core->vdev[MSM_VIDC_DECODER].vdev.dev,
+				&dev_attr_link_name);
+	video_unregister_device(&core->vdev[MSM_VIDC_DECODER].vdev);
+	v4l2_device_unregister(&core->v4l2_dev);
+
+	msm_vidc_free_platform_resources(&core->resources);
+	sysfs_remove_group(&pdev->dev.kobj, &msm_vidc_core_attr_group);
+	dev_set_drvdata(&pdev->dev, NULL);
+	mutex_destroy(&core->lock);
+	kfree(core);
+	return rc;
+}
+
+static int msm_vidc_pm_suspend(struct device *dev)
+{
+	int rc = 0;
+	struct msm_vidc_core *core;
+
+	/*
+	 * Bail out if
+	 * - driver possibly not probed yet
+	 * - not the main device. We don't support power management on
+	 *   subdevices (e.g. context banks)
+	 */
+	if (!dev || !dev->driver ||
+		!of_device_is_compatible(dev->of_node, "qcom,msm-vidc"))
+		return 0;
+
+	core = dev_get_drvdata(dev);
+	if (!core) {
+		dprintk(VIDC_ERR, "%s invalid core\n", __func__);
+		return -EINVAL;
+	}
+
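+	/* Ask the core to suspend; "not supported" counts as success. */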
+	rc = msm_vidc_suspend(core->id);
+	if (rc == -ENOTSUPP)
+		rc = 0;
+	else if (rc)
+		dprintk(VIDC_WARN, "Failed to suspend: %d\n", rc);
+
+	return rc;
+}
+
+static int msm_vidc_pm_resume(struct device *dev)
+{
+	dprintk(VIDC_INFO, "%s\n", __func__);
+	return 0;
+}
+
+static const struct dev_pm_ops msm_vidc_pm_ops = {
+	SET_SYSTEM_SLEEP_PM_OPS(msm_vidc_pm_suspend, msm_vidc_pm_resume)
+};
+
+MODULE_DEVICE_TABLE(of, msm_vidc_dt_match);
+
+static struct platform_driver msm_vidc_driver = {
+	.probe = msm_vidc_probe,
+	.remove = msm_vidc_remove,
+	.driver = {
+		.name = "msm_vidc_v4l2",
+		.owner = THIS_MODULE,
+		.of_match_table = msm_vidc_dt_match,
+		.pm = &msm_vidc_pm_ops,
+	},
+};
+
+static int __init msm_vidc_init(void)
+{
+	int rc = 0;
+	vidc_driver = kzalloc(sizeof(*vidc_driver),
+						GFP_KERNEL);
+	if (!vidc_driver) {
+		dprintk(VIDC_ERR,
+			"Failed to allocate memroy for msm_vidc_drv\n");
+		return -ENOMEM;
+	}
+
+	INIT_LIST_HEAD(&vidc_driver->cores);
+	mutex_init(&vidc_driver->lock);
+	vidc_driver->debugfs_root = msm_vidc_debugfs_init_drv();
+	if (!vidc_driver->debugfs_root)
+		dprintk(VIDC_ERR,
+			"Failed to create debugfs for msm_vidc\n");
+
+	rc = platform_driver_register(&msm_vidc_driver);
+	if (rc) {
+		dprintk(VIDC_ERR,
+			"Failed to register platform driver\n");
+		debugfs_remove_recursive(vidc_driver->debugfs_root);
+		kfree(vidc_driver);
+		vidc_driver = NULL;
+	}
+
+	return rc;
+}
+
+static void __exit msm_vidc_exit(void)
+{
+	platform_driver_unregister(&msm_vidc_driver);
+	debugfs_remove_recursive(vidc_driver->debugfs_root);
+	mutex_destroy(&vidc_driver->lock);
+	kfree(vidc_driver);
+	vidc_driver = NULL;
+}
+
+module_init(msm_vidc_init);
+module_exit(msm_vidc_exit);
+
+MODULE_LICENSE("GPL v2");
diff -Nruw linux-4.4.115/drivers/media/platform/msm/vidc/msm_vdec.c linux-4.4.115-fbx/drivers/media/platform/msm/vidc/msm_vdec.c
--- linux-4.4.115/drivers/media/platform/msm/vidc/msm_vdec.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/media/platform/msm/vidc/msm_vdec.c	2019-10-29 09:26:23.957206251 +0100
@@ -0,0 +1,2965 @@
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/sort.h>
+#include <linux/slab.h>
+#include <soc/qcom/scm.h>
+#include "msm_vidc_internal.h"
+#include "msm_vidc_common.h"
+#include "vidc_hfi_api.h"
+#include "msm_vidc_debug.h"
+#include "msm_vidc_dcvs.h"
+#include "msm_vdec.h"
+
+#define MSM_VDEC_DVC_NAME "msm_vdec_8974"
+#define MIN_NUM_OUTPUT_BUFFERS 4
+#define MIN_NUM_OUTPUT_BUFFERS_VP9 6
+#define MIN_NUM_OUTPUT_BUFFERS_HEVC 5
+#define MIN_NUM_CAPTURE_BUFFERS 6
+#define MIN_NUM_THUMBNAIL_MODE_CAPTURE_BUFFERS 1
+#define MAX_NUM_OUTPUT_BUFFERS VB2_MAX_FRAME
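+/* Packs chroma 0x80 and luma 0x10: black in limited-range YUV. */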
+#define DEFAULT_VIDEO_CONCEAL_COLOR_BLACK 0x8010
+#define MB_SIZE_IN_PIXEL (16 * 16)
+#define MAX_OPERATING_FRAME_RATE (300 << 16)
+#define OPERATING_FRAME_RATE_STEP (1 << 16)
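+/* Operating rates are in Q16 format: frames per second << 16. */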
+
+static const char *const mpeg_video_vidc_divx_format[] = {
+	"DIVX Format 3",
+	"DIVX Format 4",
+	"DIVX Format 5",
+	"DIVX Format 6",
+	NULL
+};
+static const char *const mpeg_video_stream_format[] = {
+	"NAL Format Start Codes",
+	"NAL Format One NAL Per Buffer",
+	"NAL Format One Byte Length",
+	"NAL Format Two Byte Length",
+	"NAL Format Four Byte Length",
+	NULL
+};
+static const char *const mpeg_video_output_order[] = {
+	"Display Order",
+	"Decode Order",
+	NULL
+};
+static const char *const mpeg_vidc_video_alloc_mode_type[] = {
+	"Buffer Allocation Static",
+	"Buffer Allocation Ring Buffer",
+	"Buffer Allocation Dynamic Buffer"
+};
+
+static const char *const perf_level[] = {
+	"Nominal",
+	"Performance",
+	"Turbo"
+};
+
+static const char *const h263_level[] = {
+	"1.0",
+	"2.0",
+	"3.0",
+	"4.0",
+	"4.5",
+	"5.0",
+	"6.0",
+	"7.0",
+};
+
+static const char *const h263_profile[] = {
+	"Baseline",
+	"H320 Coding",
+	"Backward Compatible",
+	"ISWV2",
+	"ISWV3",
+	"High Compression",
+	"Internet",
+	"Interlace",
+	"High Latency",
+};
+
+static const char *const vp8_profile_level[] = {
+	"Unused",
+	"0.0",
+	"1.0",
+	"2.0",
+	"3.0",
+};
+
+static const char *const mpeg2_profile[] = {
+	"Simple",
+	"Main",
+	"422",
+	"Snr Scalable",
+	"Spatial Scalable",
+	"High",
+};
+
+static const char *const mpeg2_level[] = {
+	"0",
+	"1",
+	"2",
+	"3",
+};
+static const char *const mpeg_vidc_video_entropy_mode[] = {
+	"CAVLC Entropy Mode",
+	"CABAC Entropy Mode",
+};
+
+static const char *const mpeg_vidc_video_h264_mvc_layout[] = {
+	"Frame packing arrangement sequential",
+	"Frame packing arrangement top-bottom",
+};
+
+static const char *const mpeg_vidc_video_dpb_color_format[] = {
+	"DPB Color Format None",
+	"DPB Color Format UBWC",
+	"DPB Color Format UBWC TP10",
+};
+
+static struct msm_vidc_ctrl msm_vdec_ctrls[] = {
+	{
+		.id = V4L2_CID_MPEG_VIDC_VIDEO_STREAM_FORMAT,
+		.name = "NAL Format",
+		.type = V4L2_CTRL_TYPE_MENU,
+		.minimum = V4L2_MPEG_VIDC_VIDEO_NAL_FORMAT_STARTCODES,
+		.maximum = V4L2_MPEG_VIDC_VIDEO_NAL_FORMAT_FOUR_BYTE_LENGTH,
+		.default_value = V4L2_MPEG_VIDC_VIDEO_NAL_FORMAT_STARTCODES,
+		.menu_skip_mask = ~(
+		(1 << V4L2_MPEG_VIDC_VIDEO_NAL_FORMAT_STARTCODES) |
+		(1 << V4L2_MPEG_VIDC_VIDEO_NAL_FORMAT_ONE_NAL_PER_BUFFER) |
+		(1 << V4L2_MPEG_VIDC_VIDEO_NAL_FORMAT_ONE_BYTE_LENGTH) |
+		(1 << V4L2_MPEG_VIDC_VIDEO_NAL_FORMAT_TWO_BYTE_LENGTH) |
+		(1 << V4L2_MPEG_VIDC_VIDEO_NAL_FORMAT_FOUR_BYTE_LENGTH)
+		),
+		.qmenu = mpeg_video_stream_format,
+	},
+	{
+		.id = V4L2_CID_MPEG_VIDC_VIDEO_OUTPUT_ORDER,
+		.name = "Output Order",
+		.type = V4L2_CTRL_TYPE_MENU,
+		.minimum = V4L2_MPEG_VIDC_VIDEO_OUTPUT_ORDER_DISPLAY,
+		.maximum = V4L2_MPEG_VIDC_VIDEO_OUTPUT_ORDER_DECODE,
+		.default_value = V4L2_MPEG_VIDC_VIDEO_OUTPUT_ORDER_DISPLAY,
+		.menu_skip_mask = ~(
+			(1 << V4L2_MPEG_VIDC_VIDEO_OUTPUT_ORDER_DISPLAY) |
+			(1 << V4L2_MPEG_VIDC_VIDEO_OUTPUT_ORDER_DECODE)
+			),
+		.qmenu = mpeg_video_output_order,
+	},
+	{
+		.id = V4L2_CID_MPEG_VIDC_VIDEO_PICTYPE_DEC_MODE,
+		.name = "Picture Type Decoding",
+		.type = V4L2_CTRL_TYPE_BOOLEAN,
+		.minimum = 0,
+		.maximum = 1,
+		.default_value = 0,
+		.step = 1,
+		.menu_skip_mask = 0,
+		.qmenu = NULL,
+	},
+	{
+		.id = V4L2_CID_MPEG_VIDC_VIDEO_KEEP_ASPECT_RATIO,
+		.name = "Keep Aspect Ratio",
+		.type = V4L2_CTRL_TYPE_BOOLEAN,
+		.minimum = 0,
+		.maximum = 1,
+		.default_value = 0,
+		.step = 1,
+		.menu_skip_mask = 0,
+		.qmenu = NULL,
+	},
+	{
+		.id = V4L2_CID_MPEG_VIDC_VIDEO_POST_LOOP_DEBLOCKER_MODE,
+		.name = "Deblocker Mode",
+		.type = V4L2_CTRL_TYPE_BOOLEAN,
+		.minimum = 0,
+		.maximum = 1,
+		.default_value = 0,
+		.step = 1,
+		.menu_skip_mask = 0,
+		.qmenu = NULL,
+	},
+	{
+		.id = V4L2_CID_MPEG_VIDC_VIDEO_DIVX_FORMAT,
+		.name = "Divx Format",
+		.type = V4L2_CTRL_TYPE_MENU,
+		.minimum = V4L2_MPEG_VIDC_VIDEO_DIVX_FORMAT_4,
+		.maximum = V4L2_MPEG_VIDC_VIDEO_DIVX_FORMAT_6,
+		.default_value = V4L2_MPEG_VIDC_VIDEO_DIVX_FORMAT_4,
+		.menu_skip_mask = ~(
+			(1 << V4L2_MPEG_VIDC_VIDEO_DIVX_FORMAT_4) |
+			(1 << V4L2_MPEG_VIDC_VIDEO_DIVX_FORMAT_5) |
+			(1 << V4L2_MPEG_VIDC_VIDEO_DIVX_FORMAT_6)
+			),
+		.qmenu = mpeg_video_vidc_divx_format,
+	},
+	{
+		.id = V4L2_CID_MPEG_VIDC_VIDEO_MB_ERROR_MAP_REPORTING,
+		.name = "MB Error Map Reporting",
+		.type = V4L2_CTRL_TYPE_BOOLEAN,
+		.minimum = 0,
+		.maximum = 1,
+		.default_value = 0,
+		.step = 1,
+		.menu_skip_mask = 0,
+		.qmenu = NULL,
+	},
+	{
+		.id = V4L2_CID_MPEG_VIDC_VIDEO_CONTINUE_DATA_TRANSFER,
+		.name = "control",
+		.type = V4L2_CTRL_TYPE_BOOLEAN,
+		.minimum = 0,
+		.maximum = 1,
+		.default_value = 0,
+		.step = 1,
+		.menu_skip_mask = 0,
+		.qmenu = NULL,
+	},
+	{
+		.id = V4L2_CID_MPEG_VIDC_VIDEO_SYNC_FRAME_DECODE,
+		.name = "Sync Frame Decode",
+		.type = V4L2_CTRL_TYPE_BOOLEAN,
+		.minimum = V4L2_MPEG_VIDC_VIDEO_SYNC_FRAME_DECODE_DISABLE,
+		.maximum = V4L2_MPEG_VIDC_VIDEO_SYNC_FRAME_DECODE_ENABLE,
+		.default_value = V4L2_MPEG_VIDC_VIDEO_SYNC_FRAME_DECODE_DISABLE,
+		.step = 1,
+	},
+	{
+		.id = V4L2_CID_MPEG_VIDC_VIDEO_SECURE,
+		.name = "Secure mode",
+		.type = V4L2_CTRL_TYPE_BUTTON,
+		.minimum = 0,
+		.maximum = 0,
+		.default_value = 0,
+		.step = 0,
+		.menu_skip_mask = 0,
+		.qmenu = NULL,
+	},
+	{
+		.id = V4L2_CID_MPEG_VIDC_VIDEO_EXTRADATA,
+		.name = "Extradata Type",
+		.type = V4L2_CTRL_TYPE_MENU,
+		.minimum = V4L2_MPEG_VIDC_EXTRADATA_NONE,
+		.maximum = V4L2_MPEG_VIDC_EXTRADATA_VPX_COLORSPACE,
+		.default_value = V4L2_MPEG_VIDC_EXTRADATA_NONE,
+		.menu_skip_mask = ~(
+			(1 << V4L2_MPEG_VIDC_EXTRADATA_NONE) |
+			(1 << V4L2_MPEG_VIDC_EXTRADATA_MB_QUANTIZATION) |
+			(1 << V4L2_MPEG_VIDC_EXTRADATA_INTERLACE_VIDEO) |
+			(1 << V4L2_MPEG_VIDC_EXTRADATA_VC1_FRAMEDISP) |
+			(1 << V4L2_MPEG_VIDC_EXTRADATA_VC1_SEQDISP) |
+			(1 << V4L2_MPEG_VIDC_EXTRADATA_TIMESTAMP) |
+			(1 << V4L2_MPEG_VIDC_EXTRADATA_S3D_FRAME_PACKING) |
+			(1 << V4L2_MPEG_VIDC_EXTRADATA_FRAME_RATE) |
+			(1 << V4L2_MPEG_VIDC_EXTRADATA_PANSCAN_WINDOW) |
+			(1 << V4L2_MPEG_VIDC_EXTRADATA_RECOVERY_POINT_SEI) |
+			(1 << V4L2_MPEG_VIDC_EXTRADATA_MULTISLICE_INFO) |
+			(1 << V4L2_MPEG_VIDC_EXTRADATA_NUM_CONCEALED_MB) |
+			(1 << V4L2_MPEG_VIDC_EXTRADATA_METADATA_FILLER) |
+			(1 << V4L2_MPEG_VIDC_EXTRADATA_INPUT_CROP) |
+			(1 << V4L2_MPEG_VIDC_EXTRADATA_DIGITAL_ZOOM) |
+			(1 << V4L2_MPEG_VIDC_EXTRADATA_ASPECT_RATIO) |
+			(1 << V4L2_MPEG_VIDC_EXTRADATA_MPEG2_SEQDISP) |
+			(1 << V4L2_MPEG_VIDC_EXTRADATA_STREAM_USERDATA) |
+			(1 << V4L2_MPEG_VIDC_EXTRADATA_FRAME_QP) |
+			(1 << V4L2_MPEG_VIDC_EXTRADATA_FRAME_BITS_INFO) |
+			(1 << V4L2_MPEG_VIDC_EXTRADATA_VQZIP_SEI) |
+			(1 << V4L2_MPEG_VIDC_EXTRADATA_OUTPUT_CROP) |
+			(1 << V4L2_MPEG_VIDC_EXTRADATA_DISPLAY_COLOUR_SEI) |
+			(1 <<
+			V4L2_MPEG_VIDC_EXTRADATA_CONTENT_LIGHT_LEVEL_SEI) |
+			(1 << V4L2_MPEG_VIDC_EXTRADATA_VUI_DISPLAY) |
+			(1 << V4L2_MPEG_VIDC_EXTRADATA_VPX_COLORSPACE)
+			),
+		.qmenu = mpeg_video_vidc_extradata,
+	},
+	{
+		.id = V4L2_CID_MPEG_VIDC_SET_PERF_LEVEL,
+		.name = "Decoder Performance Level",
+		.type = V4L2_CTRL_TYPE_MENU,
+		.minimum = V4L2_CID_MPEG_VIDC_PERF_LEVEL_NOMINAL,
+		.maximum = V4L2_CID_MPEG_VIDC_PERF_LEVEL_TURBO,
+		.default_value = V4L2_CID_MPEG_VIDC_PERF_LEVEL_NOMINAL,
+		.menu_skip_mask = ~(
+			(1 << V4L2_CID_MPEG_VIDC_PERF_LEVEL_NOMINAL) |
+			(1 << V4L2_CID_MPEG_VIDC_PERF_LEVEL_TURBO)),
+		.qmenu = perf_level,
+	},
+	{
+		.id = V4L2_CID_MPEG_VIDC_VIDEO_ALLOC_MODE_INPUT,
+		.name = "Buffer allocation mode for input",
+		.type = V4L2_CTRL_TYPE_MENU,
+		.minimum = V4L2_MPEG_VIDC_VIDEO_STATIC,
+		.maximum = V4L2_MPEG_VIDC_VIDEO_DYNAMIC,
+		.default_value = V4L2_MPEG_VIDC_VIDEO_STATIC,
+		.menu_skip_mask = ~(
+			(1 << V4L2_MPEG_VIDC_VIDEO_STATIC) |
+			(1 << V4L2_MPEG_VIDC_VIDEO_RING) |
+			(1 << V4L2_MPEG_VIDC_VIDEO_DYNAMIC)
+			),
+		.qmenu = mpeg_vidc_video_alloc_mode_type,
+	},
+	{
+		.id = V4L2_CID_MPEG_VIDC_VIDEO_ALLOC_MODE_OUTPUT,
+		.name = "Buffer allocation mode for output",
+		.type = V4L2_CTRL_TYPE_MENU,
+		.minimum = V4L2_MPEG_VIDC_VIDEO_STATIC,
+		.maximum = V4L2_MPEG_VIDC_VIDEO_DYNAMIC,
+		.default_value = V4L2_MPEG_VIDC_VIDEO_STATIC,
+		.menu_skip_mask = ~(
+			(1 << V4L2_MPEG_VIDC_VIDEO_STATIC) |
+			(1 << V4L2_MPEG_VIDC_VIDEO_RING) |
+			(1 << V4L2_MPEG_VIDC_VIDEO_DYNAMIC)
+			),
+		.qmenu = mpeg_vidc_video_alloc_mode_type,
+	},
+	{
+		.id = V4L2_CID_MPEG_VIDC_VIDEO_FRAME_ASSEMBLY,
+		.name = "Video frame assembly",
+		.type = V4L2_CTRL_TYPE_BOOLEAN,
+		.minimum = V4L2_MPEG_VIDC_FRAME_ASSEMBLY_DISABLE,
+		.maximum = V4L2_MPEG_VIDC_FRAME_ASSEMBLY_ENABLE,
+		.default_value =  V4L2_MPEG_VIDC_FRAME_ASSEMBLY_DISABLE,
+		.step = 1,
+		.menu_skip_mask = 0,
+		.qmenu = NULL,
+	},
+	{
+		.id = V4L2_CID_MPEG_VIDC_VIDEO_STREAM_OUTPUT_MODE,
+		.name = "Video decoder multi stream",
+		.type = V4L2_CTRL_TYPE_BOOLEAN,
+		.minimum =
+			V4L2_CID_MPEG_VIDC_VIDEO_STREAM_OUTPUT_PRIMARY,
+		.maximum =
+			V4L2_CID_MPEG_VIDC_VIDEO_STREAM_OUTPUT_SECONDARY,
+		.default_value =
+			V4L2_CID_MPEG_VIDC_VIDEO_STREAM_OUTPUT_PRIMARY,
+		.menu_skip_mask = 0,
+		.step = 1,
+		.qmenu = NULL,
+	},
+	{
+		.id = V4L2_CID_MPEG_VIDEO_MPEG4_PROFILE,
+		.name = "MPEG4 Profile",
+		.type = V4L2_CTRL_TYPE_MENU,
+		.minimum = V4L2_MPEG_VIDEO_MPEG4_PROFILE_SIMPLE,
+		.maximum =
+		V4L2_MPEG_VIDEO_MPEG4_PROFILE_ADVANCED_CODING_EFFICIENCY,
+		.default_value = V4L2_MPEG_VIDEO_MPEG4_PROFILE_SIMPLE,
+		.menu_skip_mask = 0,
+		.flags = V4L2_CTRL_FLAG_VOLATILE | V4L2_CTRL_FLAG_READ_ONLY,
+	},
+	{
+		.id = V4L2_CID_MPEG_VIDEO_MPEG4_LEVEL,
+		.name = "MPEG4 Level",
+		.type = V4L2_CTRL_TYPE_MENU,
+		.minimum = V4L2_MPEG_VIDEO_MPEG4_LEVEL_0,
+		.maximum = V4L2_MPEG_VIDEO_MPEG4_LEVEL_5,
+		.default_value = V4L2_MPEG_VIDEO_MPEG4_LEVEL_0,
+		.menu_skip_mask = 0,
+		.flags = V4L2_CTRL_FLAG_VOLATILE | V4L2_CTRL_FLAG_READ_ONLY,
+	},
+	{
+		.id = V4L2_CID_MPEG_VIDEO_H264_PROFILE,
+		.name = "H264 Profile",
+		.type = V4L2_CTRL_TYPE_MENU,
+		.maximum = V4L2_MPEG_VIDEO_H264_PROFILE_CONSTRAINED_HIGH,
+		.default_value = V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE,
+		.menu_skip_mask = 0,
+		.flags = V4L2_CTRL_FLAG_VOLATILE,
+		.qmenu = NULL,
+	},
+	{
+		.id = V4L2_CID_MPEG_VIDEO_H264_LEVEL,
+		.name = "H264 Level",
+		.type = V4L2_CTRL_TYPE_MENU,
+		.maximum = V4L2_MPEG_VIDEO_H264_LEVEL_5_2,
+		.default_value = V4L2_MPEG_VIDEO_H264_LEVEL_1_0,
+		.menu_skip_mask = 0,
+		.flags = V4L2_CTRL_FLAG_VOLATILE,
+		.qmenu = NULL,
+	},
+	{
+		.id = V4L2_CID_MPEG_VIDC_VIDEO_H263_PROFILE,
+		.name = "H263 Profile",
+		.type = V4L2_CTRL_TYPE_MENU,
+		.minimum = V4L2_MPEG_VIDC_VIDEO_H263_PROFILE_BASELINE,
+		.maximum = V4L2_MPEG_VIDC_VIDEO_H263_PROFILE_HIGHLATENCY,
+		.default_value = V4L2_MPEG_VIDC_VIDEO_H263_PROFILE_BASELINE,
+		.menu_skip_mask = ~(
+		(1 << V4L2_MPEG_VIDC_VIDEO_H263_PROFILE_BASELINE) |
+		(1 << V4L2_MPEG_VIDC_VIDEO_H263_PROFILE_H320CODING) |
+		(1 << V4L2_MPEG_VIDC_VIDEO_H263_PROFILE_BACKWARDCOMPATIBLE) |
+		(1 << V4L2_MPEG_VIDC_VIDEO_H263_PROFILE_ISWV2) |
+		(1 << V4L2_MPEG_VIDC_VIDEO_H263_PROFILE_ISWV3) |
+		(1 << V4L2_MPEG_VIDC_VIDEO_H263_PROFILE_HIGHCOMPRESSION) |
+		(1 << V4L2_MPEG_VIDC_VIDEO_H263_PROFILE_INTERNET) |
+		(1 << V4L2_MPEG_VIDC_VIDEO_H263_PROFILE_INTERLACE) |
+		(1 << V4L2_MPEG_VIDC_VIDEO_H263_PROFILE_HIGHLATENCY)
+		),
+		.qmenu = h263_profile,
+		.flags = V4L2_CTRL_FLAG_VOLATILE | V4L2_CTRL_FLAG_READ_ONLY,
+	},
+	{
+		.id = V4L2_CID_MPEG_VIDC_VIDEO_H263_LEVEL,
+		.name = "H263 Level",
+		.type = V4L2_CTRL_TYPE_MENU,
+		.minimum = V4L2_MPEG_VIDC_VIDEO_H263_LEVEL_1_0,
+		.maximum = V4L2_MPEG_VIDC_VIDEO_H263_LEVEL_7_0,
+		.default_value = V4L2_MPEG_VIDC_VIDEO_H263_LEVEL_1_0,
+		.menu_skip_mask = ~(
+		(1 << V4L2_MPEG_VIDC_VIDEO_H263_LEVEL_1_0) |
+		(1 << V4L2_MPEG_VIDC_VIDEO_H263_LEVEL_2_0) |
+		(1 << V4L2_MPEG_VIDC_VIDEO_H263_LEVEL_3_0) |
+		(1 << V4L2_MPEG_VIDC_VIDEO_H263_LEVEL_4_0) |
+		(1 << V4L2_MPEG_VIDC_VIDEO_H263_LEVEL_5_0) |
+		(1 << V4L2_MPEG_VIDC_VIDEO_H263_LEVEL_6_0) |
+		(1 << V4L2_MPEG_VIDC_VIDEO_H263_LEVEL_7_0)
+		),
+		.qmenu = h263_level,
+		.flags = V4L2_CTRL_FLAG_VOLATILE | V4L2_CTRL_FLAG_READ_ONLY,
+	},
+	{
+		.id = V4L2_CID_MPEG_VIDC_VIDEO_VP8_PROFILE_LEVEL,
+		.name = "VP8 Profile Level",
+		.type = V4L2_CTRL_TYPE_MENU,
+		.minimum = V4L2_MPEG_VIDC_VIDEO_VP8_UNUSED,
+		.maximum = V4L2_MPEG_VIDC_VIDEO_VP8_VERSION_1,
+		.default_value = V4L2_MPEG_VIDC_VIDEO_VP8_VERSION_0,
+		.menu_skip_mask = ~(
+			(1 << V4L2_MPEG_VIDC_VIDEO_VP8_UNUSED) |
+			(1 << V4L2_MPEG_VIDC_VIDEO_VP8_VERSION_0) |
+			(1 << V4L2_MPEG_VIDC_VIDEO_VP8_VERSION_1)
+		),
+		.qmenu = vp8_profile_level,
+		.flags = V4L2_CTRL_FLAG_VOLATILE | V4L2_CTRL_FLAG_READ_ONLY,
+	},
+	{
+		.id = V4L2_CID_MPEG_VIDC_VIDEO_MPEG2_PROFILE,
+		.name = "MPEG2 Profile",
+		.type = V4L2_CTRL_TYPE_MENU,
+		.minimum = V4L2_MPEG_VIDC_VIDEO_MPEG2_PROFILE_SIMPLE,
+		.maximum = V4L2_MPEG_VIDC_VIDEO_MPEG2_PROFILE_HIGH,
+		.default_value = V4L2_MPEG_VIDC_VIDEO_MPEG2_PROFILE_SIMPLE,
+		.menu_skip_mask = ~(
+		(1 << V4L2_MPEG_VIDC_VIDEO_MPEG2_PROFILE_SIMPLE) |
+		(1 << V4L2_MPEG_VIDC_VIDEO_MPEG2_PROFILE_MAIN) |
+		(1 << V4L2_MPEG_VIDC_VIDEO_MPEG2_PROFILE_422) |
+		(1 << V4L2_MPEG_VIDC_VIDEO_MPEG2_PROFILE_SNR_SCALABLE) |
+		(1 << V4L2_MPEG_VIDC_VIDEO_MPEG2_PROFILE_SPATIAL_SCALABLE) |
+		(1 << V4L2_MPEG_VIDC_VIDEO_MPEG2_PROFILE_HIGH)
+		),
+		.qmenu = mpeg2_profile,
+		.flags = V4L2_CTRL_FLAG_VOLATILE | V4L2_CTRL_FLAG_READ_ONLY,
+	},
+	{
+		.id = V4L2_CID_MPEG_VIDC_VIDEO_MPEG2_LEVEL,
+		.name = "MPEG2 Level",
+		.type = V4L2_CTRL_TYPE_MENU,
+		.minimum = V4L2_MPEG_VIDC_VIDEO_MPEG2_LEVEL_0,
+		.maximum = V4L2_MPEG_VIDC_VIDEO_MPEG2_LEVEL_3,
+		.default_value = V4L2_MPEG_VIDC_VIDEO_MPEG2_LEVEL_0,
+		.menu_skip_mask = ~(
+			(1 << V4L2_MPEG_VIDC_VIDEO_MPEG2_LEVEL_0) |
+			(1 << V4L2_MPEG_VIDC_VIDEO_MPEG2_LEVEL_1) |
+			(1 << V4L2_MPEG_VIDC_VIDEO_MPEG2_LEVEL_2) |
+			(1 << V4L2_MPEG_VIDC_VIDEO_MPEG2_LEVEL_3)
+		),
+		.qmenu = mpeg2_level,
+		.flags = V4L2_CTRL_FLAG_VOLATILE | V4L2_CTRL_FLAG_READ_ONLY,
+	},
+	{
+		.id = V4L2_CID_MPEG_VIDC_VIDEO_SCS_THRESHOLD,
+		.name = "Video start code search threshold",
+		.type = V4L2_CTRL_TYPE_INTEGER,
+		.minimum = 1,
+		.maximum = INT_MAX,
+		.default_value = INT_MAX,
+		.step = 1,
+		.menu_skip_mask = 0,
+		.qmenu = NULL,
+	},
+	{
+		.id = V4L2_CID_MPEG_VIDC_VIDEO_MVC_BUFFER_LAYOUT,
+		.name = "MVC buffer layout",
+		.type = V4L2_CTRL_TYPE_MENU,
+		.maximum = V4L2_MPEG_VIDC_VIDEO_MVC_TOP_BOTTOM,
+		.default_value = V4L2_MPEG_VIDC_VIDEO_MVC_SEQUENTIAL,
+		.menu_skip_mask = ~(
+			(1 << V4L2_MPEG_VIDC_VIDEO_MVC_SEQUENTIAL) |
+			(1 << V4L2_MPEG_VIDC_VIDEO_MVC_TOP_BOTTOM)
+			),
+		.qmenu = mpeg_vidc_video_h264_mvc_layout,
+	},
+	{
+		.id = V4L2_CID_MPEG_VIDC_VIDEO_CONCEAL_COLOR,
+		.name = "Picture concealed color",
+		.type = V4L2_CTRL_TYPE_INTEGER,
+		.minimum = 0x0,
+		.maximum = 0xffffff,
+		.default_value = DEFAULT_VIDEO_CONCEAL_COLOR_BLACK,
+		.step = 1,
+	},
+	{
+		.id = V4L2_CID_MPEG_VIDC_VIDEO_BUFFER_SIZE_LIMIT,
+		.name = "Buffer size limit",
+		.type = V4L2_CTRL_TYPE_INTEGER,
+		.minimum = 0,
+		.maximum = INT_MAX,
+		.default_value = 0,
+		.step = 1,
+		.menu_skip_mask = 0,
+		.qmenu = NULL,
+	},
+	{
+		.id = V4L2_CID_MPEG_VIDC_VIDEO_SECURE_SCALING_THRESHOLD,
+		.name = "Secure scaling output2 threshold",
+		.type = V4L2_CTRL_TYPE_INTEGER,
+		.minimum = 0,
+		.maximum = INT_MAX,
+		.default_value = 0,
+		.step = 1,
+		.menu_skip_mask = 0,
+		.qmenu = NULL,
+	},
+	{
+		.id = V4L2_CID_MPEG_VIDC_VIDEO_NON_SECURE_OUTPUT2,
+		.name = "Non-Secure output2",
+		.type = V4L2_CTRL_TYPE_BOOLEAN,
+		.minimum = 0,
+		.maximum = 1,
+		.default_value = 0,
+		.step = 1,
+		.menu_skip_mask = 0,
+		.qmenu = NULL,
+	},
+	{
+		.id = V4L2_CID_MPEG_VIDC_VIDEO_DPB_COLOR_FORMAT,
+		.name = "Video decoder dpb color format",
+		.type = V4L2_CTRL_TYPE_MENU,
+		.minimum = V4L2_MPEG_VIDC_VIDEO_DPB_COLOR_FMT_NONE,
+		.maximum = V4L2_MPEG_VIDC_VIDEO_DPB_COLOR_FMT_TP10_UBWC,
+		.default_value = V4L2_MPEG_VIDC_VIDEO_DPB_COLOR_FMT_NONE,
+		.menu_skip_mask = ~(
+			(1 << V4L2_MPEG_VIDC_VIDEO_DPB_COLOR_FMT_NONE) |
+			(1 << V4L2_MPEG_VIDC_VIDEO_DPB_COLOR_FMT_UBWC) |
+			(1 << V4L2_MPEG_VIDC_VIDEO_DPB_COLOR_FMT_TP10_UBWC)
+			),
+		.qmenu = mpeg_vidc_video_dpb_color_format,
+		.flags = V4L2_CTRL_FLAG_MODIFY_LAYOUT,
+	},
+	{
+		.id = V4L2_CID_VIDC_QBUF_MODE,
+		.name = "Allows batching of buffers for power savings",
+		.type = V4L2_CTRL_TYPE_BOOLEAN,
+		.minimum = V4L2_VIDC_QBUF_STANDARD,
+		.maximum = V4L2_VIDC_QBUF_BATCHED,
+		.default_value = V4L2_VIDC_QBUF_STANDARD,
+		.step = 1,
+	},
+	{
+		.id = V4L2_CID_MPEG_VIDEO_H264_ENTROPY_MODE,
+		.name = "Entropy Mode",
+		.type = V4L2_CTRL_TYPE_MENU,
+		.minimum = V4L2_MPEG_VIDEO_H264_ENTROPY_MODE_CAVLC,
+		.maximum = V4L2_MPEG_VIDEO_H264_ENTROPY_MODE_CABAC,
+		.default_value = V4L2_MPEG_VIDEO_H264_ENTROPY_MODE_CAVLC,
+		.step = 0,
+		.menu_skip_mask = ~(
+		(1 << V4L2_MPEG_VIDEO_H264_ENTROPY_MODE_CAVLC) |
+		(1 << V4L2_MPEG_VIDEO_H264_ENTROPY_MODE_CABAC)
+		),
+		.qmenu = mpeg_vidc_video_entropy_mode,
+		.flags = V4L2_CTRL_FLAG_VOLATILE | V4L2_CTRL_FLAG_READ_ONLY,
+	},
+	{
+		.id = V4L2_CID_MPEG_VIDC_VIDEO_PRIORITY,
+		.name = "Session Priority",
+		.type = V4L2_CTRL_TYPE_BOOLEAN,
+		.minimum = V4L2_MPEG_VIDC_VIDEO_PRIORITY_REALTIME_ENABLE,
+		.maximum = V4L2_MPEG_VIDC_VIDEO_PRIORITY_REALTIME_DISABLE,
+		.default_value = V4L2_MPEG_VIDC_VIDEO_PRIORITY_REALTIME_DISABLE,
+		.step = 1,
+	},
+	{
+		.id = V4L2_CID_MPEG_VIDC_VIDEO_OPERATING_RATE,
+		.name = "Set Decoder Operating rate",
+		.type = V4L2_CTRL_TYPE_INTEGER,
+		.minimum = 0,
+		.maximum = MAX_OPERATING_FRAME_RATE,
+		.default_value = 0,
+		.step = OPERATING_FRAME_RATE_STEP,
+	},
+};
+
+#define NUM_CTRLS ARRAY_SIZE(msm_vdec_ctrls)
+
+static int vdec_hal_to_v4l2(int id, int value);
+
+static int try_set_ext_ctrl(struct msm_vidc_inst *inst,
+	struct v4l2_ext_controls *ctrl);
+
+static u32 get_frame_size_nv12(int plane,
+					u32 height, u32 width)
+{
+	return VENUS_BUFFER_SIZE(COLOR_FMT_NV12, width, height);
+}
+
+static u32 get_frame_size_nv12_ubwc(int plane, u32 height, u32 width)
+{
+	return VENUS_BUFFER_SIZE(COLOR_FMT_NV12_UBWC, width, height);
+}
+
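+/* Worst case: a compressed frame as large as a full 4:2:0 frame. */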
+static u32 get_frame_size_compressed_full_yuv(int plane,
+					u32 max_mbs_per_frame, u32 size_per_mb)
+{
+	return (max_mbs_per_frame * size_per_mb * 3 / 2);
+}
+
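+/* Typical case: assume compression to half of a full 4:2:0 frame. */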
+static u32 get_frame_size_compressed(int plane,
+					u32 max_mbs_per_frame, u32 size_per_mb)
+{
+	return (max_mbs_per_frame * size_per_mb * 3 / 2) / 2;
+}
+
+static u32 get_frame_size_nv12_ubwc_10bit(int plane, u32 height, u32 width)
+{
+	return VENUS_BUFFER_SIZE(COLOR_FMT_NV12_BPP10_UBWC, width, height);
+}
+
+static u32 get_frame_size(struct msm_vidc_inst *inst,
+					const struct msm_vidc_format *fmt,
+					int fmt_type, int plane)
+{
+	u32 frame_size = 0;
+	if (fmt_type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+		frame_size = fmt->get_frame_size(plane,
+					inst->capability.mbs_per_frame.max,
+					MB_SIZE_IN_PIXEL);
+		if (inst->buffer_size_limit &&
+			(inst->buffer_size_limit < frame_size)) {
+			frame_size = inst->buffer_size_limit;
+			dprintk(VIDC_DBG, "input buffer size limited to %d\n",
+				frame_size);
+		} else {
+			dprintk(VIDC_DBG, "set input buffer size to %d\n",
+				frame_size);
+		}
+	} else if (fmt_type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
+		frame_size = fmt->get_frame_size(plane,
+					inst->capability.height.max,
+					inst->capability.width.max);
+		dprintk(VIDC_DBG, "set output buffer size to %d\n",
+			frame_size);
+	} else {
+		dprintk(VIDC_WARN, "Wrong format type\n");
+	}
+	return frame_size;
+}
+
+static int is_ctrl_valid_for_codec(struct msm_vidc_inst *inst,
+					struct v4l2_ctrl *ctrl)
+{
+	int rc = 0;
+	switch (ctrl->id) {
+	case V4L2_CID_MPEG_VIDC_VIDEO_MVC_BUFFER_LAYOUT:
+		if (inst->fmts[OUTPUT_PORT].fourcc != V4L2_PIX_FMT_H264_MVC) {
+			dprintk(VIDC_ERR, "Control %#x only valid for MVC\n",
+					ctrl->id);
+			rc = -ENOTSUPP;
+			break;
+		}
+		break;
+	case V4L2_CID_MPEG_VIDEO_H264_PROFILE:
+		if (inst->fmts[OUTPUT_PORT].fourcc == V4L2_PIX_FMT_H264_MVC &&
+			ctrl->val != V4L2_MPEG_VIDEO_H264_PROFILE_STEREO_HIGH) {
+			dprintk(VIDC_ERR,
+					"Profile %#x not supported for MVC\n",
+					ctrl->val);
+			rc = -ENOTSUPP;
+			break;
+		}
+		break;
+	case V4L2_CID_MPEG_VIDEO_H264_LEVEL:
+		if (inst->fmts[OUTPUT_PORT].fourcc == V4L2_PIX_FMT_H264_MVC &&
+			ctrl->val >= V4L2_MPEG_VIDEO_H264_LEVEL_5_2) {
+			dprintk(VIDC_ERR, "Level %#x not supported for MVC\n",
+					ctrl->val);
+			rc = -ENOTSUPP;
+			break;
+		}
+		break;
+	default:
+		break;
+	}
+	return rc;
+}
+
+struct msm_vidc_format vdec_formats[] = {
+	{
+		.name = "YCbCr Semiplanar 4:2:0",
+		.description = "Y/CbCr 4:2:0",
+		.fourcc = V4L2_PIX_FMT_NV12,
+		.get_frame_size = get_frame_size_nv12,
+		.type = CAPTURE_PORT,
+	},
+	{
+		.name = "UBWC YCbCr Semiplanar 4:2:0",
+		.description = "UBWC Y/CbCr 4:2:0",
+		.fourcc = V4L2_PIX_FMT_NV12_UBWC,
+		.get_frame_size = get_frame_size_nv12_ubwc,
+		.type = CAPTURE_PORT,
+	},
+	{
+		.name = "UBWC YCbCr Semiplanar 4:2:0 10bit",
+		.description = "UBWC Y/CbCr 4:2:0 10bit",
+		.fourcc = V4L2_PIX_FMT_NV12_TP10_UBWC,
+		.get_frame_size = get_frame_size_nv12_ubwc_10bit,
+		.type = CAPTURE_PORT,
+	},
+	{
+		.name = "Mpeg4",
+		.description = "Mpeg4 compressed format",
+		.fourcc = V4L2_PIX_FMT_MPEG4,
+		.get_frame_size = get_frame_size_compressed,
+		.type = OUTPUT_PORT,
+	},
+	{
+		.name = "Mpeg2",
+		.description = "Mpeg2 compressed format",
+		.fourcc = V4L2_PIX_FMT_MPEG2,
+		.get_frame_size = get_frame_size_compressed,
+		.type = OUTPUT_PORT,
+	},
+	{
+		.name = "H263",
+		.description = "H263 compressed format",
+		.fourcc = V4L2_PIX_FMT_H263,
+		.get_frame_size = get_frame_size_compressed,
+		.type = OUTPUT_PORT,
+	},
+	{
+		.name = "VC1",
+		.description = "VC-1 compressed format",
+		.fourcc = V4L2_PIX_FMT_VC1_ANNEX_G,
+		.get_frame_size = get_frame_size_compressed,
+		.type = OUTPUT_PORT,
+	},
+	{
+		.name = "VC1 SP",
+		.description = "VC-1 compressed format G",
+		.fourcc = V4L2_PIX_FMT_VC1_ANNEX_L,
+		.get_frame_size = get_frame_size_compressed,
+		.type = OUTPUT_PORT,
+	},
+	{
+		.name = "H264",
+		.description = "H264 compressed format",
+		.fourcc = V4L2_PIX_FMT_H264,
+		.get_frame_size = get_frame_size_compressed,
+		.type = OUTPUT_PORT,
+	},
+	{
+		.name = "H264_MVC",
+		.description = "H264_MVC compressed format",
+		.fourcc = V4L2_PIX_FMT_H264_MVC,
+		.get_frame_size = get_frame_size_compressed,
+		.type = OUTPUT_PORT,
+	},
+	{
+		.name = "HEVC",
+		.description = "HEVC compressed format",
+		.fourcc = V4L2_PIX_FMT_HEVC,
+		.get_frame_size = get_frame_size_compressed,
+		.type = OUTPUT_PORT,
+	},
+	{
+		.name = "HEVC_HYBRID",
+		.description = "HEVC compressed format",
+		.fourcc = V4L2_PIX_FMT_HEVC_HYBRID,
+		.get_frame_size = get_frame_size_compressed,
+		.type = OUTPUT_PORT,
+	},
+	{
+		.name = "VP8",
+		.description = "VP8 compressed format",
+		.fourcc = V4L2_PIX_FMT_VP8,
+		.get_frame_size = get_frame_size_compressed,
+		.type = OUTPUT_PORT,
+	},
+	{
+		.name = "VP9",
+		.description = "VP9 compressed format",
+		.fourcc = V4L2_PIX_FMT_VP9,
+		.get_frame_size = get_frame_size_compressed_full_yuv,
+		.type = OUTPUT_PORT,
+	},
+	{
+		.name = "DIVX 311",
+		.description = "DIVX 311 compressed format",
+		.fourcc = V4L2_PIX_FMT_DIVX_311,
+		.get_frame_size = get_frame_size_compressed,
+		.type = OUTPUT_PORT,
+	},
+	{
+		.name = "DIVX",
+		.description = "DIVX 4/5/6 compressed format",
+		.fourcc = V4L2_PIX_FMT_DIVX,
+		.get_frame_size = get_frame_size_compressed,
+		.type = OUTPUT_PORT,
+	}
+};
+
+int msm_vdec_streamon(struct msm_vidc_inst *inst, enum v4l2_buf_type i)
+{
+	int rc = 0;
+	struct buf_queue *q;
+
+	q = msm_comm_get_vb2q(inst, i);
+	if (!q) {
+		dprintk(VIDC_ERR,
+			"Failed to find buffer queue for type = %d\n", i);
+		return -EINVAL;
+	}
+	dprintk(VIDC_DBG, "Calling streamon\n");
+	mutex_lock(&q->lock);
+	rc = vb2_streamon(&q->vb2_bufq, i);
+	mutex_unlock(&q->lock);
+	if (rc)
+		dprintk(VIDC_ERR, "streamon failed on port: %d\n", i);
+	return rc;
+}
+
+int msm_vdec_streamoff(struct msm_vidc_inst *inst, enum v4l2_buf_type i)
+{
+	int rc = 0;
+	struct buf_queue *q;
+
+	q = msm_comm_get_vb2q(inst, i);
+	if (!q) {
+		dprintk(VIDC_ERR,
+			"Failed to find buffer queue for type = %d\n", i);
+		return -EINVAL;
+	}
+	dprintk(VIDC_DBG, "Calling streamoff\n");
+
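+	/* Unless mid-reconfig, release hardware resources before stopping. */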
+	if (!inst->in_reconfig) {
+		rc = msm_comm_try_state(inst, MSM_VIDC_RELEASE_RESOURCES_DONE);
+		if (rc)
+			dprintk(VIDC_ERR,
+			"Failed to move inst: %pK to res done state\n", inst);
+	}
+
+	mutex_lock(&q->lock);
+	rc = vb2_streamoff(&q->vb2_bufq, i);
+	mutex_unlock(&q->lock);
+	if (rc)
+		dprintk(VIDC_ERR, "streamoff failed on port: %d\n", i);
+	return rc;
+}
+
+int msm_vdec_prepare_buf(struct msm_vidc_inst *inst,
+					struct v4l2_buffer *b)
+{
+	int rc = 0;
+	struct vidc_buffer_addr_info buffer_info;
+	int extra_idx = 0;
+	int i;
+	struct hfi_device *hdev;
+
+	if (!inst || !inst->core || !inst->core->device) {
+		dprintk(VIDC_ERR, "%s invalid parameters\n", __func__);
+		return -EINVAL;
+	}
+	hdev = inst->core->device;
+
+	if (inst->state == MSM_VIDC_CORE_INVALID ||
+			inst->core->state == VIDC_CORE_INVALID) {
+		dprintk(VIDC_ERR,
+			"Core %pK in bad state, ignoring prepare buf\n",
+				inst->core);
+		return -EINVAL;
+	}
+
+	switch (b->type) {
+	case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
+		break;
+	case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
+		if (b->length != inst->prop.num_planes[CAPTURE_PORT]) {
+			dprintk(VIDC_ERR,
+			"Planes mismatch: needed: %d, allocated: %d\n",
+			inst->prop.num_planes[CAPTURE_PORT],
+			b->length);
+			rc = -EINVAL;
+			break;
+		}
+		for (i = 0; i < min_t(int, b->length, VIDEO_MAX_PLANES); ++i) {
+			dprintk(VIDC_DBG,
+			"prepare plane: %d, device_addr = %#lx, size = %d\n",
+			i, b->m.planes[i].m.userptr,
+			b->m.planes[i].length);
+		}
+
+		buffer_info.buffer_size = b->m.planes[0].length;
+		buffer_info.buffer_type = msm_comm_get_hal_output_buffer(inst);
+		buffer_info.num_buffers = 1;
+		buffer_info.align_device_addr = b->m.planes[0].m.userptr;
+
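+		/* A trailing plane, if present, carries extradata. */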
+		extra_idx = EXTRADATA_IDX(b->length);
+		if (extra_idx && extra_idx < VIDEO_MAX_PLANES &&
+			b->m.planes[extra_idx].m.userptr) {
+			buffer_info.extradata_addr =
+				b->m.planes[extra_idx].m.userptr;
+			buffer_info.extradata_size =
+				b->m.planes[extra_idx].length;
+			dprintk(VIDC_DBG, "extradata: %pa, length = %d\n",
+				&buffer_info.extradata_addr,
+				buffer_info.extradata_size);
+		} else {
+			buffer_info.extradata_addr = 0;
+			buffer_info.extradata_size = 0;
+		}
+
+		rc = call_hfi_op(hdev, session_set_buffers,
+				(void *)inst->session, &buffer_info);
+		if (rc) {
+			dprintk(VIDC_ERR,
+				"vidc_hal_session_set_buffers failed\n");
+		}
+		break;
+	default:
+		dprintk(VIDC_ERR, "Buffer type not recognized: %d\n", b->type);
+		break;
+	}
+
+	return rc;
+}
+
+int msm_vdec_release_buf(struct msm_vidc_inst *inst,
+					struct v4l2_buffer *b)
+{
+	int rc = 0;
+	struct vidc_buffer_addr_info buffer_info;
+	struct msm_vidc_core *core;
+	int extra_idx = 0;
+	int i;
+	struct hfi_device *hdev;
+
+	if (!inst || !inst->core || !inst->core->device) {
+		dprintk(VIDC_ERR, "%s invalid parameters\n", __func__);
+		return -EINVAL;
+	}
+	core = inst->core;
+	hdev = inst->core->device;
+
+	if (inst->state == MSM_VIDC_CORE_INVALID ||
+			core->state == VIDC_CORE_INVALID) {
+		dprintk(VIDC_ERR,
+			"Core %pK in bad state, ignoring release output buf\n",
+				core);
+		goto exit;
+	}
+
+	switch (b->type) {
+	case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
+		break;
+	case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
+		if (b->length != inst->prop.num_planes[CAPTURE_PORT]) {
+			dprintk(VIDC_ERR,
+			"Planes mismatch: needed: %d, to release: %d\n",
+			inst->prop.num_planes[CAPTURE_PORT], b->length);
+			rc = -EINVAL;
+			break;
+		}
+
+		for (i = 0; i < b->length; ++i) {
+			dprintk(VIDC_DBG,
+			"Release plane: %d device_addr = %#lx, size = %d\n",
+			i, b->m.planes[i].m.userptr,
+			b->m.planes[i].length);
+		}
+
+		buffer_info.buffer_size = b->m.planes[0].length;
+		buffer_info.buffer_type = msm_comm_get_hal_output_buffer(inst);
+		buffer_info.num_buffers = 1;
+		buffer_info.align_device_addr = b->m.planes[0].m.userptr;
+		buffer_info.response_required = false;
+
+		extra_idx = EXTRADATA_IDX(b->length);
+		if (extra_idx && extra_idx < VIDEO_MAX_PLANES
+			&& b->m.planes[extra_idx].m.userptr)
+			buffer_info.extradata_addr =
+				b->m.planes[extra_idx].m.userptr;
+		else
+			buffer_info.extradata_addr = 0;
+
+		rc = call_hfi_op(hdev, session_release_buffers,
+			(void *)inst->session, &buffer_info);
+		if (rc)
+			dprintk(VIDC_ERR,
+			"vidc_hal_session_release_buffers failed\n");
+		break;
+	default:
+		dprintk(VIDC_ERR, "Buffer type not recognized: %d\n", b->type);
+		break;
+	}
+exit:
+	return rc;
+}
+
+int msm_vdec_qbuf(struct msm_vidc_inst *inst, struct v4l2_buffer *b)
+{
+	struct buf_queue *q = NULL;
+	int rc = 0;
+
+	q = msm_comm_get_vb2q(inst, b->type);
+	if (!q) {
+		dprintk(VIDC_ERR, "Failed to find buffer queue for type = %d\n"
+			, b->type);
+		return -EINVAL;
+	}
+
+	mutex_lock(&q->lock);
+	rc = vb2_qbuf(&q->vb2_bufq, b);
+	mutex_unlock(&q->lock);
+
+	if (rc)
+		dprintk(VIDC_ERR, "Failed to qbuf, %d\n", rc);
+	return rc;
+}
+
+int msm_vdec_dqbuf(struct msm_vidc_inst *inst, struct v4l2_buffer *b)
+{
+	struct buf_queue *q = NULL;
+	int rc = 0;
+
+	q = msm_comm_get_vb2q(inst, b->type);
+	if (!q) {
+		dprintk(VIDC_ERR,
+			"Failed to find buffer queue for type = %d\n", b->type);
+		return -EINVAL;
+	}
+	mutex_lock(&q->lock);
+	rc = vb2_dqbuf(&q->vb2_bufq, b, true);
+	mutex_unlock(&q->lock);
+	if (rc)
+		dprintk(VIDC_DBG, "Failed to dqbuf, %d\n", rc);
+	return rc;
+}
+
+int msm_vdec_reqbufs(struct msm_vidc_inst *inst, struct v4l2_requestbuffers *b)
+{
+	struct buf_queue *q = NULL;
+	int rc = 0;
+
+	if (!inst || !b) {
+		dprintk(VIDC_ERR,
+			"Invalid input, inst = %pK, buffer = %pK\n", inst, b);
+		return -EINVAL;
+	}
+
+	q = msm_comm_get_vb2q(inst, b->type);
+	if (!q) {
+		dprintk(VIDC_ERR, "Failed to find buffer queue for type = %d\n"
+			, b->type);
+		return -EINVAL;
+	}
+
+	mutex_lock(&q->lock);
+	rc = vb2_reqbufs(&q->vb2_bufq, b);
+	mutex_unlock(&q->lock);
+
+	if (rc)
+		dprintk(VIDC_DBG, "Failed to get reqbufs, %d\n", rc);
+	return rc;
+}
+
+int msm_vdec_g_fmt(struct msm_vidc_inst *inst, struct v4l2_format *f)
+{
+	const struct msm_vidc_format *fmt = NULL;
+	struct hfi_device *hdev;
+	int rc = 0, i = 0, stride = 0, scanlines = 0, color_format = 0;
+	unsigned int *plane_sizes = NULL, extra_idx = 0;
+	int num_planes = 0;
+
+	if (!inst || !f || !inst->core || !inst->core->device) {
+		dprintk(VIDC_ERR,
+			"Invalid input, inst = %pK, format = %pK\n", inst, f);
+		return -EINVAL;
+	}
+
+	hdev = inst->core->device;
+	if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
+		fmt = &inst->fmts[CAPTURE_PORT];
+	else if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
+		fmt = &inst->fmts[OUTPUT_PORT];
+	else
+		return -ENOTSUPP;
+
+	f->fmt.pix_mp.pixelformat = fmt->fourcc;
+	f->fmt.pix_mp.num_planes = inst->prop.num_planes[fmt->type];
+	num_planes = inst->prop.num_planes[fmt->type];
+
+	if (inst->in_reconfig) {
+		inst->prop.height[OUTPUT_PORT] = inst->reconfig_height;
+		inst->prop.width[OUTPUT_PORT] = inst->reconfig_width;
+
+		rc = msm_vidc_check_session_supported(inst);
+		if (rc) {
+			dprintk(VIDC_ERR,
+					"%s: unsupported session\n", __func__);
+			goto exit;
+		}
+	}
+
+	f->fmt.pix_mp.height = inst->prop.height[CAPTURE_PORT];
+	f->fmt.pix_mp.width = inst->prop.width[CAPTURE_PORT];
+	stride = inst->prop.width[CAPTURE_PORT];
+	scanlines = inst->prop.height[CAPTURE_PORT];
+
+	if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+		plane_sizes = &inst->bufq[OUTPUT_PORT].vb2_bufq.plane_sizes[0];
+		for (i = 0; i < num_planes; ++i) {
+			if (!plane_sizes[i]) {
+				f->fmt.pix_mp.plane_fmt[i].sizeimage =
+					get_frame_size(inst, fmt, f->type, i);
+				plane_sizes[i] =
+					f->fmt.pix_mp.plane_fmt[i].sizeimage;
+			} else {
+				f->fmt.pix_mp.plane_fmt[i].sizeimage =
+					plane_sizes[i];
+			}
+		}
+		f->fmt.pix_mp.height = inst->prop.height[OUTPUT_PORT];
+		f->fmt.pix_mp.width = inst->prop.width[OUTPUT_PORT];
+		f->fmt.pix_mp.plane_fmt[0].bytesperline =
+			(__u16)inst->prop.width[OUTPUT_PORT];
+		f->fmt.pix_mp.plane_fmt[0].reserved[0] =
+			(__u16)inst->prop.height[OUTPUT_PORT];
+	} else {
+		switch (fmt->fourcc) {
+		case V4L2_PIX_FMT_NV12:
+			color_format = COLOR_FMT_NV12;
+			break;
+		case V4L2_PIX_FMT_NV12_UBWC:
+			color_format = COLOR_FMT_NV12_UBWC;
+			break;
+		case V4L2_PIX_FMT_NV12_TP10_UBWC:
+			color_format = COLOR_FMT_NV12_BPP10_UBWC;
+			break;
+		default:
+			dprintk(VIDC_WARN, "Color format not recognized\n");
+			rc = -ENOTSUPP;
+			goto exit;
+		}
+
+		stride = VENUS_Y_STRIDE(color_format,
+				inst->prop.width[CAPTURE_PORT]);
+		scanlines = VENUS_Y_SCANLINES(color_format,
+				inst->prop.height[CAPTURE_PORT]);
+
+		f->fmt.pix_mp.plane_fmt[0].sizeimage =
+			fmt->get_frame_size(0,
+			inst->prop.height[CAPTURE_PORT],
+			inst->prop.width[CAPTURE_PORT]);
+
+		extra_idx = EXTRADATA_IDX(num_planes);
+		if (extra_idx && extra_idx < VIDEO_MAX_PLANES) {
+			f->fmt.pix_mp.plane_fmt[extra_idx].sizeimage =
+				VENUS_EXTRADATA_SIZE(
+					inst->prop.height[CAPTURE_PORT],
+					inst->prop.width[CAPTURE_PORT]);
+		}
+
+		for (i = 0; i < num_planes; ++i)
+			inst->bufq[CAPTURE_PORT].vb2_bufq.plane_sizes[i] =
+				f->fmt.pix_mp.plane_fmt[i].sizeimage;
+
+		f->fmt.pix_mp.height = inst->prop.height[CAPTURE_PORT];
+		f->fmt.pix_mp.width = inst->prop.width[CAPTURE_PORT];
+		f->fmt.pix_mp.plane_fmt[0].bytesperline =
+			(__u16)stride;
+		f->fmt.pix_mp.plane_fmt[0].reserved[0] =
+			(__u16)scanlines;
+	}
+
+exit:
+	return rc;
+}
+
+static int set_default_properties(struct msm_vidc_inst *inst)
+{
+	struct hfi_device *hdev;
+	struct v4l2_control ctrl = {0};
+	enum hal_default_properties defaults;
+	int rc = 0;
+
+	if (!inst || !inst->core || !inst->core->device) {
+		dprintk(VIDC_ERR, "%s - invalid params\n", __func__);
+		return -EINVAL;
+	}
+
+	hdev = inst->core->device;
+
+	defaults = call_hfi_op(hdev, get_default_properties,
+					hdev->hfi_device_data);
+
+	if (defaults & HAL_VIDEO_DYNAMIC_BUF_MODE) {
+		dprintk(VIDC_DBG, "Enable dynamic buffer mode\n");
+		ctrl.id = V4L2_CID_MPEG_VIDC_VIDEO_ALLOC_MODE_OUTPUT;
+		ctrl.value = V4L2_MPEG_VIDC_VIDEO_DYNAMIC;
+		rc = msm_comm_s_ctrl(inst, &ctrl);
+		if (rc)
+			dprintk(VIDC_ERR,
+				"Failed to enable dynamic buffer mode by default: %d\n",
+				rc);
+	}
+
+	return rc;
+}
+
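+/*
+ * VIDIOC_S_FMT handler. The CAPTURE (decoded frames) and OUTPUT
+ * (compressed bitstream) ports are handled separately: the CAPTURE path
+ * programs the color format and, in split mode, the OUTPUT2 frame size,
+ * while the OUTPUT path validates the codec against the core's
+ * capabilities and walks the instance through CORE_INIT_DONE and
+ * OPEN_DONE. Both paths publish per-plane sizes to the vb2 queues.
+ */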
+int msm_vdec_s_fmt(struct msm_vidc_inst *inst, struct v4l2_format *f)
+{
+	struct msm_vidc_format *fmt = NULL;
+	struct hal_frame_size frame_sz;
+	unsigned int extra_idx = 0;
+	int rc = 0;
+	int ret = 0;
+	int i;
+	int max_input_size = 0;
+
+	if (!inst || !f) {
+		dprintk(VIDC_ERR, "%s invalid parameters\n", __func__);
+		return -EINVAL;
+	}
+
+	if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
+		fmt = msm_comm_get_pixel_fmt_fourcc(vdec_formats,
+			ARRAY_SIZE(vdec_formats), f->fmt.pix_mp.pixelformat,
+			CAPTURE_PORT);
+		if (!fmt || fmt->type != CAPTURE_PORT) {
+			dprintk(VIDC_ERR,
+				"Format: %d not supported on CAPTURE port\n",
+				f->fmt.pix_mp.pixelformat);
+			rc = -EINVAL;
+			goto err_invalid_fmt;
+		}
+		memcpy(&inst->fmts[fmt->type], fmt,
+				sizeof(struct msm_vidc_format));
+
+		inst->prop.width[CAPTURE_PORT] = f->fmt.pix_mp.width;
+		inst->prop.height[CAPTURE_PORT] = f->fmt.pix_mp.height;
+		msm_comm_set_color_format(inst,
+				msm_comm_get_hal_output_buffer(inst),
+				f->fmt.pix_mp.pixelformat);
+
+		if (msm_comm_get_stream_output_mode(inst) ==
+			HAL_VIDEO_DECODER_SECONDARY) {
+			frame_sz.buffer_type = HAL_BUFFER_OUTPUT2;
+			frame_sz.width = inst->prop.width[CAPTURE_PORT];
+			frame_sz.height = inst->prop.height[CAPTURE_PORT];
+			dprintk(VIDC_DBG,
+				"buffer type = %d width = %d, height = %d\n",
+				frame_sz.buffer_type, frame_sz.width,
+				frame_sz.height);
+			ret = msm_comm_try_set_prop(inst,
+				HAL_PARAM_FRAME_SIZE, &frame_sz);
+		}
+
+		f->fmt.pix_mp.plane_fmt[0].sizeimage =
+			inst->fmts[fmt->type].get_frame_size(0,
+			f->fmt.pix_mp.height, f->fmt.pix_mp.width);
+
+		extra_idx = EXTRADATA_IDX(inst->prop.num_planes[fmt->type]);
+		if (extra_idx && extra_idx < VIDEO_MAX_PLANES) {
+			f->fmt.pix_mp.plane_fmt[extra_idx].sizeimage =
+				VENUS_EXTRADATA_SIZE(
+					inst->prop.height[CAPTURE_PORT],
+					inst->prop.width[CAPTURE_PORT]);
+		}
+
+		f->fmt.pix_mp.num_planes = inst->prop.num_planes[fmt->type];
+		for (i = 0; i < inst->prop.num_planes[fmt->type]; ++i) {
+			inst->bufq[CAPTURE_PORT].vb2_bufq.plane_sizes[i] =
+				f->fmt.pix_mp.plane_fmt[i].sizeimage;
+		}
+	} else if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+		inst->prop.width[OUTPUT_PORT] = f->fmt.pix_mp.width;
+		inst->prop.height[OUTPUT_PORT] = f->fmt.pix_mp.height;
+
+		fmt = msm_comm_get_pixel_fmt_fourcc(vdec_formats,
+				ARRAY_SIZE(vdec_formats),
+				f->fmt.pix_mp.pixelformat,
+				OUTPUT_PORT);
+		if (!fmt || fmt->type != OUTPUT_PORT) {
+			dprintk(VIDC_ERR,
+			"Format: %d not supported on OUTPUT port\n",
+			f->fmt.pix_mp.pixelformat);
+			rc = -EINVAL;
+			goto err_invalid_fmt;
+		}
+		memcpy(&inst->fmts[fmt->type], fmt,
+				sizeof(struct msm_vidc_format));
+
+		rc = msm_comm_try_state(inst, MSM_VIDC_CORE_INIT_DONE);
+		if (rc) {
+			dprintk(VIDC_ERR, "Failed to initialize instance\n");
+			goto err_invalid_fmt;
+		}
+
+		if (!(get_hal_codec(inst->fmts[fmt->type].fourcc) &
+			inst->core->dec_codec_supported)) {
+			dprintk(VIDC_ERR,
+				"Codec(%#x) is not present in the supported codecs list(%#x)\n",
+				get_hal_codec(inst->fmts[fmt->type].fourcc),
+				inst->core->dec_codec_supported);
+			rc = -EINVAL;
+			goto err_invalid_fmt;
+		}
+
+		rc = msm_comm_try_state(inst, MSM_VIDC_OPEN_DONE);
+		if (rc) {
+			dprintk(VIDC_ERR, "Failed to open instance\n");
+			goto err_invalid_fmt;
+		}
+
+		rc = msm_vidc_check_session_supported(inst);
+		if (rc) {
+			dprintk(VIDC_ERR,
+				"%s: session not supported\n", __func__);
+			goto err_invalid_fmt;
+		}
+
+		frame_sz.buffer_type = HAL_BUFFER_INPUT;
+		frame_sz.width = inst->prop.width[OUTPUT_PORT];
+		frame_sz.height = inst->prop.height[OUTPUT_PORT];
+		dprintk(VIDC_DBG,
+			"buffer type = %d width = %d, height = %d\n",
+			frame_sz.buffer_type, frame_sz.width,
+			frame_sz.height);
+		msm_comm_try_set_prop(inst, HAL_PARAM_FRAME_SIZE, &frame_sz);
+
+		max_input_size = get_frame_size(
+				inst, &inst->fmts[fmt->type], f->type, 0);
+		if (f->fmt.pix_mp.plane_fmt[0].sizeimage > max_input_size ||
+			!f->fmt.pix_mp.plane_fmt[0].sizeimage) {
+			f->fmt.pix_mp.plane_fmt[0].sizeimage = max_input_size;
+		}
+
+		f->fmt.pix_mp.num_planes = inst->prop.num_planes[fmt->type];
+		for (i = 0; i < inst->prop.num_planes[fmt->type]; ++i) {
+			inst->bufq[OUTPUT_PORT].vb2_bufq.plane_sizes[i] =
+				f->fmt.pix_mp.plane_fmt[i].sizeimage;
+		}
+
+		set_default_properties(inst);
+	}
+err_invalid_fmt:
+	return rc;
+}
+
+int msm_vdec_querycap(struct msm_vidc_inst *inst, struct v4l2_capability *cap)
+{
+	if (!inst || !cap) {
+		dprintk(VIDC_ERR,
+			"Invalid input, inst = %pK, cap = %pK\n", inst, cap);
+		return -EINVAL;
+	}
+	strlcpy(cap->driver, MSM_VIDC_DRV_NAME, sizeof(cap->driver));
+	strlcpy(cap->card, MSM_VDEC_DVC_NAME, sizeof(cap->card));
+	cap->bus_info[0] = 0;
+	cap->version = MSM_VIDC_VERSION;
+	cap->device_caps = V4L2_CAP_VIDEO_CAPTURE_MPLANE |
+						V4L2_CAP_VIDEO_OUTPUT_MPLANE |
+						V4L2_CAP_STREAMING;
+	cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
+	memset(cap->reserved, 0, sizeof(cap->reserved));
+	return 0;
+}
+
+int msm_vdec_enum_fmt(struct msm_vidc_inst *inst, struct v4l2_fmtdesc *f)
+{
+	const struct msm_vidc_format *fmt = NULL;
+	int rc = 0;
+
+	if (!inst || !f) {
+		dprintk(VIDC_ERR,
+			"Invalid input, inst = %pK, f = %pK\n", inst, f);
+		return -EINVAL;
+	}
+	if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
+		fmt = msm_comm_get_pixel_fmt_index(vdec_formats,
+			ARRAY_SIZE(vdec_formats), f->index, CAPTURE_PORT);
+	} else if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+		fmt = msm_comm_get_pixel_fmt_index(vdec_formats,
+			ARRAY_SIZE(vdec_formats), f->index, OUTPUT_PORT);
+		f->flags = V4L2_FMT_FLAG_COMPRESSED;
+	}
+
+	memset(f->reserved, 0, sizeof(f->reserved));
+	if (fmt) {
+		strlcpy(f->description, fmt->description,
+				sizeof(f->description));
+		f->pixelformat = fmt->fourcc;
+	} else {
+		dprintk(VIDC_DBG, "No more formats found\n");
+		rc = -EINVAL;
+	}
+	return rc;
+}
+
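+/*
+ * Tell firmware how many buffers of @type will actually be used, via
+ * HAL_PARAM_BUFFER_COUNT_ACTUAL.
+ */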
+static int set_actual_buffer_count(struct msm_vidc_inst *inst,
+			int count, enum hal_buffer type)
+{
+	int rc = 0;
+	struct hfi_device *hdev;
+	struct hal_buffer_count_actual buf_count;
+
+	hdev = inst->core->device;
+
+	buf_count.buffer_type = type;
+	buf_count.buffer_count_actual = count;
+	rc = call_hfi_op(hdev, session_set_property,
+		inst->session, HAL_PARAM_BUFFER_COUNT_ACTUAL, &buf_count);
+	if (rc)
+		dprintk(VIDC_ERR,
+			"Failed to set actual buffer count %d for buffer type %d\n",
+			count, type);
+	return rc;
+}
+
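+/*
+ * vb2 .queue_setup op. Negotiates buffer counts and per-plane sizes
+ * with the client and firmware. Input counts are raised for VP9 and
+ * HEVC, which may need extra reference buffers; capture counts honor
+ * the firmware minimum plus any extra buffers DCVS asks for, clamped
+ * to VB2_MAX_FRAME.
+ */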
+static int msm_vdec_queue_setup(struct vb2_queue *q,
+				const void *parg,
+				unsigned int *num_buffers,
+				unsigned int *num_planes, unsigned int sizes[],
+				void *alloc_ctxs[])
+{
+	int i, rc = 0;
+	struct msm_vidc_inst *inst;
+	struct hal_buffer_requirements *bufreq;
+	int extra_idx = 0;
+	int min_buff_count = 0;
+
+	if (!q || !num_buffers || !num_planes
+		|| !sizes || !q->drv_priv) {
+		dprintk(VIDC_ERR,
+			"Invalid input, q = %pK, num_buffers = %pK, num_planes = %pK\n",
+			q, num_buffers, num_planes);
+		return -EINVAL;
+	}
+	inst = q->drv_priv;
+
+	if (!inst || !inst->core || !inst->core->device) {
+		dprintk(VIDC_ERR, "%s invalid parameters\n", __func__);
+		return -EINVAL;
+	}
+
+	rc = msm_comm_try_get_bufreqs(inst);
+	if (rc) {
+		dprintk(VIDC_ERR,
+			"%s: failed to get buffer requirements\n", __func__);
+		goto exit;
+	}
+
+	switch (q->type) {
+	case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
+		*num_planes = inst->prop.num_planes[OUTPUT_PORT];
+		if (*num_buffers < MIN_NUM_OUTPUT_BUFFERS ||
+				*num_buffers > MAX_NUM_OUTPUT_BUFFERS)
+			*num_buffers = MIN_NUM_OUTPUT_BUFFERS;
+		/*
+		 * Increase the input buffer count to
+		 * MIN_NUM_OUTPUT_BUFFERS_VP9 for VP9: clips with
+		 * superframes carrying more than 4 subframes need more
+		 * than 4 reference frames to decode.
+		 */
+		if (inst->fmts[OUTPUT_PORT].fourcc ==
+				V4L2_PIX_FMT_VP9 &&
+				*num_buffers < MIN_NUM_OUTPUT_BUFFERS_VP9)
+			*num_buffers = MIN_NUM_OUTPUT_BUFFERS_VP9;
+		else if (inst->fmts[OUTPUT_PORT].fourcc ==
+				V4L2_PIX_FMT_HEVC &&
+				*num_buffers < MIN_NUM_OUTPUT_BUFFERS_HEVC)
+			*num_buffers = MIN_NUM_OUTPUT_BUFFERS_HEVC;
+
+		for (i = 0; i < *num_planes; i++) {
+			sizes[i] = get_frame_size(inst,
+					&inst->fmts[OUTPUT_PORT], q->type, i);
+		}
+		rc = set_actual_buffer_count(inst, *num_buffers,
+			HAL_BUFFER_INPUT);
+		break;
+	case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
+		dprintk(VIDC_DBG, "Getting bufreqs on capture plane\n");
+		*num_planes = inst->prop.num_planes[CAPTURE_PORT];
+		rc = msm_comm_try_state(inst, MSM_VIDC_OPEN_DONE);
+		if (rc) {
+			dprintk(VIDC_ERR, "Failed to open instance\n");
+			break;
+		}
+		rc = msm_comm_try_get_bufreqs(inst);
+		if (rc) {
+			dprintk(VIDC_ERR,
+				"Failed to get buffer requirements: %d\n", rc);
+			break;
+		}
+
+		bufreq = get_buff_req_buffer(inst,
+			msm_comm_get_hal_output_buffer(inst));
+		if (!bufreq) {
+			dprintk(VIDC_ERR,
+				"No buffer requirement for buffer type %x\n",
+				msm_comm_get_hal_output_buffer(inst));
+			rc = -EINVAL;
+			break;
+		}
+		msm_dcvs_try_enable(inst);
+
+		/*
+		 * Pretend as if FW itself is asking for additional
+		 * buffers. Doing *num_buffers +=
+		 * MSM_VIDC_ADDITIONAL_BUFS_FOR_DCVS would be wrong, as
+		 * it would keep increasing the count on every reqbufs
+		 * call whenever *num_buffers exceeds the minimum
+		 * requirement.
+		 */
+		*num_buffers = max(*num_buffers, bufreq->buffer_count_min
+			+ msm_dcvs_get_extra_buff_count(inst));
+
+		min_buff_count = (!!(inst->flags & VIDC_THUMBNAIL)) ?
+			MIN_NUM_THUMBNAIL_MODE_CAPTURE_BUFFERS :
+			MIN_NUM_CAPTURE_BUFFERS;
+
+		*num_buffers = clamp_val(*num_buffers,
+			min_buff_count, VB2_MAX_FRAME);
+
+		dprintk(VIDC_DBG, "Set actual output buffer count: %d\n",
+				*num_buffers);
+		rc = set_actual_buffer_count(inst, *num_buffers,
+					msm_comm_get_hal_output_buffer(inst));
+		if (rc)
+			break;
+
+		if (*num_buffers != bufreq->buffer_count_actual) {
+			rc = msm_comm_try_get_bufreqs(inst);
+			if (rc) {
+				dprintk(VIDC_WARN,
+					"Failed to get buf req, %d\n", rc);
+				break;
+			}
+		}
+		dprintk(VIDC_DBG, "count = %d, size = %d, alignment = %d\n",
+				inst->buff_req.buffer[1].buffer_count_actual,
+				inst->buff_req.buffer[1].buffer_size,
+				inst->buff_req.buffer[1].buffer_alignment);
+		sizes[0] = inst->bufq[CAPTURE_PORT].vb2_bufq.plane_sizes[0];
+
+		/*
+		 * Set the actual buffer count to firmware for DPB buffers.
+		 * Firmware mandates setting the minimum buffer size and
+		 * actual buffer count for both OUTPUT and OUTPUT2, so we
+		 * send the same buffer size information back to firmware.
+		 */
+		if (msm_comm_get_stream_output_mode(inst) ==
+			HAL_VIDEO_DECODER_SECONDARY) {
+			bufreq = get_buff_req_buffer(inst,
+					HAL_BUFFER_OUTPUT);
+			if (!bufreq) {
+				rc = -EINVAL;
+				break;
+			}
+
+			rc = set_actual_buffer_count(inst,
+				bufreq->buffer_count_actual,
+				HAL_BUFFER_OUTPUT);
+			if (rc)
+				break;
+		}
+
+		extra_idx = EXTRADATA_IDX(*num_planes);
+		if (extra_idx && extra_idx < VIDEO_MAX_PLANES) {
+			sizes[extra_idx] =
+				VENUS_EXTRADATA_SIZE(
+					inst->prop.height[CAPTURE_PORT],
+					inst->prop.width[CAPTURE_PORT]);
+		}
+		break;
+	default:
+		dprintk(VIDC_ERR, "Invalid q type = %d\n", q->type);
+		rc = -EINVAL;
+		break;
+	}
+exit:
+	return rc;
+}
+
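+/*
+ * Worst-case scratch sizing for secure decode sessions: temporarily
+ * program the session for its maximum supported resolution (switching
+ * to split mode with UBWC DPBs if needed), query the resulting scratch
+ * buffer requirements and allocate them, then restore the original
+ * mode, buffer count and frame size. Presumably this is done so that
+ * CP-protected scratch buffers never need to be reallocated on a
+ * mid-stream resolution change.
+ */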
+static int set_max_internal_buffers_size(struct msm_vidc_inst *inst)
+{
+	int rc = 0;
+	struct msm_vidc_list *buf_list = &inst->scratchbufs;
+	enum multi_stream stream_mode;
+	struct hfi_device *hdev;
+	struct hal_buffer_requirements *output_buf;
+	u32 output_count_actual;
+
+	struct {
+		enum hal_buffer type;
+		struct hal_buffer_requirements *req;
+		size_t size;
+	} internal_buffers[] = {
+		{ HAL_BUFFER_INTERNAL_SCRATCH, NULL, 0},
+		{ HAL_BUFFER_INTERNAL_SCRATCH_1, NULL, 0},
+	};
+
+	struct hal_frame_size frame_sz;
+	int i;
+	struct v4l2_ext_controls ext_ctrls;
+	struct v4l2_ext_control controls[2];
+
+	hdev = inst->core->device;
+	mutex_lock(&buf_list->lock);
+	if (!list_empty(&buf_list->list)) {
+		dprintk(VIDC_DBG, "Scratch list already has allocated buf\n");
+		mutex_unlock(&buf_list->lock);
+		return 0;
+	}
+	mutex_unlock(&buf_list->lock);
+
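+	/*
+	 * One 16x16 macroblock covers 256 pixels, so the worst-case
+	 * height at maximum width is mbs_per_frame.max * 256 divided
+	 * by width.max.
+	 */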
+	frame_sz.buffer_type = HAL_BUFFER_INPUT;
+	frame_sz.width = inst->capability.width.max;
+	frame_sz.height =
+		(inst->capability.mbs_per_frame.max * 256) /
+		inst->capability.width.max;
+
+	dprintk(VIDC_DBG,
+		"Max buffer reqs, buffer type = %d width = %d, height = %d, max_mbs_per_frame = %d\n",
+		frame_sz.buffer_type, frame_sz.width,
+		frame_sz.height, inst->capability.mbs_per_frame.max);
+
+	stream_mode = msm_comm_get_stream_output_mode(inst);
+
+	if (stream_mode == HAL_VIDEO_DECODER_PRIMARY) {
+		output_buf = get_buff_req_buffer(inst, HAL_BUFFER_OUTPUT);
+		if (!output_buf) {
+			dprintk(VIDC_ERR,
+				"No buffer requirement for buffer type %x\n",
+				HAL_BUFFER_OUTPUT);
+			rc = -EINVAL;
+			goto alloc_fail;
+		}
+		output_count_actual = output_buf->buffer_count_actual;
+		ext_ctrls.count = 2;
+		ext_ctrls.controls = controls;
+		controls[0].id = V4L2_CID_MPEG_VIDC_VIDEO_STREAM_OUTPUT_MODE;
+		controls[0].value =
+			V4L2_CID_MPEG_VIDC_VIDEO_STREAM_OUTPUT_SECONDARY;
+		controls[1].id = V4L2_CID_MPEG_VIDC_VIDEO_DPB_COLOR_FORMAT;
+		controls[1].value = V4L2_MPEG_VIDC_VIDEO_DPB_COLOR_FMT_UBWC;
+		rc = try_set_ext_ctrl(inst, &ext_ctrls);
+		if (rc) {
+			dprintk(VIDC_ERR,
+				"%s Failed to move to split mode %d\n",
+				__func__, rc);
+			goto alloc_fail;
+		}
+	}
+
+	msm_comm_try_set_prop(inst, HAL_PARAM_FRAME_SIZE, &frame_sz);
+	frame_sz.buffer_type = HAL_BUFFER_OUTPUT2;
+	msm_comm_try_set_prop(inst, HAL_PARAM_FRAME_SIZE, &frame_sz);
+	rc = msm_comm_try_get_bufreqs(inst);
+	if (rc) {
+		dprintk(VIDC_ERR,
+			"%s Failed to get max buf req, %d\n", __func__, rc);
+		return 0;
+	}
+
+	for (i = 0; i < ARRAY_SIZE(internal_buffers); i++) {
+		internal_buffers[i].req =
+			get_buff_req_buffer(inst, internal_buffers[i].type);
+		internal_buffers[i].size = internal_buffers[i].req ?
+			internal_buffers[i].req->buffer_size : 0;
+		if (internal_buffers[i].req == NULL)
+			continue;
+
+		rc = allocate_and_set_internal_bufs(inst,
+					internal_buffers[i].req,
+					&inst->scratchbufs, false);
+		if (rc)
+			goto alloc_fail;
+		dprintk(VIDC_DBG,
+			"Allocated scratch type : %d size to : %zd\n",
+			internal_buffers[i].type, internal_buffers[i].size);
+	}
+
+	if (stream_mode == HAL_VIDEO_DECODER_PRIMARY) {
+		ext_ctrls.count = 2;
+		ext_ctrls.controls = controls;
+		controls[0].id = V4L2_CID_MPEG_VIDC_VIDEO_STREAM_OUTPUT_MODE;
+		controls[0].value =
+			V4L2_CID_MPEG_VIDC_VIDEO_STREAM_OUTPUT_PRIMARY;
+		controls[1].id = V4L2_CID_MPEG_VIDC_VIDEO_DPB_COLOR_FORMAT;
+		controls[1].value = V4L2_MPEG_VIDC_VIDEO_DPB_COLOR_FMT_NONE;
+		rc = try_set_ext_ctrl(inst, &ext_ctrls);
+		if (rc) {
+			dprintk(VIDC_ERR,
+				"Failed to move back to combined mode: %d\n",
+				rc);
+			goto alloc_fail;
+		}
+		rc = set_actual_buffer_count(inst, output_count_actual,
+			HAL_BUFFER_OUTPUT);
+		if (rc) {
+			dprintk(VIDC_ERR,
+				"Failed to set output buffer count(%u): %d\n",
+				output_count_actual, rc);
+			goto alloc_fail;
+		}
+	}
+
+	frame_sz.buffer_type = HAL_BUFFER_INPUT;
+	frame_sz.width = inst->prop.width[OUTPUT_PORT];
+	frame_sz.height = inst->prop.height[OUTPUT_PORT];
+
+	msm_comm_try_set_prop(inst, HAL_PARAM_FRAME_SIZE, &frame_sz);
+	rc = msm_comm_try_get_bufreqs(inst);
+	if (rc) {
+		dprintk(VIDC_ERR,
+			"%s Failed to get back old buf req, %d\n",
+			__func__, rc);
+		goto alloc_fail;
+	}
+	dprintk(VIDC_DBG,
+			"Old buffer reqs, buffer type = %d width = %d, height = %d\n",
+			frame_sz.buffer_type, frame_sz.width,
+			frame_sz.height);
+
+	return 0;
+
+alloc_fail:
+	msm_comm_release_scratch_buffers(inst, false);
+	return rc;
+}
+
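+/*
+ * Bring the session up to START_DONE: set the minimum output buffer
+ * size, allocate scratch/persist (and, in split mode, driver-owned
+ * output) buffers, send session_continue when resuming from a port
+ * reconfig, and scale clocks and bus bandwidth.
+ */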
+static inline int start_streaming(struct msm_vidc_inst *inst)
+{
+	int rc = 0;
+	struct hfi_device *hdev;
+	bool slave_side_cp = inst->core->resources.slave_side_cp;
+	struct hal_buffer_size_minimum b;
+	unsigned int buffer_size;
+	struct msm_vidc_format *fmt = NULL;
+	bool max_internal_buf = false;
+
+	fmt = &inst->fmts[CAPTURE_PORT];
+	buffer_size = fmt->get_frame_size(0,
+		inst->prop.height[CAPTURE_PORT],
+		inst->prop.width[CAPTURE_PORT]);
+	hdev = inst->core->device;
+
+	if (msm_comm_get_stream_output_mode(inst) ==
+		HAL_VIDEO_DECODER_SECONDARY) {
+		rc = msm_vidc_check_scaling_supported(inst);
+		if (rc) {
+			dprintk(VIDC_ERR,
+				"H/w scaling is not in valid range\n");
+			return rc;
+		}
+		b.buffer_type = HAL_BUFFER_OUTPUT2;
+	} else {
+		b.buffer_type = HAL_BUFFER_OUTPUT;
+	}
+
+	b.buffer_size = buffer_size;
+	rc = call_hfi_op(hdev, session_set_property,
+		inst->session, HAL_PARAM_BUFFER_SIZE_MINIMUM, &b);
+	if (rc) {
+		dprintk(VIDC_ERR,
+			"Failed to set minimum buffer size: %d\n", rc);
+		return -EINVAL;
+	}
+	max_internal_buf = (inst->flags & VIDC_SECURE) && !slave_side_cp
+				&& (inst->session_type == MSM_VIDC_DECODER);
+	if (max_internal_buf) {
+		rc = set_max_internal_buffers_size(inst);
+		if (rc) {
+			dprintk(VIDC_ERR,
+				"Failed to set max scratch buffer size: %d\n",
+				rc);
+			goto fail_start;
+		}
+	}
+	rc = msm_comm_set_scratch_buffers(inst, max_internal_buf);
+	if (rc) {
+		dprintk(VIDC_ERR,
+			"Failed to set scratch buffers: %d\n", rc);
+		goto fail_start;
+	}
+	rc = msm_comm_set_persist_buffers(inst);
+	if (rc) {
+		dprintk(VIDC_ERR,
+			"Failed to set persist buffers: %d\n", rc);
+		goto fail_start;
+	}
+
+	if (msm_comm_get_stream_output_mode(inst) ==
+		HAL_VIDEO_DECODER_SECONDARY) {
+		rc = msm_comm_set_output_buffers(inst);
+		if (rc) {
+			dprintk(VIDC_ERR,
+				"Failed to set output buffers: %d\n", rc);
+			goto fail_start;
+		}
+	}
+
+	/*
+	 * For seq_changed_insufficient, driver should set session_continue
+	 * to firmware after the following sequence
+	 * - driver raises insufficient event to v4l2 client
+	 * - all output buffers have been flushed and freed
+	 * - v4l2 client queries buffer requirements and splits/combines OPB-DPB
+	 * - v4l2 client sets new set of buffers to firmware
+	 * - v4l2 client issues CONTINUE to firmware to resume decoding of
+	 *   submitted ETBs.
+	 */
+	if (inst->in_reconfig) {
+		dprintk(VIDC_DBG, "send session_continue after reconfig\n");
+		rc = call_hfi_op(hdev, session_continue,
+			(void *) inst->session);
+		if (rc) {
+			dprintk(VIDC_ERR,
+				"%s - failed to send session_continue\n",
+				__func__);
+			goto fail_start;
+		}
+	}
+	inst->in_reconfig = false;
+
+	msm_comm_scale_clocks_and_bus(inst);
+
+	rc = msm_comm_try_state(inst, MSM_VIDC_START_DONE);
+	if (rc) {
+		dprintk(VIDC_ERR,
+			"Failed to move inst: %pK to start done state\n", inst);
+		goto fail_start;
+	}
+	msm_dcvs_init_load(inst);
+	if (msm_comm_get_stream_output_mode(inst) ==
+		HAL_VIDEO_DECODER_SECONDARY) {
+		rc = msm_comm_queue_output_buffers(inst);
+		if (rc) {
+			dprintk(VIDC_ERR,
+				"Failed to queue output buffers: %d\n", rc);
+			goto fail_start;
+		}
+	}
+
+fail_start:
+	return rc;
+}
+
+static inline int stop_streaming(struct msm_vidc_inst *inst)
+{
+	int rc = 0;
+
+	rc = msm_comm_try_state(inst, MSM_VIDC_RELEASE_RESOURCES_DONE);
+	if (rc)
+		dprintk(VIDC_ERR,
+			"Failed to move inst: %pK to release resources done state\n",
+			inst);
+	return rc;
+}
+
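+/*
+ * vb2 .start_streaming op. Decoding starts only once both the OUTPUT
+ * and CAPTURE queues are streaming, so whichever queue starts second
+ * triggers start_streaming() and commits buffers queued before
+ * STREAMON. On failure, active buffers are returned to the queued
+ * state and the pending queue is drained.
+ */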
+static int msm_vdec_start_streaming(struct vb2_queue *q, unsigned int count)
+{
+	struct msm_vidc_inst *inst;
+	int rc = 0;
+	struct hfi_device *hdev;
+	struct vb2_buffer *vb;
+	struct vb2_buf_entry *temp, *next;
+
+	if (!q || !q->drv_priv) {
+		dprintk(VIDC_ERR, "Invalid input, q = %pK\n", q);
+		return -EINVAL;
+	}
+	inst = q->drv_priv;
+	if (!inst || !inst->core || !inst->core->device) {
+		dprintk(VIDC_ERR, "%s invalid parameters\n", __func__);
+		return -EINVAL;
+	}
+
+	if (inst->state == MSM_VIDC_CORE_INVALID ||
+		inst->core->state == VIDC_CORE_INVALID ||
+		inst->core->state == VIDC_CORE_UNINIT) {
+		rc = -EINVAL;
+		goto stream_start_failed;
+	}
+
+	hdev = inst->core->device;
+	dprintk(VIDC_DBG, "Streamon called on: %d capability for inst: %pK\n",
+		q->type, inst);
+	switch (q->type) {
+	case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
+		if (inst->bufq[CAPTURE_PORT].vb2_bufq.streaming)
+			rc = start_streaming(inst);
+		break;
+	case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
+		if (inst->bufq[OUTPUT_PORT].vb2_bufq.streaming)
+			rc = start_streaming(inst);
+		break;
+	default:
+		dprintk(VIDC_ERR, "Queue type is not supported: %d\n", q->type);
+		return -EINVAL;
+	}
+	if (rc) {
+		dprintk(VIDC_ERR,
+			"Streamon failed on: %d capability for inst: %pK\n",
+			q->type, inst);
+		goto stream_start_failed;
+	}
+
+	rc = msm_comm_qbuf(inst, NULL);
+	if (rc) {
+		dprintk(VIDC_ERR,
+				"Failed to commit buffers queued before STREAM_ON to hardware: %d\n",
+				rc);
+		goto stream_start_failed;
+	}
+
+stream_start_failed:
+	if (rc) {
+		list_for_each_entry(vb, &q->queued_list, queued_entry) {
+			if (vb->type == q->type &&
+					vb->state == VB2_BUF_STATE_ACTIVE)
+				vb2_buffer_done(vb, VB2_BUF_STATE_QUEUED);
+		}
+		mutex_lock(&inst->pendingq.lock);
+		list_for_each_entry_safe(temp, next,
+				&inst->pendingq.list, list) {
+			if (temp->vb->type == q->type) {
+				list_del(&temp->list);
+				kfree(temp);
+			}
+		}
+		mutex_unlock(&inst->pendingq.lock);
+	}
+	return rc;
+}
+
+static void msm_vdec_stop_streaming(struct vb2_queue *q)
+{
+	struct msm_vidc_inst *inst;
+	int rc = 0;
+
+	if (!q || !q->drv_priv) {
+		dprintk(VIDC_ERR, "Invalid input, q = %pK\n", q);
+		return;
+	}
+
+	inst = q->drv_priv;
+	dprintk(VIDC_DBG, "Streamoff called on: %d capability\n", q->type);
+	switch (q->type) {
+	case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
+		if (!inst->bufq[CAPTURE_PORT].vb2_bufq.streaming)
+			rc = stop_streaming(inst);
+		break;
+	case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
+		if (!inst->bufq[OUTPUT_PORT].vb2_bufq.streaming)
+			rc = stop_streaming(inst);
+		break;
+	default:
+		dprintk(VIDC_ERR,
+			"Q-type is not supported: %d\n", q->type);
+		rc = -EINVAL;
+		break;
+	}
+
+	msm_comm_scale_clocks_and_bus(inst);
+
+	if (rc)
+		dprintk(VIDC_ERR,
+			"Failed to move inst: %pK, cap = %d to state: %d\n",
+			inst, q->type, MSM_VIDC_RELEASE_RESOURCES_DONE);
+}
+
+static void msm_vdec_buf_queue(struct vb2_buffer *vb)
+{
+	int rc = msm_comm_qbuf(vb2_get_drv_priv(vb->vb2_queue), vb);
+	if (rc)
+		dprintk(VIDC_ERR, "Failed to queue buffer: %d\n", rc);
+}
+
+static void msm_vdec_buf_cleanup(struct vb2_buffer *vb)
+{
+	int rc = 0;
+	struct buf_queue *q = NULL;
+	struct msm_vidc_inst *inst = NULL;
+
+	if (!vb) {
+		dprintk(VIDC_ERR, "%s : Invalid vb pointer %pK\n",
+			__func__, vb);
+		return;
+	}
+
+	inst = vb2_get_drv_priv(vb->vb2_queue);
+	if (!inst) {
+		dprintk(VIDC_ERR, "%s : Invalid inst pointer\n",
+			__func__);
+		return;
+	}
+
+	q = msm_comm_get_vb2q(inst, vb->type);
+	if (!q) {
+		dprintk(VIDC_ERR,
+			"%s : Failed to find buffer queue for type = %d\n",
+				__func__, vb->type);
+		return;
+	}
+
+	if (q->vb2_bufq.streaming) {
+		dprintk(VIDC_DBG, "%d PORT is streaming\n",
+			vb->type);
+		return;
+	}
+
+	rc = msm_vidc_release_buffers(inst, vb->type);
+	if (rc)
+		dprintk(VIDC_ERR, "%s : Failed to release buffers : %d\n",
+			__func__, rc);
+}
+
+static const struct vb2_ops msm_vdec_vb2q_ops = {
+	.queue_setup = msm_vdec_queue_setup,
+	.start_streaming = msm_vdec_start_streaming,
+	.buf_queue = msm_vdec_buf_queue,
+	.buf_cleanup = msm_vdec_buf_cleanup,
+	.stop_streaming = msm_vdec_stop_streaming,
+};
+
+const struct vb2_ops *msm_vdec_get_vb2q_ops(void)
+{
+	return &msm_vdec_vb2q_ops;
+}
+
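+/*
+ * Per-instance defaults: NV12 on the CAPTURE port, H264 on the OUTPUT
+ * port, static buffer mode on both, DEFAULT_WIDTH x DEFAULT_HEIGHT at
+ * DEFAULT_FPS.
+ */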
+int msm_vdec_inst_init(struct msm_vidc_inst *inst)
+{
+	int rc = 0;
+	struct msm_vidc_format *fmt = NULL;
+
+	if (!inst) {
+		dprintk(VIDC_ERR, "Invalid input = %pK\n", inst);
+		return -EINVAL;
+	}
+	inst->prop.height[CAPTURE_PORT] = DEFAULT_HEIGHT;
+	inst->prop.width[CAPTURE_PORT] = DEFAULT_WIDTH;
+	inst->prop.num_planes[CAPTURE_PORT] = 2;
+
+	/* By default, initialize CAPTURE port to NV12 format */
+	fmt = msm_comm_get_pixel_fmt_fourcc(vdec_formats,
+		ARRAY_SIZE(vdec_formats), V4L2_PIX_FMT_NV12,
+			CAPTURE_PORT);
+	if (!fmt || fmt->type != CAPTURE_PORT) {
+		dprintk(VIDC_ERR,
+			"vdec_formats corrupted\n");
+		return -EINVAL;
+	}
+	memcpy(&inst->fmts[fmt->type], fmt,
+			sizeof(struct msm_vidc_format));
+
+	inst->prop.height[OUTPUT_PORT] = DEFAULT_HEIGHT;
+	inst->prop.width[OUTPUT_PORT] = DEFAULT_WIDTH;
+	inst->prop.num_planes[OUTPUT_PORT] = 1;
+
+	/* By default, initialize OUTPUT port to H264 decoder */
+	fmt = msm_comm_get_pixel_fmt_fourcc(vdec_formats,
+			ARRAY_SIZE(vdec_formats), V4L2_PIX_FMT_H264,
+				OUTPUT_PORT);
+	if (!fmt || fmt->type != OUTPUT_PORT) {
+		dprintk(VIDC_ERR,
+			"vdec_formats corrupted\n");
+		return -EINVAL;
+	}
+	memcpy(&inst->fmts[fmt->type], fmt,
+			sizeof(struct msm_vidc_format));
+
+	inst->capability.height.min = MIN_SUPPORTED_HEIGHT;
+	inst->capability.height.max = MAX_SUPPORTED_HEIGHT;
+	inst->capability.width.min = MIN_SUPPORTED_WIDTH;
+	inst->capability.width.max = MAX_SUPPORTED_WIDTH;
+	inst->capability.alloc_mode_in = HAL_BUFFER_MODE_STATIC;
+	inst->capability.alloc_mode_out = HAL_BUFFER_MODE_STATIC;
+	inst->capability.secure_output2_threshold.min = 0;
+	inst->capability.secure_output2_threshold.max = 0;
+	inst->buffer_mode_set[OUTPUT_PORT] = HAL_BUFFER_MODE_STATIC;
+	inst->buffer_mode_set[CAPTURE_PORT] = HAL_BUFFER_MODE_STATIC;
+	inst->prop.fps = DEFAULT_FPS;
+	inst->operating_rate = 0;
+	return rc;
+}
+
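+/*
+ * Map the v4l2 buffer allocation mode onto the HAL buffer mode bits;
+ * returns 0 for unrecognized values.
+ */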
+static inline enum buffer_mode_type get_buf_type(int val)
+{
+	switch (val) {
+	case V4L2_MPEG_VIDC_VIDEO_STATIC:
+		return HAL_BUFFER_MODE_STATIC;
+	case V4L2_MPEG_VIDC_VIDEO_RING:
+		return HAL_BUFFER_MODE_RING;
+	case V4L2_MPEG_VIDC_VIDEO_DYNAMIC:
+		return HAL_BUFFER_MODE_DYNAMIC;
+	default:
+		dprintk(VIDC_ERR, "%s: invalid buf type: %d\n", __func__, val);
+	}
+	return 0;
+}
+
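+/*
+ * Read out volatile controls: profile/level and entropy mode are
+ * fetched live from firmware and translated back into v4l2 enum space;
+ * the secure scaling threshold comes from the cached capabilities.
+ */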
+static int try_get_ctrl(struct msm_vidc_inst *inst, struct v4l2_ctrl *ctrl)
+{
+	int rc = 0;
+	struct hfi_device *hdev;
+	union hal_get_property hprop;
+
+	if (!inst || !inst->core || !inst->core->device || !ctrl) {
+		dprintk(VIDC_ERR, "%s invalid parameters\n", __func__);
+		return -EINVAL;
+	}
+
+	hdev = inst->core->device;
+	/*
+	 * HACK: unlock the control prior to querying the hardware.  Otherwise
+	 * lower level code that attempts to do g_ctrl() will end up deadlocking
+	 * us.
+	 */
+	v4l2_ctrl_unlock(ctrl);
+
+	switch (ctrl->id) {
+	case V4L2_CID_MPEG_VIDEO_MPEG4_PROFILE:
+	case V4L2_CID_MPEG_VIDEO_H264_PROFILE:
+	case V4L2_CID_MPEG_VIDC_VIDEO_H263_PROFILE:
+	case V4L2_CID_MPEG_VIDC_VIDEO_VP8_PROFILE_LEVEL:
+	case V4L2_CID_MPEG_VIDC_VIDEO_MPEG2_PROFILE:
+		rc = msm_comm_try_get_prop(inst,
+				HAL_PARAM_PROFILE_LEVEL_CURRENT, &hprop);
+		if (rc) {
+			dprintk(VIDC_ERR, "%s: Failed getting profile: %d\n",
+					__func__, rc);
+			break;
+		}
+		ctrl->val = vdec_hal_to_v4l2(ctrl->id,
+				hprop.profile_level.profile);
+		break;
+	case V4L2_CID_MPEG_VIDEO_MPEG4_LEVEL:
+	case V4L2_CID_MPEG_VIDEO_H264_LEVEL:
+	case V4L2_CID_MPEG_VIDC_VIDEO_H263_LEVEL:
+	case V4L2_CID_MPEG_VIDC_VIDEO_MPEG2_LEVEL:
+		rc = msm_comm_try_get_prop(inst,
+				HAL_PARAM_PROFILE_LEVEL_CURRENT, &hprop);
+		if (rc) {
+			dprintk(VIDC_ERR, "%s: Failed getting level: %d\n",
+					__func__, rc);
+			break;
+		}
+
+		ctrl->val = vdec_hal_to_v4l2(ctrl->id,
+				hprop.profile_level.level);
+		break;
+	case V4L2_CID_MPEG_VIDC_VIDEO_SECURE_SCALING_THRESHOLD:
+		dprintk(VIDC_DBG, "Secure scaling threshold is: %d\n",
+				inst->capability.secure_output2_threshold.max);
+		ctrl->val = inst->capability.secure_output2_threshold.max;
+		break;
+	case V4L2_CID_MPEG_VIDEO_H264_ENTROPY_MODE:
+		rc = msm_comm_try_get_prop(inst,
+				HAL_CONFIG_VDEC_ENTROPY, &hprop);
+		if (rc) {
+			dprintk(VIDC_ERR,
+				"%s: Failed getting entropy type: %d\n",
+					__func__, rc);
+			break;
+		}
+		switch (hprop.h264_entropy) {
+		case HAL_H264_ENTROPY_CAVLC:
+			ctrl->val = V4L2_MPEG_VIDEO_H264_ENTROPY_MODE_CAVLC;
+			break;
+		case HAL_H264_ENTROPY_CABAC:
+			ctrl->val = V4L2_MPEG_VIDEO_H264_ENTROPY_MODE_CABAC;
+			break;
+		case HAL_UNUSED_ENTROPY:
+			rc = -ENOTSUPP;
+			break;
+		}
+		break;
+	default:
+		/*
+		 * Other controls aren't really volatile; no need to
+		 * modify ctrl->val.
+		 */
+		break;
+	}
+	v4l2_ctrl_lock(ctrl);
+
+	return rc;
+}
+
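+/*
+ * Translate a v4l2 H264 profile/level control value into the HAL enum
+ * firmware expects; unknown (id, value) pairs are logged and rejected.
+ */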
+static int vdec_v4l2_to_hal(int id, int value)
+{
+	switch (id) {
+		/* H264 */
+	case V4L2_CID_MPEG_VIDEO_H264_PROFILE:
+		switch (value) {
+		case V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE:
+			return HAL_H264_PROFILE_BASELINE;
+		case V4L2_MPEG_VIDEO_H264_PROFILE_CONSTRAINED_BASELINE:
+			return HAL_H264_PROFILE_CONSTRAINED_BASE;
+		case V4L2_MPEG_VIDEO_H264_PROFILE_CONSTRAINED_HIGH:
+			return HAL_H264_PROFILE_CONSTRAINED_HIGH;
+		case V4L2_MPEG_VIDEO_H264_PROFILE_MAIN:
+			return HAL_H264_PROFILE_MAIN;
+		case V4L2_MPEG_VIDEO_H264_PROFILE_EXTENDED:
+			return HAL_H264_PROFILE_EXTENDED;
+		case V4L2_MPEG_VIDEO_H264_PROFILE_HIGH:
+			return HAL_H264_PROFILE_HIGH;
+		case V4L2_MPEG_VIDEO_H264_PROFILE_HIGH_10:
+			return HAL_H264_PROFILE_HIGH10;
+		case V4L2_MPEG_VIDEO_H264_PROFILE_HIGH_422:
+			return HAL_H264_PROFILE_HIGH422;
+		case V4L2_MPEG_VIDEO_H264_PROFILE_HIGH_444_PREDICTIVE:
+			return HAL_H264_PROFILE_HIGH444;
+		default:
+			goto unknown_value;
+		}
+	case V4L2_CID_MPEG_VIDEO_H264_LEVEL:
+		switch (value) {
+		case V4L2_MPEG_VIDEO_H264_LEVEL_1_0:
+			return HAL_H264_LEVEL_1;
+		case V4L2_MPEG_VIDEO_H264_LEVEL_1B:
+			return HAL_H264_LEVEL_1b;
+		case V4L2_MPEG_VIDEO_H264_LEVEL_1_1:
+			return HAL_H264_LEVEL_11;
+		case V4L2_MPEG_VIDEO_H264_LEVEL_1_2:
+			return HAL_H264_LEVEL_12;
+		case V4L2_MPEG_VIDEO_H264_LEVEL_1_3:
+			return HAL_H264_LEVEL_13;
+		case V4L2_MPEG_VIDEO_H264_LEVEL_2_0:
+			return HAL_H264_LEVEL_2;
+		case V4L2_MPEG_VIDEO_H264_LEVEL_2_1:
+			return HAL_H264_LEVEL_21;
+		case V4L2_MPEG_VIDEO_H264_LEVEL_2_2:
+			return HAL_H264_LEVEL_22;
+		case V4L2_MPEG_VIDEO_H264_LEVEL_3_0:
+			return HAL_H264_LEVEL_3;
+		case V4L2_MPEG_VIDEO_H264_LEVEL_3_1:
+			return HAL_H264_LEVEL_31;
+		case V4L2_MPEG_VIDEO_H264_LEVEL_3_2:
+			return HAL_H264_LEVEL_32;
+		case V4L2_MPEG_VIDEO_H264_LEVEL_4_0:
+			return HAL_H264_LEVEL_4;
+		case V4L2_MPEG_VIDEO_H264_LEVEL_4_1:
+			return HAL_H264_LEVEL_41;
+		case V4L2_MPEG_VIDEO_H264_LEVEL_4_2:
+			return HAL_H264_LEVEL_42;
+		case V4L2_MPEG_VIDEO_H264_LEVEL_5_0:
+			return HAL_H264_LEVEL_5;
+		case V4L2_MPEG_VIDEO_H264_LEVEL_5_1:
+			return HAL_H264_LEVEL_51;
+		default:
+			goto unknown_value;
+		}
+	}
+unknown_value:
+	dprintk(VIDC_WARN, "Unknown control (%x, %d)\n", id, value);
+	return -EINVAL;
+}
+
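+/* Inverse of vdec_v4l2_to_hal() for values reported by firmware. */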
+static int vdec_hal_to_v4l2(int id, int value)
+{
+	switch (id) {
+		/* H264 */
+	case V4L2_CID_MPEG_VIDEO_H264_PROFILE:
+		switch (value) {
+		case HAL_H264_PROFILE_BASELINE:
+			return V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE;
+		case HAL_H264_PROFILE_CONSTRAINED_BASE:
+			return
+			V4L2_MPEG_VIDEO_H264_PROFILE_CONSTRAINED_BASELINE;
+		case HAL_H264_PROFILE_MAIN:
+			return V4L2_MPEG_VIDEO_H264_PROFILE_MAIN;
+		case HAL_H264_PROFILE_EXTENDED:
+			return V4L2_MPEG_VIDEO_H264_PROFILE_EXTENDED;
+		case HAL_H264_PROFILE_HIGH:
+			return V4L2_MPEG_VIDEO_H264_PROFILE_HIGH;
+		case HAL_H264_PROFILE_HIGH10:
+			return V4L2_MPEG_VIDEO_H264_PROFILE_HIGH_10;
+		case HAL_H264_PROFILE_HIGH422:
+			return V4L2_MPEG_VIDEO_H264_PROFILE_HIGH_422;
+		case HAL_H264_PROFILE_HIGH444:
+			return V4L2_MPEG_VIDEO_H264_PROFILE_HIGH_444_PREDICTIVE;
+		default:
+			goto unknown_value;
+		}
+	case V4L2_CID_MPEG_VIDEO_H264_LEVEL:
+		switch (value) {
+		case HAL_H264_LEVEL_1:
+			return V4L2_MPEG_VIDEO_H264_LEVEL_1_0;
+		case HAL_H264_LEVEL_1b:
+			return V4L2_MPEG_VIDEO_H264_LEVEL_1B;
+		case HAL_H264_LEVEL_11:
+			return V4L2_MPEG_VIDEO_H264_LEVEL_1_1;
+		case HAL_H264_LEVEL_12:
+			return V4L2_MPEG_VIDEO_H264_LEVEL_1_2;
+		case HAL_H264_LEVEL_13:
+			return V4L2_MPEG_VIDEO_H264_LEVEL_1_3;
+		case HAL_H264_LEVEL_2:
+			return V4L2_MPEG_VIDEO_H264_LEVEL_2_0;
+		case HAL_H264_LEVEL_21:
+			return V4L2_MPEG_VIDEO_H264_LEVEL_2_1;
+		case HAL_H264_LEVEL_22:
+			return V4L2_MPEG_VIDEO_H264_LEVEL_2_2;
+		case HAL_H264_LEVEL_3:
+			return V4L2_MPEG_VIDEO_H264_LEVEL_3_0;
+		case HAL_H264_LEVEL_31:
+			return V4L2_MPEG_VIDEO_H264_LEVEL_3_1;
+		case HAL_H264_LEVEL_32:
+			return V4L2_MPEG_VIDEO_H264_LEVEL_3_2;
+		case HAL_H264_LEVEL_4:
+			return V4L2_MPEG_VIDEO_H264_LEVEL_4_0;
+		case HAL_H264_LEVEL_41:
+			return V4L2_MPEG_VIDEO_H264_LEVEL_4_1;
+		case HAL_H264_LEVEL_42:
+			return V4L2_MPEG_VIDEO_H264_LEVEL_4_2;
+		case HAL_H264_LEVEL_5:
+			return V4L2_MPEG_VIDEO_H264_LEVEL_5_0;
+		case HAL_H264_LEVEL_51:
+			return V4L2_MPEG_VIDEO_H264_LEVEL_5_1;
+		default:
+			goto unknown_value;
+		}
+	case V4L2_CID_MPEG_VIDEO_MPEG4_PROFILE:
+	case V4L2_CID_MPEG_VIDC_VIDEO_H263_PROFILE:
+	case V4L2_CID_MPEG_VIDC_VIDEO_VP8_PROFILE_LEVEL:
+	case V4L2_CID_MPEG_VIDC_VIDEO_MPEG2_PROFILE:
+	case V4L2_CID_MPEG_VIDEO_MPEG4_LEVEL:
+	case V4L2_CID_MPEG_VIDC_VIDEO_H263_LEVEL:
+	case V4L2_CID_MPEG_VIDC_VIDEO_MPEG2_LEVEL:
+		/*
+		 * Extremely dirty hack: we haven't implemented g_ctrl of
+		 * any of these controls and have no intention of doing
+		 * so in the near future.  So just return 0 so that we
+		 * don't see the annoying "Unknown control" errors at the
+		 * bottom of this function.
+		 */
+		return 0;
+	}
+
+unknown_value:
+	dprintk(VIDC_WARN, "Unknown control (%x, %d)\n", id, value);
+	return -EINVAL;
+}
+
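+/* Linear scan of a control cluster for the control with @id. */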
+static struct v4l2_ctrl *get_ctrl_from_cluster(int id,
+		struct v4l2_ctrl **cluster, int ncontrols)
+{
+	int c;
+	for (c = 0; c < ncontrols; ++c)
+		if (cluster[c]->id == id)
+			return cluster[c];
+	return NULL;
+}
+
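+/*
+ * Map a single v4l2 control onto its HAL property and push it to
+ * firmware. Most cases just select a property_id and payload; a few
+ * (secure mode, perf level, buffer size limit, operating rate) only
+ * update driver state, and the stream output mode control issues
+ * several HFI calls inline.
+ */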
+static int try_set_ctrl(struct msm_vidc_inst *inst, struct v4l2_ctrl *ctrl)
+{
+	int rc = 0;
+	struct hal_nal_stream_format_supported stream_format;
+	struct hal_enable_picture enable_picture;
+	struct hal_enable hal_property;
+	enum hal_property property_id = 0;
+	enum hal_video_codec codec;
+	u32 property_val = 0;
+	void *pdata = NULL;
+	struct hfi_device *hdev;
+	struct hal_extradata_enable extra;
+	struct hal_buffer_alloc_mode alloc_mode;
+	struct hal_multi_stream multi_stream;
+	struct hal_scs_threshold scs_threshold;
+	struct hal_mvc_buffer_layout layout;
+	struct v4l2_ctrl *temp_ctrl = NULL;
+	struct hal_profile_level profile_level;
+	struct hal_frame_size frame_sz;
+
+	if (!inst || !inst->core || !inst->core->device) {
+		dprintk(VIDC_ERR, "%s invalid parameters\n", __func__);
+		return -EINVAL;
+	}
+	hdev = inst->core->device;
+
+	rc = is_ctrl_valid_for_codec(inst, ctrl);
+	if (rc)
+		return rc;
+
+	/* Small helper macro for quickly getting a control and err checking */
+#define TRY_GET_CTRL(__ctrl_id) ({ \
+		struct v4l2_ctrl *__temp; \
+		__temp = get_ctrl_from_cluster( \
+			__ctrl_id, \
+			ctrl->cluster, ctrl->ncontrols); \
+		if (!__temp) { \
+			dprintk(VIDC_ERR, "Can't find %s (%x) in cluster\n", \
+				#__ctrl_id, __ctrl_id); \
+			/* Clusters are hardcoded, if we can't find */ \
+			/* something then things are massively screwed up */ \
+			BUG_ON(1); \
+		} \
+		__temp; \
+	})
+
+	v4l2_ctrl_unlock(ctrl);
+
+	switch (ctrl->id) {
+	case V4L2_CID_MPEG_VIDC_VIDEO_STREAM_FORMAT:
+		property_id = HAL_PARAM_NAL_STREAM_FORMAT_SELECT;
+		stream_format.nal_stream_format_supported = BIT(ctrl->val);
+		pdata = &stream_format;
+		break;
+	case V4L2_CID_MPEG_VIDC_VIDEO_OUTPUT_ORDER:
+		property_id = HAL_PARAM_VDEC_OUTPUT_ORDER;
+		property_val = ctrl->val;
+		pdata = &property_val;
+		break;
+	case V4L2_CID_MPEG_VIDC_VIDEO_PICTYPE_DEC_MODE:
+		property_id = HAL_PARAM_VDEC_PICTURE_TYPE_DECODE;
+		if (ctrl->val ==
+			V4L2_MPEG_VIDC_VIDEO_PICTYPE_DECODE_ON) {
+			enable_picture.picture_type = HAL_PICTURE_I;
+		} else {
+			codec = get_hal_codec(inst->fmts[OUTPUT_PORT].fourcc);
+			if (codec == HAL_VIDEO_CODEC_H264) {
+				enable_picture.picture_type = HAL_PICTURE_I |
+					HAL_PICTURE_P | HAL_PICTURE_B |
+					HAL_PICTURE_IDR;
+			} else if (codec == HAL_VIDEO_CODEC_HEVC) {
+				enable_picture.picture_type = HAL_PICTURE_I |
+					HAL_PICTURE_P | HAL_PICTURE_B |
+					HAL_PICTURE_IDR | HAL_PICTURE_CRA;
+			} else {
+				enable_picture.picture_type = HAL_PICTURE_I |
+					HAL_PICTURE_P | HAL_PICTURE_B;
+			}
+		}
+		pdata = &enable_picture;
+		break;
+	case V4L2_CID_MPEG_VIDC_VIDEO_KEEP_ASPECT_RATIO:
+		property_id = HAL_PARAM_VDEC_OUTPUT2_KEEP_ASPECT_RATIO;
+		hal_property.enable = ctrl->val;
+		pdata = &hal_property;
+		break;
+	case V4L2_CID_MPEG_VIDC_VIDEO_POST_LOOP_DEBLOCKER_MODE:
+		property_id = HAL_CONFIG_VDEC_POST_LOOP_DEBLOCKER;
+		hal_property.enable = ctrl->val;
+		pdata = &hal_property;
+		break;
+	case V4L2_CID_MPEG_VIDC_VIDEO_DIVX_FORMAT:
+		property_id = HAL_PARAM_DIVX_FORMAT;
+		property_val = ctrl->val;
+		pdata = &property_val;
+		break;
+	case V4L2_CID_MPEG_VIDC_VIDEO_MB_ERROR_MAP_REPORTING:
+		property_id = HAL_CONFIG_VDEC_MB_ERROR_MAP_REPORTING;
+		hal_property.enable = ctrl->val;
+		pdata = &hal_property;
+		break;
+	case V4L2_CID_MPEG_VIDC_VIDEO_CONTINUE_DATA_TRANSFER:
+		property_id = HAL_PARAM_VDEC_CONTINUE_DATA_TRANSFER;
+		hal_property.enable = ctrl->val;
+		pdata = &hal_property;
+		break;
+	case V4L2_CID_MPEG_VIDC_VIDEO_SYNC_FRAME_DECODE:
+		switch (ctrl->val) {
+		case V4L2_MPEG_VIDC_VIDEO_SYNC_FRAME_DECODE_DISABLE:
+			inst->flags &= ~VIDC_THUMBNAIL;
+			break;
+		case V4L2_MPEG_VIDC_VIDEO_SYNC_FRAME_DECODE_ENABLE:
+			inst->flags |= VIDC_THUMBNAIL;
+			break;
+		}
+
+		property_id = HAL_PARAM_VDEC_SYNC_FRAME_DECODE;
+		hal_property.enable = ctrl->val;
+		pdata = &hal_property;
+		break;
+	case V4L2_CID_MPEG_VIDC_VIDEO_SECURE:
+		inst->flags |= VIDC_SECURE;
+		dprintk(VIDC_DBG, "Setting secure mode to: %d\n",
+				!!(inst->flags & VIDC_SECURE));
+		break;
+	case V4L2_CID_MPEG_VIDC_VIDEO_EXTRADATA:
+		property_id = HAL_PARAM_INDEX_EXTRADATA;
+		extra.index = msm_comm_get_hal_extradata_index(ctrl->val);
+		extra.enable = 1;
+		pdata = &extra;
+		break;
+	case V4L2_CID_MPEG_VIDC_SET_PERF_LEVEL:
+		switch (ctrl->val) {
+		case V4L2_CID_MPEG_VIDC_PERF_LEVEL_NOMINAL:
+			inst->flags &= ~VIDC_TURBO;
+			break;
+		case V4L2_CID_MPEG_VIDC_PERF_LEVEL_TURBO:
+			inst->flags |= VIDC_TURBO;
+			break;
+		default:
+			dprintk(VIDC_ERR, "Perf mode %x not supported\n",
+					ctrl->val);
+			rc = -ENOTSUPP;
+			break;
+		}
+
+		msm_dcvs_try_enable(inst);
+		msm_comm_scale_clocks_and_bus(inst);
+		break;
+	case V4L2_CID_MPEG_VIDC_VIDEO_ALLOC_MODE_INPUT:
+		if (ctrl->val == V4L2_MPEG_VIDC_VIDEO_DYNAMIC) {
+			rc = -ENOTSUPP;
+			break;
+		}
+		property_id = HAL_PARAM_BUFFER_ALLOC_MODE;
+		alloc_mode.buffer_mode = get_buf_type(ctrl->val);
+		alloc_mode.buffer_type = HAL_BUFFER_INPUT;
+		inst->buffer_mode_set[OUTPUT_PORT] = alloc_mode.buffer_mode;
+		pdata = &alloc_mode;
+		break;
+	case V4L2_CID_MPEG_VIDC_VIDEO_FRAME_ASSEMBLY:
+		property_id = HAL_PARAM_VDEC_FRAME_ASSEMBLY;
+		hal_property.enable = ctrl->val;
+		pdata = &hal_property;
+		break;
+	case V4L2_CID_MPEG_VIDC_VIDEO_ALLOC_MODE_OUTPUT:
+		property_id = HAL_PARAM_BUFFER_ALLOC_MODE;
+		alloc_mode.buffer_mode = get_buf_type(ctrl->val);
+
+		if (!(alloc_mode.buffer_mode &
+			inst->capability.alloc_mode_out)) {
+			dprintk(VIDC_WARN,
+				"buffer mode[%d] not supported for capture port[0x%x]\n",
+				ctrl->val, inst->capability.alloc_mode_out);
+			rc = -ENOTSUPP;
+			break;
+		}
+
+		alloc_mode.buffer_type = HAL_BUFFER_OUTPUT;
+		pdata = &alloc_mode;
+		inst->buffer_mode_set[CAPTURE_PORT] = alloc_mode.buffer_mode;
+		break;
+	case V4L2_CID_MPEG_VIDC_VIDEO_STREAM_OUTPUT_MODE:
+		if (ctrl->val && !(inst->capability.pixelprocess_capabilities &
+				HAL_VIDEO_DECODER_MULTI_STREAM_CAPABILITY)) {
+			dprintk(VIDC_ERR, "Downscaling not supported: %#x\n",
+				ctrl->id);
+			rc = -ENOTSUPP;
+			break;
+		}
+		switch (ctrl->val) {
+		case V4L2_CID_MPEG_VIDC_VIDEO_STREAM_OUTPUT_PRIMARY:
+			multi_stream.buffer_type = HAL_BUFFER_OUTPUT;
+			multi_stream.enable = true;
+			pdata = &multi_stream;
+			rc = call_hfi_op(hdev, session_set_property, (void *)
+				inst->session, HAL_PARAM_VDEC_MULTI_STREAM,
+				pdata);
+			if (rc) {
+				dprintk(VIDC_ERR,
+					"Failed to enable OUTPUT port: %d\n",
+					rc);
+				break;
+			}
+			multi_stream.buffer_type = HAL_BUFFER_OUTPUT2;
+			multi_stream.enable = false;
+			pdata = &multi_stream;
+			rc = call_hfi_op(hdev, session_set_property, (void *)
+				inst->session, HAL_PARAM_VDEC_MULTI_STREAM,
+				pdata);
+			if (rc)
+				dprintk(VIDC_ERR,
+					"Failed to disable OUTPUT2 port: %d\n",
+					rc);
+			break;
+		case V4L2_CID_MPEG_VIDC_VIDEO_STREAM_OUTPUT_SECONDARY:
+			multi_stream.buffer_type = HAL_BUFFER_OUTPUT2;
+			multi_stream.enable = true;
+			pdata = &multi_stream;
+			rc = call_hfi_op(hdev, session_set_property, (void *)
+				inst->session, HAL_PARAM_VDEC_MULTI_STREAM,
+				pdata);
+			if (rc) {
+				dprintk(VIDC_ERR,
+					"Failed to enable OUTPUT2 port: %d\n",
+					rc);
+				break;
+			}
+			multi_stream.buffer_type = HAL_BUFFER_OUTPUT;
+			multi_stream.enable = false;
+			pdata = &multi_stream;
+			rc = call_hfi_op(hdev, session_set_property, (void *)
+				inst->session, HAL_PARAM_VDEC_MULTI_STREAM,
+				pdata);
+			if (rc) {
+				dprintk(VIDC_ERR,
+					"Failed to disable OUTPUT port: %d\n",
+					rc);
+				break;
+			}
+
+			frame_sz.buffer_type = HAL_BUFFER_OUTPUT2;
+			frame_sz.width = inst->prop.width[CAPTURE_PORT];
+			frame_sz.height = inst->prop.height[CAPTURE_PORT];
+			pdata = &frame_sz;
+			dprintk(VIDC_DBG,
+				"buffer type = %d width = %d, height = %d\n",
+				frame_sz.buffer_type, frame_sz.width,
+				frame_sz.height);
+			rc = call_hfi_op(hdev, session_set_property, (void *)
+				inst->session, HAL_PARAM_FRAME_SIZE, pdata);
+			if (rc)
+				dprintk(VIDC_ERR,
+					"Failed to set OUTPUT2 size: %d\n",
+					rc);
+
+			alloc_mode.buffer_mode =
+				inst->buffer_mode_set[CAPTURE_PORT];
+			alloc_mode.buffer_type = HAL_BUFFER_OUTPUT2;
+			rc = call_hfi_op(hdev, session_set_property,
+				inst->session, HAL_PARAM_BUFFER_ALLOC_MODE,
+				&alloc_mode);
+			if (rc)
+				dprintk(VIDC_ERR,
+					"Failed to set alloc_mode on OUTPUT2: %d\n",
+					rc);
+			break;
+		default:
+			dprintk(VIDC_ERR,
+				"Failed : Unsupported multi stream setting\n");
+			rc = -ENOTSUPP;
+			break;
+		}
+		break;
+	case V4L2_CID_MPEG_VIDC_VIDEO_SCS_THRESHOLD:
+		property_id = HAL_PARAM_VDEC_SCS_THRESHOLD;
+		scs_threshold.threshold_value = ctrl->val;
+		pdata = &scs_threshold;
+		break;
+	case V4L2_CID_MPEG_VIDC_VIDEO_MVC_BUFFER_LAYOUT:
+		property_id = HAL_PARAM_MVC_BUFFER_LAYOUT;
+		layout.layout_type = msm_comm_get_hal_buffer_layout(ctrl->val);
+		layout.bright_view_first = 0;
+		layout.ngap = 0;
+		pdata = &layout;
+		break;
+	case V4L2_CID_MPEG_VIDC_VIDEO_CONCEAL_COLOR:
+		property_id = HAL_PARAM_VDEC_CONCEAL_COLOR;
+		property_val = ctrl->val;
+		pdata = &property_val;
+		break;
+	case V4L2_CID_MPEG_VIDEO_H264_PROFILE:
+		temp_ctrl = TRY_GET_CTRL(V4L2_CID_MPEG_VIDEO_H264_LEVEL);
+		property_id =
+			HAL_PARAM_PROFILE_LEVEL_CURRENT;
+		profile_level.profile = vdec_v4l2_to_hal(ctrl->id,
+				ctrl->val);
+		profile_level.level = vdec_v4l2_to_hal(
+				V4L2_CID_MPEG_VIDEO_H264_LEVEL,
+				temp_ctrl->val);
+		pdata = &profile_level;
+		break;
+	case V4L2_CID_MPEG_VIDEO_H264_LEVEL:
+		temp_ctrl = TRY_GET_CTRL(V4L2_CID_MPEG_VIDEO_H264_PROFILE);
+		property_id =
+			HAL_PARAM_PROFILE_LEVEL_CURRENT;
+		profile_level.level = vdec_v4l2_to_hal(ctrl->id,
+				ctrl->val);
+		profile_level.profile = vdec_v4l2_to_hal(
+				V4L2_CID_MPEG_VIDEO_H264_PROFILE,
+				temp_ctrl->val);
+		pdata = &profile_level;
+		break;
+	case V4L2_CID_MPEG_VIDC_VIDEO_BUFFER_SIZE_LIMIT:
+		dprintk(VIDC_DBG,
+			"Limiting input buffer size from %u to %u\n",
+			inst->buffer_size_limit, ctrl->val);
+		inst->buffer_size_limit = ctrl->val;
+		break;
+	case V4L2_CID_MPEG_VIDC_VIDEO_NON_SECURE_OUTPUT2:
+		property_id = HAL_PARAM_VDEC_NON_SECURE_OUTPUT2;
+		hal_property.enable = ctrl->val;
+		dprintk(VIDC_DBG, "%s non_secure output2\n",
+			ctrl->val ? "Enabling" : "Disabling");
+		pdata = &hal_property;
+		break;
+	case V4L2_CID_VIDC_QBUF_MODE:
+		property_id = HAL_PARAM_SYNC_BASED_INTERRUPT;
+		hal_property.enable = ctrl->val == V4L2_VIDC_QBUF_BATCHED;
+		pdata = &hal_property;
+		break;
+	case V4L2_CID_MPEG_VIDC_VIDEO_PRIORITY:
+		property_id = HAL_CONFIG_REALTIME;
+		/*
+		 * Firmware has inverted values for realtime and
+		 * non-realtime priority.
+		 */
+		hal_property.enable = !(ctrl->val);
+		pdata = &hal_property;
+		switch (ctrl->val) {
+		case V4L2_MPEG_VIDC_VIDEO_PRIORITY_REALTIME_DISABLE:
+			inst->flags &= ~VIDC_REALTIME;
+			break;
+		case V4L2_MPEG_VIDC_VIDEO_PRIORITY_REALTIME_ENABLE:
+			inst->flags |= VIDC_REALTIME;
+			break;
+		default:
+			dprintk(VIDC_WARN,
+				"inst(%pK) invalid priority ctrl value %#x\n",
+				inst, ctrl->val);
+			break;
+		}
+		break;
+	case V4L2_CID_MPEG_VIDC_VIDEO_OPERATING_RATE:
+		dprintk(VIDC_DBG,
+			"inst(%pK) operating rate changed from %d to %d\n",
+			inst, inst->operating_rate >> 16, ctrl->val >> 16);
+		inst->operating_rate = ctrl->val;
+		break;
+	default:
+		break;
+	}
+
+	v4l2_ctrl_lock(ctrl);
+#undef TRY_GET_CTRL
+
+	if (!rc && property_id) {
+		dprintk(VIDC_DBG,
+			"Control: HAL property=%#x, ctrl id=%#x, value=%#x\n",
+			property_id, ctrl->id, ctrl->val);
+		rc = call_hfi_op(hdev, session_set_property, (void *)
+				inst->session, property_id, pdata);
+	}
+
+	return rc;
+}
+
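+/*
+ * Extended controls: stream output mode (combined vs split OPB/DPB)
+ * and the DPB color format. Moving from split back to combined mode
+ * releases the driver-owned output buffers and refreshes the buffer
+ * requirements.
+ */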
+static int try_set_ext_ctrl(struct msm_vidc_inst *inst,
+	struct v4l2_ext_controls *ctrl)
+{
+	int rc = 0, i = 0, fourcc = 0;
+	struct v4l2_ext_control *ext_control;
+	struct v4l2_control control;
+	u32 old_mode = 0;
+	bool mode_changed = false;
+	enum mode {
+		PRIMARY = V4L2_CID_MPEG_VIDC_VIDEO_STREAM_OUTPUT_PRIMARY,
+		SECONDARY = V4L2_CID_MPEG_VIDC_VIDEO_STREAM_OUTPUT_SECONDARY
+	};
+
+	if (!inst || !inst->core || !ctrl) {
+		dprintk(VIDC_ERR,
+			"%s invalid parameters\n", __func__);
+		return -EINVAL;
+	}
+
+	ext_control = ctrl->controls;
+	control.id = V4L2_CID_MPEG_VIDC_VIDEO_STREAM_OUTPUT_MODE;
+	old_mode = msm_comm_g_ctrl_for_id(inst, control.id);
+
+	for (i = 0; i < ctrl->count; i++) {
+		switch (ext_control[i].id) {
+		case V4L2_CID_MPEG_VIDC_VIDEO_STREAM_OUTPUT_MODE:
+			control.value = ext_control[i].value;
+			rc = msm_comm_s_ctrl(inst, &control);
+			if (rc)
+				dprintk(VIDC_ERR,
+					"%s Failed setting stream output mode : %d\n",
+					__func__, rc);
+
+			if (old_mode == SECONDARY && control.value == PRIMARY)
+				mode_changed = true;
+			break;
+		case V4L2_CID_MPEG_VIDC_VIDEO_DPB_COLOR_FORMAT:
+			switch (ext_control[i].value) {
+			case V4L2_MPEG_VIDC_VIDEO_DPB_COLOR_FMT_NONE:
+				if (!msm_comm_g_ctrl_for_id(inst, control.id)) {
+					rc = msm_comm_release_output_buffers(
+						inst);
+					if (rc)
+						dprintk(VIDC_ERR,
+							"%s Release output buffers failed\n",
+							__func__);
+				}
+				/*
+				 * Update buffer requirements for the
+				 * split to combined mode transition.
+				 */
+				if (mode_changed) {
+					fourcc =
+						inst->fmts[CAPTURE_PORT].fourcc;
+					rc = msm_comm_set_color_format(inst,
+						HAL_BUFFER_OUTPUT, fourcc);
+					if (rc) {
+						dprintk(VIDC_ERR,
+							"%s Failed setting output color format : %d\n",
+							__func__, rc);
+						break;
+					}
+					rc = msm_comm_try_get_bufreqs(inst);
+					if (rc)
+						dprintk(VIDC_ERR,
+							"%s Failed to get buffer requirements : %d\n",
+							__func__, rc);
+				}
+				break;
+			case V4L2_MPEG_VIDC_VIDEO_DPB_COLOR_FMT_UBWC:
+			case V4L2_MPEG_VIDC_VIDEO_DPB_COLOR_FMT_TP10_UBWC:
+				if (ext_control[i].value ==
+					V4L2_MPEG_VIDC_VIDEO_DPB_COLOR_FMT_UBWC)
+					fourcc = V4L2_PIX_FMT_NV12_UBWC;
+				else
+					fourcc = V4L2_PIX_FMT_NV12_TP10_UBWC;
+				if (msm_comm_g_ctrl_for_id(inst, control.id)) {
+					rc = msm_comm_set_color_format(inst,
+						HAL_BUFFER_OUTPUT, fourcc);
+					if (rc) {
+						dprintk(VIDC_ERR,
+							"%s Failed setting output color format : %d\n",
+							__func__, rc);
+						break;
+					}
+					rc = msm_comm_try_get_bufreqs(inst);
+					if (rc)
+						dprintk(VIDC_ERR,
+							"%s Failed to get buffer requirements : %d\n",
+							__func__, rc);
+				}
+				break;
+			default:
+				dprintk(VIDC_ERR,
+					"%s Unsupported output color format\n",
+					__func__);
+				rc = -ENOTSUPP;
+				break;
+			}
+			break;
+		default:
+			dprintk(VIDC_ERR,
+				"%s Unsupported set control %d\n",
+				__func__, ext_control[i].id);
+			rc = -ENOTSUPP;
+			break;
+		}
+	}
+
+	return rc;
+}
+
+static int msm_vdec_op_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+	int rc = 0, c = 0;
+	struct msm_vidc_inst *inst = container_of(ctrl->handler,
+				struct msm_vidc_inst, ctrl_handler);
+
+	if (!inst) {
+		dprintk(VIDC_ERR, "%s invalid parameters\n", __func__);
+		return -EINVAL;
+	}
+	rc = msm_comm_try_state(inst, MSM_VIDC_OPEN_DONE);
+	if (rc) {
+		dprintk(VIDC_ERR,
+			"Failed to move inst: %pK to open done state\n", inst);
+		goto failed_open_done;
+	}
+
+	for (c = 0; c < ctrl->ncontrols; ++c) {
+		if (ctrl->cluster[c]->is_new) {
+			rc = try_set_ctrl(inst, ctrl->cluster[c]);
+			if (rc) {
+				dprintk(VIDC_ERR, "Failed setting %x\n",
+						ctrl->cluster[c]->id);
+				break;
+			}
+		}
+	}
+
+failed_open_done:
+	return rc;
+}
+
+static int msm_vdec_op_g_volatile_ctrl(struct v4l2_ctrl *ctrl)
+{
+	int rc = 0, c = 0;
+	struct msm_vidc_inst *inst = container_of(ctrl->handler,
+				struct msm_vidc_inst, ctrl_handler);
+	struct v4l2_ctrl *master = ctrl->cluster[0];
+
+	rc = msm_comm_try_state(inst, MSM_VIDC_OPEN_DONE);
+	if (rc) {
+		dprintk(VIDC_ERR,
+			"Failed to move inst: %pK to open done state\n", inst);
+		goto failed_open_done;
+	}
+	for (c = 0; c < master->ncontrols; ++c) {
+		int d = 0;
+		for (d = 0; d < NUM_CTRLS; ++d) {
+			if (master->cluster[c]->id == inst->ctrls[d]->id &&
+				inst->ctrls[d]->flags &
+				V4L2_CTRL_FLAG_VOLATILE) {
+				rc = try_get_ctrl(inst, master->cluster[c]);
+				if (rc) {
+					dprintk(VIDC_ERR, "Failed getting %x\n",
+							master->cluster[c]->id);
+					return rc;
+				}
+				break;
+			}
+		}
+	}
+	return rc;
+
+failed_open_done:
+	if (rc)
+		dprintk(VIDC_ERR, "Failed to get hal property\n");
+	return rc;
+}
+
+static const struct v4l2_ctrl_ops msm_vdec_ctrl_ops = {
+	.s_ctrl = msm_vdec_op_s_ctrl,
+	.g_volatile_ctrl = msm_vdec_op_g_volatile_ctrl,
+};
+
+const struct v4l2_ctrl_ops *msm_vdec_get_ctrl_ops(void)
+{
+	return &msm_vdec_ctrl_ops;
+}
+
+int msm_vdec_s_ext_ctrl(struct msm_vidc_inst *inst,
+	struct v4l2_ext_controls *ctrl)
+{
+	int rc = 0;
+
+	if (ctrl->ctrl_class != V4L2_CTRL_CLASS_MPEG) {
+		dprintk(VIDC_ERR, "Invalid class set for extended control\n");
+		return -EINVAL;
+	}
+
+	rc = try_set_ext_ctrl(inst, ctrl);
+	if (rc)
+		dprintk(VIDC_ERR, "Error setting extended control\n");
+
+	return rc;
+}
+
+int msm_vdec_ctrl_init(struct msm_vidc_inst *inst)
+{
+	return msm_comm_ctrl_init(inst, msm_vdec_ctrls,
+		ARRAY_SIZE(msm_vdec_ctrls), &msm_vdec_ctrl_ops);
+}
+
+void msm_vdec_g_ctrl(struct msm_vidc_ctrl **ctrls, int *num_ctrls)
+{
+	*ctrls = msm_vdec_ctrls;
+	*num_ctrls = NUM_CTRLS;
+}
+
+static int msm_vdec_ctrl_cmp(const void *st1, const void *st2)
+{
+	return (int32_t)((struct msm_vidc_ctrl *)st1)->id -
+		(int32_t)((struct msm_vidc_ctrl *)st2)->id;
+}
+
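+/*
+ * Keep the control table sorted by id, presumably so lookups in the
+ * common layer can use a binary search.
+ */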
+void msm_vdec_ctrl_sort(void)
+{
+	sort(msm_vdec_ctrls, NUM_CTRLS, sizeof(struct msm_vidc_ctrl),
+		msm_vdec_ctrl_cmp, NULL);
+}
diff -Nruw linux-4.4.115-fbx/drivers/media/platform/msm./vidc/msm_vdec.h linux-4.4.115-fbx/drivers/media/platform/msm/vidc/msm_vdec.h
--- linux-4.4.115-fbx/drivers/media/platform/msm./vidc/msm_vdec.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/media/platform/msm/vidc/msm_vdec.h	2019-01-22 16:16:24.463255100 +0100
@@ -0,0 +1,40 @@
+/* Copyright (c) 2012, 2015 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef _MSM_VDEC_H_
+#define _MSM_VDEC_H_
+
+#include <media/msm_vidc.h>
+#include "msm_vidc_internal.h"
+
+int msm_vdec_inst_init(struct msm_vidc_inst *inst);
+int msm_vdec_ctrl_init(struct msm_vidc_inst *inst);
+int msm_vdec_querycap(struct msm_vidc_inst *inst, struct v4l2_capability *cap);
+int msm_vdec_enum_fmt(struct msm_vidc_inst *inst, struct v4l2_fmtdesc *f);
+int msm_vdec_s_fmt(struct msm_vidc_inst *inst, struct v4l2_format *f);
+int msm_vdec_g_fmt(struct msm_vidc_inst *inst, struct v4l2_format *f);
+int msm_vdec_s_ext_ctrl(struct msm_vidc_inst *inst,
+		struct v4l2_ext_controls *a);
+int msm_vdec_reqbufs(struct msm_vidc_inst *inst, struct v4l2_requestbuffers *b);
+int msm_vdec_prepare_buf(struct msm_vidc_inst *inst, struct v4l2_buffer *b);
+int msm_vdec_release_buf(struct msm_vidc_inst *inst, struct v4l2_buffer *b);
+int msm_vdec_qbuf(struct msm_vidc_inst *inst, struct v4l2_buffer *b);
+int msm_vdec_dqbuf(struct msm_vidc_inst *inst, struct v4l2_buffer *b);
+int msm_vdec_streamon(struct msm_vidc_inst *inst, enum v4l2_buf_type i);
+int msm_vdec_streamoff(struct msm_vidc_inst *inst, enum v4l2_buf_type i);
+int msm_vdec_cmd(struct msm_vidc_inst *inst, struct v4l2_decoder_cmd *dec);
+int msm_vdec_s_parm(struct msm_vidc_inst *inst, struct v4l2_streamparm *a);
+const struct vb2_ops *msm_vdec_get_vb2q_ops(void);
+void msm_vdec_g_ctrl(struct msm_vidc_ctrl **ctrls, int *num_ctrls);
+void msm_vdec_ctrl_sort(void);
+
+#endif
diff -Nruw linux-4.4.115-fbx/drivers/media/platform/msm./vidc/msm_venc.c linux-4.4.115-fbx/drivers/media/platform/msm/vidc/msm_venc.c
--- linux-4.4.115-fbx/drivers/media/platform/msm./vidc/msm_venc.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/media/platform/msm/vidc/msm_venc.c	2019-10-29 09:26:23.957206251 +0100
@@ -0,0 +1,4693 @@
+/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/slab.h>
+#include "msm_vidc_internal.h"
+#include "msm_vidc_common.h"
+#include "vidc_hfi_api.h"
+#include "msm_vidc_debug.h"
+#include "msm_vidc_dcvs.h"
+
+#define MSM_VENC_DVC_NAME "msm_venc_8974"
+#define MIN_NUM_OUTPUT_BUFFERS 4
+#define MIN_NUM_CAPTURE_BUFFERS 4
+#define MIN_BIT_RATE 32000
+#define MAX_BIT_RATE 300000000
+#define DEFAULT_BIT_RATE 64000
+#define BIT_RATE_STEP 100
+#define DEFAULT_FRAME_RATE 15
+#define MAX_OPERATING_FRAME_RATE (300 << 16)
+#define OPERATING_FRAME_RATE_STEP (1 << 16)
+#define MAX_SLICE_BYTE_SIZE ((MAX_BIT_RATE)>>3)
+#define MIN_SLICE_BYTE_SIZE 512
+#define MAX_SLICE_MB_SIZE ((4096 * 2304) >> 8)
+#define I_FRAME_QP 26
+#define P_FRAME_QP 28
+#define B_FRAME_QP 30
+#define MAX_INTRA_REFRESH_MBS ((4096 * 2304) >> 8)
+#define MAX_NUM_B_FRAMES 4
+#define MAX_LTR_FRAME_COUNT 10
+#define MAX_HYBRID_HIER_P_LAYERS 6
+
+#define L_MODE V4L2_MPEG_VIDEO_H264_LOOP_FILTER_MODE_DISABLED_AT_SLICE_BOUNDARY
+#define CODING V4L2_MPEG_VIDEO_MPEG4_PROFILE_ADVANCED_CODING_EFFICIENCY
+#define BITSTREAM_RESTRICT_ENABLED \
+	V4L2_MPEG_VIDC_VIDEO_H264_VUI_BITSTREAM_RESTRICT_ENABLED
+#define BITSTREAM_RESTRICT_DISABLED \
+	V4L2_MPEG_VIDC_VIDEO_H264_VUI_BITSTREAM_RESTRICT_DISABLED
+#define MIN_TIME_RESOLUTION 1
+#define MAX_TIME_RESOLUTION 0xFFFFFF
+#define DEFAULT_TIME_RESOLUTION 0x7530
+
+/*
+ * Default 601 to 709 conversion coefficients for resolution 176x144.
+ * Negative coeffs are converted to s4.9 format, e.g. -22 becomes
+ * ((1 << 13) - 22). These are the 3x3 transformation matrix
+ * coefficients in s4.9 fixed point format.
+ */
+static u32 vpe_csc_601_to_709_matrix_coeff[HAL_MAX_MATRIX_COEFFS] = {
+	440, 8140, 8098, 0, 460, 52, 0, 34, 463
+};
+
+/* offset coefficients in s9 fixed point format */
+static u32 vpe_csc_601_to_709_bias_coeff[HAL_MAX_BIAS_COEFFS] = {
+	53, 0, 4
+};
+
+/* clamping value for Y/U/V([min,max] for Y/U/V) */
+static u32 vpe_csc_601_to_709_limit_coeff[HAL_MAX_LIMIT_COEFFS] = {
+	16, 235, 16, 240, 16, 240
+};
+
+static const char *const mpeg_video_rate_control[] = {
+	"No Rate Control",
+	"VBR VFR",
+	"VBR CFR",
+	"CBR VFR",
+	"CBR CFR",
+	"MBR CFR",
+	"MBR VFR",
+	NULL
+};
+
+static const char *const mpeg_video_rotation[] = {
+	"No Rotation",
+	"90 Degree Rotation",
+	"180 Degree Rotation",
+	"270 Degree Rotation",
+	NULL
+};
+
+static const char *const h264_video_entropy_cabac_model[] = {
+	"Model 0",
+	"Model 1",
+	"Model 2",
+	NULL
+};
+
+static const char *const h263_level[] = {
+	"1.0",
+	"2.0",
+	"3.0",
+	"4.0",
+	"4.5",
+	"5.0",
+	"6.0",
+	"7.0",
+};
+
+static const char *const h263_profile[] = {
+	"Baseline",
+	"H320 Coding",
+	"Backward Compatible",
+	"ISWV2",
+	"ISWV3",
+	"High Compression",
+	"Internet",
+	"Interlace",
+	"High Latency",
+};
+
+static const char *const hevc_tier_level[] = {
+	"Main Tier Level 1",
+	"Main Tier Level 2",
+	"Main Tier Level 2.1",
+	"Main Tier Level 3",
+	"Main Tier Level 3.1",
+	"Main Tier Level 4",
+	"Main Tier Level 4.1",
+	"Main Tier Level 5",
+	"Main Tier Level 5.1",
+	"Main Tier Level 5.2",
+	"Main Tier Level 6",
+	"Main Tier Level 6.1",
+	"Main Tier Level 6.2",
+	"High Tier Level 1",
+	"High Tier Level 2",
+	"High Tier Level 2.1",
+	"High Tier Level 3",
+	"High Tier Level 3.1",
+	"High Tier Level 4",
+	"High Tier Level 4.1",
+	"High Tier Level 5",
+	"High Tier Level 5.1",
+	"High Tier Level 5.2",
+	"High Tier Level 6",
+	"High Tier Level 6.1",
+	"High Tier Level 6.2",
+};
+
+static const char *const hevc_profile[] = {
+	"Main",
+	"Main10",
+	"Main Still Pic",
+};
+
+static const char *const vp8_profile_level[] = {
+	"Unused",
+	"0.0",
+	"1.0",
+	"2.0",
+	"3.0",
+};
+
+static const char *const perf_level[] = {
+	"Nominal",
+	"Performance",
+	"Turbo"
+};
+
+static const char *const mbi_statistics[] = {
+	"Camcorder Default",
+	"Mode 1",
+	"Mode 2",
+	"Mode 3"
+};
+
+static const char *const intra_refresh_modes[] = {
+	"None",
+	"Cyclic",
+	"Adaptive",
+	"Cyclic Adaptive",
+	"Random"
+};
+
+static const char *const timestamp_mode[] = {
+	"Honor",
+	"Ignore",
+};
+
+static const char *const iframe_sizes[] = {
+	"Default",
+	"Medium",
+	"Huge",
+	"Unlimited"
+};
+
+static struct msm_vidc_ctrl msm_venc_ctrls[] = {
+	{
+		.id = V4L2_CID_MPEG_VIDC_VIDEO_IDR_PERIOD,
+		.name = "IDR Period",
+		.type = V4L2_CTRL_TYPE_INTEGER,
+		.minimum = 1,
+		.maximum = INT_MAX,
+		.default_value = DEFAULT_FRAME_RATE,
+		.step = 1,
+		.menu_skip_mask = 0,
+		.qmenu = NULL,
+	},
+	{
+		.id = V4L2_CID_MPEG_VIDC_VIDEO_NUM_P_FRAMES,
+		.name = "Intra Period for P frames",
+		.type = V4L2_CTRL_TYPE_INTEGER,
+		.minimum = 0,
+		.maximum = INT_MAX,
+		.default_value = 2 * DEFAULT_FRAME_RATE - 1,
+		.step = 1,
+		.menu_skip_mask = 0,
+		.qmenu = NULL,
+	},
+	{
+		.id = V4L2_CID_MPEG_VIDC_VIDEO_NUM_B_FRAMES,
+		.name = "Intra Period for B frames",
+		.type = V4L2_CTRL_TYPE_INTEGER,
+		.minimum = 0,
+		.maximum = INT_MAX,
+		.default_value = 0,
+		.step = 1,
+		.menu_skip_mask = 0,
+		.qmenu = NULL,
+	},
+	{
+		.id = V4L2_CID_MPEG_VIDC_VIDEO_REQUEST_IFRAME,
+		.name = "Request I Frame",
+		.type = V4L2_CTRL_TYPE_BUTTON,
+		.minimum = 0,
+		.maximum = 0,
+		.default_value = 0,
+		.step = 0,
+		.menu_skip_mask = 0,
+		.qmenu = NULL,
+	},
+	{
+		.id = V4L2_CID_MPEG_VIDC_VIDEO_RATE_CONTROL,
+		.name = "Video Framerate and Bitrate Control",
+		.type = V4L2_CTRL_TYPE_MENU,
+		.minimum = V4L2_CID_MPEG_VIDC_VIDEO_RATE_CONTROL_OFF,
+		.maximum = V4L2_CID_MPEG_VIDC_VIDEO_RATE_CONTROL_MBR_VFR,
+		.default_value = V4L2_CID_MPEG_VIDC_VIDEO_RATE_CONTROL_OFF,
+		.step = 0,
+		.menu_skip_mask = ~(
+		(1 << V4L2_CID_MPEG_VIDC_VIDEO_RATE_CONTROL_OFF) |
+		(1 << V4L2_CID_MPEG_VIDC_VIDEO_RATE_CONTROL_VBR_VFR) |
+		(1 << V4L2_CID_MPEG_VIDC_VIDEO_RATE_CONTROL_VBR_CFR) |
+		(1 << V4L2_CID_MPEG_VIDC_VIDEO_RATE_CONTROL_CBR_VFR) |
+		(1 << V4L2_CID_MPEG_VIDC_VIDEO_RATE_CONTROL_CBR_CFR) |
+		(1 << V4L2_CID_MPEG_VIDC_VIDEO_RATE_CONTROL_MBR_CFR) |
+		(1 << V4L2_CID_MPEG_VIDC_VIDEO_RATE_CONTROL_MBR_VFR)
+		),
+		.qmenu = mpeg_video_rate_control,
+	},
+	{
+		.id = V4L2_CID_MPEG_VIDEO_BITRATE_MODE,
+		.name = "Bitrate Control",
+		.type = V4L2_CTRL_TYPE_MENU,
+		.minimum = V4L2_MPEG_VIDEO_BITRATE_MODE_VBR,
+		.maximum = V4L2_MPEG_VIDEO_BITRATE_MODE_CBR,
+		.default_value = V4L2_MPEG_VIDEO_BITRATE_MODE_VBR,
+		.step = 0,
+		.menu_skip_mask = ~(
+		(1 << V4L2_MPEG_VIDEO_BITRATE_MODE_VBR) |
+		(1 << V4L2_MPEG_VIDEO_BITRATE_MODE_CBR)
+		),
+		.qmenu = NULL,
+	},
+	{
+		.id = V4L2_CID_MPEG_VIDEO_BITRATE,
+		.name = "Bit Rate",
+		.type = V4L2_CTRL_TYPE_INTEGER,
+		.minimum = MIN_BIT_RATE,
+		.maximum = MAX_BIT_RATE,
+		.default_value = DEFAULT_BIT_RATE,
+		.step = BIT_RATE_STEP,
+		.menu_skip_mask = 0,
+		.qmenu = NULL,
+	},
+	{
+		.id = V4L2_CID_MPEG_VIDEO_BITRATE_PEAK,
+		.name = "Peak Bit Rate",
+		.type = V4L2_CTRL_TYPE_INTEGER,
+		.minimum = MIN_BIT_RATE,
+		.maximum = MAX_BIT_RATE,
+		.default_value = DEFAULT_BIT_RATE,
+		.step = BIT_RATE_STEP,
+		.menu_skip_mask = 0,
+		.qmenu = NULL,
+	},
+	{
+		.id = V4L2_CID_MPEG_VIDEO_H264_ENTROPY_MODE,
+		.name = "Entropy Mode",
+		.type = V4L2_CTRL_TYPE_MENU,
+		.minimum = V4L2_MPEG_VIDEO_H264_ENTROPY_MODE_CAVLC,
+		.maximum = V4L2_MPEG_VIDEO_H264_ENTROPY_MODE_CABAC,
+		.default_value = V4L2_MPEG_VIDEO_H264_ENTROPY_MODE_CAVLC,
+		.menu_skip_mask = ~(
+		(1 << V4L2_MPEG_VIDEO_H264_ENTROPY_MODE_CAVLC) |
+		(1 << V4L2_MPEG_VIDEO_H264_ENTROPY_MODE_CABAC)
+		),
+	},
+	{
+		.id = V4L2_CID_MPEG_VIDC_VIDEO_H264_CABAC_MODEL,
+		.name = "CABAC Model",
+		.type = V4L2_CTRL_TYPE_MENU,
+		.minimum = V4L2_CID_MPEG_VIDC_VIDEO_H264_CABAC_MODEL_0,
+		.maximum = V4L2_CID_MPEG_VIDC_VIDEO_H264_CABAC_MODEL_1,
+		.default_value = V4L2_CID_MPEG_VIDC_VIDEO_H264_CABAC_MODEL_0,
+		.menu_skip_mask = ~(
+		(1 << V4L2_CID_MPEG_VIDC_VIDEO_H264_CABAC_MODEL_0) |
+		(1 << V4L2_CID_MPEG_VIDC_VIDEO_H264_CABAC_MODEL_1) |
+		(1 << V4L2_CID_MPEG_VIDC_VIDEO_H264_CABAC_MODEL_2)
+		),
+		.qmenu = h264_video_entropy_cabac_model,
+	},
+	{
+		.id = V4L2_CID_MPEG_VIDEO_MPEG4_PROFILE,
+		.name = "MPEG4 Profile",
+		.type = V4L2_CTRL_TYPE_MENU,
+		.minimum = V4L2_MPEG_VIDEO_MPEG4_PROFILE_SIMPLE,
+		.maximum = CODING,
+		.default_value = V4L2_MPEG_VIDEO_MPEG4_PROFILE_SIMPLE,
+		.menu_skip_mask = 0,
+	},
+	{
+		.id = V4L2_CID_MPEG_VIDEO_MPEG4_LEVEL,
+		.name = "MPEG4 Level",
+		.type = V4L2_CTRL_TYPE_MENU,
+		.minimum = V4L2_MPEG_VIDEO_MPEG4_LEVEL_0,
+		.maximum = V4L2_MPEG_VIDEO_MPEG4_LEVEL_5,
+		.default_value = V4L2_MPEG_VIDEO_MPEG4_LEVEL_0,
+		.menu_skip_mask = 0,
+	},
+	{
+		.id = V4L2_CID_MPEG_VIDEO_H264_PROFILE,
+		.name = "H264 Profile",
+		.type = V4L2_CTRL_TYPE_MENU,
+		.minimum = V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE,
+		.maximum = V4L2_MPEG_VIDEO_H264_PROFILE_CONSTRAINED_HIGH,
+		.default_value = V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE,
+		.menu_skip_mask = 0,
+	},
+	{
+		.id = V4L2_CID_MPEG_VIDEO_H264_LEVEL,
+		.name = "H264 Level",
+		.type = V4L2_CTRL_TYPE_MENU,
+		.minimum = V4L2_MPEG_VIDEO_H264_LEVEL_1_0,
+		.maximum = V4L2_MPEG_VIDEO_H264_LEVEL_5_2,
+		.default_value = V4L2_MPEG_VIDEO_H264_LEVEL_1_0,
+		.menu_skip_mask = 0,
+	},
+	{
+		.id = V4L2_CID_MPEG_VIDC_VIDEO_H263_PROFILE,
+		.name = "H263 Profile",
+		.type = V4L2_CTRL_TYPE_MENU,
+		.minimum = V4L2_MPEG_VIDC_VIDEO_H263_PROFILE_BASELINE,
+		.maximum = V4L2_MPEG_VIDC_VIDEO_H263_PROFILE_HIGHLATENCY,
+		.default_value = V4L2_MPEG_VIDC_VIDEO_H263_PROFILE_BASELINE,
+		.menu_skip_mask = ~(
+		(1 << V4L2_MPEG_VIDC_VIDEO_H263_PROFILE_BASELINE) |
+		(1 << V4L2_MPEG_VIDC_VIDEO_H263_PROFILE_H320CODING) |
+		(1 << V4L2_MPEG_VIDC_VIDEO_H263_PROFILE_BACKWARDCOMPATIBLE) |
+		(1 << V4L2_MPEG_VIDC_VIDEO_H263_PROFILE_ISWV2) |
+		(1 << V4L2_MPEG_VIDC_VIDEO_H263_PROFILE_ISWV3) |
+		(1 << V4L2_MPEG_VIDC_VIDEO_H263_PROFILE_HIGHCOMPRESSION) |
+		(1 << V4L2_MPEG_VIDC_VIDEO_H263_PROFILE_INTERNET) |
+		(1 << V4L2_MPEG_VIDC_VIDEO_H263_PROFILE_INTERLACE) |
+		(1 << V4L2_MPEG_VIDC_VIDEO_H263_PROFILE_HIGHLATENCY)
+		),
+		.qmenu = h263_profile,
+	},
+	{
+		.id = V4L2_CID_MPEG_VIDC_VIDEO_H263_LEVEL,
+		.name = "H263 Level",
+		.type = V4L2_CTRL_TYPE_MENU,
+		.minimum = V4L2_MPEG_VIDC_VIDEO_H263_LEVEL_1_0,
+		.maximum = V4L2_MPEG_VIDC_VIDEO_H263_LEVEL_7_0,
+		.default_value = V4L2_MPEG_VIDC_VIDEO_H263_LEVEL_1_0,
+		.menu_skip_mask = ~(
+		(1 << V4L2_MPEG_VIDC_VIDEO_H263_LEVEL_1_0) |
+		(1 << V4L2_MPEG_VIDC_VIDEO_H263_LEVEL_2_0) |
+		(1 << V4L2_MPEG_VIDC_VIDEO_H263_LEVEL_3_0) |
+		(1 << V4L2_MPEG_VIDC_VIDEO_H263_LEVEL_4_0) |
+		(1 << V4L2_MPEG_VIDC_VIDEO_H263_LEVEL_5_0) |
+		(1 << V4L2_MPEG_VIDC_VIDEO_H263_LEVEL_6_0) |
+		(1 << V4L2_MPEG_VIDC_VIDEO_H263_LEVEL_7_0)
+		),
+		.qmenu = h263_level,
+	},
+	{
+		.id = V4L2_CID_MPEG_VIDC_VIDEO_VP8_PROFILE_LEVEL,
+		.name = "VP8 Profile Level",
+		.type = V4L2_CTRL_TYPE_MENU,
+		.minimum = V4L2_MPEG_VIDC_VIDEO_VP8_UNUSED,
+		.maximum = V4L2_MPEG_VIDC_VIDEO_VP8_VERSION_1,
+		.default_value = V4L2_MPEG_VIDC_VIDEO_VP8_VERSION_0,
+		.menu_skip_mask = ~(
+		(1 << V4L2_MPEG_VIDC_VIDEO_VP8_UNUSED) |
+		(1 << V4L2_MPEG_VIDC_VIDEO_VP8_VERSION_0) |
+		(1 << V4L2_MPEG_VIDC_VIDEO_VP8_VERSION_1)
+		),
+		.qmenu = vp8_profile_level,
+	},
+	{
+		.id = V4L2_CID_MPEG_VIDC_VIDEO_HEVC_PROFILE,
+		.name = "HEVC Profile",
+		.type = V4L2_CTRL_TYPE_MENU,
+		.minimum = V4L2_MPEG_VIDC_VIDEO_HEVC_PROFILE_MAIN,
+		.maximum = V4L2_MPEG_VIDC_VIDEO_HEVC_PROFILE_MAIN_STILL_PIC,
+		.default_value = V4L2_MPEG_VIDC_VIDEO_HEVC_PROFILE_MAIN,
+		.menu_skip_mask = ~(
+		(1 << V4L2_MPEG_VIDC_VIDEO_HEVC_PROFILE_MAIN) |
+		(1 << V4L2_MPEG_VIDC_VIDEO_HEVC_PROFILE_MAIN10) |
+		(1 << V4L2_MPEG_VIDC_VIDEO_HEVC_PROFILE_MAIN_STILL_PIC)
+		),
+		.qmenu = hevc_profile,
+	},
+	{
+		.id = V4L2_CID_MPEG_VIDC_VIDEO_HEVC_TIER_LEVEL,
+		.name = "HEVC Tier and Level",
+		.type = V4L2_CTRL_TYPE_MENU,
+		.minimum = V4L2_MPEG_VIDC_VIDEO_HEVC_LEVEL_MAIN_TIER_LEVEL_1,
+		.maximum = V4L2_MPEG_VIDC_VIDEO_HEVC_LEVEL_HIGH_TIER_LEVEL_5_2,
+		.default_value =
+			V4L2_MPEG_VIDC_VIDEO_HEVC_LEVEL_MAIN_TIER_LEVEL_1,
+		.menu_skip_mask = ~(
+		(1 << V4L2_MPEG_VIDC_VIDEO_HEVC_LEVEL_HIGH_TIER_LEVEL_1) |
+		(1 << V4L2_MPEG_VIDC_VIDEO_HEVC_LEVEL_HIGH_TIER_LEVEL_2) |
+		(1 << V4L2_MPEG_VIDC_VIDEO_HEVC_LEVEL_HIGH_TIER_LEVEL_2_1) |
+		(1 << V4L2_MPEG_VIDC_VIDEO_HEVC_LEVEL_HIGH_TIER_LEVEL_3) |
+		(1 << V4L2_MPEG_VIDC_VIDEO_HEVC_LEVEL_HIGH_TIER_LEVEL_3_1) |
+		(1 << V4L2_MPEG_VIDC_VIDEO_HEVC_LEVEL_HIGH_TIER_LEVEL_4) |
+		(1 << V4L2_MPEG_VIDC_VIDEO_HEVC_LEVEL_HIGH_TIER_LEVEL_4_1) |
+		(1 << V4L2_MPEG_VIDC_VIDEO_HEVC_LEVEL_HIGH_TIER_LEVEL_5) |
+		(1 << V4L2_MPEG_VIDC_VIDEO_HEVC_LEVEL_HIGH_TIER_LEVEL_5_1) |
+		(1 << V4L2_MPEG_VIDC_VIDEO_HEVC_LEVEL_HIGH_TIER_LEVEL_5_2) |
+		(1 << V4L2_MPEG_VIDC_VIDEO_HEVC_LEVEL_MAIN_TIER_LEVEL_1) |
+		(1 << V4L2_MPEG_VIDC_VIDEO_HEVC_LEVEL_MAIN_TIER_LEVEL_2) |
+		(1 << V4L2_MPEG_VIDC_VIDEO_HEVC_LEVEL_MAIN_TIER_LEVEL_2_1) |
+		(1 << V4L2_MPEG_VIDC_VIDEO_HEVC_LEVEL_MAIN_TIER_LEVEL_3) |
+		(1 << V4L2_MPEG_VIDC_VIDEO_HEVC_LEVEL_MAIN_TIER_LEVEL_3_1) |
+		(1 << V4L2_MPEG_VIDC_VIDEO_HEVC_LEVEL_MAIN_TIER_LEVEL_4) |
+		(1 << V4L2_MPEG_VIDC_VIDEO_HEVC_LEVEL_MAIN_TIER_LEVEL_4_1) |
+		(1 << V4L2_MPEG_VIDC_VIDEO_HEVC_LEVEL_MAIN_TIER_LEVEL_5) |
+		(1 << V4L2_MPEG_VIDC_VIDEO_HEVC_LEVEL_MAIN_TIER_LEVEL_5_1)
+		),
+		.qmenu = hevc_tier_level,
+	},
+	{
+		.id = V4L2_CID_MPEG_VIDC_VIDEO_ROTATION,
+		.name = "Rotation",
+		.type = V4L2_CTRL_TYPE_MENU,
+		.minimum = V4L2_CID_MPEG_VIDC_VIDEO_ROTATION_NONE,
+		.maximum = V4L2_CID_MPEG_VIDC_VIDEO_ROTATION_270,
+		.default_value = V4L2_CID_MPEG_VIDC_VIDEO_ROTATION_NONE,
+		.menu_skip_mask = ~(
+		(1 << V4L2_CID_MPEG_VIDC_VIDEO_ROTATION_NONE) |
+		(1 << V4L2_CID_MPEG_VIDC_VIDEO_ROTATION_90) |
+		(1 << V4L2_CID_MPEG_VIDC_VIDEO_ROTATION_180) |
+		(1 << V4L2_CID_MPEG_VIDC_VIDEO_ROTATION_270)
+		),
+		.qmenu = mpeg_video_rotation,
+	},
+	{
+		.id = V4L2_CID_MPEG_VIDEO_H264_I_FRAME_QP,
+		.name = "H264 I Frame Quantization",
+		.type = V4L2_CTRL_TYPE_INTEGER,
+		.minimum = 1,
+		.maximum = 51,
+		.default_value = I_FRAME_QP,
+		.step = 1,
+		.menu_skip_mask = 0,
+		.qmenu = NULL,
+	},
+	{
+		.id = V4L2_CID_MPEG_VIDEO_H264_P_FRAME_QP,
+		.name = "H264 P Frame Quantization",
+		.type = V4L2_CTRL_TYPE_INTEGER,
+		.minimum = 1,
+		.maximum = 51,
+		.default_value = P_FRAME_QP,
+		.step = 1,
+		.menu_skip_mask = 0,
+		.qmenu = NULL,
+	},
+	{
+		.id = V4L2_CID_MPEG_VIDEO_H264_B_FRAME_QP,
+		.name = "H264 B Frame Quantization",
+		.type = V4L2_CTRL_TYPE_INTEGER,
+		.minimum = 1,
+		.maximum = 51,
+		.default_value = B_FRAME_QP,
+		.step = 1,
+		.menu_skip_mask = 0,
+		.qmenu = NULL,
+	},
+	{
+		.id = V4L2_CID_MPEG_VIDEO_H263_I_FRAME_QP,
+		.name = "H263 I Frame Quantization",
+		.type = V4L2_CTRL_TYPE_INTEGER,
+		.minimum = 1,
+		.maximum = 31,
+		.default_value = I_FRAME_QP,
+		.step = 1,
+		.menu_skip_mask = 0,
+		.qmenu = NULL,
+	},
+	{
+		.id = V4L2_CID_MPEG_VIDEO_H263_P_FRAME_QP,
+		.name = "H263 P Frame Quantization",
+		.type = V4L2_CTRL_TYPE_INTEGER,
+		.minimum = 1,
+		.maximum = 31,
+		.default_value = P_FRAME_QP,
+		.step = 1,
+		.menu_skip_mask = 0,
+		.qmenu = NULL,
+	},
+	{
+		.id = V4L2_CID_MPEG_VIDEO_H263_B_FRAME_QP,
+		.name = "H263 B Frame Quantization",
+		.type = V4L2_CTRL_TYPE_INTEGER,
+		.minimum = 1,
+		.maximum = 31,
+		.default_value = B_FRAME_QP,
+		.step = 1,
+		.menu_skip_mask = 0,
+		.qmenu = NULL,
+	},
+	{
+		.id = V4L2_CID_MPEG_VIDEO_VPX_I_FRAME_QP,
+		.name = "VPX I Frame Quantization",
+		.type = V4L2_CTRL_TYPE_INTEGER,
+		.minimum = 0,
+		.maximum = 127,
+		.default_value = I_FRAME_QP,
+		.step = 1,
+		.menu_skip_mask = 0,
+		.qmenu = NULL,
+	},
+	{
+		.id = V4L2_CID_MPEG_VIDEO_VPX_P_FRAME_QP,
+		.name = "VPX P Frame Quantization",
+		.type = V4L2_CTRL_TYPE_INTEGER,
+		.minimum = 0,
+		.maximum = 127,
+		.default_value = P_FRAME_QP,
+		.step = 1,
+		.menu_skip_mask = 0,
+		.qmenu = NULL,
+	},
+	{
+		.id = V4L2_CID_MPEG_VIDEO_H264_MIN_QP,
+		.name = "H264 Minimum QP",
+		.type = V4L2_CTRL_TYPE_INTEGER,
+		.minimum = 1,
+		.maximum = 51,
+		.default_value = 1,
+		.step = 1,
+		.menu_skip_mask = 0,
+		.qmenu = NULL,
+	},
+	{
+		.id = V4L2_CID_MPEG_VIDEO_H264_MAX_QP,
+		.name = "H264 Maximum QP",
+		.type = V4L2_CTRL_TYPE_INTEGER,
+		.minimum = 1,
+		.maximum = 51,
+		.default_value = 51,
+		.step = 1,
+		.menu_skip_mask = 0,
+		.qmenu = NULL,
+	},
+	{
+		.id = V4L2_CID_MPEG_VIDEO_VPX_MIN_QP,
+		.name = "VPX Minimum QP",
+		.type = V4L2_CTRL_TYPE_INTEGER,
+		.minimum = 0,
+		.maximum = 127,
+		.default_value = 0,
+		.step = 1,
+	},
+	{
+		.id = V4L2_CID_MPEG_VIDEO_VPX_MAX_QP,
+		.name = "VPX Maximum QP",
+		.type = V4L2_CTRL_TYPE_INTEGER,
+		.minimum = 0,
+		.maximum = 127,
+		.default_value = 127,
+		.step = 1,
+	},
+	{
+		.id = V4L2_CID_MPEG_VIDC_VIDEO_VP8_MIN_QP,
+		.name = "VP8 Minimum QP",
+		.type = V4L2_CTRL_TYPE_INTEGER,
+		.minimum = 1,
+		.maximum = 128,
+		.default_value = 1,
+		.step = 1,
+	},
+	{
+		.id = V4L2_CID_MPEG_VIDC_VIDEO_VP8_MAX_QP,
+		.name = "VP8 Maximum QP",
+		.type = V4L2_CTRL_TYPE_INTEGER,
+		.minimum = 1,
+		.maximum = 128,
+		.default_value = 128,
+		.step = 1,
+	},
+	{
+		.id = V4L2_CID_MPEG_VIDEO_MPEG4_MIN_QP,
+		.name = "MPEG4 Minimum QP",
+		.type = V4L2_CTRL_TYPE_INTEGER,
+		.minimum = 1,
+		.maximum = 31,
+		.default_value = 1,
+		.step = 1,
+	},
+	{
+		.id = V4L2_CID_MPEG_VIDEO_MPEG4_MAX_QP,
+		.name = "MPEG4 Maximum QP",
+		.type = V4L2_CTRL_TYPE_INTEGER,
+		.minimum = 1,
+		.maximum = 31,
+		.default_value = 31,
+		.step = 1,
+	},
+	{
+		.id = V4L2_CID_MPEG_VIDEO_MIN_QP_PACKED,
+		.name = "H264 Minimum QP PACKED",
+		.type = V4L2_CTRL_TYPE_INTEGER,
+		.minimum = 0x00010101,
+		.maximum = 0x00333333,
+		.default_value = 0x00010101,
+		.step = 1,
+		.menu_skip_mask = 0,
+		.qmenu = NULL,
+	},
+	{
+		.id = V4L2_CID_MPEG_VIDEO_MAX_QP_PACKED,
+		.name = "H264 Maximum QP PACKED",
+		.type = V4L2_CTRL_TYPE_INTEGER,
+		.minimum = 0x00010101,
+		.maximum = 0x00333333,
+		.default_value = 0x00333333,
+		.step = 1,
+		.menu_skip_mask = 0,
+		.qmenu = NULL,
+	},
+	{
+		.id = V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MODE,
+		.name = "Slice Mode",
+		.type = V4L2_CTRL_TYPE_MENU,
+		.minimum = V4L2_MPEG_VIDEO_MULTI_SLICE_MODE_SINGLE,
+		.maximum = V4L2_MPEG_VIDEO_MULTI_SLICE_GOB,
+		.default_value = V4L2_MPEG_VIDEO_MULTI_SLICE_MODE_SINGLE,
+		.menu_skip_mask = ~(
+		(1 << V4L2_MPEG_VIDEO_MULTI_SLICE_MODE_SINGLE) |
+		(1 << V4L2_MPEG_VIDEO_MULTI_SICE_MODE_MAX_MB) |
+		(1 << V4L2_MPEG_VIDEO_MULTI_SICE_MODE_MAX_BYTES) |
+		(1 << V4L2_MPEG_VIDEO_MULTI_SLICE_GOB)
+		),
+	},
+	{
+		.id = V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MAX_BYTES,
+		.name = "Slice Byte Size",
+		.type = V4L2_CTRL_TYPE_INTEGER,
+		.minimum = MIN_SLICE_BYTE_SIZE,
+		.maximum = MAX_SLICE_BYTE_SIZE,
+		.default_value = MIN_SLICE_BYTE_SIZE,
+		.step = 1,
+		.menu_skip_mask = 0,
+		.qmenu = NULL,
+	},
+	{
+		.id = V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MAX_MB,
+		.name = "Slice MB Size",
+		.type = V4L2_CTRL_TYPE_INTEGER,
+		.minimum = 1,
+		.maximum = MAX_SLICE_MB_SIZE,
+		.default_value = 1,
+		.step = 1,
+		.menu_skip_mask = 0,
+		.qmenu = NULL,
+	},
+	{
+		.id = V4L2_CID_MPEG_VIDEO_MULTI_SLICE_GOB,
+		.name = "Slice GOB",
+		.type = V4L2_CTRL_TYPE_INTEGER,
+		.minimum = 1,
+		.maximum = MAX_SLICE_MB_SIZE,
+		.default_value = 1,
+		.step = 1,
+		.menu_skip_mask = 0,
+		.qmenu = NULL,
+	},
+	{
+		.id = V4L2_CID_MPEG_VIDEO_MULTI_SLICE_DELIVERY_MODE,
+		.name = "Slice delivery mode",
+		.type = V4L2_CTRL_TYPE_BUTTON,
+		.minimum = 0,
+		.maximum = 1,
+		.default_value = 0,
+		.step = 1,
+		.menu_skip_mask = 0,
+		.qmenu = NULL,
+	},
+	{
+		.id = V4L2_CID_MPEG_VIDC_VIDEO_INTRA_REFRESH_MODE,
+		.name = "Intra Refresh Mode",
+		.type = V4L2_CTRL_TYPE_MENU,
+		.minimum = V4L2_CID_MPEG_VIDC_VIDEO_INTRA_REFRESH_NONE,
+		.maximum = V4L2_CID_MPEG_VIDC_VIDEO_INTRA_REFRESH_RANDOM,
+		.default_value = V4L2_CID_MPEG_VIDC_VIDEO_INTRA_REFRESH_NONE,
+		.menu_skip_mask = ~(
+		(1 << V4L2_CID_MPEG_VIDC_VIDEO_INTRA_REFRESH_NONE) |
+		(1 << V4L2_CID_MPEG_VIDC_VIDEO_INTRA_REFRESH_CYCLIC) |
+		(1 << V4L2_CID_MPEG_VIDC_VIDEO_INTRA_REFRESH_ADAPTIVE) |
+		(1 << V4L2_CID_MPEG_VIDC_VIDEO_INTRA_REFRESH_CYCLIC_ADAPTIVE) |
+		(1 << V4L2_CID_MPEG_VIDC_VIDEO_INTRA_REFRESH_RANDOM)
+		),
+		.qmenu = intra_refresh_modes,
+	},
+	{
+		.id = V4L2_CID_MPEG_VIDC_VIDEO_AIR_MBS,
+		.name = "Intra Refresh AIR MBS",
+		.type = V4L2_CTRL_TYPE_INTEGER,
+		.minimum = 0,
+		.maximum = MAX_INTRA_REFRESH_MBS,
+		.default_value = 0,
+		.step = 1,
+		.menu_skip_mask = 0,
+		.qmenu = NULL,
+	},
+	{
+		.id = V4L2_CID_MPEG_VIDC_VIDEO_AIR_REF,
+		.name = "Intra Refresh AIR REF",
+		.type = V4L2_CTRL_TYPE_INTEGER,
+		.minimum = 0,
+		.maximum = MAX_INTRA_REFRESH_MBS,
+		.default_value = 0,
+		.step = 1,
+		.menu_skip_mask = 0,
+		.qmenu = NULL,
+	},
+	{
+		.id = V4L2_CID_MPEG_VIDC_VIDEO_CIR_MBS,
+		.name = "Intra Refresh CIR MBS",
+		.type = V4L2_CTRL_TYPE_INTEGER,
+		.minimum = 0,
+		.maximum = MAX_INTRA_REFRESH_MBS,
+		.default_value = 0,
+		.step = 1,
+		.menu_skip_mask = 0,
+		.qmenu = NULL,
+	},
+	{
+		.id = V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_ALPHA,
+		.name = "H.264 Loop Filter Alpha Offset",
+		.type = V4L2_CTRL_TYPE_INTEGER,
+		.minimum = -6,
+		.maximum = 6,
+		.default_value = 0,
+		.step = 1,
+		.menu_skip_mask = 0,
+		.qmenu = NULL,
+	},
+	{
+		.id = V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_BETA,
+		.name = "H.264 Loop Filter Beta Offset",
+		.type = V4L2_CTRL_TYPE_INTEGER,
+		.minimum = -6,
+		.maximum = 6,
+		.default_value = 0,
+		.step = 1,
+		.menu_skip_mask = 0,
+		.qmenu = NULL,
+	},
+	{
+		.id = V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_MODE,
+		.name = "H.264 Loop Filter Mode",
+		.type = V4L2_CTRL_TYPE_MENU,
+		.minimum = V4L2_MPEG_VIDEO_H264_LOOP_FILTER_MODE_ENABLED,
+		.maximum = L_MODE,
+		.default_value = V4L2_MPEG_VIDEO_H264_LOOP_FILTER_MODE_DISABLED,
+		.menu_skip_mask = ~(
+		(1 << V4L2_MPEG_VIDEO_H264_LOOP_FILTER_MODE_ENABLED) |
+		(1 << V4L2_MPEG_VIDEO_H264_LOOP_FILTER_MODE_DISABLED) |
+		(1 << L_MODE)
+		),
+	},
+	{
+		.id = V4L2_CID_MPEG_VIDEO_HEADER_MODE,
+		.name = "Sequence Header Mode",
+		.type = V4L2_CTRL_TYPE_MENU,
+		.minimum = V4L2_MPEG_VIDEO_HEADER_MODE_SEPARATE,
+		.maximum = V4L2_MPEG_VIDEO_HEADER_MODE_JOINED_WITH_I_FRAME,
+		.default_value =
+			V4L2_MPEG_VIDEO_HEADER_MODE_JOINED_WITH_I_FRAME,
+		.menu_skip_mask = ~(
+		(1 << V4L2_MPEG_VIDEO_HEADER_MODE_SEPARATE) |
+		(1 << V4L2_MPEG_VIDEO_HEADER_MODE_JOINED_WITH_I_FRAME)
+		),
+	},
+	{
+		.id = V4L2_CID_MPEG_VIDC_VIDEO_SECURE,
+		.name = "Secure mode",
+		.type = V4L2_CTRL_TYPE_BUTTON,
+		.minimum = 0,
+		.maximum = 0,
+		.default_value = 0,
+		.step = 0,
+		.menu_skip_mask = 0,
+		.qmenu = NULL,
+	},
+	{
+		.id = V4L2_CID_MPEG_VIDC_VIDEO_EXTRADATA,
+		.name = "Extradata Type",
+		.type = V4L2_CTRL_TYPE_MENU,
+		.minimum = V4L2_MPEG_VIDC_EXTRADATA_NONE,
+		.maximum = V4L2_MPEG_VIDC_EXTRADATA_PQ_INFO,
+		.default_value = V4L2_MPEG_VIDC_EXTRADATA_NONE,
+		.menu_skip_mask = ~(
+			(1 << V4L2_MPEG_VIDC_EXTRADATA_NONE) |
+			(1 << V4L2_MPEG_VIDC_EXTRADATA_MB_QUANTIZATION) |
+			(1 << V4L2_MPEG_VIDC_EXTRADATA_INTERLACE_VIDEO) |
+			(1 << V4L2_MPEG_VIDC_EXTRADATA_VC1_FRAMEDISP) |
+			(1 << V4L2_MPEG_VIDC_EXTRADATA_VC1_SEQDISP) |
+			(1 << V4L2_MPEG_VIDC_EXTRADATA_TIMESTAMP) |
+			(1 << V4L2_MPEG_VIDC_EXTRADATA_S3D_FRAME_PACKING) |
+			(1 << V4L2_MPEG_VIDC_EXTRADATA_FRAME_RATE) |
+			(1 << V4L2_MPEG_VIDC_EXTRADATA_PANSCAN_WINDOW) |
+			(1 << V4L2_MPEG_VIDC_EXTRADATA_RECOVERY_POINT_SEI) |
+			(1 << V4L2_MPEG_VIDC_EXTRADATA_MULTISLICE_INFO) |
+			(1 << V4L2_MPEG_VIDC_EXTRADATA_NUM_CONCEALED_MB) |
+			(1 << V4L2_MPEG_VIDC_EXTRADATA_METADATA_FILLER) |
+			(1 << V4L2_MPEG_VIDC_EXTRADATA_INPUT_CROP) |
+			(1 << V4L2_MPEG_VIDC_EXTRADATA_DIGITAL_ZOOM) |
+			(1 << V4L2_MPEG_VIDC_EXTRADATA_ASPECT_RATIO) |
+			(1 << V4L2_MPEG_VIDC_EXTRADATA_LTR) |
+			(1 << V4L2_MPEG_VIDC_EXTRADATA_METADATA_MBI) |
+			(1 << V4L2_MPEG_VIDC_EXTRADATA_YUV_STATS)|
+			(1 << V4L2_MPEG_VIDC_EXTRADATA_ROI_QP) |
+			(1 << V4L2_MPEG_VIDC_EXTRADATA_PQ_INFO)
+			),
+		.qmenu = mpeg_video_vidc_extradata,
+	},
+	{
+		.id = V4L2_CID_MPEG_VIDC_VIDEO_H264_VUI_TIMING_INFO,
+		.name = "H264 VUI Timing Info",
+		.type = V4L2_CTRL_TYPE_BOOLEAN,
+		.minimum = V4L2_MPEG_VIDC_VIDEO_H264_VUI_TIMING_INFO_DISABLED,
+		.maximum = V4L2_MPEG_VIDC_VIDEO_H264_VUI_TIMING_INFO_ENABLED,
+		.default_value =
+			V4L2_MPEG_VIDC_VIDEO_H264_VUI_TIMING_INFO_DISABLED,
+		.step = 1,
+	},
+	{
+		.id = V4L2_CID_MPEG_VIDC_VIDEO_AU_DELIMITER,
+		.name = "H264 AU Delimiter",
+		.type = V4L2_CTRL_TYPE_BOOLEAN,
+		.minimum = V4L2_MPEG_VIDC_VIDEO_AU_DELIMITER_DISABLED,
+		.maximum = V4L2_MPEG_VIDC_VIDEO_AU_DELIMITER_ENABLED,
+		.step = 1,
+		.default_value =
+			V4L2_MPEG_VIDC_VIDEO_AU_DELIMITER_DISABLED,
+	},
+	{
+		.id = V4L2_CID_MPEG_VIDC_SET_PERF_LEVEL,
+		.name = "Encoder Performance Level",
+		.type = V4L2_CTRL_TYPE_MENU,
+		.minimum = V4L2_CID_MPEG_VIDC_PERF_LEVEL_NOMINAL,
+		.maximum = V4L2_CID_MPEG_VIDC_PERF_LEVEL_TURBO,
+		.default_value = V4L2_CID_MPEG_VIDC_PERF_LEVEL_NOMINAL,
+		.menu_skip_mask = ~(
+			(1 << V4L2_CID_MPEG_VIDC_PERF_LEVEL_NOMINAL) |
+			(1 << V4L2_CID_MPEG_VIDC_PERF_LEVEL_TURBO)),
+		.qmenu = perf_level,
+	},
+	{
+		.id = V4L2_CID_MPEG_VIDEO_CYCLIC_INTRA_REFRESH_MB,
+		.name = "Intra Refresh CIR MBS",
+		.type = V4L2_CTRL_TYPE_INTEGER,
+		.minimum = 0,
+		.maximum = MAX_INTRA_REFRESH_MBS,
+		.default_value = 0,
+		.step = 1,
+		.menu_skip_mask = 0,
+		.qmenu = NULL,
+	},
+	{
+		.id = V4L2_CID_MPEG_VIDC_VIDEO_H264_VUI_BITSTREAM_RESTRICT,
+		.name = "H264 VUI Timing Info",
+		.type = V4L2_CTRL_TYPE_BOOLEAN,
+		.minimum = BITSTREAM_RESTRICT_DISABLED,
+		.maximum = BITSTREAM_RESTRICT_ENABLED,
+		.default_value = BITSTREAM_RESTRICT_ENABLED,
+		.step = 1,
+	},
+	{
+		.id = V4L2_CID_MPEG_VIDC_VIDEO_PRESERVE_TEXT_QUALITY,
+		.name = "Preserve Text Qualty",
+		.type = V4L2_CTRL_TYPE_BOOLEAN,
+		.minimum = V4L2_MPEG_VIDC_VIDEO_PRESERVE_TEXT_QUALITY_DISABLED,
+		.maximum = V4L2_MPEG_VIDC_VIDEO_PRESERVE_TEXT_QUALITY_ENABLED,
+		.default_value =
+			V4L2_MPEG_VIDC_VIDEO_PRESERVE_TEXT_QUALITY_DISABLED,
+		.step = 1,
+	},
+	{
+		.id = V4L2_CID_MPEG_VIDC_VIDEO_DEINTERLACE,
+		.name = "Deinterlace for encoder",
+		.type = V4L2_CTRL_TYPE_BOOLEAN,
+		.minimum = V4L2_CID_MPEG_VIDC_VIDEO_DEINTERLACE_DISABLED,
+		.maximum = V4L2_CID_MPEG_VIDC_VIDEO_DEINTERLACE_ENABLED,
+		.default_value = V4L2_CID_MPEG_VIDC_VIDEO_DEINTERLACE_DISABLED,
+		.step = 1,
+	},
+	{
+		.id = V4L2_CID_MPEG_VIDC_VIDEO_MPEG4_TIME_RESOLUTION,
+		.name = "Vop time increment resolution",
+		.type = V4L2_CTRL_TYPE_INTEGER,
+		.minimum = MIN_TIME_RESOLUTION,
+		.maximum = MAX_TIME_RESOLUTION,
+		.default_value = DEFAULT_TIME_RESOLUTION,
+		.step = 1,
+		.menu_skip_mask = 0,
+		.qmenu = NULL,
+	},
+	{
+		.id = V4L2_CID_MPEG_VIDC_VIDEO_REQUEST_SEQ_HEADER,
+		.name = "Request Seq Header",
+		.type = V4L2_CTRL_TYPE_BUTTON,
+		.minimum = 0,
+		.maximum = 0,
+		.default_value = 0,
+		.step = 0,
+		.menu_skip_mask = 0,
+		.qmenu = NULL,
+	},
+	{
+		.id = V4L2_CID_MPEG_VIDC_VIDEO_USELTRFRAME,
+		.name = "H264 Use LTR",
+		.type = V4L2_CTRL_TYPE_INTEGER,
+		.minimum = 0,
+		.maximum = (MAX_LTR_FRAME_COUNT - 1),
+		.default_value = 0,
+		.step = 1,
+		.qmenu = NULL,
+	},
+	{
+		.id = V4L2_CID_MPEG_VIDC_VIDEO_LTRCOUNT,
+		.name = "Ltr Count",
+		.type = V4L2_CTRL_TYPE_INTEGER,
+		.minimum = 0,
+		.maximum = MAX_LTR_FRAME_COUNT,
+		.default_value = 0,
+		.step = 1,
+		.qmenu = NULL,
+	},
+	{
+		.id = V4L2_CID_MPEG_VIDC_VIDEO_LTRMODE,
+		.name = "Ltr Mode",
+		.type = V4L2_CTRL_TYPE_INTEGER,
+		.minimum = V4L2_MPEG_VIDC_VIDEO_LTR_MODE_DISABLE,
+		.maximum = V4L2_MPEG_VIDC_VIDEO_LTR_MODE_MANUAL,
+		.default_value = V4L2_MPEG_VIDC_VIDEO_LTR_MODE_DISABLE,
+		.step = 1,
+		.qmenu = NULL,
+	},
+	{
+		.id = V4L2_CID_MPEG_VIDC_VIDEO_MARKLTRFRAME,
+		.name = "H264 Mark LTR",
+		.type = V4L2_CTRL_TYPE_INTEGER,
+		.minimum = 0,
+		.maximum = (MAX_LTR_FRAME_COUNT - 1),
+		.default_value = 0,
+		.step = 1,
+		.qmenu = NULL,
+	},
+	{
+		.id = V4L2_CID_MPEG_VIDC_VIDEO_HIER_P_NUM_LAYERS,
+		.name = "Set Hier P num layers",
+		.type = V4L2_CTRL_TYPE_INTEGER,
+		.minimum = 0,
+		.maximum = 6,
+		.default_value = 0,
+		.step = 1,
+		.qmenu = NULL,
+	},
+	{
+		.id = V4L2_CID_MPEG_VIDC_VIDEO_RATE_CONTROL_TIMESTAMP_MODE,
+		.name = "Encoder Timestamp Mode",
+		.type = V4L2_CTRL_TYPE_MENU,
+		.minimum =
+			V4L2_MPEG_VIDC_VIDEO_RATE_CONTROL_TIMESTAMP_MODE_HONOR,
+		.maximum =
+			V4L2_MPEG_VIDC_VIDEO_RATE_CONTROL_TIMESTAMP_MODE_IGNORE,
+		.default_value =
+			V4L2_MPEG_VIDC_VIDEO_RATE_CONTROL_TIMESTAMP_MODE_HONOR,
+		.menu_skip_mask = ~(
+		(1 << V4L2_MPEG_VIDC_VIDEO_RATE_CONTROL_TIMESTAMP_MODE_HONOR) |
+		(1 << V4L2_MPEG_VIDC_VIDEO_RATE_CONTROL_TIMESTAMP_MODE_IGNORE)),
+		.qmenu = timestamp_mode,
+	},
+	{
+		.id = V4L2_CID_MPEG_VIDC_VIDEO_VPX_ERROR_RESILIENCE,
+		.name = "VP8 Error Resilience mode",
+		.type = V4L2_CTRL_TYPE_BOOLEAN,
+		.minimum = V4L2_MPEG_VIDC_VIDEO_VPX_ERROR_RESILIENCE_DISABLED,
+		.maximum = V4L2_MPEG_VIDC_VIDEO_VPX_ERROR_RESILIENCE_ENABLED,
+		.default_value =
+			V4L2_MPEG_VIDC_VIDEO_VPX_ERROR_RESILIENCE_DISABLED,
+		.step = 1,
+		.qmenu = NULL,
+	},
+	{
+		.id = V4L2_CID_MPEG_VIDC_VIDEO_ENABLE_INITIAL_QP,
+		.name = "Enable setting initial QP",
+		.type = V4L2_CTRL_TYPE_BITMASK,
+		.minimum = 0,
+		.maximum = V4L2_CID_MPEG_VIDC_VIDEO_ENABLE_INITIAL_QP_IFRAME |
+			V4L2_CID_MPEG_VIDC_VIDEO_ENABLE_INITIAL_QP_PFRAME |
+			V4L2_CID_MPEG_VIDC_VIDEO_ENABLE_INITIAL_QP_BFRAME,
+		.default_value = 0,
+		.step = 0,
+	},
+	{
+		.id = V4L2_CID_MPEG_VIDC_VIDEO_INITIAL_I_FRAME_QP,
+		.name = "Iframe initial QP",
+		.type = V4L2_CTRL_TYPE_INTEGER,
+		.minimum = 1,
+		.maximum = 127,
+		.default_value = 1,
+		.step = 1,
+		.qmenu = NULL,
+	},
+	{
+		.id = V4L2_CID_MPEG_VIDC_VIDEO_INITIAL_P_FRAME_QP,
+		.name = "Pframe initial QP",
+		.type = V4L2_CTRL_TYPE_INTEGER,
+		.minimum = 1,
+		.maximum = 127,
+		.default_value = 1,
+		.step = 1,
+		.qmenu = NULL,
+	},
+	{
+		.id = V4L2_CID_MPEG_VIDC_VIDEO_INITIAL_B_FRAME_QP,
+		.name = "Bframe initial QP",
+		.type = V4L2_CTRL_TYPE_INTEGER,
+		.minimum = 1,
+		.maximum = 127,
+		.default_value = 1,
+		.step = 1,
+		.qmenu = NULL,
+	},
+	{
+		.id = V4L2_CID_MPEG_VIDC_VIDEO_I_FRAME_QP,
+		.name = "Iframe initial QP",
+		.type = V4L2_CTRL_TYPE_INTEGER,
+		.minimum = 1,
+		.maximum = 51,
+		.default_value = 1,
+		.step = 1,
+		.qmenu = NULL,
+	},
+	{
+		.id = V4L2_CID_MPEG_VIDC_VIDEO_P_FRAME_QP,
+		.name = "Pframe initial QP",
+		.type = V4L2_CTRL_TYPE_INTEGER,
+		.minimum = 1,
+		.maximum = 51,
+		.default_value = 1,
+		.step = 1,
+		.qmenu = NULL,
+	},
+	{
+		.id = V4L2_CID_MPEG_VIDC_VIDEO_B_FRAME_QP,
+		.name = "Bframe initial QP",
+		.type = V4L2_CTRL_TYPE_INTEGER,
+		.minimum = 1,
+		.maximum = 51,
+		.default_value = 1,
+		.step = 1,
+		.qmenu = NULL,
+	},
+	{
+		.id = V4L2_CID_MPEG_VIDC_VIDEO_IFRAME_X_RANGE,
+		.name = "I-Frame X coordinate search range",
+		.type = V4L2_CTRL_TYPE_INTEGER,
+		.minimum = 4,
+		.maximum = 128,
+		.default_value = 4,
+		.step = 1,
+		.menu_skip_mask = 0,
+		.qmenu = NULL,
+	},
+	{
+		.id = V4L2_CID_MPEG_VIDC_VIDEO_IFRAME_Y_RANGE,
+		.name = "I-Frame Y coordinate search range",
+		.type = V4L2_CTRL_TYPE_INTEGER,
+		.minimum = 4,
+		.maximum = 128,
+		.default_value = 4,
+		.step = 1,
+		.menu_skip_mask = 0,
+		.qmenu = NULL,
+	},
+	{
+		.id = V4L2_CID_MPEG_VIDC_VIDEO_PFRAME_X_RANGE,
+		.name = "P-Frame X coordinate search range",
+		.type = V4L2_CTRL_TYPE_INTEGER,
+		.minimum = 4,
+		.maximum = 128,
+		.default_value = 4,
+		.step = 1,
+		.menu_skip_mask = 0,
+		.qmenu = NULL,
+	},
+	{
+		.id = V4L2_CID_MPEG_VIDC_VIDEO_PFRAME_Y_RANGE,
+		.name = "P-Frame Y coordinate search range",
+		.type = V4L2_CTRL_TYPE_INTEGER,
+		.minimum = 4,
+		.maximum = 128,
+		.default_value = 4,
+		.step = 1,
+		.menu_skip_mask = 0,
+		.qmenu = NULL,
+	},
+	{
+		.id = V4L2_CID_MPEG_VIDC_VIDEO_BFRAME_X_RANGE,
+		.name = "B-Frame X coordinate search range",
+		.type = V4L2_CTRL_TYPE_INTEGER,
+		.minimum = 4,
+		.maximum = 128,
+		.default_value = 4,
+		.step = 1,
+		.menu_skip_mask = 0,
+		.qmenu = NULL,
+	},
+	{
+		.id = V4L2_CID_MPEG_VIDC_VIDEO_BFRAME_Y_RANGE,
+		.name = "B-Frame Y coordinate search range",
+		.type = V4L2_CTRL_TYPE_INTEGER,
+		.minimum = 4,
+		.maximum = 128,
+		.default_value = 4,
+		.step = 1,
+		.menu_skip_mask = 0,
+		.qmenu = NULL,
+	},
+	{
+		.id = V4L2_CID_MPEG_VIDC_VIDEO_H264_NAL_SVC,
+		.name = "Enable H264 SVC NAL",
+		.type = V4L2_CTRL_TYPE_BOOLEAN,
+		.minimum = V4L2_CID_MPEG_VIDC_VIDEO_H264_NAL_SVC_DISABLED,
+		.maximum = V4L2_CID_MPEG_VIDC_VIDEO_H264_NAL_SVC_ENABLED,
+		.default_value = V4L2_CID_MPEG_VIDC_VIDEO_H264_NAL_SVC_DISABLED,
+		.step = 1,
+	},
+	{
+		.id = V4L2_CID_MPEG_VIDC_VIDEO_PERF_MODE,
+		.name = "Set Encoder performance mode",
+		.type = V4L2_CTRL_TYPE_INTEGER,
+		.minimum = V4L2_MPEG_VIDC_VIDEO_PERF_UNINIT,
+		.maximum = V4L2_MPEG_VIDC_VIDEO_PERF_POWER_SAVE,
+		.default_value = V4L2_MPEG_VIDC_VIDEO_PERF_UNINIT,
+		.step = 1,
+		.qmenu = NULL,
+	},
+	{
+		.id = V4L2_CID_MPEG_VIDC_VIDEO_HIER_B_NUM_LAYERS,
+		.name = "Set Hier B num layers",
+		.type = V4L2_CTRL_TYPE_INTEGER,
+		.minimum = 0,
+		.maximum = 3,
+		.default_value = 0,
+		.step = 1,
+		.qmenu = NULL,
+	},
+	{
+		.id = V4L2_CID_MPEG_VIDC_VIDEO_HYBRID_HIERP_MODE,
+		.name = "Set Hybrid Hier P mode",
+		.type = V4L2_CTRL_TYPE_INTEGER,
+		.minimum = 0,
+		.maximum = 5,
+		.default_value = 0,
+		.step = 1,
+		.qmenu = NULL,
+	},
+	{
+		.id = V4L2_CID_MPEG_VIDC_VIDEO_MBI_STATISTICS_MODE,
+		.name = "MBI Statistics Mode",
+		.type = V4L2_CTRL_TYPE_MENU,
+		.minimum = V4L2_CID_MPEG_VIDC_VIDEO_MBI_MODE_DEFAULT,
+		.maximum = V4L2_CID_MPEG_VIDC_VIDEO_MBI_MODE_3,
+		.default_value = V4L2_CID_MPEG_VIDC_VIDEO_MBI_MODE_DEFAULT,
+		.menu_skip_mask = ~(
+			(1 << V4L2_CID_MPEG_VIDC_VIDEO_MBI_MODE_DEFAULT) |
+			(1 << V4L2_CID_MPEG_VIDC_VIDEO_MBI_MODE_1) |
+			(1 << V4L2_CID_MPEG_VIDC_VIDEO_MBI_MODE_2) |
+			(1 << V4L2_CID_MPEG_VIDC_VIDEO_MBI_MODE_3)),
+		.qmenu = mbi_statistics,
+	},
+	{
+		.id = V4L2_CID_VIDC_QBUF_MODE,
+		.name = "Allows batching of buffers for power savings",
+		.type = V4L2_CTRL_TYPE_BOOLEAN,
+		.minimum = V4L2_VIDC_QBUF_STANDARD,
+		.maximum = V4L2_VIDC_QBUF_BATCHED,
+		.default_value = V4L2_VIDC_QBUF_STANDARD,
+		.step = 1,
+	},
+	{
+		.id = V4L2_CID_MPEG_VIDC_VIDEO_MAX_HIERP_LAYERS,
+		.name = "Set Max Hier P num layers sessions",
+		.type = V4L2_CTRL_TYPE_INTEGER,
+		.minimum = 0,
+		.maximum = 6,
+		.default_value = 0,
+		.step = 1,
+		.qmenu = NULL,
+	},
+	{
+		.id = V4L2_CID_MPEG_VIDC_VIDEO_BASELAYER_ID,
+		.name = "Set Base Layer ID for Hier-P",
+		.type = V4L2_CTRL_TYPE_INTEGER,
+		.minimum = 0,
+		.maximum = 6,
+		.default_value = 0,
+		.step = 1,
+		.qmenu = NULL,
+	},
+	{
+		.id = V4L2_CID_MPEG_VIDC_VIDEO_CONFIG_QP,
+		.name = "Set frame level QP",
+		.type = V4L2_CTRL_TYPE_INTEGER,
+		.minimum = 1,
+		.maximum = 127,
+		.default_value = 1,
+		.step = 1,
+		.qmenu = NULL,
+	},
+	{
+		.id = V4L2_CID_MPEG_VIDC_VENC_PARAM_SAR_WIDTH,
+		.name = "SAR Width",
+		.type = V4L2_CTRL_TYPE_INTEGER,
+		.minimum = 1,
+		.maximum = 4096,
+		.default_value = 1,
+		.step = 1,
+		.qmenu = NULL,
+	},
+	{
+		.id = V4L2_CID_MPEG_VIDC_VENC_PARAM_SAR_HEIGHT,
+		.name = "SAR Height",
+		.type = V4L2_CTRL_TYPE_INTEGER,
+		.minimum = 1,
+		.maximum = 2160,
+		.default_value = 1,
+		.step = 1,
+		.qmenu = NULL,
+	},
+	{
+		.id = V4L2_CID_MPEG_VIDC_VIDEO_PRIORITY,
+		.name = "Session Priority",
+		.type = V4L2_CTRL_TYPE_BOOLEAN,
+		.minimum = V4L2_MPEG_VIDC_VIDEO_PRIORITY_REALTIME_ENABLE,
+		.maximum = V4L2_MPEG_VIDC_VIDEO_PRIORITY_REALTIME_DISABLE,
+		.default_value = V4L2_MPEG_VIDC_VIDEO_PRIORITY_REALTIME_DISABLE,
+		.step = 1,
+	},
+	{
+		.id = V4L2_CID_MPEG_VIDC_VIDEO_VQZIP_SEI,
+		.name = "VQZIP SEI",
+		.type = V4L2_CTRL_TYPE_BOOLEAN,
+		.minimum = V4L2_CID_MPEG_VIDC_VIDEO_VQZIP_SEI_DISABLE,
+		.maximum = V4L2_CID_MPEG_VIDC_VIDEO_VQZIP_SEI_ENABLE,
+		.default_value = V4L2_CID_MPEG_VIDC_VIDEO_VQZIP_SEI_DISABLE,
+		.step = 1,
+	},
+	{
+		.id = V4L2_CID_MPEG_VIDC_VENC_PARAM_LAYER_BITRATE,
+		.name = "Layer wise bitrate for H264/H265 Hybrid HP",
+		.type = V4L2_CTRL_TYPE_INTEGER,
+		.minimum = MIN_BIT_RATE,
+		.maximum = MAX_BIT_RATE,
+		.default_value = DEFAULT_BIT_RATE,
+		.step = BIT_RATE_STEP,
+		.menu_skip_mask = 0,
+		.qmenu = NULL,
+	},
+	{
+		.id = V4L2_CID_MPEG_VIDC_VIDEO_OPERATING_RATE,
+		.name = "Set Encoder Operating rate",
+		.type = V4L2_CTRL_TYPE_INTEGER,
+		.minimum = 0,
+		.maximum = MAX_OPERATING_FRAME_RATE,
+		.default_value = 0,
+		.step = OPERATING_FRAME_RATE_STEP,
+	},
+	{
+		.id = V4L2_CID_MPEG_VIDC_VIDEO_VENC_BITRATE_TYPE,
+		.name = "BITRATE TYPE",
+		.type = V4L2_CTRL_TYPE_BOOLEAN,
+		.minimum = V4L2_CID_MPEG_VIDC_VIDEO_VENC_BITRATE_DISABLE,
+		.maximum = V4L2_CID_MPEG_VIDC_VIDEO_VENC_BITRATE_ENABLE,
+		.default_value = V4L2_CID_MPEG_VIDC_VIDEO_VENC_BITRATE_ENABLE,
+		.step = 1,
+	},
+	{
+		.id = V4L2_CID_MPEG_VIDC_VIDEO_H264_PIC_ORDER_CNT,
+		.name = "Set H264 Picture Order Count",
+		.type = V4L2_CTRL_TYPE_INTEGER,
+		.minimum = 0,
+		.maximum = 2,
+		.default_value = 0,
+		.step = 2,
+		.qmenu = NULL,
+	},
+	{
+		.id = V4L2_CID_MPEG_VIDC_VIDEO_VPE_CSC,
+		.name = "Set VPE Color space conversion coefficients",
+		.type = V4L2_CTRL_TYPE_INTEGER,
+		.minimum = V4L2_CID_MPEG_VIDC_VIDEO_VPE_CSC_DISABLE,
+		.maximum = V4L2_CID_MPEG_VIDC_VIDEO_VPE_CSC_ENABLE,
+		.default_value = V4L2_CID_MPEG_VIDC_VIDEO_VPE_CSC_DISABLE,
+		.step = 1,
+	},
+	{
+		.id = V4L2_CID_MPEG_VIDC_VIDEO_LOWLATENCY_MODE,
+		.name = "Low Latency Mode",
+		.type = V4L2_CTRL_TYPE_BOOLEAN,
+		.minimum = V4L2_CID_MPEG_VIDC_VIDEO_LOWLATENCY_DISABLE,
+		.maximum = V4L2_CID_MPEG_VIDC_VIDEO_LOWLATENCY_ENABLE,
+		.default_value = V4L2_CID_MPEG_VIDC_VIDEO_LOWLATENCY_DISABLE,
+		.step = 1,
+	},
+	{
+		.id = V4L2_CID_MPEG_VIDC_VIDEO_BLUR_WIDTH,
+		.name = "Set Blur width",
+		.type = V4L2_CTRL_TYPE_INTEGER,
+		.minimum = 0,
+		.maximum = 2048,
+		.default_value = 0,
+		.step = 1,
+	},
+	{
+		.id = V4L2_CID_MPEG_VIDC_VIDEO_BLUR_HEIGHT,
+		.name = "Set Blur height",
+		.type = V4L2_CTRL_TYPE_INTEGER,
+		.minimum = 0,
+		.maximum = 2048,
+		.default_value = 0,
+		.step = 1,
+	},
+	{
+		.id = V4L2_CID_MPEG_VIDC_VIDEO_H264_TRANSFORM_8x8,
+		.name = "Transform 8x8",
+		.type = V4L2_CTRL_TYPE_BOOLEAN,
+		.minimum = V4L2_MPEG_VIDC_VIDEO_H264_TRANSFORM_8x8_DISABLE,
+		.maximum = V4L2_MPEG_VIDC_VIDEO_H264_TRANSFORM_8x8_ENABLE,
+		.default_value = V4L2_MPEG_VIDC_VIDEO_H264_TRANSFORM_8x8_ENABLE,
+		.step = 1,
+	},
+	{
+		.id = V4L2_CID_MPEG_VIDC_VIDEO_COLOR_SPACE,
+		.name = "Set Color space",
+		.type = V4L2_CTRL_TYPE_INTEGER,
+		.minimum = MSM_VIDC_BT709_5,
+		.maximum = MSM_VIDC_BT2020,
+		.default_value = MSM_VIDC_BT601_6_625,
+		.step = 1,
+		.qmenu = NULL,
+	},
+	{
+		.id = V4L2_CID_MPEG_VIDC_VIDEO_FULL_RANGE,
+		.name = "Set Color space range",
+		.type = V4L2_CTRL_TYPE_BOOLEAN,
+		.minimum = V4L2_CID_MPEG_VIDC_VIDEO_FULL_RANGE_DISABLE,
+		.maximum = V4L2_CID_MPEG_VIDC_VIDEO_FULL_RANGE_ENABLE,
+		.default_value = V4L2_CID_MPEG_VIDC_VIDEO_FULL_RANGE_DISABLE,
+		.step = 1,
+	},
+	{
+		.id = V4L2_CID_MPEG_VIDC_VIDEO_TRANSFER_CHARS,
+		.name = "Set Color space transfer characterstics",
+		.type = V4L2_CTRL_TYPE_INTEGER,
+		.minimum = MSM_VIDC_TRANSFER_BT709_5,
+		.maximum = MSM_VIDC_TRANSFER_BT_2020_12,
+		.default_value = MSM_VIDC_TRANSFER_601_6_625,
+		.step = 1,
+		.qmenu = NULL,
+	},
+	{
+		.id = V4L2_CID_MPEG_VIDC_VIDEO_MATRIX_COEFFS,
+		.name = "Set Color space matrix coefficients",
+		.type = V4L2_CTRL_TYPE_INTEGER,
+		.minimum = MSM_VIDC_MATRIX_BT_709_5,
+		.maximum = MSM_VIDC_MATRIX_BT_2020_CONST,
+		.default_value = MSM_VIDC_MATRIX_601_6_625,
+		.step = 1,
+		.qmenu = NULL,
+	},
+	{
+		.id = V4L2_CID_MPEG_VIDC_VIDEO_IFRAME_SIZE_TYPE,
+		.name = "Bounds of I-frame size",
+		.type = V4L2_CTRL_TYPE_MENU,
+		.minimum = V4L2_CID_MPEG_VIDC_VIDEO_IFRAME_SIZE_DEFAULT,
+		.maximum = V4L2_CID_MPEG_VIDC_VIDEO_IFRAME_SIZE_UNLIMITED,
+		.default_value = V4L2_CID_MPEG_VIDC_VIDEO_IFRAME_SIZE_DEFAULT,
+		.menu_skip_mask = ~(
+			(1 << V4L2_CID_MPEG_VIDC_VIDEO_IFRAME_SIZE_DEFAULT) |
+			(1 << V4L2_CID_MPEG_VIDC_VIDEO_IFRAME_SIZE_MEDIUM) |
+			(1 << V4L2_CID_MPEG_VIDC_VIDEO_IFRAME_SIZE_HUGE) |
+			(1 << V4L2_CID_MPEG_VIDC_VIDEO_IFRAME_SIZE_UNLIMITED)),
+		.qmenu = iframe_sizes,
+	},
+	{
+		.id = V4L2_CID_MPEG_VIDC_VIDEO_SEND_SKIPPED_FRAME,
+		.name = "Send encoder output buffer for skipped frames",
+		.type = V4L2_CTRL_TYPE_BOOLEAN,
+		.minimum = V4L2_MPEG_VIDC_VIDEO_SEND_SKIPPED_FRAME_DISABLE,
+		.maximum = V4L2_MPEG_VIDC_VIDEO_SEND_SKIPPED_FRAME_ENABLE,
+		.default_value =
+			V4L2_MPEG_VIDC_VIDEO_SEND_SKIPPED_FRAME_DISABLE,
+		.step = 1,
+	}
+};
+
+#define NUM_CTRLS ARRAY_SIZE(msm_venc_ctrls)
+
+static u32 get_frame_size_nv12(int plane, u32 height, u32 width)
+{
+	return VENUS_BUFFER_SIZE(COLOR_FMT_NV12, width, height);
+}
+
+static u32 get_frame_size_nv12_ubwc(int plane, u32 height, u32 width)
+{
+	return VENUS_BUFFER_SIZE(COLOR_FMT_NV12_UBWC, width, height);
+}
+
+static u32 get_frame_size_rgba(int plane, u32 height, u32 width)
+{
+	return VENUS_BUFFER_SIZE(COLOR_FMT_RGBA8888, width, height);
+}
+
+static u32 get_frame_size_rgba_ubwc(int plane, u32 height, u32 width)
+{
+	return VENUS_BUFFER_SIZE(COLOR_FMT_RGBA8888_UBWC, width, height);
+}
+
+static u32 get_frame_size_nv21(int plane, u32 height, u32 width)
+{
+	return VENUS_BUFFER_SIZE(COLOR_FMT_NV21, width, height);
+}
+
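+/*
+ * Conservative worst case for a compressed frame: the size of an
+ * uncompressed 4:2:0 frame (3/2 bytes per pixel, dimensions aligned
+ * to 32), rounded up to a 4K page.
+ */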
+static u32 get_frame_size_compressed(int plane, u32 height, u32 width)
+{
+	int sz = ALIGN(height, 32) * ALIGN(width, 32) * 3 / 2;
+
+	return ALIGN(sz, SZ_4K);
+}
+
+static struct msm_vidc_format venc_formats[] = {
+	{
+		.name = "YCbCr Semiplanar 4:2:0",
+		.description = "Y/CbCr 4:2:0",
+		.fourcc = V4L2_PIX_FMT_NV12,
+		.get_frame_size = get_frame_size_nv12,
+		.type = OUTPUT_PORT,
+	},
+	{
+		.name = "UBWC YCbCr Semiplanar 4:2:0",
+		.description = "UBWC Y/CbCr 4:2:0",
+		.fourcc = V4L2_PIX_FMT_NV12_UBWC,
+		.get_frame_size = get_frame_size_nv12_ubwc,
+		.type = OUTPUT_PORT,
+	},
+	{
+		.name = "RGBA 8:8:8:8",
+		.description = "RGBA 8:8:8:8",
+		.fourcc = V4L2_PIX_FMT_RGB32,
+		.get_frame_size = get_frame_size_rgba,
+		.type = OUTPUT_PORT,
+	},
+	{
+		.name = "UBWC RGBA 8:8:8:8",
+		.description = "UBWC RGBA 8:8:8:8",
+		.fourcc = V4L2_PIX_FMT_RGBA8888_UBWC,
+		.get_frame_size = get_frame_size_rgba_ubwc,
+		.type = OUTPUT_PORT,
+	},
+	{
+		.name = "Mpeg4",
+		.description = "Mpeg4 compressed format",
+		.fourcc = V4L2_PIX_FMT_MPEG4,
+		.get_frame_size = get_frame_size_compressed,
+		.type = CAPTURE_PORT,
+	},
+	{
+		.name = "H263",
+		.description = "H263 compressed format",
+		.fourcc = V4L2_PIX_FMT_H263,
+		.get_frame_size = get_frame_size_compressed,
+		.type = CAPTURE_PORT,
+	},
+	{
+		.name = "H264",
+		.description = "H264 compressed format",
+		.fourcc = V4L2_PIX_FMT_H264,
+		.get_frame_size = get_frame_size_compressed,
+		.type = CAPTURE_PORT,
+	},
+	{
+		.name = "VP8",
+		.description = "VP8 compressed format",
+		.fourcc = V4L2_PIX_FMT_VP8,
+		.get_frame_size = get_frame_size_compressed,
+		.type = CAPTURE_PORT,
+	},
+	{
+		.name = "HEVC",
+		.description = "HEVC compressed format",
+		.fourcc = V4L2_PIX_FMT_HEVC,
+		.get_frame_size = get_frame_size_compressed,
+		.type = CAPTURE_PORT,
+	},
+	{
+		.name = "YCrCb Semiplanar 4:2:0",
+		.description = "Y/CrCb 4:2:0",
+		.fourcc = V4L2_PIX_FMT_NV21,
+		.get_frame_size = get_frame_size_nv21,
+		.type = OUTPUT_PORT,
+	},
+};
+
+static int msm_venc_set_csc(struct msm_vidc_inst *inst)
+{
+	int rc = 0;
+	int count = 0;
+	struct hal_vpe_color_space_conversion vpe_csc;
+
+	while (count < HAL_MAX_MATRIX_COEFFS) {
+		if (count < HAL_MAX_BIAS_COEFFS)
+			vpe_csc.csc_bias[count] =
+				vpe_csc_601_to_709_bias_coeff[count];
+		if (count < HAL_MAX_LIMIT_COEFFS)
+			vpe_csc.csc_limit[count] =
+				vpe_csc_601_to_709_limit_coeff[count];
+		vpe_csc.csc_matrix[count] =
+			vpe_csc_601_to_709_matrix_coeff[count];
+		count = count + 1;
+	}
+	rc = msm_comm_try_set_prop(inst,
+			HAL_PARAM_VPE_COLOR_SPACE_CONVERSION, &vpe_csc);
+	if (rc)
+		dprintk(VIDC_ERR, "Setting VPE coefficients failed\n");
+
+	return rc;
+}
+
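+/*
+ * Route each extradata type to the port whose buffers will carry it:
+ * bitstream-side metadata travels with the compressed CAPTURE buffers,
+ * input-frame metadata with the raw OUTPUT buffers. Either way a second
+ * plane is needed to hold the extradata.
+ */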
+static void msm_venc_register_extradata(
+		struct msm_vidc_inst *inst, u32 extradata) {
+	struct session_prop *prop = NULL;
+
+	if (!inst)
+		return;
+
+	prop = &inst->prop;
+	switch (extradata) {
+	case V4L2_MPEG_VIDC_EXTRADATA_MULTISLICE_INFO:
+	case V4L2_MPEG_VIDC_EXTRADATA_NUM_CONCEALED_MB:
+	case V4L2_MPEG_VIDC_EXTRADATA_METADATA_FILLER:
+	case V4L2_MPEG_VIDC_EXTRADATA_LTR:
+	case V4L2_MPEG_VIDC_EXTRADATA_METADATA_MBI:
+		prop->extradata[CAPTURE_PORT] |= extradata;
+		prop->num_planes[CAPTURE_PORT] = 2;
+		dprintk(VIDC_DBG, "Output Extradata: %#x\n",
+			prop->extradata[CAPTURE_PORT]);
+		break;
+	case V4L2_MPEG_VIDC_EXTRADATA_INPUT_CROP:
+	case V4L2_MPEG_VIDC_EXTRADATA_DIGITAL_ZOOM:
+	case V4L2_MPEG_VIDC_EXTRADATA_ASPECT_RATIO:
+	case V4L2_MPEG_VIDC_EXTRADATA_YUV_STATS:
+	case V4L2_MPEG_VIDC_EXTRADATA_ROI_QP:
+	case V4L2_MPEG_VIDC_EXTRADATA_PQ_INFO:
+		prop->extradata[OUTPUT_PORT] |= extradata;
+		prop->num_planes[OUTPUT_PORT] = 2;
+		dprintk(VIDC_DBG, "Input Extradata: %#x\n",
+			prop->extradata[OUTPUT_PORT]);
+		break;
+	default:
+		dprintk(VIDC_ERR, "Unknown extradata: %d\n", extradata);
+	}
+}
+
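+/*
+ * vb2 queue_setup hook: derives the plane count and per-plane sizes
+ * from the firmware buffer requirements, clamps the buffer counts to
+ * sane bounds, and reports the agreed counts back to firmware through
+ * HAL_PARAM_BUFFER_COUNT_ACTUAL for both ports.
+ */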
+static int msm_venc_queue_setup(struct vb2_queue *q,
+				const void *parg,
+				unsigned int *num_buffers,
+				unsigned int *num_planes, unsigned int sizes[],
+				void *alloc_ctxs[])
+{
+	int i, temp, rc = 0;
+	struct msm_vidc_inst *inst;
+	struct hal_buffer_count_actual new_buf_count;
+	enum hal_property property_id;
+	struct hfi_device *hdev;
+	struct hal_buffer_requirements *buff_req;
+	u32 extra_idx = 0;
+	struct hal_buffer_requirements *buff_req_buffer = NULL;
+
+	if (!q || !q->drv_priv) {
+		dprintk(VIDC_ERR, "Invalid input\n");
+		return -EINVAL;
+	}
+	inst = q->drv_priv;
+
+	if (!inst || !inst->core || !inst->core->device) {
+		dprintk(VIDC_ERR, "%s invalid parameters\n", __func__);
+		return -EINVAL;
+	}
+	hdev = inst->core->device;
+
+	rc = msm_comm_try_state(inst, MSM_VIDC_OPEN_DONE);
+	if (rc) {
+		dprintk(VIDC_ERR, "Failed to open instance\n");
+		return rc;
+	}
+
+	rc = msm_comm_try_get_bufreqs(inst);
+	if (rc) {
+		dprintk(VIDC_ERR,
+				"Failed to get buffer requirements: %d\n", rc);
+		return rc;
+	}
+
+	switch (q->type) {
+	case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
+		*num_planes = inst->prop.num_planes[CAPTURE_PORT];
+
+		buff_req = get_buff_req_buffer(inst, HAL_BUFFER_OUTPUT);
+		if (buff_req) {
+			/*
+			 * Pretend as if the FW itself is asking for additional
+			 * buffers, which are required for DCVS
+			 */
+			unsigned int min_req_buffers =
+				buff_req->buffer_count_min +
+				msm_dcvs_get_extra_buff_count(inst);
+			*num_buffers = max(*num_buffers, min_req_buffers);
+		}
+
+		if (*num_buffers < MIN_NUM_CAPTURE_BUFFERS ||
+				*num_buffers > VB2_MAX_FRAME) {
+			int temp = *num_buffers;
+
+			*num_buffers = clamp_val(*num_buffers,
+					MIN_NUM_CAPTURE_BUFFERS,
+					VB2_MAX_FRAME);
+			dprintk(VIDC_INFO,
+				"Changing buffer count on CAPTURE_MPLANE from %d to %d for best effort encoding\n",
+				temp, *num_buffers);
+		}
+
+		for (i = 0; i < *num_planes; i++) {
+			int extra_idx = EXTRADATA_IDX(*num_planes);
+
+			buff_req_buffer = get_buff_req_buffer(inst,
+					HAL_BUFFER_OUTPUT);
+
+			sizes[i] = buff_req_buffer ?
+				buff_req_buffer->buffer_size : 0;
+
+			if (extra_idx && i == extra_idx &&
+					extra_idx < VIDEO_MAX_PLANES) {
+				buff_req_buffer = get_buff_req_buffer(inst,
+						HAL_BUFFER_EXTRADATA_OUTPUT);
+				if (!buff_req_buffer) {
+					dprintk(VIDC_ERR,
+						"%s: failed - invalid buffer req\n",
+						__func__);
+					return -EINVAL;
+				}
+
+				sizes[i] = buff_req_buffer->buffer_size;
+			}
+		}
+
+		dprintk(VIDC_DBG, "actual output buffer count set to fw = %d\n",
+				*num_buffers);
+		property_id = HAL_PARAM_BUFFER_COUNT_ACTUAL;
+		new_buf_count.buffer_type = HAL_BUFFER_OUTPUT;
+		new_buf_count.buffer_count_actual = *num_buffers;
+		rc = call_hfi_op(hdev, session_set_property, inst->session,
+			property_id, &new_buf_count);
+
+		break;
+	case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
+		*num_planes = inst->prop.num_planes[OUTPUT_PORT];
+
+		*num_buffers = inst->buff_req.buffer[0].buffer_count_actual =
+			max(*num_buffers, inst->buff_req.buffer[0].
+				buffer_count_min);
+
+		temp = *num_buffers;
+
+		*num_buffers = clamp_val(*num_buffers,
+				MIN_NUM_OUTPUT_BUFFERS,
+				VB2_MAX_FRAME);
+		dprintk(VIDC_INFO,
+			"Changing buffer count on OUTPUT_MPLANE from %d to %d for best effort encoding\n",
+			temp, *num_buffers);
+
+		property_id = HAL_PARAM_BUFFER_COUNT_ACTUAL;
+		new_buf_count.buffer_type = HAL_BUFFER_INPUT;
+		new_buf_count.buffer_count_actual = *num_buffers;
+
+		dprintk(VIDC_DBG, "actual input buffer count set to fw = %d\n",
+				*num_buffers);
+
+		rc = call_hfi_op(hdev, session_set_property, inst->session,
+					property_id, &new_buf_count);
+		if (rc)
+			dprintk(VIDC_ERR, "failed to set count to fw\n");
+
+		dprintk(VIDC_DBG, "size = %d, alignment = %d, count = %d\n",
+				inst->buff_req.buffer[0].buffer_size,
+				inst->buff_req.buffer[0].buffer_alignment,
+				inst->buff_req.buffer[0].buffer_count_actual);
+		sizes[0] = inst->fmts[OUTPUT_PORT].get_frame_size(
+				0, inst->prop.height[OUTPUT_PORT],
+				inst->prop.width[OUTPUT_PORT]);
+
+		extra_idx =
+			EXTRADATA_IDX(*num_planes);
+		if (extra_idx && (extra_idx < VIDEO_MAX_PLANES)) {
+			buff_req_buffer = get_buff_req_buffer(inst,
+				HAL_BUFFER_EXTRADATA_INPUT);
+			if (!buff_req_buffer) {
+				dprintk(VIDC_ERR,
+					"%s: failed - invalid buffer req\n",
+					__func__);
+				return -EINVAL;
+			}
+
+			sizes[extra_idx] = buff_req_buffer->buffer_size;
+		}
+
+		break;
+	default:
+		dprintk(VIDC_ERR, "Invalid q type = %d\n", q->type);
+		rc = -EINVAL;
+		break;
+	}
+	return rc;
+}
+
+static int msm_venc_toggle_hier_p(struct msm_vidc_inst *inst, int layers)
+{
+	int num_enh_layers = 0;
+	u32 property_id = 0;
+	struct hfi_device *hdev = NULL;
+	int rc = 0;
+
+	if (!inst || !inst->core || !inst->core->device) {
+		dprintk(VIDC_ERR, "%s invalid parameters\n", __func__);
+		return -EINVAL;
+	}
+
+	if (inst->fmts[CAPTURE_PORT].fourcc != V4L2_PIX_FMT_VP8)
+		return 0;
+
+	num_enh_layers = layers ? : 0;
+	dprintk(VIDC_DBG, "%s Hier-P in firmware\n",
+			num_enh_layers ? "Enable" : "Disable");
+
+	hdev = inst->core->device;
+	property_id = HAL_PARAM_VENC_HIER_P_MAX_ENH_LAYERS;
+	rc = call_hfi_op(hdev, session_set_property,
+			(void *)inst->session, property_id,
+			(void *)&num_enh_layers);
+	if (rc) {
+		dprintk(VIDC_ERR,
+			"%s: failed with error = %d\n", __func__, rc);
+	}
+	return rc;
+}
+
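+/*
+ * Enable the firmware power-save encoding mode when the session load
+ * exceeds the core's max-quality capacity, or when the resolution is at
+ * or above the platform power recommendation and the client has not
+ * explicitly chosen a performance mode.
+ */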
+static inline int msm_venc_power_save_mode_enable(struct msm_vidc_inst *inst)
+{
+	int rc = 0;
+	u32 height = 0, width = 0;
+	u32 prop_id = 0, hq_max = 0, power_conf = 0, inst_load = 0;
+	void *pdata = NULL;
+	bool set_by_client = false, enable_low_power = false;
+	struct hfi_device *hdev = NULL;
+	enum hal_perf_mode venc_mode;
+	enum load_calc_quirks quirks = LOAD_CALC_IGNORE_TURBO_LOAD |
+		LOAD_CALC_IGNORE_THUMBNAIL_LOAD;
+
+	if (!inst || !inst->core || !inst->core->device) {
+		dprintk(VIDC_ERR, "%s invalid parameters\n", __func__);
+		return -EINVAL;
+	}
+
+	hdev = inst->core->device;
+	inst_load = msm_comm_get_inst_load(inst, quirks);
+	hq_max = inst->capability.mbs_per_sec.max;
+	power_conf = inst->core->resources.power_conf;
+	height = inst->prop.height[CAPTURE_PORT];
+	width = inst->prop.width[CAPTURE_PORT];
+
+	switch (msm_comm_g_ctrl_for_id(inst,
+				V4L2_CID_MPEG_VIDC_VIDEO_PERF_MODE)) {
+	case V4L2_MPEG_VIDC_VIDEO_PERF_MAX_QUALITY:
+	case V4L2_MPEG_VIDC_VIDEO_PERF_POWER_SAVE:
+		set_by_client = true;
+		break;
+	}
+
+	if (inst_load > hq_max) {
+		dprintk(VIDC_INFO, "Setting low power w.r.t core limitation\n");
+		enable_low_power = true;
+	} else if (!set_by_client) {
+		if (power_conf && width * height >= power_conf) {
+			dprintk(VIDC_INFO,
+				"Setting low power w.r.t system power recommendation\n");
+			enable_low_power = true;
+		}
+	}
+
+	if (enable_low_power) {
+		prop_id = HAL_CONFIG_VENC_PERF_MODE;
+		venc_mode = HAL_PERF_MODE_POWER_SAVE;
+		pdata = &venc_mode;
+		rc = call_hfi_op(hdev, session_set_property,
+			(void *)inst->session, prop_id, pdata);
+		if (rc) {
+			dprintk(VIDC_ERR,
+				"%s: Failed to set power save mode for inst: %pK\n",
+				__func__, inst);
+			goto fail_power_mode_set;
+		}
+		inst->flags |= VIDC_LOW_POWER;
+		dprintk(VIDC_INFO, "Power Save Mode set for inst: %pK\n", inst);
+	}
+
+fail_power_mode_set:
+	return rc;
+}
+
+static inline int start_streaming(struct msm_vidc_inst *inst)
+{
+	int rc = 0;
+
+	if (!inst || !inst->core || !inst->core->device) {
+		dprintk(VIDC_ERR, "%s invalid parameters\n", __func__);
+		return -EINVAL;
+	}
+	msm_venc_power_save_mode_enable(inst);
+	if (inst->capability.pixelprocess_capabilities &
+		HAL_VIDEO_ENCODER_SCALING_CAPABILITY)
+		rc = msm_vidc_check_scaling_supported(inst);
+	if (rc) {
+		dprintk(VIDC_ERR, "H/w scaling is not in valid range\n");
+		return -EINVAL;
+	}
+	rc = msm_comm_try_get_bufreqs(inst);
+	if (rc) {
+		dprintk(VIDC_ERR,
+			"Failed to get Buffer Requirements : %d\n", rc);
+		goto fail_start;
+	}
+	rc = msm_comm_set_scratch_buffers(inst, false);
+	if (rc) {
+		dprintk(VIDC_ERR, "Failed to set scratch buffers: %d\n", rc);
+		goto fail_start;
+	}
+	rc = msm_comm_set_persist_buffers(inst);
+	if (rc) {
+		dprintk(VIDC_ERR, "Failed to set persist buffers: %d\n", rc);
+		goto fail_start;
+	}
+
+	msm_comm_scale_clocks_and_bus(inst);
+
+	rc = msm_comm_try_state(inst, MSM_VIDC_START_DONE);
+	if (rc) {
+		dprintk(VIDC_ERR,
+			"Failed to move inst: %pK to start done state\n", inst);
+		goto fail_start;
+	}
+	msm_dcvs_init_load(inst);
+
+fail_start:
+	return rc;
+}
+
+static int msm_venc_start_streaming(struct vb2_queue *q, unsigned int count)
+{
+	struct msm_vidc_inst *inst;
+	int rc = 0;
+	struct vb2_buffer *vb;
+	struct vb2_buf_entry *temp, *next;
+
+	if (!q || !q->drv_priv) {
+		dprintk(VIDC_ERR, "Invalid input, q = %pK\n", q);
+		return -EINVAL;
+	}
+	inst = q->drv_priv;
+
+	if (inst->state == MSM_VIDC_CORE_INVALID ||
+		inst->core->state == VIDC_CORE_INVALID ||
+		inst->core->state == VIDC_CORE_UNINIT) {
+		rc = -EINVAL;
+		goto stream_start_failed;
+	}
+
+	dprintk(VIDC_DBG, "Streamon called on: %d capability for inst: %pK\n",
+		q->type, inst);
+	switch (q->type) {
+	case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
+		if (inst->bufq[CAPTURE_PORT].vb2_bufq.streaming)
+			rc = start_streaming(inst);
+		break;
+	case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
+		if (inst->bufq[OUTPUT_PORT].vb2_bufq.streaming)
+			rc = start_streaming(inst);
+		break;
+	default:
+		dprintk(VIDC_ERR, "Queue type is not supported: %d\n", q->type);
+		return -EINVAL;
+	}
+	if (rc) {
+		dprintk(VIDC_ERR,
+			"Streamon failed on: %d capability for inst: %pK\n",
+			q->type, inst);
+		goto stream_start_failed;
+	}
+
+	rc = msm_comm_qbuf(inst, NULL);
+	if (rc) {
+		dprintk(VIDC_ERR,
+				"Failed to commit buffers queued before STREAM_ON to hardware: %d\n",
+				rc);
+		goto stream_start_failed;
+	}
+
+stream_start_failed:
+	if (rc) {
+		list_for_each_entry(vb, &q->queued_list, queued_entry) {
+			if (vb->type == q->type &&
+					vb->state == VB2_BUF_STATE_ACTIVE)
+				vb2_buffer_done(vb, VB2_BUF_STATE_QUEUED);
+		}
+		mutex_lock(&inst->pendingq.lock);
+		list_for_each_entry_safe(temp, next,
+				&inst->pendingq.list, list) {
+			if (temp->vb->type == q->type) {
+				list_del(&temp->list);
+				kfree(temp);
+			}
+		}
+		mutex_unlock(&inst->pendingq.lock);
+	}
+	return rc;
+}
+
+static void msm_venc_stop_streaming(struct vb2_queue *q)
+{
+	struct msm_vidc_inst *inst;
+	int rc = 0;
+
+	if (!q || !q->drv_priv) {
+		dprintk(VIDC_ERR, "%s - Invalid input, q = %pK\n", __func__, q);
+		return;
+	}
+
+	inst = q->drv_priv;
+	dprintk(VIDC_DBG, "Streamoff called on: %d capability\n", q->type);
+	switch (q->type) {
+	case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
+		break;
+	case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
+		rc = msm_comm_try_state(inst, MSM_VIDC_RELEASE_RESOURCES_DONE);
+		break;
+	default:
+		dprintk(VIDC_ERR, "Q-type is not supported: %d\n", q->type);
+		rc = -EINVAL;
+		break;
+	}
+
+	msm_comm_scale_clocks_and_bus(inst);
+
+	if (rc)
+		dprintk(VIDC_ERR,
+			"Failed to move inst: %pK, cap = %d to state: %d\n",
+			inst, q->type, MSM_VIDC_RELEASE_RESOURCES_DONE);
+}
+
+static void msm_venc_buf_queue(struct vb2_buffer *vb)
+{
+	int rc = msm_comm_qbuf(vb2_get_drv_priv(vb->vb2_queue), vb);
+
+	if (rc)
+		dprintk(VIDC_ERR, "Failed to queue buffer: %d\n", rc);
+}
+
+static void msm_venc_buf_cleanup(struct vb2_buffer *vb)
+{
+	int rc = 0;
+	struct buf_queue *q = NULL;
+	struct msm_vidc_inst *inst = NULL;
+
+	if (!vb) {
+		dprintk(VIDC_ERR, "%s : Invalid vb pointer %pK",
+			__func__, vb);
+		return;
+	}
+
+	inst = vb2_get_drv_priv(vb->vb2_queue);
+	if (!inst) {
+		dprintk(VIDC_ERR, "%s : Invalid inst pointer",
+			__func__);
+		return;
+	}
+
+	q = msm_comm_get_vb2q(inst, vb->type);
+	if (!q) {
+		dprintk(VIDC_ERR,
+			"%s : Failed to find buffer queue for type = %d\n",
+				__func__, vb->type);
+		return;
+	}
+
+	if (q->vb2_bufq.streaming) {
+		dprintk(VIDC_DBG, "%d PORT is streaming\n",
+			vb->type);
+		return;
+	}
+
+	rc = msm_vidc_release_buffers(inst, vb->type);
+	if (rc)
+		dprintk(VIDC_ERR, "%s : Failed to release buffers : %d\n",
+			__func__, rc);
+}
+
+static const struct vb2_ops msm_venc_vb2q_ops = {
+	.queue_setup = msm_venc_queue_setup,
+	.start_streaming = msm_venc_start_streaming,
+	.buf_queue = msm_venc_buf_queue,
+	.buf_cleanup = msm_venc_buf_cleanup,
+	.stop_streaming = msm_venc_stop_streaming,
+};
+
+const struct vb2_ops *msm_venc_get_vb2q_ops(void)
+{
+	return &msm_venc_vb2q_ops;
+}
+
+static struct v4l2_ctrl *get_ctrl_from_cluster(int id,
+		struct v4l2_ctrl **cluster, int ncontrols)
+{
+	int c;
+
+	for (c = 0; c < ncontrols; ++c)
+		if (cluster[c]->id == id)
+			return cluster[c];
+	return NULL;
+}
+
+/* Helper function to translate V4L2_* to HAL_* */
+static inline int venc_v4l2_to_hal(int id, int value)
+{
+	switch (id) {
+	/* MPEG4 */
+	case V4L2_CID_MPEG_VIDEO_MPEG4_LEVEL:
+		switch (value) {
+		case V4L2_MPEG_VIDEO_MPEG4_LEVEL_0:
+			return HAL_MPEG4_LEVEL_0;
+		case V4L2_MPEG_VIDEO_MPEG4_LEVEL_0B:
+			return HAL_MPEG4_LEVEL_0b;
+		case V4L2_MPEG_VIDEO_MPEG4_LEVEL_1:
+			return HAL_MPEG4_LEVEL_1;
+		case V4L2_MPEG_VIDEO_MPEG4_LEVEL_2:
+			return HAL_MPEG4_LEVEL_2;
+		case V4L2_MPEG_VIDEO_MPEG4_LEVEL_3:
+			return HAL_MPEG4_LEVEL_3;
+		case V4L2_MPEG_VIDEO_MPEG4_LEVEL_4:
+			return HAL_MPEG4_LEVEL_4;
+		case V4L2_MPEG_VIDEO_MPEG4_LEVEL_5:
+			return HAL_MPEG4_LEVEL_5;
+		default:
+			goto unknown_value;
+		}
+	case V4L2_CID_MPEG_VIDEO_MPEG4_PROFILE:
+		switch (value) {
+		case V4L2_MPEG_VIDEO_MPEG4_PROFILE_SIMPLE:
+			return HAL_MPEG4_PROFILE_SIMPLE;
+		case V4L2_MPEG_VIDEO_MPEG4_PROFILE_ADVANCED_SIMPLE:
+			return HAL_MPEG4_PROFILE_ADVANCEDSIMPLE;
+		default:
+			goto unknown_value;
+		}
+	/* H264 */
+	case V4L2_CID_MPEG_VIDEO_H264_PROFILE:
+		switch (value) {
+		case V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE:
+			return HAL_H264_PROFILE_BASELINE;
+		case V4L2_MPEG_VIDEO_H264_PROFILE_CONSTRAINED_BASELINE:
+			return HAL_H264_PROFILE_CONSTRAINED_BASE;
+		case V4L2_MPEG_VIDEO_H264_PROFILE_MAIN:
+			return HAL_H264_PROFILE_MAIN;
+		case V4L2_MPEG_VIDEO_H264_PROFILE_EXTENDED:
+			return HAL_H264_PROFILE_EXTENDED;
+		case V4L2_MPEG_VIDEO_H264_PROFILE_HIGH:
+			return HAL_H264_PROFILE_HIGH;
+		case V4L2_MPEG_VIDEO_H264_PROFILE_HIGH_10:
+			return HAL_H264_PROFILE_HIGH10;
+		case V4L2_MPEG_VIDEO_H264_PROFILE_HIGH_422:
+			return HAL_H264_PROFILE_HIGH422;
+		case V4L2_MPEG_VIDEO_H264_PROFILE_HIGH_444_PREDICTIVE:
+			return HAL_H264_PROFILE_HIGH444;
+		case V4L2_MPEG_VIDEO_H264_PROFILE_CONSTRAINED_HIGH:
+			return HAL_H264_PROFILE_CONSTRAINED_HIGH;
+		default:
+			goto unknown_value;
+		}
+	case V4L2_CID_MPEG_VIDEO_H264_LEVEL:
+		switch (value) {
+		case V4L2_MPEG_VIDEO_H264_LEVEL_1_0:
+			return HAL_H264_LEVEL_1;
+		case V4L2_MPEG_VIDEO_H264_LEVEL_1B:
+			return HAL_H264_LEVEL_1b;
+		case V4L2_MPEG_VIDEO_H264_LEVEL_1_1:
+			return HAL_H264_LEVEL_11;
+		case V4L2_MPEG_VIDEO_H264_LEVEL_1_2:
+			return HAL_H264_LEVEL_12;
+		case V4L2_MPEG_VIDEO_H264_LEVEL_1_3:
+			return HAL_H264_LEVEL_13;
+		case V4L2_MPEG_VIDEO_H264_LEVEL_2_0:
+			return HAL_H264_LEVEL_2;
+		case V4L2_MPEG_VIDEO_H264_LEVEL_2_1:
+			return HAL_H264_LEVEL_21;
+		case V4L2_MPEG_VIDEO_H264_LEVEL_2_2:
+			return HAL_H264_LEVEL_22;
+		case V4L2_MPEG_VIDEO_H264_LEVEL_3_0:
+			return HAL_H264_LEVEL_3;
+		case V4L2_MPEG_VIDEO_H264_LEVEL_3_1:
+			return HAL_H264_LEVEL_31;
+		case V4L2_MPEG_VIDEO_H264_LEVEL_3_2:
+			return HAL_H264_LEVEL_32;
+		case V4L2_MPEG_VIDEO_H264_LEVEL_4_0:
+			return HAL_H264_LEVEL_4;
+		case V4L2_MPEG_VIDEO_H264_LEVEL_4_1:
+			return HAL_H264_LEVEL_41;
+		case V4L2_MPEG_VIDEO_H264_LEVEL_4_2:
+			return HAL_H264_LEVEL_42;
+		case V4L2_MPEG_VIDEO_H264_LEVEL_5_0:
+			return HAL_H264_LEVEL_5;
+		case V4L2_MPEG_VIDEO_H264_LEVEL_5_1:
+			return HAL_H264_LEVEL_51;
+		case V4L2_MPEG_VIDEO_H264_LEVEL_5_2:
+			return HAL_H264_LEVEL_52;
+		default:
+			goto unknown_value;
+		}
+	/* H263 */
+	case V4L2_CID_MPEG_VIDC_VIDEO_H263_PROFILE:
+		switch (value) {
+		case V4L2_MPEG_VIDC_VIDEO_H263_PROFILE_BASELINE:
+			return HAL_H263_PROFILE_BASELINE;
+		case V4L2_MPEG_VIDC_VIDEO_H263_PROFILE_H320CODING:
+			return HAL_H263_PROFILE_H320CODING;
+		case V4L2_MPEG_VIDC_VIDEO_H263_PROFILE_BACKWARDCOMPATIBLE:
+			return HAL_H263_PROFILE_BACKWARDCOMPATIBLE;
+		case V4L2_MPEG_VIDC_VIDEO_H263_PROFILE_ISWV2:
+			return HAL_H263_PROFILE_ISWV2;
+		case V4L2_MPEG_VIDC_VIDEO_H263_PROFILE_ISWV3:
+			return HAL_H263_PROFILE_ISWV3;
+		case V4L2_MPEG_VIDC_VIDEO_H263_PROFILE_HIGHCOMPRESSION:
+			return HAL_H263_PROFILE_HIGHCOMPRESSION;
+		case V4L2_MPEG_VIDC_VIDEO_H263_PROFILE_INTERNET:
+			return HAL_H263_PROFILE_INTERNET;
+		case V4L2_MPEG_VIDC_VIDEO_H263_PROFILE_INTERLACE:
+			return HAL_H263_PROFILE_INTERLACE;
+		case V4L2_MPEG_VIDC_VIDEO_H263_PROFILE_HIGHLATENCY:
+			return HAL_H263_PROFILE_HIGHLATENCY;
+		default:
+			goto unknown_value;
+		}
+	case V4L2_CID_MPEG_VIDEO_H264_ENTROPY_MODE:
+		switch (value) {
+		case V4L2_MPEG_VIDEO_H264_ENTROPY_MODE_CAVLC:
+			return HAL_H264_ENTROPY_CAVLC;
+		case V4L2_MPEG_VIDEO_H264_ENTROPY_MODE_CABAC:
+			return HAL_H264_ENTROPY_CABAC;
+		default:
+			goto unknown_value;
+		}
+	case V4L2_CID_MPEG_VIDC_VIDEO_H264_CABAC_MODEL:
+		switch (value) {
+		case V4L2_CID_MPEG_VIDC_VIDEO_H264_CABAC_MODEL_0:
+			return HAL_H264_CABAC_MODEL_0;
+		case V4L2_CID_MPEG_VIDC_VIDEO_H264_CABAC_MODEL_1:
+			return HAL_H264_CABAC_MODEL_1;
+		case V4L2_CID_MPEG_VIDC_VIDEO_H264_CABAC_MODEL_2:
+			return HAL_H264_CABAC_MODEL_2;
+		default:
+			goto unknown_value;
+		}
+	case V4L2_CID_MPEG_VIDC_VIDEO_H263_LEVEL:
+		switch (value) {
+		case V4L2_MPEG_VIDC_VIDEO_H263_LEVEL_1_0:
+			return HAL_H263_LEVEL_10;
+		case V4L2_MPEG_VIDC_VIDEO_H263_LEVEL_2_0:
+			return HAL_H263_LEVEL_20;
+		case V4L2_MPEG_VIDC_VIDEO_H263_LEVEL_3_0:
+			return HAL_H263_LEVEL_30;
+		case V4L2_MPEG_VIDC_VIDEO_H263_LEVEL_4_0:
+			return HAL_H263_LEVEL_40;
+		case V4L2_MPEG_VIDC_VIDEO_H263_LEVEL_4_5:
+			return HAL_H263_LEVEL_45;
+		case V4L2_MPEG_VIDC_VIDEO_H263_LEVEL_5_0:
+			return HAL_H263_LEVEL_50;
+		case V4L2_MPEG_VIDC_VIDEO_H263_LEVEL_6_0:
+			return HAL_H263_LEVEL_60;
+		case V4L2_MPEG_VIDC_VIDEO_H263_LEVEL_7_0:
+			return HAL_H263_LEVEL_70;
+		default:
+			goto unknown_value;
+		}
+	case V4L2_CID_MPEG_VIDC_VIDEO_VP8_PROFILE_LEVEL:
+		switch (value) {
+		case V4L2_MPEG_VIDC_VIDEO_VP8_VERSION_0:
+			return HAL_VPX_PROFILE_VERSION_0;
+		case V4L2_MPEG_VIDC_VIDEO_VP8_VERSION_1:
+			return HAL_VPX_PROFILE_VERSION_1;
+		case V4L2_MPEG_VIDC_VIDEO_VP8_VERSION_2:
+			return HAL_VPX_PROFILE_VERSION_2;
+		case V4L2_MPEG_VIDC_VIDEO_VP8_VERSION_3:
+			return HAL_VPX_PROFILE_VERSION_3;
+		case V4L2_MPEG_VIDC_VIDEO_VP8_UNUSED:
+			return HAL_VPX_PROFILE_UNUSED;
+		default:
+			goto unknown_value;
+		}
+	case V4L2_CID_MPEG_VIDC_VIDEO_HEVC_PROFILE:
+		switch (value) {
+		case V4L2_MPEG_VIDC_VIDEO_HEVC_PROFILE_MAIN:
+			return HAL_HEVC_PROFILE_MAIN;
+		case V4L2_MPEG_VIDC_VIDEO_HEVC_PROFILE_MAIN10:
+			return HAL_HEVC_PROFILE_MAIN10;
+		case V4L2_MPEG_VIDC_VIDEO_HEVC_PROFILE_MAIN_STILL_PIC:
+			return HAL_HEVC_PROFILE_MAIN_STILL_PIC;
+		default:
+			goto unknown_value;
+		}
+	case V4L2_CID_MPEG_VIDC_VIDEO_HEVC_TIER_LEVEL:
+		switch (value) {
+		case V4L2_MPEG_VIDC_VIDEO_HEVC_LEVEL_MAIN_TIER_LEVEL_1:
+			return HAL_HEVC_MAIN_TIER_LEVEL_1;
+		case V4L2_MPEG_VIDC_VIDEO_HEVC_LEVEL_MAIN_TIER_LEVEL_2:
+			return HAL_HEVC_MAIN_TIER_LEVEL_2;
+		case V4L2_MPEG_VIDC_VIDEO_HEVC_LEVEL_MAIN_TIER_LEVEL_2_1:
+			return HAL_HEVC_MAIN_TIER_LEVEL_2_1;
+		case V4L2_MPEG_VIDC_VIDEO_HEVC_LEVEL_MAIN_TIER_LEVEL_3:
+			return HAL_HEVC_MAIN_TIER_LEVEL_3;
+		case V4L2_MPEG_VIDC_VIDEO_HEVC_LEVEL_MAIN_TIER_LEVEL_3_1:
+			return HAL_HEVC_MAIN_TIER_LEVEL_3_1;
+		case V4L2_MPEG_VIDC_VIDEO_HEVC_LEVEL_MAIN_TIER_LEVEL_4:
+			return HAL_HEVC_MAIN_TIER_LEVEL_4;
+		case V4L2_MPEG_VIDC_VIDEO_HEVC_LEVEL_MAIN_TIER_LEVEL_4_1:
+			return HAL_HEVC_MAIN_TIER_LEVEL_4_1;
+		case V4L2_MPEG_VIDC_VIDEO_HEVC_LEVEL_MAIN_TIER_LEVEL_5:
+			return HAL_HEVC_MAIN_TIER_LEVEL_5;
+		case V4L2_MPEG_VIDC_VIDEO_HEVC_LEVEL_MAIN_TIER_LEVEL_5_1:
+			return HAL_HEVC_MAIN_TIER_LEVEL_5_1;
+		case V4L2_MPEG_VIDC_VIDEO_HEVC_LEVEL_MAIN_TIER_LEVEL_5_2:
+			return HAL_HEVC_MAIN_TIER_LEVEL_5_2;
+		case V4L2_MPEG_VIDC_VIDEO_HEVC_LEVEL_MAIN_TIER_LEVEL_6:
+			return HAL_HEVC_MAIN_TIER_LEVEL_6;
+		case V4L2_MPEG_VIDC_VIDEO_HEVC_LEVEL_MAIN_TIER_LEVEL_6_1:
+			return HAL_HEVC_MAIN_TIER_LEVEL_6_1;
+		case V4L2_MPEG_VIDC_VIDEO_HEVC_LEVEL_MAIN_TIER_LEVEL_6_2:
+			return HAL_HEVC_MAIN_TIER_LEVEL_6_2;
+		case V4L2_MPEG_VIDC_VIDEO_HEVC_LEVEL_HIGH_TIER_LEVEL_1:
+			return HAL_HEVC_HIGH_TIER_LEVEL_1;
+		case V4L2_MPEG_VIDC_VIDEO_HEVC_LEVEL_HIGH_TIER_LEVEL_2:
+			return HAL_HEVC_HIGH_TIER_LEVEL_2;
+		case V4L2_MPEG_VIDC_VIDEO_HEVC_LEVEL_HIGH_TIER_LEVEL_2_1:
+			return HAL_HEVC_HIGH_TIER_LEVEL_2_1;
+		case V4L2_MPEG_VIDC_VIDEO_HEVC_LEVEL_HIGH_TIER_LEVEL_3:
+			return HAL_HEVC_HIGH_TIER_LEVEL_3;
+		case V4L2_MPEG_VIDC_VIDEO_HEVC_LEVEL_HIGH_TIER_LEVEL_3_1:
+			return HAL_HEVC_HIGH_TIER_LEVEL_3_1;
+		case V4L2_MPEG_VIDC_VIDEO_HEVC_LEVEL_HIGH_TIER_LEVEL_4:
+			return HAL_HEVC_HIGH_TIER_LEVEL_4;
+		case V4L2_MPEG_VIDC_VIDEO_HEVC_LEVEL_HIGH_TIER_LEVEL_4_1:
+			return HAL_HEVC_HIGH_TIER_LEVEL_4_1;
+		case V4L2_MPEG_VIDC_VIDEO_HEVC_LEVEL_HIGH_TIER_LEVEL_5:
+			return HAL_HEVC_HIGH_TIER_LEVEL_5;
+		case V4L2_MPEG_VIDC_VIDEO_HEVC_LEVEL_HIGH_TIER_LEVEL_5_1:
+			return HAL_HEVC_HIGH_TIER_LEVEL_5_1;
+		case V4L2_MPEG_VIDC_VIDEO_HEVC_LEVEL_HIGH_TIER_LEVEL_5_2:
+			return HAL_HEVC_HIGH_TIER_LEVEL_5_2;
+		case V4L2_MPEG_VIDC_VIDEO_HEVC_LEVEL_HIGH_TIER_LEVEL_6:
+			return HAL_HEVC_HIGH_TIER_LEVEL_6;
+		case V4L2_MPEG_VIDC_VIDEO_HEVC_LEVEL_HIGH_TIER_LEVEL_6_1:
+			return HAL_HEVC_HIGH_TIER_LEVEL_6_1;
+		default:
+			goto unknown_value;
+		}
+	case V4L2_CID_MPEG_VIDC_VIDEO_ROTATION:
+		switch (value) {
+		case V4L2_CID_MPEG_VIDC_VIDEO_ROTATION_NONE:
+			return HAL_ROTATE_NONE;
+		case V4L2_CID_MPEG_VIDC_VIDEO_ROTATION_90:
+			return HAL_ROTATE_90;
+		case V4L2_CID_MPEG_VIDC_VIDEO_ROTATION_180:
+			return HAL_ROTATE_180;
+		case V4L2_CID_MPEG_VIDC_VIDEO_ROTATION_270:
+			return HAL_ROTATE_270;
+		default:
+			goto unknown_value;
+		}
+	case V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_MODE:
+		switch (value) {
+		case V4L2_MPEG_VIDEO_H264_LOOP_FILTER_MODE_DISABLED:
+			return HAL_H264_DB_MODE_DISABLE;
+		case V4L2_MPEG_VIDEO_H264_LOOP_FILTER_MODE_ENABLED:
+			return HAL_H264_DB_MODE_ALL_BOUNDARY;
+		case L_MODE:
+			return HAL_H264_DB_MODE_SKIP_SLICE_BOUNDARY;
+		default:
+			goto unknown_value;
+		}
+	case V4L2_CID_MPEG_VIDC_VIDEO_MBI_STATISTICS_MODE:
+		switch (value) {
+		case V4L2_CID_MPEG_VIDC_VIDEO_MBI_MODE_DEFAULT:
+			return HAL_STATISTICS_MODE_DEFAULT;
+		case V4L2_CID_MPEG_VIDC_VIDEO_MBI_MODE_1:
+			return HAL_STATISTICS_MODE_1;
+		case V4L2_CID_MPEG_VIDC_VIDEO_MBI_MODE_2:
+			return HAL_STATISTICS_MODE_2;
+		case V4L2_CID_MPEG_VIDC_VIDEO_MBI_MODE_3:
+			return HAL_STATISTICS_MODE_3;
+		default:
+			goto unknown_value;
+		}
+	case V4L2_CID_MPEG_VIDC_VIDEO_IFRAME_SIZE_TYPE:
+		switch (value) {
+		case V4L2_CID_MPEG_VIDC_VIDEO_IFRAME_SIZE_DEFAULT:
+			return HAL_IFRAMESIZE_TYPE_DEFAULT;
+		case V4L2_CID_MPEG_VIDC_VIDEO_IFRAME_SIZE_MEDIUM:
+			return HAL_IFRAMESIZE_TYPE_MEDIUM;
+		case V4L2_CID_MPEG_VIDC_VIDEO_IFRAME_SIZE_HUGE:
+			return HAL_IFRAMESIZE_TYPE_HUGE;
+		case V4L2_CID_MPEG_VIDC_VIDEO_IFRAME_SIZE_UNLIMITED:
+			return HAL_IFRAMESIZE_TYPE_UNLIMITED;
+		default:
+			goto unknown_value;
+		}
+	}
+
+unknown_value:
+	dprintk(VIDC_WARN, "Unknown control (%x, %d)\n", id, value);
+	return -EINVAL;
+}
+
+static int msm_venc_validate_qp_value(struct msm_vidc_inst *inst,
+					struct v4l2_ctrl *ctrl);
+
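+/*
+ * Translate a single V4L2 control into its HAL property and, unless the
+ * case handler defers it (by leaving property_id at 0), push the value to
+ * the firmware through the HFI session_set_property op.
+ */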
+static int try_set_ctrl(struct msm_vidc_inst *inst, struct v4l2_ctrl *ctrl)
+{
+	int rc = 0;
+	struct hal_request_iframe request_iframe;
+	struct hal_bitrate bitrate;
+	struct hal_profile_level profile_level;
+	struct hal_h264_entropy_control h264_entropy_control;
+	struct hal_quantization quantization;
+	struct hal_intra_period intra_period;
+	struct hal_idr_period idr_period;
+	struct hal_operations operations;
+	struct hal_intra_refresh intra_refresh;
+	struct hal_multi_slice_control multi_slice_control;
+	struct hal_h264_db_control h264_db_control;
+	struct hal_enable enable;
+	struct hal_h264_vui_timing_info vui_timing_info;
+	struct hal_quantization_range qp_range;
+	struct hal_h264_vui_bitstream_restrc vui_bitstream_restrict;
+	struct hal_preserve_text_quality preserve_text_quality;
+	u32 property_id = 0, property_val = 0;
+	void *pdata = NULL;
+	struct v4l2_ctrl *temp_ctrl = NULL;
+	struct hfi_device *hdev;
+	struct hal_extradata_enable extra;
+	struct hal_mpeg4_time_resolution time_res;
+	struct hal_ltr_use use_ltr;
+	struct hal_ltr_mark mark_ltr;
+	struct hal_hybrid_hierp hyb_hierp;
+	u32 hier_p_layers = 0, hier_b_layers = 0, mbi_statistics_mode = 0;
+	enum hal_perf_mode venc_mode;
+	int max_hierp_layers;
+	int baselayerid = 0;
+	int frameqp = 0;
+	int pic_order_cnt = 0;
+	struct hal_video_signal_info signal_info = {0};
+	enum hal_iframesize_type iframesize_type = HAL_IFRAMESIZE_TYPE_DEFAULT;
+
+	if (!inst || !inst->core || !inst->core->device) {
+		dprintk(VIDC_ERR, "%s invalid parameters\n", __func__);
+		return -EINVAL;
+	}
+	hdev = inst->core->device;
+
+	/* Small helper macro for quickly getting a control and err checking */
+#define TRY_GET_CTRL(__ctrl_id) ({ \
+		struct v4l2_ctrl *__temp; \
+		__temp = get_ctrl_from_cluster( \
+			__ctrl_id, \
+			ctrl->cluster, ctrl->ncontrols); \
+		if (!__temp) { \
+			dprintk(VIDC_ERR, "Can't find %s (%x) in cluster\n", \
+				#__ctrl_id, __ctrl_id); \
+			/* Clusters are hardcoded, if we can't find */ \
+			/* something then things are massively screwed up */ \
+			BUG_ON(1); \
+		} \
+		__temp; \
+	})
+
+	/*
+	 * Unlock the control prior to setting it in the hardware. Otherwise
+	 * lower level code that attempts to do a get_ctrl() will end up
+	 * deadlocking.
+	 */
+	v4l2_ctrl_unlock(ctrl);
+
+	switch (ctrl->id) {
+	case V4L2_CID_MPEG_VIDC_VIDEO_IDR_PERIOD:
+		if (inst->fmts[CAPTURE_PORT].fourcc != V4L2_PIX_FMT_H264 &&
+			inst->fmts[CAPTURE_PORT].fourcc !=
+				V4L2_PIX_FMT_H264_NO_SC &&
+			inst->fmts[CAPTURE_PORT].fourcc !=
+				V4L2_PIX_FMT_HEVC) {
+			dprintk(VIDC_ERR,
+				"Control %#x only valid for H264 and HEVC\n",
+				ctrl->id);
+			rc = -ENOTSUPP;
+			break;
+		}
+
+		property_id = HAL_CONFIG_VENC_IDR_PERIOD;
+		idr_period.idr_period = ctrl->val;
+		pdata = &idr_period;
+		break;
+	case V4L2_CID_MPEG_VIDC_VIDEO_NUM_B_FRAMES:
+	case V4L2_CID_MPEG_VIDC_VIDEO_NUM_P_FRAMES:
+	{
+		int num_p, num_b;
+		u32 max_num_b_frames;
+
+		temp_ctrl = TRY_GET_CTRL(V4L2_CID_MPEG_VIDC_VIDEO_NUM_B_FRAMES);
+		num_b = temp_ctrl->val;
+
+		temp_ctrl = TRY_GET_CTRL(V4L2_CID_MPEG_VIDC_VIDEO_NUM_P_FRAMES);
+		num_p = temp_ctrl->val;
+
+		if (ctrl->id == V4L2_CID_MPEG_VIDC_VIDEO_NUM_P_FRAMES)
+			num_p = ctrl->val;
+		else if (ctrl->id == V4L2_CID_MPEG_VIDC_VIDEO_NUM_B_FRAMES)
+			num_b = ctrl->val;
+
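+		/*
+		 * Advertise a non-zero max B-frame count to the firmware only
+		 * when at least one B-frame was actually requested.
+		 */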
+		max_num_b_frames = num_b ? MAX_NUM_B_FRAMES : 0;
+		property_id = HAL_PARAM_VENC_MAX_NUM_B_FRAMES;
+		pdata = &max_num_b_frames;
+		rc = call_hfi_op(hdev, session_set_property,
+			(void *)inst->session, property_id, pdata);
+		if (rc) {
+			dprintk(VIDC_ERR,
+				"Failed : Setprop MAX_NUM_B_FRAMES %d\n",
+				rc);
+			break;
+		}
+
+		property_id = HAL_CONFIG_VENC_INTRA_PERIOD;
+		intra_period.pframes = num_p;
+		intra_period.bframes = num_b;
+
+		/*
+		 * In case the firmware does not have B-frame support,
+		 * offload the B-frame count to the P-frame count to make up
+		 * for the requested intra period.
+		 */
+		if (!inst->capability.bframe.max) {
+			intra_period.pframes = num_p + num_b;
+			intra_period.bframes = 0;
+			dprintk(VIDC_DBG,
+				"No bframe support, changing pframe from %d to %d\n",
+				num_p, intra_period.pframes);
+		}
+		pdata = &intra_period;
+		break;
+	}
+	case V4L2_CID_MPEG_VIDC_VIDEO_REQUEST_IFRAME:
+		property_id = HAL_CONFIG_VENC_REQUEST_IFRAME;
+		request_iframe.enable = true;
+		pdata = &request_iframe;
+		break;
+	case V4L2_CID_MPEG_VIDC_VIDEO_RATE_CONTROL:
+	case V4L2_CID_MPEG_VIDEO_BITRATE_MODE:
+	{
+		int final_mode = 0;
+		struct v4l2_ctrl update_ctrl = {.id = 0};
+
+		/*
+		 * V4L2_CID_MPEG_VIDEO_BITRATE_MODE and _RATE_CONTROL
+		 * manipulate the same thing. If one control's state
+		 * changes, try to mirror the state in the other control's
+		 * value.
+		 */
+		if (ctrl->id == V4L2_CID_MPEG_VIDEO_BITRATE_MODE) {
+			if (ctrl->val == V4L2_MPEG_VIDEO_BITRATE_MODE_VBR) {
+				final_mode = HAL_RATE_CONTROL_VBR_CFR;
+				update_ctrl.val =
+				V4L2_CID_MPEG_VIDC_VIDEO_RATE_CONTROL_VBR_CFR;
+			} else {/* ...if (ctrl->val == _BITRATE_MODE_CBR) */
+				final_mode = HAL_RATE_CONTROL_CBR_CFR;
+				update_ctrl.val =
+				V4L2_CID_MPEG_VIDC_VIDEO_RATE_CONTROL_CBR_CFR;
+			}
+
+			update_ctrl.id = V4L2_CID_MPEG_VIDC_VIDEO_RATE_CONTROL;
+
+		} else if (ctrl->id == V4L2_CID_MPEG_VIDC_VIDEO_RATE_CONTROL) {
+			switch (ctrl->val) {
+			case V4L2_CID_MPEG_VIDC_VIDEO_RATE_CONTROL_OFF:
+			case V4L2_CID_MPEG_VIDC_VIDEO_RATE_CONTROL_VBR_VFR:
+			case V4L2_CID_MPEG_VIDC_VIDEO_RATE_CONTROL_VBR_CFR:
+				update_ctrl.val =
+					V4L2_MPEG_VIDEO_BITRATE_MODE_VBR;
+				break;
+			case V4L2_CID_MPEG_VIDC_VIDEO_RATE_CONTROL_CBR_VFR:
+			case V4L2_CID_MPEG_VIDC_VIDEO_RATE_CONTROL_CBR_CFR:
+			case V4L2_CID_MPEG_VIDC_VIDEO_RATE_CONTROL_MBR_CFR:
+			case V4L2_CID_MPEG_VIDC_VIDEO_RATE_CONTROL_MBR_VFR:
+				update_ctrl.val =
+					V4L2_MPEG_VIDEO_BITRATE_MODE_CBR;
+				break;
+			}
+
+			final_mode = ctrl->val;
+			update_ctrl.id = V4L2_CID_MPEG_VIDEO_BITRATE_MODE;
+		}
+
+		if (update_ctrl.id) {
+			temp_ctrl = TRY_GET_CTRL(update_ctrl.id);
+			temp_ctrl->val = update_ctrl.val;
+		}
+
+		property_id = HAL_PARAM_VENC_RATE_CONTROL;
+		property_val = final_mode;
+		pdata = &property_val;
+
+		break;
+	}
+	case V4L2_CID_MPEG_VIDEO_BITRATE:
+	{
+		property_id = HAL_CONFIG_VENC_TARGET_BITRATE;
+		bitrate.bit_rate = ctrl->val;
+		bitrate.layer_id = 0;
+		pdata = &bitrate;
+		break;
+	}
+	case V4L2_CID_MPEG_VIDEO_BITRATE_PEAK:
+	{
+		struct v4l2_ctrl *avg_bitrate = TRY_GET_CTRL(
+			V4L2_CID_MPEG_VIDEO_BITRATE);
+
+		if (ctrl->val < avg_bitrate->val) {
+			dprintk(VIDC_ERR,
+				"Peak bitrate (%d) is lower than average bitrate (%d)\n",
+				ctrl->val, avg_bitrate->val);
+			rc = -EINVAL;
+			break;
+		} else if (ctrl->val < avg_bitrate->val * 2) {
+			dprintk(VIDC_WARN,
+				"Peak bitrate (%d) should ideally be at least twice the average bitrate (%d)\n",
+				ctrl->val, avg_bitrate->val);
+		}
+
+		property_id = HAL_CONFIG_VENC_MAX_BITRATE;
+		bitrate.bit_rate = ctrl->val;
+		bitrate.layer_id = 0;
+		pdata = &bitrate;
+		break;
+	}
+	case V4L2_CID_MPEG_VIDEO_H264_ENTROPY_MODE:
+		temp_ctrl = TRY_GET_CTRL(
+			V4L2_CID_MPEG_VIDC_VIDEO_H264_CABAC_MODEL);
+
+		property_id = HAL_PARAM_VENC_H264_ENTROPY_CONTROL;
+		h264_entropy_control.entropy_mode = venc_v4l2_to_hal(
+			V4L2_CID_MPEG_VIDEO_H264_ENTROPY_MODE, ctrl->val);
+		h264_entropy_control.cabac_model = venc_v4l2_to_hal(
+			V4L2_CID_MPEG_VIDC_VIDEO_H264_CABAC_MODEL,
+			temp_ctrl->val);
+		pdata = &h264_entropy_control;
+		break;
+	case V4L2_CID_MPEG_VIDC_VIDEO_H264_CABAC_MODEL:
+		temp_ctrl = TRY_GET_CTRL(V4L2_CID_MPEG_VIDEO_H264_ENTROPY_MODE);
+
+		property_id = HAL_PARAM_VENC_H264_ENTROPY_CONTROL;
+		h264_entropy_control.cabac_model = venc_v4l2_to_hal(
+			V4L2_CID_MPEG_VIDC_VIDEO_H264_CABAC_MODEL, ctrl->val);
+		h264_entropy_control.entropy_mode = venc_v4l2_to_hal(
+			V4L2_CID_MPEG_VIDEO_H264_ENTROPY_MODE,
+			temp_ctrl->val);
+		pdata = &h264_entropy_control;
+		break;
+	case V4L2_CID_MPEG_VIDEO_MPEG4_PROFILE:
+		temp_ctrl = TRY_GET_CTRL(V4L2_CID_MPEG_VIDEO_MPEG4_LEVEL);
+
+		property_id = HAL_PARAM_PROFILE_LEVEL_CURRENT;
+		profile_level.profile = venc_v4l2_to_hal(ctrl->id,
+						ctrl->val);
+		profile_level.level = venc_v4l2_to_hal(
+				V4L2_CID_MPEG_VIDEO_MPEG4_LEVEL,
+				temp_ctrl->val);
+		pdata = &profile_level;
+		break;
+	case V4L2_CID_MPEG_VIDEO_MPEG4_LEVEL:
+		temp_ctrl = TRY_GET_CTRL(V4L2_CID_MPEG_VIDEO_MPEG4_PROFILE);
+
+		property_id = HAL_PARAM_PROFILE_LEVEL_CURRENT;
+		profile_level.level = venc_v4l2_to_hal(ctrl->id,
+							ctrl->val);
+		profile_level.profile = venc_v4l2_to_hal(
+				V4L2_CID_MPEG_VIDEO_MPEG4_PROFILE,
+				temp_ctrl->val);
+		pdata = &profile_level;
+		break;
+	case V4L2_CID_MPEG_VIDEO_H264_PROFILE:
+		temp_ctrl = TRY_GET_CTRL(V4L2_CID_MPEG_VIDEO_H264_LEVEL);
+
+		property_id = HAL_PARAM_PROFILE_LEVEL_CURRENT;
+		profile_level.profile = venc_v4l2_to_hal(ctrl->id,
+							ctrl->val);
+		profile_level.level = venc_v4l2_to_hal(
+				V4L2_CID_MPEG_VIDEO_H264_LEVEL,
+				temp_ctrl->val);
+		pdata = &profile_level;
+		dprintk(VIDC_DBG, "profile: %d\n",
+			profile_level.profile);
+		break;
+	case V4L2_CID_MPEG_VIDEO_H264_LEVEL:
+		temp_ctrl = TRY_GET_CTRL(V4L2_CID_MPEG_VIDEO_H264_PROFILE);
+
+		property_id = HAL_PARAM_PROFILE_LEVEL_CURRENT;
+		profile_level.level = venc_v4l2_to_hal(ctrl->id,
+							ctrl->val);
+		profile_level.profile = venc_v4l2_to_hal(
+				V4L2_CID_MPEG_VIDEO_H264_PROFILE,
+				temp_ctrl->val);
+		pdata = &profile_level;
+		dprintk(VIDC_DBG, "Level: %d\n",
+			profile_level.level);
+		break;
+	case V4L2_CID_MPEG_VIDC_VIDEO_H263_PROFILE:
+		temp_ctrl = TRY_GET_CTRL(V4L2_CID_MPEG_VIDC_VIDEO_H263_LEVEL);
+
+		property_id = HAL_PARAM_PROFILE_LEVEL_CURRENT;
+		profile_level.profile = venc_v4l2_to_hal(ctrl->id,
+							ctrl->val);
+		profile_level.level = venc_v4l2_to_hal(
+				V4L2_CID_MPEG_VIDC_VIDEO_H263_LEVEL,
+				temp_ctrl->val);
+		pdata = &profile_level;
+		break;
+	case V4L2_CID_MPEG_VIDC_VIDEO_H263_LEVEL:
+		temp_ctrl = TRY_GET_CTRL(V4L2_CID_MPEG_VIDC_VIDEO_H263_PROFILE);
+
+		property_id = HAL_PARAM_PROFILE_LEVEL_CURRENT;
+		profile_level.level = venc_v4l2_to_hal(ctrl->id,
+							ctrl->val);
+		profile_level.profile = venc_v4l2_to_hal(
+				V4L2_CID_MPEG_VIDC_VIDEO_H263_PROFILE,
+				temp_ctrl->val);
+		pdata = &profile_level;
+		break;
+	case V4L2_CID_MPEG_VIDC_VIDEO_VP8_PROFILE_LEVEL:
+		property_id = HAL_PARAM_PROFILE_LEVEL_CURRENT;
+		profile_level.profile = venc_v4l2_to_hal(
+				V4L2_CID_MPEG_VIDC_VIDEO_VP8_PROFILE_LEVEL,
+				ctrl->val);
+		profile_level.level = HAL_VPX_PROFILE_UNUSED;
+		pdata = &profile_level;
+		break;
+	case V4L2_CID_MPEG_VIDC_VIDEO_HEVC_PROFILE:
+		temp_ctrl =
+			TRY_GET_CTRL(V4L2_CID_MPEG_VIDC_VIDEO_HEVC_TIER_LEVEL);
+
+		property_id = HAL_PARAM_PROFILE_LEVEL_CURRENT;
+		profile_level.profile = venc_v4l2_to_hal(ctrl->id,
+							ctrl->val);
+		profile_level.level = venc_v4l2_to_hal(
+				V4L2_CID_MPEG_VIDC_VIDEO_HEVC_TIER_LEVEL,
+				temp_ctrl->val);
+		pdata = &profile_level;
+		break;
+	case V4L2_CID_MPEG_VIDC_VIDEO_HEVC_TIER_LEVEL:
+		temp_ctrl = TRY_GET_CTRL(V4L2_CID_MPEG_VIDC_VIDEO_HEVC_PROFILE);
+
+		property_id = HAL_PARAM_PROFILE_LEVEL_CURRENT;
+		profile_level.level = venc_v4l2_to_hal(ctrl->id,
+							ctrl->val);
+		profile_level.profile = venc_v4l2_to_hal(
+				V4L2_CID_MPEG_VIDC_VIDEO_HEVC_PROFILE,
+				temp_ctrl->val);
+		pdata = &profile_level;
+		break;
+	case V4L2_CID_MPEG_VIDC_VIDEO_ROTATION:
+	{
+		struct v4l2_ctrl *deinterlace = NULL;
+		if (!(inst->capability.pixelprocess_capabilities &
+			HAL_VIDEO_ENCODER_ROTATION_CAPABILITY)) {
+			dprintk(VIDC_ERR, "Rotation not supported: %#x\n",
+				ctrl->id);
+			rc = -ENOTSUPP;
+			break;
+		}
+		deinterlace =
+			TRY_GET_CTRL(V4L2_CID_MPEG_VIDC_VIDEO_DEINTERLACE);
+		if (ctrl->val && deinterlace && deinterlace->val !=
+				V4L2_CID_MPEG_VIDC_VIDEO_DEINTERLACE_DISABLED) {
+			dprintk(VIDC_ERR,
+				"Rotation not supported with deinterlacing\n");
+			rc = -EINVAL;
+			break;
+		}
+		property_id = HAL_CONFIG_VPE_OPERATIONS;
+		operations.rotate = venc_v4l2_to_hal(
+				V4L2_CID_MPEG_VIDC_VIDEO_ROTATION,
+				ctrl->val);
+		operations.flip = HAL_FLIP_NONE;
+		pdata = &operations;
+		break;
+	}
+	case V4L2_CID_MPEG_VIDEO_H264_I_FRAME_QP: {
+		struct v4l2_ctrl *qpp, *qpb;
+
+		qpp = TRY_GET_CTRL(V4L2_CID_MPEG_VIDEO_H264_P_FRAME_QP);
+		qpb = TRY_GET_CTRL(V4L2_CID_MPEG_VIDEO_H264_B_FRAME_QP);
+
+		property_id = HAL_PARAM_VENC_SESSION_QP;
+		quantization.qpi = ctrl->val;
+		quantization.qpp = qpp->val;
+		quantization.qpb = qpb->val;
+		quantization.layer_id = 0;
+
+		pdata = &quantization;
+		break;
+	}
+	case V4L2_CID_MPEG_VIDEO_H264_P_FRAME_QP: {
+		struct v4l2_ctrl *qpi, *qpb;
+
+		qpi = TRY_GET_CTRL(V4L2_CID_MPEG_VIDEO_H264_I_FRAME_QP);
+		qpb = TRY_GET_CTRL(V4L2_CID_MPEG_VIDEO_H264_B_FRAME_QP);
+
+		property_id = HAL_PARAM_VENC_SESSION_QP;
+		quantization.qpp = ctrl->val;
+		quantization.qpi = qpi->val;
+		quantization.qpb = qpb->val;
+		quantization.layer_id = 0;
+
+		pdata = &quantization;
+		break;
+	}
+	case V4L2_CID_MPEG_VIDEO_H264_B_FRAME_QP: {
+		struct v4l2_ctrl *qpi, *qpp;
+
+		qpi = TRY_GET_CTRL(V4L2_CID_MPEG_VIDEO_H264_I_FRAME_QP);
+		qpp = TRY_GET_CTRL(V4L2_CID_MPEG_VIDEO_H264_P_FRAME_QP);
+
+		property_id = HAL_PARAM_VENC_SESSION_QP;
+		quantization.qpb = ctrl->val;
+		quantization.qpi = qpi->val;
+		quantization.qpp = qpp->val;
+		quantization.layer_id = 0;
+
+		pdata = &quantization;
+		break;
+	}
+	case V4L2_CID_MPEG_VIDEO_H263_I_FRAME_QP: {
+		struct v4l2_ctrl *qpp, *qpb;
+
+		qpp = TRY_GET_CTRL(V4L2_CID_MPEG_VIDEO_H263_P_FRAME_QP);
+		qpb = TRY_GET_CTRL(V4L2_CID_MPEG_VIDEO_H263_B_FRAME_QP);
+
+		property_id = HAL_PARAM_VENC_SESSION_QP;
+		quantization.qpi = ctrl->val;
+		quantization.qpp = qpp->val;
+		quantization.qpb = qpb->val;
+		quantization.layer_id = 0;
+
+		pdata = &quantization;
+		break;
+	}
+	case V4L2_CID_MPEG_VIDEO_H263_P_FRAME_QP: {
+		struct v4l2_ctrl *qpi, *qpb;
+
+		qpi = TRY_GET_CTRL(V4L2_CID_MPEG_VIDEO_H263_I_FRAME_QP);
+		qpb = TRY_GET_CTRL(V4L2_CID_MPEG_VIDEO_H263_B_FRAME_QP);
+
+		property_id = HAL_PARAM_VENC_SESSION_QP;
+		quantization.qpp = ctrl->val;
+		quantization.qpi = qpi->val;
+		quantization.qpb = qpb->val;
+		quantization.layer_id = 0;
+
+		pdata = &quantization;
+		break;
+	}
+	case V4L2_CID_MPEG_VIDEO_H263_B_FRAME_QP: {
+		struct v4l2_ctrl *qpi, *qpp;
+
+		qpi = TRY_GET_CTRL(V4L2_CID_MPEG_VIDEO_H263_I_FRAME_QP);
+		qpp = TRY_GET_CTRL(V4L2_CID_MPEG_VIDEO_H263_P_FRAME_QP);
+
+		property_id = HAL_PARAM_VENC_SESSION_QP;
+		quantization.qpb = ctrl->val;
+		quantization.qpi = qpi->val;
+		quantization.qpp = qpp->val;
+		quantization.layer_id = 0;
+
+		pdata = &quantization;
+		break;
+	}
+	case V4L2_CID_MPEG_VIDEO_VPX_I_FRAME_QP: {
+		struct v4l2_ctrl *qpp;
+
+		qpp = TRY_GET_CTRL(V4L2_CID_MPEG_VIDEO_VPX_P_FRAME_QP);
+
+		property_id = HAL_PARAM_VENC_SESSION_QP;
+		quantization.qpi = ctrl->val;
+		quantization.qpp = qpp->val;
+		/* Bframes are not supported for VPX */
+		quantization.qpb = 0;
+		quantization.layer_id = 0;
+
+		pdata = &quantization;
+		break;
+	}
+	case V4L2_CID_MPEG_VIDEO_VPX_P_FRAME_QP: {
+		struct v4l2_ctrl *qpi;
+
+		qpi = TRY_GET_CTRL(V4L2_CID_MPEG_VIDEO_VPX_I_FRAME_QP);
+
+		property_id = HAL_PARAM_VENC_SESSION_QP;
+		quantization.qpp = ctrl->val;
+		quantization.qpi = qpi->val;
+		/* Bframes are not supported for VPX */
+		quantization.qpb = 0;
+		quantization.layer_id = 0;
+
+		pdata = &quantization;
+		break;
+	}
+	case V4L2_CID_MPEG_VIDEO_H264_MIN_QP: {
+		struct v4l2_ctrl *qp_max;
+
+		qp_max = TRY_GET_CTRL(V4L2_CID_MPEG_VIDEO_H264_MAX_QP);
+		if (ctrl->val >= qp_max->val) {
+			dprintk(VIDC_ERR,
+					"Bad range: Min QP (%d) > Max QP(%d)\n",
+					ctrl->val, qp_max->val);
+			rc = -ERANGE;
+			break;
+		}
+
+		property_id = HAL_PARAM_VENC_SESSION_QP_RANGE;
+		qp_range.layer_id = 0;
+		qp_range.max_qp = qp_max->val;
+		qp_range.min_qp = ctrl->val;
+
+		pdata = &qp_range;
+		break;
+	}
+	case V4L2_CID_MPEG_VIDEO_H264_MAX_QP: {
+		struct v4l2_ctrl *qp_min;
+
+		qp_min = TRY_GET_CTRL(V4L2_CID_MPEG_VIDEO_H264_MIN_QP);
+		if (ctrl->val <= qp_min->val) {
+			dprintk(VIDC_ERR,
+					"Bad range: Max QP (%d) < Min QP(%d)\n",
+					ctrl->val, qp_min->val);
+			rc = -ERANGE;
+			break;
+		}
+
+		property_id = HAL_PARAM_VENC_SESSION_QP_RANGE;
+		qp_range.layer_id = 0;
+		qp_range.max_qp = ctrl->val;
+		qp_range.min_qp = qp_min->val;
+
+		pdata = &qp_range;
+		break;
+	}
+	case V4L2_CID_MPEG_VIDEO_MPEG4_MIN_QP: {
+		struct v4l2_ctrl *qp_max;
+
+		qp_max = TRY_GET_CTRL(V4L2_CID_MPEG_VIDEO_MPEG4_MAX_QP);
+		if (ctrl->val >= qp_max->val) {
+			dprintk(VIDC_ERR,
+					"Bad range: Min QP (%d) > Max QP(%d)\n",
+					ctrl->val, qp_max->val);
+			rc = -ERANGE;
+			break;
+		}
+
+		property_id = HAL_PARAM_VENC_SESSION_QP_RANGE;
+		qp_range.layer_id = 0;
+		qp_range.max_qp = qp_max->val;
+		qp_range.min_qp = ctrl->val;
+
+		pdata = &qp_range;
+		break;
+	}
+	case V4L2_CID_MPEG_VIDEO_MPEG4_MAX_QP: {
+		struct v4l2_ctrl *qp_min;
+
+		qp_min = TRY_GET_CTRL(V4L2_CID_MPEG_VIDEO_MPEG4_MIN_QP);
+		if (ctrl->val <= qp_min->val) {
+			dprintk(VIDC_ERR,
+					"Bad range: Max QP (%d) < Min QP(%d)\n",
+					ctrl->val, qp_min->val);
+			rc = -ERANGE;
+			break;
+		}
+
+		property_id = HAL_PARAM_VENC_SESSION_QP_RANGE;
+		qp_range.layer_id = 0;
+		qp_range.max_qp = ctrl->val;
+		qp_range.min_qp = qp_min->val;
+
+		pdata = &qp_range;
+		break;
+	}
+	case V4L2_CID_MPEG_VIDEO_VPX_MIN_QP: {
+		struct v4l2_ctrl *qp_max;
+
+		qp_max = TRY_GET_CTRL(V4L2_CID_MPEG_VIDEO_VPX_MAX_QP);
+		if (ctrl->val >= qp_max->val) {
+			dprintk(VIDC_ERR,
+					"Bad range: Min QP (%d) > Max QP(%d)\n",
+					ctrl->val, qp_max->val);
+			rc = -ERANGE;
+			break;
+		}
+		property_id = HAL_PARAM_VENC_SESSION_QP_RANGE;
+		qp_range.layer_id = 0;
+		qp_range.max_qp = qp_max->val;
+		qp_range.min_qp = ctrl->val;
+		pdata = &qp_range;
+		break;
+	}
+	case V4L2_CID_MPEG_VIDEO_VPX_MAX_QP: {
+		struct v4l2_ctrl *qp_min;
+
+		qp_min = TRY_GET_CTRL(V4L2_CID_MPEG_VIDEO_VPX_MIN_QP);
+		if (ctrl->val <= qp_min->val) {
+			dprintk(VIDC_ERR,
+					"Bad range: Max QP (%d) < Min QP(%d)\n",
+					ctrl->val, qp_min->val);
+			rc = -ERANGE;
+			break;
+		}
+		property_id = HAL_PARAM_VENC_SESSION_QP_RANGE;
+		qp_range.layer_id = 0;
+		qp_range.max_qp = ctrl->val;
+		qp_range.min_qp = qp_min->val;
+		pdata = &qp_range;
+		break;
+	}
+	case V4L2_CID_MPEG_VIDC_VIDEO_VP8_MIN_QP: {
+		struct v4l2_ctrl *qp_max;
+
+		qp_max = TRY_GET_CTRL(V4L2_CID_MPEG_VIDC_VIDEO_VP8_MAX_QP);
+		property_id = HAL_PARAM_VENC_SESSION_QP_RANGE;
+		qp_range.layer_id = 0;
+		qp_range.max_qp = qp_max->val;
+		qp_range.min_qp = ctrl->val;
+		pdata = &qp_range;
+		break;
+	}
+	case V4L2_CID_MPEG_VIDC_VIDEO_VP8_MAX_QP: {
+		struct v4l2_ctrl *qp_min;
+
+		qp_min = TRY_GET_CTRL(V4L2_CID_MPEG_VIDC_VIDEO_VP8_MIN_QP);
+		property_id = HAL_PARAM_VENC_SESSION_QP_RANGE;
+		qp_range.layer_id = 0;
+		qp_range.max_qp = ctrl->val;
+		qp_range.min_qp = qp_min->val;
+		pdata = &qp_range;
+		break;
+	}
+	case V4L2_CID_MPEG_VIDEO_MIN_QP_PACKED: {
+		struct v4l2_ctrl *qp_max;
+
+		qp_max = TRY_GET_CTRL(V4L2_CID_MPEG_VIDEO_MAX_QP_PACKED);
+		if (ctrl->val >= qp_max->val) {
+			dprintk(VIDC_ERR,
+					"Bad range: Min QP PACKED (0x%x) > Max QP PACKED (0x%x)\n",
+					ctrl->val, qp_max->val);
+			rc = -ERANGE;
+			break;
+		}
+
+		property_id = HAL_PARAM_VENC_SESSION_QP_RANGE_PACKED;
+		qp_range.layer_id = 0;
+		qp_range.max_qp = qp_max->val;
+		qp_range.min_qp = ctrl->val;
+
+		pdata = &qp_range;
+		break;
+	}
+	case V4L2_CID_MPEG_VIDEO_MAX_QP_PACKED: {
+		struct v4l2_ctrl *qp_min;
+
+		qp_min = TRY_GET_CTRL(V4L2_CID_MPEG_VIDEO_MIN_QP_PACKED);
+		if (ctrl->val <= qp_min->val) {
+			dprintk(VIDC_ERR,
+					"Bad range: Max QP PACKED (%d) < Min QP PACKED (%d)\n",
+					ctrl->val, qp_min->val);
+			rc = -ERANGE;
+			break;
+		}
+
+		property_id = HAL_PARAM_VENC_SESSION_QP_RANGE_PACKED;
+		qp_range.layer_id = 0;
+		qp_range.max_qp = ctrl->val;
+		qp_range.min_qp = qp_min->val;
+
+		pdata = &qp_range;
+		break;
+	}
+	case V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MODE: {
+		int temp = 0;
+
+		switch (ctrl->val) {
+		case V4L2_MPEG_VIDEO_MULTI_SICE_MODE_MAX_MB:
+			temp = V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MAX_MB;
+			break;
+		case V4L2_MPEG_VIDEO_MULTI_SICE_MODE_MAX_BYTES:
+			temp = V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MAX_BYTES;
+			break;
+		case V4L2_MPEG_VIDEO_MULTI_SLICE_GOB:
+			temp = V4L2_CID_MPEG_VIDEO_MULTI_SLICE_GOB;
+			break;
+		case V4L2_MPEG_VIDEO_MULTI_SLICE_MODE_SINGLE:
+		default:
+			temp = 0;
+			break;
+		}
+
+		if (temp)
+			temp_ctrl = TRY_GET_CTRL(temp);
+
+		property_id = HAL_PARAM_VENC_MULTI_SLICE_CONTROL;
+		multi_slice_control.multi_slice = ctrl->val;
+		multi_slice_control.slice_size = temp ? temp_ctrl->val : 0;
+
+		pdata = &multi_slice_control;
+		break;
+	}
+	case V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MAX_BYTES:
+	case V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MAX_MB:
+	case V4L2_CID_MPEG_VIDEO_MULTI_SLICE_GOB:
+		temp_ctrl = TRY_GET_CTRL(V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MODE);
+
+		property_id = HAL_PARAM_VENC_MULTI_SLICE_CONTROL;
+		multi_slice_control.multi_slice = temp_ctrl->val;
+		multi_slice_control.slice_size = ctrl->val;
+		pdata = &multi_slice_control;
+		break;
+	case V4L2_CID_MPEG_VIDEO_MULTI_SLICE_DELIVERY_MODE: {
+		bool codec_avc =
+			inst->fmts[CAPTURE_PORT].fourcc == V4L2_PIX_FMT_H264 ||
+			inst->fmts[CAPTURE_PORT].fourcc ==
+							V4L2_PIX_FMT_H264_NO_SC;
+
+		temp_ctrl = TRY_GET_CTRL(V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MODE);
+		if (codec_avc && temp_ctrl->val ==
+				V4L2_MPEG_VIDEO_MULTI_SICE_MODE_MAX_MB) {
+			property_id = HAL_PARAM_VENC_SLICE_DELIVERY_MODE;
+			enable.enable = true;
+		} else {
+			dprintk(VIDC_WARN,
+				"Failed : slice delivery mode is valid "
+				"only for H264 encoder and MB based slicing\n");
+			enable.enable = false;
+		}
+		pdata = &enable;
+		break;
+	}
+	case V4L2_CID_MPEG_VIDC_VIDEO_INTRA_REFRESH_MODE: {
+		struct v4l2_ctrl *air_mbs, *air_ref, *cir_mbs;
+		bool is_cont_intra_supported = false;
+
+		air_mbs = TRY_GET_CTRL(V4L2_CID_MPEG_VIDC_VIDEO_AIR_MBS);
+		air_ref = TRY_GET_CTRL(V4L2_CID_MPEG_VIDC_VIDEO_AIR_REF);
+		cir_mbs = TRY_GET_CTRL(V4L2_CID_MPEG_VIDC_VIDEO_CIR_MBS);
+
+		is_cont_intra_supported =
+		(inst->fmts[CAPTURE_PORT].fourcc == V4L2_PIX_FMT_H264) ||
+		(inst->fmts[CAPTURE_PORT].fourcc == V4L2_PIX_FMT_HEVC);
+
+		if (is_cont_intra_supported) {
+			if (ctrl->val != HAL_INTRA_REFRESH_NONE)
+				enable.enable = true;
+			else
+				enable.enable = false;
+
+			rc = call_hfi_op(hdev, session_set_property,
+				(void *)inst->session,
+				HAL_PARAM_VENC_CONSTRAINED_INTRA_PRED, &enable);
+			if (rc) {
+				dprintk(VIDC_ERR,
+					"Failed to set constrained intra\n");
+				rc = -EINVAL;
+				break;
+			}
+		}
+
+		property_id = HAL_PARAM_VENC_INTRA_REFRESH;
+
+		intra_refresh.mode = ctrl->val;
+		intra_refresh.air_mbs = air_mbs->val;
+		intra_refresh.air_ref = air_ref->val;
+		intra_refresh.cir_mbs = cir_mbs->val;
+
+		pdata = &intra_refresh;
+		break;
+	}
+	case V4L2_CID_MPEG_VIDC_VIDEO_AIR_MBS: {
+		struct v4l2_ctrl *ir_mode, *air_ref, *cir_mbs;
+		ir_mode = TRY_GET_CTRL(
+				V4L2_CID_MPEG_VIDC_VIDEO_INTRA_REFRESH_MODE);
+		air_ref = TRY_GET_CTRL(V4L2_CID_MPEG_VIDC_VIDEO_AIR_REF);
+		cir_mbs = TRY_GET_CTRL(V4L2_CID_MPEG_VIDC_VIDEO_CIR_MBS);
+
+		property_id = HAL_PARAM_VENC_INTRA_REFRESH;
+
+		intra_refresh.air_mbs = ctrl->val;
+		intra_refresh.mode = ir_mode->val;
+		intra_refresh.air_ref = air_ref->val;
+		intra_refresh.cir_mbs = cir_mbs->val;
+
+		pdata = &intra_refresh;
+		break;
+	}
+	case V4L2_CID_MPEG_VIDC_VIDEO_AIR_REF: {
+		struct v4l2_ctrl *ir_mode, *air_mbs, *cir_mbs;
+		ir_mode = TRY_GET_CTRL(
+				V4L2_CID_MPEG_VIDC_VIDEO_INTRA_REFRESH_MODE);
+		air_mbs = TRY_GET_CTRL(V4L2_CID_MPEG_VIDC_VIDEO_AIR_MBS);
+		cir_mbs = TRY_GET_CTRL(V4L2_CID_MPEG_VIDC_VIDEO_CIR_MBS);
+
+		property_id = HAL_PARAM_VENC_INTRA_REFRESH;
+
+		intra_refresh.air_ref = ctrl->val;
+		intra_refresh.air_mbs = air_mbs->val;
+		intra_refresh.mode = ir_mode->val;
+		intra_refresh.cir_mbs = cir_mbs->val;
+
+		pdata = &intra_refresh;
+		break;
+	}
+	case V4L2_CID_MPEG_VIDC_VIDEO_CIR_MBS: {
+		struct v4l2_ctrl *ir_mode, *air_mbs, *air_ref;
+
+		ir_mode = TRY_GET_CTRL(
+				V4L2_CID_MPEG_VIDC_VIDEO_INTRA_REFRESH_MODE);
+		air_mbs = TRY_GET_CTRL(V4L2_CID_MPEG_VIDC_VIDEO_AIR_MBS);
+		air_ref = TRY_GET_CTRL(V4L2_CID_MPEG_VIDC_VIDEO_AIR_REF);
+
+		property_id = HAL_PARAM_VENC_INTRA_REFRESH;
+
+		intra_refresh.cir_mbs = ctrl->val;
+		intra_refresh.air_mbs = air_mbs->val;
+		intra_refresh.air_ref = air_ref->val;
+		intra_refresh.mode = ir_mode->val;
+
+		pdata = &intra_refresh;
+		break;
+	}
+	case V4L2_CID_MPEG_VIDEO_CYCLIC_INTRA_REFRESH_MB: {
+		struct v4l2_ctrl *air_mbs, *air_ref;
+
+		air_mbs = TRY_GET_CTRL(V4L2_CID_MPEG_VIDC_VIDEO_AIR_MBS);
+		air_ref = TRY_GET_CTRL(V4L2_CID_MPEG_VIDC_VIDEO_AIR_REF);
+
+		property_id = HAL_PARAM_VENC_INTRA_REFRESH;
+
+		intra_refresh.cir_mbs = ctrl->val;
+		intra_refresh.air_mbs = air_mbs->val;
+		intra_refresh.air_ref = air_ref->val;
+		intra_refresh.mode = HAL_INTRA_REFRESH_CYCLIC;
+
+		pdata = &intra_refresh;
+		break;
+	}
+	case V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_MODE:
+	{
+		struct v4l2_ctrl *alpha, *beta;
+
+		alpha = TRY_GET_CTRL(
+				V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_ALPHA);
+		beta = TRY_GET_CTRL(
+				V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_BETA);
+
+		property_id = HAL_PARAM_VENC_H264_DEBLOCK_CONTROL;
+		h264_db_control.slice_alpha_offset = alpha->val;
+		h264_db_control.slice_beta_offset = beta->val;
+		h264_db_control.mode = venc_v4l2_to_hal(
+				V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_MODE,
+				ctrl->val);
+		pdata = &h264_db_control;
+		break;
+	}
+	case V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_ALPHA:
+	{
+		struct v4l2_ctrl *mode, *beta;
+
+		mode = TRY_GET_CTRL(
+				V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_MODE);
+		beta = TRY_GET_CTRL(
+				V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_BETA);
+
+		property_id = HAL_PARAM_VENC_H264_DEBLOCK_CONTROL;
+		h264_db_control.slice_alpha_offset = ctrl->val;
+		h264_db_control.slice_beta_offset = beta->val;
+		h264_db_control.mode = venc_v4l2_to_hal(
+				V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_MODE,
+				mode->val);
+		pdata = &h264_db_control;
+		break;
+	}
+	case V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_BETA:
+	{
+		struct v4l2_ctrl *mode, *alpha;
+
+		mode = TRY_GET_CTRL(
+				V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_MODE);
+		alpha = TRY_GET_CTRL(
+				V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_ALPHA);
+		property_id = HAL_PARAM_VENC_H264_DEBLOCK_CONTROL;
+		h264_db_control.slice_alpha_offset = alpha->val;
+		h264_db_control.slice_beta_offset = ctrl->val;
+		h264_db_control.mode = venc_v4l2_to_hal(
+				V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_MODE,
+				mode->val);
+		pdata = &h264_db_control;
+		break;
+	}
+	case V4L2_CID_MPEG_VIDEO_HEADER_MODE:
+		property_id = HAL_PARAM_VENC_SYNC_FRAME_SEQUENCE_HEADER;
+
+		switch (ctrl->val) {
+		case V4L2_MPEG_VIDEO_HEADER_MODE_SEPARATE:
+			enable.enable = 0;
+			break;
+		case V4L2_MPEG_VIDEO_HEADER_MODE_JOINED_WITH_I_FRAME:
+			enable.enable = 1;
+			break;
+		case V4L2_MPEG_VIDEO_HEADER_MODE_JOINED_WITH_1ST_FRAME:
+		default:
+			rc = -ENOTSUPP;
+			break;
+		}
+		pdata = &enable;
+		break;
+	case V4L2_CID_MPEG_VIDC_VIDEO_SECURE:
+		inst->flags |= VIDC_SECURE;
+		dprintk(VIDC_INFO, "Setting secure mode to: %d\n",
+				!!(inst->flags & VIDC_SECURE));
+		break;
+	case V4L2_CID_MPEG_VIDC_VIDEO_EXTRADATA:
+		msm_venc_register_extradata(inst, (u32)ctrl->val);
+		property_id = HAL_PARAM_INDEX_EXTRADATA;
+		extra.index = msm_comm_get_hal_extradata_index(ctrl->val);
+		extra.enable = 1;
+		pdata = &extra;
+		break;
+	case V4L2_CID_MPEG_VIDC_VIDEO_H264_VUI_TIMING_INFO:
+	{
+		struct v4l2_ctrl *rc_mode;
+		bool cfr = false;
+
+		property_id = HAL_PARAM_VENC_H264_VUI_TIMING_INFO;
+		rc_mode = TRY_GET_CTRL(V4L2_CID_MPEG_VIDC_VIDEO_RATE_CONTROL);
+
+		switch (rc_mode->val) {
+		case V4L2_CID_MPEG_VIDC_VIDEO_RATE_CONTROL_VBR_CFR:
+		case V4L2_CID_MPEG_VIDC_VIDEO_RATE_CONTROL_CBR_CFR:
+			cfr = true;
+			break;
+		default:
+			cfr = false;
+			break;
+		}
+
+		switch (ctrl->val) {
+		case V4L2_MPEG_VIDC_VIDEO_H264_VUI_TIMING_INFO_DISABLED:
+			vui_timing_info.enable = 0;
+			break;
+		case V4L2_MPEG_VIDC_VIDEO_H264_VUI_TIMING_INFO_ENABLED:
+			vui_timing_info.enable = 1;
+			vui_timing_info.fixed_frame_rate = cfr;
+			vui_timing_info.time_scale = NSEC_PER_SEC;
+			break;
+		}
+
+		pdata = &vui_timing_info;
+		break;
+	}
+	case V4L2_CID_MPEG_VIDC_VIDEO_AU_DELIMITER:
+		property_id = HAL_PARAM_VENC_GENERATE_AUDNAL;
+
+		switch (ctrl->val) {
+		case V4L2_MPEG_VIDC_VIDEO_AU_DELIMITER_DISABLED:
+			enable.enable = 0;
+			break;
+		case V4L2_MPEG_VIDC_VIDEO_AU_DELIMITER_ENABLED:
+			enable.enable = 1;
+			break;
+		default:
+			rc = -ENOTSUPP;
+			break;
+		}
+
+		pdata = &enable;
+		break;
+	case V4L2_CID_MPEG_VIDC_SET_PERF_LEVEL:
+		switch (ctrl->val) {
+		case V4L2_CID_MPEG_VIDC_PERF_LEVEL_NOMINAL:
+			if (inst->flags & VIDC_TURBO) {
+				inst->flags &= ~VIDC_TURBO;
+				msm_dcvs_init_load(inst);
+			}
+			break;
+		case V4L2_CID_MPEG_VIDC_PERF_LEVEL_TURBO:
+			inst->flags |= VIDC_TURBO;
+			break;
+		default:
+			dprintk(VIDC_ERR, "Perf mode %x not supported\n",
+					ctrl->val);
+			rc = -ENOTSUPP;
+			break;
+		}
+
+		msm_dcvs_try_enable(inst);
+		msm_comm_scale_clocks_and_bus(inst);
+		break;
+	case V4L2_CID_MPEG_VIDC_VIDEO_H264_VUI_BITSTREAM_RESTRICT:
+		property_id = HAL_PARAM_VENC_H264_VUI_BITSTREAM_RESTRC;
+		vui_bitstream_restrict.enable = ctrl->val;
+		pdata = &vui_bitstream_restrict;
+		break;
+	case V4L2_CID_MPEG_VIDC_VIDEO_PRESERVE_TEXT_QUALITY:
+		property_id = HAL_PARAM_VENC_PRESERVE_TEXT_QUALITY;
+		preserve_text_quality.enable = ctrl->val;
+		pdata = &preserve_text_quality;
+		break;
+	case V4L2_CID_MPEG_VIDC_VIDEO_MPEG4_TIME_RESOLUTION:
+		property_id = HAL_PARAM_VENC_MPEG4_TIME_RESOLUTION;
+		time_res.time_increment_resolution = ctrl->val;
+		pdata = &time_res;
+		break;
+
+	case V4L2_CID_MPEG_VIDC_VIDEO_DEINTERLACE:
+	{
+		struct v4l2_ctrl *rotation = NULL;
+		if (!(inst->capability.pixelprocess_capabilities &
+			HAL_VIDEO_ENCODER_DEINTERLACE_CAPABILITY)) {
+			dprintk(VIDC_ERR, "Deinterlace not supported: %#x\n",
+					ctrl->id);
+			rc = -ENOTSUPP;
+			break;
+		}
+		rotation = TRY_GET_CTRL(V4L2_CID_MPEG_VIDC_VIDEO_ROTATION);
+		if (ctrl->val && rotation && rotation->val !=
+			V4L2_CID_MPEG_VIDC_VIDEO_ROTATION_NONE) {
+			dprintk(VIDC_ERR,
+				"Deinterlacing not supported with rotation\n");
+			rc = -EINVAL;
+			break;
+		}
+		property_id = HAL_CONFIG_VPE_DEINTERLACE;
+		switch (ctrl->val) {
+		case V4L2_CID_MPEG_VIDC_VIDEO_DEINTERLACE_ENABLED:
+			enable.enable = 1;
+			break;
+		case V4L2_CID_MPEG_VIDC_VIDEO_DEINTERLACE_DISABLED:
+		default:
+			enable.enable = 0;
+			break;
+		}
+		pdata = &enable;
+		break;
+	}
+	case V4L2_CID_MPEG_VIDC_VIDEO_REQUEST_SEQ_HEADER:
+		atomic_inc(&inst->seq_hdr_reqs);
+		break;
+	case V4L2_CID_MPEG_VIDC_VIDEO_USELTRFRAME:
+		property_id = HAL_CONFIG_VENC_USELTRFRAME;
+		use_ltr.ref_ltr = ctrl->val;
+		use_ltr.use_constraint = false;
+		use_ltr.frames = 0;
+		pdata = &use_ltr;
+		break;
+	case V4L2_CID_MPEG_VIDC_VIDEO_MARKLTRFRAME:
+		property_id = HAL_CONFIG_VENC_MARKLTRFRAME;
+		mark_ltr.mark_frame = ctrl->val;
+		pdata = &mark_ltr;
+		break;
+	case V4L2_CID_MPEG_VIDC_VIDEO_HIER_P_NUM_LAYERS:
+		property_id = HAL_CONFIG_VENC_HIER_P_NUM_FRAMES;
+		hier_p_layers = ctrl->val;
+		if (hier_p_layers > inst->capability.hier_p.max) {
+			dprintk(VIDC_ERR,
+				"Error setting hier P num layers %d, max supported is %d\n",
+				hier_p_layers, inst->capability.hier_p.max);
+			rc = -ENOTSUPP;
+			break;
+		}
+		pdata = &hier_p_layers;
+		break;
+	case V4L2_CID_MPEG_VIDC_VIDEO_RATE_CONTROL_TIMESTAMP_MODE:
+		property_id = HAL_PARAM_VENC_DISABLE_RC_TIMESTAMP;
+		enable.enable = (ctrl->val ==
+		V4L2_MPEG_VIDC_VIDEO_RATE_CONTROL_TIMESTAMP_MODE_IGNORE);
+		pdata = &enable;
+		break;
+	case V4L2_CID_MPEG_VIDC_VIDEO_VPX_ERROR_RESILIENCE:
+		property_id = HAL_PARAM_VENC_VPX_ERROR_RESILIENCE_MODE;
+		enable.enable = ctrl->val;
+		pdata = &enable;
+		break;
+	case V4L2_CID_MPEG_VIDC_VIDEO_H264_NAL_SVC:
+		property_id = HAL_PARAM_VENC_H264_NAL_SVC_EXT;
+		enable.enable = ctrl->val;
+		pdata = &enable;
+		break;
+	case V4L2_CID_MPEG_VIDC_VIDEO_PERF_MODE:
+		property_id = HAL_CONFIG_VENC_PERF_MODE;
+
+		switch (ctrl->val) {
+		case V4L2_MPEG_VIDC_VIDEO_PERF_POWER_SAVE:
+			inst->flags |= VIDC_LOW_POWER;
+			venc_mode = HAL_PERF_MODE_POWER_SAVE;
+			break;
+		case V4L2_MPEG_VIDC_VIDEO_PERF_MAX_QUALITY:
+			inst->flags &= ~VIDC_LOW_POWER;
+			venc_mode = HAL_PERF_MODE_POWER_MAX_QUALITY;
+			break;
+		default:
+			dprintk(VIDC_ERR, "Power save mode %x not supported\n",
+					ctrl->val);
+			rc = -ENOTSUPP;
+			property_id = 0;
+			break;
+		}
+		pdata = &venc_mode;
+		break;
+	case V4L2_CID_MPEG_VIDC_VIDEO_HIER_B_NUM_LAYERS:
+		if (inst->fmts[CAPTURE_PORT].fourcc != V4L2_PIX_FMT_HEVC) {
+			dprintk(VIDC_ERR, "Hier B supported for HEVC only\n");
+			rc = -ENOTSUPP;
+			break;
+		}
+		property_id = HAL_PARAM_VENC_HIER_B_MAX_ENH_LAYERS;
+		hier_b_layers = ctrl->val;
+		pdata = &hier_b_layers;
+		break;
+	case V4L2_CID_MPEG_VIDC_VIDEO_HYBRID_HIERP_MODE:
+		property_id = HAL_PARAM_VENC_HIER_P_HYBRID_MODE;
+		hyb_hierp.layers = ctrl->val;
+		pdata = &hyb_hierp;
+		break;
+	case V4L2_CID_MPEG_VIDC_VIDEO_MBI_STATISTICS_MODE:
+		property_id = HAL_PARAM_VENC_MBI_STATISTICS_MODE;
+		mbi_statistics_mode = venc_v4l2_to_hal(
+			V4L2_CID_MPEG_VIDC_VIDEO_MBI_STATISTICS_MODE,
+			ctrl->val);
+		pdata = &mbi_statistics_mode;
+		break;
+	case V4L2_CID_VIDC_QBUF_MODE:
+		property_id = HAL_PARAM_SYNC_BASED_INTERRUPT;
+		enable.enable = ctrl->val == V4L2_VIDC_QBUF_BATCHED;
+		pdata = &enable;
+		break;
+	case V4L2_CID_MPEG_VIDC_VIDEO_MAX_HIERP_LAYERS:
+		property_id = HAL_PARAM_VENC_HIER_P_MAX_ENH_LAYERS;
+		max_hierp_layers = ctrl->val;
+		if (max_hierp_layers > inst->capability.hier_p.max) {
+			dprintk(VIDC_ERR,
+				"Error: max HP layers (%d) > max supported (%d)\n",
+				max_hierp_layers, inst->capability.hier_p.max);
+			rc = -ENOTSUPP;
+			break;
+		}
+		pdata = &max_hierp_layers;
+		break;
+	case V4L2_CID_MPEG_VIDC_VIDEO_BASELAYER_ID:
+		property_id = HAL_CONFIG_VENC_BASELAYER_PRIORITYID;
+		baselayerid = ctrl->val;
+		pdata = &baselayerid;
+		break;
+	case V4L2_CID_MPEG_VIDC_VIDEO_CONFIG_QP:
+		/* Sanity check for the QP boundaries as we are using
+		 * same control to set dynamic QP for all the codecs
+		 */
+		rc = msm_venc_validate_qp_value(inst, ctrl);
+		if (rc) {
+			dprintk(VIDC_ERR, "Config QP is outside the supported QP range\n");
+			break;
+		}
+		property_id = HAL_CONFIG_VENC_FRAME_QP;
+		frameqp = ctrl->val;
+		pdata = &frameqp;
+		break;
+	case V4L2_CID_MPEG_VIDC_VIDEO_INITIAL_I_FRAME_QP:
+	{
+		rc = msm_venc_validate_qp_value(inst, ctrl);
+		if (rc) {
+			dprintk(VIDC_ERR, "Invalid Initial I QP\n");
+			break;
+		}
+		/*
+		 * Defer sending property from here, set_ext_ctrl
+		 * will send it based on the rc value.
+		 */
+		property_id = 0;
+		break;
+	}
+	case V4L2_CID_MPEG_VIDC_VIDEO_INITIAL_B_FRAME_QP:
+	{
+		rc = msm_venc_validate_qp_value(inst, ctrl);
+		if (rc) {
+			dprintk(VIDC_ERR, "Invalid Initial B QP\n");
+			break;
+		}
+		/*
+		 * Defer sending property from here, set_ext_ctrl
+		 * will send it based on the rc value.
+		 */
+		property_id = 0;
+		break;
+	}
+	case V4L2_CID_MPEG_VIDC_VIDEO_INITIAL_P_FRAME_QP:
+	{
+		rc = msm_venc_validate_qp_value(inst, ctrl);
+		if (rc) {
+			dprintk(VIDC_ERR, "Invalid Initial P QP\n");
+			break;
+		}
+		/*
+		 * Defer sending property from here, set_ext_ctrl
+		 * will send it based on the rc value.
+		 */
+		property_id = 0;
+		break;
+	}
+	case V4L2_CID_MPEG_VIDC_VIDEO_VQZIP_SEI:
+		property_id = HAL_PARAM_VENC_VQZIP_SEI;
+		enable.enable = ctrl->val;
+		pdata = &enable;
+		break;
+	case V4L2_CID_MPEG_VIDC_VIDEO_PRIORITY:
+		property_id = HAL_CONFIG_REALTIME;
+		/* firmware has inverted values for realtime and
+		 * non-realtime priority
+		 */
+		enable.enable = !(ctrl->val);
+		pdata = &enable;
+		switch (ctrl->val) {
+		case V4L2_MPEG_VIDC_VIDEO_PRIORITY_REALTIME_DISABLE:
+			inst->flags &= ~VIDC_REALTIME;
+			break;
+		case V4L2_MPEG_VIDC_VIDEO_PRIORITY_REALTIME_ENABLE:
+			inst->flags |= VIDC_REALTIME;
+			break;
+		default:
+			dprintk(VIDC_WARN,
+				"inst(%pK) invalid priority ctrl value %#x\n",
+				inst, ctrl->val);
+			break;
+		}
+		break;
+	case V4L2_CID_MPEG_VIDC_VIDEO_OPERATING_RATE:
+		dprintk(VIDC_DBG,
+			"inst(%pK) operating rate changed from %d to %d\n",
+			inst, inst->operating_rate >> 16, ctrl->val >> 16);
+		inst->operating_rate = ctrl->val;
+		break;
+	case V4L2_CID_MPEG_VIDC_VIDEO_VENC_BITRATE_TYPE:
+	{
+		property_id = HAL_PARAM_VENC_BITRATE_TYPE;
+		enable.enable = ctrl->val;
+		pdata = &enable;
+		break;
+	}
+	case V4L2_CID_MPEG_VIDC_VIDEO_H264_PIC_ORDER_CNT:
+	{
+		property_id = HAL_PARAM_VENC_H264_PIC_ORDER_CNT;
+		pic_order_cnt = ctrl->val;
+		pdata = &pic_order_cnt;
+		break;
+	}
+	case V4L2_CID_MPEG_VIDC_VIDEO_COLOR_SPACE:
+	{
+		signal_info.color_space = ctrl->val;
+		temp_ctrl = TRY_GET_CTRL(V4L2_CID_MPEG_VIDC_VIDEO_FULL_RANGE);
+		signal_info.full_range = temp_ctrl ? temp_ctrl->val : 0;
+		temp_ctrl =
+			TRY_GET_CTRL(V4L2_CID_MPEG_VIDC_VIDEO_TRANSFER_CHARS);
+		signal_info.transfer_chars = temp_ctrl ? temp_ctrl->val : 0;
+		temp_ctrl =
+			TRY_GET_CTRL(V4L2_CID_MPEG_VIDC_VIDEO_MATRIX_COEFFS);
+		signal_info.matrix_coeffs = temp_ctrl ? temp_ctrl->val : 0;
+		property_id = HAL_PARAM_VENC_VIDEO_SIGNAL_INFO;
+		pdata = &signal_info;
+		break;
+	}
+	case V4L2_CID_MPEG_VIDC_VIDEO_FULL_RANGE:
+	{
+		signal_info.full_range = ctrl->val;
+		temp_ctrl = TRY_GET_CTRL(V4L2_CID_MPEG_VIDC_VIDEO_COLOR_SPACE);
+		signal_info.color_space = temp_ctrl ? temp_ctrl->val : 0;
+		temp_ctrl =
+			TRY_GET_CTRL(V4L2_CID_MPEG_VIDC_VIDEO_TRANSFER_CHARS);
+		signal_info.transfer_chars = temp_ctrl ? temp_ctrl->val : 0;
+		temp_ctrl =
+			TRY_GET_CTRL(V4L2_CID_MPEG_VIDC_VIDEO_MATRIX_COEFFS);
+		signal_info.matrix_coeffs = temp_ctrl ? temp_ctrl->val : 0;
+		property_id = HAL_PARAM_VENC_VIDEO_SIGNAL_INFO;
+		pdata = &signal_info;
+		break;
+	}
+	case V4L2_CID_MPEG_VIDC_VIDEO_TRANSFER_CHARS:
+	{
+		signal_info.transfer_chars = ctrl->val;
+		temp_ctrl = TRY_GET_CTRL(V4L2_CID_MPEG_VIDC_VIDEO_FULL_RANGE);
+		signal_info.full_range = temp_ctrl ? temp_ctrl->val : 0;
+		temp_ctrl = TRY_GET_CTRL(V4L2_CID_MPEG_VIDC_VIDEO_COLOR_SPACE);
+		signal_info.color_space = temp_ctrl ? temp_ctrl->val : 0;
+		temp_ctrl =
+			TRY_GET_CTRL(V4L2_CID_MPEG_VIDC_VIDEO_MATRIX_COEFFS);
+		signal_info.matrix_coeffs = temp_ctrl ? temp_ctrl->val : 0;
+		property_id = HAL_PARAM_VENC_VIDEO_SIGNAL_INFO;
+		pdata = &signal_info;
+		break;
+	}
+	case V4L2_CID_MPEG_VIDC_VIDEO_MATRIX_COEFFS:
+	{
+		signal_info.matrix_coeffs = ctrl->val;
+		temp_ctrl = TRY_GET_CTRL(V4L2_CID_MPEG_VIDC_VIDEO_FULL_RANGE);
+		signal_info.full_range = temp_ctrl ? temp_ctrl->val : 0;
+		temp_ctrl =
+			TRY_GET_CTRL(V4L2_CID_MPEG_VIDC_VIDEO_TRANSFER_CHARS);
+		signal_info.transfer_chars = temp_ctrl ? temp_ctrl->val : 0;
+		temp_ctrl = TRY_GET_CTRL(V4L2_CID_MPEG_VIDC_VIDEO_COLOR_SPACE);
+		signal_info.color_space = temp_ctrl ? temp_ctrl->val : 0;
+		property_id = HAL_PARAM_VENC_VIDEO_SIGNAL_INFO;
+		pdata = &signal_info;
+		break;
+	}
+	case V4L2_CID_MPEG_VIDC_VIDEO_VPE_CSC:
+		if (ctrl->val == V4L2_CID_MPEG_VIDC_VIDEO_VPE_CSC_ENABLE) {
+			rc = msm_venc_set_csc(inst);
+			if (rc)
+				dprintk(VIDC_ERR, "fail to set csc: %d\n", rc);
+		}
+		break;
+	case V4L2_CID_MPEG_VIDC_VIDEO_LOWLATENCY_MODE:
+	{
+		property_id = HAL_PARAM_VENC_LOW_LATENCY;
+		if (ctrl->val ==
+			V4L2_CID_MPEG_VIDC_VIDEO_LOWLATENCY_ENABLE)
+			enable.enable = 1;
+		else
+			enable.enable = 0;
+		pdata = &enable;
+		break;
+	}
+	case V4L2_CID_MPEG_VIDC_VIDEO_H264_TRANSFORM_8x8:
+		property_id = HAL_PARAM_VENC_H264_TRANSFORM_8x8;
+		switch (ctrl->val) {
+		case V4L2_MPEG_VIDC_VIDEO_H264_TRANSFORM_8x8_ENABLE:
+			enable.enable = 1;
+			break;
+		case V4L2_MPEG_VIDC_VIDEO_H264_TRANSFORM_8x8_DISABLE:
+			enable.enable = 0;
+			break;
+		default:
+			dprintk(VIDC_ERR,
+				"Invalid H264 8x8 transform control value %d\n",
+				ctrl->val);
+			rc = -ENOTSUPP;
+			break;
+		}
+		pdata = &enable;
+		break;
+	case V4L2_CID_MPEG_VIDC_VIDEO_IFRAME_SIZE_TYPE:
+		property_id = HAL_PARAM_VENC_IFRAMESIZE_TYPE;
+		iframesize_type = venc_v4l2_to_hal(
+				V4L2_CID_MPEG_VIDC_VIDEO_IFRAME_SIZE_TYPE,
+				ctrl->val);
+		pdata = &iframesize_type;
+		break;
+	case V4L2_CID_MPEG_VIDC_VIDEO_SEND_SKIPPED_FRAME:
+		property_id = HAL_PARAM_VENC_SEND_OUTPUT_FOR_SKIPPED_FRAME;
+		switch (ctrl->val) {
+		case V4L2_MPEG_VIDC_VIDEO_SEND_SKIPPED_FRAME_ENABLE:
+			enable.enable = 1;
+			break;
+		case V4L2_MPEG_VIDC_VIDEO_SEND_SKIPPED_FRAME_DISABLE:
+			enable.enable = 0;
+			break;
+		default:
+			dprintk(VIDC_ERR,
+				"Invalid send skipped frames control value %d\n",
+				ctrl->val);
+			rc = -ENOTSUPP;
+			break;
+		}
+		pdata = &enable;
+		break;
+
+	default:
+		dprintk(VIDC_ERR, "Unsupported control: %x\n", ctrl->id);
+		rc = -ENOTSUPP;
+		break;
+	}
+
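+	/* Re-take the control lock that was released before the switch above. */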
+	v4l2_ctrl_lock(ctrl);
+
+	if (!rc && property_id) {
+		dprintk(VIDC_DBG, "Control: HAL property=%x,ctrl_value=%d\n",
+				property_id,
+				ctrl->val);
+		rc = call_hfi_op(hdev, session_set_property,
+				(void *)inst->session, property_id, pdata);
+	}
+
+	return rc;
+}
+
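+/*
+ * Validate a dynamic QP value against the min/max bounds of the QP
+ * controls registered for the instance's output codec.
+ */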
+static int msm_venc_validate_qp_value(struct msm_vidc_inst *inst,
+					struct v4l2_ctrl *ctrl)
+{
+	int rc = 0, min, max;
+	struct v4l2_ctrl *temp_ctrl = NULL;
+	int qp_value = ctrl->val;
+
+#define VALIDATE_BOUNDARIES(__min, __max, __val) ({\
+	int __rc = __val >= __min && \
+			__val <= __max; \
+	if (!__rc) \
+		dprintk(VIDC_ERR, "QP beyond range: min(%d) max(%d) val(%d)", \
+				__min, __max, __val); \
+	__rc; \
+})
+
+	switch (inst->fmts[CAPTURE_PORT].fourcc) {
+	case V4L2_PIX_FMT_VP8:
+		temp_ctrl = TRY_GET_CTRL(V4L2_CID_MPEG_VIDEO_VPX_MAX_QP);
+		max = temp_ctrl->maximum;
+		temp_ctrl = TRY_GET_CTRL(V4L2_CID_MPEG_VIDEO_VPX_MIN_QP);
+		min = temp_ctrl->minimum;
+		if (!VALIDATE_BOUNDARIES(min, max, qp_value))
+			rc = -EINVAL;
+		break;
+	case V4L2_PIX_FMT_H263:
+	case V4L2_PIX_FMT_MPEG4:
+		temp_ctrl = TRY_GET_CTRL(V4L2_CID_MPEG_VIDEO_MPEG4_MAX_QP);
+		max = temp_ctrl->maximum;
+		temp_ctrl = TRY_GET_CTRL(V4L2_CID_MPEG_VIDEO_MPEG4_MIN_QP);
+		min = temp_ctrl->minimum;
+		if (!VALIDATE_BOUNDARIES(min, max, qp_value))
+			rc = -EINVAL;
+		break;
+	case V4L2_PIX_FMT_H264:
+	case V4L2_PIX_FMT_HEVC:
+		temp_ctrl = TRY_GET_CTRL(V4L2_CID_MPEG_VIDEO_H264_MAX_QP);
+		max = temp_ctrl->maximum;
+		temp_ctrl = TRY_GET_CTRL(V4L2_CID_MPEG_VIDEO_H264_MIN_QP);
+		min = temp_ctrl->minimum;
+		if (!VALIDATE_BOUNDARIES(min, max, qp_value))
+			rc = -EINVAL;
+		break;
+	default:
+		dprintk(VIDC_ERR, "%s Invalid Codec\n", __func__);
+		return -EINVAL;
+	}
+	return rc;
+#undef VALIDATE_BOUNDARIES
+}
+
+#undef TRY_GET_CTRL
+
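+/*
+ * Walk a batch of extended controls, mapping each to a HAL property; the
+ * last mapped property is sent to the firmware after the loop. Layer
+ * bitrates are the exception: they are pushed to the firmware immediately,
+ * once per layer.
+ */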
+static int try_set_ext_ctrl(struct msm_vidc_inst *inst,
+	struct v4l2_ext_controls *ctrl)
+{
+	int rc = 0, i;
+	struct v4l2_ext_control *control;
+	struct hfi_device *hdev;
+	struct hal_ltr_mode ltr_mode;
+	struct hal_vc1e_perf_cfg_type search_range = { {0} };
+	u32 property_id = 0;
+	void *pdata = NULL;
+	struct msm_vidc_capability *cap = NULL;
+	struct hal_initial_quantization quant;
+	struct hal_aspect_ratio sar;
+	struct hal_bitrate bitrate;
+	struct hal_frame_size blur_res;
+	struct v4l2_control temp_ctrl;
+
+	if (!inst || !inst->core || !inst->core->device || !ctrl) {
+		dprintk(VIDC_ERR, "%s invalid parameters\n", __func__);
+		return -EINVAL;
+	}
+
+	hdev = inst->core->device;
+	cap = &inst->capability;
+
+	control = ctrl->controls;
+	for (i = 0; i < ctrl->count; i++) {
+		switch (control[i].id) {
+		case V4L2_CID_MPEG_VIDC_VIDEO_LTRMODE:
+			if (control[i].value !=
+				V4L2_MPEG_VIDC_VIDEO_LTR_MODE_DISABLE) {
+				rc = msm_venc_toggle_hier_p(inst, false);
+				if (rc)
+					break;
+			}
+			ltr_mode.mode = control[i].value;
+			ltr_mode.trust_mode = 1;
+			property_id = HAL_PARAM_VENC_LTRMODE;
+			pdata = &ltr_mode;
+			break;
+		case V4L2_CID_MPEG_VIDC_VIDEO_LTRCOUNT:
+			ltr_mode.count =  control[i].value;
+			if (ltr_mode.count > cap->ltr_count.max) {
+				dprintk(VIDC_ERR,
+					"Invalid LTR count %d. Supported max: %d\n",
+					ltr_mode.count,
+					cap->ltr_count.max);
+				/*
+				 * FIXME: Return an error (-EINVAL)
+				 * here once VP8 supports LTR count
+				 * capability.
+				 */
+				ltr_mode.count = 1;
+			}
+			ltr_mode.trust_mode = 1;
+			property_id = HAL_PARAM_VENC_LTRMODE;
+			pdata = &ltr_mode;
+			break;
+		case V4L2_CID_MPEG_VIDC_VIDEO_ENABLE_INITIAL_QP:
+			property_id = HAL_PARAM_VENC_ENABLE_INITIAL_QP;
+			quant.init_qp_enable = control[i].value;
+			pdata = &quant;
+			break;
+		case V4L2_CID_MPEG_VIDC_VIDEO_I_FRAME_QP:
+			quant.qpi = control[i].value;
+			property_id = HAL_PARAM_VENC_ENABLE_INITIAL_QP;
+			pdata = &quant;
+			break;
+		case V4L2_CID_MPEG_VIDC_VIDEO_P_FRAME_QP:
+			quant.qpp = control[i].value;
+			property_id = HAL_PARAM_VENC_ENABLE_INITIAL_QP;
+			pdata = &quant;
+			break;
+		case V4L2_CID_MPEG_VIDC_VIDEO_B_FRAME_QP:
+			quant.qpb = control[i].value;
+			property_id = HAL_PARAM_VENC_ENABLE_INITIAL_QP;
+			pdata = &quant;
+			break;
+		case V4L2_CID_MPEG_VIDC_VIDEO_INITIAL_I_FRAME_QP:
+			/* Sanity check for the QP boundaries as we are using
+			 * same control to set Initial QP for all the codecs
+			 */
+			temp_ctrl.id =
+				V4L2_CID_MPEG_VIDC_VIDEO_INITIAL_I_FRAME_QP;
+			temp_ctrl.value = control[i].value;
+
+			rc = msm_comm_s_ctrl(inst, &temp_ctrl);
+			if (rc) {
+				dprintk(VIDC_ERR,
+					"%s Failed setting Initial I Frame QP : %d\n",
+					__func__, rc);
+				break;
+			}
+			quant.qpi = control[i].value;
+			property_id = HAL_PARAM_VENC_ENABLE_INITIAL_QP;
+			pdata = &quant;
+			break;
+		case V4L2_CID_MPEG_VIDC_VIDEO_INITIAL_P_FRAME_QP:
+			temp_ctrl.id =
+				V4L2_CID_MPEG_VIDC_VIDEO_INITIAL_P_FRAME_QP;
+			temp_ctrl.value = control[i].value;
+			rc = msm_comm_s_ctrl(inst, &temp_ctrl);
+			if (rc) {
+				dprintk(VIDC_ERR,
+					"%s Failed setting Initial P Frame QP : %d\n",
+					__func__, rc);
+				break;
+			}
+			quant.qpp = control[i].value;
+			property_id = HAL_PARAM_VENC_ENABLE_INITIAL_QP;
+			pdata = &quant;
+			break;
+		case V4L2_CID_MPEG_VIDC_VIDEO_INITIAL_B_FRAME_QP:
+			temp_ctrl.id =
+				V4L2_CID_MPEG_VIDC_VIDEO_INITIAL_B_FRAME_QP;
+			temp_ctrl.value = control[i].value;
+			rc = msm_comm_s_ctrl(inst, &temp_ctrl);
+			if (rc) {
+				dprintk(VIDC_ERR,
+					"%s Failed setting Initial B Frame QP : %d\n",
+					__func__, rc);
+				break;
+			}
+			quant.qpb = control[i].value;
+			property_id = HAL_PARAM_VENC_ENABLE_INITIAL_QP;
+			pdata = &quant;
+			break;
+		case V4L2_CID_MPEG_VIDC_VIDEO_IFRAME_X_RANGE:
+			search_range.i_frame.x_subsampled = control[i].value;
+			property_id = HAL_PARAM_VENC_SEARCH_RANGE;
+			pdata = &search_range;
+			break;
+		case V4L2_CID_MPEG_VIDC_VIDEO_IFRAME_Y_RANGE:
+			search_range.i_frame.y_subsampled = control[i].value;
+			property_id = HAL_PARAM_VENC_SEARCH_RANGE;
+			pdata = &search_range;
+			break;
+		case V4L2_CID_MPEG_VIDC_VIDEO_PFRAME_X_RANGE:
+			search_range.p_frame.x_subsampled = control[i].value;
+			property_id = HAL_PARAM_VENC_SEARCH_RANGE;
+			pdata = &search_range;
+			break;
+		case V4L2_CID_MPEG_VIDC_VIDEO_PFRAME_Y_RANGE:
+			search_range.p_frame.y_subsampled = control[i].value;
+			property_id = HAL_PARAM_VENC_SEARCH_RANGE;
+			pdata = &search_range;
+			break;
+		case V4L2_CID_MPEG_VIDC_VIDEO_BFRAME_X_RANGE:
+			search_range.b_frame.x_subsampled = control[i].value;
+			property_id = HAL_PARAM_VENC_SEARCH_RANGE;
+			pdata = &search_range;
+			break;
+		case V4L2_CID_MPEG_VIDC_VIDEO_BFRAME_Y_RANGE:
+			search_range.b_frame.y_subsampled = control[i].value;
+			property_id = HAL_PARAM_VENC_SEARCH_RANGE;
+			pdata = &search_range;
+			break;
+		case V4L2_CID_MPEG_VIDC_VENC_PARAM_SAR_WIDTH:
+			sar.aspect_width = control[i].value;
+			property_id = HAL_PROPERTY_PARAM_VENC_ASPECT_RATIO;
+			pdata = &sar;
+			break;
+		case V4L2_CID_MPEG_VIDC_VENC_PARAM_SAR_HEIGHT:
+			sar.aspect_height = control[i].value;
+			property_id = HAL_PROPERTY_PARAM_VENC_ASPECT_RATIO;
+			pdata = &sar;
+			break;
+		case V4L2_CID_MPEG_VIDC_VENC_PARAM_LAYER_BITRATE:
+		{
+			if (control[i].value) {
+				bitrate.layer_id = i;
+				bitrate.bit_rate = control[i].value;
+				property_id = HAL_CONFIG_VENC_TARGET_BITRATE;
+				pdata = &bitrate;
+				dprintk(VIDC_DBG, "bitrate for layer(%d)=%d\n",
+					i, bitrate.bit_rate);
+				rc = call_hfi_op(hdev, session_set_property,
+					(void *)inst->session, property_id,
+					 pdata);
+				if (rc) {
+					dprintk(VIDC_DBG, "prop %x failed\n",
+						property_id);
+					return rc;
+				}
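+				/*
+				 * The bitrate for this layer was already
+				 * pushed to the firmware above; on the last
+				 * layer, clear property_id so the common
+				 * set_property call at the end of this
+				 * function does not send it again.
+				 */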
+				if (i == MAX_HYBRID_HIER_P_LAYERS - 1) {
+					dprintk(VIDC_DBG, "HAL property=%x\n",
+						property_id);
+					property_id = 0;
+					rc = 0;
+				}
+			}
+			break;
+		}
+		case V4L2_CID_MPEG_VIDC_VIDEO_BLUR_WIDTH:
+			blur_res.width = control[i].value;
+			blur_res.buffer_type = HAL_BUFFER_INPUT;
+			property_id = HAL_CONFIG_VENC_BLUR_RESOLUTION;
+			pdata = &blur_res;
+			break;
+		case V4L2_CID_MPEG_VIDC_VIDEO_BLUR_HEIGHT:
+			blur_res.height = control[i].value;
+			blur_res.buffer_type = HAL_BUFFER_INPUT;
+			property_id = HAL_CONFIG_VENC_BLUR_RESOLUTION;
+			pdata = &blur_res;
+			break;
+		default:
+			dprintk(VIDC_ERR, "Invalid id set: %d\n",
+				control[i].id);
+			rc = -ENOTSUPP;
+			break;
+		}
+		if (rc)
+			break;
+	}
+
+	if (!rc && property_id) {
+		dprintk(VIDC_DBG, "Control: HAL property=%x\n", property_id);
+		rc = call_hfi_op(hdev, session_set_property,
+				(void *)inst->session, property_id, pdata);
+	}
+	return rc;
+}
+
+static int msm_venc_op_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+	int rc = 0, c = 0;
+	struct msm_vidc_inst *inst = container_of(ctrl->handler,
+					struct msm_vidc_inst, ctrl_handler);
+
+	if (!inst) {
+		dprintk(VIDC_ERR, "%s invalid parameters\n", __func__);
+		return -EINVAL;
+	}
+
+	rc = msm_comm_try_state(inst, MSM_VIDC_OPEN_DONE);
+
+	if (rc) {
+		dprintk(VIDC_ERR,
+			"Failed to move inst: %pK to start done state\n", inst);
+		goto failed_open_done;
+	}
+
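+	/*
+	 * The controls are clustered; only the cluster members the v4l2
+	 * framework marked as new carry updated values, so push just those.
+	 */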
+	for (c = 0; c < ctrl->ncontrols; ++c) {
+		if (ctrl->cluster[c]->is_new) {
+			struct v4l2_ctrl *temp = ctrl->cluster[c];
+
+			rc = try_set_ctrl(inst, temp);
+			if (rc) {
+				dprintk(VIDC_ERR, "Failed setting %s (%x)\n",
+						v4l2_ctrl_get_name(temp->id),
+						temp->id);
+				break;
+			}
+		}
+	}
+failed_open_done:
+	if (rc)
+		dprintk(VIDC_ERR, "Failed setting control: %x (%s)\n",
+				ctrl->id, v4l2_ctrl_get_name(ctrl->id));
+	return rc;
+}
+
+static int msm_venc_op_g_volatile_ctrl(struct v4l2_ctrl *ctrl)
+{
+	return 0;
+}
+
+static const struct v4l2_ctrl_ops msm_venc_ctrl_ops = {
+	.s_ctrl = msm_venc_op_s_ctrl,
+	.g_volatile_ctrl = msm_venc_op_g_volatile_ctrl,
+};
+
+const struct v4l2_ctrl_ops *msm_venc_get_ctrl_ops(void)
+{
+	return &msm_venc_ctrl_ops;
+}
+
+int msm_venc_inst_init(struct msm_vidc_inst *inst)
+{
+	int rc = 0;
+	struct msm_vidc_format *fmt = NULL;
+
+	if (!inst) {
+		dprintk(VIDC_ERR, "Invalid input = %pK\n", inst);
+		return -EINVAL;
+	}
+	inst->prop.height[CAPTURE_PORT] = DEFAULT_HEIGHT;
+	inst->prop.width[CAPTURE_PORT] = DEFAULT_WIDTH;
+	inst->prop.num_planes[CAPTURE_PORT] = 1;
+
+	/* By default, initialize CAPTURE port to H264 encoder */
+	fmt = msm_comm_get_pixel_fmt_fourcc(venc_formats,
+		ARRAY_SIZE(venc_formats), V4L2_PIX_FMT_H264,
+			CAPTURE_PORT);
+	if (!fmt || fmt->type != CAPTURE_PORT) {
+		dprintk(VIDC_ERR,
+			"venc_formats corrupted\n");
+		return -EINVAL;
+	}
+	memcpy(&inst->fmts[fmt->type], fmt,
+			sizeof(struct msm_vidc_format));
+
+	inst->prop.height[OUTPUT_PORT] = DEFAULT_HEIGHT;
+	inst->prop.width[OUTPUT_PORT] = DEFAULT_WIDTH;
+	inst->prop.num_planes[OUTPUT_PORT] = 1;
+
+	/* By default, initialize OUTPUT port to NV12 format */
+	fmt = msm_comm_get_pixel_fmt_fourcc(venc_formats,
+		ARRAY_SIZE(venc_formats), V4L2_PIX_FMT_NV12,
+			OUTPUT_PORT);
+	if (!fmt || fmt->type != OUTPUT_PORT) {
+		dprintk(VIDC_ERR,
+			"venc_formats corrupted\n");
+		return -EINVAL;
+	}
+	memcpy(&inst->fmts[fmt->type], fmt,
+			sizeof(struct msm_vidc_format));
+
+	inst->capability.height.min = MIN_SUPPORTED_HEIGHT;
+	inst->capability.height.max = DEFAULT_HEIGHT;
+	inst->capability.width.min = MIN_SUPPORTED_WIDTH;
+	inst->capability.width.max = DEFAULT_WIDTH;
+	inst->capability.alloc_mode_in = HAL_BUFFER_MODE_STATIC;
+	inst->capability.alloc_mode_out = HAL_BUFFER_MODE_STATIC;
+	inst->capability.secure_output2_threshold.min = 0;
+	inst->capability.secure_output2_threshold.max = 0;
+	inst->buffer_mode_set[OUTPUT_PORT] = HAL_BUFFER_MODE_STATIC;
+	inst->buffer_mode_set[CAPTURE_PORT] = HAL_BUFFER_MODE_STATIC;
+	inst->prop.fps = DEFAULT_FPS;
+	inst->capability.pixelprocess_capabilities = 0;
+	inst->operating_rate = 0;
+	return rc;
+}
+
+int msm_venc_s_ext_ctrl(struct msm_vidc_inst *inst,
+	struct v4l2_ext_controls *ctrl)
+{
+	int rc = 0;
+
+	if (ctrl->ctrl_class != V4L2_CTRL_CLASS_MPEG) {
+		dprintk(VIDC_ERR, "Invalid Class set for extended control\n");
+		return -EINVAL;
+	}
+	rc = try_set_ext_ctrl(inst, ctrl);
+	if (rc) {
+		dprintk(VIDC_ERR, "Error setting extended control\n");
+		return rc;
+	}
+	return rc;
+}
+
+int msm_venc_querycap(struct msm_vidc_inst *inst, struct v4l2_capability *cap)
+{
+	if (!inst || !cap) {
+		dprintk(VIDC_ERR,
+			"Invalid input, inst = %pK, cap = %pK\n", inst, cap);
+		return -EINVAL;
+	}
+	strlcpy(cap->driver, MSM_VIDC_DRV_NAME, sizeof(cap->driver));
+	strlcpy(cap->card, MSM_VENC_DVC_NAME, sizeof(cap->card));
+	cap->bus_info[0] = 0;
+	cap->version = MSM_VIDC_VERSION;
+	cap->device_caps = V4L2_CAP_VIDEO_CAPTURE_MPLANE |
+						V4L2_CAP_VIDEO_OUTPUT_MPLANE |
+						V4L2_CAP_STREAMING;
+	cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
+	memset(cap->reserved, 0, sizeof(cap->reserved));
+	return 0;
+}
+
+int msm_venc_enum_fmt(struct msm_vidc_inst *inst, struct v4l2_fmtdesc *f)
+{
+	const struct msm_vidc_format *fmt = NULL;
+	int rc = 0;
+
+	if (!inst || !f) {
+		dprintk(VIDC_ERR,
+			"Invalid input, inst = %pK, f = %pK\n", inst, f);
+		return -EINVAL;
+	}
+	if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
+		fmt = msm_comm_get_pixel_fmt_index(venc_formats,
+			ARRAY_SIZE(venc_formats), f->index, CAPTURE_PORT);
+	} else if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+		fmt = msm_comm_get_pixel_fmt_index(venc_formats,
+			ARRAY_SIZE(venc_formats), f->index, OUTPUT_PORT);
+		f->flags = V4L2_FMT_FLAG_COMPRESSED;
+	}
+
+	memset(f->reserved, 0, sizeof(f->reserved));
+	if (fmt) {
+		strlcpy(f->description, fmt->description,
+				sizeof(f->description));
+		f->pixelformat = fmt->fourcc;
+	} else {
+		dprintk(VIDC_DBG, "No more formats found\n");
+		rc = -EINVAL;
+	}
+	return rc;
+}
+
+int msm_venc_s_fmt(struct msm_vidc_inst *inst, struct v4l2_format *f)
+{
+	struct msm_vidc_format *fmt = NULL;
+	int rc = 0;
+	int extra_idx = 0;
+	struct hfi_device *hdev;
+
+	if (!inst || !f) {
+		dprintk(VIDC_ERR,
+			"Invalid input, inst = %pK, format = %pK\n", inst, f);
+		return -EINVAL;
+	}
+
+	if (!inst->core || !inst->core->device) {
+		dprintk(VIDC_ERR, "%s invalid parameters\n", __func__);
+		return -EINVAL;
+	}
+	hdev = inst->core->device;
+
+	if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
+		fmt = msm_comm_get_pixel_fmt_fourcc(venc_formats,
+			ARRAY_SIZE(venc_formats), f->fmt.pix_mp.pixelformat,
+			CAPTURE_PORT);
+		if (!fmt || fmt->type != CAPTURE_PORT) {
+			dprintk(VIDC_ERR,
+				"Format: %d not supported on CAPTURE port\n",
+				f->fmt.pix_mp.pixelformat);
+			rc = -EINVAL;
+			goto exit;
+		}
+
+		memcpy(&inst->fmts[fmt->type], fmt,
+				sizeof(struct msm_vidc_format));
+		f->fmt.pix_mp.num_planes = inst->prop.num_planes[CAPTURE_PORT];
+
+		rc = msm_comm_try_state(inst, MSM_VIDC_OPEN_DONE);
+		if (rc) {
+			dprintk(VIDC_ERR, "Failed to open instance\n");
+			goto exit;
+		}
+
+		inst->prop.width[CAPTURE_PORT] = f->fmt.pix_mp.width;
+		inst->prop.height[CAPTURE_PORT] = f->fmt.pix_mp.height;
+		rc = msm_vidc_check_session_supported(inst);
+		if (rc) {
+			dprintk(VIDC_ERR,
+				"%s: session not supported\n", __func__);
+			goto exit;
+		}
+	} else if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+		struct hal_frame_size frame_sz;
+
+		inst->prop.width[OUTPUT_PORT] = f->fmt.pix_mp.width;
+		inst->prop.height[OUTPUT_PORT] = f->fmt.pix_mp.height;
+
+		rc = msm_vidc_check_session_supported(inst);
+		if (rc) {
+			dprintk(VIDC_ERR,
+				"%s: session not supported\n", __func__);
+			goto exit;
+		}
+
+		frame_sz.buffer_type = HAL_BUFFER_INPUT;
+		frame_sz.width = inst->prop.width[OUTPUT_PORT];
+		frame_sz.height = inst->prop.height[OUTPUT_PORT];
+		dprintk(VIDC_DBG, "width = %d, height = %d\n",
+				frame_sz.width, frame_sz.height);
+		rc = call_hfi_op(hdev, session_set_property, (void *)
+			inst->session, HAL_PARAM_FRAME_SIZE, &frame_sz);
+		if (rc) {
+			dprintk(VIDC_ERR,
+				"Failed to set framesize for Output port\n");
+			goto exit;
+		}
+
+		fmt = msm_comm_get_pixel_fmt_fourcc(venc_formats,
+			ARRAY_SIZE(venc_formats), f->fmt.pix_mp.pixelformat,
+			OUTPUT_PORT);
+		if (!fmt || fmt->type != OUTPUT_PORT) {
+			dprintk(VIDC_ERR,
+				"Format: %d not supported on OUTPUT port\n",
+				f->fmt.pix_mp.pixelformat);
+			rc = -EINVAL;
+			goto exit;
+		}
+		memcpy(&inst->fmts[fmt->type], fmt,
+				sizeof(struct msm_vidc_format));
+		f->fmt.pix_mp.num_planes = inst->prop.num_planes[OUTPUT_PORT];
+		msm_comm_set_color_format(inst, HAL_BUFFER_INPUT, fmt->fourcc);
+	} else {
+		dprintk(VIDC_ERR, "%s - Unsupported buf type: %d\n",
+			__func__, f->type);
+		rc = -EINVAL;
+		goto exit;
+	}
+
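+	/*
+	 * With the format set, fill in the per-plane sizes: the bitstream
+	 * (CAPTURE) plane size comes from the firmware buffer requirements,
+	 * while the raw (OUTPUT) plane size is computed from the frame
+	 * dimensions.
+	 */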
+	if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
+		struct hal_frame_size frame_sz = {0};
+		struct hal_buffer_requirements *bufreq = NULL;
+
+		frame_sz.width = inst->prop.width[CAPTURE_PORT];
+		frame_sz.height = inst->prop.height[CAPTURE_PORT];
+		frame_sz.buffer_type = HAL_BUFFER_OUTPUT;
+		rc = call_hfi_op(hdev, session_set_property, (void *)
+				inst->session, HAL_PARAM_FRAME_SIZE,
+				&frame_sz);
+		if (rc) {
+			dprintk(VIDC_ERR,
+					"Failed to set OUTPUT framesize\n");
+			goto exit;
+		}
+		rc = msm_comm_try_get_bufreqs(inst);
+		if (rc) {
+			dprintk(VIDC_WARN,
+				"%s : Getting buffer reqs failed: %d\n",
+					__func__, rc);
+			goto exit;
+		}
+		bufreq = get_buff_req_buffer(inst, HAL_BUFFER_OUTPUT);
+		f->fmt.pix_mp.plane_fmt[0].sizeimage =
+			bufreq ? bufreq->buffer_size : 0;
+
+		extra_idx = EXTRADATA_IDX(inst->prop.num_planes[fmt->type]);
+		if (extra_idx && (extra_idx < VIDEO_MAX_PLANES)) {
+			bufreq = get_buff_req_buffer(inst,
+					HAL_BUFFER_EXTRADATA_OUTPUT);
+			f->fmt.pix_mp.plane_fmt[extra_idx].sizeimage =
+				bufreq ? bufreq->buffer_size : 0;
+		}
+
+	} else if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+		struct hal_buffer_requirements *bufreq = NULL;
+
+		f->fmt.pix_mp.plane_fmt[0].sizeimage =
+			inst->fmts[fmt->type].get_frame_size(0,
+			f->fmt.pix_mp.height, f->fmt.pix_mp.width);
+
+		extra_idx = EXTRADATA_IDX(inst->prop.num_planes[fmt->type]);
+		if (extra_idx && (extra_idx < VIDEO_MAX_PLANES)) {
+			bufreq = get_buff_req_buffer(inst,
+					HAL_BUFFER_EXTRADATA_INPUT);
+			f->fmt.pix_mp.plane_fmt[extra_idx].sizeimage =
+				bufreq ? bufreq->buffer_size : 0;
+		}
+	}
+exit:
+	return rc;
+}
+
+int msm_venc_g_fmt(struct msm_vidc_inst *inst, struct v4l2_format *f)
+{
+	const struct msm_vidc_format *fmt = NULL;
+	int rc = 0;
+	int i;
+	u32 height, width, num_planes;
+	unsigned int extra_idx = 0;
+	struct hal_buffer_requirements *bufreq = NULL;
+
+	if (!inst || !f) {
+		dprintk(VIDC_ERR,
+			"Invalid input, inst = %pK, format = %pK\n", inst, f);
+		return -EINVAL;
+	}
+
+	rc = msm_comm_try_get_bufreqs(inst);
+	if (rc) {
+		dprintk(VIDC_WARN, "Getting buffer requirements failed: %d\n",
+				rc);
+		return rc;
+	}
+
+	if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
+		fmt = &inst->fmts[CAPTURE_PORT];
+		height = inst->prop.height[CAPTURE_PORT];
+		width = inst->prop.width[CAPTURE_PORT];
+		num_planes = inst->prop.num_planes[CAPTURE_PORT];
+	} else if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+		fmt = &inst->fmts[OUTPUT_PORT];
+		height = inst->prop.height[OUTPUT_PORT];
+		width = inst->prop.width[OUTPUT_PORT];
+		num_planes = inst->prop.num_planes[OUTPUT_PORT];
+	} else {
+		dprintk(VIDC_ERR, "Invalid type: %x\n", f->type);
+		return -ENOTSUPP;
+	}
+
+	f->fmt.pix_mp.pixelformat = fmt->fourcc;
+	f->fmt.pix_mp.height = height;
+	f->fmt.pix_mp.width = width;
+	f->fmt.pix_mp.num_planes = num_planes;
+
+	if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+		for (i = 0; i < num_planes; ++i) {
+			f->fmt.pix_mp.plane_fmt[i].sizeimage =
+				fmt->get_frame_size(i, height, width);
+		}
+	} else if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
+		bufreq = get_buff_req_buffer(inst,
+				HAL_BUFFER_OUTPUT);
+
+		f->fmt.pix_mp.plane_fmt[0].sizeimage =
+			bufreq ? bufreq->buffer_size : 0;
+	}
+	extra_idx = EXTRADATA_IDX(num_planes);
+	if (extra_idx && (extra_idx < VIDEO_MAX_PLANES)) {
+		if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
+			bufreq = get_buff_req_buffer(inst,
+						HAL_BUFFER_EXTRADATA_OUTPUT);
+		else if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
+			bufreq = get_buff_req_buffer(inst,
+						HAL_BUFFER_EXTRADATA_INPUT);
+
+		f->fmt.pix_mp.plane_fmt[extra_idx].sizeimage =
+			bufreq ? bufreq->buffer_size : 0;
+	}
+
+	for (i = 0; i < num_planes; ++i) {
+		if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+			inst->bufq[OUTPUT_PORT].vb2_bufq.plane_sizes[i] =
+				f->fmt.pix_mp.plane_fmt[i].sizeimage;
+		} else if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
+			inst->bufq[CAPTURE_PORT].vb2_bufq.plane_sizes[i] =
+				f->fmt.pix_mp.plane_fmt[i].sizeimage;
+		}
+	}
+
+	return rc;
+}
+
+int msm_venc_reqbufs(struct msm_vidc_inst *inst, struct v4l2_requestbuffers *b)
+{
+	struct buf_queue *q = NULL;
+	int rc = 0;
+
+	if (!inst || !b) {
+		dprintk(VIDC_ERR,
+			"Invalid input, inst = %pK, buffer = %pK\n", inst, b);
+		return -EINVAL;
+	}
+	q = msm_comm_get_vb2q(inst, b->type);
+	if (!q) {
+		dprintk(VIDC_ERR,
+		"Failed to find buffer queue for type = %d\n", b->type);
+		return -EINVAL;
+	}
+
+	mutex_lock(&q->lock);
+	rc = vb2_reqbufs(&q->vb2_bufq, b);
+	mutex_unlock(&q->lock);
+	if (rc)
+		dprintk(VIDC_DBG, "Failed to get reqbufs, %d\n", rc);
+	return rc;
+}
+
+int msm_venc_prepare_buf(struct msm_vidc_inst *inst,
+					struct v4l2_buffer *b)
+{
+	int rc = 0;
+	int i;
+	struct vidc_buffer_addr_info buffer_info = {0};
+	struct hfi_device *hdev;
+	int extra_idx = 0;
+
+	if (!inst || !inst->core || !inst->core->device) {
+		dprintk(VIDC_ERR, "%s invalid parameters\n", __func__);
+		return -EINVAL;
+	}
+
+	hdev = inst->core->device;
+
+	if (inst->state == MSM_VIDC_CORE_INVALID ||
+			inst->core->state == VIDC_CORE_INVALID) {
+		dprintk(VIDC_ERR,
+			"Core %pK in bad state, ignoring prepare buf\n",
+				inst->core);
+		return -EINVAL;
+	}
+
+	switch (b->type) {
+	case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
+		break;
+	case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
+		if (b->length != inst->prop.num_planes[CAPTURE_PORT]) {
+			dprintk(VIDC_ERR,
+				"Planes mismatch: needed: %d, allocated: %d\n",
+				inst->prop.num_planes[CAPTURE_PORT],
+				b->length);
+			rc = -EINVAL;
+			break;
+		}
+
+		for (i = 0; i < min_t(int, b->length, VIDEO_MAX_PLANES); i++) {
+			dprintk(VIDC_DBG, "device_addr = %#lx, size = %d\n",
+				b->m.planes[i].m.userptr,
+				b->m.planes[i].length);
+		}
+		buffer_info.buffer_size = b->m.planes[0].length;
+		buffer_info.buffer_type = HAL_BUFFER_OUTPUT;
+		buffer_info.num_buffers = 1;
+		buffer_info.align_device_addr =
+			b->m.planes[0].m.userptr;
+
+		extra_idx = EXTRADATA_IDX(b->length);
+		if (extra_idx && (extra_idx < VIDEO_MAX_PLANES)) {
+			buffer_info.extradata_addr =
+				b->m.planes[extra_idx].m.userptr;
+			dprintk(VIDC_DBG, "extradata: %#lx\n",
+					b->m.planes[extra_idx].m.userptr);
+			buffer_info.extradata_size =
+				b->m.planes[extra_idx].length;
+		}
+
+		rc = call_hfi_op(hdev, session_set_buffers,
+				(void *)inst->session, &buffer_info);
+		if (rc)
+			dprintk(VIDC_ERR,
+					"vidc_hal_session_set_buffers failed\n");
+		break;
+	default:
+		dprintk(VIDC_ERR,
+			"Buffer type not recognized: %d\n", b->type);
+		break;
+	}
+
+	return rc;
+}
+
+int msm_venc_release_buf(struct msm_vidc_inst *inst,
+					struct v4l2_buffer *b)
+{
+	int i, rc = 0, extra_idx = 0;
+	struct vidc_buffer_addr_info buffer_info = {0};
+	struct hfi_device *hdev;
+
+	if (!inst || !inst->core || !inst->core->device) {
+		dprintk(VIDC_ERR, "%s invalid parameters\n", __func__);
+		return -EINVAL;
+	}
+
+	hdev = inst->core->device;
+
+	rc = msm_comm_try_state(inst, MSM_VIDC_RELEASE_RESOURCES_DONE);
+	if (rc) {
+		dprintk(VIDC_ERR,
+			"Failed to move inst: %pK to release res done state\n",
+			inst);
+		goto exit;
+	}
+	switch (b->type) {
+	case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
+		break;
+	case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE: {
+		if (b->length !=
+			inst->prop.num_planes[CAPTURE_PORT]) {
+			dprintk(VIDC_ERR,
+					"Planes mismatch: needed: %d, to release: %d\n",
+					inst->prop.num_planes[CAPTURE_PORT],
+					b->length);
+			rc = -EINVAL;
+			break;
+		}
+		for (i = 0; i < b->length; i++) {
+			dprintk(VIDC_DBG,
+				"Release device_addr = %#lx, size = %d, %d\n",
+				b->m.planes[i].m.userptr,
+				b->m.planes[i].length, inst->state);
+		}
+		buffer_info.buffer_size = b->m.planes[0].length;
+		buffer_info.buffer_type = HAL_BUFFER_OUTPUT;
+		buffer_info.num_buffers = 1;
+		buffer_info.align_device_addr =
+			b->m.planes[0].m.userptr;
+		extra_idx = EXTRADATA_IDX(b->length);
+		if (extra_idx && (extra_idx < VIDEO_MAX_PLANES))
+			buffer_info.extradata_addr =
+			b->m.planes[extra_idx].m.userptr;
+		buffer_info.response_required = false;
+		rc = call_hfi_op(hdev, session_release_buffers,
+				(void *)inst->session, &buffer_info);
+		if (rc)
+			dprintk(VIDC_ERR,
+					"vidc_hal_session_release_buffers failed\n");
+		}
+		break;
+	default:
+		dprintk(VIDC_ERR, "Buffer type not recognized: %d\n", b->type);
+		break;
+	}
+exit:
+	return rc;
+}
+
+int msm_venc_qbuf(struct msm_vidc_inst *inst, struct v4l2_buffer *b)
+{
+	struct buf_queue *q = NULL;
+	int rc = 0;
+
+	q = msm_comm_get_vb2q(inst, b->type);
+	if (!q) {
+		dprintk(VIDC_ERR,
+			"Failed to find buffer queue for type = %d\n", b->type);
+		return -EINVAL;
+	}
+	mutex_lock(&q->lock);
+	rc = vb2_qbuf(&q->vb2_bufq, b);
+	mutex_unlock(&q->lock);
+	if (rc)
+		dprintk(VIDC_ERR, "Failed to qbuf, %d\n", rc);
+	return rc;
+}
+
+int msm_venc_dqbuf(struct msm_vidc_inst *inst, struct v4l2_buffer *b)
+{
+	struct buf_queue *q = NULL;
+	int rc = 0;
+
+	q = msm_comm_get_vb2q(inst, b->type);
+	if (!q) {
+		dprintk(VIDC_ERR,
+			"Failed to find buffer queue for type = %d\n", b->type);
+		return -EINVAL;
+	}
+	mutex_lock(&q->lock);
+	rc = vb2_dqbuf(&q->vb2_bufq, b, true);
+	mutex_unlock(&q->lock);
+	if (rc)
+		dprintk(VIDC_DBG, "Failed to dqbuf, %d\n", rc);
+	return rc;
+}
+
+int msm_venc_streamon(struct msm_vidc_inst *inst, enum v4l2_buf_type i)
+{
+	int rc = 0;
+	struct buf_queue *q;
+
+	q = msm_comm_get_vb2q(inst, i);
+	if (!q) {
+		dprintk(VIDC_ERR,
+			"Failed to find buffer queue for type = %d\n", i);
+		return -EINVAL;
+	}
+	dprintk(VIDC_DBG, "Calling streamon\n");
+	mutex_lock(&q->lock);
+	rc = vb2_streamon(&q->vb2_bufq, i);
+	mutex_unlock(&q->lock);
+	if (rc)
+		dprintk(VIDC_ERR, "streamon failed on port: %d\n", i);
+	return rc;
+}
+
+int msm_venc_streamoff(struct msm_vidc_inst *inst, enum v4l2_buf_type i)
+{
+	int rc = 0;
+	struct buf_queue *q;
+
+	q = msm_comm_get_vb2q(inst, i);
+	if (!q) {
+		dprintk(VIDC_ERR,
+			"Failed to find buffer queue for type = %d\n", i);
+		return -EINVAL;
+	}
+	dprintk(VIDC_DBG, "Calling streamoff on port: %d\n", i);
+
+	rc = msm_comm_try_state(inst, MSM_VIDC_RELEASE_RESOURCES_DONE);
+	if (rc)
+		dprintk(VIDC_ERR,
+			"Failed to move inst: %pK to res done state\n", inst);
+
+	mutex_lock(&q->lock);
+	rc = vb2_streamoff(&q->vb2_bufq, i);
+	mutex_unlock(&q->lock);
+	if (rc)
+		dprintk(VIDC_ERR, "streamoff failed on port: %d\n", i);
+	return rc;
+}
+
+int msm_venc_ctrl_init(struct msm_vidc_inst *inst)
+{
+	return msm_comm_ctrl_init(inst, msm_venc_ctrls,
+			ARRAY_SIZE(msm_venc_ctrls), &msm_venc_ctrl_ops);
+}
diff -Nruw linux-4.4.115-fbx/drivers/media/platform/msm./vidc/msm_venc.h linux-4.4.115-fbx/drivers/media/platform/msm/vidc/msm_venc.h
--- linux-4.4.115-fbx/drivers/media/platform/msm./vidc/msm_venc.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/media/platform/msm/vidc/msm_venc.h	2019-01-22 16:16:24.463255100 +0100
@@ -0,0 +1,37 @@
+/* Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef _MSM_VENC_H_
+#define _MSM_VENC_H_
+
+#include <media/msm_vidc.h>
+#include "msm_vidc_internal.h"
+
+int msm_venc_inst_init(struct msm_vidc_inst *inst);
+int msm_venc_ctrl_init(struct msm_vidc_inst *inst);
+int msm_venc_querycap(struct msm_vidc_inst *inst, struct v4l2_capability *cap);
+int msm_venc_enum_fmt(struct msm_vidc_inst *inst, struct v4l2_fmtdesc *f);
+int msm_venc_s_fmt(struct msm_vidc_inst *inst, struct v4l2_format *f);
+int msm_venc_g_fmt(struct msm_vidc_inst *inst, struct v4l2_format *f);
+int msm_venc_s_ext_ctrl(struct msm_vidc_inst *inst, struct v4l2_ext_controls *a);
+int msm_venc_reqbufs(struct msm_vidc_inst *inst, struct v4l2_requestbuffers *b);
+int msm_venc_prepare_buf(struct msm_vidc_inst *inst, struct v4l2_buffer *b);
+int msm_venc_release_buf(struct msm_vidc_inst *inst, struct v4l2_buffer *b);
+int msm_venc_qbuf(struct msm_vidc_inst *inst, struct v4l2_buffer *b);
+int msm_venc_dqbuf(struct msm_vidc_inst *inst, struct v4l2_buffer *b);
+int msm_venc_streamon(struct msm_vidc_inst *inst, enum v4l2_buf_type i);
+int msm_venc_streamoff(struct msm_vidc_inst *inst, enum v4l2_buf_type i);
+int msm_venc_cmd(struct msm_vidc_inst *inst, struct v4l2_encoder_cmd *enc);
+int msm_venc_s_parm(struct msm_vidc_inst *inst, struct v4l2_streamparm *a);
+struct vb2_ops *msm_venc_get_vb2q_ops(void);
+
+#endif
diff -Nruw linux-4.4.115-fbx/drivers/media/platform/msm./vidc/msm_vidc.c linux-4.4.115-fbx/drivers/media/platform/msm/vidc/msm_vidc.c
--- linux-4.4.115-fbx/drivers/media/platform/msm./vidc/msm_vidc.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/media/platform/msm/vidc/msm_vidc.c	2019-10-29 09:26:23.957206251 +0100
@@ -0,0 +1,1613 @@
+/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/dma-direction.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/bsearch.h>
+#include <linux/delay.h>
+#include <media/msm_vidc.h>
+#include "msm_vidc_internal.h"
+#include "msm_vidc_debug.h"
+#include "msm_vdec.h"
+#include "msm_venc.h"
+#include "msm_vidc_common.h"
+#include "vidc_hfi_api.h"
+#include "msm_vidc_dcvs.h"
+
+#define MAX_EVENTS 30
+
+static int get_poll_flags(void *instance)
+{
+	struct msm_vidc_inst *inst = instance;
+	struct vb2_queue *outq = &inst->bufq[OUTPUT_PORT].vb2_bufq;
+	struct vb2_queue *capq = &inst->bufq[CAPTURE_PORT].vb2_bufq;
+	struct vb2_buffer *out_vb = NULL;
+	struct vb2_buffer *cap_vb = NULL;
+	unsigned long flags;
+	int rc = 0;
+
+	if (v4l2_event_pending(&inst->event_handler))
+		rc |= POLLPRI;
+
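+	/*
+	 * Peek at the head of each vb2 done list under done_lock: a DONE
+	 * or ERROR buffer on the capture queue makes the fd readable, one
+	 * on the output queue makes it writable.
+	 */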
+	spin_lock_irqsave(&capq->done_lock, flags);
+	if (!list_empty(&capq->done_list))
+		cap_vb = list_first_entry(&capq->done_list, struct vb2_buffer,
+								done_entry);
+	if (cap_vb && (cap_vb->state == VB2_BUF_STATE_DONE
+				|| cap_vb->state == VB2_BUF_STATE_ERROR))
+		rc |= POLLIN | POLLRDNORM;
+	spin_unlock_irqrestore(&capq->done_lock, flags);
+
+	spin_lock_irqsave(&outq->done_lock, flags);
+	if (!list_empty(&outq->done_list))
+		out_vb = list_first_entry(&outq->done_list, struct vb2_buffer,
+								done_entry);
+	if (out_vb && (out_vb->state == VB2_BUF_STATE_DONE
+				|| out_vb->state == VB2_BUF_STATE_ERROR))
+		rc |= POLLOUT | POLLWRNORM;
+	spin_unlock_irqrestore(&outq->done_lock, flags);
+
+	return rc;
+}
+
+int msm_vidc_poll(void *instance, struct file *filp,
+		struct poll_table_struct *wait)
+{
+	struct msm_vidc_inst *inst = instance;
+	struct vb2_queue *outq = NULL;
+	struct vb2_queue *capq = NULL;
+
+	if (!inst)
+		return -EINVAL;
+
+	outq = &inst->bufq[OUTPUT_PORT].vb2_bufq;
+	capq = &inst->bufq[CAPTURE_PORT].vb2_bufq;
+
+	poll_wait(filp, &inst->event_handler.wait, wait);
+	poll_wait(filp, &capq->done_wq, wait);
+	poll_wait(filp, &outq->done_wq, wait);
+	return get_poll_flags(inst);
+}
+EXPORT_SYMBOL(msm_vidc_poll);
+
+int msm_vidc_querycap(void *instance, struct v4l2_capability *cap)
+{
+	struct msm_vidc_inst *inst = instance;
+
+	if (!inst || !cap)
+		return -EINVAL;
+
+	if (inst->session_type == MSM_VIDC_DECODER)
+		return msm_vdec_querycap(inst, cap);
+	else if (inst->session_type == MSM_VIDC_ENCODER)
+		return msm_venc_querycap(instance, cap);
+	return -EINVAL;
+}
+EXPORT_SYMBOL(msm_vidc_querycap);
+
+int msm_vidc_enum_fmt(void *instance, struct v4l2_fmtdesc *f)
+{
+	struct msm_vidc_inst *inst = instance;
+
+	if (!inst || !f)
+		return -EINVAL;
+
+	if (inst->session_type == MSM_VIDC_DECODER)
+		return msm_vdec_enum_fmt(inst, f);
+	else if (inst->session_type == MSM_VIDC_ENCODER)
+		return msm_venc_enum_fmt(instance, f);
+	return -EINVAL;
+}
+EXPORT_SYMBOL(msm_vidc_enum_fmt);
+
+int msm_vidc_query_ctrl(void *instance, struct v4l2_queryctrl *ctrl)
+{
+	struct msm_vidc_inst *inst = instance;
+	int rc = 0;
+
+	if (!inst || !ctrl)
+		return -EINVAL;
+
+	switch (ctrl->id) {
+	case V4L2_CID_MPEG_VIDC_VIDEO_HYBRID_HIERP_MODE:
+		ctrl->maximum = inst->capability.hier_p_hybrid.max;
+		ctrl->minimum = inst->capability.hier_p_hybrid.min;
+		break;
+	case V4L2_CID_MPEG_VIDC_VIDEO_HIER_B_NUM_LAYERS:
+		ctrl->maximum = inst->capability.hier_b.max;
+		ctrl->minimum = inst->capability.hier_b.min;
+		break;
+	case V4L2_CID_MPEG_VIDC_VIDEO_HIER_P_NUM_LAYERS:
+		ctrl->maximum = inst->capability.hier_p.max;
+		ctrl->minimum = inst->capability.hier_p.min;
+		break;
+	default:
+		rc = -EINVAL;
+	}
+	return rc;
+}
+EXPORT_SYMBOL(msm_vidc_query_ctrl);
+
+static int msm_vidc_queryctrl_bsearch_cmp1(const void *key, const void *elt)
+{
+	return *(int32_t *)key - (int32_t)((struct msm_vidc_ctrl *)elt)->id;
+}
+
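+/*
+ * Range comparator for V4L2_CTRL_FLAG_NEXT_CTRL lookups: the key matches
+ * the element whose id range [ctrl[0].id, ctrl[1].id) contains it, i.e.
+ * the last control with an id not greater than the key.
+ */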
+static int msm_vidc_queryctrl_bsearch_cmp2(const void *key, const void *elt)
+{
+	uint32_t id = *(uint32_t *)key;
+	struct msm_vidc_ctrl *ctrl = (struct msm_vidc_ctrl *)elt;
+
+	if (id >= ctrl[0].id && id < ctrl[1].id)
+		return 0;
+	else if (id < ctrl[0].id)
+		return -1;
+	else
+		return 1;
+}
+
+int msm_vidc_query_ext_ctrl(void *instance, struct v4l2_query_ext_ctrl *ctrl)
+{
+	struct msm_vidc_inst *inst = instance;
+	bool get_next_ctrl = false;
+	int i, num_ctrls, rc = 0;
+	struct msm_vidc_ctrl *key = NULL;
+	struct msm_vidc_ctrl *msm_vdec_ctrls;
+
+	if (!inst || !ctrl)
+		return -EINVAL;
+
+	i = ctrl->id;
+	memset(ctrl, 0, sizeof(struct v4l2_query_ext_ctrl));
+	ctrl->id = i;
+
+	if (ctrl->id & V4L2_CTRL_FLAG_NEXT_CTRL)
+		get_next_ctrl = true;
+	else if (ctrl->id & V4L2_CTRL_FLAG_NEXT_COMPOUND)
+		goto query_ext_ctrl_err;
+
+	ctrl->id &= ~V4L2_CTRL_FLAG_NEXT_CTRL;
+	ctrl->id &= ~V4L2_CTRL_FLAG_NEXT_COMPOUND;
+
+	if (ctrl->id > V4L2_CID_PRIVATE_BASE ||
+		(ctrl->id >= V4L2_CID_BASE && ctrl->id <= V4L2_CID_LASTP1))
+		goto query_ext_ctrl_err;
+	else if (ctrl->id == V4L2_CID_PRIVATE_BASE && get_next_ctrl)
+		ctrl->id = V4L2_CID_MPEG_MSM_VIDC_BASE;
+
+	if (inst->session_type == MSM_VIDC_DECODER)
+		msm_vdec_g_ctrl(&msm_vdec_ctrls, &num_ctrls);
+	else
+		return -EINVAL;
+
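+	/*
+	 * msm_vdec_ctrls is sorted by id: an exact query uses a direct
+	 * bsearch, while a NEXT_CTRL query bsearches for the enclosing id
+	 * range and then walks forward to the next control that is not
+	 * disabled.
+	 */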
+	if (!get_next_ctrl)
+		key = bsearch(&ctrl->id, msm_vdec_ctrls, num_ctrls,
+					sizeof(struct msm_vidc_ctrl),
+					msm_vidc_queryctrl_bsearch_cmp1);
+	else {
+		key = bsearch(&ctrl->id, msm_vdec_ctrls, num_ctrls-1,
+					sizeof(struct msm_vidc_ctrl),
+					msm_vidc_queryctrl_bsearch_cmp2);
+
+		if (key && ctrl->id > key->id)
+			key++;
+		if (key) {
+			for (i = key - msm_vdec_ctrls, key = NULL;
+				i < num_ctrls; i++) {
+				if (!(msm_vdec_ctrls[i].flags &
+					V4L2_CTRL_FLAG_DISABLED)) {
+					key = &msm_vdec_ctrls[i];
+					break;
+				}
+			}
+		}
+	}
+
+	if (key) {
+		ctrl->id = key->id;
+		ctrl->type = key->type;
+		strlcpy(ctrl->name, key->name, MAX_NAME_LENGTH);
+		ctrl->minimum = key->minimum;
+		ctrl->maximum = key->maximum;
+		ctrl->step = key->step;
+		ctrl->default_value = key->default_value;
+		ctrl->flags = key->flags;
+		ctrl->elems = 1;
+		ctrl->nr_of_dims = 0;
+		return rc;
+	}
+
+query_ext_ctrl_err:
+	ctrl->name[0] = '\0';
+	ctrl->flags |= V4L2_CTRL_FLAG_DISABLED;
+	return -EINVAL;
+}
+EXPORT_SYMBOL(msm_vidc_query_ext_ctrl);
+
+int msm_vidc_s_fmt(void *instance, struct v4l2_format *f)
+{
+	struct msm_vidc_inst *inst = instance;
+
+	if (!inst || !f)
+		return -EINVAL;
+
+	if (inst->session_type == MSM_VIDC_DECODER)
+		return msm_vdec_s_fmt(inst, f);
+	if (inst->session_type == MSM_VIDC_ENCODER)
+		return msm_venc_s_fmt(instance, f);
+	return -EINVAL;
+}
+EXPORT_SYMBOL(msm_vidc_s_fmt);
+
+int msm_vidc_g_fmt(void *instance, struct v4l2_format *f)
+{
+	struct msm_vidc_inst *inst = instance;
+
+	if (!inst || !f)
+		return -EINVAL;
+
+	if (inst->session_type == MSM_VIDC_DECODER)
+		return msm_vdec_g_fmt(inst, f);
+	else if (inst->session_type == MSM_VIDC_ENCODER)
+		return msm_venc_g_fmt(instance, f);
+	return -EINVAL;
+}
+EXPORT_SYMBOL(msm_vidc_g_fmt);
+
+int msm_vidc_s_ctrl(void *instance, struct v4l2_control *control)
+{
+	struct msm_vidc_inst *inst = instance;
+
+	if (!inst || !control)
+		return -EINVAL;
+
+	return msm_comm_s_ctrl(instance, control);
+}
+EXPORT_SYMBOL(msm_vidc_s_ctrl);
+
+int msm_vidc_g_ctrl(void *instance, struct v4l2_control *control)
+{
+	struct msm_vidc_inst *inst = instance;
+
+	if (!inst || !control)
+		return -EINVAL;
+
+	return msm_comm_g_ctrl(instance, control);
+}
+EXPORT_SYMBOL(msm_vidc_g_ctrl);
+
+int msm_vidc_s_ext_ctrl(void *instance, struct v4l2_ext_controls *control)
+{
+	struct msm_vidc_inst *inst = instance;
+
+	if (!inst || !control)
+		return -EINVAL;
+
+	if (inst->session_type == MSM_VIDC_DECODER)
+		return msm_vdec_s_ext_ctrl(inst, control);
+	if (inst->session_type == MSM_VIDC_ENCODER)
+		return msm_venc_s_ext_ctrl(instance, control);
+	return -EINVAL;
+}
+EXPORT_SYMBOL(msm_vidc_s_ext_ctrl);
+
+int msm_vidc_reqbufs(void *instance, struct v4l2_requestbuffers *b)
+{
+	struct msm_vidc_inst *inst = instance;
+
+	if (!inst || !b)
+		return -EINVAL;
+
+	if (inst->session_type == MSM_VIDC_DECODER)
+		return msm_vdec_reqbufs(inst, b);
+	if (inst->session_type == MSM_VIDC_ENCODER)
+		return msm_venc_reqbufs(instance, b);
+	return -EINVAL;
+}
+EXPORT_SYMBOL(msm_vidc_reqbufs);
+
+static bool ion_hndl_matches(struct msm_vidc_inst *inst,
+			     struct msm_smem *handle, int fd)
+{
+	if (!handle)
+		return false;
+
+	return msm_smem_compare_buffers(inst->mem_client, fd,
+					handle->smem_priv);
+}
+
+struct buffer_info *get_registered_buf(struct msm_vidc_inst *inst,
+		struct v4l2_buffer *b, int idx, int *plane)
+{
+	struct buffer_info *temp;
+	struct buffer_info *ret = NULL;
+	int i;
+	int fd = b->m.planes[idx].reserved[0];
+	u32 buff_off = b->m.planes[idx].reserved[1];
+	u32 size = b->m.planes[idx].length;
+	ion_phys_addr_t device_addr = b->m.planes[idx].m.userptr;
+
+	if (fd < 0 || !plane) {
+		dprintk(VIDC_ERR, "Invalid input\n");
+		goto err_invalid_input;
+	}
+
+	WARN(!mutex_is_locked(&inst->registeredbufs.lock),
+		"Registered buf lock is not acquired for %s", __func__);
+
+	*plane = 0;
+	list_for_each_entry(temp, &inst->registeredbufs.list, list) {
+		if (b->type != temp->type)
+			continue;
+
+		for (i = 0; i < min(temp->num_planes, VIDEO_MAX_PLANES); i++) {
+			bool off_size_matches;
+
+			if (temp->inactive)
+				continue;
+
+			off_size_matches = buff_off == temp->buff_off[i] &&
+				size == temp->size[i];
+
+			if (!off_size_matches)
+				continue;
+
+			if (device_addr != temp->device_addr[i] &&
+			    !ion_hndl_matches(inst, temp->handle[i], fd))
+				continue;
+
+			dprintk(VIDC_DBG, "This memory region is already mapped\n");
+			ret = temp;
+			*plane = i;
+			break;
+		}
+
+		if (ret)
+			break;
+	}
+
+err_invalid_input:
+	return ret;
+}
+
+static struct msm_smem *get_same_fd_buffer(struct msm_vidc_inst *inst, int fd)
+{
+	struct buffer_info *temp;
+	struct msm_smem *same_fd_handle = NULL;
+	int i;
+
+	if (!fd)
+		return NULL;
+
+	if (!inst || fd < 0) {
+		dprintk(VIDC_ERR, "%s: Invalid input\n", __func__);
+		goto err_invalid_input;
+	}
+
+	mutex_lock(&inst->registeredbufs.lock);
+	list_for_each_entry(temp, &inst->registeredbufs.list, list) {
+		for (i = 0; i < min(temp->num_planes, VIDEO_MAX_PLANES); i++) {
+			if (temp->mapped[i] &&
+			    ion_hndl_matches(inst, temp->handle[i], fd)) {
+				temp->same_fd_ref[i]++;
+				dprintk(VIDC_INFO,
+				"Found same fd buffer\n");
+				same_fd_handle = temp->handle[i];
+				break;
+			}
+		}
+		if (same_fd_handle)
+			break;
+	}
+	mutex_unlock(&inst->registeredbufs.lock);
+
+err_invalid_input:
+	return same_fd_handle;
+}
+
+struct buffer_info *device_to_uvaddr(struct msm_vidc_list *buf_list,
+				ion_phys_addr_t device_addr)
+{
+	struct buffer_info *temp = NULL;
+	bool found = false;
+	int i;
+
+	if (!buf_list || !device_addr) {
+		dprintk(VIDC_ERR,
+			"Invalid input- device_addr: %pa buf_list: %pK\n",
+			&device_addr, buf_list);
+		goto err_invalid_input;
+	}
+
+	mutex_lock(&buf_list->lock);
+	list_for_each_entry(temp, &buf_list->list, list) {
+		for (i = 0; i < min(temp->num_planes, VIDEO_MAX_PLANES); i++) {
+			if (!temp->inactive &&
+				temp->device_addr[i] == device_addr) {
+				dprintk(VIDC_INFO,
+					"Found matching device address\n");
+				found = true;
+				break;
+			}
+		}
+
+		if (found)
+			break;
+	}
+	mutex_unlock(&buf_list->lock);
+
+err_invalid_input:
+	return temp;
+}
+
+static inline void populate_buf_info(struct buffer_info *binfo,
+			struct v4l2_buffer *b, u32 i)
+{
+	if (i >= VIDEO_MAX_PLANES) {
+		dprintk(VIDC_ERR, "%s: Invalid input\n", __func__);
+		return;
+	}
+	binfo->type = b->type;
+	binfo->fd[i] = b->m.planes[i].reserved[0];
+	binfo->buff_off[i] = b->m.planes[i].reserved[1];
+	binfo->size[i] = b->m.planes[i].length;
+	binfo->uvaddr[i] = b->m.planes[i].m.userptr;
+	binfo->num_planes = b->length;
+	binfo->memory = b->memory;
+	binfo->v4l2_index = b->index;
+	binfo->timestamp.tv_sec = b->timestamp.tv_sec;
+	binfo->timestamp.tv_usec = b->timestamp.tv_usec;
+	dprintk(VIDC_DBG, "%s: fd[%d] = %d b->index = %d\n",
+			__func__, i, binfo->fd[i], b->index);
+}
+
+static inline void repopulate_v4l2_buffer(struct v4l2_buffer *b,
+					struct buffer_info *binfo)
+{
+	int i = 0;
+
+	b->type = binfo->type;
+	b->length = binfo->num_planes;
+	b->memory = binfo->memory;
+	b->index = binfo->v4l2_index;
+	b->timestamp.tv_sec = binfo->timestamp.tv_sec;
+	b->timestamp.tv_usec = binfo->timestamp.tv_usec;
+	binfo->dequeued = false;
+	for (i = 0; i < binfo->num_planes; ++i) {
+		b->m.planes[i].reserved[0] = binfo->fd[i];
+		b->m.planes[i].reserved[1] = binfo->buff_off[i];
+		b->m.planes[i].length = binfo->size[i];
+		b->m.planes[i].m.userptr = binfo->device_addr[i];
+		dprintk(VIDC_DBG, "%s %d %d %d %pa\n", __func__, binfo->fd[i],
+				binfo->buff_off[i], binfo->size[i],
+				&binfo->device_addr[i]);
+	}
+}
+
+static struct msm_smem *map_buffer(struct msm_vidc_inst *inst,
+		struct v4l2_plane *p, enum hal_buffer buffer_type)
+{
+	struct msm_smem *handle;
+
+	handle = msm_comm_smem_user_to_kernel(inst,
+				p->reserved[0],
+				p->length,
+				buffer_type);
+	if (!handle) {
+		dprintk(VIDC_ERR,
+			"%s: Failed to get device buffer address\n", __func__);
+		return NULL;
+	}
+	return handle;
+}
+
+static inline enum hal_buffer get_hal_buffer_type(
+		struct msm_vidc_inst *inst, struct v4l2_buffer *b)
+{
+	if (b->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
+		return HAL_BUFFER_INPUT;
+	else if (b->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
+		return HAL_BUFFER_OUTPUT;
+	else
+		return -EINVAL;
+}
+
+static inline bool is_dynamic_output_buffer_mode(struct v4l2_buffer *b,
+				struct msm_vidc_inst *inst)
+{
+	return b->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE &&
+		inst->buffer_mode_set[CAPTURE_PORT] == HAL_BUFFER_MODE_DYNAMIC;
+}
+
+static inline void save_v4l2_buffer(struct v4l2_buffer *b,
+						struct buffer_info *binfo)
+{
+	int i = 0;
+
+	for (i = 0; i < b->length; ++i) {
+		if (EXTRADATA_IDX(b->length) &&
+			(i == EXTRADATA_IDX(b->length)) &&
+			!b->m.planes[i].length) {
+			continue;
+		}
+		populate_buf_info(binfo, b, i);
+	}
+
+	if (EXTRADATA_IDX(b->length)) {
+		i = EXTRADATA_IDX(b->length);
+		if (b->m.planes[i].length)
+			binfo->device_addr[i] = binfo->handle[i]->device_addr +
+				binfo->buff_off[i];
+	}
+}
+
+int map_and_register_buf(struct msm_vidc_inst *inst, struct v4l2_buffer *b)
+{
+	struct buffer_info *binfo = NULL;
+	struct buffer_info *temp = NULL, *iterator = NULL;
+	int plane = 0;
+	int i = 0, rc = 0;
+	struct msm_smem *same_fd_handle = NULL;
+
+	if (!b || !inst) {
+		dprintk(VIDC_ERR, "%s: invalid input\n", __func__);
+		return -EINVAL;
+	}
+
+	binfo = kzalloc(sizeof(*binfo), GFP_KERNEL);
+	if (!binfo) {
+		dprintk(VIDC_ERR, "Out of memory\n");
+		rc = -ENOMEM;
+		goto exit;
+	}
+	if (b->length > VIDEO_MAX_PLANES) {
+		dprintk(VIDC_ERR, "Num planes exceeds max: %d, %d\n",
+			b->length, VIDEO_MAX_PLANES);
+		rc = -EINVAL;
+		goto exit;
+	}
+
+	dprintk(VIDC_DBG,
+		"[MAP] Create binfo = %pK fd = %d size = %d type = %d\n",
+		binfo, b->m.planes[0].reserved[0],
+		b->m.planes[0].length, b->type);
+
+	for (i = 0; i < b->length; ++i) {
+		rc = 0;
+		if (EXTRADATA_IDX(b->length) &&
+			(i == EXTRADATA_IDX(b->length)) &&
+			!b->m.planes[i].length) {
+			continue;
+		}
+		mutex_lock(&inst->registeredbufs.lock);
+		temp = get_registered_buf(inst, b, i, &plane);
+		if (temp && !is_dynamic_output_buffer_mode(b, inst)) {
+			dprintk(VIDC_DBG,
+				"This memory region has already been prepared\n");
+			rc = 0;
+			mutex_unlock(&inst->registeredbufs.lock);
+			goto exit;
+		}
+
+		if (temp && is_dynamic_output_buffer_mode(b, inst) && !i) {
+			/*
+			 * Buffer is already present in the registered list:
+			 * increment its ref_count and populate the new v4l2
+			 * buffer values in the existing buffer_info struct.
+			 *
+			 * We will use the saved buffer info and queue it when
+			 * we receive a RELEASE_BUFFER_REFERENCE event from
+			 * the firmware.
+			 */
+			dprintk(VIDC_DBG, "[MAP] Buffer already prepared\n");
+			temp->inactive = false;
+			list_for_each_entry(iterator,
+				&inst->registeredbufs.list, list) {
+				if (iterator == temp) {
+					rc = buf_ref_get(inst, temp);
+					save_v4l2_buffer(b, temp);
+					break;
+				}
+			}
+		}
+		mutex_unlock(&inst->registeredbufs.lock);
+		/*
+		 * rc == 1:
+		 * buffer is mapped and fw has released all references, so
+		 * skip mapping and queue it immediately.
+		 *
+		 * rc == 2:
+		 * buffer is mapped and fw is holding a reference; hold it in
+		 * the driver and queue it later once fw has released it.
+		 */
+		if (rc == 1) {
+			rc = 0;
+			goto exit;
+		} else if (rc == 2) {
+			rc = -EEXIST;
+			goto exit;
+		}
+
+		same_fd_handle = get_same_fd_buffer(
+				inst, b->m.planes[i].reserved[0]);
+
+		populate_buf_info(binfo, b, i);
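+		/*
+		 * If another registered buffer already maps this fd, reuse
+		 * that mapping instead of mapping the ion buffer again; the
+		 * plane then only carries an offset into the shared region.
+		 */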
+		if (same_fd_handle) {
+			binfo->device_addr[i] =
+			same_fd_handle->device_addr + binfo->buff_off[i];
+			b->m.planes[i].m.userptr = binfo->device_addr[i];
+			binfo->mapped[i] = false;
+			binfo->handle[i] = same_fd_handle;
+		} else {
+			binfo->handle[i] = map_buffer(inst, &b->m.planes[i],
+					get_hal_buffer_type(inst, b));
+			if (!binfo->handle[i]) {
+				rc = -EINVAL;
+				goto exit;
+			}
+
+			binfo->mapped[i] = true;
+			binfo->device_addr[i] = binfo->handle[i]->device_addr +
+				binfo->buff_off[i];
+			b->m.planes[i].m.userptr = binfo->device_addr[i];
+		}
+
+		/* We maintain one ref count for all planes */
+		if (!i && is_dynamic_output_buffer_mode(b, inst)) {
+			rc = buf_ref_get(inst, binfo);
+			if (rc < 0)
+				goto exit;
+		}
+		dprintk(VIDC_DBG,
+			"%s: [MAP] binfo = %pK, handle[%d] = %pK, device_addr = %pa, fd = %d, offset = %d, mapped = %d\n",
+			__func__, binfo, i, binfo->handle[i],
+			&binfo->device_addr[i], binfo->fd[i],
+			binfo->buff_off[i], binfo->mapped[i]);
+	}
+
+	mutex_lock(&inst->registeredbufs.lock);
+	list_add_tail(&binfo->list, &inst->registeredbufs.list);
+	mutex_unlock(&inst->registeredbufs.lock);
+	return 0;
+
+exit:
+	kfree(binfo);
+	return rc;
+}
+
+int unmap_and_deregister_buf(struct msm_vidc_inst *inst,
+			struct buffer_info *binfo)
+{
+	int i = 0;
+	struct buffer_info *temp = NULL;
+	bool found = false, keep_node = false;
+
+	if (!inst || !binfo) {
+		dprintk(VIDC_ERR, "%s invalid param: %pK %pK\n",
+			__func__, inst, binfo);
+		return -EINVAL;
+	}
+
+	WARN(!mutex_is_locked(&inst->registeredbufs.lock),
+		"Registered buf lock is not acquired for %s", __func__);
+
+	/*
+	 * Make sure the buffer to be unmapped and deleted
+	 * from the registered list is present in the list.
+	 */
+	list_for_each_entry(temp, &inst->registeredbufs.list, list) {
+		if (temp == binfo) {
+			found = true;
+			break;
+		}
+	}
+
+	/*
+	 * Free the buffer info only if
+	 * - buffer info has not been deleted from registered list
+	 * - vidc client has called dqbuf on the buffer
+	 * - no references are held on the buffer
+	 */
+	if (!found || !temp || !temp->pending_deletion || !temp->dequeued)
+		goto exit;
+
+	for (i = 0; i < temp->num_planes; i++) {
+		dprintk(VIDC_DBG,
+			"%s: [UNMAP] binfo = %pK, handle[%d] = %pK, device_addr = %pa, fd = %d, offset = %d, mapped = %d\n",
+			__func__, temp, i, temp->handle[i],
+			&temp->device_addr[i], temp->fd[i],
+			temp->buff_off[i], temp->mapped[i]);
+		/*
+		 * Unmap the handle only if the buffer has been mapped and no
+		 * other buffer holds a reference to it.
+		 * For buffers sharing the same fd, the buffer is mapped only
+		 * once and subsequent buffers refer to the mapped buffer's
+		 * device address, so do not unmap it and keep its buffer
+		 * info in the registered list.
+		 */
+		if (temp->handle[i] && temp->mapped[i] &&
+			!temp->same_fd_ref[i]) {
+			msm_comm_smem_free(inst,
+				temp->handle[i]);
+		}
+
+		if (temp->same_fd_ref[i])
+			keep_node = true;
+		else {
+			temp->fd[i] = 0;
+			temp->handle[i] = 0;
+			temp->device_addr[i] = 0;
+			temp->uvaddr[i] = 0;
+		}
+	}
+	if (!keep_node) {
+		dprintk(VIDC_DBG, "[UNMAP] AND-FREED binfo: %pK\n", temp);
+		list_del(&temp->list);
+		kfree(temp);
+	} else {
+		temp->inactive = true;
+		dprintk(VIDC_DBG, "[UNMAP] NOT-FREED binfo: %pK\n", temp);
+	}
+exit:
+	return 0;
+}
+
+int qbuf_dynamic_buf(struct msm_vidc_inst *inst,
+			struct buffer_info *binfo)
+{
+	struct v4l2_buffer b = {0};
+	struct v4l2_plane plane[VIDEO_MAX_PLANES] = { {0} };
+
+	if (!binfo) {
+		dprintk(VIDC_ERR, "%s invalid param: %pK\n", __func__, binfo);
+		return -EINVAL;
+	}
+	dprintk(VIDC_DBG, "%s fd[0] = %d\n", __func__, binfo->fd[0]);
+
+	b.m.planes = plane;
+	repopulate_v4l2_buffer(&b, binfo);
+
+	if (inst->session_type == MSM_VIDC_DECODER)
+		return msm_vdec_qbuf(inst, &b);
+	if (inst->session_type == MSM_VIDC_ENCODER)
+		return msm_venc_qbuf(inst, &b);
+
+	return -EINVAL;
+}
+
+int output_buffer_cache_invalidate(struct msm_vidc_inst *inst,
+			struct buffer_info *binfo, struct v4l2_buffer *b)
+{
+	int i = 0;
+	int rc = 0;
+	int size = -1;
+
+	if (!inst) {
+		dprintk(VIDC_ERR, "%s: invalid inst: %pK\n", __func__, inst);
+		return -EINVAL;
+	}
+
+	if (!binfo) {
+		dprintk(VIDC_ERR, "%s: invalid buffer info: %pK\n",
+			__func__, binfo);
+		return -EINVAL;
+	}
+
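+	/*
+	 * For an encoder capture buffer, only the bytes the firmware
+	 * actually wrote (bytesused + data_offset) are invalidated;
+	 * otherwise size stays -1 and the whole plane is invalidated.
+	 */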
+	if (b->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
+		for (i = 0; i < binfo->num_planes; i++) {
+			if (binfo->handle[i]) {
+				struct msm_smem smem = *binfo->handle[i];
+
+				if (inst->session_type == MSM_VIDC_ENCODER &&
+					!i)
+					size = b->m.planes[i].bytesused +
+						b->m.planes[i].data_offset;
+				else
+					size = -1;
+
+				smem.offset =
+					(unsigned int)(binfo->buff_off[i]);
+				smem.size   = binfo->size[i];
+				rc = msm_comm_smem_cache_operations(inst,
+					&smem, SMEM_CACHE_INVALIDATE,
+					size);
+				if (rc) {
+					dprintk(VIDC_ERR,
+						"%s: Failed to invalidate caches: %d\n",
+						__func__, rc);
+					return -EINVAL;
+				}
+			} else
+				dprintk(VIDC_DBG,
+					"%s: NULL handle for plane %d\n",
+					__func__, i);
+		}
+	}
+	return 0;
+}
+
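+/*
+ * A v4l2 buffer is only usable if it is multiplanar, targets one of the
+ * two ports, and carries exactly the number of planes negotiated for
+ * that port.
+ */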
+static bool valid_v4l2_buffer(struct v4l2_buffer *b,
+		struct msm_vidc_inst *inst) {
+	enum vidc_ports port =
+		!V4L2_TYPE_IS_MULTIPLANAR(b->type) ? MAX_PORT_NUM :
+		b->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE ? CAPTURE_PORT :
+		b->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE ? OUTPUT_PORT :
+								MAX_PORT_NUM;
+
+	return port != MAX_PORT_NUM &&
+		inst->prop.num_planes[port] == b->length;
+}
+
+int msm_vidc_prepare_buf(void *instance, struct v4l2_buffer *b)
+{
+	struct msm_vidc_inst *inst = instance;
+
+	if (!inst || !inst->core || !b || !valid_v4l2_buffer(b, inst))
+		return -EINVAL;
+
+	if (inst->state == MSM_VIDC_CORE_INVALID ||
+		inst->core->state == VIDC_CORE_INVALID)
+		return -EINVAL;
+
+	if (is_dynamic_output_buffer_mode(b, inst))
+		return 0;
+
+	if (map_and_register_buf(inst, b))
+		return -EINVAL;
+
+	if (inst->session_type == MSM_VIDC_DECODER)
+		return msm_vdec_prepare_buf(inst, b);
+	if (inst->session_type == MSM_VIDC_ENCODER)
+		return msm_venc_prepare_buf(instance, b);
+	return -EINVAL;
+}
+EXPORT_SYMBOL(msm_vidc_prepare_buf);
+
+int msm_vidc_release_buffers(void *instance, int buffer_type)
+{
+	struct msm_vidc_inst *inst = instance;
+	struct buffer_info *bi, *dummy;
+	struct v4l2_buffer buffer_info;
+	struct v4l2_plane plane[VIDEO_MAX_PLANES];
+	struct vb2_buf_entry *temp, *next;
+	int i, rc = 0;
+
+	if (!inst)
+		return -EINVAL;
+
+	if (!inst->in_reconfig &&
+		inst->state > MSM_VIDC_LOAD_RESOURCES &&
+		inst->state < MSM_VIDC_RELEASE_RESOURCES_DONE) {
+		rc = msm_comm_try_state(inst, MSM_VIDC_RELEASE_RESOURCES_DONE);
+		if (rc) {
+			dprintk(VIDC_ERR,
+					"Failed to move inst: %pK to release res done\n",
+					inst);
+		}
+	}
+
+	/*
+	 * In dynamic buffer mode, the driver needs to release resources,
+	 * but must not call release_buffers on the firmware, as the
+	 * buffers were never registered with the firmware.
+	 */
+	if (buffer_type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE &&
+		inst->buffer_mode_set[CAPTURE_PORT] ==
+				HAL_BUFFER_MODE_DYNAMIC) {
+		goto free_and_unmap;
+	}
+
+	mutex_lock(&inst->registeredbufs.lock);
+	list_for_each_entry(bi, &inst->registeredbufs.list, list) {
+		bool release_buf = false;
+
+		if (bi->type == buffer_type) {
+			buffer_info.type = bi->type;
+			for (i = 0; i < min(bi->num_planes, VIDEO_MAX_PLANES);
+						i++) {
+				plane[i].reserved[0] = bi->fd[i];
+				plane[i].reserved[1] = bi->buff_off[i];
+				plane[i].length = bi->size[i];
+				plane[i].m.userptr = bi->device_addr[i];
+				buffer_info.m.planes = plane;
+				dprintk(VIDC_DBG,
+					"Releasing buffer: %d, %d, %d\n",
+					buffer_info.m.planes[i].reserved[0],
+					buffer_info.m.planes[i].reserved[1],
+					buffer_info.m.planes[i].length);
+			}
+			buffer_info.length = bi->num_planes;
+			release_buf = true;
+		}
+
+		if (!release_buf)
+			continue;
+		if (inst->session_type == MSM_VIDC_DECODER)
+			rc = msm_vdec_release_buf(inst, &buffer_info);
+		if (inst->session_type == MSM_VIDC_ENCODER)
+			rc = msm_venc_release_buf(instance, &buffer_info);
+		if (rc)
+			dprintk(VIDC_ERR,
+				"Failed Release buffer: %d, %d, %d\n",
+				buffer_info.m.planes[0].reserved[0],
+				buffer_info.m.planes[0].reserved[1],
+				buffer_info.m.planes[0].length);
+	}
+	mutex_unlock(&inst->registeredbufs.lock);
+
+free_and_unmap:
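+	/*
+	 * Whatever the buffer mode, drop our smem mappings, free the
+	 * buffer_info nodes of this type, and purge matching entries
+	 * still sitting on the pending queue.
+	 */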
+	mutex_lock(&inst->registeredbufs.lock);
+	list_for_each_entry_safe(bi, dummy, &inst->registeredbufs.list, list) {
+		if (bi->type == buffer_type) {
+			list_del(&bi->list);
+			for (i = 0; i < bi->num_planes; i++) {
+				if (bi->handle[i] && bi->mapped[i]) {
+					dprintk(VIDC_DBG,
+						"%s: [UNMAP] binfo = %pK, handle[%d] = %pK, device_addr = %pa, fd = %d, offset = %d, mapped = %d\n",
+						__func__, bi, i, bi->handle[i],
+						&bi->device_addr[i], bi->fd[i],
+						bi->buff_off[i], bi->mapped[i]);
+					msm_comm_smem_free(inst,
+							bi->handle[i]);
+				}
+			}
+			kfree(bi);
+		}
+	}
+	mutex_unlock(&inst->registeredbufs.lock);
+
+	mutex_lock(&inst->pendingq.lock);
+	list_for_each_entry_safe(temp, next, &inst->pendingq.list, list) {
+		if (temp->vb->type == buffer_type) {
+			list_del(&temp->list);
+			kfree(temp);
+		}
+	}
+	mutex_unlock(&inst->pendingq.lock);
+	return rc;
+}
+EXPORT_SYMBOL(msm_vidc_release_buffers);
+
+int msm_vidc_qbuf(void *instance, struct v4l2_buffer *b)
+{
+	struct msm_vidc_inst *inst = instance;
+	struct buffer_info *binfo;
+	int plane = 0;
+	int rc = 0;
+	int i;
+	int size = -1;
+
+	if (!inst || !inst->core || !b || !valid_v4l2_buffer(b, inst))
+		return -EINVAL;
+
+	if (inst->state == MSM_VIDC_CORE_INVALID ||
+		inst->core->state == VIDC_CORE_INVALID)
+		return -EINVAL;
+
+	rc = map_and_register_buf(inst, b);
+	if (rc == -EEXIST) {
+		if (atomic_read(&inst->in_flush) &&
+			is_dynamic_output_buffer_mode(b, inst)) {
+			dprintk(VIDC_DBG,
+				"Flush in progress, do not hold any buffers in driver\n");
+			msm_comm_flush_dynamic_buffers(inst);
+		}
+		return 0;
+	}
+	if (rc)
+		return rc;
+
+	for (i = 0; i < b->length; ++i) {
+		if (EXTRADATA_IDX(b->length) &&
+			(i == EXTRADATA_IDX(b->length)) &&
+			!b->m.planes[i].length) {
+			b->m.planes[i].m.userptr = 0;
+			continue;
+		}
+		mutex_lock(&inst->registeredbufs.lock);
+		binfo = get_registered_buf(inst, b, i, &plane);
+		mutex_unlock(&inst->registeredbufs.lock);
+		if (!binfo) {
+			dprintk(VIDC_ERR,
+				"This buffer is not registered: %d, %d, %d\n",
+				b->m.planes[i].reserved[0],
+				b->m.planes[i].reserved[1],
+				b->m.planes[i].length);
+			goto err_invalid_buff;
+		}
+		b->m.planes[i].m.userptr = binfo->device_addr[i];
+		dprintk(VIDC_DBG, "Queueing device address = %pa\n",
+				&binfo->device_addr[i]);
+
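+		/*
+		 * Cache handling: capture buffers of a hybrid HEVC session
+		 * are invalidated before queueing, and input buffers are
+		 * cleaned so the firmware sees up-to-date data.
+		 */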
+		if (inst->fmts[OUTPUT_PORT].fourcc ==
+			V4L2_PIX_FMT_HEVC_HYBRID && binfo->handle[i] &&
+			b->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
+			rc = msm_comm_smem_cache_operations(inst,
+				binfo->handle[i], SMEM_CACHE_INVALIDATE, -1);
+			if (rc) {
+				dprintk(VIDC_ERR,
+					"Failed to inv caches: %d\n", rc);
+				goto err_invalid_buff;
+			}
+		}
+
+		if (binfo->handle[i] &&
+			(b->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)) {
+			if (inst->session_type == MSM_VIDC_DECODER && !i)
+				size = b->m.planes[i].bytesused +
+						b->m.planes[i].data_offset;
+			else
+				size = -1;
+			rc = msm_comm_smem_cache_operations(inst,
+					binfo->handle[i], SMEM_CACHE_CLEAN,
+					size);
+			if (rc) {
+				dprintk(VIDC_ERR,
+					"Failed to clean caches: %d\n", rc);
+				goto err_invalid_buff;
+			}
+		}
+	}
+
+	if (inst->session_type == MSM_VIDC_DECODER)
+		return msm_vdec_qbuf(inst, b);
+	if (inst->session_type == MSM_VIDC_ENCODER)
+		return msm_venc_qbuf(instance, b);
+
+err_invalid_buff:
+	return -EINVAL;
+}
+EXPORT_SYMBOL(msm_vidc_qbuf);
+
+int msm_vidc_dqbuf(void *instance, struct v4l2_buffer *b)
+{
+	struct msm_vidc_inst *inst = instance;
+	struct buffer_info *buffer_info = NULL;
+	int i = 0, rc = 0;
+
+	if (!inst || !b || !valid_v4l2_buffer(b, inst))
+		return -EINVAL;
+
+	if (inst->session_type == MSM_VIDC_DECODER)
+		rc = msm_vdec_dqbuf(instance, b);
+	if (inst->session_type == MSM_VIDC_ENCODER)
+		rc = msm_venc_dqbuf(instance, b);
+
+	if (rc)
+		return rc;
+
+	for (i = 0; i < b->length; i++) {
+		if (EXTRADATA_IDX(b->length) &&
+			i == EXTRADATA_IDX(b->length)) {
+			continue;
+		}
+		buffer_info = device_to_uvaddr(&inst->registeredbufs,
+			b->m.planes[i].m.userptr);
+
+		if (!buffer_info) {
+			dprintk(VIDC_ERR,
+				"%s no buffer info registered for buffer addr: %#lx\n",
+				__func__, b->m.planes[i].m.userptr);
+			return -EINVAL;
+		}
+
+		b->m.planes[i].m.userptr = buffer_info->uvaddr[i];
+		b->m.planes[i].reserved[0] = buffer_info->fd[i];
+		b->m.planes[i].reserved[1] = buffer_info->buff_off[i];
+		if (!(inst->flags & VIDC_SECURE) && !b->m.planes[i].m.userptr) {
+			dprintk(VIDC_ERR,
+			"%s: Failed to find user virtual address, %#lx, %d, %d\n",
+			__func__, b->m.planes[i].m.userptr, b->type, i);
+			return -EINVAL;
+		}
+	}
+
+	if (!buffer_info) {
+		dprintk(VIDC_ERR,
+			"%s: error - no buffer info found in registered list\n",
+			__func__);
+		return -EINVAL;
+	}
+
+	rc = output_buffer_cache_invalidate(inst, buffer_info, b);
+	if (rc)
+		return rc;
+
+
+		buffer_info->dequeued = true;
+
+		dprintk(VIDC_DBG, "[DEQUEUED]: fd[0] = %d\n",
+			buffer_info->fd[0]);
+		mutex_lock(&inst->registeredbufs.lock);
+		rc = unmap_and_deregister_buf(inst, buffer_info);
+		mutex_unlock(&inst->registeredbufs.lock);
+	}
+
+	return rc;
+}
+EXPORT_SYMBOL(msm_vidc_dqbuf);
+
+int msm_vidc_streamon(void *instance, enum v4l2_buf_type i)
+{
+	struct msm_vidc_inst *inst = instance;
+
+	if (!inst)
+		return -EINVAL;
+
+	if (inst->session_type == MSM_VIDC_DECODER)
+		return msm_vdec_streamon(inst, i);
+	if (inst->session_type == MSM_VIDC_ENCODER)
+		return msm_venc_streamon(instance, i);
+	return -EINVAL;
+}
+EXPORT_SYMBOL(msm_vidc_streamon);
+
+int msm_vidc_streamoff(void *instance, enum v4l2_buf_type i)
+{
+	struct msm_vidc_inst *inst = instance;
+
+	if (!inst)
+		return -EINVAL;
+
+	if (inst->session_type == MSM_VIDC_DECODER)
+		return msm_vdec_streamoff(inst, i);
+	if (inst->session_type == MSM_VIDC_ENCODER)
+		return msm_venc_streamoff(instance, i);
+	return -EINVAL;
+}
+EXPORT_SYMBOL(msm_vidc_streamoff);
+
+int msm_vidc_enum_framesizes(void *instance, struct v4l2_frmsizeenum *fsize)
+{
+	struct msm_vidc_inst *inst = instance;
+	struct msm_vidc_capability *capability = NULL;
+	enum hal_video_codec codec;
+	int i;
+
+	if (!inst || !fsize) {
+		dprintk(VIDC_ERR, "%s: invalid parameter: %pK %pK\n",
+				__func__, inst, fsize);
+		return -EINVAL;
+	}
+	if (!inst->core)
+		return -EINVAL;
+	if (fsize->index != 0)
+		return -EINVAL;
+
+	codec = get_hal_codec(fsize->pixel_format);
+	if (codec == HAL_UNUSED_CODEC)
+		return -EINVAL;
+
+	for (i = 0; i < VIDC_MAX_SESSIONS; i++) {
+		if (inst->core->capabilities[i].codec == codec) {
+			capability = &inst->core->capabilities[i];
+			break;
+		}
+	}
+
+	if (capability) {
+		fsize->type = V4L2_FRMSIZE_TYPE_STEPWISE;
+		fsize->stepwise.min_width = capability->width.min;
+		fsize->stepwise.max_width = capability->width.max;
+		fsize->stepwise.step_width = capability->width.step_size;
+		fsize->stepwise.min_height = capability->height.min;
+		fsize->stepwise.max_height = capability->height.max;
+		fsize->stepwise.step_height = capability->height.step_size;
+	} else {
+		dprintk(VIDC_ERR, "%s: Invalid Pixel Fmt %#x\n",
+				__func__, fsize->pixel_format);
+		return -EINVAL;
+	}
+	return 0;
+}
+EXPORT_SYMBOL(msm_vidc_enum_framesizes);
+
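+/*
+ * Minimal vb2 mem ops: the driver maps userptr buffers itself through
+ * smem, but vb2 treats a NULL get_userptr return as failure, so a dummy
+ * non-NULL cookie is handed back and there is nothing to undo on put.
+ */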
+static void *vidc_get_userptr(void *alloc_ctx, unsigned long vaddr,
+				unsigned long size, enum dma_data_direction dma_dir)
+{
+	return (void *)0xdeadbeef;
+}
+
+static void vidc_put_userptr(void *buf_priv)
+{
+}
+
+static const struct vb2_mem_ops msm_vidc_vb2_mem_ops = {
+	.get_userptr = vidc_get_userptr,
+	.put_userptr = vidc_put_userptr,
+};
+
+static inline int vb2_bufq_init(struct msm_vidc_inst *inst,
+		enum v4l2_buf_type type, enum session_type sess)
+{
+	struct vb2_queue *q = NULL;
+
+	if (type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
+		q = &inst->bufq[CAPTURE_PORT].vb2_bufq;
+	} else if (type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+		q = &inst->bufq[OUTPUT_PORT].vb2_bufq;
+	} else {
+		dprintk(VIDC_ERR, "buf_type = %d not recognised\n", type);
+		return -EINVAL;
+	}
+
+	q->type = type;
+	q->io_modes = VB2_MMAP | VB2_USERPTR;
+	q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+
+	if (sess == MSM_VIDC_DECODER)
+		q->ops = msm_vdec_get_vb2q_ops();
+	else if (sess == MSM_VIDC_ENCODER)
+		q->ops = msm_venc_get_vb2q_ops();
+	q->mem_ops = &msm_vidc_vb2_mem_ops;
+	q->drv_priv = inst;
+	q->allow_zero_bytesused = 1;
+	return vb2_queue_init(q);
+}
+
+static int setup_event_queue(void *inst,
+				struct video_device *pvdev)
+{
+	int rc = 0;
+	struct msm_vidc_inst *vidc_inst = (struct msm_vidc_inst *)inst;
+
+	v4l2_fh_init(&vidc_inst->event_handler, pvdev);
+	v4l2_fh_add(&vidc_inst->event_handler);
+
+	return rc;
+}
+
+int msm_vidc_subscribe_event(void *inst, const struct v4l2_event_subscription *sub)
+{
+	int rc = 0;
+	struct msm_vidc_inst *vidc_inst = (struct msm_vidc_inst *)inst;
+
+	if (!inst || !sub)
+		return -EINVAL;
+
+	rc = v4l2_event_subscribe(&vidc_inst->event_handler, sub, MAX_EVENTS, NULL);
+	return rc;
+}
+EXPORT_SYMBOL(msm_vidc_subscribe_event);
+
+int msm_vidc_unsubscribe_event(void *inst, const struct v4l2_event_subscription *sub)
+{
+	int rc = 0;
+	struct msm_vidc_inst *vidc_inst = (struct msm_vidc_inst *)inst;
+
+	if (!inst || !sub)
+		return -EINVAL;
+
+	rc = v4l2_event_unsubscribe(&vidc_inst->event_handler, sub);
+	return rc;
+}
+EXPORT_SYMBOL(msm_vidc_unsubscribe_event);
+
+int msm_vidc_dqevent(void *inst, struct v4l2_event *event)
+{
+	int rc = 0;
+	struct msm_vidc_inst *vidc_inst = (struct msm_vidc_inst *)inst;
+
+	if (!inst || !event)
+		return -EINVAL;
+
+	rc = v4l2_event_dequeue(&vidc_inst->event_handler, event, false);
+	return rc;
+}
+EXPORT_SYMBOL(msm_vidc_dqevent);
+
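+/*
+ * Count the open (and secure) instances on the core and report overload
+ * when either count exceeds the limits advertised in the platform
+ * resources.
+ */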
+static bool msm_vidc_check_for_inst_overload(struct msm_vidc_core *core)
+{
+	u32 instance_count = 0;
+	u32 secure_instance_count = 0;
+	struct msm_vidc_inst *inst = NULL;
+	bool overload = false;
+
+	mutex_lock(&core->lock);
+	list_for_each_entry(inst, &core->instances, list) {
+		instance_count++;
+		/* This flag is not updated yet for the current instance */
+		if (inst->flags & VIDC_SECURE)
+			secure_instance_count++;
+	}
+	mutex_unlock(&core->lock);
+
+	/* Instance count includes current instance as well. */
+
+	if ((instance_count > core->resources.max_inst_count) ||
+		(secure_instance_count > core->resources.max_secure_inst_count))
+		overload = true;
+	return overload;
+}
+
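+/*
+ * Session open path: allocate and initialise the instance, create its
+ * smem client, controls and vb2 queues, move it to INIT_DONE and only
+ * then publish it on core->instances.
+ */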
+void *msm_vidc_open(int core_id, int session_type)
+{
+	struct msm_vidc_inst *inst = NULL;
+	struct msm_vidc_core *core = NULL;
+	int rc = 0;
+	int i = 0;
+	if (core_id >= MSM_VIDC_CORES_MAX ||
+			session_type >= MSM_VIDC_MAX_DEVICES) {
+		dprintk(VIDC_ERR, "Invalid input, core_id = %d, session = %d\n",
+			core_id, session_type);
+		goto err_invalid_core;
+	}
+	core = get_vidc_core(core_id);
+	if (!core) {
+		dprintk(VIDC_ERR,
+			"Failed to find core for core_id = %d\n", core_id);
+		goto err_invalid_core;
+	}
+
+	inst = kzalloc(sizeof(*inst), GFP_KERNEL);
+	if (!inst) {
+		dprintk(VIDC_ERR, "Failed to allocate memory\n");
+		rc = -ENOMEM;
+		goto err_invalid_core;
+	}
+
+	dprintk(VIDC_INFO, "Opening video instance: %pK, %d\n",
+		inst, session_type);
+
+	mutex_init(&inst->sync_lock);
+	mutex_init(&inst->bufq[CAPTURE_PORT].lock);
+	mutex_init(&inst->bufq[OUTPUT_PORT].lock);
+	mutex_init(&inst->lock);
+
+	INIT_MSM_VIDC_LIST(&inst->pendingq);
+	INIT_MSM_VIDC_LIST(&inst->scratchbufs);
+	INIT_MSM_VIDC_LIST(&inst->persistbufs);
+	INIT_MSM_VIDC_LIST(&inst->pending_getpropq);
+	INIT_MSM_VIDC_LIST(&inst->outputbufs);
+	INIT_MSM_VIDC_LIST(&inst->registeredbufs);
+
+	kref_init(&inst->kref);
+
+	inst->session_type = session_type;
+	inst->state = MSM_VIDC_CORE_UNINIT_DONE;
+	inst->core = core;
+	inst->bit_depth = MSM_VIDC_BIT_DEPTH_8;
+	inst->instant_bitrate = 0;
+	inst->pic_struct = MSM_VIDC_PIC_STRUCT_PROGRESSIVE;
+	inst->colour_space = MSM_VIDC_BT601_6_525;
+
+	for (i = SESSION_MSG_INDEX(SESSION_MSG_START);
+		i <= SESSION_MSG_INDEX(SESSION_MSG_END); i++) {
+		init_completion(&inst->completions[i]);
+	}
+	inst->mem_client = msm_smem_new_client(SMEM_ION,
+					&inst->core->resources, session_type);
+	if (!inst->mem_client) {
+		dprintk(VIDC_ERR, "Failed to create memory client\n");
+		goto fail_mem_client;
+	}
+	if (session_type == MSM_VIDC_DECODER) {
+		msm_vdec_inst_init(inst);
+		rc = msm_vdec_ctrl_init(inst);
+	} else if (session_type == MSM_VIDC_ENCODER) {
+		msm_venc_inst_init(inst);
+		rc = msm_venc_ctrl_init(inst);
+	}
+
+	if (rc)
+		goto fail_bufq_capture;
+
+	msm_dcvs_init(inst);
+	rc = vb2_bufq_init(inst, V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE,
+			session_type);
+	if (rc) {
+		dprintk(VIDC_ERR,
+			"Failed to initialize vb2 queue on capture port\n");
+		goto fail_bufq_capture;
+	}
+	rc = vb2_bufq_init(inst, V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE,
+			session_type);
+	if (rc) {
+		dprintk(VIDC_ERR,
+			"Failed to initialize vb2 queue on output port\n");
+		goto fail_bufq_output;
+	}
+
+	setup_event_queue(inst, &core->vdev[session_type].vdev);
+
+	rc = msm_comm_try_state(inst, MSM_VIDC_CORE_INIT_DONE);
+	if (rc) {
+		dprintk(VIDC_ERR,
+			"Failed to move video instance to init state\n");
+		goto fail_init;
+	}
+
+	if (msm_vidc_check_for_inst_overload(core)) {
+		dprintk(VIDC_ERR,
+			"Instance count reached max limit, rejecting session\n");
+		goto fail_init;
+	}
+
+	mutex_lock(&core->lock);
+	list_add_tail(&inst->list, &core->instances);
+	mutex_unlock(&core->lock);
+
+	inst->debugfs_root =
+		msm_vidc_debugfs_init_inst(inst, core->debugfs_root);
+
+	return inst;
+fail_init:
+	v4l2_fh_del(&inst->event_handler);
+	v4l2_fh_exit(&inst->event_handler);
+	vb2_queue_release(&inst->bufq[OUTPUT_PORT].vb2_bufq);
+fail_bufq_output:
+	vb2_queue_release(&inst->bufq[CAPTURE_PORT].vb2_bufq);
+fail_bufq_capture:
+	msm_comm_ctrl_deinit(inst);
+	msm_smem_delete_client(inst->mem_client);
+fail_mem_client:
+	mutex_destroy(&inst->sync_lock);
+	mutex_destroy(&inst->bufq[CAPTURE_PORT].lock);
+	mutex_destroy(&inst->bufq[OUTPUT_PORT].lock);
+	mutex_destroy(&inst->lock);
+
+	DEINIT_MSM_VIDC_LIST(&inst->pendingq);
+	DEINIT_MSM_VIDC_LIST(&inst->scratchbufs);
+	DEINIT_MSM_VIDC_LIST(&inst->persistbufs);
+	DEINIT_MSM_VIDC_LIST(&inst->pending_getpropq);
+	DEINIT_MSM_VIDC_LIST(&inst->outputbufs);
+	DEINIT_MSM_VIDC_LIST(&inst->registeredbufs);
+
+	kfree(inst);
+	inst = NULL;
+err_invalid_core:
+	return inst;
+}
+EXPORT_SYMBOL(msm_vidc_open);
+
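+/* Drop pending queue entries and release scratch/persist/output buffers. */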
+static void cleanup_instance(struct msm_vidc_inst *inst)
+{
+	struct vb2_buf_entry *entry, *dummy;
+	if (inst) {
+
+		mutex_lock(&inst->pendingq.lock);
+		list_for_each_entry_safe(entry, dummy, &inst->pendingq.list,
+				list) {
+			list_del(&entry->list);
+			kfree(entry);
+		}
+		mutex_unlock(&inst->pendingq.lock);
+
+		if (msm_comm_release_scratch_buffers(inst, false)) {
+			dprintk(VIDC_ERR,
+				"Failed to release scratch buffers\n");
+		}
+
+		if (msm_comm_release_persist_buffers(inst)) {
+			dprintk(VIDC_ERR,
+				"Failed to release persist buffers\n");
+		}
+
+		if (msm_comm_release_output_buffers(inst)) {
+			dprintk(VIDC_ERR,
+				"Failed to release output buffers\n");
+		}
+
+		mutex_lock(&inst->pending_getpropq.lock);
+		if (!list_empty(&inst->pending_getpropq.list)) {
+			dprintk(VIDC_ERR,
+				"pending_getpropq not empty\n");
+			WARN_ON(VIDC_DBG_WARN_ENABLE);
+		}
+		mutex_unlock(&inst->pending_getpropq.lock);
+	}
+}
+
+int msm_vidc_destroy(struct msm_vidc_inst *inst)
+{
+	struct msm_vidc_core *core;
+
+	if (!inst || !inst->core)
+		return -EINVAL;
+
+	core = inst->core;
+
+	mutex_lock(&core->lock);
+	/* inst->list lives in core->instances */
+	list_del(&inst->list);
+	mutex_unlock(&core->lock);
+
+	msm_comm_ctrl_deinit(inst);
+
+	DEINIT_MSM_VIDC_LIST(&inst->pendingq);
+	DEINIT_MSM_VIDC_LIST(&inst->scratchbufs);
+	DEINIT_MSM_VIDC_LIST(&inst->persistbufs);
+	DEINIT_MSM_VIDC_LIST(&inst->pending_getpropq);
+	DEINIT_MSM_VIDC_LIST(&inst->outputbufs);
+	DEINIT_MSM_VIDC_LIST(&inst->registeredbufs);
+
+	v4l2_fh_del(&inst->event_handler);
+	v4l2_fh_exit(&inst->event_handler);
+
+	mutex_destroy(&inst->sync_lock);
+	mutex_destroy(&inst->bufq[CAPTURE_PORT].lock);
+	mutex_destroy(&inst->bufq[OUTPUT_PORT].lock);
+	mutex_destroy(&inst->lock);
+
+	msm_vidc_debugfs_deinit_inst(inst);
+
+	dprintk(VIDC_INFO, "Closed video instance: %pK\n", inst);
+
+	kfree(inst);
+	return 0;
+}
+
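+/*
+ * Close path: free mapped capture buffers, clean up the instance, move it
+ * to UNINIT (or force-clean an invalid one) and drop the final kref.  The
+ * nested close_helper below is a GCC extension used as the kref release
+ * callback.
+ */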
+int msm_vidc_close(void *instance)
+{
+	void close_helper(struct kref *kref)
+	{
+		struct msm_vidc_inst *inst = container_of(kref,
+				struct msm_vidc_inst, kref);
+
+		msm_vidc_destroy(inst);
+	}
+
+	struct msm_vidc_inst *inst = instance;
+	struct buffer_info *bi, *dummy;
+	int rc = 0, i = 0;
+
+	if (!inst || !inst->core)
+		return -EINVAL;
+
+	mutex_lock(&inst->registeredbufs.lock);
+	list_for_each_entry_safe(bi, dummy, &inst->registeredbufs.list, list) {
+		if (bi->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+			int i = 0;
+
+			list_del(&bi->list);
+
+			for (i = 0; i < min(bi->num_planes, VIDEO_MAX_PLANES);
+					i++) {
+				if (bi->handle[i] && bi->mapped[i])
+					msm_comm_smem_free(inst, bi->handle[i]);
+			}
+
+			kfree(bi);
+		}
+	}
+	mutex_unlock(&inst->registeredbufs.lock);
+
+	cleanup_instance(inst);
+	if (inst->state != MSM_VIDC_CORE_INVALID &&
+		inst->core->state != VIDC_CORE_INVALID)
+		rc = msm_comm_try_state(inst, MSM_VIDC_CORE_UNINIT);
+	else
+		rc = msm_comm_force_cleanup(inst);
+	if (rc)
+		dprintk(VIDC_ERR,
+			"Failed to move video instance to uninit state\n");
+
+	msm_comm_session_clean(inst);
+	msm_smem_delete_client(inst->mem_client);
+
+	for (i = 0; i < MAX_PORT_NUM; i++) {
+		mutex_lock(&inst->bufq[i].lock);
+		vb2_queue_release(&inst->bufq[i].vb2_bufq);
+		mutex_unlock(&inst->bufq[i].lock);
+	}
+
+	kref_put(&inst->kref, close_helper);
+	return 0;
+}
+EXPORT_SYMBOL(msm_vidc_close);
+
+int msm_vidc_suspend(int core_id)
+{
+	return msm_comm_suspend(core_id);
+}
+EXPORT_SYMBOL(msm_vidc_suspend);
+
diff -Nruw linux-4.4.115-fbx/drivers/media/platform/msm./vidc/msm_vidc_common.c linux-4.4.115-fbx/drivers/media/platform/msm/vidc/msm_vidc_common.c
--- linux-4.4.115-fbx/drivers/media/platform/msm./vidc/msm_vidc_common.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/media/platform/msm/vidc/msm_vidc_common.c	2019-10-29 09:26:23.961206290 +0100
@@ -0,0 +1,5406 @@
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/jiffies.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/kernel.h>
+#include <linux/bitops.h>
+#include <soc/qcom/subsystem_restart.h>
+#include <asm/div64.h>
+#include "msm_vidc_common.h"
+#include "vidc_hfi_api.h"
+#include "msm_vidc_debug.h"
+#include "msm_vidc_dcvs.h"
+#include "msm_vdec.h"
+
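+/*
+ * Instance states are strictly ordered, so "already in state" reduces to
+ * a >= comparison between the flipped and desired state values.
+ */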
+#define IS_ALREADY_IN_STATE(__p, __d) ({\
+	int __rc = (__p >= __d);\
+	__rc; \
+})
+
+#define SUM_ARRAY(__arr, __start, __end) ({\
+		int __index;\
+		typeof((__arr)[0]) __sum = 0;\
+		for (__index = (__start); __index <= (__end); __index++) {\
+			if (__index >= 0 && __index < ARRAY_SIZE(__arr))\
+				__sum += __arr[__index];\
+		} \
+		__sum;\
+})
+
+#define V4L2_EVENT_SEQ_CHANGED_SUFFICIENT \
+		V4L2_EVENT_MSM_VIDC_PORT_SETTINGS_CHANGED_SUFFICIENT
+#define V4L2_EVENT_SEQ_CHANGED_INSUFFICIENT \
+		V4L2_EVENT_MSM_VIDC_PORT_SETTINGS_CHANGED_INSUFFICIENT
+#define V4L2_EVENT_RELEASE_BUFFER_REFERENCE \
+		V4L2_EVENT_MSM_VIDC_RELEASE_BUFFER_REFERENCE
+
+#define MAX_SUPPORTED_INSTANCES 16
+
+const char *const mpeg_video_vidc_extradata[] = {
+	"Extradata none",
+	"Extradata MB Quantization",
+	"Extradata Interlace Video",
+	"Extradata VC1 Framedisp",
+	"Extradata VC1 Seqdisp",
+	"Extradata timestamp",
+	"Extradata S3D Frame Packing",
+	"Extradata Frame Rate",
+	"Extradata Panscan Window",
+	"Extradata Recovery point SEI",
+	"Extradata Multislice info",
+	"Extradata number of concealed MB",
+	"Extradata metadata filler",
+	"Extradata input crop",
+	"Extradata digital zoom",
+	"Extradata aspect ratio",
+	"Extradata mpeg2 seqdisp",
+	"Extradata stream userdata",
+	"Extradata frame QP",
+	"Extradata frame bits info",
+	"Extradata LTR",
+	"Extradata macroblock metadata",
+	"Extradata VQZip SEI",
+	"Extradata YUV Stats",
+	"Extradata ROI QP",
+	"Extradata output crop",
+	"Extradata display colour SEI",
+	"Extradata light level SEI",
+	"Extradata PQ Info",
+	"Extradata display VUI",
+	"Extradata vpx color space",
+};
+
+struct getprop_buf {
+	struct list_head list;
+	void *data;
+};
+
+static void msm_comm_generate_session_error(struct msm_vidc_inst *inst);
+static void msm_comm_generate_sys_error(struct msm_vidc_inst *inst);
+static void handle_session_error(enum hal_command_response cmd, void *data);
+static void msm_vidc_print_running_insts(struct msm_vidc_core *core);
+static void msm_comm_print_debug_info(struct msm_vidc_inst *inst);
+
+bool msm_comm_turbo_session(struct msm_vidc_inst *inst)
+{
+	return !!(inst->flags & VIDC_TURBO);
+}
+
+static inline bool is_thumbnail_session(struct msm_vidc_inst *inst)
+{
+	return !!(inst->flags & VIDC_THUMBNAIL);
+}
+
+static inline bool is_low_power_session(struct msm_vidc_inst *inst)
+{
+	return !!(inst->flags & VIDC_LOW_POWER);
+}
+
+static inline bool is_realtime_session(struct msm_vidc_inst *inst)
+{
+	return !!(inst->flags & VIDC_REALTIME);
+}
+
+int msm_comm_g_ctrl(struct msm_vidc_inst *inst, struct v4l2_control *ctrl)
+{
+	return v4l2_g_ctrl(&inst->ctrl_handler, ctrl);
+}
+
+int msm_comm_s_ctrl(struct msm_vidc_inst *inst, struct v4l2_control *ctrl)
+{
+	return v4l2_s_ctrl(NULL, &inst->ctrl_handler, ctrl);
+}
+
+int msm_comm_g_ctrl_for_id(struct msm_vidc_inst *inst, int id)
+{
+	int rc = 0;
+	struct v4l2_control ctrl = {
+		.id = id,
+	};
+
+	rc = msm_comm_g_ctrl(inst, &ctrl);
+	return rc ?: ctrl.value;
+}
+
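+/*
+ * Build a flat array of every control so v4l2_ctrl_cluster() can treat
+ * them as one cluster and serialise get/set across all of them.
+ */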
+static struct v4l2_ctrl **get_super_cluster(struct msm_vidc_inst *inst,
+				int num_ctrls)
+{
+	int c = 0;
+	struct v4l2_ctrl **cluster;
+
+	if (!inst || num_ctrls <= 0)
+		return NULL;
+
+	cluster = kmalloc(sizeof(struct v4l2_ctrl *) * num_ctrls,
+			GFP_KERNEL);
+	if (!cluster)
+		return NULL;
+
+	for (c = 0; c < num_ctrls; c++)
+		cluster[c] = inst->ctrls[c];
+
+	return cluster;
+}
+
+int msm_comm_ctrl_init(struct msm_vidc_inst *inst,
+		struct msm_vidc_ctrl *drv_ctrls, u32 num_ctrls,
+		const struct v4l2_ctrl_ops *ctrl_ops)
+{
+	int idx = 0;
+	struct v4l2_ctrl_config ctrl_cfg = {0};
+	int ret_val = 0;
+
+	if (!inst || !drv_ctrls || !ctrl_ops || !num_ctrls) {
+		dprintk(VIDC_ERR, "%s - invalid input\n", __func__);
+		return -EINVAL;
+	}
+
+	inst->ctrls = kcalloc(num_ctrls, sizeof(struct v4l2_ctrl *),
+				GFP_KERNEL);
+	if (!inst->ctrls) {
+		dprintk(VIDC_ERR, "%s - failed to allocate ctrl\n", __func__);
+		return -ENOMEM;
+	}
+
+	ret_val = v4l2_ctrl_handler_init(&inst->ctrl_handler, num_ctrls);
+
+	if (ret_val) {
+		dprintk(VIDC_ERR, "CTRL ERR: Control handler init failed, %d\n",
+				inst->ctrl_handler.error);
+		return ret_val;
+	}
+
+	for (; idx < num_ctrls; idx++) {
+		struct v4l2_ctrl *ctrl = NULL;
+
+		if (IS_PRIV_CTRL(drv_ctrls[idx].id)) {
+			/*add private control*/
+			ctrl_cfg.def = drv_ctrls[idx].default_value;
+			ctrl_cfg.flags = 0;
+			ctrl_cfg.id = drv_ctrls[idx].id;
+			ctrl_cfg.max = drv_ctrls[idx].maximum;
+			ctrl_cfg.min = drv_ctrls[idx].minimum;
+			ctrl_cfg.menu_skip_mask =
+				drv_ctrls[idx].menu_skip_mask;
+			ctrl_cfg.name = drv_ctrls[idx].name;
+			ctrl_cfg.ops = ctrl_ops;
+			ctrl_cfg.step = drv_ctrls[idx].step;
+			ctrl_cfg.type = drv_ctrls[idx].type;
+			ctrl_cfg.qmenu = drv_ctrls[idx].qmenu;
+
+			ctrl = v4l2_ctrl_new_custom(&inst->ctrl_handler,
+					&ctrl_cfg, NULL);
+		} else {
+			if (drv_ctrls[idx].type == V4L2_CTRL_TYPE_MENU) {
+				ctrl = v4l2_ctrl_new_std_menu(
+					&inst->ctrl_handler,
+					ctrl_ops,
+					drv_ctrls[idx].id,
+					drv_ctrls[idx].maximum,
+					drv_ctrls[idx].menu_skip_mask,
+					drv_ctrls[idx].default_value);
+			} else {
+				ctrl = v4l2_ctrl_new_std(&inst->ctrl_handler,
+					ctrl_ops,
+					drv_ctrls[idx].id,
+					drv_ctrls[idx].minimum,
+					drv_ctrls[idx].maximum,
+					drv_ctrls[idx].step,
+					drv_ctrls[idx].default_value);
+			}
+		}
+
+		if (!ctrl) {
+			dprintk(VIDC_ERR, "%s - invalid ctrl %s\n", __func__,
+				 drv_ctrls[idx].name);
+			return -EINVAL;
+		}
+
+		ret_val = inst->ctrl_handler.error;
+		if (ret_val) {
+			dprintk(VIDC_ERR,
+				"Error adding ctrl (%s) to ctrl handle, %d\n",
+				drv_ctrls[idx].name, inst->ctrl_handler.error);
+			return ret_val;
+		}
+
+		ctrl->flags |= drv_ctrls[idx].flags;
+		inst->ctrls[idx] = ctrl;
+	}
+
+	/* Construct a super cluster of all controls */
+	inst->cluster = get_super_cluster(inst, num_ctrls);
+	if (!inst->cluster) {
+		dprintk(VIDC_WARN,
+			"Failed to setup super cluster\n");
+		return -EINVAL;
+	}
+
+	v4l2_ctrl_cluster(num_ctrls, inst->cluster);
+
+	return ret_val;
+}
+
+int msm_comm_ctrl_deinit(struct msm_vidc_inst *inst)
+{
+	if (!inst) {
+		dprintk(VIDC_ERR, "%s invalid parameters\n", __func__);
+		return -EINVAL;
+	}
+
+	kfree(inst->ctrls);
+	kfree(inst->cluster);
+	v4l2_ctrl_handler_free(&inst->ctrl_handler);
+
+	return 0;
+}
+
+enum multi_stream msm_comm_get_stream_output_mode(struct msm_vidc_inst *inst)
+{
+	switch (msm_comm_g_ctrl_for_id(inst,
+				V4L2_CID_MPEG_VIDC_VIDEO_STREAM_OUTPUT_MODE)) {
+	case V4L2_CID_MPEG_VIDC_VIDEO_STREAM_OUTPUT_SECONDARY:
+		return HAL_VIDEO_DECODER_SECONDARY;
+	case V4L2_CID_MPEG_VIDC_VIDEO_STREAM_OUTPUT_PRIMARY:
+	default:
+		return HAL_VIDEO_DECODER_PRIMARY;
+	}
+}
+
+static int msm_comm_get_mbs_per_sec(struct msm_vidc_inst *inst)
+{
+	int output_port_mbs, capture_port_mbs;
+	int fps;
+
+	output_port_mbs = inst->in_reconfig ?
+			NUM_MBS_PER_FRAME(inst->reconfig_width,
+				inst->reconfig_height) :
+			NUM_MBS_PER_FRAME(inst->prop.width[OUTPUT_PORT],
+				inst->prop.height[OUTPUT_PORT]);
+
+	capture_port_mbs = NUM_MBS_PER_FRAME(inst->prop.width[CAPTURE_PORT],
+		inst->prop.height[CAPTURE_PORT]);
+
+	if (inst->operating_rate) {
+		fps = (inst->operating_rate >> 16) ?
+			inst->operating_rate >> 16 : 1;
+		/*
+		 * If the operating rate is lower than the session fps,
+		 * use fps to scale clocks instead.
+		 */
+		fps = fps > inst->prop.fps ? fps : inst->prop.fps;
+		return max(output_port_mbs, capture_port_mbs) * fps;
+	} else {
+		return max(output_port_mbs, capture_port_mbs) * inst->prop.fps;
+	}
+}
+
+int msm_comm_get_inst_load(struct msm_vidc_inst *inst,
+		enum load_calc_quirks quirks)
+{
+	int load = 0;
+
+	mutex_lock(&inst->lock);
+
+	if (!(inst->state >= MSM_VIDC_OPEN_DONE &&
+		inst->state < MSM_VIDC_STOP_DONE))
+		goto exit;
+
+	load = msm_comm_get_mbs_per_sec(inst);
+
+	if (is_thumbnail_session(inst)) {
+		if (quirks & LOAD_CALC_IGNORE_THUMBNAIL_LOAD)
+			load = 0;
+	}
+
+	if (msm_comm_turbo_session(inst)) {
+		if (!(quirks & LOAD_CALC_IGNORE_TURBO_LOAD))
+			load = inst->core->resources.max_load;
+	}
+
+	/*
+	 * Clock and load calculations for REALTIME/NON-REALTIME,
+	 * OPERATING RATE SET/NO OPERATING RATE SET
+	 *
+	 *                 | OPERATING RATE SET   | OPERATING RATE NOT SET |
+	 * ----------------|--------------------- |------------------------|
+	 * REALTIME        | load = res * op_rate |  load = res * fps      |
+	 *                 | clk  = res * op_rate |  clk  = res * fps      |
+	 * ----------------|----------------------|------------------------|
+	 * NON-REALTIME    | load = res * 1 fps   |  load = res * 1 fps    |
+	 *                 | clk  = res * op_rate |  clk  = res * fps      |
+	 * ----------------|----------------------|------------------------|
+	 */
+
+	if (!is_realtime_session(inst) &&
+		(quirks & LOAD_CALC_IGNORE_NON_REALTIME_LOAD)) {
+		if (!inst->prop.fps) {
+			dprintk(VIDC_INFO, "instance:%pK fps = 0\n", inst);
+			load = 0;
+		} else {
+			load = msm_comm_get_mbs_per_sec(inst) / inst->prop.fps;
+		}
+	}
+
+exit:
+	mutex_unlock(&inst->lock);
+	return load;
+}
+
+int msm_comm_get_load(struct msm_vidc_core *core,
+	enum session_type type, enum load_calc_quirks quirks)
+{
+	struct msm_vidc_inst *inst = NULL;
+	int num_mbs_per_sec = 0;
+
+	if (!core) {
+		dprintk(VIDC_ERR, "Invalid args: %pK\n", core);
+		return -EINVAL;
+	}
+
+	mutex_lock(&core->lock);
+	list_for_each_entry(inst, &core->instances, list) {
+		if (inst->session_type != type)
+			continue;
+
+		num_mbs_per_sec += msm_comm_get_inst_load(inst, quirks);
+	}
+	mutex_unlock(&core->lock);
+
+	return num_mbs_per_sec;
+}
+
+enum hal_domain get_hal_domain(int session_type)
+{
+	enum hal_domain domain;
+	switch (session_type) {
+	case MSM_VIDC_ENCODER:
+		domain = HAL_VIDEO_DOMAIN_ENCODER;
+		break;
+	case MSM_VIDC_DECODER:
+		domain = HAL_VIDEO_DOMAIN_DECODER;
+		break;
+	default:
+		dprintk(VIDC_ERR, "Wrong domain for session type %d\n",
+			session_type);
+		domain = HAL_UNUSED_DOMAIN;
+		break;
+	}
+
+	return domain;
+}
+
+enum hal_video_codec get_hal_codec(int fourcc)
+{
+	enum hal_video_codec codec;
+	switch (fourcc) {
+	case V4L2_PIX_FMT_H264:
+	case V4L2_PIX_FMT_H264_NO_SC:
+		codec = HAL_VIDEO_CODEC_H264;
+		break;
+	case V4L2_PIX_FMT_H264_MVC:
+		codec = HAL_VIDEO_CODEC_MVC;
+		break;
+	case V4L2_PIX_FMT_H263:
+		codec = HAL_VIDEO_CODEC_H263;
+		break;
+	case V4L2_PIX_FMT_MPEG1:
+		codec = HAL_VIDEO_CODEC_MPEG1;
+		break;
+	case V4L2_PIX_FMT_MPEG2:
+		codec = HAL_VIDEO_CODEC_MPEG2;
+		break;
+	case V4L2_PIX_FMT_MPEG4:
+		codec = HAL_VIDEO_CODEC_MPEG4;
+		break;
+	case V4L2_PIX_FMT_VC1_ANNEX_G:
+	case V4L2_PIX_FMT_VC1_ANNEX_L:
+		codec = HAL_VIDEO_CODEC_VC1;
+		break;
+	case V4L2_PIX_FMT_VP8:
+		codec = HAL_VIDEO_CODEC_VP8;
+		break;
+	case V4L2_PIX_FMT_VP9:
+		codec = HAL_VIDEO_CODEC_VP9;
+		break;
+	case V4L2_PIX_FMT_DIVX_311:
+		codec = HAL_VIDEO_CODEC_DIVX_311;
+		break;
+	case V4L2_PIX_FMT_DIVX:
+		codec = HAL_VIDEO_CODEC_DIVX;
+		break;
+	case V4L2_PIX_FMT_HEVC:
+		codec = HAL_VIDEO_CODEC_HEVC;
+		break;
+	case V4L2_PIX_FMT_HEVC_HYBRID:
+		codec = HAL_VIDEO_CODEC_HEVC_HYBRID;
+		break;
+	default:
+		dprintk(VIDC_ERR, "Wrong codec: %d\n", fourcc);
+		codec = HAL_UNUSED_CODEC;
+		break;
+	}
+
+	return codec;
+}
+
+static enum hal_uncompressed_format get_hal_uncompressed(int fourcc)
+{
+	enum hal_uncompressed_format format = HAL_UNUSED_COLOR;
+
+	switch (fourcc) {
+	case V4L2_PIX_FMT_NV12:
+		format = HAL_COLOR_FORMAT_NV12;
+		break;
+	case V4L2_PIX_FMT_NV21:
+		format = HAL_COLOR_FORMAT_NV21;
+		break;
+	case V4L2_PIX_FMT_NV12_UBWC:
+		format = HAL_COLOR_FORMAT_NV12_UBWC;
+		break;
+	case V4L2_PIX_FMT_NV12_TP10_UBWC:
+		format = HAL_COLOR_FORMAT_NV12_TP10_UBWC;
+		break;
+	case V4L2_PIX_FMT_RGB32:
+		format = HAL_COLOR_FORMAT_RGBA8888;
+		break;
+	case V4L2_PIX_FMT_RGBA8888_UBWC:
+		format = HAL_COLOR_FORMAT_RGBA8888_UBWC;
+		break;
+	default:
+		format = HAL_UNUSED_COLOR;
+		break;
+	}
+
+	return format;
+}
+
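+/*
+ * Collect per-instance load data (codec, resolution, fps, power mode) and
+ * hand the whole set to the HFI layer so it can vote for bus bandwidth.
+ */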
+static int msm_comm_vote_bus(struct msm_vidc_core *core)
+{
+	int rc = 0, vote_data_count = 0, i = 0;
+	struct hfi_device *hdev;
+	struct msm_vidc_inst *inst = NULL;
+	struct vidc_bus_vote_data *vote_data = NULL;
+	unsigned long core_freq = 0;
+
+	if (!core) {
+		dprintk(VIDC_ERR, "%s Invalid args: %pK\n", __func__, core);
+		return -EINVAL;
+	}
+
+	hdev = core->device;
+	if (!hdev) {
+		dprintk(VIDC_ERR, "%s Invalid device handle: %pK\n",
+			__func__, hdev);
+		return -EINVAL;
+	}
+
+	mutex_lock(&core->lock);
+	list_for_each_entry(inst, &core->instances, list)
+		++vote_data_count;
+
+	vote_data = kzalloc(sizeof(*vote_data) * vote_data_count,
+			GFP_TEMPORARY);
+	if (!vote_data) {
+		dprintk(VIDC_ERR, "%s: failed to allocate memory\n", __func__);
+		rc = -ENOMEM;
+		goto fail_alloc;
+	}
+
+	core_freq = call_hfi_op(hdev, get_core_clock_rate,
+			hdev->hfi_device_data, 0);
+
+	list_for_each_entry(inst, &core->instances, list) {
+		int codec = 0, yuv = 0;
+
+		codec = inst->session_type == MSM_VIDC_DECODER ?
+			inst->fmts[OUTPUT_PORT].fourcc :
+			inst->fmts[CAPTURE_PORT].fourcc;
+
+		yuv = inst->session_type == MSM_VIDC_DECODER ?
+			inst->fmts[CAPTURE_PORT].fourcc :
+			inst->fmts[OUTPUT_PORT].fourcc;
+
+		vote_data[i].domain = get_hal_domain(inst->session_type);
+		vote_data[i].codec = get_hal_codec(codec);
+		vote_data[i].width =  max(inst->prop.width[CAPTURE_PORT],
+			inst->prop.width[OUTPUT_PORT]);
+		vote_data[i].height = max(inst->prop.height[CAPTURE_PORT],
+			inst->prop.height[OUTPUT_PORT]);
+
+		if (inst->operating_rate)
+			vote_data[i].fps = (inst->operating_rate >> 16) ?
+				inst->operating_rate >> 16 : 1;
+		else
+			vote_data[i].fps = inst->prop.fps;
+
+		if (msm_comm_turbo_session(inst))
+			vote_data[i].power_mode = VIDC_POWER_TURBO;
+		else if (is_low_power_session(inst))
+			vote_data[i].power_mode = VIDC_POWER_LOW;
+		else
+			vote_data[i].power_mode = VIDC_POWER_NORMAL;
+		if (i == 0) {
+			vote_data[i].imem_ab_tbl = core->resources.imem_ab_tbl;
+			vote_data[i].imem_ab_tbl_size =
+				core->resources.imem_ab_tbl_size;
+			vote_data[i].core_freq = core_freq;
+		}
+
+		/*
+		 * TODO: support for OBP-DBP split mode hasn't been yet
+		 * implemented, once it is, this part of code needs to be
+		 * revisited since passing in accurate information to the bus
+		 * governor will drastically reduce bandwidth
+		 */
+		vote_data[i].color_formats[0] = get_hal_uncompressed(yuv);
+		vote_data[i].num_formats = 1;
+		i++;
+	}
+	mutex_unlock(&core->lock);
+
+	rc = call_hfi_op(hdev, vote_bus, hdev->hfi_device_data, vote_data,
+			vote_data_count);
+	if (rc)
+		dprintk(VIDC_ERR, "Failed to scale bus: %d\n", rc);
+
+	kfree(vote_data);
+	return rc;
+
+fail_alloc:
+	mutex_unlock(&core->lock);
+	return rc;
+}
+
+struct msm_vidc_core *get_vidc_core(int core_id)
+{
+	struct msm_vidc_core *core;
+	int found = 0;
+	if (core_id > MSM_VIDC_CORES_MAX) {
+		dprintk(VIDC_ERR, "Core id = %d is greater than max = %d\n",
+			core_id, MSM_VIDC_CORES_MAX);
+		return NULL;
+	}
+	mutex_lock(&vidc_driver->lock);
+	list_for_each_entry(core, &vidc_driver->cores, list) {
+		if (core->id == core_id) {
+			found = 1;
+			break;
+		}
+	}
+	mutex_unlock(&vidc_driver->lock);
+	if (found)
+		return core;
+	return NULL;
+}
+
+const struct msm_vidc_format *msm_comm_get_pixel_fmt_index(
+	const struct msm_vidc_format fmt[], int size, int index, int fmt_type)
+{
+	int i, k = 0;
+	if (!fmt || index < 0) {
+		dprintk(VIDC_ERR, "Invalid inputs, fmt = %pK, index = %d\n",
+						fmt, index);
+		return NULL;
+	}
+	for (i = 0; i < size; i++) {
+		if (fmt[i].type != fmt_type)
+			continue;
+		if (k == index)
+			break;
+		k++;
+	}
+	if (i == size) {
+		dprintk(VIDC_INFO, "Format not found\n");
+		return NULL;
+	}
+	return &fmt[i];
+}
+
+struct msm_vidc_format *msm_comm_get_pixel_fmt_fourcc(
+	struct msm_vidc_format fmt[], int size, int fourcc, int fmt_type)
+{
+	int i;
+	if (!fmt) {
+		dprintk(VIDC_ERR, "Invalid inputs, fmt = %pK\n", fmt);
+		return NULL;
+	}
+	for (i = 0; i < size; i++) {
+		if (fmt[i].fourcc == fourcc)
+			break;
+	}
+	if (i == size) {
+		dprintk(VIDC_INFO, "Format not found\n");
+		return NULL;
+	}
+	return &fmt[i];
+}
+
+struct buf_queue *msm_comm_get_vb2q(
+		struct msm_vidc_inst *inst, enum v4l2_buf_type type)
+{
+	if (type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
+		return &inst->bufq[CAPTURE_PORT];
+	if (type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
+		return &inst->bufq[OUTPUT_PORT];
+	return NULL;
+}
+
+static void handle_sys_init_done(enum hal_command_response cmd, void *data)
+{
+	struct msm_vidc_cb_cmd_done *response = data;
+	struct msm_vidc_core *core;
+	struct vidc_hal_sys_init_done *sys_init_msg;
+	u32 index;
+
+	if (!IS_HAL_SYS_CMD(cmd)) {
+		dprintk(VIDC_ERR, "%s - invalid cmd\n", __func__);
+		return;
+	}
+
+	index = SYS_MSG_INDEX(cmd);
+
+	if (!response) {
+		dprintk(VIDC_ERR,
+			"Failed to get valid response for sys init\n");
+		return;
+	}
+	core = get_vidc_core(response->device_id);
+	if (!core) {
+		dprintk(VIDC_ERR, "Wrong device_id received\n");
+		return;
+	}
+	sys_init_msg = &response->data.sys_init_done;
+	if (!sys_init_msg) {
+		dprintk(VIDC_ERR, "sys_init_done message not proper\n");
+		return;
+	}
+
+	core->enc_codec_supported = sys_init_msg->enc_codec_supported;
+	core->dec_codec_supported = sys_init_msg->dec_codec_supported;
+
+	/* This should come from sys_init_done */
+	core->resources.max_inst_count =
+		sys_init_msg->max_sessions_supported ? :
+		MAX_SUPPORTED_INSTANCES;
+
+	core->resources.max_secure_inst_count =
+		core->resources.max_secure_inst_count ? :
+		core->resources.max_inst_count;
+
+	if (core->id == MSM_VIDC_CORE_VENUS &&
+		(core->dec_codec_supported & HAL_VIDEO_CODEC_H264))
+			core->dec_codec_supported |=
+				HAL_VIDEO_CODEC_MVC;
+
+	core->codec_count = sys_init_msg->codec_count;
+	memcpy(core->capabilities, sys_init_msg->capabilities,
+		sys_init_msg->codec_count * sizeof(struct msm_vidc_capability));
+
+	dprintk(VIDC_DBG,
+		"%s: supported_codecs[%d]: enc = %#x, dec = %#x\n",
+		__func__, core->codec_count, core->enc_codec_supported,
+		core->dec_codec_supported);
+
+	complete(&(core->completions[index]));
+}
+
+static void put_inst(struct msm_vidc_inst *inst)
+{
+	void put_inst_helper(struct kref *kref)
+	{
+		struct msm_vidc_inst *inst = container_of(kref,
+				struct msm_vidc_inst, kref);
+
+		msm_vidc_destroy(inst);
+	}
+
+	if (!inst)
+		return;
+
+	kref_put(&inst->kref, put_inst_helper);
+}
+
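+/*
+ * Validate a session_id coming from a firmware callback against the list
+ * of active instances and take a kref on the match, if any.
+ */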
+static struct msm_vidc_inst *get_inst(struct msm_vidc_core *core,
+		void *session_id)
+{
+	struct msm_vidc_inst *inst = NULL;
+	bool matches = false;
+
+	if (!core || !session_id)
+		return NULL;
+
+	mutex_lock(&core->lock);
+	/*
+	 * This is as good as !list_empty(&inst->list), but at this point
+	 * we don't really know if inst was kfree'd via a close syscall before
+	 * the hardware could respond.  So manually walk through the list of
+	 * active sessions.
+	 */
+	list_for_each_entry(inst, &core->instances, list) {
+		if (inst == session_id) {
+			/*
+			 * Even if the instance is valid, we really shouldn't
+			 * be receiving or handling callbacks when we've deleted
+			 * our session with HFI
+			 */
+			matches = !!inst->session;
+			break;
+		}
+	}
+
+	/*
+	 * kref_* is atomic_t backed, so no need for inst->lock.  But we can
+	 * always acquire inst->lock and release it in put_inst for a stronger
+	 * locking system.
+	 */
+	inst = (matches && kref_get_unless_zero(&inst->kref)) ? inst : NULL;
+	mutex_unlock(&core->lock);
+
+	return inst;
+}
+
+static void handle_session_release_buf_done(enum hal_command_response cmd,
+	void *data)
+{
+	struct msm_vidc_cb_cmd_done *response = data;
+	struct msm_vidc_inst *inst;
+	struct internal_buf *buf;
+	struct list_head *ptr, *next;
+	struct hal_buffer_info *buffer;
+	bool buf_found = false;
+	u32 address;
+
+	if (!response) {
+		dprintk(VIDC_ERR, "Invalid release_buf_done response\n");
+		return;
+	}
+
+	inst = get_inst(get_vidc_core(response->device_id),
+			response->session_id);
+	if (!inst) {
+		dprintk(VIDC_WARN, "Got a response for an inactive session\n");
+		return;
+	}
+
+	buffer = &response->data.buffer_info;
+	address = buffer->buffer_addr;
+
+	mutex_lock(&inst->scratchbufs.lock);
+	list_for_each_safe(ptr, next, &inst->scratchbufs.list) {
+		buf = list_entry(ptr, struct internal_buf, list);
+		if (address == (u32)buf->handle->device_addr) {
+			dprintk(VIDC_DBG, "releasing scratch: %pa\n",
+					&buf->handle->device_addr);
+			buf_found = true;
+		}
+	}
+	mutex_unlock(&inst->scratchbufs.lock);
+
+	mutex_lock(&inst->persistbufs.lock);
+	list_for_each_safe(ptr, next, &inst->persistbufs.list) {
+		buf = list_entry(ptr, struct internal_buf, list);
+		if (address == (u32)buf->handle->device_addr) {
+			dprintk(VIDC_DBG, "releasing persist: %pa\n",
+					&buf->handle->device_addr);
+			buf_found = true;
+		}
+	}
+	mutex_unlock(&inst->persistbufs.lock);
+
+	if (!buf_found)
+		dprintk(VIDC_ERR, "invalid buffer received from firmware\n");
+	if (IS_HAL_SESSION_CMD(cmd)) {
+		complete(&inst->completions[SESSION_MSG_INDEX(cmd)]);
+	} else {
+		dprintk(VIDC_ERR, "Invalid inst cmd response: %d\n", cmd);
+	}
+
+	put_inst(inst);
+}
+
+static void handle_sys_release_res_done(
+		enum hal_command_response cmd, void *data)
+{
+	struct msm_vidc_cb_cmd_done *response = data;
+	struct msm_vidc_core *core;
+	if (!response) {
+		dprintk(VIDC_ERR,
+			"Failed to get valid response for sys init\n");
+		return;
+	}
+	core = get_vidc_core(response->device_id);
+	if (!core) {
+		dprintk(VIDC_ERR, "Wrong device_id received\n");
+		return;
+	}
+	complete(&core->completions[
+			SYS_MSG_INDEX(HAL_SYS_RELEASE_RESOURCE_DONE)]);
+}
+
+static void change_inst_state(struct msm_vidc_inst *inst,
+	enum instance_state state)
+{
+	if (!inst) {
+		dprintk(VIDC_ERR, "%s: invalid parameter\n", __func__);
+		return;
+	}
+	mutex_lock(&inst->lock);
+	if (inst->state == MSM_VIDC_CORE_INVALID) {
+		dprintk(VIDC_DBG,
+			"Inst: %pK is in bad state can't change state to %d\n",
+			inst, state);
+		goto exit;
+	}
+	dprintk(VIDC_DBG, "Moved inst: %pK from state: %d to state: %d\n",
+		   inst, inst->state, state);
+	inst->state = state;
+exit:
+	mutex_unlock(&inst->lock);
+}
+
+static int signal_session_msg_receipt(enum hal_command_response cmd,
+		struct msm_vidc_inst *inst)
+{
+	if (!inst) {
+		dprintk(VIDC_ERR, "Invalid(%pK) instance id\n", inst);
+		return -EINVAL;
+	}
+	if (IS_HAL_SESSION_CMD(cmd)) {
+		complete(&inst->completions[SESSION_MSG_INDEX(cmd)]);
+	} else {
+		dprintk(VIDC_ERR, "Invalid inst cmd response: %d\n", cmd);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static int wait_for_sess_signal_receipt(struct msm_vidc_inst *inst,
+	enum hal_command_response cmd)
+{
+	int rc = 0;
+	struct hfi_device *hdev;
+
+	if (!IS_HAL_SESSION_CMD(cmd)) {
+		dprintk(VIDC_ERR, "Invalid inst cmd response: %d\n", cmd);
+		return -EINVAL;
+	}
+	hdev = (struct hfi_device *)(inst->core->device);
+	rc = wait_for_completion_timeout(
+		&inst->completions[SESSION_MSG_INDEX(cmd)],
+		msecs_to_jiffies(msm_vidc_hw_rsp_timeout));
+	if (!rc) {
+		dprintk(VIDC_ERR, "Wait interrupted or timed out: %d\n",
+				SESSION_MSG_INDEX(cmd));
+		call_hfi_op(hdev, flush_debug_queue, hdev->hfi_device_data);
+		dprintk(VIDC_ERR,
+			"sess resp timeout can potentially crash the system\n");
+		msm_comm_print_debug_info(inst);
+		BUG_ON(msm_vidc_debug_timeout);
+		msm_comm_kill_session(inst);
+		rc = -EIO;
+	} else {
+		rc = 0;
+	}
+	return rc;
+}
+
+static int wait_for_state(struct msm_vidc_inst *inst,
+	enum instance_state flipped_state,
+	enum instance_state desired_state,
+	enum hal_command_response hal_cmd)
+{
+	int rc = 0;
+	if (IS_ALREADY_IN_STATE(flipped_state, desired_state)) {
+		dprintk(VIDC_INFO, "inst: %pK is already in state: %d\n",
+						inst, inst->state);
+		goto err_same_state;
+	}
+	dprintk(VIDC_DBG, "Waiting for hal_cmd: %d\n", hal_cmd);
+	rc = wait_for_sess_signal_receipt(inst, hal_cmd);
+	if (!rc)
+		change_inst_state(inst, desired_state);
+err_same_state:
+	return rc;
+}
+
+void msm_vidc_queue_v4l2_event(struct msm_vidc_inst *inst, int event_type)
+{
+	struct v4l2_event event = {.id = 0, .type = event_type};
+	v4l2_event_queue_fh(&inst->event_handler, &event);
+}
+
+static void msm_comm_generate_max_clients_error(struct msm_vidc_inst *inst)
+{
+	enum hal_command_response cmd = HAL_SESSION_ERROR;
+	struct msm_vidc_cb_cmd_done response = {0};
+
+	if (!inst) {
+		dprintk(VIDC_ERR, "%s: invalid input parameters\n", __func__);
+		return;
+	}
+	dprintk(VIDC_ERR, "%s: Too many clients\n", __func__);
+	response.session_id = inst;
+	response.status = VIDC_ERR_MAX_CLIENTS;
+	handle_session_error(cmd, (void *)&response);
+}
+
+static void print_cap(const char *type,
+		struct hal_capability_supported *cap)
+{
+	dprintk(VIDC_DBG,
+		"%-24s: %-8d %-8d %-8d\n",
+		type, cap->min, cap->max, cap->step_size);
+}
+
+static void msm_vidc_comm_update_ctrl_limits(struct msm_vidc_inst *inst)
+{
+	struct v4l2_ctrl *ctrl = NULL;
+
+	ctrl = v4l2_ctrl_find(&inst->ctrl_handler,
+		V4L2_CID_MPEG_VIDC_VIDEO_HYBRID_HIERP_MODE);
+	if (ctrl) {
+		v4l2_ctrl_modify_range(ctrl, inst->capability.hier_p_hybrid.min,
+			inst->capability.hier_p_hybrid.max, ctrl->step,
+			inst->capability.hier_p_hybrid.min);
+		dprintk(VIDC_DBG,
+			"%s: Updated Range = %lld --> %lld Def value = %lld\n",
+			ctrl->name, ctrl->minimum, ctrl->maximum,
+			ctrl->default_value);
+	}
+
+	ctrl = v4l2_ctrl_find(&inst->ctrl_handler,
+		V4L2_CID_MPEG_VIDC_VIDEO_HIER_B_NUM_LAYERS);
+	if (ctrl) {
+		v4l2_ctrl_modify_range(ctrl, inst->capability.hier_b.min,
+			inst->capability.hier_b.max, ctrl->step,
+			inst->capability.hier_b.min);
+		dprintk(VIDC_DBG,
+			"%s: Updated Range = %lld --> %lld Def value = %lld\n",
+			ctrl->name, ctrl->minimum, ctrl->maximum,
+			ctrl->default_value);
+	}
+
+	ctrl = v4l2_ctrl_find(&inst->ctrl_handler,
+		V4L2_CID_MPEG_VIDC_VIDEO_HIER_P_NUM_LAYERS);
+	if (ctrl) {
+		v4l2_ctrl_modify_range(ctrl, inst->capability.hier_p.min,
+			inst->capability.hier_p.max, ctrl->step,
+			inst->capability.hier_p.min);
+		dprintk(VIDC_DBG,
+			"%s: Updated Range = %lld --> %lld Def value = %lld\n",
+			ctrl->name, ctrl->minimum, ctrl->maximum,
+			ctrl->default_value);
+	}
+}
+
+static void handle_session_init_done(enum hal_command_response cmd, void *data)
+{
+	struct msm_vidc_cb_cmd_done *response = data;
+	struct msm_vidc_inst *inst = NULL;
+	struct vidc_hal_session_init_done *session_init_done = NULL;
+	struct msm_vidc_capability *capability = NULL;
+	struct hfi_device *hdev;
+	struct msm_vidc_core *core;
+	u32 i, codec;
+
+	if (!response) {
+		dprintk(VIDC_ERR,
+				"Failed to get valid response for session init\n");
+		return;
+	}
+
+	inst = get_inst(get_vidc_core(response->device_id),
+		response->session_id);
+
+	if (!inst) {
+		dprintk(VIDC_WARN, "Got a response for an inactive session\n");
+		return;
+	}
+
+	if (response->status) {
+		dprintk(VIDC_ERR,
+			"Session init response from FW : %#x\n",
+			response->status);
+		if (response->status == VIDC_ERR_MAX_CLIENTS)
+			msm_comm_generate_max_clients_error(inst);
+		else
+			msm_comm_generate_session_error(inst);
+
+		signal_session_msg_receipt(cmd, inst);
+		put_inst(inst);
+		return;
+	}
+
+	core = inst->core;
+	hdev = inst->core->device;
+	codec = inst->session_type == MSM_VIDC_DECODER ?
+			inst->fmts[OUTPUT_PORT].fourcc :
+			inst->fmts[CAPTURE_PORT].fourcc;
+
+	/* check if capabilities are available for this session */
+	for (i = 0; i < VIDC_MAX_SESSIONS; i++) {
+		if (core->capabilities[i].codec ==
+				get_hal_codec(codec) &&
+			core->capabilities[i].domain ==
+				get_hal_domain(inst->session_type)) {
+			capability = &core->capabilities[i];
+			break;
+		}
+	}
+
+	if (capability) {
+		dprintk(VIDC_DBG,
+			"%s: capabilities available for codec 0x%x, domain %#x\n",
+			__func__, capability->codec, capability->domain);
+		memcpy(&inst->capability, capability,
+			sizeof(struct msm_vidc_capability));
+	} else {
+		session_init_done = (struct vidc_hal_session_init_done *)
+				&response->data.session_init_done;
+		if (!session_init_done) {
+			dprintk(VIDC_ERR,
+				"%s: Failed to get valid response for session init\n",
+				__func__);
+			put_inst(inst);
+			return;
+		}
+		capability = &session_init_done->capability;
+		dprintk(VIDC_DBG,
+			"%s: got capabilities for codec 0x%x, domain 0x%x\n",
+			__func__, capability->codec,
+			capability->domain);
+		memcpy(&inst->capability, capability,
+			sizeof(struct msm_vidc_capability));
+	}
+	inst->capability.pixelprocess_capabilities =
+		call_hfi_op(hdev, get_core_capabilities, hdev->hfi_device_data);
+
+	dprintk(VIDC_DBG,
+		"Capability type : min      max      step size\n");
+	print_cap("width", &inst->capability.width);
+	print_cap("height", &inst->capability.height);
+	print_cap("mbs_per_frame", &inst->capability.mbs_per_frame);
+	print_cap("frame_rate", &inst->capability.frame_rate);
+	print_cap("scale_x", &inst->capability.scale_x);
+	print_cap("scale_y", &inst->capability.scale_y);
+	print_cap("hier_p", &inst->capability.hier_p);
+	print_cap("ltr_count", &inst->capability.ltr_count);
+	print_cap("mbs_per_sec_low_power",
+		&inst->capability.mbs_per_sec_power_save);
+	print_cap("hybrid-hp", &inst->capability.hier_p_hybrid);
+
+	signal_session_msg_receipt(cmd, inst);
+
+	/*
+	 * Update controls after informing session_init_done to avoid
+	 * timeouts.
+	 */
+
+	msm_vidc_comm_update_ctrl_limits(inst);
+	put_inst(inst);
+}
+
+static void handle_event_change(enum hal_command_response cmd, void *data)
+{
+	struct msm_vidc_inst *inst = NULL;
+	struct msm_vidc_cb_event *event_notify = data;
+	int event = V4L2_EVENT_SEQ_CHANGED_INSUFFICIENT;
+	struct v4l2_event seq_changed_event = {0};
+	int rc = 0;
+	struct hfi_device *hdev;
+	u32 *ptr = NULL;
+
+	if (!event_notify) {
+		dprintk(VIDC_WARN, "Got an empty event from hfi\n");
+		return;
+	}
+
+	inst = get_inst(get_vidc_core(event_notify->device_id),
+			event_notify->session_id);
+	if (!inst || !inst->core || !inst->core->device) {
+		dprintk(VIDC_WARN, "Got a response for an inactive session\n");
+		goto err_bad_event;
+	}
+	hdev = inst->core->device;
+
+	switch (event_notify->hal_event_type) {
+	case HAL_EVENT_SEQ_CHANGED_SUFFICIENT_RESOURCES:
+		rc = msm_comm_g_ctrl_for_id(inst,
+			V4L2_CID_MPEG_VIDC_VIDEO_CONTINUE_DATA_TRANSFER);
+		if (IS_ERR_VALUE(rc) || !rc)
+			event = V4L2_EVENT_SEQ_CHANGED_INSUFFICIENT;
+		else
+			event = V4L2_EVENT_SEQ_CHANGED_SUFFICIENT;
+		break;
+	case HAL_EVENT_SEQ_CHANGED_INSUFFICIENT_RESOURCES:
+		event = V4L2_EVENT_SEQ_CHANGED_INSUFFICIENT;
+		break;
+	case HAL_EVENT_RELEASE_BUFFER_REFERENCE:
+	{
+		struct v4l2_event buf_event = {0};
+		struct buffer_info *binfo = NULL, *temp = NULL;
+		u32 *ptr = NULL;
+
+		dprintk(VIDC_DBG, "%s - inst: %pK buffer: %pa extra: %pa\n",
+				__func__, inst, &event_notify->packet_buffer,
+				&event_notify->extra_data_buffer);
+
+		if (inst->state >= MSM_VIDC_STOP ||
+				inst->core->state == VIDC_CORE_INVALID) {
+			dprintk(VIDC_DBG,
+					"Event release buf ref received in invalid state - discard\n");
+			goto err_bad_event;
+		}
+
+		/*
+		 * Get the buffer_info entry for the
+		 * device address.
+		 */
+		binfo = device_to_uvaddr(&inst->registeredbufs,
+				event_notify->packet_buffer);
+		if (!binfo) {
+			dprintk(VIDC_ERR,
+					"%s buffer not found in registered list\n",
+					__func__);
+			goto err_bad_event;
+		}
+
+		/* Fill event data to be sent to client */
+		buf_event.type = V4L2_EVENT_RELEASE_BUFFER_REFERENCE;
+		ptr = (u32 *)buf_event.u.data;
+		ptr[0] = binfo->fd[0];
+		ptr[1] = binfo->buff_off[0];
+
+		dprintk(VIDC_DBG,
+				"RELEASE REFERENCE EVENT FROM F/W - fd = %d offset = %d\n",
+				ptr[0], ptr[1]);
+
+		/* Decrement buffer reference count */
+		mutex_lock(&inst->registeredbufs.lock);
+		list_for_each_entry(temp, &inst->registeredbufs.list,
+				list) {
+			if (temp == binfo) {
+				buf_ref_put(inst, binfo);
+				break;
+			}
+		}
+
+		/*
+		 * Release buffer and remove from list
+		 * if reference goes to zero.
+		 */
+		if (unmap_and_deregister_buf(inst, binfo))
+			dprintk(VIDC_ERR,
+					"%s: buffer unmap failed\n", __func__);
+		mutex_unlock(&inst->registeredbufs.lock);
+
+		/* Send event to client */
+		v4l2_event_queue_fh(&inst->event_handler, &buf_event);
+		goto err_bad_event;
+	}
+	default:
+		break;
+	}
+
+	/*
+	 * Bit depth and pic struct changed events are combined into a single
+	 * (insufficient) event for userspace. Currently bit depth changes
+	 * apply only to HEVC, while interlaced support applies to all codecs
+	 * except HEVC.
+	 * The event data is laid out as follows:
+	 * u32 *ptr = seq_changed_event.u.data;
+	 * ptr[0] = height
+	 * ptr[1] = width
+	 * ptr[2] = flag to indicate bit depth or/and pic struct changed
+	 * ptr[3] = bit depth
+	 * ptr[4] = pic struct (progressive or interlaced)
+	 * ptr[5] = colour space
+	 */
+
+	ptr = (u32 *)seq_changed_event.u.data;
+
+	if (ptr != NULL) {
+		ptr[2] = 0x0;
+		ptr[3] = inst->bit_depth;
+		ptr[4] = inst->pic_struct;
+		ptr[5] = inst->colour_space;
+
+		if (inst->bit_depth != event_notify->bit_depth) {
+			inst->bit_depth = event_notify->bit_depth;
+			ptr[2] |= V4L2_EVENT_BITDEPTH_FLAG;
+			ptr[3] = inst->bit_depth;
+			event = V4L2_EVENT_SEQ_CHANGED_INSUFFICIENT;
+			dprintk(VIDC_DBG,
+				"V4L2_EVENT_SEQ_CHANGED_INSUFFICIENT due to bit-depth change\n");
+		}
+
+		if (inst->pic_struct != event_notify->pic_struct) {
+			inst->pic_struct = event_notify->pic_struct;
+			event = V4L2_EVENT_SEQ_CHANGED_INSUFFICIENT;
+			ptr[2] |= V4L2_EVENT_PICSTRUCT_FLAG;
+			ptr[4] = inst->pic_struct;
+			dprintk(VIDC_DBG,
+				"V4L2_EVENT_SEQ_CHANGED_INSUFFICIENT due to pic-struct change\n");
+		}
+
+		if (inst->bit_depth == MSM_VIDC_BIT_DEPTH_10
+				&& inst->colour_space !=
+					event_notify->colour_space) {
+			inst->colour_space = event_notify->colour_space;
+			event = V4L2_EVENT_SEQ_CHANGED_INSUFFICIENT;
+			ptr[2] |= V4L2_EVENT_COLOUR_SPACE_FLAG;
+			ptr[5] = inst->colour_space;
+			dprintk(VIDC_DBG,
+				"V4L2_EVENT_SEQ_CHANGED_INSUFFICIENT due to colour space change\n");
+		}
+
+	}
+
+	if (event == V4L2_EVENT_SEQ_CHANGED_INSUFFICIENT) {
+		dprintk(VIDC_DBG, "V4L2_EVENT_SEQ_CHANGED_INSUFFICIENT\n");
+		inst->reconfig_height = event_notify->height;
+		inst->reconfig_width = event_notify->width;
+		inst->in_reconfig = true;
+	} else {
+		dprintk(VIDC_DBG, "V4L2_EVENT_SEQ_CHANGED_SUFFICIENT\n");
+		dprintk(VIDC_DBG,
+			"event_notify->height = %d event_notify->width = %d\n",
+			event_notify->height,
+			event_notify->width);
+		inst->prop.height[OUTPUT_PORT] = event_notify->height;
+		inst->prop.width[OUTPUT_PORT] = event_notify->width;
+	}
+
+	if (inst->session_type == MSM_VIDC_DECODER)
+		msm_dcvs_init_load(inst);
+
+	rc = msm_vidc_check_session_supported(inst);
+	if (!rc) {
+		seq_changed_event.type = event;
+		if (event == V4L2_EVENT_SEQ_CHANGED_INSUFFICIENT) {
+			u32 *ptr = NULL;
+
+			ptr = (u32 *)seq_changed_event.u.data;
+			ptr[0] = event_notify->height;
+			ptr[1] = event_notify->width;
+		} else {
+			if (msm_comm_get_stream_output_mode(inst) ==
+				HAL_VIDEO_DECODER_SECONDARY) {
+				struct hal_frame_size frame_sz;
+
+				frame_sz.buffer_type = HAL_BUFFER_OUTPUT2;
+				frame_sz.width = event_notify->width;
+				frame_sz.height = event_notify->height;
+				dprintk(VIDC_DBG,
+					"Update OPB dimensions to firmware if buffer requirements are sufficient\n");
+				rc = msm_comm_try_set_prop(inst,
+					HAL_PARAM_FRAME_SIZE, &frame_sz);
+			}
+
+			dprintk(VIDC_DBG,
+				"send session_continue after sufficient event\n");
+			rc = call_hfi_op(hdev, session_continue,
+					(void *) inst->session);
+			if (rc) {
+				dprintk(VIDC_ERR,
+					"%s - failed to send session_continue\n",
+					__func__);
+				goto err_bad_event;
+			}
+		}
+		v4l2_event_queue_fh(&inst->event_handler, &seq_changed_event);
+	} else if (rc == -ENOTSUPP) {
+		msm_vidc_queue_v4l2_event(inst,
+				V4L2_EVENT_MSM_VIDC_HW_UNSUPPORTED);
+	} else if (rc == -EBUSY) {
+		msm_vidc_queue_v4l2_event(inst,
+				V4L2_EVENT_MSM_VIDC_HW_OVERLOAD);
+	}
+
+err_bad_event:
+	put_inst(inst);
+}
+
+static void handle_session_prop_info(enum hal_command_response cmd, void *data)
+{
+	struct msm_vidc_cb_cmd_done *response = data;
+	struct getprop_buf *getprop;
+	struct msm_vidc_inst *inst;
+	if (!response) {
+		dprintk(VIDC_ERR,
+			"Failed to get valid response for prop info\n");
+		return;
+	}
+
+	inst = get_inst(get_vidc_core(response->device_id),
+			response->session_id);
+	if (!inst) {
+		dprintk(VIDC_WARN, "Got a response for an inactive session\n");
+		return;
+	}
+
+	getprop = kzalloc(sizeof(*getprop), GFP_KERNEL);
+	if (!getprop) {
+		dprintk(VIDC_ERR, "%s: getprop kzalloc failed\n", __func__);
+		goto err_prop_info;
+	}
+
+	getprop->data = kmemdup(&response->data.property,
+			sizeof(union hal_get_property), GFP_KERNEL);
+	if (!getprop->data) {
+		dprintk(VIDC_ERR, "%s: kmemdup failed\n", __func__);
+		kfree(getprop);
+		goto err_prop_info;
+	}
+
+	mutex_lock(&inst->pending_getpropq.lock);
+	list_add_tail(&getprop->list, &inst->pending_getpropq.list);
+	mutex_unlock(&inst->pending_getpropq.lock);
+
+	signal_session_msg_receipt(cmd, inst);
+err_prop_info:
+	put_inst(inst);
+}
+
+static void handle_load_resource_done(enum hal_command_response cmd, void *data)
+{
+	struct msm_vidc_cb_cmd_done *response = data;
+	struct msm_vidc_inst *inst;
+
+	if (!response) {
+		dprintk(VIDC_ERR,
+			"Failed to get valid response for load resource\n");
+		return;
+	}
+
+	inst = get_inst(get_vidc_core(response->device_id),
+			response->session_id);
+	if (!inst) {
+		dprintk(VIDC_WARN, "Got a response for an inactive session\n");
+		return;
+	}
+
+	if (response->status) {
+		dprintk(VIDC_ERR,
+				"Load resource response from FW : %#x\n",
+				response->status);
+		msm_comm_generate_session_error(inst);
+	}
+
+	put_inst(inst);
+}
+
+static void handle_start_done(enum hal_command_response cmd, void *data)
+{
+	struct msm_vidc_cb_cmd_done *response = data;
+	struct msm_vidc_inst *inst;
+
+	if (!response) {
+		dprintk(VIDC_ERR, "Failed to get valid response for start\n");
+		return;
+	}
+
+	inst = get_inst(get_vidc_core(response->device_id),
+			response->session_id);
+	if (!inst) {
+		dprintk(VIDC_WARN, "Got a response for an inactive session\n");
+		return;
+	}
+
+	signal_session_msg_receipt(cmd, inst);
+	put_inst(inst);
+}
+
+static void handle_stop_done(enum hal_command_response cmd, void *data)
+{
+	struct msm_vidc_cb_cmd_done *response = data;
+	struct msm_vidc_inst *inst;
+
+	if (!response) {
+		dprintk(VIDC_ERR, "Failed to get valid response for stop\n");
+		return;
+	}
+
+	inst = get_inst(get_vidc_core(response->device_id),
+			response->session_id);
+	if (!inst) {
+		dprintk(VIDC_WARN, "Got a response for an inactive session\n");
+		return;
+	}
+
+	signal_session_msg_receipt(cmd, inst);
+	put_inst(inst);
+}
+
+static void handle_release_res_done(enum hal_command_response cmd, void *data)
+{
+	struct msm_vidc_cb_cmd_done *response = data;
+	struct msm_vidc_inst *inst;
+
+	if (!response) {
+		dprintk(VIDC_ERR,
+			"Failed to get valid response for release resource\n");
+		return;
+	}
+
+	inst = get_inst(get_vidc_core(response->device_id),
+			response->session_id);
+	if (!inst) {
+		dprintk(VIDC_WARN, "Got a response for an inactive session\n");
+		return;
+	}
+
+	signal_session_msg_receipt(cmd, inst);
+	put_inst(inst);
+}
+
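+/*
+ * Sanity check used on flush in split (OPB/DPB) mode: every output buffer
+ * should be back with the driver, so warn on a count mismatch.
+ */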
+void validate_output_buffers(struct msm_vidc_inst *inst)
+{
+	struct internal_buf *binfo;
+	u32 buffers_owned_by_driver = 0;
+	struct hal_buffer_requirements *output_buf;
+	output_buf = get_buff_req_buffer(inst, HAL_BUFFER_OUTPUT);
+	if (!output_buf) {
+		dprintk(VIDC_DBG,
+			"This output buffer is not required, buffer_type: %x\n",
+			HAL_BUFFER_OUTPUT);
+		return;
+	}
+	mutex_lock(&inst->outputbufs.lock);
+	list_for_each_entry(binfo, &inst->outputbufs.list, list) {
+		if (binfo->buffer_ownership != DRIVER) {
+			dprintk(VIDC_DBG,
+				"This buffer is with FW %pa\n",
+				&binfo->handle->device_addr);
+			continue;
+		}
+		buffers_owned_by_driver++;
+	}
+	mutex_unlock(&inst->outputbufs.lock);
+
+	if (buffers_owned_by_driver != output_buf->buffer_count_actual)
+		dprintk(VIDC_WARN,
+			"OUTPUT Buffer count mismatch %d of %d\n",
+			buffers_owned_by_driver,
+			output_buf->buffer_count_actual);
+}
+
+int msm_comm_queue_output_buffers(struct msm_vidc_inst *inst)
+{
+	struct internal_buf *binfo;
+	struct hfi_device *hdev;
+	struct msm_smem *handle;
+	struct vidc_frame_data frame_data = {0};
+	struct hal_buffer_requirements *output_buf, *extra_buf;
+	int rc = 0;
+
+	if (!inst || !inst->core || !inst->core->device) {
+		dprintk(VIDC_ERR, "%s invalid parameters\n", __func__);
+		return -EINVAL;
+	}
+
+	hdev = inst->core->device;
+
+	output_buf = get_buff_req_buffer(inst, HAL_BUFFER_OUTPUT);
+	if (!output_buf) {
+		dprintk(VIDC_DBG,
+			"This output buffer is not required, buffer_type: %x\n",
+			HAL_BUFFER_OUTPUT);
+		return 0;
+	}
+	dprintk(VIDC_DBG,
+		"output: num = %d, size = %d\n",
+		output_buf->buffer_count_actual,
+		output_buf->buffer_size);
+
+	extra_buf = get_buff_req_buffer(inst, HAL_BUFFER_EXTRADATA_OUTPUT);
+
+	mutex_lock(&inst->outputbufs.lock);
+	list_for_each_entry(binfo, &inst->outputbufs.list, list) {
+		if (binfo->buffer_ownership != DRIVER)
+			continue;
+		handle = binfo->handle;
+		frame_data.alloc_len = output_buf->buffer_size;
+		frame_data.filled_len = 0;
+		frame_data.offset = 0;
+		frame_data.device_addr = handle->device_addr;
+		frame_data.flags = 0;
+		frame_data.extradata_addr = handle->device_addr +
+			output_buf->buffer_size;
+		frame_data.buffer_type = HAL_BUFFER_OUTPUT;
+		frame_data.extradata_size = extra_buf ?
+			extra_buf->buffer_size : 0;
+		rc = call_hfi_op(hdev, session_ftb,
+			(void *) inst->session, &frame_data);
+		binfo->buffer_ownership = FIRMWARE;
+	}
+	mutex_unlock(&inst->outputbufs.lock);
+
+	return 0;
+}
+
+static void handle_session_flush(enum hal_command_response cmd, void *data)
+{
+	struct msm_vidc_cb_cmd_done *response = data;
+	struct msm_vidc_inst *inst;
+	struct v4l2_event flush_event = {0};
+	u32 *ptr = NULL;
+	enum hal_flush flush_type;
+	int rc;
+
+	if (!response) {
+		dprintk(VIDC_ERR, "Failed to get valid response for flush\n");
+		return;
+	}
+
+	inst = get_inst(get_vidc_core(response->device_id),
+			response->session_id);
+	if (!inst) {
+		dprintk(VIDC_WARN, "Got a response for an inactive session\n");
+		return;
+	}
+
+	if (msm_comm_get_stream_output_mode(inst) ==
+			HAL_VIDEO_DECODER_SECONDARY) {
+		validate_output_buffers(inst);
+		if (!inst->in_reconfig) {
+			rc = msm_comm_queue_output_buffers(inst);
+			if (rc) {
+				dprintk(VIDC_ERR,
+						"Failed to queue output buffers: %d\n",
+						rc);
+			}
+		}
+	}
+	atomic_dec(&inst->in_flush);
+	flush_event.type = V4L2_EVENT_MSM_VIDC_FLUSH_DONE;
+	ptr = (u32 *)flush_event.u.data;
+
+	flush_type = response->data.flush_type;
+	switch (flush_type) {
+	case HAL_FLUSH_INPUT:
+		ptr[0] = V4L2_QCOM_CMD_FLUSH_OUTPUT;
+		break;
+	case HAL_FLUSH_OUTPUT:
+		ptr[0] = V4L2_QCOM_CMD_FLUSH_CAPTURE;
+		break;
+	case HAL_FLUSH_ALL:
+		ptr[0] |= V4L2_QCOM_CMD_FLUSH_CAPTURE;
+		ptr[0] |= V4L2_QCOM_CMD_FLUSH_OUTPUT;
+		break;
+	default:
+		dprintk(VIDC_ERR, "Invalid flush type received!\n");
+		goto exit;
+	}
+
+	dprintk(VIDC_DBG,
+		"Notify flush complete, flush_type: %x\n", flush_type);
+	v4l2_event_queue_fh(&inst->event_handler, &flush_event);
+
+exit:
+	put_inst(inst);
+}
+
+static void handle_session_error(enum hal_command_response cmd, void *data)
+{
+	struct msm_vidc_cb_cmd_done *response = data;
+	struct hfi_device *hdev = NULL;
+	struct msm_vidc_inst *inst = NULL;
+	int event = V4L2_EVENT_MSM_VIDC_SYS_ERROR;
+
+	if (!response) {
+		dprintk(VIDC_ERR,
+			"Failed to get valid response for session error\n");
+		return;
+	}
+
+	inst = get_inst(get_vidc_core(response->device_id),
+			response->session_id);
+	if (!inst) {
+		dprintk(VIDC_WARN, "Got a response for an inactive session\n");
+		return;
+	}
+
+	hdev = inst->core->device;
+	dprintk(VIDC_WARN, "Session error received for session %pK\n", inst);
+	change_inst_state(inst, MSM_VIDC_CORE_INVALID);
+
+	if (response->status == VIDC_ERR_MAX_CLIENTS) {
+		dprintk(VIDC_WARN, "Too many clients, rejecting %pK\n", inst);
+		event = V4L2_EVENT_MSM_VIDC_MAX_CLIENTS;
+
+		/*
+		 * Clean the HFI session now. Since inst->state is moved to
+		 * INVALID, forward thread doesn't know FW has valid session
+		 * or not. This is the last place driver knows that there is
+		 * no session in FW. Hence clean HFI session now.
+		 */
+
+		msm_comm_session_clean(inst);
+	} else if (response->status == VIDC_ERR_NOT_SUPPORTED) {
+		dprintk(VIDC_WARN, "Unsupported bitstream in %pK\n", inst);
+		event = V4L2_EVENT_MSM_VIDC_HW_UNSUPPORTED;
+	} else {
+		dprintk(VIDC_WARN, "Unknown session error (%d) for %pK\n",
+				response->status, inst);
+		event = V4L2_EVENT_MSM_VIDC_SYS_ERROR;
+	}
+
+	msm_vidc_queue_v4l2_event(inst, event);
+	put_inst(inst);
+}
+
+static void msm_comm_clean_notify_client(struct msm_vidc_core *core)
+{
+	struct msm_vidc_inst *inst = NULL;
+	if (!core) {
+		dprintk(VIDC_ERR, "%s: Invalid params\n", __func__);
+		return;
+	}
+
+	dprintk(VIDC_WARN, "%s: Core %pK\n", __func__, core);
+	mutex_lock(&core->lock);
+	core->state = VIDC_CORE_INVALID;
+
+	list_for_each_entry(inst, &core->instances, list) {
+		mutex_lock(&inst->lock);
+		inst->state = MSM_VIDC_CORE_INVALID;
+		mutex_unlock(&inst->lock);
+		dprintk(VIDC_WARN,
+			"%s Send sys error for inst %pK\n", __func__, inst);
+		msm_vidc_queue_v4l2_event(inst,
+				V4L2_EVENT_MSM_VIDC_SYS_ERROR);
+	}
+	mutex_unlock(&core->lock);
+}
+
+static void handle_sys_error(enum hal_command_response cmd, void *data)
+{
+	struct msm_vidc_cb_cmd_done *response = data;
+	struct msm_vidc_core *core = NULL;
+	struct hfi_device *hdev = NULL;
+	struct msm_vidc_inst *inst = NULL;
+	int rc = 0;
+
+	subsystem_crashed("venus");
+	if (!response) {
+		dprintk(VIDC_ERR,
+			"Failed to get valid response for sys error\n");
+		return;
+	}
+
+	core = get_vidc_core(response->device_id);
+	if (!core) {
+		dprintk(VIDC_ERR,
+				"Got SYS_ERR but unable to identify core\n");
+		return;
+	}
+
+	mutex_lock(&core->lock);
+	if (core->state == VIDC_CORE_INVALID ||
+		core->state == VIDC_CORE_UNINIT) {
+		dprintk(VIDC_ERR,
+			"%s: Core already moved to state %d\n",
+			 __func__, core->state);
+		mutex_unlock(&core->lock);
+		return;
+	}
+	mutex_unlock(&core->lock);
+
+	dprintk(VIDC_WARN, "SYS_ERROR %d received for core %pK\n", cmd, core);
+	msm_comm_clean_notify_client(core);
+
+	hdev = core->device;
+	mutex_lock(&core->lock);
+	if (core->state == VIDC_CORE_INVALID) {
+		dprintk(VIDC_DBG, "Calling core_release\n");
+		rc = call_hfi_op(hdev, core_release,
+						 hdev->hfi_device_data);
+		if (rc) {
+			dprintk(VIDC_ERR, "core_release failed\n");
+			mutex_unlock(&core->lock);
+			return;
+		}
+		core->state = VIDC_CORE_UNINIT;
+	}
+	mutex_unlock(&core->lock);
+
+	msm_vidc_print_running_insts(core);
+	call_hfi_op(hdev, flush_debug_queue, hdev->hfi_device_data);
+	dprintk(VIDC_ERR,
+		"SYS_ERROR can potentially crash the system\n");
+
+	mutex_lock(&core->lock);
+	list_for_each_entry(inst, &core->instances, list)
+		msm_comm_print_inst_info(inst);
+	mutex_unlock(&core->lock);
+
+	BUG_ON(msm_vidc_debug_timeout);
+}
+
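+/*
+ * Release the HFI session attached to the instance, if any, and clear
+ * inst->session so it cannot be used again.
+ */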
+void msm_comm_session_clean(struct msm_vidc_inst *inst)
+{
+	int rc = 0;
+	struct hfi_device *hdev = NULL;
+
+	if (!inst || !inst->core || !inst->core->device) {
+		dprintk(VIDC_ERR, "%s invalid params\n", __func__);
+		return;
+	}
+
+	hdev = inst->core->device;
+	mutex_lock(&inst->lock);
+	if (hdev && inst->session) {
+		dprintk(VIDC_DBG, "cleaning up instance: %pK\n", inst);
+		rc = call_hfi_op(hdev, session_clean,
+				(void *)inst->session);
+		if (rc) {
+			dprintk(VIDC_ERR,
+				"Session clean failed :%pK\n", inst);
+		}
+		inst->session = NULL;
+	}
+	mutex_unlock(&inst->lock);
+}
+
+static void handle_session_close(enum hal_command_response cmd, void *data)
+{
+	struct msm_vidc_cb_cmd_done *response = data;
+	struct msm_vidc_inst *inst;
+
+	if (!response) {
+		dprintk(VIDC_ERR,
+			"Failed to get valid response for session close\n");
+		return;
+	}
+
+	inst = get_inst(get_vidc_core(response->device_id),
+			response->session_id);
+	if (!inst) {
+		dprintk(VIDC_WARN, "Got a response for an inactive session\n");
+		return;
+	}
+
+	signal_session_msg_receipt(cmd, inst);
+	show_stats(inst);
+	put_inst(inst);
+}
+
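+/*
+ * Find the queued, active vb2 buffer whose plane-0 device address
+ * matches dev_addr; returns NULL if no such buffer is found.
+ */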
+static struct vb2_buffer *get_vb_from_device_addr(struct buf_queue *bufq,
+		unsigned long dev_addr)
+{
+	struct vb2_buffer *vb = NULL;
+	struct vb2_queue *q = NULL;
+	int found = 0;
+	if (!bufq) {
+		dprintk(VIDC_ERR, "Invalid parameter\n");
+		return NULL;
+	}
+	q = &bufq->vb2_bufq;
+	mutex_lock(&bufq->lock);
+	list_for_each_entry(vb, &q->queued_list, queued_entry) {
+		if (vb->planes[0].m.userptr == dev_addr &&
+			vb->state == VB2_BUF_STATE_ACTIVE) {
+			found = 1;
+			dprintk(VIDC_DBG, "Found v4l2_buf index : %d\n",
+					vb->index);
+			break;
+		}
+	}
+	mutex_unlock(&bufq->lock);
+	if (!found) {
+		dprintk(VIDC_DBG,
+			"Failed to find buffer in queued list: %#lx, qtype = %d\n",
+			dev_addr, q->type);
+		vb = NULL;
+	}
+	return vb;
+}
+
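+/*
+ * EBD (Empty Buffer Done): firmware has consumed an input buffer.
+ * Propagate any error/sync-frame flags onto the v4l2 buffer and hand it
+ * back to vb2.
+ */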
+static void handle_ebd(enum hal_command_response cmd, void *data)
+{
+	struct msm_vidc_cb_data_done *response = data;
+	struct vb2_buffer *vb;
+	struct msm_vidc_inst *inst;
+	struct vidc_hal_ebd *empty_buf_done;
+	struct vb2_v4l2_buffer *vbuf = NULL;
+
+	if (!response) {
+		dprintk(VIDC_ERR, "Invalid response from vidc_hal\n");
+		return;
+	}
+
+	inst = get_inst(get_vidc_core(response->device_id),
+			response->session_id);
+	if (!inst) {
+		dprintk(VIDC_WARN, "Got a response for an inactive session\n");
+		return;
+	}
+
+	vb = get_vb_from_device_addr(&inst->bufq[OUTPUT_PORT],
+			response->input_done.packet_buffer);
+	if (vb) {
+		vbuf = to_vb2_v4l2_buffer(vb);
+		vb->planes[0].bytesused = response->input_done.filled_len;
+		vb->planes[0].data_offset = response->input_done.offset;
+		if (vb->planes[0].data_offset > vb->planes[0].length)
+			dprintk(VIDC_INFO, "data_offset overflow length\n");
+		if (vb->planes[0].bytesused > vb->planes[0].length)
+			dprintk(VIDC_INFO, "bytesused overflow length\n");
+		if (vb->planes[0].m.userptr !=
+			response->clnt_data)
+			dprintk(VIDC_INFO, "Client data != bufaddr\n");
+		/*
+		 * empty_buf_done aliases response->input_done and can never
+		 * be NULL, so check the status directly.
+		 */
+		empty_buf_done = (struct vidc_hal_ebd *)&response->input_done;
+		if (empty_buf_done->status == VIDC_ERR_NOT_SUPPORTED) {
+			dprintk(VIDC_INFO,
+				"Failed : Unsupported input stream\n");
+			vbuf->flags |= V4L2_QCOM_BUF_INPUT_UNSUPPORTED;
+		}
+		if (empty_buf_done->status == VIDC_ERR_BITSTREAM_ERR) {
+			dprintk(VIDC_INFO,
+				"Failed : Corrupted input stream\n");
+			vbuf->flags |= V4L2_QCOM_BUF_DATA_CORRUPT;
+		}
+		if (empty_buf_done->status == VIDC_ERR_START_CODE_NOT_FOUND) {
+			vbuf->flags |= V4L2_MSM_VIDC_BUF_START_CODE_NOT_FOUND;
+			dprintk(VIDC_INFO,
+				"Failed: Start code not found\n");
+		}
+		if (empty_buf_done->flags & HAL_BUFFERFLAG_SYNCFRAME)
+			vbuf->flags |= V4L2_QCOM_BUF_FLAG_IDRFRAME |
+				V4L2_BUF_FLAG_KEYFRAME;
+		dprintk(VIDC_DBG,
+			"Got ebd from hal: device_addr: %pa, alloc: %d, status: %#x, pic_type: %#x, flags: %#x\n",
+			&empty_buf_done->packet_buffer,
+			empty_buf_done->alloc_len, empty_buf_done->status,
+			empty_buf_done->picture_type, empty_buf_done->flags);
+
+		mutex_lock(&inst->bufq[OUTPUT_PORT].lock);
+		vb2_buffer_done(vb, VB2_BUF_STATE_DONE);
+		mutex_unlock(&inst->bufq[OUTPUT_PORT].lock);
+		msm_vidc_debugfs_update(inst, MSM_VIDC_DEBUGFS_EVENT_EBD);
+	}
+
+	put_inst(inst);
+}
+
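+/*
+ * Take a reference on a dynamic output buffer. A count of 2 means both
+ * the client and the firmware hold the buffer; anything higher is
+ * treated as a bug.
+ */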
+int buf_ref_get(struct msm_vidc_inst *inst, struct buffer_info *binfo)
+{
+	int cnt = 0;
+
+	if (!inst || !binfo)
+		return -EINVAL;
+
+	atomic_inc(&binfo->ref_count);
+	cnt = atomic_read(&binfo->ref_count);
+	if (cnt > 2) {
+		dprintk(VIDC_DBG, "%s: invalid ref_cnt: %d\n", __func__, cnt);
+		cnt = -EINVAL;
+	}
+	if (cnt == 2)
+		inst->buffers_held_in_driver++;
+
+	dprintk(VIDC_DBG, "REF_GET[%d] fd[0] = %d\n", cnt, binfo->fd[0]);
+
+	return cnt;
+}
+
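+/*
+ * Drop a reference on a dynamic output buffer. At a count of 0 the
+ * buffer is flagged for deletion at dqbuf time; at 1 it is queued back
+ * to firmware via qbuf_dynamic_buf().
+ */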
+int buf_ref_put(struct msm_vidc_inst *inst, struct buffer_info *binfo)
+{
+	int rc = 0;
+	int cnt;
+	bool release_buf = false;
+	bool qbuf_again = false;
+
+	if (!inst || !binfo)
+		return -EINVAL;
+
+	atomic_dec(&binfo->ref_count);
+	cnt = atomic_read(&binfo->ref_count);
+	dprintk(VIDC_DBG, "REF_PUT[%d] fd[0] = %d\n", cnt, binfo->fd[0]);
+	if (!cnt)
+		release_buf = true;
+	else if (cnt == 1)
+		qbuf_again = true;
+	else {
+		dprintk(VIDC_DBG, "%s: invalid ref_cnt: %d\n", __func__, cnt);
+		cnt = -EINVAL;
+	}
+
+	if (cnt < 0)
+		return cnt;
+
+	if (release_buf) {
+		/*
+		 * We cannot delete binfo here, as we still need to copy the
+		 * user virtual address saved in binfo->uvaddr into the
+		 * dequeued v4l2 buffer.
+		 *
+		 * Set the pending_deletion flag to true here and delete binfo
+		 * from the registered list in dqbuf, after setting the uvaddr.
+		 */
+		dprintk(VIDC_DBG, "fd[0] = %d -> pending_deletion = true\n",
+			binfo->fd[0]);
+		binfo->pending_deletion = true;
+	} else if (qbuf_again) {
+		inst->buffers_held_in_driver--;
+		rc = qbuf_dynamic_buf(inst, binfo);
+		if (!rc)
+			return rc;
+	}
+	return cnt;
+}
+
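+/*
+ * For dynamic buffer mode, drop the driver's reference on an output
+ * buffer returned by firmware, unless firmware marked it READONLY and
+ * therefore still holds its own reference.
+ */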
+static void handle_dynamic_buffer(struct msm_vidc_inst *inst,
+		ion_phys_addr_t device_addr, u32 flags)
+{
+	struct buffer_info *binfo = NULL, *temp = NULL;
+
+	/*
+	 * Update reference count and release OR queue back the buffer,
+	 * only when firmware is not holding a reference.
+	 */
+	if (inst->buffer_mode_set[CAPTURE_PORT] == HAL_BUFFER_MODE_DYNAMIC) {
+		binfo = device_to_uvaddr(&inst->registeredbufs, device_addr);
+		if (!binfo) {
+			dprintk(VIDC_ERR,
+				"%s buffer not found in registered list\n",
+				__func__);
+			return;
+		}
+		if (flags & HAL_BUFFERFLAG_READONLY) {
+			dprintk(VIDC_DBG,
+				"FBD fd[0] = %d -> Reference with f/w, addr: %pa\n",
+				binfo->fd[0], &device_addr);
+		} else {
+			dprintk(VIDC_DBG,
+				"FBD fd[0] = %d -> FBD_ref_released, addr: %pa\n",
+				binfo->fd[0], &device_addr);
+
+			mutex_lock(&inst->registeredbufs.lock);
+			list_for_each_entry(temp, &inst->registeredbufs.list,
+							list) {
+				if (temp == binfo) {
+					buf_ref_put(inst, binfo);
+					break;
+				}
+			}
+			mutex_unlock(&inst->registeredbufs.lock);
+		}
+	}
+}
+
+static int handle_multi_stream_buffers(struct msm_vidc_inst *inst,
+		ion_phys_addr_t dev_addr)
+{
+	struct internal_buf *binfo;
+	struct msm_smem *handle;
+	bool found = false;
+
+	mutex_lock(&inst->outputbufs.lock);
+	list_for_each_entry(binfo, &inst->outputbufs.list, list) {
+		handle = binfo->handle;
+		if (handle && dev_addr == handle->device_addr) {
+			if (binfo->buffer_ownership == DRIVER) {
+				dprintk(VIDC_ERR,
+					"FW returned same buffer: %pa\n",
+					&dev_addr);
+				break;
+			}
+			binfo->buffer_ownership = DRIVER;
+			found = true;
+			break;
+		}
+	}
+	mutex_unlock(&inst->outputbufs.lock);
+
+	if (!found) {
+		dprintk(VIDC_ERR,
+			"Failed to find output buffer in queued list: %pa\n",
+			&dev_addr);
+	}
+
+	return 0;
+}
+
+enum hal_buffer msm_comm_get_hal_output_buffer(struct msm_vidc_inst *inst)
+{
+	if (msm_comm_get_stream_output_mode(inst) ==
+		HAL_VIDEO_DECODER_SECONDARY)
+		return HAL_BUFFER_OUTPUT2;
+	else
+		return HAL_BUFFER_OUTPUT;
+}
+
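+/*
+ * FBD (Fill Buffer Done): firmware has filled an output buffer. Copy
+ * the filled length, timestamp and flag bits into the v4l2 buffer and
+ * complete it, or reclaim ownership of secondary-stream buffers.
+ */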
+static void handle_fbd(enum hal_command_response cmd, void *data)
+{
+	struct msm_vidc_cb_data_done *response = data;
+	struct msm_vidc_inst *inst;
+	struct vb2_buffer *vb = NULL;
+	struct vidc_hal_fbd *fill_buf_done;
+	enum hal_buffer buffer_type;
+	int extra_idx = 0;
+	int64_t time_usec = 0;
+	struct vb2_v4l2_buffer *vbuf = NULL;
+
+	if (!response) {
+		dprintk(VIDC_ERR, "Invalid response from vidc_hal\n");
+		return;
+	}
+
+	inst = get_inst(get_vidc_core(response->device_id),
+			response->session_id);
+	if (!inst) {
+		dprintk(VIDC_WARN, "Got a response for an inactive session\n");
+		return;
+	}
+
+	fill_buf_done = (struct vidc_hal_fbd *)&response->output_done;
+	buffer_type = msm_comm_get_hal_output_buffer(inst);
+	if (fill_buf_done->buffer_type == buffer_type) {
+		vb = get_vb_from_device_addr(&inst->bufq[CAPTURE_PORT],
+				fill_buf_done->packet_buffer1);
+	} else {
+		if (handle_multi_stream_buffers(inst,
+				fill_buf_done->packet_buffer1))
+			dprintk(VIDC_ERR,
+				"Failed : Output buffer not found %pa\n",
+				&fill_buf_done->packet_buffer1);
+		goto err_handle_fbd;
+	}
+
+	if (vb) {
+		vbuf = to_vb2_v4l2_buffer(vb);
+		vb->planes[0].bytesused = fill_buf_done->filled_len1;
+		vb->planes[0].data_offset = fill_buf_done->offset1;
+		if (vb->planes[0].data_offset > vb->planes[0].length)
+			dprintk(VIDC_INFO,
+				"fbd:Overflow data_offset = %d; length = %d\n",
+				vb->planes[0].data_offset,
+				vb->planes[0].length);
+		if (vb->planes[0].bytesused > vb->planes[0].length)
+			dprintk(VIDC_INFO,
+				"fbd:Overflow bytesused = %d; length = %d\n",
+				vb->planes[0].bytesused,
+				vb->planes[0].length);
+		if (!(fill_buf_done->flags1 &
+			HAL_BUFFERFLAG_TIMESTAMPINVALID)) {
+			time_usec = fill_buf_done->timestamp_hi;
+			time_usec = (time_usec << 32) |
+				fill_buf_done->timestamp_lo;
+		} else {
+			time_usec = 0;
+			dprintk(VIDC_DBG,
+					"Set zero timestamp for buffer %pa, filled: %d, (hi:%u, lo:%u)\n",
+					&fill_buf_done->packet_buffer1,
+					fill_buf_done->filled_len1,
+					fill_buf_done->timestamp_hi,
+					fill_buf_done->timestamp_lo);
+		}
+		vbuf->timestamp =
+			ns_to_timeval(time_usec * NSEC_PER_USEC);
+		vbuf->flags = 0;
+		extra_idx =
+			EXTRADATA_IDX(inst->prop.num_planes[CAPTURE_PORT]);
+		if (extra_idx && extra_idx < VIDEO_MAX_PLANES) {
+			vb->planes[extra_idx].m.userptr =
+				(unsigned long)fill_buf_done->extra_data_buffer;
+			vb->planes[extra_idx].bytesused =
+				vb->planes[extra_idx].length;
+			vb->planes[extra_idx].data_offset = 0;
+		}
+
+		handle_dynamic_buffer(inst, fill_buf_done->packet_buffer1,
+					fill_buf_done->flags1);
+		if (fill_buf_done->flags1 & HAL_BUFFERFLAG_READONLY)
+			vbuf->flags |= V4L2_QCOM_BUF_FLAG_READONLY;
+		if (fill_buf_done->flags1 & HAL_BUFFERFLAG_EOS)
+			vbuf->flags |= V4L2_QCOM_BUF_FLAG_EOS;
+		if (fill_buf_done->flags1 & HAL_BUFFERFLAG_CODECCONFIG)
+			vbuf->flags &= ~V4L2_QCOM_BUF_FLAG_CODECCONFIG;
+		if (fill_buf_done->flags1 & HAL_BUFFERFLAG_SYNCFRAME)
+			vbuf->flags |= V4L2_QCOM_BUF_FLAG_IDRFRAME;
+		if (fill_buf_done->flags1 & HAL_BUFFERFLAG_EOSEQ)
+			vbuf->flags |= V4L2_QCOM_BUF_FLAG_EOSEQ;
+		if (fill_buf_done->flags1 & HAL_BUFFERFLAG_DECODEONLY)
+			vbuf->flags |= V4L2_QCOM_BUF_FLAG_DECODEONLY;
+		if (fill_buf_done->flags1 & HAL_BUFFERFLAG_DATACORRUPT)
+			vbuf->flags |= V4L2_QCOM_BUF_DATA_CORRUPT;
+		if (fill_buf_done->flags1 & HAL_BUFFERFLAG_DROP_FRAME)
+			vbuf->flags |= V4L2_QCOM_BUF_DROP_FRAME;
+		if (fill_buf_done->flags1 & HAL_BUFFERFLAG_MBAFF)
+			vbuf->flags |= V4L2_MSM_BUF_FLAG_MBAFF;
+
+		switch (fill_buf_done->picture_type) {
+		case HAL_PICTURE_IDR:
+			vbuf->flags |= V4L2_QCOM_BUF_FLAG_IDRFRAME;
+			vbuf->flags |= V4L2_BUF_FLAG_KEYFRAME;
+			break;
+		case HAL_PICTURE_I:
+			vbuf->flags |= V4L2_BUF_FLAG_KEYFRAME;
+			break;
+		case HAL_PICTURE_P:
+			vbuf->flags |= V4L2_BUF_FLAG_PFRAME;
+			break;
+		case HAL_PICTURE_B:
+			vbuf->flags |= V4L2_BUF_FLAG_BFRAME;
+			break;
+		case HAL_FRAME_NOTCODED:
+		case HAL_UNUSED_PICT:
+			/* Do we need to care about these? */
+		case HAL_FRAME_YUV:
+			break;
+		default:
+			break;
+		}
+
+		inst->count.fbd++;
+
+		if (extra_idx && extra_idx < VIDEO_MAX_PLANES) {
+			dprintk(VIDC_DBG,
+				"extradata: userptr = %pK;"
+				" bytesused = %d; length = %d\n",
+				(u8 *)vb->planes[extra_idx].m.userptr,
+				vb->planes[extra_idx].bytesused,
+				vb->planes[extra_idx].length);
+		}
+		dprintk(VIDC_DBG,
+		"Got fbd from hal: device_addr: %pa, alloc: %d, filled: %d, offset: %d, ts: %lld, flags: %#x, crop: %d %d %d %d, pic_type: %#x, mark_data: %#x\n",
+		&fill_buf_done->packet_buffer1, fill_buf_done->alloc_len1,
+		fill_buf_done->filled_len1, fill_buf_done->offset1, time_usec,
+		fill_buf_done->flags1, fill_buf_done->start_x_coord,
+		fill_buf_done->start_y_coord, fill_buf_done->frame_width,
+		fill_buf_done->frame_height, fill_buf_done->picture_type,
+		fill_buf_done->mark_data);
+
+		mutex_lock(&inst->bufq[CAPTURE_PORT].lock);
+		vb2_buffer_done(vb, VB2_BUF_STATE_DONE);
+		mutex_unlock(&inst->bufq[CAPTURE_PORT].lock);
+		msm_vidc_debugfs_update(inst, MSM_VIDC_DEBUGFS_EVENT_FBD);
+	}
+
+err_handle_fbd:
+	put_inst(inst);
+}
+
+static void handle_seq_hdr_done(enum hal_command_response cmd, void *data)
+{
+	struct msm_vidc_cb_data_done *response = data;
+	struct msm_vidc_inst *inst;
+	struct vb2_buffer *vb;
+	struct vidc_hal_fbd *fill_buf_done;
+	struct vb2_v4l2_buffer *vbuf;
+
+	if (!response) {
+		dprintk(VIDC_ERR, "Invalid response from vidc_hal\n");
+		return;
+	}
+
+	inst = get_inst(get_vidc_core(response->device_id),
+			response->session_id);
+	if (!inst) {
+		dprintk(VIDC_WARN, "Got a response for an inactive session\n");
+		return;
+	}
+
+	fill_buf_done = (struct vidc_hal_fbd *)&response->output_done;
+	vb = get_vb_from_device_addr(&inst->bufq[CAPTURE_PORT],
+				fill_buf_done->packet_buffer1);
+	if (!vb) {
+		dprintk(VIDC_ERR,
+				"Failed to find video buffer for seq_hdr_done: %pa\n",
+				&fill_buf_done->packet_buffer1);
+		goto err_seq_hdr_done;
+	}
+	vbuf = to_vb2_v4l2_buffer(vb);
+
+	vb->planes[0].bytesused = fill_buf_done->filled_len1;
+	vb->planes[0].data_offset = fill_buf_done->offset1;
+
+	vbuf->flags = V4L2_QCOM_BUF_FLAG_CODECCONFIG;
+	vbuf->timestamp = ns_to_timeval(0);
+
+	dprintk(VIDC_DBG, "Filled length = %d; offset = %d; flags %x\n",
+				vb->planes[0].bytesused,
+				vb->planes[0].data_offset,
+				vbuf->flags);
+	mutex_lock(&inst->bufq[CAPTURE_PORT].lock);
+	vb2_buffer_done(vb, VB2_BUF_STATE_DONE);
+	mutex_unlock(&inst->bufq[CAPTURE_PORT].lock);
+
+err_seq_hdr_done:
+	put_inst(inst);
+}
+
+void handle_cmd_response(enum hal_command_response cmd, void *data)
+{
+	dprintk(VIDC_DBG, "Command response = %d\n", cmd);
+	switch (cmd) {
+	case HAL_SYS_INIT_DONE:
+		handle_sys_init_done(cmd, data);
+		break;
+	case HAL_SYS_RELEASE_RESOURCE_DONE:
+		handle_sys_release_res_done(cmd, data);
+		break;
+	case HAL_SESSION_INIT_DONE:
+		handle_session_init_done(cmd, data);
+		break;
+	case HAL_SESSION_PROPERTY_INFO:
+		handle_session_prop_info(cmd, data);
+		break;
+	case HAL_SESSION_LOAD_RESOURCE_DONE:
+		handle_load_resource_done(cmd, data);
+		break;
+	case HAL_SESSION_START_DONE:
+		handle_start_done(cmd, data);
+		break;
+	case HAL_SESSION_ETB_DONE:
+		handle_ebd(cmd, data);
+		break;
+	case HAL_SESSION_FTB_DONE:
+		handle_fbd(cmd, data);
+		break;
+	case HAL_SESSION_STOP_DONE:
+		handle_stop_done(cmd, data);
+		break;
+	case HAL_SESSION_RELEASE_RESOURCE_DONE:
+		handle_release_res_done(cmd, data);
+		break;
+	case HAL_SESSION_END_DONE:
+	case HAL_SESSION_ABORT_DONE:
+		handle_session_close(cmd, data);
+		break;
+	case HAL_SESSION_EVENT_CHANGE:
+		handle_event_change(cmd, data);
+		break;
+	case HAL_SESSION_FLUSH_DONE:
+		handle_session_flush(cmd, data);
+		break;
+	case HAL_SESSION_GET_SEQ_HDR_DONE:
+		handle_seq_hdr_done(cmd, data);
+		break;
+	case HAL_SYS_WATCHDOG_TIMEOUT:
+	case HAL_SYS_ERROR:
+		handle_sys_error(cmd, data);
+		break;
+	case HAL_SESSION_ERROR:
+		handle_session_error(cmd, data);
+		break;
+	case HAL_SESSION_RELEASE_BUFFER_DONE:
+		handle_session_release_buf_done(cmd, data);
+		break;
+	default:
+		dprintk(VIDC_DBG, "response unhandled: %d\n", cmd);
+		break;
+	}
+}
+
+int msm_comm_scale_clocks(struct msm_vidc_core *core)
+{
+	int num_mbs_per_sec, enc_mbs_per_sec, dec_mbs_per_sec;
+
+	enc_mbs_per_sec =
+		msm_comm_get_load(core, MSM_VIDC_ENCODER, LOAD_CALC_NO_QUIRKS);
+	dec_mbs_per_sec	=
+		msm_comm_get_load(core, MSM_VIDC_DECODER, LOAD_CALC_NO_QUIRKS);
+
+	if (enc_mbs_per_sec >= dec_mbs_per_sec) {
+	/*
+	 * If the encoder load is higher, use it. The encoder votes for the
+	 * higher clock, and since encoder and decoder run on parallel cores,
+	 * this clock should also suffice for decoder use cases.
+	 */
+		num_mbs_per_sec = enc_mbs_per_sec;
+	} else {
+	/*
+	 * If the decoder load is higher, deciding the clock is tricky: a
+	 * higher decoder load may still need a lower clock than a smaller
+	 * encoder load. Since the driver cannot tell which clock to vote
+	 * for, use the total load.
+	 */
+		num_mbs_per_sec = enc_mbs_per_sec + dec_mbs_per_sec;
+	}
+
+	return msm_comm_scale_clocks_load(core, num_mbs_per_sec,
+			LOAD_CALC_NO_QUIRKS);
+}
+
+int msm_comm_scale_clocks_load(struct msm_vidc_core *core,
+		int num_mbs_per_sec, enum load_calc_quirks quirks)
+{
+	int rc = 0;
+	struct hfi_device *hdev;
+	struct msm_vidc_inst *inst = NULL;
+	unsigned long instant_bitrate = 0;
+	int num_sessions = 0;
+	struct vidc_clk_scale_data clk_scale_data = { {0} };
+	int codec = 0;
+
+	if (!core) {
+		dprintk(VIDC_ERR, "%s Invalid args: %pK\n", __func__, core);
+		return -EINVAL;
+	}
+
+	hdev = core->device;
+	if (!hdev) {
+		dprintk(VIDC_ERR, "%s Invalid device handle: %pK\n",
+			__func__, hdev);
+		return -EINVAL;
+	}
+
+	mutex_lock(&core->lock);
+	list_for_each_entry(inst, &core->instances, list) {
+
+		codec = inst->session_type == MSM_VIDC_DECODER ?
+			inst->fmts[OUTPUT_PORT].fourcc :
+			inst->fmts[CAPTURE_PORT].fourcc;
+
+		if (msm_comm_turbo_session(inst))
+			clk_scale_data.power_mode[num_sessions] =
+				VIDC_POWER_TURBO;
+		else if (is_low_power_session(inst))
+			clk_scale_data.power_mode[num_sessions] =
+				VIDC_POWER_LOW;
+		else
+			clk_scale_data.power_mode[num_sessions] =
+				VIDC_POWER_NORMAL;
+
+		if (inst->dcvs_mode)
+			clk_scale_data.load[num_sessions] = inst->dcvs.load;
+		else
+			clk_scale_data.load[num_sessions] =
+				msm_comm_get_inst_load(inst, quirks);
+
+		clk_scale_data.session[num_sessions] =
+				VIDC_VOTE_DATA_SESSION_VAL(
+				get_hal_codec(codec),
+				get_hal_domain(inst->session_type));
+		num_sessions++;
+
+		if (inst->instant_bitrate > instant_bitrate)
+			instant_bitrate = inst->instant_bitrate;
+
+	}
+	clk_scale_data.num_sessions = num_sessions;
+	mutex_unlock(&core->lock);
+
+	rc = call_hfi_op(hdev, scale_clocks,
+		hdev->hfi_device_data, num_mbs_per_sec,
+		&clk_scale_data, instant_bitrate);
+	if (rc)
+		dprintk(VIDC_ERR, "Failed to set clock rate: %d\n", rc);
+
+	return rc;
+}
+
+void msm_comm_scale_clocks_and_bus(struct msm_vidc_inst *inst)
+{
+	struct msm_vidc_core *core;
+	struct hfi_device *hdev;
+	if (!inst || !inst->core || !inst->core->device) {
+		dprintk(VIDC_ERR, "%s Invalid params\n", __func__);
+		return;
+	}
+	core = inst->core;
+	hdev = core->device;
+
+	if (msm_comm_scale_clocks(core)) {
+		dprintk(VIDC_WARN,
+				"Failed to scale clocks. Performance might be impacted\n");
+	}
+	if (msm_comm_vote_bus(core)) {
+		dprintk(VIDC_WARN,
+				"Failed to scale DDR bus. Performance might be impacted\n");
+	}
+}
+
+static inline enum msm_vidc_thermal_level msm_comm_vidc_thermal_level(int level)
+{
+	switch (level) {
+	case 0:
+		return VIDC_THERMAL_NORMAL;
+	case 1:
+		return VIDC_THERMAL_LOW;
+	case 2:
+		return VIDC_THERMAL_HIGH;
+	default:
+		return VIDC_THERMAL_CRITICAL;
+	}
+}
+
+static unsigned long msm_comm_get_clock_rate(struct msm_vidc_core *core)
+{
+	struct hfi_device *hdev;
+	unsigned long freq = 0;
+
+	if (!core || !core->device) {
+		dprintk(VIDC_ERR, "%s Invalid params\n", __func__);
+		return -EINVAL;
+	}
+	hdev = core->device;
+
+	freq = call_hfi_op(hdev, get_core_clock_rate, hdev->hfi_device_data, 1);
+	dprintk(VIDC_DBG, "clock freq %ld\n", freq);
+
+	return freq;
+}
+
+static bool is_core_turbo(struct msm_vidc_core *core, unsigned long freq)
+{
+	int i = 0;
+	struct msm_vidc_platform_resources *res = &core->resources;
+	struct load_freq_table *table = res->load_freq_tbl;
+	u32 max_freq = 0;
+
+	for (i = 0; i < res->load_freq_tbl_size; i++) {
+		if (max_freq < table[i].freq)
+			max_freq = table[i].freq;
+	}
+	return freq >= max_freq;
+}
+
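+/*
+ * Video sessions are disallowed when the core is running at its maximum
+ * (turbo) frequency and the thermal level is LOW or worse; otherwise
+ * thermal mitigation permits the session.
+ */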
+static bool is_thermal_permissible(struct msm_vidc_core *core)
+{
+	enum msm_vidc_thermal_level tl;
+	unsigned long freq = 0;
+	bool is_turbo = false;
+
+	if (!core->resources.thermal_mitigable)
+		return true;
+
+	if (msm_vidc_thermal_mitigation_disabled) {
+		dprintk(VIDC_DBG,
+			"Thermal mitigation not enabled. debugfs %d\n",
+			msm_vidc_thermal_mitigation_disabled);
+		return true;
+	}
+
+	tl = msm_comm_vidc_thermal_level(vidc_driver->thermal_level);
+	freq = msm_comm_get_clock_rate(core);
+
+	is_turbo = is_core_turbo(core, freq);
+	dprintk(VIDC_DBG,
+		"Core freq %ld Thermal level %d Turbo mode %d\n",
+		freq, tl, is_turbo);
+
+	if (is_turbo && tl >= VIDC_THERMAL_LOW) {
+		dprintk(VIDC_ERR,
+			"Video session not allowed. Turbo mode %d Thermal level %d\n",
+			is_turbo, tl);
+		return false;
+	}
+	return true;
+}
+
+static int msm_comm_session_abort(struct msm_vidc_inst *inst)
+{
+	int rc = 0, abort_completion = 0;
+	struct hfi_device *hdev;
+
+	if (!inst || !inst->core || !inst->core->device) {
+		dprintk(VIDC_ERR, "%s invalid params\n", __func__);
+		return -EINVAL;
+	}
+	hdev = inst->core->device;
+	abort_completion = SESSION_MSG_INDEX(HAL_SESSION_ABORT_DONE);
+
+	rc = call_hfi_op(hdev, session_abort, (void *)inst->session);
+	if (rc) {
+		dprintk(VIDC_ERR,
+			"%s session_abort failed rc: %d\n", __func__, rc);
+		return rc;
+	}
+	rc = wait_for_completion_timeout(
+			&inst->completions[abort_completion],
+			msecs_to_jiffies(msm_vidc_hw_rsp_timeout));
+	if (!rc) {
+		dprintk(VIDC_ERR,
+				"%s: Wait interrupted or timed out [%pK]: %d\n",
+				__func__, inst, abort_completion);
+		call_hfi_op(hdev, flush_debug_queue, hdev->hfi_device_data);
+		dprintk(VIDC_ERR,
+			"ABORT timeout can potentially crash the system\n");
+		msm_comm_print_debug_info(inst);
+
+		BUG_ON(msm_vidc_debug_timeout);
+		rc = -EBUSY;
+	} else {
+		rc = 0;
+	}
+	msm_comm_session_clean(inst);
+	return rc;
+}
+
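+/*
+ * Abort every fully-open session on the core in response to a thermal
+ * trip; sessions that are not fully open get a session error instead.
+ */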
+static void handle_thermal_event(struct msm_vidc_core *core)
+{
+	int rc = 0;
+	struct msm_vidc_inst *inst;
+
+	if (!core || !core->device) {
+		dprintk(VIDC_ERR, "%s Invalid params\n", __func__);
+		return;
+	}
+	mutex_lock(&core->lock);
+	list_for_each_entry(inst, &core->instances, list) {
+		if (!inst->session)
+			continue;
+
+		mutex_unlock(&core->lock);
+		if (inst->state >= MSM_VIDC_OPEN_DONE &&
+			inst->state < MSM_VIDC_CLOSE_DONE) {
+			dprintk(VIDC_WARN, "%s: abort inst %pK\n",
+				__func__, inst);
+			rc = msm_comm_session_abort(inst);
+			if (rc) {
+				dprintk(VIDC_ERR,
+					"%s session_abort failed rc: %d\n",
+					__func__, rc);
+				goto err_sess_abort;
+			}
+			change_inst_state(inst, MSM_VIDC_CORE_INVALID);
+			dprintk(VIDC_WARN,
+				"%s Send sys error for inst %pK\n",
+				__func__, inst);
+			msm_vidc_queue_v4l2_event(inst,
+					V4L2_EVENT_MSM_VIDC_SYS_ERROR);
+		} else {
+			msm_comm_generate_session_error(inst);
+		}
+		mutex_lock(&core->lock);
+	}
+	mutex_unlock(&core->lock);
+	return;
+
+err_sess_abort:
+	msm_comm_clean_notify_client(core);
+	return;
+}
+
+void msm_comm_handle_thermal_event(void)
+{
+	struct msm_vidc_core *core;
+
+	list_for_each_entry(core, &vidc_driver->cores, list) {
+		if (!is_thermal_permissible(core)) {
+			dprintk(VIDC_WARN,
+				"Thermal level critical, stop all active sessions!\n");
+			handle_thermal_event(core);
+		}
+	}
+}
+
+int msm_comm_check_core_init(struct msm_vidc_core *core)
+{
+	int rc = 0;
+	struct hfi_device *hdev;
+	struct msm_vidc_inst *inst = NULL;
+
+	mutex_lock(&core->lock);
+	if (core->state >= VIDC_CORE_INIT_DONE) {
+		dprintk(VIDC_INFO, "Video core: %d is already in state: %d\n",
+				core->id, core->state);
+		goto exit;
+	}
+	dprintk(VIDC_DBG, "Waiting for SYS_INIT_DONE\n");
+	hdev = (struct hfi_device *)core->device;
+	rc = wait_for_completion_timeout(
+		&core->completions[SYS_MSG_INDEX(HAL_SYS_INIT_DONE)],
+		msecs_to_jiffies(msm_vidc_hw_rsp_timeout));
+	if (!rc) {
+		dprintk(VIDC_ERR, "%s: Wait interrupted or timed out: %d\n",
+				__func__, SYS_MSG_INDEX(HAL_SYS_INIT_DONE));
+		call_hfi_op(hdev, flush_debug_queue, hdev->hfi_device_data);
+		dprintk(VIDC_ERR,
+			"SYS_INIT timeout can potentially crash the system\n");
+		/*
+		 * For SYS_INIT there is no inst pointer available, so just
+		 * grab the first instance from the instances list and use
+		 * it for the debug dump.
+		 */
+		inst = list_first_entry(&core->instances,
+			struct msm_vidc_inst, list);
+
+		mutex_unlock(&core->lock);
+		msm_comm_print_debug_info(inst);
+		mutex_lock(&core->lock);
+
+		BUG_ON(msm_vidc_debug_timeout);
+		rc = -EIO;
+		goto exit;
+	} else {
+		core->state = VIDC_CORE_INIT_DONE;
+		rc = 0;
+	}
+	dprintk(VIDC_DBG, "SYS_INIT_DONE!!!\n");
+exit:
+	mutex_unlock(&core->lock);
+	return rc;
+}
+
+static int msm_comm_init_core_done(struct msm_vidc_inst *inst)
+{
+	int rc = 0;
+
+	rc = msm_comm_check_core_init(inst->core);
+	if (rc) {
+		dprintk(VIDC_ERR, "%s - failed to initialize core\n", __func__);
+		msm_comm_generate_sys_error(inst);
+		return rc;
+	}
+	change_inst_state(inst, MSM_VIDC_CORE_INIT_DONE);
+	return rc;
+}
+
+static int msm_comm_init_core(struct msm_vidc_inst *inst)
+{
+	int rc = 0;
+	struct hfi_device *hdev;
+	struct msm_vidc_core *core;
+
+	if (!inst || !inst->core || !inst->core->device)
+		return -EINVAL;
+
+	core = inst->core;
+	hdev = core->device;
+	mutex_lock(&core->lock);
+	if (core->state >= VIDC_CORE_INIT) {
+		dprintk(VIDC_INFO, "Video core: %d is already in state: %d\n",
+				core->id, core->state);
+		goto core_already_inited;
+	}
+	if (!core->capabilities) {
+		core->capabilities = kzalloc(VIDC_MAX_SESSIONS *
+				sizeof(struct msm_vidc_capability), GFP_KERNEL);
+		if (!core->capabilities) {
+			dprintk(VIDC_ERR,
+				"%s: failed to allocate capabilities\n",
+				__func__);
+			rc = -ENOMEM;
+			goto fail_cap_alloc;
+		}
+	} else {
+		dprintk(VIDC_WARN,
+			"%s: capabilities memory is expected to be freed\n",
+			__func__);
+	}
+
+	rc = call_hfi_op(hdev, core_init, hdev->hfi_device_data);
+	if (rc) {
+		dprintk(VIDC_ERR, "Failed to init core, id = %d\n",
+				core->id);
+		goto fail_core_init;
+	}
+	core->state = VIDC_CORE_INIT;
+	core->smmu_fault_handled = false;
+core_already_inited:
+	change_inst_state(inst, MSM_VIDC_CORE_INIT);
+	mutex_unlock(&core->lock);
+	return rc;
+
+fail_core_init:
+	kfree(core->capabilities);
+fail_cap_alloc:
+	core->capabilities = NULL;
+	core->state = VIDC_CORE_UNINIT;
+	mutex_unlock(&core->lock);
+	return rc;
+}
+
+static int msm_vidc_deinit_core(struct msm_vidc_inst *inst)
+{
+	struct msm_vidc_core *core;
+	struct hfi_device *hdev;
+
+	if (!inst || !inst->core || !inst->core->device) {
+		dprintk(VIDC_ERR, "%s invalid parameters\n", __func__);
+		return -EINVAL;
+	}
+
+	core = inst->core;
+	hdev = core->device;
+
+	mutex_lock(&core->lock);
+	if (core->state == VIDC_CORE_UNINIT) {
+		dprintk(VIDC_INFO, "Video core: %d is already in state: %d\n",
+				core->id, core->state);
+		goto core_already_uninited;
+	}
+	mutex_unlock(&core->lock);
+
+	msm_comm_scale_clocks_and_bus(inst);
+
+	mutex_lock(&core->lock);
+
+	if (!core->resources.never_unload_fw) {
+		cancel_delayed_work(&core->fw_unload_work);
+
+		/*
+		 * Delay unloading of firmware. This avoids firmware download
+		 * delays when there is a burst of back-to-back video playback
+		 * sessions, e.g. thumbnail generation.
+		 */
+		schedule_delayed_work(&core->fw_unload_work,
+			msecs_to_jiffies(core->state == VIDC_CORE_INVALID ?
+					0 : msm_vidc_firmware_unload_delay));
+
+		dprintk(VIDC_DBG, "firmware unload delayed by %u ms\n",
+			core->state == VIDC_CORE_INVALID ?
+			0 : msm_vidc_firmware_unload_delay);
+	}
+
+core_already_uninited:
+	change_inst_state(inst, MSM_VIDC_CORE_UNINIT);
+	mutex_unlock(&core->lock);
+	return 0;
+}
+
+int msm_comm_force_cleanup(struct msm_vidc_inst *inst)
+{
+	msm_comm_kill_session(inst);
+	return msm_vidc_deinit_core(inst);
+}
+
+static int msm_comm_session_init(int flipped_state,
+	struct msm_vidc_inst *inst)
+{
+	int rc = 0;
+	int fourcc = 0;
+	struct hfi_device *hdev;
+
+	if (!inst || !inst->core || !inst->core->device) {
+		dprintk(VIDC_ERR, "%s invalid parameters\n", __func__);
+		return -EINVAL;
+	}
+	hdev = inst->core->device;
+
+	if (IS_ALREADY_IN_STATE(flipped_state, MSM_VIDC_OPEN)) {
+		dprintk(VIDC_INFO, "inst: %pK is already in state: %d\n",
+						inst, inst->state);
+		goto exit;
+	}
+	if (inst->session_type == MSM_VIDC_DECODER) {
+		fourcc = inst->fmts[OUTPUT_PORT].fourcc;
+	} else if (inst->session_type == MSM_VIDC_ENCODER) {
+		fourcc = inst->fmts[CAPTURE_PORT].fourcc;
+	} else {
+		dprintk(VIDC_ERR, "Invalid session\n");
+		return -EINVAL;
+	}
+
+	rc = call_hfi_op(hdev, session_init, hdev->hfi_device_data,
+			inst, get_hal_domain(inst->session_type),
+			get_hal_codec(fourcc),
+			&inst->session);
+
+	if (rc || !inst->session) {
+		dprintk(VIDC_ERR,
+			"Failed to call session init for: %pK, %pK, %d, %d\n",
+			inst->core->device, inst,
+			inst->session_type, fourcc);
+		rc = -EINVAL;
+		goto exit;
+	}
+	change_inst_state(inst, MSM_VIDC_OPEN);
+exit:
+	return rc;
+}
+
+static void msm_vidc_print_running_insts(struct msm_vidc_core *core)
+{
+	struct msm_vidc_inst *temp;
+	dprintk(VIDC_ERR, "Running instances:\n");
+	dprintk(VIDC_ERR, "%4s|%4s|%4s|%4s|%4s\n",
+			"type", "w", "h", "fps", "prop");
+
+	mutex_lock(&core->lock);
+	list_for_each_entry(temp, &core->instances, list) {
+		if (temp->state >= MSM_VIDC_OPEN_DONE &&
+				temp->state < MSM_VIDC_STOP_DONE) {
+			char properties[4] = "";
+
+			if (is_thumbnail_session(temp))
+				strlcat(properties, "N", sizeof(properties));
+
+			if (msm_comm_turbo_session(temp))
+				strlcat(properties, "T", sizeof(properties));
+
+			dprintk(VIDC_ERR, "%4d|%4d|%4d|%4d|%4s\n",
+					temp->session_type,
+					max(temp->prop.width[CAPTURE_PORT],
+						temp->prop.width[OUTPUT_PORT]),
+					max(temp->prop.height[CAPTURE_PORT],
+						temp->prop.height[OUTPUT_PORT]),
+					temp->prop.fps, properties);
+		}
+	}
+	mutex_unlock(&core->lock);
+}
+
+static int msm_vidc_load_resources(int flipped_state,
+	struct msm_vidc_inst *inst)
+{
+	int rc = 0;
+	struct hfi_device *hdev;
+	int num_mbs_per_sec = 0, max_load_adj = 0;
+	struct msm_vidc_core *core;
+	enum load_calc_quirks quirks = LOAD_CALC_IGNORE_TURBO_LOAD |
+		LOAD_CALC_IGNORE_THUMBNAIL_LOAD |
+		LOAD_CALC_IGNORE_NON_REALTIME_LOAD;
+
+	if (!inst || !inst->core || !inst->core->device) {
+		dprintk(VIDC_ERR, "%s invalid parameters\n", __func__);
+		return -EINVAL;
+	}
+
+	core = inst->core;
+	if (core->state == VIDC_CORE_INVALID) {
+		dprintk(VIDC_ERR,
+				"Core is in bad state can't do load res\n");
+		return -EINVAL;
+	}
+
+	if (inst->state == MSM_VIDC_CORE_INVALID) {
+		dprintk(VIDC_ERR,
+				"Instance is in invalid state can't do load res\n");
+		return -EINVAL;
+	}
+
+	num_mbs_per_sec =
+		msm_comm_get_load(core, MSM_VIDC_DECODER, quirks) +
+		msm_comm_get_load(core, MSM_VIDC_ENCODER, quirks);
+
+	max_load_adj = core->resources.max_load +
+		inst->capability.mbs_per_frame.max;
+
+	if (num_mbs_per_sec > max_load_adj) {
+		dprintk(VIDC_ERR, "HW is overloaded, needed: %d max: %d\n",
+			num_mbs_per_sec, max_load_adj);
+		msm_vidc_print_running_insts(core);
+		inst->state = MSM_VIDC_CORE_INVALID;
+		msm_comm_kill_session(inst);
+		return -EBUSY;
+	}
+
+	hdev = core->device;
+	if (IS_ALREADY_IN_STATE(flipped_state, MSM_VIDC_LOAD_RESOURCES)) {
+		dprintk(VIDC_INFO, "inst: %pK is already in state: %d\n",
+						inst, inst->state);
+		goto exit;
+	}
+
+	rc = call_hfi_op(hdev, session_load_res, (void *) inst->session);
+	if (rc) {
+		dprintk(VIDC_ERR,
+			"Failed to send load resources\n");
+		goto exit;
+	}
+	change_inst_state(inst, MSM_VIDC_LOAD_RESOURCES);
+exit:
+	return rc;
+}
+
+static int msm_vidc_start(int flipped_state, struct msm_vidc_inst *inst)
+{
+	int rc = 0;
+	struct hfi_device *hdev;
+
+	if (!inst || !inst->core || !inst->core->device) {
+		dprintk(VIDC_ERR, "%s invalid parameters\n", __func__);
+		return -EINVAL;
+	}
+	if (inst->state == MSM_VIDC_CORE_INVALID ||
+			inst->core->state == VIDC_CORE_INVALID) {
+		dprintk(VIDC_ERR,
+				"Core is in bad state can't do start\n");
+		return -EINVAL;
+	}
+
+	hdev = inst->core->device;
+
+	if (IS_ALREADY_IN_STATE(flipped_state, MSM_VIDC_START)) {
+		dprintk(VIDC_INFO,
+			"inst: %pK is already in state: %d\n",
+			inst, inst->state);
+		goto exit;
+	}
+	rc = call_hfi_op(hdev, session_start, (void *) inst->session);
+	if (rc) {
+		dprintk(VIDC_ERR,
+			"Failed to send start\n");
+		goto exit;
+	}
+	change_inst_state(inst, MSM_VIDC_START);
+exit:
+	return rc;
+}
+
+static int msm_vidc_stop(int flipped_state, struct msm_vidc_inst *inst)
+{
+	int rc = 0;
+	struct hfi_device *hdev;
+
+	if (!inst || !inst->core || !inst->core->device) {
+		dprintk(VIDC_ERR, "%s invalid parameters\n", __func__);
+		return -EINVAL;
+	}
+	hdev = inst->core->device;
+
+	if (IS_ALREADY_IN_STATE(flipped_state, MSM_VIDC_STOP)) {
+		dprintk(VIDC_INFO,
+			"inst: %pK is already in state: %d\n",
+			inst, inst->state);
+		goto exit;
+	}
+	dprintk(VIDC_DBG, "Send Stop to hal\n");
+	rc = call_hfi_op(hdev, session_stop, (void *) inst->session);
+	if (rc) {
+		dprintk(VIDC_ERR, "Failed to send stop\n");
+		goto exit;
+	}
+	change_inst_state(inst, MSM_VIDC_STOP);
+exit:
+	return rc;
+}
+
+static int msm_vidc_release_res(int flipped_state, struct msm_vidc_inst *inst)
+{
+	int rc = 0;
+	struct hfi_device *hdev;
+
+	if (!inst || !inst->core || !inst->core->device) {
+		dprintk(VIDC_ERR, "%s invalid parameters\n", __func__);
+		return -EINVAL;
+	}
+	hdev = inst->core->device;
+
+	if (IS_ALREADY_IN_STATE(flipped_state, MSM_VIDC_RELEASE_RESOURCES)) {
+		dprintk(VIDC_INFO,
+			"inst: %pK is already in state: %d\n",
+			inst, inst->state);
+		goto exit;
+	}
+	dprintk(VIDC_DBG,
+		"Send release res to hal\n");
+	rc = call_hfi_op(hdev, session_release_res, (void *) inst->session);
+	if (rc) {
+		dprintk(VIDC_ERR,
+			"Failed to send release resources\n");
+		goto exit;
+	}
+	change_inst_state(inst, MSM_VIDC_RELEASE_RESOURCES);
+exit:
+	return rc;
+}
+
+static int msm_comm_session_close(int flipped_state,
+			struct msm_vidc_inst *inst)
+{
+	int rc = 0;
+	struct hfi_device *hdev;
+
+	if (!inst || !inst->core || !inst->core->device) {
+		dprintk(VIDC_ERR, "%s invalid params\n", __func__);
+		return -EINVAL;
+	}
+	hdev = inst->core->device;
+	if (IS_ALREADY_IN_STATE(flipped_state, MSM_VIDC_CLOSE)) {
+		dprintk(VIDC_INFO,
+			"inst: %pK is already in state: %d\n",
+						inst, inst->state);
+		goto exit;
+	}
+	dprintk(VIDC_DBG,
+		"Send session close to hal\n");
+	rc = call_hfi_op(hdev, session_end, (void *) inst->session);
+	if (rc) {
+		dprintk(VIDC_ERR,
+			"Failed to send close\n");
+		goto exit;
+	}
+	change_inst_state(inst, MSM_VIDC_CLOSE);
+exit:
+	return rc;
+}
+
+int msm_comm_suspend(int core_id)
+{
+	struct hfi_device *hdev;
+	struct msm_vidc_core *core;
+	int rc = 0;
+
+	core = get_vidc_core(core_id);
+	if (!core) {
+		dprintk(VIDC_ERR,
+			"%s: Failed to find core for core_id = %d\n",
+			__func__, core_id);
+		return -EINVAL;
+	}
+
+	hdev = (struct hfi_device *)core->device;
+	if (!hdev) {
+		dprintk(VIDC_ERR, "%s Invalid device handle\n", __func__);
+		return -EINVAL;
+	}
+
+	mutex_lock(&core->lock);
+	if (core->state == VIDC_CORE_INVALID) {
+		dprintk(VIDC_ERR,
+				"%s - fw is not in proper state, skip suspend\n",
+				__func__);
+		rc = -EINVAL;
+		goto exit;
+	}
+
+	rc = call_hfi_op(hdev, suspend, hdev->hfi_device_data);
+	if (rc)
+		dprintk(VIDC_WARN, "Failed to suspend\n");
+
+exit:
+	mutex_unlock(&core->lock);
+	return rc;
+}
+
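+/*
+ * The session state machine is one ordered sequence with MSM_VIDC_STOP
+ * in the middle. When the desired state lies on the other side of
+ * MSM_VIDC_STOP, the current state is mirrored around it (and snapped
+ * to a command-state boundary) so that the single cascade in
+ * msm_comm_try_state() can walk the transition in either direction.
+ */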
+static int get_flipped_state(int present_state,
+	int desired_state)
+{
+	int flipped_state = present_state;
+	if (flipped_state < MSM_VIDC_STOP
+			&& desired_state > MSM_VIDC_STOP) {
+		flipped_state = MSM_VIDC_STOP + (MSM_VIDC_STOP - flipped_state);
+		flipped_state &= 0xFFFE;
+		flipped_state = flipped_state - 1;
+	} else if (flipped_state > MSM_VIDC_STOP
+			&& desired_state < MSM_VIDC_STOP) {
+		flipped_state = MSM_VIDC_STOP -
+			(flipped_state - MSM_VIDC_STOP + 1);
+		flipped_state &= 0xFFFE;
+		flipped_state = flipped_state - 1;
+	}
+	return flipped_state;
+}
+
+struct hal_buffer_requirements *get_buff_req_buffer(
+		struct msm_vidc_inst *inst, enum hal_buffer buffer_type)
+{
+	int i;
+	for (i = 0; i < HAL_BUFFER_MAX; i++) {
+		if (inst->buff_req.buffer[i].buffer_type == buffer_type)
+			return &inst->buff_req.buffer[i];
+	}
+	return NULL;
+}
+
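+/*
+ * Allocate the output buffers firmware asked for (plus any extradata
+ * region) and, when the capture port uses static buffer mode, register
+ * each one with firmware via session_set_buffers.
+ */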
+static int set_output_buffers(struct msm_vidc_inst *inst,
+	enum hal_buffer buffer_type)
+{
+	int rc = 0;
+	struct msm_smem *handle;
+	struct internal_buf *binfo = NULL;
+	u32 smem_flags = 0, buffer_size;
+	struct hal_buffer_requirements *output_buf, *extradata_buf;
+	int i;
+	struct hfi_device *hdev;
+	struct hal_buffer_size_minimum b;
+
+	hdev = inst->core->device;
+
+	output_buf = get_buff_req_buffer(inst, buffer_type);
+	if (!output_buf) {
+		dprintk(VIDC_DBG,
+			"This output buffer not required, buffer_type: %x\n",
+			buffer_type);
+		return 0;
+	}
+	dprintk(VIDC_DBG,
+		"output: num = %d, size = %d\n",
+		output_buf->buffer_count_actual,
+		output_buf->buffer_size);
+
+	buffer_size = output_buf->buffer_size;
+	b.buffer_type = buffer_type;
+	b.buffer_size = buffer_size;
+	rc = call_hfi_op(hdev, session_set_property,
+		inst->session, HAL_PARAM_BUFFER_SIZE_MINIMUM,
+		&b);
+
+	extradata_buf = get_buff_req_buffer(inst, HAL_BUFFER_EXTRADATA_OUTPUT);
+	if (extradata_buf) {
+		dprintk(VIDC_DBG,
+			"extradata: num = %d, size = %d\n",
+			extradata_buf->buffer_count_actual,
+			extradata_buf->buffer_size);
+		buffer_size += extradata_buf->buffer_size;
+	} else {
+		dprintk(VIDC_DBG,
+			"This extradata buffer not required, buffer_type: %x\n",
+			buffer_type);
+	}
+
+	if (inst->flags & VIDC_SECURE)
+		smem_flags |= SMEM_SECURE;
+
+	if (output_buf->buffer_size) {
+		for (i = 0; i < output_buf->buffer_count_actual;
+				i++) {
+			handle = msm_comm_smem_alloc(inst,
+					buffer_size, 1, smem_flags,
+					buffer_type, 0);
+			if (!handle) {
+				dprintk(VIDC_ERR,
+					"Failed to allocate output memory\n");
+				rc = -ENOMEM;
+				goto err_no_mem;
+			}
+			rc = msm_comm_smem_cache_operations(inst,
+					handle, SMEM_CACHE_CLEAN, -1);
+			if (rc) {
+				dprintk(VIDC_WARN,
+					"Failed to clean cache may cause undefined behavior\n");
+			}
+			binfo = kzalloc(sizeof(*binfo), GFP_KERNEL);
+			if (!binfo) {
+				dprintk(VIDC_ERR, "Out of memory\n");
+				rc = -ENOMEM;
+				goto fail_kzalloc;
+			}
+
+			binfo->handle = handle;
+			binfo->buffer_type = buffer_type;
+			binfo->buffer_ownership = DRIVER;
+			dprintk(VIDC_DBG, "Output buffer address: %pa\n",
+					&handle->device_addr);
+
+			if (inst->buffer_mode_set[CAPTURE_PORT] ==
+				HAL_BUFFER_MODE_STATIC) {
+				struct vidc_buffer_addr_info buffer_info = {0};
+				buffer_info.buffer_size =
+					output_buf->buffer_size;
+				buffer_info.buffer_type = buffer_type;
+				buffer_info.num_buffers = 1;
+				buffer_info.align_device_addr =
+					handle->device_addr;
+				buffer_info.extradata_addr =
+					handle->device_addr +
+					output_buf->buffer_size;
+				if (extradata_buf)
+					buffer_info.extradata_size =
+						extradata_buf->buffer_size;
+				rc = call_hfi_op(hdev, session_set_buffers,
+					(void *) inst->session, &buffer_info);
+				if (rc) {
+					dprintk(VIDC_ERR,
+						"%s : session_set_buffers failed\n",
+						__func__);
+					goto fail_set_buffers;
+				}
+			}
+			mutex_lock(&inst->outputbufs.lock);
+			list_add_tail(&binfo->list, &inst->outputbufs.list);
+			mutex_unlock(&inst->outputbufs.lock);
+		}
+	}
+	return rc;
+fail_set_buffers:
+	msm_comm_smem_free(inst, handle);
+err_no_mem:
+	kfree(binfo);
+fail_kzalloc:
+	return rc;
+}
+
+static inline char *get_buffer_name(enum hal_buffer buffer_type)
+{
+	switch (buffer_type) {
+	case HAL_BUFFER_INPUT: return "input";
+	case HAL_BUFFER_OUTPUT: return "output";
+	case HAL_BUFFER_OUTPUT2: return "output_2";
+	case HAL_BUFFER_EXTRADATA_INPUT: return "input_extra";
+	case HAL_BUFFER_EXTRADATA_OUTPUT: return "output_extra";
+	case HAL_BUFFER_EXTRADATA_OUTPUT2: return "output2_extra";
+	case HAL_BUFFER_INTERNAL_SCRATCH: return "scratch";
+	case HAL_BUFFER_INTERNAL_SCRATCH_1: return "scratch_1";
+	case HAL_BUFFER_INTERNAL_SCRATCH_2: return "scratch_2";
+	case HAL_BUFFER_INTERNAL_PERSIST: return "persist";
+	case HAL_BUFFER_INTERNAL_PERSIST_1: return "persist_1";
+	case HAL_BUFFER_INTERNAL_CMD_QUEUE: return "queue";
+	default: return "????";
+	}
+}
+
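+/*
+ * Register a single internal buffer with firmware, cleaning the cache
+ * first; `reuse` only changes the log message.
+ */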
+static int set_internal_buf_on_fw(struct msm_vidc_inst *inst,
+				enum hal_buffer buffer_type,
+				struct msm_smem *handle, bool reuse)
+{
+	struct vidc_buffer_addr_info buffer_info;
+	struct hfi_device *hdev;
+	int rc = 0;
+
+	if (!inst || !inst->core || !inst->core->device || !handle) {
+		dprintk(VIDC_ERR, "%s - invalid params\n", __func__);
+		return -EINVAL;
+	}
+
+	hdev = inst->core->device;
+
+	rc = msm_comm_smem_cache_operations(inst,
+					handle, SMEM_CACHE_CLEAN, -1);
+	if (rc) {
+		dprintk(VIDC_WARN,
+			"Failed to clean cache. Undefined behavior\n");
+	}
+
+	buffer_info.buffer_size = handle->size;
+	buffer_info.buffer_type = buffer_type;
+	buffer_info.num_buffers = 1;
+	buffer_info.align_device_addr = handle->device_addr;
+	dprintk(VIDC_DBG, "%s %s buffer : %pa\n",
+				reuse ? "Reusing" : "Allocated",
+				get_buffer_name(buffer_type),
+				&buffer_info.align_device_addr);
+
+	rc = call_hfi_op(hdev, session_set_buffers,
+		(void *) inst->session, &buffer_info);
+	if (rc) {
+		dprintk(VIDC_ERR,
+			"vidc_hal_session_set_buffers failed\n");
+		return rc;
+	}
+	return 0;
+}
+
+static bool reuse_internal_buffers(struct msm_vidc_inst *inst,
+		enum hal_buffer buffer_type, struct msm_vidc_list *buf_list)
+{
+	struct internal_buf *buf;
+	int rc = 0;
+	bool reused = false;
+
+	if (!inst || !buf_list) {
+		dprintk(VIDC_ERR, "%s: invalid params\n", __func__);
+		return false;
+	}
+
+	mutex_lock(&buf_list->lock);
+	list_for_each_entry(buf, &buf_list->list, list) {
+		if (!buf->handle) {
+			reused = false;
+			break;
+		}
+
+		if (buf->buffer_type != buffer_type)
+			continue;
+
+		/*
+		 * Persist buffer sizes do not change with resolution. If they
+		 * are already in the queue, they have been allocated and
+		 * handed to the HW, which can reuse them without reallocation.
+		 * These buffers are not released as part of port reconfig, so
+		 * the driver does not need to set them again.
+		 */
+
+		if (buffer_type != HAL_BUFFER_INTERNAL_PERSIST
+			&& buffer_type != HAL_BUFFER_INTERNAL_PERSIST_1) {
+
+			rc = set_internal_buf_on_fw(inst, buffer_type,
+					buf->handle, true);
+			if (rc) {
+				dprintk(VIDC_ERR,
+					"%s: session_set_buffers failed\n",
+					__func__);
+				reused = false;
+				break;
+			}
+		}
+		reused = true;
+		dprintk(VIDC_DBG,
+			"Re-using internal buffer type : %d\n", buffer_type);
+	}
+	mutex_unlock(&buf_list->lock);
+	return reused;
+}
+
+int allocate_and_set_internal_bufs(struct msm_vidc_inst *inst,
+			struct hal_buffer_requirements *internal_bufreq,
+			struct msm_vidc_list *buf_list, bool set_on_fw)
+{
+	struct msm_smem *handle;
+	struct internal_buf *binfo;
+	u32 smem_flags = 0;
+	int rc = 0;
+	int i = 0;
+
+	if (!inst || !internal_bufreq || !buf_list)
+		return -EINVAL;
+
+	if (!internal_bufreq->buffer_size)
+		return 0;
+
+	if (inst->flags & VIDC_SECURE)
+		smem_flags |= SMEM_SECURE;
+
+	for (i = 0; i < internal_bufreq->buffer_count_actual; i++) {
+		handle = msm_comm_smem_alloc(inst, internal_bufreq->buffer_size,
+				1, smem_flags, internal_bufreq->buffer_type, 0);
+		if (!handle) {
+			dprintk(VIDC_ERR,
+				"Failed to allocate scratch memory\n");
+			rc = -ENOMEM;
+			goto err_no_mem;
+		}
+
+		binfo = kzalloc(sizeof(*binfo), GFP_KERNEL);
+		if (!binfo) {
+			dprintk(VIDC_ERR, "Out of memory\n");
+			rc = -ENOMEM;
+			goto fail_kzalloc;
+		}
+
+		binfo->handle = handle;
+		binfo->buffer_type = internal_bufreq->buffer_type;
+
+		if (set_on_fw) {
+			rc = set_internal_buf_on_fw(inst,
+					internal_bufreq->buffer_type,
+					handle, false);
+			if (rc)
+				goto fail_set_buffers;
+		}
+		mutex_lock(&buf_list->lock);
+		list_add_tail(&binfo->list, &buf_list->list);
+		mutex_unlock(&buf_list->lock);
+	}
+	return rc;
+
+fail_set_buffers:
+	kfree(binfo);
+fail_kzalloc:
+	msm_comm_smem_free(inst, handle);
+err_no_mem:
+	return rc;
+}
+
+static int set_internal_buffers(struct msm_vidc_inst *inst,
+	enum hal_buffer buffer_type, struct msm_vidc_list *buf_list)
+{
+	struct hal_buffer_requirements *internal_buf;
+
+	internal_buf = get_buff_req_buffer(inst, buffer_type);
+	if (!internal_buf) {
+		dprintk(VIDC_DBG,
+			"This internal buffer not required, buffer_type: %x\n",
+			buffer_type);
+		return 0;
+	}
+
+	dprintk(VIDC_DBG, "Buffer type %s: num = %d, size = %d\n",
+		get_buffer_name(buffer_type),
+		internal_buf->buffer_count_actual, internal_buf->buffer_size);
+
+	/*
+	 * Try reusing existing internal buffers first.
+	 * If it's not possible to reuse, allocate new buffers.
+	 */
+	if (reuse_internal_buffers(inst, buffer_type, buf_list))
+		return 0;
+
+	return allocate_and_set_internal_bufs(inst, internal_buf,
+				buf_list, true);
+}
+
+int msm_comm_try_state(struct msm_vidc_inst *inst, int state)
+{
+	int rc = 0;
+	int flipped_state;
+	struct msm_vidc_core *core;
+	if (!inst) {
+		dprintk(VIDC_ERR,
+				"Invalid instance pointer = %pK\n", inst);
+		return -EINVAL;
+	}
+	dprintk(VIDC_DBG,
+			"Trying to move inst: %pK from: %#x to %#x\n",
+			inst, inst->state, state);
+	core = inst->core;
+	if (!core) {
+		dprintk(VIDC_ERR,
+				"Invalid core pointer = %pK\n", inst);
+		return -EINVAL;
+	}
+	mutex_lock(&inst->sync_lock);
+	if (inst->state == MSM_VIDC_CORE_INVALID ||
+			core->state == VIDC_CORE_INVALID) {
+		dprintk(VIDC_ERR,
+				"Core is in bad state can't change the state\n");
+		rc = -EINVAL;
+		goto exit;
+	}
+	flipped_state = get_flipped_state(inst->state, state);
+	dprintk(VIDC_DBG,
+			"flipped_state = %#x\n", flipped_state);
+	switch (flipped_state) {
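+	/*
+	 * Intentional fall-through: each case performs one step of the
+	 * state sequence and falls into the next, breaking out early on
+	 * error or once the desired state has been reached.
+	 */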
+	case MSM_VIDC_CORE_UNINIT_DONE:
+	case MSM_VIDC_CORE_INIT:
+		rc = msm_comm_init_core(inst);
+		if (rc || state <= get_flipped_state(inst->state, state))
+			break;
+	case MSM_VIDC_CORE_INIT_DONE:
+		rc = msm_comm_init_core_done(inst);
+		if (rc || state <= get_flipped_state(inst->state, state))
+			break;
+	case MSM_VIDC_OPEN:
+		rc = msm_comm_session_init(flipped_state, inst);
+		if (rc || state <= get_flipped_state(inst->state, state))
+			break;
+	case MSM_VIDC_OPEN_DONE:
+		rc = wait_for_state(inst, flipped_state, MSM_VIDC_OPEN_DONE,
+			HAL_SESSION_INIT_DONE);
+		if (rc || state <= get_flipped_state(inst->state, state))
+			break;
+	case MSM_VIDC_LOAD_RESOURCES:
+		rc = msm_vidc_load_resources(flipped_state, inst);
+		if (rc || state <= get_flipped_state(inst->state, state))
+			break;
+	case MSM_VIDC_LOAD_RESOURCES_DONE:
+	case MSM_VIDC_START:
+		rc = msm_vidc_start(flipped_state, inst);
+		if (rc || state <= get_flipped_state(inst->state, state))
+			break;
+	case MSM_VIDC_START_DONE:
+		rc = wait_for_state(inst, flipped_state, MSM_VIDC_START_DONE,
+				HAL_SESSION_START_DONE);
+		if (rc || state <= get_flipped_state(inst->state, state))
+			break;
+	case MSM_VIDC_STOP:
+		rc = msm_vidc_stop(flipped_state, inst);
+		if (rc || state <= get_flipped_state(inst->state, state))
+			break;
+	case MSM_VIDC_STOP_DONE:
+		rc = wait_for_state(inst, flipped_state, MSM_VIDC_STOP_DONE,
+				HAL_SESSION_STOP_DONE);
+		if (rc || state <= get_flipped_state(inst->state, state))
+			break;
+		dprintk(VIDC_DBG, "Moving to Stop Done state\n");
+	case MSM_VIDC_RELEASE_RESOURCES:
+		rc = msm_vidc_release_res(flipped_state, inst);
+		if (rc || state <= get_flipped_state(inst->state, state))
+			break;
+	case MSM_VIDC_RELEASE_RESOURCES_DONE:
+		rc = wait_for_state(inst, flipped_state,
+			MSM_VIDC_RELEASE_RESOURCES_DONE,
+			HAL_SESSION_RELEASE_RESOURCE_DONE);
+		if (rc || state <= get_flipped_state(inst->state, state))
+			break;
+		dprintk(VIDC_DBG,
+				"Moving to release resources done state\n");
+	case MSM_VIDC_CLOSE:
+		rc = msm_comm_session_close(flipped_state, inst);
+		if (rc || state <= get_flipped_state(inst->state, state))
+			break;
+	case MSM_VIDC_CLOSE_DONE:
+		rc = wait_for_state(inst, flipped_state, MSM_VIDC_CLOSE_DONE,
+				HAL_SESSION_END_DONE);
+		if (rc || state <= get_flipped_state(inst->state, state))
+			break;
+		msm_comm_session_clean(inst);
+	case MSM_VIDC_CORE_UNINIT:
+	case MSM_VIDC_CORE_INVALID:
+		dprintk(VIDC_DBG, "Sending core uninit\n");
+		rc = msm_vidc_deinit_core(inst);
+		if (rc || state == get_flipped_state(inst->state, state))
+			break;
+	default:
+		dprintk(VIDC_ERR, "State not recognized\n");
+		rc = -EINVAL;
+		break;
+	}
+exit:
+	mutex_unlock(&inst->sync_lock);
+	if (rc)
+		dprintk(VIDC_ERR,
+				"Failed to move from state: %d to %d\n",
+				inst->state, state);
+	else
+		trace_msm_vidc_common_state_change((void *)inst,
+				inst->state, state);
+	return rc;
+}
+
+int msm_vidc_comm_cmd(void *instance, union msm_v4l2_cmd *cmd)
+{
+	struct msm_vidc_inst *inst = instance;
+	struct v4l2_decoder_cmd *dec = NULL;
+	struct v4l2_encoder_cmd *enc = NULL;
+	struct msm_vidc_core *core;
+	int which_cmd = 0, flags = 0, rc = 0;
+
+	if (!inst || !inst->core || !cmd) {
+		dprintk(VIDC_ERR, "%s invalid params\n", __func__);
+		return -EINVAL;
+	}
+	core = inst->core;
+	if (inst->session_type == MSM_VIDC_ENCODER) {
+		enc = (struct v4l2_encoder_cmd *)cmd;
+		which_cmd = enc->cmd;
+		flags = enc->flags;
+	} else if (inst->session_type == MSM_VIDC_DECODER) {
+		dec = (struct v4l2_decoder_cmd *)cmd;
+		which_cmd = dec->cmd;
+		flags = dec->flags;
+	}
+
+	switch (which_cmd) {
+	case V4L2_DEC_QCOM_CMD_FLUSH:
+		if (core->state != VIDC_CORE_INVALID &&
+			inst->state ==  MSM_VIDC_CORE_INVALID) {
+			rc = msm_comm_kill_session(inst);
+			if (rc)
+				dprintk(VIDC_ERR,
+					"Fail to clean session: %d\n",
+					rc);
+		}
+		rc = msm_comm_flush(inst, flags);
+		if (rc) {
+			dprintk(VIDC_ERR,
+				"Failed to flush buffers: %d\n", rc);
+		}
+		break;
+	default:
+		dprintk(VIDC_ERR, "Unknown Command %d\n", which_cmd);
+		rc = -ENOTSUPP;
+		break;
+	}
+	return rc;
+}
+
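+/*
+ * Translate a vb2 buffer into the HFI frame descriptor: device address,
+ * lengths, timestamp and flag bits, plus the extradata plane when one
+ * is present.
+ */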
+static void populate_frame_data(struct vidc_frame_data *data,
+		const struct vb2_buffer *vb, struct msm_vidc_inst *inst)
+{
+	int64_t time_usec;
+	int extra_idx;
+	enum v4l2_buf_type type = vb->type;
+	enum vidc_ports port = type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE ?
+		OUTPUT_PORT : CAPTURE_PORT;
+	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+
+	time_usec = timeval_to_ns(&vbuf->timestamp);
+	do_div(time_usec, NSEC_PER_USEC);
+
+	data->alloc_len = vb->planes[0].length;
+	data->device_addr = vb->planes[0].m.userptr;
+	data->timestamp = time_usec;
+	data->flags = 0;
+	data->clnt_data = data->device_addr;
+
+	if (type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+		bool pic_decoding_mode = msm_comm_g_ctrl_for_id(inst,
+				V4L2_CID_MPEG_VIDC_VIDEO_PICTYPE_DEC_MODE);
+
+		data->buffer_type = HAL_BUFFER_INPUT;
+		data->filled_len = vb->planes[0].bytesused;
+		data->offset = vb->planes[0].data_offset;
+
+		if (vbuf->flags & V4L2_QCOM_BUF_FLAG_EOS)
+			data->flags |= HAL_BUFFERFLAG_EOS;
+
+		if (vbuf->flags & V4L2_MSM_BUF_FLAG_YUV_601_709_CLAMP)
+			data->flags |= HAL_BUFFERFLAG_YUV_601_709_CSC_CLAMP;
+
+		if (vbuf->flags & V4L2_QCOM_BUF_FLAG_CODECCONFIG)
+			data->flags |= HAL_BUFFERFLAG_CODECCONFIG;
+
+		if (vbuf->flags & V4L2_QCOM_BUF_FLAG_DECODEONLY)
+			data->flags |= HAL_BUFFERFLAG_DECODEONLY;
+
+		if (vbuf->flags & V4L2_QCOM_BUF_TIMESTAMP_INVALID)
+			data->timestamp = LLONG_MAX;
+
+		/* XXX: This is a dirty hack necessitated by the firmware,
+		 * which refuses to issue FBDs for non I-frames in Picture Type
+		 * Decoding mode, unless we pass in non-zero value in mark_data
+		 * and mark_target.
+		 */
+		data->mark_data = data->mark_target =
+			pic_decoding_mode ? 0xdeadbeef : 0;
+
+	} else if (type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
+		data->buffer_type = msm_comm_get_hal_output_buffer(inst);
+	}
+
+	extra_idx = EXTRADATA_IDX(inst->prop.num_planes[port]);
+	if (extra_idx && extra_idx < VIDEO_MAX_PLANES &&
+			vb->planes[extra_idx].m.userptr) {
+		data->extradata_addr = vb->planes[extra_idx].m.userptr;
+		data->extradata_size = vb->planes[extra_idx].length;
+		data->flags |= HAL_BUFFERFLAG_EXTRADATA;
+	}
+}
+
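+/*
+ * Count pending buffers of the given type up to and including the first
+ * one not marked with V4L2_MSM_BUF_FLAG_DEFER. If every pending buffer
+ * is still deferred, there is no complete batch and 0 is returned.
+ */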
+static unsigned int count_single_batch(struct msm_vidc_list *list,
+		enum v4l2_buf_type type)
+{
+	struct vb2_buf_entry *buf;
+	int count = 0;
+	struct vb2_v4l2_buffer *vbuf = NULL;
+
+	mutex_lock(&list->lock);
+	list_for_each_entry(buf, &list->list, list) {
+		if (buf->vb->type != type)
+			continue;
+
+		++count;
+
+		vbuf = to_vb2_v4l2_buffer(buf->vb);
+		if (!(vbuf->flags & V4L2_MSM_BUF_FLAG_DEFER))
+			goto found_batch;
+	}
+	/* don't have a full batch */
+	count = 0;
+
+found_batch:
+	mutex_unlock(&list->lock);
+	return count;
+}
+
+static unsigned int count_buffers(struct msm_vidc_list *list,
+		enum v4l2_buf_type type)
+{
+	struct vb2_buf_entry *buf;
+	int count = 0;
+
+	mutex_lock(&list->lock);
+	list_for_each_entry(buf, &list->list, list) {
+		if (buf->vb->type != type)
+			continue;
+
+		++count;
+	}
+	mutex_unlock(&list->lock);
+
+	return count;
+}
+
+static void log_frame(struct msm_vidc_inst *inst, struct vidc_frame_data *data,
+		enum v4l2_buf_type type)
+{
+	if (type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+		dprintk(VIDC_DBG,
+				"Sending etb (%pa) to hal: filled: %d, ts: %lld, flags = %#x\n",
+				&data->device_addr, data->filled_len,
+				data->timestamp, data->flags);
+		msm_vidc_debugfs_update(inst, MSM_VIDC_DEBUGFS_EVENT_ETB);
+
+		if (msm_vidc_bitrate_clock_scaling &&
+			inst->session_type == MSM_VIDC_DECODER &&
+			!inst->dcvs_mode)
+				inst->instant_bitrate =
+					data->filled_len * 8 * inst->prop.fps;
+		else
+			inst->instant_bitrate = 0;
+	} else if (type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
+		dprintk(VIDC_DBG,
+				"Sending ftb (%pa) to hal: size: %d, ts: %lld, flags = %#x\n",
+				&data->device_addr, data->alloc_len,
+				data->timestamp, data->flags);
+		msm_vidc_debugfs_update(inst, MSM_VIDC_DEBUGFS_EVENT_FTB);
+	}
+
+	msm_dcvs_check_and_scale_clocks(inst,
+			type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
+
+	if (msm_vidc_bitrate_clock_scaling && !inst->dcvs_mode &&
+		type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE &&
+		inst->session_type == MSM_VIDC_DECODER)
+		if (msm_comm_scale_clocks(inst->core))
+			dprintk(VIDC_WARN,
+				"Failed to scale clocks. Performance might be impacted\n");
+
+	if (msm_comm_vote_bus(inst->core))
+		dprintk(VIDC_WARN,
+			"Failed to scale bus. Performance might be impacted\n");
+}
+
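+/*
+ * Ask firmware to generate the encoder sequence header into the
+ * supplied buffer rather than queueing the buffer for a normal frame.
+ */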
+static int request_seq_header(struct msm_vidc_inst *inst,
+		struct vidc_frame_data *data)
+{
+	struct vidc_seq_hdr seq_hdr = {
+		.seq_hdr = data->device_addr,
+		.seq_hdr_len = data->alloc_len,
+	};
+
+	dprintk(VIDC_DBG, "Requesting sequence header in %pa\n",
+			&seq_hdr.seq_hdr);
+	return call_hfi_op(inst->core->device, session_get_seq_hdr,
+			inst->session, &seq_hdr);
+}
+
+/*
+ * Attempts to queue `vb` to hardware.  If, for various reasons, the buffer
+ * cannot be queued to hardware, the buffer will be staged for commit in the
+ * pending queue.  Once the hardware reaches a good state (or if `vb` is
+ * NULL), the subsequent *_qbuf will commit the previously staged buffers to
+ * hardware.
+ */
+int msm_comm_qbuf(struct msm_vidc_inst *inst, struct vb2_buffer *vb)
+{
+	int rc = 0, capture_count, output_count;
+	struct msm_vidc_core *core;
+	struct hfi_device *hdev;
+	struct {
+		struct vidc_frame_data *data;
+		int count;
+	} etbs, ftbs;
+	bool defer = false, batch_mode;
+	struct vb2_buf_entry *temp, *next;
+	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+
+	if (!inst) {
+		dprintk(VIDC_ERR, "%s: Invalid arguments\n", __func__);
+		return -EINVAL;
+	}
+
+	core = inst->core;
+	hdev = core->device;
+
+	if (inst->state == MSM_VIDC_CORE_INVALID ||
+		core->state == VIDC_CORE_INVALID ||
+		core->state == VIDC_CORE_UNINIT) {
+		dprintk(VIDC_ERR, "Core is in bad state. Can't Queue\n");
+		return -EINVAL;
+	}
+
+	/*
+	 * Stick the buffer into the pendingq; we'll pop it out later on
+	 * if we want to commit it to hardware.
+	 */
+	if (vb) {
+		temp = kzalloc(sizeof(*temp), GFP_KERNEL);
+		if (!temp) {
+			dprintk(VIDC_ERR, "Out of memory\n");
+			rc = -ENOMEM;
+			goto err_no_mem;
+		}
+
+		temp->vb = vb;
+		mutex_lock(&inst->pendingq.lock);
+		list_add_tail(&temp->list, &inst->pendingq.list);
+		mutex_unlock(&inst->pendingq.lock);
+	}
+
+	batch_mode = msm_comm_g_ctrl_for_id(inst, V4L2_CID_VIDC_QBUF_MODE)
+		== V4L2_VIDC_QBUF_BATCHED;
+	capture_count = (batch_mode ? &count_single_batch : &count_buffers)
+		(&inst->pendingq, V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
+	output_count = (batch_mode ? &count_single_batch : &count_buffers)
+		(&inst->pendingq, V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
+
+	/*
+	 * Somewhat complicated logic to decide whether to defer queueing the
+	 * buffer to hardware.  Don't queue if:
+	 * 1) Hardware isn't ready (that's simple)
+	 */
+	defer = defer ?: inst->state != MSM_VIDC_START_DONE;
+
+	/*
+	 * 2) The client explicitly tells us not to because it wants this
+	 * buffer to be batched with future frames.  The batch size (on both
+	 * the capture and output ports) is completely determined by the
+	 * client.
+	 */
+	defer = defer ?: vbuf && vbuf->flags & V4L2_MSM_BUF_FLAG_DEFER;
+
+	/* 3) If we're in batch mode, we must have full batches of both types */
+	defer = defer ?: batch_mode && (!output_count || !capture_count);
+
+	if (defer) {
+		dprintk(VIDC_DBG, "Deferring queue of %pK\n", vb);
+		return 0;
+	}
+
+	dprintk(VIDC_DBG, "%sing %d etbs and %d ftbs\n",
+			batch_mode ? "Batch" : "Process",
+			output_count, capture_count);
+
+	etbs.data = kcalloc(output_count, sizeof(*etbs.data), GFP_KERNEL);
+	ftbs.data = kcalloc(capture_count, sizeof(*ftbs.data), GFP_KERNEL);
+	/* Note that it's perfectly normal for (e|f)tbs.data to be NULL if
+	 * we're not in batch mode (i.e. (output|capture)_count == 0) */
+	if ((!etbs.data && output_count) ||
+			(!ftbs.data && capture_count)) {
+		dprintk(VIDC_ERR, "Failed to alloc memory for batching\n");
+		kfree(etbs.data);
+		etbs.data = NULL;
+
+		kfree(ftbs.data);
+		ftbs.data = NULL;
+		rc = -ENOMEM;
+		goto err_no_mem;
+	}
+
+	etbs.count = ftbs.count = 0;
+
+	/*
+	 * Try to collect all pending buffers into two batches of etbs and
+	 * ftbs.  Note that these "batches" might be empty if we're not in
+	 * batch mode and the pendingq is empty.
+	 */
+	mutex_lock(&inst->pendingq.lock);
+	list_for_each_entry_safe(temp, next, &inst->pendingq.list, list) {
+		struct vidc_frame_data *frame_data = NULL;
+
+		switch (temp->vb->type) {
+		case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
+			if (ftbs.count < capture_count && ftbs.data)
+				frame_data = &ftbs.data[ftbs.count++];
+			break;
+		case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
+			if (etbs.count < output_count && etbs.data)
+				frame_data = &etbs.data[etbs.count++];
+			break;
+		default:
+			break;
+		}
+
+		if (!frame_data)
+			continue;
+
+		populate_frame_data(frame_data, temp->vb, inst);
+
+		list_del(&temp->list);
+		kfree(temp);
+	}
+	mutex_unlock(&inst->pendingq.lock);
+
+	/* Finally commit all our frame(s) to H/W */
+	if (batch_mode) {
+		int ftb_index = 0, c = 0;
+
+		for (c = 0; atomic_read(&inst->seq_hdr_reqs) > 0; ++c) {
+			rc = request_seq_header(inst, &ftbs.data[c]);
+			if (rc) {
+				dprintk(VIDC_ERR,
+						"Failed requesting sequence header: %d\n",
+						rc);
+				goto err_bad_input;
+			}
+
+			atomic_dec(&inst->seq_hdr_reqs);
+		}
+
+		ftb_index = c;
+		rc = call_hfi_op(hdev, session_process_batch, inst->session,
+				etbs.count, etbs.data,
+				ftbs.count - ftb_index, &ftbs.data[ftb_index]);
+		if (rc) {
+			dprintk(VIDC_ERR,
+				"Failed to queue batch of %d ETBs and %d FTBs\n",
+				etbs.count, ftbs.count);
+			goto err_bad_input;
+		}
+
+		for (c = ftb_index; c < ftbs.count; ++c) {
+			log_frame(inst, &ftbs.data[c],
+					V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
+		}
+
+		for (c = 0; c < etbs.count; ++c) {
+			log_frame(inst, &etbs.data[c],
+					V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
+		}
+	}
+
+	if (!batch_mode && etbs.count) {
+		int c = 0;
+
+		for (c = 0; c < etbs.count; ++c) {
+			struct vidc_frame_data *frame_data = &etbs.data[c];
+
+			rc = call_hfi_op(hdev, session_etb, inst->session,
+					frame_data);
+			if (rc) {
+				dprintk(VIDC_ERR, "Failed to issue etb: %d\n",
+						rc);
+				goto err_bad_input;
+			}
+
+			log_frame(inst, frame_data,
+					V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
+		}
+	}
+
+	if (!batch_mode && ftbs.count) {
+		int c = 0;
+
+		for (c = 0; atomic_read(&inst->seq_hdr_reqs) > 0; ++c) {
+			rc = request_seq_header(inst, &ftbs.data[c]);
+			if (rc) {
+				dprintk(VIDC_ERR,
+						"Failed requesting sequence header: %d\n",
+						rc);
+				goto err_bad_input;
+			}
+
+			atomic_dec(&inst->seq_hdr_reqs);
+		}
+
+		for (; c < ftbs.count; ++c) {
+			struct vidc_frame_data *frame_data = &ftbs.data[c];
+
+			rc = call_hfi_op(hdev, session_ftb,
+					inst->session, frame_data);
+			if (rc) {
+				dprintk(VIDC_ERR, "Failed to issue ftb: %d\n",
+						rc);
+				goto err_bad_input;
+			}
+
+			log_frame(inst, frame_data,
+					V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
+		}
+	}
+
+err_bad_input:
+	if (rc)
+		dprintk(VIDC_ERR, "Failed to queue buffer\n");
+
+	kfree(etbs.data);
+	kfree(ftbs.data);
+err_no_mem:
+	return rc;
+}
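+
+/*
+ * Illustrative usage (an assumption, not a call site in this patch): once
+ * the session reaches MSM_VIDC_START_DONE, buffers staged earlier in the
+ * pendingq can be committed without supplying a new buffer:
+ *
+ *	rc = msm_comm_qbuf(inst, NULL);
+ *	if (rc)
+ *		dprintk(VIDC_ERR, "Failed to commit staged buffers\n");
+ */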
+
+int msm_comm_try_get_bufreqs(struct msm_vidc_inst *inst)
+{
+	int rc = 0, i = 0;
+	union hal_get_property hprop;
+
+	rc = msm_comm_try_get_prop(inst, HAL_PARAM_GET_BUFFER_REQUIREMENTS,
+					&hprop);
+	if (rc) {
+		dprintk(VIDC_ERR, "Failed getting buffer requirements: %d", rc);
+		return rc;
+	}
+
+	dprintk(VIDC_DBG, "Buffer requirements:\n");
+	dprintk(VIDC_DBG, "%15s %8s %8s\n", "buffer type", "count", "size");
+	for (i = 0; i < HAL_BUFFER_MAX; i++) {
+		struct hal_buffer_requirements req = hprop.buf_req.buffer[i];
+
+		inst->buff_req.buffer[i] = req;
+		dprintk(VIDC_DBG, "%15s %8d %8d\n",
+				get_buffer_name(req.buffer_type),
+				req.buffer_count_actual, req.buffer_size);
+	}
+
+	dprintk(VIDC_PROF, "Input buffers: %d, Output buffers: %d\n",
+			inst->buff_req.buffer[0].buffer_count_actual,
+			inst->buff_req.buffer[1].buffer_count_actual);
+	return rc;
+}
+
+int msm_comm_try_get_prop(struct msm_vidc_inst *inst, enum hal_property ptype,
+				union hal_get_property *hprop)
+{
+	int rc = 0;
+	struct hfi_device *hdev;
+	struct getprop_buf *buf;
+
+	if (!inst || !inst->core || !inst->core->device) {
+		dprintk(VIDC_ERR, "%s invalid parameters\n", __func__);
+		return -EINVAL;
+	}
+
+	hdev = inst->core->device;
+	mutex_lock(&inst->sync_lock);
+	if (inst->state < MSM_VIDC_OPEN_DONE ||
+			inst->state >= MSM_VIDC_CLOSE) {
+
+		/* No need to check inst->state == MSM_VIDC_INVALID since
+		 * INVALID is > CLOSE_DONE. When core went to INVALID state,
+		 * we put all the active instances in INVALID. So > CLOSE_DONE
+		 * is enough check to have.
+		 */
+
+		dprintk(VIDC_ERR,
+			"In Wrong state to call Buf Req: Inst %pK or Core %pK\n",
+				inst, inst->core);
+		rc = -EAGAIN;
+		mutex_unlock(&inst->sync_lock);
+		goto exit;
+	}
+	mutex_unlock(&inst->sync_lock);
+
+	switch (ptype) {
+	case HAL_PARAM_PROFILE_LEVEL_CURRENT:
+	case HAL_CONFIG_VDEC_ENTROPY:
+		rc = call_hfi_op(hdev, session_get_property, inst->session,
+				ptype);
+		break;
+	case HAL_PARAM_GET_BUFFER_REQUIREMENTS:
+		rc = call_hfi_op(hdev, session_get_buf_req, inst->session);
+		break;
+	default:
+		rc = -EAGAIN;
+		break;
+	}
+
+	if (rc) {
+		dprintk(VIDC_ERR, "Can't query hardware for property: %d\n",
+				rc);
+		goto exit;
+	}
+
+	rc = wait_for_completion_timeout(&inst->completions[
+			SESSION_MSG_INDEX(HAL_SESSION_PROPERTY_INFO)],
+		msecs_to_jiffies(msm_vidc_hw_rsp_timeout));
+	if (!rc) {
+		dprintk(VIDC_ERR,
+			"%s: Wait interrupted or timed out [%pK]: %d\n",
+			__func__, inst,
+			SESSION_MSG_INDEX(HAL_SESSION_PROPERTY_INFO));
+		inst->state = MSM_VIDC_CORE_INVALID;
+		call_hfi_op(hdev, flush_debug_queue, hdev->hfi_device_data);
+		dprintk(VIDC_ERR,
+			"SESS_PROP timeout can potentially crash the system\n");
+		msm_comm_print_debug_info(inst);
+
+		BUG_ON(msm_vidc_debug_timeout);
+		msm_comm_kill_session(inst);
+		rc = -ETIMEDOUT;
+		goto exit;
+	} else {
+		/* wait_for_completion_timeout() returns the jiffies left before expiry */
+		rc = 0;
+	}
+
+	mutex_lock(&inst->pending_getpropq.lock);
+	if (!list_empty(&inst->pending_getpropq.list)) {
+		buf = list_first_entry(&inst->pending_getpropq.list,
+					struct getprop_buf, list);
+		*hprop = *(union hal_get_property *)buf->data;
+		kfree(buf->data);
+		list_del(&buf->list);
+		kfree(buf);
+	} else {
+		dprintk(VIDC_ERR, "%s getprop list empty\n", __func__);
+		rc = -EINVAL;
+	}
+	mutex_unlock(&inst->pending_getpropq.lock);
+exit:
+	return rc;
+}
+
+int msm_comm_release_output_buffers(struct msm_vidc_inst *inst)
+{
+	struct msm_smem *handle;
+	struct internal_buf *buf, *dummy;
+	struct vidc_buffer_addr_info buffer_info;
+	int rc = 0;
+	struct msm_vidc_core *core;
+	struct hfi_device *hdev;
+	if (!inst) {
+		dprintk(VIDC_ERR,
+				"Invalid instance pointer = %pK\n", inst);
+		return -EINVAL;
+	}
+	mutex_lock(&inst->outputbufs.lock);
+	if (list_empty(&inst->outputbufs.list)) {
+		dprintk(VIDC_DBG, "%s - No OUTPUT buffers allocated\n",
+			__func__);
+		mutex_unlock(&inst->outputbufs.lock);
+		return 0;
+	}
+	mutex_unlock(&inst->outputbufs.lock);
+
+	core = inst->core;
+	if (!core) {
+		dprintk(VIDC_ERR,
+				"Invalid core pointer = %pK\n", core);
+		return -EINVAL;
+	}
+	hdev = core->device;
+	if (!hdev) {
+		dprintk(VIDC_ERR, "Invalid device pointer = %pK\n", hdev);
+		return -EINVAL;
+	}
+	mutex_lock(&inst->outputbufs.lock);
+	list_for_each_entry_safe(buf, dummy, &inst->outputbufs.list, list) {
+		handle = buf->handle;
+		if (!handle) {
+			dprintk(VIDC_ERR, "%s - invalid handle\n", __func__);
+			goto exit;
+		}
+
+		buffer_info.buffer_size = handle->size;
+		buffer_info.buffer_type = buf->buffer_type;
+		buffer_info.num_buffers = 1;
+		buffer_info.align_device_addr = handle->device_addr;
+		if (inst->buffer_mode_set[CAPTURE_PORT] ==
+			HAL_BUFFER_MODE_STATIC &&
+			inst->state != MSM_VIDC_CORE_INVALID &&
+				core->state != VIDC_CORE_INVALID) {
+			buffer_info.response_required = false;
+			rc = call_hfi_op(hdev, session_release_buffers,
+				(void *)inst->session, &buffer_info);
+			if (rc) {
+				dprintk(VIDC_WARN,
+					"Rel output buf fail:%pa, %d\n",
+					&buffer_info.align_device_addr,
+					buffer_info.buffer_size);
+			}
+		}
+
+		list_del(&buf->list);
+		msm_comm_smem_free(inst, buf->handle);
+		kfree(buf);
+	}
+
+exit:
+	mutex_unlock(&inst->outputbufs.lock);
+	return rc;
+}
+
+static enum hal_buffer scratch_buf_sufficient(struct msm_vidc_inst *inst,
+				enum hal_buffer buffer_type)
+{
+	struct hal_buffer_requirements *bufreq = NULL;
+	struct internal_buf *buf;
+	int count = 0;
+
+	if (!inst) {
+		dprintk(VIDC_ERR, "%s - invalid param\n", __func__);
+		goto not_sufficient;
+	}
+
+	bufreq = get_buff_req_buffer(inst, buffer_type);
+	if (!bufreq)
+		goto not_sufficient;
+
+	/* Check if current scratch buffers are sufficient */
+	mutex_lock(&inst->scratchbufs.lock);
+
+	list_for_each_entry(buf, &inst->scratchbufs.list, list) {
+		if (!buf->handle) {
+			dprintk(VIDC_ERR, "%s: invalid buf handle\n", __func__);
+			mutex_unlock(&inst->scratchbufs.lock);
+			goto not_sufficient;
+		}
+		if (buf->buffer_type == buffer_type &&
+			buf->handle->size >= bufreq->buffer_size)
+			count++;
+	}
+	mutex_unlock(&inst->scratchbufs.lock);
+
+	if (count != bufreq->buffer_count_actual)
+		goto not_sufficient;
+
+	dprintk(VIDC_DBG,
+		"Existing scratch buffer is sufficient for buffer type %#x\n",
+		buffer_type);
+
+	return buffer_type;
+
+not_sufficient:
+	return HAL_BUFFER_NONE;
+}
+
+int msm_comm_release_scratch_buffers(struct msm_vidc_inst *inst,
+					bool check_for_reuse)
+{
+	struct msm_smem *handle;
+	struct internal_buf *buf, *dummy;
+	struct vidc_buffer_addr_info buffer_info;
+	int rc = 0;
+	struct msm_vidc_core *core;
+	struct hfi_device *hdev;
+	enum hal_buffer sufficiency = HAL_BUFFER_NONE;
+	if (!inst) {
+		dprintk(VIDC_ERR,
+				"Invalid instance pointer = %pK\n", inst);
+		return -EINVAL;
+	}
+	core = inst->core;
+	if (!core) {
+		dprintk(VIDC_ERR,
+				"Invalid core pointer = %pK\n", core);
+		return -EINVAL;
+	}
+	hdev = core->device;
+	if (!hdev) {
+		dprintk(VIDC_ERR, "Invalid device pointer = %pK\n", hdev);
+		return -EINVAL;
+	}
+
+	if (check_for_reuse) {
+		sufficiency |= scratch_buf_sufficient(inst,
+					HAL_BUFFER_INTERNAL_SCRATCH);
+
+		sufficiency |= scratch_buf_sufficient(inst,
+					HAL_BUFFER_INTERNAL_SCRATCH_1);
+
+		sufficiency |= scratch_buf_sufficient(inst,
+					HAL_BUFFER_INTERNAL_SCRATCH_2);
+	}
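+
+	/*
+	 * Note: accumulating with |= relies on enum hal_buffer values being
+	 * distinct bit flags, so e.g. both HAL_BUFFER_INTERNAL_SCRATCH and
+	 * HAL_BUFFER_INTERNAL_SCRATCH_1 can be recorded in `sufficiency`
+	 * and tested individually below via (sufficiency & buf->buffer_type).
+	 */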
+
+	mutex_lock(&inst->scratchbufs.lock);
+	list_for_each_entry_safe(buf, dummy, &inst->scratchbufs.list, list) {
+		if (!buf->handle) {
+			dprintk(VIDC_ERR, "%s - buf->handle NULL\n", __func__);
+			rc = -EINVAL;
+			goto exit;
+		}
+
+		handle = buf->handle;
+		buffer_info.buffer_size = handle->size;
+		buffer_info.buffer_type = buf->buffer_type;
+		buffer_info.num_buffers = 1;
+		buffer_info.align_device_addr = handle->device_addr;
+		if (inst->state != MSM_VIDC_CORE_INVALID &&
+				core->state != VIDC_CORE_INVALID) {
+			buffer_info.response_required = true;
+			rc = call_hfi_op(hdev, session_release_buffers,
+				(void *)inst->session, &buffer_info);
+			if (rc) {
+				dprintk(VIDC_WARN,
+					"Rel scrtch buf fail:%pa, %d\n",
+					&buffer_info.align_device_addr,
+					buffer_info.buffer_size);
+			}
+			mutex_unlock(&inst->scratchbufs.lock);
+			rc = wait_for_sess_signal_receipt(inst,
+				HAL_SESSION_RELEASE_BUFFER_DONE);
+			if (rc) {
+				change_inst_state(inst,
+					MSM_VIDC_CORE_INVALID);
+				msm_comm_kill_session(inst);
+			}
+			mutex_lock(&inst->scratchbufs.lock);
+		}
+
+		/* If scratch buffers can be reused, do not free them */
+		if (sufficiency & buf->buffer_type)
+			continue;
+
+		list_del(&buf->list);
+		msm_comm_smem_free(inst, buf->handle);
+		kfree(buf);
+	}
+
+exit:
+	mutex_unlock(&inst->scratchbufs.lock);
+	return rc;
+}
+
+int msm_comm_release_persist_buffers(struct msm_vidc_inst *inst)
+{
+	struct msm_smem *handle;
+	struct list_head *ptr, *next;
+	struct internal_buf *buf;
+	struct vidc_buffer_addr_info buffer_info;
+	int rc = 0;
+	struct msm_vidc_core *core;
+	struct hfi_device *hdev;
+	if (!inst) {
+		dprintk(VIDC_ERR,
+				"Invalid instance pointer = %pK\n", inst);
+		return -EINVAL;
+	}
+	core = inst->core;
+	if (!core) {
+		dprintk(VIDC_ERR,
+				"Invalid core pointer = %pK\n", core);
+		return -EINVAL;
+	}
+	hdev = core->device;
+	if (!hdev) {
+		dprintk(VIDC_ERR, "Invalid device pointer = %pK\n", hdev);
+		return -EINVAL;
+	}
+
+	mutex_lock(&inst->persistbufs.lock);
+	list_for_each_safe(ptr, next, &inst->persistbufs.list) {
+		buf = list_entry(ptr, struct internal_buf, list);
+		handle = buf->handle;
+		buffer_info.buffer_size = handle->size;
+		buffer_info.buffer_type = buf->buffer_type;
+		buffer_info.num_buffers = 1;
+		buffer_info.align_device_addr = handle->device_addr;
+		if (inst->state != MSM_VIDC_CORE_INVALID &&
+				core->state != VIDC_CORE_INVALID) {
+			buffer_info.response_required = true;
+			rc = call_hfi_op(hdev, session_release_buffers,
+				(void *)inst->session, &buffer_info);
+			if (rc) {
+				dprintk(VIDC_WARN,
+					"Rel prst buf fail:%pa, %d\n",
+					&buffer_info.align_device_addr,
+					buffer_info.buffer_size);
+			}
+			mutex_unlock(&inst->persistbufs.lock);
+			rc = wait_for_sess_signal_receipt(inst,
+				HAL_SESSION_RELEASE_BUFFER_DONE);
+			if (rc) {
+				change_inst_state(inst, MSM_VIDC_CORE_INVALID);
+				msm_comm_kill_session(inst);
+			}
+			mutex_lock(&inst->persistbufs.lock);
+		}
+		list_del(&buf->list);
+		msm_comm_smem_free(inst, buf->handle);
+		kfree(buf);
+	}
+	mutex_unlock(&inst->persistbufs.lock);
+	return rc;
+}
+
+int msm_comm_try_set_prop(struct msm_vidc_inst *inst,
+	enum hal_property ptype, void *pdata)
+{
+	int rc = 0;
+	struct hfi_device *hdev;
+	if (!inst) {
+		dprintk(VIDC_ERR, "Invalid input: %pK\n", inst);
+		return -EINVAL;
+	}
+
+	if (!inst->core || !inst->core->device) {
+		dprintk(VIDC_ERR, "%s invalid parameters\n", __func__);
+		return -EINVAL;
+	}
+	hdev = inst->core->device;
+
+	mutex_lock(&inst->sync_lock);
+	if (inst->state < MSM_VIDC_OPEN_DONE || inst->state >= MSM_VIDC_CLOSE) {
+		dprintk(VIDC_ERR, "Not in proper state to set property\n");
+		rc = -EAGAIN;
+		goto exit;
+	}
+	rc = call_hfi_op(hdev, session_set_property, (void *)inst->session,
+			ptype, pdata);
+	if (rc)
+		dprintk(VIDC_ERR, "Failed to set hal property for framesize\n");
+exit:
+	mutex_unlock(&inst->sync_lock);
+	return rc;
+}
+
+int msm_comm_set_output_buffers(struct msm_vidc_inst *inst)
+{
+	int rc = 0;
+	if (!inst || !inst->core || !inst->core->device) {
+		dprintk(VIDC_ERR, "%s invalid parameters\n", __func__);
+		return -EINVAL;
+	}
+
+	if (msm_comm_release_output_buffers(inst))
+		dprintk(VIDC_WARN, "Failed to release output buffers\n");
+
+	rc = set_output_buffers(inst, HAL_BUFFER_OUTPUT);
+	if (rc)
+		goto error;
+	return rc;
+error:
+	msm_comm_release_output_buffers(inst);
+	return rc;
+}
+
+int msm_comm_set_scratch_buffers(struct msm_vidc_inst *inst,
+						bool max_int_buffer) {
+	int rc = 0;
+	if (!inst || !inst->core || !inst->core->device) {
+		dprintk(VIDC_ERR, "%s invalid parameters\n", __func__);
+		return -EINVAL;
+	}
+
+	if (!max_int_buffer && msm_comm_release_scratch_buffers(inst, true))
+		dprintk(VIDC_WARN, "Failed to release scratch buffers\n");
+
+	rc = set_internal_buffers(inst, HAL_BUFFER_INTERNAL_SCRATCH,
+		&inst->scratchbufs);
+	if (rc)
+		goto error;
+
+	rc = set_internal_buffers(inst, HAL_BUFFER_INTERNAL_SCRATCH_1,
+		&inst->scratchbufs);
+	if (rc)
+		goto error;
+
+	rc = set_internal_buffers(inst, HAL_BUFFER_INTERNAL_SCRATCH_2,
+		&inst->scratchbufs);
+	if (rc)
+		goto error;
+
+	return rc;
+error:
+	msm_comm_release_scratch_buffers(inst, false);
+	return rc;
+}
+
+int msm_comm_set_persist_buffers(struct msm_vidc_inst *inst)
+{
+	int rc = 0;
+	if (!inst || !inst->core || !inst->core->device) {
+		dprintk(VIDC_ERR, "%s invalid parameters\n", __func__);
+		return -EINVAL;
+	}
+
+	rc = set_internal_buffers(inst, HAL_BUFFER_INTERNAL_PERSIST,
+		&inst->persistbufs);
+	if (rc)
+		goto error;
+
+	rc = set_internal_buffers(inst, HAL_BUFFER_INTERNAL_PERSIST_1,
+		&inst->persistbufs);
+	if (rc)
+		goto error;
+	return rc;
+error:
+	msm_comm_release_persist_buffers(inst);
+	return rc;
+}
+
+static void msm_comm_flush_in_invalid_state(struct msm_vidc_inst *inst)
+{
+	struct list_head *ptr, *next;
+	enum vidc_ports ports[] = {OUTPUT_PORT, CAPTURE_PORT};
+	int c = 0;
+
+	for (c = 0; c < ARRAY_SIZE(ports); ++c) {
+		enum vidc_ports port = ports[c];
+
+		dprintk(VIDC_DBG, "Flushing buffers of type %d in bad state\n",
+				port);
+		mutex_lock(&inst->bufq[port].lock);
+		list_for_each_safe(ptr, next, &inst->bufq[port].
+				vb2_bufq.queued_list) {
+			struct vb2_buffer *vb = container_of(ptr,
+					struct vb2_buffer, queued_entry);
+
+			if (vb->state == VB2_BUF_STATE_ACTIVE) {
+				vb->planes[0].bytesused = 0;
+				vb->planes[0].data_offset = 0;
+				vb2_buffer_done(vb, VB2_BUF_STATE_DONE);
+			} else {
+				dprintk(VIDC_WARN,
+					"%s: VB is in state %d, not ACTIVE\n",
+					__func__, vb->state);
+			}
+		}
+		mutex_unlock(&inst->bufq[port].lock);
+	}
+
+	msm_vidc_queue_v4l2_event(inst, V4L2_EVENT_MSM_VIDC_FLUSH_DONE);
+}
+
+void msm_comm_flush_dynamic_buffers(struct msm_vidc_inst *inst)
+{
+	struct buffer_info *binfo = NULL;
+
+	if (inst->buffer_mode_set[CAPTURE_PORT] != HAL_BUFFER_MODE_DYNAMIC)
+		return;
+
+	/*
+	 * Dynamic buffer mode: if flush is called during seek, the driver
+	 * should not queue back any new buffers it has been holding.
+	 *
+	 * Each dynamic o/p buffer can have one of the following ref_counts:
+	 *
+	 * ref_count : 0 - f/w has released its reference and sent the fbd
+	 *		   back. The buffer has been returned to the client.
+	 *
+	 * ref_count : 1 - f/w is holding a reference. f/w may have released
+	 *		   the fbd as read_only, or the fbd is pending. f/w
+	 *		   will release the reference before sending
+	 *		   flush_done.
+	 *
+	 * ref_count : 2 - f/w is holding a reference and has released the
+	 *		   fbd as read_only, which the client has queued back
+	 *		   to the driver. The driver holds this buffer and
+	 *		   queues it back only when f/w releases its
+	 *		   reference. During flush_done, f/w will release the
+	 *		   reference, but the driver should not queue the
+	 *		   buffer back to f/w. Flush all buffers with
+	 *		   ref_count 2.
+	 */
+	mutex_lock(&inst->registeredbufs.lock);
+	if (!list_empty(&inst->registeredbufs.list)) {
+		struct v4l2_event buf_event = {0};
+		u32 *ptr = NULL;
+
+		list_for_each_entry(binfo, &inst->registeredbufs.list, list) {
+			if (binfo->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE &&
+				atomic_read(&binfo->ref_count) == 2) {
+
+				atomic_dec(&binfo->ref_count);
+				buf_event.type =
+				V4L2_EVENT_MSM_VIDC_RELEASE_UNQUEUED_BUFFER;
+				ptr = (u32 *)buf_event.u.data;
+				ptr[0] = binfo->fd[0];
+				ptr[1] = binfo->buff_off[0];
+				ptr[2] = binfo->uvaddr[0];
+				ptr[3] = (u32) binfo->timestamp.tv_sec;
+				ptr[4] = (u32) binfo->timestamp.tv_usec;
+				ptr[5] = binfo->v4l2_index;
+				dprintk(VIDC_DBG,
+					"released buffer held in driver before issuing flush: %pa fd[0]: %d\n",
+					&binfo->device_addr[0], binfo->fd[0]);
+				/*send event to client*/
+				v4l2_event_queue_fh(&inst->event_handler,
+					&buf_event);
+			}
+		}
+	}
+	mutex_unlock(&inst->registeredbufs.lock);
+}
+
+void msm_comm_flush_pending_dynamic_buffers(struct msm_vidc_inst *inst)
+{
+	struct buffer_info *binfo = NULL;
+
+	if (!inst)
+		return;
+
+	if (inst->buffer_mode_set[CAPTURE_PORT] != HAL_BUFFER_MODE_DYNAMIC)
+		return;
+
+	if (list_empty(&inst->pendingq.list) ||
+		list_empty(&inst->registeredbufs.list))
+		return;
+
+	/*
+	 * Dynamic buffer mode: since the pendingq is not empty, no output
+	 * buffers have been sent to the firmware yet.  Hence drop the
+	 * reference on all pendingq o/p buffers before flushing them.
+	 */
+
+	mutex_lock(&inst->registeredbufs.lock);
+	list_for_each_entry(binfo, &inst->registeredbufs.list, list) {
+		if (binfo->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
+			dprintk(VIDC_DBG,
+				"%s: binfo = %pK device_addr = %pa\n",
+				__func__, binfo, &binfo->device_addr[0]);
+			buf_ref_put(inst, binfo);
+		}
+	}
+	mutex_unlock(&inst->registeredbufs.lock);
+}
+
+int msm_comm_flush(struct msm_vidc_inst *inst, u32 flags)
+{
+	int rc =  0;
+	bool ip_flush = false;
+	bool op_flush = false;
+	struct vb2_buf_entry *temp, *next;
+	struct mutex *lock;
+	struct msm_vidc_core *core;
+	struct hfi_device *hdev;
+	if (!inst) {
+		dprintk(VIDC_ERR,
+				"Invalid instance pointer = %pK\n", inst);
+		return -EINVAL;
+	}
+	core = inst->core;
+	if (!core) {
+		dprintk(VIDC_ERR,
+				"Invalid core pointer = %pK\n", core);
+		return -EINVAL;
+	}
+	hdev = core->device;
+	if (!hdev) {
+		dprintk(VIDC_ERR, "Invalid device pointer = %pK\n", hdev);
+		return -EINVAL;
+	}
+
+	ip_flush = flags & V4L2_QCOM_CMD_FLUSH_OUTPUT;
+	op_flush = flags & V4L2_QCOM_CMD_FLUSH_CAPTURE;
+
+	if (ip_flush && !op_flush) {
+		dprintk(VIDC_INFO, "Input only flush not supported\n");
+		return 0;
+	}
+
+	msm_comm_flush_dynamic_buffers(inst);
+
+	if (inst->state == MSM_VIDC_CORE_INVALID ||
+			core->state == VIDC_CORE_INVALID ||
+			core->state == VIDC_CORE_UNINIT) {
+		dprintk(VIDC_ERR,
+				"Core %pK and inst %pK are in bad state\n",
+					core, inst);
+		msm_comm_flush_in_invalid_state(inst);
+		return 0;
+	}
+
+	if (inst->in_reconfig && !ip_flush && op_flush) {
+		mutex_lock(&inst->pendingq.lock);
+		if (!list_empty(&inst->pendingq.list)) {
+			/*
+			 * Execution can never reach here since port reconfig
+			 * won't happen unless the pendingq is emptied out
+			 * (both pendingq and flush being secured with the
+			 * same lock).  Printing a message here in case this
+			 * breaks.
+			 */
+			dprintk(VIDC_WARN,
+			"FLUSH BUG: Pending q not empty! It should be empty\n");
+		}
+		mutex_unlock(&inst->pendingq.lock);
+		atomic_inc(&inst->in_flush);
+		dprintk(VIDC_DBG, "Send flush Output to firmware\n");
+		rc = call_hfi_op(hdev, session_flush, inst->session,
+				HAL_FLUSH_OUTPUT);
+	} else {
+		msm_comm_flush_pending_dynamic_buffers(inst);
+		/*
+		 * If flush is called after queueing buffers but before
+		 * streamon driver should flush the pending queue
+		 */
+		mutex_lock(&inst->pendingq.lock);
+		list_for_each_entry_safe(temp, next,
+				&inst->pendingq.list, list) {
+			enum v4l2_buf_type type = temp->vb->type;
+
+			if (type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
+				lock = &inst->bufq[CAPTURE_PORT].lock;
+			else
+				lock = &inst->bufq[OUTPUT_PORT].lock;
+
+			temp->vb->planes[0].bytesused = 0;
+
+			mutex_lock(lock);
+			vb2_buffer_done(temp->vb, VB2_BUF_STATE_DONE);
+			msm_vidc_debugfs_update(inst,
+				type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE ?
+					MSM_VIDC_DEBUGFS_EVENT_FBD :
+					MSM_VIDC_DEBUGFS_EVENT_EBD);
+			list_del(&temp->list);
+			mutex_unlock(lock);
+
+			kfree(temp);
+		}
+		mutex_unlock(&inst->pendingq.lock);
+
+		/* Do not send flush in case of session_error */
+		if (!(inst->state == MSM_VIDC_CORE_INVALID &&
+			  core->state != VIDC_CORE_INVALID)) {
+			atomic_inc(&inst->in_flush);
+			dprintk(VIDC_DBG, "Send flush all to firmware\n");
+			rc = call_hfi_op(hdev, session_flush, inst->session,
+				HAL_FLUSH_ALL);
+		}
+	}
+
+	return rc;
+}
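+
+/*
+ * Illustrative usage (an assumption, not a call site in this patch): a
+ * decoder seek would typically flush both ports:
+ *
+ *	rc = msm_comm_flush(inst, V4L2_QCOM_CMD_FLUSH_OUTPUT |
+ *			V4L2_QCOM_CMD_FLUSH_CAPTURE);
+ */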
+
+enum hal_extradata_id msm_comm_get_hal_extradata_index(
+	enum v4l2_mpeg_vidc_extradata index)
+{
+	int ret = 0;
+	switch (index) {
+	case V4L2_MPEG_VIDC_EXTRADATA_NONE:
+		ret = HAL_EXTRADATA_NONE;
+		break;
+	case V4L2_MPEG_VIDC_EXTRADATA_MB_QUANTIZATION:
+		ret = HAL_EXTRADATA_MB_QUANTIZATION;
+		break;
+	case V4L2_MPEG_VIDC_EXTRADATA_INTERLACE_VIDEO:
+		ret = HAL_EXTRADATA_INTERLACE_VIDEO;
+		break;
+	case V4L2_MPEG_VIDC_EXTRADATA_VC1_FRAMEDISP:
+		ret = HAL_EXTRADATA_VC1_FRAMEDISP;
+		break;
+	case V4L2_MPEG_VIDC_EXTRADATA_VC1_SEQDISP:
+		ret = HAL_EXTRADATA_VC1_SEQDISP;
+		break;
+	case V4L2_MPEG_VIDC_EXTRADATA_TIMESTAMP:
+		ret = HAL_EXTRADATA_TIMESTAMP;
+		break;
+	case V4L2_MPEG_VIDC_EXTRADATA_S3D_FRAME_PACKING:
+		ret = HAL_EXTRADATA_S3D_FRAME_PACKING;
+		break;
+	case V4L2_MPEG_VIDC_EXTRADATA_FRAME_RATE:
+		ret = HAL_EXTRADATA_FRAME_RATE;
+		break;
+	case V4L2_MPEG_VIDC_EXTRADATA_PANSCAN_WINDOW:
+		ret = HAL_EXTRADATA_PANSCAN_WINDOW;
+		break;
+	case V4L2_MPEG_VIDC_EXTRADATA_RECOVERY_POINT_SEI:
+		ret = HAL_EXTRADATA_RECOVERY_POINT_SEI;
+		break;
+	case V4L2_MPEG_VIDC_EXTRADATA_MULTISLICE_INFO:
+		ret = HAL_EXTRADATA_MULTISLICE_INFO;
+		break;
+	case V4L2_MPEG_VIDC_EXTRADATA_NUM_CONCEALED_MB:
+		ret = HAL_EXTRADATA_NUM_CONCEALED_MB;
+		break;
+	case V4L2_MPEG_VIDC_EXTRADATA_METADATA_FILLER:
+		ret = HAL_EXTRADATA_METADATA_FILLER;
+		break;
+	case V4L2_MPEG_VIDC_EXTRADATA_ASPECT_RATIO:
+		ret = HAL_EXTRADATA_ASPECT_RATIO;
+		break;
+	case V4L2_MPEG_VIDC_EXTRADATA_INPUT_CROP:
+		ret = HAL_EXTRADATA_INPUT_CROP;
+		break;
+	case V4L2_MPEG_VIDC_EXTRADATA_DIGITAL_ZOOM:
+		ret = HAL_EXTRADATA_DIGITAL_ZOOM;
+		break;
+	case V4L2_MPEG_VIDC_EXTRADATA_MPEG2_SEQDISP:
+		ret = HAL_EXTRADATA_MPEG2_SEQDISP;
+		break;
+	case V4L2_MPEG_VIDC_EXTRADATA_STREAM_USERDATA:
+		ret = HAL_EXTRADATA_STREAM_USERDATA;
+		break;
+	case V4L2_MPEG_VIDC_EXTRADATA_FRAME_QP:
+		ret = HAL_EXTRADATA_FRAME_QP;
+		break;
+	case V4L2_MPEG_VIDC_EXTRADATA_FRAME_BITS_INFO:
+		ret = HAL_EXTRADATA_FRAME_BITS_INFO;
+		break;
+	case V4L2_MPEG_VIDC_EXTRADATA_LTR:
+		ret = HAL_EXTRADATA_LTR_INFO;
+		break;
+	case V4L2_MPEG_VIDC_EXTRADATA_METADATA_MBI:
+		ret = HAL_EXTRADATA_METADATA_MBI;
+		break;
+	case V4L2_MPEG_VIDC_EXTRADATA_VQZIP_SEI:
+		ret = HAL_EXTRADATA_VQZIP_SEI;
+		break;
+	case V4L2_MPEG_VIDC_EXTRADATA_YUV_STATS:
+		ret = HAL_EXTRADATA_YUV_STATS;
+		break;
+	case V4L2_MPEG_VIDC_EXTRADATA_ROI_QP:
+		ret = HAL_EXTRADATA_ROI_QP;
+		break;
+	case V4L2_MPEG_VIDC_EXTRADATA_OUTPUT_CROP:
+		ret = HAL_EXTRADATA_OUTPUT_CROP;
+		break;
+	case V4L2_MPEG_VIDC_EXTRADATA_DISPLAY_COLOUR_SEI:
+		ret = HAL_EXTRADATA_MASTERING_DISPLAY_COLOUR_SEI;
+		break;
+	case V4L2_MPEG_VIDC_EXTRADATA_CONTENT_LIGHT_LEVEL_SEI:
+		ret = HAL_EXTRADATA_CONTENT_LIGHT_LEVEL_SEI;
+		break;
+	case V4L2_MPEG_VIDC_EXTRADATA_PQ_INFO:
+		ret = HAL_EXTRADATA_PQ_INFO;
+		break;
+
+	case V4L2_MPEG_VIDC_EXTRADATA_VUI_DISPLAY:
+		ret = HAL_EXTRADATA_VUI_DISPLAY_INFO;
+		break;
+	case V4L2_MPEG_VIDC_EXTRADATA_VPX_COLORSPACE:
+		ret = HAL_EXTRADATA_VPX_COLORSPACE;
+		break;
+	default:
+		dprintk(VIDC_WARN, "Extradata not found: %d\n", index);
+		break;
+	}
+	return ret;
+}
+
+enum hal_buffer_layout_type msm_comm_get_hal_buffer_layout(
+	enum v4l2_mpeg_vidc_video_mvc_layout index)
+{
+	int ret = 0;
+	switch (index) {
+	case V4L2_MPEG_VIDC_VIDEO_MVC_SEQUENTIAL:
+		ret = HAL_BUFFER_LAYOUT_SEQ;
+		break;
+	case V4L2_MPEG_VIDC_VIDEO_MVC_TOP_BOTTOM:
+		ret = HAL_BUFFER_LAYOUT_TOP_BOTTOM;
+		break;
+	default:
+		break;
+	}
+	return ret;
+}
+
+int msm_vidc_trigger_ssr(struct msm_vidc_core *core,
+	enum hal_ssr_trigger_type type)
+{
+	int rc = 0;
+	struct hfi_device *hdev;
+	if (!core || !core->device) {
+		dprintk(VIDC_WARN, "Invalid parameters: %pK\n", core);
+		return -EINVAL;
+	}
+	hdev = core->device;
+	if (core->state == VIDC_CORE_INIT_DONE) {
+		/*
+		 * In current implementation user-initiated SSR triggers
+		 * a fatal error from hardware. However, there is no way
+		 * to know if fatal error is due to SSR or not. Handle
+		 * user SSR as non-fatal.
+		 */
+		mutex_lock(&core->lock);
+		core->resources.debug_timeout = false;
+		mutex_unlock(&core->lock);
+		rc = call_hfi_op(hdev, core_trigger_ssr,
+				hdev->hfi_device_data, type);
+	}
+
+	return rc;
+}
+
+static int msm_vidc_load_supported(struct msm_vidc_inst *inst)
+{
+	int num_mbs_per_sec = 0, max_load_adj = 0;
+	enum load_calc_quirks quirks = LOAD_CALC_IGNORE_TURBO_LOAD |
+		LOAD_CALC_IGNORE_THUMBNAIL_LOAD |
+		LOAD_CALC_IGNORE_NON_REALTIME_LOAD;
+
+	if (inst->state == MSM_VIDC_OPEN_DONE) {
+		max_load_adj = inst->core->resources.max_load +
+			inst->capability.mbs_per_frame.max;
+		num_mbs_per_sec = msm_comm_get_load(inst->core,
+					MSM_VIDC_DECODER, quirks);
+		num_mbs_per_sec += msm_comm_get_load(inst->core,
+					MSM_VIDC_ENCODER, quirks);
+		if (num_mbs_per_sec > max_load_adj) {
+			dprintk(VIDC_ERR,
+				"H/W is overloaded. needed: %d max: %d\n",
+				num_mbs_per_sec,
+				max_load_adj);
+			msm_vidc_print_running_insts(inst->core);
+			return -EBUSY;
+		}
+	}
+	return 0;
+}
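+
+/*
+ * Worked example: one 1080p30 decode session contributes
+ * (1920 / 16) * (1088 / 16) = 8160 macroblocks per frame, i.e.
+ * 8160 * 30 = 244800 MBs/s towards num_mbs_per_sec above.
+ */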
+
+int msm_vidc_check_scaling_supported(struct msm_vidc_inst *inst)
+{
+	u32 x_min, x_max, y_min, y_max;
+	u32 input_height, input_width, output_height, output_width;
+
+	input_height = inst->prop.height[OUTPUT_PORT];
+	input_width = inst->prop.width[OUTPUT_PORT];
+	output_height = inst->prop.height[CAPTURE_PORT];
+	output_width = inst->prop.width[CAPTURE_PORT];
+
+	if (!input_height || !input_width || !output_height || !output_width) {
+		dprintk(VIDC_ERR,
+			"Invalid: input height = %d width = %d, output height = %d width = %d\n",
+			input_height, input_width, output_height,
+			output_width);
+		return -ENOTSUPP;
+	}
+
+	if (!inst->capability.scale_x.min ||
+		!inst->capability.scale_x.max ||
+		!inst->capability.scale_y.min ||
+		!inst->capability.scale_y.max) {
+
+		if (input_width * input_height !=
+			output_width * output_height) {
+			dprintk(VIDC_ERR,
+				"%s: scaling is not supported (%dx%d != %dx%d)\n",
+				__func__, input_width, input_height,
+				output_width, output_height);
+			return -ENOTSUPP;
+		} else {
+			dprintk(VIDC_DBG, "%s: supported WxH = %dx%d\n",
+				__func__, input_width, input_height);
+			return 0;
+		}
+	}
+
+	x_min = (1<<16)/inst->capability.scale_x.min;
+	y_min = (1<<16)/inst->capability.scale_y.min;
+	x_max = inst->capability.scale_x.max >> 16;
+	y_max = inst->capability.scale_y.max >> 16;
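+
+	/*
+	 * Worked example (assuming the Q16 fixed-point encoding implied by
+	 * the 1 << 16 arithmetic above): scale_x.min == 0x8000 encodes a
+	 * minimum scale factor of 0.5, so x_min = (1 << 16) / 0x8000 = 2,
+	 * i.e. at most a 2x downscale; scale_x.max == 0x80000 encodes 8.0,
+	 * so x_max = 0x80000 >> 16 = 8, i.e. at most an 8x upscale.
+	 */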
+
+	if (input_height > output_height) {
+		if (input_height > x_min * output_height) {
+			dprintk(VIDC_ERR,
+				"Unsupported height downscale ratio %d vs %d\n",
+				input_height/output_height, x_min);
+			return -ENOTSUPP;
+		}
+	} else {
+		if (output_height > x_max * input_height) {
+			dprintk(VIDC_ERR,
+				"Unsupported height upscale ratio %d vs %d\n",
+				input_height/output_height, x_max);
+			return -ENOTSUPP;
+		}
+	}
+	if (input_width > output_width) {
+		if (input_width > y_min * output_width) {
+			dprintk(VIDC_ERR,
+				"Unsupported width downscale ratio %d vs %d\n",
+				input_width/output_width, y_min);
+			return -ENOTSUPP;
+		}
+	} else {
+		if (output_width > y_max * input_width) {
+			dprintk(VIDC_ERR,
+				"Unsupported width upscale ratio %d vs %d\n",
+				input_width/output_width, y_max);
+			return -ENOTSUPP;
+		}
+	}
+	return 0;
+}
+
+int msm_vidc_check_session_supported(struct msm_vidc_inst *inst)
+{
+	struct msm_vidc_capability *capability;
+	int rc = 0;
+	struct hfi_device *hdev;
+	struct msm_vidc_core *core;
+
+	if (!inst || !inst->core || !inst->core->device) {
+		dprintk(VIDC_WARN, "%s: Invalid parameter\n", __func__);
+		return -EINVAL;
+	}
+	capability = &inst->capability;
+	hdev = inst->core->device;
+	core = inst->core;
+	rc = msm_vidc_load_supported(inst);
+	if (rc) {
+		change_inst_state(inst, MSM_VIDC_CORE_INVALID);
+		msm_comm_kill_session(inst);
+		dprintk(VIDC_WARN,
+			"%s: Hardware is overloaded\n", __func__);
+		return rc;
+	}
+
+	if (!is_thermal_permissible(core)) {
+		dprintk(VIDC_WARN,
+			"Thermal level critical, stop all active sessions!\n");
+		return -ENOTSUPP;
+	}
+
+	if (!rc)
+		msm_dcvs_try_enable(inst);
+
+	if (!rc) {
+		if (inst->prop.width[CAPTURE_PORT] < capability->width.min ||
+			inst->prop.height[CAPTURE_PORT] <
+			capability->height.min) {
+			dprintk(VIDC_ERR,
+				"Unsupported WxH = (%u)x(%u), min supported is - (%u)x(%u)\n",
+				inst->prop.width[CAPTURE_PORT],
+				inst->prop.height[CAPTURE_PORT],
+				capability->width.min,
+				capability->height.min);
+			rc = -ENOTSUPP;
+		}
+		if (!rc && inst->prop.width[CAPTURE_PORT] >
+			capability->width.max) {
+			dprintk(VIDC_ERR,
+				"Unsupported width = %u, max supported width = %u\n",
+				inst->prop.width[CAPTURE_PORT],
+				capability->width.max);
+			rc = -ENOTSUPP;
+		}
+
+		if (!rc && inst->prop.height[CAPTURE_PORT]
+			* inst->prop.width[CAPTURE_PORT] >
+			capability->width.max * capability->height.max) {
+			dprintk(VIDC_ERR,
+			"Unsupported WxH = (%u)x(%u), max supported is - (%u)x(%u)\n",
+			inst->prop.width[CAPTURE_PORT],
+			inst->prop.height[CAPTURE_PORT],
+			capability->width.max, capability->height.max);
+			rc = -ENOTSUPP;
+		}
+	}
+	if (rc) {
+		change_inst_state(inst, MSM_VIDC_CORE_INVALID);
+		msm_comm_kill_session(inst);
+		dprintk(VIDC_ERR,
+			"%s: Resolution unsupported\n", __func__);
+	}
+	return rc;
+}
+
+static void msm_comm_generate_session_error(struct msm_vidc_inst *inst)
+{
+	enum hal_command_response cmd = HAL_SESSION_ERROR;
+	struct msm_vidc_cb_cmd_done response = {0};
+
+	dprintk(VIDC_WARN, "msm_comm_generate_session_error\n");
+	if (!inst || !inst->core) {
+		dprintk(VIDC_ERR, "%s: invalid input parameters\n", __func__);
+		return;
+	}
+
+	response.session_id = inst;
+	response.status = VIDC_ERR_FAIL;
+	handle_session_error(cmd, (void *)&response);
+}
+
+static void msm_comm_generate_sys_error(struct msm_vidc_inst *inst)
+{
+	struct msm_vidc_core *core;
+	enum hal_command_response cmd = HAL_SYS_ERROR;
+	struct msm_vidc_cb_cmd_done response  = {0};
+	if (!inst || !inst->core) {
+		dprintk(VIDC_ERR, "%s: invalid input parameters\n", __func__);
+		return;
+	}
+	core = inst->core;
+	response.device_id = (u32) core->id;
+	handle_sys_error(cmd, (void *) &response);
+}
+
+int msm_comm_kill_session(struct msm_vidc_inst *inst)
+{
+	int rc = 0;
+
+	if (!inst || !inst->core || !inst->core->device) {
+		dprintk(VIDC_ERR, "%s: invalid input parameters\n", __func__);
+		return -EINVAL;
+	} else if (!inst->session) {
+		/* There's no hfi session to kill */
+		return 0;
+	}
+
+	/*
+	 * We're internally forcibly killing the session, if fw is aware of
+	 * the session send session_abort to firmware to clean up and release
+	 * the session, else just kill the session inside the driver.
+	 */
+	if ((inst->state >= MSM_VIDC_OPEN_DONE &&
+			inst->state < MSM_VIDC_CLOSE_DONE) ||
+			inst->state == MSM_VIDC_CORE_INVALID) {
+		if (msm_comm_session_abort(inst)) {
+			msm_comm_generate_sys_error(inst);
+			return 0;
+		}
+		change_inst_state(inst, MSM_VIDC_CLOSE_DONE);
+		msm_comm_generate_session_error(inst);
+	} else {
+		dprintk(VIDC_WARN,
+				"Inactive session %pK, triggering an internal session error\n",
+				inst);
+		msm_comm_generate_session_error(inst);
+
+	}
+
+	return rc;
+}
+
+struct msm_smem *msm_comm_smem_alloc(struct msm_vidc_inst *inst,
+			size_t size, u32 align, u32 flags,
+			enum hal_buffer buffer_type, int map_kernel)
+{
+	struct msm_smem *m = NULL;
+
+	if (!inst || !inst->core) {
+		dprintk(VIDC_ERR, "%s: invalid inst: %pK\n", __func__, inst);
+		return NULL;
+	}
+	m = msm_smem_alloc(inst->mem_client, size, align,
+				flags, buffer_type, map_kernel);
+	return m;
+}
+
+void msm_comm_smem_free(struct msm_vidc_inst *inst, struct msm_smem *mem)
+{
+	if (!inst || !inst->core || !mem) {
+		dprintk(VIDC_ERR,
+			"%s: invalid params: %pK %pK\n", __func__, inst, mem);
+		return;
+	}
+	msm_smem_free(inst->mem_client, mem);
+}
+
+int msm_comm_smem_cache_operations(struct msm_vidc_inst *inst,
+		struct msm_smem *mem, enum smem_cache_ops cache_ops,
+		int size)
+{
+	if (!inst || !mem) {
+		dprintk(VIDC_ERR,
+			"%s: invalid params: %pK %pK\n", __func__, inst, mem);
+		return -EINVAL;
+	}
+	return msm_smem_cache_operations(inst->mem_client, mem,
+						cache_ops, size);
+}
+
+struct msm_smem *msm_comm_smem_user_to_kernel(struct msm_vidc_inst *inst,
+			int fd, u32 offset, enum hal_buffer buffer_type)
+{
+	struct msm_smem *m = NULL;
+
+	if (!inst || !inst->core) {
+		dprintk(VIDC_ERR, "%s: invalid inst: %pK\n", __func__, inst);
+		return NULL;
+	}
+
+	if (inst->state == MSM_VIDC_CORE_INVALID) {
+		dprintk(VIDC_ERR, "Core in Invalid state, returning from %s\n",
+			__func__);
+		return NULL;
+	}
+
+	m = msm_smem_user_to_kernel(inst->mem_client,
+			fd, offset, buffer_type);
+	return m;
+}
+
+void msm_vidc_fw_unload_handler(struct work_struct *work)
+{
+	struct msm_vidc_core *core = NULL;
+	struct hfi_device *hdev = NULL;
+	int rc = 0;
+
+	core = container_of(work, struct msm_vidc_core, fw_unload_work.work);
+	if (!core || !core->device) {
+		dprintk(VIDC_ERR, "%s - invalid work or core handle\n",
+				__func__);
+		return;
+	}
+
+	hdev = core->device;
+
+	mutex_lock(&core->lock);
+	if (list_empty(&core->instances) &&
+		core->state != VIDC_CORE_UNINIT) {
+		if (core->state > VIDC_CORE_INIT) {
+			dprintk(VIDC_DBG, "Calling vidc_hal_core_release\n");
+			rc = call_hfi_op(hdev, core_release,
+					hdev->hfi_device_data);
+			if (rc) {
+				dprintk(VIDC_ERR,
+					"Failed to release core, id = %d\n",
+					core->id);
+				mutex_unlock(&core->lock);
+				return;
+			}
+		}
+		core->state = VIDC_CORE_UNINIT;
+		kfree(core->capabilities);
+		core->capabilities = NULL;
+	}
+	mutex_unlock(&core->lock);
+}
+
+int msm_comm_set_color_format(struct msm_vidc_inst *inst,
+		enum hal_buffer buffer_type, int fourcc)
+{
+	struct hal_uncompressed_format_select hal_fmt = {0};
+	enum hal_uncompressed_format format = HAL_UNUSED_COLOR;
+	int rc = 0;
+	struct hfi_device *hdev;
+
+	if (!inst || !inst->core || !inst->core->device) {
+		dprintk(VIDC_ERR, "%s - invalid param\n", __func__);
+		return -EINVAL;
+	}
+
+	hdev = inst->core->device;
+
+	format = get_hal_uncompressed(fourcc);
+	if (format == HAL_UNUSED_COLOR) {
+		dprintk(VIDC_ERR, "Using unsupported colorformat %#x\n",
+				fourcc);
+		rc = -ENOTSUPP;
+		goto exit;
+	}
+
+	hal_fmt.buffer_type = buffer_type;
+	hal_fmt.format = format;
+
+	rc = call_hfi_op(hdev, session_set_property, inst->session,
+		HAL_PARAM_UNCOMPRESSED_FORMAT_SELECT, &hal_fmt);
+	if (rc)
+		dprintk(VIDC_ERR,
+			"Failed to set input color format\n");
+	else
+		dprintk(VIDC_DBG, "Setting uncompressed colorformat to %#x\n",
+				format);
+
+exit:
+	return rc;
+}
+
+int msm_vidc_comm_s_parm(struct msm_vidc_inst *inst, struct v4l2_streamparm *a)
+{
+	u32 property_id = 0;
+	u64 us_per_frame = 0;
+	void *pdata;
+	int rc = 0, fps = 0;
+	struct hal_frame_rate frame_rate;
+	struct hfi_device *hdev;
+
+	if (!inst || !inst->core || !inst->core->device || !a) {
+		dprintk(VIDC_ERR, "%s invalid parameters\n", __func__);
+		return -EINVAL;
+	}
+
+	hdev = inst->core->device;
+	property_id = HAL_CONFIG_FRAME_RATE;
+
+	if (a->parm.output.timeperframe.denominator) {
+		switch (a->type) {
+		case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
+		case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
+			us_per_frame = a->parm.output.timeperframe.numerator *
+				(u64)USEC_PER_SEC;
+			do_div(us_per_frame, a->parm.output.
+				timeperframe.denominator);
+			break;
+		default:
+			dprintk(VIDC_ERR,
+					"Scale clocks : Unknown buffer type %d\n",
+					a->type);
+			break;
+		}
+	}
+
+	if (!us_per_frame) {
+		dprintk(VIDC_ERR,
+				"Failed to scale clocks : time between frames is 0\n");
+		rc = -EINVAL;
+		goto exit;
+	}
+
+	fps = USEC_PER_SEC;
+	do_div(fps, us_per_frame);
+
+	if (fps % 15 == 14 || fps % 24 == 23)
+		fps = fps + 1;
+	else if ((fps > 1) && (fps % 24 == 1 || fps % 15 == 1))
+		fps = fps - 1;
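+
+	/*
+	 * Worked example: a 1001/30000 timeperframe (29.97 fps NTSC) gives
+	 * us_per_frame = 33366 and an integer fps of 29; since
+	 * 29 % 15 == 14, fps is rounded up to 30.  Likewise 23.976 fps
+	 * (1001/24000) truncates to 23 and is rounded up to 24.  The rate
+	 * is later sent to the firmware in Q16, i.e. fps * BIT(16).
+	 */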
+
+	if (inst->prop.fps != fps) {
+		dprintk(VIDC_PROF, "reported fps changed for %pK: %d->%d\n",
+				inst, inst->prop.fps, fps);
+		inst->prop.fps = fps;
+		frame_rate.frame_rate = inst->prop.fps * BIT(16);
+		frame_rate.buffer_type = HAL_BUFFER_OUTPUT;
+		pdata = &frame_rate;
+		if (inst->session_type == MSM_VIDC_ENCODER) {
+			rc = call_hfi_op(hdev, session_set_property,
+				inst->session, property_id, pdata);
+
+			if (rc)
+				dprintk(VIDC_WARN,
+					"Failed to set frame rate %d\n", rc);
+		} else {
+			msm_dcvs_init_load(inst);
+		}
+		msm_comm_scale_clocks_and_bus(inst);
+		msm_dcvs_try_enable(inst);
+	}
+exit:
+	return rc;
+}
+
+void msm_comm_print_inst_info(struct msm_vidc_inst *inst)
+{
+	struct buffer_info *temp;
+	struct internal_buf *buf;
+	int i = 0;
+	bool is_decode = false;
+	enum vidc_ports port;
+	bool is_secure = false;
+
+	if (!inst) {
+		dprintk(VIDC_ERR, "%s - invalid param %pK\n",
+			__func__, inst);
+		return;
+	}
+
+	is_decode = inst->session_type == MSM_VIDC_DECODER;
+	port = is_decode ? OUTPUT_PORT : CAPTURE_PORT;
+	is_secure = inst->flags & VIDC_SECURE;
+	dprintk(VIDC_ERR,
+			"%s session, %s, Codec type: %s HxW: %d x %d fps: %d bitrate: %d bit-depth: %s\n",
+			is_decode ? "Decode" : "Encode",
+			is_secure ? "Secure" : "Non-Secure",
+			inst->fmts[port].name,
+			inst->prop.height[port], inst->prop.width[port],
+			inst->prop.fps, inst->prop.bitrate,
+			!inst->bit_depth ? "8" : "10");
+
+	dprintk(VIDC_ERR,
+			"---Buffer details for inst: %pK of type: %d---\n",
+			inst, inst->session_type);
+	mutex_lock(&inst->registeredbufs.lock);
+	dprintk(VIDC_ERR, "registered buffer list:\n");
+	list_for_each_entry(temp, &inst->registeredbufs.list, list)
+		for (i = 0; i < temp->num_planes; i++)
+			dprintk(VIDC_ERR,
+					"type: %d plane: %d addr: %pa size: %d\n",
+					temp->type, i, &temp->device_addr[i],
+					temp->size[i]);
+
+	mutex_unlock(&inst->registeredbufs.lock);
+
+	mutex_lock(&inst->scratchbufs.lock);
+	dprintk(VIDC_ERR, "scratch buffer list:\n");
+	list_for_each_entry(buf, &inst->scratchbufs.list, list)
+		dprintk(VIDC_ERR, "type: %d addr: %pa size: %zu\n",
+				buf->buffer_type, &buf->handle->device_addr,
+				buf->handle->size);
+	mutex_unlock(&inst->scratchbufs.lock);
+
+	mutex_lock(&inst->persistbufs.lock);
+	dprintk(VIDC_ERR, "persist buffer list:\n");
+	list_for_each_entry(buf, &inst->persistbufs.list, list)
+		dprintk(VIDC_ERR, "type: %d addr: %pa size: %zu\n",
+				buf->buffer_type, &buf->handle->device_addr,
+				buf->handle->size);
+	mutex_unlock(&inst->persistbufs.lock);
+
+	mutex_lock(&inst->outputbufs.lock);
+	dprintk(VIDC_ERR, "dpb buffer list:\n");
+	list_for_each_entry(buf, &inst->outputbufs.list, list)
+		dprintk(VIDC_ERR, "type: %d addr: %pa size: %zu\n",
+				buf->buffer_type, &buf->handle->device_addr,
+				buf->handle->size);
+	mutex_unlock(&inst->outputbufs.lock);
+}
+
+static void msm_comm_print_debug_info(struct msm_vidc_inst *inst)
+{
+	struct msm_vidc_core *core = NULL;
+	struct msm_vidc_inst *temp = NULL;
+
+	if (!inst || !inst->core) {
+		dprintk(VIDC_ERR, "%s - invalid param %pK\n",
+				__func__, inst);
+		return;
+	}
+	core = inst->core;
+
+	dprintk(VIDC_ERR, "Venus core frequency = %lu\n",
+		msm_comm_get_clock_rate(core));
+	mutex_lock(&core->lock);
+	dprintk(VIDC_ERR, "Printing instance info that caused Error\n");
+	msm_comm_print_inst_info(inst);
+	dprintk(VIDC_ERR, "Printing remaining instances info\n");
+	list_for_each_entry(temp, &core->instances, list) {
+		/* inst already printed above. Hence don't repeat.*/
+		if (temp == inst)
+			continue;
+		msm_comm_print_inst_info(temp);
+	}
+	mutex_unlock(&core->lock);
+}
+void msm_comm_sort_ctrl(void)
+{
+	msm_vdec_ctrl_sort();
+}
diff -Nruw linux-4.4.115-fbx/drivers/media/platform/msm./vidc/msm_vidc_common.h linux-4.4.115-fbx/drivers/media/platform/msm/vidc/msm_vidc_common.h
--- linux-4.4.115-fbx/drivers/media/platform/msm./vidc/msm_vidc_common.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/media/platform/msm/vidc/msm_vidc_common.h	2019-10-29 09:26:23.961206290 +0100
@@ -0,0 +1,107 @@
+/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _MSM_VIDC_COMMON_H_
+#define _MSM_VIDC_COMMON_H_
+#include "msm_vidc_internal.h"
+struct vb2_buf_entry {
+	struct list_head list;
+	struct vb2_buffer *vb;
+};
+
+extern const char *const mpeg_video_vidc_extradata[];
+
+enum load_calc_quirks {
+	LOAD_CALC_NO_QUIRKS = 0,
+	LOAD_CALC_IGNORE_TURBO_LOAD = 1 << 0,
+	LOAD_CALC_IGNORE_THUMBNAIL_LOAD = 1 << 1,
+	LOAD_CALC_IGNORE_NON_REALTIME_LOAD = 1 << 2,
+};
+
+struct msm_vidc_core *get_vidc_core(int core_id);
+const struct msm_vidc_format *msm_comm_get_pixel_fmt_index(
+	const struct msm_vidc_format fmt[], int size, int index, int fmt_type);
+struct msm_vidc_format *msm_comm_get_pixel_fmt_fourcc(
+	struct msm_vidc_format fmt[], int size, int fourcc, int fmt_type);
+struct buf_queue *msm_comm_get_vb2q(
+		struct msm_vidc_inst *inst, enum v4l2_buf_type type);
+int msm_comm_try_state(struct msm_vidc_inst *inst, int state);
+int msm_comm_try_get_bufreqs(struct msm_vidc_inst *inst);
+int msm_comm_try_set_prop(struct msm_vidc_inst *inst,
+	enum hal_property ptype, void *pdata);
+int msm_comm_try_get_prop(struct msm_vidc_inst *inst,
+	enum hal_property ptype, union hal_get_property *hprop);
+int msm_comm_set_scratch_buffers(struct msm_vidc_inst *inst,
+						bool max_int_buffer);
+int msm_comm_set_persist_buffers(struct msm_vidc_inst *inst);
+int msm_comm_set_output_buffers(struct msm_vidc_inst *inst);
+int allocate_and_set_internal_bufs(struct msm_vidc_inst *inst,
+			struct hal_buffer_requirements *internal_bufreq,
+			struct msm_vidc_list *buf_list, bool set_on_fw);
+int msm_comm_queue_output_buffers(struct msm_vidc_inst *inst);
+int msm_comm_qbuf(struct msm_vidc_inst *inst, struct vb2_buffer *vb);
+void msm_comm_scale_clocks_and_bus(struct msm_vidc_inst *inst);
+int msm_comm_scale_clocks(struct msm_vidc_core *core);
+int msm_comm_scale_clocks_load(struct msm_vidc_core *core,
+		int num_mbs_per_sec, enum load_calc_quirks quirks);
+void msm_comm_flush_dynamic_buffers(struct msm_vidc_inst *inst);
+int msm_comm_flush(struct msm_vidc_inst *inst, u32 flags);
+int msm_comm_release_scratch_buffers(struct msm_vidc_inst *inst,
+					bool check_for_reuse);
+int msm_comm_release_persist_buffers(struct msm_vidc_inst *inst);
+int msm_comm_release_output_buffers(struct msm_vidc_inst *inst);
+int msm_comm_force_cleanup(struct msm_vidc_inst *inst);
+int msm_comm_suspend(int core_id);
+enum hal_extradata_id msm_comm_get_hal_extradata_index(
+	enum v4l2_mpeg_vidc_extradata index);
+enum hal_buffer_layout_type msm_comm_get_hal_buffer_layout(
+	enum v4l2_mpeg_vidc_video_mvc_layout index);
+struct hal_buffer_requirements *get_buff_req_buffer(
+			struct msm_vidc_inst *inst, u32 buffer_type);
+#define IS_PRIV_CTRL(idx) (\
+		(V4L2_CTRL_ID2CLASS(idx) == V4L2_CTRL_CLASS_MPEG) && \
+		V4L2_CTRL_DRIVER_PRIV(idx))
+void msm_comm_session_clean(struct msm_vidc_inst *inst);
+int msm_comm_kill_session(struct msm_vidc_inst *inst);
+enum multi_stream msm_comm_get_stream_output_mode(struct msm_vidc_inst *inst);
+enum hal_buffer msm_comm_get_hal_output_buffer(struct msm_vidc_inst *inst);
+struct msm_smem *msm_comm_smem_alloc(struct msm_vidc_inst *inst,
+			size_t size, u32 align, u32 flags,
+			enum hal_buffer buffer_type, int map_kernel);
+void msm_comm_smem_free(struct msm_vidc_inst *inst, struct msm_smem *mem);
+int msm_comm_smem_cache_operations(struct msm_vidc_inst *inst,
+		struct msm_smem *mem, enum smem_cache_ops cache_ops, int size);
+struct msm_smem *msm_comm_smem_user_to_kernel(struct msm_vidc_inst *inst,
+			int fd, u32 offset, enum hal_buffer buffer_type);
+enum hal_video_codec get_hal_codec(int fourcc);
+enum hal_domain get_hal_domain(int session_type);
+int msm_comm_check_core_init(struct msm_vidc_core *core);
+int msm_comm_get_inst_load(struct msm_vidc_inst *inst,
+			enum load_calc_quirks quirks);
+int msm_comm_get_load(struct msm_vidc_core *core,
+			enum session_type type, enum load_calc_quirks quirks);
+int msm_comm_set_color_format(struct msm_vidc_inst *inst,
+		enum hal_buffer buffer_type, int fourcc);
+int msm_comm_g_ctrl(struct msm_vidc_inst *inst, struct v4l2_control *ctrl);
+int msm_comm_s_ctrl(struct msm_vidc_inst *inst, struct v4l2_control *ctrl);
+int msm_comm_g_ctrl_for_id(struct msm_vidc_inst *inst, int id);
+int msm_comm_ctrl_init(struct msm_vidc_inst *inst,
+		struct msm_vidc_ctrl *drv_ctrls, u32 num_ctrls,
+		const struct v4l2_ctrl_ops *ctrl_ops);
+int msm_comm_ctrl_deinit(struct msm_vidc_inst *inst);
+void msm_comm_cleanup_internal_buffers(struct msm_vidc_inst *inst);
+int msm_vidc_comm_s_parm(struct msm_vidc_inst *inst, struct v4l2_streamparm *a);
+bool msm_comm_turbo_session(struct msm_vidc_inst *inst);
+void msm_comm_print_inst_info(struct msm_vidc_inst *inst);
+void msm_comm_sort_ctrl(void);
+#endif
diff -Nruw linux-4.4.115-fbx/drivers/media/platform/msm./vidc/msm_vidc_dcvs.c linux-4.4.115-fbx/drivers/media/platform/msm/vidc/msm_vidc_dcvs.c
--- linux-4.4.115-fbx/drivers/media/platform/msm./vidc/msm_vidc_dcvs.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/media/platform/msm/vidc/msm_vidc_dcvs.c	2019-01-22 16:16:24.467255136 +0100
@@ -0,0 +1,650 @@
+/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include "msm_vidc_common.h"
+#include "vidc_hfi_api.h"
+#include "msm_vidc_debug.h"
+#include "msm_vidc_dcvs.h"
+
+#define IS_VALID_DCVS_SESSION(__cur_mbpf, __min_mbpf) \
+		((__cur_mbpf) >= (__min_mbpf))
+
+static bool msm_dcvs_check_supported(struct msm_vidc_inst *inst);
+static int msm_dcvs_enc_scale_clocks(struct msm_vidc_inst *inst);
+static int msm_dcvs_dec_scale_clocks(struct msm_vidc_inst *inst, bool fbd);
+
+int msm_dcvs_try_enable(struct msm_vidc_inst *inst)
+{
+	if (!inst) {
+		dprintk(VIDC_ERR, "%s: Invalid args: %pK\n", __func__, inst);
+		return -EINVAL;
+	}
+	inst->dcvs_mode = msm_dcvs_check_supported(inst);
+	return 0;
+}
+
+static inline int msm_dcvs_get_mbs_per_frame(struct msm_vidc_inst *inst)
+{
+	int height, width;
+
+	if (!inst->in_reconfig) {
+		height = max(inst->prop.height[CAPTURE_PORT],
+				inst->prop.height[OUTPUT_PORT]);
+		width = max(inst->prop.width[CAPTURE_PORT],
+				inst->prop.width[OUTPUT_PORT]);
+	} else {
+		height = inst->reconfig_height;
+		width = inst->reconfig_width;
+	}
+
+	return NUM_MBS_PER_FRAME(height, width);
+}
+
+static inline int msm_dcvs_count_active_instances(struct msm_vidc_core *core,
+	enum session_type session_type)
+{
+	int active_instances = 0;
+	struct msm_vidc_inst *temp = NULL;
+
+	if (!core) {
+		dprintk(VIDC_ERR, "%s: Invalid args: %pK\n", __func__, core);
+		return -EINVAL;
+	}
+
+	/*
+	 * The DCVS conditions are as follows:
+	 * Decoder DCVS: only for ONE decoder session.
+	 * Encoder DCVS: only for ONE encoder session + ONE decoder session.
+	 */
+	mutex_lock(&core->lock);
+	list_for_each_entry(temp, &core->instances, list) {
+		if (temp->state >= MSM_VIDC_OPEN_DONE &&
+			temp->state < MSM_VIDC_STOP_DONE &&
+			(temp->session_type == session_type ||
+			 temp->session_type == MSM_VIDC_ENCODER))
+			active_instances++;
+	}
+	mutex_unlock(&core->lock);
+	return active_instances;
+}
+
+static bool msm_dcvs_check_codec_supported(int fourcc,
+		unsigned long codecs_supported, enum session_type type)
+{
+	int codec_bit, session_type_bit;
+	bool codec_type, session_type;
+	unsigned long session;
+
+	session = VIDC_VOTE_DATA_SESSION_VAL(get_hal_codec(fourcc),
+		get_hal_domain(type));
+
+	if (!codecs_supported || !session)
+		return false;
+
+	/* ffs() returns a 1-indexed bit position; test_bit() takes a 0-indexed one */
+	codec_bit = ffs(session) - 1;
+	session_type_bit = codec_bit + 1;
+
+	codec_type =
+		test_bit(codec_bit, &codecs_supported) ==
+		test_bit(codec_bit, &session);
+	session_type =
+		test_bit(session_type_bit, &codecs_supported) ==
+		test_bit(session_type_bit, &session);
+
+	return codec_type && session_type;
+}
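+
+/*
+ * Illustrative note (an assumption inferred from the ffs()/+1 arithmetic
+ * above): VIDC_VOTE_DATA_SESSION_VAL() packs a codec bit and the adjacent
+ * session-type bit into `session`, so a DCVS table row matches only when
+ * both the codec bit and its neighbouring session-type bit agree with
+ * codecs_supported.
+ */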
+
+static void msm_dcvs_update_dcvs_params(int idx, struct msm_vidc_inst *inst)
+{
+	struct dcvs_stats *dcvs = NULL;
+	struct msm_vidc_platform_resources *res = NULL;
+	struct dcvs_table *table = NULL;
+
+	if (!inst || !inst->core) {
+		dprintk(VIDC_ERR, "%s Invalid args: %pK\n", __func__, inst);
+		return;
+	}
+
+	dcvs = &inst->dcvs;
+	res = &inst->core->resources;
+	table = res->dcvs_tbl;
+
+	dcvs->load_low = table[idx].load_low;
+	dcvs->load_high = table[idx].load_high;
+	dcvs->supported_codecs = table[idx].supported_codecs;
+}
+
+static void msm_dcvs_enc_check_and_scale_clocks(struct msm_vidc_inst *inst)
+{
+	int rc = 0;
+
+	if (inst->session_type == MSM_VIDC_ENCODER &&
+		msm_vidc_enc_dcvs_mode) {
+		rc = msm_dcvs_enc_scale_clocks(inst);
+		if (rc) {
+			dprintk(VIDC_DBG,
+				"ENC_DCVS: error while scaling clocks\n");
+		}
+	}
+}
+
+static void msm_dcvs_dec_check_and_scale_clocks(struct msm_vidc_inst *inst)
+{
+	int rc = 0;
+
+	if (inst->session_type == MSM_VIDC_DECODER &&
+		msm_vidc_dec_dcvs_mode) {
+		msm_dcvs_monitor_buffer(inst);
+		rc = msm_dcvs_dec_scale_clocks(inst, false);
+		if (rc) {
+			dprintk(VIDC_ERR,
+					"%s: Failed to scale clocks in DCVS: %d\n",
+					__func__, rc);
+		}
+	}
+}
+
+void msm_dcvs_check_and_scale_clocks(struct msm_vidc_inst *inst, bool is_etb)
+{
+	if (!inst) {
+		dprintk(VIDC_ERR, "%s Invalid args: %pK\n", __func__, inst);
+		return;
+	}
+	msm_dcvs_try_enable(inst);
+	if (!inst->dcvs_mode) {
+		dprintk(VIDC_DBG, "DCVS is not enabled\n");
+		return;
+	}
+
+	if (is_etb)
+		msm_dcvs_enc_check_and_scale_clocks(inst);
+	else
+		msm_dcvs_dec_check_and_scale_clocks(inst);
+}
+
+static inline int get_pending_bufs_fw(struct msm_vidc_inst *inst)
+{
+	int fw_out_qsize = 0, buffers_in_driver = 0;
+
+	if (!inst) {
+		dprintk(VIDC_ERR, "%s Invalid args\n", __func__);
+		return -EINVAL;
+	}
+
+	if (inst->state >= MSM_VIDC_OPEN_DONE &&
+		inst->state < MSM_VIDC_STOP_DONE) {
+		fw_out_qsize = inst->count.ftb - inst->count.fbd;
+		buffers_in_driver = inst->buffers_held_in_driver;
+	}
+
+	return fw_out_qsize + buffers_in_driver;
+}
+
+static inline void msm_dcvs_print_dcvs_stats(struct dcvs_stats *dcvs)
+{
+	dprintk(VIDC_DBG,
+		"DCVS: Load_Low %d, Load High %d\n",
+		dcvs->load_low,
+		dcvs->load_high);
+
+	dprintk(VIDC_DBG,
+		"DCVS: ThrDispBufLow %d, ThrDispBufHigh %d\n",
+		dcvs->threshold_disp_buf_low,
+		dcvs->threshold_disp_buf_high);
+
+	dprintk(VIDC_DBG,
+		"DCVS: min_threshold %d, max_threshold %d\n",
+		dcvs->min_threshold, dcvs->max_threshold);
+}
+
+void msm_dcvs_init_load(struct msm_vidc_inst *inst)
+{
+	struct msm_vidc_core *core;
+	struct hal_buffer_requirements *output_buf_req;
+	struct dcvs_stats *dcvs;
+	struct dcvs_table *table;
+	struct msm_vidc_platform_resources *res = NULL;
+	int i, num_rows, fourcc;
+
+	dprintk(VIDC_DBG, "Init DCVS Load\n");
+
+	if (!inst || !inst->core) {
+		dprintk(VIDC_ERR, "%s Invalid args: %pK\n", __func__, inst);
+		return;
+	}
+
+	core = inst->core;
+	dcvs = &inst->dcvs;
+	res = &core->resources;
+	dcvs->load = msm_comm_get_inst_load(inst, LOAD_CALC_IGNORE_TURBO_LOAD);
+
+	num_rows = res->dcvs_tbl_size;
+	table = res->dcvs_tbl;
+
+	if (!num_rows || !table) {
+		dprintk(VIDC_ERR,
+				"%s: Dcvs table entry not found.\n", __func__);
+		return;
+	}
+
+	fourcc = inst->session_type == MSM_VIDC_DECODER ?
+				inst->fmts[OUTPUT_PORT].fourcc :
+				inst->fmts[CAPTURE_PORT].fourcc;
+
+	for (i = 0; i < num_rows; i++) {
+		bool matches = msm_dcvs_check_codec_supported(
+					fourcc,
+					table[i].supported_codecs,
+					inst->session_type);
+		if (!matches)
+			continue;
+
+		if (dcvs->load > table[i].load) {
+			msm_dcvs_update_dcvs_params(i, inst);
+			break;
+		}
+	}
+
+	if (inst->session_type == MSM_VIDC_ENCODER)
+		goto print_stats;
+
+	output_buf_req = get_buff_req_buffer(inst,
+		msm_comm_get_hal_output_buffer(inst));
+
+	if (!output_buf_req) {
+		dprintk(VIDC_ERR,
+			"%s: No buffer requirement for buffer type %x\n",
+			__func__, HAL_BUFFER_OUTPUT);
+		return;
+	}
+
+	dcvs->transition_turbo = false;
+
+	/* calculating the min and max threshold */
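+	/*
+	 * Illustrative numbers: with buffer_count_actual = 16,
+	 * buffer_count_min = 8 and 4 extra DCVS buffers, min_threshold is
+	 * 16 - 8 - 4 + 1 = 5 and max_threshold is 16.
+	 */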
+	if (output_buf_req->buffer_count_actual) {
+		dcvs->min_threshold = output_buf_req->buffer_count_actual -
+			output_buf_req->buffer_count_min -
+			msm_dcvs_get_extra_buff_count(inst) + 1;
+		dcvs->max_threshold = output_buf_req->buffer_count_actual;
+		if (dcvs->max_threshold <= dcvs->min_threshold)
+			dcvs->max_threshold =
+				dcvs->min_threshold + DCVS_BUFFER_SAFEGUARD;
+		dcvs->threshold_disp_buf_low = dcvs->min_threshold;
+		dcvs->threshold_disp_buf_high = dcvs->max_threshold;
+	}
+
+print_stats:
+	msm_dcvs_print_dcvs_stats(dcvs);
+}
+
+void msm_dcvs_init(struct msm_vidc_inst *inst)
+{
+	dprintk(VIDC_DBG, "Init DCVS Struct\n");
+
+	if (!inst) {
+		dprintk(VIDC_ERR, "%s Invalid args: %pK\n", __func__, inst);
+		return;
+	}
+
+	inst->dcvs = (struct dcvs_stats){ {0} };
+	inst->dcvs.threshold_disp_buf_high = DCVS_NOMINAL_THRESHOLD;
+	inst->dcvs.threshold_disp_buf_low = DCVS_TURBO_THRESHOLD;
+}
+
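+/*
+ * Track, over a DCVS_FTB_WINDOW-long sliding window, how many output
+ * buffers sit outside the firmware (i.e. with the display). Once the
+ * window fills, the high threshold is re-derived from the window
+ * maximum (clamped to [min_threshold, max_threshold]) and a transition
+ * to turbo is flagged when the buffer level falls to the low threshold.
+ */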
+void msm_dcvs_monitor_buffer(struct msm_vidc_inst *inst)
+{
+	int new_ftb, i, prev_buf_count;
+	int fw_pending_bufs, total_output_buf, buffers_outside_fw;
+	struct dcvs_stats *dcvs;
+	struct hal_buffer_requirements *output_buf_req;
+
+	if (!inst) {
+		dprintk(VIDC_ERR, "%s Invalid args: %pK\n", __func__, inst);
+		return;
+	}
+	dcvs = &inst->dcvs;
+
+	mutex_lock(&inst->lock);
+	output_buf_req = get_buff_req_buffer(inst,
+				msm_comm_get_hal_output_buffer(inst));
+	if (!output_buf_req) {
+		dprintk(VIDC_ERR, "%s : Get output buffer req failed %pK\n",
+			__func__, inst);
+		mutex_unlock(&inst->lock);
+		return;
+	}
+
+	total_output_buf = output_buf_req->buffer_count_actual;
+	fw_pending_bufs = get_pending_bufs_fw(inst);
+	mutex_unlock(&inst->lock);
+
+	buffers_outside_fw = total_output_buf - fw_pending_bufs;
+	dcvs->num_ftb[dcvs->ftb_index] = buffers_outside_fw;
+	dcvs->ftb_index = (dcvs->ftb_index + 1) % DCVS_FTB_WINDOW;
+
+	if (dcvs->ftb_counter < DCVS_FTB_WINDOW)
+		dcvs->ftb_counter++;
+
+	dprintk(VIDC_PROF,
+		"DCVS: ftb_counter %d\n", dcvs->ftb_counter);
+
+	if (dcvs->ftb_counter == DCVS_FTB_WINDOW) {
+		new_ftb = 0;
+		for (i = 0; i < dcvs->ftb_counter; i++) {
+			if (dcvs->num_ftb[i] > new_ftb)
+				new_ftb = dcvs->num_ftb[i];
+		}
+
+		dcvs->threshold_disp_buf_high = new_ftb;
+		if (dcvs->threshold_disp_buf_high <=
+			dcvs->threshold_disp_buf_low +
+			DCVS_BUFFER_SAFEGUARD) {
+			dcvs->threshold_disp_buf_high =
+				dcvs->threshold_disp_buf_low +
+				DCVS_BUFFER_SAFEGUARD
+				+ (DCVS_BUFFER_SAFEGUARD == 0);
+		}
+
+		dcvs->threshold_disp_buf_high =
+			clamp(dcvs->threshold_disp_buf_high,
+				dcvs->min_threshold,
+				dcvs->max_threshold);
+	}
+
+	if (dcvs->ftb_counter == DCVS_FTB_WINDOW &&
+			dcvs->load == dcvs->load_low) {
+		prev_buf_count =
+			dcvs->num_ftb[((dcvs->ftb_index - 2 +
+				DCVS_FTB_WINDOW) % DCVS_FTB_WINDOW)];
+		if (prev_buf_count == dcvs->threshold_disp_buf_low &&
+			buffers_outside_fw <= dcvs->threshold_disp_buf_low) {
+			dcvs->transition_turbo = true;
+		} else if (buffers_outside_fw > dcvs->threshold_disp_buf_low &&
+			(buffers_outside_fw -
+			 (prev_buf_count - buffers_outside_fw))
+			< dcvs->threshold_disp_buf_low){
+			dcvs->transition_turbo = true;
+		}
+	}
+
+	dprintk(VIDC_PROF,
+		"DCVS: total_output_buf %d buffers_outside_fw %d load %d transition_turbo %d\n",
+		total_output_buf, buffers_outside_fw, dcvs->load_low,
+		dcvs->transition_turbo);
+}
+
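+/*
+ * Encoder DCVS: once the input queue has been primed (etb_counter has
+ * caught up with total_input_buf), drop to load_low when no more than
+ * DCVS_ENC_LOW_THR input buffers are pending in firmware, and raise to
+ * load_high when at least DCVS_ENC_HIGH_THR are pending.
+ */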
+static int msm_dcvs_enc_scale_clocks(struct msm_vidc_inst *inst)
+{
+	int rc = 0, fw_pending_bufs = 0, total_input_buf = 0;
+	struct msm_vidc_core *core;
+	struct dcvs_stats *dcvs;
+
+	if (!inst || !inst->core || !inst->core->device) {
+		dprintk(VIDC_ERR, "%s Invalid params\n", __func__);
+		return -EINVAL;
+	}
+
+	core = inst->core;
+	dcvs = &inst->dcvs;
+
+	mutex_lock(&inst->lock);
+	total_input_buf = inst->buff_req.buffer[0].buffer_count_actual;
+	fw_pending_bufs = (inst->count.etb - inst->count.ebd);
+	mutex_unlock(&inst->lock);
+
+	dprintk(VIDC_PROF,
+		"DCVS: total_input_buf %d, fw_pending_bufs %d\n",
+		total_input_buf, fw_pending_bufs);
+
+	if (dcvs->etb_counter < total_input_buf) {
+		dcvs->etb_counter++;
+		if (dcvs->etb_counter != total_input_buf) {
+			return msm_comm_scale_clocks_load(core, dcvs->load,
+					LOAD_CALC_NO_QUIRKS);
+		}
+	}
+
+	dprintk(VIDC_PROF,
+		"DCVS: total_input_buf %d, fw_pending_bufs %d etb_counter %d  dcvs->load %d\n",
+		total_input_buf, fw_pending_bufs,
+		dcvs->etb_counter, dcvs->load);
+
+	if (fw_pending_bufs <= DCVS_ENC_LOW_THR &&
+		dcvs->load > dcvs->load_low) {
+		dcvs->load = dcvs->load_low;
+		dcvs->prev_freq_lowered = true;
+	} else {
+		dcvs->prev_freq_lowered = false;
+	}
+
+	if (fw_pending_bufs >= DCVS_ENC_HIGH_THR &&
+		dcvs->load < dcvs->load_high) {
+		dcvs->load = dcvs->load_high;
+		dcvs->prev_freq_increased = true;
+	} else {
+		dcvs->prev_freq_increased = false;
+	}
+
+	if (dcvs->prev_freq_lowered || dcvs->prev_freq_increased) {
+		dprintk(VIDC_PROF,
+			"DCVS: (Scaling Clock %s)  etb clock set = %d total_input_buf = %d fw_pending_bufs %d\n",
+			dcvs->prev_freq_lowered ? "Lower" : "Higher",
+			dcvs->load, total_input_buf, fw_pending_bufs);
+
+		rc = msm_comm_scale_clocks_load(core, dcvs->load,
+				LOAD_CALC_NO_QUIRKS);
+		if (rc) {
+			dprintk(VIDC_PROF,
+				"Failed to set clock rate in FBD: %d\n", rc);
+		}
+	} else {
+		dprintk(VIDC_PROF,
+			"DCVS: etb clock load_old = %d total_input_buf = %d fw_pending_bufs %d\n",
+			dcvs->load, total_input_buf, fw_pending_bufs);
+	}
+
+	return rc;
+}
+
+/*
+ * In DCVS, clock scaling happens from both the qbuf and the FBD paths:
+ * fbd == true  means the call came from FBD and may lower the clock;
+ * fbd == false means the call came from qbuf and may raise the clock,
+ * as decided by the DCVS algorithm.
+ */
+
+static int msm_dcvs_dec_scale_clocks(struct msm_vidc_inst *inst, bool fbd)
+{
+	int rc = 0;
+	int fw_pending_bufs = 0;
+	int total_output_buf = 0;
+	int buffers_outside_fw = 0;
+	struct msm_vidc_core *core;
+	struct hal_buffer_requirements *output_buf_req;
+	struct dcvs_stats *dcvs;
+
+	if (!inst || !inst->core || !inst->core->device) {
+		dprintk(VIDC_ERR, "%s Invalid params\n", __func__);
+		return -EINVAL;
+	}
+	core = inst->core;
+	dcvs = &inst->dcvs;
+	mutex_lock(&inst->lock);
+	fw_pending_bufs = get_pending_bufs_fw(inst);
+
+	output_buf_req = get_buff_req_buffer(inst,
+		msm_comm_get_hal_output_buffer(inst));
+	mutex_unlock(&inst->lock);
+	if (!output_buf_req) {
+		dprintk(VIDC_ERR,
+			"%s: No buffer requirement for buffer type %x\n",
+			__func__, HAL_BUFFER_OUTPUT);
+		return -EINVAL;
+	}
+
+	/* Total number of output buffers */
+	total_output_buf = output_buf_req->buffer_count_actual;
+
+	/* Buffers outside FW are with display */
+	buffers_outside_fw = total_output_buf - fw_pending_bufs;
+
+	if (buffers_outside_fw >= dcvs->threshold_disp_buf_high &&
+		!dcvs->prev_freq_increased &&
+		dcvs->load > dcvs->load_low) {
+		dcvs->load = dcvs->load_low;
+		dcvs->prev_freq_lowered = true;
+		dcvs->prev_freq_increased = false;
+	} else if (dcvs->transition_turbo && dcvs->load == dcvs->load_low) {
+		dcvs->load = dcvs->load_high;
+		dcvs->prev_freq_increased = true;
+		dcvs->prev_freq_lowered = false;
+		dcvs->transition_turbo = false;
+	} else {
+		dcvs->prev_freq_increased = false;
+		dcvs->prev_freq_lowered = false;
+	}
+
+	if (dcvs->prev_freq_lowered || dcvs->prev_freq_increased) {
+		dprintk(VIDC_PROF,
+			"DCVS: clock set = %d tot_output_buf = %d buffers_outside_fw %d threshold_high %d transition_turbo %d\n",
+			dcvs->load, total_output_buf, buffers_outside_fw,
+			dcvs->threshold_disp_buf_high, dcvs->transition_turbo);
+
+		rc = msm_comm_scale_clocks_load(core, dcvs->load,
+				LOAD_CALC_NO_QUIRKS);
+		if (rc) {
+			dprintk(VIDC_ERR,
+				"Failed to set clock rate in FBD: %d\n", rc);
+		}
+	} else {
+		dprintk(VIDC_PROF,
+			"DCVS: clock old = %d tot_output_buf = %d buffers_outside_fw %d threshold_high %d transition_turbo %d\n",
+			dcvs->load, total_output_buf, buffers_outside_fw,
+			dcvs->threshold_disp_buf_high, dcvs->transition_turbo);
+	}
+	return rc;
+}
+
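+/*
+ * A session qualifies for DCVS only when it is large enough per the
+ * platform DCVS limit table (min_mbpf and fps), is neither a thumbnail
+ * nor a turbo session, uses a codec listed in the matched DCVS table
+ * entry, and is the only active instance of its kind.
+ */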
+static bool msm_dcvs_check_supported(struct msm_vidc_inst *inst)
+{
+	int num_mbs_per_frame = 0, instance_count = 0;
+	long int instance_load = 0;
+	long int dcvs_limit = 0;
+	struct msm_vidc_inst *temp = NULL;
+	struct msm_vidc_core *core;
+	struct hal_buffer_requirements *output_buf_req;
+	struct dcvs_stats *dcvs;
+	bool is_codec_supported = false;
+	bool is_dcvs_supported = true;
+	struct msm_vidc_platform_resources *res = NULL;
+
+	if (!inst || !inst->core || !inst->core->device) {
+		dprintk(VIDC_WARN, "%s: Invalid parameter\n", __func__);
+		return false;
+	}
+
+	core = inst->core;
+	dcvs = &inst->dcvs;
+	res = &core->resources;
+
+	if (!res->dcvs_limit) {
+		dprintk(VIDC_WARN,
+				"%s: dcvs limit table not found\n", __func__);
+		return false;
+	}
+	instance_count = msm_dcvs_count_active_instances(core,
+		inst->session_type);
+	num_mbs_per_frame = msm_dcvs_get_mbs_per_frame(inst);
+	instance_load = msm_comm_get_inst_load(inst, LOAD_CALC_NO_QUIRKS);
+	dcvs_limit =
+		(long int)res->dcvs_limit[inst->session_type].min_mbpf *
+		res->dcvs_limit[inst->session_type].fps;
+	inst->dcvs.extra_buffer_count = 0;
+
+	if (!IS_VALID_DCVS_SESSION(num_mbs_per_frame,
+		res->dcvs_limit[inst->session_type].min_mbpf) ||
+		(inst->flags & VIDC_THUMBNAIL)) {
+		inst->dcvs.extra_buffer_count = 0;
+		is_dcvs_supported = false;
+		goto dcvs_decision_done;
+	}
+
+	if (inst->session_type == MSM_VIDC_DECODER) {
+		inst->dcvs.extra_buffer_count = DCVS_DEC_EXTRA_OUTPUT_BUFFERS;
+		output_buf_req = get_buff_req_buffer(inst,
+				msm_comm_get_hal_output_buffer(inst));
+		if (!output_buf_req) {
+			dprintk(VIDC_ERR,
+					"%s: No buffer requirement for buffer type %x\n",
+					__func__, HAL_BUFFER_OUTPUT);
+			return false;
+		}
+		is_codec_supported =
+			msm_dcvs_check_codec_supported(
+				inst->fmts[OUTPUT_PORT].fourcc,
+				inst->dcvs.supported_codecs,
+				inst->session_type);
+		if (!is_codec_supported ||
+				!msm_vidc_dec_dcvs_mode) {
+			inst->dcvs.extra_buffer_count = 0;
+			is_dcvs_supported = false;
+			goto dcvs_decision_done;
+		}
+		if (msm_comm_turbo_session(inst) ||
+			!IS_VALID_DCVS_SESSION(instance_load, dcvs_limit) ||
+			instance_count > 1)
+			is_dcvs_supported = false;
+	}
+	if (inst->session_type == MSM_VIDC_ENCODER) {
+		inst->dcvs.extra_buffer_count = DCVS_ENC_EXTRA_OUTPUT_BUFFERS;
+		is_codec_supported =
+			msm_dcvs_check_codec_supported(
+				inst->fmts[CAPTURE_PORT].fourcc,
+				inst->dcvs.supported_codecs,
+				inst->session_type);
+		if (!is_codec_supported ||
+				!msm_vidc_enc_dcvs_mode) {
+			inst->dcvs.extra_buffer_count = 0;
+			is_dcvs_supported = false;
+			goto dcvs_decision_done;
+		}
+		if (msm_comm_turbo_session(inst) ||
+			!IS_VALID_DCVS_SESSION(instance_load, dcvs_limit) ||
+				instance_count > 1)
+			is_dcvs_supported = false;
+	}
+dcvs_decision_done:
+	if (!is_dcvs_supported) {
+		msm_comm_scale_clocks(core);
+		if (instance_count > 1) {
+			mutex_lock(&core->lock);
+			list_for_each_entry(temp, &core->instances, list)
+				temp->dcvs_mode = false;
+			mutex_unlock(&core->lock);
+		}
+	}
+	return is_dcvs_supported;
+}
+
+int msm_dcvs_get_extra_buff_count(struct msm_vidc_inst *inst)
+{
+	if (!inst) {
+		dprintk(VIDC_ERR, "%s Invalid args\n", __func__);
+		return 0;
+	}
+
+	return inst->dcvs.extra_buffer_count;
+}
diff -Nruw linux-4.4.115/drivers/media/platform/msm/vidc/msm_vidc_dcvs.h linux-4.4.115-fbx/drivers/media/platform/msm/vidc/msm_vidc_dcvs.h
--- linux-4.4.115/drivers/media/platform/msm/vidc/msm_vidc_dcvs.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/media/platform/msm/vidc/msm_vidc_dcvs.h	2019-01-22 16:16:24.467255136 +0100
@@ -0,0 +1,40 @@
+/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _MSM_VIDC_DCVS_H_
+#define _MSM_VIDC_DCVS_H_
+#include "msm_vidc_internal.h"
+
+/* Low threshold for encoder DCVS */
+#define DCVS_ENC_LOW_THR 4
+/* High threshold for encoder DCVS */
+#define DCVS_ENC_HIGH_THR 9
+/* Extra output buffers for encoder DCVS */
+#define DCVS_ENC_EXTRA_OUTPUT_BUFFERS 2
+/* Extra output buffers for decoder DCVS */
+#define DCVS_DEC_EXTRA_OUTPUT_BUFFERS 4
+/* Default threshold to reduce the core frequency */
+#define DCVS_NOMINAL_THRESHOLD 8
+/* Default threshold to increase the core frequency */
+#define DCVS_TURBO_THRESHOLD 4
+
+/* Considering one safeguard buffer */
+#define DCVS_BUFFER_SAFEGUARD (DCVS_DEC_EXTRA_OUTPUT_BUFFERS - 1)
+
+void msm_dcvs_init(struct msm_vidc_inst *inst);
+void msm_dcvs_init_load(struct msm_vidc_inst *inst);
+void msm_dcvs_monitor_buffer(struct msm_vidc_inst *inst);
+void msm_dcvs_check_and_scale_clocks(struct msm_vidc_inst *inst, bool is_etb);
+int  msm_dcvs_get_extra_buff_count(struct msm_vidc_inst *inst);
+int msm_dcvs_try_enable(struct msm_vidc_inst *inst);
+#endif
diff -Nruw linux-4.4.115/drivers/media/platform/msm/vidc/msm_vidc_debug.c linux-4.4.115-fbx/drivers/media/platform/msm/vidc/msm_vidc_debug.c
--- linux-4.4.115/drivers/media/platform/msm/vidc/msm_vidc_debug.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/media/platform/msm/vidc/msm_vidc_debug.c	2019-01-22 16:16:24.467255136 +0100
@@ -0,0 +1,550 @@
+/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define CREATE_TRACE_POINTS
+#define MAX_SSR_STRING_LEN 10
+#include "msm_vidc_debug.h"
+#include "vidc_hfi_api.h"
+
+int msm_vidc_debug = VIDC_ERR | VIDC_WARN;
+EXPORT_SYMBOL(msm_vidc_debug);
+
+int msm_vidc_debug_out = VIDC_OUT_PRINTK;
+EXPORT_SYMBOL(msm_vidc_debug_out);
+
+int msm_vidc_fw_debug = 0x18;
+int msm_vidc_fw_debug_mode = 1;
+int msm_vidc_fw_low_power_mode = 1;
+int msm_vidc_hw_rsp_timeout = 2000;
+bool msm_vidc_fw_coverage = false;
+bool msm_vidc_vpe_csc_601_to_709 = false;
+bool msm_vidc_dec_dcvs_mode = true;
+bool msm_vidc_enc_dcvs_mode = true;
+bool msm_vidc_sys_idle_indicator = false;
+int msm_vidc_firmware_unload_delay = 15000;
+bool msm_vidc_thermal_mitigation_disabled = false;
+bool msm_vidc_bitrate_clock_scaling = true;
+bool msm_vidc_debug_timeout = false;
+
+#define MAX_DBG_BUF_SIZE 4096
+
+#define DYNAMIC_BUF_OWNER(__binfo) ({ \
+	atomic_read(&__binfo->ref_count) == 2 ? "video driver" : "firmware";\
+})
+
+struct core_inst_pair {
+	struct msm_vidc_core *core;
+	struct msm_vidc_inst *inst;
+};
+
+static int core_info_open(struct inode *inode, struct file *file)
+{
+	file->private_data = inode->i_private;
+	return 0;
+}
+
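+/*
+ * snprintf-style helper: formats into buffer and returns the number of
+ * characters actually written, excluding the trailing NUL.
+ */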
+static u32 write_str(char *buffer,
+		size_t size, const char *fmt, ...)
+{
+	va_list args;
+	u32 len;
+
+	va_start(args, fmt);
+	len = vscnprintf(buffer, size, fmt, args);
+	va_end(args);
+	return len;
+}
+
+static ssize_t core_info_read(struct file *file, char __user *buf,
+		size_t count, loff_t *ppos)
+{
+	struct msm_vidc_core *core = file->private_data;
+	struct hfi_device *hdev;
+	struct hal_fw_info fw_info = { {0} };
+	char *dbuf, *cur, *end;
+	int i = 0, rc = 0;
+	ssize_t len = 0;
+
+	if (!core || !core->device) {
+		dprintk(VIDC_ERR, "Invalid params, core: %pK\n", core);
+		return 0;
+	}
+
+	dbuf = kzalloc(MAX_DBG_BUF_SIZE, GFP_KERNEL);
+	if (!dbuf) {
+		dprintk(VIDC_ERR, "%s: Allocation failed!\n", __func__);
+		return -ENOMEM;
+	}
+	cur = dbuf;
+	end = cur + MAX_DBG_BUF_SIZE;
+	hdev = core->device;
+
+	cur += write_str(cur, end - cur, "===============================\n");
+	cur += write_str(cur, end - cur, "CORE %d: %pK\n", core->id, core);
+	cur += write_str(cur, end - cur, "===============================\n");
+	cur += write_str(cur, end - cur, "Core state: %d\n", core->state);
+	rc = call_hfi_op(hdev, get_fw_info, hdev->hfi_device_data, &fw_info);
+	if (rc) {
+		dprintk(VIDC_WARN, "Failed to read FW info\n");
+		goto err_fw_info;
+	}
+
+	cur += write_str(cur, end - cur,
+		"FW version : %s\n", &fw_info.version);
+	cur += write_str(cur, end - cur,
+		"base addr: 0x%x\n", fw_info.base_addr);
+	cur += write_str(cur, end - cur,
+		"register_base: 0x%x\n", fw_info.register_base);
+	cur += write_str(cur, end - cur,
+		"register_size: %u\n", fw_info.register_size);
+	cur += write_str(cur, end - cur, "irq: %u\n", fw_info.irq);
+
+err_fw_info:
+	for (i = SYS_MSG_START; i < SYS_MSG_END; i++) {
+		cur += write_str(cur, end - cur, "completions[%d]: %s\n", i,
+			completion_done(&core->completions[SYS_MSG_INDEX(i)]) ?
+			"pending" : "done");
+	}
+	len = simple_read_from_buffer(buf, count, ppos,
+			dbuf, cur - dbuf);
+
+	kfree(dbuf);
+	return len;
+}
+
+static const struct file_operations core_info_fops = {
+	.open = core_info_open,
+	.read = core_info_read,
+};
+
+static int trigger_ssr_open(struct inode *inode, struct file *file)
+{
+	file->private_data = inode->i_private;
+	return 0;
+}
+
+static ssize_t trigger_ssr_write(struct file *filp, const char __user *buf,
+		size_t count, loff_t *ppos)
+{
+	unsigned long ssr_trigger_val = 0;
+	int rc = 0;
+	struct msm_vidc_core *core = filp->private_data;
+	size_t size = MAX_SSR_STRING_LEN;
+	char kbuf[MAX_SSR_STRING_LEN + 1] = {0};
+
+	if (!count)
+		goto exit;
+
+	if (count < size)
+		size = count;
+
+	if (copy_from_user(kbuf, buf, size)) {
+		dprintk(VIDC_WARN, "%s User memory fault\n", __func__);
+		rc = -EFAULT;
+		goto exit;
+	}
+
+	rc = kstrtoul(kbuf, 0, &ssr_trigger_val);
+	if (rc) {
+		dprintk(VIDC_WARN, "returning error err %d\n", rc);
+		rc = -EINVAL;
+	} else {
+		msm_vidc_trigger_ssr(core, ssr_trigger_val);
+		rc = count;
+	}
+exit:
+	return rc;
+}
+
+static const struct file_operations ssr_fops = {
+	.open = trigger_ssr_open,
+	.write = trigger_ssr_write,
+};
+
+struct dentry *msm_vidc_debugfs_init_drv(void)
+{
+	bool ok = false;
+	struct dentry *dir = NULL;
+
+	dir = debugfs_create_dir("msm_vidc", NULL);
+	if (IS_ERR_OR_NULL(dir)) {
+		dir = NULL;
+		goto failed_create_dir;
+	}
+
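+/*
+ * GCC statement-expression helper: creates one debugfs file and
+ * evaluates to its dentry (NULL on failure), so the calls below can be
+ * chained with && to stop at the first failure.
+ */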
+#define __debugfs_create(__type, __name, __value) ({                          \
+	struct dentry *f = debugfs_create_##__type(__name, S_IRUGO | S_IWUSR, \
+		dir, __value);                                                \
+	if (IS_ERR_OR_NULL(f)) {                                              \
+		dprintk(VIDC_ERR, "Failed creating debugfs file '%pd/%s'\n",  \
+			dir, __name);                                         \
+		f = NULL;                                                     \
+	}                                                                     \
+	f;                                                                    \
+})
+
+	ok =
+	__debugfs_create(x32, "debug_level", &msm_vidc_debug) &&
+	__debugfs_create(x32, "fw_level", &msm_vidc_fw_debug) &&
+	__debugfs_create(u32, "fw_debug_mode", &msm_vidc_fw_debug_mode) &&
+	__debugfs_create(bool, "fw_coverage", &msm_vidc_fw_coverage) &&
+	__debugfs_create(bool, "dcvs_dec_mode", &msm_vidc_dec_dcvs_mode) &&
+	__debugfs_create(bool, "dcvs_enc_mode", &msm_vidc_enc_dcvs_mode) &&
+	__debugfs_create(u32, "fw_low_power_mode",
+			&msm_vidc_fw_low_power_mode) &&
+	__debugfs_create(u32, "debug_output", &msm_vidc_debug_out) &&
+	__debugfs_create(u32, "hw_rsp_timeout", &msm_vidc_hw_rsp_timeout) &&
+	__debugfs_create(bool, "sys_idle_indicator",
+			&msm_vidc_sys_idle_indicator) &&
+	__debugfs_create(u32, "firmware_unload_delay",
+			&msm_vidc_firmware_unload_delay) &&
+	__debugfs_create(bool, "disable_thermal_mitigation",
+			&msm_vidc_thermal_mitigation_disabled) &&
+	__debugfs_create(bool, "bitrate_clock_scaling",
+			&msm_vidc_bitrate_clock_scaling) &&
+	__debugfs_create(bool, "debug_timeout",
+			&msm_vidc_debug_timeout);
+
+#undef __debugfs_create
+
+	if (!ok)
+		goto failed_create_dir;
+
+	return dir;
+
+failed_create_dir:
+	if (dir)
+		debugfs_remove_recursive(dir);
+
+	return NULL;
+}
+
+struct dentry *msm_vidc_debugfs_init_core(struct msm_vidc_core *core,
+		struct dentry *parent)
+{
+	struct dentry *dir = NULL;
+	char debugfs_name[MAX_DEBUGFS_NAME];
+
+	if (!core) {
+		dprintk(VIDC_ERR, "Invalid params, core: %pK\n", core);
+		goto failed_create_dir;
+	}
+
+	snprintf(debugfs_name, MAX_DEBUGFS_NAME, "core%d", core->id);
+	dir = debugfs_create_dir(debugfs_name, parent);
+	if (!dir) {
+		dprintk(VIDC_ERR, "Failed to create debugfs for msm_vidc\n");
+		goto failed_create_dir;
+	}
+
+	if (!debugfs_create_file("info", S_IRUGO, dir, core, &core_info_fops)) {
+		dprintk(VIDC_ERR, "debugfs_create_file: fail\n");
+		goto failed_create_dir;
+	}
+	if (!debugfs_create_file("trigger_ssr", S_IWUSR,
+			dir, core, &ssr_fops)) {
+		dprintk(VIDC_ERR, "debugfs_create_file: fail\n");
+		goto failed_create_dir;
+	}
+failed_create_dir:
+	return dir;
+}
+
+static int inst_info_open(struct inode *inode, struct file *file)
+{
+	dprintk(VIDC_INFO, "Open inode ptr: %pK\n", inode->i_private);
+	file->private_data = inode->i_private;
+	return 0;
+}
+
+static int publish_unreleased_reference(struct msm_vidc_inst *inst,
+		char **dbuf, char *end)
+{
+	char *cur = *dbuf;
+	struct buffer_info *temp = NULL;
+
+	if (!inst) {
+		dprintk(VIDC_ERR, "%s: invalid param\n", __func__);
+		return -EINVAL;
+	}
+
+	if (inst->buffer_mode_set[CAPTURE_PORT] == HAL_BUFFER_MODE_DYNAMIC) {
+		cur += write_str(cur, end - cur, "Pending buffer references\n");
+
+		mutex_lock(&inst->registeredbufs.lock);
+		list_for_each_entry(temp, &inst->registeredbufs.list, list) {
+			if (temp->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE &&
+			!temp->inactive && atomic_read(&temp->ref_count)) {
+				cur += write_str(cur, end - cur,
+					"\tpending buffer: %#lx fd[0] = %d ref_count = %d held by: %s\n",
+					temp->device_addr[0],
+					temp->fd[0],
+					atomic_read(&temp->ref_count),
+					DYNAMIC_BUF_OWNER(temp));
+			}
+		}
+		mutex_unlock(&inst->registeredbufs.lock);
+	}
+
+	*dbuf = cur;
+	return 0;
+}
+
+static void put_inst_helper(struct kref *kref)
+{
+	struct msm_vidc_inst *inst = container_of(kref,
+			struct msm_vidc_inst, kref);
+
+	msm_vidc_destroy(inst);
+}
+
+static ssize_t inst_info_read(struct file *file, char __user *buf,
+		size_t count, loff_t *ppos)
+{
+	struct core_inst_pair *idata = file->private_data;
+	struct msm_vidc_core *core;
+	struct msm_vidc_inst *inst, *temp = NULL;
+	char *dbuf, *cur, *end;
+	int i, j;
+	ssize_t len = 0;
+
+	if (!idata || !idata->core || !idata->inst) {
+		dprintk(VIDC_ERR, "%s: Invalid params\n", __func__);
+		return 0;
+	}
+
+	core = idata->core;
+	inst = idata->inst;
+
+	mutex_lock(&core->lock);
+	list_for_each_entry(temp, &core->instances, list) {
+		if (temp == inst)
+			break;
+	}
+	inst = ((temp == inst) && kref_get_unless_zero(&inst->kref)) ?
+		inst : NULL;
+	mutex_unlock(&core->lock);
+
+	if (!inst) {
+		dprintk(VIDC_ERR, "%s: Instance has become obsolete", __func__);
+		return 0;
+	}
+
+	dbuf = kzalloc(MAX_DBG_BUF_SIZE, GFP_KERNEL);
+	if (!dbuf) {
+		dprintk(VIDC_ERR, "%s: Allocation failed!\n", __func__);
+		len = -ENOMEM;
+		goto failed_alloc;
+	}
+	cur = dbuf;
+	end = cur + MAX_DBG_BUF_SIZE;
+
+	cur += write_str(cur, end - cur, "==============================\n");
+	cur += write_str(cur, end - cur, "INSTANCE: %pK (%s)\n", inst,
+		inst->session_type == MSM_VIDC_ENCODER ? "Encoder" : "Decoder");
+	cur += write_str(cur, end - cur, "==============================\n");
+	cur += write_str(cur, end - cur, "core: %pK\n", inst->core);
+	cur += write_str(cur, end - cur, "height: %d\n",
+		inst->prop.height[CAPTURE_PORT]);
+	cur += write_str(cur, end - cur, "width: %d\n",
+		inst->prop.width[CAPTURE_PORT]);
+	cur += write_str(cur, end - cur, "fps: %d\n", inst->prop.fps);
+	cur += write_str(cur, end - cur, "state: %d\n", inst->state);
+	cur += write_str(cur, end - cur, "secure: %d\n",
+		!!(inst->flags & VIDC_SECURE));
+	cur += write_str(cur, end - cur, "-----------Formats-------------\n");
+	for (i = 0; i < MAX_PORT_NUM; i++) {
+		cur += write_str(cur, end - cur, "capability: %s\n",
+			i == OUTPUT_PORT ? "Output" : "Capture");
+		cur += write_str(cur, end - cur, "name : %s\n",
+			inst->fmts[i].name);
+		cur += write_str(cur, end - cur, "planes : %d\n",
+			inst->prop.num_planes[i]);
+		cur += write_str(cur, end - cur,
+			"type: %s\n", inst->fmts[i].type == OUTPUT_PORT ?
+			"Output" : "Capture");
+		switch (inst->buffer_mode_set[i]) {
+		case HAL_BUFFER_MODE_STATIC:
+			cur += write_str(cur, end - cur,
+				"buffer mode : %s\n", "static");
+			break;
+		case HAL_BUFFER_MODE_RING:
+			cur += write_str(cur, end - cur,
+				"buffer mode : %s\n", "ring");
+			break;
+		case HAL_BUFFER_MODE_DYNAMIC:
+			cur += write_str(cur, end - cur,
+				"buffer mode : %s\n", "dynamic");
+			break;
+		default:
+			cur += write_str(cur, end - cur,
+				"buffer mode : unsupported\n");
+		}
+
+		cur += write_str(cur, end - cur, "count: %u\n",
+				inst->bufq[i].vb2_bufq.num_buffers);
+
+		for (j = 0; j < inst->prop.num_planes[i]; j++)
+			cur += write_str(cur, end - cur,
+			"size for plane %d: %u\n", j,
+			inst->bufq[i].vb2_bufq.plane_sizes[j]);
+
+		if (i < MAX_PORT_NUM - 1)
+			cur += write_str(cur, end - cur, "\n");
+	}
+	cur += write_str(cur, end - cur, "-------------------------------\n");
+	for (i = SESSION_MSG_START; i < SESSION_MSG_END; i++) {
+		cur += write_str(cur, end - cur, "completions[%d]: %s\n", i,
+		completion_done(&inst->completions[SESSION_MSG_INDEX(i)]) ?
+		"pending" : "done");
+	}
+	cur += write_str(cur, end - cur, "ETB Count: %d\n", inst->count.etb);
+	cur += write_str(cur, end - cur, "EBD Count: %d\n", inst->count.ebd);
+	cur += write_str(cur, end - cur, "FTB Count: %d\n", inst->count.ftb);
+	cur += write_str(cur, end - cur, "FBD Count: %d\n", inst->count.fbd);
+
+	publish_unreleased_reference(inst, &cur, end);
+	len = simple_read_from_buffer(buf, count, ppos,
+		dbuf, cur - dbuf);
+
+	kfree(dbuf);
+failed_alloc:
+	kref_put(&inst->kref, put_inst_helper);
+	return len;
+}
+
+static int inst_info_release(struct inode *inode, struct file *file)
+{
+	dprintk(VIDC_INFO, "Release inode ptr: %pK\n", inode->i_private);
+	file->private_data = NULL;
+	return 0;
+}
+
+static const struct file_operations inst_info_fops = {
+	.open = inst_info_open,
+	.read = inst_info_read,
+	.release = inst_info_release,
+};
+
+struct dentry *msm_vidc_debugfs_init_inst(struct msm_vidc_inst *inst,
+		struct dentry *parent)
+{
+	struct dentry *dir = NULL, *info = NULL;
+	char debugfs_name[MAX_DEBUGFS_NAME];
+	struct core_inst_pair *idata = NULL;
+
+	if (!inst) {
+		dprintk(VIDC_ERR, "Invalid params, inst: %pK\n", inst);
+		goto exit;
+	}
+	snprintf(debugfs_name, MAX_DEBUGFS_NAME, "inst_%pK", inst);
+
+	idata = kzalloc(sizeof(struct core_inst_pair), GFP_KERNEL);
+	if (!idata) {
+		dprintk(VIDC_ERR, "%s: Allocation failed!\n", __func__);
+		goto exit;
+	}
+
+	idata->core = inst->core;
+	idata->inst = inst;
+
+	dir = debugfs_create_dir(debugfs_name, parent);
+	if (!dir) {
+		dprintk(VIDC_ERR, "Failed to create debugfs for msm_vidc\n");
+		goto failed_create_dir;
+	}
+
+	info = debugfs_create_file("info", S_IRUGO, dir,
+			idata, &inst_info_fops);
+	if (!info) {
+		dprintk(VIDC_ERR, "debugfs_create_file: fail\n");
+		goto failed_create_file;
+	}
+
+	dir->d_inode->i_private = info->d_inode->i_private;
+	inst->debug.pdata[FRAME_PROCESSING].sampling = true;
+	return dir;
+
+failed_create_file:
+	debugfs_remove_recursive(dir);
+	dir = NULL;
+failed_create_dir:
+	kfree(idata);
+exit:
+	return dir;
+}
+
+void msm_vidc_debugfs_deinit_inst(struct msm_vidc_inst *inst)
+{
+	struct dentry *dentry = NULL;
+
+	if (!inst || !inst->debugfs_root)
+		return;
+
+	dentry = inst->debugfs_root;
+	if (dentry->d_inode) {
+		dprintk(VIDC_INFO, "Destroy %pK\n", dentry->d_inode->i_private);
+		kfree(dentry->d_inode->i_private);
+		dentry->d_inode->i_private = NULL;
+	}
+	debugfs_remove_recursive(dentry);
+	inst->debugfs_root = NULL;
+}
+
+void msm_vidc_debugfs_update(struct msm_vidc_inst *inst,
+	enum msm_vidc_debugfs_event e)
+{
+	struct msm_vidc_debug *d = &inst->debug;
+	char a[64] = "Frame processing";
+
+	switch (e) {
+	case MSM_VIDC_DEBUGFS_EVENT_ETB:
+		mutex_lock(&inst->lock);
+		inst->count.etb++;
+		mutex_unlock(&inst->lock);
+		if (inst->count.ebd && inst->count.ftb > inst->count.fbd) {
+			d->pdata[FRAME_PROCESSING].name[0] = '\0';
+			tic(inst, FRAME_PROCESSING, a);
+		}
+	break;
+	case MSM_VIDC_DEBUGFS_EVENT_EBD:
+		mutex_lock(&inst->lock);
+		inst->count.ebd++;
+		mutex_unlock(&inst->lock);
+		if (inst->count.ebd && inst->count.ebd == inst->count.etb) {
+			toc(inst, FRAME_PROCESSING);
+			dprintk(VIDC_PROF, "EBD: FW needs input buffers\n");
+		}
+		if (inst->count.ftb == inst->count.fbd)
+			dprintk(VIDC_PROF, "EBD: FW needs output buffers\n");
+	break;
+	case MSM_VIDC_DEBUGFS_EVENT_FTB: {
+		inst->count.ftb++;
+		if (inst->count.ebd && inst->count.etb > inst->count.ebd) {
+			d->pdata[FRAME_PROCESSING].name[0] = '\0';
+			tic(inst, FRAME_PROCESSING, a);
+		}
+	}
+	break;
+	case MSM_VIDC_DEBUGFS_EVENT_FBD:
+		inst->debug.samples++;
+		if (inst->count.ebd && inst->count.fbd == inst->count.ftb) {
+			toc(inst, FRAME_PROCESSING);
+			dprintk(VIDC_PROF, "FBD: FW needs output buffers\n");
+		}
+		if (inst->count.etb == inst->count.ebd)
+			dprintk(VIDC_PROF, "FBD: FW needs input buffers\n");
+		break;
+	default:
+		dprintk(VIDC_ERR, "Invalid state in debugfs: %d\n", e);
+		break;
+	}
+}
diff -Nruw linux-4.4.115/drivers/media/platform/msm/vidc/msm_vidc_debug.h linux-4.4.115-fbx/drivers/media/platform/msm/vidc/msm_vidc_debug.h
--- linux-4.4.115/drivers/media/platform/msm/vidc/msm_vidc_debug.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/media/platform/msm/vidc/msm_vidc_debug.h	2019-01-22 16:16:24.467255136 +0100
@@ -0,0 +1,180 @@
+/* Copyright (c) 2012-2015, 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __MSM_VIDC_DEBUG__
+#define __MSM_VIDC_DEBUG__
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include "msm_vidc_internal.h"
+#include "trace/events/msm_vidc.h"
+
+#ifndef VIDC_DBG_LABEL
+#define VIDC_DBG_LABEL "msm_vidc"
+#endif
+
+#define VIDC_DBG_TAG VIDC_DBG_LABEL ": %4s: "
+#define VIDC_DBG_WARN_ENABLE (msm_vidc_debug & VIDC_INFO)
+
+/* To enable a set of messages, OR their values together and
+ * echo the result to the debugfs debug_level file.
+ *
+ * To enable all messages, set debug_level = 0x101F.
+ */
+
+enum vidc_msg_prio {
+	VIDC_ERR  = 0x0001,
+	VIDC_WARN = 0x0002,
+	VIDC_INFO = 0x0004,
+	VIDC_DBG  = 0x0008,
+	VIDC_PROF = 0x0010,
+	VIDC_PKT  = 0x0020,
+	VIDC_FW   = 0x1000,
+};
+
+enum vidc_msg_out {
+	VIDC_OUT_PRINTK = 0,
+	VIDC_OUT_FTRACE,
+};
+
+enum msm_vidc_debugfs_event {
+	MSM_VIDC_DEBUGFS_EVENT_ETB,
+	MSM_VIDC_DEBUGFS_EVENT_EBD,
+	MSM_VIDC_DEBUGFS_EVENT_FTB,
+	MSM_VIDC_DEBUGFS_EVENT_FBD,
+};
+
+extern int msm_vidc_debug;
+extern int msm_vidc_debug_out;
+extern int msm_vidc_fw_debug;
+extern int msm_vidc_fw_debug_mode;
+extern int msm_vidc_fw_low_power_mode;
+extern int msm_vidc_hw_rsp_timeout;
+extern bool msm_vidc_fw_coverage;
+extern bool msm_vidc_vpe_csc_601_to_709;
+extern bool msm_vidc_dec_dcvs_mode;
+extern bool msm_vidc_enc_dcvs_mode;
+extern bool msm_vidc_sys_idle_indicator;
+extern int msm_vidc_firmware_unload_delay;
+extern bool msm_vidc_thermal_mitigation_disabled;
+extern bool msm_vidc_bitrate_clock_scaling;
+extern bool msm_vidc_debug_timeout;
+
+#define VIDC_MSG_PRIO2STRING(__level) ({ \
+	char *__str; \
+	\
+	switch (__level) { \
+	case VIDC_ERR: \
+		__str = "err"; \
+		break; \
+	case VIDC_WARN: \
+		__str = "warn"; \
+		break; \
+	case VIDC_INFO: \
+		__str = "info"; \
+		break; \
+	case VIDC_DBG: \
+		__str = "dbg"; \
+		break; \
+	case VIDC_PROF: \
+		__str = "prof"; \
+		break; \
+	case VIDC_PKT: \
+		__str = "pkt"; \
+		break; \
+	case VIDC_FW: \
+		__str = "fw"; \
+		break; \
+	default: \
+		__str = "????"; \
+		break; \
+	} \
+	\
+	__str; \
+	})
+
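+/*
+ * Gate each message on the msm_vidc_debug level bitmask and route it to
+ * printk or ftrace according to msm_vidc_debug_out. Typical usage:
+ *	dprintk(VIDC_WARN, "inst %pK: unexpected state %d\n", inst, state);
+ */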
+#define dprintk(__level, __fmt, arg...)	\
+	do { \
+		if (msm_vidc_debug & __level) { \
+			if (msm_vidc_debug_out == VIDC_OUT_PRINTK) { \
+				pr_info(VIDC_DBG_TAG __fmt, \
+						VIDC_MSG_PRIO2STRING(__level), \
+						## arg); \
+			} else if (msm_vidc_debug_out == VIDC_OUT_FTRACE) { \
+				trace_printk(KERN_DEBUG VIDC_DBG_TAG __fmt, \
+						VIDC_MSG_PRIO2STRING(__level), \
+						## arg); \
+			} \
+		} \
+	} while (0)
+
+struct dentry *msm_vidc_debugfs_init_drv(void);
+struct dentry *msm_vidc_debugfs_init_core(struct msm_vidc_core *core,
+		struct dentry *parent);
+struct dentry *msm_vidc_debugfs_init_inst(struct msm_vidc_inst *inst,
+		struct dentry *parent);
+void msm_vidc_debugfs_deinit_inst(struct msm_vidc_inst *inst);
+void msm_vidc_debugfs_update(struct msm_vidc_inst *inst,
+		enum msm_vidc_debugfs_event e);
+
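+/*
+ * tic()/toc() bracket a profiling point: tic() records a millisecond
+ * start timestamp when VIDC_PROF logging is enabled, and toc()
+ * accumulates the elapsed time into pdata[p].cumulative, which
+ * show_stats() later averages over debug.samples.
+ */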
+static inline void tic(struct msm_vidc_inst *i, enum profiling_points p,
+				 char *b)
+{
+	struct timeval __ddl_tv;
+	if (!i->debug.pdata[p].name[0])
+		memcpy(i->debug.pdata[p].name, b, 64);
+	if ((msm_vidc_debug & VIDC_PROF) &&
+		i->debug.pdata[p].sampling) {
+		do_gettimeofday(&__ddl_tv);
+		i->debug.pdata[p].start =
+			(__ddl_tv.tv_sec * 1000) + (__ddl_tv.tv_usec / 1000);
+		i->debug.pdata[p].sampling = false;
+	}
+}
+
+static inline void toc(struct msm_vidc_inst *i, enum profiling_points p)
+{
+	struct timeval __ddl_tv;
+	if ((msm_vidc_debug & VIDC_PROF) &&
+		!i->debug.pdata[p].sampling) {
+		do_gettimeofday(&__ddl_tv);
+		i->debug.pdata[p].stop = (__ddl_tv.tv_sec * 1000)
+			+ (__ddl_tv.tv_usec / 1000);
+		i->debug.pdata[p].cumulative += i->debug.pdata[p].stop -
+			i->debug.pdata[p].start;
+		i->debug.pdata[p].sampling = true;
+	}
+}
+
+static inline void show_stats(struct msm_vidc_inst *i)
+{
+	int x;
+	for (x = 0; x < MAX_PROFILING_POINTS; x++) {
+		if (i->debug.pdata[x].name[0] &&
+				(msm_vidc_debug & VIDC_PROF)) {
+			if (i->debug.samples) {
+				dprintk(VIDC_PROF, "%s averaged %d ms/sample\n",
+						i->debug.pdata[x].name,
+						i->debug.pdata[x].cumulative /
+						i->debug.samples);
+			}
+
+			dprintk(VIDC_PROF, "%s Samples: %d\n",
+					i->debug.pdata[x].name,
+					i->debug.samples);
+		}
+	}
+}
+
+#endif
diff -Nruw linux-4.4.115/drivers/media/platform/msm/vidc/msm_vidc_internal.h linux-4.4.115-fbx/drivers/media/platform/msm/vidc/msm_vidc_internal.h
--- linux-4.4.115/drivers/media/platform/msm/vidc/msm_vidc_internal.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/media/platform/msm/vidc/msm_vidc_internal.h	2019-10-29 09:26:23.961206290 +0100
@@ -0,0 +1,384 @@
+/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _MSM_VIDC_INTERNAL_H_
+#define _MSM_VIDC_INTERNAL_H_
+
+#include <linux/atomic.h>
+#include <linux/list.h>
+#include <linux/time.h>
+#include <linux/types.h>
+#include <linux/completion.h>
+#include <linux/wait.h>
+#include <linux/workqueue.h>
+#include <linux/msm-bus.h>
+#include <linux/msm-bus-board.h>
+#include <linux/kref.h>
+#include <media/v4l2-dev.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-ctrls.h>
+#include <media/videobuf2-core.h>
+#include <media/videobuf2-v4l2.h>
+#include <media/msm_vidc.h>
+#include <media/msm_media_info.h>
+
+#include "vidc_hfi_api.h"
+
+#define MSM_VIDC_DRV_NAME "msm_vidc_driver"
+#define MSM_VIDC_VERSION KERNEL_VERSION(0, 0, 1)
+#define MAX_DEBUGFS_NAME 50
+#define DEFAULT_TIMEOUT 3
+#define DEFAULT_HEIGHT 1088
+#define DEFAULT_WIDTH 1920
+#define MIN_SUPPORTED_WIDTH 32
+#define MIN_SUPPORTED_HEIGHT 32
+#define MAX_SUPPORTED_WIDTH 4096
+#define MAX_SUPPORTED_HEIGHT 2160
+#define DEFAULT_FPS 15
+
+/* Maintains the number of FTBs between each FBD over a sliding window */
+#define DCVS_FTB_WINDOW 32
+
+#define V4L2_EVENT_VIDC_BASE  10
+
+#define SYS_MSG_START HAL_SYS_INIT_DONE
+#define SYS_MSG_END HAL_SYS_ERROR
+#define SESSION_MSG_START HAL_SESSION_EVENT_CHANGE
+#define SESSION_MSG_END HAL_SESSION_ERROR
+#define SYS_MSG_INDEX(__msg) (__msg - SYS_MSG_START)
+#define SESSION_MSG_INDEX(__msg) (__msg - SESSION_MSG_START)
+
+
+#define MAX_NAME_LENGTH 64
+
+#define EXTRADATA_IDX(__num_planes) ((__num_planes) ? (__num_planes) - 1 : 0)
+
+#define NUM_MBS_PER_SEC(__height, __width, __fps) \
+	(NUM_MBS_PER_FRAME(__height, __width) * __fps)
+
+#define NUM_MBS_PER_FRAME(__height, __width) \
+	((ALIGN(__height, 16) / 16) * (ALIGN(__width, 16) / 16))
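+/*
+ * e.g. a 1920x1088 frame: (1088 / 16) * (1920 / 16) = 68 * 120 = 8160 MBs
+ */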
+
+enum vidc_ports {
+	OUTPUT_PORT,
+	CAPTURE_PORT,
+	MAX_PORT_NUM
+};
+
+enum vidc_core_state {
+	VIDC_CORE_UNINIT = 0,
+	VIDC_CORE_INIT,
+	VIDC_CORE_INIT_DONE,
+	VIDC_CORE_INVALID
+};
+
+/*
+ * Do not change the enum values unless
+ * you know what you are doing.
+ */
+enum instance_state {
+	MSM_VIDC_CORE_UNINIT_DONE = 0x0001,
+	MSM_VIDC_CORE_INIT,
+	MSM_VIDC_CORE_INIT_DONE,
+	MSM_VIDC_OPEN,
+	MSM_VIDC_OPEN_DONE,
+	MSM_VIDC_LOAD_RESOURCES,
+	MSM_VIDC_LOAD_RESOURCES_DONE,
+	MSM_VIDC_START,
+	MSM_VIDC_START_DONE,
+	MSM_VIDC_STOP,
+	MSM_VIDC_STOP_DONE,
+	MSM_VIDC_RELEASE_RESOURCES,
+	MSM_VIDC_RELEASE_RESOURCES_DONE,
+	MSM_VIDC_CLOSE,
+	MSM_VIDC_CLOSE_DONE,
+	MSM_VIDC_CORE_UNINIT,
+	MSM_VIDC_CORE_INVALID
+};
+
+struct buf_info {
+	struct list_head list;
+	struct vb2_buffer *buf;
+};
+
+struct msm_vidc_list {
+	struct list_head list;
+	struct mutex lock;
+};
+
+static inline void INIT_MSM_VIDC_LIST(struct msm_vidc_list *mlist)
+{
+	mutex_init(&mlist->lock);
+	INIT_LIST_HEAD(&mlist->list);
+}
+
+static inline void DEINIT_MSM_VIDC_LIST(struct msm_vidc_list *mlist)
+{
+	mutex_destroy(&mlist->lock);
+}
+
+enum buffer_owner {
+	DRIVER,
+	FIRMWARE,
+	CLIENT,
+	MAX_OWNER
+};
+
+struct internal_buf {
+	struct list_head list;
+	enum hal_buffer buffer_type;
+	struct msm_smem *handle;
+	enum buffer_owner buffer_ownership;
+};
+
+struct msm_vidc_format {
+	char name[MAX_NAME_LENGTH];
+	u8 description[32];
+	u32 fourcc;
+	int type;
+	u32 (*get_frame_size)(int plane, u32 height, u32 width);
+};
+
+struct msm_vidc_drv {
+	struct mutex lock;
+	struct list_head cores;
+	int num_cores;
+	struct dentry *debugfs_root;
+	int thermal_level;
+	u32 platform_version;
+};
+
+struct msm_video_device {
+	int type;
+	struct video_device vdev;
+};
+
+struct session_prop {
+	u32 width[MAX_PORT_NUM];
+	u32 height[MAX_PORT_NUM];
+	u32 num_planes[MAX_PORT_NUM];
+	u32 extradata[MAX_PORT_NUM];
+	u32 fps;
+	u32 bitrate;
+};
+
+struct buf_queue {
+	struct vb2_queue vb2_bufq;
+	struct mutex lock;
+};
+
+enum profiling_points {
+	SYS_INIT = 0,
+	SESSION_INIT,
+	LOAD_RESOURCES,
+	FRAME_PROCESSING,
+	FW_IDLE,
+	MAX_PROFILING_POINTS,
+};
+
+struct buf_count {
+	int etb;
+	int ftb;
+	int fbd;
+	int ebd;
+};
+
+struct dcvs_stats {
+	int num_ftb[DCVS_FTB_WINDOW];
+	bool transition_turbo;
+	int ftb_index;
+	int ftb_counter;
+	bool prev_freq_lowered;
+	bool prev_freq_increased;
+	int threshold_disp_buf_high;
+	int threshold_disp_buf_low;
+	int load;
+	int load_low;
+	int load_high;
+	int min_threshold;
+	int max_threshold;
+	int etb_counter;
+	bool is_power_save_mode;
+	unsigned int extra_buffer_count;
+	u32 supported_codecs;
+};
+
+struct profile_data {
+	int start;
+	int stop;
+	int cumulative;
+	char name[64];
+	int sampling;
+	int average;
+};
+
+struct msm_vidc_debug {
+	struct profile_data pdata[MAX_PROFILING_POINTS];
+	int profile;
+	int samples;
+};
+
+enum msm_vidc_modes {
+	VIDC_SECURE = BIT(0),
+	VIDC_TURBO = BIT(1),
+	VIDC_THUMBNAIL = BIT(2),
+	VIDC_LOW_POWER = BIT(3),
+	VIDC_REALTIME = BIT(4),
+};
+
+struct msm_vidc_core {
+	struct list_head list;
+	struct mutex lock;
+	int id;
+	struct hfi_device *device;
+	struct msm_video_device vdev[MSM_VIDC_MAX_DEVICES];
+	struct v4l2_device v4l2_dev;
+	struct list_head instances;
+	struct dentry *debugfs_root;
+	enum vidc_core_state state;
+	struct completion completions[SYS_MSG_END - SYS_MSG_START + 1];
+	enum msm_vidc_hfi_type hfi_type;
+	struct msm_vidc_platform_resources resources;
+	u32 enc_codec_supported;
+	u32 dec_codec_supported;
+	u32 codec_count;
+	struct msm_vidc_capability *capabilities;
+	struct delayed_work fw_unload_work;
+	bool smmu_fault_handled;
+};
+
+struct msm_vidc_inst {
+	struct list_head list;
+	struct mutex sync_lock, lock;
+	struct msm_vidc_core *core;
+	enum session_type session_type;
+	void *session;
+	struct session_prop prop;
+	enum instance_state state;
+	struct msm_vidc_format fmts[MAX_PORT_NUM];
+	struct buf_queue bufq[MAX_PORT_NUM];
+	struct msm_vidc_list pendingq;
+	struct msm_vidc_list scratchbufs;
+	struct msm_vidc_list persistbufs;
+	struct msm_vidc_list pending_getpropq;
+	struct msm_vidc_list outputbufs;
+	struct msm_vidc_list registeredbufs;
+	struct buffer_requirements buff_req;
+	void *mem_client;
+	struct v4l2_ctrl_handler ctrl_handler;
+	struct completion completions[SESSION_MSG_END - SESSION_MSG_START + 1];
+	struct v4l2_ctrl **cluster;
+	struct v4l2_fh event_handler;
+	bool in_reconfig;
+	u32 reconfig_width;
+	u32 reconfig_height;
+	struct dentry *debugfs_root;
+	void *priv;
+	struct msm_vidc_debug debug;
+	struct buf_count count;
+	struct dcvs_stats dcvs;
+	enum msm_vidc_modes flags;
+	struct msm_vidc_capability capability;
+	u32 buffer_size_limit;
+	enum buffer_mode_type buffer_mode_set[MAX_PORT_NUM];
+	atomic_t seq_hdr_reqs;
+	struct v4l2_ctrl **ctrls;
+	bool dcvs_mode;
+	enum msm_vidc_pixel_depth bit_depth;
+	struct kref kref;
+	unsigned long instant_bitrate;
+	u32 buffers_held_in_driver;
+	atomic_t in_flush;
+	u32 pic_struct;
+	u32 colour_space;
+	u32 operating_rate;
+};
+
+extern struct msm_vidc_drv *vidc_driver;
+
+struct msm_vidc_ctrl_cluster {
+	struct v4l2_ctrl **cluster;
+	struct list_head list;
+};
+
+struct msm_vidc_ctrl {
+	u32 id;
+	char name[MAX_NAME_LENGTH];
+	enum v4l2_ctrl_type type;
+	s32 minimum;
+	s32 maximum;
+	s32 default_value;
+	u32 step;
+	u32 menu_skip_mask;
+	u32 flags;
+	const char * const *qmenu;
+};
+
+void handle_cmd_response(enum hal_command_response cmd, void *data);
+int msm_vidc_trigger_ssr(struct msm_vidc_core *core,
+	enum hal_ssr_trigger_type type);
+int msm_vidc_check_session_supported(struct msm_vidc_inst *inst);
+int msm_vidc_check_scaling_supported(struct msm_vidc_inst *inst);
+void msm_vidc_queue_v4l2_event(struct msm_vidc_inst *inst, int event_type);
+
+struct buffer_info {
+	struct list_head list;
+	int type;
+	int num_planes;
+	int fd[VIDEO_MAX_PLANES];
+	int buff_off[VIDEO_MAX_PLANES];
+	int size[VIDEO_MAX_PLANES];
+	unsigned long uvaddr[VIDEO_MAX_PLANES];
+	ion_phys_addr_t device_addr[VIDEO_MAX_PLANES];
+	struct msm_smem *handle[VIDEO_MAX_PLANES];
+	enum v4l2_memory memory;
+	u32 v4l2_index;
+	bool pending_deletion;
+	atomic_t ref_count;
+	bool dequeued;
+	bool inactive;
+	bool mapped[VIDEO_MAX_PLANES];
+	int same_fd_ref[VIDEO_MAX_PLANES];
+	struct timeval timestamp;
+};
+
+struct buffer_info *device_to_uvaddr(struct msm_vidc_list *buf_list,
+				ion_phys_addr_t device_addr);
+int buf_ref_get(struct msm_vidc_inst *inst, struct buffer_info *binfo);
+int buf_ref_put(struct msm_vidc_inst *inst, struct buffer_info *binfo);
+int output_buffer_cache_invalidate(struct msm_vidc_inst *inst,
+		struct buffer_info *binfo, struct v4l2_buffer *b);
+int qbuf_dynamic_buf(struct msm_vidc_inst *inst,
+			struct buffer_info *binfo);
+int unmap_and_deregister_buf(struct msm_vidc_inst *inst,
+			struct buffer_info *binfo);
+
+void msm_comm_handle_thermal_event(void);
+void *msm_smem_new_client(enum smem_type mtype,
+		void *platform_resources, enum session_type stype);
+struct msm_smem *msm_smem_alloc(void *clt, size_t size, u32 align, u32 flags,
+		enum hal_buffer buffer_type, int map_kernel);
+void msm_smem_free(void *clt, struct msm_smem *mem);
+void msm_smem_delete_client(void *clt);
+int msm_smem_cache_operations(void *clt, struct msm_smem *mem,
+		enum smem_cache_ops, int size);
+struct msm_smem *msm_smem_user_to_kernel(void *clt, int fd, u32 offset,
+				enum hal_buffer buffer_type);
+struct context_bank_info *msm_smem_get_context_bank(void *clt,
+		bool is_secure, enum hal_buffer buffer_type);
+void msm_vidc_fw_unload_handler(struct work_struct *work);
+bool msm_smem_compare_buffers(void *clt, int fd, void *priv);
+/*
+ * XXX: this would normally live in msm_vidc.h, but that header is meant
+ * for public APIs, whereas this is private.
+ */
+int msm_vidc_destroy(struct msm_vidc_inst *inst);
+#endif
diff -Nruw linux-4.4.115/drivers/media/platform/msm/vidc/msm_vidc_resources.h linux-4.4.115-fbx/drivers/media/platform/msm/vidc/msm_vidc_resources.h
--- linux-4.4.115/drivers/media/platform/msm/vidc/msm_vidc_resources.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/media/platform/msm/vidc/msm_vidc_resources.h	2019-01-22 16:16:24.467255136 +0100
@@ -0,0 +1,208 @@
+/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __MSM_VIDC_RESOURCES_H__
+#define __MSM_VIDC_RESOURCES_H__
+
+#include <linux/devfreq.h>
+#include <linux/platform_device.h>
+#include <media/msm_vidc.h>
+#include "soc/qcom/cx_ipeak.h"
+#define MAX_BUFFER_TYPES 32
+
+struct platform_version_table {
+	u32 version_mask;
+	u32 version_shift;
+};
+
+struct load_freq_table {
+	u32 load;
+	u32 freq;
+	u32 supported_codecs;
+};
+
+struct dcvs_table {
+	u32 load;
+	u32 load_low;
+	u32 load_high;
+	u32 supported_codecs;
+};
+
+struct dcvs_limit {
+	u32 min_mbpf;
+	u32 fps;
+};
+
+struct imem_ab_table {
+	u32 core_freq;
+	u32 imem_ab;
+};
+
+struct reg_value_pair {
+	u32 reg;
+	u32 value;
+};
+
+struct reg_set {
+	struct reg_value_pair *reg_tbl;
+	int count;
+};
+
+struct addr_range {
+	u32 start;
+	u32 size;
+};
+
+struct addr_set {
+	struct addr_range *addr_tbl;
+	int count;
+};
+
+struct context_bank_info {
+	struct list_head list;
+	const char *name;
+	u32 buffer_type;
+	bool is_secure;
+	struct addr_range addr_range;
+	struct device *dev;
+	struct dma_iommu_mapping *mapping;
+};
+
+struct buffer_usage_table {
+	u32 buffer_type;
+	u32 tz_usage;
+};
+
+struct buffer_usage_set {
+	struct buffer_usage_table *buffer_usage_tbl;
+	u32 count;
+};
+
+struct regulator_info {
+	struct regulator *regulator;
+	bool has_hw_power_collapse;
+	char *name;
+};
+
+struct regulator_set {
+	struct regulator_info *regulator_tbl;
+	u32 count;
+};
+
+struct clock_info {
+	const char *name;
+	struct clk *clk;
+	struct load_freq_table *load_freq_tbl;
+	u32 count;
+	bool has_scaling;
+	bool has_mem_retention;
+	unsigned long rate_on_enable;
+};
+
+struct clock_set {
+	struct clock_info *clock_tbl;
+	u32 count;
+};
+
+struct bus_info {
+	char *name;
+	int master;
+	int slave;
+	unsigned int range[2];
+	const char *governor;
+	struct device *dev;
+	struct devfreq_dev_profile devfreq_prof;
+	struct devfreq *devfreq;
+	struct msm_bus_client_handle *client;
+};
+
+struct bus_set {
+	struct bus_info *bus_tbl;
+	u32 count;
+};
+
+enum imem_type {
+	IMEM_NONE,
+	IMEM_OCMEM,
+	IMEM_VMEM,
+	IMEM_MAX,
+};
+
+struct allowed_clock_rates_table {
+	u32 clock_rate;
+};
+
+struct clock_profile_entry {
+	u32 codec_mask;
+	u32 cycles;
+	u32 low_power_factor;
+};
+
+struct clock_freq_table {
+	struct clock_profile_entry *clk_prof_entries;
+	u32 count;
+};
+
+struct msm_vidc_platform_resources {
+	phys_addr_t firmware_base;
+	phys_addr_t register_base;
+	uint32_t register_size;
+	uint32_t irq;
+	struct platform_version_table *pf_ver_tbl;
+	struct allowed_clock_rates_table *allowed_clks_tbl;
+	u32 allowed_clks_tbl_size;
+	struct clock_freq_table clock_freq_tbl;
+	struct load_freq_table *load_freq_tbl;
+	uint32_t load_freq_tbl_size;
+	struct dcvs_table *dcvs_tbl;
+	uint32_t dcvs_tbl_size;
+	struct dcvs_limit *dcvs_limit;
+	struct imem_ab_table *imem_ab_tbl;
+	u32 imem_ab_tbl_size;
+	struct reg_set reg_set;
+	struct addr_set qdss_addr_set;
+	struct buffer_usage_set buffer_usage_set;
+	uint32_t imem_size;
+	enum imem_type imem_type;
+	uint32_t max_load;
+	uint32_t power_conf;
+	struct platform_device *pdev;
+	struct regulator_set regulator_set;
+	struct clock_set clock_set;
+	struct bus_set bus_set;
+	bool use_non_secure_pil;
+	bool sw_power_collapsible;
+	bool sys_idle_indicator;
+	bool slave_side_cp;
+	struct list_head context_banks;
+	bool thermal_mitigable;
+	const char *fw_name;
+	const char *hfi_version;
+	bool never_unload_fw;
+	bool debug_timeout;
+	uint32_t pm_qos_latency_us;
+	uint32_t max_inst_count;
+	uint32_t max_secure_inst_count;
+	uint32_t clk_freq_threshold;
+	struct cx_ipeak_client *cx_ipeak_context;
+};
+
+static inline bool is_iommu_present(struct msm_vidc_platform_resources *res)
+{
+	return !list_empty(&res->context_banks);
+}
+
+extern uint32_t msm_vidc_pwr_collapse_delay;
+
+#endif
diff -Nruw linux-4.4.115/drivers/media/platform/msm/vidc/msm_vidc_res_parse.c linux-4.4.115-fbx/drivers/media/platform/msm/vidc/msm_vidc_res_parse.c
--- linux-4.4.115/drivers/media/platform/msm/vidc/msm_vidc_res_parse.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/media/platform/msm/vidc/msm_vidc_res_parse.c	2019-01-22 16:16:24.467255136 +0100
@@ -0,0 +1,1538 @@
+/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <asm/dma-iommu.h>
+#include <linux/iommu.h>
+#include <linux/qcom_iommu.h>
+#include <linux/of.h>
+#include <linux/slab.h>
+#include <linux/sort.h>
+#include "msm_vidc_debug.h"
+#include "msm_vidc_resources.h"
+#include "msm_vidc_res_parse.h"
+#include "venus_boot.h"
+#include "soc/qcom/secure_buffer.h"
+#include "soc/qcom/cx_ipeak.h"
+
+enum clock_properties {
+	CLOCK_PROP_HAS_SCALING = 1 << 0,
+	CLOCK_PROP_HAS_MEM_RETENTION    = 1 << 1,
+};
+static int msm_vidc_populate_legacy_context_bank(
+			struct msm_vidc_platform_resources *res);
+
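+/*
+ * Returns the number of u32 cells occupied by a device-tree property,
+ * or 0 if the property is absent or empty.
+ */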
+static size_t get_u32_array_num_elements(struct device_node *np,
+					char *name)
+{
+	int len;
+	size_t num_elements = 0;
+	if (!of_get_property(np, name, &len)) {
+		dprintk(VIDC_ERR, "Failed to read %s from device tree\n",
+			name);
+		goto fail_read;
+	}
+
+	num_elements = len / sizeof(u32);
+	if (!num_elements) {
+		dprintk(VIDC_ERR, "%s not specified in device tree\n",
+			name);
+		goto fail_read;
+	}
+	return num_elements;
+
+fail_read:
+	return 0;
+}
+
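+/*
+ * Note: is_compatible() below is a GCC nested function (a GNU C
+ * extension). It probes the device tree for a compatible node to decide
+ * which internal-memory (IMEM) type the platform provides.
+ */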
+static inline enum imem_type read_imem_type(struct platform_device *pdev)
+{
+	bool is_compatible(char *compat)
+	{
+		return !!of_find_compatible_node(NULL, NULL, compat);
+	}
+
+	return is_compatible("qcom,msm-ocmem") ? IMEM_OCMEM :
+		is_compatible("qcom,msm-vmem") ? IMEM_VMEM :
+						IMEM_NONE;
+
+}
+
+static inline void msm_vidc_free_allowed_clocks_table(
+		struct msm_vidc_platform_resources *res)
+{
+	res->allowed_clks_tbl = NULL;
+}
+
+static inline void msm_vidc_free_cycles_per_mb_table(
+		struct msm_vidc_platform_resources *res)
+{
+	res->clock_freq_tbl.clk_prof_entries = NULL;
+}
+
+static inline void msm_vidc_free_platform_version_table(
+		struct msm_vidc_platform_resources *res)
+{
+	res->pf_ver_tbl = NULL;
+}
+
+static inline void msm_vidc_free_freq_table(
+		struct msm_vidc_platform_resources *res)
+{
+	res->load_freq_tbl = NULL;
+}
+
+static inline void msm_vidc_free_dcvs_table(
+		struct msm_vidc_platform_resources *res)
+{
+	res->dcvs_tbl = NULL;
+}
+
+static inline void msm_vidc_free_dcvs_limit(
+		struct msm_vidc_platform_resources *res)
+{
+	res->dcvs_limit = NULL;
+}
+
+static inline void msm_vidc_free_imem_ab_table(
+		struct msm_vidc_platform_resources *res)
+{
+	res->imem_ab_tbl = NULL;
+}
+
+static inline void msm_vidc_free_reg_table(
+			struct msm_vidc_platform_resources *res)
+{
+	res->reg_set.reg_tbl = NULL;
+}
+
+static inline void msm_vidc_free_qdss_addr_table(
+			struct msm_vidc_platform_resources *res)
+{
+	res->qdss_addr_set.addr_tbl = NULL;
+}
+
+static inline void msm_vidc_free_bus_vectors(
+			struct msm_vidc_platform_resources *res)
+{
+	kfree(res->bus_set.bus_tbl);
+	res->bus_set.bus_tbl = NULL;
+	res->bus_set.count = 0;
+}
+
+static inline void msm_vidc_free_buffer_usage_table(
+			struct msm_vidc_platform_resources *res)
+{
+	res->buffer_usage_set.buffer_usage_tbl = NULL;
+}
+
+static inline void msm_vidc_free_regulator_table(
+			struct msm_vidc_platform_resources *res)
+{
+	int c = 0;
+	for (c = 0; c < res->regulator_set.count; ++c) {
+		struct regulator_info *rinfo =
+			&res->regulator_set.regulator_tbl[c];
+
+		rinfo->name = NULL;
+	}
+
+	res->regulator_set.regulator_tbl = NULL;
+	res->regulator_set.count = 0;
+}
+
+static inline void msm_vidc_free_clock_table(
+			struct msm_vidc_platform_resources *res)
+{
+	res->clock_set.clock_tbl = NULL;
+	res->clock_set.count = 0;
+}
+
+void msm_vidc_free_platform_resources(
+			struct msm_vidc_platform_resources *res)
+{
+	msm_vidc_free_clock_table(res);
+	msm_vidc_free_regulator_table(res);
+	msm_vidc_free_freq_table(res);
+	msm_vidc_free_platform_version_table(res);
+	msm_vidc_free_dcvs_table(res);
+	msm_vidc_free_dcvs_limit(res);
+	msm_vidc_free_cycles_per_mb_table(res);
+	msm_vidc_free_allowed_clocks_table(res);
+	msm_vidc_free_reg_table(res);
+	msm_vidc_free_qdss_addr_table(res);
+	msm_vidc_free_bus_vectors(res);
+	msm_vidc_free_buffer_usage_table(res);
+	cx_ipeak_unregister(res->cx_ipeak_context);
+	res->cx_ipeak_context = NULL;
+}
+
+static int msm_vidc_load_reg_table(struct msm_vidc_platform_resources *res)
+{
+	struct reg_set *reg_set;
+	struct platform_device *pdev = res->pdev;
+	int i;
+	int rc = 0;
+
+	if (!of_find_property(pdev->dev.of_node, "qcom,reg-presets", NULL)) {
+		/*
+		 * qcom,reg-presets is an optional property. It likely won't
+		 * be present if we don't have any register settings to
+		 * program.
+		 */
+		dprintk(VIDC_DBG, "qcom,reg-presets not found\n");
+		return 0;
+	}
+
+	reg_set = &res->reg_set;
+	reg_set->count = get_u32_array_num_elements(pdev->dev.of_node,
+			"qcom,reg-presets");
+	reg_set->count /= sizeof(*reg_set->reg_tbl) / sizeof(u32);
+
+	if (!reg_set->count) {
+		dprintk(VIDC_DBG, "no elements in reg set\n");
+		return rc;
+	}
+
+	reg_set->reg_tbl = devm_kzalloc(&pdev->dev, reg_set->count *
+			sizeof(*(reg_set->reg_tbl)), GFP_KERNEL);
+	if (!reg_set->reg_tbl) {
+		dprintk(VIDC_ERR, "%s Failed to alloc register table\n",
+			__func__);
+		return -ENOMEM;
+	}
+
+	if (of_property_read_u32_array(pdev->dev.of_node, "qcom,reg-presets",
+		(u32 *)reg_set->reg_tbl, reg_set->count * 2)) {
+		dprintk(VIDC_ERR, "Failed to read register table\n");
+		msm_vidc_free_reg_table(res);
+		return -EINVAL;
+	}
+	for (i = 0; i < reg_set->count; i++) {
+		dprintk(VIDC_DBG,
+			"reg = %x, value = %x\n",
+			reg_set->reg_tbl[i].reg,
+			reg_set->reg_tbl[i].value
+		);
+	}
+	return rc;
+}
+
+static int msm_vidc_load_qdss_table(struct msm_vidc_platform_resources *res)
+{
+	struct addr_set *qdss_addr_set;
+	struct platform_device *pdev = res->pdev;
+	int i;
+	int rc = 0;
+
+	if (!of_find_property(pdev->dev.of_node, "qcom,qdss-presets", NULL)) {
+		/*
+		 * qcom,qdss-presets is an optional property. It likely won't
+		 * be present if we don't have any QDSS addresses to map.
+		 */
+		dprintk(VIDC_DBG, "qcom,qdss-presets not found\n");
+		return rc;
+	}
+
+	qdss_addr_set = &res->qdss_addr_set;
+	qdss_addr_set->count = get_u32_array_num_elements(pdev->dev.of_node,
+					"qcom,qdss-presets");
+	qdss_addr_set->count /= sizeof(*qdss_addr_set->addr_tbl) / sizeof(u32);
+
+	if (!qdss_addr_set->count) {
+		dprintk(VIDC_DBG, "no elements in qdss reg set\n");
+		return rc;
+	}
+
+	qdss_addr_set->addr_tbl = devm_kzalloc(&pdev->dev,
+			qdss_addr_set->count * sizeof(*qdss_addr_set->addr_tbl),
+			GFP_KERNEL);
+	if (!qdss_addr_set->addr_tbl) {
+		dprintk(VIDC_ERR, "%s Failed to alloc register table\n",
+			__func__);
+		rc = -ENOMEM;
+		goto err_qdss_addr_tbl;
+	}
+
+	rc = of_property_read_u32_array(pdev->dev.of_node, "qcom,qdss-presets",
+		(u32 *)qdss_addr_set->addr_tbl, qdss_addr_set->count * 2);
+	if (rc) {
+		dprintk(VIDC_ERR, "Failed to read qdss address table\n");
+		msm_vidc_free_qdss_addr_table(res);
+		rc = -EINVAL;
+		goto err_qdss_addr_tbl;
+	}
+
+	for (i = 0; i < qdss_addr_set->count; i++) {
+		dprintk(VIDC_DBG, "qdss addr = %x, value = %x\n",
+				qdss_addr_set->addr_tbl[i].start,
+				qdss_addr_set->addr_tbl[i].size);
+	}
+err_qdss_addr_tbl:
+	return rc;
+}
+
+static int msm_vidc_load_imem_ab_table(struct msm_vidc_platform_resources *res)
+{
+	int num_elements = 0;
+	struct platform_device *pdev = res->pdev;
+
+	if (!of_find_property(pdev->dev.of_node, "qcom,imem-ab-tbl", NULL)) {
+		/* optional property */
+		dprintk(VIDC_DBG, "qcom,imem-freq-tbl not found\n");
+		return 0;
+	}
+
+	num_elements = get_u32_array_num_elements(pdev->dev.of_node,
+			"qcom,imem-ab-tbl");
+	num_elements /= (sizeof(*res->imem_ab_tbl) / sizeof(u32));
+	if (!num_elements) {
+		dprintk(VIDC_ERR, "no elements in imem ab table\n");
+		return -EINVAL;
+	}
+
+	res->imem_ab_tbl = devm_kzalloc(&pdev->dev, num_elements *
+			sizeof(*res->imem_ab_tbl), GFP_KERNEL);
+	if (!res->imem_ab_tbl) {
+		dprintk(VIDC_ERR, "Failed to alloc imem_ab_tbl\n");
+		return -ENOMEM;
+	}
+
+	if (of_property_read_u32_array(pdev->dev.of_node,
+		"qcom,imem-ab-tbl", (u32 *)res->imem_ab_tbl,
+		num_elements * sizeof(*res->imem_ab_tbl) / sizeof(u32))) {
+		dprintk(VIDC_ERR, "Failed to read imem_ab_tbl\n");
+		msm_vidc_free_imem_ab_table(res);
+		return -EINVAL;
+	}
+
+	res->imem_ab_tbl_size = num_elements;
+
+	return 0;
+}
+
+/**
+ * msm_vidc_load_u32_table() - load dtsi table entries
+ * @pdev:         A pointer to the platform device.
+ * @of_node:      A pointer to the device node.
+ * @table_name:   A pointer to the dtsi table entry name.
+ * @struct_size:  Size in bytes of a single entry in the dtsi table.
+ * @table:        Output pointer; on success it is set to the allocated
+ *                table filled with the dtsi entries.
+ * @num_elements: Optional output pointer for the number of entries read;
+ *                may be NULL.
+ *
+ * Generic helper to load a table of fixed-size entries from dtsi. Each
+ * entry must consist entirely of u32-sized fields.
+ *
+ * Return:        0 on success, else an appropriate negative error value.
+ */
+int msm_vidc_load_u32_table(struct platform_device *pdev,
+		struct device_node *of_node, char *table_name, int struct_size,
+		u32 **table, u32 *num_elements)
+{
+	int rc = 0, num_elems = 0;
+	u32 *ptbl = NULL;
+
+	if (!of_find_property(of_node, table_name, NULL)) {
+		dprintk(VIDC_DBG, "%s not found\n", table_name);
+		return 0;
+	}
+
+	num_elems = get_u32_array_num_elements(of_node, table_name);
+	if (!num_elems) {
+		dprintk(VIDC_ERR, "no elements in %s\n", table_name);
+		return 0;
+	}
+	num_elems /= struct_size / sizeof(u32);
+
+	ptbl = devm_kzalloc(&pdev->dev, num_elems * struct_size, GFP_KERNEL);
+	if (!ptbl) {
+		dprintk(VIDC_ERR, "Failed to alloc table %s\n", table_name);
+		return -ENOMEM;
+	}
+
+	if (of_property_read_u32_array(of_node, table_name, ptbl,
+			num_elems * struct_size / sizeof(u32))) {
+		dprintk(VIDC_ERR, "Failed to read %s\n", table_name);
+		return -EINVAL;
+	}
+
+	*table = ptbl;
+	if (num_elements)
+		*num_elements = num_elems;
+
+	return rc;
+}
+EXPORT_SYMBOL(msm_vidc_load_u32_table);
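+
+/*
+ * A minimal usage sketch for msm_vidc_load_u32_table(), assuming a
+ * hypothetical property "qcom,example-tbl" whose entries are pairs of
+ * u32 values (so struct_size is 2 * sizeof(u32)):
+ *
+ *	struct example_entry { u32 key; u32 value; } *tbl = NULL;
+ *	u32 count = 0;
+ *	int rc;
+ *
+ *	rc = msm_vidc_load_u32_table(pdev, pdev->dev.of_node,
+ *			"qcom,example-tbl", sizeof(*tbl),
+ *			(u32 **)&tbl, &count);
+ *
+ * On success, tbl points at devm-allocated storage holding count entries,
+ * so no explicit free is needed; passing NULL for num_elements skips the
+ * count output. Property name and entry layout are illustrative only.
+ */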
+
+static int msm_vidc_load_platform_version_table(
+		struct msm_vidc_platform_resources *res)
+{
+	int rc = 0;
+	struct platform_device *pdev = res->pdev;
+
+	if (!of_find_property(pdev->dev.of_node,
+			"qcom,platform-version", NULL)) {
+		dprintk(VIDC_DBG, "qcom,platform-version not found\n");
+		return 0;
+	}
+
+	rc = msm_vidc_load_u32_table(pdev, pdev->dev.of_node,
+			"qcom,platform-version",
+			sizeof(*res->pf_ver_tbl),
+			(u32 **)&res->pf_ver_tbl,
+			NULL);
+	if (rc) {
+		dprintk(VIDC_ERR,
+			"%s: failed to read platform version table\n",
+			__func__);
+		return rc;
+	}
+
+	return 0;
+}
+
+static int msm_vidc_load_allowed_clocks_table(
+		struct msm_vidc_platform_resources *res)
+{
+	int rc = 0;
+	struct platform_device *pdev = res->pdev;
+
+	if (!of_find_property(pdev->dev.of_node,
+			"qcom,allowed-clock-rates", NULL)) {
+		dprintk(VIDC_DBG, "qcom,allowed-clock-rates not found\n");
+		return 0;
+	}
+
+	rc = msm_vidc_load_u32_table(pdev, pdev->dev.of_node,
+				"qcom,allowed-clock-rates",
+				sizeof(*res->allowed_clks_tbl),
+				(u32 **)&res->allowed_clks_tbl,
+				&res->allowed_clks_tbl_size);
+	if (rc) {
+		dprintk(VIDC_ERR,
+			"%s: failed to read allowed clocks table\n", __func__);
+		return rc;
+	}
+
+	return 0;
+}
+
+static int msm_vidc_load_cycles_per_mb_table(
+		struct msm_vidc_platform_resources *res)
+{
+	int rc = 0, i = 0;
+	struct clock_freq_table *clock_freq_tbl = &res->clock_freq_tbl;
+	struct clock_profile_entry *entry = NULL;
+	struct device_node *parent_node = NULL;
+	struct device_node *child_node = NULL;
+	struct platform_device *pdev = res->pdev;
+
+	parent_node = of_find_node_by_name(pdev->dev.of_node,
+			"qcom,clock-freq-tbl");
+	if (!parent_node) {
+		dprintk(VIDC_DBG, "Node qcom,clock-freq-tbl not found.\n");
+		return 0;
+	}
+
+	clock_freq_tbl->count = 0;
+	for_each_child_of_node(parent_node, child_node)
+		clock_freq_tbl->count++;
+
+	if (!clock_freq_tbl->count) {
+		dprintk(VIDC_DBG, "No child nodes in qcom,clock-freq-tbl\n");
+		return 0;
+	}
+
+	clock_freq_tbl->clk_prof_entries = devm_kzalloc(&pdev->dev,
+		sizeof(*clock_freq_tbl->clk_prof_entries) *
+		clock_freq_tbl->count, GFP_KERNEL);
+	if (!clock_freq_tbl->clk_prof_entries) {
+		dprintk(VIDC_DBG, "no memory to allocate clk_prof_entries\n");
+		return -ENOMEM;
+	}
+
+	for_each_child_of_node(parent_node, child_node) {
+
+		if (i >= clock_freq_tbl->count) {
+			dprintk(VIDC_ERR,
+				"qcom,clock-freq-tbl: invalid child node %d, max is %d\n",
+				i, clock_freq_tbl->count);
+			break;
+		}
+
+		entry = &clock_freq_tbl->clk_prof_entries[i];
+		dprintk(VIDC_DBG, "qcom,clock-freq-tbl: profile[%d]\n", i);
+
+		if (of_find_property(child_node, "qcom,codec-mask", NULL)) {
+			rc = of_property_read_u32(child_node,
+					"qcom,codec-mask", &entry->codec_mask);
+			if (rc) {
+				dprintk(VIDC_ERR,
+					"qcom,codec-mask not found\n");
+				goto error;
+			}
+		} else {
+			entry->codec_mask = 0;
+		}
+		dprintk(VIDC_DBG, "codec_mask %#x\n", entry->codec_mask);
+
+		if (of_find_property(child_node, "qcom,cycles-per-mb", NULL)) {
+			rc = of_property_read_u32(child_node,
+					"qcom,cycles-per-mb", &entry->cycles);
+			if (rc) {
+				dprintk(VIDC_ERR,
+					"qcom,cycles-per-mb not found\n");
+				goto error;
+			}
+		} else {
+			entry->cycles = 0;
+		}
+		dprintk(VIDC_DBG, "cycles_per_mb %d\n", entry->cycles);
+
+		if (of_find_property(child_node,
+				"qcom,low-power-mode-factor", NULL)) {
+			rc = of_property_read_u32(child_node,
+					"qcom,low-power-mode-factor",
+					&entry->low_power_factor);
+			if (rc) {
+				dprintk(VIDC_ERR,
+					"qcom,low-power-mode-factor not found\n");
+				goto error;
+			}
+		} else {
+			entry->low_power_factor = 0;
+		}
+		dprintk(VIDC_DBG, "low_power_factor %d\n",
+				entry->low_power_factor);
+
+		i++;
+	}
+
+error:
+	return rc;
+}
+
+static int msm_vidc_load_freq_table(struct msm_vidc_platform_resources *res)
+{
+	int rc = 0;
+	int num_elements = 0;
+	struct platform_device *pdev = res->pdev;
+
+	/* A comparator to compare loads (needed later on) */
+	int cmp(const void *a, const void *b)
+	{
+		/* want to sort in reverse so flip the comparison */
+		return ((struct load_freq_table *)b)->load -
+			((struct load_freq_table *)a)->load;
+	}
+
+	if (!of_find_property(pdev->dev.of_node, "qcom,load-freq-tbl", NULL)) {
+		/*
+		 * qcom,load-freq-tbl is an optional property. It likely
+		 * won't be present on cores that don't support clock
+		 * scaling.
+		 */
+		dprintk(VIDC_DBG, "qcom,load-freq-tbl not found\n");
+		return 0;
+	}
+
+	num_elements = get_u32_array_num_elements(pdev->dev.of_node,
+			"qcom,load-freq-tbl");
+	num_elements /= sizeof(*res->load_freq_tbl) / sizeof(u32);
+	if (!num_elements) {
+		dprintk(VIDC_ERR, "no elements in frequency table\n");
+		return rc;
+	}
+
+	res->load_freq_tbl = devm_kzalloc(&pdev->dev, num_elements *
+			sizeof(*res->load_freq_tbl), GFP_KERNEL);
+	if (!res->load_freq_tbl) {
+		dprintk(VIDC_ERR,
+				"%s Failed to alloc load_freq_tbl\n",
+				__func__);
+		return -ENOMEM;
+	}
+
+	if (of_property_read_u32_array(pdev->dev.of_node,
+		"qcom,load-freq-tbl", (u32 *)res->load_freq_tbl,
+		num_elements * sizeof(*res->load_freq_tbl) / sizeof(u32))) {
+		dprintk(VIDC_ERR, "Failed to read frequency table\n");
+		msm_vidc_free_freq_table(res);
+		return -EINVAL;
+	}
+
+	res->load_freq_tbl_size = num_elements;
+
+	/*
+	 * The entries in the DT might not be sorted (for aesthetic
+	 * purposes). Given that our scaling logic expects the loads in
+	 * descending order, just sort them ourselves.
+	 */
+	sort(res->load_freq_tbl, res->load_freq_tbl_size,
+			sizeof(*res->load_freq_tbl), cmp, NULL);
+	return rc;
+}
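+
+/*
+ * For illustration, a hypothetical qcom,load-freq-tbl with three entries
+ * (values made up, and assuming for the sketch that one load_freq_table
+ * entry is two u32s: load then frequency). Entries may appear in any
+ * order in the dtsi since the loader sorts them by descending load:
+ *
+ *	qcom,load-freq-tbl =
+ *		<108000 100000000>,
+ *		<489600 266670000>,
+ *		<244800 160000000>;
+ *
+ * Whatever the real entry layout is, the division by
+ * sizeof(*res->load_freq_tbl) / sizeof(u32) above converts the raw u32
+ * count into the number of table entries.
+ */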
+
+static int msm_vidc_load_dcvs_table(struct msm_vidc_platform_resources *res)
+{
+	int rc = 0;
+	int num_elements = 0;
+	struct platform_device *pdev = res->pdev;
+
+	if (!of_find_property(pdev->dev.of_node, "qcom,dcvs-tbl", NULL)) {
+		/*
+		 * qcom,dcvs-tbl is an optional property. In case the
+		 * qcom,dcvs-limit property is present, it becomes mandatory.
+		 * It likely won't be present on targets that do not support
+		 * the feature.
+		 */
+		dprintk(VIDC_DBG, "qcom,dcvs-tbl not found\n");
+		return 0;
+	}
+
+	num_elements = get_u32_array_num_elements(pdev->dev.of_node,
+			"qcom,dcvs-tbl");
+	num_elements /= sizeof(*res->dcvs_tbl) / sizeof(u32);
+	if (!num_elements) {
+		dprintk(VIDC_ERR, "no elements in dcvs table\n");
+		return rc;
+	}
+
+	res->dcvs_tbl = devm_kzalloc(&pdev->dev, num_elements *
+			sizeof(*res->dcvs_tbl), GFP_KERNEL);
+	if (!res->dcvs_tbl) {
+		dprintk(VIDC_ERR,
+				"%s Failed to alloc dcvs_tbl\n",
+				__func__);
+		return -ENOMEM;
+	}
+
+	if (of_property_read_u32_array(pdev->dev.of_node,
+		"qcom,dcvs-tbl", (u32 *)res->dcvs_tbl,
+		num_elements * sizeof(*res->dcvs_tbl) / sizeof(u32))) {
+		dprintk(VIDC_ERR, "Failed to read dcvs table\n");
+		msm_vidc_free_dcvs_table(res);
+		return -EINVAL;
+	}
+	res->dcvs_tbl_size = num_elements;
+
+	return rc;
+}
+
+static int msm_vidc_load_dcvs_limit(struct msm_vidc_platform_resources *res)
+{
+	int rc = 0;
+	int num_elements = 0;
+	struct platform_device *pdev = res->pdev;
+
+	if (!of_find_property(pdev->dev.of_node, "qcom,dcvs-limit", NULL)) {
+		/*
+		 * qcom,dcvs-limit is an optional property. In case the
+		 * qcom,dcvs-tbl property is present, it becomes mandatory.
+		 * It likely won't be present on targets that do not support
+		 * the feature.
+		 */
+		dprintk(VIDC_DBG, "qcom,dcvs-limit not found\n");
+		return 0;
+	}
+
+	num_elements = get_u32_array_num_elements(pdev->dev.of_node,
+			"qcom,dcvs-limit");
+	num_elements /= sizeof(*res->dcvs_limit) / sizeof(u32);
+	if (!num_elements) {
+		dprintk(VIDC_ERR, "no elements in dcvs limit\n");
+		res->dcvs_limit = NULL;
+		return rc;
+	}
+
+	res->dcvs_limit = devm_kzalloc(&pdev->dev, num_elements *
+			sizeof(*res->dcvs_limit), GFP_KERNEL);
+	if (!res->dcvs_limit) {
+		dprintk(VIDC_ERR,
+				"%s Failed to alloc dcvs_limit\n",
+				__func__);
+		return -ENOMEM;
+	}
+	if (of_property_read_u32_array(pdev->dev.of_node,
+		"qcom,dcvs-limit", (u32 *)res->dcvs_limit,
+		num_elements * sizeof(*res->dcvs_limit) / sizeof(u32))) {
+		dprintk(VIDC_ERR, "Failed to read dcvs limit\n");
+		msm_vidc_free_dcvs_limit(res);
+		return -EINVAL;
+	}
+
+	return rc;
+}
+
+static int msm_vidc_populate_bus(struct device *dev,
+		struct msm_vidc_platform_resources *res)
+{
+	struct bus_set *buses = &res->bus_set;
+	const char *temp_name = NULL;
+	struct bus_info *bus = NULL, *temp_table;
+	u32 range[2];
+	int rc = 0;
+
+	temp_table = krealloc(buses->bus_tbl, sizeof(*temp_table) *
+			(buses->count + 1), GFP_KERNEL);
+	if (!temp_table) {
+		dprintk(VIDC_ERR, "%s: Failed to allocate memory", __func__);
+		rc = -ENOMEM;
+		goto err_bus;
+	}
+
+	buses->bus_tbl = temp_table;
+	bus = &buses->bus_tbl[buses->count];
+
+	rc = of_property_read_string(dev->of_node, "label", &temp_name);
+	if (rc) {
+		dprintk(VIDC_ERR, "'label' not found in node\n");
+		goto err_bus;
+	}
+	/* need a non-const version of name, hence copying it over */
+	bus->name = devm_kstrdup(dev, temp_name, GFP_KERNEL);
+	if (!bus->name) {
+		rc = -ENOMEM;
+		goto err_bus;
+	}
+
+	rc = of_property_read_u32(dev->of_node, "qcom,bus-master",
+			&bus->master);
+	if (rc) {
+		dprintk(VIDC_ERR, "'qcom,bus-master' not found in node\n");
+		goto err_bus;
+	}
+
+	rc = of_property_read_u32(dev->of_node, "qcom,bus-slave", &bus->slave);
+	if (rc) {
+		dprintk(VIDC_ERR, "'qcom,bus-slave' not found in node\n");
+		goto err_bus;
+	}
+
+	rc = of_property_read_string(dev->of_node, "qcom,bus-governor",
+			&bus->governor);
+	if (rc) {
+		rc = 0;
+		dprintk(VIDC_DBG,
+				"'qcom,bus-governor' not found, default to performance governor\n");
+		bus->governor = "performance";
+	}
+
+	rc = of_property_read_u32_array(dev->of_node, "qcom,bus-range-kbps",
+			range, ARRAY_SIZE(range));
+	if (rc) {
+		rc = 0;
+		dprintk(VIDC_DBG,
+				"'qcom,range' not found defaulting to <0 INT_MAX>\n");
+		range[0] = 0;
+		range[1] = INT_MAX;
+	}
+
+	bus->range[0] = range[0]; /* min */
+	bus->range[1] = range[1]; /* max */
+
+	buses->count++;
+	bus->dev = dev;
+	dprintk(VIDC_DBG, "Found bus %s [%d->%d] with governor %s\n",
+			bus->name, bus->master, bus->slave, bus->governor);
+err_bus:
+	return rc;
+}
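+
+/*
+ * A sketch of the bus device node shape msm_vidc_populate_bus() parses,
+ * with a made-up node name, master/slave IDs and bandwidth range:
+ *
+ *	venus_bus_cnoc {
+ *		label = "venus-cnoc";
+ *		qcom,bus-master = <1>;
+ *		qcom,bus-slave = <2>;
+ *		qcom,bus-governor = "performance";
+ *		qcom,bus-range-kbps = <1000 3388000>;
+ *	};
+ *
+ * Only label, qcom,bus-master and qcom,bus-slave are mandatory; the
+ * governor defaults to "performance" and the range to <0 INT_MAX> when
+ * absent.
+ */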
+
+static int msm_vidc_load_buffer_usage_table(
+		struct msm_vidc_platform_resources *res)
+{
+	int rc = 0;
+	struct platform_device *pdev = res->pdev;
+	struct buffer_usage_set *buffer_usage_set = &res->buffer_usage_set;
+
+	if (!of_find_property(pdev->dev.of_node,
+				"qcom,buffer-type-tz-usage-table", NULL)) {
+		/*
+		 * qcom,buffer-type-tz-usage-table is an optional property.
+		 * It likely won't be present if the core doesn't support
+		 * content protection.
+		 */
+		dprintk(VIDC_DBG, "buffer-type-tz-usage-table not found\n");
+		return 0;
+	}
+
+	buffer_usage_set->count = get_u32_array_num_elements(
+		pdev->dev.of_node, "qcom,buffer-type-tz-usage-table");
+	buffer_usage_set->count /=
+		sizeof(*buffer_usage_set->buffer_usage_tbl) / sizeof(u32);
+	if (!buffer_usage_set->count) {
+		dprintk(VIDC_DBG, "no elements in buffer usage set\n");
+		return 0;
+	}
+
+	buffer_usage_set->buffer_usage_tbl = devm_kzalloc(&pdev->dev,
+			buffer_usage_set->count *
+			sizeof(*buffer_usage_set->buffer_usage_tbl),
+			GFP_KERNEL);
+	if (!buffer_usage_set->buffer_usage_tbl) {
+		dprintk(VIDC_ERR, "%s Failed to alloc buffer usage table\n",
+			__func__);
+		rc = -ENOMEM;
+		goto err_load_buf_usage;
+	}
+
+	rc = of_property_read_u32_array(pdev->dev.of_node,
+		    "qcom,buffer-type-tz-usage-table",
+		(u32 *)buffer_usage_set->buffer_usage_tbl,
+		buffer_usage_set->count *
+		sizeof(*buffer_usage_set->buffer_usage_tbl) / sizeof(u32));
+	if (rc) {
+		dprintk(VIDC_ERR, "Failed to read buffer usage table\n");
+		goto err_load_buf_usage;
+	}
+
+	return 0;
+err_load_buf_usage:
+	msm_vidc_free_buffer_usage_table(res);
+	return rc;
+}
+
+static int msm_vidc_load_regulator_table(
+		struct msm_vidc_platform_resources *res)
+{
+	int rc = 0;
+	struct platform_device *pdev = res->pdev;
+	struct regulator_set *regulators = &res->regulator_set;
+	struct device_node *domains_parent_node = NULL;
+	struct property *domains_property = NULL;
+	int reg_count = 0;
+
+	regulators->count = 0;
+	regulators->regulator_tbl = NULL;
+
+	domains_parent_node = pdev->dev.of_node;
+	for_each_property_of_node(domains_parent_node, domains_property) {
+		const char *search_string = "-supply";
+		char *supply;
+		bool matched = false;
+
+		/* check if current property is possibly a regulator */
+		supply = strnstr(domains_property->name, search_string,
+				strlen(domains_property->name) + 1);
+		matched = supply && (supply[strlen(search_string)] == '\0');
+		if (!matched)
+			continue;
+
+		reg_count++;
+	}
+
+	regulators->regulator_tbl = devm_kzalloc(&pdev->dev,
+			sizeof(*regulators->regulator_tbl) *
+			reg_count, GFP_KERNEL);
+
+	if (!regulators->regulator_tbl) {
+		rc = -ENOMEM;
+		dprintk(VIDC_ERR,
+			"Failed to alloc memory for regulator table\n");
+		goto err_reg_tbl_alloc;
+	}
+
+	for_each_property_of_node(domains_parent_node, domains_property) {
+		const char *search_string = "-supply";
+		char *supply;
+		bool matched = false;
+		struct device_node *regulator_node = NULL;
+		struct regulator_info *rinfo = NULL;
+
+		/* check if current property is possibly a regulator */
+		supply = strnstr(domains_property->name, search_string,
+				strlen(domains_property->name) + 1);
+		matched = supply && (supply[strlen(search_string)] == '\0');
+		if (!matched)
+			continue;
+
+		/* make sure prop isn't being misused */
+		regulator_node = of_parse_phandle(domains_parent_node,
+				domains_property->name, 0);
+		if (!regulator_node) {
+			dprintk(VIDC_WARN, "%s is not a phandle\n",
+					domains_property->name);
+			continue;
+		}
+		regulators->count++;
+
+		/* populate regulator info */
+		rinfo = &regulators->regulator_tbl[regulators->count - 1];
+		rinfo->name = devm_kzalloc(&pdev->dev,
+			(supply - domains_property->name) + 1, GFP_KERNEL);
+		if (!rinfo->name) {
+			rc = -ENOMEM;
+			dprintk(VIDC_ERR,
+					"Failed to alloc memory for regulator name\n");
+			goto err_reg_name_alloc;
+		}
+		strlcpy(rinfo->name, domains_property->name,
+			(supply - domains_property->name) + 1);
+
+		rinfo->has_hw_power_collapse = of_property_read_bool(
+			regulator_node, "qcom,support-hw-trigger");
+
+		dprintk(VIDC_DBG, "Found regulator %s: h/w collapse = %s\n",
+				rinfo->name,
+				rinfo->has_hw_power_collapse ? "yes" : "no");
+	}
+
+	if (!regulators->count)
+		dprintk(VIDC_DBG, "No regulators found");
+
+	return 0;
+
+err_reg_name_alloc:
+err_reg_tbl_alloc:
+	msm_vidc_free_regulator_table(res);
+	return rc;
+}
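+
+/*
+ * The scan above keys off the standard "-supply" suffix. For a
+ * hypothetical property
+ *
+ *	venus-supply = <&venus_gdsc>;
+ *
+ * the resulting regulator_info is named "venus" (the suffix is dropped
+ * by copying supply - name bytes plus a terminating NUL), and
+ * qcom,support-hw-trigger on the referenced regulator node determines
+ * has_hw_power_collapse.
+ */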
+
+static int msm_vidc_load_clock_table(
+		struct msm_vidc_platform_resources *res)
+{
+	int rc = 0, num_clocks = 0, c = 0;
+	struct platform_device *pdev = res->pdev;
+	int *clock_props = NULL;
+	struct clock_set *clocks = &res->clock_set;
+
+	num_clocks = of_property_count_strings(pdev->dev.of_node,
+				"clock-names");
+	if (num_clocks <= 0) {
+		dprintk(VIDC_DBG, "No clocks found\n");
+		clocks->count = 0;
+		rc = 0;
+		goto err_load_clk_table_fail;
+	}
+
+	clock_props = devm_kzalloc(&pdev->dev, num_clocks *
+			sizeof(*clock_props), GFP_KERNEL);
+	if (!clock_props) {
+		dprintk(VIDC_ERR, "No memory to read clock properties\n");
+		rc = -ENOMEM;
+		goto err_load_clk_table_fail;
+	}
+
+	rc = of_property_read_u32_array(pdev->dev.of_node,
+				"qcom,clock-configs", clock_props,
+				num_clocks);
+	if (rc) {
+		dprintk(VIDC_ERR, "Failed to read clock properties: %d\n", rc);
+		goto err_load_clk_prop_fail;
+	}
+
+	clocks->clock_tbl = devm_kzalloc(&pdev->dev, sizeof(*clocks->clock_tbl)
+			* num_clocks, GFP_KERNEL);
+	if (!clocks->clock_tbl) {
+		dprintk(VIDC_ERR, "Failed to allocate memory for clock tbl\n");
+		rc = -ENOMEM;
+		goto err_load_clk_prop_fail;
+	}
+
+	clocks->count = num_clocks;
+	dprintk(VIDC_DBG, "Found %d clocks\n", num_clocks);
+
+	for (c = 0; c < num_clocks; ++c) {
+		struct clock_info *vc = &res->clock_set.clock_tbl[c];
+
+		of_property_read_string_index(pdev->dev.of_node,
+				"clock-names", c, &vc->name);
+
+		if (clock_props[c] & CLOCK_PROP_HAS_SCALING) {
+			vc->has_scaling = true;
+			vc->count = res->load_freq_tbl_size;
+			vc->load_freq_tbl = res->load_freq_tbl;
+		} else {
+			vc->count = 0;
+			vc->load_freq_tbl = NULL;
+			vc->has_scaling = false;
+		}
+
+		if (clock_props[c] & CLOCK_PROP_HAS_MEM_RETENTION)
+			vc->has_mem_retention = true;
+		else
+			vc->has_mem_retention = false;
+
+		dprintk(VIDC_DBG, "Found clock %s: scale-able = %s\n", vc->name,
+			vc->count ? "yes" : "no");
+	}
+
+	return 0;
+
+err_load_clk_prop_fail:
+err_load_clk_table_fail:
+	return rc;
+}
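+
+/*
+ * clock-names and qcom,clock-configs are parallel arrays: entry N of
+ * qcom,clock-configs holds the clock_properties bitmask for clock N.
+ * A hypothetical pairing (clock names made up):
+ *
+ *	clock-names = "core_clk", "iface_clk";
+ *	qcom,clock-configs = <0x3 0x0>;
+ *
+ * marks core_clk as CLOCK_PROP_HAS_SCALING | CLOCK_PROP_HAS_MEM_RETENTION
+ * and iface_clk as neither.
+ */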
+
+int read_platform_resources_from_dt(
+		struct msm_vidc_platform_resources *res)
+{
+	struct platform_device *pdev = res->pdev;
+	struct resource *kres = NULL;
+	int rc = 0;
+	uint32_t firmware_base = 0;
+
+	if (!pdev->dev.of_node) {
+		dprintk(VIDC_ERR, "DT node not found\n");
+		return -ENOENT;
+	}
+
+	INIT_LIST_HEAD(&res->context_banks);
+
+	res->firmware_base = (phys_addr_t)firmware_base;
+
+	kres = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	res->register_base = kres ? kres->start : -1;
+	res->register_size = kres ? (kres->end + 1 - kres->start) : -1;
+
+	kres = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+	res->irq = kres ? kres->start : -1;
+
+	of_property_read_u32(pdev->dev.of_node,
+			"qcom,imem-size", &res->imem_size);
+	res->imem_type = read_imem_type(pdev);
+
+	res->sys_idle_indicator = of_property_read_bool(pdev->dev.of_node,
+			"qcom,enable-idle-indicator");
+
+	res->thermal_mitigable =
+			of_property_read_bool(pdev->dev.of_node,
+			"qcom,enable-thermal-mitigation");
+
+	rc = of_property_read_string(pdev->dev.of_node, "qcom,firmware-name",
+			&res->fw_name);
+	if (rc) {
+		dprintk(VIDC_ERR, "Failed to read firmware name: %d\n", rc);
+		goto err_load_freq_table;
+	}
+	dprintk(VIDC_DBG, "Firmware filename: %s\n", res->fw_name);
+
+	rc = of_property_read_string(pdev->dev.of_node, "qcom,hfi-version",
+			&res->hfi_version);
+	if (rc)
+		dprintk(VIDC_DBG, "HFI packetization will default to legacy\n");
+
+	rc = msm_vidc_load_platform_version_table(res);
+	if (rc)
+		dprintk(VIDC_ERR, "Failed to load pf version table: %d\n", rc);
+
+	rc = msm_vidc_load_freq_table(res);
+	if (rc) {
+		dprintk(VIDC_ERR, "Failed to load freq table: %d\n", rc);
+		goto err_load_freq_table;
+	}
+
+	rc = msm_vidc_load_dcvs_table(res);
+	if (rc)
+		dprintk(VIDC_WARN, "Failed to load dcvs table: %d\n", rc);
+
+	rc = msm_vidc_load_dcvs_limit(res);
+	if (rc)
+		dprintk(VIDC_WARN, "Failed to load dcvs limit: %d\n", rc);
+
+	rc = msm_vidc_load_imem_ab_table(res);
+	if (rc)
+		dprintk(VIDC_WARN, "Failed to load freq table: %d\n", rc);
+
+	rc = msm_vidc_load_qdss_table(res);
+	if (rc)
+		dprintk(VIDC_WARN, "Failed to load qdss reg table: %d\n", rc);
+
+	rc = msm_vidc_load_reg_table(res);
+	if (rc) {
+		dprintk(VIDC_ERR, "Failed to load reg table: %d\n", rc);
+		goto err_load_reg_table;
+	}
+
+	rc = msm_vidc_load_buffer_usage_table(res);
+	if (rc) {
+		dprintk(VIDC_ERR,
+			"Failed to load buffer usage table: %d\n", rc);
+		goto err_load_buffer_usage_table;
+	}
+
+	rc = msm_vidc_load_regulator_table(res);
+	if (rc) {
+		dprintk(VIDC_ERR, "Failed to load list of regulators %d\n", rc);
+		goto err_load_regulator_table;
+	}
+
+	rc = msm_vidc_load_clock_table(res);
+	if (rc) {
+		dprintk(VIDC_ERR,
+			"Failed to load clock table: %d\n", rc);
+		goto err_load_clock_table;
+	}
+
+	rc = msm_vidc_load_cycles_per_mb_table(res);
+	if (rc) {
+		dprintk(VIDC_ERR,
+			"Failed to load cycles per mb table: %d\n", rc);
+		goto err_load_cycles_per_mb_table;
+	}
+
+	rc = msm_vidc_load_allowed_clocks_table(res);
+	if (rc) {
+		dprintk(VIDC_ERR,
+			"Failed to load allowed clocks table: %d\n", rc);
+		goto err_load_allowed_clocks_table;
+	}
+
+	rc = of_property_read_u32(pdev->dev.of_node, "qcom,max-hw-load",
+			&res->max_load);
+	if (rc) {
+		dprintk(VIDC_ERR,
+			"Failed to determine max load supported: %d\n", rc);
+		goto err_load_max_hw_load;
+	}
+
+	rc = of_property_read_u32(pdev->dev.of_node, "qcom,power-conf",
+			&res->power_conf);
+	if (rc) {
+		dprintk(VIDC_DBG,
+			"Failed to read power configuration: %d\n", rc);
+	}
+
+	rc = msm_vidc_populate_legacy_context_bank(res);
+	if (rc) {
+		dprintk(VIDC_ERR,
+			"Failed to setup context banks %d\n", rc);
+		goto err_setup_legacy_cb;
+	}
+
+	res->use_non_secure_pil = of_property_read_bool(pdev->dev.of_node,
+			"qcom,use-non-secure-pil");
+
+	if (res->use_non_secure_pil || !is_iommu_present(res)) {
+		of_property_read_u32(pdev->dev.of_node, "qcom,fw-bias",
+				&firmware_base);
+		res->firmware_base = (phys_addr_t)firmware_base;
+		dprintk(VIDC_DBG,
+				"Using fw-bias : %pa", &res->firmware_base);
+	}
+
+	res->sw_power_collapsible = of_property_read_bool(pdev->dev.of_node,
+					"qcom,sw-power-collapse");
+	dprintk(VIDC_DBG, "Power collapse supported = %s\n",
+		res->sw_power_collapsible ? "yes" : "no");
+
+	res->never_unload_fw = of_property_read_bool(pdev->dev.of_node,
+			"qcom,never-unload-fw");
+
+	res->debug_timeout = of_property_read_bool(pdev->dev.of_node,
+			"qcom,debug-timeout");
+
+	msm_vidc_debug_timeout |= res->debug_timeout;
+
+	of_property_read_u32(pdev->dev.of_node,
+			"qcom,pm-qos-latency-us", &res->pm_qos_latency_us);
+
+	res->slave_side_cp = of_property_read_bool(pdev->dev.of_node,
+					"qcom,slave-side-cp");
+	dprintk(VIDC_DBG, "Slave side cp = %s\n",
+				res->slave_side_cp ? "yes" : "no");
+
+	of_property_read_u32(pdev->dev.of_node,
+			"qcom,max-secure-instances",
+			&res->max_secure_inst_count);
+
+	if (of_find_property(pdev->dev.of_node,
+			"qcom,cx-ipeak-data", NULL)) {
+		res->cx_ipeak_context = cx_ipeak_register(
+			pdev->dev.of_node, "qcom,cx-ipeak-data");
+	}
+
+	if (IS_ERR(res->cx_ipeak_context)) {
+		rc = PTR_ERR(res->cx_ipeak_context);
+		if (rc == -EPROBE_DEFER)
+			dprintk(VIDC_INFO,
+				"cx-ipeak register failed. Deferring probe!");
+		else
+			dprintk(VIDC_ERR,
+				"cx-ipeak register failed. rc: %d", rc);
+
+		res->cx_ipeak_context = NULL;
+		goto err_register_cx_ipeak;
+	} else if (res->cx_ipeak_context) {
+		dprintk(VIDC_INFO, "cx-ipeak register successful");
+	} else {
+		dprintk(VIDC_INFO, "cx-ipeak register not implemented");
+	}
+
+	of_property_read_u32(pdev->dev.of_node,
+			"qcom,clock-freq-threshold",
+			&res->clk_freq_threshold);
+	dprintk(VIDC_DBG, "cx ipeak threshold frequency = %u\n",
+				res->clk_freq_threshold);
+
+	return rc;
+
+err_register_cx_ipeak:
+err_setup_legacy_cb:
+err_load_max_hw_load:
+	msm_vidc_free_allowed_clocks_table(res);
+err_load_allowed_clocks_table:
+	msm_vidc_free_cycles_per_mb_table(res);
+err_load_cycles_per_mb_table:
+	msm_vidc_free_clock_table(res);
+err_load_clock_table:
+	msm_vidc_free_regulator_table(res);
+err_load_regulator_table:
+	msm_vidc_free_buffer_usage_table(res);
+err_load_buffer_usage_table:
+	msm_vidc_free_reg_table(res);
+err_load_reg_table:
+	msm_vidc_free_freq_table(res);
+err_load_freq_table:
+	return rc;
+}
+
+static int get_secure_vmid(struct context_bank_info *cb)
+{
+	if (!strcasecmp(cb->name, "venus_sec_bitstream"))
+		return VMID_CP_BITSTREAM;
+	else if (!strcasecmp(cb->name, "venus_sec_pixel"))
+		return VMID_CP_PIXEL;
+	else if (!strcasecmp(cb->name, "venus_sec_non_pixel"))
+		return VMID_CP_NON_PIXEL;
+	else {
+		WARN(1, "No matching secure vmid for cb name: %s\n",
+			cb->name);
+		return VMID_INVAL;
+	}
+}
+
+static int msm_vidc_setup_context_bank(struct context_bank_info *cb,
+		struct device *dev)
+{
+	int rc = 0;
+	int secure_vmid = VMID_INVAL;
+	struct bus_type *bus;
+
+	if (!dev || !cb) {
+		dprintk(VIDC_ERR,
+			"%s: Invalid Input params\n", __func__);
+		return -EINVAL;
+	}
+	cb->dev = dev;
+
+	bus = msm_iommu_get_bus(cb->dev);
+	if (IS_ERR_OR_NULL(bus)) {
+		dprintk(VIDC_ERR, "%s - failed to get bus type\n", __func__);
+		rc = PTR_ERR(bus) ?: -ENODEV;
+		goto remove_cb;
+	}
+
+	cb->mapping = arm_iommu_create_mapping(bus, cb->addr_range.start,
+					cb->addr_range.size);
+	if (IS_ERR_OR_NULL(cb->mapping)) {
+		dprintk(VIDC_ERR, "%s - failed to create mapping\n", __func__);
+		rc = PTR_ERR(cb->mapping) ?: -ENODEV;
+		goto remove_cb;
+	}
+
+	if (cb->is_secure) {
+		secure_vmid = get_secure_vmid(cb);
+		rc = iommu_domain_set_attr(cb->mapping->domain,
+				DOMAIN_ATTR_SECURE_VMID, &secure_vmid);
+		if (rc) {
+			dprintk(VIDC_ERR,
+					"%s - programming secure vmid failed: %s %d\n",
+					__func__, dev_name(dev), rc);
+			goto release_mapping;
+		}
+	}
+
+	rc = arm_iommu_attach_device(cb->dev, cb->mapping);
+	if (rc) {
+		dprintk(VIDC_ERR, "%s - Couldn't arm_iommu_attach_device\n",
+			__func__);
+		goto release_mapping;
+	}
+
+	dprintk(VIDC_DBG, "Attached %s and created mapping\n", dev_name(dev));
+	dprintk(VIDC_DBG,
+		"Context bank name:%s, buffer_type: %#x, is_secure: %d, address range start: %#x, size: %#x, dev: %pK, mapping: %pK",
+		cb->name, cb->buffer_type, cb->is_secure, cb->addr_range.start,
+		cb->addr_range.size, cb->dev, cb->mapping);
+
+	return rc;
+
+release_mapping:
+	arm_iommu_release_mapping(cb->mapping);
+remove_cb:
+	return rc;
+}
+
+int msm_vidc_smmu_fault_handler(struct iommu_domain *domain,
+		struct device *dev, unsigned long iova, int flags, void *token)
+{
+	struct msm_vidc_core *core = token;
+	struct msm_vidc_inst *inst;
+
+	if (!domain || !core) {
+		dprintk(VIDC_ERR, "%s - invalid param %pK %pK\n",
+			__func__, domain, core);
+		return -EINVAL;
+	}
+
+	if (core->smmu_fault_handled)
+		return -ENOSYS;
+
+	dprintk(VIDC_ERR, "%s - faulting address: %lx\n", __func__, iova);
+
+	mutex_lock(&core->lock);
+	list_for_each_entry(inst, &core->instances, list) {
+		msm_comm_print_inst_info(inst);
+	}
+	core->smmu_fault_handled = true;
+	mutex_unlock(&core->lock);
+	/*
+	 * Return -ENOSYS to elicit the default behaviour of smmu driver.
+	 * If we return -ENOSYS, then smmu driver assumes page fault handler
+	 * is not installed and prints a list of useful debug information like
+	 * FAR, SID etc. This information is not printed if we return 0.
+	 */
+	return -ENOSYS;
+}
+
+static int msm_vidc_populate_context_bank(struct device *dev,
+		struct msm_vidc_core *core)
+{
+	int rc = 0;
+	struct context_bank_info *cb = NULL;
+	struct device_node *np = NULL;
+
+	if (!dev || !core) {
+		dprintk(VIDC_ERR, "%s - invalid inputs\n", __func__);
+		return -EINVAL;
+	}
+
+	np = dev->of_node;
+	cb = devm_kzalloc(dev, sizeof(*cb), GFP_KERNEL);
+	if (!cb) {
+		dprintk(VIDC_ERR, "%s - Failed to allocate cb\n", __func__);
+		return -ENOMEM;
+	}
+
+	INIT_LIST_HEAD(&cb->list);
+	list_add_tail(&cb->list, &core->resources.context_banks);
+
+	rc = of_property_read_string(np, "label", &cb->name);
+	if (rc) {
+		dprintk(VIDC_DBG,
+			"Failed to read cb label from device tree\n");
+		rc = 0;
+	}
+
+	dprintk(VIDC_DBG, "%s: context bank has name %s\n", __func__, cb->name);
+	rc = of_property_read_u32_array(np, "virtual-addr-pool",
+			(u32 *)&cb->addr_range, 2);
+	if (rc) {
+		dprintk(VIDC_ERR,
+			"Could not read addr pool for context bank : %s %d\n",
+			cb->name, rc);
+		goto err_setup_cb;
+	}
+
+	cb->is_secure = of_property_read_bool(np, "qcom,secure-context-bank");
+	dprintk(VIDC_DBG, "context bank %s : secure = %d\n",
+			cb->name, cb->is_secure);
+
+	/* setup buffer type for each sub device */
+	rc = of_property_read_u32(np, "buffer-types", &cb->buffer_type);
+	if (rc) {
+		dprintk(VIDC_ERR, "failed to load buffer_type info %d\n", rc);
+		rc = -ENOENT;
+		goto err_setup_cb;
+	}
+	dprintk(VIDC_DBG,
+		"context bank %s address start = %x address size = %x buffer_type = %x\n",
+		cb->name, cb->addr_range.start,
+		cb->addr_range.size, cb->buffer_type);
+
+	rc = msm_vidc_setup_context_bank(cb, dev);
+	if (rc) {
+		dprintk(VIDC_ERR, "Cannot setup context bank %d\n", rc);
+		goto err_setup_cb;
+	}
+
+	iommu_set_fault_handler(cb->mapping->domain,
+		msm_vidc_smmu_fault_handler, (void *)core);
+
+	return 0;
+
+err_setup_cb:
+	list_del(&cb->list);
+	return rc;
+}
+
+static int msm_vidc_populate_legacy_context_bank(
+			struct msm_vidc_platform_resources *res)
+{
+	int rc = 0;
+	struct platform_device *pdev = NULL;
+	struct device_node *domains_parent_node = NULL;
+	struct device_node *domains_child_node = NULL;
+	struct device_node *ctx_node = NULL;
+	struct context_bank_info *cb;
+
+	if (!res || !res->pdev) {
+		dprintk(VIDC_ERR, "%s - invalid inputs\n", __func__);
+		return -EINVAL;
+	}
+	pdev = res->pdev;
+
+	domains_parent_node = of_find_node_by_name(pdev->dev.of_node,
+			"qcom,vidc-iommu-domains");
+	if (!domains_parent_node) {
+		dprintk(VIDC_DBG,
+			"%s legacy iommu domains not present\n", __func__);
+		return 0;
+	}
+
+	/* set up each context bank for legacy DT bindings */
+	for_each_child_of_node(domains_parent_node,
+		domains_child_node) {
+		cb = devm_kzalloc(&pdev->dev, sizeof(*cb), GFP_KERNEL);
+		if (!cb) {
+			dprintk(VIDC_ERR,
+				"%s - Failed to allocate cb\n", __func__);
+			return -ENOMEM;
+		}
+		INIT_LIST_HEAD(&cb->list);
+		list_add_tail(&cb->list, &res->context_banks);
+
+		ctx_node = of_parse_phandle(domains_child_node,
+				"qcom,vidc-domain-phandle", 0);
+		if (!ctx_node) {
+			dprintk(VIDC_ERR,
+				"%s Unable to parse pHandle\n", __func__);
+			rc = -EBADHANDLE;
+			goto err_setup_cb;
+		}
+
+		rc = of_property_read_string(ctx_node, "label", &(cb->name));
+		if (rc) {
+			dprintk(VIDC_ERR,
+				"%s Could not find label\n", __func__);
+			goto err_setup_cb;
+		}
+
+		rc = of_property_read_u32_array(ctx_node,
+			"qcom,virtual-addr-pool", (u32 *)&cb->addr_range, 2);
+		if (rc) {
+			dprintk(VIDC_ERR,
+				"%s Could not read addr pool for group : %s (%d)\n",
+				__func__, cb->name, rc);
+			goto err_setup_cb;
+		}
+
+		cb->is_secure =
+			of_property_read_bool(ctx_node, "qcom,secure-domain");
+
+		rc = of_property_read_u32(domains_child_node,
+				"qcom,vidc-buffer-types", &cb->buffer_type);
+		if (rc) {
+			dprintk(VIDC_ERR,
+				"%s Could not read buffer type (%d)\n",
+				__func__, rc);
+			goto err_setup_cb;
+		}
+
+		cb->dev = msm_iommu_get_ctx(cb->name);
+		if (IS_ERR_OR_NULL(cb->dev)) {
+			dprintk(VIDC_ERR, "%s could not get device for cb %s\n",
+					__func__, cb->name);
+			rc = -ENOENT;
+			goto err_setup_cb;
+		}
+
+		rc = msm_vidc_setup_context_bank(cb, cb->dev);
+		if (rc) {
+			dprintk(VIDC_ERR, "Cannot setup context bank %d\n", rc);
+			goto err_setup_cb;
+		}
+		dprintk(VIDC_DBG,
+			"%s: context bank %s secure %d addr start = %#x addr size = %#x buffer_type = %#x\n",
+			__func__, cb->name, cb->is_secure, cb->addr_range.start,
+			cb->addr_range.size, cb->buffer_type);
+	}
+	return rc;
+
+err_setup_cb:
+	list_del(&cb->list);
+	return rc;
+}
+
+int read_context_bank_resources_from_dt(struct platform_device *pdev)
+{
+	struct msm_vidc_core *core;
+	int rc = 0;
+
+	if (!pdev) {
+		dprintk(VIDC_ERR, "Invalid platform device\n");
+		return -EINVAL;
+	} else if (!pdev->dev.parent) {
+		dprintk(VIDC_ERR, "Failed to find a parent for %s\n",
+				dev_name(&pdev->dev));
+		return -ENODEV;
+	}
+
+	core = dev_get_drvdata(pdev->dev.parent);
+	if (!core) {
+		dprintk(VIDC_ERR, "Failed to find cookie in parent device %s",
+				dev_name(pdev->dev.parent));
+		return -EINVAL;
+	}
+
+	if (of_property_read_bool(pdev->dev.of_node, "qcom,fw-context-bank")) {
+		if (core->resources.use_non_secure_pil) {
+			struct context_bank_info *cb;
+
+			cb = devm_kzalloc(&pdev->dev, sizeof(*cb), GFP_KERNEL);
+			if (!cb) {
+				dprintk(VIDC_ERR, "alloc venus cb failed\n");
+				return -ENOMEM;
+			}
+
+			cb->dev = &pdev->dev;
+			rc = venus_boot_init(&core->resources, cb);
+			if (rc)
+				dprintk(VIDC_ERR,
+					"Failed to init non-secure PIL %d\n", rc);
+		}
+	} else {
+		rc = msm_vidc_populate_context_bank(&pdev->dev, core);
+		if (rc)
+			dprintk(VIDC_ERR, "Failed to probe context bank\n");
+		else
+			dprintk(VIDC_DBG, "Successfully probed context bank\n");
+	}
+	return rc;
+}
+
+int read_bus_resources_from_dt(struct platform_device *pdev)
+{
+	struct msm_vidc_core *core;
+
+	if (!pdev) {
+		dprintk(VIDC_ERR, "Invalid platform device\n");
+		return -EINVAL;
+	} else if (!pdev->dev.parent) {
+		dprintk(VIDC_ERR, "Failed to find a parent for %s\n",
+				dev_name(&pdev->dev));
+		return -ENODEV;
+	}
+
+	core = dev_get_drvdata(pdev->dev.parent);
+	if (!core) {
+		dprintk(VIDC_ERR, "Failed to find cookie in parent device %s",
+				dev_name(pdev->dev.parent));
+		return -EINVAL;
+	}
+
+	return msm_vidc_populate_bus(&pdev->dev, &core->resources);
+}
diff -Nruw linux-4.4.115-fbx/drivers/media/platform/msm./vidc/msm_vidc_res_parse.h linux-4.4.115-fbx/drivers/media/platform/msm/vidc/msm_vidc_res_parse.h
--- linux-4.4.115-fbx/drivers/media/platform/msm./vidc/msm_vidc_res_parse.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/media/platform/msm/vidc/msm_vidc_res_parse.h	2019-01-22 16:16:24.467255136 +0100
@@ -0,0 +1,36 @@
+
+/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef DT_PARSE
+#define DT_PARSE
+#include <linux/of.h>
+#include "msm_vidc_resources.h"
+#include "msm_vidc_common.h"
+void msm_vidc_free_platform_resources(
+		struct msm_vidc_platform_resources *res);
+
+int read_hfi_type(struct platform_device *pdev);
+
+int read_platform_resources_from_dt(
+		struct msm_vidc_platform_resources *res);
+
+int read_context_bank_resources_from_dt(struct platform_device *pdev);
+
+int read_bus_resources_from_dt(struct platform_device *pdev);
+
+int msm_vidc_load_u32_table(struct platform_device *pdev,
+		struct device_node *of_node, char *table_name, int struct_size,
+		u32 **table, u32 *num_elements);
+
+#endif
diff -Nruw linux-4.4.115-fbx/drivers/media/platform/msm./vidc/venus_boot.c linux-4.4.115-fbx/drivers/media/platform/msm/vidc/venus_boot.c
--- linux-4.4.115-fbx/drivers/media/platform/msm./vidc/venus_boot.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/media/platform/msm/vidc/venus_boot.c	2019-01-22 16:16:24.467255136 +0100
@@ -0,0 +1,489 @@
+/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define VIDC_DBG_LABEL "venus_boot"
+
+#include <asm/dma-iommu.h>
+#include <asm/page.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/iommu.h>
+#include <linux/qcom_iommu.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+#include <linux/sizes.h>
+#include <linux/slab.h>
+#include <soc/qcom/subsystem_notif.h>
+#include <soc/qcom/subsystem_restart.h>
+#include "msm_vidc_debug.h"
+#include "vidc_hfi_io.h"
+#include "venus_boot.h"
+
+/* VENUS WRAPPER registers */
+#define VENUS_WRAPPER_VBIF_SS_SEC_CPA_START_ADDR_v1 \
+				(VIDC_WRAPPER_BASE_OFFS + 0x1018)
+#define VENUS_WRAPPER_VBIF_SS_SEC_CPA_END_ADDR_v1 \
+				(VIDC_WRAPPER_BASE_OFFS + 0x101C)
+#define VENUS_WRAPPER_VBIF_SS_SEC_FW_START_ADDR_v1 \
+				(VIDC_WRAPPER_BASE_OFFS + 0x1020)
+#define VENUS_WRAPPER_VBIF_SS_SEC_FW_END_ADDR_v1 \
+				(VIDC_WRAPPER_BASE_OFFS + 0x1024)
+
+#define VENUS_WRAPPER_VBIF_SS_SEC_CPA_START_ADDR_v2 \
+				(VIDC_WRAPPER_BASE_OFFS + 0x1020)
+#define VENUS_WRAPPER_VBIF_SS_SEC_CPA_END_ADDR_v2 \
+				(VIDC_WRAPPER_BASE_OFFS + 0x1024)
+#define VENUS_WRAPPER_VBIF_SS_SEC_FW_START_ADDR_v2 \
+				(VIDC_WRAPPER_BASE_OFFS + 0x1028)
+#define VENUS_WRAPPER_VBIF_SS_SEC_FW_END_ADDR_v2 \
+				(VIDC_WRAPPER_BASE_OFFS + 0x102C)
+
+#define VENUS_WRAPPER_SW_RESET	(VIDC_WRAPPER_BASE_OFFS + 0x3000)
+
+/* VENUS VBIF registers */
+#define VENUS_VBIF_CLKON_FORCE_ON			BIT(0)
+
+#define VENUS_VBIF_ADDR_TRANS_EN  (VIDC_VBIF_BASE_OFFS + 0x1000)
+#define VENUS_VBIF_AT_OLD_BASE    (VIDC_VBIF_BASE_OFFS + 0x1004)
+#define VENUS_VBIF_AT_OLD_HIGH    (VIDC_VBIF_BASE_OFFS + 0x1008)
+#define VENUS_VBIF_AT_NEW_BASE    (VIDC_VBIF_BASE_OFFS + 0x1010)
+#define VENUS_VBIF_AT_NEW_HIGH    (VIDC_VBIF_BASE_OFFS + 0x1018)
+
+
+/* Poll interval in uS */
+#define POLL_INTERVAL_US				50
+
+#define VENUS_REGION_SIZE				0x00500000
+
+static struct {
+	struct msm_vidc_platform_resources *resources;
+	struct regulator *gdsc;
+	const char *reg_name;
+	void __iomem *reg_base;
+	struct device *iommu_ctx_bank_dev;
+	struct dma_iommu_mapping *mapping;
+	dma_addr_t fw_iova;
+	bool is_booted;
+	bool hw_ver_checked;
+	u32 fw_sz;
+	u32 hw_ver_major;
+	u32 hw_ver_minor;
+	void *venus_notif_hdle;
+} *venus_data = NULL;
+
+/* Get venus clocks and set rates for rate-settable clocks */
+static int venus_clock_setup(void)
+{
+	int i, rc = 0;
+	unsigned long rate;
+	struct msm_vidc_platform_resources *res = venus_data->resources;
+	struct clock_info *cl;
+
+	for (i = 0; i < res->clock_set.count; i++) {
+		cl = &res->clock_set.clock_tbl[i];
+		/* Make sure rate-settable clocks' rates are set */
+		if (!clk_get_rate(cl->clk) && cl->count) {
+			rate = clk_round_rate(cl->clk, 0);
+			rc = clk_set_rate(cl->clk, rate);
+			if (rc) {
+				dprintk(VIDC_ERR,
+						"Failed to set clock rate %lu %s: %d\n",
+						rate, cl->name, rc);
+				break;
+			}
+		}
+	}
+
+	return rc;
+}
+
+static int venus_clock_prepare_enable(void)
+{
+	int i, rc = 0;
+	struct msm_vidc_platform_resources *res = venus_data->resources;
+	struct clock_info *cl;
+
+	for (i = 0; i < res->clock_set.count; i++) {
+		cl = &res->clock_set.clock_tbl[i];
+		rc = clk_prepare_enable(cl->clk);
+		if (rc) {
+			dprintk(VIDC_ERR, "failed to enable %s\n", cl->name);
+			for (i--; i >= 0; i--) {
+				cl = &res->clock_set.clock_tbl[i];
+				clk_disable_unprepare(cl->clk);
+			}
+			return rc;
+		}
+	}
+
+	return rc;
+}
+
+static void venus_clock_disable_unprepare(void)
+{
+	struct msm_vidc_platform_resources *res = venus_data->resources;
+	struct clock_info *cl;
+	int i = res->clock_set.count;
+
+	for (i--; i >= 0; i--) {
+		cl = &res->clock_set.clock_tbl[i];
+		clk_disable_unprepare(cl->clk);
+	}
+}
+
+static int venus_setup_cb(struct device *dev,
+				u32 size)
+{
+	dma_addr_t va_start = 0x0;
+	size_t va_size = size;
+
+	venus_data->mapping = arm_iommu_create_mapping(
+		msm_iommu_get_bus(dev), va_start, va_size);
+	if (IS_ERR_OR_NULL(venus_data->mapping)) {
+		dprintk(VIDC_ERR, "%s: failed to create mapping for %s\n",
+			__func__, dev_name(dev));
+		return -ENODEV;
+	}
+	dprintk(VIDC_DBG,
+		"%s Attached device %pK and created mapping %pK for %s\n",
+		__func__, dev, venus_data->mapping, dev_name(dev));
+	return 0;
+}
+
+static int pil_venus_mem_setup(size_t size)
+{
+	int rc = 0;
+
+	if (!venus_data->mapping) {
+		size = round_up(size, SZ_4K);
+		rc = venus_setup_cb(venus_data->iommu_ctx_bank_dev, size);
+		if (rc) {
+			dprintk(VIDC_ERR,
+				"%s: Failed to setup context bank for venus : %s\n",
+				__func__,
+				dev_name(venus_data->iommu_ctx_bank_dev));
+			return rc;
+		}
+		venus_data->fw_sz = size;
+	}
+
+	return rc;
+}
+
+static int pil_venus_auth_and_reset(void)
+{
+	int rc;
+
+	phys_addr_t fw_bias = venus_data->resources->firmware_base;
+	void __iomem *reg_base = venus_data->reg_base;
+	u32 ver;
+	bool iommu_present = is_iommu_present(venus_data->resources);
+	struct device *dev = venus_data->iommu_ctx_bank_dev;
+
+	if (!fw_bias) {
+		dprintk(VIDC_ERR, "FW bias is not valid\n");
+		return -EINVAL;
+	}
+	venus_data->fw_iova = 0;
+	/* Get Venus version number */
+	if (!venus_data->hw_ver_checked) {
+		ver = readl_relaxed(reg_base + VIDC_WRAPPER_HW_VERSION);
+		venus_data->hw_ver_minor = (ver & 0x0FFF0000) >> 16;
+		venus_data->hw_ver_major = (ver & 0xF0000000) >> 28;
+		venus_data->hw_ver_checked = 1;
+	}
+
+	if (iommu_present) {
+		u32 cpa_start_addr, cpa_end_addr, fw_start_addr, fw_end_addr;
+		/* Get the cpa and fw start/end addr based on Venus version */
+		if (venus_data->hw_ver_major == 0x1 &&
+				venus_data->hw_ver_minor <= 1) {
+			cpa_start_addr =
+				VENUS_WRAPPER_VBIF_SS_SEC_CPA_START_ADDR_v1;
+			cpa_end_addr =
+				VENUS_WRAPPER_VBIF_SS_SEC_CPA_END_ADDR_v1;
+			fw_start_addr =
+				VENUS_WRAPPER_VBIF_SS_SEC_FW_START_ADDR_v1;
+			fw_end_addr =
+				VENUS_WRAPPER_VBIF_SS_SEC_FW_END_ADDR_v1;
+		} else {
+			cpa_start_addr =
+				VENUS_WRAPPER_VBIF_SS_SEC_CPA_START_ADDR_v2;
+			cpa_end_addr =
+				VENUS_WRAPPER_VBIF_SS_SEC_CPA_END_ADDR_v2;
+			fw_start_addr =
+				VENUS_WRAPPER_VBIF_SS_SEC_FW_START_ADDR_v2;
+			fw_end_addr =
+				VENUS_WRAPPER_VBIF_SS_SEC_FW_END_ADDR_v2;
+		}
+
+		/* Program CPA start and end address */
+		writel_relaxed(0, reg_base + cpa_start_addr);
+		writel_relaxed(venus_data->fw_sz, reg_base + cpa_end_addr);
+
+		/* Program FW start and end address */
+		writel_relaxed(0, reg_base + fw_start_addr);
+		writel_relaxed(venus_data->fw_sz, reg_base + fw_end_addr);
+	} else {
+		rc = regulator_enable(venus_data->gdsc);
+		if (rc) {
+			dprintk(VIDC_ERR, "GDSC enable failed\n");
+			goto err;
+		}
+
+		rc = venus_clock_prepare_enable();
+		if (rc) {
+			dprintk(VIDC_ERR, "Clock prepare and enable failed\n");
+			regulator_disable(venus_data->gdsc);
+			goto err;
+		}
+
+		writel_relaxed(0, reg_base + VENUS_VBIF_AT_OLD_BASE);
+		writel_relaxed(VENUS_REGION_SIZE,
+				reg_base + VENUS_VBIF_AT_OLD_HIGH);
+		writel_relaxed(fw_bias, reg_base + VENUS_VBIF_AT_NEW_BASE);
+		writel_relaxed(fw_bias + VENUS_REGION_SIZE,
+				reg_base + VENUS_VBIF_AT_NEW_HIGH);
+		writel_relaxed(0x7F007F, reg_base + VENUS_VBIF_ADDR_TRANS_EN);
+		venus_clock_disable_unprepare();
+		regulator_disable(venus_data->gdsc);
+	}
+	/* Make sure all register writes are committed. */
+	mb();
+
+	/*
+	 * Need to wait 10 cycles of internal clocks before bringing ARM9
+	 * out of reset.
+	 */
+	udelay(1);
+
+	if (iommu_present) {
+		phys_addr_t pa = fw_bias;
+
+		rc = arm_iommu_attach_device(dev, venus_data->mapping);
+		if (rc) {
+			dprintk(VIDC_ERR,
+				"Failed to attach iommu for %s : %d\n",
+				dev_name(dev), rc);
+			goto release_mapping;
+		}
+
+		dprintk(VIDC_DBG, "Attached and created mapping for %s\n",
+				dev_name(dev));
+
+		/* Map virtual addr space 0 - fw_sz to fw phys addr space */
+		rc = iommu_map(venus_data->mapping->domain,
+			venus_data->fw_iova, pa, venus_data->fw_sz,
+			IOMMU_READ|IOMMU_WRITE|IOMMU_PRIV);
+		if (!rc) {
+			dprintk(VIDC_DBG,
+				"%s - Successfully mapped and performed test translation!\n",
+				dev_name(dev));
+		}
+
+		if (rc || (venus_data->fw_iova != 0)) {
+			dprintk(VIDC_ERR, "%s - Failed to setup IOMMU\n",
+					dev_name(dev));
+			goto err_iommu_map;
+		}
+	}
+	/* Bring Arm9 out of reset */
+	writel_relaxed(0, reg_base + VENUS_WRAPPER_SW_RESET);
+
+	venus_data->is_booted = 1;
+	return 0;
+
+err_iommu_map:
+	if (iommu_present)
+		arm_iommu_detach_device(dev);
+release_mapping:
+	if (iommu_present)
+		arm_iommu_release_mapping(venus_data->mapping);
+err:
+	return rc;
+}
+
+static int pil_venus_shutdown(void)
+{
+	void __iomem *reg_base = venus_data->reg_base;
+	u32 reg;
+	int rc;
+
+	if (!venus_data->is_booted)
+		return 0;
+
+	/* Assert the reset to ARM9 */
+	reg = readl_relaxed(reg_base + VENUS_WRAPPER_SW_RESET);
+	reg |= BIT(4);
+	writel_relaxed(reg, reg_base + VENUS_WRAPPER_SW_RESET);
+
+	/* Make sure reset is asserted before the mapping is removed */
+	mb();
+
+	if (is_iommu_present(venus_data->resources)) {
+		iommu_unmap(venus_data->mapping->domain, venus_data->fw_iova,
+			venus_data->fw_sz);
+		arm_iommu_detach_device(venus_data->iommu_ctx_bank_dev);
+	}
+	/*
+	 * Force the VBIF clk to be on to avoid AXI bridge halt ack failure
+	 * for certain Venus version.
+	 */
+	if (venus_data->hw_ver_major == 0x1 &&
+				(venus_data->hw_ver_minor == 0x2 ||
+				venus_data->hw_ver_minor == 0x3)) {
+		reg = readl_relaxed(reg_base + VIDC_VENUS_VBIF_CLK_ON);
+		reg |= VENUS_VBIF_CLKON_FORCE_ON;
+		writel_relaxed(reg, reg_base + VIDC_VENUS_VBIF_CLK_ON);
+	}
+
+	/* Halt AXI and AXI OCMEM VBIF Access */
+	reg = readl_relaxed(reg_base + VENUS_VBIF_AXI_HALT_CTRL0);
+	reg |= VENUS_VBIF_AXI_HALT_CTRL0_HALT_REQ;
+	writel_relaxed(reg, reg_base + VENUS_VBIF_AXI_HALT_CTRL0);
+
+	/* Request for AXI bus port halt */
+	rc = readl_poll_timeout(reg_base + VENUS_VBIF_AXI_HALT_CTRL1,
+			reg, reg & VENUS_VBIF_AXI_HALT_CTRL1_HALT_ACK,
+			POLL_INTERVAL_US,
+			VENUS_VBIF_AXI_HALT_ACK_TIMEOUT_US);
+	if (rc)
+		dprintk(VIDC_ERR, "Port halt timeout\n");
+
+	venus_data->is_booted = 0;
+
+	return 0;
+}
+
+static int venus_notifier_cb(struct notifier_block *this, unsigned long code,
+							void *ss_handle)
+{
+	struct notif_data *data = (struct notif_data *)ss_handle;
+	static bool venus_data_set;
+	int ret;
+
+	if (!data->no_auth)
+		return NOTIFY_DONE;
+
+	if (!venus_data_set) {
+		ret = venus_clock_setup();
+		if (ret)
+			return ret;
+
+		ret = of_property_read_string(data->pdev->dev.of_node,
+				"qcom,proxy-reg-names", &venus_data->reg_name);
+		if (ret)
+			return ret;
+
+		venus_data->gdsc = devm_regulator_get(
+				&data->pdev->dev, venus_data->reg_name);
+		if (IS_ERR(venus_data->gdsc)) {
+			dprintk(VIDC_ERR, "Failed to get Venus GDSC\n");
+			return -ENODEV;
+		}
+
+		venus_data_set = true;
+	}
+
+	if (code != SUBSYS_AFTER_POWERUP && code != SUBSYS_AFTER_SHUTDOWN)
+		return NOTIFY_DONE;
+
+	ret = regulator_enable(venus_data->gdsc);
+	if (ret) {
+		dprintk(VIDC_ERR, "GDSC enable failed\n");
+		return ret;
+	}
+
+	ret = venus_clock_prepare_enable();
+	if (ret) {
+		dprintk(VIDC_ERR, "Clock prepare and enable failed\n");
+		goto err_clks;
+	}
+
+	if (code == SUBSYS_AFTER_POWERUP) {
+		if (is_iommu_present(venus_data->resources))
+			pil_venus_mem_setup(VENUS_REGION_SIZE);
+		pil_venus_auth_and_reset();
+	} else if (code == SUBSYS_AFTER_SHUTDOWN) {
+		pil_venus_shutdown();
+	}
+
+	venus_clock_disable_unprepare();
+	regulator_disable(venus_data->gdsc);
+
+	return NOTIFY_DONE;
+err_clks:
+	regulator_disable(venus_data->gdsc);
+	return ret;
+}
+
+static struct notifier_block venus_notifier = {
+	.notifier_call = venus_notifier_cb,
+};
+
+int venus_boot_init(struct msm_vidc_platform_resources *res,
+			struct context_bank_info *cb)
+{
+	int rc = 0;
+
+	if (!res || !cb) {
+		dprintk(VIDC_ERR, "Invalid platform resource handle\n");
+		return -EINVAL;
+	}
+	venus_data = kzalloc(sizeof(*venus_data), GFP_KERNEL);
+	if (!venus_data)
+		return -ENOMEM;
+
+	venus_data->resources = res;
+	venus_data->iommu_ctx_bank_dev = cb->dev;
+	if (!venus_data->iommu_ctx_bank_dev) {
+		dprintk(VIDC_ERR, "Invalid venus context bank device\n");
+		rc = -ENODEV;
+		goto err_ioremap_fail;
+	}
+	venus_data->reg_base = ioremap_nocache(res->register_base,
+			(unsigned long)res->register_size);
+	if (!venus_data->reg_base) {
+		dprintk(VIDC_ERR,
+				"could not map reg addr %pa of size %d\n",
+				&res->register_base, res->register_size);
+		rc = -ENOMEM;
+		goto err_ioremap_fail;
+	}
+	venus_data->venus_notif_hdle = subsys_notif_register_notifier("venus",
+							&venus_notifier);
+	if (IS_ERR(venus_data->venus_notif_hdle)) {
+		dprintk(VIDC_ERR, "register event notification failed\n");
+		rc = PTR_ERR(venus_data->venus_notif_hdle);
+		goto err_subsys_notif;
+	}
+
+	return rc;
+
+err_subsys_notif:
+	iounmap(venus_data->reg_base);
+err_ioremap_fail:
+	kfree(venus_data);
+	return rc;
+}
+
+void venus_boot_deinit(void)
+{
+	venus_data->resources = NULL;
+	subsys_notif_unregister_notifier(venus_data->venus_notif_hdle,
+			&venus_notifier);
+	iounmap(venus_data->reg_base);
+	kfree(venus_data);
+}
diff -Nruw linux-4.4.115-fbx/drivers/media/platform/msm./vidc/venus_boot.h linux-4.4.115-fbx/drivers/media/platform/msm/vidc/venus_boot.h
--- linux-4.4.115-fbx/drivers/media/platform/msm./vidc/venus_boot.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/media/platform/msm/vidc/venus_boot.h	2019-01-22 16:16:24.467255136 +0100
@@ -0,0 +1,22 @@
+/* Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __VENUS_BOOT_H__
+#define __VENUS_BOOT_H__
+#include "msm_vidc_resources.h"
+
+int venus_boot_init(struct msm_vidc_platform_resources *res,
+		struct context_bank_info *cb);
+void venus_boot_deinit(void);
+
+#endif /* __VENUS_BOOT_H__ */
diff -Nruw linux-4.4.115-fbx/drivers/media/platform/msm./vidc/venus_hfi.c linux-4.4.115-fbx/drivers/media/platform/msm/vidc/venus_hfi.c
--- linux-4.4.115-fbx/drivers/media/platform/msm./vidc/venus_hfi.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/media/platform/msm/vidc/venus_hfi.c	2019-10-29 09:26:23.965206329 +0100
@@ -0,0 +1,4793 @@
+/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <asm/dma-iommu.h>
+#include <asm/memory.h>
+#include <linux/clk/msm-clk.h>
+#include <linux/coresight-stm.h>
+#include <linux/delay.h>
+#include <linux/devfreq.h>
+#include <linux/hash.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/iommu.h>
+#include <linux/iopoll.h>
+#include <linux/of.h>
+#include <linux/pm_qos.h>
+#include <linux/regulator/consumer.h>
+#include <linux/slab.h>
+#include <linux/workqueue.h>
+#include <soc/qcom/cx_ipeak.h>
+#include <soc/qcom/scm.h>
+#include <soc/qcom/smem.h>
+#include <soc/qcom/subsystem_restart.h>
+#include "hfi_packetization.h"
+#include "msm_vidc_debug.h"
+#include "venus_hfi.h"
+#include "vidc_hfi_io.h"
+
+#define FIRMWARE_SIZE			0x00A00000
+#define REG_ADDR_OFFSET_BITMASK	0x000FFFFF
+#define QDSS_IOVA_START 0x80001000
+
+static struct hal_device_data hal_ctxt;
+
+#define TZBSP_MEM_PROTECT_VIDEO_VAR 0x8
+struct tzbsp_memprot {
+	u32 cp_start;
+	u32 cp_size;
+	u32 cp_nonpixel_start;
+	u32 cp_nonpixel_size;
+};
+
+struct tzbsp_resp {
+	int ret;
+};
+
+#define TZBSP_VIDEO_SET_STATE 0xa
+
+/* Poll interval in uS */
+#define POLL_INTERVAL_US 50
+
+enum tzbsp_video_state {
+	TZBSP_VIDEO_STATE_SUSPEND = 0,
+	TZBSP_VIDEO_STATE_RESUME = 1,
+	TZBSP_VIDEO_STATE_RESTORE_THRESHOLD = 2,
+};
+
+struct tzbsp_video_set_state_req {
+	u32 state; /* should be tzbsp_video_state enum value */
+	u32 spare; /* reserved for future, should be zero */
+};
+
+const struct msm_vidc_gov_data DEFAULT_BUS_VOTE = {
+	.data = NULL,
+	.data_count = 0,
+	.imem_size = 0,
+};
+
+const int max_packets = 1000;
+
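+/*
+ * Delayed work that power-collapses Venus after a period of inactivity.
+ * It is (re)armed on every successful command submission; see
+ * __iface_cmdq_write_relaxed().
+ */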
+static void venus_hfi_pm_handler(struct work_struct *work);
+static DECLARE_DELAYED_WORK(venus_hfi_pm_work, venus_hfi_pm_handler);
+static inline int __resume(struct venus_hfi_device *device);
+static inline int __suspend(struct venus_hfi_device *device);
+static int __disable_regulators(struct venus_hfi_device *device);
+static int __enable_regulators(struct venus_hfi_device *device);
+static inline int __prepare_enable_clks(struct venus_hfi_device *device);
+static inline void __disable_unprepare_clks(struct venus_hfi_device *device);
+static int __scale_clocks_load(struct venus_hfi_device *device, int load,
+		struct vidc_clk_scale_data *data,
+		unsigned long instant_bitrate);
+static void __flush_debug_queue(struct venus_hfi_device *device, u8 *packet);
+static int __initialize_packetization(struct venus_hfi_device *device);
+static struct hal_session *__get_session(struct venus_hfi_device *device,
+		u32 session_id);
+static int __iface_cmdq_write(struct venus_hfi_device *device,
+					void *pkt);
+static int __load_fw(struct venus_hfi_device *device);
+static void __unload_fw(struct venus_hfi_device *device);
+static int __tzbsp_set_video_state(enum tzbsp_video_state state);
+
+
+/*
+ * Utility function to enforce some of our assumptions.  Call this liberally
+ * in code hotspots to double-check the invariants we rely on (currently,
+ * that device->lock is held).
+ */
+static inline void __strict_check(struct venus_hfi_device *device)
+{
+	if (!mutex_is_locked(&device->lock)) {
+		dprintk(VIDC_WARN,
+			"device->lock mutex is not locked\n");
+		WARN_ON(VIDC_DBG_WARN_ENABLE);
+	}
+}
+
+static inline void __set_state(struct venus_hfi_device *device,
+		enum venus_hfi_state state)
+{
+	device->state = state;
+}
+
+static inline bool __core_in_valid_state(struct venus_hfi_device *device)
+{
+	return device->state != VENUS_STATE_DEINIT;
+}
+
+static void __dump_packet(u8 *packet, enum vidc_msg_prio log_level)
+{
+	u32 c = 0, packet_size = *(u32 *)packet;
+	const int row_size = 32;
+	/*
+	 * row must be large enough for 0xdeadbaad * 8 to be rendered as
+	 * "de ad ba ad " * 8 + '\0'
+	 */
+	char row[3 * row_size];
+
+	for (c = 0; c * row_size < packet_size; ++c) {
+		int bytes_to_read = ((c + 1) * row_size > packet_size) ?
+			packet_size % row_size : row_size;
+		hex_dump_to_buffer(packet + c * row_size, bytes_to_read,
+				row_size, 4, row, sizeof(row), false);
+		dprintk(log_level, "%s\n", row);
+	}
+}
+
+static void __sim_modify_cmd_packet(u8 *packet, struct venus_hfi_device *device)
+{
+	struct hfi_cmd_sys_session_init_packet *sys_init;
+	struct hal_session *session = NULL;
+	u8 i;
+	phys_addr_t fw_bias = 0;
+
+	if (!device || !packet) {
+		dprintk(VIDC_ERR, "Invalid Param\n");
+		return;
+	} else if (!device->hal_data->firmware_base
+			|| is_iommu_present(device->res)) {
+		return;
+	}
+
+	fw_bias = device->hal_data->firmware_base;
+	sys_init = (struct hfi_cmd_sys_session_init_packet *)packet;
+
+	session = __get_session(device, sys_init->session_id);
+	if (!session) {
+		dprintk(VIDC_DBG, "%s :Invalid session id: %x\n",
+				__func__, sys_init->session_id);
+		return;
+	}
+
+	switch (sys_init->packet_type) {
+	case HFI_CMD_SESSION_EMPTY_BUFFER:
+		if (session->is_decoder) {
+			struct hfi_cmd_session_empty_buffer_compressed_packet
+			*pkt = (struct
+			hfi_cmd_session_empty_buffer_compressed_packet
+			*) packet;
+			pkt->packet_buffer -= fw_bias;
+		} else {
+			struct
+			hfi_cmd_session_empty_buffer_uncompressed_plane0_packet
+			*pkt = (struct
+			hfi_cmd_session_empty_buffer_uncompressed_plane0_packet
+			*) packet;
+			pkt->packet_buffer -= fw_bias;
+		}
+		break;
+	case HFI_CMD_SESSION_FILL_BUFFER:
+	{
+		struct hfi_cmd_session_fill_buffer_packet *pkt =
+			(struct hfi_cmd_session_fill_buffer_packet *)packet;
+		pkt->packet_buffer -= fw_bias;
+		break;
+	}
+	case HFI_CMD_SESSION_SET_BUFFERS:
+	{
+		struct hfi_cmd_session_set_buffers_packet *pkt =
+			(struct hfi_cmd_session_set_buffers_packet *)packet;
+		if (pkt->buffer_type == HFI_BUFFER_OUTPUT ||
+			pkt->buffer_type == HFI_BUFFER_OUTPUT2) {
+			struct hfi_buffer_info *buff;
+			buff = (struct hfi_buffer_info *) pkt->rg_buffer_info;
+			buff->buffer_addr -= fw_bias;
+			if (buff->extra_data_addr >= fw_bias)
+				buff->extra_data_addr -= fw_bias;
+		} else {
+			for (i = 0; i < pkt->num_buffers; i++)
+				pkt->rg_buffer_info[i] -= fw_bias;
+		}
+		break;
+	}
+	case HFI_CMD_SESSION_RELEASE_BUFFERS:
+	{
+		struct hfi_cmd_session_release_buffer_packet *pkt =
+			(struct hfi_cmd_session_release_buffer_packet *)packet;
+		if (pkt->buffer_type == HFI_BUFFER_OUTPUT ||
+			pkt->buffer_type == HFI_BUFFER_OUTPUT2) {
+			struct hfi_buffer_info *buff;
+			buff = (struct hfi_buffer_info *) pkt->rg_buffer_info;
+			buff->buffer_addr -= fw_bias;
+			buff->extra_data_addr -= fw_bias;
+		} else {
+			for (i = 0; i < pkt->num_buffers; i++)
+				pkt->rg_buffer_info[i] -= fw_bias;
+		}
+		break;
+	}
+	case HFI_CMD_SESSION_PARSE_SEQUENCE_HEADER:
+	{
+		struct hfi_cmd_session_parse_sequence_header_packet *pkt =
+			(struct hfi_cmd_session_parse_sequence_header_packet *)
+		packet;
+		pkt->packet_buffer -= fw_bias;
+		break;
+	}
+	case HFI_CMD_SESSION_GET_SEQUENCE_HEADER:
+	{
+		struct hfi_cmd_session_get_sequence_header_packet *pkt =
+			(struct hfi_cmd_session_get_sequence_header_packet *)
+		packet;
+		pkt->packet_buffer -= fw_bias;
+		break;
+	}
+	default:
+		break;
+	}
+}
+
+static int __acquire_regulator(struct regulator_info *rinfo)
+{
+	int rc = 0;
+
+	if (rinfo->has_hw_power_collapse) {
+		rc = regulator_set_mode(rinfo->regulator,
+				REGULATOR_MODE_NORMAL);
+		if (rc) {
+			/*
+			 * This is somewhat fatal, but nothing we can do
+			 * about it. We can't disable the regulator w/o
+			 * getting it back under s/w control.
+			 */
+			dprintk(VIDC_WARN,
+				"Failed to acquire regulator control: %s\n",
+					rinfo->name);
+		} else {
+			dprintk(VIDC_DBG,
+				"Acquire regulator control from HW: %s\n",
+				rinfo->name);
+		}
+	}
+
+	if (!regulator_is_enabled(rinfo->regulator)) {
+		dprintk(VIDC_WARN, "Regulator is not enabled %s\n",
+			rinfo->name);
+		WARN_ON(VIDC_DBG_WARN_ENABLE);
+	}
+
+	return rc;
+}
+
+static int __hand_off_regulator(struct regulator_info *rinfo)
+{
+	int rc = 0;
+
+	if (rinfo->has_hw_power_collapse) {
+		rc = regulator_set_mode(rinfo->regulator,
+				REGULATOR_MODE_FAST);
+		if (rc) {
+			dprintk(VIDC_WARN,
+				"Failed to hand off regulator control: %s\n",
+					rinfo->name);
+		} else {
+			dprintk(VIDC_DBG,
+					"Hand off regulator control to HW: %s\n",
+					rinfo->name);
+		}
+	}
+
+	return rc;
+}
+
+static int __hand_off_regulators(struct venus_hfi_device *device)
+{
+	struct regulator_info *rinfo;
+	int rc = 0, c = 0;
+
+	venus_hfi_for_each_regulator(device, rinfo) {
+		rc = __hand_off_regulator(rinfo);
+		/*
+		 * If one regulator hand-off fails, the driver must take
+		 * back control of the other regulators.
+		 */
+		if (rc)
+			goto err_reg_handoff_failed;
+		c++;
+	}
+
+	return rc;
+err_reg_handoff_failed:
+	venus_hfi_for_each_regulator_reverse_continue(device, rinfo, c)
+		__acquire_regulator(rinfo);
+
+	return rc;
+}
+
+static int __write_queue(struct vidc_iface_q_info *qinfo, u8 *packet,
+		bool *rx_req_is_set)
+{
+	struct hfi_queue_header *queue;
+	u32 packet_size_in_words, new_write_idx;
+	u32 empty_space, read_idx;
+	u32 *write_ptr;
+
+	if (!qinfo || !packet) {
+		dprintk(VIDC_ERR, "Invalid Params\n");
+		return -EINVAL;
+	} else if (!qinfo->q_array.align_virtual_addr) {
+		dprintk(VIDC_WARN, "Queues have already been freed\n");
+		return -EINVAL;
+	}
+
+	queue = (struct hfi_queue_header *) qinfo->q_hdr;
+	if (!queue) {
+		dprintk(VIDC_ERR, "queue not present\n");
+		return -ENOENT;
+	}
+
+	if (msm_vidc_debug & VIDC_PKT) {
+		dprintk(VIDC_PKT, "%s: %pK\n", __func__, qinfo);
+		__dump_packet(packet, VIDC_PKT);
+	}
+
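+	/*
+	 * The first dword of every HFI packet holds the packet size in
+	 * bytes; shifting right by two converts it to 32-bit words, the
+	 * unit in which the queue indices are expressed.
+	 */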
+	packet_size_in_words = (*(u32 *)packet) >> 2;
+	if (!packet_size_in_words) {
+		dprintk(VIDC_ERR, "Zero packet size\n");
+		return -ENODATA;
+	}
+
+	read_idx = queue->qhdr_read_idx;
+
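+	/*
+	 * The queue is a circular buffer: when the write index is ahead of
+	 * (or equal to) the read index, the free space is everything outside
+	 * the [read, write) window; otherwise it is the gap between them.
+	 */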
+	empty_space = (queue->qhdr_write_idx >=  read_idx) ?
+		(queue->qhdr_q_size - (queue->qhdr_write_idx -  read_idx)) :
+		(read_idx - queue->qhdr_write_idx);
+	if (empty_space <= packet_size_in_words) {
+		queue->qhdr_tx_req =  1;
+		dprintk(VIDC_ERR, "Insufficient size (%d) to write (%d)\n",
+					  empty_space, packet_size_in_words);
+		return -ENOTEMPTY;
+	}
+
+	queue->qhdr_tx_req =  0;
+
+	new_write_idx = (queue->qhdr_write_idx + packet_size_in_words);
+	write_ptr = (u32 *)((qinfo->q_array.align_virtual_addr) +
+		(queue->qhdr_write_idx << 2));
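+	/*
+	 * If the packet fits before the end of the ring, copy it in one
+	 * shot; otherwise split the copy across the wrap-around point.
+	 */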
+	if (new_write_idx < queue->qhdr_q_size) {
+		memcpy(write_ptr, packet, packet_size_in_words << 2);
+	} else {
+		new_write_idx -= queue->qhdr_q_size;
+		memcpy(write_ptr, packet, (packet_size_in_words -
+			new_write_idx) << 2);
+		memcpy((void *)qinfo->q_array.align_virtual_addr,
+			packet + ((packet_size_in_words - new_write_idx) << 2),
+			new_write_idx  << 2);
+	}
+
+	/*
+	 * Memory barrier to make sure the packet is written before updating
+	 * the write index.
+	 */
+	mb();
+	queue->qhdr_write_idx = new_write_idx;
+	if (rx_req_is_set)
+		*rx_req_is_set = queue->qhdr_rx_req == 1;
+	/*
+	 * Memory barrier to make sure the write index is updated before an
+	 * interrupt is raised on Venus.
+	 */
+	mb();
+	return 0;
+}
+
+static void __hal_sim_modify_msg_packet(u8 *packet,
+					struct venus_hfi_device *device)
+{
+	struct hfi_msg_sys_session_init_done_packet *sys_idle;
+	struct hal_session *session = NULL;
+	phys_addr_t fw_bias = 0;
+
+	if (!device || !packet) {
+		dprintk(VIDC_ERR, "Invalid Param\n");
+		return;
+	} else if (!device->hal_data->firmware_base
+			|| is_iommu_present(device->res)) {
+		return;
+	}
+
+	fw_bias = device->hal_data->firmware_base;
+	sys_idle = (struct hfi_msg_sys_session_init_done_packet *)packet;
+	session = __get_session(device, sys_idle->session_id);
+
+	if (!session) {
+		dprintk(VIDC_DBG, "%s: Invalid session id: %x\n",
+				__func__, sys_idle->session_id);
+		return;
+	}
+
+	switch (sys_idle->packet_type) {
+	case HFI_MSG_SESSION_FILL_BUFFER_DONE:
+		if (session->is_decoder) {
+			struct
+			hfi_msg_session_fbd_uncompressed_plane0_packet
+			*pkt_uc = (struct
+			hfi_msg_session_fbd_uncompressed_plane0_packet
+			*) packet;
+			pkt_uc->packet_buffer += fw_bias;
+		} else {
+			struct
+			hfi_msg_session_fill_buffer_done_compressed_packet
+			*pkt = (struct
+			hfi_msg_session_fill_buffer_done_compressed_packet
+			*) packet;
+			pkt->packet_buffer += fw_bias;
+		}
+		break;
+	case HFI_MSG_SESSION_EMPTY_BUFFER_DONE:
+	{
+		struct hfi_msg_session_empty_buffer_done_packet *pkt =
+		(struct hfi_msg_session_empty_buffer_done_packet *)packet;
+		pkt->packet_buffer += fw_bias;
+		break;
+	}
+	case HFI_MSG_SESSION_GET_SEQUENCE_HEADER_DONE:
+	{
+		struct
+		hfi_msg_session_get_sequence_header_done_packet
+		*pkt =
+		(struct hfi_msg_session_get_sequence_header_done_packet *)
+		packet;
+		pkt->sequence_header += fw_bias;
+		break;
+	}
+	default:
+		break;
+	}
+}
+
+static int __read_queue(struct vidc_iface_q_info *qinfo, u8 *packet,
+		u32 *pb_tx_req_is_set)
+{
+	struct hfi_queue_header *queue;
+	u32 packet_size_in_words, new_read_idx;
+	u32 *read_ptr;
+	u32 receive_request = 0;
+	int rc = 0;
+
+	if (!qinfo || !packet || !pb_tx_req_is_set) {
+		dprintk(VIDC_ERR, "Invalid Params\n");
+		return -EINVAL;
+	} else if (!qinfo->q_array.align_virtual_addr) {
+		dprintk(VIDC_WARN, "Queues have already been freed\n");
+		return -EINVAL;
+	}
+
+	/*
+	 * Memory barrier to make sure the data is valid before reading it.
+	 */
+	mb();
+	queue = (struct hfi_queue_header *) qinfo->q_hdr;
+
+	if (!queue) {
+		dprintk(VIDC_ERR, "Queue memory is not allocated\n");
+		return -ENOMEM;
+	}
+
+	/*
+	 * Do not set the receive request for the debug queue: if set, Venus
+	 * generates an interrupt for debug messages even when there is no
+	 * response message available. In general the debug queue will not
+	 * become full, as it is emptied out on every interrupt from Venus;
+	 * Venus will generate an interrupt anyway if it does fill up.
+	 */
+	if (queue->qhdr_type & HFI_Q_ID_CTRL_TO_HOST_MSG_Q)
+		receive_request = 1;
+
+	if (queue->qhdr_read_idx == queue->qhdr_write_idx) {
+		queue->qhdr_rx_req = receive_request;
+		*pb_tx_req_is_set = 0;
+		dprintk(VIDC_DBG,
+			"%s queue is empty, rx_req = %u, tx_req = %u, read_idx = %u\n",
+			receive_request ? "message" : "debug",
+			queue->qhdr_rx_req, queue->qhdr_tx_req,
+			queue->qhdr_read_idx);
+		return -ENODATA;
+	}
+
+	read_ptr = (u32 *)((qinfo->q_array.align_virtual_addr) +
+				(queue->qhdr_read_idx << 2));
+	packet_size_in_words = (*read_ptr) >> 2;
+	if (!packet_size_in_words) {
+		dprintk(VIDC_ERR, "Zero packet size\n");
+		return -ENODATA;
+	}
+
+	new_read_idx = queue->qhdr_read_idx + packet_size_in_words;
+	if (((packet_size_in_words << 2) <= VIDC_IFACEQ_VAR_HUGE_PKT_SIZE)
+			&& queue->qhdr_read_idx <= queue->qhdr_q_size) {
+		if (new_read_idx < queue->qhdr_q_size) {
+			memcpy(packet, read_ptr,
+					packet_size_in_words << 2);
+		} else {
+			new_read_idx -= queue->qhdr_q_size;
+			memcpy(packet, read_ptr,
+			(packet_size_in_words - new_read_idx) << 2);
+			memcpy(packet + ((packet_size_in_words -
+					new_read_idx) << 2),
+					(u8 *)qinfo->q_array.align_virtual_addr,
+					new_read_idx << 2);
+		}
+	} else {
+		dprintk(VIDC_WARN,
+			"BAD packet received, read_idx: %#x, pkt_size: %d\n",
+			queue->qhdr_read_idx, packet_size_in_words << 2);
+		dprintk(VIDC_WARN, "Dropping this packet\n");
+		new_read_idx = queue->qhdr_write_idx;
+		rc = -ENODATA;
+	}
+
+	queue->qhdr_read_idx = new_read_idx;
+
+	if (queue->qhdr_read_idx != queue->qhdr_write_idx)
+		queue->qhdr_rx_req = 0;
+	else
+		queue->qhdr_rx_req = receive_request;
+
+	*pb_tx_req_is_set = (queue->qhdr_tx_req == 1) ? 1 : 0;
+
+	if (msm_vidc_debug & VIDC_PKT) {
+		dprintk(VIDC_PKT, "%s: %pK\n", __func__, qinfo);
+		__dump_packet(packet, VIDC_PKT);
+	}
+
+	return rc;
+}
+
+static int __smem_alloc(struct venus_hfi_device *dev,
+			struct vidc_mem_addr *mem, u32 size, u32 align,
+			u32 flags, u32 usage)
+{
+	struct msm_smem *alloc = NULL;
+	int rc = 0;
+
+	if (!dev || !dev->hal_client || !mem || !size) {
+		dprintk(VIDC_ERR, "Invalid Params\n");
+		return -EINVAL;
+	}
+
+	dprintk(VIDC_INFO, "start to alloc size: %d, flags: %d\n", size, flags);
+	alloc = msm_smem_alloc(dev->hal_client, size, align, flags, usage, 1);
+	if (!alloc) {
+		dprintk(VIDC_ERR, "Alloc failed\n");
+		rc = -ENOMEM;
+		goto fail_smem_alloc;
+	}
+
+	dprintk(VIDC_DBG, "__smem_alloc: ptr = %pK, size = %d\n",
+			alloc->kvaddr, size);
+	rc = msm_smem_cache_operations(dev->hal_client, alloc,
+		SMEM_CACHE_CLEAN, -1);
+	if (rc) {
+		dprintk(VIDC_WARN, "Failed to clean cache\n");
+		dprintk(VIDC_WARN, "This may result in undefined behavior\n");
+	}
+
+	mem->mem_size = alloc->size;
+	mem->mem_data = alloc;
+	mem->align_virtual_addr = alloc->kvaddr;
+	mem->align_device_addr = alloc->device_addr;
+	return rc;
+fail_smem_alloc:
+	return rc;
+}
+
+static void __smem_free(struct venus_hfi_device *dev, struct msm_smem *mem)
+{
+	if (!dev || !mem) {
+		dprintk(VIDC_ERR, "invalid param %pK %pK\n", dev, mem);
+		return;
+	}
+
+	msm_smem_free(dev->hal_client, mem);
+}
+
+static void __write_register(struct venus_hfi_device *device,
+		u32 reg, u32 value)
+{
+	u32 hwiosymaddr = reg;
+	u8 *base_addr;
+	if (!device) {
+		dprintk(VIDC_ERR, "Invalid params: %pK\n", device);
+		return;
+	}
+
+	__strict_check(device);
+
+	if (!device->power_enabled) {
+		dprintk(VIDC_WARN,
+			"HFI Write register failed : Power is OFF\n");
+		WARN_ON(VIDC_DBG_WARN_ENABLE);
+		return;
+	}
+
+	base_addr = device->hal_data->register_base;
+	dprintk(VIDC_DBG, "Base addr: %pK, written to: %#x, Value: %#x...\n",
+		base_addr, hwiosymaddr, value);
+	base_addr += hwiosymaddr;
+	writel_relaxed(value, base_addr);
+	wmb();
+}
+
+static int __read_register(struct venus_hfi_device *device, u32 reg)
+{
+	int rc = 0;
+	u8 *base_addr;
+	if (!device) {
+		dprintk(VIDC_ERR, "Invalid params: %pK\n", device);
+		return -EINVAL;
+	}
+
+	__strict_check(device);
+
+	if (!device->power_enabled) {
+		dprintk(VIDC_WARN,
+			"HFI Read register failed : Power is OFF\n");
+		WARN_ON(VIDC_DBG_WARN_ENABLE);
+		return -EINVAL;
+	}
+
+	base_addr = device->hal_data->register_base;
+
+	rc = readl_relaxed(base_addr + reg);
+	rmb();
+	dprintk(VIDC_DBG, "Base addr: %pK, read from: %#x, value: %#x...\n",
+		base_addr, reg, rc);
+
+	return rc;
+}
+
+static void __set_registers(struct venus_hfi_device *device)
+{
+	struct reg_set *reg_set;
+	int i;
+
+	if (!device->res) {
+		dprintk(VIDC_ERR,
+			"device resources null, cannot set registers\n");
+		return;
+	}
+
+	reg_set = &device->res->reg_set;
+	for (i = 0; i < reg_set->count; i++) {
+		__write_register(device, reg_set->reg_tbl[i].reg,
+				reg_set->reg_tbl[i].value);
+	}
+}
+
+/*
+ * The existence of this function is a hack for 8996 (or certain Venus versions)
+ * to overcome a hardware bug.  Whenever the GDSCs momentarily power collapse
+ * (after calling __hand_off_regulators()), the values of the threshold
+ * registers (typically programmed by TZ) are incorrectly reset.  As a
+ * result, we reprogram these registers at certain agreed-upon points.
+ */
+static void __set_threshold_registers(struct venus_hfi_device *device)
+{
+	u32 version = __read_register(device, VIDC_WRAPPER_HW_VERSION);
+
+	version &= ~GENMASK(15, 0);
+	if (version != (0x3 << 28 | 0x43 << 16))
+		return;
+
+	if (__tzbsp_set_video_state(TZBSP_VIDEO_STATE_RESTORE_THRESHOLD))
+		dprintk(VIDC_ERR, "Failed to restore threshold values\n");
+}
+
+static void __iommu_detach(struct venus_hfi_device *device)
+{
+	struct context_bank_info *cb;
+
+	if (!device || !device->res) {
+		dprintk(VIDC_ERR, "Invalid parameter: %pK\n", device);
+		return;
+	}
+
+	list_for_each_entry(cb, &device->res->context_banks, list) {
+		if (cb->dev)
+			arm_iommu_detach_device(cb->dev);
+		if (cb->mapping)
+			arm_iommu_release_mapping(cb->mapping);
+	}
+}
+
+static bool __is_session_supported(unsigned long sessions_supported,
+		enum vidc_vote_data_session session_type)
+{
+	bool same_codec, same_session_type;
+	int codec_bit, session_type_bit;
+	unsigned long session = session_type;
+
+	if (!sessions_supported || !session)
+		return false;
+
+	/* ffs returns a 1 indexed, test_bit takes a 0 indexed...index */
+	codec_bit = ffs(session) - 1;
+	session_type_bit = codec_bit + 1;
+
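+	/*
+	 * Each session descriptor is assumed to pack a codec bit with the
+	 * adjacent session-type (encoder/decoder) bit; the session is
+	 * supported only if both bits match the supported mask.
+	 */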
+	same_codec = test_bit(codec_bit, &sessions_supported) ==
+		test_bit(codec_bit, &session);
+	same_session_type = test_bit(session_type_bit, &sessions_supported) ==
+		test_bit(session_type_bit, &session);
+
+	return same_codec && same_session_type;
+}
+
+bool venus_hfi_is_session_supported(unsigned long sessions_supported,
+		enum vidc_vote_data_session session_type)
+{
+	return __is_session_supported(sessions_supported, session_type);
+}
+EXPORT_SYMBOL(venus_hfi_is_session_supported);
+
+static int __devfreq_target(struct device *devfreq_dev,
+		unsigned long *freq, u32 flags)
+{
+	int rc = 0;
+	uint64_t ab = 0;
+	struct bus_info *bus = NULL, *temp = NULL;
+	struct venus_hfi_device *device = dev_get_drvdata(devfreq_dev);
+
+	venus_hfi_for_each_bus(device, temp) {
+		if (temp->dev == devfreq_dev) {
+			bus = temp;
+			break;
+		}
+	}
+
+	if (!bus) {
+		rc = -EBADHANDLE;
+		goto err_unknown_device;
+	}
+
+	/*
+	 * Clamp all non-zero frequencies. This clamp is necessary to stop the
+	 * devfreq driver from spamming "Couldn't update frequency" logs when
+	 * the scaled ab value is not part of the frequency table.
+	 */
+	if (*freq)
+		*freq = clamp_t(typeof(*freq), *freq, bus->range[0],
+				bus->range[1]);
+
+	/* we expect governors to provide values in kBps form, convert to Bps */
+	ab = *freq * 1000;
+	rc = msm_bus_scale_update_bw(bus->client, ab, 0);
+	if (rc) {
+		dprintk(VIDC_ERR, "Failed voting bus %s to ab %llu\n: %d",
+				bus->name, ab, rc);
+		goto err_unknown_device;
+	}
+
+	dprintk(VIDC_PROF, "Voting bus %s to ab %llu\n", bus->name, ab);
+
+	return 0;
+err_unknown_device:
+	return rc;
+}
+
+static int __devfreq_get_status(struct device *devfreq_dev,
+		struct devfreq_dev_status *stat)
+{
+	int rc = 0;
+	struct bus_info *bus = NULL, *temp = NULL;
+	struct venus_hfi_device *device = dev_get_drvdata(devfreq_dev);
+
+	venus_hfi_for_each_bus(device, temp) {
+		if (temp->dev == devfreq_dev) {
+			bus = temp;
+			break;
+		}
+	}
+
+	if (!bus) {
+		rc = -EBADHANDLE;
+		goto err_unknown_device;
+	}
+
+	*stat = (struct devfreq_dev_status) {
+		.private_data = &device->bus_vote,
+		/*
+		 * Put in dummy placeholder values for upstream governors; our
+		 * custom governor only needs .private_data.  We should fill
+		 * these in properly once we can actually measure busy_time
+		 * accurately (which we can't at the moment).
+		 */
+		.total_time = 1,
+		.busy_time = 1,
+		.current_frequency = 0,
+	};
+
+err_unknown_device:
+	return rc;
+}
+
+static int __unvote_buses(struct venus_hfi_device *device)
+{
+	int rc = 0;
+	struct bus_info *bus = NULL;
+
+	venus_hfi_for_each_bus(device, bus) {
+		int local_rc = 0;
+		unsigned long zero = 0;
+
+		rc = devfreq_suspend_device(bus->devfreq);
+		if (rc)
+			goto err_unknown_device;
+
+		local_rc = __devfreq_target(bus->dev, &zero, 0);
+		rc = rc ?: local_rc;
+	}
+
+	if (rc)
+		dprintk(VIDC_WARN, "Failed to unvote some buses\n");
+
+err_unknown_device:
+	return rc;
+}
+
+static int __vote_buses(struct venus_hfi_device *device,
+		struct vidc_bus_vote_data *data, int num_data)
+{
+	int rc = 0;
+	struct bus_info *bus = NULL;
+	struct vidc_bus_vote_data *new_data = NULL;
+
+	if (!num_data) {
+		dprintk(VIDC_DBG, "No vote data available\n");
+		goto no_data_count;
+	} else if (!data) {
+		dprintk(VIDC_ERR, "Invalid voting data\n");
+		return -EINVAL;
+	}
+
+	new_data = kmemdup(data, num_data * sizeof(*new_data), GFP_KERNEL);
+	if (!new_data) {
+		dprintk(VIDC_ERR, "Can't alloc memory to cache bus votes\n");
+		rc = -ENOMEM;
+		goto err_no_mem;
+	}
+
+no_data_count:
+	kfree(device->bus_vote.data);
+	device->bus_vote.data = new_data;
+	device->bus_vote.data_count = num_data;
+	device->bus_vote.imem_size = device->res->imem_size;
+
+	venus_hfi_for_each_bus(device, bus) {
+		if (bus && bus->devfreq) {
+			/* NOP if already resumed */
+			rc = devfreq_resume_device(bus->devfreq);
+			if (rc)
+				goto err_no_mem;
+
+			/* Kick devfreq awake in case _resume() didn't do it */
+			bus->devfreq->nb.notifier_call(
+				&bus->devfreq->nb, 0, NULL);
+		}
+	}
+
+err_no_mem:
+	return rc;
+}
+
+static int venus_hfi_vote_buses(void *dev, struct vidc_bus_vote_data *d, int n)
+{
+	int rc = 0;
+	struct venus_hfi_device *device = dev;
+
+	if (!device)
+		return -EINVAL;
+
+	mutex_lock(&device->lock);
+	rc = __vote_buses(device, d, n);
+	mutex_unlock(&device->lock);
+
+	return rc;
+
+}
+static int __core_set_resource(struct venus_hfi_device *device,
+		struct vidc_resource_hdr *resource_hdr, void *resource_value)
+{
+	struct hfi_cmd_sys_set_resource_packet *pkt;
+	u8 packet[VIDC_IFACEQ_VAR_SMALL_PKT_SIZE];
+	int rc = 0;
+
+	if (!device || !resource_hdr || !resource_value) {
+		dprintk(VIDC_ERR, "set_res: Invalid Params\n");
+		return -EINVAL;
+	}
+
+	pkt = (struct hfi_cmd_sys_set_resource_packet *) packet;
+
+	rc = call_hfi_pkt_op(device, sys_set_resource,
+			pkt, resource_hdr, resource_value);
+	if (rc) {
+		dprintk(VIDC_ERR, "set_res: failed to create packet\n");
+		goto err_create_pkt;
+	}
+
+	rc = __iface_cmdq_write(device, pkt);
+	if (rc)
+		rc = -ENOTEMPTY;
+
+err_create_pkt:
+	return rc;
+}
+
+static int __alloc_imem(struct venus_hfi_device *device, unsigned long size)
+{
+	struct imem *imem = NULL;
+	int rc = 0;
+
+	if (!device)
+		return -EINVAL;
+
+	imem = &device->resources.imem;
+	if (imem->type) {
+		dprintk(VIDC_ERR, "IMEM of type %d already allocated\n",
+				imem->type);
+		return -ENOMEM;
+	}
+
+	switch (device->res->imem_type) {
+	case IMEM_VMEM:
+	{
+		phys_addr_t vmem_buffer = 0;
+
+		rc = vmem_allocate(size, &vmem_buffer);
+		if (rc) {
+			if (rc == -ENOTSUPP) {
+				dprintk(VIDC_DBG,
+					"Target does not support vmem\n");
+				rc = 0;
+			}
+			goto imem_alloc_failed;
+		} else if (!vmem_buffer) {
+			rc = -ENOMEM;
+			goto imem_alloc_failed;
+		}
+
+		imem->vmem = vmem_buffer;
+		break;
+	}
+	case IMEM_NONE:
+		rc = 0;
+		break;
+
+	default:
+		rc = -ENOTSUPP;
+		goto imem_alloc_failed;
+	}
+
+	imem->type = device->res->imem_type;
+	dprintk(VIDC_DBG, "Allocated %ld bytes of IMEM of type %d\n", size,
+			imem->type);
+	return 0;
+imem_alloc_failed:
+	imem->type = IMEM_NONE;
+	return rc;
+}
+
+static int __free_imem(struct venus_hfi_device *device)
+{
+	struct imem *imem = NULL;
+	int rc = 0;
+
+	if (!device)
+		return -EINVAL;
+
+	imem = &device->resources.imem;
+	switch (imem->type) {
+	case IMEM_NONE:
+		/* Follow the semantics of free(NULL), which is a no-op. */
+		break;
+	case IMEM_VMEM:
+		vmem_free(imem->vmem);
+		break;
+	default:
+		rc = -ENOTSUPP;
+		goto imem_free_failed;
+	}
+
+	imem->type = IMEM_NONE;
+	return 0;
+
+imem_free_failed:
+	return rc;
+}
+
+static int __set_imem(struct venus_hfi_device *device, struct imem *imem)
+{
+	struct vidc_resource_hdr rhdr;
+	phys_addr_t addr = 0;
+	int rc = 0;
+
+	if (!device || !device->res || !imem) {
+		dprintk(VIDC_ERR, "Invalid params, core: %pK, imem: %pK\n",
+			device, imem);
+		return -EINVAL;
+	}
+
+	rhdr.resource_handle = imem; /* cookie */
+	rhdr.size = device->res->imem_size;
+	rhdr.resource_id = VIDC_RESOURCE_NONE;
+
+	switch (imem->type) {
+	case IMEM_VMEM:
+		rhdr.resource_id = VIDC_RESOURCE_VMEM;
+		addr = imem->vmem;
+		break;
+	case IMEM_NONE:
+		dprintk(VIDC_DBG, "%s Target does not support IMEM", __func__);
+		rc = 0;
+		goto imem_set_failed;
+	default:
+		dprintk(VIDC_ERR, "IMEM of type %d unsupported\n", imem->type);
+		rc = -ENOTSUPP;
+		goto imem_set_failed;
+	}
+
+	BUG_ON(!addr);
+
+	rc = __core_set_resource(device, &rhdr, (void *)addr);
+	if (rc) {
+		dprintk(VIDC_ERR, "Failed to set IMEM on driver\n");
+		goto imem_set_failed;
+	}
+
+	dprintk(VIDC_DBG,
+			"Managed to set IMEM buffer of type %d sized %d bytes at %pa\n",
+			rhdr.resource_id, rhdr.size, &addr);
+
+	rc = __vote_buses(device, device->bus_vote.data,
+			device->bus_vote.data_count);
+	if (rc) {
+		dprintk(VIDC_ERR,
+				"Failed to vote for buses after setting imem: %d\n",
+				rc);
+	}
+
+imem_set_failed:
+	return rc;
+}
+
+static int __tzbsp_set_video_state(enum tzbsp_video_state state)
+{
+	struct tzbsp_video_set_state_req cmd = {0};
+	int tzbsp_rsp = 0;
+	int rc = 0;
+	struct scm_desc desc = {0};
+
+	desc.args[0] = cmd.state = state;
+	desc.args[1] = cmd.spare = 0;
+	desc.arginfo = SCM_ARGS(2);
+
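+	/*
+	 * Older targets use the legacy two-buffer scm_call() interface;
+	 * ARMv8 SCM targets use the scm_call2() SMC calling convention,
+	 * with the TZ response returned in desc.ret[0].
+	 */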
+	if (!is_scm_armv8()) {
+		rc = scm_call(SCM_SVC_BOOT, TZBSP_VIDEO_SET_STATE, &cmd,
+				sizeof(cmd), &tzbsp_rsp, sizeof(tzbsp_rsp));
+	} else {
+		rc = scm_call2(SCM_SIP_FNID(SCM_SVC_BOOT,
+				TZBSP_VIDEO_SET_STATE), &desc);
+		tzbsp_rsp = desc.ret[0];
+	}
+
+	if (rc) {
+		dprintk(VIDC_ERR, "Failed scm_call %d\n", rc);
+		return rc;
+	}
+
+	dprintk(VIDC_DBG, "Set state %d, resp %d\n", state, tzbsp_rsp);
+	if (tzbsp_rsp) {
+		dprintk(VIDC_ERR,
+				"Failed to set video core state to suspend: %d\n",
+				tzbsp_rsp);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static inline int __boot_firmware(struct venus_hfi_device *device)
+{
+	int rc = 0;
+	u32 ctrl_status = 0, count = 0, max_tries = 100;
+
+	__write_register(device, VIDC_CTRL_INIT, 0x1);
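+	/*
+	 * Poll the control status register until the firmware reports init
+	 * complete, giving up after max_tries iterations (roughly 50-100 ms
+	 * with the usleep_range() below).
+	 */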
+	while (!ctrl_status && count < max_tries) {
+		ctrl_status = __read_register(device, VIDC_CPU_CS_SCIACMDARG0);
+		if ((ctrl_status & 0xFE) == 0x4) {
+			dprintk(VIDC_ERR, "invalid setting for UC_REGION\n");
+			break;
+		}
+
+		usleep_range(500, 1000);
+		count++;
+	}
+
+	if (count >= max_tries) {
+		dprintk(VIDC_ERR, "Error booting up vidc firmware\n");
+		rc = -ETIME;
+	}
+	return rc;
+}
+
+static struct clock_info *__get_clock(struct venus_hfi_device *device,
+		char *name)
+{
+	struct clock_info *vc;
+
+	venus_hfi_for_each_clock(device, vc) {
+		if (!strcmp(vc->name, name))
+			return vc;
+	}
+
+	dprintk(VIDC_WARN, "%s Clock %s not found\n", __func__, name);
+
+	return NULL;
+}
+
+static unsigned long __get_clock_rate(struct clock_info *clock,
+	int num_mbs_per_sec, struct vidc_clk_scale_data *data)
+{
+	int num_rows = clock->count;
+	struct load_freq_table *table = clock->load_freq_tbl;
+	unsigned long freq = table[0].freq, max_freq = 0;
+	int i = 0, j = 0;
+	unsigned long instance_freq[VIDC_MAX_SESSIONS] = {0};
+
+	if (!data && !num_rows) {
+		freq = 0;
+		goto print_clk;
+	}
+
+	if ((!num_mbs_per_sec || !data) && num_rows) {
+		freq = table[num_rows - 1].freq;
+		goto print_clk;
+	}
+
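+	/*
+	 * The load/frequency table is assumed to be sorted by decreasing
+	 * load: walk it until the requested load exceeds a row's load, and
+	 * keep the per-session frequency of the last matching row.
+	 */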
+	for (i = 0; i < num_rows; i++) {
+		if (num_mbs_per_sec > table[i].load)
+			break;
+		for (j = 0; j < data->num_sessions; j++) {
+			bool matches = __is_session_supported(
+				table[i].supported_codecs, data->session[j]);
+
+			if (!matches)
+				continue;
+			instance_freq[j] = table[i].freq;
+		}
+	}
+	for (i = 0; i < data->num_sessions; i++)
+		max_freq = max(instance_freq[i], max_freq);
+
+	freq = max_freq ? : freq;
+print_clk:
+	dprintk(VIDC_PROF, "Required clock rate = %lu num_mbs_per_sec %d\n",
+					freq, num_mbs_per_sec);
+	return freq;
+}
+
+static unsigned long __get_clock_rate_with_bitrate(struct clock_info *clock,
+		int num_mbs_per_sec, struct vidc_clk_scale_data *data,
+		unsigned long instant_bitrate)
+{
+	int num_rows = clock->count;
+	struct load_freq_table *table = clock->load_freq_tbl;
+	unsigned long freq = table[0].freq, max_freq = 0;
+	unsigned long base_freq, supported_clk[VIDC_MAX_SESSIONS] = {0};
+	int i, j;
+
+	if (!data && !num_rows) {
+		freq = 0;
+		goto print_clk;
+	}
+	if ((!num_mbs_per_sec || !data) && num_rows) {
+		freq = table[num_rows - 1].freq;
+		goto print_clk;
+	}
+
+	/* Get clock rate based on current load only */
+	base_freq = __get_clock_rate(clock, num_mbs_per_sec, data);
+
+	/*
+	 * Supported bitrate = 40% of clock frequency
+	 * Check if the instant bitrate is supported by the base frequency.
+	 * If not, move on to the next frequency which supports the bitrate.
+	 */
+
+	for (j = 0; j < data->num_sessions; j++) {
+		unsigned long supported_bitrate = 0;
+
+		for (i = num_rows - 1; i >= 0; i--) {
+			bool matches = __is_session_supported(
+				table[i].supported_codecs, data->session[j]);
+
+			if (!matches)
+				continue;
+			freq = table[i].freq;
+
+			supported_bitrate = freq * 40 / 100;
+			/*
+			 * Store this frequency for each instance, we need
+			 * to select the maximum freq among all the instances.
+			 */
+			if (freq >= base_freq &&
+				supported_bitrate >= instant_bitrate) {
+				supported_clk[j] = freq;
+				break;
+			}
+		}
+
+		/*
+		 * Current bitrate is higher than max supported load.
+		 * Select max frequency to handle this load.
+		 */
+		if (i < 0)
+			supported_clk[j] = table[0].freq;
+	}
+
+	for (i = 0; i < data->num_sessions; i++)
+		max_freq = max(supported_clk[i], max_freq);
+
+	freq = max_freq ? : base_freq;
+
+	if (base_freq == freq)
+		dprintk(VIDC_DBG, "Stay at base freq: %lu bitrate = %lu\n",
+			freq, instant_bitrate);
+	else
+		dprintk(VIDC_DBG, "Move up clock freq: %lu bitrate = %lu\n",
+			freq, instant_bitrate);
+print_clk:
+	dprintk(VIDC_PROF, "Required clock rate = %lu num_mbs_per_sec %d\n",
+					freq, num_mbs_per_sec);
+	return freq;
+}
+
+static unsigned long venus_hfi_get_core_clock_rate(void *dev, bool actual_rate)
+{
+	struct venus_hfi_device *device = (struct venus_hfi_device *) dev;
+	struct clock_info *vc;
+
+	if (!device) {
+		dprintk(VIDC_ERR, "%s Invalid args: %pK\n", __func__, device);
+		return -EINVAL;
+	}
+
+	if (actual_rate) {
+		vc = __get_clock(device, "core_clk");
+		if (vc)
+			return clk_get_rate(vc->clk);
+		else
+			return 0;
+	} else {
+		return device->scaled_rate;
+	}
+}
+
+static int venus_hfi_suspend(void *dev)
+{
+	int rc = 0;
+	struct venus_hfi_device *device = (struct venus_hfi_device *) dev;
+
+	if (!device) {
+		dprintk(VIDC_ERR, "%s invalid device\n", __func__);
+		return -EINVAL;
+	} else if (!device->res->sw_power_collapsible) {
+		return -ENOTSUPP;
+	}
+
+	dprintk(VIDC_DBG, "Suspending Venus\n");
+	flush_delayed_work(&venus_hfi_pm_work);
+
+	mutex_lock(&device->lock);
+	if (device->power_enabled)
+		rc = -EBUSY;
+	mutex_unlock(&device->lock);
+	return rc;
+}
+
+static int venus_hfi_flush_debug_queue(void *dev)
+{
+	int rc = 0;
+	struct venus_hfi_device *device = (struct venus_hfi_device *) dev;
+
+	if (!device) {
+		dprintk(VIDC_ERR, "%s invalid device\n", __func__);
+		return -EINVAL;
+	}
+
+	mutex_lock(&device->lock);
+
+	if (!device->power_enabled) {
+		dprintk(VIDC_WARN, "%s: Venus is powered off\n", __func__);
+		rc = -EINVAL;
+		goto exit;
+	}
+	__flush_debug_queue(device, NULL);
+exit:
+	mutex_unlock(&device->lock);
+	return rc;
+}
+
+static enum hal_default_properties venus_hfi_get_default_properties(void *dev)
+{
+	enum hal_default_properties prop = 0;
+	struct venus_hfi_device *device = (struct venus_hfi_device *) dev;
+
+	if (!device) {
+		dprintk(VIDC_ERR, "%s invalid device\n", __func__);
+		return -EINVAL;
+	}
+
+	mutex_lock(&device->lock);
+
+	if (device->packetization_type == HFI_PACKETIZATION_3XX)
+		prop = HAL_VIDEO_DYNAMIC_BUF_MODE;
+
+	mutex_unlock(&device->lock);
+	return prop;
+}
+
+static int __halt_axi(struct venus_hfi_device *device)
+{
+	u32 reg;
+	int rc = 0;
+	if (!device) {
+		dprintk(VIDC_ERR, "Invalid input: %pK\n", device);
+		return -EINVAL;
+	}
+
+	/*
+	 * The driver needs to make sure that clocks are enabled to read the
+	 * Venus AXI registers. If they are not, skip the AXI halt.
+	 */
+	if (!device->power_enabled) {
+		dprintk(VIDC_WARN,
+			"Clocks are OFF, skipping AXI HALT\n");
+		WARN_ON(VIDC_DBG_WARN_ENABLE);
+		return -EINVAL;
+	}
+
+	/* Halt AXI and AXI IMEM VBIF Access */
+	reg = __read_register(device, VENUS_VBIF_AXI_HALT_CTRL0);
+	reg |= VENUS_VBIF_AXI_HALT_CTRL0_HALT_REQ;
+	__write_register(device, VENUS_VBIF_AXI_HALT_CTRL0, reg);
+
+	/* Request for AXI bus port halt */
+	rc = readl_poll_timeout(device->hal_data->register_base
+			+ VENUS_VBIF_AXI_HALT_CTRL1,
+			reg, reg & VENUS_VBIF_AXI_HALT_CTRL1_HALT_ACK,
+			POLL_INTERVAL_US,
+			VENUS_VBIF_AXI_HALT_ACK_TIMEOUT_US);
+	if (rc)
+		dprintk(VIDC_WARN, "AXI bus port halt timeout\n");
+
+	return rc;
+}
+
+static int __set_clk_rate(struct venus_hfi_device *device,
+		struct clock_info *cl, u64 rate) {
+	int rc = 0, rc1 = 0;
+	u64 toggle_freq = device->res->clk_freq_threshold;
+	struct cx_ipeak_client *ipeak = device->res->cx_ipeak_context;
+	struct clk *clk = cl->clk;
+
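+	/*
+	 * Vote with the CX iPeak limiter before scaling up across the
+	 * threshold frequency, and drop the vote after scaling back below
+	 * it, so the peak-current manager tracks the video core's demand.
+	 */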
+	if (device->clk_freq < toggle_freq && rate >= toggle_freq) {
+		rc1 = cx_ipeak_update(ipeak, true);
+		dprintk(VIDC_PROF, "Voting up: %d\n", rc);
+	}
+
+	rc = clk_set_rate(clk, rate);
+	if (rc)
+		dprintk(VIDC_ERR,
+			"%s: Failed to set clock rate %llu %s: %d\n",
+			__func__, rate, cl->name, rc);
+
+	if (device->clk_freq >= toggle_freq && rate < toggle_freq) {
+		rc1 = cx_ipeak_update(ipeak, false);
+		dprintk(VIDC_PROF, "Voting down: %d\n", rc);
+	}
+
+	if (rc1)
+		dprintk(VIDC_ERR,
+			"cx_ipeak_update failed! ipeak %pK\n", ipeak);
+
+	if (!rc)
+		device->clk_freq = rate;
+
+	return rc;
+}
+
+static int __scale_clocks_cycles_per_mb(struct venus_hfi_device *device,
+		struct vidc_clk_scale_data *data, unsigned long instant_bitrate)
+{
+	int rc = 0, i = 0, j = 0;
+	struct clock_info *cl;
+	struct clock_freq_table *clk_freq_tbl = NULL;
+	struct allowed_clock_rates_table *allowed_clks_tbl = NULL;
+	struct clock_profile_entry *entry = NULL;
+	u64 total_freq = 0, rate = 0;
+
+	clk_freq_tbl = &device->res->clock_freq_tbl;
+	allowed_clks_tbl = device->res->allowed_clks_tbl;
+
+	if (!data) {
+		dprintk(VIDC_DBG, "%s: NULL scale data\n", __func__);
+		total_freq = device->clk_freq;
+		goto get_clock_freq;
+	}
+
+	device->clk_bitrate = instant_bitrate;
+
+	for (i = 0; i < data->num_sessions; i++) {
+		/*
+		 * for each active session iterate through all possible
+		 * sessions and get matching session's cycles per mb
+		 * from dtsi and multiply with the session's load to
+		 * get the frequency required for the session.
+		 * accumulate all session's frequencies to get the
+		 * total clock frequency.
+		 */
+		for (j = 0; j < clk_freq_tbl->count; j++) {
+			bool matched = false;
+			u64 freq = 0;
+
+			entry = &clk_freq_tbl->clk_prof_entries[j];
+
+			matched = __is_session_supported(entry->codec_mask,
+					data->session[i]);
+			if (!matched)
+				continue;
+
+			freq = entry->cycles * data->load[i];
+
+			if (data->power_mode[i] == VIDC_POWER_LOW &&
+					entry->low_power_factor) {
+				/* low_power_factor is in Q16 format */
+				freq = (freq * entry->low_power_factor) >> 16;
+			}
+
+			total_freq += freq;
+
+			dprintk(VIDC_DBG,
+				"session[%d] %#x: cycles (%d), load (%d), freq (%llu), factor (%d)\n",
+				i, data->session[i], entry->cycles,
+				data->load[i], freq,
+				entry->low_power_factor);
+		}
+	}
+
+get_clock_freq:
+	/*
+	 * Get the required clock rate from the allowed clock rates table,
+	 * which is assumed to be sorted in descending order: walk up from
+	 * the lowest rate and pick the smallest one that satisfies the
+	 * total required frequency.
+	 */
+	for (i = device->res->allowed_clks_tbl_size - 1; i >= 0; i--) {
+		rate = allowed_clks_tbl[i].clock_rate;
+		if (rate >= total_freq)
+			break;
+	}
+
+	venus_hfi_for_each_clock(device, cl) {
+		if (!cl->has_scaling)
+			continue;
+
+		rc = __set_clk_rate(device, cl, rate);
+		if (rc)
+			return rc;
+
+		if (!strcmp(cl->name, "core_clk"))
+			device->scaled_rate = rate;
+
+		dprintk(VIDC_DBG,
+			"scaling clock %s to %llu (required freq %llu)\n",
+			cl->name, rate, total_freq);
+	}
+
+	return rc;
+}
+
+static int __scale_clocks_load(struct venus_hfi_device *device, int load,
+		struct vidc_clk_scale_data *data, unsigned long instant_bitrate)
+{
+	struct clock_info *cl;
+
+	device->clk_bitrate = instant_bitrate;
+
+	venus_hfi_for_each_clock(device, cl) {
+		if (cl->has_scaling) {
+
+			unsigned long rate = 0;
+			int rc;
+			/*
+			 * load_fw and power_on need to be handled
+			 * differently; the check below enforces that.
+			 */
+			if (!device->clk_bitrate && !data && !load &&
+				device->clk_freq)
+				rate = device->clk_freq;
+
+			if (!rate) {
+				if (!device->clk_bitrate)
+					rate = __get_clock_rate(cl, load,
+							data);
+				else
+					rate = __get_clock_rate_with_bitrate(cl,
+							load, data,
+							instant_bitrate);
+			}
+
+			rc = __set_clk_rate(device, cl, rate);
+			if (rc)
+				return rc;
+
+			if (!strcmp(cl->name, "core_clk"))
+				device->scaled_rate = rate;
+
+			dprintk(VIDC_PROF, "Scaling clock %s to %lu\n",
+					cl->name, rate);
+		}
+	}
+
+	return 0;
+}
+
+static int __scale_clocks(struct venus_hfi_device *device,
+		int load, struct vidc_clk_scale_data *data,
+		unsigned long instant_bitrate)
+{
+	int rc = 0;
+
+	if (device->res->clock_freq_tbl.clk_prof_entries &&
+			device->res->allowed_clks_tbl)
+		rc = __scale_clocks_cycles_per_mb(device,
+				data, instant_bitrate);
+	else if (device->res->load_freq_tbl)
+		rc = __scale_clocks_load(device, load, data, instant_bitrate);
+	else
+		dprintk(VIDC_DBG, "Clock scaling is not supported\n");
+
+	return rc;
+}
+
+static int venus_hfi_scale_clocks(void *dev, int load,
+					struct vidc_clk_scale_data *data,
+					unsigned long instant_bitrate)
+{
+	int rc = 0;
+	struct venus_hfi_device *device = dev;
+
+	if (!device) {
+		dprintk(VIDC_ERR, "Invalid args: %pK\n", device);
+		return -EINVAL;
+	}
+
+	mutex_lock(&device->lock);
+
+	if (__resume(device)) {
+		dprintk(VIDC_ERR, "Resume from power collapse failed\n");
+		rc = -ENODEV;
+		goto exit;
+	}
+
+	rc = __scale_clocks(device, load, data, instant_bitrate);
+exit:
+	mutex_unlock(&device->lock);
+	return rc;
+}
+
+static void __save_clock_rate(struct venus_hfi_device *device, bool reset)
+{
+	struct clock_info *cl;
+
+	venus_hfi_for_each_clock(device, cl) {
+		if (cl->has_scaling) {
+			cl->rate_on_enable =
+				reset ? 0 : clk_get_rate(cl->clk);
+			dprintk(VIDC_PROF, "Saved clock %s rate %lu\n",
+					cl->name, cl->rate_on_enable);
+		}
+	}
+}
+
+static void __restore_clock_rate(struct venus_hfi_device *device)
+{
+	struct clock_info *cl;
+
+	venus_hfi_for_each_clock(device, cl) {
+		if (cl->has_scaling && cl->rate_on_enable) {
+			int rc;
+
+			rc = __set_clk_rate(device, cl, cl->rate_on_enable);
+			if (rc)
+				dprintk(VIDC_ERR,
+				"Failed to restore clock %s rate %lu\n",
+					cl->name, cl->rate_on_enable);
+			else
+				dprintk(VIDC_DBG,
+					"Restored clock %s rate %lu\n",
+					cl->name, cl->rate_on_enable);
+		}
+	}
+}
+
+/* Writes into cmdq without raising an interrupt */
+static int __iface_cmdq_write_relaxed(struct venus_hfi_device *device,
+		void *pkt, bool *requires_interrupt)
+{
+	struct vidc_iface_q_info *q_info;
+	struct vidc_hal_cmd_pkt_hdr *cmd_packet;
+	int result = -E2BIG;
+
+	if (!device || !pkt) {
+		dprintk(VIDC_ERR, "Invalid Params\n");
+		return -EINVAL;
+	}
+
+	__strict_check(device);
+
+	if (!__core_in_valid_state(device)) {
+		dprintk(VIDC_DBG, "%s - fw not in init state\n", __func__);
+		result = -EINVAL;
+		goto err_q_null;
+	}
+
+	cmd_packet = (struct vidc_hal_cmd_pkt_hdr *)pkt;
+	device->last_packet_type = cmd_packet->packet_type;
+
+	q_info = &device->iface_queues[VIDC_IFACEQ_CMDQ_IDX];
+	if (!q_info) {
+		dprintk(VIDC_ERR, "cannot write to shared Q's\n");
+		goto err_q_null;
+	}
+
+	if (!q_info->q_array.align_virtual_addr) {
+		dprintk(VIDC_ERR, "cannot write to shared CMD Q's\n");
+		result = -ENODATA;
+		goto err_q_null;
+	}
+
+	__sim_modify_cmd_packet((u8 *)pkt, device);
+	if (__resume(device)) {
+		dprintk(VIDC_ERR, "%s: Power on failed\n", __func__);
+		goto err_q_write;
+	}
+
+	if (!__write_queue(q_info, (u8 *)pkt, requires_interrupt)) {
+		if (device->res->sw_power_collapsible) {
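+			/*
+			 * Every successful command write re-arms the delayed
+			 * power-collapse work, so Venus is only suspended
+			 * after msm_vidc_pwr_collapse_delay msecs of
+			 * command-queue inactivity.
+			 */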
+			cancel_delayed_work(&venus_hfi_pm_work);
+			if (!queue_delayed_work(device->venus_pm_workq,
+				&venus_hfi_pm_work,
+				msecs_to_jiffies(
+				msm_vidc_pwr_collapse_delay))) {
+				dprintk(VIDC_DBG,
+				"PM work already scheduled\n");
+			}
+		}
+
+		result = 0;
+	} else {
+		dprintk(VIDC_ERR, "__iface_cmdq_write: queue full\n");
+	}
+
+err_q_write:
+err_q_null:
+	return result;
+}
+
+static int __iface_cmdq_write(struct venus_hfi_device *device, void *pkt)
+{
+	bool needs_interrupt = false;
+	int rc = __iface_cmdq_write_relaxed(device, pkt, &needs_interrupt);
+
+	if (!rc && needs_interrupt) {
+		/* Consumer of cmdq prefers that we raise an interrupt */
+		rc = 0;
+		__write_register(device, VIDC_CPU_IC_SOFTINT,
+				1 << VIDC_CPU_IC_SOFTINT_H2A_SHFT);
+	}
+
+	return rc;
+}
+
+static int __iface_msgq_read(struct venus_hfi_device *device, void *pkt)
+{
+	u32 tx_req_is_set = 0;
+	int rc = 0;
+	struct vidc_iface_q_info *q_info;
+
+	if (!pkt) {
+		dprintk(VIDC_ERR, "Invalid Params\n");
+		return -EINVAL;
+	}
+
+	__strict_check(device);
+
+	if (!__core_in_valid_state(device)) {
+		dprintk(VIDC_DBG, "%s - fw not in init state\n", __func__);
+		rc = -EINVAL;
+		goto read_error_null;
+	}
+
+	if (device->iface_queues[VIDC_IFACEQ_MSGQ_IDX].
+		q_array.align_virtual_addr == 0) {
+		dprintk(VIDC_ERR, "cannot read from shared MSG Q's\n");
+		rc = -ENODATA;
+		goto read_error_null;
+	}
+
+	q_info = &device->iface_queues[VIDC_IFACEQ_MSGQ_IDX];
+	if (!__read_queue(q_info, (u8 *)pkt, &tx_req_is_set)) {
+		__hal_sim_modify_msg_packet((u8 *)pkt, device);
+		if (tx_req_is_set)
+			__write_register(device, VIDC_CPU_IC_SOFTINT,
+				1 << VIDC_CPU_IC_SOFTINT_H2A_SHFT);
+		rc = 0;
+	} else
+		rc = -ENODATA;
+
+read_error_null:
+	return rc;
+}
+
+static int __iface_dbgq_read(struct venus_hfi_device *device, void *pkt)
+{
+	u32 tx_req_is_set = 0;
+	int rc = 0;
+	struct vidc_iface_q_info *q_info;
+
+	if (!pkt) {
+		dprintk(VIDC_ERR, "Invalid Params\n");
+		return -EINVAL;
+	}
+
+	__strict_check(device);
+
+	if (!__core_in_valid_state(device)) {
+		dprintk(VIDC_DBG, "%s - fw not in init state\n", __func__);
+		rc = -EINVAL;
+		goto dbg_error_null;
+	}
+
+	if (device->iface_queues[VIDC_IFACEQ_DBGQ_IDX].
+		q_array.align_virtual_addr == 0) {
+		dprintk(VIDC_ERR, "cannot read from shared DBG Q's\n");
+		rc = -ENODATA;
+		goto dbg_error_null;
+	}
+
+	q_info = &device->iface_queues[VIDC_IFACEQ_DBGQ_IDX];
+	if (!__read_queue(q_info, (u8 *)pkt, &tx_req_is_set)) {
+		if (tx_req_is_set)
+			__write_register(device, VIDC_CPU_IC_SOFTINT,
+				1 << VIDC_CPU_IC_SOFTINT_H2A_SHFT);
+		rc = 0;
+	} else
+		rc = -ENODATA;
+
+dbg_error_null:
+	return rc;
+}
+
+static void __set_queue_hdr_defaults(struct hfi_queue_header *q_hdr)
+{
+	q_hdr->qhdr_status = 0x1;
+	q_hdr->qhdr_type = VIDC_IFACEQ_DFLT_QHDR;
+	q_hdr->qhdr_q_size = VIDC_IFACEQ_QUEUE_SIZE / 4;
+	q_hdr->qhdr_pkt_size = 0;
+	q_hdr->qhdr_rx_wm = 0x1;
+	q_hdr->qhdr_tx_wm = 0x1;
+	q_hdr->qhdr_rx_req = 0x1;
+	q_hdr->qhdr_tx_req = 0x0;
+	q_hdr->qhdr_rx_irq_status = 0x0;
+	q_hdr->qhdr_tx_irq_status = 0x0;
+	q_hdr->qhdr_read_idx = 0x0;
+	q_hdr->qhdr_write_idx = 0x0;
+}
+
+static void __interface_queues_release(struct venus_hfi_device *device)
+{
+	int i;
+	struct hfi_mem_map_table *qdss;
+	struct hfi_mem_map *mem_map;
+	int num_entries = device->res->qdss_addr_set.count;
+	unsigned long mem_map_table_base_addr;
+	struct context_bank_info *cb;
+
+	if (device->qdss.mem_data) {
+		qdss = (struct hfi_mem_map_table *)
+			device->qdss.align_virtual_addr;
+		qdss->mem_map_num_entries = num_entries;
+		mem_map_table_base_addr =
+			device->qdss.align_device_addr +
+			sizeof(struct hfi_mem_map_table);
+		qdss->mem_map_table_base_addr =
+			(u32)mem_map_table_base_addr;
+		if ((unsigned long)qdss->mem_map_table_base_addr !=
+			mem_map_table_base_addr) {
+			dprintk(VIDC_ERR,
+				"Invalid mem_map_table_base_addr %#lx",
+				mem_map_table_base_addr);
+		}
+
+		mem_map = (struct hfi_mem_map *)(qdss + 1);
+		cb = msm_smem_get_context_bank(device->hal_client,
+					false, HAL_BUFFER_INTERNAL_CMD_QUEUE);
+
+		for (i = 0; cb && i < num_entries; i++) {
+			iommu_unmap(cb->mapping->domain,
+						mem_map[i].virtual_addr,
+						mem_map[i].size);
+		}
+
+		__smem_free(device, device->qdss.mem_data);
+	}
+
+	__smem_free(device, device->iface_q_table.mem_data);
+	__smem_free(device, device->sfr.mem_data);
+
+	for (i = 0; i < VIDC_IFACEQ_NUMQ; i++) {
+		device->iface_queues[i].q_hdr = NULL;
+		device->iface_queues[i].q_array.mem_data = NULL;
+		device->iface_queues[i].q_array.align_virtual_addr = NULL;
+		device->iface_queues[i].q_array.align_device_addr = 0;
+	}
+
+	device->iface_q_table.mem_data = NULL;
+	device->iface_q_table.align_virtual_addr = NULL;
+	device->iface_q_table.align_device_addr = 0;
+
+	device->qdss.mem_data = NULL;
+	device->qdss.align_virtual_addr = NULL;
+	device->qdss.align_device_addr = 0;
+
+	device->sfr.mem_data = NULL;
+	device->sfr.align_virtual_addr = NULL;
+	device->sfr.align_device_addr = 0;
+
+	device->mem_addr.mem_data = NULL;
+	device->mem_addr.align_virtual_addr = NULL;
+	device->mem_addr.align_device_addr = 0;
+
+	msm_smem_delete_client(device->hal_client);
+	device->hal_client = NULL;
+}
+
+static int __get_qdss_iommu_virtual_addr(struct venus_hfi_device *dev,
+		struct hfi_mem_map *mem_map, struct dma_iommu_mapping *mapping)
+{
+	int i;
+	int rc = 0;
+	dma_addr_t iova = QDSS_IOVA_START;
+	int num_entries = dev->res->qdss_addr_set.count;
+	struct addr_range *qdss_addr_tbl = dev->res->qdss_addr_set.addr_tbl;
+
+	if (!num_entries)
+		return -ENODATA;
+
+	for (i = 0; i < num_entries; i++) {
+		if (mapping) {
+			rc = iommu_map(mapping->domain, iova,
+					qdss_addr_tbl[i].start,
+					qdss_addr_tbl[i].size,
+					IOMMU_READ | IOMMU_WRITE);
+
+			if (rc) {
+				dprintk(VIDC_ERR,
+						"IOMMU QDSS mapping failed for addr %#x\n",
+						qdss_addr_tbl[i].start);
+				rc = -ENOMEM;
+				break;
+			}
+		} else {
+			iova =  qdss_addr_tbl[i].start;
+		}
+
+		mem_map[i].virtual_addr = (u32)iova;
+		mem_map[i].physical_addr = qdss_addr_tbl[i].start;
+		mem_map[i].size = qdss_addr_tbl[i].size;
+		mem_map[i].attr = 0x0;
+
+		iova += mem_map[i].size;
+	}
+
+	if (i < num_entries) {
+		dprintk(VIDC_ERR,
+			"QDSS mapping failed, Freeing other entries %d\n", i);
+
+		for (--i; mapping && i >= 0; i--) {
+			iommu_unmap(mapping->domain,
+				mem_map[i].virtual_addr,
+				mem_map[i].size);
+		}
+	}
+
+	return rc;
+}
+
+static void __setup_ucregion_memory_map(struct venus_hfi_device *device)
+{
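+	/*
+	 * Program the uncached-region registers so the firmware knows where
+	 * the shared queue table (and, when present, the SFR and QDSS memory
+	 * map) live in its address space.
+	 */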
+	__write_register(device, VIDC_UC_REGION_ADDR,
+			(u32)device->iface_q_table.align_device_addr);
+	__write_register(device, VIDC_UC_REGION_SIZE, SHARED_QSIZE);
+	__write_register(device, VIDC_CPU_CS_SCIACMDARG2,
+			(u32)device->iface_q_table.align_device_addr);
+	__write_register(device, VIDC_CPU_CS_SCIACMDARG1, 0x01);
+	if (device->sfr.align_device_addr)
+		__write_register(device, VIDC_SFR_ADDR,
+				(u32)device->sfr.align_device_addr);
+	if (device->qdss.align_device_addr)
+		__write_register(device, VIDC_MMAP_ADDR,
+				(u32)device->qdss.align_device_addr);
+}
+
+static int __interface_queues_init(struct venus_hfi_device *dev)
+{
+	struct hfi_queue_table_header *q_tbl_hdr;
+	struct hfi_queue_header *q_hdr;
+	u32 i;
+	int rc = 0;
+	struct hfi_mem_map_table *qdss;
+	struct hfi_mem_map *mem_map;
+	struct vidc_iface_q_info *iface_q;
+	struct hfi_sfr_struct *vsfr;
+	struct vidc_mem_addr *mem_addr;
+	int offset = 0;
+	int num_entries = dev->res->qdss_addr_set.count;
+	u32 value = 0;
+	phys_addr_t fw_bias = 0;
+	size_t q_size;
+	unsigned long mem_map_table_base_addr;
+	struct context_bank_info *cb;
+
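+	/*
+	 * The shared region is carved up as: the interface queue table plus
+	 * the three queues first, then the QDSS memory map and the SFR
+	 * buffer, each allocated separately below.
+	 */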
+	q_size = SHARED_QSIZE - ALIGNED_SFR_SIZE - ALIGNED_QDSS_SIZE;
+	mem_addr = &dev->mem_addr;
+	if (!is_iommu_present(dev->res))
+		fw_bias = dev->hal_data->firmware_base;
+	rc = __smem_alloc(dev, mem_addr, q_size, 1, 0,
+			HAL_BUFFER_INTERNAL_CMD_QUEUE);
+	if (rc) {
+		dprintk(VIDC_ERR, "iface_q_table_alloc_fail\n");
+		goto fail_alloc_queue;
+	}
+
+	dev->iface_q_table.align_virtual_addr = mem_addr->align_virtual_addr;
+	dev->iface_q_table.align_device_addr = mem_addr->align_device_addr -
+					fw_bias;
+	dev->iface_q_table.mem_size = VIDC_IFACEQ_TABLE_SIZE;
+	dev->iface_q_table.mem_data = mem_addr->mem_data;
+	offset += dev->iface_q_table.mem_size;
+
+	for (i = 0; i < VIDC_IFACEQ_NUMQ; i++) {
+		iface_q = &dev->iface_queues[i];
+		iface_q->q_array.align_device_addr = mem_addr->align_device_addr
+			+ offset - fw_bias;
+		iface_q->q_array.align_virtual_addr =
+			mem_addr->align_virtual_addr + offset;
+		iface_q->q_array.mem_size = VIDC_IFACEQ_QUEUE_SIZE;
+		iface_q->q_array.mem_data = NULL;
+		offset += iface_q->q_array.mem_size;
+		iface_q->q_hdr = VIDC_IFACEQ_GET_QHDR_START_ADDR(
+				dev->iface_q_table.align_virtual_addr, i);
+		__set_queue_hdr_defaults(iface_q->q_hdr);
+	}
+
+	if ((msm_vidc_fw_debug_mode & HFI_DEBUG_MODE_QDSS) && num_entries) {
+		rc = __smem_alloc(dev, mem_addr,
+				ALIGNED_QDSS_SIZE, 1, 0,
+				HAL_BUFFER_INTERNAL_CMD_QUEUE);
+		if (rc) {
+			dprintk(VIDC_WARN,
+				"qdss_alloc_fail: QDSS messages logging will not work\n");
+			dev->qdss.align_device_addr = 0;
+		} else {
+			dev->qdss.align_device_addr =
+				mem_addr->align_device_addr - fw_bias;
+			dev->qdss.align_virtual_addr =
+				mem_addr->align_virtual_addr;
+			dev->qdss.mem_size = ALIGNED_QDSS_SIZE;
+			dev->qdss.mem_data = mem_addr->mem_data;
+		}
+	}
+
+	rc = __smem_alloc(dev, mem_addr,
+			ALIGNED_SFR_SIZE, 1, 0,
+			HAL_BUFFER_INTERNAL_CMD_QUEUE);
+	if (rc) {
+		dprintk(VIDC_WARN, "sfr_alloc_fail: SFR not will work\n");
+		dev->sfr.align_device_addr = 0;
+	} else {
+		dev->sfr.align_device_addr = mem_addr->align_device_addr -
+					fw_bias;
+		dev->sfr.align_virtual_addr = mem_addr->align_virtual_addr;
+		dev->sfr.mem_size = ALIGNED_SFR_SIZE;
+		dev->sfr.mem_data = mem_addr->mem_data;
+	}
+
+	q_tbl_hdr = (struct hfi_queue_table_header *)
+			dev->iface_q_table.align_virtual_addr;
+	q_tbl_hdr->qtbl_version = 0;
+	q_tbl_hdr->device_addr = (void *)dev;
+	strlcpy(q_tbl_hdr->name, "msm_v4l2_vidc", sizeof(q_tbl_hdr->name));
+	q_tbl_hdr->qtbl_size = VIDC_IFACEQ_TABLE_SIZE;
+	q_tbl_hdr->qtbl_qhdr0_offset = sizeof(struct hfi_queue_table_header);
+	q_tbl_hdr->qtbl_qhdr_size = sizeof(struct hfi_queue_header);
+	q_tbl_hdr->qtbl_num_q = VIDC_IFACEQ_NUMQ;
+	q_tbl_hdr->qtbl_num_active_q = VIDC_IFACEQ_NUMQ;
+
+	iface_q = &dev->iface_queues[VIDC_IFACEQ_CMDQ_IDX];
+	q_hdr = iface_q->q_hdr;
+	q_hdr->qhdr_start_addr = (u32)iface_q->q_array.align_device_addr;
+	q_hdr->qhdr_type |= HFI_Q_ID_HOST_TO_CTRL_CMD_Q;
+	if ((ion_phys_addr_t)q_hdr->qhdr_start_addr !=
+		iface_q->q_array.align_device_addr) {
+		dprintk(VIDC_ERR, "Invalid CMDQ device address (%pa)",
+			&iface_q->q_array.align_device_addr);
+	}
+
+	iface_q = &dev->iface_queues[VIDC_IFACEQ_MSGQ_IDX];
+	q_hdr = iface_q->q_hdr;
+	q_hdr->qhdr_start_addr = (u32)iface_q->q_array.align_device_addr;
+	q_hdr->qhdr_type |= HFI_Q_ID_CTRL_TO_HOST_MSG_Q;
+	if ((ion_phys_addr_t)q_hdr->qhdr_start_addr !=
+		iface_q->q_array.align_device_addr) {
+		dprintk(VIDC_ERR, "Invalid MSGQ device address (%pa)",
+			&iface_q->q_array.align_device_addr);
+	}
+
+	iface_q = &dev->iface_queues[VIDC_IFACEQ_DBGQ_IDX];
+	q_hdr = iface_q->q_hdr;
+	q_hdr->qhdr_start_addr = (u32)iface_q->q_array.align_device_addr;
+	q_hdr->qhdr_type |= HFI_Q_ID_CTRL_TO_HOST_DEBUG_Q;
+	/*
+	 * Set receive request to zero on debug queue as there is no
+	 * need of interrupt from video hardware for debug messages
+	 */
+	q_hdr->qhdr_rx_req = 0;
+	if ((ion_phys_addr_t)q_hdr->qhdr_start_addr !=
+		iface_q->q_array.align_device_addr) {
+		dprintk(VIDC_ERR, "Invalid DBGQ device address (%pa)",
+			&iface_q->q_array.align_device_addr);
+	}
+
+	value = (u32)dev->iface_q_table.align_device_addr;
+	if ((ion_phys_addr_t)value !=
+		dev->iface_q_table.align_device_addr) {
+		dprintk(VIDC_ERR,
+			"Invalid iface_q_table device address (%pa)",
+			&dev->iface_q_table.align_device_addr);
+	}
+
+	if (dev->qdss.mem_data) {
+		qdss = (struct hfi_mem_map_table *)dev->qdss.align_virtual_addr;
+		qdss->mem_map_num_entries = num_entries;
+		mem_map_table_base_addr = dev->qdss.align_device_addr +
+			sizeof(struct hfi_mem_map_table);
+		qdss->mem_map_table_base_addr =
+			(u32)mem_map_table_base_addr;
+		if ((ion_phys_addr_t)qdss->mem_map_table_base_addr !=
+				mem_map_table_base_addr) {
+			dprintk(VIDC_ERR,
+					"Invalid mem_map_table_base_addr (%#lx)",
+					mem_map_table_base_addr);
+		}
+
+		mem_map = (struct hfi_mem_map *)(qdss + 1);
+		cb = msm_smem_get_context_bank(dev->hal_client, false,
+				HAL_BUFFER_INTERNAL_CMD_QUEUE);
+
+		if (!cb) {
+			dprintk(VIDC_ERR,
+				"%s: failed to get context bank\n", __func__);
+			return -EINVAL;
+		}
+
+		rc = __get_qdss_iommu_virtual_addr(dev, mem_map, cb->mapping);
+		if (rc) {
+			dprintk(VIDC_ERR,
+				"IOMMU mapping failed, Freeing qdss memdata\n");
+			__smem_free(dev, dev->qdss.mem_data);
+			dev->qdss.mem_data = NULL;
+			dev->qdss.align_virtual_addr = NULL;
+			dev->qdss.align_device_addr = 0;
+		}
+
+		value = (u32)dev->qdss.align_device_addr;
+		if ((ion_phys_addr_t)value !=
+				dev->qdss.align_device_addr) {
+			dprintk(VIDC_ERR, "Invalid qdss device address (%pa)",
+					&dev->qdss.align_device_addr);
+		}
+	}
+
+	vsfr = (struct hfi_sfr_struct *) dev->sfr.align_virtual_addr;
+	vsfr->bufSize = ALIGNED_SFR_SIZE;
+	value = (u32)dev->sfr.align_device_addr;
+	if ((ion_phys_addr_t)value !=
+		dev->sfr.align_device_addr) {
+		dprintk(VIDC_ERR, "Invalid sfr device address (%pa)",
+			&dev->sfr.align_device_addr);
+	}
+
+	__setup_ucregion_memory_map(dev);
+	return 0;
+fail_alloc_queue:
+	return -ENOMEM;
+}
+
+static int __sys_set_debug(struct venus_hfi_device *device, u32 debug)
+{
+	u8 packet[VIDC_IFACEQ_VAR_SMALL_PKT_SIZE];
+	int rc = 0;
+	struct hfi_cmd_sys_set_property_packet *pkt =
+		(struct hfi_cmd_sys_set_property_packet *) &packet;
+
+	rc = call_hfi_pkt_op(device, sys_debug_config, pkt, debug);
+	if (rc) {
+		dprintk(VIDC_WARN,
+			"Debug mode setting to FW failed\n");
+		return -ENOTEMPTY;
+	}
+
+	if (__iface_cmdq_write(device, pkt))
+		return -ENOTEMPTY;
+	return 0;
+}
+
+static int __sys_set_coverage(struct venus_hfi_device *device, u32 mode)
+{
+	u8 packet[VIDC_IFACEQ_VAR_SMALL_PKT_SIZE];
+	int rc = 0;
+	struct hfi_cmd_sys_set_property_packet *pkt =
+		(struct hfi_cmd_sys_set_property_packet *) &packet;
+
+	rc = call_hfi_pkt_op(device, sys_coverage_config,
+			pkt, mode);
+	if (rc) {
+		dprintk(VIDC_WARN,
+			"Coverage mode setting to FW failed\n");
+		return -ENOTEMPTY;
+	}
+
+	if (__iface_cmdq_write(device, pkt)) {
+		dprintk(VIDC_WARN, "Failed to send coverage pkt to f/w\n");
+		return -ENOTEMPTY;
+	}
+
+	return 0;
+}
+
+static int __sys_set_idle_message(struct venus_hfi_device *device,
+	bool enable)
+{
+	u8 packet[VIDC_IFACEQ_VAR_SMALL_PKT_SIZE];
+	struct hfi_cmd_sys_set_property_packet *pkt =
+		(struct hfi_cmd_sys_set_property_packet *) &packet;
+	if (!enable) {
+		dprintk(VIDC_DBG, "sys_idle_indicator is not enabled\n");
+		return 0;
+	}
+
+	call_hfi_pkt_op(device, sys_idle_indicator, pkt, enable);
+	if (__iface_cmdq_write(device, pkt))
+		return -ENOTEMPTY;
+	return 0;
+}
+
+static int __sys_set_power_control(struct venus_hfi_device *device,
+	bool enable)
+{
+	struct regulator_info *rinfo;
+	bool supported = false;
+	u8 packet[VIDC_IFACEQ_VAR_SMALL_PKT_SIZE];
+	struct hfi_cmd_sys_set_property_packet *pkt =
+		(struct hfi_cmd_sys_set_property_packet *) &packet;
+
+	venus_hfi_for_each_regulator(device, rinfo) {
+		if (rinfo->has_hw_power_collapse) {
+			supported = true;
+			break;
+		}
+	}
+
+	if (!supported)
+		return 0;
+
+	call_hfi_pkt_op(device, sys_power_control, pkt, enable);
+	if (__iface_cmdq_write(device, pkt))
+		return -ENOTEMPTY;
+	return 0;
+}
+
+static int venus_hfi_core_init(void *device)
+{
+	struct hfi_cmd_sys_init_packet pkt;
+	struct hfi_cmd_sys_get_property_packet version_pkt;
+	int rc = 0;
+	struct list_head *ptr, *next;
+	struct hal_session *session = NULL;
+	struct venus_hfi_device *dev;
+
+	if (!device) {
+		dprintk(VIDC_ERR, "Invalid device\n");
+		return -ENODEV;
+	}
+
+	dev = device;
+	mutex_lock(&dev->lock);
+
+	rc = __load_fw(dev);
+	if (rc) {
+		dprintk(VIDC_ERR, "Failed to load Venus FW\n");
+		goto err_load_fw;
+	}
+
+	__set_state(dev, VENUS_STATE_INIT);
+
+	list_for_each_safe(ptr, next, &dev->sess_head) {
+		/*
+		 * The session list is not empty here. Kick stale sessions out
+		 * of our valid instance list, but keep the list_head inited so
+		 * that a future list_del (called by session_clean()) remains
+		 * valid. If the client never closes them, it is a genuine leak
+		 * which the driver cannot fix.
+		 */
+		session = list_entry(ptr, struct hal_session, list);
+		list_del_init(&session->list);
+	}
+
+	INIT_LIST_HEAD(&dev->sess_head);
+
+	if (!dev->hal_client) {
+		dev->hal_client = msm_smem_new_client(
+				SMEM_ION, dev->res, MSM_VIDC_UNKNOWN);
+		if (dev->hal_client == NULL) {
+			dprintk(VIDC_ERR, "Failed to alloc ION_Client\n");
+			rc = -ENODEV;
+			goto err_core_init;
+		}
+
+		dprintk(VIDC_DBG, "Dev_Virt: %pa, Reg_Virt: %pK\n",
+			&dev->hal_data->firmware_base,
+			dev->hal_data->register_base);
+
+		rc = __interface_queues_init(dev);
+		if (rc) {
+			dprintk(VIDC_ERR, "failed to init queues\n");
+			rc = -ENOMEM;
+			goto err_core_init;
+		}
+	} else {
+		dprintk(VIDC_ERR, "hal_client exists\n");
+		rc = -EEXIST;
+		goto err_core_init;
+	}
+
+	rc = __boot_firmware(dev);
+	if (rc) {
+		dprintk(VIDC_ERR, "Failed to start core\n");
+		rc = -ENODEV;
+		goto err_core_init;
+	}
+
+	rc =  call_hfi_pkt_op(dev, sys_init, &pkt, HFI_VIDEO_ARCH_OX);
+	if (rc) {
+		dprintk(VIDC_ERR, "Failed to create sys init pkt\n");
+		goto err_core_init;
+	}
+
+	if (__iface_cmdq_write(dev, &pkt)) {
+		rc = -ENOTEMPTY;
+		goto err_core_init;
+	}
+
+	rc = call_hfi_pkt_op(dev, sys_image_version, &version_pkt);
+	if (rc || __iface_cmdq_write(dev, &version_pkt))
+		dprintk(VIDC_WARN, "Failed to send image version pkt to f/w\n");
+
+	if (dev->res->pm_qos_latency_us) {
+#ifdef CONFIG_SMP
+		dev->qos.type = PM_QOS_REQ_AFFINE_IRQ;
+		dev->qos.irq = dev->hal_data->irq;
+#endif
+		pm_qos_add_request(&dev->qos, PM_QOS_CPU_DMA_LATENCY,
+				dev->res->pm_qos_latency_us);
+	}
+
+	mutex_unlock(&dev->lock);
+	return rc;
+err_core_init:
+	__set_state(dev, VENUS_STATE_DEINIT);
+	__unload_fw(dev);
+err_load_fw:
+	mutex_unlock(&dev->lock);
+	return rc;
+}
+
+static int venus_hfi_core_release(void *dev)
+{
+	struct venus_hfi_device *device = dev;
+	int rc = 0;
+
+	if (!device) {
+		dprintk(VIDC_ERR, "invalid device\n");
+		return -ENODEV;
+	}
+
+	mutex_lock(&device->lock);
+
+	if (device->res->pm_qos_latency_us &&
+		pm_qos_request_active(&device->qos))
+		pm_qos_remove_request(&device->qos);
+	__set_state(device, VENUS_STATE_DEINIT);
+	__unload_fw(device);
+
+	mutex_unlock(&device->lock);
+
+	return rc;
+}
+
+static int __get_q_size(struct venus_hfi_device *dev, unsigned int q_index)
+{
+	struct hfi_queue_header *queue;
+	struct vidc_iface_q_info *q_info;
+	u32 write_ptr, read_ptr;
+
+	if (q_index >= VIDC_IFACEQ_NUMQ) {
+		dprintk(VIDC_ERR, "Invalid q index: %d\n", q_index);
+		return -ENOENT;
+	}
+
+	q_info = &dev->iface_queues[q_index];
+	if (!q_info) {
+		dprintk(VIDC_ERR, "cannot read shared Q's\n");
+		return -ENOENT;
+	}
+
+	queue = (struct hfi_queue_header *)q_info->q_hdr;
+	if (!queue) {
+		dprintk(VIDC_ERR, "queue not present\n");
+		return -ENOENT;
+	}
+
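+	/*
+	 * Callers only care whether the queue is empty: the difference of the
+	 * two indices is zero exactly when there is nothing left to read.
+	 */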
+	write_ptr = (u32)queue->qhdr_write_idx;
+	read_ptr = (u32)queue->qhdr_read_idx;
+	return read_ptr - write_ptr;
+}
+
+static void __core_clear_interrupt(struct venus_hfi_device *device)
+{
+	u32 intr_status = 0;
+
+	if (!device) {
+		dprintk(VIDC_ERR, "%s: NULL device\n", __func__);
+		return;
+	}
+
+	intr_status = __read_register(device, VIDC_WRAPPER_INTR_STATUS);
+
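+	/*
+	 * Treat the interrupt as genuine only if it carries the A2H, A2H
+	 * watchdog or HFI idle status bits; anything else is counted as
+	 * spurious, though both cases are acknowledged below.
+	 */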
+	if (intr_status & VIDC_WRAPPER_INTR_STATUS_A2H_BMSK ||
+		intr_status & VIDC_WRAPPER_INTR_STATUS_A2HWD_BMSK ||
+		intr_status &
+			VIDC_CPU_CS_SCIACMDARG0_HFI_CTRL_INIT_IDLE_MSG_BMSK) {
+		device->intr_status |= intr_status;
+		device->reg_count++;
+		dprintk(VIDC_DBG,
+			"INTERRUPT for device: %pK: times: %d interrupt_status: %d\n",
+			device, device->reg_count, intr_status);
+	} else {
+		device->spur_count++;
+		dprintk(VIDC_INFO,
+			"SPURIOUS_INTR for device: %pK: times: %d interrupt_status: %d\n",
+			device, device->spur_count, intr_status);
+	}
+
+	__write_register(device, VIDC_CPU_CS_A2HSOFTINTCLR, 1);
+	__write_register(device, VIDC_WRAPPER_INTR_CLEAR, intr_status);
+	dprintk(VIDC_DBG, "Cleared WRAPPER/A2H interrupt\n");
+}
+
+static int venus_hfi_core_ping(void *device)
+{
+	struct hfi_cmd_sys_ping_packet pkt;
+	int rc = 0;
+	struct venus_hfi_device *dev;
+
+	if (!device) {
+		dprintk(VIDC_ERR, "invalid device\n");
+		return -ENODEV;
+	}
+
+	dev = device;
+	mutex_lock(&dev->lock);
+
+	rc = call_hfi_pkt_op(dev, sys_ping, &pkt);
+	if (rc) {
+		dprintk(VIDC_ERR, "core_ping: failed to create packet\n");
+		goto err_create_pkt;
+	}
+
+	if (__iface_cmdq_write(dev, &pkt))
+		rc = -ENOTEMPTY;
+
+err_create_pkt:
+	mutex_unlock(&dev->lock);
+	return rc;
+}
+
+static int venus_hfi_core_trigger_ssr(void *device,
+		enum hal_ssr_trigger_type type)
+{
+	struct hfi_cmd_sys_test_ssr_packet pkt;
+	int rc = 0;
+	struct venus_hfi_device *dev;
+
+	if (!device) {
+		dprintk(VIDC_ERR, "invalid device\n");
+		return -ENODEV;
+	}
+
+	dev = device;
+	mutex_lock(&dev->lock);
+
+	rc = call_hfi_pkt_op(dev, ssr_cmd, type, &pkt);
+	if (rc) {
+		dprintk(VIDC_ERR, "core_ping: failed to create packet\n");
+		goto err_create_pkt;
+	}
+
+	if (__iface_cmdq_write(dev, &pkt))
+		rc = -ENOTEMPTY;
+
+err_create_pkt:
+	mutex_unlock(&dev->lock);
+	return rc;
+}
+
+static int venus_hfi_session_set_property(void *sess,
+					enum hal_property ptype, void *pdata)
+{
+	u8 packet[VIDC_IFACEQ_VAR_LARGE_PKT_SIZE];
+	struct hfi_cmd_session_set_property_packet *pkt =
+		(struct hfi_cmd_session_set_property_packet *) &packet;
+	struct hal_session *session = sess;
+	struct venus_hfi_device *device;
+	int rc = 0;
+
+	if (!session || !session->device || !pdata) {
+		dprintk(VIDC_ERR, "Invalid Params\n");
+		return -EINVAL;
+	}
+
+	device = session->device;
+	mutex_lock(&device->lock);
+
+	dprintk(VIDC_INFO, "in set_prop,with prop id: %#x\n", ptype);
+
+	rc = call_hfi_pkt_op(device, session_set_property,
+			pkt, session, ptype, pdata);
+
+	if (rc == -ENOTSUPP) {
+		dprintk(VIDC_DBG,
+			"set property: unsupported prop id: %#x\n", ptype);
+		rc = 0;
+		goto err_set_prop;
+	} else if (rc) {
+		dprintk(VIDC_ERR, "set property: failed to create packet\n");
+		rc = -EINVAL;
+		goto err_set_prop;
+	}
+
+	if (__iface_cmdq_write(session->device, pkt)) {
+		rc = -ENOTEMPTY;
+		goto err_set_prop;
+	}
+
+err_set_prop:
+	mutex_unlock(&device->lock);
+	return rc;
+}
+
+static int venus_hfi_session_get_property(void *sess,
+					enum hal_property ptype)
+{
+	struct hfi_cmd_session_get_property_packet pkt = {0};
+	struct hal_session *session = sess;
+	int rc = 0;
+	struct venus_hfi_device *device;
+
+	if (!session || !session->device) {
+		dprintk(VIDC_ERR, "Invalid Params\n");
+		return -EINVAL;
+	}
+
+	device = session->device;
+	mutex_lock(&device->lock);
+
+	dprintk(VIDC_INFO, "%s: property id: %d\n", __func__, ptype);
+
+	rc = call_hfi_pkt_op(device, session_get_property,
+				&pkt, session, ptype);
+	if (rc) {
+		dprintk(VIDC_ERR, "get property profile: pkt failed\n");
+		goto err_create_pkt;
+	}
+
+	if (__iface_cmdq_write(session->device, &pkt)) {
+		rc = -ENOTEMPTY;
+		dprintk(VIDC_ERR, "%s cmdq_write error\n", __func__);
+	}
+
+err_create_pkt:
+	mutex_unlock(&device->lock);
+	return rc;
+}
+
+static void __set_default_sys_properties(struct venus_hfi_device *device)
+{
+	if (__sys_set_debug(device, msm_vidc_fw_debug))
+		dprintk(VIDC_WARN, "Setting fw_debug msg ON failed\n");
+	if (__sys_set_idle_message(device,
+		device->res->sys_idle_indicator || msm_vidc_sys_idle_indicator))
+		dprintk(VIDC_WARN, "Setting idle response ON failed\n");
+	if (__sys_set_power_control(device, msm_vidc_fw_low_power_mode))
+		dprintk(VIDC_WARN, "Setting h/w power collapse ON failed\n");
+}
+
+static void __session_clean(struct hal_session *session)
+{
+	dprintk(VIDC_DBG, "deleted the session: %pK\n", session);
+	list_del(&session->list);
+	/* Poison the session handle with zeros */
+	*session = (struct hal_session){ {0} };
+	kfree(session);
+}
+
+static int venus_hfi_session_clean(void *session)
+{
+	struct hal_session *sess_close;
+	struct venus_hfi_device *device;
+	if (!session) {
+		dprintk(VIDC_ERR, "Invalid Params %s\n", __func__);
+		return -EINVAL;
+	}
+
+	sess_close = session;
+	device = sess_close->device;
+
+	if (!device) {
+		dprintk(VIDC_ERR, "Invalid device handle %s\n", __func__);
+		return -EINVAL;
+	}
+
+	mutex_lock(&device->lock);
+
+	__session_clean(sess_close);
+
+	mutex_unlock(&device->lock);
+	return 0;
+}
+
+static int venus_hfi_session_init(void *device, void *session_id,
+		enum hal_domain session_type, enum hal_video_codec codec_type,
+		void **new_session)
+{
+	struct hfi_cmd_sys_session_init_packet pkt;
+	struct venus_hfi_device *dev;
+	struct hal_session *s;
+
+	if (!device || !new_session) {
+		dprintk(VIDC_ERR, "%s - invalid input\n", __func__);
+		return -EINVAL;
+	}
+
+	dev = device;
+	mutex_lock(&dev->lock);
+
+	s = kzalloc(sizeof(struct hal_session), GFP_KERNEL);
+	if (!s) {
+		dprintk(VIDC_ERR, "new session fail: Out of memory\n");
+		goto err_session_init_fail;
+	}
+
+	s->session_id = session_id;
+	s->is_decoder = (session_type == HAL_VIDEO_DOMAIN_DECODER);
+	s->device = dev;
+	s->codec = codec_type;
+	s->domain = session_type;
+	dprintk(VIDC_DBG,
+		"%s: inst %pK, session %pK, codec 0x%x, domain 0x%x\n",
+		__func__, session_id, s, s->codec, s->domain);
+
+	list_add_tail(&s->list, &dev->sess_head);
+
+	__set_default_sys_properties(device);
+
+	if (call_hfi_pkt_op(dev, session_init, &pkt,
+			s, session_type, codec_type)) {
+		dprintk(VIDC_ERR, "session_init: failed to create packet\n");
+		goto err_session_init_fail;
+	}
+
+	*new_session = s;
+	if (__iface_cmdq_write(dev, &pkt))
+		goto err_session_init_fail;
+
+	mutex_unlock(&dev->lock);
+	return 0;
+
+err_session_init_fail:
+	if (s)
+		__session_clean(s);
+	*new_session = NULL;
+	mutex_unlock(&dev->lock);
+	return -EINVAL;
+}
+
+static int __send_session_cmd(struct hal_session *session, int pkt_type)
+{
+	struct vidc_hal_session_cmd_pkt pkt;
+	int rc = 0;
+	struct venus_hfi_device *device = session->device;
+
+	rc = call_hfi_pkt_op(device, session_cmd,
+			&pkt, pkt_type, session);
+	if (rc == -EPERM)
+		return 0;
+
+	if (rc) {
+		dprintk(VIDC_ERR, "send session cmd: create pkt failed\n");
+		goto err_create_pkt;
+	}
+
+	if (__iface_cmdq_write(session->device, &pkt))
+		rc = -ENOTEMPTY;
+
+err_create_pkt:
+	return rc;
+}
+
+static int venus_hfi_session_end(void *session)
+{
+	struct hal_session *sess;
+	struct venus_hfi_device *device;
+	int rc = 0;
+
+	if (!session) {
+		dprintk(VIDC_ERR, "Invalid Params %s\n", __func__);
+		return -EINVAL;
+	}
+
+	sess = session;
+	device = sess->device;
+
+	mutex_lock(&device->lock);
+
+	if (msm_vidc_fw_coverage) {
+		if (__sys_set_coverage(sess->device, msm_vidc_fw_coverage))
+			dprintk(VIDC_WARN, "Fw_coverage msg ON failed\n");
+	}
+
+	rc = __send_session_cmd(session, HFI_CMD_SYS_SESSION_END);
+
+	mutex_unlock(&device->lock);
+
+	return rc;
+}
+
+static int venus_hfi_session_abort(void *sess)
+{
+	struct hal_session *session;
+	struct venus_hfi_device *device;
+	int rc = 0;
+	session = sess;
+
+	if (!session || !session->device) {
+		dprintk(VIDC_ERR, "Invalid Params %s\n", __func__);
+		return -EINVAL;
+	}
+
+	device = session->device;
+
+	mutex_lock(&device->lock);
+
+	__flush_debug_queue(device, NULL);
+	rc = __send_session_cmd(session, HFI_CMD_SYS_SESSION_ABORT);
+
+	mutex_unlock(&device->lock);
+
+	return rc;
+}
+
+static int venus_hfi_session_set_buffers(void *sess,
+				struct vidc_buffer_addr_info *buffer_info)
+{
+	struct hfi_cmd_session_set_buffers_packet *pkt;
+	u8 packet[VIDC_IFACEQ_VAR_LARGE_PKT_SIZE];
+	int rc = 0;
+	struct hal_session *session = sess;
+	struct venus_hfi_device *device;
+
+	if (!session || !session->device || !buffer_info) {
+		dprintk(VIDC_ERR, "Invalid Params\n");
+		return -EINVAL;
+	}
+
+	device = session->device;
+	mutex_lock(&device->lock);
+
+	if (buffer_info->buffer_type == HAL_BUFFER_INPUT) {
+		/*
+		 * Hardware doesn't care about input buffers being
+		 * published beforehand
+		 */
+		rc = 0;
+		goto err_create_pkt;
+	}
+
+	pkt = (struct hfi_cmd_session_set_buffers_packet *)packet;
+
+	rc = call_hfi_pkt_op(device, session_set_buffers,
+			pkt, session, buffer_info);
+	if (rc) {
+		dprintk(VIDC_ERR, "set buffers: failed to create packet\n");
+		goto err_create_pkt;
+	}
+
+	dprintk(VIDC_INFO, "set buffers: %#x\n", buffer_info->buffer_type);
+	if (__iface_cmdq_write(session->device, pkt))
+		rc = -ENOTEMPTY;
+
+err_create_pkt:
+	mutex_unlock(&device->lock);
+	return rc;
+}
+
+static int venus_hfi_session_release_buffers(void *sess,
+				struct vidc_buffer_addr_info *buffer_info)
+{
+	struct hfi_cmd_session_release_buffer_packet *pkt;
+	u8 packet[VIDC_IFACEQ_VAR_LARGE_PKT_SIZE];
+	int rc = 0;
+	struct hal_session *session = sess;
+	struct venus_hfi_device *device;
+
+	if (!session || !session->device || !buffer_info) {
+		dprintk(VIDC_ERR, "Invalid Params\n");
+		return -EINVAL;
+	}
+
+	device = session->device;
+	mutex_lock(&device->lock);
+
+	if (buffer_info->buffer_type == HAL_BUFFER_INPUT) {
+		rc = 0;
+		goto err_create_pkt;
+	}
+
+	pkt = (struct hfi_cmd_session_release_buffer_packet *) packet;
+
+	rc = call_hfi_pkt_op(device, session_release_buffers,
+			pkt, session, buffer_info);
+	if (rc) {
+		dprintk(VIDC_ERR, "release buffers: failed to create packet\n");
+		goto err_create_pkt;
+	}
+
+	dprintk(VIDC_INFO, "Release buffers: %#x\n", buffer_info->buffer_type);
+	if (__iface_cmdq_write(session->device, pkt))
+		rc = -ENOTEMPTY;
+
+err_create_pkt:
+	mutex_unlock(&device->lock);
+	return rc;
+}
+
+static int venus_hfi_session_load_res(void *session)
+{
+	struct hal_session *sess;
+	struct venus_hfi_device *device;
+	int rc = 0;
+
+	if (!session) {
+		dprintk(VIDC_ERR, "Invalid Params %s\n", __func__);
+		return -EINVAL;
+	}
+
+	sess = session;
+	device = sess->device;
+
+	mutex_lock(&device->lock);
+	rc = __send_session_cmd(sess, HFI_CMD_SESSION_LOAD_RESOURCES);
+	mutex_unlock(&device->lock);
+
+	return rc;
+}
+
+static int venus_hfi_session_release_res(void *session)
+{
+	struct hal_session *sess;
+	struct venus_hfi_device *device;
+	int rc = 0;
+
+	if (!session) {
+		dprintk(VIDC_ERR, "Invalid Params %s\n", __func__);
+		return -EINVAL;
+	}
+
+	sess = session;
+	device = sess->device;
+
+	mutex_lock(&device->lock);
+	rc = __send_session_cmd(sess, HFI_CMD_SESSION_RELEASE_RESOURCES);
+	mutex_unlock(&device->lock);
+
+	return rc;
+}
+
+static int venus_hfi_session_start(void *session)
+{
+	struct hal_session *sess;
+	struct venus_hfi_device *device;
+	int rc = 0;
+
+	if (!session) {
+		dprintk(VIDC_ERR, "Invalid Params %s\n", __func__);
+		return -EINVAL;
+	}
+
+	sess = session;
+	device = sess->device;
+
+	mutex_lock(&device->lock);
+	rc = __send_session_cmd(sess, HFI_CMD_SESSION_START);
+	mutex_unlock(&device->lock);
+
+	return rc;
+}
+
+static int venus_hfi_session_continue(void *session)
+{
+	struct hal_session *sess;
+	struct venus_hfi_device *device;
+	int rc = 0;
+
+	if (!session) {
+		dprintk(VIDC_ERR, "Invalid Params %s\n", __func__);
+		return -EINVAL;
+	}
+
+	sess = session;
+	device = sess->device;
+
+	mutex_lock(&device->lock);
+	rc = __send_session_cmd(sess, HFI_CMD_SESSION_CONTINUE);
+	mutex_unlock(&device->lock);
+
+	return rc;
+}
+
+static int venus_hfi_session_stop(void *session)
+{
+	struct hal_session *sess;
+	struct venus_hfi_device *device;
+	int rc = 0;
+
+	if (!session) {
+		dprintk(VIDC_ERR, "Invalid Params %s\n", __func__);
+		return -EINVAL;
+	}
+
+	sess = session;
+	device = sess->device;
+
+	mutex_lock(&device->lock);
+	rc = __send_session_cmd(sess, HFI_CMD_SESSION_STOP);
+	mutex_unlock(&device->lock);
+
+	return rc;
+}
+
+static int __session_etb(struct hal_session *session,
+		struct vidc_frame_data *input_frame, bool relaxed)
+{
+	int rc = 0;
+	struct venus_hfi_device *device = session->device;
+
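+	/*
+	 * Decoder input is a compressed bitstream buffer while encoder input
+	 * is an uncompressed (plane 0) frame, hence the two packet types.
+	 */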
+	if (session->is_decoder) {
+		struct hfi_cmd_session_empty_buffer_compressed_packet pkt;
+
+		rc = call_hfi_pkt_op(device, session_etb_decoder,
+				&pkt, session, input_frame);
+		if (rc) {
+			dprintk(VIDC_ERR,
+					"Session etb decoder: failed to create pkt\n");
+			goto err_create_pkt;
+		}
+
+		if (!relaxed)
+			rc = __iface_cmdq_write(session->device, &pkt);
+		else
+			rc = __iface_cmdq_write_relaxed(session->device,
+					&pkt, NULL);
+		if (rc)
+			goto err_create_pkt;
+	} else {
+		struct hfi_cmd_session_empty_buffer_uncompressed_plane0_packet
+			pkt;
+
+		rc = call_hfi_pkt_op(device, session_etb_encoder,
+					 &pkt, session, input_frame);
+		if (rc) {
+			dprintk(VIDC_ERR,
+					"Session etb encoder: failed to create pkt\n");
+			goto err_create_pkt;
+		}
+
+		if (!relaxed)
+			rc = __iface_cmdq_write(session->device, &pkt);
+		else
+			rc = __iface_cmdq_write_relaxed(session->device,
+					&pkt, NULL);
+		if (rc)
+			goto err_create_pkt;
+	}
+
+err_create_pkt:
+	return rc;
+}
+
+static int venus_hfi_session_etb(void *sess,
+				struct vidc_frame_data *input_frame)
+{
+	int rc = 0;
+	struct hal_session *session = sess;
+	struct venus_hfi_device *device;
+
+	if (!session || !session->device || !input_frame) {
+		dprintk(VIDC_ERR, "Invalid Params\n");
+		return -EINVAL;
+	}
+
+	device = session->device;
+	mutex_lock(&device->lock);
+	rc = __session_etb(session, input_frame, false);
+	mutex_unlock(&device->lock);
+	return rc;
+}
+
+static int __session_ftb(struct hal_session *session,
+		struct vidc_frame_data *output_frame, bool relaxed)
+{
+	int rc = 0;
+	struct venus_hfi_device *device = session->device;
+	struct hfi_cmd_session_fill_buffer_packet pkt;
+
+	rc = call_hfi_pkt_op(device, session_ftb,
+			&pkt, session, output_frame);
+	if (rc) {
+		dprintk(VIDC_ERR, "Session ftb: failed to create pkt\n");
+		goto err_create_pkt;
+	}
+
+	if (!relaxed)
+		rc = __iface_cmdq_write(session->device, &pkt);
+	else
+		rc = __iface_cmdq_write_relaxed(session->device,
+				&pkt, NULL);
+
+err_create_pkt:
+	return rc;
+}
+
+static int venus_hfi_session_ftb(void *sess,
+				struct vidc_frame_data *output_frame)
+{
+	int rc = 0;
+	struct hal_session *session = sess;
+	struct venus_hfi_device *device;
+
+	if (!session || !session->device || !output_frame) {
+		dprintk(VIDC_ERR, "Invalid Params\n");
+		return -EINVAL;
+	}
+
+	device = session->device;
+	mutex_lock(&device->lock);
+	rc = __session_ftb(session, output_frame, false);
+	mutex_unlock(&device->lock);
+	return rc;
+}
+
+static int venus_hfi_session_process_batch(void *sess,
+		int num_etbs, struct vidc_frame_data etbs[],
+		int num_ftbs, struct vidc_frame_data ftbs[])
+{
+	int rc = 0, c = 0;
+	struct hal_session *session = sess;
+	struct venus_hfi_device *device;
+	struct hfi_cmd_session_sync_process_packet pkt;
+
+	if (!session || !session->device) {
+		dprintk(VIDC_ERR, "%s: Invalid Params\n", __func__);
+		return -EINVAL;
+	}
+
+	device = session->device;
+
+	mutex_lock(&device->lock);
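+	/*
+	 * Queue all FTBs and ETBs with relaxed writes; the sync-process
+	 * packet at the end is written through the normal path, kicking the
+	 * firmware once for the whole batch.
+	 */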
+	for (c = 0; c < num_ftbs; ++c) {
+		rc = __session_ftb(session, &ftbs[c], true);
+		if (rc) {
+			dprintk(VIDC_ERR, "Failed to queue batched ftb: %d\n",
+					rc);
+			goto err_etbs_and_ftbs;
+		}
+	}
+
+	for (c = 0; c < num_etbs; ++c) {
+		rc = __session_etb(session, &etbs[c], true);
+		if (rc) {
+			dprintk(VIDC_ERR, "Failed to queue batched etb: %d\n",
+					rc);
+			goto err_etbs_and_ftbs;
+		}
+	}
+
+	rc = call_hfi_pkt_op(device, session_sync_process, &pkt, session);
+	if (rc) {
+		dprintk(VIDC_ERR, "Failed to create sync packet\n");
+		goto err_etbs_and_ftbs;
+	}
+
+	if (__iface_cmdq_write(session->device, &pkt))
+		rc = -ENOTEMPTY;
+
+err_etbs_and_ftbs:
+	mutex_unlock(&device->lock);
+	return rc;
+}
+
+static int venus_hfi_session_parse_seq_hdr(void *sess,
+					struct vidc_seq_hdr *seq_hdr)
+{
+	struct hfi_cmd_session_parse_sequence_header_packet *pkt;
+	int rc = 0;
+	u8 packet[VIDC_IFACEQ_VAR_SMALL_PKT_SIZE];
+	struct hal_session *session = sess;
+	struct venus_hfi_device *device;
+
+	if (!session || !session->device || !seq_hdr) {
+		dprintk(VIDC_ERR, "Invalid Params\n");
+		return -EINVAL;
+	}
+
+	device = session->device;
+	mutex_lock(&device->lock);
+
+	pkt = (struct hfi_cmd_session_parse_sequence_header_packet *)packet;
+	rc = call_hfi_pkt_op(device, session_parse_seq_header,
+			pkt, session, seq_hdr);
+	if (rc) {
+		dprintk(VIDC_ERR,
+			"Session parse seq hdr: failed to create pkt\n");
+		goto err_create_pkt;
+	}
+
+	if (__iface_cmdq_write(session->device, pkt))
+		rc = -ENOTEMPTY;
+err_create_pkt:
+	mutex_unlock(&device->lock);
+	return rc;
+}
+
+static int venus_hfi_session_get_seq_hdr(void *sess,
+				struct vidc_seq_hdr *seq_hdr)
+{
+	struct hfi_cmd_session_get_sequence_header_packet *pkt;
+	int rc = 0;
+	u8 packet[VIDC_IFACEQ_VAR_SMALL_PKT_SIZE];
+	struct hal_session *session = sess;
+	struct venus_hfi_device *device;
+
+	if (!session || !session->device || !seq_hdr) {
+		dprintk(VIDC_ERR, "Invalid Params\n");
+		return -EINVAL;
+	}
+
+	device = session->device;
+	mutex_lock(&device->lock);
+
+	pkt = (struct hfi_cmd_session_get_sequence_header_packet *)packet;
+	rc = call_hfi_pkt_op(device, session_get_seq_hdr,
+			pkt, session, seq_hdr);
+	if (rc) {
+		dprintk(VIDC_ERR,
+				"Session get seq hdr: failed to create pkt\n");
+		goto err_create_pkt;
+	}
+
+	if (__iface_cmdq_write(session->device, pkt))
+		rc = -ENOTEMPTY;
+err_create_pkt:
+	mutex_unlock(&device->lock);
+	return rc;
+}
+
+static int venus_hfi_session_get_buf_req(void *sess)
+{
+	struct hfi_cmd_session_get_property_packet pkt;
+	int rc = 0;
+	struct hal_session *session = sess;
+	struct venus_hfi_device *device;
+
+	if (!session || !session->device) {
+		dprintk(VIDC_ERR, "invalid session");
+		return -ENODEV;
+	}
+
+	device = session->device;
+	mutex_lock(&device->lock);
+
+	rc = call_hfi_pkt_op(device, session_get_buf_req,
+			&pkt, session);
+	if (rc) {
+		dprintk(VIDC_ERR,
+				"Session get buf req: failed to create pkt\n");
+		goto err_create_pkt;
+	}
+
+	if (__iface_cmdq_write(session->device, &pkt))
+		rc = -ENOTEMPTY;
+err_create_pkt:
+	mutex_unlock(&device->lock);
+	return rc;
+}
+
+static int venus_hfi_session_flush(void *sess, enum hal_flush flush_mode)
+{
+	struct hfi_cmd_session_flush_packet pkt;
+	int rc = 0;
+	struct hal_session *session = sess;
+	struct venus_hfi_device *device;
+
+	if (!session || !session->device) {
+		dprintk(VIDC_ERR, "invalid session");
+		return -ENODEV;
+	}
+
+	device = session->device;
+	mutex_lock(&device->lock);
+
+	rc = call_hfi_pkt_op(device, session_flush,
+			&pkt, session, flush_mode);
+	if (rc) {
+		dprintk(VIDC_ERR, "Session flush: failed to create pkt\n");
+		goto err_create_pkt;
+	}
+
+	if (__iface_cmdq_write(session->device, &pkt))
+		rc = -ENOTEMPTY;
+err_create_pkt:
+	mutex_unlock(&device->lock);
+	return rc;
+}
+
+static int __check_core_registered(struct hal_device_data core,
+		phys_addr_t fw_addr, u8 *reg_addr, u32 reg_size,
+		phys_addr_t irq)
+{
+	struct venus_hfi_device *device;
+	struct list_head *curr, *next;
+
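+	/*
+	 * A core counts as already registered when it shares the IRQ and its
+	 * firmware or register ranges contain or overlap the candidate's.
+	 */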
+	if (core.dev_count) {
+		list_for_each_safe(curr, next, &core.dev_head) {
+			device = list_entry(curr,
+				struct venus_hfi_device, list);
+			if (device && device->hal_data->irq == irq &&
+				(CONTAINS(device->hal_data->
+						firmware_base,
+						FIRMWARE_SIZE, fw_addr) ||
+				CONTAINS(fw_addr, FIRMWARE_SIZE,
+						device->hal_data->
+						firmware_base) ||
+				CONTAINS(device->hal_data->
+						register_base,
+						reg_size, reg_addr) ||
+				CONTAINS(reg_addr, reg_size,
+						device->hal_data->
+						register_base) ||
+				OVERLAPS(device->hal_data->
+						register_base,
+						reg_size, reg_addr, reg_size) ||
+				OVERLAPS(reg_addr, reg_size,
+						device->hal_data->
+						register_base, reg_size) ||
+				OVERLAPS(device->hal_data->
+						firmware_base,
+						FIRMWARE_SIZE, fw_addr,
+						FIRMWARE_SIZE) ||
+				OVERLAPS(fw_addr, FIRMWARE_SIZE,
+						device->hal_data->
+						firmware_base,
+						FIRMWARE_SIZE))) {
+				return 0;
+			} else {
+				dprintk(VIDC_INFO, "Device not registered\n");
+				return -EINVAL;
+			}
+		}
+	} else {
+		dprintk(VIDC_INFO, "no device Registered\n");
+	}
+
+	return -EINVAL;
+}
+
+static void __process_fatal_error(
+		struct venus_hfi_device *device)
+{
+	struct msm_vidc_cb_cmd_done cmd_done = {0};
+
+	cmd_done.device_id = device->device_id;
+	device->callback(HAL_SYS_ERROR, &cmd_done);
+}
+
+static int __prepare_pc(struct venus_hfi_device *device)
+{
+	int rc = 0;
+	struct hfi_cmd_sys_pc_prep_packet pkt;
+
+	rc = call_hfi_pkt_op(device, sys_pc_prep, &pkt);
+	if (rc) {
+		dprintk(VIDC_ERR, "Failed to create sys pc prep pkt\n");
+		goto err_pc_prep;
+	}
+
+	if (__iface_cmdq_write(device, &pkt))
+		rc = -ENOTEMPTY;
+	if (rc)
+		dprintk(VIDC_ERR, "Failed to prepare venus for power off");
+err_pc_prep:
+	return rc;
+}
+
+static void venus_hfi_pm_handler(struct work_struct *work)
+{
+	int rc = 0;
+	u32 wfi_status = 0, idle_status = 0, pc_ready = 0;
+	int count = 0;
+	const int max_tries = 5;
+	struct venus_hfi_device *device = list_first_entry(
+			&hal_ctxt.dev_head, struct venus_hfi_device, list);
+	if (!device) {
+		dprintk(VIDC_ERR, "%s: NULL device\n", __func__);
+		return;
+	}
+
+	/*
+	 * It is ok to check this variable outside the lock since
+	 * it is being updated in this context only
+	 */
+	if (device->skip_pc_count >= VIDC_MAX_PC_SKIP_COUNT) {
+		dprintk(VIDC_WARN, "Failed to PC for %d times\n",
+				device->skip_pc_count);
+		device->skip_pc_count = 0;
+		__process_fatal_error(device);
+		return;
+	}
+	mutex_lock(&device->lock);
+	if (!device->power_enabled) {
+		dprintk(VIDC_DBG, "%s: Power already disabled\n",
+				__func__);
+		goto exit;
+	}
+
+	rc = __core_in_valid_state(device);
+	if (!rc) {
+		dprintk(VIDC_WARN,
+				"Core is in bad state, Skipping power collapse\n");
+		goto skip_power_off;
+	}
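+
+	/*
+	 * If firmware has not yet signalled PC_READY, confirm it is idle and
+	 * in WFI, send a PC prepare command, then poll up to max_tries for
+	 * the PC_READY handshake before powering off.
+	 */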
+	pc_ready = __read_register(device, VIDC_CPU_CS_SCIACMDARG0) &
+		VIDC_CPU_CS_SCIACMDARG0_HFI_CTRL_PC_READY;
+	if (!pc_ready) {
+		wfi_status = __read_register(device,
+				VIDC_WRAPPER_CPU_STATUS);
+		idle_status = __read_register(device,
+				VIDC_CPU_CS_SCIACMDARG0);
+		if (!(wfi_status & BIT(0)) ||
+				!(idle_status & BIT(30))) {
+			dprintk(VIDC_WARN, "Skipping PC\n");
+			goto skip_power_off;
+		}
+
+		rc = __prepare_pc(device);
+		if (rc) {
+			dprintk(VIDC_WARN, "Failed PC %d\n", rc);
+			goto skip_power_off;
+		}
+
+		while (count < max_tries) {
+			wfi_status = __read_register(device,
+					VIDC_WRAPPER_CPU_STATUS);
+			pc_ready = __read_register(device,
+					VIDC_CPU_CS_SCIACMDARG0);
+			if ((wfi_status & BIT(0)) && (pc_ready &
+				VIDC_CPU_CS_SCIACMDARG0_HFI_CTRL_PC_READY))
+				break;
+			usleep_range(1000, 1500);
+			count++;
+		}
+
+		if (count == max_tries) {
+			dprintk(VIDC_ERR,
+					"Skip PC. Core is not in right state (%#x, %#x)\n",
+					wfi_status, pc_ready);
+			goto skip_power_off;
+		}
+	}
+
+	rc = __suspend(device);
+	if (rc)
+		dprintk(VIDC_ERR, "Failed venus power off\n");
+
+	/* Cancel pending delayed works if any */
+	cancel_delayed_work(&venus_hfi_pm_work);
+	device->skip_pc_count = 0;
+
+	mutex_unlock(&device->lock);
+	return;
+
+skip_power_off:
+	device->skip_pc_count++;
+	dprintk(VIDC_WARN, "Skip PC(%d, %#x, %#x, %#x)\n",
+		device->skip_pc_count, wfi_status, idle_status, pc_ready);
+	queue_delayed_work(device->venus_pm_workq,
+			&venus_hfi_pm_work,
+			msecs_to_jiffies(msm_vidc_pwr_collapse_delay));
+exit:
+	mutex_unlock(&device->lock);
+	return;
+}
+
+static void __process_sys_error(struct venus_hfi_device *device)
+{
+	struct hfi_sfr_struct *vsfr = NULL;
+
+	__set_state(device, VENUS_STATE_DEINIT);
+
+	/*
+	 * Once SYS_ERROR is received from HW, it is safe to halt the AXI.
+	 * With SYS_ERROR, Venus FW may have crashed and the HW might still be
+	 * active, causing unnecessary transactions. Hence it is safe to stop
+	 * all AXI transactions from the venus sub-system.
+	 */
+	if (__halt_axi(device))
+		dprintk(VIDC_WARN, "Failed to halt AXI after SYS_ERROR\n");
+
+	vsfr = (struct hfi_sfr_struct *)device->sfr.align_virtual_addr;
+	if (vsfr) {
+		void *p = memchr(vsfr->rg_data, '\0', vsfr->bufSize);
+		/*
+		 * The SFR isn't guaranteed to be NULL-terminated since
+		 * SYS_ERROR indicates that Venus is in the process of
+		 * crashing.
+		 */
+		if (p == NULL)
+			vsfr->rg_data[vsfr->bufSize - 1] = '\0';
+
+		dprintk(VIDC_ERR, "SFR Message from FW: %s\n",
+				vsfr->rg_data);
+	}
+}
+
+static void __flush_debug_queue(struct venus_hfi_device *device, u8 *packet)
+{
+	bool local_packet = false;
+	enum vidc_msg_prio log_level = VIDC_FW;
+
+	if (!device) {
+		dprintk(VIDC_ERR, "%s: Invalid params\n", __func__);
+		return;
+	}
+
+	if (!packet) {
+		packet = kzalloc(VIDC_IFACEQ_VAR_HUGE_PKT_SIZE, GFP_TEMPORARY);
+		if (!packet) {
+			dprintk(VIDC_ERR, "In %s() Fail to allocate mem\n",
+				__func__);
+			return;
+		}
+
+		local_packet = true;
+
+		/*
+		 * A local packet is used when something fatal has occurred;
+		 * it is worth printing these logs by default.
+		 */
+		log_level = VIDC_ERR;
+	}
+
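+	/*
+	 * Drain the debug queue: coverage packets are pushed to the STM log,
+	 * while plain debug packets are printed at the chosen log level.
+	 */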
+	while (!__iface_dbgq_read(device, packet)) {
+		struct hfi_msg_sys_coverage_packet *pkt =
+			(struct hfi_msg_sys_coverage_packet *) packet;
+		if (pkt->packet_type == HFI_MSG_SYS_COV) {
+			int stm_size = 0;
+			stm_size = stm_log_inv_ts(0, 0,
+				pkt->rg_msg_data, pkt->msg_size);
+			if (stm_size == 0)
+				dprintk(VIDC_ERR,
+					"In %s, stm_log returned size of 0\n",
+					__func__);
+		} else {
+			struct hfi_msg_sys_debug_packet *pkt =
+				(struct hfi_msg_sys_debug_packet *) packet;
+			dprintk(log_level, "%s", pkt->rg_msg_data);
+		}
+	}
+
+	if (local_packet)
+		kfree(packet);
+}
+
+static struct hal_session *__get_session(struct venus_hfi_device *device,
+		u32 session_id)
+{
+	struct hal_session *temp = NULL;
+
+	list_for_each_entry(temp, &device->sess_head, list) {
+		if (session_id == hash32_ptr(temp))
+			return temp;
+	}
+
+	return NULL;
+}
+
+static int __response_handler(struct venus_hfi_device *device)
+{
+	struct msm_vidc_cb_info *packets;
+	int packet_count = 0;
+	u8 *raw_packet = NULL;
+	bool requeue_pm_work = true;
+
+	if (!device || device->state != VENUS_STATE_INIT)
+		return 0;
+
+	packets = device->response_pkt;
+
+	raw_packet = device->raw_packet;
+
+	if (!raw_packet || !packets) {
+		dprintk(VIDC_ERR,
+			"%s: Invalid args : Res packet = %p, Raw packet = %p\n",
+			__func__, packets, raw_packet);
+		return 0;
+	}
+
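+	/*
+	 * An asserted A2H watchdog bit means the firmware stopped responding;
+	 * report a SYS_WATCHDOG_TIMEOUT to the client and skip normal queue
+	 * processing.
+	 */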
+	if (device->intr_status & VIDC_WRAPPER_INTR_CLEAR_A2HWD_BMSK) {
+		struct hfi_sfr_struct *vsfr = (struct hfi_sfr_struct *)
+			device->sfr.align_virtual_addr;
+		struct msm_vidc_cb_info info = {
+			.response_type = HAL_SYS_WATCHDOG_TIMEOUT,
+			.response.cmd = {
+				.device_id = device->device_id,
+			}
+		};
+
+		if (vsfr)
+			dprintk(VIDC_ERR, "SFR Message from FW: %s\n",
+					vsfr->rg_data);
+
+		dprintk(VIDC_ERR, "Received watchdog timeout\n");
+		packets[packet_count++] = info;
+		goto exit;
+	}
+
+	/* Bleed the msg queue dry of packets */
+	while (!__iface_msgq_read(device, raw_packet)) {
+		void **session_id = NULL;
+		struct msm_vidc_cb_info *info = &packets[packet_count++];
+		struct vidc_hal_sys_init_done sys_init_done = {0};
+		int rc = 0;
+
+		rc = hfi_process_msg_packet(device->device_id,
+			(struct vidc_hal_msg_pkt_hdr *)raw_packet, info);
+		if (rc) {
+			dprintk(VIDC_WARN,
+					"Corrupt/unknown packet found, discarding\n");
+			--packet_count;
+			continue;
+		}
+
+		/* Process the packet types that we're interested in */
+		switch (info->response_type) {
+		case HAL_SYS_ERROR:
+			__process_sys_error(device);
+			break;
+		case HAL_SYS_RELEASE_RESOURCE_DONE:
+			dprintk(VIDC_DBG, "Received SYS_RELEASE_RESOURCE\n");
+			break;
+		case HAL_SYS_INIT_DONE:
+			dprintk(VIDC_DBG, "Received SYS_INIT_DONE\n");
+			/*
+			 * The video driver intentionally does not unset IMEM
+			 * on venus, to simplify power collapse.
+			 */
+			if (__set_imem(device, &device->resources.imem))
+				dprintk(VIDC_WARN,
+				"Failed to set IMEM. Performance will be impacted\n");
+			sys_init_done.capabilities =
+				device->sys_init_capabilities;
+			hfi_process_sys_init_done_prop_read(
+				(struct hfi_msg_sys_init_done_packet *)
+					raw_packet, &sys_init_done);
+			info->response.cmd.data.sys_init_done = sys_init_done;
+			break;
+		case HAL_SESSION_LOAD_RESOURCE_DONE:
+			/*
+			 * Work around for H/W bug, need to re-program these
+			 * registers as part of a handshake agreement with the
+			 * firmware.  This strictly only needs to be done for
+			 * decoder secure sessions, but there's no harm in doing
+			 * so for all sessions as it's at worst a NO-OP.
+			 */
+			__set_threshold_registers(device);
+			break;
+		default:
+			break;
+		}
+
+		/* For session-related packets, validate session */
+		switch (info->response_type) {
+		case HAL_SESSION_LOAD_RESOURCE_DONE:
+		case HAL_SESSION_INIT_DONE:
+		case HAL_SESSION_END_DONE:
+		case HAL_SESSION_ABORT_DONE:
+		case HAL_SESSION_START_DONE:
+		case HAL_SESSION_STOP_DONE:
+		case HAL_SESSION_FLUSH_DONE:
+		case HAL_SESSION_SUSPEND_DONE:
+		case HAL_SESSION_RESUME_DONE:
+		case HAL_SESSION_SET_PROP_DONE:
+		case HAL_SESSION_GET_PROP_DONE:
+		case HAL_SESSION_PARSE_SEQ_HDR_DONE:
+		case HAL_SESSION_RELEASE_BUFFER_DONE:
+		case HAL_SESSION_RELEASE_RESOURCE_DONE:
+		case HAL_SESSION_PROPERTY_INFO:
+			session_id = &info->response.cmd.session_id;
+			break;
+		case HAL_SESSION_ERROR:
+		case HAL_SESSION_GET_SEQ_HDR_DONE:
+		case HAL_SESSION_ETB_DONE:
+		case HAL_SESSION_FTB_DONE:
+			session_id = &info->response.data.session_id;
+			break;
+		case HAL_SESSION_EVENT_CHANGE:
+			session_id = &info->response.event.session_id;
+			break;
+		case HAL_RESPONSE_UNUSED:
+		default:
+			session_id = NULL;
+			break;
+		}
+
+		/*
+		 * hfi_process_msg_packet provides a session_id that is a
+		 * hashed value of struct hal_session; we need to coerce the
+		 * hashed value back to a pointer we can use. Ideally
+		 * hfi_process_msg_packet itself should take care of this, but
+		 * it doesn't have the required information.
+		 */
+		if (session_id) {
+			struct hal_session *session = NULL;
+
+			if (upper_32_bits((uintptr_t)*session_id) != 0) {
+				dprintk(VIDC_WARN,
+					"Upper 32 bits of session_id != 0\n");
+				WARN_ON(VIDC_DBG_WARN_ENABLE);
+			}
+			session = __get_session(device,
+					(u32)(uintptr_t)*session_id);
+			if (!session) {
+				dprintk(VIDC_ERR,
+						"Received a packet (%#x) for an unrecognized session (%pK), discarding\n",
+						info->response_type,
+						*session_id);
+				--packet_count;
+				continue;
+			}
+
+			*session_id = session->session_id;
+		}
+
+		if (packet_count >= max_packets &&
+				__get_q_size(device, VIDC_IFACEQ_MSGQ_IDX)) {
+			dprintk(VIDC_WARN,
+					"Too many packets in message queue to handle at once, deferring read\n");
+			break;
+		}
+	}
+
+	if (requeue_pm_work && device->res->sw_power_collapsible) {
+		cancel_delayed_work(&venus_hfi_pm_work);
+		if (!queue_delayed_work(device->venus_pm_workq,
+			&venus_hfi_pm_work,
+			msecs_to_jiffies(msm_vidc_pwr_collapse_delay))) {
+			dprintk(VIDC_ERR, "PM work already scheduled\n");
+		}
+	}
+
+exit:
+	__flush_debug_queue(device, raw_packet);
+
+	return packet_count;
+}
+
+static void venus_hfi_core_work_handler(struct work_struct *work)
+{
+	struct venus_hfi_device *device = list_first_entry(
+		&hal_ctxt.dev_head, struct venus_hfi_device, list);
+	int num_responses = 0, i = 0;
+	u32 intr_status;
+
+	mutex_lock(&device->lock);
+
+	dprintk(VIDC_INFO, "Handling interrupt\n");
+
+	if (!__core_in_valid_state(device)) {
+		dprintk(VIDC_DBG, "%s - Core not in init state\n", __func__);
+		goto err_no_work;
+	}
+
+	if (!device->callback) {
+		dprintk(VIDC_ERR, "No interrupt callback function: %pK\n",
+				device);
+		goto err_no_work;
+	}
+
+	if (__resume(device)) {
+		dprintk(VIDC_ERR, "%s: Power enable failed\n", __func__);
+		goto err_no_work;
+	}
+
+	__core_clear_interrupt(device);
+	num_responses = __response_handler(device);
+
+err_no_work:
+
+	/* Keep the interrupt status before releasing device lock */
+	intr_status = device->intr_status;
+	mutex_unlock(&device->lock);
+
+	/*
+	 * Issue the callbacks outside of the locked context to preserve
+	 * re-entrancy.
+	 */
+
+	for (i = 0; !IS_ERR_OR_NULL(device->response_pkt) &&
+		i < num_responses; ++i) {
+		struct msm_vidc_cb_info *r = &device->response_pkt[i];
+
+		device->callback(r->response_type, &r->response);
+	}
+
+	/* We need to re-enable the IRQ that was disabled in the ISR handler */
+	if (!(intr_status & VIDC_WRAPPER_INTR_STATUS_A2HWD_BMSK))
+		enable_irq(device->hal_data->irq);
+
+	/*
+	 * XXX: Don't add any code beyond here. Reacquiring locks after
+	 * releasing them above doesn't guarantee the atomicity we're aiming
+	 * for.
+	 */
+}
+
+static DECLARE_WORK(venus_hfi_work, venus_hfi_core_work_handler);
+
+static irqreturn_t venus_hfi_isr(int irq, void *dev)
+{
+	struct venus_hfi_device *device = dev;
+	dprintk(VIDC_INFO, "Received an interrupt %d\n", irq);
+	disable_irq_nosync(irq);
+	queue_work(device->vidc_workq, &venus_hfi_work);
+	return IRQ_HANDLED;
+}
+
+static int __init_regs_and_interrupts(struct venus_hfi_device *device,
+		struct msm_vidc_platform_resources *res)
+{
+	struct hal_data *hal = NULL;
+	int rc = 0;
+
+	rc = __check_core_registered(hal_ctxt, res->firmware_base,
+			(u8 *)(uintptr_t)res->register_base,
+			res->register_size, res->irq);
+	if (!rc) {
+		dprintk(VIDC_ERR, "Core present/Already added\n");
+		rc = -EEXIST;
+		goto err_core_init;
+	}
+
+	dprintk(VIDC_DBG, "HAL_DATA will be assigned now\n");
+	hal = (struct hal_data *)
+		kzalloc(sizeof(struct hal_data), GFP_KERNEL);
+	if (!hal) {
+		dprintk(VIDC_ERR, "Failed to alloc\n");
+		rc = -ENOMEM;
+		goto err_core_init;
+	}
+
+	hal->irq = res->irq;
+	hal->firmware_base = res->firmware_base;
+	hal->register_base = devm_ioremap_nocache(&res->pdev->dev,
+			res->register_base, res->register_size);
+	hal->register_size = res->register_size;
+	if (!hal->register_base) {
+		dprintk(VIDC_ERR,
+			"could not map reg addr %pa of size %d\n",
+			&res->register_base, res->register_size);
+		goto error_irq_fail;
+	}
+
+	device->hal_data = hal;
+	rc = request_irq(res->irq, venus_hfi_isr, IRQF_TRIGGER_HIGH,
+			"msm_vidc", device);
+	if (unlikely(rc)) {
+		dprintk(VIDC_ERR, "() :request_irq failed\n");
+		goto error_irq_fail;
+	}
+
+	disable_irq_nosync(res->irq);
+	dprintk(VIDC_INFO,
+		"firmware_base = %pa, register_base = %pa, register_size = %d\n",
+		&res->firmware_base, &res->register_base,
+		res->register_size);
+	return rc;
+
+error_irq_fail:
+	kfree(hal);
+err_core_init:
+	return rc;
+
+}
+
+static inline void __deinit_clocks(struct venus_hfi_device *device)
+{
+	struct clock_info *cl;
+
+	device->clk_freq = 0;
+	venus_hfi_for_each_clock_reverse(device, cl) {
+		if (cl->clk) {
+			clk_put(cl->clk);
+			cl->clk = NULL;
+		}
+	}
+}
+
+static inline int __init_clocks(struct venus_hfi_device *device)
+{
+	int rc = 0;
+	struct clock_info *cl = NULL;
+
+	if (!device) {
+		dprintk(VIDC_ERR, "Invalid params: %pK\n", device);
+		return -EINVAL;
+	}
+
+	venus_hfi_for_each_clock(device, cl) {
+		int i = 0;
+
+		dprintk(VIDC_DBG, "%s: scalable? %d, count %d\n",
+				cl->name, cl->has_scaling, cl->count);
+		for (i = 0; i < cl->count; ++i) {
+			dprintk(VIDC_DBG,
+				"\tload = %d, freq = %d codecs supported %#x\n",
+				cl->load_freq_tbl[i].load,
+				cl->load_freq_tbl[i].freq,
+				cl->load_freq_tbl[i].supported_codecs);
+		}
+	}
+
+	venus_hfi_for_each_clock(device, cl) {
+		if (!cl->clk) {
+			cl->clk = clk_get(&device->res->pdev->dev, cl->name);
+			if (IS_ERR_OR_NULL(cl->clk)) {
+				dprintk(VIDC_ERR,
+					"Failed to get clock: %s\n", cl->name);
+				rc = PTR_ERR(cl->clk) ?: -EINVAL;
+				cl->clk = NULL;
+				goto err_clk_get;
+			}
+		}
+	}
+	device->clk_freq = 0;
+	return 0;
+
+err_clk_get:
+	__deinit_clocks(device);
+	return rc;
+}
+
+
+static inline void __disable_unprepare_clks(struct venus_hfi_device *device)
+{
+	struct clock_info *cl;
+
+	if (!device) {
+		dprintk(VIDC_ERR, "Invalid params: %pK\n", device);
+		return;
+	}
+
+	venus_hfi_for_each_clock_reverse(device, cl) {
+		usleep_range(100, 500);
+		dprintk(VIDC_DBG, "Clock: %s disable and unprepare\n",
+				cl->name);
+		clk_disable_unprepare(cl->clk);
+	}
+}
+
+static inline int __prepare_enable_clks(struct venus_hfi_device *device)
+{
+	struct clock_info *cl = NULL, *cl_fail = NULL;
+	int rc = 0, c = 0;
+	if (!device) {
+		dprintk(VIDC_ERR, "Invalid params: %pK\n", device);
+		return -EINVAL;
+	}
+
+	venus_hfi_for_each_clock(device, cl) {
+		/*
+		 * For the clocks we control, set the rate prior to preparing
+		 * them.  Since we don't really have a load at this point, scale
+		 * it to the lowest frequency possible
+		 */
+		if (cl->has_scaling)
+			__set_clk_rate(device, cl,
+						clk_round_rate(cl->clk, 0));
+
+		if (cl->has_mem_retention) {
+			rc = clk_set_flags(cl->clk, CLKFLAG_NORETAIN_PERIPH);
+			if (rc) {
+				dprintk(VIDC_WARN,
+					"Failed set flag NORETAIN_PERIPH %s\n",
+					cl->name);
+			}
+
+			rc = clk_set_flags(cl->clk, CLKFLAG_NORETAIN_MEM);
+			if (rc) {
+				dprintk(VIDC_WARN,
+					"Failed set flag NORETAIN_MEM %s\n",
+					cl->name);
+			}
+		}
+
+		rc = clk_prepare_enable(cl->clk);
+		if (rc) {
+			dprintk(VIDC_ERR, "Failed to enable clocks\n");
+			cl_fail = cl;
+			goto fail_clk_enable;
+		}
+
+		c++;
+		dprintk(VIDC_DBG, "Clock: %s prepared and enabled\n", cl->name);
+	}
+
+	__write_register(device, VIDC_WRAPPER_CLOCK_CONFIG, 0);
+	__write_register(device, VIDC_WRAPPER_CPU_CLOCK_CONFIG, 0);
+	return rc;
+
+fail_clk_enable:
+	venus_hfi_for_each_clock_reverse_continue(device, cl, c) {
+		usleep_range(100, 500);
+		dprintk(VIDC_ERR, "Clock: %s disable and unprepare\n",
+			cl->name);
+		clk_disable_unprepare(cl->clk);
+	}
+
+	return rc;
+}
+
+static void __deinit_bus(struct venus_hfi_device *device)
+{
+	struct bus_info *bus = NULL;
+	if (!device)
+		return;
+
+	kfree(device->bus_vote.data);
+	device->bus_vote = DEFAULT_BUS_VOTE;
+
+	venus_hfi_for_each_bus_reverse(device, bus) {
+		devfreq_remove_device(bus->devfreq);
+		bus->devfreq = NULL;
+		dev_set_drvdata(bus->dev, NULL);
+
+		msm_bus_scale_unregister(bus->client);
+		bus->client = NULL;
+	}
+}
+
+static int __init_bus(struct venus_hfi_device *device)
+{
+	struct bus_info *bus = NULL;
+	int rc = 0;
+
+	if (!device)
+		return -EINVAL;
+
+	venus_hfi_for_each_bus(device, bus) {
+		struct devfreq_dev_profile profile = {
+			.initial_freq = 0,
+			.polling_ms = INT_MAX,
+			.freq_table = NULL,
+			.max_state = 0,
+			.target = __devfreq_target,
+			.get_dev_status = __devfreq_get_status,
+			.exit = NULL,
+		};
+
+		/*
+		 * This is stupid, but there's no other easy way to get ahold
+		 * of struct bus_info in venus_hfi_devfreq_*()
+		 */
+		WARN(dev_get_drvdata(bus->dev), "%s's drvdata already set\n",
+				dev_name(bus->dev));
+		dev_set_drvdata(bus->dev, device);
+
+		bus->client = msm_bus_scale_register(bus->master, bus->slave,
+				bus->name, false);
+		if (IS_ERR_OR_NULL(bus->client)) {
+			rc = PTR_ERR(bus->client) ?: -EBADHANDLE;
+			dprintk(VIDC_ERR, "Failed to register bus %s: %d\n",
+					bus->name, rc);
+			bus->client = NULL;
+			goto err_add_dev;
+		}
+
+		bus->devfreq_prof = profile;
+		bus->devfreq = devfreq_add_device(bus->dev,
+				&bus->devfreq_prof, bus->governor, NULL);
+		if (IS_ERR_OR_NULL(bus->devfreq)) {
+			rc = PTR_ERR(bus->devfreq) ?: -EBADHANDLE;
+			dprintk(VIDC_ERR,
+					"Failed to add devfreq device for bus %s and governor %s: %d\n",
+					bus->name, bus->governor, rc);
+			bus->devfreq = NULL;
+			goto err_add_dev;
+		}
+
+		/*
+		 * Devfreq starts monitoring immediately, since we are just
+		 * initializing stuff at this point, force it to suspend
+		 */
+		devfreq_suspend_device(bus->devfreq);
+	}
+
+	device->bus_vote = DEFAULT_BUS_VOTE;
+	return 0;
+
+err_add_dev:
+	__deinit_bus(device);
+	return rc;
+}
+
+static void __deinit_regulators(struct venus_hfi_device *device)
+{
+	struct regulator_info *rinfo = NULL;
+
+	venus_hfi_for_each_regulator_reverse(device, rinfo) {
+		if (rinfo->regulator) {
+			regulator_put(rinfo->regulator);
+			rinfo->regulator = NULL;
+		}
+	}
+}
+
+static int __init_regulators(struct venus_hfi_device *device)
+{
+	int rc = 0;
+	struct regulator_info *rinfo = NULL;
+
+	venus_hfi_for_each_regulator(device, rinfo) {
+		rinfo->regulator = regulator_get(&device->res->pdev->dev,
+				rinfo->name);
+		if (IS_ERR_OR_NULL(rinfo->regulator)) {
+			rc = PTR_ERR(rinfo->regulator) ?: -EBADHANDLE;
+			dprintk(VIDC_ERR, "Failed to get regulator: %s\n",
+					rinfo->name);
+			rinfo->regulator = NULL;
+			goto err_reg_get;
+		}
+	}
+
+	return 0;
+
+err_reg_get:
+	__deinit_regulators(device);
+	return rc;
+}
+
+static int __init_resources(struct venus_hfi_device *device,
+				struct msm_vidc_platform_resources *res)
+{
+	int rc = 0;
+
+	rc = __init_regulators(device);
+	if (rc) {
+		dprintk(VIDC_ERR, "Failed to get all regulators\n");
+		return -ENODEV;
+	}
+
+	rc = __init_clocks(device);
+	if (rc) {
+		dprintk(VIDC_ERR, "Failed to init clocks\n");
+		rc = -ENODEV;
+		goto err_init_clocks;
+	}
+
+	rc = __init_bus(device);
+	if (rc) {
+		dprintk(VIDC_ERR, "Failed to init bus: %d\n", rc);
+		goto err_init_bus;
+	}
+
+	device->sys_init_capabilities =
+		kzalloc(sizeof(struct msm_vidc_capability)
+		* VIDC_MAX_SESSIONS, GFP_TEMPORARY);
+
+	return rc;
+
+err_init_bus:
+	__deinit_clocks(device);
+err_init_clocks:
+	__deinit_regulators(device);
+	return rc;
+}
+
+static void __deinit_resources(struct venus_hfi_device *device)
+{
+	__deinit_bus(device);
+	__deinit_clocks(device);
+	__deinit_regulators(device);
+	kfree(device->sys_init_capabilities);
+	device->sys_init_capabilities = NULL;
+}
+
+static int __protect_cp_mem(struct venus_hfi_device *device)
+{
+	struct tzbsp_memprot memprot;
+	unsigned int resp = 0;
+	int rc = 0;
+	struct context_bank_info *cb;
+	struct scm_desc desc = {0};
+
+	if (!device)
+		return -EINVAL;
+
+	memprot.cp_start = 0x0;
+	memprot.cp_size = 0x0;
+	memprot.cp_nonpixel_start = 0x0;
+	memprot.cp_nonpixel_size = 0x0;
+
+	list_for_each_entry(cb, &device->res->context_banks, list) {
+		if (!strcmp(cb->name, "venus_ns")) {
+			desc.args[1] = memprot.cp_size =
+				cb->addr_range.start;
+			dprintk(VIDC_DBG, "%s memprot.cp_size: %#x\n",
+				__func__, memprot.cp_size);
+		}
+
+		if (!strcmp(cb->name, "venus_sec_non_pixel")) {
+			desc.args[2] = memprot.cp_nonpixel_start =
+				cb->addr_range.start;
+			desc.args[3] = memprot.cp_nonpixel_size =
+				cb->addr_range.size;
+			dprintk(VIDC_DBG,
+				"%s memprot.cp_nonpixel_start: %#x size: %#x\n",
+				__func__, memprot.cp_nonpixel_start,
+				memprot.cp_nonpixel_size);
+		}
+	}
+
+	if (!is_scm_armv8()) {
+		rc = scm_call(SCM_SVC_MP, TZBSP_MEM_PROTECT_VIDEO_VAR, &memprot,
+			sizeof(memprot), &resp, sizeof(resp));
+	} else {
+		desc.arginfo = SCM_ARGS(4);
+		rc = scm_call2(SCM_SIP_FNID(SCM_SVC_MP,
+			       TZBSP_MEM_PROTECT_VIDEO_VAR), &desc);
+		resp = desc.ret[0];
+	}
+
+	if (rc) {
+		dprintk(VIDC_ERR, "Failed to protect memory(%d) response: %d\n",
+				rc, resp);
+	}
+
+	trace_venus_hfi_var_done(
+		memprot.cp_start, memprot.cp_size,
+		memprot.cp_nonpixel_start, memprot.cp_nonpixel_size);
+	return rc;
+}
+
+static int __disable_regulator(struct regulator_info *rinfo)
+{
+	int rc = 0;
+
+	dprintk(VIDC_DBG, "Disabling regulator %s\n", rinfo->name);
+
+	/*
+	 * This call is needed. The driver must acquire control back from
+	 * the HW in order to disable the regulator; otherwise the behavior
+	 * is unknown.
+	 */
+
+	rc = __acquire_regulator(rinfo);
+	if (rc) {
+		/*
+		 * This is somewhat fatal, but nothing we can do about it. We
+		 * can't disable the regulator without getting it back under
+		 * s/w control.
+		 */
+		dprintk(VIDC_WARN,
+			"Failed to acquire control on %s\n",
+			rinfo->name);
+
+		goto disable_regulator_failed;
+	}
+
+	rc = regulator_disable(rinfo->regulator);
+	if (rc) {
+		dprintk(VIDC_WARN,
+			"Failed to disable %s: %d\n",
+			rinfo->name, rc);
+		goto disable_regulator_failed;
+	}
+
+	return 0;
+disable_regulator_failed:
+
+	/* Bring attention to this issue */
+	WARN_ON(VIDC_DBG_WARN_ENABLE);
+	return rc;
+}
+
+static int __enable_hw_power_collapse(struct venus_hfi_device *device)
+{
+	int rc = 0;
+
+	if (!msm_vidc_fw_low_power_mode) {
+		dprintk(VIDC_DBG, "Not enabling hardware power collapse\n");
+		return 0;
+	}
+
+	rc = __hand_off_regulators(device);
+	if (rc)
+		dprintk(VIDC_WARN,
+			"%s : Failed to enable HW power collapse %d\n",
+				__func__, rc);
+	return rc;
+}
+
+static int __enable_regulators(struct venus_hfi_device *device)
+{
+	int rc = 0, c = 0;
+	struct regulator_info *rinfo;
+
+	dprintk(VIDC_DBG, "Enabling regulators\n");
+
+	venus_hfi_for_each_regulator(device, rinfo) {
+		rc = regulator_enable(rinfo->regulator);
+		if (rc) {
+			dprintk(VIDC_ERR,
+					"Failed to enable %s: %d\n",
+					rinfo->name, rc);
+			goto err_reg_enable_failed;
+		}
+
+		dprintk(VIDC_DBG, "Enabled regulator %s\n",
+				rinfo->name);
+		c++;
+	}
+
+	return 0;
+
+err_reg_enable_failed:
+	venus_hfi_for_each_regulator_reverse_continue(device, rinfo, c)
+		__disable_regulator(rinfo);
+
+	return rc;
+}
+
+static int __disable_regulators(struct venus_hfi_device *device)
+{
+	struct regulator_info *rinfo;
+	int rc = 0;
+
+	dprintk(VIDC_DBG, "Disabling regulators\n");
+
+	venus_hfi_for_each_regulator_reverse(device, rinfo)
+		__disable_regulator(rinfo);
+
+	return rc;
+}
+
+static int __venus_power_on(struct venus_hfi_device *device)
+{
+	int rc = 0;
+
+	if (device->power_enabled)
+		return 0;
+
+	device->power_enabled = true;
+	/* Vote for all hardware resources */
+	rc = __vote_buses(device, device->bus_vote.data,
+			device->bus_vote.data_count);
+	if (rc) {
+		dprintk(VIDC_ERR, "Failed to vote buses, err: %d\n", rc);
+		goto fail_vote_buses;
+	}
+
+	rc = __alloc_imem(device, device->res->imem_size);
+	if (rc) {
+		dprintk(VIDC_ERR, "Failed to allocate IMEM\n");
+		goto fail_alloc_imem;
+	}
+
+	rc = __enable_regulators(device);
+	if (rc) {
+		dprintk(VIDC_ERR, "Failed to enable GDSC, err = %d\n", rc);
+		goto fail_enable_gdsc;
+	}
+
+	rc = __prepare_enable_clks(device);
+	if (rc) {
+		dprintk(VIDC_ERR, "Failed to enable clocks: %d\n", rc);
+		goto fail_enable_clks;
+	}
+
+	rc = __scale_clocks(device, 0, NULL, 0);
+	if (rc) {
+		dprintk(VIDC_WARN,
+				"Failed to scale clocks, performance might be affected\n");
+		rc = 0;
+	}
+
+	/*
+	 * Re-program all of the registers that get reset as a result of
+	 * regulator_disable() and _enable()
+	 */
+	__set_registers(device);
+
+	__write_register(device, VIDC_WRAPPER_INTR_MASK,
+			VIDC_WRAPPER_INTR_MASK_A2HVCODEC_BMSK);
+	device->intr_status = 0;
+	enable_irq(device->hal_data->irq);
+
+	/*
+	 * Hand off control of regulators to h/w _after_ enabling clocks.
+	 * Note that the GDSC will turn off when switching from normal
+	 * (s/w triggered) to fast (HW triggered) unless the h/w vote is
+	 * present. Since Venus isn't up yet, the GDSC will be off briefly.
+	 */
+	if (__enable_hw_power_collapse(device))
+		dprintk(VIDC_ERR, "Failed to enabled inter-frame PC\n");
+
+	return rc;
+
+fail_enable_clks:
+	__disable_regulators(device);
+fail_enable_gdsc:
+	__free_imem(device);
+fail_alloc_imem:
+	__unvote_buses(device);
+fail_vote_buses:
+	device->power_enabled = false;
+	return rc;
+}
+
+static void __venus_power_off(struct venus_hfi_device *device, bool halt_axi)
+{
+	if (!device->power_enabled)
+		return;
+
+	if (!(device->intr_status & VIDC_WRAPPER_INTR_STATUS_A2HWD_BMSK))
+		disable_irq_nosync(device->hal_data->irq);
+	device->intr_status = 0;
+
+	/*
+	 * Halt the AXI to make sure there are no pending transactions.
+	 * Clocks should be unprepared after making sure the AXI is halted.
+	 */
+	if (halt_axi && __halt_axi(device))
+		dprintk(VIDC_WARN, "Failed to halt AXI\n");
+
+	__disable_unprepare_clks(device);
+	if (__disable_regulators(device))
+		dprintk(VIDC_WARN, "Failed to disable regulators\n");
+
+	__free_imem(device);
+
+	if (__unvote_buses(device))
+		dprintk(VIDC_WARN, "Failed to unvote for buses\n");
+	device->power_enabled = false;
+}
+
+static inline int __suspend(struct venus_hfi_device *device)
+{
+	int rc = 0;
+
+	if (!device) {
+		dprintk(VIDC_ERR, "Invalid params: %pK\n", device);
+		return -EINVAL;
+	} else if (!device->power_enabled) {
+		dprintk(VIDC_DBG, "Power already disabled\n");
+		return 0;
+	}
+
+	dprintk(VIDC_PROF, "Entering power collapse\n");
+
+	if (device->res->pm_qos_latency_us &&
+		pm_qos_request_active(&device->qos))
+		pm_qos_remove_request(&device->qos);
+
+	rc = __tzbsp_set_video_state(TZBSP_VIDEO_STATE_SUSPEND);
+	if (rc) {
+		dprintk(VIDC_WARN, "Failed to suspend video core %d\n", rc);
+		goto err_tzbsp_suspend;
+	}
+
+	__save_clock_rate(device, false);
+	__venus_power_off(device, true);
+	dprintk(VIDC_PROF, "Venus power collapsed\n");
+	return rc;
+
+err_tzbsp_suspend:
+	return rc;
+}
+
+static inline int __resume(struct venus_hfi_device *device)
+{
+	int rc = 0;
+
+	if (!device) {
+		dprintk(VIDC_ERR, "Invalid params: %pK\n", device);
+		return -EINVAL;
+	} else if (device->power_enabled) {
+		dprintk(VIDC_DBG, "Power is already enabled\n");
+		goto exit;
+	} else if (!__core_in_valid_state(device)) {
+		dprintk(VIDC_DBG, "venus_hfi_device in deinit state.");
+		return -EINVAL;
+	}
+
+	dprintk(VIDC_PROF, "Resuming from power collapse\n");
+	rc = __venus_power_on(device);
+	if (rc) {
+		dprintk(VIDC_ERR, "Failed to power on venus\n");
+		goto err_venus_power_on;
+	}
+	__restore_clock_rate(device);
+
+	/* Reboot the firmware */
+	rc = __tzbsp_set_video_state(TZBSP_VIDEO_STATE_RESUME);
+	if (rc) {
+		dprintk(VIDC_ERR, "Failed to resume video core %d\n", rc);
+		goto err_set_video_state;
+	}
+
+	__setup_ucregion_memory_map(device);
+	/* Wait for boot completion */
+	rc = __boot_firmware(device);
+	if (rc) {
+		dprintk(VIDC_ERR, "Failed to reset venus core\n");
+		goto err_reset_core;
+	}
+
+	/*
+	 * Work around for H/W bug: these registers need to be reprogrammed
+	 * once firmware is out of reset
+	 */
+	__set_threshold_registers(device);
+
+	if (device->res->pm_qos_latency_us) {
+#ifdef CONFIG_SMP
+		device->qos.type = PM_QOS_REQ_AFFINE_IRQ;
+		device->qos.irq = device->hal_data->irq;
+#endif
+		pm_qos_add_request(&device->qos, PM_QOS_CPU_DMA_LATENCY,
+				device->res->pm_qos_latency_us);
+	}
+	dprintk(VIDC_PROF, "Resumed from power collapse\n");
+exit:
+	device->skip_pc_count = 0;
+	return rc;
+err_reset_core:
+	__tzbsp_set_video_state(TZBSP_VIDEO_STATE_SUSPEND);
+err_set_video_state:
+	__save_clock_rate(device, true);
+	__venus_power_off(device, true);
+err_venus_power_on:
+	dprintk(VIDC_ERR, "Failed to resume from power collapse\n");
+	return rc;
+}
+
+static int __load_fw(struct venus_hfi_device *device)
+{
+	int rc = 0;
+	/* Initialize resources */
+	rc = __init_resources(device, device->res);
+	if (rc) {
+		dprintk(VIDC_ERR, "Failed to init resources: %d\n", rc);
+		goto fail_init_res;
+	}
+
+	rc = __initialize_packetization(device);
+	if (rc) {
+		dprintk(VIDC_ERR, "Failed to initialize packetization\n");
+		goto fail_init_pkt;
+	}
+	trace_msm_v4l2_vidc_fw_load_start("msm_v4l2_vidc venus_fw load start");
+
+	rc = __venus_power_on(device);
+	if (rc) {
+		dprintk(VIDC_ERR, "Failed to power on venus in in load_fw\n");
+		goto fail_venus_power_on;
+	}
+
+	if ((!device->res->use_non_secure_pil && !device->res->firmware_base)
+			|| device->res->use_non_secure_pil) {
+		if (!device->resources.fw.cookie)
+			device->resources.fw.cookie =
+				subsystem_get_with_fwname("venus",
+				device->res->fw_name);
+
+		if (IS_ERR_OR_NULL(device->resources.fw.cookie)) {
+			dprintk(VIDC_ERR, "Failed to download firmware\n");
+			device->resources.fw.cookie = NULL;
+			rc = -ENOMEM;
+			goto fail_load_fw;
+		}
+	}
+
+	if (!device->res->use_non_secure_pil && !device->res->firmware_base) {
+		rc = __protect_cp_mem(device);
+		if (rc) {
+			dprintk(VIDC_ERR, "Failed to protect memory\n");
+			goto fail_protect_mem;
+		}
+	}
+	trace_msm_v4l2_vidc_fw_load_end("msm_v4l2_vidc venus_fw load end");
+	return rc;
+fail_protect_mem:
+	if (device->resources.fw.cookie)
+		subsystem_put(device->resources.fw.cookie);
+	device->resources.fw.cookie = NULL;
+fail_load_fw:
+	__save_clock_rate(device, true);
+	__venus_power_off(device, true);
+fail_venus_power_on:
+fail_init_pkt:
+	__deinit_resources(device);
+fail_init_res:
+	trace_msm_v4l2_vidc_fw_load_end("msm_v4l2_vidc venus_fw load end");
+	return rc;
+}
+
+static void __unload_fw(struct venus_hfi_device *device)
+{
+	if (!device->resources.fw.cookie)
+		return;
+
+	cancel_delayed_work(&venus_hfi_pm_work);
+	if (device->state != VENUS_STATE_DEINIT)
+		flush_workqueue(device->venus_pm_workq);
+
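+	/* Voting with no data drops all bus bandwidth requests */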
+	__vote_buses(device, NULL, 0);
+	subsystem_put(device->resources.fw.cookie);
+	__interface_queues_release(device);
+	__save_clock_rate(device, true);
+	__venus_power_off(device, false);
+	device->resources.fw.cookie = NULL;
+	__deinit_resources(device);
+}
+
+static int venus_hfi_get_fw_info(void *dev, struct hal_fw_info *fw_info)
+{
+	int i = 0, j = 0;
+	struct venus_hfi_device *device = dev;
+	u32 smem_block_size = 0;
+	u8 *smem_table_ptr;
+	char version[VENUS_VERSION_LENGTH] = "";
+	const u32 smem_image_index_venus = 14 * 128;
+
+	if (!device || !fw_info) {
+		dprintk(VIDC_ERR,
+			"%s Invalid parameter: device = %pK fw_info = %pK\n",
+			__func__, device, fw_info);
+		return -EINVAL;
+	}
+
+	mutex_lock(&device->lock);
+
+	smem_table_ptr = smem_get_entry(SMEM_IMAGE_VERSION_TABLE,
+			&smem_block_size, 0, SMEM_ANY_HOST_FLAG);
+	if (smem_table_ptr &&
+			((smem_image_index_venus +
+			  VENUS_VERSION_LENGTH) <= smem_block_size))
+		memcpy(version,
+			smem_table_ptr + smem_image_index_venus,
+			VENUS_VERSION_LENGTH);
+
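+	/* Scan forward for the 'V' that marks the start of the version */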
+	while (version[i++] != 'V' && i < VENUS_VERSION_LENGTH)
+		;
+
+	if (i == VENUS_VERSION_LENGTH) {
+		dprintk(VIDC_WARN, "Venus version string is not proper\n");
+		fw_info->version[0] = '\0';
+		goto fail_version_string;
+	}
+
+	for (i--; i < VENUS_VERSION_LENGTH && j < VENUS_VERSION_LENGTH - 1; i++)
+		fw_info->version[j++] = version[i];
+	fw_info->version[j] = '\0';
+
+fail_version_string:
+	dprintk(VIDC_DBG, "F/W version retrieved : %s\n", fw_info->version);
+	fw_info->base_addr = device->hal_data->firmware_base;
+	fw_info->register_base = device->res->register_base;
+	fw_info->register_size = device->hal_data->register_size;
+	fw_info->irq = device->hal_data->irq;
+
+	mutex_unlock(&device->lock);
+	return 0;
+}
+
+static int venus_hfi_get_core_capabilities(void *dev)
+{
+	struct venus_hfi_device *device = dev;
+	int rc = 0;
+
+	if (!device)
+		return -EINVAL;
+
+	mutex_lock(&device->lock);
+
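+	/* Pack capability flags into the return value (not an error code) */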
+	rc = HAL_VIDEO_ENCODER_ROTATION_CAPABILITY |
+		HAL_VIDEO_ENCODER_SCALING_CAPABILITY |
+		HAL_VIDEO_ENCODER_DEINTERLACE_CAPABILITY |
+		HAL_VIDEO_DECODER_MULTI_STREAM_CAPABILITY;
+
+	mutex_unlock(&device->lock);
+
+	return rc;
+}
+
+static int __initialize_packetization(struct venus_hfi_device *device)
+{
+	int rc = 0;
+	const char *hfi_version;
+
+	if (!device || !device->res) {
+		dprintk(VIDC_ERR, "%s - invalid param\n", __func__);
+		return -EINVAL;
+	}
+
+	hfi_version = device->res->hfi_version;
+
+	if (!hfi_version) {
+		device->packetization_type = HFI_PACKETIZATION_LEGACY;
+	} else if (!strcmp(hfi_version, "3xx")) {
+		device->packetization_type = HFI_PACKETIZATION_3XX;
+	} else {
+		dprintk(VIDC_ERR, "Unsupported hfi version\n");
+		return -EINVAL;
+	}
+
+	device->pkt_ops = hfi_get_pkt_ops_handle(device->packetization_type);
+	if (!device->pkt_ops) {
+		rc = -EINVAL;
+		dprintk(VIDC_ERR, "Failed to get pkt_ops handle\n");
+	}
+
+	return rc;
+}
+
+static struct venus_hfi_device *__add_device(u32 device_id,
+			struct msm_vidc_platform_resources *res,
+			hfi_cmd_response_callback callback)
+{
+	struct venus_hfi_device *hdevice = NULL;
+	int rc = 0;
+
+	if (!res || !callback) {
+		dprintk(VIDC_ERR, "Invalid Parameters\n");
+		return NULL;
+	}
+
+	dprintk(VIDC_INFO, "entered , device_id: %d\n", device_id);
+
+	hdevice = kzalloc(sizeof(*hdevice), GFP_KERNEL);
+	if (!hdevice) {
+		dprintk(VIDC_ERR, "failed to allocate new device\n");
+		goto exit;
+	}
+
+	hdevice->response_pkt = kmalloc_array(max_packets,
+				sizeof(*hdevice->response_pkt), GFP_KERNEL);
+	if (!hdevice->response_pkt) {
+		dprintk(VIDC_ERR, "failed to allocate response_pkt\n");
+		goto err_cleanup;
+	}
+
+	hdevice->raw_packet =
+		kzalloc(VIDC_IFACEQ_VAR_HUGE_PKT_SIZE, GFP_TEMPORARY);
+	if (!hdevice->raw_packet) {
+		dprintk(VIDC_ERR, "failed to allocate raw packet\n");
+		goto err_cleanup;
+	}
+
+	rc = __init_regs_and_interrupts(hdevice, res);
+	if (rc)
+		goto err_cleanup;
+
+	hdevice->res = res;
+	hdevice->device_id = device_id;
+	hdevice->callback = callback;
+
+	hdevice->vidc_workq = create_singlethread_workqueue(
+		"msm_vidc_workerq_venus");
+	if (!hdevice->vidc_workq) {
+		dprintk(VIDC_ERR, ": create vidc workq failed\n");
+		goto err_cleanup;
+	}
+
+	hdevice->venus_pm_workq = create_singlethread_workqueue(
+			"pm_workerq_venus");
+	if (!hdevice->venus_pm_workq) {
+		dprintk(VIDC_ERR, ": create pm workq failed\n");
+		goto err_cleanup;
+	}
+
+	if (!hal_ctxt.dev_count)
+		INIT_LIST_HEAD(&hal_ctxt.dev_head);
+
+	mutex_init(&hdevice->lock);
+	INIT_LIST_HEAD(&hdevice->list);
+	INIT_LIST_HEAD(&hdevice->sess_head);
+	list_add_tail(&hdevice->list, &hal_ctxt.dev_head);
+	hal_ctxt.dev_count++;
+
+	return hdevice;
+
+err_cleanup:
+	if (hdevice->vidc_workq)
+		destroy_workqueue(hdevice->vidc_workq);
+	kfree(hdevice->response_pkt);
+	kfree(hdevice->raw_packet);
+	kfree(hdevice);
+exit:
+	return NULL;
+}
+
+static struct venus_hfi_device *__get_device(u32 device_id,
+				struct msm_vidc_platform_resources *res,
+				hfi_cmd_response_callback callback)
+{
+	if (!res || !callback) {
+		dprintk(VIDC_ERR, "Invalid params: %pK %pK\n", res, callback);
+		return NULL;
+	}
+
+	return __add_device(device_id, res, callback);
+}
+
+void venus_hfi_delete_device(void *device)
+{
+	struct venus_hfi_device *close, *tmp, *dev;
+
+	if (!device)
+		return;
+
+	dev = (struct venus_hfi_device *) device;
+
+	mutex_lock(&dev->lock);
+	__iommu_detach(dev);
+	mutex_unlock(&dev->lock);
+
+	list_for_each_entry_safe(close, tmp, &hal_ctxt.dev_head, list) {
+		if (close->hal_data->irq == dev->hal_data->irq) {
+			hal_ctxt.dev_count--;
+			list_del(&close->list);
+			mutex_destroy(&close->lock);
+			destroy_workqueue(close->vidc_workq);
+			destroy_workqueue(close->venus_pm_workq);
+			free_irq(dev->hal_data->irq, close);
+			iounmap(dev->hal_data->register_base);
+			kfree(close->hal_data);
+			kfree(close->response_pkt);
+			kfree(close->raw_packet);
+			kfree(close);
+			break;
+		}
+	}
+}
+
+static void venus_init_hfi_callbacks(struct hfi_device *hdev)
+{
+	hdev->core_init = venus_hfi_core_init;
+	hdev->core_release = venus_hfi_core_release;
+	hdev->core_ping = venus_hfi_core_ping;
+	hdev->core_trigger_ssr = venus_hfi_core_trigger_ssr;
+	hdev->session_init = venus_hfi_session_init;
+	hdev->session_end = venus_hfi_session_end;
+	hdev->session_abort = venus_hfi_session_abort;
+	hdev->session_clean = venus_hfi_session_clean;
+	hdev->session_set_buffers = venus_hfi_session_set_buffers;
+	hdev->session_release_buffers = venus_hfi_session_release_buffers;
+	hdev->session_load_res = venus_hfi_session_load_res;
+	hdev->session_release_res = venus_hfi_session_release_res;
+	hdev->session_start = venus_hfi_session_start;
+	hdev->session_continue = venus_hfi_session_continue;
+	hdev->session_stop = venus_hfi_session_stop;
+	hdev->session_etb = venus_hfi_session_etb;
+	hdev->session_ftb = venus_hfi_session_ftb;
+	hdev->session_process_batch = venus_hfi_session_process_batch;
+	hdev->session_parse_seq_hdr = venus_hfi_session_parse_seq_hdr;
+	hdev->session_get_seq_hdr = venus_hfi_session_get_seq_hdr;
+	hdev->session_get_buf_req = venus_hfi_session_get_buf_req;
+	hdev->session_flush = venus_hfi_session_flush;
+	hdev->session_set_property = venus_hfi_session_set_property;
+	hdev->session_get_property = venus_hfi_session_get_property;
+	hdev->scale_clocks = venus_hfi_scale_clocks;
+	hdev->vote_bus = venus_hfi_vote_buses;
+	hdev->get_fw_info = venus_hfi_get_fw_info;
+	hdev->get_core_capabilities = venus_hfi_get_core_capabilities;
+	hdev->suspend = venus_hfi_suspend;
+	hdev->flush_debug_queue = venus_hfi_flush_debug_queue;
+	hdev->get_core_clock_rate = venus_hfi_get_core_clock_rate;
+	hdev->get_default_properties = venus_hfi_get_default_properties;
+}
+
+int venus_hfi_initialize(struct hfi_device *hdev, u32 device_id,
+		struct msm_vidc_platform_resources *res,
+		hfi_cmd_response_callback callback)
+{
+	int rc = 0;
+
+	if (!hdev || !res || !callback) {
+		dprintk(VIDC_ERR, "Invalid params: %pK %pK %pK\n",
+			hdev, res, callback);
+		rc = -EINVAL;
+		goto err_venus_hfi_init;
+	}
+
+	hdev->hfi_device_data = __get_device(device_id, res, callback);
+
+	if (IS_ERR_OR_NULL(hdev->hfi_device_data)) {
+		rc = PTR_ERR(hdev->hfi_device_data) ?: -EINVAL;
+		goto err_venus_hfi_init;
+	}
+
+	venus_init_hfi_callbacks(hdev);
+
+err_venus_hfi_init:
+	return rc;
+}
+
diff -Nruw linux-4.4.115/drivers/media/platform/msm/vidc/venus_hfi.h linux-4.4.115-fbx/drivers/media/platform/msm/vidc/venus_hfi.h
--- linux-4.4.115/drivers/media/platform/msm/vidc/venus_hfi.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/media/platform/msm/vidc/venus_hfi.h	2019-01-22 16:16:24.467255136 +0100
@@ -0,0 +1,271 @@
+/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __H_VENUS_HFI_H__
+#define __H_VENUS_HFI_H__
+
+#include <linux/clk.h>
+#include <linux/mutex.h>
+#include <linux/platform_device.h>
+#include <linux/pm_qos.h>
+#include <linux/spinlock.h>
+#include "vmem/vmem.h"
+#include "vidc_hfi_api.h"
+#include "vidc_hfi_helper.h"
+#include "vidc_hfi_api.h"
+#include "vidc_hfi.h"
+#include "msm_vidc_resources.h"
+#include "hfi_packetization.h"
+
+#define HFI_MASK_QHDR_TX_TYPE			0xFF000000
+#define HFI_MASK_QHDR_RX_TYPE			0x00FF0000
+#define HFI_MASK_QHDR_PRI_TYPE			0x0000FF00
+#define HFI_MASK_QHDR_Q_ID_TYPE			0x000000FF
+#define HFI_Q_ID_HOST_TO_CTRL_CMD_Q		0x00
+#define HFI_Q_ID_CTRL_TO_HOST_MSG_Q		0x01
+#define HFI_Q_ID_CTRL_TO_HOST_DEBUG_Q	0x02
+#define HFI_MASK_QHDR_STATUS			0x000000FF
+
+#define VIDC_MAX_UNCOMPRESSED_FMT_PLANES	3
+
+#define VIDC_IFACEQ_NUMQ					3
+#define VIDC_IFACEQ_CMDQ_IDX				0
+#define VIDC_IFACEQ_MSGQ_IDX				1
+#define VIDC_IFACEQ_DBGQ_IDX				2
+#define VIDC_IFACEQ_MAX_BUF_COUNT			50
+#define VIDC_IFACE_MAX_PARALLEL_CLNTS		16
+#define VIDC_IFACEQ_DFLT_QHDR				0x01010000
+
+#define VIDC_MAX_NAME_LENGTH 64
+#define VIDC_MAX_PC_SKIP_COUNT 10
+
+struct hfi_queue_table_header {
+	u32 qtbl_version;
+	u32 qtbl_size;
+	u32 qtbl_qhdr0_offset;
+	u32 qtbl_qhdr_size;
+	u32 qtbl_num_q;
+	u32 qtbl_num_active_q;
+	void *device_addr;
+	char name[256];
+};
+
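+/*
+ * The read and write indices below form a single-producer, single-consumer
+ * ring shared with the firmware: for any given queue, one side only ever
+ * advances qhdr_write_idx while the other only advances qhdr_read_idx.
+ */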
+struct hfi_queue_header {
+	u32 qhdr_status;
+	u32 qhdr_start_addr;
+	u32 qhdr_type;
+	u32 qhdr_q_size;
+	u32 qhdr_pkt_size;
+	u32 qhdr_pkt_drop_cnt;
+	u32 qhdr_rx_wm;
+	u32 qhdr_tx_wm;
+	u32 qhdr_rx_req;
+	u32 qhdr_tx_req;
+	u32 qhdr_rx_irq_status;
+	u32 qhdr_tx_irq_status;
+	u32 qhdr_read_idx;
+	u32 qhdr_write_idx;
+};
+
+struct hfi_mem_map_table {
+	u32 mem_map_num_entries;
+	u32 mem_map_table_base_addr;
+};
+
+struct hfi_mem_map {
+	u32 virtual_addr;
+	u32 physical_addr;
+	u32 size;
+	u32 attr;
+};
+
+#define VIDC_IFACEQ_TABLE_SIZE (sizeof(struct hfi_queue_table_header) \
+	+ sizeof(struct hfi_queue_header) * VIDC_IFACEQ_NUMQ)
+
+#define VIDC_IFACEQ_QUEUE_SIZE	(VIDC_IFACEQ_MAX_PKT_SIZE *  \
+	VIDC_IFACEQ_MAX_BUF_COUNT * VIDC_IFACE_MAX_PARALLEL_CLNTS)
+
+#define VIDC_IFACEQ_GET_QHDR_START_ADDR(ptr, i)     \
+	(void *)((ptr + sizeof(struct hfi_queue_table_header)) + \
+		(i * sizeof(struct hfi_queue_header)))
+
+#define QDSS_SIZE 4096
+#define SFR_SIZE 4096
+
+#define QUEUE_SIZE (VIDC_IFACEQ_TABLE_SIZE + \
+	(VIDC_IFACEQ_QUEUE_SIZE * VIDC_IFACEQ_NUMQ))
+
+#define ALIGNED_QDSS_SIZE ALIGN(QDSS_SIZE, SZ_4K)
+#define ALIGNED_SFR_SIZE ALIGN(SFR_SIZE, SZ_4K)
+#define ALIGNED_QUEUE_SIZE ALIGN(QUEUE_SIZE, SZ_4K)
+#define SHARED_QSIZE ALIGN(ALIGNED_SFR_SIZE + ALIGNED_QUEUE_SIZE + \
+			ALIGNED_QDSS_SIZE, SZ_1M)
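+
+/*
+ * Resulting shared-memory layout, as derived from the sizes above: the
+ * queue table header plus three queue headers (VIDC_IFACEQ_TABLE_SIZE),
+ * followed by the cmd/msg/dbg queue bodies (3 * VIDC_IFACEQ_QUEUE_SIZE),
+ * with the SFR and QDSS regions each aligned to SZ_4K and the whole
+ * allocation rounded up to SZ_1M (SHARED_QSIZE).
+ */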
+
+enum vidc_hw_reg {
+	VIDC_HWREG_CTRL_STATUS =  0x1,
+	VIDC_HWREG_QTBL_INFO =  0x2,
+	VIDC_HWREG_QTBL_ADDR =  0x3,
+	VIDC_HWREG_CTRLR_RESET =  0x4,
+	VIDC_HWREG_IFACEQ_FWRXREQ =  0x5,
+	VIDC_HWREG_IFACEQ_FWTXREQ =  0x6,
+	VIDC_HWREG_VHI_SOFTINTEN =  0x7,
+	VIDC_HWREG_VHI_SOFTINTSTATUS =  0x8,
+	VIDC_HWREG_VHI_SOFTINTCLR =  0x9,
+	VIDC_HWREG_HVI_SOFTINTEN =  0xA,
+};
+
+struct vidc_mem_addr {
+	ion_phys_addr_t align_device_addr;
+	u8 *align_virtual_addr;
+	u32 mem_size;
+	struct msm_smem *mem_data;
+};
+
+struct vidc_iface_q_info {
+	void *q_hdr;
+	struct vidc_mem_addr q_array;
+};
+
+/*
+ * These are helper macros to iterate over various lists within
+ * venus_hfi_device->res.  The intention is to cut down on a lot of
+ * boilerplate code.
+ */
+
+/* Read as "for each 'thing' in a set of 'thingies'" */
+#define venus_hfi_for_each_thing(__device, __thing, __thingy) \
+	venus_hfi_for_each_thing_continue(__device, __thing, __thingy, 0)
+
+#define venus_hfi_for_each_thing_reverse(__device, __thing, __thingy) \
+	venus_hfi_for_each_thing_reverse_continue(__device, __thing, __thingy, \
+			(__device)->res->__thingy##_set.count - 1)
+
+/*
+ * TODO: the __from parameter is technically not required since we can
+ * figure it out with some pointer magic (i.e. __thing - __thing##_tbl[0]).
+ * If this macro sees extensive use, it is probably worth cleaning up, but
+ * for now we omit that since it introduces unnecessary complexity.
+ */
+#define venus_hfi_for_each_thing_continue(__device, __thing, __thingy, __from) \
+	for (__thing = &(__device)->res->\
+			__thingy##_set.__thingy##_tbl[__from]; \
+		__thing < &(__device)->res->__thingy##_set.__thingy##_tbl[0] + \
+			((__device)->res->__thingy##_set.count - __from); \
+		++__thing)
+
+#define venus_hfi_for_each_thing_reverse_continue(__device, __thing, __thingy, \
+		__from) \
+	for (__thing = &(__device)->res->\
+			__thingy##_set.__thingy##_tbl[__from]; \
+		__thing >= &(__device)->res->__thingy##_set.__thingy##_tbl[0]; \
+		--__thing)
+
+/* Regular set helpers */
+#define venus_hfi_for_each_regulator(__device, __rinfo) \
+	venus_hfi_for_each_thing(__device, __rinfo, regulator)
+
+#define venus_hfi_for_each_regulator_reverse(__device, __rinfo) \
+	venus_hfi_for_each_thing_reverse(__device, __rinfo, regulator)
+
+#define venus_hfi_for_each_regulator_reverse_continue(__device, __rinfo, \
+		__from) \
+	venus_hfi_for_each_thing_reverse_continue(__device, __rinfo, \
+			regulator, __from)
+
+/* Clock set helpers */
+#define venus_hfi_for_each_clock(__device, __cinfo) \
+	venus_hfi_for_each_thing(__device, __cinfo, clock)
+
+#define venus_hfi_for_each_clock_reverse(__device, __cinfo) \
+	venus_hfi_for_each_thing_reverse(__device, __cinfo, clock)
+
+#define venus_hfi_for_each_clock_reverse_continue(__device, __rinfo, \
+		__from) \
+	venus_hfi_for_each_thing_reverse_continue(__device, __rinfo, \
+			clock, __from)
+
+/* Bus set helpers */
+#define venus_hfi_for_each_bus(__device, __binfo) \
+	venus_hfi_for_each_thing(__device, __binfo, bus)
+#define venus_hfi_for_each_bus_reverse(__device, __binfo) \
+	venus_hfi_for_each_thing_reverse(__device, __binfo, bus)
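+
+/*
+ * Illustrative use (not taken from this file): walk the regulator table
+ * hanging off venus_hfi_device->res, assuming the regulator_info entries
+ * carry a name field as in msm_vidc_resources.h:
+ *
+ *	struct regulator_info *rinfo = NULL;
+ *
+ *	venus_hfi_for_each_regulator(device, rinfo)
+ *		dprintk(VIDC_DBG, "regulator: %s\n", rinfo->name);
+ */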
+
+/* Internal data used in vidc_hal, not exposed to msm_vidc */
+struct hal_data {
+	u32 irq;
+	phys_addr_t firmware_base;
+	u8 __iomem *register_base;
+	u32 register_size;
+};
+
+struct imem {
+	enum imem_type type;
+	union {
+		phys_addr_t vmem;
+	};
+};
+
+struct venus_resources {
+	struct msm_vidc_fw fw;
+	struct imem imem;
+};
+
+enum venus_hfi_state {
+	VENUS_STATE_DEINIT = 1,
+	VENUS_STATE_INIT,
+};
+
+struct venus_hfi_device {
+	struct list_head list;
+	struct list_head sess_head;
+	u32 intr_status;
+	u32 device_id;
+	u32 clk_freq;
+	u32 last_packet_type;
+	unsigned long clk_bitrate;
+	unsigned long scaled_rate;
+	struct msm_vidc_gov_data bus_vote;
+	bool power_enabled;
+	struct mutex lock;
+	msm_vidc_callback callback;
+	struct vidc_mem_addr iface_q_table;
+	struct vidc_mem_addr qdss;
+	struct vidc_mem_addr sfr;
+	struct vidc_mem_addr mem_addr;
+	struct vidc_iface_q_info iface_queues[VIDC_IFACEQ_NUMQ];
+	struct smem_client *hal_client;
+	struct hal_data *hal_data;
+	struct workqueue_struct *vidc_workq;
+	struct workqueue_struct *venus_pm_workq;
+	int spur_count;
+	int reg_count;
+	struct venus_resources resources;
+	struct msm_vidc_platform_resources *res;
+	enum venus_hfi_state state;
+	struct hfi_packetization_ops *pkt_ops;
+	enum hfi_packetization_type packetization_type;
+	struct msm_vidc_cb_info *response_pkt;
+	u8 *raw_packet;
+	struct pm_qos_request qos;
+	unsigned int skip_pc_count;
+	struct msm_vidc_capability *sys_init_capabilities;
+};
+
+void venus_hfi_delete_device(void *device);
+
+int venus_hfi_initialize(struct hfi_device *hdev, u32 device_id,
+		struct msm_vidc_platform_resources *res,
+		hfi_cmd_response_callback callback);
+bool venus_hfi_is_session_supported(unsigned long sessions_supported,
+		enum vidc_vote_data_session session_type);
+
+#endif
diff -Nruw linux-4.4.115/drivers/media/platform/msm/vidc/vidc_hfi_api.h linux-4.4.115-fbx/drivers/media/platform/msm/vidc/vidc_hfi_api.h
--- linux-4.4.115/drivers/media/platform/msm/vidc/vidc_hfi_api.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/media/platform/msm/vidc/vidc_hfi_api.h	2019-04-24 19:28:47.288498128 +0200
@@ -0,0 +1,1546 @@
+/* Copyright (c) 2012-2017,2019 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __VIDC_HFI_API_H__
+#define __VIDC_HFI_API_H__
+
+#include <linux/log2.h>
+#include <linux/platform_device.h>
+#include <linux/types.h>
+#include <media/msm_vidc.h>
+#include "msm_vidc_resources.h"
+
+#define CONTAINS(__a, __sz, __t) ({\
+	int __rc = __t >= __a && \
+			__t < __a + __sz; \
+	__rc; \
+})
+
+#define OVERLAPS(__t, __tsz, __a, __asz) ({\
+	int __rc = __t <= __a && \
+			__t + __tsz >= __a + __asz; \
+	__rc; \
+})
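+
+/*
+ * CONTAINS(a, sz, t) is true when address t lies inside [a, a + sz).
+ * OVERLAPS(t, tsz, a, asz) is true when [a, a + asz] lies entirely inside
+ * [t, t + tsz]; despite its name it checks containment, not mere overlap.
+ */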
+
+#define HAL_BUFFERFLAG_EOS              0x00000001
+#define HAL_BUFFERFLAG_STARTTIME        0x00000002
+#define HAL_BUFFERFLAG_DECODEONLY       0x00000004
+#define HAL_BUFFERFLAG_DATACORRUPT      0x00000008
+#define HAL_BUFFERFLAG_ENDOFFRAME       0x00000010
+#define HAL_BUFFERFLAG_SYNCFRAME        0x00000020
+#define HAL_BUFFERFLAG_EXTRADATA        0x00000040
+#define HAL_BUFFERFLAG_CODECCONFIG      0x00000080
+#define HAL_BUFFERFLAG_TIMESTAMPINVALID 0x00000100
+#define HAL_BUFFERFLAG_READONLY         0x00000200
+#define HAL_BUFFERFLAG_ENDOFSUBFRAME    0x00000400
+#define HAL_BUFFERFLAG_EOSEQ            0x00200000
+#define HAL_BUFFERFLAG_MBAFF            0x08000000
+#define HAL_BUFFERFLAG_YUV_601_709_CSC_CLAMP   0x10000000
+#define HAL_BUFFERFLAG_DROP_FRAME       0x20000000
+#define HAL_BUFFERFLAG_TS_DISCONTINUITY	0x40000000
+#define HAL_BUFFERFLAG_TS_ERROR		0x80000000
+
+
+#define HAL_DEBUG_MSG_LOW				0x00000001
+#define HAL_DEBUG_MSG_MEDIUM			0x00000002
+#define HAL_DEBUG_MSG_HIGH				0x00000004
+#define HAL_DEBUG_MSG_ERROR				0x00000008
+#define HAL_DEBUG_MSG_FATAL				0x00000010
+#define MAX_PROFILE_COUNT	16
+
+#define HAL_MAX_MATRIX_COEFFS 9
+#define HAL_MAX_BIAS_COEFFS 3
+#define HAL_MAX_LIMIT_COEFFS 6
+#define VENUS_VERSION_LENGTH 128
+
+/* 16 encoder and 16 decoder sessions */
+#define VIDC_MAX_SESSIONS               32
+#define VIDC_MAX_DECODE_SESSIONS        16
+#define VIDC_MAX_ENCODE_SESSIONS        16
+
+
+enum vidc_status {
+	VIDC_ERR_NONE = 0x0,
+	VIDC_ERR_FAIL = 0x80000000,
+	VIDC_ERR_ALLOC_FAIL,
+	VIDC_ERR_ILLEGAL_OP,
+	VIDC_ERR_BAD_PARAM,
+	VIDC_ERR_BAD_HANDLE,
+	VIDC_ERR_NOT_SUPPORTED,
+	VIDC_ERR_BAD_STATE,
+	VIDC_ERR_MAX_CLIENTS,
+	VIDC_ERR_IFRAME_EXPECTED,
+	VIDC_ERR_HW_FATAL,
+	VIDC_ERR_BITSTREAM_ERR,
+	VIDC_ERR_INDEX_NOMORE,
+	VIDC_ERR_SEQHDR_PARSE_FAIL,
+	VIDC_ERR_INSUFFICIENT_BUFFER,
+	VIDC_ERR_BAD_POWER_STATE,
+	VIDC_ERR_NO_VALID_SESSION,
+	VIDC_ERR_TIMEOUT,
+	VIDC_ERR_CMDQFULL,
+	VIDC_ERR_START_CODE_NOT_FOUND,
+	VIDC_ERR_CLIENT_PRESENT = 0x90000001,
+	VIDC_ERR_CLIENT_FATAL,
+	VIDC_ERR_CMD_QUEUE_FULL,
+	VIDC_ERR_UNUSED = 0x10000000
+};
+
+enum hal_extradata_id {
+	HAL_EXTRADATA_NONE,
+	HAL_EXTRADATA_MB_QUANTIZATION,
+	HAL_EXTRADATA_INTERLACE_VIDEO,
+	HAL_EXTRADATA_VC1_FRAMEDISP,
+	HAL_EXTRADATA_VC1_SEQDISP,
+	HAL_EXTRADATA_TIMESTAMP,
+	HAL_EXTRADATA_S3D_FRAME_PACKING,
+	HAL_EXTRADATA_FRAME_RATE,
+	HAL_EXTRADATA_PANSCAN_WINDOW,
+	HAL_EXTRADATA_RECOVERY_POINT_SEI,
+	HAL_EXTRADATA_MULTISLICE_INFO,
+	HAL_EXTRADATA_INDEX,
+	HAL_EXTRADATA_NUM_CONCEALED_MB,
+	HAL_EXTRADATA_METADATA_FILLER,
+	HAL_EXTRADATA_ASPECT_RATIO,
+	HAL_EXTRADATA_MPEG2_SEQDISP,
+	HAL_EXTRADATA_STREAM_USERDATA,
+	HAL_EXTRADATA_FRAME_QP,
+	HAL_EXTRADATA_FRAME_BITS_INFO,
+	HAL_EXTRADATA_INPUT_CROP,
+	HAL_EXTRADATA_DIGITAL_ZOOM,
+	HAL_EXTRADATA_LTR_INFO,
+	HAL_EXTRADATA_METADATA_MBI,
+	HAL_EXTRADATA_VQZIP_SEI,
+	HAL_EXTRADATA_YUV_STATS,
+	HAL_EXTRADATA_ROI_QP,
+	HAL_EXTRADATA_OUTPUT_CROP,
+	HAL_EXTRADATA_MASTERING_DISPLAY_COLOUR_SEI,
+	HAL_EXTRADATA_CONTENT_LIGHT_LEVEL_SEI,
+	HAL_EXTRADATA_PQ_INFO,
+	HAL_EXTRADATA_VUI_DISPLAY_INFO,
+	HAL_EXTRADATA_VPX_COLORSPACE,
+};
+
+enum hal_property {
+	HAL_CONFIG_FRAME_RATE = 0x04000001,
+	HAL_PARAM_UNCOMPRESSED_FORMAT_SELECT,
+	HAL_PARAM_UNCOMPRESSED_PLANE_ACTUAL_CONSTRAINTS_INFO,
+	HAL_PARAM_UNCOMPRESSED_PLANE_ACTUAL_INFO,
+	HAL_PARAM_EXTRA_DATA_HEADER_CONFIG,
+	HAL_PARAM_INDEX_EXTRADATA,
+	HAL_PARAM_FRAME_SIZE,
+	HAL_CONFIG_REALTIME,
+	HAL_PARAM_BUFFER_COUNT_ACTUAL,
+	HAL_PARAM_BUFFER_SIZE_MINIMUM,
+	HAL_PARAM_NAL_STREAM_FORMAT_SELECT,
+	HAL_PARAM_VDEC_OUTPUT_ORDER,
+	HAL_PARAM_VDEC_PICTURE_TYPE_DECODE,
+	HAL_PARAM_VDEC_OUTPUT2_KEEP_ASPECT_RATIO,
+	HAL_CONFIG_VDEC_POST_LOOP_DEBLOCKER,
+	HAL_PARAM_VDEC_MULTI_STREAM,
+	HAL_PARAM_VDEC_DISPLAY_PICTURE_BUFFER_COUNT,
+	HAL_PARAM_DIVX_FORMAT,
+	HAL_CONFIG_VDEC_MB_ERROR_MAP_REPORTING,
+	HAL_PARAM_VDEC_CONTINUE_DATA_TRANSFER,
+	HAL_CONFIG_VDEC_MB_ERROR_MAP,
+	HAL_CONFIG_VENC_REQUEST_IFRAME,
+	HAL_PARAM_VENC_MPEG4_SHORT_HEADER,
+	HAL_PARAM_VENC_MPEG4_AC_PREDICTION,
+	HAL_CONFIG_VENC_TARGET_BITRATE,
+	HAL_PARAM_PROFILE_LEVEL_CURRENT,
+	HAL_PARAM_VENC_H264_ENTROPY_CONTROL,
+	HAL_PARAM_VENC_RATE_CONTROL,
+	HAL_PARAM_VENC_MPEG4_TIME_RESOLUTION,
+	HAL_PARAM_VENC_MPEG4_HEADER_EXTENSION,
+	HAL_PARAM_VENC_H264_DEBLOCK_CONTROL,
+	HAL_PARAM_VENC_TEMPORAL_SPATIAL_TRADEOFF,
+	HAL_PARAM_VENC_SESSION_QP,
+	HAL_PARAM_VENC_SESSION_QP_RANGE,
+	HAL_CONFIG_VENC_INTRA_PERIOD,
+	HAL_CONFIG_VENC_IDR_PERIOD,
+	HAL_CONFIG_VPE_OPERATIONS,
+	HAL_PARAM_VENC_INTRA_REFRESH,
+	HAL_PARAM_VENC_MULTI_SLICE_CONTROL,
+	HAL_CONFIG_VPE_DEINTERLACE,
+	HAL_SYS_DEBUG_CONFIG,
+	HAL_CONFIG_BUFFER_REQUIREMENTS,
+	HAL_CONFIG_PRIORITY,
+	HAL_CONFIG_BATCH_INFO,
+	HAL_PARAM_METADATA_PASS_THROUGH,
+	HAL_SYS_IDLE_INDICATOR,
+	HAL_PARAM_UNCOMPRESSED_FORMAT_SUPPORTED,
+	HAL_PARAM_INTERLACE_FORMAT_SUPPORTED,
+	HAL_PARAM_CHROMA_SITE,
+	HAL_PARAM_PROPERTIES_SUPPORTED,
+	HAL_PARAM_PROFILE_LEVEL_SUPPORTED,
+	HAL_PARAM_CAPABILITY_SUPPORTED,
+	HAL_PARAM_NAL_STREAM_FORMAT_SUPPORTED,
+	HAL_PARAM_MULTI_VIEW_FORMAT,
+	HAL_PARAM_MAX_SEQUENCE_HEADER_SIZE,
+	HAL_PARAM_CODEC_SUPPORTED,
+	HAL_PARAM_VDEC_MULTI_VIEW_SELECT,
+	HAL_PARAM_VDEC_MB_QUANTIZATION,
+	HAL_PARAM_VDEC_NUM_CONCEALED_MB,
+	HAL_PARAM_VDEC_H264_ENTROPY_SWITCHING,
+	HAL_PARAM_VENC_SLICE_DELIVERY_MODE,
+	HAL_PARAM_VENC_MPEG4_DATA_PARTITIONING,
+	HAL_CONFIG_BUFFER_COUNT_ACTUAL,
+	HAL_CONFIG_VDEC_MULTI_STREAM,
+	HAL_PARAM_VENC_MULTI_SLICE_INFO,
+	HAL_CONFIG_VENC_TIMESTAMP_SCALE,
+	HAL_PARAM_VENC_SYNC_FRAME_SEQUENCE_HEADER,
+	HAL_PARAM_VDEC_SYNC_FRAME_DECODE,
+	HAL_PARAM_VENC_H264_ENTROPY_CABAC_MODEL,
+	HAL_CONFIG_VENC_MAX_BITRATE,
+	HAL_PARAM_VENC_H264_VUI_TIMING_INFO,
+	HAL_PARAM_VENC_GENERATE_AUDNAL,
+	HAL_PARAM_VENC_MAX_NUM_B_FRAMES,
+	HAL_PARAM_BUFFER_ALLOC_MODE,
+	HAL_PARAM_VDEC_FRAME_ASSEMBLY,
+	HAL_PARAM_VENC_H264_VUI_BITSTREAM_RESTRC,
+	HAL_PARAM_VENC_PRESERVE_TEXT_QUALITY,
+	HAL_PARAM_VDEC_CONCEAL_COLOR,
+	HAL_PARAM_VDEC_SCS_THRESHOLD,
+	HAL_PARAM_GET_BUFFER_REQUIREMENTS,
+	HAL_PARAM_MVC_BUFFER_LAYOUT,
+	HAL_PARAM_VENC_LTRMODE,
+	HAL_CONFIG_VENC_MARKLTRFRAME,
+	HAL_CONFIG_VENC_USELTRFRAME,
+	HAL_CONFIG_VENC_LTRPERIOD,
+	HAL_CONFIG_VENC_HIER_P_NUM_FRAMES,
+	HAL_PARAM_VENC_HIER_P_MAX_ENH_LAYERS,
+	HAL_PARAM_VENC_DISABLE_RC_TIMESTAMP,
+	HAL_PARAM_VENC_ENABLE_INITIAL_QP,
+	HAL_PARAM_VENC_SEARCH_RANGE,
+	HAL_PARAM_VPE_COLOR_SPACE_CONVERSION,
+	HAL_PARAM_VENC_VPX_ERROR_RESILIENCE_MODE,
+	HAL_PARAM_VENC_H264_NAL_SVC_EXT,
+	HAL_CONFIG_VENC_PERF_MODE,
+	HAL_PARAM_VENC_HIER_B_MAX_ENH_LAYERS,
+	HAL_PARAM_VDEC_NON_SECURE_OUTPUT2,
+	HAL_PARAM_VENC_HIER_P_HYBRID_MODE,
+	HAL_PARAM_VENC_MBI_STATISTICS_MODE,
+	HAL_PARAM_SYNC_BASED_INTERRUPT,
+	HAL_CONFIG_VENC_FRAME_QP,
+	HAL_CONFIG_VENC_BASELAYER_PRIORITYID,
+	HAL_PARAM_VENC_VQZIP_SEI,
+	HAL_PROPERTY_PARAM_VENC_ASPECT_RATIO,
+	HAL_CONFIG_VDEC_ENTROPY,
+	HAL_PARAM_VENC_BITRATE_TYPE,
+	HAL_PARAM_VENC_H264_PIC_ORDER_CNT,
+	HAL_PARAM_VENC_LOW_LATENCY,
+	HAL_PARAM_VENC_CONSTRAINED_INTRA_PRED,
+	HAL_CONFIG_VENC_BLUR_RESOLUTION,
+	HAL_PARAM_VENC_SESSION_QP_RANGE_PACKED,
+	HAL_PARAM_VENC_H264_TRANSFORM_8x8,
+	HAL_PARAM_VENC_VIDEO_SIGNAL_INFO,
+	HAL_PARAM_VENC_IFRAMESIZE_TYPE,
+	HAL_PARAM_VENC_SEND_OUTPUT_FOR_SKIPPED_FRAME
+};
+
+enum hal_domain {
+	HAL_VIDEO_DOMAIN_VPE,
+	HAL_VIDEO_DOMAIN_ENCODER,
+	HAL_VIDEO_DOMAIN_DECODER,
+	HAL_UNUSED_DOMAIN = 0x10000000,
+};
+
+enum multi_stream {
+	HAL_VIDEO_DECODER_NONE = 0x00000000,
+	HAL_VIDEO_DECODER_PRIMARY = 0x00000001,
+	HAL_VIDEO_DECODER_SECONDARY = 0x00000002,
+	HAL_VIDEO_DECODER_BOTH_OUTPUTS = 0x00000004,
+	HAL_VIDEO_UNUSED_OUTPUTS = 0x10000000,
+};
+
+enum hal_core_capabilities {
+	HAL_VIDEO_ENCODER_ROTATION_CAPABILITY = 0x00000001,
+	HAL_VIDEO_ENCODER_SCALING_CAPABILITY = 0x00000002,
+	HAL_VIDEO_ENCODER_DEINTERLACE_CAPABILITY = 0x00000004,
+	HAL_VIDEO_DECODER_MULTI_STREAM_CAPABILITY = 0x00000008,
+	HAL_VIDEO_UNUSED_CAPABILITY      = 0x10000000,
+};
+
+enum hal_default_properties {
+	HAL_VIDEO_DYNAMIC_BUF_MODE = 0x00000001,
+	HAL_VIDEO_CONTINUE_DATA_TRANSFER = 0x00000002,
+};
+
+enum hal_video_codec {
+	HAL_VIDEO_CODEC_UNKNOWN  = 0x00000000,
+	HAL_VIDEO_CODEC_MVC      = 0x00000001,
+	HAL_VIDEO_CODEC_H264     = 0x00000002,
+	HAL_VIDEO_CODEC_H263     = 0x00000004,
+	HAL_VIDEO_CODEC_MPEG1    = 0x00000008,
+	HAL_VIDEO_CODEC_MPEG2    = 0x00000010,
+	HAL_VIDEO_CODEC_MPEG4    = 0x00000020,
+	HAL_VIDEO_CODEC_DIVX_311 = 0x00000040,
+	HAL_VIDEO_CODEC_DIVX     = 0x00000080,
+	HAL_VIDEO_CODEC_VC1      = 0x00000100,
+	HAL_VIDEO_CODEC_SPARK    = 0x00000200,
+	HAL_VIDEO_CODEC_VP6      = 0x00000400,
+	HAL_VIDEO_CODEC_VP7      = 0x00000800,
+	HAL_VIDEO_CODEC_VP8      = 0x00001000,
+	HAL_VIDEO_CODEC_HEVC     = 0x00002000,
+	HAL_VIDEO_CODEC_VP9      = 0x00004000,
+	HAL_VIDEO_CODEC_HEVC_HYBRID     = 0x80000000,
+	HAL_UNUSED_CODEC = 0x10000000,
+};
+
+enum hal_h263_profile {
+	HAL_H263_PROFILE_BASELINE           = 0x00000001,
+	HAL_H263_PROFILE_H320CODING         = 0x00000002,
+	HAL_H263_PROFILE_BACKWARDCOMPATIBLE = 0x00000004,
+	HAL_H263_PROFILE_ISWV2              = 0x00000008,
+	HAL_H263_PROFILE_ISWV3              = 0x00000010,
+	HAL_H263_PROFILE_HIGHCOMPRESSION    = 0x00000020,
+	HAL_H263_PROFILE_INTERNET           = 0x00000040,
+	HAL_H263_PROFILE_INTERLACE          = 0x00000080,
+	HAL_H263_PROFILE_HIGHLATENCY        = 0x00000100,
+	HAL_UNUSED_H263_PROFILE = 0x10000000,
+};
+
+enum hal_h263_level {
+	HAL_H263_LEVEL_10 = 0x00000001,
+	HAL_H263_LEVEL_20 = 0x00000002,
+	HAL_H263_LEVEL_30 = 0x00000004,
+	HAL_H263_LEVEL_40 = 0x00000008,
+	HAL_H263_LEVEL_45 = 0x00000010,
+	HAL_H263_LEVEL_50 = 0x00000020,
+	HAL_H263_LEVEL_60 = 0x00000040,
+	HAL_H263_LEVEL_70 = 0x00000080,
+	HAL_UNUSED_H263_LEVEL = 0x10000000,
+};
+
+enum hal_mpeg2_profile {
+	HAL_MPEG2_PROFILE_SIMPLE  = 0x00000001,
+	HAL_MPEG2_PROFILE_MAIN    = 0x00000002,
+	HAL_MPEG2_PROFILE_422     = 0x00000004,
+	HAL_MPEG2_PROFILE_SNR     = 0x00000008,
+	HAL_MPEG2_PROFILE_SPATIAL = 0x00000010,
+	HAL_MPEG2_PROFILE_HIGH    = 0x00000020,
+	HAL_UNUSED_MPEG2_PROFILE = 0x10000000,
+};
+
+enum hal_mpeg2_level {
+	HAL_MPEG2_LEVEL_LL  = 0x00000001,
+	HAL_MPEG2_LEVEL_ML  = 0x00000002,
+	HAL_MPEG2_LEVEL_H14 = 0x00000004,
+	HAL_MPEG2_LEVEL_HL  = 0x00000008,
+	HAL_UNUSED_MEPG2_LEVEL = 0x10000000,
+};
+
+enum hal_mpeg4_profile {
+	HAL_MPEG4_PROFILE_SIMPLE           = 0x00000001,
+	HAL_MPEG4_PROFILE_ADVANCEDSIMPLE   = 0x00000002,
+	HAL_MPEG4_PROFILE_CORE             = 0x00000004,
+	HAL_MPEG4_PROFILE_MAIN             = 0x00000008,
+	HAL_MPEG4_PROFILE_NBIT             = 0x00000010,
+	HAL_MPEG4_PROFILE_SCALABLETEXTURE  = 0x00000020,
+	HAL_MPEG4_PROFILE_SIMPLEFACE       = 0x00000040,
+	HAL_MPEG4_PROFILE_SIMPLEFBA        = 0x00000080,
+	HAL_MPEG4_PROFILE_BASICANIMATED    = 0x00000100,
+	HAL_MPEG4_PROFILE_HYBRID           = 0x00000200,
+	HAL_MPEG4_PROFILE_ADVANCEDREALTIME = 0x00000400,
+	HAL_MPEG4_PROFILE_CORESCALABLE     = 0x00000800,
+	HAL_MPEG4_PROFILE_ADVANCEDCODING   = 0x00001000,
+	HAL_MPEG4_PROFILE_ADVANCEDCORE     = 0x00002000,
+	HAL_MPEG4_PROFILE_ADVANCEDSCALABLE = 0x00004000,
+	HAL_MPEG4_PROFILE_SIMPLESCALABLE   = 0x00008000,
+	HAL_UNUSED_MPEG4_PROFILE = 0x10000000,
+};
+
+enum hal_mpeg4_level {
+	HAL_MPEG4_LEVEL_0  = 0x00000001,
+	HAL_MPEG4_LEVEL_0b = 0x00000002,
+	HAL_MPEG4_LEVEL_1  = 0x00000004,
+	HAL_MPEG4_LEVEL_2  = 0x00000008,
+	HAL_MPEG4_LEVEL_3  = 0x00000010,
+	HAL_MPEG4_LEVEL_4  = 0x00000020,
+	HAL_MPEG4_LEVEL_4a = 0x00000040,
+	HAL_MPEG4_LEVEL_5  = 0x00000080,
+	HAL_MPEG4_LEVEL_VENDOR_START_UNUSED = 0x7F000000,
+	HAL_MPEG4_LEVEL_6  = 0x7F000001,
+	HAL_MPEG4_LEVEL_7  = 0x7F000002,
+	HAL_MPEG4_LEVEL_8  = 0x7F000003,
+	HAL_MPEG4_LEVEL_9  = 0x7F000004,
+	HAL_MPEG4_LEVEL_3b = 0x7F000005,
+	HAL_UNUSED_MPEG4_LEVEL = 0x10000000,
+};
+
+enum hal_h264_profile {
+	HAL_H264_PROFILE_BASELINE = 0x00000001,
+	HAL_H264_PROFILE_MAIN     = 0x00000002,
+	HAL_H264_PROFILE_HIGH     = 0x00000004,
+	HAL_H264_PROFILE_EXTENDED = 0x00000008,
+	HAL_H264_PROFILE_HIGH10   = 0x00000010,
+	HAL_H264_PROFILE_HIGH422  = 0x00000020,
+	HAL_H264_PROFILE_HIGH444  = 0x00000040,
+	HAL_H264_PROFILE_CONSTRAINED_BASE  = 0x00000080,
+	HAL_H264_PROFILE_CONSTRAINED_HIGH  = 0x00000100,
+	HAL_UNUSED_H264_PROFILE = 0x10000000,
+};
+
+enum hal_h264_level {
+	HAL_H264_LEVEL_1  = 0x00000001,
+	HAL_H264_LEVEL_1b = 0x00000002,
+	HAL_H264_LEVEL_11 = 0x00000004,
+	HAL_H264_LEVEL_12 = 0x00000008,
+	HAL_H264_LEVEL_13 = 0x00000010,
+	HAL_H264_LEVEL_2  = 0x00000020,
+	HAL_H264_LEVEL_21 = 0x00000040,
+	HAL_H264_LEVEL_22 = 0x00000080,
+	HAL_H264_LEVEL_3  = 0x00000100,
+	HAL_H264_LEVEL_31 = 0x00000200,
+	HAL_H264_LEVEL_32 = 0x00000400,
+	HAL_H264_LEVEL_4  = 0x00000800,
+	HAL_H264_LEVEL_41 = 0x00001000,
+	HAL_H264_LEVEL_42 = 0x00002000,
+	HAL_H264_LEVEL_5  = 0x00004000,
+	HAL_H264_LEVEL_51 = 0x00008000,
+	HAL_H264_LEVEL_52 = 0x00010000,
+	HAL_UNUSED_H264_LEVEL = 0x10000000,
+};
+
+enum hal_hevc_profile {
+	HAL_HEVC_PROFILE_MAIN           = 0x00000001,
+	HAL_HEVC_PROFILE_MAIN10         = 0x00000002,
+	HAL_HEVC_PROFILE_MAIN_STILL_PIC = 0x00000004,
+	HAL_UNUSED_HEVC_PROFILE         = 0x10000000,
+};
+
+enum hal_hevc_level {
+	HAL_HEVC_MAIN_TIER_LEVEL_1      = 0x10000001,
+	HAL_HEVC_MAIN_TIER_LEVEL_2      = 0x10000002,
+	HAL_HEVC_MAIN_TIER_LEVEL_2_1    = 0x10000004,
+	HAL_HEVC_MAIN_TIER_LEVEL_3      = 0x10000008,
+	HAL_HEVC_MAIN_TIER_LEVEL_3_1    = 0x10000010,
+	HAL_HEVC_MAIN_TIER_LEVEL_4      = 0x10000020,
+	HAL_HEVC_MAIN_TIER_LEVEL_4_1    = 0x10000040,
+	HAL_HEVC_MAIN_TIER_LEVEL_5      = 0x10000080,
+	HAL_HEVC_MAIN_TIER_LEVEL_5_1    = 0x10000100,
+	HAL_HEVC_MAIN_TIER_LEVEL_5_2    = 0x10000200,
+	HAL_HEVC_MAIN_TIER_LEVEL_6      = 0x10000400,
+	HAL_HEVC_MAIN_TIER_LEVEL_6_1    = 0x10000800,
+	HAL_HEVC_MAIN_TIER_LEVEL_6_2    = 0x10001000,
+	HAL_HEVC_HIGH_TIER_LEVEL_1      = 0x20000001,
+	HAL_HEVC_HIGH_TIER_LEVEL_2      = 0x20000002,
+	HAL_HEVC_HIGH_TIER_LEVEL_2_1    = 0x20000004,
+	HAL_HEVC_HIGH_TIER_LEVEL_3      = 0x20000008,
+	HAL_HEVC_HIGH_TIER_LEVEL_3_1    = 0x20000010,
+	HAL_HEVC_HIGH_TIER_LEVEL_4      = 0x20000020,
+	HAL_HEVC_HIGH_TIER_LEVEL_4_1    = 0x20000040,
+	HAL_HEVC_HIGH_TIER_LEVEL_5      = 0x20000080,
+	HAL_HEVC_HIGH_TIER_LEVEL_5_1    = 0x20000100,
+	HAL_HEVC_HIGH_TIER_LEVEL_5_2    = 0x20000200,
+	HAL_HEVC_HIGH_TIER_LEVEL_6      = 0x20000400,
+	HAL_HEVC_HIGH_TIER_LEVEL_6_1    = 0x20000800,
+	HAL_HEVC_HIGH_TIER_LEVEL_6_2    = 0x20001000,
+	HAL_UNUSED_HEVC_TIER_LEVEL      = 0x80000000,
+};
+
+enum hal_hevc_tier {
+	HAL_HEVC_TIER_MAIN   = 0x00000001,
+	HAL_HEVC_TIER_HIGH   = 0x00000002,
+	HAL_UNUSED_HEVC_TIER = 0x10000000,
+};
+
+enum hal_vpx_profile {
+	HAL_VPX_PROFILE_SIMPLE    = 0x00000001,
+	HAL_VPX_PROFILE_ADVANCED  = 0x00000002,
+	HAL_VPX_PROFILE_VERSION_0 = 0x00000004,
+	HAL_VPX_PROFILE_VERSION_1 = 0x00000008,
+	HAL_VPX_PROFILE_VERSION_2 = 0x00000010,
+	HAL_VPX_PROFILE_VERSION_3 = 0x00000020,
+	HAL_VPX_PROFILE_UNUSED = 0x10000000,
+};
+
+enum hal_vc1_profile {
+	HAL_VC1_PROFILE_SIMPLE   = 0x00000001,
+	HAL_VC1_PROFILE_MAIN     = 0x00000002,
+	HAL_VC1_PROFILE_ADVANCED = 0x00000004,
+	HAL_UNUSED_VC1_PROFILE = 0x10000000,
+};
+
+enum hal_vc1_level {
+	HAL_VC1_LEVEL_LOW    = 0x00000001,
+	HAL_VC1_LEVEL_MEDIUM = 0x00000002,
+	HAL_VC1_LEVEL_HIGH   = 0x00000004,
+	HAL_VC1_LEVEL_0      = 0x00000008,
+	HAL_VC1_LEVEL_1      = 0x00000010,
+	HAL_VC1_LEVEL_2      = 0x00000020,
+	HAL_VC1_LEVEL_3      = 0x00000040,
+	HAL_VC1_LEVEL_4      = 0x00000080,
+	HAL_UNUSED_VC1_LEVEL = 0x10000000,
+};
+
+enum hal_divx_format {
+	HAL_DIVX_FORMAT_4,
+	HAL_DIVX_FORMAT_5,
+	HAL_DIVX_FORMAT_6,
+	HAL_UNUSED_DIVX_FORMAT = 0x10000000,
+};
+
+enum hal_divx_profile {
+	HAL_DIVX_PROFILE_QMOBILE  = 0x00000001,
+	HAL_DIVX_PROFILE_MOBILE   = 0x00000002,
+	HAL_DIVX_PROFILE_MT       = 0x00000004,
+	HAL_DIVX_PROFILE_HT       = 0x00000008,
+	HAL_DIVX_PROFILE_HD       = 0x00000010,
+	HAL_UNUSED_DIVX_PROFILE = 0x10000000,
+};
+
+enum hal_mvc_profile {
+	HAL_MVC_PROFILE_STEREO_HIGH  = 0x00001000,
+	HAL_UNUSED_MVC_PROFILE = 0x10000000,
+};
+
+enum hal_mvc_level {
+	HAL_MVC_LEVEL_1  = 0x00000001,
+	HAL_MVC_LEVEL_1b = 0x00000002,
+	HAL_MVC_LEVEL_11 = 0x00000004,
+	HAL_MVC_LEVEL_12 = 0x00000008,
+	HAL_MVC_LEVEL_13 = 0x00000010,
+	HAL_MVC_LEVEL_2  = 0x00000020,
+	HAL_MVC_LEVEL_21 = 0x00000040,
+	HAL_MVC_LEVEL_22 = 0x00000080,
+	HAL_MVC_LEVEL_3  = 0x00000100,
+	HAL_MVC_LEVEL_31 = 0x00000200,
+	HAL_MVC_LEVEL_32 = 0x00000400,
+	HAL_MVC_LEVEL_4  = 0x00000800,
+	HAL_MVC_LEVEL_41 = 0x00001000,
+	HAL_MVC_LEVEL_42 = 0x00002000,
+	HAL_MVC_LEVEL_5  = 0x00004000,
+	HAL_MVC_LEVEL_51 = 0x00008000,
+	HAL_UNUSED_MVC_LEVEL = 0x10000000,
+};
+
+struct hal_frame_rate {
+	enum hal_buffer buffer_type;
+	u32 frame_rate;
+};
+
+enum hal_uncompressed_format {
+	HAL_COLOR_FORMAT_MONOCHROME     = 0x00000001,
+	HAL_COLOR_FORMAT_NV12           = 0x00000002,
+	HAL_COLOR_FORMAT_NV21           = 0x00000004,
+	HAL_COLOR_FORMAT_NV12_4x4TILE   = 0x00000008,
+	HAL_COLOR_FORMAT_NV21_4x4TILE   = 0x00000010,
+	HAL_COLOR_FORMAT_YUYV           = 0x00000020,
+	HAL_COLOR_FORMAT_YVYU           = 0x00000040,
+	HAL_COLOR_FORMAT_UYVY           = 0x00000080,
+	HAL_COLOR_FORMAT_VYUY           = 0x00000100,
+	HAL_COLOR_FORMAT_RGB565         = 0x00000200,
+	HAL_COLOR_FORMAT_BGR565         = 0x00000400,
+	HAL_COLOR_FORMAT_RGB888         = 0x00000800,
+	HAL_COLOR_FORMAT_BGR888         = 0x00001000,
+	HAL_COLOR_FORMAT_NV12_UBWC      = 0x00002000,
+	HAL_COLOR_FORMAT_NV12_TP10_UBWC = 0x00004000,
+	HAL_COLOR_FORMAT_RGBA8888       = 0x00008000,
+	HAL_COLOR_FORMAT_RGBA8888_UBWC  = 0x00010000,
+	HAL_UNUSED_COLOR                = 0x10000000,
+};
+
+enum hal_statistics_mode_type {
+	HAL_STATISTICS_MODE_DEFAULT	= 0x00000001,
+	HAL_STATISTICS_MODE_1		= 0x00000002,
+	HAL_STATISTICS_MODE_2		= 0x00000004,
+	HAL_STATISTICS_MODE_3		= 0x00000008,
+};
+
+enum hal_ssr_trigger_type {
+	SSR_ERR_FATAL = 1,
+	SSR_SW_DIV_BY_ZERO,
+	SSR_HW_WDOG_IRQ,
+};
+
+struct hal_uncompressed_format_select {
+	enum hal_buffer buffer_type;
+	enum hal_uncompressed_format format;
+};
+
+struct hal_uncompressed_plane_actual {
+	int actual_stride;
+	u32 actual_plane_buffer_height;
+};
+
+struct hal_uncompressed_plane_actual_info {
+	enum hal_buffer buffer_type;
+	u32 num_planes;
+	struct hal_uncompressed_plane_actual rg_plane_format[1];
+};
+
+struct hal_uncompressed_plane_constraints {
+	u32 stride_multiples;
+	u32 max_stride;
+	u32 min_plane_buffer_height_multiple;
+	u32 buffer_alignment;
+};
+
+struct hal_uncompressed_plane_actual_constraints_info {
+	enum hal_buffer buffer_type;
+	u32 num_planes;
+	struct hal_uncompressed_plane_constraints rg_plane_format[1];
+};
+
+struct hal_extra_data_header_config {
+	u32 type;
+	enum hal_buffer buffer_type;
+	u32 version;
+	u32 port_index;
+	u32 client_extradata_id;
+};
+
+struct hal_frame_size {
+	enum hal_buffer buffer_type;
+	u32 width;
+	u32 height;
+};
+
+struct hal_enable {
+	bool enable;
+};
+
+struct hal_buffer_count_actual {
+	enum hal_buffer buffer_type;
+	u32 buffer_count_actual;
+};
+
+struct hal_buffer_size_minimum {
+	enum hal_buffer buffer_type;
+	u32 buffer_size;
+};
+
+struct hal_buffer_display_hold_count_actual {
+	enum hal_buffer buffer_type;
+	u32 hold_count;
+};
+
+enum hal_nal_stream_format {
+	HAL_NAL_FORMAT_STARTCODES         = 0x00000001,
+	HAL_NAL_FORMAT_ONE_NAL_PER_BUFFER = 0x00000002,
+	HAL_NAL_FORMAT_ONE_BYTE_LENGTH    = 0x00000004,
+	HAL_NAL_FORMAT_TWO_BYTE_LENGTH    = 0x00000008,
+	HAL_NAL_FORMAT_FOUR_BYTE_LENGTH   = 0x00000010,
+};
+
+enum hal_output_order {
+	HAL_OUTPUT_ORDER_DISPLAY,
+	HAL_OUTPUT_ORDER_DECODE,
+	HAL_UNUSED_OUTPUT = 0x10000000,
+};
+
+enum hal_picture {
+	HAL_PICTURE_I = 0x01,
+	HAL_PICTURE_P = 0x02,
+	HAL_PICTURE_B = 0x04,
+	HAL_PICTURE_IDR = 0x08,
+	HAL_PICTURE_CRA = 0x10,
+	HAL_FRAME_NOTCODED = 0x7F002000,
+	HAL_FRAME_YUV = 0x7F004000,
+	HAL_UNUSED_PICT = 0x10000000,
+};
+
+struct hal_extradata_enable {
+	u32 enable;
+	enum hal_extradata_id index;
+};
+
+struct hal_enable_picture {
+	u32 picture_type;
+};
+
+struct hal_multi_stream {
+	enum hal_buffer buffer_type;
+	u32 enable;
+	u32 width;
+	u32 height;
+};
+
+struct hal_display_picture_buffer_count {
+	u32 enable;
+	u32 count;
+};
+
+struct hal_mb_error_map {
+	u32 error_map_size;
+	u8 rg_error_map[1];
+};
+
+struct hal_request_iframe {
+	u32 enable;
+};
+
+struct hal_bitrate {
+	u32 bit_rate;
+	u32 layer_id;
+};
+
+struct hal_profile_level {
+	u32 profile;
+	u32 level;
+};
+
+struct hal_profile_level_supported {
+	u32 profile_count;
+	struct hal_profile_level profile_level[MAX_PROFILE_COUNT];
+};
+
+enum hal_h264_entropy {
+	HAL_H264_ENTROPY_CAVLC = 1,
+	HAL_H264_ENTROPY_CABAC = 2,
+	HAL_UNUSED_ENTROPY = 0x10000000,
+};
+
+enum hal_h264_cabac_model {
+	HAL_H264_CABAC_MODEL_0 = 1,
+	HAL_H264_CABAC_MODEL_1 = 2,
+	HAL_H264_CABAC_MODEL_2 = 4,
+	HAL_UNUSED_CABAC = 0x10000000,
+};
+
+struct hal_h264_entropy_control {
+	enum hal_h264_entropy entropy_mode;
+	enum hal_h264_cabac_model cabac_model;
+};
+
+enum hal_rate_control {
+	HAL_RATE_CONTROL_OFF,
+	HAL_RATE_CONTROL_VBR_VFR,
+	HAL_RATE_CONTROL_VBR_CFR,
+	HAL_RATE_CONTROL_CBR_VFR,
+	HAL_RATE_CONTROL_CBR_CFR,
+	HAL_RATE_CONTROL_MBR_CFR,
+	HAL_RATE_CONTROL_MBR_VFR,
+	HAL_UNUSED_RC = 0x10000000,
+};
+
+struct hal_mpeg4_time_resolution {
+	u32 time_increment_resolution;
+};
+
+struct hal_mpeg4_header_extension {
+	u32 header_extension;
+};
+
+enum hal_h264_db_mode {
+	HAL_H264_DB_MODE_DISABLE,
+	HAL_H264_DB_MODE_SKIP_SLICE_BOUNDARY,
+	HAL_H264_DB_MODE_ALL_BOUNDARY,
+	HAL_UNUSED_H264_DB = 0x10000000,
+};
+
+struct hal_h264_db_control {
+	enum hal_h264_db_mode mode;
+	int slice_alpha_offset;
+	int slice_beta_offset;
+};
+
+struct hal_temporal_spatial_tradeoff {
+	u32 ts_factor;
+};
+
+struct hal_quantization {
+	u32 qpi;
+	u32 qpp;
+	u32 qpb;
+	u32 layer_id;
+};
+
+struct hal_initial_quantization {
+	u32 qpi;
+	u32 qpp;
+	u32 qpb;
+	u32 init_qp_enable;
+};
+
+struct hal_quantization_range {
+	u32 min_qp;
+	u32 max_qp;
+	u32 layer_id;
+};
+
+struct hal_intra_period {
+	u32 pframes;
+	u32 bframes;
+};
+
+struct hal_idr_period {
+	u32 idr_period;
+};
+
+enum hal_rotate {
+	HAL_ROTATE_NONE,
+	HAL_ROTATE_90,
+	HAL_ROTATE_180,
+	HAL_ROTATE_270,
+	HAL_UNUSED_ROTATE = 0x10000000,
+};
+
+enum hal_flip {
+	HAL_FLIP_NONE,
+	HAL_FLIP_HORIZONTAL,
+	HAL_FLIP_VERTICAL,
+	HAL_UNUSED_FLIP = 0x10000000,
+};
+
+struct hal_operations {
+	enum hal_rotate rotate;
+	enum hal_flip flip;
+};
+
+enum hal_intra_refresh_mode {
+	HAL_INTRA_REFRESH_NONE,
+	HAL_INTRA_REFRESH_CYCLIC,
+	HAL_INTRA_REFRESH_ADAPTIVE,
+	HAL_INTRA_REFRESH_CYCLIC_ADAPTIVE,
+	HAL_INTRA_REFRESH_RANDOM,
+	HAL_UNUSED_INTRA = 0x10000000,
+};
+
+struct hal_intra_refresh {
+	enum hal_intra_refresh_mode mode;
+	u32 air_mbs;
+	u32 air_ref;
+	u32 cir_mbs;
+};
+
+enum hal_multi_slice {
+	HAL_MULTI_SLICE_OFF,
+	HAL_MULTI_SLICE_BY_MB_COUNT,
+	HAL_MULTI_SLICE_BY_BYTE_COUNT,
+	HAL_MULTI_SLICE_GOB,
+	HAL_UNUSED_SLICE = 0x10000000,
+};
+
+struct hal_multi_slice_control {
+	enum hal_multi_slice multi_slice;
+	u32 slice_size;
+};
+
+struct hal_debug_config {
+	u32 debug_config;
+};
+
+struct hal_buffer_requirements {
+	enum hal_buffer buffer_type;
+	u32 buffer_size;
+	u32 buffer_region_size;
+	u32 buffer_hold_count;
+	u32 buffer_count_min;
+	u32 buffer_count_actual;
+	u32 contiguous;
+	u32 buffer_alignment;
+};
+
+enum hal_priority { /* Priority increases with number */
+	HAL_PRIORITY_LOW = 10,
+	HAL_PRIOIRTY_MEDIUM = 20,
+	HAL_PRIORITY_HIGH = 30,
+	HAL_UNUSED_PRIORITY = 0x10000000,
+};
+
+struct hal_batch_info {
+	u32 input_batch_count;
+	u32 output_batch_count;
+};
+
+struct hal_metadata_pass_through {
+	u32 enable;
+	u32 size;
+};
+
+struct hal_uncompressed_format_supported {
+	enum hal_buffer buffer_type;
+	u32 format_entries;
+	u32 rg_format_info[1];
+};
+
+enum hal_interlace_format {
+	HAL_INTERLACE_FRAME_PROGRESSIVE                 = 0x01,
+	HAL_INTERLACE_INTERLEAVE_FRAME_TOPFIELDFIRST    = 0x02,
+	HAL_INTERLACE_INTERLEAVE_FRAME_BOTTOMFIELDFIRST = 0x04,
+	HAL_INTERLACE_FRAME_TOPFIELDFIRST               = 0x08,
+	HAL_INTERLACE_FRAME_BOTTOMFIELDFIRST            = 0x10,
+	HAL_UNUSED_INTERLACE = 0x10000000,
+};
+
+struct hal_interlace_format_supported {
+	enum hal_buffer buffer_type;
+	enum hal_interlace_format format;
+};
+
+enum hal_chroma_site {
+	HAL_CHROMA_SITE_0,
+	HAL_CHROMA_SITE_1,
+	HAL_UNUSED_CHROMA = 0x10000000,
+};
+
+struct hal_properties_supported {
+	u32 num_properties;
+	u32 rg_properties[1];
+};
+
+enum hal_capability {
+	HAL_CAPABILITY_FRAME_WIDTH = 0x1,
+	HAL_CAPABILITY_FRAME_HEIGHT,
+	HAL_CAPABILITY_MBS_PER_FRAME,
+	HAL_CAPABILITY_MBS_PER_SECOND,
+	HAL_CAPABILITY_FRAMERATE,
+	HAL_CAPABILITY_SCALE_X,
+	HAL_CAPABILITY_SCALE_Y,
+	HAL_CAPABILITY_BITRATE,
+	HAL_CAPABILITY_BFRAME,
+	HAL_CAPABILITY_PEAKBITRATE,
+	HAL_CAPABILITY_HIER_P_NUM_ENH_LAYERS,
+	HAL_CAPABILITY_ENC_LTR_COUNT,
+	HAL_CAPABILITY_SECURE_OUTPUT2_THRESHOLD,
+	HAL_CAPABILITY_HIER_B_NUM_ENH_LAYERS,
+	HAL_CAPABILITY_LCU_SIZE,
+	HAL_CAPABILITY_HIER_P_HYBRID_NUM_ENH_LAYERS,
+	HAL_CAPABILITY_MBS_PER_SECOND_POWER_SAVE,
+	HAL_UNUSED_CAPABILITY = 0x10000000,
+};
+
+struct hal_capability_supported {
+	enum hal_capability capability_type;
+	u32 min;
+	u32 max;
+	u32 step_size;
+};
+
+struct hal_capability_supported_info {
+	u32 num_capabilities;
+	struct hal_capability_supported rg_data[1];
+};
+
+struct hal_nal_stream_format_supported {
+	u32 nal_stream_format_supported;
+};
+
+struct hal_nal_stream_format_select {
+	u32 nal_stream_format_select;
+};
+
+struct hal_multi_view_format {
+	u32 views;
+	u32 rg_view_order[1];
+};
+
+enum hal_buffer_layout_type {
+	HAL_BUFFER_LAYOUT_TOP_BOTTOM,
+	HAL_BUFFER_LAYOUT_SEQ,
+	HAL_UNUSED_BUFFER_LAYOUT = 0x10000000,
+};
+
+struct hal_mvc_buffer_layout {
+	enum hal_buffer_layout_type layout_type;
+	u32 bright_view_first;
+	u32 ngap;
+};
+
+struct hal_seq_header_info {
+	u32 nax_header_len;
+};
+
+struct hal_aspect_ratio {
+	u32 aspect_width;
+	u32 aspect_height;
+};
+
+struct hal_codec_supported {
+	u32 decoder_codec_supported;
+	u32 encoder_codec_supported;
+};
+
+struct hal_multi_view_select {
+	u32 view_index;
+};
+
+struct hal_timestamp_scale {
+	u32 time_stamp_scale;
+};
+
+struct hal_h264_vui_timing_info {
+	u32 enable;
+	u32 fixed_frame_rate;
+	u32 time_scale;
+};
+
+struct hal_h264_vui_bitstream_restrc {
+	u32 enable;
+};
+
+struct hal_preserve_text_quality {
+	u32 enable;
+};
+
+struct hal_vc1e_perf_cfg_type {
+	struct {
+		u32 x_subsampled;
+		u32 y_subsampled;
+	} i_frame, p_frame, b_frame;
+};
+
+struct hal_vpe_color_space_conversion {
+	u32 csc_matrix[HAL_MAX_MATRIX_COEFFS];
+	u32 csc_bias[HAL_MAX_BIAS_COEFFS];
+	u32 csc_limit[HAL_MAX_LIMIT_COEFFS];
+};
+
+struct hal_video_signal_info {
+	u32 color_space;
+	u32 transfer_chars;
+	u32 matrix_coeffs;
+	bool full_range;
+};
+
+enum hal_iframesize_type {
+	HAL_IFRAMESIZE_TYPE_DEFAULT,
+	HAL_IFRAMESIZE_TYPE_MEDIUM,
+	HAL_IFRAMESIZE_TYPE_HUGE,
+	HAL_IFRAMESIZE_TYPE_UNLIMITED,
+};
+
+enum vidc_resource_id {
+	VIDC_RESOURCE_NONE,
+	VIDC_RESOURCE_OCMEM,
+	VIDC_RESOURCE_VMEM,
+	VIDC_UNUSED_RESOURCE = 0x10000000,
+};
+
+struct vidc_resource_hdr {
+	enum vidc_resource_id resource_id;
+	void *resource_handle;
+	u32 size;
+};
+
+struct vidc_buffer_addr_info {
+	enum hal_buffer buffer_type;
+	u32 buffer_size;
+	u32 num_buffers;
+	ion_phys_addr_t align_device_addr;
+	ion_phys_addr_t extradata_addr;
+	u32 extradata_size;
+	u32 response_required;
+};
+
+/* Needs to be exactly the same as hfi_buffer_info */
+struct hal_buffer_info {
+	u32 buffer_addr;
+	u32 extra_data_addr;
+};
+
+struct vidc_frame_plane_config {
+	u32 left;
+	u32 top;
+	u32 width;
+	u32 height;
+	u32 stride;
+	u32 scan_lines;
+};
+
+struct vidc_uncompressed_frame_config {
+	struct vidc_frame_plane_config luma_plane;
+	struct vidc_frame_plane_config chroma_plane;
+};
+
+struct vidc_frame_data {
+	enum hal_buffer buffer_type;
+	ion_phys_addr_t device_addr;
+	ion_phys_addr_t extradata_addr;
+	int64_t timestamp;
+	u32 flags;
+	u32 offset;
+	u32 alloc_len;
+	u32 filled_len;
+	u32 mark_target;
+	u32 mark_data;
+	u32 clnt_data;
+	u32 extradata_size;
+};
+
+struct vidc_seq_hdr {
+	ion_phys_addr_t seq_hdr;
+	u32 seq_hdr_len;
+};
+
+struct hal_fw_info {
+	char version[VENUS_VERSION_LENGTH];
+	phys_addr_t base_addr;
+	int register_base;
+	int register_size;
+	int irq;
+};
+
+enum hal_flush {
+	HAL_FLUSH_INPUT,
+	HAL_FLUSH_OUTPUT,
+	HAL_FLUSH_ALL,
+	HAL_UNUSED_FLUSH = 0x10000000,
+};
+
+enum hal_event_type {
+	HAL_EVENT_SEQ_CHANGED_SUFFICIENT_RESOURCES,
+	HAL_EVENT_SEQ_CHANGED_INSUFFICIENT_RESOURCES,
+	HAL_EVENT_RELEASE_BUFFER_REFERENCE,
+	HAL_UNUSED_SEQCHG = 0x10000000,
+};
+
+enum buffer_mode_type {
+	HAL_BUFFER_MODE_STATIC = 0x001,
+	HAL_BUFFER_MODE_RING = 0x010,
+	HAL_BUFFER_MODE_DYNAMIC = 0x100,
+};
+
+struct hal_buffer_alloc_mode {
+	enum hal_buffer buffer_type;
+	enum buffer_mode_type buffer_mode;
+};
+
+enum ltr_mode {
+	HAL_LTR_MODE_DISABLE,
+	HAL_LTR_MODE_MANUAL,
+	HAL_LTR_MODE_PERIODIC,
+};
+
+struct hal_ltr_mode {
+	enum ltr_mode mode;
+	u32 count;
+	u32 trust_mode;
+};
+
+struct hal_ltr_use {
+	u32 ref_ltr;
+	u32 use_constraint;
+	u32 frames;
+};
+
+struct hal_ltr_mark {
+	u32 mark_frame;
+};
+
+enum hal_perf_mode {
+	HAL_PERF_MODE_POWER_SAVE,
+	HAL_PERF_MODE_POWER_MAX_QUALITY,
+};
+
+struct hal_hybrid_hierp {
+	u32 layers;
+};
+
+struct hal_scs_threshold {
+	u32 threshold_value;
+};
+
+struct buffer_requirements {
+	struct hal_buffer_requirements buffer[HAL_BUFFER_MAX];
+};
+
+union hal_get_property {
+	struct hal_frame_rate frame_rate;
+	struct hal_uncompressed_format_select format_select;
+	struct hal_uncompressed_plane_actual plane_actual;
+	struct hal_uncompressed_plane_actual_info plane_actual_info;
+	struct hal_uncompressed_plane_constraints plane_constraints;
+	struct hal_uncompressed_plane_actual_constraints_info
+						plane_constraints_info;
+	struct hal_extra_data_header_config extra_data_header_config;
+	struct hal_frame_size frame_size;
+	struct hal_enable enable;
+	struct hal_buffer_count_actual buffer_count_actual;
+	struct hal_extradata_enable extradata_enable;
+	struct hal_enable_picture enable_picture;
+	struct hal_multi_stream multi_stream;
+	struct hal_display_picture_buffer_count display_picture_buffer_count;
+	struct hal_mb_error_map mb_error_map;
+	struct hal_request_iframe request_iframe;
+	struct hal_bitrate bitrate;
+	struct hal_profile_level profile_level;
+	struct hal_profile_level_supported profile_level_supported;
+	struct hal_mpeg4_time_resolution mpeg4_time_resolution;
+	struct hal_mpeg4_header_extension mpeg4_header_extension;
+	struct hal_h264_db_control h264_db_control;
+	struct hal_temporal_spatial_tradeoff temporal_spatial_tradeoff;
+	struct hal_quantization quantization;
+	struct hal_quantization_range quantization_range;
+	struct hal_intra_period intra_period;
+	struct hal_idr_period idr_period;
+	struct hal_operations operations;
+	struct hal_intra_refresh intra_refresh;
+	struct hal_multi_slice_control multi_slice_control;
+	struct hal_debug_config debug_config;
+	struct hal_batch_info batch_info;
+	struct hal_metadata_pass_through metadata_pass_through;
+	struct hal_uncompressed_format_supported uncompressed_format_supported;
+	struct hal_interlace_format_supported interlace_format_supported;
+	struct hal_properties_supported properties_supported;
+	struct hal_capability_supported capability_supported;
+	struct hal_capability_supported_info capability_supported_info;
+	struct hal_nal_stream_format_supported nal_stream_format_supported;
+	struct hal_nal_stream_format_select nal_stream_format_select;
+	struct hal_multi_view_format multi_view_format;
+	struct hal_seq_header_info seq_header_info;
+	struct hal_codec_supported codec_supported;
+	struct hal_multi_view_select multi_view_select;
+	struct hal_timestamp_scale timestamp_scale;
+	struct hal_h264_vui_timing_info h264_vui_timing_info;
+	struct hal_h264_vui_bitstream_restrc h264_vui_bitstream_restrc;
+	struct hal_preserve_text_quality preserve_text_quality;
+	struct hal_buffer_info buffer_info;
+	struct hal_buffer_alloc_mode buffer_alloc_mode;
+	struct buffer_requirements buf_req;
+	enum hal_h264_entropy h264_entropy;
+};
+
+/* HAL Response */
+#define IS_HAL_SYS_CMD(cmd) ((cmd) >= HAL_SYS_INIT_DONE && \
+		(cmd) <= HAL_SYS_ERROR)
+#define IS_HAL_SESSION_CMD(cmd) ((cmd) >= HAL_SESSION_EVENT_CHANGE && \
+		(cmd) <= HAL_SESSION_ERROR)
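+
+/*
+ * Note: the range checks above rely on the ordering of the enumerators
+ * below; keep new entries inside the matching SYS/SESSION span.
+ */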
+enum hal_command_response {
+	/* SYSTEM COMMANDS_DONE*/
+	HAL_SYS_INIT_DONE,
+	HAL_SYS_SET_RESOURCE_DONE,
+	HAL_SYS_RELEASE_RESOURCE_DONE,
+	HAL_SYS_PING_ACK_DONE,
+	HAL_SYS_PC_PREP_DONE,
+	HAL_SYS_IDLE,
+	HAL_SYS_DEBUG,
+	HAL_SYS_WATCHDOG_TIMEOUT,
+	HAL_SYS_ERROR,
+	/* SESSION COMMANDS_DONE */
+	HAL_SESSION_EVENT_CHANGE,
+	HAL_SESSION_LOAD_RESOURCE_DONE,
+	HAL_SESSION_INIT_DONE,
+	HAL_SESSION_END_DONE,
+	HAL_SESSION_ABORT_DONE,
+	HAL_SESSION_START_DONE,
+	HAL_SESSION_STOP_DONE,
+	HAL_SESSION_ETB_DONE,
+	HAL_SESSION_FTB_DONE,
+	HAL_SESSION_FLUSH_DONE,
+	HAL_SESSION_SUSPEND_DONE,
+	HAL_SESSION_RESUME_DONE,
+	HAL_SESSION_SET_PROP_DONE,
+	HAL_SESSION_GET_PROP_DONE,
+	HAL_SESSION_PARSE_SEQ_HDR_DONE,
+	HAL_SESSION_GET_SEQ_HDR_DONE,
+	HAL_SESSION_RELEASE_BUFFER_DONE,
+	HAL_SESSION_RELEASE_RESOURCE_DONE,
+	HAL_SESSION_PROPERTY_INFO,
+	HAL_SESSION_ERROR,
+	HAL_RESPONSE_UNUSED = 0x10000000,
+};
+
+struct vidc_hal_ebd {
+	u32 timestamp_hi;
+	u32 timestamp_lo;
+	u32 flags;
+	enum vidc_status status;
+	u32 mark_target;
+	u32 mark_data;
+	u32 stats;
+	u32 offset;
+	u32 alloc_len;
+	u32 filled_len;
+	enum hal_picture picture_type;
+	ion_phys_addr_t packet_buffer;
+	ion_phys_addr_t extra_data_buffer;
+};
+
+struct vidc_hal_fbd {
+	u32 stream_id;
+	u32 view_id;
+	u32 timestamp_hi;
+	u32 timestamp_lo;
+	u32 flags1;
+	u32 mark_target;
+	u32 mark_data;
+	u32 stats;
+	u32 alloc_len1;
+	u32 filled_len1;
+	u32 offset1;
+	u32 frame_width;
+	u32 frame_height;
+	u32 start_x_coord;
+	u32 start_y_coord;
+	u32 input_tag;
+	u32 input_tag1;
+	enum hal_picture picture_type;
+	ion_phys_addr_t packet_buffer1;
+	ion_phys_addr_t extra_data_buffer;
+	u32 flags2;
+	u32 alloc_len2;
+	u32 filled_len2;
+	u32 offset2;
+	ion_phys_addr_t packet_buffer2;
+	u32 flags3;
+	u32 alloc_len3;
+	u32 filled_len3;
+	u32 offset3;
+	ion_phys_addr_t packet_buffer3;
+	enum hal_buffer buffer_type;
+};
+
+struct msm_vidc_capability {
+	enum hal_domain domain;
+	enum hal_video_codec codec;
+	struct hal_capability_supported width;
+	struct hal_capability_supported height;
+	struct hal_capability_supported mbs_per_frame;
+	struct hal_capability_supported mbs_per_sec;
+	struct hal_capability_supported frame_rate;
+	struct hal_capability_supported scale_x;
+	struct hal_capability_supported scale_y;
+	struct hal_capability_supported bitrate;
+	struct hal_capability_supported bframe;
+	struct hal_capability_supported peakbitrate;
+	struct hal_capability_supported hier_p;
+	struct hal_capability_supported ltr_count;
+	struct hal_capability_supported secure_output2_threshold;
+	struct hal_capability_supported hier_b;
+	struct hal_capability_supported lcu_size;
+	struct hal_capability_supported hier_p_hybrid;
+	struct hal_capability_supported mbs_per_sec_power_save;
+	struct hal_profile_level_supported profile_level;
+	struct hal_uncompressed_format_supported uncomp_format;
+	struct hal_interlace_format_supported HAL_format;
+	struct hal_nal_stream_format_supported nal_stream_format;
+	struct hal_intra_refresh intra_refresh;
+	enum buffer_mode_type alloc_mode_out;
+	enum buffer_mode_type alloc_mode_in;
+	u32 pixelprocess_capabilities;
+};
+
+struct vidc_hal_sys_init_done {
+	u32 dec_codec_supported;
+	u32 enc_codec_supported;
+	u32 codec_count;
+	struct msm_vidc_capability *capabilities;
+	u32 max_sessions_supported;
+};
+
+struct vidc_hal_session_init_done {
+	struct msm_vidc_capability capability;
+};
+
+struct msm_vidc_cb_cmd_done {
+	u32 device_id;
+	void *session_id;
+	enum vidc_status status;
+	u32 size;
+	union {
+		struct vidc_resource_hdr resource_hdr;
+		struct vidc_buffer_addr_info buffer_addr_info;
+		struct vidc_frame_plane_config frame_plane_config;
+		struct vidc_uncompressed_frame_config uncompressed_frame_config;
+		struct vidc_frame_data frame_data;
+		struct vidc_seq_hdr seq_hdr;
+		struct vidc_hal_ebd ebd;
+		struct vidc_hal_fbd fbd;
+		struct vidc_hal_sys_init_done sys_init_done;
+		struct vidc_hal_session_init_done session_init_done;
+		struct hal_buffer_info buffer_info;
+		union hal_get_property property;
+		enum hal_flush flush_type;
+	} data;
+};
+
+struct msm_vidc_cb_event {
+	u32 device_id;
+	void *session_id;
+	enum vidc_status status;
+	u32 height;
+	u32 width;
+	enum msm_vidc_pixel_depth bit_depth;
+	u32 hal_event_type;
+	ion_phys_addr_t packet_buffer;
+	ion_phys_addr_t extra_data_buffer;
+	u32 pic_struct;
+	u32 colour_space;
+};
+
+struct msm_vidc_cb_data_done {
+	u32 device_id;
+	void *session_id;
+	enum vidc_status status;
+	u32 size;
+	u32 clnt_data;
+	union {
+		struct vidc_hal_ebd input_done;
+		struct vidc_hal_fbd output_done;
+	};
+};
+
+struct msm_vidc_cb_info {
+	enum hal_command_response response_type;
+	union {
+		struct msm_vidc_cb_cmd_done cmd;
+		struct msm_vidc_cb_event event;
+		struct msm_vidc_cb_data_done data;
+	} response;
+};
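+
+/*
+ * Illustrative dispatch (a sketch, assuming the usual callback flow):
+ * response_type selects the active union member, e.g. a
+ * HAL_SESSION_PROPERTY_INFO response is read through
+ * info->response.cmd, while empty/fill buffer done responses are read
+ * through info->response.data.
+ */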
+
+enum msm_vidc_hfi_type {
+	VIDC_HFI_VENUS,
+};
+
+enum msm_vidc_thermal_level {
+	VIDC_THERMAL_NORMAL = 0,
+	VIDC_THERMAL_LOW,
+	VIDC_THERMAL_HIGH,
+	VIDC_THERMAL_CRITICAL
+};
+
+enum vidc_vote_data_session {
+	VIDC_BUS_VOTE_DATA_SESSION_INVALID = 0,
+	/* No further enumerators are declared here; values are generated
+	 * at the call site by VIDC_VOTE_DATA_SESSION_VAL(), e.g.:
+	 *
+	 * enum vidc_vote_data_session h264_decoder_session =
+	 *        VIDC_VOTE_DATA_SESSION_VAL(HAL_VIDEO_CODEC_H264,
+	 *                 HAL_VIDEO_DOMAIN_DECODER);
+	 */
+};
+
+/* Be careful when modifying VIDC_VOTE_DATA_SESSION_VAL().
+ *
+ * This helper assigns two bits to each codec: the lower bit denoting the
+ * codec type, and the higher bit denoting the session (domain) type.
+ */
+static inline enum vidc_vote_data_session VIDC_VOTE_DATA_SESSION_VAL(
+		enum hal_video_codec c, enum hal_domain d) {
+	if (d != HAL_VIDEO_DOMAIN_ENCODER && d != HAL_VIDEO_DOMAIN_DECODER)
+		return VIDC_BUS_VOTE_DATA_SESSION_INVALID;
+
+	return (1 << ilog2(c) * 2) | ((d - 1) << (ilog2(c) * 2 + 1));
+}
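+
+/*
+ * Worked example (assuming HAL_VIDEO_CODEC_H264 == 0x2 and
+ * HAL_VIDEO_DOMAIN_DECODER == 2; see the hal enums for the actual
+ * values): ilog2(0x2) == 1, so H264 occupies bits 2 and 3, and
+ *
+ *	VIDC_VOTE_DATA_SESSION_VAL(HAL_VIDEO_CODEC_H264,
+ *			HAL_VIDEO_DOMAIN_DECODER)
+ *		== (1 << 2) | ((2 - 1) << 3) == 0xc
+ */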
+
+struct msm_vidc_gov_data {
+	struct vidc_bus_vote_data *data;
+	u32 data_count;
+	int imem_size;
+};
+
+enum msm_vidc_power_mode {
+	VIDC_POWER_NORMAL = 0,
+	VIDC_POWER_LOW,
+	VIDC_POWER_TURBO
+};
+
+struct vidc_bus_vote_data {
+	enum hal_domain domain;
+	enum hal_video_codec codec;
+	enum hal_uncompressed_format color_formats[2];
+	int num_formats; /* 1 = DPB-OPB unified; 2 = split */
+	int height, width, fps;
+	enum msm_vidc_power_mode power_mode;
+	struct imem_ab_table *imem_ab_tbl;
+	u32 imem_ab_tbl_size;
+	unsigned long core_freq;
+};
+
+struct vidc_clk_scale_data {
+	enum vidc_vote_data_session session[VIDC_MAX_SESSIONS];
+	enum msm_vidc_power_mode power_mode[VIDC_MAX_SESSIONS];
+	u32 load[VIDC_MAX_SESSIONS];
+	int num_sessions;
+};
+
+struct hal_index_extradata_input_crop_payload {
+	u32 size;
+	u32 version;
+	u32 port_index;
+	u32 left;
+	u32 top;
+	u32 width;
+	u32 height;
+};
+
+struct hal_cmd_sys_get_property_packet {
+	u32 size;
+	u32 packet_type;
+	u32 num_properties;
+	u32 rg_property_data[1];
+};
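+
+/*
+ * rg_property_data[] is a variable-length tail: the packet's size
+ * field is expected to cover the struct plus any extra property
+ * words, roughly (a sketch):
+ *
+ *	pkt->size = sizeof(*pkt) +
+ *		(pkt->num_properties - 1) * sizeof(u32);
+ */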
+
+#define call_hfi_op(q, op, args...)			\
+	(((q) && (q)->op) ? ((q)->op(args)) : 0)
+
+struct hfi_device {
+	void *hfi_device_data;
+
+	/* Add function pointers for all the HFI functions below */
+	int (*core_init)(void *device);
+	int (*core_release)(void *device);
+	int (*core_ping)(void *device);
+	int (*core_trigger_ssr)(void *device, enum hal_ssr_trigger_type);
+	int (*session_init)(void *device, void *session_id,
+		enum hal_domain session_type, enum hal_video_codec codec_type,
+		void **new_session);
+	int (*session_end)(void *session);
+	int (*session_abort)(void *session);
+	int (*session_set_buffers)(void *sess,
+				struct vidc_buffer_addr_info *buffer_info);
+	int (*session_release_buffers)(void *sess,
+				struct vidc_buffer_addr_info *buffer_info);
+	int (*session_load_res)(void *sess);
+	int (*session_release_res)(void *sess);
+	int (*session_start)(void *sess);
+	int (*session_continue)(void *sess);
+	int (*session_stop)(void *sess);
+	int (*session_etb)(void *sess, struct vidc_frame_data *input_frame);
+	int (*session_ftb)(void *sess, struct vidc_frame_data *output_frame);
+	int (*session_process_batch)(void *sess,
+		int num_etbs, struct vidc_frame_data etbs[],
+		int num_ftbs, struct vidc_frame_data ftbs[]);
+	int (*session_parse_seq_hdr)(void *sess,
+			struct vidc_seq_hdr *seq_hdr);
+	int (*session_get_seq_hdr)(void *sess,
+			struct vidc_seq_hdr *seq_hdr);
+	int (*session_get_buf_req)(void *sess);
+	int (*session_flush)(void *sess, enum hal_flush flush_mode);
+	int (*session_set_property)(void *sess, enum hal_property ptype,
+			void *pdata);
+	int (*session_get_property)(void *sess, enum hal_property ptype);
+	int (*scale_clocks)(void *dev, int load,
+			struct vidc_clk_scale_data *data,
+			unsigned long instant_bitrate);
+	int (*vote_bus)(void *dev, struct vidc_bus_vote_data *data,
+			int num_data);
+	int (*get_fw_info)(void *dev, struct hal_fw_info *fw_info);
+	int (*session_clean)(void *sess);
+	int (*get_core_capabilities)(void *dev);
+	int (*suspend)(void *dev);
+	int (*flush_debug_queue)(void *dev);
+	unsigned long (*get_core_clock_rate)(void *dev, bool actual_rate);
+	enum hal_default_properties (*get_default_properties)(void *dev);
+};
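+
+/*
+ * Illustrative call site (a sketch based on the ops table above):
+ * call_hfi_op() dispatches only when both the device and the hook are
+ * non-NULL, e.g.
+ *
+ *	rc = call_hfi_op(hdev, core_ping, hdev->hfi_device_data);
+ */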
+
+typedef void (*hfi_cmd_response_callback) (enum hal_command_response cmd,
+			void *data);
+typedef void (*msm_vidc_callback) (u32 response, void *callback);
+
+struct hfi_device *vidc_hfi_initialize(enum msm_vidc_hfi_type hfi_type,
+		u32 device_id, struct msm_vidc_platform_resources *res,
+		hfi_cmd_response_callback callback);
+void vidc_hfi_deinitialize(enum msm_vidc_hfi_type hfi_type,
+			struct hfi_device *hdev);
+u32 vidc_get_hfi_domain(enum hal_domain hal_domain);
+u32 vidc_get_hfi_codec(enum hal_video_codec hal_codec);
+enum hal_domain vidc_get_hal_domain(u32 hfi_domain);
+enum hal_video_codec vidc_get_hal_codec(u32 hfi_codec);
+
+#endif /* __VIDC_HFI_API_H__ */
diff -Nruw linux-4.4.115-fbx/drivers/media/platform/msm./vidc/vidc_hfi.c linux-4.4.115-fbx/drivers/media/platform/msm/vidc/vidc_hfi.c
--- linux-4.4.115-fbx/drivers/media/platform/msm./vidc/vidc_hfi.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/media/platform/msm/vidc/vidc_hfi.c	2019-01-22 16:16:24.467255136 +0100
@@ -0,0 +1,72 @@
+/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/slab.h>
+#include "msm_vidc_debug.h"
+#include "vidc_hfi_api.h"
+#include "venus_hfi.h"
+
+struct hfi_device *vidc_hfi_initialize(enum msm_vidc_hfi_type hfi_type,
+		u32 device_id, struct msm_vidc_platform_resources *res,
+		hfi_cmd_response_callback callback)
+{
+	struct hfi_device *hdev;
+	int rc = 0;
+
+	hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
+	if (!hdev) {
+		dprintk(VIDC_ERR, "%s: failed to allocate hdev\n", __func__);
+		return NULL;
+	}
+
+	switch (hfi_type) {
+	case VIDC_HFI_VENUS:
+		rc = venus_hfi_initialize(hdev, device_id, res, callback);
+		break;
+	default:
+		dprintk(VIDC_ERR, "Unsupported host-firmware interface\n");
+		goto err_hfi_init;
+	}
+
+	if (rc) {
+		if (rc != -EPROBE_DEFER)
+			dprintk(VIDC_ERR, "%s device init failed rc = %d",
+				__func__, rc);
+		goto err_hfi_init;
+	}
+
+	return hdev;
+
+err_hfi_init:
+	kfree(hdev);
+	return ERR_PTR(rc);
+}
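+
+/*
+ * Note for callers: the function above returns NULL when the wrapper
+ * allocation fails but ERR_PTR(rc) when device init fails, so results
+ * should be tested with IS_ERR_OR_NULL() rather than a plain NULL
+ * check.
+ */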
+
+void vidc_hfi_deinitialize(enum msm_vidc_hfi_type hfi_type,
+			struct hfi_device *hdev)
+{
+	if (!hdev) {
+		dprintk(VIDC_ERR, "%s invalid device %pK", __func__, hdev);
+		return;
+	}
+
+	switch (hfi_type) {
+	case VIDC_HFI_VENUS:
+		venus_hfi_delete_device(hdev->hfi_device_data);
+		break;
+	default:
+		dprintk(VIDC_ERR, "Unsupported host-firmware interface\n");
+	}
+
+	kfree(hdev);
+}
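+
+/*
+ * Usage note: each successful vidc_hfi_initialize() is expected to be
+ * balanced by a vidc_hfi_deinitialize() call with the same hfi_type,
+ * which tears down the device-private data and frees the wrapper.
+ */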
+
diff -Nruw linux-4.4.115-fbx/drivers/media/platform/msm./vidc/vidc_hfi.h linux-4.4.115-fbx/drivers/media/platform/msm/vidc/vidc_hfi.h
--- linux-4.4.115-fbx/drivers/media/platform/msm./vidc/vidc_hfi.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/media/platform/msm/vidc/vidc_hfi.h	2019-10-29 09:26:23.965206329 +0100
@@ -0,0 +1,934 @@
+/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef __H_VIDC_HFI_H__
+#define __H_VIDC_HFI_H__
+
+#include <media/msm_media_info.h>
+#include "vidc_hfi_helper.h"
+#include "vidc_hfi_api.h"
+
+#define HFI_EVENT_SESSION_SEQUENCE_CHANGED (HFI_OX_BASE + 0x3)
+#define HFI_EVENT_SESSION_PROPERTY_CHANGED (HFI_OX_BASE + 0x4)
+#define HFI_EVENT_SESSION_LTRUSE_FAILED (HFI_OX_BASE + 0x5)
+#define HFI_EVENT_RELEASE_BUFFER_REFERENCE (HFI_OX_BASE + 0x6)
+
+#define HFI_EVENT_DATA_SEQUENCE_CHANGED_SUFFICIENT_BUFFER_RESOURCES	\
+	(HFI_OX_BASE + 0x1)
+#define HFI_EVENT_DATA_SEQUENCE_CHANGED_INSUFFICIENT_BUFFER_RESOURCES	\
+	(HFI_OX_BASE + 0x2)
+
+#define HFI_BUFFERFLAG_EOS			0x00000001
+#define HFI_BUFFERFLAG_STARTTIME		0x00000002
+#define HFI_BUFFERFLAG_DECODEONLY		0x00000004
+#define HFI_BUFFERFLAG_DATACORRUPT		0x00000008
+#define HFI_BUFFERFLAG_ENDOFFRAME		0x00000010
+#define HFI_BUFFERFLAG_SYNCFRAME		0x00000020
+#define HFI_BUFFERFLAG_EXTRADATA		0x00000040
+#define HFI_BUFFERFLAG_CODECCONFIG		0x00000080
+#define HFI_BUFFERFLAG_TIMESTAMPINVALID		0x00000100
+#define HFI_BUFFERFLAG_READONLY			0x00000200
+#define HFI_BUFFERFLAG_ENDOFSUBFRAME		0x00000400
+#define HFI_BUFFERFLAG_EOSEQ			0x00200000
+#define HFI_BUFFER_FLAG_MBAFF			0x08000000
+#define HFI_BUFFERFLAG_VPE_YUV_601_709_CSC_CLAMP \
+						0x10000000
+#define HFI_BUFFERFLAG_DROP_FRAME               0x20000000
+#define HFI_BUFFERFLAG_TEI			0x40000000
+#define HFI_BUFFERFLAG_DISCONTINUITY		0x80000000
+
+#define HFI_ERR_SESSION_EMPTY_BUFFER_DONE_OUTPUT_PENDING	\
+	(HFI_OX_BASE + 0x1001)
+#define HFI_ERR_SESSION_SAME_STATE_OPERATION		\
+	(HFI_OX_BASE + 0x1002)
+#define HFI_ERR_SESSION_SYNC_FRAME_NOT_DETECTED		\
+	(HFI_OX_BASE + 0x1003)
+#define  HFI_ERR_SESSION_START_CODE_NOT_FOUND		\
+	(HFI_OX_BASE + 0x1004)
+
+#define HFI_BUFFER_INTERNAL_SCRATCH (HFI_OX_BASE + 0x1)
+#define HFI_BUFFER_EXTRADATA_INPUT (HFI_OX_BASE + 0x2)
+#define HFI_BUFFER_EXTRADATA_OUTPUT (HFI_OX_BASE + 0x3)
+#define HFI_BUFFER_EXTRADATA_OUTPUT2 (HFI_OX_BASE + 0x4)
+#define HFI_BUFFER_INTERNAL_SCRATCH_1 (HFI_OX_BASE + 0x5)
+#define HFI_BUFFER_INTERNAL_SCRATCH_2 (HFI_OX_BASE + 0x6)
+
+#define HFI_BUFFER_MODE_STATIC (HFI_OX_BASE + 0x1)
+#define HFI_BUFFER_MODE_RING (HFI_OX_BASE + 0x2)
+#define HFI_BUFFER_MODE_DYNAMIC (HFI_OX_BASE + 0x3)
+
+#define HFI_FLUSH_INPUT (HFI_OX_BASE + 0x1)
+#define HFI_FLUSH_OUTPUT (HFI_OX_BASE + 0x2)
+#define HFI_FLUSH_ALL (HFI_OX_BASE + 0x4)
+
+#define HFI_EXTRADATA_NONE					0x00000000
+#define HFI_EXTRADATA_MB_QUANTIZATION		0x00000001
+#define HFI_EXTRADATA_INTERLACE_VIDEO		0x00000002
+#define HFI_EXTRADATA_VC1_FRAMEDISP			0x00000003
+#define HFI_EXTRADATA_VC1_SEQDISP			0x00000004
+#define HFI_EXTRADATA_TIMESTAMP				0x00000005
+#define HFI_EXTRADATA_S3D_FRAME_PACKING		0x00000006
+#define HFI_EXTRADATA_FRAME_RATE			0x00000007
+#define HFI_EXTRADATA_PANSCAN_WINDOW		0x00000008
+#define HFI_EXTRADATA_RECOVERY_POINT_SEI	0x00000009
+#define HFI_EXTRADATA_MPEG2_SEQDISP		0x0000000D
+#define HFI_EXTRADATA_STREAM_USERDATA		0x0000000E
+#define HFI_EXTRADATA_FRAME_QP			0x0000000F
+#define HFI_EXTRADATA_FRAME_BITS_INFO		0x00000010
+#define HFI_EXTRADATA_VPX_COLORSPACE		0x00000014
+#define HFI_EXTRADATA_MULTISLICE_INFO		0x7F100000
+#define HFI_EXTRADATA_NUM_CONCEALED_MB		0x7F100001
+#define HFI_EXTRADATA_INDEX					0x7F100002
+#define HFI_EXTRADATA_METADATA_LTR			0x7F100004
+#define HFI_EXTRADATA_METADATA_FILLER		0x7FE00002
+
+#define HFI_INDEX_EXTRADATA_INPUT_CROP		0x0700000E
+#define HFI_INDEX_EXTRADATA_OUTPUT_CROP		0x0700000F
+#define HFI_INDEX_EXTRADATA_ASPECT_RATIO	0x7F100003
+
+struct hfi_buffer_alloc_mode {
+	u32 buffer_type;
+	u32 buffer_mode;
+};
+
+struct hfi_index_extradata_config {
+	int enable;
+	u32 index_extra_data_id;
+};
+
+struct hfi_extradata_header {
+	u32 size;
+	u32 version;
+	u32 port_index;
+	u32 type;
+	u32 data_size;
+	u8 rg_data[1];
+};
+
+#define HFI_INTERLACE_FRAME_PROGRESSIVE					0x01
+#define HFI_INTERLACE_INTERLEAVE_FRAME_TOPFIELDFIRST	0x02
+#define HFI_INTERLACE_INTERLEAVE_FRAME_BOTTOMFIELDFIRST	0x04
+#define HFI_INTERLACE_FRAME_TOPFIELDFIRST				0x08
+#define HFI_INTERLACE_FRAME_BOTTOMFIELDFIRST			0x10
+
+#define HFI_PROPERTY_SYS_OX_START			\
+	(HFI_DOMAIN_BASE_COMMON + HFI_ARCH_OX_OFFSET + 0x0000)
+
+#define HFI_PROPERTY_PARAM_OX_START				\
+	(HFI_DOMAIN_BASE_COMMON + HFI_ARCH_OX_OFFSET + 0x1000)
+#define HFI_PROPERTY_PARAM_BUFFER_COUNT_ACTUAL			\
+	(HFI_PROPERTY_PARAM_OX_START + 0x001)
+#define HFI_PROPERTY_PARAM_UNCOMPRESSED_PLANE_ACTUAL_CONSTRAINTS_INFO	\
+	(HFI_PROPERTY_PARAM_OX_START + 0x002)
+#define HFI_PROPERTY_PARAM_INTERLACE_FORMAT_SUPPORTED	\
+	(HFI_PROPERTY_PARAM_OX_START + 0x003)
+#define HFI_PROPERTY_PARAM_CHROMA_SITE				\
+	(HFI_PROPERTY_PARAM_OX_START + 0x004)
+#define HFI_PROPERTY_PARAM_EXTRA_DATA_HEADER_CONFIG		\
+	(HFI_PROPERTY_PARAM_OX_START + 0x005)
+#define HFI_PROPERTY_PARAM_INDEX_EXTRADATA             \
+	(HFI_PROPERTY_PARAM_OX_START + 0x006)
+#define HFI_PROPERTY_PARAM_DIVX_FORMAT					\
+	(HFI_PROPERTY_PARAM_OX_START + 0x007)
+#define HFI_PROPERTY_PARAM_BUFFER_ALLOC_MODE			\
+	(HFI_PROPERTY_PARAM_OX_START + 0x008)
+#define HFI_PROPERTY_PARAM_S3D_FRAME_PACKING_EXTRADATA	\
+	(HFI_PROPERTY_PARAM_OX_START + 0x009)
+#define HFI_PROPERTY_PARAM_ERR_DETECTION_CODE_EXTRADATA \
+	(HFI_PROPERTY_PARAM_OX_START + 0x00A)
+#define HFI_PROPERTY_PARAM_BUFFER_ALLOC_MODE_SUPPORTED	\
+	(HFI_PROPERTY_PARAM_OX_START + 0x00B)
+#define HFI_PROPERTY_PARAM_BUFFER_SIZE_MINIMUM			\
+	(HFI_PROPERTY_PARAM_OX_START + 0x00C)
+#define HFI_PROPERTY_PARAM_SYNC_BASED_INTERRUPT			\
+	(HFI_PROPERTY_PARAM_OX_START + 0x00E)
+
+#define HFI_PROPERTY_CONFIG_OX_START					\
+	(HFI_DOMAIN_BASE_COMMON + HFI_ARCH_OX_OFFSET + 0x02000)
+#define HFI_PROPERTY_CONFIG_BUFFER_REQUIREMENTS			\
+	(HFI_PROPERTY_CONFIG_OX_START + 0x001)
+#define HFI_PROPERTY_CONFIG_REALTIME					\
+	(HFI_PROPERTY_CONFIG_OX_START + 0x002)
+#define HFI_PROPERTY_CONFIG_PRIORITY					\
+	(HFI_PROPERTY_CONFIG_OX_START + 0x003)
+#define HFI_PROPERTY_CONFIG_BATCH_INFO					\
+	(HFI_PROPERTY_CONFIG_OX_START + 0x004)
+
+#define HFI_PROPERTY_PARAM_VDEC_OX_START				\
+	(HFI_DOMAIN_BASE_VDEC + HFI_ARCH_OX_OFFSET + 0x3000)
+#define HFI_PROPERTY_PARAM_VDEC_CONTINUE_DATA_TRANSFER	\
+	(HFI_PROPERTY_PARAM_VDEC_OX_START + 0x001)
+#define HFI_PROPERTY_PARAM_VDEC_DISPLAY_PICTURE_BUFFER_COUNT\
+	(HFI_PROPERTY_PARAM_VDEC_OX_START + 0x002)
+#define HFI_PROPERTY_PARAM_VDEC_MULTI_VIEW_SELECT		\
+	(HFI_PROPERTY_PARAM_VDEC_OX_START + 0x003)
+#define HFI_PROPERTY_PARAM_VDEC_PICTURE_TYPE_DECODE		\
+	(HFI_PROPERTY_PARAM_VDEC_OX_START + 0x004)
+#define HFI_PROPERTY_PARAM_VDEC_OUTPUT_ORDER			\
+	(HFI_PROPERTY_PARAM_VDEC_OX_START + 0x005)
+#define HFI_PROPERTY_PARAM_VDEC_MB_QUANTIZATION			\
+	(HFI_PROPERTY_PARAM_VDEC_OX_START + 0x006)
+#define HFI_PROPERTY_PARAM_VDEC_NUM_CONCEALED_MB		\
+	(HFI_PROPERTY_PARAM_VDEC_OX_START + 0x007)
+#define HFI_PROPERTY_PARAM_VDEC_H264_ENTROPY_SWITCHING	\
+	(HFI_PROPERTY_PARAM_VDEC_OX_START + 0x008)
+#define HFI_PROPERTY_PARAM_VDEC_OUTPUT2_KEEP_ASPECT_RATIO\
+	(HFI_PROPERTY_PARAM_VDEC_OX_START + 0x009)
+#define HFI_PROPERTY_PARAM_VDEC_FRAME_RATE_EXTRADATA  \
+	(HFI_PROPERTY_PARAM_VDEC_OX_START + 0x00A)
+#define HFI_PROPERTY_PARAM_VDEC_PANSCAN_WNDW_EXTRADATA \
+	(HFI_PROPERTY_PARAM_VDEC_OX_START + 0x00B)
+#define HFI_PROPERTY_PARAM_VDEC_RECOVERY_POINT_SEI_EXTRADATA \
+	(HFI_PROPERTY_PARAM_VDEC_OX_START + 0x00C)
+#define HFI_PROPERTY_PARAM_VDEC_THUMBNAIL_MODE   \
+	(HFI_PROPERTY_PARAM_VDEC_OX_START + 0x00D)
+
+#define HFI_PROPERTY_PARAM_VDEC_FRAME_ASSEMBLY		\
+	(HFI_PROPERTY_PARAM_VDEC_OX_START + 0x00E)
+#define HFI_PROPERTY_PARAM_VDEC_VC1_FRAMEDISP_EXTRADATA		\
+	(HFI_PROPERTY_PARAM_VDEC_OX_START + 0x011)
+#define HFI_PROPERTY_PARAM_VDEC_VC1_SEQDISP_EXTRADATA		\
+	(HFI_PROPERTY_PARAM_VDEC_OX_START + 0x012)
+#define HFI_PROPERTY_PARAM_VDEC_TIMESTAMP_EXTRADATA			\
+	(HFI_PROPERTY_PARAM_VDEC_OX_START + 0x013)
+#define HFI_PROPERTY_PARAM_VDEC_INTERLACE_VIDEO_EXTRADATA	\
+	(HFI_PROPERTY_PARAM_VDEC_OX_START + 0x014)
+#define HFI_PROPERTY_PARAM_VDEC_AVC_SESSION_SELECT \
+	(HFI_PROPERTY_PARAM_VDEC_OX_START + 0x015)
+#define HFI_PROPERTY_PARAM_VDEC_MPEG2_SEQDISP_EXTRADATA \
+	(HFI_PROPERTY_PARAM_VDEC_OX_START + 0x016)
+#define HFI_PROPERTY_PARAM_VDEC_STREAM_USERDATA_EXTRADATA \
+	(HFI_PROPERTY_PARAM_VDEC_OX_START + 0x017)
+#define HFI_PROPERTY_PARAM_VDEC_FRAME_QP_EXTRADATA \
+	(HFI_PROPERTY_PARAM_VDEC_OX_START + 0x018)
+#define HFI_PROPERTY_PARAM_VDEC_FRAME_BITS_INFO_EXTRADATA \
+	(HFI_PROPERTY_PARAM_VDEC_OX_START + 0x019)
+#define HFI_PROPERTY_PARAM_VDEC_SCS_THRESHOLD \
+	(HFI_PROPERTY_PARAM_VDEC_OX_START + 0x01A)
+#define HFI_PROPERTY_PARAM_VUI_DISPLAY_INFO_EXTRADATA \
+	(HFI_PROPERTY_PARAM_VDEC_OX_START + 0x01B)
+#define HFI_PROPERTY_PARAM_VDEC_VQZIP_SEI_EXTRADATA \
+	(HFI_PROPERTY_PARAM_VDEC_OX_START + 0x001C)
+#define HFI_PROPERTY_PARAM_VDEC_VPX_COLORSPACE_EXTRADATA \
+	(HFI_PROPERTY_PARAM_VDEC_OX_START + 0x001D)
+#define HFI_PROPERTY_PARAM_VDEC_MASTERING_DISPLAY_COLOUR_SEI_EXTRADATA \
+	(HFI_PROPERTY_PARAM_VDEC_OX_START + 0x001E)
+#define HFI_PROPERTY_PARAM_VDEC_CONTENT_LIGHT_LEVEL_SEI_EXTRADATA \
+	(HFI_PROPERTY_PARAM_VDEC_OX_START + 0x001F)
+
+#define HFI_PROPERTY_CONFIG_VDEC_OX_START				\
+	(HFI_DOMAIN_BASE_VDEC + HFI_ARCH_OX_OFFSET + 0x4000)
+#define HFI_PROPERTY_CONFIG_VDEC_POST_LOOP_DEBLOCKER	\
+	(HFI_PROPERTY_CONFIG_VDEC_OX_START + 0x001)
+#define HFI_PROPERTY_CONFIG_VDEC_MB_ERROR_MAP_REPORTING	\
+	(HFI_PROPERTY_CONFIG_VDEC_OX_START + 0x002)
+#define HFI_PROPERTY_CONFIG_VDEC_MB_ERROR_MAP			\
+	(HFI_PROPERTY_CONFIG_VDEC_OX_START + 0x003)
+#define HFI_PROPERTY_CONFIG_VDEC_ENTROPY \
+	(HFI_PROPERTY_CONFIG_VDEC_OX_START + 0x004)
+
+#define HFI_PROPERTY_PARAM_VENC_OX_START				\
+	(HFI_DOMAIN_BASE_VENC + HFI_ARCH_OX_OFFSET + 0x5000)
+#define  HFI_PROPERTY_PARAM_VENC_MULTI_SLICE_INFO       \
+	(HFI_PROPERTY_PARAM_VENC_OX_START + 0x001)
+#define  HFI_PROPERTY_PARAM_VENC_H264_IDR_S3D_FRAME_PACKING_NAL \
+	(HFI_PROPERTY_PARAM_VENC_OX_START + 0x002)
+#define  HFI_PROPERTY_PARAM_VENC_LTR_INFO			\
+	(HFI_PROPERTY_PARAM_VENC_OX_START + 0x003)
+#define  HFI_PROPERTY_PARAM_VENC_MBI_DUMPING				\
+	(HFI_PROPERTY_PARAM_VENC_OX_START + 0x005)
+#define HFI_PROPERTY_PARAM_VENC_FRAME_QP_EXTRADATA		\
+	(HFI_PROPERTY_PARAM_VENC_OX_START + 0x006)
+#define  HFI_PROPERTY_PARAM_VENC_YUVSTAT_INFO_EXTRADATA		\
+	(HFI_PROPERTY_PARAM_VENC_OX_START + 0x007)
+#define  HFI_PROPERTY_PARAM_VENC_ROI_QP_EXTRADATA		\
+	(HFI_PROPERTY_PARAM_VENC_OX_START + 0x008)
+#define  HFI_PROPERTY_PARAM_VENC_OVERRIDE_QP_EXTRADATA		\
+	(HFI_PROPERTY_PARAM_VENC_OX_START + 0x009)
+
+#define HFI_PROPERTY_CONFIG_VENC_OX_START				\
+	(HFI_DOMAIN_BASE_VENC + HFI_ARCH_OX_OFFSET + 0x6000)
+#define  HFI_PROPERTY_CONFIG_VENC_FRAME_QP				\
+	(HFI_PROPERTY_CONFIG_VENC_OX_START + 0x001)
+
+#define HFI_PROPERTY_PARAM_VPE_OX_START					\
+	(HFI_DOMAIN_BASE_VPE + HFI_ARCH_OX_OFFSET + 0x7000)
+#define HFI_PROPERTY_PARAM_VPE_COLOR_SPACE_CONVERSION			\
+	(HFI_PROPERTY_PARAM_VPE_OX_START + 0x001)
+
+#define HFI_PROPERTY_CONFIG_VPE_OX_START				\
+	(HFI_DOMAIN_BASE_VPE + HFI_ARCH_OX_OFFSET + 0x8000)
+
+struct hfi_batch_info {
+	u32 input_batch_count;
+	u32 output_batch_count;
+};
+
+struct hfi_buffer_count_actual {
+	u32 buffer_type;
+	u32 buffer_count_actual;
+};
+
+struct hfi_buffer_size_minimum {
+	u32 buffer_type;
+	u32 buffer_size;
+};
+
+struct hfi_buffer_requirements {
+	u32 buffer_type;
+	u32 buffer_size;
+	u32 buffer_region_size;
+	u32 buffer_hold_count;
+	u32 buffer_count_min;
+	u32 buffer_count_actual;
+	u32 contiguous;
+	u32 buffer_alignment;
+};
+
+#define HFI_CHROMA_SITE_0			(HFI_OX_BASE + 0x1)
+#define HFI_CHROMA_SITE_1			(HFI_OX_BASE + 0x2)
+#define HFI_CHROMA_SITE_2			(HFI_OX_BASE + 0x3)
+#define HFI_CHROMA_SITE_3			(HFI_OX_BASE + 0x4)
+#define HFI_CHROMA_SITE_4			(HFI_OX_BASE + 0x5)
+#define HFI_CHROMA_SITE_5			(HFI_OX_BASE + 0x6)
+
+struct hfi_data_payload {
+	u32 size;
+	u8 rg_data[1];
+};
+
+struct hfi_enable_picture {
+	u32 picture_type;
+};
+
+struct hfi_display_picture_buffer_count {
+	int enable;
+	u32 count;
+};
+
+struct hfi_extra_data_header_config {
+	u32 type;
+	u32 buffer_type;
+	u32 version;
+	u32 port_index;
+	u32 client_extra_data_id;
+};
+
+struct hfi_interlace_format_supported {
+	u32 buffer_type;
+	u32 format;
+};
+
+struct hfi_buffer_alloc_mode_supported {
+	u32 buffer_type;
+	u32 num_entries;
+	u32 rg_data[1];
+};
+
+struct hfi_mb_error_map {
+	u32 error_map_size;
+	u8 rg_error_map[1];
+};
+
+struct hfi_metadata_pass_through {
+	int enable;
+	u32 size;
+};
+
+struct hfi_multi_view_select {
+	u32 view_index;
+};
+
+struct hfi_hybrid_hierp {
+	u32 layers;
+};
+
+#define HFI_PRIORITY_LOW		10
+#define HFI_PRIOIRTY_MEDIUM		20
+#define HFI_PRIORITY_HIGH		30
+
+#define HFI_OUTPUT_ORDER_DISPLAY	(HFI_OX_BASE + 0x1)
+#define HFI_OUTPUT_ORDER_DECODE		(HFI_OX_BASE + 0x2)
+
+#define HFI_RATE_CONTROL_OFF		(HFI_OX_BASE + 0x1)
+#define HFI_RATE_CONTROL_VBR_VFR	(HFI_OX_BASE + 0x2)
+#define HFI_RATE_CONTROL_VBR_CFR	(HFI_OX_BASE + 0x3)
+#define HFI_RATE_CONTROL_CBR_VFR	(HFI_OX_BASE + 0x4)
+#define HFI_RATE_CONTROL_CBR_CFR	(HFI_OX_BASE + 0x5)
+#define HFI_RATE_CONTROL_MBR_CFR	(HFI_OX_BASE + 0x6)
+#define HFI_RATE_CONTROL_MBR_VFR	(HFI_OX_BASE + 0x7)
+
+struct hfi_uncompressed_plane_actual_constraints_info {
+	u32 buffer_type;
+	u32 num_planes;
+	struct hfi_uncompressed_plane_constraints rg_plane_format[1];
+};
+
+#define HFI_CMD_SYS_OX_START		\
+(HFI_DOMAIN_BASE_COMMON + HFI_ARCH_OX_OFFSET + HFI_CMD_START_OFFSET + 0x0000)
+#define HFI_CMD_SYS_SESSION_ABORT	(HFI_CMD_SYS_OX_START + 0x001)
+#define HFI_CMD_SYS_PING		(HFI_CMD_SYS_OX_START + 0x002)
+
+#define HFI_CMD_SESSION_OX_START	\
+(HFI_DOMAIN_BASE_COMMON + HFI_ARCH_OX_OFFSET + HFI_CMD_START_OFFSET + 0x1000)
+#define HFI_CMD_SESSION_LOAD_RESOURCES	(HFI_CMD_SESSION_OX_START + 0x001)
+#define HFI_CMD_SESSION_START		(HFI_CMD_SESSION_OX_START + 0x002)
+#define HFI_CMD_SESSION_STOP		(HFI_CMD_SESSION_OX_START + 0x003)
+#define HFI_CMD_SESSION_EMPTY_BUFFER	(HFI_CMD_SESSION_OX_START + 0x004)
+#define HFI_CMD_SESSION_FILL_BUFFER	(HFI_CMD_SESSION_OX_START + 0x005)
+#define HFI_CMD_SESSION_SUSPEND		(HFI_CMD_SESSION_OX_START + 0x006)
+#define HFI_CMD_SESSION_RESUME		(HFI_CMD_SESSION_OX_START + 0x007)
+#define HFI_CMD_SESSION_FLUSH		(HFI_CMD_SESSION_OX_START + 0x008)
+#define HFI_CMD_SESSION_GET_PROPERTY	(HFI_CMD_SESSION_OX_START + 0x009)
+#define HFI_CMD_SESSION_PARSE_SEQUENCE_HEADER	\
+	(HFI_CMD_SESSION_OX_START + 0x00A)
+#define HFI_CMD_SESSION_RELEASE_BUFFERS		\
+	(HFI_CMD_SESSION_OX_START + 0x00B)
+#define HFI_CMD_SESSION_RELEASE_RESOURCES	\
+	(HFI_CMD_SESSION_OX_START + 0x00C)
+#define HFI_CMD_SESSION_CONTINUE	(HFI_CMD_SESSION_OX_START + 0x00D)
+#define HFI_CMD_SESSION_SYNC		(HFI_CMD_SESSION_OX_START + 0x00E)
+
+#define HFI_MSG_SYS_OX_START			\
+(HFI_DOMAIN_BASE_COMMON + HFI_ARCH_OX_OFFSET + HFI_MSG_START_OFFSET + 0x0000)
+#define HFI_MSG_SYS_PING_ACK	(HFI_MSG_SYS_OX_START + 0x2)
+#define HFI_MSG_SYS_SESSION_ABORT_DONE	(HFI_MSG_SYS_OX_START + 0x4)
+
+#define HFI_MSG_SESSION_OX_START		\
+(HFI_DOMAIN_BASE_COMMON + HFI_ARCH_OX_OFFSET + HFI_MSG_START_OFFSET + 0x1000)
+#define HFI_MSG_SESSION_LOAD_RESOURCES_DONE	(HFI_MSG_SESSION_OX_START + 0x1)
+#define HFI_MSG_SESSION_START_DONE		(HFI_MSG_SESSION_OX_START + 0x2)
+#define HFI_MSG_SESSION_STOP_DONE		(HFI_MSG_SESSION_OX_START + 0x3)
+#define HFI_MSG_SESSION_SUSPEND_DONE	(HFI_MSG_SESSION_OX_START + 0x4)
+#define HFI_MSG_SESSION_RESUME_DONE		(HFI_MSG_SESSION_OX_START + 0x5)
+#define HFI_MSG_SESSION_FLUSH_DONE		(HFI_MSG_SESSION_OX_START + 0x6)
+#define HFI_MSG_SESSION_EMPTY_BUFFER_DONE	(HFI_MSG_SESSION_OX_START + 0x7)
+#define HFI_MSG_SESSION_FILL_BUFFER_DONE	(HFI_MSG_SESSION_OX_START + 0x8)
+#define HFI_MSG_SESSION_PROPERTY_INFO		(HFI_MSG_SESSION_OX_START + 0x9)
+#define HFI_MSG_SESSION_RELEASE_RESOURCES_DONE	\
+	(HFI_MSG_SESSION_OX_START + 0xA)
+#define HFI_MSG_SESSION_PARSE_SEQUENCE_HEADER_DONE		\
+	(HFI_MSG_SESSION_OX_START + 0xB)
+#define  HFI_MSG_SESSION_RELEASE_BUFFERS_DONE			\
+	(HFI_MSG_SESSION_OX_START + 0xC)
+
+#define VIDC_IFACEQ_MAX_PKT_SIZE                        1024
+#define VIDC_IFACEQ_MED_PKT_SIZE                        768
+#define VIDC_IFACEQ_MIN_PKT_SIZE                        8
+#define VIDC_IFACEQ_VAR_SMALL_PKT_SIZE          100
+#define VIDC_IFACEQ_VAR_LARGE_PKT_SIZE          512
+#define VIDC_IFACEQ_VAR_HUGE_PKT_SIZE          (1024*12)
+
+struct hfi_cmd_sys_session_abort_packet {
+	u32 size;
+	u32 packet_type;
+	u32 session_id;
+};
+
+struct hfi_cmd_sys_ping_packet {
+	u32 size;
+	u32 packet_type;
+	u32 client_data;
+};
+
+struct hfi_cmd_session_load_resources_packet {
+	u32 size;
+	u32 packet_type;
+	u32 session_id;
+};
+
+struct hfi_cmd_session_start_packet {
+	u32 size;
+	u32 packet_type;
+	u32 session_id;
+};
+
+struct hfi_cmd_session_stop_packet {
+	u32 size;
+	u32 packet_type;
+	u32 session_id;
+};
+
+struct hfi_cmd_session_empty_buffer_compressed_packet {
+	u32 size;
+	u32 packet_type;
+	u32 session_id;
+	u32 time_stamp_hi;
+	u32 time_stamp_lo;
+	u32 flags;
+	u32 mark_target;
+	u32 mark_data;
+	u32 offset;
+	u32 alloc_len;
+	u32 filled_len;
+	u32 input_tag;
+	u32 packet_buffer;
+	u32 extra_data_buffer;
+	u32 rgData[1];
+};
+
+struct hfi_cmd_session_empty_buffer_uncompressed_plane0_packet {
+	u32 size;
+	u32 packet_type;
+	u32 session_id;
+	u32 view_id;
+	u32 time_stamp_hi;
+	u32 time_stamp_lo;
+	u32 flags;
+	u32 mark_target;
+	u32 mark_data;
+	u32 alloc_len;
+	u32 filled_len;
+	u32 offset;
+	u32 input_tag;
+	u32 packet_buffer;
+	u32 extra_data_buffer;
+	u32 rgData[1];
+};
+
+struct hfi_cmd_session_empty_buffer_uncompressed_plane1_packet {
+	u32 flags;
+	u32 alloc_len;
+	u32 filled_len;
+	u32 offset;
+	u32 packet_buffer2;
+	u32 rgData[1];
+};
+
+struct hfi_cmd_session_empty_buffer_uncompressed_plane2_packet {
+	u32 flags;
+	u32 alloc_len;
+	u32 filled_len;
+	u32 offset;
+	u32 packet_buffer3;
+	u32 rgData[1];
+};
+
+struct hfi_cmd_session_fill_buffer_packet {
+	u32 size;
+	u32 packet_type;
+	u32 session_id;
+	u32 stream_id;
+	u32 offset;
+	u32 alloc_len;
+	u32 filled_len;
+	u32 output_tag;
+	u32 packet_buffer;
+	u32 extra_data_buffer;
+	u32 rgData[1];
+};
+
+struct hfi_cmd_session_flush_packet {
+	u32 size;
+	u32 packet_type;
+	u32 session_id;
+	u32 flush_type;
+};
+
+struct hfi_cmd_session_suspend_packet {
+	u32 size;
+	u32 packet_type;
+	u32 session_id;
+};
+
+struct hfi_cmd_session_resume_packet {
+	u32 size;
+	u32 packet_type;
+	u32 session_id;
+};
+
+struct hfi_cmd_session_get_property_packet {
+	u32 size;
+	u32 packet_type;
+	u32 session_id;
+	u32 num_properties;
+	u32 rg_property_data[1];
+};
+
+struct hfi_cmd_session_release_buffer_packet {
+	u32 size;
+	u32 packet_type;
+	u32 session_id;
+	u32 buffer_type;
+	u32 buffer_size;
+	u32 extra_data_size;
+	int response_req;
+	u32 num_buffers;
+	u32 rg_buffer_info[1];
+};
+
+struct hfi_cmd_session_release_resources_packet {
+	u32 size;
+	u32 packet_type;
+	u32 session_id;
+};
+
+struct hfi_cmd_session_parse_sequence_header_packet {
+	u32 size;
+	u32 packet_type;
+	u32 session_id;
+	u32 header_len;
+	u32 packet_buffer;
+};
+
+struct hfi_msg_sys_session_abort_done_packet {
+	u32 size;
+	u32 packet_type;
+	u32 session_id;
+	u32 error_type;
+};
+
+struct hfi_msg_sys_idle_packet {
+	u32 size;
+	u32 packet_type;
+};
+
+struct hfi_msg_sys_ping_ack_packet {
+	u32 size;
+	u32 packet_type;
+	u32 client_data;
+};
+
+struct hfi_msg_sys_property_info_packet {
+	u32 size;
+	u32 packet_type;
+	u32 num_properties;
+	u32 rg_property_data[1];
+};
+
+struct hfi_msg_session_load_resources_done_packet {
+	u32 size;
+	u32 packet_type;
+	u32 session_id;
+	u32 error_type;
+};
+
+struct hfi_msg_session_start_done_packet {
+	u32 size;
+	u32 packet_type;
+	u32 session_id;
+	u32 error_type;
+};
+
+struct hfi_msg_session_stop_done_packet {
+	u32 size;
+	u32 packet_type;
+	u32 session_id;
+	u32 error_type;
+};
+
+struct hfi_msg_session_suspend_done_packet {
+	u32 size;
+	u32 packet_type;
+	u32 session_id;
+	u32 error_type;
+};
+
+struct hfi_msg_session_resume_done_packet {
+	u32 size;
+	u32 packet_type;
+	u32 session_id;
+	u32 error_type;
+};
+
+struct hfi_msg_session_flush_done_packet {
+	u32 size;
+	u32 packet_type;
+	u32 session_id;
+	u32 error_type;
+	u32 flush_type;
+};
+
+struct hfi_msg_session_empty_buffer_done_packet {
+	u32 size;
+	u32 packet_type;
+	u32 session_id;
+	u32 error_type;
+	u32 offset;
+	u32 filled_len;
+	u32 input_tag;
+	u32 packet_buffer;
+	u32 extra_data_buffer;
+	u32 rgData[0];
+};
+
+struct hfi_msg_session_fill_buffer_done_compressed_packet {
+	u32 size;
+	u32 packet_type;
+	u32 session_id;
+	u32 time_stamp_hi;
+	u32 time_stamp_lo;
+	u32 error_type;
+	u32 flags;
+	u32 mark_target;
+	u32 mark_data;
+	u32 stats;
+	u32 offset;
+	u32 alloc_len;
+	u32 filled_len;
+	u32 input_tag;
+	u32 output_tag;
+	u32 picture_type;
+	u32 packet_buffer;
+	u32 extra_data_buffer;
+	u32 rgData[0];
+};
+
+struct hfi_msg_session_fbd_uncompressed_plane0_packet {
+	u32 size;
+	u32 packet_type;
+	u32 session_id;
+	u32 stream_id;
+	u32 view_id;
+	u32 error_type;
+	u32 time_stamp_hi;
+	u32 time_stamp_lo;
+	u32 flags;
+	u32 mark_target;
+	u32 mark_data;
+	u32 stats;
+	u32 alloc_len;
+	u32 filled_len;
+	u32 offset;
+	u32 frame_width;
+	u32 frame_height;
+	u32 start_x_coord;
+	u32 start_y_coord;
+	u32 input_tag;
+	u32 input_tag2;
+	u32 output_tag;
+	u32 picture_type;
+	u32 packet_buffer;
+	u32 extra_data_buffer;
+	u32 rgData[0];
+};
+
+struct hfi_msg_session_fill_buffer_done_uncompressed_plane1_packet {
+	u32 flags;
+	u32 alloc_len;
+	u32 filled_len;
+	u32 offset;
+	u32 packet_buffer2;
+	u32 rgData[0];
+};
+
+struct hfi_msg_session_fill_buffer_done_uncompressed_plane2_packet {
+	u32 flags;
+	u32 alloc_len;
+	u32 filled_len;
+	u32 offset;
+	u32 packet_buffer3;
+	u32 rgData[0];
+};
+
+struct hfi_msg_session_parse_sequence_header_done_packet {
+	u32 size;
+	u32 packet_type;
+	u32 session_id;
+	u32 error_type;
+	u32 num_properties;
+	u32 rg_property_data[1];
+};
+
+struct hfi_msg_session_property_info_packet {
+	u32 size;
+	u32 packet_type;
+	u32 session_id;
+	u32 num_properties;
+	u32 rg_property_data[1];
+};
+
+struct hfi_msg_session_release_resources_done_packet {
+	u32 size;
+	u32 packet_type;
+	u32 session_id;
+	u32 error_type;
+};
+
+struct hfi_msg_session_release_buffers_done_packet {
+	u32 size;
+	u32 packet_type;
+	u32 session_id;
+	u32 error_type;
+	u32 num_buffers;
+	u32 rg_buffer_info[1];
+};
+
+struct hfi_extradata_mb_quantization_payload {
+	u8 rg_mb_qp[1];
+};
+
+struct hfi_extradata_vc1_pswnd {
+	u32 ps_wnd_h_offset;
+	u32 ps_wnd_v_offset;
+	u32 ps_wnd_width;
+	u32 ps_wnd_height;
+};
+
+struct hfi_extradata_vc1_framedisp_payload {
+	u32 res_pic;
+	u32 ref;
+	u32 range_map_present;
+	u32 range_map_y;
+	u32 range_map_uv;
+	u32 num_pan_scan_wnds;
+	struct hfi_extradata_vc1_pswnd rg_ps_wnd[1];
+};
+
+struct hfi_extradata_vc1_seqdisp_payload {
+	u32 prog_seg_frm;
+	u32 uv_sampling_fmt;
+	u32 color_fmt_flag;
+	u32 color_primaries;
+	u32 transfer_char;
+	u32 mat_coeff;
+	u32 aspect_ratio;
+	u32 aspect_horiz;
+	u32 aspect_vert;
+};
+
+struct hfi_extradata_timestamp_payload {
+	u32 time_stamp_low;
+	u32 time_stamp_high;
+};
+
+struct hfi_extradata_s3d_frame_packing_payload {
+	u32 fpa_id;
+	int cancel_flag;
+	u32 fpa_type;
+	int quin_cunx_flag;
+	u32 content_interprtation_type;
+	int spatial_flipping_flag;
+	int frame0_flipped_flag;
+	int field_views_flag;
+	int current_frame_isFrame0_flag;
+	int frame0_self_contained_flag;
+	int frame1_self_contained_flag;
+	u32 frame0_graid_pos_x;
+	u32 frame0_graid_pos_y;
+	u32 frame1_graid_pos_x;
+	u32 frame1_graid_pos_y;
+	u32 fpa_reserved_byte;
+	u32 fpa_repetition_period;
+	int fpa_extension_flag;
+};
+
+struct hfi_extradata_interlace_video_payload {
+	u32 format;
+};
+
+struct hfi_extradata_num_concealed_mb_payload {
+	u32 num_mb_concealed;
+};
+
+struct hfi_extradata_sliceinfo {
+	u32 offset_in_stream;
+	u32 slice_length;
+};
+
+struct hfi_extradata_multislice_info_payload {
+	u32 num_slices;
+	struct hfi_extradata_sliceinfo rg_slice_info[1];
+};
+
+struct hfi_index_extradata_input_crop_payload {
+	u32 size;
+	u32 version;
+	u32 port_index;
+	u32 left;
+	u32 top;
+	u32 width;
+	u32 height;
+};
+
+struct hfi_index_extradata_output_crop_payload {
+	u32 size;
+	u32 version;
+	u32 port_index;
+	u32 left;
+	u32 top;
+	u32 display_width;
+	u32 display_height;
+	u32 width;
+	u32 height;
+};
+
+struct hfi_index_extradata_digital_zoom_payload {
+	u32 size;
+	u32 version;
+	u32 port_index;
+	int width;
+	int height;
+};
+
+struct hfi_index_extradata_aspect_ratio_payload {
+	u32 size;
+	u32 version;
+	u32 port_index;
+	u32 aspect_width;
+	u32 aspect_height;
+};
+
+struct hfi_extradata_panscan_wndw_payload {
+	u32 num_window;
+	struct hfi_extradata_vc1_pswnd wnd[1];
+};
+
+struct hfi_extradata_frame_type_payload {
+	u32 frame_rate;
+};
+
+struct hfi_extradata_recovery_point_sei_payload {
+	u32 flag;
+};
+
+struct hfi_cmd_session_continue_packet {
+	u32 size;
+	u32 packet_type;
+	u32 session_id;
+};
+
+struct hal_session {
+	struct list_head list;
+	void *session_id;
+	bool is_decoder;
+	enum hal_video_codec codec;
+	enum hal_domain domain;
+	void *device;
+};
+
+struct hal_device_data {
+	struct list_head dev_head;
+	int dev_count;
+};
+
+struct msm_vidc_fw {
+	void *cookie;
+};
+
+int hfi_process_msg_packet(u32 device_id, struct vidc_hal_msg_pkt_hdr *msg_hdr,
+		struct msm_vidc_cb_info *info);
+
+enum vidc_status hfi_process_sys_init_done_prop_read(
+	struct hfi_msg_sys_init_done_packet *pkt,
+	struct vidc_hal_sys_init_done *sys_init_done);
+
+enum vidc_status hfi_process_session_init_done_prop_read(
+	struct hfi_msg_sys_session_init_done_packet *pkt,
+	struct vidc_hal_session_init_done *session_init_done);
+
+#endif /* __H_VIDC_HFI_H__ */
+
diff -Nruw linux-4.4.115-fbx/drivers/media/platform/msm./vidc/vidc_hfi_helper.h linux-4.4.115-fbx/drivers/media/platform/msm/vidc/vidc_hfi_helper.h
--- linux-4.4.115-fbx/drivers/media/platform/msm./vidc/vidc_hfi_helper.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/media/platform/msm/vidc/vidc_hfi_helper.h	2019-10-29 09:26:23.965206329 +0100
@@ -0,0 +1,1182 @@
+/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __H_VIDC_HFI_HELPER_H__
+#define __H_VIDC_HFI_HELPER_H__
+
+#define HFI_COMMON_BASE				(0)
+#define HFI_OX_BASE					(0x01000000)
+
+#define HFI_VIDEO_DOMAIN_ENCODER	(HFI_COMMON_BASE + 0x1)
+#define HFI_VIDEO_DOMAIN_DECODER	(HFI_COMMON_BASE + 0x2)
+#define HFI_VIDEO_DOMAIN_VPE		(HFI_COMMON_BASE + 0x4)
+#define HFI_VIDEO_DOMAIN_MBI		(HFI_COMMON_BASE + 0x8)
+
+#define HFI_DOMAIN_BASE_COMMON		(HFI_COMMON_BASE + 0)
+#define HFI_DOMAIN_BASE_VDEC		(HFI_COMMON_BASE + 0x01000000)
+#define HFI_DOMAIN_BASE_VENC		(HFI_COMMON_BASE + 0x02000000)
+#define HFI_DOMAIN_BASE_VPE			(HFI_COMMON_BASE + 0x03000000)
+
+#define HFI_VIDEO_ARCH_OX			(HFI_COMMON_BASE + 0x1)
+
+#define HFI_ARCH_COMMON_OFFSET		(0)
+#define HFI_ARCH_OX_OFFSET			(0x00200000)
+
+#define  HFI_CMD_START_OFFSET		(0x00010000)
+#define  HFI_MSG_START_OFFSET		(0x00020000)
+
+#define HFI_ERR_NONE						HFI_COMMON_BASE
+#define HFI_ERR_SYS_FATAL				(HFI_COMMON_BASE + 0x1)
+#define HFI_ERR_SYS_INVALID_PARAMETER		(HFI_COMMON_BASE + 0x2)
+#define HFI_ERR_SYS_VERSION_MISMATCH		(HFI_COMMON_BASE + 0x3)
+#define HFI_ERR_SYS_INSUFFICIENT_RESOURCES	(HFI_COMMON_BASE + 0x4)
+#define HFI_ERR_SYS_MAX_SESSIONS_REACHED	(HFI_COMMON_BASE + 0x5)
+#define HFI_ERR_SYS_UNSUPPORTED_CODEC		(HFI_COMMON_BASE + 0x6)
+#define HFI_ERR_SYS_SESSION_IN_USE			(HFI_COMMON_BASE + 0x7)
+#define HFI_ERR_SYS_SESSION_ID_OUT_OF_RANGE	(HFI_COMMON_BASE + 0x8)
+#define HFI_ERR_SYS_UNSUPPORTED_DOMAIN		(HFI_COMMON_BASE + 0x9)
+
+#define HFI_ERR_SESSION_FATAL			(HFI_COMMON_BASE + 0x1001)
+#define HFI_ERR_SESSION_INVALID_PARAMETER	(HFI_COMMON_BASE + 0x1002)
+#define HFI_ERR_SESSION_BAD_POINTER		(HFI_COMMON_BASE + 0x1003)
+#define HFI_ERR_SESSION_INVALID_SESSION_ID	(HFI_COMMON_BASE + 0x1004)
+#define HFI_ERR_SESSION_INVALID_STREAM_ID	(HFI_COMMON_BASE + 0x1005)
+#define HFI_ERR_SESSION_INCORRECT_STATE_OPERATION		\
+	(HFI_COMMON_BASE + 0x1006)
+#define HFI_ERR_SESSION_UNSUPPORTED_PROPERTY	(HFI_COMMON_BASE + 0x1007)
+
+#define HFI_ERR_SESSION_UNSUPPORTED_SETTING	(HFI_COMMON_BASE + 0x1008)
+
+#define HFI_ERR_SESSION_INSUFFICIENT_RESOURCES	(HFI_COMMON_BASE + 0x1009)
+
+#define HFI_ERR_SESSION_STREAM_CORRUPT_OUTPUT_STALLED	\
+	(HFI_COMMON_BASE + 0x100A)
+
+#define HFI_ERR_SESSION_STREAM_CORRUPT		(HFI_COMMON_BASE + 0x100B)
+#define HFI_ERR_SESSION_ENC_OVERFLOW		(HFI_COMMON_BASE + 0x100C)
+#define HFI_ERR_SESSION_UNSUPPORTED_STREAM	(HFI_COMMON_BASE + 0x100D)
+#define HFI_ERR_SESSION_CMDSIZE			(HFI_COMMON_BASE + 0x100E)
+#define HFI_ERR_SESSION_UNSUPPORT_CMD		(HFI_COMMON_BASE + 0x100F)
+#define HFI_ERR_SESSION_UNSUPPORT_BUFFERTYPE	(HFI_COMMON_BASE + 0x1010)
+#define HFI_ERR_SESSION_BUFFERCOUNT_TOOSMALL	(HFI_COMMON_BASE + 0x1011)
+#define HFI_ERR_SESSION_INVALID_SCALE_FACTOR	(HFI_COMMON_BASE + 0x1012)
+#define HFI_ERR_SESSION_UPSCALE_NOT_SUPPORTED	(HFI_COMMON_BASE + 0x1013)
+
+#define HFI_EVENT_SYS_ERROR				(HFI_COMMON_BASE + 0x1)
+#define HFI_EVENT_SESSION_ERROR			(HFI_COMMON_BASE + 0x2)
+
+#define HFI_VIDEO_CODEC_H264				0x00000002
+#define HFI_VIDEO_CODEC_H263				0x00000004
+#define HFI_VIDEO_CODEC_MPEG1				0x00000008
+#define HFI_VIDEO_CODEC_MPEG2				0x00000010
+#define HFI_VIDEO_CODEC_MPEG4				0x00000020
+#define HFI_VIDEO_CODEC_DIVX_311			0x00000040
+#define HFI_VIDEO_CODEC_DIVX				0x00000080
+#define HFI_VIDEO_CODEC_VC1				0x00000100
+#define HFI_VIDEO_CODEC_SPARK				0x00000200
+#define HFI_VIDEO_CODEC_VP8				0x00001000
+#define HFI_VIDEO_CODEC_HEVC				0x00002000
+#define HFI_VIDEO_CODEC_VP9				0x00004000
+#define HFI_VIDEO_CODEC_HEVC_HYBRID			0x80000000
+
+#define HFI_H264_PROFILE_BASELINE			0x00000001
+#define HFI_H264_PROFILE_MAIN				0x00000002
+#define HFI_H264_PROFILE_HIGH				0x00000004
+#define HFI_H264_PROFILE_STEREO_HIGH		0x00000008
+#define HFI_H264_PROFILE_MULTIVIEW_HIGH		0x00000010
+#define HFI_H264_PROFILE_CONSTRAINED_BASE	0x00000020
+#define HFI_H264_PROFILE_CONSTRAINED_HIGH	0x00000040
+
+#define HFI_H264_LEVEL_1					0x00000001
+#define HFI_H264_LEVEL_1b					0x00000002
+#define HFI_H264_LEVEL_11					0x00000004
+#define HFI_H264_LEVEL_12					0x00000008
+#define HFI_H264_LEVEL_13					0x00000010
+#define HFI_H264_LEVEL_2					0x00000020
+#define HFI_H264_LEVEL_21					0x00000040
+#define HFI_H264_LEVEL_22					0x00000080
+#define HFI_H264_LEVEL_3					0x00000100
+#define HFI_H264_LEVEL_31					0x00000200
+#define HFI_H264_LEVEL_32					0x00000400
+#define HFI_H264_LEVEL_4					0x00000800
+#define HFI_H264_LEVEL_41					0x00001000
+#define HFI_H264_LEVEL_42					0x00002000
+#define HFI_H264_LEVEL_5					0x00004000
+#define HFI_H264_LEVEL_51					0x00008000
+#define HFI_H264_LEVEL_52					0x00010000
+
+#define HFI_H263_PROFILE_BASELINE			0x00000001
+
+#define HFI_H263_LEVEL_10					0x00000001
+#define HFI_H263_LEVEL_20					0x00000002
+#define HFI_H263_LEVEL_30					0x00000004
+#define HFI_H263_LEVEL_40					0x00000008
+#define HFI_H263_LEVEL_45					0x00000010
+#define HFI_H263_LEVEL_50					0x00000020
+#define HFI_H263_LEVEL_60					0x00000040
+#define HFI_H263_LEVEL_70					0x00000080
+
+#define HFI_MPEG2_PROFILE_SIMPLE			0x00000001
+#define HFI_MPEG2_PROFILE_MAIN				0x00000002
+#define HFI_MPEG2_PROFILE_422				0x00000004
+#define HFI_MPEG2_PROFILE_SNR				0x00000008
+#define HFI_MPEG2_PROFILE_SPATIAL			0x00000010
+#define HFI_MPEG2_PROFILE_HIGH				0x00000020
+
+#define HFI_MPEG2_LEVEL_LL					0x00000001
+#define HFI_MPEG2_LEVEL_ML					0x00000002
+#define HFI_MPEG2_LEVEL_H14					0x00000004
+#define HFI_MPEG2_LEVEL_HL					0x00000008
+
+#define HFI_MPEG4_PROFILE_SIMPLE			0x00000001
+#define HFI_MPEG4_PROFILE_ADVANCEDSIMPLE	0x00000002
+
+#define HFI_MPEG4_LEVEL_0					0x00000001
+#define HFI_MPEG4_LEVEL_0b					0x00000002
+#define HFI_MPEG4_LEVEL_1					0x00000004
+#define HFI_MPEG4_LEVEL_2					0x00000008
+#define HFI_MPEG4_LEVEL_3					0x00000010
+#define HFI_MPEG4_LEVEL_4					0x00000020
+#define HFI_MPEG4_LEVEL_4a					0x00000040
+#define HFI_MPEG4_LEVEL_5					0x00000080
+#define HFI_MPEG4_LEVEL_6					0x00000100
+#define HFI_MPEG4_LEVEL_7					0x00000200
+#define HFI_MPEG4_LEVEL_8					0x00000400
+#define HFI_MPEG4_LEVEL_9					0x00000800
+#define HFI_MPEG4_LEVEL_3b					0x00001000
+
+#define HFI_VC1_PROFILE_SIMPLE				0x00000001
+#define HFI_VC1_PROFILE_MAIN				0x00000002
+#define HFI_VC1_PROFILE_ADVANCED			0x00000004
+
+#define HFI_VC1_LEVEL_LOW					0x00000001
+#define HFI_VC1_LEVEL_MEDIUM				0x00000002
+#define HFI_VC1_LEVEL_HIGH					0x00000004
+#define HFI_VC1_LEVEL_0						0x00000008
+#define HFI_VC1_LEVEL_1						0x00000010
+#define HFI_VC1_LEVEL_2						0x00000020
+#define HFI_VC1_LEVEL_3						0x00000040
+#define HFI_VC1_LEVEL_4						0x00000080
+
+#define HFI_VPX_PROFILE_SIMPLE				0x00000001
+#define HFI_VPX_PROFILE_ADVANCED			0x00000002
+#define HFI_VPX_PROFILE_VERSION_0			0x00000004
+#define HFI_VPX_PROFILE_VERSION_1			0x00000008
+#define HFI_VPX_PROFILE_VERSION_2			0x00000010
+#define HFI_VPX_PROFILE_VERSION_3			0x00000020
+
+#define HFI_DIVX_FORMAT_4				(HFI_COMMON_BASE + 0x1)
+#define HFI_DIVX_FORMAT_5				(HFI_COMMON_BASE + 0x2)
+#define HFI_DIVX_FORMAT_6				(HFI_COMMON_BASE + 0x3)
+
+#define HFI_DIVX_PROFILE_QMOBILE		0x00000001
+#define HFI_DIVX_PROFILE_MOBILE			0x00000002
+#define HFI_DIVX_PROFILE_MT				0x00000004
+#define HFI_DIVX_PROFILE_HT				0x00000008
+#define HFI_DIVX_PROFILE_HD				0x00000010
+
+#define  HFI_HEVC_PROFILE_MAIN			0x00000001
+#define  HFI_HEVC_PROFILE_MAIN10		0x00000002
+#define  HFI_HEVC_PROFILE_MAIN_STILL_PIC	0x00000004
+
+#define  HFI_HEVC_LEVEL_1	0x00000001
+#define  HFI_HEVC_LEVEL_2	0x00000002
+#define  HFI_HEVC_LEVEL_21	0x00000004
+#define  HFI_HEVC_LEVEL_3	0x00000008
+#define  HFI_HEVC_LEVEL_31	0x00000010
+#define  HFI_HEVC_LEVEL_4	0x00000020
+#define  HFI_HEVC_LEVEL_41	0x00000040
+#define  HFI_HEVC_LEVEL_5	0x00000080
+#define  HFI_HEVC_LEVEL_51	0x00000100
+#define  HFI_HEVC_LEVEL_52	0x00000200
+#define  HFI_HEVC_LEVEL_6	0x00000400
+#define  HFI_HEVC_LEVEL_61	0x00000800
+#define  HFI_HEVC_LEVEL_62	0x00001000
+
+#define HFI_HEVC_TIER_MAIN	0x1
+#define HFI_HEVC_TIER_HIGH0	0x2
+
+#define HFI_BUFFER_INPUT				(HFI_COMMON_BASE + 0x1)
+#define HFI_BUFFER_OUTPUT				(HFI_COMMON_BASE + 0x2)
+#define HFI_BUFFER_OUTPUT2				(HFI_COMMON_BASE + 0x3)
+#define HFI_BUFFER_INTERNAL_PERSIST		(HFI_COMMON_BASE + 0x4)
+#define HFI_BUFFER_INTERNAL_PERSIST_1		(HFI_COMMON_BASE + 0x5)
+
+#define  HFI_BITDEPTH_8				(HFI_COMMON_BASE + 0x0)
+#define  HFI_BITDEPTH_9				(HFI_COMMON_BASE + 0x1)
+#define  HFI_BITDEPTH_10			(HFI_COMMON_BASE + 0x2)
+
+#define HFI_VENC_PERFMODE_MAX_QUALITY	0x1
+#define HFI_VENC_PERFMODE_POWER_SAVE	0x2
+
+struct hfi_buffer_info {
+	u32 buffer_addr;
+	u32 extra_data_addr;
+};
+
+#define HFI_PROPERTY_SYS_COMMON_START		\
+	(HFI_DOMAIN_BASE_COMMON + HFI_ARCH_COMMON_OFFSET + 0x0000)
+#define HFI_PROPERTY_SYS_DEBUG_CONFIG		\
+	(HFI_PROPERTY_SYS_COMMON_START + 0x001)
+#define HFI_PROPERTY_SYS_RESOURCE_OCMEM_REQUIREMENT_INFO	\
+	(HFI_PROPERTY_SYS_COMMON_START + 0x002)
+#define HFI_PROPERTY_SYS_CONFIG_VCODEC_CLKFREQ				\
+	(HFI_PROPERTY_SYS_COMMON_START + 0x003)
+#define HFI_PROPERTY_SYS_IDLE_INDICATOR         \
+	(HFI_PROPERTY_SYS_COMMON_START + 0x004)
+#define  HFI_PROPERTY_SYS_CODEC_POWER_PLANE_CTRL     \
+	(HFI_PROPERTY_SYS_COMMON_START + 0x005)
+#define  HFI_PROPERTY_SYS_IMAGE_VERSION    \
+	(HFI_PROPERTY_SYS_COMMON_START + 0x006)
+#define  HFI_PROPERTY_SYS_CONFIG_COVERAGE    \
+	(HFI_PROPERTY_SYS_COMMON_START + 0x007)
+
+#define HFI_PROPERTY_PARAM_COMMON_START	\
+	(HFI_DOMAIN_BASE_COMMON + HFI_ARCH_COMMON_OFFSET + 0x1000)
+#define HFI_PROPERTY_PARAM_FRAME_SIZE		\
+	(HFI_PROPERTY_PARAM_COMMON_START + 0x001)
+#define HFI_PROPERTY_PARAM_UNCOMPRESSED_PLANE_ACTUAL_INFO	\
+	(HFI_PROPERTY_PARAM_COMMON_START + 0x002)
+#define HFI_PROPERTY_PARAM_UNCOMPRESSED_FORMAT_SELECT		\
+	(HFI_PROPERTY_PARAM_COMMON_START + 0x003)
+#define HFI_PROPERTY_PARAM_UNCOMPRESSED_FORMAT_SUPPORTED	\
+	(HFI_PROPERTY_PARAM_COMMON_START + 0x004)
+#define HFI_PROPERTY_PARAM_PROFILE_LEVEL_CURRENT			\
+	(HFI_PROPERTY_PARAM_COMMON_START + 0x005)
+#define HFI_PROPERTY_PARAM_PROFILE_LEVEL_SUPPORTED			\
+	(HFI_PROPERTY_PARAM_COMMON_START + 0x006)
+#define HFI_PROPERTY_PARAM_CAPABILITY_SUPPORTED				\
+	(HFI_PROPERTY_PARAM_COMMON_START + 0x007)
+#define HFI_PROPERTY_PARAM_PROPERTIES_SUPPORTED				\
+	(HFI_PROPERTY_PARAM_COMMON_START + 0x008)
+#define HFI_PROPERTY_PARAM_CODEC_SUPPORTED			\
+	(HFI_PROPERTY_PARAM_COMMON_START + 0x009)
+#define HFI_PROPERTY_PARAM_NAL_STREAM_FORMAT_SUPPORTED		\
+	(HFI_PROPERTY_PARAM_COMMON_START + 0x00A)
+#define HFI_PROPERTY_PARAM_NAL_STREAM_FORMAT_SELECT			\
+	(HFI_PROPERTY_PARAM_COMMON_START + 0x00B)
+#define HFI_PROPERTY_PARAM_MULTI_VIEW_FORMAT				\
+	(HFI_PROPERTY_PARAM_COMMON_START + 0x00C)
+#define  HFI_PROPERTY_PARAM_MAX_SEQUENCE_HEADER_SIZE        \
+	(HFI_PROPERTY_PARAM_COMMON_START + 0x00D)
+#define  HFI_PROPERTY_PARAM_CODEC_MASK_SUPPORTED            \
+	(HFI_PROPERTY_PARAM_COMMON_START + 0x00E)
+#define HFI_PROPERTY_PARAM_MVC_BUFFER_LAYOUT \
+	(HFI_PROPERTY_PARAM_COMMON_START + 0x00F)
+#define  HFI_PROPERTY_PARAM_MAX_SESSIONS_SUPPORTED	    \
+	(HFI_PROPERTY_PARAM_COMMON_START + 0x010)
+
+#define HFI_PROPERTY_CONFIG_COMMON_START				\
+	(HFI_DOMAIN_BASE_COMMON + HFI_ARCH_COMMON_OFFSET + 0x2000)
+#define HFI_PROPERTY_CONFIG_FRAME_RATE					\
+	(HFI_PROPERTY_CONFIG_COMMON_START + 0x001)
+
+#define HFI_PROPERTY_PARAM_VDEC_COMMON_START				\
+	(HFI_DOMAIN_BASE_VDEC + HFI_ARCH_COMMON_OFFSET + 0x3000)
+#define HFI_PROPERTY_PARAM_VDEC_MULTI_STREAM				\
+	(HFI_PROPERTY_PARAM_VDEC_COMMON_START + 0x001)
+#define HFI_PROPERTY_PARAM_VDEC_CONCEAL_COLOR				\
+	(HFI_PROPERTY_PARAM_VDEC_COMMON_START + 0x002)
+#define HFI_PROPERTY_PARAM_VDEC_NONCP_OUTPUT2				\
+	(HFI_PROPERTY_PARAM_VDEC_COMMON_START + 0x003)
+#define  HFI_PROPERTY_PARAM_VDEC_PIXEL_BITDEPTH				\
+	(HFI_PROPERTY_PARAM_VDEC_COMMON_START + 0x007)
+#define  HFI_PROPERTY_PARAM_VDEC_PIC_STRUCT				\
+	(HFI_PROPERTY_PARAM_VDEC_COMMON_START + 0x009)
+#define  HFI_PROPERTY_PARAM_VDEC_COLOUR_SPACE				\
+	(HFI_PROPERTY_PARAM_VDEC_COMMON_START + 0x00A)
+
+#define HFI_PROPERTY_CONFIG_VDEC_COMMON_START				\
+	(HFI_DOMAIN_BASE_VDEC + HFI_ARCH_COMMON_OFFSET + 0x4000)
+
+#define HFI_PROPERTY_PARAM_VENC_COMMON_START				\
+	(HFI_DOMAIN_BASE_VENC + HFI_ARCH_COMMON_OFFSET + 0x5000)
+#define HFI_PROPERTY_PARAM_VENC_SLICE_DELIVERY_MODE			\
+	(HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x001)
+#define HFI_PROPERTY_PARAM_VENC_H264_ENTROPY_CONTROL		\
+	(HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x002)
+#define HFI_PROPERTY_PARAM_VENC_H264_DEBLOCK_CONTROL		\
+	(HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x003)
+#define HFI_PROPERTY_PARAM_VENC_RATE_CONTROL				\
+	(HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x004)
+#define  HFI_PROPERTY_PARAM_VENC_H264_PICORDER_CNT_TYPE     \
+	(HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x005)
+#define HFI_PROPERTY_PARAM_VENC_SESSION_QP				\
+	(HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x006)
+#define HFI_PROPERTY_PARAM_VENC_MPEG4_AC_PREDICTION			\
+	(HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x007)
+#define  HFI_PROPERTY_PARAM_VENC_SESSION_QP_RANGE           \
+	(HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x008)
+#define HFI_PROPERTY_PARAM_VENC_MPEG4_TIME_RESOLUTION		\
+	(HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x009)
+#define HFI_PROPERTY_PARAM_VENC_MPEG4_SHORT_HEADER			\
+	(HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x00A)
+#define HFI_PROPERTY_PARAM_VENC_MPEG4_HEADER_EXTENSION		\
+	(HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x00B)
+#define  HFI_PROPERTY_PARAM_VENC_OPEN_GOP                   \
+	(HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x00C)
+#define HFI_PROPERTY_PARAM_VENC_INTRA_REFRESH				\
+	(HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x00D)
+#define HFI_PROPERTY_PARAM_VENC_MULTI_SLICE_CONTROL			\
+	(HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x00E)
+#define  HFI_PROPERTY_PARAM_VENC_VBV_HRD_BUF_SIZE           \
+	(HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x00F)
+#define  HFI_PROPERTY_PARAM_VENC_QUALITY_VS_SPEED           \
+	(HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x010)
+#define HFI_PROPERTY_PARAM_VENC_ADVANCED				\
+	(HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x012)
+#define  HFI_PROPERTY_PARAM_VENC_H264_SPS_ID                \
+	(HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x014)
+#define  HFI_PROPERTY_PARAM_VENC_H264_PPS_ID               \
+	(HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x015)
+#define HFI_PROPERTY_PARAM_VENC_GENERATE_AUDNAL	\
+	(HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x016)
+#define HFI_PROPERTY_PARAM_VENC_ASPECT_RATIO			\
+	(HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x017)
+#define HFI_PROPERTY_PARAM_VENC_NUMREF					\
+	(HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x018)
+#define HFI_PROPERTY_PARAM_VENC_MULTIREF_P				\
+	(HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x019)
+#define HFI_PROPERTY_PARAM_VENC_H264_NAL_SVC_EXT		\
+	(HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x01B)
+#define HFI_PROPERTY_PARAM_VENC_LTRMODE		\
+	 (HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x01C)
+#define HFI_PROPERTY_PARAM_VENC_VIDEO_SIGNAL_INFO	\
+	(HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x01D)
+#define HFI_PROPERTY_PARAM_VENC_H264_VUI_TIMING_INFO	\
+	(HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x01E)
+#define HFI_PROPERTY_PARAM_VENC_VC1_PERF_CFG		\
+	(HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x01F)
+#define  HFI_PROPERTY_PARAM_VENC_MAX_NUM_B_FRAMES \
+	(HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x020)
+#define HFI_PROPERTY_PARAM_VENC_H264_VUI_BITSTREAM_RESTRC \
+	(HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x021)
+#define HFI_PROPERTY_PARAM_VENC_LOW_LATENCY_MODE	\
+	(HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x022)
+#define HFI_PROPERTY_PARAM_VENC_PRESERVE_TEXT_QUALITY \
+	(HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x023)
+#define HFI_PROPERTY_PARAM_VENC_H264_8X8_TRANSFORM \
+	(HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x025)
+#define HFI_PROPERTY_PARAM_VENC_HIER_P_MAX_NUM_ENH_LAYER	\
+	(HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x026)
+#define HFI_PROPERTY_PARAM_VENC_DISABLE_RC_TIMESTAMP \
+	(HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x027)
+#define HFI_PROPERTY_PARAM_VENC_INITIAL_QP	\
+	(HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x028)
+#define HFI_PROPERTY_PARAM_VENC_VPX_ERROR_RESILIENCE_MODE	\
+	(HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x029)
+#define HFI_PROPERTY_PARAM_VENC_CONSTRAINED_INTRA_PRED	\
+	(HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x02B)
+#define HFI_PROPERTY_PARAM_VENC_HIER_B_MAX_NUM_ENH_LAYER	\
+	(HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x02C)
+#define  HFI_PROPERTY_PARAM_VENC_HIER_P_HYBRID_MODE	\
+	(HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x02F)
+#define  HFI_PROPERTY_PARAM_VENC_BITRATE_TYPE		\
+	(HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x031)
+#define  HFI_PROPERTY_PARAM_VENC_VQZIP_SEI_TYPE		\
+	(HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x033)
+#define  HFI_PROPERTY_PARAM_VENC_IFRAMESIZE			\
+	(HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x034)
+#define  HFI_PROPERTY_PARAM_VENC_SEND_OUTPUT_FOR_SKIPPED_FRAMES    \
+	(HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x035)
+
+#define HFI_PROPERTY_CONFIG_VENC_COMMON_START				\
+	(HFI_DOMAIN_BASE_VENC + HFI_ARCH_COMMON_OFFSET + 0x6000)
+#define HFI_PROPERTY_CONFIG_VENC_TARGET_BITRATE				\
+	(HFI_PROPERTY_CONFIG_VENC_COMMON_START + 0x001)
+#define HFI_PROPERTY_CONFIG_VENC_IDR_PERIOD				\
+	(HFI_PROPERTY_CONFIG_VENC_COMMON_START + 0x002)
+#define HFI_PROPERTY_CONFIG_VENC_INTRA_PERIOD				\
+	(HFI_PROPERTY_CONFIG_VENC_COMMON_START + 0x003)
+#define HFI_PROPERTY_CONFIG_VENC_REQUEST_SYNC_FRAME			\
+	(HFI_PROPERTY_CONFIG_VENC_COMMON_START + 0x004)
+#define  HFI_PROPERTY_CONFIG_VENC_SLICE_SIZE                \
+	(HFI_PROPERTY_CONFIG_VENC_COMMON_START + 0x005)
+#define HFI_PROPERTY_CONFIG_VENC_MAX_BITRATE				\
+	(HFI_PROPERTY_CONFIG_VENC_COMMON_START + 0x007)
+
+#define HFI_PROPERTY_PARAM_VPE_COMMON_START				\
+	(HFI_DOMAIN_BASE_VPE + HFI_ARCH_COMMON_OFFSET + 0x7000)
+#define  HFI_PROPERTY_CONFIG_VENC_SYNC_FRAME_SEQUENCE_HEADER	\
+	(HFI_PROPERTY_CONFIG_VENC_COMMON_START + 0x008)
+#define  HFI_PROPERTY_CONFIG_VENC_MARKLTRFRAME			\
+	(HFI_PROPERTY_CONFIG_VENC_COMMON_START + 0x009)
+#define  HFI_PROPERTY_CONFIG_VENC_USELTRFRAME			\
+	(HFI_PROPERTY_CONFIG_VENC_COMMON_START + 0x00A)
+#define  HFI_PROPERTY_CONFIG_VENC_HIER_P_ENH_LAYER		\
+	(HFI_PROPERTY_CONFIG_VENC_COMMON_START + 0x00B)
+#define  HFI_PROPERTY_CONFIG_VENC_LTRPERIOD			\
+	(HFI_PROPERTY_CONFIG_VENC_COMMON_START + 0x00C)
+#define  HFI_PROPERTY_CONFIG_VENC_PERF_MODE			\
+	(HFI_PROPERTY_CONFIG_VENC_COMMON_START + 0x00E)
+#define HFI_PROPERTY_CONFIG_VENC_BASELAYER_PRIORITYID		\
+	(HFI_PROPERTY_CONFIG_VENC_COMMON_START + 0x00F)
+
+#define HFI_PROPERTY_CONFIG_VPE_COMMON_START				\
+	(HFI_DOMAIN_BASE_VPE + HFI_ARCH_COMMON_OFFSET + 0x8000)
+#define  HFI_PROPERTY_CONFIG_VENC_BLUR_FRAME_SIZE		\
+	(HFI_PROPERTY_CONFIG_COMMON_START + 0x010)
+#define HFI_PROPERTY_CONFIG_VPE_DEINTERLACE				\
+	(HFI_PROPERTY_CONFIG_VPE_COMMON_START + 0x001)
+#define HFI_PROPERTY_CONFIG_VPE_OPERATIONS				\
+	(HFI_PROPERTY_CONFIG_VPE_COMMON_START + 0x002)
+
+struct hfi_pic_struct {
+	u32 progressive_only;
+};
+
+struct hfi_bitrate {
+	u32 bit_rate;
+	u32 layer_id;
+};
+
+struct hfi_colour_space {
+	u32 colour_space;
+};
+
+#define HFI_CAPABILITY_FRAME_WIDTH			(HFI_COMMON_BASE + 0x1)
+#define HFI_CAPABILITY_FRAME_HEIGHT			(HFI_COMMON_BASE + 0x2)
+#define HFI_CAPABILITY_MBS_PER_FRAME			(HFI_COMMON_BASE + 0x3)
+#define HFI_CAPABILITY_MBS_PER_SECOND			(HFI_COMMON_BASE + 0x4)
+#define HFI_CAPABILITY_FRAMERATE			(HFI_COMMON_BASE + 0x5)
+#define HFI_CAPABILITY_SCALE_X				(HFI_COMMON_BASE + 0x6)
+#define HFI_CAPABILITY_SCALE_Y				(HFI_COMMON_BASE + 0x7)
+#define HFI_CAPABILITY_BITRATE				(HFI_COMMON_BASE + 0x8)
+#define HFI_CAPABILITY_BFRAME				(HFI_COMMON_BASE + 0x9)
+#define HFI_CAPABILITY_PEAKBITRATE			(HFI_COMMON_BASE + 0xa)
+#define HFI_CAPABILITY_HIER_P_NUM_ENH_LAYERS		(HFI_COMMON_BASE + 0x10)
+#define HFI_CAPABILITY_ENC_LTR_COUNT			(HFI_COMMON_BASE + 0x11)
+#define HFI_CAPABILITY_CP_OUTPUT2_THRESH		(HFI_COMMON_BASE + 0x12)
+#define HFI_CAPABILITY_HIER_B_NUM_ENH_LAYERS	(HFI_COMMON_BASE + 0x13)
+#define HFI_CAPABILITY_LCU_SIZE				(HFI_COMMON_BASE + 0x14)
+#define HFI_CAPABILITY_HIER_P_HYBRID_NUM_ENH_LAYERS	(HFI_COMMON_BASE + 0x15)
+#define HFI_CAPABILITY_MBS_PER_SECOND_POWERSAVE		(HFI_COMMON_BASE + 0x16)
+
+struct hfi_capability_supported {
+	u32 capability_type;
+	u32 min;
+	u32 max;
+	u32 step_size;
+};
+
+struct hfi_capability_supported_info {
+	u32 num_capabilities;
+	struct hfi_capability_supported rg_data[1];
+};
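+
+/*
+ * Illustrative reading (values purely hypothetical): a capability of
+ * { .capability_type = HFI_CAPABILITY_FRAME_WIDTH, .min = 96,
+ *   .max = 4096, .step_size = 16 } advertises frame widths from 96 to
+ * 4096 pixels in steps of 16.
+ */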
+
+#define HFI_DEBUG_MSG_LOW					0x00000001
+#define HFI_DEBUG_MSG_MEDIUM					0x00000002
+#define HFI_DEBUG_MSG_HIGH					0x00000004
+#define HFI_DEBUG_MSG_ERROR					0x00000008
+#define HFI_DEBUG_MSG_FATAL					0x00000010
+#define HFI_DEBUG_MSG_PERF					0x00000020
+
+#define HFI_DEBUG_MODE_QUEUE					0x00000001
+#define HFI_DEBUG_MODE_QDSS					0x00000002
+
+struct hfi_debug_config {
+	u32 debug_config;
+	u32 debug_mode;
+};
+
+struct hfi_enable {
+	u32 enable;
+};
+
+#define HFI_H264_DB_MODE_DISABLE			(HFI_COMMON_BASE + 0x1)
+#define HFI_H264_DB_MODE_SKIP_SLICE_BOUNDARY	\
+	(HFI_COMMON_BASE + 0x2)
+#define HFI_H264_DB_MODE_ALL_BOUNDARY		(HFI_COMMON_BASE + 0x3)
+
+struct hfi_h264_db_control {
+	u32 mode;
+	u32 slice_alpha_offset;
+	u32 slice_beta_offset;
+};
+
+#define HFI_H264_ENTROPY_CAVLC				(HFI_COMMON_BASE + 0x1)
+#define HFI_H264_ENTROPY_CABAC				(HFI_COMMON_BASE + 0x2)
+
+#define HFI_H264_CABAC_MODEL_0				(HFI_COMMON_BASE + 0x1)
+#define HFI_H264_CABAC_MODEL_1				(HFI_COMMON_BASE + 0x2)
+#define HFI_H264_CABAC_MODEL_2				(HFI_COMMON_BASE + 0x3)
+
+struct hfi_h264_entropy_control {
+	u32 entropy_mode;
+	u32 cabac_model;
+};
+
+struct hfi_frame_rate {
+	u32 buffer_type;
+	u32 frame_rate;
+};
+
+#define HFI_INTRA_REFRESH_NONE				(HFI_COMMON_BASE + 0x1)
+#define HFI_INTRA_REFRESH_CYCLIC			(HFI_COMMON_BASE + 0x2)
+#define HFI_INTRA_REFRESH_ADAPTIVE			(HFI_COMMON_BASE + 0x3)
+#define HFI_INTRA_REFRESH_CYCLIC_ADAPTIVE	(HFI_COMMON_BASE + 0x4)
+#define HFI_INTRA_REFRESH_RANDOM			(HFI_COMMON_BASE + 0x5)
+
+struct hfi_intra_refresh {
+	u32 mode;
+	u32 air_mbs;
+	u32 air_ref;
+	u32 cir_mbs;
+};
+
+struct hfi_3x_intra_refresh {
+	u32 mode;
+	u32 mbs;
+};
+
+struct hfi_idr_period {
+	u32 idr_period;
+};
+
+struct hfi_operations_type {
+	u32 rotation;
+	u32 flip;
+};
+
+struct hfi_max_num_b_frames {
+	u32 max_num_b_frames;
+};
+
+struct hfi_vc1e_perf_cfg_type {
+	u32 search_range_x_subsampled[3];
+	u32 search_range_y_subsampled[3];
+};
+
+struct hfi_conceal_color {
+	u32 conceal_color;
+};
+
+struct hfi_intra_period {
+	u32 pframes;
+	u32 bframes;
+};
+
+struct hfi_mpeg4_header_extension {
+	u32 header_extension;
+};
+
+struct hfi_mpeg4_time_resolution {
+	u32 time_increment_resolution;
+};
+
+struct hfi_multi_stream {
+	u32 buffer_type;
+	u32 enable;
+	u32 width;
+	u32 height;
+};
+
+struct hfi_3x_multi_stream {
+	u32 buffer_type;
+	u32 enable;
+};
+
+struct hfi_multi_view_format {
+	u32 views;
+	u32 rg_view_order[1];
+};
+
+#define HFI_MULTI_SLICE_OFF				(HFI_COMMON_BASE + 0x1)
+#define HFI_MULTI_SLICE_BY_MB_COUNT		(HFI_COMMON_BASE + 0x2)
+#define HFI_MULTI_SLICE_BY_BYTE_COUNT	(HFI_COMMON_BASE + 0x3)
+#define HFI_MULTI_SLICE_GOB				(HFI_COMMON_BASE + 0x4)
+
+struct hfi_multi_slice_control {
+	u32 multi_slice;
+	u32 slice_size;
+};
+
+#define HFI_NAL_FORMAT_STARTCODES			0x00000001
+#define HFI_NAL_FORMAT_ONE_NAL_PER_BUFFER	0x00000002
+#define HFI_NAL_FORMAT_ONE_BYTE_LENGTH		0x00000004
+#define HFI_NAL_FORMAT_TWO_BYTE_LENGTH		0x00000008
+#define HFI_NAL_FORMAT_FOUR_BYTE_LENGTH		0x00000010
+
+struct hfi_nal_stream_format_supported {
+	u32 nal_stream_format_supported;
+};
+
+struct hfi_nal_stream_format_select {
+	u32 nal_stream_format_select;
+};
+#define HFI_PICTURE_TYPE_I					0x01
+#define HFI_PICTURE_TYPE_P					0x02
+#define HFI_PICTURE_TYPE_B					0x04
+#define HFI_PICTURE_TYPE_IDR					0x08
+#define HFI_PICTURE_TYPE_CRA					0x10
+
+struct hfi_profile_level {
+	u32 profile;
+	u32 level;
+};
+
+struct hfi_profile_level_supported {
+	u32 profile_count;
+	struct hfi_profile_level rg_profile_level[1];
+};
+
+struct hfi_quality_vs_speed {
+	u32 quality_vs_speed;
+};
+
+struct hfi_quantization {
+	u32 qp_i;
+	u32 qp_p;
+	u32 qp_b;
+	u32 layer_id;
+};
+
+struct hfi_initial_quantization {
+	u32 qp_i;
+	u32 qp_p;
+	u32 qp_b;
+	u32 init_qp_enable;
+};
+
+struct hfi_quantization_range {
+	u32 min_qp;
+	u32 max_qp;
+	u32 layer_id;
+};
+
+#define HFI_LTR_MODE_DISABLE	0x0
+#define HFI_LTR_MODE_MANUAL		0x1
+#define HFI_LTR_MODE_PERIODIC	0x2
+
+struct hfi_ltr_mode {
+	u32 ltr_mode;
+	u32 ltr_count;
+	u32 trust_mode;
+};
+
+struct hfi_ltr_use {
+	u32 ref_ltr;
+	u32 use_constrnt;
+	u32 frames;
+};
+
+struct hfi_ltr_mark {
+	u32 mark_frame;
+};
+
+struct hfi_frame_size {
+	u32 buffer_type;
+	u32 width;
+	u32 height;
+};
+
+struct hfi_video_signal_metadata {
+	u32 enable;
+	u32 video_format;
+	u32 video_full_range;
+	u32 color_description;
+	u32 color_primaries;
+	u32 transfer_characteristics;
+	u32 matrix_coeffs;
+};
+
+struct hfi_h264_vui_timing_info {
+	u32 enable;
+	u32 fixed_frame_rate;
+	u32 time_scale;
+};
+
+struct hfi_bit_depth {
+	u32 buffer_type;
+	u32 bit_depth;
+};
+
+struct hfi_picture_type {
+	u32 is_sync_frame;
+	u32 picture_type;
+};
+
+/* Base Offset for UBWC color formats */
+#define HFI_COLOR_FORMAT_UBWC_BASE        (0x8000)
+/* Base Offset for 10-bit color formats */
+#define HFI_COLOR_FORMAT_10_BIT_BASE      (0x4000)
+
+#define HFI_COLOR_FORMAT_MONOCHROME			(HFI_COMMON_BASE + 0x1)
+#define HFI_COLOR_FORMAT_NV12				(HFI_COMMON_BASE + 0x2)
+#define HFI_COLOR_FORMAT_NV21				(HFI_COMMON_BASE + 0x3)
+#define HFI_COLOR_FORMAT_NV12_4x4TILE		(HFI_COMMON_BASE + 0x4)
+#define HFI_COLOR_FORMAT_NV21_4x4TILE		(HFI_COMMON_BASE + 0x5)
+#define HFI_COLOR_FORMAT_YUYV				(HFI_COMMON_BASE + 0x6)
+#define HFI_COLOR_FORMAT_YVYU				(HFI_COMMON_BASE + 0x7)
+#define HFI_COLOR_FORMAT_UYVY				(HFI_COMMON_BASE + 0x8)
+#define HFI_COLOR_FORMAT_VYUY				(HFI_COMMON_BASE + 0x9)
+#define HFI_COLOR_FORMAT_RGB565				(HFI_COMMON_BASE + 0xA)
+#define HFI_COLOR_FORMAT_BGR565				(HFI_COMMON_BASE + 0xB)
+#define HFI_COLOR_FORMAT_RGB888				(HFI_COMMON_BASE + 0xC)
+#define HFI_COLOR_FORMAT_BGR888				(HFI_COMMON_BASE + 0xD)
+#define HFI_COLOR_FORMAT_YUV444				(HFI_COMMON_BASE + 0xE)
+#define HFI_COLOR_FORMAT_RGBA8888			(HFI_COMMON_BASE + 0x10)
+
+#define HFI_COLOR_FORMAT_YUV420_TP10					\
+		(HFI_COLOR_FORMAT_10_BIT_BASE + HFI_COLOR_FORMAT_NV12)
+
+#define HFI_COLOR_FORMAT_NV12_UBWC					\
+		(HFI_COLOR_FORMAT_UBWC_BASE + HFI_COLOR_FORMAT_NV12)
+
+#define HFI_COLOR_FORMAT_YUV420_TP10_UBWC				\
+		(HFI_COLOR_FORMAT_UBWC_BASE + HFI_COLOR_FORMAT_YUV420_TP10)
+
+#define HFI_COLOR_FORMAT_RGBA8888_UBWC					\
+		(HFI_COLOR_FORMAT_UBWC_BASE + HFI_COLOR_FORMAT_RGBA8888)
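+
+/*
+ * The bases above compose additively, so a compressed 10-bit variant is
+ * expressed as the sum of its parts, e.g.:
+ *
+ *	HFI_COLOR_FORMAT_YUV420_TP10_UBWC
+ *		== HFI_COLOR_FORMAT_UBWC_BASE
+ *		 + HFI_COLOR_FORMAT_10_BIT_BASE
+ *		 + HFI_COLOR_FORMAT_NV12
+ */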
+
+#define HFI_MAX_MATRIX_COEFFS 9
+#define HFI_MAX_BIAS_COEFFS 3
+#define HFI_MAX_LIMIT_COEFFS 6
+
+#define HFI_STATISTICS_MODE_DEFAULT 0x10
+#define HFI_STATISTICS_MODE_1 0x11
+#define HFI_STATISTICS_MODE_2 0x12
+#define HFI_STATISTICS_MODE_3 0x13
+
+struct hfi_uncompressed_format_select {
+	u32 buffer_type;
+	u32 format;
+};
+
+struct hfi_uncompressed_format_supported {
+	u32 buffer_type;
+	u32 format_entries;
+	u32 rg_format_info[1];
+};
+
+struct hfi_uncompressed_plane_actual {
+	u32 actual_stride;
+	u32 actual_plane_buffer_height;
+};
+
+struct hfi_uncompressed_plane_actual_info {
+	u32 buffer_type;
+	u32 num_planes;
+	struct hfi_uncompressed_plane_actual rg_plane_format[1];
+};
+
+struct hfi_uncompressed_plane_constraints {
+	u32 stride_multiples;
+	u32 max_stride;
+	u32 min_plane_buffer_height_multiple;
+	u32 buffer_alignment;
+};
+
+struct hfi_uncompressed_plane_info {
+	u32 format;
+	u32 num_planes;
+	struct hfi_uncompressed_plane_constraints rg_plane_format[1];
+};
+
+struct hfi_codec_supported {
+	u32 decoder_codec_supported;
+	u32 encoder_codec_supported;
+};
+
+struct hfi_properties_supported {
+	u32 num_properties;
+	u32 rg_properties[1];
+};
+
+struct hfi_max_sessions_supported {
+	u32 max_sessions;
+};
+
+struct hfi_vpe_color_space_conversion {
+	u32 csc_matrix[HFI_MAX_MATRIX_COEFFS];
+	u32 csc_bias[HFI_MAX_BIAS_COEFFS];
+	u32 csc_limit[HFI_MAX_LIMIT_COEFFS];
+};
+
+struct hfi_scs_threshold {
+	u32 threshold_value;
+};
+
+#define HFI_ROTATE_NONE					(HFI_COMMON_BASE + 0x1)
+#define HFI_ROTATE_90					(HFI_COMMON_BASE + 0x2)
+#define HFI_ROTATE_180					(HFI_COMMON_BASE + 0x3)
+#define HFI_ROTATE_270					(HFI_COMMON_BASE + 0x4)
+
+#define HFI_FLIP_NONE					(HFI_COMMON_BASE + 0x1)
+#define HFI_FLIP_HORIZONTAL				(HFI_COMMON_BASE + 0x2)
+#define HFI_FLIP_VERTICAL				(HFI_COMMON_BASE + 0x3)
+
+struct hfi_operations {
+	u32 rotate;
+	u32 flip;
+};
+
+#define HFI_RESOURCE_OCMEM 0x00000001
+
+struct hfi_resource_ocmem {
+	u32 size;
+	u32 mem;
+};
+
+struct hfi_resource_ocmem_requirement {
+	u32 session_domain;
+	u32 width;
+	u32 height;
+	u32 size;
+};
+
+struct hfi_resource_ocmem_requirement_info {
+	u32 num_entries;
+	struct hfi_resource_ocmem_requirement rg_requirements[1];
+};
+
+struct hfi_property_sys_image_version_info_type {
+	u32 string_size;
+	u8  str_image_version[1];
+};
+
+struct hfi_venc_config_advanced {
+	u8 pipe2d;
+	u8 hw_mode;
+	u8 low_delay_enforce;
+	u8 worker_vppsg_delay;
+	u32 close_gop;
+	u32 h264_constrain_intra_pred;
+	u32 h264_transform_8x8_flag;
+	u32 mpeg4_qpel_enable;
+	u32 multi_refp_en;
+	u32 qmatrix_en;
+	u8 vpp_info_packet_mode;
+	u8 ref_tile_mode;
+	u8 bitstream_flush_mode;
+	u32 vppsg_vspap_fb_sync_delay;
+	u32 rc_initial_delay;
+	u32 peak_bitrate_constraint;
+	u32 ds_display_frame_width;
+	u32 ds_display_frame_height;
+	u32 perf_tune_param_ptr;
+	u32 input_x_offset;
+	u32 input_y_offset;
+	u32 input_roi_width;
+	u32 input_roi_height;
+	u32 vsp_fifo_dma_sel;
+	u32 h264_num_ref_frames;
+};
+
+struct hfi_vbv_hrd_bufsize {
+	u32 buffer_size;
+};
+
+struct hfi_codec_mask_supported {
+	u32 codecs;
+	u32 video_domains;
+};
+
+struct hfi_seq_header_info {
+	u32 max_header_len;
+};
+
+struct hfi_aspect_ratio {
+	u32 aspect_width;
+	u32 aspect_height;
+};
+
+#define HFI_IFRAME_SIZE_DEFAULT			(HFI_COMMON_BASE + 0x1)
+#define HFI_IFRAME_SIZE_MEDIUM			(HFI_COMMON_BASE + 0x2)
+#define HFI_IFRAME_SIZE_HIGH			(HFI_COMMON_BASE + 0x3)
+#define HFI_IFRAME_SIZE_UNLIMITED		(HFI_COMMON_BASE + 0x4)
+struct hfi_iframe_size {
+	u32 type;
+};
+
+#define HFI_MVC_BUFFER_LAYOUT_TOP_BOTTOM  (0)
+#define HFI_MVC_BUFFER_LAYOUT_SIDEBYSIDE  (1)
+#define HFI_MVC_BUFFER_LAYOUT_SEQ         (2)
+struct hfi_mvc_buffer_layout_descp_type {
+	u32    layout_type;
+	u32    bright_view_first;
+	u32    ngap;
+};
+
+
+#define HFI_CMD_SYS_COMMON_START			\
+(HFI_DOMAIN_BASE_COMMON + HFI_ARCH_COMMON_OFFSET + HFI_CMD_START_OFFSET \
+	+ 0x0000)
+#define HFI_CMD_SYS_INIT		(HFI_CMD_SYS_COMMON_START + 0x001)
+#define HFI_CMD_SYS_PC_PREP		(HFI_CMD_SYS_COMMON_START + 0x002)
+#define HFI_CMD_SYS_SET_RESOURCE	(HFI_CMD_SYS_COMMON_START + 0x003)
+#define HFI_CMD_SYS_RELEASE_RESOURCE (HFI_CMD_SYS_COMMON_START + 0x004)
+#define HFI_CMD_SYS_SET_PROPERTY	(HFI_CMD_SYS_COMMON_START + 0x005)
+#define HFI_CMD_SYS_GET_PROPERTY	(HFI_CMD_SYS_COMMON_START + 0x006)
+#define HFI_CMD_SYS_SESSION_INIT	(HFI_CMD_SYS_COMMON_START + 0x007)
+#define HFI_CMD_SYS_SESSION_END		(HFI_CMD_SYS_COMMON_START + 0x008)
+#define HFI_CMD_SYS_SET_BUFFERS		(HFI_CMD_SYS_COMMON_START + 0x009)
+#define HFI_CMD_SYS_TEST_START		(HFI_CMD_SYS_COMMON_START + 0x100)
+
+#define HFI_CMD_SESSION_COMMON_START		\
+	(HFI_DOMAIN_BASE_COMMON + HFI_ARCH_COMMON_OFFSET +	\
+	HFI_CMD_START_OFFSET + 0x1000)
+#define HFI_CMD_SESSION_SET_PROPERTY		\
+	(HFI_CMD_SESSION_COMMON_START + 0x001)
+#define HFI_CMD_SESSION_SET_BUFFERS			\
+	(HFI_CMD_SESSION_COMMON_START + 0x002)
+#define HFI_CMD_SESSION_GET_SEQUENCE_HEADER	\
+	(HFI_CMD_SESSION_COMMON_START + 0x003)
+
+#define HFI_MSG_SYS_COMMON_START			\
+	(HFI_DOMAIN_BASE_COMMON + HFI_ARCH_COMMON_OFFSET +	\
+	HFI_MSG_START_OFFSET + 0x0000)
+#define HFI_MSG_SYS_INIT_DONE			(HFI_MSG_SYS_COMMON_START + 0x1)
+#define HFI_MSG_SYS_PC_PREP_DONE		(HFI_MSG_SYS_COMMON_START + 0x2)
+#define HFI_MSG_SYS_RELEASE_RESOURCE	(HFI_MSG_SYS_COMMON_START + 0x3)
+#define HFI_MSG_SYS_DEBUG			(HFI_MSG_SYS_COMMON_START + 0x4)
+#define HFI_MSG_SYS_SESSION_INIT_DONE	(HFI_MSG_SYS_COMMON_START + 0x6)
+#define HFI_MSG_SYS_SESSION_END_DONE	(HFI_MSG_SYS_COMMON_START + 0x7)
+#define HFI_MSG_SYS_IDLE		(HFI_MSG_SYS_COMMON_START + 0x8)
+#define HFI_MSG_SYS_COV                 (HFI_MSG_SYS_COMMON_START + 0x9)
+#define HFI_MSG_SYS_PROPERTY_INFO	(HFI_MSG_SYS_COMMON_START + 0xA)
+#define HFI_MSG_SESSION_SYNC_DONE      (HFI_MSG_SESSION_OX_START + 0xD)
+
+#define HFI_MSG_SESSION_COMMON_START		\
+	(HFI_DOMAIN_BASE_COMMON + HFI_ARCH_COMMON_OFFSET +	\
+	HFI_MSG_START_OFFSET + 0x1000)
+#define HFI_MSG_EVENT_NOTIFY	(HFI_MSG_SESSION_COMMON_START + 0x1)
+#define HFI_MSG_SESSION_GET_SEQUENCE_HEADER_DONE	\
+	(HFI_MSG_SESSION_COMMON_START + 0x2)
+
+#define HFI_CMD_SYS_TEST_SSR	(HFI_CMD_SYS_TEST_START + 0x1)
+#define HFI_TEST_SSR_SW_ERR_FATAL	0x1
+#define HFI_TEST_SSR_SW_DIV_BY_ZERO	0x2
+#define HFI_TEST_SSR_HW_WDOG_IRQ	0x3
+
+struct vidc_hal_cmd_pkt_hdr {
+	u32 size;
+	u32 packet_type;
+};
+
+struct vidc_hal_msg_pkt_hdr {
+	u32 size;
+	u32 packet;
+};
+
+struct vidc_hal_session_cmd_pkt {
+	u32 size;
+	u32 packet_type;
+	u32 session_id;
+};
+
+struct hfi_cmd_sys_init_packet {
+	u32 size;
+	u32 packet_type;
+	u32 arch_type;
+};
+
+struct hfi_cmd_sys_pc_prep_packet {
+	u32 size;
+	u32 packet_type;
+};
+
+struct hfi_cmd_sys_set_resource_packet {
+	u32 size;
+	u32 packet_type;
+	u32 resource_handle;
+	u32 resource_type;
+	u32 rg_resource_data[1];
+};
+
+struct hfi_cmd_sys_release_resource_packet {
+	u32 size;
+	u32 packet_type;
+	u32 resource_type;
+	u32 resource_handle;
+};
+
+struct hfi_cmd_sys_set_property_packet {
+	u32 size;
+	u32 packet_type;
+	u32 num_properties;
+	u32 rg_property_data[1];
+};
+
+struct hfi_cmd_sys_get_property_packet {
+	u32 size;
+	u32 packet_type;
+	u32 num_properties;
+	u32 rg_property_data[1];
+};
+
+struct hfi_cmd_sys_session_init_packet {
+	u32 size;
+	u32 packet_type;
+	u32 session_id;
+	u32 session_domain;
+	u32 session_codec;
+};
+
+struct hfi_cmd_sys_session_end_packet {
+	u32 size;
+	u32 packet_type;
+	u32 session_id;
+};
+
+struct hfi_cmd_sys_set_buffers_packet {
+	u32 size;
+	u32 packet_type;
+	u32 buffer_type;
+	u32 buffer_size;
+	u32 num_buffers;
+	u32 rg_buffer_addr[1];
+};
+
+struct hfi_cmd_session_set_property_packet {
+	u32 size;
+	u32 packet_type;
+	u32 session_id;
+	u32 num_properties;
+	u32 rg_property_data[0];
+};
+
+struct hfi_cmd_session_set_buffers_packet {
+	u32 size;
+	u32 packet_type;
+	u32 session_id;
+	u32 buffer_type;
+	u32 buffer_size;
+	u32 extra_data_size;
+	u32 min_buffer_size;
+	u32 num_buffers;
+	u32 rg_buffer_info[1];
+};
+
+struct hfi_cmd_session_get_sequence_header_packet {
+	u32 size;
+	u32 packet_type;
+	u32 session_id;
+	u32 buffer_len;
+	u32 packet_buffer;
+};
+
+struct hfi_cmd_session_sync_process_packet {
+	u32 size;
+	u32 packet_type;
+	u32 session_id;
+	u32 sync_id;
+	u32 rg_data[1];
+};
+
+struct hfi_msg_event_notify_packet {
+	u32 size;
+	u32 packet_type;
+	u32 session_id;
+	u32 event_id;
+	u32 event_data1;
+	u32 event_data2;
+	u32 rg_ext_event_data[1];
+};
+
+struct hfi_msg_release_buffer_ref_event_packet {
+	u32 packet_buffer;
+	u32 extra_data_buffer;
+	u32 output_tag;
+};
+
+struct hfi_msg_sys_init_done_packet {
+	u32 size;
+	u32 packet_type;
+	u32 error_type;
+	u32 num_properties;
+	u32 rg_property_data[1];
+};
+
+struct hfi_msg_sys_pc_prep_done_packet {
+	u32 size;
+	u32 packet_type;
+	u32 error_type;
+};
+
+struct hfi_msg_sys_release_resource_done_packet {
+	u32 size;
+	u32 packet_type;
+	u32 resource_handle;
+	u32 error_type;
+};
+
+struct hfi_msg_sys_session_init_done_packet {
+	u32 size;
+	u32 packet_type;
+	u32 session_id;
+	u32 error_type;
+	u32 num_properties;
+	u32 rg_property_data[1];
+};
+
+struct hfi_msg_sys_session_end_done_packet {
+	u32 size;
+	u32 packet_type;
+	u32 session_id;
+	u32 error_type;
+};
+
+struct hfi_msg_session_get_sequence_header_done_packet {
+	u32 size;
+	u32 packet_type;
+	u32 session_id;
+	u32 error_type;
+	u32 header_len;
+	u32 sequence_header;
+};
+
+struct hfi_msg_sys_debug_packet {
+	u32 size;
+	u32 packet_type;
+	u32 msg_type;
+	u32 msg_size;
+	u32 time_stamp_hi;
+	u32 time_stamp_lo;
+	u8 rg_msg_data[1];
+};
+
+struct hfi_msg_sys_coverage_packet {
+	u32 size;
+	u32 packet_type;
+	u32 msg_size;
+	u32 time_stamp_hi;
+	u32 time_stamp_lo;
+	u8 rg_msg_data[1];
+};
+
+enum HFI_VENUS_QTBL_STATUS {
+	HFI_VENUS_QTBL_DISABLED = 0x00,
+	HFI_VENUS_QTBL_ENABLED = 0x01,
+	HFI_VENUS_QTBL_INITIALIZING = 0x02,
+	HFI_VENUS_QTBL_DEINITIALIZING = 0x03
+};
+
+enum HFI_VENUS_CTRL_INIT_STATUS {
+	HFI_VENUS_CTRL_NOT_INIT = 0x0,
+	HFI_VENUS_CTRL_READY = 0x1,
+	HFI_VENUS_CTRL_ERROR_FATAL = 0x2
+};
+
+struct hfi_sfr_struct {
+	u32 bufSize;
+	u8 rg_data[1];
+};
+
+struct hfi_cmd_sys_test_ssr_packet {
+	u32 size;
+	u32 packet_type;
+	u32 trigger_type;
+};
+#endif
diff -Nruw linux-4.4.115-fbx/drivers/media/platform/msm./vidc/vidc_hfi_io.h linux-4.4.115-fbx/drivers/media/platform/msm/vidc/vidc_hfi_io.h
--- linux-4.4.115-fbx/drivers/media/platform/msm./vidc/vidc_hfi_io.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/media/platform/msm/vidc/vidc_hfi_io.h	2019-01-22 16:16:24.471255172 +0100
@@ -0,0 +1,191 @@
+/* Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __VIDC_HFI_IO_H__
+#define __VIDC_HFI_IO_H__
+
+#include <linux/io.h>
+
+#define VENUS_VCODEC_SS_CLOCK_HALT     0x0000000C
+#define VENUS_VPP_CORE_SW_RESET        0x00042004
+#define VENUS_VPP_CTRL_CTRL_RESET      0x00041008
+
+#define VIDC_VBIF_BASE_OFFS			0x00080000
+#define VIDC_VBIF_VERSION			(VIDC_VBIF_BASE_OFFS + 0x00)
+#define VIDC_VENUS_VBIF_DDR_OUT_MAX_BURST		\
+			(VIDC_VBIF_BASE_OFFS + 0xD8)
+#define VIDC_VENUS_VBIF_OCMEM_OUT_MAX_BURST		\
+			(VIDC_VBIF_BASE_OFFS + 0xDC)
+#define VIDC_VENUS_VBIF_ROUND_ROBIN_QOS_ARB		\
+			(VIDC_VBIF_BASE_OFFS + 0x124)
+
+#define VIDC_CPU_BASE_OFFS			0x000C0000
+#define VIDC_CPU_CS_BASE_OFFS		(VIDC_CPU_BASE_OFFS + 0x00012000)
+#define VIDC_CPU_IC_BASE_OFFS		(VIDC_CPU_BASE_OFFS + 0x0001F000)
+
+#define VIDC_CPU_CS_REMAP_OFFS		(VIDC_CPU_CS_BASE_OFFS + 0x00)
+#define VIDC_CPU_CS_TIMER_CONTROL	(VIDC_CPU_CS_BASE_OFFS + 0x04)
+#define VIDC_CPU_CS_A2HSOFTINTEN	(VIDC_CPU_CS_BASE_OFFS + 0x10)
+#define VIDC_CPU_CS_A2HSOFTINTENCLR	(VIDC_CPU_CS_BASE_OFFS + 0x14)
+#define VIDC_CPU_CS_A2HSOFTINT		(VIDC_CPU_CS_BASE_OFFS + 0x18)
+#define VIDC_CPU_CS_A2HSOFTINTCLR	(VIDC_CPU_CS_BASE_OFFS + 0x1C)
+#define VIDC_CPU_CS_SCIACMD			(VIDC_CPU_CS_BASE_OFFS + 0x48)
+
+/* HFI_CTRL_STATUS */
+#define VIDC_CPU_CS_SCIACMDARG0		(VIDC_CPU_CS_BASE_OFFS + 0x4C)
+#define VIDC_CPU_CS_SCIACMDARG0_BMSK	0xff
+#define VIDC_CPU_CS_SCIACMDARG0_SHFT	0x0
+#define VIDC_CPU_CS_SCIACMDARG0_HFI_CTRL_ERROR_STATUS_BMSK	0xfe
+#define VIDC_CPU_CS_SCIACMDARG0_HFI_CTRL_ERROR_STATUS_SHFT	0x1
+#define VIDC_CPU_CS_SCIACMDARG0_HFI_CTRL_INIT_STATUS_BMSK	0x1
+#define VIDC_CPU_CS_SCIACMDARG0_HFI_CTRL_INIT_STATUS_SHFT	0x0
+#define VIDC_CPU_CS_SCIACMDARG0_HFI_CTRL_PC_READY           0x100
+#define VIDC_CPU_CS_SCIACMDARG0_HFI_CTRL_INIT_IDLE_MSG_BMSK     0x40000000
+
+/* HFI_QTBL_INFO */
+#define VIDC_CPU_CS_SCIACMDARG1		(VIDC_CPU_CS_BASE_OFFS + 0x50)
+
+/* HFI_QTBL_ADDR */
+#define VIDC_CPU_CS_SCIACMDARG2		(VIDC_CPU_CS_BASE_OFFS + 0x54)
+
+/* HFI_VERSION_INFO */
+#define VIDC_CPU_CS_SCIACMDARG3		(VIDC_CPU_CS_BASE_OFFS + 0x58)
+#define VIDC_CPU_IC_IRQSTATUS		(VIDC_CPU_IC_BASE_OFFS + 0x00)
+#define VIDC_CPU_IC_FIQSTATUS		(VIDC_CPU_IC_BASE_OFFS + 0x04)
+#define VIDC_CPU_IC_RAWINTR			(VIDC_CPU_IC_BASE_OFFS + 0x08)
+#define VIDC_CPU_IC_INTSELECT		(VIDC_CPU_IC_BASE_OFFS + 0x0C)
+#define VIDC_CPU_IC_INTENABLE		(VIDC_CPU_IC_BASE_OFFS + 0x10)
+#define VIDC_CPU_IC_INTENACLEAR		(VIDC_CPU_IC_BASE_OFFS + 0x14)
+#define VIDC_CPU_IC_SOFTINT			(VIDC_CPU_IC_BASE_OFFS + 0x18)
+#define VIDC_CPU_IC_SOFTINT_H2A_BMSK	0x8000
+#define VIDC_CPU_IC_SOFTINT_H2A_SHFT	0xF
+#define VIDC_CPU_IC_SOFTINTCLEAR	(VIDC_CPU_IC_BASE_OFFS + 0x1C)
+
+/*---------------------------------------------------------------------------
+ * MODULE: vidc_wrapper
+ *--------------------------------------------------------------------------*/
+#define VIDC_WRAPPER_BASE_OFFS		0x000E0000
+
+#define VIDC_WRAPPER_HW_VERSION		(VIDC_WRAPPER_BASE_OFFS + 0x00)
+#define VIDC_WRAPPER_HW_VERSION_MAJOR_VERSION_MASK  0x78000000
+#define VIDC_WRAPPER_HW_VERSION_MAJOR_VERSION_SHIFT 28
+#define VIDC_WRAPPER_HW_VERSION_MINOR_VERSION_MASK  0xFFF0000
+#define VIDC_WRAPPER_HW_VERSION_MINOR_VERSION_SHIFT 16
+#define VIDC_WRAPPER_HW_VERSION_STEP_VERSION_MASK   0xFFFF
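+/*
+ * Decoding VIDC_WRAPPER_HW_VERSION with the masks above (illustrative):
+ *
+ *	major = (ver & VIDC_WRAPPER_HW_VERSION_MAJOR_VERSION_MASK) >>
+ *			VIDC_WRAPPER_HW_VERSION_MAJOR_VERSION_SHIFT;
+ *	minor = (ver & VIDC_WRAPPER_HW_VERSION_MINOR_VERSION_MASK) >>
+ *			VIDC_WRAPPER_HW_VERSION_MINOR_VERSION_SHIFT;
+ *	step  =  ver & VIDC_WRAPPER_HW_VERSION_STEP_VERSION_MASK;
+ */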
+#define VIDC_WRAPPER_CLOCK_CONFIG	(VIDC_WRAPPER_BASE_OFFS + 0x04)
+
+#define VIDC_WRAPPER_INTR_STATUS	(VIDC_WRAPPER_BASE_OFFS + 0x0C)
+#define VIDC_WRAPPER_INTR_STATUS_A2HWD_BMSK	0x10
+#define VIDC_WRAPPER_INTR_STATUS_A2HWD_SHFT	0x4
+#define VIDC_WRAPPER_INTR_STATUS_A2H_BMSK	0x4
+#define VIDC_WRAPPER_INTR_STATUS_A2H_SHFT	0x2
+
+#define VIDC_WRAPPER_INTR_MASK		(VIDC_WRAPPER_BASE_OFFS + 0x10)
+#define VIDC_WRAPPER_INTR_MASK_A2HWD_BMSK	0x10
+#define VIDC_WRAPPER_INTR_MASK_A2HWD_SHFT	0x4
+#define VIDC_WRAPPER_INTR_MASK_A2HVCODEC_BMSK	0x8
+#define VIDC_WRAPPER_INTR_MASK_A2HVCODEC_SHFT	0x3
+#define VIDC_WRAPPER_INTR_MASK_A2HCPU_BMSK	0x4
+#define VIDC_WRAPPER_INTR_MASK_A2HCPU_SHFT	0x2
+
+#define VIDC_WRAPPER_INTR_CLEAR		(VIDC_WRAPPER_BASE_OFFS + 0x14)
+#define VIDC_WRAPPER_INTR_CLEAR_A2HWD_BMSK	0x10
+#define VIDC_WRAPPER_INTR_CLEAR_A2HWD_SHFT	0x4
+#define VIDC_WRAPPER_INTR_CLEAR_A2H_BMSK	0x4
+#define VIDC_WRAPPER_INTR_CLEAR_A2H_SHFT	0x2
+
+#define VIDC_WRAPPER_VBIF_XIN_SW_RESET	(VIDC_WRAPPER_BASE_OFFS + 0x18)
+#define VIDC_WRAPPER_VBIF_XIN_STATUS	(VIDC_WRAPPER_BASE_OFFS + 0x1C)
+#define VIDC_WRAPPER_CPU_CLOCK_CONFIG	(VIDC_WRAPPER_BASE_OFFS + 0x2000)
+#define VIDC_WRAPPER_VBIF_XIN_CPU_SW_RESET	\
+				(VIDC_WRAPPER_BASE_OFFS + 0x2004)
+#define VIDC_WRAPPER_AXI_HALT		(VIDC_WRAPPER_BASE_OFFS + 0x2008)
+#define VIDC_WRAPPER_AXI_HALT_STATUS	(VIDC_WRAPPER_BASE_OFFS + 0x200C)
+#define VIDC_WRAPPER_CPU_CGC_DIS	(VIDC_WRAPPER_BASE_OFFS + 0x2010)
+#define VIDC_WRAPPER_CPU_STATUS (VIDC_WRAPPER_BASE_OFFS + 0x2014)
+#define VIDC_VENUS_VBIF_CLK_ON		(VIDC_VBIF_BASE_OFFS + 0x4)
+#define VIDC_VBIF_IN_RD_LIM_CONF0       (VIDC_VBIF_BASE_OFFS + 0xB0)
+#define VIDC_VBIF_IN_RD_LIM_CONF1       (VIDC_VBIF_BASE_OFFS + 0xB4)
+#define VIDC_VBIF_IN_RD_LIM_CONF2       (VIDC_VBIF_BASE_OFFS + 0xB8)
+#define VIDC_VBIF_IN_RD_LIM_CONF3       (VIDC_VBIF_BASE_OFFS + 0xBC)
+#define VIDC_VBIF_IN_WR_LIM_CONF0       (VIDC_VBIF_BASE_OFFS + 0xC0)
+#define VIDC_VBIF_IN_WR_LIM_CONF1       (VIDC_VBIF_BASE_OFFS + 0xC4)
+#define VIDC_VBIF_IN_WR_LIM_CONF2       (VIDC_VBIF_BASE_OFFS + 0xC8)
+#define VIDC_VBIF_IN_WR_LIM_CONF3       (VIDC_VBIF_BASE_OFFS + 0xCC)
+#define VIDC_VBIF_OUT_RD_LIM_CONF0      (VIDC_VBIF_BASE_OFFS + 0xD0)
+#define VIDC_VBIF_OUT_WR_LIM_CONF0      (VIDC_VBIF_BASE_OFFS + 0xD4)
+#define VIDC_VBIF_DDR_OUT_MAX_BURST     (VIDC_VBIF_BASE_OFFS + 0xD8)
+#define VIDC_VBIF_OCMEM_OUT_MAX_BURST   (VIDC_VBIF_BASE_OFFS + 0xDC)
+#define VIDC_VBIF_DDR_ARB_CONF0         (VIDC_VBIF_BASE_OFFS + 0xF4)
+#define VIDC_VBIF_DDR_ARB_CONF1         (VIDC_VBIF_BASE_OFFS + 0xF8)
+#define VIDC_VBIF_ROUND_ROBIN_QOS_ARB   (VIDC_VBIF_BASE_OFFS + 0x124)
+#define VIDC_VBIF_OUT_AXI_AOOO_EN       (VIDC_VBIF_BASE_OFFS + 0x178)
+#define VIDC_VBIF_OUT_AXI_AOOO          (VIDC_VBIF_BASE_OFFS + 0x17C)
+#define VIDC_VBIF_ARB_CTL               (VIDC_VBIF_BASE_OFFS + 0xF0)
+#define VIDC_VBIF_OUT_AXI_AMEMTYPE_CONF0 (VIDC_VBIF_BASE_OFFS + 0x160)
+#define VIDC_VBIF_OUT_AXI_AMEMTYPE_CONF1 (VIDC_VBIF_BASE_OFFS + 0x164)
+#define VIDC_VBIF_ADDR_TRANS_EN         (VIDC_VBIF_BASE_OFFS + 0xC00)
+#define VIDC_VBIF_AT_OLD_BASE           (VIDC_VBIF_BASE_OFFS + 0xC04)
+#define VIDC_VBIF_AT_OLD_HIGH           (VIDC_VBIF_BASE_OFFS + 0xC08)
+#define VIDC_VBIF_AT_NEW_BASE           (VIDC_VBIF_BASE_OFFS + 0xC10)
+#define VIDC_VBIF_AT_NEW_HIGH           (VIDC_VBIF_BASE_OFFS + 0xC18)
+#define VENUS_VBIF_AXI_HALT_CTRL0   (VIDC_VBIF_BASE_OFFS + 0x208)
+#define VENUS_VBIF_AXI_HALT_CTRL1   (VIDC_VBIF_BASE_OFFS + 0x20C)
+
+#define VENUS_VBIF_AXI_HALT_CTRL0_HALT_REQ		BIT(0)
+#define VENUS_VBIF_AXI_HALT_CTRL1_HALT_ACK		BIT(0)
+#define VENUS_VBIF_AXI_HALT_ACK_TIMEOUT_US		500000
+
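+/*
+ * Expected use of the three definitions above (a sketch, mirroring the
+ * usual VBIF halt sequence): set HALT_REQ in VENUS_VBIF_AXI_HALT_CTRL0,
+ * then poll VENUS_VBIF_AXI_HALT_CTRL1 for HALT_ACK, giving up after
+ * VENUS_VBIF_AXI_HALT_ACK_TIMEOUT_US microseconds.
+ */
+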
+#define VIDC_VENUS0_WRAPPER_VBIF_REQ_PRIORITY \
+	(VIDC_WRAPPER_BASE_OFFS + 0x20)
+#define VIDC_VENUS0_WRAPPER_VBIF_PRIORITY_LEVEL \
+	(VIDC_WRAPPER_BASE_OFFS + 0x24)
+
+#define VIDC_CTRL_INIT 0x000D2048
+#define VIDC_CTRL_INIT_RESERVED_BITS31_1__M 0xFFFFFFFE
+#define VIDC_CTRL_INIT_RESERVED_BITS31_1__S 1
+#define VIDC_CTRL_INIT_CTRL__M 0x00000001
+#define VIDC_CTRL_INIT_CTRL__S 0
+
+#define VIDC_CTRL_STATUS 0x000D204C
+#define VIDC_CTRL_STATUS_RESERVED_BITS31_8__M 0xFFFFFF00
+#define VIDC_CTRL_STATUS_RESERVED_BITS31_8__S 8
+#define VIDC_CTRL_ERROR_STATUS__M             0x000000FE
+#define VIDC_CTRL_ERROR_STATUS__S             1
+#define VIDC_CTRL_INIT_STATUS__M              0x00000001
+#define VIDC_CTRL_INIT_STATUS__S              0
+
+#define VIDC_QTBL_INFO 0x000D2050
+#define VIDC_QTBL_HOSTID__M 0xFF000000
+#define VIDC_QTBL_HOSTID__S 24
+#define VIDC_QTBL_INFO_RESERVED_BITS23_8__M 0x00FFFF00
+#define VIDC_QTBL_INFO_RESERVED_BITS23_8__S 8
+#define VIDC_QTBL_STATUS__M 0x000000FF
+#define VIDC_QTBL_STATUS__S 0
+
+#define VIDC_QTBL_ADDR 0x000D2054
+
+#define VIDC_VERSION_INFO 0x000D2058
+#define VIDC_VERSION_INFO_MAJOR__M  0xF0000000
+#define VIDC_VERSION_INFO_MAJOR__S  28
+#define VIDC_VERSION_INFO_MINOR__M  0x0FFFFFE0
+#define VIDC_VERSION_INFO_MINOR__S  5
+#define VIDC_VERSION_INFO_BRANCH__M 0x0000001F
+#define VIDC_VERSION_INFO_BRANCH__S 0
+
+#define VIDC_SFR_ADDR 0x000D205C
+#define VIDC_MMAP_ADDR 0x000D2060
+#define VIDC_UC_REGION_ADDR 0x000D2064
+#define VIDC_UC_REGION_SIZE 0x000D2068
+
+#endif
diff -Nruw linux-4.4.115-fbx/drivers/media/platform/msm./vidc/vmem/Kconfig linux-4.4.115-fbx/drivers/media/platform/msm/vidc/vmem/Kconfig
--- linux-4.4.115-fbx/drivers/media/platform/msm./vidc/vmem/Kconfig	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/media/platform/msm/vidc/vmem/Kconfig	2019-01-22 16:16:24.471255172 +0100
@@ -0,0 +1,3 @@
+menuconfig MSM_VIDC_VMEM
+	tristate "Qualcomm Technologies Inc MSM VMEM driver"
+	depends on ARCH_QCOM && MSM_VIDC_V4L2
diff -Nruw linux-4.4.115-fbx/drivers/media/platform/msm./vidc/vmem/Makefile linux-4.4.115-fbx/drivers/media/platform/msm/vidc/vmem/Makefile
--- linux-4.4.115-fbx/drivers/media/platform/msm./vidc/vmem/Makefile	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/media/platform/msm/vidc/vmem/Makefile	2019-01-22 16:16:24.471255172 +0100
@@ -0,0 +1,7 @@
+ccflags-y += -I$(srctree)/drivers/media/platform/msm/vidc/
+ccflags-y += -I$(srctree)/drivers/media/platform/msm/vidc/vmem/
+
+msm-vidc-vmem-objs := vmem.o \
+                      vmem_debugfs.o
+
+obj-$(CONFIG_MSM_VIDC_VMEM) := msm-vidc-vmem.o
diff -Nruw linux-4.4.115-fbx/drivers/media/platform/msm./vidc/vmem/vmem.c linux-4.4.115-fbx/drivers/media/platform/msm/vidc/vmem/vmem.c
--- linux-4.4.115-fbx/drivers/media/platform/msm./vidc/vmem/vmem.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/media/platform/msm/vidc/vmem/vmem.c	2019-01-22 16:16:24.471255172 +0100
@@ -0,0 +1,739 @@
+/*
+ * Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <linux/clk/msm-clk.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/msm-bus.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+#include <linux/slab.h>
+#include <linux/workqueue.h>
+#include "vmem.h"
+#include "vmem_debugfs.h"
+
+/* Registers */
+#define OCIMEM_BASE(v)               ((uint8_t *)(v)->reg.base)
+#define OCIMEM_HW_VERSION(v)         (OCIMEM_BASE(v) + 0x00)
+#define OCIMEM_HW_PROFILE(v)         (OCIMEM_BASE(v) + 0x04)
+#define OCIMEM_GEN_CTL(v)            (OCIMEM_BASE(v) + 0x08)
+#define OCIMEM_GEN_STAT(v)           (OCIMEM_BASE(v) + 0x0C)
+#define OCIMEM_INTC_CLR(v)           (OCIMEM_BASE(v) + 0x10)
+#define OCIMEM_INTC_MASK(v)          (OCIMEM_BASE(v) + 0x14)
+#define OCIMEM_INTC_STAT(v)          (OCIMEM_BASE(v) + 0x18)
+#define OCIMEM_OSW_STATUS(v)         (OCIMEM_BASE(v) + 0x1C)
+#define OCIMEM_PSCGC_TIMERS(v)       (OCIMEM_BASE(v) + 0x34)
+#define OCIMEM_PSCGC_STAT(v)         (OCIMEM_BASE(v) + 0x38)
+#define OCIMEM_PSCGC_M0_M7_CTL(v)    (OCIMEM_BASE(v) + 0x3C)
+#define OCIMEM_ERR_ADDRESS(v)        (OCIMEM_BASE(v) + 0x60)
+#define OCIMEM_AXI_ERR_SYNDROME(v)   (OCIMEM_BASE(v) + 0x64)
+#define OCIMEM_DEBUG_CTL(v)          (OCIMEM_BASE(v) + 0x68)
+
+/*
+ * Helper macro to help out with masks and shifts for values packed into
+ * registers.
+ */
+#define DECLARE_TYPE(__type, __end, __start)                                   \
+	static const unsigned int __type##_BITS = (__end) - (__start) + 1;     \
+	static const unsigned int __type##_SHIFT = (__start);                  \
+	static const unsigned int __type##_MASK = GENMASK((__end), (__start)); \
+	static inline unsigned int __type(uint32_t val)                        \
+	{                                                                      \
+		return (val & __type##_MASK) >> __type##_SHIFT;                \
+	}                                                                      \
+	static inline uint32_t __type##_UPDATE(unsigned int val)               \
+	{                                                                      \
+		return (val << __type##_SHIFT) & __type##_MASK;                \
+	}
+
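+/*
+ * For example, DECLARE_TYPE(BANK0_STATE, 3, 0) below yields
+ * BANK0_STATE_MASK == GENMASK(3, 0), BANK0_STATE_SHIFT == 0, an extractor
+ * BANK0_STATE(reg) that returns the field value, and a packer
+ * BANK0_STATE_UPDATE(val) that shifts a value back into register position.
+ */
+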
+/* Register masks */
+/* OCIMEM_PSCGC_M0_M7_CTL */
+DECLARE_TYPE(BANK0_STATE, 3, 0);
+DECLARE_TYPE(BANK1_STATE, 7, 4);
+DECLARE_TYPE(BANK2_STATE, 11, 8);
+DECLARE_TYPE(BANK3_STATE, 15, 12);
+/* OCIMEM_PSCGC_TIMERS */
+DECLARE_TYPE(TIMERS_WAKEUP, 3, 0);
+DECLARE_TYPE(TIMERS_SLEEP, 11, 8);
+/* OCIMEM_HW_VERSION */
+DECLARE_TYPE(VERSION_STEP, 15, 0);
+DECLARE_TYPE(VERSION_MINOR, 27, 16);
+DECLARE_TYPE(VERSION_MAJOR, 31, 28);
+/* OCIMEM_HW_PROFILE */
+DECLARE_TYPE(PROFILE_BANKS, 16, 12);
+/* OCIMEM_AXI_ERR_SYNDROME */
+DECLARE_TYPE(ERR_SYN_ATID, 14, 8);
+DECLARE_TYPE(ERR_SYN_AMID, 23, 16);
+DECLARE_TYPE(ERR_SYN_APID, 28, 24);
+DECLARE_TYPE(ERR_SYN_ABID, 31, 29);
+/* OCIMEM_INTC_MASK */
+DECLARE_TYPE(AXI_ERR_INT, 0, 0);
+
+/* Internal stuff */
+#define MAX_BANKS 4
+
+enum bank_state {
+	BANK_STATE_NORM_PASSTHRU = 0b000,
+	BANK_STATE_NORM_FORCE_CORE_ON = 0b010,
+	BANK_STATE_NORM_FORCE_PERIPH_ON = 0b001,
+	BANK_STATE_NORM_FORCE_ALL_ON = 0b011,
+	BANK_STATE_SLEEP_RET = 0b110,
+	BANK_STATE_SLEEP_RET_PERIPH_ON = 0b111,
+	BANK_STATE_SLEEP_NO_RET = 0b100,
+};
+
+struct vmem {
+	int irq;
+	int num_banks;
+	u32 bank_size;
+	struct {
+		struct resource *resource;
+		void __iomem *base;
+	} reg, mem;
+	struct regulator *vdd;
+	struct {
+		const char *name;
+		struct clk *clk;
+		bool has_mem_retention;
+	} *clocks;
+	int num_clocks;
+	struct {
+		struct msm_bus_scale_pdata *pdata;
+		uint32_t priv;
+	} bus;
+	atomic_t alloc_count;
+	struct dentry *debugfs_root;
+};
+
+static struct vmem *vmem;
+
+static inline u32 __readl(void __iomem *addr)
+{
+	u32 value = 0;
+
+	pr_debug("read %pK ", addr);
+	value = readl_relaxed(addr);
+	pr_debug("-> %08x\n", value);
+
+	return value;
+}
+
+static inline void __writel(u32 val, void __iomem *addr)
+{
+	pr_debug("write %08x -> %pK\n", val, addr);
+	writel_relaxed(val, addr);
+	/*
+	 * Commit all writes via a mem barrier, as subsequent __readl()
+	 * will depend on the state that's set via __writel().
+	 */
+	mb();
+}
+
+static inline void __wait_timer(struct vmem *v, bool wakeup)
+{
+	uint32_t ticks = 0;
+	unsigned int (*timer)(uint32_t) = wakeup ?
+		TIMERS_WAKEUP : TIMERS_SLEEP;
+
+	ticks = timer(__readl(OCIMEM_PSCGC_TIMERS(v)));
+
+	/* Sleep for `ticks` nanoseconds as per h/w spec */
+	ndelay(ticks);
+}
+
+static inline void __wait_wakeup(struct vmem *v)
+{
+	return __wait_timer(v, true);
+}
+
+static inline void __wait_sleep(struct vmem *v)
+{
+	return __wait_timer(v, false);
+}
+
+static inline int __power_on(struct vmem *v)
+{
+	int rc = 0, c = 0;
+
+	rc = msm_bus_scale_client_update_request(v->bus.priv, 1);
+	if (rc) {
+		pr_err("Failed to vote for buses (%d)\n", rc);
+		goto exit;
+	}
+	pr_debug("Voted for buses\n");
+
+	rc = regulator_enable(v->vdd);
+	if (rc) {
+		pr_err("Failed to power on gdsc (%d)", rc);
+		goto unvote_bus;
+	}
+	pr_debug("Enabled regulator vdd\n");
+
+	for (c = 0; c < v->num_clocks; ++c) {
+		if (v->clocks[c].has_mem_retention) {
+			rc = clk_set_flags(v->clocks[c].clk,
+				       CLKFLAG_NORETAIN_PERIPH);
+			if (rc) {
+				pr_warn("Failed set flag NORETAIN_PERIPH %s\n",
+					v->clocks[c].name);
+			}
+			rc = clk_set_flags(v->clocks[c].clk,
+				       CLKFLAG_NORETAIN_MEM);
+			if (rc) {
+				pr_warn("Failed set flag NORETAIN_MEM %s\n",
+					v->clocks[c].name);
+			}
+		}
+
+		rc = clk_prepare_enable(v->clocks[c].clk);
+		if (rc) {
+			pr_err("Failed to enable %s clock (%d)\n",
+					v->clocks[c].name, rc);
+			goto disable_clocks;
+		}
+
+		pr_debug("Enabled clock %s\n", v->clocks[c].name);
+	}
+
+	return 0;
+disable_clocks:
+	for (--c; c >= 0; c--)
+		clk_disable_unprepare(v->clocks[c].clk);
+	regulator_disable(v->vdd);
+unvote_bus:
+	msm_bus_scale_client_update_request(v->bus.priv, 0);
+exit:
+	return rc;
+}
+
+static inline int __power_off(struct vmem *v)
+{
+	int c = v->num_clocks;
+
+	for (c--; c >= 0; --c) {
+		clk_disable_unprepare(v->clocks[c].clk);
+		pr_debug("Disabled clock %s\n", v->clocks[c].name);
+	}
+
+	regulator_disable(v->vdd);
+	pr_debug("Disabled regulator vdd\n");
+
+	msm_bus_scale_client_update_request(v->bus.priv, 0);
+	pr_debug("Unvoted for buses\n");
+
+	return 0;
+}
+
+static inline enum bank_state __bank_get_state(struct vmem *v,
+		unsigned int bank)
+{
+	unsigned int (*func[MAX_BANKS])(uint32_t) = {
+		BANK0_STATE, BANK1_STATE, BANK2_STATE, BANK3_STATE
+	};
+
+	BUG_ON(bank >= ARRAY_SIZE(func));
+	return func[bank](__readl(OCIMEM_PSCGC_M0_M7_CTL(v)));
+}
+
+static inline void __bank_set_state(struct vmem *v, unsigned int bank,
+		enum bank_state state)
+{
+	uint32_t bank_state = 0;
+	struct {
+		uint32_t (*update)(unsigned int);
+		uint32_t mask;
+	} banks[MAX_BANKS] = {
+		{BANK0_STATE_UPDATE, BANK0_STATE_MASK},
+		{BANK1_STATE_UPDATE, BANK1_STATE_MASK},
+		{BANK2_STATE_UPDATE, BANK2_STATE_MASK},
+		{BANK3_STATE_UPDATE, BANK3_STATE_MASK},
+	};
+
+	BUG_ON(bank >= ARRAY_SIZE(banks));
+
+	bank_state = __readl(OCIMEM_PSCGC_M0_M7_CTL(v));
+	bank_state &= ~banks[bank].mask;
+	bank_state |= banks[bank].update(state);
+
+	__writel(bank_state, OCIMEM_PSCGC_M0_M7_CTL(v));
+}
+
+static inline void __toggle_interrupts(struct vmem *v, bool enable)
+{
+	uint32_t ints = __readl(OCIMEM_INTC_MASK(v)),
+		mask = AXI_ERR_INT_MASK,
+		update = AXI_ERR_INT_UPDATE(!enable);
+
+	ints &= ~mask;
+	ints |= update;
+
+	__writel(ints, OCIMEM_INTC_MASK(v));
+}
+
+static void __enable_interrupts(struct vmem *v)
+{
+	pr_debug("Enabling interrupts\n");
+	enable_irq(v->irq);
+	__toggle_interrupts(v, true);
+}
+
+static void __disable_interrupts(struct vmem *v)
+{
+	pr_debug("Disabling interrupts\n");
+	__toggle_interrupts(v, false);
+	disable_irq_nosync(v->irq);
+}
+
+/**
+ * vmem_allocate: - Allocates memory from VMEM.  Allocations have a few
+ * restrictions: only allocations of the entire VMEM memory are allowed and,
+ * as a result, only a single outstanding allocation is allowed.
+ *
+ * @size: amount of bytes to allocate
+ * @addr: A pointer to phys_addr_t where the physical address of the memory
+ * allocated is stored.
+ *
+ * Return: 0 on successful allocation (i.e. *addr != NULL); -ENOTSUPP if the
+ * platform doesn't support VMEM; -EEXIST if there is already an outstanding
+ * VMEM allocation; -ENOMEM if the platform can't satisfy an allocation of
+ * `size` bytes; -EAGAIN if `size` does not cover the entire VMEM region;
+ * -EIO in case of internal errors.
+ */
+int vmem_allocate(size_t size, phys_addr_t *addr)
+{
+	int rc = 0, c = 0;
+	resource_size_t max_size = 0;
+
+	if (!vmem) {
+		pr_err("No vmem, try rebooting your device\n");
+		rc = -ENOTSUPP;
+		goto exit;
+	}
+	if (!size) {
+		pr_err("%s Invalid size %zu\n", __func__, size);
+		rc = -EINVAL;
+		goto exit;
+	}
+
+	max_size = resource_size(vmem->mem.resource);
+
+	if (atomic_read(&vmem->alloc_count)) {
+		pr_err("Only single allocations allowed for vmem\n");
+		rc = -EEXIST;
+		goto exit;
+	} else if (size > max_size) {
+		pr_err("Out of memory, have max %pa\n", &max_size);
+		rc = -ENOMEM;
+		goto exit;
+	} else if (size != max_size) {
+		pr_err("Only support allocations of size %pa\n", &max_size);
+		rc = -EAGAIN;
+		goto exit;
+	}
+
+	rc = __power_on(vmem);
+	if (rc) {
+		pr_err("Failed power on (%d)\n", rc);
+		goto exit;
+	}
+
+	BUG_ON(vmem->num_banks != DIV_ROUND_UP(size, vmem->bank_size));
+
+	/* Turn on the necessary banks */
+	for (c = 0; c < vmem->num_banks; ++c) {
+		__bank_set_state(vmem, c, BANK_STATE_NORM_FORCE_CORE_ON);
+		__wait_wakeup(vmem);
+	}
+
+	/* Enable interrupts to detect faults */
+	__enable_interrupts(vmem);
+
+	atomic_inc(&vmem->alloc_count);
+	*addr = (phys_addr_t)vmem->mem.resource->start;
+	return 0;
+exit:
+	return rc;
+}
+EXPORT_SYMBOL(vmem_allocate);
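+
+/*
+ * Typical caller (a sketch; vmem_size stands for the full region size
+ * described above and is not defined in this file):
+ *
+ *	phys_addr_t paddr;
+ *	int rc = vmem_allocate(vmem_size, &paddr);
+ *
+ *	if (rc)
+ *		return rc;
+ *	...
+ *	vmem_free(paddr);
+ */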
+
+/**
+ * vmem_free: - Frees the memory allocated via vmem_allocate.  Behaviour is
+ * undefined if to_free is not an address returned by vmem_allocate.
+ */
+void vmem_free(phys_addr_t to_free)
+{
+	int c = 0;
+
+	if (!to_free || !vmem)
+		return;
+
+	BUG_ON(atomic_read(&vmem->alloc_count) == 0);
+
+	for (c = 0; c < vmem->num_banks; ++c) {
+		enum bank_state curr_state = __bank_get_state(vmem, c);
+
+		if (curr_state != BANK_STATE_NORM_FORCE_CORE_ON) {
+			pr_warn("When freeing, expected bank state to be %d, was instead %d\n",
+					BANK_STATE_NORM_FORCE_CORE_ON,
+					curr_state);
+		}
+
+		__bank_set_state(vmem, c, BANK_STATE_SLEEP_NO_RET);
+	}
+
+	__disable_interrupts(vmem);
+	__power_off(vmem);
+	atomic_dec(&vmem->alloc_count);
+}
+EXPORT_SYMBOL(vmem_free);
+
+struct vmem_interrupt_cookie {
+	struct vmem *vmem;
+	struct work_struct work;
+};
+
+static void __irq_helper(struct work_struct *work)
+{
+	struct vmem_interrupt_cookie *cookie = container_of(work,
+			struct vmem_interrupt_cookie, work);
+	struct vmem *v = cookie->vmem;
+	unsigned int stat, gen_stat, pscgc_stat, err_addr_abs,
+		err_addr_rel, err_syn;
+
+	stat = __readl(OCIMEM_INTC_STAT(v));
+	gen_stat = __readl(OCIMEM_GEN_CTL(v));
+	pscgc_stat = __readl(OCIMEM_PSCGC_STAT(v));
+
+	err_addr_abs = __readl(OCIMEM_ERR_ADDRESS(v));
+	err_addr_rel = v->mem.resource->start - err_addr_abs;
+
+	err_syn = __readl(OCIMEM_AXI_ERR_SYNDROME(v));
+
+	pr_crit("Detected a fault on VMEM:\n");
+	pr_cont("\tinterrupt status: %x\n", stat);
+	pr_cont("\tgeneral status: %x\n", gen_stat);
+	pr_cont("\tmemory status: %x\n", pscgc_stat);
+	pr_cont("\tfault address: %x (absolute), %x (relative)\n",
+			err_addr_abs, err_addr_rel);
+	pr_cont("\tfault bank: %x\n", err_addr_rel / v->bank_size);
+	pr_cont("\tfault core: %u (mid), %u (pid), %u (bid)\n",
+			ERR_SYN_AMID(err_syn), ERR_SYN_APID(err_syn),
+			ERR_SYN_ABID(err_syn));
+
+	/* Clear the interrupt */
+	__writel(0, OCIMEM_INTC_CLR(v));
+
+	__enable_interrupts(v);
+}
+
+static struct vmem_interrupt_cookie interrupt_cookie;
+
+static irqreturn_t __irq_handler(int irq, void *cookie)
+{
+	struct vmem *v = cookie;
+	irqreturn_t status = __readl(OCIMEM_INTC_STAT(v)) ?
+		IRQ_HANDLED : IRQ_NONE;
+
+	if (status != IRQ_NONE) {
+		/* Mask further interrupts while handling this one */
+		__disable_interrupts(v);
+
+		interrupt_cookie.vmem = v;
+		INIT_WORK(&interrupt_cookie.work, __irq_helper);
+		schedule_work(&interrupt_cookie.work);
+	}
+
+	return status;
+}
+
+static inline int __init_resources(struct vmem *v,
+		struct platform_device *pdev)
+{
+	int rc = 0, c = 0;
+	int *clock_props = NULL;
+
+	v->irq = platform_get_irq(pdev, 0);
+	if (v->irq < 0) {
+		rc = v->irq;
+		pr_err("Failed to get irq (%d)\n", rc);
+		v->irq = 0;
+		goto exit;
+	}
+
+	/* Registers and memory */
+	v->reg.resource = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+			"reg-base");
+	if (!v->reg.resource) {
+		pr_err("Failed to find register base\n");
+		rc = -ENOENT;
+		goto exit;
+	}
+
+	v->reg.base = devm_ioremap_resource(&pdev->dev, v->reg.resource);
+	if (IS_ERR_OR_NULL(v->reg.base)) {
+		rc = PTR_ERR(v->reg.base) ?: -EIO;
+		pr_err("Failed to map register base into kernel (%d)\n", rc);
+		v->reg.base = NULL;
+		goto exit;
+	}
+
+	pr_debug("Register range: %pa -> %pa\n", &v->reg.resource->start,
+			&v->reg.resource->end);
+
+	v->mem.resource = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+			"mem-base");
+	if (!v->mem.resource) {
+		pr_err("Failed to find memory base\n");
+		rc = -ENOENT;
+		goto exit;
+	}
+
+	v->mem.base = NULL;
+	pr_debug("Memory range: %pa -> %pa\n", &v->mem.resource->start,
+			&v->mem.resource->end);
+
+	/* Buses, Clocks & Regulators*/
+	v->num_clocks = of_property_count_strings(pdev->dev.of_node,
+			"clock-names");
+	if (v->num_clocks <= 0) {
+		pr_err("Can't find any clocks\n");
+		goto exit;
+	}
+
+	v->clocks = devm_kzalloc(&pdev->dev, sizeof(*v->clocks) * v->num_clocks,
+			GFP_KERNEL);
+	if (!v->clocks) {
+		rc = -ENOMEM;
+		goto exit;
+	}
+
+	clock_props = devm_kzalloc(&pdev->dev,
+					v->num_clocks * sizeof(*clock_props),
+					GFP_KERNEL);
+	if (!clock_props) {
+		pr_err("Failed to allocate clock config table\n");
+		rc = -ENOMEM;
+		goto exit;
+	}
+
+	rc = of_property_read_u32_array(pdev->dev.of_node, "clock-config",
+			clock_props, v->num_clocks);
+	if (rc) {
+		pr_err("Failed to read clock config\n");
+		goto exit;
+	}
+
+	for (c = 0; c < v->num_clocks; ++c) {
+		const char *name = NULL;
+		struct clk *temp = NULL;
+
+		of_property_read_string_index(pdev->dev.of_node, "clock-names",
+				c, &name);
+		temp = devm_clk_get(&pdev->dev, name);
+		if (IS_ERR_OR_NULL(temp)) {
+			rc = PTR_ERR(temp) ?: -ENOENT;
+			pr_err("Failed to find %s (%d)\n", name, rc);
+			goto exit;
+		}
+
+		v->clocks[c].clk = temp;
+		v->clocks[c].name = name;
+		v->clocks[c].has_mem_retention = clock_props[c];
+	}
+
+	v->vdd = devm_regulator_get(&pdev->dev, "vdd");
+	if (IS_ERR_OR_NULL(v->vdd)) {
+		rc = PTR_ERR(v->vdd) ?: -ENOENT;
+		pr_err("Failed to find regulator (vdd) (%d)\n", rc);
+		goto exit;
+	}
+
+	v->bus.pdata = msm_bus_cl_get_pdata(pdev);
+	if (IS_ERR_OR_NULL(v->bus.pdata)) {
+		rc = PTR_ERR(v->bus.pdata) ?: -ENOENT;
+		pr_err("Failed to find bus vectors (%d)\n", rc);
+		goto exit;
+	}
+
+	v->bus.priv = msm_bus_scale_register_client(v->bus.pdata);
+	if (!v->bus.priv) {
+		rc = -EBADHANDLE;
+		pr_err("Failed to register bus client\n");
+		goto free_pdata;
+	}
+
+	/* Misc. */
+	rc = of_property_read_u32(pdev->dev.of_node, "qcom,bank-size",
+			&v->bank_size);
+	if (rc || !v->bank_size) {
+		pr_err("Failed reading (or found invalid) qcom,bank-size in %s (%d)\n",
+				of_node_full_name(pdev->dev.of_node), rc);
+		rc = -ENOENT;
+		goto free_pdata;
+	}
+
+	v->num_banks = resource_size(v->mem.resource) / v->bank_size;
+
+	pr_debug("Found configuration with %d banks with size %d\n",
+			v->num_banks, v->bank_size);
+
+	return 0;
+free_pdata:
+	msm_bus_cl_clear_pdata(v->bus.pdata);
+exit:
+	return rc;
+}
+
+static inline void __uninit_resources(struct vmem *v,
+		struct platform_device *pdev)
+{
+	int c = 0;
+
+	msm_bus_cl_clear_pdata(v->bus.pdata);
+	v->bus.pdata = NULL;
+	v->bus.priv = 0;
+
+	for (c = 0; c < v->num_clocks; ++c) {
+		v->clocks[c].clk = NULL;
+		v->clocks[c].name = NULL;
+	}
+
+	v->vdd = NULL;
+}
+
+static int vmem_probe(struct platform_device *pdev)
+{
+	uint32_t version = 0, num_banks = 0;
+	int rc = 0;
+	struct vmem *v = NULL;
+
+	if (vmem) {
+		pr_err("Only one instance of %s allowed", pdev->name);
+		return -EEXIST;
+	}
+
+	v = devm_kzalloc(&pdev->dev, sizeof(*v), GFP_KERNEL);
+	if (!v) {
+		pr_err("Failed allocate context memory in probe\n");
+		return -ENOMEM;
+	}
+
+	rc = __init_resources(v, pdev);
+	if (rc) {
+		pr_err("Failed to read resources\n");
+		goto exit;
+	}
+
+	/*
+	 * For now, only support up to 4 banks. It's unrealistic that VMEM has
+	 * more banks than that (even in the future).
+	 */
+	if (v->num_banks > MAX_BANKS) {
+		pr_err("Number of banks (%d) exceeds what's supported (%d)\n",
+			v->num_banks, MAX_BANKS);
+		rc = -ENOTSUPP;
+		goto exit;
+	}
+
+	/* Cross check the platform resources with what's available on chip */
+	rc = __power_on(v);
+	if (rc) {
+		pr_err("Failed to power on (%d)\n", rc);
+		goto exit;
+	}
+
+	version = __readl(OCIMEM_HW_VERSION(v));
+	pr_debug("v%d.%d.%d\n", VERSION_MAJOR(version), VERSION_MINOR(version),
+			VERSION_STEP(version));
+
+	num_banks = PROFILE_BANKS(__readl(OCIMEM_HW_PROFILE(v)));
+	pr_debug("Found %d banks on chip\n", num_banks);
+	if (v->num_banks != num_banks) {
+		pr_err("Platform configuration of %d banks differs from what's available on chip (%d)\n",
+				v->num_banks, num_banks);
+		rc = -EINVAL;
+		goto disable_clocks;
+	}
+
+	rc = devm_request_irq(&pdev->dev, v->irq, __irq_handler,
+			IRQF_TRIGGER_HIGH, "vmem", v);
+	if (rc) {
+		pr_err("Failed to setup irq (%d)\n", rc);
+		goto disable_clocks;
+	}
+
+	__disable_interrupts(v);
+
+	/* Everything good so far, set up the global context and debug hooks */
+	pr_info("Up and running with %d banks of memory from %pR\n",
+			v->num_banks, &v->mem.resource);
+	v->debugfs_root = vmem_debugfs_init(pdev);
+	platform_set_drvdata(pdev, v);
+	vmem = v;
+
+disable_clocks:
+	__power_off(v);
+exit:
+	return rc;
+}
+
+static int vmem_remove(struct platform_device *pdev)
+{
+	struct vmem *v = platform_get_drvdata(pdev);
+
+	BUG_ON(v != vmem);
+
+	__uninit_resources(v, pdev);
+	vmem_debugfs_deinit(v->debugfs_root);
+	vmem = NULL;
+
+	return 0;
+}
+
+static const struct of_device_id vmem_of_match[] = {
+	{.compatible = "qcom,msm-vmem"},
+	{}
+};
+
+MODULE_DEVICE_TABLE(of, vmem_of_match);
+
+static struct platform_driver vmem_driver = {
+	.probe = vmem_probe,
+	.remove = vmem_remove,
+	.driver = {
+		.name = "msm_vidc_vmem",
+		.owner = THIS_MODULE,
+		.of_match_table = vmem_of_match,
+	},
+};
+
+static int __init vmem_init(void)
+{
+	return platform_driver_register(&vmem_driver);
+}
+
+static void __exit vmem_exit(void)
+{
+	platform_driver_unregister(&vmem_driver);
+}
+
+module_init(vmem_init);
+module_exit(vmem_exit);
+
+MODULE_LICENSE("GPL v2");
diff -Nruw linux-4.4.115-fbx/drivers/media/platform/msm./vidc/vmem/vmem_debugfs.c linux-4.4.115-fbx/drivers/media/platform/msm/vidc/vmem/vmem_debugfs.c
--- linux-4.4.115-fbx/drivers/media/platform/msm./vidc/vmem/vmem_debugfs.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/media/platform/msm/vidc/vmem/vmem_debugfs.c	2019-01-22 16:16:24.471255172 +0100
@@ -0,0 +1,82 @@
+/*
+ * Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/debugfs.h>
+#include <linux/fs.h>
+#include <linux/platform_device.h>
+#include "vmem.h"
+
+struct vmem_debugfs_cookie {
+	phys_addr_t addr;
+	size_t size;
+};
+
+static int __vmem_alloc_get(void *priv, u64 *val)
+{
+	struct vmem_debugfs_cookie *cookie = priv;
+
+	*val = cookie->size;
+	return 0;
+}
+
+static int __vmem_alloc_set(void *priv, u64 val)
+{
+	struct vmem_debugfs_cookie *cookie = priv;
+	int rc = 0;
+
+	switch (val) {
+	case 0: /* free */
+		vmem_free(cookie->addr);
+		cookie->size = 0;
+		break;
+	default:
+		rc = vmem_allocate(val, &cookie->addr);
+		cookie->size = val;
+		break;
+	}
+
+	return rc;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(fops_vmem_alloc, __vmem_alloc_get,
+		__vmem_alloc_set, "%llu");
+
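+/*
+ * The file created below behaves as follows (assuming debugfs is mounted
+ * at /sys/kernel/debug):
+ *
+ *	echo <size> > /sys/kernel/debug/vmem/alloc    calls vmem_allocate()
+ *	echo 0      > /sys/kernel/debug/vmem/alloc    calls vmem_free()
+ *	cat /sys/kernel/debug/vmem/alloc              reports the current size
+ */
+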
+struct dentry *vmem_debugfs_init(struct platform_device *pdev)
+{
+	struct vmem_debugfs_cookie *alloc_cookie = NULL;
+	struct dentry *debugfs_root = NULL;
+
+	alloc_cookie = devm_kzalloc(&pdev->dev, sizeof(*alloc_cookie),
+			GFP_KERNEL);
+	if (!alloc_cookie)
+		goto exit;
+
+	debugfs_root = debugfs_create_dir("vmem", NULL);
+	if (IS_ERR_OR_NULL(debugfs_root)) {
+		pr_warn("Failed to create '<debugfs>/vmem'\n");
+		debugfs_root = NULL;
+		goto exit;
+	}
+
+	debugfs_create_file("alloc", S_IRUSR | S_IWUSR, debugfs_root,
+			alloc_cookie, &fops_vmem_alloc);
+
+exit:
+	return debugfs_root;
+}
+
+void vmem_debugfs_deinit(struct dentry *debugfs_root)
+{
+	debugfs_remove_recursive(debugfs_root);
+}
+
diff -Nruw linux-4.4.115-fbx/drivers/media/platform/msm./vidc/vmem/vmem_debugfs.h linux-4.4.115-fbx/drivers/media/platform/msm/vidc/vmem/vmem_debugfs.h
--- linux-4.4.115-fbx/drivers/media/platform/msm./vidc/vmem/vmem_debugfs.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/media/platform/msm/vidc/vmem/vmem_debugfs.h	2019-01-22 16:16:24.471255172 +0100
@@ -0,0 +1,21 @@
+/*
+ * Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#ifndef __VMEM_DEBUGFS_H__
+#define __VMEM_DEBUGFS_H__
+
+#include <linux/debugfs.h>
+
+struct dentry *vmem_debugfs_init(struct platform_device *pdev);
+void vmem_debugfs_deinit(struct dentry *debugfs_root);
+
+#endif /* __VMEM_DEBUGFS_H__ */
diff -Nruw linux-4.4.115-fbx/drivers/media/platform/msm./vidc/vmem/vmem.h linux-4.4.115-fbx/drivers/media/platform/msm/vidc/vmem/vmem.h
--- linux-4.4.115-fbx/drivers/media/platform/msm./vidc/vmem/vmem.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/media/platform/msm/vidc/vmem/vmem.h	2019-01-22 16:16:24.471255172 +0100
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2014, 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __VMEM_H__
+#define __VMEM_H__
+
+#if (defined CONFIG_MSM_VIDC_VMEM) || (defined CONFIG_MSM_VIDC_VMEM_MODULE)
+
+int vmem_allocate(size_t size, phys_addr_t *addr);
+void vmem_free(phys_addr_t to_free);
+
+#else
+
+static inline int vmem_allocate(size_t size, phys_addr_t *addr)
+{
+	return -ENODEV;
+}
+
+static inline void vmem_free(phys_addr_t to_free)
+{
+}
+
+#endif
+
+#endif /* __VMEM_H__ */
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/media/rc/keymaps/rc-cec.c	2019-10-29 09:26:23.977206447 +0100
@@ -0,0 +1,182 @@
+/* Keytable for the CEC remote control
+ *
+ * Copyright (c) 2015 by Kamil Debski
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <media/rc-map.h>
+#include <linux/module.h>
+
+/*
+ * CEC Spec "High-Definition Multimedia Interface Specification" can be obtained
+ * here: http://xtreamerdev.googlecode.com/files/CEC_Specs.pdf
+ * The list of control codes is listed in Table 27: User Control Codes p. 95
+ */
+
+static struct rc_map_table cec[] = {
+	{ 0x00, KEY_OK },
+	{ 0x01, KEY_UP },
+	{ 0x02, KEY_DOWN },
+	{ 0x03, KEY_LEFT },
+	{ 0x04, KEY_RIGHT },
+	{ 0x05, KEY_RIGHT_UP },
+	{ 0x06, KEY_RIGHT_DOWN },
+	{ 0x07, KEY_LEFT_UP },
+	{ 0x08, KEY_LEFT_DOWN },
+	{ 0x09, KEY_ROOT_MENU }, /* CEC Spec: Device Root Menu - see Note 2 */
+	/*
+	 * Note 2: This is the initial display that a device shows. It is
+	 * device-dependent and can be, for example, a contents menu, setup
+	 * menu, favorite menu or other menu. The actual menu displayed
+	 * may also depend on the device's current state.
+	 */
+	{ 0x0a, KEY_SETUP },
+	{ 0x0b, KEY_MENU }, /* CEC Spec: Contents Menu */
+	{ 0x0c, KEY_FAVORITES }, /* CEC Spec: Favorite Menu */
+	{ 0x0d, KEY_EXIT },
+	/* 0x0e-0x0f: Reserved */
+	{ 0x10, KEY_MEDIA_TOP_MENU },
+	{ 0x11, KEY_CONTEXT_MENU },
+	/* 0x12-0x1c: Reserved */
+	{ 0x1d, KEY_DIGITS }, /* CEC Spec: select/toggle a Number Entry Mode */
+	{ 0x1e, KEY_NUMERIC_11 },
+	{ 0x1f, KEY_NUMERIC_12 },
+	/* 0x20-0x29: Keys 0 to 9 */
+	{ 0x20, KEY_NUMERIC_0 },
+	{ 0x21, KEY_NUMERIC_1 },
+	{ 0x22, KEY_NUMERIC_2 },
+	{ 0x23, KEY_NUMERIC_3 },
+	{ 0x24, KEY_NUMERIC_4 },
+	{ 0x25, KEY_NUMERIC_5 },
+	{ 0x26, KEY_NUMERIC_6 },
+	{ 0x27, KEY_NUMERIC_7 },
+	{ 0x28, KEY_NUMERIC_8 },
+	{ 0x29, KEY_NUMERIC_9 },
+	{ 0x2a, KEY_DOT },
+	{ 0x2b, KEY_ENTER },
+	{ 0x2c, KEY_CLEAR },
+	/* 0x2d-0x2e: Reserved */
+	{ 0x2f, KEY_NEXT_FAVORITE }, /* CEC Spec: Next Favorite */
+	{ 0x30, KEY_CHANNELUP },
+	{ 0x31, KEY_CHANNELDOWN },
+	{ 0x32, KEY_PREVIOUS }, /* CEC Spec: Previous Channel */
+	{ 0x33, KEY_SOUND }, /* CEC Spec: Sound Select */
+	{ 0x34, KEY_VIDEO }, /* CEC Spec: Input Select */
+	{ 0x35, KEY_INFO }, /* CEC Spec: Display Information */
+	{ 0x36, KEY_HELP },
+	{ 0x37, KEY_PAGEUP },
+	{ 0x38, KEY_PAGEDOWN },
+	/* 0x39-0x3f: Reserved */
+	{ 0x40, KEY_POWER },
+	{ 0x41, KEY_VOLUMEUP },
+	{ 0x42, KEY_VOLUMEDOWN },
+	{ 0x43, KEY_MUTE },
+	{ 0x44, KEY_PLAYCD },
+	{ 0x45, KEY_STOPCD },
+	{ 0x46, KEY_PAUSECD },
+	{ 0x47, KEY_RECORD },
+	{ 0x48, KEY_REWIND },
+	{ 0x49, KEY_FASTFORWARD },
+	{ 0x4a, KEY_EJECTCD }, /* CEC Spec: Eject */
+	{ 0x4b, KEY_FORWARD },
+	{ 0x4c, KEY_BACK },
+	{ 0x4d, KEY_STOP_RECORD }, /* CEC Spec: Stop-Record */
+	{ 0x4e, KEY_PAUSE_RECORD }, /* CEC Spec: Pause-Record */
+	/* 0x4f: Reserved */
+	{ 0x50, KEY_ANGLE },
+	{ 0x51, KEY_TV2 },
+	{ 0x52, KEY_VOD }, /* CEC Spec: Video on Demand */
+	{ 0x53, KEY_EPG },
+	{ 0x54, KEY_TIME }, /* CEC Spec: Timer */
+	{ 0x55, KEY_CONFIG },
+	/*
+	 * The following codes are hard to implement at this moment, as they
+	 * carry an additional argument. Most likely, changes to the RC
+	 * framework are necessary.
+	 * For now they are interpreted by the CEC framework as non-keycodes
+	 * and are passed as messages, enabling user applications to parse them.
+	 */
+	/* 0x56: CEC Spec: Select Broadcast Type */
+	/* 0x57: CEC Spec: Select Sound presentation */
+	{ 0x58, KEY_AUDIO_DESC }, /* CEC 2.0 and up */
+	{ 0x59, KEY_WWW }, /* CEC 2.0 and up */
+	{ 0x5a, KEY_3D_MODE }, /* CEC 2.0 and up */
+	/* 0x5b-0x5f: Reserved */
+	{ 0x60, KEY_PLAYCD }, /* CEC Spec: Play Function */
+	{ 0x6005, KEY_FASTFORWARD },
+	{ 0x6006, KEY_FASTFORWARD },
+	{ 0x6007, KEY_FASTFORWARD },
+	{ 0x6015, KEY_SLOW },
+	{ 0x6016, KEY_SLOW },
+	{ 0x6017, KEY_SLOW },
+	{ 0x6009, KEY_FASTREVERSE },
+	{ 0x600a, KEY_FASTREVERSE },
+	{ 0x600b, KEY_FASTREVERSE },
+	{ 0x6019, KEY_SLOWREVERSE },
+	{ 0x601a, KEY_SLOWREVERSE },
+	{ 0x601b, KEY_SLOWREVERSE },
+	{ 0x6020, KEY_REWIND },
+	{ 0x6024, KEY_PLAYCD },
+	{ 0x6025, KEY_PAUSECD },
+	{ 0x61, KEY_PLAYPAUSE }, /* CEC Spec: Pause-Play Function */
+	{ 0x62, KEY_RECORD }, /* Spec: Record Function */
+	{ 0x63, KEY_PAUSE_RECORD }, /* CEC Spec: Pause-Record Function */
+	{ 0x64, KEY_STOPCD }, /* CEC Spec: Stop Function */
+	{ 0x65, KEY_MUTE }, /* CEC Spec: Mute Function */
+	{ 0x66, KEY_UNMUTE }, /* CEC Spec: Restore the volume */
+	/*
+	 * The following codes are hard to implement at this moment, as they
+	 * carry an additional argument. Most likely, changes to the RC
+	 * framework are necessary.
+	 * For now they are interpreted by the CEC framework as non-keycodes
+	 * and are passed as messages, enabling user applications to parse them.
+	 */
+	/* 0x67: CEC Spec: Tune Function */
+	/* 0x68: CEC Spec: Select Media Function */
+	/* 0x69: CEC Spec: Select A/V Input Function */
+	/* 0x6a: CEC Spec: Select Audio Input Function */
+	{ 0x6b, KEY_POWER }, /* CEC Spec: Power Toggle Function */
+	{ 0x6c, KEY_SLEEP }, /* CEC Spec: Power Off Function */
+	{ 0x6d, KEY_WAKEUP }, /* CEC Spec: Power On Function */
+	/* 0x6e-0x70: Reserved */
+	{ 0x71, KEY_BLUE }, /* CEC Spec: F1 (Blue) */
+	{ 0x72, KEY_RED }, /* CEC Spec: F2 (Red) */
+	{ 0x73, KEY_GREEN }, /* CEC Spec: F3 (Green) */
+	{ 0x74, KEY_YELLOW }, /* CEC Spec: F4 (Yellow) */
+	{ 0x75, KEY_F5 },
+	{ 0x76, KEY_DATA }, /* CEC Spec: Data - see Note 3 */
+	/*
+	 * Note 3: This is used, for example, to enter or leave a digital TV
+	 * data broadcast application.
+	 */
+	/* 0x77-0xff: Reserved */
+};
+
+static struct rc_map_list cec_map = {
+	.map = {
+		.scan		= cec,
+		.size		= ARRAY_SIZE(cec),
+		.rc_type	= RC_TYPE_CEC,
+		.name		= RC_MAP_CEC,
+	}
+};
+
+static int __init init_rc_map_cec(void)
+{
+	return rc_map_register(&cec_map);
+}
+
+static void __exit exit_rc_map_cec(void)
+{
+	rc_map_unregister(&cec_map);
+}
+
+module_init(init_rc_map_cec);
+module_exit(exit_rc_map_cec);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Kamil Debski");
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/media/rc/keymaps/rc-rc6-freebox.c	2019-01-22 16:16:24.527255679 +0100
@@ -0,0 +1,135 @@
+/* rc-rc6-freebox.c - Keytable for Freebox/Alicebox IR controller
+ *
+ * Copyright (c) 2012 by Nicolas Pouillon <npouillon@freebox.fr>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <media/rc-map.h>
+#include <linux/module.h>
+
+#define RC6_FREEBOX_TV 0xf2
+#define RC6_FREEBOX_WIDE 0x49
+#define RC6_FREEBOX_POWER 0x0c
+#define RC6_FREEBOX_STOP 0x31
+#define RC6_FREEBOX_REC 0x37
+#define RC6_FREEBOX_REW 0x2f
+#define RC6_FREEBOX_PLAY 0xb8
+#define RC6_FREEBOX_FF 0x2e
+#define RC6_FREEBOX_PREV 0x4d
+#define RC6_FREEBOX_NEXT 0x4c
+#define RC6_FREEBOX_RELOAD 0x83
+#define RC6_FREEBOX_MENU 0xcc
+#define RC6_FREEBOX_UP 0x99
+#define RC6_FREEBOX_LEFT 0x9b
+#define RC6_FREEBOX_RIGHT 0x9c
+#define RC6_FREEBOX_DOWN 0x9a
+#define RC6_FREEBOX_OK 0x5c
+#define RC6_FREEBOX_VOL_INC 0x5b
+#define RC6_FREEBOX_VOL_DEC 0x5a
+#define RC6_FREEBOX_MUTE 0x0d
+#define RC6_FREEBOX_PROG_UP 0x58
+#define RC6_FREEBOX_PROG_DOWN 0x59
+#define RC6_FREEBOX_FREEBOX 0xd7
+#define RC6_FREEBOX_RED 0x6d
+#define RC6_FREEBOX_GREEN 0x6e
+#define RC6_FREEBOX_YELLOW 0x6f
+#define RC6_FREEBOX_BLUE 0x70
+#define RC6_FREEBOX_1 0x01
+#define RC6_FREEBOX_2 0x02
+#define RC6_FREEBOX_3 0x03
+#define RC6_FREEBOX_4 0x04
+#define RC6_FREEBOX_5 0x05
+#define RC6_FREEBOX_6 0x06
+#define RC6_FREEBOX_7 0x07
+#define RC6_FREEBOX_8 0x08
+#define RC6_FREEBOX_9 0x09
+#define RC6_FREEBOX_BACK 0x9e
+#define RC6_FREEBOX_0 0x00
+#define RC6_FREEBOX_SWAP 0x0a
+#define RC6_FREEBOX_HELP 0x81
+#define RC6_FREEBOX_INFO 0x0f
+#define RC6_FREEBOX_GUIDE 0x97
+#define RC6_FREEBOX_OPTIONS 0x54
+
+#define MAP(x,y) { 0x80382600 + RC6_FREEBOX_##x, KEY_##y }
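+/*
+ * e.g. MAP(POWER, POWER) expands to
+ * { 0x80382600 + RC6_FREEBOX_POWER, KEY_POWER },
+ * i.e. scancode 0x8038260c mapped to KEY_POWER.
+ */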
+
+static struct rc_map_table rc6_freebox[] = {
+	MAP(0, NUMERIC_0),
+	MAP(1, NUMERIC_1),
+	MAP(2, NUMERIC_2),
+	MAP(3, NUMERIC_3),
+	MAP(4, NUMERIC_4),
+	MAP(5, NUMERIC_5),
+	MAP(6, NUMERIC_6),
+	MAP(7, NUMERIC_7),
+	MAP(8, NUMERIC_8),
+	MAP(9, NUMERIC_9),
+	MAP(SWAP, BACK),
+	MAP(POWER, POWER),
+	MAP(MUTE, MUTE),
+	MAP(INFO, INFO),
+
+	MAP(FF, FASTFORWARD),
+	MAP(REW, REWIND),
+	MAP(STOP, STOP),
+	MAP(REC, RECORD),
+	MAP(WIDE, ZOOM),
+	MAP(PREV, PREVIOUS),
+	MAP(NEXT, NEXT),
+
+	MAP(OPTIONS, OPTION),
+	MAP(PROG_UP, CHANNELUP),
+	MAP(PROG_DOWN, CHANNELDOWN),
+	MAP(VOL_DEC, VOLUMEDOWN),
+	MAP(VOL_INC, VOLUMEUP),
+	MAP(OK, OK),
+
+	MAP(RED, RED),
+	MAP(GREEN, GREEN),
+	MAP(YELLOW, YELLOW),
+	MAP(BLUE, BLUE),
+
+	MAP(HELP, HELP),
+	MAP(RELOAD, REFRESH),
+
+	MAP(GUIDE, PROGRAM),
+	MAP(UP, UP),
+	MAP(DOWN, DOWN),
+	MAP(LEFT, LEFT),
+	MAP(RIGHT, RIGHT),
+	MAP(BACK, BACKSPACE),
+	MAP(PLAY, PLAY),
+
+	MAP(MENU, LIST),
+	MAP(FREEBOX, HOME),
+	MAP(TV, SCREEN),
+};
+
+static struct rc_map_list rc6_freebox_map = {
+	.map = {
+		.scan    = rc6_freebox,
+		.size    = ARRAY_SIZE(rc6_freebox),
+		.rc_type = RC_TYPE_RC6_MCE,
+		.name    = "rc-rc6-freebox",
+	}
+};
+
+static int __init init_rc_map_rc6_freebox(void)
+{
+	return rc_map_register(&rc6_freebox_map);
+}
+
+static void __exit exit_rc_map_rc6_freebox(void)
+{
+	rc_map_unregister(&rc6_freebox_map);
+}
+
+module_init(init_rc_map_rc6_freebox)
+module_exit(exit_rc_map_rc6_freebox)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Nicolas Pouillon <npouillon@freebox.fr>");
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/mfd/fbx7hd-top-psoc.c	2019-01-22 16:16:24.671256983 +0100
@@ -0,0 +1,1426 @@
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/sched.h>
+#include <linux/spi/spi.h>
+#include <linux/workqueue.h>
+#include <linux/uaccess.h>
+#include <linux/gpio.h>
+#include <linux/completion.h>
+#include <linux/platform_device.h>
+#include <linux/of_device.h>
+#include <linux/poll.h>
+#include <linux/miscdevice.h>
+#include <linux/net.h>
+
+#include "fbx7hd-top-psoc.h"
+
+/*
+ * queued reports
+ */
+struct psoc_report {
+	struct timespec			tstamp;
+	u8				button_id;
+	u8				pressed;
+	u8				x;
+	u8				y;
+} __attribute__((packed));
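+
+/*
+ * Reports are consumed from userspace through the "fbx7hd-psoc" misc
+ * device; reads must be a multiple of sizeof(struct psoc_report).
+ * A minimal consumer sketch (hypothetical userspace code):
+ *
+ *	struct psoc_report rep;
+ *	int fd = open("/dev/fbx7hd-psoc", O_RDONLY);
+ *	while (read(fd, &rep, sizeof(rep)) == sizeof(rep))
+ *		printf("btn %u %s\n", rep.button_id,
+ *		       rep.pressed ? "pressed" : "released");
+ */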
+
+/*
+ * private context
+ */
+struct psoc_cmd {
+	u8				cmd_buf[MSG_MAX_LEN];
+	size_t				cmd_len;
+
+	int				xmit_status;
+	u8				retval;
+	u8				reply_data[MSG_MAX_LEN];
+	size_t				reply_len;
+
+	struct completion		completion;
+	unsigned long			timeout_time;
+	struct list_head		next;
+};
+
+enum led {
+	LED_R0_RED,
+	LED_R0_GREEN,
+	LED_R0_BLUE,
+	LED_R1,
+	LED_R2,
+	LED_R3_RED,
+	LED_R3_GREEN,
+	LED_R3_BLUE,
+	LED_R4,
+	LED_R5,
+	LED_R6_RED,
+	LED_R6_GREEN,
+	LED_R6_BLUE,
+	LED_R7,
+	LED_R8,
+	LED_R9_RED,
+	LED_R9_GREEN,
+	LED_R9_BLUE,
+	LED_R10,
+	LED_R11,
+	LED_BT,
+	LED_VOICE,
+	LED_LAST,
+};
+
+enum led_action {
+	LED_ACTION_OFF,
+	LED_ACTION_ON,
+	LED_ACTION_PWM0,
+	LED_ACTION_PWM1,
+};
+
+static const char *led_action_str[] = {
+	[LED_ACTION_OFF] = "off",
+	[LED_ACTION_ON] = "on",
+	[LED_ACTION_PWM0] = "pwm0",
+	[LED_ACTION_PWM1] = "pwm1",
+};
+
+static const struct psoc_led_desc {
+	unsigned int	controller;
+	unsigned int	hw_led;
+} led_descs[] = {
+	[LED_R0_RED] = {
+		.controller	= 1,
+		.hw_led		= 2,
+	},
+
+	[LED_R0_GREEN] = {
+		.controller	= 1,
+		.hw_led		= 1,
+	},
+
+	[LED_R0_BLUE] = {
+		.controller	= 1,
+		.hw_led		= 0,
+	},
+
+	[LED_R1] = {
+		.controller	= 0,
+		.hw_led		= 1,
+	},
+
+	[LED_R2] = {
+		.controller	= 0,
+		.hw_led		= 2,
+	},
+
+	[LED_R3_RED] = {
+		.controller	= 1,
+		.hw_led		= 5,
+	},
+
+	[LED_R3_GREEN] = {
+		.controller	= 1,
+		.hw_led		= 4,
+	},
+
+	[LED_R3_BLUE] = {
+		.controller	= 1,
+		.hw_led		= 3,
+	},
+
+	[LED_R4] = {
+		.controller	= 0,
+		.hw_led		= 4,
+	},
+
+	[LED_R5] = {
+		.controller	= 0,
+		.hw_led		= 5,
+	},
+
+	[LED_R6_RED] = {
+		.controller	= 1,
+		.hw_led		= 8,
+	},
+
+	[LED_R6_GREEN] = {
+		.controller	= 1,
+		.hw_led		= 7,
+	},
+
+	[LED_R6_BLUE] = {
+		.controller	= 1,
+		.hw_led		= 6,
+	},
+
+	[LED_R7] = {
+		.controller	= 0,
+		.hw_led		= 7,
+	},
+
+	[LED_R8] = {
+		.controller	= 0,
+		.hw_led		= 8,
+	},
+
+	[LED_R9_RED] = {
+		.controller	= 1,
+		.hw_led		= 11,
+	},
+
+	[LED_R9_GREEN] = {
+		.controller	= 1,
+		.hw_led		= 10,
+	},
+
+	[LED_R9_BLUE] = {
+		.controller	= 1,
+		.hw_led		= 9,
+	},
+
+	[LED_R10] = {
+		.controller	= 0,
+		.hw_led		= 10,
+	},
+
+	[LED_R11] = {
+		.controller	= 0,
+		.hw_led		= 11,
+	},
+
+	[LED_BT] = {
+		.controller	= 0,
+		.hw_led		= 12,
+	},
+
+	[LED_VOICE] = {
+		.controller	= 0,
+		.hw_led		= 13,
+	},
+};
+
+struct psoc_led {
+	enum led_action			action;
+	unsigned int			cur_hw_state;
+};
+
+struct psoc_client;
+
+struct fbx_psoc {
+	struct spi_device		*spi;
+	struct miscdevice		misc_dev;
+	struct gpio_desc		*reset_gpio;
+	struct gpio_desc		*irq_gpio;
+
+	unsigned int			api_version;
+	unsigned int			build_date;
+
+	u8				last_txid;
+	spinlock_t			cmd_queue_lock;
+	struct list_head		cmd_queue;
+	struct delayed_work		rxtx_work;
+	u8				rx_buf[MSG_MAX_LEN];
+	size_t				rx_buf_len;
+	bool				rx_escaped;
+	struct psoc_cmd			*pending;
+	struct psoc_led			leds[LED_LAST];
+
+	struct mutex			client_lock;
+	struct psoc_client		*client;
+};
+
+#define CLIENT_MAX_REPORTS		128
+
+struct psoc_client {
+	struct fbx_psoc			*priv;
+	wait_queue_head_t		inq;
+	struct psoc_report		reports[CLIENT_MAX_REPORTS];
+	u32				report_head;
+	u32				report_tail;
+};
+
+/*
+ *
+ */
+static int is_white_char(int c)
+{
+	return c == '\0' || c == ' ' || c == '\t' || c == '\n' || c == '\r';
+}
+
+/*
+ *
+ */
+static void psoc_handle_rx_msg(struct fbx_psoc *priv)
+{
+	const struct cmd_hdr *hdr;
+	u8 mtype;
+
+	if (priv->rx_buf_len < sizeof (*hdr)) {
+		dev_warn(&priv->spi->dev,
+			 "ignored too small len: %zu\n",
+			 priv->rx_buf_len);
+		goto reinit;
+	}
+
+	hdr = (const struct cmd_hdr *)priv->rx_buf;
+	if (hdr->len != priv->rx_buf_len) {
+		dev_warn(&priv->spi->dev,
+			 "ignored bad len: %zu vs %u\n",
+			 priv->rx_buf_len, hdr->len);
+		goto reinit;
+	}
+
+	mtype = (hdr->msg_flags & MSG_TYPE_MASK) >> MSG_TYPE_SHIFT;
+	switch (mtype) {
+	case MSG_TYPE_CMD:
+	{
+		struct psoc_report report;
+		struct psoc_client *client;
+		u8 msg_id = hdr->msg_flags & MSG_ID_MASK;
+		size_t dlen;
+		u32 nslot;
+
+		memset(&report, 0, sizeof (report));
+		get_monotonic_boottime(&report.tstamp);
+
+		dlen = hdr->len - sizeof (*hdr);
+		switch (msg_id) {
+		case MSG_ID_S2H_BTN_REPORT:
+		{
+			const struct cmd_btn_report *brep;
+			if (dlen != sizeof (*brep)) {
+				dev_warn(&priv->spi->dev,
+					 "bad btn report len %zu\n", dlen);
+				goto reinit;
+			}
+
+			brep = (const struct cmd_btn_report *)(hdr + 1);
+			report.button_id = brep->btn_state &
+				BTN_REPORT_BTN_MASK;
+			report.pressed = !!(brep->btn_state &
+					    BTN_REPORT_F_PRESSED);
+			break;
+		}
+		case MSG_ID_S2H_TOUCH_REPORT:
+		{
+			const struct cmd_touch_report *trep;
+			if (dlen != sizeof (*trep)) {
+				dev_warn(&priv->spi->dev,
+					 "bad touch report len %zu\n", dlen);
+				goto reinit;
+			}
+
+			trep = (const struct cmd_touch_report *)(hdr + 1);
+			report.button_id = 3;
+			report.pressed = !!(trep->flags &
+					    TOUCH_REPORT_F_PRESSED);
+			report.x = trep->x;
+			report.y = trep->y;
+			break;
+		}
+		default:
+			dev_warn(&priv->spi->dev, "unknown psoc cmd %u\n",
+				 msg_id);
+			goto reinit;
+		}
+
+		mutex_lock(&priv->client_lock);
+		client = priv->client;
+		if (!client) {
+			mutex_unlock(&priv->client_lock);
+			goto reinit;
+		}
+
+		nslot = client->report_head;
+		nslot++;
+		if (nslot >= ARRAY_SIZE(client->reports))
+			nslot = 0;
+
+		if (nslot == client->report_tail) {
+			/* queue is full */
+			if (net_ratelimit())
+				dev_warn(&priv->spi->dev,
+					 "client queue full\n");
+			mutex_unlock(&priv->client_lock);
+			goto reinit;
+		}
+
+		memcpy(&client->reports[client->report_head],
+		       &report, sizeof (report));
+		client->report_head = nslot;
+		wake_up(&client->inq);
+		mutex_unlock(&priv->client_lock);
+		break;
+	}
+
+	case MSG_TYPE_ACK:
+	{
+		const struct cmd_hdr *tx_hdr;
+		const struct cmd_ack_retval *ack;
+		struct psoc_cmd *pending = priv->pending;
+
+		if (!pending) {
+			dev_warn(&priv->spi->dev,
+				 "ignored ack with no pending command\n");
+			goto reinit;
+		}
+
+		tx_hdr = (const struct cmd_hdr *)pending->cmd_buf;
+		if (hdr->txid != tx_hdr->txid) {
+			dev_warn(&priv->spi->dev,
+				 "ignored ack with for another txid: %u %u\n",
+				 hdr->txid, tx_hdr->txid);
+			goto reinit;
+		}
+
+		priv->pending = NULL;
+
+		if (hdr->len < sizeof (*hdr) + sizeof (*ack)) {
+			dev_warn(&priv->spi->dev, "ack too small\n");
+			pending->xmit_status = -EIO;
+			complete(&pending->completion);
+			goto reinit;
+		}
+
+		ack = (const struct cmd_ack_retval *)(hdr + 1);
+		if (ack->retval)
+			dev_warn(&priv->spi->dev,
+				 "cmd msg_id:%u failed with ret:%u\n",
+				 tx_hdr->msg_flags & MSG_ID_MASK,
+				 ack->retval);
+
+		if (pending->reply_len > sizeof (pending->reply_data)) {
+			dev_warn(&priv->spi->dev, "reply data too big\n");
+			pending->xmit_status = -EIO;
+			complete(&pending->completion);
+			goto reinit;
+		}
+
+		pending->xmit_status = 0;
+		pending->retval = ack->retval;
+		pending->reply_len = hdr->len - sizeof (*hdr) - sizeof (*ack);
+		memcpy(pending->reply_data, ack + 1, pending->reply_len);
+		complete(&pending->completion);
+		goto reinit;
+	}
+	}
+
+reinit:
+	priv->rx_buf_len = 0;
+	priv->rx_escaped = false;
+}
+
+/*
+ * escape delimiters from buffer and add delim at the end
+ *
+ * the edata buffer must be at least (2 * icount + 1) bytes long
+ */
+static size_t escape_buf(const u8 *data, size_t icount, u8 *edata)
+{
+	u8 *p;
+	size_t i;
+
+	p = edata;
+	for (i = 0; i < icount; i++) {
+		switch (data[i]) {
+		case DELIM_CHAR:
+			*p++ = ESCAPE_CHAR;
+			*p++ = ESCAPED_DELIM;
+			break;
+		case IGNORE_CHAR:
+			*p++ = ESCAPE_CHAR;
+			*p++ = ESCAPED_IGNORE;
+			break;
+		case ESCAPE_CHAR:
+			*p++ = ESCAPE_CHAR;
+			*p++ = ESCAPED_ESCAPE;
+			break;
+		default:
+			*p++ = data[i];
+		}
+	}
+	*p++ = DELIM_CHAR;
+	return p - edata;
+}
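+
+/*
+ * Framing example (a sketch of the wire format): escaping the 3-byte
+ * message { 0x01, 0x00, 0xfe } yields { 0x01, 0xfe, 0x01, 0xfe, 0x03,
+ * 0x00 }: the 0x00 payload byte becomes the ESCAPE_CHAR/ESCAPED_DELIM
+ * pair, the 0xfe byte becomes ESCAPE_CHAR/ESCAPED_ESCAPE, and the
+ * trailing DELIM_CHAR terminates the frame.
+ */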
+
+/*
+ *
+ */
+static ssize_t psoc_rxtx_xfer(struct fbx_psoc *priv,
+			      u8 *rx_buf, size_t rx_xfer_min)
+{
+	struct psoc_cmd *cmd;
+	u8 tx_buf[MSG_MAX_LEN * 2 + 1];
+	size_t tx_buf_len, xfer_len;
+	struct spi_transfer t;
+	int ret;
+
+	/*
+	 * do we have a command to send ?
+	 */
+	cmd = NULL;
+	if (!priv->pending) {
+		spin_lock(&priv->cmd_queue_lock);
+		cmd = list_first_entry_or_null(&priv->cmd_queue,
+					       struct psoc_cmd, next);
+		if (cmd)
+			list_del(&cmd->next);
+		spin_unlock(&priv->cmd_queue_lock);
+	}
+
+	if (cmd) {
+		struct cmd_hdr *hdr = (struct cmd_hdr *)cmd->cmd_buf;
+
+		/* update txid before sending */
+		hdr->txid = ++priv->last_txid;
+		tx_buf_len = escape_buf(cmd->cmd_buf, cmd->cmd_len, tx_buf);
+	} else
+		tx_buf_len = 0;
+
+	xfer_len = rx_xfer_min > tx_buf_len ? rx_xfer_min : tx_buf_len;
+
+	/* if receiving more than sending, make sure to pad with the
+	 * ignore char */
+	if (xfer_len > tx_buf_len)
+		memset(tx_buf + tx_buf_len, IGNORE_CHAR,
+		       xfer_len - tx_buf_len);
+
+	/*
+	 * do the SPI transfer
+	 */
+	memset(&t, 0, sizeof (t));
+	t.rx_buf = rx_buf;
+	t.tx_buf = tx_buf;
+	t.len = xfer_len;
+
+	ret = spi_sync_transfer(priv->spi, &t, 1);
+	if (ret < 0) {
+		dev_err(&priv->spi->dev, "SPI transfer failed: %d", ret);
+		/* cmd may be NULL if this was a pure receive */
+		if (cmd) {
+			cmd->xmit_status = -EIO;
+			complete(&cmd->completion);
+		}
+		return -1;
+	}
+
+	if (cmd)
+		priv->pending = cmd;
+
+	return xfer_len;
+}
+
+/*
+ *
+ */
+static int psoc_rxtx_process(struct fbx_psoc *priv)
+{
+	u8 rx_ebuf[MSG_MAX_LEN * 2 + 1];
+	size_t i, rx_xfer_len;
+	ssize_t ret;
+
+	rx_xfer_len = MSG_MAX_LEN * 2;
+	ret = psoc_rxtx_xfer(priv, rx_ebuf, rx_xfer_len);
+	if (ret < 0)
+		return ret;
+
+	/*
+	 * process rx
+	 */
+	for (i = 0; i < ret; i++) {
+		uint8_t b = rx_ebuf[i];
+
+		if (b == IGNORE_CHAR)
+			continue;
+
+		if (priv->rx_escaped) {
+			if (b == ESCAPED_DELIM) {
+				b = DELIM_CHAR;
+			} else if (b == ESCAPED_IGNORE) {
+				b = IGNORE_CHAR;
+			} else if (b == ESCAPED_ESCAPE) {
+				b = ESCAPE_CHAR;
+			} else {
+				/* invalid escape, eat it */
+				continue;
+			}
+
+			priv->rx_escaped = false;
+		} else {
+			if (b == DELIM_CHAR) {
+				if (priv->rx_buf_len) {
+					/* full message */
+					psoc_handle_rx_msg(priv);
+				}
+
+				/* start a new frame */
+				priv->rx_escaped = false;
+				continue;
+			}
+
+			if (b == ESCAPE_CHAR) {
+				priv->rx_escaped = true;
+				continue;
+			}
+		}
+
+		/* valid byte to add to message */
+		if (priv->rx_buf_len == sizeof (priv->rx_buf)) {
+			/* lost sync, ignore */
+			priv->rx_buf_len = 0;
+			priv->rx_escaped = false;
+			dev_err(&priv->spi->dev, "lost rx sync, flush\n");
+			continue;
+		}
+
+		priv->rx_buf[priv->rx_buf_len++] = b;
+	}
+
+	return 0;
+}
+
+/*
+ *
+ */
+static void psoc_rxtx_work_handler(struct work_struct *work)
+{
+	struct fbx_psoc *priv = container_of(work, struct fbx_psoc,
+					     rxtx_work.work);
+	unsigned long delay;
+	unsigned int i;
+	bool tx_waiting;
+
+	if (priv->pending &&
+	    time_after(jiffies, priv->pending->timeout_time)) {
+		dev_err(&priv->spi->dev, "command timeout\n");
+		priv->pending->xmit_status = -ETIMEDOUT;
+		complete(&priv->pending->completion);
+		priv->pending = NULL;
+	}
+
+	/* try to receive as many messages as possible */
+	for (i = 0; i < 16; i++) {
+		spin_lock(&priv->cmd_queue_lock);
+		tx_waiting = !list_empty(&priv->cmd_queue);
+		spin_unlock(&priv->cmd_queue_lock);
+
+		if (!tx_waiting) {
+			if (priv->irq_gpio &&
+			    !gpiod_get_value(priv->irq_gpio)) {
+				/* no need to read */
+				goto resched;
+			}
+		}
+
+		if (psoc_rxtx_process(priv))
+			break;
+	}
+
+resched:
+	/* reschedule */
+	if (priv->pending)
+		delay = 0;
+	else
+		delay = HZ / 100;
+
+	schedule_delayed_work(&priv->rxtx_work, delay);
+}
+
+/*
+ *
+ */
+static int send_and_wait_cmd_retdata(struct fbx_psoc *priv,
+				     uint8_t msg_id,
+				     const void *data, size_t dlen,
+				     void *retbuf, size_t retbuflen)
+{
+	struct psoc_cmd *cmd;
+	struct cmd_hdr *hdr;
+	size_t total;
+	int retval;
+
+	total = sizeof (*hdr) + dlen;
+	if (total > sizeof (cmd->cmd_buf)) {
+		/* too big */
+		dev_err(&priv->spi->dev, "command to big (%zu), cannot send",
+			total);
+		return -EFBIG;
+	}
+
+	cmd = kzalloc(sizeof (*cmd), GFP_KERNEL);
+	if (!cmd)
+		return -ENOMEM;
+
+	init_completion(&cmd->completion);
+	hdr = (struct cmd_hdr *)cmd->cmd_buf;
+	hdr->len = total;
+	hdr->msg_flags = msg_id;
+	memcpy(hdr + 1, data, dlen);
+	cmd->cmd_len = total;
+
+	spin_lock(&priv->cmd_queue_lock);
+	list_add_tail(&cmd->next, &priv->cmd_queue);
+	cmd->timeout_time = jiffies + 1 * HZ;
+	spin_unlock(&priv->cmd_queue_lock);
+	cancel_delayed_work(&priv->rxtx_work);
+	schedule_delayed_work(&priv->rxtx_work, 0);
+
+	wait_for_completion(&cmd->completion);
+	if (cmd->xmit_status) {
+		retval = cmd->xmit_status;
+		goto out;
+	}
+
+	retval = cmd->retval;
+	if (!retval) {
+		if (retbuflen != cmd->reply_len) {
+			dev_err(&priv->spi->dev,
+				"command msg_id:%u unexpected "
+				"reply_len:%zu vs %zu",
+				msg_id, cmd->reply_len, retbuflen);
+			retval = -EINVAL;
+		} else
+			memcpy(retbuf, cmd->reply_data, cmd->reply_len);
+	}
+
+out:
+	kfree(cmd);
+	return retval;
+}
+
+/*
+ *
+ */
+static int psoc_read_version(struct fbx_psoc *priv)
+{
+	struct cmd_get_version_ack gversion_ack;
+	int ret;
+
+	ret = send_and_wait_cmd_retdata(priv,
+					MSG_ID_H2S_GET_VERSION,
+					NULL, 0,
+					&gversion_ack,
+					sizeof (gversion_ack));
+	if (ret < 0)
+		return ret;
+
+	/* command should never fail */
+	if (ret > 0)
+		return -EINVAL;
+
+	priv->api_version = gversion_ack.api_version;
+	priv->build_date = gversion_ack.build_date;
+	return 0;
+}
+
+/*
+ *
+ */
+static int psoc_get_hw_led_state(struct fbx_psoc *priv,
+				 uint8_t hw_id, u8 *state)
+{
+	struct cmd_get_led gled;
+	struct cmd_get_led_ack gled_ack;
+	int ret;
+
+	gled.led = hw_id;
+	ret = send_and_wait_cmd_retdata(priv,
+					MSG_ID_H2S_GET_LED,
+					&gled, sizeof (gled),
+					&gled_ack, sizeof (gled_ack));
+	if (ret < 0)
+		return ret;
+
+	/* command should never fail */
+	if (ret > 0)
+		return -EINVAL;
+
+	*state = gled_ack.val;
+	return 0;
+}
+
+/*
+ *
+ */
+static int psoc_set_hw_led_state(struct fbx_psoc *priv,
+				 uint8_t hw_id, u8 state)
+{
+	struct cmd_set_led sled;
+	int ret;
+
+	sled.led = hw_id;
+	sled.val = state;
+	ret = send_and_wait_cmd_retdata(priv,
+					MSG_ID_H2S_SET_LED,
+					&sled, sizeof (sled),
+					NULL, 0);
+	if (ret < 0)
+		return ret;
+
+	/* command can fail due to an i2c transfer error */
+	if (ret > 0)
+		return -EIO;
+
+	return 0;
+}
+
+/*
+ *
+ */
+static int psoc_i2c_write(struct fbx_psoc *priv,
+			  unsigned int address,
+			  const uint8_t *data, size_t dlen)
+{
+	struct cmd_write_i2c_regval *i2c;
+	uint8_t buf[sizeof (*i2c) + dlen];
+	int ret;
+
+	i2c = (struct cmd_write_i2c_regval *)buf;
+	i2c->address = address;
+	memcpy(i2c->data, data, dlen);
+
+	ret = send_and_wait_cmd_retdata(priv,
+					MSG_ID_H2S_WRITE_I2C_REGVAL,
+					i2c, sizeof (buf),
+					NULL, 0);
+	if (ret < 0)
+		return ret;
+
+	/* command can fail due to an i2c transfer error */
+	if (ret > 0)
+		return -EIO;
+
+	return 0;
+}
+
+/*
+ *
+ */
+static int psoc_set_report(struct fbx_psoc *priv, bool enabled)
+{
+	struct cmd_set_report sreport;
+	int ret;
+
+	sreport.status = enabled;
+	ret = send_and_wait_cmd_retdata(priv,
+					MSG_ID_H2S_SET_REPORT,
+					&sreport, sizeof (sreport),
+					NULL, 0);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+/*
+ * set local state from hw state
+ */
+static int psoc_led_init(struct fbx_psoc *priv, enum led led)
+{
+	const struct psoc_led_desc *desc;
+	struct psoc_led *pled;
+	uint8_t hw_led, state;
+
+	if (led >= LED_LAST)
+		return -EINVAL;
+
+	desc = &led_descs[led];
+	pled = &priv->leds[led];
+
+	hw_led = desc->hw_led + 16 * desc->controller;
+	if (psoc_get_hw_led_state(priv, hw_led, &state))
+		return 1;
+
+	pled->cur_hw_state = state;
+
+	switch (pled->cur_hw_state) {
+	case 0:
+	default:
+		pled->action = LED_ACTION_OFF;
+		break;
+	case 1:
+		pled->action = LED_ACTION_ON;
+		break;
+	case 2:
+		pled->action = LED_ACTION_PWM0;
+		break;
+	case 3:
+		pled->action = LED_ACTION_PWM1;
+		break;
+	}
+
+	return 0;
+}
+
+/*
+ * update hw state after change
+ */
+static int psoc_led_update(struct fbx_psoc *priv, enum led led)
+{
+	const struct psoc_led_desc *desc;
+	struct psoc_led *pled;
+	u8 hw_led;
+	u8 state;
+
+	if (led >= LED_LAST)
+		return -EINVAL;
+
+	desc = &led_descs[led];
+	pled = &priv->leds[led];
+
+	switch (pled->action) {
+	case LED_ACTION_OFF:
+		state = 0;
+		break;
+	case LED_ACTION_ON:
+		state = 1;
+		break;
+	case LED_ACTION_PWM0:
+		state = 2;
+		break;
+	case LED_ACTION_PWM1:
+		state = 3;
+		break;
+	}
+
+	hw_led = desc->hw_led + 16 * desc->controller;
+	psoc_set_hw_led_state(priv, hw_led, state);
+
+	return 0;
+}
+
+/*
+ *
+ */
+static ssize_t
+psoc_misc_read(struct file *file, char __user *buf, size_t size, loff_t * pos)
+{
+	struct psoc_client *client = file->private_data;
+	struct fbx_psoc *priv = client->priv;
+	size_t done;
+
+	if (size % sizeof (struct psoc_report))
+		return -EINVAL;
+
+	if (!size)
+		return 0;
+
+	mutex_lock(&priv->client_lock);
+	if (client->report_tail == client->report_head) {
+		mutex_unlock(&priv->client_lock);
+
+		if (file->f_flags & O_NONBLOCK)
+			return -EAGAIN;
+
+		if (wait_event_interruptible(client->inq,
+			     (client->report_tail != client->report_head)))
+			return -ERESTARTSYS;
+		mutex_lock(&priv->client_lock);
+	}
+
+	done = 0;
+	while (done < size) {
+		if (client->report_head == client->report_tail)
+			break;
+
+		if (copy_to_user(buf, &client->reports[client->report_tail],
+				 sizeof (struct psoc_report))) {
+			mutex_unlock(&priv->client_lock);
+			return -EFAULT;
+		}
+
+		client->report_tail++;
+		if (client->report_tail >= ARRAY_SIZE(client->reports))
+			client->report_tail = 0;
+
+		buf += sizeof (struct psoc_report);
+		done += sizeof (struct psoc_report);
+	}
+
+	mutex_unlock(&priv->client_lock);
+
+	return done;
+}
+
+/*
+ *
+ */
+static unsigned int
+psoc_misc_poll(struct file *file, poll_table *wait)
+{
+	struct psoc_client *client = file->private_data;
+	struct fbx_psoc *priv = client->priv;
+	unsigned int mask = 0;
+
+	mutex_lock(&priv->client_lock);
+	poll_wait(file, &client->inq,  wait);
+	if (client->report_head != client->report_tail)
+		mask |= POLLIN | POLLRDNORM;	/* readable */
+	mutex_unlock(&priv->client_lock);
+	return mask;
+}
+
+/*
+ *
+ */
+static int
+psoc_misc_open(struct inode *inode, struct file *file)
+{
+	struct miscdevice *miscdev = file->private_data;
+	struct fbx_psoc *priv = container_of(miscdev, struct fbx_psoc,
+					     misc_dev);
+	struct psoc_client *client;
+	int ret;
+
+	client = kzalloc(sizeof (*client), GFP_KERNEL);
+	if (!client)
+		return -ENOMEM;
+	client->priv = priv;
+	init_waitqueue_head(&client->inq);
+
+	mutex_lock(&priv->client_lock);
+	if (priv->client) {
+		mutex_unlock(&priv->client_lock);
+		kfree(client);
+		return -EBUSY;
+	}
+
+	priv->client = client;
+	file->private_data = client;
+	mutex_unlock(&priv->client_lock);
+
+	ret = psoc_set_report(priv, true);
+	if (ret) {
+		mutex_lock(&priv->client_lock);
+		priv->client = NULL;
+		mutex_unlock(&priv->client_lock);
+		kfree(client);
+		return ret;
+	}
+
+	nonseekable_open(inode, file);
+	return 0;
+}
+
+/*
+ *
+ */
+static int
+psoc_misc_release(struct inode *inode, struct file *file)
+{
+	struct psoc_client *client = file->private_data;
+	struct fbx_psoc *priv = client->priv;
+
+	psoc_set_report(priv, false);
+
+	mutex_lock(&priv->client_lock);
+	priv->client = NULL;
+	mutex_unlock(&priv->client_lock);
+	kfree(client);
+	return 0;
+}
+
+/*
+ * sysfs callbacks
+ */
+static ssize_t psoc_api_version_show(struct device *dev,
+				     struct device_attribute *attr,
+				     char *buf)
+{
+	struct fbx_psoc *priv = dev_get_drvdata(dev);
+	return sprintf(buf, "%u\n", priv->api_version);
+}
+
+static DEVICE_ATTR_RO(psoc_api_version);
+
+/*
+ *
+ */
+static ssize_t psoc_build_date_show(struct device *dev,
+				    struct device_attribute *attr,
+				    char *buf)
+{
+	struct fbx_psoc *priv = dev_get_drvdata(dev);
+	return sprintf(buf, "%u\n", priv->build_date);
+}
+
+static DEVICE_ATTR_RO(psoc_build_date);
+
+/*
+ *
+ */
+static ssize_t psoc_led_state_show(struct device *dev,
+				   struct device_attribute *attr,
+				   char *buf)
+{
+	struct fbx_psoc *priv = dev_get_drvdata(dev);
+	struct psoc_led *pled;
+	struct dev_ext_attribute *eattr;
+	enum led led;
+
+	eattr = container_of(attr, struct dev_ext_attribute, attr);
+	led = (unsigned long)eattr->var;
+	if (led >= LED_LAST)
+		return -EINVAL;
+
+	pled = &priv->leds[led];
+	return sprintf(buf, "%s\n", led_action_str[pled->action]);
+}
+
+/*
+ *
+ */
+static ssize_t psoc_led_state_store(struct device *dev,
+				    struct device_attribute *attr,
+				    const char *buf, size_t len)
+{
+	struct fbx_psoc *priv = dev_get_drvdata(dev);
+	struct dev_ext_attribute *eattr;
+	unsigned int led;
+	size_t i, read_len, action_value;
+	int ret;
+
+	/* consume leading whitespace one byte at a time */
+	if (is_white_char(*buf))
+		return 1;
+
+	eattr = container_of(attr, struct dev_ext_attribute, attr);
+	led = (unsigned long)eattr->var;
+	if (led >= LED_LAST)
+		return -EINVAL;
+
+	/* lookup action */
+	read_len = 0;
+	for (i = 0; i < ARRAY_SIZE(led_action_str); i++) {
+		size_t action_len = strlen(led_action_str[i]);
+
+		if (len < action_len)
+			continue;
+
+		if (!strncmp(buf, led_action_str[i], action_len) &&
+		    is_white_char(buf[action_len])) {
+			read_len = action_len;
+			action_value = i;
+			break;
+		}
+	}
+
+	if (!read_len) {
+		dev_err(dev, "invalid led action value");
+		return -EINVAL;
+	}
+
+	if (priv->leds[led].action == action_value)
+		return read_len;
+
+	priv->leds[led].action = action_value;
+	ret = psoc_led_update(priv, led);
+	if (ret)
+		return ret;
+
+	return read_len;
+}
+
+#define PSOC_LED_ATTR(_name, _func, _led)				\
+	struct dev_ext_attribute					\
+	dev_attr_psoc_led_ ## _name ## _ ## _func = {			\
+		.attr = __ATTR(psoc_led_ ## _name ## _ ## _func,	\
+			       S_IRUGO | S_IWUSR,			\
+			       psoc_led_ ## _func ## _show,		\
+			       psoc_led_ ## _func ## _store		\
+			),						\
+		.var = (void *) _led,					\
+	}
+
+static PSOC_LED_ATTR(r0_red, state, LED_R0_RED);
+static PSOC_LED_ATTR(r0_green, state, LED_R0_GREEN);
+static PSOC_LED_ATTR(r0_blue, state, LED_R0_BLUE);
+static PSOC_LED_ATTR(r1, state, LED_R1);
+static PSOC_LED_ATTR(r2, state, LED_R2);
+static PSOC_LED_ATTR(r3_red, state, LED_R3_RED);
+static PSOC_LED_ATTR(r3_green, state, LED_R3_GREEN);
+static PSOC_LED_ATTR(r3_blue, state, LED_R3_BLUE);
+static PSOC_LED_ATTR(r4, state, LED_R4);
+static PSOC_LED_ATTR(r5, state, LED_R5);
+static PSOC_LED_ATTR(r6_red, state, LED_R6_RED);
+static PSOC_LED_ATTR(r6_green, state, LED_R6_GREEN);
+static PSOC_LED_ATTR(r6_blue, state, LED_R6_BLUE);
+static PSOC_LED_ATTR(r7, state, LED_R7);
+static PSOC_LED_ATTR(r8, state, LED_R8);
+static PSOC_LED_ATTR(r9_red, state, LED_R9_RED);
+static PSOC_LED_ATTR(r9_green, state, LED_R9_GREEN);
+static PSOC_LED_ATTR(r9_blue, state, LED_R9_BLUE);
+static PSOC_LED_ATTR(r10, state, LED_R10);
+static PSOC_LED_ATTR(r11, state, LED_R11);
+static PSOC_LED_ATTR(bt, state, LED_BT);
+static PSOC_LED_ATTR(voice, state, LED_VOICE);
+
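+/*
+ * From userspace each LED is driven through its sysfs attribute, e.g.
+ * (a sketch; the full sysfs path depends on the SPI bus topology):
+ *
+ *	echo pwm0 > /sys/.../psoc_led_bt_state
+ *
+ * Accepted values are "off", "on", "pwm0" and "pwm1".
+ */
+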
+/*
+ *
+ */
+static ssize_t psoc_pwm_store(struct device *dev,
+			      struct device_attribute *attr,
+			      const char *buf, size_t len)
+{
+	struct fbx_psoc *priv = dev_get_drvdata(dev);
+	struct dev_ext_attribute *eattr;
+	unsigned long att, value;
+	int controller, pwm, reg, ret;
+	u8 i2c_data[2];
+
+	eattr = container_of(attr, struct dev_ext_attribute, attr);
+	att = (unsigned long)eattr->var;
+	controller = (att >> 8) & 0xf;
+	pwm = (att >> 4) & 0xf;
+	reg = att & 0xf;
+	if (pwm > 1)
+		return -EINVAL;
+	if (reg > 1)
+		return -EINVAL;
+
+	ret = kstrtoul(buf, 0, &value);
+	if (ret)
+		return ret;
+
+	if (value > 255)
+		return -EINVAL;
+
+	i2c_data[0] = 0x2 + pwm * 2 + reg;
+	i2c_data[1] = value;
+	ret = psoc_i2c_write(priv,
+			     controller ?
+			     LED_CTRL1_I2C_ADDRESS : LED_CTRL0_I2C_ADDRESS,
+			     i2c_data, sizeof (i2c_data));
+	if (ret)
+		return ret;
+
+	return len;
+}
+
+#define PSOC_PWM_ATTR(_name, _func, _pwm)				\
+	struct dev_ext_attribute					\
+	dev_attr_psoc_pwm_ ## _name ## _ ## _func = {			\
+		.attr = __ATTR(psoc_pwm_ ## _name ## _ ## _func,	\
+			       S_IWUSR,					\
+			       NULL,					\
+			       psoc_pwm_store				\
+			),						\
+		.var = (void *)(_pwm),					\
+	}
+
+static PSOC_PWM_ATTR(c0p0, psc, (0 << 8) | (0 << 4) | 0);
+static PSOC_PWM_ATTR(c0p0, pwm, (0 << 8) | (0 << 4) | 1);
+static PSOC_PWM_ATTR(c0p1, psc, (0 << 8) | (1 << 4) | 0);
+static PSOC_PWM_ATTR(c0p1, pwm, (0 << 8) | (1 << 4) | 1);
+static PSOC_PWM_ATTR(c1p0, psc, (1 << 8) | (0 << 4) | 0);
+static PSOC_PWM_ATTR(c1p0, pwm, (1 << 8) | (0 << 4) | 1);
+static PSOC_PWM_ATTR(c1p1, psc, (1 << 8) | (1 << 4) | 0);
+static PSOC_PWM_ATTR(c1p1, pwm, (1 << 8) | (1 << 4) | 1);
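+
+/*
+ * For example (a sketch): writing "128" to psoc_pwm_c0p0_pwm issues an
+ * I2C write of { 0x03, 0x80 } to the LED controller 0 chip at
+ * LED_CTRL0_I2C_ADDRESS, presumably its PWM0 duty register.
+ */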
+
+static struct attribute *sysfs_attrs_ctrl[] = {
+	&dev_attr_psoc_api_version.attr,
+	&dev_attr_psoc_build_date.attr,
+
+	&dev_attr_psoc_led_r0_red_state.attr.attr,
+	&dev_attr_psoc_led_r0_green_state.attr.attr,
+	&dev_attr_psoc_led_r0_blue_state.attr.attr,
+	&dev_attr_psoc_led_r1_state.attr.attr,
+	&dev_attr_psoc_led_r2_state.attr.attr,
+	&dev_attr_psoc_led_r3_red_state.attr.attr,
+	&dev_attr_psoc_led_r3_green_state.attr.attr,
+	&dev_attr_psoc_led_r3_blue_state.attr.attr,
+	&dev_attr_psoc_led_r4_state.attr.attr,
+	&dev_attr_psoc_led_r5_state.attr.attr,
+	&dev_attr_psoc_led_r6_red_state.attr.attr,
+	&dev_attr_psoc_led_r6_green_state.attr.attr,
+	&dev_attr_psoc_led_r6_blue_state.attr.attr,
+	&dev_attr_psoc_led_r7_state.attr.attr,
+	&dev_attr_psoc_led_r8_state.attr.attr,
+	&dev_attr_psoc_led_r9_red_state.attr.attr,
+	&dev_attr_psoc_led_r9_green_state.attr.attr,
+	&dev_attr_psoc_led_r9_blue_state.attr.attr,
+	&dev_attr_psoc_led_r10_state.attr.attr,
+	&dev_attr_psoc_led_r11_state.attr.attr,
+	&dev_attr_psoc_led_bt_state.attr.attr,
+	&dev_attr_psoc_led_voice_state.attr.attr,
+
+	&dev_attr_psoc_pwm_c0p0_pwm.attr.attr,
+	&dev_attr_psoc_pwm_c0p0_psc.attr.attr,
+	&dev_attr_psoc_pwm_c0p1_pwm.attr.attr,
+	&dev_attr_psoc_pwm_c0p1_psc.attr.attr,
+	&dev_attr_psoc_pwm_c1p0_pwm.attr.attr,
+	&dev_attr_psoc_pwm_c1p0_psc.attr.attr,
+	&dev_attr_psoc_pwm_c1p1_pwm.attr.attr,
+	&dev_attr_psoc_pwm_c1p1_psc.attr.attr,
+
+	NULL
+};
+
+static const struct attribute_group psoc_attribute_group[] = {
+	{.attrs = sysfs_attrs_ctrl },
+};
+
+/*
+ *
+ */
+static int init_leds(struct fbx_psoc *priv)
+{
+	size_t i;
+
+	/* read initial led state */
+	for (i = 0; i < ARRAY_SIZE(priv->leds); i++) {
+		int ret;
+
+		ret = psoc_led_init(priv, i);
+		if (ret)
+			return ret;
+	}
+	return 0;
+}
+
+static const struct file_operations psoc_fops = {
+	.open		= psoc_misc_open,
+	.read		= psoc_misc_read,
+	.poll		= psoc_misc_poll,
+	.release	= psoc_misc_release,
+	.owner		= THIS_MODULE,
+	.llseek		= noop_llseek,
+};
+
+/*
+ * SPI device probe callback
+ */
+static int fbx7hd_top_psoc_spi_probe(struct spi_device *spi)
+{
+	struct fbx_psoc *priv;
+	int ret;
+
+	priv = devm_kzalloc(&spi->dev, sizeof (*priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+
+	priv->reset_gpio = devm_gpiod_get_optional(&spi->dev, "reset",
+						   GPIOD_OUT_LOW);
+	if (IS_ERR(priv->reset_gpio)) {
+		ret = PTR_ERR(priv->reset_gpio);
+		if (ret != -EPROBE_DEFER)
+			dev_err(&spi->dev,
+				"failed to get reset gpio :%d", ret);
+		return ret;
+	}
+
+	if (priv->reset_gpio) {
+		gpiod_set_value(priv->reset_gpio, 0);
+		msleep(100);
+	}
+
+	priv->irq_gpio = devm_gpiod_get_optional(&spi->dev, "irq", GPIOD_IN);
+	if (IS_ERR(priv->irq_gpio)) {
+		ret = PTR_ERR(priv->irq_gpio);
+		if (ret != -EPROBE_DEFER)
+			dev_err(&spi->dev,
+				"failed to get irq gpio :%d", ret);
+		return ret;
+	}
+
+	priv->spi = spi;
+	INIT_LIST_HEAD(&priv->cmd_queue);
+	spin_lock_init(&priv->cmd_queue_lock);
+	INIT_DELAYED_WORK(&priv->rxtx_work, psoc_rxtx_work_handler);
+	mutex_init(&priv->client_lock);
+
+	if (psoc_read_version(priv)) {
+		ret = -EIO;
+		goto fail;
+	}
+
+	if (init_leds(priv)) {
+		ret = -EIO;
+		goto fail;
+	}
+
+	/* set drvdata before sysfs registration so attribute callbacks
+	 * always see a valid pointer */
+	dev_set_drvdata(&spi->dev, priv);
+
+	ret = sysfs_create_group(&spi->dev.kobj,
+				 psoc_attribute_group);
+	if (ret < 0) {
+		dev_err(&spi->dev, "Sysfs registration failed\n");
+		goto fail;
+	}
+
+	priv->misc_dev.minor = MISC_DYNAMIC_MINOR;
+	priv->misc_dev.name = "fbx7hd-psoc";
+	priv->misc_dev.fops = &psoc_fops;
+
+	ret = misc_register(&priv->misc_dev);
+	if (ret)
+		goto fail_sysfs;
+
+	dev_info(&spi->dev,
+		 "Freebox v7HD top PSOC device, api:0x%02x build:%u\n",
+		 priv->api_version,
+		 priv->build_date);
+
+	return 0;
+
+fail_sysfs:
+	sysfs_remove_group(&spi->dev.kobj, psoc_attribute_group);
+
+fail:
+	cancel_delayed_work_sync(&priv->rxtx_work);
+	return ret;
+}
+
+/*
+ * spi device remove callback
+ */
+static int fbx7hd_top_psoc_spi_remove(struct spi_device *spi)
+{
+	struct fbx_psoc *priv;
+
+	priv = dev_get_drvdata(&spi->dev);
+	cancel_delayed_work_sync(&priv->rxtx_work);
+	misc_deregister(&priv->misc_dev);
+	sysfs_remove_group(&spi->dev.kobj, psoc_attribute_group);
+	return 0;
+}
+
+static const struct of_device_id fbx7hd_top_psoc_match[] = {
+	{ .compatible = "freebox,fbx7hd-top-psoc" },
+	{ },
+};
+
+static struct spi_driver fbx7hd_top_psoc_spi_driver = {
+	.driver = {
+		.name	= "fbx7hd_top_psoc",
+		.of_match_table = of_match_ptr(fbx7hd_top_psoc_match),
+	},
+
+	.probe		= fbx7hd_top_psoc_spi_probe,
+	.remove		= fbx7hd_top_psoc_spi_remove,
+};
+
+static int __init fbx7hd_top_psoc_spi_init(void)
+{
+	int ret;
+
+	ret = spi_register_driver(&fbx7hd_top_psoc_spi_driver);
+	if (ret != 0)
+		pr_err("Failed to register fbx7hd top psoc driver: %d\n", ret);
+
+	return ret;
+}
+
+static void __exit fbx7hd_top_psoc_spi_exit(void)
+{
+	spi_unregister_driver(&fbx7hd_top_psoc_spi_driver);
+}
+
+module_init(fbx7hd_top_psoc_spi_init);
+module_exit(fbx7hd_top_psoc_spi_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Maxime Bizon");
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/mfd/fbx7hd-top-psoc.h	2019-01-22 16:16:24.671256983 +0100
@@ -0,0 +1,103 @@
+#ifndef FBX7HD_TOP_PSOC_H_
+#define FBX7HD_TOP_PSOC_H_
+
+#define LED_CTRL0_I2C_ADDRESS	0x60
+#define LED_CTRL1_I2C_ADDRESS	0x61
+
+#define MSG_MAX_LEN	16
+
+#define DELIM_CHAR	0x00
+#define IGNORE_CHAR	0xff
+
+#define ESCAPE_CHAR	0xfe
+#define ESCAPED_DELIM	0x01
+#define ESCAPED_IGNORE	0x02
+#define ESCAPED_ESCAPE	0x03
+
+enum msg_id {
+	/* host => psoc */
+	MSG_ID_H2S_GET_VERSION		= 0x0,
+	MSG_ID_H2S_WRITE_I2C_REGVAL	= 0x1,
+	MSG_ID_H2S_SET_LED		= 0x2,
+	MSG_ID_H2S_GET_LED		= 0x3,
+	MSG_ID_H2S_SET_REPORT		= 0x4,
+
+	/* psoc => host */
+	MSG_ID_S2H_BTN_REPORT		= 0x8,
+	MSG_ID_S2H_TOUCH_REPORT		= 0x9,
+};
+
+enum msg_type {
+	MSG_TYPE_CMD = 0,
+	MSG_TYPE_ACK = 1,
+};
+
+#define MSG_ID_MASK			0x0f
+#define MSG_FLAG_NOACK_MASK		(1 << 4)
+#define MSG_FLAGS_MASK			(0x3 << 4)
+#define MSG_TYPE_SHIFT			6
+#define MSG_TYPE_MASK			(1 << MSG_TYPE_SHIFT)
+
+struct cmd_hdr {
+	u8		len;
+	u8		msg_flags;
+	u8		txid;
+};
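+
+/*
+ * Example exchange (inferred from the driver, not from a spec): a
+ * GET_VERSION command is the 3-byte frame { len = 3, msg_flags = 0x00
+ * (MSG_TYPE_CMD | MSG_ID_H2S_GET_VERSION), txid }.  The ack echoes
+ * the txid with MSG_TYPE_ACK set in msg_flags, followed by the
+ * one-byte retval and the 5-byte cmd_get_version_ack payload,
+ * i.e. len = 3 + 1 + 5 = 9.
+ */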
+
+enum msg_ack_retval {
+	MSG_ACK_RET_OK			= 0,
+	MSG_ACK_RET_BAD_FORMAT		= 1,
+	MSG_ACK_RET_BAD_ARGS		= 2,
+	MSG_ACK_RET_UNKNOWN_MSGID	= 3,
+
+	MSG_ACK_RET_WRITE_I2C_FAIL	= 0x10,
+	MSG_ACK_RET_SET_LED_FAIL	= 0x10,
+};
+
+struct cmd_ack_retval {
+	u8		retval;
+};
+
+struct cmd_get_version_ack {
+	u8		api_version;
+	u32		build_date;
+} __attribute__((packed));
+
+struct cmd_write_i2c_regval {
+	u8		address;
+	u8		data[0];
+};
+
+struct cmd_set_led {
+	u8		led;
+	u8		val;
+};
+
+struct cmd_get_led {
+	u8		led;
+};
+
+struct cmd_get_led_ack {
+	u8		val;
+};
+
+struct cmd_set_report {
+	u8		status;
+};
+
+#define BTN_REPORT_BTN_MASK	0xf
+#define BTN_REPORT_F_PRESSED	(1 << 4)
+
+struct cmd_btn_report {
+	u8		btn_state;
+};
+
+#define TOUCH_REPORT_F_PRESSED	(1 << 0)
+
+struct cmd_touch_report {
+	u8		flags;
+	u8		x;
+	u8		y;
+};
+
+#endif /* FBX7HD_TOP_PSOC_H_ */
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/mfd/msm-cdc-pinctrl.c	2019-01-22 16:16:24.675257020 +0100
@@ -0,0 +1,245 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/gpio.h>
+#include <linux/of_gpio.h>
+#include <linux/mfd/msm-cdc-pinctrl.h>
+
+struct msm_cdc_pinctrl_info {
+	struct pinctrl *pinctrl;
+	struct pinctrl_state *pinctrl_active;
+	struct pinctrl_state *pinctrl_sleep;
+	int gpio;
+	bool state;
+};
+
+static struct msm_cdc_pinctrl_info *msm_cdc_pinctrl_get_gpiodata(
+						struct device_node *np)
+{
+	struct platform_device *pdev;
+	struct msm_cdc_pinctrl_info *gpio_data;
+
+	if (!np) {
+		pr_err("%s: device node is null\n", __func__);
+		return NULL;
+	}
+
+	pdev = of_find_device_by_node(np);
+	if (!pdev) {
+		pr_err("%s: platform device not found!\n", __func__);
+		return NULL;
+	}
+
+	gpio_data = dev_get_drvdata(&pdev->dev);
+	if (!gpio_data)
+		dev_err(&pdev->dev, "%s: cannot find cdc gpio info\n",
+			__func__);
+
+	return gpio_data;
+}
+
+/*
+ * msm_cdc_get_gpio_state: get the current value of the codec GPIO
+ * @np: pointer to struct device_node
+ *
+ * Returns error code for failure and GPIO value on success
+ */
+int msm_cdc_get_gpio_state(struct device_node *np)
+{
+	struct msm_cdc_pinctrl_info *gpio_data;
+	int value = -EINVAL;
+
+	gpio_data = msm_cdc_pinctrl_get_gpiodata(np);
+	if (!gpio_data)
+		return value;
+
+	if (gpio_is_valid(gpio_data->gpio))
+		value = gpio_get_value_cansleep(gpio_data->gpio);
+
+	return value;
+}
+EXPORT_SYMBOL(msm_cdc_get_gpio_state);
+
+/*
+ * msm_cdc_pinctrl_select_sleep_state: select pinctrl sleep state
+ * @np: pointer to struct device_node
+ *
+ * Returns error code for failure
+ */
+int msm_cdc_pinctrl_select_sleep_state(struct device_node *np)
+{
+	struct msm_cdc_pinctrl_info *gpio_data;
+
+	gpio_data = msm_cdc_pinctrl_get_gpiodata(np);
+	if (!gpio_data)
+		return -EINVAL;
+
+	if (!gpio_data->pinctrl_sleep) {
+		pr_err("%s: pinctrl sleep state is null\n", __func__);
+		return -EINVAL;
+	}
+	gpio_data->state = false;
+
+	return pinctrl_select_state(gpio_data->pinctrl,
+				    gpio_data->pinctrl_sleep);
+}
+EXPORT_SYMBOL(msm_cdc_pinctrl_select_sleep_state);
+
+/*
+ * msm_cdc_pinctrl_select_active_state: select pinctrl active state
+ * @np: pointer to struct device_node
+ *
+ * Returns error code for failure
+ */
+int msm_cdc_pinctrl_select_active_state(struct device_node *np)
+{
+	struct msm_cdc_pinctrl_info *gpio_data;
+
+	gpio_data = msm_cdc_pinctrl_get_gpiodata(np);
+	if (!gpio_data)
+		return -EINVAL;
+
+	if (!gpio_data->pinctrl_active) {
+		pr_err("%s: pinctrl active state is null\n", __func__);
+		return -EINVAL;
+	}
+	gpio_data->state = true;
+
+	return pinctrl_select_state(gpio_data->pinctrl,
+				    gpio_data->pinctrl_active);
+}
+EXPORT_SYMBOL(msm_cdc_pinctrl_select_active_state);
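+
+/*
+ * A typical caller (a sketch; "qcom,cdc-gpio-node" is a hypothetical
+ * phandle property) resolves the pinctrl node from its own DT and
+ * toggles it around codec accesses:
+ *
+ *	np = of_parse_phandle(dev->of_node, "qcom,cdc-gpio-node", 0);
+ *	msm_cdc_pinctrl_select_active_state(np);
+ *	... access codec ...
+ *	msm_cdc_pinctrl_select_sleep_state(np);
+ */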
+
+/*
+ * msm_cdc_pinctrl_get_state: get current pinctrl state
+ * @np: pointer to struct device_node
+ *
+ * Returns 0 for sleep state, 1 for active state
+ */
+bool msm_cdc_pinctrl_get_state(struct device_node *np)
+{
+	struct msm_cdc_pinctrl_info *gpio_data;
+
+	gpio_data = msm_cdc_pinctrl_get_gpiodata(np);
+	if (!gpio_data)
+		return -EINVAL;
+
+	return gpio_data->state;
+}
+EXPORT_SYMBOL(msm_cdc_pinctrl_get_state);
+
+static int msm_cdc_pinctrl_probe(struct platform_device *pdev)
+{
+	int ret = 0;
+	struct msm_cdc_pinctrl_info *gpio_data;
+
+	gpio_data = devm_kzalloc(&pdev->dev,
+				 sizeof(struct msm_cdc_pinctrl_info),
+				 GFP_KERNEL);
+	if (!gpio_data)
+		return -ENOMEM;
+
+	gpio_data->pinctrl = devm_pinctrl_get(&pdev->dev);
+	if (IS_ERR_OR_NULL(gpio_data->pinctrl)) {
+		dev_err(&pdev->dev, "%s: Cannot get cdc gpio pinctrl:%ld\n",
+			__func__, PTR_ERR(gpio_data->pinctrl));
+		ret = PTR_ERR(gpio_data->pinctrl);
+		goto err_pctrl_get;
+	}
+
+	gpio_data->pinctrl_active = pinctrl_lookup_state(
+					gpio_data->pinctrl, "aud_active");
+	if (IS_ERR_OR_NULL(gpio_data->pinctrl_active)) {
+		dev_err(&pdev->dev, "%s: Cannot get aud_active pinctrl state:%ld\n",
+			__func__, PTR_ERR(gpio_data->pinctrl_active));
+		ret = PTR_ERR(gpio_data->pinctrl_active);
+		goto err_lookup_state;
+	}
+
+	gpio_data->pinctrl_sleep = pinctrl_lookup_state(
+					gpio_data->pinctrl, "aud_sleep");
+	if (IS_ERR_OR_NULL(gpio_data->pinctrl_sleep)) {
+		dev_err(&pdev->dev, "%s: Cannot get aud_sleep pinctrl state:%ld\n",
+			__func__, PTR_ERR(gpio_data->pinctrl_sleep));
+		ret = PTR_ERR(gpio_data->pinctrl_sleep);
+		goto err_lookup_state;
+	}
+	/* skip setting to sleep state for LPI_TLMM GPIOs */
+	if (!of_property_read_bool(pdev->dev.of_node, "qcom,lpi-gpios")) {
+		/* Set pinctrl state to aud_sleep by default */
+		ret = pinctrl_select_state(gpio_data->pinctrl,
+					   gpio_data->pinctrl_sleep);
+		if (ret)
+			dev_err(&pdev->dev, "%s: set cdc gpio sleep state fail: %d\n",
+				__func__, ret);
+	}
+
+	gpio_data->gpio = of_get_named_gpio(pdev->dev.of_node,
+					    "qcom,cdc-rst-n-gpio", 0);
+	if (gpio_is_valid(gpio_data->gpio)) {
+		ret = gpio_request(gpio_data->gpio, "MSM_CDC_RESET");
+		if (ret) {
+			dev_err(&pdev->dev, "%s: Failed to request gpio %d\n",
+				__func__, gpio_data->gpio);
+			goto err_lookup_state;
+		}
+	}
+
+	dev_set_drvdata(&pdev->dev, gpio_data);
+	return 0;
+
+err_lookup_state:
+	devm_pinctrl_put(gpio_data->pinctrl);
+err_pctrl_get:
+	devm_kfree(&pdev->dev, gpio_data);
+	return ret;
+}
+
+static int msm_cdc_pinctrl_remove(struct platform_device *pdev)
+{
+	struct msm_cdc_pinctrl_info *gpio_data;
+
+	gpio_data = dev_get_drvdata(&pdev->dev);
+
+	if (gpio_data && gpio_data->pinctrl)
+		devm_pinctrl_put(gpio_data->pinctrl);
+
+	devm_kfree(&pdev->dev, gpio_data);
+
+	return 0;
+}
+
+static const struct of_device_id msm_cdc_pinctrl_match[] = {
+	{.compatible = "qcom,msm-cdc-pinctrl"},
+	{}
+};
+
+static struct platform_driver msm_cdc_pinctrl_driver = {
+	.driver = {
+		.name = "msm-cdc-pinctrl",
+		.owner = THIS_MODULE,
+		.of_match_table = msm_cdc_pinctrl_match,
+	},
+	.probe = msm_cdc_pinctrl_probe,
+	.remove = msm_cdc_pinctrl_remove,
+};
+module_platform_driver(msm_cdc_pinctrl_driver);
+
+MODULE_DESCRIPTION("MSM CODEC pin control platform driver");
+MODULE_LICENSE("GPL v2");
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/mfd/msm-cdc-supply.c	2019-01-22 16:16:24.675257020 +0100
@@ -0,0 +1,457 @@
+/*
+ * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_irq.h>
+#include <linux/of_device.h>
+#include <linux/slab.h>
+#include <linux/mfd/msm-cdc-supply.h>
+#include <linux/regulator/consumer.h>
+
+#define CODEC_DT_MAX_PROP_SIZE 40
+
+static int msm_cdc_dt_parse_vreg_info(struct device *dev,
+				      struct cdc_regulator *cdc_vreg,
+				      const char *name, bool is_ond)
+{
+	char prop_name[CODEC_DT_MAX_PROP_SIZE];
+	struct device_node *regulator_node = NULL;
+	const __be32 *prop;
+	int len, rc;
+	u32 prop_val;
+
+	/* Parse supply name */
+	snprintf(prop_name, CODEC_DT_MAX_PROP_SIZE, "%s-supply", name);
+
+	regulator_node = of_parse_phandle(dev->of_node, prop_name, 0);
+	if (!regulator_node) {
+		dev_err(dev, "%s: Looking up %s property in node %s failed",
+			__func__, prop_name, dev->of_node->full_name);
+		rc = -EINVAL;
+		goto done;
+	}
+	cdc_vreg->name = name;
+	cdc_vreg->ondemand = is_ond;
+
+	/* Parse supply - voltage */
+	snprintf(prop_name, CODEC_DT_MAX_PROP_SIZE, "qcom,%s-voltage", name);
+	prop = of_get_property(dev->of_node, prop_name, &len);
+	if (!prop || (len != (2 * sizeof(__be32)))) {
+		dev_err(dev, "%s: %s %s property\n", __func__,
+			prop ? "invalid format" : "no", prop_name);
+		rc = -EINVAL;
+		goto done;
+	} else {
+		cdc_vreg->min_uV = be32_to_cpup(&prop[0]);
+		cdc_vreg->max_uV = be32_to_cpup(&prop[1]);
+	}
+
+	/* Parse supply - current */
+	snprintf(prop_name, CODEC_DT_MAX_PROP_SIZE, "qcom,%s-current", name);
+	rc = of_property_read_u32(dev->of_node, prop_name, &prop_val);
+	if (rc) {
+		dev_err(dev, "%s: Looking up %s property in node %s failed",
+			__func__, prop_name, dev->of_node->full_name);
+		goto done;
+	}
+	cdc_vreg->optimum_uA = prop_val;
+
+	dev_info(dev, "%s: %s: vol=[%d %d]uV, curr=[%d]uA, ond %d\n",
+		 __func__, cdc_vreg->name, cdc_vreg->min_uV, cdc_vreg->max_uV,
+		 cdc_vreg->optimum_uA, cdc_vreg->ondemand);
+
+done:
+	return rc;
+}
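+
+/*
+ * A matching DT fragment would look like this (a sketch; the supply
+ * name and the regulator phandle are hypothetical):
+ *
+ *	cdc-vdd-buck-supply = <&some_regulator>;
+ *	qcom,cdc-vdd-buck-voltage = <1800000 1800000>;
+ *	qcom,cdc-vdd-buck-current = <650000>;
+ */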
+
+static int msm_cdc_parse_supplies(struct device *dev,
+				  struct cdc_regulator *cdc_reg,
+				  const char *sup_list, int sup_cnt,
+				  bool is_ond)
+{
+	int idx, rc = 0;
+	const char *name = NULL;
+
+	for (idx = 0; idx < sup_cnt; idx++) {
+		rc = of_property_read_string_index(dev->of_node, sup_list, idx,
+						   &name);
+		if (rc) {
+			dev_err(dev, "%s: read string %s[%d] error (%d)\n",
+				__func__, sup_list, idx, rc);
+			goto done;
+		}
+
+		dev_dbg(dev, "%s: Found cdc supply %s as part of %s\n",
+			__func__, name, sup_list);
+
+		rc = msm_cdc_dt_parse_vreg_info(dev, &cdc_reg[idx], name,
+						is_ond);
+		if (rc) {
+			dev_err(dev, "%s: parse %s vreg info failed (%d)\n",
+				__func__, name, rc);
+			goto done;
+		}
+	}
+
+done:
+	return rc;
+}
+
+static int msm_cdc_check_supply_param(struct device *dev,
+				      struct cdc_regulator *cdc_vreg,
+				      int num_supplies)
+{
+	if (!dev) {
+		pr_err("%s: device is NULL\n", __func__);
+		return -ENODEV;
+	}
+
+	if (!cdc_vreg || (num_supplies <= 0)) {
+		dev_err(dev, "%s: supply check failed: vreg: %pK, num_supplies: %d\n",
+			__func__, cdc_vreg, num_supplies);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/*
+ * msm_cdc_disable_static_supplies:
+ *	Disable codec static supplies
+ *
+ * @dev: pointer to codec device
+ * @supplies: pointer to regulator bulk data
+ * @cdc_vreg: pointer to platform regulator data
+ * @num_supplies: number of supplies
+ *
+ * Return error code if supply disable is failed
+ */
+int msm_cdc_disable_static_supplies(struct device *dev,
+				    struct regulator_bulk_data *supplies,
+				    struct cdc_regulator *cdc_vreg,
+				    int num_supplies)
+{
+	int rc, i;
+
+	if ((!dev) || (!supplies) || (!cdc_vreg)) {
+		pr_err("%s: either dev or supplies or cdc_vreg is NULL\n",
+				__func__);
+		return -EINVAL;
+	}
+	/* input parameter validation */
+	rc = msm_cdc_check_supply_param(dev, cdc_vreg, num_supplies);
+	if (rc)
+		return rc;
+
+	for (i = 0; i < num_supplies; i++) {
+		if (cdc_vreg[i].ondemand)
+			continue;
+
+		rc = regulator_disable(supplies[i].consumer);
+		if (rc)
+			dev_err(dev, "%s: failed to disable supply %s, err:%d\n",
+				__func__, supplies[i].supply, rc);
+		else
+			dev_dbg(dev, "%s: disabled regulator %s\n",
+				__func__, supplies[i].supply);
+	}
+
+	return rc;
+}
+EXPORT_SYMBOL(msm_cdc_disable_static_supplies);
+
+/*
+ * msm_cdc_release_supplies:
+ *	Release codec power supplies
+ *
+ * @dev: pointer to codec device
+ * @supplies: pointer to regulator bulk data
+ * @cdc_vreg: pointer to platform regulator data
+ * @num_supplies: number of supplies
+ *
+ * Return error code if supply disable is failed
+ */
+int msm_cdc_release_supplies(struct device *dev,
+			     struct regulator_bulk_data *supplies,
+			     struct cdc_regulator *cdc_vreg,
+			     int num_supplies)
+{
+	int rc = 0;
+	int i;
+
+	if ((!dev) || (!supplies) || (!cdc_vreg)) {
+		pr_err("%s: either dev or supplies or cdc_vreg is NULL\n",
+				__func__);
+		return -EINVAL;
+	}
+	/* input parameter validation */
+	rc = msm_cdc_check_supply_param(dev, cdc_vreg, num_supplies);
+	if (rc)
+		return rc;
+
+	msm_cdc_disable_static_supplies(dev, supplies, cdc_vreg,
+					num_supplies);
+	for (i = 0; i < num_supplies; i++) {
+		if (regulator_count_voltages(supplies[i].consumer) < 0)
+			continue;
+
+		regulator_set_voltage(supplies[i].consumer, 0,
+				      cdc_vreg[i].max_uV);
+		regulator_set_load(supplies[i].consumer, 0);
+		devm_regulator_put(supplies[i].consumer);
+		supplies[i].consumer = NULL;
+	}
+	devm_kfree(dev, supplies);
+
+	return rc;
+}
+EXPORT_SYMBOL(msm_cdc_release_supplies);
+
+/*
+ * msm_cdc_enable_static_supplies:
+ *	Enable codec static supplies
+ *
+ * @dev: pointer to codec device
+ * @supplies: pointer to regulator bulk data
+ * @cdc_vreg: pointer to platform regulator data
+ * @num_supplies: number of supplies
+ *
+ * Return error code if supply enable is failed
+ */
+int msm_cdc_enable_static_supplies(struct device *dev,
+				   struct regulator_bulk_data *supplies,
+				   struct cdc_regulator *cdc_vreg,
+				   int num_supplies)
+{
+	int rc, i;
+
+	if ((!dev) || (!supplies) || (!cdc_vreg)) {
+		pr_err("%s: either dev or supplies or cdc_vreg is NULL\n",
+				__func__);
+		return -EINVAL;
+	}
+	/* input parameter validation */
+	rc = msm_cdc_check_supply_param(dev, cdc_vreg, num_supplies);
+	if (rc)
+		return rc;
+
+	for (i = 0; i < num_supplies; i++) {
+		if (cdc_vreg[i].ondemand)
+			continue;
+
+		rc = regulator_enable(supplies[i].consumer);
+		if (rc) {
+			dev_err(dev, "%s: failed to enable supply %s, rc: %d\n",
+				__func__, supplies[i].supply, rc);
+			break;
+		}
+	}
+
+	while (rc && i--)
+		if (!cdc_vreg[i].ondemand)
+			regulator_disable(supplies[i].consumer);
+
+	return rc;
+}
+EXPORT_SYMBOL(msm_cdc_enable_static_supplies);
+
+/*
+ * msm_cdc_init_supplies:
+ *	Initialize codec static supplies with regulator get
+ *
+ * @dev: pointer to codec device
+ * @supplies: pointer to regulator bulk data
+ * @cdc_vreg: pointer to platform regulator data
+ * @num_supplies: number of supplies
+ *
+ * Return error code if supply init is failed
+ */
+int msm_cdc_init_supplies(struct device *dev,
+			  struct regulator_bulk_data **supplies,
+			  struct cdc_regulator *cdc_vreg,
+			  int num_supplies)
+{
+	struct regulator_bulk_data *vsup;
+	int rc;
+	int i;
+
+	if (!dev || !cdc_vreg) {
+		pr_err("%s: device pointer or dce_vreg is NULL\n",
+				__func__);
+		return -EINVAL;
+	}
+	/* input parameter validation */
+	rc = msm_cdc_check_supply_param(dev, cdc_vreg, num_supplies);
+	if (rc)
+		return rc;
+
+	vsup = devm_kcalloc(dev, num_supplies,
+			    sizeof(struct regulator_bulk_data),
+			    GFP_KERNEL);
+	if (!vsup)
+		return -ENOMEM;
+
+	for (i = 0; i < num_supplies; i++) {
+		if (!cdc_vreg[i].name) {
+			dev_err(dev, "%s: supply name not defined\n",
+				__func__);
+			rc = -EINVAL;
+			goto err_supply;
+		}
+		vsup[i].supply = cdc_vreg[i].name;
+	}
+
+	rc = devm_regulator_bulk_get(dev, num_supplies, vsup);
+	if (rc) {
+		dev_err(dev, "%s: failed to get supplies (%d)\n",
+			__func__, rc);
+		goto err_supply;
+	}
+
+	/* Set voltage and current on regulators */
+	for (i = 0; i < num_supplies; i++) {
+		if (regulator_count_voltages(vsup[i].consumer) < 0)
+			continue;
+
+		rc = regulator_set_voltage(vsup[i].consumer,
+					   cdc_vreg[i].min_uV,
+					   cdc_vreg[i].max_uV);
+		if (rc) {
+			dev_err(dev, "%s: set regulator voltage failed for %s, err:%d\n",
+				__func__, vsup[i].supply, rc);
+			goto err_set_supply;
+		}
+		rc = regulator_set_load(vsup[i].consumer,
+					cdc_vreg[i].optimum_uA);
+		if (rc < 0) {
+			dev_err(dev, "%s: set regulator optimum mode failed for %s, err:%d\n",
+				__func__, vsup[i].supply, rc);
+			goto err_set_supply;
+		}
+	}
+
+	*supplies = vsup;
+
+	return 0;
+
+err_set_supply:
+	for (i = 0; i < num_supplies; i++)
+		devm_regulator_put(vsup[i].consumer);
+err_supply:
+	devm_kfree(dev, vsup);
+	return rc;
+}
+EXPORT_SYMBOL(msm_cdc_init_supplies);
+
+/*
+ * msm_cdc_get_power_supplies:
+ *	Get codec power supplies from device tree.
+ *	Allocate memory to hold regulator data for
+ *	all power supplies.
+ *
+ * @dev: pointer to codec device
+ * @cdc_vreg: pointer to codec regulator
+ * @total_num_supplies: total number of supplies read from DT
+ *
+ * Return error code if supply disable is failed
+ */
+int msm_cdc_get_power_supplies(struct device *dev,
+			       struct cdc_regulator **cdc_vreg,
+			       int *total_num_supplies)
+{
+	const char *static_prop_name = "qcom,cdc-static-supplies";
+	const char *ond_prop_name = "qcom,cdc-on-demand-supplies";
+	const char *cp_prop_name = "qcom,cdc-cp-supplies";
+	int static_sup_cnt = 0;
+	int ond_sup_cnt = 0;
+	int cp_sup_cnt = 0;
+	int num_supplies = 0;
+	struct cdc_regulator *cdc_reg;
+	int rc;
+
+	if (!dev) {
+		pr_err("%s: device pointer is NULL\n", __func__);
+		return -EINVAL;
+	}
+	static_sup_cnt = of_property_count_strings(dev->of_node,
+						   static_prop_name);
+	if (IS_ERR_VALUE(static_sup_cnt)) {
+		dev_err(dev, "%s: Failed to get static supplies(%d)\n",
+			__func__, static_sup_cnt);
+		rc = static_sup_cnt;
+		goto err_supply_cnt;
+	}
+	ond_sup_cnt = of_property_count_strings(dev->of_node, ond_prop_name);
+	if (IS_ERR_VALUE(ond_sup_cnt))
+		ond_sup_cnt = 0;
+
+	cp_sup_cnt = of_property_count_strings(dev->of_node,
+					       cp_prop_name);
+	if (IS_ERR_VALUE(cp_sup_cnt))
+		cp_sup_cnt = 0;
+
+	num_supplies = static_sup_cnt + ond_sup_cnt + cp_sup_cnt;
+	if (num_supplies <= 0) {
+		dev_err(dev, "%s: supply count is 0 or negative\n", __func__);
+		rc = -EINVAL;
+		goto err_supply_cnt;
+	}
+
+	cdc_reg = devm_kcalloc(dev, num_supplies,
+			       sizeof(struct cdc_regulator),
+			       GFP_KERNEL);
+	if (!cdc_reg) {
+		rc = -ENOMEM;
+		goto err_mem_alloc;
+	}
+
+	rc = msm_cdc_parse_supplies(dev, cdc_reg, static_prop_name,
+				    static_sup_cnt, false);
+	if (rc) {
+		dev_err(dev, "%s: failed to parse static supplies(%d)\n",
+				__func__, rc);
+		goto err_sup;
+	}
+
+	rc = msm_cdc_parse_supplies(dev, &cdc_reg[static_sup_cnt],
+				    ond_prop_name, ond_sup_cnt,
+				    true);
+	if (rc) {
+		dev_err(dev, "%s: failed to parse demand supplies(%d)\n",
+				__func__, rc);
+		goto err_sup;
+	}
+
+	rc = msm_cdc_parse_supplies(dev,
+				    &cdc_reg[static_sup_cnt + ond_sup_cnt],
+				    cp_prop_name, cp_sup_cnt, true);
+	if (rc) {
+		dev_err(dev, "%s: failed to parse cp supplies(%d)\n",
+				__func__, rc);
+		goto err_sup;
+	}
+
+	*cdc_vreg = cdc_reg;
+	*total_num_supplies = num_supplies;
+
+	return 0;
+
+err_sup:
+	devm_kfree(dev, cdc_reg);
+err_supply_cnt:
+err_mem_alloc:
+	return rc;
+}
+EXPORT_SYMBOL(msm_cdc_get_power_supplies);
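+
+/*
+ * Typical codec-driver usage (a sketch): parse the supplies from DT,
+ * initialize the regulators, then enable them before touching the
+ * hardware:
+ *
+ *	msm_cdc_get_power_supplies(dev, &cdc_vreg, &num);
+ *	msm_cdc_init_supplies(dev, &supplies, cdc_vreg, num);
+ *	msm_cdc_enable_static_supplies(dev, supplies, cdc_vreg, num);
+ */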
+
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/mfd/qcom-i2c-pmic.c	2019-01-22 16:16:24.679257056 +0100
@@ -0,0 +1,681 @@
+/* Copyright (c) 2016-2017 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "I2C PMIC: %s: " fmt, __func__
+
+#include <linux/bitops.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+
+#define I2C_INTR_STATUS_BASE	0x0550
+#define INT_RT_STS_OFFSET	0x10
+#define INT_SET_TYPE_OFFSET	0x11
+#define INT_POL_HIGH_OFFSET	0x12
+#define INT_POL_LOW_OFFSET	0x13
+#define INT_LATCHED_CLR_OFFSET	0x14
+#define INT_EN_SET_OFFSET	0x15
+#define INT_EN_CLR_OFFSET	0x16
+#define INT_LATCHED_STS_OFFSET	0x18
+#define INT_PENDING_STS_OFFSET	0x19
+#define INT_MID_SEL_OFFSET	0x1A
+#define INT_MID_SEL_MASK	GENMASK(1, 0)
+#define INT_PRIORITY_OFFSET	0x1B
+#define INT_PRIORITY_BIT	BIT(0)
+
+enum {
+	IRQ_SET_TYPE = 0,
+	IRQ_POL_HIGH,
+	IRQ_POL_LOW,
+	IRQ_LATCHED_CLR, /* not needed but makes life easy */
+	IRQ_EN_SET,
+	IRQ_MAX_REGS,
+};
+
+struct i2c_pmic_periph {
+	void		*data;
+	u16		addr;
+	u8		cached[IRQ_MAX_REGS];
+	u8		synced[IRQ_MAX_REGS];
+	u8		wake;
+	struct mutex	lock;
+};
+
+struct i2c_pmic {
+	struct device		*dev;
+	struct regmap		*regmap;
+	struct irq_domain	*domain;
+	struct i2c_pmic_periph	*periph;
+	struct pinctrl		*pinctrl;
+	const char		*pinctrl_name;
+	int			num_periphs;
+};
+
+static void i2c_pmic_irq_bus_lock(struct irq_data *d)
+{
+	struct i2c_pmic_periph *periph = irq_data_get_irq_chip_data(d);
+
+	mutex_lock(&periph->lock);
+}
+
+static void i2c_pmic_sync_type_polarity(struct i2c_pmic *chip,
+			       struct i2c_pmic_periph *periph)
+{
+	int rc;
+
+	/* did any irq type change? */
+	if (periph->cached[IRQ_SET_TYPE] ^ periph->synced[IRQ_SET_TYPE]) {
+		rc = regmap_write(chip->regmap,
+				  periph->addr | INT_SET_TYPE_OFFSET,
+				  periph->cached[IRQ_SET_TYPE]);
+		if (rc < 0) {
+			pr_err("Couldn't set periph 0x%04x irqs 0x%02x type rc=%d\n",
+				periph->addr, periph->cached[IRQ_SET_TYPE], rc);
+			return;
+		}
+
+		periph->synced[IRQ_SET_TYPE] = periph->cached[IRQ_SET_TYPE];
+	}
+
+	/* did any polarity high change? */
+	if (periph->cached[IRQ_POL_HIGH] ^ periph->synced[IRQ_POL_HIGH]) {
+		rc = regmap_write(chip->regmap,
+				  periph->addr | INT_POL_HIGH_OFFSET,
+				  periph->cached[IRQ_POL_HIGH]);
+		if (rc < 0) {
+			pr_err("Couldn't set periph 0x%04x irqs 0x%02x polarity high rc=%d\n",
+				periph->addr, periph->cached[IRQ_POL_HIGH], rc);
+			return;
+		}
+
+		periph->synced[IRQ_POL_HIGH] = periph->cached[IRQ_POL_HIGH];
+	}
+
+	/* did any polarity low change? */
+	if (periph->cached[IRQ_POL_LOW] ^ periph->synced[IRQ_POL_LOW]) {
+		rc = regmap_write(chip->regmap,
+				  periph->addr | INT_POL_LOW_OFFSET,
+				  periph->cached[IRQ_POL_LOW]);
+		if (rc < 0) {
+			pr_err("Couldn't set periph 0x%04x irqs 0x%02x polarity low rc=%d\n",
+				periph->addr, periph->cached[IRQ_POL_LOW], rc);
+			return;
+		}
+
+		periph->synced[IRQ_POL_LOW] = periph->cached[IRQ_POL_LOW];
+	}
+}
+
+static void i2c_pmic_sync_enable(struct i2c_pmic *chip,
+				 struct i2c_pmic_periph *periph)
+{
+	u8 en_set, en_clr;
+	int rc;
+
+	/* determine which irqs were enabled and which were disabled */
+	en_clr = periph->synced[IRQ_EN_SET] & ~periph->cached[IRQ_EN_SET];
+	en_set = ~periph->synced[IRQ_EN_SET] & periph->cached[IRQ_EN_SET];
+
+	/* were any irqs disabled? */
+	if (en_clr) {
+		rc = regmap_write(chip->regmap,
+				  periph->addr | INT_EN_CLR_OFFSET, en_clr);
+		if (rc < 0) {
+			pr_err("Couldn't disable periph 0x%04x irqs 0x%02x rc=%d\n",
+				periph->addr, en_clr, rc);
+			return;
+		}
+	}
+
+	/* were any irqs enabled? */
+	if (en_set) {
+		rc = regmap_write(chip->regmap,
+				  periph->addr | INT_EN_SET_OFFSET, en_set);
+		if (rc < 0) {
+			pr_err("Couldn't enable periph 0x%04x irqs 0x%02x rc=%d\n",
+				periph->addr, en_set, rc);
+			return;
+		}
+	}
+
+	/* irq enabled status was written to hardware */
+	periph->synced[IRQ_EN_SET] = periph->cached[IRQ_EN_SET];
+}
+
+static void i2c_pmic_irq_bus_sync_unlock(struct irq_data *d)
+{
+	struct i2c_pmic_periph *periph = irq_data_get_irq_chip_data(d);
+	struct i2c_pmic *chip = periph->data;
+
+	i2c_pmic_sync_type_polarity(chip, periph);
+	i2c_pmic_sync_enable(chip, periph);
+	mutex_unlock(&periph->lock);
+}
+
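+/*
+ * In the callbacks below, the low byte of d->hwirq is the single-bit mask of
+ * the interrupt within its peripheral and the high byte is the peripheral
+ * base address (see i2c_pmic_domain_xlate()).
+ */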
+static void i2c_pmic_irq_disable(struct irq_data *d)
+{
+	struct i2c_pmic_periph *periph = irq_data_get_irq_chip_data(d);
+
+	periph->cached[IRQ_EN_SET] &= ~d->hwirq & 0xFF;
+}
+
+static void i2c_pmic_irq_enable(struct irq_data *d)
+{
+	struct i2c_pmic_periph *periph = irq_data_get_irq_chip_data(d);
+
+	periph->cached[IRQ_EN_SET] |= d->hwirq & 0xFF;
+}
+
+static int i2c_pmic_irq_set_type(struct irq_data *d, unsigned int irq_type)
+{
+	struct i2c_pmic_periph *periph = irq_data_get_irq_chip_data(d);
+
+	switch (irq_type) {
+	case IRQ_TYPE_EDGE_RISING:
+		periph->cached[IRQ_SET_TYPE] |= d->hwirq & 0xFF;
+		periph->cached[IRQ_POL_HIGH] |= d->hwirq & 0xFF;
+		periph->cached[IRQ_POL_LOW] &= ~d->hwirq & 0xFF;
+		break;
+	case IRQ_TYPE_EDGE_FALLING:
+		periph->cached[IRQ_SET_TYPE] |= d->hwirq & 0xFF;
+		periph->cached[IRQ_POL_HIGH] &= ~d->hwirq & 0xFF;
+		periph->cached[IRQ_POL_LOW] |= d->hwirq & 0xFF;
+		break;
+	case IRQ_TYPE_EDGE_BOTH:
+		periph->cached[IRQ_SET_TYPE] |= d->hwirq & 0xFF;
+		periph->cached[IRQ_POL_HIGH] |= d->hwirq & 0xFF;
+		periph->cached[IRQ_POL_LOW] |= d->hwirq & 0xFF;
+		break;
+	case IRQ_TYPE_LEVEL_HIGH:
+		periph->cached[IRQ_SET_TYPE] &= ~d->hwirq & 0xFF;
+		periph->cached[IRQ_POL_HIGH] |= d->hwirq & 0xFF;
+		periph->cached[IRQ_POL_LOW] &= ~d->hwirq & 0xFF;
+		break;
+	case IRQ_TYPE_LEVEL_LOW:
+		periph->cached[IRQ_SET_TYPE] &= ~d->hwirq & 0xFF;
+		periph->cached[IRQ_POL_HIGH] &= ~d->hwirq & 0xFF;
+		periph->cached[IRQ_POL_LOW] |= d->hwirq & 0xFF;
+		break;
+	default:
+		pr_err("irq type 0x%04x is not supported\n", irq_type);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int i2c_pmic_irq_set_wake(struct irq_data *d, unsigned int on)
+{
+	struct i2c_pmic_periph *periph = irq_data_get_irq_chip_data(d);
+
+	if (on)
+		periph->wake |= d->hwirq & 0xFF;
+	else
+		periph->wake &= ~d->hwirq & 0xFF;
+
+	return 0;
+}
+#else
+#define i2c_pmic_irq_set_wake NULL
+#endif
+
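+/*
+ * irq_enable/irq_disable/irq_set_type only update the cached shadows; the
+ * actual register writes are deferred to irq_bus_sync_unlock(), following the
+ * kernel's slow-bus irq_chip pattern, so the callbacks never issue I2C
+ * traffic themselves.
+ */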
+static struct irq_chip i2c_pmic_irq_chip = {
+	.name			= "i2c_pmic_irq_chip",
+	.irq_bus_lock		= i2c_pmic_irq_bus_lock,
+	.irq_bus_sync_unlock	= i2c_pmic_irq_bus_sync_unlock,
+	.irq_disable		= i2c_pmic_irq_disable,
+	.irq_enable		= i2c_pmic_irq_enable,
+	.irq_set_type		= i2c_pmic_irq_set_type,
+	.irq_set_wake		= i2c_pmic_irq_set_wake,
+};
+
+static struct i2c_pmic_periph *i2c_pmic_find_periph(struct i2c_pmic *chip,
+						    irq_hw_number_t hwirq)
+{
+	int i;
+
+	for (i = 0; i < chip->num_periphs; i++)
+		if (chip->periph[i].addr == (hwirq & 0xFF00))
+			return &chip->periph[i];
+
+	pr_err_ratelimited("Couldn't find periph struct for hwirq 0x%04lx\n",
+			   hwirq);
+	return NULL;
+}
+
+static int i2c_pmic_domain_map(struct irq_domain *d, unsigned int virq,
+			irq_hw_number_t hwirq)
+{
+	struct i2c_pmic *chip = d->host_data;
+	struct i2c_pmic_periph *periph = i2c_pmic_find_periph(chip, hwirq);
+
+	if (!periph)
+		return -ENODEV;
+
+	irq_set_chip_data(virq, periph);
+	irq_set_chip_and_handler(virq, &i2c_pmic_irq_chip, handle_level_irq);
+	irq_set_nested_thread(virq, 1);
+	irq_set_noprobe(virq);
+	return 0;
+}
+
+static int i2c_pmic_domain_xlate(struct irq_domain *d,
+				 struct device_node *ctrlr, const u32 *intspec,
+				 unsigned int intsize, unsigned long *out_hwirq,
+				 unsigned int *out_type)
+{
+	if (intsize != 3)
+		return -EINVAL;
+
+	if (intspec[0] > 0xFF || intspec[1] > 0x7 ||
+					intspec[2] > IRQ_TYPE_SENSE_MASK)
+		return -EINVAL;
+
+	/*
+	 * Interrupt specifiers are triplets
+	 * <peripheral-address, irq-number, IRQ_TYPE_*>
+	 *
+	 * peripheral-address - The base address of the peripheral
+	 * irq-number	      - The zero based bit position of the peripheral's
+	 *			interrupt registers corresponding to the irq
+	 *			where the LSB is 0 and the MSB is 7
+	 * IRQ_TYPE_*	      - Please refer to linux/irq.h
+	 */
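+	/*
+	 * For example, a (hypothetical) specifier <0x10 2 IRQ_TYPE_EDGE_RISING>
+	 * selects bit 2 of the peripheral at base 0x1000, giving hwirq 0x1004.
+	 */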
+	*out_hwirq = intspec[0] << 8 | BIT(intspec[1]);
+	*out_type = intspec[2] & IRQ_TYPE_SENSE_MASK;
+
+	return 0;
+}
+
+static const struct irq_domain_ops i2c_pmic_domain_ops = {
+	.map	= i2c_pmic_domain_map,
+	.xlate	= i2c_pmic_domain_xlate,
+};
+
+static void i2c_pmic_irq_ack_now(struct i2c_pmic *chip, u16 hwirq)
+{
+	int rc;
+
+	rc = regmap_write(chip->regmap,
+			  (hwirq & 0xFF00) | INT_LATCHED_CLR_OFFSET,
+			  hwirq & 0xFF);
+	if (rc < 0)
+		pr_err_ratelimited("Couldn't ack 0x%04x rc=%d\n", hwirq, rc);
+}
+
+static void i2c_pmic_irq_disable_now(struct i2c_pmic *chip, u16 hwirq)
+{
+	struct i2c_pmic_periph *periph = i2c_pmic_find_periph(chip, hwirq);
+	int rc;
+
+	if (!periph)
+		return;
+
+	mutex_lock(&periph->lock);
+	periph->cached[IRQ_EN_SET] &= ~hwirq & 0xFF;
+
+	rc = regmap_write(chip->regmap,
+			  (hwirq & 0xFF00) | INT_EN_CLR_OFFSET,
+			  hwirq & 0xFF);
+	if (rc < 0) {
+		pr_err_ratelimited("Couldn't disable irq 0x%04x rc=%d\n",
+				   hwirq, rc);
+		goto unlock;
+	}
+
+	periph->synced[IRQ_EN_SET] = periph->cached[IRQ_EN_SET];
+
+unlock:
+	mutex_unlock(&periph->lock);
+}
+
+static void i2c_pmic_periph_status_handler(struct i2c_pmic *chip,
+					   u16 periph_address, u8 periph_status)
+{
+	unsigned int hwirq, virq;
+	int i;
+
+	while (periph_status) {
+		i = ffs(periph_status) - 1;
+		periph_status &= ~BIT(i);
+		hwirq = periph_address | BIT(i);
+		virq = irq_find_mapping(chip->domain, hwirq);
+		if (virq == 0) {
+			pr_err_ratelimited("Couldn't find mapping; disabling 0x%04x\n",
+					   hwirq);
+			i2c_pmic_irq_disable_now(chip, hwirq);
+			continue;
+		}
+
+		handle_nested_irq(virq);
+		i2c_pmic_irq_ack_now(chip, hwirq);
+	}
+}
+
+static void i2c_pmic_summary_status_handler(struct i2c_pmic *chip,
+					    struct i2c_pmic_periph *periph,
+					    u8 summary_status)
+{
+	unsigned int periph_status;
+	int rc, i;
+
+	while (summary_status) {
+		i = ffs(summary_status) - 1;
+		summary_status &= ~BIT(i);
+
+		rc = regmap_read(chip->regmap,
+				 periph[i].addr | INT_LATCHED_STS_OFFSET,
+				 &periph_status);
+		if (rc < 0) {
+			pr_err_ratelimited("Couldn't read 0x%04x | INT_LATCHED_STS rc=%d\n",
+					   periph[i].addr, rc);
+			continue;
+		}
+
+		i2c_pmic_periph_status_handler(chip, periph[i].addr,
+					       periph_status);
+	}
+}
+
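+/*
+ * Each summary status register covers eight peripherals, one bit per
+ * peripheral in qcom,periph-map order; a set bit means that peripheral has a
+ * latched interrupt pending.
+ */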
+static irqreturn_t i2c_pmic_irq_handler(int irq, void *dev_id)
+{
+	struct i2c_pmic *chip = dev_id;
+	struct i2c_pmic_periph *periph;
+	unsigned int summary_status;
+	int rc, i;
+
+	for (i = 0; i < DIV_ROUND_UP(chip->num_periphs, BITS_PER_BYTE); i++) {
+		rc = regmap_read(chip->regmap, I2C_INTR_STATUS_BASE + i,
+				&summary_status);
+		if (rc < 0) {
+			pr_err_ratelimited("Couldn't read I2C_INTR_STATUS%d rc=%d\n",
+					   i, rc);
+			continue;
+		}
+
+		if (summary_status == 0)
+			continue;
+
+		periph = &chip->periph[i * 8];
+		i2c_pmic_summary_status_handler(chip, periph, summary_status);
+	}
+
+	return IRQ_HANDLED;
+}
+
+static int i2c_pmic_parse_dt(struct i2c_pmic *chip)
+{
+	struct device_node *node = chip->dev->of_node;
+	int rc, i;
+	u32 temp;
+
+	if (!node) {
+		pr_err("missing device tree\n");
+		return -EINVAL;
+	}
+
+	chip->num_periphs = of_property_count_u32_elems(node,
+							"qcom,periph-map");
+	if (chip->num_periphs < 0) {
+		pr_err("missing qcom,periph-map property rc=%d\n",
+			chip->num_periphs);
+		return chip->num_periphs;
+	}
+
+	if (chip->num_periphs == 0) {
+		pr_err("qcom,periph-map must contain at least one address\n");
+		return -EINVAL;
+	}
+
+	chip->periph = devm_kcalloc(chip->dev, chip->num_periphs,
+				     sizeof(*chip->periph), GFP_KERNEL);
+	if (!chip->periph)
+		return -ENOMEM;
+
+	for (i = 0; i < chip->num_periphs; i++) {
+		rc = of_property_read_u32_index(node, "qcom,periph-map",
+						i, &temp);
+		if (rc < 0) {
+			pr_err("Couldn't read qcom,periph-map[%d] rc=%d\n",
+			       i, rc);
+			return rc;
+		}
+
+		chip->periph[i].addr = (u16)(temp << 8);
+		chip->periph[i].data = chip;
+		mutex_init(&chip->periph[i].lock);
+	}
+
+	/* pinctrl-names is optional, so a failed lookup is ignored */
+	of_property_read_string(node, "pinctrl-names", &chip->pinctrl_name);
+
+	return 0;
+}
+
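+/*
+ * Retry bulk reads that fail with -ENOTCONN, up to MAX_I2C_RETRIES extra
+ * attempts, before reporting the failure.
+ */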
+#define MAX_I2C_RETRIES	3
+static int i2c_pmic_read(struct regmap *map, unsigned int reg, void *val,
+			size_t val_count)
+{
+	int rc, retries = 0;
+
+	do {
+		rc = regmap_bulk_read(map, reg, val, val_count);
+	} while (rc == -ENOTCONN && retries++ < MAX_I2C_RETRIES);
+
+	if (retries > 1)
+		pr_err("i2c_pmic_read failed for %d retries, rc = %d\n",
+			retries - 1, rc);
+
+	return rc;
+}
+
+static int i2c_pmic_determine_initial_status(struct i2c_pmic *chip)
+{
+	int rc, i;
+
+	for (i = 0; i < chip->num_periphs; i++) {
+		rc = i2c_pmic_read(chip->regmap,
+				chip->periph[i].addr | INT_SET_TYPE_OFFSET,
+				chip->periph[i].cached, IRQ_MAX_REGS);
+		if (rc < 0) {
+			pr_err("Couldn't read irq data rc=%d\n", rc);
+			return rc;
+		}
+
+		memcpy(chip->periph[i].synced, chip->periph[i].cached,
+		       IRQ_MAX_REGS * sizeof(*chip->periph[i].synced));
+	}
+
+	return 0;
+}
+
+static const struct regmap_config i2c_pmic_regmap_config = {
+	.reg_bits	= 16,
+	.val_bits	= 8,
+	.max_register	= 0xFFFF,
+};
+
+static int i2c_pmic_probe(struct i2c_client *client,
+			  const struct i2c_device_id *id)
+{
+	struct i2c_pmic *chip;
+	int rc = 0;
+
+	chip = devm_kzalloc(&client->dev, sizeof(*chip), GFP_KERNEL);
+	if (!chip)
+		return -ENOMEM;
+
+	chip->dev = &client->dev;
+	chip->regmap = devm_regmap_init_i2c(client, &i2c_pmic_regmap_config);
+	if (IS_ERR(chip->regmap))
+		return PTR_ERR(chip->regmap);
+
+	i2c_set_clientdata(client, chip);
+	if (!of_property_read_bool(chip->dev->of_node, "interrupt-controller"))
+		goto probe_children;
+
+	chip->domain = irq_domain_add_tree(client->dev.of_node,
+					   &i2c_pmic_domain_ops, chip);
+	if (!chip->domain) {
+		rc = -ENOMEM;
+		goto cleanup;
+	}
+
+	rc = i2c_pmic_parse_dt(chip);
+	if (rc < 0) {
+		pr_err("Couldn't parse device tree rc=%d\n", rc);
+		goto cleanup;
+	}
+
+	rc = i2c_pmic_determine_initial_status(chip);
+	if (rc < 0) {
+		pr_err("Couldn't determine initial status rc=%d\n", rc);
+		goto cleanup;
+	}
+
+	if (chip->pinctrl_name) {
+		chip->pinctrl = devm_pinctrl_get_select(chip->dev,
+							chip->pinctrl_name);
+		if (IS_ERR(chip->pinctrl)) {
+			pr_err("Couldn't select %s pinctrl rc=%ld\n",
+				chip->pinctrl_name, PTR_ERR(chip->pinctrl));
+			rc = PTR_ERR(chip->pinctrl);
+			goto cleanup;
+		}
+	}
+
+	rc = devm_request_threaded_irq(&client->dev, client->irq, NULL,
+				       i2c_pmic_irq_handler,
+				       IRQF_ONESHOT | IRQF_SHARED,
+				       "i2c_pmic_stat_irq", chip);
+	if (rc < 0) {
+		pr_err("Couldn't request irq %d rc=%d\n", client->irq, rc);
+		goto cleanup;
+	}
+
+	enable_irq_wake(client->irq);
+
+probe_children:
+	of_platform_populate(chip->dev->of_node, NULL, NULL, chip->dev);
+	pr_info("I2C PMIC probe successful\n");
+	return rc;
+
+cleanup:
+	if (chip->domain)
+		irq_domain_remove(chip->domain);
+	i2c_set_clientdata(client, NULL);
+	return rc;
+}
+
+static int i2c_pmic_remove(struct i2c_client *client)
+{
+	struct i2c_pmic *chip = i2c_get_clientdata(client);
+
+	of_platform_depopulate(chip->dev);
+	if (chip->domain)
+		irq_domain_remove(chip->domain);
+	i2c_set_clientdata(client, NULL);
+	return 0;
+}
+
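+/*
+ * Across suspend only the interrupts flagged for wake stay enabled; resume
+ * rewrites the enable mask last synced to hardware.
+ */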
+#ifdef CONFIG_PM_SLEEP
+static int i2c_pmic_suspend(struct device *dev)
+{
+	struct i2c_pmic *chip = dev_get_drvdata(dev);
+	struct i2c_pmic_periph *periph;
+	int rc = 0, i;
+
+	for (i = 0; i < chip->num_periphs; i++) {
+		periph = &chip->periph[i];
+
+		rc = regmap_write(chip->regmap,
+				  periph->addr | INT_EN_CLR_OFFSET, 0xFF);
+		if (rc < 0) {
+			pr_err_ratelimited("Couldn't clear 0x%04x irqs rc=%d\n",
+				periph->addr, rc);
+			continue;
+		}
+
+		rc = regmap_write(chip->regmap,
+				  periph->addr | INT_EN_SET_OFFSET,
+				  periph->wake);
+		if (rc < 0)
+			pr_err_ratelimited("Couldn't enable 0x%04x wake irqs 0x%02x rc=%d\n",
+			       periph->addr, periph->wake, rc);
+	}
+
+	return rc;
+}
+
+static int i2c_pmic_resume(struct device *dev)
+{
+	struct i2c_pmic *chip = dev_get_drvdata(dev);
+	struct i2c_pmic_periph *periph;
+	int rc = 0, i;
+
+	for (i = 0; i < chip->num_periphs; i++) {
+		periph = &chip->periph[i];
+
+		rc = regmap_write(chip->regmap,
+				  periph->addr | INT_EN_CLR_OFFSET, 0xFF);
+		if (rc < 0) {
+			pr_err("Couldn't clear 0x%04x irqs rc=%d\n",
+				periph->addr, rc);
+			continue;
+		}
+
+		rc = regmap_write(chip->regmap,
+				  periph->addr | INT_EN_SET_OFFSET,
+				  periph->synced[IRQ_EN_SET]);
+		if (rc < 0)
+			pr_err("Couldn't restore 0x%04x synced irqs 0x%02x rc=%d\n",
+			       periph->addr, periph->synced[IRQ_EN_SET], rc);
+	}
+
+	return rc;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(i2c_pmic_pm_ops, i2c_pmic_suspend, i2c_pmic_resume);
+
+static const struct of_device_id i2c_pmic_match_table[] = {
+	{ .compatible = "qcom,i2c-pmic", },
+	{ },
+};
+
+static const struct i2c_device_id i2c_pmic_id[] = {
+	{ "i2c-pmic", 0 },
+	{ },
+};
+MODULE_DEVICE_TABLE(i2c, i2c_pmic_id);
+
+static struct i2c_driver i2c_pmic_driver = {
+	.driver		= {
+		.name		= "i2c_pmic",
+		.owner		= THIS_MODULE,
+		.pm		= &i2c_pmic_pm_ops,
+		.of_match_table	= i2c_pmic_match_table,
+	},
+	.probe		= i2c_pmic_probe,
+	.remove		= i2c_pmic_remove,
+	.id_table	= i2c_pmic_id,
+};
+
+module_i2c_driver(i2c_pmic_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("i2c:i2c_pmic");
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/mfd/wcd9335-regmap.c	2019-01-22 16:16:24.687257128 +0100
@@ -0,0 +1,1612 @@
+/*
+ * Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/mfd/wcd9xxx/core.h>
+#include <linux/mfd/wcd9335/registers.h>
+#include <linux/regmap.h>
+#include <linux/device.h>
+#include "wcd9xxx-regmap.h"
+
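+/*
+ * Revision-specific register defaults. Each entry is { reg, def, delay_us }
+ * per struct reg_sequence; no write delays are required here.
+ */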
+static const struct reg_sequence wcd9335_1_x_defaults[] = {
+	{ WCD9335_CODEC_RPM_CLK_GATE                    , 0x03 , 0x00 },
+	{ WCD9335_CODEC_RPM_PWR_CPE_DRAM1_SHUTDOWN      , 0x1f , 0x00 },
+	{ WCD9335_CHIP_TIER_CTRL_CHIP_ID_BYTE0          , 0x00 , 0x00 },
+	{ WCD9335_CHIP_TIER_CTRL_EFUSE_CTL              , 0x00 , 0x00 },
+	{ WCD9335_DATA_HUB_DATA_HUB_RX0_INP_CFG         , 0x00 , 0x00 },
+	{ WCD9335_DATA_HUB_DATA_HUB_RX1_INP_CFG         , 0x00 , 0x00 },
+	{ WCD9335_DATA_HUB_DATA_HUB_RX2_INP_CFG         , 0x00 , 0x00 },
+	{ WCD9335_DATA_HUB_DATA_HUB_RX3_INP_CFG         , 0x00 , 0x00 },
+	{ WCD9335_CPE_SS_CPARMAD_BUFRDY_INT_PERIOD      , 0x14 , 0x00 },
+	{ WCD9335_CPE_SS_SS_ERROR_INT_MASK              , 0x3f , 0x00 },
+	{ WCD9335_SOC_MAD_AUDIO_IIR_CTL_VAL             , 0x00 , 0x00 },
+	{ WCD9335_BIAS_VBG_FINE_ADJ                     , 0x55 , 0x00 },
+	{ WCD9335_SIDO_SIDO_CCL_2                       , 0x6c , 0x00 },
+	{ WCD9335_SIDO_SIDO_CCL_3                       , 0x2d , 0x00 },
+	{ WCD9335_SIDO_SIDO_CCL_8                       , 0x6c , 0x00 },
+	{ WCD9335_SIDO_SIDO_CCL_10                      , 0x6c , 0x00 },
+	{ WCD9335_SIDO_SIDO_DRIVER_2                    , 0x77 , 0x00 },
+	{ WCD9335_SIDO_SIDO_DRIVER_3                    , 0x77 , 0x00 },
+	{ WCD9335_SIDO_SIDO_TEST_2                      , 0x00 , 0x00 },
+	{ WCD9335_MBHC_ZDET_ANA_CTL                     , 0x00 , 0x00 },
+	{ WCD9335_MBHC_FSM_DEBUG                        , 0xc0 , 0x00 },
+	{ WCD9335_TX_1_2_ATEST_REFCTL                   , 0x08 , 0x00 },
+	{ WCD9335_TX_3_4_ATEST_REFCTL                   , 0x08 , 0x00 },
+	{ WCD9335_TX_5_6_ATEST_REFCTL                   , 0x08 , 0x00 },
+	{ WCD9335_FLYBACK_VNEG_CTRL_1                   , 0x67 , 0x00 },
+	{ WCD9335_FLYBACK_VNEG_CTRL_4                   , 0x5f , 0x00 },
+	{ WCD9335_FLYBACK_VNEG_CTRL_9                   , 0x50 , 0x00 },
+	{ WCD9335_FLYBACK_VNEG_DAC_CTRL_1               , 0x65 , 0x00 },
+	{ WCD9335_FLYBACK_VNEG_DAC_CTRL_4               , 0x40 , 0x00 },
+	{ WCD9335_RX_BIAS_HPH_PA                        , 0xaa , 0x00 },
+	{ WCD9335_RX_BIAS_HPH_LOWPOWER                  , 0x62 , 0x00 },
+	{ WCD9335_HPH_PA_CTL2                           , 0x40 , 0x00 },
+	{ WCD9335_HPH_L_EN                              , 0x00 , 0x00 },
+	{ WCD9335_HPH_R_EN                              , 0x00 , 0x00 },
+	{ WCD9335_HPH_R_ATEST                           , 0x50 , 0x00 },
+	{ WCD9335_HPH_RDAC_LDO_CTL                      , 0x00 , 0x00 },
+	{ WCD9335_CDC_TX0_TX_PATH_CFG0                  , 0x00 , 0x00 },
+	{ WCD9335_CDC_TX0_TX_PATH_CFG1                  , 0x00 , 0x00 },
+	{ WCD9335_CDC_TX0_TX_PATH_SEC2                  , 0x00 , 0x00 },
+	{ WCD9335_CDC_TX0_TX_PATH_SEC3                  , 0x0c , 0x00 },
+	{ WCD9335_CDC_TX1_TX_PATH_CFG0                  , 0x00 , 0x00 },
+	{ WCD9335_CDC_TX1_TX_PATH_CFG1                  , 0x00 , 0x00 },
+	{ WCD9335_CDC_TX1_TX_PATH_SEC2                  , 0x00 , 0x00 },
+	{ WCD9335_CDC_TX1_TX_PATH_SEC3                  , 0x0c , 0x00 },
+	{ WCD9335_CDC_TX2_TX_PATH_CFG0                  , 0x00 , 0x00 },
+	{ WCD9335_CDC_TX3_TX_PATH_CFG0                  , 0x00 , 0x00 },
+	{ WCD9335_CDC_TX4_TX_PATH_CFG0                  , 0x00 , 0x00 },
+	{ WCD9335_CDC_TX5_TX_PATH_CFG0                  , 0x00 , 0x00 },
+	{ WCD9335_CDC_TX6_TX_PATH_CFG0                  , 0x00 , 0x00 },
+	{ WCD9335_CDC_TX7_TX_PATH_CFG0                  , 0x00 , 0x00 },
+	{ WCD9335_CDC_TX8_TX_PATH_CFG0                  , 0x00 , 0x00 },
+	{ WCD9335_CDC_TX2_TX_PATH_CFG1                  , 0x00 , 0x00 },
+	{ WCD9335_CDC_TX3_TX_PATH_CFG1                  , 0x00 , 0x00 },
+	{ WCD9335_CDC_TX4_TX_PATH_CFG1                  , 0x00 , 0x00 },
+	{ WCD9335_CDC_TX5_TX_PATH_CFG1                  , 0x00 , 0x00 },
+	{ WCD9335_CDC_TX6_TX_PATH_CFG1                  , 0x00 , 0x00 },
+	{ WCD9335_CDC_TX7_TX_PATH_CFG1                  , 0x00 , 0x00 },
+	{ WCD9335_CDC_TX8_TX_PATH_CFG1                  , 0x00 , 0x00 },
+	{ WCD9335_CDC_TX2_TX_PATH_SEC2                  , 0x00 , 0x00 },
+	{ WCD9335_CDC_TX3_TX_PATH_SEC2                  , 0x00 , 0x00 },
+	{ WCD9335_CDC_TX4_TX_PATH_SEC2                  , 0x00 , 0x00 },
+	{ WCD9335_CDC_TX5_TX_PATH_SEC2                  , 0x00 , 0x00 },
+	{ WCD9335_CDC_TX6_TX_PATH_SEC2                  , 0x00 , 0x00 },
+	{ WCD9335_CDC_TX7_TX_PATH_SEC2                  , 0x00 , 0x00 },
+	{ WCD9335_CDC_TX8_TX_PATH_SEC2                  , 0x00 , 0x00 },
+	{ WCD9335_CDC_TX2_TX_PATH_SEC3                  , 0x0c , 0x00 },
+	{ WCD9335_CDC_TX3_TX_PATH_SEC3                  , 0x0c , 0x00 },
+	{ WCD9335_CDC_TX4_TX_PATH_SEC3                  , 0x0c , 0x00 },
+	{ WCD9335_CDC_TX5_TX_PATH_SEC3                  , 0x0c , 0x00 },
+	{ WCD9335_CDC_TX6_TX_PATH_SEC3                  , 0x0c , 0x00 },
+	{ WCD9335_CDC_TX7_TX_PATH_SEC3                  , 0x0c , 0x00 },
+	{ WCD9335_CDC_TX8_TX_PATH_SEC3                  , 0x0c , 0x00 },
+	{ WCD9335_CDC_COMPANDER1_CTL7                   , 0x0c , 0x00 },
+	{ WCD9335_CDC_COMPANDER2_CTL7                   , 0x0c , 0x00 },
+	{ WCD9335_CDC_COMPANDER3_CTL7                   , 0x0c , 0x00 },
+	{ WCD9335_CDC_COMPANDER4_CTL7                   , 0x0c , 0x00 },
+	{ WCD9335_CDC_COMPANDER5_CTL7                   , 0x0c , 0x00 },
+	{ WCD9335_CDC_COMPANDER6_CTL7                   , 0x0c , 0x00 },
+	{ WCD9335_CDC_COMPANDER7_CTL7                   , 0x0c , 0x00 },
+	{ WCD9335_CDC_COMPANDER8_CTL7                   , 0x0c , 0x00 },
+	{ WCD9335_CDC_RX0_RX_PATH_CFG1                  , 0x04 , 0x00 },
+	{ WCD9335_CDC_RX0_RX_PATH_MIX_CFG               , 0x0e , 0x00 },
+	{ WCD9335_CDC_RX0_RX_PATH_SEC0                  , 0x00 , 0x00 },
+	{ WCD9335_CDC_RX0_RX_PATH_SEC1                  , 0x00 , 0x00 },
+	{ WCD9335_CDC_RX0_RX_PATH_MIX_SEC0              , 0x00 , 0x00 },
+	{ WCD9335_CDC_RX1_RX_PATH_CFG1                  , 0x04 , 0x00 },
+	{ WCD9335_CDC_RX1_RX_PATH_MIX_CFG               , 0x0e , 0x00 },
+	{ WCD9335_CDC_RX1_RX_PATH_SEC0                  , 0x00 , 0x00 },
+	{ WCD9335_CDC_RX1_RX_PATH_SEC1                  , 0x00 , 0x00 },
+	{ WCD9335_CDC_RX1_RX_PATH_MIX_SEC0              , 0x00 , 0x00 },
+	{ WCD9335_CDC_RX2_RX_PATH_CFG1                  , 0x04 , 0x00 },
+	{ WCD9335_CDC_RX2_RX_PATH_MIX_CFG               , 0x0e , 0x00 },
+	{ WCD9335_CDC_RX2_RX_PATH_SEC0                  , 0x00 , 0x00 },
+	{ WCD9335_CDC_RX2_RX_PATH_SEC1                  , 0x00 , 0x00 },
+	{ WCD9335_CDC_RX2_RX_PATH_MIX_SEC0              , 0x00 , 0x00 },
+	{ WCD9335_CDC_RX3_RX_PATH_CFG1                  , 0x04 , 0x00 },
+	{ WCD9335_CDC_RX3_RX_PATH_MIX_CFG               , 0x0e , 0x00 },
+	{ WCD9335_CDC_RX3_RX_PATH_SEC0                  , 0x00 , 0x00 },
+	{ WCD9335_CDC_RX3_RX_PATH_SEC1                  , 0x00 , 0x00 },
+	{ WCD9335_CDC_RX3_RX_PATH_MIX_SEC0              , 0x00 , 0x00 },
+	{ WCD9335_CDC_RX4_RX_PATH_CFG1                  , 0x04 , 0x00 },
+	{ WCD9335_CDC_RX4_RX_PATH_MIX_CFG               , 0x0e , 0x00 },
+	{ WCD9335_CDC_RX4_RX_PATH_SEC0                  , 0x00 , 0x00 },
+	{ WCD9335_CDC_RX4_RX_PATH_SEC1                  , 0x00 , 0x00 },
+	{ WCD9335_CDC_RX4_RX_PATH_MIX_SEC0              , 0x00 , 0x00 },
+	{ WCD9335_CDC_RX5_RX_PATH_CFG1                  , 0x04 , 0x00 },
+	{ WCD9335_CDC_RX5_RX_PATH_MIX_CFG               , 0x0e , 0x00 },
+	{ WCD9335_CDC_RX5_RX_PATH_SEC0                  , 0x00 , 0x00 },
+	{ WCD9335_CDC_RX5_RX_PATH_SEC1                  , 0x00 , 0x00 },
+	{ WCD9335_CDC_RX5_RX_PATH_MIX_SEC0              , 0x00 , 0x00 },
+	{ WCD9335_CDC_RX6_RX_PATH_CFG1                  , 0x04 , 0x00 },
+	{ WCD9335_CDC_RX6_RX_PATH_MIX_CFG               , 0x0e , 0x00 },
+	{ WCD9335_CDC_RX6_RX_PATH_SEC0                  , 0x00 , 0x00 },
+	{ WCD9335_CDC_RX6_RX_PATH_SEC1                  , 0x00 , 0x00 },
+	{ WCD9335_CDC_RX6_RX_PATH_MIX_SEC0              , 0x00 , 0x00 },
+	{ WCD9335_CDC_RX7_RX_PATH_CFG1                  , 0x04 , 0x00 },
+	{ WCD9335_CDC_RX7_RX_PATH_MIX_CFG               , 0x0e , 0x00 },
+	{ WCD9335_CDC_RX7_RX_PATH_SEC0                  , 0x00 , 0x00 },
+	{ WCD9335_CDC_RX7_RX_PATH_SEC1                  , 0x00 , 0x00 },
+	{ WCD9335_CDC_RX7_RX_PATH_MIX_SEC0              , 0x00 , 0x00 },
+	{ WCD9335_CDC_RX8_RX_PATH_CFG1                  , 0x04 , 0x00 },
+	{ WCD9335_CDC_RX8_RX_PATH_MIX_CFG               , 0x0e , 0x00 },
+	{ WCD9335_CDC_RX8_RX_PATH_SEC0                  , 0x00 , 0x00 },
+	{ WCD9335_CDC_RX8_RX_PATH_SEC1                  , 0x00 , 0x00 },
+	{ WCD9335_CDC_RX8_RX_PATH_MIX_SEC0              , 0x00 , 0x00 },
+	{ WCD9335_SPLINE_SRC0_CLK_RST_CTL_0             , 0x00 , 0x00 },
+	{ WCD9335_SPLINE_SRC1_CLK_RST_CTL_0             , 0x00 , 0x00 },
+	{ WCD9335_SPLINE_SRC2_CLK_RST_CTL_0             , 0x00 , 0x00 },
+	{ WCD9335_SPLINE_SRC3_CLK_RST_CTL_0             , 0x00 , 0x00 },
+	{ WCD9335_CDC_CLK_RST_CTRL_FS_CNT_CONTROL       , 0x00 , 0x00 },
+	{ WCD9335_TEST_DEBUG_NPL_DLY_TEST_1             , 0x00 , 0x00 },
+	{ WCD9335_TEST_DEBUG_NPL_DLY_TEST_2             , 0x00 , 0x00 },
+};
+
+static const struct reg_sequence wcd9335_2_0_defaults[] = {
+	{ WCD9335_CODEC_RPM_CLK_GATE                    , 0x07 , 0x00 },
+	{ WCD9335_CODEC_RPM_PWR_CPE_DRAM1_SHUTDOWN      , 0x3f , 0x00 },
+	{ WCD9335_CHIP_TIER_CTRL_CHIP_ID_BYTE0          , 0x01 , 0x00 },
+	{ WCD9335_CHIP_TIER_CTRL_EFUSE_CTL              , 0x10 , 0x00 },
+	{ WCD9335_DATA_HUB_DATA_HUB_RX0_INP_CFG         , 0x08 , 0x00 },
+	{ WCD9335_DATA_HUB_DATA_HUB_RX1_INP_CFG         , 0x08 , 0x00 },
+	{ WCD9335_DATA_HUB_DATA_HUB_RX2_INP_CFG         , 0x08 , 0x00 },
+	{ WCD9335_DATA_HUB_DATA_HUB_RX3_INP_CFG         , 0x08 , 0x00 },
+	{ WCD9335_CPE_SS_CPARMAD_BUFRDY_INT_PERIOD      , 0x13 , 0x00 },
+	{ WCD9335_CPE_SS_SS_ERROR_INT_MASK              , 0xff , 0x00 },
+	{ WCD9335_SOC_MAD_AUDIO_IIR_CTL_VAL             , 0x40 , 0x00 },
+	{ WCD9335_BIAS_VBG_FINE_ADJ                     , 0xc5 , 0x00 },
+	{ WCD9335_SIDO_SIDO_CCL_2                       , 0x92 , 0x00 },
+	{ WCD9335_SIDO_SIDO_CCL_3                       , 0x35 , 0x00 },
+	{ WCD9335_SIDO_SIDO_CCL_8                       , 0x6e , 0x00 },
+	{ WCD9335_SIDO_SIDO_CCL_10                      , 0x6e , 0x00 },
+	{ WCD9335_SIDO_SIDO_DRIVER_2                    , 0x55 , 0x00 },
+	{ WCD9335_SIDO_SIDO_DRIVER_3                    , 0x55 , 0x00 },
+	{ WCD9335_SIDO_SIDO_TEST_2                      , 0x0f , 0x00 },
+	{ WCD9335_MBHC_ZDET_ANA_CTL                     , 0x0f , 0x00 },
+	{ WCD9335_TX_1_2_ATEST_REFCTL                   , 0x0a , 0x00 },
+	{ WCD9335_TX_3_4_ATEST_REFCTL                   , 0x0a , 0x00 },
+	{ WCD9335_TX_5_6_ATEST_REFCTL                   , 0x0a , 0x00 },
+	{ WCD9335_FLYBACK_VNEG_CTRL_1                   , 0xeb , 0x00 },
+	{ WCD9335_FLYBACK_VNEG_CTRL_4                   , 0x7f , 0x00 },
+	{ WCD9335_FLYBACK_VNEG_CTRL_9                   , 0x64 , 0x00 },
+	{ WCD9335_FLYBACK_VNEG_DAC_CTRL_1               , 0xed , 0x00 },
+	{ WCD9335_RX_BIAS_HPH_PA                        , 0x9a , 0x00 },
+	{ WCD9335_RX_BIAS_HPH_LOWPOWER                  , 0x82 , 0x00 },
+	{ WCD9335_HPH_PA_CTL2                           , 0x50 , 0x00 },
+	{ WCD9335_HPH_L_EN                              , 0x80 , 0x00 },
+	{ WCD9335_HPH_R_EN                              , 0x80 , 0x00 },
+	{ WCD9335_HPH_R_ATEST                           , 0x54 , 0x00 },
+	{ WCD9335_HPH_RDAC_LDO_CTL                      , 0x33 , 0x00 },
+	{ WCD9335_CDC_TX0_TX_PATH_CFG0                  , 0x10 , 0x00 },
+	{ WCD9335_CDC_TX0_TX_PATH_CFG1                  , 0x02 , 0x00 },
+	{ WCD9335_CDC_TX0_TX_PATH_SEC2                  , 0x01 , 0x00 },
+	{ WCD9335_CDC_TX0_TX_PATH_SEC3                  , 0x3c , 0x00 },
+	{ WCD9335_CDC_TX1_TX_PATH_CFG0                  , 0x10 , 0x00 },
+	{ WCD9335_CDC_TX1_TX_PATH_CFG1                  , 0x02 , 0x00 },
+	{ WCD9335_CDC_TX1_TX_PATH_SEC2                  , 0x01 , 0x00 },
+	{ WCD9335_CDC_TX1_TX_PATH_SEC3                  , 0x3c , 0x00 },
+	{ WCD9335_CDC_TX2_TX_PATH_CFG0                  , 0x10 , 0x00 },
+	{ WCD9335_CDC_TX3_TX_PATH_CFG0                  , 0x10 , 0x00 },
+	{ WCD9335_CDC_TX4_TX_PATH_CFG0                  , 0x10 , 0x00 },
+	{ WCD9335_CDC_TX5_TX_PATH_CFG0                  , 0x10 , 0x00 },
+	{ WCD9335_CDC_TX6_TX_PATH_CFG0                  , 0x10 , 0x00 },
+	{ WCD9335_CDC_TX7_TX_PATH_CFG0                  , 0x10 , 0x00 },
+	{ WCD9335_CDC_TX8_TX_PATH_CFG0                  , 0x10 , 0x00 },
+	{ WCD9335_CDC_TX2_TX_PATH_CFG1                  , 0x02 , 0x00 },
+	{ WCD9335_CDC_TX3_TX_PATH_CFG1                  , 0x02 , 0x00 },
+	{ WCD9335_CDC_TX4_TX_PATH_CFG1                  , 0x02 , 0x00 },
+	{ WCD9335_CDC_TX5_TX_PATH_CFG1                  , 0x02 , 0x00 },
+	{ WCD9335_CDC_TX6_TX_PATH_CFG1                  , 0x02 , 0x00 },
+	{ WCD9335_CDC_TX7_TX_PATH_CFG1                  , 0x02 , 0x00 },
+	{ WCD9335_CDC_TX8_TX_PATH_CFG1                  , 0x02 , 0x00 },
+	{ WCD9335_CDC_TX2_TX_PATH_SEC2                  , 0x01 , 0x00 },
+	{ WCD9335_CDC_TX3_TX_PATH_SEC2                  , 0x01 , 0x00 },
+	{ WCD9335_CDC_TX4_TX_PATH_SEC2                  , 0x01 , 0x00 },
+	{ WCD9335_CDC_TX5_TX_PATH_SEC2                  , 0x01 , 0x00 },
+	{ WCD9335_CDC_TX6_TX_PATH_SEC2                  , 0x01 , 0x00 },
+	{ WCD9335_CDC_TX7_TX_PATH_SEC2                  , 0x01 , 0x00 },
+	{ WCD9335_CDC_TX8_TX_PATH_SEC2                  , 0x01 , 0x00 },
+	{ WCD9335_CDC_TX2_TX_PATH_SEC3                  , 0x3c , 0x00 },
+	{ WCD9335_CDC_TX3_TX_PATH_SEC3                  , 0x3c , 0x00 },
+	{ WCD9335_CDC_TX4_TX_PATH_SEC3                  , 0x3c , 0x00 },
+	{ WCD9335_CDC_TX5_TX_PATH_SEC3                  , 0x3c , 0x00 },
+	{ WCD9335_CDC_TX6_TX_PATH_SEC3                  , 0x3c , 0x00 },
+	{ WCD9335_CDC_TX7_TX_PATH_SEC3                  , 0x3c , 0x00 },
+	{ WCD9335_CDC_TX8_TX_PATH_SEC3                  , 0x3c , 0x00 },
+	{ WCD9335_CDC_COMPANDER1_CTL7                   , 0x08 , 0x00 },
+	{ WCD9335_CDC_COMPANDER2_CTL7                   , 0x08 , 0x00 },
+	{ WCD9335_CDC_COMPANDER3_CTL7                   , 0x08 , 0x00 },
+	{ WCD9335_CDC_COMPANDER4_CTL7                   , 0x08 , 0x00 },
+	{ WCD9335_CDC_COMPANDER5_CTL7                   , 0x08 , 0x00 },
+	{ WCD9335_CDC_COMPANDER6_CTL7                   , 0x08 , 0x00 },
+	{ WCD9335_CDC_COMPANDER7_CTL7                   , 0x08 , 0x00 },
+	{ WCD9335_CDC_COMPANDER8_CTL7                   , 0x08 , 0x00 },
+	{ WCD9335_CDC_RX0_RX_PATH_CFG1                  , 0x44 , 0x00 },
+	{ WCD9335_CDC_RX0_RX_PATH_MIX_CFG               , 0x1e , 0x00 },
+	{ WCD9335_CDC_RX0_RX_PATH_SEC0                  , 0xfc , 0x00 },
+	{ WCD9335_CDC_RX0_RX_PATH_SEC1                  , 0x08 , 0x00 },
+	{ WCD9335_CDC_RX0_RX_PATH_MIX_SEC0              , 0x08 , 0x00 },
+	{ WCD9335_CDC_RX1_RX_PATH_CFG1                  , 0x44 , 0x00 },
+	{ WCD9335_CDC_RX1_RX_PATH_MIX_CFG               , 0x1e , 0x00 },
+	{ WCD9335_CDC_RX1_RX_PATH_SEC0                  , 0xfc , 0x00 },
+	{ WCD9335_CDC_RX1_RX_PATH_SEC1                  , 0x08 , 0x00 },
+	{ WCD9335_CDC_RX1_RX_PATH_MIX_SEC0              , 0x08 , 0x00 },
+	{ WCD9335_CDC_RX2_RX_PATH_CFG1                  , 0x44 , 0x00 },
+	{ WCD9335_CDC_RX2_RX_PATH_MIX_CFG               , 0x1e , 0x00 },
+	{ WCD9335_CDC_RX2_RX_PATH_SEC0                  , 0xfc , 0x00 },
+	{ WCD9335_CDC_RX2_RX_PATH_SEC1                  , 0x08 , 0x00 },
+	{ WCD9335_CDC_RX2_RX_PATH_MIX_SEC0              , 0x08 , 0x00 },
+	{ WCD9335_CDC_RX3_RX_PATH_CFG1                  , 0x44 , 0x00 },
+	{ WCD9335_CDC_RX3_RX_PATH_MIX_CFG               , 0x1e , 0x00 },
+	{ WCD9335_CDC_RX3_RX_PATH_SEC0                  , 0xfc , 0x00 },
+	{ WCD9335_CDC_RX3_RX_PATH_SEC1                  , 0x08 , 0x00 },
+	{ WCD9335_CDC_RX3_RX_PATH_MIX_SEC0              , 0x08 , 0x00 },
+	{ WCD9335_CDC_RX4_RX_PATH_CFG1                  , 0x44 , 0x00 },
+	{ WCD9335_CDC_RX4_RX_PATH_MIX_CFG               , 0x1e , 0x00 },
+	{ WCD9335_CDC_RX4_RX_PATH_SEC0                  , 0xfc , 0x00 },
+	{ WCD9335_CDC_RX4_RX_PATH_SEC1                  , 0x08 , 0x00 },
+	{ WCD9335_CDC_RX4_RX_PATH_MIX_SEC0              , 0x08 , 0x00 },
+	{ WCD9335_CDC_RX5_RX_PATH_CFG1                  , 0x44 , 0x00 },
+	{ WCD9335_CDC_RX5_RX_PATH_MIX_CFG               , 0x1e , 0x00 },
+	{ WCD9335_CDC_RX5_RX_PATH_SEC0                  , 0xfc , 0x00 },
+	{ WCD9335_CDC_RX5_RX_PATH_SEC1                  , 0x08 , 0x00 },
+	{ WCD9335_CDC_RX5_RX_PATH_MIX_SEC0              , 0x08 , 0x00 },
+	{ WCD9335_CDC_RX6_RX_PATH_CFG1                  , 0x44 , 0x00 },
+	{ WCD9335_CDC_RX6_RX_PATH_MIX_CFG               , 0x1e , 0x00 },
+	{ WCD9335_CDC_RX6_RX_PATH_SEC0                  , 0xfc , 0x00 },
+	{ WCD9335_CDC_RX6_RX_PATH_SEC1                  , 0x08 , 0x00 },
+	{ WCD9335_CDC_RX6_RX_PATH_MIX_SEC0              , 0x08 , 0x00 },
+	{ WCD9335_CDC_RX7_RX_PATH_CFG1                  , 0x44 , 0x00 },
+	{ WCD9335_CDC_RX7_RX_PATH_MIX_CFG               , 0x1e , 0x00 },
+	{ WCD9335_CDC_RX7_RX_PATH_SEC0                  , 0xfc , 0x00 },
+	{ WCD9335_CDC_RX7_RX_PATH_SEC1                  , 0x08 , 0x00 },
+	{ WCD9335_CDC_RX7_RX_PATH_MIX_SEC0              , 0x08 , 0x00 },
+	{ WCD9335_CDC_RX8_RX_PATH_CFG1                  , 0x44 , 0x00 },
+	{ WCD9335_CDC_RX8_RX_PATH_MIX_CFG               , 0x1e , 0x00 },
+	{ WCD9335_CDC_RX8_RX_PATH_SEC0                  , 0xfc , 0x00 },
+	{ WCD9335_CDC_RX8_RX_PATH_SEC1                  , 0x08 , 0x00 },
+	{ WCD9335_CDC_RX8_RX_PATH_MIX_SEC0              , 0x08 , 0x00 },
+	{ WCD9335_SPLINE_SRC0_CLK_RST_CTL_0             , 0x20 , 0x00 },
+	{ WCD9335_SPLINE_SRC1_CLK_RST_CTL_0             , 0x20 , 0x00 },
+	{ WCD9335_SPLINE_SRC2_CLK_RST_CTL_0             , 0x20 , 0x00 },
+	{ WCD9335_SPLINE_SRC3_CLK_RST_CTL_0             , 0x20 , 0x00 },
+	{ WCD9335_CDC_CLK_RST_CTRL_FS_CNT_CONTROL       , 0x0c , 0x00 },
+	{ WCD9335_TEST_DEBUG_NPL_DLY_TEST_1             , 0x10 , 0x00 },
+	{ WCD9335_TEST_DEBUG_NPL_DLY_TEST_2             , 0x60 , 0x00 },
+	{ WCD9335_DATA_HUB_NATIVE_FIFO_SYNC             , 0x00 , 0x00 },
+	{ WCD9335_DATA_HUB_NATIVE_FIFO_STATUS           , 0x00 , 0x00 },
+	{ WCD9335_CPE_SS_TX_PP_BUF_INT_PERIOD           , 0x60 , 0x00 },
+	{ WCD9335_CPE_SS_TX_PP_CFG                      , 0x3C , 0x00 },
+	{ WCD9335_CPE_SS_SVA_CFG                        , 0x00 , 0x00 },
+	{ WCD9335_MBHC_FSM_STATUS                       , 0x00 , 0x00 },
+	{ WCD9335_FLYBACK_CTRL_1                        , 0x45 , 0x00 },
+	{ WCD9335_CDC_TX0_TX_PATH_SEC7                  , 0x25 , 0x00 },
+	{ WCD9335_SPLINE_SRC0_STATUS                    , 0x00 , 0x00 },
+	{ WCD9335_SPLINE_SRC1_STATUS                    , 0x00 , 0x00 },
+	{ WCD9335_SPLINE_SRC2_STATUS                    , 0x00 , 0x00 },
+	{ WCD9335_SPLINE_SRC3_STATUS                    , 0x00 , 0x00 },
+	{ WCD9335_CDC_PROX_DETECT_PROX_CTL_REPEAT_PAT   , 0x00 , 0x00 },
+};
+
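+/*
+ * Power-on reset values for the readable registers, grouped by register page
+ * as annotated below.
+ */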
+static const struct reg_default wcd9335_defaults[] = {
+	/* Page #0 registers */
+	{ WCD9335_PAGE0_PAGE_REGISTER                   , 0x00 },
+	{ WCD9335_CODEC_RPM_CLK_BYPASS                  , 0x00 },
+	{ WCD9335_CODEC_RPM_CLK_MCLK_CFG                , 0x00 },
+	{ WCD9335_CODEC_RPM_RST_CTL                     , 0x00 },
+	{ WCD9335_CODEC_RPM_PWR_CDC_DIG_HM_CTL          , 0x07 },
+	{ WCD9335_CODEC_RPM_PWR_CPE_DEEPSLP_1           , 0x00 },
+	{ WCD9335_CODEC_RPM_PWR_CPE_DEEPSLP_2           , 0x00 },
+	{ WCD9335_CODEC_RPM_PWR_CPE_DEEPSLP_3           , 0x00 },
+	{ WCD9335_CODEC_RPM_PWR_CPE_IRAM_SHUTDOWN       , 0x01 },
+	{ WCD9335_CODEC_RPM_PWR_CPE_DRAM0_SHUTDOWN_1    , 0xff },
+	{ WCD9335_CODEC_RPM_PWR_CPE_DRAM0_SHUTDOWN_2    , 0xff },
+	{ WCD9335_CODEC_RPM_INT_MASK                    , 0x3f },
+	{ WCD9335_CODEC_RPM_INT_STATUS                  , 0x00 },
+	{ WCD9335_CODEC_RPM_INT_CLEAR                   , 0x00 },
+	{ WCD9335_CHIP_TIER_CTRL_CHIP_ID_BYTE1          , 0x00 },
+	{ WCD9335_CHIP_TIER_CTRL_CHIP_ID_BYTE2          , 0x07 },
+	{ WCD9335_CHIP_TIER_CTRL_CHIP_ID_BYTE3          , 0x01 },
+	{ WCD9335_CHIP_TIER_CTRL_EFUSE_TEST0            , 0x00 },
+	{ WCD9335_CHIP_TIER_CTRL_EFUSE_TEST1            , 0x00 },
+	{ WCD9335_CHIP_TIER_CTRL_EFUSE_VAL_OUT0         , 0x00 },
+	{ WCD9335_CHIP_TIER_CTRL_EFUSE_VAL_OUT1         , 0x00 },
+	{ WCD9335_CHIP_TIER_CTRL_EFUSE_VAL_OUT2         , 0x00 },
+	{ WCD9335_CHIP_TIER_CTRL_EFUSE_VAL_OUT3         , 0x00 },
+	{ WCD9335_CHIP_TIER_CTRL_EFUSE_VAL_OUT4         , 0x00 },
+	{ WCD9335_CHIP_TIER_CTRL_EFUSE_VAL_OUT5         , 0x00 },
+	{ WCD9335_CHIP_TIER_CTRL_EFUSE_VAL_OUT6         , 0x00 },
+	{ WCD9335_CHIP_TIER_CTRL_EFUSE_VAL_OUT7         , 0x00 },
+	{ WCD9335_CHIP_TIER_CTRL_EFUSE_VAL_OUT8         , 0x00 },
+	{ WCD9335_CHIP_TIER_CTRL_EFUSE_VAL_OUT9         , 0x00 },
+	{ WCD9335_CHIP_TIER_CTRL_EFUSE_VAL_OUT10        , 0x00 },
+	{ WCD9335_CHIP_TIER_CTRL_EFUSE_VAL_OUT11        , 0x00 },
+	{ WCD9335_CHIP_TIER_CTRL_EFUSE_VAL_OUT12        , 0x00 },
+	{ WCD9335_CHIP_TIER_CTRL_EFUSE_VAL_OUT13        , 0x00 },
+	{ WCD9335_CHIP_TIER_CTRL_EFUSE_VAL_OUT14        , 0x00 },
+	{ WCD9335_CHIP_TIER_CTRL_EFUSE_VAL_OUT15        , 0x00 },
+	{ WCD9335_CHIP_TIER_CTRL_EFUSE_STATUS           , 0x00 },
+	{ WCD9335_CHIP_TIER_CTRL_I2C_SLAVE_ID_NONNEGO   , 0x0d },
+	{ WCD9335_CHIP_TIER_CTRL_I2C_SLAVE_ID_1         , 0x00 },
+	{ WCD9335_CHIP_TIER_CTRL_I2C_SLAVE_ID_2         , 0x00 },
+	{ WCD9335_CHIP_TIER_CTRL_I2C_SLAVE_ID_3         , 0x00 },
+	{ WCD9335_CHIP_TIER_CTRL_ANA_WAIT_STATE_CTL     , 0xCC },
+	{ WCD9335_CHIP_TIER_CTRL_I2C_ACTIVE             , 0x00 },
+	{ WCD9335_CHIP_TIER_CTRL_PROC1_MON_CTL          , 0x06 },
+	{ WCD9335_CHIP_TIER_CTRL_PROC1_MON_STATUS       , 0x00 },
+	{ WCD9335_CHIP_TIER_CTRL_PROC1_MON_CNT_MSB      , 0x00 },
+	{ WCD9335_CHIP_TIER_CTRL_PROC1_MON_CNT_LSB      , 0x00 },
+	{ WCD9335_CHIP_TIER_CTRL_PROC2_MON_CTL          , 0x06 },
+	{ WCD9335_CHIP_TIER_CTRL_PROC2_MON_STATUS       , 0x00 },
+	{ WCD9335_CHIP_TIER_CTRL_PROC2_MON_CNT_MSB      , 0x00 },
+	{ WCD9335_CHIP_TIER_CTRL_PROC2_MON_CNT_LSB      , 0x00 },
+	{ WCD9335_CHIP_TIER_CTRL_PROC3_MON_CTL          , 0x06 },
+	{ WCD9335_CHIP_TIER_CTRL_PROC3_MON_STATUS       , 0x00 },
+	{ WCD9335_CHIP_TIER_CTRL_PROC3_MON_CNT_MSB      , 0x00 },
+	{ WCD9335_CHIP_TIER_CTRL_PROC3_MON_CNT_LSB      , 0x00 },
+	{ WCD9335_DATA_HUB_DATA_HUB_RX_I2S_CTL          , 0x0c },
+	{ WCD9335_DATA_HUB_DATA_HUB_TX_I2S_CTL          , 0x0c },
+	{ WCD9335_DATA_HUB_DATA_HUB_I2S_CLK             , 0x00 },
+	{ WCD9335_DATA_HUB_DATA_HUB_RX4_INP_CFG         , 0x00 },
+	{ WCD9335_DATA_HUB_DATA_HUB_RX5_INP_CFG         , 0x00 },
+	{ WCD9335_DATA_HUB_DATA_HUB_RX6_INP_CFG         , 0x00 },
+	{ WCD9335_DATA_HUB_DATA_HUB_RX7_INP_CFG         , 0x00 },
+	{ WCD9335_DATA_HUB_DATA_HUB_SB_TX0_INP_CFG      , 0x00 },
+	{ WCD9335_DATA_HUB_DATA_HUB_SB_TX1_INP_CFG      , 0x00 },
+	{ WCD9335_DATA_HUB_DATA_HUB_SB_TX2_INP_CFG      , 0x00 },
+	{ WCD9335_DATA_HUB_DATA_HUB_SB_TX3_INP_CFG      , 0x00 },
+	{ WCD9335_DATA_HUB_DATA_HUB_SB_TX4_INP_CFG      , 0x00 },
+	{ WCD9335_DATA_HUB_DATA_HUB_SB_TX5_INP_CFG      , 0x00 },
+	{ WCD9335_DATA_HUB_DATA_HUB_SB_TX6_INP_CFG      , 0x00 },
+	{ WCD9335_DATA_HUB_DATA_HUB_SB_TX7_INP_CFG      , 0x00 },
+	{ WCD9335_DATA_HUB_DATA_HUB_SB_TX8_INP_CFG      , 0x00 },
+	{ WCD9335_DATA_HUB_DATA_HUB_SB_TX9_INP_CFG      , 0x00 },
+	{ WCD9335_DATA_HUB_DATA_HUB_SB_TX10_INP_CFG     , 0x00 },
+	{ WCD9335_DATA_HUB_DATA_HUB_SB_TX11_INP_CFG     , 0x00 },
+	{ WCD9335_DATA_HUB_DATA_HUB_SB_TX13_INP_CFG     , 0x00 },
+	{ WCD9335_DATA_HUB_DATA_HUB_SB_TX14_INP_CFG     , 0x00 },
+	{ WCD9335_DATA_HUB_DATA_HUB_SB_TX15_INP_CFG     , 0x00 },
+	{ WCD9335_DATA_HUB_DATA_HUB_TX_I2S_SD0_L_CFG    , 0x00 },
+	{ WCD9335_DATA_HUB_DATA_HUB_TX_I2S_SD0_R_CFG    , 0x00 },
+	{ WCD9335_DATA_HUB_DATA_HUB_TX_I2S_SD1_L_CFG    , 0x00 },
+	{ WCD9335_DATA_HUB_DATA_HUB_TX_I2S_SD1_R_CFG    , 0x00 },
+	{ WCD9335_INTR_CFG                              , 0x00 },
+	{ WCD9335_INTR_CLR_COMMIT                       , 0x00 },
+	{ WCD9335_INTR_PIN1_MASK0                       , 0xff },
+	{ WCD9335_INTR_PIN1_MASK1                       , 0xff },
+	{ WCD9335_INTR_PIN1_MASK2                       , 0xff },
+	{ WCD9335_INTR_PIN1_MASK3                       , 0xff },
+	{ WCD9335_INTR_PIN1_STATUS0                     , 0x00 },
+	{ WCD9335_INTR_PIN1_STATUS1                     , 0x00 },
+	{ WCD9335_INTR_PIN1_STATUS2                     , 0x00 },
+	{ WCD9335_INTR_PIN1_STATUS3                     , 0x00 },
+	{ WCD9335_INTR_PIN1_CLEAR0                      , 0x00 },
+	{ WCD9335_INTR_PIN1_CLEAR1                      , 0x00 },
+	{ WCD9335_INTR_PIN1_CLEAR2                      , 0x00 },
+	{ WCD9335_INTR_PIN1_CLEAR3                      , 0x00 },
+	{ WCD9335_INTR_PIN2_MASK0                       , 0xff },
+	{ WCD9335_INTR_PIN2_MASK1                       , 0xff },
+	{ WCD9335_INTR_PIN2_MASK2                       , 0xff },
+	{ WCD9335_INTR_PIN2_MASK3                       , 0xff },
+	{ WCD9335_INTR_PIN2_STATUS0                     , 0x00 },
+	{ WCD9335_INTR_PIN2_STATUS1                     , 0x00 },
+	{ WCD9335_INTR_PIN2_STATUS2                     , 0x00 },
+	{ WCD9335_INTR_PIN2_STATUS3                     , 0x00 },
+	{ WCD9335_INTR_PIN2_CLEAR0                      , 0x00 },
+	{ WCD9335_INTR_PIN2_CLEAR1                      , 0x00 },
+	{ WCD9335_INTR_PIN2_CLEAR2                      , 0x00 },
+	{ WCD9335_INTR_PIN2_CLEAR3                      , 0x00 },
+	{ WCD9335_INTR_LEVEL0                           , 0x03 },
+	{ WCD9335_INTR_LEVEL1                           , 0xe0 },
+	{ WCD9335_INTR_LEVEL2                           , 0x10 },
+	{ WCD9335_INTR_LEVEL3                           , 0x80 },
+	{ WCD9335_INTR_BYPASS0                          , 0x00 },
+	{ WCD9335_INTR_BYPASS1                          , 0x00 },
+	{ WCD9335_INTR_BYPASS2                          , 0x00 },
+	{ WCD9335_INTR_BYPASS3                          , 0x00 },
+	{ WCD9335_INTR_SET0                             , 0x00 },
+	{ WCD9335_INTR_SET1                             , 0x00 },
+	{ WCD9335_INTR_SET2                             , 0x00 },
+	{ WCD9335_INTR_SET3                             , 0x00 },
+	/* Page #1 registers */
+	{ WCD9335_PAGE1_PAGE_REGISTER                   , 0x00 },
+	{ WCD9335_CPE_FLL_USER_CTL_0                    , 0x71 },
+	{ WCD9335_CPE_FLL_USER_CTL_1                    , 0x34 },
+	{ WCD9335_CPE_FLL_USER_CTL_2                    , 0x0b },
+	{ WCD9335_CPE_FLL_USER_CTL_3                    , 0x02 },
+	{ WCD9335_CPE_FLL_USER_CTL_4                    , 0x04 },
+	{ WCD9335_CPE_FLL_USER_CTL_5                    , 0x02 },
+	{ WCD9335_CPE_FLL_USER_CTL_6                    , 0x64 },
+	{ WCD9335_CPE_FLL_USER_CTL_7                    , 0x00 },
+	{ WCD9335_CPE_FLL_USER_CTL_8                    , 0x94 },
+	{ WCD9335_CPE_FLL_USER_CTL_9                    , 0x70 },
+	{ WCD9335_CPE_FLL_L_VAL_CTL_0                   , 0x40 },
+	{ WCD9335_CPE_FLL_L_VAL_CTL_1                   , 0x00 },
+	{ WCD9335_CPE_FLL_DSM_FRAC_CTL_0                , 0x00 },
+	{ WCD9335_CPE_FLL_DSM_FRAC_CTL_1                , 0xff },
+	{ WCD9335_CPE_FLL_CONFIG_CTL_0                  , 0x6b },
+	{ WCD9335_CPE_FLL_CONFIG_CTL_1                  , 0x05 },
+	{ WCD9335_CPE_FLL_CONFIG_CTL_2                  , 0x08 },
+	{ WCD9335_CPE_FLL_CONFIG_CTL_3                  , 0x00 },
+	{ WCD9335_CPE_FLL_CONFIG_CTL_4                  , 0x10 },
+	{ WCD9335_CPE_FLL_TEST_CTL_0                    , 0x00 },
+	{ WCD9335_CPE_FLL_TEST_CTL_1                    , 0x00 },
+	{ WCD9335_CPE_FLL_TEST_CTL_2                    , 0x00 },
+	{ WCD9335_CPE_FLL_TEST_CTL_3                    , 0x00 },
+	{ WCD9335_CPE_FLL_TEST_CTL_4                    , 0x00 },
+	{ WCD9335_CPE_FLL_TEST_CTL_5                    , 0x00 },
+	{ WCD9335_CPE_FLL_TEST_CTL_6                    , 0x00 },
+	{ WCD9335_CPE_FLL_TEST_CTL_7                    , 0x33 },
+	{ WCD9335_CPE_FLL_FREQ_CTL_0                    , 0x00 },
+	{ WCD9335_CPE_FLL_FREQ_CTL_1                    , 0x00 },
+	{ WCD9335_CPE_FLL_FREQ_CTL_2                    , 0x00 },
+	{ WCD9335_CPE_FLL_FREQ_CTL_3                    , 0x00 },
+	{ WCD9335_CPE_FLL_SSC_CTL_0                     , 0x00 },
+	{ WCD9335_CPE_FLL_SSC_CTL_1                     , 0x00 },
+	{ WCD9335_CPE_FLL_SSC_CTL_2                     , 0x00 },
+	{ WCD9335_CPE_FLL_SSC_CTL_3                     , 0x00 },
+	{ WCD9335_CPE_FLL_FLL_MODE                      , 0x20 },
+	{ WCD9335_CPE_FLL_STATUS_0                      , 0x00 },
+	{ WCD9335_CPE_FLL_STATUS_1                      , 0x00 },
+	{ WCD9335_CPE_FLL_STATUS_2                      , 0x00 },
+	{ WCD9335_CPE_FLL_STATUS_3                      , 0x00 },
+	{ WCD9335_I2S_FLL_USER_CTL_0                    , 0x41 },
+	{ WCD9335_I2S_FLL_USER_CTL_1                    , 0x94 },
+	{ WCD9335_I2S_FLL_USER_CTL_2                    , 0x08 },
+	{ WCD9335_I2S_FLL_USER_CTL_3                    , 0x02 },
+	{ WCD9335_I2S_FLL_USER_CTL_4                    , 0x04 },
+	{ WCD9335_I2S_FLL_USER_CTL_5                    , 0x02 },
+	{ WCD9335_I2S_FLL_USER_CTL_6                    , 0x40 },
+	{ WCD9335_I2S_FLL_USER_CTL_7                    , 0x00 },
+	{ WCD9335_I2S_FLL_USER_CTL_8                    , 0x5f },
+	{ WCD9335_I2S_FLL_USER_CTL_9                    , 0x02 },
+	{ WCD9335_I2S_FLL_L_VAL_CTL_0                   , 0x40 },
+	{ WCD9335_I2S_FLL_L_VAL_CTL_1                   , 0x00 },
+	{ WCD9335_I2S_FLL_DSM_FRAC_CTL_0                , 0x00 },
+	{ WCD9335_I2S_FLL_DSM_FRAC_CTL_1                , 0xff },
+	{ WCD9335_I2S_FLL_CONFIG_CTL_0                  , 0x6b },
+	{ WCD9335_I2S_FLL_CONFIG_CTL_1                  , 0x05 },
+	{ WCD9335_I2S_FLL_CONFIG_CTL_2                  , 0x08 },
+	{ WCD9335_I2S_FLL_CONFIG_CTL_3                  , 0x00 },
+	{ WCD9335_I2S_FLL_CONFIG_CTL_4                  , 0x30 },
+	{ WCD9335_I2S_FLL_TEST_CTL_0                    , 0x80 },
+	{ WCD9335_I2S_FLL_TEST_CTL_1                    , 0x00 },
+	{ WCD9335_I2S_FLL_TEST_CTL_2                    , 0x00 },
+	{ WCD9335_I2S_FLL_TEST_CTL_3                    , 0x00 },
+	{ WCD9335_I2S_FLL_TEST_CTL_4                    , 0x00 },
+	{ WCD9335_I2S_FLL_TEST_CTL_5                    , 0x00 },
+	{ WCD9335_I2S_FLL_TEST_CTL_6                    , 0x00 },
+	{ WCD9335_I2S_FLL_TEST_CTL_7                    , 0xff },
+	{ WCD9335_I2S_FLL_FREQ_CTL_0                    , 0x00 },
+	{ WCD9335_I2S_FLL_FREQ_CTL_1                    , 0x00 },
+	{ WCD9335_I2S_FLL_FREQ_CTL_2                    , 0x00 },
+	{ WCD9335_I2S_FLL_FREQ_CTL_3                    , 0x00 },
+	{ WCD9335_I2S_FLL_SSC_CTL_0                     , 0x00 },
+	{ WCD9335_I2S_FLL_SSC_CTL_1                     , 0x00 },
+	{ WCD9335_I2S_FLL_SSC_CTL_2                     , 0x00 },
+	{ WCD9335_I2S_FLL_SSC_CTL_3                     , 0x00 },
+	{ WCD9335_I2S_FLL_FLL_MODE                      , 0x00 },
+	{ WCD9335_I2S_FLL_STATUS_0                      , 0x00 },
+	{ WCD9335_I2S_FLL_STATUS_1                      , 0x00 },
+	{ WCD9335_I2S_FLL_STATUS_2                      , 0x00 },
+	{ WCD9335_I2S_FLL_STATUS_3                      , 0x00 },
+	{ WCD9335_SB_FLL_USER_CTL_0                     , 0x41 },
+	{ WCD9335_SB_FLL_USER_CTL_1                     , 0x94 },
+	{ WCD9335_SB_FLL_USER_CTL_2                     , 0x08 },
+	{ WCD9335_SB_FLL_USER_CTL_3                     , 0x02 },
+	{ WCD9335_SB_FLL_USER_CTL_4                     , 0x04 },
+	{ WCD9335_SB_FLL_USER_CTL_5                     , 0x02 },
+	{ WCD9335_SB_FLL_USER_CTL_6                     , 0x40 },
+	{ WCD9335_SB_FLL_USER_CTL_7                     , 0x00 },
+	{ WCD9335_SB_FLL_USER_CTL_8                     , 0x5e },
+	{ WCD9335_SB_FLL_USER_CTL_9                     , 0x01 },
+	{ WCD9335_SB_FLL_L_VAL_CTL_0                    , 0x40 },
+	{ WCD9335_SB_FLL_L_VAL_CTL_1                    , 0x00 },
+	{ WCD9335_SB_FLL_DSM_FRAC_CTL_0                 , 0x00 },
+	{ WCD9335_SB_FLL_DSM_FRAC_CTL_1                 , 0xff },
+	{ WCD9335_SB_FLL_CONFIG_CTL_0                   , 0x6b },
+	{ WCD9335_SB_FLL_CONFIG_CTL_1                   , 0x05 },
+	{ WCD9335_SB_FLL_CONFIG_CTL_2                   , 0x08 },
+	{ WCD9335_SB_FLL_CONFIG_CTL_3                   , 0x00 },
+	{ WCD9335_SB_FLL_CONFIG_CTL_4                   , 0x10 },
+	{ WCD9335_SB_FLL_TEST_CTL_0                     , 0x00 },
+	{ WCD9335_SB_FLL_TEST_CTL_1                     , 0x00 },
+	{ WCD9335_SB_FLL_TEST_CTL_2                     , 0x00 },
+	{ WCD9335_SB_FLL_TEST_CTL_3                     , 0x00 },
+	{ WCD9335_SB_FLL_TEST_CTL_4                     , 0x00 },
+	{ WCD9335_SB_FLL_TEST_CTL_5                     , 0x00 },
+	{ WCD9335_SB_FLL_TEST_CTL_6                     , 0x00 },
+	{ WCD9335_SB_FLL_TEST_CTL_7                     , 0xff },
+	{ WCD9335_SB_FLL_FREQ_CTL_0                     , 0x00 },
+	{ WCD9335_SB_FLL_FREQ_CTL_1                     , 0x00 },
+	{ WCD9335_SB_FLL_FREQ_CTL_2                     , 0x00 },
+	{ WCD9335_SB_FLL_FREQ_CTL_3                     , 0x00 },
+	{ WCD9335_SB_FLL_SSC_CTL_0                      , 0x00 },
+	{ WCD9335_SB_FLL_SSC_CTL_1                      , 0x00 },
+	{ WCD9335_SB_FLL_SSC_CTL_2                      , 0x00 },
+	{ WCD9335_SB_FLL_SSC_CTL_3                      , 0x00 },
+	{ WCD9335_SB_FLL_FLL_MODE                       , 0x00 },
+	{ WCD9335_SB_FLL_STATUS_0                       , 0x00 },
+	{ WCD9335_SB_FLL_STATUS_1                       , 0x00 },
+	{ WCD9335_SB_FLL_STATUS_2                       , 0x00 },
+	{ WCD9335_SB_FLL_STATUS_3                       , 0x00 },
+	/* Page #2 registers */
+	{ WCD9335_PAGE2_PAGE_REGISTER                   , 0x00 },
+	{ WCD9335_CPE_SS_MEM_PTR_0                      , 0x00 },
+	{ WCD9335_CPE_SS_MEM_PTR_1                      , 0x00 },
+	{ WCD9335_CPE_SS_MEM_PTR_2                      , 0x00 },
+	{ WCD9335_CPE_SS_MEM_CTRL                       , 0x08 },
+	{ WCD9335_CPE_SS_MEM_BANK_0                     , 0x00 },
+	{ WCD9335_CPE_SS_MEM_BANK_1                     , 0x00 },
+	{ WCD9335_CPE_SS_MEM_BANK_2                     , 0x00 },
+	{ WCD9335_CPE_SS_MEM_BANK_3                     , 0x00 },
+	{ WCD9335_CPE_SS_MEM_BANK_4                     , 0x00 },
+	{ WCD9335_CPE_SS_MEM_BANK_5                     , 0x00 },
+	{ WCD9335_CPE_SS_MEM_BANK_6                     , 0x00 },
+	{ WCD9335_CPE_SS_MEM_BANK_7                     , 0x00 },
+	{ WCD9335_CPE_SS_MEM_BANK_8                     , 0x00 },
+	{ WCD9335_CPE_SS_MEM_BANK_9                     , 0x00 },
+	{ WCD9335_CPE_SS_MEM_BANK_10                    , 0x00 },
+	{ WCD9335_CPE_SS_MEM_BANK_11                    , 0x00 },
+	{ WCD9335_CPE_SS_MEM_BANK_12                    , 0x00 },
+	{ WCD9335_CPE_SS_MEM_BANK_13                    , 0x00 },
+	{ WCD9335_CPE_SS_MEM_BANK_14                    , 0x00 },
+	{ WCD9335_CPE_SS_MEM_BANK_15                    , 0x00 },
+	{ WCD9335_CPE_SS_INBOX1_TRG                     , 0x00 },
+	{ WCD9335_CPE_SS_INBOX2_TRG                     , 0x00 },
+	{ WCD9335_CPE_SS_INBOX1_0                       , 0x00 },
+	{ WCD9335_CPE_SS_INBOX1_1                       , 0x00 },
+	{ WCD9335_CPE_SS_INBOX1_2                       , 0x00 },
+	{ WCD9335_CPE_SS_INBOX1_3                       , 0x00 },
+	{ WCD9335_CPE_SS_INBOX1_4                       , 0x00 },
+	{ WCD9335_CPE_SS_INBOX1_5                       , 0x00 },
+	{ WCD9335_CPE_SS_INBOX1_6                       , 0x00 },
+	{ WCD9335_CPE_SS_INBOX1_7                       , 0x00 },
+	{ WCD9335_CPE_SS_INBOX1_8                       , 0x00 },
+	{ WCD9335_CPE_SS_INBOX1_9                       , 0x00 },
+	{ WCD9335_CPE_SS_INBOX1_10                      , 0x00 },
+	{ WCD9335_CPE_SS_INBOX1_11                      , 0x00 },
+	{ WCD9335_CPE_SS_INBOX1_12                      , 0x00 },
+	{ WCD9335_CPE_SS_INBOX1_13                      , 0x00 },
+	{ WCD9335_CPE_SS_INBOX1_14                      , 0x00 },
+	{ WCD9335_CPE_SS_INBOX1_15                      , 0x00 },
+	{ WCD9335_CPE_SS_OUTBOX1_0                      , 0x00 },
+	{ WCD9335_CPE_SS_OUTBOX1_1                      , 0x00 },
+	{ WCD9335_CPE_SS_OUTBOX1_2                      , 0x00 },
+	{ WCD9335_CPE_SS_OUTBOX1_3                      , 0x00 },
+	{ WCD9335_CPE_SS_OUTBOX1_4                      , 0x00 },
+	{ WCD9335_CPE_SS_OUTBOX1_5                      , 0x00 },
+	{ WCD9335_CPE_SS_OUTBOX1_6                      , 0x00 },
+	{ WCD9335_CPE_SS_OUTBOX1_7                      , 0x00 },
+	{ WCD9335_CPE_SS_OUTBOX1_8                      , 0x00 },
+	{ WCD9335_CPE_SS_OUTBOX1_9                      , 0x00 },
+	{ WCD9335_CPE_SS_OUTBOX1_10                     , 0x00 },
+	{ WCD9335_CPE_SS_OUTBOX1_11                     , 0x00 },
+	{ WCD9335_CPE_SS_OUTBOX1_12                     , 0x00 },
+	{ WCD9335_CPE_SS_OUTBOX1_13                     , 0x00 },
+	{ WCD9335_CPE_SS_OUTBOX1_14                     , 0x00 },
+	{ WCD9335_CPE_SS_OUTBOX1_15                     , 0x00 },
+	{ WCD9335_CPE_SS_INBOX2_0                       , 0x00 },
+	{ WCD9335_CPE_SS_INBOX2_1                       , 0x00 },
+	{ WCD9335_CPE_SS_INBOX2_2                       , 0x00 },
+	{ WCD9335_CPE_SS_INBOX2_3                       , 0x00 },
+	{ WCD9335_CPE_SS_INBOX2_4                       , 0x00 },
+	{ WCD9335_CPE_SS_INBOX2_5                       , 0x00 },
+	{ WCD9335_CPE_SS_INBOX2_6                       , 0x00 },
+	{ WCD9335_CPE_SS_INBOX2_7                       , 0x00 },
+	{ WCD9335_CPE_SS_INBOX2_8                       , 0x00 },
+	{ WCD9335_CPE_SS_INBOX2_9                       , 0x00 },
+	{ WCD9335_CPE_SS_INBOX2_10                      , 0x00 },
+	{ WCD9335_CPE_SS_INBOX2_11                      , 0x00 },
+	{ WCD9335_CPE_SS_INBOX2_12                      , 0x00 },
+	{ WCD9335_CPE_SS_INBOX2_13                      , 0x00 },
+	{ WCD9335_CPE_SS_INBOX2_14                      , 0x00 },
+	{ WCD9335_CPE_SS_INBOX2_15                      , 0x00 },
+	{ WCD9335_CPE_SS_OUTBOX2_0                      , 0x00 },
+	{ WCD9335_CPE_SS_OUTBOX2_1                      , 0x00 },
+	{ WCD9335_CPE_SS_OUTBOX2_2                      , 0x00 },
+	{ WCD9335_CPE_SS_OUTBOX2_3                      , 0x00 },
+	{ WCD9335_CPE_SS_OUTBOX2_4                      , 0x00 },
+	{ WCD9335_CPE_SS_OUTBOX2_5                      , 0x00 },
+	{ WCD9335_CPE_SS_OUTBOX2_6                      , 0x00 },
+	{ WCD9335_CPE_SS_OUTBOX2_7                      , 0x00 },
+	{ WCD9335_CPE_SS_OUTBOX2_8                      , 0x00 },
+	{ WCD9335_CPE_SS_OUTBOX2_9                      , 0x00 },
+	{ WCD9335_CPE_SS_OUTBOX2_10                     , 0x00 },
+	{ WCD9335_CPE_SS_OUTBOX2_11                     , 0x00 },
+	{ WCD9335_CPE_SS_OUTBOX2_12                     , 0x00 },
+	{ WCD9335_CPE_SS_OUTBOX2_13                     , 0x00 },
+	{ WCD9335_CPE_SS_OUTBOX2_14                     , 0x00 },
+	{ WCD9335_CPE_SS_OUTBOX2_15                     , 0x00 },
+	{ WCD9335_CPE_SS_OUTBOX1_ACK                    , 0x00 },
+	{ WCD9335_CPE_SS_OUTBOX2_ACK                    , 0x00 },
+	{ WCD9335_CPE_SS_EC_BUF_INT_PERIOD              , 0x3c },
+	{ WCD9335_CPE_SS_US_BUF_INT_PERIOD              , 0x60 },
+	{ WCD9335_CPE_SS_CFG                            , 0x41 },
+	{ WCD9335_CPE_SS_US_EC_MUX_CFG                  , 0x00 },
+	{ WCD9335_CPE_SS_MAD_CTL                        , 0x00 },
+	{ WCD9335_CPE_SS_CPAR_CTL                       , 0x00 },
+	{ WCD9335_CPE_SS_DMIC0_CTL                      , 0x00 },
+	{ WCD9335_CPE_SS_DMIC1_CTL                      , 0x00 },
+	{ WCD9335_CPE_SS_DMIC2_CTL                      , 0x00 },
+	{ WCD9335_CPE_SS_DMIC_CFG                       , 0x80 },
+	{ WCD9335_CPE_SS_CPAR_CFG                       , 0x00 },
+	{ WCD9335_CPE_SS_WDOG_CFG                       , 0x01 },
+	{ WCD9335_CPE_SS_BACKUP_INT                     , 0x00 },
+	{ WCD9335_CPE_SS_STATUS                         , 0x00 },
+	{ WCD9335_CPE_SS_CPE_OCD_CFG                    , 0x00 },
+	{ WCD9335_CPE_SS_SS_ERROR_INT_STATUS            , 0x00 },
+	{ WCD9335_CPE_SS_SS_ERROR_INT_CLEAR             , 0x00 },
+	{ WCD9335_SOC_MAD_MAIN_CTL_1                    , 0x00 },
+	{ WCD9335_SOC_MAD_MAIN_CTL_2                    , 0x00 },
+	{ WCD9335_SOC_MAD_AUDIO_CTL_1                   , 0x00 },
+	{ WCD9335_SOC_MAD_AUDIO_CTL_2                   , 0x00 },
+	{ WCD9335_SOC_MAD_AUDIO_CTL_3                   , 0x00 },
+	{ WCD9335_SOC_MAD_AUDIO_CTL_4                   , 0x00 },
+	{ WCD9335_SOC_MAD_AUDIO_CTL_5                   , 0x00 },
+	{ WCD9335_SOC_MAD_AUDIO_CTL_6                   , 0x00 },
+	{ WCD9335_SOC_MAD_AUDIO_CTL_7                   , 0x00 },
+	{ WCD9335_SOC_MAD_AUDIO_CTL_8                   , 0x00 },
+	{ WCD9335_SOC_MAD_AUDIO_IIR_CTL_PTR             , 0x00 },
+	{ WCD9335_SOC_MAD_ULTR_CTL_1                    , 0x00 },
+	{ WCD9335_SOC_MAD_ULTR_CTL_2                    , 0x00 },
+	{ WCD9335_SOC_MAD_ULTR_CTL_3                    , 0x00 },
+	{ WCD9335_SOC_MAD_ULTR_CTL_4                    , 0x00 },
+	{ WCD9335_SOC_MAD_ULTR_CTL_5                    , 0x00 },
+	{ WCD9335_SOC_MAD_ULTR_CTL_6                    , 0x00 },
+	{ WCD9335_SOC_MAD_ULTR_CTL_7                    , 0x00 },
+	{ WCD9335_SOC_MAD_BEACON_CTL_1                  , 0x00 },
+	{ WCD9335_SOC_MAD_BEACON_CTL_2                  , 0x00 },
+	{ WCD9335_SOC_MAD_BEACON_CTL_3                  , 0x00 },
+	{ WCD9335_SOC_MAD_BEACON_CTL_4                  , 0x00 },
+	{ WCD9335_SOC_MAD_BEACON_CTL_5                  , 0x00 },
+	{ WCD9335_SOC_MAD_BEACON_CTL_6                  , 0x00 },
+	{ WCD9335_SOC_MAD_BEACON_CTL_7                  , 0x00 },
+	{ WCD9335_SOC_MAD_BEACON_CTL_8                  , 0x00 },
+	{ WCD9335_SOC_MAD_BEACON_IIR_CTL_PTR            , 0x00 },
+	{ WCD9335_SOC_MAD_BEACON_IIR_CTL_VAL            , 0x00 },
+	{ WCD9335_SOC_MAD_INP_SEL                       , 0x00 },
+	/* Page #6 registers */
+	{ WCD9335_PAGE6_PAGE_REGISTER                   , 0x00 },
+	{ WCD9335_ANA_BIAS                              , 0x00 },
+	{ WCD9335_ANA_CLK_TOP                           , 0x00 },
+	{ WCD9335_ANA_RCO                               , 0x30 },
+	{ WCD9335_ANA_BUCK_VOUT_A                       , 0xb4 },
+	{ WCD9335_ANA_BUCK_VOUT_D                       , 0xb4 },
+	{ WCD9335_ANA_BUCK_CTL                          , 0x00 },
+	{ WCD9335_ANA_BUCK_STATUS                       , 0xe0 },
+	{ WCD9335_ANA_RX_SUPPLIES                       , 0x00 },
+	{ WCD9335_ANA_HPH                               , 0x00 },
+	{ WCD9335_ANA_EAR                               , 0x00 },
+	{ WCD9335_ANA_LO_1_2                            , 0x00 },
+	{ WCD9335_ANA_LO_3_4                            , 0x00 },
+	{ WCD9335_ANA_MAD_SETUP                         , 0x81 },
+	{ WCD9335_ANA_AMIC1                             , 0x20 },
+	{ WCD9335_ANA_AMIC2                             , 0x00 },
+	{ WCD9335_ANA_AMIC3                             , 0x20 },
+	{ WCD9335_ANA_AMIC4                             , 0x00 },
+	{ WCD9335_ANA_AMIC5                             , 0x20 },
+	{ WCD9335_ANA_AMIC6                             , 0x00 },
+	{ WCD9335_ANA_MBHC_MECH                         , 0x39 },
+	{ WCD9335_ANA_MBHC_ELECT                        , 0x08 },
+	{ WCD9335_ANA_MBHC_ZDET                         , 0x00 },
+	{ WCD9335_ANA_MBHC_RESULT_1                     , 0x00 },
+	{ WCD9335_ANA_MBHC_RESULT_2                     , 0x00 },
+	{ WCD9335_ANA_MBHC_RESULT_3                     , 0x00 },
+	{ WCD9335_ANA_MBHC_BTN0                         , 0x00 },
+	{ WCD9335_ANA_MBHC_BTN1                         , 0x10 },
+	{ WCD9335_ANA_MBHC_BTN2                         , 0x20 },
+	{ WCD9335_ANA_MBHC_BTN3                         , 0x30 },
+	{ WCD9335_ANA_MBHC_BTN4                         , 0x40 },
+	{ WCD9335_ANA_MBHC_BTN5                         , 0x50 },
+	{ WCD9335_ANA_MBHC_BTN6                         , 0x60 },
+	{ WCD9335_ANA_MBHC_BTN7                         , 0x70 },
+	{ WCD9335_ANA_MICB1                             , 0x10 },
+	{ WCD9335_ANA_MICB2                             , 0x10 },
+	{ WCD9335_ANA_MICB2_RAMP                        , 0x00 },
+	{ WCD9335_ANA_MICB3                             , 0x10 },
+	{ WCD9335_ANA_MICB4                             , 0x10 },
+	{ WCD9335_ANA_VBADC                             , 0x00 },
+	{ WCD9335_BIAS_CTL                              , 0x28 },
+	{ WCD9335_CLOCK_TEST_CTL                        , 0x00 },
+	{ WCD9335_RCO_CTRL_1                            , 0x44 },
+	{ WCD9335_RCO_CTRL_2                            , 0x44 },
+	{ WCD9335_RCO_CAL                               , 0x00 },
+	{ WCD9335_RCO_CAL_1                             , 0x00 },
+	{ WCD9335_RCO_CAL_2                             , 0x00 },
+	{ WCD9335_RCO_TEST_CTRL                         , 0x00 },
+	{ WCD9335_RCO_CAL_OUT_1                         , 0x00 },
+	{ WCD9335_RCO_CAL_OUT_2                         , 0x00 },
+	{ WCD9335_RCO_CAL_OUT_3                         , 0x00 },
+	{ WCD9335_RCO_CAL_OUT_4                         , 0x00 },
+	{ WCD9335_RCO_CAL_OUT_5                         , 0x00 },
+	{ WCD9335_SIDO_SIDO_MODE_1                      , 0x84 },
+	{ WCD9335_SIDO_SIDO_MODE_2                      , 0xfe },
+	{ WCD9335_SIDO_SIDO_MODE_3                      , 0xf6 },
+	{ WCD9335_SIDO_SIDO_MODE_4                      , 0x56 },
+	{ WCD9335_SIDO_SIDO_VCL_1                       , 0x00 },
+	{ WCD9335_SIDO_SIDO_VCL_2                       , 0x6c },
+	{ WCD9335_SIDO_SIDO_VCL_3                       , 0x44 },
+	{ WCD9335_SIDO_SIDO_CCL_1                       , 0x57 },
+	{ WCD9335_SIDO_SIDO_CCL_4                       , 0x61 },
+	{ WCD9335_SIDO_SIDO_CCL_5                       , 0x6d },
+	{ WCD9335_SIDO_SIDO_CCL_6                       , 0x60 },
+	{ WCD9335_SIDO_SIDO_CCL_7                       , 0x6f },
+	{ WCD9335_SIDO_SIDO_CCL_9                       , 0x6e },
+	{ WCD9335_SIDO_SIDO_FILTER_1                    , 0x92 },
+	{ WCD9335_SIDO_SIDO_FILTER_2                    , 0x54 },
+	{ WCD9335_SIDO_SIDO_DRIVER_1                    , 0x77 },
+	{ WCD9335_SIDO_SIDO_CAL_CODE_EXT_1              , 0x9c },
+	{ WCD9335_SIDO_SIDO_CAL_CODE_EXT_2              , 0x82 },
+	{ WCD9335_SIDO_SIDO_CAL_CODE_OUT_1              , 0x00 },
+	{ WCD9335_SIDO_SIDO_CAL_CODE_OUT_2              , 0x00 },
+	{ WCD9335_SIDO_SIDO_TEST_1                      , 0x00 },
+	{ WCD9335_MBHC_CTL_1                            , 0x32 },
+	{ WCD9335_MBHC_CTL_2                            , 0x01 },
+	{ WCD9335_MBHC_PLUG_DETECT_CTL                  , 0x69 },
+	{ WCD9335_MBHC_ZDET_RAMP_CTL                    , 0x00 },
+	{ WCD9335_MBHC_TEST_CTL                         , 0x00 },
+	{ WCD9335_VBADC_SUBBLOCK_EN                     , 0xfe },
+	{ WCD9335_VBADC_IBIAS_FE                        , 0x54 },
+	{ WCD9335_VBADC_BIAS_ADC                        , 0x51 },
+	{ WCD9335_VBADC_FE_CTRL                         , 0x1c },
+	{ WCD9335_VBADC_ADC_REF                         , 0x20 },
+	{ WCD9335_VBADC_ADC_IO                          , 0x80 },
+	{ WCD9335_VBADC_ADC_SAR                         , 0xff },
+	{ WCD9335_VBADC_DEBUG                           , 0x00 },
+	{ WCD9335_VBADC_ADC_DOUTMSB                     , 0x00 },
+	{ WCD9335_VBADC_ADC_DOUTLSB                     , 0x00 },
+	{ WCD9335_LDOH_MODE                             , 0x2b },
+	{ WCD9335_LDOH_BIAS                             , 0x68 },
+	{ WCD9335_LDOH_STB_LOADS                        , 0x00 },
+	{ WCD9335_LDOH_SLOWRAMP                         , 0x50 },
+	{ WCD9335_MICB1_TEST_CTL_1                      , 0x1a },
+	{ WCD9335_MICB1_TEST_CTL_2                      , 0x18 },
+	{ WCD9335_MICB1_TEST_CTL_3                      , 0xa4 },
+	{ WCD9335_MICB2_TEST_CTL_1                      , 0x1a },
+	{ WCD9335_MICB2_TEST_CTL_2                      , 0x18 },
+	{ WCD9335_MICB2_TEST_CTL_3                      , 0x24 },
+	{ WCD9335_MICB3_TEST_CTL_1                      , 0x1a },
+	{ WCD9335_MICB3_TEST_CTL_2                      , 0x18 },
+	{ WCD9335_MICB3_TEST_CTL_3                      , 0xa4 },
+	{ WCD9335_MICB4_TEST_CTL_1                      , 0x1a },
+	{ WCD9335_MICB4_TEST_CTL_2                      , 0x18 },
+	{ WCD9335_MICB4_TEST_CTL_3                      , 0xa4 },
+	{ WCD9335_TX_COM_ADC_VCM                        , 0x39 },
+	{ WCD9335_TX_COM_BIAS_ATEST                     , 0xc0 },
+	{ WCD9335_TX_COM_ADC_INT1_IB                    , 0x6f },
+	{ WCD9335_TX_COM_ADC_INT2_IB                    , 0x4f },
+	{ WCD9335_TX_COM_TXFE_DIV_CTL                   , 0x2e },
+	{ WCD9335_TX_COM_TXFE_DIV_START                 , 0x00 },
+	{ WCD9335_TX_COM_TXFE_DIV_STOP_9P6M             , 0xc7 },
+	{ WCD9335_TX_COM_TXFE_DIV_STOP_12P288M          , 0xff },
+	{ WCD9335_TX_1_2_TEST_EN                        , 0xcc },
+	{ WCD9335_TX_1_2_ADC_IB                         , 0x09 },
+	{ WCD9335_TX_1_2_TEST_CTL                       , 0x38 },
+	{ WCD9335_TX_1_2_TEST_BLK_EN                    , 0xff },
+	{ WCD9335_TX_1_2_TXFE_CLKDIV                    , 0x00 },
+	{ WCD9335_TX_1_2_SAR1_ERR                       , 0x00 },
+	{ WCD9335_TX_1_2_SAR2_ERR                       , 0x00 },
+	{ WCD9335_TX_3_4_TEST_EN                        , 0xcc },
+	{ WCD9335_TX_3_4_ADC_IB                         , 0x09 },
+	{ WCD9335_TX_3_4_TEST_CTL                       , 0x38 },
+	{ WCD9335_TX_3_4_TEST_BLK_EN                    , 0xff },
+	{ WCD9335_TX_3_4_TXFE_CLKDIV                    , 0x00 },
+	{ WCD9335_TX_3_4_SAR1_ERR                       , 0x00 },
+	{ WCD9335_TX_3_4_SAR2_ERR                       , 0x00 },
+	{ WCD9335_TX_5_6_TEST_EN                        , 0xcc },
+	{ WCD9335_TX_5_6_ADC_IB                         , 0x09 },
+	{ WCD9335_TX_5_6_TEST_CTL                       , 0x38 },
+	{ WCD9335_TX_5_6_TEST_BLK_EN                    , 0xff },
+	{ WCD9335_TX_5_6_TXFE_CLKDIV                    , 0x00 },
+	{ WCD9335_TX_5_6_SAR1_ERR                       , 0x00 },
+	{ WCD9335_TX_5_6_SAR2_ERR                       , 0x00 },
+	{ WCD9335_CLASSH_MODE_1                         , 0x40 },
+	{ WCD9335_CLASSH_MODE_2                         , 0x3a },
+	{ WCD9335_CLASSH_MODE_3                         , 0x00 },
+	{ WCD9335_CLASSH_CTRL_VCL_1                     , 0x70 },
+	{ WCD9335_CLASSH_CTRL_VCL_2                     , 0xa2 },
+	{ WCD9335_CLASSH_CTRL_CCL_1                     , 0x51 },
+	{ WCD9335_CLASSH_CTRL_CCL_2                     , 0x80 },
+	{ WCD9335_CLASSH_CTRL_CCL_3                     , 0x80 },
+	{ WCD9335_CLASSH_CTRL_CCL_4                     , 0x51 },
+	{ WCD9335_CLASSH_CTRL_CCL_5                     , 0x00 },
+	{ WCD9335_CLASSH_BUCK_TMUX_A_D                  , 0x00 },
+	{ WCD9335_CLASSH_BUCK_SW_DRV_CNTL               , 0x77 },
+	{ WCD9335_CLASSH_SPARE                          , 0x00 },
+	{ WCD9335_FLYBACK_EN                            , 0x4e },
+	{ WCD9335_FLYBACK_VNEG_CTRL_2                   , 0x45 },
+	{ WCD9335_FLYBACK_VNEG_CTRL_3                   , 0x74 },
+	{ WCD9335_FLYBACK_VNEG_CTRL_5                   , 0x83 },
+	{ WCD9335_FLYBACK_VNEG_CTRL_6                   , 0x98 },
+	{ WCD9335_FLYBACK_VNEG_CTRL_7                   , 0xa9 },
+	{ WCD9335_FLYBACK_VNEG_CTRL_8                   , 0x68 },
+	{ WCD9335_FLYBACK_VNEG_DAC_CTRL_2               , 0x50 },
+	{ WCD9335_FLYBACK_VNEG_DAC_CTRL_3               , 0xa6 },
+	{ WCD9335_FLYBACK_TEST_CTL                      , 0x00 },
+	{ WCD9335_RX_AUX_SW_CTL                         , 0x00 },
+	{ WCD9335_RX_PA_AUX_IN_CONN                     , 0x00 },
+	{ WCD9335_RX_TIMER_DIV                          , 0x74 },
+	{ WCD9335_RX_OCP_CTL                            , 0x1f },
+	{ WCD9335_RX_OCP_COUNT                          , 0x77 },
+	{ WCD9335_RX_BIAS_EAR_DAC                       , 0xa0 },
+	{ WCD9335_RX_BIAS_EAR_AMP                       , 0xaa },
+	{ WCD9335_RX_BIAS_HPH_LDO                       , 0xa9 },
+	{ WCD9335_RX_BIAS_HPH_RDACBUFF_CNP2             , 0x8a },
+	{ WCD9335_RX_BIAS_HPH_RDAC_LDO                  , 0x88 },
+	{ WCD9335_RX_BIAS_HPH_CNP1                      , 0x86 },
+	{ WCD9335_RX_BIAS_DIFFLO_PA                     , 0x80 },
+	{ WCD9335_RX_BIAS_DIFFLO_REF                    , 0x88 },
+	{ WCD9335_RX_BIAS_DIFFLO_LDO                    , 0x88 },
+	{ WCD9335_RX_BIAS_SELO_DAC_PA                   , 0xa8 },
+	{ WCD9335_RX_BIAS_BUCK_RST                      , 0x08 },
+	{ WCD9335_RX_BIAS_BUCK_VREF_ERRAMP              , 0x44 },
+	{ WCD9335_RX_BIAS_FLYB_ERRAMP                   , 0x40 },
+	{ WCD9335_RX_BIAS_FLYB_BUFF                     , 0xaa },
+	{ WCD9335_RX_BIAS_FLYB_MID_RST                  , 0x44 },
+	{ WCD9335_HPH_L_STATUS                          , 0x04 },
+	{ WCD9335_HPH_R_STATUS                          , 0x04 },
+	{ WCD9335_HPH_CNP_EN                            , 0x80 },
+	{ WCD9335_HPH_CNP_WG_CTL                        , 0xda },
+	{ WCD9335_HPH_CNP_WG_TIME                       , 0x15 },
+	{ WCD9335_HPH_OCP_CTL                           , 0x28 },
+	{ WCD9335_HPH_AUTO_CHOP                         , 0x12 },
+	{ WCD9335_HPH_CHOP_CTL                          , 0x83 },
+	{ WCD9335_HPH_PA_CTL1                           , 0x46 },
+	{ WCD9335_HPH_L_TEST                            , 0x00 },
+	{ WCD9335_HPH_L_ATEST                           , 0x50 },
+	{ WCD9335_HPH_R_TEST                            , 0x00 },
+	{ WCD9335_HPH_RDAC_CLK_CTL1                     , 0x99 },
+	{ WCD9335_HPH_RDAC_CLK_CTL2                     , 0x9b },
+	{ WCD9335_HPH_RDAC_CHOP_CLK_LP_CTL              , 0x00 },
+	{ WCD9335_HPH_REFBUFF_UHQA_CTL                  , 0xa8 },
+	{ WCD9335_HPH_REFBUFF_LP_CTL                    , 0x00 },
+	{ WCD9335_HPH_L_DAC_CTL                         , 0x00 },
+	{ WCD9335_HPH_R_DAC_CTL                         , 0x00 },
+	{ WCD9335_EAR_EN_REG                            , 0x60 },
+	{ WCD9335_EAR_CMBUFF                            , 0x0d },
+	{ WCD9335_EAR_ICTL                              , 0x40 },
+	{ WCD9335_EAR_EN_DBG_CTL                        , 0x00 },
+	{ WCD9335_EAR_CNP                               , 0xe0 },
+	{ WCD9335_EAR_DAC_CTL_ATEST                     , 0x00 },
+	{ WCD9335_EAR_STATUS_REG                        , 0x04 },
+	{ WCD9335_EAR_OUT_SHORT                         , 0x00 },
+	{ WCD9335_DIFF_LO_MISC                          , 0x03 },
+	{ WCD9335_DIFF_LO_LO2_COMPANDER                 , 0x00 },
+	{ WCD9335_DIFF_LO_LO1_COMPANDER                 , 0x00 },
+	{ WCD9335_DIFF_LO_COMMON                        , 0x40 },
+	{ WCD9335_DIFF_LO_BYPASS_EN                     , 0x00 },
+	{ WCD9335_DIFF_LO_CNP                           , 0x20 },
+	{ WCD9335_DIFF_LO_CORE_OUT_PROG                 , 0x00 },
+	{ WCD9335_DIFF_LO_LDO_OUT_PROG                  , 0x00 },
+	{ WCD9335_DIFF_LO_COM_SWCAP_REFBUF_FREQ         , 0x9b },
+	{ WCD9335_DIFF_LO_COM_PA_FREQ                   , 0xb0 },
+	{ WCD9335_DIFF_LO_RESERVED_REG                  , 0x60 },
+	{ WCD9335_DIFF_LO_LO1_STATUS_1                  , 0x00 },
+	{ WCD9335_DIFF_LO_LO1_STATUS_2                  , 0x00 },
+	{ WCD9335_SE_LO_COM1                            , 0x80 },
+	{ WCD9335_SE_LO_COM2                            , 0x04 },
+	{ WCD9335_SE_LO_LO3_GAIN                        , 0x20 },
+	{ WCD9335_SE_LO_LO3_CTRL                        , 0x04 },
+	{ WCD9335_SE_LO_LO4_GAIN                        , 0x20 },
+	{ WCD9335_SE_LO_LO4_CTRL                        , 0x04 },
+	{ WCD9335_SE_LO_LO3_STATUS                      , 0x00 },
+	{ WCD9335_SE_LO_LO4_STATUS                      , 0x00 },
+	/* Page #10 registers */
+	{ WCD9335_PAGE10_PAGE_REGISTER                  , 0x00 },
+	{ WCD9335_CDC_ANC0_CLK_RESET_CTL                , 0x00 },
+	{ WCD9335_CDC_ANC0_MODE_1_CTL                   , 0x00 },
+	{ WCD9335_CDC_ANC0_MODE_2_CTL                   , 0x00 },
+	{ WCD9335_CDC_ANC0_FF_SHIFT                     , 0x00 },
+	{ WCD9335_CDC_ANC0_FB_SHIFT                     , 0x00 },
+	{ WCD9335_CDC_ANC0_LPF_FF_A_CTL                 , 0x00 },
+	{ WCD9335_CDC_ANC0_LPF_FF_B_CTL                 , 0x00 },
+	{ WCD9335_CDC_ANC0_LPF_FB_CTL                   , 0x00 },
+	{ WCD9335_CDC_ANC0_SMLPF_CTL                    , 0x00 },
+	{ WCD9335_CDC_ANC0_DCFLT_SHIFT_CTL              , 0x00 },
+	{ WCD9335_CDC_ANC0_IIR_ADAPT_CTL                , 0x00 },
+	{ WCD9335_CDC_ANC0_IIR_COEFF_1_CTL              , 0x00 },
+	{ WCD9335_CDC_ANC0_IIR_COEFF_2_CTL              , 0x00 },
+	{ WCD9335_CDC_ANC0_FF_A_GAIN_CTL                , 0x00 },
+	{ WCD9335_CDC_ANC0_FF_B_GAIN_CTL                , 0x00 },
+	{ WCD9335_CDC_ANC0_FB_GAIN_CTL                  , 0x00 },
+	{ WCD9335_CDC_ANC1_CLK_RESET_CTL                , 0x00 },
+	{ WCD9335_CDC_ANC1_MODE_1_CTL                   , 0x00 },
+	{ WCD9335_CDC_ANC1_MODE_2_CTL                   , 0x00 },
+	{ WCD9335_CDC_ANC1_FF_SHIFT                     , 0x00 },
+	{ WCD9335_CDC_ANC1_FB_SHIFT                     , 0x00 },
+	{ WCD9335_CDC_ANC1_LPF_FF_A_CTL                 , 0x00 },
+	{ WCD9335_CDC_ANC1_LPF_FF_B_CTL                 , 0x00 },
+	{ WCD9335_CDC_ANC1_LPF_FB_CTL                   , 0x00 },
+	{ WCD9335_CDC_ANC1_SMLPF_CTL                    , 0x00 },
+	{ WCD9335_CDC_ANC1_DCFLT_SHIFT_CTL              , 0x00 },
+	{ WCD9335_CDC_ANC1_IIR_ADAPT_CTL                , 0x00 },
+	{ WCD9335_CDC_ANC1_IIR_COEFF_1_CTL              , 0x00 },
+	{ WCD9335_CDC_ANC1_IIR_COEFF_2_CTL              , 0x00 },
+	{ WCD9335_CDC_ANC1_FF_A_GAIN_CTL                , 0x00 },
+	{ WCD9335_CDC_ANC1_FF_B_GAIN_CTL                , 0x00 },
+	{ WCD9335_CDC_ANC1_FB_GAIN_CTL                  , 0x00 },
+	{ WCD9335_CDC_TX0_TX_PATH_CTL                   , 0x04 },
+	{ WCD9335_CDC_TX0_TX_VOL_CTL                    , 0x00 },
+	{ WCD9335_CDC_TX0_TX_PATH_192_CTL               , 0x00 },
+	{ WCD9335_CDC_TX0_TX_PATH_192_CFG               , 0x00 },
+	{ WCD9335_CDC_TX0_TX_PATH_SEC0                  , 0x00 },
+	{ WCD9335_CDC_TX0_TX_PATH_SEC1                  , 0x00 },
+	{ WCD9335_CDC_TX0_TX_PATH_SEC4                  , 0x20 },
+	{ WCD9335_CDC_TX0_TX_PATH_SEC5                  , 0x00 },
+	{ WCD9335_CDC_TX0_TX_PATH_SEC6                  , 0x00 },
+	{ WCD9335_CDC_TX1_TX_PATH_CTL                   , 0x04 },
+	{ WCD9335_CDC_TX1_TX_VOL_CTL                    , 0x00 },
+	{ WCD9335_CDC_TX1_TX_PATH_192_CTL               , 0x00 },
+	{ WCD9335_CDC_TX1_TX_PATH_192_CFG               , 0x00 },
+	{ WCD9335_CDC_TX1_TX_PATH_SEC0                  , 0x00 },
+	{ WCD9335_CDC_TX1_TX_PATH_SEC1                  , 0x00 },
+	{ WCD9335_CDC_TX1_TX_PATH_SEC4                  , 0x20 },
+	{ WCD9335_CDC_TX1_TX_PATH_SEC5                  , 0x00 },
+	{ WCD9335_CDC_TX1_TX_PATH_SEC6                  , 0x00 },
+	{ WCD9335_CDC_TX2_TX_PATH_CTL                   , 0x04 },
+	{ WCD9335_CDC_TX2_TX_VOL_CTL                    , 0x00 },
+	{ WCD9335_CDC_TX2_TX_PATH_192_CTL               , 0x00 },
+	{ WCD9335_CDC_TX2_TX_PATH_192_CFG               , 0x00 },
+	{ WCD9335_CDC_TX2_TX_PATH_SEC0                  , 0x00 },
+	{ WCD9335_CDC_TX2_TX_PATH_SEC1                  , 0x00 },
+	{ WCD9335_CDC_TX2_TX_PATH_SEC4                  , 0x20 },
+	{ WCD9335_CDC_TX2_TX_PATH_SEC5                  , 0x00 },
+	{ WCD9335_CDC_TX2_TX_PATH_SEC6                  , 0x00 },
+	{ WCD9335_CDC_TX3_TX_PATH_CTL                   , 0x04 },
+	{ WCD9335_CDC_TX3_TX_VOL_CTL                    , 0x00 },
+	{ WCD9335_CDC_TX3_TX_PATH_192_CTL               , 0x00 },
+	{ WCD9335_CDC_TX3_TX_PATH_192_CFG               , 0x00 },
+	{ WCD9335_CDC_TX3_TX_PATH_SEC0                  , 0x00 },
+	{ WCD9335_CDC_TX3_TX_PATH_SEC1                  , 0x00 },
+	{ WCD9335_CDC_TX3_TX_PATH_SEC4                  , 0x20 },
+	{ WCD9335_CDC_TX3_TX_PATH_SEC5                  , 0x00 },
+	{ WCD9335_CDC_TX3_TX_PATH_SEC6                  , 0x00 },
+	{ WCD9335_CDC_TX4_TX_PATH_CTL                   , 0x04 },
+	{ WCD9335_CDC_TX4_TX_VOL_CTL                    , 0x00 },
+	{ WCD9335_CDC_TX4_TX_PATH_192_CTL               , 0x00 },
+	{ WCD9335_CDC_TX4_TX_PATH_192_CFG               , 0x00 },
+	{ WCD9335_CDC_TX4_TX_PATH_SEC0                  , 0x00 },
+	{ WCD9335_CDC_TX4_TX_PATH_SEC1                  , 0x00 },
+	{ WCD9335_CDC_TX4_TX_PATH_SEC4                  , 0x20 },
+	{ WCD9335_CDC_TX4_TX_PATH_SEC5                  , 0x00 },
+	{ WCD9335_CDC_TX4_TX_PATH_SEC6                  , 0x00 },
+	{ WCD9335_CDC_TX5_TX_PATH_CTL                   , 0x04 },
+	{ WCD9335_CDC_TX5_TX_VOL_CTL                    , 0x00 },
+	{ WCD9335_CDC_TX5_TX_PATH_192_CTL               , 0x00 },
+	{ WCD9335_CDC_TX5_TX_PATH_192_CFG               , 0x00 },
+	{ WCD9335_CDC_TX5_TX_PATH_SEC0                  , 0x00 },
+	{ WCD9335_CDC_TX5_TX_PATH_SEC1                  , 0x00 },
+	{ WCD9335_CDC_TX5_TX_PATH_SEC4                  , 0x20 },
+	{ WCD9335_CDC_TX5_TX_PATH_SEC5                  , 0x00 },
+	{ WCD9335_CDC_TX5_TX_PATH_SEC6                  , 0x00 },
+	{ WCD9335_CDC_TX6_TX_PATH_CTL                   , 0x04 },
+	{ WCD9335_CDC_TX6_TX_VOL_CTL                    , 0x00 },
+	{ WCD9335_CDC_TX6_TX_PATH_192_CTL               , 0x00 },
+	{ WCD9335_CDC_TX6_TX_PATH_192_CFG               , 0x00 },
+	{ WCD9335_CDC_TX6_TX_PATH_SEC0                  , 0x00 },
+	{ WCD9335_CDC_TX6_TX_PATH_SEC1                  , 0x00 },
+	{ WCD9335_CDC_TX6_TX_PATH_SEC4                  , 0x20 },
+	{ WCD9335_CDC_TX6_TX_PATH_SEC5                  , 0x00 },
+	{ WCD9335_CDC_TX6_TX_PATH_SEC6                  , 0x00 },
+	{ WCD9335_CDC_TX7_TX_PATH_CTL                   , 0x04 },
+	{ WCD9335_CDC_TX7_TX_VOL_CTL                    , 0x00 },
+	{ WCD9335_CDC_TX7_TX_PATH_192_CTL               , 0x00 },
+	{ WCD9335_CDC_TX7_TX_PATH_192_CFG               , 0x00 },
+	{ WCD9335_CDC_TX7_TX_PATH_SEC0                  , 0x00 },
+	{ WCD9335_CDC_TX7_TX_PATH_SEC1                  , 0x00 },
+	{ WCD9335_CDC_TX7_TX_PATH_SEC4                  , 0x20 },
+	{ WCD9335_CDC_TX7_TX_PATH_SEC5                  , 0x00 },
+	{ WCD9335_CDC_TX7_TX_PATH_SEC6                  , 0x00 },
+	{ WCD9335_CDC_TX8_TX_PATH_CTL                   , 0x04 },
+	{ WCD9335_CDC_TX8_TX_VOL_CTL                    , 0x00 },
+	{ WCD9335_CDC_TX8_TX_PATH_192_CTL               , 0x00 },
+	{ WCD9335_CDC_TX8_TX_PATH_192_CFG               , 0x00 },
+	{ WCD9335_CDC_TX8_TX_PATH_SEC0                  , 0x00 },
+	{ WCD9335_CDC_TX8_TX_PATH_SEC1                  , 0x00 },
+	{ WCD9335_CDC_TX8_TX_PATH_SEC4                  , 0x20 },
+	{ WCD9335_CDC_TX8_TX_PATH_SEC5                  , 0x00 },
+	{ WCD9335_CDC_TX8_TX_PATH_SEC6                  , 0x00 },
+	{ WCD9335_CDC_TX9_SPKR_PROT_PATH_CTL            , 0x02 },
+	{ WCD9335_CDC_TX9_SPKR_PROT_PATH_CFG0           , 0x00 },
+	{ WCD9335_CDC_TX10_SPKR_PROT_PATH_CTL           , 0x02 },
+	{ WCD9335_CDC_TX10_SPKR_PROT_PATH_CFG0          , 0x00 },
+	{ WCD9335_CDC_TX11_SPKR_PROT_PATH_CTL           , 0x02 },
+	{ WCD9335_CDC_TX11_SPKR_PROT_PATH_CFG0          , 0x00 },
+	{ WCD9335_CDC_TX12_SPKR_PROT_PATH_CTL           , 0x02 },
+	{ WCD9335_CDC_TX12_SPKR_PROT_PATH_CFG0          , 0x00 },
+	/* Page #11 registers */
+	{ WCD9335_PAGE11_PAGE_REGISTER                  , 0x00 },
+	{ WCD9335_CDC_COMPANDER1_CTL0                   , 0x60 },
+	{ WCD9335_CDC_COMPANDER1_CTL1                   , 0xdb },
+	{ WCD9335_CDC_COMPANDER1_CTL2                   , 0xff },
+	{ WCD9335_CDC_COMPANDER1_CTL3                   , 0x35 },
+	{ WCD9335_CDC_COMPANDER1_CTL4                   , 0xff },
+	{ WCD9335_CDC_COMPANDER1_CTL5                   , 0x00 },
+	{ WCD9335_CDC_COMPANDER1_CTL6                   , 0x01 },
+	{ WCD9335_CDC_COMPANDER2_CTL0                   , 0x60 },
+	{ WCD9335_CDC_COMPANDER2_CTL1                   , 0xdb },
+	{ WCD9335_CDC_COMPANDER2_CTL2                   , 0xff },
+	{ WCD9335_CDC_COMPANDER2_CTL3                   , 0x35 },
+	{ WCD9335_CDC_COMPANDER2_CTL4                   , 0xff },
+	{ WCD9335_CDC_COMPANDER2_CTL5                   , 0x00 },
+	{ WCD9335_CDC_COMPANDER2_CTL6                   , 0x01 },
+	{ WCD9335_CDC_COMPANDER3_CTL0                   , 0x60 },
+	{ WCD9335_CDC_COMPANDER3_CTL1                   , 0xdb },
+	{ WCD9335_CDC_COMPANDER3_CTL2                   , 0xff },
+	{ WCD9335_CDC_COMPANDER3_CTL3                   , 0x35 },
+	{ WCD9335_CDC_COMPANDER3_CTL4                   , 0xff },
+	{ WCD9335_CDC_COMPANDER3_CTL5                   , 0x00 },
+	{ WCD9335_CDC_COMPANDER3_CTL6                   , 0x01 },
+	{ WCD9335_CDC_COMPANDER4_CTL0                   , 0x60 },
+	{ WCD9335_CDC_COMPANDER4_CTL1                   , 0xdb },
+	{ WCD9335_CDC_COMPANDER4_CTL2                   , 0xff },
+	{ WCD9335_CDC_COMPANDER4_CTL3                   , 0x35 },
+	{ WCD9335_CDC_COMPANDER4_CTL4                   , 0xff },
+	{ WCD9335_CDC_COMPANDER4_CTL5                   , 0x00 },
+	{ WCD9335_CDC_COMPANDER4_CTL6                   , 0x01 },
+	{ WCD9335_CDC_COMPANDER5_CTL0                   , 0x60 },
+	{ WCD9335_CDC_COMPANDER5_CTL1                   , 0xdb },
+	{ WCD9335_CDC_COMPANDER5_CTL2                   , 0xff },
+	{ WCD9335_CDC_COMPANDER5_CTL3                   , 0x35 },
+	{ WCD9335_CDC_COMPANDER5_CTL4                   , 0xff },
+	{ WCD9335_CDC_COMPANDER5_CTL5                   , 0x00 },
+	{ WCD9335_CDC_COMPANDER5_CTL6                   , 0x01 },
+	{ WCD9335_CDC_COMPANDER6_CTL0                   , 0x60 },
+	{ WCD9335_CDC_COMPANDER6_CTL1                   , 0xdb },
+	{ WCD9335_CDC_COMPANDER6_CTL2                   , 0xff },
+	{ WCD9335_CDC_COMPANDER6_CTL3                   , 0x35 },
+	{ WCD9335_CDC_COMPANDER6_CTL4                   , 0xff },
+	{ WCD9335_CDC_COMPANDER6_CTL5                   , 0x00 },
+	{ WCD9335_CDC_COMPANDER6_CTL6                   , 0x01 },
+	{ WCD9335_CDC_COMPANDER7_CTL0                   , 0x60 },
+	{ WCD9335_CDC_COMPANDER7_CTL1                   , 0xdb },
+	{ WCD9335_CDC_COMPANDER7_CTL2                   , 0xff },
+	{ WCD9335_CDC_COMPANDER7_CTL3                   , 0x35 },
+	{ WCD9335_CDC_COMPANDER7_CTL4                   , 0xff },
+	{ WCD9335_CDC_COMPANDER7_CTL5                   , 0x00 },
+	{ WCD9335_CDC_COMPANDER7_CTL6                   , 0x01 },
+	{ WCD9335_CDC_COMPANDER8_CTL0                   , 0x60 },
+	{ WCD9335_CDC_COMPANDER8_CTL1                   , 0xdb },
+	{ WCD9335_CDC_COMPANDER8_CTL2                   , 0xff },
+	{ WCD9335_CDC_COMPANDER8_CTL3                   , 0x35 },
+	{ WCD9335_CDC_COMPANDER8_CTL4                   , 0xff },
+	{ WCD9335_CDC_COMPANDER8_CTL5                   , 0x00 },
+	{ WCD9335_CDC_COMPANDER8_CTL6                   , 0x01 },
+	{ WCD9335_CDC_RX0_RX_PATH_CTL                   , 0x04 },
+	{ WCD9335_CDC_RX0_RX_PATH_CFG0                  , 0x00 },
+	{ WCD9335_CDC_RX0_RX_PATH_CFG2                  , 0x8f },
+	{ WCD9335_CDC_RX0_RX_VOL_CTL                    , 0x00 },
+	{ WCD9335_CDC_RX0_RX_PATH_MIX_CTL               , 0x04 },
+	{ WCD9335_CDC_RX0_RX_VOL_MIX_CTL                , 0x00 },
+	{ WCD9335_CDC_RX0_RX_PATH_SEC2                  , 0x00 },
+	{ WCD9335_CDC_RX0_RX_PATH_SEC3                  , 0x00 },
+	{ WCD9335_CDC_RX0_RX_PATH_SEC5                  , 0x00 },
+	{ WCD9335_CDC_RX0_RX_PATH_SEC6                  , 0x00 },
+	{ WCD9335_CDC_RX0_RX_PATH_SEC7                  , 0x00 },
+	{ WCD9335_CDC_RX0_RX_PATH_MIX_SEC1              , 0x00 },
+	{ WCD9335_CDC_RX1_RX_PATH_CTL                   , 0x04 },
+	{ WCD9335_CDC_RX1_RX_PATH_CFG0                  , 0x00 },
+	{ WCD9335_CDC_RX1_RX_PATH_CFG2                  , 0x8f },
+	{ WCD9335_CDC_RX1_RX_VOL_CTL                    , 0x00 },
+	{ WCD9335_CDC_RX1_RX_PATH_MIX_CTL               , 0x04 },
+	{ WCD9335_CDC_RX1_RX_VOL_MIX_CTL                , 0x00 },
+	{ WCD9335_CDC_RX1_RX_PATH_SEC2                  , 0x00 },
+	{ WCD9335_CDC_RX1_RX_PATH_SEC3                  , 0x00 },
+	{ WCD9335_CDC_RX1_RX_PATH_SEC4                  , 0x00 },
+	{ WCD9335_CDC_RX1_RX_PATH_SEC5                  , 0x00 },
+	{ WCD9335_CDC_RX1_RX_PATH_SEC6                  , 0x00 },
+	{ WCD9335_CDC_RX1_RX_PATH_SEC7                  , 0x00 },
+	{ WCD9335_CDC_RX1_RX_PATH_MIX_SEC1              , 0x00 },
+	{ WCD9335_CDC_RX2_RX_PATH_CTL                   , 0x04 },
+	{ WCD9335_CDC_RX2_RX_PATH_CFG0                  , 0x00 },
+	{ WCD9335_CDC_RX2_RX_PATH_CFG2                  , 0x8f },
+	{ WCD9335_CDC_RX2_RX_VOL_CTL                    , 0x00 },
+	{ WCD9335_CDC_RX2_RX_PATH_MIX_CTL               , 0x04 },
+	{ WCD9335_CDC_RX2_RX_VOL_MIX_CTL                , 0x00 },
+	{ WCD9335_CDC_RX2_RX_PATH_SEC2                  , 0x00 },
+	{ WCD9335_CDC_RX2_RX_PATH_SEC3                  , 0x00 },
+	{ WCD9335_CDC_RX2_RX_PATH_SEC4                  , 0x00 },
+	{ WCD9335_CDC_RX2_RX_PATH_SEC5                  , 0x00 },
+	{ WCD9335_CDC_RX2_RX_PATH_SEC6                  , 0x00 },
+	{ WCD9335_CDC_RX2_RX_PATH_SEC7                  , 0x00 },
+	{ WCD9335_CDC_RX2_RX_PATH_MIX_SEC1              , 0x00 },
+	{ WCD9335_CDC_RX3_RX_PATH_CTL                   , 0x04 },
+	{ WCD9335_CDC_RX3_RX_PATH_CFG0                  , 0x00 },
+	{ WCD9335_CDC_RX3_RX_PATH_CFG2                  , 0x8f },
+	{ WCD9335_CDC_RX3_RX_VOL_CTL                    , 0x00 },
+	{ WCD9335_CDC_RX3_RX_PATH_MIX_CTL               , 0x04 },
+	{ WCD9335_CDC_RX3_RX_VOL_MIX_CTL                , 0x00 },
+	{ WCD9335_CDC_RX3_RX_PATH_SEC2                  , 0x00 },
+	{ WCD9335_CDC_RX3_RX_PATH_SEC3                  , 0x00 },
+	{ WCD9335_CDC_RX3_RX_PATH_SEC5                  , 0x00 },
+	{ WCD9335_CDC_RX3_RX_PATH_SEC6                  , 0x00 },
+	{ WCD9335_CDC_RX3_RX_PATH_SEC7                  , 0x00 },
+	{ WCD9335_CDC_RX3_RX_PATH_MIX_SEC1              , 0x00 },
+	{ WCD9335_CDC_RX4_RX_PATH_CTL                   , 0x04 },
+	{ WCD9335_CDC_RX4_RX_PATH_CFG0                  , 0x00 },
+	{ WCD9335_CDC_RX4_RX_PATH_CFG2                  , 0x8f },
+	{ WCD9335_CDC_RX4_RX_VOL_CTL                    , 0x00 },
+	{ WCD9335_CDC_RX4_RX_PATH_MIX_CTL               , 0x04 },
+	{ WCD9335_CDC_RX4_RX_VOL_MIX_CTL                , 0x00 },
+	{ WCD9335_CDC_RX4_RX_PATH_SEC2                  , 0x00 },
+	{ WCD9335_CDC_RX4_RX_PATH_SEC3                  , 0x00 },
+	{ WCD9335_CDC_RX4_RX_PATH_SEC5                  , 0x00 },
+	{ WCD9335_CDC_RX4_RX_PATH_SEC6                  , 0x00 },
+	{ WCD9335_CDC_RX4_RX_PATH_SEC7                  , 0x00 },
+	{ WCD9335_CDC_RX4_RX_PATH_MIX_SEC1              , 0x00 },
+	{ WCD9335_CDC_RX5_RX_PATH_CTL                   , 0x04 },
+	{ WCD9335_CDC_RX5_RX_PATH_CFG0                  , 0x00 },
+	{ WCD9335_CDC_RX5_RX_PATH_CFG2                  , 0x8f },
+	{ WCD9335_CDC_RX5_RX_VOL_CTL                    , 0x00 },
+	{ WCD9335_CDC_RX5_RX_PATH_MIX_CTL               , 0x04 },
+	{ WCD9335_CDC_RX5_RX_VOL_MIX_CTL                , 0x00 },
+	{ WCD9335_CDC_RX5_RX_PATH_SEC2                  , 0x00 },
+	{ WCD9335_CDC_RX5_RX_PATH_SEC3                  , 0x00 },
+	{ WCD9335_CDC_RX5_RX_PATH_SEC5                  , 0x00 },
+	{ WCD9335_CDC_RX5_RX_PATH_SEC6                  , 0x00 },
+	{ WCD9335_CDC_RX5_RX_PATH_SEC7                  , 0x00 },
+	{ WCD9335_CDC_RX5_RX_PATH_MIX_SEC1              , 0x00 },
+	{ WCD9335_CDC_RX6_RX_PATH_CTL                   , 0x04 },
+	{ WCD9335_CDC_RX6_RX_PATH_CFG0                  , 0x00 },
+	{ WCD9335_CDC_RX6_RX_PATH_CFG2                  , 0x8f },
+	{ WCD9335_CDC_RX6_RX_VOL_CTL                    , 0x00 },
+	{ WCD9335_CDC_RX6_RX_PATH_MIX_CTL               , 0x04 },
+	{ WCD9335_CDC_RX6_RX_VOL_MIX_CTL                , 0x00 },
+	{ WCD9335_CDC_RX6_RX_PATH_SEC2                  , 0x00 },
+	{ WCD9335_CDC_RX6_RX_PATH_SEC3                  , 0x00 },
+	{ WCD9335_CDC_RX6_RX_PATH_SEC5                  , 0x00 },
+	{ WCD9335_CDC_RX6_RX_PATH_SEC6                  , 0x00 },
+	{ WCD9335_CDC_RX6_RX_PATH_SEC7                  , 0x00 },
+	{ WCD9335_CDC_RX6_RX_PATH_MIX_SEC1              , 0x00 },
+	{ WCD9335_CDC_RX7_RX_PATH_CTL                   , 0x04 },
+	{ WCD9335_CDC_RX7_RX_PATH_CFG0                  , 0x00 },
+	{ WCD9335_CDC_RX7_RX_PATH_CFG2                  , 0x8f },
+	{ WCD9335_CDC_RX7_RX_VOL_CTL                    , 0x00 },
+	{ WCD9335_CDC_RX7_RX_PATH_MIX_CTL               , 0x04 },
+	{ WCD9335_CDC_RX7_RX_VOL_MIX_CTL                , 0x00 },
+	{ WCD9335_CDC_RX7_RX_PATH_SEC2                  , 0x00 },
+	{ WCD9335_CDC_RX7_RX_PATH_SEC3                  , 0x00 },
+	{ WCD9335_CDC_RX7_RX_PATH_SEC5                  , 0x00 },
+	{ WCD9335_CDC_RX7_RX_PATH_SEC6                  , 0x00 },
+	{ WCD9335_CDC_RX7_RX_PATH_SEC7                  , 0x00 },
+	{ WCD9335_CDC_RX7_RX_PATH_MIX_SEC1              , 0x00 },
+	{ WCD9335_CDC_RX8_RX_PATH_CTL                   , 0x04 },
+	{ WCD9335_CDC_RX8_RX_PATH_CFG0                  , 0x00 },
+	{ WCD9335_CDC_RX8_RX_PATH_CFG2                  , 0x8f },
+	{ WCD9335_CDC_RX8_RX_VOL_CTL                    , 0x00 },
+	{ WCD9335_CDC_RX8_RX_PATH_MIX_CTL               , 0x04 },
+	{ WCD9335_CDC_RX8_RX_VOL_MIX_CTL                , 0x00 },
+	{ WCD9335_CDC_RX8_RX_PATH_SEC2                  , 0x00 },
+	{ WCD9335_CDC_RX8_RX_PATH_SEC3                  , 0x00 },
+	{ WCD9335_CDC_RX8_RX_PATH_SEC5                  , 0x00 },
+	{ WCD9335_CDC_RX8_RX_PATH_SEC6                  , 0x00 },
+	{ WCD9335_CDC_RX8_RX_PATH_SEC7                  , 0x00 },
+	{ WCD9335_CDC_RX8_RX_PATH_MIX_SEC1              , 0x00 },
+	/* Page #12 registers */
+	{ WCD9335_PAGE12_PAGE_REGISTER                  , 0x00 },
+	{ WCD9335_CDC_CLSH_CRC                          , 0x00 },
+	{ WCD9335_CDC_CLSH_DLY_CTRL                     , 0x03 },
+	{ WCD9335_CDC_CLSH_DECAY_CTRL                   , 0x02 },
+	{ WCD9335_CDC_CLSH_HPH_V_PA                     , 0x1c },
+	{ WCD9335_CDC_CLSH_EAR_V_PA                     , 0x39 },
+	{ WCD9335_CDC_CLSH_HPH_V_HD                     , 0x0c },
+	{ WCD9335_CDC_CLSH_EAR_V_HD                     , 0x0c },
+	{ WCD9335_CDC_CLSH_K1_MSB                       , 0x01 },
+	{ WCD9335_CDC_CLSH_K1_LSB                       , 0x00 },
+	{ WCD9335_CDC_CLSH_K2_MSB                       , 0x00 },
+	{ WCD9335_CDC_CLSH_K2_LSB                       , 0x80 },
+	{ WCD9335_CDC_CLSH_IDLE_CTRL                    , 0x00 },
+	{ WCD9335_CDC_CLSH_IDLE_HPH                     , 0x00 },
+	{ WCD9335_CDC_CLSH_IDLE_EAR                     , 0x00 },
+	{ WCD9335_CDC_CLSH_TEST0                        , 0x07 },
+	{ WCD9335_CDC_CLSH_TEST1                        , 0x00 },
+	{ WCD9335_CDC_CLSH_OVR_VREF                     , 0x00 },
+	{ WCD9335_CDC_BOOST0_BOOST_PATH_CTL             , 0x00 },
+	{ WCD9335_CDC_BOOST0_BOOST_CTL                  , 0xb2 },
+	{ WCD9335_CDC_BOOST0_BOOST_CFG1                 , 0x00 },
+	{ WCD9335_CDC_BOOST0_BOOST_CFG2                 , 0x00 },
+	{ WCD9335_CDC_BOOST1_BOOST_PATH_CTL             , 0x00 },
+	{ WCD9335_CDC_BOOST1_BOOST_CTL                  , 0xb2 },
+	{ WCD9335_CDC_BOOST1_BOOST_CFG1                 , 0x00 },
+	{ WCD9335_CDC_BOOST1_BOOST_CFG2                 , 0x00 },
+	{ WCD9335_SWR_AHB_BRIDGE_WR_DATA_0              , 0x00 },
+	{ WCD9335_SWR_AHB_BRIDGE_WR_DATA_1              , 0x00 },
+	{ WCD9335_SWR_AHB_BRIDGE_WR_DATA_2              , 0x00 },
+	{ WCD9335_SWR_AHB_BRIDGE_WR_DATA_3              , 0x00 },
+	{ WCD9335_SWR_AHB_BRIDGE_WR_ADDR_0              , 0x00 },
+	{ WCD9335_SWR_AHB_BRIDGE_WR_ADDR_1              , 0x00 },
+	{ WCD9335_SWR_AHB_BRIDGE_WR_ADDR_2              , 0x00 },
+	{ WCD9335_SWR_AHB_BRIDGE_WR_ADDR_3              , 0x00 },
+	{ WCD9335_SWR_AHB_BRIDGE_RD_ADDR_0              , 0x00 },
+	{ WCD9335_SWR_AHB_BRIDGE_RD_ADDR_1              , 0x00 },
+	{ WCD9335_SWR_AHB_BRIDGE_RD_ADDR_2              , 0x00 },
+	{ WCD9335_SWR_AHB_BRIDGE_RD_ADDR_3              , 0x00 },
+	{ WCD9335_SWR_AHB_BRIDGE_RD_DATA_0              , 0x00 },
+	{ WCD9335_SWR_AHB_BRIDGE_RD_DATA_1              , 0x00 },
+	{ WCD9335_SWR_AHB_BRIDGE_RD_DATA_2              , 0x00 },
+	{ WCD9335_SWR_AHB_BRIDGE_RD_DATA_3              , 0x00 },
+	{ WCD9335_SWR_AHB_BRIDGE_ACCESS_CFG             , 0x0f },
+	{ WCD9335_SWR_AHB_BRIDGE_ACCESS_STATUS          , 0x03 },
+	{ WCD9335_CDC_VBAT_VBAT_PATH_CTL                , 0x00 },
+	{ WCD9335_CDC_VBAT_VBAT_CFG                     , 0x0a },
+	{ WCD9335_CDC_VBAT_VBAT_ADC_CAL1                , 0x00 },
+	{ WCD9335_CDC_VBAT_VBAT_ADC_CAL2                , 0x00 },
+	{ WCD9335_CDC_VBAT_VBAT_ADC_CAL3                , 0x04 },
+	{ WCD9335_CDC_VBAT_VBAT_PK_EST1                 , 0xe0 },
+	{ WCD9335_CDC_VBAT_VBAT_PK_EST2                 , 0x01 },
+	{ WCD9335_CDC_VBAT_VBAT_PK_EST3                 , 0x40 },
+	{ WCD9335_CDC_VBAT_VBAT_RF_PROC1                , 0x2a },
+	{ WCD9335_CDC_VBAT_VBAT_RF_PROC2                , 0x86 },
+	{ WCD9335_CDC_VBAT_VBAT_TAC1                    , 0x70 },
+	{ WCD9335_CDC_VBAT_VBAT_TAC2                    , 0x18 },
+	{ WCD9335_CDC_VBAT_VBAT_TAC3                    , 0x18 },
+	{ WCD9335_CDC_VBAT_VBAT_TAC4                    , 0x03 },
+	{ WCD9335_CDC_VBAT_VBAT_GAIN_UPD1               , 0x01 },
+	{ WCD9335_CDC_VBAT_VBAT_GAIN_UPD2               , 0x00 },
+	{ WCD9335_CDC_VBAT_VBAT_GAIN_UPD3               , 0x64 },
+	{ WCD9335_CDC_VBAT_VBAT_GAIN_UPD4               , 0x01 },
+	{ WCD9335_CDC_VBAT_VBAT_DEBUG1                  , 0x00 },
+	{ WCD9335_CDC_VBAT_VBAT_GAIN_UPD_MON            , 0x00 },
+	{ WCD9335_CDC_VBAT_VBAT_GAIN_MON_VAL            , 0x00 },
+	{ WCD9335_CDC_SIDETONE_SRC0_ST_SRC_PATH_CTL     , 0x04 },
+	{ WCD9335_CDC_SIDETONE_SRC0_ST_SRC_PATH_CFG1    , 0x00 },
+	{ WCD9335_CDC_SIDETONE_SRC1_ST_SRC_PATH_CTL     , 0x04 },
+	{ WCD9335_CDC_SIDETONE_SRC1_ST_SRC_PATH_CFG1    , 0x00 },
+	/* Page #13 registers */
+	{ WCD9335_PAGE13_PAGE_REGISTER                  , 0x00 },
+	{ WCD9335_CDC_RX_INP_MUX_RX_INT0_CFG0           , 0x00 },
+	{ WCD9335_CDC_RX_INP_MUX_RX_INT0_CFG1           , 0x00 },
+	{ WCD9335_CDC_RX_INP_MUX_RX_INT1_CFG0           , 0x00 },
+	{ WCD9335_CDC_RX_INP_MUX_RX_INT1_CFG1           , 0x00 },
+	{ WCD9335_CDC_RX_INP_MUX_RX_INT2_CFG0           , 0x00 },
+	{ WCD9335_CDC_RX_INP_MUX_RX_INT2_CFG1           , 0x00 },
+	{ WCD9335_CDC_RX_INP_MUX_RX_INT3_CFG0           , 0x00 },
+	{ WCD9335_CDC_RX_INP_MUX_RX_INT3_CFG1           , 0x00 },
+	{ WCD9335_CDC_RX_INP_MUX_RX_INT4_CFG0           , 0x00 },
+	{ WCD9335_CDC_RX_INP_MUX_RX_INT4_CFG1           , 0x00 },
+	{ WCD9335_CDC_RX_INP_MUX_RX_INT5_CFG0           , 0x00 },
+	{ WCD9335_CDC_RX_INP_MUX_RX_INT5_CFG1           , 0x00 },
+	{ WCD9335_CDC_RX_INP_MUX_RX_INT6_CFG0           , 0x00 },
+	{ WCD9335_CDC_RX_INP_MUX_RX_INT6_CFG1           , 0x00 },
+	{ WCD9335_CDC_RX_INP_MUX_RX_INT7_CFG0           , 0x00 },
+	{ WCD9335_CDC_RX_INP_MUX_RX_INT7_CFG1           , 0x00 },
+	{ WCD9335_CDC_RX_INP_MUX_RX_INT8_CFG0           , 0x00 },
+	{ WCD9335_CDC_RX_INP_MUX_RX_INT8_CFG1           , 0x00 },
+	{ WCD9335_CDC_RX_INP_MUX_RX_MIX_CFG0            , 0x00 },
+	{ WCD9335_CDC_RX_INP_MUX_RX_MIX_CFG1            , 0x00 },
+	{ WCD9335_CDC_RX_INP_MUX_RX_MIX_CFG2            , 0x00 },
+	{ WCD9335_CDC_RX_INP_MUX_RX_MIX_CFG3            , 0x00 },
+	{ WCD9335_CDC_RX_INP_MUX_RX_MIX_CFG4            , 0x00 },
+	{ WCD9335_CDC_RX_INP_MUX_SIDETONE_SRC_CFG0      , 0x00 },
+	{ WCD9335_CDC_RX_INP_MUX_SIDETONE_SRC_CFG1      , 0x00 },
+	{ WCD9335_CDC_RX_INP_MUX_ANC_CFG0               , 0x00 },
+	{ WCD9335_CDC_RX_INP_MUX_SPLINE_SRC_CFG0        , 0x00 },
+	{ WCD9335_CDC_TX_INP_MUX_ADC_MUX0_CFG0          , 0x00 },
+	{ WCD9335_CDC_TX_INP_MUX_ADC_MUX0_CFG1          , 0x00 },
+	{ WCD9335_CDC_TX_INP_MUX_ADC_MUX1_CFG0          , 0x00 },
+	{ WCD9335_CDC_TX_INP_MUX_ADC_MUX1_CFG1          , 0x00 },
+	{ WCD9335_CDC_TX_INP_MUX_ADC_MUX2_CFG0          , 0x00 },
+	{ WCD9335_CDC_TX_INP_MUX_ADC_MUX2_CFG1          , 0x00 },
+	{ WCD9335_CDC_TX_INP_MUX_ADC_MUX3_CFG0          , 0x00 },
+	{ WCD9335_CDC_TX_INP_MUX_ADC_MUX3_CFG1          , 0x00 },
+	{ WCD9335_CDC_TX_INP_MUX_ADC_MUX4_CFG0          , 0x00 },
+	{ WCD9335_CDC_TX_INP_MUX_ADC_MUX5_CFG0          , 0x00 },
+	{ WCD9335_CDC_TX_INP_MUX_ADC_MUX6_CFG0          , 0x00 },
+	{ WCD9335_CDC_TX_INP_MUX_ADC_MUX7_CFG0          , 0x00 },
+	{ WCD9335_CDC_TX_INP_MUX_ADC_MUX8_CFG0          , 0x00 },
+	{ WCD9335_CDC_TX_INP_MUX_ADC_MUX10_CFG0         , 0x00 },
+	{ WCD9335_CDC_TX_INP_MUX_ADC_MUX11_CFG0         , 0x00 },
+	{ WCD9335_CDC_TX_INP_MUX_ADC_MUX12_CFG0         , 0x00 },
+	{ WCD9335_CDC_TX_INP_MUX_ADC_MUX13_CFG0         , 0x00 },
+	{ WCD9335_CDC_SIDETONE_IIR_INP_MUX_IIR0_MIX_CFG0, 0x00 },
+	{ WCD9335_CDC_SIDETONE_IIR_INP_MUX_IIR0_MIX_CFG1, 0x00 },
+	{ WCD9335_CDC_SIDETONE_IIR_INP_MUX_IIR0_MIX_CFG2, 0x00 },
+	{ WCD9335_CDC_SIDETONE_IIR_INP_MUX_IIR0_MIX_CFG3, 0x00 },
+	{ WCD9335_CDC_SIDETONE_IIR_INP_MUX_IIR1_MIX_CFG0, 0x00 },
+	{ WCD9335_CDC_SIDETONE_IIR_INP_MUX_IIR1_MIX_CFG1, 0x00 },
+	{ WCD9335_CDC_SIDETONE_IIR_INP_MUX_IIR1_MIX_CFG2, 0x00 },
+	{ WCD9335_CDC_SIDETONE_IIR_INP_MUX_IIR1_MIX_CFG3, 0x00 },
+	{ WCD9335_CDC_IF_ROUTER_TX_MUX_CFG0             , 0x00 },
+	{ WCD9335_CDC_IF_ROUTER_TX_MUX_CFG1             , 0x00 },
+	{ WCD9335_CDC_IF_ROUTER_TX_MUX_CFG2             , 0x00 },
+	{ WCD9335_CDC_IF_ROUTER_TX_MUX_CFG3             , 0x00 },
+	{ WCD9335_CDC_CLK_RST_CTRL_MCLK_CONTROL         , 0x00 },
+	{ WCD9335_CDC_CLK_RST_CTRL_SWR_CONTROL          , 0x00 },
+	{ WCD9335_CDC_PROX_DETECT_PROX_CTL              , 0x08 },
+	{ WCD9335_CDC_PROX_DETECT_PROX_POLL_PERIOD0     , 0x00 },
+	{ WCD9335_CDC_PROX_DETECT_PROX_POLL_PERIOD1     , 0x4b },
+	{ WCD9335_CDC_PROX_DETECT_PROX_SIG_PATTERN_LSB  , 0x00 },
+	{ WCD9335_CDC_PROX_DETECT_PROX_SIG_PATTERN_MSB  , 0x00 },
+	{ WCD9335_CDC_PROX_DETECT_PROX_STATUS           , 0x00 },
+	{ WCD9335_CDC_PROX_DETECT_PROX_TEST_CTRL        , 0x00 },
+	{ WCD9335_CDC_PROX_DETECT_PROX_TEST_BUFF_LSB    , 0x00 },
+	{ WCD9335_CDC_PROX_DETECT_PROX_TEST_BUFF_MSB    , 0x00 },
+	{ WCD9335_CDC_PROX_DETECT_PROX_TEST_BUFF_LSB_RD , 0x00 },
+	{ WCD9335_CDC_PROX_DETECT_PROX_TEST_BUFF_MSB_RD , 0x00 },
+	{ WCD9335_CDC_SIDETONE_IIR0_IIR_PATH_CTL        , 0x00 },
+	{ WCD9335_CDC_SIDETONE_IIR0_IIR_GAIN_B1_CTL     , 0x00 },
+	{ WCD9335_CDC_SIDETONE_IIR0_IIR_GAIN_B2_CTL     , 0x00 },
+	{ WCD9335_CDC_SIDETONE_IIR0_IIR_GAIN_B3_CTL     , 0x00 },
+	{ WCD9335_CDC_SIDETONE_IIR0_IIR_GAIN_B4_CTL     , 0x00 },
+	{ WCD9335_CDC_SIDETONE_IIR0_IIR_GAIN_B5_CTL     , 0x00 },
+	{ WCD9335_CDC_SIDETONE_IIR0_IIR_GAIN_B6_CTL     , 0x00 },
+	{ WCD9335_CDC_SIDETONE_IIR0_IIR_GAIN_B7_CTL     , 0x00 },
+	{ WCD9335_CDC_SIDETONE_IIR0_IIR_GAIN_B8_CTL     , 0x00 },
+	{ WCD9335_CDC_SIDETONE_IIR0_IIR_CTL             , 0x40 },
+	{ WCD9335_CDC_SIDETONE_IIR0_IIR_GAIN_TIMER_CTL  , 0x00 },
+	{ WCD9335_CDC_SIDETONE_IIR0_IIR_COEF_B1_CTL     , 0x00 },
+	{ WCD9335_CDC_SIDETONE_IIR0_IIR_COEF_B2_CTL     , 0x00 },
+	{ WCD9335_CDC_SIDETONE_IIR1_IIR_PATH_CTL        , 0x00 },
+	{ WCD9335_CDC_SIDETONE_IIR1_IIR_GAIN_B1_CTL     , 0x00 },
+	{ WCD9335_CDC_SIDETONE_IIR1_IIR_GAIN_B2_CTL     , 0x00 },
+	{ WCD9335_CDC_SIDETONE_IIR1_IIR_GAIN_B3_CTL     , 0x00 },
+	{ WCD9335_CDC_SIDETONE_IIR1_IIR_GAIN_B4_CTL     , 0x00 },
+	{ WCD9335_CDC_SIDETONE_IIR1_IIR_GAIN_B5_CTL     , 0x00 },
+	{ WCD9335_CDC_SIDETONE_IIR1_IIR_GAIN_B6_CTL     , 0x00 },
+	{ WCD9335_CDC_SIDETONE_IIR1_IIR_GAIN_B7_CTL     , 0x00 },
+	{ WCD9335_CDC_SIDETONE_IIR1_IIR_GAIN_B8_CTL     , 0x00 },
+	{ WCD9335_CDC_SIDETONE_IIR1_IIR_CTL             , 0x40 },
+	{ WCD9335_CDC_SIDETONE_IIR1_IIR_GAIN_TIMER_CTL  , 0x00 },
+	{ WCD9335_CDC_SIDETONE_IIR1_IIR_COEF_B1_CTL     , 0x00 },
+	{ WCD9335_CDC_SIDETONE_IIR1_IIR_COEF_B2_CTL     , 0x00 },
+	{ WCD9335_CDC_TOP_TOP_CFG0                      , 0x00 },
+	{ WCD9335_CDC_TOP_TOP_CFG1                      , 0x00 },
+	{ WCD9335_CDC_TOP_TOP_CFG2                      , 0x00 },
+	{ WCD9335_CDC_TOP_TOP_CFG3                      , 0x18 },
+	{ WCD9335_CDC_TOP_TOP_CFG4                      , 0x00 },
+	{ WCD9335_CDC_TOP_TOP_CFG5                      , 0x00 },
+	{ WCD9335_CDC_TOP_TOP_CFG6                      , 0x00 },
+	{ WCD9335_CDC_TOP_TOP_CFG7                      , 0x00 },
+	{ WCD9335_CDC_TOP_HPHL_COMP_WR_LSB              , 0x00 },
+	{ WCD9335_CDC_TOP_HPHL_COMP_WR_MSB              , 0x00 },
+	{ WCD9335_CDC_TOP_HPHL_COMP_LUT                 , 0x00 },
+	{ WCD9335_CDC_TOP_HPHL_COMP_RD_LSB              , 0x00 },
+	{ WCD9335_CDC_TOP_HPHL_COMP_RD_MSB              , 0x00 },
+	{ WCD9335_CDC_TOP_HPHR_COMP_WR_LSB              , 0x00 },
+	{ WCD9335_CDC_TOP_HPHR_COMP_WR_MSB              , 0x00 },
+	{ WCD9335_CDC_TOP_HPHR_COMP_LUT                 , 0x00 },
+	{ WCD9335_CDC_TOP_HPHR_COMP_RD_LSB              , 0x00 },
+	{ WCD9335_CDC_TOP_HPHR_COMP_RD_MSB              , 0x00 },
+	{ WCD9335_CDC_TOP_DIFFL_COMP_WR_LSB             , 0x00 },
+	{ WCD9335_CDC_TOP_DIFFL_COMP_WR_MSB             , 0x00 },
+	{ WCD9335_CDC_TOP_DIFFL_COMP_LUT                , 0x00 },
+	{ WCD9335_CDC_TOP_DIFFL_COMP_RD_LSB             , 0x00 },
+	{ WCD9335_CDC_TOP_DIFFL_COMP_RD_MSB             , 0x00 },
+	{ WCD9335_CDC_TOP_DIFFR_COMP_WR_LSB             , 0x00 },
+	{ WCD9335_CDC_TOP_DIFFR_COMP_WR_MSB             , 0x00 },
+	{ WCD9335_CDC_TOP_DIFFR_COMP_LUT                , 0x00 },
+	{ WCD9335_CDC_TOP_DIFFR_COMP_RD_LSB             , 0x00 },
+	{ WCD9335_CDC_TOP_DIFFR_COMP_RD_MSB             , 0x00 },
+	/* Page #0x80 registers */
+	{ WCD9335_PAGE80_PAGE_REGISTER                  , 0x00 },
+	{ WCD9335_TLMM_BIST_MODE_PINCFG                 , 0x00 },
+	{ WCD9335_TLMM_RF_PA_ON_PINCFG                  , 0x00 },
+	{ WCD9335_TLMM_INTR1_PINCFG                     , 0x00 },
+	{ WCD9335_TLMM_INTR2_PINCFG                     , 0x00 },
+	{ WCD9335_TLMM_SWR_DATA_PINCFG                  , 0x00 },
+	{ WCD9335_TLMM_SWR_CLK_PINCFG                   , 0x00 },
+	{ WCD9335_TLMM_SLIMBUS_DATA2_PINCFG             , 0x00 },
+	{ WCD9335_TLMM_I2C_CLK_PINCFG                   , 0x00 },
+	{ WCD9335_TLMM_I2C_DATA_PINCFG                  , 0x00 },
+	{ WCD9335_TLMM_I2S_RX_SD0_PINCFG                , 0x00 },
+	{ WCD9335_TLMM_I2S_RX_SD1_PINCFG                , 0x00 },
+	{ WCD9335_TLMM_I2S_RX_SCK_PINCFG                , 0x00 },
+	{ WCD9335_TLMM_I2S_RX_WS_PINCFG                 , 0x00 },
+	{ WCD9335_TLMM_I2S_TX_SD0_PINCFG                , 0x00 },
+	{ WCD9335_TLMM_I2S_TX_SD1_PINCFG                , 0x00 },
+	{ WCD9335_TLMM_I2S_TX_SCK_PINCFG                , 0x00 },
+	{ WCD9335_TLMM_I2S_TX_WS_PINCFG                 , 0x00 },
+	{ WCD9335_TLMM_DMIC1_CLK_PINCFG                 , 0x00 },
+	{ WCD9335_TLMM_DMIC1_DATA_PINCFG                , 0x00 },
+	{ WCD9335_TLMM_DMIC2_CLK_PINCFG                 , 0x00 },
+	{ WCD9335_TLMM_DMIC2_DATA_PINCFG                , 0x00 },
+	{ WCD9335_TLMM_DMIC3_CLK_PINCFG                 , 0x00 },
+	{ WCD9335_TLMM_DMIC3_DATA_PINCFG                , 0x00 },
+	{ WCD9335_TLMM_JTDI_PINCFG                      , 0x00 },
+	{ WCD9335_TLMM_JTDO_PINCFG                      , 0x00 },
+	{ WCD9335_TLMM_JTMS_PINCFG                      , 0x00 },
+	{ WCD9335_TLMM_JTCK_PINCFG                      , 0x00 },
+	{ WCD9335_TLMM_JTRST_PINCFG                     , 0x00 },
+	{ WCD9335_TEST_DEBUG_PIN_CTL_OE_0               , 0x00 },
+	{ WCD9335_TEST_DEBUG_PIN_CTL_OE_1               , 0x00 },
+	{ WCD9335_TEST_DEBUG_PIN_CTL_OE_2               , 0x00 },
+	{ WCD9335_TEST_DEBUG_PIN_CTL_OE_3               , 0x00 },
+	{ WCD9335_TEST_DEBUG_PIN_CTL_DATA_0             , 0x00 },
+	{ WCD9335_TEST_DEBUG_PIN_CTL_DATA_1             , 0x00 },
+	{ WCD9335_TEST_DEBUG_PIN_CTL_DATA_2             , 0x00 },
+	{ WCD9335_TEST_DEBUG_PIN_CTL_DATA_3             , 0x00 },
+	{ WCD9335_TEST_DEBUG_PAD_DRVCTL                 , 0x00 },
+	{ WCD9335_TEST_DEBUG_PIN_STATUS                 , 0x00 },
+	{ WCD9335_TEST_DEBUG_MEM_CTRL                   , 0x00 },
+	{ WCD9335_TEST_DEBUG_DEBUG_BUS_SEL              , 0x00 },
+	{ WCD9335_TEST_DEBUG_DEBUG_JTAG                 , 0x00 },
+	{ WCD9335_TEST_DEBUG_DEBUG_EN_1                 , 0x00 },
+	{ WCD9335_TEST_DEBUG_DEBUG_EN_2                 , 0x00 },
+	{ WCD9335_TEST_DEBUG_DEBUG_EN_3                 , 0x00 },
+};
+
+/*
+ * wcd9335_regmap_register_patch: update the register defaults for a
+ * given chip version
+ * @regmap: handle to the wcd9xxx regmap
+ * @version: wcd9335 chip version (TASHA_VERSION_*)
+ *
+ * Returns 0 on success or a negative error code on failure.
+ */
+int wcd9335_regmap_register_patch(struct regmap *regmap, int version)
+{
+	int rc;
+
+	if (!regmap) {
+		pr_err("%s: regmap struct is NULL\n", __func__);
+		return -EINVAL;
+	}
+
+	switch (version) {
+	case TASHA_VERSION_1_0:
+	case TASHA_VERSION_1_1:
+		regcache_cache_only(regmap, true);
+		rc = regmap_multi_reg_write(regmap, wcd9335_1_x_defaults,
+					    ARRAY_SIZE(wcd9335_1_x_defaults));
+		regcache_cache_only(regmap, false);
+		break;
+	case TASHA_VERSION_2_0:
+		regcache_cache_only(regmap, true);
+		rc = regmap_multi_reg_write(regmap, wcd9335_2_0_defaults,
+					    ARRAY_SIZE(wcd9335_2_0_defaults));
+		regcache_cache_only(regmap, false);
+		break;
+	default:
+		pr_err("%s: unknown version: %d\n", __func__, version);
+		rc = -EINVAL;
+		break;
+	}
+
+	return rc;
+}
+EXPORT_SYMBOL(wcd9335_regmap_register_patch);
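+
+/*
+ * Usage sketch (illustrative only; the wcd9xxx handle and dev names
+ * below are assumptions, not part of this file): the parent driver
+ * applies the patch once the chip version has been read, e.g.
+ *
+ *	ret = wcd9335_regmap_register_patch(wcd9xxx->regmap,
+ *					    wcd9xxx->version);
+ *	if (ret)
+ *		dev_err(dev, "failed to patch regmap defaults: %d\n", ret);
+ *
+ * The cache-only window inside the function ensures the updated
+ * defaults land only in the regcache and are never written out to the
+ * hardware.
+ */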
+
+static bool wcd9335_is_readable_register(struct device *dev, unsigned int reg)
+{
+	u8 pg_num, reg_offset;
+	const u8 *reg_tbl = NULL;
+
+	/*
+	 * Get the page number from the MSB of the codec register address.
+	 * Page 0x80 maps to the table index PAGE_0X80; any other page
+	 * above 0xD does not exist and is not readable.
+	 */
+	pg_num = reg >> 0x8;
+	if (pg_num == 0x80)
+		pg_num = PAGE_0X80;
+	else if (pg_num >= 0xE)
+		return false;
+
+	reg_tbl = wcd9335_reg[pg_num];
+	reg_offset = reg & 0xFF;
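+	/* e.g. register 0x0B42 (hypothetical) -> page 0x0B, offset 0x42 */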
+
+	if (reg_tbl)
+		return reg_tbl[reg_offset];
+	else
+		return false;
+}
+
+static bool wcd9335_is_volatile_register(struct device *dev, unsigned int reg)
+{
+	/*
+	 * Registers from 0x000 to 0x0FF are volatile because this
+	 * space contains the interrupt status, mask and clear
+	 * registers.
+	 */
+	if (reg < 0x100)
+		return true;
+
+	/* IIR Coeff registers are not cacheable */
+	if ((reg >= WCD9335_CDC_SIDETONE_IIR0_IIR_COEF_B1_CTL) &&
+	    (reg <= WCD9335_CDC_SIDETONE_IIR1_IIR_COEF_B2_CTL))
+		return true;
+
+	if ((reg >= WCD9335_CDC_ANC0_IIR_COEFF_1_CTL) &&
+	    (reg <= WCD9335_CDC_ANC0_FB_GAIN_CTL))
+		return true;
+
+	if ((reg >= WCD9335_CDC_ANC1_IIR_COEFF_1_CTL) &&
+	    (reg <= WCD9335_CDC_ANC1_FB_GAIN_CTL))
+		return true;
+	/*
+	 * CPE inbox and outbox registers are volatile since the codec
+	 * hardware can update them at any time to report CPE status.
+	 */
+	if (reg >= WCD9335_CPE_SS_MEM_PTR_0 &&
+	    reg <= WCD9335_CPE_SS_OUTBOX2_ACK)
+		return true;
+
+	if (reg >= WCD9335_RCO_CAL_OUT_1 &&
+	    reg <= WCD9335_RCO_CAL_OUT_5)
+		return true;
+
+	switch (reg) {
+	case WCD9335_CPE_SS_INBOX1_TRG:
+	case WCD9335_CPE_SS_INBOX2_TRG:
+	case WCD9335_SWR_AHB_BRIDGE_WR_DATA_0:
+	case WCD9335_SWR_AHB_BRIDGE_WR_DATA_1:
+	case WCD9335_SWR_AHB_BRIDGE_WR_DATA_2:
+	case WCD9335_SWR_AHB_BRIDGE_WR_DATA_3:
+	case WCD9335_SWR_AHB_BRIDGE_WR_ADDR_0:
+	case WCD9335_SWR_AHB_BRIDGE_WR_ADDR_1:
+	case WCD9335_SWR_AHB_BRIDGE_WR_ADDR_2:
+	case WCD9335_SWR_AHB_BRIDGE_WR_ADDR_3:
+	case WCD9335_SWR_AHB_BRIDGE_RD_DATA_0:
+	case WCD9335_SWR_AHB_BRIDGE_RD_DATA_1:
+	case WCD9335_SWR_AHB_BRIDGE_RD_DATA_2:
+	case WCD9335_SWR_AHB_BRIDGE_RD_DATA_3:
+	case WCD9335_SWR_AHB_BRIDGE_RD_ADDR_0:
+	case WCD9335_SWR_AHB_BRIDGE_RD_ADDR_1:
+	case WCD9335_SWR_AHB_BRIDGE_RD_ADDR_2:
+	case WCD9335_SWR_AHB_BRIDGE_RD_ADDR_3:
+	case WCD9335_ANA_BIAS:
+	case WCD9335_ANA_CLK_TOP:
+	case WCD9335_ANA_RCO:
+	case WCD9335_CDC_CLK_RST_CTRL_MCLK_CONTROL:
+	case WCD9335_ANA_MBHC_RESULT_3:
+	case WCD9335_ANA_MBHC_RESULT_2:
+	case WCD9335_ANA_MBHC_RESULT_1:
+	case WCD9335_ANA_MBHC_MECH:
+	case WCD9335_ANA_MBHC_ELECT:
+	case WCD9335_ANA_MBHC_ZDET:
+	case WCD9335_ANA_MICB2:
+	case WCD9335_CPE_SS_SS_ERROR_INT_STATUS:
+	case WCD9335_CPE_SS_SS_ERROR_INT_MASK:
+	case WCD9335_CPE_SS_SS_ERROR_INT_CLEAR:
+	case WCD9335_CPE_SS_STATUS:
+	case WCD9335_CPE_SS_BACKUP_INT:
+	case WCD9335_CPE_SS_CFG:
+	case WCD9335_SOC_MAD_MAIN_CTL_1:
+	case WCD9335_SOC_MAD_AUDIO_CTL_3:
+	case WCD9335_SOC_MAD_AUDIO_CTL_4:
+	case WCD9335_FLYBACK_EN:
+	case WCD9335_ANA_RX_SUPPLIES:
+	case WCD9335_CDC_CLK_RST_CTRL_FS_CNT_CONTROL:
+	case WCD9335_SIDO_SIDO_CCL_2:
+	case WCD9335_SIDO_SIDO_CCL_4:
+	case WCD9335_DATA_HUB_NATIVE_FIFO_STATUS:
+	case WCD9335_MBHC_FSM_STATUS:
+	case WCD9335_SPLINE_SRC0_STATUS:
+	case WCD9335_SPLINE_SRC1_STATUS:
+	case WCD9335_SPLINE_SRC2_STATUS:
+	case WCD9335_SPLINE_SRC3_STATUS:
+	case WCD9335_SIDO_SIDO_TEST_2:
+	case WCD9335_SIDO_SIDO_CCL_8:
+	case WCD9335_BIAS_VBG_FINE_ADJ:
+	case WCD9335_VBADC_ADC_DOUTMSB:
+	case WCD9335_VBADC_ADC_DOUTLSB:
+	case WCD9335_CDC_VBAT_VBAT_GAIN_MON_VAL:
+	case WCD9335_ANA_BUCK_CTL:
+		return true;
+	default:
+		return false;
+	}
+}
+
+struct regmap_config wcd9335_regmap_config = {
+	.reg_bits = 16,
+	.val_bits = 8,
+	.cache_type = REGCACHE_RBTREE,
+	.reg_defaults = wcd9335_defaults,
+	.num_reg_defaults = ARRAY_SIZE(wcd9335_defaults),
+	.max_register = WCD9335_MAX_REGISTER,
+	.volatile_reg = wcd9335_is_volatile_register,
+	.readable_reg = wcd9335_is_readable_register,
+	.can_multi_write = true,
+};
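+
+/*
+ * Illustrative sketch (the bus ops and context names below are
+ * hypothetical, not defined here): the parent MFD driver hands this
+ * config to regmap together with its bus implementation, e.g.
+ *
+ *	regmap = devm_regmap_init(dev, &wcd9xxx_bus, wcd9xxx,
+ *				  &wcd9335_regmap_config);
+ *	if (IS_ERR(regmap))
+ *		return PTR_ERR(regmap);
+ */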
+
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/mfd/wcd9335-tables.c	2019-01-22 16:16:24.687257128 +0100
@@ -0,0 +1,1326 @@
+/*
+ * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/types.h>
+#include <linux/mfd/wcd9335/registers.h>
+
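+/*
+ * Strip the page number (upper byte) from a 16-bit codec register
+ * address, leaving the offset within the 256-byte page,
+ * e.g. WCD9335_REG(0x0B42) == 0x42.
+ */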
+#define WCD9335_REG(reg)  ((reg) & 0xFF)
+
+const u8 wcd9335_page0_reg_readable[WCD9335_PAGE_SIZE] = {
+	[WCD9335_REG(WCD9335_PAGE0_PAGE_REGISTER)] = 1,
+	[WCD9335_REG(WCD9335_CODEC_RPM_CLK_BYPASS)] = 1,
+	[WCD9335_REG(WCD9335_CODEC_RPM_CLK_GATE)] = 1,
+	[WCD9335_REG(WCD9335_CODEC_RPM_CLK_MCLK_CFG)] = 1,
+	[WCD9335_REG(WCD9335_CODEC_RPM_RST_CTL)] = 1,
+	[WCD9335_REG(WCD9335_CODEC_RPM_PWR_CDC_DIG_HM_CTL)] = 1,
+	[WCD9335_REG(WCD9335_CODEC_RPM_PWR_CPE_DEEPSLP_1)] = 1,
+	[WCD9335_REG(WCD9335_CODEC_RPM_PWR_CPE_DEEPSLP_2)] = 1,
+	[WCD9335_REG(WCD9335_CODEC_RPM_PWR_CPE_DEEPSLP_3)] = 1,
+	[WCD9335_REG(WCD9335_CODEC_RPM_PWR_CPE_IRAM_SHUTDOWN)] = 1,
+	[WCD9335_REG(WCD9335_CODEC_RPM_PWR_CPE_DRAM1_SHUTDOWN)] = 1,
+	[WCD9335_REG(WCD9335_CODEC_RPM_PWR_CPE_DRAM0_SHUTDOWN_1)] = 1,
+	[WCD9335_REG(WCD9335_CODEC_RPM_PWR_CPE_DRAM0_SHUTDOWN_2)] = 1,
+	[WCD9335_REG(WCD9335_CODEC_RPM_INT_MASK)] = 1,
+	[WCD9335_REG(WCD9335_CODEC_RPM_INT_STATUS)] = 1,
+	[WCD9335_REG(WCD9335_CODEC_RPM_INT_CLEAR)] = 0,
+	[WCD9335_REG(WCD9335_CHIP_TIER_CTRL_CHIP_ID_BYTE0)] = 1,
+	[WCD9335_REG(WCD9335_CHIP_TIER_CTRL_CHIP_ID_BYTE1)] = 1,
+	[WCD9335_REG(WCD9335_CHIP_TIER_CTRL_CHIP_ID_BYTE2)] = 1,
+	[WCD9335_REG(WCD9335_CHIP_TIER_CTRL_CHIP_ID_BYTE3)] = 1,
+	[WCD9335_REG(WCD9335_CHIP_TIER_CTRL_EFUSE_CTL)] = 1,
+	[WCD9335_REG(WCD9335_CHIP_TIER_CTRL_EFUSE_TEST0)] = 1,
+	[WCD9335_REG(WCD9335_CHIP_TIER_CTRL_EFUSE_TEST1)] = 1,
+	[WCD9335_REG(WCD9335_CHIP_TIER_CTRL_EFUSE_VAL_OUT0)] = 1,
+	[WCD9335_REG(WCD9335_CHIP_TIER_CTRL_EFUSE_VAL_OUT1)] = 1,
+	[WCD9335_REG(WCD9335_CHIP_TIER_CTRL_EFUSE_VAL_OUT2)] = 1,
+	[WCD9335_REG(WCD9335_CHIP_TIER_CTRL_EFUSE_VAL_OUT3)] = 1,
+	[WCD9335_REG(WCD9335_CHIP_TIER_CTRL_EFUSE_VAL_OUT4)] = 1,
+	[WCD9335_REG(WCD9335_CHIP_TIER_CTRL_EFUSE_VAL_OUT5)] = 1,
+	[WCD9335_REG(WCD9335_CHIP_TIER_CTRL_EFUSE_VAL_OUT6)] = 1,
+	[WCD9335_REG(WCD9335_CHIP_TIER_CTRL_EFUSE_VAL_OUT7)] = 1,
+	[WCD9335_REG(WCD9335_CHIP_TIER_CTRL_EFUSE_VAL_OUT8)] = 1,
+	[WCD9335_REG(WCD9335_CHIP_TIER_CTRL_EFUSE_VAL_OUT9)] = 1,
+	[WCD9335_REG(WCD9335_CHIP_TIER_CTRL_EFUSE_VAL_OUT10)] = 1,
+	[WCD9335_REG(WCD9335_CHIP_TIER_CTRL_EFUSE_VAL_OUT11)] = 1,
+	[WCD9335_REG(WCD9335_CHIP_TIER_CTRL_EFUSE_VAL_OUT12)] = 1,
+	[WCD9335_REG(WCD9335_CHIP_TIER_CTRL_EFUSE_VAL_OUT13)] = 1,
+	[WCD9335_REG(WCD9335_CHIP_TIER_CTRL_EFUSE_VAL_OUT14)] = 1,
+	[WCD9335_REG(WCD9335_CHIP_TIER_CTRL_EFUSE_VAL_OUT15)] = 1,
+	[WCD9335_REG(WCD9335_CHIP_TIER_CTRL_EFUSE_STATUS)] = 1,
+	[WCD9335_REG(WCD9335_CHIP_TIER_CTRL_I2C_SLAVE_ID_NONNEGO)] = 1,
+	[WCD9335_REG(WCD9335_CHIP_TIER_CTRL_I2C_SLAVE_ID_1)] = 1,
+	[WCD9335_REG(WCD9335_CHIP_TIER_CTRL_I2C_SLAVE_ID_2)] = 1,
+	[WCD9335_REG(WCD9335_CHIP_TIER_CTRL_I2C_SLAVE_ID_3)] = 1,
+	[WCD9335_REG(WCD9335_CHIP_TIER_CTRL_ANA_WAIT_STATE_CTL)] = 1,
+	[WCD9335_REG(WCD9335_CHIP_TIER_CTRL_I2C_ACTIVE)] = 1,
+	[WCD9335_REG(WCD9335_CHIP_TIER_CTRL_PROC1_MON_CTL)] = 1,
+	[WCD9335_REG(WCD9335_CHIP_TIER_CTRL_PROC1_MON_STATUS)] = 1,
+	[WCD9335_REG(WCD9335_CHIP_TIER_CTRL_PROC1_MON_CNT_MSB)] = 1,
+	[WCD9335_REG(WCD9335_CHIP_TIER_CTRL_PROC1_MON_CNT_LSB)] = 1,
+	[WCD9335_REG(WCD9335_CHIP_TIER_CTRL_PROC2_MON_CTL)] = 1,
+	[WCD9335_REG(WCD9335_CHIP_TIER_CTRL_PROC2_MON_STATUS)] = 1,
+	[WCD9335_REG(WCD9335_CHIP_TIER_CTRL_PROC2_MON_CNT_MSB)] = 1,
+	[WCD9335_REG(WCD9335_CHIP_TIER_CTRL_PROC2_MON_CNT_LSB)] = 1,
+	[WCD9335_REG(WCD9335_CHIP_TIER_CTRL_PROC3_MON_CTL)] = 1,
+	[WCD9335_REG(WCD9335_CHIP_TIER_CTRL_PROC3_MON_STATUS)] = 1,
+	[WCD9335_REG(WCD9335_CHIP_TIER_CTRL_PROC3_MON_CNT_MSB)] = 1,
+	[WCD9335_REG(WCD9335_CHIP_TIER_CTRL_PROC3_MON_CNT_LSB)] = 1,
+	[WCD9335_REG(WCD9335_DATA_HUB_DATA_HUB_RX_I2S_CTL)] = 1,
+	[WCD9335_REG(WCD9335_DATA_HUB_DATA_HUB_TX_I2S_CTL)] = 1,
+	[WCD9335_REG(WCD9335_DATA_HUB_DATA_HUB_I2S_CLK)] = 1,
+	[WCD9335_REG(WCD9335_DATA_HUB_DATA_HUB_RX0_INP_CFG)] = 1,
+	[WCD9335_REG(WCD9335_DATA_HUB_DATA_HUB_RX1_INP_CFG)] = 1,
+	[WCD9335_REG(WCD9335_DATA_HUB_DATA_HUB_RX2_INP_CFG)] = 1,
+	[WCD9335_REG(WCD9335_DATA_HUB_DATA_HUB_RX3_INP_CFG)] = 1,
+	[WCD9335_REG(WCD9335_DATA_HUB_DATA_HUB_RX4_INP_CFG)] = 1,
+	[WCD9335_REG(WCD9335_DATA_HUB_DATA_HUB_RX5_INP_CFG)] = 1,
+	[WCD9335_REG(WCD9335_DATA_HUB_DATA_HUB_RX6_INP_CFG)] = 1,
+	[WCD9335_REG(WCD9335_DATA_HUB_DATA_HUB_RX7_INP_CFG)] = 1,
+	[WCD9335_REG(WCD9335_DATA_HUB_DATA_HUB_SB_TX0_INP_CFG)] = 1,
+	[WCD9335_REG(WCD9335_DATA_HUB_DATA_HUB_SB_TX1_INP_CFG)] = 1,
+	[WCD9335_REG(WCD9335_DATA_HUB_DATA_HUB_SB_TX2_INP_CFG)] = 1,
+	[WCD9335_REG(WCD9335_DATA_HUB_DATA_HUB_SB_TX3_INP_CFG)] = 1,
+	[WCD9335_REG(WCD9335_DATA_HUB_DATA_HUB_SB_TX4_INP_CFG)] = 1,
+	[WCD9335_REG(WCD9335_DATA_HUB_DATA_HUB_SB_TX5_INP_CFG)] = 1,
+	[WCD9335_REG(WCD9335_DATA_HUB_DATA_HUB_SB_TX6_INP_CFG)] = 1,
+	[WCD9335_REG(WCD9335_DATA_HUB_DATA_HUB_SB_TX7_INP_CFG)] = 1,
+	[WCD9335_REG(WCD9335_DATA_HUB_DATA_HUB_SB_TX8_INP_CFG)] = 1,
+	[WCD9335_REG(WCD9335_DATA_HUB_DATA_HUB_SB_TX9_INP_CFG)] = 1,
+	[WCD9335_REG(WCD9335_DATA_HUB_DATA_HUB_SB_TX10_INP_CFG)] = 1,
+	[WCD9335_REG(WCD9335_DATA_HUB_DATA_HUB_SB_TX11_INP_CFG)] = 1,
+	[WCD9335_REG(WCD9335_DATA_HUB_DATA_HUB_SB_TX13_INP_CFG)] = 1,
+	[WCD9335_REG(WCD9335_DATA_HUB_DATA_HUB_SB_TX14_INP_CFG)] = 1,
+	[WCD9335_REG(WCD9335_DATA_HUB_DATA_HUB_SB_TX15_INP_CFG)] = 1,
+	[WCD9335_REG(WCD9335_DATA_HUB_DATA_HUB_TX_I2S_SD0_L_CFG)] = 1,
+	[WCD9335_REG(WCD9335_DATA_HUB_DATA_HUB_TX_I2S_SD0_R_CFG)] = 1,
+	[WCD9335_REG(WCD9335_DATA_HUB_DATA_HUB_TX_I2S_SD1_L_CFG)] = 1,
+	[WCD9335_REG(WCD9335_DATA_HUB_DATA_HUB_TX_I2S_SD1_R_CFG)] = 1,
+	[WCD9335_REG(WCD9335_DATA_HUB_NATIVE_FIFO_SYNC)] = 1,
+	[WCD9335_REG(WCD9335_DATA_HUB_NATIVE_FIFO_STATUS)] = 1,
+	[WCD9335_REG(WCD9335_INTR_CFG)] = 1,
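+	/* the interrupt commit/clear registers below are write-only */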
+	[WCD9335_REG(WCD9335_INTR_CLR_COMMIT)] = 0,
+	[WCD9335_REG(WCD9335_INTR_PIN1_MASK0)] = 1,
+	[WCD9335_REG(WCD9335_INTR_PIN1_MASK1)] = 1,
+	[WCD9335_REG(WCD9335_INTR_PIN1_MASK2)] = 1,
+	[WCD9335_REG(WCD9335_INTR_PIN1_MASK3)] = 1,
+	[WCD9335_REG(WCD9335_INTR_PIN1_STATUS0)] = 1,
+	[WCD9335_REG(WCD9335_INTR_PIN1_STATUS1)] = 1,
+	[WCD9335_REG(WCD9335_INTR_PIN1_STATUS2)] = 1,
+	[WCD9335_REG(WCD9335_INTR_PIN1_STATUS3)] = 1,
+	[WCD9335_REG(WCD9335_INTR_PIN1_CLEAR0)] = 0,
+	[WCD9335_REG(WCD9335_INTR_PIN1_CLEAR1)] = 0,
+	[WCD9335_REG(WCD9335_INTR_PIN1_CLEAR2)] = 0,
+	[WCD9335_REG(WCD9335_INTR_PIN1_CLEAR3)] = 0,
+	[WCD9335_REG(WCD9335_INTR_PIN2_MASK0)] = 1,
+	[WCD9335_REG(WCD9335_INTR_PIN2_MASK1)] = 1,
+	[WCD9335_REG(WCD9335_INTR_PIN2_MASK2)] = 1,
+	[WCD9335_REG(WCD9335_INTR_PIN2_MASK3)] = 1,
+	[WCD9335_REG(WCD9335_INTR_PIN2_STATUS0)] = 1,
+	[WCD9335_REG(WCD9335_INTR_PIN2_STATUS1)] = 1,
+	[WCD9335_REG(WCD9335_INTR_PIN2_STATUS2)] = 1,
+	[WCD9335_REG(WCD9335_INTR_PIN2_STATUS3)] = 1,
+	[WCD9335_REG(WCD9335_INTR_PIN2_CLEAR0)] = 0,
+	[WCD9335_REG(WCD9335_INTR_PIN2_CLEAR1)] = 0,
+	[WCD9335_REG(WCD9335_INTR_PIN2_CLEAR2)] = 0,
+	[WCD9335_REG(WCD9335_INTR_PIN2_CLEAR3)] = 0,
+	[WCD9335_REG(WCD9335_INTR_LEVEL0)] = 1,
+	[WCD9335_REG(WCD9335_INTR_LEVEL1)] = 1,
+	[WCD9335_REG(WCD9335_INTR_LEVEL2)] = 1,
+	[WCD9335_REG(WCD9335_INTR_LEVEL3)] = 1,
+	[WCD9335_REG(WCD9335_INTR_BYPASS0)] = 1,
+	[WCD9335_REG(WCD9335_INTR_BYPASS1)] = 1,
+	[WCD9335_REG(WCD9335_INTR_BYPASS2)] = 1,
+	[WCD9335_REG(WCD9335_INTR_BYPASS3)] = 1,
+	[WCD9335_REG(WCD9335_INTR_SET0)] = 1,
+	[WCD9335_REG(WCD9335_INTR_SET1)] = 1,
+	[WCD9335_REG(WCD9335_INTR_SET2)] = 1,
+	[WCD9335_REG(WCD9335_INTR_SET3)] = 1,
+};
+
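+/*
+ * Per-page readability tables: a non-zero entry marks the register at
+ * that page offset as readable, a zero entry as write-only. Offsets
+ * left out of the designated initializer default to 0 (not readable).
+ * Page 1 holds the CPE, I2S and SB (SLIMbus) FLL control registers.
+ */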
+const u8 wcd9335_page1_reg_readable[WCD9335_PAGE_SIZE] = {
+	[WCD9335_REG(WCD9335_PAGE1_PAGE_REGISTER)] = 1,
+	[WCD9335_REG(WCD9335_CPE_FLL_USER_CTL_0)] = 1,
+	[WCD9335_REG(WCD9335_CPE_FLL_USER_CTL_1)] = 1,
+	[WCD9335_REG(WCD9335_CPE_FLL_USER_CTL_2)] = 1,
+	[WCD9335_REG(WCD9335_CPE_FLL_USER_CTL_3)] = 1,
+	[WCD9335_REG(WCD9335_CPE_FLL_USER_CTL_4)] = 1,
+	[WCD9335_REG(WCD9335_CPE_FLL_USER_CTL_5)] = 1,
+	[WCD9335_REG(WCD9335_CPE_FLL_USER_CTL_6)] = 1,
+	[WCD9335_REG(WCD9335_CPE_FLL_USER_CTL_7)] = 1,
+	[WCD9335_REG(WCD9335_CPE_FLL_USER_CTL_8)] = 1,
+	[WCD9335_REG(WCD9335_CPE_FLL_USER_CTL_9)] = 1,
+	[WCD9335_REG(WCD9335_CPE_FLL_L_VAL_CTL_0)] = 1,
+	[WCD9335_REG(WCD9335_CPE_FLL_L_VAL_CTL_1)] = 1,
+	[WCD9335_REG(WCD9335_CPE_FLL_DSM_FRAC_CTL_0)] = 1,
+	[WCD9335_REG(WCD9335_CPE_FLL_DSM_FRAC_CTL_1)] = 1,
+	[WCD9335_REG(WCD9335_CPE_FLL_CONFIG_CTL_0)] = 1,
+	[WCD9335_REG(WCD9335_CPE_FLL_CONFIG_CTL_1)] = 1,
+	[WCD9335_REG(WCD9335_CPE_FLL_CONFIG_CTL_2)] = 1,
+	[WCD9335_REG(WCD9335_CPE_FLL_CONFIG_CTL_3)] = 1,
+	[WCD9335_REG(WCD9335_CPE_FLL_CONFIG_CTL_4)] = 1,
+	[WCD9335_REG(WCD9335_CPE_FLL_TEST_CTL_0)] = 1,
+	[WCD9335_REG(WCD9335_CPE_FLL_TEST_CTL_1)] = 1,
+	[WCD9335_REG(WCD9335_CPE_FLL_TEST_CTL_2)] = 1,
+	[WCD9335_REG(WCD9335_CPE_FLL_TEST_CTL_3)] = 1,
+	[WCD9335_REG(WCD9335_CPE_FLL_TEST_CTL_4)] = 1,
+	[WCD9335_REG(WCD9335_CPE_FLL_TEST_CTL_5)] = 1,
+	[WCD9335_REG(WCD9335_CPE_FLL_TEST_CTL_6)] = 1,
+	[WCD9335_REG(WCD9335_CPE_FLL_TEST_CTL_7)] = 1,
+	[WCD9335_REG(WCD9335_CPE_FLL_FREQ_CTL_0)] = 1,
+	[WCD9335_REG(WCD9335_CPE_FLL_FREQ_CTL_1)] = 1,
+	[WCD9335_REG(WCD9335_CPE_FLL_FREQ_CTL_2)] = 1,
+	[WCD9335_REG(WCD9335_CPE_FLL_FREQ_CTL_3)] = 1,
+	[WCD9335_REG(WCD9335_CPE_FLL_SSC_CTL_0)] = 1,
+	[WCD9335_REG(WCD9335_CPE_FLL_SSC_CTL_1)] = 1,
+	[WCD9335_REG(WCD9335_CPE_FLL_SSC_CTL_2)] = 1,
+	[WCD9335_REG(WCD9335_CPE_FLL_SSC_CTL_3)] = 1,
+	[WCD9335_REG(WCD9335_CPE_FLL_FLL_MODE)] = 1,
+	[WCD9335_REG(WCD9335_CPE_FLL_STATUS_0)] = 1,
+	[WCD9335_REG(WCD9335_CPE_FLL_STATUS_1)] = 1,
+	[WCD9335_REG(WCD9335_CPE_FLL_STATUS_2)] = 1,
+	[WCD9335_REG(WCD9335_CPE_FLL_STATUS_3)] = 1,
+	[WCD9335_REG(WCD9335_I2S_FLL_USER_CTL_0)] = 1,
+	[WCD9335_REG(WCD9335_I2S_FLL_USER_CTL_1)] = 1,
+	[WCD9335_REG(WCD9335_I2S_FLL_USER_CTL_2)] = 1,
+	[WCD9335_REG(WCD9335_I2S_FLL_USER_CTL_3)] = 1,
+	[WCD9335_REG(WCD9335_I2S_FLL_USER_CTL_4)] = 1,
+	[WCD9335_REG(WCD9335_I2S_FLL_USER_CTL_5)] = 1,
+	[WCD9335_REG(WCD9335_I2S_FLL_USER_CTL_6)] = 1,
+	[WCD9335_REG(WCD9335_I2S_FLL_USER_CTL_7)] = 1,
+	[WCD9335_REG(WCD9335_I2S_FLL_USER_CTL_8)] = 1,
+	[WCD9335_REG(WCD9335_I2S_FLL_USER_CTL_9)] = 1,
+	[WCD9335_REG(WCD9335_I2S_FLL_L_VAL_CTL_0)] = 1,
+	[WCD9335_REG(WCD9335_I2S_FLL_L_VAL_CTL_1)] = 1,
+	[WCD9335_REG(WCD9335_I2S_FLL_DSM_FRAC_CTL_0)] = 1,
+	[WCD9335_REG(WCD9335_I2S_FLL_DSM_FRAC_CTL_1)] = 1,
+	[WCD9335_REG(WCD9335_I2S_FLL_CONFIG_CTL_0)] = 1,
+	[WCD9335_REG(WCD9335_I2S_FLL_CONFIG_CTL_1)] = 1,
+	[WCD9335_REG(WCD9335_I2S_FLL_CONFIG_CTL_2)] = 1,
+	[WCD9335_REG(WCD9335_I2S_FLL_CONFIG_CTL_3)] = 1,
+	[WCD9335_REG(WCD9335_I2S_FLL_CONFIG_CTL_4)] = 1,
+	[WCD9335_REG(WCD9335_I2S_FLL_TEST_CTL_0)] = 1,
+	[WCD9335_REG(WCD9335_I2S_FLL_TEST_CTL_1)] = 1,
+	[WCD9335_REG(WCD9335_I2S_FLL_TEST_CTL_2)] = 1,
+	[WCD9335_REG(WCD9335_I2S_FLL_TEST_CTL_3)] = 1,
+	[WCD9335_REG(WCD9335_I2S_FLL_TEST_CTL_4)] = 1,
+	[WCD9335_REG(WCD9335_I2S_FLL_TEST_CTL_5)] = 1,
+	[WCD9335_REG(WCD9335_I2S_FLL_TEST_CTL_6)] = 1,
+	[WCD9335_REG(WCD9335_I2S_FLL_TEST_CTL_7)] = 1,
+	[WCD9335_REG(WCD9335_I2S_FLL_FREQ_CTL_0)] = 1,
+	[WCD9335_REG(WCD9335_I2S_FLL_FREQ_CTL_1)] = 1,
+	[WCD9335_REG(WCD9335_I2S_FLL_FREQ_CTL_2)] = 1,
+	[WCD9335_REG(WCD9335_I2S_FLL_FREQ_CTL_3)] = 1,
+	[WCD9335_REG(WCD9335_I2S_FLL_SSC_CTL_0)] = 1,
+	[WCD9335_REG(WCD9335_I2S_FLL_SSC_CTL_1)] = 1,
+	[WCD9335_REG(WCD9335_I2S_FLL_SSC_CTL_2)] = 1,
+	[WCD9335_REG(WCD9335_I2S_FLL_SSC_CTL_3)] = 1,
+	[WCD9335_REG(WCD9335_I2S_FLL_FLL_MODE)] = 1,
+	[WCD9335_REG(WCD9335_I2S_FLL_STATUS_0)] = 1,
+	[WCD9335_REG(WCD9335_I2S_FLL_STATUS_1)] = 1,
+	[WCD9335_REG(WCD9335_I2S_FLL_STATUS_2)] = 1,
+	[WCD9335_REG(WCD9335_I2S_FLL_STATUS_3)] = 1,
+	[WCD9335_REG(WCD9335_SB_FLL_USER_CTL_0)] = 1,
+	[WCD9335_REG(WCD9335_SB_FLL_USER_CTL_1)] = 1,
+	[WCD9335_REG(WCD9335_SB_FLL_USER_CTL_2)] = 1,
+	[WCD9335_REG(WCD9335_SB_FLL_USER_CTL_3)] = 1,
+	[WCD9335_REG(WCD9335_SB_FLL_USER_CTL_4)] = 1,
+	[WCD9335_REG(WCD9335_SB_FLL_USER_CTL_5)] = 1,
+	[WCD9335_REG(WCD9335_SB_FLL_USER_CTL_6)] = 1,
+	[WCD9335_REG(WCD9335_SB_FLL_USER_CTL_7)] = 1,
+	[WCD9335_REG(WCD9335_SB_FLL_USER_CTL_8)] = 1,
+	[WCD9335_REG(WCD9335_SB_FLL_USER_CTL_9)] = 1,
+	[WCD9335_REG(WCD9335_SB_FLL_L_VAL_CTL_0)] = 1,
+	[WCD9335_REG(WCD9335_SB_FLL_L_VAL_CTL_1)] = 1,
+	[WCD9335_REG(WCD9335_SB_FLL_DSM_FRAC_CTL_0)] = 1,
+	[WCD9335_REG(WCD9335_SB_FLL_DSM_FRAC_CTL_1)] = 1,
+	[WCD9335_REG(WCD9335_SB_FLL_CONFIG_CTL_0)] = 1,
+	[WCD9335_REG(WCD9335_SB_FLL_CONFIG_CTL_1)] = 1,
+	[WCD9335_REG(WCD9335_SB_FLL_CONFIG_CTL_2)] = 1,
+	[WCD9335_REG(WCD9335_SB_FLL_CONFIG_CTL_3)] = 1,
+	[WCD9335_REG(WCD9335_SB_FLL_CONFIG_CTL_4)] = 1,
+	[WCD9335_REG(WCD9335_SB_FLL_TEST_CTL_0)] = 1,
+	[WCD9335_REG(WCD9335_SB_FLL_TEST_CTL_1)] = 1,
+	[WCD9335_REG(WCD9335_SB_FLL_TEST_CTL_2)] = 1,
+	[WCD9335_REG(WCD9335_SB_FLL_TEST_CTL_3)] = 1,
+	[WCD9335_REG(WCD9335_SB_FLL_TEST_CTL_4)] = 1,
+	[WCD9335_REG(WCD9335_SB_FLL_TEST_CTL_5)] = 1,
+	[WCD9335_REG(WCD9335_SB_FLL_TEST_CTL_6)] = 1,
+	[WCD9335_REG(WCD9335_SB_FLL_TEST_CTL_7)] = 1,
+	[WCD9335_REG(WCD9335_SB_FLL_FREQ_CTL_0)] = 1,
+	[WCD9335_REG(WCD9335_SB_FLL_FREQ_CTL_1)] = 1,
+	[WCD9335_REG(WCD9335_SB_FLL_FREQ_CTL_2)] = 1,
+	[WCD9335_REG(WCD9335_SB_FLL_FREQ_CTL_3)] = 1,
+	[WCD9335_REG(WCD9335_SB_FLL_SSC_CTL_0)] = 1,
+	[WCD9335_REG(WCD9335_SB_FLL_SSC_CTL_1)] = 1,
+	[WCD9335_REG(WCD9335_SB_FLL_SSC_CTL_2)] = 1,
+	[WCD9335_REG(WCD9335_SB_FLL_SSC_CTL_3)] = 1,
+	[WCD9335_REG(WCD9335_SB_FLL_FLL_MODE)] = 1,
+	[WCD9335_REG(WCD9335_SB_FLL_STATUS_0)] = 1,
+	[WCD9335_REG(WCD9335_SB_FLL_STATUS_1)] = 1,
+	[WCD9335_REG(WCD9335_SB_FLL_STATUS_2)] = 1,
+	[WCD9335_REG(WCD9335_SB_FLL_STATUS_3)] = 1,
+};
+
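+/* page 2: CPE subsystem mailbox, memory window and MAD registers */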
+const u8 wcd9335_page2_reg_readable[WCD9335_PAGE_SIZE] = {
+	[WCD9335_REG(WCD9335_PAGE2_PAGE_REGISTER)] = 1,
+	[WCD9335_REG(WCD9335_CPE_SS_MEM_PTR_0)] = 1,
+	[WCD9335_REG(WCD9335_CPE_SS_MEM_PTR_1)] = 1,
+	[WCD9335_REG(WCD9335_CPE_SS_MEM_PTR_2)] = 1,
+	[WCD9335_REG(WCD9335_CPE_SS_MEM_CTRL)] = 1,
+	[WCD9335_REG(WCD9335_CPE_SS_MEM_BANK_0)] = 1,
+	[WCD9335_REG(WCD9335_CPE_SS_MEM_BANK_1)] = 1,
+	[WCD9335_REG(WCD9335_CPE_SS_MEM_BANK_2)] = 1,
+	[WCD9335_REG(WCD9335_CPE_SS_MEM_BANK_3)] = 1,
+	[WCD9335_REG(WCD9335_CPE_SS_MEM_BANK_4)] = 1,
+	[WCD9335_REG(WCD9335_CPE_SS_MEM_BANK_5)] = 1,
+	[WCD9335_REG(WCD9335_CPE_SS_MEM_BANK_6)] = 1,
+	[WCD9335_REG(WCD9335_CPE_SS_MEM_BANK_7)] = 1,
+	[WCD9335_REG(WCD9335_CPE_SS_MEM_BANK_8)] = 1,
+	[WCD9335_REG(WCD9335_CPE_SS_MEM_BANK_9)] = 1,
+	[WCD9335_REG(WCD9335_CPE_SS_MEM_BANK_10)] = 1,
+	[WCD9335_REG(WCD9335_CPE_SS_MEM_BANK_11)] = 1,
+	[WCD9335_REG(WCD9335_CPE_SS_MEM_BANK_12)] = 1,
+	[WCD9335_REG(WCD9335_CPE_SS_MEM_BANK_13)] = 1,
+	[WCD9335_REG(WCD9335_CPE_SS_MEM_BANK_14)] = 1,
+	[WCD9335_REG(WCD9335_CPE_SS_MEM_BANK_15)] = 1,
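+	/* CPE inbox and trigger mailbox registers are write-only */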
+	[WCD9335_REG(WCD9335_CPE_SS_INBOX1_TRG)] = 0,
+	[WCD9335_REG(WCD9335_CPE_SS_INBOX2_TRG)] = 0,
+	[WCD9335_REG(WCD9335_CPE_SS_INBOX1_0)] = 0,
+	[WCD9335_REG(WCD9335_CPE_SS_INBOX1_1)] = 0,
+	[WCD9335_REG(WCD9335_CPE_SS_INBOX1_2)] = 0,
+	[WCD9335_REG(WCD9335_CPE_SS_INBOX1_3)] = 0,
+	[WCD9335_REG(WCD9335_CPE_SS_INBOX1_4)] = 0,
+	[WCD9335_REG(WCD9335_CPE_SS_INBOX1_5)] = 0,
+	[WCD9335_REG(WCD9335_CPE_SS_INBOX1_6)] = 0,
+	[WCD9335_REG(WCD9335_CPE_SS_INBOX1_7)] = 0,
+	[WCD9335_REG(WCD9335_CPE_SS_INBOX1_8)] = 0,
+	[WCD9335_REG(WCD9335_CPE_SS_INBOX1_9)] = 0,
+	[WCD9335_REG(WCD9335_CPE_SS_INBOX1_10)] = 0,
+	[WCD9335_REG(WCD9335_CPE_SS_INBOX1_11)] = 0,
+	[WCD9335_REG(WCD9335_CPE_SS_INBOX1_12)] = 0,
+	[WCD9335_REG(WCD9335_CPE_SS_INBOX1_13)] = 0,
+	[WCD9335_REG(WCD9335_CPE_SS_INBOX1_14)] = 0,
+	[WCD9335_REG(WCD9335_CPE_SS_INBOX1_15)] = 0,
+	[WCD9335_REG(WCD9335_CPE_SS_OUTBOX1_0)] = 1,
+	[WCD9335_REG(WCD9335_CPE_SS_OUTBOX1_1)] = 1,
+	[WCD9335_REG(WCD9335_CPE_SS_OUTBOX1_2)] = 1,
+	[WCD9335_REG(WCD9335_CPE_SS_OUTBOX1_3)] = 1,
+	[WCD9335_REG(WCD9335_CPE_SS_OUTBOX1_4)] = 1,
+	[WCD9335_REG(WCD9335_CPE_SS_OUTBOX1_5)] = 1,
+	[WCD9335_REG(WCD9335_CPE_SS_OUTBOX1_6)] = 1,
+	[WCD9335_REG(WCD9335_CPE_SS_OUTBOX1_7)] = 1,
+	[WCD9335_REG(WCD9335_CPE_SS_OUTBOX1_8)] = 1,
+	[WCD9335_REG(WCD9335_CPE_SS_OUTBOX1_9)] = 1,
+	[WCD9335_REG(WCD9335_CPE_SS_OUTBOX1_10)] = 1,
+	[WCD9335_REG(WCD9335_CPE_SS_OUTBOX1_11)] = 1,
+	[WCD9335_REG(WCD9335_CPE_SS_OUTBOX1_12)] = 1,
+	[WCD9335_REG(WCD9335_CPE_SS_OUTBOX1_13)] = 1,
+	[WCD9335_REG(WCD9335_CPE_SS_OUTBOX1_14)] = 1,
+	[WCD9335_REG(WCD9335_CPE_SS_OUTBOX1_15)] = 1,
+	[WCD9335_REG(WCD9335_CPE_SS_INBOX2_0)] = 0,
+	[WCD9335_REG(WCD9335_CPE_SS_INBOX2_1)] = 0,
+	[WCD9335_REG(WCD9335_CPE_SS_INBOX2_2)] = 0,
+	[WCD9335_REG(WCD9335_CPE_SS_INBOX2_3)] = 0,
+	[WCD9335_REG(WCD9335_CPE_SS_INBOX2_4)] = 0,
+	[WCD9335_REG(WCD9335_CPE_SS_INBOX2_5)] = 0,
+	[WCD9335_REG(WCD9335_CPE_SS_INBOX2_6)] = 0,
+	[WCD9335_REG(WCD9335_CPE_SS_INBOX2_7)] = 0,
+	[WCD9335_REG(WCD9335_CPE_SS_INBOX2_8)] = 0,
+	[WCD9335_REG(WCD9335_CPE_SS_INBOX2_9)] = 0,
+	[WCD9335_REG(WCD9335_CPE_SS_INBOX2_10)] = 0,
+	[WCD9335_REG(WCD9335_CPE_SS_INBOX2_11)] = 0,
+	[WCD9335_REG(WCD9335_CPE_SS_INBOX2_12)] = 0,
+	[WCD9335_REG(WCD9335_CPE_SS_INBOX2_13)] = 0,
+	[WCD9335_REG(WCD9335_CPE_SS_INBOX2_14)] = 0,
+	[WCD9335_REG(WCD9335_CPE_SS_INBOX2_15)] = 0,
+	[WCD9335_REG(WCD9335_CPE_SS_OUTBOX2_0)] = 1,
+	[WCD9335_REG(WCD9335_CPE_SS_OUTBOX2_1)] = 1,
+	[WCD9335_REG(WCD9335_CPE_SS_OUTBOX2_2)] = 1,
+	[WCD9335_REG(WCD9335_CPE_SS_OUTBOX2_3)] = 1,
+	[WCD9335_REG(WCD9335_CPE_SS_OUTBOX2_4)] = 1,
+	[WCD9335_REG(WCD9335_CPE_SS_OUTBOX2_5)] = 1,
+	[WCD9335_REG(WCD9335_CPE_SS_OUTBOX2_6)] = 1,
+	[WCD9335_REG(WCD9335_CPE_SS_OUTBOX2_7)] = 1,
+	[WCD9335_REG(WCD9335_CPE_SS_OUTBOX2_8)] = 1,
+	[WCD9335_REG(WCD9335_CPE_SS_OUTBOX2_9)] = 1,
+	[WCD9335_REG(WCD9335_CPE_SS_OUTBOX2_10)] = 1,
+	[WCD9335_REG(WCD9335_CPE_SS_OUTBOX2_11)] = 1,
+	[WCD9335_REG(WCD9335_CPE_SS_OUTBOX2_12)] = 1,
+	[WCD9335_REG(WCD9335_CPE_SS_OUTBOX2_13)] = 1,
+	[WCD9335_REG(WCD9335_CPE_SS_OUTBOX2_14)] = 1,
+	[WCD9335_REG(WCD9335_CPE_SS_OUTBOX2_15)] = 1,
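+	/* outbox acknowledge registers are write-only */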
+	[WCD9335_REG(WCD9335_CPE_SS_OUTBOX1_ACK)] = 0,
+	[WCD9335_REG(WCD9335_CPE_SS_OUTBOX2_ACK)] = 0,
+	[WCD9335_REG(WCD9335_CPE_SS_EC_BUF_INT_PERIOD)] = 1,
+	[WCD9335_REG(WCD9335_CPE_SS_US_BUF_INT_PERIOD)] = 1,
+	[WCD9335_REG(WCD9335_CPE_SS_CPARMAD_BUFRDY_INT_PERIOD)] = 1,
+	[WCD9335_REG(WCD9335_CPE_SS_CFG)] = 1,
+	[WCD9335_REG(WCD9335_CPE_SS_US_EC_MUX_CFG)] = 1,
+	[WCD9335_REG(WCD9335_CPE_SS_MAD_CTL)] = 1,
+	[WCD9335_REG(WCD9335_CPE_SS_CPAR_CTL)] = 1,
+	[WCD9335_REG(WCD9335_CPE_SS_TX_PP_BUF_INT_PERIOD)] = 1,
+	[WCD9335_REG(WCD9335_CPE_SS_TX_PP_CFG)] = 1,
+	[WCD9335_REG(WCD9335_CPE_SS_DMIC0_CTL)] = 1,
+	[WCD9335_REG(WCD9335_CPE_SS_DMIC1_CTL)] = 1,
+	[WCD9335_REG(WCD9335_CPE_SS_DMIC2_CTL)] = 1,
+	[WCD9335_REG(WCD9335_CPE_SS_DMIC_CFG)] = 1,
+	[WCD9335_REG(WCD9335_CPE_SS_SVA_CFG)] = 1,
+	[WCD9335_REG(WCD9335_CPE_SS_CPAR_CFG)] = 1,
+	[WCD9335_REG(WCD9335_CPE_SS_WDOG_CFG)] = 1,
+	[WCD9335_REG(WCD9335_CPE_SS_BACKUP_INT)] = 0,
+	[WCD9335_REG(WCD9335_CPE_SS_STATUS)] = 1,
+	[WCD9335_REG(WCD9335_CPE_SS_CPE_OCD_CFG)] = 1,
+	[WCD9335_REG(WCD9335_CPE_SS_SS_ERROR_INT_MASK)] = 1,
+	[WCD9335_REG(WCD9335_CPE_SS_SS_ERROR_INT_STATUS)] = 1,
+	[WCD9335_REG(WCD9335_CPE_SS_SS_ERROR_INT_CLEAR)] = 0,
+	[WCD9335_REG(WCD9335_SOC_MAD_MAIN_CTL_1)] = 1,
+	[WCD9335_REG(WCD9335_SOC_MAD_MAIN_CTL_2)] = 1,
+	[WCD9335_REG(WCD9335_SOC_MAD_AUDIO_CTL_1)] = 1,
+	[WCD9335_REG(WCD9335_SOC_MAD_AUDIO_CTL_2)] = 1,
+	[WCD9335_REG(WCD9335_SOC_MAD_AUDIO_CTL_3)] = 1,
+	[WCD9335_REG(WCD9335_SOC_MAD_AUDIO_CTL_4)] = 1,
+	[WCD9335_REG(WCD9335_SOC_MAD_AUDIO_CTL_5)] = 1,
+	[WCD9335_REG(WCD9335_SOC_MAD_AUDIO_CTL_6)] = 1,
+	[WCD9335_REG(WCD9335_SOC_MAD_AUDIO_CTL_7)] = 1,
+	[WCD9335_REG(WCD9335_SOC_MAD_AUDIO_CTL_8)] = 1,
+	[WCD9335_REG(WCD9335_SOC_MAD_AUDIO_IIR_CTL_PTR)] = 1,
+	[WCD9335_REG(WCD9335_SOC_MAD_AUDIO_IIR_CTL_VAL)] = 1,
+	[WCD9335_REG(WCD9335_SOC_MAD_ULTR_CTL_1)] = 1,
+	[WCD9335_REG(WCD9335_SOC_MAD_ULTR_CTL_2)] = 1,
+	[WCD9335_REG(WCD9335_SOC_MAD_ULTR_CTL_3)] = 1,
+	[WCD9335_REG(WCD9335_SOC_MAD_ULTR_CTL_4)] = 1,
+	[WCD9335_REG(WCD9335_SOC_MAD_ULTR_CTL_5)] = 1,
+	[WCD9335_REG(WCD9335_SOC_MAD_ULTR_CTL_6)] = 1,
+	[WCD9335_REG(WCD9335_SOC_MAD_ULTR_CTL_7)] = 1,
+	[WCD9335_REG(WCD9335_SOC_MAD_BEACON_CTL_1)] = 1,
+	[WCD9335_REG(WCD9335_SOC_MAD_BEACON_CTL_2)] = 1,
+	[WCD9335_REG(WCD9335_SOC_MAD_BEACON_CTL_3)] = 1,
+	[WCD9335_REG(WCD9335_SOC_MAD_BEACON_CTL_4)] = 1,
+	[WCD9335_REG(WCD9335_SOC_MAD_BEACON_CTL_5)] = 1,
+	[WCD9335_REG(WCD9335_SOC_MAD_BEACON_CTL_6)] = 1,
+	[WCD9335_REG(WCD9335_SOC_MAD_BEACON_CTL_7)] = 1,
+	[WCD9335_REG(WCD9335_SOC_MAD_BEACON_CTL_8)] = 1,
+	[WCD9335_REG(WCD9335_SOC_MAD_BEACON_IIR_CTL_PTR)] = 1,
+	[WCD9335_REG(WCD9335_SOC_MAD_BEACON_IIR_CTL_VAL)] = 1,
+	[WCD9335_REG(WCD9335_SOC_MAD_INP_SEL)] = 1,
+};
+
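+/* page 6: analog control (bias, MBHC, micbias, SIDO buck, RX drivers) */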
+const u8 wcd9335_page6_reg_readable[WCD9335_PAGE_SIZE] = {
+	[WCD9335_REG(WCD9335_PAGE6_PAGE_REGISTER)] = 1,
+	[WCD9335_REG(WCD9335_ANA_BIAS)] = 1,
+	[WCD9335_REG(WCD9335_ANA_CLK_TOP)] = 1,
+	[WCD9335_REG(WCD9335_ANA_RCO)] = 1,
+	[WCD9335_REG(WCD9335_ANA_BUCK_VOUT_A)] = 1,
+	[WCD9335_REG(WCD9335_ANA_BUCK_VOUT_D)] = 1,
+	[WCD9335_REG(WCD9335_ANA_BUCK_CTL)] = 1,
+	[WCD9335_REG(WCD9335_ANA_BUCK_STATUS)] = 1,
+	[WCD9335_REG(WCD9335_ANA_RX_SUPPLIES)] = 1,
+	[WCD9335_REG(WCD9335_ANA_HPH)] = 1,
+	[WCD9335_REG(WCD9335_ANA_EAR)] = 1,
+	[WCD9335_REG(WCD9335_ANA_LO_1_2)] = 1,
+	[WCD9335_REG(WCD9335_ANA_LO_3_4)] = 1,
+	[WCD9335_REG(WCD9335_ANA_MAD_SETUP)] = 1,
+	[WCD9335_REG(WCD9335_ANA_AMIC1)] = 1,
+	[WCD9335_REG(WCD9335_ANA_AMIC2)] = 1,
+	[WCD9335_REG(WCD9335_ANA_AMIC3)] = 1,
+	[WCD9335_REG(WCD9335_ANA_AMIC4)] = 1,
+	[WCD9335_REG(WCD9335_ANA_AMIC5)] = 1,
+	[WCD9335_REG(WCD9335_ANA_AMIC6)] = 1,
+	[WCD9335_REG(WCD9335_ANA_MBHC_MECH)] = 1,
+	[WCD9335_REG(WCD9335_ANA_MBHC_ELECT)] = 1,
+	[WCD9335_REG(WCD9335_ANA_MBHC_ZDET)] = 1,
+	[WCD9335_REG(WCD9335_ANA_MBHC_RESULT_1)] = 1,
+	[WCD9335_REG(WCD9335_ANA_MBHC_RESULT_2)] = 1,
+	[WCD9335_REG(WCD9335_ANA_MBHC_RESULT_3)] = 1,
+	[WCD9335_REG(WCD9335_ANA_MBHC_BTN0)] = 1,
+	[WCD9335_REG(WCD9335_ANA_MBHC_BTN1)] = 1,
+	[WCD9335_REG(WCD9335_ANA_MBHC_BTN2)] = 1,
+	[WCD9335_REG(WCD9335_ANA_MBHC_BTN3)] = 1,
+	[WCD9335_REG(WCD9335_ANA_MBHC_BTN4)] = 1,
+	[WCD9335_REG(WCD9335_ANA_MBHC_BTN5)] = 1,
+	[WCD9335_REG(WCD9335_ANA_MBHC_BTN6)] = 1,
+	[WCD9335_REG(WCD9335_ANA_MBHC_BTN7)] = 1,
+	[WCD9335_REG(WCD9335_ANA_MICB1)] = 1,
+	[WCD9335_REG(WCD9335_ANA_MICB2)] = 1,
+	[WCD9335_REG(WCD9335_ANA_MICB2_RAMP)] = 1,
+	[WCD9335_REG(WCD9335_ANA_MICB3)] = 1,
+	[WCD9335_REG(WCD9335_ANA_MICB4)] = 1,
+	[WCD9335_REG(WCD9335_ANA_VBADC)] = 1,
+	[WCD9335_REG(WCD9335_BIAS_CTL)] = 1,
+	[WCD9335_REG(WCD9335_BIAS_VBG_FINE_ADJ)] = 1,
+	[WCD9335_REG(WCD9335_CLOCK_TEST_CTL)] = 1,
+	[WCD9335_REG(WCD9335_RCO_CTRL_1)] = 1,
+	[WCD9335_REG(WCD9335_RCO_CTRL_2)] = 1,
+	[WCD9335_REG(WCD9335_RCO_CAL)] = 1,
+	[WCD9335_REG(WCD9335_RCO_CAL_1)] = 1,
+	[WCD9335_REG(WCD9335_RCO_CAL_2)] = 1,
+	[WCD9335_REG(WCD9335_RCO_TEST_CTRL)] = 1,
+	[WCD9335_REG(WCD9335_RCO_CAL_OUT_1)] = 1,
+	[WCD9335_REG(WCD9335_RCO_CAL_OUT_2)] = 1,
+	[WCD9335_REG(WCD9335_RCO_CAL_OUT_3)] = 1,
+	[WCD9335_REG(WCD9335_RCO_CAL_OUT_4)] = 1,
+	[WCD9335_REG(WCD9335_RCO_CAL_OUT_5)] = 1,
+	[WCD9335_REG(WCD9335_SIDO_SIDO_MODE_1)] = 1,
+	[WCD9335_REG(WCD9335_SIDO_SIDO_MODE_2)] = 1,
+	[WCD9335_REG(WCD9335_SIDO_SIDO_MODE_3)] = 1,
+	[WCD9335_REG(WCD9335_SIDO_SIDO_MODE_4)] = 1,
+	[WCD9335_REG(WCD9335_SIDO_SIDO_VCL_1)] = 1,
+	[WCD9335_REG(WCD9335_SIDO_SIDO_VCL_2)] = 1,
+	[WCD9335_REG(WCD9335_SIDO_SIDO_VCL_3)] = 1,
+	[WCD9335_REG(WCD9335_SIDO_SIDO_CCL_1)] = 1,
+	[WCD9335_REG(WCD9335_SIDO_SIDO_CCL_2)] = 1,
+	[WCD9335_REG(WCD9335_SIDO_SIDO_CCL_3)] = 1,
+	[WCD9335_REG(WCD9335_SIDO_SIDO_CCL_4)] = 1,
+	[WCD9335_REG(WCD9335_SIDO_SIDO_CCL_5)] = 1,
+	[WCD9335_REG(WCD9335_SIDO_SIDO_CCL_6)] = 1,
+	[WCD9335_REG(WCD9335_SIDO_SIDO_CCL_7)] = 1,
+	[WCD9335_REG(WCD9335_SIDO_SIDO_CCL_8)] = 1,
+	[WCD9335_REG(WCD9335_SIDO_SIDO_CCL_9)] = 1,
+	[WCD9335_REG(WCD9335_SIDO_SIDO_CCL_10)] = 1,
+	[WCD9335_REG(WCD9335_SIDO_SIDO_FILTER_1)] = 1,
+	[WCD9335_REG(WCD9335_SIDO_SIDO_FILTER_2)] = 1,
+	[WCD9335_REG(WCD9335_SIDO_SIDO_DRIVER_1)] = 1,
+	[WCD9335_REG(WCD9335_SIDO_SIDO_DRIVER_2)] = 1,
+	[WCD9335_REG(WCD9335_SIDO_SIDO_DRIVER_3)] = 1,
+	[WCD9335_REG(WCD9335_SIDO_SIDO_CAL_CODE_EXT_1)] = 1,
+	[WCD9335_REG(WCD9335_SIDO_SIDO_CAL_CODE_EXT_2)] = 1,
+	[WCD9335_REG(WCD9335_SIDO_SIDO_CAL_CODE_OUT_1)] = 1,
+	[WCD9335_REG(WCD9335_SIDO_SIDO_CAL_CODE_OUT_2)] = 1,
+	[WCD9335_REG(WCD9335_SIDO_SIDO_TEST_1)] = 1,
+	[WCD9335_REG(WCD9335_SIDO_SIDO_TEST_2)] = 1,
+	[WCD9335_REG(WCD9335_MBHC_CTL_1)] = 1,
+	[WCD9335_REG(WCD9335_MBHC_CTL_2)] = 1,
+	[WCD9335_REG(WCD9335_MBHC_PLUG_DETECT_CTL)] = 1,
+	[WCD9335_REG(WCD9335_MBHC_ZDET_ANA_CTL)] = 1,
+	[WCD9335_REG(WCD9335_MBHC_ZDET_RAMP_CTL)] = 1,
+	[WCD9335_REG(WCD9335_MBHC_FSM_DEBUG)] = 1,
+	[WCD9335_REG(WCD9335_MBHC_TEST_CTL)] = 1,
+	[WCD9335_REG(WCD9335_VBADC_SUBBLOCK_EN)] = 1,
+	[WCD9335_REG(WCD9335_VBADC_IBIAS_FE)] = 1,
+	[WCD9335_REG(WCD9335_VBADC_BIAS_ADC)] = 1,
+	[WCD9335_REG(WCD9335_VBADC_FE_CTRL)] = 1,
+	[WCD9335_REG(WCD9335_VBADC_ADC_REF)] = 1,
+	[WCD9335_REG(WCD9335_VBADC_ADC_IO)] = 1,
+	[WCD9335_REG(WCD9335_VBADC_ADC_SAR)] = 1,
+	[WCD9335_REG(WCD9335_VBADC_DEBUG)] = 1,
+	[WCD9335_REG(WCD9335_VBADC_ADC_DOUTMSB)] = 1,
+	[WCD9335_REG(WCD9335_VBADC_ADC_DOUTLSB)] = 1,
+	[WCD9335_REG(WCD9335_LDOH_MODE)] = 1,
+	[WCD9335_REG(WCD9335_LDOH_BIAS)] = 1,
+	[WCD9335_REG(WCD9335_LDOH_STB_LOADS)] = 1,
+	[WCD9335_REG(WCD9335_LDOH_SLOWRAMP)] = 1,
+	[WCD9335_REG(WCD9335_MICB1_TEST_CTL_1)] = 1,
+	[WCD9335_REG(WCD9335_MICB1_TEST_CTL_2)] = 1,
+	[WCD9335_REG(WCD9335_MICB1_TEST_CTL_3)] = 1,
+	[WCD9335_REG(WCD9335_MICB2_TEST_CTL_1)] = 1,
+	[WCD9335_REG(WCD9335_MICB2_TEST_CTL_2)] = 1,
+	[WCD9335_REG(WCD9335_MICB2_TEST_CTL_3)] = 1,
+	[WCD9335_REG(WCD9335_MICB3_TEST_CTL_1)] = 1,
+	[WCD9335_REG(WCD9335_MICB3_TEST_CTL_2)] = 1,
+	[WCD9335_REG(WCD9335_MICB3_TEST_CTL_3)] = 1,
+	[WCD9335_REG(WCD9335_MICB4_TEST_CTL_1)] = 1,
+	[WCD9335_REG(WCD9335_MICB4_TEST_CTL_2)] = 1,
+	[WCD9335_REG(WCD9335_MICB4_TEST_CTL_3)] = 1,
+	[WCD9335_REG(WCD9335_TX_COM_ADC_VCM)] = 1,
+	[WCD9335_REG(WCD9335_TX_COM_BIAS_ATEST)] = 1,
+	[WCD9335_REG(WCD9335_TX_COM_ADC_INT1_IB)] = 1,
+	[WCD9335_REG(WCD9335_TX_COM_ADC_INT2_IB)] = 1,
+	[WCD9335_REG(WCD9335_TX_COM_TXFE_DIV_CTL)] = 1,
+	[WCD9335_REG(WCD9335_TX_COM_TXFE_DIV_START)] = 1,
+	[WCD9335_REG(WCD9335_TX_COM_TXFE_DIV_STOP_9P6M)] = 1,
+	[WCD9335_REG(WCD9335_TX_COM_TXFE_DIV_STOP_12P288M)] = 1,
+	[WCD9335_REG(WCD9335_TX_1_2_TEST_EN)] = 1,
+	[WCD9335_REG(WCD9335_TX_1_2_ADC_IB)] = 1,
+	[WCD9335_REG(WCD9335_TX_1_2_ATEST_REFCTL)] = 1,
+	[WCD9335_REG(WCD9335_TX_1_2_TEST_CTL)] = 1,
+	[WCD9335_REG(WCD9335_TX_1_2_TEST_BLK_EN)] = 1,
+	[WCD9335_REG(WCD9335_TX_1_2_TXFE_CLKDIV)] = 1,
+	[WCD9335_REG(WCD9335_TX_1_2_SAR1_ERR)] = 1,
+	[WCD9335_REG(WCD9335_TX_1_2_SAR2_ERR)] = 1,
+	[WCD9335_REG(WCD9335_TX_3_4_TEST_EN)] = 1,
+	[WCD9335_REG(WCD9335_TX_3_4_ADC_IB)] = 1,
+	[WCD9335_REG(WCD9335_TX_3_4_ATEST_REFCTL)] = 1,
+	[WCD9335_REG(WCD9335_TX_3_4_TEST_CTL)] = 1,
+	[WCD9335_REG(WCD9335_TX_3_4_TEST_BLK_EN)] = 1,
+	[WCD9335_REG(WCD9335_TX_3_4_TXFE_CLKDIV)] = 1,
+	[WCD9335_REG(WCD9335_TX_3_4_SAR1_ERR)] = 1,
+	[WCD9335_REG(WCD9335_TX_3_4_SAR2_ERR)] = 1,
+	[WCD9335_REG(WCD9335_TX_5_6_TEST_EN)] = 1,
+	[WCD9335_REG(WCD9335_TX_5_6_ADC_IB)] = 1,
+	[WCD9335_REG(WCD9335_TX_5_6_ATEST_REFCTL)] = 1,
+	[WCD9335_REG(WCD9335_TX_5_6_TEST_CTL)] = 1,
+	[WCD9335_REG(WCD9335_TX_5_6_TEST_BLK_EN)] = 1,
+	[WCD9335_REG(WCD9335_TX_5_6_TXFE_CLKDIV)] = 1,
+	[WCD9335_REG(WCD9335_TX_5_6_SAR1_ERR)] = 1,
+	[WCD9335_REG(WCD9335_TX_5_6_SAR2_ERR)] = 1,
+	[WCD9335_REG(WCD9335_CLASSH_MODE_1)] = 1,
+	[WCD9335_REG(WCD9335_CLASSH_MODE_2)] = 1,
+	[WCD9335_REG(WCD9335_CLASSH_MODE_3)] = 1,
+	[WCD9335_REG(WCD9335_CLASSH_CTRL_VCL_1)] = 1,
+	[WCD9335_REG(WCD9335_CLASSH_CTRL_VCL_2)] = 1,
+	[WCD9335_REG(WCD9335_CLASSH_CTRL_CCL_1)] = 1,
+	[WCD9335_REG(WCD9335_CLASSH_CTRL_CCL_2)] = 1,
+	[WCD9335_REG(WCD9335_CLASSH_CTRL_CCL_3)] = 1,
+	[WCD9335_REG(WCD9335_CLASSH_CTRL_CCL_4)] = 1,
+	[WCD9335_REG(WCD9335_CLASSH_CTRL_CCL_5)] = 1,
+	[WCD9335_REG(WCD9335_CLASSH_BUCK_TMUX_A_D)] = 1,
+	[WCD9335_REG(WCD9335_CLASSH_BUCK_SW_DRV_CNTL)] = 1,
+	[WCD9335_REG(WCD9335_CLASSH_SPARE)] = 1,
+	[WCD9335_REG(WCD9335_FLYBACK_EN)] = 1,
+	[WCD9335_REG(WCD9335_FLYBACK_VNEG_CTRL_1)] = 1,
+	[WCD9335_REG(WCD9335_FLYBACK_VNEG_CTRL_2)] = 1,
+	[WCD9335_REG(WCD9335_FLYBACK_VNEG_CTRL_3)] = 1,
+	[WCD9335_REG(WCD9335_FLYBACK_VNEG_CTRL_4)] = 1,
+	[WCD9335_REG(WCD9335_FLYBACK_VNEG_CTRL_5)] = 1,
+	[WCD9335_REG(WCD9335_FLYBACK_VNEG_CTRL_6)] = 1,
+	[WCD9335_REG(WCD9335_FLYBACK_VNEG_CTRL_7)] = 1,
+	[WCD9335_REG(WCD9335_FLYBACK_VNEG_CTRL_8)] = 1,
+	[WCD9335_REG(WCD9335_FLYBACK_VNEG_CTRL_9)] = 1,
+	[WCD9335_REG(WCD9335_FLYBACK_VNEG_DAC_CTRL_1)] = 1,
+	[WCD9335_REG(WCD9335_FLYBACK_VNEG_DAC_CTRL_2)] = 1,
+	[WCD9335_REG(WCD9335_FLYBACK_VNEG_DAC_CTRL_3)] = 1,
+	[WCD9335_REG(WCD9335_FLYBACK_VNEG_DAC_CTRL_4)] = 1,
+	[WCD9335_REG(WCD9335_FLYBACK_TEST_CTL)] = 1,
+	[WCD9335_REG(WCD9335_RX_AUX_SW_CTL)] = 1,
+	[WCD9335_REG(WCD9335_RX_PA_AUX_IN_CONN)] = 1,
+	[WCD9335_REG(WCD9335_RX_TIMER_DIV)] = 1,
+	[WCD9335_REG(WCD9335_RX_OCP_CTL)] = 1,
+	[WCD9335_REG(WCD9335_RX_OCP_COUNT)] = 1,
+	[WCD9335_REG(WCD9335_RX_BIAS_EAR_DAC)] = 1,
+	[WCD9335_REG(WCD9335_RX_BIAS_EAR_AMP)] = 1,
+	[WCD9335_REG(WCD9335_RX_BIAS_HPH_LDO)] = 1,
+	[WCD9335_REG(WCD9335_RX_BIAS_HPH_PA)] = 1,
+	[WCD9335_REG(WCD9335_RX_BIAS_HPH_RDACBUFF_CNP2)] = 1,
+	[WCD9335_REG(WCD9335_RX_BIAS_HPH_RDAC_LDO)] = 1,
+	[WCD9335_REG(WCD9335_RX_BIAS_HPH_CNP1)] = 1,
+	[WCD9335_REG(WCD9335_RX_BIAS_HPH_LOWPOWER)] = 1,
+	[WCD9335_REG(WCD9335_RX_BIAS_DIFFLO_PA)] = 1,
+	[WCD9335_REG(WCD9335_RX_BIAS_DIFFLO_REF)] = 1,
+	[WCD9335_REG(WCD9335_RX_BIAS_DIFFLO_LDO)] = 1,
+	[WCD9335_REG(WCD9335_RX_BIAS_SELO_DAC_PA)] = 1,
+	[WCD9335_REG(WCD9335_RX_BIAS_BUCK_RST)] = 1,
+	[WCD9335_REG(WCD9335_RX_BIAS_BUCK_VREF_ERRAMP)] = 1,
+	[WCD9335_REG(WCD9335_RX_BIAS_FLYB_ERRAMP)] = 1,
+	[WCD9335_REG(WCD9335_RX_BIAS_FLYB_BUFF)] = 1,
+	[WCD9335_REG(WCD9335_RX_BIAS_FLYB_MID_RST)] = 1,
+	[WCD9335_REG(WCD9335_HPH_L_STATUS)] = 1,
+	[WCD9335_REG(WCD9335_HPH_R_STATUS)] = 1,
+	[WCD9335_REG(WCD9335_HPH_CNP_EN)] = 1,
+	[WCD9335_REG(WCD9335_HPH_CNP_WG_CTL)] = 1,
+	[WCD9335_REG(WCD9335_HPH_CNP_WG_TIME)] = 1,
+	[WCD9335_REG(WCD9335_HPH_OCP_CTL)] = 1,
+	[WCD9335_REG(WCD9335_HPH_AUTO_CHOP)] = 1,
+	[WCD9335_REG(WCD9335_HPH_CHOP_CTL)] = 1,
+	[WCD9335_REG(WCD9335_HPH_PA_CTL1)] = 1,
+	[WCD9335_REG(WCD9335_HPH_PA_CTL2)] = 1,
+	[WCD9335_REG(WCD9335_HPH_L_EN)] = 1,
+	[WCD9335_REG(WCD9335_HPH_L_TEST)] = 1,
+	[WCD9335_REG(WCD9335_HPH_L_ATEST)] = 1,
+	[WCD9335_REG(WCD9335_HPH_R_EN)] = 1,
+	[WCD9335_REG(WCD9335_HPH_R_TEST)] = 1,
+	[WCD9335_REG(WCD9335_HPH_R_ATEST)] = 1,
+	[WCD9335_REG(WCD9335_HPH_RDAC_CLK_CTL1)] = 1,
+	[WCD9335_REG(WCD9335_HPH_RDAC_CLK_CTL2)] = 1,
+	[WCD9335_REG(WCD9335_HPH_RDAC_LDO_CTL)] = 1,
+	[WCD9335_REG(WCD9335_HPH_RDAC_CHOP_CLK_LP_CTL)] = 1,
+	[WCD9335_REG(WCD9335_HPH_REFBUFF_UHQA_CTL)] = 1,
+	[WCD9335_REG(WCD9335_HPH_REFBUFF_LP_CTL)] = 1,
+	[WCD9335_REG(WCD9335_HPH_L_DAC_CTL)] = 1,
+	[WCD9335_REG(WCD9335_HPH_R_DAC_CTL)] = 1,
+	[WCD9335_REG(WCD9335_EAR_EN_REG)] = 1,
+	[WCD9335_REG(WCD9335_EAR_CMBUFF)] = 1,
+	[WCD9335_REG(WCD9335_EAR_ICTL)] = 1,
+	[WCD9335_REG(WCD9335_EAR_EN_DBG_CTL)] = 1,
+	[WCD9335_REG(WCD9335_EAR_CNP)] = 1,
+	[WCD9335_REG(WCD9335_EAR_DAC_CTL_ATEST)] = 1,
+	[WCD9335_REG(WCD9335_EAR_STATUS_REG)] = 1,
+	[WCD9335_REG(WCD9335_EAR_OUT_SHORT)] = 1,
+	[WCD9335_REG(WCD9335_DIFF_LO_MISC)] = 1,
+	[WCD9335_REG(WCD9335_DIFF_LO_LO2_COMPANDER)] = 1,
+	[WCD9335_REG(WCD9335_DIFF_LO_LO1_COMPANDER)] = 1,
+	[WCD9335_REG(WCD9335_DIFF_LO_COMMON)] = 1,
+	[WCD9335_REG(WCD9335_DIFF_LO_BYPASS_EN)] = 1,
+	[WCD9335_REG(WCD9335_DIFF_LO_CNP)] = 1,
+	[WCD9335_REG(WCD9335_DIFF_LO_CORE_OUT_PROG)] = 1,
+	[WCD9335_REG(WCD9335_DIFF_LO_LDO_OUT_PROG)] = 1,
+	[WCD9335_REG(WCD9335_DIFF_LO_COM_SWCAP_REFBUF_FREQ)] = 1,
+	[WCD9335_REG(WCD9335_DIFF_LO_COM_PA_FREQ)] = 1,
+	[WCD9335_REG(WCD9335_DIFF_LO_RESERVED_REG)] = 1,
+	[WCD9335_REG(WCD9335_DIFF_LO_LO1_STATUS_1)] = 1,
+	[WCD9335_REG(WCD9335_DIFF_LO_LO1_STATUS_2)] = 1,
+	[WCD9335_REG(WCD9335_SE_LO_COM1)] = 1,
+	[WCD9335_REG(WCD9335_SE_LO_COM2)] = 1,
+	[WCD9335_REG(WCD9335_SE_LO_LO3_GAIN)] = 1,
+	[WCD9335_REG(WCD9335_SE_LO_LO3_CTRL)] = 1,
+	[WCD9335_REG(WCD9335_SE_LO_LO4_GAIN)] = 1,
+	[WCD9335_REG(WCD9335_SE_LO_LO4_CTRL)] = 1,
+	[WCD9335_REG(WCD9335_SE_LO_LO3_STATUS)] = 1,
+	[WCD9335_REG(WCD9335_SE_LO_LO4_STATUS)] = 1,
+};
+
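+/* page 10: ANC, TX decimator and speaker-protection path registers */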
+const u8 wcd9335_page10_reg_readable[WCD9335_PAGE_SIZE] = {
+	[WCD9335_REG(WCD9335_PAGE10_PAGE_REGISTER)] = 1,
+	[WCD9335_REG(WCD9335_CDC_ANC0_CLK_RESET_CTL)] = 1,
+	[WCD9335_REG(WCD9335_CDC_ANC0_MODE_1_CTL)] = 1,
+	[WCD9335_REG(WCD9335_CDC_ANC0_MODE_2_CTL)] = 1,
+	[WCD9335_REG(WCD9335_CDC_ANC0_FF_SHIFT)] = 1,
+	[WCD9335_REG(WCD9335_CDC_ANC0_FB_SHIFT)] = 1,
+	[WCD9335_REG(WCD9335_CDC_ANC0_LPF_FF_A_CTL)] = 1,
+	[WCD9335_REG(WCD9335_CDC_ANC0_LPF_FF_B_CTL)] = 1,
+	[WCD9335_REG(WCD9335_CDC_ANC0_LPF_FB_CTL)] = 1,
+	[WCD9335_REG(WCD9335_CDC_ANC0_SMLPF_CTL)] = 1,
+	[WCD9335_REG(WCD9335_CDC_ANC0_DCFLT_SHIFT_CTL)] = 1,
+	[WCD9335_REG(WCD9335_CDC_ANC0_IIR_ADAPT_CTL)] = 1,
+	[WCD9335_REG(WCD9335_CDC_ANC0_IIR_COEFF_1_CTL)] = 1,
+	[WCD9335_REG(WCD9335_CDC_ANC0_IIR_COEFF_2_CTL)] = 1,
+	[WCD9335_REG(WCD9335_CDC_ANC0_FF_A_GAIN_CTL)] = 1,
+	[WCD9335_REG(WCD9335_CDC_ANC0_FF_B_GAIN_CTL)] = 1,
+	[WCD9335_REG(WCD9335_CDC_ANC0_FB_GAIN_CTL)] = 1,
+	[WCD9335_REG(WCD9335_CDC_ANC1_CLK_RESET_CTL)] = 1,
+	[WCD9335_REG(WCD9335_CDC_ANC1_MODE_1_CTL)] = 1,
+	[WCD9335_REG(WCD9335_CDC_ANC1_MODE_2_CTL)] = 1,
+	[WCD9335_REG(WCD9335_CDC_ANC1_FF_SHIFT)] = 1,
+	[WCD9335_REG(WCD9335_CDC_ANC1_FB_SHIFT)] = 1,
+	[WCD9335_REG(WCD9335_CDC_ANC1_LPF_FF_A_CTL)] = 1,
+	[WCD9335_REG(WCD9335_CDC_ANC1_LPF_FF_B_CTL)] = 1,
+	[WCD9335_REG(WCD9335_CDC_ANC1_LPF_FB_CTL)] = 1,
+	[WCD9335_REG(WCD9335_CDC_ANC1_SMLPF_CTL)] = 1,
+	[WCD9335_REG(WCD9335_CDC_ANC1_DCFLT_SHIFT_CTL)] = 1,
+	[WCD9335_REG(WCD9335_CDC_ANC1_IIR_ADAPT_CTL)] = 1,
+	[WCD9335_REG(WCD9335_CDC_ANC1_IIR_COEFF_1_CTL)] = 1,
+	[WCD9335_REG(WCD9335_CDC_ANC1_IIR_COEFF_2_CTL)] = 1,
+	[WCD9335_REG(WCD9335_CDC_ANC1_FF_A_GAIN_CTL)] = 1,
+	[WCD9335_REG(WCD9335_CDC_ANC1_FF_B_GAIN_CTL)] = 1,
+	[WCD9335_REG(WCD9335_CDC_ANC1_FB_GAIN_CTL)] = 1,
+	[WCD9335_REG(WCD9335_CDC_TX0_TX_PATH_CTL)] = 1,
+	[WCD9335_REG(WCD9335_CDC_TX0_TX_PATH_CFG0)] = 1,
+	[WCD9335_REG(WCD9335_CDC_TX0_TX_PATH_CFG1)] = 1,
+	[WCD9335_REG(WCD9335_CDC_TX0_TX_VOL_CTL)] = 1,
+	[WCD9335_REG(WCD9335_CDC_TX0_TX_PATH_192_CTL)] = 1,
+	[WCD9335_REG(WCD9335_CDC_TX0_TX_PATH_192_CFG)] = 1,
+	[WCD9335_REG(WCD9335_CDC_TX0_TX_PATH_SEC0)] = 1,
+	[WCD9335_REG(WCD9335_CDC_TX0_TX_PATH_SEC1)] = 1,
+	[WCD9335_REG(WCD9335_CDC_TX0_TX_PATH_SEC2)] = 1,
+	[WCD9335_REG(WCD9335_CDC_TX0_TX_PATH_SEC3)] = 1,
+	[WCD9335_REG(WCD9335_CDC_TX0_TX_PATH_SEC4)] = 1,
+	[WCD9335_REG(WCD9335_CDC_TX0_TX_PATH_SEC5)] = 1,
+	[WCD9335_REG(WCD9335_CDC_TX0_TX_PATH_SEC6)] = 1,
+	[WCD9335_REG(WCD9335_CDC_TX0_TX_PATH_SEC7)] = 1,
+	[WCD9335_REG(WCD9335_CDC_TX1_TX_PATH_CTL)] = 1,
+	[WCD9335_REG(WCD9335_CDC_TX1_TX_PATH_CFG0)] = 1,
+	[WCD9335_REG(WCD9335_CDC_TX1_TX_PATH_CFG1)] = 1,
+	[WCD9335_REG(WCD9335_CDC_TX1_TX_VOL_CTL)] = 1,
+	[WCD9335_REG(WCD9335_CDC_TX1_TX_PATH_192_CTL)] = 1,
+	[WCD9335_REG(WCD9335_CDC_TX1_TX_PATH_192_CFG)] = 1,
+	[WCD9335_REG(WCD9335_CDC_TX1_TX_PATH_SEC0)] = 1,
+	[WCD9335_REG(WCD9335_CDC_TX1_TX_PATH_SEC1)] = 1,
+	[WCD9335_REG(WCD9335_CDC_TX1_TX_PATH_SEC2)] = 1,
+	[WCD9335_REG(WCD9335_CDC_TX1_TX_PATH_SEC3)] = 1,
+	[WCD9335_REG(WCD9335_CDC_TX1_TX_PATH_SEC4)] = 1,
+	[WCD9335_REG(WCD9335_CDC_TX1_TX_PATH_SEC5)] = 1,
+	[WCD9335_REG(WCD9335_CDC_TX1_TX_PATH_SEC6)] = 1,
+	[WCD9335_REG(WCD9335_CDC_TX2_TX_PATH_CTL)] = 1,
+	[WCD9335_REG(WCD9335_CDC_TX2_TX_PATH_CFG0)] = 1,
+	[WCD9335_REG(WCD9335_CDC_TX2_TX_PATH_CFG1)] = 1,
+	[WCD9335_REG(WCD9335_CDC_TX2_TX_VOL_CTL)] = 1,
+	[WCD9335_REG(WCD9335_CDC_TX2_TX_PATH_192_CTL)] = 1,
+	[WCD9335_REG(WCD9335_CDC_TX2_TX_PATH_192_CFG)] = 1,
+	[WCD9335_REG(WCD9335_CDC_TX2_TX_PATH_SEC0)] = 1,
+	[WCD9335_REG(WCD9335_CDC_TX2_TX_PATH_SEC1)] = 1,
+	[WCD9335_REG(WCD9335_CDC_TX2_TX_PATH_SEC2)] = 1,
+	[WCD9335_REG(WCD9335_CDC_TX2_TX_PATH_SEC3)] = 1,
+	[WCD9335_REG(WCD9335_CDC_TX2_TX_PATH_SEC4)] = 1,
+	[WCD9335_REG(WCD9335_CDC_TX2_TX_PATH_SEC5)] = 1,
+	[WCD9335_REG(WCD9335_CDC_TX2_TX_PATH_SEC6)] = 1,
+	[WCD9335_REG(WCD9335_CDC_TX3_TX_PATH_CTL)] = 1,
+	[WCD9335_REG(WCD9335_CDC_TX3_TX_PATH_CFG0)] = 1,
+	[WCD9335_REG(WCD9335_CDC_TX3_TX_PATH_CFG1)] = 1,
+	[WCD9335_REG(WCD9335_CDC_TX3_TX_VOL_CTL)] = 1,
+	[WCD9335_REG(WCD9335_CDC_TX3_TX_PATH_192_CTL)] = 1,
+	[WCD9335_REG(WCD9335_CDC_TX3_TX_PATH_192_CFG)] = 1,
+	[WCD9335_REG(WCD9335_CDC_TX3_TX_PATH_SEC0)] = 1,
+	[WCD9335_REG(WCD9335_CDC_TX3_TX_PATH_SEC1)] = 1,
+	[WCD9335_REG(WCD9335_CDC_TX3_TX_PATH_SEC2)] = 1,
+	[WCD9335_REG(WCD9335_CDC_TX3_TX_PATH_SEC3)] = 1,
+	[WCD9335_REG(WCD9335_CDC_TX3_TX_PATH_SEC4)] = 1,
+	[WCD9335_REG(WCD9335_CDC_TX3_TX_PATH_SEC5)] = 1,
+	[WCD9335_REG(WCD9335_CDC_TX3_TX_PATH_SEC6)] = 1,
+	[WCD9335_REG(WCD9335_CDC_TX4_TX_PATH_CTL)] = 1,
+	[WCD9335_REG(WCD9335_CDC_TX4_TX_PATH_CFG0)] = 1,
+	[WCD9335_REG(WCD9335_CDC_TX4_TX_PATH_CFG1)] = 1,
+	[WCD9335_REG(WCD9335_CDC_TX4_TX_VOL_CTL)] = 1,
+	[WCD9335_REG(WCD9335_CDC_TX4_TX_PATH_192_CTL)] = 1,
+	[WCD9335_REG(WCD9335_CDC_TX4_TX_PATH_192_CFG)] = 1,
+	[WCD9335_REG(WCD9335_CDC_TX4_TX_PATH_SEC0)] = 1,
+	[WCD9335_REG(WCD9335_CDC_TX4_TX_PATH_SEC1)] = 1,
+	[WCD9335_REG(WCD9335_CDC_TX4_TX_PATH_SEC2)] = 1,
+	[WCD9335_REG(WCD9335_CDC_TX4_TX_PATH_SEC3)] = 1,
+	[WCD9335_REG(WCD9335_CDC_TX4_TX_PATH_SEC4)] = 1,
+	[WCD9335_REG(WCD9335_CDC_TX4_TX_PATH_SEC5)] = 1,
+	[WCD9335_REG(WCD9335_CDC_TX4_TX_PATH_SEC6)] = 1,
+	[WCD9335_REG(WCD9335_CDC_TX5_TX_PATH_CTL)] = 1,
+	[WCD9335_REG(WCD9335_CDC_TX5_TX_PATH_CFG0)] = 1,
+	[WCD9335_REG(WCD9335_CDC_TX5_TX_PATH_CFG1)] = 1,
+	[WCD9335_REG(WCD9335_CDC_TX5_TX_VOL_CTL)] = 1,
+	[WCD9335_REG(WCD9335_CDC_TX5_TX_PATH_192_CTL)] = 1,
+	[WCD9335_REG(WCD9335_CDC_TX5_TX_PATH_192_CFG)] = 1,
+	[WCD9335_REG(WCD9335_CDC_TX5_TX_PATH_SEC0)] = 1,
+	[WCD9335_REG(WCD9335_CDC_TX5_TX_PATH_SEC1)] = 1,
+	[WCD9335_REG(WCD9335_CDC_TX5_TX_PATH_SEC2)] = 1,
+	[WCD9335_REG(WCD9335_CDC_TX5_TX_PATH_SEC3)] = 1,
+	[WCD9335_REG(WCD9335_CDC_TX5_TX_PATH_SEC4)] = 1,
+	[WCD9335_REG(WCD9335_CDC_TX5_TX_PATH_SEC5)] = 1,
+	[WCD9335_REG(WCD9335_CDC_TX5_TX_PATH_SEC6)] = 1,
+	[WCD9335_REG(WCD9335_CDC_TX6_TX_PATH_CTL)] = 1,
+	[WCD9335_REG(WCD9335_CDC_TX6_TX_PATH_CFG0)] = 1,
+	[WCD9335_REG(WCD9335_CDC_TX6_TX_PATH_CFG1)] = 1,
+	[WCD9335_REG(WCD9335_CDC_TX6_TX_VOL_CTL)] = 1,
+	[WCD9335_REG(WCD9335_CDC_TX6_TX_PATH_192_CTL)] = 1,
+	[WCD9335_REG(WCD9335_CDC_TX6_TX_PATH_192_CFG)] = 1,
+	[WCD9335_REG(WCD9335_CDC_TX6_TX_PATH_SEC0)] = 1,
+	[WCD9335_REG(WCD9335_CDC_TX6_TX_PATH_SEC1)] = 1,
+	[WCD9335_REG(WCD9335_CDC_TX6_TX_PATH_SEC2)] = 1,
+	[WCD9335_REG(WCD9335_CDC_TX6_TX_PATH_SEC3)] = 1,
+	[WCD9335_REG(WCD9335_CDC_TX6_TX_PATH_SEC4)] = 1,
+	[WCD9335_REG(WCD9335_CDC_TX6_TX_PATH_SEC5)] = 1,
+	[WCD9335_REG(WCD9335_CDC_TX6_TX_PATH_SEC6)] = 1,
+	[WCD9335_REG(WCD9335_CDC_TX7_TX_PATH_CTL)] = 1,
+	[WCD9335_REG(WCD9335_CDC_TX7_TX_PATH_CFG0)] = 1,
+	[WCD9335_REG(WCD9335_CDC_TX7_TX_PATH_CFG1)] = 1,
+	[WCD9335_REG(WCD9335_CDC_TX7_TX_VOL_CTL)] = 1,
+	[WCD9335_REG(WCD9335_CDC_TX7_TX_PATH_192_CTL)] = 1,
+	[WCD9335_REG(WCD9335_CDC_TX7_TX_PATH_192_CFG)] = 1,
+	[WCD9335_REG(WCD9335_CDC_TX7_TX_PATH_SEC0)] = 1,
+	[WCD9335_REG(WCD9335_CDC_TX7_TX_PATH_SEC1)] = 1,
+	[WCD9335_REG(WCD9335_CDC_TX7_TX_PATH_SEC2)] = 1,
+	[WCD9335_REG(WCD9335_CDC_TX7_TX_PATH_SEC3)] = 1,
+	[WCD9335_REG(WCD9335_CDC_TX7_TX_PATH_SEC4)] = 1,
+	[WCD9335_REG(WCD9335_CDC_TX7_TX_PATH_SEC5)] = 1,
+	[WCD9335_REG(WCD9335_CDC_TX7_TX_PATH_SEC6)] = 1,
+	[WCD9335_REG(WCD9335_CDC_TX8_TX_PATH_CTL)] = 1,
+	[WCD9335_REG(WCD9335_CDC_TX8_TX_PATH_CFG0)] = 1,
+	[WCD9335_REG(WCD9335_CDC_TX8_TX_PATH_CFG1)] = 1,
+	[WCD9335_REG(WCD9335_CDC_TX8_TX_VOL_CTL)] = 1,
+	[WCD9335_REG(WCD9335_CDC_TX8_TX_PATH_192_CTL)] = 1,
+	[WCD9335_REG(WCD9335_CDC_TX8_TX_PATH_192_CFG)] = 1,
+	[WCD9335_REG(WCD9335_CDC_TX8_TX_PATH_SEC0)] = 1,
+	[WCD9335_REG(WCD9335_CDC_TX8_TX_PATH_SEC1)] = 1,
+	[WCD9335_REG(WCD9335_CDC_TX8_TX_PATH_SEC2)] = 1,
+	[WCD9335_REG(WCD9335_CDC_TX8_TX_PATH_SEC3)] = 1,
+	[WCD9335_REG(WCD9335_CDC_TX8_TX_PATH_SEC4)] = 1,
+	[WCD9335_REG(WCD9335_CDC_TX8_TX_PATH_SEC5)] = 1,
+	[WCD9335_REG(WCD9335_CDC_TX8_TX_PATH_SEC6)] = 1,
+	[WCD9335_REG(WCD9335_CDC_TX9_SPKR_PROT_PATH_CTL)] = 1,
+	[WCD9335_REG(WCD9335_CDC_TX9_SPKR_PROT_PATH_CFG0)] = 1,
+	[WCD9335_REG(WCD9335_CDC_TX10_SPKR_PROT_PATH_CTL)] = 1,
+	[WCD9335_REG(WCD9335_CDC_TX10_SPKR_PROT_PATH_CFG0)] = 1,
+	[WCD9335_REG(WCD9335_CDC_TX11_SPKR_PROT_PATH_CTL)] = 1,
+	[WCD9335_REG(WCD9335_CDC_TX11_SPKR_PROT_PATH_CFG0)] = 1,
+	[WCD9335_REG(WCD9335_CDC_TX12_SPKR_PROT_PATH_CTL)] = 1,
+	[WCD9335_REG(WCD9335_CDC_TX12_SPKR_PROT_PATH_CFG0)] = 1,
+};
+
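+/* page 11: compander and RX interpolator path registers */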
+const u8 wcd9335_page11_reg_readable[WCD9335_PAGE_SIZE] = {
+	[WCD9335_REG(WCD9335_PAGE11_PAGE_REGISTER)] = 1,
+	[WCD9335_REG(WCD9335_CDC_COMPANDER1_CTL0)] = 1,
+	[WCD9335_REG(WCD9335_CDC_COMPANDER1_CTL1)] = 1,
+	[WCD9335_REG(WCD9335_CDC_COMPANDER1_CTL2)] = 1,
+	[WCD9335_REG(WCD9335_CDC_COMPANDER1_CTL3)] = 1,
+	[WCD9335_REG(WCD9335_CDC_COMPANDER1_CTL4)] = 1,
+	[WCD9335_REG(WCD9335_CDC_COMPANDER1_CTL5)] = 1,
+	[WCD9335_REG(WCD9335_CDC_COMPANDER1_CTL6)] = 1,
+	[WCD9335_REG(WCD9335_CDC_COMPANDER1_CTL7)] = 1,
+	[WCD9335_REG(WCD9335_CDC_COMPANDER2_CTL0)] = 1,
+	[WCD9335_REG(WCD9335_CDC_COMPANDER2_CTL1)] = 1,
+	[WCD9335_REG(WCD9335_CDC_COMPANDER2_CTL2)] = 1,
+	[WCD9335_REG(WCD9335_CDC_COMPANDER2_CTL3)] = 1,
+	[WCD9335_REG(WCD9335_CDC_COMPANDER2_CTL4)] = 1,
+	[WCD9335_REG(WCD9335_CDC_COMPANDER2_CTL5)] = 1,
+	[WCD9335_REG(WCD9335_CDC_COMPANDER2_CTL6)] = 1,
+	[WCD9335_REG(WCD9335_CDC_COMPANDER2_CTL7)] = 1,
+	[WCD9335_REG(WCD9335_CDC_COMPANDER3_CTL0)] = 1,
+	[WCD9335_REG(WCD9335_CDC_COMPANDER3_CTL1)] = 1,
+	[WCD9335_REG(WCD9335_CDC_COMPANDER3_CTL2)] = 1,
+	[WCD9335_REG(WCD9335_CDC_COMPANDER3_CTL3)] = 1,
+	[WCD9335_REG(WCD9335_CDC_COMPANDER3_CTL4)] = 1,
+	[WCD9335_REG(WCD9335_CDC_COMPANDER3_CTL5)] = 1,
+	[WCD9335_REG(WCD9335_CDC_COMPANDER3_CTL6)] = 1,
+	[WCD9335_REG(WCD9335_CDC_COMPANDER3_CTL7)] = 1,
+	[WCD9335_REG(WCD9335_CDC_COMPANDER4_CTL0)] = 1,
+	[WCD9335_REG(WCD9335_CDC_COMPANDER4_CTL1)] = 1,
+	[WCD9335_REG(WCD9335_CDC_COMPANDER4_CTL2)] = 1,
+	[WCD9335_REG(WCD9335_CDC_COMPANDER4_CTL3)] = 1,
+	[WCD9335_REG(WCD9335_CDC_COMPANDER4_CTL4)] = 1,
+	[WCD9335_REG(WCD9335_CDC_COMPANDER4_CTL5)] = 1,
+	[WCD9335_REG(WCD9335_CDC_COMPANDER4_CTL6)] = 1,
+	[WCD9335_REG(WCD9335_CDC_COMPANDER4_CTL7)] = 1,
+	[WCD9335_REG(WCD9335_CDC_COMPANDER5_CTL0)] = 1,
+	[WCD9335_REG(WCD9335_CDC_COMPANDER5_CTL1)] = 1,
+	[WCD9335_REG(WCD9335_CDC_COMPANDER5_CTL2)] = 1,
+	[WCD9335_REG(WCD9335_CDC_COMPANDER5_CTL3)] = 1,
+	[WCD9335_REG(WCD9335_CDC_COMPANDER5_CTL4)] = 1,
+	[WCD9335_REG(WCD9335_CDC_COMPANDER5_CTL5)] = 1,
+	[WCD9335_REG(WCD9335_CDC_COMPANDER5_CTL6)] = 1,
+	[WCD9335_REG(WCD9335_CDC_COMPANDER5_CTL7)] = 1,
+	[WCD9335_REG(WCD9335_CDC_COMPANDER6_CTL0)] = 1,
+	[WCD9335_REG(WCD9335_CDC_COMPANDER6_CTL1)] = 1,
+	[WCD9335_REG(WCD9335_CDC_COMPANDER6_CTL2)] = 1,
+	[WCD9335_REG(WCD9335_CDC_COMPANDER6_CTL3)] = 1,
+	[WCD9335_REG(WCD9335_CDC_COMPANDER6_CTL4)] = 1,
+	[WCD9335_REG(WCD9335_CDC_COMPANDER6_CTL5)] = 1,
+	[WCD9335_REG(WCD9335_CDC_COMPANDER6_CTL6)] = 1,
+	[WCD9335_REG(WCD9335_CDC_COMPANDER6_CTL7)] = 1,
+	[WCD9335_REG(WCD9335_CDC_COMPANDER7_CTL0)] = 1,
+	[WCD9335_REG(WCD9335_CDC_COMPANDER7_CTL1)] = 1,
+	[WCD9335_REG(WCD9335_CDC_COMPANDER7_CTL2)] = 1,
+	[WCD9335_REG(WCD9335_CDC_COMPANDER7_CTL3)] = 1,
+	[WCD9335_REG(WCD9335_CDC_COMPANDER7_CTL4)] = 1,
+	[WCD9335_REG(WCD9335_CDC_COMPANDER7_CTL5)] = 1,
+	[WCD9335_REG(WCD9335_CDC_COMPANDER7_CTL6)] = 1,
+	[WCD9335_REG(WCD9335_CDC_COMPANDER7_CTL7)] = 1,
+	[WCD9335_REG(WCD9335_CDC_COMPANDER8_CTL0)] = 1,
+	[WCD9335_REG(WCD9335_CDC_COMPANDER8_CTL1)] = 1,
+	[WCD9335_REG(WCD9335_CDC_COMPANDER8_CTL2)] = 1,
+	[WCD9335_REG(WCD9335_CDC_COMPANDER8_CTL3)] = 1,
+	[WCD9335_REG(WCD9335_CDC_COMPANDER8_CTL4)] = 1,
+	[WCD9335_REG(WCD9335_CDC_COMPANDER8_CTL5)] = 1,
+	[WCD9335_REG(WCD9335_CDC_COMPANDER8_CTL6)] = 1,
+	[WCD9335_REG(WCD9335_CDC_COMPANDER8_CTL7)] = 1,
+	[WCD9335_REG(WCD9335_CDC_RX0_RX_PATH_CTL)] = 1,
+	[WCD9335_REG(WCD9335_CDC_RX0_RX_PATH_CFG0)] = 1,
+	[WCD9335_REG(WCD9335_CDC_RX0_RX_PATH_CFG1)] = 1,
+	[WCD9335_REG(WCD9335_CDC_RX0_RX_PATH_CFG2)] = 1,
+	[WCD9335_REG(WCD9335_CDC_RX0_RX_VOL_CTL)] = 1,
+	[WCD9335_REG(WCD9335_CDC_RX0_RX_PATH_MIX_CTL)] = 1,
+	[WCD9335_REG(WCD9335_CDC_RX0_RX_PATH_MIX_CFG)] = 1,
+	[WCD9335_REG(WCD9335_CDC_RX0_RX_VOL_MIX_CTL)] = 1,
+	[WCD9335_REG(WCD9335_CDC_RX0_RX_PATH_SEC0)] = 1,
+	[WCD9335_REG(WCD9335_CDC_RX0_RX_PATH_SEC1)] = 1,
+	[WCD9335_REG(WCD9335_CDC_RX0_RX_PATH_SEC2)] = 1,
+	[WCD9335_REG(WCD9335_CDC_RX0_RX_PATH_SEC3)] = 1,
+	[WCD9335_REG(WCD9335_CDC_RX0_RX_PATH_SEC5)] = 1,
+	[WCD9335_REG(WCD9335_CDC_RX0_RX_PATH_SEC6)] = 1,
+	[WCD9335_REG(WCD9335_CDC_RX0_RX_PATH_SEC7)] = 1,
+	[WCD9335_REG(WCD9335_CDC_RX0_RX_PATH_MIX_SEC0)] = 1,
+	[WCD9335_REG(WCD9335_CDC_RX0_RX_PATH_MIX_SEC1)] = 1,
+	[WCD9335_REG(WCD9335_CDC_RX1_RX_PATH_CTL)] = 1,
+	[WCD9335_REG(WCD9335_CDC_RX1_RX_PATH_CFG0)] = 1,
+	[WCD9335_REG(WCD9335_CDC_RX1_RX_PATH_CFG1)] = 1,
+	[WCD9335_REG(WCD9335_CDC_RX1_RX_PATH_CFG2)] = 1,
+	[WCD9335_REG(WCD9335_CDC_RX1_RX_VOL_CTL)] = 1,
+	[WCD9335_REG(WCD9335_CDC_RX1_RX_PATH_MIX_CTL)] = 1,
+	[WCD9335_REG(WCD9335_CDC_RX1_RX_PATH_MIX_CFG)] = 1,
+	[WCD9335_REG(WCD9335_CDC_RX1_RX_VOL_MIX_CTL)] = 1,
+	[WCD9335_REG(WCD9335_CDC_RX1_RX_PATH_SEC0)] = 1,
+	[WCD9335_REG(WCD9335_CDC_RX1_RX_PATH_SEC1)] = 1,
+	[WCD9335_REG(WCD9335_CDC_RX1_RX_PATH_SEC2)] = 1,
+	[WCD9335_REG(WCD9335_CDC_RX1_RX_PATH_SEC3)] = 1,
+	[WCD9335_REG(WCD9335_CDC_RX1_RX_PATH_SEC4)] = 1,
+	[WCD9335_REG(WCD9335_CDC_RX1_RX_PATH_SEC5)] = 1,
+	[WCD9335_REG(WCD9335_CDC_RX1_RX_PATH_SEC6)] = 1,
+	[WCD9335_REG(WCD9335_CDC_RX1_RX_PATH_SEC7)] = 1,
+	[WCD9335_REG(WCD9335_CDC_RX1_RX_PATH_MIX_SEC0)] = 1,
+	[WCD9335_REG(WCD9335_CDC_RX1_RX_PATH_MIX_SEC1)] = 1,
+	[WCD9335_REG(WCD9335_CDC_RX2_RX_PATH_CTL)] = 1,
+	[WCD9335_REG(WCD9335_CDC_RX2_RX_PATH_CFG0)] = 1,
+	[WCD9335_REG(WCD9335_CDC_RX2_RX_PATH_CFG1)] = 1,
+	[WCD9335_REG(WCD9335_CDC_RX2_RX_PATH_CFG2)] = 1,
+	[WCD9335_REG(WCD9335_CDC_RX2_RX_VOL_CTL)] = 1,
+	[WCD9335_REG(WCD9335_CDC_RX2_RX_PATH_MIX_CTL)] = 1,
+	[WCD9335_REG(WCD9335_CDC_RX2_RX_PATH_MIX_CFG)] = 1,
+	[WCD9335_REG(WCD9335_CDC_RX2_RX_VOL_MIX_CTL)] = 1,
+	[WCD9335_REG(WCD9335_CDC_RX2_RX_PATH_SEC0)] = 1,
+	[WCD9335_REG(WCD9335_CDC_RX2_RX_PATH_SEC1)] = 1,
+	[WCD9335_REG(WCD9335_CDC_RX2_RX_PATH_SEC2)] = 1,
+	[WCD9335_REG(WCD9335_CDC_RX2_RX_PATH_SEC3)] = 1,
+	[WCD9335_REG(WCD9335_CDC_RX2_RX_PATH_SEC4)] = 1,
+	[WCD9335_REG(WCD9335_CDC_RX2_RX_PATH_SEC5)] = 1,
+	[WCD9335_REG(WCD9335_CDC_RX2_RX_PATH_SEC6)] = 1,
+	[WCD9335_REG(WCD9335_CDC_RX2_RX_PATH_SEC7)] = 1,
+	[WCD9335_REG(WCD9335_CDC_RX2_RX_PATH_MIX_SEC0)] = 1,
+	[WCD9335_REG(WCD9335_CDC_RX2_RX_PATH_MIX_SEC1)] = 1,
+	[WCD9335_REG(WCD9335_CDC_RX3_RX_PATH_CTL)] = 1,
+	[WCD9335_REG(WCD9335_CDC_RX3_RX_PATH_CFG0)] = 1,
+	[WCD9335_REG(WCD9335_CDC_RX3_RX_PATH_CFG1)] = 1,
+	[WCD9335_REG(WCD9335_CDC_RX3_RX_PATH_CFG2)] = 1,
+	[WCD9335_REG(WCD9335_CDC_RX3_RX_VOL_CTL)] = 1,
+	[WCD9335_REG(WCD9335_CDC_RX3_RX_PATH_MIX_CTL)] = 1,
+	[WCD9335_REG(WCD9335_CDC_RX3_RX_PATH_MIX_CFG)] = 1,
+	[WCD9335_REG(WCD9335_CDC_RX3_RX_VOL_MIX_CTL)] = 1,
+	[WCD9335_REG(WCD9335_CDC_RX3_RX_PATH_SEC0)] = 1,
+	[WCD9335_REG(WCD9335_CDC_RX3_RX_PATH_SEC1)] = 1,
+	[WCD9335_REG(WCD9335_CDC_RX3_RX_PATH_SEC2)] = 1,
+	[WCD9335_REG(WCD9335_CDC_RX3_RX_PATH_SEC3)] = 1,
+	[WCD9335_REG(WCD9335_CDC_RX3_RX_PATH_SEC5)] = 1,
+	[WCD9335_REG(WCD9335_CDC_RX3_RX_PATH_SEC6)] = 1,
+	[WCD9335_REG(WCD9335_CDC_RX3_RX_PATH_SEC7)] = 1,
+	[WCD9335_REG(WCD9335_CDC_RX3_RX_PATH_MIX_SEC0)] = 1,
+	[WCD9335_REG(WCD9335_CDC_RX3_RX_PATH_MIX_SEC1)] = 1,
+	[WCD9335_REG(WCD9335_CDC_RX4_RX_PATH_CTL)] = 1,
+	[WCD9335_REG(WCD9335_CDC_RX4_RX_PATH_CFG0)] = 1,
+	[WCD9335_REG(WCD9335_CDC_RX4_RX_PATH_CFG1)] = 1,
+	[WCD9335_REG(WCD9335_CDC_RX4_RX_PATH_CFG2)] = 1,
+	[WCD9335_REG(WCD9335_CDC_RX4_RX_VOL_CTL)] = 1,
+	[WCD9335_REG(WCD9335_CDC_RX4_RX_PATH_MIX_CTL)] = 1,
+	[WCD9335_REG(WCD9335_CDC_RX4_RX_PATH_MIX_CFG)] = 1,
+	[WCD9335_REG(WCD9335_CDC_RX4_RX_VOL_MIX_CTL)] = 1,
+	[WCD9335_REG(WCD9335_CDC_RX4_RX_PATH_SEC0)] = 1,
+	[WCD9335_REG(WCD9335_CDC_RX4_RX_PATH_SEC1)] = 1,
+	[WCD9335_REG(WCD9335_CDC_RX4_RX_PATH_SEC2)] = 1,
+	[WCD9335_REG(WCD9335_CDC_RX4_RX_PATH_SEC3)] = 1,
+	[WCD9335_REG(WCD9335_CDC_RX4_RX_PATH_SEC5)] = 1,
+	[WCD9335_REG(WCD9335_CDC_RX4_RX_PATH_SEC6)] = 1,
+	[WCD9335_REG(WCD9335_CDC_RX4_RX_PATH_SEC7)] = 1,
+	[WCD9335_REG(WCD9335_CDC_RX4_RX_PATH_MIX_SEC0)] = 1,
+	[WCD9335_REG(WCD9335_CDC_RX4_RX_PATH_MIX_SEC1)] = 1,
+	[WCD9335_REG(WCD9335_CDC_RX5_RX_PATH_CTL)] = 1,
+	[WCD9335_REG(WCD9335_CDC_RX5_RX_PATH_CFG0)] = 1,
+	[WCD9335_REG(WCD9335_CDC_RX5_RX_PATH_CFG1)] = 1,
+	[WCD9335_REG(WCD9335_CDC_RX5_RX_PATH_CFG2)] = 1,
+	[WCD9335_REG(WCD9335_CDC_RX5_RX_VOL_CTL)] = 1,
+	[WCD9335_REG(WCD9335_CDC_RX5_RX_PATH_MIX_CTL)] = 1,
+	[WCD9335_REG(WCD9335_CDC_RX5_RX_PATH_MIX_CFG)] = 1,
+	[WCD9335_REG(WCD9335_CDC_RX5_RX_VOL_MIX_CTL)] = 1,
+	[WCD9335_REG(WCD9335_CDC_RX5_RX_PATH_SEC0)] = 1,
+	[WCD9335_REG(WCD9335_CDC_RX5_RX_PATH_SEC1)] = 1,
+	[WCD9335_REG(WCD9335_CDC_RX5_RX_PATH_SEC2)] = 1,
+	[WCD9335_REG(WCD9335_CDC_RX5_RX_PATH_SEC3)] = 1,
+	[WCD9335_REG(WCD9335_CDC_RX5_RX_PATH_SEC5)] = 1,
+	[WCD9335_REG(WCD9335_CDC_RX5_RX_PATH_SEC6)] = 1,
+	[WCD9335_REG(WCD9335_CDC_RX5_RX_PATH_SEC7)] = 1,
+	[WCD9335_REG(WCD9335_CDC_RX5_RX_PATH_MIX_SEC0)] = 1,
+	[WCD9335_REG(WCD9335_CDC_RX5_RX_PATH_MIX_SEC1)] = 1,
+	[WCD9335_REG(WCD9335_CDC_RX6_RX_PATH_CTL)] = 1,
+	[WCD9335_REG(WCD9335_CDC_RX6_RX_PATH_CFG0)] = 1,
+	[WCD9335_REG(WCD9335_CDC_RX6_RX_PATH_CFG1)] = 1,
+	[WCD9335_REG(WCD9335_CDC_RX6_RX_PATH_CFG2)] = 1,
+	[WCD9335_REG(WCD9335_CDC_RX6_RX_VOL_CTL)] = 1,
+	[WCD9335_REG(WCD9335_CDC_RX6_RX_PATH_MIX_CTL)] = 1,
+	[WCD9335_REG(WCD9335_CDC_RX6_RX_PATH_MIX_CFG)] = 1,
+	[WCD9335_REG(WCD9335_CDC_RX6_RX_VOL_MIX_CTL)] = 1,
+	[WCD9335_REG(WCD9335_CDC_RX6_RX_PATH_SEC0)] = 1,
+	[WCD9335_REG(WCD9335_CDC_RX6_RX_PATH_SEC1)] = 1,
+	[WCD9335_REG(WCD9335_CDC_RX6_RX_PATH_SEC2)] = 1,
+	[WCD9335_REG(WCD9335_CDC_RX6_RX_PATH_SEC3)] = 1,
+	[WCD9335_REG(WCD9335_CDC_RX6_RX_PATH_SEC5)] = 1,
+	[WCD9335_REG(WCD9335_CDC_RX6_RX_PATH_SEC6)] = 1,
+	[WCD9335_REG(WCD9335_CDC_RX6_RX_PATH_SEC7)] = 1,
+	[WCD9335_REG(WCD9335_CDC_RX6_RX_PATH_MIX_SEC0)] = 1,
+	[WCD9335_REG(WCD9335_CDC_RX6_RX_PATH_MIX_SEC1)] = 1,
+	[WCD9335_REG(WCD9335_CDC_RX7_RX_PATH_CTL)] = 1,
+	[WCD9335_REG(WCD9335_CDC_RX7_RX_PATH_CFG0)] = 1,
+	[WCD9335_REG(WCD9335_CDC_RX7_RX_PATH_CFG1)] = 1,
+	[WCD9335_REG(WCD9335_CDC_RX7_RX_PATH_CFG2)] = 1,
+	[WCD9335_REG(WCD9335_CDC_RX7_RX_VOL_CTL)] = 1,
+	[WCD9335_REG(WCD9335_CDC_RX7_RX_PATH_MIX_CTL)] = 1,
+	[WCD9335_REG(WCD9335_CDC_RX7_RX_PATH_MIX_CFG)] = 1,
+	[WCD9335_REG(WCD9335_CDC_RX7_RX_VOL_MIX_CTL)] = 1,
+	[WCD9335_REG(WCD9335_CDC_RX7_RX_PATH_SEC0)] = 1,
+	[WCD9335_REG(WCD9335_CDC_RX7_RX_PATH_SEC1)] = 1,
+	[WCD9335_REG(WCD9335_CDC_RX7_RX_PATH_SEC2)] = 1,
+	[WCD9335_REG(WCD9335_CDC_RX7_RX_PATH_SEC3)] = 1,
+	[WCD9335_REG(WCD9335_CDC_RX7_RX_PATH_SEC5)] = 1,
+	[WCD9335_REG(WCD9335_CDC_RX7_RX_PATH_SEC6)] = 1,
+	[WCD9335_REG(WCD9335_CDC_RX7_RX_PATH_SEC7)] = 1,
+	[WCD9335_REG(WCD9335_CDC_RX7_RX_PATH_MIX_SEC0)] = 1,
+	[WCD9335_REG(WCD9335_CDC_RX7_RX_PATH_MIX_SEC1)] = 1,
+	[WCD9335_REG(WCD9335_CDC_RX8_RX_PATH_CTL)] = 1,
+	[WCD9335_REG(WCD9335_CDC_RX8_RX_PATH_CFG0)] = 1,
+	[WCD9335_REG(WCD9335_CDC_RX8_RX_PATH_CFG1)] = 1,
+	[WCD9335_REG(WCD9335_CDC_RX8_RX_PATH_CFG2)] = 1,
+	[WCD9335_REG(WCD9335_CDC_RX8_RX_VOL_CTL)] = 1,
+	[WCD9335_REG(WCD9335_CDC_RX8_RX_PATH_MIX_CTL)] = 1,
+	[WCD9335_REG(WCD9335_CDC_RX8_RX_PATH_MIX_CFG)] = 1,
+	[WCD9335_REG(WCD9335_CDC_RX8_RX_VOL_MIX_CTL)] = 1,
+	[WCD9335_REG(WCD9335_CDC_RX8_RX_PATH_SEC0)] = 1,
+	[WCD9335_REG(WCD9335_CDC_RX8_RX_PATH_SEC1)] = 1,
+	[WCD9335_REG(WCD9335_CDC_RX8_RX_PATH_SEC2)] = 1,
+	[WCD9335_REG(WCD9335_CDC_RX8_RX_PATH_SEC3)] = 1,
+	[WCD9335_REG(WCD9335_CDC_RX8_RX_PATH_SEC5)] = 1,
+	[WCD9335_REG(WCD9335_CDC_RX8_RX_PATH_SEC6)] = 1,
+	[WCD9335_REG(WCD9335_CDC_RX8_RX_PATH_SEC7)] = 1,
+	[WCD9335_REG(WCD9335_CDC_RX8_RX_PATH_MIX_SEC0)] = 1,
+	[WCD9335_REG(WCD9335_CDC_RX8_RX_PATH_MIX_SEC1)] = 1,
+};
+
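+/* page 12: class-H, boost, SWR (SoundWire) bridge, VBAT and spline SRC */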
+const u8 wcd9335_page12_reg_readable[WCD9335_PAGE_SIZE] = {
+	[WCD9335_REG(WCD9335_PAGE12_PAGE_REGISTER)] = 1,
+	[WCD9335_REG(WCD9335_CDC_CLSH_CRC)] = 1,
+	[WCD9335_REG(WCD9335_CDC_CLSH_DLY_CTRL)] = 1,
+	[WCD9335_REG(WCD9335_CDC_CLSH_DECAY_CTRL)] = 1,
+	[WCD9335_REG(WCD9335_CDC_CLSH_HPH_V_PA)] = 1,
+	[WCD9335_REG(WCD9335_CDC_CLSH_EAR_V_PA)] = 1,
+	[WCD9335_REG(WCD9335_CDC_CLSH_HPH_V_HD)] = 1,
+	[WCD9335_REG(WCD9335_CDC_CLSH_EAR_V_HD)] = 1,
+	[WCD9335_REG(WCD9335_CDC_CLSH_K1_MSB)] = 1,
+	[WCD9335_REG(WCD9335_CDC_CLSH_K1_LSB)] = 1,
+	[WCD9335_REG(WCD9335_CDC_CLSH_K2_MSB)] = 1,
+	[WCD9335_REG(WCD9335_CDC_CLSH_K2_LSB)] = 1,
+	[WCD9335_REG(WCD9335_CDC_CLSH_IDLE_CTRL)] = 1,
+	[WCD9335_REG(WCD9335_CDC_CLSH_IDLE_HPH)] = 1,
+	[WCD9335_REG(WCD9335_CDC_CLSH_IDLE_EAR)] = 1,
+	[WCD9335_REG(WCD9335_CDC_CLSH_TEST0)] = 1,
+	[WCD9335_REG(WCD9335_CDC_CLSH_TEST1)] = 1,
+	[WCD9335_REG(WCD9335_CDC_CLSH_OVR_VREF)] = 1,
+	[WCD9335_REG(WCD9335_CDC_BOOST0_BOOST_PATH_CTL)] = 1,
+	[WCD9335_REG(WCD9335_CDC_BOOST0_BOOST_CTL)] = 1,
+	[WCD9335_REG(WCD9335_CDC_BOOST0_BOOST_CFG1)] = 1,
+	[WCD9335_REG(WCD9335_CDC_BOOST0_BOOST_CFG2)] = 1,
+	[WCD9335_REG(WCD9335_CDC_BOOST1_BOOST_PATH_CTL)] = 1,
+	[WCD9335_REG(WCD9335_CDC_BOOST1_BOOST_CTL)] = 1,
+	[WCD9335_REG(WCD9335_CDC_BOOST1_BOOST_CFG1)] = 1,
+	[WCD9335_REG(WCD9335_CDC_BOOST1_BOOST_CFG2)] = 1,
+	[WCD9335_REG(WCD9335_SWR_AHB_BRIDGE_WR_DATA_0)] = 1,
+	[WCD9335_REG(WCD9335_SWR_AHB_BRIDGE_WR_DATA_1)] = 1,
+	[WCD9335_REG(WCD9335_SWR_AHB_BRIDGE_WR_DATA_2)] = 1,
+	[WCD9335_REG(WCD9335_SWR_AHB_BRIDGE_WR_DATA_3)] = 1,
+	[WCD9335_REG(WCD9335_SWR_AHB_BRIDGE_WR_ADDR_0)] = 1,
+	[WCD9335_REG(WCD9335_SWR_AHB_BRIDGE_WR_ADDR_1)] = 1,
+	[WCD9335_REG(WCD9335_SWR_AHB_BRIDGE_WR_ADDR_2)] = 1,
+	[WCD9335_REG(WCD9335_SWR_AHB_BRIDGE_WR_ADDR_3)] = 1,
+	[WCD9335_REG(WCD9335_SWR_AHB_BRIDGE_RD_ADDR_0)] = 1,
+	[WCD9335_REG(WCD9335_SWR_AHB_BRIDGE_RD_ADDR_1)] = 1,
+	[WCD9335_REG(WCD9335_SWR_AHB_BRIDGE_RD_ADDR_2)] = 1,
+	[WCD9335_REG(WCD9335_SWR_AHB_BRIDGE_RD_ADDR_3)] = 1,
+	[WCD9335_REG(WCD9335_SWR_AHB_BRIDGE_RD_DATA_0)] = 1,
+	[WCD9335_REG(WCD9335_SWR_AHB_BRIDGE_RD_DATA_1)] = 1,
+	[WCD9335_REG(WCD9335_SWR_AHB_BRIDGE_RD_DATA_2)] = 1,
+	[WCD9335_REG(WCD9335_SWR_AHB_BRIDGE_RD_DATA_3)] = 1,
+	[WCD9335_REG(WCD9335_SWR_AHB_BRIDGE_ACCESS_CFG)] = 1,
+	[WCD9335_REG(WCD9335_SWR_AHB_BRIDGE_ACCESS_STATUS)] = 1,
+	[WCD9335_REG(WCD9335_CDC_VBAT_VBAT_PATH_CTL)] = 1,
+	[WCD9335_REG(WCD9335_CDC_VBAT_VBAT_CFG)] = 1,
+	[WCD9335_REG(WCD9335_CDC_VBAT_VBAT_ADC_CAL1)] = 1,
+	[WCD9335_REG(WCD9335_CDC_VBAT_VBAT_ADC_CAL2)] = 1,
+	[WCD9335_REG(WCD9335_CDC_VBAT_VBAT_ADC_CAL3)] = 1,
+	[WCD9335_REG(WCD9335_CDC_VBAT_VBAT_PK_EST1)] = 1,
+	[WCD9335_REG(WCD9335_CDC_VBAT_VBAT_PK_EST2)] = 1,
+	[WCD9335_REG(WCD9335_CDC_VBAT_VBAT_PK_EST3)] = 1,
+	[WCD9335_REG(WCD9335_CDC_VBAT_VBAT_RF_PROC1)] = 1,
+	[WCD9335_REG(WCD9335_CDC_VBAT_VBAT_RF_PROC2)] = 1,
+	[WCD9335_REG(WCD9335_CDC_VBAT_VBAT_TAC1)] = 1,
+	[WCD9335_REG(WCD9335_CDC_VBAT_VBAT_TAC2)] = 1,
+	[WCD9335_REG(WCD9335_CDC_VBAT_VBAT_TAC3)] = 1,
+	[WCD9335_REG(WCD9335_CDC_VBAT_VBAT_TAC4)] = 1,
+	[WCD9335_REG(WCD9335_CDC_VBAT_VBAT_GAIN_UPD1)] = 1,
+	[WCD9335_REG(WCD9335_CDC_VBAT_VBAT_GAIN_UPD2)] = 1,
+	[WCD9335_REG(WCD9335_CDC_VBAT_VBAT_GAIN_UPD3)] = 1,
+	[WCD9335_REG(WCD9335_CDC_VBAT_VBAT_GAIN_UPD4)] = 1,
+	[WCD9335_REG(WCD9335_CDC_VBAT_VBAT_DEBUG1)] = 1,
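+	/* VBAT_GAIN_UPD_MON is write-only */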
+	[WCD9335_REG(WCD9335_CDC_VBAT_VBAT_GAIN_UPD_MON)] = 0,
+	[WCD9335_REG(WCD9335_CDC_VBAT_VBAT_GAIN_MON_VAL)] = 1,
+	[WCD9335_REG(WCD9335_SPLINE_SRC0_CLK_RST_CTL_0)] = 1,
+	[WCD9335_REG(WCD9335_SPLINE_SRC0_STATUS)] = 1,
+	[WCD9335_REG(WCD9335_SPLINE_SRC1_CLK_RST_CTL_0)] = 1,
+	[WCD9335_REG(WCD9335_SPLINE_SRC1_STATUS)] = 1,
+	[WCD9335_REG(WCD9335_SPLINE_SRC2_CLK_RST_CTL_0)] = 1,
+	[WCD9335_REG(WCD9335_SPLINE_SRC2_STATUS)] = 1,
+	[WCD9335_REG(WCD9335_SPLINE_SRC3_CLK_RST_CTL_0)] = 1,
+	[WCD9335_REG(WCD9335_SPLINE_SRC3_STATUS)] = 1,
+	[WCD9335_REG(WCD9335_CDC_SIDETONE_SRC0_ST_SRC_PATH_CTL)] = 1,
+	[WCD9335_REG(WCD9335_CDC_SIDETONE_SRC0_ST_SRC_PATH_CFG1)] = 1,
+	[WCD9335_REG(WCD9335_CDC_SIDETONE_SRC1_ST_SRC_PATH_CTL)] = 1,
+	[WCD9335_REG(WCD9335_CDC_SIDETONE_SRC1_ST_SRC_PATH_CFG1)] = 1,
+};
+
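+/* page 13: RX/TX input muxes, sidetone IIRs, prox detect and codec top */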
+const u8 wcd9335_page13_reg_readable[WCD9335_PAGE_SIZE] = {
+	[WCD9335_REG(WCD9335_PAGE13_PAGE_REGISTER)] = 1,
+	[WCD9335_REG(WCD9335_CDC_RX_INP_MUX_RX_INT0_CFG0)] = 1,
+	[WCD9335_REG(WCD9335_CDC_RX_INP_MUX_RX_INT0_CFG1)] = 1,
+	[WCD9335_REG(WCD9335_CDC_RX_INP_MUX_RX_INT1_CFG0)] = 1,
+	[WCD9335_REG(WCD9335_CDC_RX_INP_MUX_RX_INT1_CFG1)] = 1,
+	[WCD9335_REG(WCD9335_CDC_RX_INP_MUX_RX_INT2_CFG0)] = 1,
+	[WCD9335_REG(WCD9335_CDC_RX_INP_MUX_RX_INT2_CFG1)] = 1,
+	[WCD9335_REG(WCD9335_CDC_RX_INP_MUX_RX_INT3_CFG0)] = 1,
+	[WCD9335_REG(WCD9335_CDC_RX_INP_MUX_RX_INT3_CFG1)] = 1,
+	[WCD9335_REG(WCD9335_CDC_RX_INP_MUX_RX_INT4_CFG0)] = 1,
+	[WCD9335_REG(WCD9335_CDC_RX_INP_MUX_RX_INT4_CFG1)] = 1,
+	[WCD9335_REG(WCD9335_CDC_RX_INP_MUX_RX_INT5_CFG0)] = 1,
+	[WCD9335_REG(WCD9335_CDC_RX_INP_MUX_RX_INT5_CFG1)] = 1,
+	[WCD9335_REG(WCD9335_CDC_RX_INP_MUX_RX_INT6_CFG0)] = 1,
+	[WCD9335_REG(WCD9335_CDC_RX_INP_MUX_RX_INT6_CFG1)] = 1,
+	[WCD9335_REG(WCD9335_CDC_RX_INP_MUX_RX_INT7_CFG0)] = 1,
+	[WCD9335_REG(WCD9335_CDC_RX_INP_MUX_RX_INT7_CFG1)] = 1,
+	[WCD9335_REG(WCD9335_CDC_RX_INP_MUX_RX_INT8_CFG0)] = 1,
+	[WCD9335_REG(WCD9335_CDC_RX_INP_MUX_RX_INT8_CFG1)] = 1,
+	[WCD9335_REG(WCD9335_CDC_RX_INP_MUX_RX_MIX_CFG0)] = 1,
+	[WCD9335_REG(WCD9335_CDC_RX_INP_MUX_RX_MIX_CFG1)] = 1,
+	[WCD9335_REG(WCD9335_CDC_RX_INP_MUX_RX_MIX_CFG2)] = 1,
+	[WCD9335_REG(WCD9335_CDC_RX_INP_MUX_RX_MIX_CFG3)] = 1,
+	[WCD9335_REG(WCD9335_CDC_RX_INP_MUX_RX_MIX_CFG4)] = 1,
+	[WCD9335_REG(WCD9335_CDC_RX_INP_MUX_SIDETONE_SRC_CFG0)] = 1,
+	[WCD9335_REG(WCD9335_CDC_RX_INP_MUX_SIDETONE_SRC_CFG1)] = 1,
+	[WCD9335_REG(WCD9335_CDC_RX_INP_MUX_ANC_CFG0)] = 1,
+	[WCD9335_REG(WCD9335_CDC_RX_INP_MUX_SPLINE_SRC_CFG0)] = 1,
+	[WCD9335_REG(WCD9335_CDC_TX_INP_MUX_ADC_MUX0_CFG0)] = 1,
+	[WCD9335_REG(WCD9335_CDC_TX_INP_MUX_ADC_MUX0_CFG1)] = 1,
+	[WCD9335_REG(WCD9335_CDC_TX_INP_MUX_ADC_MUX1_CFG0)] = 1,
+	[WCD9335_REG(WCD9335_CDC_TX_INP_MUX_ADC_MUX1_CFG1)] = 1,
+	[WCD9335_REG(WCD9335_CDC_TX_INP_MUX_ADC_MUX2_CFG0)] = 1,
+	[WCD9335_REG(WCD9335_CDC_TX_INP_MUX_ADC_MUX2_CFG1)] = 1,
+	[WCD9335_REG(WCD9335_CDC_TX_INP_MUX_ADC_MUX3_CFG0)] = 1,
+	[WCD9335_REG(WCD9335_CDC_TX_INP_MUX_ADC_MUX3_CFG1)] = 1,
+	[WCD9335_REG(WCD9335_CDC_TX_INP_MUX_ADC_MUX4_CFG0)] = 1,
+	[WCD9335_REG(WCD9335_CDC_TX_INP_MUX_ADC_MUX5_CFG0)] = 1,
+	[WCD9335_REG(WCD9335_CDC_TX_INP_MUX_ADC_MUX6_CFG0)] = 1,
+	[WCD9335_REG(WCD9335_CDC_TX_INP_MUX_ADC_MUX7_CFG0)] = 1,
+	[WCD9335_REG(WCD9335_CDC_TX_INP_MUX_ADC_MUX8_CFG0)] = 1,
+	[WCD9335_REG(WCD9335_CDC_TX_INP_MUX_ADC_MUX10_CFG0)] = 1,
+	[WCD9335_REG(WCD9335_CDC_TX_INP_MUX_ADC_MUX11_CFG0)] = 1,
+	[WCD9335_REG(WCD9335_CDC_TX_INP_MUX_ADC_MUX12_CFG0)] = 1,
+	[WCD9335_REG(WCD9335_CDC_TX_INP_MUX_ADC_MUX13_CFG0)] = 1,
+	[WCD9335_REG(WCD9335_CDC_SIDETONE_IIR_INP_MUX_IIR0_MIX_CFG0)] = 1,
+	[WCD9335_REG(WCD9335_CDC_SIDETONE_IIR_INP_MUX_IIR0_MIX_CFG1)] = 1,
+	[WCD9335_REG(WCD9335_CDC_SIDETONE_IIR_INP_MUX_IIR0_MIX_CFG2)] = 1,
+	[WCD9335_REG(WCD9335_CDC_SIDETONE_IIR_INP_MUX_IIR0_MIX_CFG3)] = 1,
+	[WCD9335_REG(WCD9335_CDC_SIDETONE_IIR_INP_MUX_IIR1_MIX_CFG0)] = 1,
+	[WCD9335_REG(WCD9335_CDC_SIDETONE_IIR_INP_MUX_IIR1_MIX_CFG1)] = 1,
+	[WCD9335_REG(WCD9335_CDC_SIDETONE_IIR_INP_MUX_IIR1_MIX_CFG2)] = 1,
+	[WCD9335_REG(WCD9335_CDC_SIDETONE_IIR_INP_MUX_IIR1_MIX_CFG3)] = 1,
+	[WCD9335_REG(WCD9335_CDC_IF_ROUTER_TX_MUX_CFG0)] = 1,
+	[WCD9335_REG(WCD9335_CDC_IF_ROUTER_TX_MUX_CFG1)] = 1,
+	[WCD9335_REG(WCD9335_CDC_IF_ROUTER_TX_MUX_CFG2)] = 1,
+	[WCD9335_REG(WCD9335_CDC_IF_ROUTER_TX_MUX_CFG3)] = 1,
+	[WCD9335_REG(WCD9335_CDC_CLK_RST_CTRL_MCLK_CONTROL)] = 1,
+	[WCD9335_REG(WCD9335_CDC_CLK_RST_CTRL_FS_CNT_CONTROL)] = 1,
+	[WCD9335_REG(WCD9335_CDC_CLK_RST_CTRL_SWR_CONTROL)] = 1,
+	[WCD9335_REG(WCD9335_CDC_PROX_DETECT_PROX_CTL)] = 1,
+	[WCD9335_REG(WCD9335_CDC_PROX_DETECT_PROX_POLL_PERIOD0)] = 1,
+	[WCD9335_REG(WCD9335_CDC_PROX_DETECT_PROX_POLL_PERIOD1)] = 1,
+	[WCD9335_REG(WCD9335_CDC_PROX_DETECT_PROX_SIG_PATTERN_LSB)] = 1,
+	[WCD9335_REG(WCD9335_CDC_PROX_DETECT_PROX_SIG_PATTERN_MSB)] = 1,
+	[WCD9335_REG(WCD9335_CDC_PROX_DETECT_PROX_STATUS)] = 1,
+	[WCD9335_REG(WCD9335_CDC_PROX_DETECT_PROX_TEST_CTRL)] = 1,
+	[WCD9335_REG(WCD9335_CDC_PROX_DETECT_PROX_TEST_BUFF_LSB)] = 1,
+	[WCD9335_REG(WCD9335_CDC_PROX_DETECT_PROX_TEST_BUFF_MSB)] = 1,
+	[WCD9335_REG(WCD9335_CDC_PROX_DETECT_PROX_TEST_BUFF_LSB_RD)] = 1,
+	[WCD9335_REG(WCD9335_CDC_PROX_DETECT_PROX_TEST_BUFF_MSB_RD)] = 1,
+	[WCD9335_REG(WCD9335_CDC_PROX_DETECT_PROX_CTL_REPEAT_PAT)] = 1,
+	[WCD9335_REG(WCD9335_CDC_SIDETONE_IIR0_IIR_PATH_CTL)] = 1,
+	[WCD9335_REG(WCD9335_CDC_SIDETONE_IIR0_IIR_GAIN_B1_CTL)] = 1,
+	[WCD9335_REG(WCD9335_CDC_SIDETONE_IIR0_IIR_GAIN_B2_CTL)] = 1,
+	[WCD9335_REG(WCD9335_CDC_SIDETONE_IIR0_IIR_GAIN_B3_CTL)] = 1,
+	[WCD9335_REG(WCD9335_CDC_SIDETONE_IIR0_IIR_GAIN_B4_CTL)] = 1,
+	[WCD9335_REG(WCD9335_CDC_SIDETONE_IIR0_IIR_GAIN_B5_CTL)] = 1,
+	[WCD9335_REG(WCD9335_CDC_SIDETONE_IIR0_IIR_GAIN_B6_CTL)] = 1,
+	[WCD9335_REG(WCD9335_CDC_SIDETONE_IIR0_IIR_GAIN_B7_CTL)] = 1,
+	[WCD9335_REG(WCD9335_CDC_SIDETONE_IIR0_IIR_GAIN_B8_CTL)] = 1,
+	[WCD9335_REG(WCD9335_CDC_SIDETONE_IIR0_IIR_CTL)] = 1,
+	[WCD9335_REG(WCD9335_CDC_SIDETONE_IIR0_IIR_GAIN_TIMER_CTL)] = 1,
+	[WCD9335_REG(WCD9335_CDC_SIDETONE_IIR0_IIR_COEF_B1_CTL)] = 1,
+	[WCD9335_REG(WCD9335_CDC_SIDETONE_IIR0_IIR_COEF_B2_CTL)] = 1,
+	[WCD9335_REG(WCD9335_CDC_SIDETONE_IIR1_IIR_PATH_CTL)] = 1,
+	[WCD9335_REG(WCD9335_CDC_SIDETONE_IIR1_IIR_GAIN_B1_CTL)] = 1,
+	[WCD9335_REG(WCD9335_CDC_SIDETONE_IIR1_IIR_GAIN_B2_CTL)] = 1,
+	[WCD9335_REG(WCD9335_CDC_SIDETONE_IIR1_IIR_GAIN_B3_CTL)] = 1,
+	[WCD9335_REG(WCD9335_CDC_SIDETONE_IIR1_IIR_GAIN_B4_CTL)] = 1,
+	[WCD9335_REG(WCD9335_CDC_SIDETONE_IIR1_IIR_GAIN_B5_CTL)] = 1,
+	[WCD9335_REG(WCD9335_CDC_SIDETONE_IIR1_IIR_GAIN_B6_CTL)] = 1,
+	[WCD9335_REG(WCD9335_CDC_SIDETONE_IIR1_IIR_GAIN_B7_CTL)] = 1,
+	[WCD9335_REG(WCD9335_CDC_SIDETONE_IIR1_IIR_GAIN_B8_CTL)] = 1,
+	[WCD9335_REG(WCD9335_CDC_SIDETONE_IIR1_IIR_CTL)] = 1,
+	[WCD9335_REG(WCD9335_CDC_SIDETONE_IIR1_IIR_GAIN_TIMER_CTL)] = 1,
+	[WCD9335_REG(WCD9335_CDC_SIDETONE_IIR1_IIR_COEF_B1_CTL)] = 1,
+	[WCD9335_REG(WCD9335_CDC_SIDETONE_IIR1_IIR_COEF_B2_CTL)] = 1,
+	[WCD9335_REG(WCD9335_CDC_TOP_TOP_CFG0)] = 1,
+	[WCD9335_REG(WCD9335_CDC_TOP_TOP_CFG1)] = 1,
+	[WCD9335_REG(WCD9335_CDC_TOP_TOP_CFG2)] = 1,
+	[WCD9335_REG(WCD9335_CDC_TOP_TOP_CFG3)] = 1,
+	[WCD9335_REG(WCD9335_CDC_TOP_TOP_CFG4)] = 1,
+	[WCD9335_REG(WCD9335_CDC_TOP_TOP_CFG5)] = 1,
+	[WCD9335_REG(WCD9335_CDC_TOP_TOP_CFG6)] = 1,
+	[WCD9335_REG(WCD9335_CDC_TOP_TOP_CFG7)] = 1,
+	[WCD9335_REG(WCD9335_CDC_TOP_HPHL_COMP_WR_LSB)] = 1,
+	[WCD9335_REG(WCD9335_CDC_TOP_HPHL_COMP_WR_MSB)] = 1,
+	[WCD9335_REG(WCD9335_CDC_TOP_HPHL_COMP_LUT)] = 1,
+	[WCD9335_REG(WCD9335_CDC_TOP_HPHL_COMP_RD_LSB)] = 1,
+	[WCD9335_REG(WCD9335_CDC_TOP_HPHL_COMP_RD_MSB)] = 1,
+	[WCD9335_REG(WCD9335_CDC_TOP_HPHR_COMP_WR_LSB)] = 1,
+	[WCD9335_REG(WCD9335_CDC_TOP_HPHR_COMP_WR_MSB)] = 1,
+	[WCD9335_REG(WCD9335_CDC_TOP_HPHR_COMP_LUT)] = 1,
+	[WCD9335_REG(WCD9335_CDC_TOP_HPHR_COMP_RD_LSB)] = 1,
+	[WCD9335_REG(WCD9335_CDC_TOP_HPHR_COMP_RD_MSB)] = 1,
+	[WCD9335_REG(WCD9335_CDC_TOP_DIFFL_COMP_WR_LSB)] = 1,
+	[WCD9335_REG(WCD9335_CDC_TOP_DIFFL_COMP_WR_MSB)] = 1,
+	[WCD9335_REG(WCD9335_CDC_TOP_DIFFL_COMP_LUT)] = 1,
+	[WCD9335_REG(WCD9335_CDC_TOP_DIFFL_COMP_RD_LSB)] = 1,
+	[WCD9335_REG(WCD9335_CDC_TOP_DIFFL_COMP_RD_MSB)] = 1,
+	[WCD9335_REG(WCD9335_CDC_TOP_DIFFR_COMP_WR_LSB)] = 1,
+	[WCD9335_REG(WCD9335_CDC_TOP_DIFFR_COMP_WR_MSB)] = 1,
+	[WCD9335_REG(WCD9335_CDC_TOP_DIFFR_COMP_LUT)] = 1,
+	[WCD9335_REG(WCD9335_CDC_TOP_DIFFR_COMP_RD_LSB)] = 1,
+	[WCD9335_REG(WCD9335_CDC_TOP_DIFFR_COMP_RD_MSB)] = 1,
+};
+
+const u8 wcd9335_page_0x80_reg_readable[WCD9335_PAGE_SIZE] = {
+	[WCD9335_REG(WCD9335_PAGE80_PAGE_REGISTER)] = 1,
+	[WCD9335_REG(WCD9335_TLMM_BIST_MODE_PINCFG)] = 1,
+	[WCD9335_REG(WCD9335_TLMM_RF_PA_ON_PINCFG)] = 1,
+	[WCD9335_REG(WCD9335_TLMM_INTR1_PINCFG)] = 1,
+	[WCD9335_REG(WCD9335_TLMM_INTR2_PINCFG)] = 1,
+	[WCD9335_REG(WCD9335_TLMM_SWR_DATA_PINCFG)] = 1,
+	[WCD9335_REG(WCD9335_TLMM_SWR_CLK_PINCFG)] = 1,
+	[WCD9335_REG(WCD9335_TLMM_SLIMBUS_DATA2_PINCFG)] = 1,
+	[WCD9335_REG(WCD9335_TLMM_I2C_CLK_PINCFG)] = 1,
+	[WCD9335_REG(WCD9335_TLMM_I2C_DATA_PINCFG)] = 1,
+	[WCD9335_REG(WCD9335_TLMM_I2S_RX_SD0_PINCFG)] = 1,
+	[WCD9335_REG(WCD9335_TLMM_I2S_RX_SD1_PINCFG)] = 1,
+	[WCD9335_REG(WCD9335_TLMM_I2S_RX_SCK_PINCFG)] = 1,
+	[WCD9335_REG(WCD9335_TLMM_I2S_RX_WS_PINCFG)] = 1,
+	[WCD9335_REG(WCD9335_TLMM_I2S_TX_SD0_PINCFG)] = 1,
+	[WCD9335_REG(WCD9335_TLMM_I2S_TX_SD1_PINCFG)] = 1,
+	[WCD9335_REG(WCD9335_TLMM_I2S_TX_SCK_PINCFG)] = 1,
+	[WCD9335_REG(WCD9335_TLMM_I2S_TX_WS_PINCFG)] = 1,
+	[WCD9335_REG(WCD9335_TLMM_DMIC1_CLK_PINCFG)] = 1,
+	[WCD9335_REG(WCD9335_TLMM_DMIC1_DATA_PINCFG)] = 1,
+	[WCD9335_REG(WCD9335_TLMM_DMIC2_CLK_PINCFG)] = 1,
+	[WCD9335_REG(WCD9335_TLMM_DMIC2_DATA_PINCFG)] = 1,
+	[WCD9335_REG(WCD9335_TLMM_DMIC3_CLK_PINCFG)] = 1,
+	[WCD9335_REG(WCD9335_TLMM_DMIC3_DATA_PINCFG)] = 1,
+	[WCD9335_REG(WCD9335_TLMM_JTDI_PINCFG)] = 1,
+	[WCD9335_REG(WCD9335_TLMM_JTDO_PINCFG)] = 1,
+	[WCD9335_REG(WCD9335_TLMM_JTMS_PINCFG)] = 1,
+	[WCD9335_REG(WCD9335_TLMM_JTCK_PINCFG)] = 1,
+	[WCD9335_REG(WCD9335_TLMM_JTRST_PINCFG)] = 1,
+	[WCD9335_REG(WCD9335_TEST_DEBUG_PIN_CTL_OE_0)] = 1,
+	[WCD9335_REG(WCD9335_TEST_DEBUG_PIN_CTL_OE_1)] = 1,
+	[WCD9335_REG(WCD9335_TEST_DEBUG_PIN_CTL_OE_2)] = 1,
+	[WCD9335_REG(WCD9335_TEST_DEBUG_PIN_CTL_OE_3)] = 1,
+	[WCD9335_REG(WCD9335_TEST_DEBUG_PIN_CTL_DATA_0)] = 1,
+	[WCD9335_REG(WCD9335_TEST_DEBUG_PIN_CTL_DATA_1)] = 1,
+	[WCD9335_REG(WCD9335_TEST_DEBUG_PIN_CTL_DATA_2)] = 1,
+	[WCD9335_REG(WCD9335_TEST_DEBUG_PIN_CTL_DATA_3)] = 1,
+	[WCD9335_REG(WCD9335_TEST_DEBUG_PAD_DRVCTL)] = 1,
+	[WCD9335_REG(WCD9335_TEST_DEBUG_PIN_STATUS)] = 1,
+	[WCD9335_REG(WCD9335_TEST_DEBUG_NPL_DLY_TEST_1)] = 1,
+	[WCD9335_REG(WCD9335_TEST_DEBUG_NPL_DLY_TEST_2)] = 1,
+	[WCD9335_REG(WCD9335_TEST_DEBUG_MEM_CTRL)] = 1,
+	[WCD9335_REG(WCD9335_TEST_DEBUG_DEBUG_BUS_SEL)] = 1,
+	[WCD9335_REG(WCD9335_TEST_DEBUG_DEBUG_JTAG)] = 1,
+	[WCD9335_REG(WCD9335_TEST_DEBUG_DEBUG_EN_1)] = 1,
+	[WCD9335_REG(WCD9335_TEST_DEBUG_DEBUG_EN_2)] = 1,
+	[WCD9335_REG(WCD9335_TEST_DEBUG_DEBUG_EN_3)] = 1,
+};
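
These per-page readable tables rely on C99 designated initializers: every in-page offset that is not named stays zero, so only the registers listed explicitly are marked readable. The idiom is easy to verify in isolation; the stand-alone demo below (illustrative only — the table size is made up and stands in for WCD9335_PAGE_SIZE, it is not part of the patch) prints which slots a sparse designated-initializer table marks:

	#include <stdio.h>

	#define DEMO_PAGE_SIZE 8	/* hypothetical, stands in for WCD9335_PAGE_SIZE */

	/* Same idiom as the tables above: unnamed slots default to 0,
	 * i.e. "not readable"; only the listed offsets become 1. */
	static const unsigned char demo_readable[DEMO_PAGE_SIZE] = {
		[2] = 1,
		[5] = 1,
	};

	int main(void)
	{
		for (int i = 0; i < DEMO_PAGE_SIZE; i++)
			printf("offset %d: %s\n", i,
			       demo_readable[i] ? "readable" : "not readable");
		return 0;
	}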
+
+const u8 *wcd9335_reg[WCD9335_NUM_PAGES] = {
+	[PAGE_0] = wcd9335_page0_reg_readable,
+	[PAGE_1] = wcd9335_page1_reg_readable,
+	[PAGE_2] = wcd9335_page2_reg_readable,
+	[PAGE_6] = wcd9335_page6_reg_readable,
+	[PAGE_10] = wcd9335_page10_reg_readable,
+	[PAGE_11] = wcd9335_page11_reg_readable,
+	[PAGE_12] = wcd9335_page12_reg_readable,
+	[PAGE_13] = wcd9335_page13_reg_readable,
+	[PAGE_0X80] = wcd9335_page_0x80_reg_readable,
+};
+
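The wcd9335_reg array above maps each hardware page number to its per-page readable-flag table; pages with no entry remain NULL. A regmap readable_reg callback can therefore decode a register address into a page and an in-page offset and answer with a single table lookup. The sketch below is illustrative only: the helper name, and the assumption that the high byte of reg selects the page while WCD9335_REG() keeps the in-page offset (as in the table initializers), are not taken from the patch.

	#include <linux/regmap.h>

	/* Minimal sketch of a readable_reg callback built on the tables
	 * above.  The page/offset decode is an assumption for illustration;
	 * the driver's real decode may differ. */
	static bool wcd9335_readable_sketch(struct device *dev, unsigned int reg)
	{
		unsigned int page = reg >> 8;	/* assumed page selector */

		if (page >= WCD9335_NUM_PAGES || !wcd9335_reg[page])
			return false;		/* unmapped page: not readable */

		return wcd9335_reg[page][WCD9335_REG(reg)] != 0;
	}
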
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/mfd/wcd934x-regmap.c	2019-10-29 09:26:24.033206995 +0100
@@ -0,0 +1,1955 @@
+/*
+ * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/mfd/wcd9xxx/core.h>
+#include <linux/mfd/wcd934x/registers.h>
+#include <linux/regmap.h>
+#include <linux/device.h>
+#include "wcd9xxx-regmap.h"
+
+
+static const struct reg_sequence wcd934x_1_1_defaults[] = {
+	{ WCD934X_CHIP_TIER_CTRL_CHIP_ID_BYTE0,             0x01 },
+	{ WCD934X_BIAS_VBG_FINE_ADJ,                        0x75 },
+	{ WCD934X_HPH_REFBUFF_LP_CTL,                       0x0E },
+	{ WCD934X_EAR_DAC_CTL_ATEST,                        0x08 },
+	{ WCD934X_SIDO_NEW_VOUT_A_STARTUP,                  0x17 },
+	{ WCD934X_HPH_NEW_INT_RDAC_GAIN_CTL,                0x40 },
+	{ WCD934X_HPH_NEW_INT_RDAC_HD2_CTL_L,               0x81 },
+	{ WCD934X_HPH_NEW_INT_RDAC_HD2_CTL_R,               0x81 },
+};
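
A struct reg_sequence table such as wcd934x_1_1_defaults is the shape regmap expects for a register patch: a list of writes applied once up front and replayed after every cache sync, which suits silicon-revision-specific overrides like these v1.1 values. A minimal sketch of applying it follows; the wrapper name and call site are assumptions, only regmap_register_patch() itself is a real regmap API.

	/* Minimal sketch: apply the v1.1 overrides as a regmap patch so
	 * they are rewritten after every regcache sync cycle. */
	static int wcd934x_apply_1_1_defaults(struct regmap *map)
	{
		return regmap_register_patch(map, wcd934x_1_1_defaults,
					     ARRAY_SIZE(wcd934x_1_1_defaults));
	}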
+
+static const struct reg_default wcd934x_defaults[] = {
+	{ WCD934X_PAGE0_PAGE_REGISTER,                      0x00 },
+	{ WCD934X_CODEC_RPM_CLK_BYPASS,                     0x00 },
+	{ WCD934X_CODEC_RPM_CLK_GATE,                       0x1f },
+	{ WCD934X_CODEC_RPM_CLK_MCLK_CFG,                   0x00 },
+	{ WCD934X_CODEC_RPM_CLK_MCLK2_CFG,                  0x02 },
+	{ WCD934X_CODEC_RPM_I2S_DSD_CLK_SEL,                0x00 },
+	{ WCD934X_CODEC_RPM_RST_CTL,                        0x00 },
+	{ WCD934X_CODEC_RPM_PWR_CDC_DIG_HM_CTL,             0x04 },
+	{ WCD934X_CHIP_TIER_CTRL_CHIP_ID_BYTE0,             0x00 },
+	{ WCD934X_CHIP_TIER_CTRL_CHIP_ID_BYTE1,             0x00 },
+	{ WCD934X_CHIP_TIER_CTRL_CHIP_ID_BYTE2,             0x08 },
+	{ WCD934X_CHIP_TIER_CTRL_CHIP_ID_BYTE3,             0x01 },
+	{ WCD934X_CHIP_TIER_CTRL_EFUSE_CTL,                 0x10 },
+	{ WCD934X_CHIP_TIER_CTRL_EFUSE_TEST0,               0x00 },
+	{ WCD934X_CHIP_TIER_CTRL_EFUSE_TEST1,               0x00 },
+	{ WCD934X_CHIP_TIER_CTRL_EFUSE_VAL_OUT0,            0x00 },
+	{ WCD934X_CHIP_TIER_CTRL_EFUSE_VAL_OUT1,            0x00 },
+	{ WCD934X_CHIP_TIER_CTRL_EFUSE_VAL_OUT2,            0x00 },
+	{ WCD934X_CHIP_TIER_CTRL_EFUSE_VAL_OUT3,            0x00 },
+	{ WCD934X_CHIP_TIER_CTRL_EFUSE_VAL_OUT4,            0x00 },
+	{ WCD934X_CHIP_TIER_CTRL_EFUSE_VAL_OUT5,            0x00 },
+	{ WCD934X_CHIP_TIER_CTRL_EFUSE_VAL_OUT6,            0x00 },
+	{ WCD934X_CHIP_TIER_CTRL_EFUSE_VAL_OUT7,            0x00 },
+	{ WCD934X_CHIP_TIER_CTRL_EFUSE_VAL_OUT8,            0x00 },
+	{ WCD934X_CHIP_TIER_CTRL_EFUSE_VAL_OUT9,            0x00 },
+	{ WCD934X_CHIP_TIER_CTRL_EFUSE_VAL_OUT10,           0x00 },
+	{ WCD934X_CHIP_TIER_CTRL_EFUSE_VAL_OUT11,           0x00 },
+	{ WCD934X_CHIP_TIER_CTRL_EFUSE_VAL_OUT12,           0x00 },
+	{ WCD934X_CHIP_TIER_CTRL_EFUSE_VAL_OUT13,           0x00 },
+	{ WCD934X_CHIP_TIER_CTRL_EFUSE_VAL_OUT14,           0x00 },
+	{ WCD934X_CHIP_TIER_CTRL_EFUSE_VAL_OUT15,           0x00 },
+	{ WCD934X_CHIP_TIER_CTRL_EFUSE_STATUS,              0x00 },
+	{ WCD934X_CHIP_TIER_CTRL_I2C_SLAVE_ID_NONNEGO,      0x0d },
+	{ WCD934X_CHIP_TIER_CTRL_I2C_SLAVE_ID_1,            0x00 },
+	{ WCD934X_CHIP_TIER_CTRL_I2C_SLAVE_ID_2,            0x00 },
+	{ WCD934X_CHIP_TIER_CTRL_I2C_SLAVE_ID_3,            0x00 },
+	{ WCD934X_CHIP_TIER_CTRL_ANA_WAIT_STATE_CTL,        0xcc },
+	{ WCD934X_CHIP_TIER_CTRL_SLNQ_WAIT_STATE_CTL,       0xcc },
+	{ WCD934X_CHIP_TIER_CTRL_I2C_ACTIVE,                0x00 },
+	{ WCD934X_CHIP_TIER_CTRL_ALT_FUNC_EN,               0x00 },
+	{ WCD934X_CHIP_TIER_CTRL_GPIO_CTL_OE,               0x00 },
+	{ WCD934X_CHIP_TIER_CTRL_GPIO_CTL_DATA,             0x00 },
+	{ WCD934X_DATA_HUB_RX0_CFG,                         0x00 },
+	{ WCD934X_DATA_HUB_RX1_CFG,                         0x00 },
+	{ WCD934X_DATA_HUB_RX2_CFG,                         0x00 },
+	{ WCD934X_DATA_HUB_RX3_CFG,                         0x00 },
+	{ WCD934X_DATA_HUB_RX4_CFG,                         0x00 },
+	{ WCD934X_DATA_HUB_RX5_CFG,                         0x00 },
+	{ WCD934X_DATA_HUB_RX6_CFG,                         0x00 },
+	{ WCD934X_DATA_HUB_RX7_CFG,                         0x00 },
+	{ WCD934X_DATA_HUB_SB_TX0_INP_CFG,                  0x00 },
+	{ WCD934X_DATA_HUB_SB_TX1_INP_CFG,                  0x00 },
+	{ WCD934X_DATA_HUB_SB_TX2_INP_CFG,                  0x00 },
+	{ WCD934X_DATA_HUB_SB_TX3_INP_CFG,                  0x00 },
+	{ WCD934X_DATA_HUB_SB_TX4_INP_CFG,                  0x00 },
+	{ WCD934X_DATA_HUB_SB_TX5_INP_CFG,                  0x00 },
+	{ WCD934X_DATA_HUB_SB_TX6_INP_CFG,                  0x00 },
+	{ WCD934X_DATA_HUB_SB_TX7_INP_CFG,                  0x00 },
+	{ WCD934X_DATA_HUB_SB_TX8_INP_CFG,                  0x00 },
+	{ WCD934X_DATA_HUB_SB_TX9_INP_CFG,                  0x00 },
+	{ WCD934X_DATA_HUB_SB_TX10_INP_CFG,                 0x00 },
+	{ WCD934X_DATA_HUB_SB_TX11_INP_CFG,                 0x00 },
+	{ WCD934X_DATA_HUB_SB_TX13_INP_CFG,                 0x00 },
+	{ WCD934X_DATA_HUB_SB_TX14_INP_CFG,                 0x00 },
+	{ WCD934X_DATA_HUB_SB_TX15_INP_CFG,                 0x00 },
+	{ WCD934X_DATA_HUB_I2S_TX0_CFG,                     0x00 },
+	{ WCD934X_DATA_HUB_I2S_TX1_0_CFG,                   0x00 },
+	{ WCD934X_DATA_HUB_I2S_TX1_1_CFG,                   0x00 },
+	{ WCD934X_DATA_HUB_I2S_0_CTL,                       0x0c },
+	{ WCD934X_DATA_HUB_I2S_1_CTL,                       0x0c },
+	{ WCD934X_DATA_HUB_I2S_2_CTL,                       0x0c },
+	{ WCD934X_DATA_HUB_I2S_3_CTL,                       0x0c },
+	{ WCD934X_DATA_HUB_I2S_CLKSRC_CTL,                  0x00 },
+	{ WCD934X_DATA_HUB_I2S_COMMON_CTL,                  0x00 },
+	{ WCD934X_DATA_HUB_I2S_0_TDM_CTL,                   0x00 },
+	{ WCD934X_DATA_HUB_I2S_STATUS,                      0x00 },
+	{ WCD934X_DMA_RDMA_CTL_0,                           0x00 },
+	{ WCD934X_DMA_CH_2_3_CFG_RDMA_0,                    0xff },
+	{ WCD934X_DMA_CH_0_1_CFG_RDMA_0,                    0xff },
+	{ WCD934X_DMA_RDMA_CTL_1,                           0x00 },
+	{ WCD934X_DMA_CH_2_3_CFG_RDMA_1,                    0xff },
+	{ WCD934X_DMA_CH_0_1_CFG_RDMA_1,                    0xff },
+	{ WCD934X_DMA_RDMA_CTL_2,                           0x00 },
+	{ WCD934X_DMA_CH_2_3_CFG_RDMA_2,                    0xff },
+	{ WCD934X_DMA_CH_0_1_CFG_RDMA_2,                    0xff },
+	{ WCD934X_DMA_RDMA_CTL_3,                           0x00 },
+	{ WCD934X_DMA_CH_2_3_CFG_RDMA_3,                    0xff },
+	{ WCD934X_DMA_CH_0_1_CFG_RDMA_3,                    0xff },
+	{ WCD934X_DMA_RDMA_CTL_4,                           0x00 },
+	{ WCD934X_DMA_CH_2_3_CFG_RDMA_4,                    0xff },
+	{ WCD934X_DMA_CH_0_1_CFG_RDMA_4,                    0xff },
+	{ WCD934X_DMA_RDMA4_PRT_CFG,                       0x00 },
+	{ WCD934X_DMA_RDMA_SBTX0_7_CFG,                    0x00 },
+	{ WCD934X_DMA_RDMA_SBTX8_11_CFG,                   0x00 },
+	{ WCD934X_DMA_WDMA_CTL_0,                          0x00 },
+	{ WCD934X_DMA_CH_4_5_CFG_WDMA_0,                   0x00 },
+	{ WCD934X_DMA_CH_2_3_CFG_WDMA_0,                   0x00 },
+	{ WCD934X_DMA_CH_0_1_CFG_WDMA_0,                   0x00 },
+	{ WCD934X_DMA_WDMA_CTL_1,                          0x00 },
+	{ WCD934X_DMA_CH_4_5_CFG_WDMA_1,                   0x00 },
+	{ WCD934X_DMA_CH_2_3_CFG_WDMA_1,                   0x00 },
+	{ WCD934X_DMA_CH_0_1_CFG_WDMA_1,                   0x00 },
+	{ WCD934X_DMA_WDMA_CTL_2,                          0x00 },
+	{ WCD934X_DMA_CH_4_5_CFG_WDMA_2,                   0x00 },
+	{ WCD934X_DMA_CH_2_3_CFG_WDMA_2,                   0x00 },
+	{ WCD934X_DMA_CH_0_1_CFG_WDMA_2,                   0x00 },
+	{ WCD934X_DMA_WDMA_CTL_3,                          0x00 },
+	{ WCD934X_DMA_CH_4_5_CFG_WDMA_3,                   0x00 },
+	{ WCD934X_DMA_CH_2_3_CFG_WDMA_3,                   0x00 },
+	{ WCD934X_DMA_CH_0_1_CFG_WDMA_3,                   0x00 },
+	{ WCD934X_DMA_WDMA_CTL_4,                          0x00 },
+	{ WCD934X_DMA_CH_4_5_CFG_WDMA_4,                   0x00 },
+	{ WCD934X_DMA_CH_2_3_CFG_WDMA_4,                   0x00 },
+	{ WCD934X_DMA_CH_0_1_CFG_WDMA_4,                   0x00 },
+	{ WCD934X_DMA_WDMA0_PRT_CFG,                       0x00 },
+	{ WCD934X_DMA_WDMA3_PRT_CFG,                       0x00 },
+	{ WCD934X_DMA_WDMA4_PRT0_3_CFG,                    0x00 },
+	{ WCD934X_DMA_WDMA4_PRT4_7_CFG,                    0x00 },
+	{ WCD934X_PAGE1_PAGE_REGISTER,                     0x00 },
+	{ WCD934X_CPE_FLL_USER_CTL_0,                      0x71 },
+	{ WCD934X_CPE_FLL_USER_CTL_1,                      0x34 },
+	{ WCD934X_CPE_FLL_USER_CTL_2,                      0x0b },
+	{ WCD934X_CPE_FLL_USER_CTL_3,                      0x02 },
+	{ WCD934X_CPE_FLL_USER_CTL_4,                      0x04 },
+	{ WCD934X_CPE_FLL_USER_CTL_5,                      0x02 },
+	{ WCD934X_CPE_FLL_USER_CTL_6,                      0x6e },
+	{ WCD934X_CPE_FLL_USER_CTL_7,                      0x00 },
+	{ WCD934X_CPE_FLL_USER_CTL_8,                      0x94 },
+	{ WCD934X_CPE_FLL_USER_CTL_9,                      0x50 },
+	{ WCD934X_CPE_FLL_L_VAL_CTL_0,                     0x53 },
+	{ WCD934X_CPE_FLL_L_VAL_CTL_1,                     0x00 },
+	{ WCD934X_CPE_FLL_DSM_FRAC_CTL_0,                  0x00 },
+	{ WCD934X_CPE_FLL_DSM_FRAC_CTL_1,                  0xff },
+	{ WCD934X_CPE_FLL_CONFIG_CTL_0,                    0x6b },
+	{ WCD934X_CPE_FLL_CONFIG_CTL_1,                    0x05 },
+	{ WCD934X_CPE_FLL_CONFIG_CTL_2,                    0x08 },
+	{ WCD934X_CPE_FLL_CONFIG_CTL_3,                    0x00 },
+	{ WCD934X_CPE_FLL_CONFIG_CTL_4,                    0x10 },
+	{ WCD934X_CPE_FLL_TEST_CTL_0,                      0x80 },
+	{ WCD934X_CPE_FLL_TEST_CTL_1,                      0x00 },
+	{ WCD934X_CPE_FLL_TEST_CTL_2,                      0x00 },
+	{ WCD934X_CPE_FLL_TEST_CTL_3,                      0x00 },
+	{ WCD934X_CPE_FLL_TEST_CTL_4,                      0x00 },
+	{ WCD934X_CPE_FLL_TEST_CTL_5,                      0x00 },
+	{ WCD934X_CPE_FLL_TEST_CTL_6,                      0x00 },
+	{ WCD934X_CPE_FLL_TEST_CTL_7,                      0x33 },
+	{ WCD934X_CPE_FLL_FREQ_CTL_0,                      0x00 },
+	{ WCD934X_CPE_FLL_FREQ_CTL_1,                      0x00 },
+	{ WCD934X_CPE_FLL_FREQ_CTL_2,                      0x00 },
+	{ WCD934X_CPE_FLL_FREQ_CTL_3,                      0x00 },
+	{ WCD934X_CPE_FLL_SSC_CTL_0,                       0x00 },
+	{ WCD934X_CPE_FLL_SSC_CTL_1,                       0x00 },
+	{ WCD934X_CPE_FLL_SSC_CTL_2,                       0x00 },
+	{ WCD934X_CPE_FLL_SSC_CTL_3,                       0x00 },
+	{ WCD934X_CPE_FLL_FLL_MODE,                        0x20 },
+	{ WCD934X_CPE_FLL_STATUS_0,                        0x00 },
+	{ WCD934X_CPE_FLL_STATUS_1,                        0x00 },
+	{ WCD934X_CPE_FLL_STATUS_2,                        0x00 },
+	{ WCD934X_CPE_FLL_STATUS_3,                        0x00 },
+	{ WCD934X_I2S_FLL_USER_CTL_0,                      0x41 },
+	{ WCD934X_I2S_FLL_USER_CTL_1,                      0x94 },
+	{ WCD934X_I2S_FLL_USER_CTL_2,                      0x08 },
+	{ WCD934X_I2S_FLL_USER_CTL_3,                      0x02 },
+	{ WCD934X_I2S_FLL_USER_CTL_4,                      0x04 },
+	{ WCD934X_I2S_FLL_USER_CTL_5,                      0x02 },
+	{ WCD934X_I2S_FLL_USER_CTL_6,                      0x40 },
+	{ WCD934X_I2S_FLL_USER_CTL_7,                      0x00 },
+	{ WCD934X_I2S_FLL_USER_CTL_8,                      0x5f },
+	{ WCD934X_I2S_FLL_USER_CTL_9,                      0x02 },
+	{ WCD934X_I2S_FLL_L_VAL_CTL_0,                     0x40 },
+	{ WCD934X_I2S_FLL_L_VAL_CTL_1,                     0x00 },
+	{ WCD934X_I2S_FLL_DSM_FRAC_CTL_0,                  0x00 },
+	{ WCD934X_I2S_FLL_DSM_FRAC_CTL_1,                  0xff },
+	{ WCD934X_I2S_FLL_CONFIG_CTL_0,                    0x6b },
+	{ WCD934X_I2S_FLL_CONFIG_CTL_1,                    0x05 },
+	{ WCD934X_I2S_FLL_CONFIG_CTL_2,                    0x08 },
+	{ WCD934X_I2S_FLL_CONFIG_CTL_3,                    0x00 },
+	{ WCD934X_I2S_FLL_CONFIG_CTL_4,                    0x30 },
+	{ WCD934X_I2S_FLL_TEST_CTL_0,                      0x80 },
+	{ WCD934X_I2S_FLL_TEST_CTL_1,                      0x00 },
+	{ WCD934X_I2S_FLL_TEST_CTL_2,                      0x00 },
+	{ WCD934X_I2S_FLL_TEST_CTL_3,                      0x00 },
+	{ WCD934X_I2S_FLL_TEST_CTL_4,                      0x00 },
+	{ WCD934X_I2S_FLL_TEST_CTL_5,                      0x00 },
+	{ WCD934X_I2S_FLL_TEST_CTL_6,                      0x00 },
+	{ WCD934X_I2S_FLL_TEST_CTL_7,                      0xff },
+	{ WCD934X_I2S_FLL_FREQ_CTL_0,                      0x00 },
+	{ WCD934X_I2S_FLL_FREQ_CTL_1,                      0x00 },
+	{ WCD934X_I2S_FLL_FREQ_CTL_2,                      0x00 },
+	{ WCD934X_I2S_FLL_FREQ_CTL_3,                      0x00 },
+	{ WCD934X_I2S_FLL_SSC_CTL_0,                       0x00 },
+	{ WCD934X_I2S_FLL_SSC_CTL_1,                       0x00 },
+	{ WCD934X_I2S_FLL_SSC_CTL_2,                       0x00 },
+	{ WCD934X_I2S_FLL_SSC_CTL_3,                       0x00 },
+	{ WCD934X_I2S_FLL_FLL_MODE,                        0x00 },
+	{ WCD934X_I2S_FLL_STATUS_0,                        0x00 },
+	{ WCD934X_I2S_FLL_STATUS_1,                        0x00 },
+	{ WCD934X_I2S_FLL_STATUS_2,                        0x00 },
+	{ WCD934X_I2S_FLL_STATUS_3,                        0x00 },
+	{ WCD934X_SB_FLL_USER_CTL_0,                       0x41 },
+	{ WCD934X_SB_FLL_USER_CTL_1,                       0x94 },
+	{ WCD934X_SB_FLL_USER_CTL_2,                       0x08 },
+	{ WCD934X_SB_FLL_USER_CTL_3,                       0x02 },
+	{ WCD934X_SB_FLL_USER_CTL_4,                       0x04 },
+	{ WCD934X_SB_FLL_USER_CTL_5,                       0x02 },
+	{ WCD934X_SB_FLL_USER_CTL_6,                       0x40 },
+	{ WCD934X_SB_FLL_USER_CTL_7,                       0x00 },
+	{ WCD934X_SB_FLL_USER_CTL_8,                       0x5e },
+	{ WCD934X_SB_FLL_USER_CTL_9,                       0x01 },
+	{ WCD934X_SB_FLL_L_VAL_CTL_0,                      0x40 },
+	{ WCD934X_SB_FLL_L_VAL_CTL_1,                      0x00 },
+	{ WCD934X_SB_FLL_DSM_FRAC_CTL_0,                   0x00 },
+	{ WCD934X_SB_FLL_DSM_FRAC_CTL_1,                   0xff },
+	{ WCD934X_SB_FLL_CONFIG_CTL_0,                     0x6b },
+	{ WCD934X_SB_FLL_CONFIG_CTL_1,                     0x05 },
+	{ WCD934X_SB_FLL_CONFIG_CTL_2,                     0x08 },
+	{ WCD934X_SB_FLL_CONFIG_CTL_3,                     0x00 },
+	{ WCD934X_SB_FLL_CONFIG_CTL_4,                     0x10 },
+	{ WCD934X_SB_FLL_TEST_CTL_0,                       0x00 },
+	{ WCD934X_SB_FLL_TEST_CTL_1,                       0x00 },
+	{ WCD934X_SB_FLL_TEST_CTL_2,                       0x00 },
+	{ WCD934X_SB_FLL_TEST_CTL_3,                       0x00 },
+	{ WCD934X_SB_FLL_TEST_CTL_4,                       0x00 },
+	{ WCD934X_SB_FLL_TEST_CTL_5,                       0x00 },
+	{ WCD934X_SB_FLL_TEST_CTL_6,                       0x00 },
+	{ WCD934X_SB_FLL_TEST_CTL_7,                       0xff },
+	{ WCD934X_SB_FLL_FREQ_CTL_0,                       0x00 },
+	{ WCD934X_SB_FLL_FREQ_CTL_1,                       0x00 },
+	{ WCD934X_SB_FLL_FREQ_CTL_2,                       0x00 },
+	{ WCD934X_SB_FLL_FREQ_CTL_3,                       0x00 },
+	{ WCD934X_SB_FLL_SSC_CTL_0,                        0x00 },
+	{ WCD934X_SB_FLL_SSC_CTL_1,                        0x00 },
+	{ WCD934X_SB_FLL_SSC_CTL_2,                        0x00 },
+	{ WCD934X_SB_FLL_SSC_CTL_3,                        0x00 },
+	{ WCD934X_SB_FLL_FLL_MODE,                         0x00 },
+	{ WCD934X_SB_FLL_STATUS_0,                         0x00 },
+	{ WCD934X_SB_FLL_STATUS_1,                         0x00 },
+	{ WCD934X_SB_FLL_STATUS_2,                         0x00 },
+	{ WCD934X_SB_FLL_STATUS_3,                         0x00 },
+	{ WCD934X_PAGE2_PAGE_REGISTER,                     0x00 },
+	{ WCD934X_CPE_SS_CPE_CTL,                          0x05 },
+	{ WCD934X_CPE_SS_PWR_SYS_PSTATE_CTL_0,             0x01 },
+	{ WCD934X_CPE_SS_PWR_SYS_PSTATE_CTL_1,             0x00 },
+	{ WCD934X_CPE_SS_PWR_CPEFLL_CTL,                   0x02 },
+	{ WCD934X_CPE_SS_PWR_CPE_SYSMEM_DEEPSLP_0,         0xff },
+	{ WCD934X_CPE_SS_PWR_CPE_SYSMEM_DEEPSLP_1,         0x0f },
+	{ WCD934X_CPE_SS_PWR_CPE_SYSMEM_DEEPSLP_OVERRIDE,  0x00 },
+	{ WCD934X_CPE_SS_PWR_CPE_SYSMEM_SHUTDOWN_0,        0xff },
+	{ WCD934X_CPE_SS_PWR_CPE_SYSMEM_SHUTDOWN_1,        0xff },
+	{ WCD934X_CPE_SS_PWR_CPE_SYSMEM_SHUTDOWN_2,        0xff },
+	{ WCD934X_CPE_SS_PWR_CPE_SYSMEM_SHUTDOWN_3,        0xff },
+	{ WCD934X_CPE_SS_PWR_CPE_SYSMEM_SHUTDOWN_4,        0xff },
+	{ WCD934X_CPE_SS_PWR_CPE_SYSMEM_SHUTDOWN_5,        0xff },
+	{ WCD934X_CPE_SS_PWR_CPE_DRAM1_SHUTDOWN,           0x07 },
+	{ WCD934X_CPE_SS_SOC_SW_COLLAPSE_CTL,              0x00 },
+	{ WCD934X_CPE_SS_SOC_SW_COLLAPSE_OVERRIDE_CTL,     0x20 },
+	{ WCD934X_CPE_SS_SOC_SW_COLLAPSE_OVERRIDE_CTL1,    0x00 },
+	{ WCD934X_CPE_SS_US_BUF_INT_PERIOD,                0x60 },
+	{ WCD934X_CPE_SS_CPARMAD_BUFRDY_INT_PERIOD,        0x13 },
+	{ WCD934X_CPE_SS_SVA_CFG,                          0x41 },
+	{ WCD934X_CPE_SS_US_CFG,                           0x00 },
+	{ WCD934X_CPE_SS_MAD_CTL,                          0x00 },
+	{ WCD934X_CPE_SS_CPAR_CTL,                         0x00 },
+	{ WCD934X_CPE_SS_DMIC0_CTL,                        0x00 },
+	{ WCD934X_CPE_SS_DMIC1_CTL,                        0x00 },
+	{ WCD934X_CPE_SS_DMIC2_CTL,                        0x00 },
+	{ WCD934X_CPE_SS_DMIC_CFG,                         0x80 },
+	{ WCD934X_CPE_SS_CPAR_CFG,                         0x00 },
+	{ WCD934X_CPE_SS_WDOG_CFG,                         0x01 },
+	{ WCD934X_CPE_SS_BACKUP_INT,                       0x00 },
+	{ WCD934X_CPE_SS_STATUS,                           0x00 },
+	{ WCD934X_CPE_SS_CPE_OCD_CFG,                      0x00 },
+	{ WCD934X_CPE_SS_SS_ERROR_INT_MASK_0A,             0xff },
+	{ WCD934X_CPE_SS_SS_ERROR_INT_MASK_0B,             0x3f },
+	{ WCD934X_CPE_SS_SS_ERROR_INT_MASK_1A,             0xff },
+	{ WCD934X_CPE_SS_SS_ERROR_INT_MASK_1B,             0x3f },
+	{ WCD934X_CPE_SS_SS_ERROR_INT_STATUS_0A,           0x00 },
+	{ WCD934X_CPE_SS_SS_ERROR_INT_STATUS_0B,           0x00 },
+	{ WCD934X_CPE_SS_SS_ERROR_INT_STATUS_1A,           0x00 },
+	{ WCD934X_CPE_SS_SS_ERROR_INT_STATUS_1B,           0x00 },
+	{ WCD934X_CPE_SS_SS_ERROR_INT_CLEAR_0A,            0x00 },
+	{ WCD934X_CPE_SS_SS_ERROR_INT_CLEAR_0B,            0x00 },
+	{ WCD934X_CPE_SS_SS_ERROR_INT_CLEAR_1A,            0x00 },
+	{ WCD934X_CPE_SS_SS_ERROR_INT_CLEAR_1B,            0x00 },
+	{ WCD934X_SOC_MAD_MAIN_CTL_1,                      0x00 },
+	{ WCD934X_SOC_MAD_MAIN_CTL_2,                      0x00 },
+	{ WCD934X_SOC_MAD_AUDIO_CTL_1,                     0x00 },
+	{ WCD934X_SOC_MAD_AUDIO_CTL_2,                     0x00 },
+	{ WCD934X_SOC_MAD_AUDIO_CTL_3,                     0x00 },
+	{ WCD934X_SOC_MAD_AUDIO_CTL_4,                     0x00 },
+	{ WCD934X_SOC_MAD_AUDIO_CTL_5,                     0x00 },
+	{ WCD934X_SOC_MAD_AUDIO_CTL_6,                     0x00 },
+	{ WCD934X_SOC_MAD_AUDIO_CTL_7,                     0x00 },
+	{ WCD934X_SOC_MAD_AUDIO_CTL_8,                     0x00 },
+	{ WCD934X_SOC_MAD_AUDIO_IIR_CTL_PTR,               0x00 },
+	{ WCD934X_SOC_MAD_AUDIO_IIR_CTL_VAL,               0x40 },
+	{ WCD934X_SOC_MAD_ULTR_CTL_1,                      0x00 },
+	{ WCD934X_SOC_MAD_ULTR_CTL_2,                      0x00 },
+	{ WCD934X_SOC_MAD_ULTR_CTL_3,                      0x00 },
+	{ WCD934X_SOC_MAD_ULTR_CTL_4,                      0x00 },
+	{ WCD934X_SOC_MAD_ULTR_CTL_5,                      0x00 },
+	{ WCD934X_SOC_MAD_ULTR_CTL_6,                      0x00 },
+	{ WCD934X_SOC_MAD_ULTR_CTL_7,                      0x00 },
+	{ WCD934X_SOC_MAD_BEACON_CTL_1,                    0x00 },
+	{ WCD934X_SOC_MAD_BEACON_CTL_2,                    0x00 },
+	{ WCD934X_SOC_MAD_BEACON_CTL_3,                    0x00 },
+	{ WCD934X_SOC_MAD_BEACON_CTL_4,                    0x00 },
+	{ WCD934X_SOC_MAD_BEACON_CTL_5,                    0x00 },
+	{ WCD934X_SOC_MAD_BEACON_CTL_6,                    0x00 },
+	{ WCD934X_SOC_MAD_BEACON_CTL_7,                    0x00 },
+	{ WCD934X_SOC_MAD_BEACON_CTL_8,                    0x00 },
+	{ WCD934X_SOC_MAD_BEACON_IIR_CTL_PTR,              0x00 },
+	{ WCD934X_SOC_MAD_BEACON_IIR_CTL_VAL,              0x00 },
+	{ WCD934X_SOC_MAD_INP_SEL,                         0x00 },
+	{ WCD934X_PAGE4_PAGE_REGISTER,                     0x00 },
+	{ WCD934X_INTR_CFG,                                0x00 },
+	{ WCD934X_INTR_CLR_COMMIT,                         0x00 },
+	{ WCD934X_INTR_PIN1_MASK0,                         0xff },
+	{ WCD934X_INTR_PIN1_MASK1,                         0xff },
+	{ WCD934X_INTR_PIN1_MASK2,                         0xff },
+	{ WCD934X_INTR_PIN1_MASK3,                         0xff },
+	{ WCD934X_INTR_PIN1_STATUS0,                       0x00 },
+	{ WCD934X_INTR_PIN1_STATUS1,                       0x00 },
+	{ WCD934X_INTR_PIN1_STATUS2,                       0x00 },
+	{ WCD934X_INTR_PIN1_STATUS3,                       0x00 },
+	{ WCD934X_INTR_PIN1_CLEAR0,                        0x00 },
+	{ WCD934X_INTR_PIN1_CLEAR1,                        0x00 },
+	{ WCD934X_INTR_PIN1_CLEAR2,                        0x00 },
+	{ WCD934X_INTR_PIN1_CLEAR3,                        0x00 },
+	{ WCD934X_INTR_PIN2_MASK3,                         0xff },
+	{ WCD934X_INTR_PIN2_STATUS3,                       0x00 },
+	{ WCD934X_INTR_PIN2_CLEAR3,                        0x00 },
+	{ WCD934X_INTR_CPESS_SUMRY_MASK2,                  0xff },
+	{ WCD934X_INTR_CPESS_SUMRY_MASK3,                  0xff },
+	{ WCD934X_INTR_CPESS_SUMRY_STATUS2,                0x00 },
+	{ WCD934X_INTR_CPESS_SUMRY_STATUS3,                0x00 },
+	{ WCD934X_INTR_CPESS_SUMRY_CLEAR2,                 0x00 },
+	{ WCD934X_INTR_CPESS_SUMRY_CLEAR3,                 0x00 },
+	{ WCD934X_INTR_LEVEL0,                             0x03 },
+	{ WCD934X_INTR_LEVEL1,                             0xe0 },
+	{ WCD934X_INTR_LEVEL2,                             0x94 },
+	{ WCD934X_INTR_LEVEL3,                             0x80 },
+	{ WCD934X_INTR_BYPASS0,                            0x00 },
+	{ WCD934X_INTR_BYPASS1,                            0x00 },
+	{ WCD934X_INTR_BYPASS2,                            0x00 },
+	{ WCD934X_INTR_BYPASS3,                            0x00 },
+	{ WCD934X_INTR_SET0,                               0x00 },
+	{ WCD934X_INTR_SET1,                               0x00 },
+	{ WCD934X_INTR_SET2,                               0x00 },
+	{ WCD934X_INTR_SET3,                               0x00 },
+	{ WCD934X_INTR_CODEC_MISC_MASK,                    0x7f },
+	{ WCD934X_INTR_CODEC_MISC_STATUS,                  0x00 },
+	{ WCD934X_INTR_CODEC_MISC_CLEAR,                   0x00 },
+	{ WCD934X_PAGE5_PAGE_REGISTER,                     0x00 },
+	{ WCD934X_SLNQ_DIG_DEVICE,                         0x49 },
+	{ WCD934X_SLNQ_DIG_REVISION,                       0x01 },
+	{ WCD934X_SLNQ_DIG_H_COMMAND,                      0x00 },
+	{ WCD934X_SLNQ_DIG_NUMBER_OF_BYTE_MSB,             0x00 },
+	{ WCD934X_SLNQ_DIG_NUMBER_OF_BYTE_LSB,             0x00 },
+	{ WCD934X_SLNQ_DIG_MASTER_ADDRESS_MSB,             0x00 },
+	{ WCD934X_SLNQ_DIG_MASTER_ADDRESS_LSB,             0x00 },
+	{ WCD934X_SLNQ_DIG_SLAVE_ADDRESS_MSB,              0x00 },
+	{ WCD934X_SLNQ_DIG_SLAVE_ADDRESS_LSB,              0x00 },
+	{ WCD934X_SLNQ_DIG_TIMER0_INTERRUPT_MSB,           0x40 },
+	{ WCD934X_SLNQ_DIG_TIMER0_INTERRUPT_LSB,           0x00 },
+	{ WCD934X_SLNQ_DIG_TIMER1_INTERRUPT_MSB,           0x40 },
+	{ WCD934X_SLNQ_DIG_TIMER1_INTERRUPT_LSB,           0x00 },
+	{ WCD934X_SLNQ_DIG_TIMER2_INTERRUPT_MSB,           0x40 },
+	{ WCD934X_SLNQ_DIG_TIMER2_INTERRUPT_LSB,           0x00 },
+	{ WCD934X_SLNQ_DIG_COMM_CTL,                       0x00 },
+	{ WCD934X_SLNQ_DIG_FRAME_CTRL,                     0x01 },
+	{ WCD934X_SLNQ_DIG_PDM_2ND_DATA_CH1_2,             0x77 },
+	{ WCD934X_SLNQ_DIG_PDM_2ND_DATA_CH3_4,             0x77 },
+	{ WCD934X_SLNQ_DIG_PDM_2ND_DATA_CH5,               0x70 },
+	{ WCD934X_SLNQ_DIG_SW_EVENT_RD,                    0x00 },
+	{ WCD934X_SLNQ_DIG_SW_EVENT_CTRL,                  0x00 },
+	{ WCD934X_SLNQ_DIG_PDM_SELECT_1,                   0x12 },
+	{ WCD934X_SLNQ_DIG_PDM_SELECT_2,                   0x34 },
+	{ WCD934X_SLNQ_DIG_PDM_SELECT_3,                   0x55 },
+	{ WCD934X_SLNQ_DIG_PDM_SAMPLING_FREQ,              0x01 },
+	{ WCD934X_SLNQ_DIG_PDM_DC_CONVERSION_CTL,          0x00 },
+	{ WCD934X_SLNQ_DIG_PDM_DC_CONVERSION_SEL,          0x11 },
+	{ WCD934X_SLNQ_DIG_PDM_DC_CONV_CHA_MSB,            0x00 },
+	{ WCD934X_SLNQ_DIG_PDM_DC_CONV_CHA_LSB,            0x00 },
+	{ WCD934X_SLNQ_DIG_PDM_DC_CONV_CHB_MSB,            0x00 },
+	{ WCD934X_SLNQ_DIG_PDM_DC_CONV_CHB_LSB,            0x00 },
+	{ WCD934X_SLNQ_DIG_RAM_CNTRL,                      0x01 },
+	{ WCD934X_SLNQ_DIG_SRAM_BANK,                      0x00 },
+	{ WCD934X_SLNQ_DIG_SRAM_BYTE_0,                    0x00 },
+	{ WCD934X_SLNQ_DIG_SRAM_BYTE_1,                    0x00 },
+	{ WCD934X_SLNQ_DIG_SRAM_BYTE_2,                    0x00 },
+	{ WCD934X_SLNQ_DIG_SRAM_BYTE_3,                    0x00 },
+	{ WCD934X_SLNQ_DIG_SRAM_BYTE_4,                    0x00 },
+	{ WCD934X_SLNQ_DIG_SRAM_BYTE_5,                    0x00 },
+	{ WCD934X_SLNQ_DIG_SRAM_BYTE_6,                    0x00 },
+	{ WCD934X_SLNQ_DIG_SRAM_BYTE_7,                    0x00 },
+	{ WCD934X_SLNQ_DIG_SRAM_BYTE_8,                    0x00 },
+	{ WCD934X_SLNQ_DIG_SRAM_BYTE_9,                    0x00 },
+	{ WCD934X_SLNQ_DIG_SRAM_BYTE_A,                    0x00 },
+	{ WCD934X_SLNQ_DIG_SRAM_BYTE_B,                    0x00 },
+	{ WCD934X_SLNQ_DIG_SRAM_BYTE_C,                    0x00 },
+	{ WCD934X_SLNQ_DIG_SRAM_BYTE_D,                    0x00 },
+	{ WCD934X_SLNQ_DIG_SRAM_BYTE_E,                    0x00 },
+	{ WCD934X_SLNQ_DIG_SRAM_BYTE_F,                    0x00 },
+	{ WCD934X_SLNQ_DIG_SRAM_BYTE_10,                   0x00 },
+	{ WCD934X_SLNQ_DIG_SRAM_BYTE_11,                   0x00 },
+	{ WCD934X_SLNQ_DIG_SRAM_BYTE_12,                   0x00 },
+	{ WCD934X_SLNQ_DIG_SRAM_BYTE_13,                   0x00 },
+	{ WCD934X_SLNQ_DIG_SRAM_BYTE_14,                   0x00 },
+	{ WCD934X_SLNQ_DIG_SRAM_BYTE_15,                   0x00 },
+	{ WCD934X_SLNQ_DIG_SRAM_BYTE_16,                   0x00 },
+	{ WCD934X_SLNQ_DIG_SRAM_BYTE_17,                   0x00 },
+	{ WCD934X_SLNQ_DIG_SRAM_BYTE_18,                   0x00 },
+	{ WCD934X_SLNQ_DIG_SRAM_BYTE_19,                   0x00 },
+	{ WCD934X_SLNQ_DIG_SRAM_BYTE_1A,                   0x00 },
+	{ WCD934X_SLNQ_DIG_SRAM_BYTE_1B,                   0x00 },
+	{ WCD934X_SLNQ_DIG_SRAM_BYTE_1C,                   0x00 },
+	{ WCD934X_SLNQ_DIG_SRAM_BYTE_1D,                   0x00 },
+	{ WCD934X_SLNQ_DIG_SRAM_BYTE_1E,                   0x00 },
+	{ WCD934X_SLNQ_DIG_SRAM_BYTE_1F,                   0x00 },
+	{ WCD934X_SLNQ_DIG_SRAM_BYTE_20,                   0x00 },
+	{ WCD934X_SLNQ_DIG_SRAM_BYTE_21,                   0x00 },
+	{ WCD934X_SLNQ_DIG_SRAM_BYTE_22,                   0x00 },
+	{ WCD934X_SLNQ_DIG_SRAM_BYTE_23,                   0x00 },
+	{ WCD934X_SLNQ_DIG_SRAM_BYTE_24,                   0x00 },
+	{ WCD934X_SLNQ_DIG_SRAM_BYTE_25,                   0x00 },
+	{ WCD934X_SLNQ_DIG_SRAM_BYTE_26,                   0x00 },
+	{ WCD934X_SLNQ_DIG_SRAM_BYTE_27,                   0x00 },
+	{ WCD934X_SLNQ_DIG_SRAM_BYTE_28,                   0x00 },
+	{ WCD934X_SLNQ_DIG_SRAM_BYTE_29,                   0x00 },
+	{ WCD934X_SLNQ_DIG_SRAM_BYTE_2A,                   0x00 },
+	{ WCD934X_SLNQ_DIG_SRAM_BYTE_2B,                   0x00 },
+	{ WCD934X_SLNQ_DIG_SRAM_BYTE_2C,                   0x00 },
+	{ WCD934X_SLNQ_DIG_SRAM_BYTE_2D,                   0x00 },
+	{ WCD934X_SLNQ_DIG_SRAM_BYTE_2E,                   0x00 },
+	{ WCD934X_SLNQ_DIG_SRAM_BYTE_2F,                   0x00 },
+	{ WCD934X_SLNQ_DIG_SRAM_BYTE_30,                   0x00 },
+	{ WCD934X_SLNQ_DIG_SRAM_BYTE_31,                   0x00 },
+	{ WCD934X_SLNQ_DIG_SRAM_BYTE_32,                   0x00 },
+	{ WCD934X_SLNQ_DIG_SRAM_BYTE_33,                   0x00 },
+	{ WCD934X_SLNQ_DIG_SRAM_BYTE_34,                   0x00 },
+	{ WCD934X_SLNQ_DIG_SRAM_BYTE_35,                   0x00 },
+	{ WCD934X_SLNQ_DIG_SRAM_BYTE_36,                   0x00 },
+	{ WCD934X_SLNQ_DIG_SRAM_BYTE_37,                   0x00 },
+	{ WCD934X_SLNQ_DIG_SRAM_BYTE_38,                   0x00 },
+	{ WCD934X_SLNQ_DIG_SRAM_BYTE_39,                   0x00 },
+	{ WCD934X_SLNQ_DIG_SRAM_BYTE_3A,                   0x00 },
+	{ WCD934X_SLNQ_DIG_SRAM_BYTE_3B,                   0x00 },
+	{ WCD934X_SLNQ_DIG_SRAM_BYTE_3C,                   0x00 },
+	{ WCD934X_SLNQ_DIG_SRAM_BYTE_3D,                   0x00 },
+	{ WCD934X_SLNQ_DIG_SRAM_BYTE_3E,                   0x00 },
+	{ WCD934X_SLNQ_DIG_SRAM_BYTE_3F,                   0x00 },
+	{ WCD934X_SLNQ_DIG_TOP_CTRL1,                      0x00 },
+	{ WCD934X_SLNQ_DIG_TOP_CTRL2,                      0x00 },
+	{ WCD934X_SLNQ_DIG_PDM_CTRL,                       0x00 },
+	{ WCD934X_SLNQ_DIG_PDM_MUTE_CTRL,                  0x20 },
+	{ WCD934X_SLNQ_DIG_DEC_BYPASS_CTRL,                0x00 },
+	{ WCD934X_SLNQ_DIG_DEC_BYPASS_STATUS,              0x00 },
+	{ WCD934X_SLNQ_DIG_DEC_BYPASS_FS,                  0x00 },
+	{ WCD934X_SLNQ_DIG_DEC_BYPASS_IN_SEL,              0x00 },
+	{ WCD934X_SLNQ_DIG_GPOUT_ENABLE,                   0x00 },
+	{ WCD934X_SLNQ_DIG_GPOUT_VAL,                      0x00 },
+	{ WCD934X_SLNQ_DIG_ANA_INTERRUPT_MASK,             0x00 },
+	{ WCD934X_SLNQ_DIG_ANA_INTERRUPT_STATUS,           0x00 },
+	{ WCD934X_SLNQ_DIG_ANA_INTERRUPT_CLR,              0x00 },
+	{ WCD934X_SLNQ_DIG_IP_TESTING,                     0x00 },
+	{ WCD934X_SLNQ_DIG_INTERRUPT_CNTRL,                0x0f },
+	{ WCD934X_SLNQ_DIG_INTERRUPT_CNT,                  0x00 },
+	{ WCD934X_SLNQ_DIG_INTERRUPT_CNT_MSB,              0xff },
+	{ WCD934X_SLNQ_DIG_INTERRUPT_CNT_LSB,              0xff },
+	{ WCD934X_SLNQ_DIG_INTERRUPT_MASK0,                0xff },
+	{ WCD934X_SLNQ_DIG_INTERRUPT_MASK1,                0xff },
+	{ WCD934X_SLNQ_DIG_INTERRUPT_MASK2,                0xff },
+	{ WCD934X_SLNQ_DIG_INTERRUPT_MASK3,                0xff },
+	{ WCD934X_SLNQ_DIG_INTERRUPT_MASK4,                0x1f },
+	{ WCD934X_SLNQ_DIG_INTERRUPT_STATUS0,              0x00 },
+	{ WCD934X_SLNQ_DIG_INTERRUPT_STATUS1,              0x00 },
+	{ WCD934X_SLNQ_DIG_INTERRUPT_STATUS2,              0x00 },
+	{ WCD934X_SLNQ_DIG_INTERRUPT_STATUS3,              0x00 },
+	{ WCD934X_SLNQ_DIG_INTERRUPT_STATUS4,              0x00 },
+	{ WCD934X_SLNQ_DIG_INTERRUPT_CLR0,                 0x00 },
+	{ WCD934X_SLNQ_DIG_INTERRUPT_CLR1,                 0x00 },
+	{ WCD934X_SLNQ_DIG_INTERRUPT_CLR2,                 0x00 },
+	{ WCD934X_SLNQ_DIG_INTERRUPT_CLR3,                 0x00 },
+	{ WCD934X_SLNQ_DIG_INTERRUPT_CLR4,                 0x00 },
+	{ WCD934X_ANA_PAGE_REGISTER,                       0x00 },
+	{ WCD934X_ANA_BIAS,                                0x00 },
+	{ WCD934X_ANA_RCO,                                 0x00 },
+	{ WCD934X_ANA_PAGE6_SPARE2,                        0x00 },
+	{ WCD934X_ANA_PAGE6_SPARE3,                        0x00 },
+	{ WCD934X_ANA_BUCK_CTL,                            0x00 },
+	{ WCD934X_ANA_BUCK_STATUS,                         0x00 },
+	{ WCD934X_ANA_RX_SUPPLIES,                         0x00 },
+	{ WCD934X_ANA_HPH,                                 0x0c },
+	{ WCD934X_ANA_EAR,                                 0x00 },
+	{ WCD934X_ANA_LO_1_2,                              0x3c },
+	{ WCD934X_ANA_MAD_SETUP,                           0x01 },
+	{ WCD934X_ANA_AMIC1,                               0x20 },
+	{ WCD934X_ANA_AMIC2,                               0x00 },
+	{ WCD934X_ANA_AMIC3,                               0x20 },
+	{ WCD934X_ANA_AMIC4,                               0x00 },
+	{ WCD934X_ANA_MBHC_MECH,                           0x39 },
+	{ WCD934X_ANA_MBHC_ELECT,                          0x08 },
+	{ WCD934X_ANA_MBHC_ZDET,                           0x00 },
+	{ WCD934X_ANA_MBHC_RESULT_1,                       0x00 },
+	{ WCD934X_ANA_MBHC_RESULT_2,                       0x00 },
+	{ WCD934X_ANA_MBHC_RESULT_3,                       0x00 },
+	{ WCD934X_ANA_MBHC_BTN0,                           0x00 },
+	{ WCD934X_ANA_MBHC_BTN1,                           0x10 },
+	{ WCD934X_ANA_MBHC_BTN2,                           0x20 },
+	{ WCD934X_ANA_MBHC_BTN3,                           0x30 },
+	{ WCD934X_ANA_MBHC_BTN4,                           0x40 },
+	{ WCD934X_ANA_MBHC_BTN5,                           0x50 },
+	{ WCD934X_ANA_MBHC_BTN6,                           0x60 },
+	{ WCD934X_ANA_MBHC_BTN7,                           0x70 },
+	{ WCD934X_ANA_MICB1,                               0x10 },
+	{ WCD934X_ANA_MICB2,                               0x10 },
+	{ WCD934X_ANA_MICB2_RAMP,                          0x00 },
+	{ WCD934X_ANA_MICB3,                               0x10 },
+	{ WCD934X_ANA_MICB4,                               0x10 },
+	{ WCD934X_ANA_VBADC,                               0x00 },
+	{ WCD934X_BIAS_CTL,                                0x28 },
+	{ WCD934X_BIAS_VBG_FINE_ADJ,                       0x65 },
+	{ WCD934X_RCO_CTRL_1,                              0x44 },
+	{ WCD934X_RCO_CTRL_2,                              0x48 },
+	{ WCD934X_RCO_CAL,                                 0x00 },
+	{ WCD934X_RCO_CAL_1,                               0x00 },
+	{ WCD934X_RCO_CAL_2,                               0x00 },
+	{ WCD934X_RCO_TEST_CTRL,                           0x00 },
+	{ WCD934X_RCO_CAL_OUT_1,                           0x00 },
+	{ WCD934X_RCO_CAL_OUT_2,                           0x00 },
+	{ WCD934X_RCO_CAL_OUT_3,                           0x00 },
+	{ WCD934X_RCO_CAL_OUT_4,                           0x00 },
+	{ WCD934X_RCO_CAL_OUT_5,                           0x00 },
+	{ WCD934X_SIDO_MODE_1,                             0x84 },
+	{ WCD934X_SIDO_MODE_2,                             0xfe },
+	{ WCD934X_SIDO_MODE_3,                             0xf6 },
+	{ WCD934X_SIDO_MODE_4,                             0x56 },
+	{ WCD934X_SIDO_VCL_1,                              0x00 },
+	{ WCD934X_SIDO_VCL_2,                              0x6c },
+	{ WCD934X_SIDO_VCL_3,                              0x44 },
+	{ WCD934X_SIDO_CCL_1,                              0x57 },
+	{ WCD934X_SIDO_CCL_2,                              0x92 },
+	{ WCD934X_SIDO_CCL_3,                              0x35 },
+	{ WCD934X_SIDO_CCL_4,                              0x61 },
+	{ WCD934X_SIDO_CCL_5,                              0x6d },
+	{ WCD934X_SIDO_CCL_6,                              0x60 },
+	{ WCD934X_SIDO_CCL_7,                              0x6f },
+	{ WCD934X_SIDO_CCL_8,                              0x6f },
+	{ WCD934X_SIDO_CCL_9,                              0x6e },
+	{ WCD934X_SIDO_CCL_10,                             0x26 },
+	{ WCD934X_SIDO_FILTER_1,                           0x92 },
+	{ WCD934X_SIDO_FILTER_2,                           0x54 },
+	{ WCD934X_SIDO_DRIVER_1,                           0x77 },
+	{ WCD934X_SIDO_DRIVER_2,                           0x55 },
+	{ WCD934X_SIDO_DRIVER_3,                           0x55 },
+	{ WCD934X_SIDO_CAL_CODE_EXT_1,                     0x9c },
+	{ WCD934X_SIDO_CAL_CODE_EXT_2,                     0x82 },
+	{ WCD934X_SIDO_CAL_CODE_OUT_1,                     0x00 },
+	{ WCD934X_SIDO_CAL_CODE_OUT_2,                     0x00 },
+	{ WCD934X_SIDO_TEST_1,                             0x00 },
+	{ WCD934X_SIDO_TEST_2,                             0x00 },
+	{ WCD934X_MBHC_CTL_CLK,                            0x30 },
+	{ WCD934X_MBHC_CTL_ANA,                            0x00 },
+	{ WCD934X_MBHC_CTL_SPARE_1,                        0x00 },
+	{ WCD934X_MBHC_CTL_SPARE_2,                        0x00 },
+	{ WCD934X_MBHC_CTL_BCS,                            0x00 },
+	{ WCD934X_MBHC_STATUS_SPARE_1,                     0x00 },
+	{ WCD934X_MBHC_TEST_CTL,                           0x00 },
+	{ WCD934X_VBADC_SUBBLOCK_EN,                       0xde },
+	{ WCD934X_VBADC_IBIAS_FE,                          0x58 },
+	{ WCD934X_VBADC_BIAS_ADC,                          0x51 },
+	{ WCD934X_VBADC_FE_CTRL,                           0x1c },
+	{ WCD934X_VBADC_ADC_REF,                           0x20 },
+	{ WCD934X_VBADC_ADC_IO,                            0x80 },
+	{ WCD934X_VBADC_ADC_SAR,                           0xff },
+	{ WCD934X_VBADC_DEBUG,                             0x00 },
+	{ WCD934X_LDOH_MODE,                               0x2b },
+	{ WCD934X_LDOH_BIAS,                               0x68 },
+	{ WCD934X_LDOH_STB_LOADS,                          0x00 },
+	{ WCD934X_LDOH_SLOWRAMP,                           0x50 },
+	{ WCD934X_MICB1_TEST_CTL_1,                        0x1a },
+	{ WCD934X_MICB1_TEST_CTL_2,                        0x18 },
+	{ WCD934X_MICB1_TEST_CTL_3,                        0xa4 },
+	{ WCD934X_MICB2_TEST_CTL_1,                        0x1a },
+	{ WCD934X_MICB2_TEST_CTL_2,                        0x18 },
+	{ WCD934X_MICB2_TEST_CTL_3,                        0xa4 },
+	{ WCD934X_MICB3_TEST_CTL_1,                        0x1a },
+	{ WCD934X_MICB3_TEST_CTL_2,                        0x18 },
+	{ WCD934X_MICB3_TEST_CTL_3,                        0xa4 },
+	{ WCD934X_MICB4_TEST_CTL_1,                        0x1a },
+	{ WCD934X_MICB4_TEST_CTL_2,                        0x18 },
+	{ WCD934X_MICB4_TEST_CTL_3,                        0xa4 },
+	{ WCD934X_TX_COM_ADC_VCM,                          0x39 },
+	{ WCD934X_TX_COM_BIAS_ATEST,                       0xc0 },
+	{ WCD934X_TX_COM_ADC_INT1_IB,                      0x6f },
+	{ WCD934X_TX_COM_ADC_INT2_IB,                      0x4f },
+	{ WCD934X_TX_COM_TXFE_DIV_CTL,                     0x2e },
+	{ WCD934X_TX_COM_TXFE_DIV_START,                   0x00 },
+	{ WCD934X_TX_COM_TXFE_DIV_STOP_9P6M,               0xc7 },
+	{ WCD934X_TX_COM_TXFE_DIV_STOP_12P288M,            0xff },
+	{ WCD934X_TX_1_2_TEST_EN,                          0xcc },
+	{ WCD934X_TX_1_2_ADC_IB,                           0x09 },
+	{ WCD934X_TX_1_2_ATEST_REFCTL,                     0x0a },
+	{ WCD934X_TX_1_2_TEST_CTL,                         0x38 },
+	{ WCD934X_TX_1_2_TEST_BLK_EN,                      0xff },
+	{ WCD934X_TX_1_2_TXFE_CLKDIV,                      0x00 },
+	{ WCD934X_TX_1_2_SAR1_ERR,                         0x00 },
+	{ WCD934X_TX_1_2_SAR2_ERR,                         0x00 },
+	{ WCD934X_TX_3_4_TEST_EN,                          0xcc },
+	{ WCD934X_TX_3_4_ADC_IB,                           0x09 },
+	{ WCD934X_TX_3_4_ATEST_REFCTL,                     0x0a },
+	{ WCD934X_TX_3_4_TEST_CTL,                         0x38 },
+	{ WCD934X_TX_3_4_TEST_BLK_EN,                      0xff },
+	{ WCD934X_TX_3_4_TXFE_CLKDIV,                      0x00 },
+	{ WCD934X_TX_3_4_SAR1_ERR,                         0x00 },
+	{ WCD934X_TX_3_4_SAR2_ERR,                         0x00 },
+	{ WCD934X_CLASSH_MODE_1,                           0x40 },
+	{ WCD934X_CLASSH_MODE_2,                           0x3a },
+	{ WCD934X_CLASSH_MODE_3,                           0x00 },
+	{ WCD934X_CLASSH_CTRL_VCL_1,                       0x70 },
+	{ WCD934X_CLASSH_CTRL_VCL_2,                       0x82 },
+	{ WCD934X_CLASSH_CTRL_CCL_1,                       0x31 },
+	{ WCD934X_CLASSH_CTRL_CCL_2,                       0x80 },
+	{ WCD934X_CLASSH_CTRL_CCL_3,                       0x80 },
+	{ WCD934X_CLASSH_CTRL_CCL_4,                       0x51 },
+	{ WCD934X_CLASSH_CTRL_CCL_5,                       0x00 },
+	{ WCD934X_CLASSH_BUCK_TMUX_A_D,                    0x00 },
+	{ WCD934X_CLASSH_BUCK_SW_DRV_CNTL,                 0x77 },
+	{ WCD934X_CLASSH_SPARE,                            0x00 },
+	{ WCD934X_FLYBACK_EN,                              0x4e },
+	{ WCD934X_FLYBACK_VNEG_CTRL_1,                     0x0b },
+	{ WCD934X_FLYBACK_VNEG_CTRL_2,                     0x45 },
+	{ WCD934X_FLYBACK_VNEG_CTRL_3,                     0x74 },
+	{ WCD934X_FLYBACK_VNEG_CTRL_4,                     0x7f },
+	{ WCD934X_FLYBACK_VNEG_CTRL_5,                     0x83 },
+	{ WCD934X_FLYBACK_VNEG_CTRL_6,                     0x98 },
+	{ WCD934X_FLYBACK_VNEG_CTRL_7,                     0xa9 },
+	{ WCD934X_FLYBACK_VNEG_CTRL_8,                     0x68 },
+	{ WCD934X_FLYBACK_VNEG_CTRL_9,                     0x64 },
+	{ WCD934X_FLYBACK_VNEGDAC_CTRL_1,                  0xed },
+	{ WCD934X_FLYBACK_VNEGDAC_CTRL_2,                  0xf0 },
+	{ WCD934X_FLYBACK_VNEGDAC_CTRL_3,                  0xa6 },
+	{ WCD934X_FLYBACK_CTRL_1,                          0x65 },
+	{ WCD934X_FLYBACK_TEST_CTL,                        0x00 },
+	{ WCD934X_RX_AUX_SW_CTL,                           0x00 },
+	{ WCD934X_RX_PA_AUX_IN_CONN,                       0x00 },
+	{ WCD934X_RX_TIMER_DIV,                            0x32 },
+	{ WCD934X_RX_OCP_CTL,                              0x1f },
+	{ WCD934X_RX_OCP_COUNT,                            0x77 },
+	{ WCD934X_RX_BIAS_EAR_DAC,                         0xa0 },
+	{ WCD934X_RX_BIAS_EAR_AMP,                         0xaa },
+	{ WCD934X_RX_BIAS_HPH_LDO,                         0xa9 },
+	{ WCD934X_RX_BIAS_HPH_PA,                          0xaa },
+	{ WCD934X_RX_BIAS_HPH_RDACBUFF_CNP2,               0x8a },
+	{ WCD934X_RX_BIAS_HPH_RDAC_LDO,                    0x88 },
+	{ WCD934X_RX_BIAS_HPH_CNP1,                        0x82 },
+	{ WCD934X_RX_BIAS_HPH_LOWPOWER,                    0x82 },
+	{ WCD934X_RX_BIAS_DIFFLO_PA,                       0x80 },
+	{ WCD934X_RX_BIAS_DIFFLO_REF,                      0x88 },
+	{ WCD934X_RX_BIAS_DIFFLO_LDO,                      0x88 },
+	{ WCD934X_RX_BIAS_SELO_DAC_PA,                     0xa8 },
+	{ WCD934X_RX_BIAS_BUCK_RST,                        0x08 },
+	{ WCD934X_RX_BIAS_BUCK_VREF_ERRAMP,                0x44 },
+	{ WCD934X_RX_BIAS_FLYB_ERRAMP,                     0x40 },
+	{ WCD934X_RX_BIAS_FLYB_BUFF,                       0xaa },
+	{ WCD934X_RX_BIAS_FLYB_MID_RST,                    0x14 },
+	{ WCD934X_HPH_L_STATUS,                            0x04 },
+	{ WCD934X_HPH_R_STATUS,                            0x04 },
+	{ WCD934X_HPH_CNP_EN,                              0x80 },
+	{ WCD934X_HPH_CNP_WG_CTL,                          0x9a },
+	{ WCD934X_HPH_CNP_WG_TIME,                         0x14 },
+	{ WCD934X_HPH_OCP_CTL,                             0x28 },
+	{ WCD934X_HPH_AUTO_CHOP,                           0x16 },
+	{ WCD934X_HPH_CHOP_CTL,                            0x83 },
+	{ WCD934X_HPH_PA_CTL1,                             0x46 },
+	{ WCD934X_HPH_PA_CTL2,                             0x50 },
+	{ WCD934X_HPH_L_EN,                                0x80 },
+	{ WCD934X_HPH_L_TEST,                              0xe0 },
+	{ WCD934X_HPH_L_ATEST,                             0x50 },
+	{ WCD934X_HPH_R_EN,                                0x80 },
+	{ WCD934X_HPH_R_TEST,                              0xe0 },
+	{ WCD934X_HPH_R_ATEST,                             0x54 },
+	{ WCD934X_HPH_RDAC_CLK_CTL1,                       0x99 },
+	{ WCD934X_HPH_RDAC_CLK_CTL2,                       0x9b },
+	{ WCD934X_HPH_RDAC_LDO_CTL,                        0x33 },
+	{ WCD934X_HPH_RDAC_CHOP_CLK_LP_CTL,                0x00 },
+	{ WCD934X_HPH_REFBUFF_UHQA_CTL,                    0xa8 },
+	{ WCD934X_HPH_REFBUFF_LP_CTL,                      0x0a },
+	{ WCD934X_HPH_L_DAC_CTL,                           0x00 },
+	{ WCD934X_HPH_R_DAC_CTL,                           0x00 },
+	{ WCD934X_EAR_EN_REG,                              0x60 },
+	{ WCD934X_EAR_CMBUFF,                              0x05 },
+	{ WCD934X_EAR_ICTL,                                0x40 },
+	{ WCD934X_EAR_EN_DBG_CTL,                          0x00 },
+	{ WCD934X_EAR_CNP,                                 0xe0 },
+	{ WCD934X_EAR_DAC_CTL_ATEST,                       0x00 },
+	{ WCD934X_EAR_STATUS_REG,                          0x04 },
+	{ WCD934X_EAR_EAR_MISC,                            0x28 },
+	{ WCD934X_DIFF_LO_MISC,                            0x03 },
+	{ WCD934X_DIFF_LO_LO2_COMPANDER,                   0x00 },
+	{ WCD934X_DIFF_LO_LO1_COMPANDER,                   0x00 },
+	{ WCD934X_DIFF_LO_COMMON,                          0x40 },
+	{ WCD934X_DIFF_LO_BYPASS_EN,                       0x00 },
+	{ WCD934X_DIFF_LO_CNP,                             0x20 },
+	{ WCD934X_DIFF_LO_CORE_OUT_PROG,                   0xa0 },
+	{ WCD934X_DIFF_LO_LDO_OUT_PROG,                    0x00 },
+	{ WCD934X_DIFF_LO_COM_SWCAP_REFBUF_FREQ,           0x8b },
+	{ WCD934X_DIFF_LO_COM_PA_FREQ,                     0xb0 },
+	{ WCD934X_DIFF_LO_RESERVED_REG,                    0x60 },
+	{ WCD934X_DIFF_LO_LO1_STATUS_1,                    0x00 },
+	{ WCD934X_DIFF_LO_LO1_STATUS_2,                    0x00 },
+	{ WCD934X_ANA_NEW_PAGE_REGISTER,                   0x00 },
+	{ WCD934X_HPH_NEW_ANA_HPH2,                        0x00 },
+	{ WCD934X_HPH_NEW_ANA_HPH3,                        0x00 },
+	{ WCD934X_SLNQ_ANA_EN,                             0x02 },
+	{ WCD934X_SLNQ_ANA_STATUS,                         0x00 },
+	{ WCD934X_SLNQ_ANA_LDO_CONFIG,                     0xea },
+	{ WCD934X_SLNQ_ANA_LDO_OCP_CONFIG,                 0x95 },
+	{ WCD934X_SLNQ_ANA_TX_LDO_CONFIG,                  0xb6 },
+	{ WCD934X_SLNQ_ANA_TX_DRV_CONFIG,                  0x26 },
+	{ WCD934X_SLNQ_ANA_RX_CONFIG_1,                    0x64 },
+	{ WCD934X_SLNQ_ANA_RX_CONFIG_2,                    0x40 },
+	{ WCD934X_SLNQ_ANA_PLL_ENABLES,                    0x00 },
+	{ WCD934X_SLNQ_ANA_PLL_PRESET,                     0x08 },
+	{ WCD934X_SLNQ_ANA_PLL_STATUS,                     0x00 },
+	{ WCD934X_CLK_SYS_PLL_ENABLES,                     0x00 },
+	{ WCD934X_CLK_SYS_PLL_PRESET,                      0x00 },
+	{ WCD934X_CLK_SYS_PLL_STATUS,                      0x00 },
+	{ WCD934X_CLK_SYS_MCLK_PRG,                        0x00 },
+	{ WCD934X_CLK_SYS_MCLK2_PRG1,                      0x00 },
+	{ WCD934X_CLK_SYS_MCLK2_PRG2,                      0x00 },
+	{ WCD934X_CLK_SYS_XO_PRG,                          0x00 },
+	{ WCD934X_CLK_SYS_XO_CAP_XTP,                      0x00 },
+	{ WCD934X_CLK_SYS_XO_CAP_XTM,                      0x00 },
+	{ WCD934X_BOOST_BST_EN_DLY,                        0x40 },
+	{ WCD934X_BOOST_CTRL_ILIM,                         0x9c },
+	{ WCD934X_BOOST_VOUT_SETTING,                      0xca },
+	{ WCD934X_SIDO_NEW_VOUT_A_STARTUP,                 0x05 },
+	{ WCD934X_SIDO_NEW_VOUT_D_STARTUP,                 0x0d },
+	{ WCD934X_SIDO_NEW_VOUT_D_FREQ1,                   0x07 },
+	{ WCD934X_SIDO_NEW_VOUT_D_FREQ2,                   0x00 },
+	{ WCD934X_MBHC_NEW_ELECT_REM_CLAMP_CTL,            0x00 },
+	{ WCD934X_MBHC_NEW_CTL_1,                          0x02 },
+	{ WCD934X_MBHC_NEW_CTL_2,                          0x05 },
+	{ WCD934X_MBHC_NEW_PLUG_DETECT_CTL,                0xe9 },
+	{ WCD934X_MBHC_NEW_ZDET_ANA_CTL,                   0x0f },
+	{ WCD934X_MBHC_NEW_ZDET_RAMP_CTL,                  0x00 },
+	{ WCD934X_MBHC_NEW_FSM_STATUS,                     0x00 },
+	{ WCD934X_MBHC_NEW_ADC_RESULT,                     0x00 },
+	{ WCD934X_TX_NEW_AMIC_4_5_SEL,                     0x00 },
+	{ WCD934X_VBADC_NEW_ADC_MODE,                      0x10 },
+	{ WCD934X_VBADC_NEW_ADC_DOUTMSB,                   0x00 },
+	{ WCD934X_VBADC_NEW_ADC_DOUTLSB,                   0x00 },
+	{ WCD934X_HPH_NEW_INT_RDAC_GAIN_CTL,               0x00 },
+	{ WCD934X_HPH_NEW_INT_RDAC_HD2_CTL,                0xa0 },
+	{ WCD934X_HPH_NEW_INT_RDAC_VREF_CTL,               0x10 },
+	{ WCD934X_HPH_NEW_INT_RDAC_OVERRIDE_CTL,           0x00 },
+	{ WCD934X_HPH_NEW_INT_RDAC_MISC1,                  0x00 },
+	{ WCD934X_HPH_NEW_INT_PA_MISC1,                    0x22 },
+	{ WCD934X_HPH_NEW_INT_PA_MISC2,                    0x00 },
+	{ WCD934X_HPH_NEW_INT_PA_RDAC_MISC,                0x00 },
+	{ WCD934X_HPH_NEW_INT_HPH_TIMER1,                  0xfe },
+	{ WCD934X_HPH_NEW_INT_HPH_TIMER2,                  0x02 },
+	{ WCD934X_HPH_NEW_INT_HPH_TIMER3,                  0x4e },
+	{ WCD934X_HPH_NEW_INT_HPH_TIMER4,                  0x54 },
+	{ WCD934X_HPH_NEW_INT_PA_RDAC_MISC2,               0x00 },
+	{ WCD934X_HPH_NEW_INT_PA_RDAC_MISC3,               0x00 },
+	{ WCD934X_RX_NEW_INT_HPH_RDAC_BIAS_LOHIFI,         0x62 },
+	{ WCD934X_RX_NEW_INT_HPH_RDAC_BIAS_ULP,            0x01 },
+	{ WCD934X_RX_NEW_INT_HPH_RDAC_LDO_LP,              0x11 },
+	{ WCD934X_SLNQ_INT_ANA_INT_LDO_TEST,               0x0d },
+	{ WCD934X_SLNQ_INT_ANA_INT_LDO_DEBUG_1,            0x85 },
+	{ WCD934X_SLNQ_INT_ANA_INT_LDO_DEBUG_2,            0xb4 },
+	{ WCD934X_SLNQ_INT_ANA_INT_TX_LDO_TEST,            0x16 },
+	{ WCD934X_SLNQ_INT_ANA_INT_TX_DRV_TEST,            0x00 },
+	{ WCD934X_SLNQ_INT_ANA_INT_RX_TEST,                0x00 },
+	{ WCD934X_SLNQ_INT_ANA_INT_RX_TEST_STATUS,         0x00 },
+	{ WCD934X_SLNQ_INT_ANA_INT_RX_DEBUG_1,             0x50 },
+	{ WCD934X_SLNQ_INT_ANA_INT_RX_DEBUG_2,             0x04 },
+	{ WCD934X_SLNQ_INT_ANA_INT_CLK_CTRL,               0x00 },
+	{ WCD934X_SLNQ_INT_ANA_INT_RESERVED_1,             0x00 },
+	{ WCD934X_SLNQ_INT_ANA_INT_RESERVED_2,             0x00 },
+	{ WCD934X_SLNQ_INT_ANA_INT_PLL_POST_DIV_REG0,      0x00 },
+	{ WCD934X_SLNQ_INT_ANA_INT_PLL_POST_DIV_REG1,      0x00 },
+	{ WCD934X_SLNQ_INT_ANA_INT_PLL_REF_DIV_REG0,       0x00 },
+	{ WCD934X_SLNQ_INT_ANA_INT_PLL_REF_DIV_REG1,       0x00 },
+	{ WCD934X_SLNQ_INT_ANA_INT_PLL_FILTER_REG0,        0x00 },
+	{ WCD934X_SLNQ_INT_ANA_INT_PLL_FILTER_REG1,        0x00 },
+	{ WCD934X_SLNQ_INT_ANA_INT_PLL_L_VAL,              0x00 },
+	{ WCD934X_SLNQ_INT_ANA_INT_PLL_M_VAL,              0x00 },
+	{ WCD934X_SLNQ_INT_ANA_INT_PLL_N_VAL,              0x00 },
+	{ WCD934X_SLNQ_INT_ANA_INT_PLL_TEST_REG0,          0x00 },
+	{ WCD934X_SLNQ_INT_ANA_INT_PLL_PFD_CP_DSM_PROG,    0x00 },
+	{ WCD934X_SLNQ_INT_ANA_INT_PLL_VCO_PROG,           0x00 },
+	{ WCD934X_SLNQ_INT_ANA_INT_PLL_TEST_REG1,          0x00 },
+	{ WCD934X_SLNQ_INT_ANA_INT_PLL_LDO_LOCK_CFG,       0x00 },
+	{ WCD934X_SLNQ_INT_ANA_INT_PLL_DIG_LOCK_DET_CFG,   0x00 },
+	{ WCD934X_CLK_SYS_INT_POST_DIV_REG0,               0x00 },
+	{ WCD934X_CLK_SYS_INT_POST_DIV_REG1,               0x00 },
+	{ WCD934X_CLK_SYS_INT_REF_DIV_REG0,                0x00 },
+	{ WCD934X_CLK_SYS_INT_REF_DIV_REG1,                0x00 },
+	{ WCD934X_CLK_SYS_INT_FILTER_REG0,                 0x00 },
+	{ WCD934X_CLK_SYS_INT_FILTER_REG1,                 0x00 },
+	{ WCD934X_CLK_SYS_INT_PLL_L_VAL,                   0x00 },
+	{ WCD934X_CLK_SYS_INT_PLL_M_VAL,                   0x00 },
+	{ WCD934X_CLK_SYS_INT_PLL_N_VAL,                   0x00 },
+	{ WCD934X_CLK_SYS_INT_TEST_REG0,                   0x00 },
+	{ WCD934X_CLK_SYS_INT_PFD_CP_DSM_PROG,             0x00 },
+	{ WCD934X_CLK_SYS_INT_VCO_PROG,                    0x00 },
+	{ WCD934X_CLK_SYS_INT_TEST_REG1,                   0x00 },
+	{ WCD934X_CLK_SYS_INT_LDO_LOCK_CFG,                0x00 },
+	{ WCD934X_CLK_SYS_INT_DIG_LOCK_DET_CFG,            0x00 },
+	{ WCD934X_CLK_SYS_INT_CLK_TEST1,                   0x00 },
+	{ WCD934X_CLK_SYS_INT_CLK_TEST2,                   0x00 },
+	{ WCD934X_CLK_SYS_INT_CLK_TEST3,                   0x00 },
+	{ WCD934X_CLK_SYS_INT_XO_TEST1,                    0x98 },
+	{ WCD934X_CLK_SYS_INT_XO_TEST2,                    0x00 },
+	{ WCD934X_BOOST_INT_VCOMP_HYST,                    0x02 },
+	{ WCD934X_BOOST_INT_VLOOP_FILTER,                  0xef },
+	{ WCD934X_BOOST_INT_CTRL_IDELTA,                   0xa8 },
+	{ WCD934X_BOOST_INT_CTRL_ILIM_STARTUP,             0x17 },
+	{ WCD934X_BOOST_INT_CTRL_MIN_ONTIME,               0x5f },
+	{ WCD934X_BOOST_INT_CTRL_MAX_ONTIME,               0x88 },
+	{ WCD934X_BOOST_INT_CTRL_TIMING,                   0x0a },
+	{ WCD934X_BOOST_INT_TMUX_A_D,                      0x00 },
+	{ WCD934X_BOOST_INT_SW_DRV_CNTL,                   0xf8 },
+	{ WCD934X_BOOST_INT_SPARE1,                        0x00 },
+	{ WCD934X_BOOST_INT_SPARE2,                        0x00 },
+	{ WCD934X_SIDO_NEW_INT_RAMP_STATUS,                0x00 },
+	{ WCD934X_SIDO_NEW_INT_SPARE_1,                    0x00 },
+	{ WCD934X_SIDO_NEW_INT_DEBUG_VOUT_SETTING_A,       0x64 },
+	{ WCD934X_SIDO_NEW_INT_DEBUG_VOUT_SETTING_D,       0x40 },
+	{ WCD934X_SIDO_NEW_INT_RAMP_INC_WAIT,              0x24 },
+	{ WCD934X_SIDO_NEW_INT_DYNAMIC_IPEAK_CTL,          0x09 },
+	{ WCD934X_SIDO_NEW_INT_RAMP_IBLEED_CTL,            0x7d },
+	{ WCD934X_SIDO_NEW_INT_DEBUG_CPROVR_TEST,          0x00 },
+	{ WCD934X_SIDO_NEW_INT_RAMP_CTL_A,                 0x14 },
+	{ WCD934X_SIDO_NEW_INT_RAMP_CTL_D,                 0x14 },
+	{ WCD934X_SIDO_NEW_INT_RAMP_TIMEOUT_PERIOD,        0x33 },
+	{ WCD934X_SIDO_NEW_INT_DYNAMIC_IPEAK_SETTING1,     0x3f },
+	{ WCD934X_SIDO_NEW_INT_DYNAMIC_IPEAK_SETTING2,     0x74 },
+	{ WCD934X_SIDO_NEW_INT_DYNAMIC_IPEAK_SETTING3,     0x33 },
+	{ WCD934X_SIDO_NEW_INT_HIGH_ACCU_MODE_SEL1,        0x1d },
+	{ WCD934X_SIDO_NEW_INT_HIGH_ACCU_MODE_SEL2,        0x0a },
+	{ WCD934X_MBHC_NEW_INT_SLNQ_HPF,                   0x50 },
+	{ WCD934X_MBHC_NEW_INT_SLNQ_REF,                   0x24 },
+	{ WCD934X_MBHC_NEW_INT_SLNQ_COMP,                  0x50 },
+	{ WCD934X_MBHC_NEW_INT_SPARE_2,                    0x00 },
+	{ WCD934X_PAGE10_PAGE_REGISTER,                    0x00 },
+	{ WCD934X_CDC_ANC0_CLK_RESET_CTL,                  0x00 },
+	{ WCD934X_CDC_ANC0_MODE_1_CTL,                     0x00 },
+	{ WCD934X_CDC_ANC0_MODE_2_CTL,                     0x00 },
+	{ WCD934X_CDC_ANC0_FF_SHIFT,                       0x00 },
+	{ WCD934X_CDC_ANC0_FB_SHIFT,                       0x00 },
+	{ WCD934X_CDC_ANC0_LPF_FF_A_CTL,                   0x00 },
+	{ WCD934X_CDC_ANC0_LPF_FF_B_CTL,                   0x00 },
+	{ WCD934X_CDC_ANC0_LPF_FB_CTL,                     0x00 },
+	{ WCD934X_CDC_ANC0_SMLPF_CTL,                      0x00 },
+	{ WCD934X_CDC_ANC0_DCFLT_SHIFT_CTL,                0x00 },
+	{ WCD934X_CDC_ANC0_IIR_ADAPT_CTL,                  0x00 },
+	{ WCD934X_CDC_ANC0_IIR_COEFF_1_CTL,                0x00 },
+	{ WCD934X_CDC_ANC0_IIR_COEFF_2_CTL,                0x00 },
+	{ WCD934X_CDC_ANC0_FF_A_GAIN_CTL,                  0x00 },
+	{ WCD934X_CDC_ANC0_FF_B_GAIN_CTL,                  0x00 },
+	{ WCD934X_CDC_ANC0_FB_GAIN_CTL,                    0x00 },
+	{ WCD934X_CDC_ANC0_RC_COMMON_CTL,                  0x00 },
+	{ WCD934X_CDC_ANC0_FIFO_COMMON_CTL,                0x88 },
+	{ WCD934X_CDC_ANC0_RC0_STATUS_FMIN_CNTR,           0x00 },
+	{ WCD934X_CDC_ANC0_RC1_STATUS_FMIN_CNTR,           0x00 },
+	{ WCD934X_CDC_ANC0_RC0_STATUS_FMAX_CNTR,           0x00 },
+	{ WCD934X_CDC_ANC0_RC1_STATUS_FMAX_CNTR,           0x00 },
+	{ WCD934X_CDC_ANC0_STATUS_FIFO,                    0x00 },
+	{ WCD934X_CDC_ANC1_CLK_RESET_CTL,                  0x00 },
+	{ WCD934X_CDC_ANC1_MODE_1_CTL,                     0x00 },
+	{ WCD934X_CDC_ANC1_MODE_2_CTL,                     0x00 },
+	{ WCD934X_CDC_ANC1_FF_SHIFT,                       0x00 },
+	{ WCD934X_CDC_ANC1_FB_SHIFT,                       0x00 },
+	{ WCD934X_CDC_ANC1_LPF_FF_A_CTL,                   0x00 },
+	{ WCD934X_CDC_ANC1_LPF_FF_B_CTL,                   0x00 },
+	{ WCD934X_CDC_ANC1_LPF_FB_CTL,                     0x00 },
+	{ WCD934X_CDC_ANC1_SMLPF_CTL,                      0x00 },
+	{ WCD934X_CDC_ANC1_DCFLT_SHIFT_CTL,                0x00 },
+	{ WCD934X_CDC_ANC1_IIR_ADAPT_CTL,                  0x00 },
+	{ WCD934X_CDC_ANC1_IIR_COEFF_1_CTL,                0x00 },
+	{ WCD934X_CDC_ANC1_IIR_COEFF_2_CTL,                0x00 },
+	{ WCD934X_CDC_ANC1_FF_A_GAIN_CTL,                  0x00 },
+	{ WCD934X_CDC_ANC1_FF_B_GAIN_CTL,                  0x00 },
+	{ WCD934X_CDC_ANC1_FB_GAIN_CTL,                    0x00 },
+	{ WCD934X_CDC_ANC1_RC_COMMON_CTL,                  0x00 },
+	{ WCD934X_CDC_ANC1_FIFO_COMMON_CTL,                0x88 },
+	{ WCD934X_CDC_ANC1_RC0_STATUS_FMIN_CNTR,           0x00 },
+	{ WCD934X_CDC_ANC1_RC1_STATUS_FMIN_CNTR,           0x00 },
+	{ WCD934X_CDC_ANC1_RC0_STATUS_FMAX_CNTR,           0x00 },
+	{ WCD934X_CDC_ANC1_RC1_STATUS_FMAX_CNTR,           0x00 },
+	{ WCD934X_CDC_ANC1_STATUS_FIFO,                    0x00 },
+	{ WCD934X_CDC_TX0_TX_PATH_CTL,                     0x04 },
+	{ WCD934X_CDC_TX0_TX_PATH_CFG0,                    0x10 },
+	{ WCD934X_CDC_TX0_TX_PATH_CFG1,                    0x03 },
+	{ WCD934X_CDC_TX0_TX_VOL_CTL,                      0x00 },
+	{ WCD934X_CDC_TX0_TX_PATH_192_CTL,                 0x00 },
+	{ WCD934X_CDC_TX0_TX_PATH_192_CFG,                 0x00 },
+	{ WCD934X_CDC_TX0_TX_PATH_SEC0,                    0x00 },
+	{ WCD934X_CDC_TX0_TX_PATH_SEC1,                    0x00 },
+	{ WCD934X_CDC_TX0_TX_PATH_SEC2,                    0x01 },
+	{ WCD934X_CDC_TX0_TX_PATH_SEC3,                    0x3c },
+	{ WCD934X_CDC_TX0_TX_PATH_SEC4,                    0x20 },
+	{ WCD934X_CDC_TX0_TX_PATH_SEC5,                    0x00 },
+	{ WCD934X_CDC_TX0_TX_PATH_SEC6,                    0x00 },
+	{ WCD934X_CDC_TX0_TX_PATH_SEC7,                    0x25 },
+	{ WCD934X_CDC_TX1_TX_PATH_CTL,                     0x04 },
+	{ WCD934X_CDC_TX1_TX_PATH_CFG0,                    0x10 },
+	{ WCD934X_CDC_TX1_TX_PATH_CFG1,                    0x03 },
+	{ WCD934X_CDC_TX1_TX_VOL_CTL,                      0x00 },
+	{ WCD934X_CDC_TX1_TX_PATH_192_CTL,                 0x00 },
+	{ WCD934X_CDC_TX1_TX_PATH_192_CFG,                 0x00 },
+	{ WCD934X_CDC_TX1_TX_PATH_SEC0,                    0x00 },
+	{ WCD934X_CDC_TX1_TX_PATH_SEC1,                    0x00 },
+	{ WCD934X_CDC_TX1_TX_PATH_SEC2,                    0x01 },
+	{ WCD934X_CDC_TX1_TX_PATH_SEC3,                    0x3c },
+	{ WCD934X_CDC_TX1_TX_PATH_SEC4,                    0x20 },
+	{ WCD934X_CDC_TX1_TX_PATH_SEC5,                    0x00 },
+	{ WCD934X_CDC_TX1_TX_PATH_SEC6,                    0x00 },
+	{ WCD934X_CDC_TX2_TX_PATH_CTL,                     0x04 },
+	{ WCD934X_CDC_TX2_TX_PATH_CFG0,                    0x10 },
+	{ WCD934X_CDC_TX2_TX_PATH_CFG1,                    0x03 },
+	{ WCD934X_CDC_TX2_TX_VOL_CTL,                      0x00 },
+	{ WCD934X_CDC_TX2_TX_PATH_192_CTL,                 0x00 },
+	{ WCD934X_CDC_TX2_TX_PATH_192_CFG,                 0x00 },
+	{ WCD934X_CDC_TX2_TX_PATH_SEC0,                    0x00 },
+	{ WCD934X_CDC_TX2_TX_PATH_SEC1,                    0x00 },
+	{ WCD934X_CDC_TX2_TX_PATH_SEC2,                    0x01 },
+	{ WCD934X_CDC_TX2_TX_PATH_SEC3,                    0x3c },
+	{ WCD934X_CDC_TX2_TX_PATH_SEC4,                    0x20 },
+	{ WCD934X_CDC_TX2_TX_PATH_SEC5,                    0x00 },
+	{ WCD934X_CDC_TX2_TX_PATH_SEC6,                    0x00 },
+	{ WCD934X_CDC_TX3_TX_PATH_CTL,                     0x04 },
+	{ WCD934X_CDC_TX3_TX_PATH_CFG0,                    0x10 },
+	{ WCD934X_CDC_TX3_TX_PATH_CFG1,                    0x03 },
+	{ WCD934X_CDC_TX3_TX_VOL_CTL,                      0x00 },
+	{ WCD934X_CDC_TX3_TX_PATH_192_CTL,                 0x00 },
+	{ WCD934X_CDC_TX3_TX_PATH_192_CFG,                 0x00 },
+	{ WCD934X_CDC_TX3_TX_PATH_SEC0,                    0x00 },
+	{ WCD934X_CDC_TX3_TX_PATH_SEC1,                    0x00 },
+	{ WCD934X_CDC_TX3_TX_PATH_SEC2,                    0x01 },
+	{ WCD934X_CDC_TX3_TX_PATH_SEC3,                    0x3c },
+	{ WCD934X_CDC_TX3_TX_PATH_SEC4,                    0x20 },
+	{ WCD934X_CDC_TX3_TX_PATH_SEC5,                    0x00 },
+	{ WCD934X_CDC_TX3_TX_PATH_SEC6,                    0x00 },
+	{ WCD934X_CDC_TX4_TX_PATH_CTL,                     0x04 },
+	{ WCD934X_CDC_TX4_TX_PATH_CFG0,                    0x10 },
+	{ WCD934X_CDC_TX4_TX_PATH_CFG1,                    0x03 },
+	{ WCD934X_CDC_TX4_TX_VOL_CTL,                      0x00 },
+	{ WCD934X_CDC_TX4_TX_PATH_192_CTL,                 0x00 },
+	{ WCD934X_CDC_TX4_TX_PATH_192_CFG,                 0x00 },
+	{ WCD934X_CDC_TX4_TX_PATH_SEC0,                    0x00 },
+	{ WCD934X_CDC_TX4_TX_PATH_SEC1,                    0x00 },
+	{ WCD934X_CDC_TX4_TX_PATH_SEC2,                    0x01 },
+	{ WCD934X_CDC_TX4_TX_PATH_SEC3,                    0x3c },
+	{ WCD934X_CDC_TX4_TX_PATH_SEC4,                    0x20 },
+	{ WCD934X_CDC_TX4_TX_PATH_SEC5,                    0x00 },
+	{ WCD934X_CDC_TX4_TX_PATH_SEC6,                    0x00 },
+	{ WCD934X_CDC_TX5_TX_PATH_CTL,                     0x04 },
+	{ WCD934X_CDC_TX5_TX_PATH_CFG0,                    0x10 },
+	{ WCD934X_CDC_TX5_TX_PATH_CFG1,                    0x03 },
+	{ WCD934X_CDC_TX5_TX_VOL_CTL,                      0x00 },
+	{ WCD934X_CDC_TX5_TX_PATH_192_CTL,                 0x00 },
+	{ WCD934X_CDC_TX5_TX_PATH_192_CFG,                 0x00 },
+	{ WCD934X_CDC_TX5_TX_PATH_SEC0,                    0x00 },
+	{ WCD934X_CDC_TX5_TX_PATH_SEC1,                    0x00 },
+	{ WCD934X_CDC_TX5_TX_PATH_SEC2,                    0x01 },
+	{ WCD934X_CDC_TX5_TX_PATH_SEC3,                    0x3c },
+	{ WCD934X_CDC_TX5_TX_PATH_SEC4,                    0x20 },
+	{ WCD934X_CDC_TX5_TX_PATH_SEC5,                    0x00 },
+	{ WCD934X_CDC_TX5_TX_PATH_SEC6,                    0x00 },
+	{ WCD934X_CDC_TX6_TX_PATH_CTL,                     0x04 },
+	{ WCD934X_CDC_TX6_TX_PATH_CFG0,                    0x10 },
+	{ WCD934X_CDC_TX6_TX_PATH_CFG1,                    0x03 },
+	{ WCD934X_CDC_TX6_TX_VOL_CTL,                      0x00 },
+	{ WCD934X_CDC_TX6_TX_PATH_192_CTL,                 0x00 },
+	{ WCD934X_CDC_TX6_TX_PATH_192_CFG,                 0x00 },
+	{ WCD934X_CDC_TX6_TX_PATH_SEC0,                    0x00 },
+	{ WCD934X_CDC_TX6_TX_PATH_SEC1,                    0x00 },
+	{ WCD934X_CDC_TX6_TX_PATH_SEC2,                    0x01 },
+	{ WCD934X_CDC_TX6_TX_PATH_SEC3,                    0x3c },
+	{ WCD934X_CDC_TX6_TX_PATH_SEC4,                    0x20 },
+	{ WCD934X_CDC_TX6_TX_PATH_SEC5,                    0x00 },
+	{ WCD934X_CDC_TX6_TX_PATH_SEC6,                    0x00 },
+	{ WCD934X_CDC_TX7_TX_PATH_CTL,                     0x04 },
+	{ WCD934X_CDC_TX7_TX_PATH_CFG0,                    0x10 },
+	{ WCD934X_CDC_TX7_TX_PATH_CFG1,                    0x03 },
+	{ WCD934X_CDC_TX7_TX_VOL_CTL,                      0x00 },
+	{ WCD934X_CDC_TX7_TX_PATH_192_CTL,                 0x00 },
+	{ WCD934X_CDC_TX7_TX_PATH_192_CFG,                 0x00 },
+	{ WCD934X_CDC_TX7_TX_PATH_SEC0,                    0x00 },
+	{ WCD934X_CDC_TX7_TX_PATH_SEC1,                    0x00 },
+	{ WCD934X_CDC_TX7_TX_PATH_SEC2,                    0x01 },
+	{ WCD934X_CDC_TX7_TX_PATH_SEC3,                    0x3c },
+	{ WCD934X_CDC_TX7_TX_PATH_SEC4,                    0x20 },
+	{ WCD934X_CDC_TX7_TX_PATH_SEC5,                    0x00 },
+	{ WCD934X_CDC_TX7_TX_PATH_SEC6,                    0x00 },
+	{ WCD934X_CDC_TX8_TX_PATH_CTL,                     0x04 },
+	{ WCD934X_CDC_TX8_TX_PATH_CFG0,                    0x10 },
+	{ WCD934X_CDC_TX8_TX_PATH_CFG1,                    0x03 },
+	{ WCD934X_CDC_TX8_TX_VOL_CTL,                      0x00 },
+	{ WCD934X_CDC_TX8_TX_PATH_192_CTL,                 0x00 },
+	{ WCD934X_CDC_TX8_TX_PATH_192_CFG,                 0x00 },
+	{ WCD934X_CDC_TX8_TX_PATH_SEC0,                    0x00 },
+	{ WCD934X_CDC_TX8_TX_PATH_SEC1,                    0x00 },
+	{ WCD934X_CDC_TX8_TX_PATH_SEC2,                    0x01 },
+	{ WCD934X_CDC_TX8_TX_PATH_SEC3,                    0x3c },
+	{ WCD934X_CDC_TX8_TX_PATH_SEC4,                    0x20 },
+	{ WCD934X_CDC_TX8_TX_PATH_SEC5,                    0x00 },
+	{ WCD934X_CDC_TX8_TX_PATH_SEC6,                    0x00 },
+	{ WCD934X_CDC_TX9_SPKR_PROT_PATH_CTL,              0x02 },
+	{ WCD934X_CDC_TX9_SPKR_PROT_PATH_CFG0,             0x00 },
+	{ WCD934X_CDC_TX10_SPKR_PROT_PATH_CTL,             0x02 },
+	{ WCD934X_CDC_TX10_SPKR_PROT_PATH_CFG0,            0x00 },
+	{ WCD934X_CDC_TX11_SPKR_PROT_PATH_CTL,             0x02 },
+	{ WCD934X_CDC_TX11_SPKR_PROT_PATH_CFG0,            0x00 },
+	{ WCD934X_CDC_TX12_SPKR_PROT_PATH_CTL,             0x02 },
+	{ WCD934X_CDC_TX12_SPKR_PROT_PATH_CFG0,            0x00 },
+	{ WCD934X_PAGE11_PAGE_REGISTER,                    0x00 },
+	{ WCD934X_CDC_COMPANDER1_CTL0,                     0x60 },
+	{ WCD934X_CDC_COMPANDER1_CTL1,                     0xdb },
+	{ WCD934X_CDC_COMPANDER1_CTL2,                     0xff },
+	{ WCD934X_CDC_COMPANDER1_CTL3,                     0x35 },
+	{ WCD934X_CDC_COMPANDER1_CTL4,                     0xff },
+	{ WCD934X_CDC_COMPANDER1_CTL5,                     0x00 },
+	{ WCD934X_CDC_COMPANDER1_CTL6,                     0x01 },
+	{ WCD934X_CDC_COMPANDER1_CTL7,                     0x08 },
+	{ WCD934X_CDC_COMPANDER2_CTL0,                     0x60 },
+	{ WCD934X_CDC_COMPANDER2_CTL1,                     0xdb },
+	{ WCD934X_CDC_COMPANDER2_CTL2,                     0xff },
+	{ WCD934X_CDC_COMPANDER2_CTL3,                     0x35 },
+	{ WCD934X_CDC_COMPANDER2_CTL4,                     0xff },
+	{ WCD934X_CDC_COMPANDER2_CTL5,                     0x00 },
+	{ WCD934X_CDC_COMPANDER2_CTL6,                     0x01 },
+	{ WCD934X_CDC_COMPANDER2_CTL7,                     0x08 },
+	{ WCD934X_CDC_COMPANDER3_CTL0,                     0x60 },
+	{ WCD934X_CDC_COMPANDER3_CTL1,                     0xdb },
+	{ WCD934X_CDC_COMPANDER3_CTL2,                     0xff },
+	{ WCD934X_CDC_COMPANDER3_CTL3,                     0x35 },
+	{ WCD934X_CDC_COMPANDER3_CTL4,                     0xff },
+	{ WCD934X_CDC_COMPANDER3_CTL5,                     0x00 },
+	{ WCD934X_CDC_COMPANDER3_CTL6,                     0x01 },
+	{ WCD934X_CDC_COMPANDER3_CTL7,                     0x08 },
+	{ WCD934X_CDC_COMPANDER4_CTL0,                     0x60 },
+	{ WCD934X_CDC_COMPANDER4_CTL1,                     0xdb },
+	{ WCD934X_CDC_COMPANDER4_CTL2,                     0xff },
+	{ WCD934X_CDC_COMPANDER4_CTL3,                     0x35 },
+	{ WCD934X_CDC_COMPANDER4_CTL4,                     0xff },
+	{ WCD934X_CDC_COMPANDER4_CTL5,                     0x00 },
+	{ WCD934X_CDC_COMPANDER4_CTL6,                     0x01 },
+	{ WCD934X_CDC_COMPANDER4_CTL7,                     0x08 },
+	{ WCD934X_CDC_COMPANDER7_CTL0,                     0x60 },
+	{ WCD934X_CDC_COMPANDER7_CTL1,                     0xdb },
+	{ WCD934X_CDC_COMPANDER7_CTL2,                     0xff },
+	{ WCD934X_CDC_COMPANDER7_CTL3,                     0x35 },
+	{ WCD934X_CDC_COMPANDER7_CTL4,                     0xff },
+	{ WCD934X_CDC_COMPANDER7_CTL5,                     0x00 },
+	{ WCD934X_CDC_COMPANDER7_CTL6,                     0x01 },
+	{ WCD934X_CDC_COMPANDER7_CTL7,                     0x08 },
+	{ WCD934X_CDC_COMPANDER8_CTL0,                     0x60 },
+	{ WCD934X_CDC_COMPANDER8_CTL1,                     0xdb },
+	{ WCD934X_CDC_COMPANDER8_CTL2,                     0xff },
+	{ WCD934X_CDC_COMPANDER8_CTL3,                     0x35 },
+	{ WCD934X_CDC_COMPANDER8_CTL4,                     0xff },
+	{ WCD934X_CDC_COMPANDER8_CTL5,                     0x00 },
+	{ WCD934X_CDC_COMPANDER8_CTL6,                     0x01 },
+	{ WCD934X_CDC_COMPANDER8_CTL7,                     0x08 },
+	{ WCD934X_CDC_RX0_RX_PATH_CTL,                     0x04 },
+	{ WCD934X_CDC_RX0_RX_PATH_CFG0,                    0x00 },
+	{ WCD934X_CDC_RX0_RX_PATH_CFG1,                    0x64 },
+	{ WCD934X_CDC_RX0_RX_PATH_CFG2,                    0x8f },
+	{ WCD934X_CDC_RX0_RX_VOL_CTL,                      0x00 },
+	{ WCD934X_CDC_RX0_RX_PATH_MIX_CTL,                 0x04 },
+	{ WCD934X_CDC_RX0_RX_PATH_MIX_CFG,                 0x7e },
+	{ WCD934X_CDC_RX0_RX_VOL_MIX_CTL,                  0x00 },
+	{ WCD934X_CDC_RX0_RX_PATH_SEC0,                    0xfc },
+	{ WCD934X_CDC_RX0_RX_PATH_SEC1,                    0x08 },
+	{ WCD934X_CDC_RX0_RX_PATH_SEC2,                    0x00 },
+	{ WCD934X_CDC_RX0_RX_PATH_SEC3,                    0x00 },
+	{ WCD934X_CDC_RX0_RX_PATH_SEC5,                    0x00 },
+	{ WCD934X_CDC_RX0_RX_PATH_SEC6,                    0x00 },
+	{ WCD934X_CDC_RX0_RX_PATH_SEC7,                    0x00 },
+	{ WCD934X_CDC_RX0_RX_PATH_MIX_SEC0,                0x08 },
+	{ WCD934X_CDC_RX0_RX_PATH_MIX_SEC1,                0x00 },
+	{ WCD934X_CDC_RX0_RX_PATH_DSMDEM_CTL,              0x00 },
+	{ WCD934X_CDC_RX1_RX_PATH_CTL,                     0x04 },
+	{ WCD934X_CDC_RX1_RX_PATH_CFG0,                    0x00 },
+	{ WCD934X_CDC_RX1_RX_PATH_CFG1,                    0x64 },
+	{ WCD934X_CDC_RX1_RX_PATH_CFG2,                    0x8f },
+	{ WCD934X_CDC_RX1_RX_VOL_CTL,                      0x00 },
+	{ WCD934X_CDC_RX1_RX_PATH_MIX_CTL,                 0x04 },
+	{ WCD934X_CDC_RX1_RX_PATH_MIX_CFG,                 0x7e },
+	{ WCD934X_CDC_RX1_RX_VOL_MIX_CTL,                  0x00 },
+	{ WCD934X_CDC_RX1_RX_PATH_SEC0,                    0xfc },
+	{ WCD934X_CDC_RX1_RX_PATH_SEC1,                    0x08 },
+	{ WCD934X_CDC_RX1_RX_PATH_SEC2,                    0x00 },
+	{ WCD934X_CDC_RX1_RX_PATH_SEC3,                    0x00 },
+	{ WCD934X_CDC_RX1_RX_PATH_SEC4,                    0x00 },
+	{ WCD934X_CDC_RX1_RX_PATH_SEC5,                    0x00 },
+	{ WCD934X_CDC_RX1_RX_PATH_SEC6,                    0x00 },
+	{ WCD934X_CDC_RX1_RX_PATH_SEC7,                    0x00 },
+	{ WCD934X_CDC_RX1_RX_PATH_MIX_SEC0,                0x08 },
+	{ WCD934X_CDC_RX1_RX_PATH_MIX_SEC1,                0x00 },
+	{ WCD934X_CDC_RX1_RX_PATH_DSMDEM_CTL,              0x00 },
+	{ WCD934X_CDC_RX2_RX_PATH_CTL,                     0x04 },
+	{ WCD934X_CDC_RX2_RX_PATH_CFG0,                    0x00 },
+	{ WCD934X_CDC_RX2_RX_PATH_CFG1,                    0x64 },
+	{ WCD934X_CDC_RX2_RX_PATH_CFG2,                    0x8f },
+	{ WCD934X_CDC_RX2_RX_VOL_CTL,                      0x00 },
+	{ WCD934X_CDC_RX2_RX_PATH_MIX_CTL,                 0x04 },
+	{ WCD934X_CDC_RX2_RX_PATH_MIX_CFG,                 0x7e },
+	{ WCD934X_CDC_RX2_RX_VOL_MIX_CTL,                  0x00 },
+	{ WCD934X_CDC_RX2_RX_PATH_SEC0,                    0xfc },
+	{ WCD934X_CDC_RX2_RX_PATH_SEC1,                    0x08 },
+	{ WCD934X_CDC_RX2_RX_PATH_SEC2,                    0x00 },
+	{ WCD934X_CDC_RX2_RX_PATH_SEC3,                    0x00 },
+	{ WCD934X_CDC_RX2_RX_PATH_SEC4,                    0x00 },
+	{ WCD934X_CDC_RX2_RX_PATH_SEC5,                    0x00 },
+	{ WCD934X_CDC_RX2_RX_PATH_SEC6,                    0x00 },
+	{ WCD934X_CDC_RX2_RX_PATH_SEC7,                    0x00 },
+	{ WCD934X_CDC_RX2_RX_PATH_MIX_SEC0,                0x08 },
+	{ WCD934X_CDC_RX2_RX_PATH_MIX_SEC1,                0x00 },
+	{ WCD934X_CDC_RX2_RX_PATH_DSMDEM_CTL,              0x00 },
+	{ WCD934X_CDC_RX3_RX_PATH_CTL,                     0x04 },
+	{ WCD934X_CDC_RX3_RX_PATH_CFG0,                    0x00 },
+	{ WCD934X_CDC_RX3_RX_PATH_CFG1,                    0x64 },
+	{ WCD934X_CDC_RX3_RX_PATH_CFG2,                    0x8f },
+	{ WCD934X_CDC_RX3_RX_VOL_CTL,                      0x00 },
+	{ WCD934X_CDC_RX3_RX_PATH_MIX_CTL,                 0x04 },
+	{ WCD934X_CDC_RX3_RX_PATH_MIX_CFG,                 0x7e },
+	{ WCD934X_CDC_RX3_RX_VOL_MIX_CTL,                  0x00 },
+	{ WCD934X_CDC_RX3_RX_PATH_SEC0,                    0xfc },
+	{ WCD934X_CDC_RX3_RX_PATH_SEC1,                    0x08 },
+	{ WCD934X_CDC_RX3_RX_PATH_SEC2,                    0x00 },
+	{ WCD934X_CDC_RX3_RX_PATH_SEC3,                    0x00 },
+	{ WCD934X_CDC_RX3_RX_PATH_SEC5,                    0x00 },
+	{ WCD934X_CDC_RX3_RX_PATH_SEC6,                    0x00 },
+	{ WCD934X_CDC_RX3_RX_PATH_SEC7,                    0x00 },
+	{ WCD934X_CDC_RX3_RX_PATH_MIX_SEC0,                0x08 },
+	{ WCD934X_CDC_RX3_RX_PATH_MIX_SEC1,                0x00 },
+	{ WCD934X_CDC_RX3_RX_PATH_DSMDEM_CTL,              0x00 },
+	{ WCD934X_CDC_RX4_RX_PATH_CTL,                     0x04 },
+	{ WCD934X_CDC_RX4_RX_PATH_CFG0,                    0x00 },
+	{ WCD934X_CDC_RX4_RX_PATH_CFG1,                    0x64 },
+	{ WCD934X_CDC_RX4_RX_PATH_CFG2,                    0x8f },
+	{ WCD934X_CDC_RX4_RX_VOL_CTL,                      0x00 },
+	{ WCD934X_CDC_RX4_RX_PATH_MIX_CTL,                 0x04 },
+	{ WCD934X_CDC_RX4_RX_PATH_MIX_CFG,                 0x7e },
+	{ WCD934X_CDC_RX4_RX_VOL_MIX_CTL,                  0x00 },
+	{ WCD934X_CDC_RX4_RX_PATH_SEC0,                    0xfc },
+	{ WCD934X_CDC_RX4_RX_PATH_SEC1,                    0x08 },
+	{ WCD934X_CDC_RX4_RX_PATH_SEC2,                    0x00 },
+	{ WCD934X_CDC_RX4_RX_PATH_SEC3,                    0x00 },
+	{ WCD934X_CDC_RX4_RX_PATH_SEC5,                    0x00 },
+	{ WCD934X_CDC_RX4_RX_PATH_SEC6,                    0x00 },
+	{ WCD934X_CDC_RX4_RX_PATH_SEC7,                    0x00 },
+	{ WCD934X_CDC_RX4_RX_PATH_MIX_SEC0,                0x08 },
+	{ WCD934X_CDC_RX4_RX_PATH_MIX_SEC1,                0x00 },
+	{ WCD934X_CDC_RX4_RX_PATH_DSMDEM_CTL,              0x00 },
+	{ WCD934X_CDC_RX7_RX_PATH_CTL,                     0x04 },
+	{ WCD934X_CDC_RX7_RX_PATH_CFG0,                    0x00 },
+	{ WCD934X_CDC_RX7_RX_PATH_CFG1,                    0x64 },
+	{ WCD934X_CDC_RX7_RX_PATH_CFG2,                    0x8f },
+	{ WCD934X_CDC_RX7_RX_VOL_CTL,                      0x00 },
+	{ WCD934X_CDC_RX7_RX_PATH_MIX_CTL,                 0x04 },
+	{ WCD934X_CDC_RX7_RX_PATH_MIX_CFG,                 0x7e },
+	{ WCD934X_CDC_RX7_RX_VOL_MIX_CTL,                  0x00 },
+	{ WCD934X_CDC_RX7_RX_PATH_SEC0,                    0x04 },
+	{ WCD934X_CDC_RX7_RX_PATH_SEC1,                    0x08 },
+	{ WCD934X_CDC_RX7_RX_PATH_SEC2,                    0x00 },
+	{ WCD934X_CDC_RX7_RX_PATH_SEC3,                    0x00 },
+	{ WCD934X_CDC_RX7_RX_PATH_SEC5,                    0x00 },
+	{ WCD934X_CDC_RX7_RX_PATH_SEC6,                    0x00 },
+	{ WCD934X_CDC_RX7_RX_PATH_SEC7,                    0x00 },
+	{ WCD934X_CDC_RX7_RX_PATH_MIX_SEC0,                0x08 },
+	{ WCD934X_CDC_RX7_RX_PATH_MIX_SEC1,                0x00 },
+	{ WCD934X_CDC_RX7_RX_PATH_DSMDEM_CTL,              0x00 },
+	{ WCD934X_CDC_RX8_RX_PATH_CTL,                     0x04 },
+	{ WCD934X_CDC_RX8_RX_PATH_CFG0,                    0x00 },
+	{ WCD934X_CDC_RX8_RX_PATH_CFG1,                    0x64 },
+	{ WCD934X_CDC_RX8_RX_PATH_CFG2,                    0x8f },
+	{ WCD934X_CDC_RX8_RX_VOL_CTL,                      0x00 },
+	{ WCD934X_CDC_RX8_RX_PATH_MIX_CTL,                 0x04 },
+	{ WCD934X_CDC_RX8_RX_PATH_MIX_CFG,                 0x7e },
+	{ WCD934X_CDC_RX8_RX_VOL_MIX_CTL,                  0x00 },
+	{ WCD934X_CDC_RX8_RX_PATH_SEC0,                    0x04 },
+	{ WCD934X_CDC_RX8_RX_PATH_SEC1,                    0x08 },
+	{ WCD934X_CDC_RX8_RX_PATH_SEC2,                    0x00 },
+	{ WCD934X_CDC_RX8_RX_PATH_SEC3,                    0x00 },
+	{ WCD934X_CDC_RX8_RX_PATH_SEC5,                    0x00 },
+	{ WCD934X_CDC_RX8_RX_PATH_SEC6,                    0x00 },
+	{ WCD934X_CDC_RX8_RX_PATH_SEC7,                    0x00 },
+	{ WCD934X_CDC_RX8_RX_PATH_MIX_SEC0,                0x08 },
+	{ WCD934X_CDC_RX8_RX_PATH_MIX_SEC1,                0x00 },
+	{ WCD934X_CDC_RX8_RX_PATH_DSMDEM_CTL,              0x00 },
+	{ WCD934X_PAGE12_PAGE_REGISTER,                    0x00 },
+	{ WCD934X_CDC_CLSH_CRC,                            0x00 },
+	{ WCD934X_CDC_CLSH_DLY_CTRL,                       0x03 },
+	{ WCD934X_CDC_CLSH_DECAY_CTRL,                     0x02 },
+	{ WCD934X_CDC_CLSH_HPH_V_PA,                       0x1c },
+	{ WCD934X_CDC_CLSH_EAR_V_PA,                       0x39 },
+	{ WCD934X_CDC_CLSH_HPH_V_HD,                       0x0c },
+	{ WCD934X_CDC_CLSH_EAR_V_HD,                       0x0c },
+	{ WCD934X_CDC_CLSH_K1_MSB,                         0x01 },
+	{ WCD934X_CDC_CLSH_K1_LSB,                         0x00 },
+	{ WCD934X_CDC_CLSH_K2_MSB,                         0x00 },
+	{ WCD934X_CDC_CLSH_K2_LSB,                         0x80 },
+	{ WCD934X_CDC_CLSH_IDLE_CTRL,                      0x00 },
+	{ WCD934X_CDC_CLSH_IDLE_HPH,                       0x00 },
+	{ WCD934X_CDC_CLSH_IDLE_EAR,                       0x00 },
+	{ WCD934X_CDC_CLSH_TEST0,                          0x07 },
+	{ WCD934X_CDC_CLSH_TEST1,                          0x00 },
+	{ WCD934X_CDC_CLSH_OVR_VREF,                       0x00 },
+	{ WCD934X_CDC_BOOST0_BOOST_PATH_CTL,               0x00 },
+	{ WCD934X_CDC_BOOST0_BOOST_CTL,                    0xb2 },
+	{ WCD934X_CDC_BOOST0_BOOST_CFG1,                   0x00 },
+	{ WCD934X_CDC_BOOST0_BOOST_CFG2,                   0x00 },
+	{ WCD934X_CDC_BOOST1_BOOST_PATH_CTL,               0x00 },
+	{ WCD934X_CDC_BOOST1_BOOST_CTL,                    0xb2 },
+	{ WCD934X_CDC_BOOST1_BOOST_CFG1,                   0x00 },
+	{ WCD934X_CDC_BOOST1_BOOST_CFG2,                   0x00 },
+	{ WCD934X_CDC_VBAT_VBAT_PATH_CTL,                  0x00 },
+	{ WCD934X_CDC_VBAT_VBAT_CFG,                       0x1a },
+	{ WCD934X_CDC_VBAT_VBAT_ADC_CAL1,                  0x00 },
+	{ WCD934X_CDC_VBAT_VBAT_ADC_CAL2,                  0x00 },
+	{ WCD934X_CDC_VBAT_VBAT_ADC_CAL3,                  0x04 },
+	{ WCD934X_CDC_VBAT_VBAT_PK_EST1,                   0xe0 },
+	{ WCD934X_CDC_VBAT_VBAT_PK_EST2,                   0x01 },
+	{ WCD934X_CDC_VBAT_VBAT_PK_EST3,                   0x40 },
+	{ WCD934X_CDC_VBAT_VBAT_RF_PROC1,                  0x2a },
+	{ WCD934X_CDC_VBAT_VBAT_RF_PROC2,                  0x86 },
+	{ WCD934X_CDC_VBAT_VBAT_TAC1,                      0x70 },
+	{ WCD934X_CDC_VBAT_VBAT_TAC2,                      0x18 },
+	{ WCD934X_CDC_VBAT_VBAT_TAC3,                      0x18 },
+	{ WCD934X_CDC_VBAT_VBAT_TAC4,                      0x03 },
+	{ WCD934X_CDC_VBAT_VBAT_GAIN_UPD1,                 0x01 },
+	{ WCD934X_CDC_VBAT_VBAT_GAIN_UPD2,                 0x00 },
+	{ WCD934X_CDC_VBAT_VBAT_GAIN_UPD3,                 0x64 },
+	{ WCD934X_CDC_VBAT_VBAT_GAIN_UPD4,                 0x01 },
+	{ WCD934X_CDC_VBAT_VBAT_DEBUG1,                    0x00 },
+	{ WCD934X_CDC_VBAT_VBAT_GAIN_UPD_MON,              0x00 },
+	{ WCD934X_CDC_VBAT_VBAT_GAIN_MON_VAL,              0x00 },
+	{ WCD934X_CDC_VBAT_VBAT_BAN,                       0x0c },
+	{ WCD934X_MIXING_ASRC0_CLK_RST_CTL,                0x00 },
+	{ WCD934X_MIXING_ASRC0_CTL0,                       0x00 },
+	{ WCD934X_MIXING_ASRC0_CTL1,                       0x00 },
+	{ WCD934X_MIXING_ASRC0_FIFO_CTL,                   0xa8 },
+	{ WCD934X_MIXING_ASRC0_STATUS_FMIN_CNTR_LSB,       0x00 },
+	{ WCD934X_MIXING_ASRC0_STATUS_FMIN_CNTR_MSB,       0x00 },
+	{ WCD934X_MIXING_ASRC0_STATUS_FMAX_CNTR_LSB,       0x00 },
+	{ WCD934X_MIXING_ASRC0_STATUS_FMAX_CNTR_MSB,       0x00 },
+	{ WCD934X_MIXING_ASRC0_STATUS_FIFO,                0x00 },
+	{ WCD934X_MIXING_ASRC1_CLK_RST_CTL,                0x00 },
+	{ WCD934X_MIXING_ASRC1_CTL0,                       0x00 },
+	{ WCD934X_MIXING_ASRC1_CTL1,                       0x00 },
+	{ WCD934X_MIXING_ASRC1_FIFO_CTL,                   0xa8 },
+	{ WCD934X_MIXING_ASRC1_STATUS_FMIN_CNTR_LSB,       0x00 },
+	{ WCD934X_MIXING_ASRC1_STATUS_FMIN_CNTR_MSB,       0x00 },
+	{ WCD934X_MIXING_ASRC1_STATUS_FMAX_CNTR_LSB,       0x00 },
+	{ WCD934X_MIXING_ASRC1_STATUS_FMAX_CNTR_MSB,       0x00 },
+	{ WCD934X_MIXING_ASRC1_STATUS_FIFO,                0x00 },
+	{ WCD934X_MIXING_ASRC2_CLK_RST_CTL,                0x00 },
+	{ WCD934X_MIXING_ASRC2_CTL0,                       0x00 },
+	{ WCD934X_MIXING_ASRC2_CTL1,                       0x00 },
+	{ WCD934X_MIXING_ASRC2_FIFO_CTL,                   0xa8 },
+	{ WCD934X_MIXING_ASRC2_STATUS_FMIN_CNTR_LSB,       0x00 },
+	{ WCD934X_MIXING_ASRC2_STATUS_FMIN_CNTR_MSB,       0x00 },
+	{ WCD934X_MIXING_ASRC2_STATUS_FMAX_CNTR_LSB,       0x00 },
+	{ WCD934X_MIXING_ASRC2_STATUS_FMAX_CNTR_MSB,       0x00 },
+	{ WCD934X_MIXING_ASRC2_STATUS_FIFO,                0x00 },
+	{ WCD934X_MIXING_ASRC3_CLK_RST_CTL,                0x00 },
+	{ WCD934X_MIXING_ASRC3_CTL0,                       0x00 },
+	{ WCD934X_MIXING_ASRC3_CTL1,                       0x00 },
+	{ WCD934X_MIXING_ASRC3_FIFO_CTL,                   0xa8 },
+	{ WCD934X_MIXING_ASRC3_STATUS_FMIN_CNTR_LSB,       0x00 },
+	{ WCD934X_MIXING_ASRC3_STATUS_FMIN_CNTR_MSB,       0x00 },
+	{ WCD934X_MIXING_ASRC3_STATUS_FMAX_CNTR_LSB,       0x00 },
+	{ WCD934X_MIXING_ASRC3_STATUS_FMAX_CNTR_MSB,       0x00 },
+	{ WCD934X_MIXING_ASRC3_STATUS_FIFO,                0x00 },
+	{ WCD934X_SWR_AHB_BRIDGE_WR_DATA_0,                0x00 },
+	{ WCD934X_SWR_AHB_BRIDGE_WR_DATA_1,                0x00 },
+	{ WCD934X_SWR_AHB_BRIDGE_WR_DATA_2,                0x00 },
+	{ WCD934X_SWR_AHB_BRIDGE_WR_DATA_3,                0x00 },
+	{ WCD934X_SWR_AHB_BRIDGE_WR_ADDR_0,                0x00 },
+	{ WCD934X_SWR_AHB_BRIDGE_WR_ADDR_1,                0x00 },
+	{ WCD934X_SWR_AHB_BRIDGE_WR_ADDR_2,                0x00 },
+	{ WCD934X_SWR_AHB_BRIDGE_WR_ADDR_3,                0x00 },
+	{ WCD934X_SWR_AHB_BRIDGE_RD_ADDR_0,                0x00 },
+	{ WCD934X_SWR_AHB_BRIDGE_RD_ADDR_1,                0x00 },
+	{ WCD934X_SWR_AHB_BRIDGE_RD_ADDR_2,                0x00 },
+	{ WCD934X_SWR_AHB_BRIDGE_RD_ADDR_3,                0x00 },
+	{ WCD934X_SWR_AHB_BRIDGE_RD_DATA_0,                0x00 },
+	{ WCD934X_SWR_AHB_BRIDGE_RD_DATA_1,                0x00 },
+	{ WCD934X_SWR_AHB_BRIDGE_RD_DATA_2,                0x00 },
+	{ WCD934X_SWR_AHB_BRIDGE_RD_DATA_3,                0x00 },
+	{ WCD934X_SWR_AHB_BRIDGE_ACCESS_CFG,               0x0f },
+	{ WCD934X_SWR_AHB_BRIDGE_ACCESS_STATUS,            0x03 },
+	{ WCD934X_CDC_SIDETONE_SRC0_ST_SRC_PATH_CTL,       0x04 },
+	{ WCD934X_CDC_SIDETONE_SRC0_ST_SRC_PATH_CFG1,      0x00 },
+	{ WCD934X_CDC_SIDETONE_SRC1_ST_SRC_PATH_CTL,       0x04 },
+	{ WCD934X_CDC_SIDETONE_SRC1_ST_SRC_PATH_CFG1,      0x00 },
+	{ WCD934X_SIDETONE_ASRC0_CLK_RST_CTL,              0x00 },
+	{ WCD934X_SIDETONE_ASRC0_CTL0,                     0x00 },
+	{ WCD934X_SIDETONE_ASRC0_CTL1,                     0x00 },
+	{ WCD934X_SIDETONE_ASRC0_FIFO_CTL,                 0xa8 },
+	{ WCD934X_SIDETONE_ASRC0_STATUS_FMIN_CNTR_LSB,     0x00 },
+	{ WCD934X_SIDETONE_ASRC0_STATUS_FMIN_CNTR_MSB,     0x00 },
+	{ WCD934X_SIDETONE_ASRC0_STATUS_FMAX_CNTR_LSB,     0x00 },
+	{ WCD934X_SIDETONE_ASRC0_STATUS_FMAX_CNTR_MSB,     0x00 },
+	{ WCD934X_SIDETONE_ASRC0_STATUS_FIFO,              0x00 },
+	{ WCD934X_SIDETONE_ASRC1_CLK_RST_CTL,              0x00 },
+	{ WCD934X_SIDETONE_ASRC1_CTL0,                     0x00 },
+	{ WCD934X_SIDETONE_ASRC1_CTL1,                     0x00 },
+	{ WCD934X_SIDETONE_ASRC1_FIFO_CTL,                 0xa8 },
+	{ WCD934X_SIDETONE_ASRC1_STATUS_FMIN_CNTR_LSB,     0x00 },
+	{ WCD934X_SIDETONE_ASRC1_STATUS_FMIN_CNTR_MSB,     0x00 },
+	{ WCD934X_SIDETONE_ASRC1_STATUS_FMAX_CNTR_LSB,     0x00 },
+	{ WCD934X_SIDETONE_ASRC1_STATUS_FMAX_CNTR_MSB,     0x00 },
+	{ WCD934X_SIDETONE_ASRC1_STATUS_FIFO,              0x00 },
+	{ WCD934X_EC_REF_HQ0_EC_REF_HQ_PATH_CTL,           0x00 },
+	{ WCD934X_EC_REF_HQ0_EC_REF_HQ_CFG0,               0x01 },
+	{ WCD934X_EC_REF_HQ1_EC_REF_HQ_PATH_CTL,           0x00 },
+	{ WCD934X_EC_REF_HQ1_EC_REF_HQ_CFG0,               0x01 },
+	{ WCD934X_EC_ASRC0_CLK_RST_CTL,                    0x00 },
+	{ WCD934X_EC_ASRC0_CTL0,                           0x00 },
+	{ WCD934X_EC_ASRC0_CTL1,                           0x00 },
+	{ WCD934X_EC_ASRC0_FIFO_CTL,                       0xa8 },
+	{ WCD934X_EC_ASRC0_STATUS_FMIN_CNTR_LSB,           0x00 },
+	{ WCD934X_EC_ASRC0_STATUS_FMIN_CNTR_MSB,           0x00 },
+	{ WCD934X_EC_ASRC0_STATUS_FMAX_CNTR_LSB,           0x00 },
+	{ WCD934X_EC_ASRC0_STATUS_FMAX_CNTR_MSB,           0x00 },
+	{ WCD934X_EC_ASRC0_STATUS_FIFO,                    0x00 },
+	{ WCD934X_EC_ASRC1_CLK_RST_CTL,                    0x00 },
+	{ WCD934X_EC_ASRC1_CTL0,                           0x00 },
+	{ WCD934X_EC_ASRC1_CTL1,                           0x00 },
+	{ WCD934X_EC_ASRC1_FIFO_CTL,                       0xa8 },
+	{ WCD934X_EC_ASRC1_STATUS_FMIN_CNTR_LSB,           0x00 },
+	{ WCD934X_EC_ASRC1_STATUS_FMIN_CNTR_MSB,           0x00 },
+	{ WCD934X_EC_ASRC1_STATUS_FMAX_CNTR_LSB,           0x00 },
+	{ WCD934X_EC_ASRC1_STATUS_FMAX_CNTR_MSB,           0x00 },
+	{ WCD934X_EC_ASRC1_STATUS_FIFO,                    0x00 },
+	{ WCD934X_PAGE13_PAGE_REGISTER,                    0x00 },
+	{ WCD934X_CDC_RX_INP_MUX_RX_INT0_CFG0,             0x00 },
+	{ WCD934X_CDC_RX_INP_MUX_RX_INT0_CFG1,             0x00 },
+	{ WCD934X_CDC_RX_INP_MUX_RX_INT1_CFG0,             0x00 },
+	{ WCD934X_CDC_RX_INP_MUX_RX_INT1_CFG1,             0x00 },
+	{ WCD934X_CDC_RX_INP_MUX_RX_INT2_CFG0,             0x00 },
+	{ WCD934X_CDC_RX_INP_MUX_RX_INT2_CFG1,             0x00 },
+	{ WCD934X_CDC_RX_INP_MUX_RX_INT3_CFG0,             0x00 },
+	{ WCD934X_CDC_RX_INP_MUX_RX_INT3_CFG1,             0x00 },
+	{ WCD934X_CDC_RX_INP_MUX_RX_INT4_CFG0,             0x00 },
+	{ WCD934X_CDC_RX_INP_MUX_RX_INT4_CFG1,             0x00 },
+	{ WCD934X_CDC_RX_INP_MUX_RX_INT7_CFG0,             0x00 },
+	{ WCD934X_CDC_RX_INP_MUX_RX_INT7_CFG1,             0x00 },
+	{ WCD934X_CDC_RX_INP_MUX_RX_INT8_CFG0,             0x00 },
+	{ WCD934X_CDC_RX_INP_MUX_RX_INT8_CFG1,             0x00 },
+	{ WCD934X_CDC_RX_INP_MUX_RX_MIX_CFG0,              0x00 },
+	{ WCD934X_CDC_RX_INP_MUX_RX_MIX_CFG1,              0x00 },
+	{ WCD934X_CDC_RX_INP_MUX_RX_MIX_CFG2,              0x00 },
+	{ WCD934X_CDC_RX_INP_MUX_RX_MIX_CFG3,              0x00 },
+	{ WCD934X_CDC_RX_INP_MUX_RX_MIX_CFG4,              0x00 },
+	{ WCD934X_CDC_RX_INP_MUX_SIDETONE_SRC_CFG0,        0x00 },
+	{ WCD934X_CDC_RX_INP_MUX_SIDETONE_SRC_CFG1,        0x00 },
+	{ WCD934X_CDC_RX_INP_MUX_ANC_CFG0,                 0x00 },
+	{ WCD934X_CDC_RX_INP_MUX_SPLINE_ASRC_CFG0,         0x00 },
+	{ WCD934X_CDC_RX_INP_MUX_EC_REF_HQ_CFG0,           0x00 },
+	{ WCD934X_CDC_TX_INP_MUX_ADC_MUX0_CFG0,            0x00 },
+	{ WCD934X_CDC_TX_INP_MUX_ADC_MUX0_CFG1,            0x00 },
+	{ WCD934X_CDC_TX_INP_MUX_ADC_MUX1_CFG0,            0x00 },
+	{ WCD934X_CDC_TX_INP_MUX_ADC_MUX1_CFG1,            0x00 },
+	{ WCD934X_CDC_TX_INP_MUX_ADC_MUX2_CFG0,            0x00 },
+	{ WCD934X_CDC_TX_INP_MUX_ADC_MUX2_CFG1,            0x00 },
+	{ WCD934X_CDC_TX_INP_MUX_ADC_MUX3_CFG0,            0x00 },
+	{ WCD934X_CDC_TX_INP_MUX_ADC_MUX3_CFG1,            0x00 },
+	{ WCD934X_CDC_TX_INP_MUX_ADC_MUX4_CFG0,            0x00 },
+	{ WCD934X_CDC_TX_INP_MUX_ADC_MUX5_CFG0,            0x00 },
+	{ WCD934X_CDC_TX_INP_MUX_ADC_MUX6_CFG0,            0x00 },
+	{ WCD934X_CDC_TX_INP_MUX_ADC_MUX7_CFG0,            0x00 },
+	{ WCD934X_CDC_TX_INP_MUX_ADC_MUX8_CFG0,            0x00 },
+	{ WCD934X_CDC_TX_INP_MUX_ADC_MUX10_CFG0,           0x00 },
+	{ WCD934X_CDC_TX_INP_MUX_ADC_MUX11_CFG0,           0x00 },
+	{ WCD934X_CDC_TX_INP_MUX_ADC_MUX12_CFG0,           0x00 },
+	{ WCD934X_CDC_TX_INP_MUX_ADC_MUX13_CFG0,           0x00 },
+	{ WCD934X_CDC_SIDETONE_IIR_INP_MUX_IIR0_MIX_CFG0,  0x00 },
+	{ WCD934X_CDC_SIDETONE_IIR_INP_MUX_IIR0_MIX_CFG1,  0x00 },
+	{ WCD934X_CDC_SIDETONE_IIR_INP_MUX_IIR0_MIX_CFG2,  0x00 },
+	{ WCD934X_CDC_SIDETONE_IIR_INP_MUX_IIR0_MIX_CFG3,  0x00 },
+	{ WCD934X_CDC_SIDETONE_IIR_INP_MUX_IIR1_MIX_CFG0,  0x00 },
+	{ WCD934X_CDC_SIDETONE_IIR_INP_MUX_IIR1_MIX_CFG1,  0x00 },
+	{ WCD934X_CDC_SIDETONE_IIR_INP_MUX_IIR1_MIX_CFG2,  0x00 },
+	{ WCD934X_CDC_SIDETONE_IIR_INP_MUX_IIR1_MIX_CFG3,  0x00 },
+	{ WCD934X_CDC_IF_ROUTER_TX_MUX_CFG0,               0x00 },
+	{ WCD934X_CDC_IF_ROUTER_TX_MUX_CFG1,               0x00 },
+	{ WCD934X_CDC_IF_ROUTER_TX_MUX_CFG2,               0x00 },
+	{ WCD934X_CDC_IF_ROUTER_TX_MUX_CFG3,               0x00 },
+	{ WCD934X_CDC_CLK_RST_CTRL_MCLK_CONTROL,           0x00 },
+	{ WCD934X_CDC_CLK_RST_CTRL_FS_CNT_CONTROL,         0x0c },
+	{ WCD934X_CDC_CLK_RST_CTRL_SWR_CONTROL,            0x00 },
+	{ WCD934X_CDC_CLK_RST_CTRL_DSD_CONTROL,            0x00 },
+	{ WCD934X_CDC_CLK_RST_CTRL_ASRC_SHARE_CONTROL,     0x0f },
+	{ WCD934X_CDC_CLK_RST_CTRL_GFM_CONTROL,            0x00 },
+	{ WCD934X_CDC_PROX_DETECT_PROX_CTL,                0x08 },
+	{ WCD934X_CDC_PROX_DETECT_PROX_POLL_PERIOD0,       0x00 },
+	{ WCD934X_CDC_PROX_DETECT_PROX_POLL_PERIOD1,       0x4b },
+	{ WCD934X_CDC_PROX_DETECT_PROX_SIG_PATTERN_LSB,    0x00 },
+	{ WCD934X_CDC_PROX_DETECT_PROX_SIG_PATTERN_MSB,    0x00 },
+	{ WCD934X_CDC_PROX_DETECT_PROX_STATUS,             0x00 },
+	{ WCD934X_CDC_PROX_DETECT_PROX_TEST_CTRL,          0x00 },
+	{ WCD934X_CDC_PROX_DETECT_PROX_TEST_BUFF_LSB,      0x00 },
+	{ WCD934X_CDC_PROX_DETECT_PROX_TEST_BUFF_MSB,      0x00 },
+	{ WCD934X_CDC_PROX_DETECT_PROX_TEST_BUFF_LSB_RD,   0x00 },
+	{ WCD934X_CDC_PROX_DETECT_PROX_TEST_BUFF_MSB_RD,   0x00 },
+	{ WCD934X_CDC_PROX_DETECT_PROX_CTL_REPEAT_PAT,     0x00 },
+	{ WCD934X_CDC_SIDETONE_IIR0_IIR_PATH_CTL,          0x00 },
+	{ WCD934X_CDC_SIDETONE_IIR0_IIR_GAIN_B1_CTL,       0x00 },
+	{ WCD934X_CDC_SIDETONE_IIR0_IIR_GAIN_B2_CTL,       0x00 },
+	{ WCD934X_CDC_SIDETONE_IIR0_IIR_GAIN_B3_CTL,       0x00 },
+	{ WCD934X_CDC_SIDETONE_IIR0_IIR_GAIN_B4_CTL,       0x00 },
+	{ WCD934X_CDC_SIDETONE_IIR0_IIR_GAIN_B5_CTL,       0x00 },
+	{ WCD934X_CDC_SIDETONE_IIR0_IIR_GAIN_B6_CTL,       0x00 },
+	{ WCD934X_CDC_SIDETONE_IIR0_IIR_GAIN_B7_CTL,       0x00 },
+	{ WCD934X_CDC_SIDETONE_IIR0_IIR_GAIN_B8_CTL,       0x00 },
+	{ WCD934X_CDC_SIDETONE_IIR0_IIR_CTL,               0x40 },
+	{ WCD934X_CDC_SIDETONE_IIR0_IIR_GAIN_TIMER_CTL,    0x00 },
+	{ WCD934X_CDC_SIDETONE_IIR0_IIR_COEF_B1_CTL,       0x00 },
+	{ WCD934X_CDC_SIDETONE_IIR0_IIR_COEF_B2_CTL,       0x00 },
+	{ WCD934X_CDC_SIDETONE_IIR1_IIR_PATH_CTL,          0x00 },
+	{ WCD934X_CDC_SIDETONE_IIR1_IIR_GAIN_B1_CTL,       0x00 },
+	{ WCD934X_CDC_SIDETONE_IIR1_IIR_GAIN_B2_CTL,       0x00 },
+	{ WCD934X_CDC_SIDETONE_IIR1_IIR_GAIN_B3_CTL,       0x00 },
+	{ WCD934X_CDC_SIDETONE_IIR1_IIR_GAIN_B4_CTL,       0x00 },
+	{ WCD934X_CDC_SIDETONE_IIR1_IIR_GAIN_B5_CTL,       0x00 },
+	{ WCD934X_CDC_SIDETONE_IIR1_IIR_GAIN_B6_CTL,       0x00 },
+	{ WCD934X_CDC_SIDETONE_IIR1_IIR_GAIN_B7_CTL,       0x00 },
+	{ WCD934X_CDC_SIDETONE_IIR1_IIR_GAIN_B8_CTL,       0x00 },
+	{ WCD934X_CDC_SIDETONE_IIR1_IIR_CTL,               0x40 },
+	{ WCD934X_CDC_SIDETONE_IIR1_IIR_GAIN_TIMER_CTL,    0x00 },
+	{ WCD934X_CDC_SIDETONE_IIR1_IIR_COEF_B1_CTL,       0x00 },
+	{ WCD934X_CDC_SIDETONE_IIR1_IIR_COEF_B2_CTL,       0x00 },
+	{ WCD934X_CDC_TOP_TOP_CFG0,                        0x00 },
+	{ WCD934X_CDC_TOP_TOP_CFG1,                        0x00 },
+	{ WCD934X_CDC_TOP_TOP_CFG7,                        0x00 },
+	{ WCD934X_CDC_TOP_HPHL_COMP_WR_LSB,                0x00 },
+	{ WCD934X_CDC_TOP_HPHL_COMP_WR_MSB,                0x00 },
+	{ WCD934X_CDC_TOP_HPHL_COMP_LUT,                   0x00 },
+	{ WCD934X_CDC_TOP_HPHL_COMP_RD_LSB,                0x00 },
+	{ WCD934X_CDC_TOP_HPHL_COMP_RD_MSB,                0x00 },
+	{ WCD934X_CDC_TOP_HPHR_COMP_WR_LSB,                0x00 },
+	{ WCD934X_CDC_TOP_HPHR_COMP_WR_MSB,                0x00 },
+	{ WCD934X_CDC_TOP_HPHR_COMP_LUT,                   0x00 },
+	{ WCD934X_CDC_TOP_HPHR_COMP_RD_LSB,                0x00 },
+	{ WCD934X_CDC_TOP_HPHR_COMP_RD_MSB,                0x00 },
+	{ WCD934X_CDC_TOP_DIFFL_COMP_WR_LSB,               0x00 },
+	{ WCD934X_CDC_TOP_DIFFL_COMP_WR_MSB,               0x00 },
+	{ WCD934X_CDC_TOP_DIFFL_COMP_LUT,                  0x00 },
+	{ WCD934X_CDC_TOP_DIFFL_COMP_RD_LSB,               0x00 },
+	{ WCD934X_CDC_TOP_DIFFL_COMP_RD_MSB,               0x00 },
+	{ WCD934X_CDC_TOP_DIFFR_COMP_WR_LSB,               0x00 },
+	{ WCD934X_CDC_TOP_DIFFR_COMP_WR_MSB,               0x00 },
+	{ WCD934X_CDC_TOP_DIFFR_COMP_LUT,                  0x00 },
+	{ WCD934X_CDC_TOP_DIFFR_COMP_RD_LSB,               0x00 },
+	{ WCD934X_CDC_TOP_DIFFR_COMP_RD_MSB,               0x00 },
+	{ WCD934X_CDC_DSD0_PATH_CTL,                       0x00 },
+	{ WCD934X_CDC_DSD0_CFG0,                           0x00 },
+	{ WCD934X_CDC_DSD0_CFG1,                           0x00 },
+	{ WCD934X_CDC_DSD0_CFG2,                           0x42 },
+	{ WCD934X_CDC_DSD0_CFG3,                           0x00 },
+	{ WCD934X_CDC_DSD0_CFG4,                           0x02 },
+	{ WCD934X_CDC_DSD0_CFG5,                           0x00 },
+	{ WCD934X_CDC_DSD1_PATH_CTL,                       0x00 },
+	{ WCD934X_CDC_DSD1_CFG0,                           0x00 },
+	{ WCD934X_CDC_DSD1_CFG1,                           0x00 },
+	{ WCD934X_CDC_DSD1_CFG2,                           0x42 },
+	{ WCD934X_CDC_DSD1_CFG3,                           0x00 },
+	{ WCD934X_CDC_DSD1_CFG4,                           0x02 },
+	{ WCD934X_CDC_DSD1_CFG5,                           0x00 },
+	{ WCD934X_CDC_RX_IDLE_DET_PATH_CTL,                0x00 },
+	{ WCD934X_CDC_RX_IDLE_DET_CFG0,                    0x07 },
+	{ WCD934X_CDC_RX_IDLE_DET_CFG1,                    0x3c },
+	{ WCD934X_CDC_RX_IDLE_DET_CFG2,                    0x00 },
+	{ WCD934X_CDC_RX_IDLE_DET_CFG3,                    0x00 },
+	{ WCD934X_PAGE14_PAGE_REGISTER,                    0x00 },
+	{ WCD934X_CDC_RATE_EST0_RE_CLK_RST_CTL,            0x00 },
+	{ WCD934X_CDC_RATE_EST0_RE_CTL,                    0x09 },
+	{ WCD934X_CDC_RATE_EST0_RE_PULSE_SUPR_CTL,         0x06 },
+	{ WCD934X_CDC_RATE_EST0_RE_TIMER,                  0x01 },
+	{ WCD934X_CDC_RATE_EST0_RE_BW_SW,                  0x20 },
+	{ WCD934X_CDC_RATE_EST0_RE_THRESH,                 0xa0 },
+	{ WCD934X_CDC_RATE_EST0_RE_STATUS,                 0x00 },
+	{ WCD934X_CDC_RATE_EST0_RE_DIAG_CTRL,              0x00 },
+	{ WCD934X_CDC_RATE_EST0_RE_DIAG_TIMER2,            0x00 },
+	{ WCD934X_CDC_RATE_EST0_RE_DIAG_OFFSET_BW1,        0x00 },
+	{ WCD934X_CDC_RATE_EST0_RE_DIAG_OFFSET_BW2,        0x00 },
+	{ WCD934X_CDC_RATE_EST0_RE_DIAG_OFFSET_BW3,        0x00 },
+	{ WCD934X_CDC_RATE_EST0_RE_DIAG_OFFSET_BW4,        0x00 },
+	{ WCD934X_CDC_RATE_EST0_RE_DIAG_OFFSET_BW5,        0x00 },
+	{ WCD934X_CDC_RATE_EST0_RE_DIAG_LIMIT_BW1,         0x08 },
+	{ WCD934X_CDC_RATE_EST0_RE_DIAG_LIMIT_BW2,         0x07 },
+	{ WCD934X_CDC_RATE_EST0_RE_DIAG_LIMIT_BW3,         0x05 },
+	{ WCD934X_CDC_RATE_EST0_RE_DIAG_LIMIT_BW4,         0x05 },
+	{ WCD934X_CDC_RATE_EST0_RE_DIAG_LIMIT_BW5,         0x05 },
+	{ WCD934X_CDC_RATE_EST0_RE_DIAG_LIMITD1_BW1,       0x08 },
+	{ WCD934X_CDC_RATE_EST0_RE_DIAG_LIMITD1_BW2,       0x07 },
+	{ WCD934X_CDC_RATE_EST0_RE_DIAG_LIMITD1_BW3,       0x05 },
+	{ WCD934X_CDC_RATE_EST0_RE_DIAG_LIMITD1_BW4,       0x05 },
+	{ WCD934X_CDC_RATE_EST0_RE_DIAG_LIMITD1_BW5,       0x05 },
+	{ WCD934X_CDC_RATE_EST0_RE_DIAG_HYST_BW1,          0x03 },
+	{ WCD934X_CDC_RATE_EST0_RE_DIAG_HYST_BW2,          0x03 },
+	{ WCD934X_CDC_RATE_EST0_RE_DIAG_HYST_BW3,          0x03 },
+	{ WCD934X_CDC_RATE_EST0_RE_DIAG_HYST_BW4,          0x03 },
+	{ WCD934X_CDC_RATE_EST0_RE_DIAG_HYST_BW5,          0x03 },
+	{ WCD934X_CDC_RATE_EST0_RE_RMAX_DIAG,              0x00 },
+	{ WCD934X_CDC_RATE_EST0_RE_RMIN_DIAG,              0x00 },
+	{ WCD934X_CDC_RATE_EST0_RE_PH_DET,                 0x00 },
+	{ WCD934X_CDC_RATE_EST0_RE_DIAG_CLR,               0x00 },
+	{ WCD934X_CDC_RATE_EST0_RE_MB_SW_STATE,            0x00 },
+	{ WCD934X_CDC_RATE_EST0_RE_MAST_DIAG_STATE,        0x00 },
+	{ WCD934X_CDC_RATE_EST0_RE_RATE_OUT_7_0,           0x00 },
+	{ WCD934X_CDC_RATE_EST0_RE_RATE_OUT_15_8,          0x00 },
+	{ WCD934X_CDC_RATE_EST0_RE_RATE_OUT_23_16,         0x00 },
+	{ WCD934X_CDC_RATE_EST0_RE_RATE_OUT_31_24,         0x00 },
+	{ WCD934X_CDC_RATE_EST0_RE_RATE_OUT_39_32,         0x00 },
+	{ WCD934X_CDC_RATE_EST0_RE_RATE_OUT_40_43,         0x00 },
+	{ WCD934X_CDC_RATE_EST1_RE_CLK_RST_CTL,            0x00 },
+	{ WCD934X_CDC_RATE_EST1_RE_CTL,                    0x09 },
+	{ WCD934X_CDC_RATE_EST1_RE_PULSE_SUPR_CTL,         0x06 },
+	{ WCD934X_CDC_RATE_EST1_RE_TIMER,                  0x01 },
+	{ WCD934X_CDC_RATE_EST1_RE_BW_SW,                  0x20 },
+	{ WCD934X_CDC_RATE_EST1_RE_THRESH,                 0xa0 },
+	{ WCD934X_CDC_RATE_EST1_RE_STATUS,                 0x00 },
+	{ WCD934X_CDC_RATE_EST1_RE_DIAG_CTRL,              0x00 },
+	{ WCD934X_CDC_RATE_EST1_RE_DIAG_TIMER2,            0x00 },
+	{ WCD934X_CDC_RATE_EST1_RE_DIAG_OFFSET_BW1,        0x00 },
+	{ WCD934X_CDC_RATE_EST1_RE_DIAG_OFFSET_BW2,        0x00 },
+	{ WCD934X_CDC_RATE_EST1_RE_DIAG_OFFSET_BW3,        0x00 },
+	{ WCD934X_CDC_RATE_EST1_RE_DIAG_OFFSET_BW4,        0x00 },
+	{ WCD934X_CDC_RATE_EST1_RE_DIAG_OFFSET_BW5,        0x00 },
+	{ WCD934X_CDC_RATE_EST1_RE_DIAG_LIMIT_BW1,         0x08 },
+	{ WCD934X_CDC_RATE_EST1_RE_DIAG_LIMIT_BW2,         0x07 },
+	{ WCD934X_CDC_RATE_EST1_RE_DIAG_LIMIT_BW3,         0x05 },
+	{ WCD934X_CDC_RATE_EST1_RE_DIAG_LIMIT_BW4,         0x05 },
+	{ WCD934X_CDC_RATE_EST1_RE_DIAG_LIMIT_BW5,         0x05 },
+	{ WCD934X_CDC_RATE_EST1_RE_DIAG_LIMITD1_BW1,       0x08 },
+	{ WCD934X_CDC_RATE_EST1_RE_DIAG_LIMITD1_BW2,       0x07 },
+	{ WCD934X_CDC_RATE_EST1_RE_DIAG_LIMITD1_BW3,       0x05 },
+	{ WCD934X_CDC_RATE_EST1_RE_DIAG_LIMITD1_BW4,       0x05 },
+	{ WCD934X_CDC_RATE_EST1_RE_DIAG_LIMITD1_BW5,       0x05 },
+	{ WCD934X_CDC_RATE_EST1_RE_DIAG_HYST_BW1,          0x03 },
+	{ WCD934X_CDC_RATE_EST1_RE_DIAG_HYST_BW2,          0x03 },
+	{ WCD934X_CDC_RATE_EST1_RE_DIAG_HYST_BW3,          0x03 },
+	{ WCD934X_CDC_RATE_EST1_RE_DIAG_HYST_BW4,          0x03 },
+	{ WCD934X_CDC_RATE_EST1_RE_DIAG_HYST_BW5,          0x03 },
+	{ WCD934X_CDC_RATE_EST1_RE_RMAX_DIAG,              0x00 },
+	{ WCD934X_CDC_RATE_EST1_RE_RMIN_DIAG,              0x00 },
+	{ WCD934X_CDC_RATE_EST1_RE_PH_DET,                 0x00 },
+	{ WCD934X_CDC_RATE_EST1_RE_DIAG_CLR,               0x00 },
+	{ WCD934X_CDC_RATE_EST1_RE_MB_SW_STATE,            0x00 },
+	{ WCD934X_CDC_RATE_EST1_RE_MAST_DIAG_STATE,        0x00 },
+	{ WCD934X_CDC_RATE_EST1_RE_RATE_OUT_7_0,           0x00 },
+	{ WCD934X_CDC_RATE_EST1_RE_RATE_OUT_15_8,          0x00 },
+	{ WCD934X_CDC_RATE_EST1_RE_RATE_OUT_23_16,         0x00 },
+	{ WCD934X_CDC_RATE_EST1_RE_RATE_OUT_31_24,         0x00 },
+	{ WCD934X_CDC_RATE_EST1_RE_RATE_OUT_39_32,         0x00 },
+	{ WCD934X_CDC_RATE_EST1_RE_RATE_OUT_40_43,         0x00 },
+	{ WCD934X_CDC_RATE_EST2_RE_CLK_RST_CTL,            0x00 },
+	{ WCD934X_CDC_RATE_EST2_RE_CTL,                    0x09 },
+	{ WCD934X_CDC_RATE_EST2_RE_PULSE_SUPR_CTL,         0x06 },
+	{ WCD934X_CDC_RATE_EST2_RE_TIMER,                  0x01 },
+	{ WCD934X_CDC_RATE_EST2_RE_BW_SW,                  0x20 },
+	{ WCD934X_CDC_RATE_EST2_RE_THRESH,                 0xa0 },
+	{ WCD934X_CDC_RATE_EST2_RE_STATUS,                 0x00 },
+	{ WCD934X_CDC_RATE_EST2_RE_DIAG_CTRL,              0x00 },
+	{ WCD934X_CDC_RATE_EST2_RE_DIAG_TIMER2,            0x00 },
+	{ WCD934X_CDC_RATE_EST2_RE_DIAG_OFFSET_BW1,        0x00 },
+	{ WCD934X_CDC_RATE_EST2_RE_DIAG_OFFSET_BW2,        0x00 },
+	{ WCD934X_CDC_RATE_EST2_RE_DIAG_OFFSET_BW3,        0x00 },
+	{ WCD934X_CDC_RATE_EST2_RE_DIAG_OFFSET_BW4,        0x00 },
+	{ WCD934X_CDC_RATE_EST2_RE_DIAG_OFFSET_BW5,        0x00 },
+	{ WCD934X_CDC_RATE_EST2_RE_DIAG_LIMIT_BW1,         0x08 },
+	{ WCD934X_CDC_RATE_EST2_RE_DIAG_LIMIT_BW2,         0x07 },
+	{ WCD934X_CDC_RATE_EST2_RE_DIAG_LIMIT_BW3,         0x05 },
+	{ WCD934X_CDC_RATE_EST2_RE_DIAG_LIMIT_BW4,         0x05 },
+	{ WCD934X_CDC_RATE_EST2_RE_DIAG_LIMIT_BW5,         0x05 },
+	{ WCD934X_CDC_RATE_EST2_RE_DIAG_LIMITD1_BW1,       0x08 },
+	{ WCD934X_CDC_RATE_EST2_RE_DIAG_LIMITD1_BW2,       0x07 },
+	{ WCD934X_CDC_RATE_EST2_RE_DIAG_LIMITD1_BW3,       0x05 },
+	{ WCD934X_CDC_RATE_EST2_RE_DIAG_LIMITD1_BW4,       0x05 },
+	{ WCD934X_CDC_RATE_EST2_RE_DIAG_LIMITD1_BW5,       0x05 },
+	{ WCD934X_CDC_RATE_EST2_RE_DIAG_HYST_BW1,          0x03 },
+	{ WCD934X_CDC_RATE_EST2_RE_DIAG_HYST_BW2,          0x03 },
+	{ WCD934X_CDC_RATE_EST2_RE_DIAG_HYST_BW3,          0x03 },
+	{ WCD934X_CDC_RATE_EST2_RE_DIAG_HYST_BW4,          0x03 },
+	{ WCD934X_CDC_RATE_EST2_RE_DIAG_HYST_BW5,          0x03 },
+	{ WCD934X_CDC_RATE_EST2_RE_RMAX_DIAG,              0x00 },
+	{ WCD934X_CDC_RATE_EST2_RE_RMIN_DIAG,              0x00 },
+	{ WCD934X_CDC_RATE_EST2_RE_PH_DET,                 0x00 },
+	{ WCD934X_CDC_RATE_EST2_RE_DIAG_CLR,               0x00 },
+	{ WCD934X_CDC_RATE_EST2_RE_MB_SW_STATE,            0x00 },
+	{ WCD934X_CDC_RATE_EST2_RE_MAST_DIAG_STATE,        0x00 },
+	{ WCD934X_CDC_RATE_EST2_RE_RATE_OUT_7_0,           0x00 },
+	{ WCD934X_CDC_RATE_EST2_RE_RATE_OUT_15_8,          0x00 },
+	{ WCD934X_CDC_RATE_EST2_RE_RATE_OUT_23_16,         0x00 },
+	{ WCD934X_CDC_RATE_EST2_RE_RATE_OUT_31_24,         0x00 },
+	{ WCD934X_CDC_RATE_EST2_RE_RATE_OUT_39_32,         0x00 },
+	{ WCD934X_CDC_RATE_EST2_RE_RATE_OUT_40_43,         0x00 },
+	{ WCD934X_CDC_RATE_EST3_RE_CLK_RST_CTL,            0x00 },
+	{ WCD934X_CDC_RATE_EST3_RE_CTL,                    0x09 },
+	{ WCD934X_CDC_RATE_EST3_RE_PULSE_SUPR_CTL,         0x06 },
+	{ WCD934X_CDC_RATE_EST3_RE_TIMER,                  0x01 },
+	{ WCD934X_CDC_RATE_EST3_RE_BW_SW,                  0x20 },
+	{ WCD934X_CDC_RATE_EST3_RE_THRESH,                 0xa0 },
+	{ WCD934X_CDC_RATE_EST3_RE_STATUS,                 0x00 },
+	{ WCD934X_CDC_RATE_EST3_RE_DIAG_CTRL,              0x00 },
+	{ WCD934X_CDC_RATE_EST3_RE_DIAG_TIMER2,            0x00 },
+	{ WCD934X_CDC_RATE_EST3_RE_DIAG_OFFSET_BW1,        0x00 },
+	{ WCD934X_CDC_RATE_EST3_RE_DIAG_OFFSET_BW2,        0x00 },
+	{ WCD934X_CDC_RATE_EST3_RE_DIAG_OFFSET_BW3,        0x00 },
+	{ WCD934X_CDC_RATE_EST3_RE_DIAG_OFFSET_BW4,        0x00 },
+	{ WCD934X_CDC_RATE_EST3_RE_DIAG_OFFSET_BW5,        0x00 },
+	{ WCD934X_CDC_RATE_EST3_RE_DIAG_LIMIT_BW1,         0x08 },
+	{ WCD934X_CDC_RATE_EST3_RE_DIAG_LIMIT_BW2,         0x07 },
+	{ WCD934X_CDC_RATE_EST3_RE_DIAG_LIMIT_BW3,         0x05 },
+	{ WCD934X_CDC_RATE_EST3_RE_DIAG_LIMIT_BW4,         0x05 },
+	{ WCD934X_CDC_RATE_EST3_RE_DIAG_LIMIT_BW5,         0x05 },
+	{ WCD934X_CDC_RATE_EST3_RE_DIAG_LIMITD1_BW1,       0x08 },
+	{ WCD934X_CDC_RATE_EST3_RE_DIAG_LIMITD1_BW2,       0x07 },
+	{ WCD934X_CDC_RATE_EST3_RE_DIAG_LIMITD1_BW3,       0x05 },
+	{ WCD934X_CDC_RATE_EST3_RE_DIAG_LIMITD1_BW4,       0x05 },
+	{ WCD934X_CDC_RATE_EST3_RE_DIAG_LIMITD1_BW5,       0x05 },
+	{ WCD934X_CDC_RATE_EST3_RE_DIAG_HYST_BW1,          0x03 },
+	{ WCD934X_CDC_RATE_EST3_RE_DIAG_HYST_BW2,          0x03 },
+	{ WCD934X_CDC_RATE_EST3_RE_DIAG_HYST_BW3,          0x03 },
+	{ WCD934X_CDC_RATE_EST3_RE_DIAG_HYST_BW4,          0x03 },
+	{ WCD934X_CDC_RATE_EST3_RE_DIAG_HYST_BW5,          0x03 },
+	{ WCD934X_CDC_RATE_EST3_RE_RMAX_DIAG,              0x00 },
+	{ WCD934X_CDC_RATE_EST3_RE_RMIN_DIAG,              0x00 },
+	{ WCD934X_CDC_RATE_EST3_RE_PH_DET,                 0x00 },
+	{ WCD934X_CDC_RATE_EST3_RE_DIAG_CLR,               0x00 },
+	{ WCD934X_CDC_RATE_EST3_RE_MB_SW_STATE,            0x00 },
+	{ WCD934X_CDC_RATE_EST3_RE_MAST_DIAG_STATE,        0x00 },
+	{ WCD934X_CDC_RATE_EST3_RE_RATE_OUT_7_0,           0x00 },
+	{ WCD934X_CDC_RATE_EST3_RE_RATE_OUT_15_8,          0x00 },
+	{ WCD934X_CDC_RATE_EST3_RE_RATE_OUT_23_16,         0x00 },
+	{ WCD934X_CDC_RATE_EST3_RE_RATE_OUT_31_24,         0x00 },
+	{ WCD934X_CDC_RATE_EST3_RE_RATE_OUT_39_32,         0x00 },
+	{ WCD934X_CDC_RATE_EST3_RE_RATE_OUT_40_43,         0x00 },
+	{ WCD934X_PAGE15_PAGE_REGISTER,                    0x00 },
+	{ WCD934X_SPLINE_SRC0_CLK_RST_CTL_0,               0x20 },
+	{ WCD934X_SPLINE_SRC0_STATUS,                      0x00 },
+	{ WCD934X_SPLINE_SRC1_CLK_RST_CTL_0,               0x20 },
+	{ WCD934X_SPLINE_SRC1_STATUS,                      0x00 },
+	{ WCD934X_SPLINE_SRC2_CLK_RST_CTL_0,               0x20 },
+	{ WCD934X_SPLINE_SRC2_STATUS,                      0x00 },
+	{ WCD934X_SPLINE_SRC3_CLK_RST_CTL_0,               0x20 },
+	{ WCD934X_SPLINE_SRC3_STATUS,                      0x00 },
+	{ WCD934X_CDC_DEBUG_DSD0_DEBUG_CFG0,               0x11 },
+	{ WCD934X_CDC_DEBUG_DSD0_DEBUG_CFG1,               0x20 },
+	{ WCD934X_CDC_DEBUG_DSD0_DEBUG_CFG2,               0x00 },
+	{ WCD934X_CDC_DEBUG_DSD0_DEBUG_CFG3,               0x08 },
+	{ WCD934X_CDC_DEBUG_DSD1_DEBUG_CFG0,               0x11 },
+	{ WCD934X_CDC_DEBUG_DSD1_DEBUG_CFG1,               0x20 },
+	{ WCD934X_CDC_DEBUG_DSD1_DEBUG_CFG2,               0x00 },
+	{ WCD934X_CDC_DEBUG_DSD1_DEBUG_CFG3,               0x08 },
+	{ WCD934X_CDC_DEBUG_SPLINE_SRC_DEBUG_CFG0,         0x00 },
+	{ WCD934X_CDC_DEBUG_SPLINE_SRC_DEBUG_CFG1,         0x00 },
+	{ WCD934X_CDC_DEBUG_RC_RE_ASRC_DEBUG_CFG0,         0x00 },
+	{ WCD934X_CDC_DEBUG_ANC0_RC0_FIFO_CTL,             0x4c },
+	{ WCD934X_CDC_DEBUG_ANC0_RC1_FIFO_CTL,             0x4c },
+	{ WCD934X_CDC_DEBUG_ANC1_RC0_FIFO_CTL,             0x4c },
+	{ WCD934X_CDC_DEBUG_ANC1_RC1_FIFO_CTL,             0x4c },
+	{ WCD934X_CDC_DEBUG_ANC_RC_RST_DBG_CNTR,           0x00 },
+	{ WCD934X_PAGE80_PAGE_REGISTER,                    0x00 },
+	{ WCD934X_CODEC_CPR_WR_DATA_0,                     0x00 },
+	{ WCD934X_CODEC_CPR_WR_DATA_1,                     0x00 },
+	{ WCD934X_CODEC_CPR_WR_DATA_2,                     0x00 },
+	{ WCD934X_CODEC_CPR_WR_DATA_3,                     0x00 },
+	{ WCD934X_CODEC_CPR_WR_ADDR_0,                     0x00 },
+	{ WCD934X_CODEC_CPR_WR_ADDR_1,                     0x00 },
+	{ WCD934X_CODEC_CPR_WR_ADDR_2,                     0x00 },
+	{ WCD934X_CODEC_CPR_WR_ADDR_3,                     0x00 },
+	{ WCD934X_CODEC_CPR_RD_ADDR_0,                     0x00 },
+	{ WCD934X_CODEC_CPR_RD_ADDR_1,                     0x00 },
+	{ WCD934X_CODEC_CPR_RD_ADDR_2,                     0x00 },
+	{ WCD934X_CODEC_CPR_RD_ADDR_3,                     0x00 },
+	{ WCD934X_CODEC_CPR_RD_DATA_0,                     0x00 },
+	{ WCD934X_CODEC_CPR_RD_DATA_1,                     0x00 },
+	{ WCD934X_CODEC_CPR_RD_DATA_2,                     0x00 },
+	{ WCD934X_CODEC_CPR_RD_DATA_3,                     0x00 },
+	{ WCD934X_CODEC_CPR_ACCESS_CFG,                    0x0f },
+	{ WCD934X_CODEC_CPR_ACCESS_STATUS,                 0x03 },
+	{ WCD934X_CODEC_CPR_NOM_CX_VDD,                    0xb4 },
+	{ WCD934X_CODEC_CPR_SVS_CX_VDD,                    0x5c },
+	{ WCD934X_CODEC_CPR_SVS2_CX_VDD,                   0x40 },
+	{ WCD934X_CODEC_CPR_NOM_MX_VDD,                    0xb4 },
+	{ WCD934X_CODEC_CPR_SVS_MX_VDD,                    0xb4 },
+	{ WCD934X_CODEC_CPR_SVS2_MX_VDD,                   0xa0 },
+	{ WCD934X_CODEC_CPR_SVS2_MIN_CX_VDD,               0x28 },
+	{ WCD934X_CODEC_CPR_MAX_SVS2_STEP,                 0x08 },
+	{ WCD934X_CODEC_CPR_CTL,                           0x00 },
+	{ WCD934X_CODEC_CPR_SW_MODECHNG_STATUS,            0x00 },
+	{ WCD934X_CODEC_CPR_SW_MODECHNG_START,             0x00 },
+	{ WCD934X_CODEC_CPR_CPR_STATUS,                    0x00 },
+	{ WCD934X_PAGE128_PAGE_REGISTER,                   0x00 },
+	{ WCD934X_TLMM_BIST_MODE_PINCFG,                   0x00 },
+	{ WCD934X_TLMM_RF_PA_ON_PINCFG,                    0x00 },
+	{ WCD934X_TLMM_INTR1_PINCFG,                       0x00 },
+	{ WCD934X_TLMM_INTR2_PINCFG,                       0x00 },
+	{ WCD934X_TLMM_SWR_DATA_PINCFG,                    0x00 },
+	{ WCD934X_TLMM_SWR_CLK_PINCFG,                     0x00 },
+	{ WCD934X_TLMM_I2S_2_SCK_PINCFG,                   0x00 },
+	{ WCD934X_TLMM_SLIMBUS_DATA1_PINCFG,               0x00 },
+	{ WCD934X_TLMM_SLIMBUS_DATA2_PINCFG,               0x00 },
+	{ WCD934X_TLMM_SLIMBUS_CLK_PINCFG,                 0x00 },
+	{ WCD934X_TLMM_I2C_CLK_PINCFG,                     0x00 },
+	{ WCD934X_TLMM_I2C_DATA_PINCFG,                    0x00 },
+	{ WCD934X_TLMM_I2S_0_RX_PINCFG,                    0x00 },
+	{ WCD934X_TLMM_I2S_0_TX_PINCFG,                    0x00 },
+	{ WCD934X_TLMM_I2S_0_SCK_PINCFG,                   0x00 },
+	{ WCD934X_TLMM_I2S_0_WS_PINCFG,                    0x00 },
+	{ WCD934X_TLMM_I2S_1_RX_PINCFG,                    0x00 },
+	{ WCD934X_TLMM_I2S_1_TX_PINCFG,                    0x00 },
+	{ WCD934X_TLMM_I2S_1_SCK_PINCFG,                   0x00 },
+	{ WCD934X_TLMM_I2S_1_WS_PINCFG,                    0x00 },
+	{ WCD934X_TLMM_DMIC1_CLK_PINCFG,                   0x00 },
+	{ WCD934X_TLMM_DMIC1_DATA_PINCFG,                  0x00 },
+	{ WCD934X_TLMM_DMIC2_CLK_PINCFG,                   0x00 },
+	{ WCD934X_TLMM_DMIC2_DATA_PINCFG,                  0x00 },
+	{ WCD934X_TLMM_DMIC3_CLK_PINCFG,                   0x00 },
+	{ WCD934X_TLMM_DMIC3_DATA_PINCFG,                  0x00 },
+	{ WCD934X_TLMM_JTCK_PINCFG,                        0x00 },
+	{ WCD934X_TLMM_GPIO1_PINCFG,                       0x00 },
+	{ WCD934X_TLMM_GPIO2_PINCFG,                       0x00 },
+	{ WCD934X_TLMM_GPIO3_PINCFG,                       0x00 },
+	{ WCD934X_TLMM_GPIO4_PINCFG,                       0x00 },
+	{ WCD934X_TLMM_SPI_S_CSN_PINCFG,                   0x00 },
+	{ WCD934X_TLMM_SPI_S_CLK_PINCFG,                   0x00 },
+	{ WCD934X_TLMM_SPI_S_DOUT_PINCFG,                  0x00 },
+	{ WCD934X_TLMM_SPI_S_DIN_PINCFG,                   0x00 },
+	{ WCD934X_TLMM_BA_N_PINCFG,                        0x00 },
+	{ WCD934X_TLMM_GPIO0_PINCFG,                       0x00 },
+	{ WCD934X_TLMM_I2S_2_RX_PINCFG,                    0x00 },
+	{ WCD934X_TLMM_I2S_2_WS_PINCFG,                    0x00 },
+	{ WCD934X_TEST_DEBUG_PIN_CTL_OE_0,                 0x00 },
+	{ WCD934X_TEST_DEBUG_PIN_CTL_OE_1,                 0x00 },
+	{ WCD934X_TEST_DEBUG_PIN_CTL_OE_2,                 0x00 },
+	{ WCD934X_TEST_DEBUG_PIN_CTL_OE_3,                 0x00 },
+	{ WCD934X_TEST_DEBUG_PIN_CTL_OE_4,                 0x00 },
+	{ WCD934X_TEST_DEBUG_PIN_CTL_DATA_0,               0x00 },
+	{ WCD934X_TEST_DEBUG_PIN_CTL_DATA_1,               0x00 },
+	{ WCD934X_TEST_DEBUG_PIN_CTL_DATA_2,               0x00 },
+	{ WCD934X_TEST_DEBUG_PIN_CTL_DATA_3,               0x00 },
+	{ WCD934X_TEST_DEBUG_PIN_CTL_DATA_4,               0x00 },
+	{ WCD934X_TEST_DEBUG_PAD_DRVCTL_0,                 0x00 },
+	{ WCD934X_TEST_DEBUG_PAD_DRVCTL_1,                 0x00 },
+	{ WCD934X_TEST_DEBUG_PIN_STATUS,                   0x00 },
+	{ WCD934X_TEST_DEBUG_NPL_DLY_TEST_1,               0x10 },
+	{ WCD934X_TEST_DEBUG_NPL_DLY_TEST_2,               0x60 },
+	{ WCD934X_TEST_DEBUG_MEM_CTRL,                     0x00 },
+	{ WCD934X_TEST_DEBUG_DEBUG_BUS_SEL,                0x00 },
+	{ WCD934X_TEST_DEBUG_DEBUG_JTAG,                   0x00 },
+	{ WCD934X_TEST_DEBUG_DEBUG_EN_1,                   0x00 },
+	{ WCD934X_TEST_DEBUG_DEBUG_EN_2,                   0x00 },
+	{ WCD934X_TEST_DEBUG_DEBUG_EN_3,                   0x00 },
+	{ WCD934X_TEST_DEBUG_DEBUG_EN_4,                   0x00 },
+	{ WCD934X_TEST_DEBUG_DEBUG_EN_5,                   0x00 },
+	{ WCD934X_TEST_DEBUG_ANA_DTEST_DIR,                0x00 },
+	{ WCD934X_TEST_DEBUG_PAD_INP_DISABLE_0,            0x00 },
+	{ WCD934X_TEST_DEBUG_PAD_INP_DISABLE_1,            0x00 },
+	{ WCD934X_TEST_DEBUG_PAD_INP_DISABLE_2,            0x00 },
+	{ WCD934X_TEST_DEBUG_PAD_INP_DISABLE_3,            0x00 },
+	{ WCD934X_TEST_DEBUG_PAD_INP_DISABLE_4,            0x00 },
+	{ WCD934X_TEST_DEBUG_SYSMEM_CTRL,                  0x00 },
+	{ WCD934X_TEST_DEBUG_SOC_SW_PWR_SEQ_DELAY,         0x00 },
+	{ WCD934X_TEST_DEBUG_LVAL_NOM_LOW,                 0x96 },
+	{ WCD934X_TEST_DEBUG_LVAL_NOM_HIGH,                0x00 },
+	{ WCD934X_TEST_DEBUG_LVAL_SVS_SVS2_LOW,            0x53 },
+	{ WCD934X_TEST_DEBUG_LVAL_SVS_SVS2_HIGH,           0x00 },
+	{ WCD934X_TEST_DEBUG_SPI_SLAVE_CHAR,               0x00 },
+	{ WCD934X_TEST_DEBUG_CODEC_DIAGS,                  0x00 },
+};
+
+/**
+ * wcd934x_regmap_register_patch: Update register defaults based on revision
+ * @regmap: handle to wcd9xxx regmap
+ * @revision: wcd934x chip revision
+ *
+ * Returns 0 on success or a negative error code on failure.
+ */
+int wcd934x_regmap_register_patch(struct regmap *regmap, int revision)
+{
+	int rc = 0;
+
+	if (!regmap) {
+		pr_err("%s: regmap struct is NULL\n", __func__);
+		return -EINVAL;
+	}
+
+	switch (revision) {
+	case TAVIL_VERSION_1_1:
+	case TAVIL_VERSION_WCD9340_1_1:
+	case TAVIL_VERSION_WCD9341_1_1:
+		regcache_cache_only(regmap, true);
+		rc = regmap_multi_reg_write(regmap, wcd934x_1_1_defaults,
+					    ARRAY_SIZE(wcd934x_1_1_defaults));
+		regcache_cache_only(regmap, false);
+		break;
+	}
+
+	return rc;
+}
+EXPORT_SYMBOL(wcd934x_regmap_register_patch);
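+
+/*
+ * Illustrative usage sketch (not part of the original code): a probe-time
+ * caller would look up the chip revision and apply the revision-specific
+ * defaults before the first cache sync. wcd934x_get_revision() is a
+ * hypothetical stand-in for whatever the probe path actually uses.
+ *
+ *	rev = wcd934x_get_revision(wcd9xxx);
+ *	ret = wcd934x_regmap_register_patch(wcd9xxx->regmap, rev);
+ *	if (ret)
+ *		dev_err(dev, "regmap patch failed: %d\n", ret);
+ */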
+
+static bool wcd934x_is_readable_register(struct device *dev, unsigned int reg)
+{
+	u8 pg_num, reg_offset;
+	const u8 *reg_tbl = NULL;
+
+	/*
+	 * Get the page number from the MSB of the codec register address.
+	 * Pages 0x80 and 0x50 are remapped to the compact page indices
+	 * WCD934X_PAGE_0X80 and WCD934X_PAGE_0x50.
+	 */
+	pg_num = reg >> 0x8;
+	if (pg_num == 0x80)
+		pg_num = WCD934X_PAGE_0X80;
+	else if (pg_num == 0x50)
+		pg_num = WCD934X_PAGE_0x50;
+	else if (pg_num > 0xF)
+		return false;
+
+	reg_tbl = wcd934x_reg[pg_num];
+	reg_offset = reg & 0xFF;
+
+	return reg_tbl && reg_tbl[reg_offset];
+}
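+
+/*
+ * Address decoding example (following the checks above):
+ * reg 0x0F15 -> pg_num 0x0F, offset 0x15 -> wcd934x_reg[0x0F][0x15];
+ * reg 0x8003 -> pg_num 0x80, remapped to index WCD934X_PAGE_0X80.
+ * Any page above 0xF other than 0x50 and 0x80 is rejected outright.
+ */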
+
+static bool wcd934x_is_volatile_register(struct device *dev, unsigned int reg)
+{
+	u8 pg_num, reg_offset;
+	const u8 *reg_tbl = NULL;
+
+	pg_num = reg >> 0x8;
+	if (pg_num == 0x80)
+		pg_num = WCD934X_PAGE_0X80;
+	else if (pg_num == 0x50)
+		pg_num = WCD934X_PAGE_0x50;
+	else if (pg_num > 0xF)
+		return false;
+
+	reg_tbl = wcd934x_reg[pg_num];
+	reg_offset = reg & 0xFF;
+
+	if (reg_tbl && reg_tbl[reg_offset] == WCD934X_READ)
+		return true;
+
+	/* IIR Coeff registers are not cacheable */
+	if ((reg >= WCD934X_CDC_SIDETONE_IIR0_IIR_COEF_B1_CTL) &&
+	    (reg <= WCD934X_CDC_SIDETONE_IIR1_IIR_COEF_B2_CTL))
+		return true;
+
+	if ((reg >= WCD934X_CDC_ANC0_IIR_COEFF_1_CTL) &&
+	    (reg <= WCD934X_CDC_ANC0_FB_GAIN_CTL))
+		return true;
+
+	if ((reg >= WCD934X_CDC_ANC1_IIR_COEFF_1_CTL) &&
+	    (reg <= WCD934X_CDC_ANC1_FB_GAIN_CTL))
+		return true;
+
+	if ((reg >= WCD934X_CODEC_CPR_WR_DATA_0) &&
+	    (reg <= WCD934X_CODEC_CPR_RD_DATA_3))
+		return true;
+
+	/*
+	 * Registers that are writable but contain a few read-only bits
+	 * must be marked volatile so reads bypass the register cache.
+	 */
+	switch (reg) {
+	case WCD934X_CPE_SS_SOC_SW_COLLAPSE_CTL:
+	case WCD934X_CPE_SS_PWR_SYS_PSTATE_CTL_0:
+	case WCD934X_CPE_SS_PWR_SYS_PSTATE_CTL_1:
+	case WCD934X_CPE_SS_CPAR_CTL:
+	case WCD934X_CPE_SS_STATUS:
+	case WCD934X_CODEC_RPM_RST_CTL:
+	case WCD934X_SIDO_NEW_VOUT_A_STARTUP:
+	case WCD934X_SIDO_NEW_VOUT_D_STARTUP:
+	case WCD934X_CODEC_RPM_PWR_CDC_DIG_HM_CTL:
+	case WCD934X_ANA_MBHC_MECH:
+	case WCD934X_ANA_MBHC_ELECT:
+	case WCD934X_ANA_MBHC_ZDET:
+	case WCD934X_ANA_MICB2:
+	case WCD934X_CODEC_RPM_CLK_MCLK_CFG:
+	case WCD934X_CLK_SYS_MCLK_PRG:
+	case WCD934X_CHIP_TIER_CTRL_EFUSE_CTL:
+	case WCD934X_ANA_BIAS:
+	case WCD934X_ANA_BUCK_CTL:
+	case WCD934X_ANA_RCO:
+	case WCD934X_CODEC_RPM_CLK_GATE:
+	case WCD934X_BIAS_VBG_FINE_ADJ:
+	case WCD934X_CODEC_CPR_SVS_CX_VDD:
+	case WCD934X_CODEC_CPR_SVS2_CX_VDD:
+		return true;
+	}
+
+	return false;
+}
+
+struct regmap_config wcd934x_regmap_config = {
+	.reg_bits = 16,
+	.val_bits = 8,
+	.cache_type = REGCACHE_RBTREE,
+	.reg_defaults = wcd934x_defaults,
+	.num_reg_defaults = ARRAY_SIZE(wcd934x_defaults),
+	.max_register = WCD934X_MAX_REGISTER,
+	.volatile_reg = wcd934x_is_volatile_register,
+	.readable_reg = wcd934x_is_readable_register,
+	.can_multi_write = true,
+};
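+
+/*
+ * Minimal init sketch, assuming a custom regmap_bus named wcd9xxx_bus is
+ * provided elsewhere by the driver (the name is hypothetical here):
+ *
+ *	wcd9xxx->regmap = regmap_init(dev, &wcd9xxx_bus, wcd9xxx,
+ *				      &wcd934x_regmap_config);
+ *	if (IS_ERR(wcd9xxx->regmap))
+ *		return PTR_ERR(wcd9xxx->regmap);
+ *
+ * reg_bits = 16 matches the (page << 8) | offset addressing decoded in the
+ * readable/volatile callbacks above; values are single bytes.
+ */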
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/mfd/wcd934x-tables.c	2019-01-22 16:16:24.691257165 +0100
@@ -0,0 +1,2155 @@
+/*
+ * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/types.h>
+#include <linux/mfd/wcd934x/registers.h>
+
+#define WCD934X_REG(reg)  ((reg) & 0xFF)
+
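+/*
+ * Each per-page access table below is indexed by the 8-bit in-page offset;
+ * WCD934X_REG() strips the page byte, e.g. WCD934X_REG(0x8014) == 0x14.
+ */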
+const u8 wcd934x_page0_reg_access[WCD934X_PAGE_SIZE] = {
+	[WCD934X_REG(WCD934X_PAGE0_PAGE_REGISTER)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CODEC_RPM_CLK_BYPASS)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CODEC_RPM_CLK_GATE)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CODEC_RPM_CLK_MCLK_CFG)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CODEC_RPM_CLK_MCLK2_CFG)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CODEC_RPM_I2S_DSD_CLK_SEL)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CODEC_RPM_RST_CTL)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CODEC_RPM_PWR_CDC_DIG_HM_CTL)] =
+							WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CHIP_TIER_CTRL_CHIP_ID_BYTE0)] = WCD934X_READ,
+	[WCD934X_REG(WCD934X_CHIP_TIER_CTRL_CHIP_ID_BYTE1)] = WCD934X_READ,
+	[WCD934X_REG(WCD934X_CHIP_TIER_CTRL_CHIP_ID_BYTE2)] = WCD934X_READ,
+	[WCD934X_REG(WCD934X_CHIP_TIER_CTRL_CHIP_ID_BYTE3)] = WCD934X_READ,
+	[WCD934X_REG(WCD934X_CHIP_TIER_CTRL_EFUSE_CTL)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CHIP_TIER_CTRL_EFUSE_TEST0)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CHIP_TIER_CTRL_EFUSE_TEST1)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CHIP_TIER_CTRL_EFUSE_VAL_OUT0)] = WCD934X_READ,
+	[WCD934X_REG(WCD934X_CHIP_TIER_CTRL_EFUSE_VAL_OUT1)] = WCD934X_READ,
+	[WCD934X_REG(WCD934X_CHIP_TIER_CTRL_EFUSE_VAL_OUT2)] = WCD934X_READ,
+	[WCD934X_REG(WCD934X_CHIP_TIER_CTRL_EFUSE_VAL_OUT3)] = WCD934X_READ,
+	[WCD934X_REG(WCD934X_CHIP_TIER_CTRL_EFUSE_VAL_OUT4)] = WCD934X_READ,
+	[WCD934X_REG(WCD934X_CHIP_TIER_CTRL_EFUSE_VAL_OUT5)] = WCD934X_READ,
+	[WCD934X_REG(WCD934X_CHIP_TIER_CTRL_EFUSE_VAL_OUT6)] = WCD934X_READ,
+	[WCD934X_REG(WCD934X_CHIP_TIER_CTRL_EFUSE_VAL_OUT7)] = WCD934X_READ,
+	[WCD934X_REG(WCD934X_CHIP_TIER_CTRL_EFUSE_VAL_OUT8)] = WCD934X_READ,
+	[WCD934X_REG(WCD934X_CHIP_TIER_CTRL_EFUSE_VAL_OUT9)] = WCD934X_READ,
+	[WCD934X_REG(WCD934X_CHIP_TIER_CTRL_EFUSE_VAL_OUT10)] = WCD934X_READ,
+	[WCD934X_REG(WCD934X_CHIP_TIER_CTRL_EFUSE_VAL_OUT11)] = WCD934X_READ,
+	[WCD934X_REG(WCD934X_CHIP_TIER_CTRL_EFUSE_VAL_OUT12)] = WCD934X_READ,
+	[WCD934X_REG(WCD934X_CHIP_TIER_CTRL_EFUSE_VAL_OUT13)] = WCD934X_READ,
+	[WCD934X_REG(WCD934X_CHIP_TIER_CTRL_EFUSE_VAL_OUT14)] = WCD934X_READ,
+	[WCD934X_REG(WCD934X_CHIP_TIER_CTRL_EFUSE_VAL_OUT15)] = WCD934X_READ,
+	[WCD934X_REG(WCD934X_CHIP_TIER_CTRL_EFUSE_STATUS)] = WCD934X_READ,
+	[WCD934X_REG(WCD934X_CHIP_TIER_CTRL_I2C_SLAVE_ID_NONNEGO)] =
+								WCD934X_READ,
+	[WCD934X_REG(WCD934X_CHIP_TIER_CTRL_I2C_SLAVE_ID_1)] =
+							WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CHIP_TIER_CTRL_I2C_SLAVE_ID_2)] =
+							WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CHIP_TIER_CTRL_I2C_SLAVE_ID_3)] =
+							WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CHIP_TIER_CTRL_ANA_WAIT_STATE_CTL)] =
+							WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CHIP_TIER_CTRL_SLNQ_WAIT_STATE_CTL)] =
+							WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CHIP_TIER_CTRL_I2C_ACTIVE)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CHIP_TIER_CTRL_ALT_FUNC_EN)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CHIP_TIER_CTRL_GPIO_CTL_OE)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CHIP_TIER_CTRL_GPIO_CTL_DATA)] =
+							WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_DATA_HUB_RX0_CFG)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_DATA_HUB_RX1_CFG)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_DATA_HUB_RX2_CFG)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_DATA_HUB_RX3_CFG)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_DATA_HUB_RX4_CFG)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_DATA_HUB_RX5_CFG)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_DATA_HUB_RX6_CFG)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_DATA_HUB_RX7_CFG)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_DATA_HUB_SB_TX0_INP_CFG)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_DATA_HUB_SB_TX1_INP_CFG)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_DATA_HUB_SB_TX2_INP_CFG)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_DATA_HUB_SB_TX3_INP_CFG)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_DATA_HUB_SB_TX4_INP_CFG)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_DATA_HUB_SB_TX5_INP_CFG)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_DATA_HUB_SB_TX6_INP_CFG)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_DATA_HUB_SB_TX7_INP_CFG)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_DATA_HUB_SB_TX8_INP_CFG)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_DATA_HUB_SB_TX9_INP_CFG)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_DATA_HUB_SB_TX10_INP_CFG)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_DATA_HUB_SB_TX11_INP_CFG)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_DATA_HUB_SB_TX13_INP_CFG)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_DATA_HUB_SB_TX14_INP_CFG)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_DATA_HUB_SB_TX15_INP_CFG)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_DATA_HUB_I2S_TX0_CFG)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_DATA_HUB_I2S_TX1_0_CFG)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_DATA_HUB_I2S_TX1_1_CFG)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_DATA_HUB_I2S_0_CTL)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_DATA_HUB_I2S_1_CTL)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_DATA_HUB_I2S_2_CTL)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_DATA_HUB_I2S_3_CTL)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_DATA_HUB_I2S_CLKSRC_CTL)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_DATA_HUB_I2S_COMMON_CTL)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_DATA_HUB_I2S_0_TDM_CTL)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_DATA_HUB_I2S_STATUS)] = WCD934X_READ,
+	[WCD934X_REG(WCD934X_DMA_RDMA_CTL_0)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_DMA_CH_2_3_CFG_RDMA_0)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_DMA_CH_0_1_CFG_RDMA_0)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_DMA_RDMA_CTL_1)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_DMA_CH_2_3_CFG_RDMA_1)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_DMA_CH_0_1_CFG_RDMA_1)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_DMA_RDMA_CTL_2)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_DMA_CH_2_3_CFG_RDMA_2)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_DMA_CH_0_1_CFG_RDMA_2)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_DMA_RDMA_CTL_3)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_DMA_CH_2_3_CFG_RDMA_3)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_DMA_CH_0_1_CFG_RDMA_3)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_DMA_RDMA_CTL_4)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_DMA_CH_2_3_CFG_RDMA_4)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_DMA_CH_0_1_CFG_RDMA_4)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_DMA_RDMA4_PRT_CFG)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_DMA_RDMA_SBTX0_7_CFG)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_DMA_RDMA_SBTX8_11_CFG)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_DMA_WDMA_CTL_0)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_DMA_CH_4_5_CFG_WDMA_0)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_DMA_CH_2_3_CFG_WDMA_0)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_DMA_CH_0_1_CFG_WDMA_0)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_DMA_WDMA_CTL_1)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_DMA_CH_4_5_CFG_WDMA_1)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_DMA_CH_2_3_CFG_WDMA_1)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_DMA_CH_0_1_CFG_WDMA_1)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_DMA_WDMA_CTL_2)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_DMA_CH_4_5_CFG_WDMA_2)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_DMA_CH_2_3_CFG_WDMA_2)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_DMA_CH_0_1_CFG_WDMA_2)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_DMA_WDMA_CTL_3)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_DMA_CH_4_5_CFG_WDMA_3)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_DMA_CH_2_3_CFG_WDMA_3)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_DMA_CH_0_1_CFG_WDMA_3)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_DMA_WDMA_CTL_4)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_DMA_CH_4_5_CFG_WDMA_4)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_DMA_CH_2_3_CFG_WDMA_4)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_DMA_CH_0_1_CFG_WDMA_4)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_DMA_WDMA0_PRT_CFG)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_DMA_WDMA3_PRT_CFG)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_DMA_WDMA4_PRT0_3_CFG)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_DMA_WDMA4_PRT4_7_CFG)] = WCD934X_READ_WRITE,
+};
+
+const u8 wcd934x_page1_reg_access[WCD934X_PAGE_SIZE] = {
+	[WCD934X_REG(WCD934X_PAGE1_PAGE_REGISTER)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CPE_FLL_USER_CTL_0)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CPE_FLL_USER_CTL_1)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CPE_FLL_USER_CTL_2)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CPE_FLL_USER_CTL_3)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CPE_FLL_USER_CTL_4)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CPE_FLL_USER_CTL_5)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CPE_FLL_USER_CTL_6)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CPE_FLL_USER_CTL_7)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CPE_FLL_USER_CTL_8)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CPE_FLL_USER_CTL_9)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CPE_FLL_L_VAL_CTL_0)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CPE_FLL_L_VAL_CTL_1)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CPE_FLL_DSM_FRAC_CTL_0)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CPE_FLL_DSM_FRAC_CTL_1)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CPE_FLL_CONFIG_CTL_0)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CPE_FLL_CONFIG_CTL_1)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CPE_FLL_CONFIG_CTL_2)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CPE_FLL_CONFIG_CTL_3)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CPE_FLL_CONFIG_CTL_4)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CPE_FLL_TEST_CTL_0)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CPE_FLL_TEST_CTL_1)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CPE_FLL_TEST_CTL_2)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CPE_FLL_TEST_CTL_3)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CPE_FLL_TEST_CTL_4)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CPE_FLL_TEST_CTL_5)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CPE_FLL_TEST_CTL_6)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CPE_FLL_TEST_CTL_7)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CPE_FLL_FREQ_CTL_0)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CPE_FLL_FREQ_CTL_1)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CPE_FLL_FREQ_CTL_2)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CPE_FLL_FREQ_CTL_3)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CPE_FLL_SSC_CTL_0)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CPE_FLL_SSC_CTL_1)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CPE_FLL_SSC_CTL_2)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CPE_FLL_SSC_CTL_3)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CPE_FLL_FLL_MODE)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CPE_FLL_STATUS_0)] = WCD934X_READ,
+	[WCD934X_REG(WCD934X_CPE_FLL_STATUS_1)] = WCD934X_READ,
+	[WCD934X_REG(WCD934X_CPE_FLL_STATUS_2)] = WCD934X_READ,
+	[WCD934X_REG(WCD934X_CPE_FLL_STATUS_3)] = WCD934X_READ,
+	[WCD934X_REG(WCD934X_I2S_FLL_USER_CTL_0)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_I2S_FLL_USER_CTL_1)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_I2S_FLL_USER_CTL_2)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_I2S_FLL_USER_CTL_3)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_I2S_FLL_USER_CTL_4)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_I2S_FLL_USER_CTL_5)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_I2S_FLL_USER_CTL_6)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_I2S_FLL_USER_CTL_7)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_I2S_FLL_USER_CTL_8)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_I2S_FLL_USER_CTL_9)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_I2S_FLL_L_VAL_CTL_0)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_I2S_FLL_L_VAL_CTL_1)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_I2S_FLL_DSM_FRAC_CTL_0)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_I2S_FLL_DSM_FRAC_CTL_1)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_I2S_FLL_CONFIG_CTL_0)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_I2S_FLL_CONFIG_CTL_1)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_I2S_FLL_CONFIG_CTL_2)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_I2S_FLL_CONFIG_CTL_3)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_I2S_FLL_CONFIG_CTL_4)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_I2S_FLL_TEST_CTL_0)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_I2S_FLL_TEST_CTL_1)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_I2S_FLL_TEST_CTL_2)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_I2S_FLL_TEST_CTL_3)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_I2S_FLL_TEST_CTL_4)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_I2S_FLL_TEST_CTL_5)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_I2S_FLL_TEST_CTL_6)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_I2S_FLL_TEST_CTL_7)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_I2S_FLL_FREQ_CTL_0)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_I2S_FLL_FREQ_CTL_1)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_I2S_FLL_FREQ_CTL_2)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_I2S_FLL_FREQ_CTL_3)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_I2S_FLL_SSC_CTL_0)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_I2S_FLL_SSC_CTL_1)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_I2S_FLL_SSC_CTL_2)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_I2S_FLL_SSC_CTL_3)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_I2S_FLL_FLL_MODE)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_I2S_FLL_STATUS_0)] = WCD934X_READ,
+	[WCD934X_REG(WCD934X_I2S_FLL_STATUS_1)] = WCD934X_READ,
+	[WCD934X_REG(WCD934X_I2S_FLL_STATUS_2)] = WCD934X_READ,
+	[WCD934X_REG(WCD934X_I2S_FLL_STATUS_3)] = WCD934X_READ,
+	[WCD934X_REG(WCD934X_SB_FLL_USER_CTL_0)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SB_FLL_USER_CTL_1)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SB_FLL_USER_CTL_2)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SB_FLL_USER_CTL_3)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SB_FLL_USER_CTL_4)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SB_FLL_USER_CTL_5)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SB_FLL_USER_CTL_6)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SB_FLL_USER_CTL_7)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SB_FLL_USER_CTL_8)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SB_FLL_USER_CTL_9)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SB_FLL_L_VAL_CTL_0)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SB_FLL_L_VAL_CTL_1)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SB_FLL_DSM_FRAC_CTL_0)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SB_FLL_DSM_FRAC_CTL_1)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SB_FLL_CONFIG_CTL_0)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SB_FLL_CONFIG_CTL_1)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SB_FLL_CONFIG_CTL_2)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SB_FLL_CONFIG_CTL_3)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SB_FLL_CONFIG_CTL_4)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SB_FLL_TEST_CTL_0)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SB_FLL_TEST_CTL_1)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SB_FLL_TEST_CTL_2)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SB_FLL_TEST_CTL_3)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SB_FLL_TEST_CTL_4)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SB_FLL_TEST_CTL_5)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SB_FLL_TEST_CTL_6)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SB_FLL_TEST_CTL_7)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SB_FLL_FREQ_CTL_0)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SB_FLL_FREQ_CTL_1)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SB_FLL_FREQ_CTL_2)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SB_FLL_FREQ_CTL_3)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SB_FLL_SSC_CTL_0)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SB_FLL_SSC_CTL_1)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SB_FLL_SSC_CTL_2)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SB_FLL_SSC_CTL_3)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SB_FLL_FLL_MODE)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SB_FLL_STATUS_0)] = WCD934X_READ,
+	[WCD934X_REG(WCD934X_SB_FLL_STATUS_1)] = WCD934X_READ,
+	[WCD934X_REG(WCD934X_SB_FLL_STATUS_2)] = WCD934X_READ,
+	[WCD934X_REG(WCD934X_SB_FLL_STATUS_3)] = WCD934X_READ,
+};
+
+const u8 wcd934x_page2_reg_access[WCD934X_PAGE_SIZE] = {
+	[WCD934X_REG(WCD934X_PAGE2_PAGE_REGISTER)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CPE_SS_CPE_CTL)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CPE_SS_PWR_SYS_PSTATE_CTL_0)] =
+							WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CPE_SS_PWR_SYS_PSTATE_CTL_1)] =
+							WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CPE_SS_PWR_CPEFLL_CTL)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CPE_SS_PWR_CPE_SYSMEM_DEEPSLP_0)] =
+							WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CPE_SS_PWR_CPE_SYSMEM_DEEPSLP_1)] =
+							WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CPE_SS_PWR_CPE_SYSMEM_DEEPSLP_OVERRIDE)] =
+							WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CPE_SS_PWR_CPE_SYSMEM_SHUTDOWN_0)] =
+							WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CPE_SS_PWR_CPE_SYSMEM_SHUTDOWN_1)] =
+							WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CPE_SS_PWR_CPE_SYSMEM_SHUTDOWN_2)] =
+							WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CPE_SS_PWR_CPE_SYSMEM_SHUTDOWN_3)] =
+							WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CPE_SS_PWR_CPE_SYSMEM_SHUTDOWN_4)] =
+							WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CPE_SS_PWR_CPE_SYSMEM_SHUTDOWN_5)] =
+							WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CPE_SS_PWR_CPE_DRAM1_SHUTDOWN)] =
+							WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CPE_SS_SOC_SW_COLLAPSE_CTL)] =
+							WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CPE_SS_SOC_SW_COLLAPSE_OVERRIDE_CTL)] =
+							WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CPE_SS_SOC_SW_COLLAPSE_OVERRIDE_CTL1)] =
+							WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CPE_SS_US_BUF_INT_PERIOD)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CPE_SS_CPARMAD_BUFRDY_INT_PERIOD)] =
+							WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CPE_SS_SVA_CFG)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CPE_SS_US_CFG)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CPE_SS_MAD_CTL)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CPE_SS_CPAR_CTL)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CPE_SS_DMIC0_CTL)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CPE_SS_DMIC1_CTL)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CPE_SS_DMIC2_CTL)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CPE_SS_DMIC_CFG)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CPE_SS_CPAR_CFG)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CPE_SS_WDOG_CFG)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CPE_SS_BACKUP_INT)] = WCD934X_WRITE,
+	[WCD934X_REG(WCD934X_CPE_SS_STATUS)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CPE_SS_CPE_OCD_CFG)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CPE_SS_SS_ERROR_INT_MASK_0A)] =
+							WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CPE_SS_SS_ERROR_INT_MASK_0B)] =
+							WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CPE_SS_SS_ERROR_INT_MASK_1A)] =
+							WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CPE_SS_SS_ERROR_INT_MASK_1B)] =
+							WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CPE_SS_SS_ERROR_INT_STATUS_0A)] = WCD934X_READ,
+	[WCD934X_REG(WCD934X_CPE_SS_SS_ERROR_INT_STATUS_0B)] = WCD934X_READ,
+	[WCD934X_REG(WCD934X_CPE_SS_SS_ERROR_INT_STATUS_1A)] = WCD934X_READ,
+	[WCD934X_REG(WCD934X_CPE_SS_SS_ERROR_INT_STATUS_1B)] = WCD934X_READ,
+	[WCD934X_REG(WCD934X_CPE_SS_SS_ERROR_INT_CLEAR_0A)] = WCD934X_WRITE,
+	[WCD934X_REG(WCD934X_CPE_SS_SS_ERROR_INT_CLEAR_0B)] = WCD934X_WRITE,
+	[WCD934X_REG(WCD934X_CPE_SS_SS_ERROR_INT_CLEAR_1A)] = WCD934X_WRITE,
+	[WCD934X_REG(WCD934X_CPE_SS_SS_ERROR_INT_CLEAR_1B)] = WCD934X_WRITE,
+	[WCD934X_REG(WCD934X_SOC_MAD_MAIN_CTL_1)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SOC_MAD_MAIN_CTL_2)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SOC_MAD_AUDIO_CTL_1)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SOC_MAD_AUDIO_CTL_2)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SOC_MAD_AUDIO_CTL_3)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SOC_MAD_AUDIO_CTL_4)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SOC_MAD_AUDIO_CTL_5)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SOC_MAD_AUDIO_CTL_6)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SOC_MAD_AUDIO_CTL_7)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SOC_MAD_AUDIO_CTL_8)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SOC_MAD_AUDIO_IIR_CTL_PTR)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SOC_MAD_AUDIO_IIR_CTL_VAL)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SOC_MAD_ULTR_CTL_1)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SOC_MAD_ULTR_CTL_2)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SOC_MAD_ULTR_CTL_3)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SOC_MAD_ULTR_CTL_4)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SOC_MAD_ULTR_CTL_5)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SOC_MAD_ULTR_CTL_6)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SOC_MAD_ULTR_CTL_7)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SOC_MAD_BEACON_CTL_1)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SOC_MAD_BEACON_CTL_2)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SOC_MAD_BEACON_CTL_3)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SOC_MAD_BEACON_CTL_4)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SOC_MAD_BEACON_CTL_5)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SOC_MAD_BEACON_CTL_6)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SOC_MAD_BEACON_CTL_7)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SOC_MAD_BEACON_CTL_8)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SOC_MAD_BEACON_IIR_CTL_PTR)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SOC_MAD_BEACON_IIR_CTL_VAL)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SOC_MAD_INP_SEL)] = WCD934X_READ_WRITE,
+};
+
+const u8 wcd934x_page4_reg_access[WCD934X_PAGE_SIZE] = {
+	[WCD934X_REG(WCD934X_PAGE4_PAGE_REGISTER)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_INTR_CFG)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_INTR_CLR_COMMIT)] = WCD934X_WRITE,
+	[WCD934X_REG(WCD934X_INTR_PIN1_MASK0)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_INTR_PIN1_MASK1)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_INTR_PIN1_MASK2)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_INTR_PIN1_MASK3)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_INTR_PIN1_STATUS0)] = WCD934X_READ,
+	[WCD934X_REG(WCD934X_INTR_PIN1_STATUS1)] = WCD934X_READ,
+	[WCD934X_REG(WCD934X_INTR_PIN1_STATUS2)] = WCD934X_READ,
+	[WCD934X_REG(WCD934X_INTR_PIN1_STATUS3)] = WCD934X_READ,
+	[WCD934X_REG(WCD934X_INTR_PIN1_CLEAR0)] = WCD934X_WRITE,
+	[WCD934X_REG(WCD934X_INTR_PIN1_CLEAR1)] = WCD934X_WRITE,
+	[WCD934X_REG(WCD934X_INTR_PIN1_CLEAR2)] = WCD934X_WRITE,
+	[WCD934X_REG(WCD934X_INTR_PIN1_CLEAR3)] = WCD934X_WRITE,
+	[WCD934X_REG(WCD934X_INTR_PIN2_MASK3)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_INTR_PIN2_STATUS3)] = WCD934X_READ,
+	[WCD934X_REG(WCD934X_INTR_PIN2_CLEAR3)] = WCD934X_WRITE,
+	[WCD934X_REG(WCD934X_INTR_CPESS_SUMRY_MASK2)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_INTR_CPESS_SUMRY_MASK3)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_INTR_CPESS_SUMRY_STATUS2)] = WCD934X_READ,
+	[WCD934X_REG(WCD934X_INTR_CPESS_SUMRY_STATUS3)] = WCD934X_READ,
+	[WCD934X_REG(WCD934X_INTR_CPESS_SUMRY_CLEAR2)] = WCD934X_WRITE,
+	[WCD934X_REG(WCD934X_INTR_CPESS_SUMRY_CLEAR3)] = WCD934X_WRITE,
+	[WCD934X_REG(WCD934X_INTR_LEVEL0)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_INTR_LEVEL1)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_INTR_LEVEL2)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_INTR_LEVEL3)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_INTR_BYPASS0)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_INTR_BYPASS1)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_INTR_BYPASS2)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_INTR_BYPASS3)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_INTR_SET0)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_INTR_SET1)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_INTR_SET2)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_INTR_SET3)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_INTR_CODEC_MISC_MASK)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_INTR_CODEC_MISC_STATUS)] = WCD934X_READ,
+	[WCD934X_REG(WCD934X_INTR_CODEC_MISC_CLEAR)] = WCD934X_WRITE,
+};
+
+const u8 wcd934x_page5_reg_access[WCD934X_PAGE_SIZE] = {
+	[WCD934X_REG(WCD934X_PAGE5_PAGE_REGISTER)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SLNQ_DIG_DEVICE)] = WCD934X_READ,
+	[WCD934X_REG(WCD934X_SLNQ_DIG_REVISION)] = WCD934X_READ,
+	[WCD934X_REG(WCD934X_SLNQ_DIG_H_COMMAND)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SLNQ_DIG_NUMBER_OF_BYTE_MSB)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SLNQ_DIG_NUMBER_OF_BYTE_LSB)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SLNQ_DIG_MASTER_ADDRESS_MSB)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SLNQ_DIG_MASTER_ADDRESS_LSB)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SLNQ_DIG_SLAVE_ADDRESS_MSB)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SLNQ_DIG_SLAVE_ADDRESS_LSB)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SLNQ_DIG_TIMER0_INTERRUPT_MSB)] =
+							WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SLNQ_DIG_TIMER0_INTERRUPT_LSB)] =
+							WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SLNQ_DIG_TIMER1_INTERRUPT_MSB)] =
+							WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SLNQ_DIG_TIMER1_INTERRUPT_LSB)] =
+							WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SLNQ_DIG_TIMER2_INTERRUPT_MSB)] =
+							WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SLNQ_DIG_TIMER2_INTERRUPT_LSB)] =
+							WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SLNQ_DIG_COMM_CTL)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SLNQ_DIG_FRAME_CTRL)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SLNQ_DIG_PDM_2ND_DATA_CH1_2)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SLNQ_DIG_PDM_2ND_DATA_CH3_4)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SLNQ_DIG_PDM_2ND_DATA_CH5)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SLNQ_DIG_SW_EVENT_RD)] = WCD934X_READ,
+	[WCD934X_REG(WCD934X_SLNQ_DIG_SW_EVENT_CTRL)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SLNQ_DIG_PDM_SELECT_1)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SLNQ_DIG_PDM_SELECT_2)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SLNQ_DIG_PDM_SELECT_3)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SLNQ_DIG_PDM_SAMPLING_FREQ)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SLNQ_DIG_PDM_DC_CONVERSION_CTL)] =
+							WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SLNQ_DIG_PDM_DC_CONVERSION_SEL)] =
+							WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SLNQ_DIG_PDM_DC_CONV_CHA_MSB)] = WCD934X_READ,
+	[WCD934X_REG(WCD934X_SLNQ_DIG_PDM_DC_CONV_CHA_LSB)] = WCD934X_READ,
+	[WCD934X_REG(WCD934X_SLNQ_DIG_PDM_DC_CONV_CHB_MSB)] = WCD934X_READ,
+	[WCD934X_REG(WCD934X_SLNQ_DIG_PDM_DC_CONV_CHB_LSB)] = WCD934X_READ,
+	[WCD934X_REG(WCD934X_SLNQ_DIG_RAM_CNTRL)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SLNQ_DIG_SRAM_BANK)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SLNQ_DIG_SRAM_BYTE_0)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SLNQ_DIG_SRAM_BYTE_1)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SLNQ_DIG_SRAM_BYTE_2)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SLNQ_DIG_SRAM_BYTE_3)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SLNQ_DIG_SRAM_BYTE_4)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SLNQ_DIG_SRAM_BYTE_5)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SLNQ_DIG_SRAM_BYTE_6)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SLNQ_DIG_SRAM_BYTE_7)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SLNQ_DIG_SRAM_BYTE_8)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SLNQ_DIG_SRAM_BYTE_9)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SLNQ_DIG_SRAM_BYTE_A)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SLNQ_DIG_SRAM_BYTE_B)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SLNQ_DIG_SRAM_BYTE_C)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SLNQ_DIG_SRAM_BYTE_D)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SLNQ_DIG_SRAM_BYTE_E)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SLNQ_DIG_SRAM_BYTE_F)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SLNQ_DIG_SRAM_BYTE_10)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SLNQ_DIG_SRAM_BYTE_11)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SLNQ_DIG_SRAM_BYTE_12)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SLNQ_DIG_SRAM_BYTE_13)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SLNQ_DIG_SRAM_BYTE_14)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SLNQ_DIG_SRAM_BYTE_15)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SLNQ_DIG_SRAM_BYTE_16)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SLNQ_DIG_SRAM_BYTE_17)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SLNQ_DIG_SRAM_BYTE_18)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SLNQ_DIG_SRAM_BYTE_19)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SLNQ_DIG_SRAM_BYTE_1A)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SLNQ_DIG_SRAM_BYTE_1B)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SLNQ_DIG_SRAM_BYTE_1C)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SLNQ_DIG_SRAM_BYTE_1D)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SLNQ_DIG_SRAM_BYTE_1E)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SLNQ_DIG_SRAM_BYTE_1F)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SLNQ_DIG_SRAM_BYTE_20)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SLNQ_DIG_SRAM_BYTE_21)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SLNQ_DIG_SRAM_BYTE_22)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SLNQ_DIG_SRAM_BYTE_23)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SLNQ_DIG_SRAM_BYTE_24)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SLNQ_DIG_SRAM_BYTE_25)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SLNQ_DIG_SRAM_BYTE_26)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SLNQ_DIG_SRAM_BYTE_27)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SLNQ_DIG_SRAM_BYTE_28)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SLNQ_DIG_SRAM_BYTE_29)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SLNQ_DIG_SRAM_BYTE_2A)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SLNQ_DIG_SRAM_BYTE_2B)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SLNQ_DIG_SRAM_BYTE_2C)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SLNQ_DIG_SRAM_BYTE_2D)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SLNQ_DIG_SRAM_BYTE_2E)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SLNQ_DIG_SRAM_BYTE_2F)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SLNQ_DIG_SRAM_BYTE_30)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SLNQ_DIG_SRAM_BYTE_31)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SLNQ_DIG_SRAM_BYTE_32)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SLNQ_DIG_SRAM_BYTE_33)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SLNQ_DIG_SRAM_BYTE_34)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SLNQ_DIG_SRAM_BYTE_35)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SLNQ_DIG_SRAM_BYTE_36)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SLNQ_DIG_SRAM_BYTE_37)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SLNQ_DIG_SRAM_BYTE_38)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SLNQ_DIG_SRAM_BYTE_39)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SLNQ_DIG_SRAM_BYTE_3A)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SLNQ_DIG_SRAM_BYTE_3B)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SLNQ_DIG_SRAM_BYTE_3C)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SLNQ_DIG_SRAM_BYTE_3D)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SLNQ_DIG_SRAM_BYTE_3E)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SLNQ_DIG_SRAM_BYTE_3F)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SLNQ_DIG_TOP_CTRL1)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SLNQ_DIG_TOP_CTRL2)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SLNQ_DIG_PDM_CTRL)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SLNQ_DIG_PDM_MUTE_CTRL)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SLNQ_DIG_DEC_BYPASS_CTRL)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SLNQ_DIG_DEC_BYPASS_STATUS)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SLNQ_DIG_DEC_BYPASS_FS)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SLNQ_DIG_DEC_BYPASS_IN_SEL)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SLNQ_DIG_GPOUT_ENABLE)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SLNQ_DIG_GPOUT_VAL)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SLNQ_DIG_ANA_INTERRUPT_MASK)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SLNQ_DIG_ANA_INTERRUPT_STATUS)] = WCD934X_READ,
+	[WCD934X_REG(WCD934X_SLNQ_DIG_ANA_INTERRUPT_CLR)] = WCD934X_WRITE,
+	[WCD934X_REG(WCD934X_SLNQ_DIG_IP_TESTING)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SLNQ_DIG_INTERRUPT_CNTRL)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SLNQ_DIG_INTERRUPT_CNT)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SLNQ_DIG_INTERRUPT_CNT_MSB)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SLNQ_DIG_INTERRUPT_CNT_LSB)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SLNQ_DIG_INTERRUPT_MASK0)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SLNQ_DIG_INTERRUPT_MASK1)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SLNQ_DIG_INTERRUPT_MASK2)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SLNQ_DIG_INTERRUPT_MASK3)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SLNQ_DIG_INTERRUPT_MASK4)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SLNQ_DIG_INTERRUPT_STATUS0)] = WCD934X_READ,
+	[WCD934X_REG(WCD934X_SLNQ_DIG_INTERRUPT_STATUS1)] = WCD934X_READ,
+	[WCD934X_REG(WCD934X_SLNQ_DIG_INTERRUPT_STATUS2)] = WCD934X_READ,
+	[WCD934X_REG(WCD934X_SLNQ_DIG_INTERRUPT_STATUS3)] = WCD934X_READ,
+	[WCD934X_REG(WCD934X_SLNQ_DIG_INTERRUPT_STATUS4)] = WCD934X_READ,
+	[WCD934X_REG(WCD934X_SLNQ_DIG_INTERRUPT_CLR0)] = WCD934X_WRITE,
+	[WCD934X_REG(WCD934X_SLNQ_DIG_INTERRUPT_CLR1)] = WCD934X_WRITE,
+	[WCD934X_REG(WCD934X_SLNQ_DIG_INTERRUPT_CLR2)] = WCD934X_WRITE,
+	[WCD934X_REG(WCD934X_SLNQ_DIG_INTERRUPT_CLR3)] = WCD934X_WRITE,
+	[WCD934X_REG(WCD934X_SLNQ_DIG_INTERRUPT_CLR4)] = WCD934X_WRITE,
+};
+
+const u8 wcd934x_page6_reg_access[WCD934X_PAGE_SIZE] = {
+	[WCD934X_REG(WCD934X_ANA_PAGE_REGISTER)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_ANA_BIAS)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_ANA_RCO)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_ANA_PAGE6_SPARE2)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_ANA_PAGE6_SPARE3)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_ANA_BUCK_CTL)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_ANA_BUCK_STATUS)] = WCD934X_READ,
+	[WCD934X_REG(WCD934X_ANA_RX_SUPPLIES)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_ANA_HPH)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_ANA_EAR)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_ANA_LO_1_2)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_ANA_MAD_SETUP)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_ANA_AMIC1)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_ANA_AMIC2)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_ANA_AMIC3)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_ANA_AMIC4)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_ANA_MBHC_MECH)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_ANA_MBHC_ELECT)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_ANA_MBHC_ZDET)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_ANA_MBHC_RESULT_1)] = WCD934X_READ,
+	[WCD934X_REG(WCD934X_ANA_MBHC_RESULT_2)] = WCD934X_READ,
+	[WCD934X_REG(WCD934X_ANA_MBHC_RESULT_3)] = WCD934X_READ,
+	[WCD934X_REG(WCD934X_ANA_MBHC_BTN0)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_ANA_MBHC_BTN1)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_ANA_MBHC_BTN2)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_ANA_MBHC_BTN3)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_ANA_MBHC_BTN4)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_ANA_MBHC_BTN5)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_ANA_MBHC_BTN6)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_ANA_MBHC_BTN7)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_ANA_MICB1)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_ANA_MICB2)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_ANA_MICB2_RAMP)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_ANA_MICB3)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_ANA_MICB4)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_ANA_VBADC)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_BIAS_CTL)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_BIAS_VBG_FINE_ADJ)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_RCO_CTRL_1)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_RCO_CTRL_2)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_RCO_CAL)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_RCO_CAL_1)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_RCO_CAL_2)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_RCO_TEST_CTRL)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_RCO_CAL_OUT_1)] = WCD934X_READ,
+	[WCD934X_REG(WCD934X_RCO_CAL_OUT_2)] = WCD934X_READ,
+	[WCD934X_REG(WCD934X_RCO_CAL_OUT_3)] = WCD934X_READ,
+	[WCD934X_REG(WCD934X_RCO_CAL_OUT_4)] = WCD934X_READ,
+	[WCD934X_REG(WCD934X_RCO_CAL_OUT_5)] = WCD934X_READ,
+	[WCD934X_REG(WCD934X_SIDO_MODE_1)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SIDO_MODE_2)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SIDO_MODE_3)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SIDO_MODE_4)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SIDO_VCL_1)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SIDO_VCL_2)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SIDO_VCL_3)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SIDO_CCL_1)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SIDO_CCL_2)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SIDO_CCL_3)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SIDO_CCL_4)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SIDO_CCL_5)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SIDO_CCL_6)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SIDO_CCL_7)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SIDO_CCL_8)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SIDO_CCL_9)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SIDO_CCL_10)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SIDO_FILTER_1)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SIDO_FILTER_2)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SIDO_DRIVER_1)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SIDO_DRIVER_2)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SIDO_DRIVER_3)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SIDO_CAL_CODE_EXT_1)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SIDO_CAL_CODE_EXT_2)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SIDO_CAL_CODE_OUT_1)] = WCD934X_READ,
+	[WCD934X_REG(WCD934X_SIDO_CAL_CODE_OUT_2)] = WCD934X_READ,
+	[WCD934X_REG(WCD934X_SIDO_TEST_1)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SIDO_TEST_2)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_MBHC_CTL_CLK)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_MBHC_CTL_ANA)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_MBHC_CTL_SPARE_1)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_MBHC_CTL_SPARE_2)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_MBHC_CTL_BCS)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_MBHC_STATUS_SPARE_1)] = WCD934X_READ,
+	[WCD934X_REG(WCD934X_MBHC_TEST_CTL)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_VBADC_SUBBLOCK_EN)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_VBADC_IBIAS_FE)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_VBADC_BIAS_ADC)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_VBADC_FE_CTRL)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_VBADC_ADC_REF)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_VBADC_ADC_IO)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_VBADC_ADC_SAR)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_VBADC_DEBUG)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_LDOH_MODE)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_LDOH_BIAS)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_LDOH_STB_LOADS)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_LDOH_SLOWRAMP)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_MICB1_TEST_CTL_1)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_MICB1_TEST_CTL_2)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_MICB1_TEST_CTL_3)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_MICB2_TEST_CTL_1)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_MICB2_TEST_CTL_2)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_MICB2_TEST_CTL_3)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_MICB3_TEST_CTL_1)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_MICB3_TEST_CTL_2)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_MICB3_TEST_CTL_3)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_MICB4_TEST_CTL_1)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_MICB4_TEST_CTL_2)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_MICB4_TEST_CTL_3)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_TX_COM_ADC_VCM)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_TX_COM_BIAS_ATEST)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_TX_COM_ADC_INT1_IB)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_TX_COM_ADC_INT2_IB)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_TX_COM_TXFE_DIV_CTL)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_TX_COM_TXFE_DIV_START)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_TX_COM_TXFE_DIV_STOP_9P6M)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_TX_COM_TXFE_DIV_STOP_12P288M)] =
+							WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_TX_1_2_TEST_EN)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_TX_1_2_ADC_IB)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_TX_1_2_ATEST_REFCTL)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_TX_1_2_TEST_CTL)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_TX_1_2_TEST_BLK_EN)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_TX_1_2_TXFE_CLKDIV)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_TX_1_2_SAR1_ERR)] = WCD934X_READ,
+	[WCD934X_REG(WCD934X_TX_1_2_SAR2_ERR)] = WCD934X_READ,
+	[WCD934X_REG(WCD934X_TX_3_4_TEST_EN)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_TX_3_4_ADC_IB)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_TX_3_4_ATEST_REFCTL)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_TX_3_4_TEST_CTL)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_TX_3_4_TEST_BLK_EN)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_TX_3_4_TXFE_CLKDIV)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_TX_3_4_SAR1_ERR)] = WCD934X_READ,
+	[WCD934X_REG(WCD934X_TX_3_4_SAR2_ERR)] = WCD934X_READ,
+	[WCD934X_REG(WCD934X_CLASSH_MODE_1)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CLASSH_MODE_2)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CLASSH_MODE_3)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CLASSH_CTRL_VCL_1)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CLASSH_CTRL_VCL_2)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CLASSH_CTRL_CCL_1)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CLASSH_CTRL_CCL_2)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CLASSH_CTRL_CCL_3)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CLASSH_CTRL_CCL_4)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CLASSH_CTRL_CCL_5)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CLASSH_BUCK_TMUX_A_D)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CLASSH_BUCK_SW_DRV_CNTL)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CLASSH_SPARE)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_FLYBACK_EN)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_FLYBACK_VNEG_CTRL_1)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_FLYBACK_VNEG_CTRL_2)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_FLYBACK_VNEG_CTRL_3)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_FLYBACK_VNEG_CTRL_4)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_FLYBACK_VNEG_CTRL_5)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_FLYBACK_VNEG_CTRL_6)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_FLYBACK_VNEG_CTRL_7)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_FLYBACK_VNEG_CTRL_8)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_FLYBACK_VNEG_CTRL_9)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_FLYBACK_VNEGDAC_CTRL_1)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_FLYBACK_VNEGDAC_CTRL_2)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_FLYBACK_VNEGDAC_CTRL_3)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_FLYBACK_CTRL_1)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_FLYBACK_TEST_CTL)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_RX_AUX_SW_CTL)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_RX_PA_AUX_IN_CONN)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_RX_TIMER_DIV)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_RX_OCP_CTL)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_RX_OCP_COUNT)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_RX_BIAS_EAR_DAC)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_RX_BIAS_EAR_AMP)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_RX_BIAS_HPH_LDO)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_RX_BIAS_HPH_PA)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_RX_BIAS_HPH_RDACBUFF_CNP2)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_RX_BIAS_HPH_RDAC_LDO)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_RX_BIAS_HPH_CNP1)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_RX_BIAS_HPH_LOWPOWER)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_RX_BIAS_DIFFLO_PA)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_RX_BIAS_DIFFLO_REF)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_RX_BIAS_DIFFLO_LDO)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_RX_BIAS_SELO_DAC_PA)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_RX_BIAS_BUCK_RST)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_RX_BIAS_BUCK_VREF_ERRAMP)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_RX_BIAS_FLYB_ERRAMP)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_RX_BIAS_FLYB_BUFF)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_RX_BIAS_FLYB_MID_RST)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_HPH_L_STATUS)] = WCD934X_READ,
+	[WCD934X_REG(WCD934X_HPH_R_STATUS)] = WCD934X_READ,
+	[WCD934X_REG(WCD934X_HPH_CNP_EN)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_HPH_CNP_WG_CTL)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_HPH_CNP_WG_TIME)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_HPH_OCP_CTL)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_HPH_AUTO_CHOP)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_HPH_CHOP_CTL)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_HPH_PA_CTL1)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_HPH_PA_CTL2)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_HPH_L_EN)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_HPH_L_TEST)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_HPH_L_ATEST)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_HPH_R_EN)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_HPH_R_TEST)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_HPH_R_ATEST)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_HPH_RDAC_CLK_CTL1)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_HPH_RDAC_CLK_CTL2)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_HPH_RDAC_LDO_CTL)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_HPH_RDAC_CHOP_CLK_LP_CTL)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_HPH_REFBUFF_UHQA_CTL)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_HPH_REFBUFF_LP_CTL)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_HPH_L_DAC_CTL)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_HPH_R_DAC_CTL)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_EAR_EN_REG)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_EAR_CMBUFF)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_EAR_ICTL)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_EAR_EN_DBG_CTL)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_EAR_CNP)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_EAR_DAC_CTL_ATEST)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_EAR_STATUS_REG)] = WCD934X_READ,
+	[WCD934X_REG(WCD934X_EAR_EAR_MISC)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_DIFF_LO_MISC)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_DIFF_LO_LO2_COMPANDER)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_DIFF_LO_LO1_COMPANDER)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_DIFF_LO_COMMON)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_DIFF_LO_BYPASS_EN)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_DIFF_LO_CNP)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_DIFF_LO_CORE_OUT_PROG)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_DIFF_LO_LDO_OUT_PROG)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_DIFF_LO_COM_SWCAP_REFBUF_FREQ)] =
+							WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_DIFF_LO_COM_PA_FREQ)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_DIFF_LO_RESERVED_REG)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_DIFF_LO_LO1_STATUS_1)] = WCD934X_READ,
+	[WCD934X_REG(WCD934X_DIFF_LO_LO1_STATUS_2)] = WCD934X_READ,
+};
+
+const u8 wcd934x_page7_reg_access[WCD934X_PAGE_SIZE] = {
+	[WCD934X_REG(WCD934X_ANA_NEW_PAGE_REGISTER)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_HPH_NEW_ANA_HPH2)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_HPH_NEW_ANA_HPH3)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SLNQ_ANA_EN)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SLNQ_ANA_STATUS)] = WCD934X_READ,
+	[WCD934X_REG(WCD934X_SLNQ_ANA_LDO_CONFIG)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SLNQ_ANA_LDO_OCP_CONFIG)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SLNQ_ANA_TX_LDO_CONFIG)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SLNQ_ANA_TX_DRV_CONFIG)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SLNQ_ANA_RX_CONFIG_1)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SLNQ_ANA_RX_CONFIG_2)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SLNQ_ANA_PLL_ENABLES)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SLNQ_ANA_PLL_PRESET)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SLNQ_ANA_PLL_STATUS)] = WCD934X_READ,
+	[WCD934X_REG(WCD934X_CLK_SYS_PLL_ENABLES)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CLK_SYS_PLL_PRESET)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CLK_SYS_PLL_STATUS)] = WCD934X_READ,
+	[WCD934X_REG(WCD934X_CLK_SYS_MCLK_PRG)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CLK_SYS_MCLK2_PRG1)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CLK_SYS_MCLK2_PRG2)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CLK_SYS_XO_PRG)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CLK_SYS_XO_CAP_XTP)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CLK_SYS_XO_CAP_XTM)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_BOOST_BST_EN_DLY)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_BOOST_CTRL_ILIM)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_BOOST_VOUT_SETTING)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SIDO_NEW_VOUT_A_STARTUP)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SIDO_NEW_VOUT_D_STARTUP)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SIDO_NEW_VOUT_D_FREQ1)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SIDO_NEW_VOUT_D_FREQ2)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_MBHC_NEW_ELECT_REM_CLAMP_CTL)] =
+							WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_MBHC_NEW_CTL_1)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_MBHC_NEW_CTL_2)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_MBHC_NEW_PLUG_DETECT_CTL)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_MBHC_NEW_ZDET_ANA_CTL)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_MBHC_NEW_ZDET_RAMP_CTL)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_MBHC_NEW_FSM_STATUS)] = WCD934X_READ,
+	[WCD934X_REG(WCD934X_MBHC_NEW_ADC_RESULT)] = WCD934X_READ,
+	[WCD934X_REG(WCD934X_TX_NEW_AMIC_4_5_SEL)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_VBADC_NEW_ADC_MODE)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_VBADC_NEW_ADC_DOUTMSB)] = WCD934X_READ,
+	[WCD934X_REG(WCD934X_VBADC_NEW_ADC_DOUTLSB)] = WCD934X_READ,
+	[WCD934X_REG(WCD934X_HPH_NEW_INT_RDAC_GAIN_CTL)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_HPH_NEW_INT_RDAC_HD2_CTL)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_HPH_NEW_INT_RDAC_VREF_CTL)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_HPH_NEW_INT_RDAC_OVERRIDE_CTL)] =
+							WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_HPH_NEW_INT_RDAC_MISC1)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_HPH_NEW_INT_PA_MISC1)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_HPH_NEW_INT_PA_MISC2)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_HPH_NEW_INT_PA_RDAC_MISC)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_HPH_NEW_INT_HPH_TIMER1)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_HPH_NEW_INT_HPH_TIMER2)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_HPH_NEW_INT_HPH_TIMER3)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_HPH_NEW_INT_HPH_TIMER4)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_HPH_NEW_INT_PA_RDAC_MISC2)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_HPH_NEW_INT_PA_RDAC_MISC3)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_RX_NEW_INT_HPH_RDAC_BIAS_LOHIFI)] =
+							WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_RX_NEW_INT_HPH_RDAC_BIAS_ULP)] =
+							WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_RX_NEW_INT_HPH_RDAC_LDO_LP)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SLNQ_INT_ANA_INT_LDO_TEST)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SLNQ_INT_ANA_INT_LDO_DEBUG_1)] =
+							WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SLNQ_INT_ANA_INT_LDO_DEBUG_2)] =
+							WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SLNQ_INT_ANA_INT_TX_LDO_TEST)] =
+							WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SLNQ_INT_ANA_INT_TX_DRV_TEST)] =
+							WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SLNQ_INT_ANA_INT_RX_TEST)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SLNQ_INT_ANA_INT_RX_TEST_STATUS)] = WCD934X_READ,
+	[WCD934X_REG(WCD934X_SLNQ_INT_ANA_INT_RX_DEBUG_1)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SLNQ_INT_ANA_INT_RX_DEBUG_2)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SLNQ_INT_ANA_INT_CLK_CTRL)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SLNQ_INT_ANA_INT_RESERVED_1)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SLNQ_INT_ANA_INT_RESERVED_2)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SLNQ_INT_ANA_INT_PLL_POST_DIV_REG0)] =
+							WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SLNQ_INT_ANA_INT_PLL_POST_DIV_REG1)] =
+							WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SLNQ_INT_ANA_INT_PLL_REF_DIV_REG0)] =
+							WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SLNQ_INT_ANA_INT_PLL_REF_DIV_REG1)] =
+							WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SLNQ_INT_ANA_INT_PLL_FILTER_REG0)] =
+							WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SLNQ_INT_ANA_INT_PLL_FILTER_REG1)] =
+							WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SLNQ_INT_ANA_INT_PLL_L_VAL)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SLNQ_INT_ANA_INT_PLL_M_VAL)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SLNQ_INT_ANA_INT_PLL_N_VAL)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SLNQ_INT_ANA_INT_PLL_TEST_REG0)] =
+							WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SLNQ_INT_ANA_INT_PLL_PFD_CP_DSM_PROG)] =
+							WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SLNQ_INT_ANA_INT_PLL_VCO_PROG)] =
+							WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SLNQ_INT_ANA_INT_PLL_TEST_REG1)] =
+							WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SLNQ_INT_ANA_INT_PLL_LDO_LOCK_CFG)] =
+							WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SLNQ_INT_ANA_INT_PLL_DIG_LOCK_DET_CFG)] =
+							WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CLK_SYS_INT_POST_DIV_REG0)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CLK_SYS_INT_POST_DIV_REG1)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CLK_SYS_INT_REF_DIV_REG0)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CLK_SYS_INT_REF_DIV_REG1)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CLK_SYS_INT_FILTER_REG0)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CLK_SYS_INT_FILTER_REG1)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CLK_SYS_INT_PLL_L_VAL)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CLK_SYS_INT_PLL_M_VAL)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CLK_SYS_INT_PLL_N_VAL)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CLK_SYS_INT_TEST_REG0)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CLK_SYS_INT_PFD_CP_DSM_PROG)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CLK_SYS_INT_VCO_PROG)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CLK_SYS_INT_TEST_REG1)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CLK_SYS_INT_LDO_LOCK_CFG)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CLK_SYS_INT_DIG_LOCK_DET_CFG)] =
+							WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CLK_SYS_INT_CLK_TEST1)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CLK_SYS_INT_CLK_TEST2)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CLK_SYS_INT_CLK_TEST3)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CLK_SYS_INT_XO_TEST1)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CLK_SYS_INT_XO_TEST2)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_BOOST_INT_VCOMP_HYST)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_BOOST_INT_VLOOP_FILTER)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_BOOST_INT_CTRL_IDELTA)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_BOOST_INT_CTRL_ILIM_STARTUP)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_BOOST_INT_CTRL_MIN_ONTIME)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_BOOST_INT_CTRL_MAX_ONTIME)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_BOOST_INT_CTRL_TIMING)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_BOOST_INT_TMUX_A_D)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_BOOST_INT_SW_DRV_CNTL)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_BOOST_INT_SPARE1)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_BOOST_INT_SPARE2)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SIDO_NEW_INT_RAMP_STATUS)] = WCD934X_READ,
+	[WCD934X_REG(WCD934X_SIDO_NEW_INT_SPARE_1)] = WCD934X_READ,
+	[WCD934X_REG(WCD934X_SIDO_NEW_INT_DEBUG_VOUT_SETTING_A)] =
+							WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SIDO_NEW_INT_DEBUG_VOUT_SETTING_D)] =
+							WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SIDO_NEW_INT_RAMP_INC_WAIT)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SIDO_NEW_INT_DYNAMIC_IPEAK_CTL)] =
+							WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SIDO_NEW_INT_RAMP_IBLEED_CTL)] =
+							WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SIDO_NEW_INT_DEBUG_CPROVR_TEST)] =
+							WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SIDO_NEW_INT_RAMP_CTL_A)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SIDO_NEW_INT_RAMP_CTL_D)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SIDO_NEW_INT_RAMP_TIMEOUT_PERIOD)] =
+							WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SIDO_NEW_INT_DYNAMIC_IPEAK_SETTING1)] =
+							WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SIDO_NEW_INT_DYNAMIC_IPEAK_SETTING2)] =
+							WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SIDO_NEW_INT_DYNAMIC_IPEAK_SETTING3)] =
+							WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SIDO_NEW_INT_HIGH_ACCU_MODE_SEL1)] =
+							WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SIDO_NEW_INT_HIGH_ACCU_MODE_SEL2)] =
+							WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_MBHC_NEW_INT_SLNQ_HPF)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_MBHC_NEW_INT_SLNQ_REF)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_MBHC_NEW_INT_SLNQ_COMP)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_MBHC_NEW_INT_SPARE_2)] = WCD934X_READ_WRITE,
+};
+
+const u8 wcd934x_page10_reg_access[WCD934X_PAGE_SIZE] = {
+	[WCD934X_REG(WCD934X_PAGE10_PAGE_REGISTER)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_ANC0_CLK_RESET_CTL)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_ANC0_MODE_1_CTL)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_ANC0_MODE_2_CTL)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_ANC0_FF_SHIFT)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_ANC0_FB_SHIFT)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_ANC0_LPF_FF_A_CTL)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_ANC0_LPF_FF_B_CTL)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_ANC0_LPF_FB_CTL)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_ANC0_SMLPF_CTL)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_ANC0_DCFLT_SHIFT_CTL)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_ANC0_IIR_ADAPT_CTL)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_ANC0_IIR_COEFF_1_CTL)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_ANC0_IIR_COEFF_2_CTL)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_ANC0_FF_A_GAIN_CTL)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_ANC0_FF_B_GAIN_CTL)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_ANC0_FB_GAIN_CTL)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_ANC0_RC_COMMON_CTL)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_ANC0_FIFO_COMMON_CTL)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_ANC0_RC0_STATUS_FMIN_CNTR)] = WCD934X_READ,
+	[WCD934X_REG(WCD934X_CDC_ANC0_RC1_STATUS_FMIN_CNTR)] = WCD934X_READ,
+	[WCD934X_REG(WCD934X_CDC_ANC0_RC0_STATUS_FMAX_CNTR)] = WCD934X_READ,
+	[WCD934X_REG(WCD934X_CDC_ANC0_RC1_STATUS_FMAX_CNTR)] = WCD934X_READ,
+	[WCD934X_REG(WCD934X_CDC_ANC0_STATUS_FIFO)] = WCD934X_READ,
+	[WCD934X_REG(WCD934X_CDC_ANC1_CLK_RESET_CTL)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_ANC1_MODE_1_CTL)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_ANC1_MODE_2_CTL)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_ANC1_FF_SHIFT)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_ANC1_FB_SHIFT)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_ANC1_LPF_FF_A_CTL)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_ANC1_LPF_FF_B_CTL)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_ANC1_LPF_FB_CTL)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_ANC1_SMLPF_CTL)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_ANC1_DCFLT_SHIFT_CTL)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_ANC1_IIR_ADAPT_CTL)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_ANC1_IIR_COEFF_1_CTL)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_ANC1_IIR_COEFF_2_CTL)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_ANC1_FF_A_GAIN_CTL)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_ANC1_FF_B_GAIN_CTL)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_ANC1_FB_GAIN_CTL)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_ANC1_RC_COMMON_CTL)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_ANC1_FIFO_COMMON_CTL)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_ANC1_RC0_STATUS_FMIN_CNTR)] = WCD934X_READ,
+	[WCD934X_REG(WCD934X_CDC_ANC1_RC1_STATUS_FMIN_CNTR)] = WCD934X_READ,
+	[WCD934X_REG(WCD934X_CDC_ANC1_RC0_STATUS_FMAX_CNTR)] = WCD934X_READ,
+	[WCD934X_REG(WCD934X_CDC_ANC1_RC1_STATUS_FMAX_CNTR)] = WCD934X_READ,
+	[WCD934X_REG(WCD934X_CDC_ANC1_STATUS_FIFO)] = WCD934X_READ,
+	[WCD934X_REG(WCD934X_CDC_TX0_TX_PATH_CTL)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_TX0_TX_PATH_CFG0)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_TX0_TX_PATH_CFG1)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_TX0_TX_VOL_CTL)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_TX0_TX_PATH_192_CTL)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_TX0_TX_PATH_192_CFG)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_TX0_TX_PATH_SEC0)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_TX0_TX_PATH_SEC1)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_TX0_TX_PATH_SEC2)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_TX0_TX_PATH_SEC3)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_TX0_TX_PATH_SEC4)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_TX0_TX_PATH_SEC5)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_TX0_TX_PATH_SEC6)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_TX0_TX_PATH_SEC7)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_TX1_TX_PATH_CTL)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_TX1_TX_PATH_CFG0)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_TX1_TX_PATH_CFG1)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_TX1_TX_VOL_CTL)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_TX1_TX_PATH_192_CTL)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_TX1_TX_PATH_192_CFG)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_TX1_TX_PATH_SEC0)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_TX1_TX_PATH_SEC1)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_TX1_TX_PATH_SEC2)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_TX1_TX_PATH_SEC3)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_TX1_TX_PATH_SEC4)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_TX1_TX_PATH_SEC5)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_TX1_TX_PATH_SEC6)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_TX2_TX_PATH_CTL)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_TX2_TX_PATH_CFG0)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_TX2_TX_PATH_CFG1)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_TX2_TX_VOL_CTL)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_TX2_TX_PATH_192_CTL)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_TX2_TX_PATH_192_CFG)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_TX2_TX_PATH_SEC0)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_TX2_TX_PATH_SEC1)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_TX2_TX_PATH_SEC2)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_TX2_TX_PATH_SEC3)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_TX2_TX_PATH_SEC4)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_TX2_TX_PATH_SEC5)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_TX2_TX_PATH_SEC6)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_TX3_TX_PATH_CTL)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_TX3_TX_PATH_CFG0)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_TX3_TX_PATH_CFG1)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_TX3_TX_VOL_CTL)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_TX3_TX_PATH_192_CTL)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_TX3_TX_PATH_192_CFG)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_TX3_TX_PATH_SEC0)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_TX3_TX_PATH_SEC1)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_TX3_TX_PATH_SEC2)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_TX3_TX_PATH_SEC3)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_TX3_TX_PATH_SEC4)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_TX3_TX_PATH_SEC5)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_TX3_TX_PATH_SEC6)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_TX4_TX_PATH_CTL)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_TX4_TX_PATH_CFG0)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_TX4_TX_PATH_CFG1)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_TX4_TX_VOL_CTL)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_TX4_TX_PATH_192_CTL)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_TX4_TX_PATH_192_CFG)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_TX4_TX_PATH_SEC0)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_TX4_TX_PATH_SEC1)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_TX4_TX_PATH_SEC2)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_TX4_TX_PATH_SEC3)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_TX4_TX_PATH_SEC4)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_TX4_TX_PATH_SEC5)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_TX4_TX_PATH_SEC6)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_TX5_TX_PATH_CTL)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_TX5_TX_PATH_CFG0)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_TX5_TX_PATH_CFG1)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_TX5_TX_VOL_CTL)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_TX5_TX_PATH_192_CTL)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_TX5_TX_PATH_192_CFG)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_TX5_TX_PATH_SEC0)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_TX5_TX_PATH_SEC1)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_TX5_TX_PATH_SEC2)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_TX5_TX_PATH_SEC3)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_TX5_TX_PATH_SEC4)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_TX5_TX_PATH_SEC5)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_TX5_TX_PATH_SEC6)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_TX6_TX_PATH_CTL)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_TX6_TX_PATH_CFG0)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_TX6_TX_PATH_CFG1)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_TX6_TX_VOL_CTL)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_TX6_TX_PATH_192_CTL)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_TX6_TX_PATH_192_CFG)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_TX6_TX_PATH_SEC0)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_TX6_TX_PATH_SEC1)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_TX6_TX_PATH_SEC2)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_TX6_TX_PATH_SEC3)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_TX6_TX_PATH_SEC4)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_TX6_TX_PATH_SEC5)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_TX6_TX_PATH_SEC6)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_TX7_TX_PATH_CTL)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_TX7_TX_PATH_CFG0)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_TX7_TX_PATH_CFG1)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_TX7_TX_VOL_CTL)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_TX7_TX_PATH_192_CTL)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_TX7_TX_PATH_192_CFG)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_TX7_TX_PATH_SEC0)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_TX7_TX_PATH_SEC1)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_TX7_TX_PATH_SEC2)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_TX7_TX_PATH_SEC3)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_TX7_TX_PATH_SEC4)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_TX7_TX_PATH_SEC5)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_TX7_TX_PATH_SEC6)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_TX8_TX_PATH_CTL)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_TX8_TX_PATH_CFG0)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_TX8_TX_PATH_CFG1)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_TX8_TX_VOL_CTL)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_TX8_TX_PATH_192_CTL)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_TX8_TX_PATH_192_CFG)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_TX8_TX_PATH_SEC0)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_TX8_TX_PATH_SEC1)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_TX8_TX_PATH_SEC2)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_TX8_TX_PATH_SEC3)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_TX8_TX_PATH_SEC4)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_TX8_TX_PATH_SEC5)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_TX8_TX_PATH_SEC6)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_TX9_SPKR_PROT_PATH_CTL)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_TX9_SPKR_PROT_PATH_CFG0)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_TX10_SPKR_PROT_PATH_CTL)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_TX10_SPKR_PROT_PATH_CFG0)] =
+							WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_TX11_SPKR_PROT_PATH_CTL)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_TX11_SPKR_PROT_PATH_CFG0)] =
+							WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_TX12_SPKR_PROT_PATH_CTL)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_TX12_SPKR_PROT_PATH_CFG0)] =
+							WCD934X_READ_WRITE,
+};
+
+const u8 wcd934x_page11_reg_access[WCD934X_PAGE_SIZE] = {
+	[WCD934X_REG(WCD934X_PAGE11_PAGE_REGISTER)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_COMPANDER1_CTL0)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_COMPANDER1_CTL1)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_COMPANDER1_CTL2)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_COMPANDER1_CTL3)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_COMPANDER1_CTL4)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_COMPANDER1_CTL5)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_COMPANDER1_CTL6)] = WCD934X_READ,
+	[WCD934X_REG(WCD934X_CDC_COMPANDER1_CTL7)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_COMPANDER2_CTL0)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_COMPANDER2_CTL1)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_COMPANDER2_CTL2)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_COMPANDER2_CTL3)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_COMPANDER2_CTL4)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_COMPANDER2_CTL5)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_COMPANDER2_CTL6)] = WCD934X_READ,
+	[WCD934X_REG(WCD934X_CDC_COMPANDER2_CTL7)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_COMPANDER3_CTL0)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_COMPANDER3_CTL1)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_COMPANDER3_CTL2)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_COMPANDER3_CTL3)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_COMPANDER3_CTL4)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_COMPANDER3_CTL5)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_COMPANDER3_CTL6)] = WCD934X_READ,
+	[WCD934X_REG(WCD934X_CDC_COMPANDER3_CTL7)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_COMPANDER4_CTL0)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_COMPANDER4_CTL1)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_COMPANDER4_CTL2)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_COMPANDER4_CTL3)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_COMPANDER4_CTL4)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_COMPANDER4_CTL5)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_COMPANDER4_CTL6)] = WCD934X_READ,
+	[WCD934X_REG(WCD934X_CDC_COMPANDER4_CTL7)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_COMPANDER7_CTL0)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_COMPANDER7_CTL1)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_COMPANDER7_CTL2)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_COMPANDER7_CTL3)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_COMPANDER7_CTL4)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_COMPANDER7_CTL5)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_COMPANDER7_CTL6)] = WCD934X_READ,
+	[WCD934X_REG(WCD934X_CDC_COMPANDER7_CTL7)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_COMPANDER8_CTL0)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_COMPANDER8_CTL1)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_COMPANDER8_CTL2)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_COMPANDER8_CTL3)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_COMPANDER8_CTL4)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_COMPANDER8_CTL5)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_COMPANDER8_CTL6)] = WCD934X_READ,
+	[WCD934X_REG(WCD934X_CDC_COMPANDER8_CTL7)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RX0_RX_PATH_CTL)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RX0_RX_PATH_CFG0)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RX0_RX_PATH_CFG1)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RX0_RX_PATH_CFG2)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RX0_RX_VOL_CTL)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RX0_RX_PATH_MIX_CTL)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RX0_RX_PATH_MIX_CFG)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RX0_RX_VOL_MIX_CTL)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RX0_RX_PATH_SEC0)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RX0_RX_PATH_SEC1)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RX0_RX_PATH_SEC2)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RX0_RX_PATH_SEC3)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RX0_RX_PATH_SEC5)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RX0_RX_PATH_SEC6)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RX0_RX_PATH_SEC7)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RX0_RX_PATH_MIX_SEC0)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RX0_RX_PATH_MIX_SEC1)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RX0_RX_PATH_DSMDEM_CTL)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RX1_RX_PATH_CTL)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RX1_RX_PATH_CFG0)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RX1_RX_PATH_CFG1)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RX1_RX_PATH_CFG2)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RX1_RX_VOL_CTL)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RX1_RX_PATH_MIX_CTL)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RX1_RX_PATH_MIX_CFG)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RX1_RX_VOL_MIX_CTL)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RX1_RX_PATH_SEC0)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RX1_RX_PATH_SEC1)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RX1_RX_PATH_SEC2)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RX1_RX_PATH_SEC3)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RX1_RX_PATH_SEC4)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RX1_RX_PATH_SEC5)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RX1_RX_PATH_SEC6)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RX1_RX_PATH_SEC7)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RX1_RX_PATH_MIX_SEC0)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RX1_RX_PATH_MIX_SEC1)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RX1_RX_PATH_DSMDEM_CTL)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RX2_RX_PATH_CTL)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RX2_RX_PATH_CFG0)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RX2_RX_PATH_CFG1)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RX2_RX_PATH_CFG2)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RX2_RX_VOL_CTL)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RX2_RX_PATH_MIX_CTL)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RX2_RX_PATH_MIX_CFG)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RX2_RX_VOL_MIX_CTL)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RX2_RX_PATH_SEC0)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RX2_RX_PATH_SEC1)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RX2_RX_PATH_SEC2)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RX2_RX_PATH_SEC3)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RX2_RX_PATH_SEC4)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RX2_RX_PATH_SEC5)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RX2_RX_PATH_SEC6)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RX2_RX_PATH_SEC7)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RX2_RX_PATH_MIX_SEC0)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RX2_RX_PATH_MIX_SEC1)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RX2_RX_PATH_DSMDEM_CTL)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RX3_RX_PATH_CTL)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RX3_RX_PATH_CFG0)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RX3_RX_PATH_CFG1)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RX3_RX_PATH_CFG2)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RX3_RX_VOL_CTL)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RX3_RX_PATH_MIX_CTL)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RX3_RX_PATH_MIX_CFG)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RX3_RX_VOL_MIX_CTL)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RX3_RX_PATH_SEC0)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RX3_RX_PATH_SEC1)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RX3_RX_PATH_SEC2)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RX3_RX_PATH_SEC3)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RX3_RX_PATH_SEC5)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RX3_RX_PATH_SEC6)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RX3_RX_PATH_SEC7)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RX3_RX_PATH_MIX_SEC0)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RX3_RX_PATH_MIX_SEC1)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RX3_RX_PATH_DSMDEM_CTL)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RX4_RX_PATH_CTL)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RX4_RX_PATH_CFG0)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RX4_RX_PATH_CFG1)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RX4_RX_PATH_CFG2)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RX4_RX_VOL_CTL)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RX4_RX_PATH_MIX_CTL)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RX4_RX_PATH_MIX_CFG)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RX4_RX_VOL_MIX_CTL)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RX4_RX_PATH_SEC0)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RX4_RX_PATH_SEC1)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RX4_RX_PATH_SEC2)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RX4_RX_PATH_SEC3)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RX4_RX_PATH_SEC5)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RX4_RX_PATH_SEC6)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RX4_RX_PATH_SEC7)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RX4_RX_PATH_MIX_SEC0)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RX4_RX_PATH_MIX_SEC1)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RX4_RX_PATH_DSMDEM_CTL)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RX7_RX_PATH_CTL)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RX7_RX_PATH_CFG0)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RX7_RX_PATH_CFG1)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RX7_RX_PATH_CFG2)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RX7_RX_VOL_CTL)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RX7_RX_PATH_MIX_CTL)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RX7_RX_PATH_MIX_CFG)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RX7_RX_VOL_MIX_CTL)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RX7_RX_PATH_SEC0)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RX7_RX_PATH_SEC1)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RX7_RX_PATH_SEC2)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RX7_RX_PATH_SEC3)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RX7_RX_PATH_SEC5)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RX7_RX_PATH_SEC6)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RX7_RX_PATH_SEC7)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RX7_RX_PATH_MIX_SEC0)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RX7_RX_PATH_MIX_SEC1)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RX7_RX_PATH_DSMDEM_CTL)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RX8_RX_PATH_CTL)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RX8_RX_PATH_CFG0)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RX8_RX_PATH_CFG1)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RX8_RX_PATH_CFG2)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RX8_RX_VOL_CTL)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RX8_RX_PATH_MIX_CTL)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RX8_RX_PATH_MIX_CFG)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RX8_RX_VOL_MIX_CTL)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RX8_RX_PATH_SEC0)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RX8_RX_PATH_SEC1)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RX8_RX_PATH_SEC2)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RX8_RX_PATH_SEC3)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RX8_RX_PATH_SEC5)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RX8_RX_PATH_SEC6)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RX8_RX_PATH_SEC7)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RX8_RX_PATH_MIX_SEC0)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RX8_RX_PATH_MIX_SEC1)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RX8_RX_PATH_DSMDEM_CTL)] = WCD934X_READ_WRITE,
+};
+
+const u8 wcd934x_page12_reg_access[WCD934X_PAGE_SIZE] = {
+	[WCD934X_REG(WCD934X_PAGE12_PAGE_REGISTER)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_CLSH_CRC)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_CLSH_DLY_CTRL)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_CLSH_DECAY_CTRL)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_CLSH_HPH_V_PA)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_CLSH_EAR_V_PA)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_CLSH_HPH_V_HD)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_CLSH_EAR_V_HD)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_CLSH_K1_MSB)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_CLSH_K1_LSB)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_CLSH_K2_MSB)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_CLSH_K2_LSB)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_CLSH_IDLE_CTRL)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_CLSH_IDLE_HPH)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_CLSH_IDLE_EAR)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_CLSH_TEST0)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_CLSH_TEST1)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_CLSH_OVR_VREF)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_BOOST0_BOOST_PATH_CTL)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_BOOST0_BOOST_CTL)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_BOOST0_BOOST_CFG1)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_BOOST0_BOOST_CFG2)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_BOOST1_BOOST_PATH_CTL)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_BOOST1_BOOST_CTL)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_BOOST1_BOOST_CFG1)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_BOOST1_BOOST_CFG2)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_VBAT_VBAT_PATH_CTL)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_VBAT_VBAT_CFG)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_VBAT_VBAT_ADC_CAL1)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_VBAT_VBAT_ADC_CAL2)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_VBAT_VBAT_ADC_CAL3)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_VBAT_VBAT_PK_EST1)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_VBAT_VBAT_PK_EST2)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_VBAT_VBAT_PK_EST3)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_VBAT_VBAT_RF_PROC1)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_VBAT_VBAT_RF_PROC2)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_VBAT_VBAT_TAC1)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_VBAT_VBAT_TAC2)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_VBAT_VBAT_TAC3)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_VBAT_VBAT_TAC4)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_VBAT_VBAT_GAIN_UPD1)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_VBAT_VBAT_GAIN_UPD2)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_VBAT_VBAT_GAIN_UPD3)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_VBAT_VBAT_GAIN_UPD4)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_VBAT_VBAT_DEBUG1)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_VBAT_VBAT_GAIN_UPD_MON)] = WCD934X_WRITE,
+	[WCD934X_REG(WCD934X_CDC_VBAT_VBAT_GAIN_MON_VAL)] = WCD934X_READ,
+	[WCD934X_REG(WCD934X_CDC_VBAT_VBAT_BAN)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_MIXING_ASRC0_CLK_RST_CTL)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_MIXING_ASRC0_CTL0)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_MIXING_ASRC0_CTL1)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_MIXING_ASRC0_FIFO_CTL)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_MIXING_ASRC0_STATUS_FMIN_CNTR_LSB)] = WCD934X_READ,
+	[WCD934X_REG(WCD934X_MIXING_ASRC0_STATUS_FMIN_CNTR_MSB)] = WCD934X_READ,
+	[WCD934X_REG(WCD934X_MIXING_ASRC0_STATUS_FMAX_CNTR_LSB)] = WCD934X_READ,
+	[WCD934X_REG(WCD934X_MIXING_ASRC0_STATUS_FMAX_CNTR_MSB)] = WCD934X_READ,
+	[WCD934X_REG(WCD934X_MIXING_ASRC0_STATUS_FIFO)] = WCD934X_READ,
+	[WCD934X_REG(WCD934X_MIXING_ASRC1_CLK_RST_CTL)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_MIXING_ASRC1_CTL0)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_MIXING_ASRC1_CTL1)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_MIXING_ASRC1_FIFO_CTL)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_MIXING_ASRC1_STATUS_FMIN_CNTR_LSB)] = WCD934X_READ,
+	[WCD934X_REG(WCD934X_MIXING_ASRC1_STATUS_FMIN_CNTR_MSB)] = WCD934X_READ,
+	[WCD934X_REG(WCD934X_MIXING_ASRC1_STATUS_FMAX_CNTR_LSB)] = WCD934X_READ,
+	[WCD934X_REG(WCD934X_MIXING_ASRC1_STATUS_FMAX_CNTR_MSB)] = WCD934X_READ,
+	[WCD934X_REG(WCD934X_MIXING_ASRC1_STATUS_FIFO)] = WCD934X_READ,
+	[WCD934X_REG(WCD934X_MIXING_ASRC2_CLK_RST_CTL)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_MIXING_ASRC2_CTL0)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_MIXING_ASRC2_CTL1)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_MIXING_ASRC2_FIFO_CTL)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_MIXING_ASRC2_STATUS_FMIN_CNTR_LSB)] = WCD934X_READ,
+	[WCD934X_REG(WCD934X_MIXING_ASRC2_STATUS_FMIN_CNTR_MSB)] = WCD934X_READ,
+	[WCD934X_REG(WCD934X_MIXING_ASRC2_STATUS_FMAX_CNTR_LSB)] = WCD934X_READ,
+	[WCD934X_REG(WCD934X_MIXING_ASRC2_STATUS_FMAX_CNTR_MSB)] = WCD934X_READ,
+	[WCD934X_REG(WCD934X_MIXING_ASRC2_STATUS_FIFO)] = WCD934X_READ,
+	[WCD934X_REG(WCD934X_MIXING_ASRC3_CLK_RST_CTL)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_MIXING_ASRC3_CTL0)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_MIXING_ASRC3_CTL1)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_MIXING_ASRC3_FIFO_CTL)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_MIXING_ASRC3_STATUS_FMIN_CNTR_LSB)] = WCD934X_READ,
+	[WCD934X_REG(WCD934X_MIXING_ASRC3_STATUS_FMIN_CNTR_MSB)] = WCD934X_READ,
+	[WCD934X_REG(WCD934X_MIXING_ASRC3_STATUS_FMAX_CNTR_LSB)] = WCD934X_READ,
+	[WCD934X_REG(WCD934X_MIXING_ASRC3_STATUS_FMAX_CNTR_MSB)] = WCD934X_READ,
+	[WCD934X_REG(WCD934X_MIXING_ASRC3_STATUS_FIFO)] = WCD934X_READ,
+	[WCD934X_REG(WCD934X_SWR_AHB_BRIDGE_WR_DATA_0)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SWR_AHB_BRIDGE_WR_DATA_1)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SWR_AHB_BRIDGE_WR_DATA_2)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SWR_AHB_BRIDGE_WR_DATA_3)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SWR_AHB_BRIDGE_WR_ADDR_0)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SWR_AHB_BRIDGE_WR_ADDR_1)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SWR_AHB_BRIDGE_WR_ADDR_2)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SWR_AHB_BRIDGE_WR_ADDR_3)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SWR_AHB_BRIDGE_RD_ADDR_0)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SWR_AHB_BRIDGE_RD_ADDR_1)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SWR_AHB_BRIDGE_RD_ADDR_2)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SWR_AHB_BRIDGE_RD_ADDR_3)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SWR_AHB_BRIDGE_RD_DATA_0)] = WCD934X_READ,
+	[WCD934X_REG(WCD934X_SWR_AHB_BRIDGE_RD_DATA_1)] = WCD934X_READ,
+	[WCD934X_REG(WCD934X_SWR_AHB_BRIDGE_RD_DATA_2)] = WCD934X_READ,
+	[WCD934X_REG(WCD934X_SWR_AHB_BRIDGE_RD_DATA_3)] = WCD934X_READ,
+	[WCD934X_REG(WCD934X_SWR_AHB_BRIDGE_ACCESS_CFG)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SWR_AHB_BRIDGE_ACCESS_STATUS)] = WCD934X_READ,
+	[WCD934X_REG(WCD934X_CDC_SIDETONE_SRC0_ST_SRC_PATH_CTL)] =
+							WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_SIDETONE_SRC0_ST_SRC_PATH_CFG1)] =
+							WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_SIDETONE_SRC1_ST_SRC_PATH_CTL)] =
+							WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_SIDETONE_SRC1_ST_SRC_PATH_CFG1)] =
+							WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SIDETONE_ASRC0_CLK_RST_CTL)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SIDETONE_ASRC0_CTL0)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SIDETONE_ASRC0_CTL1)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SIDETONE_ASRC0_FIFO_CTL)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SIDETONE_ASRC0_STATUS_FMIN_CNTR_LSB)] =
+							WCD934X_READ,
+	[WCD934X_REG(WCD934X_SIDETONE_ASRC0_STATUS_FMIN_CNTR_MSB)] =
+							WCD934X_READ,
+	[WCD934X_REG(WCD934X_SIDETONE_ASRC0_STATUS_FMAX_CNTR_LSB)] =
+							WCD934X_READ,
+	[WCD934X_REG(WCD934X_SIDETONE_ASRC0_STATUS_FMAX_CNTR_MSB)] =
+							WCD934X_READ,
+	[WCD934X_REG(WCD934X_SIDETONE_ASRC0_STATUS_FIFO)] = WCD934X_READ,
+	[WCD934X_REG(WCD934X_SIDETONE_ASRC1_CLK_RST_CTL)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SIDETONE_ASRC1_CTL0)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SIDETONE_ASRC1_CTL1)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SIDETONE_ASRC1_FIFO_CTL)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SIDETONE_ASRC1_STATUS_FMIN_CNTR_LSB)] =
+							WCD934X_READ,
+	[WCD934X_REG(WCD934X_SIDETONE_ASRC1_STATUS_FMIN_CNTR_MSB)] =
+							WCD934X_READ,
+	[WCD934X_REG(WCD934X_SIDETONE_ASRC1_STATUS_FMAX_CNTR_LSB)] =
+							WCD934X_READ,
+	[WCD934X_REG(WCD934X_SIDETONE_ASRC1_STATUS_FMAX_CNTR_MSB)] =
+							WCD934X_READ,
+	[WCD934X_REG(WCD934X_SIDETONE_ASRC1_STATUS_FIFO)] = WCD934X_READ,
+	[WCD934X_REG(WCD934X_EC_REF_HQ0_EC_REF_HQ_PATH_CTL)] =
+							WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_EC_REF_HQ0_EC_REF_HQ_CFG0)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_EC_REF_HQ1_EC_REF_HQ_PATH_CTL)] =
+							WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_EC_REF_HQ1_EC_REF_HQ_CFG0)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_EC_ASRC0_CLK_RST_CTL)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_EC_ASRC0_CTL0)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_EC_ASRC0_CTL1)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_EC_ASRC0_FIFO_CTL)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_EC_ASRC0_STATUS_FMIN_CNTR_LSB)] = WCD934X_READ,
+	[WCD934X_REG(WCD934X_EC_ASRC0_STATUS_FMIN_CNTR_MSB)] = WCD934X_READ,
+	[WCD934X_REG(WCD934X_EC_ASRC0_STATUS_FMAX_CNTR_LSB)] = WCD934X_READ,
+	[WCD934X_REG(WCD934X_EC_ASRC0_STATUS_FMAX_CNTR_MSB)] = WCD934X_READ,
+	[WCD934X_REG(WCD934X_EC_ASRC0_STATUS_FIFO)] = WCD934X_READ,
+	[WCD934X_REG(WCD934X_EC_ASRC1_CLK_RST_CTL)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_EC_ASRC1_CTL0)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_EC_ASRC1_CTL1)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_EC_ASRC1_FIFO_CTL)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_EC_ASRC1_STATUS_FMIN_CNTR_LSB)] = WCD934X_READ,
+	[WCD934X_REG(WCD934X_EC_ASRC1_STATUS_FMIN_CNTR_MSB)] = WCD934X_READ,
+	[WCD934X_REG(WCD934X_EC_ASRC1_STATUS_FMAX_CNTR_LSB)] = WCD934X_READ,
+	[WCD934X_REG(WCD934X_EC_ASRC1_STATUS_FMAX_CNTR_MSB)] = WCD934X_READ,
+	[WCD934X_REG(WCD934X_EC_ASRC1_STATUS_FIFO)] = WCD934X_READ,
+};
+
+const u8 wcd934x_page13_reg_access[WCD934X_PAGE_SIZE] = {
+	[WCD934X_REG(WCD934X_PAGE13_PAGE_REGISTER)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RX_INP_MUX_RX_INT0_CFG0)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RX_INP_MUX_RX_INT0_CFG1)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RX_INP_MUX_RX_INT1_CFG0)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RX_INP_MUX_RX_INT1_CFG1)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RX_INP_MUX_RX_INT2_CFG0)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RX_INP_MUX_RX_INT2_CFG1)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RX_INP_MUX_RX_INT3_CFG0)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RX_INP_MUX_RX_INT3_CFG1)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RX_INP_MUX_RX_INT4_CFG0)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RX_INP_MUX_RX_INT4_CFG1)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RX_INP_MUX_RX_INT7_CFG0)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RX_INP_MUX_RX_INT7_CFG1)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RX_INP_MUX_RX_INT8_CFG0)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RX_INP_MUX_RX_INT8_CFG1)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RX_INP_MUX_RX_MIX_CFG0)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RX_INP_MUX_RX_MIX_CFG1)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RX_INP_MUX_RX_MIX_CFG2)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RX_INP_MUX_RX_MIX_CFG3)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RX_INP_MUX_RX_MIX_CFG4)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RX_INP_MUX_SIDETONE_SRC_CFG0)] =
+							WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RX_INP_MUX_SIDETONE_SRC_CFG1)] =
+							WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RX_INP_MUX_ANC_CFG0)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RX_INP_MUX_SPLINE_ASRC_CFG0)] =
+							WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RX_INP_MUX_EC_REF_HQ_CFG0)] =
+							WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_TX_INP_MUX_ADC_MUX0_CFG0)] =
+							WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_TX_INP_MUX_ADC_MUX0_CFG1)] =
+							WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_TX_INP_MUX_ADC_MUX1_CFG0)] =
+							WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_TX_INP_MUX_ADC_MUX1_CFG1)] =
+							WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_TX_INP_MUX_ADC_MUX2_CFG0)] =
+							WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_TX_INP_MUX_ADC_MUX2_CFG1)] =
+							WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_TX_INP_MUX_ADC_MUX3_CFG0)] =
+							WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_TX_INP_MUX_ADC_MUX3_CFG1)] =
+							WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_TX_INP_MUX_ADC_MUX4_CFG0)] =
+							WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_TX_INP_MUX_ADC_MUX5_CFG0)] =
+							WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_TX_INP_MUX_ADC_MUX6_CFG0)] =
+							WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_TX_INP_MUX_ADC_MUX7_CFG0)] =
+							WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_TX_INP_MUX_ADC_MUX8_CFG0)] =
+							WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_TX_INP_MUX_ADC_MUX10_CFG0)] =
+							WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_TX_INP_MUX_ADC_MUX11_CFG0)] =
+							WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_TX_INP_MUX_ADC_MUX12_CFG0)] =
+							WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_TX_INP_MUX_ADC_MUX13_CFG0)] =
+							WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_SIDETONE_IIR_INP_MUX_IIR0_MIX_CFG0)] =
+							WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_SIDETONE_IIR_INP_MUX_IIR0_MIX_CFG1)] =
+							WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_SIDETONE_IIR_INP_MUX_IIR0_MIX_CFG2)] =
+							WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_SIDETONE_IIR_INP_MUX_IIR0_MIX_CFG3)] =
+							WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_SIDETONE_IIR_INP_MUX_IIR1_MIX_CFG0)] =
+							WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_SIDETONE_IIR_INP_MUX_IIR1_MIX_CFG1)] =
+							WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_SIDETONE_IIR_INP_MUX_IIR1_MIX_CFG2)] =
+							WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_SIDETONE_IIR_INP_MUX_IIR1_MIX_CFG3)] =
+							WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_IF_ROUTER_TX_MUX_CFG0)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_IF_ROUTER_TX_MUX_CFG1)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_IF_ROUTER_TX_MUX_CFG2)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_IF_ROUTER_TX_MUX_CFG3)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_CLK_RST_CTRL_MCLK_CONTROL)] =
+							WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_CLK_RST_CTRL_FS_CNT_CONTROL)] =
+							WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_CLK_RST_CTRL_SWR_CONTROL)] =
+							WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_CLK_RST_CTRL_DSD_CONTROL)] =
+							WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_CLK_RST_CTRL_ASRC_SHARE_CONTROL)] =
+							WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_CLK_RST_CTRL_GFM_CONTROL)] =
+							WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_PROX_DETECT_PROX_CTL)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_PROX_DETECT_PROX_POLL_PERIOD0)] =
+							WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_PROX_DETECT_PROX_POLL_PERIOD1)] =
+							WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_PROX_DETECT_PROX_SIG_PATTERN_LSB)] =
+							WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_PROX_DETECT_PROX_SIG_PATTERN_MSB)] =
+							WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_PROX_DETECT_PROX_STATUS)] = WCD934X_READ,
+	[WCD934X_REG(WCD934X_CDC_PROX_DETECT_PROX_TEST_CTRL)] =
+							WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_PROX_DETECT_PROX_TEST_BUFF_LSB)] =
+							WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_PROX_DETECT_PROX_TEST_BUFF_MSB)] =
+							WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_PROX_DETECT_PROX_TEST_BUFF_LSB_RD)] =
+							WCD934X_READ,
+	[WCD934X_REG(WCD934X_CDC_PROX_DETECT_PROX_TEST_BUFF_MSB_RD)] =
+							WCD934X_READ,
+	[WCD934X_REG(WCD934X_CDC_PROX_DETECT_PROX_CTL_REPEAT_PAT)] =
+							WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_SIDETONE_IIR0_IIR_PATH_CTL)] =
+							WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_SIDETONE_IIR0_IIR_GAIN_B1_CTL)] =
+							WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_SIDETONE_IIR0_IIR_GAIN_B2_CTL)] =
+							WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_SIDETONE_IIR0_IIR_GAIN_B3_CTL)] =
+							WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_SIDETONE_IIR0_IIR_GAIN_B4_CTL)] =
+							WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_SIDETONE_IIR0_IIR_GAIN_B5_CTL)] =
+							WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_SIDETONE_IIR0_IIR_GAIN_B6_CTL)] =
+							WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_SIDETONE_IIR0_IIR_GAIN_B7_CTL)] =
+							WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_SIDETONE_IIR0_IIR_GAIN_B8_CTL)] =
+							WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_SIDETONE_IIR0_IIR_CTL)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_SIDETONE_IIR0_IIR_GAIN_TIMER_CTL)] =
+							WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_SIDETONE_IIR0_IIR_COEF_B1_CTL)] =
+							WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_SIDETONE_IIR0_IIR_COEF_B2_CTL)] =
+							WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_SIDETONE_IIR1_IIR_PATH_CTL)] =
+							WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_SIDETONE_IIR1_IIR_GAIN_B1_CTL)] =
+							WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_SIDETONE_IIR1_IIR_GAIN_B2_CTL)] =
+							WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_SIDETONE_IIR1_IIR_GAIN_B3_CTL)] =
+							WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_SIDETONE_IIR1_IIR_GAIN_B4_CTL)] =
+							WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_SIDETONE_IIR1_IIR_GAIN_B5_CTL)] =
+							WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_SIDETONE_IIR1_IIR_GAIN_B6_CTL)] =
+							WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_SIDETONE_IIR1_IIR_GAIN_B7_CTL)] =
+							WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_SIDETONE_IIR1_IIR_GAIN_B8_CTL)] =
+							WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_SIDETONE_IIR1_IIR_CTL)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_SIDETONE_IIR1_IIR_GAIN_TIMER_CTL)] =
+							WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_SIDETONE_IIR1_IIR_COEF_B1_CTL)] =
+							WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_SIDETONE_IIR1_IIR_COEF_B2_CTL)] =
+							WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_TOP_TOP_CFG0)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_TOP_TOP_CFG1)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_TOP_TOP_CFG7)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_TOP_HPHL_COMP_WR_LSB)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_TOP_HPHL_COMP_WR_MSB)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_TOP_HPHL_COMP_LUT)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_TOP_HPHL_COMP_RD_LSB)] = WCD934X_READ,
+	[WCD934X_REG(WCD934X_CDC_TOP_HPHL_COMP_RD_MSB)] = WCD934X_READ,
+	[WCD934X_REG(WCD934X_CDC_TOP_HPHR_COMP_WR_LSB)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_TOP_HPHR_COMP_WR_MSB)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_TOP_HPHR_COMP_LUT)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_TOP_HPHR_COMP_RD_LSB)] = WCD934X_READ,
+	[WCD934X_REG(WCD934X_CDC_TOP_HPHR_COMP_RD_MSB)] = WCD934X_READ,
+	[WCD934X_REG(WCD934X_CDC_TOP_DIFFL_COMP_WR_LSB)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_TOP_DIFFL_COMP_WR_MSB)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_TOP_DIFFL_COMP_LUT)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_TOP_DIFFL_COMP_RD_LSB)] = WCD934X_READ,
+	[WCD934X_REG(WCD934X_CDC_TOP_DIFFL_COMP_RD_MSB)] = WCD934X_READ,
+	[WCD934X_REG(WCD934X_CDC_TOP_DIFFR_COMP_WR_LSB)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_TOP_DIFFR_COMP_WR_MSB)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_TOP_DIFFR_COMP_LUT)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_TOP_DIFFR_COMP_RD_LSB)] = WCD934X_READ,
+	[WCD934X_REG(WCD934X_CDC_TOP_DIFFR_COMP_RD_MSB)] = WCD934X_READ,
+	[WCD934X_REG(WCD934X_CDC_DSD0_PATH_CTL)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_DSD0_CFG0)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_DSD0_CFG1)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_DSD0_CFG2)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_DSD0_CFG3)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_DSD0_CFG4)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_DSD0_CFG5)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_DSD1_PATH_CTL)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_DSD1_CFG0)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_DSD1_CFG1)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_DSD1_CFG2)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_DSD1_CFG3)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_DSD1_CFG4)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_DSD1_CFG5)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RX_IDLE_DET_PATH_CTL)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RX_IDLE_DET_CFG0)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RX_IDLE_DET_CFG1)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RX_IDLE_DET_CFG2)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RX_IDLE_DET_CFG3)] = WCD934X_READ_WRITE,
+};
+
+const u8 wcd934x_page14_reg_access[WCD934X_PAGE_SIZE] = {
+	[WCD934X_REG(WCD934X_PAGE14_PAGE_REGISTER)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RATE_EST0_RE_CLK_RST_CTL)] =
+							WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RATE_EST0_RE_CTL)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RATE_EST0_RE_PULSE_SUPR_CTL)] =
+							WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RATE_EST0_RE_TIMER)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RATE_EST0_RE_BW_SW)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RATE_EST0_RE_THRESH)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RATE_EST0_RE_STATUS)] = WCD934X_READ,
+	[WCD934X_REG(WCD934X_CDC_RATE_EST0_RE_DIAG_CTRL)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RATE_EST0_RE_DIAG_TIMER2)] =
+							WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RATE_EST0_RE_DIAG_OFFSET_BW1)] =
+							WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RATE_EST0_RE_DIAG_OFFSET_BW2)] =
+							WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RATE_EST0_RE_DIAG_OFFSET_BW3)] =
+							WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RATE_EST0_RE_DIAG_OFFSET_BW4)] =
+							WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RATE_EST0_RE_DIAG_OFFSET_BW5)] =
+							WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RATE_EST0_RE_DIAG_LIMIT_BW1)] =
+							WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RATE_EST0_RE_DIAG_LIMIT_BW2)] =
+							WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RATE_EST0_RE_DIAG_LIMIT_BW3)] =
+							WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RATE_EST0_RE_DIAG_LIMIT_BW4)] =
+							WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RATE_EST0_RE_DIAG_LIMIT_BW5)] =
+							WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RATE_EST0_RE_DIAG_LIMITD1_BW1)] =
+							WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RATE_EST0_RE_DIAG_LIMITD1_BW2)] =
+							WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RATE_EST0_RE_DIAG_LIMITD1_BW3)] =
+							WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RATE_EST0_RE_DIAG_LIMITD1_BW4)] =
+							WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RATE_EST0_RE_DIAG_LIMITD1_BW5)] =
+							WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RATE_EST0_RE_DIAG_HYST_BW1)] =
+							WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RATE_EST0_RE_DIAG_HYST_BW2)] =
+							WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RATE_EST0_RE_DIAG_HYST_BW3)] =
+							WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RATE_EST0_RE_DIAG_HYST_BW4)] =
+							WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RATE_EST0_RE_DIAG_HYST_BW5)] =
+							WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RATE_EST0_RE_RMAX_DIAG)] = WCD934X_READ,
+	[WCD934X_REG(WCD934X_CDC_RATE_EST0_RE_RMIN_DIAG)] = WCD934X_READ,
+	[WCD934X_REG(WCD934X_CDC_RATE_EST0_RE_PH_DET)] = WCD934X_READ,
+	[WCD934X_REG(WCD934X_CDC_RATE_EST0_RE_DIAG_CLR)] = WCD934X_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RATE_EST0_RE_MB_SW_STATE)] = WCD934X_READ,
+	[WCD934X_REG(WCD934X_CDC_RATE_EST0_RE_MAST_DIAG_STATE)] = WCD934X_READ,
+	[WCD934X_REG(WCD934X_CDC_RATE_EST0_RE_RATE_OUT_7_0)] = WCD934X_READ,
+	[WCD934X_REG(WCD934X_CDC_RATE_EST0_RE_RATE_OUT_15_8)] = WCD934X_READ,
+	[WCD934X_REG(WCD934X_CDC_RATE_EST0_RE_RATE_OUT_23_16)] = WCD934X_READ,
+	[WCD934X_REG(WCD934X_CDC_RATE_EST0_RE_RATE_OUT_31_24)] = WCD934X_READ,
+	[WCD934X_REG(WCD934X_CDC_RATE_EST0_RE_RATE_OUT_39_32)] = WCD934X_READ,
+	[WCD934X_REG(WCD934X_CDC_RATE_EST0_RE_RATE_OUT_40_43)] = WCD934X_READ,
+	[WCD934X_REG(WCD934X_CDC_RATE_EST1_RE_CLK_RST_CTL)] =
+							WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RATE_EST1_RE_CTL)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RATE_EST1_RE_PULSE_SUPR_CTL)] =
+							WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RATE_EST1_RE_TIMER)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RATE_EST1_RE_BW_SW)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RATE_EST1_RE_THRESH)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RATE_EST1_RE_STATUS)] = WCD934X_READ,
+	[WCD934X_REG(WCD934X_CDC_RATE_EST1_RE_DIAG_CTRL)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RATE_EST1_RE_DIAG_TIMER2)] =
+							WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RATE_EST1_RE_DIAG_OFFSET_BW1)] =
+							WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RATE_EST1_RE_DIAG_OFFSET_BW2)] =
+							WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RATE_EST1_RE_DIAG_OFFSET_BW3)] =
+							WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RATE_EST1_RE_DIAG_OFFSET_BW4)] =
+							WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RATE_EST1_RE_DIAG_OFFSET_BW5)] =
+							WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RATE_EST1_RE_DIAG_LIMIT_BW1)] =
+							WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RATE_EST1_RE_DIAG_LIMIT_BW2)] =
+							WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RATE_EST1_RE_DIAG_LIMIT_BW3)] =
+							WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RATE_EST1_RE_DIAG_LIMIT_BW4)] =
+							WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RATE_EST1_RE_DIAG_LIMIT_BW5)] =
+							WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RATE_EST1_RE_DIAG_LIMITD1_BW1)] =
+							WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RATE_EST1_RE_DIAG_LIMITD1_BW2)] =
+							WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RATE_EST1_RE_DIAG_LIMITD1_BW3)] =
+							WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RATE_EST1_RE_DIAG_LIMITD1_BW4)] =
+							WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RATE_EST1_RE_DIAG_LIMITD1_BW5)] =
+							WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RATE_EST1_RE_DIAG_HYST_BW1)] =
+							WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RATE_EST1_RE_DIAG_HYST_BW2)] =
+							WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RATE_EST1_RE_DIAG_HYST_BW3)] =
+							WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RATE_EST1_RE_DIAG_HYST_BW4)] =
+							WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RATE_EST1_RE_DIAG_HYST_BW5)] =
+							WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RATE_EST1_RE_RMAX_DIAG)] = WCD934X_READ,
+	[WCD934X_REG(WCD934X_CDC_RATE_EST1_RE_RMIN_DIAG)] = WCD934X_READ,
+	[WCD934X_REG(WCD934X_CDC_RATE_EST1_RE_PH_DET)] = WCD934X_READ,
+	[WCD934X_REG(WCD934X_CDC_RATE_EST1_RE_DIAG_CLR)] = WCD934X_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RATE_EST1_RE_MB_SW_STATE)] = WCD934X_READ,
+	[WCD934X_REG(WCD934X_CDC_RATE_EST1_RE_MAST_DIAG_STATE)] = WCD934X_READ,
+	[WCD934X_REG(WCD934X_CDC_RATE_EST1_RE_RATE_OUT_7_0)] = WCD934X_READ,
+	[WCD934X_REG(WCD934X_CDC_RATE_EST1_RE_RATE_OUT_15_8)] = WCD934X_READ,
+	[WCD934X_REG(WCD934X_CDC_RATE_EST1_RE_RATE_OUT_23_16)] = WCD934X_READ,
+	[WCD934X_REG(WCD934X_CDC_RATE_EST1_RE_RATE_OUT_31_24)] = WCD934X_READ,
+	[WCD934X_REG(WCD934X_CDC_RATE_EST1_RE_RATE_OUT_39_32)] = WCD934X_READ,
+	[WCD934X_REG(WCD934X_CDC_RATE_EST1_RE_RATE_OUT_40_43)] = WCD934X_READ,
+	[WCD934X_REG(WCD934X_CDC_RATE_EST2_RE_CLK_RST_CTL)] =
+							WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RATE_EST2_RE_CTL)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RATE_EST2_RE_PULSE_SUPR_CTL)] =
+							WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RATE_EST2_RE_TIMER)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RATE_EST2_RE_BW_SW)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RATE_EST2_RE_THRESH)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RATE_EST2_RE_STATUS)] = WCD934X_READ,
+	[WCD934X_REG(WCD934X_CDC_RATE_EST2_RE_DIAG_CTRL)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RATE_EST2_RE_DIAG_TIMER2)] =
+							WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RATE_EST2_RE_DIAG_OFFSET_BW1)] =
+							WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RATE_EST2_RE_DIAG_OFFSET_BW2)] =
+							WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RATE_EST2_RE_DIAG_OFFSET_BW3)] =
+							WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RATE_EST2_RE_DIAG_OFFSET_BW4)] =
+							WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RATE_EST2_RE_DIAG_OFFSET_BW5)] =
+							WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RATE_EST2_RE_DIAG_LIMIT_BW1)] =
+							WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RATE_EST2_RE_DIAG_LIMIT_BW2)] =
+							WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RATE_EST2_RE_DIAG_LIMIT_BW3)] =
+							WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RATE_EST2_RE_DIAG_LIMIT_BW4)] =
+							WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RATE_EST2_RE_DIAG_LIMIT_BW5)] =
+							WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RATE_EST2_RE_DIAG_LIMITD1_BW1)] =
+							WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RATE_EST2_RE_DIAG_LIMITD1_BW2)] =
+							WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RATE_EST2_RE_DIAG_LIMITD1_BW3)] =
+							WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RATE_EST2_RE_DIAG_LIMITD1_BW4)] =
+							WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RATE_EST2_RE_DIAG_LIMITD1_BW5)] =
+							WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RATE_EST2_RE_DIAG_HYST_BW1)] =
+							WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RATE_EST2_RE_DIAG_HYST_BW2)] =
+							WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RATE_EST2_RE_DIAG_HYST_BW3)] =
+							WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RATE_EST2_RE_DIAG_HYST_BW4)] =
+							WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RATE_EST2_RE_DIAG_HYST_BW5)] =
+							WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RATE_EST2_RE_RMAX_DIAG)] = WCD934X_READ,
+	[WCD934X_REG(WCD934X_CDC_RATE_EST2_RE_RMIN_DIAG)] = WCD934X_READ,
+	[WCD934X_REG(WCD934X_CDC_RATE_EST2_RE_PH_DET)] = WCD934X_READ,
+	[WCD934X_REG(WCD934X_CDC_RATE_EST2_RE_DIAG_CLR)] = WCD934X_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RATE_EST2_RE_MB_SW_STATE)] = WCD934X_READ,
+	[WCD934X_REG(WCD934X_CDC_RATE_EST2_RE_MAST_DIAG_STATE)] = WCD934X_READ,
+	[WCD934X_REG(WCD934X_CDC_RATE_EST2_RE_RATE_OUT_7_0)] = WCD934X_READ,
+	[WCD934X_REG(WCD934X_CDC_RATE_EST2_RE_RATE_OUT_15_8)] = WCD934X_READ,
+	[WCD934X_REG(WCD934X_CDC_RATE_EST2_RE_RATE_OUT_23_16)] = WCD934X_READ,
+	[WCD934X_REG(WCD934X_CDC_RATE_EST2_RE_RATE_OUT_31_24)] = WCD934X_READ,
+	[WCD934X_REG(WCD934X_CDC_RATE_EST2_RE_RATE_OUT_39_32)] = WCD934X_READ,
+	[WCD934X_REG(WCD934X_CDC_RATE_EST2_RE_RATE_OUT_40_43)] = WCD934X_READ,
+	[WCD934X_REG(WCD934X_CDC_RATE_EST3_RE_CLK_RST_CTL)] =
+							WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RATE_EST3_RE_CTL)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RATE_EST3_RE_PULSE_SUPR_CTL)] =
+							WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RATE_EST3_RE_TIMER)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RATE_EST3_RE_BW_SW)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RATE_EST3_RE_THRESH)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RATE_EST3_RE_STATUS)] = WCD934X_READ,
+	[WCD934X_REG(WCD934X_CDC_RATE_EST3_RE_DIAG_CTRL)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RATE_EST3_RE_DIAG_TIMER2)] =
+							WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RATE_EST3_RE_DIAG_OFFSET_BW1)] =
+							WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RATE_EST3_RE_DIAG_OFFSET_BW2)] =
+							WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RATE_EST3_RE_DIAG_OFFSET_BW3)] =
+							WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RATE_EST3_RE_DIAG_OFFSET_BW4)] =
+							WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RATE_EST3_RE_DIAG_OFFSET_BW5)] =
+							WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RATE_EST3_RE_DIAG_LIMIT_BW1)] =
+							WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RATE_EST3_RE_DIAG_LIMIT_BW2)] =
+							WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RATE_EST3_RE_DIAG_LIMIT_BW3)] =
+							WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RATE_EST3_RE_DIAG_LIMIT_BW4)] =
+							WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RATE_EST3_RE_DIAG_LIMIT_BW5)] =
+							WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RATE_EST3_RE_DIAG_LIMITD1_BW1)] =
+							WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RATE_EST3_RE_DIAG_LIMITD1_BW2)] =
+							WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RATE_EST3_RE_DIAG_LIMITD1_BW3)] =
+							WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RATE_EST3_RE_DIAG_LIMITD1_BW4)] =
+							WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RATE_EST3_RE_DIAG_LIMITD1_BW5)] =
+							WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RATE_EST3_RE_DIAG_HYST_BW1)] =
+							WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RATE_EST3_RE_DIAG_HYST_BW2)] =
+							WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RATE_EST3_RE_DIAG_HYST_BW3)] =
+							WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RATE_EST3_RE_DIAG_HYST_BW4)] =
+							WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RATE_EST3_RE_DIAG_HYST_BW5)] =
+							WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RATE_EST3_RE_RMAX_DIAG)] = WCD934X_READ,
+	[WCD934X_REG(WCD934X_CDC_RATE_EST3_RE_RMIN_DIAG)] = WCD934X_READ,
+	[WCD934X_REG(WCD934X_CDC_RATE_EST3_RE_PH_DET)] = WCD934X_READ,
+	[WCD934X_REG(WCD934X_CDC_RATE_EST3_RE_DIAG_CLR)] = WCD934X_WRITE,
+	[WCD934X_REG(WCD934X_CDC_RATE_EST3_RE_MB_SW_STATE)] = WCD934X_READ,
+	[WCD934X_REG(WCD934X_CDC_RATE_EST3_RE_MAST_DIAG_STATE)] = WCD934X_READ,
+	[WCD934X_REG(WCD934X_CDC_RATE_EST3_RE_RATE_OUT_7_0)] = WCD934X_READ,
+	[WCD934X_REG(WCD934X_CDC_RATE_EST3_RE_RATE_OUT_15_8)] = WCD934X_READ,
+	[WCD934X_REG(WCD934X_CDC_RATE_EST3_RE_RATE_OUT_23_16)] = WCD934X_READ,
+	[WCD934X_REG(WCD934X_CDC_RATE_EST3_RE_RATE_OUT_31_24)] = WCD934X_READ,
+	[WCD934X_REG(WCD934X_CDC_RATE_EST3_RE_RATE_OUT_39_32)] = WCD934X_READ,
+	[WCD934X_REG(WCD934X_CDC_RATE_EST3_RE_RATE_OUT_40_43)] = WCD934X_READ,
+};
+
+const u8 wcd934x_page15_reg_access[WCD934X_PAGE_SIZE] = {
+	[WCD934X_REG(WCD934X_PAGE15_PAGE_REGISTER)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SPLINE_SRC0_CLK_RST_CTL_0)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SPLINE_SRC0_STATUS)] = WCD934X_READ,
+	[WCD934X_REG(WCD934X_SPLINE_SRC1_CLK_RST_CTL_0)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SPLINE_SRC1_STATUS)] = WCD934X_READ,
+	[WCD934X_REG(WCD934X_SPLINE_SRC2_CLK_RST_CTL_0)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SPLINE_SRC2_STATUS)] = WCD934X_READ,
+	[WCD934X_REG(WCD934X_SPLINE_SRC3_CLK_RST_CTL_0)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_SPLINE_SRC3_STATUS)] = WCD934X_READ,
+	[WCD934X_REG(WCD934X_CDC_DEBUG_DSD0_DEBUG_CFG0)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_DEBUG_DSD0_DEBUG_CFG1)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_DEBUG_DSD0_DEBUG_CFG2)] = WCD934X_READ,
+	[WCD934X_REG(WCD934X_CDC_DEBUG_DSD0_DEBUG_CFG3)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_DEBUG_DSD1_DEBUG_CFG0)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_DEBUG_DSD1_DEBUG_CFG1)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_DEBUG_DSD1_DEBUG_CFG2)] = WCD934X_READ,
+	[WCD934X_REG(WCD934X_CDC_DEBUG_DSD1_DEBUG_CFG3)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_DEBUG_SPLINE_SRC_DEBUG_CFG0)] =
+							WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_DEBUG_SPLINE_SRC_DEBUG_CFG1)] =
+							WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_DEBUG_RC_RE_ASRC_DEBUG_CFG0)] =
+							WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_DEBUG_ANC0_RC0_FIFO_CTL)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_DEBUG_ANC0_RC1_FIFO_CTL)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_DEBUG_ANC1_RC0_FIFO_CTL)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_DEBUG_ANC1_RC1_FIFO_CTL)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CDC_DEBUG_ANC_RC_RST_DBG_CNTR)] =
+							WCD934X_READ_WRITE,
+};
+
+const u8 wcd934x_page_0x50_reg_access[WCD934X_PAGE_SIZE] = {
+	[WCD934X_REG(WCD934X_PAGE80_PAGE_REGISTER)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CODEC_CPR_WR_DATA_0)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CODEC_CPR_WR_DATA_1)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CODEC_CPR_WR_DATA_2)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CODEC_CPR_WR_DATA_3)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CODEC_CPR_WR_ADDR_0)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CODEC_CPR_WR_ADDR_1)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CODEC_CPR_WR_ADDR_2)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CODEC_CPR_WR_ADDR_3)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CODEC_CPR_RD_ADDR_0)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CODEC_CPR_RD_ADDR_1)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CODEC_CPR_RD_ADDR_2)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CODEC_CPR_RD_ADDR_3)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CODEC_CPR_RD_DATA_0)] = WCD934X_READ,
+	[WCD934X_REG(WCD934X_CODEC_CPR_RD_DATA_1)] = WCD934X_READ,
+	[WCD934X_REG(WCD934X_CODEC_CPR_RD_DATA_2)] = WCD934X_READ,
+	[WCD934X_REG(WCD934X_CODEC_CPR_RD_DATA_3)] = WCD934X_READ,
+	[WCD934X_REG(WCD934X_CODEC_CPR_ACCESS_CFG)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CODEC_CPR_ACCESS_STATUS)] = WCD934X_READ,
+	[WCD934X_REG(WCD934X_CODEC_CPR_NOM_CX_VDD)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CODEC_CPR_SVS_CX_VDD)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CODEC_CPR_SVS2_CX_VDD)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CODEC_CPR_NOM_MX_VDD)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CODEC_CPR_SVS_MX_VDD)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CODEC_CPR_SVS2_MX_VDD)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CODEC_CPR_SVS2_MIN_CX_VDD)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CODEC_CPR_MAX_SVS2_STEP)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CODEC_CPR_CTL)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CODEC_CPR_SW_MODECHNG_STATUS)] = WCD934X_READ,
+	[WCD934X_REG(WCD934X_CODEC_CPR_SW_MODECHNG_START)] = WCD934X_WRITE,
+	[WCD934X_REG(WCD934X_CODEC_CPR_CPR_STATUS)] = WCD934X_READ_WRITE,
+};
+
+const u8 wcd934x_page_0x80_reg_access[WCD934X_PAGE_SIZE] = {
+	[WCD934X_REG(WCD934X_PAGE80_PAGE_REGISTER)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CODEC_CPR_WR_DATA_0)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CODEC_CPR_WR_DATA_1)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CODEC_CPR_WR_DATA_2)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CODEC_CPR_WR_DATA_3)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CODEC_CPR_WR_ADDR_0)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CODEC_CPR_WR_ADDR_1)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CODEC_CPR_WR_ADDR_2)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CODEC_CPR_WR_ADDR_3)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CODEC_CPR_RD_ADDR_0)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CODEC_CPR_RD_ADDR_1)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CODEC_CPR_RD_ADDR_2)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CODEC_CPR_RD_ADDR_3)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CODEC_CPR_RD_DATA_0)] = WCD934X_READ,
+	[WCD934X_REG(WCD934X_CODEC_CPR_RD_DATA_1)] = WCD934X_READ,
+	[WCD934X_REG(WCD934X_CODEC_CPR_RD_DATA_2)] = WCD934X_READ,
+	[WCD934X_REG(WCD934X_CODEC_CPR_RD_DATA_3)] = WCD934X_READ,
+	[WCD934X_REG(WCD934X_CODEC_CPR_ACCESS_CFG)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CODEC_CPR_ACCESS_STATUS)] = WCD934X_READ,
+	[WCD934X_REG(WCD934X_CODEC_CPR_NOM_CX_VDD)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CODEC_CPR_SVS_CX_VDD)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CODEC_CPR_SVS2_CX_VDD)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CODEC_CPR_NOM_MX_VDD)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CODEC_CPR_SVS_MX_VDD)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CODEC_CPR_SVS2_MX_VDD)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CODEC_CPR_SVS2_MIN_CX_VDD)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CODEC_CPR_MAX_SVS2_STEP)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CODEC_CPR_CTL)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_CODEC_CPR_SW_MODECHNG_STATUS)] = WCD934X_READ,
+	[WCD934X_REG(WCD934X_CODEC_CPR_SW_MODECHNG_START)] = WCD934X_WRITE,
+	[WCD934X_REG(WCD934X_CODEC_CPR_CPR_STATUS)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_PAGE128_PAGE_REGISTER)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_TLMM_BIST_MODE_PINCFG)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_TLMM_RF_PA_ON_PINCFG)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_TLMM_INTR1_PINCFG)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_TLMM_INTR2_PINCFG)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_TLMM_SWR_DATA_PINCFG)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_TLMM_SWR_CLK_PINCFG)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_TLMM_I2S_2_SCK_PINCFG)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_TLMM_SLIMBUS_DATA1_PINCFG)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_TLMM_SLIMBUS_DATA2_PINCFG)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_TLMM_SLIMBUS_CLK_PINCFG)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_TLMM_I2C_CLK_PINCFG)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_TLMM_I2C_DATA_PINCFG)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_TLMM_I2S_0_RX_PINCFG)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_TLMM_I2S_0_TX_PINCFG)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_TLMM_I2S_0_SCK_PINCFG)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_TLMM_I2S_0_WS_PINCFG)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_TLMM_I2S_1_RX_PINCFG)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_TLMM_I2S_1_TX_PINCFG)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_TLMM_I2S_1_SCK_PINCFG)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_TLMM_I2S_1_WS_PINCFG)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_TLMM_DMIC1_CLK_PINCFG)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_TLMM_DMIC1_DATA_PINCFG)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_TLMM_DMIC2_CLK_PINCFG)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_TLMM_DMIC2_DATA_PINCFG)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_TLMM_DMIC3_CLK_PINCFG)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_TLMM_DMIC3_DATA_PINCFG)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_TLMM_JTCK_PINCFG)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_TLMM_GPIO1_PINCFG)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_TLMM_GPIO2_PINCFG)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_TLMM_GPIO3_PINCFG)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_TLMM_GPIO4_PINCFG)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_TLMM_SPI_S_CSN_PINCFG)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_TLMM_SPI_S_CLK_PINCFG)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_TLMM_SPI_S_DOUT_PINCFG)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_TLMM_SPI_S_DIN_PINCFG)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_TLMM_BA_N_PINCFG)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_TLMM_GPIO0_PINCFG)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_TLMM_I2S_2_RX_PINCFG)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_TLMM_I2S_2_WS_PINCFG)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_TEST_DEBUG_PIN_CTL_OE_0)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_TEST_DEBUG_PIN_CTL_OE_1)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_TEST_DEBUG_PIN_CTL_OE_2)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_TEST_DEBUG_PIN_CTL_OE_3)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_TEST_DEBUG_PIN_CTL_OE_4)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_TEST_DEBUG_PIN_CTL_DATA_0)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_TEST_DEBUG_PIN_CTL_DATA_1)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_TEST_DEBUG_PIN_CTL_DATA_2)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_TEST_DEBUG_PIN_CTL_DATA_3)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_TEST_DEBUG_PIN_CTL_DATA_4)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_TEST_DEBUG_PAD_DRVCTL_0)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_TEST_DEBUG_PAD_DRVCTL_1)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_TEST_DEBUG_PIN_STATUS)] = WCD934X_READ,
+	[WCD934X_REG(WCD934X_TEST_DEBUG_NPL_DLY_TEST_1)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_TEST_DEBUG_NPL_DLY_TEST_2)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_TEST_DEBUG_MEM_CTRL)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_TEST_DEBUG_DEBUG_BUS_SEL)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_TEST_DEBUG_DEBUG_JTAG)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_TEST_DEBUG_DEBUG_EN_1)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_TEST_DEBUG_DEBUG_EN_2)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_TEST_DEBUG_DEBUG_EN_3)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_TEST_DEBUG_DEBUG_EN_4)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_TEST_DEBUG_DEBUG_EN_5)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_TEST_DEBUG_ANA_DTEST_DIR)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_TEST_DEBUG_PAD_INP_DISABLE_0)] =
+							WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_TEST_DEBUG_PAD_INP_DISABLE_1)] =
+							WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_TEST_DEBUG_PAD_INP_DISABLE_2)] =
+							WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_TEST_DEBUG_PAD_INP_DISABLE_3)] =
+							WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_TEST_DEBUG_PAD_INP_DISABLE_4)] =
+							WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_TEST_DEBUG_SYSMEM_CTRL)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_TEST_DEBUG_SOC_SW_PWR_SEQ_DELAY)] =
+							WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_TEST_DEBUG_LVAL_NOM_LOW)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_TEST_DEBUG_LVAL_NOM_HIGH)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_TEST_DEBUG_LVAL_SVS_SVS2_LOW)] =
+							WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_TEST_DEBUG_LVAL_SVS_SVS2_HIGH)] =
+							WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_TEST_DEBUG_SPI_SLAVE_CHAR)] = WCD934X_READ_WRITE,
+	[WCD934X_REG(WCD934X_TEST_DEBUG_CODEC_DIAGS)] = WCD934X_READ,
+};
+
+const u8 * const wcd934x_reg[WCD934X_NUM_PAGES] = {
+	[WCD934X_PAGE_0] = wcd934x_page0_reg_access,
+	[WCD934X_PAGE_1] = wcd934x_page1_reg_access,
+	[WCD934X_PAGE_2] = wcd934x_page2_reg_access,
+	[WCD934X_PAGE_4] = wcd934x_page4_reg_access,
+	[WCD934X_PAGE_5] = wcd934x_page5_reg_access,
+	[WCD934X_PAGE_6] = wcd934x_page6_reg_access,
+	[WCD934X_PAGE_7] = wcd934x_page7_reg_access,
+	[WCD934X_PAGE_10] = wcd934x_page10_reg_access,
+	[WCD934X_PAGE_11] = wcd934x_page11_reg_access,
+	[WCD934X_PAGE_12] = wcd934x_page12_reg_access,
+	[WCD934X_PAGE_13] = wcd934x_page13_reg_access,
+	[WCD934X_PAGE_14] = wcd934x_page14_reg_access,
+	[WCD934X_PAGE_15] = wcd934x_page15_reg_access,
+	[WCD934X_PAGE_0x50] = wcd934x_page_0x50_reg_access,
+	[WCD934X_PAGE_0X80] = wcd934x_page_0x80_reg_access,
+};
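
The access tables above form a two-level lookup: each page is a 256-entry array of access flags indexed by the register's low byte, and wcd934x_reg selects the page array by page number. A minimal lookup sketch, assuming WCD934X_READ is a flag bit and WCD934X_READ_WRITE is the OR of the read and write flags (the helper itself is hypothetical):

/* Hypothetical helper: is 'reg' readable according to the tables above? */
static bool wcd934x_is_readable(unsigned int page, unsigned int reg)
{
	const u8 *tbl;

	if (page >= WCD934X_NUM_PAGES)
		return false;
	tbl = wcd934x_reg[page];
	if (!tbl)			/* unmapped page: no access entry */
		return false;
	return tbl[reg & 0xff] & WCD934X_READ;
}
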
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/mfd/wcd9xxx-core.c	2019-01-22 16:16:24.691257165 +0100
@@ -0,0 +1,1716 @@
+/* Copyright (c) 2011-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/slab.h>
+#include <linux/ratelimit.h>
+#include <linux/mfd/core.h>
+#include <linux/mfd/wcd9xxx/wcd9xxx-slimslave.h>
+#include <linux/mfd/wcd9xxx/core.h>
+#include <linux/mfd/wcd9xxx/wcd9xxx-irq.h>
+#include <linux/mfd/wcd9xxx/pdata.h>
+#include <linux/mfd/wcd9xxx/wcd9xxx_registers.h>
+#include <linux/mfd/msm-cdc-pinctrl.h>
+#include <linux/mfd/wcd9xxx/wcd9xxx-utils.h>
+#include <linux/mfd/msm-cdc-supply.h>
+#include <linux/delay.h>
+#include <linux/gpio.h>
+#include <linux/debugfs.h>
+#include <linux/i2c.h>
+#include <linux/regmap.h>
+#include <sound/soc.h>
+#include "wcd9xxx-regmap.h"
+
+#define WCD9XXX_REGISTER_START_OFFSET 0x800
+#define WCD9XXX_SLIM_RW_MAX_TRIES 3
+#define SLIMBUS_PRESENT_TIMEOUT 100
+
+#define MAX_WCD9XXX_DEVICE	4
+#define WCD9XXX_I2C_GSBI_SLAVE_ID "3-000d"
+#define WCD9XXX_I2C_TOP_SLAVE_ADDR	0x0d
+#define WCD9XXX_ANALOG_I2C_SLAVE_ADDR	0x77
+#define WCD9XXX_DIGITAL1_I2C_SLAVE_ADDR	0x66
+#define WCD9XXX_DIGITAL2_I2C_SLAVE_ADDR	0x55
+#define WCD9XXX_I2C_TOP_LEVEL	0
+#define WCD9XXX_I2C_ANALOG	1
+#define WCD9XXX_I2C_DIGITAL_1	2
+#define WCD9XXX_I2C_DIGITAL_2	3
+
+/*
+ * Number of return values to check for each SLIMbus or I2C bus
+ * registration of each codec
+ */
+#define NUM_WCD9XXX_REG_RET	4
+
+#define SLIM_USR_MC_REPEAT_CHANGE_VALUE 0x0
+#define SLIM_REPEAT_WRITE_MAX_SLICE 16
+#define REG_BYTES 2
+#define VAL_BYTES 1
+#define WCD9XXX_PAGE_NUM(reg)    (((reg) >> 8) & 0xff)
+#define WCD9XXX_PAGE_SIZE 256
+
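As a side note on the paging scheme these constants encode (a standalone sketch, not part of the driver): a 16-bit codec register address carries the page number in its upper byte and the offset within the 256-byte page in its lower byte. PAGE_OFFSET below is a hypothetical helper added for illustration.

#include <stdio.h>

#define PAGE_NUM(reg)    (((reg) >> 8) & 0xff)
#define PAGE_OFFSET(reg) ((reg) & 0xff)	/* hypothetical helper */

int main(void)
{
	unsigned short reg = 0x0f12;	/* example register address */

	/* prints "page=0x0f offset=0x12" */
	printf("page=0x%02x offset=0x%02x\n",
	       PAGE_NUM(reg), PAGE_OFFSET(reg));
	return 0;
}
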
+struct wcd9xxx_i2c {
+	struct i2c_client *client;
+	struct i2c_msg xfer_msg[2];
+	struct mutex xfer_lock;
+	int mod_id;
+};
+
+static struct regmap_config wcd9xxx_base_regmap_config = {
+	.reg_bits = 16,
+	.val_bits = 8,
+	.can_multi_write = true,
+};
+
+static struct regmap_config wcd9xxx_i2c_base_regmap_config = {
+	.reg_bits = 16,
+	.val_bits = 8,
+	.can_multi_write = false,
+	.use_single_rw = true,
+};
+
+static u8 wcd9xxx_pgd_la;
+static u8 wcd9xxx_inf_la;
+
+static const int wcd9xxx_cdc_types[] = {
+	[WCD9XXX] = WCD9XXX,
+	[WCD9330] = WCD9330,
+	[WCD9335] = WCD9335,
+	[WCD934X] = WCD934X,
+};
+
+static const struct of_device_id wcd9xxx_of_match[] = {
+	{ .compatible = "qcom,tasha-i2c-pgd",
+	  .data = (void *)&wcd9xxx_cdc_types[WCD9335]},
+	{ .compatible = "qcom,wcd9xxx-i2c",
+	  .data = (void *)&wcd9xxx_cdc_types[WCD9330]},
+	{ }
+};
+MODULE_DEVICE_TABLE(of, wcd9xxx_of_match);
+
+static int wcd9xxx_slim_device_up(struct slim_device *sldev);
+static int wcd9xxx_slim_device_down(struct slim_device *sldev);
+
+struct wcd9xxx_i2c wcd9xxx_modules[MAX_WCD9XXX_DEVICE];
+
+static int wcd9xxx_slim_multi_reg_write(struct wcd9xxx *wcd9xxx,
+					const void *data, size_t count)
+{
+	unsigned int reg;
+	struct device *dev;
+	u8 val[WCD9XXX_PAGE_SIZE];
+	int ret = 0;
+	int i = 0;
+	int n = 0;
+	unsigned int page_num;
+	size_t num_regs = (count / (REG_BYTES + VAL_BYTES));
+	struct wcd9xxx_reg_val *bulk_reg;
+	u8 *buf;
+
+	dev = wcd9xxx->dev;
+	if (!data) {
+		dev_err(dev, "%s: data is NULL\n", __func__);
+		return -EINVAL;
+	}
+	if (num_regs == 0)
+		return -EINVAL;
+
+	bulk_reg = kzalloc(num_regs * (sizeof(struct wcd9xxx_reg_val)),
+			   GFP_KERNEL);
+	if (!bulk_reg)
+		return -ENOMEM;
+
+	buf = (u8 *)data;
+	reg = *(u16 *)buf;
+	page_num = WCD9XXX_PAGE_NUM(reg);
+	for (i = 0, n = 0; n < num_regs; i++, n++) {
+		reg = *(u16 *)buf;
+		if (page_num != WCD9XXX_PAGE_NUM(reg)) {
+			ret = wcd9xxx_slim_bulk_write(wcd9xxx, bulk_reg,
+						      i, false);
+			page_num = WCD9XXX_PAGE_NUM(reg);
+			i = 0;
+		}
+		buf += REG_BYTES;
+		val[i] = *buf;
+		buf += VAL_BYTES;
+		bulk_reg[i].reg = reg;
+		bulk_reg[i].buf = &val[i];
+		bulk_reg[i].bytes = 1;
+	}
+	ret = wcd9xxx_slim_bulk_write(wcd9xxx, bulk_reg,
+				      i, false);
+	if (ret)
+		dev_err(dev, "%s: error writing bulk regs\n",
+			__func__);
+
+	kfree(bulk_reg);
+	return ret;
+}
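
regmap hands multi_reg_write a flat buffer of (register, value) records, REG_BYTES + VAL_BYTES each; the loop above flushes the accumulated writes whenever the page number changes. A userspace sketch of walking that layout, assuming little-endian register encoding:

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

#define REG_BYTES 2
#define VAL_BYTES 1
#define PAGE_NUM(reg) (((reg) >> 8) & 0xff)

static void dump_records(const uint8_t *buf, size_t count)
{
	size_t num_regs = count / (REG_BYTES + VAL_BYTES);
	size_t n;

	for (n = 0; n < num_regs; n++) {
		/* assumes the u16 register is stored little-endian */
		uint16_t reg = buf[0] | (buf[1] << 8);
		uint8_t val = buf[REG_BYTES];

		printf("page %d: write 0x%02x to reg 0x%04x\n",
		       PAGE_NUM(reg), val, reg);
		buf += REG_BYTES + VAL_BYTES;
	}
}
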
+
+/*
+ * wcd9xxx_interface_reg_read: Read slim interface registers
+ *
+ * @wcd9xxx: Pointer to wcd9xxx structure
+ * @reg: register address
+ *
+ * Returns the register value on success or a negative error code on failure
+ */
+int wcd9xxx_interface_reg_read(struct wcd9xxx *wcd9xxx, unsigned short reg)
+{
+	u8 val;
+	int ret;
+
+	mutex_lock(&wcd9xxx->io_lock);
+	ret = wcd9xxx->read_dev(wcd9xxx, reg, 1, (void *)&val,
+				true);
+	if (ret < 0)
+		dev_err(wcd9xxx->dev, "%s: Codec read 0x%x failed\n",
+			__func__, reg);
+	else
+		dev_dbg(wcd9xxx->dev, "%s: Read 0x%02x from 0x%x\n",
+			__func__, val, reg);
+
+	mutex_unlock(&wcd9xxx->io_lock);
+
+	if (ret < 0)
+		return ret;
+	else
+		return val;
+}
+EXPORT_SYMBOL(wcd9xxx_interface_reg_read);
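
A hedged caller sketch for the read helper above; the return value doubles as the register value, so callers must check for a negative error before using it:

/* Hypothetical caller; 'reg' is any valid slim interface register. */
static void example_interface_read(struct wcd9xxx *wcd9xxx,
				   unsigned short reg)
{
	int val = wcd9xxx_interface_reg_read(wcd9xxx, reg);

	if (val < 0)
		dev_err(wcd9xxx->dev, "read of 0x%x failed: %d\n", reg, val);
	else
		dev_dbg(wcd9xxx->dev, "reg 0x%x holds 0x%02x\n", reg, val);
}
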
+
+/*
+ * wcd9xxx_interface_reg_write: Write slim interface registers
+ *
+ * @wcd9xxx: Pointer to wcd9xxx structure
+ * @reg: register address
+ * @val: value to be written to the register
+ *
+ * Returns 0 on success or a negative error code on failure
+ */
+int wcd9xxx_interface_reg_write(struct wcd9xxx *wcd9xxx, unsigned short reg,
+		     u8 val)
+{
+	int ret;
+
+	mutex_lock(&wcd9xxx->io_lock);
+	ret = wcd9xxx->write_dev(wcd9xxx, reg, 1, (void *)&val, true);
+	dev_dbg(wcd9xxx->dev, "%s: Write %02x to 0x%x ret(%d)\n",
+		__func__, val, reg, ret);
+	mutex_unlock(&wcd9xxx->io_lock);
+
+	return ret;
+}
+EXPORT_SYMBOL(wcd9xxx_interface_reg_write);
+
+static int wcd9xxx_slim_read_device(struct wcd9xxx *wcd9xxx, unsigned short reg,
+				int bytes, void *dest, bool interface)
+{
+	int ret;
+	struct slim_ele_access msg;
+	int slim_read_tries = WCD9XXX_SLIM_RW_MAX_TRIES;
+
+	msg.start_offset = WCD9XXX_REGISTER_START_OFFSET + reg;
+	msg.num_bytes = bytes;
+	msg.comp = NULL;
+
+	if (!wcd9xxx->dev_up) {
+		dev_dbg_ratelimited(
+			wcd9xxx->dev, "%s: No read allowed. dev_up = %d\n",
+			__func__, wcd9xxx->dev_up);
+		return 0;
+	}
+
+	while (1) {
+		mutex_lock(&wcd9xxx->xfer_lock);
+		ret = slim_request_val_element(interface ?
+			       wcd9xxx->slim_slave : wcd9xxx->slim,
+			       &msg, dest, bytes);
+		mutex_unlock(&wcd9xxx->xfer_lock);
+		if (likely(ret == 0) || (--slim_read_tries == 0))
+			break;
+		usleep_range(5000, 5100);
+	}
+
+	if (ret)
+		dev_err(wcd9xxx->dev, "%s: Error, Codec read failed (%d)\n",
+			__func__, ret);
+
+	return ret;
+}
+
+/*
+ * The 'interface' flag selects between the slim interface device and the
+ * general codec registers as the write target.
+ */
+static int wcd9xxx_slim_write_device(struct wcd9xxx *wcd9xxx,
+		unsigned short reg, int bytes, void *src, bool interface)
+{
+	int ret;
+	struct slim_ele_access msg;
+	int slim_write_tries = WCD9XXX_SLIM_RW_MAX_TRIES;
+
+	msg.start_offset = WCD9XXX_REGISTER_START_OFFSET + reg;
+	msg.num_bytes = bytes;
+	msg.comp = NULL;
+
+	if (!wcd9xxx->dev_up) {
+		dev_dbg_ratelimited(
+			wcd9xxx->dev, "%s: No write allowed. dev_up = %d\n",
+			__func__, wcd9xxx->dev_up);
+		return 0;
+	}
+
+	while (1) {
+		mutex_lock(&wcd9xxx->xfer_lock);
+		ret = slim_change_val_element(interface ?
+			      wcd9xxx->slim_slave : wcd9xxx->slim,
+			      &msg, src, bytes);
+		mutex_unlock(&wcd9xxx->xfer_lock);
+		if (likely(ret == 0) || (--slim_write_tries == 0))
+			break;
+		usleep_range(5000, 5100);
+	}
+
+	if (ret)
+		pr_err("%s: Error, Codec write failed (%d)\n", __func__, ret);
+
+	return ret;
+}
+
+static int wcd9xxx_slim_get_allowed_slice(struct wcd9xxx *wcd9xxx,
+					  int bytes)
+{
+	int allowed_sz = bytes;
+
+	if (likely(bytes == SLIM_REPEAT_WRITE_MAX_SLICE))
+		allowed_sz = 16;
+	else if (bytes >= 12)
+		allowed_sz = 12;
+	else if (bytes >= 8)
+		allowed_sz = 8;
+	else if (bytes >= 6)
+		allowed_sz = 6;
+	else if (bytes >= 4)
+		allowed_sz = 4;
+	else
+		allowed_sz = bytes;
+
+	return allowed_sz;
+}
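
A worked example of the slicing above (standalone sketch; for payloads already capped at 16 bytes, the >= test below behaves the same as the == test in the driver): a 15-byte repeat write is carved into a 12-byte slice followed by a 3-byte slice.

#include <stdio.h>

static int allowed_slice(int bytes)
{
	if (bytes >= 16) return 16;
	if (bytes >= 12) return 12;
	if (bytes >= 8)  return 8;
	if (bytes >= 6)  return 6;
	if (bytes >= 4)  return 4;
	return bytes;
}

int main(void)
{
	int remaining = 15;	/* example payload size */

	while (remaining > 0) {
		int chunk = allowed_slice(remaining);

		printf("slice of %d bytes\n", chunk);	/* 12, then 3 */
		remaining -= chunk;
	}
	return 0;
}
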
+
+/*
+ * wcd9xxx_slim_write_repeat: Write the same register with multiple values
+ * @wcd9xxx: handle to wcd core
+ * @reg: register to be written
+ * @bytes: number of bytes to be written to reg
+ * @src: buffer with data content to be written to reg
+ * This API writes reg with bytes from src, splitting the payload into the
+ * allowed slice sizes. Sizes from 1 to 16 bytes are supported.
+ */
+int wcd9xxx_slim_write_repeat(struct wcd9xxx *wcd9xxx, unsigned short reg,
+			      int bytes, void *src)
+{
+	int ret = 0, bytes_to_write = bytes, bytes_allowed;
+	struct slim_ele_access slim_msg;
+
+	mutex_lock(&wcd9xxx->io_lock);
+	if (wcd9xxx->type == WCD9335 || wcd9xxx->type == WCD934X) {
+		ret = wcd9xxx_page_write(wcd9xxx, &reg);
+		if (ret)
+			goto done;
+	}
+
+	slim_msg.start_offset = WCD9XXX_REGISTER_START_OFFSET + reg;
+	slim_msg.comp = NULL;
+
+	if (unlikely(bytes > SLIM_REPEAT_WRITE_MAX_SLICE)) {
+		dev_err(wcd9xxx->dev, "%s: size %d not supported\n",
+			__func__, bytes);
+		ret = -EINVAL;
+		goto done;
+	}
+
+	if (!wcd9xxx->dev_up) {
+		dev_dbg_ratelimited(
+			wcd9xxx->dev, "%s: No write allowed. dev_up = %d\n",
+			__func__, wcd9xxx->dev_up);
+		ret = 0;
+		goto done;
+	}
+
+	while (bytes_to_write > 0) {
+		bytes_allowed = wcd9xxx_slim_get_allowed_slice(wcd9xxx,
+				       bytes_to_write);
+
+		slim_msg.num_bytes = bytes_allowed;
+		mutex_lock(&wcd9xxx->xfer_lock);
+		ret = slim_user_msg(wcd9xxx->slim, wcd9xxx->slim->laddr,
+				    SLIM_MSG_MT_DEST_REFERRED_USER,
+				    SLIM_USR_MC_REPEAT_CHANGE_VALUE,
+				    &slim_msg, src, bytes_allowed);
+		mutex_unlock(&wcd9xxx->xfer_lock);
+
+		if (ret) {
+			dev_err(wcd9xxx->dev, "%s: failed, ret = %d\n",
+				__func__, ret);
+			break;
+		}
+
+		bytes_to_write = bytes_to_write - bytes_allowed;
+		src = ((u8 *)src) + bytes_allowed;
+	}
+
+done:
+	mutex_unlock(&wcd9xxx->io_lock);
+
+	return ret;
+}
+EXPORT_SYMBOL(wcd9xxx_slim_write_repeat);
+
+/*
+ * wcd9xxx_slim_reserve_bw: API to reserve the slimbus bandwidth
+ * @wcd9xxx: Handle to the wcd9xxx core
+ * @bw_ops: value of the bandwidth that is requested
+ * @commit: Flag to indicate if bandwidth change is to be committed
+ *	    right away
+ */
+int wcd9xxx_slim_reserve_bw(struct wcd9xxx *wcd9xxx,
+		u32 bw_ops, bool commit)
+{
+	if (!wcd9xxx || !wcd9xxx->slim) {
+		pr_err("%s: Invalid handle to %s\n",
+			__func__,
+			(!wcd9xxx) ? "wcd9xxx" : "slim_device");
+		return -EINVAL;
+	}
+
+	return slim_reservemsg_bw(wcd9xxx->slim, bw_ops, commit);
+}
+EXPORT_SYMBOL(wcd9xxx_slim_reserve_bw);
+
+/*
+ * wcd9xxx_slim_bulk_write: API to write multiple registers with one descriptor
+ * @wcd9xxx: Handle to the wcd9xxx core
+ * @bulk_reg: array of wcd9xxx_reg_val structures holding the registers
+ *	      and values to be written
+ * @size: Indicates number of messages to be written with one descriptor
+ * @is_interface: Indicates whether the registers are slim interface or
+ *	       general registers.
+ * @return: returns 0 on success or a negative error code on failure.
+ */
+int wcd9xxx_slim_bulk_write(struct wcd9xxx *wcd9xxx,
+			    struct wcd9xxx_reg_val *bulk_reg,
+			    unsigned int size, bool is_interface)
+{
+	int ret, i;
+	struct slim_val_inf *msgs;
+	unsigned short reg;
+
+	if (!bulk_reg || !size || !wcd9xxx) {
+		pr_err("%s: Invalid parameters\n", __func__);
+		return -EINVAL;
+	}
+
+	if (!wcd9xxx->dev_up) {
+		dev_dbg_ratelimited(
+			wcd9xxx->dev, "%s: No write allowed. dev_up = %d\n",
+			__func__, wcd9xxx->dev_up);
+		return 0;
+	}
+
+	msgs = kzalloc(size * (sizeof(struct slim_val_inf)), GFP_KERNEL);
+	if (!msgs) {
+		ret = -ENOMEM;
+		goto mem_fail;
+	}
+
+	mutex_lock(&wcd9xxx->io_lock);
+	reg = bulk_reg->reg;
+	for (i = 0; i < size; i++) {
+		msgs[i].start_offset = WCD9XXX_REGISTER_START_OFFSET +
+					(bulk_reg->reg & 0xFF);
+		msgs[i].num_bytes = bulk_reg->bytes;
+		msgs[i].wbuf = bulk_reg->buf;
+		bulk_reg++;
+	}
+	ret = wcd9xxx_page_write(wcd9xxx, &reg);
+	if (ret) {
+		pr_err("%s: Page write error for reg: 0x%x\n",
+			__func__, reg);
+		goto err;
+	}
+
+	ret = slim_bulk_msg_write(is_interface ?
+				  wcd9xxx->slim_slave : wcd9xxx->slim,
+				  SLIM_MSG_MT_CORE,
+				  SLIM_MSG_MC_CHANGE_VALUE, msgs, size,
+				  NULL, NULL);
+	if (ret)
+		pr_err("%s: Error, Codec bulk write failed (%d)\n",
+			__func__, ret);
+	/* 100 usec sleep is needed as per HW requirement */
+	usleep_range(100, 110);
+err:
+	mutex_unlock(&wcd9xxx->io_lock);
+	kfree(msgs);
+mem_fail:
+	return ret;
+}
+EXPORT_SYMBOL(wcd9xxx_slim_bulk_write);
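
A hedged usage sketch for the bulk-write helper; the register addresses are made up, and since the helper issues a single page write keyed off the first entry, all entries should live on the same page:

/* Hypothetical caller: write two same-page registers in one descriptor. */
static int example_bulk_write(struct wcd9xxx *wcd9xxx)
{
	u8 vals[2] = { 0x01, 0x80 };
	struct wcd9xxx_reg_val bulk_reg[] = {
		{ .reg = 0x0b41, .buf = &vals[0], .bytes = 1 }, /* made-up reg */
		{ .reg = 0x0b42, .buf = &vals[1], .bytes = 1 }, /* made-up reg */
	};

	return wcd9xxx_slim_bulk_write(wcd9xxx, bulk_reg,
				       ARRAY_SIZE(bulk_reg), false);
}
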
+
+static int wcd9xxx_num_irq_regs(const struct wcd9xxx *wcd9xxx)
+{
+	return (wcd9xxx->codec_type->num_irqs / 8) +
+		((wcd9xxx->codec_type->num_irqs % 8) ? 1 : 0);
+}
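
The helper above is plain ceiling division: num_irqs interrupt bits packed eight per status register. Equivalently (sketch):

/* ceil(num_irqs / 8); the kernel's DIV_ROUND_UP(num_irqs, 8) is the same */
static int num_irq_regs(int num_irqs)
{
	return (num_irqs + 7) / 8;
}
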
+
+static int wcd9xxx_regmap_init_cache(struct wcd9xxx *wcd9xxx)
+{
+	struct regmap_config *regmap_config;
+	int rc;
+
+	regmap_config = wcd9xxx_get_regmap_config(wcd9xxx->type);
+	if (!regmap_config) {
+		dev_err(wcd9xxx->dev, "regmap config is not defined\n");
+		return -EINVAL;
+	}
+
+	rc = regmap_reinit_cache(wcd9xxx->regmap, regmap_config);
+	if (rc != 0) {
+		dev_err(wcd9xxx->dev, "%s: Failed to reinit register cache: %d\n",
+			__func__, rc);
+	}
+
+	return rc;
+}
+
+static int wcd9xxx_device_init(struct wcd9xxx *wcd9xxx)
+{
+	int ret = 0, i;
+	struct wcd9xxx_core_resource *core_res = &wcd9xxx->core_res;
+	regmap_patch_fptr regmap_apply_patch = NULL;
+
+	mutex_init(&wcd9xxx->io_lock);
+	mutex_init(&wcd9xxx->xfer_lock);
+	mutex_init(&wcd9xxx->reset_lock);
+
+	ret = wcd9xxx_bringup(wcd9xxx->dev);
+	if (ret) {
+		ret = -EPROBE_DEFER;
+		goto err_bring_up;
+	}
+
+	wcd9xxx->codec_type = devm_kzalloc(wcd9xxx->dev,
+			sizeof(struct wcd9xxx_codec_type), GFP_KERNEL);
+	if (!wcd9xxx->codec_type) {
+		ret = -ENOMEM;
+		goto err_bring_up;
+	}
+	ret = wcd9xxx_get_codec_info(wcd9xxx->dev);
+	if (ret) {
+		ret = -EPROBE_DEFER;
+		goto fail_cdc_fill;
+	}
+	wcd9xxx->version = wcd9xxx->codec_type->version;
+	if (!wcd9xxx->codec_type->dev || !wcd9xxx->codec_type->size)
+		goto fail_cdc_fill;
+
+	core_res->parent = wcd9xxx;
+	core_res->dev = wcd9xxx->dev;
+	core_res->intr_table = wcd9xxx->codec_type->intr_tbl;
+	core_res->intr_table_size = wcd9xxx->codec_type->intr_tbl_size;
+
+	for (i = 0; i < WCD9XXX_INTR_REG_MAX; i++)
+		wcd9xxx->core_res.intr_reg[i] =
+			wcd9xxx->codec_type->intr_reg[i];
+
+	wcd9xxx_core_res_init(&wcd9xxx->core_res,
+			      wcd9xxx->codec_type->num_irqs,
+			      wcd9xxx_num_irq_regs(wcd9xxx),
+			      wcd9xxx->regmap);
+
+	if (wcd9xxx_core_irq_init(&wcd9xxx->core_res))
+		goto err;
+
+	ret = wcd9xxx_regmap_init_cache(wcd9xxx);
+	if (ret)
+		goto err_irq;
+
+	regmap_apply_patch = wcd9xxx_get_regmap_reg_patch(
+			wcd9xxx->type);
+	if (regmap_apply_patch) {
+		ret = regmap_apply_patch(wcd9xxx->regmap,
+				wcd9xxx->version);
+		if (ret)
+			dev_err(wcd9xxx->dev,
+					"Failed to register patch: %d\n", ret);
+	}
+
+	ret = mfd_add_devices(wcd9xxx->dev, -1, wcd9xxx->codec_type->dev,
+			      wcd9xxx->codec_type->size, NULL, 0, NULL);
+	if (ret != 0) {
+		dev_err(wcd9xxx->dev, "Failed to add children: %d\n", ret);
+		goto err_irq;
+	}
+
+	ret = device_init_wakeup(wcd9xxx->dev, true);
+	if (ret) {
+		dev_err(wcd9xxx->dev, "Device wakeup init failed: %d\n", ret);
+		goto err_irq;
+	}
+
+	return ret;
+err_irq:
+	wcd9xxx_irq_exit(&wcd9xxx->core_res);
+fail_cdc_fill:
+	devm_kfree(wcd9xxx->dev, wcd9xxx->codec_type);
+	wcd9xxx->codec_type = NULL;
+err:
+	wcd9xxx_bringdown(wcd9xxx->dev);
+	wcd9xxx_core_res_deinit(&wcd9xxx->core_res);
+err_bring_up:
+	mutex_destroy(&wcd9xxx->io_lock);
+	mutex_destroy(&wcd9xxx->xfer_lock);
+	mutex_destroy(&wcd9xxx->reset_lock);
+	return ret;
+}
+
+static void wcd9xxx_device_exit(struct wcd9xxx *wcd9xxx)
+{
+	device_init_wakeup(wcd9xxx->dev, false);
+	wcd9xxx_irq_exit(&wcd9xxx->core_res);
+	wcd9xxx_bringdown(wcd9xxx->dev);
+	wcd9xxx_reset_low(wcd9xxx->dev);
+	wcd9xxx_core_res_deinit(&wcd9xxx->core_res);
+	mutex_destroy(&wcd9xxx->io_lock);
+	mutex_destroy(&wcd9xxx->xfer_lock);
+	mutex_destroy(&wcd9xxx->reset_lock);
+	if (wcd9xxx_get_intf_type() == WCD9XXX_INTERFACE_TYPE_SLIMBUS)
+		slim_remove_device(wcd9xxx->slim_slave);
+}
+
+#ifdef CONFIG_DEBUG_FS
+struct wcd9xxx *debugCodec;
+
+static struct dentry *debugfs_wcd9xxx_dent;
+static struct dentry *debugfs_peek;
+static struct dentry *debugfs_poke;
+static struct dentry *debugfs_power_state;
+static struct dentry *debugfs_reg_dump;
+
+static unsigned char read_data;
+
+static int codec_debug_open(struct inode *inode, struct file *file)
+{
+	file->private_data = inode->i_private;
+	return 0;
+}
+
+static int get_parameters(char *buf, long int *param1, int num_of_par)
+{
+	char *token;
+	int base, cnt;
+
+	token = strsep(&buf, " ");
+
+	for (cnt = 0; cnt < num_of_par; cnt++) {
+		if (token != NULL) {
+			if ((token[1] == 'x') || (token[1] == 'X'))
+				base = 16;
+			else
+				base = 10;
+
+			if (kstrtoul(token, base, &param1[cnt]) != 0)
+				return -EINVAL;
+
+			token = strsep(&buf, " ");
+		} else
+			return -EINVAL;
+	}
+	return 0;
+}
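
For illustration, a userspace re-implementation of the token walk above (assumptions: strtol with base 0 replaces the manual 0x detection, and the input mirrors what would be echoed into the debugfs nodes created later in this file):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int parse_params(char *buf, long *out, int num)
{
	char *save;
	char *token = strtok_r(buf, " ", &save);

	for (int i = 0; i < num; i++) {
		if (!token)
			return -1;
		out[i] = strtol(token, NULL, 0); /* base 0: handles 0x... */
		token = strtok_r(NULL, " ", &save);
	}
	return 0;
}

int main(void)
{
	char line[] = "0x10 0xff";	/* "reg val", as for slimslave_poke */
	long p[2];

	if (!parse_params(line, p, 2))
		printf("reg=0x%lx val=0x%lx\n", p[0], p[1]);
	return 0;
}
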
+
+static ssize_t wcd9xxx_slimslave_reg_show(char __user *ubuf, size_t count,
+					  loff_t *ppos)
+{
+	int i, reg_val, len;
+	ssize_t total = 0;
+	char tmp_buf[25]; /* each line is 12 bytes; 25 leaves a margin of error */
+
+	for (i = (int) *ppos / 12; i <= SLIM_MAX_REG_ADDR; i++) {
+		reg_val = wcd9xxx_interface_reg_read(debugCodec, i);
+		len = snprintf(tmp_buf, sizeof(tmp_buf),
+			"0x%.3x: 0x%.2x\n", i, reg_val);
+
+		if ((total + len) >= count - 1)
+			break;
+		if (copy_to_user((ubuf + total), tmp_buf, len)) {
+			pr_err("%s: fail to copy reg dump\n", __func__);
+			total = -EFAULT;
+			goto copy_err;
+		}
+		*ppos += len;
+		total += len;
+	}
+
+copy_err:
+	return total;
+}
+
+static ssize_t codec_debug_read(struct file *file, char __user *ubuf,
+				size_t count, loff_t *ppos)
+{
+	char lbuf[8];
+	char *access_str = file->private_data;
+	ssize_t ret_cnt;
+
+	if (*ppos < 0 || !count)
+		return -EINVAL;
+
+	if (!strcmp(access_str, "slimslave_peek")) {
+		snprintf(lbuf, sizeof(lbuf), "0x%x\n", read_data);
+		ret_cnt = simple_read_from_buffer(ubuf, count, ppos, lbuf,
+					       strnlen(lbuf, 7));
+	} else if (!strcmp(access_str, "slimslave_reg_dump")) {
+		ret_cnt = wcd9xxx_slimslave_reg_show(ubuf, count, ppos);
+	} else {
+		pr_err("%s: %s not permitted to read\n", __func__, access_str);
+		ret_cnt = -EPERM;
+	}
+
+	return ret_cnt;
+}
+
+static void wcd9xxx_set_reset_pin_state(struct wcd9xxx *wcd9xxx,
+					struct wcd9xxx_pdata *pdata,
+					bool active)
+{
+	if (wcd9xxx->wcd_rst_np) {
+		if (active)
+			msm_cdc_pinctrl_select_active_state(
+						wcd9xxx->wcd_rst_np);
+		else
+			msm_cdc_pinctrl_select_sleep_state(
+						wcd9xxx->wcd_rst_np);
+
+		return;
+	} else if (gpio_is_valid(wcd9xxx->reset_gpio)) {
+		gpio_direction_output(wcd9xxx->reset_gpio,
+				      (active == true ? 1 : 0));
+	}
+}
+
+static int codec_debug_process_cdc_power(char *lbuf)
+{
+	long int param;
+	int rc;
+	struct wcd9xxx_pdata *pdata;
+
+	if (wcd9xxx_get_intf_type() != WCD9XXX_INTERFACE_TYPE_SLIMBUS) {
+		pr_err("%s: CODEC is not in SLIMBUS mode\n", __func__);
+		rc = -EPERM;
+		goto error_intf;
+	}
+
+	rc = get_parameters(lbuf, &param, 1);
+
+	if (likely(!rc)) {
+		pdata = debugCodec->slim->dev.platform_data;
+		if (param == 0) {
+			wcd9xxx_slim_device_down(debugCodec->slim);
+			msm_cdc_disable_static_supplies(debugCodec->dev,
+							debugCodec->supplies,
+							pdata->regulator,
+							pdata->num_supplies);
+			wcd9xxx_set_reset_pin_state(debugCodec, pdata, false);
+		} else if (param == 1) {
+			msm_cdc_enable_static_supplies(debugCodec->dev,
+						       debugCodec->supplies,
+						       pdata->regulator,
+						       pdata->num_supplies);
+			usleep_range(1000, 2000);
+			wcd9xxx_set_reset_pin_state(debugCodec, pdata, false);
+			usleep_range(1000, 2000);
+			wcd9xxx_set_reset_pin_state(debugCodec, pdata, true);
+			usleep_range(1000, 2000);
+			wcd9xxx_slim_device_up(debugCodec->slim);
+		} else {
+			pr_err("%s: invalid command %ld\n", __func__, param);
+		}
+	}
+
+error_intf:
+	return rc;
+}
+
+static ssize_t codec_debug_write(struct file *filp,
+	const char __user *ubuf, size_t cnt, loff_t *ppos)
+{
+	char *access_str = filp->private_data;
+	char lbuf[32];
+	int rc;
+	long int param[5];
+
+	if (cnt > sizeof(lbuf) - 1)
+		return -EINVAL;
+
+	rc = copy_from_user(lbuf, ubuf, cnt);
+	if (rc)
+		return -EFAULT;
+
+	lbuf[cnt] = '\0';
+
+	if (!strcmp(access_str, "slimslave_poke")) {
+		/* write */
+		rc = get_parameters(lbuf, param, 2);
+		if ((param[0] <= 0x3FF) && (param[1] <= 0xFF) &&
+			(rc == 0))
+			wcd9xxx_interface_reg_write(debugCodec, param[0],
+				param[1]);
+		else
+			rc = -EINVAL;
+	} else if (!strcmp(access_str, "slimslave_peek")) {
+		/* read */
+		rc = get_parameters(lbuf, param, 1);
+		if ((param[0] <= 0x3FF) && (rc == 0))
+			read_data = wcd9xxx_interface_reg_read(debugCodec,
+				param[0]);
+		else
+			rc = -EINVAL;
+	} else if (!strcmp(access_str, "power_state")) {
+		rc = codec_debug_process_cdc_power(lbuf);
+	}
+
+	if (rc == 0)
+		rc = cnt;
+	else
+		pr_err("%s: rc = %d\n", __func__, rc);
+
+	return rc;
+}
+
+static const struct file_operations codec_debug_ops = {
+	.open = codec_debug_open,
+	.write = codec_debug_write,
+	.read = codec_debug_read
+};
+#endif
+
+static struct wcd9xxx_i2c *wcd9xxx_i2c_get_device_info(struct wcd9xxx *wcd9xxx,
+						u16 reg)
+{
+	u16 mask = 0x0f00;
+	int value = 0;
+	struct wcd9xxx_i2c *wcd9xxx_i2c = NULL;
+
+	if (wcd9xxx->type == WCD9335) {
+		wcd9xxx_i2c = &wcd9xxx_modules[0];
+	} else {
+		value = ((reg & mask) >> 8) & 0x000f;
+		switch (value) {
+		case 0:
+			wcd9xxx_i2c = &wcd9xxx_modules[0];
+			break;
+		case 1:
+			wcd9xxx_i2c = &wcd9xxx_modules[1];
+			break;
+		case 2:
+			wcd9xxx_i2c = &wcd9xxx_modules[2];
+			break;
+		case 3:
+			wcd9xxx_i2c = &wcd9xxx_modules[3];
+			break;
+
+		default:
+			break;
+		}
+	}
+	return wcd9xxx_i2c;
+}
+
+static int wcd9xxx_i2c_write_device(struct wcd9xxx *wcd9xxx, u16 reg, u8 *value,
+				u32 bytes)
+{
+	struct i2c_msg *msg;
+	int ret = 0;
+	u8 reg_addr = 0;
+	u8 data[bytes + 1];
+	struct wcd9xxx_i2c *wcd9xxx_i2c;
+
+	wcd9xxx_i2c = wcd9xxx_i2c_get_device_info(wcd9xxx, reg);
+	if (wcd9xxx_i2c == NULL || wcd9xxx_i2c->client == NULL) {
+		pr_err("failed to get device info\n");
+		return -ENODEV;
+	}
+	reg_addr = (u8)reg;
+	msg = &wcd9xxx_i2c->xfer_msg[0];
+	msg->addr = wcd9xxx_i2c->client->addr;
+	msg->len = bytes + 1;
+	msg->flags = 0;
+	data[0] = reg;
+	data[1] = *value;
+	msg->buf = data;
+	ret = i2c_transfer(wcd9xxx_i2c->client->adapter,
+			   wcd9xxx_i2c->xfer_msg, 1);
+	/* Try again if the write fails */
+	if (ret != 1) {
+		ret = i2c_transfer(wcd9xxx_i2c->client->adapter,
+						wcd9xxx_i2c->xfer_msg, 1);
+		if (ret != 1) {
+			pr_err("failed to write the device\n");
+			return ret;
+		}
+	}
+	pr_debug("write sucess register = %x val = %x\n", reg, data[1]);
+	return 0;
+}
+
+static int wcd9xxx_i2c_read_device(struct wcd9xxx *wcd9xxx, unsigned short reg,
+				  int bytes, unsigned char *dest)
+{
+	struct i2c_msg *msg;
+	int ret = 0;
+	u8 reg_addr = 0;
+	struct wcd9xxx_i2c *wcd9xxx_i2c;
+	u8 i = 0;
+
+	wcd9xxx_i2c = wcd9xxx_i2c_get_device_info(wcd9xxx, reg);
+	if (wcd9xxx_i2c == NULL || wcd9xxx_i2c->client == NULL) {
+		pr_err("failed to get device info\n");
+		return -ENODEV;
+	}
+	for (i = 0; i < bytes; i++) {
+		reg_addr = (u8)reg++;
+		msg = &wcd9xxx_i2c->xfer_msg[0];
+		msg->addr = wcd9xxx_i2c->client->addr;
+		msg->len = 1;
+		msg->flags = 0;
+		msg->buf = &reg_addr;
+
+		msg = &wcd9xxx_i2c->xfer_msg[1];
+		msg->addr = wcd9xxx_i2c->client->addr;
+		msg->len = 1;
+		msg->flags = I2C_M_RD;
+		msg->buf = dest++;
+		ret = i2c_transfer(wcd9xxx_i2c->client->adapter,
+				wcd9xxx_i2c->xfer_msg, 2);
+
+		/* Try again if read fails first time */
+		if (ret != 2) {
+			ret = i2c_transfer(wcd9xxx_i2c->client->adapter,
+					   wcd9xxx_i2c->xfer_msg, 2);
+			if (ret != 2) {
+				pr_err("failed to read wcd9xxx register\n");
+				return ret;
+			}
+		}
+	}
+	return 0;
+}
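
The per-byte loop above uses the standard two-message I2C transaction: a write message selecting the register address, then a read message fetching the data. A standalone sketch of that pairing (the wrapper function is hypothetical):

/* Read one byte from an 8-bit register over I2C. */
static int i2c_read_reg(struct i2c_client *client, u8 reg, u8 *dest)
{
	struct i2c_msg msgs[2] = {
		{ .addr = client->addr, .flags = 0,
		  .len = 1, .buf = &reg },	/* select register */
		{ .addr = client->addr, .flags = I2C_M_RD,
		  .len = 1, .buf = dest },	/* read its value */
	};

	/* i2c_transfer() returns the number of messages transferred */
	return i2c_transfer(client->adapter, msgs, 2) == 2 ? 0 : -EIO;
}
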
+
+int wcd9xxx_i2c_read(struct wcd9xxx *wcd9xxx, unsigned short reg,
+			int bytes, void *dest, bool interface_reg)
+{
+	return wcd9xxx_i2c_read_device(wcd9xxx, reg, bytes, dest);
+}
+
+int wcd9xxx_i2c_write(struct wcd9xxx *wcd9xxx, unsigned short reg,
+			 int bytes, void *src, bool interface_reg)
+{
+	return wcd9xxx_i2c_write_device(wcd9xxx, reg, src, bytes);
+}
+
+static int wcd9xxx_i2c_get_client_index(struct i2c_client *client,
+					int *wcd9xx_index)
+{
+	int ret = 0;
+
+	switch (client->addr) {
+	case WCD9XXX_I2C_TOP_SLAVE_ADDR:
+		*wcd9xx_index = WCD9XXX_I2C_TOP_LEVEL;
+	break;
+	case WCD9XXX_ANALOG_I2C_SLAVE_ADDR:
+		*wcd9xx_index = WCD9XXX_I2C_ANALOG;
+	break;
+	case WCD9XXX_DIGITAL1_I2C_SLAVE_ADDR:
+		*wcd9xx_index = WCD9XXX_I2C_DIGITAL_1;
+	break;
+	case WCD9XXX_DIGITAL2_I2C_SLAVE_ADDR:
+		*wcd9xx_index = WCD9XXX_I2C_DIGITAL_2;
+	break;
+	default:
+		ret = -EINVAL;
+	break;
+	}
+	return ret;
+}
+
+static int wcd9xxx_i2c_probe(struct i2c_client *client,
+			const struct i2c_device_id *id)
+{
+	struct wcd9xxx *wcd9xxx = NULL;
+	struct wcd9xxx_pdata *pdata = NULL;
+	int val = 0;
+	int ret = 0;
+	int wcd9xx_index = 0;
+	struct device *dev;
+	int intf_type;
+	const struct of_device_id *of_id;
+
+	intf_type = wcd9xxx_get_intf_type();
+
+	pr_debug("%s: interface status %d\n", __func__, intf_type);
+	if (intf_type == WCD9XXX_INTERFACE_TYPE_SLIMBUS) {
+		dev_dbg(&client->dev, "%s:Codec is detected in slimbus mode\n",
+			__func__);
+		return -ENODEV;
+	} else if (intf_type == WCD9XXX_INTERFACE_TYPE_I2C) {
+		ret = wcd9xxx_i2c_get_client_index(client, &wcd9xx_index);
+		if (ret != 0)
+			dev_err(&client->dev,
+				"%s: I2C set codec I2C client failed\n",
+				__func__);
+		else {
+			dev_err(&client->dev,
+				"%s: probe for other slave devices of codec, I2C slave addr = %x\n",
+				__func__, client->addr);
+			wcd9xxx_modules[wcd9xx_index].client = client;
+		}
+		return ret;
+	} else if (intf_type == WCD9XXX_INTERFACE_TYPE_PROBING) {
+		dev = &client->dev;
+		if (client->dev.of_node) {
+			dev_dbg(&client->dev,
+				"%s: Platform data from device tree\n",
+				__func__);
+			pdata = wcd9xxx_populate_dt_data(&client->dev);
+			if (!pdata) {
+				dev_err(&client->dev,
+					"%s: Fail to obtain pdata from device tree\n",
+					 __func__);
+				ret = -EINVAL;
+				goto fail;
+			}
+			client->dev.platform_data = pdata;
+		} else {
+			dev_dbg(&client->dev,
+				"%s: Platform data from board file\n",
+				__func__);
+			pdata = client->dev.platform_data;
+		}
+		wcd9xxx = devm_kzalloc(&client->dev, sizeof(struct wcd9xxx),
+				       GFP_KERNEL);
+		if (!wcd9xxx) {
+			ret = -ENOMEM;
+			goto fail;
+		}
+
+		if (!pdata) {
+			dev_dbg(&client->dev, "no platform data?\n");
+			ret = -EINVAL;
+			goto fail;
+		}
+		wcd9xxx->type = WCD9XXX;
+		if (client->dev.of_node) {
+			of_id = of_match_device(wcd9xxx_of_match, &client->dev);
+			if (of_id) {
+				wcd9xxx->type = *((int *)of_id->data);
+				dev_info(&client->dev, "%s: codec type is %d\n",
+					 __func__, wcd9xxx->type);
+			}
+		} else {
+			dev_info(&client->dev, "%s: dev.of_node is NULL, default to WCD9XXX\n",
+				 __func__);
+			wcd9xxx->type = WCD9XXX;
+		}
+		wcd9xxx->regmap = wcd9xxx_regmap_init(&client->dev,
+				&wcd9xxx_i2c_base_regmap_config);
+		if (IS_ERR(wcd9xxx->regmap)) {
+			ret = PTR_ERR(wcd9xxx->regmap);
+			dev_err(&client->dev, "%s: Failed to allocate register map: %d\n",
+					__func__, ret);
+			goto err_codec;
+		}
+		wcd9xxx->reset_gpio = pdata->reset_gpio;
+		wcd9xxx->wcd_rst_np = pdata->wcd_rst_np;
+
+		if (!wcd9xxx->wcd_rst_np) {
+			pdata->use_pinctrl = false;
+			dev_err(&client->dev, "%s: pinctrl not used for rst_n\n",
+				 __func__);
+			goto err_codec;
+		}
+
+		if (i2c_check_functionality(client->adapter,
+					    I2C_FUNC_I2C) == 0) {
+			dev_dbg(&client->dev, "can't talk I2C?\n");
+			ret = -EIO;
+			goto fail;
+		}
+		dev_set_drvdata(&client->dev, wcd9xxx);
+		wcd9xxx->dev = &client->dev;
+		wcd9xxx->dev_up = true;
+		if (client->dev.of_node)
+			wcd9xxx->mclk_rate = pdata->mclk_rate;
+
+		wcd9xxx->num_of_supplies = pdata->num_supplies;
+		ret = msm_cdc_init_supplies(wcd9xxx->dev, &wcd9xxx->supplies,
+					    pdata->regulator,
+					    pdata->num_supplies);
+		if (!wcd9xxx->supplies) {
+			dev_err(wcd9xxx->dev, "%s: Cannot init wcd supplies\n",
+				__func__);
+			goto err_codec;
+		}
+		ret = msm_cdc_enable_static_supplies(wcd9xxx->dev,
+						     wcd9xxx->supplies,
+						     pdata->regulator,
+						     pdata->num_supplies);
+		if (ret) {
+			dev_err(wcd9xxx->dev, "%s: wcd static supply enable failed!\n",
+				__func__);
+			goto err_codec;
+		}
+		/*
+		 * For WCD9335, it takes about 600us for the Vout_A and
+		 * Vout_D to be ready after BUCK_SIDO is powered up.
+		 * SYS_RST_N shouldn't be pulled high during this time
+		 */
+		if (wcd9xxx->type == WCD9335)
+			usleep_range(600, 650);
+		else
+			usleep_range(5, 10);
+
+		ret = wcd9xxx_reset(wcd9xxx->dev);
+		if (ret) {
+			pr_err("%s: Resetting Codec failed\n", __func__);
+			goto err_supplies;
+		}
+
+		ret = wcd9xxx_i2c_get_client_index(client, &wcd9xx_index);
+		if (ret != 0) {
+			pr_err("%s:Set codec I2C client failed\n", __func__);
+			goto err_supplies;
+		}
+
+		wcd9xxx_modules[wcd9xx_index].client = client;
+		wcd9xxx->read_dev = wcd9xxx_i2c_read;
+		wcd9xxx->write_dev = wcd9xxx_i2c_write;
+		if (!wcd9xxx->dev->of_node)
+			wcd9xxx_assign_irq(&wcd9xxx->core_res,
+					pdata->irq, pdata->irq_base);
+
+		ret = wcd9xxx_device_init(wcd9xxx);
+		if (ret) {
+			pr_err("%s: error, initializing device failed (%d)\n",
+			       __func__, ret);
+			goto err_device_init;
+		}
+
+		ret = wcd9xxx_i2c_read(wcd9xxx, WCD9XXX_A_CHIP_STATUS, 1,
+				       &val, 0);
+		if (ret < 0)
+			pr_err("%s: failed to read the wcd9xxx status (%d)\n",
+			       __func__, ret);
+		if (val != wcd9xxx->codec_type->i2c_chip_status)
+			pr_err("%s: unknown chip status 0x%x\n", __func__, val);
+
+		wcd9xxx_set_intf_type(WCD9XXX_INTERFACE_TYPE_I2C);
+
+		return ret;
+	} else
+		pr_err("%s: I2C probe in wrong state\n", __func__);
+
+err_device_init:
+	wcd9xxx_reset_low(wcd9xxx->dev);
+err_supplies:
+	msm_cdc_release_supplies(wcd9xxx->dev, wcd9xxx->supplies,
+				 pdata->regulator,
+				 pdata->num_supplies);
+	pdata->regulator = NULL;
+	pdata->num_supplies = 0;
+err_codec:
+	devm_kfree(&client->dev, wcd9xxx);
+	dev_set_drvdata(&client->dev, NULL);
+fail:
+	return ret;
+}
+
+static int wcd9xxx_i2c_remove(struct i2c_client *client)
+{
+	struct wcd9xxx *wcd9xxx;
+	struct wcd9xxx_pdata *pdata = client->dev.platform_data;
+
+	wcd9xxx = dev_get_drvdata(&client->dev);
+	msm_cdc_release_supplies(wcd9xxx->dev, wcd9xxx->supplies,
+				 pdata->regulator,
+				 pdata->num_supplies);
+	wcd9xxx_device_exit(wcd9xxx);
+	dev_set_drvdata(&client->dev, NULL);
+	return 0;
+}
+
+static int wcd9xxx_dt_parse_slim_interface_dev_info(struct device *dev,
+						struct slim_device *slim_ifd)
+{
+	int ret = 0;
+	struct property *prop;
+
+	ret = of_property_read_string(dev->of_node, "qcom,cdc-slim-ifd",
+				      &slim_ifd->name);
+	if (ret) {
+		dev_err(dev, "Looking up %s property in node %s failed",
+			"qcom,cdc-slim-ifd-dev", dev->of_node->full_name);
+		return -ENODEV;
+	}
+	prop = of_find_property(dev->of_node,
+			"qcom,cdc-slim-ifd-elemental-addr", NULL);
+	if (!prop) {
+		dev_err(dev, "Looking up %s property in node %s failed",
+			"qcom,cdc-slim-ifd-elemental-addr",
+			dev->of_node->full_name);
+		return -ENODEV;
+	} else if (prop->length != 6) {
+		dev_err(dev, "invalid codec slim ifd addr. addr length = %d\n",
+			      prop->length);
+		return -ENODEV;
+	}
+	memcpy(slim_ifd->e_addr, prop->value, 6);
+
+	return 0;
+}
+
+static int wcd9xxx_slim_get_laddr(struct slim_device *sb,
+				  const u8 *e_addr, u8 e_len, u8 *laddr)
+{
+	int ret;
+	const unsigned long timeout = jiffies +
+				      msecs_to_jiffies(SLIMBUS_PRESENT_TIMEOUT);
+
+	do {
+		ret = slim_get_logical_addr(sb, e_addr, e_len, laddr);
+		if (!ret)
+			break;
+		/* Give SLIMBUS time to report present and be ready. */
+		usleep_range(1000, 1100);
+		pr_debug_ratelimited("%s: retyring get logical addr\n",
+				     __func__);
+	} while (time_before(jiffies, timeout));
+
+	return ret;
+}
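
The helper above is an instance of the common poll-until-deadline idiom; an abstracted sketch with the operation and clock factored out (op, expired, and wait are placeholders, not driver APIs):

static int retry_until(int (*op)(void *ctx), void *ctx,
		       int (*expired)(void), void (*wait)(void))
{
	int ret;

	do {
		ret = op(ctx);
		if (!ret)		/* success: stop retrying */
			break;
		wait();			/* give the bus time to settle */
	} while (!expired());

	return ret;
}
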
+
+static int wcd9xxx_slim_probe(struct slim_device *slim)
+{
+	struct wcd9xxx *wcd9xxx;
+	struct wcd9xxx_pdata *pdata;
+	const struct slim_device_id *device_id;
+	int ret = 0;
+	int intf_type;
+
+	/* validate slim before dereferencing it for the allocation below */
+	if (!slim)
+		return -EINVAL;
+
+	intf_type = wcd9xxx_get_intf_type();
+
+	wcd9xxx = devm_kzalloc(&slim->dev, sizeof(struct wcd9xxx),
+				GFP_KERNEL);
+	if (!wcd9xxx) {
+		ret = -ENOMEM;
+		goto err;
+	}
+
+	if (intf_type == WCD9XXX_INTERFACE_TYPE_I2C) {
+		dev_dbg(&slim->dev, "%s:Codec is detected in I2C mode\n",
+			__func__);
+		ret = -ENODEV;
+		goto err;
+	}
+	if (slim->dev.of_node) {
+		dev_info(&slim->dev, "Platform data from device tree\n");
+		pdata = wcd9xxx_populate_dt_data(&slim->dev);
+		if (!pdata) {
+			dev_err(&slim->dev,
+				"%s: Fail to obtain pdata from device tree\n",
+				__func__);
+			ret = -EINVAL;
+			goto err;
+		}
+
+		ret = wcd9xxx_dt_parse_slim_interface_dev_info(&slim->dev,
+				&pdata->slimbus_slave_device);
+		if (ret) {
+			dev_err(&slim->dev, "Error, parsing slim interface\n");
+			devm_kfree(&slim->dev, pdata);
+			ret = -EINVAL;
+			goto err;
+		}
+		slim->dev.platform_data = pdata;
+
+	} else {
+		dev_info(&slim->dev, "Platform data from board file\n");
+		pdata = slim->dev.platform_data;
+	}
+
+	if (!pdata) {
+		dev_err(&slim->dev, "Error, no platform data\n");
+		ret = -EINVAL;
+		goto err;
+	}
+
+	if (!slim->ctrl) {
+		dev_err(&slim->dev, "%s: Error, no SLIMBUS control data\n",
+			__func__);
+		ret = -EINVAL;
+		goto err_codec;
+	}
+	device_id = slim_get_device_id(slim);
+	if (!device_id) {
+		dev_err(&slim->dev, "%s: Error, no device id\n", __func__);
+		ret = -EINVAL;
+		goto err;
+	}
+
+	wcd9xxx->type = device_id->driver_data;
+	dev_info(&slim->dev, "%s: probing for wcd type: %d, name: %s\n",
+		 __func__, wcd9xxx->type, device_id->name);
+
+	/* wcd9xxx members init */
+	wcd9xxx->multi_reg_write = wcd9xxx_slim_multi_reg_write;
+	wcd9xxx->slim = slim;
+	slim_set_clientdata(slim, wcd9xxx);
+	wcd9xxx->reset_gpio = pdata->reset_gpio;
+	wcd9xxx->dev = &slim->dev;
+	wcd9xxx->mclk_rate = pdata->mclk_rate;
+	wcd9xxx->dev_up = true;
+	wcd9xxx->wcd_rst_np = pdata->wcd_rst_np;
+
+	wcd9xxx->regmap = wcd9xxx_regmap_init(&slim->dev,
+					      &wcd9xxx_base_regmap_config);
+	if (IS_ERR(wcd9xxx->regmap)) {
+		ret = PTR_ERR(wcd9xxx->regmap);
+		dev_err(&slim->dev, "%s: Failed to allocate register map: %d\n",
+			__func__, ret);
+		goto err_codec;
+	}
+
+	if (!wcd9xxx->wcd_rst_np) {
+		pdata->use_pinctrl = false;
+		dev_err(&slim->dev, "%s: pinctrl not used for rst_n\n",
+			__func__);
+		goto err_codec;
+	}
+
+	wcd9xxx->num_of_supplies = pdata->num_supplies;
+	ret = msm_cdc_init_supplies(&slim->dev, &wcd9xxx->supplies,
+				    pdata->regulator,
+				    pdata->num_supplies);
+	if (!wcd9xxx->supplies) {
+		dev_err(wcd9xxx->dev, "%s: Cannot init wcd supplies\n",
+			__func__);
+		goto err_codec;
+	}
+	ret = msm_cdc_enable_static_supplies(wcd9xxx->dev,
+					     wcd9xxx->supplies,
+					     pdata->regulator,
+					     pdata->num_supplies);
+	if (ret) {
+		dev_err(wcd9xxx->dev, "%s: wcd static supply enable failed!\n",
+			__func__);
+		goto err_codec;
+	}
+
+	/*
+	 * For WCD9335, it takes about 600us for the Vout_A and
+	 * Vout_D to be ready after BUCK_SIDO is powered up.
+	 * SYS_RST_N shouldn't be pulled high during this time
+	 */
+	if (wcd9xxx->type == WCD9335 || wcd9xxx->type == WCD934X)
+		usleep_range(600, 650);
+	else
+		usleep_range(5, 10);
+
+	ret = wcd9xxx_reset(&slim->dev);
+	if (ret) {
+		dev_err(&slim->dev, "%s: Resetting Codec failed\n", __func__);
+		goto err_supplies;
+	}
+
+	ret = wcd9xxx_slim_get_laddr(wcd9xxx->slim, wcd9xxx->slim->e_addr,
+				     ARRAY_SIZE(wcd9xxx->slim->e_addr),
+				     &wcd9xxx->slim->laddr);
+	if (ret) {
+		dev_err(&slim->dev, "%s: failed to get slimbus %s logical address: %d\n",
+		       __func__, wcd9xxx->slim->name, ret);
+		goto err_reset;
+	}
+	wcd9xxx->read_dev = wcd9xxx_slim_read_device;
+	wcd9xxx->write_dev = wcd9xxx_slim_write_device;
+	wcd9xxx_pgd_la = wcd9xxx->slim->laddr;
+	wcd9xxx->slim_slave = &pdata->slimbus_slave_device;
+	if (!wcd9xxx->dev->of_node)
+		wcd9xxx_assign_irq(&wcd9xxx->core_res,
+					pdata->irq, pdata->irq_base);
+
+	ret = slim_add_device(slim->ctrl, wcd9xxx->slim_slave);
+	if (ret) {
+		dev_err(&slim->dev, "%s: error, adding SLIMBUS device failed\n",
+			__func__);
+		goto err_reset;
+	}
+
+	ret = wcd9xxx_slim_get_laddr(wcd9xxx->slim_slave,
+				     wcd9xxx->slim_slave->e_addr,
+				     ARRAY_SIZE(wcd9xxx->slim_slave->e_addr),
+				     &wcd9xxx->slim_slave->laddr);
+	if (ret) {
+		dev_err(&slim->dev, "%s: failed to get slimbus %s logical address: %d\n",
+		       __func__, wcd9xxx->slim->name, ret);
+		goto err_slim_add;
+	}
+	wcd9xxx_inf_la = wcd9xxx->slim_slave->laddr;
+	wcd9xxx_set_intf_type(WCD9XXX_INTERFACE_TYPE_SLIMBUS);
+
+	ret = wcd9xxx_device_init(wcd9xxx);
+	if (ret) {
+		dev_err(&slim->dev, "%s: error, initializing device failed (%d)\n",
+			__func__, ret);
+		goto err_slim_add;
+	}
+#ifdef CONFIG_DEBUG_FS
+	debugCodec = wcd9xxx;
+
+	debugfs_wcd9xxx_dent = debugfs_create_dir("wcd9xxx_core", NULL);
+	if (!IS_ERR_OR_NULL(debugfs_wcd9xxx_dent)) {
+		debugfs_peek = debugfs_create_file("slimslave_peek",
+			S_IFREG | S_IRUSR, debugfs_wcd9xxx_dent,
+			(void *) "slimslave_peek", &codec_debug_ops);
+
+		debugfs_poke = debugfs_create_file("slimslave_poke",
+			S_IFREG | S_IRUSR, debugfs_wcd9xxx_dent,
+			(void *) "slimslave_poke", &codec_debug_ops);
+
+		debugfs_power_state = debugfs_create_file("power_state",
+			S_IFREG | S_IRUSR, debugfs_wcd9xxx_dent,
+			(void *) "power_state", &codec_debug_ops);
+
+		debugfs_reg_dump = debugfs_create_file("slimslave_reg_dump",
+			S_IFREG | S_IRUSR, debugfs_wcd9xxx_dent,
+			(void *) "slimslave_reg_dump", &codec_debug_ops);
+	}
+#endif
+
+	return ret;
+
+err_slim_add:
+	slim_remove_device(wcd9xxx->slim_slave);
+err_reset:
+	wcd9xxx_reset_low(wcd9xxx->dev);
+err_supplies:
+	msm_cdc_release_supplies(wcd9xxx->dev, wcd9xxx->supplies,
+				 pdata->regulator,
+				 pdata->num_supplies);
+err_codec:
+	slim_set_clientdata(slim, NULL);
+err:
+	devm_kfree(&slim->dev, wcd9xxx);
+	return ret;
+}
+
+static int wcd9xxx_slim_remove(struct slim_device *pdev)
+{
+	struct wcd9xxx *wcd9xxx;
+	struct wcd9xxx_pdata *pdata = pdev->dev.platform_data;
+
+#ifdef CONFIG_DEBUG_FS
+	debugfs_remove_recursive(debugfs_wcd9xxx_dent);
+#endif
+	wcd9xxx = slim_get_devicedata(pdev);
+	wcd9xxx_deinit_slimslave(wcd9xxx);
+	slim_remove_device(wcd9xxx->slim_slave);
+	msm_cdc_release_supplies(wcd9xxx->dev, wcd9xxx->supplies,
+				 pdata->regulator,
+				 pdata->num_supplies);
+	wcd9xxx_device_exit(wcd9xxx);
+	slim_set_clientdata(pdev, NULL);
+	return 0;
+}
+
+static int wcd9xxx_device_up(struct wcd9xxx *wcd9xxx)
+{
+	int ret = 0;
+	struct wcd9xxx_core_resource *wcd9xxx_res = &wcd9xxx->core_res;
+
+	dev_info(wcd9xxx->dev, "%s: codec bring up\n", __func__);
+	wcd9xxx_bringup(wcd9xxx->dev);
+	ret = wcd9xxx_irq_init(wcd9xxx_res);
+	if (ret) {
+		pr_err("%s: wcd9xx_irq_init failed : %d\n", __func__, ret);
+	} else {
+		if (wcd9xxx->post_reset)
+			ret = wcd9xxx->post_reset(wcd9xxx);
+	}
+	return ret;
+}
+
+static int wcd9xxx_slim_device_reset(struct slim_device *sldev)
+{
+	int ret;
+	struct wcd9xxx *wcd9xxx = slim_get_devicedata(sldev);
+
+	if (!wcd9xxx) {
+		pr_err("%s: wcd9xxx is NULL\n", __func__);
+		return -EINVAL;
+	}
+
+	dev_info(wcd9xxx->dev, "%s: device reset, dev_up = %d\n",
+		__func__, wcd9xxx->dev_up);
+	if (wcd9xxx->dev_up)
+		return 0;
+
+	mutex_lock(&wcd9xxx->reset_lock);
+	ret = wcd9xxx_reset(wcd9xxx->dev);
+	if (ret)
+		dev_err(wcd9xxx->dev, "%s: Resetting Codec failed\n", __func__);
+	mutex_unlock(&wcd9xxx->reset_lock);
+
+	return ret;
+}
+
+static int wcd9xxx_slim_device_up(struct slim_device *sldev)
+{
+	struct wcd9xxx *wcd9xxx = slim_get_devicedata(sldev);
+	int ret = 0;
+
+	if (!wcd9xxx) {
+		pr_err("%s: wcd9xxx is NULL\n", __func__);
+		return -EINVAL;
+	}
+	dev_info(wcd9xxx->dev, "%s: slim device up, dev_up = %d\n",
+		__func__, wcd9xxx->dev_up);
+	if (wcd9xxx->dev_up)
+		return 0;
+
+	wcd9xxx->dev_up = true;
+
+	mutex_lock(&wcd9xxx->reset_lock);
+	ret = wcd9xxx_device_up(wcd9xxx);
+	mutex_unlock(&wcd9xxx->reset_lock);
+
+	return ret;
+}
+
+static int wcd9xxx_slim_device_down(struct slim_device *sldev)
+{
+	struct wcd9xxx *wcd9xxx = slim_get_devicedata(sldev);
+
+	if (!wcd9xxx) {
+		pr_err("%s: wcd9xxx is NULL\n", __func__);
+		return -EINVAL;
+	}
+
+	dev_info(wcd9xxx->dev, "%s: device down, dev_up = %d\n",
+		__func__, wcd9xxx->dev_up);
+	if (!wcd9xxx->dev_up)
+		return 0;
+
+	wcd9xxx->dev_up = false;
+
+	mutex_lock(&wcd9xxx->reset_lock);
+	if (wcd9xxx->dev_down)
+		wcd9xxx->dev_down(wcd9xxx);
+	wcd9xxx_irq_exit(&wcd9xxx->core_res);
+	wcd9xxx_reset_low(wcd9xxx->dev);
+	mutex_unlock(&wcd9xxx->reset_lock);
+
+	return 0;
+}
+
+static int wcd9xxx_slim_resume(struct slim_device *sldev)
+{
+	struct wcd9xxx *wcd9xxx = slim_get_devicedata(sldev);
+
+	return wcd9xxx_core_res_resume(&wcd9xxx->core_res);
+}
+
+static int wcd9xxx_i2c_resume(struct device *dev)
+{
+	struct wcd9xxx *wcd9xxx = dev_get_drvdata(dev);
+
+	if (wcd9xxx)
+		return wcd9xxx_core_res_resume(&wcd9xxx->core_res);
+	else
+		return 0;
+}
+
+static int wcd9xxx_slim_suspend(struct slim_device *sldev, pm_message_t pmesg)
+{
+	struct wcd9xxx *wcd9xxx = slim_get_devicedata(sldev);
+
+	return wcd9xxx_core_res_suspend(&wcd9xxx->core_res, pmesg);
+}
+
+static int wcd9xxx_i2c_suspend(struct device *dev)
+{
+	struct wcd9xxx *wcd9xxx = dev_get_drvdata(dev);
+	pm_message_t pmesg = {0};
+
+	if (wcd9xxx)
+		return wcd9xxx_core_res_suspend(&wcd9xxx->core_res, pmesg);
+	else
+		return 0;
+}
+
+static const struct slim_device_id wcd_slim_device_id[] = {
+	{"sitar-slim", 0},
+	{"sitar1p1-slim", 0},
+	{"tabla-slim", 0},
+	{"tabla2x-slim", 0},
+	{"taiko-slim-pgd", 0},
+	{"tapan-slim-pgd", 0},
+	{"tomtom-slim-pgd", WCD9330},
+	{"tasha-slim-pgd", WCD9335},
+	{"tavil-slim-pgd", WCD934X},
+	{}
+};
+
+static struct slim_driver wcd_slim_driver = {
+	.driver = {
+		.name = "wcd-slim",
+		.owner = THIS_MODULE,
+	},
+	.probe = wcd9xxx_slim_probe,
+	.remove = wcd9xxx_slim_remove,
+	.id_table = wcd_slim_device_id,
+	.resume = wcd9xxx_slim_resume,
+	.suspend = wcd9xxx_slim_suspend,
+	.device_up = wcd9xxx_slim_device_up,
+	.reset_device = wcd9xxx_slim_device_reset,
+	.device_down = wcd9xxx_slim_device_down,
+};
+
+static struct i2c_device_id wcd9xxx_id_table[] = {
+	{"wcd9xxx-i2c", WCD9XXX_I2C_TOP_LEVEL},
+	{"wcd9xxx-i2c", WCD9XXX_I2C_ANALOG},
+	{"wcd9xxx-i2c", WCD9XXX_I2C_DIGITAL_1},
+	{"wcd9xxx-i2c", WCD9XXX_I2C_DIGITAL_2},
+	{}
+};
+
+static struct i2c_device_id tasha_id_table[] = {
+	{"tasha-i2c-pgd", WCD9XXX_I2C_TOP_LEVEL},
+	{}
+};
+
+static struct i2c_device_id tabla_id_table[] = {
+	{"tabla top level", WCD9XXX_I2C_TOP_LEVEL},
+	{"tabla analog", WCD9XXX_I2C_ANALOG},
+	{"tabla digital1", WCD9XXX_I2C_DIGITAL_1},
+	{"tabla digital2", WCD9XXX_I2C_DIGITAL_2},
+	{}
+};
+MODULE_DEVICE_TABLE(i2c, tabla_id_table);
+
+static const struct dev_pm_ops wcd9xxx_i2c_pm_ops = {
+	.suspend = wcd9xxx_i2c_suspend,
+	.resume	= wcd9xxx_i2c_resume,
+};
+
+static struct i2c_driver tabla_i2c_driver = {
+	.driver                 = {
+		.owner          =       THIS_MODULE,
+		.name           =       "tabla-i2c-core",
+		.pm		=	&wcd9xxx_i2c_pm_ops,
+	},
+	.id_table               =       tabla_id_table,
+	.probe                  =       wcd9xxx_i2c_probe,
+	.remove                 =       wcd9xxx_i2c_remove,
+};
+
+static struct i2c_driver wcd9xxx_i2c_driver = {
+	.driver                 = {
+		.owner          =       THIS_MODULE,
+		.name           =       "wcd9xxx-i2c-core",
+		.pm		=	&wcd9xxx_i2c_pm_ops,
+	},
+	.id_table               =       wcd9xxx_id_table,
+	.probe                  =       wcd9xxx_i2c_probe,
+	.remove                 =       wcd9xxx_i2c_remove,
+};
+
+static struct i2c_driver wcd9335_i2c_driver = {
+	.driver	                = {
+		.owner	        =       THIS_MODULE,
+		.name           =       "tasha-i2c-core",
+		.pm		=	&wcd9xxx_i2c_pm_ops,
+	},
+	.id_table               =       tasha_id_table,
+	.probe                  =       wcd9xxx_i2c_probe,
+	.remove                 =       wcd9xxx_i2c_remove,
+};
+
+static int __init wcd9xxx_init(void)
+{
+	int ret[NUM_WCD9XXX_REG_RET] = {0};
+	int i = 0;
+
+	wcd9xxx_set_intf_type(WCD9XXX_INTERFACE_TYPE_PROBING);
+
+	ret[0] = i2c_add_driver(&tabla_i2c_driver);
+	if (ret[0])
+		pr_err("%s: Failed to add the tabla2x I2C driver: %d\n",
+			__func__, ret[0]);
+
+	ret[1] = i2c_add_driver(&wcd9xxx_i2c_driver);
+	if (ret[1])
+		pr_err("%s: Failed to add the wcd9xxx I2C driver: %d\n",
+			__func__, ret[1]);
+
+	ret[2] = i2c_add_driver(&wcd9335_i2c_driver);
+	if (ret[2])
+		pr_err("%s: Failed to add the wcd9335 I2C driver: %d\n",
+			__func__, ret[2]);
+
+	ret[3] = slim_driver_register(&wcd_slim_driver);
+	if (ret[3])
+		pr_err("%s: Failed to register wcd SB driver: %d\n",
+			__func__, ret[3]);
+
+	for (i = 0; i < NUM_WCD9XXX_REG_RET; i++) {
+		if (ret[i])
+			return ret[i];
+	}
+
+	return 0;
+}
+module_init(wcd9xxx_init);
+
+static void __exit wcd9xxx_exit(void)
+{
+	wcd9xxx_set_intf_type(WCD9XXX_INTERFACE_TYPE_PROBING);
+
+	i2c_del_driver(&tabla_i2c_driver);
+	i2c_del_driver(&wcd9xxx_i2c_driver);
+	i2c_del_driver(&wcd9335_i2c_driver);
+	slim_driver_unregister(&wcd_slim_driver);
+}
+module_exit(wcd9xxx_exit);
+
+MODULE_DESCRIPTION("Codec core driver");
+MODULE_VERSION("1.0");
+MODULE_LICENSE("GPL v2");
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/mfd/wcd9xxx-irq.c	2019-01-22 16:16:24.691257165 +0100
@@ -0,0 +1,804 @@
+/* Copyright (c) 2011-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#include <linux/bitops.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/irq.h>
+#include <linux/mfd/core.h>
+#include <linux/regmap.h>
+#include <linux/mfd/wcd9xxx/core.h>
+#include <linux/mfd/wcd9xxx/wcd9xxx_registers.h>
+#include <linux/mfd/wcd9xxx/wcd9xxx-irq.h>
+#include <linux/delay.h>
+#include <linux/irqdomain.h>
+#include <linux/interrupt.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/slab.h>
+#include <linux/ratelimit.h>
+#include <soc/qcom/pm.h>
+#include <linux/gpio.h>
+#include <linux/of_gpio.h>
+
+#define BYTE_BIT_MASK(nr)		(1UL << ((nr) % BITS_PER_BYTE))
+#define BIT_BYTE(nr)			((nr) / BITS_PER_BYTE)
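+
+/*
+ * Editorial illustration (not part of the original patch): for codec
+ * irq 10, BIT_BYTE(10) selects status/mask byte 1 and BYTE_BIT_MASK(10)
+ * yields 1 << (10 % 8) == 0x04, i.e. irq 10 lives at byte 1, bit 2.
+ */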
+
+#define WCD9XXX_SYSTEM_RESUME_TIMEOUT_MS 100
+
+#ifndef NO_IRQ
+#define NO_IRQ	(-1)
+#endif
+
+#ifdef CONFIG_OF
+struct wcd9xxx_irq_drv_data {
+	struct irq_domain *domain;
+	int irq;
+};
+#endif
+
+static int virq_to_phyirq(
+	struct wcd9xxx_core_resource *wcd9xxx_res, int virq);
+static int phyirq_to_virq(
+	struct wcd9xxx_core_resource *wcd9xxx_res, int irq);
+static unsigned int wcd9xxx_irq_get_upstream_irq(
+	struct wcd9xxx_core_resource *wcd9xxx_res);
+static void wcd9xxx_irq_put_upstream_irq(
+	struct wcd9xxx_core_resource *wcd9xxx_res);
+static int wcd9xxx_map_irq(
+	struct wcd9xxx_core_resource *wcd9xxx_res, int irq);
+
+static void wcd9xxx_irq_lock(struct irq_data *data)
+{
+	struct wcd9xxx_core_resource *wcd9xxx_res =
+			irq_data_get_irq_chip_data(data);
+	mutex_lock(&wcd9xxx_res->irq_lock);
+}
+
+static void wcd9xxx_irq_sync_unlock(struct irq_data *data)
+{
+	struct wcd9xxx_core_resource *wcd9xxx_res =
+			irq_data_get_irq_chip_data(data);
+	int i;
+
+	if ((ARRAY_SIZE(wcd9xxx_res->irq_masks_cur) >
+			WCD9XXX_MAX_IRQ_REGS) ||
+	    (ARRAY_SIZE(wcd9xxx_res->irq_masks_cache) >
+			WCD9XXX_MAX_IRQ_REGS)) {
+		pr_err("%s: array size out of bounds\n", __func__);
+		return;
+	}
+	if (!wcd9xxx_res->wcd_core_regmap) {
+		pr_err("%s: Codec core regmap not defined\n",
+			__func__);
+		return;
+	}
+
+	for (i = 0; i < ARRAY_SIZE(wcd9xxx_res->irq_masks_cur); i++) {
+		/*
+		 * If there's been a change in the mask, write it back
+		 * to the hardware.
+		 */
+		if (wcd9xxx_res->irq_masks_cur[i] !=
+					wcd9xxx_res->irq_masks_cache[i]) {
+
+			wcd9xxx_res->irq_masks_cache[i] =
+					wcd9xxx_res->irq_masks_cur[i];
+			regmap_write(wcd9xxx_res->wcd_core_regmap,
+			wcd9xxx_res->intr_reg[WCD9XXX_INTR_MASK_BASE] + i,
+			wcd9xxx_res->irq_masks_cur[i]);
+		}
+	}
+
+	mutex_unlock(&wcd9xxx_res->irq_lock);
+}
+
+static void wcd9xxx_irq_enable(struct irq_data *data)
+{
+	struct wcd9xxx_core_resource *wcd9xxx_res =
+			irq_data_get_irq_chip_data(data);
+	int wcd9xxx_irq = virq_to_phyirq(wcd9xxx_res, data->irq);
+	int byte = BIT_BYTE(wcd9xxx_irq);
+	int size = ARRAY_SIZE(wcd9xxx_res->irq_masks_cur);
+
+	if ((byte < size) && (byte >= 0)) {
+		wcd9xxx_res->irq_masks_cur[byte] &=
+			~(BYTE_BIT_MASK(wcd9xxx_irq));
+	} else {
+		pr_err("%s: Array size is %d but index is %d: Out of range\n",
+			__func__, size, byte);
+	}
+}
+
+static void wcd9xxx_irq_disable(struct irq_data *data)
+{
+	struct wcd9xxx_core_resource *wcd9xxx_res =
+			irq_data_get_irq_chip_data(data);
+	int wcd9xxx_irq = virq_to_phyirq(wcd9xxx_res, data->irq);
+	int byte = BIT_BYTE(wcd9xxx_irq);
+	int size = ARRAY_SIZE(wcd9xxx_res->irq_masks_cur);
+
+	if ((byte < size) && (byte >= 0)) {
+		wcd9xxx_res->irq_masks_cur[byte]
+			|= BYTE_BIT_MASK(wcd9xxx_irq);
+	} else {
+		pr_err("%s: Array size is %d but index is %d: Out of range\n",
+			__func__, size, byte);
+	}
+}
+
+static void wcd9xxx_irq_ack(struct irq_data *data)
+{
+	int wcd9xxx_irq = 0;
+	struct wcd9xxx_core_resource *wcd9xxx_res =
+			irq_data_get_irq_chip_data(data);
+
+	if (wcd9xxx_res == NULL) {
+		pr_err("%s: wcd9xxx_res is NULL\n", __func__);
+		return;
+	}
+	wcd9xxx_irq = virq_to_phyirq(wcd9xxx_res, data->irq);
+	pr_debug("%s: IRQ_ACK called for WCD9XXX IRQ: %d\n",
+				__func__, wcd9xxx_irq);
+}
+
+static void wcd9xxx_irq_mask(struct irq_data *d)
+{
+	/*
+	 * Nothing to do here; the callback is still required because the
+	 * kernel core invokes irq_mask without a NULL check.
+	 */
+}
+
+static struct irq_chip wcd9xxx_irq_chip = {
+	.name = "wcd9xxx",
+	.irq_bus_lock = wcd9xxx_irq_lock,
+	.irq_bus_sync_unlock = wcd9xxx_irq_sync_unlock,
+	.irq_disable = wcd9xxx_irq_disable,
+	.irq_enable = wcd9xxx_irq_enable,
+	.irq_mask = wcd9xxx_irq_mask,
+	.irq_ack = wcd9xxx_irq_ack,
+};
+
+bool wcd9xxx_lock_sleep(
+	struct wcd9xxx_core_resource *wcd9xxx_res)
+{
+	enum wcd9xxx_pm_state os;
+
+	/*
+	 * wcd9xxx_{lock/unlock}_sleep are called mostly by wcd9xxx_irq_thread
+	 * and its subroutines. However, btn0_lpress_fn is not a subroutine of
+	 * wcd9xxx_irq_thread and can race with it, so accesses to
+	 * wlock_holders must be protected by a mutex.
+	 *
+	 * If the system has not resumed, simply return false so the codec
+	 * driver's IRQ handler can return without handling the IRQ. Since the
+	 * interrupt line is still active, the codec will raise another IRQ
+	 * to retry shortly.
+	 */
+	mutex_lock(&wcd9xxx_res->pm_lock);
+	if (wcd9xxx_res->wlock_holders++ == 0) {
+		pr_debug("%s: holding wake lock\n", __func__);
+		pm_qos_update_request(&wcd9xxx_res->pm_qos_req,
+				      msm_cpuidle_get_deep_idle_latency());
+		pm_stay_awake(wcd9xxx_res->dev);
+	}
+	mutex_unlock(&wcd9xxx_res->pm_lock);
+
+	if (!wait_event_timeout(wcd9xxx_res->pm_wq,
+				((os =  wcd9xxx_pm_cmpxchg(wcd9xxx_res,
+						  WCD9XXX_PM_SLEEPABLE,
+						  WCD9XXX_PM_AWAKE)) ==
+							WCD9XXX_PM_SLEEPABLE ||
+					(os == WCD9XXX_PM_AWAKE)),
+				msecs_to_jiffies(
+					WCD9XXX_SYSTEM_RESUME_TIMEOUT_MS))) {
+		pr_warn("%s: system didn't resume within %dms, s %d, w %d\n",
+			__func__,
+			WCD9XXX_SYSTEM_RESUME_TIMEOUT_MS, wcd9xxx_res->pm_state,
+			wcd9xxx_res->wlock_holders);
+		wcd9xxx_unlock_sleep(wcd9xxx_res);
+		return false;
+	}
+	wake_up_all(&wcd9xxx_res->pm_wq);
+	return true;
+}
+EXPORT_SYMBOL(wcd9xxx_lock_sleep);
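+
+/*
+ * Illustrative usage sketch (an assumption, not taken from this patch):
+ * codec interrupt work is expected to be bracketed by
+ * wcd9xxx_lock_sleep()/wcd9xxx_unlock_sleep() so the system cannot
+ * suspend mid-handling:
+ *
+ *	static irqreturn_t example_codec_handler(int irq, void *data)
+ *	{
+ *		struct wcd9xxx_core_resource *res = data;
+ *
+ *		if (!wcd9xxx_lock_sleep(res))
+ *			return IRQ_NONE; // line stays asserted; retried later
+ *		// ... service the interrupt ...
+ *		wcd9xxx_unlock_sleep(res);
+ *		return IRQ_HANDLED;
+ *	}
+ *
+ * example_codec_handler is a hypothetical name used only for illustration.
+ */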
+
+void wcd9xxx_unlock_sleep(
+	struct wcd9xxx_core_resource *wcd9xxx_res)
+{
+	mutex_lock(&wcd9xxx_res->pm_lock);
+	if (--wcd9xxx_res->wlock_holders == 0) {
+		pr_debug("%s: releasing wake lock pm_state %d -> %d\n",
+			 __func__, wcd9xxx_res->pm_state, WCD9XXX_PM_SLEEPABLE);
+		/*
+		 * if wcd9xxx_lock_sleep failed, pm_state would be still
+		 * WCD9XXX_PM_ASLEEP, don't overwrite
+		 */
+		if (likely(wcd9xxx_res->pm_state == WCD9XXX_PM_AWAKE))
+			wcd9xxx_res->pm_state = WCD9XXX_PM_SLEEPABLE;
+		pm_qos_update_request(&wcd9xxx_res->pm_qos_req,
+				PM_QOS_DEFAULT_VALUE);
+		pm_relax(wcd9xxx_res->dev);
+	}
+	mutex_unlock(&wcd9xxx_res->pm_lock);
+	wake_up_all(&wcd9xxx_res->pm_wq);
+}
+EXPORT_SYMBOL(wcd9xxx_unlock_sleep);
+
+void wcd9xxx_nested_irq_lock(struct wcd9xxx_core_resource *wcd9xxx_res)
+{
+	mutex_lock(&wcd9xxx_res->nested_irq_lock);
+}
+
+void wcd9xxx_nested_irq_unlock(struct wcd9xxx_core_resource *wcd9xxx_res)
+{
+	mutex_unlock(&wcd9xxx_res->nested_irq_lock);
+}
+
+
+static void wcd9xxx_irq_dispatch(struct wcd9xxx_core_resource *wcd9xxx_res,
+			struct intr_data *irqdata)
+{
+	int irqbit = irqdata->intr_num;
+
+	if (!wcd9xxx_res->wcd_core_regmap) {
+		pr_err("%s: codec core regmap not defined\n",
+			__func__);
+		return;
+	}
+
+	if (irqdata->clear_first) {
+		wcd9xxx_nested_irq_lock(wcd9xxx_res);
+		regmap_write(wcd9xxx_res->wcd_core_regmap,
+			wcd9xxx_res->intr_reg[WCD9XXX_INTR_CLEAR_BASE] +
+					      BIT_BYTE(irqbit),
+			BYTE_BIT_MASK(irqbit));
+
+		if (wcd9xxx_get_intf_type() == WCD9XXX_INTERFACE_TYPE_I2C)
+			regmap_write(wcd9xxx_res->wcd_core_regmap,
+				wcd9xxx_res->intr_reg[WCD9XXX_INTR_CLR_COMMIT],
+				0x02);
+		handle_nested_irq(phyirq_to_virq(wcd9xxx_res, irqbit));
+		wcd9xxx_nested_irq_unlock(wcd9xxx_res);
+	} else {
+		wcd9xxx_nested_irq_lock(wcd9xxx_res);
+		handle_nested_irq(phyirq_to_virq(wcd9xxx_res, irqbit));
+		regmap_write(wcd9xxx_res->wcd_core_regmap,
+			wcd9xxx_res->intr_reg[WCD9XXX_INTR_CLEAR_BASE] +
+					      BIT_BYTE(irqbit),
+			BYTE_BIT_MASK(irqbit));
+		if (wcd9xxx_get_intf_type() == WCD9XXX_INTERFACE_TYPE_I2C)
+			regmap_write(wcd9xxx_res->wcd_core_regmap,
+				wcd9xxx_res->intr_reg[WCD9XXX_INTR_CLR_COMMIT],
+				0x02);
+
+		wcd9xxx_nested_irq_unlock(wcd9xxx_res);
+	}
+}
+
+static irqreturn_t wcd9xxx_irq_thread(int irq, void *data)
+{
+	int ret;
+	int i;
+	struct intr_data irqdata;
+	char linebuf[128];
+	static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 1);
+	struct wcd9xxx_core_resource *wcd9xxx_res = data;
+	int num_irq_regs = wcd9xxx_res->num_irq_regs;
+	u8 status[4], status1[4] = {0}, unmask_status[4] = {0};
+
+	if (unlikely(!wcd9xxx_lock_sleep(wcd9xxx_res))) {
+		dev_err(wcd9xxx_res->dev, "Failed to hold suspend\n");
+		return IRQ_NONE;
+	}
+
+	if (!wcd9xxx_res->wcd_core_regmap) {
+		dev_err(wcd9xxx_res->dev,
+			"%s: Codec core regmap not supplied\n",
+			   __func__);
+		goto err_disable_irq;
+	}
+
+	memset(status, 0, sizeof(status));
+	ret = regmap_bulk_read(wcd9xxx_res->wcd_core_regmap,
+		wcd9xxx_res->intr_reg[WCD9XXX_INTR_STATUS_BASE],
+		status, num_irq_regs);
+
+	if (ret < 0) {
+		dev_err(wcd9xxx_res->dev,
+				"Failed to read interrupt status: %d\n", ret);
+		goto err_disable_irq;
+	}
+	/*
+	 * If status is 0, return without clearing.
+	 * status contains: HW status - masked interrupts
+	 * status1 contains: unhandled interrupts - masked interrupts
+	 * unmask_status contains: unhandled interrupts
+	 */
+	if (unlikely(!memcmp(status, status1, sizeof(status)))) {
+		pr_debug("%s: status is 0\n", __func__);
+		wcd9xxx_unlock_sleep(wcd9xxx_res);
+		return IRQ_HANDLED;
+	}
+
+	/*
+	 * Copy status to unmask_status before masking, otherwise SW may
+	 * fail to clear a masked interrupt in a corner case.
+	 */
+	memcpy(unmask_status, status, sizeof(unmask_status));
+
+	/* Apply masking */
+	for (i = 0; i < num_irq_regs; i++)
+		status[i] &= ~wcd9xxx_res->irq_masks_cur[i];
+
+	memcpy(status1, status, sizeof(status1));
+
+	/*
+	 * Find out which interrupts were triggered and call their
+	 * handler functions.
+	 *
+	 * The codec has a single hardware irq line shared by all of its
+	 * internal interrupts, so the master irq handler may dispatch
+	 * several nested irq handlers in one pass. Dispatch them in the
+	 * order maintained by the interrupt table.
+	 */
+	for (i = 0; i < wcd9xxx_res->intr_table_size; i++) {
+		irqdata = wcd9xxx_res->intr_table[i];
+		if (status[BIT_BYTE(irqdata.intr_num)] &
+			BYTE_BIT_MASK(irqdata.intr_num)) {
+			wcd9xxx_irq_dispatch(wcd9xxx_res, &irqdata);
+			status1[BIT_BYTE(irqdata.intr_num)] &=
+					~BYTE_BIT_MASK(irqdata.intr_num);
+			unmask_status[BIT_BYTE(irqdata.intr_num)] &=
+					~BYTE_BIT_MASK(irqdata.intr_num);
+		}
+	}
+
+	/*
+	 * As a failsafe, clear any unhandled irq to prevent an interrupt
+	 * storm. Note that we can declare an irq unhandled only when no
+	 * irq at all was handled by a nested irq handler, since Taiko can
+	 * route some irqs to the QDSP instead of the APPS processor. The
+	 * driver therefore must not clear pending irqs when some were
+	 * handled and others were not.
+	 */
+	if (unlikely(!memcmp(status, status1, sizeof(status)))) {
+		if (__ratelimit(&ratelimit)) {
+			pr_warn("%s: Unhandled irq found\n", __func__);
+			hex_dump_to_buffer(status, sizeof(status), 16, 1,
+					   linebuf, sizeof(linebuf), false);
+			pr_warn("%s: status0 : %s\n", __func__, linebuf);
+			hex_dump_to_buffer(status1, sizeof(status1), 16, 1,
+					   linebuf, sizeof(linebuf), false);
+			pr_warn("%s: status1 : %s\n", __func__, linebuf);
+		}
+		/*
+		 * unmask_status contains unhandled interrupts, hence clear all
+		 * unhandled interrupts.
+		 */
+		ret = regmap_bulk_write(wcd9xxx_res->wcd_core_regmap,
+			wcd9xxx_res->intr_reg[WCD9XXX_INTR_CLEAR_BASE],
+			unmask_status, num_irq_regs);
+		if (wcd9xxx_get_intf_type() == WCD9XXX_INTERFACE_TYPE_I2C)
+			regmap_write(wcd9xxx_res->wcd_core_regmap,
+				wcd9xxx_res->intr_reg[WCD9XXX_INTR_CLR_COMMIT],
+				0x02);
+	}
+	wcd9xxx_unlock_sleep(wcd9xxx_res);
+
+	return IRQ_HANDLED;
+
+err_disable_irq:
+	dev_err(wcd9xxx_res->dev,
+		"Disable irq %d\n", wcd9xxx_res->irq);
+
+	disable_irq_wake(wcd9xxx_res->irq);
+	disable_irq_nosync(wcd9xxx_res->irq);
+	wcd9xxx_unlock_sleep(wcd9xxx_res);
+	return IRQ_NONE;
+}
+
+void wcd9xxx_free_irq(struct wcd9xxx_core_resource *wcd9xxx_res,
+			int irq, void *data)
+{
+	free_irq(phyirq_to_virq(wcd9xxx_res, irq), data);
+}
+
+void wcd9xxx_enable_irq(struct wcd9xxx_core_resource *wcd9xxx_res, int irq)
+{
+	if (wcd9xxx_res->irq)
+		enable_irq(phyirq_to_virq(wcd9xxx_res, irq));
+}
+
+void wcd9xxx_disable_irq(struct wcd9xxx_core_resource *wcd9xxx_res, int irq)
+{
+	if (wcd9xxx_res->irq)
+		disable_irq_nosync(phyirq_to_virq(wcd9xxx_res, irq));
+}
+
+void wcd9xxx_disable_irq_sync(
+			struct wcd9xxx_core_resource *wcd9xxx_res, int irq)
+{
+	if (wcd9xxx_res->irq)
+		disable_irq(phyirq_to_virq(wcd9xxx_res, irq));
+}
+
+static int wcd9xxx_irq_setup_downstream_irq(
+			struct wcd9xxx_core_resource *wcd9xxx_res)
+{
+	int irq, virq, ret;
+
+	pr_debug("%s: enter\n", __func__);
+
+	for (irq = 0; irq < wcd9xxx_res->num_irqs; irq++) {
+		/* Map OF irq */
+		virq = wcd9xxx_map_irq(wcd9xxx_res, irq);
+		pr_debug("%s: irq %d -> %d\n", __func__, irq, virq);
+		if (virq == NO_IRQ) {
+			pr_err("%s, No interrupt specifier for irq %d\n",
+			       __func__, irq);
+			return NO_IRQ;
+		}
+
+		ret = irq_set_chip_data(virq, wcd9xxx_res);
+		if (ret) {
+			pr_err("%s: Failed to configure irq %d (%d)\n",
+			       __func__, irq, ret);
+			return ret;
+		}
+
+		if (wcd9xxx_res->irq_level_high[irq])
+			irq_set_chip_and_handler(virq, &wcd9xxx_irq_chip,
+						 handle_level_irq);
+		else
+			irq_set_chip_and_handler(virq, &wcd9xxx_irq_chip,
+						 handle_edge_irq);
+
+		irq_set_nested_thread(virq, 1);
+	}
+
+	pr_debug("%s: leave\n", __func__);
+
+	return 0;
+}
+
+int wcd9xxx_irq_init(struct wcd9xxx_core_resource *wcd9xxx_res)
+{
+	int i, ret;
+	u8 irq_level[wcd9xxx_res->num_irq_regs];
+	struct irq_domain *domain;
+	struct device_node *pnode;
+
+	mutex_init(&wcd9xxx_res->irq_lock);
+	mutex_init(&wcd9xxx_res->nested_irq_lock);
+
+	pnode = of_irq_find_parent(wcd9xxx_res->dev->of_node);
+	if (unlikely(!pnode))
+		return -EINVAL;
+
+	domain = irq_find_host(pnode);
+	if (unlikely(!domain))
+		return -EINVAL;
+
+	wcd9xxx_res->domain = domain;
+
+	wcd9xxx_res->irq = wcd9xxx_irq_get_upstream_irq(wcd9xxx_res);
+	if (!wcd9xxx_res->irq) {
+		pr_warn("%s: irq driver is not yet initialized\n", __func__);
+		mutex_destroy(&wcd9xxx_res->irq_lock);
+		mutex_destroy(&wcd9xxx_res->nested_irq_lock);
+		return -EPROBE_DEFER;
+	}
+	pr_debug("%s: probed irq %d\n", __func__, wcd9xxx_res->irq);
+
+	/* Setup downstream IRQs */
+	ret = wcd9xxx_irq_setup_downstream_irq(wcd9xxx_res);
+	if (ret) {
+		pr_err("%s: Failed to setup downstream IRQ\n", __func__);
+		wcd9xxx_irq_put_upstream_irq(wcd9xxx_res);
+		mutex_destroy(&wcd9xxx_res->irq_lock);
+		mutex_destroy(&wcd9xxx_res->nested_irq_lock);
+		return ret;
+	}
+
+	/* irq 0 is level triggered; all other wcd9xxx irqs are edge triggered */
+	wcd9xxx_res->irq_level_high[0] = true;
+
+	/* mask all the interrupts */
+	memset(irq_level, 0, wcd9xxx_res->num_irq_regs);
+	for (i = 0; i < wcd9xxx_res->num_irqs; i++) {
+		wcd9xxx_res->irq_masks_cur[BIT_BYTE(i)] |= BYTE_BIT_MASK(i);
+		wcd9xxx_res->irq_masks_cache[BIT_BYTE(i)] |= BYTE_BIT_MASK(i);
+		irq_level[BIT_BYTE(i)] |=
+		    wcd9xxx_res->irq_level_high[i] << (i % BITS_PER_BYTE);
+	}
+
+	if (!wcd9xxx_res->wcd_core_regmap) {
+		dev_err(wcd9xxx_res->dev,
+			"%s: Codec core regmap not defined\n",
+			   __func__);
+		ret = -EINVAL;
+		goto fail_irq_init;
+	}
+
+	for (i = 0; i < wcd9xxx_res->num_irq_regs; i++) {
+		/* Initialize interrupt mask and level registers */
+		regmap_write(wcd9xxx_res->wcd_core_regmap,
+			wcd9xxx_res->intr_reg[WCD9XXX_INTR_LEVEL_BASE] + i,
+					irq_level[i]);
+		regmap_write(wcd9xxx_res->wcd_core_regmap,
+			wcd9xxx_res->intr_reg[WCD9XXX_INTR_MASK_BASE] + i,
+			wcd9xxx_res->irq_masks_cur[i]);
+	}
+
+	ret = request_threaded_irq(wcd9xxx_res->irq, NULL, wcd9xxx_irq_thread,
+				   IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
+				   "wcd9xxx", wcd9xxx_res);
+	if (ret != 0) {
+		dev_err(wcd9xxx_res->dev, "Failed to request IRQ %d: %d\n",
+			wcd9xxx_res->irq, ret);
+	} else {
+		ret = enable_irq_wake(wcd9xxx_res->irq);
+		if (ret) {
+			dev_err(wcd9xxx_res->dev,
+				"Failed to set wake interrupt on IRQ %d: %d\n",
+				wcd9xxx_res->irq, ret);
+			free_irq(wcd9xxx_res->irq, wcd9xxx_res);
+		}
+	}
+
+	if (ret)
+		goto fail_irq_init;
+
+	return ret;
+
+fail_irq_init:
+	dev_err(wcd9xxx_res->dev,
+			"%s: Failed to init wcd9xxx irq\n", __func__);
+	wcd9xxx_irq_put_upstream_irq(wcd9xxx_res);
+	mutex_destroy(&wcd9xxx_res->irq_lock);
+	mutex_destroy(&wcd9xxx_res->nested_irq_lock);
+	return ret;
+}
+
+int wcd9xxx_request_irq(struct wcd9xxx_core_resource *wcd9xxx_res,
+			int irq, irq_handler_t handler,
+			const char *name, void *data)
+{
+	int virq;
+
+	virq = phyirq_to_virq(wcd9xxx_res, irq);
+
+	return request_threaded_irq(virq, NULL, handler, IRQF_TRIGGER_RISING,
+				    name, data);
+}
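+
+/*
+ * Illustrative sketch (assumption, not from this patch): a codec driver
+ * registers a handler for one of the codec's internal interrupts by its
+ * physical irq index; the virq translation is done internally:
+ *
+ *	ret = wcd9xxx_request_irq(core_res, EXAMPLE_IRQ_MBHC_INSERTION,
+ *				  example_mbhc_handler, "MBHC insert", priv);
+ *	...
+ *	wcd9xxx_free_irq(core_res, EXAMPLE_IRQ_MBHC_INSERTION, priv);
+ *
+ * EXAMPLE_IRQ_MBHC_INSERTION and example_mbhc_handler are hypothetical
+ * names used only for illustration.
+ */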
+
+void wcd9xxx_irq_exit(struct wcd9xxx_core_resource *wcd9xxx_res)
+{
+	dev_dbg(wcd9xxx_res->dev, "%s: Cleaning up irq %d\n", __func__,
+		wcd9xxx_res->irq);
+
+	if (wcd9xxx_res->irq) {
+		disable_irq_wake(wcd9xxx_res->irq);
+		free_irq(wcd9xxx_res->irq, wcd9xxx_res);
+		wcd9xxx_res->irq = 0;
+		wcd9xxx_irq_put_upstream_irq(wcd9xxx_res);
+	}
+	mutex_destroy(&wcd9xxx_res->irq_lock);
+	mutex_destroy(&wcd9xxx_res->nested_irq_lock);
+}
+
+#ifndef CONFIG_OF
+static int phyirq_to_virq(
+	struct wcd9xxx_core_resource *wcd9xxx_res,
+	int offset)
+{
+	return wcd9xxx_res->irq_base + offset;
+}
+
+static int virq_to_phyirq(
+	struct wcd9xxx_core_resource *wcd9xxx_res,
+	int virq)
+{
+	return virq - wcd9xxx_res->irq_base;
+}
+
+static unsigned int wcd9xxx_irq_get_upstream_irq(
+	struct wcd9xxx_core_resource *wcd9xxx_res)
+{
+	return wcd9xxx_res->irq;
+}
+
+static void wcd9xxx_irq_put_upstream_irq(
+	struct wcd9xxx_core_resource *wcd9xxx_res)
+{
+	/* Do nothing */
+}
+
+static int wcd9xxx_map_irq(
+	struct wcd9xxx_core_resource *wcd9xxx_core_res, int irq)
+{
+	return phyirq_to_virq(wcd9xxx_core_res, irq);
+}
+#else
+static struct wcd9xxx_irq_drv_data *
+wcd9xxx_irq_add_domain(struct device_node *node,
+			       struct device_node *parent)
+{
+	struct wcd9xxx_irq_drv_data *data = NULL;
+
+	pr_debug("%s: node %s, node parent %s\n", __func__,
+		 node->name, node->parent->name);
+
+	data = kzalloc(sizeof(*data), GFP_KERNEL);
+	if (!data)
+		return NULL;
+
+	/*
+	 * The wcd9xxx_intc interrupt controller supports N-to-N irq mapping
+	 * with a single-cell binding carrying irq numbers (offsets) only.
+	 * Use irq_domain_simple_ops that has irq_domain_simple_map and
+	 * irq_domain_xlate_onetwocell.
+	 */
+	data->domain = irq_domain_add_linear(node, WCD9XXX_MAX_NUM_IRQS,
+					     &irq_domain_simple_ops, data);
+	if (!data->domain) {
+		kfree(data);
+		return NULL;
+	}
+
+	return data;
+}
+
+static struct wcd9xxx_irq_drv_data *
+wcd9xxx_get_irq_drv_d(const struct wcd9xxx_core_resource *wcd9xxx_res)
+{
+	struct irq_domain *domain;
+
+	domain = wcd9xxx_res->domain;
+
+	if (domain)
+		return domain->host_data;
+	else
+		return NULL;
+}
+
+static int phyirq_to_virq(struct wcd9xxx_core_resource *wcd9xxx_res, int offset)
+{
+	struct wcd9xxx_irq_drv_data *data;
+
+	data = wcd9xxx_get_irq_drv_d(wcd9xxx_res);
+	if (!data) {
+		pr_warn("%s: not registered to interrupt controller\n",
+			__func__);
+		return -EINVAL;
+	}
+	return irq_linear_revmap(data->domain, offset);
+}
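+
+/*
+ * Note (editorial): with the linear domain created in
+ * wcd9xxx_irq_add_domain(), irq_linear_revmap() is an O(1) lookup of the
+ * Linux virq previously mapped for hardware irq `offset`; it returns 0
+ * when no mapping exists yet.
+ */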
+
+static int virq_to_phyirq(struct wcd9xxx_core_resource *wcd9xxx_res, int virq)
+{
+	struct irq_data *irq_data = irq_get_irq_data(virq);
+
+	if (unlikely(!irq_data)) {
+		pr_err("%s: irq_data is NULL\n", __func__);
+		return -EINVAL;
+	}
+	return irq_data->hwirq;
+}
+
+static unsigned int wcd9xxx_irq_get_upstream_irq(
+				struct wcd9xxx_core_resource *wcd9xxx_res)
+{
+	struct wcd9xxx_irq_drv_data *data;
+
+	data = wcd9xxx_get_irq_drv_d(wcd9xxx_res);
+	if (!data) {
+		pr_err("%s: interrupt controller is not registerd\n", __func__);
+		return 0;
+	}
+
+	rmb();
+	return data->irq;
+}
+
+static void wcd9xxx_irq_put_upstream_irq(
+			struct wcd9xxx_core_resource *wcd9xxx_res)
+{
+	wcd9xxx_res->domain = NULL;
+}
+
+static int wcd9xxx_map_irq(struct wcd9xxx_core_resource *wcd9xxx_res, int irq)
+{
+	return of_irq_to_resource(wcd9xxx_res->dev->of_node, irq, NULL);
+}
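+
+/*
+ * Illustrative DT fragment (an assumption, not from this patch) showing
+ * the node that wcd9xxx_irq_probe() binds to and that wcd9xxx_map_irq()
+ * resolves interrupt specifiers against; the GPIO number and parent are
+ * placeholders:
+ *
+ *	wcd9xxx_intc: wcd9xxx-irq {
+ *		compatible = "qcom,wcd9xxx-irq";
+ *		interrupt-controller;
+ *		#interrupt-cells = <1>;
+ *		qcom,gpio-connect = <&tlmm 54 0>;
+ *	};
+ */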
+
+static int wcd9xxx_irq_probe(struct platform_device *pdev)
+{
+	int irq, dir_apps_irq = -EINVAL;
+	struct wcd9xxx_irq_drv_data *data;
+	struct device_node *node = pdev->dev.of_node;
+	int ret = -EINVAL;
+
+	irq = of_get_named_gpio(node, "qcom,gpio-connect", 0);
+	if (!gpio_is_valid(irq))
+		dir_apps_irq = platform_get_irq_byname(pdev, "wcd_irq");
+
+	if (!gpio_is_valid(irq) && dir_apps_irq < 0) {
+		dev_err(&pdev->dev, "TLMM connect gpio not found\n");
+		return -EPROBE_DEFER;
+	}
+
+	if (dir_apps_irq > 0) {
+		irq = dir_apps_irq;
+	} else {
+		irq = gpio_to_irq(irq);
+		if (irq < 0) {
+			dev_err(&pdev->dev, "Unable to configure irq\n");
+			return irq;
+		}
+	}
+	dev_dbg(&pdev->dev, "%s: virq = %d\n", __func__, irq);
+
+	data = wcd9xxx_irq_add_domain(node, node->parent);
+	if (!data) {
+		pr_err("%s: irq_add_domain failed\n", __func__);
+		return -EINVAL;
+	}
+	data->irq = irq;
+	wmb();
+	ret = 0;
+
+	return ret;
+}
+
+static int wcd9xxx_irq_remove(struct platform_device *pdev)
+{
+	struct irq_domain *domain;
+	struct wcd9xxx_irq_drv_data *data;
+
+	domain = irq_find_host(pdev->dev.of_node);
+	if (unlikely(!domain)) {
+		pr_err("%s: domain is NULL", __func__);
+		return -EINVAL;
+	}
+	data = (struct wcd9xxx_irq_drv_data *)domain->host_data;
+	data->irq = 0;
+	wmb();
+	irq_domain_remove(data->domain);
+	kfree(data);
+	domain->host_data = NULL;
+
+	return 0;
+}
+
+static const struct of_device_id of_match[] = {
+	{ .compatible = "qcom,wcd9xxx-irq" },
+	{ }
+};
+
+static struct platform_driver wcd9xxx_irq_driver = {
+	.probe = wcd9xxx_irq_probe,
+	.remove = wcd9xxx_irq_remove,
+	.driver = {
+		.name = "wcd9xxx_intc",
+		.owner = THIS_MODULE,
+		.of_match_table = of_match_ptr(of_match),
+	},
+};
+
+static int wcd9xxx_irq_drv_init(void)
+{
+	return platform_driver_register(&wcd9xxx_irq_driver);
+}
+subsys_initcall(wcd9xxx_irq_drv_init);
+
+static void wcd9xxx_irq_drv_exit(void)
+{
+	platform_driver_unregister(&wcd9xxx_irq_driver);
+}
+module_exit(wcd9xxx_irq_drv_exit);
+#endif /* CONFIG_OF */
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/mfd/wcd9xxx-regmap.h	2019-01-22 16:16:24.691257165 +0100
@@ -0,0 +1,89 @@
+/*
+ * Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _WCD9XXX_REGMAP_
+#define _WCD9XXX_REGMAP_
+
+#include <linux/regmap.h>
+#include <linux/mfd/wcd9xxx/core.h>
+
+typedef int (*regmap_patch_fptr)(struct regmap *, int);
+
+#ifdef CONFIG_WCD934X_CODEC
+extern struct regmap_config wcd934x_regmap_config;
+extern int wcd934x_regmap_register_patch(struct regmap *regmap,
+					 int version);
+#endif
+
+#ifdef CONFIG_WCD9335_CODEC
+extern struct regmap_config wcd9335_regmap_config;
+extern int wcd9335_regmap_register_patch(struct regmap *regmap,
+					 int version);
+#endif
+
+#ifdef CONFIG_WCD9330_CODEC
+extern struct regmap_config wcd9330_regmap_config;
+#endif
+
+static inline struct regmap_config *wcd9xxx_get_regmap_config(int type)
+{
+	struct regmap_config *regmap_config;
+
+	switch (type) {
+#ifdef CONFIG_WCD934X_CODEC
+	case WCD934X:
+		regmap_config = &wcd934x_regmap_config;
+		break;
+#endif
+#ifdef CONFIG_WCD9335_CODEC
+	case WCD9335:
+		regmap_config = &wcd9335_regmap_config;
+		break;
+#endif
+#ifdef CONFIG_WCD9330_CODEC
+	case WCD9330:
+		regmap_config = &wcd9330_regmap_config;
+		break;
+#endif
+	default:
+		regmap_config = NULL;
+		break;
+	}
+
+	return regmap_config;
+}
+
+static inline regmap_patch_fptr wcd9xxx_get_regmap_reg_patch(int type)
+{
+	regmap_patch_fptr apply_patch;
+
+	switch (type) {
+#ifdef CONFIG_WCD9335_CODEC
+	case WCD9335:
+		apply_patch = wcd9335_regmap_register_patch;
+		break;
+#endif
+#ifdef CONFIG_WCD934X_CODEC
+	case WCD934X:
+		apply_patch = wcd934x_regmap_register_patch;
+		break;
+#endif
+	default:
+		apply_patch = NULL;
+		break;
+	}
+
+	return apply_patch;
+}
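+
+/*
+ * Minimal usage sketch (assumption, not part of the original patch): a
+ * core driver would pair the two helpers above when bringing up a codec:
+ *
+ *	struct regmap_config *cfg = wcd9xxx_get_regmap_config(WCD9335);
+ *	regmap_patch_fptr patch = wcd9xxx_get_regmap_reg_patch(WCD9335);
+ *
+ *	if (!cfg)
+ *		return -EINVAL;		// codec type not compiled in
+ *	if (patch)
+ *		ret = patch(regmap, version);
+ */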
+
+#endif
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/mfd/wcd9xxx-slimslave.c	2019-01-22 16:16:24.691257165 +0100
@@ -0,0 +1,572 @@
+/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#include <linux/slab.h>
+#include <linux/mutex.h>
+#include <linux/mfd/wcd9xxx/wcd9xxx-slimslave.h>
+#include <linux/mfd/wcd9xxx/wcd9xxx_registers.h>
+
+struct wcd9xxx_slim_sch {
+	u16 rx_port_ch_reg_base;
+	u16 port_tx_cfg_reg_base;
+	u16 port_rx_cfg_reg_base;
+};
+
+static struct wcd9xxx_slim_sch sh_ch;
+
+static int wcd9xxx_alloc_slim_sh_ch(struct wcd9xxx *wcd9xxx,
+				    u8 wcd9xxx_pgd_la, u32 cnt,
+				    struct wcd9xxx_ch *channels, u32 path);
+
+static int wcd9xxx_dealloc_slim_sh_ch(struct slim_device *slim,
+				      u32 cnt, struct wcd9xxx_ch *channels);
+
+static int wcd9xxx_configure_ports(struct wcd9xxx *wcd9xxx)
+{
+	if (wcd9xxx->codec_type->slim_slave_type ==
+	    WCD9XXX_SLIM_SLAVE_ADDR_TYPE_0) {
+		sh_ch.rx_port_ch_reg_base = 0x180;
+		sh_ch.port_rx_cfg_reg_base = 0x040;
+		sh_ch.port_tx_cfg_reg_base = 0x040;
+	} else {
+		sh_ch.rx_port_ch_reg_base =
+			0x180 - (TAIKO_SB_PGD_OFFSET_OF_RX_SLAVE_DEV_PORTS * 4);
+		sh_ch.port_rx_cfg_reg_base =
+			0x040 - TAIKO_SB_PGD_OFFSET_OF_RX_SLAVE_DEV_PORTS;
+		sh_ch.port_tx_cfg_reg_base = 0x050;
+	}
+
+	return 0;
+}
+
+
+int wcd9xxx_init_slimslave(struct wcd9xxx *wcd9xxx, u8 wcd9xxx_pgd_la,
+			   unsigned int tx_num, unsigned int *tx_slot,
+			   unsigned int rx_num, unsigned int *rx_slot)
+{
+	int ret = 0;
+	int i;
+
+	ret = wcd9xxx_configure_ports(wcd9xxx);
+	if (ret) {
+		pr_err("%s: Failed to configure register address offset\n",
+		       __func__);
+		goto err;
+	}
+
+	if (!rx_num || rx_num > wcd9xxx->num_rx_port) {
+		pr_err("%s: invalid rx num %d\n", __func__, rx_num);
+		return -EINVAL;
+	}
+	if (wcd9xxx->rx_chs) {
+		wcd9xxx->num_rx_port = rx_num;
+		for (i = 0; i < rx_num; i++) {
+			wcd9xxx->rx_chs[i].ch_num = rx_slot[i];
+			INIT_LIST_HEAD(&wcd9xxx->rx_chs[i].list);
+		}
+		ret = wcd9xxx_alloc_slim_sh_ch(wcd9xxx, wcd9xxx_pgd_la,
+						wcd9xxx->num_rx_port,
+						wcd9xxx->rx_chs,
+						SLIM_SINK);
+		if (ret) {
+			pr_err("%s: Failed to alloc %d rx slimbus channels\n",
+				__func__, wcd9xxx->num_rx_port);
+			kfree(wcd9xxx->rx_chs);
+			wcd9xxx->rx_chs = NULL;
+			wcd9xxx->num_rx_port = 0;
+		}
+	} else {
+		pr_err("Not able to allocate memory for %d slimbus rx ports\n",
+			wcd9xxx->num_rx_port);
+	}
+
+	if (!tx_num || tx_num > wcd9xxx->num_tx_port) {
+		pr_err("%s: invalid tx num %d\n", __func__, tx_num);
+		return -EINVAL;
+	}
+	if (wcd9xxx->tx_chs) {
+		wcd9xxx->num_tx_port = tx_num;
+		for (i = 0; i < tx_num; i++) {
+			wcd9xxx->tx_chs[i].ch_num = tx_slot[i];
+			INIT_LIST_HEAD(&wcd9xxx->tx_chs[i].list);
+		}
+		ret = wcd9xxx_alloc_slim_sh_ch(wcd9xxx, wcd9xxx_pgd_la,
+						wcd9xxx->num_tx_port,
+						wcd9xxx->tx_chs,
+						SLIM_SRC);
+		if (ret) {
+			pr_err("%s: Failed to alloc %d tx slimbus channels\n",
+				__func__, wcd9xxx->num_tx_port);
+			kfree(wcd9xxx->tx_chs);
+			wcd9xxx->tx_chs = NULL;
+			wcd9xxx->num_tx_port = 0;
+		}
+	} else {
+		pr_err("Not able to allocate memory for %d slimbus tx ports\n",
+			wcd9xxx->num_tx_port);
+	}
+	return 0;
+err:
+	return ret;
+}
+
+int wcd9xxx_deinit_slimslave(struct wcd9xxx *wcd9xxx)
+{
+	if (wcd9xxx->num_rx_port) {
+		wcd9xxx_dealloc_slim_sh_ch(wcd9xxx->slim,
+					wcd9xxx->num_rx_port,
+					wcd9xxx->rx_chs);
+		wcd9xxx->num_rx_port = 0;
+	}
+	if (wcd9xxx->num_tx_port) {
+		wcd9xxx_dealloc_slim_sh_ch(wcd9xxx->slim,
+					wcd9xxx->num_tx_port,
+					wcd9xxx->tx_chs);
+		wcd9xxx->num_tx_port = 0;
+	}
+	return 0;
+}
+
+
+static int wcd9xxx_alloc_slim_sh_ch(struct wcd9xxx *wcd9xxx,
+				    u8 wcd9xxx_pgd_la, u32 cnt,
+				    struct wcd9xxx_ch *channels, u32 path)
+{
+	int ret = 0;
+	u32 ch_idx;
+
+	/*
+	 * The slimbus channel allocation seems to take a long time,
+	 * so do the allocation up front to avoid delaying the start
+	 * of playback.
+	 */
+	pr_debug("%s: pgd_la[%d]\n", __func__, wcd9xxx_pgd_la);
+	for (ch_idx = 0; ch_idx < cnt; ch_idx++) {
+		ret = slim_get_slaveport(wcd9xxx_pgd_la,
+					channels[ch_idx].port,
+					&channels[ch_idx].sph, path);
+		pr_debug("%s: pgd_la[%d] channels[%d].port[%d]\n"
+			"channels[%d].sph[%d] path[%d]\n",
+			__func__, wcd9xxx_pgd_la, ch_idx,
+			channels[ch_idx].port,
+			ch_idx, channels[ch_idx].sph, path);
+		if (ret < 0) {
+			pr_err("%s: slave port failure id[%d] ret[%d]\n",
+				__func__, channels[ch_idx].ch_num, ret);
+			goto err;
+		}
+
+		ret = slim_query_ch(wcd9xxx->slim,
+				    channels[ch_idx].ch_num,
+				    &channels[ch_idx].ch_h);
+		if (ret < 0) {
+			pr_err("%s: slim_query_ch failed ch-num[%d] ret[%d]\n",
+				__func__, channels[ch_idx].ch_num, ret);
+			goto err;
+		}
+	}
+err:
+	return ret;
+}
+
+static int wcd9xxx_dealloc_slim_sh_ch(struct slim_device *slim,
+			u32 cnt, struct wcd9xxx_ch *channels)
+{
+	int idx = 0;
+	int ret = 0;
+	/* slim_dealloc_ch */
+	for (idx = 0; idx < cnt; idx++) {
+		ret = slim_dealloc_ch(slim, channels[idx].ch_h);
+		if (ret < 0) {
+			pr_err("%s: slim_dealloc_ch fail ret[%d] ch_h[%d]\n",
+				__func__, ret, channels[idx].ch_h);
+		}
+	}
+	return ret;
+}
+
+/* Enable slimbus slave device for RX path */
+int wcd9xxx_cfg_slim_sch_rx(struct wcd9xxx *wcd9xxx,
+			    struct list_head *wcd9xxx_ch_list,
+			    unsigned int rate, unsigned int bit_width,
+			    u16 *grph)
+{
+	u8 ch_cnt = 0;
+	u16 ch_h[SLIM_MAX_RX_PORTS] = {0};
+	u8  payload = 0;
+	u16 codec_port = 0;
+	int ret;
+	struct slim_ch prop;
+	struct wcd9xxx_ch *rx;
+	int size = ARRAY_SIZE(ch_h);
+
+	/* Configure slave interface device */
+
+	list_for_each_entry(rx, wcd9xxx_ch_list, list) {
+		payload |= 1 << rx->shift;
+		if (ch_cnt < size) {
+			ch_h[ch_cnt] = rx->ch_h;
+			ch_cnt++;
+			pr_debug("list ch->ch_h %d ch->sph %d\n",
+				 rx->ch_h, rx->sph);
+		} else {
+			pr_err("%s: allocated channel number %u is out of max rangae %d\n",
+			       __func__, ch_cnt,
+			       size);
+			ret = EINVAL;
+			goto err;
+		}
+	}
+	pr_debug("%s: ch_cnt[%d] rate=%d WATER_MARK_VAL %d\n",
+		 __func__, ch_cnt, rate, WATER_MARK_VAL);
+	/* slim_define_ch api */
+	prop.prot = SLIM_AUTO_ISO;
+	if ((rate == 44100) || (rate == 88200) || (rate == 176400) ||
+	    (rate == 352800)) {
+		prop.baser = SLIM_RATE_11025HZ;
+		prop.ratem = (rate/11025);
+	} else {
+		prop.baser = SLIM_RATE_4000HZ;
+		prop.ratem = (rate/4000);
+	}
+	prop.dataf = SLIM_CH_DATAF_NOT_DEFINED;
+	prop.auxf = SLIM_CH_AUXF_NOT_APPLICABLE;
+	prop.sampleszbits = bit_width;
+
+	pr_debug("Before slim_define_ch:\n"
+		 "ch_cnt %d,ch_h[0] %d ch_h[1] %d, grph %d\n",
+		 ch_cnt, ch_h[0], ch_h[1], *grph);
+	ret = slim_define_ch(wcd9xxx->slim, &prop, ch_h, ch_cnt,
+			     true, grph);
+	if (ret < 0) {
+		pr_err("%s: slim_define_ch failed ret[%d]\n",
+		       __func__, ret);
+		goto err;
+	}
+
+	list_for_each_entry(rx, wcd9xxx_ch_list, list) {
+		codec_port = rx->port;
+		pr_debug("%s: codec_port %d rx 0x%p, payload %d\n"
+			 "sh_ch.rx_port_ch_reg_base0 0x%x\n"
+			 "sh_ch.port_rx_cfg_reg_base 0x%x\n",
+			 __func__, codec_port, rx, payload,
+			 sh_ch.rx_port_ch_reg_base,
+			sh_ch.port_rx_cfg_reg_base);
+
+		/* look for the valid port range and chose the
+		 * payload accordingly
+		 */
+		/* write to interface device */
+		ret = wcd9xxx_interface_reg_write(wcd9xxx,
+				SB_PGD_RX_PORT_MULTI_CHANNEL_0(
+				sh_ch.rx_port_ch_reg_base, codec_port),
+				payload);
+
+		if (ret < 0) {
+			pr_err("%s:Intf-dev fail reg[%d] payload[%d] ret[%d]\n",
+				__func__,
+				SB_PGD_RX_PORT_MULTI_CHANNEL_0(
+				sh_ch.rx_port_ch_reg_base, codec_port),
+				payload, ret);
+			goto err;
+		}
+		/* configure the slave port for water mark and enable*/
+		ret = wcd9xxx_interface_reg_write(wcd9xxx,
+				SB_PGD_PORT_CFG_BYTE_ADDR(
+				sh_ch.port_rx_cfg_reg_base, codec_port),
+				WATER_MARK_VAL);
+		if (ret < 0) {
+			pr_err("%s:watermark set failure for port[%d] ret[%d]",
+				__func__, codec_port, ret);
+		}
+
+		ret = slim_connect_sink(wcd9xxx->slim, &rx->sph, 1, rx->ch_h);
+		if (ret < 0) {
+			pr_err("%s: slim_connect_sink failed ret[%d]\n",
+				__func__, ret);
+			goto err_close_slim_sch;
+		}
+	}
+	/* slim_control_ch */
+	ret = slim_control_ch(wcd9xxx->slim, *grph, SLIM_CH_ACTIVATE,
+			      true);
+	if (ret < 0) {
+		pr_err("%s: slim_control_ch failed ret[%d]\n",
+			__func__, ret);
+		goto err_close_slim_sch;
+	}
+	return 0;
+
+err_close_slim_sch:
+	/*  release all acquired handles */
+	wcd9xxx_close_slim_sch_rx(wcd9xxx, wcd9xxx_ch_list, *grph);
+err:
+	return ret;
+}
+EXPORT_SYMBOL_GPL(wcd9xxx_cfg_slim_sch_rx);
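+
+/*
+ * Worked example (editorial illustration): for rate = 48000 the 4 kHz
+ * base is chosen, so prop.baser = SLIM_RATE_4000HZ and prop.ratem = 12;
+ * for rate = 44100 the 11.025 kHz base applies and prop.ratem = 4.
+ */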
+
+/* Enable slimbus slave device for TX path */
+int wcd9xxx_cfg_slim_sch_tx(struct wcd9xxx *wcd9xxx,
+			    struct list_head *wcd9xxx_ch_list,
+			    unsigned int rate, unsigned int bit_width,
+			    u16 *grph)
+{
+	u16 ch_cnt = 0;
+	u16 payload = 0;
+	u16 ch_h[SLIM_MAX_TX_PORTS] = {0};
+	u16 codec_port;
+	int ret = 0;
+	struct wcd9xxx_ch *tx;
+	int size = ARRAY_SIZE(ch_h);
+
+	struct slim_ch prop;
+
+	list_for_each_entry(tx, wcd9xxx_ch_list, list) {
+		payload |= 1 << tx->shift;
+		if (ch_cnt < size) {
+			ch_h[ch_cnt] = tx->ch_h;
+			ch_cnt++;
+		} else {
+			pr_err("%s: allocated channel number %u is out of max rangae %d\n",
+			       __func__, ch_cnt,
+			       size);
+			ret = EINVAL;
+			goto err;
+		}
+	}
+
+	/* slim_define_ch api */
+	prop.prot = SLIM_AUTO_ISO;
+	prop.baser = SLIM_RATE_4000HZ;
+	prop.dataf = SLIM_CH_DATAF_NOT_DEFINED;
+	prop.auxf = SLIM_CH_AUXF_NOT_APPLICABLE;
+	prop.ratem = (rate/4000);
+	prop.sampleszbits = bit_width;
+	ret = slim_define_ch(wcd9xxx->slim, &prop, ch_h, ch_cnt,
+			     true, grph);
+	if (ret < 0) {
+		pr_err("%s: slim_define_ch failed ret[%d]\n",
+		       __func__, ret);
+		goto err;
+	}
+
+	pr_debug("%s: ch_cnt[%d] rate[%d] bitwidth[%u]\n", __func__, ch_cnt,
+		 rate, bit_width);
+	list_for_each_entry(tx, wcd9xxx_ch_list, list) {
+		codec_port = tx->port;
+		pr_debug("%s: codec_port %d tx 0x%p, payload 0x%x\n",
+			 __func__, codec_port, tx, payload);
+		/* write to interface device */
+		ret = wcd9xxx_interface_reg_write(wcd9xxx,
+				SB_PGD_TX_PORT_MULTI_CHANNEL_0(codec_port),
+				payload & 0x00FF);
+		if (ret < 0) {
+			pr_err("%s:Intf-dev fail reg[%d] payload[%d] ret[%d]\n",
+				__func__,
+				SB_PGD_TX_PORT_MULTI_CHANNEL_0(codec_port),
+				payload, ret);
+			goto err;
+		}
+		/* ports 8,9 */
+		ret = wcd9xxx_interface_reg_write(wcd9xxx,
+				SB_PGD_TX_PORT_MULTI_CHANNEL_1(codec_port),
+				(payload & 0xFF00)>>8);
+		if (ret < 0) {
+			pr_err("%s:Intf-dev fail reg[%d] payload[%d] ret[%d]\n",
+				__func__,
+				SB_PGD_TX_PORT_MULTI_CHANNEL_1(codec_port),
+				payload, ret);
+			goto err;
+		}
+		/* configure the slave port for water mark and enable*/
+		ret = wcd9xxx_interface_reg_write(wcd9xxx,
+				SB_PGD_PORT_CFG_BYTE_ADDR(
+				sh_ch.port_tx_cfg_reg_base, codec_port),
+				WATER_MARK_VAL);
+		if (ret < 0) {
+			pr_err("%s:watermark set failure for port[%d] ret[%d]",
+				__func__, codec_port, ret);
+		}
+
+		ret = slim_connect_src(wcd9xxx->slim, tx->sph, tx->ch_h);
+
+		if (ret < 0) {
+			pr_err("%s: slim_connect_src failed ret[%d]\n",
+			       __func__, ret);
+			goto err;
+		}
+	}
+	/* slim_control_ch */
+	ret = slim_control_ch(wcd9xxx->slim, *grph, SLIM_CH_ACTIVATE,
+			      true);
+	if (ret < 0) {
+		pr_err("%s: slim_control_ch failed ret[%d]\n",
+			__func__, ret);
+		goto err;
+	}
+	return 0;
+err:
+	/* release all acquired handles */
+	wcd9xxx_close_slim_sch_tx(wcd9xxx, wcd9xxx_ch_list, *grph);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(wcd9xxx_cfg_slim_sch_tx);
+
+int wcd9xxx_close_slim_sch_rx(struct wcd9xxx *wcd9xxx,
+			      struct list_head *wcd9xxx_ch_list, u16 grph)
+{
+	u32 sph[SLIM_MAX_RX_PORTS] = {0};
+	int ch_cnt = 0;
+	int ret = 0;
+	struct wcd9xxx_ch *rx;
+
+	list_for_each_entry(rx, wcd9xxx_ch_list, list)
+		sph[ch_cnt++] = rx->sph;
+
+	pr_debug("%s ch_cht %d, sph[0] %d sph[1] %d\n", __func__, ch_cnt,
+		sph[0], sph[1]);
+
+	/* slim_control_ch (REMOVE) */
+	pr_debug("%s before slim_control_ch grph %d\n", __func__, grph);
+	ret = slim_control_ch(wcd9xxx->slim, grph, SLIM_CH_REMOVE, true);
+	if (ret < 0) {
+		pr_err("%s: slim_control_ch failed ret[%d]\n", __func__, ret);
+		goto err;
+	}
+err:
+	return ret;
+}
+EXPORT_SYMBOL_GPL(wcd9xxx_close_slim_sch_rx);
+
+int wcd9xxx_close_slim_sch_tx(struct wcd9xxx *wcd9xxx,
+			      struct list_head *wcd9xxx_ch_list,
+			      u16 grph)
+{
+	u32 sph[SLIM_MAX_TX_PORTS] = {0};
+	int ret = 0;
+	int ch_cnt = 0;
+	struct wcd9xxx_ch *tx;
+
+	pr_debug("%s\n", __func__);
+	list_for_each_entry(tx, wcd9xxx_ch_list, list)
+		sph[ch_cnt++] = tx->sph;
+
+	pr_debug("%s ch_cht %d, sph[0] %d sph[1] %d\n",
+		__func__, ch_cnt, sph[0], sph[1]);
+	/* slim_control_ch (REMOVE) */
+	ret = slim_control_ch(wcd9xxx->slim, grph, SLIM_CH_REMOVE, true);
+	if (ret < 0) {
+		pr_err("%s: slim_control_ch failed ret[%d]\n",
+			__func__, ret);
+		goto err;
+	}
+err:
+	return ret;
+}
+EXPORT_SYMBOL_GPL(wcd9xxx_close_slim_sch_tx);
+
+int wcd9xxx_get_slave_port(unsigned int ch_num)
+{
+	int ret = 0;
+
+	ret = (ch_num - BASE_CH_NUM);
+	pr_debug("%s: ch_num[%d] slave port[%d]\n", __func__, ch_num, ret);
+	if (ret < 0) {
+		pr_err("%s: Error:- Invalid slave port found = %d\n",
+			__func__, ret);
+		return -EINVAL;
+	}
+	return ret;
+}
+EXPORT_SYMBOL_GPL(wcd9xxx_get_slave_port);
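+
+/*
+ * Example (editorial illustration; the BASE_CH_NUM value is assumed):
+ * with BASE_CH_NUM = 128, channel number 130 maps to slave port 2,
+ * while any ch_num below BASE_CH_NUM is rejected with -EINVAL.
+ */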
+
+int wcd9xxx_disconnect_port(struct wcd9xxx *wcd9xxx,
+			    struct list_head *wcd9xxx_ch_list, u16 grph)
+{
+	u32 sph[SLIM_MAX_TX_PORTS + SLIM_MAX_RX_PORTS] = {0};
+	int ch_cnt = 0;
+	int ret = 0;
+	struct wcd9xxx_ch *slim_ch;
+
+	list_for_each_entry(slim_ch, wcd9xxx_ch_list, list)
+		sph[ch_cnt++] = slim_ch->sph;
+
+	/* slim_disconnect_port */
+	ret = slim_disconnect_ports(wcd9xxx->slim, sph, ch_cnt);
+	if (ret < 0) {
+		pr_err("%s: slim_disconnect_ports failed ret[%d]\n",
+			__func__, ret);
+	}
+	return ret;
+}
+EXPORT_SYMBOL_GPL(wcd9xxx_disconnect_port);
+
+/* This function is called with mutex acquired */
+int wcd9xxx_rx_vport_validation(u32 port_id,
+				struct list_head *codec_dai_list)
+{
+	struct wcd9xxx_ch *ch;
+	int ret = 0;
+
+	pr_debug("%s: port_id %u\n", __func__, port_id);
+
+	list_for_each_entry(ch,
+		codec_dai_list, list) {
+		pr_debug("%s: ch->port %u\n", __func__, ch->port);
+		if (ch->port == port_id) {
+			ret = -EINVAL;
+			break;
+		}
+	}
+	return ret;
+}
+EXPORT_SYMBOL_GPL(wcd9xxx_rx_vport_validation);
+
+
+/* This function is called with mutex acquired */
+int wcd9xxx_tx_vport_validation(u32 table, u32 port_id,
+				struct wcd9xxx_codec_dai_data *codec_dai,
+				u32 num_codec_dais)
+{
+	struct wcd9xxx_ch *ch;
+	int ret = 0;
+	u32 index;
+	unsigned long vtable = table;
+	u32 size = sizeof(table) * BITS_PER_BYTE;
+
+	pr_debug("%s: vtable 0x%lx port_id %u size %d\n", __func__,
+		 vtable, port_id, size);
+	for_each_set_bit(index, &vtable, size) {
+		if (index < num_codec_dais) {
+			list_for_each_entry(ch,
+					&codec_dai[index].wcd9xxx_ch_list,
+					list) {
+				pr_debug("%s: index %u ch->port %u vtable 0x%lx\n",
+						__func__, index, ch->port,
+						vtable);
+				if (ch->port == port_id) {
+					pr_err("%s: TX%u is used by AIF%u_CAP Mixer\n",
+							__func__, port_id + 1,
+							(index + 1)/2);
+					ret = -EINVAL;
+					break;
+				}
+			}
+		} else {
+			pr_err("%s: Invalid index %d of codec dai",
+					__func__, index);
+			ret = -EINVAL;
+		}
+		if (ret)
+			break;
+	}
+	return ret;
+}
+EXPORT_SYMBOL_GPL(wcd9xxx_tx_vport_validation);
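+
+/*
+ * Example (editorial illustration): with table = 0x5, bits 0 and 2 are
+ * set, so the channel lists of codec_dai[0] and codec_dai[2] are
+ * scanned; if either already contains port_id, -EINVAL is returned to
+ * flag the port as claimed by another AIF capture mixer.
+ */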
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/mfd/wcd9xxx-utils.c	2019-01-22 16:16:24.691257165 +0100
@@ -0,0 +1,1199 @@
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_gpio.h>
+#include <linux/of_irq.h>
+#include <linux/of_device.h>
+#include <linux/slab.h>
+#include <linux/regmap.h>
+#include <linux/delay.h>
+#include <linux/sched.h>
+#include <linux/mfd/core.h>
+#include <linux/mfd/wcd9xxx/pdata.h>
+#include <linux/mfd/wcd9xxx/core.h>
+#include <linux/mfd/wcd9xxx/wcd9xxx-irq.h>
+#include <linux/mfd/msm-cdc-supply.h>
+#include <linux/mfd/msm-cdc-pinctrl.h>
+#include <linux/mfd/wcd9xxx/wcd9xxx-utils.h>
+
+#define REG_BYTES 2
+#define VAL_BYTES 1
+/*
+ * Page Register Address that APP Proc uses to
+ * access WCD9335 Codec registers is identified
+ * as 0x00
+ */
+#define PAGE_REG_ADDR 0x00
+
+static enum wcd9xxx_intf_status wcd9xxx_intf = -1;
+
+static struct mfd_cell tavil_devs[] = {
+	{
+		.name = "qcom-wcd-pinctrl",
+		.of_compatible = "qcom,wcd-pinctrl",
+	},
+	{
+		.name = "tavil_codec",
+	},
+};
+
+static struct mfd_cell tasha_devs[] = {
+	{
+		.name = "tasha_codec",
+	},
+};
+
+static struct mfd_cell tomtom_devs[] = {
+	{
+		.name = "tomtom_codec",
+	},
+};
+
+static int wcd9xxx_read_of_property_u32(struct device *dev, const char *name,
+					u32 *val)
+{
+	int rc = 0;
+
+	rc = of_property_read_u32(dev->of_node, name, val);
+	if (rc)
+		dev_err(dev, "%s: Looking up %s property in node %s failed",
+			__func__, name, dev->of_node->full_name);
+
+	return rc;
+}
+
+static void wcd9xxx_dt_parse_micbias_info(struct device *dev,
+					  struct wcd9xxx_micbias_setting *mb)
+{
+	u32 prop_val;
+	int rc;
+
+	if (of_find_property(dev->of_node, "qcom,cdc-micbias-ldoh-v", NULL)) {
+		rc = wcd9xxx_read_of_property_u32(dev,
+						  "qcom,cdc-micbias-ldoh-v",
+						  &prop_val);
+		if (!rc)
+			mb->ldoh_v  =  (u8)prop_val;
+	}
+
+	/* MB1 */
+	if (of_find_property(dev->of_node, "qcom,cdc-micbias-cfilt1-mv",
+			     NULL)) {
+		rc = wcd9xxx_read_of_property_u32(dev,
+						  "qcom,cdc-micbias-cfilt1-mv",
+						   &prop_val);
+		if (!rc)
+			mb->cfilt1_mv = prop_val;
+
+		rc = wcd9xxx_read_of_property_u32(dev,
+						"qcom,cdc-micbias1-cfilt-sel",
+						&prop_val);
+		if (!rc)
+			mb->bias1_cfilt_sel = (u8)prop_val;
+
+	} else if (of_find_property(dev->of_node, "qcom,cdc-micbias1-mv",
+				    NULL)) {
+		rc = wcd9xxx_read_of_property_u32(dev,
+						  "qcom,cdc-micbias1-mv",
+						  &prop_val);
+		if (!rc)
+			mb->micb1_mv = prop_val;
+	} else {
+		dev_info(dev, "%s: Micbias1 DT property not found\n",
+			__func__);
+	}
+
+	/* MB2 */
+	if (of_find_property(dev->of_node, "qcom,cdc-micbias-cfilt2-mv",
+			     NULL)) {
+		rc = wcd9xxx_read_of_property_u32(dev,
+						  "qcom,cdc-micbias-cfilt2-mv",
+						   &prop_val);
+		if (!rc)
+			mb->cfilt2_mv = prop_val;
+
+		rc = wcd9xxx_read_of_property_u32(dev,
+						"qcom,cdc-micbias2-cfilt-sel",
+						&prop_val);
+		if (!rc)
+			mb->bias2_cfilt_sel = (u8)prop_val;
+
+	} else if (of_find_property(dev->of_node, "qcom,cdc-micbias2-mv",
+				    NULL)) {
+		rc = wcd9xxx_read_of_property_u32(dev,
+						  "qcom,cdc-micbias2-mv",
+						  &prop_val);
+		if (!rc)
+			mb->micb2_mv = prop_val;
+	} else {
+		dev_info(dev, "%s: Micbias2 DT property not found\n",
+			__func__);
+	}
+
+	/* MB3 */
+	if (of_find_property(dev->of_node, "qcom,cdc-micbias-cfilt3-mv",
+			     NULL)) {
+		rc = wcd9xxx_read_of_property_u32(dev,
+						  "qcom,cdc-micbias-cfilt3-mv",
+						   &prop_val);
+		if (!rc)
+			mb->cfilt3_mv = prop_val;
+
+		rc = wcd9xxx_read_of_property_u32(dev,
+						"qcom,cdc-micbias3-cfilt-sel",
+						&prop_val);
+		if (!rc)
+			mb->bias3_cfilt_sel = (u8)prop_val;
+
+	} else if (of_find_property(dev->of_node, "qcom,cdc-micbias3-mv",
+				    NULL)) {
+		rc = wcd9xxx_read_of_property_u32(dev,
+						  "qcom,cdc-micbias3-mv",
+						  &prop_val);
+		if (!rc)
+			mb->micb3_mv = prop_val;
+	} else {
+		dev_info(dev, "%s: Micbias3 DT property not found\n",
+			__func__);
+	}
+
+	/* MB4 */
+	if (of_find_property(dev->of_node, "qcom,cdc-micbias4-cfilt-sel",
+			     NULL)) {
+		rc = wcd9xxx_read_of_property_u32(dev,
+						"qcom,cdc-micbias4-cfilt-sel",
+						&prop_val);
+		if (!rc)
+			mb->bias4_cfilt_sel = (u8)prop_val;
+
+	} else if (of_find_property(dev->of_node, "qcom,cdc-micbias4-mv",
+				    NULL)) {
+		rc = wcd9xxx_read_of_property_u32(dev,
+						  "qcom,cdc-micbias4-mv",
+						  &prop_val);
+		if (!rc)
+			mb->micb4_mv = prop_val;
+	} else {
+		dev_info(dev, "%s: Micbias4 DT property not found\n",
+			__func__);
+	}
+
+	mb->bias1_cap_mode =
+	   (of_property_read_bool(dev->of_node, "qcom,cdc-micbias1-ext-cap") ?
+	 MICBIAS_EXT_BYP_CAP : MICBIAS_NO_EXT_BYP_CAP);
+	mb->bias2_cap_mode =
+	   (of_property_read_bool(dev->of_node, "qcom,cdc-micbias2-ext-cap") ?
+	    MICBIAS_EXT_BYP_CAP : MICBIAS_NO_EXT_BYP_CAP);
+	mb->bias3_cap_mode =
+	   (of_property_read_bool(dev->of_node, "qcom,cdc-micbias3-ext-cap") ?
+	    MICBIAS_EXT_BYP_CAP : MICBIAS_NO_EXT_BYP_CAP);
+	mb->bias4_cap_mode =
+	   (of_property_read_bool(dev->of_node, "qcom,cdc-micbias4-ext-cap") ?
+	    MICBIAS_EXT_BYP_CAP : MICBIAS_NO_EXT_BYP_CAP);
+
+	mb->bias2_is_headset_only =
+		of_property_read_bool(dev->of_node,
+				      "qcom,cdc-micbias2-headset-only");
+
+	/* Print micbias info */
+	dev_dbg(dev, "%s: ldoh_v %u cfilt1_mv %u cfilt2_mv %u cfilt3_mv %u\n",
+		__func__, (u32)mb->ldoh_v, (u32)mb->cfilt1_mv,
+		(u32)mb->cfilt2_mv, (u32)mb->cfilt3_mv);
+
+	dev_dbg(dev, "%s: micb1_mv %u micb2_mv %u micb3_mv %u micb4_mv %u\n",
+		__func__, mb->micb1_mv, mb->micb2_mv,
+		mb->micb3_mv, mb->micb4_mv);
+
+	dev_dbg(dev, "%s: bias1_cfilt_sel %u bias2_cfilt_sel %u\n",
+		__func__, (u32)mb->bias1_cfilt_sel, (u32)mb->bias2_cfilt_sel);
+
+	dev_dbg(dev, "%s: bias3_cfilt_sel %u bias4_cfilt_sel %u\n",
+		__func__, (u32)mb->bias3_cfilt_sel, (u32)mb->bias4_cfilt_sel);
+
+	dev_dbg(dev, "%s: bias1_ext_cap %d bias2_ext_cap %d\n",
+		__func__, mb->bias1_cap_mode, mb->bias2_cap_mode);
+
+	dev_dbg(dev, "%s: bias3_ext_cap %d bias4_ext_cap %d\n",
+		__func__, mb->bias3_cap_mode, mb->bias4_cap_mode);
+
+	dev_dbg(dev, "%s: bias2_is_headset_only %d\n",
+		__func__, mb->bias2_is_headset_only);
+}
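+
+/*
+ * Example (hypothetical DT snippet, for illustration only): a board
+ * using per-micbias voltages instead of the cfilt rails could specify:
+ *
+ *	qcom,cdc-micbias1-mv = <1800>;
+ *	qcom,cdc-micbias2-mv = <2700>;
+ *	qcom,cdc-micbias2-headset-only;
+ *
+ * With these properties, micb1_mv/micb2_mv are filled in above, the
+ * cfilt branches are skipped, and bias2_is_headset_only is set.
+ */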
+
+/*
+ * wcd9xxx_validate_dmic_sample_rate:
+ *	Given the dmic_sample_rate and mclk rate, validate the
+ *	dmic_sample_rate. If the dmic rate is found to be invalid,
+ *	return the dmic rate as undefined, so individual codec
+ *	drivers can use their own defaults.
+ * @dev: the device for which the dmic is to be configured
+ * @dmic_sample_rate: The input dmic_sample_rate
+ * @mclk_rate: The input codec mclk rate
+ * @dmic_rate_type: String to indicate the type of dmic sample
+ *		    rate, used for debug/error logging.
+ *
+ * Returns the validated dmic sample rate, or
+ * WCD9XXX_DMIC_SAMPLE_RATE_UNDEFINED for an invalid rate.
+ */
+static u32 wcd9xxx_validate_dmic_sample_rate(struct device *dev,
+		u32 dmic_sample_rate, u32 mclk_rate,
+		const char *dmic_rate_type)
+{
+	u32 div_factor;
+
+	if (dmic_sample_rate == WCD9XXX_DMIC_SAMPLE_RATE_UNDEFINED ||
+	    mclk_rate % dmic_sample_rate != 0)
+		goto undefined_rate;
+
+	div_factor = mclk_rate / dmic_sample_rate;
+
+	switch (div_factor) {
+	case 2:
+	case 3:
+	case 4:
+	case 8:
+	case 16:
+		/* Valid dmic DIV factors */
+		dev_dbg(dev, "%s: DMIC_DIV = %u, mclk_rate = %u\n",
+			__func__, div_factor, mclk_rate);
+		break;
+	case 6:
+		/*
+		 * DIV 6 is valid for both 9.6MHz and 12.288MHz
+		 * MCLK on Tavil. Older codecs support DIV6 only
+		 * for 12.288MHz MCLK.
+		 */
+		if ((mclk_rate == WCD9XXX_MCLK_CLK_9P6HZ) &&
+		    (of_device_is_compatible(dev->of_node,
+					     "qcom,tavil-slim-pgd")))
+			dev_dbg(dev, "%s: DMIC_DIV = %u, mclk_rate = %u\n",
+				__func__, div_factor, mclk_rate);
+		else if (mclk_rate != WCD9XXX_MCLK_CLK_12P288MHZ)
+			goto undefined_rate;
+		break;
+	default:
+		/* Any other DIV factor is invalid */
+		goto undefined_rate;
+	}
+
+	return dmic_sample_rate;
+
+undefined_rate:
+	dev_dbg(dev, "%s: Invalid %s = %d, for mclk %d\n",
+		 __func__, dmic_rate_type, dmic_sample_rate, mclk_rate);
+	dmic_sample_rate = WCD9XXX_DMIC_SAMPLE_RATE_UNDEFINED;
+
+	return dmic_sample_rate;
+}
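+
+/*
+ * Example: for mclk_rate = 12288000 and dmic_sample_rate = 4096000,
+ * div_factor is 3, a valid divider, so 4096000 is returned unchanged.
+ * For dmic_sample_rate = 5000000 the modulo check fails and
+ * WCD9XXX_DMIC_SAMPLE_RATE_UNDEFINED is returned instead.
+ */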
+
+/*
+ * wcd9xxx_populate_dt_data:
+ *	Parse device tree properties for the given codec device
+ *
+ * @dev: pointer to codec device
+ *
+ * Returns pointer to the platform data resulting from parsing
+ * device tree.
+ */
+struct wcd9xxx_pdata *wcd9xxx_populate_dt_data(struct device *dev)
+{
+	struct wcd9xxx_pdata *pdata;
+	u32 dmic_sample_rate = WCD9XXX_DMIC_SAMPLE_RATE_UNDEFINED;
+	u32 mad_dmic_sample_rate = WCD9XXX_DMIC_SAMPLE_RATE_UNDEFINED;
+	u32 ecpp_dmic_sample_rate = WCD9XXX_DMIC_SAMPLE_RATE_UNDEFINED;
+	u32 dmic_clk_drive = WCD9XXX_DMIC_CLK_DRIVE_UNDEFINED;
+	u32 prop_val;
+	int rc = 0;
+
+	if (!dev || !dev->of_node)
+		return NULL;
+
+	pdata = devm_kzalloc(dev, sizeof(struct wcd9xxx_pdata),
+			     GFP_KERNEL);
+	if (!pdata)
+		return NULL;
+
+	/* Parse power supplies */
+	msm_cdc_get_power_supplies(dev, &pdata->regulator,
+				   &pdata->num_supplies);
+	if (!pdata->regulator || (pdata->num_supplies <= 0)) {
+		dev_err(dev, "%s: no power supplies defined for codec\n",
+			__func__);
+		goto err_power_sup;
+	}
+
+	/* Parse micbias info */
+	wcd9xxx_dt_parse_micbias_info(dev, &pdata->micbias);
+
+	pdata->wcd_rst_np = of_parse_phandle(dev->of_node,
+					     "qcom,wcd-rst-gpio-node", 0);
+	if (!pdata->wcd_rst_np) {
+		dev_err(dev, "%s: Looking up %s property in node %s failed\n",
+			__func__, "qcom,wcd-rst-gpio-node",
+			dev->of_node->full_name);
+		goto err_parse_dt_prop;
+	}
+
+	if (!(wcd9xxx_read_of_property_u32(dev, "qcom,cdc-mclk-clk-rate",
+					   &prop_val)))
+		pdata->mclk_rate = prop_val;
+
+	if (pdata->mclk_rate != WCD9XXX_MCLK_CLK_9P6HZ &&
+	    pdata->mclk_rate != WCD9XXX_MCLK_CLK_12P288MHZ) {
+		dev_err(dev, "%s: Invalid mclk_rate = %u\n", __func__,
+			pdata->mclk_rate);
+		goto err_parse_dt_prop;
+	}
+
+	if (!(wcd9xxx_read_of_property_u32(dev, "qcom,cdc-dmic-sample-rate",
+					   &prop_val)))
+		dmic_sample_rate = prop_val;
+
+	pdata->dmic_sample_rate = wcd9xxx_validate_dmic_sample_rate(dev,
+							dmic_sample_rate,
+							pdata->mclk_rate,
+							"audio_dmic_rate");
+	if (!(wcd9xxx_read_of_property_u32(dev, "qcom,cdc-mad-dmic-rate",
+					   &prop_val)))
+		mad_dmic_sample_rate = prop_val;
+
+	pdata->mad_dmic_sample_rate = wcd9xxx_validate_dmic_sample_rate(dev,
+							mad_dmic_sample_rate,
+							pdata->mclk_rate,
+							"mad_dmic_rate");
+
+	if (of_find_property(dev->of_node, "qcom,cdc-ecpp-dmic-rate", NULL)) {
+		rc = wcd9xxx_read_of_property_u32(dev,
+						  "qcom,cdc-ecpp-dmic-rate",
+						  &prop_val);
+		if (!rc)
+			ecpp_dmic_sample_rate = prop_val;
+	}
+
+	pdata->ecpp_dmic_sample_rate = wcd9xxx_validate_dmic_sample_rate(dev,
+							ecpp_dmic_sample_rate,
+							pdata->mclk_rate,
+							"ecpp_dmic_rate");
+
+	if (!(of_property_read_u32(dev->of_node,
+				   "qcom,cdc-dmic-clk-drv-strength",
+				   &prop_val))) {
+		dmic_clk_drive = prop_val;
+
+		if (dmic_clk_drive != 2 && dmic_clk_drive != 4 &&
+		    dmic_clk_drive != 8 && dmic_clk_drive != 16)
+			dev_err(dev, "Invalid cdc-dmic-clk-drv-strength %d\n",
+				dmic_clk_drive);
+	}
+
+	pdata->dmic_clk_drv = dmic_clk_drive;
+
+	return pdata;
+
+err_parse_dt_prop:
+	devm_kfree(dev, pdata->regulator);
+	pdata->regulator = NULL;
+	pdata->num_supplies = 0;
+err_power_sup:
+	devm_kfree(dev, pdata);
+	return NULL;
+}
+EXPORT_SYMBOL(wcd9xxx_populate_dt_data);
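+
+/*
+ * Example (hypothetical DT snippet, values chosen for illustration):
+ *
+ *	qcom,cdc-mclk-clk-rate = <9600000>;
+ *	qcom,cdc-dmic-sample-rate = <2400000>;
+ *
+ * passes the mclk check above and yields DMIC_DIV = 4 in
+ * wcd9xxx_validate_dmic_sample_rate().
+ */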
+
+static bool is_wcd9xxx_reg_power_down(struct wcd9xxx *wcd9xxx, u16 rreg)
+{
+	bool ret = false;
+	int i;
+	struct wcd9xxx_power_region *wcd9xxx_pwr;
+
+	if (!wcd9xxx)
+		return ret;
+
+	for (i = 0; i < WCD9XXX_MAX_PWR_REGIONS; i++) {
+		wcd9xxx_pwr = wcd9xxx->wcd9xxx_pwr[i];
+		if (!wcd9xxx_pwr)
+			continue;
+		if (((wcd9xxx_pwr->pwr_collapse_reg_min == 0) &&
+		     (wcd9xxx_pwr->pwr_collapse_reg_max == 0)) ||
+		    (wcd9xxx_pwr->power_state ==
+		     WCD_REGION_POWER_COLLAPSE_REMOVE))
+			ret = false;
+		else if (((wcd9xxx_pwr->power_state ==
+			   WCD_REGION_POWER_DOWN) ||
+			  (wcd9xxx_pwr->power_state ==
+			   WCD_REGION_POWER_COLLAPSE_BEGIN)) &&
+			 (rreg >= wcd9xxx_pwr->pwr_collapse_reg_min) &&
+			 (rreg <= wcd9xxx_pwr->pwr_collapse_reg_max))
+			ret = true;
+	}
+	return ret;
+}
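+
+/*
+ * Example (values chosen for illustration): for a region with
+ * pwr_collapse_reg_min = 0x400, pwr_collapse_reg_max = 0x4ff and
+ * power_state WCD_REGION_POWER_DOWN, a read of register 0x420 returns
+ * true and is short-circuited to zero by the regmap callbacks below,
+ * while register 0x500 falls outside the range and returns false.
+ */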
+
+/*
+ * wcd9xxx_page_write:
+ *	Retrieve page number from register and
+ *	write that page number to the page address.
+ *	Called under io_lock acquisition.
+ *
+ * @wcd9xxx: pointer to wcd9xxx
+ * @reg: Register address from which page number is retrieved
+ *
+ * Returns 0 for success and negative error code for failure.
+ */
+int wcd9xxx_page_write(struct wcd9xxx *wcd9xxx, unsigned short *reg)
+{
+	int ret = 0;
+	unsigned short c_reg, reg_addr;
+	u8 pg_num, prev_pg_num;
+
+	if (wcd9xxx->type != WCD9335 && wcd9xxx->type != WCD934X)
+		return ret;
+
+	c_reg = *reg;
+	pg_num = c_reg >> 8;
+	reg_addr = c_reg & 0xff;
+	if (wcd9xxx->prev_pg_valid) {
+		prev_pg_num = wcd9xxx->prev_pg;
+		if (prev_pg_num != pg_num) {
+			ret = wcd9xxx->write_dev(
+					wcd9xxx, PAGE_REG_ADDR, 1,
+					(void *) &pg_num, false);
+			if (ret < 0)
+				pr_err("page write error, pg_num: 0x%x\n",
+					pg_num);
+			else {
+				wcd9xxx->prev_pg = pg_num;
+				dev_dbg(wcd9xxx->dev, "%s: Page 0x%x Write to 0x00\n",
+					__func__, pg_num);
+			}
+		}
+	} else {
+		ret = wcd9xxx->write_dev(
+				wcd9xxx, PAGE_REG_ADDR, 1, (void *) &pg_num,
+				false);
+		if (ret < 0)
+			pr_err("page write error, pg_num: 0x%x\n", pg_num);
+		else {
+			wcd9xxx->prev_pg = pg_num;
+			wcd9xxx->prev_pg_valid = true;
+			dev_dbg(wcd9xxx->dev, "%s: Page 0x%x Write to 0x00\n",
+				__func__, pg_num);
+		}
+	}
+	*reg = reg_addr;
+	return ret;
+}
+EXPORT_SYMBOL(wcd9xxx_page_write);
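+
+/*
+ * Example: for *reg == 0x0123, pg_num is 0x01 and reg_addr is 0x23;
+ * 0x01 is written to PAGE_REG_ADDR unless it matches the cached
+ * prev_pg, and *reg is rewritten to 0x23 for the following access.
+ */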
+
+static int regmap_bus_read(void *context, const void *reg, size_t reg_size,
+			   void *val, size_t val_size)
+{
+	struct device *dev = context;
+	struct wcd9xxx *wcd9xxx = dev_get_drvdata(dev);
+	unsigned short c_reg, rreg;
+	int ret, i;
+
+	if (!wcd9xxx) {
+		dev_err(dev, "%s: wcd9xxx is NULL\n", __func__);
+		return -EINVAL;
+	}
+	if (!reg || !val) {
+		dev_err(dev, "%s: reg or val is NULL\n", __func__);
+		return -EINVAL;
+	}
+
+	if (reg_size != REG_BYTES) {
+		dev_err(dev, "%s: register size %zd bytes, not supported\n",
+			__func__, reg_size);
+		return -EINVAL;
+	}
+
+	mutex_lock(&wcd9xxx->io_lock);
+	c_reg = *(u16 *)reg;
+	rreg = c_reg;
+
+	if (is_wcd9xxx_reg_power_down(wcd9xxx, rreg)) {
+		ret = 0;
+		for (i = 0; i < val_size; i++)
+			((u8 *)val)[i] = 0;
+		goto err;
+	}
+	ret = wcd9xxx_page_write(wcd9xxx, &c_reg);
+	if (ret)
+		goto err;
+	ret = wcd9xxx->read_dev(wcd9xxx, c_reg, val_size, val, false);
+	if (ret < 0)
+		dev_err(dev, "%s: Codec read failed (%d), reg: 0x%x, size:%zd\n",
+			__func__, ret, rreg, val_size);
+	else {
+		for (i = 0; i < val_size; i++)
+			dev_dbg(dev, "%s: Read 0x%02x from 0x%x\n",
+				__func__, ((u8 *)val)[i], rreg + i);
+	}
+err:
+	mutex_unlock(&wcd9xxx->io_lock);
+
+	return ret;
+}
+
+static int regmap_bus_gather_write(void *context,
+				   const void *reg, size_t reg_size,
+				   const void *val, size_t val_size)
+{
+	struct device *dev = context;
+	struct wcd9xxx *wcd9xxx = dev_get_drvdata(dev);
+	unsigned short c_reg, rreg;
+	int ret, i;
+
+	if (!wcd9xxx) {
+		dev_err(dev, "%s: wcd9xxx is NULL\n", __func__);
+		return -EINVAL;
+	}
+	if (!reg || !val) {
+		dev_err(dev, "%s: reg or val is NULL\n", __func__);
+		return -EINVAL;
+	}
+	if (reg_size != REG_BYTES) {
+		dev_err(dev, "%s: register size %zd bytes, not supported\n",
+			__func__, reg_size);
+		return -EINVAL;
+	}
+	mutex_lock(&wcd9xxx->io_lock);
+	c_reg = *(u16 *)reg;
+	rreg = c_reg;
+
+	if (is_wcd9xxx_reg_power_down(wcd9xxx, rreg)) {
+		ret = 0;
+		goto err;
+	}
+	ret = wcd9xxx_page_write(wcd9xxx, &c_reg);
+	if (ret)
+		goto err;
+
+	for (i = 0; i < val_size; i++)
+		dev_dbg(dev, "Write %02x to 0x%x\n", ((u8 *)val)[i],
+			rreg + i);
+
+	ret = wcd9xxx->write_dev(wcd9xxx, c_reg, val_size, (void *) val,
+				 false);
+	if (ret < 0)
+		dev_err(dev, "%s: Codec write failed (%d), reg:0x%x, size:%zd\n",
+			__func__, ret, rreg, val_size);
+
+err:
+	mutex_unlock(&wcd9xxx->io_lock);
+	return ret;
+}
+
+static int regmap_bus_write(void *context, const void *data, size_t count)
+{
+	struct device *dev = context;
+	struct wcd9xxx *wcd9xxx = dev_get_drvdata(dev);
+
+	if (!wcd9xxx)
+		return -EINVAL;
+
+	WARN_ON(count < REG_BYTES);
+
+	if (count > (REG_BYTES + VAL_BYTES)) {
+		if (wcd9xxx->multi_reg_write)
+			return wcd9xxx->multi_reg_write(wcd9xxx,
+							data, count);
+	} else
+		return regmap_bus_gather_write(context, data, REG_BYTES,
+					       data + REG_BYTES,
+					       count - REG_BYTES);
+
+	dev_err(dev, "%s: bus multi reg write failure\n", __func__);
+
+	return -EINVAL;
+}
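+
+/*
+ * Note: regmap passes regmap_bus_write() a flat buffer of REG_BYTES of
+ * register address followed by the value bytes. A single-register
+ * write therefore has count == REG_BYTES + VAL_BYTES and is routed to
+ * regmap_bus_gather_write(); anything larger is treated as a
+ * multi-register burst and handed to multi_reg_write(), if available.
+ */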
+
+static struct regmap_bus regmap_bus_config = {
+	.write = regmap_bus_write,
+	.gather_write = regmap_bus_gather_write,
+	.read = regmap_bus_read,
+	.reg_format_endian_default = REGMAP_ENDIAN_NATIVE,
+	.val_format_endian_default = REGMAP_ENDIAN_NATIVE,
+};
+
+/*
+ * wcd9xxx_regmap_init:
+ *	Initialize wcd9xxx register map
+ *
+ * @dev: pointer to wcd device
+ * @config: pointer to register map config
+ *
+ * Returns pointer to regmap structure for success
+ * or NULL in case of failure.
+ */
+struct regmap *wcd9xxx_regmap_init(struct device *dev,
+				   const struct regmap_config *config)
+{
+	return devm_regmap_init(dev, &regmap_bus_config, dev, config);
+}
+EXPORT_SYMBOL(wcd9xxx_regmap_init);
+
+/*
+ * wcd9xxx_reset:
+ *	Reset wcd9xxx codec
+ *
+ * @dev: pointer to wcd device
+ *
+ * Returns 0 for success or negative error code in case of failure
+ */
+int wcd9xxx_reset(struct device *dev)
+{
+	struct wcd9xxx *wcd9xxx;
+	int rc;
+	int value;
+
+	if (!dev)
+		return -ENODEV;
+
+	wcd9xxx = dev_get_drvdata(dev);
+	if (!wcd9xxx)
+		return -EINVAL;
+
+	if (!wcd9xxx->wcd_rst_np) {
+		dev_err(dev, "%s: reset gpio device node not specified\n",
+			__func__);
+		return -EINVAL;
+	}
+
+	value = msm_cdc_pinctrl_get_state(wcd9xxx->wcd_rst_np);
+	if (value > 0) {
+		wcd9xxx->avoid_cdc_rstlow = 1;
+		return 0;
+	}
+
+	rc = msm_cdc_pinctrl_select_sleep_state(wcd9xxx->wcd_rst_np);
+	if (rc) {
+		dev_err(dev, "%s: wcd sleep state request fail!\n",
+			__func__);
+		return rc;
+	}
+
+	/* 20ms sleep required after pulling the reset gpio to LOW */
+	msleep(20);
+
+	rc = msm_cdc_pinctrl_select_active_state(wcd9xxx->wcd_rst_np);
+	if (rc) {
+		dev_err(dev, "%s: wcd active state request fail!\n",
+			__func__);
+		return rc;
+	}
+	msleep(20);
+
+	return rc;
+}
+EXPORT_SYMBOL(wcd9xxx_reset);
+
+/*
+ * wcd9xxx_reset_low:
+ *	Pull the wcd9xxx codec reset_n to low
+ *
+ * @dev: pointer to wcd device
+ *
+ * Returns 0 for success or negative error code in case of failure
+ */
+int wcd9xxx_reset_low(struct device *dev)
+{
+	struct wcd9xxx *wcd9xxx;
+	int rc;
+
+	if (!dev)
+		return -ENODEV;
+
+	wcd9xxx = dev_get_drvdata(dev);
+	if (!wcd9xxx)
+		return -EINVAL;
+
+	if (!wcd9xxx->wcd_rst_np) {
+		dev_err(dev, "%s: reset gpio device node not specified\n",
+			__func__);
+		return -EINVAL;
+	}
+	if (wcd9xxx->avoid_cdc_rstlow) {
+		wcd9xxx->avoid_cdc_rstlow = 0;
+		dev_dbg(dev, "%s: avoid pull down of reset GPIO\n", __func__);
+		return 0;
+	}
+
+	rc = msm_cdc_pinctrl_select_sleep_state(wcd9xxx->wcd_rst_np);
+	if (rc)
+		dev_err(dev, "%s: wcd sleep state request fail!\n",
+			__func__);
+
+	return rc;
+}
+EXPORT_SYMBOL(wcd9xxx_reset_low);
+
+/*
+ * wcd9xxx_bringup:
+ *	Toggle reset analog and digital cores of wcd9xxx codec
+ *
+ * @dev: pointer to wcd device
+ *
+ * Returns 0 for success or negative error code in case of failure
+ */
+int wcd9xxx_bringup(struct device *dev)
+{
+	struct wcd9xxx *wcd9xxx;
+	int rc;
+	codec_bringup_fn cdc_bup_fn;
+
+	if (!dev)
+		return -ENODEV;
+
+	wcd9xxx = dev_get_drvdata(dev);
+	if (!wcd9xxx)
+		return -EINVAL;
+
+	cdc_bup_fn = wcd9xxx_bringup_fn(wcd9xxx->type);
+	if (!cdc_bup_fn) {
+		dev_err(dev, "%s: Codec bringup fn NULL!\n",
+			__func__);
+		return -EINVAL;
+	}
+	rc = cdc_bup_fn(wcd9xxx);
+	if (rc)
+		dev_err(dev, "%s: Codec bringup error, rc: %d\n",
+			__func__, rc);
+
+	return rc;
+}
+EXPORT_SYMBOL(wcd9xxx_bringup);
+
+/*
+ * wcd9xxx_bringdown:
+ *	Set analog and digital cores of wcd9xxx codec in reset state
+ *
+ * @dev: pointer to wcd device
+ *
+ * Returns 0 for success or negative error code in case of failure
+ */
+int wcd9xxx_bringdown(struct device *dev)
+{
+	struct wcd9xxx *wcd9xxx;
+	int rc;
+	codec_bringdown_fn cdc_bdown_fn;
+
+	if (!dev)
+		return -ENODEV;
+
+	wcd9xxx = dev_get_drvdata(dev);
+	if (!wcd9xxx)
+		return -EINVAL;
+
+	cdc_bdown_fn = wcd9xxx_bringdown_fn(wcd9xxx->type);
+	if (!cdc_bdown_fn) {
+		dev_err(dev, "%s: Codec bring down fn NULL!\n",
+			__func__);
+		return -EINVAL;
+	}
+	rc = cdc_bdown_fn(wcd9xxx);
+	if (rc)
+		dev_err(dev, "%s: Codec bring down error, rc: %d\n",
+			__func__, rc);
+
+	return rc;
+}
+EXPORT_SYMBOL(wcd9xxx_bringdown);
+
+/*
+ * wcd9xxx_get_codec_info:
+ *	Fill codec specific information like interrupts, version
+ *
+ * @dev: pointer to wcd device
+ *
+ * Returns 0 for success or negative error code in case of failure
+ */
+int wcd9xxx_get_codec_info(struct device *dev)
+{
+	struct wcd9xxx *wcd9xxx;
+	int rc;
+	codec_type_fn cdc_type_fn;
+	struct wcd9xxx_codec_type *cinfo;
+
+	if (!dev)
+		return -ENODEV;
+
+	wcd9xxx = dev_get_drvdata(dev);
+	if (!wcd9xxx)
+		return -EINVAL;
+
+	cdc_type_fn = wcd9xxx_get_codec_info_fn(wcd9xxx->type);
+	if (!cdc_type_fn) {
+		dev_err(dev, "%s: Codec fill type fn NULL!\n",
+			__func__);
+		return -EINVAL;
+	}
+
+	cinfo = wcd9xxx->codec_type;
+	if (!cinfo)
+		return -EINVAL;
+
+	rc = cdc_type_fn(wcd9xxx, cinfo);
+	if (rc) {
+		dev_err(dev, "%s: Codec type fill failed, rc:%d\n",
+			__func__, rc);
+		return rc;
+	}
+
+	switch (wcd9xxx->type) {
+	case WCD934X:
+		cinfo->dev = tavil_devs;
+		cinfo->size = ARRAY_SIZE(tavil_devs);
+		break;
+	case WCD9335:
+		cinfo->dev = tasha_devs;
+		cinfo->size = ARRAY_SIZE(tasha_devs);
+		break;
+	case WCD9330:
+		cinfo->dev = tomtom_devs;
+		cinfo->size = ARRAY_SIZE(tomtom_devs);
+		break;
+	default:
+		cinfo->dev = NULL;
+		cinfo->size = 0;
+		break;
+	}
+
+	return rc;
+}
+EXPORT_SYMBOL(wcd9xxx_get_codec_info);
+
+/*
+ * wcd9xxx_core_irq_init:
+ *	Initialize wcd9xxx codec irq instance
+ *
+ * @wcd9xxx_core_res: pointer to wcd core resource
+ *
+ * Returns 0 for success or negative error code in case of failure
+ */
+int wcd9xxx_core_irq_init(
+	struct wcd9xxx_core_resource *wcd9xxx_core_res)
+{
+	int ret = 0;
+
+	if (!wcd9xxx_core_res)
+		return -EINVAL;
+
+	if (wcd9xxx_core_res->irq != 1) {
+		ret = wcd9xxx_irq_init(wcd9xxx_core_res);
+		if (ret)
+			pr_err("IRQ initialization failed\n");
+	}
+
+	return ret;
+}
+EXPORT_SYMBOL(wcd9xxx_core_irq_init);
+
+/*
+ * wcd9xxx_assign_irq:
+ *	Assign irq and irq_base to wcd9xxx core resource
+ *
+ * @wcd9xxx_core_res: pointer to wcd core resource
+ * @irq: irq number
+ * @irq_base: base irq number
+ *
+ * Returns 0 for success or negative error code in case of failure
+ */
+int wcd9xxx_assign_irq(
+	struct wcd9xxx_core_resource *wcd9xxx_core_res,
+	unsigned int irq,
+	unsigned int irq_base)
+{
+	if (!wcd9xxx_core_res)
+		return -EINVAL;
+
+	wcd9xxx_core_res->irq = irq;
+	wcd9xxx_core_res->irq_base = irq_base;
+
+	return 0;
+}
+EXPORT_SYMBOL(wcd9xxx_assign_irq);
+
+/*
+ * wcd9xxx_core_res_init:
+ *	Initialize wcd core resource instance
+ *
+ * @wcd9xxx_core_res: pointer to wcd core resource
+ * @num_irqs: number of irqs for wcd9xxx core
+ * @num_irq_regs: number of irq registers
+ * @wcd_regmap: pointer to the wcd register map
+ *
+ * Returns 0 for success or negative error code in case of failure
+ */
+int wcd9xxx_core_res_init(
+	struct wcd9xxx_core_resource *wcd9xxx_core_res,
+	int num_irqs, int num_irq_regs, struct regmap *wcd_regmap)
+{
+	if (!wcd9xxx_core_res || !wcd_regmap)
+		return -EINVAL;
+
+	mutex_init(&wcd9xxx_core_res->pm_lock);
+	wcd9xxx_core_res->wlock_holders = 0;
+	wcd9xxx_core_res->pm_state = WCD9XXX_PM_SLEEPABLE;
+	init_waitqueue_head(&wcd9xxx_core_res->pm_wq);
+	pm_qos_add_request(&wcd9xxx_core_res->pm_qos_req,
+				PM_QOS_CPU_DMA_LATENCY,
+				PM_QOS_DEFAULT_VALUE);
+
+	wcd9xxx_core_res->num_irqs = num_irqs;
+	wcd9xxx_core_res->num_irq_regs = num_irq_regs;
+	wcd9xxx_core_res->wcd_core_regmap = wcd_regmap;
+
+	pr_info("%s: num_irqs = %d, num_irq_regs = %d\n",
+			__func__, wcd9xxx_core_res->num_irqs,
+			wcd9xxx_core_res->num_irq_regs);
+
+	return 0;
+}
+EXPORT_SYMBOL(wcd9xxx_core_res_init);
+
+/*
+ * wcd9xxx_core_res_deinit:
+ *	Deinit wcd core resource instance
+ *
+ * @wcd9xxx_core_res: pointer to wcd core resource
+ */
+void wcd9xxx_core_res_deinit(struct wcd9xxx_core_resource *wcd9xxx_core_res)
+{
+	if (!wcd9xxx_core_res)
+		return;
+
+	pm_qos_remove_request(&wcd9xxx_core_res->pm_qos_req);
+	mutex_destroy(&wcd9xxx_core_res->pm_lock);
+}
+EXPORT_SYMBOL(wcd9xxx_core_res_deinit);
+
+/*
+ * wcd9xxx_pm_cmpxchg:
+ *	Check old state and exchange with pm new state
+ *	if old state matches with current state
+ *
+ * @wcd9xxx_core_res: pointer to wcd core resource
+ * @o: pm old state
+ * @n: pm new state
+ *
+ * Returns old state
+ */
+enum wcd9xxx_pm_state wcd9xxx_pm_cmpxchg(
+		struct wcd9xxx_core_resource *wcd9xxx_core_res,
+		enum wcd9xxx_pm_state o,
+		enum wcd9xxx_pm_state n)
+{
+	enum wcd9xxx_pm_state old;
+
+	if (!wcd9xxx_core_res)
+		return o;
+
+	mutex_lock(&wcd9xxx_core_res->pm_lock);
+	old = wcd9xxx_core_res->pm_state;
+	if (old == o)
+		wcd9xxx_core_res->pm_state = n;
+	mutex_unlock(&wcd9xxx_core_res->pm_lock);
+
+	return old;
+}
+EXPORT_SYMBOL(wcd9xxx_pm_cmpxchg);
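+
+/*
+ * Usage example (see the suspend path below): the suspend handler
+ * calls wcd9xxx_pm_cmpxchg(res, WCD9XXX_PM_SLEEPABLE, WCD9XXX_PM_ASLEEP)
+ * from wait_event_timeout(), so the transition to ASLEEP only happens
+ * once pm_state has returned to SLEEPABLE; any other state is returned
+ * unchanged and the wait continues.
+ */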
+
+/*
+ * wcd9xxx_core_res_suspend:
+ *	Suspend callback function for wcd9xxx core
+ *
+ * @wcd9xxx_core_res: pointer to wcd core resource
+ * @pmesg: pm message
+ *
+ * Returns 0 for success or negative error code for failure/busy
+ */
+int wcd9xxx_core_res_suspend(
+	struct wcd9xxx_core_resource *wcd9xxx_core_res,
+	pm_message_t pmesg)
+{
+	int ret = 0;
+
+	pr_debug("%s: enter\n", __func__);
+	/*
+	 * pm_qos_update_request() can be called after this suspend chain call
+	 * started. Thus suspend can be called while the lock is being held.
+	 */
+	mutex_lock(&wcd9xxx_core_res->pm_lock);
+	if (wcd9xxx_core_res->pm_state == WCD9XXX_PM_SLEEPABLE) {
+		pr_debug("%s: suspending system, state %d, wlock %d\n",
+			 __func__, wcd9xxx_core_res->pm_state,
+			 wcd9xxx_core_res->wlock_holders);
+		wcd9xxx_core_res->pm_state = WCD9XXX_PM_ASLEEP;
+	} else if (wcd9xxx_core_res->pm_state == WCD9XXX_PM_AWAKE) {
+		/*
+		 * unlock to wait for pm_state == WCD9XXX_PM_SLEEPABLE
+		 * then set to WCD9XXX_PM_ASLEEP
+		 */
+		pr_debug("%s: waiting to suspend system, state %d, wlock %d\n",
+			 __func__, wcd9xxx_core_res->pm_state,
+			 wcd9xxx_core_res->wlock_holders);
+		mutex_unlock(&wcd9xxx_core_res->pm_lock);
+		if (!(wait_event_timeout(wcd9xxx_core_res->pm_wq,
+					 wcd9xxx_pm_cmpxchg(wcd9xxx_core_res,
+						  WCD9XXX_PM_SLEEPABLE,
+						  WCD9XXX_PM_ASLEEP) ==
+							WCD9XXX_PM_SLEEPABLE,
+					 HZ))) {
+			pr_debug("%s: suspend failed state %d, wlock %d\n",
+				 __func__, wcd9xxx_core_res->pm_state,
+				 wcd9xxx_core_res->wlock_holders);
+			ret = -EBUSY;
+		} else {
+			pr_debug("%s: done, state %d, wlock %d\n", __func__,
+				 wcd9xxx_core_res->pm_state,
+				 wcd9xxx_core_res->wlock_holders);
+		}
+		mutex_lock(&wcd9xxx_core_res->pm_lock);
+	} else if (wcd9xxx_core_res->pm_state == WCD9XXX_PM_ASLEEP) {
+		pr_warn("%s: system is already suspended, state %d, wlock %d\n",
+			__func__, wcd9xxx_core_res->pm_state,
+			wcd9xxx_core_res->wlock_holders);
+	}
+	mutex_unlock(&wcd9xxx_core_res->pm_lock);
+
+	return ret;
+}
+EXPORT_SYMBOL(wcd9xxx_core_res_suspend);
+
+/*
+ * wcd9xxx_core_res_resume:
+ *	Resume callback function for wcd9xxx core
+ *
+ * @wcd9xxx_core_res: pointer to wcd core resource
+ *
+ * Returns 0 for success or negative error code for failure/busy
+ */
+int wcd9xxx_core_res_resume(
+	struct wcd9xxx_core_resource *wcd9xxx_core_res)
+{
+	int ret = 0;
+
+	pr_debug("%s: enter\n", __func__);
+	mutex_lock(&wcd9xxx_core_res->pm_lock);
+	if (wcd9xxx_core_res->pm_state == WCD9XXX_PM_ASLEEP) {
+		pr_debug("%s: resuming system, state %d, wlock %d\n", __func__,
+				wcd9xxx_core_res->pm_state,
+				wcd9xxx_core_res->wlock_holders);
+		wcd9xxx_core_res->pm_state = WCD9XXX_PM_SLEEPABLE;
+	} else {
+		pr_warn("%s: system is already awake, state %d wlock %d\n",
+				__func__, wcd9xxx_core_res->pm_state,
+				wcd9xxx_core_res->wlock_holders);
+	}
+	mutex_unlock(&wcd9xxx_core_res->pm_lock);
+	wake_up_all(&wcd9xxx_core_res->pm_wq);
+
+	return ret;
+}
+EXPORT_SYMBOL(wcd9xxx_core_res_resume);
+
+/*
+ * wcd9xxx_get_intf_type:
+ *	Get interface type of wcd9xxx core
+ *
+ * Returns interface type
+ */
+enum wcd9xxx_intf_status wcd9xxx_get_intf_type(void)
+{
+	return wcd9xxx_intf;
+}
+EXPORT_SYMBOL(wcd9xxx_get_intf_type);
+
+/*
+ * wcd9xxx_set_intf_type:
+ *	Set interface type of wcd9xxx core
+ *
+ * @intf_status: interface type to be set
+ */
+void wcd9xxx_set_intf_type(enum wcd9xxx_intf_status intf_status)
+{
+	wcd9xxx_intf = intf_status;
+}
+EXPORT_SYMBOL(wcd9xxx_set_intf_type);
+
+/*
+ * wcd9xxx_set_power_state: set power state for the region
+ * @wcd9xxx: handle to wcd core
+ * @state: power state to be set
+ * @region: region index
+ *
+ * Returns error code in case of failure or 0 for success
+ */
+int wcd9xxx_set_power_state(struct wcd9xxx *wcd9xxx,
+			    enum codec_power_states state,
+			    enum wcd_power_regions region)
+{
+	if (!wcd9xxx) {
+		pr_err("%s: wcd9xxx is NULL\n", __func__);
+		return -EINVAL;
+	}
+
+	if ((region < 0) || (region >= WCD9XXX_MAX_PWR_REGIONS)) {
+		dev_err(wcd9xxx->dev, "%s: region index %d out of bounds\n",
+			__func__, region);
+		return -EINVAL;
+	}
+	if (!wcd9xxx->wcd9xxx_pwr[region]) {
+		dev_err(wcd9xxx->dev, "%s: memory not created for region: %d\n",
+			__func__, region);
+		return -EINVAL;
+	}
+	mutex_lock(&wcd9xxx->io_lock);
+	wcd9xxx->wcd9xxx_pwr[region]->power_state = state;
+	mutex_unlock(&wcd9xxx->io_lock);
+
+	return 0;
+}
+EXPORT_SYMBOL(wcd9xxx_set_power_state);
+
+/*
+ * wcd9xxx_get_current_power_state: Get power state of the region
+ * @wcd9xxx: handle to wcd core
+ * @region: region index
+ *
+ * Returns current power state of the region or error code for failure
+ */
+int wcd9xxx_get_current_power_state(struct wcd9xxx *wcd9xxx,
+				    enum wcd_power_regions region)
+{
+	int state;
+
+	if (!wcd9xxx) {
+		pr_err("%s: wcd9xxx is NULL\n", __func__);
+		return -EINVAL;
+	}
+
+	if ((region < 0) || (region >= WCD9XXX_MAX_PWR_REGIONS)) {
+		dev_err(wcd9xxx->dev, "%s: region index %d out of bounds\n",
+			__func__, region);
+		return -EINVAL;
+	}
+	if (!wcd9xxx->wcd9xxx_pwr[region]) {
+		dev_err(wcd9xxx->dev, "%s: memory not created for region: %d\n",
+			__func__, region);
+		return -EINVAL;
+	}
+
+	mutex_lock(&wcd9xxx->io_lock);
+	state = wcd9xxx->wcd9xxx_pwr[region]->power_state;
+	mutex_unlock(&wcd9xxx->io_lock);
+
+	return state;
+}
+EXPORT_SYMBOL(wcd9xxx_get_current_power_state);
+
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/misc/compat_qseecom.c	2019-01-22 16:16:24.699257237 +0100
@@ -0,0 +1,923 @@
+/* Copyright (c) 2014-2015,2017 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#define pr_fmt(fmt) "COMPAT-QSEECOM: %s: " fmt, __func__
+
+#include <linux/debugfs.h>
+#include <linux/uaccess.h>
+#include <linux/qseecom.h>
+#include <linux/compat.h>
+#include "compat_qseecom.h"
+
+static int compat_get_qseecom_register_listener_req(
+		struct compat_qseecom_register_listener_req __user *data32,
+		struct qseecom_register_listener_req __user *data)
+{
+	int err;
+	compat_ulong_t listener_id;
+	compat_long_t ifd_data_fd;
+	compat_uptr_t virt_sb_base;
+	compat_ulong_t sb_size;
+
+	err = get_user(listener_id, &data32->listener_id);
+	err |= put_user(listener_id, &data->listener_id);
+	err |= get_user(ifd_data_fd, &data32->ifd_data_fd);
+	err |= put_user(ifd_data_fd, &data->ifd_data_fd);
+
+	err |= get_user(virt_sb_base, &data32->virt_sb_base);
+	/* upper bits won't get set, zero them */
+	err |= put_user(NULL, &data->virt_sb_base);
+	err |= put_user(virt_sb_base, (compat_uptr_t *)&data->virt_sb_base);
+
+	err |= get_user(sb_size, &data32->sb_size);
+	err |= put_user(sb_size, &data->sb_size);
+	return err;
+}
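+
+/*
+ * Note on the pointer copy above: put_user(NULL, ...) first clears the
+ * full native (64-bit) pointer slot, then the 32-bit compat_uptr_t is
+ * stored into its first four bytes (the low half on little-endian), so
+ * the upper bits of virt_sb_base are guaranteed to be zero. The same
+ * pattern recurs for every pointer field translated in this file.
+ */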
+
+static int compat_get_qseecom_load_img_req(
+		struct compat_qseecom_load_img_req __user *data32,
+		struct qseecom_load_img_req __user *data)
+{
+	int err;
+	compat_ulong_t mdt_len;
+	compat_ulong_t img_len;
+	compat_long_t ifd_data_fd;
+	compat_ulong_t app_arch;
+	compat_uint_t app_id;
+
+	err = get_user(mdt_len, &data32->mdt_len);
+	err |= put_user(mdt_len, &data->mdt_len);
+	err |= get_user(img_len, &data32->img_len);
+	err |= put_user(img_len, &data->img_len);
+	err |= get_user(ifd_data_fd, &data32->ifd_data_fd);
+	err |= put_user(ifd_data_fd, &data->ifd_data_fd);
+	err |= copy_in_user(data->img_name, data32->img_name,
+				MAX_APP_NAME_SIZE);
+	err |= get_user(app_arch, &data32->app_arch);
+	err |= put_user(app_arch, &data->app_arch);
+	err |= get_user(app_id, &data32->app_id);
+	err |= put_user(app_id, &data->app_id);
+	return err;
+}
+
+static int compat_get_qseecom_send_cmd_req(
+		struct compat_qseecom_send_cmd_req __user *data32,
+		struct qseecom_send_cmd_req __user *data)
+{
+	int err;
+	compat_uptr_t cmd_req_buf;
+	compat_uint_t cmd_req_len;
+	compat_uptr_t resp_buf;
+	compat_uint_t resp_len;
+
+	err = get_user(cmd_req_buf, &data32->cmd_req_buf);
+	err |= put_user(NULL, &data->cmd_req_buf);
+	err |= put_user(cmd_req_buf, (compat_uptr_t *)&data->cmd_req_buf);
+	err |= get_user(cmd_req_len, &data32->cmd_req_len);
+	err |= put_user(cmd_req_len, &data->cmd_req_len);
+
+	err |= get_user(resp_buf, &data32->resp_buf);
+	err |= put_user(NULL, &data->resp_buf);
+	err |= put_user(resp_buf, (compat_uptr_t *)&data->resp_buf);
+	err |= get_user(resp_len, &data32->resp_len);
+	err |= put_user(resp_len, &data->resp_len);
+	return err;
+}
+
+static int compat_get_qseecom_send_modfd_cmd_req(
+		struct compat_qseecom_send_modfd_cmd_req __user *data32,
+		struct qseecom_send_modfd_cmd_req __user *data)
+{
+	int err;
+	unsigned int i;
+	compat_uptr_t cmd_req_buf;
+	compat_uint_t cmd_req_len;
+	compat_uptr_t resp_buf;
+	compat_uint_t resp_len;
+	compat_long_t fd;
+	compat_ulong_t cmd_buf_offset;
+
+	err = get_user(cmd_req_buf, &data32->cmd_req_buf);
+	err |= put_user(NULL, &data->cmd_req_buf);
+	err |= put_user(cmd_req_buf, (compat_uptr_t *)&data->cmd_req_buf);
+	err |= get_user(cmd_req_len, &data32->cmd_req_len);
+	err |= put_user(cmd_req_len, &data->cmd_req_len);
+	err |= get_user(resp_buf, &data32->resp_buf);
+	err |= put_user(NULL, &data->resp_buf);
+	err |= put_user(resp_buf, (compat_uptr_t *)&data->resp_buf);
+	err |= get_user(resp_len, &data32->resp_len);
+	err |= put_user(resp_len, &data->resp_len);
+	for (i = 0; i < MAX_ION_FD; i++) {
+		err |= get_user(fd, &data32->ifd_data[i].fd);
+		err |= put_user(fd, &data->ifd_data[i].fd);
+		err |= get_user(cmd_buf_offset,
+				&data32->ifd_data[i].cmd_buf_offset);
+		err |= put_user(cmd_buf_offset,
+				&data->ifd_data[i].cmd_buf_offset);
+	}
+	return err;
+}
+
+static int compat_get_qseecom_set_sb_mem_param_req(
+		struct compat_qseecom_set_sb_mem_param_req __user *data32,
+		struct qseecom_set_sb_mem_param_req __user *data)
+{
+	int err;
+	compat_long_t ifd_data_fd;
+	compat_uptr_t virt_sb_base;
+	compat_ulong_t sb_len;
+
+	err = get_user(ifd_data_fd, &data32->ifd_data_fd);
+	err |= put_user(ifd_data_fd, &data->ifd_data_fd);
+	err |= get_user(virt_sb_base, &data32->virt_sb_base);
+	err |= put_user(NULL, &data->virt_sb_base);
+	err |= put_user(virt_sb_base, (compat_uptr_t *)&data->virt_sb_base);
+	err |= get_user(sb_len, &data32->sb_len);
+	err |= put_user(sb_len, &data->sb_len);
+	return err;
+}
+
+static int compat_get_qseecom_qseos_version_req(
+		struct compat_qseecom_qseos_version_req __user *data32,
+		struct qseecom_qseos_version_req __user *data)
+{
+	int err;
+	compat_uint_t qseos_version;
+
+	err = get_user(qseos_version, &data32->qseos_version);
+	err |= put_user(qseos_version, &data->qseos_version);
+	return err;
+}
+
+static int compat_get_qseecom_qseos_app_load_query(
+		struct compat_qseecom_qseos_app_load_query __user *data32,
+		struct qseecom_qseos_app_load_query __user *data)
+{
+	int err = 0;
+	unsigned int i;
+	compat_uint_t app_id;
+	char app_name;
+	compat_ulong_t app_arch;
+
+	for (i = 0; i < MAX_APP_NAME_SIZE; i++) {
+		err |= get_user(app_name, &(data32->app_name[i]));
+		err |= put_user(app_name, &(data->app_name[i]));
+	}
+	err |= get_user(app_id, &data32->app_id);
+	err |= put_user(app_id, &data->app_id);
+	err |= get_user(app_arch, &data32->app_arch);
+	err |= put_user(app_arch, &data->app_arch);
+	return err;
+}
+
+static int compat_get_qseecom_send_svc_cmd_req(
+		struct compat_qseecom_send_svc_cmd_req __user *data32,
+		struct qseecom_send_svc_cmd_req __user *data)
+{
+	int err;
+	compat_ulong_t cmd_id;
+	compat_uptr_t cmd_req_buf;
+	compat_uint_t cmd_req_len;
+	compat_uptr_t resp_buf;
+	compat_uint_t resp_len;
+
+	err = get_user(cmd_id, &data32->cmd_id);
+	err |= put_user(cmd_id, &data->cmd_id);
+	err |= get_user(cmd_req_buf, &data32->cmd_req_buf);
+	err |= put_user(NULL, &data->cmd_req_buf);
+	err |= put_user(cmd_req_buf, (compat_uptr_t *)&data->cmd_req_buf);
+	err |= get_user(cmd_req_len, &data32->cmd_req_len);
+	err |= put_user(cmd_req_len, &data->cmd_req_len);
+	err |= get_user(resp_buf, &data32->resp_buf);
+	err |= put_user(NULL, &data->resp_buf);
+	err |= put_user(resp_buf, (compat_uptr_t *)&data->resp_buf);
+	err |= get_user(resp_len, &data32->resp_len);
+	err |= put_user(resp_len, &data->resp_len);
+	return err;
+}
+
+static int compat_get_qseecom_create_key_req(
+		struct compat_qseecom_create_key_req __user *data32,
+		struct qseecom_create_key_req __user *data)
+{
+	int err;
+	compat_uint_t usage;
+
+	err = copy_in_user(data->hash32, data32->hash32, QSEECOM_HASH_SIZE);
+	err |= get_user(usage, &data32->usage);
+	err |= put_user(usage, &data->usage);
+
+	return err;
+}
+
+static int compat_get_qseecom_wipe_key_req(
+		struct compat_qseecom_wipe_key_req __user *data32,
+		struct qseecom_wipe_key_req __user *data)
+{
+	int err;
+	compat_uint_t usage;
+	compat_int_t wipe_key_flag;
+
+	err = get_user(usage, &data32->usage);
+	err |= put_user(usage, &data->usage);
+	err |= get_user(wipe_key_flag, &data32->wipe_key_flag);
+	err |= put_user(wipe_key_flag, &data->wipe_key_flag);
+
+	return err;
+}
+
+static int compat_get_qseecom_update_key_userinfo_req(
+		struct compat_qseecom_update_key_userinfo_req __user *data32,
+		struct qseecom_update_key_userinfo_req __user *data)
+{
+	int err = 0;
+	compat_uint_t usage;
+
+	err = copy_in_user(data->current_hash32, data32->current_hash32,
+				QSEECOM_HASH_SIZE);
+	err |= copy_in_user(data->new_hash32, data32->new_hash32,
+				QSEECOM_HASH_SIZE);
+	err |= get_user(usage, &data32->usage);
+	err |= put_user(usage, &data->usage);
+
+	return err;
+}
+
+static int compat_get_qseecom_save_partition_hash_req(
+		struct compat_qseecom_save_partition_hash_req __user *data32,
+		struct qseecom_save_partition_hash_req __user *data)
+{
+	int err;
+	compat_int_t partition_id;
+
+	err = get_user(partition_id, &data32->partition_id);
+	err |= put_user(partition_id, &data->partition_id);
+	err |= copy_in_user(data->digest, data32->digest,
+				SHA256_DIGEST_LENGTH);
+	return err;
+}
+
+static int compat_get_qseecom_is_es_activated_req(
+		struct compat_qseecom_is_es_activated_req __user *data32,
+		struct qseecom_is_es_activated_req __user *data)
+{
+	compat_int_t is_activated;
+	int err;
+
+	err = get_user(is_activated, &data32->is_activated);
+	err |= put_user(is_activated, &data->is_activated);
+	return err;
+}
+
+static int compat_get_qseecom_mdtp_cipher_dip_req(
+		struct compat_qseecom_mdtp_cipher_dip_req __user *data32,
+		struct qseecom_mdtp_cipher_dip_req __user *data)
+{
+	int err;
+	compat_int_t in_buf_size;
+	compat_uptr_t in_buf;
+	compat_int_t out_buf_size;
+	compat_uptr_t out_buf;
+	compat_int_t direction;
+
+	err = get_user(in_buf_size, &data32->in_buf_size);
+	err |= put_user(in_buf_size, &data->in_buf_size);
+	err |= get_user(out_buf_size, &data32->out_buf_size);
+	err |= put_user(out_buf_size, &data->out_buf_size);
+	err |= get_user(direction, &data32->direction);
+	err |= put_user(direction, &data->direction);
+	err |= get_user(in_buf, &data32->in_buf);
+	err |= put_user(NULL, &data->in_buf);
+	err |= put_user(in_buf, (compat_uptr_t *)&data->in_buf);
+	err |= get_user(out_buf, &data32->out_buf);
+	err |= put_user(NULL, &data->out_buf);
+	err |= put_user(out_buf, (compat_uptr_t *)&data->out_buf);
+
+	return err;
+}
+
+static int compat_get_qseecom_send_modfd_listener_resp(
+		struct compat_qseecom_send_modfd_listener_resp __user *data32,
+		struct qseecom_send_modfd_listener_resp __user *data)
+{
+	int err;
+	unsigned int i;
+	compat_uptr_t resp_buf_ptr;
+	compat_uint_t resp_len;
+	compat_long_t fd;
+	compat_ulong_t cmd_buf_offset;
+
+	err = get_user(resp_buf_ptr, &data32->resp_buf_ptr);
+	err |= put_user(NULL, &data->resp_buf_ptr);
+	err |= put_user(resp_buf_ptr, (compat_uptr_t *)&data->resp_buf_ptr);
+	err |= get_user(resp_len, &data32->resp_len);
+	err |= put_user(resp_len, &data->resp_len);
+
+	for (i = 0; i < MAX_ION_FD; i++) {
+		err |= get_user(fd, &data32->ifd_data[i].fd);
+		err |= put_user(fd, &data->ifd_data[i].fd);
+		err |= get_user(cmd_buf_offset,
+				&data32->ifd_data[i].cmd_buf_offset);
+		err |= put_user(cmd_buf_offset,
+				&data->ifd_data[i].cmd_buf_offset);
+	}
+	return err;
+}
+
+static int compat_get_qseecom_qteec_req(
+		struct compat_qseecom_qteec_req __user *data32,
+		struct qseecom_qteec_req __user *data)
+{
+	compat_uptr_t req_ptr;
+	compat_ulong_t req_len;
+	compat_uptr_t resp_ptr;
+	compat_ulong_t resp_len;
+	int err;
+
+	err = get_user(req_ptr, &data32->req_ptr);
+	err |= put_user(NULL, &data->req_ptr);
+	err |= put_user(req_ptr, (compat_uptr_t *)&data->req_ptr);
+	err |= get_user(req_len, &data32->req_len);
+	err |= put_user(req_len, &data->req_len);
+
+	err |= get_user(resp_ptr, &data32->resp_ptr);
+	err |= put_user(NULL, &data->resp_ptr);
+	err |= put_user(resp_ptr, (compat_uptr_t *)&data->resp_ptr);
+	err |= get_user(resp_len, &data32->resp_len);
+	err |= put_user(resp_len, &data->resp_len);
+	return err;
+}
+
+static int compat_get_qseecom_qteec_modfd_req(
+		struct compat_qseecom_qteec_modfd_req __user *data32,
+		struct qseecom_qteec_modfd_req __user *data)
+{
+	compat_uptr_t req_ptr;
+	compat_ulong_t req_len;
+	compat_uptr_t resp_ptr;
+	compat_ulong_t resp_len;
+	compat_long_t fd;
+	compat_ulong_t cmd_buf_offset;
+	int err, i;
+
+	err = get_user(req_ptr, &data32->req_ptr);
+	err |= put_user(NULL, &data->req_ptr);
+	err |= put_user(req_ptr, (compat_uptr_t *)&data->req_ptr);
+	err |= get_user(req_len, &data32->req_len);
+	err |= put_user(req_len, &data->req_len);
+
+	err |= get_user(resp_ptr, &data32->resp_ptr);
+	err |= put_user(NULL, &data->resp_ptr);
+	err |= put_user(resp_ptr, (compat_uptr_t *)&data->resp_ptr);
+	err |= get_user(resp_len, &data32->resp_len);
+	err |= put_user(resp_len, &data->resp_len);
+
+	for (i = 0; i < MAX_ION_FD; i++) {
+		err |= get_user(fd, &data32->ifd_data[i].fd);
+		err |= put_user(fd, &data->ifd_data[i].fd);
+		err |= get_user(cmd_buf_offset,
+				&data32->ifd_data[i].cmd_buf_offset);
+		err |= put_user(cmd_buf_offset,
+				&data->ifd_data[i].cmd_buf_offset);
+	}
+	return err;
+}
+
+static int compat_get_int(compat_int_t __user *data32,
+		int __user *data)
+{
+	compat_int_t x;
+	int err;
+
+	err = get_user(x, data32);
+	err |= put_user(x, data);
+	return err;
+}
+
+static int compat_put_qseecom_load_img_req(
+		struct compat_qseecom_load_img_req __user *data32,
+		struct qseecom_load_img_req __user *data)
+{
+	int err;
+	compat_ulong_t mdt_len;
+	compat_ulong_t img_len;
+	compat_long_t ifd_data_fd;
+	compat_ulong_t app_arch;
+	compat_int_t app_id;
+
+	err = get_user(mdt_len, &data->mdt_len);
+	err |= put_user(mdt_len, &data32->mdt_len);
+	err |= get_user(img_len, &data->img_len);
+	err |= put_user(img_len, &data32->img_len);
+	err |= get_user(ifd_data_fd, &data->ifd_data_fd);
+	err |= put_user(ifd_data_fd, &data32->ifd_data_fd);
+	err |= copy_in_user(data32->img_name, data->img_name,
+				MAX_APP_NAME_SIZE);
+	err |= get_user(app_arch, &data->app_arch);
+	err |= put_user(app_arch, &data32->app_arch);
+	err |= get_user(app_id, &data->app_id);
+	err |= put_user(app_id, &data32->app_id);
+	return err;
+}
+
+static int compat_put_qseecom_qseos_version_req(
+		struct compat_qseecom_qseos_version_req __user *data32,
+		struct qseecom_qseos_version_req __user *data)
+{
+	compat_uint_t qseos_version;
+	int err;
+
+	err = get_user(qseos_version, &data->qseos_version);
+	err |= put_user(qseos_version, &data32->qseos_version);
+	return err;
+}
+
+static int compat_put_qseecom_qseos_app_load_query(
+		struct compat_qseecom_qseos_app_load_query __user *data32,
+		struct qseecom_qseos_app_load_query __user *data)
+{
+	int err = 0;
+	unsigned int i;
+	compat_int_t app_id;
+	compat_ulong_t app_arch;
+	char app_name;
+
+	for (i = 0; i < MAX_APP_NAME_SIZE; i++) {
+		err |= get_user(app_name, &(data->app_name[i]));
+		err |= put_user(app_name, &(data32->app_name[i]));
+	}
+	err |= get_user(app_id, &data->app_id);
+	err |= put_user(app_id, &data32->app_id);
+	err |= get_user(app_arch, &data->app_arch);
+	err |= put_user(app_arch, &data32->app_arch);
+
+	return err;
+}
+
+static int compat_put_qseecom_is_es_activated_req(
+		struct compat_qseecom_is_es_activated_req __user *data32,
+		struct qseecom_is_es_activated_req __user *data)
+{
+	compat_int_t is_activated;
+	int err;
+
+	err = get_user(is_activated, &data->is_activated);
+	err |= put_user(is_activated, &data32->is_activated);
+	return err;
+}
+
+static unsigned int convert_cmd(unsigned int cmd)
+{
+	switch (cmd) {
+	case COMPAT_QSEECOM_IOCTL_REGISTER_LISTENER_REQ:
+		return QSEECOM_IOCTL_REGISTER_LISTENER_REQ;
+	case COMPAT_QSEECOM_IOCTL_UNREGISTER_LISTENER_REQ:
+		return QSEECOM_IOCTL_UNREGISTER_LISTENER_REQ;
+	case COMPAT_QSEECOM_IOCTL_LOAD_APP_REQ:
+		return QSEECOM_IOCTL_LOAD_APP_REQ;
+	case COMPAT_QSEECOM_IOCTL_RECEIVE_REQ:
+		return QSEECOM_IOCTL_RECEIVE_REQ;
+	case COMPAT_QSEECOM_IOCTL_SEND_RESP_REQ:
+		return QSEECOM_IOCTL_SEND_RESP_REQ;
+	case COMPAT_QSEECOM_IOCTL_UNLOAD_APP_REQ:
+		return QSEECOM_IOCTL_UNLOAD_APP_REQ;
+	case COMPAT_QSEECOM_IOCTL_PERF_ENABLE_REQ:
+		return QSEECOM_IOCTL_PERF_ENABLE_REQ;
+	case COMPAT_QSEECOM_IOCTL_PERF_DISABLE_REQ:
+		return QSEECOM_IOCTL_PERF_DISABLE_REQ;
+	case COMPAT_QSEECOM_IOCTL_UNLOAD_EXTERNAL_ELF_REQ:
+		return QSEECOM_IOCTL_UNLOAD_EXTERNAL_ELF_REQ;
+	case COMPAT_QSEECOM_IOCTL_SET_BUS_SCALING_REQ:
+		return QSEECOM_IOCTL_SET_BUS_SCALING_REQ;
+	case COMPAT_QSEECOM_IOCTL_SEND_CMD_REQ:
+		return QSEECOM_IOCTL_SEND_CMD_REQ;
+	case COMPAT_QSEECOM_IOCTL_SEND_MODFD_CMD_REQ:
+		return QSEECOM_IOCTL_SEND_MODFD_CMD_REQ;
+	case COMPAT_QSEECOM_IOCTL_SET_MEM_PARAM_REQ:
+		return QSEECOM_IOCTL_SET_MEM_PARAM_REQ;
+	case COMPAT_QSEECOM_IOCTL_GET_QSEOS_VERSION_REQ:
+		return QSEECOM_IOCTL_GET_QSEOS_VERSION_REQ;
+	case COMPAT_QSEECOM_IOCTL_LOAD_EXTERNAL_ELF_REQ:
+		return QSEECOM_IOCTL_LOAD_EXTERNAL_ELF_REQ;
+	case COMPAT_QSEECOM_IOCTL_APP_LOADED_QUERY_REQ:
+		return QSEECOM_IOCTL_APP_LOADED_QUERY_REQ;
+	case COMPAT_QSEECOM_IOCTL_SEND_CMD_SERVICE_REQ:
+		return QSEECOM_IOCTL_SEND_CMD_SERVICE_REQ;
+	case COMPAT_QSEECOM_IOCTL_CREATE_KEY_REQ:
+		return QSEECOM_IOCTL_CREATE_KEY_REQ;
+	case COMPAT_QSEECOM_IOCTL_WIPE_KEY_REQ:
+		return QSEECOM_IOCTL_WIPE_KEY_REQ;
+	case COMPAT_QSEECOM_IOCTL_UPDATE_KEY_USER_INFO_REQ:
+		return QSEECOM_IOCTL_UPDATE_KEY_USER_INFO_REQ;
+	case COMPAT_QSEECOM_IOCTL_SAVE_PARTITION_HASH_REQ:
+		return QSEECOM_IOCTL_SAVE_PARTITION_HASH_REQ;
+	case COMPAT_QSEECOM_IOCTL_IS_ES_ACTIVATED_REQ:
+		return QSEECOM_IOCTL_IS_ES_ACTIVATED_REQ;
+	case COMPAT_QSEECOM_IOCTL_SEND_MODFD_RESP:
+		return QSEECOM_IOCTL_SEND_MODFD_RESP;
+	case COMPAT_QSEECOM_QTEEC_IOCTL_OPEN_SESSION_REQ:
+		return QSEECOM_QTEEC_IOCTL_OPEN_SESSION_REQ;
+	case COMPAT_QSEECOM_QTEEC_IOCTL_CLOSE_SESSION_REQ:
+		return QSEECOM_QTEEC_IOCTL_CLOSE_SESSION_REQ;
+	case COMPAT_QSEECOM_QTEEC_IOCTL_INVOKE_MODFD_CMD_REQ:
+		return QSEECOM_QTEEC_IOCTL_INVOKE_MODFD_CMD_REQ;
+	case COMPAT_QSEECOM_QTEEC_IOCTL_REQUEST_CANCELLATION_REQ:
+		return QSEECOM_QTEEC_IOCTL_REQUEST_CANCELLATION_REQ;
+	case COMPAT_QSEECOM_IOCTL_MDTP_CIPHER_DIP_REQ:
+		return QSEECOM_IOCTL_MDTP_CIPHER_DIP_REQ;
+	case COMPAT_QSEECOM_IOCTL_SEND_MODFD_CMD_64_REQ:
+		return QSEECOM_IOCTL_SEND_MODFD_CMD_64_REQ;
+	case COMPAT_QSEECOM_IOCTL_SEND_MODFD_RESP_64:
+		return QSEECOM_IOCTL_SEND_MODFD_RESP_64;
+
+	default:
+		return cmd;
+	}
+}
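+
+/*
+ * Note: a COMPAT_* ioctl number can differ from its native counterpart
+ * because _IOWR() encodes sizeof() of the argument struct, and the
+ * 32-bit layouts use 4-byte longs and pointers; convert_cmd()
+ * therefore maps each 32-bit request onto the 64-bit command before
+ * forwarding it to qseecom_ioctl().
+ */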
+
+long compat_qseecom_ioctl(struct file *file,
+		unsigned int cmd, unsigned long arg)
+{
+	long ret;
+
+	switch (cmd) {
+	case COMPAT_QSEECOM_IOCTL_UNREGISTER_LISTENER_REQ:
+	case COMPAT_QSEECOM_IOCTL_RECEIVE_REQ:
+	case COMPAT_QSEECOM_IOCTL_SEND_RESP_REQ:
+	case COMPAT_QSEECOM_IOCTL_UNLOAD_APP_REQ:
+	case COMPAT_QSEECOM_IOCTL_PERF_ENABLE_REQ:
+	case COMPAT_QSEECOM_IOCTL_PERF_DISABLE_REQ:
+	case COMPAT_QSEECOM_IOCTL_UNLOAD_EXTERNAL_ELF_REQ: {
+		return qseecom_ioctl(file, convert_cmd(cmd), 0);
+	}
+	break;
+	case COMPAT_QSEECOM_IOCTL_REGISTER_LISTENER_REQ: {
+		struct compat_qseecom_register_listener_req __user *data32;
+		struct qseecom_register_listener_req __user *data;
+		int err;
+
+		data32 = compat_ptr(arg);
+		data = compat_alloc_user_space(sizeof(*data));
+		if (data == NULL)
+			return -EFAULT;
+
+		err = compat_get_qseecom_register_listener_req(data32, data);
+		if (err)
+			return err;
+
+		return qseecom_ioctl(file, convert_cmd(cmd),
+						(unsigned long)data);
+	}
+	break;
+	case COMPAT_QSEECOM_IOCTL_LOAD_APP_REQ: {
+		struct compat_qseecom_load_img_req __user *data32;
+		struct qseecom_load_img_req __user *data;
+		int err;
+
+		data32 = compat_ptr(arg);
+		data = compat_alloc_user_space(sizeof(*data));
+		if (data == NULL)
+			return -EFAULT;
+
+		err = compat_get_qseecom_load_img_req(data32, data);
+		if (err)
+			return err;
+
+		ret = qseecom_ioctl(file, convert_cmd(cmd),
+						(unsigned long)data);
+		err = compat_put_qseecom_load_img_req(data32, data);
+		return ret ? ret : err;
+	}
+	break;
+	case COMPAT_QSEECOM_IOCTL_SEND_CMD_REQ: {
+		struct compat_qseecom_send_cmd_req __user *data32;
+		struct qseecom_send_cmd_req __user *data;
+		int err;
+
+		data32 = compat_ptr(arg);
+		data = compat_alloc_user_space(sizeof(*data));
+		if (data == NULL)
+			return -EFAULT;
+
+		err = compat_get_qseecom_send_cmd_req(data32, data);
+		if (err)
+			return err;
+
+		return qseecom_ioctl(file, convert_cmd(cmd),
+						(unsigned long)data);
+	}
+	break;
+	case COMPAT_QSEECOM_IOCTL_SEND_MODFD_CMD_REQ:
+	case COMPAT_QSEECOM_IOCTL_SEND_MODFD_CMD_64_REQ: {
+		struct compat_qseecom_send_modfd_cmd_req __user *data32;
+		struct qseecom_send_modfd_cmd_req __user *data;
+		int err;
+
+		data32 = compat_ptr(arg);
+		data = compat_alloc_user_space(sizeof(*data));
+		if (data == NULL)
+			return -EFAULT;
+
+		err = compat_get_qseecom_send_modfd_cmd_req(data32, data);
+		if (err)
+			return err;
+
+		return qseecom_ioctl(file, convert_cmd(cmd),
+						(unsigned long)data);
+	}
+	break;
+	case COMPAT_QSEECOM_IOCTL_SET_MEM_PARAM_REQ: {
+		struct compat_qseecom_set_sb_mem_param_req __user *data32;
+		struct qseecom_set_sb_mem_param_req __user *data;
+		int err;
+
+		data32 = compat_ptr(arg);
+		data = compat_alloc_user_space(sizeof(*data));
+		if (data == NULL)
+			return -EFAULT;
+
+		err = compat_get_qseecom_set_sb_mem_param_req(data32, data);
+		if (err)
+			return err;
+
+		return qseecom_ioctl(file, convert_cmd(cmd),
+						(unsigned long)data);
+	}
+	break;
+	case COMPAT_QSEECOM_IOCTL_GET_QSEOS_VERSION_REQ: {
+		struct compat_qseecom_qseos_version_req __user *data32;
+		struct qseecom_qseos_version_req __user *data;
+		int err;
+
+		data32 = compat_ptr(arg);
+		data = compat_alloc_user_space(sizeof(*data));
+		if (data == NULL)
+			return -EFAULT;
+
+		err = compat_get_qseecom_qseos_version_req(data32, data);
+		if (err)
+			return err;
+
+		ret = qseecom_ioctl(file, convert_cmd(cmd),
+						(unsigned long)data);
+		err = compat_put_qseecom_qseos_version_req(data32, data);
+
+		return ret ? ret : err;
+	}
+	break;
+	case COMPAT_QSEECOM_IOCTL_SET_BUS_SCALING_REQ: {
+		compat_int_t __user *data32;
+		int __user *data;
+		int err;
+
+		data32 = compat_ptr(arg);
+		data = compat_alloc_user_space(sizeof(*data));
+		if (data == NULL)
+			return -EFAULT;
+		err = compat_get_int(data32, data);
+		if (err)
+			return err;
+		return qseecom_ioctl(file, convert_cmd(cmd),
+						(unsigned long)data);
+	}
+	break;
+	case COMPAT_QSEECOM_IOCTL_LOAD_EXTERNAL_ELF_REQ: {
+		struct compat_qseecom_load_img_req __user *data32;
+		struct qseecom_load_img_req __user *data;
+		int err;
+
+		data32 = compat_ptr(arg);
+		data = compat_alloc_user_space(sizeof(*data));
+		if (data == NULL)
+			return -EFAULT;
+
+		err = compat_get_qseecom_load_img_req(data32, data);
+		if (err)
+			return err;
+
+		return qseecom_ioctl(file, convert_cmd(cmd),
+						(unsigned long)data);
+	}
+	break;
+	case COMPAT_QSEECOM_IOCTL_APP_LOADED_QUERY_REQ: {
+		struct compat_qseecom_qseos_app_load_query __user *data32;
+		struct qseecom_qseos_app_load_query __user *data;
+		int err;
+
+		data32 = compat_ptr(arg);
+		data = compat_alloc_user_space(sizeof(*data));
+		if (data == NULL)
+			return -EFAULT;
+
+		err = compat_get_qseecom_qseos_app_load_query(data32, data);
+		if (err)
+			return err;
+
+		ret = qseecom_ioctl(file, convert_cmd(cmd),
+					(unsigned long)data);
+		err = compat_put_qseecom_qseos_app_load_query(data32, data);
+		return ret ? ret : err;
+	}
+	break;
+	case COMPAT_QSEECOM_IOCTL_SEND_CMD_SERVICE_REQ: {
+		struct compat_qseecom_send_svc_cmd_req __user *data32;
+		struct qseecom_send_svc_cmd_req __user *data;
+		int err;
+
+		data32 = compat_ptr(arg);
+		data = compat_alloc_user_space(sizeof(*data));
+		if (data == NULL)
+			return -EFAULT;
+
+		err = compat_get_qseecom_send_svc_cmd_req(data32, data);
+		if (err)
+			return err;
+
+		return qseecom_ioctl(file, convert_cmd(cmd),
+						(unsigned long)data);
+	}
+	break;
+	case COMPAT_QSEECOM_IOCTL_CREATE_KEY_REQ: {
+		struct compat_qseecom_create_key_req __user *data32;
+		struct qseecom_create_key_req __user *data;
+		int err;
+
+		data32 = compat_ptr(arg);
+		data = compat_alloc_user_space(sizeof(*data));
+		if (data == NULL)
+			return -EFAULT;
+
+		err = compat_get_qseecom_create_key_req(data32, data);
+		if (err)
+			return err;
+
+		return qseecom_ioctl(file, convert_cmd(cmd),
+						(unsigned long)data);
+	}
+	break;
+	case COMPAT_QSEECOM_IOCTL_WIPE_KEY_REQ: {
+		struct compat_qseecom_wipe_key_req __user *data32;
+		struct qseecom_wipe_key_req __user *data;
+		int err;
+
+		data32 = compat_ptr(arg);
+		data = compat_alloc_user_space(sizeof(*data));
+		if (data == NULL)
+			return -EFAULT;
+
+		err = compat_get_qseecom_wipe_key_req(data32, data);
+		if (err)
+			return err;
+
+		return qseecom_ioctl(file, convert_cmd(cmd),
+						(unsigned long)data);
+	}
+	break;
+	case COMPAT_QSEECOM_IOCTL_UPDATE_KEY_USER_INFO_REQ: {
+		struct compat_qseecom_update_key_userinfo_req __user *data32;
+		struct qseecom_update_key_userinfo_req __user *data;
+		int err;
+
+		data32 = compat_ptr(arg);
+		data = compat_alloc_user_space(sizeof(*data));
+		if (data == NULL)
+			return -EFAULT;
+
+		err = compat_get_qseecom_update_key_userinfo_req(data32, data);
+		if (err)
+			return err;
+
+		return qseecom_ioctl(file, convert_cmd(cmd),
+						(unsigned long)data);
+	}
+	break;
+	case COMPAT_QSEECOM_IOCTL_SAVE_PARTITION_HASH_REQ: {
+		struct compat_qseecom_save_partition_hash_req __user *data32;
+		struct qseecom_save_partition_hash_req __user *data;
+		int err;
+
+		data32 = compat_ptr(arg);
+		data = compat_alloc_user_space(sizeof(*data));
+		if (data == NULL)
+			return -EFAULT;
+
+		err = compat_get_qseecom_save_partition_hash_req(data32, data);
+		if (err)
+			return err;
+
+		return qseecom_ioctl(file, convert_cmd(cmd),
+						(unsigned long)data);
+	}
+	break;
+	case COMPAT_QSEECOM_IOCTL_IS_ES_ACTIVATED_REQ: {
+		struct compat_qseecom_is_es_activated_req __user *data32;
+		struct qseecom_is_es_activated_req __user *data;
+		int err;
+
+		data32 = compat_ptr(arg);
+		data = compat_alloc_user_space(sizeof(*data));
+		if (data == NULL)
+			return -EFAULT;
+
+		err = compat_get_qseecom_is_es_activated_req(data32, data);
+		if (err)
+			return err;
+
+		ret = qseecom_ioctl(file, convert_cmd(cmd),
+						(unsigned long)data);
+		err = compat_put_qseecom_is_es_activated_req(data32, data);
+		return ret ? ret : err;
+	}
+	break;
+	case COMPAT_QSEECOM_IOCTL_MDTP_CIPHER_DIP_REQ: {
+		struct compat_qseecom_mdtp_cipher_dip_req __user *data32;
+		struct qseecom_mdtp_cipher_dip_req __user *data;
+		int err;
+
+		data32 = compat_ptr(arg);
+		data = compat_alloc_user_space(sizeof(*data));
+		if (data == NULL)
+			return -EFAULT;
+
+		err = compat_get_qseecom_mdtp_cipher_dip_req(data32, data);
+		if (err)
+			return err;
+
+		return qseecom_ioctl(file, convert_cmd(cmd),
+						(unsigned long)data);
+	}
+	break;
+	case COMPAT_QSEECOM_IOCTL_SEND_MODFD_RESP:
+	case COMPAT_QSEECOM_IOCTL_SEND_MODFD_RESP_64: {
+		struct compat_qseecom_send_modfd_listener_resp __user *data32;
+		struct qseecom_send_modfd_listener_resp __user *data;
+		int err;
+
+		data32 = compat_ptr(arg);
+		data = compat_alloc_user_space(sizeof(*data));
+		if (data == NULL)
+			return -EFAULT;
+
+		err = compat_get_qseecom_send_modfd_listener_resp(data32, data);
+		if (err)
+			return err;
+
+		return qseecom_ioctl(file, convert_cmd(cmd),
+						(unsigned long)data);
+	}
+	break;
+	case COMPAT_QSEECOM_QTEEC_IOCTL_CLOSE_SESSION_REQ: {
+		struct compat_qseecom_qteec_req __user *data32;
+		struct qseecom_qteec_req __user *data;
+		int err;
+
+		data32 = compat_ptr(arg);
+		data = compat_alloc_user_space(sizeof(*data));
+		if (data == NULL)
+			return -EFAULT;
+
+		err = compat_get_qseecom_qteec_req(data32, data);
+		if (err)
+			return err;
+
+		return qseecom_ioctl(file, convert_cmd(cmd),
+						(unsigned long)data);
+	}
+	break;
+	case COMPAT_QSEECOM_QTEEC_IOCTL_OPEN_SESSION_REQ:
+	case COMPAT_QSEECOM_QTEEC_IOCTL_INVOKE_MODFD_CMD_REQ:
+	case COMPAT_QSEECOM_QTEEC_IOCTL_REQUEST_CANCELLATION_REQ: {
+		struct compat_qseecom_qteec_modfd_req __user *data32;
+		struct qseecom_qteec_modfd_req __user *data;
+		int err;
+
+		data32 = compat_ptr(arg);
+		data = compat_alloc_user_space(sizeof(*data));
+		if (data == NULL)
+			return -EFAULT;
+
+		err = compat_get_qseecom_qteec_modfd_req(data32, data);
+		if (err)
+			return err;
+
+		return qseecom_ioctl(file, convert_cmd(cmd),
+						(unsigned long)data);
+	}
+	break;
+	default:
+		return -ENOIOCTLCMD;
+	break;
+	}
+	return 0;
+}
+
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/misc/compat_qseecom.h	2019-01-22 16:16:24.699257237 +0100
@@ -0,0 +1,334 @@
+#ifndef _UAPI_COMPAT_QSEECOM_H_
+#define _UAPI_COMPAT_QSEECOM_H_
+
+#include <linux/types.h>
+#include <linux/ioctl.h>
+
+#if IS_ENABLED(CONFIG_COMPAT)
+#include <linux/compat.h>
+
+/*
+ * struct compat_qseecom_register_listener_req -
+ *      for register listener ioctl request
+ * @listener_id - service id (shared between userspace and QSE)
+ * @ifd_data_fd - ion handle
+ * @virt_sb_base - shared buffer base in user space
+ * @sb_size - shared buffer size
+ */
+struct compat_qseecom_register_listener_req {
+	compat_ulong_t listener_id; /* in */
+	compat_long_t ifd_data_fd; /* in */
+	compat_uptr_t virt_sb_base; /* in */
+	compat_ulong_t sb_size; /* in */
+};
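+
+/*
+ * Note: with a 32-bit userspace, compat_ulong_t, compat_long_t and
+ * compat_uptr_t are all 4 bytes, so this layout matches the 32-bit ABI
+ * of struct qseecom_register_listener_req, whereas the native 64-bit
+ * structure uses 8-byte longs and pointers.
+ */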
+
+/*
+ * struct compat_qseecom_send_cmd_req - for send command ioctl request
+ * @cmd_req_len - command buffer length
+ * @cmd_req_buf - command buffer
+ * @resp_len - response buffer length
+ * @resp_buf - response buffer
+ */
+struct compat_qseecom_send_cmd_req {
+	compat_uptr_t cmd_req_buf; /* in */
+	compat_uint_t cmd_req_len; /* in */
+	compat_uptr_t resp_buf; /* in/out */
+	compat_uint_t resp_len; /* in/out */
+};
+
+/*
+ * struct compat_qseecom_ion_fd_info - ion fd handle data information
+ * @fd - ion handle to some memory allocated in user space
+ * @cmd_buf_offset - command buffer offset
+ */
+struct compat_qseecom_ion_fd_info {
+	compat_long_t fd;
+	compat_ulong_t cmd_buf_offset;
+};
+
+/*
+ * struct compat_qseecom_send_modfd_cmd_req - for send command ioctl request
+ * @cmd_req_len - command buffer length
+ * @cmd_req_buf - command buffer
+ * @resp_len - response buffer length
+ * @resp_buf - response buffer
+ * @ifd_data_fd - ion handle to memory allocated in user space
+ * @cmd_buf_offset - command buffer offset
+ */
+struct compat_qseecom_send_modfd_cmd_req {
+	compat_uptr_t cmd_req_buf; /* in */
+	compat_uint_t cmd_req_len; /* in */
+	compat_uptr_t resp_buf; /* in/out */
+	compat_uint_t resp_len; /* in/out */
+	struct compat_qseecom_ion_fd_info ifd_data[MAX_ION_FD];
+};
+
+/*
+ * struct compat_qseecom_send_resp_req - signal to continue the
+ * send_cmd req.
+ * Used as a trigger from the HLOS service to notify QSEECOM that it is
+ * done with its operation and to provide the response so QSEECOM can
+ * continue the incomplete command execution.
+ * @resp_len - Length of the response
+ * @resp_buf - Response buffer where the response of the cmd should go.
+ */
+struct compat_qseecom_send_resp_req {
+	compat_uptr_t resp_buf; /* in */
+	compat_uint_t resp_len; /* in */
+};
+
+/*
+ * struct compat_qseecom_load_img_req - for sending image length
+ * information and the ion file descriptor to the qseecom driver. The
+ * ion file descriptor is used for retrieving the ion file handle and,
+ * in turn, the physical address of the image location.
+ * @mdt_len - Length of the .mdt file in bytes.
+ * @img_len - Length of the .mdt + .b00 + .. + .bxx image files in bytes.
+ * @ifd_data_fd - Ion file descriptor used when allocating memory.
+ * @img_name - Name of the image.
+ */
+struct compat_qseecom_load_img_req {
+	compat_ulong_t mdt_len; /* in */
+	compat_ulong_t img_len; /* in */
+	compat_long_t  ifd_data_fd; /* in */
+	char	 img_name[MAX_APP_NAME_SIZE]; /* in */
+	compat_ulong_t app_arch; /* in */
+	compat_uint_t app_id; /* out*/
+};
+
+struct compat_qseecom_set_sb_mem_param_req {
+	compat_long_t ifd_data_fd; /* in */
+	compat_uptr_t virt_sb_base; /* in */
+	compat_ulong_t sb_len; /* in */
+};
+
+/*
+ * struct compat_qseecom_qseos_version_req - get qseos version
+ * @qseos_version - version number
+ */
+struct compat_qseecom_qseos_version_req {
+	compat_uint_t qseos_version; /* in */
+};
+
+/*
+ * struct compat_qseecom_qseos_app_load_query - verify if app is loaded in qsee
+ * @app_name - name of the app.
+ * @app_id - app id.
+ * @app_arch - app architecture.
+ */
+struct compat_qseecom_qseos_app_load_query {
+	char app_name[MAX_APP_NAME_SIZE]; /* in */
+	compat_uint_t app_id; /* out */
+	compat_ulong_t app_arch;
+};
+
+struct compat_qseecom_send_svc_cmd_req {
+	compat_ulong_t cmd_id;
+	compat_uptr_t cmd_req_buf; /* in */
+	compat_uint_t cmd_req_len; /* in */
+	compat_uptr_t resp_buf; /* in/out */
+	compat_uint_t resp_len; /* in/out */
+};
+
+struct compat_qseecom_create_key_req {
+	unsigned char hash32[QSEECOM_HASH_SIZE];
+	enum qseecom_key_management_usage_type usage;
+};
+
+struct compat_qseecom_wipe_key_req {
+	enum qseecom_key_management_usage_type usage;
+	compat_int_t wipe_key_flag;
+};
+
+struct compat_qseecom_update_key_userinfo_req {
+	unsigned char current_hash32[QSEECOM_HASH_SIZE];
+	unsigned char new_hash32[QSEECOM_HASH_SIZE];
+	enum qseecom_key_management_usage_type usage;
+};
+
+/*
+ * struct compat_qseecom_save_partition_hash_req
+ * @partition_id - partition id.
+ * @digest - sha256 digest.
+ */
+struct compat_qseecom_save_partition_hash_req {
+	compat_int_t partition_id; /* in */
+	char digest[SHA256_DIGEST_LENGTH]; /* in */
+};
+
+/*
+ * struct compat_qseecom_is_es_activated_req
+ * @is_activated - 1=true , 0=false
+ */
+struct compat_qseecom_is_es_activated_req {
+	compat_int_t is_activated; /* out */
+};
+
+/*
+ * struct compat_qseecom_mdtp_cipher_dip_req
+ * @in_buf - input buffer
+ * @in_buf_size - input buffer size
+ * @out_buf - output buffer
+ * @out_buf_size - output buffer size
+ * @direction - 0=encrypt, 1=decrypt
+ */
+struct compat_qseecom_mdtp_cipher_dip_req {
+	compat_uptr_t in_buf;
+	compat_uint_t in_buf_size;
+	compat_uptr_t out_buf;
+	compat_uint_t out_buf_size;
+	compat_uint_t direction;
+};
+
+/*
+ * struct compat_qseecom_send_modfd_listener_resp - for listener response
+ * @resp_buf_ptr - response buffer
+ * @resp_len - response buffer length
+ * @ifd_data - ion fd info array (ion handle + command buffer offset)
+ */
+struct compat_qseecom_send_modfd_listener_resp {
+	compat_uptr_t resp_buf_ptr; /* in */
+	compat_uint_t resp_len; /* in */
+	struct compat_qseecom_ion_fd_info ifd_data[MAX_ION_FD]; /* in */
+};
+
+struct compat_qseecom_qteec_req {
+	compat_uptr_t req_ptr;
+	compat_ulong_t req_len;
+	compat_uptr_t resp_ptr;
+	compat_ulong_t resp_len;
+};
+
+struct compat_qseecom_qteec_modfd_req {
+	compat_uptr_t req_ptr;
+	compat_ulong_t req_len;
+	compat_uptr_t resp_ptr;
+	compat_ulong_t resp_len;
+	struct compat_qseecom_ion_fd_info ifd_data[MAX_ION_FD];
+};
+
+struct compat_qseecom_ce_pipe_entry {
+	compat_int_t valid;
+	compat_uint_t ce_num;
+	compat_uint_t ce_pipe_pair;
+};
+
+struct compat_qseecom_ce_info_req {
+	unsigned char handle[MAX_CE_INFO_HANDLE_SIZE];
+	compat_uint_t usage;
+	compat_uint_t unit_num;
+	compat_uint_t num_ce_pipe_entries;
+	struct compat_qseecom_ce_pipe_entry
+				ce_pipe_entry[MAX_CE_PIPE_PAIR_PER_UNIT];
+};
+
+struct file;
+extern long compat_qseecom_ioctl(struct file *file,
+					unsigned int cmd, unsigned long arg);
+
+#define COMPAT_QSEECOM_IOCTL_REGISTER_LISTENER_REQ \
+	_IOWR(QSEECOM_IOC_MAGIC, 1, struct compat_qseecom_register_listener_req)
+
+#define COMPAT_QSEECOM_IOCTL_UNREGISTER_LISTENER_REQ \
+	_IO(QSEECOM_IOC_MAGIC, 2)
+
+#define COMPAT_QSEECOM_IOCTL_SEND_CMD_REQ \
+	_IOWR(QSEECOM_IOC_MAGIC, 3, struct compat_qseecom_send_cmd_req)
+
+#define COMPAT_QSEECOM_IOCTL_SEND_MODFD_CMD_REQ \
+	_IOWR(QSEECOM_IOC_MAGIC, 4, struct compat_qseecom_send_modfd_cmd_req)
+
+#define COMPAT_QSEECOM_IOCTL_RECEIVE_REQ \
+	_IO(QSEECOM_IOC_MAGIC, 5)
+
+#define COMPAT_QSEECOM_IOCTL_SEND_RESP_REQ \
+	_IO(QSEECOM_IOC_MAGIC, 6)
+
+#define COMPAT_QSEECOM_IOCTL_LOAD_APP_REQ \
+	_IOWR(QSEECOM_IOC_MAGIC, 7, struct compat_qseecom_load_img_req)
+
+#define COMPAT_QSEECOM_IOCTL_SET_MEM_PARAM_REQ \
+	_IOWR(QSEECOM_IOC_MAGIC, 8, struct compat_qseecom_set_sb_mem_param_req)
+
+#define COMPAT_QSEECOM_IOCTL_UNLOAD_APP_REQ \
+	_IO(QSEECOM_IOC_MAGIC, 9)
+
+#define COMPAT_QSEECOM_IOCTL_GET_QSEOS_VERSION_REQ \
+	_IOWR(QSEECOM_IOC_MAGIC, 10, struct compat_qseecom_qseos_version_req)
+
+#define COMPAT_QSEECOM_IOCTL_PERF_ENABLE_REQ \
+	_IO(QSEECOM_IOC_MAGIC, 11)
+
+#define COMPAT_QSEECOM_IOCTL_PERF_DISABLE_REQ \
+	_IO(QSEECOM_IOC_MAGIC, 12)
+
+#define COMPAT_QSEECOM_IOCTL_LOAD_EXTERNAL_ELF_REQ \
+	_IOWR(QSEECOM_IOC_MAGIC, 13, struct compat_qseecom_load_img_req)
+
+#define COMPAT_QSEECOM_IOCTL_UNLOAD_EXTERNAL_ELF_REQ \
+	_IO(QSEECOM_IOC_MAGIC, 14)
+
+#define COMPAT_QSEECOM_IOCTL_APP_LOADED_QUERY_REQ \
+	_IOWR(QSEECOM_IOC_MAGIC, 15, struct compat_qseecom_qseos_app_load_query)
+
+#define COMPAT_QSEECOM_IOCTL_SEND_CMD_SERVICE_REQ \
+	_IOWR(QSEECOM_IOC_MAGIC, 16, struct compat_qseecom_send_svc_cmd_req)
+
+#define COMPAT_QSEECOM_IOCTL_CREATE_KEY_REQ \
+	_IOWR(QSEECOM_IOC_MAGIC, 17, struct compat_qseecom_create_key_req)
+
+#define COMPAT_QSEECOM_IOCTL_WIPE_KEY_REQ \
+	_IOWR(QSEECOM_IOC_MAGIC, 18, struct compat_qseecom_wipe_key_req)
+
+#define COMPAT_QSEECOM_IOCTL_SAVE_PARTITION_HASH_REQ \
+	_IOWR(QSEECOM_IOC_MAGIC, 19, \
+				struct compat_qseecom_save_partition_hash_req)
+
+#define COMPAT_QSEECOM_IOCTL_IS_ES_ACTIVATED_REQ \
+	_IOWR(QSEECOM_IOC_MAGIC, 20, struct compat_qseecom_is_es_activated_req)
+
+#define COMPAT_QSEECOM_IOCTL_SEND_MODFD_RESP \
+	_IOWR(QSEECOM_IOC_MAGIC, 21, \
+				struct compat_qseecom_send_modfd_listener_resp)
+
+#define COMPAT_QSEECOM_IOCTL_SET_BUS_SCALING_REQ \
+	_IOWR(QSEECOM_IOC_MAGIC, 23, int)
+
+#define COMPAT_QSEECOM_IOCTL_UPDATE_KEY_USER_INFO_REQ \
+	_IOWR(QSEECOM_IOC_MAGIC, 24, \
+			struct compat_qseecom_update_key_userinfo_req)
+
+#define COMPAT_QSEECOM_QTEEC_IOCTL_OPEN_SESSION_REQ \
+	_IOWR(QSEECOM_IOC_MAGIC, 30, struct compat_qseecom_qteec_modfd_req)
+
+#define COMPAT_QSEECOM_QTEEC_IOCTL_CLOSE_SESSION_REQ \
+	_IOWR(QSEECOM_IOC_MAGIC, 31, struct compat_qseecom_qteec_req)
+
+#define COMPAT_QSEECOM_QTEEC_IOCTL_INVOKE_MODFD_CMD_REQ \
+	_IOWR(QSEECOM_IOC_MAGIC, 32, struct compat_qseecom_qteec_modfd_req)
+
+#define COMPAT_QSEECOM_QTEEC_IOCTL_REQUEST_CANCELLATION_REQ \
+	_IOWR(QSEECOM_IOC_MAGIC, 33, struct compat_qseecom_qteec_modfd_req)
+
+#define COMPAT_QSEECOM_IOCTL_MDTP_CIPHER_DIP_REQ \
+	_IOWR(QSEECOM_IOC_MAGIC, 34, struct compat_qseecom_mdtp_cipher_dip_req)
+
+#define COMPAT_QSEECOM_IOCTL_SEND_MODFD_CMD_64_REQ \
+	_IOWR(QSEECOM_IOC_MAGIC, 35, struct compat_qseecom_send_modfd_cmd_req)
+
+#define COMPAT_QSEECOM_IOCTL_SEND_MODFD_RESP_64 \
+	_IOWR(QSEECOM_IOC_MAGIC, 36, \
+				struct compat_qseecom_send_modfd_listener_resp)
+#define COMPAT_QSEECOM_IOCTL_GET_CE_PIPE_INFO \
+	_IOWR(QSEECOM_IOC_MAGIC, 40, \
+				struct compat_qseecom_ce_info_req)
+#define COMPAT_QSEECOM_IOCTL_FREE_CE_PIPE_INFO \
+	_IOWR(QSEECOM_IOC_MAGIC, 41, \
+				struct compat_qseecom_ce_info_req)
+#define COMPAT_QSEECOM_IOCTL_QUERY_CE_PIPE_INFO \
+	_IOWR(QSEECOM_IOC_MAGIC, 42, \
+				struct compat_qseecom_ce_info_req)
+
+#endif
+#endif /* _UAPI_COMPAT_QSEECOM_H_ */
+
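For orientation, a hedged sketch of the command translation this header enables. The real convert_cmd() lives in compat_qseecom.c earlier in this patch; the sketch assumes the native QSEECOM_IOCTL_* / QSEECOM_QTEEC_IOCTL_* commands come from <linux/qseecom.h>, and the _sketch name is hypothetical:

	static unsigned int convert_cmd_sketch(unsigned int cmd)
	{
		switch (cmd) {
		case COMPAT_QSEECOM_IOCTL_REGISTER_LISTENER_REQ:
			return QSEECOM_IOCTL_REGISTER_LISTENER_REQ;
		case COMPAT_QSEECOM_IOCTL_SEND_CMD_REQ:
			return QSEECOM_IOCTL_SEND_CMD_REQ;
		case COMPAT_QSEECOM_QTEEC_IOCTL_OPEN_SESSION_REQ:
			return QSEECOM_QTEEC_IOCTL_OPEN_SESSION_REQ;
		/* ... one case per COMPAT_* command defined above ... */
		default:
			return 0;
		}
	}

The command values differ between the 32- and 64-bit ABIs because _IOWR() encodes sizeof() of the argument type, and the compat structs shrink pointers and longs to 32 bits.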
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/misc/fbxserial_of.c	2019-01-22 16:16:24.707257309 +0100
@@ -0,0 +1,35 @@
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <linux/fbxserial.h>
+#include <linux/random.h>
+
+static struct fbx_serial serial_info;
+
+const struct fbx_serial *arch_get_fbxserial(void)
+{
+	return &serial_info;
+}
+EXPORT_SYMBOL(arch_get_fbxserial);
+
+/*
+ *
+ */
+static __init int fbxserial_of_read(void)
+{
+	struct device_node *np;
+	const void *fbxserial_data;
+	int len;
+
+	np = of_find_node_by_path("/chosen");
+	if (!np)
+		return 0;
+
+	fbxserial_data = of_get_property(np, "fbx,serialinfo", &len);
+	fbxserialinfo_read(fbxserial_data, &serial_info);
+	add_device_randomness(&serial_info, sizeof (serial_info));
+
+	return 0;
+}
+
+arch_initcall(fbxserial_of_read);
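A hedged usage sketch for the accessor above: struct fbx_serial's layout comes from <linux/fbxserial.h> and is not shown in this patch, so the example (hypothetical function name) only takes a reference rather than touching fields:

	#include <linux/fbxserial.h>

	static void fbxserial_example_consumer(void)
	{
		const struct fbx_serial *s = arch_get_fbxserial();

		/* s points at the static serial_info parsed at
		 * arch_initcall time above and stays valid for the
		 * lifetime of the kernel. */
		(void)s;
	}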
diff -Nruw linux-4.4.115-fbx/drivers/misc/freebox./Kconfig linux-4.4.115-fbx/drivers/misc/freebox/Kconfig
--- linux-4.4.115-fbx/drivers/misc/freebox./Kconfig	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/misc/freebox/Kconfig	2019-01-22 16:16:24.707257309 +0100
@@ -0,0 +1,9 @@
+config NET_RTL8367C_SPI
+	tristate "Realtek RTL8367C ethernet switch with SPI management"
+	select SPI
+	---help---
+	  This enables support for the Realtek RTL8367C ethernet switch.
+
+config NET_RTL8367C_SPI_CONFIG
+	bool "do initial switch configuration"
+	depends on NET_RTL8367C_SPI
diff -Nruw linux-4.4.115-fbx/drivers/misc/freebox./Makefile linux-4.4.115-fbx/drivers/misc/freebox/Makefile
--- linux-4.4.115-fbx/drivers/misc/freebox./Makefile	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/misc/freebox/Makefile	2019-01-22 16:16:24.707257309 +0100
@@ -0,0 +1,37 @@
+obj-$(CONFIG_NET_RTL8367C_SPI) += rtl8367c.o
+
+rtl8367c-objs += rtl8367c-spi.o
+
+ifeq ($(CONFIG_NET_RTL8367C_SPI_CONFIG),y)
+ccflags-y += -Idrivers/misc/freebox/rtlapi -DSPI_FBX_OPERATION
+rtl8367c-objs += \
+	rtlapi/acl.o rtlapi/cpu.o rtlapi/dot1x.o rtlapi/eee.o	\
+	rtlapi/i2c.o rtlapi/igmp.o rtlapi/interrupt.o rtlapi/l2.o	\
+	rtlapi/leaky.o rtlapi/led.o rtlapi/mirror.o rtlapi/oam.o	\
+	rtlapi/port.o rtlapi/ptp.o rtlapi/qos.o rtlapi/rate.o		\
+	rtlapi/rldp.o rtlapi/rtk_switch.o				\
+	rtlapi/rtl8367c_asicdrv_acl.o rtlapi/rtl8367c_asicdrv.o		\
+	rtlapi/rtl8367c_asicdrv_cputag.o				\
+	rtlapi/rtl8367c_asicdrv_dot1x.o rtlapi/rtl8367c_asicdrv_eav.o	\
+	rtlapi/rtl8367c_asicdrv_eee.o rtlapi/rtl8367c_asicdrv_fc.o	\
+	rtlapi/rtl8367c_asicdrv_green.o rtlapi/rtl8367c_asicdrv_hsb.o	\
+	rtlapi/rtl8367c_asicdrv_i2c.o rtlapi/rtl8367c_asicdrv_igmp.o	\
+	rtlapi/rtl8367c_asicdrv_inbwctrl.o				\
+	rtlapi/rtl8367c_asicdrv_interrupt.o				\
+	rtlapi/rtl8367c_asicdrv_led.o rtlapi/rtl8367c_asicdrv_lut.o	\
+	rtlapi/rtl8367c_asicdrv_meter.o rtlapi/rtl8367c_asicdrv_mib.o	\
+	rtlapi/rtl8367c_asicdrv_mirror.o				\
+	rtlapi/rtl8367c_asicdrv_misc.o rtlapi/rtl8367c_asicdrv_oam.o	\
+	rtlapi/rtl8367c_asicdrv_phy.o rtlapi/rtl8367c_asicdrv_port.o	\
+	rtlapi/rtl8367c_asicdrv_portIsolation.o				\
+	rtlapi/rtl8367c_asicdrv_qos.o rtlapi/rtl8367c_asicdrv_rldp.o	\
+	rtlapi/rtl8367c_asicdrv_rma.o					\
+	rtlapi/rtl8367c_asicdrv_scheduling.o				\
+	rtlapi/rtl8367c_asicdrv_storm.o					\
+	rtlapi/rtl8367c_asicdrv_svlan.o					\
+	rtlapi/rtl8367c_asicdrv_trunking.o				\
+	rtlapi/rtl8367c_asicdrv_unknownMulticast.o			\
+	rtlapi/rtl8367c_asicdrv_vlan.o rtlapi/smi.o rtlapi/stat.o	\
+	rtlapi/storm.o rtlapi/svlan.o rtlapi/trap.o rtlapi/trunk.o	\
+	rtlapi/vlan.o
+endif
diff -Nruw linux-4.4.115-fbx/drivers/misc/freebox./rtl8367c-spi.c linux-4.4.115-fbx/drivers/misc/freebox/rtl8367c-spi.c
--- linux-4.4.115-fbx/drivers/misc/freebox./rtl8367c-spi.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/misc/freebox/rtl8367c-spi.c	2019-01-22 16:16:24.707257309 +0100
@@ -0,0 +1,618 @@
+/*
+ * Copyright (C) 2017 Freebox SAS, Arnaud Vrac <avrac@freebox.fr>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/gpio/consumer.h>
+#include <linux/spi/spi.h>
+#include <linux/fs.h>
+#include <linux/of.h>
+#include <linux/delay.h>
+#include <linux/uaccess.h>
+#include <linux/miscdevice.h>
+#include <linux/rtl8367c_ioctl.h>
+
+#define RTL8367C_MAX_PORTS			4
+
+#define RTL8367C_CHIP_NUMBER_REG		0x1300
+#define RTL8367C_CHIP_VER_REG			0x1301
+
+#define RTL8367C_PHY_AD_REG			0x130f
+#define   RTL8367C_PHY_AD_PWRDN_SHIFT		5
+#define   RTL8367C_PHY_AD_PWRDN_MASK		0x20
+
+#define RTL8367C_MAGIC_ID_REG			0x13c2
+#define   RTL8367C_MAGIC_ID_VAL			0x0249
+
+#define RTL8367C_IA_CTRL_REG			0x1f00
+#define   RTL8367C_IA_CTRL_RW_SHIFT		1
+#define   RTL8367C_IA_CTRL_RW_MASK		0x2
+#define   RTL8367C_IA_CTRL_RW_READ		(0 << RTL8367C_IA_CTRL_RW_SHIFT)
+#define   RTL8367C_IA_CTRL_RW_WRITE		(1 << RTL8367C_IA_CTRL_RW_SHIFT)
+#define   RTL8367C_IA_CTRL_CMD_SHIFT		0
+#define   RTL8367C_IA_CTRL_CMD_MASK		0x1
+
+#define RTL8367C_IA_STATUS_REG			0x1f01
+#define   RTL8367C_IA_STATUS_SHIFT		2
+#define   RTL8367C_IA_STATUS_MASK		0x7
+#define   RTL8367C_IA_STATUS_PHY_BUSY		BIT(2)
+#define   RTL8367C_IA_STATUS_SDS_BUSY		BIT(1)
+#define   RTL8367C_IA_STATUS_MDX_BUSY		BIT(0)
+
+#define RTL8367C_IA_ADDRESS_REG			0x1f02
+#define RTL8367C_IA_WRITE_DATA_REG		0x1f03
+#define RTL8367C_IA_READ_DATA_REG		0x1f04
+
+#define RTL8367C_PHY_BASE			0x2000
+#define RTL8367C_PHY_SHIFT			5
+#define RTL8367C_INTERNAL_PHY_REG(_addr, _reg) \
+	(RTL8367C_PHY_BASE + ((_addr) << RTL8367C_PHY_SHIFT) + (_reg))
+
+#define RTL8367C_PHY_REG_MAX			31
+#define RTL8367C_PHY_ADDR_MAX			7
+
+struct rtl8367c {
+	struct gpio_desc	*reset_gpio;
+	struct spi_device	*spi;
+	struct device		*dev;
+	struct list_head	next;
+};
+
+/*
+ * FIXME: this is board specific
+ */
+#define CPU_PORT				0
+#define SIDE_PORT				1
+#define FREEPLUG_PORT				2
+#define REAR_PORT				3
+
+static LIST_HEAD(device_list);
+static struct miscdevice miscdev;
+
+static int
+rtl8367c_read_reg(struct rtl8367c *chip, u16 reg)
+{
+	u8 buf[] = { 0x03, reg >> 8, reg & 0xff };
+	__be16 value;
+	int ret;
+
+	ret = spi_write_then_read(chip->spi, buf, sizeof (buf), &value, 2);
+	if (ret < 0)
+		return ret;
+
+	return be16_to_cpu(value);
+}
+
+static int
+rtl8367c_write_reg(struct rtl8367c *chip, u16 reg, u16 value)
+{
+	u8 buf[] = { 0x02, reg >> 8, reg & 0xff, value >> 8, value & 0xff };
+
+	return spi_write(chip->spi, buf, sizeof (buf));
+}
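
To make the wire format above concrete: rtl8367c_read_reg() clocks out opcode 0x03 followed by the big-endian 16-bit register address and reads back a big-endian 16-bit value, while rtl8367c_write_reg() clocks out opcode 0x02, the address and the value. So reading the chip-number register 0x1300 puts { 0x03, 0x13, 0x00 } on the bus, and writing 0x0249 to 0x13c2 puts { 0x02, 0x13, 0xc2, 0x02, 0x49 }. A hedged usage sketch (hypothetical helper name; the chip-id check mirrors rtl8367c_config() below):

	static int rtl8367c_identify_sketch(struct rtl8367c *chip)
	{
		int id = rtl8367c_read_reg(chip, RTL8367C_CHIP_NUMBER_REG);

		if (id < 0)
			return id;		/* SPI transfer failed */

		return (id == 0x6367) ? 0 : -ENXIO;
	}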
+
+static inline int
+rtl8367c_write_reg_mask(struct rtl8367c *chip, u16 reg,
+			u16 value, u16 mask)
+{
+	int status;
+	status = rtl8367c_read_reg(chip, reg);
+	if (status < 0)
+		return status;
+
+	status &= ~mask;
+	value &= mask;
+
+	return rtl8367c_write_reg(chip, reg, status | value);
+}
+
+static int
+rtl8367c_wait_phy(struct rtl8367c *chip)
+{
+	int ret;
+	int timeout = 5;
+
+	while (1) {
+		ret = rtl8367c_read_reg(chip, RTL8367C_IA_STATUS_REG);
+		if (ret < 0)
+			return ret;
+
+		if ((ret & RTL8367C_IA_STATUS_PHY_BUSY) == 0)
+			break;
+
+		if (!timeout--)
+			return -ETIMEDOUT;
+
+		udelay(1);
+	}
+
+	return 0;
+}
+
+static int
+rtl8367c_read_phy_reg(struct rtl8367c *chip, u8 phy_addr,
+		      u8 phy_reg)
+{
+	int ret;
+
+	if (phy_addr > RTL8367C_PHY_ADDR_MAX)
+		return -EINVAL;
+
+	if (phy_reg > RTL8367C_PHY_REG_MAX)
+		return -EINVAL;
+
+	ret = rtl8367c_read_reg(chip, RTL8367C_IA_STATUS_REG);
+	if (ret < 0)
+		return ret;
+
+	if (ret & RTL8367C_IA_STATUS_PHY_BUSY)
+		return -ETIMEDOUT;
+
+	ret = rtl8367c_write_reg(chip, RTL8367C_IA_ADDRESS_REG,
+				 RTL8367C_INTERNAL_PHY_REG(phy_addr, phy_reg));
+	if (ret < 0)
+		return ret;
+
+	ret = rtl8367c_write_reg(chip, RTL8367C_IA_CTRL_REG,
+				 RTL8367C_IA_CTRL_CMD_MASK |
+				 RTL8367C_IA_CTRL_RW_READ);
+	if (ret < 0)
+		return ret;
+
+	ret = rtl8367c_wait_phy(chip);
+	if (ret < 0)
+		return ret;
+
+	return rtl8367c_read_reg(chip, RTL8367C_IA_READ_DATA_REG);
+}
+
+static int
+rtl8367c_write_phy_reg(struct rtl8367c *chip, u8 phy_addr,
+		       u8 phy_reg, u16 value)
+{
+	int ret;
+
+	if (phy_addr > RTL8367C_PHY_ADDR_MAX)
+		return -EINVAL;
+
+	if (phy_reg > RTL8367C_PHY_REG_MAX)
+		return -EINVAL;
+
+	ret = rtl8367c_read_reg(chip, RTL8367C_IA_STATUS_REG);
+	if (ret < 0)
+		return ret;
+
+	if (ret & RTL8367C_IA_STATUS_PHY_BUSY)
+		return -ETIMEDOUT;
+
+	ret = rtl8367c_write_reg(chip, RTL8367C_IA_WRITE_DATA_REG, value);
+	if (ret < 0)
+		return ret;
+
+	ret = rtl8367c_write_reg(chip, RTL8367C_IA_ADDRESS_REG,
+				 RTL8367C_INTERNAL_PHY_REG(phy_addr, phy_reg));
+	if (ret < 0)
+		return ret;
+
+	ret = rtl8367c_write_reg(chip, RTL8367C_IA_CTRL_REG,
+				 RTL8367C_IA_CTRL_CMD_MASK |
+				 RTL8367C_IA_CTRL_RW_WRITE);
+	if (ret < 0)
+		return ret;
+
+	ret = rtl8367c_wait_phy(chip);
+	if (ret < 0)
+		return ret;
+
+	return 0;
+}
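
A hedged usage sketch of the indirect-access window above: reading the standard BMSR (MII register 1, constants from <linux/mii.h>) of an embedded PHY to check link state. The helper name is hypothetical:

	#include <linux/mii.h>

	static int rtl8367c_phy_link_up_sketch(struct rtl8367c *chip, u8 phy)
	{
		int bmsr = rtl8367c_read_phy_reg(chip, phy, MII_BMSR);

		if (bmsr < 0)
			return bmsr;

		return !!(bmsr & BMSR_LSTATUS);	/* 1 = link up */
	}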
+
+static int
+rtl8367c_enable_phys(struct rtl8367c *chip, unsigned int enable_mask)
+{
+	int phy_num, ret;
+
+	ret = rtl8367c_write_reg_mask(chip, RTL8367C_PHY_AD_REG,
+				      1 << RTL8367C_PHY_AD_PWRDN_SHIFT,
+				      RTL8367C_PHY_AD_PWRDN_MASK);
+	if (ret < 0) {
+		dev_err(chip->dev, "Failed to enable embedded PHYs\n");
+		return ret;
+	}
+
+	for (phy_num = 0; phy_num < RTL8367C_MAX_PORTS; phy_num++) {
+		if (!((1 << phy_num) & enable_mask))
+			continue;
+
+		ret = rtl8367c_write_phy_reg(chip, phy_num, 0, 0x1140);
+		if (ret < 0) {
+			dev_warn(chip->dev, "Failed to power up PHY %d\n",
+				 phy_num);
+		}
+	}
+
+	return 0;
+}
+
+static void
+rtl8367c_do_reset(struct rtl8367c *chip)
+{
+	dev_dbg(chip->dev, "reset");
+	gpiod_set_value_cansleep(chip->reset_gpio, 1);
+	usleep_range(10000, 11000);
+	gpiod_set_value_cansleep(chip->reset_gpio, 0);
+	msleep(1000);
+}
+
+#ifdef CONFIG_NET_RTL8367C_SPI_CONFIG
+#include "rtk_error.h"
+#include "rtk_switch.h"
+#include "cpu.h"
+#include "l2.h"
+#include "vlan.h"
+
+static struct rtl8367c *gchip;
+
+extern void rtk_api_read_reg_wrapper(rtk_uint32 *, rtk_uint16);
+void rtk_api_read_reg_wrapper(rtk_uint32 *val32, rtk_uint16 reg)
+{
+	int ret;
+
+	ret = rtl8367c_read_reg(gchip, reg);
+	if (ret < 0) {
+		dev_err(gchip->dev, "read failed: %d\n", ret);
+		*val32 = 0xffff;
+	} else
+		*val32 = ret;
+}
+
+extern void rtk_api_write_reg_wrapper(rtk_uint32, rtk_uint16);
+void rtk_api_write_reg_wrapper(rtk_uint32 val32, rtk_uint16 reg)
+{
+	int ret;
+
+	ret = rtl8367c_write_reg(gchip, reg, val32);
+	if (ret < 0)
+		dev_err(gchip->dev, "write failed: %d\n", ret);
+}
+
+static int
+rtl8367c_config(struct rtl8367c *chip)
+{
+	rtk_vlan_cfg_t vlan1, vlan41;
+	size_t i;
+	u16 chip_num;
+	u16 chip_ver;
+	int ret;
+
+	rtl8367c_do_reset(chip);
+
+	ret = rtl8367c_write_reg(chip, RTL8367C_MAGIC_ID_REG,
+				 RTL8367C_MAGIC_ID_VAL);
+	if (ret < 0)
+		return ret;
+
+	ret = rtl8367c_read_reg(chip, RTL8367C_CHIP_NUMBER_REG);
+	if (ret < 0) {
+		dev_err(chip->dev, "failed to read chip number: %d\n", ret);
+		return ret;
+	}
+
+	chip_num = ret;
+
+	ret = rtl8367c_read_reg(chip, RTL8367C_CHIP_VER_REG);
+	if (ret < 0) {
+		dev_err(chip->dev, "failed to read chip version: %d\n", ret);
+		return ret;
+	}
+
+	chip_ver = ret;
+
+	if (chip_num != 0x6367) {
+		dev_err(chip->dev, "unsupported chip id %04x\n", chip_num);
+		return -ENXIO;
+	}
+
+	dev_info(chip->dev, "RTL8367C detected, chip id=%04x ver=%04x\n",
+		 chip_num, chip_ver);
+
+	ret = rtl8367c_enable_phys(chip, (1 << CPU_PORT) | (1 << SIDE_PORT));
+	if (ret)
+		return ret;
+
+	gchip = chip;
+	if (rtk_switch_init() != RT_ERR_OK) {
+		dev_err(chip->dev, "rtk_switch_init failed\n");
+		return -EIO;
+	}
+
+	if (rtk_l2_init() != RT_ERR_OK) {
+		dev_err(chip->dev, "rtk_l2_init failed\n");
+		return -EIO;
+	}
+
+	if (rtk_vlan_init() != RT_ERR_OK) {
+		dev_err(chip->dev, "rtk_vlan_init failed\n");
+		return -EIO;
+	}
+
+	memset(&vlan1, 0x00, sizeof(rtk_vlan_cfg_t));
+	RTK_PORTMASK_PORT_SET(vlan1.mbr, CPU_PORT);
+	RTK_PORTMASK_PORT_SET(vlan1.mbr, REAR_PORT);
+	RTK_PORTMASK_PORT_SET(vlan1.mbr, FREEPLUG_PORT);
+	RTK_PORTMASK_PORT_SET(vlan1.untag, CPU_PORT);
+	RTK_PORTMASK_PORT_SET(vlan1.untag, REAR_PORT);
+	RTK_PORTMASK_PORT_SET(vlan1.untag, FREEPLUG_PORT);
+	vlan1.ivl_en = 1;
+	rtk_vlan_set(1, &vlan1);
+
+	rtk_vlan_portPvid_set(CPU_PORT, 1, 0);
+	rtk_vlan_portPvid_set(REAR_PORT, 1, 0);
+	rtk_vlan_portPvid_set(FREEPLUG_PORT, 1, 0);
+
+	memset(&vlan41, 0x00, sizeof(rtk_vlan_cfg_t));
+	RTK_PORTMASK_PORT_SET(vlan41.mbr, CPU_PORT);
+	RTK_PORTMASK_PORT_SET(vlan41.mbr, SIDE_PORT);
+	RTK_PORTMASK_PORT_SET(vlan41.untag, SIDE_PORT);
+	vlan41.ivl_en = 1;
+	rtk_vlan_set(41, &vlan41);
+
+	rtk_vlan_portPvid_set(SIDE_PORT, 41, 0);
+
+	for (i = 0; i < 4; i++) {
+		if (i == CPU_PORT)
+			continue;
+
+		if (rtk_vlan_portIgrFilterEnable_set(i, 1) != RT_ERR_OK)
+			dev_err(chip->dev, "rtk_vlan_portIgrFilterEnable_set failed\n");
+
+		if (rtk_vlan_portAcceptFrameType_set(i, ACCEPT_FRAME_TYPE_UNTAG_ONLY) != RT_ERR_OK)
+			dev_err(chip->dev, "rtk_vlan_portAcceptFrameType_set failed\n");
+	}
+	rtk_vlan_egrFilterEnable_set(1);
+
+	return 0;
+}
+#endif
+
+/*
+ *
+ */
+static long
+rtl8367c_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+	void __user *p = (void __user *)arg;
+	struct rtl8367c *chip;
+	struct rtl8367c_pioctl pioc;
+	bool found;
+	int ret;
+
+	if (!capable(CAP_SYS_RAWIO))
+		return -EPERM;
+
+	ret = copy_from_user(&pioc, p, sizeof (pioc));
+	if (ret)
+		return -EFAULT;
+
+	found = false;
+	list_for_each_entry(chip, &device_list, next) {
+		if (chip->spi->master->bus_num == pioc.bus &&
+		    chip->spi->chip_select == pioc.cs) {
+			found = true;
+			break;
+		}
+	}
+
+	if (!found)
+		return -ENOENT;
+
+	switch (cmd) {
+	case RTL8367C_IOC_OP_REG_READ:
+	{
+		u16 val;
+
+		if (pioc.len != 2)
+			return -EINVAL;
+
+		ret = rtl8367c_read_reg(chip, pioc.offset);
+		if (ret < 0)
+			return ret;
+
+		val = ret;
+		if (copy_to_user(pioc.buf_addr, &val, sizeof (val)))
+			return -EFAULT;
+
+		return 0;
+	}
+
+	case RTL8367C_IOC_OP_REG_WRITE:
+	{
+		u16 val;
+
+		if (pioc.len != 2)
+			return -EINVAL;
+
+		if (copy_from_user(&val, pioc.buf_addr, sizeof (val)))
+			return -EFAULT;
+
+		return rtl8367c_write_reg(chip, pioc.offset, val);
+	}
+
+	case RTL8367C_IOC_OP_SPI_READ:
+	{
+		u8 *buf;
+
+		if (pioc.len > 128)
+			return -EINVAL;
+
+		buf = vmalloc(pioc.len);
+		if (!buf)
+			return -ENOMEM;
+
+		ret = spi_write_then_read(chip->spi, NULL, 0, buf, pioc.len);
+		if (ret < 0) {
+			vfree(buf);
+			return ret;
+		}
+
+		if (copy_to_user(pioc.buf_addr, buf, pioc.len)) {
+			vfree(buf);
+			return -EFAULT;
+		}
+
+		vfree(buf);
+		return 0;
+	}
+
+	case RTL8367C_IOC_OP_SPI_WRITE:
+	{
+		u8 *buf;
+
+		if (pioc.len > 128)
+			return -EINVAL;
+
+		buf = vmalloc(pioc.len);
+		if (!buf)
+			return -ENOMEM;
+
+		if (copy_from_user(buf, pioc.buf_addr, pioc.len)) {
+			vfree(buf);
+			return -EFAULT;
+		}
+
+		ret = spi_write(chip->spi, buf, pioc.len);
+		vfree(buf);
+		return ret;
+	}
+
+	case RTL8367C_IOC_OP_RESET:
+		rtl8367c_do_reset(chip);
+		return 0;
+
+	default:
+		return -EOPNOTSUPP;
+	}
+
+	return 0;
+}
+
+static const struct file_operations rtl8367c_fops = {
+	.unlocked_ioctl	= rtl8367c_do_ioctl,
+};
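
A hedged userspace sketch of driving the miscdevice registered below. The field names of struct rtl8367c_pioctl are taken from their uses in rtl8367c_do_ioctl() above; the exact layout lives in <linux/rtl8367c_ioctl.h>, and the caller needs CAP_SYS_RAWIO:

	#include <stdint.h>
	#include <fcntl.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <linux/rtl8367c_ioctl.h>

	/* Read one 16-bit switch register via /dev/rtl8367c (sketch). */
	static int rtl8367c_user_read_reg(uint16_t offset, uint16_t *val)
	{
		struct rtl8367c_pioctl pioc = { 0 };
		int fd, ret;

		fd = open("/dev/rtl8367c", O_RDWR);
		if (fd < 0)
			return -1;

		pioc.bus = 0;		/* SPI bus of the switch */
		pioc.cs = 0;		/* chip select */
		pioc.offset = offset;	/* register address */
		pioc.len = 2;		/* handler enforces len == 2 */
		pioc.buf_addr = val;	/* result written here */

		ret = ioctl(fd, RTL8367C_IOC_OP_REG_READ, &pioc);
		close(fd);
		return ret;
	}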
+
+static int
+rtl8367c_spi_probe(struct spi_device *spi)
+{
+	struct rtl8367c *chip;
+	int ret;
+
+	chip = devm_kzalloc(&spi->dev, sizeof(*chip), GFP_KERNEL);
+	if (!chip)
+		return -ENOMEM;
+
+	spi_set_drvdata(spi, chip);
+	chip->spi = spi;
+	chip->dev = &spi->dev;
+
+	chip->reset_gpio = devm_gpiod_get_optional(chip->dev, "reset",
+						   GPIOD_OUT_HIGH);
+	if (IS_ERR(chip->reset_gpio)) {
+		ret = PTR_ERR(chip->reset_gpio);
+		if (ret != -EPROBE_DEFER)
+			dev_err(chip->dev,
+				"failed to get reset gpio: %d\n", ret);
+		return ret;
+	}
+
+#ifdef CONFIG_NET_RTL8367C_SPI_CONFIG
+	ret = rtl8367c_config(chip);
+	if (ret)
+		return ret;
+#endif
+
+	dev_info(chip->dev, "RTL8367C driver instantiated\n");
+	list_add_tail(&chip->next, &device_list);
+	return 0;
+}
+
+static int
+rtl8367c_spi_remove(struct spi_device *spi)
+{
+	struct rtl8367c *chip;
+
+	chip = spi_get_drvdata(spi);
+	list_del(&chip->next);
+
+	if (chip->reset_gpio) {
+		gpiod_set_value_cansleep(chip->reset_gpio, 1);
+		gpiod_unexport(chip->reset_gpio);
+	}
+
+	return 0;
+}
+
+static const struct spi_device_id rtl8367c_spi_id[] = {
+	{ "rtl8367c", 0 },
+	{ /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(spi, rtl8367c_spi_id);
+
+#ifdef CONFIG_OF
+static const struct of_device_id rtl8367c_spi_of_match[] = {
+	{ .compatible = "realtek,rtl8367c-spi" },
+	{ /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, rtl8367c_spi_of_match);
+#endif
+
+static struct spi_driver rtl8367c_spi_driver = {
+	.driver = {
+		.name = "rtl8367c_spi",
+		.of_match_table = of_match_ptr(rtl8367c_spi_of_match),
+	},
+	.probe = rtl8367c_spi_probe,
+	.remove = rtl8367c_spi_remove,
+	.id_table = rtl8367c_spi_id,
+};
+
+static int __init rtl8367c_spi_init(void)
+{
+	int ret;
+
+	memset(&miscdev, 0, sizeof (miscdev));
+	miscdev.minor = MISC_DYNAMIC_MINOR;
+	miscdev.fops = &rtl8367c_fops;
+	miscdev.name = "rtl8367c";
+	miscdev.nodename = "rtl8367c";
+
+	ret = misc_register(&miscdev);
+	if (ret < 0) {
+		printk(KERN_ERR "rtl8367c: can't register miscdev: %d\n", ret);
+		return ret;
+	}
+
+	return spi_register_driver(&rtl8367c_spi_driver);
+}
+
+static void __exit rtl8367c_spi_exit(void)
+{
+	misc_deregister(&miscdev);
+	spi_unregister_driver(&rtl8367c_spi_driver);
+}
+
+module_init(rtl8367c_spi_init);
+module_exit(rtl8367c_spi_exit);
+
+MODULE_AUTHOR("Arnaud Vrac <avrac@freebox.fr>");
+MODULE_DESCRIPTION("Driver for Realtek RTL8367C Switch in SPI managed mode");
+MODULE_LICENSE("GPL v2");
diff -Nruw linux-4.4.115-fbx/drivers/misc/freebox./rtlapi/acl.c linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/acl.c
--- linux-4.4.115-fbx/drivers/misc/freebox./rtlapi/acl.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/acl.c	2019-01-22 16:16:24.707257309 +0100
@@ -0,0 +1,2300 @@
+/*
+ * Copyright (C) 2013 Realtek Semiconductor Corp.
+ * All Rights Reserved.
+ *
+ * This program is the proprietary software of Realtek Semiconductor
+ * Corporation and/or its licensors, and may only be used, duplicated,
+ * modified or distributed under the authorized license from Realtek.
+ *
+ * ANY USE OF THE SOFTWARE OTHER THAN AS AUTHORIZED UNDER
+ * THIS LICENSE OR COPYRIGHT LAW IS PROHIBITED.
+ *
+ * $Revision: 80956 $
+ * $Date: 2017-08-02 10:15:29 +0800 (Wed, 02 Aug 2017) $
+ *
+ * Purpose : RTK switch high-level API for RTL8367/RTL8367C
+ * Feature : Here is a list of all functions and variables in ACL module.
+ *
+ */
+
+#include <rtk_switch.h>
+#include <rtk_error.h>
+#include <acl.h>
+#include <vlan.h>
+#include <svlan.h>
+#include <rate.h>
+#include <string.h>
+
+#include <rtl8367c_asicdrv.h>
+#include <rtl8367c_asicdrv_acl.h>
+#include <rtl8367c_asicdrv_hsb.h>
+#include <rtl8367c_asicdrv_vlan.h>
+#include <rtl8367c_asicdrv_svlan.h>
+#include <rtl8367c_asicdrv_cputag.h>
+#include <rtl8367c_asicdrv_mib.h>
+
+CONST_T rtk_uint8 filter_templateField[RTL8367C_ACLTEMPLATENO][RTL8367C_ACLRULEFIELDNO] = {
+    {ACL_DMAC0,             ACL_DMAC1,          ACL_DMAC2,          ACL_SMAC0,          ACL_SMAC1,          ACL_SMAC2,          ACL_ETHERTYPE,      ACL_FIELD_SELECT15},
+    {ACL_IP4SIP0,           ACL_IP4SIP1,        ACL_IP4DIP0,        ACL_IP4DIP1,        ACL_FIELD_SELECT13, ACL_FIELD_SELECT14, ACL_FIELD_SELECT02, ACL_FIELD_SELECT15},
+    {ACL_IP6SIP0WITHIPV4,   ACL_IP6SIP1WITHIPV4,ACL_FIELD_SELECT03, ACL_FIELD_SELECT04, ACL_FIELD_SELECT05, ACL_FIELD_SELECT06, ACL_FIELD_SELECT07, ACL_FIELD_SELECT08},
+    {ACL_IP6DIP0WITHIPV4,   ACL_IP6DIP1WITHIPV4,ACL_FIELD_SELECT09, ACL_FIELD_SELECT10, ACL_FIELD_SELECT11, ACL_FIELD_SELECT12, ACL_FIELD_SELECT13, ACL_FIELD_SELECT14},
+    {ACL_VIDRANGE,          ACL_IPRANGE,        ACL_PORTRANGE,      ACL_CTAG,           ACL_STAG,           ACL_FIELD_SELECT13, ACL_FIELD_SELECT14, ACL_FIELD_SELECT15}
+};
+
+CONST_T rtk_uint8 filter_advanceCaretagField[RTL8367C_ACLTEMPLATENO][2] = {
+    {TRUE,      7},
+    {TRUE,      7},
+    {FALSE,     0},
+    {FALSE,     0},
+    {TRUE,      7},
+};
+
+
+CONST_T rtk_uint8 filter_fieldTemplateIndex[FILTER_FIELD_END][RTK_FILTER_FIELD_USED_MAX] = {
+    {0x00, 0x01,0x02},
+    {0x03, 0x04,0x05},
+    {0x06},
+    {0x43},
+    {0x44},
+    {0x10, 0x11},
+    {0x12, 0x13},
+    {0x24},
+    {0x25},
+    {0x35},
+    {0x35},
+    {0x20, 0x21,0x22,0x23},
+    {0x30, 0x31,0x32,0x33},
+    {0x26},
+    {0x27},
+    {0x14},
+    {0x15},
+    {0x16},
+    {0x14},
+    {0x15},
+    {0x14},
+    {0x14},
+    {0x14},
+
+    {0x40},
+    {0x41},
+    {0x42},
+
+    {0x14},
+    {0x15},
+    {0x16},
+    {0x22},
+    {0x23},
+    {0x24},
+    {0x25},
+    {0x26},
+    {0x27},
+    {0x32},
+    {0x33},
+    {0x34},
+    {0x35},
+    {0x36},
+    {0x37},
+    {0x47},
+
+    {0xFF} /* Pattern Match */
+};
+
+CONST_T rtk_uint8 filter_fieldSize[FILTER_FIELD_END] = {
+    3, 3, 1, 1, 1,
+    2, 2, 1, 1, 1, 1, 4, 4, 1, 1,
+    1, 1, 1, 1, 1, 1, 1, 1,
+    1, 1, 1,
+    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+    8
+};
+
+CONST_T rtk_uint16 field_selector[RTL8367C_FIELDSEL_FORMAT_NUMBER][2] =
+{
+    {FIELDSEL_FORMAT_DEFAULT, 0},    /* Field Selector 0 */
+    {FIELDSEL_FORMAT_DEFAULT, 0},    /* Field Selector 1 */
+    {FIELDSEL_FORMAT_IPPAYLOAD, 12}, /* Field Selector 2 */
+    {FIELDSEL_FORMAT_IPV6, 10},      /* Field Selector 3 */
+    {FIELDSEL_FORMAT_IPV6, 8},       /* Field Selector 4 */
+    {FIELDSEL_FORMAT_IPV4, 0},       /* Field Selector 5 */
+    {FIELDSEL_FORMAT_IPV4, 8},       /* Field Selector 6 */
+    {FIELDSEL_FORMAT_IPV6, 0},       /* Field Selector 7 */
+    {FIELDSEL_FORMAT_IPV6, 6},       /* Field Selector 8 */
+    {FIELDSEL_FORMAT_IPV6, 26},      /* Field Selector 9 */
+    {FIELDSEL_FORMAT_IPV6, 24},      /* Field Selector 10 */
+    {FIELDSEL_FORMAT_DEFAULT, 0},    /* Field Selector 11 */
+    {FIELDSEL_FORMAT_IPV4, 6},       /* Field Selector 12 */
+    {FIELDSEL_FORMAT_IPPAYLOAD, 0},  /* Field Selector 13 */
+    {FIELDSEL_FORMAT_IPPAYLOAD, 2},  /* Field Selector 14 */
+    {FIELDSEL_FORMAT_DEFAULT, 0}     /* Field Selector 15 */
+};
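
Each entry of filter_fieldTemplateIndex[] above packs the ACL template number into the high nibble and the 16-bit field slot into the low nibble, so 0x43 selects field 3 of template 4. A minimal decoding sketch (hypothetical helper name; this is the same unpacking that _rtk_filter_igrAcl_writeDataField() performs inline):

	static void acl_decode_template_index_sketch(rtk_uint8 idx,
						     rtk_uint32 *tempIdx,
						     rtk_uint32 *fieldIdx)
	{
		*tempIdx  = (idx & 0xF0) >> 4;	/* which of the 5 ACL templates */
		*fieldIdx = idx & 0x0F;		/* which 16-bit field within it */
	}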
+
+static rtk_api_ret_t _rtk_filter_igrAcl_cfg_delAll(void);
+
+
+static rtk_api_ret_t _rtk_filter_igrAcl_init(void)
+{
+    rtl8367c_acltemplate_t       aclTemp;
+    rtk_uint32                 i, j;
+    rtk_api_ret_t          ret;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if ((ret = _rtk_filter_igrAcl_cfg_delAll()) != RT_ERR_OK)
+        return ret;
+
+    for(i = 0; i < RTL8367C_ACLTEMPLATENO; i++)
+    {
+        for(j = 0; j < RTL8367C_ACLRULEFIELDNO;j++)
+            aclTemp.field[j] = filter_templateField[i][j];
+
+        if ((ret = rtl8367c_setAsicAclTemplate(i, &aclTemp)) != RT_ERR_OK)
+            return ret;
+    }
+
+    for(i = 0; i < RTL8367C_FIELDSEL_FORMAT_NUMBER; i++)
+    {
+        if ((ret = rtl8367c_setAsicFieldSelector(i, field_selector[i][0], field_selector[i][1])) != RT_ERR_OK)
+            return ret;
+    }
+
+    RTK_SCAN_ALL_PHY_PORTMASK(i)
+    {
+        if ((ret = rtl8367c_setAsicAcl(i, TRUE)) != RT_ERR_OK)
+            return ret;
+
+        if ((ret = rtl8367c_setAsicAclUnmatchedPermit(i, TRUE)) != RT_ERR_OK)
+            return ret;
+    }
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_filter_igrAcl_field_add(rtk_filter_cfg_t* pFilter_cfg, rtk_filter_field_t* pFilter_field)
+{
+    rtk_uint32 i;
+    rtk_filter_field_t *tailPtr;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if(NULL == pFilter_cfg || NULL == pFilter_field)
+        return RT_ERR_NULL_POINTER;
+
+    if(pFilter_field->fieldType >= FILTER_FIELD_END)
+        return RT_ERR_ENTRY_INDEX;
+
+
+    if(0 == pFilter_field->fieldTemplateNo)
+    {
+        pFilter_field->fieldTemplateNo = filter_fieldSize[pFilter_field->fieldType];
+
+        for(i = 0; i < pFilter_field->fieldTemplateNo; i++)
+        {
+            pFilter_field->fieldTemplateIdx[i] = filter_fieldTemplateIndex[pFilter_field->fieldType][i];
+        }
+    }
+
+    if(NULL == pFilter_cfg->fieldHead)
+    {
+        pFilter_cfg->fieldHead = pFilter_field;
+    }
+    else
+    {
+        if (pFilter_cfg->fieldHead->next == NULL)
+        {
+            pFilter_cfg->fieldHead->next = pFilter_field;
+        }
+        else
+        {
+            tailPtr = pFilter_cfg->fieldHead->next;
+            while( tailPtr->next != NULL)
+            {
+                tailPtr = tailPtr->next;
+            }
+            tailPtr->next = pFilter_field;
+        }
+    }
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_filter_igrAcl_writeDataField(rtl8367c_aclrule *aclRule, rtk_filter_field_t *fieldPtr)
+{
+    rtk_uint32 i, tempIdx,fieldIdx, ipValue, ipMask;
+    rtk_uint32 ip6addr[RTK_IPV6_ADDR_WORD_LENGTH];
+    rtk_uint32 ip6mask[RTK_IPV6_ADDR_WORD_LENGTH];
+
+    for(i = 0; i < fieldPtr->fieldTemplateNo; i++)
+    {
+        tempIdx = (fieldPtr->fieldTemplateIdx[i] & 0xF0) >> 4;
+
+        aclRule[tempIdx].valid = TRUE;
+    }
+
+    switch (fieldPtr->fieldType)
+    {
+    /* use DMAC structure as representative for mac structure */
+    case FILTER_FIELD_DMAC:
+    case FILTER_FIELD_SMAC:
+
+        for(i = 0; i < fieldPtr->fieldTemplateNo; i++)
+        {
+            tempIdx = (fieldPtr->fieldTemplateIdx[i] & 0xF0) >> 4;
+            fieldIdx = fieldPtr->fieldTemplateIdx[i] & 0x0F;
+
+            aclRule[tempIdx].data_bits.field[fieldIdx] = fieldPtr->filter_pattern_union.mac.value.octet[5 - i*2] | (fieldPtr->filter_pattern_union.mac.value.octet[5 - (i*2 + 1)] << 8);
+            aclRule[tempIdx].care_bits.field[fieldIdx] = fieldPtr->filter_pattern_union.mac.mask.octet[5 - i*2] | (fieldPtr->filter_pattern_union.mac.mask.octet[5 - (i*2 + 1)] << 8);
+        }
+        break;
+    case FILTER_FIELD_ETHERTYPE:
+        for(i = 0; i < fieldPtr->fieldTemplateNo; i++)
+        {
+            tempIdx = (fieldPtr->fieldTemplateIdx[i] & 0xF0) >> 4;
+            fieldIdx = fieldPtr->fieldTemplateIdx[i] & 0x0F;
+
+            aclRule[tempIdx].data_bits.field[fieldIdx] = fieldPtr->filter_pattern_union.etherType.value;
+            aclRule[tempIdx].care_bits.field[fieldIdx] = fieldPtr->filter_pattern_union.etherType.mask;
+        }
+        break;
+    case FILTER_FIELD_IPV4_SIP:
+    case FILTER_FIELD_IPV4_DIP:
+
+        ipValue = fieldPtr->filter_pattern_union.sip.value;
+        ipMask = fieldPtr->filter_pattern_union.sip.mask;
+
+        for(i = 0; i < fieldPtr->fieldTemplateNo; i++)
+        {
+            tempIdx = (fieldPtr->fieldTemplateIdx[i] & 0xF0) >> 4;
+            fieldIdx = fieldPtr->fieldTemplateIdx[i] & 0x0F;
+
+            aclRule[tempIdx].data_bits.field[fieldIdx] = (0xFFFF & (ipValue >> (i*16)));
+            aclRule[tempIdx].care_bits.field[fieldIdx] = (0xFFFF & (ipMask >> (i*16)));
+        }
+        break;
+    case FILTER_FIELD_IPV4_TOS:
+        for(i = 0; i < fieldPtr->fieldTemplateNo; i++)
+        {
+            tempIdx = (fieldPtr->fieldTemplateIdx[i] & 0xF0) >> 4;
+            fieldIdx = fieldPtr->fieldTemplateIdx[i] & 0x0F;
+
+            aclRule[tempIdx].data_bits.field[fieldIdx] = fieldPtr->filter_pattern_union.ipTos.value & 0xFF;
+            aclRule[tempIdx].care_bits.field[fieldIdx] = fieldPtr->filter_pattern_union.ipTos.mask  & 0xFF;
+        }
+        break;
+    case FILTER_FIELD_IPV4_PROTOCOL:
+        for(i = 0; i < fieldPtr->fieldTemplateNo; i++)
+        {
+            tempIdx = (fieldPtr->fieldTemplateIdx[i] & 0xF0) >> 4;
+            fieldIdx = fieldPtr->fieldTemplateIdx[i] & 0x0F;
+
+            aclRule[tempIdx].data_bits.field[fieldIdx] = fieldPtr->filter_pattern_union.protocol.value & 0xFF;
+            aclRule[tempIdx].care_bits.field[fieldIdx] = fieldPtr->filter_pattern_union.protocol.mask  & 0xFF;
+        }
+        break;
+    case FILTER_FIELD_IPV6_SIPV6:
+    case FILTER_FIELD_IPV6_DIPV6:
+        for(i = 0; i < RTK_IPV6_ADDR_WORD_LENGTH; i++)
+        {
+            ip6addr[i] = fieldPtr->filter_pattern_union.sipv6.value.addr[i];
+            ip6mask[i] = fieldPtr->filter_pattern_union.sipv6.mask.addr[i];
+        }
+
+        for(i = 0; i < fieldPtr->fieldTemplateNo; i++)
+        {
+            tempIdx = (fieldPtr->fieldTemplateIdx[i] & 0xF0) >> 4;
+            fieldIdx = fieldPtr->fieldTemplateIdx[i] & 0x0F;
+
+            if(i < 2)
+            {
+                aclRule[tempIdx].data_bits.field[fieldIdx] = ((ip6addr[0] & (0xFFFF << (i * 16))) >> (i * 16));
+                aclRule[tempIdx].care_bits.field[fieldIdx] = ((ip6mask[0] & (0xFFFF << (i * 16))) >> (i * 16));
+            }
+            else
+            {
+                /* the default ACL template for IPv6 addresses supports only the MSB 32 bits and the LSB 32 bits */
+                aclRule[tempIdx].data_bits.field[fieldIdx] = ((ip6addr[3] & (0xFFFF << ((i&1) * 16))) >> ((i&1) * 16));
+                aclRule[tempIdx].care_bits.field[fieldIdx] = ((ip6mask[3] & (0xFFFF << ((i&1) * 16))) >> ((i&1) * 16));
+            }
+        }
+
+        break;
+    case FILTER_FIELD_CTAG:
+    case FILTER_FIELD_STAG:
+
+        for(i = 0; i < fieldPtr->fieldTemplateNo; i++)
+        {
+            tempIdx = (fieldPtr->fieldTemplateIdx[i] & 0xF0) >> 4;
+            fieldIdx = fieldPtr->fieldTemplateIdx[i] & 0x0F;
+
+            aclRule[tempIdx].data_bits.field[fieldIdx] = (fieldPtr->filter_pattern_union.l2tag.pri.value << 13) | (fieldPtr->filter_pattern_union.l2tag.cfi.value << 12) | fieldPtr->filter_pattern_union.l2tag.vid.value;
+            aclRule[tempIdx].care_bits.field[fieldIdx] = (fieldPtr->filter_pattern_union.l2tag.pri.mask << 13) | (fieldPtr->filter_pattern_union.l2tag.cfi.mask << 12) | fieldPtr->filter_pattern_union.l2tag.vid.mask;
+        }
+        break;
+    case FILTER_FIELD_IPV4_FLAG:
+
+        for(i = 0; i < fieldPtr->fieldTemplateNo; i++)
+        {
+            tempIdx = (fieldPtr->fieldTemplateIdx[i] & 0xF0) >> 4;
+            fieldIdx = fieldPtr->fieldTemplateIdx[i] & 0x0F;
+
+            aclRule[tempIdx].data_bits.field[fieldIdx] &= 0x1FFF;
+            aclRule[tempIdx].data_bits.field[fieldIdx] |= (fieldPtr->filter_pattern_union.ipFlag.xf.value << 15);
+            aclRule[tempIdx].data_bits.field[fieldIdx] |= (fieldPtr->filter_pattern_union.ipFlag.df.value << 14);
+            aclRule[tempIdx].data_bits.field[fieldIdx] |= (fieldPtr->filter_pattern_union.ipFlag.mf.value << 13);
+
+            aclRule[tempIdx].care_bits.field[fieldIdx] &= 0x1FFF;
+            aclRule[tempIdx].care_bits.field[fieldIdx] |= (fieldPtr->filter_pattern_union.ipFlag.xf.mask << 15);
+            aclRule[tempIdx].care_bits.field[fieldIdx] |= (fieldPtr->filter_pattern_union.ipFlag.df.mask << 14);
+            aclRule[tempIdx].care_bits.field[fieldIdx] |= (fieldPtr->filter_pattern_union.ipFlag.mf.mask << 13);
+        }
+
+        break;
+    case FILTER_FIELD_IPV4_OFFSET:
+
+        for(i = 0; i < fieldPtr->fieldTemplateNo; i++)
+        {
+            tempIdx = (fieldPtr->fieldTemplateIdx[i] & 0xF0) >> 4;
+            fieldIdx = fieldPtr->fieldTemplateIdx[i] & 0x0F;
+
+            aclRule[tempIdx].data_bits.field[fieldIdx] &= 0xE000;
+            aclRule[tempIdx].data_bits.field[fieldIdx] |= fieldPtr->filter_pattern_union.inData.value;
+
+            aclRule[tempIdx].care_bits.field[fieldIdx] &= 0xE000;
+            aclRule[tempIdx].care_bits.field[fieldIdx] |= fieldPtr->filter_pattern_union.inData.mask;
+        }
+
+        break;
+
+    case FILTER_FIELD_IPV6_TRAFFIC_CLASS:
+        for(i = 0; i < fieldPtr->fieldTemplateNo; i++)
+        {
+            tempIdx = (fieldPtr->fieldTemplateIdx[i] & 0xF0) >> 4;
+            fieldIdx = fieldPtr->fieldTemplateIdx[i] & 0x0F;
+
+
+            aclRule[tempIdx].data_bits.field[fieldIdx] = (fieldPtr->filter_pattern_union.inData.value << 4)&0x0FF0;
+            aclRule[tempIdx].care_bits.field[fieldIdx] = (fieldPtr->filter_pattern_union.inData.mask << 4)&0x0FF0;
+        }
+        break;
+    case FILTER_FIELD_IPV6_NEXT_HEADER:
+        for(i = 0; i < fieldPtr->fieldTemplateNo; i++)
+        {
+            tempIdx = (fieldPtr->fieldTemplateIdx[i] & 0xF0) >> 4;
+            fieldIdx = fieldPtr->fieldTemplateIdx[i] & 0x0F;
+
+            aclRule[tempIdx].data_bits.field[fieldIdx] = fieldPtr->filter_pattern_union.inData.value << 8;
+            aclRule[tempIdx].care_bits.field[fieldIdx] = fieldPtr->filter_pattern_union.inData.mask << 8;
+        }
+        break;
+    case FILTER_FIELD_TCP_SPORT:
+        for(i = 0; i < fieldPtr->fieldTemplateNo; i++)
+        {
+            tempIdx = (fieldPtr->fieldTemplateIdx[i] & 0xF0) >> 4;
+            fieldIdx = fieldPtr->fieldTemplateIdx[i] & 0x0F;
+
+            aclRule[tempIdx].data_bits.field[fieldIdx] = fieldPtr->filter_pattern_union.tcpSrcPort.value;
+            aclRule[tempIdx].care_bits.field[fieldIdx] = fieldPtr->filter_pattern_union.tcpSrcPort.mask;
+        }
+        break;
+    case FILTER_FIELD_TCP_DPORT:
+        for(i = 0; i < fieldPtr->fieldTemplateNo; i++)
+        {
+            tempIdx = (fieldPtr->fieldTemplateIdx[i] & 0xF0) >> 4;
+            fieldIdx = fieldPtr->fieldTemplateIdx[i] & 0x0F;
+
+            aclRule[tempIdx].data_bits.field[fieldIdx] = fieldPtr->filter_pattern_union.tcpDstPort.value;
+            aclRule[tempIdx].care_bits.field[fieldIdx] = fieldPtr->filter_pattern_union.tcpDstPort.mask;
+        }
+        break;
+    case FILTER_FIELD_TCP_FLAG:
+
+        for(i = 0; i < fieldPtr->fieldTemplateNo; i++)
+        {
+            tempIdx = (fieldPtr->fieldTemplateIdx[i] & 0xF0) >> 4;
+            fieldIdx = fieldPtr->fieldTemplateIdx[i] & 0x0F;
+
+            aclRule[tempIdx].data_bits.field[fieldIdx] |= (fieldPtr->filter_pattern_union.tcpFlag.cwr.value << 7);
+            aclRule[tempIdx].data_bits.field[fieldIdx] |= (fieldPtr->filter_pattern_union.tcpFlag.ece.value << 6);
+            aclRule[tempIdx].data_bits.field[fieldIdx] |= (fieldPtr->filter_pattern_union.tcpFlag.urg.value << 5);
+            aclRule[tempIdx].data_bits.field[fieldIdx] |= (fieldPtr->filter_pattern_union.tcpFlag.ack.value << 4);
+            aclRule[tempIdx].data_bits.field[fieldIdx] |= (fieldPtr->filter_pattern_union.tcpFlag.psh.value << 3);
+            aclRule[tempIdx].data_bits.field[fieldIdx] |= (fieldPtr->filter_pattern_union.tcpFlag.rst.value << 2);
+            aclRule[tempIdx].data_bits.field[fieldIdx] |= (fieldPtr->filter_pattern_union.tcpFlag.syn.value << 1);
+            aclRule[tempIdx].data_bits.field[fieldIdx] |= fieldPtr->filter_pattern_union.tcpFlag.fin.value;
+
+            aclRule[tempIdx].care_bits.field[fieldIdx] |= (fieldPtr->filter_pattern_union.tcpFlag.cwr.mask << 7);
+            aclRule[tempIdx].care_bits.field[fieldIdx] |= (fieldPtr->filter_pattern_union.tcpFlag.ece.mask << 6);
+            aclRule[tempIdx].care_bits.field[fieldIdx] |= (fieldPtr->filter_pattern_union.tcpFlag.urg.mask << 5);
+            aclRule[tempIdx].care_bits.field[fieldIdx] |= (fieldPtr->filter_pattern_union.tcpFlag.ack.mask << 4);
+            aclRule[tempIdx].care_bits.field[fieldIdx] |= (fieldPtr->filter_pattern_union.tcpFlag.psh.mask << 3);
+            aclRule[tempIdx].care_bits.field[fieldIdx] |= (fieldPtr->filter_pattern_union.tcpFlag.rst.mask << 2);
+            aclRule[tempIdx].care_bits.field[fieldIdx] |= (fieldPtr->filter_pattern_union.tcpFlag.syn.mask << 1);
+            aclRule[tempIdx].care_bits.field[fieldIdx] |= fieldPtr->filter_pattern_union.tcpFlag.fin.mask;
+        }
+        break;
+    case FILTER_FIELD_UDP_SPORT:
+        for(i = 0; i < fieldPtr->fieldTemplateNo; i++)
+        {
+            tempIdx = (fieldPtr->fieldTemplateIdx[i] & 0xF0) >> 4;
+            fieldIdx = fieldPtr->fieldTemplateIdx[i] & 0x0F;
+
+            aclRule[tempIdx].data_bits.field[fieldIdx] = fieldPtr->filter_pattern_union.udpSrcPort.value;
+            aclRule[tempIdx].care_bits.field[fieldIdx] = fieldPtr->filter_pattern_union.udpSrcPort.mask;
+        }
+        break;
+    case FILTER_FIELD_UDP_DPORT:
+        for(i = 0; i < fieldPtr->fieldTemplateNo; i++)
+        {
+            tempIdx = (fieldPtr->fieldTemplateIdx[i] & 0xF0) >> 4;
+            fieldIdx = fieldPtr->fieldTemplateIdx[i] & 0x0F;
+
+            aclRule[tempIdx].data_bits.field[fieldIdx] = fieldPtr->filter_pattern_union.udpDstPort.value;
+            aclRule[tempIdx].care_bits.field[fieldIdx] = fieldPtr->filter_pattern_union.udpDstPort.mask;
+        }
+        break;
+    case FILTER_FIELD_ICMP_CODE:
+        for(i = 0; i < fieldPtr->fieldTemplateNo; i++)
+        {
+            tempIdx = (fieldPtr->fieldTemplateIdx[i] & 0xF0) >> 4;
+            fieldIdx = fieldPtr->fieldTemplateIdx[i] & 0x0F;
+
+            aclRule[tempIdx].data_bits.field[fieldIdx] &= 0xFF00;
+            aclRule[tempIdx].data_bits.field[fieldIdx] |= fieldPtr->filter_pattern_union.icmpCode.value;
+            aclRule[tempIdx].care_bits.field[fieldIdx] &= 0xFF00;
+            aclRule[tempIdx].care_bits.field[fieldIdx] |= fieldPtr->filter_pattern_union.icmpCode.mask;
+        }
+        break;
+    case FILTER_FIELD_ICMP_TYPE:
+        for(i = 0; i < fieldPtr->fieldTemplateNo; i++)
+        {
+            tempIdx = (fieldPtr->fieldTemplateIdx[i] & 0xF0) >> 4;
+            fieldIdx = fieldPtr->fieldTemplateIdx[i] & 0x0F;
+
+            aclRule[tempIdx].data_bits.field[fieldIdx] &= 0x00FF;
+            aclRule[tempIdx].data_bits.field[fieldIdx] |= (fieldPtr->filter_pattern_union.icmpType.value << 8);
+            aclRule[tempIdx].care_bits.field[fieldIdx] &= 0x00FF;
+            aclRule[tempIdx].care_bits.field[fieldIdx] |= (fieldPtr->filter_pattern_union.icmpType.mask << 8);
+        }
+        break;
+    case FILTER_FIELD_IGMP_TYPE:
+        for(i = 0; i < fieldPtr->fieldTemplateNo; i++)
+        {
+            tempIdx = (fieldPtr->fieldTemplateIdx[i] & 0xF0) >> 4;
+            fieldIdx = fieldPtr->fieldTemplateIdx[i] & 0x0F;
+
+            aclRule[tempIdx].data_bits.field[fieldIdx] = (fieldPtr->filter_pattern_union.igmpType.value << 8);
+            aclRule[tempIdx].care_bits.field[fieldIdx] = (fieldPtr->filter_pattern_union.igmpType.mask << 8);
+        }
+        break;
+    case FILTER_FIELD_PATTERN_MATCH:
+        for(i = 0; i < fieldPtr->fieldTemplateNo; i++)
+        {
+            tempIdx = (fieldPtr->fieldTemplateIdx[i] & 0xF0) >> 4;
+            fieldIdx = fieldPtr->fieldTemplateIdx[i] & 0x0F;
+
+            aclRule[tempIdx].data_bits.field[fieldIdx] = ((fieldPtr->filter_pattern_union.pattern.value[i/2] >> (16 * (i%2))) & 0x0000FFFF );
+            aclRule[tempIdx].care_bits.field[fieldIdx] = ((fieldPtr->filter_pattern_union.pattern.mask[i/2] >> (16 * (i%2))) & 0x0000FFFF );
+        }
+        break;
+    case FILTER_FIELD_VID_RANGE:
+    case FILTER_FIELD_IP_RANGE:
+    case FILTER_FIELD_PORT_RANGE:
+    default:
+        tempIdx = (fieldPtr->fieldTemplateIdx[0] & 0xF0) >> 4;
+        fieldIdx = fieldPtr->fieldTemplateIdx[0] & 0x0F;
+
+        aclRule[tempIdx].data_bits.field[fieldIdx] = fieldPtr->filter_pattern_union.inData.value;
+        aclRule[tempIdx].care_bits.field[fieldIdx] = fieldPtr->filter_pattern_union.inData.mask;
+        break;
+    }
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_filter_igrAcl_cfg_add(rtk_filter_id_t filter_id, rtk_filter_cfg_t* pFilter_cfg, rtk_filter_action_t* pFilter_action, rtk_filter_number_t *ruleNum)
+{
+    rtk_api_ret_t               retVal;
+    rtk_uint32                  careTagData, careTagMask;
+    rtk_uint32                  i,vidx, svidx, actType, ruleId;
+    rtk_uint32                  aclActCtrl;
+    rtk_uint32                  cpuPort;
+    rtk_filter_field_t*         fieldPtr;
+    rtl8367c_aclrule            aclRule[RTL8367C_ACLTEMPLATENO];
+    rtl8367c_aclrule            tempRule;
+    rtl8367c_acl_act_t          aclAct;
+    rtk_uint32                  noRulesAdd;
+    rtk_uint32                  portmask;
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if(filter_id > RTL8367C_ACLRULEMAX )
+        return RT_ERR_ENTRY_INDEX;
+
+    if((NULL == pFilter_cfg) || (NULL == pFilter_action) || (NULL == ruleNum))
+        return RT_ERR_NULL_POINTER;
+
+    fieldPtr = pFilter_cfg->fieldHead;
+
+    /* init RULE */
+    for(i = 0; i < RTL8367C_ACLTEMPLATENO; i++)
+    {
+        memset(&aclRule[i], 0, sizeof(rtl8367c_aclrule));
+
+        aclRule[i].data_bits.type= i;
+        aclRule[i].care_bits.type= 0x7;
+    }
+
+    while(NULL != fieldPtr)
+    {
+        _rtk_filter_igrAcl_writeDataField(aclRule, fieldPtr);
+
+        fieldPtr = fieldPtr->next;
+    }
+
+    /* set the care tag mask in User Defined Field 15 */
+    /* The following care tags should not be used when the ACL template and
+       user-defined fields are fully controlled by the system designer; these
+       advanced packet-type care tags are only used in the default template
+       design. */
+    careTagData = 0;
+    careTagMask = 0;
+
+    for(i = CARE_TAG_TCP; i < CARE_TAG_END; i++)
+    {
+        if(pFilter_cfg->careTag.tagType[i].mask)
+            careTagMask = careTagMask | (1 << (i-CARE_TAG_TCP));
+
+        if(pFilter_cfg->careTag.tagType[i].value)
+            careTagData = careTagData | (1 << (i-CARE_TAG_TCP));
+    }
+
+    if(careTagData || careTagMask)
+    {
+        i = 0;
+        while(i < RTL8367C_ACLTEMPLATENO)
+        {
+            if(aclRule[i].valid == 1 && filter_advanceCaretagField[i][0] == TRUE)
+            {
+
+                aclRule[i].data_bits.field[filter_advanceCaretagField[i][1]] = careTagData & 0xFFFF;
+                aclRule[i].care_bits.field[filter_advanceCaretagField[i][1]] = careTagMask & 0xFFFF;
+                break;
+            }
+            i++;
+        }
+        /* none of the previously used templates contains field 15 */
+        if(i == RTL8367C_ACLTEMPLATENO)
+        {
+            i = 0;
+            while(i < RTL8367C_ACLTEMPLATENO)
+            {
+                if(filter_advanceCaretagField[i][0] == TRUE)
+                {
+                    aclRule[i].data_bits.field[filter_advanceCaretagField[i][1]] = careTagData & 0xFFFF;
+                    aclRule[i].care_bits.field[filter_advanceCaretagField[i][1]] = careTagMask & 0xFFFF;
+                    aclRule[i].valid = 1;
+                    break;
+                }
+                i++;
+            }
+        }
+    }
+
+    /*Check rule number*/
+    noRulesAdd = 0;
+    for(i = 0; i < RTL8367C_ACLTEMPLATENO; i++)
+    {
+        if(1 == aclRule[i].valid)
+        {
+            noRulesAdd ++;
+        }
+    }
+
+    *ruleNum = noRulesAdd;
+
+    if((filter_id + noRulesAdd - 1) > RTL8367C_ACLRULEMAX)
+    {
+        return RT_ERR_ENTRY_INDEX;
+    }
+
+    /*set care tag mask in TAG Indicator*/
+    careTagData = 0;
+    careTagMask = 0;
+
+    for(i = 0; i <= CARE_TAG_IPV6;i++)
+    {
+        if(0 == pFilter_cfg->careTag.tagType[i].mask )
+        {
+            careTagMask &=  ~(1 << i);
+        }
+        else
+        {
+            careTagMask |= (1 << i);
+            if(0 == pFilter_cfg->careTag.tagType[i].value )
+                careTagData &= ~(1 << i);
+            else
+                careTagData |= (1 << i);
+        }
+    }
+
+    for(i = 0; i < RTL8367C_ACLTEMPLATENO; i++)
+    {
+        aclRule[i].data_bits.tag_exist = (careTagData) & ACL_RULE_CARETAG_MASK;
+        aclRule[i].care_bits.tag_exist = (careTagMask) & ACL_RULE_CARETAG_MASK;
+    }
+
+    RTK_CHK_PORTMASK_VALID(&pFilter_cfg->activeport.value);
+    RTK_CHK_PORTMASK_VALID(&pFilter_cfg->activeport.mask);
+
+    for(i = 0; i < RTL8367C_ACLTEMPLATENO; i++)
+    {
+        if(TRUE == aclRule[i].valid)
+        {
+            if(rtk_switch_portmask_L2P_get(&pFilter_cfg->activeport.value, &portmask) != RT_ERR_OK)
+                return RT_ERR_PORT_MASK;
+
+            aclRule[i].data_bits.active_portmsk = portmask;
+
+            if(rtk_switch_portmask_L2P_get(&pFilter_cfg->activeport.mask, &portmask) != RT_ERR_OK)
+                return RT_ERR_PORT_MASK;
+
+            aclRule[i].care_bits.active_portmsk = portmask;
+        }
+    }
+
+    if(pFilter_cfg->invert >= FILTER_INVERT_END )
+        return RT_ERR_INPUT;
+
+
+    /* The last action gets priority if multiple actions of the same type are set */
+    memset(&aclAct, 0, sizeof(rtl8367c_acl_act_t));
+    aclActCtrl = 0;
+    for(actType = 0; actType < FILTER_ENACT_END; actType ++)
+    {
+        if(pFilter_action->actEnable[actType])
+        {
+            switch (actType)
+            {
+            case FILTER_ENACT_CVLAN_INGRESS:
+                if(pFilter_action->filterCvlanVid > RTL8367C_EVIDMAX)
+                    return RT_ERR_INPUT;
+
+                if((retVal = rtk_vlan_checkAndCreateMbr(pFilter_action->filterCvlanVid, &vidx)) != RT_ERR_OK)
+                {
+                    return retVal;
+                }
+                aclAct.cact = FILTER_ENACT_CVLAN_TYPE(actType);
+                aclAct.cvidx_cact = vidx;
+
+                if(aclActCtrl &(FILTER_ENACT_CVLAN_MASK))
+                {
+                    if(aclAct.cact_ext == FILTER_ENACT_CACTEXT_TAGONLY)
+                        aclAct.cact_ext = FILTER_ENACT_CACTEXT_BOTHVLANTAG;
+                }
+                else
+                {
+                    aclAct.cact_ext = FILTER_ENACT_CACTEXT_VLANONLY;
+                }
+
+                aclActCtrl |= FILTER_ENACT_CVLAN_MASK;
+                break;
+            case FILTER_ENACT_CVLAN_EGRESS:
+                if(pFilter_action->filterCvlanVid > RTL8367C_EVIDMAX)
+                    return RT_ERR_INPUT;
+
+                if((retVal = rtk_vlan_checkAndCreateMbr(pFilter_action->filterCvlanVid, &vidx)) != RT_ERR_OK)
+                    return retVal;
+
+                aclAct.cact = FILTER_ENACT_CVLAN_TYPE(actType);
+                aclAct.cvidx_cact = vidx;
+
+                if(aclActCtrl &(FILTER_ENACT_CVLAN_MASK))
+                {
+                    if(aclAct.cact_ext == FILTER_ENACT_CACTEXT_TAGONLY)
+                        aclAct.cact_ext = FILTER_ENACT_CACTEXT_BOTHVLANTAG;
+                }
+                else
+                {
+                    aclAct.cact_ext = FILTER_ENACT_CACTEXT_VLANONLY;
+                }
+
+                aclActCtrl |= FILTER_ENACT_CVLAN_MASK;
+                break;
+             case FILTER_ENACT_CVLAN_SVID:
+
+                aclAct.cact = FILTER_ENACT_CVLAN_TYPE(actType);
+
+                if(aclActCtrl &(FILTER_ENACT_CVLAN_MASK))
+                {
+                    if(aclAct.cact_ext == FILTER_ENACT_CACTEXT_TAGONLY)
+                        aclAct.cact_ext = FILTER_ENACT_CACTEXT_BOTHVLANTAG;
+                }
+                else
+                {
+                    aclAct.cact_ext = FILTER_ENACT_CACTEXT_VLANONLY;
+                }
+
+                aclActCtrl |= FILTER_ENACT_CVLAN_MASK;
+                break;
+             case FILTER_ENACT_POLICING_1:
+                if(pFilter_action->filterPolicingIdx[1] >= (RTK_METER_NUM + RTL8367C_MAX_LOG_CNT_NUM))
+                    return RT_ERR_INPUT;
+
+                aclAct.cact = FILTER_ENACT_CVLAN_TYPE(actType);
+                aclAct.cvidx_cact = pFilter_action->filterPolicingIdx[1];
+
+                if(aclActCtrl &(FILTER_ENACT_CVLAN_MASK))
+                {
+                    if(aclAct.cact_ext == FILTER_ENACT_CACTEXT_TAGONLY)
+                        aclAct.cact_ext = FILTER_ENACT_CACTEXT_BOTHVLANTAG;
+                }
+                else
+                {
+                    aclAct.cact_ext = FILTER_ENACT_CACTEXT_VLANONLY;
+                }
+
+                aclActCtrl |= FILTER_ENACT_CVLAN_MASK;
+                break;
+
+            case FILTER_ENACT_SVLAN_INGRESS:
+            case FILTER_ENACT_SVLAN_EGRESS:
+
+                if((retVal = rtk_svlan_checkAndCreateMbr(pFilter_action->filterSvlanVid, &svidx)) != RT_ERR_OK)
+                    return retVal;
+
+                aclAct.sact = FILTER_ENACT_SVLAN_TYPE(actType);
+                aclAct.svidx_sact = svidx;
+                aclActCtrl |= FILTER_ENACT_SVLAN_MASK;
+                break;
+            case FILTER_ENACT_SVLAN_CVID:
+
+                aclAct.sact = FILTER_ENACT_SVLAN_TYPE(actType);
+                aclActCtrl |= FILTER_ENACT_SVLAN_MASK;
+                break;
+            case FILTER_ENACT_POLICING_2:
+                if(pFilter_action->filterPolicingIdx[2] >= (RTK_METER_NUM + RTL8367C_MAX_LOG_CNT_NUM))
+                    return RT_ERR_INPUT;
+
+                aclAct.sact = FILTER_ENACT_SVLAN_TYPE(actType);
+                aclAct.svidx_sact = pFilter_action->filterPolicingIdx[2];
+                aclActCtrl |= FILTER_ENACT_SVLAN_MASK;
+                break;
+            case FILTER_ENACT_POLICING_0:
+                if(pFilter_action->filterPolicingIdx[0] >= (RTK_METER_NUM + RTL8367C_MAX_LOG_CNT_NUM))
+                    return RT_ERR_INPUT;
+
+                aclAct.aclmeteridx = pFilter_action->filterPolicingIdx[0];
+                aclActCtrl |= FILTER_ENACT_POLICING_MASK;
+                break;
+            case FILTER_ENACT_PRIORITY:
+            case FILTER_ENACT_1P_REMARK:
+                if(pFilter_action->filterPriority > RTL8367C_PRIMAX)
+                    return RT_ERR_INPUT;
+
+                aclAct.priact = FILTER_ENACT_PRI_TYPE(actType);
+                aclAct.pridx = pFilter_action->filterPriority;
+                aclActCtrl |= FILTER_ENACT_PRIORITY_MASK;
+                break;
+            case FILTER_ENACT_DSCP_REMARK:
+                if(pFilter_action->filterPriority > RTL8367C_DSCPMAX)
+                    return RT_ERR_INPUT;
+
+                aclAct.priact = FILTER_ENACT_PRI_TYPE(actType);
+                aclAct.pridx = pFilter_action->filterPriority;
+                aclActCtrl |= FILTER_ENACT_PRIORITY_MASK;
+                break;
+            case FILTER_ENACT_POLICING_3:
+                if(pFilter_action->filterPolicingIdx[3] >= (RTK_METER_NUM + RTL8367C_MAX_LOG_CNT_NUM))
+                    return RT_ERR_INPUT;
+
+                aclAct.priact = FILTER_ENACT_PRI_TYPE(actType);
+                aclAct.pridx = pFilter_action->filterPolicingIdx[3];
+                aclActCtrl |= FILTER_ENACT_PRIORITY_MASK;
+                break;
+            case FILTER_ENACT_DROP:
+
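+                /* Drop is implemented as a redirect to an empty destination port mask */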
+                aclAct.fwdact = FILTER_ENACT_FWD_TYPE(FILTER_ENACT_REDIRECT);
+                aclAct.fwdact_ext = FALSE;
+
+                aclAct.fwdpmask = 0;
+                aclActCtrl |= FILTER_ENACT_FWD_MASK;
+                break;
+            case FILTER_ENACT_REDIRECT:
+                RTK_CHK_PORTMASK_VALID(&pFilter_action->filterPortmask);
+
+                aclAct.fwdact = FILTER_ENACT_FWD_TYPE(actType);
+                aclAct.fwdact_ext = FALSE;
+
+                if(rtk_switch_portmask_L2P_get(&pFilter_action->filterPortmask, &portmask) != RT_ERR_OK)
+                    return RT_ERR_PORT_MASK;
+                aclAct.fwdpmask = portmask;
+
+                aclActCtrl |= FILTER_ENACT_FWD_MASK;
+                break;
+
+            case FILTER_ENACT_ADD_DSTPORT:
+                RTK_CHK_PORTMASK_VALID(&pFilter_action->filterPortmask);
+
+                aclAct.fwdact = FILTER_ENACT_FWD_TYPE(actType);
+                aclAct.fwdact_ext = FALSE;
+
+                if(rtk_switch_portmask_L2P_get(&pFilter_action->filterPortmask, &portmask) != RT_ERR_OK)
+                    return RT_ERR_PORT_MASK;
+                aclAct.fwdpmask = portmask;
+
+                aclActCtrl |= FILTER_ENACT_FWD_MASK;
+                break;
+            case FILTER_ENACT_MIRROR:
+                RTK_CHK_PORTMASK_VALID(&pFilter_action->filterPortmask);
+
+                aclAct.fwdact = FILTER_ENACT_FWD_TYPE(actType);
+                aclAct.fwdact_ext = FALSE;
+
+                if(rtk_switch_portmask_L2P_get(&pFilter_action->filterPortmask, &portmask) != RT_ERR_OK)
+                    return RT_ERR_PORT_MASK;
+                aclAct.fwdpmask = portmask;
+
+                aclActCtrl |= FILTER_ENACT_FWD_MASK;
+                break;
+            case FILTER_ENACT_TRAP_CPU:
+
+                aclAct.fwdact = FILTER_ENACT_FWD_TYPE(actType);
+                aclAct.fwdact_ext = FALSE;
+
+                aclActCtrl |= FILTER_ENACT_FWD_MASK;
+                break;
+            case FILTER_ENACT_COPY_CPU:
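+                /* Copy-to-CPU is implemented as a mirror action targeting the CPU trap port */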
+                if((retVal = rtl8367c_getAsicCputagTrapPort(&cpuPort)) != RT_ERR_OK)
+                    return retVal;
+
+                aclAct.fwdact = FILTER_ENACT_FWD_TYPE(FILTER_ENACT_MIRROR);
+                aclAct.fwdact_ext = FALSE;
+
+                aclAct.fwdpmask = 1 << cpuPort;
+                aclActCtrl |= FILTER_ENACT_FWD_MASK;
+                break;
+            case FILTER_ENACT_ISOLATION:
+                RTK_CHK_PORTMASK_VALID(&pFilter_action->filterPortmask);
+
+                aclAct.fwdact_ext = TRUE;
+
+                if(rtk_switch_portmask_L2P_get(&pFilter_action->filterPortmask, &portmask) != RT_ERR_OK)
+                    return RT_ERR_PORT_MASK;
+                aclAct.fwdpmask = portmask;
+
+                aclActCtrl |= FILTER_ENACT_FWD_MASK;
+                break;
+
+            case FILTER_ENACT_INTERRUPT:
+
+                aclAct.aclint = TRUE;
+                aclActCtrl |= FILTER_ENACT_INTGPIO_MASK;
+                break;
+            case FILTER_ENACT_GPO:
+
+                aclAct.gpio_en = TRUE;
+                aclAct.gpio_pin = pFilter_action->filterPin;
+                aclActCtrl |= FILTER_ENACT_INTGPIO_MASK;
+                break;
+             case FILTER_ENACT_EGRESSCTAG_TAG:
+
+                if(aclActCtrl &(FILTER_ENACT_CVLAN_MASK))
+                {
+                    if(aclAct.cact_ext == FILTER_ENACT_CACTEXT_VLANONLY)
+                        aclAct.cact_ext = FILTER_ENACT_CACTEXT_BOTHVLANTAG;
+                }
+                else
+                {
+                    aclAct.cact_ext = FILTER_ENACT_CACTEXT_TAGONLY;
+                }
+                aclAct.tag_fmt = FILTER_CTAGFMT_TAG;
+                aclActCtrl |= FILTER_ENACT_CVLAN_MASK;
+                break;
+             case FILTER_ENACT_EGRESSCTAG_UNTAG:
+
+                if(aclActCtrl &(FILTER_ENACT_CVLAN_MASK))
+                {
+                    if(aclAct.cact_ext == FILTER_ENACT_CACTEXT_VLANONLY)
+                        aclAct.cact_ext = FILTER_ENACT_CACTEXT_BOTHVLANTAG;
+                }
+                else
+                {
+                    aclAct.cact_ext = FILTER_ENACT_CACTEXT_TAGONLY;
+                }
+                aclAct.tag_fmt = FILTER_CTAGFMT_UNTAG;
+                aclActCtrl |= FILTER_ENACT_CVLAN_MASK;
+                break;
+             case FILTER_ENACT_EGRESSCTAG_KEEP:
+
+                if(aclActCtrl &(FILTER_ENACT_CVLAN_MASK))
+                {
+                    if(aclAct.cact_ext == FILTER_ENACT_CACTEXT_VLANONLY)
+                        aclAct.cact_ext = FILTER_ENACT_CACTEXT_BOTHVLANTAG;
+                }
+                else
+                {
+                    aclAct.cact_ext = FILTER_ENACT_CACTEXT_TAGONLY;
+                }
+                aclAct.tag_fmt = FILTER_CTAGFMT_KEEP;
+                aclActCtrl |= FILTER_ENACT_CVLAN_MASK;
+                break;
+             case FILTER_ENACT_EGRESSCTAG_KEEPAND1PRMK:
+
+                if(aclActCtrl &(FILTER_ENACT_CVLAN_MASK))
+                {
+                    if(aclAct.cact_ext == FILTER_ENACT_CACTEXT_VLANONLY)
+                        aclAct.cact_ext = FILTER_ENACT_CACTEXT_BOTHVLANTAG;
+                }
+                else
+                {
+                    aclAct.cact_ext = FILTER_ENACT_CACTEXT_TAGONLY;
+                }
+                aclAct.tag_fmt = FILTER_CTAGFMT_KEEP1PRMK;
+                aclActCtrl |= FILTER_ENACT_CVLAN_MASK;
+                break;
+           default:
+                return RT_ERR_FILTER_INACL_ACT_NOT_SUPPORT;
+            }
+        }
+    }
+
+
+    /* Check that enough free ACL rule entries are available */
+    for(i = filter_id; i < (filter_id + noRulesAdd); i++)
+    {
+        if((retVal = rtl8367c_getAsicAclRule(i, &tempRule)) != RT_ERR_OK )
+            return retVal;
+
+        if(tempRule.valid == TRUE)
+        {
+            return RT_ERR_TBL_FULL;
+        }
+    }
+
+    ruleId = 0;
+    for(i = 0; i < RTL8367C_ACLTEMPLATENO; i++)
+    {
+        if(aclRule[i].valid == TRUE)
+        {
+            /* write ACL action control */
+            if((retVal = rtl8367c_setAsicAclActCtrl(filter_id + ruleId, aclActCtrl)) != RT_ERR_OK )
+                return retVal;
+            /* write ACL action */
+            if((retVal = rtl8367c_setAsicAclAct(filter_id + ruleId, &aclAct)) != RT_ERR_OK )
+                return retVal;
+
+            /* write ACL invert (NOT) flag */
+            if((retVal = rtl8367c_setAsicAclNot(filter_id + ruleId, pFilter_cfg->invert)) != RT_ERR_OK )
+                return retVal;
+            /* write ACL rule */
+            if((retVal = rtl8367c_setAsicAclRule(filter_id + ruleId, &aclRule[i])) != RT_ERR_OK )
+                return retVal;
+
+            /* Only the first rule is written with the caller's action control; aclActCtrl and the action of subsequent rules are zeroed */
+            aclActCtrl = 0;
+            memset(&aclAct, 0, sizeof(rtl8367c_acl_act_t));
+
+            ruleId ++;
+        }
+    }
+
+    return RT_ERR_OK;
+}
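+
+/*
+ * Note on the cact_ext handling above: CVLAN VID/policing actions request
+ * FILTER_ENACT_CACTEXT_VLANONLY and the egress C-tag format actions request
+ * FILTER_ENACT_CACTEXT_TAGONLY; whenever both kinds are enabled on the same
+ * rule the state is promoted to FILTER_ENACT_CACTEXT_BOTHVLANTAG, so the
+ * ASIC applies both the VLAN decision and the tag format.  For example,
+ * enabling FILTER_ENACT_CVLAN_EGRESS and then FILTER_ENACT_EGRESSCTAG_UNTAG
+ * leaves cact_ext at FILTER_ENACT_CACTEXT_BOTHVLANTAG.
+ */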
+
+static rtk_api_ret_t _rtk_filter_igrAcl_cfg_del(rtk_filter_id_t filter_id)
+{
+    rtl8367c_aclrule initRule;
+    rtl8367c_acl_act_t  initAct;
+    rtk_api_ret_t ret;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if(filter_id > RTL8367C_ACLRULEMAX )
+        return RT_ERR_FILTER_ENTRYIDX;
+
+    memset(&initRule, 0, sizeof(rtl8367c_aclrule));
+    memset(&initAct, 0, sizeof(rtl8367c_acl_act_t));
+
+    if((ret = rtl8367c_setAsicAclRule(filter_id, &initRule)) != RT_ERR_OK)
+        return ret;
+    if((ret = rtl8367c_setAsicAclActCtrl(filter_id, FILTER_ENACT_INIT_MASK))!= RT_ERR_OK)
+        return ret;
+    if((ret = rtl8367c_setAsicAclAct(filter_id, &initAct)) != RT_ERR_OK)
+        return ret;
+    if((ret = rtl8367c_setAsicAclNot(filter_id, DISABLED)) != RT_ERR_OK )
+        return ret;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_filter_igrAcl_cfg_delAll(void)
+{
+    rtk_uint32            i;
+    rtk_api_ret_t     ret;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    for(i = 0; i < RTL8367C_ACLRULENO; i++)
+    {
+        if((ret = rtl8367c_setAsicAclActCtrl(i, FILTER_ENACT_INIT_MASK))!= RT_ERR_OK)
+            return ret;
+        if((ret = rtl8367c_setAsicAclNot(i, DISABLED)) != RT_ERR_OK )
+            return ret;
+    }
+
+    return rtl8367c_setAsicRegBit(RTL8367C_REG_ACL_RESET_CFG, RTL8367C_ACL_RESET_CFG_OFFSET, TRUE);
+}
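+
+/*
+ * Unlike _rtk_filter_igrAcl_cfg_del(), which clears a single rule by
+ * writing an all-zero rule and action, delAll only resets the per-rule
+ * action control and invert flags and then sets the ACL_RESET_CFG bit,
+ * which resets the ACL rule table in hardware.
+ */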
+
+static rtk_api_ret_t _rtk_filter_igrAcl_cfg_get(rtk_filter_id_t filter_id, rtk_filter_cfg_raw_t *pFilter_cfg, rtk_filter_action_t *pAction)
+{
+    rtk_api_ret_t               retVal;
+    rtk_uint32                  i, tmp;
+    rtl8367c_aclrule            aclRule;
+    rtl8367c_acl_act_t          aclAct;
+    rtk_uint32                  cpuPort;
+    rtl8367c_acltemplate_t      type;
+    rtl8367c_svlan_memconf_t    svlan_cfg;
+    rtl8367c_vlanconfiguser     vlanMC;
+    rtk_uint32                  phyPmask;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if(NULL == pFilter_cfg || NULL == pAction)
+        return RT_ERR_NULL_POINTER;
+
+    if(filter_id > RTL8367C_ACLRULEMAX)
+        return RT_ERR_ENTRY_INDEX;
+
+    if ((retVal = rtl8367c_getAsicAclRule(filter_id, &aclRule)) != RT_ERR_OK)
+        return retVal;
+
+    /* Check valid */
+    if(aclRule.valid == 0)
+    {
+        pFilter_cfg->valid = DISABLED;
+        return RT_ERR_OK;
+    }
+
+    phyPmask = aclRule.data_bits.active_portmsk;
+    if(rtk_switch_portmask_P2L_get(phyPmask,&(pFilter_cfg->activeport.value)) != RT_ERR_OK)
+        return RT_ERR_FAILED;
+
+    phyPmask = aclRule.care_bits.active_portmsk;
+    if(rtk_switch_portmask_P2L_get(phyPmask,&(pFilter_cfg->activeport.mask)) != RT_ERR_OK)
+        return RT_ERR_FAILED;
+
+    for(i = 0; i <= CARE_TAG_IPV6; i++)
+    {
+        if(aclRule.data_bits.tag_exist & (1 << i))
+            pFilter_cfg->careTag.tagType[i].value = 1;
+        else
+            pFilter_cfg->careTag.tagType[i].value = 0;
+
+        if (aclRule.care_bits.tag_exist & (1 << i))
+            pFilter_cfg->careTag.tagType[i].mask = 1;
+        else
+            pFilter_cfg->careTag.tagType[i].mask = 0;
+    }
+
+    if(filter_advanceCaretagField[aclRule.data_bits.type][0] == TRUE)
+    {
+        /* Advanced Care tag setting */
+        for(i = CARE_TAG_TCP; i < CARE_TAG_END; i++)
+        {
+            if(aclRule.data_bits.field[filter_advanceCaretagField[aclRule.data_bits.type][1]] & (0x0001 << (i-CARE_TAG_TCP)) )
+                pFilter_cfg->careTag.tagType[i].value = 1;
+            else
+                pFilter_cfg->careTag.tagType[i].value = 0;
+
+            if(aclRule.care_bits.field[filter_advanceCaretagField[aclRule.care_bits.type][1]] & (0x0001 << (i-CARE_TAG_TCP)) )
+                pFilter_cfg->careTag.tagType[i].mask = 1;
+            else
+                pFilter_cfg->careTag.tagType[i].mask = 0;
+        }
+    }
+
+    for(i = 0; i < RTL8367C_ACLRULEFIELDNO; i++)
+    {
+        pFilter_cfg->careFieldRaw[i] = aclRule.care_bits.field[i];
+        pFilter_cfg->dataFieldRaw[i] = aclRule.data_bits.field[i];
+    }
+
+    if ((retVal = rtl8367c_getAsicAclNot(filter_id, &tmp))!= RT_ERR_OK)
+        return retVal;
+
+    pFilter_cfg->invert = tmp;
+
+    pFilter_cfg->valid = aclRule.valid;
+
+    memset(pAction, 0, sizeof(rtk_filter_action_t));
+
+    if ((retVal = rtl8367c_getAsicAclActCtrl(filter_id, &tmp))!= RT_ERR_OK)
+        return retVal;
+
+    if ((retVal = rtl8367c_getAsicAclAct(filter_id, &aclAct)) != RT_ERR_OK)
+        return retVal;
+
+    if(tmp & FILTER_ENACT_FWD_MASK)
+    {
+        if(TRUE == aclAct.fwdact_ext)
+        {
+            pAction->actEnable[FILTER_ENACT_ISOLATION] = TRUE;
+
+            phyPmask = aclAct.fwdpmask;
+            if(rtk_switch_portmask_P2L_get(phyPmask,&(pAction->filterPortmask)) != RT_ERR_OK)
+                return RT_ERR_FAILED;
+        }
+        else if(aclAct.fwdact == RTL8367C_ACL_FWD_TRAP)
+        {
+            pAction->actEnable[FILTER_ENACT_TRAP_CPU] = TRUE;
+        }
+        else if (aclAct.fwdact == RTL8367C_ACL_FWD_MIRRORFUNTION )
+        {
+            pAction->actEnable[FILTER_ENACT_MIRROR] = TRUE;
+
+            phyPmask = aclAct.fwdpmask;
+            if(rtk_switch_portmask_P2L_get(phyPmask,&(pAction->filterPortmask)) != RT_ERR_OK)
+                return RT_ERR_FAILED;
+        }
+        else if (aclAct.fwdact == RTL8367C_ACL_FWD_REDIRECT)
+        {
+            if(aclAct.fwdpmask == 0 )
+                pAction->actEnable[FILTER_ENACT_DROP] = TRUE;
+            else
+            {
+                pAction->actEnable[FILTER_ENACT_REDIRECT] = TRUE;
+
+                phyPmask = aclAct.fwdpmask;
+                if(rtk_switch_portmask_P2L_get(phyPmask,&(pAction->filterPortmask)) != RT_ERR_OK)
+                    return RT_ERR_FAILED;
+            }
+        }
+        else if (aclAct.fwdact == RTL8367C_ACL_FWD_MIRROR)
+        {
+            if((retVal = rtl8367c_getAsicCputagTrapPort(&cpuPort)) != RT_ERR_OK)
+                return retVal;
+            if (aclAct.fwdpmask == (1 << cpuPort))
+            {
+                pAction->actEnable[FILTER_ENACT_COPY_CPU] = TRUE;
+            }
+            else
+            {
+                pAction->actEnable[FILTER_ENACT_ADD_DSTPORT] = TRUE;
+
+                phyPmask = aclAct.fwdpmask;
+                if(rtk_switch_portmask_P2L_get(phyPmask,&(pAction->filterPortmask)) != RT_ERR_OK)
+                    return RT_ERR_FAILED;
+            }
+        }
+        else
+        {
+            return RT_ERR_FAILED;
+        }
+    }
+
+    if(tmp & FILTER_ENACT_POLICING_MASK)
+    {
+        pAction->actEnable[FILTER_ENACT_POLICING_0] = TRUE;
+        pAction->filterPolicingIdx[0] = aclAct.aclmeteridx;
+    }
+
+    if(tmp & FILTER_ENACT_PRIORITY_MASK)
+    {
+        if(aclAct.priact == FILTER_ENACT_PRI_TYPE(FILTER_ENACT_PRIORITY))
+        {
+            pAction->actEnable[FILTER_ENACT_PRIORITY] = TRUE;
+            pAction->filterPriority = aclAct.pridx;
+        }
+        else if(aclAct.priact == FILTER_ENACT_PRI_TYPE(FILTER_ENACT_1P_REMARK))
+        {
+            pAction->actEnable[FILTER_ENACT_1P_REMARK] = TRUE;
+            pAction->filterPriority = aclAct.pridx;
+        }
+        else if(aclAct.priact == FILTER_ENACT_PRI_TYPE(FILTER_ENACT_DSCP_REMARK))
+        {
+            pAction->actEnable[FILTER_ENACT_DSCP_REMARK] = TRUE;
+            pAction->filterPriority = aclAct.pridx;
+        }
+        else if(aclAct.priact == FILTER_ENACT_PRI_TYPE(FILTER_ENACT_POLICING_3))
+        {
+            pAction->actEnable[FILTER_ENACT_POLICING_3] = TRUE;
+            pAction->filterPolicingIdx[3]  = aclAct.pridx;
+        }
+    }
+
+    if(tmp & FILTER_ENACT_SVLAN_MASK)
+    {
+        if(aclAct.sact == FILTER_ENACT_SVLAN_TYPE(FILTER_ENACT_SVLAN_INGRESS))
+        {
+            if((retVal = rtl8367c_getAsicSvlanMemberConfiguration(aclAct.svidx_sact, &svlan_cfg)) != RT_ERR_OK)
+                return retVal;
+
+            pAction->actEnable[FILTER_ENACT_SVLAN_INGRESS] = TRUE;
+            pAction->filterSvlanIdx = aclAct.svidx_sact;
+            pAction->filterSvlanVid = svlan_cfg.vs_svid;
+        }
+        else if(aclAct.sact == FILTER_ENACT_SVLAN_TYPE(FILTER_ENACT_SVLAN_EGRESS))
+        {
+            if((retVal = rtl8367c_getAsicSvlanMemberConfiguration(aclAct.svidx_sact, &svlan_cfg)) != RT_ERR_OK)
+                return retVal;
+
+            pAction->actEnable[FILTER_ENACT_SVLAN_EGRESS] = TRUE;
+            pAction->filterSvlanIdx = aclAct.svidx_sact;
+            pAction->filterSvlanVid = svlan_cfg.vs_svid;
+        }
+        else if(aclAct.sact == FILTER_ENACT_SVLAN_TYPE(FILTER_ENACT_SVLAN_CVID))
+            pAction->actEnable[FILTER_ENACT_SVLAN_CVID] = TRUE;
+        else if(aclAct.sact == FILTER_ENACT_SVLAN_TYPE(FILTER_ENACT_POLICING_2))
+        {
+            pAction->actEnable[FILTER_ENACT_POLICING_2] = TRUE;
+            pAction->filterPolicingIdx[2]  = aclAct.svidx_sact;
+        }
+    }
+
+
+    if(tmp & FILTER_ENACT_CVLAN_MASK)
+    {
+        if(FILTER_ENACT_CACTEXT_TAGONLY == aclAct.cact_ext ||
+            FILTER_ENACT_CACTEXT_BOTHVLANTAG == aclAct.cact_ext )
+        {
+            if(FILTER_CTAGFMT_UNTAG == aclAct.tag_fmt)
+            {
+                pAction->actEnable[FILTER_ENACT_EGRESSCTAG_UNTAG] = TRUE;
+            }
+            else if(FILTER_CTAGFMT_TAG == aclAct.tag_fmt)
+            {
+                pAction->actEnable[FILTER_ENACT_EGRESSCTAG_TAG] = TRUE;
+            }
+            else if(FILTER_CTAGFMT_KEEP == aclAct.tag_fmt)
+            {
+                pAction->actEnable[FILTER_ENACT_EGRESSCTAG_KEEP] = TRUE;
+            }
+             else if(FILTER_CTAGFMT_KEEP1PRMK== aclAct.tag_fmt)
+            {
+                pAction->actEnable[FILTER_ENACT_EGRESSCTAG_KEEPAND1PRMK] = TRUE;
+            }
+
+        }
+
+        if(FILTER_ENACT_CACTEXT_VLANONLY == aclAct.cact_ext ||
+            FILTER_ENACT_CACTEXT_BOTHVLANTAG == aclAct.cact_ext )
+        {
+            if(aclAct.cact == FILTER_ENACT_CVLAN_TYPE(FILTER_ENACT_CVLAN_INGRESS))
+            {
+                if((retVal = rtl8367c_getAsicVlanMemberConfig(aclAct.cvidx_cact, &vlanMC)) != RT_ERR_OK)
+                    return retVal;
+
+                pAction->actEnable[FILTER_ENACT_CVLAN_INGRESS] = TRUE;
+                pAction->filterCvlanIdx  = aclAct.cvidx_cact;
+                pAction->filterCvlanVid  = vlanMC.evid;
+            }
+            else if(aclAct.cact == FILTER_ENACT_CVLAN_TYPE(FILTER_ENACT_CVLAN_EGRESS))
+            {
+                if((retVal = rtl8367c_getAsicVlanMemberConfig(aclAct.cvidx_cact, &vlanMC)) != RT_ERR_OK)
+                    return retVal;
+
+                pAction->actEnable[FILTER_ENACT_CVLAN_EGRESS] = TRUE;
+                pAction->filterCvlanIdx  = aclAct.cvidx_cact;
+                pAction->filterCvlanVid  = vlanMC.evid;
+            }
+            else if(aclAct.cact == FILTER_ENACT_CVLAN_TYPE(FILTER_ENACT_CVLAN_SVID))
+            {
+                pAction->actEnable[FILTER_ENACT_CVLAN_SVID] = TRUE;
+            }
+            else if(aclAct.cact == FILTER_ENACT_CVLAN_TYPE(FILTER_ENACT_POLICING_1))
+            {
+                pAction->actEnable[FILTER_ENACT_POLICING_1] = TRUE;
+                pAction->filterPolicingIdx[1]  = aclAct.cvidx_cact;
+            }
+        }
+    }
+
+    if(tmp & FILTER_ENACT_INTGPIO_MASK)
+    {
+        if(TRUE == aclAct.aclint)
+        {
+            pAction->actEnable[FILTER_ENACT_INTERRUPT] = TRUE;
+        }
+
+        if(TRUE == aclAct.gpio_en)
+        {
+            pAction->actEnable[FILTER_ENACT_GPO] = TRUE;
+            pAction->filterPin = aclAct.gpio_pin;
+        }
+    }
+
+    /* Get field type of RAW data */
+    if ((retVal = rtl8367c_getAsicAclTemplate(aclRule.data_bits.type, &type))!= RT_ERR_OK)
+        return retVal;
+
+    for(i = 0; i < RTL8367C_ACLRULEFIELDNO; i++)
+    {
+        pFilter_cfg->fieldRawType[i] = type.field[i];
+    }/* end of for(i...) */
+
+    return RT_ERR_OK;
+}
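+
+/*
+ * _rtk_filter_igrAcl_cfg_get() is the decode counterpart of
+ * _rtk_filter_igrAcl_cfg_add(): it reads one raw rule back, expands the
+ * per-group action codes into the actEnable[] flags, and resolves the
+ * C-VLAN/S-VLAN member-configuration indexes back to their VIDs.
+ */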
+
+static rtk_api_ret_t _rtk_filter_igrAcl_unmatchAction_set(rtk_port_t port, rtk_filter_unmatch_action_t action)
+{
+    rtk_api_ret_t ret;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    /* Check port valid */
+    RTK_CHK_PORT_VALID(port);
+
+    if(action >= FILTER_UNMATCH_END)
+        return RT_ERR_INPUT;
+
+    if((ret = rtl8367c_setAsicAclUnmatchedPermit(rtk_switch_port_L2P_get(port), action)) != RT_ERR_OK)
+       return ret;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_filter_igrAcl_unmatchAction_get(rtk_port_t port, rtk_filter_unmatch_action_t* pAction)
+{
+    rtk_api_ret_t ret;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if(NULL == pAction)
+        return RT_ERR_NULL_POINTER;
+
+    /* Check port valid */
+    RTK_CHK_PORT_VALID(port);
+
+    if((ret = rtl8367c_getAsicAclUnmatchedPermit(rtk_switch_port_L2P_get(port), pAction)) != RT_ERR_OK)
+       return ret;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_filter_igrAcl_state_set(rtk_port_t port, rtk_filter_state_t state)
+{
+    rtk_api_ret_t ret;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    /* Check port valid */
+    RTK_CHK_PORT_VALID(port);
+
+    if(state >= RTK_ENABLE_END)
+        return RT_ERR_INPUT;
+
+    if((ret = rtl8367c_setAsicAcl(rtk_switch_port_L2P_get(port), state)) != RT_ERR_OK)
+       return ret;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_filter_igrAcl_state_get(rtk_port_t port, rtk_filter_state_t* pState)
+{
+    rtk_api_ret_t ret;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if(NULL == pState)
+        return RT_ERR_NULL_POINTER;
+
+    /* Check port valid */
+    RTK_CHK_PORT_VALID(port);
+
+    if((ret = rtl8367c_getAsicAcl(rtk_switch_port_L2P_get(port), pState)) != RT_ERR_OK)
+       return ret;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_filter_igrAcl_template_set(rtk_filter_template_t *aclTemplate)
+{
+    rtk_api_ret_t retVal;
+    rtk_uint32 idxField;
+    rtl8367c_acltemplate_t aclType;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if(aclTemplate->index >= RTK_MAX_NUM_OF_FILTER_TYPE)
+        return RT_ERR_INPUT;
+
+    for(idxField = 0; idxField < RTK_MAX_NUM_OF_FILTER_FIELD; idxField++)
+    {
+        if(aclTemplate->fieldType[idxField] < FILTER_FIELD_RAW_DMAC_15_0 ||
+            (aclTemplate->fieldType[idxField] > FILTER_FIELD_RAW_CTAG && aclTemplate->fieldType[idxField] < FILTER_FIELD_RAW_IPV4_SIP_15_0 ) ||
+            (aclTemplate->fieldType[idxField] > FILTER_FIELD_RAW_IPV4_DIP_31_16 && aclTemplate->fieldType[idxField] < FILTER_FIELD_RAW_IPV6_SIP_15_0 ) ||
+            (aclTemplate->fieldType[idxField] > FILTER_FIELD_RAW_IPV6_DIP_31_16 && aclTemplate->fieldType[idxField] < FILTER_FIELD_RAW_VIDRANGE ) ||
+            (aclTemplate->fieldType[idxField] > FILTER_FIELD_RAW_FIELD_VALID && aclTemplate->fieldType[idxField] < FILTER_FIELD_RAW_FIELD_SELECT00 ) ||
+            aclTemplate->fieldType[idxField] >= FILTER_FIELD_RAW_END)
+        {
+            return RT_ERR_INPUT;
+        }
+    }
+
+    for(idxField = 0; idxField < RTK_MAX_NUM_OF_FILTER_FIELD; idxField++)
+    {
+        aclType.field[idxField] = aclTemplate->fieldType[idxField];
+    }
+
+    if((retVal = rtl8367c_setAsicAclTemplate(aclTemplate->index, &aclType)) != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_filter_igrAcl_template_get(rtk_filter_template_t *aclTemplate)
+{
+    rtk_api_ret_t ret;
+    rtk_uint32 idxField;
+    rtl8367c_acltemplate_t aclType;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if(NULL == aclTemplate)
+        return RT_ERR_NULL_POINTER;
+
+    if(aclTemplate->index >= RTK_MAX_NUM_OF_FILTER_TYPE)
+        return RT_ERR_INPUT;
+
+   if((ret = rtl8367c_getAsicAclTemplate(aclTemplate->index, &aclType)) != RT_ERR_OK)
+       return ret;
+
+    for(idxField = 0; idxField < RTK_MAX_NUM_OF_FILTER_FIELD; idxField ++)
+    {
+        aclTemplate->fieldType[idxField] = aclType.field[idxField];
+    }
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_filter_igrAcl_field_sel_set(rtk_uint32 index, rtk_field_sel_t format, rtk_uint32 offset)
+{
+    rtk_api_ret_t ret;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if(index >= RTL8367C_FIELDSEL_FORMAT_NUMBER)
+        return RT_ERR_OUT_OF_RANGE;
+
+    if(format >= FORMAT_END)
+        return RT_ERR_OUT_OF_RANGE;
+
+    if(offset > RTL8367C_FIELDSEL_MAX_OFFSET)
+        return RT_ERR_OUT_OF_RANGE;
+
+    if((ret = rtl8367c_setAsicFieldSelector(index, (rtk_uint32)format, offset)) != RT_ERR_OK)
+       return ret;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_filter_igrAcl_field_sel_get(rtk_uint32 index, rtk_field_sel_t *pFormat, rtk_uint32 *pOffset)
+{
+    rtk_api_ret_t ret;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if(NULL == pFormat || NULL == pOffset)
+        return RT_ERR_NULL_POINTER;
+
+    if(index >= RTL8367C_FIELDSEL_FORMAT_NUMBER)
+        return RT_ERR_OUT_OF_RANGE;
+
+    if((ret = rtl8367c_getAsicFieldSelector(index, pFormat, pOffset)) != RT_ERR_OK)
+       return ret;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_filter_iprange_set(rtk_uint32 index, rtk_filter_iprange_t type, ipaddr_t upperIp, ipaddr_t lowerIp)
+{
+    rtk_api_ret_t ret;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if(index > RTL8367C_ACLRANGEMAX)
+        return RT_ERR_OUT_OF_RANGE;
+
+    if(type >= IPRANGE_END)
+        return RT_ERR_OUT_OF_RANGE;
+
+    if(lowerIp > upperIp)
+        return RT_ERR_INPUT;
+
+    if((ret = rtl8367c_setAsicAclIpRange(index, type, upperIp, lowerIp)) != RT_ERR_OK)
+       return ret;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_filter_iprange_get(rtk_uint32 index, rtk_filter_iprange_t *pType, ipaddr_t *pUpperIp, ipaddr_t *pLowerIp)
+{
+    rtk_api_ret_t ret;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if((NULL == pType) || (NULL == pUpperIp) || (NULL == pLowerIp))
+        return RT_ERR_NULL_POINTER;
+
+    if(index > RTL8367C_ACLRANGEMAX)
+        return RT_ERR_OUT_OF_RANGE;
+
+    if((ret = rtl8367c_getAsicAclIpRange(index, pType, pUpperIp, pLowerIp)) != RT_ERR_OK)
+       return ret;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_filter_vidrange_set(rtk_uint32 index, rtk_filter_vidrange_t type, rtk_uint32 upperVid, rtk_uint32 lowerVid)
+{
+    rtk_api_ret_t ret;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if(index > RTL8367C_ACLRANGEMAX)
+        return RT_ERR_OUT_OF_RANGE;
+
+    if(type >= VIDRANGE_END)
+        return RT_ERR_OUT_OF_RANGE;
+
+    if(lowerVid > upperVid)
+        return RT_ERR_INPUT;
+
+    if( (upperVid > RTL8367C_VIDMAX) || (lowerVid > RTL8367C_VIDMAX))
+        return RT_ERR_OUT_OF_RANGE;
+
+    if((ret = rtl8367c_setAsicAclVidRange(index, type, upperVid, lowerVid)) != RT_ERR_OK)
+       return ret;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_filter_vidrange_get(rtk_uint32 index, rtk_filter_vidrange_t *pType, rtk_uint32 *pUpperVid, rtk_uint32 *pLowerVid)
+{
+    rtk_api_ret_t ret;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if((NULL == pType) || (NULL == pUpperVid) || (NULL == pLowerVid))
+        return RT_ERR_NULL_POINTER;
+
+    if(index > RTL8367C_ACLRANGEMAX)
+        return RT_ERR_OUT_OF_RANGE;
+
+    if((ret = rtl8367c_getAsicAclVidRange(index, pType, pUpperVid, pLowerVid)) != RT_ERR_OK)
+       return ret;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_filter_portrange_set(rtk_uint32 index, rtk_filter_portrange_t type, rtk_uint32 upperPort, rtk_uint32 lowerPort)
+{
+    rtk_api_ret_t ret;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if(index > RTL8367C_ACLRANGEMAX)
+        return RT_ERR_OUT_OF_RANGE;
+
+    if(type >= PORTRANGE_END)
+        return RT_ERR_OUT_OF_RANGE;
+
+    if(lowerPort > upperPort)
+        return RT_ERR_INPUT;
+
+    if(upperPort > RTL8367C_ACL_PORTRANGEMAX)
+        return RT_ERR_INPUT;
+
+    if(lowerPort > RTL8367C_ACL_PORTRANGEMAX)
+        return RT_ERR_INPUT;
+
+    if((ret = rtl8367c_setAsicAclPortRange(index, type, upperPort, lowerPort)) != RT_ERR_OK)
+       return ret;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_filter_portrange_get(rtk_uint32 index, rtk_filter_portrange_t *pType, rtk_uint32 *pUpperPort, rtk_uint32 *pLowerPort)
+{
+    rtk_api_ret_t ret;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if((NULL == pType) || (NULL == pUpperPort) || (NULL == pLowerPort))
+        return RT_ERR_NULL_POINTER;
+
+    if(index > RTL8367C_ACLRANGEMAX)
+        return RT_ERR_OUT_OF_RANGE;
+
+    if((ret = rtl8367c_getAsicAclPortRange(index, pType, pUpperPort, pLowerPort)) != RT_ERR_OK)
+       return ret;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_filter_igrAclPolarity_set(rtk_uint32 polarity)
+{
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if(polarity > 1)
+        return RT_ERR_OUT_OF_RANGE;
+
+    return rtl8367c_setAsicAclGpioPolarity(polarity);
+}
+
+static rtk_api_ret_t _rtk_filter_igrAclPolarity_get(rtk_uint32* pPolarity)
+{
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if(NULL == pPolarity)
+        return RT_ERR_NULL_POINTER;
+
+    return rtl8367c_getAsicAclGpioPolarity(pPolarity);
+}
+/* Function Name:
+ *      rtk_filter_igrAcl_init
+ * Description:
+ *      ACL initialization function
+ * Input:
+ *      None
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ * Note:
+ *      This function enables and initializes the ACL function.
+ */
+rtk_api_ret_t rtk_filter_igrAcl_init(void)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_filter_igrAcl_init();
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
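+
+/*
+ * All public rtk_filter_* entry points below follow the same pattern:
+ * take the global API lock, call the matching internal _rtk_* worker and
+ * release the lock before returning the worker's result.
+ */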
+
+/* Function Name:
+ *      rtk_filter_igrAcl_field_add
+ * Description:
+ *      Add comparison rule to an ACL configuration
+ * Input:
+ *      pFilter_cfg     - The ACL configuration to which the comparison rule is added
+ *      pFilter_field   - The comparison rule that will be added.
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK               - OK
+ *      RT_ERR_FAILED           - Failed
+ *      RT_ERR_SMI              - SMI access error
+ *      RT_ERR_NULL_POINTER     - Pointer pFilter_field or pFilter_cfg point to NULL.
+ *      RT_ERR_INPUT            - Invalid input parameters.
+ * Note:
+ *      This function adds a comparison rule (*pFilter_field) to an ACL configuration (*pFilter_cfg).
+ *      Pointer pFilter_cfg points to an ACL configuration structure that keeps multiple ACL
+ *      comparison rules in a linked list; pFilter_field is appended to the linked list kept by
+ *      the structure that pFilter_cfg points to.
+ */
+rtk_api_ret_t rtk_filter_igrAcl_field_add(rtk_filter_cfg_t* pFilter_cfg, rtk_filter_field_t* pFilter_field)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_filter_igrAcl_field_add(pFilter_cfg, pFilter_field);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_filter_igrAcl_cfg_add
+ * Description:
+ *      Add an ACL configuration to ASIC
+ * Input:
+ *      filter_id       - Start index of ACL configuration.
+ *      pFilter_cfg     - The ACL configuration to be written
+ *      pFilter_action  - Action(s) of ACL configuration.
+ * Output:
+ *      ruleNum - Number of rules written to the ACL table
+ * Return:
+ *      RT_ERR_OK                               - OK
+ *      RT_ERR_FAILED                           - Failed
+ *      RT_ERR_SMI                              - SMI access error
+ *      RT_ERR_NULL_POINTER                     - Pointer pFilter_action or pFilter_cfg points to NULL.
+ *      RT_ERR_INPUT                            - Invalid input parameters.
+ *      RT_ERR_ENTRY_INDEX                      - Invalid filter_id.
+ *      RT_ERR_FILTER_INACL_ACT_NOT_SUPPORT     - Action is not supported in this chip.
+ *      RT_ERR_FILTER_INACL_RULE_NOT_SUPPORT    - Rule is not supported.
+ * Note:
+ *      This function stores pFilter_cfg and pFilter_action into the ASIC, starting at
+ *      index filter_id.
+ */
+rtk_api_ret_t rtk_filter_igrAcl_cfg_add(rtk_filter_id_t filter_id, rtk_filter_cfg_t* pFilter_cfg, rtk_filter_action_t* pFilter_action, rtk_filter_number_t *ruleNum)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_filter_igrAcl_cfg_add(filter_id, pFilter_cfg, pFilter_action, ruleNum);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
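+
+/*
+ * Minimal usage sketch (illustrative only; the EtherType value is a
+ * hypothetical example and the activeport/careTag setup is omitted):
+ *
+ *   rtk_filter_cfg_t    cfg;
+ *   rtk_filter_field_t  field;
+ *   rtk_filter_action_t act;
+ *   rtk_filter_number_t ruleNum;
+ *
+ *   memset(&cfg, 0, sizeof(cfg));
+ *   memset(&field, 0, sizeof(field));
+ *   memset(&act, 0, sizeof(act));
+ *
+ *   field.fieldType = FILTER_FIELD_ETHERTYPE;
+ *   field.filter_pattern_union.etherType.dataType = FILTER_FIELD_DATA_MASK;
+ *   field.filter_pattern_union.etherType.value = 0x8863;
+ *   field.filter_pattern_union.etherType.mask = 0xFFFF;
+ *
+ *   if (rtk_filter_igrAcl_field_add(&cfg, &field) == RT_ERR_OK)
+ *   {
+ *       act.actEnable[FILTER_ENACT_TRAP_CPU] = TRUE;
+ *       rtk_filter_igrAcl_cfg_add(0, &cfg, &act, &ruleNum);
+ *   }
+ */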
+
+/* Function Name:
+ *      rtk_filter_igrAcl_cfg_del
+ * Description:
+ *      Delete an ACL configuration from ASIC
+ * Input:
+ *      filter_id   - Start index of ACL configuration.
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK               - OK
+ *      RT_ERR_FAILED           - Failed
+ *      RT_ERR_SMI              - SMI access error
+ *      RT_ERR_FILTER_ENTRYIDX  - Invalid filter_id.
+ * Note:
+ *      This function deletes a group of ACL rules starting from filter_id.
+ */
+rtk_api_ret_t rtk_filter_igrAcl_cfg_del(rtk_filter_id_t filter_id)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_filter_igrAcl_cfg_del(filter_id);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_filter_igrAcl_cfg_delAll
+ * Description:
+ *      Delete all ACL entries from ASIC
+ * Input:
+ *      None
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ * Note:
+ *      This function deletes all ACL configurations from the ASIC.
+ */
+rtk_api_ret_t rtk_filter_igrAcl_cfg_delAll(void)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_filter_igrAcl_cfg_delAll();
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_filter_igrAcl_cfg_get
+ * Description:
+ *      Get one ingress ACL configuration from the ASIC.
+ * Input:
+ *      filter_id       - Start index of ACL configuration.
+ * Output:
+ *      pFilter_cfg     - Buffer pointer for the ingress ACL data
+ *      pAction         - Buffer pointer for the ingress ACL action
+ * Return:
+ *      RT_ERR_OK               - OK
+ *      RT_ERR_FAILED           - Failed
+ *      RT_ERR_SMI              - SMI access error
+ *      RT_ERR_NULL_POINTER     - Pointer pAction or pFilter_cfg points to NULL.
+ *      RT_ERR_FILTER_ENTRYIDX  - Invalid entry index.
+ * Note:
+ *      This function reads one ACL configuration back from the ASIC.
+ */
+rtk_api_ret_t rtk_filter_igrAcl_cfg_get(rtk_filter_id_t filter_id, rtk_filter_cfg_raw_t *pFilter_cfg, rtk_filter_action_t *pAction)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_filter_igrAcl_cfg_get(filter_id,  pFilter_cfg,  pAction);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
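+
+/*
+ * Sketch: walk the ACL table and count the rules currently in use
+ * (rule indexes above RTL8367C_ACLRULEMAX are rejected by the API):
+ *
+ *   rtk_filter_cfg_raw_t raw;
+ *   rtk_filter_action_t  act;
+ *   rtk_uint32           id, used = 0;
+ *
+ *   for (id = 0; id <= RTL8367C_ACLRULEMAX; id++)
+ *   {
+ *       if (rtk_filter_igrAcl_cfg_get(id, &raw, &act) != RT_ERR_OK)
+ *           break;
+ *       if (raw.valid != DISABLED)
+ *           used++;
+ *   }
+ */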
+
+/* Function Name:
+ *      rtk_filter_igrAcl_unmatchAction_set
+ * Description:
+ *      Set the action applied to packets when no ACL configuration matches
+ * Input:
+ *      port    - Port id.
+ *      action  - Action.
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_PORT_ID      - Invalid port id.
+ *      RT_ERR_INPUT        - Invalid input parameters.
+ * Note:
+ *      This function sets the action applied to packets when no ACL configuration matches.
+ */
+rtk_api_ret_t rtk_filter_igrAcl_unmatchAction_set(rtk_port_t port, rtk_filter_unmatch_action_t action)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_filter_igrAcl_unmatchAction_set(port, action);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_filter_igrAcl_unmatchAction_get
+ * Description:
+ *      Get the action applied to packets when no ACL configuration matches
+ * Input:
+ *      port    - Port id.
+ * Output:
+ *      pAction - Action.
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_PORT_ID      - Invalid port id.
+ *      RT_ERR_INPUT        - Invalid input parameters.
+ * Note:
+ *      This function gets the action applied to packets when no ACL configuration matches.
+ */
+rtk_api_ret_t rtk_filter_igrAcl_unmatchAction_get(rtk_port_t port, rtk_filter_unmatch_action_t* pAction)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_filter_igrAcl_unmatchAction_get(port, pAction);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_filter_igrAcl_state_set
+ * Description:
+ *      Set state of ingress ACL.
+ * Input:
+ *      port    - Port id.
+ *      state   - Ingress ACL state.
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_PORT_ID      - Invalid port id.
+ *      RT_ERR_INPUT        - Invalid input parameters.
+ * Note:
+ *      This function sets the ingress ACL state of the specified port.
+ */
+rtk_api_ret_t rtk_filter_igrAcl_state_set(rtk_port_t port, rtk_filter_state_t state)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_filter_igrAcl_state_set(port, state);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_filter_igrAcl_state_get
+ * Description:
+ *      Get state of ingress ACL.
+ * Input:
+ *      port    - Port id.
+ * Output:
+ *      pState  - Ingress ACL state.
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_PORT_ID      - Invalid port id.
+ *      RT_ERR_INPUT        - Invalid input parameters.
+ * Note:
+ *      This function gets the ingress ACL state of the specified port.
+ */
+rtk_api_ret_t rtk_filter_igrAcl_state_get(rtk_port_t port, rtk_filter_state_t* pState)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_filter_igrAcl_state_get(port, pState);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_filter_igrAcl_template_set
+ * Description:
+ *      Set template of ingress ACL.
+ * Input:
+ *      aclTemplate - Ingress ACL template
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK              - OK
+ *      RT_ERR_FAILED          - Failed
+ *      RT_ERR_SMI             - SMI access error
+ *      RT_ERR_INPUT           - Invalid input parameters.
+ * Note:
+ *      This function sets the ACL template.
+ */
+rtk_api_ret_t rtk_filter_igrAcl_template_set(rtk_filter_template_t *aclTemplate)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_filter_igrAcl_template_set(aclTemplate);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_filter_igrAcl_template_get
+ * Description:
+ *      Get template of ingress ACL.
+ * Input:
+ *      aclTemplate - Ingress ACL template; the index field selects the template to read
+ * Output:
+ *      aclTemplate - Retrieved template field types
+ * Return:
+ *      RT_ERR_OK              - OK
+ *      RT_ERR_FAILED          - Failed
+ *      RT_ERR_SMI             - SMI access error
+ * Note:
+ *      This function gets the ACL template.
+ */
+rtk_api_ret_t rtk_filter_igrAcl_template_get(rtk_filter_template_t *aclTemplate)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_filter_igrAcl_template_get(aclTemplate);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_filter_igrAcl_field_sel_set
+ * Description:
+ *      Set user defined field selectors in HSB
+ * Input:
+ *      index       - index of field selector 0-15
+ *      format      - Format of field selector
+ *      offset      - Retrieving data offset
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK              - OK
+ *      RT_ERR_FAILED          - Failed
+ *      RT_ERR_SMI             - SMI access error
+ * Note:
+ *      The system supports 16 user-defined field selectors. Each selector
+ *      can be enabled or disabled, and retrieves a 16-bit field at a
+ *      configurable offset within several predefined standard L2/L3/L4
+ *      payload formats.
+ */
+rtk_api_ret_t rtk_filter_igrAcl_field_sel_set(rtk_uint32 index, rtk_field_sel_t format, rtk_uint32 offset)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_filter_igrAcl_field_sel_set(index, format, offset);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_filter_igrAcl_field_sel_get
+ * Description:
+ *      Get user defined field selectors in HSB
+ * Input:
+ *      index       - index of field selector 0-15
+ * Output:
+ *      pFormat     - Format of field selector
+ *      pOffset     - Retrieving data offset
+ * Return:
+ *      RT_ERR_OK              - OK
+ *      RT_ERR_FAILED          - Failed
+ *      RT_ERR_SMI             - SMI access error
+ * Note:
+ *      None.
+ */
+rtk_api_ret_t rtk_filter_igrAcl_field_sel_get(rtk_uint32 index, rtk_field_sel_t *pFormat, rtk_uint32 *pOffset)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_filter_igrAcl_field_sel_get(index, pFormat, pOffset);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_filter_iprange_set
+ * Description:
+ *      Set IP Range check
+ * Input:
+ *      index       - index of IP Range 0-15
+ *      type        - IP Range check type, 0: Delete an entry, 1: IPv4_SIP, 2: IPv4_DIP, 3: IPv6_SIP, 4: IPv6_DIP
+ *      upperIp     - The upper bound of IP range
+ *      lowerIp     - The lower Bound of IP range
+ * Output:
+ *      None.
+ * Return:
+ *      RT_ERR_OK              - OK
+ *      RT_ERR_FAILED          - Failed
+ *      RT_ERR_SMI             - SMI access error
+ *      RT_ERR_OUT_OF_RANGE    - The parameter is out of range
+ *      RT_ERR_INPUT           - Input error
+ * Note:
+ *      upperIp must be greater than or equal to lowerIp.
+ */
+rtk_api_ret_t rtk_filter_iprange_set(rtk_uint32 index, rtk_filter_iprange_t type, ipaddr_t upperIp, ipaddr_t lowerIp)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_filter_iprange_set(index, type, upperIp, lowerIp);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_filter_iprange_get
+ * Description:
+ *      Get IP Range check
+ * Input:
+ *      index       - index of IP Range 0-15
+ * Output:
+ *      pType        - IP Range check type, 0: Unused, 1: IPv4_SIP, 2: IPv4_DIP, 3: IPv6_SIP, 4: IPv6_DIP
+ *      pUpperIp     - The upper bound of IP range
+ *      pLowerIp     - The lower Bound of IP range
+ * Return:
+ *      RT_ERR_OK              - OK
+ *      RT_ERR_FAILED          - Failed
+ *      RT_ERR_SMI             - SMI access error
+ *      RT_ERR_OUT_OF_RANGE    - The parameter is out of range
+ * Note:
+ *      None.
+ */
+rtk_api_ret_t rtk_filter_iprange_get(rtk_uint32 index, rtk_filter_iprange_t *pType, ipaddr_t *pUpperIp, ipaddr_t *pLowerIp)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_filter_iprange_get(index, pType, pUpperIp, pLowerIp);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
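+
+/*
+ * Sketch: program range-check entry 0 to match IPv4 source addresses
+ * 192.168.1.0 - 192.168.1.255 (type 1 = IPv4_SIP per the table above;
+ * the numeric cast is for illustration only):
+ *
+ *   rtk_filter_iprange_set(0, (rtk_filter_iprange_t)1,
+ *                          0xC0A801FF, 0xC0A80100);
+ */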
+
+/* Function Name:
+ *      rtk_filter_vidrange_set
+ * Description:
+ *      Set VID Range check
+ * Input:
+ *      index       - index of VID Range 0-15
+ *      type        - VID Range check type, 0: Delete an entry, 1: CVID, 2: SVID
+ *      upperVid    - The upper bound of VID range
+ *      lowerVid    - The lower Bound of VID range
+ * Output:
+ *      None.
+ * Return:
+ *      RT_ERR_OK              - OK
+ *      RT_ERR_FAILED          - Failed
+ *      RT_ERR_SMI             - SMI access error
+ *      RT_ERR_OUT_OF_RANGE    - The parameter is out of range
+ *      RT_ERR_INPUT           - Input error
+ * Note:
+ *      upperVid must be greater than or equal to lowerVid.
+ */
+rtk_api_ret_t rtk_filter_vidrange_set(rtk_uint32 index, rtk_filter_vidrange_t type, rtk_uint32 upperVid, rtk_uint32 lowerVid)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_filter_vidrange_set(index, type, upperVid, lowerVid);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+/* Function Name:
+ *      rtk_filter_vidrange_get
+ * Description:
+ *      Get VID Range check
+ * Input:
+ *      index       - index of VID Range 0-15
+ * Output:
+ *      pType        - VID Range check type, 0: Unused, 1: CVID, 2: SVID
+ *      pUpperVid    - The upper bound of VID range
+ *      pLowerVid    - The lower Bound of VID range
+ * Return:
+ *      RT_ERR_OK              - OK
+ *      RT_ERR_FAILED          - Failed
+ *      RT_ERR_SMI             - SMI access error
+ *      RT_ERR_OUT_OF_RANGE    - The parameter is out of range
+ * Note:
+ *      None.
+ */
+rtk_api_ret_t rtk_filter_vidrange_get(rtk_uint32 index, rtk_filter_vidrange_t *pType, rtk_uint32 *pUpperVid, rtk_uint32 *pLowerVid)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_filter_vidrange_get(index, pType, pUpperVid, pLowerVid);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+/* Function Name:
+ *      rtk_filter_portrange_set
+ * Description:
+ *      Set Port Range check
+ * Input:
+ *      index       - index of Port Range 0-15
+ *      type        - Port Range check type, 0: Delete an entry, 1: Source Port, 2: Destination Port
+ *      upperPort   - The upper bound of Port range
+ *      lowerPort   - The lower Bound of Port range
+ * Output:
+ *      None.
+ * Return:
+ *      RT_ERR_OK              - OK
+ *      RT_ERR_FAILED          - Failed
+ *      RT_ERR_SMI             - SMI access error
+ *      RT_ERR_OUT_OF_RANGE    - The parameter is out of range
+ *      RT_ERR_INPUT           - Input error
+ * Note:
+ *      upperPort must be greater than or equal to lowerPort.
+ */
+rtk_api_ret_t rtk_filter_portrange_set(rtk_uint32 index, rtk_filter_portrange_t type, rtk_uint32 upperPort, rtk_uint32 lowerPort)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_filter_portrange_set(index, type, upperPort, lowerPort);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+/* Function Name:
+ *      rtk_filter_portrange_get
+ * Description:
+ *      Get Port Range check
+ * Input:
+ *      index       - index of Port Range 0-15
+ * Output:
+ *      pType       - Port Range check type, 0: Unused, 1: Source Port, 2: Destination Port
+ *      pUpperPort  - The upper bound of Port range
+ *      pLowerPort  - The lower Bound of Port range
+ * Return:
+ *      RT_ERR_OK              - OK
+ *      RT_ERR_FAILED          - Failed
+ *      RT_ERR_SMI             - SMI access error
+ *      RT_ERR_OUT_OF_RANGE    - The parameter is out of range
+ *      RT_ERR_INPUT           - Input error
+ * Note:
+ *      None.
+ */
+rtk_api_ret_t rtk_filter_portrange_get(rtk_uint32 index, rtk_filter_portrange_t *pType, rtk_uint32 *pUpperPort, rtk_uint32 *pLowerPort)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_filter_portrange_get(index, pType, pUpperPort, pLowerPort);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+/* Function Name:
+ *      rtk_filter_igrAclPolarity_set
+ * Description:
+ *      Set ACL GPIO control polarity
+ * Input:
+ *      polarity - 1: High, 0: Low
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK       - Success
+ *      RT_ERR_SMI      - SMI access error
+ * Note:
+ *      none
+ */
+rtk_api_ret_t rtk_filter_igrAclPolarity_set(rtk_uint32 polarity)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_filter_igrAclPolarity_set(polarity);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+/* Function Name:
+ *      rtk_filter_igrAclPolarity_get
+ * Description:
+ *      Get ACL GPIO control polarity
+ * Input:
+ *      None
+ * Output:
+ *      pPolarity - 1: High, 0: Low
+ * Return:
+ *      RT_ERR_OK       - Success
+ *      RT_ERR_SMI      - SMI access error
+ * Note:
+ *      none
+ */
+rtk_api_ret_t rtk_filter_igrAclPolarity_get(rtk_uint32* pPolarity)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_filter_igrAclPolarity_get(pPolarity);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
diff -Nruw linux-4.4.115-fbx/drivers/misc/freebox./rtlapi/acl.h linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/acl.h
--- linux-4.4.115-fbx/drivers/misc/freebox./rtlapi/acl.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/acl.h	2019-01-22 16:16:24.707257309 +0100
@@ -0,0 +1,992 @@
+/*
+ * Copyright (C) 2013 Realtek Semiconductor Corp.
+ * All Rights Reserved.
+ *
+ * This program is the proprietary software of Realtek Semiconductor
+ * Corporation and/or its licensors, and only be used, duplicated,
+ * modified or distributed under the authorized license from Realtek.
+ *
+ * ANY USE OF THE SOFTWARE OTHER THAN AS AUTHORIZED UNDER
+ * THIS LICENSE OR COPYRIGHT LAW IS PROHIBITED.
+ *
+ * Purpose : RTL8367/RTL8367C switch high-level API
+ *
+ * Feature : The file includes ACL module high-layer API definition
+ *
+ */
+
+#ifndef __RTK_API_ACL_H__
+#define __RTK_API_ACL_H__
+
+/*
+ * Data Type Declaration
+ */
+#define RTK_FILTER_RAW_FIELD_NUMBER                8
+
+#define ACL_DEFAULT_ABILITY                         0
+#define ACL_DEFAULT_UNMATCH_PERMIT                  1
+
+#define ACL_RULE_FREE                               0
+#define ACL_RULE_INAVAILABLE                        1
+#define ACL_RULE_CARETAG_MASK                       0x1F
+#define FILTER_POLICING_MAX                         4
+#define FILTER_LOGGING_MAX                          8
+#define FILTER_PATTERN_MAX                          4
+
+#define FILTER_ENACT_CVLAN_MASK         0x01
+#define FILTER_ENACT_SVLAN_MASK         0x02
+#define FILTER_ENACT_PRIORITY_MASK      0x04
+#define FILTER_ENACT_POLICING_MASK      0x08
+#define FILTER_ENACT_FWD_MASK           0x10
+#define FILTER_ENACT_INTGPIO_MASK       0x20
+#define FILTER_ENACT_INIT_MASK          0x3F
+
+typedef enum rtk_filter_act_cactext_e
+{
+    FILTER_ENACT_CACTEXT_VLANONLY = 0,
+    FILTER_ENACT_CACTEXT_BOTHVLANTAG,
+    FILTER_ENACT_CACTEXT_TAGONLY,
+    FILTER_ENACT_CACTEXT_END,
+} rtk_filter_act_cactext_t;
+
+typedef enum rtk_filter_act_ctagfmt_e
+{
+    FILTER_CTAGFMT_UNTAG = 0,
+    FILTER_CTAGFMT_TAG,
+    FILTER_CTAGFMT_KEEP,
+    FILTER_CTAGFMT_KEEP1PRMK,
+} rtk_filter_act_ctag_t;
+
+#define RTK_MAX_NUM_OF_FILTER_TYPE                  5
+#define RTK_MAX_NUM_OF_FILTER_FIELD                 8
+
+#define RTK_DOT_1AS_TIMESTAMP_UNIT_IN_WORD_LENGTH   3UL
+#define RTK_IPV6_ADDR_WORD_LENGTH                   4UL
+
+#define FILTER_ENACT_CVLAN_TYPE(type)   ((type) - FILTER_ENACT_CVLAN_INGRESS)
+#define FILTER_ENACT_SVLAN_TYPE(type)   ((type) - FILTER_ENACT_SVLAN_INGRESS)
+#define FILTER_ENACT_FWD_TYPE(type)     ((type) - FILTER_ENACT_ADD_DSTPORT)
+#define FILTER_ENACT_PRI_TYPE(type)     ((type) - FILTER_ENACT_PRIORITY)
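+
+/*
+ * These macros convert a rtk_filter_act_enable_t member into the small
+ * per-group action code expected by the ASIC by subtracting the group's
+ * first member, e.g. FILTER_ENACT_CVLAN_TYPE(FILTER_ENACT_CVLAN_SVID) == 2
+ * and FILTER_ENACT_SVLAN_TYPE(FILTER_ENACT_POLICING_2) == 3, matching the
+ * enumeration order defined below.
+ */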
+
+#define RTK_FILTER_FIELD_USED_MAX                   8
+#define RTK_FILTER_FIELD_INDEX(template, index)     (((template) << 4) + (index))
+
+
+typedef enum rtk_filter_act_enable_e
+{
+    /* CVLAN */
+    FILTER_ENACT_CVLAN_INGRESS = 0,
+    FILTER_ENACT_CVLAN_EGRESS,
+    FILTER_ENACT_CVLAN_SVID,
+    FILTER_ENACT_POLICING_1,
+
+    /* SVLAN */
+    FILTER_ENACT_SVLAN_INGRESS,
+    FILTER_ENACT_SVLAN_EGRESS,
+    FILTER_ENACT_SVLAN_CVID,
+    FILTER_ENACT_POLICING_2,
+
+    /* Policing and Logging */
+    FILTER_ENACT_POLICING_0,
+
+    /* Forward */
+    FILTER_ENACT_COPY_CPU,
+    FILTER_ENACT_DROP,
+    FILTER_ENACT_ADD_DSTPORT,
+    FILTER_ENACT_REDIRECT,
+    FILTER_ENACT_MIRROR,
+    FILTER_ENACT_TRAP_CPU,
+    FILTER_ENACT_ISOLATION,
+
+    /* QoS */
+    FILTER_ENACT_PRIORITY,
+    FILTER_ENACT_DSCP_REMARK,
+    FILTER_ENACT_1P_REMARK,
+    FILTER_ENACT_POLICING_3,
+
+    /* Interrupt and GPO */
+    FILTER_ENACT_INTERRUPT,
+    FILTER_ENACT_GPO,
+
+    /*VLAN tag*/
+    FILTER_ENACT_EGRESSCTAG_UNTAG,
+    FILTER_ENACT_EGRESSCTAG_TAG,
+    FILTER_ENACT_EGRESSCTAG_KEEP,
+    FILTER_ENACT_EGRESSCTAG_KEEPAND1PRMK,
+
+    FILTER_ENACT_END,
+} rtk_filter_act_enable_t;
+
+
+typedef struct
+{
+    rtk_filter_act_enable_t actEnable[FILTER_ENACT_END];
+
+    /* CVLAN action */
+    rtk_uint32      filterCvlanVid;
+    rtk_uint32      filterCvlanIdx;
+    /* SVLAN action */
+    rtk_uint32      filterSvlanVid;
+    rtk_uint32      filterSvlanIdx;
+
+    /* Policing action */
+    rtk_uint32      filterPolicingIdx[FILTER_POLICING_MAX];
+
+    /* Forwarding action */
+    rtk_portmask_t  filterPortmask;
+
+    /* QOS action */
+    rtk_uint32      filterPriority;
+
+    /*GPO*/
+    rtk_uint32      filterPin;
+
+} rtk_filter_action_t;
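+
+/*
+ * Auxiliary-field usage per action group (see _rtk_filter_igrAcl_cfg_add):
+ * CVLAN actions use filterCvlanVid/filterCvlanIdx, SVLAN actions use
+ * filterSvlanVid/filterSvlanIdx, FILTER_ENACT_POLICING_0..3 use
+ * filterPolicingIdx[0..3], forwarding actions use filterPortmask,
+ * priority/remark actions use filterPriority, and FILTER_ENACT_GPO uses
+ * filterPin.
+ */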
+
+typedef struct rtk_filter_flag_s
+{
+    rtk_uint32 value;
+    rtk_uint32 mask;
+} rtk_filter_flag_t;
+
+typedef enum rtk_filter_care_tag_index_e
+{
+    CARE_TAG_CTAG = 0,
+    CARE_TAG_STAG,
+    CARE_TAG_PPPOE,
+    CARE_TAG_IPV4,
+    CARE_TAG_IPV6,
+    CARE_TAG_TCP,
+    CARE_TAG_UDP,
+    CARE_TAG_ARP,
+    CARE_TAG_RSV1,
+    CARE_TAG_RSV2,
+    CARE_TAG_ICMP,
+    CARE_TAG_IGMP,
+    CARE_TAG_LLC,
+    CARE_TAG_RSV3,
+    CARE_TAG_HTTP,
+    CARE_TAG_RSV4,
+    CARE_TAG_RSV5,
+    CARE_TAG_DHCP,
+    CARE_TAG_DHCPV6,
+    CARE_TAG_SNMP,
+    CARE_TAG_OAM,
+    CARE_TAG_END,
+} rtk_filter_care_tag_index_t;
+
+typedef struct rtk_filter_care_tag_s
+{
+    rtk_filter_flag_t tagType[CARE_TAG_END];
+} rtk_filter_care_tag_t;
+
+typedef struct rtk_filter_field rtk_filter_field_t;
+
+typedef struct
+{
+    rtk_uint32 value[RTK_DOT_1AS_TIMESTAMP_UNIT_IN_WORD_LENGTH];
+} rtk_filter_dot1as_timestamp_t;
+
+typedef enum rtk_filter_field_data_type_e
+{
+    FILTER_FIELD_DATA_MASK = 0,
+    FILTER_FIELD_DATA_RANGE,
+    FILTER_FIELD_DATA_END ,
+} rtk_filter_field_data_type_t;
+
+typedef struct rtk_filter_ip_s
+{
+    rtk_uint32 dataType;
+    rtk_uint32 rangeStart;
+    rtk_uint32 rangeEnd;
+    rtk_uint32 value;
+    rtk_uint32 mask;
+} rtk_filter_ip_t;
+
+typedef struct rtk_filter_mac_s
+{
+    rtk_uint32 dataType;
+    rtk_mac_t value;
+    rtk_mac_t mask;
+    rtk_mac_t rangeStart;
+    rtk_mac_t rangeEnd;
+} rtk_filter_mac_t;
+
+typedef rtk_uint32 rtk_filter_op_t;
+
+typedef struct rtk_filter_value_s
+{
+    rtk_uint32 dataType;
+    rtk_uint32 value;
+    rtk_uint32 mask;
+    rtk_uint32 rangeStart;
+    rtk_uint32 rangeEnd;
+
+} rtk_filter_value_t;
+
+typedef struct rtk_filter_activeport_s
+{
+    rtk_portmask_t value;
+    rtk_portmask_t mask;
+
+} rtk_filter_activeport_t;
+
+
+
+typedef struct rtk_filter_tag_s
+{
+    rtk_filter_value_t pri;
+    rtk_filter_flag_t cfi;
+    rtk_filter_value_t vid;
+} rtk_filter_tag_t;
+
+typedef struct rtk_filter_ipFlag_s
+{
+    rtk_filter_flag_t xf;
+    rtk_filter_flag_t mf;
+    rtk_filter_flag_t df;
+} rtk_filter_ipFlag_t;
+
+typedef struct
+{
+    rtk_uint32 addr[RTK_IPV6_ADDR_WORD_LENGTH];
+} rtk_filter_ip6_addr_t;
+
+typedef struct
+{
+    rtk_uint32 dataType;
+    rtk_filter_ip6_addr_t value;
+    rtk_filter_ip6_addr_t mask;
+    rtk_filter_ip6_addr_t rangeStart;
+    rtk_filter_ip6_addr_t rangeEnd;
+} rtk_filter_ip6_t;
+
+typedef rtk_uint32 rtk_filter_number_t;
+
+typedef struct rtk_filter_pattern_s
+{
+    rtk_uint32 value[FILTER_PATTERN_MAX];
+    rtk_uint32 mask[FILTER_PATTERN_MAX];
+} rtk_filter_pattern_t;
+
+typedef struct rtk_filter_tcpFlag_s
+{
+    rtk_filter_flag_t urg;
+    rtk_filter_flag_t ack;
+    rtk_filter_flag_t psh;
+    rtk_filter_flag_t rst;
+    rtk_filter_flag_t syn;
+    rtk_filter_flag_t fin;
+    rtk_filter_flag_t ns;
+    rtk_filter_flag_t cwr;
+    rtk_filter_flag_t ece;
+} rtk_filter_tcpFlag_t;
+
+typedef rtk_uint32 rtk_filter_field_raw_t;
+
+typedef enum rtk_filter_field_temple_input_e
+{
+    FILTER_FIELD_TEMPLE_INPUT_TYPE = 0,
+    FILTER_FIELD_TEMPLE_INPUT_INDEX,
+    FILTER_FIELD_TEMPLE_INPUT_MAX ,
+} rtk_filter_field_temple_input_t;
+
+struct rtk_filter_field
+{
+    rtk_uint32 fieldType;
+
+    union
+    {
+        /* L2 struct */
+        rtk_filter_mac_t       dmac;
+        rtk_filter_mac_t       smac;
+        rtk_filter_value_t     etherType;
+        rtk_filter_tag_t       ctag;
+        rtk_filter_tag_t       relayCtag;
+        rtk_filter_tag_t       stag;
+        rtk_filter_tag_t       l2tag;
+        rtk_filter_dot1as_timestamp_t dot1asTimeStamp;
+        rtk_filter_mac_t       mac;
+
+        /* L3 struct */
+        rtk_filter_ip_t      sip;
+        rtk_filter_ip_t      dip;
+        rtk_filter_ip_t      ip;
+        rtk_filter_value_t   protocol;
+        rtk_filter_value_t   ipTos;
+        rtk_filter_ipFlag_t  ipFlag;
+        rtk_filter_value_t   ipOffset;
+        rtk_filter_ip6_t     sipv6;
+        rtk_filter_ip6_t     dipv6;
+        rtk_filter_ip6_t     ipv6;
+        rtk_filter_value_t   ipv6TrafficClass;
+        rtk_filter_value_t   ipv6NextHeader;
+        rtk_filter_value_t   flowLabel;
+
+        /* L4 struct */
+        rtk_filter_value_t   tcpSrcPort;
+        rtk_filter_value_t   tcpDstPort;
+        rtk_filter_tcpFlag_t tcpFlag;
+        rtk_filter_value_t   tcpSeqNumber;
+        rtk_filter_value_t   tcpAckNumber;
+        rtk_filter_value_t   udpSrcPort;
+        rtk_filter_value_t   udpDstPort;
+        rtk_filter_value_t   icmpCode;
+        rtk_filter_value_t   icmpType;
+        rtk_filter_value_t   igmpType;
+
+        /* pattern match */
+        rtk_filter_pattern_t pattern;
+
+        rtk_filter_value_t   inData;
+
+    } filter_pattern_union;
+
+    rtk_uint32 fieldTemplateNo;
+    rtk_uint32 fieldTemplateIdx[RTK_FILTER_FIELD_USED_MAX];
+
+    struct rtk_filter_field *next;
+};
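+
+/*
+ * Usage sketch (illustrative, not part of the original SDK sources):
+ * a comparison rule selects a union member via 'fieldType' and is chained
+ * through 'next' by rtk_filter_igrAcl_field_add() declared below. A rule
+ * matching TCP destination port 80 could be prepared like this (assuming
+ * <string.h> is available for memset):
+ *
+ *    rtk_filter_field_t field;
+ *
+ *    memset(&field, 0, sizeof(field));
+ *    field.fieldType = FILTER_FIELD_TCP_DPORT;
+ *    field.filter_pattern_union.tcpDstPort.dataType = FILTER_FIELD_DATA_MASK;
+ *    field.filter_pattern_union.tcpDstPort.value    = 80;
+ *    field.filter_pattern_union.tcpDstPort.mask     = 0xFFFF;
+ */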
+
+typedef enum rtk_filter_field_type_e
+{
+    FILTER_FIELD_DMAC = 0,
+    FILTER_FIELD_SMAC,
+    FILTER_FIELD_ETHERTYPE,
+    FILTER_FIELD_CTAG,
+    FILTER_FIELD_STAG,
+
+    FILTER_FIELD_IPV4_SIP,
+    FILTER_FIELD_IPV4_DIP,
+    FILTER_FIELD_IPV4_TOS,
+    FILTER_FIELD_IPV4_PROTOCOL,
+    FILTER_FIELD_IPV4_FLAG,
+    FILTER_FIELD_IPV4_OFFSET,
+    FILTER_FIELD_IPV6_SIPV6,
+    FILTER_FIELD_IPV6_DIPV6,
+    FILTER_FIELD_IPV6_TRAFFIC_CLASS,
+    FILTER_FIELD_IPV6_NEXT_HEADER,
+
+    FILTER_FIELD_TCP_SPORT,
+    FILTER_FIELD_TCP_DPORT,
+    FILTER_FIELD_TCP_FLAG,
+    FILTER_FIELD_UDP_SPORT,
+    FILTER_FIELD_UDP_DPORT,
+    FILTER_FIELD_ICMP_CODE,
+    FILTER_FIELD_ICMP_TYPE,
+    FILTER_FIELD_IGMP_TYPE,
+
+    FILTER_FIELD_VID_RANGE,
+    FILTER_FIELD_IP_RANGE,
+    FILTER_FIELD_PORT_RANGE,
+
+    FILTER_FIELD_USER_DEFINED00,
+    FILTER_FIELD_USER_DEFINED01,
+    FILTER_FIELD_USER_DEFINED02,
+    FILTER_FIELD_USER_DEFINED03,
+    FILTER_FIELD_USER_DEFINED04,
+    FILTER_FIELD_USER_DEFINED05,
+    FILTER_FIELD_USER_DEFINED06,
+    FILTER_FIELD_USER_DEFINED07,
+    FILTER_FIELD_USER_DEFINED08,
+    FILTER_FIELD_USER_DEFINED09,
+    FILTER_FIELD_USER_DEFINED10,
+    FILTER_FIELD_USER_DEFINED11,
+    FILTER_FIELD_USER_DEFINED12,
+    FILTER_FIELD_USER_DEFINED13,
+    FILTER_FIELD_USER_DEFINED14,
+    FILTER_FIELD_USER_DEFINED15,
+
+    FILTER_FIELD_PATTERN_MATCH,
+
+    FILTER_FIELD_END,
+} rtk_filter_field_type_t;
+
+
+typedef enum rtk_filter_field_type_raw_e
+{
+    FILTER_FIELD_RAW_UNUSED = 0,
+    FILTER_FIELD_RAW_DMAC_15_0,
+    FILTER_FIELD_RAW_DMAC_31_16,
+    FILTER_FIELD_RAW_DMAC_47_32,
+    FILTER_FIELD_RAW_SMAC_15_0,
+    FILTER_FIELD_RAW_SMAC_31_16,
+    FILTER_FIELD_RAW_SMAC_47_32,
+    FILTER_FIELD_RAW_ETHERTYPE,
+    FILTER_FIELD_RAW_STAG,
+    FILTER_FIELD_RAW_CTAG,
+
+    FILTER_FIELD_RAW_IPV4_SIP_15_0 = 0x10,
+    FILTER_FIELD_RAW_IPV4_SIP_31_16,
+    FILTER_FIELD_RAW_IPV4_DIP_15_0,
+    FILTER_FIELD_RAW_IPV4_DIP_31_16,
+
+
+    FILTER_FIELD_RAW_IPV6_SIP_15_0 = 0x20,
+    FILTER_FIELD_RAW_IPV6_SIP_31_16,
+    FILTER_FIELD_RAW_IPV6_DIP_15_0 = 0x28,
+    FILTER_FIELD_RAW_IPV6_DIP_31_16,
+
+    FILTER_FIELD_RAW_VIDRANGE = 0x30,
+    FILTER_FIELD_RAW_IPRANGE,
+    FILTER_FIELD_RAW_PORTRANGE,
+    FILTER_FIELD_RAW_FIELD_VALID,
+
+    FILTER_FIELD_RAW_FIELD_SELECT00 = 0x40,
+    FILTER_FIELD_RAW_FIELD_SELECT01,
+    FILTER_FIELD_RAW_FIELD_SELECT02,
+    FILTER_FIELD_RAW_FIELD_SELECT03,
+    FILTER_FIELD_RAW_FIELD_SELECT04,
+    FILTER_FIELD_RAW_FIELD_SELECT05,
+    FILTER_FIELD_RAW_FIELD_SELECT06,
+    FILTER_FIELD_RAW_FIELD_SELECT07,
+    FILTER_FIELD_RAW_FIELD_SELECT08,
+    FILTER_FIELD_RAW_FIELD_SELECT09,
+    FILTER_FIELD_RAW_FIELD_SELECT10,
+    FILTER_FIELD_RAW_FIELD_SELECT11,
+    FILTER_FIELD_RAW_FIELD_SELECT12,
+    FILTER_FIELD_RAW_FIELD_SELECT13,
+    FILTER_FIELD_RAW_FIELD_SELECT14,
+    FILTER_FIELD_RAW_FIELD_SELECT15,
+
+    FILTER_FIELD_RAW_END,
+} rtk_filter_field_type_raw_t;
+
+typedef enum rtk_filter_flag_care_type_e
+{
+    FILTER_FLAG_CARE_DONT_CARE = 0,
+    FILTER_FLAG_CARE_1,
+    FILTER_FLAG_CARE_0,
+    FILTER_FLAG_END
+} rtk_filter_flag_care_type_t;
+
+typedef rtk_uint32  rtk_filter_id_t;    /* filter id type */
+
+typedef enum rtk_filter_invert_e
+{
+    FILTER_INVERT_DISABLE = 0,
+    FILTER_INVERT_ENABLE,
+    FILTER_INVERT_END,
+} rtk_filter_invert_t;
+
+typedef rtk_uint32 rtk_filter_state_t;
+
+typedef rtk_uint32 rtk_filter_unmatch_action_t;
+
+typedef enum rtk_filter_unmatch_action_e
+{
+    FILTER_UNMATCH_DROP = 0,
+    FILTER_UNMATCH_PERMIT,
+    FILTER_UNMATCH_END,
+} rtk_filter_unmatch_action_type_t;
+
+typedef struct
+{
+    rtk_filter_field_t      *fieldHead;
+    rtk_filter_care_tag_t   careTag;
+    rtk_filter_activeport_t activeport;
+
+    rtk_filter_invert_t     invert;
+} rtk_filter_cfg_t;
+
+typedef struct
+{
+    rtk_filter_field_raw_t      dataFieldRaw[RTK_FILTER_RAW_FIELD_NUMBER];
+    rtk_filter_field_raw_t      careFieldRaw[RTK_FILTER_RAW_FIELD_NUMBER];
+    rtk_filter_field_type_raw_t fieldRawType[RTK_FILTER_RAW_FIELD_NUMBER];
+    rtk_filter_care_tag_t       careTag;
+    rtk_filter_activeport_t     activeport;
+
+    rtk_filter_invert_t         invert;
+    rtk_enable_t                valid;
+} rtk_filter_cfg_raw_t;
+
+typedef struct
+{
+    rtk_uint32 index;
+    rtk_filter_field_type_raw_t fieldType[RTK_FILTER_RAW_FIELD_NUMBER];
+} rtk_filter_template_t;
+
+typedef enum rtk_field_sel_e
+{
+    FORMAT_DEFAULT = 0,
+    FORMAT_RAW,
+    FORMAT_LLC,
+    FORMAT_IPV4,
+    FORMAT_ARP,
+    FORMAT_IPV6,
+    FORMAT_IPPAYLOAD,
+    FORMAT_L4PAYLOAD,
+    FORMAT_END
+}rtk_field_sel_t;
+
+typedef enum rtk_filter_iprange_e
+{
+    IPRANGE_UNUSED = 0,
+    IPRANGE_IPV4_SIP,
+    IPRANGE_IPV4_DIP,
+    IPRANGE_IPV6_SIP,
+    IPRANGE_IPV6_DIP,
+    IPRANGE_END
+}rtk_filter_iprange_t;
+
+typedef enum rtk_filter_vidrange_e
+{
+    VIDRANGE_UNUSED = 0,
+    VIDRANGE_CVID,
+    VIDRANGE_SVID,
+    VIDRANGE_END
+}rtk_filter_vidrange_t;
+
+typedef enum rtk_filter_portrange_e
+{
+    PORTRANGE_UNUSED = 0,
+    PORTRANGE_SPORT,
+    PORTRANGE_DPORT,
+    PORTRANGE_END
+}rtk_filter_portrange_t;
+
+/* Function Name:
+ *      rtk_filter_igrAcl_init
+ * Description:
+ *      ACL initialization function
+ * Input:
+ *      None
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ * Note:
+ *      This function enables and initializes the ACL function.
+ */
+extern rtk_api_ret_t rtk_filter_igrAcl_init(void);
+
+/* Function Name:
+ *      rtk_filter_igrAcl_field_add
+ * Description:
+ *      Add comparison rule to an ACL configuration
+ * Input:
+ *      pFilter_cfg     - The ACL configuration that this function will add comparison rule
+ *      pFilter_field   - The comparison rule that will be added.
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK               - OK
+ *      RT_ERR_FAILED           - Failed
+ *      RT_ERR_SMI              - SMI access error
+ *      RT_ERR_NULL_POINTER     - Pointer pFilter_field or pFilter_cfg point to NULL.
+ *      RT_ERR_INPUT            - Invalid input parameters.
+ * Note:
+ *      This function adds a comparison rule (*pFilter_field) to an ACL configuration (*pFilter_cfg).
+ *      pFilter_cfg points to an ACL configuration structure that keeps multiple
+ *      comparison rules in a linked list; the rule pointed to by pFilter_field
+ *      is appended to that list.
+ */
+extern rtk_api_ret_t rtk_filter_igrAcl_field_add(rtk_filter_cfg_t *pFilter_cfg, rtk_filter_field_t *pFilter_field);
+
+/* Function Name:
+ *      rtk_filter_igrAcl_cfg_add
+ * Description:
+ *      Add an ACL configuration to ASIC
+ * Input:
+ *      filter_id       - Start index of ACL configuration.
+ *      pFilter_cfg     - The ACL configuration that this function will add comparison rule
+ *      pFilter_action  - Action(s) of ACL configuration.
+ * Output:
+ *      ruleNum - number of rules written in acl table
+ * Return:
+ *      RT_ERR_OK                               - OK
+ *      RT_ERR_FAILED                           - Failed
+ *      RT_ERR_SMI                              - SMI access error
+ *      RT_ERR_NULL_POINTER                     - Pointer pFilter_cfg or pAction points to NULL.
+ *      RT_ERR_INPUT                            - Invalid input parameters.
+ *      RT_ERR_ENTRY_INDEX                      - Invalid filter_id.
+ *      RT_ERR_FILTER_INACL_ACT_NOT_SUPPORT     - Action is not supported in this chip.
+ *      RT_ERR_FILTER_INACL_RULE_NOT_SUPPORT    - Rule is not supported.
+ * Note:
+ *      This function stores pFilter_cfg and pAction into the ASIC. The starting
+ *      index is filter_id.
+ */
+extern rtk_api_ret_t rtk_filter_igrAcl_cfg_add(rtk_filter_id_t filter_id, rtk_filter_cfg_t *pFilter_cfg, rtk_filter_action_t *pAction, rtk_filter_number_t *ruleNum);
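+
+/*
+ * Usage sketch (illustrative, not from the original SDK): writing a
+ * minimal ACL entry built from a rule such as the 'field' prepared in
+ * the sketch above. The filter id (0), the care-tag semantics assumed
+ * here (mask = care bit, value = required bit) and the action contents
+ * are placeholders; consult the chip documentation for exact semantics.
+ *
+ *    rtk_filter_cfg_t    cfg;
+ *    rtk_filter_action_t action;
+ *    rtk_filter_number_t ruleNum;
+ *    rtk_api_ret_t       ret;
+ *
+ *    memset(&cfg, 0, sizeof(cfg));
+ *    memset(&action, 0, sizeof(action));
+ *
+ *    ret = rtk_filter_igrAcl_field_add(&cfg, &field);
+ *
+ *    cfg.careTag.tagType[CARE_TAG_TCP].mask  = 1;
+ *    cfg.careTag.tagType[CARE_TAG_TCP].value = 1;
+ *    (fill cfg.activeport with the ports the rule applies to)
+ *    cfg.invert = FILTER_INVERT_DISABLE;
+ *
+ *    if (ret == RT_ERR_OK)
+ *        ret = rtk_filter_igrAcl_cfg_add(0, &cfg, &action, &ruleNum);
+ */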
+
+/* Function Name:
+ *      rtk_filter_igrAcl_cfg_del
+ * Description:
+ *      Delete an ACL configuration from ASIC
+ * Input:
+ *      filter_id   - Start index of ACL configuration.
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK               - OK
+ *      RT_ERR_FAILED           - Failed
+ *      RT_ERR_SMI              - SMI access error
+ *      RT_ERR_FILTER_ENTRYIDX  - Invalid filter_id.
+ * Note:
+ *      This function deletes a group of ACL rules starting from filter_id.
+ */
+extern rtk_api_ret_t rtk_filter_igrAcl_cfg_del(rtk_filter_id_t filter_id);
+
+/* Function Name:
+ *      rtk_filter_igrAcl_cfg_delAll
+ * Description:
+ *      Delete all ACL entries from ASIC
+ * Input:
+ *      None
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ * Note:
+ *      This function deletes all ACL configurations from the ASIC.
+ */
+extern rtk_api_ret_t rtk_filter_igrAcl_cfg_delAll(void);
+
+/* Function Name:
+ *      rtk_filter_igrAcl_cfg_get
+ * Description:
+ *      Get one ingress acl configuration from ASIC.
+ * Input:
+ *      filter_id       - Start index of ACL configuration.
+ * Output:
+ *      pFilter_cfg     - buffer pointer of ingress acl data
+ *      pFilter_action  - buffer pointer of ingress acl action
+ * Return:
+ *      RT_ERR_OK               - OK
+ *      RT_ERR_FAILED           - Failed
+ *      RT_ERR_SMI              - SMI access error
+ *      RT_ERR_NULL_POINTER     - Pointer pFilter_action or pFilter_cfg point to NULL.
+ *      RT_ERR_FILTER_ENTRYIDX  - Invalid entry index.
+ * Note:
+ *      This function reads one ingress ACL configuration and its action from the ASIC.
+ */
+extern rtk_api_ret_t rtk_filter_igrAcl_cfg_get(rtk_filter_id_t filter_id, rtk_filter_cfg_raw_t *pFilter_cfg, rtk_filter_action_t *pAction);
+
+/* Function Name:
+ *      rtk_filter_igrAcl_unmatchAction_set
+ * Description:
+ *      Set the action applied to packets when no ACL configuration matches
+ * Input:
+ *      port    - Port id.
+ *      action  - Action.
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_PORT_ID      - Invalid port id.
+ *      RT_ERR_INPUT        - Invalid input parameters.
+ * Note:
+ *      This function sets the action applied to packets when no ACL configuration matches.
+ */
+extern rtk_api_ret_t rtk_filter_igrAcl_unmatchAction_set(rtk_port_t port, rtk_filter_unmatch_action_t action);
+
+/* Function Name:
+ *      rtk_filter_igrAcl_unmatchAction_get
+ * Description:
+ *      Get the action applied to packets when no ACL configuration matches
+ * Input:
+ *      port    - Port id.
+ * Output:
+ *      pAction - Action.
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_PORT_ID      - Invalid port id.
+ *      RT_ERR_INPUT        - Invalid input parameters.
+ * Note:
+ *      This function gets the action applied to packets when no ACL configuration matches.
+ */
+extern rtk_api_ret_t rtk_filter_igrAcl_unmatchAction_get(rtk_port_t port, rtk_filter_unmatch_action_t* action);
+
+/* Function Name:
+ *      rtk_filter_igrAcl_state_set
+ * Description:
+ *      Set state of ingress ACL.
+ * Input:
+ *      port    - Port id.
+ *      state   - Ingress ACL state.
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_PORT_ID      - Invalid port id.
+ *      RT_ERR_INPUT        - Invalid input parameters.
+ * Note:
+ *      This function sets the ingress ACL state of the specified port.
+ */
+extern rtk_api_ret_t rtk_filter_igrAcl_state_set(rtk_port_t port, rtk_filter_state_t state);
+
+/* Function Name:
+ *      rtk_filter_igrAcl_state_get
+ * Description:
+ *      Get state of ingress ACL.
+ * Input:
+ *      port    - Port id.
+ * Output:
+ *      pState  - Ingress ACL state.
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_PORT_ID      - Invalid port id.
+ *      RT_ERR_INPUT        - Invalid input parameters.
+ * Note:
+ *      This function gets the ingress ACL state of the specified port.
+ */
+extern rtk_api_ret_t rtk_filter_igrAcl_state_get(rtk_port_t port, rtk_filter_state_t* state);
+
+/* Function Name:
+ *      rtk_filter_igrAcl_template_set
+ * Description:
+ *      Set template of ingress ACL.
+ * Input:
+ *      aclTemplate - Ingress ACL template
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK              - OK
+ *      RT_ERR_FAILED          - Failed
+ *      RT_ERR_SMI             - SMI access error
+ *      RT_ERR_INPUT           - Invalid input parameters.
+ * Note:
+ *      This function sets the ACL template.
+ */
+extern rtk_api_ret_t rtk_filter_igrAcl_template_set(rtk_filter_template_t *aclTemplate);
+
+/* Function Name:
+ *      rtk_filter_igrAcl_template_get
+ * Description:
+ *      Get template of ingress ACL.
+ * Input:
+ *      None
+ * Output:
+ *      aclTemplate - Ingress ACL template
+ * Return:
+ *      RT_ERR_OK              - OK
+ *      RT_ERR_FAILED          - Failed
+ *      RT_ERR_SMI             - SMI access error
+ * Note:
+ *      This function gets the ACL template.
+ */
+extern rtk_api_ret_t rtk_filter_igrAcl_template_get(rtk_filter_template_t *aclTemplate);
+
+/* Function Name:
+ *      rtk_filter_igrAcl_field_sel_set
+ * Description:
+ *      Set user defined field selectors in HSB
+ * Input:
+ *      index       - index of field selector 0-15
+ *      format      - Format of field selector
+ *      offset      - Retrieving data offset
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK              - OK
+ *      RT_ERR_FAILED          - Failed
+ *      RT_ERR_SMI             - SMI access error
+ * Note:
+ *      The system supports 16 user-defined field selectors.
+ *      Each selector can be enabled or disabled.
+ *      A selector extracts a 16-bit word at a configurable offset within
+ *      several predefined standard L2/L3/L4 payload formats.
+ */
+extern rtk_api_ret_t rtk_filter_igrAcl_field_sel_set(rtk_uint32 index, rtk_field_sel_t format, rtk_uint32 offset);
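+
+/*
+ * Usage sketch (illustrative): program field selector 0 to extract a
+ * 16-bit word from the IPv4 header, then match it through the
+ * FILTER_FIELD_USER_DEFINED00 rule type. The offset value, its units and
+ * the match pattern are placeholders; they are chip specific.
+ *
+ *    rtk_filter_igrAcl_field_sel_set(0, FORMAT_IPV4, 2);
+ *
+ *    field.fieldType = FILTER_FIELD_USER_DEFINED00;
+ *    field.filter_pattern_union.inData.dataType = FILTER_FIELD_DATA_MASK;
+ *    field.filter_pattern_union.inData.value    = 0x0800;
+ *    field.filter_pattern_union.inData.mask     = 0xFFFF;
+ */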
+
+/* Function Name:
+ *      rtk_filter_igrAcl_field_sel_get
+ * Description:
+ *      Get user defined field selectors in HSB
+ * Input:
+ *      index       - index of field selector 0-15
+ * Output:
+ *      pFormat     - Format of field selector
+ *      pOffset     - Retrieving data offset
+ * Return:
+ *      RT_ERR_OK              - OK
+ *      RT_ERR_FAILED          - Failed
+ *      RT_ERR_SMI             - SMI access error
+ * Note:
+ *      None.
+ */
+extern rtk_api_ret_t rtk_filter_igrAcl_field_sel_get(rtk_uint32 index, rtk_field_sel_t *pFormat, rtk_uint32 *pOffset);
+
+/* Function Name:
+ *      rtk_filter_iprange_set
+ * Description:
+ *      Set IP Range check
+ * Input:
+ *      index       - index of IP Range 0-15
+ *      type        - IP Range check type, 0: Delete an entry, 1: IPv4_SIP, 2: IPv4_DIP, 3: IPv6_SIP, 4: IPv6_DIP
+ *      upperIp     - The upper bound of IP range
+ *      lowerIp     - The lower Bound of IP range
+ * Output:
+ *      None.
+ * Return:
+ *      RT_ERR_OK              - OK
+ *      RT_ERR_FAILED          - Failed
+ *      RT_ERR_SMI             - SMI access error
+ *      RT_ERR_OUT_OF_RANGE    - The parameter is out of range
+ *      RT_ERR_INPUT           - Input error
+ * Note:
+ *      upperIp must be greater than or equal to lowerIp.
+ */
+extern rtk_api_ret_t rtk_filter_iprange_set(rtk_uint32 index, rtk_filter_iprange_t type, ipaddr_t upperIp, ipaddr_t lowerIp);
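+
+/*
+ * Usage sketch (illustrative): configure IP range entry 0 to cover an
+ * IPv4 source address range, which a FILTER_FIELD_IP_RANGE rule can then
+ * reference. ipaddr_t values are assumed here to be host-order IPv4
+ * addresses.
+ *
+ *    rtk_filter_iprange_set(0, IPRANGE_IPV4_SIP, 0xC0A80164, 0xC0A80101);
+ *
+ * where 0xC0A80164 is 192.168.1.100 (upper bound) and 0xC0A80101 is
+ * 192.168.1.1 (lower bound).
+ */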
+
+/* Function Name:
+ *      rtk_filter_iprange_get
+ * Description:
+ *      Get IP Range check
+ * Input:
+ *      index       - index of IP Range 0-15
+ * Output:
+ *      pType        - IP Range check type, 0: Unused, 1: IPv4_SIP, 2: IPv4_DIP, 3: IPv6_SIP, 4: IPv6_DIP
+ *      pUpperIp     - The upper bound of IP range
+ *      pLowerIp     - The lower Bound of IP range
+ * Return:
+ *      RT_ERR_OK              - OK
+ *      RT_ERR_FAILED          - Failed
+ *      RT_ERR_SMI             - SMI access error
+ *      RT_ERR_OUT_OF_RANGE    - The parameter is out of range
+ * Note:
+ *      upperIp must be greater than or equal to lowerIp.
+ */
+extern rtk_api_ret_t rtk_filter_iprange_get(rtk_uint32 index, rtk_filter_iprange_t *pType, ipaddr_t *pUpperIp, ipaddr_t *pLowerIp);
+
+/* Function Name:
+ *      rtk_filter_vidrange_set
+ * Description:
+ *      Set VID Range check
+ * Input:
+ *      index       - index of VID Range 0-15
+ *      type        - VID Range check type, 0: Delete an entry, 1: CVID, 2: SVID
+ *      upperVid    - The upper bound of VID range
+ *      lowerVid    - The lower Bound of VID range
+ * Output:
+ *      None.
+ * Return:
+ *      RT_ERR_OK              - OK
+ *      RT_ERR_FAILED          - Failed
+ *      RT_ERR_SMI             - SMI access error
+ *      RT_ERR_OUT_OF_RANGE    - The parameter is out of range
+ *      RT_ERR_INPUT           - Input error
+ * Note:
+ *      upperVid must be greater than or equal to lowerVid.
+ */
+extern rtk_api_ret_t rtk_filter_vidrange_set(rtk_uint32 index, rtk_filter_vidrange_t type, rtk_uint32 upperVid, rtk_uint32 lowerVid);
+
+/* Function Name:
+ *      rtk_filter_vidrange_get
+ * Description:
+ *      Get VID Range check
+ * Input:
+ *      index       - index of VID Range 0-15
+ * Output:
+ *      pType        - VID Range check type, 0: Unused, 1: CVID, 2: SVID
+ *      pUpperVid    - The upper bound of VID range
+ *      pLowerVid    - The lower Bound of VID range
+ * Return:
+ *      RT_ERR_OK              - OK
+ *      RT_ERR_FAILED          - Failed
+ *      RT_ERR_SMI             - SMI access error
+ *      RT_ERR_OUT_OF_RANGE    - The parameter is out of range
+ * Note:
+ *      None.
+ */
+extern rtk_api_ret_t rtk_filter_vidrange_get(rtk_uint32 index, rtk_filter_vidrange_t *pType, rtk_uint32 *pUpperVid, rtk_uint32 *pLowerVid);
+
+/* Function Name:
+ *      rtk_filter_portrange_set
+ * Description:
+ *      Set Port Range check
+ * Input:
+ *      index       - index of Port Range 0-15
+ *      type        - Port Range check type, 0: Delete an entry, 1: Source Port, 2: Destination Port
+ *      upperPort   - The upper bound of Port range
+ *      lowerPort   - The lower Bound of Port range
+ * Output:
+ *      None.
+ * Return:
+ *      RT_ERR_OK              - OK
+ *      RT_ERR_FAILED          - Failed
+ *      RT_ERR_SMI             - SMI access error
+ *      RT_ERR_OUT_OF_RANGE    - The parameter is out of range
+ *      RT_ERR_INPUT           - Input error
+ * Note:
+ *      upperPort must be greater than or equal to lowerPort.
+ */
+extern rtk_api_ret_t rtk_filter_portrange_set(rtk_uint32 index, rtk_filter_portrange_t type, rtk_uint32 upperPort, rtk_uint32 lowerPort);
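+
+/*
+ * Usage sketch (illustrative): configure L4 port range entry 0 to cover
+ * destination ports 8000-8080; rtk_filter_vidrange_set() follows the
+ * same pattern for VLAN ID ranges. Note the upper bound comes first.
+ *
+ *    rtk_filter_portrange_set(0, PORTRANGE_DPORT, 8080, 8000);
+ */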
+
+/* Function Name:
+ *      rtk_filter_portrange_get
+ * Description:
+ *      Get Port Range check
+ * Input:
+ *      index       - index of Port Range 0-15
+ * Output:
+ *      pType       - Port Range check type, 0: Unused, 1: Source Port, 2: Destination Port
+ *      pUpperPort  - The upper bound of Port range
+ *      pLowerPort  - The lower Bound of Port range
+ * Return:
+ *      RT_ERR_OK              - OK
+ *      RT_ERR_FAILED          - Failed
+ *      RT_ERR_SMI             - SMI access error
+ *      RT_ERR_OUT_OF_RANGE    - The parameter is out of range
+ *      RT_ERR_INPUT           - Input error
+ * Note:
+ *      None.
+ */
+extern rtk_api_ret_t rtk_filter_portrange_get(rtk_uint32 index, rtk_filter_portrange_t *pType, rtk_uint32 *pUpperPort, rtk_uint32 *pLowerPort);
+
+/* Function Name:
+ *      rtk_filter_igrAclPolarity_set
+ * Description:
+ *      Set ACL GPIO control polarity
+ * Input:
+ *      polarity - 1: High, 0: Low
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK       - Success
+ *      RT_ERR_SMI      - SMI access error
+ * Note:
+ *      none
+ */
+extern rtk_api_ret_t rtk_filter_igrAclPolarity_set(rtk_uint32 polarity);
+
+/* Function Name:
+ *      rtk_filter_igrAclPolarity_get
+ * Description:
+ *      Get ACL GPIO control polarity
+ * Input:
+ *      None
+ * Output:
+ *      pPolarity - 1: High, 0: Low
+ * Return:
+ *      RT_ERR_OK       - Success
+ *      RT_ERR_SMI      - SMI access error
+ * Note:
+ *      none
+ */
+extern rtk_api_ret_t rtk_filter_igrAclPolarity_get(rtk_uint32* pPolarity);
+
+
+#endif /* __RTK_API_ACL_H__ */
diff -Nruw linux-4.4.115-fbx/drivers/misc/freebox./rtlapi/cpu.c linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/cpu.c
--- linux-4.4.115-fbx/drivers/misc/freebox./rtlapi/cpu.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/cpu.c	2019-01-22 16:16:24.707257309 +0100
@@ -0,0 +1,695 @@
+/*
+ * Copyright (C) 2013 Realtek Semiconductor Corp.
+ * All Rights Reserved.
+ *
+ * This program is the proprietary software of Realtek Semiconductor
+ * Corporation and/or its licensors, and only be used, duplicated,
+ * modified or distributed under the authorized license from Realtek.
+ *
+ * ANY USE OF THE SOFTWARE OTHER THAN AS AUTHORIZED UNDER
+ * THIS LICENSE OR COPYRIGHT LAW IS PROHIBITED.
+ *
+ * $Revision: 79629 $
+ * $Date: 2017-06-14 18:08:03 +0800 (Wed, 14 Jun 2017) $
+ *
+ * Purpose : RTK switch high-level API for RTL8367/RTL8367C
+ * Feature : Here is a list of all functions and variables in CPU module.
+ *
+ */
+
+#include <rtk_switch.h>
+#include <rtk_error.h>
+#include <cpu.h>
+#include <string.h>
+
+#include <rtl8367c_asicdrv.h>
+#include <rtl8367c_asicdrv_cputag.h>
+
+static rtk_api_ret_t _rtk_cpu_enable_set(rtk_enable_t enable)
+{
+    rtk_api_ret_t retVal;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if (enable >= RTK_ENABLE_END)
+        return RT_ERR_ENABLE;
+
+    if ((retVal = rtl8367c_setAsicCputagEnable(enable)) != RT_ERR_OK)
+        return retVal;
+
+    if (DISABLED == enable)
+    {
+        if ((retVal = rtl8367c_setAsicCputagPortmask(0)) != RT_ERR_OK)
+            return retVal;
+    }
+
+    return RT_ERR_OK;
+}
+
+
+static rtk_api_ret_t _rtk_cpu_enable_get(rtk_enable_t *pEnable)
+{
+    rtk_api_ret_t retVal;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if(NULL == pEnable)
+        return RT_ERR_NULL_POINTER;
+
+    if ((retVal = rtl8367c_getAsicCputagEnable(pEnable)) != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_cpu_tagPort_set(rtk_port_t port, rtk_cpu_insert_t mode)
+{
+    rtk_api_ret_t retVal;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    /* Check port Valid */
+    RTK_CHK_PORT_VALID(port);
+
+    if (mode >= CPU_INSERT_END)
+        return RT_ERR_INPUT;
+
+    if ((retVal = rtl8367c_setAsicCputagPortmask(1<<rtk_switch_port_L2P_get(port))) != RT_ERR_OK)
+        return retVal;
+
+    if ((retVal = rtl8367c_setAsicCputagTrapPort(rtk_switch_port_L2P_get(port))) != RT_ERR_OK)
+        return retVal;
+
+    if ((retVal = rtl8367c_setAsicCputagInsertMode(mode)) != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_cpu_tagPort_get(rtk_port_t *pPort, rtk_cpu_insert_t *pMode)
+{
+    rtk_api_ret_t retVal;
+    rtk_uint32 pmsk, port;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if(NULL == pPort)
+        return RT_ERR_NULL_POINTER;
+
+    if(NULL == pMode)
+        return RT_ERR_NULL_POINTER;
+
+    if ((retVal = rtl8367c_getAsicCputagPortmask(&pmsk)) != RT_ERR_OK)
+        return retVal;
+
+    if ((retVal = rtl8367c_getAsicCputagTrapPort(&port)) != RT_ERR_OK)
+        return retVal;
+
+    *pPort = rtk_switch_port_P2L_get(port);
+
+    if ((retVal = rtl8367c_getAsicCputagInsertMode(pMode)) != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_cpu_awarePort_set(rtk_portmask_t *pPortmask)
+{
+    rtk_api_ret_t retVal;
+    rtk_uint32 phyMbrPmask;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    /* Check Valid port mask */
+    if(NULL == pPortmask)
+        return RT_ERR_NULL_POINTER;
+
+    /* Check port mask valid */
+    RTK_CHK_PORTMASK_VALID(pPortmask);
+
+    if(rtk_switch_portmask_L2P_get(pPortmask, &phyMbrPmask) != RT_ERR_OK)
+        return RT_ERR_FAILED;
+
+    if ((retVal = rtl8367c_setAsicCputagPortmask(phyMbrPmask)) != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_cpu_awarePort_get(rtk_portmask_t *pPortmask)
+{
+    rtk_api_ret_t retVal;
+    rtk_uint32 pmsk;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if(NULL == pPortmask)
+        return RT_ERR_NULL_POINTER;
+
+    if ((retVal = rtl8367c_getAsicCputagPortmask(&pmsk)) != RT_ERR_OK)
+        return retVal;
+
+    if(rtk_switch_portmask_P2L_get(pmsk, pPortmask) != RT_ERR_OK)
+        return RT_ERR_FAILED;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_cpu_tagPosition_set(rtk_cpu_position_t position)
+{
+    rtk_api_ret_t retVal;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if (position >= CPU_POS_END)
+        return RT_ERR_INPUT;
+
+    if ((retVal = rtl8367c_setAsicCputagPosition(position)) != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_cpu_tagPosition_get(rtk_cpu_position_t *pPosition)
+{
+    rtk_api_ret_t retVal;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if(NULL == pPosition)
+        return RT_ERR_NULL_POINTER;
+
+    if ((retVal = rtl8367c_getAsicCputagPosition(pPosition)) != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_cpu_tagLength_set(rtk_cpu_tag_length_t length)
+{
+    rtk_api_ret_t retVal;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if (length >= CPU_LEN_END)
+        return RT_ERR_INPUT;
+
+    if ((retVal = rtl8367c_setAsicCputagMode(length)) != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_cpu_tagLength_get(rtk_cpu_tag_length_t *pLength)
+{
+    rtk_api_ret_t retVal;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if(NULL == pLength)
+        return RT_ERR_NULL_POINTER;
+
+    if ((retVal = rtl8367c_getAsicCputagMode(pLength)) != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_cpu_priRemap_set(rtk_pri_t int_pri, rtk_pri_t new_pri)
+{
+    rtk_api_ret_t retVal;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if (new_pri > RTL8367C_PRIMAX || int_pri > RTL8367C_PRIMAX)
+        return  RT_ERR_VLAN_PRIORITY;
+
+    if ((retVal = rtl8367c_setAsicCputagPriorityRemapping(int_pri, new_pri)) != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_cpu_priRemap_get(rtk_pri_t int_pri, rtk_pri_t *pNew_pri)
+{
+    rtk_api_ret_t retVal;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if(NULL == pNew_pri)
+        return RT_ERR_NULL_POINTER;
+
+    if (int_pri > RTL8367C_PRIMAX)
+        return  RT_ERR_QOS_INT_PRIORITY;
+
+    if ((retVal = rtl8367c_getAsicCputagPriorityRemapping(int_pri, pNew_pri)) != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_cpu_acceptLength_set(rtk_cpu_rx_length_t length)
+{
+    rtk_api_ret_t retVal;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if (length >= CPU_RX_END)
+        return RT_ERR_INPUT;
+
+    if ((retVal = rtl8367c_setAsicCputagRxMinLength(length)) != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_cpu_acceptLength_get(rtk_cpu_rx_length_t *pLength)
+{
+    rtk_api_ret_t retVal;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if(NULL == pLength)
+        return RT_ERR_NULL_POINTER;
+
+    if ((retVal = rtl8367c_getAsicCputagRxMinLength(pLength)) != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+
+/* Function Name:
+ *      rtk_cpu_enable_set
+ * Description:
+ *      Set CPU port function enable/disable.
+ * Input:
+ *      enable - CPU port function enable
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_INPUT        - Invalid input parameter.
+ *      RT_ERR_PORT_ID      - Invalid port number.
+ * Note:
+ *      The API can set CPU port function enable/disable.
+ */
+rtk_api_ret_t rtk_cpu_enable_set(rtk_enable_t enable)
+{
+    rtk_api_ret_t retVal;
+    
+    RTK_API_LOCK();
+    retVal = _rtk_cpu_enable_set(enable);    
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_cpu_enable_get
+ * Description:
+ *      Get CPU port and its setting.
+ * Input:
+ *      None
+ * Output:
+ *      pEnable - CPU port function enable
+ * Return:
+ *      RT_ERR_OK               - OK
+ *      RT_ERR_FAILED           - Failed
+ *      RT_ERR_SMI              - SMI access error
+ *      RT_ERR_INPUT            - Invalid input parameters.
+ *      RT_ERR_L2_NO_CPU_PORT   - CPU port does not exist
+ * Note:
+ *      The API can get CPU port function enable/disable.
+ */
+rtk_api_ret_t rtk_cpu_enable_get(rtk_enable_t *pEnable)
+{
+    rtk_api_ret_t retVal;
+    
+    RTK_API_LOCK();
+    retVal = _rtk_cpu_enable_get(pEnable);    
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_cpu_tagPort_set
+ * Description:
+ *      Set CPU port and CPU tag insert mode.
+ * Input:
+ *      port - Port id.
+ *      mode - CPU tag insert for packets egress from CPU port.
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_INPUT        - Invalid input parameter.
+ *      RT_ERR_PORT_ID      - Invalid port number.
+ * Note:
+ *      The API can set the CPU port and the mode for inserting the proprietary
+ *      CPU tag (Length/Type 0x8899) into frames transmitted to the CPU port.
+ *      The insert CPU tag mode is one of the following:
+ *      - CPU_INSERT_TO_ALL
+ *      - CPU_INSERT_TO_TRAPPING
+ *      - CPU_INSERT_TO_NONE
+ */
+rtk_api_ret_t rtk_cpu_tagPort_set(rtk_port_t port, rtk_cpu_insert_t mode)
+{
+    rtk_api_ret_t retVal;
+    
+    RTK_API_LOCK();
+    retVal = _rtk_cpu_tagPort_set(port, mode);    
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
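+
+/*
+ * Usage sketch (illustrative, not from the original SDK): a typical CPU
+ * port bring-up enables the CPU tag feature, then selects the tag port,
+ * insert mode, tag position and tag length. The logical port id is board
+ * specific and shown as a placeholder.
+ *
+ *    rtk_port_t cpu_port = (board-specific logical port);
+ *
+ *    rtk_cpu_enable_set(ENABLED);
+ *    rtk_cpu_tagPort_set(cpu_port, CPU_INSERT_TO_ALL);
+ *    rtk_cpu_tagPosition_set(CPU_POS_AFTER_SA);
+ *    rtk_cpu_tagLength_set(CPU_LEN_8BYTES);
+ */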
+
+/* Function Name:
+ *      rtk_cpu_tagPort_get
+ * Description:
+ *      Get CPU port and CPU tag insert mode.
+ * Input:
+ *      None
+ * Output:
+ *      pPort - Port id.
+ *      pMode - CPU tag insert mode for packets egressing the CPU port, 0: insert into all packets, 1: insert only into trapped packets, 2: no insert.
+ * Return:
+ *      RT_ERR_OK               - OK
+ *      RT_ERR_FAILED           - Failed
+ *      RT_ERR_SMI              - SMI access error
+ *      RT_ERR_INPUT            - Invalid input parameters.
+ *      RT_ERR_L2_NO_CPU_PORT   - CPU port does not exist
+ * Note:
+ *      The API can get configured CPU port and its setting.
+ *      The insert CPU tag mode is one of the following:
+ *      - CPU_INSERT_TO_ALL
+ *      - CPU_INSERT_TO_TRAPPING
+ *      - CPU_INSERT_TO_NONE
+ */
+rtk_api_ret_t rtk_cpu_tagPort_get(rtk_port_t *pPort, rtk_cpu_insert_t *pMode)
+{
+    rtk_api_ret_t retVal;
+    
+    RTK_API_LOCK();
+    retVal = _rtk_cpu_tagPort_get(pPort, pMode);    
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+
+/* Function Name:
+ *      rtk_cpu_awarePort_set
+ * Description:
+ *      Set CPU aware port mask.
+ * Input:
+ *      portmask - Port mask.
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_PORT_MASK      - Invalid port mask.
+ * Note:
+ *      The API can set configured CPU aware port mask.
+ */
+rtk_api_ret_t rtk_cpu_awarePort_set(rtk_portmask_t *pPortmask)
+{
+    rtk_api_ret_t retVal;
+    
+    RTK_API_LOCK();
+    retVal = _rtk_cpu_awarePort_set(pPortmask);    
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_cpu_awarePort_get
+ * Description:
+ *      Get CPU aware port mask.
+ * Input:
+ *      None
+ * Output:
+ *      pPortmask - Port mask.
+ * Return:
+ *      RT_ERR_OK               - OK
+ *      RT_ERR_FAILED           - Failed
+ *      RT_ERR_SMI              - SMI access error
+ * Note:
+ *      The API can get configured CPU aware port mask.
+ */
+rtk_api_ret_t rtk_cpu_awarePort_get(rtk_portmask_t *pPortmask)
+{
+    rtk_api_ret_t retVal;
+    
+    RTK_API_LOCK();
+    retVal = _rtk_cpu_awarePort_get(pPortmask);    
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_cpu_tagPosition_set
+ * Description:
+ *      Set CPU tag position.
+ * Input:
+ *      position - CPU tag position.
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_INPUT      - Invalid input.
+ * Note:
+ *      The API can set CPU tag position.
+ */
+rtk_api_ret_t rtk_cpu_tagPosition_set(rtk_cpu_position_t position)
+{
+    rtk_api_ret_t retVal;
+    
+    RTK_API_LOCK();
+    retVal = _rtk_cpu_tagPosition_set(position);    
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_cpu_tagPosition_get
+ * Description:
+ *      Get CPU tag position.
+ * Input:
+ *      None
+ * Output:
+ *      pPosition - CPU tag position.
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_INPUT      - Invalid input.
+ * Note:
+ *      The API can get CPU tag position.
+ */
+rtk_api_ret_t rtk_cpu_tagPosition_get(rtk_cpu_position_t *pPosition)
+{
+    rtk_api_ret_t retVal;
+    
+    RTK_API_LOCK();
+    retVal = _rtk_cpu_tagPosition_get(pPosition);    
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_cpu_tagLength_set
+ * Description:
+ *      Set CPU tag length.
+ * Input:
+ *      length - CPU tag length.
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_INPUT      - Invalid input.
+ * Note:
+ *      The API can set CPU tag length.
+ */
+rtk_api_ret_t rtk_cpu_tagLength_set(rtk_cpu_tag_length_t length)
+{
+    rtk_api_ret_t retVal;
+    
+    RTK_API_LOCK();
+    retVal = _rtk_cpu_tagLength_set(length);    
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_cpu_tagLength_get
+ * Description:
+ *      Get CPU tag length.
+ * Input:
+ *      None
+ * Output:
+ *      pLength - CPU tag length.
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_INPUT      - Invalid input.
+ * Note:
+ *      The API can get CPU tag length.
+ */
+rtk_api_ret_t rtk_cpu_tagLength_get(rtk_cpu_tag_length_t *pLength)
+{
+    rtk_api_ret_t retVal;
+    
+    RTK_API_LOCK();
+    retVal = _rtk_cpu_tagLength_get(pLength);    
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_cpu_priRemap_set
+ * Description:
+ *      Configure CPU priorities mapping to internal absolute priority.
+ * Input:
+ *      int_pri     - internal priority value.
+ *      new_pri     - new internal priority value.
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK               - OK
+ *      RT_ERR_FAILED           - Failed
+ *      RT_ERR_SMI              - SMI access error
+ *      RT_ERR_INPUT            - Invalid input parameters.
+ *      RT_ERR_VLAN_PRIORITY    - Invalid 1p priority.
+ *      RT_ERR_QOS_INT_PRIORITY - Invalid priority.
+ * Note:
+ *      The CPU tag priority is remapped to an internal ASIC priority, which is used for queue selection and packet scheduling.
+ */
+rtk_api_ret_t rtk_cpu_priRemap_set(rtk_pri_t int_pri, rtk_pri_t new_pri)
+{
+    rtk_api_ret_t retVal;
+    
+    RTK_API_LOCK();
+    retVal = _rtk_cpu_priRemap_set(int_pri, new_pri);    
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_cpu_priRemap_get
+ * Description:
+ *      Get CPU priority mapping to internal absolute priority.
+ * Input:
+ *      int_pri     - internal priority value.
+ * Output:
+ *      pNew_pri    - new internal priority value.
+ * Return:
+ *      RT_ERR_OK               - OK
+ *      RT_ERR_FAILED           - Failed
+ *      RT_ERR_SMI              - SMI access error
+ *      RT_ERR_INPUT            - Invalid input parameters.
+ *      RT_ERR_VLAN_PRIORITY    - Invalid 1p priority.
+ *      RT_ERR_QOS_INT_PRIORITY - Invalid priority.
+ * Note:
+ *      The CPU tag priority is remapped to an internal ASIC priority, which is used for queue selection and packet scheduling.
+ */
+rtk_api_ret_t rtk_cpu_priRemap_get(rtk_pri_t int_pri, rtk_pri_t *pNew_pri)
+{
+    rtk_api_ret_t retVal;
+    
+    RTK_API_LOCK();
+    retVal = _rtk_cpu_priRemap_get(int_pri, pNew_pri);    
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_cpu_acceptLength_set
+ * Description:
+ *      Set CPU accept length.
+ * Input:
+ *      length - CPU accept length.
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_INPUT      - Invalid input.
+ * Note:
+ *      The API can set CPU accept length.
+ */
+rtk_api_ret_t rtk_cpu_acceptLength_set(rtk_cpu_rx_length_t length)
+{
+    rtk_api_ret_t retVal;
+    
+    RTK_API_LOCK();
+    retVal = _rtk_cpu_acceptLength_set(length);    
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_cpu_acceptLength_get
+ * Description:
+ *      Get CPU accept length.
+ * Input:
+ *      None
+ * Output:
+ *      pLength - CPU accept length.
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_INPUT      - Invalid input.
+ * Note:
+ *      The API can get CPU accept length.
+ */
+rtk_api_ret_t rtk_cpu_acceptLength_get(rtk_cpu_rx_length_t *pLength)
+{
+    rtk_api_ret_t retVal;
+    
+    RTK_API_LOCK();
+    retVal = _rtk_cpu_acceptLength_get(pLength);    
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+
+
diff -Nruw linux-4.4.115-fbx/drivers/misc/freebox./rtlapi/cpu.h linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/cpu.h
--- linux-4.4.115-fbx/drivers/misc/freebox./rtlapi/cpu.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/cpu.h	2019-01-22 16:16:24.707257309 +0100
@@ -0,0 +1,329 @@
+/*
+ * Copyright (C) 2013 Realtek Semiconductor Corp.
+ * All Rights Reserved.
+ *
+ * This program is the proprietary software of Realtek Semiconductor
+ * Corporation and/or its licensors, and only be used, duplicated,
+ * modified or distributed under the authorized license from Realtek.
+ *
+ * ANY USE OF THE SOFTWARE OTHER THAN AS AUTHORIZED UNDER
+ * THIS LICENSE OR COPYRIGHT LAW IS PROHIBITED.
+ *
+ * Purpose : RTL8367/RTL8367C switch high-level API
+ *
+ * Feature : This file includes the CPU module high-level API definitions
+ *
+ */
+
+#ifndef __RTK_API_CPU_H__
+#define __RTK_API_CPU_H__
+
+
+/*
+ * Data Type Declaration
+ */
+typedef enum rtk_cpu_insert_e
+{
+    CPU_INSERT_TO_ALL = 0,
+    CPU_INSERT_TO_TRAPPING,
+    CPU_INSERT_TO_NONE,
+    CPU_INSERT_END
+}rtk_cpu_insert_t;
+
+typedef enum rtk_cpu_position_e
+{
+    CPU_POS_AFTER_SA = 0,
+    CPU_POS_BEFORE_CRC,
+    CPU_POS_END
+}rtk_cpu_position_t;
+
+typedef enum rtk_cpu_tag_length_e
+{
+    CPU_LEN_8BYTES = 0,
+    CPU_LEN_4BYTES,
+    CPU_LEN_END
+}rtk_cpu_tag_length_t;
+
+
+typedef enum rtk_cpu_rx_length_e
+{
+    CPU_RX_72BYTES = 0,
+    CPU_RX_64BYTES,
+    CPU_RX_END
+}rtk_cpu_rx_length_t;
+
+
+/* Function Name:
+ *      rtk_cpu_enable_set
+ * Description:
+ *      Set CPU port function enable/disable.
+ * Input:
+ *      enable - CPU port function enable
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_INPUT        - Invalid input parameter.
+ *      RT_ERR_PORT_ID      - Invalid port number.
+ * Note:
+ *      The API can set CPU port function enable/disable.
+ */
+extern rtk_api_ret_t rtk_cpu_enable_set(rtk_enable_t enable);
+
+/* Function Name:
+ *      rtk_cpu_enable_get
+ * Description:
+ *      Get CPU port and its setting.
+ * Input:
+ *      None
+ * Output:
+ *      pEnable - CPU port function enable
+ * Return:
+ *      RT_ERR_OK               - OK
+ *      RT_ERR_FAILED           - Failed
+ *      RT_ERR_SMI              - SMI access error
+ *      RT_ERR_INPUT            - Invalid input parameters.
+ *      RT_ERR_L2_NO_CPU_PORT   - CPU port does not exist
+ * Note:
+ *      The API can get CPU port function enable/disable.
+ */
+extern rtk_api_ret_t rtk_cpu_enable_get(rtk_enable_t *pEnable);
+
+/* Function Name:
+ *      rtk_cpu_tagPort_set
+ * Description:
+ *      Set CPU port and CPU tag insert mode.
+ * Input:
+ *      port - Port id.
+ *      mode - CPU tag insert for packets egress from CPU port.
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_INPUT        - Invalid input parameter.
+ *      RT_ERR_PORT_ID      - Invalid port number.
+ * Note:
+ *      The API can set the CPU port and the mode for inserting the proprietary
+ *      CPU tag (Length/Type 0x8899) into frames transmitted to the CPU port.
+ *      The insert CPU tag mode is one of the following:
+ *      - CPU_INSERT_TO_ALL
+ *      - CPU_INSERT_TO_TRAPPING
+ *      - CPU_INSERT_TO_NONE
+ */
+extern rtk_api_ret_t rtk_cpu_tagPort_set(rtk_port_t port, rtk_cpu_insert_t mode);
+
+/* Function Name:
+ *      rtk_cpu_tagPort_get
+ * Description:
+ *      Get CPU port and CPU tag insert mode.
+ * Input:
+ *      None
+ * Output:
+ *      pPort - Port id.
+ *      pMode - CPU tag insert mode for packets egressing the CPU port, 0: insert into all packets, 1: insert only into trapped packets, 2: no insert.
+ * Return:
+ *      RT_ERR_OK               - OK
+ *      RT_ERR_FAILED           - Failed
+ *      RT_ERR_SMI              - SMI access error
+ *      RT_ERR_INPUT            - Invalid input parameters.
+ *      RT_ERR_L2_NO_CPU_PORT   - CPU port does not exist
+ * Note:
+ *      The API can get configured CPU port and its setting.
+ *      The insert CPU tag mode is one of the following:
+ *      - CPU_INSERT_TO_ALL
+ *      - CPU_INSERT_TO_TRAPPING
+ *      - CPU_INSERT_TO_NONE
+ */
+extern rtk_api_ret_t rtk_cpu_tagPort_get(rtk_port_t *pPort, rtk_cpu_insert_t *pMode);
+
+/* Function Name:
+ *      rtk_cpu_awarePort_set
+ * Description:
+ *      Set CPU aware port mask.
+ * Input:
+ *      portmask - Port mask.
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_PORT_MASK      - Invalid port mask.
+ * Note:
+ *      The API can set configured CPU aware port mask.
+ */
+extern rtk_api_ret_t rtk_cpu_awarePort_set(rtk_portmask_t *pPortmask);
+
+
+/* Function Name:
+ *      rtk_cpu_awarePort_get
+ * Description:
+ *      Get CPU aware port mask.
+ * Input:
+ *      None
+ * Output:
+ *      pPortmask - Port mask.
+ * Return:
+ *      RT_ERR_OK               - OK
+ *      RT_ERR_FAILED           - Failed
+ *      RT_ERR_SMI              - SMI access error
+ * Note:
+ *      The API can get configured CPU aware port mask.
+ */
+extern rtk_api_ret_t rtk_cpu_awarePort_get(rtk_portmask_t *pPortmask);
+
+/* Function Name:
+ *      rtk_cpu_tagPosition_set
+ * Description:
+ *      Set CPU tag position.
+ * Input:
+ *      position - CPU tag position.
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_INPUT      - Invalid input.
+ * Note:
+ *      The API can set CPU tag position.
+ */
+extern rtk_api_ret_t rtk_cpu_tagPosition_set(rtk_cpu_position_t position);
+
+/* Function Name:
+ *      rtk_cpu_tagPosition_get
+ * Description:
+ *      Get CPU tag position.
+ * Input:
+ *      None
+ * Output:
+ *      pPosition - CPU tag position.
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_INPUT      - Invalid input.
+ * Note:
+ *      The API can get CPU tag position.
+ */
+extern rtk_api_ret_t rtk_cpu_tagPosition_get(rtk_cpu_position_t *pPosition);
+
+/* Function Name:
+ *      rtk_cpu_tagLength_set
+ * Description:
+ *      Set CPU tag length.
+ * Input:
+ *      length - CPU tag length.
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_INPUT      - Invalid input.
+ * Note:
+ *      The API can set CPU tag length.
+ */
+extern rtk_api_ret_t rtk_cpu_tagLength_set(rtk_cpu_tag_length_t length);
+
+/* Function Name:
+ *      rtk_cpu_tagLength_get
+ * Description:
+ *      Get CPU tag length.
+ * Input:
+ *      None
+ * Output:
+ *      pLength - CPU tag length.
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_INPUT      - Invalid input.
+ * Note:
+ *      The API can get CPU tag length.
+ */
+extern rtk_api_ret_t rtk_cpu_tagLength_get(rtk_cpu_tag_length_t *pLength);
+
+/* Function Name:
+ *      rtk_cpu_acceptLength_set
+ * Description:
+ *      Set CPU accept length.
+ * Input:
+ *      length - CPU accept length.
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_INPUT      - Invalid input.
+ * Note:
+ *      The API can set CPU accept length.
+ */
+extern rtk_api_ret_t rtk_cpu_acceptLength_set(rtk_cpu_rx_length_t length);
+
+/* Function Name:
+ *      rtk_cpu_acceptLength_get
+ * Description:
+ *      Get CPU accept length.
+ * Input:
+ *      None
+ * Output:
+ *      pLength - CPU accept length.
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_INPUT      - Invalid input.
+ * Note:
+ *      The API can get CPU accept length.
+ */
+extern rtk_api_ret_t rtk_cpu_acceptLength_get(rtk_cpu_rx_length_t *pLength);
+
+/* Function Name:
+ *      rtk_cpu_priRemap_set
+ * Description:
+ *      Configure CPU priorities mapping to internal absolute priority.
+ * Input:
+ *      int_pri     - internal priority value.
+ *      new_pri     - new internal priority value.
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK               - OK
+ *      RT_ERR_FAILED           - Failed
+ *      RT_ERR_SMI              - SMI access error
+ *      RT_ERR_INPUT            - Invalid input parameters.
+ *      RT_ERR_VLAN_PRIORITY    - Invalid 1p priority.
+ *      RT_ERR_QOS_INT_PRIORITY - Invalid priority.
+ * Note:
+ *      The CPU tag priority is remapped to an internal ASIC priority, which is used for queue selection and packet scheduling.
+ */
+extern rtk_api_ret_t rtk_cpu_priRemap_set(rtk_pri_t int_pri, rtk_pri_t new_pri);
+
+/* Function Name:
+ *      rtk_cpu_priRemap_get
+ * Description:
+ *      Get CPU priority mapping to internal absolute priority.
+ * Input:
+ *      int_pri     - internal priority value.
+ * Output:
+ *      pNew_pri    - new internal priority value.
+ * Return:
+ *      RT_ERR_OK               - OK
+ *      RT_ERR_FAILED           - Failed
+ *      RT_ERR_SMI              - SMI access error
+ *      RT_ERR_INPUT            - Invalid input parameters.
+ *      RT_ERR_VLAN_PRIORITY    - Invalid 1p priority.
+ *      RT_ERR_QOS_INT_PRIORITY - Invalid priority.
+ * Note:
+ *      The CPU tag priority is remapped to an internal ASIC priority, which is used for queue selection and packet scheduling.
+ */
+extern rtk_api_ret_t rtk_cpu_priRemap_get(rtk_pri_t int_pri, rtk_pri_t *pNew_pri);
+
+
+#endif /* __RTK_API_CPU_H__ */
diff -Nruw linux-4.4.115-fbx/drivers/misc/freebox./rtlapi/dot1x.c linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/dot1x.c
--- linux-4.4.115-fbx/drivers/misc/freebox./rtlapi/dot1x.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/dot1x.c	2019-01-22 16:16:24.707257309 +0100
@@ -0,0 +1,1066 @@
+/*
+ * Copyright (C) 2013 Realtek Semiconductor Corp.
+ * All Rights Reserved.
+ *
+ * This program is the proprietary software of Realtek Semiconductor
+ * Corporation and/or its licensors, and only be used, duplicated,
+ * modified or distributed under the authorized license from Realtek.
+ *
+ * ANY USE OF THE SOFTWARE OTHER THAN AS AUTHORIZED UNDER
+ * THIS LICENSE OR COPYRIGHT LAW IS PROHIBITED.
+ *
+ * $Revision: 79629 $
+ * $Date: 2017-06-14 18:08:03 +0800 (Wed, 14 Jun 2017) $
+ *
+ * Purpose : RTK switch high-level API for RTL8367/RTL8367C
+ * Feature : Here is a list of all functions and variables in 1X module.
+ *
+ */
+
+#include <rtk_switch.h>
+#include <rtk_error.h>
+#include <dot1x.h>
+#include <string.h>
+#include <vlan.h>
+#include <rtl8367c_asicdrv.h>
+#include <rtl8367c_asicdrv_dot1x.h>
+#include <rtl8367c_asicdrv_rma.h>
+#include <rtl8367c_asicdrv_lut.h>
+#include <rtl8367c_asicdrv_vlan.h>
+
+static rtk_api_ret_t _rtk_dot1x_unauthPacketOper_set(rtk_port_t port, rtk_dot1x_unauth_action_t unauth_action)
+{
+    rtk_api_ret_t retVal;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    /* Check port Valid */
+    RTK_CHK_PORT_VALID(port);
+
+    if (unauth_action >= DOT1X_ACTION_END)
+        return RT_ERR_DOT1X_PROC;
+
+    if ((retVal = rtl8367c_setAsic1xProcConfig(rtk_switch_port_L2P_get(port), unauth_action)) != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_dot1x_unauthPacketOper_get(rtk_port_t port, rtk_dot1x_unauth_action_t *pUnauth_action)
+{
+    rtk_api_ret_t retVal;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    /* Check port Valid */
+    RTK_CHK_PORT_VALID(port);
+
+    if(NULL == pUnauth_action)
+        return RT_ERR_NULL_POINTER;
+
+    if ((retVal = rtl8367c_getAsic1xProcConfig(rtk_switch_port_L2P_get(port), pUnauth_action)) != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_dot1x_eapolFrame2CpuEnable_set(rtk_enable_t enable)
+{
+    rtk_api_ret_t retVal;
+    rtl8367c_rma_t rmacfg;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if (enable >= RTK_ENABLE_END)
+        return RT_ERR_ENABLE;
+
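+    /* RMA entry 3 corresponds to the reserved multicast address
+     * 01-80-C2-00-00-03, the 802.1X PAE (EAPOL) group address. */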
+    if ((retVal = rtl8367c_getAsicRma(3, &rmacfg)) != RT_ERR_OK)
+        return retVal;
+
+    if (ENABLED == enable)
+        rmacfg.operation = RMAOP_TRAP_TO_CPU;
+    else if (DISABLED == enable)
+        rmacfg.operation = RMAOP_FORWARD;
+
+    if ((retVal = rtl8367c_setAsicRma(3, &rmacfg)) != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_dot1x_eapolFrame2CpuEnable_get(rtk_enable_t *pEnable)
+{
+    rtk_api_ret_t retVal;
+    rtl8367c_rma_t rmacfg;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if(NULL == pEnable)
+        return RT_ERR_NULL_POINTER;
+
+    if ((retVal = rtl8367c_getAsicRma(3, &rmacfg)) != RT_ERR_OK)
+        return retVal;
+
+    if (RMAOP_TRAP_TO_CPU == rmacfg.operation)
+        *pEnable = ENABLED;
+    else
+        *pEnable = DISABLED;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_dot1x_portBasedEnable_set(rtk_port_t port, rtk_enable_t enable)
+{
+    rtk_api_ret_t retVal;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    /* Check port Valid */
+    RTK_CHK_PORT_VALID(port);
+
+    if (enable >= RTK_ENABLE_END)
+        return RT_ERR_ENABLE;
+
+    if ((retVal = rtl8367c_setAsic1xPBEnConfig(rtk_switch_port_L2P_get(port),enable)) != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_dot1x_portBasedEnable_get(rtk_port_t port, rtk_enable_t *pEnable)
+{
+    rtk_api_ret_t retVal;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    /* Check port Valid */
+    RTK_CHK_PORT_VALID(port);
+
+    if(NULL == pEnable)
+        return RT_ERR_NULL_POINTER;
+
+    if ((retVal = rtl8367c_getAsic1xPBEnConfig(rtk_switch_port_L2P_get(port), pEnable)) != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_dot1x_portBasedAuthStatus_set(rtk_port_t port, rtk_dot1x_auth_status_t port_auth)
+{
+    rtk_api_ret_t retVal;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    /* Check port Valid */
+    RTK_CHK_PORT_VALID(port);
+
+     if (port_auth >= AUTH_STATUS_END)
+        return RT_ERR_DOT1X_PORTBASEDAUTH;
+
+    if ((retVal = rtl8367c_setAsic1xPBAuthConfig(rtk_switch_port_L2P_get(port), port_auth)) != RT_ERR_OK)
+        return retVal;
+
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_dot1x_portBasedAuthStatus_get(rtk_port_t port, rtk_dot1x_auth_status_t *pPort_auth)
+{
+    rtk_api_ret_t retVal;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if(NULL == pPort_auth)
+        return RT_ERR_NULL_POINTER;
+
+    /* Check port Valid */
+    RTK_CHK_PORT_VALID(port);
+
+    if ((retVal = rtl8367c_getAsic1xPBAuthConfig(rtk_switch_port_L2P_get(port), pPort_auth)) != RT_ERR_OK)
+        return retVal;
+    return RT_ERR_OK;
+}
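+
+/*
+ * Usage sketch (illustrative; assumes the public rtk_dot1x_* wrappers,
+ * defined later in this file, mirror the locking pattern used in cpu.c):
+ *
+ *    rtk_dot1x_eapolFrame2CpuEnable_set(ENABLED);    trap EAPOL to the CPU
+ *    rtk_dot1x_portBasedEnable_set(port, ENABLED);   enable 802.1X on port
+ *    (run the EAP exchange in software, then on success:)
+ *    rtk_dot1x_portBasedAuthStatus_set(port, status);
+ *
+ * where 'status' is a value from the authentication status enum declared
+ * in dot1x.h.
+ */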
+
+static rtk_api_ret_t _rtk_dot1x_portBasedDirection_set(rtk_port_t port, rtk_dot1x_direction_t port_direction)
+{
+    rtk_api_ret_t retVal;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    /* Check port Valid */
+    RTK_CHK_PORT_VALID(port);
+
+    if (port_direction >= DIRECTION_END)
+        return RT_ERR_DOT1X_PORTBASEDOPDIR;
+
+    if ((retVal = rtl8367c_setAsic1xPBOpdirConfig(rtk_switch_port_L2P_get(port), port_direction)) != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_dot1x_portBasedDirection_get(rtk_port_t port, rtk_dot1x_direction_t *pPort_direction)
+{
+    rtk_api_ret_t retVal;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if(NULL == pPort_direction)
+        return RT_ERR_NULL_POINTER;
+
+    /* Check port Valid */
+    RTK_CHK_PORT_VALID(port);
+
+    if ((retVal = rtl8367c_getAsic1xPBOpdirConfig(rtk_switch_port_L2P_get(port), pPort_direction)) != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_dot1x_macBasedEnable_set(rtk_port_t port, rtk_enable_t enable)
+{
+    rtk_api_ret_t retVal;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    /* Check port Valid */
+    RTK_CHK_PORT_VALID(port);
+
+    if (enable >= RTK_ENABLE_END)
+        return RT_ERR_ENABLE;
+
+    if ((retVal = rtl8367c_setAsic1xMBEnConfig(rtk_switch_port_L2P_get(port),enable)) != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_dot1x_macBasedEnable_get(rtk_port_t port, rtk_enable_t *pEnable)
+{
+    rtk_api_ret_t retVal;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if(NULL == pEnable)
+        return RT_ERR_NULL_POINTER;
+
+    /* Check port Valid */
+    RTK_CHK_PORT_VALID(port);
+
+    if ((retVal = rtl8367c_getAsic1xMBEnConfig(rtk_switch_port_L2P_get(port), pEnable)) != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_dot1x_macBasedAuthMac_add(rtk_port_t port, rtk_mac_t *pAuth_mac, rtk_fid_t fid)
+{
+    rtk_api_ret_t retVal;
+    rtk_uint32 method;
+    rtl8367c_luttb l2Table;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    /* must be unicast address */
+    if ((pAuth_mac == NULL) || (pAuth_mac->octet[0] & 0x1))
+        return RT_ERR_MAC;
+
+    /* Check port Valid */
+    RTK_CHK_PORT_VALID(port);
+
+    if (fid > RTL8367C_FIDMAX)
+        return RT_ERR_L2_FID;
+
+    memset(&l2Table, 0, sizeof(rtl8367c_luttb));
+
+    /* fill key (MAC,FID) to get L2 entry */
+    memcpy(l2Table.mac.octet, pAuth_mac->octet, ETHER_ADDR_LEN);
+    l2Table.fid = fid;
+    method = LUTREADMETHOD_MAC;
+    retVal = rtl8367c_getAsicL2LookupTb(method, &l2Table);
+    if (RT_ERR_OK == retVal)
+    {
+        if (l2Table.spa != rtk_switch_port_L2P_get(port))
+            return RT_ERR_DOT1X_MAC_PORT_MISMATCH;
+
+        memcpy(l2Table.mac.octet, pAuth_mac->octet, ETHER_ADDR_LEN);
+        l2Table.fid = fid;
+        l2Table.efid = 0;
+        l2Table.auth = 1;
+        retVal = rtl8367c_setAsicL2LookupTb(&l2Table);
+    }
+
+    return retVal;
+}
+
+static rtk_api_ret_t _rtk_dot1x_macBasedAuthMac_del(rtk_port_t port, rtk_mac_t *pAuth_mac, rtk_fid_t fid)
+{
+    rtk_api_ret_t retVal;
+    rtk_uint32 method;
+    rtl8367c_luttb l2Table;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    /* must be unicast address */
+    if ((pAuth_mac == NULL) || (pAuth_mac->octet[0] & 0x1))
+        return RT_ERR_MAC;
+
+    /* Check port Valid */
+    RTK_CHK_PORT_VALID(port);
+
+    if (fid > RTL8367C_FIDMAX)
+        return RT_ERR_L2_FID;
+
+    memset(&l2Table, 0, sizeof(rtl8367c_luttb));
+
+    /* fill key (MAC,FID) to get L2 entry */
+    memcpy(l2Table.mac.octet, pAuth_mac->octet, ETHER_ADDR_LEN);
+    l2Table.fid = fid;
+    method = LUTREADMETHOD_MAC;
+    retVal = rtl8367c_getAsicL2LookupTb(method, &l2Table);
+    if (RT_ERR_OK == retVal)
+    {
+        if (l2Table.spa != rtk_switch_port_L2P_get(port))
+            return RT_ERR_DOT1X_MAC_PORT_MISMATCH;
+
+        memcpy(l2Table.mac.octet, pAuth_mac->octet, ETHER_ADDR_LEN);
+        l2Table.fid = fid;
+        l2Table.auth = 0;
+        retVal = rtl8367c_setAsicL2LookupTb(&l2Table);
+    }
+
+    return retVal;
+}
+
+static rtk_api_ret_t _rtk_dot1x_macBasedDirection_set(rtk_dot1x_direction_t mac_direction)
+{
+    rtk_api_ret_t retVal;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if (mac_direction >= DIRECTION_END)
+        return RT_ERR_DOT1X_MACBASEDOPDIR;
+
+    if ((retVal = rtl8367c_setAsic1xMBOpdirConfig(mac_direction)) != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_dot1x_macBasedDirection_get(rtk_dot1x_direction_t *pMac_direction)
+{
+    rtk_api_ret_t retVal;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if(NULL == pMac_direction)
+        return RT_ERR_NULL_POINTER;
+
+    if ((retVal = rtl8367c_getAsic1xMBOpdirConfig(pMac_direction)) != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_dot1x_guestVlan_set(rtk_vlan_t vid)
+{
+    rtk_api_ret_t retVal;
+    rtk_uint32 index;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    /* vid must be 0~4095 */
+    if (vid > RTL8367C_VIDMAX)
+        return RT_ERR_VLAN_VID;
+
+    if((retVal = rtk_vlan_checkAndCreateMbr(vid, &index)) != RT_ERR_OK)
+        return retVal;
+
+    if ((retVal = rtl8367c_setAsic1xGuestVidx(index)) != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_dot1x_guestVlan_get(rtk_vlan_t *pVid)
+{
+    rtk_api_ret_t retVal;
+    rtk_uint32 gvidx;
+    rtl8367c_vlanconfiguser vlanMC;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if(NULL == pVid)
+        return RT_ERR_NULL_POINTER;
+
+    if ((retVal = rtl8367c_getAsic1xGuestVidx(&gvidx)) != RT_ERR_OK)
+        return retVal;
+
+    if ((retVal = rtl8367c_getAsicVlanMemberConfig(gvidx, &vlanMC)) != RT_ERR_OK)
+        return retVal;
+
+    *pVid = vlanMC.evid;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_dot1x_guestVlan2Auth_set(rtk_enable_t enable)
+{
+    rtk_api_ret_t retVal;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if (enable >= RTK_ENABLE_END)
+        return RT_ERR_ENABLE;
+
+    if ((retVal = rtl8367c_setAsic1xGVOpdir(enable)) != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_dot1x_guestVlan2Auth_get(rtk_enable_t *pEnable)
+{
+    rtk_api_ret_t retVal;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if(NULL == pEnable)
+        return RT_ERR_NULL_POINTER;
+
+    if ((retVal = rtl8367c_getAsic1xGVOpdir(pEnable)) != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+/* Function Name:
+ *      rtk_dot1x_unauthPacketOper_set
+ * Description:
+ *      Set 802.1x unauth action configuration.
+ * Input:
+ *      port            - Port id.
+ *      unauth_action   - 802.1X unauth action.
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_PORT_ID      - Invalid port number.
+ *      RT_ERR_INPUT        - Invalid input parameter.
+ * Note:
+ *      This API can set 802.1x unauth action configuration.
+ *      The unauth action is as follows:
+ *      - DOT1X_ACTION_DROP
+ *      - DOT1X_ACTION_TRAP2CPU
+ *      - DOT1X_ACTION_GUESTVLAN
+ */
+rtk_api_ret_t rtk_dot1x_unauthPacketOper_set(rtk_port_t port, rtk_dot1x_unauth_action_t unauth_action)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_dot1x_unauthPacketOper_set(port, unauth_action);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_dot1x_unauthPacketOper_get
+ * Description:
+ *      Get 802.1x unauth action configuration.
+ * Input:
+ *      port - Port id.
+ * Output:
+ *      pUnauth_action - 802.1X unauth action.
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_INPUT        - Invalid input parameters.
+ *      RT_ERR_PORT_ID      - Invalid port number.
+ * Note:
+ *      This API can get 802.1x unauth action configuration.
+ *      The unauth action is as follows:
+ *      - DOT1X_ACTION_DROP
+ *      - DOT1X_ACTION_TRAP2CPU
+ *      - DOT1X_ACTION_GUESTVLAN
+ */
+rtk_api_ret_t rtk_dot1x_unauthPacketOper_get(rtk_port_t port, rtk_dot1x_unauth_action_t *pUnauth_action)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_dot1x_unauthPacketOper_get(port, pUnauth_action);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_dot1x_eapolFrame2CpuEnable_set
+ * Description:
+ *      Set 802.1x EAPOL packet trap to CPU configuration
+ * Input:
+ *      enable - The status of 802.1x EAPOL packet.
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_ENABLE       - Invalid enable input.
+ * Note:
+ *      To support 802.1x authentication functionality, EAPOL frame (ether type = 0x888E) has to
+ *      be trapped to CPU.
+ *      The status of EAPOL frame trap to CPU is as follows:
+ *      - DISABLED
+ *      - ENABLED
+ */
+rtk_api_ret_t rtk_dot1x_eapolFrame2CpuEnable_set(rtk_enable_t enable)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_dot1x_eapolFrame2CpuEnable_set(enable);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_dot1x_eapolFrame2CpuEnable_get
+ * Description:
+ *      Get 802.1x EAPOL packet trap to CPU configuration
+ * Input:
+ *      None
+ * Output:
+ *      pEnable - The status of 802.1x EAPOL packet.
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_INPUT        - Invalid input parameters.
+ * Note:
+ *      To support 802.1x authentication functionality, EAPOL frame (ether type = 0x888E) has to
+ *      be trapped to CPU.
+ *      The status of EAPOL frame trap to CPU is as follows:
+ *      - DISABLED
+ *      - ENABLED
+ */
+rtk_api_ret_t rtk_dot1x_eapolFrame2CpuEnable_get(rtk_enable_t *pEnable)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_dot1x_eapolFrame2CpuEnable_get(pEnable);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
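+
+/*
+ * Illustrative sketch (not part of the original driver): a minimal 802.1x
+ * bring-up built on the wrappers above. EAPOL frames (ethertype 0x888E) are
+ * trapped to the CPU so a software authenticator can run the EAP exchange,
+ * and traffic from unauthenticated hosts is dropped. The port id is a
+ * caller-supplied placeholder; the block is guarded by #if 0 and never
+ * compiled.
+ */
+#if 0
+static rtk_api_ret_t example_dot1x_global_setup(rtk_port_t port)
+{
+    rtk_api_ret_t retVal;
+
+    /* EAPOL must reach the CPU for the authentication exchange to run. */
+    if ((retVal = rtk_dot1x_eapolFrame2CpuEnable_set(ENABLED)) != RT_ERR_OK)
+        return retVal;
+
+    /* Drop traffic from hosts that have not authenticated yet. */
+    if ((retVal = rtk_dot1x_unauthPacketOper_set(port, DOT1X_ACTION_DROP)) != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+#endif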
+
+/* Function Name:
+ *      rtk_dot1x_portBasedEnable_set
+ * Description:
+ *      Set 802.1x port-based enable configuration
+ * Input:
+ *      port - Port id.
+ *      enable - The status of 802.1x port.
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK                   - OK
+ *      RT_ERR_FAILED               - Failed
+ *      RT_ERR_SMI                  - SMI access error
+ *      RT_ERR_PORT_ID              - Invalid port number.
+ *      RT_ERR_ENABLE               - Invalid enable input.
+ *      RT_ERR_DOT1X_PORTBASEDPNEN  - 802.1X port-based enable error
+ * Note:
+ *      The API can update the port-based port enable register content. If a port is 802.1x
+ *      port-based network access control "enabled", it should be authenticated so packets
+ *      from that port won't be dropped or trapped to CPU.
+ *      The status of 802.1x port-based network access control is as follows:
+ *      - DISABLED
+ *      - ENABLED
+ */
+rtk_api_ret_t rtk_dot1x_portBasedEnable_set(rtk_port_t port, rtk_enable_t enable)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_dot1x_portBasedEnable_set(port, enable);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_dot1x_portBasedEnable_get
+ * Description:
+ *      Get 802.1x port-based enable configuration
+ * Input:
+ *      port - Port id.
+ * Output:
+ *      pEnable - The status of 802.1x port.
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_INPUT        - Invalid input parameters.
+ *      RT_ERR_PORT_ID      - Invalid port number.
+ * Note:
+ *      The API can get the 802.1x port-based port status.
+ */
+rtk_api_ret_t rtk_dot1x_portBasedEnable_get(rtk_port_t port, rtk_enable_t *pEnable)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_dot1x_portBasedEnable_get(port, pEnable);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_dot1x_portBasedAuthStatus_set
+ * Description:
+ *      Set 802.1x port-based auth. port configuration
+ * Input:
+ *      port - Port id.
+ *      port_auth - The status of 802.1x port.
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK                   - OK
+ *      RT_ERR_FAILED               - Failed
+ *      RT_ERR_SMI                  - SMI access error
+ *      RT_ERR_PORT_ID              - Invalid port number.
+ *      RT_ERR_DOT1X_PORTBASEDAUTH  - 802.1X port-based auth error
+ * Note:
+ *      The authenticated status of 802.1x port-based network access control is as follows:
+ *      - UNAUTH
+ *      - AUTH
+ */
+rtk_api_ret_t rtk_dot1x_portBasedAuthStatus_set(rtk_port_t port, rtk_dot1x_auth_status_t port_auth)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_dot1x_portBasedAuthStatus_set(port, port_auth);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_dot1x_portBasedAuthStatus_get
+ * Description:
+ *      Get 802.1x port-based auth. port configuration
+ * Input:
+ *      port - Port id.
+ * Output:
+ *      pPort_auth - The status of 802.1x port.
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_INPUT        - Invalid input parameters.
+ *      RT_ERR_PORT_ID      - Invalid port number.
+ * Note:
+ *      The API can get 802.1x port-based port authentication information.
+ */
+rtk_api_ret_t rtk_dot1x_portBasedAuthStatus_get(rtk_port_t port, rtk_dot1x_auth_status_t *pPort_auth)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_dot1x_portBasedAuthStatus_get(port, pPort_auth);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_dot1x_portBasedDirection_set
+ * Description:
+ *      Set 802.1x port-based operational direction configuration
+ * Input:
+ *      port            - Port id.
+ *      port_direction  - Operation direction
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK                   - OK
+ *      RT_ERR_FAILED               - Failed
+ *      RT_ERR_SMI                  - SMI access error
+ *      RT_ERR_PORT_ID              - Invalid port number.
+ *      RT_ERR_DOT1X_PORTBASEDOPDIR - 802.1X port-based operation direction error
+ * Note:
+ *      The controlled direction of 802.1x port-based network access control is as follows:
+ *      - BOTH
+ *      - IN
+ */
+rtk_api_ret_t rtk_dot1x_portBasedDirection_set(rtk_port_t port, rtk_dot1x_direction_t port_direction)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_dot1x_portBasedDirection_set(port, port_direction);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_dot1x_portBasedDirection_get
+ * Description:
+ *      Get 802.1X port-based operational direction configuration
+ * Input:
+ *      port - Port id.
+ * Output:
+ *      pPort_direction - Operation direction
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_INPUT        - Invalid input parameters.
+ *      RT_ERR_PORT_ID      - Invalid port number.
+ * Note:
+ *      The API can get 802.1x port-based operational direction information.
+ */
+rtk_api_ret_t rtk_dot1x_portBasedDirection_get(rtk_port_t port, rtk_dot1x_direction_t *pPort_direction)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_dot1x_portBasedDirection_get(port, pPort_direction);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
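+
+/*
+ * Illustrative sketch (not part of the original driver): the usual port-based
+ * sequence. The port is placed under port-based control (its traffic is then
+ * subject to the configured unauth action) and, once the attached host passes
+ * EAP authentication, the whole port is marked AUTH. The port id is a
+ * placeholder; the block is guarded by #if 0 and never compiled.
+ */
+#if 0
+static rtk_api_ret_t example_dot1x_portBased_flow(rtk_port_t port)
+{
+    rtk_api_ret_t retVal;
+
+    if ((retVal = rtk_dot1x_portBasedEnable_set(port, ENABLED)) != RT_ERR_OK)
+        return retVal;
+
+    /* Filter both directions until authentication succeeds. */
+    if ((retVal = rtk_dot1x_portBasedDirection_set(port, DIR_BOTH)) != RT_ERR_OK)
+        return retVal;
+
+    /* ... EAP exchange runs in software; on success: */
+    return rtk_dot1x_portBasedAuthStatus_set(port, AUTH);
+}
+#endif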
+
+/* Function Name:
+ *      rtk_dot1x_macBasedEnable_set
+ * Description:
+ *      Set 802.1x mac-based port enable configuration
+ * Input:
+ *      port - Port id.
+ *      enable - The status of 802.1x port.
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK                   - OK
+ *      RT_ERR_FAILED               - Failed
+ *      RT_ERR_SMI                  - SMI access error
+ *      RT_ERR_PORT_ID              - Invalid port number.
+ *      RT_ERR_ENABLE               - Invalid enable input.
+ *      RT_ERR_DOT1X_MACBASEDPNEN   - 802.1X mac-based enable error
+ * Note:
+ *      If a port is 802.1x MAC-based network access control "enabled", the incoming packets should
+ *      be authenticated so packets from that port won't be dropped or trapped to CPU.
+ *      The status of 802.1x MAC-based network access control is as follows:
+ *      - DISABLED
+ *      - ENABLED
+ */
+rtk_api_ret_t rtk_dot1x_macBasedEnable_set(rtk_port_t port, rtk_enable_t enable)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_dot1x_macBasedEnable_set(port, enable);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_dot1x_macBasedEnable_get
+ * Description:
+ *      Get 802.1x mac-based port enable configuration
+ * Input:
+ *      port - Port id.
+ * Output:
+ *      pEnable - The status of 802.1x port.
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_INPUT        - Invalid input parameters.
+ *      RT_ERR_PORT_ID      - Invalid port number.
+ * Note:
+ *      If a port is 802.1x MAC-based network access control "enabled", the incoming packets should
+ *      be authenticated so packets from that port won't be dropped or trapped to CPU.
+ *      The status of 802.1x MAC-based network access control is as follows:
+ *      - DISABLED
+ *      - ENABLED
+ */
+rtk_api_ret_t rtk_dot1x_macBasedEnable_get(rtk_port_t port, rtk_enable_t *pEnable)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_dot1x_macBasedEnable_get(port, pEnable);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_dot1x_macBasedAuthMac_add
+ * Description:
+ *      Add an authenticated MAC to the ASIC
+ * Input:
+ *      port        - Port id.
+ *      pAuth_mac   - The authenticated MAC.
+ *      fid         - filtering database.
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK                   - OK
+ *      RT_ERR_FAILED               - Failed
+ *      RT_ERR_SMI                  - SMI access error
+ *      RT_ERR_PORT_ID              - Invalid port number.
+ *      RT_ERR_ENABLE               - Invalid enable input.
+ *      RT_ERR_DOT1X_MACBASEDPNEN   - 802.1X mac-based enable error
+ * Note:
+ *      The API can add an 802.1x authenticated MAC address to a port. If the MAC does not exist
+ *      in the LUT, the user can't set this MAC to the authenticated state.
+ */
+rtk_api_ret_t rtk_dot1x_macBasedAuthMac_add(rtk_port_t port, rtk_mac_t *pAuth_mac, rtk_fid_t fid)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_dot1x_macBasedAuthMac_add(port, pAuth_mac, fid);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_dot1x_macBasedAuthMac_del
+ * Description:
+ *      Delete an authenticated MAC from the ASIC
+ * Input:
+ *      port - Port id.
+ *      pAuth_mac - The authenticated MAC.
+ *      fid - filtering database.
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_MAC          - Invalid MAC address.
+ *      RT_ERR_PORT_ID      - Invalid port number.
+ * Note:
+ *      The API can delete an 802.1x authenticated MAC address from a port. It only changes the
+ *      auth status of the MAC and won't delete it from the LUT.
+ */
+rtk_api_ret_t rtk_dot1x_macBasedAuthMac_del(rtk_port_t port, rtk_mac_t *pAuth_mac, rtk_fid_t fid)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_dot1x_macBasedAuthMac_del(port, pAuth_mac, fid);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
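+
+/*
+ * Illustrative sketch (not part of the original driver): MAC-based control.
+ * The MAC must already be learned in the LUT on the given port, otherwise
+ * the add fails (lookup error or RT_ERR_DOT1X_MAC_PORT_MISMATCH). The MAC
+ * address and fid 0 are placeholders; the block is guarded by #if 0 and
+ * never compiled.
+ */
+#if 0
+static rtk_api_ret_t example_dot1x_macBased_flow(rtk_port_t port)
+{
+    rtk_api_ret_t retVal;
+    rtk_mac_t host = { .octet = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 } };
+
+    if ((retVal = rtk_dot1x_macBasedEnable_set(port, ENABLED)) != RT_ERR_OK)
+        return retVal;
+
+    /* After the host passes EAP, flag its LUT entry as authenticated. */
+    return rtk_dot1x_macBasedAuthMac_add(port, &host, 0);
+}
+#endif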
+
+/* Function Name:
+ *      rtk_dot1x_macBasedDirection_set
+ * Description:
+ *      Set 802.1x mac-based operational direction configuration
+ * Input:
+ *      mac_direction - Operation direction
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK                   - OK
+ *      RT_ERR_FAILED               - Failed
+ *      RT_ERR_SMI                  - SMI access error
+ *      RT_ERR_INPUT                - Invalid input parameter.
+ *      RT_ERR_DOT1X_MACBASEDOPDIR  - 802.1X mac-based operation direction error
+ * Note:
+ *      The controlled direction of 802.1x mac-based network access control is as follows:
+ *      - BOTH
+ *      - IN
+ */
+rtk_api_ret_t rtk_dot1x_macBasedDirection_set(rtk_dot1x_direction_t mac_direction)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_dot1x_macBasedDirection_set(mac_direction);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_dot1x_macBasedDirection_get
+ * Description:
+ *      Get 802.1x mac-based operational direction configuration
+ * Input:
+ *      None
+ * Output:
+ *      pMac_direction - Operation direction
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_INPUT        - Invalid input parameters.
+ * Note:
+ *      The API can get 802.1x mac-based operational direction information.
+ */
+rtk_api_ret_t rtk_dot1x_macBasedDirection_get(rtk_dot1x_direction_t *pMac_direction)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_dot1x_macBasedDirection_get(pMac_direction);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_dot1x_guestVlan_set
+ * Description:
+ *      Set 802.1x guest VLAN configuration
+ * Input:
+ *      vid - 802.1x guest VLAN ID
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_INPUT        - Invalid input parameter.
+ * Note:
+ *      The API can set the 802.1x guest VLAN.
+ */
+rtk_api_ret_t rtk_dot1x_guestVlan_set(rtk_vlan_t vid)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_dot1x_guestVlan_set(vid);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_dot1x_guestVlan_get
+ * Description:
+ *      Get 802.1x guest VLAN configuration
+ * Input:
+ *      None
+ * Output:
+ *      pVid - 802.1x guest VLAN ID
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_INPUT        - Invalid input parameters.
+ * Note:
+ *      The API can get 802.1x guest VLAN information.
+ */
+rtk_api_ret_t rtk_dot1x_guestVlan_get(rtk_vlan_t *pVid)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_dot1x_guestVlan_get(pVid);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_dot1x_guestVlan2Auth_set
+ * Description:
+ *      Set 802.1x guest VLAN to auth host configuration
+ * Input:
+ *      enable - The status of guest VLAN to auth host.
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_INPUT        - Invalid input parameter.
+ * Note:
+ *      The status of 802.1x guest VLAN to auth host control is as follows:
+ *      - ENABLED
+ *      - DISABLED
+ */
+rtk_api_ret_t rtk_dot1x_guestVlan2Auth_set(rtk_enable_t enable)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_dot1x_guestVlan2Auth_set(enable);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_dot1x_guestVlan2Auth_get
+ * Description:
+ *      Get 802.1x guest VLAN to auth host configuration
+ * Input:
+ *      None
+ * Output:
+ *      pEnable - The status of guest VLAN to auth host.
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_INPUT        - Invalid input parameters.
+ * Note:
+ *      The API can get 802.1x guest VLAN to auth host information.
+ */
+rtk_api_ret_t rtk_dot1x_guestVlan2Auth_get(rtk_enable_t *pEnable)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_dot1x_guestVlan2Auth_get(pEnable);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
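+
+/*
+ * Illustrative sketch (not part of the original driver): guest-VLAN setup.
+ * Unauthenticated hosts are moved to a guest VLAN instead of being dropped,
+ * and guestVlan2Auth controls whether guest hosts may reach authenticated
+ * ones. The port id and vid are placeholders; the block is guarded by #if 0
+ * and never compiled.
+ */
+#if 0
+static rtk_api_ret_t example_dot1x_guestVlan_setup(rtk_port_t port, rtk_vlan_t vid)
+{
+    rtk_api_ret_t retVal;
+
+    if ((retVal = rtk_dot1x_unauthPacketOper_set(port, DOT1X_ACTION_GUESTVLAN)) != RT_ERR_OK)
+        return retVal;
+
+    /* The VLAN member configuration is created on the fly if vid is new. */
+    if ((retVal = rtk_dot1x_guestVlan_set(vid)) != RT_ERR_OK)
+        return retVal;
+
+    /* Allow guest hosts to reach already-authenticated hosts. */
+    return rtk_dot1x_guestVlan2Auth_set(ENABLED);
+}
+#endif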
+
+
diff -Nruw linux-4.4.115-fbx/drivers/misc/freebox./rtlapi/dot1x.h linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/dot1x.h
--- linux-4.4.115-fbx/drivers/misc/freebox./rtlapi/dot1x.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/dot1x.h	2019-01-22 16:16:24.707257309 +0100
@@ -0,0 +1,472 @@
+/*
+ * Copyright (C) 2013 Realtek Semiconductor Corp.
+ * All Rights Reserved.
+ *
+ * This program is the proprietary software of Realtek Semiconductor
+ * Corporation and/or its licensors, and only be used, duplicated,
+ * modified or distributed under the authorized license from Realtek.
+ *
+ * ANY USE OF THE SOFTWARE OTHER THAN AS AUTHORIZED UNDER
+ * THIS LICENSE OR COPYRIGHT LAW IS PROHIBITED.
+ *
+ * Purpose : RTL8367/RTL8367C switch high-level API
+ *
+ * Feature : The file includes 1X module high-layer API definition
+ *
+ */
+
+#ifndef __RTK_API_DOT1X_H__
+#define __RTK_API_DOT1X_H__
+
+
+/* Type of port-based dot1x auth/unauth */
+typedef enum rtk_dot1x_auth_status_e
+{
+    UNAUTH = 0,
+    AUTH,
+    AUTH_STATUS_END
+} rtk_dot1x_auth_status_t;
+
+typedef enum rtk_dot1x_direction_e
+{
+    DIR_BOTH = 0,
+    DIR_IN,
+    DIRECTION_END
+} rtk_dot1x_direction_t;
+
+/* unauth pkt action */
+typedef enum rtk_dot1x_unauth_action_e
+{
+    DOT1X_ACTION_DROP = 0,
+    DOT1X_ACTION_TRAP2CPU,
+    DOT1X_ACTION_GUESTVLAN,
+    DOT1X_ACTION_END
+} rtk_dot1x_unauth_action_t;
+
+/* Function Name:
+ *      rtk_dot1x_unauthPacketOper_set
+ * Description:
+ *      Set 802.1x unauth action configuration.
+ * Input:
+ *      port            - Port id.
+ *      unauth_action   - 802.1X unauth action.
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_PORT_ID      - Invalid port number.
+ *      RT_ERR_INPUT        - Invalid input parameter.
+ * Note:
+ *      This API can set 802.1x unauth action configuration.
+ *      The unauth action is as follows:
+ *      - DOT1X_ACTION_DROP
+ *      - DOT1X_ACTION_TRAP2CPU
+ *      - DOT1X_ACTION_GUESTVLAN
+ */
+extern rtk_api_ret_t rtk_dot1x_unauthPacketOper_set(rtk_port_t port, rtk_dot1x_unauth_action_t unauth_action);
+
+/* Function Name:
+ *      rtk_dot1x_unauthPacketOper_get
+ * Description:
+ *      Get 802.1x unauth action configuration.
+ * Input:
+ *      port - Port id.
+ * Output:
+ *      pUnauth_action - 802.1X unauth action.
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_INPUT        - Invalid input parameters.
+ *      RT_ERR_PORT_ID      - Invalid port number.
+ * Note:
+ *      This API can get 802.1x unauth action configuration.
+ *      The unauth action is as follows:
+ *      - DOT1X_ACTION_DROP
+ *      - DOT1X_ACTION_TRAP2CPU
+ *      - DOT1X_ACTION_GUESTVLAN
+ */
+extern rtk_api_ret_t rtk_dot1x_unauthPacketOper_get(rtk_port_t port, rtk_dot1x_unauth_action_t *pUnauth_action);
+
+/* Function Name:
+ *      rtk_dot1x_eapolFrame2CpuEnable_set
+ * Description:
+ *      Set 802.1x EAPOL packet trap to CPU configuration
+ * Input:
+ *      enable - The status of 802.1x EAPOL packet.
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_ENABLE       - Invalid enable input.
+ * Note:
+ *      To support 802.1x authentication functionality, EAPOL frame (ether type = 0x888E) has to
+ *      be trapped to CPU.
+ *      The status of EAPOL frame trap to CPU is as follows:
+ *      - DISABLED
+ *      - ENABLED
+ */
+extern rtk_api_ret_t rtk_dot1x_eapolFrame2CpuEnable_set(rtk_enable_t enable);
+
+/* Function Name:
+ *      rtk_dot1x_eapolFrame2CpuEnable_get
+ * Description:
+ *      Get 802.1x EAPOL packet trap to CPU configuration
+ * Input:
+ *      None
+ * Output:
+ *      pEnable - The status of 802.1x EAPOL packet.
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_INPUT        - Invalid input parameters.
+ * Note:
+ *      To support 802.1x authentication functionality, EAPOL frame (ether type = 0x888E) has to
+ *      be trapped to CPU.
+ *      The status of EAPOL frame trap to CPU is as follows:
+ *      - DISABLED
+ *      - ENABLED
+ */
+extern rtk_api_ret_t rtk_dot1x_eapolFrame2CpuEnable_get(rtk_enable_t *pEnable);
+
+/* Function Name:
+ *      rtk_dot1x_portBasedEnable_set
+ * Description:
+ *      Set 802.1x port-based enable configuration
+ * Input:
+ *      port - Port id.
+ *      enable - The status of 802.1x port.
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK                   - OK
+ *      RT_ERR_FAILED               - Failed
+ *      RT_ERR_SMI                  - SMI access error
+ *      RT_ERR_PORT_ID              - Invalid port number.
+ *      RT_ERR_ENABLE               - Invalid enable input.
+ *      RT_ERR_DOT1X_PORTBASEDPNEN  - 802.1X port-based enable error
+ * Note:
+ *      The API can update the port-based port enable register content. If a port is 802.1x
+ *      port-based network access control "enabled", it should be authenticated so packets
+ *      from that port won't be dropped or trapped to CPU.
+ *      The status of 802.1x port-based network access control is as follows:
+ *      - DISABLED
+ *      - ENABLED
+ */
+extern rtk_api_ret_t rtk_dot1x_portBasedEnable_set(rtk_port_t port, rtk_enable_t enable);
+
+/* Function Name:
+ *      rtk_dot1x_portBasedEnable_get
+ * Description:
+ *      Get 802.1x port-based enable configuration
+ * Input:
+ *      port - Port id.
+ * Output:
+ *      pEnable - The status of 802.1x port.
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_INPUT        - Invalid input parameters.
+ *      RT_ERR_PORT_ID      - Invalid port number.
+ * Note:
+ *      The API can get the 802.1x port-based port status.
+ */
+extern rtk_api_ret_t rtk_dot1x_portBasedEnable_get(rtk_port_t port, rtk_enable_t *pEnable);
+
+/* Function Name:
+ *      rtk_dot1x_portBasedAuthStatus_set
+ * Description:
+ *      Set 802.1x port-based auth. port configuration
+ * Input:
+ *      port - Port id.
+ *      port_auth - The status of 802.1x port.
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK                   - OK
+ *      RT_ERR_FAILED               - Failed
+ *      RT_ERR_SMI                  - SMI access error
+ *      RT_ERR_PORT_ID              - Invalid port number.
+ *      RT_ERR_DOT1X_PORTBASEDAUTH  - 802.1X port-based auth error
+ * Note:
+ *      The authenticated status of 802.1x port-based network access control is as follows:
+ *      - UNAUTH
+ *      - AUTH
+ */
+extern rtk_api_ret_t rtk_dot1x_portBasedAuthStatus_set(rtk_port_t port, rtk_dot1x_auth_status_t port_auth);
+
+/* Function Name:
+ *      rtk_dot1x_portBasedAuthStatus_get
+ * Description:
+ *      Get 802.1x port-based auth. port configuration
+ * Input:
+ *      port - Port id.
+ * Output:
+ *      pPort_auth - The status of 802.1x port.
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_INPUT        - Invalid input parameters.
+ *      RT_ERR_PORT_ID      - Invalid port number.
+ * Note:
+ *      The API can get 802.1x port-based port authentication information.
+ */
+extern rtk_api_ret_t rtk_dot1x_portBasedAuthStatus_get(rtk_port_t port, rtk_dot1x_auth_status_t *pPort_auth);
+
+/* Function Name:
+ *      rtk_dot1x_portBasedDirection_set
+ * Description:
+ *      Set 802.1x port-based operational direction configuration
+ * Input:
+ *      port            - Port id.
+ *      port_direction  - Operation direction
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK                   - OK
+ *      RT_ERR_FAILED               - Failed
+ *      RT_ERR_SMI                  - SMI access error
+ *      RT_ERR_PORT_ID              - Invalid port number.
+ *      RT_ERR_DOT1X_PORTBASEDOPDIR - 802.1X port-based operation direction error
+ * Note:
+ *      The controlled direction of 802.1x port-based network access control is as follows:
+ *      - BOTH
+ *      - IN
+ */
+extern rtk_api_ret_t rtk_dot1x_portBasedDirection_set(rtk_port_t port, rtk_dot1x_direction_t port_direction);
+
+/* Function Name:
+ *      rtk_dot1x_portBasedDirection_get
+ * Description:
+ *      Get 802.1X port-based operational direction configuration
+ * Input:
+ *      port - Port id.
+ * Output:
+ *      pPort_direction - Operation direction
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_INPUT        - Invalid input parameters.
+ *      RT_ERR_PORT_ID      - Invalid port number.
+ * Note:
+ *      The API can get 802.1x port-based operational direction information.
+ */
+extern rtk_api_ret_t rtk_dot1x_portBasedDirection_get(rtk_port_t port, rtk_dot1x_direction_t *pPort_direction);
+
+/* Function Name:
+ *      rtk_dot1x_macBasedEnable_set
+ * Description:
+ *      Set 802.1x mac-based port enable configuration
+ * Input:
+ *      port - Port id.
+ *      enable - The status of 802.1x port.
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK                   - OK
+ *      RT_ERR_FAILED               - Failed
+ *      RT_ERR_SMI                  - SMI access error
+ *      RT_ERR_PORT_ID              - Invalid port number.
+ *      RT_ERR_ENABLE               - Invalid enable input.
+ *      RT_ERR_DOT1X_MACBASEDPNEN   - 802.1X mac-based enable error
+ * Note:
+ *      If a port is 802.1x MAC-based network access control "enabled", the incoming packets should
+ *      be authenticated so packets from that port won't be dropped or trapped to CPU.
+ *      The status of 802.1x MAC-based network access control is as follows:
+ *      - DISABLED
+ *      - ENABLED
+ */
+extern rtk_api_ret_t rtk_dot1x_macBasedEnable_set(rtk_port_t port, rtk_enable_t enable);
+
+/* Function Name:
+ *      rtk_dot1x_macBasedEnable_get
+ * Description:
+ *      Get 802.1x mac-based port enable configuration
+ * Input:
+ *      port - Port id.
+ * Output:
+ *      pEnable - The status of 802.1x port.
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_INPUT        - Invalid input parameters.
+ *      RT_ERR_PORT_ID      - Invalid port number.
+ * Note:
+ *      If a port is 802.1x MAC-based network access control "enabled", the incoming packets should
+ *      be authenticated so packets from that port won't be dropped or trapped to CPU.
+ *      The status of 802.1x MAC-based network access control is as follows:
+ *      - DISABLED
+ *      - ENABLED
+ */
+extern rtk_api_ret_t rtk_dot1x_macBasedEnable_get(rtk_port_t port, rtk_enable_t *pEnable);
+
+/* Function Name:
+ *      rtk_dot1x_macBasedAuthMac_add
+ * Description:
+ *      Add an authenticated MAC to the ASIC
+ * Input:
+ *      port        - Port id.
+ *      pAuth_mac   - The authenticated MAC.
+ *      fid         - filtering database.
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK                   - OK
+ *      RT_ERR_FAILED               - Failed
+ *      RT_ERR_SMI                  - SMI access error
+ *      RT_ERR_PORT_ID              - Invalid port number.
+ *      RT_ERR_ENABLE               - Invalid enable input.
+ *      RT_ERR_DOT1X_MACBASEDPNEN   - 802.1X mac-based enable error
+ * Note:
+ *      The API can add an 802.1x authenticated MAC address to a port. If the MAC does not exist
+ *      in the LUT, the user can't set this MAC to the authenticated state.
+ */
+extern rtk_api_ret_t rtk_dot1x_macBasedAuthMac_add(rtk_port_t port, rtk_mac_t *pAuth_mac, rtk_fid_t fid);
+
+/* Function Name:
+ *      rtk_dot1x_macBasedAuthMac_del
+ * Description:
+ *      Delete an authenticated MAC from the ASIC
+ * Input:
+ *      port - Port id.
+ *      pAuth_mac - The authenticated MAC.
+ *      fid - filtering database.
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_MAC          - Invalid MAC address.
+ *      RT_ERR_PORT_ID      - Invalid port number.
+ * Note:
+ *      The API can delete an 802.1x authenticated MAC address from a port. It only changes the
+ *      auth status of the MAC and won't delete it from the LUT.
+ */
+extern rtk_api_ret_t rtk_dot1x_macBasedAuthMac_del(rtk_port_t port, rtk_mac_t *pAuth_mac, rtk_fid_t fid);
+
+/* Function Name:
+ *      rtk_dot1x_macBasedDirection_set
+ * Description:
+ *      Set 802.1x mac-based operational direction configuration
+ * Input:
+ *      mac_direction - Operation direction
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK                   - OK
+ *      RT_ERR_FAILED               - Failed
+ *      RT_ERR_SMI                  - SMI access error
+ *      RT_ERR_INPUT                - Invalid input parameter.
+ *      RT_ERR_DOT1X_MACBASEDOPDIR  - 802.1X mac-based operation direction error
+ * Note:
+ *      The controlled direction of 802.1x mac-based network access control is as follows:
+ *      - BOTH
+ *      - IN
+ */
+extern rtk_api_ret_t rtk_dot1x_macBasedDirection_set(rtk_dot1x_direction_t mac_direction);
+
+/* Function Name:
+ *      rtk_dot1x_macBasedDirection_get
+ * Description:
+ *      Get 802.1x mac-based operational direction configuration
+ * Input:
+ *      None
+ * Output:
+ *      pMac_direction - Operation direction
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_INPUT        - Invalid input parameters.
+ * Note:
+ *      The API can get 802.1x mac-based operational direction information.
+ */
+extern rtk_api_ret_t rtk_dot1x_macBasedDirection_get(rtk_dot1x_direction_t *pMac_direction);
+
+/* Function Name:
+ *      rtk_dot1x_guestVlan_set
+ * Description:
+ *      Set 802.1x guest VLAN configuration
+ * Input:
+ *      vid - 802.1x guest VLAN ID
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_INPUT        - Invalid input parameter.
+ * Note:
+ *      The API can set the 802.1x guest VLAN.
+ */
+extern rtk_api_ret_t rtk_dot1x_guestVlan_set(rtk_vlan_t vid);
+
+/* Function Name:
+ *      rtk_dot1x_guestVlan_get
+ * Description:
+ *      Get 802.1x guest VLAN configuration
+ * Input:
+ *      None
+ * Output:
+ *      pVid - 802.1x guest VLAN ID
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_INPUT        - Invalid input parameters.
+ * Note:
+ *      The API can get 802.1x guest VLAN information.
+ */
+extern rtk_api_ret_t rtk_dot1x_guestVlan_get(rtk_vlan_t *pVid);
+
+/* Function Name:
+ *      rtk_dot1x_guestVlan2Auth_set
+ * Description:
+ *      Set 802.1x guest VLAN to auth host configuration
+ * Input:
+ *      enable - The status of guest VLAN to auth host.
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_INPUT        - Invalid input parameter.
+ * Note:
+ *      The status of 802.1x guest VLAN to auth host control is as follows:
+ *      - ENABLED
+ *      - DISABLED
+ */
+extern rtk_api_ret_t rtk_dot1x_guestVlan2Auth_set(rtk_enable_t enable);
+
+/* Function Name:
+ *      rtk_dot1x_guestVlan2Auth_get
+ * Description:
+ *      Get 802.1x guest VLAN to auth host configuration
+ * Input:
+ *      None
+ * Output:
+ *      pEnable - The status of guest VLAN to auth host.
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_INPUT        - Invalid input parameters.
+ * Note:
+ *      The API can get 802.1x guest VLAN to auth host information.
+ */
+extern rtk_api_ret_t rtk_dot1x_guestVlan2Auth_get(rtk_enable_t *pEnable);
+
+
+#endif /* __RTK_API_DOT1X_H__ */
+
diff -Nruw linux-4.4.115-fbx/drivers/misc/freebox./rtlapi/eee.c linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/eee.c
--- linux-4.4.115-fbx/drivers/misc/freebox./rtlapi/eee.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/eee.c	2019-01-22 16:16:24.707257309 +0100
@@ -0,0 +1,197 @@
+/*
+ * Copyright (C) 2013 Realtek Semiconductor Corp.
+ * All Rights Reserved.
+ *
+ * This program is the proprietary software of Realtek Semiconductor
+ * Corporation and/or its licensors, and only be used, duplicated,
+ * modified or distributed under the authorized license from Realtek.
+ *
+ * ANY USE OF THE SOFTWARE OTHER THAN AS AUTHORIZED UNDER
+ * THIS LICENSE OR COPYRIGHT LAW IS PROHIBITED.
+ *
+ * $Revision: 79629 $
+ * $Date: 2017-06-14 18:08:03 +0800 (Wed, 14 Jun 2017) $
+ *
+ * Purpose : RTK switch high-level API for RTL8367/RTL8367C
+ * Feature : Here is a list of all functions and variables in EEE module.
+ *
+ */
+
+#include <rtk_switch.h>
+#include <rtk_error.h>
+#include <eee.h>
+#include <string.h>
+
+#include <rtl8367c_asicdrv.h>
+#include <rtl8367c_asicdrv_eee.h>
+#include <rtl8367c_asicdrv_phy.h>
+
+static rtk_api_ret_t _rtk_eee_init(void)
+{
+    rtk_api_ret_t retVal;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if((retVal = rtl8367c_setAsicRegBit(0x0018, 10, 1)) != RT_ERR_OK)
+        return retVal;
+
+    if((retVal = rtl8367c_setAsicRegBit(0x0018, 11, 1)) != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_eee_portEnable_set(rtk_port_t port, rtk_enable_t enable)
+{
+    rtk_api_ret_t   retVal;
+    rtk_uint32      regData;
+    rtk_uint32    phy_port;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    /* Check port is UTP port */
+    RTK_CHK_PORT_IS_UTP(port);
+
+    if (enable>=RTK_ENABLE_END)
+        return RT_ERR_INPUT;
+
+    phy_port = rtk_switch_port_L2P_get(port);
+
+    if ((retVal = rtl8367c_setAsicEee100M(phy_port,enable))!=RT_ERR_OK)
+        return retVal;
+    if ((retVal = rtl8367c_setAsicEeeGiga(phy_port,enable))!=RT_ERR_OK)
+        return retVal;
+
+    if ((retVal = rtl8367c_setAsicPHYReg(phy_port, RTL8367C_PHY_PAGE_ADDRESS, 0))!=RT_ERR_OK)
+        return retVal;
+    if ((retVal = rtl8367c_getAsicPHYReg(phy_port, 0, &regData))!=RT_ERR_OK)
+        return retVal;
+    regData |= 0x0200;    /* bit 9 of PHY register 0: restart auto-negotiation */
+    if ((retVal = rtl8367c_setAsicPHYReg(phy_port, 0, regData))!=RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_eee_portEnable_get(rtk_port_t port, rtk_enable_t *pEnable)
+{
+    rtk_api_ret_t retVal;
+    rtk_uint32 regData1, regData2;
+    rtk_uint32    phy_port;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    /* Check port is UTP port */
+    RTK_CHK_PORT_IS_UTP(port);
+
+    if(NULL == pEnable)
+        return RT_ERR_NULL_POINTER;
+
+    phy_port = rtk_switch_port_L2P_get(port);
+
+    if ((retVal = rtl8367c_getAsicEee100M(phy_port,&regData1))!=RT_ERR_OK)
+        return retVal;
+    if ((retVal = rtl8367c_getAsicEeeGiga(phy_port,&regData2))!=RT_ERR_OK)
+        return retVal;
+
+    if ((regData1 == 1) && (regData2 == 1))
+        *pEnable = ENABLED;
+    else
+        *pEnable = DISABLED;
+
+    return RT_ERR_OK;
+}
+
+/* Function Name:
+ *      rtk_eee_init
+ * Description:
+ *      EEE function initialization.
+ * Input:
+ *      None
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK              - OK
+ *      RT_ERR_FAILED          - Failed
+ *      RT_ERR_SMI             - SMI access error
+ * Note:
+ *      This API is used to initialize EEE status.
+ */
+rtk_api_ret_t rtk_eee_init(void)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_eee_init();
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_eee_portEnable_set
+ * Description:
+ *      Set enable status of EEE function.
+ * Input:
+ *      port - port id.
+ *      enable - enable EEE status.
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK              - OK
+ *      RT_ERR_FAILED          - Failed
+ *      RT_ERR_SMI             - SMI access error
+ *      RT_ERR_PORT_ID - Invalid port number.
+ *      RT_ERR_ENABLE - Invalid enable input.
+ * Note:
+ *      This API can set the EEE function for the specific port.
+ *      The configuration of the port is as follows:
+ *      - DISABLED
+ *      - ENABLED
+ */
+rtk_api_ret_t rtk_eee_portEnable_set(rtk_port_t port, rtk_enable_t enable)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_eee_portEnable_set(port, enable);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_eee_portEnable_get
+ * Description:
+ *      Get enable status of EEE function
+ * Input:
+ *      port - Port id.
+ * Output:
+ *      pEnable - The status of EEE function.
+ * Return:
+ *      RT_ERR_OK              - OK
+ *      RT_ERR_FAILED          - Failed
+ *      RT_ERR_SMI             - SMI access error
+ *      RT_ERR_PORT_ID - Invalid port number.
+ * Note:
+ *      This API can get the EEE status of the specific port.
+ *      The configuration of the port is as follows:
+ *      - DISABLED
+ *      - ENABLED
+ */
+rtk_api_ret_t rtk_eee_portEnable_get(rtk_port_t port, rtk_enable_t *pEnable)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_eee_portEnable_get(port, pEnable);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
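+
+/*
+ * Illustrative sketch (not part of the original driver): enable EEE on one
+ * UTP port and read the setting back. _rtk_eee_portEnable_set also restarts
+ * auto-negotiation (PHY register 0, bit 9) so the new EEE advertisement is
+ * renegotiated with the link partner. The port id is a placeholder; the
+ * block is guarded by #if 0 and never compiled.
+ */
+#if 0
+static rtk_api_ret_t example_eee_enable(rtk_port_t port)
+{
+    rtk_api_ret_t retVal;
+    rtk_enable_t state;
+
+    if ((retVal = rtk_eee_init()) != RT_ERR_OK)
+        return retVal;
+
+    if ((retVal = rtk_eee_portEnable_set(port, ENABLED)) != RT_ERR_OK)
+        return retVal;
+
+    /* Reads back ENABLED only if both the 100M and giga EEE bits are set. */
+    return rtk_eee_portEnable_get(port, &state);
+}
+#endif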
+
+
diff -Nruw linux-4.4.115-fbx/drivers/misc/freebox./rtlapi/eee.h linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/eee.h
--- linux-4.4.115-fbx/drivers/misc/freebox./rtlapi/eee.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/eee.h	2019-01-22 16:16:24.707257309 +0100
@@ -0,0 +1,84 @@
+/*
+ * Copyright (C) 2013 Realtek Semiconductor Corp.
+ * All Rights Reserved.
+ *
+ * This program is the proprietary software of Realtek Semiconductor
+ * Corporation and/or its licensors, and only be used, duplicated,
+ * modified or distributed under the authorized license from Realtek.
+ *
+ * ANY USE OF THE SOFTWARE OTHER THAN AS AUTHORIZED UNDER
+ * THIS LICENSE OR COPYRIGHT LAW IS PROHIBITED.
+ *
+ * Purpose : RTL8367/RTL8367C switch high-level API
+ *
+ * Feature : The file includes EEE module high-layer API definition
+ *
+ */
+
+#ifndef __RTK_API_EEE_H__
+#define __RTK_API_EEE_H__
+
+/* Function Name:
+ *      rtk_eee_init
+ * Description:
+ *      EEE function initialization.
+ * Input:
+ *      None
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK              - OK
+ *      RT_ERR_FAILED          - Failed
+ *      RT_ERR_SMI             - SMI access error
+ * Note:
+ *      This API is used to initialize EEE status.
+ */
+extern rtk_api_ret_t rtk_eee_init(void);
+
+/* Function Name:
+ *      rtk_eee_portEnable_set
+ * Description:
+ *      Set enable status of EEE function.
+ * Input:
+ *      port - port id.
+ *      enable - enable EEE status.
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK              - OK
+ *      RT_ERR_FAILED          - Failed
+ *      RT_ERR_SMI             - SMI access error
+ *      RT_ERR_PORT_ID - Invalid port number.
+ *      RT_ERR_ENABLE - Invalid enable input.
+ * Note:
+ *      This API can set the EEE function for the specific port.
+ *      The configuration of the port is as follows:
+ *      - DISABLED
+ *      - ENABLED
+ */
+extern rtk_api_ret_t rtk_eee_portEnable_set(rtk_port_t port, rtk_enable_t enable);
+
+/* Function Name:
+ *      rtk_eee_portEnable_get
+ * Description:
+ *      Get enable status of EEE function.
+ * Input:
+ *      port - Port id.
+ * Output:
+ *      pEnable - The status of EEE function.
+ * Return:
+ *      RT_ERR_OK              - OK
+ *      RT_ERR_FAILED          - Failed
+ *      RT_ERR_SMI             - SMI access error
+ *      RT_ERR_PORT_ID - Invalid port number.
+ * Note:
+ *      This API can get the EEE status of the specific port.
+ *      The configuration of the port is as follows:
+ *      - DISABLED
+ *      - ENABLED
+ */
+extern rtk_api_ret_t rtk_eee_portEnable_get(rtk_port_t port, rtk_enable_t *pEnable);
+
+
+#endif /* __RTK_API_EEE_H__ */
+
diff -Nruw linux-4.4.115-fbx/drivers/misc/freebox./rtlapi/i2c.c linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/i2c.c
--- linux-4.4.115-fbx/drivers/misc/freebox./rtlapi/i2c.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/i2c.c	2019-01-22 16:16:24.707257309 +0100
@@ -0,0 +1,536 @@
+/* Copyright (C) 2013 Realtek Semiconductor Corp.
+ * All Rights Reserved.
+ *
+ * This program is the proprietary software of Realtek Semiconductor
+ * Corporation and/or its licensors, and only be used, duplicated,
+ * modified or distributed under the authorized license from Realtek.
+ *
+ * ANY USE OF THE SOFTWARE OTHER THAN AS AUTHORIZED UNDER
+ * THIS LICENSE OR COPYRIGHT LAW IS PROHIBITED.
+ *
+ * $Revision: 63932 $
+ * $Date: 2015-12-08 14:06:29 +0800 (Tue, 08 Dec 2015) $
+ *
+ * Purpose : RTK switch high-level API for RTL8367/RTL8367C
+ * Feature : Here is a list of all functions and variables in i2c module.
+ *
+ */
+
+#include <rtk_switch.h>
+#include <rtk_error.h>
+#include <port.h>
+#include <string.h>
+#include <rtl8367c_reg.h>
+
+#include <rtl8367c_asicdrv_i2c.h>
+#include <rtl8367c_asicdrv.h>
+#include <rtk_types.h>
+#include <i2c.h>
+
+
+static rtk_I2C_16bit_mode_t rtk_i2c_mode = I2C_LSB_16BIT_MODE;
+
+
+static rtk_api_ret_t _rtk_i2c_init(void)
+{
+    rtk_uint32 retVal;
+    switch_chip_t ChipID;
+
+    /* probe switch */
+    if((retVal = rtk_switch_probe(&ChipID)) != RT_ERR_OK)
+        return retVal;
+
+    if(ChipID == CHIP_RTL8370B)
+    {
+        /* set GPIO8, GPIO9 open-drain as I2C, clock = 124kHz */
+        if((retVal = rtl8367c_setAsicReg(RTL8367C_REG_M_I2C_SYS_CTL, 0x5c3f)) != RT_ERR_OK)
+            return retVal;
+    }
+    else
+        return RT_ERR_FAILED;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_i2c_mode_set( rtk_I2C_16bit_mode_t i2cmode )
+{
+    if(i2cmode >= I2C_Mode_END)
+        return RT_ERR_INPUT;
+
+    if((i2cmode == I2C_70B_LSB_16BIT_MODE) || (i2cmode == I2C_LSB_16BIT_MODE))
+    {
+        rtk_i2c_mode = i2cmode;
+        return RT_ERR_OK;
+    }
+
+    return RT_ERR_FAILED;
+}
+
+static rtk_api_ret_t _rtk_i2c_mode_get( rtk_I2C_16bit_mode_t * pI2cMode)
+{
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if(NULL == pI2cMode)
+        return RT_ERR_NULL_POINTER;
+
+    if(rtk_i2c_mode == I2C_70B_LSB_16BIT_MODE)
+        *pI2cMode = 1;
+    else if(rtk_i2c_mode == I2C_LSB_16BIT_MODE)
+        *pI2cMode = 0;
+    else
+        return RT_ERR_FAILED;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_i2c_gpioPinGroup_set( rtk_I2C_gpio_pin_t pins_group )
+{
+    rtk_uint32 retVal;
+
+    if((pins_group > I2C_GPIO_PIN_END) || (pins_group < I2C_GPIO_PIN_8_9))
+        return RT_ERR_INPUT;
+
+    if((retVal = rtl8367c_setAsicI2CGpioPinGroup(pins_group)) != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_i2c_gpioPinGroup_get( rtk_I2C_gpio_pin_t * pPins_group )
+{
+    rtk_uint32 retVal;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if(NULL == pPins_group)
+        return RT_ERR_NULL_POINTER;
+
+    if((retVal = rtl8367c_getAsicI2CGpioPinGroup(pPins_group)) != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_i2c_data_read(rtk_uint8 deviceAddr, rtk_uint32 slaveRegAddr, rtk_uint32 *pRegData)
+{
+     rtk_uint32 retVal, counter=0;
+     rtk_uint8 controlByte_W, controlByte_R;
+     rtk_uint8 slaveRegAddr_L, slaveRegAddr_H = 0x0, temp;
+     rtk_uint8 regData_L, regData_H;
+
+    /* control bytes: deviceAddress + W, deviceAddress + R */
+    controlByte_W = (rtk_uint8)(deviceAddr << 1);
+    controlByte_R = (rtk_uint8)(controlByte_W | 0x1);
+
+    slaveRegAddr_L = (rtk_uint8)(slaveRegAddr & 0x00FF);
+    slaveRegAddr_H = (rtk_uint8)(slaveRegAddr >> 8);
+
+    /* swap register-address byte order in 70B mode */
+    if(rtk_i2c_mode == I2C_70B_LSB_16BIT_MODE)
+    {
+        temp = slaveRegAddr_L;
+        slaveRegAddr_L = slaveRegAddr_H;
+        slaveRegAddr_H = temp;
+    }
+
+    /* check bus state: idle */
+    for(counter = 3000; counter > 0; counter--)
+    {
+        if((retVal = rtl8367c_setAsicI2C_checkBusIdle()) == RT_ERR_OK)
+            break;
+    }
+    if(counter == 0)
+        return retVal; /* i2c is busy */
+
+    /* tx Start cmd */
+    if((retVal = rtl8367c_setAsicI2CStartCmd()) != RT_ERR_OK)
+        return retVal;
+
+    /* tx control byte + W */
+    if((retVal = rtl8367c_setAsicI2CTxOneCharCmd(controlByte_W)) != RT_ERR_OK)
+        return retVal;
+
+    /* check RX ack from slave */
+    if((retVal = rtl8367c_setAsicI2CcheckRxAck()) != RT_ERR_OK)
+        return retVal;
+
+    /* tx slave buffer address low 8 bits */
+    if((retVal = rtl8367c_setAsicI2CTxOneCharCmd(slaveRegAddr_L)) != RT_ERR_OK)
+        return retVal;
+
+    /* check RX ack from slave */
+    if((retVal = rtl8367c_setAsicI2CcheckRxAck()) != RT_ERR_OK)
+        return retVal;
+
+    /* tx slave buffer address high 8 bits */
+    if((retVal = rtl8367c_setAsicI2CTxOneCharCmd(slaveRegAddr_H)) != RT_ERR_OK)
+        return retVal;
+
+    /* check RX ack from slave */
+    if((retVal = rtl8367c_setAsicI2CcheckRxAck()) != RT_ERR_OK)
+        return retVal;
+
+    /* tx repeated Start cmd */
+    if((retVal = rtl8367c_setAsicI2CStartCmd()) != RT_ERR_OK)
+        return retVal;
+
+    /* tx control byte + R */
+    if((retVal = rtl8367c_setAsicI2CTxOneCharCmd(controlByte_R)) != RT_ERR_OK)
+        return retVal;
+
+    /* check RX ack from slave */
+    if((retVal = rtl8367c_setAsicI2CcheckRxAck()) != RT_ERR_OK)
+        return retVal;
+
+    /* rx low 8-bit data */
+    if((retVal = rtl8367c_setAsicI2CRxOneCharCmd(&regData_L)) != RT_ERR_OK)
+        return retVal;
+
+    /* tx ack to slave, keep receiving */
+    if((retVal = rtl8367c_setAsicI2CTxAckCmd()) != RT_ERR_OK)
+        return retVal;
+
+    /* rx high 8-bit data */
+    if((retVal = rtl8367c_setAsicI2CRxOneCharCmd(&regData_H)) != RT_ERR_OK)
+        return retVal;
+
+    /* tx NoAck to slave, stop receiving */
+    if((retVal = rtl8367c_setAsicI2CTxNoAckCmd()) != RT_ERR_OK)
+        return retVal;
+
+    /* tx Stop cmd */
+    if((retVal = rtl8367c_setAsicI2CStopCmd()) != RT_ERR_OK)
+        return retVal;
+
+    *pRegData = (regData_H << 8) | regData_L;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_i2c_data_write(rtk_uint8 deviceAddr, rtk_uint32 slaveRegAddr, rtk_uint32 regData)
+{
+     rtk_uint32 retVal,counter;
+     rtk_uint8 controlByte_W;
+     rtk_uint8 slaveRegAddr_L, slaveRegAddr_H = 0x0, temp;
+     rtk_uint8 regData_L, regData_H;
+
+    /* control byte: deviceAddress + W */
+    controlByte_W = (rtk_uint8)(deviceAddr << 1);
+
+    slaveRegAddr_L = (rtk_uint8)(slaveRegAddr & 0x00FF);
+    slaveRegAddr_H = (rtk_uint8)(slaveRegAddr >> 8);
+
+    regData_H = (rtk_uint8)(regData >> 8);
+    regData_L = (rtk_uint8)(regData & 0x00FF);
+
+    /* swap register-address byte order in 70B mode */
+    if(rtk_i2c_mode == I2C_70B_LSB_16BIT_MODE)
+    {
+        temp = slaveRegAddr_L;
+        slaveRegAddr_L = slaveRegAddr_H;
+        slaveRegAddr_H = temp;
+    }
+
+    /* check bus state: idle */
+    for(counter = 3000; counter > 0; counter--)
+    {
+        if((retVal = rtl8367c_setAsicI2C_checkBusIdle()) == RT_ERR_OK)
+            break;
+    }
+    if(counter == 0)
+        return retVal; /* i2c is busy */
+
+    /* tx Start cmd */
+    if((retVal = rtl8367c_setAsicI2CStartCmd()) != RT_ERR_OK)
+        return retVal;
+
+    /* tx control byte + W */
+    if((retVal = rtl8367c_setAsicI2CTxOneCharCmd(controlByte_W)) != RT_ERR_OK)
+        return retVal;
+
+    /* check RX ack from slave */
+    if((retVal = rtl8367c_setAsicI2CcheckRxAck()) != RT_ERR_OK)
+        return retVal;
+
+    /* tx slave buffer address low 8 bits */
+    if((retVal = rtl8367c_setAsicI2CTxOneCharCmd(slaveRegAddr_L)) != RT_ERR_OK)
+        return retVal;
+
+    /* check RX ack from slave */
+    if((retVal = rtl8367c_setAsicI2CcheckRxAck()) != RT_ERR_OK)
+        return retVal;
+
+    /* tx slave buffer address high 8 bits */
+    if((retVal = rtl8367c_setAsicI2CTxOneCharCmd(slaveRegAddr_H)) != RT_ERR_OK)
+        return retVal;
+
+    /* check RX ack from slave */
+    if((retVal = rtl8367c_setAsicI2CcheckRxAck()) != RT_ERR_OK)
+        return retVal;
+
+    /* tx data value LSB */
+    if((retVal = rtl8367c_setAsicI2CTxOneCharCmd(regData_L)) != RT_ERR_OK)
+        return retVal;
+
+    /* check RX ack from slave */
+    if((retVal = rtl8367c_setAsicI2CcheckRxAck()) != RT_ERR_OK)
+        return retVal;
+
+    /* tx data value MSB */
+    if((retVal = rtl8367c_setAsicI2CTxOneCharCmd(regData_H)) != RT_ERR_OK)
+        return retVal;
+
+    /* check RX ack from slave */
+    if((retVal = rtl8367c_setAsicI2CcheckRxAck()) != RT_ERR_OK)
+        return retVal;
+
+    /* tx Stop cmd */
+    if((retVal = rtl8367c_setAsicI2CStopCmd()) != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+/* Function Name:
+ *      rtk_i2c_init
+ * Description:
+ *      I2C smart function initialization.
+ * Input:
+ *      None
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ * Note:
+ *      This API is used to initialize the I2C master function.
+ *      The selected GPIO pins are driven as open-drain SDA and
+ *      clock (SCL) lines.
+ */
+rtk_api_ret_t rtk_i2c_init(void)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_i2c_init();
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_i2c_mode_set
+ * Description:
+ *      Set I2C data byte-order.
+ * Input:
+ *      i2cmode - byte-order mode
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_INPUT        - Invalid input parameter.
+ * Note:
+ *      This API sets the I2C traffic byte-order.
+ */
+rtk_api_ret_t rtk_i2c_mode_set( rtk_I2C_16bit_mode_t i2cmode )
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_i2c_mode_set(i2cmode);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_i2c_mode_get
+ * Description:
+ *      Get i2c traffic byte-order setting.
+ * Input:
+ *      None
+ * Output:
+ *      pI2cMode - i2c byte-order
+ * Return:
+ *      RT_ERR_OK               - OK
+ *      RT_ERR_FAILED           - Failed
+ *      RT_ERR_NULL_POINTER     - input parameter is null pointer
+ * Note:
+ *      This API gets the I2C traffic byte-order setting.
+ */
+rtk_api_ret_t rtk_i2c_mode_get( rtk_I2C_16bit_mode_t * pI2cMode)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_i2c_mode_get(pI2cMode);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_i2c_gpioPinGroup_set
+ * Description:
+ *      Set i2c SDA & SCL used GPIO pins group.
+ * Input:
+ *      pins_group - GPIO pins group
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK               - OK
+ *      RT_ERR_FAILED           - Failed
+ *      RT_ERR_INPUT        - Invalid input parameter.
+ * Note:
+ *      This API sets the GPIO pin group used for I2C SDA & SCL.
+ *      Three pin groups are available.
+ */
+rtk_api_ret_t rtk_i2c_gpioPinGroup_set( rtk_I2C_gpio_pin_t pins_group )
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_i2c_gpioPinGroup_set(pins_group);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_i2c_gpioPinGroup_get
+ * Description:
+ *      Get i2c SDA & SCL used GPIO pins group.
+ * Input:
+ *      None
+ * Output:
+ *      pPins_group - GPIO pins group
+ * Return:
+ *      RT_ERR_OK               - OK
+ *      RT_ERR_NULL_POINTER     - input parameter is null pointer
+ * Note:
+ *      This API gets the GPIO pin group used for I2C SDA & SCL.
+ *      Three pin groups are available.
+ */
+rtk_api_ret_t rtk_i2c_gpioPinGroup_get( rtk_I2C_gpio_pin_t * pPins_group )
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_i2c_gpioPinGroup_get(pPins_group);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_i2c_data_read
+ * Description:
+ *      Read an I2C slave device register.
+ * Input:
+ *      deviceAddr   -   access Slave device address
+ *      slaveRegAddr -   access Slave register address
+ * Output:
+ *      pRegData     -   read data
+ * Return:
+ *      RT_ERR_OK               - OK
+ *      RT_ERR_NULL_POINTER     - input parameter is null pointer
+ * Note:
+ *      This API accesses the I2C slave and reads a slave device register.
+ */
+rtk_api_ret_t rtk_i2c_data_read(rtk_uint8 deviceAddr, rtk_uint32 slaveRegAddr, rtk_uint32 *pRegData)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_i2c_data_read(deviceAddr, slaveRegAddr, pRegData);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_i2c_data_write
+ * Description:
+ *      Write data to an I2C slave device register.
+ * Input:
+ *      deviceAddr   -   access Slave device address
+ *      slaveRegAddr -   access Slave register address
+ *      regData      -   data to set
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK               - OK
+ * Note:
+ *      This API accesses the I2C slave and sets a slave device register.
+ */
+rtk_api_ret_t rtk_i2c_data_write(rtk_uint8 deviceAddr, rtk_uint32 slaveRegAddr, rtk_uint32 regData)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_i2c_data_write(deviceAddr, slaveRegAddr, regData);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
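+
+/*
+ * Usage sketch (illustrative only, not part of the Realtek API): access a
+ * hypothetical 16-bit-register slave at device address 0x50 through the
+ * switch's GPIO-based I2C master. The call sequence follows the APIs
+ * declared in i2c.h; the slave address, pin group, and register offset
+ * are assumptions.
+ */
+#if 0
+static void i2c_usage_example(void)
+{
+    rtk_uint32 val;
+
+    if(rtk_i2c_init() != RT_ERR_OK)
+        return;
+    if(rtk_i2c_gpioPinGroup_set(I2C_GPIO_PIN_8_9) != RT_ERR_OK)  /* SDA/SCL on GPIO 8 & 9 */
+        return;
+    if(rtk_i2c_mode_set(I2C_LSB_16BIT_MODE) != RT_ERR_OK)        /* register address LSB first */
+        return;
+
+    /* write 0x1234 to slave register 0x0010, then read it back */
+    if(rtk_i2c_data_write(0x50, 0x0010, 0x1234) != RT_ERR_OK)
+        return;
+    if(rtk_i2c_data_read(0x50, 0x0010, &val) != RT_ERR_OK)
+        return;
+}
+#endif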
+
+
+
diff -Nruw linux-4.4.115-fbx/drivers/misc/freebox./rtlapi/i2c.h linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/i2c.h
--- linux-4.4.115-fbx/drivers/misc/freebox./rtlapi/i2c.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/i2c.h	2019-01-22 16:16:24.707257309 +0100
@@ -0,0 +1,170 @@
+/*
+ * Copyright (C) 2013 Realtek Semiconductor Corp.
+ * All Rights Reserved.
+ *
+ * This program is the proprietary software of Realtek Semiconductor
+ * Corporation and/or its licensors, and only be used, duplicated,
+ * modified or distributed under the authorized license from Realtek.
+ *
+ * ANY USE OF THE SOFTWARE OTHER THAN AS AUTHORIZED UNDER
+ * THIS LICENSE OR COPYRIGHT LAW IS PROHIBITED.
+ *
+ * Purpose : RTL8367/RTL8367C switch high-level API
+ *
+ * Feature : The file includes I2C module high-layer API definition
+ *
+ */
+
+
+#ifndef __RTK_API_I2C_H__
+#define __RTK_API_I2C_H__
+#include <rtk_types.h>
+
+#define I2C_GPIO_MAX_GROUP (3)
+
+typedef enum rtk_I2C_16bit_mode_e{
+    I2C_LSB_16BIT_MODE = 0,
+    I2C_70B_LSB_16BIT_MODE,
+    I2C_Mode_END
+}rtk_I2C_16bit_mode_t;
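+
+/*
+ * Note (derived from the access routines in i2c.c): in I2C_LSB_16BIT_MODE
+ * the low byte of the 16-bit slave register address is transmitted first;
+ * I2C_70B_LSB_16BIT_MODE swaps the two address bytes so the high byte goes
+ * out first. Data bytes are always transferred low byte first.
+ */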
+
+
+typedef enum rtk_I2C_gpio_pin_e{
+    I2C_GPIO_PIN_8_9 = 0,
+    I2C_GPIO_PIN_15_16 ,
+    I2C_GPIO_PIN_35_36 ,
+    I2C_GPIO_PIN_END
+}rtk_I2C_gpio_pin_t;
+
+
+/* Function Name:
+ *      rtk_i2c_data_read
+ * Description:
+ *      Read an I2C slave device register.
+ * Input:
+ *      deviceAddr   -   access Slave device address
+ *      slaveRegAddr -   access Slave register address
+ * Output:
+ *      pRegData     -   read data
+ * Return:
+ *      RT_ERR_OK               - OK
+ *      RT_ERR_NULL_POINTER     - input parameter is null pointer
+ * Note:
+ *      This API accesses the I2C slave and reads a slave device register.
+ */
+extern rtk_api_ret_t rtk_i2c_data_read(rtk_uint8 deviceAddr, rtk_uint32 slaveRegAddr, rtk_uint32 *pRegData);
+
+/* Function Name:
+ *      rtk_i2c_data_write
+ * Description:
+ *      Write data to an I2C slave device register.
+ * Input:
+ *      deviceAddr   -   access Slave device address
+ *      slaveRegAddr -   access Slave register address
+ *      regData      -   data to set
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK               - OK
+ * Note:
+ *      This API accesses the I2C slave and sets a slave device register.
+ */
+extern rtk_api_ret_t rtk_i2c_data_write(rtk_uint8 deviceAddr, rtk_uint32 slaveRegAddr, rtk_uint32 regData);
+
+
+/* Function Name:
+ *      rtk_i2c_init
+ * Description:
+ *      I2C smart function initialization.
+ * Input:
+ *      None
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ * Note:
+ *      This API is used to initialize the I2C master function.
+ *      The selected GPIO pins are driven as open-drain SDA and
+ *      clock (SCL) lines.
+ */
+extern rtk_api_ret_t rtk_i2c_init(void);
+
+/* Function Name:
+ *      rtk_i2c_mode_set
+ * Description:
+ *      Set I2C data byte-order.
+ * Input:
+ *      i2cmode - byte-order mode
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_INPUT        - Invalid input parameter.
+ * Note:
+ *      This API sets the I2C traffic byte-order.
+ */
+extern rtk_api_ret_t rtk_i2c_mode_set( rtk_I2C_16bit_mode_t i2cmode);
+
+/* Function Name:
+ *      rtk_i2c_mode_get
+ * Description:
+ *      Get i2c traffic byte-order setting.
+ * Input:
+ *      None
+ * Output:
+ *      pI2cMode - i2c byte-order
+ * Return:
+ *      RT_ERR_OK               - OK
+ *      RT_ERR_FAILED           - Failed
+ *      RT_ERR_NULL_POINTER     - input parameter is null pointer
+ * Note:
+ *      This API gets the I2C traffic byte-order setting.
+ */
+extern rtk_api_ret_t rtk_i2c_mode_get( rtk_I2C_16bit_mode_t * pI2cMode);
+
+
+/* Function Name:
+ *      rtk_i2c_gpioPinGroup_set
+ * Description:
+ *      Set i2c SDA & SCL used GPIO pins group.
+ * Input:
+ *      pins_group - GPIO pins group
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK               - OK
+ *      RT_ERR_FAILED           - Failed
+ *      RT_ERR_INPUT        - Invalid input parameter.
+ * Note:
+ *      This API sets the GPIO pin group used for I2C SDA & SCL.
+ *      Three pin groups are available.
+ */
+extern rtk_api_ret_t rtk_i2c_gpioPinGroup_set( rtk_I2C_gpio_pin_t pins_group);
+
+/* Function Name:
+ *      rtk_i2c_gpioPinGroup_get
+ * Description:
+ *      Get i2c SDA & SCL used GPIO pins group.
+ * Input:
+ *      None
+ * Output:
+ *      pPins_group - GPIO pins group
+ * Return:
+ *      RT_ERR_OK               - OK
+ *      RT_ERR_NULL_POINTER     - input parameter is null pointer
+ * Note:
+ *      This API gets the GPIO pin group used for I2C SDA & SCL.
+ *      Three pin groups are available.
+ */
+extern rtk_api_ret_t rtk_i2c_gpioPinGroup_get(rtk_I2C_gpio_pin_t * pPins_group);
+
+#endif
+
diff -Nruw linux-4.4.115-fbx/drivers/misc/freebox./rtlapi/igmp.c linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/igmp.c
--- linux-4.4.115-fbx/drivers/misc/freebox./rtlapi/igmp.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/igmp.c	2019-01-22 16:16:24.707257309 +0100
@@ -0,0 +1,1954 @@
+/*
+ * Copyright (C) 2013 Realtek Semiconductor Corp.
+ * All Rights Reserved.
+ *
+ * This program is the proprietary software of Realtek Semiconductor
+ * Corporation and/or its licensors, and only be used, duplicated,
+ * modified or distributed under the authorized license from Realtek.
+ *
+ * ANY USE OF THE SOFTWARE OTHER THAN AS AUTHORIZED UNDER
+ * THIS LICENSE OR COPYRIGHT LAW IS PROHIBITED.
+ *
+ * $Revision: 79443 $
+ * $Date: 2017-06-06 20:35:43 +0800 (Tue, 06 Jun 2017) $
+ *
+ * Purpose : RTK switch high-level API for RTL8367/RTL8367C
+ * Feature : Here is a list of all functions and variables in IGMP module.
+ *
+ */
+
+#include <rtk_switch.h>
+#include <rtk_error.h>
+#include <igmp.h>
+#include <string.h>
+
+#include <rtl8367c_asicdrv.h>
+#include <rtl8367c_asicdrv_igmp.h>
+#include <rtl8367c_asicdrv_lut.h>
+
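+/*
+ * Defaults applied by _rtk_igmp_init() below: IP multicast lookup is
+ * enabled, IGMPv1/v2 and MLDv1 are handled by the ASIC while IGMPv3 and
+ * MLDv2 are flooded, all ports may become dynamic router ports, fast
+ * leave is enabled, and report/leave packets are forwarded to router
+ * ports only (flood mode 1).
+ */
+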
+static rtk_api_ret_t _rtk_igmp_init(void)
+{
+    rtk_api_ret_t retVal;
+    rtk_port_t port;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if ((retVal = rtl8367c_setAsicLutIpMulticastLookup(ENABLED))!=RT_ERR_OK)
+        return retVal;
+
+    if ((retVal = rtl8367c_setAsicLutIpLookupMethod(1))!=RT_ERR_OK)
+        return retVal;
+
+    RTK_SCAN_ALL_PHY_PORTMASK(port)
+    {
+        if ((retVal = rtl8367c_setAsicIGMPv1Opeartion(port, PROTOCOL_OP_ASIC))!=RT_ERR_OK)
+            return retVal;
+
+        if ((retVal = rtl8367c_setAsicIGMPv2Opeartion(port, PROTOCOL_OP_ASIC))!=RT_ERR_OK)
+            return retVal;
+
+        if ((retVal = rtl8367c_setAsicIGMPv3Opeartion(port, PROTOCOL_OP_FLOOD))!=RT_ERR_OK)
+            return retVal;
+
+        if ((retVal = rtl8367c_setAsicMLDv1Opeartion(port, PROTOCOL_OP_ASIC))!=RT_ERR_OK)
+            return retVal;
+
+        if ((retVal = rtl8367c_setAsicMLDv2Opeartion(port, PROTOCOL_OP_FLOOD))!=RT_ERR_OK)
+            return retVal;
+    }
+
+    if ((retVal = rtl8367c_setAsicIGMPAllowDynamicRouterPort(rtk_switch_phyPortMask_get()))!=RT_ERR_OK)
+        return retVal;
+
+    if ((retVal = rtl8367c_setAsicIGMPFastLeaveEn(ENABLED))!=RT_ERR_OK)
+        return retVal;
+
+    if ((retVal = rtl8367c_setAsicIGMPReportLeaveFlood(1))!=RT_ERR_OK)
+        return retVal;
+
+    if ((retVal = rtl8367c_setAsicIgmp(ENABLED))!=RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_igmp_state_set(rtk_enable_t enabled)
+{
+    rtk_api_ret_t retVal;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if (enabled >= RTK_ENABLE_END)
+        return RT_ERR_INPUT;
+
+    if ((retVal = rtl8367c_setAsicIgmp(enabled))!=RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_igmp_state_get(rtk_enable_t *pEnabled)
+{
+    rtk_api_ret_t retVal;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if(pEnabled == NULL)
+        return RT_ERR_NULL_POINTER;
+
+    if ((retVal = rtl8367c_getAsicIgmp(pEnabled))!=RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_igmp_static_router_port_set(rtk_portmask_t *pPortmask)
+{
+    rtk_api_ret_t retVal;
+    rtk_uint32 pmask;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    /* Check Valid port mask */
+    if(pPortmask == NULL)
+        return RT_ERR_NULL_POINTER;
+
+    RTK_CHK_PORTMASK_VALID(pPortmask);
+
+    if ((retVal = rtk_switch_portmask_L2P_get(pPortmask, &pmask))!=RT_ERR_OK)
+        return retVal;
+
+    if ((retVal = rtl8367c_setAsicIGMPStaticRouterPort(pmask))!=RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_igmp_static_router_port_get(rtk_portmask_t *pPortmask)
+{
+    rtk_api_ret_t retVal;
+    rtk_uint32 pmask;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if(pPortmask == NULL)
+        return RT_ERR_NULL_POINTER;
+
+    if ((retVal = rtl8367c_getAsicIGMPStaticRouterPort(&pmask))!=RT_ERR_OK)
+        return retVal;
+
+    if ((retVal = rtk_switch_portmask_P2L_get(pmask, pPortmask))!=RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_igmp_protocol_set(rtk_port_t port, rtk_igmp_protocol_t protocol, rtk_igmp_action_t action)
+{
+    rtk_uint32      operation;
+    rtk_api_ret_t   retVal;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    /* Check port valid */
+    RTK_CHK_PORT_VALID(port);
+
+    if(protocol >= PROTOCOL_END)
+        return RT_ERR_INPUT;
+
+    if(action >= IGMP_ACTION_END)
+        return RT_ERR_INPUT;
+
+    switch(action)
+    {
+        case IGMP_ACTION_FORWARD:
+            operation = PROTOCOL_OP_FLOOD;
+            break;
+        case IGMP_ACTION_TRAP2CPU:
+            operation = PROTOCOL_OP_TRAP;
+            break;
+        case IGMP_ACTION_DROP:
+            operation = PROTOCOL_OP_DROP;
+            break;
+        case IGMP_ACTION_ASIC:
+            operation = PROTOCOL_OP_ASIC;
+            break;
+        default:
+            return RT_ERR_INPUT;
+    }
+
+    switch(protocol)
+    {
+        case PROTOCOL_IGMPv1:
+            if ((retVal = rtl8367c_setAsicIGMPv1Opeartion(rtk_switch_port_L2P_get(port), operation))!=RT_ERR_OK)
+                return retVal;
+
+            break;
+        case PROTOCOL_IGMPv2:
+            if ((retVal = rtl8367c_setAsicIGMPv2Opeartion(rtk_switch_port_L2P_get(port), operation))!=RT_ERR_OK)
+                return retVal;
+
+            break;
+        case PROTOCOL_IGMPv3:
+            if ((retVal = rtl8367c_setAsicIGMPv3Opeartion(rtk_switch_port_L2P_get(port), operation))!=RT_ERR_OK)
+                return retVal;
+
+            break;
+        case PROTOCOL_MLDv1:
+            if ((retVal = rtl8367c_setAsicMLDv1Opeartion(rtk_switch_port_L2P_get(port), operation))!=RT_ERR_OK)
+                return retVal;
+
+            break;
+        case PROTOCOL_MLDv2:
+            if ((retVal = rtl8367c_setAsicMLDv2Opeartion(rtk_switch_port_L2P_get(port), operation))!=RT_ERR_OK)
+                return retVal;
+
+            break;
+        default:
+            return RT_ERR_INPUT;
+
+    }
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_igmp_protocol_get(rtk_port_t port, rtk_igmp_protocol_t protocol, rtk_igmp_action_t *pAction)
+{
+    rtk_uint32      operation;
+    rtk_api_ret_t   retVal;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    /* Check port valid */
+    RTK_CHK_PORT_VALID(port);
+
+    if(protocol >= PROTOCOL_END)
+        return RT_ERR_INPUT;
+
+    if(pAction == NULL)
+        return RT_ERR_NULL_POINTER;
+
+    switch(protocol)
+    {
+        case PROTOCOL_IGMPv1:
+            if ((retVal = rtl8367c_getAsicIGMPv1Opeartion(rtk_switch_port_L2P_get(port), &operation))!=RT_ERR_OK)
+                return retVal;
+
+            break;
+        case PROTOCOL_IGMPv2:
+            if ((retVal = rtl8367c_getAsicIGMPv2Opeartion(rtk_switch_port_L2P_get(port), &operation))!=RT_ERR_OK)
+                return retVal;
+
+            break;
+        case PROTOCOL_IGMPv3:
+            if ((retVal = rtl8367c_getAsicIGMPv3Opeartion(rtk_switch_port_L2P_get(port), &operation))!=RT_ERR_OK)
+                return retVal;
+
+            break;
+        case PROTOCOL_MLDv1:
+            if ((retVal = rtl8367c_getAsicMLDv1Opeartion(rtk_switch_port_L2P_get(port), &operation))!=RT_ERR_OK)
+                return retVal;
+
+            break;
+        case PROTOCOL_MLDv2:
+            if ((retVal = rtl8367c_getAsicMLDv2Opeartion(rtk_switch_port_L2P_get(port), &operation))!=RT_ERR_OK)
+                return retVal;
+
+            break;
+        default:
+            return RT_ERR_INPUT;
+
+    }
+
+    switch(operation)
+    {
+        case PROTOCOL_OP_FLOOD:
+            *pAction = IGMP_ACTION_FORWARD;
+            break;
+        case PROTOCOL_OP_TRAP:
+            *pAction = IGMP_ACTION_TRAP2CPU;
+            break;
+        case PROTOCOL_OP_DROP:
+            *pAction = IGMP_ACTION_DROP;
+            break;
+        case PROTOCOL_OP_ASIC:
+            *pAction = IGMP_ACTION_ASIC;
+            break;
+        default:
+            return RT_ERR_FAILED;
+    }
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_igmp_fastLeave_set(rtk_enable_t state)
+{
+    rtk_api_ret_t   retVal;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if(state >= RTK_ENABLE_END)
+        return RT_ERR_INPUT;
+
+    if ((retVal = rtl8367c_setAsicIGMPFastLeaveEn((rtk_uint32)state))!=RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_igmp_fastLeave_get(rtk_enable_t *pState)
+{
+    rtk_uint32      fast_leave;
+    rtk_api_ret_t   retVal;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if(pState == NULL)
+        return RT_ERR_NULL_POINTER;
+
+    if ((retVal = rtl8367c_getAsicIGMPFastLeaveEn(&fast_leave))!=RT_ERR_OK)
+        return retVal;
+
+    *pState = ((fast_leave == 1) ? ENABLED : DISABLED);
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_igmp_maxGroup_set(rtk_port_t port, rtk_uint32 group)
+{
+    rtk_api_ret_t   retVal;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    /* Check port valid */
+    RTK_CHK_PORT_VALID(port);
+
+    if(group > RTL8367C_IGMP_MAX_GOUP)
+        return RT_ERR_OUT_OF_RANGE;
+
+    if ((retVal = rtl8367c_setAsicIGMPPortMAXGroup(rtk_switch_port_L2P_get(port), group))!=RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_igmp_maxGroup_get(rtk_port_t port, rtk_uint32 *pGroup)
+{
+    rtk_api_ret_t   retVal;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    /* Check port valid */
+    RTK_CHK_PORT_VALID(port);
+
+    if(pGroup == NULL)
+        return RT_ERR_NULL_POINTER;
+
+    if ((retVal = rtl8367c_getAsicIGMPPortMAXGroup(rtk_switch_port_L2P_get(port), pGroup))!=RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_igmp_currentGroup_get(rtk_port_t port, rtk_uint32 *pGroup)
+{
+    rtk_api_ret_t   retVal;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    /* Check port valid */
+    RTK_CHK_PORT_VALID(port);
+
+    if(pGroup == NULL)
+        return RT_ERR_NULL_POINTER;
+
+    if ((retVal = rtl8367c_getAsicIGMPPortCurrentGroup(rtk_switch_port_L2P_get(port), pGroup))!=RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_igmp_tableFullAction_set(rtk_igmp_tableFullAction_t action)
+{
+    rtk_api_ret_t   retVal;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if(action >= IGMP_TABLE_FULL_OP_END)
+        return RT_ERR_INPUT;
+
+    if ((retVal = rtl8367c_setAsicIGMPTableFullOP((rtk_uint32)action))!=RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_igmp_tableFullAction_get(rtk_igmp_tableFullAction_t *pAction)
+{
+    rtk_api_ret_t   retVal;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if(NULL == pAction)
+        return RT_ERR_NULL_POINTER;
+
+    if ((retVal = rtl8367c_getAsicIGMPTableFullOP((rtk_uint32 *)pAction))!=RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_igmp_checksumErrorAction_set(rtk_igmp_checksumErrorAction_t action)
+{
+    rtk_api_ret_t   retVal;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if(action >= IGMP_CRC_ERR_OP_END)
+        return RT_ERR_INPUT;
+
+    if ((retVal = rtl8367c_setAsicIGMPCRCErrOP((rtk_uint32)action))!=RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_igmp_checksumErrorAction_get(rtk_igmp_checksumErrorAction_t *pAction)
+{
+    rtk_api_ret_t   retVal;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if(NULL == pAction)
+        return RT_ERR_NULL_POINTER;
+
+    if ((retVal = rtl8367c_getAsicIGMPCRCErrOP((rtk_uint32 *)pAction))!=RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_igmp_leaveTimer_set(rtk_uint32 timer)
+{
+    rtk_api_ret_t   retVal;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if(timer > RTL8367C_MAX_LEAVE_TIMER)
+        return RT_ERR_INPUT;
+
+    if ((retVal = rtl8367c_setAsicIGMPLeaveTimer(timer))!=RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_igmp_leaveTimer_get(rtk_uint32 *pTimer)
+{
+    rtk_api_ret_t   retVal;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if(NULL == pTimer)
+        return RT_ERR_NULL_POINTER;
+
+    if ((retVal = rtl8367c_getAsicIGMPLeaveTimer(pTimer))!=RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_igmp_queryInterval_set(rtk_uint32 interval)
+{
+    rtk_api_ret_t   retVal;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if(interval > RTL8367C_MAX_QUERY_INT)
+        return RT_ERR_INPUT;
+
+    if ((retVal = rtl8367c_setAsicIGMPQueryInterval(interval))!=RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_igmp_queryInterval_get(rtk_uint32 *pInterval)
+{
+    rtk_api_ret_t   retVal;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if(NULL == pInterval)
+        return RT_ERR_NULL_POINTER;
+
+    if ((retVal = rtl8367c_getAsicIGMPQueryInterval(pInterval))!=RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_igmp_robustness_set(rtk_uint32 robustness)
+{
+    rtk_api_ret_t   retVal;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if(robustness > RTL8367C_MAX_ROB_VAR)
+        return RT_ERR_INPUT;
+
+    if ((retVal = rtl8367c_setAsicIGMPRobVar(robustness))!=RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_igmp_robustness_get(rtk_uint32 *pRobustness)
+{
+    rtk_api_ret_t   retVal;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if(NULL == pRobustness)
+        return RT_ERR_NULL_POINTER;
+
+    if ((retVal = rtl8367c_getAsicIGMPRobVar(pRobustness))!=RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_igmp_dynamicRouterPortAllow_set(rtk_portmask_t *pPortmask)
+{
+    rtk_api_ret_t   retVal;
+    rtk_uint32 pmask;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if(NULL == pPortmask)
+        return RT_ERR_NULL_POINTER;
+
+    RTK_CHK_PORTMASK_VALID(pPortmask);
+
+    if ((retVal = rtk_switch_portmask_L2P_get(pPortmask, &pmask))!=RT_ERR_OK)
+        return retVal;
+
+    if ((retVal = rtl8367c_setAsicIGMPAllowDynamicRouterPort(pmask))!=RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_igmp_dynamicRouterPortAllow_get(rtk_portmask_t *pPortmask)
+{
+    rtk_api_ret_t   retVal;
+    rtk_uint32 pmask;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if(NULL == pPortmask)
+        return RT_ERR_NULL_POINTER;
+
+    if ((retVal = rtl8367c_getAsicIGMPAllowDynamicRouterPort(&pmask))!=RT_ERR_OK)
+        return retVal;
+
+    if ((retVal = rtk_switch_portmask_P2L_get(pmask, pPortmask))!=RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_igmp_dynamicRouterPort_get(rtk_igmp_dynamicRouterPort_t *pDynamicRouterPort)
+{
+    rtk_api_ret_t   retVal;
+    rtk_uint32 port;
+    rtk_uint32 timer;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if(NULL == pDynamicRouterPort)
+        return RT_ERR_NULL_POINTER;
+
+    if ((retVal = rtl8367c_getAsicIGMPdynamicRouterPort1(&port, &timer))!= RT_ERR_OK)
+        return retVal;
+
+    if (port == RTL8367C_ROUTER_PORT_INVALID)
+    {
+        pDynamicRouterPort->dynamicRouterPort0Valid = DISABLED;
+        pDynamicRouterPort->dynamicRouterPort0      = 0;
+        pDynamicRouterPort->dynamicRouterPort0Timer = 0;
+    }
+    else
+    {
+        pDynamicRouterPort->dynamicRouterPort0Valid = ENABLED;
+        pDynamicRouterPort->dynamicRouterPort0      = rtk_switch_port_P2L_get(port);
+        pDynamicRouterPort->dynamicRouterPort0Timer = timer;
+    }
+
+    if ((retVal = rtl8367c_getAsicIGMPdynamicRouterPort2(&port, &timer))!= RT_ERR_OK)
+        return retVal;
+
+    if (port == RTL8367C_ROUTER_PORT_INVALID)
+    {
+        pDynamicRouterPort->dynamicRouterPort1Valid = DISABLED;
+        pDynamicRouterPort->dynamicRouterPort1      = 0;
+        pDynamicRouterPort->dynamicRouterPort1Timer = 0;
+    }
+    else
+    {
+        pDynamicRouterPort->dynamicRouterPort1Valid = ENABLED;
+        pDynamicRouterPort->dynamicRouterPort1      = rtk_switch_port_P2L_get(port);
+        pDynamicRouterPort->dynamicRouterPort1Timer = timer;
+    }
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_igmp_suppressionEnable_set(rtk_enable_t reportSuppression, rtk_enable_t leaveSuppression)
+{
+    rtk_api_ret_t   retVal;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if(reportSuppression >= RTK_ENABLE_END)
+        return RT_ERR_INPUT;
+
+    if(leaveSuppression >= RTK_ENABLE_END)
+        return RT_ERR_INPUT;
+
+    if ((retVal = rtl8367c_setAsicIGMPSuppression((rtk_uint32)reportSuppression, (rtk_uint32)leaveSuppression))!=RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_igmp_suppressionEnable_get(rtk_enable_t *pReportSuppression, rtk_enable_t *pLeaveSuppression)
+{
+    rtk_api_ret_t   retVal;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if(NULL == pReportSuppression)
+        return RT_ERR_NULL_POINTER;
+
+    if(NULL == pLeaveSuppression)
+        return RT_ERR_NULL_POINTER;
+
+    if ((retVal = rtl8367c_getAsicIGMPSuppression((rtk_uint32 *)pReportSuppression, (rtk_uint32 *)pLeaveSuppression))!=RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_igmp_portRxPktEnable_set(rtk_port_t port, rtk_igmp_rxPktEnable_t *pRxCfg)
+{
+    rtk_api_ret_t   retVal;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    /* Check port valid */
+    RTK_CHK_PORT_VALID(port);
+
+    if(NULL == pRxCfg)
+        return RT_ERR_NULL_POINTER;
+
+    if(pRxCfg->rxQuery >= RTK_ENABLE_END)
+        return RT_ERR_INPUT;
+
+    if(pRxCfg->rxReport >= RTK_ENABLE_END)
+        return RT_ERR_INPUT;
+
+    if(pRxCfg->rxLeave >= RTK_ENABLE_END)
+        return RT_ERR_INPUT;
+
+    if(pRxCfg->rxMRP >= RTK_ENABLE_END)
+        return RT_ERR_INPUT;
+
+    if(pRxCfg->rxMcast >= RTK_ENABLE_END)
+        return RT_ERR_INPUT;
+
+    if ((retVal = rtl8367c_setAsicIGMPQueryRX(rtk_switch_port_L2P_get(port), (rtk_uint32)pRxCfg->rxQuery))!=RT_ERR_OK)
+        return retVal;
+
+    if ((retVal = rtl8367c_setAsicIGMPReportRX(rtk_switch_port_L2P_get(port), (rtk_uint32)pRxCfg->rxReport))!=RT_ERR_OK)
+        return retVal;
+
+    if ((retVal = rtl8367c_setAsicIGMPLeaveRX(rtk_switch_port_L2P_get(port), (rtk_uint32)pRxCfg->rxLeave))!=RT_ERR_OK)
+        return retVal;
+
+    if ((retVal = rtl8367c_setAsicIGMPMRPRX(rtk_switch_port_L2P_get(port), (rtk_uint32)pRxCfg->rxMRP))!=RT_ERR_OK)
+        return retVal;
+
+    if ((retVal = rtl8367c_setAsicIGMPMcDataRX(rtk_switch_port_L2P_get(port), (rtk_uint32)pRxCfg->rxMcast))!=RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_igmp_portRxPktEnable_get(rtk_port_t port, rtk_igmp_rxPktEnable_t *pRxCfg)
+{
+    rtk_api_ret_t   retVal;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    /* Check port valid */
+    RTK_CHK_PORT_VALID(port);
+
+    if(NULL == pRxCfg)
+        return RT_ERR_NULL_POINTER;
+
+    if ((retVal = rtl8367c_getAsicIGMPQueryRX(rtk_switch_port_L2P_get(port), (rtk_uint32 *)&(pRxCfg->rxQuery)))!=RT_ERR_OK)
+        return retVal;
+
+    if ((retVal = rtl8367c_getAsicIGMPReportRX(rtk_switch_port_L2P_get(port), (rtk_uint32 *)&(pRxCfg->rxReport)))!=RT_ERR_OK)
+        return retVal;
+
+    if ((retVal = rtl8367c_getAsicIGMPLeaveRX(rtk_switch_port_L2P_get(port), (rtk_uint32 *)&(pRxCfg->rxLeave)))!=RT_ERR_OK)
+        return retVal;
+
+    if ((retVal = rtl8367c_getAsicIGMPMRPRX(rtk_switch_port_L2P_get(port), (rtk_uint32 *)&(pRxCfg->rxMRP)))!=RT_ERR_OK)
+        return retVal;
+
+    if ((retVal = rtl8367c_getAsicIGMPMcDataRX(rtk_switch_port_L2P_get(port), (rtk_uint32 *)&(pRxCfg->rxMcast)))!=RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_igmp_groupInfo_get(rtk_uint32 index, rtk_igmp_groupInfo_t *pGroup)
+{
+    rtk_api_ret_t   retVal;
+    rtk_uint32      valid;
+    rtl8367c_igmpgroup  grp;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    /* Check index */
+    if(index > RTL8367C_IGMP_MAX_GOUP)
+        return RT_ERR_INPUT;
+
+    if(NULL == pGroup)
+        return RT_ERR_NULL_POINTER;
+
+    if ((retVal = rtl8367c_getAsicIGMPGroup(index, &valid, &grp))!=RT_ERR_OK)
+        return retVal;
+
+    memset(pGroup, 0x00, sizeof(rtk_igmp_groupInfo_t));
+    pGroup->valid = valid;
+    pGroup->reportSuppFlag = grp.report_supp_flag;
+
+    if(grp.p0_timer != 0)
+    {
+        RTK_PORTMASK_PORT_SET((pGroup->member), rtk_switch_port_P2L_get(0));
+        pGroup->timer[rtk_switch_port_P2L_get(0)] = grp.p0_timer;
+    }
+
+    if(grp.p1_timer != 0)
+    {
+        RTK_PORTMASK_PORT_SET((pGroup->member), rtk_switch_port_P2L_get(1));
+        pGroup->timer[rtk_switch_port_P2L_get(1)] = grp.p1_timer;
+    }
+
+    if(grp.p2_timer != 0)
+    {
+        RTK_PORTMASK_PORT_SET((pGroup->member), rtk_switch_port_P2L_get(2));
+        pGroup->timer[rtk_switch_port_P2L_get(2)] = grp.p2_timer;
+    }
+
+    if(grp.p3_timer != 0)
+    {
+        RTK_PORTMASK_PORT_SET((pGroup->member), rtk_switch_port_P2L_get(3));
+        pGroup->timer[rtk_switch_port_P2L_get(3)] = grp.p3_timer;
+    }
+
+    if(grp.p4_timer != 0)
+    {
+        RTK_PORTMASK_PORT_SET((pGroup->member), rtk_switch_port_P2L_get(4));
+        pGroup->timer[rtk_switch_port_P2L_get(4)] = grp.p4_timer;
+    }
+
+    if(grp.p5_timer != 0)
+    {
+        RTK_PORTMASK_PORT_SET((pGroup->member), rtk_switch_port_P2L_get(5));
+        pGroup->timer[rtk_switch_port_P2L_get(5)] = grp.p5_timer;
+    }
+
+    if(grp.p6_timer != 0)
+    {
+        RTK_PORTMASK_PORT_SET((pGroup->member), rtk_switch_port_P2L_get(6));
+        pGroup->timer[rtk_switch_port_P2L_get(6)] = grp.p6_timer;
+    }
+
+    if(grp.p7_timer != 0)
+    {
+        RTK_PORTMASK_PORT_SET((pGroup->member), rtk_switch_port_P2L_get(7));
+        pGroup->timer[rtk_switch_port_P2L_get(7)] = grp.p7_timer;
+    }
+
+    if(grp.p8_timer != 0)
+    {
+        RTK_PORTMASK_PORT_SET((pGroup->member), rtk_switch_port_P2L_get(8));
+        pGroup->timer[rtk_switch_port_P2L_get(8)] = grp.p8_timer;
+    }
+
+    if(grp.p9_timer != 0)
+    {
+        RTK_PORTMASK_PORT_SET((pGroup->member), rtk_switch_port_P2L_get(9));
+        pGroup->timer[rtk_switch_port_P2L_get(9)] = grp.p9_timer;
+    }
+
+    if(grp.p10_timer != 0)
+    {
+        RTK_PORTMASK_PORT_SET((pGroup->member), rtk_switch_port_P2L_get(10));
+        pGroup->timer[rtk_switch_port_P2L_get(10)] = grp.p10_timer;
+    }
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_igmp_ReportLeaveFwdAction_set(rtk_igmp_ReportLeaveFwdAct_t action)
+{
+    rtk_api_ret_t   retVal;
+    rtk_uint32      regData;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    switch(action)
+    {
+        case IGMP_REPORT_LEAVE_TO_ROUTER:
+            regData = 1;
+            break;
+        case IGMP_REPORT_LEAVE_TO_ALLPORT:
+            regData = 2;
+            break;
+        case IGMP_REPORT_LEAVE_TO_ROUTER_PORT_ADV:
+            regData = 3;
+            break;
+        default:
+            return RT_ERR_INPUT;
+    }
+
+    if ((retVal = rtl8367c_setAsicIGMPReportLeaveFlood(regData))!=RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_igmp_ReportLeaveFwdAction_get(rtk_igmp_ReportLeaveFwdAct_t *pAction)
+{
+    rtk_api_ret_t   retVal;
+    rtk_uint32      regData;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if(NULL == pAction)
+        return RT_ERR_NULL_POINTER;
+
+    if ((retVal = rtl8367c_getAsicIGMPReportLeaveFlood(&regData))!=RT_ERR_OK)
+        return retVal;
+
+    switch(regData)
+    {
+        case 1:
+            *pAction = IGMP_REPORT_LEAVE_TO_ROUTER;
+            break;
+        case 2:
+            *pAction = IGMP_REPORT_LEAVE_TO_ALLPORT;
+            break;
+        case 3:
+            *pAction = IGMP_REPORT_LEAVE_TO_ROUTER_PORT_ADV;
+            break;
+        default:
+            return RT_ERR_FAILED;
+    }
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_igmp_dropLeaveZeroEnable_set(rtk_enable_t enabled)
+{
+    rtk_api_ret_t   retVal;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if(enabled >= RTK_ENABLE_END)
+        return RT_ERR_INPUT;
+
+    if ((retVal = rtl8367c_setAsicIGMPDropLeaveZero(enabled))!=RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+
+}
+
+static rtk_api_ret_t _rtk_igmp_dropLeaveZeroEnable_get(rtk_enable_t *pEnabled)
+{
+    rtk_api_ret_t   retVal;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if(NULL == pEnabled)
+        return RT_ERR_NULL_POINTER;
+
+    if ((retVal = rtl8367c_getAsicIGMPDropLeaveZero((rtk_uint32 *)pEnabled))!=RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+
+}
+
+static rtk_api_ret_t _rtk_igmp_bypassGroupRange_set(rtk_igmp_bypassGroup_t group, rtk_enable_t enabled)
+{
+    rtk_api_ret_t   retVal;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if(group >= IGMP_BYPASS_GROUP_END)
+        return RT_ERR_INPUT;
+
+    if(enabled >= RTK_ENABLE_END)
+        return RT_ERR_INPUT;
+
+    if ((retVal = rtl8367c_setAsicIGMPBypassGroup(group, enabled))!=RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_igmp_bypassGroupRange_get(rtk_igmp_bypassGroup_t group, rtk_enable_t *pEnable)
+{
+    rtk_api_ret_t   retVal;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if(group >= IGMP_BYPASS_GROUP_END)
+        return RT_ERR_INPUT;
+
+    if(NULL == pEnable)
+        return RT_ERR_NULL_POINTER;
+
+    if ((retVal = rtl8367c_getAsicIGMPBypassGroup(group, pEnable))!=RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+
+/* Function Name:
+ *      rtk_igmp_init
+ * Description:
+ *      This API enables H/W IGMP and sets a default initial configuration.
+ * Input:
+ *      None.
+ * Output:
+ *      None.
+ * Return:
+ *      RT_ERR_OK              - OK
+ *      RT_ERR_FAILED          - Failed
+ *      RT_ERR_SMI             - SMI access error
+ * Note:
+ *      This API enables H/W IGMP and sets a default initial configuration.
+ */
+rtk_api_ret_t rtk_igmp_init(void)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_igmp_init();
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_igmp_state_set
+ * Description:
+ *      This API sets the H/W IGMP state.
+ * Input:
+ *      enabled     - H/W IGMP state
+ * Output:
+ *      None.
+ * Return:
+ *      RT_ERR_OK              - OK
+ *      RT_ERR_FAILED          - Failed
+ *      RT_ERR_SMI             - SMI access error
+ *      RT_ERR_INPUT           - Error parameter
+ * Note:
+ *      This API sets the H/W IGMP state.
+ */
+rtk_api_ret_t rtk_igmp_state_set(rtk_enable_t enabled)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_igmp_state_set(enabled);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_igmp_state_get
+ * Description:
+ *      This API gets the H/W IGMP state.
+ * Input:
+ *      None.
+ * Output:
+ *      pEnabled        - H/W IGMP state
+ * Return:
+ *      RT_ERR_OK              - OK
+ *      RT_ERR_FAILED          - Failed
+ *      RT_ERR_SMI             - SMI access error
+ *      RT_ERR_INPUT           - Error parameter
+ * Note:
+ *      This API gets the current H/W IGMP state.
+ */
+rtk_api_ret_t rtk_igmp_state_get(rtk_enable_t *pEnabled)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_igmp_state_get(pEnabled);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_igmp_static_router_port_set
+ * Description:
+ *      Configure static router port
+ * Input:
+ *      pPortmask    - Static Port mask
+ * Output:
+ *      None.
+ * Return:
+ *      RT_ERR_OK              - OK
+ *      RT_ERR_FAILED          - Failed
+ *      RT_ERR_SMI             - SMI access error
+ *      RT_ERR_PORT_MASK       - Error parameter
+ * Note:
+ *      This API sets the static router port mask.
+ */
+rtk_api_ret_t rtk_igmp_static_router_port_set(rtk_portmask_t *pPortmask)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_igmp_static_router_port_set(pPortmask);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
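+
+/*
+ * Usage sketch (illustrative): mark logical port 0 as a static router
+ * port. The port number is an assumption; RTK_PORTMASK_PORT_SET is the
+ * portmask helper already used elsewhere in this module.
+ */
+#if 0
+static void igmp_router_port_example(void)
+{
+    rtk_portmask_t routers;
+
+    memset(&routers, 0x00, sizeof(rtk_portmask_t));
+    RTK_PORTMASK_PORT_SET(routers, (rtk_port_t)0);
+    rtk_igmp_static_router_port_set(&routers);
+}
+#endif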
+
+/* Function Name:
+ *      rtk_igmp_static_router_port_get
+ * Description:
+ *      Get static router port
+ * Input:
+ *      None.
+ * Output:
+ *      pPortmask       - Static port mask
+ * Return:
+ *      RT_ERR_OK              - OK
+ *      RT_ERR_FAILED          - Failed
+ *      RT_ERR_SMI             - SMI access error
+ *      RT_ERR_PORT_MASK       - Error parameter
+ * Note:
+ *      This API gets the static router port mask.
+ */
+rtk_api_ret_t rtk_igmp_static_router_port_get(rtk_portmask_t *pPortmask)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_igmp_static_router_port_get(pPortmask);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_igmp_protocol_set
+ * Description:
+ *      set IGMP/MLD protocol action
+ * Input:
+ *      port        - Port ID
+ *      protocol    - IGMP/MLD protocol
+ *      action      - Per-port and per-protocol IGMP action setting
+ * Output:
+ *      None.
+ * Return:
+ *      RT_ERR_OK              - OK
+ *      RT_ERR_FAILED          - Failed
+ *      RT_ERR_SMI             - SMI access error
+ *      RT_ERR_PORT_MASK       - Error parameter
+ * Note:
+ *      This API sets the IGMP/MLD protocol action.
+ */
+rtk_api_ret_t rtk_igmp_protocol_set(rtk_port_t port, rtk_igmp_protocol_t protocol, rtk_igmp_action_t action)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_igmp_protocol_set(port, protocol, action);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
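+
+/*
+ * Usage sketch (illustrative): trap IGMPv3 reports on a port to the CPU
+ * instead of flooding them. The port number is an assumption; any valid
+ * logical port works.
+ */
+#if 0
+static void igmp_protocol_example(void)
+{
+    rtk_igmp_protocol_set((rtk_port_t)0, PROTOCOL_IGMPv3, IGMP_ACTION_TRAP2CPU);
+}
+#endif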
+
+/* Function Name:
+ *      rtk_igmp_protocol_get
+ * Description:
+ *      get IGMP/MLD protocol action
+ * Input:
+ *      port        - Port ID
+ *      protocol    - IGMP/MLD protocol
+ * Output:
+ *      pAction     - Per-port and per-protocol IGMP action setting
+ * Return:
+ *      RT_ERR_OK              - OK
+ *      RT_ERR_FAILED          - Failed
+ *      RT_ERR_SMI             - SMI access error
+ *      RT_ERR_PORT_MASK       - Error parameter
+ * Note:
+ *      This API gets the IGMP/MLD protocol action.
+ */
+rtk_api_ret_t rtk_igmp_protocol_get(rtk_port_t port, rtk_igmp_protocol_t protocol, rtk_igmp_action_t *pAction)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_igmp_protocol_get(port, protocol, pAction);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_igmp_fastLeave_set
+ * Description:
+ *      set IGMP/MLD FastLeave state
+ * Input:
+ *      state       - ENABLED: Enable FastLeave, DISABLED: disable FastLeave
+ * Output:
+ *      None.
+ * Return:
+ *      RT_ERR_OK              - OK
+ *      RT_ERR_INPUT           - Error Input
+ *      RT_ERR_FAILED          - Failed
+ *      RT_ERR_SMI             - SMI access error
+ * Note:
+ *      This API sets the IGMP/MLD FastLeave state.
+ */
+rtk_api_ret_t rtk_igmp_fastLeave_set(rtk_enable_t state)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_igmp_fastLeave_set(state);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_igmp_fastLeave_get
+ * Description:
+ *      get IGMP/MLD FastLeave state
+ * Input:
+ *      None
+ * Output:
+ *      pState      - ENABLED: Enable FastLeave, DISABLED: disable FastLeave
+ * Return:
+ *      RT_ERR_OK              - OK
+ *      RT_ERR_NULL_POINTER    - NULL pointer
+ *      RT_ERR_FAILED          - Failed
+ *      RT_ERR_SMI             - SMI access error
+ * Note:
+ *      This API gets the IGMP/MLD FastLeave state.
+ */
+rtk_api_ret_t rtk_igmp_fastLeave_get(rtk_enable_t *pState)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_igmp_fastLeave_get(pState);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_igmp_maxGroup_set
+ * Description:
+ *      Set per port multicast group learning limit.
+ * Input:
+ *      port        - Port ID
+ *      group       - The number of multicast group learning limit.
+ * Output:
+ *      None.
+ * Return:
+ *      RT_ERR_OK              - OK
+ *      RT_ERR_PORT_ID         - Error Port ID
+ *      RT_ERR_OUT_OF_RANGE    - parameter out of range
+ *      RT_ERR_FAILED          - Failed
+ *      RT_ERR_SMI             - SMI access error
+ * Note:
+ *      This API sets the per-port multicast group learning limit.
+ */
+rtk_api_ret_t rtk_igmp_maxGroup_set(rtk_port_t port, rtk_uint32 group)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_igmp_maxGroup_set(port, group);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_igmp_maxGroup_get
+ * Description:
+ *      Get per port multicast group learning limit.
+ * Input:
+ *      port        - Port ID
+ * Output:
+ *      pGroup      - The number of multicast group learning limit.
+ * Return:
+ *      RT_ERR_OK              - OK
+ *      RT_ERR_PORT_ID         - Error Port ID
+ *      RT_ERR_NULL_POINTER    - Null pointer
+ *      RT_ERR_FAILED          - Failed
+ *      RT_ERR_SMI             - SMI access error
+ * Note:
+ *      This API gets the per-port multicast group learning limit.
+ */
+rtk_api_ret_t rtk_igmp_maxGroup_get(rtk_port_t port, rtk_uint32 *pGroup)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_igmp_maxGroup_get(port, pGroup);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_igmp_currentGroup_get
+ * Description:
+ *      Get per port multicast group learning count.
+ * Input:
+ *      port        - Port ID
+ * Output:
+ *      pGroup      - The number of multicast group learning count.
+ * Return:
+ *      RT_ERR_OK              - OK
+ *      RT_ERR_PORT_ID         - Error Port ID
+ *      RT_ERR_NULL_POINTER    - Null pointer
+ *      RT_ERR_FAILED          - Failed
+ *      RT_ERR_SMI             - SMI access error
+ * Note:
+ *      This API gets the per-port multicast group learning count.
+ */
+rtk_api_ret_t rtk_igmp_currentGroup_get(rtk_port_t port, rtk_uint32 *pGroup)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_igmp_currentGroup_get(port, pGroup);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_igmp_tableFullAction_set
+ * Description:
+ *      set IGMP/MLD Table Full Action
+ * Input:
+ *      action      - Table Full Action
+ * Output:
+ *      None.
+ * Return:
+ *      RT_ERR_OK              - OK
+ *      RT_ERR_INPUT           - Error Input
+ *      RT_ERR_FAILED          - Failed
+ *      RT_ERR_SMI             - SMI access error
+ * Note:
+ */
+rtk_api_ret_t rtk_igmp_tableFullAction_set(rtk_igmp_tableFullAction_t action)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_igmp_tableFullAction_set(action);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_igmp_tableFullAction_get
+ * Description:
+ *      get IGMP/MLD Table Full Action
+ * Input:
+ *      None
+ * Output:
+ *      pAction     - Table Full Action
+ * Return:
+ *      RT_ERR_OK              - OK
+ *      RT_ERR_NULL_POINTER    - Null pointer
+ *      RT_ERR_FAILED          - Failed
+ *      RT_ERR_SMI             - SMI access error
+ * Note:
+ */
+rtk_api_ret_t rtk_igmp_tableFullAction_get(rtk_igmp_tableFullAction_t *pAction)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_igmp_tableFullAction_get(pAction);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_igmp_checksumErrorAction_set
+ * Description:
+ *      set IGMP/MLD Checksum Error Action
+ * Input:
+ *      action      - Checksum error Action
+ * Output:
+ *      None.
+ * Return:
+ *      RT_ERR_OK              - OK
+ *      RT_ERR_INPUT           - Error Input
+ *      RT_ERR_FAILED          - Failed
+ *      RT_ERR_SMI             - SMI access error
+ * Note:
+ */
+rtk_api_ret_t rtk_igmp_checksumErrorAction_set(rtk_igmp_checksumErrorAction_t action)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_igmp_checksumErrorAction_set(action);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+
+/* Function Name:
+ *      rtk_igmp_checksumErrorAction_get
+ * Description:
+ *      get IGMP/MLD Checksum Error Action
+ * Input:
+ *      None
+ * Output:
+ *      pAction     - Checksum error Action
+ * Return:
+ *      RT_ERR_OK              - OK
+ *      RT_ERR_NULL_POINTER    - Null pointer
+ *      RT_ERR_FAILED          - Failed
+ *      RT_ERR_SMI             - SMI access error
+ * Note:
+ */
+rtk_api_ret_t rtk_igmp_checksumErrorAction_get(rtk_igmp_checksumErrorAction_t *pAction)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_igmp_checksumErrorAction_get(pAction);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_igmp_leaveTimer_set
+ * Description:
+ *      set IGMP/MLD Leave timer
+ * Input:
+ *      timer       - Leave timer
+ * Output:
+ *      None.
+ * Return:
+ *      RT_ERR_OK              - OK
+ *      RT_ERR_INPUT           - Error Input
+ *      RT_ERR_FAILED          - Failed
+ *      RT_ERR_SMI             - SMI access error
+ * Note:
+ */
+rtk_api_ret_t rtk_igmp_leaveTimer_set(rtk_uint32 timer)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_igmp_leaveTimer_set(timer);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_igmp_leaveTimer_get
+ * Description:
+ *      get IGMP/MLD Leave timer
+ * Input:
+ *      None
+ * Output:
+ *      pTimer      - Leave Timer.
+ * Return:
+ *      RT_ERR_OK              - OK
+ *      RT_ERR_NULL_POINTER    - Null pointer
+ *      RT_ERR_FAILED          - Failed
+ *      RT_ERR_SMI             - SMI access error
+ * Note:
+ */
+rtk_api_ret_t rtk_igmp_leaveTimer_get(rtk_uint32 *pTimer)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_igmp_leaveTimer_get(pTimer);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_igmp_queryInterval_set
+ * Description:
+ *      set IGMP/MLD Query Interval
+ * Input:
+ *      interval     - Query Interval
+ * Output:
+ *      None.
+ * Return:
+ *      RT_ERR_OK              - OK
+ *      RT_ERR_INPUT           - Error Input
+ *      RT_ERR_FAILED          - Failed
+ *      RT_ERR_SMI             - SMI access error
+ * Note:
+ */
+rtk_api_ret_t rtk_igmp_queryInterval_set(rtk_uint32 interval)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_igmp_queryInterval_set(interval);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_igmp_queryInterval_get
+ * Description:
+ *      get IGMP/MLD Query Interval
+ * Input:
+ *      None.
+ * Output:
+ *      pInterval   - Query Interval
+ * Return:
+ *      RT_ERR_OK              - OK
+ *      RT_ERR_NULL_POINTER    - Null pointer
+ *      RT_ERR_FAILED          - Failed
+ *      RT_ERR_SMI             - SMI access error
+ * Note:
+ */
+rtk_api_ret_t rtk_igmp_queryInterval_get(rtk_uint32 *pInterval)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_igmp_queryInterval_get(pInterval);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_igmp_robustness_set
+ * Description:
+ *      set IGMP/MLD Robustness value
+ * Input:
+ *      robustness     - Robustness value
+ * Output:
+ *      None.
+ * Return:
+ *      RT_ERR_OK              - OK
+ *      RT_ERR_INPUT           - Error Input
+ *      RT_ERR_FAILED          - Failed
+ *      RT_ERR_SMI             - SMI access error
+ * Note:
+ */
+rtk_api_ret_t rtk_igmp_robustness_set(rtk_uint32 robustness)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_igmp_robustness_set(robustness);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_igmp_robustness_get
+ * Description:
+ *      get IGMP/MLD Robustness value
+ * Input:
+ *      None
+ * Output:
+ *      pRobustness     - Robustness value.
+ * Return:
+ *      RT_ERR_OK              - OK
+ *      RT_ERR_NULL_POINTER    - Null pointer
+ *      RT_ERR_FAILED          - Failed
+ *      RT_ERR_SMI             - SMI access error
+ * Note:
+ */
+rtk_api_ret_t rtk_igmp_robustness_get(rtk_uint32 *pRobustness)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_igmp_robustness_get(pRobustness);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_igmp_dynamicRouterPortAllow_set
+ * Description:
+ *      Configure dynamic router port allow option
+ * Input:
+ *      pPortmask    - Dynamic Port allow mask
+ * Output:
+ *      None.
+ * Return:
+ *      RT_ERR_OK              - OK
+ *      RT_ERR_FAILED          - Failed
+ *      RT_ERR_SMI             - SMI access error
+ *      RT_ERR_PORT_MASK       - Error parameter
+ * Note:
+ *
+ */
+rtk_api_ret_t rtk_igmp_dynamicRouterPortAllow_set(rtk_portmask_t *pPortmask)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_igmp_dynamicRouterPortAllow_set(pPortmask);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_igmp_dynamicRouterPortAllow_get
+ * Description:
+ *      Get dynamic router port allow option
+ * Input:
+ *      None.
+ * Output:
+ *      pPortmask    - Dynamic Port allow mask
+ * Return:
+ *      RT_ERR_OK              - OK
+ *      RT_ERR_FAILED          - Failed
+ *      RT_ERR_NULL_POINTER    - Null pointer
+ *      RT_ERR_SMI             - SMI access error
+ *      RT_ERR_PORT_MASK       - Error parameter
+ * Note:
+ *
+ */
+rtk_api_ret_t rtk_igmp_dynamicRouterPortAllow_get(rtk_portmask_t *pPortmask)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_igmp_dynamicRouterPortAllow_get(pPortmask);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_igmp_dynamicRouterPort_get
+ * Description:
+ *      Get dynamic router port
+ * Input:
+ *      None.
+ * Output:
+ *      pDynamicRouterPort    - Dynamic Router Port
+ * Return:
+ *      RT_ERR_OK              - OK
+ *      RT_ERR_FAILED          - Failed
+ *      RT_ERR_NULL_POINTER    - Null pointer
+ *      RT_ERR_SMI             - SMI access error
+ *      RT_ERR_PORT_MASK       - Error parameter
+ * Note:
+ *
+ */
+rtk_api_ret_t rtk_igmp_dynamicRouterPort_get(rtk_igmp_dynamicRouterPort_t *pDynamicRouterPort)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_igmp_dynamicRouterPort_get(pDynamicRouterPort);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_igmp_suppressionEnable_set
+ * Description:
+ *      Configure IGMPv1/v2 & MLDv1 Report/Leave/Done suppression
+ * Input:
+ *      reportSuppression   - Report suppression
+ *      leaveSuppression    - Leave suppression
+ * Output:
+ *      None.
+ * Return:
+ *      RT_ERR_OK              - OK
+ *      RT_ERR_FAILED          - Failed
+ *      RT_ERR_SMI             - SMI access error
+ *      RT_ERR_INPUT           - Error Input
+ * Note:
+ *
+ */
+rtk_api_ret_t rtk_igmp_suppressionEnable_set(rtk_enable_t reportSuppression, rtk_enable_t leaveSuppression)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_igmp_suppressionEnable_set(reportSuppression, leaveSuppression);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_igmp_suppressionEnable_get
+ * Description:
+ *      Get IGMPv1/v2 & MLDv1 Report/Leave/Done suppression
+ * Input:
+ *      None
+ * Output:
+ *      pReportSuppression  - Report suppression
+ *      pLeaveSuppression   - Leave suppression
+ * Return:
+ *      RT_ERR_OK              - OK
+ *      RT_ERR_FAILED          - Failed
+ *      RT_ERR_SMI             - SMI access error
+ *      RT_ERR_NULL_POINTER    - Null pointer
+ * Note:
+ *
+ */
+rtk_api_ret_t rtk_igmp_suppressionEnable_get(rtk_enable_t *pReportSuppression, rtk_enable_t *pLeaveSuppression)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_igmp_suppressionEnable_get(pReportSuppression, pLeaveSuppression);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_igmp_portRxPktEnable_set
+ * Description:
+ *      Configure IGMP/MLD RX Packet configuration
+ * Input:
+ *      port       - Port ID
+ *      pRxCfg     - RX Packet Configuration
+ * Output:
+ *      None.
+ * Return:
+ *      RT_ERR_OK              - OK
+ *      RT_ERR_FAILED          - Failed
+ *      RT_ERR_SMI             - SMI access error
+ *      RT_ERR_INPUT           - Error Input
+ *      RT_ERR_NULL_POINTER    - Null pointer
+ * Note:
+ *
+ */
+rtk_api_ret_t rtk_igmp_portRxPktEnable_set(rtk_port_t port, rtk_igmp_rxPktEnable_t *pRxCfg)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_igmp_portRxPktEnable_set(port, pRxCfg);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_igmp_portRxPktEnable_get
+ * Description:
+ *      Get IGMP/MLD RX Packet configuration
+ * Input:
+ *      port       - Port ID
+ * Output:
+ *      pRxCfg     - RX Packet Configuration
+ * Return:
+ *      RT_ERR_OK              - OK
+ *      RT_ERR_FAILED          - Failed
+ *      RT_ERR_SMI             - SMI access error
+ *      RT_ERR_INPUT           - Error Input
+ *      RT_ERR_NULL_POINTER    - Null pointer
+ * Note:
+ *
+ */
+rtk_api_ret_t rtk_igmp_portRxPktEnable_get(rtk_port_t port, rtk_igmp_rxPktEnable_t *pRxCfg)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_igmp_portRxPktEnable_get(port, pRxCfg);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_igmp_groupInfo_get
+ * Description:
+ *      Get IGMP/MLD Group database
+ * Input:
+ *      index       - Index (0~255)
+ * Output:
+ *      pGroup      - Group database information.
+ * Return:
+ *      RT_ERR_OK              - OK
+ *      RT_ERR_FAILED          - Failed
+ *      RT_ERR_SMI             - SMI access error
+ *      RT_ERR_INPUT           - Error Input
+ *      RT_ERR_NULL_POINTER    - Null pointer
+ * Note:
+ *
+ */
+rtk_api_ret_t rtk_igmp_groupInfo_get(rtk_uint32 index, rtk_igmp_groupInfo_t *pGroup)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_igmp_groupInfo_get(index, pGroup);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
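+
+/*
+ * Illustrative usage (editor's sketch, not part of the original sources):
+ * walk the 256-entry group database and report occupied slots. Accessing
+ * member.bits[0] assumes the common single-word rtk_portmask_t layout.
+ *
+ *    rtk_igmp_groupInfo_t grp;
+ *    rtk_uint32 i;
+ *
+ *    for (i = 0; i <= 255; i++) {
+ *        if (rtk_igmp_groupInfo_get(i, &grp) != RT_ERR_OK)
+ *            continue;
+ *        if (grp.valid == ENABLED)
+ *            printk("group %u: members 0x%x\n", i, grp.member.bits[0]);
+ *    }
+ */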
+
+/* Function Name:
+ *      rtk_igmp_ReportLeaveFwdAction_set
+ * Description:
+ *      Set Report Leave packet forwarding action
+ * Input:
+ *      action      - Action
+ * Output:
+ *      None.
+ * Return:
+ *      RT_ERR_OK              - OK
+ *      RT_ERR_FAILED          - Failed
+ *      RT_ERR_SMI             - SMI access error
+ *      RT_ERR_INPUT           - Error Input
+ * Note:
+ *
+ */
+rtk_api_ret_t rtk_igmp_ReportLeaveFwdAction_set(rtk_igmp_ReportLeaveFwdAct_t action)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_igmp_ReportLeaveFwdAction_set(action);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_igmp_ReportLeaveFwdAction_get
+ * Description:
+ *      Get Report Leave packet forwarding action
+ * Input:
+ *      None.
+ * Output:
+ *      pAction     - Action
+ * Return:
+ *      RT_ERR_OK              - OK
+ *      RT_ERR_FAILED          - Failed
+ *      RT_ERR_SMI             - SMI access error
+ *      RT_ERR_INPUT           - Error Input
+ *      RT_ERR_NULL_POINTER    - Null Pointer
+ * Note:
+ *
+ */
+rtk_api_ret_t rtk_igmp_ReportLeaveFwdAction_get(rtk_igmp_ReportLeaveFwdAct_t *pAction)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_igmp_ReportLeaveFwdAction_get(pAction);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_igmp_dropLeaveZeroEnable_set
+ * Description:
+ *      Set the function of dropping Leave packets with group IP = 0.0.0.0
+ * Input:
+ *      enabled      - Action. 1: drop, 0: pass
+ * Output:
+ *      None.
+ * Return:
+ *      RT_ERR_OK              - OK
+ *      RT_ERR_FAILED          - Failed
+ *      RT_ERR_SMI             - SMI access error
+ *      RT_ERR_INPUT           - Error Input
+ * Note:
+ *
+ */
+rtk_api_ret_t rtk_igmp_dropLeaveZeroEnable_set(rtk_enable_t enabled)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_igmp_dropLeaveZeroEnable_set(enabled);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_igmp_dropLeaveZeroEnable_get
+ * Description:
+ *      Get the function of dropping Leave packets with group IP = 0.0.0.0
+ * Input:
+ *      None
+ * Output:
+ *      pEnabled    - Action. 1: drop, 0: pass
+ * Return:
+ *      RT_ERR_OK              - OK
+ *      RT_ERR_FAILED          - Failed
+ *      RT_ERR_SMI             - SMI access error
+ *      RT_ERR_INPUT           - Error Input
+ *      RT_ERR_NULL_POINTER    - Null Pointer
+ * Note:
+ *
+ */
+rtk_api_ret_t rtk_igmp_dropLeaveZeroEnable_get(rtk_enable_t *pEnabled)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_igmp_dropLeaveZeroEnable_get(pEnabled);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_igmp_bypassGroupRange_set
+ * Description:
+ *      Set Bypass group
+ * Input:
+ *      group       - bypassed group
+ *      enabled     - 1: Bypassed, 0: not bypassed
+ * Output:
+ *      None.
+ * Return:
+ *      RT_ERR_OK              - OK
+ *      RT_ERR_FAILED          - Failed
+ *      RT_ERR_SMI             - SMI access error
+ *      RT_ERR_INPUT           - Error Input
+ * Note:
+ *
+ */
+rtk_api_ret_t rtk_igmp_bypassGroupRange_set(rtk_igmp_bypassGroup_t group, rtk_enable_t enabled)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_igmp_bypassGroupRange_set(group, enabled);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
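+
+/*
+ * Illustrative usage (editor's sketch, not part of the original sources):
+ * let the link-local 224.0.0.x range bypass snooping so that routing and
+ * discovery protocols keep flooding to all ports.
+ *
+ *    if (rtk_igmp_bypassGroupRange_set(IGMP_BYPASS_224_0_0_X, ENABLED) != RT_ERR_OK)
+ *        ... handle the error ...
+ */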
+
+/* Function Name:
+ *      rtk_igmp_bypassGroupRange_get
+ * Description:
+ *      Get Bypass group
+ * Input:
+ *      group       - bypassed group
+ * Output:
+ *      pEnable     - 1: Bypassed, 0: not bypassed
+ * Return:
+ *      RT_ERR_OK              - OK
+ *      RT_ERR_FAILED          - Failed
+ *      RT_ERR_SMI             - SMI access error
+ *      RT_ERR_INPUT           - Error Input
+ *      RT_ERR_NULL_POINTER    - Null Pointer
+ * Note:
+ *
+ */
+rtk_api_ret_t rtk_igmp_bypassGroupRange_get(rtk_igmp_bypassGroup_t group, rtk_enable_t *pEnable)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_igmp_bypassGroupRange_get(group, pEnable);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
diff -Nruw linux-4.4.115-fbx/drivers/misc/freebox./rtlapi/igmp.h linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/igmp.h
--- linux-4.4.115-fbx/drivers/misc/freebox./rtlapi/igmp.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/igmp.h	2019-01-22 16:16:24.707257309 +0100
@@ -0,0 +1,771 @@
+/*
+ * Copyright (C) 2013 Realtek Semiconductor Corp.
+ * All Rights Reserved.
+ *
+ * This program is the proprietary software of Realtek Semiconductor
+ * Corporation and/or its licensors, and only be used, duplicated,
+ * modified or distributed under the authorized license from Realtek.
+ *
+ * ANY USE OF THE SOFTWARE OTHER THAN AS AUTHORIZED UNDER
+ * THIS LICENSE OR COPYRIGHT LAW IS PROHIBITED.
+ *
+ * Purpose : RTL8367/RTL8367C switch high-level API
+ *
+ * Feature : The file includes IGMP module high-layer API definition
+ *
+ */
+
+#ifndef __RTK_API_IGMP_H__
+#define __RTK_API_IGMP_H__
+
+/*
+ * Data Type Declaration
+ */
+typedef enum rtk_igmp_type_e
+{
+    IGMP_IPV4 = 0,
+    IGMP_PPPOE_IPV4,
+    IGMP_MLD,
+    IGMP_PPPOE_MLD,
+    IGMP_TYPE_END
+} rtk_igmp_type_t;
+
+typedef enum rtk_trap_igmp_action_e
+{
+    IGMP_ACTION_FORWARD = 0,
+    IGMP_ACTION_TRAP2CPU,
+    IGMP_ACTION_DROP,
+    IGMP_ACTION_ASIC,
+    IGMP_ACTION_END
+} rtk_igmp_action_t;
+
+typedef enum rtk_igmp_protocol_e
+{
+    PROTOCOL_IGMPv1 = 0,
+    PROTOCOL_IGMPv2,
+    PROTOCOL_IGMPv3,
+    PROTOCOL_MLDv1,
+    PROTOCOL_MLDv2,
+    PROTOCOL_END
+} rtk_igmp_protocol_t;
+
+typedef enum rtk_igmp_tableFullAction_e
+{
+    IGMP_TABLE_FULL_FORWARD = 0,
+    IGMP_TABLE_FULL_DROP,
+    IGMP_TABLE_FULL_TRAP,
+    IGMP_TABLE_FULL_OP_END
+}rtk_igmp_tableFullAction_t;
+
+typedef enum rtk_igmp_checksumErrorAction_e
+{
+    IGMP_CRC_ERR_DROP = 0,
+    IGMP_CRC_ERR_TRAP,
+    IGMP_CRC_ERR_FORWARD,
+    IGMP_CRC_ERR_OP_END
+}rtk_igmp_checksumErrorAction_t;
+
+typedef enum rtk_igmp_bypassGroup_e
+{
+    IGMP_BYPASS_224_0_0_X = 0,
+    IGMP_BYPASS_224_0_1_X,
+    IGMP_BYPASS_239_255_255_X,
+    IGMP_BYPASS_IPV6_00XX,
+    IGMP_BYPASS_GROUP_END
+}rtk_igmp_bypassGroup_t;
+
+
+typedef struct rtk_igmp_dynamicRouterPort_s
+{
+    rtk_enable_t    dynamicRouterPort0Valid;
+    rtk_port_t      dynamicRouterPort0;
+    rtk_uint32      dynamicRouterPort0Timer;
+    rtk_enable_t    dynamicRouterPort1Valid;
+    rtk_port_t      dynamicRouterPort1;
+    rtk_uint32      dynamicRouterPort1Timer;
+
+}rtk_igmp_dynamicRouterPort_t;
+
+typedef struct rtk_igmp_rxPktEnable_s
+{
+    rtk_enable_t rxQuery;
+    rtk_enable_t rxReport;
+    rtk_enable_t rxLeave;
+    rtk_enable_t rxMRP;
+    rtk_enable_t rxMcast;
+}rtk_igmp_rxPktEnable_t;
+
+typedef struct rtk_igmp_groupInfo_s
+{
+    rtk_enable_t    valid;
+    rtk_portmask_t  member;
+    rtk_uint32      timer[RTK_PORT_MAX];
+    rtk_uint32      reportSuppFlag;
+}rtk_igmp_groupInfo_t;
+
+typedef enum rtk_igmp_ReportLeaveFwdAct_e
+{
+    IGMP_REPORT_LEAVE_TO_ROUTER = 0,
+    IGMP_REPORT_LEAVE_TO_ALLPORT,
+    IGMP_REPORT_LEAVE_TO_ROUTER_PORT_ADV,
+    IGMP_REPORT_LEAVE_ACT_END
+}rtk_igmp_ReportLeaveFwdAct_t;
+
+/* Function Name:
+ *      rtk_igmp_init
+ * Description:
+ *      This API enables H/W IGMP and sets a default initial configuration.
+ * Input:
+ *      None.
+ * Output:
+ *      None.
+ * Return:
+ *      RT_ERR_OK              - OK
+ *      RT_ERR_FAILED          - Failed
+ *      RT_ERR_SMI             - SMI access error
+ * Note:
+ *      This API enables H/W IGMP and sets a default initial configuration.
+ */
+extern rtk_api_ret_t rtk_igmp_init(void);
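+
+/*
+ * Illustrative bring-up order (editor's sketch, not part of the original
+ * sources): callers typically enable H/W IGMP first and then tune the
+ * per-protocol behaviour. The port id 0 is an assumed logical port.
+ *
+ *    rtk_igmp_init();
+ *    rtk_igmp_state_set(ENABLED);
+ *    rtk_igmp_protocol_set(0, PROTOCOL_IGMPv2, IGMP_ACTION_ASIC);
+ */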
+
+/* Function Name:
+ *      rtk_igmp_state_set
+ * Description:
+ *      This API sets the H/W IGMP state.
+ * Input:
+ *      enabled     - H/W IGMP state
+ * Output:
+ *      None.
+ * Return:
+ *      RT_ERR_OK              - OK
+ *      RT_ERR_FAILED          - Failed
+ *      RT_ERR_SMI             - SMI access error
+ *      RT_ERR_INPUT           - Error parameter
+ * Note:
+ *      This API sets the H/W IGMP state.
+ */
+extern rtk_api_ret_t rtk_igmp_state_set(rtk_enable_t enabled);
+
+/* Function Name:
+ *      rtk_igmp_state_get
+ * Description:
+ *      This API gets the H/W IGMP state.
+ * Input:
+ *      None.
+ * Output:
+ *      pEnabled        - H/W IGMP state
+ * Return:
+ *      RT_ERR_OK              - OK
+ *      RT_ERR_FAILED          - Failed
+ *      RT_ERR_SMI             - SMI access error
+ *      RT_ERR_INPUT           - Error parameter
+ * Note:
+ *      This API gets the current H/W IGMP state.
+ */
+extern rtk_api_ret_t rtk_igmp_state_get(rtk_enable_t *pEnabled);
+
+/* Function Name:
+ *      rtk_igmp_static_router_port_set
+ * Description:
+ *      Configure static router port
+ * Input:
+ *      pPortmask    - Static Port mask
+ * Output:
+ *      None.
+ * Return:
+ *      RT_ERR_OK              - OK
+ *      RT_ERR_FAILED          - Failed
+ *      RT_ERR_SMI             - SMI access error
+ *      RT_ERR_PORT_MASK       - Error parameter
+ * Note:
+ *      This API sets the static router port
+ */
+extern rtk_api_ret_t rtk_igmp_static_router_port_set(rtk_portmask_t *pPortmask);
+
+/* Function Name:
+ *      rtk_igmp_static_router_port_get
+ * Description:
+ *      Get static router port
+ * Input:
+ *      None.
+ * Output:
+ *      pPortmask       - Static port mask
+ * Return:
+ *      RT_ERR_OK              - OK
+ *      RT_ERR_FAILED          - Failed
+ *      RT_ERR_SMI             - SMI access error
+ *      RT_ERR_PORT_MASK       - Error parameter
+ * Note:
+ *      This API gets the static router port
+ */
+extern rtk_api_ret_t rtk_igmp_static_router_port_get(rtk_portmask_t *pPortmask);
+
+/* Function Name:
+ *      rtk_igmp_protocol_set
+ * Description:
+ *      Set IGMP/MLD protocol action
+ * Input:
+ *      port        - Port ID
+ *      protocol    - IGMP/MLD protocol
+ *      action      - Per-port and per-protocol IGMP action setting
+ * Output:
+ *      None.
+ * Return:
+ *      RT_ERR_OK              - OK
+ *      RT_ERR_FAILED          - Failed
+ *      RT_ERR_SMI             - SMI access error
+ *      RT_ERR_PORT_MASK       - Error parameter
+ * Note:
+ *      This API sets the IGMP/MLD protocol action
+ */
+extern rtk_api_ret_t rtk_igmp_protocol_set(rtk_port_t port, rtk_igmp_protocol_t protocol, rtk_igmp_action_t action);
+
+/* Function Name:
+ *      rtk_igmp_protocol_get
+ * Description:
+ *      Get IGMP/MLD protocol action
+ * Input:
+ *      port        - Port ID
+ *      protocol    - IGMP/MLD protocol
+ * Output:
+ *      pAction     - Per-port and per-protocol IGMP action setting
+ * Return:
+ *      RT_ERR_OK              - OK
+ *      RT_ERR_FAILED          - Failed
+ *      RT_ERR_SMI             - SMI access error
+ *      RT_ERR_PORT_MASK       - Error parameter
+ * Note:
+ *      This API gets the IGMP/MLD protocol action
+ */
+extern rtk_api_ret_t rtk_igmp_protocol_get(rtk_port_t port, rtk_igmp_protocol_t protocol, rtk_igmp_action_t *pAction);
+
+/* Function Name:
+ *      rtk_igmp_fastLeave_set
+ * Description:
+ *      Set IGMP/MLD FastLeave state
+ * Input:
+ *      state       - ENABLED: Enable FastLeave, DISABLED: disable FastLeave
+ * Output:
+ *      None.
+ * Return:
+ *      RT_ERR_OK              - OK
+ *      RT_ERR_INPUT           - Error Input
+ *      RT_ERR_FAILED          - Failed
+ *      RT_ERR_SMI             - SMI access error
+ * Note:
+ *      This API sets the IGMP/MLD FastLeave state
+ */
+extern rtk_api_ret_t rtk_igmp_fastLeave_set(rtk_enable_t state);
+
+/* Function Name:
+ *      rtk_igmp_fastLeave_get
+ * Description:
+ *      Get IGMP/MLD FastLeave state
+ * Input:
+ *      None
+ * Output:
+ *      pState      - ENABLED: Enable FastLeave, DISABLED: disable FastLeave
+ * Return:
+ *      RT_ERR_OK              - OK
+ *      RT_ERR_NULL_POINTER    - NULL pointer
+ *      RT_ERR_FAILED          - Failed
+ *      RT_ERR_SMI             - SMI access error
+ * Note:
+ *      This API gets the IGMP/MLD FastLeave state
+ */
+extern rtk_api_ret_t rtk_igmp_fastLeave_get(rtk_enable_t *pState);
+
+/* Function Name:
+ *      rtk_igmp_maxGroup_set
+ * Description:
+ *      Set per port multicast group learning limit.
+ * Input:
+ *      port        - Port ID
+ *      group       - The number of multicast group learning limit.
+ * Output:
+ *      None.
+ * Return:
+ *      RT_ERR_OK              - OK
+ *      RT_ERR_PORT_ID         - Error Port ID
+ *      RT_ERR_OUT_OF_RANGE    - parameter out of range
+ *      RT_ERR_FAILED          - Failed
+ *      RT_ERR_SMI             - SMI access error
+ * Note:
+ *      This API sets the per port multicast group learning limit.
+ */
+extern rtk_api_ret_t rtk_igmp_maxGroup_set(rtk_port_t port, rtk_uint32 group);
+
+/* Function Name:
+ *      rtk_igmp_maxGroup_get
+ * Description:
+ *      Get per port multicast group learning limit.
+ * Input:
+ *      port        - Port ID
+ * Output:
+ *      pGroup      - The number of multicast group learning limit.
+ * Return:
+ *      RT_ERR_OK              - OK
+ *      RT_ERR_PORT_ID         - Error Port ID
+ *      RT_ERR_NULL_POINTER    - Null pointer
+ *      RT_ERR_FAILED          - Failed
+ *      RT_ERR_SMI             - SMI access error
+ * Note:
+ *      This API gets the per port multicast group learning limit.
+ */
+extern rtk_api_ret_t rtk_igmp_maxGroup_get(rtk_port_t port, rtk_uint32 *pGroup);
+
+/* Function Name:
+ *      rtk_igmp_currentGroup_get
+ * Description:
+ *      Get per port multicast group learning count.
+ * Input:
+ *      port        - Port ID
+ * Output:
+ *      pGroup      - The number of multicast group learning count.
+ * Return:
+ *      RT_ERR_OK              - OK
+ *      RT_ERR_PORT_ID         - Error Port ID
+ *      RT_ERR_NULL_POINTER    - Null pointer
+ *      RT_ERR_FAILED          - Failed
+ *      RT_ERR_SMI             - SMI access error
+ * Note:
+ *      This API gets the per port multicast group learning count.
+ */
+extern rtk_api_ret_t rtk_igmp_currentGroup_get(rtk_port_t port, rtk_uint32 *pGroup);
+
+/* Function Name:
+ *      rtk_igmp_tableFullAction_set
+ * Description:
+ *      Set IGMP/MLD Table Full Action
+ * Input:
+ *      action      - Table Full Action
+ * Output:
+ *      None.
+ * Return:
+ *      RT_ERR_OK              - OK
+ *      RT_ERR_INPUT           - Error Input
+ *      RT_ERR_FAILED          - Failed
+ *      RT_ERR_SMI             - SMI access error
+ * Note:
+ */
+extern rtk_api_ret_t rtk_igmp_tableFullAction_set(rtk_igmp_tableFullAction_t action);
+
+/* Function Name:
+ *      rtk_igmp_tableFullAction_get
+ * Description:
+ *      Get IGMP/MLD Table Full Action
+ * Input:
+ *      None
+ * Output:
+ *      pAction     - Table Full Action
+ * Return:
+ *      RT_ERR_OK              - OK
+ *      RT_ERR_NULL_POINTER    - Null pointer
+ *      RT_ERR_FAILED          - Failed
+ *      RT_ERR_SMI             - SMI access error
+ * Note:
+ */
+extern rtk_api_ret_t rtk_igmp_tableFullAction_get(rtk_igmp_tableFullAction_t *pAction);
+
+/* Function Name:
+ *      rtk_igmp_checksumErrorAction_set
+ * Description:
+ *      Set IGMP/MLD Checksum Error Action
+ * Input:
+ *      action      - Checksum error Action
+ * Output:
+ *      None.
+ * Return:
+ *      RT_ERR_OK              - OK
+ *      RT_ERR_INPUT           - Error Input
+ *      RT_ERR_FAILED          - Failed
+ *      RT_ERR_SMI             - SMI access error
+ * Note:
+ */
+extern rtk_api_ret_t rtk_igmp_checksumErrorAction_set(rtk_igmp_checksumErrorAction_t action);
+
+/* Function Name:
+ *      rtk_igmp_checksumErrorAction_get
+ * Description:
+ *      Get IGMP/MLD Checksum Error Action
+ * Input:
+ *      None
+ * Output:
+ *      pAction     - Checksum error Action
+ * Return:
+ *      RT_ERR_OK              - OK
+ *      RT_ERR_NULL_POINTER    - Null pointer
+ *      RT_ERR_FAILED          - Failed
+ *      RT_ERR_SMI             - SMI access error
+ * Note:
+ */
+extern rtk_api_ret_t rtk_igmp_checksumErrorAction_get(rtk_igmp_checksumErrorAction_t *pAction);
+
+/* Function Name:
+ *      rtk_igmp_leaveTimer_set
+ * Description:
+ *      Set IGMP/MLD Leave timer
+ * Input:
+ *      timer       - Leave timer
+ * Output:
+ *      None.
+ * Return:
+ *      RT_ERR_OK              - OK
+ *      RT_ERR_INPUT           - Error Input
+ *      RT_ERR_FAILED          - Failed
+ *      RT_ERR_SMI             - SMI access error
+ * Note:
+ */
+extern rtk_api_ret_t rtk_igmp_leaveTimer_set(rtk_uint32 timer);
+
+/* Function Name:
+ *      rtk_igmp_leaveTimer_get
+ * Description:
+ *      Get IGMP/MLD Leave timer
+ * Input:
+ *      None
+ * Output:
+ *      pTimer      - Leave Timer.
+ * Return:
+ *      RT_ERR_OK              - OK
+ *      RT_ERR_NULL_POINTER    - Null pointer
+ *      RT_ERR_FAILED          - Failed
+ *      RT_ERR_SMI             - SMI access error
+ * Note:
+ */
+extern rtk_api_ret_t rtk_igmp_leaveTimer_get(rtk_uint32 *pTimer);
+
+/* Function Name:
+ *      rtk_igmp_queryInterval_set
+ * Description:
+ *      Set IGMP/MLD Query Interval
+ * Input:
+ *      interval     - Query Interval
+ * Output:
+ *      None.
+ * Return:
+ *      RT_ERR_OK              - OK
+ *      RT_ERR_INPUT           - Error Input
+ *      RT_ERR_FAILED          - Failed
+ *      RT_ERR_SMI             - SMI access error
+ * Note:
+ */
+extern rtk_api_ret_t rtk_igmp_queryInterval_set(rtk_uint32 interval);
+
+/* Function Name:
+ *      rtk_igmp_queryInterval_get
+ * Description:
+ *      Get IGMP/MLD Query Interval
+ * Input:
+ *      None.
+ * Output:
+ *      pInterval   - Query Interval
+ * Return:
+ *      RT_ERR_OK              - OK
+ *      RT_ERR_NULL_POINTER    - Null pointer
+ *      RT_ERR_FAILED          - Failed
+ *      RT_ERR_SMI             - SMI access error
+ * Note:
+ */
+extern rtk_api_ret_t rtk_igmp_queryInterval_get(rtk_uint32 *pInterval);
+
+/* Function Name:
+ *      rtk_igmp_robustness_set
+ * Description:
+ *      Set IGMP/MLD Robustness value
+ * Input:
+ *      robustness     - Robustness value
+ * Output:
+ *      None.
+ * Return:
+ *      RT_ERR_OK              - OK
+ *      RT_ERR_INPUT           - Error Input
+ *      RT_ERR_FAILED          - Failed
+ *      RT_ERR_SMI             - SMI access error
+ * Note:
+ */
+extern rtk_api_ret_t rtk_igmp_robustness_set(rtk_uint32 robustness);
+
+/* Function Name:
+ *      rtk_igmp_robustness_get
+ * Description:
+ *      Get IGMP/MLD Robustness value
+ * Input:
+ *      None
+ * Output:
+ *      pRobustness     - Robustness value.
+ * Return:
+ *      RT_ERR_OK              - OK
+ *      RT_ERR_NULL_POINTER    - Null pointer
+ *      RT_ERR_FAILED          - Failed
+ *      RT_ERR_SMI             - SMI access error
+ * Note:
+ */
+extern rtk_api_ret_t rtk_igmp_robustness_get(rtk_uint32 *pRobustness);
+
+/* Function Name:
+ *      rtk_igmp_dynamicRouterPortAllow_set
+ * Description:
+ *      Configure dynamic router port allow option
+ * Input:
+ *      pPortmask    - Dynamic Port allow mask
+ * Output:
+ *      None.
+ * Return:
+ *      RT_ERR_OK              - OK
+ *      RT_ERR_FAILED          - Failed
+ *      RT_ERR_SMI             - SMI access error
+ *      RT_ERR_PORT_MASK       - Error parameter
+ * Note:
+ *
+ */
+extern rtk_api_ret_t rtk_igmp_dynamicRouterPortAllow_set(rtk_portmask_t *pPortmask);
+
+/* Function Name:
+ *      rtk_igmp_dynamicRouterPortAllow_get
+ * Description:
+ *      Get dynamic router port allow option
+ * Input:
+ *      None.
+ * Output:
+ *      pPortmask    - Dynamic Port allow mask
+ * Return:
+ *      RT_ERR_OK              - OK
+ *      RT_ERR_FAILED          - Failed
+ *      RT_ERR_SMI             - SMI access error
+ *      RT_ERR_PORT_MASK       - Error parameter
+ * Note:
+ *
+ */
+extern rtk_api_ret_t rtk_igmp_dynamicRouterPortAllow_get(rtk_portmask_t *pPortmask);
+
+/* Function Name:
+ *      rtk_igmp_dynamicRouterPort_get
+ * Description:
+ *      Get dynamic router port
+ * Input:
+ *      None.
+ * Output:
+ *      pDynamicRouterPort    - Dynamic Router Port
+ * Return:
+ *      RT_ERR_OK              - OK
+ *      RT_ERR_FAILED          - Failed
+ *      RT_ERR_NULL_POINTER    - Null pointer
+ *      RT_ERR_SMI             - SMI access error
+ *      RT_ERR_PORT_MASK       - Error parameter
+ * Note:
+ *
+ */
+extern rtk_api_ret_t rtk_igmp_dynamicRouterPort_get(rtk_igmp_dynamicRouterPort_t *pDynamicRouterPort);
+
+/* Function Name:
+ *      rtk_igmp_suppressionEnable_set
+ * Description:
+ *      Configure IGMPv1/v2 & MLDv1 Report/Leave/Done suppression
+ * Input:
+ *      reportSuppression   - Report suppression
+ *      leaveSuppression    - Leave suppression
+ * Output:
+ *      None.
+ * Return:
+ *      RT_ERR_OK              - OK
+ *      RT_ERR_FAILED          - Failed
+ *      RT_ERR_SMI             - SMI access error
+ *      RT_ERR_INPUT           - Error Input
+ * Note:
+ *
+ */
+extern rtk_api_ret_t rtk_igmp_suppressionEnable_set(rtk_enable_t reportSuppression, rtk_enable_t leaveSuppression);
+
+/* Function Name:
+ *      rtk_igmp_suppressionEnable_get
+ * Description:
+ *      Get IGMPv1/v2 & MLDv1 Report/Leave/Done suppression
+ * Input:
+ *      None
+ * Output:
+ *      pReportSuppression  - Report suppression
+ *      pLeaveSuppression   - Leave suppression
+ * Return:
+ *      RT_ERR_OK              - OK
+ *      RT_ERR_FAILED          - Failed
+ *      RT_ERR_SMI             - SMI access error
+ *      RT_ERR_NULL_POINTER    - Null pointer
+ * Note:
+ *
+ */
+extern rtk_api_ret_t rtk_igmp_suppressionEnable_get(rtk_enable_t *pReportSuppression, rtk_enable_t *pLeaveSuppression);
+
+/* Function Name:
+ *      rtk_igmp_portRxPktEnable_set
+ * Description:
+ *      Set IGMP/MLD RX Packet configuration
+ * Input:
+ *      port       - Port ID
+ *      pRxCfg     - RX Packet Configuration
+ * Output:
+ *      None.
+ * Return:
+ *      RT_ERR_OK              - OK
+ *      RT_ERR_FAILED          - Failed
+ *      RT_ERR_SMI             - SMI access error
+ *      RT_ERR_INPUT           - Error Input
+ *      RT_ERR_NULL_POINTER    - Null pointer
+ * Note:
+ *
+ */
+extern rtk_api_ret_t rtk_igmp_portRxPktEnable_set(rtk_port_t port, rtk_igmp_rxPktEnable_t *pRxCfg);
+
+/* Function Name:
+ *      rtk_igmp_portRxPktEnable_get
+ * Description:
+ *      Get IGMP/MLD RX Packet configuration
+ * Input:
+ *      port       - Port ID
+ * Output:
+ *      pRxCfg     - RX Packet Configuration
+ * Return:
+ *      RT_ERR_OK              - OK
+ *      RT_ERR_FAILED          - Failed
+ *      RT_ERR_SMI             - SMI access error
+ *      RT_ERR_INPUT           - Error Input
+ *      RT_ERR_NULL_POINTER    - Null pointer
+ * Note:
+ *
+ */
+extern rtk_api_ret_t rtk_igmp_portRxPktEnable_get(rtk_port_t port, rtk_igmp_rxPktEnable_t *pRxCfg);
+
+/* Function Name:
+ *      rtk_igmp_groupInfo_get
+ * Description:
+ *      Get IGMP/MLD Group database
+ * Input:
+ *      index       - Index (0~255)
+ * Output:
+ *      pGroup      - Group database information.
+ * Return:
+ *      RT_ERR_OK              - OK
+ *      RT_ERR_FAILED          - Failed
+ *      RT_ERR_SMI             - SMI access error
+ *      RT_ERR_INPUT           - Error Input
+ *      RT_ERR_NULL_POINTER    - Null pointer
+ * Note:
+ *
+ */
+extern rtk_api_ret_t rtk_igmp_groupInfo_get(rtk_uint32 index, rtk_igmp_groupInfo_t *pGroup);
+
+/* Function Name:
+ *      rtk_igmp_ReportLeaveFwdAction_set
+ * Description:
+ *      Set Report Leave packet forwarding action
+ * Input:
+ *      action      - Action
+ * Output:
+ *      None.
+ * Return:
+ *      RT_ERR_OK              - OK
+ *      RT_ERR_FAILED          - Failed
+ *      RT_ERR_SMI             - SMI access error
+ *      RT_ERR_INPUT           - Error Input
+ * Note:
+ *
+ */
+extern rtk_api_ret_t rtk_igmp_ReportLeaveFwdAction_set(rtk_igmp_ReportLeaveFwdAct_t action);
+
+/* Function Name:
+ *      rtk_igmp_ReportLeaveFwdAction_get
+ * Description:
+ *      Get Report Leave packet forwarding action
+ * Input:
+ *      None.
+ * Output:
+ *      pAction     - Action
+ * Return:
+ *      RT_ERR_OK              - OK
+ *      RT_ERR_FAILED          - Failed
+ *      RT_ERR_SMI             - SMI access error
+ *      RT_ERR_INPUT           - Error Input
+ *      RT_ERR_NULL_POINTER    - Null Pointer
+ * Note:
+ *
+ */
+extern rtk_api_ret_t rtk_igmp_ReportLeaveFwdAction_get(rtk_igmp_ReportLeaveFwdAct_t *pAction);
+
+/* Function Name:
+ *      rtk_igmp_dropLeaveZeroEnable_set
+ * Description:
+ *      Set the function of dropping Leave packets with group IP = 0.0.0.0
+ * Input:
+ *      enabled      - Action. 1: drop, 0: pass
+ * Output:
+ *      None.
+ * Return:
+ *      RT_ERR_OK              - OK
+ *      RT_ERR_FAILED          - Failed
+ *      RT_ERR_SMI             - SMI access error
+ *      RT_ERR_INPUT           - Error Input
+ * Note:
+ *
+ */
+extern rtk_api_ret_t rtk_igmp_dropLeaveZeroEnable_set(rtk_enable_t enabled);
+
+/* Function Name:
+ *      rtk_igmp_dropLeaveZeroEnable_get
+ * Description:
+ *      Get the function of dropping Leave packets with group IP = 0.0.0.0
+ * Input:
+ *      None
+ * Output:
+ *      pEnabled    - Action. 1: drop, 0: pass
+ * Return:
+ *      RT_ERR_OK              - OK
+ *      RT_ERR_FAILED          - Failed
+ *      RT_ERR_SMI             - SMI access error
+ *      RT_ERR_INPUT           - Error Input
+ *      RT_ERR_NULL_POINTER    - Null Pointer
+ * Note:
+ *
+ */
+extern rtk_api_ret_t rtk_igmp_dropLeaveZeroEnable_get(rtk_enable_t *pEnabled);
+
+/* Function Name:
+ *      rtk_igmp_bypassGroupRange_set
+ * Description:
+ *      Set Bypass group
+ * Input:
+ *      group       - bypassed group
+ *      enabled     - 1: Bypassed, 0: not bypassed
+ * Output:
+ *      None.
+ * Return:
+ *      RT_ERR_OK              - OK
+ *      RT_ERR_FAILED          - Failed
+ *      RT_ERR_SMI             - SMI access error
+ *      RT_ERR_INPUT           - Error Input
+ * Note:
+ *
+ */
+extern rtk_api_ret_t rtk_igmp_bypassGroupRange_set(rtk_igmp_bypassGroup_t group, rtk_enable_t enabled);
+
+/* Function Name:
+ *      rtk_igmp_bypassGroupRange_get
+ * Description:
+ *      Get Bypass group
+ * Input:
+ *      group       - bypassed group
+ * Output:
+ *      pEnable     - 1: Bypassed, 0: not bypassed
+ * Return:
+ *      RT_ERR_OK              - OK
+ *      RT_ERR_FAILED          - Failed
+ *      RT_ERR_SMI             - SMI access error
+ *      RT_ERR_INPUT           - Error Input
+ *      RT_ERR_NULL_POINTER    - Null Pointer
+ * Note:
+ *
+ */
+extern rtk_api_ret_t rtk_igmp_bypassGroupRange_get(rtk_igmp_bypassGroup_t group, rtk_enable_t *pEnable);
+
+#endif /* __RTK_API_IGMP_H__ */
diff -Nruw linux-4.4.115-fbx/drivers/misc/freebox./rtlapi/interrupt.c linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/interrupt.c
--- linux-4.4.115-fbx/drivers/misc/freebox./rtlapi/interrupt.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/interrupt.c	2019-01-22 16:16:24.707257309 +0100
@@ -0,0 +1,514 @@
+/*
+ * Copyright (C) 2013 Realtek Semiconductor Corp.
+ * All Rights Reserved.
+ *
+ * This program is the proprietary software of Realtek Semiconductor
+ * Corporation and/or its licensors, and only be used, duplicated,
+ * modified or distributed under the authorized license from Realtek.
+ *
+ * ANY USE OF THE SOFTWARE OTHER THAN AS AUTHORIZED UNDER
+ * THIS LICENSE OR COPYRIGHT LAW IS PROHIBITED.
+ *
+ * $Revision: 79629 $
+ * $Date: 2017-06-14 18:08:03 +0800 (Wed, 14 Jun 2017) $
+ *
+ * Purpose : RTK switch high-level API for RTL8367/RTL8367C
+ * Feature : Here is a list of all functions and variables in Interrupt module.
+ *
+ */
+
+#include <rtk_switch.h>
+#include <rtk_error.h>
+#include <interrupt.h>
+#include <string.h>
+
+#include <rtl8367c_asicdrv.h>
+#include <rtl8367c_asicdrv_interrupt.h>
+
+static rtk_api_ret_t _rtk_int_polarity_set(rtk_int_polarity_t type)
+{
+    rtk_api_ret_t retVal;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if(type >= INT_POLAR_END)
+        return RT_ERR_INPUT;
+
+    if ((retVal = rtl8367c_setAsicInterruptPolarity(type)) != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_int_polarity_get(rtk_int_polarity_t *pType)
+{
+    rtk_api_ret_t retVal;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if(NULL == pType)
+        return RT_ERR_NULL_POINTER;
+
+    if ((retVal = rtl8367c_getAsicInterruptPolarity(pType)) != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_int_control_set(rtk_int_type_t type, rtk_enable_t enable)
+{
+    rtk_api_ret_t retVal;
+    rtk_uint32 mask;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if (type >= INT_TYPE_END)
+        return RT_ERR_INPUT;
+
+    if (type == INT_TYPE_RESERVED)
+        return RT_ERR_INPUT;
+
+    if ((retVal = rtl8367c_getAsicInterruptMask(&mask)) != RT_ERR_OK)
+        return retVal;
+
+    if (ENABLED == enable)
+        mask = mask | (1<<type);
+    else if (DISABLED == enable)
+        mask = mask & ~(1<<type);
+    else
+        return RT_ERR_INPUT;
+
+    if ((retVal = rtl8367c_setAsicInterruptMask(mask)) != RT_ERR_OK)
+        return retVal;
+
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_int_control_get(rtk_int_type_t type, rtk_enable_t *pEnable)
+{
+    rtk_api_ret_t retVal;
+    rtk_uint32 mask;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if(NULL == pEnable)
+        return RT_ERR_NULL_POINTER;
+
+    if ((retVal = rtl8367c_getAsicInterruptMask(&mask)) != RT_ERR_OK)
+        return retVal;
+
+    if (0 == (mask&(1<<type)))
+        *pEnable=DISABLED;
+    else
+        *pEnable=ENABLED;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_int_status_set(rtk_int_status_t *pStatusMask)
+{
+    rtk_api_ret_t retVal;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if(NULL == pStatusMask)
+        return RT_ERR_NULL_POINTER;
+
+    if(pStatusMask->value[0] & (0x0001 << INT_TYPE_RESERVED))
+        return RT_ERR_INPUT;
+
+    if(pStatusMask->value[0] >= (0x0001 << INT_TYPE_END))
+        return RT_ERR_INPUT;
+
+    if ((retVal = rtl8367c_setAsicInterruptStatus((rtk_uint32)pStatusMask->value[0]))!=RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_int_status_get(rtk_int_status_t* pStatusMask)
+{
+    rtk_api_ret_t   retVal;
+    rtk_uint32          ims_mask;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if(NULL == pStatusMask)
+        return RT_ERR_NULL_POINTER;
+
+    if ((retVal = rtl8367c_getAsicInterruptStatus(&ims_mask)) != RT_ERR_OK)
+        return retVal;
+
+    pStatusMask->value[0] = (ims_mask & 0x00000FFF);
+    return RT_ERR_OK;
+}
+
+#define ADV_NOT_SUPPORT (0xFFFF)
+static rtk_api_ret_t _rtk_int_Advidx_get(rtk_int_advType_t adv_type, rtk_uint32 *pAsic_idx)
+{
+    rtk_uint32 asic_idx[ADV_END] =
+    {
+        INTRST_L2_LEARN,
+        INTRST_SPEED_CHANGE,
+        INTRST_SPECIAL_CONGESTION,
+        INTRST_PORT_LINKDOWN,
+        INTRST_PORT_LINKUP,
+        ADV_NOT_SUPPORT,
+        INTRST_RLDP_LOOPED,
+        INTRST_RLDP_RELEASED,
+    };
+
+    if(adv_type >= ADV_END)
+        return RT_ERR_INPUT;
+
+    if(asic_idx[adv_type] == ADV_NOT_SUPPORT)
+        return RT_ERR_CHIP_NOT_SUPPORTED;
+
+    *pAsic_idx = asic_idx[adv_type];
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_int_advanceInfo_get(rtk_int_advType_t adv_type, rtk_int_info_t *pInfo)
+{
+    rtk_api_ret_t   retVal;
+    rtk_uint32      data;
+    rtk_uint32      intAdvType = 0;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if(adv_type >= ADV_END)
+        return RT_ERR_INPUT;
+
+    if(NULL == pInfo)
+        return RT_ERR_NULL_POINTER;
+
+    if(adv_type != ADV_METER_EXCEED_MASK)
+    {
+        if((retVal = _rtk_int_Advidx_get(adv_type, &intAdvType)) != RT_ERR_OK)
+            return retVal;
+    }
+
+    switch(adv_type)
+    {
+        case ADV_L2_LEARN_PORT_MASK:
+            /* Get physical portmask */
+            if((retVal = rtl8367c_getAsicInterruptRelatedStatus(intAdvType, &data)) != RT_ERR_OK)
+                return retVal;
+
+            /* Clear Advanced Info */
+            if((retVal = rtl8367c_setAsicInterruptRelatedStatus(intAdvType, 0xFFFF)) != RT_ERR_OK)
+                return retVal;
+
+            /* Translate to logical portmask */
+            if((retVal = rtk_switch_portmask_P2L_get(data, &(pInfo->portMask))) != RT_ERR_OK)
+                return retVal;
+
+            /* Get system learn */
+            if((retVal = rtl8367c_getAsicInterruptRelatedStatus(INTRST_SYS_LEARN, &data)) != RT_ERR_OK)
+                return retVal;
+
+            /* Clear system learn */
+            if((retVal = rtl8367c_setAsicInterruptRelatedStatus(INTRST_SYS_LEARN, 0x0001)) != RT_ERR_OK)
+                return retVal;
+
+            pInfo->systemLearnOver = data;
+            break;
+        case ADV_SPEED_CHANGE_PORT_MASK:
+        case ADV_SPECIAL_CONGESTION_PORT_MASK:
+        case ADV_PORT_LINKDOWN_PORT_MASK:
+        case ADV_PORT_LINKUP_PORT_MASK:
+        case ADV_RLDP_LOOPED:
+        case ADV_RLDP_RELEASED:
+            /* Get physical portmask */
+            if((retVal = rtl8367c_getAsicInterruptRelatedStatus(intAdvType, &data)) != RT_ERR_OK)
+                return retVal;
+
+            /* Clear Advanced Info */
+            if((retVal = rtl8367c_setAsicInterruptRelatedStatus(intAdvType, 0xFFFF)) != RT_ERR_OK)
+                return retVal;
+
+            /* Translate to logical portmask */
+            if((retVal = rtk_switch_portmask_P2L_get(data, &(pInfo->portMask))) != RT_ERR_OK)
+                return retVal;
+
+            break;
+        case ADV_METER_EXCEED_MASK:
+            /* Get Meter Mask */
+            if((retVal = rtl8367c_getAsicInterruptRelatedStatus(INTRST_METER0_15, &data)) != RT_ERR_OK)
+                return retVal;
+
+            /* Clear Advanced Info */
+            if((retVal = rtl8367c_setAsicInterruptRelatedStatus(INTRST_METER0_15, 0xFFFF)) != RT_ERR_OK)
+                return retVal;
+
+            pInfo->meterMask = data & 0xFFFF;
+
+            /* Get Meter Mask */
+            if((retVal = rtl8367c_getAsicInterruptRelatedStatus(INTRST_METER16_31, &data)) != RT_ERR_OK)
+                return retVal;
+
+            /* Clear Advanced Info */
+            if((retVal = rtl8367c_setAsicInterruptRelatedStatus(INTRST_METER16_31, 0xFFFF)) != RT_ERR_OK)
+                return retVal;
+
+            pInfo->meterMask = pInfo->meterMask | ((data << 16) & 0xFFFF0000);
+
+            break;
+        default:
+            return RT_ERR_INPUT;
+    }
+
+    return RT_ERR_OK;
+}
+
+
+/* Function Name:
+ *      rtk_int_polarity_set
+ * Description:
+ *      Set interrupt polarity configuration.
+ * Input:
+ *      type - Interrupt polarity type.
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_INPUT        - Invalid input parameters.
+ * Note:
+ *      The API can set interrupt polarity configuration.
+ */
+rtk_api_ret_t rtk_int_polarity_set(rtk_int_polarity_t type)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_int_polarity_set(type);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_int_polarity_get
+ * Description:
+ *      Get interrupt polarity configuration.
+ * Input:
+ *      None
+ * Output:
+ *      pType - Interrupt polarity type.
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ * Note:
+ *      The API can get interrupt polarity configuration.
+ */
+rtk_api_ret_t rtk_int_polarity_get(rtk_int_polarity_t *pType)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_int_polarity_get(pType);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_int_control_set
+ * Description:
+ *      Set interrupt trigger status configuration.
+ * Input:
+ *      type - Interrupt type.
+ *      enable - Interrupt status.
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_INPUT        - Invalid input parameters.
+ *      RT_ERR_ENABLE       - Invalid enable input.
+ * Note:
+ *      The API can set interrupt status configuration.
+ *      The interrupt trigger status is shown in the following:
+ *      - INT_TYPE_LINK_STATUS
+ *      - INT_TYPE_METER_EXCEED
+ *      - INT_TYPE_LEARN_LIMIT
+ *      - INT_TYPE_LINK_SPEED
+ *      - INT_TYPE_CONGEST
+ *      - INT_TYPE_GREEN_FEATURE
+ *      - INT_TYPE_LOOP_DETECT
+ *      - INT_TYPE_8051
+ *      - INT_TYPE_CABLE_DIAG
+ *      - INT_TYPE_ACL
+ *      - INT_TYPE_SLIENT
+ */
+rtk_api_ret_t rtk_int_control_set(rtk_int_type_t type, rtk_enable_t enable)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_int_control_set(type, enable);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
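+
+/*
+ * Illustrative usage (editor's sketch, not part of the original sources):
+ * route link-change events to the interrupt pin, active low. Both calls
+ * are public APIs from this file.
+ *
+ *    rtk_int_polarity_set(INT_POLAR_LOW);
+ *    rtk_int_control_set(INT_TYPE_LINK_STATUS, ENABLED);
+ */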
+
+/* Function Name:
+ *      rtk_int_control_get
+ * Description:
+ *      Get interrupt trigger status configuration.
+ * Input:
+ *      type - Interrupt type.
+ * Output:
+ *      pEnable - Interrupt status.
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_INPUT        - Invalid input parameters.
+ * Note:
+ *      The API can get interrupt status configuration.
+ *      The interrupt trigger status is shown in the following:
+ *      - INT_TYPE_LINK_STATUS
+ *      - INT_TYPE_METER_EXCEED
+ *      - INT_TYPE_LEARN_LIMIT
+ *      - INT_TYPE_LINK_SPEED
+ *      - INT_TYPE_CONGEST
+ *      - INT_TYPE_GREEN_FEATURE
+ *      - INT_TYPE_LOOP_DETECT
+ *      - INT_TYPE_8051
+ *      - INT_TYPE_CABLE_DIAG
+ *      - INT_TYPE_ACL
+ *      - INT_TYPE_SLIENT
+ */
+rtk_api_ret_t rtk_int_control_get(rtk_int_type_t type, rtk_enable_t *pEnable)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_int_control_get(type, pEnable);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_int_status_set
+ * Description:
+ *      Clear interrupt trigger status.
+ * Input:
+ *      pStatusMask - Interrupt status bit mask to clear.
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK              - OK
+ *      RT_ERR_FAILED          - Failed
+ *      RT_ERR_SMI             - SMI access error
+ *      RT_ERR_INPUT           - Invalid input parameters.
+ * Note:
+ *      The API can clear interrupt trigger status after an interrupt has occurred.
+ *      The interrupt trigger status is shown in the following:
+ *      - INT_TYPE_LINK_STATUS    (value[0] (Bit0))
+ *      - INT_TYPE_METER_EXCEED   (value[0] (Bit1))
+ *      - INT_TYPE_LEARN_LIMIT    (value[0] (Bit2))
+ *      - INT_TYPE_LINK_SPEED     (value[0] (Bit3))
+ *      - INT_TYPE_CONGEST        (value[0] (Bit4))
+ *      - INT_TYPE_GREEN_FEATURE  (value[0] (Bit5))
+ *      - INT_TYPE_LOOP_DETECT    (value[0] (Bit6))
+ *      - INT_TYPE_8051           (value[0] (Bit7))
+ *      - INT_TYPE_CABLE_DIAG     (value[0] (Bit8))
+ *      - INT_TYPE_ACL            (value[0] (Bit9))
+ *      - INT_TYPE_SLIENT         (value[0] (Bit11))
+ *      The status will be cleared after executing this API.
+ */
+rtk_api_ret_t rtk_int_status_set(rtk_int_status_t *pStatusMask)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_int_status_set(pStatusMask);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_int_status_get
+ * Description:
+ *      Get interrupt trigger status.
+ * Input:
+ *      None
+ * Output:
+ *      pStatusMask - Interrupt status bit mask.
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_INPUT        - Invalid input parameters.
+ * Note:
+ *      The API can get interrupt trigger status after an interrupt has occurred.
+ *      The interrupt trigger status is shown in the following:
+ *      - INT_TYPE_LINK_STATUS    (value[0] (Bit0))
+ *      - INT_TYPE_METER_EXCEED   (value[0] (Bit1))
+ *      - INT_TYPE_LEARN_LIMIT    (value[0] (Bit2))
+ *      - INT_TYPE_LINK_SPEED     (value[0] (Bit3))
+ *      - INT_TYPE_CONGEST        (value[0] (Bit4))
+ *      - INT_TYPE_GREEN_FEATURE  (value[0] (Bit5))
+ *      - INT_TYPE_LOOP_DETECT    (value[0] (Bit6))
+ *      - INT_TYPE_8051           (value[0] (Bit7))
+ *      - INT_TYPE_CABLE_DIAG     (value[0] (Bit8))
+ *      - INT_TYPE_ACL            (value[0] (Bit9))
+ *      - INT_TYPE_SLIENT         (value[0] (Bit11))
+ *
+ */
+rtk_api_ret_t rtk_int_status_get(rtk_int_status_t* pStatusMask)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_int_status_get(pStatusMask);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
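+
+/*
+ * Illustrative usage (editor's sketch, not part of the original sources):
+ * a typical handler reads the pending mask and writes the same mask back
+ * through rtk_int_status_set() to acknowledge it.
+ *
+ *    rtk_int_status_t st;
+ *
+ *    if ((rtk_int_status_get(&st) == RT_ERR_OK) &&
+ *        (st.value[0] & (1 << INT_TYPE_LINK_STATUS))) {
+ *        ... react to the link change ...
+ *        rtk_int_status_set(&st);
+ *    }
+ */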
+
+/* Function Name:
+ *      rtk_int_advanceInfo_get
+ * Description:
+ *      Get interrupt advanced information.
+ * Input:
+ *      adv_type - Advanced interrupt type.
+ * Output:
+ *      pInfo - Information per type.
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_INPUT        - Invalid input parameters.
+ * Note:
+ *      This API can get advanced information after an interrupt has occurred.
+ *      The status will be cleared after executing this API.
+ */
+rtk_api_ret_t rtk_int_advanceInfo_get(rtk_int_advType_t adv_type, rtk_int_info_t *pInfo)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_int_advanceInfo_get(adv_type, pInfo);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
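+
+/*
+ * Illustrative usage (editor's sketch, not part of the original sources):
+ * after a link-change interrupt, fetch (and thereby clear) the per-port
+ * link-down mask.
+ *
+ *    rtk_int_info_t info;
+ *
+ *    if (rtk_int_advanceInfo_get(ADV_PORT_LINKDOWN_PORT_MASK, &info) == RT_ERR_OK)
+ *        ... info.portMask now holds the logical ports that went down ...
+ */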
+
diff -Nruw linux-4.4.115-fbx/drivers/misc/freebox./rtlapi/interrupt.h linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/interrupt.h
--- linux-4.4.115-fbx/drivers/misc/freebox./rtlapi/interrupt.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/interrupt.h	2019-01-22 16:16:24.707257309 +0100
@@ -0,0 +1,256 @@
+/*
+ * Copyright (C) 2013 Realtek Semiconductor Corp.
+ * All Rights Reserved.
+ *
+ * This program is the proprietary software of Realtek Semiconductor
+ * Corporation and/or its licensors, and only be used, duplicated,
+ * modified or distributed under the authorized license from Realtek.
+ *
+ * ANY USE OF THE SOFTWARE OTHER THAN AS AUTHORIZED UNDER
+ * THIS LICENSE OR COPYRIGHT LAW IS PROHIBITED.
+ *
+ * Purpose : RTL8367/RTL8367C switch high-level API
+ *
+ * Feature : The file includes Interrupt module high-layer API definition
+ *
+ */
+
+#ifndef __RTK_API_INTERRUPT_H__
+#define __RTK_API_INTERRUPT_H__
+
+
+/*
+ * Data Type Declaration
+ */
+#define RTK_MAX_NUM_OF_INTERRUPT_TYPE               1
+
+
+typedef struct  rtk_int_status_s
+{
+    rtk_uint16 value[RTK_MAX_NUM_OF_INTERRUPT_TYPE];
+} rtk_int_status_t;
+
+typedef struct rtk_int_info_s
+{
+    rtk_portmask_t  portMask;
+    rtk_uint32      meterMask;
+    rtk_uint32      systemLearnOver;
+}rtk_int_info_t;
+
+typedef enum rtk_int_type_e
+{
+    INT_TYPE_LINK_STATUS = 0,
+    INT_TYPE_METER_EXCEED,
+    INT_TYPE_LEARN_LIMIT,
+    INT_TYPE_LINK_SPEED,
+    INT_TYPE_CONGEST,
+    INT_TYPE_GREEN_FEATURE,
+    INT_TYPE_LOOP_DETECT,
+    INT_TYPE_8051,
+    INT_TYPE_CABLE_DIAG,
+    INT_TYPE_ACL,
+    INT_TYPE_RESERVED, /* Unused */
+    INT_TYPE_SLIENT,
+    INT_TYPE_END
+}rtk_int_type_t;
+
+typedef enum rtk_int_advType_e
+{
+    ADV_L2_LEARN_PORT_MASK = 0,
+    ADV_SPEED_CHANGE_PORT_MASK,
+    ADV_SPECIAL_CONGESTION_PORT_MASK,
+    ADV_PORT_LINKDOWN_PORT_MASK,
+    ADV_PORT_LINKUP_PORT_MASK,
+    ADV_METER_EXCEED_MASK,
+    ADV_RLDP_LOOPED,
+    ADV_RLDP_RELEASED,
+    ADV_END,
+} rtk_int_advType_t;
+
+typedef enum rtk_int_polarity_e
+{
+    INT_POLAR_HIGH = 0,
+    INT_POLAR_LOW,
+    INT_POLAR_END
+} rtk_int_polarity_t;
+
+/* Function Name:
+ *      rtk_int_polarity_set
+ * Description:
+ *      Set interrupt polarity configuration.
+ * Input:
+ *      type - Interrupt polarity type.
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_INPUT        - Invalid input parameters.
+ * Note:
+ *      The API can set interrupt polarity configuration.
+ */
+extern rtk_api_ret_t rtk_int_polarity_set(rtk_int_polarity_t type);
+
+/* Function Name:
+ *      rtk_int_polarity_get
+ * Description:
+ *      Get interrupt polarity configuration.
+ * Input:
+ *      None
+ * Output:
+ *      pType - Interrupt polarity type.
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ * Note:
+ *      The API can get interrupt polarity configuration.
+ */
+extern rtk_api_ret_t rtk_int_polarity_get(rtk_int_polarity_t *pType);
+
+/* Function Name:
+ *      rtk_int_control_set
+ * Description:
+ *      Set interrupt trigger status configuration.
+ * Input:
+ *      type - Interrupt type.
+ *      enable - Interrupt status.
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_INPUT        - Invalid input parameters.
+ *      RT_ERR_ENABLE       - Invalid enable input.
+ * Note:
+ *      The API can set interrupt status configuration.
+ *      The interrupt trigger status is shown in the following:
+ *      - INT_TYPE_LINK_STATUS
+ *      - INT_TYPE_METER_EXCEED
+ *      - INT_TYPE_LEARN_LIMIT
+ *      - INT_TYPE_LINK_SPEED
+ *      - INT_TYPE_CONGEST
+ *      - INT_TYPE_GREEN_FEATURE
+ *      - INT_TYPE_LOOP_DETECT
+ *      - INT_TYPE_8051
+ *      - INT_TYPE_CABLE_DIAG
+ *      - INT_TYPE_ACL
+ *      - INT_TYPE_SLIENT
+ */
+extern rtk_api_ret_t rtk_int_control_set(rtk_int_type_t type, rtk_enable_t enable);
+
+/* Function Name:
+ *      rtk_int_control_get
+ * Description:
+ *      Get interrupt trigger status configuration.
+ * Input:
+ *      type - Interrupt type.
+ * Output:
+ *      pEnable - Interrupt status.
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_INPUT        - Invalid input parameters.
+ * Note:
+ *      The API can get interrupt status configuration.
+ *      The interrupt trigger status is shown in the following:
+ *      - INT_TYPE_LINK_STATUS
+ *      - INT_TYPE_METER_EXCEED
+ *      - INT_TYPE_LEARN_LIMIT
+ *      - INT_TYPE_LINK_SPEED
+ *      - INT_TYPE_CONGEST
+ *      - INT_TYPE_GREEN_FEATURE
+ *      - INT_TYPE_LOOP_DETECT
+ *      - INT_TYPE_8051
+ *      - INT_TYPE_CABLE_DIAG
+ *      - INT_TYPE_ACL
+ *      - INT_TYPE_SLIENT
+ */
+extern rtk_api_ret_t rtk_int_control_get(rtk_int_type_t type, rtk_enable_t* pEnable);
+
+/* Function Name:
+ *      rtk_int_status_set
+ * Description:
+ *      Clear interrupt trigger status.
+ * Input:
+ *      pStatusMask - Interrupt status bit mask to clear.
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK              - OK
+ *      RT_ERR_FAILED          - Failed
+ *      RT_ERR_SMI             - SMI access error
+ *      RT_ERR_INPUT           - Invalid input parameters.
+ * Note:
+ *      The API can clear interrupt trigger status after an interrupt has occurred.
+ *      The interrupt trigger status is shown in the following:
+ *      - INT_TYPE_LINK_STATUS    (value[0] (Bit0))
+ *      - INT_TYPE_METER_EXCEED   (value[0] (Bit1))
+ *      - INT_TYPE_LEARN_LIMIT    (value[0] (Bit2))
+ *      - INT_TYPE_LINK_SPEED     (value[0] (Bit3))
+ *      - INT_TYPE_CONGEST        (value[0] (Bit4))
+ *      - INT_TYPE_GREEN_FEATURE  (value[0] (Bit5))
+ *      - INT_TYPE_LOOP_DETECT    (value[0] (Bit6))
+ *      - INT_TYPE_8051           (value[0] (Bit7))
+ *      - INT_TYPE_CABLE_DIAG     (value[0] (Bit8))
+ *      - INT_TYPE_ACL            (value[0] (Bit9))
+ *      - INT_TYPE_SLIENT         (value[0] (Bit11))
+ *      The status will be cleared after executing this API.
+ */
+extern rtk_api_ret_t rtk_int_status_set(rtk_int_status_t *pStatusMask);
+
+/* Function Name:
+ *      rtk_int_status_get
+ * Description:
+ *      Get interrupt trigger status.
+ * Input:
+ *      None
+ * Output:
+ *      pStatusMask - Interrupt status bit mask.
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_INPUT        - Invalid input parameters.
+ * Note:
+ *      The API can get interrupt trigger status after an interrupt has occurred.
+ *      The interrupt trigger status is shown in the following:
+ *      - INT_TYPE_LINK_STATUS    (value[0] (Bit0))
+ *      - INT_TYPE_METER_EXCEED   (value[0] (Bit1))
+ *      - INT_TYPE_LEARN_LIMIT    (value[0] (Bit2))
+ *      - INT_TYPE_LINK_SPEED     (value[0] (Bit3))
+ *      - INT_TYPE_CONGEST        (value[0] (Bit4))
+ *      - INT_TYPE_GREEN_FEATURE  (value[0] (Bit5))
+ *      - INT_TYPE_LOOP_DETECT    (value[0] (Bit6))
+ *      - INT_TYPE_8051           (value[0] (Bit7))
+ *      - INT_TYPE_CABLE_DIAG     (value[0] (Bit8))
+ *      - INT_TYPE_ACL            (value[0] (Bit9))
+ *      - INT_TYPE_SLIENT         (value[0] (Bit11))
+ *
+ */
+extern rtk_api_ret_t rtk_int_status_get(rtk_int_status_t* pStatusMask);
+
+/* Function Name:
+ *      rtk_int_advanceInfo_get
+ * Description:
+ *      Get interrupt advanced information.
+ * Input:
+ *      adv_type - Advanced interrupt type.
+ * Output:
+ *      pInfo - Information per type.
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_INPUT        - Invalid input parameters.
+ * Note:
+ *      This API can get advanced information after an interrupt has occurred.
+ *      The status will be cleared after executing this API.
+ */
+extern rtk_api_ret_t rtk_int_advanceInfo_get(rtk_int_advType_t adv_type, rtk_int_info_t *pInfo);
+
+
+#endif /* __RTK_API_INTERRUPT_H__ */
diff -Nruw linux-4.4.115-fbx/drivers/misc/freebox./rtlapi/l2.c linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/l2.c
--- linux-4.4.115-fbx/drivers/misc/freebox./rtlapi/l2.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/l2.c	2019-01-22 16:16:24.707257309 +0100
@@ -0,0 +1,3457 @@
+/*
+ * Copyright (C) 2013 Realtek Semiconductor Corp.
+ * All Rights Reserved.
+ *
+ * This program is the proprietary software of Realtek Semiconductor
+ * Corporation and/or its licensors, and only be used, duplicated,
+ * modified or distributed under the authorized license from Realtek.
+ *
+ * ANY USE OF THE SOFTWARE OTHER THAN AS AUTHORIZED UNDER
+ * THIS LICENSE OR COPYRIGHT LAW IS PROHIBITED.
+ *
+ * $Revision: 82894 $
+ * $Date: 2017-10-24 14:47:07 +0800 (Tue, 24 Oct 2017) $
+ *
+ * Purpose : RTK switch high-level API for RTL8367/RTL8367C
+ * Feature : Here is a list of all functions and variables in L2 module.
+ *
+ */
+
+#include <rtk_switch.h>
+#include <rtk_error.h>
+#include <l2.h>
+#include <string.h>
+
+#include <rtl8367c_asicdrv.h>
+#include <rtl8367c_asicdrv_lut.h>
+#include <rtl8367c_asicdrv_port.h>
+
+static rtk_api_ret_t _rtk_l2_init(void)
+{
+    rtk_api_ret_t retVal;
+    rtk_uint32 port;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if ((retVal = rtl8367c_setAsicLutIpMulticastLookup(DISABLED)) != RT_ERR_OK)
+        return retVal;
+
+    /* Enable CAM usage */
+    if ((retVal = rtl8367c_setAsicLutCamTbUsage(ENABLED)) != RT_ERR_OK)
+        return retVal;
+
+    if ((retVal = rtl8367c_setAsicLutAgeTimerSpeed(6,2)) != RT_ERR_OK)
+        return retVal;
+
+    RTK_SCAN_ALL_LOG_PORT(port)
+    {
+        if ((retVal = rtl8367c_setAsicLutLearnLimitNo(rtk_switch_port_L2P_get(port), rtk_switch_maxLutAddrNumber_get())) != RT_ERR_OK)
+            return retVal;
+    }
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_l2_addr_add(rtk_mac_t *pMac, rtk_l2_ucastAddr_t *pL2_data)
+{
+    rtk_api_ret_t retVal;
+    rtk_uint32 method;
+    rtl8367c_luttb l2Table;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    /* must be unicast address */
+    if ((pMac == NULL) || (pMac->octet[0] & 0x1))
+        return RT_ERR_MAC;
+
+    if(pL2_data == NULL)
+        return RT_ERR_MAC;
+
+    RTK_CHK_PORT_VALID(pL2_data->port);
+
+    if (pL2_data->ivl >= RTK_ENABLE_END)
+        return RT_ERR_INPUT;
+
+    if (pL2_data->cvid > RTL8367C_VIDMAX)
+        return RT_ERR_L2_VID;
+
+    if (pL2_data->fid > RTL8367C_FIDMAX)
+        return RT_ERR_L2_FID;
+
+    if (pL2_data->is_static >= RTK_ENABLE_END)
+        return RT_ERR_INPUT;
+
+    if (pL2_data->sa_block >= RTK_ENABLE_END)
+        return RT_ERR_INPUT;
+
+    if (pL2_data->da_block >= RTK_ENABLE_END)
+        return RT_ERR_INPUT;
+
+    if (pL2_data->auth >= RTK_ENABLE_END)
+        return RT_ERR_INPUT;
+
+    if (pL2_data->efid > RTL8367C_EFIDMAX)
+        return RT_ERR_INPUT;
+
+    if (pL2_data->priority > RTL8367C_PRIMAX)
+        return RT_ERR_INPUT;
+
+    if (pL2_data->sa_pri_en >= RTK_ENABLE_END)
+        return RT_ERR_INPUT;
+
+    if (pL2_data->fwd_pri_en >= RTK_ENABLE_END)
+        return RT_ERR_INPUT;
+
+    memset(&l2Table, 0, sizeof(rtl8367c_luttb));
+
+    /* fill key (MAC,FID) to get L2 entry */
+    memcpy(l2Table.mac.octet, pMac->octet, ETHER_ADDR_LEN);
+    l2Table.ivl_svl     = pL2_data->ivl;
+    l2Table.fid         = pL2_data->fid;
+    l2Table.cvid_fid    = pL2_data->cvid;
+    l2Table.efid        = pL2_data->efid;
+    method = LUTREADMETHOD_MAC;
+    retVal = rtl8367c_getAsicL2LookupTb(method, &l2Table);
+    if (RT_ERR_OK == retVal)
+    {
+        memcpy(l2Table.mac.octet, pMac->octet, ETHER_ADDR_LEN);
+        l2Table.ivl_svl     = pL2_data->ivl;
+        l2Table.cvid_fid    = pL2_data->cvid;
+        l2Table.fid         = pL2_data->fid;
+        l2Table.efid        = pL2_data->efid;
+        l2Table.spa         = rtk_switch_port_L2P_get(pL2_data->port);
+        l2Table.nosalearn   = pL2_data->is_static;
+        l2Table.sa_block    = pL2_data->sa_block;
+        l2Table.da_block    = pL2_data->da_block;
+        l2Table.l3lookup    = 0;
+        l2Table.auth        = pL2_data->auth;
+        l2Table.age         = 6;
+        l2Table.lut_pri     = pL2_data->priority;
+        l2Table.sa_en       = pL2_data->sa_pri_en;
+        l2Table.fwd_en      = pL2_data->fwd_pri_en;
+        if((retVal = rtl8367c_setAsicL2LookupTb(&l2Table)) != RT_ERR_OK)
+            return retVal;
+
+        pL2_data->address = l2Table.address;
+        return RT_ERR_OK;
+    }
+    else if (RT_ERR_L2_ENTRY_NOTFOUND == retVal)
+    {
+        memset(&l2Table, 0, sizeof(rtl8367c_luttb));
+        memcpy(l2Table.mac.octet, pMac->octet, ETHER_ADDR_LEN);
+        l2Table.ivl_svl     = pL2_data->ivl;
+        l2Table.cvid_fid    = pL2_data->cvid;
+        l2Table.fid         = pL2_data->fid;
+        l2Table.efid        = pL2_data->efid;
+        l2Table.spa         = rtk_switch_port_L2P_get(pL2_data->port);
+        l2Table.nosalearn   = pL2_data->is_static;
+        l2Table.sa_block    = pL2_data->sa_block;
+        l2Table.da_block    = pL2_data->da_block;
+        l2Table.l3lookup    = 0;
+        l2Table.auth        = pL2_data->auth;
+        l2Table.age         = 6;
+        l2Table.lut_pri     = pL2_data->priority;
+        l2Table.sa_en       = pL2_data->sa_pri_en;
+        l2Table.fwd_en      = pL2_data->fwd_pri_en;
+
+        if ((retVal = rtl8367c_setAsicL2LookupTb(&l2Table)) != RT_ERR_OK)
+            return retVal;
+
+        pL2_data->address = l2Table.address;
+
+        method = LUTREADMETHOD_MAC;
+        retVal = rtl8367c_getAsicL2LookupTb(method, &l2Table);
+        if (RT_ERR_L2_ENTRY_NOTFOUND == retVal)
+            return RT_ERR_L2_INDEXTBL_FULL;
+        else
+            return retVal;
+    }
+    else
+        return retVal;
+}
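+
+/*
+ * Illustrative usage (editor's sketch, not part of the original sources):
+ * pin a static unicast entry to a port through the public wrapper
+ * rtk_l2_addr_add() that this file is assumed to export, mirroring the
+ * other modules. The MAC and port values are placeholders.
+ *
+ *    rtk_mac_t mac = { .octet = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 } };
+ *    rtk_l2_ucastAddr_t entry;
+ *
+ *    memset(&entry, 0, sizeof(entry));
+ *    entry.port      = 0;
+ *    entry.ivl       = ENABLED;
+ *    entry.cvid      = 1;
+ *    entry.is_static = ENABLED;
+ *    if (rtk_l2_addr_add(&mac, &entry) != RT_ERR_OK)
+ *        ... handle the error ...
+ */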
+
+static rtk_api_ret_t _rtk_l2_addr_get(rtk_mac_t *pMac, rtk_l2_ucastAddr_t *pL2_data)
+{
+    rtk_api_ret_t retVal;
+    rtk_uint32 method;
+    rtl8367c_luttb l2Table;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    /* must be unicast address */
+    if ((pMac == NULL) || (pMac->octet[0] & 0x1))
+        return RT_ERR_MAC;
+
+    if (pL2_data->fid > RTL8367C_FIDMAX || pL2_data->efid > RTL8367C_EFIDMAX)
+        return RT_ERR_L2_FID;
+
+    memset(&l2Table, 0, sizeof(rtl8367c_luttb));
+
+    memcpy(l2Table.mac.octet, pMac->octet, ETHER_ADDR_LEN);
+    l2Table.ivl_svl     = pL2_data->ivl;
+    l2Table.cvid_fid    = pL2_data->cvid;
+    l2Table.fid         = pL2_data->fid;
+    l2Table.efid        = pL2_data->efid;
+    method = LUTREADMETHOD_MAC;
+
+    if ((retVal = rtl8367c_getAsicL2LookupTb(method, &l2Table)) != RT_ERR_OK)
+        return retVal;
+
+    memcpy(pL2_data->mac.octet, pMac->octet, ETHER_ADDR_LEN);
+    pL2_data->port      = rtk_switch_port_P2L_get(l2Table.spa);
+    pL2_data->fid       = l2Table.fid;
+    pL2_data->efid      = l2Table.efid;
+    pL2_data->ivl       = l2Table.ivl_svl;
+    pL2_data->cvid      = l2Table.cvid_fid;
+    pL2_data->is_static = l2Table.nosalearn;
+    pL2_data->auth      = l2Table.auth;
+    pL2_data->sa_block  = l2Table.sa_block;
+    pL2_data->da_block  = l2Table.da_block;
+    pL2_data->priority  = l2Table.lut_pri;
+    pL2_data->sa_pri_en = l2Table.sa_en;
+    pL2_data->fwd_pri_en = l2Table.fwd_en;
+    pL2_data->address   = l2Table.address;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_l2_addr_next_get(rtk_l2_read_method_t read_method, rtk_port_t port, rtk_uint32 *pAddress, rtk_l2_ucastAddr_t *pL2_data)
+{
+    rtk_api_ret_t   retVal;
+    rtk_uint32      method;
+    rtl8367c_luttb  l2Table;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    /* Error Checking */
+    if ((pL2_data == NULL) || (pAddress == NULL))
+        return RT_ERR_MAC;
+
+    if(read_method == READMETHOD_NEXT_L2UC)
+        method = LUTREADMETHOD_NEXT_L2UC;
+    else if(read_method == READMETHOD_NEXT_L2UCSPA)
+        method = LUTREADMETHOD_NEXT_L2UCSPA;
+    else
+        return RT_ERR_INPUT;
+
+    if(read_method == READMETHOD_NEXT_L2UCSPA)
+    {
+        /* Check Port Valid */
+        RTK_CHK_PORT_VALID(port);
+    }
+
+    if(*pAddress > RTK_MAX_LUT_ADDR_ID)
+        return RT_ERR_L2_L2UNI_PARAM;
+
+    memset(&l2Table, 0, sizeof(rtl8367c_luttb));
+    l2Table.address = *pAddress;
+
+    if(read_method == READMETHOD_NEXT_L2UCSPA)
+        l2Table.spa = rtk_switch_port_L2P_get(port);
+
+    if ((retVal = rtl8367c_getAsicL2LookupTb(method, &l2Table)) != RT_ERR_OK)
+        return retVal;
+
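+    /* Address wrapped around: no valid entry exists at or after *pAddress */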
+    if(l2Table.address < *pAddress)
+        return RT_ERR_L2_ENTRY_NOTFOUND;
+
+    memcpy(pL2_data->mac.octet, l2Table.mac.octet, ETHER_ADDR_LEN);
+    pL2_data->port      = rtk_switch_port_P2L_get(l2Table.spa);
+    pL2_data->fid       = l2Table.fid;
+    pL2_data->efid      = l2Table.efid;
+    pL2_data->ivl       = l2Table.ivl_svl;
+    pL2_data->cvid      = l2Table.cvid_fid;
+    pL2_data->is_static = l2Table.nosalearn;
+    pL2_data->auth      = l2Table.auth;
+    pL2_data->sa_block  = l2Table.sa_block;
+    pL2_data->da_block  = l2Table.da_block;
+    pL2_data->priority  = l2Table.lut_pri;
+    pL2_data->sa_pri_en = l2Table.sa_en;
+    pL2_data->fwd_pri_en = l2Table.fwd_en;
+    pL2_data->address   = l2Table.address;
+
+    *pAddress = l2Table.address;
+
+    return RT_ERR_OK;
+
+}
+
+static rtk_api_ret_t _rtk_l2_addr_del(rtk_mac_t *pMac, rtk_l2_ucastAddr_t *pL2_data)
+{
+    rtk_api_ret_t retVal;
+    rtk_uint32 method;
+    rtl8367c_luttb l2Table;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    /* must be unicast address */
+    if ((pMac == NULL) || (pMac->octet[0] & 0x1))
+        return RT_ERR_MAC;
+
+    if (pL2_data->fid > RTL8367C_FIDMAX || pL2_data->efid > RTL8367C_EFIDMAX)
+        return RT_ERR_L2_FID;
+
+    memset(&l2Table, 0, sizeof(rtl8367c_luttb));
+
+    /* fill key (MAC,FID) to get L2 entry */
+    memcpy(l2Table.mac.octet, pMac->octet, ETHER_ADDR_LEN);
+    l2Table.ivl_svl     = pL2_data->ivl;
+    l2Table.cvid_fid    = pL2_data->cvid;
+    l2Table.fid         = pL2_data->fid;
+    l2Table.efid        = pL2_data->efid;
+    method = LUTREADMETHOD_MAC;
+    retVal = rtl8367c_getAsicL2LookupTb(method, &l2Table);
+    if (RT_ERR_OK == retVal)
+    {
+        memcpy(l2Table.mac.octet, pMac->octet, ETHER_ADDR_LEN);
+        l2Table.ivl_svl     = pL2_data->ivl;
+        l2Table.cvid_fid    = pL2_data->cvid;
+        l2Table.fid = pL2_data->fid;
+        l2Table.efid = pL2_data->efid;
+        l2Table.spa = 0;
+        l2Table.nosalearn = 0;
+        l2Table.sa_block = 0;
+        l2Table.da_block = 0;
+        l2Table.auth = 0;
+        l2Table.age = 0;
+        l2Table.lut_pri = 0;
+        l2Table.sa_en = 0;
+        l2Table.fwd_en = 0;
+        if((retVal = rtl8367c_setAsicL2LookupTb(&l2Table)) != RT_ERR_OK)
+            return retVal;
+
+        pL2_data->address = l2Table.address;
+        return RT_ERR_OK;
+    }
+    else
+        return retVal;
+}
+
+static rtk_api_ret_t _rtk_l2_mcastAddr_add(rtk_l2_mcastAddr_t *pMcastAddr)
+{
+    rtk_api_ret_t   retVal;
+    rtk_uint32      method;
+    rtl8367c_luttb  l2Table;
+    rtk_uint32      pmask;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if(NULL == pMcastAddr)
+        return RT_ERR_NULL_POINTER;
+
+    /* must be L2 multicast address */
+    if( (pMcastAddr->mac.octet[0] & 0x01) != 0x01)
+        return RT_ERR_MAC;
+
+    RTK_CHK_PORTMASK_VALID(&pMcastAddr->portmask);
+
+    if(pMcastAddr->ivl == 1)
+    {
+        if (pMcastAddr->vid > RTL8367C_VIDMAX)
+            return RT_ERR_L2_VID;
+    }
+    else if(pMcastAddr->ivl == 0)
+    {
+        if (pMcastAddr->fid > RTL8367C_FIDMAX)
+            return RT_ERR_L2_FID;
+    }
+    else
+        return RT_ERR_INPUT;
+
+    if(pMcastAddr->fwd_pri_en >= RTK_ENABLE_END)
+        return RT_ERR_INPUT;
+
+    if(pMcastAddr->priority > RTL8367C_PRIMAX)
+        return RT_ERR_INPUT;
+
+    /* Get physical port mask */
+    if ((retVal = rtk_switch_portmask_L2P_get(&pMcastAddr->portmask, &pmask)) != RT_ERR_OK)
+        return retVal;
+
+    memset(&l2Table, 0, sizeof(rtl8367c_luttb));
+
+    /* fill key (MAC,FID) to get L2 entry */
+    memcpy(l2Table.mac.octet, pMcastAddr->mac.octet, ETHER_ADDR_LEN);
+    l2Table.ivl_svl     = pMcastAddr->ivl;
+
+    if(pMcastAddr->ivl)
+        l2Table.cvid_fid    = pMcastAddr->vid;
+    else
+        l2Table.cvid_fid    = pMcastAddr->fid;
+
+    method = LUTREADMETHOD_MAC;
+    retVal = rtl8367c_getAsicL2LookupTb(method, &l2Table);
+    if (RT_ERR_OK == retVal)
+    {
+        memcpy(l2Table.mac.octet, pMcastAddr->mac.octet, ETHER_ADDR_LEN);
+        l2Table.ivl_svl     = pMcastAddr->ivl;
+
+        if(pMcastAddr->ivl)
+            l2Table.cvid_fid    = pMcastAddr->vid;
+        else
+            l2Table.cvid_fid    = pMcastAddr->fid;
+
+        l2Table.mbr         = pmask;
+        l2Table.nosalearn   = 1;
+        l2Table.l3lookup    = 0;
+        l2Table.lut_pri     = pMcastAddr->priority;
+        l2Table.fwd_en      = pMcastAddr->fwd_pri_en;
+        if((retVal = rtl8367c_setAsicL2LookupTb(&l2Table)) != RT_ERR_OK)
+            return retVal;
+
+        pMcastAddr->address = l2Table.address;
+        return RT_ERR_OK;
+    }
+    else if (RT_ERR_L2_ENTRY_NOTFOUND == retVal)
+    {
+        memset(&l2Table, 0, sizeof(rtl8367c_luttb));
+        memcpy(l2Table.mac.octet, pMcastAddr->mac.octet, ETHER_ADDR_LEN);
+        l2Table.ivl_svl     = pMcastAddr->ivl;
+        if(pMcastAddr->ivl)
+            l2Table.cvid_fid    = pMcastAddr->vid;
+        else
+            l2Table.cvid_fid    = pMcastAddr->fid;
+
+        l2Table.mbr         = pmask;
+        l2Table.nosalearn   = 1;
+        l2Table.l3lookup    = 0;
+        l2Table.lut_pri     = pMcastAddr->priority;
+        l2Table.fwd_en      = pMcastAddr->fwd_pri_en;
+        if ((retVal = rtl8367c_setAsicL2LookupTb(&l2Table)) != RT_ERR_OK)
+            return retVal;
+
+        pMcastAddr->address = l2Table.address;
+
+        method = LUTREADMETHOD_MAC;
+        retVal = rtl8367c_getAsicL2LookupTb(method, &l2Table);
+        if (RT_ERR_L2_ENTRY_NOTFOUND == retVal)
+            return RT_ERR_L2_INDEXTBL_FULL;
+        else
+            return retVal;
+    }
+    else
+        return retVal;
+
+}
+
+static rtk_api_ret_t _rtk_l2_mcastAddr_get(rtk_l2_mcastAddr_t *pMcastAddr)
+{
+    rtk_api_ret_t retVal;
+    rtk_uint32 method;
+    rtl8367c_luttb l2Table;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if(NULL == pMcastAddr)
+        return RT_ERR_NULL_POINTER;
+
+    /* must be L2 multicast address */
+    if( (pMcastAddr->mac.octet[0] & 0x01) != 0x01)
+        return RT_ERR_MAC;
+
+    if(pMcastAddr->ivl == 1)
+    {
+        if (pMcastAddr->vid > RTL8367C_VIDMAX)
+            return RT_ERR_L2_VID;
+    }
+    else if(pMcastAddr->ivl == 0)
+    {
+        if (pMcastAddr->fid > RTL8367C_FIDMAX)
+            return RT_ERR_L2_FID;
+    }
+    else
+        return RT_ERR_INPUT;
+
+    memset(&l2Table, 0, sizeof(rtl8367c_luttb));
+    memcpy(l2Table.mac.octet, pMcastAddr->mac.octet, ETHER_ADDR_LEN);
+    l2Table.ivl_svl     = pMcastAddr->ivl;
+
+    if(pMcastAddr->ivl)
+        l2Table.cvid_fid    = pMcastAddr->vid;
+    else
+        l2Table.cvid_fid    = pMcastAddr->fid;
+
+    method = LUTREADMETHOD_MAC;
+
+    if ((retVal = rtl8367c_getAsicL2LookupTb(method, &l2Table)) != RT_ERR_OK)
+        return retVal;
+
+    pMcastAddr->priority    = l2Table.lut_pri;
+    pMcastAddr->fwd_pri_en  = l2Table.fwd_en;
+    pMcastAddr->igmp_asic   = l2Table.igmp_asic;
+    pMcastAddr->igmp_index  = l2Table.igmpidx;
+    pMcastAddr->address     = l2Table.address;
+
+    /* Get Logical port mask */
+    if ((retVal = rtk_switch_portmask_P2L_get(l2Table.mbr, &pMcastAddr->portmask)) != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_l2_mcastAddr_next_get(rtk_uint32 *pAddress, rtk_l2_mcastAddr_t *pMcastAddr)
+{
+    rtk_api_ret_t   retVal;
+    rtl8367c_luttb  l2Table;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    /* Error Checking */
+    if ((pAddress == NULL) || (pMcastAddr == NULL))
+        return RT_ERR_INPUT;
+
+    if(*pAddress > RTK_MAX_LUT_ADDR_ID)
+        return RT_ERR_L2_L2UNI_PARAM;
+
+    memset(&l2Table, 0, sizeof(rtl8367c_luttb));
+    l2Table.address = *pAddress;
+
+    if ((retVal = rtl8367c_getAsicL2LookupTb(LUTREADMETHOD_NEXT_L2MC, &l2Table)) != RT_ERR_OK)
+        return retVal;
+
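+    /* A wrapped-around address means the search found no entry at or after *pAddress */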
+    if(l2Table.address < *pAddress)
+        return RT_ERR_L2_ENTRY_NOTFOUND;
+
+    memcpy(pMcastAddr->mac.octet, l2Table.mac.octet, ETHER_ADDR_LEN);
+    pMcastAddr->ivl     = l2Table.ivl_svl;
+
+    if(pMcastAddr->ivl)
+        pMcastAddr->vid = l2Table.cvid_fid;
+    else
+        pMcastAddr->fid = l2Table.cvid_fid;
+
+    pMcastAddr->priority    = l2Table.lut_pri;
+    pMcastAddr->fwd_pri_en  = l2Table.fwd_en;
+    pMcastAddr->igmp_asic   = l2Table.igmp_asic;
+    pMcastAddr->igmp_index  = l2Table.igmpidx;
+    pMcastAddr->address     = l2Table.address;
+
+    /* Get Logical port mask */
+    if ((retVal = rtk_switch_portmask_P2L_get(l2Table.mbr, &pMcastAddr->portmask)) != RT_ERR_OK)
+        return retVal;
+
+    *pAddress = l2Table.address;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_l2_mcastAddr_del(rtk_l2_mcastAddr_t *pMcastAddr)
+{
+    rtk_api_ret_t retVal;
+    rtk_uint32 method;
+    rtl8367c_luttb l2Table;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if(NULL == pMcastAddr)
+        return RT_ERR_NULL_POINTER;
+
+    /* must be L2 multicast address */
+    if( (pMcastAddr->mac.octet[0] & 0x01) != 0x01)
+        return RT_ERR_MAC;
+
+    if(pMcastAddr->ivl == 1)
+    {
+        if (pMcastAddr->vid > RTL8367C_VIDMAX)
+            return RT_ERR_L2_VID;
+    }
+    else if(pMcastAddr->ivl == 0)
+    {
+        if (pMcastAddr->fid > RTL8367C_FIDMAX)
+            return RT_ERR_L2_FID;
+    }
+    else
+        return RT_ERR_INPUT;
+
+    memset(&l2Table, 0, sizeof(rtl8367c_luttb));
+
+    /* fill key (MAC,FID) to get L2 entry */
+    memcpy(l2Table.mac.octet, pMcastAddr->mac.octet, ETHER_ADDR_LEN);
+    l2Table.ivl_svl     = pMcastAddr->ivl;
+
+    if(pMcastAddr->ivl)
+        l2Table.cvid_fid    = pMcastAddr->vid;
+    else
+        l2Table.cvid_fid    = pMcastAddr->fid;
+
+    method = LUTREADMETHOD_MAC;
+    retVal = rtl8367c_getAsicL2LookupTb(method, &l2Table);
+    if (RT_ERR_OK == retVal)
+    {
+        memcpy(l2Table.mac.octet, pMcastAddr->mac.octet, ETHER_ADDR_LEN);
+        l2Table.ivl_svl     = pMcastAddr->ivl;
+
+        if(pMcastAddr->ivl)
+            l2Table.cvid_fid    = pMcastAddr->vid;
+        else
+            l2Table.cvid_fid    = pMcastAddr->fid;
+
+        l2Table.mbr         = 0;
+        l2Table.nosalearn   = 0;
+        l2Table.sa_block    = 0;
+        l2Table.l3lookup    = 0;
+        l2Table.lut_pri     = 0;
+        l2Table.fwd_en      = 0;
+        if((retVal = rtl8367c_setAsicL2LookupTb(&l2Table)) != RT_ERR_OK)
+            return retVal;
+
+        pMcastAddr->address = l2Table.address;
+        return RT_ERR_OK;
+    }
+    else
+        return retVal;
+}
+
+static rtk_api_ret_t _rtk_l2_ipMcastAddr_add(rtk_l2_ipMcastAddr_t *pIpMcastAddr)
+{
+    rtk_api_ret_t retVal;
+    rtk_uint32 method;
+    rtl8367c_luttb l2Table;
+    rtk_uint32 pmask;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if(NULL == pIpMcastAddr)
+        return RT_ERR_NULL_POINTER;
+
+    /* check port mask */
+    RTK_CHK_PORTMASK_VALID(&pIpMcastAddr->portmask);
+
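+    /* DIP must be an IPv4 multicast (class D) address, i.e. within 224.0.0.0/4 */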
+    if( (pIpMcastAddr->dip & 0xF0000000) != 0xE0000000)
+        return RT_ERR_INPUT;
+
+    if(pIpMcastAddr->fwd_pri_en >= RTK_ENABLE_END)
+        return RT_ERR_ENABLE;
+
+    if(pIpMcastAddr->priority > RTL8367C_PRIMAX)
+        return RT_ERR_INPUT;
+
+    /* Get Physical port mask */
+    if ((retVal = rtk_switch_portmask_L2P_get(&pIpMcastAddr->portmask, &pmask)) != RT_ERR_OK)
+        return retVal;
+
+    memset(&l2Table, 0x00, sizeof(rtl8367c_luttb));
+    l2Table.sip = pIpMcastAddr->sip;
+    l2Table.dip = pIpMcastAddr->dip;
+    l2Table.l3lookup = 1;
+    l2Table.l3vidlookup = 0;
+    method = LUTREADMETHOD_MAC;
+    retVal = rtl8367c_getAsicL2LookupTb(method, &l2Table);
+    if (RT_ERR_OK == retVal)
+    {
+        l2Table.sip = pIpMcastAddr->sip;
+        l2Table.dip = pIpMcastAddr->dip;
+        l2Table.mbr = pmask;
+        l2Table.nosalearn = 1;
+        l2Table.l3lookup = 1;
+        l2Table.l3vidlookup = 0;
+        l2Table.lut_pri = pIpMcastAddr->priority;
+        l2Table.fwd_en  = pIpMcastAddr->fwd_pri_en;
+        if((retVal = rtl8367c_setAsicL2LookupTb(&l2Table)) != RT_ERR_OK)
+            return retVal;
+
+        pIpMcastAddr->address = l2Table.address;
+        return RT_ERR_OK;
+    }
+    else if (RT_ERR_L2_ENTRY_NOTFOUND == retVal)
+    {
+        memset(&l2Table, 0, sizeof(rtl8367c_luttb));
+        l2Table.sip = pIpMcastAddr->sip;
+        l2Table.dip = pIpMcastAddr->dip;
+        l2Table.mbr = pmask;
+        l2Table.nosalearn = 1;
+        l2Table.l3lookup = 1;
+        l2Table.l3vidlookup = 0;
+        l2Table.lut_pri = pIpMcastAddr->priority;
+        l2Table.fwd_en  = pIpMcastAddr->fwd_pri_en;
+        if ((retVal = rtl8367c_setAsicL2LookupTb(&l2Table)) != RT_ERR_OK)
+            return retVal;
+
+        pIpMcastAddr->address = l2Table.address;
+
+        method = LUTREADMETHOD_MAC;
+        retVal = rtl8367c_getAsicL2LookupTb(method, &l2Table);
+        if (RT_ERR_L2_ENTRY_NOTFOUND == retVal)
+            return RT_ERR_L2_INDEXTBL_FULL;
+        else
+            return retVal;
+
+    }
+    else
+        return retVal;
+
+}
+
+static rtk_api_ret_t _rtk_l2_ipMcastAddr_get(rtk_l2_ipMcastAddr_t *pIpMcastAddr)
+{
+    rtk_api_ret_t retVal;
+    rtk_uint32 method;
+    rtl8367c_luttb l2Table;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if(NULL == pIpMcastAddr)
+        return RT_ERR_NULL_POINTER;
+
+    if( (pIpMcastAddr->dip & 0xF0000000) != 0xE0000000)
+        return RT_ERR_INPUT;
+
+    memset(&l2Table, 0x00, sizeof(rtl8367c_luttb));
+    l2Table.sip = pIpMcastAddr->sip;
+    l2Table.dip = pIpMcastAddr->dip;
+    l2Table.l3lookup = 1;
+    l2Table.l3vidlookup = 0;
+    method = LUTREADMETHOD_MAC;
+    if ((retVal = rtl8367c_getAsicL2LookupTb(method, &l2Table)) != RT_ERR_OK)
+        return retVal;
+
+    /* Get Logical port mask */
+    if ((retVal = rtk_switch_portmask_P2L_get(l2Table.mbr, &pIpMcastAddr->portmask)) != RT_ERR_OK)
+        return retVal;
+
+    pIpMcastAddr->priority      = l2Table.lut_pri;
+    pIpMcastAddr->fwd_pri_en    = l2Table.fwd_en;
+    pIpMcastAddr->igmp_asic     = l2Table.igmp_asic;
+    pIpMcastAddr->igmp_index    = l2Table.igmpidx;
+    pIpMcastAddr->address       = l2Table.address;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_l2_ipMcastAddr_next_get(rtk_uint32 *pAddress, rtk_l2_ipMcastAddr_t *pIpMcastAddr)
+{
+    rtk_api_ret_t   retVal;
+    rtl8367c_luttb  l2Table;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    /* Error Checking */
+    if ((pAddress == NULL) || (pIpMcastAddr == NULL) )
+        return RT_ERR_INPUT;
+
+    if(*pAddress > RTK_MAX_LUT_ADDR_ID)
+        return RT_ERR_L2_L2UNI_PARAM;
+
+    memset(&l2Table, 0, sizeof(rtl8367c_luttb));
+    l2Table.address = *pAddress;
+
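+    /* Skip IP+VID entries; this iterator returns plain IP multicast entries only */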
+    do
+    {
+        if ((retVal = rtl8367c_getAsicL2LookupTb(LUTREADMETHOD_NEXT_L3MC, &l2Table)) != RT_ERR_OK)
+            return retVal;
+
+        if(l2Table.address < *pAddress)
+            return RT_ERR_L2_ENTRY_NOTFOUND;
+
+    }while(l2Table.l3vidlookup == 1);
+
+    pIpMcastAddr->sip = l2Table.sip;
+    pIpMcastAddr->dip = l2Table.dip;
+
+    /* Get Logical port mask */
+    if ((retVal = rtk_switch_portmask_P2L_get(l2Table.mbr, &pIpMcastAddr->portmask)) != RT_ERR_OK)
+        return retVal;
+
+    pIpMcastAddr->priority      = l2Table.lut_pri;
+    pIpMcastAddr->fwd_pri_en    = l2Table.fwd_en;
+    pIpMcastAddr->igmp_asic     = l2Table.igmp_asic;
+    pIpMcastAddr->igmp_index    = l2Table.igmpidx;
+    pIpMcastAddr->address       = l2Table.address;
+    *pAddress = l2Table.address;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_l2_ipMcastAddr_del(rtk_l2_ipMcastAddr_t *pIpMcastAddr)
+{
+    rtk_api_ret_t retVal;
+    rtk_uint32 method;
+    rtl8367c_luttb l2Table;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    /* Error Checking */
+    if (pIpMcastAddr == NULL)
+        return RT_ERR_INPUT;
+
+    if( (pIpMcastAddr->dip & 0xF0000000) != 0xE0000000)
+        return RT_ERR_INPUT;
+
+    memset(&l2Table, 0x00, sizeof(rtl8367c_luttb));
+    l2Table.sip = pIpMcastAddr->sip;
+    l2Table.dip = pIpMcastAddr->dip;
+    l2Table.l3lookup = 1;
+    l2Table.l3vidlookup = 0;
+    method = LUTREADMETHOD_MAC;
+    retVal = rtl8367c_getAsicL2LookupTb(method, &l2Table);
+    if (RT_ERR_OK == retVal)
+    {
+        l2Table.sip = pIpMcastAddr->sip;
+        l2Table.dip = pIpMcastAddr->dip;
+        l2Table.mbr = 0;
+        l2Table.nosalearn = 0;
+        l2Table.l3lookup = 1;
+        l2Table.l3vidlookup = 0;
+        l2Table.lut_pri = 0;
+        l2Table.fwd_en  = 0;
+        if((retVal = rtl8367c_setAsicL2LookupTb(&l2Table)) != RT_ERR_OK)
+            return retVal;
+
+        pIpMcastAddr->address = l2Table.address;
+        return RT_ERR_OK;
+    }
+    else
+        return retVal;
+}
+
+static rtk_api_ret_t _rtk_l2_ipVidMcastAddr_add(rtk_l2_ipVidMcastAddr_t *pIpVidMcastAddr)
+{
+    rtk_api_ret_t retVal;
+    rtk_uint32 method;
+    rtl8367c_luttb l2Table;
+    rtk_uint32 pmask;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if(NULL == pIpVidMcastAddr)
+        return RT_ERR_NULL_POINTER;
+
+    /* check port mask */
+    RTK_CHK_PORTMASK_VALID(&pIpVidMcastAddr->portmask);
+
+    if (pIpVidMcastAddr->vid > RTL8367C_VIDMAX)
+        return RT_ERR_L2_VID;
+
+    if( (pIpVidMcastAddr->dip & 0xF0000000) != 0xE0000000)
+        return RT_ERR_INPUT;
+
+    /* Get Physical port mask */
+    if ((retVal = rtk_switch_portmask_L2P_get(&pIpVidMcastAddr->portmask, &pmask)) != RT_ERR_OK)
+        return retVal;
+
+    memset(&l2Table, 0x00, sizeof(rtl8367c_luttb));
+    l2Table.sip = pIpVidMcastAddr->sip;
+    l2Table.dip = pIpVidMcastAddr->dip;
+    l2Table.l3lookup = 1;
+    l2Table.l3vidlookup = 1;
+    l2Table.l3_vid = pIpVidMcastAddr->vid;
+    method = LUTREADMETHOD_MAC;
+    retVal = rtl8367c_getAsicL2LookupTb(method, &l2Table);
+    if (RT_ERR_OK == retVal)
+    {
+        l2Table.sip = pIpVidMcastAddr->sip;
+        l2Table.dip = pIpVidMcastAddr->dip;
+        l2Table.mbr = pmask;
+        l2Table.nosalearn = 1;
+        l2Table.l3lookup = 1;
+        l2Table.l3vidlookup = 1;
+        l2Table.l3_vid = pIpVidMcastAddr->vid;
+        if((retVal = rtl8367c_setAsicL2LookupTb(&l2Table)) != RT_ERR_OK)
+            return retVal;
+
+        pIpVidMcastAddr->address = l2Table.address;
+        return RT_ERR_OK;
+    }
+    else if (RT_ERR_L2_ENTRY_NOTFOUND == retVal)
+    {
+        memset(&l2Table, 0, sizeof(rtl8367c_luttb));
+        l2Table.sip = pIpVidMcastAddr->sip;
+        l2Table.dip = pIpVidMcastAddr->dip;
+        l2Table.mbr = pmask;
+        l2Table.nosalearn = 1;
+        l2Table.l3lookup = 1;
+        l2Table.l3vidlookup = 1;
+        l2Table.l3_vid = pIpVidMcastAddr->vid;
+        if ((retVal = rtl8367c_setAsicL2LookupTb(&l2Table)) != RT_ERR_OK)
+            return retVal;
+
+        pIpVidMcastAddr->address = l2Table.address;
+
+        method = LUTREADMETHOD_MAC;
+        retVal = rtl8367c_getAsicL2LookupTb(method, &l2Table);
+        if (RT_ERR_L2_ENTRY_NOTFOUND == retVal)
+            return RT_ERR_L2_INDEXTBL_FULL;
+        else
+            return retVal;
+
+    }
+    else
+        return retVal;
+}
+
+static rtk_api_ret_t _rtk_l2_ipVidMcastAddr_get(rtk_l2_ipVidMcastAddr_t *pIpVidMcastAddr)
+{
+    rtk_api_ret_t retVal;
+    rtk_uint32 method;
+    rtl8367c_luttb l2Table;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if(NULL == pIpVidMcastAddr)
+        return RT_ERR_NULL_POINTER;
+
+    if (pIpVidMcastAddr->vid > RTL8367C_VIDMAX)
+        return RT_ERR_L2_VID;
+
+    if( (pIpVidMcastAddr->dip & 0xF0000000) != 0xE0000000)
+        return RT_ERR_INPUT;
+
+    memset(&l2Table, 0x00, sizeof(rtl8367c_luttb));
+    l2Table.sip = pIpVidMcastAddr->sip;
+    l2Table.dip = pIpVidMcastAddr->dip;
+    l2Table.l3lookup = 1;
+    l2Table.l3vidlookup = 1;
+    l2Table.l3_vid = pIpVidMcastAddr->vid;
+    method = LUTREADMETHOD_MAC;
+    if ((retVal = rtl8367c_getAsicL2LookupTb(method, &l2Table)) != RT_ERR_OK)
+        return retVal;
+
+    pIpVidMcastAddr->address = l2Table.address;
+
+     /* Get Logical port mask */
+    if ((retVal = rtk_switch_portmask_P2L_get(l2Table.mbr, &pIpVidMcastAddr->portmask)) != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_l2_ipVidMcastAddr_next_get(rtk_uint32 *pAddress, rtk_l2_ipVidMcastAddr_t *pIpVidMcastAddr)
+{
+    rtk_api_ret_t   retVal;
+    rtl8367c_luttb  l2Table;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    /* Error Checking */
+    if ((pAddress == NULL) || (pIpVidMcastAddr == NULL))
+        return RT_ERR_INPUT;
+
+    if(*pAddress > RTK_MAX_LUT_ADDR_ID)
+        return RT_ERR_L2_L2UNI_PARAM;
+
+    memset(&l2Table, 0, sizeof(rtl8367c_luttb));
+    l2Table.address = *pAddress;
+
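+    /* Skip plain IP multicast entries; this iterator returns IP+VID entries only */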
+    do
+    {
+        if ((retVal = rtl8367c_getAsicL2LookupTb(LUTREADMETHOD_NEXT_L3MC, &l2Table)) != RT_ERR_OK)
+            return retVal;
+
+        if(l2Table.address < *pAddress)
+            return RT_ERR_L2_ENTRY_NOTFOUND;
+
+    }while(l2Table.l3vidlookup == 0);
+
+    pIpVidMcastAddr->sip        = l2Table.sip;
+    pIpVidMcastAddr->dip        = l2Table.dip;
+    pIpVidMcastAddr->vid        = l2Table.l3_vid;
+    pIpVidMcastAddr->address    = l2Table.address;
+
+    /* Get Logical port mask */
+    if ((retVal = rtk_switch_portmask_P2L_get(l2Table.mbr, &pIpVidMcastAddr->portmask)) != RT_ERR_OK)
+        return retVal;
+
+    *pAddress = l2Table.address;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_l2_ipVidMcastAddr_del(rtk_l2_ipVidMcastAddr_t *pIpVidMcastAddr)
+{
+    rtk_api_ret_t retVal;
+    rtk_uint32 method;
+    rtl8367c_luttb l2Table;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if(NULL == pIpVidMcastAddr)
+        return RT_ERR_NULL_POINTER;
+
+    if (pIpVidMcastAddr->vid > RTL8367C_VIDMAX)
+        return RT_ERR_L2_VID;
+
+    if( (pIpVidMcastAddr->dip & 0xF0000000) != 0xE0000000)
+        return RT_ERR_INPUT;
+
+    memset(&l2Table, 0x00, sizeof(rtl8367c_luttb));
+    l2Table.sip = pIpVidMcastAddr->sip;
+    l2Table.dip = pIpVidMcastAddr->dip;
+    l2Table.l3lookup = 1;
+    l2Table.l3vidlookup = 1;
+    l2Table.l3_vid = pIpVidMcastAddr->vid;
+    method = LUTREADMETHOD_MAC;
+    retVal = rtl8367c_getAsicL2LookupTb(method, &l2Table);
+    if (RT_ERR_OK == retVal)
+    {
+        l2Table.sip = pIpVidMcastAddr->sip;
+        l2Table.dip = pIpVidMcastAddr->dip;
+        l2Table.mbr = 0;
+        l2Table.nosalearn = 0;
+        l2Table.l3lookup = 1;
+        l2Table.l3vidlookup = 1;
+        l2Table.l3_vid = pIpVidMcastAddr->vid;
+        if((retVal = rtl8367c_setAsicL2LookupTb(&l2Table)) != RT_ERR_OK)
+            return retVal;
+
+        pIpVidMcastAddr->address = l2Table.address;
+        return RT_ERR_OK;
+    }
+    else
+        return retVal;
+}
+
+static rtk_api_ret_t _rtk_l2_ucastAddr_flush(rtk_l2_flushCfg_t *pConfig)
+{
+    rtk_api_ret_t   retVal;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if(pConfig == NULL)
+        return RT_ERR_NULL_POINTER;
+
+    if(pConfig->flushByVid >= RTK_ENABLE_END)
+        return RT_ERR_ENABLE;
+
+    if(pConfig->flushByFid >= RTK_ENABLE_END)
+        return RT_ERR_ENABLE;
+
+    if(pConfig->flushByPort >= RTK_ENABLE_END)
+        return RT_ERR_ENABLE;
+
+    if(pConfig->flushByMac >= RTK_ENABLE_END)
+        return RT_ERR_ENABLE;
+
+    if(pConfig->flushStaticAddr >= RTK_ENABLE_END)
+        return RT_ERR_ENABLE;
+
+    if(pConfig->flushAddrOnAllPorts >= RTK_ENABLE_END)
+        return RT_ERR_ENABLE;
+
+    if(pConfig->vid > RTL8367C_VIDMAX)
+        return RT_ERR_VLAN_VID;
+
+    if(pConfig->fid > RTL8367C_FIDMAX)
+        return RT_ERR_INPUT;
+
+    /* check port valid */
+    RTK_CHK_PORT_VALID(pConfig->port);
+
+    if(pConfig->flushByVid == ENABLED)
+    {
+        if ((retVal = rtl8367c_setAsicLutFlushMode(FLUSHMDOE_VID)) != RT_ERR_OK)
+            return retVal;
+
+        if ((retVal = rtl8367c_setAsicLutFlushVid(pConfig->vid)) != RT_ERR_OK)
+            return retVal;
+
+        if ((retVal = rtl8367c_setAsicLutFlushType((pConfig->flushStaticAddr == ENABLED) ? FLUSHTYPE_BOTH : FLUSHTYPE_DYNAMIC)) != RT_ERR_OK)
+            return retVal;
+
+        if(pConfig->flushAddrOnAllPorts == ENABLED)
+        {
+            if ((retVal = rtl8367c_setAsicLutForceFlush(RTL8367C_PORTMASK)) != RT_ERR_OK)
+                return retVal;
+        }
+        else if(pConfig->flushByPort == ENABLED)
+        {
+            if ((retVal = rtl8367c_setAsicLutForceFlush(1 << rtk_switch_port_L2P_get(pConfig->port))) != RT_ERR_OK)
+                return retVal;
+        }
+        else
+            return RT_ERR_INPUT;
+    }
+    else if(pConfig->flushByFid == ENABLED)
+    {
+        if ((retVal = rtl8367c_setAsicLutFlushMode(FLUSHMDOE_FID)) != RT_ERR_OK)
+            return retVal;
+
+        if ((retVal = rtl8367c_setAsicLutFlushFid(pConfig->fid)) != RT_ERR_OK)
+            return retVal;
+
+        if ((retVal = rtl8367c_setAsicLutFlushType((pConfig->flushStaticAddr == ENABLED) ? FLUSHTYPE_BOTH : FLUSHTYPE_DYNAMIC)) != RT_ERR_OK)
+            return retVal;
+
+        if(pConfig->flushAddrOnAllPorts == ENABLED)
+        {
+            if ((retVal = rtl8367c_setAsicLutForceFlush(RTL8367C_PORTMASK)) != RT_ERR_OK)
+                return retVal;
+        }
+        else if(pConfig->flushByPort == ENABLED)
+        {
+            if ((retVal = rtl8367c_setAsicLutForceFlush(1 << rtk_switch_port_L2P_get(pConfig->port))) != RT_ERR_OK)
+                return retVal;
+        }
+        else
+            return RT_ERR_INPUT;
+    }
+    else if(pConfig->flushByPort == ENABLED)
+    {
+        if ((retVal = rtl8367c_setAsicLutFlushType((pConfig->flushStaticAddr == ENABLED) ? FLUSHTYPE_BOTH : FLUSHTYPE_DYNAMIC)) != RT_ERR_OK)
+            return retVal;
+
+        if ((retVal = rtl8367c_setAsicLutFlushMode(FLUSHMDOE_PORT)) != RT_ERR_OK)
+            return retVal;
+
+        if ((retVal = rtl8367c_setAsicLutForceFlush(1 << rtk_switch_port_L2P_get(pConfig->port))) != RT_ERR_OK)
+            return retVal;
+    }
+    else if(pConfig->flushByMac == ENABLED)
+    {
+        /* Should use API "rtk_l2_addr_del" to remove a specified entry */
+        return RT_ERR_CHIP_NOT_SUPPORTED;
+    }
+    else
+        return RT_ERR_INPUT;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_l2_table_clear(void)
+{
+    rtk_api_ret_t   retVal;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if ((retVal = rtl8367c_setAsicLutFlushAll()) != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_l2_table_clearStatus_get(rtk_l2_clearStatus_t *pStatus)
+{
+    rtk_api_ret_t   retVal;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if(NULL == pStatus)
+        return RT_ERR_NULL_POINTER;
+
+    if ((retVal = rtl8367c_getAsicLutFlushAllStatus((rtk_uint32 *)pStatus)) != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_l2_flushLinkDownPortAddrEnable_set(rtk_port_t port, rtk_enable_t enable)
+{
+    rtk_api_ret_t retVal;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if (port != RTK_WHOLE_SYSTEM)
+        return RT_ERR_PORT_ID;
+
+    if (enable >= RTK_ENABLE_END)
+        return RT_ERR_ENABLE;
+
+    if ((retVal = rtl8367c_setAsicLutLinkDownForceAging(enable)) != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_l2_flushLinkDownPortAddrEnable_get(rtk_port_t port, rtk_enable_t *pEnable)
+{
+    rtk_api_ret_t retVal;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if (port != RTK_WHOLE_SYSTEM)
+        return RT_ERR_PORT_ID;
+
+    if(NULL == pEnable)
+        return RT_ERR_NULL_POINTER;
+
+    if ((retVal = rtl8367c_getAsicLutLinkDownForceAging(pEnable)) != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_l2_agingEnable_set(rtk_port_t port, rtk_enable_t enable)
+{
+    rtk_api_ret_t retVal;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    /* check port valid */
+    RTK_CHK_PORT_VALID(port);
+
+    if (enable >= RTK_ENABLE_END)
+        return RT_ERR_ENABLE;
+
+    if(enable == 1)
+        enable = 0;
+    else
+        enable = 1;
+
+    if ((retVal = rtl8367c_setAsicLutDisableAging(rtk_switch_port_L2P_get(port), enable)) != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_l2_agingEnable_get(rtk_port_t port, rtk_enable_t *pEnable)
+{
+    rtk_api_ret_t retVal;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    /* check port valid */
+    RTK_CHK_PORT_VALID(port);
+
+    if(NULL == pEnable)
+        return RT_ERR_NULL_POINTER;
+
+    if ((retVal = rtl8367c_getAsicLutDisableAging(rtk_switch_port_L2P_get(port), pEnable)) != RT_ERR_OK)
+        return retVal;
+
+    if(*pEnable == 1)
+        *pEnable = 0;
+    else
+        *pEnable = 1;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_l2_limitLearningCnt_set(rtk_port_t port, rtk_mac_cnt_t mac_cnt)
+{
+    rtk_api_ret_t retVal;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    /* check port valid */
+    RTK_CHK_PORT_VALID(port);
+
+    if (mac_cnt > rtk_switch_maxLutAddrNumber_get())
+        return RT_ERR_LIMITED_L2ENTRY_NUM;
+
+    if ((retVal = rtl8367c_setAsicLutLearnLimitNo(rtk_switch_port_L2P_get(port), mac_cnt)) != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_l2_limitLearningCnt_get(rtk_port_t port, rtk_mac_cnt_t *pMac_cnt)
+{
+    rtk_api_ret_t retVal;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    /* check port valid */
+    RTK_CHK_PORT_VALID(port);
+
+    if(NULL == pMac_cnt)
+        return RT_ERR_NULL_POINTER;
+
+    if ((retVal = rtl8367c_getAsicLutLearnLimitNo(rtk_switch_port_L2P_get(port), pMac_cnt)) != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_l2_limitSystemLearningCnt_set(rtk_mac_cnt_t mac_cnt)
+{
+    rtk_api_ret_t retVal;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if (mac_cnt > rtk_switch_maxLutAddrNumber_get())
+        return RT_ERR_LIMITED_L2ENTRY_NUM;
+
+    if ((retVal = rtl8367c_setAsicSystemLutLearnLimitNo(mac_cnt)) != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_l2_limitSystemLearningCnt_get(rtk_mac_cnt_t *pMac_cnt)
+{
+    rtk_api_ret_t retVal;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if(NULL == pMac_cnt)
+        return RT_ERR_NULL_POINTER;
+
+    if ((retVal = rtl8367c_getAsicSystemLutLearnLimitNo(pMac_cnt)) != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_l2_limitLearningCntAction_set(rtk_port_t port, rtk_l2_limitLearnCntAction_t action)
+{
+    rtk_api_ret_t retVal;
+    rtk_uint32 data;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if (port != RTK_WHOLE_SYSTEM)
+        return RT_ERR_PORT_ID;
+
+    if ( LIMIT_LEARN_CNT_ACTION_DROP == action )
+        data = 1;
+    else if ( LIMIT_LEARN_CNT_ACTION_FORWARD == action )
+        data = 0;
+    else if ( LIMIT_LEARN_CNT_ACTION_TO_CPU == action )
+        data = 2;
+    else
+        return RT_ERR_NOT_ALLOWED;
+
+    if ((retVal = rtl8367c_setAsicLutLearnOverAct(data)) != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_l2_limitLearningCntAction_get(rtk_port_t port, rtk_l2_limitLearnCntAction_t *pAction)
+{
+    rtk_api_ret_t retVal;
+    rtk_uint32 action;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if (port != RTK_WHOLE_SYSTEM)
+        return RT_ERR_PORT_ID;
+
+    if(NULL == pAction)
+        return RT_ERR_NULL_POINTER;
+
+    if ((retVal = rtl8367c_getAsicLutLearnOverAct(&action)) != RT_ERR_OK)
+        return retVal;
+
+    if ( 1 == action )
+        *pAction = LIMIT_LEARN_CNT_ACTION_DROP;
+    else if ( 0 == action )
+        *pAction = LIMIT_LEARN_CNT_ACTION_FORWARD;
+    else if ( 2 == action )
+        *pAction = LIMIT_LEARN_CNT_ACTION_TO_CPU;
+    else
+        *pAction = action;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_l2_limitSystemLearningCntAction_set(rtk_l2_limitLearnCntAction_t action)
+{
+    rtk_api_ret_t retVal;
+    rtk_uint32 data;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if ( LIMIT_LEARN_CNT_ACTION_DROP == action )
+        data = 1;
+    else if ( LIMIT_LEARN_CNT_ACTION_FORWARD == action )
+        data = 0;
+    else if ( LIMIT_LEARN_CNT_ACTION_TO_CPU == action )
+        data = 2;
+    else
+        return RT_ERR_NOT_ALLOWED;
+
+    if ((retVal = rtl8367c_setAsicSystemLutLearnOverAct(data)) != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_l2_limitSystemLearningCntAction_get(rtk_l2_limitLearnCntAction_t *pAction)
+{
+    rtk_api_ret_t retVal;
+    rtk_uint32 action;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if(NULL == pAction)
+        return RT_ERR_NULL_POINTER;
+
+    if ((retVal = rtl8367c_getAsicSystemLutLearnOverAct(&action)) != RT_ERR_OK)
+        return retVal;
+
+    if ( 1 == action )
+        *pAction = LIMIT_LEARN_CNT_ACTION_DROP;
+    else if ( 0 == action )
+        *pAction = LIMIT_LEARN_CNT_ACTION_FORWARD;
+    else if ( 2 == action )
+        *pAction = LIMIT_LEARN_CNT_ACTION_TO_CPU;
+    else
+        *pAction = action;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_l2_limitSystemLearningCntPortMask_set(rtk_portmask_t *pPortmask)
+{
+    rtk_api_ret_t retVal;
+    rtk_uint32 pmask;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if(NULL == pPortmask)
+        return RT_ERR_NULL_POINTER;
+
+    /* Check port mask */
+    RTK_CHK_PORTMASK_VALID(pPortmask);
+
+    if ((retVal = rtk_switch_portmask_L2P_get(pPortmask, &pmask)) != RT_ERR_OK)
+        return retVal;
+
+    if ((retVal = rtl8367c_setAsicSystemLutLearnPortMask(pmask)) != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_l2_limitSystemLearningCntPortMask_get(rtk_portmask_t *pPortmask)
+{
+    rtk_api_ret_t retVal;
+    rtk_uint32 pmask;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if(NULL == pPortmask)
+        return RT_ERR_NULL_POINTER;
+
+    if ((retVal = rtl8367c_getAsicSystemLutLearnPortMask(&pmask)) != RT_ERR_OK)
+        return retVal;
+
+    if ((retVal = rtk_switch_portmask_P2L_get(pmask, pPortmask)) != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_l2_learningCnt_get(rtk_port_t port, rtk_mac_cnt_t *pMac_cnt)
+{
+    rtk_api_ret_t retVal;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    /* check port valid */
+    RTK_CHK_PORT_VALID(port);
+
+    if(NULL == pMac_cnt)
+        return RT_ERR_NULL_POINTER;
+
+    if ((retVal = rtl8367c_getAsicLutLearnNo(rtk_switch_port_L2P_get(port), pMac_cnt)) != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_l2_floodPortMask_set(rtk_l2_flood_type_t flood_type, rtk_portmask_t *pFlood_portmask)
+{
+    rtk_api_ret_t retVal;
+    rtk_uint32 pmask;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if (flood_type >= FLOOD_END)
+        return RT_ERR_INPUT;
+
+    /* check port mask valid */
+    RTK_CHK_PORTMASK_VALID(pFlood_portmask);
+
+    /* Get Physical port mask */
+    if ((retVal = rtk_switch_portmask_L2P_get(pFlood_portmask, &pmask)) != RT_ERR_OK)
+        return retVal;
+
+    switch (flood_type)
+    {
+        case FLOOD_UNKNOWNDA:
+            if ((retVal = rtl8367c_setAsicPortUnknownDaFloodingPortmask(pmask)) != RT_ERR_OK)
+                return retVal;
+            break;
+        case FLOOD_UNKNOWNMC:
+            if ((retVal = rtl8367c_setAsicPortUnknownMulticastFloodingPortmask(pmask)) != RT_ERR_OK)
+                return retVal;
+            break;
+        case FLOOD_BC:
+            if ((retVal = rtl8367c_setAsicPortBcastFloodingPortmask(pmask)) != RT_ERR_OK)
+                return retVal;
+            break;
+        default:
+            break;
+    }
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_l2_floodPortMask_get(rtk_l2_flood_type_t flood_type, rtk_portmask_t *pFlood_portmask)
+{
+    rtk_api_ret_t retVal;
+    rtk_uint32 pmask;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if (flood_type >= FLOOD_END)
+        return RT_ERR_INPUT;
+
+    if(NULL == pFlood_portmask)
+        return RT_ERR_NULL_POINTER;
+
+    switch (flood_type)
+    {
+        case FLOOD_UNKNOWNDA:
+            if ((retVal = rtl8367c_getAsicPortUnknownDaFloodingPortmask(&pmask)) != RT_ERR_OK)
+                return retVal;
+            break;
+        case FLOOD_UNKNOWNMC:
+            if ((retVal = rtl8367c_getAsicPortUnknownMulticastFloodingPortmask(&pmask)) != RT_ERR_OK)
+                return retVal;
+            break;
+        case FLOOD_BC:
+            if ((retVal = rtl8367c_getAsicPortBcastFloodingPortmask(&pmask)) != RT_ERR_OK)
+                return retVal;
+            break;
+        default:
+            break;
+    }
+
+    /* Get Logical port mask */
+    if ((retVal = rtk_switch_portmask_P2L_get(pmask, pFlood_portmask)) != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_l2_localPktPermit_set(rtk_port_t port, rtk_enable_t permit)
+{
+    rtk_api_ret_t retVal;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    /* check port valid */
+    RTK_CHK_PORT_VALID(port);
+
+    if (permit >= RTK_ENABLE_END)
+        return RT_ERR_ENABLE;
+
+    if ((retVal = rtl8367c_setAsicPortBlockSpa(rtk_switch_port_L2P_get(port), permit)) != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_l2_localPktPermit_get(rtk_port_t port, rtk_enable_t *pPermit)
+{
+    rtk_api_ret_t retVal;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    /* check port valid */
+    RTK_CHK_PORT_VALID(port);
+
+    if(NULL == pPermit)
+        return RT_ERR_NULL_POINTER;
+
+    if ((retVal = rtl8367c_getAsicPortBlockSpa(rtk_switch_port_L2P_get(port), pPermit)) != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_l2_aging_set(rtk_l2_age_time_t aging_time)
+{
+    rtk_uint32 i;
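+    /*
+     * Each row is { max aging time (in seconds), speed, timer }; the loop
+     * below programs the first row whose threshold covers aging_time via
+     * rtl8367c_setAsicLutAgeTimerSpeed(timer, speed).
+     */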
+    CONST_T rtk_uint32 agePara[10][3] = {
+        {45, 0, 1}, {88, 0, 2}, {133, 0, 3}, {177, 0, 4}, {221, 0, 5}, {266, 0, 6}, {310, 0, 7},
+        {354, 2, 6}, {413, 2, 7}, {458, 3, 7}};
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if (aging_time > agePara[9][0])
+        return RT_ERR_OUT_OF_RANGE;
+
+    for (i = 0; i < 10; i++)
+    {
+        if (aging_time <= agePara[i][0])
+        {
+            return rtl8367c_setAsicLutAgeTimerSpeed(agePara[i][2], agePara[i][1]);
+        }
+    }
+
+    return RT_ERR_FAILED;
+}
+
+static rtk_api_ret_t _rtk_l2_aging_get(rtk_l2_age_time_t *pAging_time)
+{
+    rtk_api_ret_t retVal;
+    rtk_uint32 i, time, speed;
+    CONST_T rtk_uint32 agePara[10][3] = {
+        {45, 0, 1}, {88, 0, 2}, {133, 0, 3}, {177, 0, 4}, {221, 0, 5}, {266, 0, 6}, {310, 0, 7},
+        {354, 2, 6}, {413, 2, 7}, {458, 3, 7}};
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if(NULL == pAging_time)
+        return RT_ERR_NULL_POINTER;
+
+    if ((retVal = rtl8367c_getAsicLutAgeTimerSpeed(&time, &speed)) != RT_ERR_OK)
+        return retVal;
+
+    for (i = 0; i < 10; i++)
+    {
+        if (time == agePara[i][2] && speed == agePara[i][1])
+        {
+            *pAging_time = agePara[i][0];
+            return RT_ERR_OK;
+        }
+    }
+
+    return RT_ERR_FAILED;
+}
+
+static rtk_api_ret_t _rtk_l2_ipMcastAddrLookup_set(rtk_l2_ipmc_lookup_type_t type)
+{
+    rtk_api_ret_t retVal;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if(type == LOOKUP_MAC)
+    {
+        if((retVal = rtl8367c_setAsicLutIpMulticastLookup(DISABLED)) != RT_ERR_OK)
+            return retVal;
+    }
+    else if(type == LOOKUP_IP)
+    {
+        if((retVal = rtl8367c_setAsicLutIpMulticastLookup(ENABLED)) != RT_ERR_OK)
+            return retVal;
+
+        if ((retVal = rtl8367c_setAsicLutIpMulticastVidLookup(DISABLED)) != RT_ERR_OK)
+            return retVal;
+
+        if ((retVal = rtl8367c_setAsicLutIpLookupMethod(1)) != RT_ERR_OK)
+            return retVal;
+    }
+    else if(type == LOOKUP_IP_VID)
+    {
+        if((retVal = rtl8367c_setAsicLutIpMulticastLookup(ENABLED)) != RT_ERR_OK)
+            return retVal;
+
+        if ((retVal = rtl8367c_setAsicLutIpMulticastVidLookup(ENABLED)) != RT_ERR_OK)
+            return retVal;
+
+        if ((retVal = rtl8367c_setAsicLutIpLookupMethod(1)) != RT_ERR_OK)
+            return retVal;
+    }
+    else
+        return RT_ERR_INPUT;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_l2_ipMcastAddrLookup_get(rtk_l2_ipmc_lookup_type_t *pType)
+{
+    rtk_api_ret_t       retVal;
+    rtk_uint32          enabled, vid_lookup;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if(NULL == pType)
+        return RT_ERR_NULL_POINTER;
+
+    if((retVal = rtl8367c_getAsicLutIpMulticastLookup(&enabled)) != RT_ERR_OK)
+        return retVal;
+
+    if ((retVal = rtl8367c_getAsicLutIpMulticastVidLookup(&vid_lookup)) != RT_ERR_OK)
+        return retVal;
+
+    if(enabled == ENABLED)
+    {
+        if(vid_lookup == ENABLED)
+            *pType = LOOKUP_IP_VID;
+        else
+            *pType = LOOKUP_IP;
+    }
+    else
+        *pType = LOOKUP_MAC;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_l2_ipMcastForwardRouterPort_set(rtk_enable_t enabled)
+{
+    rtk_api_ret_t       retVal;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if (enabled >= RTK_ENABLE_END)
+        return RT_ERR_ENABLE;
+
+    if((retVal = rtl8367c_setAsicLutIpmcFwdRouterPort(enabled)) != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_l2_ipMcastForwardRouterPort_get(rtk_enable_t *pEnabled)
+{
+    rtk_api_ret_t       retVal;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if (NULL == pEnabled)
+        return RT_ERR_NULL_POINTER;
+
+    if((retVal = rtl8367c_getAsicLutIpmcFwdRouterPort(pEnabled)) != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_l2_ipMcastGroupEntry_add(ipaddr_t ip_addr, rtk_uint32 vid, rtk_portmask_t *pPortmask)
+{
+    rtk_uint32      empty_idx = 0xFFFF;
+    rtk_int32       index;
+    ipaddr_t        group_addr;
+    rtk_uint32      group_vid;
+    rtk_uint32      pmask;
+    rtk_uint32      valid;
+    rtk_uint32      physicalPortmask;
+    rtk_api_ret_t   retVal;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if (vid > RTL8367C_VIDMAX)
+        return RT_ERR_L2_VID;
+
+    if(NULL == pPortmask)
+        return RT_ERR_NULL_POINTER;
+
+    if((ip_addr & 0xF0000000) != 0xE0000000)
+        return RT_ERR_INPUT;
+
+    /* Get Physical port mask */
+    if ((retVal = rtk_switch_portmask_L2P_get(pPortmask, &physicalPortmask)) != RT_ERR_OK)
+        return retVal;
+
+    for(index = 0; index <= RTL8367C_LUT_IPMCGRP_TABLE_MAX; index++)
+    {
+        if ((retVal = rtl8367c_getAsicLutIPMCGroup((rtk_uint32)index, &group_addr, &group_vid, &pmask, &valid)) != RT_ERR_OK)
+            return retVal;
+
+        if( (valid == ENABLED) && (group_addr == ip_addr) && (group_vid == vid) )
+        {
+            if(pmask != physicalPortmask)
+            {
+                pmask = physicalPortmask;
+                if ((retVal = rtl8367c_setAsicLutIPMCGroup(index, ip_addr, vid, pmask, valid)) != RT_ERR_OK)
+                    return retVal;
+            }
+
+            return RT_ERR_OK;
+        }
+
+        if( (valid == DISABLED) && (empty_idx == 0xFFFF) ) /* Unused */
+            empty_idx = (rtk_uint32)index;
+    }
+
+    if(empty_idx == 0xFFFF)
+        return RT_ERR_TBL_FULL;
+
+    pmask = physicalPortmask;
+    if ((retVal = rtl8367c_setAsicLutIPMCGroup(empty_idx, ip_addr, vid, pmask, ENABLED)) != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_l2_ipMcastGroupEntry_del(ipaddr_t ip_addr, rtk_uint32 vid)
+{
+    rtk_int32       index;
+    ipaddr_t        group_addr;
+    rtk_uint32      group_vid;
+    rtk_uint32      pmask;
+    rtk_uint32      valid;
+    rtk_api_ret_t   retVal;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if (vid > RTL8367C_VIDMAX)
+        return RT_ERR_L2_VID;
+
+    if((ip_addr & 0xF0000000) != 0xE0000000)
+        return RT_ERR_INPUT;
+
+    for(index = 0; index <= RTL8367C_LUT_IPMCGRP_TABLE_MAX; index++)
+    {
+        if ((retVal = rtl8367c_getAsicLutIPMCGroup((rtk_uint32)index, &group_addr, &group_vid, &pmask, &valid)) != RT_ERR_OK)
+            return retVal;
+
+        if( (valid == ENABLED) && (group_addr == ip_addr) && (group_vid == vid) )
+        {
+            group_addr = 0xE0000000;
+            group_vid = 0;
+            pmask = 0;
+            if ((retVal = rtl8367c_setAsicLutIPMCGroup(index, group_addr, group_vid, pmask, DISABLED)) != RT_ERR_OK)
+                return retVal;
+
+            return RT_ERR_OK;
+        }
+    }
+
+    return RT_ERR_FAILED;
+}
+
+static rtk_api_ret_t _rtk_l2_ipMcastGroupEntry_get(ipaddr_t ip_addr, rtk_uint32 vid, rtk_portmask_t *pPortmask)
+{
+    rtk_int32       index;
+    ipaddr_t        group_addr;
+    rtk_uint32      group_vid;
+    rtk_uint32      valid;
+    rtk_uint32      pmask;
+    rtk_api_ret_t   retVal;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if((ip_addr & 0xF0000000) != 0xE0000000)
+        return RT_ERR_INPUT;
+
+    if (vid > RTL8367C_VIDMAX)
+        return RT_ERR_L2_VID;
+
+    if(NULL == pPortmask)
+        return RT_ERR_NULL_POINTER;
+
+    for(index = 0; index <= RTL8367C_LUT_IPMCGRP_TABLE_MAX; index++)
+    {
+        if ((retVal = rtl8367c_getAsicLutIPMCGroup((rtk_uint32)index, &group_addr, &group_vid, &pmask, &valid)) != RT_ERR_OK)
+            return retVal;
+
+        if( (valid == ENABLED) && (group_addr == ip_addr) && (group_vid == vid) )
+        {
+            if ((retVal = rtk_switch_portmask_P2L_get(pmask, pPortmask)) != RT_ERR_OK)
+                return retVal;
+
+            return RT_ERR_OK;
+        }
+    }
+
+    return RT_ERR_FAILED;
+}
+
+static rtk_api_ret_t _rtk_l2_entry_get(rtk_l2_addr_table_t *pL2_entry)
+{
+    rtk_api_ret_t retVal;
+    rtk_uint32 method;
+    rtl8367c_luttb l2Table;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if (pL2_entry->index >= rtk_switch_maxLutAddrNumber_get())
+        return RT_ERR_INPUT;
+
+    memset(&l2Table, 0x00, sizeof(rtl8367c_luttb));
+    l2Table.address = pL2_entry->index;
+    method = LUTREADMETHOD_ADDRESS;
+    if ((retVal = rtl8367c_getAsicL2LookupTb(method, &l2Table)) != RT_ERR_OK)
+        return retVal;
+
+    if ((pL2_entry->index > 0x800) && (l2Table.lookup_hit == 0))
+        return RT_ERR_L2_EMPTY_ENTRY;
+
+    if(l2Table.l3lookup)
+    {
+        if(l2Table.l3vidlookup)
+        {
+            memset(&pL2_entry->mac, 0, sizeof(rtk_mac_t));
+            pL2_entry->is_ipmul  = l2Table.l3lookup;
+            pL2_entry->sip       = l2Table.sip;
+            pL2_entry->dip       = l2Table.dip;
+            pL2_entry->is_static = l2Table.nosalearn;
+
+            /* Get Logical port mask */
+            if ((retVal = rtk_switch_portmask_P2L_get(l2Table.mbr, &(pL2_entry->portmask))) != RT_ERR_OK)
+                return retVal;
+
+            pL2_entry->fid       = 0;
+            pL2_entry->age       = 0;
+            pL2_entry->auth      = 0;
+            pL2_entry->sa_block  = 0;
+            pL2_entry->is_ipvidmul = 1;
+            pL2_entry->l3_vid      = l2Table.l3_vid;
+        }
+        else
+        {
+            memset(&pL2_entry->mac, 0, sizeof(rtk_mac_t));
+            pL2_entry->is_ipmul  = l2Table.l3lookup;
+            pL2_entry->sip       = l2Table.sip;
+            pL2_entry->dip       = l2Table.dip;
+            pL2_entry->is_static = l2Table.nosalearn;
+
+            /* Get Logical port mask */
+            if ((retVal = rtk_switch_portmask_P2L_get(l2Table.mbr, &(pL2_entry->portmask))) != RT_ERR_OK)
+                return retVal;
+
+            pL2_entry->fid       = 0;
+            pL2_entry->age       = 0;
+            pL2_entry->auth      = 0;
+            pL2_entry->sa_block  = 0;
+            pL2_entry->is_ipvidmul = 0;
+            pL2_entry->l3_vid      = 0;
+        }
+    }
+    else if(l2Table.mac.octet[0]&0x01)
+    {
+        memset(&pL2_entry->sip, 0, sizeof(ipaddr_t));
+        memset(&pL2_entry->dip, 0, sizeof(ipaddr_t));
+        pL2_entry->mac.octet[0] = l2Table.mac.octet[0];
+        pL2_entry->mac.octet[1] = l2Table.mac.octet[1];
+        pL2_entry->mac.octet[2] = l2Table.mac.octet[2];
+        pL2_entry->mac.octet[3] = l2Table.mac.octet[3];
+        pL2_entry->mac.octet[4] = l2Table.mac.octet[4];
+        pL2_entry->mac.octet[5] = l2Table.mac.octet[5];
+        pL2_entry->is_ipmul  = l2Table.l3lookup;
+        pL2_entry->is_static = l2Table.nosalearn;
+
+        /* Get Logical port mask */
+        if ((retVal = rtk_switch_portmask_P2L_get(l2Table.mbr, &(pL2_entry->portmask))) != RT_ERR_OK)
+            return retVal;
+
+        pL2_entry->ivl       = l2Table.ivl_svl;
+        if(l2Table.ivl_svl == 1) /* IVL */
+        {
+            pL2_entry->cvid      = l2Table.cvid_fid;
+            pL2_entry->fid       = 0;
+        }
+        else /* SVL*/
+        {
+            pL2_entry->cvid      = 0;
+            pL2_entry->fid       = l2Table.cvid_fid;
+        }
+        pL2_entry->auth      = l2Table.auth;
+        pL2_entry->sa_block  = l2Table.sa_block;
+        pL2_entry->age       = 0;
+        pL2_entry->is_ipvidmul = 0;
+        pL2_entry->l3_vid      = 0;
+    }
+    else if((l2Table.age != 0)||(l2Table.nosalearn == 1))
+    {
+        memset(&pL2_entry->sip, 0, sizeof(ipaddr_t));
+        memset(&pL2_entry->dip, 0, sizeof(ipaddr_t));
+        pL2_entry->mac.octet[0] = l2Table.mac.octet[0];
+        pL2_entry->mac.octet[1] = l2Table.mac.octet[1];
+        pL2_entry->mac.octet[2] = l2Table.mac.octet[2];
+        pL2_entry->mac.octet[3] = l2Table.mac.octet[3];
+        pL2_entry->mac.octet[4] = l2Table.mac.octet[4];
+        pL2_entry->mac.octet[5] = l2Table.mac.octet[5];
+        pL2_entry->is_ipmul  = l2Table.l3lookup;
+        pL2_entry->is_static = l2Table.nosalearn;
+
+        /* Get Logical port mask */
+        if ((retVal = rtk_switch_portmask_P2L_get(1 << (l2Table.spa), &(pL2_entry->portmask))) != RT_ERR_OK)
+            return retVal;
+
+        pL2_entry->ivl       = l2Table.ivl_svl;
+        pL2_entry->cvid      = l2Table.cvid_fid;
+        pL2_entry->fid       = l2Table.fid;
+        pL2_entry->auth      = l2Table.auth;
+        pL2_entry->sa_block  = l2Table.sa_block;
+        pL2_entry->age       = l2Table.age;
+        pL2_entry->is_ipvidmul = 0;
+        pL2_entry->l3_vid      = 0;
+    }
+    else
+       return RT_ERR_L2_EMPTY_ENTRY;
+
+    return RT_ERR_OK;
+}
+
+
+/* Function Name:
+ *      rtk_l2_init
+ * Description:
 *      Initialize the L2 module of the specified device.
+ * Input:
+ *      None
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK          - OK
+ *      RT_ERR_FAILED      - Failed
+ *      RT_ERR_SMI         - SMI access error
+ * Note:
 *      Initialize the L2 module before calling any other L2 APIs.
+ */
+rtk_api_ret_t rtk_l2_init(void)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_l2_init();
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
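+
+/*
+ * Usage sketch (illustrative only, not part of the driver): the minimal
+ * calling sequence, assuming rtk_switch_init() has already succeeded
+ * (RTK_CHK_INIT_STATE() rejects all L2 calls otherwise).
+ *
+ *    if (rtk_l2_init() != RT_ERR_OK)
+ *        return;    // L2 module unavailable, abort configuration
+ */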
+
+
+/* Function Name:
+ *      rtk_l2_addr_add
+ * Description:
+ *      Add LUT unicast entry.
+ * Input:
 *      pMac - 6-byte unicast (I/G bit is 0) MAC address to be written into the LUT.
+ *      pL2_data - Unicast entry parameter
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK               - OK
+ *      RT_ERR_FAILED           - Failed
+ *      RT_ERR_SMI              - SMI access error
+ *      RT_ERR_PORT_ID          - Invalid port number.
+ *      RT_ERR_MAC              - Invalid MAC address.
 *      RT_ERR_L2_FID           - Invalid FID.
 *      RT_ERR_L2_INDEXTBL_FULL - Hashed index is full of entries.
+ *      RT_ERR_INPUT            - Invalid input parameters.
+ * Note:
 *      If the unicast MAC address already exists in the LUT, the status of the entry
 *      is updated. Otherwise, an empty or ASIC auto-learned entry is used. If none of
 *      the entries with the same hash value can be replaced, the ASIC returns an
 *      RT_ERR_L2_INDEXTBL_FULL error.
+ */
+rtk_api_ret_t rtk_l2_addr_add(rtk_mac_t *pMac, rtk_l2_ucastAddr_t *pL2_data)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_l2_addr_add(pMac, pL2_data);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
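+
+/*
+ * Example (illustrative sketch, not part of the driver): add a static
+ * unicast entry on the first UTP port. UTP_PORT0 is assumed to come from
+ * the SDK's logical port enum and the MAC address is arbitrary; unset
+ * fields stay zero from the memset().
+ *
+ *    rtk_mac_t mac = {{0x00, 0x11, 0x22, 0x33, 0x44, 0x55}};
+ *    rtk_l2_ucastAddr_t entry;
+ *
+ *    memset(&entry, 0, sizeof(entry));
+ *    entry.port      = UTP_PORT0;
+ *    entry.ivl       = 1;    // IVL mode: cvid is part of the lookup key
+ *    entry.cvid      = 1;
+ *    entry.is_static = 1;
+ *    if (rtk_l2_addr_add(&mac, &entry) == RT_ERR_OK)
+ *        ;    // entry.address now holds the LUT address that was written
+ */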
+
+/* Function Name:
+ *      rtk_l2_addr_get
+ * Description:
+ *      Get LUT unicast entry.
+ * Input:
 *      pMac    - 6-byte unicast (I/G bit is 0) MAC address to look up in the LUT.
+ * Output:
+ *      pL2_data - Unicast entry parameter
+ * Return:
+ *      RT_ERR_OK                   - OK
+ *      RT_ERR_FAILED               - Failed
+ *      RT_ERR_SMI                  - SMI access error
+ *      RT_ERR_PORT_ID              - Invalid port number.
+ *      RT_ERR_MAC                  - Invalid MAC address.
+ *      RT_ERR_L2_FID               - Invalid FID .
+ *      RT_ERR_L2_ENTRY_NOTFOUND    - No such LUT entry.
+ *      RT_ERR_INPUT                - Invalid input parameters.
+ * Note:
 *      If the unicast MAC address exists in the LUT, the port and FID where the MAC
 *      was learned are returned. Otherwise, an RT_ERR_L2_ENTRY_NOTFOUND error is returned.
+ */
+rtk_api_ret_t rtk_l2_addr_get(rtk_mac_t *pMac, rtk_l2_ucastAddr_t *pL2_data)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_l2_addr_get(pMac, pL2_data);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_l2_addr_next_get
+ * Description:
+ *      Get Next LUT unicast entry.
+ * Input:
+ *      read_method     - The reading method.
+ *      port            - The port number if the read_method is READMETHOD_NEXT_L2UCSPA
+ *      pAddress        - The Address ID
+ * Output:
+ *      pL2_data - Unicast entry parameter
+ * Return:
+ *      RT_ERR_OK                   - OK
+ *      RT_ERR_FAILED               - Failed
+ *      RT_ERR_SMI                  - SMI access error
+ *      RT_ERR_PORT_ID              - Invalid port number.
+ *      RT_ERR_MAC                  - Invalid MAC address.
+ *      RT_ERR_L2_FID               - Invalid FID .
+ *      RT_ERR_L2_ENTRY_NOTFOUND    - No such LUT entry.
+ *      RT_ERR_INPUT                - Invalid input parameters.
+ * Note:
+ *      Get the next unicast entry after the current entry pointed to by pAddress.
+ *      The address of the next entry is returned through pAddress. The user can pass (address + 1)
+ *      as pAddress to call this API again to dump all entries in the LUT.
+ */
+rtk_api_ret_t rtk_l2_addr_next_get(rtk_l2_read_method_t read_method, rtk_port_t port, rtk_uint32 *pAddress, rtk_l2_ucastAddr_t *pL2_data)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_l2_addr_next_get(read_method, port, pAddress, pL2_data);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
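+
+/*
+ * Usage sketch (editor's illustration): walking every unicast entry in the
+ * LUT with the (address + 1) convention from the Note above. The scan stops
+ * when no further entry exists and RT_ERR_L2_ENTRY_NOTFOUND is returned.
+ * The port argument only matters for READMETHOD_NEXT_L2UCSPA, so 0 is
+ * passed here.
+ *
+ *     rtk_uint32 address = 0;
+ *     rtk_l2_ucastAddr_t entry;
+ *
+ *     while (rtk_l2_addr_next_get(READMETHOD_NEXT_L2UC, 0,
+ *                                 &address, &entry) == RT_ERR_OK)
+ *     {
+ *         // entry holds the unicast entry found at 'address'
+ *         address++;    // resume the scan just past the entry found
+ *     }
+ */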
+
+/* Function Name:
+ *      rtk_l2_addr_del
+ * Description:
+ *      Delete LUT unicast entry.
+ * Input:
+ *      pMac - 6 bytes unicast (I/G bit is 0) mac address to be written into LUT.
+ *      pL2_data - Unicast entry parameter
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK                   - OK
+ *      RT_ERR_FAILED               - Failed
+ *      RT_ERR_SMI                  - SMI access error
+ *      RT_ERR_PORT_ID              - Invalid port number.
+ *      RT_ERR_MAC                  - Invalid MAC address.
+ *      RT_ERR_L2_FID               - Invalid FID .
+ *      RT_ERR_L2_ENTRY_NOTFOUND    - No such LUT entry.
+ *      RT_ERR_INPUT                - Invalid input parameters.
+ * Note:
+ *      If the mac exists in the LUT, it will be deleted. Otherwise, it will return RT_ERR_L2_ENTRY_NOTFOUND.
+ */
+rtk_api_ret_t rtk_l2_addr_del(rtk_mac_t *pMac, rtk_l2_ucastAddr_t *pL2_data)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_l2_addr_del(pMac, pL2_data);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_l2_mcastAddr_add
+ * Description:
+ *      Add LUT multicast entry.
+ * Input:
+ *      pMcastAddr  - L2 multicast entry structure
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK               - OK
+ *      RT_ERR_FAILED           - Failed
+ *      RT_ERR_SMI              - SMI access error
+ *      RT_ERR_PORT_ID          - Invalid port number.
+ *      RT_ERR_MAC              - Invalid MAC address.
+ *      RT_ERR_L2_FID           - Invalid FID .
+ *      RT_ERR_L2_VID           - Invalid VID .
+ *      RT_ERR_L2_INDEXTBL_FULL - hashed index is full of entries.
+ *      RT_ERR_PORT_MASK        - Invalid portmask.
+ *      RT_ERR_INPUT            - Invalid input parameters.
+ * Note:
+ *      If the multicast mac address already exists in the LUT, the API will update the
+ *      port mask of the entry. Otherwise, it will find an empty or ASIC auto-learned
+ *      entry to write. If none of the entries with the same hash value can be replaced,
+ *      the ASIC will return a RT_ERR_L2_INDEXTBL_FULL error.
+ */
+rtk_api_ret_t rtk_l2_mcastAddr_add(rtk_l2_mcastAddr_t *pMcastAddr)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_l2_mcastAddr_add(pMcastAddr);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_l2_mcastAddr_get
+ * Description:
+ *      Get LUT multicast entry.
+ * Input:
+ *      pMcastAddr  - L2 multicast entry structure
+ * Output:
+ *      pMcastAddr  - L2 multicast entry structure
+ * Return:
+ *      RT_ERR_OK                   - OK
+ *      RT_ERR_FAILED               - Failed
+ *      RT_ERR_SMI                  - SMI access error
+ *      RT_ERR_MAC                  - Invalid MAC address.
+ *      RT_ERR_L2_FID               - Invalid FID .
+ *      RT_ERR_L2_VID               - Invalid VID .
+ *      RT_ERR_L2_ENTRY_NOTFOUND    - No such LUT entry.
+ *      RT_ERR_INPUT                - Invalid input parameters.
+ * Note:
+ *      If the multicast mac address exists in the LUT, it will return the ports where
+ *      the mac is learned. Otherwise, it will return a RT_ERR_L2_ENTRY_NOTFOUND error.
+ */
+rtk_api_ret_t rtk_l2_mcastAddr_get(rtk_l2_mcastAddr_t *pMcastAddr)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_l2_mcastAddr_get(pMcastAddr);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_l2_mcastAddr_next_get
+ * Description:
+ *      Get Next L2 Multicast entry.
+ * Input:
+ *      pAddress        - The Address ID
+ * Output:
+ *      pMcastAddr  - L2 multicast entry structure
+ * Return:
+ *      RT_ERR_OK                   - OK
+ *      RT_ERR_FAILED               - Failed
+ *      RT_ERR_SMI                  - SMI access error
+ *      RT_ERR_L2_ENTRY_NOTFOUND    - No such LUT entry.
+ *      RT_ERR_INPUT                - Invalid input parameters.
+ * Note:
+ *      Get the next L2 multicast entry after the current entry pointed to by pAddress.
+ *      The address of the next entry is returned through pAddress. The user can pass (address + 1)
+ *      as pAddress to call this API again to dump all multicast entries in the LUT.
+ */
+rtk_api_ret_t rtk_l2_mcastAddr_next_get(rtk_uint32 *pAddress, rtk_l2_mcastAddr_t *pMcastAddr)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_l2_mcastAddr_next_get(pAddress, pMcastAddr);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_l2_mcastAddr_del
+ * Description:
+ *      Delete LUT multicast entry.
+ * Input:
+ *      pMcastAddr  - L2 multicast entry structure
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK                   - OK
+ *      RT_ERR_FAILED               - Failed
+ *      RT_ERR_SMI                  - SMI access error
+ *      RT_ERR_MAC                  - Invalid MAC address.
+ *      RT_ERR_L2_FID               - Invalid FID .
+ *      RT_ERR_L2_VID               - Invalid VID .
+ *      RT_ERR_L2_ENTRY_NOTFOUND    - No such LUT entry.
+ *      RT_ERR_INPUT                - Invalid input parameters.
+ * Note:
+ *      If the mac exists in the LUT, it will be deleted. Otherwise, it will return RT_ERR_L2_ENTRY_NOTFOUND.
+ */
+rtk_api_ret_t rtk_l2_mcastAddr_del(rtk_l2_mcastAddr_t *pMcastAddr)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_l2_mcastAddr_del(pMcastAddr);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_l2_ipMcastAddr_add
+ * Description:
+ *      Add Lut IP multicast entry
+ * Input:
+ *      pIpMcastAddr    - IP Multicast entry
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK               - OK
+ *      RT_ERR_FAILED           - Failed
+ *      RT_ERR_SMI              - SMI access error
+ *      RT_ERR_PORT_ID          - Invalid port number.
+ *      RT_ERR_L2_INDEXTBL_FULL - hashed index is full of entries.
+ *      RT_ERR_PORT_MASK        - Invalid portmask.
+ *      RT_ERR_INPUT            - Invalid input parameters.
+ * Note:
+ *      The system supports L2 entries keyed by IP multicast DIP/SIP so IP multicast frames can be
+ *      forwarded as the user desires. If this function is enabled, the system will look up the L2
+ *      IP multicast entry and forward the IP multicast frame directly instead of flooding it.
+ */
+rtk_api_ret_t rtk_l2_ipMcastAddr_add(rtk_l2_ipMcastAddr_t *pIpMcastAddr)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_l2_ipMcastAddr_add(pIpMcastAddr);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_l2_ipMcastAddr_get
+ * Description:
+ *      Get LUT IP multicast entry.
+ * Input:
+ *      pIpMcastAddr    - IP Multicast entry
+ * Output:
+ *      pIpMcastAddr    - IP Multicast entry
+ * Return:
+ *      RT_ERR_OK                   - OK
+ *      RT_ERR_FAILED               - Failed
+ *      RT_ERR_SMI                  - SMI access error
+ *      RT_ERR_L2_ENTRY_NOTFOUND    - No such LUT entry.
+ *      RT_ERR_INPUT                - Invalid input parameters.
+ * Note:
+ *      The API can get Lut table of IP multicast entry.
+ */
+rtk_api_ret_t rtk_l2_ipMcastAddr_get(rtk_l2_ipMcastAddr_t *pIpMcastAddr)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_l2_ipMcastAddr_get(pIpMcastAddr);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_l2_ipMcastAddr_next_get
+ * Description:
+ *      Get Next IP Multicast entry.
+ * Input:
+ *      pAddress        - The Address ID
+ * Output:
+ *      pIpMcastAddr    - IP Multicast entry
+ * Return:
+ *      RT_ERR_OK                   - OK
+ *      RT_ERR_FAILED               - Failed
+ *      RT_ERR_SMI                  - SMI access error
+ *      RT_ERR_L2_ENTRY_NOTFOUND    - No such LUT entry.
+ *      RT_ERR_INPUT                - Invalid input parameters.
+ * Note:
+ *      Get the next IP multicast entry after the current entry pointed to by pAddress.
+ *      The address of the next entry is returned through pAddress. The user can pass (address + 1)
+ *      as pAddress to call this API again to dump all IP multicast entries in the LUT.
+ */
+rtk_api_ret_t rtk_l2_ipMcastAddr_next_get(rtk_uint32 *pAddress, rtk_l2_ipMcastAddr_t *pIpMcastAddr)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_l2_ipMcastAddr_next_get(pAddress, pIpMcastAddr);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_l2_ipMcastAddr_del
+ * Description:
+ *      Delete an IP multicast address entry from the specified device.
+ * Input:
+ *      pIpMcastAddr    - IP Multicast entry
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK                   - OK
+ *      RT_ERR_FAILED               - Failed
+ *      RT_ERR_SMI                  - SMI access error
+ *      RT_ERR_L2_ENTRY_NOTFOUND    - No such LUT entry.
+ *      RT_ERR_INPUT                - Invalid input parameters.
+ * Note:
+ *      The API can delete an IP multicast address entry from the specified device.
+ */
+rtk_api_ret_t rtk_l2_ipMcastAddr_del(rtk_l2_ipMcastAddr_t *pIpMcastAddr)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_l2_ipMcastAddr_del(pIpMcastAddr);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_l2_ipVidMcastAddr_add
+ * Description:
+ *      Add Lut IP multicast+VID entry
+ * Input:
+ *      pIpVidMcastAddr - IP & VID multicast Entry
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK               - OK
+ *      RT_ERR_FAILED           - Failed
+ *      RT_ERR_SMI              - SMI access error
+ *      RT_ERR_PORT_ID          - Invalid port number.
+ *      RT_ERR_L2_INDEXTBL_FULL - hashed index is full of entries.
+ *      RT_ERR_PORT_MASK        - Invalid portmask.
+ *      RT_ERR_INPUT            - Invalid input parameters.
+ * Note:
+ *
+ */
+rtk_api_ret_t rtk_l2_ipVidMcastAddr_add(rtk_l2_ipVidMcastAddr_t *pIpVidMcastAddr)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_l2_ipVidMcastAddr_add(pIpVidMcastAddr);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_l2_ipVidMcastAddr_get
+ * Description:
+ *      Get LUT IP multicast+VID entry.
+ * Input:
+ *      pIpVidMcastAddr - IP & VID multicast Entry
+ * Output:
+ *      pIpVidMcastAddr - IP & VID multicast Entry
+ * Return:
+ *      RT_ERR_OK                   - OK
+ *      RT_ERR_FAILED               - Failed
+ *      RT_ERR_SMI                  - SMI access error
+ *      RT_ERR_L2_ENTRY_NOTFOUND    - No such LUT entry.
+ *      RT_ERR_INPUT                - Invalid input parameters.
+ * Note:
+ *
+ */
+rtk_api_ret_t rtk_l2_ipVidMcastAddr_get(rtk_l2_ipVidMcastAddr_t *pIpVidMcastAddr)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_l2_ipVidMcastAddr_get(pIpVidMcastAddr);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_l2_ipVidMcastAddr_next_get
+ * Description:
+ *      Get Next IP Multicast+VID entry.
+ * Input:
+ *      pAddress        - The Address ID
+ * Output:
+ *      pIpVidMcastAddr - IP & VID multicast Entry
+ * Return:
+ *      RT_ERR_OK                   - OK
+ *      RT_ERR_FAILED               - Failed
+ *      RT_ERR_SMI                  - SMI access error
+ *      RT_ERR_L2_ENTRY_NOTFOUND    - No such LUT entry.
+ *      RT_ERR_INPUT                - Invalid input parameters.
+ * Note:
+ *      Get the next IP multicast+VID entry after the current entry pointed to by pAddress.
+ *      The address of the next entry is returned through pAddress. The user can pass (address + 1)
+ *      as pAddress to call this API again to dump all IP multicast+VID entries in the LUT.
+ */
+rtk_api_ret_t rtk_l2_ipVidMcastAddr_next_get(rtk_uint32 *pAddress, rtk_l2_ipVidMcastAddr_t *pIpVidMcastAddr)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_l2_ipVidMcastAddr_next_get(pAddress, pIpVidMcastAddr);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_l2_ipVidMcastAddr_del
+ * Description:
+ *      Delete an IP multicast+VID address entry from the specified device.
+ * Input:
+ *      pIpVidMcastAddr - IP & VID multicast Entry
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK                   - OK
+ *      RT_ERR_FAILED               - Failed
+ *      RT_ERR_SMI                  - SMI access error
+ *      RT_ERR_L2_ENTRY_NOTFOUND    - No such LUT entry.
+ *      RT_ERR_INPUT                - Invalid input parameters.
+ * Note:
+ *
+ */
+rtk_api_ret_t rtk_l2_ipVidMcastAddr_del(rtk_l2_ipVidMcastAddr_t *pIpVidMcastAddr)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_l2_ipVidMcastAddr_del(pIpVidMcastAddr);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_l2_ucastAddr_flush
+ * Description:
+ *      Flush L2 mac address by type in the specified device (both dynamic and static).
+ * Input:
+ *      pConfig - flush configuration
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_PORT_ID      - Invalid port number.
+ *      RT_ERR_VLAN_VID     - Invalid VID parameter.
+ *      RT_ERR_INPUT        - Invalid input parameters.
+ * Note:
+ *      flushByVid          - 1: Flush by VID, 0: Don't flush by VID
+ *      vid                 - VID (0 ~ 4095)
+ *      flushByFid          - 1: Flush by FID, 0: Don't flush by FID
+ *      fid                 - FID (0 ~ 15)
+ *      flushByPort         - 1: Flush by Port, 0: Don't flush by Port
+ *      port                - Port ID
+ *      flushByMac          - Not Supported
+ *      ucastAddr           - Not Supported
+ *      flushStaticAddr     - 1: Flush both Static and Dynamic entries, 0: Flush only Dynamic entries
+ *      flushAddrOnAllPorts - 1: Flush VID-matched entries at all ports, 0: Flush VID-matched entries per port.
+ */
+rtk_api_ret_t rtk_l2_ucastAddr_flush(rtk_l2_flushCfg_t *pConfig)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_l2_ucastAddr_flush(pConfig);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
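+
+/*
+ * Usage sketch (editor's illustration): flushing the dynamic entries learned
+ * on port 2 while keeping static ones, using the fields listed in the Note
+ * above. ENABLED/DISABLED are assumed to be the usual rtk_enable_t values.
+ *
+ *     rtk_l2_flushCfg_t cfg;
+ *
+ *     memset(&cfg, 0, sizeof(cfg));
+ *     cfg.flushByPort     = ENABLED;
+ *     cfg.port            = 2;
+ *     cfg.flushStaticAddr = DISABLED;   // dynamic entries only
+ *     rtk_l2_ucastAddr_flush(&cfg);
+ */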
+
+/* Function Name:
+ *      rtk_l2_table_clear
+ * Description:
+ *      Flush all static & dynamic entries in LUT.
+ * Input:
+ *      None
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ * Note:
+ *
+ */
+rtk_api_ret_t rtk_l2_table_clear(void)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_l2_table_clear();
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_l2_table_clearStatus_get
+ * Description:
+ *      Get table clear status
+ * Input:
+ *      None
+ * Output:
+ *      pStatus - Clear status, 1: Busy, 0: Finished
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ * Note:
+ *
+ */
+rtk_api_ret_t rtk_l2_table_clearStatus_get(rtk_l2_clearStatus_t *pStatus)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_l2_table_clearStatus_get(pStatus);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
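+
+/*
+ * Usage sketch (editor's illustration): clearing the whole LUT and polling
+ * the clear status until the hardware reports completion.
+ *
+ *     rtk_l2_clearStatus_t status;
+ *
+ *     if (rtk_l2_table_clear() == RT_ERR_OK)
+ *     {
+ *         do {
+ *             if (rtk_l2_table_clearStatus_get(&status) != RT_ERR_OK)
+ *                 break;
+ *         } while (status == L2_CLEAR_STATE_BUSY);
+ *     }
+ */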
+
+/* Function Name:
+ *      rtk_l2_flushLinkDownPortAddrEnable_set
+ * Description:
+ *      Set HW flush linkdown port mac configuration of the specified device.
+ * Input:
+ *      port - Port id.
+ *      enable - link down flush status
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_PORT_ID      - Invalid port number.
+ *      RT_ERR_ENABLE       - Invalid enable input.
+ * Note:
+ *      The status of flushing link-down port addresses is one of the following:
+ *      - DISABLED
+ *      - ENABLED
+ */
+rtk_api_ret_t rtk_l2_flushLinkDownPortAddrEnable_set(rtk_port_t port, rtk_enable_t enable)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_l2_flushLinkDownPortAddrEnable_set(port, enable);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_l2_flushLinkDownPortAddrEnable_get
+ * Description:
+ *      Get HW flush linkdown port mac configuration of the specified device.
+ * Input:
+ *      port - Port id.
+ * Output:
+ *      pEnable - link down flush status
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_PORT_ID      - Invalid port number.
+ * Note:
+ *      The status of flushing link-down port addresses is one of the following:
+ *      - DISABLED
+ *      - ENABLED
+ */
+rtk_api_ret_t rtk_l2_flushLinkDownPortAddrEnable_get(rtk_port_t port, rtk_enable_t *pEnable)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_l2_flushLinkDownPortAddrEnable_get(port, pEnable);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_l2_agingEnable_set
+ * Description:
+ *      Set L2 LUT aging status per port setting.
+ * Input:
+ *      port    - Port id.
+ *      enable  - Aging status
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_PORT_ID      - Invalid port number.
+ *      RT_ERR_ENABLE       - Invalid enable input.
+ * Note:
+ *      This API can be used to set L2 LUT aging status per port.
+ */
+rtk_api_ret_t rtk_l2_agingEnable_set(rtk_port_t port, rtk_enable_t enable)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_l2_agingEnable_set(port, enable);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_l2_agingEnable_get
+ * Description:
+ *      Get L2 LUT aging status per port setting.
+ * Input:
+ *      port - Port id.
+ * Output:
+ *      pEnable - Aging status
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_PORT_ID      - Invalid port number.
+ * Note:
+ *      This API can be used to get the L2 LUT aging status per port.
+ */
+rtk_api_ret_t rtk_l2_agingEnable_get(rtk_port_t port, rtk_enable_t *pEnable)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_l2_agingEnable_get(port, pEnable);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_l2_limitLearningCnt_set
+ * Description:
+ *      Set per-Port auto learning limit number
+ * Input:
+ *      port    - Port id.
+ *      mac_cnt - Auto learning entries limit number
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK                   - OK
+ *      RT_ERR_FAILED               - Failed
+ *      RT_ERR_SMI                  - SMI access error
+ *      RT_ERR_PORT_ID              - Invalid port number.
+ *      RT_ERR_LIMITED_L2ENTRY_NUM  - Invalid auto learning limit number
+ * Note:
+ *      The API can set the per-port ASIC auto learning limit number, from 0 (disable learning)
+ *      to 2112.
+ */
+rtk_api_ret_t rtk_l2_limitLearningCnt_set(rtk_port_t port, rtk_mac_cnt_t mac_cnt)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_l2_limitLearningCnt_set(port, mac_cnt);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_l2_limitLearningCnt_get
+ * Description:
+ *      Get per-Port auto learning limit number
+ * Input:
+ *      port - Port id.
+ * Output:
+ *      pMac_cnt - Auto learning entries limit number
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_PORT_ID      - Invalid port number.
+ * Note:
+ *      The API can get per-port ASIC auto learning limit number.
+ */
+rtk_api_ret_t rtk_l2_limitLearningCnt_get(rtk_port_t port, rtk_mac_cnt_t *pMac_cnt)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_l2_limitLearningCnt_get(port, pMac_cnt);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_l2_limitSystemLearningCnt_set
+ * Description:
+ *      Set System auto learning limit number
+ * Input:
+ *      mac_cnt - Auto learning entries limit number
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK                   - OK
+ *      RT_ERR_FAILED               - Failed
+ *      RT_ERR_SMI                  - SMI access error
+ *      RT_ERR_LIMITED_L2ENTRY_NUM  - Invalid auto learning limit number
+ * Note:
+ *      The API can set the system ASIC auto learning limit number, from 0 (disable learning)
+ *      to 2112.
+ */
+rtk_api_ret_t rtk_l2_limitSystemLearningCnt_set(rtk_mac_cnt_t mac_cnt)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_l2_limitSystemLearningCnt_set(mac_cnt);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_l2_limitSystemLearningCnt_get
+ * Description:
+ *      Get System auto learning limit number
+ * Input:
+ *      None
+ * Output:
+ *      pMac_cnt - Auto learning entries limit number
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_PORT_ID      - Invalid port number.
+ * Note:
+ *      The API can get system ASIC auto learning limit number.
+ */
+rtk_api_ret_t rtk_l2_limitSystemLearningCnt_get(rtk_mac_cnt_t *pMac_cnt)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_l2_limitSystemLearningCnt_get(pMac_cnt);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_l2_limitLearningCntAction_set
+ * Description:
+ *      Configure auto learn over limit number action.
+ * Input:
+ *      port - Port id.
+ *      action - Action to take when the limit is exceeded
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_PORT_ID      - Invalid port number.
+ *      RT_ERR_NOT_ALLOWED  - Invalid learn over action
+ * Note:
+ *      The API can set the action taken on unknown-SA packets once the auto learn limit is exceeded.
+ *      The action symbols are as follows:
+ *      - LIMIT_LEARN_CNT_ACTION_DROP,
+ *      - LIMIT_LEARN_CNT_ACTION_FORWARD,
+ *      - LIMIT_LEARN_CNT_ACTION_TO_CPU,
+ */
+rtk_api_ret_t rtk_l2_limitLearningCntAction_set(rtk_port_t port, rtk_l2_limitLearnCntAction_t action)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_l2_limitLearningCntAction_set(port, action);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
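+
+/*
+ * Usage sketch (editor's illustration): capping port 1 at 16 learned
+ * addresses (an arbitrary example value) and dropping further unknown-SA
+ * traffic once the limit is reached.
+ *
+ *     rtk_l2_limitLearningCnt_set(1, 16);
+ *     rtk_l2_limitLearningCntAction_set(1, LIMIT_LEARN_CNT_ACTION_DROP);
+ */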
+
+/* Function Name:
+ *      rtk_l2_limitLearningCntAction_get
+ * Description:
+ *      Get auto learn over limit number action.
+ * Input:
+ *      port - Port id.
+ * Output:
+ *      pAction - Learn over action
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_PORT_ID      - Invalid port number.
+ * Note:
+ *      The API can get the action taken on unknown-SA packets once the auto learn limit is exceeded.
+ *      The action symbols are as follows:
+ *      - LIMIT_LEARN_CNT_ACTION_DROP,
+ *      - LIMIT_LEARN_CNT_ACTION_FORWARD,
+ *      - LIMIT_LEARN_CNT_ACTION_TO_CPU,
+ */
+rtk_api_ret_t rtk_l2_limitLearningCntAction_get(rtk_port_t port, rtk_l2_limitLearnCntAction_t *pAction)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_l2_limitLearningCntAction_get(port, pAction);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_l2_limitSystemLearningCntAction_set
+ * Description:
+ *      Configure system auto learn over limit number action.
+ * Input:
+ *      action - Action to take when the limit is exceeded
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_PORT_ID      - Invalid port number.
+ *      RT_ERR_NOT_ALLOWED  - Invalid learn over action
+ * Note:
+ *      The API can set the action taken on unknown-SA packets once the auto learn limit is exceeded.
+ *      The action symbols are as follows:
+ *      - LIMIT_LEARN_CNT_ACTION_DROP,
+ *      - LIMIT_LEARN_CNT_ACTION_FORWARD,
+ *      - LIMIT_LEARN_CNT_ACTION_TO_CPU,
+ */
+rtk_api_ret_t rtk_l2_limitSystemLearningCntAction_set(rtk_l2_limitLearnCntAction_t action)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_l2_limitSystemLearningCntAction_set(action);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_l2_limitSystemLearningCntAction_get
+ * Description:
+ *      Get system auto learn over limit number action.
+ * Input:
+ *      None.
+ * Output:
+ *      pAction - Learn over action
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_PORT_ID      - Invalid port number.
+ * Note:
+ *      The API can get the action taken on unknown-SA packets once the auto learn limit is exceeded.
+ *      The action symbols are as follows:
+ *      - LIMIT_LEARN_CNT_ACTION_DROP,
+ *      - LIMIT_LEARN_CNT_ACTION_FORWARD,
+ *      - LIMIT_LEARN_CNT_ACTION_TO_CPU,
+ */
+rtk_api_ret_t rtk_l2_limitSystemLearningCntAction_get(rtk_l2_limitLearnCntAction_t *pAction)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_l2_limitSystemLearningCntAction_get(pAction);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_l2_limitSystemLearningCntPortMask_set
+ * Description:
+ *      Configure system auto learn portmask
+ * Input:
+ *      pPortmask - Port Mask
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_PORT_MASK    - Invalid port mask.
+ * Note:
+ *
+ */
+rtk_api_ret_t rtk_l2_limitSystemLearningCntPortMask_set(rtk_portmask_t *pPortmask)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_l2_limitSystemLearningCntPortMask_set(pPortmask);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_l2_limitSystemLearningCntPortMask_get
+ * Description:
+ *      Get system auto learn portmask
+ * Input:
+ *      None
+ * Output:
+ *      pPortmask - Port Mask
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_NULL_POINTER - Null pointer.
+ * Note:
+ *
+ */
+rtk_api_ret_t rtk_l2_limitSystemLearningCntPortMask_get(rtk_portmask_t *pPortmask)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_l2_limitSystemLearningCntPortMask_get(pPortmask);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_l2_learningCnt_get
+ * Description:
+ *      Get per-Port current auto learning number
+ * Input:
+ *      port - Port id.
+ * Output:
+ *      pMac_cnt - ASIC auto learning entries number
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_PORT_ID      - Invalid port number.
+ * Note:
+ *      The API can get the per-port ASIC auto learning number.
+ */
+rtk_api_ret_t rtk_l2_learningCnt_get(rtk_port_t port, rtk_mac_cnt_t *pMac_cnt)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_l2_learningCnt_get(port, pMac_cnt);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_l2_floodPortMask_set
+ * Description:
+ *      Set flooding portmask
+ * Input:
+ *      flood_type - flooding type.
+ *      pFlood_portmask - flooding portmask
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_PORT_MASK    - Invalid portmask.
+ *      RT_ERR_INPUT        - Invalid input parameters.
+ * Note:
+ *      This API can set the flooding mask.
+ *      The flooding type is one of the following:
+ *      - FLOOD_UNKNOWNDA
+ *      - FLOOD_UNKNOWNMC
+ *      - FLOOD_BC
+ */
+rtk_api_ret_t rtk_l2_floodPortMask_set(rtk_l2_flood_type_t flood_type, rtk_portmask_t *pFlood_portmask)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_l2_floodPortMask_set(flood_type, pFlood_portmask);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_l2_floodPortMask_get
+ * Description:
+ *      Get flooding portmask
+ * Input:
+ *      flood_type - flooding type.
+ * Output:
+ *      pFlood_portmask - flooding portmask
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_PORT_ID      - Invalid port number.
+ * Note:
+ *      This API can get the flooding mask.
+ *      The flooding type is one of the following:
+ *      - FLOOD_UNKNOWNDA
+ *      - FLOOD_UNKNOWNMC
+ *      - FLOOD_BC
+ */
+rtk_api_ret_t rtk_l2_floodPortMask_get(rtk_l2_flood_type_t flood_type, rtk_portmask_t *pFlood_portmask)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_l2_floodPortMask_get(flood_type, pFlood_portmask);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
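+
+/*
+ * Usage sketch (editor's illustration): restricting unknown-DA flooding to
+ * physical ports 0-3. rtk_switch_portmask_P2L_get(), used earlier in this
+ * file, converts a physical port mask into the logical rtk_portmask_t the
+ * API expects.
+ *
+ *     rtk_portmask_t mask;
+ *
+ *     if (rtk_switch_portmask_P2L_get(0x0F, &mask) == RT_ERR_OK)
+ *         rtk_l2_floodPortMask_set(FLOOD_UNKNOWNDA, &mask);
+ */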
+
+/* Function Name:
+ *      rtk_l2_localPktPermit_set
+ * Description:
+ *      Set permission for frames whose source port and destination port are the same.
+ * Input:
+ *      port - Port id.
+ *      permit - permission status
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_PORT_ID      - Invalid port number.
+ *      RT_ERR_ENABLE       - Invalid permit value.
+ * Note:
+ *      This API sets whether a frame is permitted when its source port is equal to its destination port.
+ */
+rtk_api_ret_t rtk_l2_localPktPermit_set(rtk_port_t port, rtk_enable_t permit)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_l2_localPktPermit_set(port, permit);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_l2_localPktPermit_get
+ * Description:
+ *      Get permission for frames whose source port and destination port are the same.
+ * Input:
+ *      port - Port id.
+ * Output:
+ *      pPermit - permission status
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_PORT_ID      - Invalid port number.
+ * Note:
+ *      This API gets the permission status for frames whose source port is equal to the destination port.
+ */
+rtk_api_ret_t rtk_l2_localPktPermit_get(rtk_port_t port, rtk_enable_t *pPermit)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_l2_localPktPermit_get(port, pPermit);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_l2_aging_set
+ * Description:
+ *      Set LUT aging-out speed
+ * Input:
+ *      aging_time - Aging-out time.
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK               - OK
+ *      RT_ERR_FAILED           - Failed
+ *      RT_ERR_SMI              - SMI access error
+ *      RT_ERR_OUT_OF_RANGE     - input out of range.
+ * Note:
+ *      The API can set the LUT aging-out period for each entry; the range is from 45s to 458s.
+ */
+rtk_api_ret_t rtk_l2_aging_set(rtk_l2_age_time_t aging_time)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_l2_aging_set(aging_time);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_l2_aging_get
+ * Description:
+ *      Get LUT aging-out time
+ * Input:
+ *      None
+ * Output:
+ *      pAging_time - Aging-out time
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_PORT_ID      - Invalid port number.
+ * Note:
+ *      The API can get the LUT aging-out period for each entry.
+ */
+rtk_api_ret_t rtk_l2_aging_get(rtk_l2_age_time_t *pAging_time)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_l2_aging_get(pAging_time);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_l2_ipMcastAddrLookup_set
+ * Description:
+ *      Set Lut IP multicast lookup function
+ * Input:
+ *      type - Lookup type for IPMC packet.
+ * Output:
+ *      None.
+ * Return:
+ *      RT_ERR_OK          - OK
+ *      RT_ERR_FAILED      - Failed
+ *      RT_ERR_SMI         - SMI access error
+ * Note:
+ *      LOOKUP_MAC      - Lookup by MAC address
+ *      LOOKUP_IP       - Lookup by IP address
+ *      LOOKUP_IP_VID   - Lookup by IP address & VLAN ID
+ */
+rtk_api_ret_t rtk_l2_ipMcastAddrLookup_set(rtk_l2_ipmc_lookup_type_t type)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_l2_ipMcastAddrLookup_set(type);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
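+
+/*
+ * Usage sketch (editor's illustration): selecting DIP/SIP lookup and then
+ * installing an IP multicast entry. The dip/sip/portmask fields come from
+ * rtk_l2_ipMcastAddr_t in l2.h; ipaddr_t is assumed here to carry an IPv4
+ * address in host order, and a zero SIP is assumed to act as a wildcard.
+ *
+ *     rtk_l2_ipMcastAddr_t mc;
+ *
+ *     rtk_l2_ipMcastAddrLookup_set(LOOKUP_IP);
+ *     memset(&mc, 0, sizeof(mc));
+ *     mc.dip = 0xEF010101;   // 239.1.1.1
+ *     rtk_switch_portmask_P2L_get(0x06, &mc.portmask);   // ports 1-2
+ *     rtk_l2_ipMcastAddr_add(&mc);
+ */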
+
+/* Function Name:
+ *      rtk_l2_ipMcastAddrLookup_get
+ * Description:
+ *      Get Lut IP multicast lookup function
+ * Input:
+ *      None.
+ * Output:
+ *      pType - Lookup type for IPMC packet.
+ * Return:
+ *      RT_ERR_OK          - OK
+ *      RT_ERR_FAILED      - Failed
+ *      RT_ERR_SMI         - SMI access error
+ * Note:
+ *      None.
+ */
+rtk_api_ret_t rtk_l2_ipMcastAddrLookup_get(rtk_l2_ipmc_lookup_type_t *pType)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_l2_ipMcastAddrLookup_get(pType);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_l2_ipMcastForwardRouterPort_set
+ * Description:
+ *      Set whether IPMC packets are also forwarded to the router port
+ * Input:
+ *      enabled - 1: Include router port, 0: Exclude router port
+ * Output:
+ *      None.
+ * Return:
+ *      RT_ERR_OK          - OK
+ *      RT_ERR_FAILED      - Failed
+ *      RT_ERR_SMI         - SMI access error
+ * Note:
+ *
+ */
+rtk_api_ret_t rtk_l2_ipMcastForwardRouterPort_set(rtk_enable_t enabled)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_l2_ipMcastForwardRouterPort_set(enabled);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_l2_ipMcastForwardRouterPort_get
+ * Description:
+ *      Get whether IPMC packets are also forwarded to the router port
+ * Input:
+ *      None.
+ * Output:
+ *      pEnabled    - 1: Include router port, 0: Exclude router port
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_NULL_POINTER - Null pointer
+ * Note:
+ *
+ */
+rtk_api_ret_t rtk_l2_ipMcastForwardRouterPort_get(rtk_enable_t *pEnabled)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_l2_ipMcastForwardRouterPort_get(pEnabled);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_l2_ipMcastGroupEntry_add
+ * Description:
+ *      Add an IP Multicast entry to group table
+ * Input:
+ *      ip_addr     - IP address
+ *      vid         - VLAN ID
+ *      pPortmask   - portmask
+ * Output:
+ *      None.
+ * Return:
+ *      RT_ERR_OK          - OK
+ *      RT_ERR_FAILED      - Failed
+ *      RT_ERR_SMI         - SMI access error
+ *      RT_ERR_TBL_FULL    - Table Full
+ * Note:
+ *      Add an entry to IP Multicast Group table.
+ */
+rtk_api_ret_t rtk_l2_ipMcastGroupEntry_add(ipaddr_t ip_addr, rtk_uint32 vid, rtk_portmask_t *pPortmask)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_l2_ipMcastGroupEntry_add(ip_addr, vid, pPortmask);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_l2_ipMcastGroupEntry_del
+ * Description:
+ *      Delete an entry from IP Multicast group table
+ * Input:
+ *      ip_addr     - IP address
+ *      vid         - VLAN ID
+ * Output:
+ *      None.
+ * Return:
+ *      RT_ERR_OK          - OK
+ *      RT_ERR_FAILED      - Failed
+ *      RT_ERR_SMI         - SMI access error
+ *      RT_ERR_TBL_FULL    - Table Full
+ * Note:
+ *      Delete an entry from IP Multicast group table.
+ */
+rtk_api_ret_t rtk_l2_ipMcastGroupEntry_del(ipaddr_t ip_addr, rtk_uint32 vid)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_l2_ipMcastGroupEntry_del(ip_addr, vid);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_l2_ipMcastGroupEntry_get
+ * Description:
+ *      Get an entry from the IP Multicast group table
+ * Input:
+ *      ip_addr     - IP address
+ *      vid         - VLAN ID
+ * Output:
+ *      pPortmask   - member port mask
+ * Return:
+ *      RT_ERR_OK          - OK
+ *      RT_ERR_FAILED      - Failed
+ *      RT_ERR_SMI         - SMI access error
+ *      RT_ERR_TBL_FULL    - Table Full
+ * Note:
+ *      Get an entry from the IP Multicast group table.
+ */
+rtk_api_ret_t rtk_l2_ipMcastGroupEntry_get(ipaddr_t ip_addr, rtk_uint32 vid, rtk_portmask_t *pPortmask)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_l2_ipMcastGroupEntry_get(ip_addr, vid, pPortmask);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_l2_entry_get
+ * Description:
+ *      Get LUT unicast entry.
+ * Input:
+ *      pL2_entry - Index field in the structure.
+ * Output:
+ *      pL2_entry - other fields such as MAC, port, age...
+ * Return:
+ *      RT_ERR_OK               - OK
+ *      RT_ERR_FAILED           - Failed
+ *      RT_ERR_SMI              - SMI access error
+ *      RT_ERR_L2_EMPTY_ENTRY   - Empty LUT entry.
+ *      RT_ERR_INPUT            - Invalid input parameters.
+ * Note:
+ *      This API is used to get an address entry by index, from 0 to 2111.
+ */
+rtk_api_ret_t rtk_l2_entry_get(rtk_l2_addr_table_t *pL2_entry)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_l2_entry_get(pL2_entry);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
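+
+/*
+ * Usage sketch (editor's illustration): dumping the LUT by raw index with
+ * rtk_l2_entry_get(). Empty slots report RT_ERR_L2_EMPTY_ENTRY and are
+ * skipped; 2112 is the index range given in the Note above.
+ *
+ *     rtk_l2_addr_table_t entry;
+ *     rtk_uint32 idx;
+ *
+ *     for (idx = 0; idx < 2112; idx++)
+ *     {
+ *         entry.index = idx;
+ *         if (rtk_l2_entry_get(&entry) != RT_ERR_OK)
+ *             continue;    // empty slot or access error
+ *         // entry.mac / entry.portmask / entry.age are now valid
+ *     }
+ */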
+
+
+
diff -Nruw linux-4.4.115-fbx/drivers/misc/freebox./rtlapi/l2.h linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/l2.h
--- linux-4.4.115-fbx/drivers/misc/freebox./rtlapi/l2.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/l2.h	2019-01-22 16:16:24.707257309 +0100
@@ -0,0 +1,1183 @@
+/*
+ * Copyright (C) 2013 Realtek Semiconductor Corp.
+ * All Rights Reserved.
+ *
+ * This program is the proprietary software of Realtek Semiconductor
+ * Corporation and/or its licensors, and only be used, duplicated,
+ * modified or distributed under the authorized license from Realtek.
+ *
+ * ANY USE OF THE SOFTWARE OTHER THAN AS AUTHORIZED UNDER
+ * THIS LICENSE OR COPYRIGHT LAW IS PROHIBITED.
+ *
+ * Purpose : RTL8367/RTL8367C switch high-level API
+ *
+ * Feature : The file includes L2 module high-layer API definition
+ *
+ */
+
+#ifndef __RTK_API_L2_H__
+#define __RTK_API_L2_H__
+
+
+/*
+ * Data Type Declaration
+ */
+#define RTK_MAX_NUM_OF_LEARN_LIMIT                  (rtk_switch_maxLutAddrNumber_get())
+
+#define RTK_MAC_ADDR_LEN                            6
+#define RTK_MAX_LUT_ADDRESS                         (RTK_MAX_NUM_OF_LEARN_LIMIT)
+#define RTK_MAX_LUT_ADDR_ID                         (RTK_MAX_LUT_ADDRESS - 1)
+
+typedef rtk_uint32 rtk_l2_age_time_t;
+
+typedef enum rtk_l2_flood_type_e
+{
+    FLOOD_UNKNOWNDA = 0,
+    FLOOD_UNKNOWNMC,
+    FLOOD_BC,
+    FLOOD_END
+} rtk_l2_flood_type_t;
+
+typedef rtk_uint32 rtk_l2_flushItem_t;
+
+typedef enum rtk_l2_flushType_e
+{
+    FLUSH_TYPE_BY_PORT = 0,       /* physical port       */
+    FLUSH_TYPE_BY_PORT_VID,       /* physical port + VID */
+    FLUSH_TYPE_BY_PORT_FID,       /* physical port + FID */
+    FLUSH_TYPE_END
+} rtk_l2_flushType_t;
+
+typedef struct rtk_l2_flushCfg_s
+{
+    rtk_enable_t    flushByVid;
+    rtk_vlan_t      vid;
+    rtk_enable_t    flushByFid;
+    rtk_uint32      fid;
+    rtk_enable_t    flushByPort;
+    rtk_port_t      port;
+    rtk_enable_t    flushByMac;
+    rtk_mac_t       ucastAddr;
+    rtk_enable_t    flushStaticAddr;
+    rtk_enable_t    flushAddrOnAllPorts; /* this is used when flushByVid */
+} rtk_l2_flushCfg_t;
+
+typedef enum rtk_l2_read_method_e{
+
+    READMETHOD_MAC = 0,
+    READMETHOD_ADDRESS,
+    READMETHOD_NEXT_ADDRESS,
+    READMETHOD_NEXT_L2UC,
+    READMETHOD_NEXT_L2MC,
+    READMETHOD_NEXT_L3MC,
+    READMETHOD_NEXT_L2L3MC,
+    READMETHOD_NEXT_L2UCSPA,
+    READMETHOD_END
+}rtk_l2_read_method_t;
+
+/* l2 limit learning count action */
+typedef enum rtk_l2_limitLearnCntAction_e
+{
+    LIMIT_LEARN_CNT_ACTION_DROP = 0,
+    LIMIT_LEARN_CNT_ACTION_FORWARD,
+    LIMIT_LEARN_CNT_ACTION_TO_CPU,
+    LIMIT_LEARN_CNT_ACTION_END
+} rtk_l2_limitLearnCntAction_t;
+
+typedef enum rtk_l2_ipmc_lookup_type_e
+{
+    LOOKUP_MAC = 0,
+    LOOKUP_IP,
+    LOOKUP_IP_VID,
+    LOOKUP_END
+} rtk_l2_ipmc_lookup_type_t;
+
+/* l2 address table - unicast data structure */
+typedef struct rtk_l2_ucastAddr_s
+{
+    rtk_mac_t       mac;
+    rtk_uint32      ivl;
+    rtk_uint32      cvid;
+    rtk_uint32      fid;
+    rtk_uint32      efid;
+    rtk_uint32      port;
+    rtk_uint32      sa_block;
+    rtk_uint32      da_block;
+    rtk_uint32      auth;
+    rtk_uint32      is_static;
+    rtk_uint32      priority;
+    rtk_uint32      sa_pri_en;
+    rtk_uint32      fwd_pri_en;
+    rtk_uint32      address;
+}rtk_l2_ucastAddr_t;
+
+/* l2 address table - multicast data structure */
+typedef struct rtk_l2_mcastAddr_s
+{
+    rtk_uint32      vid;
+    rtk_mac_t       mac;
+    rtk_uint32      fid;
+    rtk_portmask_t  portmask;
+    rtk_uint32      ivl;
+    rtk_uint32      priority;
+    rtk_uint32      fwd_pri_en;
+    rtk_uint32      igmp_asic;
+    rtk_uint32      igmp_index;
+    rtk_uint32      address;
+}rtk_l2_mcastAddr_t;
+
+/* l2 address table - ip multicast data structure */
+typedef struct rtk_l2_ipMcastAddr_s
+{
+    ipaddr_t        dip;
+    ipaddr_t        sip;
+    rtk_portmask_t  portmask;
+    rtk_uint32      priority;
+    rtk_uint32      fwd_pri_en;
+    rtk_uint32      igmp_asic;
+    rtk_uint32      igmp_index;
+    rtk_uint32      address;
+}rtk_l2_ipMcastAddr_t;
+
+/* l2 address table - ip VID multicast data structure */
+typedef struct rtk_l2_ipVidMcastAddr_s
+{
+    ipaddr_t        dip;
+    ipaddr_t        sip;
+    rtk_uint32      vid;
+    rtk_portmask_t  portmask;
+    rtk_uint32      address;
+}rtk_l2_ipVidMcastAddr_t;
+
+typedef struct rtk_l2_addr_table_s
+{
+    rtk_uint32  index;
+    ipaddr_t    sip;
+    ipaddr_t    dip;
+    rtk_mac_t   mac;
+    rtk_uint32  sa_block;
+    rtk_uint32  auth;
+    rtk_portmask_t  portmask;
+    rtk_uint32  age;
+    rtk_uint32  ivl;
+    rtk_uint32  cvid;
+    rtk_uint32  fid;
+    rtk_uint32  is_ipmul;
+    rtk_uint32  is_static;
+    rtk_uint32  is_ipvidmul;
+    rtk_uint32  l3_vid;
+}rtk_l2_addr_table_t;
+
+typedef enum rtk_l2_clearStatus_e
+{
+    L2_CLEAR_STATE_FINISH = 0,
+    L2_CLEAR_STATE_BUSY,
+    L2_CLEAR_STATE_END
+}rtk_l2_clearStatus_t;
+
+/* Function Name:
+ *      rtk_l2_init
+ * Description:
+ *      Initialize l2 module of the specified device.
+ * Input:
+ *      None
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK          - OK
+ *      RT_ERR_FAILED      - Failed
+ *      RT_ERR_SMI         - SMI access error
+ * Note:
+ *      Initialize l2 module before calling any l2 APIs.
+ */
+extern rtk_api_ret_t rtk_l2_init(void);
+
+/* Function Name:
+ *      rtk_l2_addr_add
+ * Description:
+ *      Add LUT unicast entry.
+ * Input:
+ *      pMac - 6 bytes unicast (I/G bit is 0) mac address to be written into LUT.
+ *      pL2_data - Unicast entry parameter
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK               - OK
+ *      RT_ERR_FAILED           - Failed
+ *      RT_ERR_SMI              - SMI access error
+ *      RT_ERR_PORT_ID          - Invalid port number.
+ *      RT_ERR_MAC              - Invalid MAC address.
+ *      RT_ERR_L2_FID           - Invalid FID .
+ *      RT_ERR_L2_INDEXTBL_FULL - hashed index is full of entries.
+ *      RT_ERR_INPUT            - Invalid input parameters.
+ * Note:
+ *      If the unicast mac address already exists in the LUT, the API will update the status of the entry.
+ *      Otherwise, it will find an empty or ASIC auto-learned entry to write. If none of the entries
+ *      with the same hash value can be replaced, the ASIC will return a RT_ERR_L2_INDEXTBL_FULL error.
+ */
+extern rtk_api_ret_t rtk_l2_addr_add(rtk_mac_t *pMac, rtk_l2_ucastAddr_t *pL2_data);
+
+/* Function Name:
+ *      rtk_l2_addr_get
+ * Description:
+ *      Get LUT unicast entry.
+ * Input:
+ *      pMac    - 6 bytes unicast (I/G bit is 0) mac address to be written into LUT.
+ * Output:
+ *      pL2_data - Unicast entry parameter
+ * Return:
+ *      RT_ERR_OK                   - OK
+ *      RT_ERR_FAILED               - Failed
+ *      RT_ERR_SMI                  - SMI access error
+ *      RT_ERR_PORT_ID              - Invalid port number.
+ *      RT_ERR_MAC                  - Invalid MAC address.
+ *      RT_ERR_L2_FID               - Invalid FID .
+ *      RT_ERR_L2_ENTRY_NOTFOUND    - No such LUT entry.
+ *      RT_ERR_INPUT                - Invalid input parameters.
+ * Note:
+ *      If the unicast mac address exists in the LUT, it will return the port and fid where
+ *      the mac is learned. Otherwise, it will return a RT_ERR_L2_ENTRY_NOTFOUND error.
+ */
+extern rtk_api_ret_t rtk_l2_addr_get(rtk_mac_t *pMac, rtk_l2_ucastAddr_t *pL2_data);
+
+/* Function Name:
+ *      rtk_l2_addr_next_get
+ * Description:
+ *      Get Next LUT unicast entry.
+ * Input:
+ *      read_method     - The reading method.
+ *      port            - The port number if the read_method is READMETHOD_NEXT_L2UCSPA
+ *      pAddress        - The Address ID
+ * Output:
+ *      pL2_data - Unicast entry parameter
+ * Return:
+ *      RT_ERR_OK                   - OK
+ *      RT_ERR_FAILED               - Failed
+ *      RT_ERR_SMI                  - SMI access error
+ *      RT_ERR_PORT_ID              - Invalid port number.
+ *      RT_ERR_MAC                  - Invalid MAC address.
+ *      RT_ERR_L2_FID               - Invalid FID .
+ *      RT_ERR_L2_ENTRY_NOTFOUND    - No such LUT entry.
+ *      RT_ERR_INPUT                - Invalid input parameters.
+ * Note:
+ *      Get the next unicast entry after the current entry pointed to by pAddress.
+ *      The address of the next entry is returned through pAddress. The user can pass (address + 1)
+ *      as pAddress to call this API again to dump all entries in the LUT.
+ */
+extern rtk_api_ret_t rtk_l2_addr_next_get(rtk_l2_read_method_t read_method, rtk_port_t port, rtk_uint32 *pAddress, rtk_l2_ucastAddr_t *pL2_data);
+
+/* Function Name:
+ *      rtk_l2_addr_del
+ * Description:
+ *      Delete LUT unicast entry.
+ * Input:
+ *      pMac - 6 bytes unicast (I/G bit is 0) mac address to be written into LUT.
+ *      pL2_data - Unicast entry parameter
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK                   - OK
+ *      RT_ERR_FAILED               - Failed
+ *      RT_ERR_SMI                  - SMI access error
+ *      RT_ERR_PORT_ID              - Invalid port number.
+ *      RT_ERR_MAC                  - Invalid MAC address.
+ *      RT_ERR_L2_FID               - Invalid FID .
+ *      RT_ERR_L2_ENTRY_NOTFOUND    - No such LUT entry.
+ *      RT_ERR_INPUT                - Invalid input parameters.
+ * Note:
+ *      If the mac exists in the LUT, it will be deleted. Otherwise, it will return RT_ERR_L2_ENTRY_NOTFOUND.
+ */
+extern rtk_api_ret_t rtk_l2_addr_del(rtk_mac_t *pMac, rtk_l2_ucastAddr_t *pL2_data);
+
+/* Function Name:
+ *      rtk_l2_mcastAddr_add
+ * Description:
+ *      Add LUT multicast entry.
+ * Input:
+ *      pMcastAddr  - L2 multicast entry structure
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK               - OK
+ *      RT_ERR_FAILED           - Failed
+ *      RT_ERR_SMI              - SMI access error
+ *      RT_ERR_PORT_ID          - Invalid port number.
+ *      RT_ERR_MAC              - Invalid MAC address.
+ *      RT_ERR_L2_FID           - Invalid FID .
+ *      RT_ERR_L2_VID           - Invalid VID .
+ *      RT_ERR_L2_INDEXTBL_FULL - hashed index is full of entries.
+ *      RT_ERR_PORT_MASK        - Invalid portmask.
+ *      RT_ERR_INPUT            - Invalid input parameters.
+ * Note:
+ *      If the multicast mac address already exists in the LUT, the API will update the
+ *      port mask of the entry. Otherwise, it will find an empty or ASIC auto-learned
+ *      entry to write. If none of the entries with the same hash value can be replaced,
+ *      the ASIC will return a RT_ERR_L2_INDEXTBL_FULL error.
+ */
+extern rtk_api_ret_t rtk_l2_mcastAddr_add(rtk_l2_mcastAddr_t *pMcastAddr);
+
+/* Function Name:
+ *      rtk_l2_mcastAddr_get
+ * Description:
+ *      Get LUT multicast entry.
+ * Input:
+ *      pMcastAddr  - L2 multicast entry structure
+ * Output:
+ *      pMcastAddr  - L2 multicast entry structure
+ * Return:
+ *      RT_ERR_OK                   - OK
+ *      RT_ERR_FAILED               - Failed
+ *      RT_ERR_SMI                  - SMI access error
+ *      RT_ERR_MAC                  - Invalid MAC address.
+ *      RT_ERR_L2_FID               - Invalid FID .
+ *      RT_ERR_L2_VID               - Invalid VID .
+ *      RT_ERR_L2_ENTRY_NOTFOUND    - No such LUT entry.
+ *      RT_ERR_INPUT                - Invalid input parameters.
+ * Note:
+ *      If the multicast mac address exists in the LUT, it will return the ports where
+ *      the mac is learned. Otherwise, it will return a RT_ERR_L2_ENTRY_NOTFOUND error.
+ */
+extern rtk_api_ret_t rtk_l2_mcastAddr_get(rtk_l2_mcastAddr_t *pMcastAddr);
+
+/* Function Name:
+ *      rtk_l2_mcastAddr_next_get
+ * Description:
+ *      Get Next L2 Multicast entry.
+ * Input:
+ *      pAddress        - The Address ID
+ * Output:
+ *      pMcastAddr  - L2 multicast entry structure
+ * Return:
+ *      RT_ERR_OK                   - OK
+ *      RT_ERR_FAILED               - Failed
+ *      RT_ERR_SMI                  - SMI access error
+ *      RT_ERR_L2_ENTRY_NOTFOUND    - No such LUT entry.
+ *      RT_ERR_INPUT                - Invalid input parameters.
+ * Note:
+ *      Get the next L2 multicast entry after the current entry pointed to by pAddress.
+ *      The address of the next entry is returned through pAddress. The user can pass (address + 1)
+ *      as pAddress to call this API again to dump all multicast entries in the LUT.
+ */
+extern rtk_api_ret_t rtk_l2_mcastAddr_next_get(rtk_uint32 *pAddress, rtk_l2_mcastAddr_t *pMcastAddr);
+
+/* Function Name:
+ *      rtk_l2_mcastAddr_del
+ * Description:
+ *      Delete LUT multicast entry.
+ * Input:
+ *      pMcastAddr  - L2 multicast entry structure
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK                   - OK
+ *      RT_ERR_FAILED               - Failed
+ *      RT_ERR_SMI                  - SMI access error
+ *      RT_ERR_MAC                  - Invalid MAC address.
+ *      RT_ERR_L2_FID               - Invalid FID .
+ *      RT_ERR_L2_VID               - Invalid VID .
+ *      RT_ERR_L2_ENTRY_NOTFOUND    - No such LUT entry.
+ *      RT_ERR_INPUT                - Invalid input parameters.
+ * Note:
+ *      If the mac exists in the LUT, it will be deleted. Otherwise, it will return RT_ERR_L2_ENTRY_NOTFOUND.
+ */
+extern rtk_api_ret_t rtk_l2_mcastAddr_del(rtk_l2_mcastAddr_t *pMcastAddr);
+
+/* Function Name:
+ *      rtk_l2_ipMcastAddr_add
+ * Description:
+ *      Add Lut IP multicast entry
+ * Input:
+ *      pIpMcastAddr    - IP Multicast entry
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK               - OK
+ *      RT_ERR_FAILED           - Failed
+ *      RT_ERR_SMI              - SMI access error
+ *      RT_ERR_PORT_ID          - Invalid port number.
+ *      RT_ERR_L2_INDEXTBL_FULL - hashed index is full of entries.
+ *      RT_ERR_PORT_MASK        - Invalid portmask.
+ *      RT_ERR_INPUT            - Invalid input parameters.
+ * Note:
+ *      The system supports L2 entries keyed by IP multicast DIP/SIP so IP multicast frames can be
+ *      forwarded as the user desires. If this function is enabled, the system will look up the L2
+ *      IP multicast entry and forward the IP multicast frame directly instead of flooding it.
+ */
+extern rtk_api_ret_t rtk_l2_ipMcastAddr_add(rtk_l2_ipMcastAddr_t *pIpMcastAddr);
+
+/* Function Name:
+ *      rtk_l2_ipMcastAddr_get
+ * Description:
+ *      Get LUT IP multicast entry.
+ * Input:
+ *      pIpMcastAddr    - IP Multicast entry
+ * Output:
+ *      pIpMcastAddr    - IP Multicast entry
+ * Return:
+ *      RT_ERR_OK                   - OK
+ *      RT_ERR_FAILED               - Failed
+ *      RT_ERR_SMI                  - SMI access error
+ *      RT_ERR_L2_ENTRY_NOTFOUND    - No such LUT entry.
+ *      RT_ERR_INPUT                - Invalid input parameters.
+ * Note:
+ *      The API can get Lut table of IP multicast entry.
+ */
+extern rtk_api_ret_t rtk_l2_ipMcastAddr_get(rtk_l2_ipMcastAddr_t *pIpMcastAddr);
+
+/* Function Name:
+ *      rtk_l2_ipMcastAddr_next_get
+ * Description:
+ *      Get Next IP Multicast entry.
+ * Input:
+ *      pAddress        - The Address ID
+ * Output:
+ *      pIpMcastAddr    - IP Multicast entry
+ * Return:
+ *      RT_ERR_OK                   - OK
+ *      RT_ERR_FAILED               - Failed
+ *      RT_ERR_SMI                  - SMI access error
+ *      RT_ERR_L2_ENTRY_NOTFOUND    - No such LUT entry.
+ *      RT_ERR_INPUT                - Invalid input parameters.
+ * Note:
+ *      Get the next IP multicast entry after the current entry pointed to by pAddress.
+ *      The address of the next entry is returned through pAddress. The user can pass (address + 1)
+ *      as pAddress to call this API again to dump all IP multicast entries in the LUT.
+ */
+extern rtk_api_ret_t rtk_l2_ipMcastAddr_next_get(rtk_uint32 *pAddress, rtk_l2_ipMcastAddr_t *pIpMcastAddr);
+
+/* Function Name:
+ *      rtk_l2_ipMcastAddr_del
+ * Description:
+ *      Delete an IP multicast address entry from the specified device.
+ * Input:
+ *      pIpMcastAddr    - IP Multicast entry
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK                   - OK
+ *      RT_ERR_FAILED               - Failed
+ *      RT_ERR_SMI                  - SMI access error
+ *      RT_ERR_L2_ENTRY_NOTFOUND    - No such LUT entry.
+ *      RT_ERR_INPUT                - Invalid input parameters.
+ * Note:
+ *      The API can delete an IP multicast address entry from the specified device.
+ */
+extern rtk_api_ret_t rtk_l2_ipMcastAddr_del(rtk_l2_ipMcastAddr_t *pIpMcastAddr);
+
+/* Function Name:
+ *      rtk_l2_ipVidMcastAddr_add
+ * Description:
+ *      Add Lut IP multicast+VID entry
+ * Input:
+ *      pIpVidMcastAddr - IP & VID multicast Entry
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK               - OK
+ *      RT_ERR_FAILED           - Failed
+ *      RT_ERR_SMI              - SMI access error
+ *      RT_ERR_PORT_ID          - Invalid port number.
+ *      RT_ERR_L2_INDEXTBL_FULL - hashed index is full of entries.
+ *      RT_ERR_PORT_MASK        - Invalid portmask.
+ *      RT_ERR_INPUT            - Invalid input parameters.
+ * Note:
+ *
+ */
+extern rtk_api_ret_t rtk_l2_ipVidMcastAddr_add(rtk_l2_ipVidMcastAddr_t *pIpVidMcastAddr);
+
+/* Function Name:
+ *      rtk_l2_ipVidMcastAddr_get
+ * Description:
+ *      Get LUT IP multicast+VID entry.
+ * Input:
+ *      pIpVidMcastAddr - IP & VID multicast Entry
+ * Output:
+ *      pIpVidMcastAddr - IP & VID multicast Entry
+ * Return:
+ *      RT_ERR_OK                   - OK
+ *      RT_ERR_FAILED               - Failed
+ *      RT_ERR_SMI                  - SMI access error
+ *      RT_ERR_L2_ENTRY_NOTFOUND    - No such LUT entry.
+ *      RT_ERR_INPUT                - Invalid input parameters.
+ * Note:
+ *
+ */
+extern rtk_api_ret_t rtk_l2_ipVidMcastAddr_get(rtk_l2_ipVidMcastAddr_t *pIpVidMcastAddr);
+
+/* Function Name:
+ *      rtk_l2_ipVidMcastAddr_next_get
+ * Description:
+ *      Get Next IP Multicast+VID entry.
+ * Input:
+ *      pAddress        - The Address ID
+ * Output:
+ *      pIpVidMcastAddr - IP & VID multicast Entry
+ * Return:
+ *      RT_ERR_OK                   - OK
+ *      RT_ERR_FAILED               - Failed
+ *      RT_ERR_SMI                  - SMI access error
+ *      RT_ERR_L2_ENTRY_NOTFOUND    - No such LUT entry.
+ *      RT_ERR_INPUT                - Invalid input parameters.
+ * Note:
+ *      Get the next IP multicast entry after the current entry pointed to by pAddress.
+ *      The address of the next entry is returned through pAddress. Users can pass
+ *      (address + 1) as pAddress to call this API again and dump all IP multicast entries in the LUT.
+ */
+extern rtk_api_ret_t rtk_l2_ipVidMcastAddr_next_get(rtk_uint32 *pAddress, rtk_l2_ipVidMcastAddr_t *pIpVidMcastAddr);
+
+/* Function Name:
+ *      rtk_l2_ipVidMcastAddr_del
+ * Description:
+ *      Delete an IP multicast+VID address entry from the specified device.
+ * Input:
+ *      pIpVidMcastAddr - IP & VID multicast Entry
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK                   - OK
+ *      RT_ERR_FAILED               - Failed
+ *      RT_ERR_SMI                  - SMI access error
+ *      RT_ERR_L2_ENTRY_NOTFOUND    - No such LUT entry.
+ *      RT_ERR_INPUT                - Invalid input parameters.
+ * Note:
+ *
+ */
+extern rtk_api_ret_t rtk_l2_ipVidMcastAddr_del(rtk_l2_ipVidMcastAddr_t *pIpVidMcastAddr);
+
+/* Function Name:
+ *      rtk_l2_ucastAddr_flush
+ * Description:
+ *      Flush L2 MAC addresses by type in the specified device (both dynamic and static).
+ * Input:
+ *      pConfig - flush configuration
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_PORT_ID      - Invalid port number.
+ *      RT_ERR_VLAN_VID     - Invalid VID parameter.
+ *      RT_ERR_INPUT        - Invalid input parameters.
+ * Note:
+ *      flushByVid          - 1: Flush by VID, 0: Don't flush by VID
+ *      vid                 - VID (0 ~ 4095)
+ *      flushByFid          - 1: Flush by FID, 0: Don't flush by FID
+ *      fid                 - FID (0 ~ 15)
+ *      flushByPort         - 1: Flush by Port, 0: Don't flush by Port
+ *      port                - Port ID
+ *      flushByMac          - Not Supported
+ *      ucastAddr           - Not Supported
+ *      flushStaticAddr     - 1: Flush both Static and Dynamic entries, 0: Flush only Dynamic entries
+ *      flushAddrOnAllPorts - 1: Flush VID-matched entries at all ports, 0: Flush VID-matched entries per port.
+ */
+extern rtk_api_ret_t rtk_l2_ucastAddr_flush(rtk_l2_flushCfg_t *pConfig);
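+
+/*
+ * Example (editor's sketch): flushing only the dynamic entries learned on a
+ * single port, using the rtk_l2_flushCfg_t fields listed in the Note above.
+ * Zero-initialising the unused criteria is an assumption about the struct.
+ *
+ *    rtk_l2_flushCfg_t cfg;
+ *
+ *    memset(&cfg, 0, sizeof(cfg));
+ *    cfg.flushByPort     = 1;    // match entries by port only
+ *    cfg.port            = 3;    // logical port to flush
+ *    cfg.flushStaticAddr = 0;    // dynamic entries only
+ *    rtk_l2_ucastAddr_flush(&cfg);
+ */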
+
+/* Function Name:
+ *      rtk_l2_table_clear
+ * Description:
+ *      Flush all static & dynamic entries in LUT.
+ * Input:
+ *      None
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ * Note:
+ *
+ */
+extern rtk_api_ret_t rtk_l2_table_clear(void);
+
+/* Function Name:
+ *      rtk_l2_table_clearStatus_get
+ * Description:
+ *      Get table clear status
+ * Input:
+ *      None
+ * Output:
+ *      pStatus - Clear status, 1: Busy, 0: Finished
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ * Note:
+ *
+ */
+extern rtk_api_ret_t rtk_l2_table_clearStatus_get(rtk_l2_clearStatus_t *pStatus);
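+
+/*
+ * Example (editor's sketch): clearing the LUT and polling until the hardware
+ * reports completion, per the Output description above (1: busy, 0: finished).
+ *
+ *    rtk_l2_clearStatus_t status;
+ *
+ *    rtk_l2_table_clear();
+ *    do {
+ *        rtk_l2_table_clearStatus_get(&status);
+ *    } while (status != 0);    // 0 means the clear has finished
+ */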
+
+/* Function Name:
+ *      rtk_l2_flushLinkDownPortAddrEnable_set
+ * Description:
+ *      Set HW flush linkdown port MAC configuration of the specified device.
+ * Input:
+ *      port - Port id.
+ *      enable - link down flush status
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_PORT_ID      - Invalid port number.
+ *      RT_ERR_ENABLE       - Invalid enable input.
+ * Note:
+ *      The status of flush linkdown port address is as follows:
+ *      - DISABLED
+ *      - ENABLED
+ */
+extern rtk_api_ret_t rtk_l2_flushLinkDownPortAddrEnable_set(rtk_port_t port, rtk_enable_t enable);
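+
+/*
+ * Example (editor's sketch): flushing learned addresses automatically when
+ * port 4 loses link. ENABLED is the rtk_enable_t value used elsewhere in
+ * this API; the port number is arbitrary.
+ *
+ *    rtk_l2_flushLinkDownPortAddrEnable_set(4, ENABLED);
+ */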
+
+/* Function Name:
+ *      rtk_l2_flushLinkDownPortAddrEnable_get
+ * Description:
+ *      Get HW flush linkdown port MAC configuration of the specified device.
+ * Input:
+ *      port - Port id.
+ * Output:
+ *      pEnable - link down flush status
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_PORT_ID      - Invalid port number.
+ * Note:
+ *      The status of flush linkdown port address is as follows:
+ *      - DISABLED
+ *      - ENABLED
+ */
+extern rtk_api_ret_t rtk_l2_flushLinkDownPortAddrEnable_get(rtk_port_t port, rtk_enable_t *pEnable);
+
+/* Function Name:
+ *      rtk_l2_agingEnable_set
+ * Description:
+ *      Set L2 LUT aging status per port setting.
+ * Input:
+ *      port    - Port id.
+ *      enable  - Aging status
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_PORT_ID      - Invalid port number.
+ *      RT_ERR_ENABLE       - Invalid enable input.
+ * Note:
+ *      This API can be used to set L2 LUT aging status per port.
+ */
+extern rtk_api_ret_t rtk_l2_agingEnable_set(rtk_port_t port, rtk_enable_t enable);
+
+/* Function Name:
+ *      rtk_l2_agingEnable_get
+ * Description:
+ *      Get L2 LUT aging status per port setting.
+ * Input:
+ *      port - Port id.
+ * Output:
+ *      pEnable - Aging status
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_PORT_ID      - Invalid port number.
+ * Note:
+ *      This API can be used to get the L2 LUT aging status per port.
+ */
+extern rtk_api_ret_t rtk_l2_agingEnable_get(rtk_port_t port, rtk_enable_t *pEnable);
+
+/* Function Name:
+ *      rtk_l2_limitLearningCnt_set
+ * Description:
+ *      Set per-Port auto learning limit number
+ * Input:
+ *      port    - Port id.
+ *      mac_cnt - Auto learning entries limit number
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK                   - OK
+ *      RT_ERR_FAILED               - Failed
+ *      RT_ERR_SMI                  - SMI access error
+ *      RT_ERR_PORT_ID              - Invalid port number.
+ *      RT_ERR_LIMITED_L2ENTRY_NUM  - Invalid auto learning limit number
+ * Note:
+ *      The API can set the per-port ASIC auto-learning limit, from 0 (learning disabled)
+ *      to 8K entries.
+ */
+extern rtk_api_ret_t rtk_l2_limitLearningCnt_set(rtk_port_t port, rtk_mac_cnt_t mac_cnt);
+
+/* Function Name:
+ *      rtk_l2_limitLearningCnt_get
+ * Description:
+ *      Get per-Port auto learning limit number
+ * Input:
+ *      port - Port id.
+ * Output:
+ *      pMac_cnt - Auto learning entries limit number
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_PORT_ID      - Invalid port number.
+ * Note:
+ *      The API can get per-port ASIC auto learning limit number.
+ */
+extern rtk_api_ret_t rtk_l2_limitLearningCnt_get(rtk_port_t port, rtk_mac_cnt_t *pMac_cnt);
+
+/* Function Name:
+ *      rtk_l2_limitSystemLearningCnt_set
+ * Description:
+ *      Set System auto learning limit number
+ * Input:
+ *      mac_cnt - Auto learning entries limit number
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK                   - OK
+ *      RT_ERR_FAILED               - Failed
+ *      RT_ERR_SMI                  - SMI access error
+ *      RT_ERR_LIMITED_L2ENTRY_NUM  - Invalid auto learning limit number
+ * Note:
+ *      The API can set the system-wide ASIC auto-learning limit, from 0 (learning disabled)
+ *      to 2112 entries.
+ */
+extern rtk_api_ret_t rtk_l2_limitSystemLearningCnt_set(rtk_mac_cnt_t mac_cnt);
+
+/* Function Name:
+ *      rtk_l2_limitSystemLearningCnt_get
+ * Description:
+ *      Get System auto learning limit number
+ * Input:
+ *      None
+ * Output:
+ *      pMac_cnt - Auto learning entries limit number
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_PORT_ID      - Invalid port number.
+ * Note:
+ *      The API can get system ASIC auto learning limit number.
+ */
+extern rtk_api_ret_t rtk_l2_limitSystemLearningCnt_get(rtk_mac_cnt_t *pMac_cnt);
+
+/* Function Name:
+ *      rtk_l2_limitLearningCntAction_set
+ * Description:
+ *      Configure auto learn over limit number action.
+ * Input:
+ *      port - Port id.
+ *      action - Action taken when the learning limit is exceeded
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_PORT_ID      - Invalid port number.
+ *      RT_ERR_NOT_ALLOWED  - Invalid learn over action
+ * Note:
+ *      The API can set the action taken on unknown-SA packets once the auto-learning limit is exceeded.
+ *      The action symbols are as follows:
+ *      - LIMIT_LEARN_CNT_ACTION_DROP,
+ *      - LIMIT_LEARN_CNT_ACTION_FORWARD,
+ *      - LIMIT_LEARN_CNT_ACTION_TO_CPU,
+ */
+extern rtk_api_ret_t rtk_l2_limitLearningCntAction_set(rtk_port_t port, rtk_l2_limitLearnCntAction_t action);
+
+/* Function Name:
+ *      rtk_l2_limitLearningCntAction_get
+ * Description:
+ *      Get auto learn over limit number action.
+ * Input:
+ *      port - Port id.
+ * Output:
+ *      pAction - Learn over action
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_PORT_ID      - Invalid port number.
+ * Note:
+ *      The API can get the action taken on unknown-SA packets once the auto-learning limit is exceeded.
+ *      The action symbols are as follows:
+ *      - LIMIT_LEARN_CNT_ACTION_DROP,
+ *      - LIMIT_LEARN_CNT_ACTION_FORWARD,
+ *      - LIMIT_LEARN_CNT_ACTION_TO_CPU,
+ */
+extern rtk_api_ret_t rtk_l2_limitLearningCntAction_get(rtk_port_t port, rtk_l2_limitLearnCntAction_t *pAction);
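+
+/*
+ * Example (editor's sketch): capping auto-learning on port 2 at 64 entries
+ * and dropping unknown-SA frames once that limit is reached, combining the
+ * APIs above. The port and count values are arbitrary.
+ *
+ *    rtk_l2_limitLearningCnt_set(2, 64);
+ *    rtk_l2_limitLearningCntAction_set(2, LIMIT_LEARN_CNT_ACTION_DROP);
+ */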
+
+/* Function Name:
+ *      rtk_l2_limitSystemLearningCntAction_set
+ * Description:
+ *      Configure system auto learn over limit number action.
+ * Input:
+ *      action - Action taken when the learning limit is exceeded
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_PORT_ID      - Invalid port number.
+ *      RT_ERR_NOT_ALLOWED  - Invalid learn over action
+ * Note:
+ *      The API can set the action taken on unknown-SA packets once the auto-learning limit is exceeded.
+ *      The action symbols are as follows:
+ *      - LIMIT_LEARN_CNT_ACTION_DROP,
+ *      - LIMIT_LEARN_CNT_ACTION_FORWARD,
+ *      - LIMIT_LEARN_CNT_ACTION_TO_CPU,
+ */
+extern rtk_api_ret_t rtk_l2_limitSystemLearningCntAction_set(rtk_l2_limitLearnCntAction_t action);
+
+/* Function Name:
+ *      rtk_l2_limitSystemLearningCntAction_get
+ * Description:
+ *      Get system auto learn over limit number action.
+ * Input:
+ *      None.
+ * Output:
+ *      pAction - Learn over action
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_PORT_ID      - Invalid port number.
+ * Note:
+ *      The API can get the action taken on unknown-SA packets once the auto-learning limit is exceeded.
+ *      The action symbols are as follows:
+ *      - LIMIT_LEARN_CNT_ACTION_DROP,
+ *      - LIMIT_LEARN_CNT_ACTION_FORWARD,
+ *      - LIMIT_LEARN_CNT_ACTION_TO_CPU,
+ */
+extern rtk_api_ret_t rtk_l2_limitSystemLearningCntAction_get(rtk_l2_limitLearnCntAction_t *pAction);
+
+/* Function Name:
+ *      rtk_l2_limitSystemLearningCntPortMask_set
+ * Description:
+ *      Configure system auto learn portmask
+ * Input:
+ *      pPortmask - Port Mask
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_PORT_MASK    - Invalid port mask.
+ * Note:
+ *
+ */
+extern rtk_api_ret_t rtk_l2_limitSystemLearningCntPortMask_set(rtk_portmask_t *pPortmask);
+
+/* Function Name:
+ *      rtk_l2_limitSystemLearningCntPortMask_get
+ * Description:
+ *      Get system auto learn portmask
+ * Input:
+ *      None
+ * Output:
+ *      pPortmask - Port Mask
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_NULL_POINTER - Null pointer.
+ * Note:
+ *
+ */
+extern rtk_api_ret_t rtk_l2_limitSystemLearningCntPortMask_get(rtk_portmask_t *pPortmask);
+
+/* Function Name:
+ *      rtk_l2_learningCnt_get
+ * Description:
+ *      Get per-Port current auto learning number
+ * Input:
+ *      port - Port id.
+ * Output:
+ *      pMac_cnt - ASIC auto learning entries number
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_PORT_ID      - Invalid port number.
+ * Note:
+ *      The API can get per-port ASIC auto learning number
+ */
+extern rtk_api_ret_t rtk_l2_learningCnt_get(rtk_port_t port, rtk_mac_cnt_t *pMac_cnt);
+
+/* Function Name:
+ *      rtk_l2_floodPortMask_set
+ * Description:
+ *      Set flooding portmask
+ * Input:
+ *      type - flooding type.
+ *      pFlood_portmask - flooding portmask
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_PORT_MASK    - Invalid portmask.
+ *      RT_ERR_INPUT        - Invalid input parameters.
+ * Note:
+ *      This API can set the flooding mask.
+ *      The flooding types are as follows:
+ *      - FLOOD_UNKNOWNDA
+ *      - FLOOD_UNKNOWNMC
+ *      - FLOOD_BC
+ */
+extern rtk_api_ret_t rtk_l2_floodPortMask_set(rtk_l2_flood_type_t flood_type, rtk_portmask_t *pFlood_portmask);
+
+/* Function Name:
+ *      rtk_l2_floodPortMask_get
+ * Description:
+ *      Get flooding portmask
+ * Input:
+ *      type - flooding type.
+ * Output:
+ *      pFlood_portmask - flooding portmask
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_PORT_ID      - Invalid port number.
+ * Note:
+ *      This API can get the flooding mask.
+ *      The flooding types are as follows:
+ *      - FLOOD_UNKNOWNDA
+ *      - FLOOD_UNKNOWNMC
+ *      - FLOOD_BC
+ */
+extern rtk_api_ret_t rtk_l2_floodPortMask_get(rtk_l2_flood_type_t flood_type, rtk_portmask_t *pFlood_portmask);
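+
+/*
+ * Example (editor's sketch): restricting unknown-unicast flooding to ports
+ * 0 and 1. RTK_PORTMASK_CLEAR()/RTK_PORTMASK_PORT_SET() are assumed helper
+ * macros; filling the mask bits directly would work equally well.
+ *
+ *    rtk_portmask_t mask;
+ *
+ *    RTK_PORTMASK_CLEAR(mask);
+ *    RTK_PORTMASK_PORT_SET(mask, 0);
+ *    RTK_PORTMASK_PORT_SET(mask, 1);
+ *    rtk_l2_floodPortMask_set(FLOOD_UNKNOWNDA, &mask);
+ */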
+
+/* Function Name:
+ *      rtk_l2_localPktPermit_set
+ * Description:
+ *      Set permission for frames whose source port and destination port are the same.
+ * Input:
+ *      port - Port id.
+ *      permit - Permission status
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_PORT_ID      - Invalid port number.
+ *      RT_ERR_ENABLE       - Invalid permit value.
+ * Note:
+ *      This API sets whether a frame is permitted when its source port equals its destination port.
+ */
+extern rtk_api_ret_t rtk_l2_localPktPermit_set(rtk_port_t port, rtk_enable_t permit);
+
+/* Function Name:
+ *      rtk_l2_localPktPermit_get
+ * Description:
+ *      Get permission status for frames whose source port and destination port are the same.
+ * Input:
+ *      port - Port id.
+ * Output:
+ *      pPermit - Permission status
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_PORT_ID      - Invalid port number.
+ * Note:
+ *      This API gets the permission status for frames whose source port equals the destination port.
+ */
+extern rtk_api_ret_t rtk_l2_localPktPermit_get(rtk_port_t port, rtk_enable_t *pPermit);
+
+/* Function Name:
+ *      rtk_l2_aging_set
+ * Description:
+ *      Set LUT aging-out speed
+ * Input:
+ *      aging_time - Aging-out time.
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK               - OK
+ *      RT_ERR_FAILED           - Failed
+ *      RT_ERR_SMI              - SMI access error
+ *      RT_ERR_OUT_OF_RANGE     - input out of range.
+ * Note:
+ *      The API can set the LUT aging-out period for each entry; the range is from 14 s to 800 s.
+ */
+extern rtk_api_ret_t rtk_l2_aging_set(rtk_l2_age_time_t aging_time);
+
+/* Function Name:
+ *      rtk_l2_aging_get
+ * Description:
+ *      Get LUT aging-out time
+ * Input:
+ *      None
+ * Output:
+ *      pAging_time - Aging-out time.
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_PORT_ID      - Invalid port number.
+ * Note:
+ *      The API can get the LUT aging-out period for each entry.
+ */
+extern rtk_api_ret_t rtk_l2_aging_get(rtk_l2_age_time_t *pAging_time);
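+
+/*
+ * Example (editor's sketch): applying a 300 s aging period (within the
+ * documented 14 s to 800 s range) and reading back the value the ASIC
+ * actually applied.
+ *
+ *    rtk_l2_age_time_t t = 300;
+ *
+ *    rtk_l2_aging_set(t);
+ *    rtk_l2_aging_get(&t);    // t now holds the applied period
+ */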
+
+/* Function Name:
+ *      rtk_l2_ipMcastAddrLookup_set
+ * Description:
+ *      Set LUT IP multicast lookup function
+ * Input:
+ *      type - Lookup type for IPMC packet.
+ * Output:
+ *      None.
+ * Return:
+ *      RT_ERR_OK          - OK
+ *      RT_ERR_FAILED      - Failed
+ *      RT_ERR_SMI         - SMI access error
+ * Note:
+ *      This API works together with rtk_l2_ipMcastAddrLookupException_add.
+ *      If users set the lookup type to DIP, the groups in the exception table
+ *      will be looked up by DIP+SIP.
+ *      If users set the lookup type to DIP+SIP, the groups in the exception table
+ *      will be looked up by DIP only.
+ */
+extern rtk_api_ret_t rtk_l2_ipMcastAddrLookup_set(rtk_l2_ipmc_lookup_type_t type);
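+
+/*
+ * Example (editor's sketch): selecting DIP-only lookup, so entries in the
+ * exception table are then matched by DIP+SIP, per the Note above.
+ * LOOKUP_ON_DIP is a hypothetical enumerator name for
+ * rtk_l2_ipmc_lookup_type_t.
+ *
+ *    rtk_l2_ipMcastAddrLookup_set(LOOKUP_ON_DIP);    // hypothetical name
+ */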
+
+/* Function Name:
+ *      rtk_l2_ipMcastAddrLookup_get
+ * Description:
+ *      Get LUT IP multicast lookup function
+ * Input:
+ *      None.
+ * Output:
+ *      pType - Lookup type for IPMC packet.
+ * Return:
+ *      RT_ERR_OK          - OK
+ *      RT_ERR_FAILED      - Failed
+ *      RT_ERR_SMI         - SMI access error
+ * Note:
+ *      None.
+ */
+extern rtk_api_ret_t rtk_l2_ipMcastAddrLookup_get(rtk_l2_ipmc_lookup_type_t *pType);
+
+/* Function Name:
+ *      rtk_l2_ipMcastForwardRouterPort_set
+ * Description:
+ *      Set whether IPMC packets are also forwarded to the router port
+ * Input:
+ *      enabled - 1: Include router port, 0: Exclude router port
+ * Output:
+ *      None.
+ * Return:
+ *      RT_ERR_OK          - OK
+ *      RT_ERR_FAILED      - Failed
+ *      RT_ERR_SMI         - SMI access error
+ * Note:
+ *
+ */
+extern rtk_api_ret_t rtk_l2_ipMcastForwardRouterPort_set(rtk_enable_t enabled);
+
+/* Function Name:
+ *      rtk_l2_ipMcastForwardRouterPort_get
+ * Description:
+ *      Get whether IPMC packets are also forwarded to the router port
+ * Input:
+ *      None.
+ * Output:
+ *      pEnabled    - 1: Include router port, 0: Exclude router port
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_NULL_POINTER - Null pointer
+ * Note:
+ *
+ */
+extern rtk_api_ret_t rtk_l2_ipMcastForwardRouterPort_get(rtk_enable_t *pEnabled);
+
+/* Function Name:
+ *      rtk_l2_ipMcastGroupEntry_add
+ * Description:
+ *      Add an IP Multicast entry to group table
+ * Input:
+ *      ip_addr     - IP address
+ *      vid         - VLAN ID
+ *      pPortmask   - portmask
+ * Output:
+ *      None.
+ * Return:
+ *      RT_ERR_OK          - OK
+ *      RT_ERR_FAILED      - Failed
+ *      RT_ERR_SMI         - SMI access error
+ *      RT_ERR_TBL_FULL    - Table Full
+ * Note:
+ *      Add an entry to IP Multicast Group table.
+ */
+extern rtk_api_ret_t rtk_l2_ipMcastGroupEntry_add(ipaddr_t ip_addr, rtk_uint32 vid, rtk_portmask_t *pPortmask);
+
+/* Function Name:
+ *      rtk_l2_ipMcastGroupEntry_del
+ * Description:
+ *      Delete an entry from IP Multicast group table
+ * Input:
+ *      ip_addr     - IP address
+ *      vid         - VLAN ID
+ * Output:
+ *      None.
+ * Return:
+ *      RT_ERR_OK          - OK
+ *      RT_ERR_FAILED      - Failed
+ *      RT_ERR_SMI         - SMI access error
+ *      RT_ERR_TBL_FULL    - Table Full
+ * Note:
+ *      Delete an entry from IP Multicast group table.
+ */
+extern rtk_api_ret_t rtk_l2_ipMcastGroupEntry_del(ipaddr_t ip_addr, rtk_uint32 vid);
+
+/* Function Name:
+ *      rtk_l2_ipMcastGroupEntry_get
+ * Description:
+ *      Get an entry from the IP Multicast group table
+ * Input:
+ *      ip_addr     - IP address
+ *      vid         - VLAN ID
+ * Output:
+ *      pPortmask   - member port mask
+ * Return:
+ *      RT_ERR_OK          - OK
+ *      RT_ERR_FAILED      - Failed
+ *      RT_ERR_SMI         - SMI access error
+ *      RT_ERR_TBL_FULL    - Table Full
+ * Note:
+ *      Get an entry from the IP Multicast group table.
+ */
+extern rtk_api_ret_t rtk_l2_ipMcastGroupEntry_get(ipaddr_t ip_addr, rtk_uint32 vid, rtk_portmask_t *pPortmask);
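+
+/*
+ * Example (editor's sketch): registering IPv4 group 225.1.1.1 on VLAN 100
+ * and reading its member mask back. Passing the address as a host-order
+ * rtk_uint32 (0xE1010101) is an assumption about ipaddr_t, and the portmask
+ * macros are assumed helpers.
+ *
+ *    rtk_portmask_t members;
+ *
+ *    RTK_PORTMASK_CLEAR(members);
+ *    RTK_PORTMASK_PORT_SET(members, 1);
+ *    RTK_PORTMASK_PORT_SET(members, 2);
+ *    rtk_l2_ipMcastGroupEntry_add(0xE1010101, 100, &members);
+ *    rtk_l2_ipMcastGroupEntry_get(0xE1010101, 100, &members);
+ */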
+
+/* Function Name:
+ *      rtk_l2_entry_get
+ * Description:
+ *      Get LUT unicast entry.
+ * Input:
+ *      pL2_entry - Index field in the structure.
+ * Output:
+ *      pL2_entry - other fields such as MAC, port, age...
+ * Return:
+ *      RT_ERR_OK               - OK
+ *      RT_ERR_FAILED           - Failed
+ *      RT_ERR_SMI              - SMI access error
+ *      RT_ERR_L2_EMPTY_ENTRY   - Empty LUT entry.
+ *      RT_ERR_INPUT            - Invalid input parameters.
+ * Note:
+ *      This API is used to get an address entry by index, from 0 to 2111.
+ */
+extern rtk_api_ret_t rtk_l2_entry_get(rtk_l2_addr_table_t *pL2_entry);
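+
+/*
+ * Example (editor's sketch): walking the whole table by index, as the Note
+ * above describes. The name of the index field inside rtk_l2_addr_table_t
+ * is an assumption, as is the dump helper.
+ *
+ *    rtk_l2_addr_table_t e;
+ *    rtk_uint32 i;
+ *
+ *    for (i = 0; i <= 2111; i++)
+ *    {
+ *        e.index = i;                 // assumed field name
+ *        if (rtk_l2_entry_get(&e) == RT_ERR_OK)
+ *            dump_l2_entry(&e);       // hypothetical helper
+ *    }
+ */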
+
+
+#endif /* __RTK_API_L2_H__ */
+
diff -Nruw linux-4.4.115-fbx/drivers/misc/freebox./rtlapi/leaky.c linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/leaky.c
--- linux-4.4.115-fbx/drivers/misc/freebox./rtlapi/leaky.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/leaky.c	2019-01-22 16:16:24.707257309 +0100
@@ -0,0 +1,636 @@
+/*
+ * Copyright (C) 2013 Realtek Semiconductor Corp.
+ * All Rights Reserved.
+ *
+ * This program is the proprietary software of Realtek Semiconductor
+ * Corporation and/or its licensors, and only be used, duplicated,
+ * modified or distributed under the authorized license from Realtek.
+ *
+ * ANY USE OF THE SOFTWARE OTHER THAN AS AUTHORIZED UNDER
+ * THIS LICENSE OR COPYRIGHT LAW IS PROHIBITED.
+ *
+ * $Revision: 79629 $
+ * $Date: 2017-06-14 18:08:03 +0800 (Wed, 14 Jun 2017) $
+ *
+ * Purpose : RTK switch high-level API for RTL8367/RTL8367C
+ * Feature : Here is a list of all functions and variables in Leaky module.
+ *
+ */
+
+#include <rtk_switch.h>
+#include <rtk_error.h>
+#include <leaky.h>
+#include <string.h>
+
+#include <rtl8367c_asicdrv.h>
+#include <rtl8367c_asicdrv_portIsolation.h>
+#include <rtl8367c_asicdrv_rma.h>
+#include <rtl8367c_asicdrv_igmp.h>
+
+static rtk_api_ret_t _rtk_leaky_vlan_set(rtk_leaky_type_t type, rtk_enable_t enable)
+{
+    rtk_api_ret_t retVal;
+    rtk_uint32 port;
+    rtl8367c_rma_t rmacfg;
+    rtk_uint32 tmp;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if (type >= LEAKY_END)
+        return RT_ERR_INPUT;
+
+    if (enable >= RTK_ENABLE_END)
+        return RT_ERR_INPUT;
+
+    if (type >= 0 && type <= LEAKY_UNDEF_GARP_2F)
+    {
+        if ((retVal = rtl8367c_getAsicRma(type, &rmacfg)) != RT_ERR_OK)
+            return retVal;
+
+        rmacfg.vlan_leaky = enable;
+
+        if ((retVal = rtl8367c_setAsicRma(type, &rmacfg)) != RT_ERR_OK)
+            return retVal;
+    }
+    else if (LEAKY_IPMULTICAST == type)
+    {
+        for (port = 0; port <= RTK_PORT_ID_MAX; port++)
+        {
+            if ((retVal = rtl8367c_setAsicIpMulticastVlanLeaky(port,enable)) != RT_ERR_OK)
+                return retVal;
+        }
+    }
+    else if (LEAKY_IGMP == type)
+    {
+        if ((retVal = rtl8367c_setAsicIGMPVLANLeaky(enable)) != RT_ERR_OK)
+            return retVal;
+    }
+    else if (LEAKY_CDP == type)
+    {
+        if ((retVal = rtl8367c_getAsicRmaCdp(&rmacfg)) != RT_ERR_OK)
+            return retVal;
+
+        rmacfg.vlan_leaky = enable;
+
+        if ((retVal = rtl8367c_setAsicRmaCdp(&rmacfg)) != RT_ERR_OK)
+            return retVal;
+    }
+    else if (LEAKY_CSSTP == type)
+    {
+        if ((retVal = rtl8367c_getAsicRmaCsstp(&rmacfg)) != RT_ERR_OK)
+            return retVal;
+
+        rmacfg.vlan_leaky = enable;
+
+        if ((retVal = rtl8367c_setAsicRmaCsstp(&rmacfg)) != RT_ERR_OK)
+            return retVal;
+    }
+    else if (LEAKY_LLDP == type)
+    {
+        if ((retVal = rtl8367c_getAsicRmaLldp(&tmp,&rmacfg)) != RT_ERR_OK)
+            return retVal;
+
+        rmacfg.vlan_leaky = enable;
+
+        if ((retVal = rtl8367c_setAsicRmaLldp(tmp, &rmacfg)) != RT_ERR_OK)
+            return retVal;
+    }
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_leaky_vlan_get(rtk_leaky_type_t type, rtk_enable_t *pEnable)
+{
+    rtk_api_ret_t retVal;
+    rtk_uint32 port,tmp;
+    rtl8367c_rma_t rmacfg;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if (type >= LEAKY_END)
+        return RT_ERR_INPUT;
+
+    if(NULL == pEnable)
+        return RT_ERR_NULL_POINTER;
+
+    if (type >= 0 && type <= LEAKY_UNDEF_GARP_2F)
+    {
+        if ((retVal = rtl8367c_getAsicRma(type, &rmacfg)) != RT_ERR_OK)
+            return retVal;
+
+        *pEnable = rmacfg.vlan_leaky;
+
+    }
+    else if (LEAKY_IPMULTICAST == type)
+    {
+        for (port = 0; port <= RTK_PORT_ID_MAX; port++)
+        {
+            if ((retVal = rtl8367c_getAsicIpMulticastVlanLeaky(port, &tmp)) != RT_ERR_OK)
+                return retVal;
+            if ((port > 0) && (tmp != *pEnable))
+                return RT_ERR_FAILED;
+            *pEnable = tmp;
+        }
+    }
+    else if (LEAKY_IGMP == type)
+    {
+        if ((retVal = rtl8367c_getAsicIGMPVLANLeaky(&tmp)) != RT_ERR_OK)
+            return retVal;
+
+        *pEnable = tmp;
+    }
+    else if (LEAKY_CDP == type)
+    {
+        if ((retVal = rtl8367c_getAsicRmaCdp(&rmacfg)) != RT_ERR_OK)
+            return retVal;
+
+        *pEnable = rmacfg.vlan_leaky;
+    }
+    else if (LEAKY_CSSTP == type)
+    {
+        if ((retVal = rtl8367c_getAsicRmaCsstp(&rmacfg)) != RT_ERR_OK)
+            return retVal;
+
+        *pEnable = rmacfg.vlan_leaky;
+    }
+    else if (LEAKY_LLDP == type)
+    {
+        if ((retVal = rtl8367c_getAsicRmaLldp(&tmp, &rmacfg)) != RT_ERR_OK)
+            return retVal;
+
+        *pEnable = rmacfg.vlan_leaky;
+    }
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_leaky_portIsolation_set(rtk_leaky_type_t type, rtk_enable_t enable)
+{
+    rtk_api_ret_t retVal;
+    rtk_uint32 port;
+    rtl8367c_rma_t rmacfg;
+    rtk_uint32 tmp;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if (type >= LEAKY_END)
+        return RT_ERR_INPUT;
+
+    if (enable >= RTK_ENABLE_END)
+        return RT_ERR_INPUT;
+
+    if (type >= 0 && type <= LEAKY_UNDEF_GARP_2F)
+    {
+        if ((retVal = rtl8367c_getAsicRma(type, &rmacfg)) != RT_ERR_OK)
+            return retVal;
+
+        rmacfg.portiso_leaky = enable;
+
+        if ((retVal = rtl8367c_setAsicRma(type, &rmacfg)) != RT_ERR_OK)
+            return retVal;
+    }
+    else if (LEAKY_IPMULTICAST == type)
+    {
+        for (port = 0; port < RTK_MAX_NUM_OF_PORT; port++)
+        {
+            if ((retVal = rtl8367c_setAsicIpMulticastPortIsoLeaky(port,enable)) != RT_ERR_OK)
+                return retVal;
+        }
+    }
+    else if (LEAKY_IGMP == type)
+    {
+        if ((retVal = rtl8367c_setAsicIGMPIsoLeaky(enable)) != RT_ERR_OK)
+            return retVal;
+    }
+    else if (LEAKY_CDP == type)
+    {
+        if ((retVal = rtl8367c_getAsicRmaCdp(&rmacfg)) != RT_ERR_OK)
+            return retVal;
+
+        rmacfg.portiso_leaky = enable;
+
+        if ((retVal = rtl8367c_setAsicRmaCdp(&rmacfg)) != RT_ERR_OK)
+            return retVal;
+    }
+    else if (LEAKY_CSSTP == type)
+    {
+        if ((retVal = rtl8367c_getAsicRmaCsstp(&rmacfg)) != RT_ERR_OK)
+            return retVal;
+
+        rmacfg.portiso_leaky = enable;
+
+        if ((retVal = rtl8367c_setAsicRmaCsstp(&rmacfg)) != RT_ERR_OK)
+            return retVal;
+    }
+    else if (LEAKY_LLDP == type)
+    {
+        if ((retVal = rtl8367c_getAsicRmaLldp(&tmp, &rmacfg)) != RT_ERR_OK)
+            return retVal;
+
+        rmacfg.portiso_leaky = enable;
+
+        if ((retVal = rtl8367c_setAsicRmaLldp(tmp, &rmacfg)) != RT_ERR_OK)
+            return retVal;
+    }
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_leaky_portIsolation_get(rtk_leaky_type_t type, rtk_enable_t *pEnable)
+{
+    rtk_api_ret_t retVal;
+    rtk_uint32 port, tmp;
+    rtl8367c_rma_t rmacfg;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if (type >= LEAKY_END)
+        return RT_ERR_INPUT;
+
+    if(NULL == pEnable)
+        return RT_ERR_NULL_POINTER;
+
+    if (type >= 0 && type <= LEAKY_UNDEF_GARP_2F)
+    {
+        if ((retVal = rtl8367c_getAsicRma(type, &rmacfg)) != RT_ERR_OK)
+            return retVal;
+
+        *pEnable = rmacfg.portiso_leaky;
+
+    }
+    else if (LEAKY_IPMULTICAST == type)
+    {
+        for (port = 0; port < RTK_MAX_NUM_OF_PORT; port++)
+        {
+            if ((retVal = rtl8367c_getAsicIpMulticastPortIsoLeaky(port, &tmp)) != RT_ERR_OK)
+                return retVal;
+            if ((port > 0) && (tmp != *pEnable))
+                return RT_ERR_FAILED;
+            *pEnable = tmp;
+        }
+    }
+    else if (LEAKY_IGMP == type)
+    {
+        if ((retVal = rtl8367c_getAsicIGMPIsoLeaky(&tmp)) != RT_ERR_OK)
+            return retVal;
+
+        *pEnable = tmp;
+    }
+    else if (LEAKY_CDP == type)
+    {
+        if ((retVal = rtl8367c_getAsicRmaCdp(&rmacfg)) != RT_ERR_OK)
+            return retVal;
+
+        *pEnable = rmacfg.portiso_leaky;
+    }
+    else if (LEAKY_CSSTP == type)
+    {
+        if ((retVal = rtl8367c_getAsicRmaCsstp(&rmacfg)) != RT_ERR_OK)
+            return retVal;
+
+        *pEnable = rmacfg.portiso_leaky;
+    }
+    else if (LEAKY_LLDP == type)
+    {
+        if ((retVal = rtl8367c_getAsicRmaLldp(&tmp, &rmacfg)) != RT_ERR_OK)
+            return retVal;
+
+        *pEnable = rmacfg.portiso_leaky;
+    }
+
+    return RT_ERR_OK;
+}
+
+
+/* Function Name:
+ *      rtk_leaky_vlan_set
+ * Description:
+ *      Set VLAN leaky.
+ * Input:
+ *      type - Packet type for VLAN leaky.
+ *      enable - Leaky status.
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_INPUT        - Invalid input parameters.
+ *      RT_ERR_ENABLE       - Invalid enable input
+ * Note:
+ *      This API can set VLAN leaky for RMA, IGMP/MLD, CDP, CSSTP, and LLDP packets.
+ *      The leaky frame types are as follows:
+ *      - LEAKY_BRG_GROUP,
+ *      - LEAKY_FD_PAUSE,
+ *      - LEAKY_SP_MCAST,
+ *      - LEAKY_1X_PAE,
+ *      - LEAKY_UNDEF_BRG_04,
+ *      - LEAKY_UNDEF_BRG_05,
+ *      - LEAKY_UNDEF_BRG_06,
+ *      - LEAKY_UNDEF_BRG_07,
+ *      - LEAKY_PROVIDER_BRIDGE_GROUP_ADDRESS,
+ *      - LEAKY_UNDEF_BRG_09,
+ *      - LEAKY_UNDEF_BRG_0A,
+ *      - LEAKY_UNDEF_BRG_0B,
+ *      - LEAKY_UNDEF_BRG_0C,
+ *      - LEAKY_PROVIDER_BRIDGE_GVRP_ADDRESS,
+ *      - LEAKY_8021AB,
+ *      - LEAKY_UNDEF_BRG_0F,
+ *      - LEAKY_BRG_MNGEMENT,
+ *      - LEAKY_UNDEFINED_11,
+ *      - LEAKY_UNDEFINED_12,
+ *      - LEAKY_UNDEFINED_13,
+ *      - LEAKY_UNDEFINED_14,
+ *      - LEAKY_UNDEFINED_15,
+ *      - LEAKY_UNDEFINED_16,
+ *      - LEAKY_UNDEFINED_17,
+ *      - LEAKY_UNDEFINED_18,
+ *      - LEAKY_UNDEFINED_19,
+ *      - LEAKY_UNDEFINED_1A,
+ *      - LEAKY_UNDEFINED_1B,
+ *      - LEAKY_UNDEFINED_1C,
+ *      - LEAKY_UNDEFINED_1D,
+ *      - LEAKY_UNDEFINED_1E,
+ *      - LEAKY_UNDEFINED_1F,
+ *      - LEAKY_GMRP,
+ *      - LEAKY_GVRP,
+ *      - LEAKY_UNDEF_GARP_22,
+ *      - LEAKY_UNDEF_GARP_23,
+ *      - LEAKY_UNDEF_GARP_24,
+ *      - LEAKY_UNDEF_GARP_25,
+ *      - LEAKY_UNDEF_GARP_26,
+ *      - LEAKY_UNDEF_GARP_27,
+ *      - LEAKY_UNDEF_GARP_28,
+ *      - LEAKY_UNDEF_GARP_29,
+ *      - LEAKY_UNDEF_GARP_2A,
+ *      - LEAKY_UNDEF_GARP_2B,
+ *      - LEAKY_UNDEF_GARP_2C,
+ *      - LEAKY_UNDEF_GARP_2D,
+ *      - LEAKY_UNDEF_GARP_2E,
+ *      - LEAKY_UNDEF_GARP_2F,
+ *      - LEAKY_IGMP,
+ *      - LEAKY_IPMULTICAST,
+ *      - LEAKY_CDP,
+ *      - LEAKY_CSSTP,
+ *      - LEAKY_LLDP.
+ */
+rtk_api_ret_t rtk_leaky_vlan_set(rtk_leaky_type_t type, rtk_enable_t enable)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_leaky_vlan_set(type, enable);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_leaky_vlan_get
+ * Description:
+ *      Get VLAN leaky.
+ * Input:
+ *      type - Packet type for VLAN leaky.
+ * Output:
+ *      pEnable - Leaky status.
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_INPUT        - Invalid input parameters.
+ * Note:
+ *      This API can get VLAN leaky status for RMA, IGMP/MLD, CDP, CSSTP, and LLDP packets.
+ *      The leaky frame types are as follows:
+ *      - LEAKY_BRG_GROUP,
+ *      - LEAKY_FD_PAUSE,
+ *      - LEAKY_SP_MCAST,
+ *      - LEAKY_1X_PAE,
+ *      - LEAKY_UNDEF_BRG_04,
+ *      - LEAKY_UNDEF_BRG_05,
+ *      - LEAKY_UNDEF_BRG_06,
+ *      - LEAKY_UNDEF_BRG_07,
+ *      - LEAKY_PROVIDER_BRIDGE_GROUP_ADDRESS,
+ *      - LEAKY_UNDEF_BRG_09,
+ *      - LEAKY_UNDEF_BRG_0A,
+ *      - LEAKY_UNDEF_BRG_0B,
+ *      - LEAKY_UNDEF_BRG_0C,
+ *      - LEAKY_PROVIDER_BRIDGE_GVRP_ADDRESS,
+ *      - LEAKY_8021AB,
+ *      - LEAKY_UNDEF_BRG_0F,
+ *      - LEAKY_BRG_MNGEMENT,
+ *      - LEAKY_UNDEFINED_11,
+ *      - LEAKY_UNDEFINED_12,
+ *      - LEAKY_UNDEFINED_13,
+ *      - LEAKY_UNDEFINED_14,
+ *      - LEAKY_UNDEFINED_15,
+ *      - LEAKY_UNDEFINED_16,
+ *      - LEAKY_UNDEFINED_17,
+ *      - LEAKY_UNDEFINED_18,
+ *      - LEAKY_UNDEFINED_19,
+ *      - LEAKY_UNDEFINED_1A,
+ *      - LEAKY_UNDEFINED_1B,
+ *      - LEAKY_UNDEFINED_1C,
+ *      - LEAKY_UNDEFINED_1D,
+ *      - LEAKY_UNDEFINED_1E,
+ *      - LEAKY_UNDEFINED_1F,
+ *      - LEAKY_GMRP,
+ *      - LEAKY_GVRP,
+ *      - LEAKY_UNDEF_GARP_22,
+ *      - LEAKY_UNDEF_GARP_23,
+ *      - LEAKY_UNDEF_GARP_24,
+ *      - LEAKY_UNDEF_GARP_25,
+ *      - LEAKY_UNDEF_GARP_26,
+ *      - LEAKY_UNDEF_GARP_27,
+ *      - LEAKY_UNDEF_GARP_28,
+ *      - LEAKY_UNDEF_GARP_29,
+ *      - LEAKY_UNDEF_GARP_2A,
+ *      - LEAKY_UNDEF_GARP_2B,
+ *      - LEAKY_UNDEF_GARP_2C,
+ *      - LEAKY_UNDEF_GARP_2D,
+ *      - LEAKY_UNDEF_GARP_2E,
+ *      - LEAKY_UNDEF_GARP_2F,
+ *      - LEAKY_IGMP,
+ *      - LEAKY_IPMULTICAST,
+ *      - LEAKY_CDP,
+ *      - LEAKY_CSSTP,
+ *      - LEAKY_LLDP.
+ */
+rtk_api_ret_t rtk_leaky_vlan_get(rtk_leaky_type_t type, rtk_enable_t *pEnable)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_leaky_vlan_get(type, pEnable);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_leaky_portIsolation_set
+ * Description:
+ *      Set port isolation leaky.
+ * Input:
+ *      type - Packet type for port isolation leaky.
+ *      enable - Leaky status.
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_INPUT        - Invalid input parameters.
+ *      RT_ERR_ENABLE       - Invalid enable input
+ * Note:
+ *      This API can set port isolation leaky for RMA, IGMP/MLD, CDP, CSSTP, and LLDP packets.
+ *      The leaky frame types are as follows:
+ *      - LEAKY_BRG_GROUP,
+ *      - LEAKY_FD_PAUSE,
+ *      - LEAKY_SP_MCAST,
+ *      - LEAKY_1X_PAE,
+ *      - LEAKY_UNDEF_BRG_04,
+ *      - LEAKY_UNDEF_BRG_05,
+ *      - LEAKY_UNDEF_BRG_06,
+ *      - LEAKY_UNDEF_BRG_07,
+ *      - LEAKY_PROVIDER_BRIDGE_GROUP_ADDRESS,
+ *      - LEAKY_UNDEF_BRG_09,
+ *      - LEAKY_UNDEF_BRG_0A,
+ *      - LEAKY_UNDEF_BRG_0B,
+ *      - LEAKY_UNDEF_BRG_0C,
+ *      - LEAKY_PROVIDER_BRIDGE_GVRP_ADDRESS,
+ *      - LEAKY_8021AB,
+ *      - LEAKY_UNDEF_BRG_0F,
+ *      - LEAKY_BRG_MNGEMENT,
+ *      - LEAKY_UNDEFINED_11,
+ *      - LEAKY_UNDEFINED_12,
+ *      - LEAKY_UNDEFINED_13,
+ *      - LEAKY_UNDEFINED_14,
+ *      - LEAKY_UNDEFINED_15,
+ *      - LEAKY_UNDEFINED_16,
+ *      - LEAKY_UNDEFINED_17,
+ *      - LEAKY_UNDEFINED_18,
+ *      - LEAKY_UNDEFINED_19,
+ *      - LEAKY_UNDEFINED_1A,
+ *      - LEAKY_UNDEFINED_1B,
+ *      - LEAKY_UNDEFINED_1C,
+ *      - LEAKY_UNDEFINED_1D,
+ *      - LEAKY_UNDEFINED_1E,
+ *      - LEAKY_UNDEFINED_1F,
+ *      - LEAKY_GMRP,
+ *      - LEAKY_GVRP,
+ *      - LEAKY_UNDEF_GARP_22,
+ *      - LEAKY_UNDEF_GARP_23,
+ *      - LEAKY_UNDEF_GARP_24,
+ *      - LEAKY_UNDEF_GARP_25,
+ *      - LEAKY_UNDEF_GARP_26,
+ *      - LEAKY_UNDEF_GARP_27,
+ *      - LEAKY_UNDEF_GARP_28,
+ *      - LEAKY_UNDEF_GARP_29,
+ *      - LEAKY_UNDEF_GARP_2A,
+ *      - LEAKY_UNDEF_GARP_2B,
+ *      - LEAKY_UNDEF_GARP_2C,
+ *      - LEAKY_UNDEF_GARP_2D,
+ *      - LEAKY_UNDEF_GARP_2E,
+ *      - LEAKY_UNDEF_GARP_2F,
+ *      - LEAKY_IGMP,
+ *      - LEAKY_IPMULTICAST,
+ *      - LEAKY_CDP,
+ *      - LEAKY_CSSTP,
+ *      - LEAKY_LLDP.
+ */
+rtk_api_ret_t rtk_leaky_portIsolation_set(rtk_leaky_type_t type, rtk_enable_t enable)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_leaky_portIsolation_set(type, enable);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_leaky_portIsolation_get
+ * Description:
+ *      Get port isolation leaky.
+ * Input:
+ *      type - Packet type for port isolation leaky.
+ * Output:
+ *      pEnable - Leaky status.
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_INPUT        - Invalid input parameters.
+ * Note:
+ *      This API can get port isolation leaky status for RMA, IGMP/MLD, CDP, CSSTP, and LLDP packets.
+ *      The leaky frame types are as follows:
+ *      - LEAKY_BRG_GROUP,
+ *      - LEAKY_FD_PAUSE,
+ *      - LEAKY_SP_MCAST,
+ *      - LEAKY_1X_PAE,
+ *      - LEAKY_UNDEF_BRG_04,
+ *      - LEAKY_UNDEF_BRG_05,
+ *      - LEAKY_UNDEF_BRG_06,
+ *      - LEAKY_UNDEF_BRG_07,
+ *      - LEAKY_PROVIDER_BRIDGE_GROUP_ADDRESS,
+ *      - LEAKY_UNDEF_BRG_09,
+ *      - LEAKY_UNDEF_BRG_0A,
+ *      - LEAKY_UNDEF_BRG_0B,
+ *      - LEAKY_UNDEF_BRG_0C,
+ *      - LEAKY_PROVIDER_BRIDGE_GVRP_ADDRESS,
+ *      - LEAKY_8021AB,
+ *      - LEAKY_UNDEF_BRG_0F,
+ *      - LEAKY_BRG_MNGEMENT,
+ *      - LEAKY_UNDEFINED_11,
+ *      - LEAKY_UNDEFINED_12,
+ *      - LEAKY_UNDEFINED_13,
+ *      - LEAKY_UNDEFINED_14,
+ *      - LEAKY_UNDEFINED_15,
+ *      - LEAKY_UNDEFINED_16,
+ *      - LEAKY_UNDEFINED_17,
+ *      - LEAKY_UNDEFINED_18,
+ *      - LEAKY_UNDEFINED_19,
+ *      - LEAKY_UNDEFINED_1A,
+ *      - LEAKY_UNDEFINED_1B,
+ *      - LEAKY_UNDEFINED_1C,
+ *      - LEAKY_UNDEFINED_1D,
+ *      - LEAKY_UNDEFINED_1E,
+ *      - LEAKY_UNDEFINED_1F,
+ *      - LEAKY_GMRP,
+ *      - LEAKY_GVRP,
+ *      - LEAKY_UNDEF_GARP_22,
+ *      - LEAKY_UNDEF_GARP_23,
+ *      - LEAKY_UNDEF_GARP_24,
+ *      - LEAKY_UNDEF_GARP_25,
+ *      - LEAKY_UNDEF_GARP_26,
+ *      - LEAKY_UNDEF_GARP_27,
+ *      - LEAKY_UNDEF_GARP_28,
+ *      - LEAKY_UNDEF_GARP_29,
+ *      - LEAKY_UNDEF_GARP_2A,
+ *      - LEAKY_UNDEF_GARP_2B,
+ *      - LEAKY_UNDEF_GARP_2C,
+ *      - LEAKY_UNDEF_GARP_2D,
+ *      - LEAKY_UNDEF_GARP_2E,
+ *      - LEAKY_UNDEF_GARP_2F,
+ *      - LEAKY_IGMP,
+ *      - LEAKY_IPMULTICAST,
+ *      - LEAKY_CDP,
+ *      - LEAKY_CSSTP,
+ *      - LEAKY_LLDP.
+ */
+rtk_api_ret_t rtk_leaky_portIsolation_get(rtk_leaky_type_t type, rtk_enable_t *pEnable)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_leaky_portIsolation_get(type, pEnable);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
diff -Nruw linux-4.4.115-fbx/drivers/misc/freebox./rtlapi/leaky.h linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/leaky.h
--- linux-4.4.115-fbx/drivers/misc/freebox./rtlapi/leaky.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/leaky.h	2019-01-22 16:16:24.707257309 +0100
@@ -0,0 +1,373 @@
+/*
+ * Copyright (C) 2013 Realtek Semiconductor Corp.
+ * All Rights Reserved.
+ *
+ * This program is the proprietary software of Realtek Semiconductor
+ * Corporation and/or its licensors, and only be used, duplicated,
+ * modified or distributed under the authorized license from Realtek.
+ *
+ * ANY USE OF THE SOFTWARE OTHER THAN AS AUTHORIZED UNDER
+ * THIS LICENSE OR COPYRIGHT LAW IS PROHIBITED.
+ *
+ * Purpose : RTL8367/RTL8367C switch high-level API
+ *
+ * Feature : The file includes the Leaky module high-layer API definition
+ *
+ */
+
+#ifndef __RTK_API_LEAKY_H__
+#define __RTK_API_LEAKY_H__
+
+
+typedef enum rtk_leaky_type_e
+{
+    LEAKY_BRG_GROUP = 0,
+    LEAKY_FD_PAUSE,
+    LEAKY_SP_MCAST,
+    LEAKY_1X_PAE,
+    LEAKY_UNDEF_BRG_04,
+    LEAKY_UNDEF_BRG_05,
+    LEAKY_UNDEF_BRG_06,
+    LEAKY_UNDEF_BRG_07,
+    LEAKY_PROVIDER_BRIDGE_GROUP_ADDRESS,
+    LEAKY_UNDEF_BRG_09,
+    LEAKY_UNDEF_BRG_0A,
+    LEAKY_UNDEF_BRG_0B,
+    LEAKY_UNDEF_BRG_0C,
+    LEAKY_PROVIDER_BRIDGE_GVRP_ADDRESS,
+    LEAKY_8021AB,
+    LEAKY_UNDEF_BRG_0F,
+    LEAKY_BRG_MNGEMENT,
+    LEAKY_UNDEFINED_11,
+    LEAKY_UNDEFINED_12,
+    LEAKY_UNDEFINED_13,
+    LEAKY_UNDEFINED_14,
+    LEAKY_UNDEFINED_15,
+    LEAKY_UNDEFINED_16,
+    LEAKY_UNDEFINED_17,
+    LEAKY_UNDEFINED_18,
+    LEAKY_UNDEFINED_19,
+    LEAKY_UNDEFINED_1A,
+    LEAKY_UNDEFINED_1B,
+    LEAKY_UNDEFINED_1C,
+    LEAKY_UNDEFINED_1D,
+    LEAKY_UNDEFINED_1E,
+    LEAKY_UNDEFINED_1F,
+    LEAKY_GMRP,
+    LEAKY_GVRP,
+    LEAKY_UNDEF_GARP_22,
+    LEAKY_UNDEF_GARP_23,
+    LEAKY_UNDEF_GARP_24,
+    LEAKY_UNDEF_GARP_25,
+    LEAKY_UNDEF_GARP_26,
+    LEAKY_UNDEF_GARP_27,
+    LEAKY_UNDEF_GARP_28,
+    LEAKY_UNDEF_GARP_29,
+    LEAKY_UNDEF_GARP_2A,
+    LEAKY_UNDEF_GARP_2B,
+    LEAKY_UNDEF_GARP_2C,
+    LEAKY_UNDEF_GARP_2D,
+    LEAKY_UNDEF_GARP_2E,
+    LEAKY_UNDEF_GARP_2F,
+    LEAKY_IGMP,
+    LEAKY_IPMULTICAST,
+    LEAKY_CDP,
+    LEAKY_CSSTP,
+    LEAKY_LLDP,
+    LEAKY_END,
+} rtk_leaky_type_t;
+
+/* Function Name:
+ *      rtk_leaky_vlan_set
+ * Description:
+ *      Set VLAN leaky.
+ * Input:
+ *      type - Packet type for VLAN leaky.
+ *      enable - Leaky status.
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_INPUT        - Invalid input parameters.
+ *      RT_ERR_ENABLE       - Invalid enable input
+ * Note:
+ *      This API can set VLAN leaky for RMA, IGMP/MLD, CDP, CSSTP, and LLDP packets.
+ *      The leaky frame types are as follows:
+ *      - LEAKY_BRG_GROUP,
+ *      - LEAKY_FD_PAUSE,
+ *      - LEAKY_SP_MCAST,
+ *      - LEAKY_1X_PAE,
+ *      - LEAKY_UNDEF_BRG_04,
+ *      - LEAKY_UNDEF_BRG_05,
+ *      - LEAKY_UNDEF_BRG_06,
+ *      - LEAKY_UNDEF_BRG_07,
+ *      - LEAKY_PROVIDER_BRIDGE_GROUP_ADDRESS,
+ *      - LEAKY_UNDEF_BRG_09,
+ *      - LEAKY_UNDEF_BRG_0A,
+ *      - LEAKY_UNDEF_BRG_0B,
+ *      - LEAKY_UNDEF_BRG_0C,
+ *      - LEAKY_PROVIDER_BRIDGE_GVRP_ADDRESS,
+ *      - LEAKY_8021AB,
+ *      - LEAKY_UNDEF_BRG_0F,
+ *      - LEAKY_BRG_MNGEMENT,
+ *      - LEAKY_UNDEFINED_11,
+ *      - LEAKY_UNDEFINED_12,
+ *      - LEAKY_UNDEFINED_13,
+ *      - LEAKY_UNDEFINED_14,
+ *      - LEAKY_UNDEFINED_15,
+ *      - LEAKY_UNDEFINED_16,
+ *      - LEAKY_UNDEFINED_17,
+ *      - LEAKY_UNDEFINED_18,
+ *      - LEAKY_UNDEFINED_19,
+ *      - LEAKY_UNDEFINED_1A,
+ *      - LEAKY_UNDEFINED_1B,
+ *      - LEAKY_UNDEFINED_1C,
+ *      - LEAKY_UNDEFINED_1D,
+ *      - LEAKY_UNDEFINED_1E,
+ *      - LEAKY_UNDEFINED_1F,
+ *      - LEAKY_GMRP,
+ *      - LEAKY_GVRP,
+ *      - LEAKY_UNDEF_GARP_22,
+ *      - LEAKY_UNDEF_GARP_23,
+ *      - LEAKY_UNDEF_GARP_24,
+ *      - LEAKY_UNDEF_GARP_25,
+ *      - LEAKY_UNDEF_GARP_26,
+ *      - LEAKY_UNDEF_GARP_27,
+ *      - LEAKY_UNDEF_GARP_28,
+ *      - LEAKY_UNDEF_GARP_29,
+ *      - LEAKY_UNDEF_GARP_2A,
+ *      - LEAKY_UNDEF_GARP_2B,
+ *      - LEAKY_UNDEF_GARP_2C,
+ *      - LEAKY_UNDEF_GARP_2D,
+ *      - LEAKY_UNDEF_GARP_2E,
+ *      - LEAKY_UNDEF_GARP_2F,
+ *      - LEAKY_IGMP,
+ *      - LEAKY_IPMULTICAST,
+ *      - LEAKY_CDP,
+ *      - LEAKY_CSSTP,
+ *      - LEAKY_LLDP.
+ */
+extern rtk_api_ret_t rtk_leaky_vlan_set(rtk_leaky_type_t type, rtk_enable_t enable);
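+
+/*
+ * Example (editor's sketch): letting LLDP frames cross VLAN boundaries.
+ * ENABLED is the rtk_enable_t value used throughout this API.
+ *
+ *    rtk_leaky_vlan_set(LEAKY_LLDP, ENABLED);
+ */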
+
+/* Function Name:
+ *      rtk_leaky_vlan_get
+ * Description:
+ *      Get VLAN leaky.
+ * Input:
+ *      type - Packet type for VLAN leaky.
+ * Output:
+ *      pEnable - Leaky status.
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_INPUT        - Invalid input parameters.
+ * Note:
+ *      This API can get VLAN leaky status for RMA, IGMP/MLD, CDP, CSSTP, and LLDP packets.
+ *      The leaky frame types are as follows:
+ *      - LEAKY_BRG_GROUP,
+ *      - LEAKY_FD_PAUSE,
+ *      - LEAKY_SP_MCAST,
+ *      - LEAKY_1X_PAE,
+ *      - LEAKY_UNDEF_BRG_04,
+ *      - LEAKY_UNDEF_BRG_05,
+ *      - LEAKY_UNDEF_BRG_06,
+ *      - LEAKY_UNDEF_BRG_07,
+ *      - LEAKY_PROVIDER_BRIDGE_GROUP_ADDRESS,
+ *      - LEAKY_UNDEF_BRG_09,
+ *      - LEAKY_UNDEF_BRG_0A,
+ *      - LEAKY_UNDEF_BRG_0B,
+ *      - LEAKY_UNDEF_BRG_0C,
+ *      - LEAKY_PROVIDER_BRIDGE_GVRP_ADDRESS,
+ *      - LEAKY_8021AB,
+ *      - LEAKY_UNDEF_BRG_0F,
+ *      - LEAKY_BRG_MNGEMENT,
+ *      - LEAKY_UNDEFINED_11,
+ *      - LEAKY_UNDEFINED_12,
+ *      - LEAKY_UNDEFINED_13,
+ *      - LEAKY_UNDEFINED_14,
+ *      - LEAKY_UNDEFINED_15,
+ *      - LEAKY_UNDEFINED_16,
+ *      - LEAKY_UNDEFINED_17,
+ *      - LEAKY_UNDEFINED_18,
+ *      - LEAKY_UNDEFINED_19,
+ *      - LEAKY_UNDEFINED_1A,
+ *      - LEAKY_UNDEFINED_1B,
+ *      - LEAKY_UNDEFINED_1C,
+ *      - LEAKY_UNDEFINED_1D,
+ *      - LEAKY_UNDEFINED_1E,
+ *      - LEAKY_UNDEFINED_1F,
+ *      - LEAKY_GMRP,
+ *      - LEAKY_GVRP,
+ *      - LEAKY_UNDEF_GARP_22,
+ *      - LEAKY_UNDEF_GARP_23,
+ *      - LEAKY_UNDEF_GARP_24,
+ *      - LEAKY_UNDEF_GARP_25,
+ *      - LEAKY_UNDEF_GARP_26,
+ *      - LEAKY_UNDEF_GARP_27,
+ *      - LEAKY_UNDEF_GARP_28,
+ *      - LEAKY_UNDEF_GARP_29,
+ *      - LEAKY_UNDEF_GARP_2A,
+ *      - LEAKY_UNDEF_GARP_2B,
+ *      - LEAKY_UNDEF_GARP_2C,
+ *      - LEAKY_UNDEF_GARP_2D,
+ *      - LEAKY_UNDEF_GARP_2E,
+ *      - LEAKY_UNDEF_GARP_2F,
+ *      - LEAKY_IGMP,
+ *      - LEAKY_IPMULTICAST,
+ *      - LEAKY_CDP,
+ *      - LEAKY_CSSTP,
+ *      - LEAKY_LLDP.
+ */
+extern rtk_api_ret_t rtk_leaky_vlan_get(rtk_leaky_type_t type, rtk_enable_t *pEnable);
+
+/* Function Name:
+ *      rtk_leaky_portIsolation_set
+ * Description:
+ *      Set port isolation leaky.
+ * Input:
+ *      type - Packet type for port isolation leaky.
+ *      enable - Leaky status.
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_INPUT        - Invalid input parameters.
+ *      RT_ERR_ENABLE       - Invalid enable input
+ * Note:
+ *      This API can set port isolation leaky for RMA, IGMP/MLD, CDP, CSSTP, and LLDP packets.
+ *      The leaky frame types are as follows:
+ *      - LEAKY_BRG_GROUP,
+ *      - LEAKY_FD_PAUSE,
+ *      - LEAKY_SP_MCAST,
+ *      - LEAKY_1X_PAE,
+ *      - LEAKY_UNDEF_BRG_04,
+ *      - LEAKY_UNDEF_BRG_05,
+ *      - LEAKY_UNDEF_BRG_06,
+ *      - LEAKY_UNDEF_BRG_07,
+ *      - LEAKY_PROVIDER_BRIDGE_GROUP_ADDRESS,
+ *      - LEAKY_UNDEF_BRG_09,
+ *      - LEAKY_UNDEF_BRG_0A,
+ *      - LEAKY_UNDEF_BRG_0B,
+ *      - LEAKY_UNDEF_BRG_0C,
+ *      - LEAKY_PROVIDER_BRIDGE_GVRP_ADDRESS,
+ *      - LEAKY_8021AB,
+ *      - LEAKY_UNDEF_BRG_0F,
+ *      - LEAKY_BRG_MNGEMENT,
+ *      - LEAKY_UNDEFINED_11,
+ *      - LEAKY_UNDEFINED_12,
+ *      - LEAKY_UNDEFINED_13,
+ *      - LEAKY_UNDEFINED_14,
+ *      - LEAKY_UNDEFINED_15,
+ *      - LEAKY_UNDEFINED_16,
+ *      - LEAKY_UNDEFINED_17,
+ *      - LEAKY_UNDEFINED_18,
+ *      - LEAKY_UNDEFINED_19,
+ *      - LEAKY_UNDEFINED_1A,
+ *      - LEAKY_UNDEFINED_1B,
+ *      - LEAKY_UNDEFINED_1C,
+ *      - LEAKY_UNDEFINED_1D,
+ *      - LEAKY_UNDEFINED_1E,
+ *      - LEAKY_UNDEFINED_1F,
+ *      - LEAKY_GMRP,
+ *      - LEAKY_GVRP,
+ *      - LEAKY_UNDEF_GARP_22,
+ *      - LEAKY_UNDEF_GARP_23,
+ *      - LEAKY_UNDEF_GARP_24,
+ *      - LEAKY_UNDEF_GARP_25,
+ *      - LEAKY_UNDEF_GARP_26,
+ *      - LEAKY_UNDEF_GARP_27,
+ *      - LEAKY_UNDEF_GARP_28,
+ *      - LEAKY_UNDEF_GARP_29,
+ *      - LEAKY_UNDEF_GARP_2A,
+ *      - LEAKY_UNDEF_GARP_2B,
+ *      - LEAKY_UNDEF_GARP_2C,
+ *      - LEAKY_UNDEF_GARP_2D,
+ *      - LEAKY_UNDEF_GARP_2E,
+ *      - LEAKY_UNDEF_GARP_2F,
+ *      - LEAKY_IGMP,
+ *      - LEAKY_IPMULTICAST,
+ *      - LEAKY_CDP,
+ *      - LEAKY_CSSTP,
+ *      - LEAKY_LLDP.
+ */
+extern rtk_api_ret_t rtk_leaky_portIsolation_set(rtk_leaky_type_t type, rtk_enable_t enable);
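+
+/*
+ * Example (editor's sketch): allowing IGMP frames to bypass port isolation
+ * so snooping still sees reports from isolated ports.
+ *
+ *    rtk_leaky_portIsolation_set(LEAKY_IGMP, ENABLED);
+ */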
+
+/* Function Name:
+ *      rtk_leaky_portIsolation_get
+ * Description:
+ *      Get port isolation leaky.
+ * Input:
+ *      type - Packet type for port isolation leaky.
+ * Output:
+ *      pEnable - Leaky status.
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_INPUT        - Invalid input parameters.
+ * Note:
+ *      This API can get port isolation leaky status for RMA, IGMP/MLD, CDP, CSSTP, and LLDP packets.
+ *      The leaky frame types are as follows:
+ *      - LEAKY_BRG_GROUP,
+ *      - LEAKY_FD_PAUSE,
+ *      - LEAKY_SP_MCAST,
+ *      - LEAKY_1X_PAE,
+ *      - LEAKY_UNDEF_BRG_04,
+ *      - LEAKY_UNDEF_BRG_05,
+ *      - LEAKY_UNDEF_BRG_06,
+ *      - LEAKY_UNDEF_BRG_07,
+ *      - LEAKY_PROVIDER_BRIDGE_GROUP_ADDRESS,
+ *      - LEAKY_UNDEF_BRG_09,
+ *      - LEAKY_UNDEF_BRG_0A,
+ *      - LEAKY_UNDEF_BRG_0B,
+ *      - LEAKY_UNDEF_BRG_0C,
+ *      - LEAKY_PROVIDER_BRIDGE_GVRP_ADDRESS,
+ *      - LEAKY_8021AB,
+ *      - LEAKY_UNDEF_BRG_0F,
+ *      - LEAKY_BRG_MNGEMENT,
+ *      - LEAKY_UNDEFINED_11,
+ *      - LEAKY_UNDEFINED_12,
+ *      - LEAKY_UNDEFINED_13,
+ *      - LEAKY_UNDEFINED_14,
+ *      - LEAKY_UNDEFINED_15,
+ *      - LEAKY_UNDEFINED_16,
+ *      - LEAKY_UNDEFINED_17,
+ *      - LEAKY_UNDEFINED_18,
+ *      - LEAKY_UNDEFINED_19,
+ *      - LEAKY_UNDEFINED_1A,
+ *      - LEAKY_UNDEFINED_1B,
+ *      - LEAKY_UNDEFINED_1C,
+ *      - LEAKY_UNDEFINED_1D,
+ *      - LEAKY_UNDEFINED_1E,
+ *      - LEAKY_UNDEFINED_1F,
+ *      - LEAKY_GMRP,
+ *      - LEAKY_GVRP,
+ *      - LEAKY_UNDEF_GARP_22,
+ *      - LEAKY_UNDEF_GARP_23,
+ *      - LEAKY_UNDEF_GARP_24,
+ *      - LEAKY_UNDEF_GARP_25,
+ *      - LEAKY_UNDEF_GARP_26,
+ *      - LEAKY_UNDEF_GARP_27,
+ *      - LEAKY_UNDEF_GARP_28,
+ *      - LEAKY_UNDEF_GARP_29,
+ *      - LEAKY_UNDEF_GARP_2A,
+ *      - LEAKY_UNDEF_GARP_2B,
+ *      - LEAKY_UNDEF_GARP_2C,
+ *      - LEAKY_UNDEF_GARP_2D,
+ *      - LEAKY_UNDEF_GARP_2E,
+ *      - LEAKY_UNDEF_GARP_2F,
+ *      - LEAKY_IGMP,
+ *      - LEAKY_IPMULTICAST,
+ *      - LEAKY_CDP,
+ *      - LEAKY_CSSTP,
+ *      - LEAKY_LLDP.
+ */
+extern rtk_api_ret_t rtk_leaky_portIsolation_get(rtk_leaky_type_t type, rtk_enable_t *pEnable);
+
+#endif /* __RTK_API_LEAKY_H__ */
+
diff -Nruw linux-4.4.115-fbx/drivers/misc/freebox./rtlapi/led.c linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/led.c
--- linux-4.4.115-fbx/drivers/misc/freebox./rtlapi/led.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/led.c	2019-01-22 16:16:24.707257309 +0100
@@ -0,0 +1,992 @@
+/*
+ * Copyright (C) 2013 Realtek Semiconductor Corp.
+ * All Rights Reserved.
+ *
+ * This program is the proprietary software of Realtek Semiconductor
+ * Corporation and/or its licensors, and only be used, duplicated,
+ * modified or distributed under the authorized license from Realtek.
+ *
+ * ANY USE OF THE SOFTWARE OTHER THAN AS AUTHORIZED UNDER
+ * THIS LICENSE OR COPYRIGHT LAW IS PROHIBITED.
+ *
+ * $Revision: 79629 $
+ * $Date: 2017-06-14 18:08:03 +0800 (Wed, 14 Jun 2017) $
+ *
+ * Purpose : RTK switch high-level API for RTL8367/RTL8367C
+ * Feature : Here is a list of all functions and variables in LED module.
+ *
+ */
+
+#include <rtk_switch.h>
+#include <rtk_error.h>
+#include <led.h>
+#include <string.h>
+
+#include <rtl8367c_asicdrv.h>
+#include <rtl8367c_asicdrv_led.h>
+
+static rtk_api_ret_t _rtk_led_enable_set(rtk_led_group_t group, rtk_portmask_t *pPortmask)
+{
+    rtk_api_ret_t retVal;
+    rtk_uint32 pmask;
+    rtk_port_t port;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if (group >= LED_GROUP_END)
+        return RT_ERR_INPUT;
+
+    RTK_CHK_PORTMASK_VALID(pPortmask);
+
+    RTK_PORTMASK_SCAN((*pPortmask), port)
+    {
+        if(rtk_switch_isCPUPort(port) == RT_ERR_OK)
+            return RT_ERR_PORT_MASK;
+    }
+
+    if((retVal = rtk_switch_portmask_L2P_get(pPortmask, &pmask)) != RT_ERR_OK)
+        return retVal;
+
+    if ((retVal = rtl8367c_setAsicLedGroupEnable(group, pmask)) != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_led_enable_get(rtk_led_group_t group, rtk_portmask_t *pPortmask)
+{
+    rtk_api_ret_t retVal;
+    rtk_uint32 pmask;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if (group >= LED_GROUP_END)
+        return RT_ERR_INPUT;
+
+    if ((retVal = rtl8367c_getAsicLedGroupEnable(group, &pmask)) != RT_ERR_OK)
+        return retVal;
+
+    if((retVal = rtk_switch_portmask_P2L_get(pmask, pPortmask)) != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_led_operation_set(rtk_led_operation_t mode)
+{
+    rtk_api_ret_t retVal;
+    rtk_uint32 regData;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if (mode >= LED_OP_END)
+        return RT_ERR_INPUT;
+
+    switch (mode)
+    {
+        case LED_OP_PARALLEL:
+            regData = LEDOP_PARALLEL;
+            break;
+        case LED_OP_SERIAL:
+            regData = LEDOP_SERIAL;
+            break;
+        default:
+            return RT_ERR_CHIP_NOT_SUPPORTED;
+    }
+
+    if ((retVal = rtl8367c_setAsicLedOperationMode(regData)) != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_led_operation_get(rtk_led_operation_t *pMode)
+{
+    rtk_api_ret_t retVal;
+    rtk_uint32 regData;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if(NULL == pMode)
+        return RT_ERR_NULL_POINTER;
+
+    if ((retVal = rtl8367c_getAsicLedOperationMode(&regData)) != RT_ERR_OK)
+        return retVal;
+
+    if (regData == LEDOP_SERIAL)
+        *pMode = LED_OP_SERIAL;
+    else if (regData == LEDOP_PARALLEL)
+        *pMode = LED_OP_PARALLEL;
+    else
+       return RT_ERR_FAILED;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_led_modeForce_set(rtk_port_t port, rtk_led_group_t group, rtk_led_force_mode_t mode)
+{
+    rtk_api_ret_t retVal;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    /* Check Port Valid */
+    RTK_CHK_PORT_VALID(port);
+
+    /* No LED for CPU port */
+    if(rtk_switch_isCPUPort(port) == RT_ERR_OK)
+        return RT_ERR_PORT_ID;
+
+    if (group >= LED_GROUP_END)
+        return RT_ERR_INPUT;
+
+    if (mode >= LED_FORCE_END)
+        return RT_ERR_NOT_ALLOWED;
+
+    if ((retVal = rtl8367c_setAsicForceLed(rtk_switch_port_L2P_get(port), group, mode)) != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_led_modeForce_get(rtk_port_t port, rtk_led_group_t group, rtk_led_force_mode_t *pMode)
+{
+    rtk_api_ret_t retVal;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    /* Check Port Valid */
+    RTK_CHK_PORT_VALID(port);
+
+    /* No LED for CPU port */
+    if(rtk_switch_isCPUPort(port) == RT_ERR_OK)
+        return RT_ERR_PORT_ID;
+
+    if (group >= LED_GROUP_END)
+        return RT_ERR_INPUT;
+
+    if (NULL == pMode)
+        return RT_ERR_NULL_POINTER;
+
+    if ((retVal = rtl8367c_getAsicForceLed(rtk_switch_port_L2P_get(port), group, pMode)) != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_led_blinkRate_set(rtk_led_blink_rate_t blinkRate)
+{
+    rtk_api_ret_t retVal;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if (blinkRate >= LED_BLINKRATE_END)
+        return RT_ERR_FAILED;
+
+    if ((retVal = rtl8367c_setAsicLedBlinkRate(blinkRate)) != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_led_blinkRate_get(rtk_led_blink_rate_t *pBlinkRate)
+{
+    rtk_api_ret_t retVal;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if(NULL == pBlinkRate)
+        return RT_ERR_NULL_POINTER;
+
+    if ((retVal = rtl8367c_getAsicLedBlinkRate(pBlinkRate)) != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_led_groupConfig_set(rtk_led_group_t group, rtk_led_congig_t config)
+{
+    rtk_api_ret_t retVal;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if (LED_GROUP_END <= group)
+        return RT_ERR_FAILED;
+
+    if (LED_CONFIG_END <= config)
+        return RT_ERR_FAILED;
+
+    if ((retVal = rtl8367c_setAsicLedIndicateInfoConfig(group, config)) != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_led_groupConfig_get(rtk_led_group_t group, rtk_led_congig_t *pConfig)
+{
+    rtk_api_ret_t retVal;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if (LED_GROUP_END <= group)
+        return RT_ERR_FAILED;
+
+    if(NULL == pConfig)
+        return RT_ERR_NULL_POINTER;
+
+    if ((retVal = rtl8367c_getAsicLedIndicateInfoConfig(group, pConfig)) != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_led_groupAbility_set(rtk_led_group_t group, rtk_led_ability_t *pAbility)
+{
+    rtk_api_ret_t retVal;
+    rtk_uint32 regData;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if (LED_GROUP_END <= group)
+        return RT_ERR_FAILED;
+
+    if(pAbility == NULL)
+        return RT_ERR_NULL_POINTER;
+
+    if( (pAbility->link_10m >= RTK_ENABLE_END) || (pAbility->link_100m >= RTK_ENABLE_END) ||
+        (pAbility->link_500m >= RTK_ENABLE_END) || (pAbility->link_1000m >= RTK_ENABLE_END) ||
+        (pAbility->act_rx >= RTK_ENABLE_END) || (pAbility->act_tx >= RTK_ENABLE_END) )
+    {
+        return RT_ERR_INPUT;
+    }
+
+    if ((retVal = rtl8367c_getAsicReg(RTL8367C_REG_LED0_DATA_CTRL + (rtk_uint32)group, &regData)) != RT_ERR_OK)
+        return retVal;
+
+    if(pAbility->link_10m == ENABLED)
+        regData |= 0x0001;
+    else
+        regData &= ~0x0001;
+
+    if(pAbility->link_100m == ENABLED)
+        regData |= 0x0002;
+    else
+        regData &= ~0x0002;
+
+    if(pAbility->link_500m == ENABLED)
+        regData |= 0x0004;
+    else
+        regData &= ~0x0004;
+
+    if(pAbility->link_1000m == ENABLED)
+        regData |= 0x0008;
+    else
+        regData &= ~0x0008;
+
+    if(pAbility->act_rx == ENABLED)
+        regData |= 0x0010;
+    else
+        regData &= ~0x0010;
+
+    if(pAbility->act_tx == ENABLED)
+        regData |= 0x0020;
+    else
+        regData &= ~0x0020;
+
+    regData |= (0x0001 << 6);
+
+    if ((retVal = rtl8367c_setAsicReg(RTL8367C_REG_LED0_DATA_CTRL + (rtk_uint32)group, regData)) != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_led_groupAbility_get(rtk_led_group_t group, rtk_led_ability_t *pAbility)
+{
+    rtk_api_ret_t retVal;
+    rtk_uint32 regData;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if (LED_GROUP_END <= group)
+        return RT_ERR_FAILED;
+
+    if(pAbility == NULL)
+        return RT_ERR_NULL_POINTER;
+
+    if ((retVal = rtl8367c_getAsicReg(RTL8367C_REG_LED0_DATA_CTRL + (rtk_uint32)group, &regData)) != RT_ERR_OK)
+        return retVal;
+
+    pAbility->link_10m = (regData & 0x0001) ? ENABLED : DISABLED;
+    pAbility->link_100m = (regData & 0x0002) ? ENABLED : DISABLED;
+    pAbility->link_500m = (regData & 0x0004) ? ENABLED : DISABLED;
+    pAbility->link_1000m = (regData & 0x0008) ? ENABLED : DISABLED;
+    pAbility->act_rx = (regData & 0x0010) ? ENABLED : DISABLED;
+    pAbility->act_tx = (regData & 0x0020) ? ENABLED : DISABLED;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_led_serialMode_set(rtk_led_active_t active)
+{
+    rtk_api_ret_t retVal;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if (active >= LED_ACTIVE_END)
+        return RT_ERR_INPUT;
+
+    if ((retVal = rtl8367c_setAsicLedSerialModeConfig(active, 1)) != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_led_serialMode_get(rtk_led_active_t *pActive)
+{
+    rtk_api_ret_t retVal;
+    rtk_uint32 regData;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if(NULL == pActive)
+        return RT_ERR_NULL_POINTER;
+
+    if ((retVal = rtl8367c_getAsicLedSerialModeConfig(pActive, &regData)) != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_led_OutputEnable_set(rtk_enable_t state)
+{
+    rtk_api_ret_t retVal;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if (state >= RTK_ENABLE_END)
+        return RT_ERR_INPUT;
+
+    if ((retVal = rtl8367c_setAsicLedOutputEnable(state)) != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_led_OutputEnable_get(rtk_enable_t *pState)
+{
+    rtk_api_ret_t retVal;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if(pState == NULL)
+        return RT_ERR_NULL_POINTER;
+
+    if ((retVal = rtl8367c_getAsicLedOutputEnable(pState)) != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_led_serialModePortmask_set(rtk_led_serialOutput_t output, rtk_portmask_t *pPortmask)
+{
+    rtk_api_ret_t retVal;
+    rtk_uint32 pmask;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if(output >= SERIAL_LED_END)
+        return RT_ERR_INPUT;
+
+    if(pPortmask == NULL)
+        return RT_ERR_NULL_POINTER;
+
+    if ((retVal = rtk_switch_portmask_L2P_get(pPortmask, &pmask)) != RT_ERR_OK)
+        return retVal;
+
+    if ((retVal = rtl8367c_setAsicLedSerialOutput((rtk_uint32)output, pmask)) != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_led_serialModePortmask_get(rtk_led_serialOutput_t *pOutput, rtk_portmask_t *pPortmask)
+{
+    rtk_api_ret_t retVal;
+    rtk_uint32 pmask;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if(pOutput == NULL)
+        return RT_ERR_NULL_POINTER;
+
+    if(pPortmask == NULL)
+        return RT_ERR_NULL_POINTER;
+
+    if ((retVal = rtl8367c_getAsicLedSerialOutput((rtk_uint32 *)pOutput, &pmask)) != RT_ERR_OK)
+        return retVal;
+
+    if((retVal = rtk_switch_portmask_P2L_get(pmask, pPortmask)) != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+
+/* Function Name:
+ *      rtk_led_enable_set
+ * Description:
+ *      Set LED enable configuration
+ * Input:
+ *      group       - LED group id.
+ *      pPortmask   - LED enable port mask.
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_INPUT        - Invalid input parameters.
+ *      RT_ERR_PORT_MASK    - Error portmask
+ * Note:
+ *      The API can be used to enable LED per port per group.
+ */
+rtk_api_ret_t rtk_led_enable_set(rtk_led_group_t group, rtk_portmask_t *pPortmask)
+{
+    rtk_api_ret_t retVal;
+    
+    RTK_API_LOCK();
+    retVal = _rtk_led_enable_set(group, pPortmask);    
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
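+
+/*
+ * Usage sketch (illustrative only, not part of the Realtek SDK): enable
+ * LED group 0 on logical ports 0-3.  The portmask is filled through the
+ * raw bits[0] field of rtk_portmask_t, as done elsewhere in this driver;
+ * the port numbers and mask value are example assumptions.
+ *
+ *     rtk_portmask_t pm;
+ *     memset(&pm, 0, sizeof(pm));
+ *     pm.bits[0] = 0x0F;                        // logical ports 0-3
+ *     if (rtk_led_enable_set(LED_GROUP_0, &pm) != RT_ERR_OK)
+ *         return;                               // handle error
+ */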
+
+/* Function Name:
+ *      rtk_led_enable_get
+ * Description:
+ *      Get LED enable configuration
+ * Input:
+ *      group - LED group id.
+ * Output:
+ *      pPortmask - LED enable port mask.
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_INPUT        - Invalid input parameters.
+ * Note:
+ *      The API can be used to get LED enable status.
+ */
+rtk_api_ret_t rtk_led_enable_get(rtk_led_group_t group, rtk_portmask_t *pPortmask)
+{
+    rtk_api_ret_t retVal;
+    
+    RTK_API_LOCK();
+    retVal = _rtk_led_enable_get(group, pPortmask);    
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_led_operation_set
+ * Description:
+ *      Set Led operation mode
+ * Input:
+ *      mode - LED operation mode.
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_INPUT        - Invalid input parameters.
+ * Note:
+ *      The API can set the LED operation mode.
+ *      The modes that can be set are as follows:
+ *      - LED_OP_SCAN,
+ *      - LED_OP_PARALLEL,
+ *      - LED_OP_SERIAL,
+ */
+rtk_api_ret_t rtk_led_operation_set(rtk_led_operation_t mode)
+{
+    rtk_api_ret_t retVal;
+    
+    RTK_API_LOCK();
+    retVal = _rtk_led_operation_set(mode);    
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_led_operation_get
+ * Description:
+ *      Get Led operation mode
+ * Input:
+ *      None
+ * Output:
+ *      pMode - Support LED operation mode.
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_INPUT        - Invalid input parameters.
+ * Note:
+ *      The API can get the LED operation mode.
+ *      The supported modes are as follows:
+ *      - LED_OP_SCAN,
+ *      - LED_OP_PARALLEL,
+ *      - LED_OP_SERIAL,
+ */
+rtk_api_ret_t rtk_led_operation_get(rtk_led_operation_t *pMode)
+{
+    rtk_api_ret_t retVal;
+    
+    RTK_API_LOCK();
+    retVal = _rtk_led_operation_get(pMode);    
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
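+
+/*
+ * Round-trip sketch (illustrative only): switch the LED block to serial
+ * mode and read the setting back.  Error handling is elided.
+ *
+ *     rtk_led_operation_t op;
+ *     rtk_led_operation_set(LED_OP_SERIAL);
+ *     rtk_led_operation_get(&op);               // op == LED_OP_SERIAL
+ */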
+
+/* Function Name:
+ *      rtk_led_modeForce_set
+ * Description:
+ *      Set LED group force mode configuration
+ * Input:
+ *      port    - port ID
+ *      group   - Support LED group id.
+ *      mode    - Support LED force mode.
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_INPUT        - Invalid input parameters.
+ *      RT_ERR_PORT_ID      - Error Port ID
+ * Note:
+ *      The API can force an LED group into one of the force modes.
+ *      The force modes that can be set are as follows:
+ *      - LED_FORCE_NORMAL,
+ *      - LED_FORCE_BLINK,
+ *      - LED_FORCE_OFF,
+ *      - LED_FORCE_ON.
+ */
+rtk_api_ret_t rtk_led_modeForce_set(rtk_port_t port, rtk_led_group_t group, rtk_led_force_mode_t mode)
+{
+    rtk_api_ret_t retVal;
+    
+    RTK_API_LOCK();
+    retVal = _rtk_led_modeForce_set(port, group, mode);    
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
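+
+/*
+ * Usage sketch (illustrative only): force the group-0 LED of logical
+ * port 2 to blink, then hand it back to hardware control.  The port and
+ * group numbers are example assumptions.
+ *
+ *     rtk_led_modeForce_set(2, LED_GROUP_0, LED_FORCE_BLINK);
+ *     rtk_led_modeForce_set(2, LED_GROUP_0, LED_FORCE_NORMAL);
+ */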
+
+/* Function Name:
+ *      rtk_led_modeForce_get
+ * Description:
+ *      Get LED group force mode configuration
+ * Input:
+ *      port  - port ID
+ *      group - Support LED group id.
+ *      pMode - Support LED force mode.
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_INPUT        - Invalid input parameters.
+ *      RT_ERR_PORT_ID      - Error Port ID
+ * Note:
+ *      The API can get the forced LED group mode.
+ *      The force modes are as follows:
+ *      - LED_FORCE_NORMAL,
+ *      - LED_FORCE_BLINK,
+ *      - LED_FORCE_OFF,
+ *      - LED_FORCE_ON.
+ */
+rtk_api_ret_t rtk_led_modeForce_get(rtk_port_t port, rtk_led_group_t group, rtk_led_force_mode_t *pMode)
+{
+    rtk_api_ret_t retVal;
+    
+    RTK_API_LOCK();
+    retVal = _rtk_led_modeForce_get(port, group, pMode);    
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_led_blinkRate_set
+ * Description:
+ *      Set LED blinking rate
+ * Input:
+ *      blinkRate - blinking rate.
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_INPUT        - Invalid input parameters.
+ * Note:
+ *      The ASIC supports 6 LED blinking rates: 43ms, 84ms, 120ms, 170ms, 340ms and 670ms.
+ */
+rtk_api_ret_t rtk_led_blinkRate_set(rtk_led_blink_rate_t blinkRate)
+{
+    rtk_api_ret_t retVal;
+    
+    RTK_API_LOCK();
+    retVal = _rtk_led_blinkRate_set(blinkRate);    
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_led_blinkRate_get
+ * Description:
+ *      Get LED blinking rate at mode 0 to mode 3
+ * Input:
+ *      None
+ * Output:
+ *      pBlinkRate - blinking rate.
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_INPUT        - Invalid input parameters.
+ * Note:
+ *      There are 6 LED blinking rates: 43ms, 84ms, 120ms, 170ms, 340ms and 670ms.
+ */
+rtk_api_ret_t rtk_led_blinkRate_get(rtk_led_blink_rate_t *pBlinkRate)
+{
+    rtk_api_ret_t retVal;
+    
+    RTK_API_LOCK();
+    retVal = _rtk_led_blinkRate_get(pBlinkRate);    
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
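+
+/*
+ * Usage sketch (illustrative only): select one of the
+ * rtk_led_blink_rate_t values declared in led.h.
+ *
+ *     rtk_led_blinkRate_set(LED_BLINKRATE_128MS);
+ */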
+
+/* Function Name:
+ *      rtk_led_groupConfig_set
+ * Description:
+ *      Set per-group LED configuration mode
+ * Input:
+ *      group   - LED group.
+ *      config  - LED configuration
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_INPUT        - Invalid input parameters.
+ * Note:
+ *      The API can set the LED indication configuration for each LED group, with a 1-to-1 LED mapping per port.
+ *      - Definition  LED Statuses      Description
+ *      - 0000        LED_Off           LED pin Tri-State.
+ *      - 0001        Dup/Col           Collision, Full duplex Indicator.
+ *      - 0010        Link/Act          Link, Activity Indicator.
+ *      - 0011        Spd1000           1000Mb/s Speed Indicator.
+ *      - 0100        Spd100            100Mb/s Speed Indicator.
+ *      - 0101        Spd10             10Mb/s Speed Indicator.
+ *      - 0110        Spd1000/Act       1000Mb/s Speed/Activity Indicator.
+ *      - 0111        Spd100/Act        100Mb/s Speed/Activity Indicator.
+ *      - 1000        Spd10/Act         10Mb/s Speed/Activity Indicator.
+ *      - 1001        Spd100 (10)/Act   10/100Mb/s Speed/Activity Indicator.
+ *      - 1010        LoopDetect        LoopDetect Indicator.
+ *      - 1011        EEE               EEE Indicator.
+ *      - 1100        Link/Rx           Link, Activity Indicator.
+ *      - 1101        Link/Tx           Link, Activity Indicator.
+ *      - 1110        Master            Link on Master Indicator.
+ *      - 1111        Act               Activity Indicator. Low for link established.
+ */
+rtk_api_ret_t rtk_led_groupConfig_set(rtk_led_group_t group, rtk_led_congig_t config)
+{
+    rtk_api_ret_t retVal;
+    
+    RTK_API_LOCK();
+    retVal = _rtk_led_groupConfig_set(group, config);    
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
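+
+/*
+ * Usage sketch (illustrative only): make group-0 LEDs indicate 1000Mb/s
+ * link plus activity (row 0110 of the table above) and group-1 LEDs
+ * indicate 10/100Mb/s link plus activity (row 1001).
+ *
+ *     rtk_led_groupConfig_set(LED_GROUP_0, LED_CONFIG_SPD1000ACT);
+ *     rtk_led_groupConfig_set(LED_GROUP_1, LED_CONFIG_SPD10010ACT);
+ */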
+
+/* Function Name:
+ *      rtk_led_groupConfig_get
+ * Description:
+ *      Get LED group configuration mode
+ * Input:
+ *      group - LED group.
+ * Output:
+ *      pConfig - LED configuration.
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_INPUT        - Invalid input parameters.
+ * Note:
+ *      The API can get the LED indication configuration for each LED group.
+ */
+rtk_api_ret_t rtk_led_groupConfig_get(rtk_led_group_t group, rtk_led_congig_t *pConfig)
+{
+    rtk_api_ret_t retVal;
+    
+    RTK_API_LOCK();
+    retVal = _rtk_led_groupConfig_get(group, pConfig);    
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_led_groupAbility_set
+ * Description:
+ *      Configure per group Led ability
+ * Input:
+ *      group    - LED group.
+ *      pAbility - LED ability
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_INPUT        - Invalid input parameters.
+ * Note:
+ *      None.
+ */
+
+rtk_api_ret_t rtk_led_groupAbility_set(rtk_led_group_t group, rtk_led_ability_t *pAbility)
+{
+    rtk_api_ret_t retVal;
+    
+    RTK_API_LOCK();
+    retVal = _rtk_led_groupAbility_set(group, pAbility);    
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_led_groupAbility_get
+ * Description:
+ *      Get per group Led ability
+ * Input:
+ *      group    - LED group.
+ *      pAbility - LED ability
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_INPUT        - Invalid input parameters.
+ * Note:
+ *      None.
+ */
+
+rtk_api_ret_t rtk_led_groupAbility_get(rtk_led_group_t group, rtk_led_ability_t *pAbility)
+{
+    rtk_api_ret_t retVal;
+    
+    RTK_API_LOCK();
+    retVal = _rtk_led_groupAbility_get(group, pAbility);    
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
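+
+/*
+ * Usage sketch (illustrative only): build an ability mask so the group-0
+ * LED lights for 1000Mb/s link and blinks on RX/TX activity.  The zeroed
+ * fields are assumed to equal DISABLED; note the set routine above also
+ * sets bit 6 of the LED data control register.
+ *
+ *     rtk_led_ability_t ab;
+ *     memset(&ab, 0, sizeof(ab));
+ *     ab.link_1000m = ENABLED;
+ *     ab.act_rx = ENABLED;
+ *     ab.act_tx = ENABLED;
+ *     rtk_led_groupAbility_set(LED_GROUP_0, &ab);
+ */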
+
+
+/* Function Name:
+ *      rtk_led_serialMode_set
+ * Description:
+ *      Set LED serial mode active configuration
+ * Input:
+ *      active - LED active level.
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_INPUT        - Invalid input parameters.
+ * Note:
+ *      The API can set the LED serial mode active configuration.
+ */
+rtk_api_ret_t rtk_led_serialMode_set(rtk_led_active_t active)
+{
+    rtk_api_ret_t retVal;
+    
+    RTK_API_LOCK();
+    retVal = _rtk_led_serialMode_set(active);    
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_led_serialMode_get
+ * Description:
+ *      Get LED serial mode active configuration
+ * Input:
+ *      None
+ * Output:
+ *      pActive - LED active level.
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_INPUT        - Invalid input parameters.
+ * Note:
+ *       The API can get LED serial mode active configuration.
+ */
+rtk_api_ret_t rtk_led_serialMode_get(rtk_led_active_t *pActive)
+{
+    rtk_api_ret_t retVal;
+    
+    RTK_API_LOCK();
+    retVal = _rtk_led_serialMode_get(pActive);    
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_led_OutputEnable_set
+ * Description:
+ *      This API sets the LED I/O state.
+ * Input:
+ *      state       - LED I/O state
+ * Output:
+ *      None.
+ * Return:
+ *      RT_ERR_OK              - OK
+ *      RT_ERR_FAILED          - Failed
+ *      RT_ERR_SMI             - SMI access error
+ *      RT_ERR_INPUT           - Error parameter
+ * Note:
+ *      This API sets the LED I/O state.
+ */
+rtk_api_ret_t rtk_led_OutputEnable_set(rtk_enable_t state)
+{
+    rtk_api_ret_t retVal;
+    
+    RTK_API_LOCK();
+    retVal = _rtk_led_OutputEnable_set(state);    
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_led_OutputEnable_get
+ * Description:
+ *      This API gets the LED I/O state.
+ * Input:
+ *      None.
+ * Output:
+ *      pState          - LED I/O state
+ * Return:
+ *      RT_ERR_OK              - OK
+ *      RT_ERR_FAILED          - Failed
+ *      RT_ERR_SMI             - SMI access error
+ *      RT_ERR_INPUT           - Error parameter
+ * Note:
+ *      This API gets the current LED I/O state.
+ */
+rtk_api_ret_t rtk_led_OutputEnable_get(rtk_enable_t *pState)
+{
+    rtk_api_ret_t retVal;
+    
+    RTK_API_LOCK();
+    retVal = _rtk_led_OutputEnable_get(pState);    
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_led_serialModePortmask_set
+ * Description:
+ *      This API configures the serial LED output group and portmask.
+ * Input:
+ *      output          - output group
+ *      pPortmask       - output portmask
+ * Output:
+ *      None.
+ * Return:
+ *      RT_ERR_OK              - OK
+ *      RT_ERR_FAILED          - Failed
+ *      RT_ERR_SMI             - SMI access error
+ *      RT_ERR_INPUT           - Error parameter
+ * Note:
+ *      None.
+ */
+rtk_api_ret_t rtk_led_serialModePortmask_set(rtk_led_serialOutput_t output, rtk_portmask_t *pPortmask)
+{
+    rtk_api_ret_t retVal;
+    
+    RTK_API_LOCK();
+    retVal = _rtk_led_serialModePortmask_set(output, pPortmask);    
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_led_serialModePortmask_get
+ * Description:
+ *      This API gets the serial LED output group and portmask.
+ * Input:
+ *      None.
+ * Output:
+ *      pOutput         - output group
+ *      pPortmask       - output portmask
+ * Return:
+ *      RT_ERR_OK              - OK
+ *      RT_ERR_FAILED          - Failed
+ *      RT_ERR_SMI             - SMI access error
+ *      RT_ERR_INPUT           - Error parameter
+ * Note:
+ *      None.
+ */
+rtk_api_ret_t rtk_led_serialModePortmask_get(rtk_led_serialOutput_t *pOutput, rtk_portmask_t *pPortmask)
+{
+    rtk_api_ret_t retVal;
+    
+    RTK_API_LOCK();
+    retVal = _rtk_led_serialModePortmask_get(pOutput, pPortmask);    
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
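+
+/*
+ * Bring-up sketch (illustrative only) tying the serial-LED APIs together:
+ * serial operation mode, active-low signalling, output enabled, and group
+ * 0 shifted out for logical ports 0-4.  All values are example
+ * assumptions; error handling is elided.
+ *
+ *     rtk_portmask_t pm;
+ *     rtk_led_operation_set(LED_OP_SERIAL);
+ *     rtk_led_serialMode_set(LED_ACTIVE_LOW);
+ *     rtk_led_OutputEnable_set(ENABLED);
+ *     memset(&pm, 0, sizeof(pm));
+ *     pm.bits[0] = 0x1F;                        // logical ports 0-4
+ *     rtk_led_serialModePortmask_set(SERIAL_LED_0, &pm);
+ */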
+
diff -Nruw linux-4.4.115-fbx/drivers/misc/freebox./rtlapi/led.h linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/led.h
--- linux-4.4.115-fbx/drivers/misc/freebox./rtlapi/led.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/led.h	2019-01-22 16:16:24.707257309 +0100
@@ -0,0 +1,483 @@
+/*
+ * Copyright (C) 2013 Realtek Semiconductor Corp.
+ * All Rights Reserved.
+ *
+ * This program is the proprietary software of Realtek Semiconductor
+ * Corporation and/or its licensors, and only be used, duplicated,
+ * modified or distributed under the authorized license from Realtek.
+ *
+ * ANY USE OF THE SOFTWARE OTHER THAN AS AUTHORIZED UNDER
+ * THIS LICENSE OR COPYRIGHT LAW IS PROHIBITED.
+ *
+ * Purpose : RTL8367/RTL8367C switch high-level API
+ *
+ * Feature : The file includes LED module high-layer API definition
+ *
+ */
+
+#ifndef __RTK_API_LED_H__
+#define __RTK_API_LED_H__
+
+typedef enum rtk_led_operation_e
+{
+    LED_OP_SCAN=0,
+    LED_OP_PARALLEL,
+    LED_OP_SERIAL,
+    LED_OP_END,
+}rtk_led_operation_t;
+
+
+typedef enum rtk_led_active_e
+{
+    LED_ACTIVE_HIGH=0,
+    LED_ACTIVE_LOW,
+    LED_ACTIVE_END,
+}rtk_led_active_t;
+
+typedef enum rtk_led_config_e
+{
+    LED_CONFIG_LEDOFF=0,
+    LED_CONFIG_DUPCOL,
+    LED_CONFIG_LINK_ACT,
+    LED_CONFIG_SPD1000,
+    LED_CONFIG_SPD100,
+    LED_CONFIG_SPD10,
+    LED_CONFIG_SPD1000ACT,
+    LED_CONFIG_SPD100ACT,
+    LED_CONFIG_SPD10ACT,
+    LED_CONFIG_SPD10010ACT,
+    LED_CONFIG_LOOPDETECT,
+    LED_CONFIG_EEE,
+    LED_CONFIG_LINKRX,
+    LED_CONFIG_LINKTX,
+    LED_CONFIG_MASTER,
+    LED_CONFIG_ACT,
+    LED_CONFIG_END,
+}rtk_led_congig_t;
+
+typedef struct rtk_led_ability_s
+{
+    rtk_enable_t link_10m;
+    rtk_enable_t link_100m;
+    rtk_enable_t link_500m;
+    rtk_enable_t link_1000m;
+    rtk_enable_t act_rx;
+    rtk_enable_t act_tx;
+}rtk_led_ability_t;
+
+typedef enum rtk_led_blink_rate_e
+{
+    LED_BLINKRATE_32MS=0,
+    LED_BLINKRATE_64MS,
+    LED_BLINKRATE_128MS,
+    LED_BLINKRATE_256MS,
+    LED_BLINKRATE_512MS,
+    LED_BLINKRATE_1024MS,
+    LED_BLINKRATE_48MS,
+    LED_BLINKRATE_96MS,
+    LED_BLINKRATE_END,
+}rtk_led_blink_rate_t;
+
+typedef enum rtk_led_group_e
+{
+    LED_GROUP_0 = 0,
+    LED_GROUP_1,
+    LED_GROUP_2,
+    LED_GROUP_END
+}rtk_led_group_t;
+
+
+typedef enum rtk_led_force_mode_e
+{
+    LED_FORCE_NORMAL=0,
+    LED_FORCE_BLINK,
+    LED_FORCE_OFF,
+    LED_FORCE_ON,
+    LED_FORCE_END
+}rtk_led_force_mode_t;
+
+typedef enum rtk_led_serialOutput_e
+{
+    SERIAL_LED_NONE = 0,
+    SERIAL_LED_0,
+    SERIAL_LED_0_1,
+    SERIAL_LED_0_2,
+    SERIAL_LED_END,
+}rtk_led_serialOutput_t;
+
+
+/* Function Name:
+ *      rtk_led_enable_set
+ * Description:
+ *      Set LED enable configuration
+ * Input:
+ *      group       - LED group id.
+ *      pPortmask    - LED enable port mask.
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_INPUT        - Invalid input parameters.
+ * Note:
+ *      The API can be used to enable LED per port per group.
+ */
+extern rtk_api_ret_t rtk_led_enable_set(rtk_led_group_t group, rtk_portmask_t *pPortmask);
+
+/* Function Name:
+ *      rtk_led_enable_get
+ * Description:
+ *      Get LED enable configuration
+ * Input:
+ *      group - LED group id.
+ * Output:
+ *      pPortmask - LED enable port mask.
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_INPUT        - Invalid input parameters.
+ * Note:
+ *      The API can be used to get LED enable status.
+ */
+extern rtk_api_ret_t rtk_led_enable_get(rtk_led_group_t group, rtk_portmask_t *pPortmask);
+
+/* Function Name:
+ *      rtk_led_operation_set
+ * Description:
+ *      Set Led operation mode
+ * Input:
+ *      mode - LED operation mode.
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_INPUT        - Invalid input parameters.
+ * Note:
+ *      The API can set the LED operation mode.
+ *      The modes that can be set are as follows:
+ *      - LED_OP_SCAN,
+ *      - LED_OP_PARALLEL,
+ *      - LED_OP_SERIAL,
+ */
+extern rtk_api_ret_t rtk_led_operation_set(rtk_led_operation_t mode);
+
+/* Function Name:
+ *      rtk_led_operation_get
+ * Description:
+ *      Get Led operation mode
+ * Input:
+ *      None
+ * Output:
+ *      pMode - Support LED operation mode.
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_INPUT        - Invalid input parameters.
+ * Note:
+ *      The API can get the LED operation mode.
+ *      The supported modes are as follows:
+ *      - LED_OP_SCAN,
+ *      - LED_OP_PARALLEL,
+ *      - LED_OP_SERIAL,
+ */
+extern rtk_api_ret_t rtk_led_operation_get(rtk_led_operation_t *pMode);
+
+/* Function Name:
+ *      rtk_led_modeForce_set
+ * Description:
+ *      Set LED group force mode configuration
+ * Input:
+ *      port    - port ID
+ *      group   - Support LED group id.
+ *      mode    - Support LED force mode.
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_INPUT        - Invalid input parameters.
+ *      RT_ERR_PORT_ID      - Error Port ID
+ * Note:
+ *      The API can force an LED group into one of the force modes.
+ *      The force modes that can be set are as follows:
+ *      - LED_FORCE_NORMAL,
+ *      - LED_FORCE_BLINK,
+ *      - LED_FORCE_OFF,
+ *      - LED_FORCE_ON.
+ */
+extern rtk_api_ret_t rtk_led_modeForce_set(rtk_port_t port, rtk_led_group_t group, rtk_led_force_mode_t mode);
+
+/* Function Name:
+ *      rtk_led_modeForce_get
+ * Description:
+ *      Get LED group force mode configuration
+ * Input:
+ *      port  - port ID
+ *      group - Support LED group id.
+ *      pMode - Support LED force mode.
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_INPUT        - Invalid input parameters.
+ *      RT_ERR_PORT_ID      - Error Port ID
+ * Note:
+ *      The API can get the forced LED group mode.
+ *      The force modes are as follows:
+ *      - LED_FORCE_NORMAL,
+ *      - LED_FORCE_BLINK,
+ *      - LED_FORCE_OFF,
+ *      - LED_FORCE_ON.
+ */
+extern rtk_api_ret_t rtk_led_modeForce_get(rtk_port_t port, rtk_led_group_t group, rtk_led_force_mode_t *pMode);
+
+/* Function Name:
+ *      rtk_led_blinkRate_set
+ * Description:
+ *      Set LED blinking rate
+ * Input:
+ *      blinkRate - blinking rate.
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_INPUT        - Invalid input parameters.
+ * Note:
+ *      The ASIC supports 6 LED blinking rates: 43ms, 84ms, 120ms, 170ms, 340ms and 670ms.
+ */
+extern rtk_api_ret_t rtk_led_blinkRate_set(rtk_led_blink_rate_t blinkRate);
+
+/* Function Name:
+ *      rtk_led_blinkRate_get
+ * Description:
+ *      Get LED blinking rate at mode 0 to mode 3
+ * Input:
+ *      None
+ * Output:
+ *      pBlinkRate - blinking rate.
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_INPUT        - Invalid input parameters.
+ * Note:
+ *      There are 6 LED blinking rates: 43ms, 84ms, 120ms, 170ms, 340ms and 670ms.
+ */
+extern rtk_api_ret_t rtk_led_blinkRate_get(rtk_led_blink_rate_t *pBlinkRate);
+
+/* Function Name:
+ *      rtk_led_groupConfig_set
+ * Description:
+ *      Set per-group LED configuration mode
+ * Input:
+ *      group   - LED group.
+ *      config  - LED configuration
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_INPUT        - Invalid input parameters.
+ * Note:
+ *      The API can set the LED indication configuration for each LED group, with a 1-to-1 LED mapping per port.
+ *      - Definition  LED Statuses      Description
+ *      - 0000        LED_Off           LED pin Tri-State.
+ *      - 0001        Dup/Col           Collision, Full duplex Indicator.
+ *      - 0010        Link/Act          Link, Activity Indicator.
+ *      - 0011        Spd1000           1000Mb/s Speed Indicator.
+ *      - 0100        Spd100            100Mb/s Speed Indicator.
+ *      - 0101        Spd10             10Mb/s Speed Indicator.
+ *      - 0110        Spd1000/Act       1000Mb/s Speed/Activity Indicator.
+ *      - 0111        Spd100/Act        100Mb/s Speed/Activity Indicator.
+ *      - 1000        Spd10/Act         10Mb/s Speed/Activity Indicator.
+ *      - 1001        Spd100 (10)/Act   10/100Mb/s Speed/Activity Indicator.
+ *      - 1010        LoopDetect        LoopDetect Indicator.
+ *      - 1011        EEE               EEE Indicator.
+ *      - 1100        Link/Rx           Link, Activity Indicator.
+ *      - 1101        Link/Tx           Link, Activity Indicator.
+ *      - 1110        Master            Link on Master Indicator.
+ *      - 1111        Act               Activity Indicator. Low for link established.
+ */
+extern rtk_api_ret_t rtk_led_groupConfig_set(rtk_led_group_t group, rtk_led_congig_t config);
+
+/* Function Name:
+ *      rtk_led_groupConfig_get
+ * Description:
+ *      Get LED group configuration mode
+ * Input:
+ *      group - LED group.
+ * Output:
+ *      pConfig - LED configuration.
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_INPUT        - Invalid input parameters.
+ * Note:
+ *      The API can get the LED indication configuration for each LED group.
+ */
+extern rtk_api_ret_t rtk_led_groupConfig_get(rtk_led_group_t group, rtk_led_congig_t *pConfig);
+
+/* Function Name:
+ *      rtk_led_groupAbility_set
+ * Description:
+ *      Configure per group Led ability
+ * Input:
+ *      group    - LED group.
+ *      pAbility - LED ability
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_INPUT        - Invalid input parameters.
+ * Note:
+ *      None.
+ */
+
+extern rtk_api_ret_t rtk_led_groupAbility_set(rtk_led_group_t group, rtk_led_ability_t *pAbility);
+
+/* Function Name:
+ *      rtk_led_groupAbility_get
+ * Description:
+ *      Get per group Led ability
+ * Input:
+ *      group    - LED group.
+ *      pAbility - LED ability
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_INPUT        - Invalid input parameters.
+ * Note:
+ *      None.
+ */
+
+extern rtk_api_ret_t rtk_led_groupAbility_get(rtk_led_group_t group, rtk_led_ability_t *pAbility);
+
+/* Function Name:
+ *      rtk_led_serialMode_set
+ * Description:
+ *      Set LED serial mode active configuration
+ * Input:
+ *      active - LED active level.
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_INPUT        - Invalid input parameters.
+ * Note:
+ *      The API can set the LED serial mode active configuration.
+ */
+extern rtk_api_ret_t rtk_led_serialMode_set(rtk_led_active_t active);
+
+/* Function Name:
+ *      rtk_led_serialMode_get
+ * Description:
+ *      Get LED serial mode active configuration
+ * Input:
+ *      None
+ * Output:
+ *      pActive - LED active level.
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_INPUT        - Invalid input parameters.
+ * Note:
+ *       The API can get LED serial mode active configuration.
+ */
+extern rtk_api_ret_t rtk_led_serialMode_get(rtk_led_active_t *pActive);
+
+/* Function Name:
+ *      rtk_led_OutputEnable_set
+ * Description:
+ *      This API sets the LED I/O state.
+ * Input:
+ *      state       - LED I/O state
+ * Output:
+ *      None.
+ * Return:
+ *      RT_ERR_OK              - OK
+ *      RT_ERR_FAILED          - Failed
+ *      RT_ERR_SMI             - SMI access error
+ *      RT_ERR_INPUT           - Error parameter
+ * Note:
+ *      This API sets the LED I/O state.
+ */
+extern rtk_api_ret_t rtk_led_OutputEnable_set(rtk_enable_t state);
+
+
+/* Function Name:
+ *      rtk_led_OutputEnable_get
+ * Description:
+ *      This API gets the LED I/O state.
+ * Input:
+ *      None.
+ * Output:
+ *      pState          - LED I/O state
+ * Return:
+ *      RT_ERR_OK              - OK
+ *      RT_ERR_FAILED          - Failed
+ *      RT_ERR_SMI             - SMI access error
+ *      RT_ERR_INPUT           - Error parameter
+ * Note:
+ *      This API gets the current LED I/O state.
+ */
+extern rtk_api_ret_t rtk_led_OutputEnable_get(rtk_enable_t *pState);
+
+/* Function Name:
+ *      rtk_led_serialModePortmask_set
+ * Description:
+ *      This API configures the serial LED output group and portmask.
+ * Input:
+ *      output          - output group
+ *      pPortmask       - output portmask
+ * Output:
+ *      None.
+ * Return:
+ *      RT_ERR_OK              - OK
+ *      RT_ERR_FAILED          - Failed
+ *      RT_ERR_SMI             - SMI access error
+ *      RT_ERR_INPUT           - Error parameter
+ * Note:
+ *      None.
+ */
+extern rtk_api_ret_t rtk_led_serialModePortmask_set(rtk_led_serialOutput_t output, rtk_portmask_t *pPortmask);
+
+/* Function Name:
+ *      rtk_led_serialModePortmask_get
+ * Description:
+ *      This API gets the serial LED output group and portmask.
+ * Input:
+ *      None.
+ * Output:
+ *      pOutput         - output group
+ *      pPortmask       - output portmask
+ * Return:
+ *      RT_ERR_OK              - OK
+ *      RT_ERR_FAILED          - Failed
+ *      RT_ERR_SMI             - SMI access error
+ *      RT_ERR_INPUT           - Error parameter
+ * Note:
+ *      None.
+ */
+extern rtk_api_ret_t rtk_led_serialModePortmask_get(rtk_led_serialOutput_t *pOutput, rtk_portmask_t *pPortmask);
+
+#endif /* __RTK_API_LED_H__ */
diff -Nruw linux-4.4.115-fbx/drivers/misc/freebox./rtlapi/mirror.c linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/mirror.c
--- linux-4.4.115-fbx/drivers/misc/freebox./rtlapi/mirror.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/mirror.c	2019-01-22 16:16:24.707257309 +0100
@@ -0,0 +1,682 @@
+/*
+ * Copyright (C) 2013 Realtek Semiconductor Corp.
+ * All Rights Reserved.
+ *
+ * This program is the proprietary software of Realtek Semiconductor
+ * Corporation and/or its licensors, and only be used, duplicated,
+ * modified or distributed under the authorized license from Realtek.
+ *
+ * ANY USE OF THE SOFTWARE OTHER THAN AS AUTHORIZED UNDER
+ * THIS LICENSE OR COPYRIGHT LAW IS PROHIBITED.
+ *
+ * $Revision: 79629 $
+ * $Date: 2017-06-14 18:08:03 +0800 (Wed, 14 Jun 2017) $
+ *
+ * Purpose : RTK switch high-level API for RTL8367/RTL8367C
+ * Feature : Here is a list of all functions and variables in Mirror module.
+ *
+ */
+
+#include <rtk_switch.h>
+#include <rtk_error.h>
+#include <mirror.h>
+#include <string.h>
+#include <rtl8367c_asicdrv.h>
+#include <rtl8367c_asicdrv_mirror.h>
+
+static rtk_api_ret_t _rtk_mirror_portBased_set(rtk_port_t mirroring_port, rtk_portmask_t *pMirrored_rx_portmask, rtk_portmask_t *pMirrored_tx_portmask)
+{
+    rtk_api_ret_t retVal;
+    rtk_enable_t mirRx, mirTx;
+    rtk_uint32 i, pmask;
+    rtk_port_t source_port;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    /* Check port valid */
+    RTK_CHK_PORT_VALID(mirroring_port);
+
+    if(NULL == pMirrored_rx_portmask)
+        return RT_ERR_NULL_POINTER;
+
+    if(NULL == pMirrored_tx_portmask)
+        return RT_ERR_NULL_POINTER;
+
+    RTK_CHK_PORTMASK_VALID(pMirrored_rx_portmask);
+
+    RTK_CHK_PORTMASK_VALID(pMirrored_tx_portmask);
+
+    /* Mirror source port mask check */
+    if ((pMirrored_tx_portmask->bits[0] != pMirrored_rx_portmask->bits[0]) && (pMirrored_tx_portmask->bits[0] != 0) && (pMirrored_rx_portmask->bits[0] != 0))
+        return RT_ERR_PORT_MASK;
+
+    /* Mirror port must not be in the source portmasks */
+    if(RTK_PORTMASK_IS_PORT_SET((*pMirrored_tx_portmask), mirroring_port) || RTK_PORTMASK_IS_PORT_SET((*pMirrored_rx_portmask), mirroring_port))
+        return RT_ERR_PORT_MASK;
+
+    source_port = rtk_switch_maxLogicalPort_get();
+
+    RTK_SCAN_ALL_LOG_PORT(i)
+    {
+        if (pMirrored_tx_portmask->bits[0]&(1<<i))
+        {
+            source_port = i;
+            break;
+        }
+
+        if (pMirrored_rx_portmask->bits[0]&(1<<i))
+        {
+            source_port = i;
+            break;
+        }
+    }
+
+    if ((retVal = rtl8367c_setAsicPortMirror(rtk_switch_port_L2P_get(source_port), rtk_switch_port_L2P_get(mirroring_port))) != RT_ERR_OK)
+        return retVal;
+    if(pMirrored_rx_portmask->bits[0] != 0)
+    {
+        if ((retVal = rtk_switch_portmask_L2P_get(pMirrored_rx_portmask, &pmask)) != RT_ERR_OK)
+            return retVal;
+        if ((retVal = rtl8367c_setAsicPortMirrorMask(pmask)) != RT_ERR_OK)
+            return retVal;
+    }
+    else
+    {
+        if ((retVal = rtk_switch_portmask_L2P_get(pMirrored_tx_portmask, &pmask)) != RT_ERR_OK)
+            return retVal;
+        if ((retVal = rtl8367c_setAsicPortMirrorMask(pmask)) != RT_ERR_OK)
+            return retVal;
+    }
+
+
+    if (pMirrored_rx_portmask->bits[0])
+        mirRx = ENABLED;
+    else
+        mirRx = DISABLED;
+
+    if ((retVal = rtl8367c_setAsicPortMirrorRxFunction(mirRx)) != RT_ERR_OK)
+        return retVal;
+
+    if (pMirrored_tx_portmask->bits[0])
+        mirTx = ENABLED;
+    else
+        mirTx = DISABLED;
+
+    if ((retVal = rtl8367c_setAsicPortMirrorTxFunction(mirTx)) != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_mirror_portBased_get(rtk_port_t *pMirroring_port, rtk_portmask_t *pMirrored_rx_portmask, rtk_portmask_t *pMirrored_tx_portmask)
+{
+    rtk_api_ret_t retVal;
+    rtk_port_t source_port;
+    rtk_enable_t mirRx, mirTx;
+    rtk_uint32 sport, mport, pmask;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if(NULL == pMirrored_rx_portmask)
+        return RT_ERR_NULL_POINTER;
+
+    if(NULL == pMirrored_tx_portmask)
+        return RT_ERR_NULL_POINTER;
+
+    if(NULL == pMirroring_port)
+        return RT_ERR_NULL_POINTER;
+
+    if ((retVal = rtl8367c_getAsicPortMirror(&sport, &mport)) != RT_ERR_OK)
+        return retVal;
+    source_port = rtk_switch_port_P2L_get(sport);
+    *pMirroring_port = rtk_switch_port_P2L_get(mport);
+
+    if ((retVal = rtl8367c_getAsicPortMirrorRxFunction((rtk_uint32*)&mirRx)) != RT_ERR_OK)
+        return retVal;
+
+    if ((retVal = rtl8367c_getAsicPortMirrorTxFunction((rtk_uint32*)&mirTx)) != RT_ERR_OK)
+        return retVal;
+
+    if ((retVal = rtl8367c_getAsicPortMirrorMask(&pmask)) != RT_ERR_OK)
+        return retVal;
+
+    if (DISABLED == mirRx)
+        pMirrored_rx_portmask->bits[0]=0;
+    else
+    {
+        if ((retVal = rtk_switch_portmask_P2L_get(pmask, pMirrored_rx_portmask)) != RT_ERR_OK)
+            return retVal;
+        pMirrored_rx_portmask->bits[0] |= 1<<source_port;
+    }
+
+    if (DISABLED == mirTx)
+        pMirrored_tx_portmask->bits[0]=0;
+    else
+    {
+        if ((retVal = rtk_switch_portmask_P2L_get(pmask, pMirrored_tx_portmask)) != RT_ERR_OK)
+            return retVal;
+        pMirrored_tx_portmask->bits[0] |= 1<<source_port;
+    }
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_mirror_portIso_set(rtk_enable_t enable)
+{
+    rtk_api_ret_t retVal;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if (enable >= RTK_ENABLE_END)
+        return RT_ERR_ENABLE;
+
+    if ((retVal = rtl8367c_setAsicPortMirrorIsolation(enable)) != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_mirror_portIso_get(rtk_enable_t *pEnable)
+{
+    rtk_api_ret_t retVal;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if(NULL == pEnable)
+        return RT_ERR_NULL_POINTER;
+
+    if ((retVal = rtl8367c_getAsicPortMirrorIsolation(pEnable)) != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_mirror_vlanLeaky_set(rtk_enable_t txenable, rtk_enable_t rxenable)
+{
+    rtk_api_ret_t retVal;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if ((txenable >= RTK_ENABLE_END) ||(rxenable >= RTK_ENABLE_END))
+        return RT_ERR_ENABLE;
+
+    if ((retVal = rtl8367c_setAsicPortMirrorVlanTxLeaky(txenable)) != RT_ERR_OK)
+        return retVal;
+
+    if ((retVal = rtl8367c_setAsicPortMirrorVlanRxLeaky(rxenable)) != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_mirror_vlanLeaky_get(rtk_enable_t *pTxenable, rtk_enable_t *pRxenable)
+{
+    rtk_api_ret_t retVal;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if( (NULL == pTxenable) || (NULL == pRxenable) )
+        return RT_ERR_NULL_POINTER;
+
+    if ((retVal = rtl8367c_getAsicPortMirrorVlanTxLeaky(pTxenable)) != RT_ERR_OK)
+        return retVal;
+
+    if ((retVal = rtl8367c_getAsicPortMirrorVlanRxLeaky(pRxenable)) != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_mirror_isolationLeaky_set(rtk_enable_t txenable, rtk_enable_t rxenable)
+{
+    rtk_api_ret_t retVal;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if ((txenable >= RTK_ENABLE_END) ||(rxenable >= RTK_ENABLE_END))
+        return RT_ERR_ENABLE;
+
+    if ((retVal = rtl8367c_setAsicPortMirrorIsolationTxLeaky(txenable)) != RT_ERR_OK)
+        return retVal;
+
+    if ((retVal = rtl8367c_setAsicPortMirrorIsolationRxLeaky(rxenable)) != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_mirror_isolationLeaky_get(rtk_enable_t *pTxenable, rtk_enable_t *pRxenable)
+{
+    rtk_api_ret_t retVal;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if( (NULL == pTxenable) || (NULL == pRxenable) )
+        return RT_ERR_NULL_POINTER;
+
+    if ((retVal = rtl8367c_getAsicPortMirrorIsolationTxLeaky(pTxenable)) != RT_ERR_OK)
+        return retVal;
+
+    if ((retVal = rtl8367c_getAsicPortMirrorIsolationRxLeaky(pRxenable)) != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_mirror_keep_set(rtk_mirror_keep_t mode)
+{
+    rtk_api_ret_t retVal;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if (mode >= MIRROR_KEEP_END)
+        return RT_ERR_ENABLE;
+
+    if ((retVal = rtl8367c_setAsicPortMirrorRealKeep(mode)) != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_mirror_keep_get(rtk_mirror_keep_t *pMode)
+{
+    rtk_api_ret_t retVal;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if(NULL == pMode)
+        return RT_ERR_NULL_POINTER;
+
+    if ((retVal = rtl8367c_getAsicPortMirrorRealKeep(pMode)) != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_mirror_override_set(rtk_enable_t rxMirror, rtk_enable_t txMirror, rtk_enable_t aclMirror)
+{
+    rtk_api_ret_t retVal;
+
+    if( (rxMirror >= RTK_ENABLE_END) || (txMirror >= RTK_ENABLE_END) || (aclMirror >= RTK_ENABLE_END))
+        return RT_ERR_ENABLE;
+
+    if ((retVal = rtl8367c_setAsicPortMirrorOverride((rtk_uint32)rxMirror, (rtk_uint32)txMirror, (rtk_uint32)aclMirror)) != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_mirror_override_get(rtk_enable_t *pRxMirror, rtk_enable_t *pTxMirror, rtk_enable_t *pAclMirror)
+{
+    rtk_api_ret_t retVal;
+
+    if( (pRxMirror == NULL) || (pTxMirror == NULL) || (pAclMirror == NULL))
+        return RT_ERR_NULL_POINTER;
+
+    if ((retVal = rtl8367c_getAsicPortMirrorOverride((rtk_uint32 *)pRxMirror, (rtk_uint32 *)pTxMirror, (rtk_uint32 *)pAclMirror)) != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+/* Function Name:
+ *      rtk_mirror_portBased_set
+ * Description:
+ *      Set port mirror function.
+ * Input:
+ *      mirroring_port          - Monitor port.
+ *      pMirrored_rx_portmask   - Rx mirror port mask.
+ *      pMirrored_tx_portmask   - Tx mirror port mask.
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_PORT_ID      - Invalid port number
+ *      RT_ERR_PORT_MASK    - Invalid portmask.
+ * Note:
+ *      The API is to set mirror function of source port and mirror port.
+ *      The mirror port can only be set to one port and the TX and RX mirror ports
+ *      should be identical.
+ */
+rtk_api_ret_t rtk_mirror_portBased_set(rtk_port_t mirroring_port, rtk_portmask_t *pMirrored_rx_portmask, rtk_portmask_t *pMirrored_tx_portmask)
+{
+    rtk_api_ret_t retVal;
+    
+    RTK_API_LOCK();
+    retVal = _rtk_mirror_portBased_set(mirroring_port, pMirrored_rx_portmask, pMirrored_tx_portmask);    
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
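+
+/*
+ * Usage sketch (illustrative only): mirror both directions of logical
+ * port 1 to monitor port 5.  Per the note above, the RX and TX source
+ * masks must be identical (or one of them empty); the port numbers are
+ * example assumptions.
+ *
+ *     rtk_portmask_t rx, tx;
+ *     memset(&rx, 0, sizeof(rx));
+ *     memset(&tx, 0, sizeof(tx));
+ *     rx.bits[0] = tx.bits[0] = (1 << 1);       // source: logical port 1
+ *     rtk_mirror_portBased_set(5, &rx, &tx);
+ */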
+
+/* Function Name:
+ *      rtk_mirror_portBased_get
+ * Description:
+ *      Get port mirror function.
+ * Input:
+ *      None
+ * Output:
+ *      pMirroring_port         - Monitor port.
+ *      pMirrored_rx_portmask   - Rx mirror port mask.
+ *      pMirrored_tx_portmask   - Tx mirror port mask.
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_INPUT        - Invalid input parameters.
+ * Note:
+ *      The API is to get mirror function of source port and mirror port.
+ */
+rtk_api_ret_t rtk_mirror_portBased_get(rtk_port_t *pMirroring_port, rtk_portmask_t *pMirrored_rx_portmask, rtk_portmask_t *pMirrored_tx_portmask)
+{
+    rtk_api_ret_t retVal;
+    
+    RTK_API_LOCK();
+    retVal = _rtk_mirror_portBased_get(pMirroring_port, pMirrored_rx_portmask, pMirrored_tx_portmask);    
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_mirror_portIso_set
+ * Description:
+ *      Set mirror port isolation.
+ * Input:
+ *      enable - Mirror isolation status.
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_ENABLE       - Invalid enable input
+ * Note:
+ *      The API is to set the mirror isolation function, which prevents normally forwarded packets from reaching the mirror port.
+ */
+rtk_api_ret_t rtk_mirror_portIso_set(rtk_enable_t enable)
+{
+    rtk_api_ret_t retVal;
+    
+    RTK_API_LOCK();
+    retVal = _rtk_mirror_portIso_set(enable);    
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_mirror_portIso_get
+ * Description:
+ *      Get mirror port isolation.
+ * Input:
+ *      None
+ * Output:
+ *      pEnable - Mirror isolation status.
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_INPUT        - Invalid input parameters.
+ * Note:
+ *      The API is to get mirror isolation status.
+ */
+rtk_api_ret_t rtk_mirror_portIso_get(rtk_enable_t *pEnable)
+{
+    rtk_api_ret_t retVal;
+    
+    RTK_API_LOCK();
+    retVal = _rtk_mirror_portIso_get(pEnable);    
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
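+
+/*
+ * Usage sketch (illustrative only): isolate the monitor port so that it
+ * carries mirrored traffic only.
+ *
+ *     rtk_mirror_portIso_set(ENABLED);
+ */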
+
+/* Function Name:
+ *      rtk_mirror_vlanLeaky_set
+ * Description:
+ *      Set mirror VLAN leaky.
+ * Input:
+ *      txenable - TX leaky enable.
+ *      rxenable - RX leaky enable.
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_ENABLE       - Invalid enable input
+ * Note:
+ *      The API is to set the mirror VLAN leaky function, which allows mirrored packets to reach the mirror port across VLAN boundaries.
+ */
+rtk_api_ret_t rtk_mirror_vlanLeaky_set(rtk_enable_t txenable, rtk_enable_t rxenable)
+{
+    rtk_api_ret_t retVal;
+    
+    RTK_API_LOCK();
+    retVal = _rtk_mirror_vlanLeaky_set(txenable, rxenable);    
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_mirror_vlanLeaky_get
+ * Description:
+ *      Get mirror VLAN leaky.
+ * Input:
+ *      None
+ * Output:
+ *      pTxenable - TX leaky enable.
+ *      pRxenable - RX leaky enable.
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_INPUT        - Invalid input parameters.
+ * Note:
+ *      The API is to get mirror VLAN leaky status.
+ */
+rtk_api_ret_t rtk_mirror_vlanLeaky_get(rtk_enable_t *pTxenable, rtk_enable_t *pRxenable)
+{
+    rtk_api_ret_t retVal;
+    
+    RTK_API_LOCK();
+    retVal = _rtk_mirror_vlanLeaky_get(pTxenable, pRxenable);    
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_mirror_isolationLeaky_set
+ * Description:
+ *      Set mirror Isolation leaky.
+ * Input:
+ *      txenable - TX leaky enable.
+ *      rxenable - RX leaky enable.
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_ENABLE       - Invalid enable input
+ * Note:
+ *      The API is to set the mirror isolation leaky function, which allows mirrored packets to reach the mirror port despite port isolation.
+ */
+rtk_api_ret_t rtk_mirror_isolationLeaky_set(rtk_enable_t txenable, rtk_enable_t rxenable)
+{
+    rtk_api_ret_t retVal;
+    
+    RTK_API_LOCK();
+    retVal = _rtk_mirror_isolationLeaky_set(txenable, rxenable);    
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_mirror_isolationLeaky_get
+ * Description:
+ *      Get mirror isolation leaky.
+ * Input:
+ *      None
+ * Output:
+ *      pTxenable - TX leaky enable.
+ *      pRxenable - RX leaky enable.
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_INPUT        - Invalid input parameters.
+ * Note:
+ *      The API is to get mirror isolation leaky status.
+ */
+rtk_api_ret_t rtk_mirror_isolationLeaky_get(rtk_enable_t *pTxenable, rtk_enable_t *pRxenable)
+{
+    rtk_api_ret_t retVal;
+    
+    RTK_API_LOCK();
+    retVal = _rtk_mirror_isolationLeaky_get(pTxenable, pRxenable);    
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_mirror_keep_set
+ * Description:
+ *      Set mirror packet format keep.
+ * Input:
+ *      mode - Mirror keep mode.
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_ENABLE       - Invalid enable input
+ * Note:
+ *      The API is to set the mirror keep mode.
+ *      The mirror keep modes are as follows:
+ *      - MIRROR_FOLLOW_VLAN
+ *      - MIRROR_KEEP_ORIGINAL
+ *      - MIRROR_KEEP_END
+ */
+rtk_api_ret_t rtk_mirror_keep_set(rtk_mirror_keep_t mode)
+{
+    rtk_api_ret_t retVal;
+    
+    RTK_API_LOCK();
+    retVal = _rtk_mirror_keep_set(mode);    
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
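+
+/*
+ * Usage sketch (illustrative only): keep mirrored frames in their
+ * original ingress format instead of rewriting them according to the
+ * monitor port's VLAN configuration.
+ *
+ *     rtk_mirror_keep_set(MIRROR_KEEP_ORIGINAL);
+ */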
+
+/* Function Name:
+ *      rtk_mirror_keep_get
+ * Description:
+ *      Get mirror packet format keep.
+ * Input:
+ *      None
+ * Output:
+ *      pMode - Mirror keep mode.
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_INPUT        - Invalid input parameters.
+ * Note:
+ *      The API is to get the mirror keep mode.
+ *      The mirror keep modes are as follows:
+ *      - MIRROR_FOLLOW_VLAN
+ *      - MIRROR_KEEP_ORIGINAL
+ *      - MIRROR_KEEP_END
+ */
+rtk_api_ret_t rtk_mirror_keep_get(rtk_mirror_keep_t *pMode)
+{
+    rtk_api_ret_t retVal;
+    
+    RTK_API_LOCK();
+    retVal = _rtk_mirror_keep_get(pMode);    
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_mirror_override_set
+ * Description:
+ *      Set port mirror override function.
+ * Input:
+ *      rxMirror        - 1: output mirrored packet, 0: output normal forward packet
+ *      txMirror        - 1: output mirrored packet, 0: output normal forward packet
+ *      aclMirror       - 1: output mirrored packet, 0: output normal forward packet
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ * Note:
+ *      The API is to set the mirror override function.
+ *      This function controls the output format when a port outputs a
+ *      normally forwarded and a mirrored packet at the same time.
+ */
+rtk_api_ret_t rtk_mirror_override_set(rtk_enable_t rxMirror, rtk_enable_t txMirror, rtk_enable_t aclMirror)
+{
+    rtk_api_ret_t retVal;
+    
+    RTK_API_LOCK();
+    retVal = _rtk_mirror_override_set(rxMirror, txMirror, aclMirror);    
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_mirror_override_get
+ * Description:
+ *      Get port mirror override function.
+ * Input:
+ *      None
+ * Output:
+ *      pRxMirror       - 1: output mirrored packet, 0: output normal forward packet
+ *      pTxMirror       - 1: output mirrored packet, 0: output normal forward packet
+ *      pAclMirror      - 1: output mirrored packet, 0: output normal forward packet
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_NULL_POINTER - Null Pointer
+ * Note:
+ *      The API is to get the mirror override function.
+ *      This function controls the output format when a port outputs a
+ *      normally forwarded and a mirrored packet at the same time.
+ */
+rtk_api_ret_t rtk_mirror_override_get(rtk_enable_t *pRxMirror, rtk_enable_t *pTxMirror, rtk_enable_t *pAclMirror)
+{
+    rtk_api_ret_t retVal;
+    
+    RTK_API_LOCK();
+    retVal = _rtk_mirror_override_get(pRxMirror, pTxMirror, pAclMirror);    
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
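+
+/*
+ * Usage sketch (illustrative only): when a normal forwarded copy and a
+ * mirrored copy would leave the monitor port at once, output the mirrored
+ * copy for RX- and ACL-mirrored traffic but the normal copy for
+ * TX-mirrored traffic.  The chosen values are example assumptions.
+ *
+ *     rtk_mirror_override_set(ENABLED, DISABLED, ENABLED);
+ */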
diff -Nruw linux-4.4.115-fbx/drivers/misc/freebox./rtlapi/mirror.h linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/mirror.h
--- linux-4.4.115-fbx/drivers/misc/freebox./rtlapi/mirror.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/mirror.h	2019-01-22 16:16:24.707257309 +0100
@@ -0,0 +1,274 @@
+/*
+ * Copyright (C) 2013 Realtek Semiconductor Corp.
+ * All Rights Reserved.
+ *
+ * This program is the proprietary software of Realtek Semiconductor
+ * Corporation and/or its licensors, and only be used, duplicated,
+ * modified or distributed under the authorized license from Realtek.
+ *
+ * ANY USE OF THE SOFTWARE OTHER THAN AS AUTHORIZED UNDER
+ * THIS LICENSE OR COPYRIGHT LAW IS PROHIBITED.
+ *
+ * Purpose : RTL8367/RTL8367C switch high-level API
+ *
+ * Feature : The file includes the Mirror module high-layer API definitions
+ *
+ */
+
+#ifndef __RTK_API_MIRROR_H__
+#define __RTK_API_MIRROR_H__
+
+typedef enum rtk_mirror_keep_e
+{
+    MIRROR_FOLLOW_VLAN = 0,
+    MIRROR_KEEP_ORIGINAL,
+    MIRROR_KEEP_END
+}rtk_mirror_keep_t;
+
+
+/* Function Name:
+ *      rtk_mirror_portBased_set
+ * Description:
+ *      Set port mirror function.
+ * Input:
+ *      mirroring_port          - Monitor port.
+ *      pMirrored_rx_portmask   - Rx mirror port mask.
+ *      pMirrored_tx_portmask   - Tx mirror port mask.
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_PORT_ID      - Invalid port number
+ *      RT_ERR_PORT_MASK    - Invalid portmask.
+ * Note:
+ *      The API is to set the mirror function of the source ports and the monitor port.
+ *      The monitor port can only be set to one port, and the TX and RX mirrored
+ *      ports should be identical.
+ */
+extern rtk_api_ret_t rtk_mirror_portBased_set(rtk_port_t mirroring_port, rtk_portmask_t *pMirrored_rx_portmask, rtk_portmask_t *pMirrored_tx_portmask);
+
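+/*
+ * Illustrative usage sketch (not part of the original Realtek sources):
+ * mirror the RX and TX traffic of one source port to a monitor port. The
+ * RTK_PORTMASK_CLEAR/RTK_PORTMASK_PORT_SET helpers and the UTP_PORT*
+ * identifiers are assumed from rtk_switch.h in this tree; adjust if it
+ * names them differently.
+ */
+#if 0
+static rtk_api_ret_t example_port_mirror(void)
+{
+    rtk_portmask_t mask;
+
+    /* Mirror traffic of UTP port 1, monitored on UTP port 4 */
+    RTK_PORTMASK_CLEAR(mask);
+    RTK_PORTMASK_PORT_SET(mask, UTP_PORT1);
+
+    /* TX and RX mirrored port masks should be identical (see note above) */
+    return rtk_mirror_portBased_set(UTP_PORT4, &mask, &mask);
+}
+#endif
+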
+/* Function Name:
+ *      rtk_mirror_portBased_get
+ * Description:
+ *      Get port mirror function.
+ * Input:
+ *      None
+ * Output:
+ *      pMirroring_port         - Monitor port.
+ *      pMirrored_rx_portmask   - Rx mirror port mask.
+ *      pMirrored_tx_portmask   - Tx mirror port mask.
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_INPUT        - Invalid input parameters.
+ * Note:
+ *      The API is to get the mirror function of the source ports and the monitor port.
+ */
+extern rtk_api_ret_t rtk_mirror_portBased_get(rtk_port_t* pMirroring_port, rtk_portmask_t *pMirrored_rx_portmask, rtk_portmask_t *pMirrored_tx_portmask);
+
+/* Function Name:
+ *      rtk_mirror_portIso_set
+ * Description:
+ *      Set mirror port isolation.
+ * Input:
+ *      enable - Mirror isolation status.
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_ENABLE       - Invalid enable input
+ * Note:
+ *      The API is to set the mirror isolation function, which prevents normal forwarded packets from reaching the mirror port.
+ */
+extern rtk_api_ret_t rtk_mirror_portIso_set(rtk_enable_t enable);
+
+/* Function Name:
+ *      rtk_mirror_portIso_get
+ * Description:
+ *      Get mirror port isolation.
+ * Input:
+ *      None
+ * Output:
+ *      pEnable - Mirror isolation status.
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_INPUT        - Invalid input parameters.
+ * Note:
+ *      The API is to get mirror isolation status.
+ */
+extern rtk_api_ret_t rtk_mirror_portIso_get(rtk_enable_t *pEnable);
+
+/* Function Name:
+ *      rtk_mirror_vlanLeaky_set
+ * Description:
+ *      Set mirror VLAN leaky.
+ * Input:
+ *      txenable - TX leaky enable.
+ *      rxenable - RX leaky enable.
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_ENABLE       - Invalid enable input
+ * Note:
+ *      The API is to set the mirror VLAN leaky function, which allows forwarding packets to the mirror port across VLAN boundaries.
+ */
+extern rtk_api_ret_t rtk_mirror_vlanLeaky_set(rtk_enable_t txenable, rtk_enable_t rxenable);
+
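+/*
+ * Illustrative usage sketch (not part of the original Realtek sources),
+ * assuming the usual ENABLED enumerator of rtk_enable_t: let mirrored
+ * traffic leak across VLAN boundaries in both directions.
+ */
+#if 0
+static rtk_api_ret_t example_mirror_vlan_leaky(void)
+{
+    return rtk_mirror_vlanLeaky_set(ENABLED, ENABLED);
+}
+#endif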
+
+/* Function Name:
+ *      rtk_mirror_vlanLeaky_get
+ * Description:
+ *      Get mirror VLAN leaky.
+ * Input:
+ *      None
+ * Output:
+ *      pTxenable - TX leaky enable.
+ *      pRxenable - RX leaky enable.
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_INPUT        - Invalid input parameters.
+ * Note:
+ *      The API is to get mirror VLAN leaky status.
+ */
+extern rtk_api_ret_t rtk_mirror_vlanLeaky_get(rtk_enable_t *pTxenable, rtk_enable_t *pRxenable);
+
+/* Function Name:
+ *      rtk_mirror_isolationLeaky_set
+ * Description:
+ *      Set mirror Isolation leaky.
+ * Input:
+ *      txenable - TX leaky enable.
+ *      rxenable - RX leaky enable.
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_ENABLE       - Invalid enable input
+ * Note:
+ *      The API is to set the mirror isolation leaky function, which allows forwarding packets to the mirror port despite port isolation.
+ */
+extern rtk_api_ret_t rtk_mirror_isolationLeaky_set(rtk_enable_t txenable, rtk_enable_t rxenable);
+
+/* Function Name:
+ *      rtk_mirror_isolationLeaky_get
+ * Description:
+ *      Get mirror isolation leaky.
+ * Input:
+ *      None
+ * Output:
+ *      pTxenable - TX leaky enable.
+ *      pRxenable - RX leaky enable.
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_INPUT        - Invalid input parameters.
+ * Note:
+ *      The API is to get mirror isolation leaky status.
+ */
+extern rtk_api_ret_t rtk_mirror_isolationLeaky_get(rtk_enable_t *pTxenable, rtk_enable_t *pRxenable);
+
+/* Function Name:
+ *      rtk_mirror_keep_set
+ * Description:
+ *      Set mirror packet format keep.
+ * Input:
+ *      mode - Mirror keep mode.
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_ENABLE       - Invalid enable input
+ * Note:
+ *      The API is to set the mirror keep mode.
+ *      The mirror keep mode is one of the following:
+ *      - MIRROR_FOLLOW_VLAN
+ *      - MIRROR_KEEP_ORIGINAL
+ *      - MIRROR_KEEP_END
+ */
+extern rtk_api_ret_t rtk_mirror_keep_set(rtk_mirror_keep_t mode);
+
+
+/* Function Name:
+ *      rtk_mirror_keep_get
+ * Description:
+ *      Get mirror packet format keep.
+ * Input:
+ *      None
+ * Output:
+ *      pMode - Mirror keep mode.
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_INPUT        - Invalid input parameters.
+ * Note:
+ *      The API is to get the mirror keep mode.
+ *      The mirror keep mode is one of the following:
+ *      - MIRROR_FOLLOW_VLAN
+ *      - MIRROR_KEEP_ORIGINAL
+ *      - MIRROR_KEEP_END
+ */
+extern rtk_api_ret_t rtk_mirror_keep_get(rtk_mirror_keep_t *pMode);
+
+/* Function Name:
+ *      rtk_mirror_override_set
+ * Description:
+ *      Set port mirror override function.
+ * Input:
+ *      rxMirror        - 1: output mirrored packet, 0: output normal forward packet
+ *      txMirror        - 1: output mirrored packet, 0: output normal forward packet
+ *      aclMirror       - 1: output mirrored packet, 0: output normal forward packet
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ * Note:
+ *      The API is to set the mirror override function.
+ *      This function controls the output format when a port outputs a
+ *      normal forwarded and a mirrored packet at the same time.
+ */
+extern rtk_api_ret_t rtk_mirror_override_set(rtk_enable_t rxMirror, rtk_enable_t txMirror, rtk_enable_t aclMirror);
+
+/* Function Name:
+ *      rtk_mirror_override_get
+ * Description:
+ *      Get port mirror override function.
+ * Input:
+ *      None
+ * Output:
+ *      pRxMirror       - 1: output mirrored packet, 0: output normal forward packet
+ *      pTxMirror       - 1: output mirrored packet, 0: output normal forward packet
+ *      pAclMirror      - 1: output mirrored packet, 0: output normal forward packet
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_NULL_POINTER - Null Pointer
+ * Note:
+ *      The API is to get the mirror override function.
+ *      This function controls the output format when a port outputs a
+ *      normal forwarded and a mirrored packet at the same time.
+ */
+extern rtk_api_ret_t rtk_mirror_override_get(rtk_enable_t *pRxMirror, rtk_enable_t *pTxMirror, rtk_enable_t *pAclMirror);
+
+#endif /* __RTK_API_MIRROR_H__ */
+
diff -Nruw linux-4.4.115-fbx/drivers/misc/freebox./rtlapi/oam.c linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/oam.c
--- linux-4.4.115-fbx/drivers/misc/freebox./rtlapi/oam.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/oam.c	2019-01-22 16:16:24.707257309 +0100
@@ -0,0 +1,324 @@
+/*
+ * Copyright (C) 2013 Realtek Semiconductor Corp.
+ * All Rights Reserved.
+ *
+ * This program is the proprietary software of Realtek Semiconductor
+ * Corporation and/or its licensors, and only be used, duplicated,
+ * modified or distributed under the authorized license from Realtek.
+ *
+ * ANY USE OF THE SOFTWARE OTHER THAN AS AUTHORIZED UNDER
+ * THIS LICENSE OR COPYRIGHT LAW IS PROHIBITED.
+ *
+ * $Revision: 79629 $
+ * $Date: 2017-06-14 18:08:03 +0800 (Wed, 14 Jun 2017) $
+ *
+ * Purpose : RTK switch high-level API for RTL8367/RTL8367C
+ * Feature : Here is a list of all functions and variables in the OAM (802.3ah) module.
+ *
+ */
+
+#include <rtk_switch.h>
+#include <rtk_error.h>
+#include <oam.h>
+#include <string.h>
+
+#include <rtl8367c_asicdrv.h>
+#include <rtl8367c_asicdrv_oam.h>
+
+
+/* Module Name : OAM */
+
+static rtk_api_ret_t _rtk_oam_init(void)
+{
+    return RT_ERR_OK;
+} /* end of rtk_oam_init */
+
+static rtk_api_ret_t _rtk_oam_state_set(rtk_enable_t enabled)
+{
+    rtk_api_ret_t retVal;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if (enabled >= RTK_ENABLE_END)
+        return RT_ERR_INPUT;
+
+    if ((retVal = rtl8367c_setAsicOamEnable(enabled))!=RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_oam_state_get(rtk_enable_t *pEnabled)
+{
+    rtk_api_ret_t retVal;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if ((retVal = rtl8367c_getAsicOamEnable(pEnabled))!=RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_oam_parserAction_set(rtk_port_t port, rtk_oam_parser_act_t action)
+{
+    rtk_api_ret_t retVal;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    /* Check Port Valid */
+    RTK_CHK_PORT_VALID(port);
+
+    if (action >= OAM_PARSER_ACTION_END)
+        return RT_ERR_INPUT;
+
+    if ((retVal = rtl8367c_setAsicOamParser(rtk_switch_port_L2P_get(port), action))!=RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_oam_parserAction_get(rtk_port_t port, rtk_oam_parser_act_t *pAction)
+{
+    rtk_api_ret_t retVal;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    /* Check Port Valid */
+    RTK_CHK_PORT_VALID(port);
+
+    if ((retVal = rtl8367c_getAsicOamParser(rtk_switch_port_L2P_get(port), pAction))!=RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_oam_multiplexerAction_set(rtk_port_t port, rtk_oam_multiplexer_act_t action)
+{
+    rtk_api_ret_t retVal;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    /* Check Port Valid */
+    RTK_CHK_PORT_VALID(port);
+
+    if (action >= OAM_MULTIPLEXER_ACTION_END)
+        return RT_ERR_INPUT;
+
+    if ((retVal = rtl8367c_setAsicOamMultiplexer(rtk_switch_port_L2P_get(port), action))!=RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_oam_multiplexerAction_get(rtk_port_t port, rtk_oam_multiplexer_act_t *pAction)
+{
+    rtk_api_ret_t retVal;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    /* Check Port Valid */
+    RTK_CHK_PORT_VALID(port);
+
+    if ((retVal = rtl8367c_getAsicOamMultiplexer(rtk_switch_port_L2P_get(port), pAction))!=RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+/* Function Name:
+ *      rtk_oam_init
+ * Description:
+ *      Initialize the OAM module.
+ * Input:
+ *      None
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK
+ *      RT_ERR_FAILED
+ * Note:
+ *      The OAM module must be initialized before calling any OAM APIs.
+ */
+rtk_api_ret_t rtk_oam_init(void)
+{
+    rtk_api_ret_t retVal;
+    
+    RTK_API_LOCK();
+    retVal = _rtk_oam_init();    
+    RTK_API_UNLOCK();
+
+    return retVal;
+} /* end of rtk_oam_init */
+
+
+/* Function Name:
+ *      rtk_oam_state_set
+ * Description:
+ *      This API sets the OAM state.
+ * Input:
+ *      enabled     - OAM state
+ * Output:
+ *      None.
+ * Return:
+ *      RT_ERR_OK              - OK
+ *      RT_ERR_FAILED          - Failed
+ *      RT_ERR_SMI             - SMI access error
+ *      RT_ERR_INPUT           - Error parameter
+ * Note:
+ *      This API sets the OAM state.
+ */
+rtk_api_ret_t rtk_oam_state_set(rtk_enable_t enabled)
+{
+    rtk_api_ret_t retVal;
+    
+    RTK_API_LOCK();
+    retVal = _rtk_oam_state_set(enabled);    
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_oam_state_get
+ * Description:
+ *      This API gets the OAM state.
+ * Input:
+ *      None.
+ * Output:
+ *      pEnabled        - OAM state
+ * Return:
+ *      RT_ERR_OK              - OK
+ *      RT_ERR_FAILED          - Failed
+ *      RT_ERR_SMI             - SMI access error
+ *      RT_ERR_INPUT           - Error parameter
+ * Note:
+ *      This API gets the current OAM state.
+ */
+rtk_api_ret_t rtk_oam_state_get(rtk_enable_t *pEnabled)
+{
+    rtk_api_ret_t retVal;
+    
+    RTK_API_LOCK();
+    retVal = _rtk_oam_state_get(pEnabled);    
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+
+
+/* Function Name:
+ *      rtk_oam_parserAction_set
+ * Description:
+ *      Set OAM parser action
+ * Input:
+ *      port    - port id
+ *      action  - parser action
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK
+ *      RT_ERR_FAILED
+ *      RT_ERR_PORT_ID      - invalid port id
+ * Note:
+ *      None
+ */
+rtk_api_ret_t  rtk_oam_parserAction_set(rtk_port_t port, rtk_oam_parser_act_t action)
+{
+    rtk_api_ret_t retVal;
+    
+    RTK_API_LOCK();
+    retVal = _rtk_oam_parserAction_set(port, action);    
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_oam_parserAction_get
+ * Description:
+ *      Get OAM parser action
+ * Input:
+ *      port    - port id
+ * Output:
+ *      pAction  - parser action
+ * Return:
+ *      RT_ERR_OK
+ *      RT_ERR_FAILED
+ *      RT_ERR_PORT_ID      - invalid port id
+ * Note:
+ *      None
+ */
+rtk_api_ret_t  rtk_oam_parserAction_get(rtk_port_t port, rtk_oam_parser_act_t *pAction)
+{
+    rtk_api_ret_t retVal;
+    
+    RTK_API_LOCK();
+    retVal = _rtk_oam_parserAction_get(port, pAction);    
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+
+/* Function Name:
+ *      rtk_oam_multiplexerAction_set
+ * Description:
+ *      Set OAM multiplexer action
+ * Input:
+ *      port    - port id
+ *      action  - multiplexer action
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK
+ *      RT_ERR_FAILED
+ *      RT_ERR_PORT_ID      - invalid port id
+ * Note:
+ *      None
+ */
+rtk_api_ret_t  rtk_oam_multiplexerAction_set(rtk_port_t port, rtk_oam_multiplexer_act_t action)
+{
+    rtk_api_ret_t retVal;
+    
+    RTK_API_LOCK();
+    retVal = _rtk_oam_multiplexerAction_set(port, action);    
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_oam_multiplexerAction_get
+ * Description:
+ *      Get OAM multiplexer action
+ * Input:
+ *      port    - port id
+ * Output:
+ *      pAction  - multiplexer action
+ * Return:
+ *      RT_ERR_OK
+ *      RT_ERR_FAILED
+ *      RT_ERR_PORT_ID      - invalid port id
+ * Note:
+ *      None
+ */
+rtk_api_ret_t  rtk_oam_multiplexerAction_get(rtk_port_t port, rtk_oam_multiplexer_act_t *pAction)
+{
+    rtk_api_ret_t retVal;
+    
+    RTK_API_LOCK();
+    retVal = _rtk_oam_multiplexerAction_get(port, pAction);    
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+
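+/*
+ * Illustrative usage sketch (not part of the original Realtek sources):
+ * bring up 802.3ah OAM and loop received OAMPDUs back on one port.
+ * UTP_PORT0 and the ENABLED enumerator are assumed from the usual
+ * rtk_switch.h/rtk_types.h definitions in this tree.
+ */
+#if 0
+static rtk_api_ret_t example_oam_loopback(void)
+{
+    rtk_api_ret_t ret;
+
+    if ((ret = rtk_oam_init()) != RT_ERR_OK)
+        return ret;
+
+    if ((ret = rtk_oam_state_set(ENABLED)) != RT_ERR_OK)
+        return ret;
+
+    /* Parser loops received OAMPDUs back; multiplexer forwards to CPU only */
+    if ((ret = rtk_oam_parserAction_set(UTP_PORT0, OAM_PARSER_ACTION_LOOPBACK)) != RT_ERR_OK)
+        return ret;
+
+    return rtk_oam_multiplexerAction_set(UTP_PORT0, OAM_MULTIPLEXER_ACTION_CPUONLY);
+}
+#endif
+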
diff -Nruw linux-4.4.115-fbx/drivers/misc/freebox./rtlapi/oam.h linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/oam.h
--- linux-4.4.115-fbx/drivers/misc/freebox./rtlapi/oam.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/oam.h	2019-01-22 16:16:24.711257346 +0100
@@ -0,0 +1,190 @@
+/*
+ * Copyright (C) 2012 Realtek Semiconductor Corp.
+ * All Rights Reserved.
+ *
+ * This program is the proprietary software of Realtek Semiconductor
+ * Corporation and/or its licensors, and only be used, duplicated,
+ * modified or distributed under the authorized license from Realtek.
+ *
+ * ANY USE OF THE SOFTWARE OTHER THAN AS AUTHORIZED UNDER
+ * THIS LICENSE OR COPYRIGHT LAW IS PROHIBITED.
+ *
+ * $Revision: 76306 $
+ * $Date: 2017-03-08 15:13:58 +0800 (Wed, 08 Mar 2017) $
+ *
+ * Purpose : RTL8367/RTL8367C switch high-level API
+ *
+ * Feature : The file includes the following modules and sub-modules
+ *           (1) OAM (802.3ah) configuration
+ *
+ */
+
+#ifndef __RTK_OAM_H__
+#define __RTK_OAM_H__
+
+/*
+ * Symbol Definition
+ */
+
+
+/*
+ * Data Declaration
+ */
+
+
+/*
+ * Macro Declaration
+ */
+
+typedef enum rtk_oam_parser_act_e
+{
+    OAM_PARSER_ACTION_FORWARD = 0,
+    OAM_PARSER_ACTION_LOOPBACK,
+    OAM_PARSER_ACTION_DISCARD,
+    OAM_PARSER_ACTION_END,
+
+} rtk_oam_parser_act_t;
+
+typedef enum rtk_oam_multiplexer_act_e
+{
+    OAM_MULTIPLEXER_ACTION_FORWARD = 0,
+    OAM_MULTIPLEXER_ACTION_DISCARD,
+    OAM_MULTIPLEXER_ACTION_CPUONLY,
+    OAM_MULTIPLEXER_ACTION_END,
+
+} rtk_oam_multiplexer_act_t;
+
+
+/*
+ * Function Declaration
+ */
+
+/* Function Name:
+ *      rtk_oam_init
+ * Description:
+ *      Initialize the OAM module.
+ * Input:
+ *      None
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK
+ *      RT_ERR_FAILED
+ * Note:
+ *      The OAM module must be initialized before calling any OAM APIs.
+ */
+extern rtk_api_ret_t rtk_oam_init(void);
+
+/* Function Name:
+ *      rtk_oam_state_set
+ * Description:
+ *      This API sets the OAM state.
+ * Input:
+ *      enabled     - OAM state
+ * Output:
+ *      None.
+ * Return:
+ *      RT_ERR_OK              - OK
+ *      RT_ERR_FAILED          - Failed
+ *      RT_ERR_SMI             - SMI access error
+ *      RT_ERR_INPUT           - Error parameter
+ * Note:
+ *      This API sets the OAM state.
+ */
+extern rtk_api_ret_t rtk_oam_state_set(rtk_enable_t enabled);
+
+/* Function Name:
+ *      rtk_oam_state_get
+ * Description:
+ *      This API gets the OAM state.
+ * Input:
+ *      None.
+ * Output:
+ *      pEnabled        - OAM state
+ * Return:
+ *      RT_ERR_OK              - OK
+ *      RT_ERR_FAILED          - Failed
+ *      RT_ERR_SMI             - SMI access error
+ *      RT_ERR_INPUT           - Error parameter
+ * Note:
+ *      This API gets the current OAM state.
+ */
+extern rtk_api_ret_t rtk_oam_state_get(rtk_enable_t *pEnabled);
+
+
+/* Module Name : OAM */
+
+/* Function Name:
+ *      rtk_oam_parserAction_set
+ * Description:
+ *      Set OAM parser action
+ * Input:
+ *      port    - port id
+ *      action  - parser action
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK
+ *      RT_ERR_FAILED
+ *      RT_ERR_PORT_ID      - invalid port id
+ * Note:
+ *      None
+ */
+extern rtk_api_ret_t rtk_oam_parserAction_set(rtk_port_t port, rtk_oam_parser_act_t action);
+
+/* Function Name:
+ *      rtk_oam_parserAction_get
+ * Description:
+ *      Get OAM parser action
+ * Input:
+ *      port    - port id
+ * Output:
+ *      pAction  - parser action
+ * Return:
+ *      RT_ERR_OK
+ *      RT_ERR_FAILED
+ *      RT_ERR_PORT_ID      - invalid port id
+ * Note:
+ *      None
+ */
+extern rtk_api_ret_t rtk_oam_parserAction_get(rtk_port_t port, rtk_oam_parser_act_t *pAction);
+
+
+/* Function Name:
+ *      rtk_oam_multiplexerAction_set
+ * Description:
+ *      Set OAM multiplexer action
+ * Input:
+ *      port    - port id
+ *      action  - multiplexer action
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK
+ *      RT_ERR_FAILED
+ *      RT_ERR_PORT_ID      - invalid port id
+ * Note:
+ *      None
+ */
+extern rtk_api_ret_t rtk_oam_multiplexerAction_set(rtk_port_t port, rtk_oam_multiplexer_act_t action);
+
+/* Function Name:
+ *      rtk_oam_multiplexerAction_get
+ * Description:
+ *      Get OAM multiplexer action
+ * Input:
+ *      port    - port id
+ * Output:
+ *      pAction  - multiplexer action
+ * Return:
+ *      RT_ERR_OK
+ *      RT_ERR_FAILED
+ *      RT_ERR_PORT_ID      - invalid port id
+ * Note:
+ *      None
+ */
+extern rtk_api_ret_t rtk_oam_multiplexerAction_get(rtk_port_t port, rtk_oam_multiplexer_act_t *pAction);
+
+
+#endif /* __RTK_OAM_H__ */
+
diff -Nruw linux-4.4.115-fbx/drivers/misc/freebox./rtlapi/port.c linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/port.c
--- linux-4.4.115-fbx/drivers/misc/freebox./rtlapi/port.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/port.c	2019-01-22 16:16:24.711257346 +0100
@@ -0,0 +1,2861 @@
+/*
+ * Copyright (C) 2013 Realtek Semiconductor Corp.
+ * All Rights Reserved.
+ *
+ * This program is the proprietary software of Realtek Semiconductor
+ * Corporation and/or its licensors, and only be used, duplicated,
+ * modified or distributed under the authorized license from Realtek.
+ *
+ * ANY USE OF THE SOFTWARE OTHER THAN AS AUTHORIZED UNDER
+ * THIS LICENSE OR COPYRIGHT LAW IS PROHIBITED.
+ *
+ * $Revision: 82056 $
+ * $Date: 2017-09-08 11:41:31 +0800 (Fri, 08 Sep 2017) $
+ *
+ * Purpose : RTK switch high-level API for RTL8367/RTL8367C
+ * Feature : Here is a list of all functions and variables in Port module.
+ *
+ */
+
+#include <rtk_switch.h>
+#include <rtk_error.h>
+#include <port.h>
+#include <string.h>
+
+#include <rtl8367c_asicdrv.h>
+#include <rtl8367c_asicdrv_port.h>
+#include <rtl8367c_asicdrv_misc.h>
+#include <rtl8367c_asicdrv_portIsolation.h>
+
+#define FIBER_INIT_SIZE 1507
+CONST_T rtk_uint8 Fiber[FIBER_INIT_SIZE] = {
+0x02,0x04,0x41,0xE4,0xF5,0xA8,0xD2,0xAF,
+0x22,0x00,0x00,0x02,0x05,0x2D,0xE4,0x90,
+0x06,0x2A,0xF0,0xFD,0x7C,0x01,0x7F,0x3F,
+0x7E,0x1D,0x12,0x05,0xAF,0x7D,0x40,0x12,
+0x02,0x5F,0xE4,0xFF,0xFE,0xFD,0x80,0x08,
+0x12,0x05,0x9E,0x50,0x0C,0x12,0x05,0x8B,
+0xFC,0x90,0x06,0x24,0x12,0x03,0x76,0x80,
+0xEF,0xE4,0xF5,0xA8,0xD2,0xAF,0x7D,0x1F,
+0xFC,0x7F,0x49,0x7E,0x13,0x12,0x05,0xAF,
+0x12,0x05,0xD6,0x7D,0xD7,0x12,0x02,0x1E,
+0x7D,0x80,0x12,0x01,0xCA,0x7D,0x94,0x7C,
+0xF9,0x12,0x02,0x3B,0x7D,0x81,0x12,0x01,
+0xCA,0x7D,0xA2,0x7C,0x31,0x12,0x02,0x3B,
+0x7D,0x82,0x12,0x01,0xDF,0x7D,0x60,0x7C,
+0x69,0x12,0x02,0x43,0x7D,0x83,0x12,0x01,
+0xDF,0x7D,0x28,0x7C,0x97,0x12,0x02,0x43,
+0x7D,0x84,0x12,0x01,0xF4,0x7D,0x85,0x7C,
+0x9D,0x12,0x02,0x57,0x7D,0x23,0x12,0x01,
+0xF4,0x7D,0x10,0x7C,0xD8,0x12,0x02,0x57,
+0x7D,0x24,0x7C,0x04,0x12,0x02,0x28,0x7D,
+0x00,0x12,0x02,0x1E,0x7D,0x2F,0x12,0x02,
+0x09,0x7D,0x20,0x7C,0x0F,0x7F,0x02,0x7E,
+0x66,0x12,0x05,0xAF,0x7D,0x01,0x12,0x02,
+0x09,0x7D,0x04,0x7C,0x00,0x7F,0x01,0x7E,
+0x66,0x12,0x05,0xAF,0x7D,0x80,0x7C,0x00,
+0x7F,0x00,0x7E,0x66,0x12,0x05,0xAF,0x7F,
+0x02,0x7E,0x66,0x12,0x02,0x4B,0x44,0x02,
+0xFF,0x90,0x06,0x28,0xEE,0xF0,0xA3,0xEF,
+0xF0,0x44,0x04,0xFF,0x90,0x06,0x28,0xEE,
+0xF0,0xFC,0xA3,0xEF,0xF0,0xFD,0x7F,0x02,
+0x7E,0x66,0x12,0x05,0xAF,0x7D,0x04,0x7C,
+0x00,0x12,0x02,0x28,0x7D,0xB9,0x7C,0x15,
+0x7F,0xEB,0x7E,0x13,0x12,0x05,0xAF,0x7D,
+0x07,0x7C,0x00,0x7F,0xE7,0x7E,0x13,0x12,
+0x05,0xAF,0x7D,0x40,0x7C,0x11,0x7F,0x00,
+0x7E,0x62,0x12,0x05,0xAF,0x12,0x03,0x82,
+0x7D,0x41,0x12,0x02,0x5F,0xE4,0xFF,0xFE,
+0xFD,0x80,0x08,0x12,0x05,0x9E,0x50,0x0C,
+0x12,0x05,0x8B,0xFC,0x90,0x06,0x24,0x12,
+0x03,0x76,0x80,0xEF,0xC2,0x00,0xC2,0x01,
+0xD2,0xA9,0xD2,0x8C,0x7F,0x01,0x7E,0x62,
+0x12,0x02,0x4B,0x30,0xE2,0x05,0xE4,0xA3,
+0xF0,0x80,0xF1,0x90,0x06,0x2A,0xE0,0x70,
+0x12,0x12,0x01,0x89,0x90,0x06,0x2A,0x74,
+0x01,0xF0,0xE4,0x90,0x06,0x2D,0xF0,0xA3,
+0xF0,0x80,0xD9,0xC3,0x90,0x06,0x2E,0xE0,
+0x94,0x64,0x90,0x06,0x2D,0xE0,0x94,0x00,
+0x40,0xCA,0xE4,0xF0,0xA3,0xF0,0x12,0x01,
+0x89,0x90,0x06,0x2A,0x74,0x01,0xF0,0x80,
+0xBB,0x7D,0x04,0xFC,0x7F,0x02,0x7E,0x66,
+0x12,0x05,0xAF,0x7D,0x00,0x7C,0x04,0x7F,
+0x01,0x7E,0x66,0x12,0x05,0xAF,0x7D,0xC0,
+0x7C,0x00,0x7F,0x00,0x7E,0x66,0x12,0x05,
+0xAF,0xE4,0xFD,0xFC,0x7F,0x02,0x7E,0x66,
+0x12,0x05,0xAF,0x7D,0x00,0x7C,0x04,0x7F,
+0x01,0x7E,0x66,0x12,0x05,0xAF,0x7D,0xC0,
+0x7C,0x00,0x7F,0x00,0x7E,0x66,0x12,0x05,
+0xAF,0x22,0x7C,0x04,0x7F,0x01,0x7E,0x66,
+0x12,0x05,0xAF,0x7D,0xC0,0x7C,0x00,0x7F,
+0x00,0x7E,0x66,0x12,0x05,0xAF,0x22,0x7C,
+0x04,0x7F,0x01,0x7E,0x66,0x12,0x05,0xAF,
+0x7D,0xC0,0x7C,0x00,0x7F,0x00,0x7E,0x66,
+0x12,0x05,0xAF,0x22,0x7C,0x04,0x7F,0x01,
+0x7E,0x66,0x12,0x05,0xAF,0x7D,0xC0,0x7C,
+0x00,0x7F,0x00,0x7E,0x66,0x12,0x05,0xAF,
+0x22,0x7C,0x00,0x7F,0x01,0x7E,0x66,0x12,
+0x05,0xAF,0x7D,0xC0,0x7C,0x00,0x7F,0x00,
+0x7E,0x66,0x12,0x05,0xAF,0x22,0x7C,0x04,
+0x7F,0x02,0x7E,0x66,0x12,0x05,0xAF,0x22,
+0x7F,0x01,0x7E,0x66,0x12,0x05,0xAF,0x7D,
+0xC0,0x7C,0x00,0x7F,0x00,0x7E,0x66,0x12,
+0x05,0xAF,0x22,0x7F,0x02,0x7E,0x66,0x12,
+0x05,0xAF,0x22,0x7F,0x02,0x7E,0x66,0x12,
+0x05,0xAF,0x22,0x12,0x05,0x67,0x90,0x06,
+0x28,0xEE,0xF0,0xA3,0xEF,0xF0,0x22,0x7F,
+0x02,0x7E,0x66,0x12,0x05,0xAF,0x22,0x7C,
+0x00,0x7F,0x36,0x7E,0x13,0x12,0x05,0xAF,
+0x22,0xC5,0xF0,0xF8,0xA3,0xE0,0x28,0xF0,
+0xC5,0xF0,0xF8,0xE5,0x82,0x15,0x82,0x70,
+0x02,0x15,0x83,0xE0,0x38,0xF0,0x22,0x75,
+0xF0,0x08,0x75,0x82,0x00,0xEF,0x2F,0xFF,
+0xEE,0x33,0xFE,0xCD,0x33,0xCD,0xCC,0x33,
+0xCC,0xC5,0x82,0x33,0xC5,0x82,0x9B,0xED,
+0x9A,0xEC,0x99,0xE5,0x82,0x98,0x40,0x0C,
+0xF5,0x82,0xEE,0x9B,0xFE,0xED,0x9A,0xFD,
+0xEC,0x99,0xFC,0x0F,0xD5,0xF0,0xD6,0xE4,
+0xCE,0xFB,0xE4,0xCD,0xFA,0xE4,0xCC,0xF9,
+0xA8,0x82,0x22,0xB8,0x00,0xC1,0xB9,0x00,
+0x59,0xBA,0x00,0x2D,0xEC,0x8B,0xF0,0x84,
+0xCF,0xCE,0xCD,0xFC,0xE5,0xF0,0xCB,0xF9,
+0x78,0x18,0xEF,0x2F,0xFF,0xEE,0x33,0xFE,
+0xED,0x33,0xFD,0xEC,0x33,0xFC,0xEB,0x33,
+0xFB,0x10,0xD7,0x03,0x99,0x40,0x04,0xEB,
+0x99,0xFB,0x0F,0xD8,0xE5,0xE4,0xF9,0xFA,
+0x22,0x78,0x18,0xEF,0x2F,0xFF,0xEE,0x33,
+0xFE,0xED,0x33,0xFD,0xEC,0x33,0xFC,0xC9,
+0x33,0xC9,0x10,0xD7,0x05,0x9B,0xE9,0x9A,
+0x40,0x07,0xEC,0x9B,0xFC,0xE9,0x9A,0xF9,
+0x0F,0xD8,0xE0,0xE4,0xC9,0xFA,0xE4,0xCC,
+0xFB,0x22,0x75,0xF0,0x10,0xEF,0x2F,0xFF,
+0xEE,0x33,0xFE,0xED,0x33,0xFD,0xCC,0x33,
+0xCC,0xC8,0x33,0xC8,0x10,0xD7,0x07,0x9B,
+0xEC,0x9A,0xE8,0x99,0x40,0x0A,0xED,0x9B,
+0xFD,0xEC,0x9A,0xFC,0xE8,0x99,0xF8,0x0F,
+0xD5,0xF0,0xDA,0xE4,0xCD,0xFB,0xE4,0xCC,
+0xFA,0xE4,0xC8,0xF9,0x22,0xEB,0x9F,0xF5,
+0xF0,0xEA,0x9E,0x42,0xF0,0xE9,0x9D,0x42,
+0xF0,0xE8,0x9C,0x45,0xF0,0x22,0xE0,0xFC,
+0xA3,0xE0,0xFD,0xA3,0xE0,0xFE,0xA3,0xE0,
+0xFF,0x22,0xE0,0xF8,0xA3,0xE0,0xF9,0xA3,
+0xE0,0xFA,0xA3,0xE0,0xFB,0x22,0xEC,0xF0,
+0xA3,0xED,0xF0,0xA3,0xEE,0xF0,0xA3,0xEF,
+0xF0,0x22,0x12,0x03,0xF8,0x12,0x04,0x1A,
+0x44,0x40,0x12,0x04,0x0F,0x7D,0x03,0x7C,
+0x00,0x12,0x04,0x23,0x12,0x05,0xAF,0x12,
+0x03,0xF8,0x12,0x04,0x1A,0x54,0xBF,0x12,
+0x04,0x0F,0x7D,0x03,0x7C,0x00,0x12,0x03,
+0xD0,0x7F,0x02,0x7E,0x66,0x12,0x05,0x67,
+0xEF,0x54,0xFD,0x54,0xFE,0x12,0x04,0x33,
+0x12,0x03,0xD0,0x7F,0x02,0x7E,0x66,0x12,
+0x05,0x67,0xEF,0x44,0x02,0x44,0x01,0x12,
+0x04,0x33,0x12,0x04,0x23,0x02,0x05,0xAF,
+0x7F,0x01,0x7E,0x66,0x12,0x05,0xAF,0x7D,
+0xC0,0x7C,0x00,0x7F,0x00,0x7E,0x66,0x12,
+0x05,0xAF,0xE4,0xFD,0xFC,0x7F,0x01,0x7E,
+0x66,0x12,0x05,0xAF,0x7D,0x80,0x7C,0x00,
+0x7F,0x00,0x7E,0x66,0x12,0x05,0xAF,0x22,
+0x7D,0x03,0x7C,0x00,0x7F,0x01,0x7E,0x66,
+0x12,0x05,0xAF,0x7D,0x80,0x7C,0x00,0x7F,
+0x00,0x7E,0x66,0x12,0x05,0xAF,0x22,0xFD,
+0xAC,0x06,0x7F,0x02,0x7E,0x66,0x12,0x05,
+0xAF,0x22,0x7F,0x02,0x7E,0x66,0x12,0x05,
+0x67,0xEF,0x22,0x7F,0x01,0x7E,0x66,0x12,
+0x05,0xAF,0x7D,0xC0,0x7C,0x00,0x7F,0x00,
+0x7E,0x66,0x22,0xFD,0xAC,0x06,0x7F,0x02,
+0x7E,0x66,0x12,0x05,0xAF,0xE4,0xFD,0xFC,
+0x22,0x78,0x7F,0xE4,0xF6,0xD8,0xFD,0x75,
+0x81,0x3C,0x02,0x04,0x88,0x02,0x00,0x0E,
+0xE4,0x93,0xA3,0xF8,0xE4,0x93,0xA3,0x40,
+0x03,0xF6,0x80,0x01,0xF2,0x08,0xDF,0xF4,
+0x80,0x29,0xE4,0x93,0xA3,0xF8,0x54,0x07,
+0x24,0x0C,0xC8,0xC3,0x33,0xC4,0x54,0x0F,
+0x44,0x20,0xC8,0x83,0x40,0x04,0xF4,0x56,
+0x80,0x01,0x46,0xF6,0xDF,0xE4,0x80,0x0B,
+0x01,0x02,0x04,0x08,0x10,0x20,0x40,0x80,
+0x90,0x05,0xCB,0xE4,0x7E,0x01,0x93,0x60,
+0xBC,0xA3,0xFF,0x54,0x3F,0x30,0xE5,0x09,
+0x54,0x1F,0xFE,0xE4,0x93,0xA3,0x60,0x01,
+0x0E,0xCF,0x54,0xC0,0x25,0xE0,0x60,0xA8,
+0x40,0xB8,0xE4,0x93,0xA3,0xFA,0xE4,0x93,
+0xA3,0xF8,0xE4,0x93,0xA3,0xC8,0xC5,0x82,
+0xC8,0xCA,0xC5,0x83,0xCA,0xF0,0xA3,0xC8,
+0xC5,0x82,0xC8,0xCA,0xC5,0x83,0xCA,0xDF,
+0xE9,0xDE,0xE7,0x80,0xBE,0x75,0x0F,0x80,
+0x75,0x0E,0x7E,0x75,0x0D,0xAA,0x75,0x0C,
+0x83,0xE4,0xF5,0x10,0x75,0x0B,0xA0,0x75,
+0x0A,0xAC,0x75,0x09,0xB9,0x75,0x08,0x03,
+0x75,0x89,0x11,0x7B,0x60,0x7A,0x09,0xF9,
+0xF8,0xAF,0x0B,0xAE,0x0A,0xAD,0x09,0xAC,
+0x08,0x12,0x02,0xBB,0xAD,0x07,0xAC,0x06,
+0xC3,0xE4,0x9D,0xFD,0xE4,0x9C,0xFC,0x78,
+0x17,0xF6,0xAF,0x05,0xEF,0x08,0xF6,0x18,
+0xE6,0xF5,0x8C,0x08,0xE6,0xF5,0x8A,0x74,
+0x0D,0x2D,0xFD,0xE4,0x3C,0x18,0xF6,0xAF,
+0x05,0xEF,0x08,0xF6,0x75,0x88,0x10,0x53,
+0x8E,0xC7,0xD2,0xA9,0x22,0xC0,0xE0,0xC0,
+0xF0,0xC0,0x83,0xC0,0x82,0xC0,0xD0,0x75,
+0xD0,0x00,0xC0,0x00,0x78,0x17,0xE6,0xF5,
+0x8C,0x78,0x18,0xE6,0xF5,0x8A,0x90,0x06,
+0x2B,0xE4,0x75,0xF0,0x01,0x12,0x02,0x69,
+0x90,0x06,0x2D,0xE4,0x75,0xF0,0x01,0x12,
+0x02,0x69,0xD0,0x00,0xD0,0xD0,0xD0,0x82,
+0xD0,0x83,0xD0,0xF0,0xD0,0xE0,0x32,0xC2,
+0xAF,0xAD,0x07,0xAC,0x06,0x8C,0xA2,0x8D,
+0xA3,0x75,0xA0,0x01,0x00,0x00,0x00,0x00,
+0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xAE,
+0xA1,0xBE,0x00,0xF0,0xAE,0xA6,0xAF,0xA7,
+0xD2,0xAF,0x22,0x90,0x06,0x24,0x12,0x03,
+0x5E,0xEF,0x24,0x01,0xFF,0xE4,0x3E,0xFE,
+0xE4,0x3D,0xFD,0xE4,0x3C,0x22,0xE4,0x7F,
+0x20,0x7E,0x4E,0xFD,0xFC,0x90,0x06,0x24,
+0x12,0x03,0x6A,0xC3,0x02,0x03,0x4D,0xC2,
+0xAF,0xAB,0x07,0xAA,0x06,0x8A,0xA2,0x8B,
+0xA3,0x8C,0xA4,0x8D,0xA5,0x75,0xA0,0x03,
+0x00,0x00,0x00,0xAA,0xA1,0xBA,0x00,0xF8,
+0xD2,0xAF,0x22,0x42,0x06,0x2D,0x00,0x00,
+0x42,0x06,0x2B,0x00,0x00,0x00,0x12,0x05,
+0xDF,0x12,0x04,0xCD,0x02,0x00,0x03,0xE4,
+0xF5,0x8E,0x22};
+
+static rtk_api_ret_t _rtk_port_phyComboPortMedia_get(rtk_port_t port, rtk_port_media_t *pMedia);
+
+static rtk_api_ret_t _rtk_port_FiberModeAbility_set(rtk_port_t port, rtk_port_phy_ability_t *pAbility)
+{
+    rtk_api_ret_t   retVal;
+    rtk_uint32      regData;
+
+    /* Check Combo port or not */
+    RTK_CHK_PORT_IS_COMBO(port);
+
+    /* Flow Control */
+    if ((retVal = rtl8367c_getAsicReg(RTL8367C_REG_FIB0_CFG04, &regData)) != RT_ERR_OK)
+        return retVal;
+
+    if (pAbility->AsyFC == 1)
+        regData |= (0x0001 << 8);
+    else
+        regData &= ~(0x0001 << 8);
+
+    if (pAbility->FC == 1)
+        regData |= (0x0001 << 7);
+    else
+        regData &= ~(0x0001 << 7);
+
+    if ((retVal = rtl8367c_setAsicReg(RTL8367C_REG_FIB0_CFG04, regData)) != RT_ERR_OK)
+        return retVal;
+
+    /* Speed ability */
+    if( (pAbility->Full_1000 == 1) && (pAbility->Full_100 == 1) && (pAbility->AutoNegotiation == 1) )
+    {
+        if ((retVal = rtl8367c_setAsicRegBit(RTL8367C_REG_FIBER_CFG_1, RTL8367C_SDS_FRC_MODE_OFFSET, 0)) != RT_ERR_OK)
+            return retVal;
+
+        if ((retVal = rtl8367c_setAsicRegBits(RTL8367C_REG_FIBER_CFG_1, RTL8367C_SDS_MODE_MASK, 7)) != RT_ERR_OK)
+            return retVal;
+
+        if ((retVal = rtl8367c_setAsicReg(RTL8367C_REG_FIB0_CFG00, 0x1140)) != RT_ERR_OK)
+            return retVal;
+    }
+    else if(pAbility->Full_1000 == 1)
+    {
+        if ((retVal = rtl8367c_setAsicRegBit(RTL8367C_REG_FIBER_CFG_1, RTL8367C_SDS_FRC_MODE_OFFSET, 1)) != RT_ERR_OK)
+            return retVal;
+
+        if ((retVal = rtl8367c_setAsicRegBits(RTL8367C_REG_FIBER_CFG_1, RTL8367C_SDS_MODE_MASK, 4)) != RT_ERR_OK)
+            return retVal;
+
+        if(pAbility->AutoNegotiation == 1)
+        {
+            if ((retVal = rtl8367c_setAsicReg(RTL8367C_REG_FIB0_CFG00, 0x1140)) != RT_ERR_OK)
+                return retVal;
+        }
+        else
+        {
+            if ((retVal = rtl8367c_setAsicReg(RTL8367C_REG_FIB0_CFG00, 0x0140)) != RT_ERR_OK)
+                return retVal;
+        }
+    }
+    else if(pAbility->Full_100 == 1)
+    {
+        if ((retVal = rtl8367c_setAsicRegBit(RTL8367C_REG_FIBER_CFG_1, RTL8367C_SDS_FRC_MODE_OFFSET, 1)) != RT_ERR_OK)
+            return retVal;
+
+        if ((retVal = rtl8367c_setAsicRegBits(RTL8367C_REG_FIBER_CFG_1, RTL8367C_SDS_MODE_MASK, 5)) != RT_ERR_OK)
+            return retVal;
+
+        if ((retVal = rtl8367c_setAsicReg(RTL8367C_REG_FIB0_CFG00, 0x2100)) != RT_ERR_OK)
+            return retVal;
+    }
+
+    /* Digital software reset */
+    if ((retVal = rtl8367c_setAsicReg(RTL8367C_REG_SDS_INDACS_ADR, 0x0003)) != RT_ERR_OK)
+        return retVal;
+
+    if ((retVal = rtl8367c_setAsicReg(RTL8367C_REG_SDS_INDACS_CMD, 0x0080)) != RT_ERR_OK)
+        return retVal;
+
+    if ((retVal = rtl8367c_getAsicReg(RTL8367C_REG_SDS_INDACS_DATA, &regData)) != RT_ERR_OK)
+        return retVal;
+
+    regData |= (0x0001 << 6);
+
+    if ((retVal = rtl8367c_setAsicReg(RTL8367C_REG_SDS_INDACS_DATA, regData)) != RT_ERR_OK)
+        return retVal;
+
+    if ((retVal = rtl8367c_setAsicReg(RTL8367C_REG_SDS_INDACS_ADR, 0x0003)) != RT_ERR_OK)
+        return retVal;
+
+    if ((retVal = rtl8367c_setAsicReg(RTL8367C_REG_SDS_INDACS_CMD, 0x00C0)) != RT_ERR_OK)
+        return retVal;
+
+    regData &= ~(0x0001 << 6);
+
+    if ((retVal = rtl8367c_setAsicReg(RTL8367C_REG_SDS_INDACS_DATA, regData)) != RT_ERR_OK)
+        return retVal;
+
+    if ((retVal = rtl8367c_setAsicReg(RTL8367C_REG_SDS_INDACS_ADR, 0x0003)) != RT_ERR_OK)
+        return retVal;
+
+    if ((retVal = rtl8367c_setAsicReg(RTL8367C_REG_SDS_INDACS_CMD, 0x00C0)) != RT_ERR_OK)
+        return retVal;
+
+    /* CDR reset */
+    if ((retVal = rtl8367c_setAsicReg(RTL8367C_REG_SDS_INDACS_DATA, 0x1401))!=RT_ERR_OK)
+        return retVal;
+
+    if ((retVal = rtl8367c_setAsicReg(RTL8367C_REG_SDS_INDACS_ADR, 0x0000))!=RT_ERR_OK)
+        return retVal;
+
+    if ((retVal = rtl8367c_setAsicReg(RTL8367C_REG_SDS_INDACS_CMD, 0x00C0))!=RT_ERR_OK)
+        return retVal;
+
+    if ((retVal = rtl8367c_setAsicReg(RTL8367C_REG_SDS_INDACS_DATA, 0x1403))!=RT_ERR_OK)
+        return retVal;
+
+    if ((retVal = rtl8367c_setAsicReg(RTL8367C_REG_SDS_INDACS_ADR, 0x0000))!=RT_ERR_OK)
+        return retVal;
+
+    if ((retVal = rtl8367c_setAsicReg(RTL8367C_REG_SDS_INDACS_CMD, 0x00C0))!=RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
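+/*
+ * The serdes registers above are reached indirectly: the target address is
+ * written to RTL8367C_REG_SDS_INDACS_ADR, the command to
+ * RTL8367C_REG_SDS_INDACS_CMD (0x0080 appears to select a read and 0x00C0 a
+ * write, judging from the sequences above), and the payload moves through
+ * RTL8367C_REG_SDS_INDACS_DATA. The 0x1140/0x0140/0x2100 values written to
+ * RTL8367C_REG_FIB0_CFG00 follow the standard MII BMCR layout (bit 12 = AN
+ * enable, bit 8 = full duplex, bits 13/6 = speed select). A hedged helper
+ * sketch for the indirect write, not part of the original sources:
+ */
+#if 0
+static rtk_api_ret_t example_sds_ind_write(rtk_uint32 addr, rtk_uint32 data)
+{
+    rtk_api_ret_t retVal;
+
+    if ((retVal = rtl8367c_setAsicReg(RTL8367C_REG_SDS_INDACS_DATA, data)) != RT_ERR_OK)
+        return retVal;
+
+    if ((retVal = rtl8367c_setAsicReg(RTL8367C_REG_SDS_INDACS_ADR, addr)) != RT_ERR_OK)
+        return retVal;
+
+    /* Issue the indirect write command */
+    return rtl8367c_setAsicReg(RTL8367C_REG_SDS_INDACS_CMD, 0x00C0);
+}
+#endif
+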
+static rtk_api_ret_t _rtk_port_FiberModeAbility_get(rtk_port_t port, rtk_port_phy_ability_t *pAbility)
+{
+    rtk_api_ret_t   retVal;
+    rtk_uint32      data, regData;
+
+    /* Check Combo port or not */
+    RTK_CHK_PORT_IS_COMBO(port);
+
+    memset(pAbility, 0x00, sizeof(rtk_port_phy_ability_t));
+
+    /* Flow Control */
+    if ((retVal = rtl8367c_setAsicRegBit(RTL8367C_REG_FIBER_CFG_1, RTL8367C_SDS_FRC_REG4_OFFSET, 1)) != RT_ERR_OK)
+        return retVal;
+
+    if ((retVal = rtl8367c_setAsicRegBit(RTL8367C_REG_FIBER_CFG_1, RTL8367C_SDS_FRC_REG4_FIB100_OFFSET, 0)) != RT_ERR_OK)
+        return retVal;
+
+    if ((retVal = rtl8367c_setAsicReg(RTL8367C_REG_SDS_INDACS_ADR, 0x0044)) != RT_ERR_OK)
+        return retVal;
+
+    if ((retVal = rtl8367c_setAsicReg(RTL8367C_REG_SDS_INDACS_CMD, 0x0080)) != RT_ERR_OK)
+        return retVal;
+
+    if ((retVal = rtl8367c_getAsicReg(RTL8367C_REG_SDS_INDACS_DATA, &regData)) != RT_ERR_OK)
+        return retVal;
+
+    if(regData & (0x0001 << 8))
+        pAbility->AsyFC = 1;
+
+    if(regData & (0x0001 << 7))
+        pAbility->FC = 1;
+
+    /* Speed ability */
+    if ((retVal = rtl8367c_getAsicRegBit(RTL8367C_REG_FIBER_CFG_1, RTL8367C_SDS_FRC_MODE_OFFSET, &data)) != RT_ERR_OK)
+            return retVal;
+
+    if(data == 0)
+    {
+        pAbility->AutoNegotiation = 1;
+        pAbility->Full_1000 = 1;
+        pAbility->Full_100 = 1;
+    }
+    else
+    {
+        if ((retVal = rtl8367c_getAsicRegBits(RTL8367C_REG_FIBER_CFG_1, RTL8367C_SDS_MODE_MASK, &data)) != RT_ERR_OK)
+            return retVal;
+
+        if(data == 4)
+        {
+            pAbility->Full_1000 = 1;
+
+            if ((retVal = rtl8367c_getAsicReg(RTL8367C_REG_FIB0_CFG00, &data)) != RT_ERR_OK)
+                return retVal;
+
+            if(data & 0x1000)
+                pAbility->AutoNegotiation = 1;
+            else
+                pAbility->AutoNegotiation = 0;
+        }
+        else if(data == 5)
+            pAbility->Full_100 = 1;
+        else
+            return RT_ERR_FAILED;
+    }
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_port_phyAutoNegoAbility_set(rtk_port_t port, rtk_port_phy_ability_t *pAbility)
+{
+    rtk_api_ret_t       retVal;
+    rtk_uint32          phyData;
+    rtk_uint32          phyEnMsk0;
+    rtk_uint32          phyEnMsk4;
+    rtk_uint32          phyEnMsk9;
+    rtk_port_media_t    media_type;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    /* Check Port Valid */
+    RTK_CHK_PORT_IS_UTP(port);
+
+    if(NULL == pAbility)
+        return RT_ERR_NULL_POINTER;
+
+    if (pAbility->Half_10 >= RTK_ENABLE_END || pAbility->Full_10 >= RTK_ENABLE_END ||
+       pAbility->Half_100 >= RTK_ENABLE_END || pAbility->Full_100 >= RTK_ENABLE_END ||
+       pAbility->Full_1000 >= RTK_ENABLE_END || pAbility->AutoNegotiation >= RTK_ENABLE_END ||
+       pAbility->AsyFC >= RTK_ENABLE_END || pAbility->FC >= RTK_ENABLE_END)
+        return RT_ERR_INPUT;
+
+    if (rtk_switch_isComboPort(port) == RT_ERR_OK)
+    {
+        if ((retVal = _rtk_port_phyComboPortMedia_get(port, &media_type)) != RT_ERR_OK)
+            return retVal;
+
+        if(media_type == PORT_MEDIA_FIBER)
+        {
+            return _rtk_port_FiberModeAbility_set(port, pAbility);
+        }
+    }
+
+    /*for PHY auto mode setup*/
+    pAbility->AutoNegotiation = 1;
+
+    phyEnMsk0 = 0;
+    phyEnMsk4 = 0;
+    phyEnMsk9 = 0;
+
+    if (1 == pAbility->Half_10)
+    {
+        /*10BASE-TX half duplex capable in reg 4.5*/
+        phyEnMsk4 = phyEnMsk4 | (1 << 5);
+
+        /*Speed selection [1:0] */
+        /* 11=Reserved*/
+        /* 10= 1000Mbps*/
+        /* 01= 100Mbps*/
+        /* 00= 10Mbps*/
+        phyEnMsk0 = phyEnMsk0 & (~(1 << 6));
+        phyEnMsk0 = phyEnMsk0 & (~(1 << 13));
+    }
+
+    if (1 == pAbility->Full_10)
+    {
+        /*10BASE-TX full duplex capable in reg 4.6*/
+        phyEnMsk4 = phyEnMsk4 | (1 << 6);
+        /*Speed selection [1:0] */
+        /* 11=Reserved*/
+        /* 10= 1000Mbps*/
+        /* 01= 100Mbps*/
+        /* 00= 10Mbps*/
+        phyEnMsk0 = phyEnMsk0 & (~(1 << 6));
+        phyEnMsk0 = phyEnMsk0 & (~(1 << 13));
+
+        /*Full duplex mode in reg 0.8*/
+        phyEnMsk0 = phyEnMsk0 | (1 << 8);
+
+    }
+
+    if (1 == pAbility->Half_100)
+    {
+        /*100BASE-TX half duplex capable in reg 4.7*/
+        phyEnMsk4 = phyEnMsk4 | (1 << 7);
+        /*Speed selection [1:0] */
+        /* 11=Reserved*/
+        /* 10= 1000Mbps*/
+        /* 01= 100Mbps*/
+        /* 00= 10Mbps*/
+        phyEnMsk0 = phyEnMsk0 & (~(1 << 6));
+        phyEnMsk0 = phyEnMsk0 | (1 << 13);
+    }
+
+
+    if (1 == pAbility->Full_100)
+    {
+        /*100BASE-TX full duplex capable in reg 4.8*/
+        phyEnMsk4 = phyEnMsk4 | (1 << 8);
+        /*Speed selection [1:0] */
+        /* 11=Reserved*/
+        /* 10= 1000Mbps*/
+        /* 01= 100Mbps*/
+        /* 00= 10Mbps*/
+        phyEnMsk0 = phyEnMsk0 & (~(1 << 6));
+        phyEnMsk0 = phyEnMsk0 | (1 << 13);
+        /*Full duplex mode in reg 0.8*/
+        phyEnMsk0 = phyEnMsk0 | (1 << 8);
+    }
+
+
+    if (1 == pAbility->Full_1000)
+    {
+        /*1000 BASE-T FULL duplex capable setting in reg 9.9*/
+        phyEnMsk9 = phyEnMsk9 | (1 << 9);
+
+        /*Speed selection [1:0] */
+        /* 11=Reserved*/
+        /* 10= 1000Mbps*/
+        /* 01= 100Mbps*/
+        /* 00= 10Mbps*/
+        phyEnMsk0 = phyEnMsk0 | (1 << 6);
+        phyEnMsk0 = phyEnMsk0 & (~(1 << 13));
+
+
+        /*Auto-Negotiation setting in reg 0.12*/
+        phyEnMsk0 = phyEnMsk0 | (1 << 12);
+
+     }
+
+    if (1 == pAbility->AutoNegotiation)
+    {
+        /*Auto-Negotiation setting in reg 0.12*/
+        phyEnMsk0 = phyEnMsk0 | (1 << 12);
+    }
+
+    if (1 == pAbility->AsyFC)
+    {
+        /*Asymmetric flow control in reg 4.11*/
+        phyEnMsk4 = phyEnMsk4 | (1 << 11);
+    }
+    if (1 == pAbility->FC)
+    {
+        /*Flow control in reg 4.10*/
+        phyEnMsk4 = phyEnMsk4 | (1 << 10);
+    }
+
+    /*1000 BASE-T control register setting*/
+    if ((retVal = rtl8367c_getAsicPHYReg(rtk_switch_port_L2P_get(port), PHY_1000_BASET_CONTROL_REG, &phyData)) != RT_ERR_OK)
+        return retVal;
+
+    phyData = (phyData & (~0x0200)) | phyEnMsk9 ;
+
+    if ((retVal = rtl8367c_setAsicPHYReg(rtk_switch_port_L2P_get(port), PHY_1000_BASET_CONTROL_REG, phyData)) != RT_ERR_OK)
+        return retVal;
+
+    /*Auto-Negotiation control register setting*/
+    if ((retVal = rtl8367c_getAsicPHYReg(rtk_switch_port_L2P_get(port), PHY_AN_ADVERTISEMENT_REG, &phyData)) != RT_ERR_OK)
+        return retVal;
+
+    phyData = (phyData & (~0x0DE0)) | phyEnMsk4;
+    if ((retVal = rtl8367c_setAsicPHYReg(rtk_switch_port_L2P_get(port), PHY_AN_ADVERTISEMENT_REG, phyData)) != RT_ERR_OK)
+        return retVal;
+
+    /*Control register setting and restart auto*/
+    if ((retVal = rtl8367c_getAsicPHYReg(rtk_switch_port_L2P_get(port), PHY_CONTROL_REG, &phyData)) != RT_ERR_OK)
+        return retVal;
+
+    phyData = (phyData & (~0x3140)) | phyEnMsk0;
+    /*If auto-negotiation capability is set, restart auto-negotiation*/
+    if (1 == pAbility->AutoNegotiation)
+    {
+        phyData = phyData | (1 << 9);
+    }
+
+    if ((retVal = rtl8367c_setAsicPHYReg(rtk_switch_port_L2P_get(port), PHY_CONTROL_REG, phyData)) != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
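+/*
+ * Illustrative usage sketch (not part of the original Realtek sources):
+ * advertise 100M/1000M full duplex with symmetric flow control on a UTP
+ * port, using the ability fields checked above. UTP_PORT0 is assumed from
+ * rtk_switch.h. Note the routine force-enables AutoNegotiation itself.
+ */
+#if 0
+static rtk_api_ret_t example_advertise_100f_1000f(void)
+{
+    rtk_port_phy_ability_t ability;
+
+    memset(&ability, 0x00, sizeof(ability));
+    ability.AutoNegotiation = 1;
+    ability.Full_100  = 1;
+    ability.Full_1000 = 1;
+    ability.FC        = 1;   /* pause */
+    ability.AsyFC     = 1;   /* asymmetric pause */
+
+    return _rtk_port_phyAutoNegoAbility_set(UTP_PORT0, &ability);
+}
+#endif
+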
+static rtk_api_ret_t _rtk_port_phyAutoNegoAbility_get(rtk_port_t port, rtk_port_phy_ability_t *pAbility)
+{
+    rtk_api_ret_t       retVal;
+    rtk_uint32          phyData0;
+    rtk_uint32          phyData4;
+    rtk_uint32          phyData9;
+    rtk_port_media_t    media_type;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    /* Check Port Valid */
+    RTK_CHK_PORT_IS_UTP(port);
+
+    if(NULL == pAbility)
+        return RT_ERR_NULL_POINTER;
+
+    if (rtk_switch_isComboPort(port) == RT_ERR_OK)
+    {
+        if ((retVal = _rtk_port_phyComboPortMedia_get(port, &media_type)) != RT_ERR_OK)
+            return retVal;
+
+        if(media_type == PORT_MEDIA_FIBER)
+        {
+            return _rtk_port_FiberModeAbility_get(port, pAbility);
+        }
+    }
+
+    /*Control register setting and restart auto*/
+    if ((retVal = rtl8367c_getAsicPHYReg(rtk_switch_port_L2P_get(port), PHY_CONTROL_REG, &phyData0)) != RT_ERR_OK)
+        return retVal;
+
+    /*Auto-Negotiation control register setting*/
+    if ((retVal = rtl8367c_getAsicPHYReg(rtk_switch_port_L2P_get(port), PHY_AN_ADVERTISEMENT_REG, &phyData4)) != RT_ERR_OK)
+        return retVal;
+
+    /*1000 BASE-T control register setting*/
+    if ((retVal = rtl8367c_getAsicPHYReg(rtk_switch_port_L2P_get(port), PHY_1000_BASET_CONTROL_REG, &phyData9)) != RT_ERR_OK)
+        return retVal;
+
+    if (phyData9 & (1 << 9))
+        pAbility->Full_1000 = 1;
+    else
+        pAbility->Full_1000 = 0;
+
+    if (phyData4 & (1 << 11))
+        pAbility->AsyFC = 1;
+    else
+        pAbility->AsyFC = 0;
+
+    if (phyData4 & (1 << 10))
+        pAbility->FC = 1;
+    else
+        pAbility->FC = 0;
+
+
+    if (phyData4 & (1 << 8))
+        pAbility->Full_100 = 1;
+    else
+        pAbility->Full_100 = 0;
+
+    if (phyData4 & (1 << 7))
+        pAbility->Half_100 = 1;
+    else
+        pAbility->Half_100 = 0;
+
+    if (phyData4 & (1 << 6))
+        pAbility->Full_10 = 1;
+    else
+        pAbility->Full_10 = 0;
+
+    if (phyData4 & (1 << 5))
+        pAbility->Half_10 = 1;
+    else
+        pAbility->Half_10 = 0;
+
+
+    if (phyData0 & (1 << 12))
+        pAbility->AutoNegotiation = 1;
+    else
+        pAbility->AutoNegotiation = 0;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_port_phyForceModeAbility_set(rtk_port_t port, rtk_port_phy_ability_t *pAbility)
+{
+     rtk_api_ret_t      retVal;
+     rtk_uint32         phyData;
+     rtk_uint32         phyEnMsk0;
+     rtk_uint32         phyEnMsk4;
+     rtk_uint32         phyEnMsk9;
+     rtk_port_media_t   media_type;
+
+     /* Check initialization state */
+     RTK_CHK_INIT_STATE();
+
+     /* Check Port Valid */
+     RTK_CHK_PORT_IS_UTP(port);
+
+     if(NULL == pAbility)
+        return RT_ERR_NULL_POINTER;
+
+     if (pAbility->Half_10 >= RTK_ENABLE_END || pAbility->Full_10 >= RTK_ENABLE_END ||
+        pAbility->Half_100 >= RTK_ENABLE_END || pAbility->Full_100 >= RTK_ENABLE_END ||
+        pAbility->Full_1000 >= RTK_ENABLE_END || pAbility->AutoNegotiation >= RTK_ENABLE_END ||
+        pAbility->AsyFC >= RTK_ENABLE_END || pAbility->FC >= RTK_ENABLE_END)
+         return RT_ERR_INPUT;
+
+     if (rtk_switch_isComboPort(port) == RT_ERR_OK)
+     {
+         if ((retVal = _rtk_port_phyComboPortMedia_get(port, &media_type)) != RT_ERR_OK)
+             return retVal;
+
+         if(media_type == PORT_MEDIA_FIBER)
+         {
+             return _rtk_port_FiberModeAbility_set(port, pAbility);
+         }
+     }
+
+     if (1 == pAbility->Full_1000)
+         return RT_ERR_INPUT;
+
+     /*for PHY force mode setup*/
+     pAbility->AutoNegotiation = 0;
+
+     phyEnMsk0 = 0;
+     phyEnMsk4 = 0;
+     phyEnMsk9 = 0;
+
+     if (1 == pAbility->Half_10)
+     {
+         /*10BASE-TX half duplex capable in reg 4.5*/
+         phyEnMsk4 = phyEnMsk4 | (1 << 5);
+
+         /*Speed selection [1:0] */
+         /* 11=Reserved*/
+         /* 10= 1000Mbps*/
+         /* 01= 100Mbps*/
+         /* 00= 10Mbps*/
+         phyEnMsk0 = phyEnMsk0 & (~(1 << 6));
+         phyEnMsk0 = phyEnMsk0 & (~(1 << 13));
+     }
+
+     if (1 == pAbility->Full_10)
+     {
+         /*10BASE-TX full duplex capable in reg 4.6*/
+         phyEnMsk4 = phyEnMsk4 | (1 << 6);
+         /*Speed selection [1:0] */
+         /* 11=Reserved*/
+         /* 10= 1000Mbps*/
+         /* 01= 100Mbps*/
+         /* 00= 10Mbps*/
+         phyEnMsk0 = phyEnMsk0 & (~(1 << 6));
+         phyEnMsk0 = phyEnMsk0 & (~(1 << 13));
+
+         /*Full duplex mode in reg 0.8*/
+         phyEnMsk0 = phyEnMsk0 | (1 << 8);
+
+     }
+
+     if (1 == pAbility->Half_100)
+     {
+         /*100BASE-TX half duplex capable in reg 4.7*/
+         phyEnMsk4 = phyEnMsk4 | (1 << 7);
+         /*Speed selection [1:0] */
+         /* 11=Reserved*/
+         /* 10= 1000Mbps*/
+         /* 01= 100Mbps*/
+         /* 00= 10Mbps*/
+         phyEnMsk0 = phyEnMsk0 & (~(1 << 6));
+         phyEnMsk0 = phyEnMsk0 | (1 << 13);
+     }
+
+
+     if (1 == pAbility->Full_100)
+     {
+         /*100BASE-TX full duplex capable in reg 4.8*/
+         phyEnMsk4 = phyEnMsk4 | (1 << 8);
+         /*Speed selection [1:0] */
+         /* 11=Reserved*/
+         /* 10= 1000Mbps*/
+         /* 01= 100Mbps*/
+         /* 00= 10Mbps*/
+         phyEnMsk0 = phyEnMsk0 & (~(1 << 6));
+         phyEnMsk0 = phyEnMsk0 | (1 << 13);
+         /*Full duplex mode in reg 0.8*/
+         phyEnMsk0 = phyEnMsk0 | (1 << 8);
+     }
+
+     if (1 == pAbility->AsyFC)
+     {
+         /*Asymmetric flow control in reg 4.11*/
+         phyEnMsk4 = phyEnMsk4 | (1 << 11);
+     }
+     if (1 == pAbility->FC)
+     {
+         /*Flow control in reg 4.10*/
+         phyEnMsk4 = phyEnMsk4 | (1 << 10);
+     }
+
+     /*1000 BASE-T control register setting*/
+     if ((retVal = rtl8367c_getAsicPHYReg(rtk_switch_port_L2P_get(port), PHY_1000_BASET_CONTROL_REG, &phyData)) != RT_ERR_OK)
+         return retVal;
+
+     phyData = (phyData & (~0x0200)) | phyEnMsk9 ;
+
+     if ((retVal = rtl8367c_setAsicPHYReg(rtk_switch_port_L2P_get(port), PHY_1000_BASET_CONTROL_REG, phyData)) != RT_ERR_OK)
+         return retVal;
+
+     /*Auto-Negotiation control register setting*/
+     if ((retVal = rtl8367c_getAsicPHYReg(rtk_switch_port_L2P_get(port), PHY_AN_ADVERTISEMENT_REG, &phyData)) != RT_ERR_OK)
+         return retVal;
+
+     phyData = (phyData & (~0x0DE0)) | phyEnMsk4;
+     if ((retVal = rtl8367c_setAsicPHYReg(rtk_switch_port_L2P_get(port), PHY_AN_ADVERTISEMENT_REG, phyData)) != RT_ERR_OK)
+         return retVal;
+
+     /*Control register setting and power off/on*/
+     phyData = phyEnMsk0 & (~(1 << 12));
+     phyData |= (1 << 11);   /* power down PHY, bit 11 should be set to 1 */
+     if ((retVal = rtl8367c_setAsicPHYReg(rtk_switch_port_L2P_get(port), PHY_CONTROL_REG, phyData)) != RT_ERR_OK)
+         return retVal;
+
+     phyData = phyData & (~(1 << 11));   /* power on PHY, bit 11 should be set to 0*/
+     if ((retVal = rtl8367c_setAsicPHYReg(rtk_switch_port_L2P_get(port), PHY_CONTROL_REG, phyData)) != RT_ERR_OK)
+         return retVal;
+
+     return RT_ERR_OK;
+}
+
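+/*
+ * Illustrative usage sketch (not part of the original Realtek sources):
+ * force a UTP port to 100M full duplex. Forced mode rejects Full_1000
+ * (see the RT_ERR_INPUT check above), and the routine applies the change
+ * by power-cycling the PHY via control-register bit 11. UTP_PORT0 is
+ * assumed from rtk_switch.h.
+ */
+#if 0
+static rtk_api_ret_t example_force_100f(void)
+{
+    rtk_port_phy_ability_t ability;
+
+    memset(&ability, 0x00, sizeof(ability));
+    ability.Full_100 = 1;
+    ability.FC       = 1;
+
+    return _rtk_port_phyForceModeAbility_set(UTP_PORT0, &ability);
+}
+#endif
+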
+static rtk_api_ret_t _rtk_port_phyForceModeAbility_get(rtk_port_t port, rtk_port_phy_ability_t *pAbility)
+{
+    rtk_api_ret_t       retVal;
+    rtk_uint32          phyData0;
+    rtk_uint32          phyData4;
+    rtk_uint32          phyData9;
+    rtk_port_media_t    media_type;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    /* Check Port Valid */
+     RTK_CHK_PORT_IS_UTP(port);
+
+     if(NULL == pAbility)
+        return RT_ERR_NULL_POINTER;
+
+     if (rtk_switch_isComboPort(port) == RT_ERR_OK)
+     {
+         if ((retVal = _rtk_port_phyComboPortMedia_get(port, &media_type)) != RT_ERR_OK)
+             return retVal;
+
+         if(media_type == PORT_MEDIA_FIBER)
+         {
+             return _rtk_port_FiberModeAbility_get(port, pAbility);
+         }
+     }
+
+    /*Control register setting and restart auto*/
+    if ((retVal = rtl8367c_getAsicPHYReg(rtk_switch_port_L2P_get(port), PHY_CONTROL_REG, &phyData0)) != RT_ERR_OK)
+        return retVal;
+
+    /*Auto-Negotiation control register setting*/
+    if ((retVal = rtl8367c_getAsicPHYReg(rtk_switch_port_L2P_get(port), PHY_AN_ADVERTISEMENT_REG, &phyData4)) != RT_ERR_OK)
+        return retVal;
+
+    /*1000 BASE-T control register setting*/
+    if ((retVal = rtl8367c_getAsicPHYReg(rtk_switch_port_L2P_get(port), PHY_1000_BASET_CONTROL_REG, &phyData9)) != RT_ERR_OK)
+        return retVal;
+
+    if (phyData9 & (1 << 9))
+        pAbility->Full_1000 = 1;
+    else
+        pAbility->Full_1000 = 0;
+
+    if (phyData4 & (1 << 11))
+        pAbility->AsyFC = 1;
+    else
+        pAbility->AsyFC = 0;
+
+    if (phyData4 & (1 << 10))
+        pAbility->FC = 1;
+    else
+        pAbility->FC = 0;
+
+
+    if (phyData4 & (1 << 8))
+        pAbility->Full_100 = 1;
+    else
+        pAbility->Full_100 = 0;
+
+    if (phyData4 & (1 << 7))
+        pAbility->Half_100 = 1;
+    else
+        pAbility->Half_100 = 0;
+
+    if (phyData4 & (1 << 6))
+        pAbility->Full_10 = 1;
+    else
+        pAbility->Full_10 = 0;
+
+    if (phyData4 & (1 << 5))
+        pAbility->Half_10 = 1;
+    else
+        pAbility->Half_10 = 0;
+
+
+    if (phyData0 & (1 << 12))
+        pAbility->AutoNegotiation = 1;
+    else
+        pAbility->AutoNegotiation = 0;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_port_phyStatus_get(rtk_port_t port, rtk_port_linkStatus_t *pLinkStatus, rtk_port_speed_t *pSpeed, rtk_port_duplex_t *pDuplex)
+{
+    rtk_api_ret_t retVal;
+    rtk_uint32 phyData;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    /* Check Port Valid */
+    RTK_CHK_PORT_IS_UTP(port);
+
+    if( (NULL == pLinkStatus) || (NULL == pSpeed) || (NULL == pDuplex) )
+        return RT_ERR_NULL_POINTER;
+
+    /*Get PHY resolved register*/
+    if ((retVal = rtl8367c_getAsicPHYReg(rtk_switch_port_L2P_get(port), PHY_RESOLVED_REG, &phyData)) != RT_ERR_OK)
+        return retVal;
+
+    /*check link status*/
+    if (phyData & (1<<2))
+    {
+        *pLinkStatus = 1;
+
+        /*check link speed*/
+        *pSpeed = (phyData&0x0030) >> 4;
+
+        /*check link duplex*/
+        *pDuplex = (phyData&0x0008) >> 3;
+    }
+    else
+    {
+        *pLinkStatus = 0;
+        *pSpeed = 0;
+        *pDuplex = 0;
+    }
+
+    return RT_ERR_OK;
+}
+
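+/*
+ * Illustrative usage sketch (not part of the original Realtek sources):
+ * poll the resolved PHY status of a port. Speed and duplex are only
+ * meaningful while the link is up, matching the decode above (link in
+ * bit 2, speed in bits 5:4, duplex in bit 3 of PHY_RESOLVED_REG).
+ */
+#if 0
+static rtk_api_ret_t example_poll_link(rtk_port_t port, rtk_port_speed_t *pSpeed)
+{
+    rtk_api_ret_t retVal;
+    rtk_port_linkStatus_t link;
+    rtk_port_duplex_t duplex;
+
+    if ((retVal = _rtk_port_phyStatus_get(port, &link, pSpeed, &duplex)) != RT_ERR_OK)
+        return retVal;
+
+    return link ? RT_ERR_OK : RT_ERR_FAILED;
+}
+#endif
+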
+static rtk_api_ret_t _rtk_port_macForceLink_set(rtk_port_t port, rtk_port_mac_ability_t *pPortability)
+{
+    rtk_api_ret_t retVal;
+    rtl8367c_port_ability_t ability;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    /* Check Port Valid */
+    RTK_CHK_PORT_IS_UTP(port);
+
+    if(NULL == pPortability)
+        return RT_ERR_NULL_POINTER;
+
+    if (pPortability->forcemode >1|| pPortability->speed > 2 || pPortability->duplex > 1 ||
+       pPortability->link > 1 || pPortability->nway > 1 || pPortability->txpause > 1 || pPortability->rxpause > 1)
+        return RT_ERR_INPUT;
+
+    if ((retVal = rtl8367c_getAsicPortForceLink(rtk_switch_port_L2P_get(port), &ability)) != RT_ERR_OK)
+        return retVal;
+
+    ability.forcemode = pPortability->forcemode;
+    ability.speed     = pPortability->speed;
+    ability.duplex    = pPortability->duplex;
+    ability.link      = pPortability->link;
+    ability.nway      = pPortability->nway;
+    ability.txpause   = pPortability->txpause;
+    ability.rxpause   = pPortability->rxpause;
+
+    if ((retVal = rtl8367c_setAsicPortForceLink(rtk_switch_port_L2P_get(port), &ability)) != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_port_macForceLink_get(rtk_port_t port, rtk_port_mac_ability_t *pPortability)
+{
+    rtk_api_ret_t retVal;
+    rtl8367c_port_ability_t ability;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    /* Check Port Valid */
+    RTK_CHK_PORT_IS_UTP(port);
+
+    if(NULL == pPortability)
+        return RT_ERR_NULL_POINTER;
+
+    if ((retVal = rtl8367c_getAsicPortForceLink(rtk_switch_port_L2P_get(port), &ability)) != RT_ERR_OK)
+        return retVal;
+
+    pPortability->forcemode = ability.forcemode;
+    pPortability->speed     = ability.speed;
+    pPortability->duplex    = ability.duplex;
+    pPortability->link      = ability.link;
+    pPortability->nway      = ability.nway;
+    pPortability->txpause   = ability.txpause;
+    pPortability->rxpause   = ability.rxpause;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_port_macForceLinkExt_set(rtk_port_t port, rtk_mode_ext_t mode, rtk_port_mac_ability_t *pPortability)
+{
+    rtk_api_ret_t retVal;
+    rtl8367c_port_ability_t ability;
+    rtk_uint32 ext_id;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    /* Check Port Valid */
+    RTK_CHK_PORT_IS_EXT(port);
+
+    if(NULL == pPortability)
+        return RT_ERR_NULL_POINTER;
+
+    if (mode >=MODE_EXT_END)
+        return RT_ERR_INPUT;
+
+    if(mode == MODE_EXT_HSGMII)
+    {
+        if (pPortability->forcemode > 1 || pPortability->speed != PORT_SPEED_2500M || pPortability->duplex != PORT_FULL_DUPLEX ||
+           pPortability->link >= PORT_LINKSTATUS_END || pPortability->nway > 1 || pPortability->txpause > 1 || pPortability->rxpause > 1)
+            return RT_ERR_INPUT;
+
+        if(rtk_switch_isHsgPort(port) != RT_ERR_OK)
+            return RT_ERR_PORT_ID;
+    }
+    else
+    {
+        if (pPortability->forcemode > 1 || pPortability->speed > PORT_SPEED_1000M || pPortability->duplex >= PORT_DUPLEX_END ||
+           pPortability->link >= PORT_LINKSTATUS_END || pPortability->nway > 1 || pPortability->txpause > 1 || pPortability->rxpause > 1)
+            return RT_ERR_INPUT;
+    }
+
+    ext_id = port - 15;
+
+    if(mode == MODE_EXT_DISABLE)
+    {
+        memset(&ability, 0x00, sizeof(rtl8367c_port_ability_t));
+        if ((retVal = rtl8367c_setAsicPortForceLinkExt(ext_id, &ability)) != RT_ERR_OK)
+            return retVal;
+
+        if ((retVal = rtl8367c_setAsicPortExtMode(ext_id, mode)) != RT_ERR_OK)
+            return retVal;
+    }
+    else
+    {
+        if ((retVal = rtl8367c_setAsicPortExtMode(ext_id, mode)) != RT_ERR_OK)
+            return retVal;
+
+        if ((retVal = rtl8367c_getAsicPortForceLinkExt(ext_id, &ability)) != RT_ERR_OK)
+            return retVal;
+
+        ability.forcemode = pPortability->forcemode;
+        ability.speed     = (mode == MODE_EXT_HSGMII) ? PORT_SPEED_1000M : pPortability->speed;
+        ability.duplex    = pPortability->duplex;
+        ability.link      = pPortability->link;
+        ability.nway      = pPortability->nway;
+        ability.txpause   = pPortability->txpause;
+        ability.rxpause   = pPortability->rxpause;
+
+        if ((retVal = rtl8367c_setAsicPortForceLinkExt(ext_id, &ability)) != RT_ERR_OK)
+            return retVal;
+    }
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_port_macForceLinkExt_get(rtk_port_t port, rtk_mode_ext_t *pMode, rtk_port_mac_ability_t *pPortability)
+{
+    rtk_api_ret_t retVal;
+    rtl8367c_port_ability_t ability;
+    rtk_uint32 ext_id;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    /* Check Port Valid */
+    RTK_CHK_PORT_IS_EXT(port);
+
+    if(NULL == pMode)
+        return RT_ERR_NULL_POINTER;
+
+    if(NULL == pPortability)
+        return RT_ERR_NULL_POINTER;
+
+    ext_id = port - 15;
+
+    if ((retVal = rtl8367c_getAsicPortExtMode(ext_id, (rtk_uint32 *)pMode)) != RT_ERR_OK)
+        return retVal;
+
+    if ((retVal = rtl8367c_getAsicPortForceLinkExt(ext_id, &ability)) != RT_ERR_OK)
+        return retVal;
+
+    pPortability->forcemode = ability.forcemode;
+    pPortability->speed     = (*pMode == MODE_EXT_HSGMII) ? PORT_SPEED_2500M : ability.speed;
+    pPortability->duplex    = ability.duplex;
+    pPortability->link      = ability.link;
+    pPortability->nway      = ability.nway;
+    pPortability->txpause   = ability.txpause;
+    pPortability->rxpause   = ability.rxpause;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_port_macStatus_get(rtk_port_t port, rtk_port_mac_ability_t *pPortstatus)
+{
+    rtk_api_ret_t retVal;
+    rtl8367c_port_status_t status;
+    rtk_uint32 hsgsel;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    /* Check Port Valid */
+    RTK_CHK_PORT_VALID(port);
+
+    if(NULL == pPortstatus)
+        return RT_ERR_NULL_POINTER;
+
+    if ((retVal = rtl8367c_getAsicPortStatus(rtk_switch_port_L2P_get(port), &status)) != RT_ERR_OK)
+        return retVal;
+
+    pPortstatus->duplex    = status.duplex;
+    pPortstatus->link      = status.link;
+    pPortstatus->nway      = status.nway;
+    pPortstatus->txpause   = status.txpause;
+    pPortstatus->rxpause   = status.rxpause;
+
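+    /* The status register speed field is overridden for the HSGMII case:
+     * when this is the HSGMII-capable port and the HSGMII select bit is set,
+     * the port is running at 2.5G, so report PORT_SPEED_2500M instead. */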
+    if( (retVal = rtl8367c_getAsicRegBit(RTL8367C_REG_SDS_MISC, RTL8367C_CFG_MAC8_SEL_HSGMII_OFFSET, &hsgsel)) != RT_ERR_OK)
+            return retVal;
+
+    if( (rtk_switch_isHsgPort(port) == RT_ERR_OK) && (hsgsel == 1) )
+        pPortstatus->speed = PORT_SPEED_2500M;
+    else
+        pPortstatus->speed = status.speed;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_port_macLocalLoopbackEnable_set(rtk_port_t port, rtk_enable_t enable)
+{
+    rtk_api_ret_t   retVal;
+    rtk_uint32      data;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    /* Check Port Valid */
+    RTK_CHK_PORT_VALID(port);
+
+    if(enable >= RTK_ENABLE_END)
+        return RT_ERR_INPUT;
+
+    if ((retVal = rtl8367c_setAsicPortLoopback(rtk_switch_port_L2P_get(port), enable)) != RT_ERR_OK)
+        return retVal;
+
+    if(rtk_switch_isUtpPort(port) == RT_ERR_OK)
+    {
+        if ((retVal = rtl8367c_getAsicPHYReg(rtk_switch_port_L2P_get(port), PHY_CONTROL_REG, &data)) != RT_ERR_OK)
+            return retVal;
+
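+        /* BMCR bit 14 is the IEEE-defined PHY loopback bit; mirror the MAC
+         * loopback setting into the PHY so the PHY and MAC speeds stay in
+         * sync (see the note on rtk_port_macLocalLoopbackEnable_set()). */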
+        if(enable == ENABLED)
+            data |= (0x0001 << 14);
+        else
+            data &= ~(0x0001 << 14);
+
+        if ((retVal = rtl8367c_setAsicPHYReg(rtk_switch_port_L2P_get(port), PHY_CONTROL_REG, data)) != RT_ERR_OK)
+            return retVal;
+    }
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_port_macLocalLoopbackEnable_get(rtk_port_t port, rtk_enable_t *pEnable)
+{
+    rtk_api_ret_t retVal;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    /* Check Port Valid */
+    RTK_CHK_PORT_VALID(port);
+
+    if(NULL == pEnable)
+        return RT_ERR_NULL_POINTER;
+
+    if ((retVal = rtl8367c_getAsicPortLoopback(rtk_switch_port_L2P_get(port), pEnable)) != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_port_phyReg_set(rtk_port_t port, rtk_port_phy_reg_t reg, rtk_port_phy_data_t regData)
+{
+    rtk_api_ret_t retVal;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    /* Check Port Valid */
+    RTK_CHK_PORT_IS_UTP(port);
+
+    if ((retVal = rtl8367c_setAsicPHYReg(rtk_switch_port_L2P_get(port), reg, regData)) != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_port_phyReg_get(rtk_port_t port, rtk_port_phy_reg_t reg, rtk_port_phy_data_t *pData)
+{
+    rtk_api_ret_t retVal;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    /* Check Port Valid */
+    RTK_CHK_PORT_IS_UTP(port);
+
+    if ((retVal = rtl8367c_getAsicPHYReg(rtk_switch_port_L2P_get(port), reg, pData)) != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_port_backpressureEnable_set(rtk_port_t port, rtk_enable_t enable)
+{
+    rtk_api_ret_t retVal;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if (port != RTK_WHOLE_SYSTEM)
+        return RT_ERR_PORT_ID;
+
+    if (enable >= RTK_ENABLE_END)
+        return RT_ERR_INPUT;
+
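+    /* The ASIC jam-mode bit has inverted polarity relative to this API:
+     * enabling backpressure clears jam mode, disabling it selects defer. */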
+    if ((retVal = rtl8367c_setAsicPortJamMode(!enable)) != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_port_backpressureEnable_get(rtk_port_t port, rtk_enable_t *pEnable)
+{
+    rtk_api_ret_t retVal;
+    rtk_uint32 regData;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if (port != RTK_WHOLE_SYSTEM)
+        return RT_ERR_PORT_ID;
+
+    if(NULL == pEnable)
+        return RT_ERR_NULL_POINTER;
+
+    if ((retVal = rtl8367c_getAsicPortJamMode(&regData)) != RT_ERR_OK)
+        return retVal;
+
+    *pEnable = !regData;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_port_adminEnable_set(rtk_port_t port, rtk_enable_t enable)
+{
+    rtk_api_ret_t retVal;
+    rtk_uint32      data;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    /* Check Port Valid */
+    RTK_CHK_PORT_IS_UTP(port);
+
+    if (enable >= RTK_ENABLE_END)
+        return RT_ERR_INPUT;
+
+    if ((retVal = rtl8367c_getAsicPHYReg(rtk_switch_port_L2P_get(port), PHY_CONTROL_REG, &data)) != RT_ERR_OK)
+        return retVal;
+
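+    /* PHY control register (BMCR) bits: bit 11 (0x0800) is the IEEE
+     * power-down bit, bit 9 (0x0200) restarts auto-negotiation. Enable
+     * clears power-down and restarts nway; disable powers the PHY down. */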
+    if (ENABLED == enable)
+    {
+        data &= 0xF7FF;
+        data |= 0x0200;
+    }
+    else if (DISABLED == enable)
+    {
+        data |= 0x0800;
+    }
+
+    if ((retVal = rtl8367c_setAsicPHYReg(rtk_switch_port_L2P_get(port), PHY_CONTROL_REG, data)) != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_port_adminEnable_get(rtk_port_t port, rtk_enable_t *pEnable)
+{
+    rtk_api_ret_t retVal;
+    rtk_uint32      data;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    /* Check Port Valid */
+    RTK_CHK_PORT_IS_UTP(port);
+
+    if(NULL == pEnable)
+        return RT_ERR_NULL_POINTER;
+
+    if ((retVal = rtk_port_phyReg_get(port, PHY_CONTROL_REG, &data)) != RT_ERR_OK)
+        return retVal;
+
+    if ( (data & 0x0800) == 0x0800)
+    {
+        *pEnable = DISABLED;
+    }
+    else
+    {
+        *pEnable = ENABLED;
+    }
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_port_isolation_set(rtk_port_t port, rtk_portmask_t *pPortmask)
+{
+    rtk_api_ret_t retVal;
+    rtk_uint32 pmask;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    /* Check Port Valid */
+    RTK_CHK_PORT_VALID(port);
+
+    if(NULL == pPortmask)
+        return RT_ERR_NULL_POINTER;
+
+    /* check port mask */
+    RTK_CHK_PORTMASK_VALID(pPortmask);
+
+    if ((retVal = rtk_switch_portmask_L2P_get(pPortmask, &pmask)) != RT_ERR_OK)
+        return retVal;
+
+    if ((retVal = rtl8367c_setAsicPortIsolationPermittedPortmask(rtk_switch_port_L2P_get(port), pmask)) != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_port_isolation_get(rtk_port_t port, rtk_portmask_t *pPortmask)
+{
+    rtk_api_ret_t retVal;
+    rtk_uint32 pmask;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    /* Check Port Valid */
+    RTK_CHK_PORT_VALID(port);
+
+    if(NULL == pPortmask)
+        return RT_ERR_NULL_POINTER;
+
+    if ((retVal = rtl8367c_getAsicPortIsolationPermittedPortmask(rtk_switch_port_L2P_get(port), &pmask)) != RT_ERR_OK)
+        return retVal;
+
+    if ((retVal = rtk_switch_portmask_P2L_get(pmask, pPortmask)) != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_port_rgmiiDelayExt_set(rtk_port_t port, rtk_data_t txDelay, rtk_data_t rxDelay)
+{
+    rtk_api_ret_t retVal;
+    rtk_uint32 regAddr, regData;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    /* Check Port Valid */
+    RTK_CHK_PORT_IS_EXT(port);
+
+    if ((txDelay > 1) || (rxDelay > 7))
+        return RT_ERR_INPUT;
+
+    if(port == EXT_PORT0)
+        regAddr = RTL8367C_REG_EXT1_RGMXF;
+    else if(port == EXT_PORT1)
+        regAddr = RTL8367C_REG_EXT2_RGMXF;
+    else
+        return RT_ERR_INPUT;
+
+    if ((retVal = rtl8367c_getAsicReg(regAddr, &regData)) != RT_ERR_OK)
+        return retVal;
+
+    regData = (regData & 0xFFF0) | ((txDelay << 3) & 0x0008) | (rxDelay & 0x0007);
+
+    if ((retVal = rtl8367c_setAsicReg(regAddr, regData)) != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_port_rgmiiDelayExt_get(rtk_port_t port, rtk_data_t *pTxDelay, rtk_data_t *pRxDelay)
+{
+    rtk_api_ret_t retVal;
+    rtk_uint32 regAddr, regData;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    /* Check Port Valid */
+    RTK_CHK_PORT_IS_EXT(port);
+
+    if( (NULL == pTxDelay) || (NULL == pRxDelay) )
+        return RT_ERR_NULL_POINTER;
+
+    if(port == EXT_PORT0)
+        regAddr = RTL8367C_REG_EXT1_RGMXF;
+    else if(port == EXT_PORT1)
+        regAddr = RTL8367C_REG_EXT2_RGMXF;
+    else
+        return RT_ERR_INPUT;
+
+    if ((retVal = rtl8367c_getAsicReg(regAddr, &regData)) != RT_ERR_OK)
+        return retVal;
+
+    *pTxDelay = (regData & 0x0008) >> 3;
+    *pRxDelay = regData & 0x0007;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_port_phyEnableAll_set(rtk_enable_t enable)
+{
+    rtk_api_ret_t retVal;
+    rtk_uint32 data;
+    rtk_uint32 port;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if (enable >= RTK_ENABLE_END)
+        return RT_ERR_ENABLE;
+
+    if ((retVal = rtl8367c_setAsicPortEnableAll(enable)) != RT_ERR_OK)
+        return retVal;
+
+    RTK_SCAN_ALL_LOG_PORT(port)
+    {
+        if(rtk_switch_isUtpPort(port) == RT_ERR_OK)
+        {
+            if ((retVal = _rtk_port_phyReg_get(port, PHY_CONTROL_REG, &data)) != RT_ERR_OK)
+                return retVal;
+
+            if (ENABLED == enable)
+            {
+                data &= 0xF7FF;
+                data |= 0x0200;
+            }
+            else
+            {
+                data |= 0x0800;
+            }
+
+            if ((retVal = _rtk_port_phyReg_set(port, PHY_CONTROL_REG, data)) != RT_ERR_OK)
+                return retVal;
+        }
+    }
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_port_phyEnableAll_get(rtk_enable_t *pEnable)
+{
+    rtk_api_ret_t retVal;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if(NULL == pEnable)
+        return RT_ERR_NULL_POINTER;
+
+    if ((retVal = rtl8367c_getAsicPortEnableAll(pEnable)) != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_port_efid_set(rtk_port_t port, rtk_data_t efid)
+{
+    rtk_api_ret_t retVal;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    /* Check Port Valid */
+    RTK_CHK_PORT_VALID(port);
+
+    /* efid must be 0~7 */
+    if (efid > RTK_EFID_MAX)
+        return RT_ERR_INPUT;
+
+    if ((retVal = rtl8367c_setAsicPortIsolationEfid(rtk_switch_port_L2P_get(port), efid))!=RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_port_efid_get(rtk_port_t port, rtk_data_t *pEfid)
+{
+    rtk_api_ret_t retVal;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    /* Check Port Valid */
+    RTK_CHK_PORT_VALID(port);
+
+    if(NULL == pEfid)
+        return RT_ERR_NULL_POINTER;
+
+    if ((retVal = rtl8367c_getAsicPortIsolationEfid(rtk_switch_port_L2P_get(port), pEfid))!=RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_port_phyComboPortMedia_set(rtk_port_t port, rtk_port_media_t media)
+{
+    rtk_api_ret_t retVal;
+    rtk_uint32 regData;
+    rtk_uint32 idx;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    /* Check Port Valid */
+    RTK_CHK_PORT_IS_UTP(port);
+
+    /* Check Combo Port ID */
+    RTK_CHK_PORT_IS_COMBO(port);
+
+    if (media >= PORT_MEDIA_END)
+        return RT_ERR_INPUT;
+
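+    /* Probe what is presumably the chip ID: write a magic key to 0x13C2,
+     * read 0x1300 back, then clear the key again. Fiber media is only
+     * supported when the ID reads back as 0x6367. */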
+    if((retVal = rtl8367c_setAsicReg(0x13C2, 0x0249)) != RT_ERR_OK)
+        return retVal;
+
+    if((retVal = rtl8367c_getAsicReg(0x1300, &regData)) != RT_ERR_OK)
+        return retVal;
+
+    if((retVal = rtl8367c_setAsicReg(0x13C2, 0x0000)) != RT_ERR_OK)
+        return retVal;
+
+    if(regData != 0x6367)
+        return RT_ERR_CHIP_NOT_SUPPORTED;
+
+    if(media == PORT_MEDIA_FIBER)
+    {
+        /* software init */
+        if ((retVal = rtl8367c_setAsicRegBit(RTL8367C_REG_CHIP_RESET, RTL8367C_DW8051_RST_OFFSET, 1)) != RT_ERR_OK)
+            return retVal;
+
+        if ((retVal = rtl8367c_setAsicRegBit(RTL8367C_REG_MISCELLANEOUS_CONFIGURE0, RTL8367C_DW8051_EN_OFFSET, 1)) != RT_ERR_OK)
+            return retVal;
+
+        if ((retVal = rtl8367c_setAsicRegBit(RTL8367C_REG_DW8051_RDY, RTL8367C_ACS_IROM_ENABLE_OFFSET, 1)) != RT_ERR_OK)
+            return retVal;
+
+        if ((retVal = rtl8367c_setAsicRegBit(RTL8367C_REG_DW8051_RDY, RTL8367C_IROM_MSB_OFFSET, 0)) != RT_ERR_OK)
+            return retVal;
+
+        for(idx = 0; idx < FIBER_INIT_SIZE; idx++)
+        {
+            if ((retVal = rtl8367c_setAsicReg(0xE000 + idx, (rtk_uint32)Fiber[idx])) != RT_ERR_OK)
+                return retVal;
+        }
+
+        if ((retVal = rtl8367c_setAsicRegBit(RTL8367C_REG_DW8051_RDY, RTL8367C_IROM_MSB_OFFSET, 0)) != RT_ERR_OK)
+            return retVal;
+
+        if ((retVal = rtl8367c_setAsicRegBit(RTL8367C_REG_DW8051_RDY, RTL8367C_ACS_IROM_ENABLE_OFFSET, 0)) != RT_ERR_OK)
+            return retVal;
+
+        if ((retVal = rtl8367c_setAsicRegBit(RTL8367C_REG_CHIP_RESET, RTL8367C_DW8051_RST_OFFSET, 0)) != RT_ERR_OK)
+            return retVal;
+    }
+    else
+    {
+        if ((retVal = rtl8367c_setAsicRegBit(RTL8367C_REG_UTP_FIB_DET, RTL8367C_UTP_FIRST_OFFSET, 1))!=RT_ERR_OK)
+            return retVal;
+
+        if ((retVal = rtl8367c_setAsicRegBit(RTL8367C_REG_DW8051_RDY, RTL8367C_DW8051_READY_OFFSET, 0)) != RT_ERR_OK)
+            return retVal;
+    }
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_port_phyComboPortMedia_get(rtk_port_t port, rtk_port_media_t *pMedia)
+{
+    rtk_api_ret_t   retVal;
+    rtk_uint32      regData;
+    rtk_uint32      data;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    /* Check Port Valid */
+    RTK_CHK_PORT_IS_UTP(port);
+
+    /* Check Combo Port ID */
+    RTK_CHK_PORT_IS_COMBO(port);
+
+    if((retVal = rtl8367c_setAsicReg(0x13C2, 0x0249)) != RT_ERR_OK)
+        return retVal;
+
+    if((retVal = rtl8367c_getAsicReg(0x1300, &regData)) != RT_ERR_OK)
+        return retVal;
+
+    if((retVal = rtl8367c_setAsicReg(0x13C2, 0x0000)) != RT_ERR_OK)
+        return retVal;
+
+    if(regData != 0x6367)
+    {
+        *pMedia = PORT_MEDIA_COPPER;
+    }
+    else
+    {
+        if ((retVal = rtl8367c_getAsicRegBit(RTL8367C_REG_UTP_FIB_DET, RTL8367C_UTP_FIRST_OFFSET, &data))!=RT_ERR_OK)
+                return retVal;
+
+        if(data == 1)
+            *pMedia = PORT_MEDIA_COPPER;
+        else
+            *pMedia = PORT_MEDIA_FIBER;
+    }
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_port_rtctEnable_set(rtk_portmask_t *pPortmask)
+{
+    rtk_api_ret_t   retVal;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if(NULL == pPortmask)
+        return RT_ERR_NULL_POINTER;
+
+    /* Check Port Mask Valid */
+    RTK_CHK_PORTMASK_VALID_ONLY_UTP(pPortmask);
+
+    if ((retVal = rtl8367c_setAsicPortRTCTEnable(pPortmask->bits[0]))!=RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_port_rtctDisable_set(rtk_portmask_t *pPortmask)
+{
+    rtk_api_ret_t   retVal;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if(NULL == pPortmask)
+        return RT_ERR_NULL_POINTER;
+
+    /* Check Port Mask Valid */
+    RTK_CHK_PORTMASK_VALID_ONLY_UTP(pPortmask);
+
+    if ((retVal = rtl8367c_setAsicPortRTCTDisable(pPortmask->bits[0]))!=RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_port_rtctResult_get(rtk_port_t port, rtk_rtctResult_t *pRtctResult)
+{
+    rtk_api_ret_t               retVal;
+    rtl8367c_port_rtct_result_t result;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    /* Check Port Valid */
+    RTK_CHK_PORT_IS_UTP(port);
+
+    if(NULL == pRtctResult)
+        return RT_ERR_NULL_POINTER;
+
+    memset(pRtctResult, 0x00, sizeof(rtk_rtctResult_t));
+    if ((retVal = rtl8367c_getAsicPortRTCTResult(port, &result))!=RT_ERR_OK)
+        return retVal;
+
+    pRtctResult->result.ge_result.channelALen = result.channelALen;
+    pRtctResult->result.ge_result.channelBLen = result.channelBLen;
+    pRtctResult->result.ge_result.channelCLen = result.channelCLen;
+    pRtctResult->result.ge_result.channelDLen = result.channelDLen;
+
+    pRtctResult->result.ge_result.channelALinedriver = result.channelALinedriver;
+    pRtctResult->result.ge_result.channelBLinedriver = result.channelBLinedriver;
+    pRtctResult->result.ge_result.channelCLinedriver = result.channelCLinedriver;
+    pRtctResult->result.ge_result.channelDLinedriver = result.channelDLinedriver;
+
+    pRtctResult->result.ge_result.channelAMismatch = result.channelAMismatch;
+    pRtctResult->result.ge_result.channelBMismatch = result.channelBMismatch;
+    pRtctResult->result.ge_result.channelCMismatch = result.channelCMismatch;
+    pRtctResult->result.ge_result.channelDMismatch = result.channelDMismatch;
+
+    pRtctResult->result.ge_result.channelAOpen = result.channelAOpen;
+    pRtctResult->result.ge_result.channelBOpen = result.channelBOpen;
+    pRtctResult->result.ge_result.channelCOpen = result.channelCOpen;
+    pRtctResult->result.ge_result.channelDOpen = result.channelDOpen;
+
+    pRtctResult->result.ge_result.channelAShort = result.channelAShort;
+    pRtctResult->result.ge_result.channelBShort = result.channelBShort;
+    pRtctResult->result.ge_result.channelCShort = result.channelCShort;
+    pRtctResult->result.ge_result.channelDShort = result.channelDShort;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_port_sds_reset(rtk_port_t port)
+{
+    rtk_uint32 ext_id;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    /* Check Port Valid */
+    if(rtk_switch_isSgmiiPort(port) != RT_ERR_OK)
+        return RT_ERR_PORT_ID;
+
+    ext_id = port - 15;
+    return rtl8367c_sdsReset(ext_id);
+}
+
+static rtk_api_ret_t _rtk_port_sgmiiLinkStatus_get(rtk_port_t port, rtk_data_t *pSignalDetect, rtk_data_t *pSync, rtk_port_linkStatus_t *pLink)
+{
+    rtk_uint32 ext_id;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    /* Check Port Valid */
+    if(rtk_switch_isSgmiiPort(port) != RT_ERR_OK)
+        return RT_ERR_PORT_ID;
+
+    if(NULL == pSignalDetect)
+        return RT_ERR_NULL_POINTER;
+
+    if(NULL == pSync)
+        return RT_ERR_NULL_POINTER;
+
+    if(NULL == pLink)
+        return RT_ERR_NULL_POINTER;
+
+    ext_id = port - 15;
+    return rtl8367c_getSdsLinkStatus(ext_id, (rtk_uint32 *)pSignalDetect, (rtk_uint32 *)pSync, (rtk_uint32 *)pLink);
+}
+
+static rtk_api_ret_t _rtk_port_sgmiiNway_set(rtk_port_t port, rtk_enable_t state)
+{
+    rtk_uint32 ext_id;
+
+     /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    /* Check Port Valid */
+    if(rtk_switch_isSgmiiPort(port) != RT_ERR_OK)
+        return RT_ERR_PORT_ID;
+
+    if(state >= RTK_ENABLE_END)
+        return RT_ERR_INPUT;
+
+    ext_id = port - 15;
+    return rtl8367c_setSgmiiNway(ext_id, (rtk_uint32)state);
+}
+
+static rtk_api_ret_t _rtk_port_sgmiiNway_get(rtk_port_t port, rtk_enable_t *pState)
+{
+    rtk_uint32 ext_id;
+
+     /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    /* Check Port Valid */
+    if(rtk_switch_isSgmiiPort(port) != RT_ERR_OK)
+        return RT_ERR_PORT_ID;
+
+    if(NULL == pState)
+        return RT_ERR_NULL_POINTER;
+
+    ext_id = port - 15;
+    return rtl8367c_getSgmiiNway(ext_id, (rtk_uint32 *)pState);
+}
+
+/* Function Name:
+ *      rtk_port_phyAutoNegoAbility_set
+ * Description:
+ *      Set ethernet PHY auto-negotiation desired ability.
+ * Input:
+ *      port        - port id.
+ *      pAbility    - Ability structure
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK               - OK
+ *      RT_ERR_FAILED           - Failed
+ *      RT_ERR_SMI              - SMI access error
+ *      RT_ERR_PORT_ID          - Invalid port number.
+ *      RT_ERR_PHY_REG_ID       - Invalid PHY address
+ *      RT_ERR_INPUT            - Invalid input parameters.
+ *      RT_ERR_BUSYWAIT_TIMEOUT - PHY access busy
+ * Note:
+ *      If the Full_1000 bit is set to 1, AutoNegotiation is automatically set to 1. When both AutoNegotiation and Full_1000 are set to 0,
+ *      the PHY speed and duplex are selected in the priority sequence 100F > 100H > 10F > 10H.
+ */
+rtk_api_ret_t rtk_port_phyAutoNegoAbility_set(rtk_port_t port, rtk_port_phy_ability_t *pAbility)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_port_phyAutoNegoAbility_set(port, pAbility);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
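+
+/*
+ * Illustrative usage sketch (not part of the SDK): advertise 100/1000
+ * full duplex with flow control on a copper port. UTP_PORT0 is assumed to
+ * be a valid logical port id on this platform, and handle_error() is a
+ * hypothetical error handler.
+ *
+ *     rtk_port_phy_ability_t ability;
+ *     memset(&ability, 0x00, sizeof(ability));
+ *     ability.AutoNegotiation = 1;
+ *     ability.Full_100        = 1;
+ *     ability.Full_1000       = 1;    // implies AutoNegotiation = 1 anyway
+ *     ability.FC              = 1;
+ *     ability.AsyFC           = 1;
+ *     if (rtk_port_phyAutoNegoAbility_set(UTP_PORT0, &ability) != RT_ERR_OK)
+ *         handle_error();
+ */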
+
+/* Function Name:
+ *      rtk_port_phyAutoNegoAbility_get
+ * Description:
+ *      Get PHY ability through PHY registers.
+ * Input:
+ *      port - Port id.
+ * Output:
+ *      pAbility - Ability structure
+ * Return:
+ *      RT_ERR_OK               - OK
+ *      RT_ERR_FAILED           - Failed
+ *      RT_ERR_SMI              - SMI access error
+ *      RT_ERR_PORT_ID          - Invalid port number.
+ *      RT_ERR_PHY_REG_ID       - Invalid PHY address
+ *      RT_ERR_INPUT            - Invalid input parameters.
+ *      RT_ERR_BUSYWAIT_TIMEOUT - PHY access busy
+ * Note:
+ *      Get the capability of the specified PHY.
+ */
+rtk_api_ret_t rtk_port_phyAutoNegoAbility_get(rtk_port_t port, rtk_port_phy_ability_t *pAbility)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_port_phyAutoNegoAbility_get(port, pAbility);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_port_phyForceModeAbility_set
+ * Description:
+ *      Set the port speed/duplex mode/pause/asy_pause in the PHY force mode.
+ * Input:
+ *      port        - port id.
+ *      pAbility    - Ability structure
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK               - OK
+ *      RT_ERR_FAILED           - Failed
+ *      RT_ERR_SMI              - SMI access error
+ *      RT_ERR_PORT_ID          - Invalid port number.
+ *      RT_ERR_PHY_REG_ID       - Invalid PHY address
+ *      RT_ERR_INPUT            - Invalid input parameters.
+ *      RT_ERR_BUSYWAIT_TIMEOUT - PHY access busy
+ * Note:
+ *      When both AutoNegotiation and Full_1000 are set to 0, the PHY speed and duplex are
+ *      selected in the priority sequence 100F > 100H > 10F > 10H.
+ *      This API can be used to configure combo port in fiber mode.
+ *      The possible parameters in fiber mode are Full_1000 and Full_100.
+ *      All the other fields in rtk_port_phy_ability_t will be ignored in fiber port.
+ */
+rtk_api_ret_t rtk_port_phyForceModeAbility_set(rtk_port_t port, rtk_port_phy_ability_t *pAbility)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_port_phyForceModeAbility_set(port, pAbility);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_port_phyForceModeAbility_get
+ * Description:
+ *      Get PHY ability through PHY registers.
+ * Input:
+ *      port - Port id.
+ * Output:
+ *      pAbility - Ability structure
+ * Return:
+ *      RT_ERR_OK               - OK
+ *      RT_ERR_FAILED           - Failed
+ *      RT_ERR_SMI              - SMI access error
+ *      RT_ERR_PORT_ID          - Invalid port number.
+ *      RT_ERR_PHY_REG_ID       - Invalid PHY address
+ *      RT_ERR_INPUT            - Invalid input parameters.
+ *      RT_ERR_BUSYWAIT_TIMEOUT - PHY access busy
+ * Note:
+ *      Get the capability of the specified PHY.
+ */
+rtk_api_ret_t rtk_port_phyForceModeAbility_get(rtk_port_t port, rtk_port_phy_ability_t *pAbility)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_port_phyForceModeAbility_get(port, pAbility);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_port_phyStatus_get
+ * Description:
+ *      Get ethernet PHY linking status
+ * Input:
+ *      port - Port id.
+ * Output:
+ *      linkStatus  - PHY link status
+ *      speed       - PHY link speed
+ *      duplex      - PHY duplex mode
+ * Return:
+ *      RT_ERR_OK               - OK
+ *      RT_ERR_FAILED           - Failed
+ *      RT_ERR_SMI              - SMI access error
+ *      RT_ERR_PORT_ID          - Invalid port number.
+ *      RT_ERR_PHY_REG_ID       - Invalid PHY address
+ *      RT_ERR_INPUT            - Invalid input parameters.
+ *      RT_ERR_BUSYWAIT_TIMEOUT - PHY access busy
+ * Note:
+ *      The API returns the auto-negotiation result (link, speed, duplex) of the PHY.
+ */
+rtk_api_ret_t rtk_port_phyStatus_get(rtk_port_t port, rtk_port_linkStatus_t *pLinkStatus, rtk_port_speed_t *pSpeed, rtk_port_duplex_t *pDuplex)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_port_phyStatus_get(port, pLinkStatus, pSpeed, pDuplex);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_port_macForceLink_set
+ * Description:
+ *      Set port force linking configuration.
+ * Input:
+ *      port            - port id.
+ *      pPortability    - port ability configuration
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_PORT_ID      - Invalid port number.
+ * Note:
+ *      This API can set Port/MAC force mode properties.
+ */
+rtk_api_ret_t rtk_port_macForceLink_set(rtk_port_t port, rtk_port_mac_ability_t *pPortability)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_port_macForceLink_set(port, pPortability);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_port_macForceLink_get
+ * Description:
+ *      Get port force linking configuration.
+ * Input:
+ *      port - Port id.
+ * Output:
+ *      pPortability - port ability configuration
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_PORT_ID      - Invalid port number.
+ *      RT_ERR_INPUT        - Invalid input parameters.
+ * Note:
+ *      This API can get Port/MAC force mode properties.
+ */
+rtk_api_ret_t rtk_port_macForceLink_get(rtk_port_t port, rtk_port_mac_ability_t *pPortability)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_port_macForceLink_get(port, pPortability);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_port_macForceLinkExt_set
+ * Description:
+ *      Set external interface force linking configuration.
+ * Input:
+ *      port            - external port ID
+ *      mode            - external interface mode
+ *      pPortability    - port ability configuration
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_INPUT        - Invalid input parameters.
+ * Note:
+ *      This API can set external interface force mode properties.
+ *      The external interface can be set to:
+ *      - MODE_EXT_DISABLE,
+ *      - MODE_EXT_RGMII,
+ *      - MODE_EXT_MII_MAC,
+ *      - MODE_EXT_MII_PHY,
+ *      - MODE_EXT_TMII_MAC,
+ *      - MODE_EXT_TMII_PHY,
+ *      - MODE_EXT_GMII,
+ *      - MODE_EXT_RMII_MAC,
+ *      - MODE_EXT_RMII_PHY,
+ *      - MODE_EXT_SGMII,
+ *      - MODE_EXT_HSGMII,
+ *      - MODE_EXT_1000X_100FX,
+ *      - MODE_EXT_1000X,
+ *      - MODE_EXT_100FX,
+ */
+rtk_api_ret_t rtk_port_macForceLinkExt_set(rtk_port_t port, rtk_mode_ext_t mode, rtk_port_mac_ability_t *pPortability)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_port_macForceLinkExt_set(port, mode, pPortability);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
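+
+/*
+ * Illustrative usage sketch (not part of the SDK): force EXT_PORT1 into
+ * RGMII mode with a fixed 1000M/full-duplex link towards a CPU MAC. The
+ * delay values passed to rtk_port_rgmiiDelayExt_set() are placeholders;
+ * real values are board-specific, and per the note on that API the delays
+ * should be configured before forcing the link.
+ *
+ *     rtk_port_mac_ability_t mac;
+ *     memset(&mac, 0x00, sizeof(mac));
+ *     mac.forcemode = 1;                 // force mode, ignore nway result
+ *     mac.speed     = PORT_SPEED_1000M;
+ *     mac.duplex    = PORT_FULL_DUPLEX;
+ *     mac.link      = PORT_LINKUP;
+ *     mac.txpause   = 1;
+ *     mac.rxpause   = 1;
+ *     rtk_port_rgmiiDelayExt_set(EXT_PORT1, 1, 4);
+ *     rtk_port_macForceLinkExt_set(EXT_PORT1, MODE_EXT_RGMII, &mac);
+ */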
+
+/* Function Name:
+ *      rtk_port_macForceLinkExt_get
+ * Description:
+ *      Set external interface force linking configuration.
+ * Input:
+ *      port            - external port ID
+ * Output:
+ *      pMode           - external interface mode
+ *      pPortability    - port ability configuration
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_INPUT        - Invalid input parameters.
+ * Note:
+ *      This API can get external interface force mode properties.
+ */
+rtk_api_ret_t rtk_port_macForceLinkExt_get(rtk_port_t port, rtk_mode_ext_t *pMode, rtk_port_mac_ability_t *pPortability)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_port_macForceLinkExt_get(port, pMode, pPortability);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_port_macStatus_get
+ * Description:
+ *      Get port link status.
+ * Input:
+ *      port - Port id.
+ * Output:
+ *      pPortstatus - port ability configuration
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_PORT_ID      - Invalid port number.
+ * Note:
+ *      This API can get Port/PHY properties.
+ */
+rtk_api_ret_t rtk_port_macStatus_get(rtk_port_t port, rtk_port_mac_ability_t *pPortstatus)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_port_macStatus_get(port, pPortstatus);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_port_macLocalLoopbackEnable_set
+ * Description:
+ *      Set Port Local Loopback. (Redirect TX to RX.)
+ * Input:
+ *      port    - Port id.
+ *      enable  - Loopback state, 0:disable, 1:enable
+ * Output:
+ *      None.
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_PORT_ID      - Invalid port number.
+ * Note:
+ *      This API can enable/disable Local loopback in MAC.
+ *      For UTP ports, this API also sets/clears the digital
+ *      loopback bit in the PHY register so that the PHY and
+ *      MAC speeds stay in sync. For EXT ports, users need to
+ *      force the link state themselves.
+ */
+rtk_api_ret_t rtk_port_macLocalLoopbackEnable_set(rtk_port_t port, rtk_enable_t enable)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_port_macLocalLoopbackEnable_set(port, enable);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_port_macLocalLoopbackEnable_get
+ * Description:
+ *      Get Port Local Loopback. (Redirect TX to RX.)
+ * Input:
+ *      port    - Port id.
+ * Output:
+ *      pEnable  - Loopback state, 0:disable, 1:enable
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_PORT_ID      - Invalid port number.
+ * Note:
+ *      None.
+ */
+rtk_api_ret_t rtk_port_macLocalLoopbackEnable_get(rtk_port_t port, rtk_enable_t *pEnable)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_port_macLocalLoopbackEnable_get(port, pEnable);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_port_phyReg_set
+ * Description:
+ *      Set PHY register data of the specific port.
+ * Input:
+ *      port    - port id.
+ *      reg     - Register id
+ *      regData - Register data
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK               - OK
+ *      RT_ERR_FAILED           - Failed
+ *      RT_ERR_SMI              - SMI access error
+ *      RT_ERR_PORT_ID          - Invalid port number.
+ *      RT_ERR_PHY_REG_ID       - Invalid PHY address
+ *      RT_ERR_BUSYWAIT_TIMEOUT - PHY access busy
+ * Note:
+ *      This API can set PHY register data of the specific port.
+ */
+rtk_api_ret_t rtk_port_phyReg_set(rtk_port_t port, rtk_port_phy_reg_t reg, rtk_port_phy_data_t regData)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_port_phyReg_set(port, reg, regData);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_port_phyReg_get
+ * Description:
+ *      Get PHY register data of the specific port.
+ * Input:
+ *      port    - Port id.
+ *      reg     - Register id
+ * Output:
+ *      pData   - Register data
+ * Return:
+ *      RT_ERR_OK               - OK
+ *      RT_ERR_FAILED           - Failed
+ *      RT_ERR_SMI              - SMI access error
+ *      RT_ERR_PORT_ID          - Invalid port number.
+ *      RT_ERR_PHY_REG_ID       - Invalid PHY address
+ *      RT_ERR_BUSYWAIT_TIMEOUT - PHY access busy
+ * Note:
+ *      This API can get PHY register data of the specific port.
+ */
+rtk_api_ret_t rtk_port_phyReg_get(rtk_port_t port, rtk_port_phy_reg_t reg, rtk_port_phy_data_t *pData)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_port_phyReg_get(port, reg, pData);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_port_backpressureEnable_set
+ * Description:
+ *      Set the half duplex backpressure enable status of the specific port.
+ * Input:
+ *      port    - port id.
+ *      enable  - Back pressure status.
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_PORT_ID      - Invalid port number.
+ *      RT_ERR_ENABLE       - Invalid enable input.
+ * Note:
+ *      This API can set the half duplex backpressure enable status of the specific port.
+ *      The half duplex backpressure enable status of the port is as following:
+ *      - DISABLE(Defer)
+ *      - ENABLE (Backpressure)
+ */
+rtk_api_ret_t rtk_port_backpressureEnable_set(rtk_port_t port, rtk_enable_t enable)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_port_backpressureEnable_set(port, enable);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_port_backpressureEnable_get
+ * Description:
+ *      Get the half duplex backpressure enable status of the specific port.
+ * Input:
+ *      port - Port id.
+ * Output:
+ *      pEnable - Back pressure status.
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_PORT_ID      - Invalid port number.
+ * Note:
+ *      This API can get the half duplex backpressure enable status of the specific port.
+ *      The half duplex backpressure enable status of the port is as following:
+ *      - DISABLE(Defer)
+ *      - ENABLE (Backpressure)
+ */
+rtk_api_ret_t rtk_port_backpressureEnable_get(rtk_port_t port, rtk_enable_t *pEnable)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_port_backpressureEnable_get(port, pEnable);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_port_adminEnable_set
+ * Description:
+ *      Set port admin configuration of the specific port.
+ * Input:
+ *      port    - port id.
+ *      enable  - Port admin status.
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_PORT_ID      - Invalid port number.
+ *      RT_ERR_ENABLE       - Invalid enable input.
+ * Note:
+ *      This API can set port admin configuration of the specific port.
+ *      The port admin configuration of the port is as following:
+ *      - DISABLE
+ *      - ENABLE
+ */
+rtk_api_ret_t rtk_port_adminEnable_set(rtk_port_t port, rtk_enable_t enable)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_port_adminEnable_set(port, enable);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_port_adminEnable_get
+ * Description:
+ *      Get port admin configuration of the specific port.
+ * Input:
+ *      port - Port id.
+ * Output:
+ *      pEnable - Port admin status.
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_PORT_ID      - Invalid port number.
+ * Note:
+ *      This API can get port admin configuration of the specific port.
+ *      The port admin configuration of the port is as following:
+ *      - DISABLE
+ *      - ENABLE
+ */
+rtk_api_ret_t rtk_port_adminEnable_get(rtk_port_t port, rtk_enable_t *pEnable)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_port_adminEnable_get(port, pEnable);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_port_isolation_set
+ * Description:
+ *      Set permitted port isolation portmask
+ * Input:
+ *      port         - port id.
+ *      pPortmask    - Permit port mask
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_PORT_ID      - Invalid port number.
+ *      RT_ERR_PORT_MASK    - Invalid portmask.
+ * Note:
+ *      This API sets the permitted portmask of the given port.
+ *      A port can only transmit packets to ports included in its permitted portmask.
+ */
+rtk_api_ret_t rtk_port_isolation_set(rtk_port_t port, rtk_portmask_t *pPortmask)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_port_isolation_set(port, pPortmask);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
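+
+/*
+ * Illustrative usage sketch (not part of the SDK): isolate UTP_PORT0 so it
+ * may only forward traffic to EXT_PORT1 (e.g. the CPU port). The port ids
+ * and the direct bits[0] access are assumptions based on the portmask
+ * layout used elsewhere in this file.
+ *
+ *     rtk_portmask_t permit;
+ *     memset(&permit, 0x00, sizeof(permit));
+ *     permit.bits[0] = (1 << EXT_PORT1);  // logical portmask
+ *     rtk_port_isolation_set(UTP_PORT0, &permit);
+ */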
+
+/* Function Name:
+ *      rtk_port_isolation_get
+ * Description:
+ *      Get permitted port isolation portmask
+ * Input:
+ *      port - Port id.
+ * Output:
+ *      pPortmask - Permit port mask
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_PORT_ID      - Invalid port number.
+ * Note:
+ *      This API gets the permitted portmask of the given port.
+ *      A port can only transmit packets to ports included in its permitted portmask.
+ */
+rtk_api_ret_t rtk_port_isolation_get(rtk_port_t port, rtk_portmask_t *pPortmask)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_port_isolation_get(port, pPortmask);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_port_rgmiiDelayExt_set
+ * Description:
+ *      Set RGMII interface delay value for TX and RX.
+ * Input:
+ *      port    - external port id (EXT_PORT0 or EXT_PORT1)
+ *      txDelay - TX delay value, 1 for 2ns delay and 0 for no delay
+ *      rxDelay - RX delay value, 0~7 for delay setup.
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_INPUT        - Invalid input parameters.
+ * Note:
+ *      This API can set the RGMII delay of an external interface.
+ *      For TX there are 2 selections: no delay and a 2ns delay.
+ *      For RX there are 8 tuning steps: 0 for no delay up to 7 for maximum delay.
+ *      Note: this API should be called before rtk_port_macForceLinkExt_set().
+ */
+rtk_api_ret_t rtk_port_rgmiiDelayExt_set(rtk_port_t port, rtk_data_t txDelay, rtk_data_t rxDelay)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_port_rgmiiDelayExt_set(port, txDelay, rxDelay);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_port_rgmiiDelayExt_get
+ * Description:
+ *      Get RGMII interface delay value for TX and RX.
+ * Input:
+ *      port     - external port id (EXT_PORT0 or EXT_PORT1)
+ * Output:
+ *      pTxDelay - TX delay value
+ *      pRxDelay - RX delay value
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_INPUT        - Invalid input parameters.
+ * Note:
+ *      This API can get the RGMII delay of an external interface.
+ *      For TX there are 2 selections: no delay and a 2ns delay.
+ *      For RX there are 8 tuning steps: 0 for no delay up to 7 for maximum delay.
+ */
+rtk_api_ret_t rtk_port_rgmiiDelayExt_get(rtk_port_t port, rtk_data_t *pTxDelay, rtk_data_t *pRxDelay)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_port_rgmiiDelayExt_get(port, pTxDelay, pRxDelay);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_port_phyEnableAll_set
+ * Description:
+ *      Set all PHY enable status.
+ * Input:
+ *      enable - PHY Enable State.
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_ENABLE       - Invalid enable input.
+ * Note:
+ *      This API can set all PHY status.
+ *      The configuration of all PHY is as following:
+ *      - DISABLE
+ *      - ENABLE
+ */
+rtk_api_ret_t rtk_port_phyEnableAll_set(rtk_enable_t enable)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_port_phyEnableAll_set(enable);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_port_phyEnableAll_get
+ * Description:
+ *      Get all PHY enable status.
+ * Input:
+ *      None
+ * Output:
+ *      pEnable - PHY Enable State.
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ * Note:
+ *      This API can get all PHY status.
+ *      The configuration of all PHY is as following:
+ *      - DISABLE
+ *      - ENABLE
+ */
+rtk_api_ret_t rtk_port_phyEnableAll_get(rtk_enable_t *pEnable)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_port_phyEnableAll_get(pEnable);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_port_efid_set
+ * Description:
+ *      Set port-based enhanced filtering database
+ * Input:
+ *      port - Port id.
+ *      efid - Specified enhanced filtering database.
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK              - OK
+ *      RT_ERR_FAILED          - Failed
+ *      RT_ERR_SMI             - SMI access error
+ *      RT_ERR_L2_FID          - Invalid fid.
+ *      RT_ERR_INPUT           - Invalid input parameter.
+ *      RT_ERR_PORT_ID         - Invalid port ID.
+ * Note:
+ *      The API can set port-based enhanced filtering database.
+ */
+rtk_api_ret_t rtk_port_efid_set(rtk_port_t port, rtk_data_t efid)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_port_efid_set(port, efid);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_port_efid_get
+ * Description:
+ *      Get port-based enhanced filtering database
+ * Input:
+ *      port - Port id.
+ * Output:
+ *      pEfid - Specified enhanced filtering database.
+ * Return:
+ *      RT_ERR_OK              - OK
+ *      RT_ERR_FAILED          - Failed
+ *      RT_ERR_SMI             - SMI access error
+ *      RT_ERR_INPUT           - Invalid input parameters.
+ *      RT_ERR_PORT_ID         - Invalid port ID.
+ * Note:
+ *      The API can get port-based enhanced filtering database status.
+ */
+rtk_api_ret_t rtk_port_efid_get(rtk_port_t port, rtk_data_t *pEfid)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_port_efid_get(port, pEfid);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_port_phyComboPortMedia_set
+ * Description:
+ *      Set Combo port media type
+ * Input:
+ *      port    - Port id.
+ *      media   - Media (COPPER or FIBER)
+ * Output:
+ *      None.
+ * Return:
+ *      RT_ERR_OK               - OK
+ *      RT_ERR_FAILED           - Failed
+ *      RT_ERR_SMI              - SMI access error
+ *      RT_ERR_INPUT            - Invalid input parameters.
+ *      RT_ERR_PORT_ID          - Invalid port ID.
+ * Note:
+ *      The API can set the combo port media type.
+ */
+rtk_api_ret_t rtk_port_phyComboPortMedia_set(rtk_port_t port, rtk_port_media_t media)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_port_phyComboPortMedia_set(port, media);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_port_phyComboPortMedia_get
+ * Description:
+ *      Get Combo port media type
+ * Input:
+ *      port    - Port id.
+ * Output:
+ *      pMedia  - Media (COPPER or FIBER)
+ * Return:
+ *      RT_ERR_OK               - OK
+ *      RT_ERR_FAILED           - Failed
+ *      RT_ERR_SMI              - SMI access error
+ *      RT_ERR_INPUT            - Invalid input parameters.
+ *      RT_ERR_PORT_ID          - Invalid port ID.
+ * Note:
+ *      The API can get the combo port media type.
+ */
+rtk_api_ret_t rtk_port_phyComboPortMedia_get(rtk_port_t port, rtk_port_media_t *pMedia)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_port_phyComboPortMedia_get(port, pMedia);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_port_rtctEnable_set
+ * Description:
+ *      Enable RTCT test
+ * Input:
+ *      pPortmask    - Port mask of RTCT enabled port
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK               - OK
+ *      RT_ERR_FAILED           - Failed
+ *      RT_ERR_SMI              - SMI access error
+ *      RT_ERR_PORT_MASK        - Invalid port mask.
+ * Note:
+ *      The API can enable RTCT Test
+ */
+rtk_api_ret_t rtk_port_rtctEnable_set(rtk_portmask_t *pPortmask)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_port_rtctEnable_set(pPortmask);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_port_rtctDisable_set
+ * Description:
+ *      Disable RTCT test
+ * Input:
+ *      pPortmask    - Port mask of RTCT disabled port
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK               - OK
+ *      RT_ERR_FAILED           - Failed
+ *      RT_ERR_SMI              - SMI access error
+ *      RT_ERR_PORT_MASK        - Invalid port mask.
+ * Note:
+ *      The API can disable RTCT Test
+ */
+rtk_api_ret_t rtk_port_rtctDisable_set(rtk_portmask_t *pPortmask)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_port_rtctDisable_set(pPortmask);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_port_rtctResult_get
+ * Description:
+ *      Get the result of RTCT test
+ * Input:
+ *      port        - Port ID
+ * Output:
+ *      pRtctResult - The result of RTCT result
+ * Return:
+ *      RT_ERR_OK                   - OK
+ *      RT_ERR_FAILED               - Failed
+ *      RT_ERR_SMI                  - SMI access error
+ *      RT_ERR_PORT_ID              - Invalid port ID.
+ *      RT_ERR_PHY_RTCT_NOT_FINISH  - Test has not finished.
+ * Note:
+ *      The API can get RTCT test result.
+ *      An RTCT test may take up to 4.8 seconds to finish.
+ *      Thus, if this API returns RT_ERR_PHY_RTCT_NOT_FINISH or
+ *      another error code, the result cannot be referenced and
+ *      the user should call this API again until it returns
+ *      RT_ERR_OK.
+ *      The result is stored in pRtctResult->result.ge_result;
+ *      pRtctResult->linkType is unused.
+ *      The unit of channel length is 2.5cm, e.g. 300 means 300 * 2.5 = 750cm = 7.5m.
+ */
+rtk_api_ret_t rtk_port_rtctResult_get(rtk_port_t port, rtk_rtctResult_t *pRtctResult)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_port_rtctResult_get(port, pRtctResult);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
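+
+/*
+ * Illustrative usage sketch (not part of the SDK): run a cable test on
+ * UTP_PORT0 and poll for the result. UTP_PORT0 is an assumed logical port
+ * id and report_len_cm() is a hypothetical reporting helper; a production
+ * loop should also bound the number of retries.
+ *
+ *     rtk_portmask_t mask;
+ *     rtk_rtctResult_t res;
+ *     rtk_api_ret_t ret;
+ *
+ *     memset(&mask, 0x00, sizeof(mask));
+ *     mask.bits[0] = (1 << UTP_PORT0);
+ *     rtk_port_rtctEnable_set(&mask);
+ *     do {                               // the test may take up to ~4.8s
+ *         ret = rtk_port_rtctResult_get(UTP_PORT0, &res);
+ *     } while (ret == RT_ERR_PHY_RTCT_NOT_FINISH);
+ *     if (ret == RT_ERR_OK)
+ *         report_len_cm(res.result.ge_result.channelALen * 5 / 2);
+ */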
+
+/* Function Name:
+ *      rtk_port_sds_reset
+ * Description:
+ *      Reset Serdes
+ * Input:
+ *      port        - Port ID
+ * Output:
+ *      None.
+ * Return:
+ *      RT_ERR_OK                   - OK
+ *      RT_ERR_FAILED               - Failed
+ *      RT_ERR_SMI                  - SMI access error
+ *      RT_ERR_PORT_ID              - Invalid port ID.
+ * Note:
+ *      The API can reset Serdes
+ */
+rtk_api_ret_t rtk_port_sds_reset(rtk_port_t port)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_port_sds_reset(port);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_port_sgmiiLinkStatus_get
+ * Description:
+ *      Get SGMII status
+ * Input:
+ *      port        - Port ID
+ * Output:
+ *      pSignalDetect   - Signal detect
+ *      pSync           - Sync
+ *      pLink           - Link
+ * Return:
+ *      RT_ERR_OK                   - OK
+ *      RT_ERR_FAILED               - Failed
+ *      RT_ERR_SMI                  - SMI access error
+ *      RT_ERR_PORT_ID              - Invalid port ID.
+ * Note:
+ *      The API can get the SGMII link status.
+ */
+rtk_api_ret_t rtk_port_sgmiiLinkStatus_get(rtk_port_t port, rtk_data_t *pSignalDetect, rtk_data_t *pSync, rtk_port_linkStatus_t *pLink)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_port_sgmiiLinkStatus_get(port, pSignalDetect, pSync, pLink);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_port_sgmiiNway_set
+ * Description:
+ *      Configure SGMII/HSGMII port Nway state
+ * Input:
+ *      port        - Port ID
+ *      state       - Nway state
+ * Output:
+ *      None.
+ * Return:
+ *      RT_ERR_OK                   - OK
+ *      RT_ERR_FAILED               - Failed
+ *      RT_ERR_SMI                  - SMI access error
+ *      RT_ERR_PORT_ID              - Invalid port ID.
+ * Note:
+ *      The API configures the SGMII/HSGMII port Nway state.
+ */
+rtk_api_ret_t rtk_port_sgmiiNway_set(rtk_port_t port, rtk_enable_t state)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_port_sgmiiNway_set(port, state);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_port_sgmiiNway_get
+ * Description:
+ *      Get SGMII/HSGMII port Nway state
+ * Input:
+ *      port        - Port ID
+ * Output:
+ *      pState      - Nway state
+ * Return:
+ *      RT_ERR_OK                   - OK
+ *      RT_ERR_FAILED               - Failed
+ *      RT_ERR_SMI                  - SMI access error
+ *      RT_ERR_PORT_ID              - Invalid port ID.
+ * Note:
+ *      The API can get SGMII/HSGMII port Nway state
+ */
+rtk_api_ret_t rtk_port_sgmiiNway_get(rtk_port_t port, rtk_enable_t *pState)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_port_sgmiiNway_get(port, pState);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+
diff -Nruw linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/port.h linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/port.h
--- linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/port.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/port.h	2019-01-22 16:16:24.711257346 +0100
@@ -0,0 +1,962 @@
+ /*
+ * Copyright (C) 2013 Realtek Semiconductor Corp.
+ * All Rights Reserved.
+ *
+ * This program is the proprietary software of Realtek Semiconductor
+ * Corporation and/or its licensors, and only be used, duplicated,
+ * modified or distributed under the authorized license from Realtek.
+ *
+ * ANY USE OF THE SOFTWARE OTHER THAN AS AUTHORIZED UNDER
+ * THIS LICENSE OR COPYRIGHT LAW IS PROHIBITED.
+ *
+ * Purpose : RTL8367/RTL8367C switch high-level API
+ *
+ * Feature : The file includes the port module high-layer API definition
+ *
+ */
+
+#ifndef __RTK_API_PORT_H__
+#define __RTK_API_PORT_H__
+
+/*
+ * Data Type Declaration
+ */
+
+#define PHY_CONTROL_REG                             0
+#define PHY_STATUS_REG                              1
+#define PHY_AN_ADVERTISEMENT_REG                    4
+#define PHY_AN_LINKPARTNER_REG                      5
+#define PHY_1000_BASET_CONTROL_REG                  9
+#define PHY_1000_BASET_STATUS_REG                   10
+#define PHY_RESOLVED_REG                            26
+
+#define RTK_EFID_MAX                                0x7
+
+#define RTK_FIBER_FORCE_1000M                       3
+#define RTK_FIBER_FORCE_100M                        5
+#define RTK_FIBER_FORCE_100M1000M                   7
+
+#define RTK_INDRECT_ACCESS_CRTL                     0x1f00
+#define RTK_INDRECT_ACCESS_STATUS                   0x1f01
+#define RTK_INDRECT_ACCESS_ADDRESS                  0x1f02
+#define RTK_INDRECT_ACCESS_WRITE_DATA               0x1f03
+#define RTK_INDRECT_ACCESS_READ_DATA                0x1f04
+#define RTK_INDRECT_ACCESS_DELAY                    0x1f80
+#define RTK_INDRECT_ACCESS_BURST                    0x1f81
+#define RTK_RW_MASK                                 0x2
+#define RTK_CMD_MASK                                0x1
+#define RTK_PHY_BUSY_OFFSET                         2
+
+
+typedef enum rtk_mode_ext_e
+{
+    MODE_EXT_DISABLE = 0,
+    MODE_EXT_RGMII,
+    MODE_EXT_MII_MAC,
+    MODE_EXT_MII_PHY,
+    MODE_EXT_TMII_MAC,
+    MODE_EXT_TMII_PHY,
+    MODE_EXT_GMII,
+    MODE_EXT_RMII_MAC,
+    MODE_EXT_RMII_PHY,
+    MODE_EXT_SGMII,
+    MODE_EXT_HSGMII,
+    MODE_EXT_1000X_100FX,
+    MODE_EXT_1000X,
+    MODE_EXT_100FX,
+    MODE_EXT_RGMII_2,
+    MODE_EXT_MII_MAC_2,
+    MODE_EXT_MII_PHY_2,
+    MODE_EXT_TMII_MAC_2,
+    MODE_EXT_TMII_PHY_2,
+    MODE_EXT_RMII_MAC_2,
+    MODE_EXT_RMII_PHY_2,
+    MODE_EXT_END
+} rtk_mode_ext_t;
+
+typedef enum rtk_port_duplex_e
+{
+    PORT_HALF_DUPLEX = 0,
+    PORT_FULL_DUPLEX,
+    PORT_DUPLEX_END
+} rtk_port_duplex_t;
+
+typedef enum rtk_port_linkStatus_e
+{
+    PORT_LINKDOWN = 0,
+    PORT_LINKUP,
+    PORT_LINKSTATUS_END
+} rtk_port_linkStatus_t;
+
+typedef struct  rtk_port_mac_ability_s
+{
+    rtk_uint32 forcemode;
+    rtk_uint32 speed;
+    rtk_uint32 duplex;
+    rtk_uint32 link;
+    rtk_uint32 nway;
+    rtk_uint32 txpause;
+    rtk_uint32 rxpause;
+}rtk_port_mac_ability_t;
+
+typedef struct rtk_port_phy_ability_s
+{
+    rtk_uint32    AutoNegotiation;  /*PHY register 0.12 setting for auto-negotiation process*/
+    rtk_uint32    Half_10;          /*PHY register 4.5 setting for 10BASE-TX half duplex capable*/
+    rtk_uint32    Full_10;          /*PHY register 4.6 setting for 10BASE-TX full duplex capable*/
+    rtk_uint32    Half_100;         /*PHY register 4.7 setting for 100BASE-TX half duplex capable*/
+    rtk_uint32    Full_100;         /*PHY register 4.8 setting for 100BASE-TX full duplex capable*/
+    rtk_uint32    Full_1000;        /*PHY register 9.9 setting for 1000BASE-T full duplex capable*/
+    rtk_uint32    FC;               /*PHY register 4.10 setting for flow control capability*/
+    rtk_uint32    AsyFC;            /*PHY register 4.11 setting for asymmetric flow control capability*/
+} rtk_port_phy_ability_t;
+
+typedef rtk_uint32  rtk_port_phy_data_t;     /* phy page  */
+
+typedef enum rtk_port_phy_mdix_mode_e
+{
+    PHY_AUTO_CROSSOVER_MODE= 0,
+    PHY_FORCE_MDI_MODE,
+    PHY_FORCE_MDIX_MODE,
+    PHY_FORCE_MODE_END
+} rtk_port_phy_mdix_mode_t;
+
+typedef enum rtk_port_phy_mdix_status_e
+{
+    PHY_STATUS_AUTO_MDI_MODE= 0,
+    PHY_STATUS_AUTO_MDIX_MODE,
+    PHY_STATUS_FORCE_MDI_MODE,
+    PHY_STATUS_FORCE_MDIX_MODE,
+    PHY_STATUS_FORCE_MODE_END
+} rtk_port_phy_mdix_status_t;
+
+typedef rtk_uint32  rtk_port_phy_page_t;     /* phy page  */
+
+typedef enum rtk_port_phy_reg_e
+{
+    PHY_REG_CONTROL             = 0,
+    PHY_REG_STATUS,
+    PHY_REG_IDENTIFIER_1,
+    PHY_REG_IDENTIFIER_2,
+    PHY_REG_AN_ADVERTISEMENT,
+    PHY_REG_AN_LINKPARTNER,
+    PHY_REG_1000_BASET_CONTROL  = 9,
+    PHY_REG_1000_BASET_STATUS,
+    PHY_REG_END                 = 32
+} rtk_port_phy_reg_t;
+
+typedef enum rtk_port_phy_test_mode_e
+{
+    PHY_TEST_MODE_NORMAL= 0,
+    PHY_TEST_MODE_1,
+    PHY_TEST_MODE_2,
+    PHY_TEST_MODE_3,
+    PHY_TEST_MODE_4,
+    PHY_TEST_MODE_END
+} rtk_port_phy_test_mode_t;
+
+typedef enum rtk_port_speed_e
+{
+    PORT_SPEED_10M = 0,
+    PORT_SPEED_100M,
+    PORT_SPEED_1000M,
+    PORT_SPEED_500M,
+    PORT_SPEED_2500M,
+    PORT_SPEED_END
+} rtk_port_speed_t;
+
+typedef enum rtk_port_media_e
+{
+    PORT_MEDIA_COPPER = 0,
+    PORT_MEDIA_FIBER,
+    PORT_MEDIA_END
+}rtk_port_media_t;
+
+typedef struct rtk_rtctResult_s
+{
+    rtk_port_speed_t    linkType;
+    union
+    {
+        struct fe_result_s
+        {
+            rtk_uint32      isRxShort;
+            rtk_uint32      isTxShort;
+            rtk_uint32      isRxOpen;
+            rtk_uint32      isTxOpen;
+            rtk_uint32      isRxMismatch;
+            rtk_uint32      isTxMismatch;
+            rtk_uint32      isRxLinedriver;
+            rtk_uint32      isTxLinedriver;
+            rtk_uint32      rxLen;
+            rtk_uint32      txLen;
+        } fe_result;
+
+        struct ge_result_s
+        {
+            rtk_uint32      channelAShort;
+            rtk_uint32      channelBShort;
+            rtk_uint32      channelCShort;
+            rtk_uint32      channelDShort;
+
+            rtk_uint32      channelAOpen;
+            rtk_uint32      channelBOpen;
+            rtk_uint32      channelCOpen;
+            rtk_uint32      channelDOpen;
+
+            rtk_uint32      channelAMismatch;
+            rtk_uint32      channelBMismatch;
+            rtk_uint32      channelCMismatch;
+            rtk_uint32      channelDMismatch;
+
+            rtk_uint32      channelALinedriver;
+            rtk_uint32      channelBLinedriver;
+            rtk_uint32      channelCLinedriver;
+            rtk_uint32      channelDLinedriver;
+
+            rtk_uint32      channelALen;
+            rtk_uint32      channelBLen;
+            rtk_uint32      channelCLen;
+            rtk_uint32      channelDLen;
+        } ge_result;
+    }result;
+} rtk_rtctResult_t;
+
+/* Function Name:
+ *      rtk_port_phyAutoNegoAbility_set
+ * Description:
 *      Set the desired Ethernet PHY auto-negotiation ability.
+ * Input:
+ *      port        - port id.
+ *      pAbility    - Ability structure
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK               - OK
+ *      RT_ERR_FAILED           - Failed
+ *      RT_ERR_SMI              - SMI access error
+ *      RT_ERR_PORT_ID          - Invalid port number.
+ *      RT_ERR_PHY_REG_ID       - Invalid PHY address
+ *      RT_ERR_INPUT            - Invalid input parameters.
+ *      RT_ERR_BUSYWAIT_TIMEOUT - PHY access busy
+ * Note:
+ *      If the Full_1000 bit is set to 1, AutoNegotiation is automatically set to 1. While both
+ *      AutoNegotiation and Full_1000 are set to 0, the PHY speed and duplex are selected
+ *      following the 100F > 100H > 10F > 10H priority sequence.
+ */
+extern rtk_api_ret_t rtk_port_phyAutoNegoAbility_set(rtk_port_t port, rtk_port_phy_ability_t *pAbility);
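+
+/*
+ * Usage sketch (illustrative only, not part of the Realtek SDK): advertise
+ * full 10/100/1000 with symmetric flow control on a UTP port. The helper
+ * name is hypothetical. Per the note above, setting Full_1000 implicitly
+ * turns AutoNegotiation on.
+ */
+static inline rtk_api_ret_t example_advertise_all(rtk_port_t port)
+{
+    rtk_port_phy_ability_t ability;
+
+    ability.AutoNegotiation = 1;
+    ability.Half_10   = 1;
+    ability.Full_10   = 1;
+    ability.Half_100  = 1;
+    ability.Full_100  = 1;
+    ability.Full_1000 = 1;
+    ability.FC        = 1;  /* symmetric pause */
+    ability.AsyFC     = 0;  /* no asymmetric pause */
+
+    return rtk_port_phyAutoNegoAbility_set(port, &ability);
+}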
+
+/* Function Name:
+ *      rtk_port_phyAutoNegoAbility_get
+ * Description:
+ *      Get PHY ability through PHY registers.
+ * Input:
+ *      port - Port id.
+ * Output:
+ *      pAbility - Ability structure
+ * Return:
+ *      RT_ERR_OK               - OK
+ *      RT_ERR_FAILED           - Failed
+ *      RT_ERR_SMI              - SMI access error
+ *      RT_ERR_PORT_ID          - Invalid port number.
+ *      RT_ERR_PHY_REG_ID       - Invalid PHY address
+ *      RT_ERR_INPUT            - Invalid input parameters.
+ *      RT_ERR_BUSYWAIT_TIMEOUT - PHY access busy
+ * Note:
 *      Get the capability of the specified PHY.
+ */
+extern rtk_api_ret_t rtk_port_phyAutoNegoAbility_get(rtk_port_t port, rtk_port_phy_ability_t *pAbility);
+
+/* Function Name:
+ *      rtk_port_phyForceModeAbility_set
+ * Description:
+ *      Set the port speed/duplex mode/pause/asy_pause in the PHY force mode.
+ * Input:
+ *      port        - port id.
+ *      pAbility    - Ability structure
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK               - OK
+ *      RT_ERR_FAILED           - Failed
+ *      RT_ERR_SMI              - SMI access error
+ *      RT_ERR_PORT_ID          - Invalid port number.
+ *      RT_ERR_PHY_REG_ID       - Invalid PHY address
+ *      RT_ERR_INPUT            - Invalid input parameters.
+ *      RT_ERR_BUSYWAIT_TIMEOUT - PHY access busy
+ * Note:
+ *      While both AutoNegotiation and Full_1000 are set to 0, the PHY speed and duplex are
+ *      selected following the 100F > 100H > 10F > 10H priority sequence.
+ */
+extern rtk_api_ret_t rtk_port_phyForceModeAbility_set(rtk_port_t port, rtk_port_phy_ability_t *pAbility);
+
+/* Function Name:
+ *      rtk_port_phyForceModeAbility_get
+ * Description:
+ *      Get PHY ability through PHY registers.
+ * Input:
+ *      port - Port id.
+ * Output:
+ *      pAbility - Ability structure
+ * Return:
+ *      RT_ERR_OK               - OK
+ *      RT_ERR_FAILED           - Failed
+ *      RT_ERR_SMI              - SMI access error
+ *      RT_ERR_PORT_ID          - Invalid port number.
+ *      RT_ERR_PHY_REG_ID       - Invalid PHY address
+ *      RT_ERR_INPUT            - Invalid input parameters.
+ *      RT_ERR_BUSYWAIT_TIMEOUT - PHY access busy
+ * Note:
 *      Get the capability of the specified PHY.
+ */
+extern rtk_api_ret_t rtk_port_phyForceModeAbility_get(rtk_port_t port, rtk_port_phy_ability_t *pAbility);
+
+/* Function Name:
+ *      rtk_port_phyStatus_get
+ * Description:
 *      Get Ethernet PHY link status
+ * Input:
+ *      port - Port id.
+ * Output:
+ *      linkStatus  - PHY link status
+ *      speed       - PHY link speed
+ *      duplex      - PHY duplex mode
+ * Return:
+ *      RT_ERR_OK               - OK
+ *      RT_ERR_FAILED           - Failed
+ *      RT_ERR_SMI              - SMI access error
+ *      RT_ERR_PORT_ID          - Invalid port number.
+ *      RT_ERR_PHY_REG_ID       - Invalid PHY address
+ *      RT_ERR_INPUT            - Invalid input parameters.
+ *      RT_ERR_BUSYWAIT_TIMEOUT - PHY access busy
+ * Note:
 *      The API returns the auto-negotiated link status, speed and duplex of the PHY.
+ */
+extern rtk_api_ret_t rtk_port_phyStatus_get(rtk_port_t port, rtk_port_linkStatus_t *pLinkStatus, rtk_port_speed_t *pSpeed, rtk_port_duplex_t *pDuplex);
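+
+/*
+ * Usage sketch (illustrative only): report whether a port has negotiated a
+ * 1000M full-duplex link. The helper name is hypothetical.
+ */
+static inline rtk_api_ret_t example_link_is_1000f(rtk_port_t port, rtk_uint32 *pIs1000F)
+{
+    rtk_api_ret_t         retVal;
+    rtk_port_linkStatus_t link;
+    rtk_port_speed_t      speed;
+    rtk_port_duplex_t     duplex;
+
+    if ((retVal = rtk_port_phyStatus_get(port, &link, &speed, &duplex)) != RT_ERR_OK)
+        return retVal;
+
+    *pIs1000F = (link == PORT_LINKUP) &&
+                (speed == PORT_SPEED_1000M) &&
+                (duplex == PORT_FULL_DUPLEX);
+    return RT_ERR_OK;
+}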
+
+/* Function Name:
+ *      rtk_port_macForceLink_set
+ * Description:
+ *      Set port force linking configuration.
+ * Input:
+ *      port            - port id.
+ *      pPortability    - port ability configuration
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_PORT_ID      - Invalid port number.
+ * Note:
+ *      This API can set Port/MAC force mode properties.
+ */
+extern rtk_api_ret_t rtk_port_macForceLink_set(rtk_port_t port, rtk_port_mac_ability_t *pPortability);
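+
+/*
+ * Usage sketch (illustrative only): force a port's MAC to 1000M full duplex,
+ * link up, with pause in both directions. The encoding of forcemode as
+ * 1 = forced is an assumption; the other fields reuse the enums defined
+ * above.
+ */
+static inline rtk_api_ret_t example_mac_force_1000f(rtk_port_t port)
+{
+    rtk_port_mac_ability_t mac;
+
+    mac.forcemode = 1;                  /* assumed: 1 = force mode */
+    mac.speed     = PORT_SPEED_1000M;
+    mac.duplex    = PORT_FULL_DUPLEX;
+    mac.link      = PORT_LINKUP;
+    mac.nway      = 0;                  /* no auto-negotiation */
+    mac.txpause   = 1;
+    mac.rxpause   = 1;
+
+    return rtk_port_macForceLink_set(port, &mac);
+}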
+
+/* Function Name:
+ *      rtk_port_macForceLink_get
+ * Description:
+ *      Get port force linking configuration.
+ * Input:
+ *      port - Port id.
+ * Output:
+ *      pPortability - port ability configuration
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_PORT_ID      - Invalid port number.
+ *      RT_ERR_INPUT        - Invalid input parameters.
+ * Note:
+ *      This API can get Port/MAC force mode properties.
+ */
+extern rtk_api_ret_t rtk_port_macForceLink_get(rtk_port_t port, rtk_port_mac_ability_t *pPortability);
+
+/* Function Name:
+ *      rtk_port_macForceLinkExt_set
+ * Description:
+ *      Set external interface force linking configuration.
+ * Input:
+ *      port            - external port ID
+ *      mode            - external interface mode
+ *      pPortability    - port ability configuration
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_INPUT        - Invalid input parameters.
+ * Note:
+ *      This API can set external interface force mode properties.
+ *      The external interface can be set to:
+ *      - MODE_EXT_DISABLE,
+ *      - MODE_EXT_RGMII,
+ *      - MODE_EXT_MII_MAC,
+ *      - MODE_EXT_MII_PHY,
+ *      - MODE_EXT_TMII_MAC,
+ *      - MODE_EXT_TMII_PHY,
+ *      - MODE_EXT_GMII,
+ *      - MODE_EXT_RMII_MAC,
+ *      - MODE_EXT_RMII_PHY,
+ *      - MODE_EXT_SGMII,
+ *      - MODE_EXT_HSGMII,
+ *      - MODE_EXT_1000X_100FX,
+ *      - MODE_EXT_1000X,
+ *      - MODE_EXT_100FX,
+ */
+extern rtk_api_ret_t rtk_port_macForceLinkExt_set(rtk_port_t port, rtk_mode_ext_t mode, rtk_port_mac_ability_t *pPortability);
+
+/* Function Name:
+ *      rtk_port_macForceLinkExt_get
+ * Description:
 *      Get external interface force linking configuration.
+ * Input:
+ *      port            - external port ID
+ * Output:
+ *      pMode           - external interface mode
+ *      pPortability    - port ability configuration
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_INPUT        - Invalid input parameters.
+ * Note:
+ *      This API can get external interface force mode properties.
+ */
+extern rtk_api_ret_t rtk_port_macForceLinkExt_get(rtk_port_t port, rtk_mode_ext_t *pMode, rtk_port_mac_ability_t *pPortability);
+
+/* Function Name:
+ *      rtk_port_macStatus_get
+ * Description:
+ *      Get port link status.
+ * Input:
+ *      port - Port id.
+ * Output:
+ *      pPortstatus - port ability configuration
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_PORT_ID      - Invalid port number.
+ * Note:
+ *      This API can get Port/PHY properties.
+ */
+extern rtk_api_ret_t rtk_port_macStatus_get(rtk_port_t port, rtk_port_mac_ability_t *pPortstatus);
+
+/* Function Name:
+ *      rtk_port_macLocalLoopbackEnable_set
+ * Description:
+ *      Set Port Local Loopback. (Redirect TX to RX.)
+ * Input:
+ *      port    - Port id.
+ *      enable  - Loopback state, 0:disable, 1:enable
+ * Output:
+ *      None.
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_PORT_ID      - Invalid port number.
+ * Note:
+ *      This API can enable/disable Local loopback in MAC.
+ *      For UTP ports, this API also enables the digital
+ *      loopback bit in the PHY register to keep the PHY and
+ *      MAC speeds in sync. For EXT ports, users need to force
+ *      the link state themselves.
+ */
+extern rtk_api_ret_t rtk_port_macLocalLoopbackEnable_set(rtk_port_t port, rtk_enable_t enable);
+
+/* Function Name:
+ *      rtk_port_macLocalLoopbackEnable_get
+ * Description:
+ *      Get Port Local Loopback. (Redirect TX to RX.)
+ * Input:
+ *      port    - Port id.
+ * Output:
+ *      pEnable  - Loopback state, 0:disable, 1:enable
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_PORT_ID      - Invalid port number.
+ * Note:
+ *      None.
+ */
+extern rtk_api_ret_t rtk_port_macLocalLoopbackEnable_get(rtk_port_t port, rtk_enable_t *pEnable);
+
+/* Function Name:
+ *      rtk_port_phyReg_set
+ * Description:
+ *      Set PHY register data of the specific port.
+ * Input:
+ *      port    - port id.
+ *      reg     - Register id
+ *      regData - Register data
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK               - OK
+ *      RT_ERR_FAILED           - Failed
+ *      RT_ERR_SMI              - SMI access error
+ *      RT_ERR_PORT_ID          - Invalid port number.
+ *      RT_ERR_PHY_REG_ID       - Invalid PHY address
+ *      RT_ERR_BUSYWAIT_TIMEOUT - PHY access busy
+ * Note:
+ *      This API can set PHY register data of the specific port.
+ */
+extern rtk_api_ret_t rtk_port_phyReg_set(rtk_port_t port, rtk_port_phy_reg_t reg, rtk_port_phy_data_t value);
+
+/* Function Name:
+ *      rtk_port_phyReg_get
+ * Description:
+ *      Get PHY register data of the specific port.
+ * Input:
+ *      port    - Port id.
+ *      reg     - Register id
+ * Output:
+ *      pData   - Register data
+ * Return:
+ *      RT_ERR_OK               - OK
+ *      RT_ERR_FAILED           - Failed
+ *      RT_ERR_SMI              - SMI access error
+ *      RT_ERR_PORT_ID          - Invalid port number.
+ *      RT_ERR_PHY_REG_ID       - Invalid PHY address
+ *      RT_ERR_BUSYWAIT_TIMEOUT - PHY access busy
+ * Note:
+ *      This API can get PHY register data of the specific port.
+ */
+extern rtk_api_ret_t rtk_port_phyReg_get(rtk_port_t port, rtk_port_phy_reg_t reg, rtk_port_phy_data_t *pData);
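+
+/*
+ * Usage sketch (illustrative only): read the 32-bit PHY identifier of a
+ * port from the standard ID registers. The helper name is hypothetical.
+ */
+static inline rtk_api_ret_t example_phy_id_read(rtk_port_t port, rtk_uint32 *pPhyId)
+{
+    rtk_api_ret_t       retVal;
+    rtk_port_phy_data_t id1, id2;
+
+    if ((retVal = rtk_port_phyReg_get(port, PHY_REG_IDENTIFIER_1, &id1)) != RT_ERR_OK)
+        return retVal;
+    if ((retVal = rtk_port_phyReg_get(port, PHY_REG_IDENTIFIER_2, &id2)) != RT_ERR_OK)
+        return retVal;
+
+    *pPhyId = ((id1 & 0xFFFF) << 16) | (id2 & 0xFFFF);
+    return RT_ERR_OK;
+}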
+
+/* Function Name:
+ *      rtk_port_backpressureEnable_set
+ * Description:
+ *      Set the half duplex backpressure enable status of the specific port.
+ * Input:
+ *      port    - port id.
+ *      enable  - Back pressure status.
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_PORT_ID      - Invalid port number.
+ *      RT_ERR_ENABLE       - Invalid enable input.
+ * Note:
+ *      This API can set the half duplex backpressure enable status of the specific port.
 *      The half duplex backpressure enable status of the port is as follows:
+ *      - DISABLE
+ *      - ENABLE
+ */
+extern rtk_api_ret_t rtk_port_backpressureEnable_set(rtk_port_t port, rtk_enable_t enable);
+
+/* Function Name:
+ *      rtk_port_backpressureEnable_get
+ * Description:
+ *      Get the half duplex backpressure enable status of the specific port.
+ * Input:
+ *      port - Port id.
+ * Output:
+ *      pEnable - Back pressure status.
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_PORT_ID      - Invalid port number.
+ * Note:
+ *      This API can get the half duplex backpressure enable status of the specific port.
 *      The half duplex backpressure enable status of the port is as follows:
+ *      - DISABLE
+ *      - ENABLE
+ */
+extern rtk_api_ret_t rtk_port_backpressureEnable_get(rtk_port_t port, rtk_enable_t *pEnable);
+
+/* Function Name:
+ *      rtk_port_adminEnable_set
+ * Description:
+ *      Set port admin configuration of the specific port.
+ * Input:
+ *      port    - port id.
 *      enable  - Port admin status.
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_PORT_ID      - Invalid port number.
+ *      RT_ERR_ENABLE       - Invalid enable input.
+ * Note:
+ *      This API can set port admin configuration of the specific port.
 *      The port admin configuration of the port is as follows:
+ *      - DISABLE
+ *      - ENABLE
+ */
+extern rtk_api_ret_t rtk_port_adminEnable_set(rtk_port_t port, rtk_enable_t enable);
+
+/* Function Name:
+ *      rtk_port_adminEnable_get
+ * Description:
 *      Get port admin configuration of the specific port.
+ * Input:
+ *      port - Port id.
+ * Output:
 *      pEnable - Port admin status.
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_PORT_ID      - Invalid port number.
+ * Note:
+ *      This API can get port admin configuration of the specific port.
 *      The port admin configuration of the port is as follows:
+ *      - DISABLE
+ *      - ENABLE
+ */
+extern rtk_api_ret_t rtk_port_adminEnable_get(rtk_port_t port, rtk_enable_t *pEnable);
+
+/* Function Name:
+ *      rtk_port_isolation_set
+ * Description:
+ *      Set permitted port isolation portmask
+ * Input:
+ *      port         - port id.
+ *      pPortmask    - Permit port mask
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_PORT_ID      - Invalid port number.
+ *      RT_ERR_PORT_MASK    - Invalid portmask.
+ * Note:
+ *      This API sets the portmask to which the specified port is permitted to transmit.
+ *      A port can only transmit packets to ports included in its permitted portmask.
+ */
+extern rtk_api_ret_t rtk_port_isolation_set(rtk_port_t port, rtk_portmask_t *pPortmask);
+
+/* Function Name:
+ *      rtk_port_isolation_get
+ * Description:
+ *      Get permitted port isolation portmask
+ * Input:
+ *      port - Port id.
+ * Output:
+ *      pPortmask - Permit port mask
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_PORT_ID      - Invalid port number.
+ * Note:
+ *      This API gets the portmask to which the specified port is permitted to transmit.
+ *      A port can only transmit packets to ports included in its permitted portmask.
+ */
+extern rtk_api_ret_t rtk_port_isolation_get(rtk_port_t port, rtk_portmask_t *pPortmask);
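+
+/*
+ * Usage sketch (illustrative only): apply an isolation portmask and read it
+ * back. The layout of rtk_portmask_t is defined elsewhere in this patch, so
+ * the caller is assumed to have built pPermit already.
+ */
+static inline rtk_api_ret_t example_isolation_apply(rtk_port_t port,
+                                                    rtk_portmask_t *pPermit,
+                                                    rtk_portmask_t *pReadBack)
+{
+    rtk_api_ret_t retVal;
+
+    if ((retVal = rtk_port_isolation_set(port, pPermit)) != RT_ERR_OK)
+        return retVal;
+
+    /* Read back so the caller can verify what the hardware accepted. */
+    return rtk_port_isolation_get(port, pReadBack);
+}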
+
+/* Function Name:
+ *      rtk_port_rgmiiDelayExt_set
+ * Description:
+ *      Set RGMII interface delay value for TX and RX.
+ * Input:
+ *      txDelay - TX delay value, 1 for delay 2ns and 0 for no-delay
+ *      rxDelay - RX delay value, 0~7 for delay setup.
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_INPUT        - Invalid input parameters.
+ * Note:
+ *      This API can set the RGMII delay of external interface 2.
+ *      For TX there are two selections: no delay and 2ns delay.
+ *      For RX there are 8 tuning steps: 0 for no delay up to 7 for maximum delay.
+ *      Note: this API should be called before rtk_port_macForceLinkExt_set()
+ *      (see the usage sketch below).
+ */
+extern rtk_api_ret_t rtk_port_rgmiiDelayExt_set(rtk_port_t port, rtk_data_t txDelay, rtk_data_t rxDelay);
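+
+/*
+ * Usage sketch (illustrative only): honour the ordering constraint above by
+ * programming the RGMII delays before forcing the external interface link.
+ * The delay values are board dependent; 1/4 here are placeholders, and the
+ * caller provides the filled-in MAC ability structure.
+ */
+static inline rtk_api_ret_t example_ext_rgmii_bringup(rtk_port_t extPort,
+                                                      rtk_port_mac_ability_t *pMac)
+{
+    rtk_api_ret_t retVal;
+
+    /* TX: 2ns delay (1 of 0..1), RX: step 4 of 0..7. */
+    if ((retVal = rtk_port_rgmiiDelayExt_set(extPort, 1, 4)) != RT_ERR_OK)
+        return retVal;
+
+    return rtk_port_macForceLinkExt_set(extPort, MODE_EXT_RGMII, pMac);
+}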
+
+/* Function Name:
+ *      rtk_port_rgmiiDelayExt_get
+ * Description:
+ *      Get RGMII interface delay value for TX and RX.
+ * Input:
+ *      None
+ * Output:
+ *      pTxDelay - TX delay value
+ *      pRxDelay - RX delay value
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_INPUT        - Invalid input parameters.
+ * Note:
+ *      This API can get the RGMII delay of external interface 2.
+ *      For TX there are two selections: no delay and 2ns delay.
+ *      For RX there are 8 tuning steps: 0 for no delay up to 7 for maximum delay.
+ */
+extern rtk_api_ret_t rtk_port_rgmiiDelayExt_get(rtk_port_t port, rtk_data_t *pTxDelay, rtk_data_t *pRxDelay);
+
+/* Function Name:
+ *      rtk_port_phyEnableAll_set
+ * Description:
+ *      Set all PHY enable status.
+ * Input:
+ *      enable - PHY Enable State.
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_ENABLE       - Invalid enable input.
+ * Note:
+ *      This API can set all PHY status.
 *      The configuration of all PHYs is as follows:
+ *      - DISABLE
+ *      - ENABLE
+ */
+extern rtk_api_ret_t rtk_port_phyEnableAll_set(rtk_enable_t enable);
+
+/* Function Name:
+ *      rtk_port_phyEnableAll_get
+ * Description:
+ *      Get all PHY enable status.
+ * Input:
+ *      None
+ * Output:
+ *      pEnable - PHY Enable State.
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ * Note:
 *      This API can get all PHY status.
 *      The configuration of all PHYs is as follows:
+ *      - DISABLE
+ *      - ENABLE
+ */
+extern rtk_api_ret_t rtk_port_phyEnableAll_get(rtk_enable_t *pEnable);
+
+/* Function Name:
+ *      rtk_port_efid_set
+ * Description:
+ *      Set port-based enhanced filtering database
+ * Input:
+ *      port - Port id.
+ *      efid - Specified enhanced filtering database.
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK              - OK
+ *      RT_ERR_FAILED          - Failed
+ *      RT_ERR_SMI             - SMI access error
+ *      RT_ERR_L2_FID          - Invalid fid.
+ *      RT_ERR_INPUT           - Invalid input parameter.
+ *      RT_ERR_PORT_ID         - Invalid port ID.
+ * Note:
+ *      The API can set port-based enhanced filtering database.
+ */
+extern rtk_api_ret_t rtk_port_efid_set(rtk_port_t port, rtk_data_t efid);
+
+/* Function Name:
+ *      rtk_port_efid_get
+ * Description:
+ *      Get port-based enhanced filtering database
+ * Input:
+ *      port - Port id.
+ * Output:
+ *      pEfid - Specified enhanced filtering database.
+ * Return:
+ *      RT_ERR_OK              - OK
+ *      RT_ERR_FAILED          - Failed
+ *      RT_ERR_SMI             - SMI access error
+ *      RT_ERR_INPUT           - Invalid input parameters.
+ *      RT_ERR_PORT_ID         - Invalid port ID.
+ * Note:
+ *      The API can get port-based enhanced filtering database status.
+ */
+extern rtk_api_ret_t rtk_port_efid_get(rtk_port_t port, rtk_data_t *pEfid);
+
+/* Function Name:
+ *      rtk_port_phyComboPortMedia_set
+ * Description:
+ *      Set Combo port media type
+ * Input:
+ *      port    - Port id. (Should be Port 4)
+ *      media   - Media (COPPER or FIBER)
+ * Output:
+ *      None.
+ * Return:
+ *      RT_ERR_OK               - OK
+ *      RT_ERR_FAILED           - Failed
+ *      RT_ERR_SMI              - SMI access error
+ *      RT_ERR_INPUT            - Invalid input parameters.
+ *      RT_ERR_PORT_ID          - Invalid port ID.
+ * Note:
 *      The API can set the Combo port media type.
+ */
+extern rtk_api_ret_t rtk_port_phyComboPortMedia_set(rtk_port_t port, rtk_port_media_t media);
+
+/* Function Name:
+ *      rtk_port_phyComboPortMedia_get
+ * Description:
+ *      Get Combo port media type
+ * Input:
+ *      port    - Port id. (Should be Port 4)
+ * Output:
+ *      pMedia  - Media (COPPER or FIBER)
+ * Return:
+ *      RT_ERR_OK               - OK
+ *      RT_ERR_FAILED           - Failed
+ *      RT_ERR_SMI              - SMI access error
+ *      RT_ERR_INPUT            - Invalid input parameters.
+ *      RT_ERR_PORT_ID          - Invalid port ID.
+ * Note:
 *      The API can get the Combo port media type.
+ */
+extern rtk_api_ret_t rtk_port_phyComboPortMedia_get(rtk_port_t port, rtk_port_media_t *pMedia);
+
+/* Function Name:
+ *      rtk_port_rtctEnable_set
+ * Description:
+ *      Enable RTCT test
+ * Input:
+ *      pPortmask    - Port mask of RTCT enabled port
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK               - OK
+ *      RT_ERR_FAILED           - Failed
+ *      RT_ERR_SMI              - SMI access error
+ *      RT_ERR_PORT_MASK        - Invalid port mask.
+ * Note:
+ *      The API can enable RTCT Test
+ */
+extern rtk_api_ret_t rtk_port_rtctEnable_set(rtk_portmask_t *pPortmask);
+
+/* Function Name:
+ *      rtk_port_rtctDisable_set
+ * Description:
+ *      Disable RTCT test
+ * Input:
+ *      pPortmask    - Port mask of RTCT disabled port
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK               - OK
+ *      RT_ERR_FAILED           - Failed
+ *      RT_ERR_SMI              - SMI access error
+ *      RT_ERR_PORT_MASK        - Invalid port mask.
+ * Note:
+ *      The API can disable RTCT Test
+ */
+rtk_api_ret_t rtk_port_rtctDisable_set(rtk_portmask_t *pPortmask);
+
+
+/* Function Name:
+ *      rtk_port_rtctResult_get
+ * Description:
+ *      Get the result of RTCT test
+ * Input:
+ *      port        - Port ID
+ * Output:
+ *      pRtctResult - The result of RTCT result
+ * Return:
+ *      RT_ERR_OK                   - OK
+ *      RT_ERR_FAILED               - Failed
+ *      RT_ERR_SMI                  - SMI access error
+ *      RT_ERR_PORT_ID              - Invalid port ID.
 *      RT_ERR_PHY_RTCT_NOT_FINISH  - Testing has not finished.
+ * Note:
+ *      The API can get RTCT test result.
+ *      The RTCT test may take up to 4.8 seconds to finish.
+ *      If this API returns RT_ERR_PHY_RTCT_NOT_FINISH or another
+ *      error code, the result cannot be referenced and the caller
+ *      should invoke this API again until it returns RT_ERR_OK.
+ *      The result is stored in pRtctResult->result.ge_result;
+ *      pRtctResult->linkType is unused.
+ *      The unit of channel length is 2.5 cm, e.g. 300 means 300 * 2.5 = 750 cm = 7.5 m.
+ */
+extern rtk_api_ret_t rtk_port_rtctResult_get(rtk_port_t port, rtk_rtctResult_t *pRtctResult);
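+
+/*
+ * Usage sketch (illustrative only): run RTCT on one port and convert the
+ * channel-A length to centimetres. pMask must already cover the port under
+ * test; a real caller should sleep between polls instead of spinning.
+ */
+static inline rtk_api_ret_t example_rtct_channel_a_cm(rtk_port_t port,
+                                                      rtk_portmask_t *pMask,
+                                                      rtk_uint32 *pLenCm)
+{
+    rtk_api_ret_t    retVal;
+    rtk_rtctResult_t result;
+
+    if ((retVal = rtk_port_rtctEnable_set(pMask)) != RT_ERR_OK)
+        return retVal;
+
+    /* The test may take up to 4.8 seconds to finish. */
+    do {
+        retVal = rtk_port_rtctResult_get(port, &result);
+    } while (retVal == RT_ERR_PHY_RTCT_NOT_FINISH);
+
+    if (retVal != RT_ERR_OK)
+        return retVal;
+
+    /* One length unit is 2.5 cm, so 300 units -> 750 cm. */
+    *pLenCm = (result.result.ge_result.channelALen * 25) / 10;
+    return RT_ERR_OK;
+}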
+
+/* Function Name:
+ *      rtk_port_sds_reset
+ * Description:
+ *      Reset Serdes
+ * Input:
+ *      port        - Port ID
+ * Output:
+ *      None.
+ * Return:
+ *      RT_ERR_OK                   - OK
+ *      RT_ERR_FAILED               - Failed
+ *      RT_ERR_SMI                  - SMI access error
+ *      RT_ERR_PORT_ID              - Invalid port ID.
+ * Note:
+ *      The API can reset Serdes
+ */
+extern rtk_api_ret_t rtk_port_sds_reset(rtk_port_t port);
+
+/* Function Name:
+ *      rtk_port_sgmiiLinkStatus_get
+ * Description:
+ *      Get SGMII status
+ * Input:
+ *      port        - Port ID
+ * Output:
+ *      pSignalDetect   - Signal detect
+ *      pSync           - Sync
+ *      pLink           - Link
+ * Return:
+ *      RT_ERR_OK                   - OK
+ *      RT_ERR_FAILED               - Failed
+ *      RT_ERR_SMI                  - SMI access error
+ *      RT_ERR_PORT_ID              - Invalid port ID.
+ * Note:
 *      The API can get the SGMII link status.
+ */
+extern rtk_api_ret_t rtk_port_sgmiiLinkStatus_get(rtk_port_t port, rtk_data_t *pSignalDetect, rtk_data_t *pSync, rtk_port_linkStatus_t *pLink);
+
+/* Function Name:
+ *      rtk_port_sgmiiNway_set
+ * Description:
+ *      Configure SGMII/HSGMII port Nway state
+ * Input:
+ *      port        - Port ID
+ *      state       - Nway state
+ * Output:
+ *      None.
+ * Return:
+ *      RT_ERR_OK                   - OK
+ *      RT_ERR_FAILED               - Failed
+ *      RT_ERR_SMI                  - SMI access error
+ *      RT_ERR_PORT_ID              - Invalid port ID.
+ * Note:
 *      The API configures the SGMII/HSGMII port Nway state.
+ */
+extern rtk_api_ret_t rtk_port_sgmiiNway_set(rtk_port_t port, rtk_enable_t state);
+
+/* Function Name:
+ *      rtk_port_sgmiiNway_get
+ * Description:
+ *      Get SGMII/HSGMII port Nway state
+ * Input:
+ *      port        - Port ID
+ * Output:
+ *      pState      - Nway state
+ * Return:
+ *      RT_ERR_OK                   - OK
+ *      RT_ERR_FAILED               - Failed
+ *      RT_ERR_SMI                  - SMI access error
+ *      RT_ERR_PORT_ID              - Invalid port ID.
+ * Note:
+ *      The API can get SGMII/HSGMII port Nway state
+ */
+extern rtk_api_ret_t rtk_port_sgmiiNway_get(rtk_port_t port, rtk_enable_t *pState);
+
+#endif /* __RTK_API_PORT_H__ */
+
+
+
diff -Nruw linux-4.4.115-fbx/drivers/misc/freebox./rtlapi/ptp.c linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/ptp.c
--- linux-4.4.115-fbx/drivers/misc/freebox./rtlapi/ptp.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/ptp.c	2019-01-22 16:16:24.711257346 +0100
@@ -0,0 +1,978 @@
+/*
+ * Copyright (C) 2013 Realtek Semiconductor Corp.
+ * All Rights Reserved.
+ *
+ * This program is the proprietary software of Realtek Semiconductor
+ * Corporation and/or its licensors, and may only be used, duplicated,
+ * modified or distributed under the authorized license from Realtek.
+ *
+ * ANY USE OF THE SOFTWARE OTHER THAN AS AUTHORIZED UNDER
+ * THIS LICENSE OR COPYRIGHT LAW IS PROHIBITED.
+ *
+ * $Revision: 39583 $
 * $Date: 2013-05-20 16:59:23 +0800 (Monday, 20 May 2013) $
+ *
+ * Purpose : RTK switch high-level API for RTL8367/RTL8367C
+ * Feature : Here is a list of all functions and variables in time module.
+ *
+ */
+
+#include <rtk_switch.h>
+#include <rtk_error.h>
+#include <ptp.h>
+#include <string.h>
+
+#include <rtl8367c_asicdrv.h>
+#include <rtl8367c_asicdrv_eav.h>
+
+static rtk_api_ret_t _rtk_ptp_init(void)
+{
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_ptp_mac_set(rtk_mac_t mac)
+{
+    rtk_api_ret_t retVal;
+    ether_addr_t sw_mac;
+
+    memcpy(sw_mac.octet, mac.octet, ETHER_ADDR_LEN);
+
+    if((retVal=rtl8367c_setAsicEavMacAddress(sw_mac))!=RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_ptp_mac_get(rtk_mac_t *pMac)
+{
+    rtk_api_ret_t retVal;
+    ether_addr_t sw_mac;
+
+    if((retVal=rtl8367c_getAsicEavMacAddress(&sw_mac))!=RT_ERR_OK)
+        return retVal;
+
+    memcpy(pMac->octet, sw_mac.octet, ETHER_ADDR_LEN);
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_ptp_tpid_set(rtk_ptp_tpid_t outerId, rtk_ptp_tpid_t innerId)
+{
+    rtk_api_ret_t retVal;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if ((outerId>RTK_MAX_NUM_OF_TPID) ||(innerId>RTK_MAX_NUM_OF_TPID))
+        return RT_ERR_INPUT;
+
+    if ((retVal = rtl8367c_setAsicEavTpid(outerId, innerId)) != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_ptp_tpid_get(rtk_ptp_tpid_t *pOuterId, rtk_ptp_tpid_t *pInnerId)
+{
+    rtk_api_ret_t retVal;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if ((retVal = rtl8367c_getAsicEavTpid(pOuterId, pInnerId)) != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_ptp_refTime_set(rtk_ptp_timeStamp_t timeStamp)
+{
+    rtk_api_ret_t retVal;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if (timeStamp.nsec > RTK_MAX_NUM_OF_NANO_SECOND)
+        return RT_ERR_INPUT;
+
+    if ((retVal = rtl8367c_setAsicEavSysTime(timeStamp.sec, timeStamp.nsec))!=RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_ptp_refTime_get(rtk_ptp_timeStamp_t *pTimeStamp)
+{
+    rtk_api_ret_t retVal;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if ((retVal = rtl8367c_getAsicEavSysTime(&pTimeStamp->sec, &pTimeStamp->nsec))!=RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_ptp_refTimeAdjust_set(rtk_ptp_sys_adjust_t sign, rtk_ptp_timeStamp_t timeStamp)
+{
+    rtk_api_ret_t retVal;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if (timeStamp.nsec > RTK_MAX_NUM_OF_NANO_SECOND)
+        return RT_ERR_INPUT;
+
+    if ((retVal = rtl8367c_setAsicEavSysTimeAdjust(sign, timeStamp.sec, timeStamp.nsec))!=RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_ptp_refTimeEnable_set(rtk_enable_t enable)
+{
+    rtk_api_ret_t retVal;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if (enable >= RTK_ENABLE_END)
+        return RT_ERR_ENABLE;
+
+    if ((retVal = rtl8367c_setAsicEavSysTimeCtrl(enable))!=RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_ptp_refTimeEnable_get(rtk_enable_t *pEnable)
+{
+    rtk_api_ret_t retVal;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if ((retVal = rtl8367c_getAsicEavSysTimeCtrl(pEnable))!=RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_ptp_portEnable_set(rtk_port_t port, rtk_enable_t enable)
+{
+    rtk_api_ret_t   retVal;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    /* Check port is PTP port */
+    RTK_CHK_PORT_IS_PTP(port);
+
+    if (enable>=RTK_ENABLE_END)
+        return RT_ERR_INPUT;
+
+    if ((retVal = rtl8367c_setAsicEavPortEnable(rtk_switch_port_L2P_get(port), enable)) != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_ptp_portEnable_get(rtk_port_t port, rtk_enable_t *pEnable)
+{
+    rtk_api_ret_t retVal;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    /* Check port is PTP port */
+    RTK_CHK_PORT_IS_PTP(port);
+
+    if ((retVal = rtl8367c_getAsicEavPortEnable(rtk_switch_port_L2P_get(port), pEnable)) != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_ptp_portTimestamp_get( rtk_port_t port, rtk_ptp_msgType_t type, rtk_ptp_info_t *pInfo)
+{
+    rtk_api_ret_t retVal;
+    rtl8367c_ptp_time_stamp_t time;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    /* Check port is PTP port */
+    RTK_CHK_PORT_IS_PTP(port);
+
+    if ((retVal = rtl8367c_getAsicEavPortTimeStamp(rtk_switch_port_L2P_get(port), type, &time)) != RT_ERR_OK)
+        return retVal;
+
+    pInfo->sequenceId = time.sequence_id;
+    pInfo->timeStamp.sec = time.second;
+    pInfo->timeStamp.nsec = time.nano_second;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_ptp_intControl_set(rtk_ptp_intType_t type, rtk_enable_t enable)
+{
+    rtk_api_ret_t retVal;
+    rtk_uint32 mask;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if (type>=PTP_INT_TYPE_END)
+        return RT_ERR_INPUT;
+
+    if (PTP_INT_TYPE_ALL!=type)
+    {
+        if ((retVal = rtl8367c_getAsicEavInterruptMask(&mask)) != RT_ERR_OK)
+            return retVal;
+
+        if (ENABLED == enable)
+            mask = mask | (1<<type);
+        else if (DISABLED == enable)
+            mask = mask & ~(1<<type);
+        else
+            return RT_ERR_INPUT;
+
+        if ((retVal = rtl8367c_setAsicEavInterruptMask(mask)) != RT_ERR_OK)
+            return retVal;
+    }
+    else
+    {
+        if (ENABLED == enable)
+            mask = RTK_PTP_INTR_MASK;
+        else if (DISABLED == enable)
+            mask = 0;
+        else
+            return RT_ERR_INPUT;
+
+        if ((retVal = rtl8367c_setAsicEavInterruptMask(mask)) != RT_ERR_OK)
+            return retVal;
+    }
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_ptp_intControl_get(rtk_ptp_intType_t type, rtk_enable_t *pEnable)
+{
+    rtk_api_ret_t retVal;
+    rtk_uint32 mask;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if (type>=PTP_INT_TYPE_ALL)
+        return RT_ERR_INPUT;
+
+    if ((retVal = rtl8367c_getAsicEavInterruptMask(&mask)) != RT_ERR_OK)
+        return retVal;
+
+    if (0 == (mask&(1<<type)))
+        *pEnable=DISABLED;
+    else
+        *pEnable=ENABLED;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_ptp_intStatus_get(rtk_ptp_intStatus_t *pStatusMask)
+{
+    rtk_api_ret_t   retVal;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if(NULL == pStatusMask)
+        return RT_ERR_NULL_POINTER;
+
+    if ((retVal = rtl8367c_getAsicEavInterruptStatus(pStatusMask)) != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_ptp_portIntStatus_set(rtk_port_t port, rtk_ptp_intStatus_t statusMask)
+{
+    rtk_api_ret_t retVal;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    /* Check port is PTP port */
+    RTK_CHK_PORT_IS_PTP(port);
+
+    if ((retVal = rtl8367c_setAsicEavPortInterruptStatus(rtk_switch_port_L2P_get(port), statusMask))!=RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_ptp_portIntStatus_get(rtk_port_t port, rtk_ptp_intStatus_t *pStatusMask)
+{
+    rtk_api_ret_t   retVal;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    /* Check port is PTP port */
+    RTK_CHK_PORT_IS_PTP(port);
+
+    if(NULL == pStatusMask)
+        return RT_ERR_NULL_POINTER;
+
+    if ((retVal = rtl8367c_getAsicEavPortInterruptStatus(rtk_switch_port_L2P_get(port), pStatusMask)) != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_ptp_portTrap_set(rtk_port_t port, rtk_enable_t enable)
+{
+    rtk_api_ret_t   retVal;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    /* Check Port Valid */
+    RTK_CHK_PORT_VALID(port);
+
+    if (enable>=RTK_ENABLE_END)
+        return RT_ERR_INPUT;
+
+    if ((retVal = rtl8367c_setAsicEavTrap(rtk_switch_port_L2P_get(port), enable)) != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_ptp_portTrap_get(rtk_port_t port, rtk_enable_t *pEnable)
+{
+    rtk_api_ret_t retVal;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    /* Check Port Valid */
+    RTK_CHK_PORT_VALID(port);
+
+    if ((retVal = rtl8367c_getAsicEavTrap(rtk_switch_port_L2P_get(port), pEnable)) != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+/* Function Name:
+ *      rtk_ptp_init
+ * Description:
+ *      PTP function initialization.
+ * Input:
+ *      None
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK              - OK
+ *      RT_ERR_FAILED          - Failed
+ *      RT_ERR_SMI             - SMI access error
+ * Note:
+ *      This API is used to initialize PTP status.
+ */
+rtk_api_ret_t rtk_ptp_init(void)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_ptp_init();
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_ptp_mac_set
+ * Description:
+ *      Configure PTP mac address.
+ * Input:
 *      mac - MAC address used to parse PTP packets.
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_INPUT        - Invalid input parameter.
+ * Note:
+ *      None
+ */
+rtk_api_ret_t rtk_ptp_mac_set(rtk_mac_t mac)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_ptp_mac_set(mac);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
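+
+/*
+ * Usage sketch (illustrative only, not part of the original driver): a
+ * minimal PTP bring-up sequence. The MAC value comes from the caller and
+ * the helper name is hypothetical.
+ */
+static rtk_api_ret_t example_ptp_bringup(rtk_mac_t mac)
+{
+    rtk_api_ret_t retVal;
+
+    if ((retVal = rtk_ptp_init()) != RT_ERR_OK)
+        return retVal;
+
+    if ((retVal = rtk_ptp_mac_set(mac)) != RT_ERR_OK)
+        return retVal;
+
+    /* Start the reference clock so timestamps advance. */
+    return rtk_ptp_refTimeEnable_set(ENABLED);
+}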
+
+/* Function Name:
+ *      rtk_ptp_mac_get
+ * Description:
+ *      Get PTP mac address.
+ * Input:
+ *      None
+ * Output:
 *      pMac - MAC address used to parse PTP packets.
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_INPUT        - Invalid input parameter.
+ * Note:
+ *      None
+ */
+rtk_api_ret_t rtk_ptp_mac_get(rtk_mac_t *pMac)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_ptp_mac_get(pMac);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_ptp_tpid_set
+ * Description:
+ *      Configure PTP accepted outer & inner tag TPID.
+ * Input:
+ *      outerId - Ether type used to parse S-tag frames on PTP ports.
+ *      innerId - Ether type used to parse C-tag frames on PTP ports.
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_INPUT        - Invalid input parameter.
+ * Note:
+ *      None
+ */
+rtk_api_ret_t rtk_ptp_tpid_set(rtk_ptp_tpid_t outerId, rtk_ptp_tpid_t innerId)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_ptp_tpid_set(outerId, innerId);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_ptp_tpid_get
+ * Description:
+ *      Get PTP accepted outer & inner tag TPID.
+ * Input:
+ *      None
+ * Output:
+ *      pOuterId - Ether type used to parse S-tag frames on PTP ports.
+ *      pInnerId - Ether type used to parse C-tag frames on PTP ports.
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ * Note:
+ *      None
+ */
+rtk_api_ret_t rtk_ptp_tpid_get(rtk_ptp_tpid_t *pOuterId, rtk_ptp_tpid_t *pInnerId)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_ptp_tpid_get(pOuterId, pInnerId);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_ptp_refTime_set
+ * Description:
+ *      Set the reference time of the specified device.
+ * Input:
+ *      timeStamp - reference timestamp value
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK
+ *      RT_ERR_FAILED
+ *      RT_ERR_INPUT    - invalid input parameter
+ * Applicable:
+ *      8390, 8380
+ * Note:
+ *      None
+ */
+rtk_api_ret_t rtk_ptp_refTime_set(rtk_ptp_timeStamp_t timeStamp)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_ptp_refTime_set(timeStamp);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_ptp_refTime_get
+ * Description:
+ *      Get the reference time of the specified device.
+ * Input:
+ * Output:
+ *      pTimeStamp - pointer buffer of the reference time
+ * Return:
+ *      RT_ERR_OK
+ *      RT_ERR_FAILED
+ *      RT_ERR_UNIT_ID      - invalid unit id
 *      RT_ERR_NOT_INIT     - The module is not initialized
+ *      RT_ERR_NULL_POINTER - input parameter may be null pointer
+ * Applicable:
+ *      8390, 8380
+ * Note:
+ *      None
+ */
+rtk_api_ret_t rtk_ptp_refTime_get(rtk_ptp_timeStamp_t *pTimeStamp)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_ptp_refTime_get(pTimeStamp);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_ptp_refTimeAdjust_set
+ * Description:
+ *      Adjust the reference time.
+ * Input:
 *      sign      - sign of the adjustment
+ *      timeStamp - reference timestamp value
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK
+ *      RT_ERR_FAILED
+ *      RT_ERR_UNIT_ID  - invalid unit id
 *      RT_ERR_NOT_INIT - The module is not initialized
+ *      RT_ERR_INPUT    - invalid input parameter
+ * Note:
+ *      sign=0 for positive adjustment, sign=1 for negative adjustment.
+ */
+rtk_api_ret_t rtk_ptp_refTimeAdjust_set(rtk_ptp_sys_adjust_t sign, rtk_ptp_timeStamp_t timeStamp)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_ptp_refTimeAdjust_set(sign, timeStamp);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
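+
+/*
+ * Usage sketch (illustrative only): step the reference clock back by 1.5 ms
+ * using a negative adjustment (sign = SYS_ADJUST_MINUS). The helper name is
+ * hypothetical.
+ */
+static rtk_api_ret_t example_ptp_step_back_1500us(void)
+{
+    rtk_ptp_timeStamp_t ts;
+
+    ts.sec  = 0;
+    ts.nsec = 1500000;  /* 1.5 ms in nanoseconds */
+
+    return rtk_ptp_refTimeAdjust_set(SYS_ADJUST_MINUS, ts);
+}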
+
+/* Function Name:
+ *      rtk_ptp_refTimeEnable_set
+ * Description:
+ *      Set the enable state of reference time of the specified device.
+ * Input:
+ *      enable - status
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK
+ *      RT_ERR_FAILED
+ *      RT_ERR_INPUT    - invalid input parameter
+ * Note:
+ *      None
+ */
+rtk_api_ret_t rtk_ptp_refTimeEnable_set(rtk_enable_t enable)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_ptp_refTimeEnable_set(enable);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_ptp_refTimeEnable_get
+ * Description:
+ *      Get the enable state of reference time of the specified device.
+ * Input:
+ * Output:
+ *      pEnable - status
+ * Return:
+ *      RT_ERR_OK
+ *      RT_ERR_FAILED
+ *      RT_ERR_UNIT_ID      - invalid unit id
 *      RT_ERR_NOT_INIT     - The module is not initialized
+ *      RT_ERR_NULL_POINTER - input parameter may be null pointer
+ * Applicable:
+ *      8390, 8380
+ * Note:
+ *      None
+ */
+rtk_api_ret_t rtk_ptp_refTimeEnable_get(rtk_enable_t *pEnable)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_ptp_refTimeEnable_get(pEnable);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_ptp_portEnable_set
+ * Description:
+ *      Set PTP status of the specified port.
+ * Input:
+ *      port   - port id
+ *      enable - status
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK
+ *      RT_ERR_FAILED
+ *      RT_ERR_PORT     - invalid port id
+ *      RT_ERR_INPUT    - invalid input parameter
+ * Note:
+ *      None
+ */
+rtk_api_ret_t rtk_ptp_portEnable_set(rtk_port_t port, rtk_enable_t enable)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_ptp_portEnable_set(port, enable);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_ptp_portEnable_get
+ * Description:
+ *      Get PTP status of the specified port.
+ * Input:
+ *      port    - port id
+ * Output:
+ *      pEnable - status
+ * Return:
+ *      RT_ERR_OK
+ *      RT_ERR_FAILED
+ *      RT_ERR_PORT         - invalid port id
+ *      RT_ERR_NULL_POINTER - input parameter may be null pointer
+ * Note:
+ *      None
+ */
+rtk_api_ret_t rtk_ptp_portEnable_get(rtk_port_t port, rtk_enable_t *pEnable)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_ptp_portEnable_get(port, pEnable);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_ptp_portTimestamp_get
+ * Description:
 *      Get the PTP timestamp according to the PTP identifier on the dedicated port of the specified device.
+ * Input:
+ *      port       - port id
+ *      type       - PTP message type
+ * Output:
+ *      pInfo      - pointer buffer of sequence ID and timestamp
+ * Return:
+ *      RT_ERR_OK
+ *      RT_ERR_FAILED
+ *      RT_ERR_PORT_ID      - invalid port id
+ *      RT_ERR_INPUT        - invalid input parameter
+ *      RT_ERR_NULL_POINTER - input parameter may be null pointer
+ * Applicable:
+ *      8390, 8380
+ * Note:
+ *      None
+ */
+rtk_api_ret_t rtk_ptp_portTimestamp_get( rtk_port_t port, rtk_ptp_msgType_t type, rtk_ptp_info_t *pInfo)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_ptp_portTimestamp_get(port, type, pInfo);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
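+
+/*
+ * Usage sketch (illustrative only): fetch the egress timestamp latched for
+ * the last transmitted Sync message; pInfo->sequenceId pairs the stamp with
+ * the Sync it belongs to. The helper name is hypothetical.
+ */
+static rtk_api_ret_t example_ptp_tx_sync_stamp(rtk_port_t port, rtk_ptp_info_t *pInfo)
+{
+    return rtk_ptp_portTimestamp_get(port, PTP_MSG_TYPE_TX_SYNC, pInfo);
+}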
+
+/* Function Name:
+ *      rtk_ptp_intControl_set
+ * Description:
+ *      Set PTP interrupt trigger status configuration.
+ * Input:
+ *      type - Interrupt type.
+ *      enable - Interrupt status.
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_INPUT        - Invalid input parameters.
+ *      RT_ERR_ENABLE       - Invalid enable input.
+ * Note:
+ *      The API can set PTP interrupt status configuration.
+ *      The interrupt trigger status is shown in the following:
+ *          PTP_INT_TYPE_TX_SYNC = 0,
+ *          PTP_INT_TYPE_TX_DELAY_REQ,
+ *          PTP_INT_TYPE_TX_PDELAY_REQ,
+ *          PTP_INT_TYPE_TX_PDELAY_RESP,
+ *          PTP_INT_TYPE_RX_SYNC,
+ *          PTP_INT_TYPE_RX_DELAY_REQ,
+ *          PTP_INT_TYPE_RX_PDELAY_REQ,
+ *          PTP_INT_TYPE_RX_PDELAY_RESP,
+ *          PTP_INT_TYPE_ALL,
+ */
+rtk_api_ret_t rtk_ptp_intControl_set(rtk_ptp_intType_t type, rtk_enable_t enable)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_ptp_intControl_set(type, enable);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
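+
+/*
+ * Usage sketch (illustrative only): enable the RX Sync interrupt, then read
+ * a port's pending PTP interrupt bits and write them back to clear them, as
+ * described in the rtk_ptp_portIntStatus_set() note below.
+ */
+static rtk_api_ret_t example_ptp_rx_sync_irq(rtk_port_t port)
+{
+    rtk_api_ret_t       retVal;
+    rtk_ptp_intStatus_t status;
+
+    if ((retVal = rtk_ptp_intControl_set(PTP_INT_TYPE_RX_SYNC, ENABLED)) != RT_ERR_OK)
+        return retVal;
+
+    if ((retVal = rtk_ptp_portIntStatus_get(port, &status)) != RT_ERR_OK)
+        return retVal;
+
+    return rtk_ptp_portIntStatus_set(port, status);
+}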
+
+/* Function Name:
+ *      rtk_ptp_intControl_get
+ * Description:
+ *      Get PTP interrupt trigger status configuration.
+ * Input:
+ *      type - Interrupt type.
+ * Output:
+ *      pEnable - Interrupt status.
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_INPUT        - Invalid input parameters.
+ * Note:
+ *      The API can get interrupt status configuration.
+ *      The interrupt trigger status is shown in the following:
+ *          PTP_INT_TYPE_TX_SYNC = 0,
+ *          PTP_INT_TYPE_TX_DELAY_REQ,
+ *          PTP_INT_TYPE_TX_PDELAY_REQ,
+ *          PTP_INT_TYPE_TX_PDELAY_RESP,
+ *          PTP_INT_TYPE_RX_SYNC,
+ *          PTP_INT_TYPE_RX_DELAY_REQ,
+ *          PTP_INT_TYPE_RX_PDELAY_REQ,
+ *          PTP_INT_TYPE_RX_PDELAY_RESP,
+ */
+rtk_api_ret_t rtk_ptp_intControl_get(rtk_ptp_intType_t type, rtk_enable_t *pEnable)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_ptp_intControl_get(type, pEnable);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_ptp_intStatus_get
+ * Description:
+ *      Get PTP port interrupt trigger status.
+ * Input:
 *      None
+ * Output:
+ *      pStatusMask - Interrupt status bit mask.
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_INPUT        - Invalid input parameters.
+ * Note:
+ *      The API can get interrupt trigger status when interrupt happened.
+ *      The interrupt trigger status is shown in the following:
+ *      - PORT 0  INT    (value[0] (Bit0))
+ *      - PORT 1  INT    (value[0] (Bit1))
+ *      - PORT 2  INT    (value[0] (Bit2))
+ *      - PORT 3  INT    (value[0] (Bit3))
+ *      - PORT 4  INT    (value[0] (Bit4))
+ */
+rtk_api_ret_t rtk_ptp_intStatus_get(rtk_ptp_intStatus_t *pStatusMask)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_ptp_intStatus_get(pStatusMask);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_ptp_portIntStatus_set
+ * Description:
 *      Clear PTP port interrupt trigger status.
+ * Input:
+ *      port           - physical port
+ *      statusMask - Interrupt status bit mask.
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK              - OK
+ *      RT_ERR_FAILED          - Failed
+ *      RT_ERR_SMI             - SMI access error
 *      RT_ERR_INPUT           - Invalid input parameters.
+ * Note:
+ *      The API can clean interrupt trigger status when interrupt happened.
+ *      The interrupt trigger status is shown in the following:
+ *      - PTP_INT_TYPE_TX_SYNC          (value[0] (Bit0))
+ *      - PTP_INT_TYPE_TX_DELAY_REQ     (value[0] (Bit1))
+ *      - PTP_INT_TYPE_TX_PDELAY_REQ    (value[0] (Bit2))
+ *      - PTP_INT_TYPE_TX_PDELAY_RESP   (value[0] (Bit3))
+ *      - PTP_INT_TYPE_RX_SYNC          (value[0] (Bit4))
+ *      - PTP_INT_TYPE_RX_DELAY_REQ     (value[0] (Bit5))
+ *      - PTP_INT_TYPE_RX_PDELAY_REQ    (value[0] (Bit6))
+ *      - PTP_INT_TYPE_RX_PDELAY_RESP   (value[0] (Bit7))
+ *      The status is cleared after executing this API.
+ */
+rtk_api_ret_t rtk_ptp_portIntStatus_set(rtk_port_t port, rtk_ptp_intStatus_t statusMask)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_ptp_portIntStatus_set(port, statusMask);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_ptp_portIntStatus_get
+ * Description:
+ *      Get PTP port interrupt trigger status.
+ * Input:
+ *      port           - physical port
+ * Output:
+ *      pStatusMask - Interrupt status bit mask.
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_INPUT        - Invalid input parameters.
+ * Note:
+ *      The API can get interrupt trigger status when interrupt happened.
+ *      The interrupt trigger status is shown in the following:
+ *      - PTP_INT_TYPE_TX_SYNC          (value[0] (Bit0))
+ *      - PTP_INT_TYPE_TX_DELAY_REQ     (value[0] (Bit1))
+ *      - PTP_INT_TYPE_TX_PDELAY_REQ    (value[0] (Bit2))
+ *      - PTP_INT_TYPE_TX_PDELAY_RESP   (value[0] (Bit3))
+ *      - PTP_INT_TYPE_RX_SYNC          (value[0] (Bit4))
+ *      - PTP_INT_TYPE_RX_DELAY_REQ     (value[0] (Bit5))
+ *      - PTP_INT_TYPE_RX_PDELAY_REQ    (value[0] (Bit6))
+ *      - PTP_INT_TYPE_RX_PDELAY_RESP   (value[0] (Bit7))
+ *
+ */
+rtk_api_ret_t rtk_ptp_portIntStatus_get(rtk_port_t port, rtk_ptp_intStatus_t *pStatusMask)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_ptp_portIntStatus_get(port, pStatusMask);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_ptp_portTrap_set
+ * Description:
+ *      Set PTP packet trap of the specified port.
+ * Input:
+ *      port   - port id
+ *      enable - status
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK
+ *      RT_ERR_FAILED
+ *      RT_ERR_PORT     - invalid port id
+ *      RT_ERR_INPUT    - invalid input parameter
+ * Note:
+ *      None
+ */
+rtk_api_ret_t rtk_ptp_portTrap_set(rtk_port_t port, rtk_enable_t enable)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_ptp_portTrap_set(port, enable);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_ptp_portTrap_get
+ * Description:
+ *      Get PTP packet trap of the specified port.
+ * Input:
+ *      port    - port id
+ * Output:
+ *      pEnable - status
+ * Return:
+ *      RT_ERR_OK
+ *      RT_ERR_FAILED
+ *      RT_ERR_PORT         - invalid port id
+ *      RT_ERR_NULL_POINTER - input parameter may be null pointer
+ * Note:
+ *      None
+ */
+rtk_api_ret_t rtk_ptp_portTrap_get(rtk_port_t port, rtk_enable_t *pEnable)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_ptp_portTrap_get(port, pEnable);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+
+
+
diff -Nruw linux-4.4.115-fbx/drivers/misc/freebox./rtlapi/ptp.h linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/ptp.h
--- linux-4.4.115-fbx/drivers/misc/freebox./rtlapi/ptp.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/ptp.h	2019-01-22 16:16:24.711257346 +0100
@@ -0,0 +1,513 @@
+/*
+ * Copyright (C) 2013 Realtek Semiconductor Corp.
+ * All Rights Reserved.
+ *
+ * This program is the proprietary software of Realtek Semiconductor
+ * Corporation and/or its licensors, and may only be used, duplicated,
+ * modified or distributed under the authorized license from Realtek.
+ *
+ * ANY USE OF THE SOFTWARE OTHER THAN AS AUTHORIZED UNDER
+ * THIS LICENSE OR COPYRIGHT LAW IS PROHIBITED.
+ *
+ * Purpose : RTL8367/RTL8367C switch high-level API
+ *
 * Feature : This file includes the time module high-layer API definition
+ *
+ */
+
+#ifndef __RTK_API_PTP_H__
+#define __RTK_API_PTP_H__
+
+/*
+ * Symbol Definition
+ */
+#define RTK_MAX_NUM_OF_NANO_SECOND                  0x3B9AC9FF
+#define RTK_PTP_INTR_MASK                           0xFF
+#define RTK_MAX_NUM_OF_TPID                         0xFFFF
+
+/* Message Type */
+typedef enum rtk_ptp_msgType_e
+{
+    PTP_MSG_TYPE_TX_SYNC = 0,
+    PTP_MSG_TYPE_TX_DELAY_REQ,
+    PTP_MSG_TYPE_TX_PDELAY_REQ,
+    PTP_MSG_TYPE_TX_PDELAY_RESP,
+    PTP_MSG_TYPE_RX_SYNC,
+    PTP_MSG_TYPE_RX_DELAY_REQ,
+    PTP_MSG_TYPE_RX_PDELAY_REQ,
+    PTP_MSG_TYPE_RX_PDELAY_RESP,
+    PTP_MSG_TYPE_END
+} rtk_ptp_msgType_t;
+
+typedef enum rtk_ptp_intType_e
+{
+    PTP_INT_TYPE_TX_SYNC = 0,
+    PTP_INT_TYPE_TX_DELAY_REQ,
+    PTP_INT_TYPE_TX_PDELAY_REQ,
+    PTP_INT_TYPE_TX_PDELAY_RESP,
+    PTP_INT_TYPE_RX_SYNC,
+    PTP_INT_TYPE_RX_DELAY_REQ,
+    PTP_INT_TYPE_RX_PDELAY_REQ,
+    PTP_INT_TYPE_RX_PDELAY_RESP,
+    PTP_INT_TYPE_ALL,
+    PTP_INT_TYPE_END
+}rtk_ptp_intType_t;
+
+typedef enum rtk_ptp_sys_adjust_e
+{
+    SYS_ADJUST_PLUS = 0,
+    SYS_ADJUST_MINUS,
+    SYS_ADJUST_END
+} rtk_ptp_sys_adjust_t;
+
+
+/* Reference Time */
+typedef struct rtk_ptp_timeStamp_s
+{
+    rtk_uint32 sec;
+    rtk_uint32 nsec;
+} rtk_ptp_timeStamp_t;
+
+typedef struct rtk_ptp_info_s
+{
+    rtk_uint32 sequenceId;
+    rtk_ptp_timeStamp_t   timeStamp;
+} rtk_ptp_info_t;
+
+typedef rtk_uint32 rtk_ptp_tpid_t;
+
+typedef rtk_uint32  rtk_ptp_intStatus_t;     /* interrupt status mask  */
+
+/*
+ * Data Declaration
+ */
+
+/*
+ * Function Declaration
+ */
+/* Function Name:
 *      rtk_ptp_init
+ * Description:
+ *      PTP function initialization.
+ * Input:
+ *      None
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK              - OK
+ *      RT_ERR_FAILED          - Failed
+ *      RT_ERR_SMI             - SMI access error
+ * Note:
 *      This API is used to initialize PTP status.
+ */
+extern rtk_api_ret_t rtk_ptp_init(void);
+
+/* Function Name:
+ *      rtk_ptp_mac_set
+ * Description:
+ *      Configure PTP mac address.
+ * Input:
 *      mac - MAC address used to parse PTP packets.
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_INPUT        - Invalid input parameter.
+ * Note:
+ *      None
+ */
+extern rtk_api_ret_t rtk_ptp_mac_set(rtk_mac_t mac);
+
+/* Function Name:
+ *      rtk_ptp_mac_get
+ * Description:
+ *      Get PTP mac address.
+ * Input:
+ *      None
+ * Output:
 *      pMac - MAC address used to parse PTP packets.
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_INPUT        - Invalid input parameter.
+ * Note:
+ *      None
+ */
+extern rtk_api_ret_t rtk_ptp_mac_get(rtk_mac_t *pMac);
+
+/* Function Name:
+ *      rtk_ptp_tpid_set
+ * Description:
+ *      Configure PTP accepted outer & inner tag TPID.
+ * Input:
+ *      outerId - Ether type used to parse S-tag frames on PTP ports.
+ *      innerId - Ether type used to parse C-tag frames on PTP ports.
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_INPUT        - Invalid input parameter.
+ * Note:
+ *      None
+ */
+extern rtk_api_ret_t rtk_ptp_tpid_set(rtk_ptp_tpid_t outerId, rtk_ptp_tpid_t innerId);
+
+/* Function Name:
+ *      rtk_ptp_tpid_get
+ * Description:
+ *      Get PTP accepted outer & inner tag TPID.
+ * Input:
+ *      None
+ * Output:
+ *      pOuterId - Ether type of S-tag frame parsing in PTP ports.
+ *      pInnerId - Ether type of C-tag frame parsing in PTP ports.
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ * Note:
+ *      None
+ */
+extern rtk_api_ret_t rtk_ptp_tpid_get(rtk_ptp_tpid_t *pOuterId, rtk_ptp_tpid_t *pInnerId);
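+
+/*
+ * Usage sketch (illustrative only): program the standard IEEE 802.1ad S-tag
+ * and 802.1Q C-tag Ether types so PTP frames are parsed inside single- and
+ * double-tagged traffic:
+ *
+ *     rtk_ptp_tpid_t outer = 0x88A8;
+ *     rtk_ptp_tpid_t inner = 0x8100;
+ *
+ *     if (rtk_ptp_tpid_set(outer, inner) != RT_ERR_OK)
+ *         handle the error
+ */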
+
+/* Function Name:
+ *      rtk_ptp_refTime_set
+ * Description:
+ *      Set the reference time of the specified device.
+ * Input:
+ *      timeStamp - reference timestamp value
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK
+ *      RT_ERR_FAILED
+ *      RT_ERR_INPUT    - invalid input parameter
+ * Applicable:
+ *      8390, 8380
+ * Note:
+ *      None
+ */
+extern rtk_api_ret_t rtk_ptp_refTime_set(rtk_ptp_timeStamp_t timeStamp);
+
+/* Function Name:
+ *      rtk_ptp_refTime_get
+ * Description:
+ *      Get the reference time of the specified device.
+ * Input:
+ *      None
+ * Output:
+ *      pTimeStamp - pointer buffer of the reference time
+ * Return:
+ *      RT_ERR_OK
+ *      RT_ERR_FAILED
+ *      RT_ERR_UNIT_ID      - invalid unit id
+ *      RT_ERR_NOT_INIT     - The module is not initialized
+ *      RT_ERR_NULL_POINTER - input parameter may be null pointer
+ * Applicable:
+ *      8390, 8380
+ * Note:
+ *      None
+ */
+extern rtk_api_ret_t rtk_ptp_refTime_get(rtk_ptp_timeStamp_t *pTimeStamp);
+
+/* Function Name:
+ *      rtk_ptp_refTimeAdjust_set
+ * Description:
+ *      Adjust the reference time.
+ * Input:
+ *      sign      - adjustment direction (SYS_ADJUST_PLUS or SYS_ADJUST_MINUS)
+ *      timeStamp - reference timestamp value
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK
+ *      RT_ERR_FAILED
+ *      RT_ERR_UNIT_ID  - invalid unit id
+ *      RT_ERR_NOT_INIT - The module is not initialized
+ *      RT_ERR_INPUT    - invalid input parameter
+ * Note:
+ *      Use SYS_ADJUST_PLUS (0) for a positive adjustment and SYS_ADJUST_MINUS (1) for a negative adjustment.
+ */
+extern rtk_api_ret_t rtk_ptp_refTimeAdjust_set(rtk_ptp_sys_adjust_t sign, rtk_ptp_timeStamp_t timeStamp);
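+
+/*
+ * Usage sketch (illustrative only): step the reference clock forward by
+ * 500 ms. nsec must not exceed RTK_MAX_NUM_OF_NANO_SECOND:
+ *
+ *     rtk_ptp_timeStamp_t delta;
+ *
+ *     delta.sec  = 0;
+ *     delta.nsec = 500000000;
+ *     if (rtk_ptp_refTimeAdjust_set(SYS_ADJUST_PLUS, delta) != RT_ERR_OK)
+ *         handle the error
+ */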
+
+/* Function Name:
+ *      rtk_ptp_refTimeEnable_set
+ * Description:
+ *      Set the enable state of reference time of the specified device.
+ * Input:
+ *      enable - status
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK
+ *      RT_ERR_FAILED
+ *      RT_ERR_INPUT    - invalid input parameter
+ * Note:
+ *      None
+ */
+extern rtk_api_ret_t rtk_ptp_refTimeEnable_set(rtk_enable_t enable);
+
+/* Function Name:
+ *      rtk_ptp_refTimeEnable_get
+ * Description:
+ *      Get the enable state of reference time of the specified device.
+ * Input:
+ *      None
+ * Output:
+ *      pEnable - status
+ * Return:
+ *      RT_ERR_OK
+ *      RT_ERR_FAILED
+ *      RT_ERR_UNIT_ID      - invalid unit id
+ *      RT_ERR_NOT_INIT     - The module is not initialized
+ *      RT_ERR_NULL_POINTER - input parameter may be null pointer
+ * Applicable:
+ *      8390, 8380
+ * Note:
+ *      None
+ */
+extern rtk_api_ret_t rtk_ptp_refTimeEnable_get(rtk_enable_t *pEnable);
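+
+/*
+ * Usage sketch (illustrative only; ENABLED is the rtk_enable_t value used
+ * elsewhere in this API): load an initial reference time, then start the
+ * clock:
+ *
+ *     rtk_ptp_timeStamp_t now;
+ *
+ *     now.sec  = 0;
+ *     now.nsec = 0;
+ *     if (rtk_ptp_refTime_set(now) != RT_ERR_OK)
+ *         handle the error
+ *     if (rtk_ptp_refTimeEnable_set(ENABLED) != RT_ERR_OK)
+ *         handle the error
+ */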
+
+/* Function Name:
+ *      rtk_ptp_portEnable_set
+ * Description:
+ *      Set PTP status of the specified port.
+ * Input:
+ *      port   - port id
+ *      enable - status
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK
+ *      RT_ERR_FAILED
+ *      RT_ERR_PORT     - invalid port id
+ *      RT_ERR_INPUT    - invalid input parameter
+ * Note:
+ *      None
+ */
+extern rtk_api_ret_t rtk_ptp_portEnable_set(rtk_port_t port, rtk_enable_t enable);
+
+/* Function Name:
+ *      rtk_ptp_portEnable_get
+ * Description:
+ *      Get PTP status of the specified port.
+ * Input:
+ *      port    - port id
+ * Output:
+ *      pEnable - status
+ * Return:
+ *      RT_ERR_OK
+ *      RT_ERR_FAILED
+ *      RT_ERR_PORT         - invalid port id
+ *      RT_ERR_NULL_POINTER - input parameter may be null pointer
+ * Note:
+ *      None
+ */
+extern rtk_api_ret_t rtk_ptp_portEnable_get(rtk_port_t port, rtk_enable_t *pEnable);
+
+/* Function Name:
+ *      rtk_ptp_portTimestamp_get
+ * Description:
+ *      Get the PTP timestamp according to the PTP identifier on the dedicated port.
+ * Input:
+ *      port       - port id
+ *      type       - PTP message type
+ * Output:
+ *      pInfo      - pointer buffer of sequence ID and timestamp
+ * Return:
+ *      RT_ERR_OK
+ *      RT_ERR_FAILED
+ *      RT_ERR_PORT_ID      - invalid port id
+ *      RT_ERR_INPUT        - invalid input parameter
+ *      RT_ERR_NULL_POINTER - input parameter may be null pointer
+ * Applicable:
+ *      8390, 8380
+ * Note:
+ *      None
+ */
+extern rtk_api_ret_t rtk_ptp_portTimestamp_get(rtk_port_t port, rtk_ptp_msgType_t type, rtk_ptp_info_t *pInfo);
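+
+/*
+ * Usage sketch (illustrative only): fetch the egress timestamp latched for
+ * the last transmitted Sync message on a given port:
+ *
+ *     rtk_ptp_info_t info;
+ *
+ *     if (rtk_ptp_portTimestamp_get(port, PTP_MSG_TYPE_TX_SYNC, &info) == RT_ERR_OK)
+ *         the time is in info.timeStamp.sec and info.timeStamp.nsec,
+ *         matched to the PTP header via info.sequenceId
+ */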
+
+/* Function Name:
+ *      rtk_ptp_intControl_set
+ * Description:
+ *      Set PTP interrupt trigger status configuration.
+ * Input:
+ *      type - Interrupt type.
+ *      enable - Interrupt status.
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_INPUT        - Invalid input parameters.
+ *      RT_ERR_ENABLE       - Invalid enable input.
+ * Note:
+ *      The API can set PTP interrupt status configuration.
+ *      The interrupt trigger status is shown in the following:
+ *          PTP_INT_TYPE_TX_SYNC = 0,
+ *          PTP_INT_TYPE_TX_DELAY_REQ,
+ *          PTP_INT_TYPE_TX_PDELAY_REQ,
+ *          PTP_INT_TYPE_TX_PDELAY_RESP,
+ *          PTP_INT_TYPE_RX_SYNC,
+ *          PTP_INT_TYPE_RX_DELAY_REQ,
+ *          PTP_INT_TYPE_RX_PDELAY_REQ,
+ *          PTP_INT_TYPE_RX_PDELAY_RESP,
+ *          PTP_INT_TYPE_ALL,
+ */
+extern rtk_api_ret_t rtk_ptp_intControl_set(rtk_ptp_intType_t type, rtk_enable_t enable);
+
+/* Function Name:
+ *      rtk_ptp_intControl_get
+ * Description:
+ *      Get PTP interrupt trigger status configuration.
+ * Input:
+ *      type - Interrupt type.
+ * Output:
+ *      pEnable - Interrupt status.
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_INPUT        - Invalid input parameters.
+ * Note:
+ *      The API can get interrupt status configuration.
+ *      The interrupt trigger status is shown in the following:
+ *          PTP_INT_TYPE_TX_SYNC = 0,
+ *          PTP_INT_TYPE_TX_DELAY_REQ,
+ *          PTP_INT_TYPE_TX_PDELAY_REQ,
+ *          PTP_INT_TYPE_TX_PDELAY_RESP,
+ *          PTP_INT_TYPE_RX_SYNC,
+ *          PTP_INT_TYPE_RX_DELAY_REQ,
+ *          PTP_INT_TYPE_RX_PDELAY_REQ,
+ *          PTP_INT_TYPE_RX_PDELAY_RESP,
+ */
+extern rtk_api_ret_t rtk_ptp_intControl_get(rtk_ptp_intType_t type, rtk_enable_t *pEnable);
+
+
+/* Function Name:
+ *      rtk_ptp_intStatus_get
+ * Description:
+ *      Get PTP port interrupt trigger status.
+ * Input:
+ *      None
+ * Output:
+ *      pStatusMask - Interrupt status bit mask (one bit per physical port).
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_INPUT        - Invalid input parameters.
+ * Note:
+ *      The API can get interrupt trigger status when interrupt happened.
+ *      The interrupt trigger status is shown in the following:
+ *      - PORT 0 INT    (value[0] (Bit0))
+ *      - PORT 1 INT    (value[0] (Bit1))
+ *      - PORT 2 INT    (value[0] (Bit2))
+ *      - PORT 3 INT    (value[0] (Bit3))
+ *      - PORT 4 INT    (value[0] (Bit4))
+ */
+extern rtk_api_ret_t rtk_ptp_intStatus_get(rtk_ptp_intStatus_t *pStatusMask);
+
+/* Function Name:
+ *      rtk_ptp_portIntStatus_set
+ * Description:
+ *      Clear PTP port interrupt trigger status.
+ * Input:
+ *      port       - physical port
+ *      statusMask - Interrupt status bit mask.
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_INPUT        - Invalid input parameters.
+ * Note:
+ *      The API cleans the interrupt trigger status after an interrupt has occurred.
+ *      The interrupt trigger status is shown in the following:
+ *      - PTP_INT_TYPE_TX_SYNC          (value[0] (Bit0))
+ *      - PTP_INT_TYPE_TX_DELAY_REQ     (value[0] (Bit1))
+ *      - PTP_INT_TYPE_TX_PDELAY_REQ    (value[0] (Bit2))
+ *      - PTP_INT_TYPE_TX_PDELAY_RESP   (value[0] (Bit3))
+ *      - PTP_INT_TYPE_RX_SYNC          (value[0] (Bit4))
+ *      - PTP_INT_TYPE_RX_DELAY_REQ     (value[0] (Bit5))
+ *      - PTP_INT_TYPE_RX_PDELAY_REQ    (value[0] (Bit6))
+ *      - PTP_INT_TYPE_RX_PDELAY_RESP   (value[0] (Bit7))
+ *      The status bits set in statusMask are cleared after executing this API.
+ */
+extern rtk_api_ret_t rtk_ptp_portIntStatus_set(rtk_port_t port, rtk_ptp_intStatus_t statusMask);
+
+/* Function Name:
+ *      rtk_ptp_portIntStatus_get
+ * Description:
+ *      Get PTP port interrupt trigger status.
+ * Input:
+ *      port        - physical port
+ * Output:
+ *      pStatusMask - Interrupt status bit mask.
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_INPUT        - Invalid input parameters.
+ * Note:
+ *      The API can get the interrupt trigger status after an interrupt has occurred.
+ *      The interrupt trigger status is shown in the following:
+ *      - PTP_INT_TYPE_TX_SYNC          (value[0] (Bit0))
+ *      - PTP_INT_TYPE_TX_DELAY_REQ     (value[0] (Bit1))
+ *      - PTP_INT_TYPE_TX_PDELAY_REQ    (value[0] (Bit2))
+ *      - PTP_INT_TYPE_TX_PDELAY_RESP   (value[0] (Bit3))
+ *      - PTP_INT_TYPE_RX_SYNC          (value[0] (Bit4))
+ *      - PTP_INT_TYPE_RX_DELAY_REQ     (value[0] (Bit5))
+ *      - PTP_INT_TYPE_RX_PDELAY_REQ    (value[0] (Bit6))
+ *      - PTP_INT_TYPE_RX_PDELAY_RESP   (value[0] (Bit7))
+ *
+ */
+extern rtk_api_ret_t rtk_ptp_portIntStatus_get(rtk_port_t port, rtk_ptp_intStatus_t *pStatusMask);
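+
+/*
+ * Handling sketch (illustrative only): a typical interrupt handler first
+ * reads the per-port summary, then reads and acknowledges the per-port
+ * message-type bits:
+ *
+ *     rtk_ptp_intStatus_t ports, events;
+ *
+ *     if (rtk_ptp_intStatus_get(&ports) == RT_ERR_OK)
+ *         for each port whose bit is set in ports:
+ *             rtk_ptp_portIntStatus_get(port, &events);
+ *             if (events & (1 << PTP_INT_TYPE_TX_SYNC))
+ *                 a TX Sync timestamp is pending on this port
+ *             rtk_ptp_portIntStatus_set(port, events);   clears the bits
+ */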
+
+/* Function Name:
+ *      rtk_ptp_portTrap_set
+ * Description:
+ *      Set PTP packet trap of the specified port.
+ * Input:
+ *      port   - port id
+ *      enable - status
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK
+ *      RT_ERR_FAILED
+ *      RT_ERR_PORT     - invalid port id
+ *      RT_ERR_INPUT    - invalid input parameter
+ * Note:
+ *      None
+ */
+extern rtk_api_ret_t rtk_ptp_portTrap_set(rtk_port_t port, rtk_enable_t enable);
+
+/* Function Name:
+ *      rtk_ptp_portTrap_get
+ * Description:
+ *      Get PTP packet trap of the specified port.
+ * Input:
+ *      port    - port id
+ * Output:
+ *      pEnable - status
+ * Return:
+ *      RT_ERR_OK
+ *      RT_ERR_FAILED
+ *      RT_ERR_PORT         - invalid port id
+ *      RT_ERR_NULL_POINTER - input parameter may be null pointer
+ * Note:
+ *      None
+ */
+extern rtk_api_ret_t rtk_ptp_portTrap_get(rtk_port_t port, rtk_enable_t *pEnable);
+
+#endif /* __RTK_API_PTP_H__ */
diff -Nruw linux-4.4.115-fbx/drivers/misc/freebox./rtlapi/qos.c linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/qos.c
--- linux-4.4.115-fbx/drivers/misc/freebox./rtlapi/qos.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/qos.c	2019-01-22 16:16:24.711257346 +0100
@@ -0,0 +1,1795 @@
+/*
+ * Copyright (C) 2013 Realtek Semiconductor Corp.
+ * All Rights Reserved.
+ *
+ * This program is the proprietary software of Realtek Semiconductor
+ * Corporation and/or its licensors, and only be used, duplicated,
+ * modified or distributed under the authorized license from Realtek.
+ *
+ * ANY USE OF THE SOFTWARE OTHER THAN AS AUTHORIZED UNDER
+ * THIS LICENSE OR COPYRIGHT LAW IS PROHIBITED.
+ *
+ * $Revision: 79429 $
+ * $Date: 2017-06-06 15:54:55 +0800 (Tue, 06 Jun 2017) $
+ *
+ * Purpose : RTK switch high-level API for RTL8367/RTL8367C
+ * Feature : Here is a list of all functions and variables in QoS module.
+ *
+ */
+
+#include <rtk_switch.h>
+#include <rtk_error.h>
+#include <qos.h>
+#include <string.h>
+
+#include <rtl8367c_asicdrv.h>
+#include <rtl8367c_asicdrv_qos.h>
+#include <rtl8367c_asicdrv_fc.h>
+#include <rtl8367c_asicdrv_scheduling.h>
+
+static rtk_api_ret_t _rtk_qos_init(rtk_queue_num_t queueNum)
+{
+    CONST_T rtk_uint16 g_priorityToQid[8][8] = {
+            {0, 0, 0, 0, 0, 0, 0, 0},
+            {0, 0, 0, 0, 7, 7, 7, 7},
+            {0, 0, 0, 0, 1, 1, 7, 7},
+            {0, 0, 1, 1, 2, 2, 7, 7},
+            {0, 0, 1, 1, 2, 3, 7, 7},
+            {0, 0, 1, 2, 3, 4, 7, 7},
+            {0, 0, 1, 2, 3, 4, 5, 7},
+            {0, 1, 2, 3, 4, 5, 6, 7}
+    };
+
+    CONST_T rtk_uint32 g_priorityDecision[8] = {0x01, 0x80, 0x04, 0x02, 0x20, 0x40, 0x10, 0x08};
+    CONST_T rtk_uint32 g_priorityRemap[8] = {0, 1, 2, 3, 4, 5, 6, 7};
+
+    rtk_api_ret_t retVal;
+    rtk_uint32 qmapidx;
+    rtk_uint32 priority;
+    rtk_uint32 priDec;
+    rtk_uint32 port;
+    rtk_uint32 dscp;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if (queueNum <= 0 || queueNum > RTK_MAX_NUM_OF_QUEUE)
+        return RT_ERR_QUEUE_NUM;
+
+    /*Set Output Queue Number*/
+    if (RTK_MAX_NUM_OF_QUEUE == queueNum)
+        qmapidx = 0;
+    else
+        qmapidx = queueNum;
+
+    RTK_SCAN_ALL_PHY_PORTMASK(port)
+    {
+        if ((retVal = rtl8367c_setAsicOutputQueueMappingIndex(port, qmapidx)) != RT_ERR_OK)
+            return retVal;
+    }
+
+    /*Set Priority to Qid*/
+    for (priority = 0; priority <= RTK_PRIMAX; priority++)
+    {
+        if ((retVal = rtl8367c_setAsicPriorityToQIDMappingTable(queueNum - 1, priority, g_priorityToQid[queueNum - 1][priority])) != RT_ERR_OK)
+            return retVal;
+    }
+
+    /*Set Flow Control Type to Ingress Flow Control*/
+    if ((retVal = rtl8367c_setAsicFlowControlSelect(FC_INGRESS)) != RT_ERR_OK)
+        return retVal;
+
+
+    /*Priority Decision Order*/
+    for (priDec = 0; priDec < PRIDEC_END; priDec++)
+    {
+        if ((retVal = rtl8367c_setAsicPriorityDecision(PRIDECTBL_IDX0, priDec, g_priorityDecision[priDec])) != RT_ERR_OK)
+            return retVal;
+        if ((retVal = rtl8367c_setAsicPriorityDecision(PRIDECTBL_IDX1, priDec, g_priorityDecision[priDec])) != RT_ERR_OK)
+            return retVal;
+    }
+
+    /*Set Port-based Priority to 0*/
+    RTK_SCAN_ALL_PHY_PORTMASK(port)
+    {
+        if ((retVal = rtl8367c_setAsicPriorityPortBased(port, 0)) != RT_ERR_OK)
+            return retVal;
+    }
+
+    /*Disable 1p Remarking*/
+    RTK_SCAN_ALL_PHY_PORTMASK(port)
+    {
+        if ((retVal = rtl8367c_setAsicRemarkingDot1pAbility(port, DISABLED)) != RT_ERR_OK)
+            return retVal;
+    }
+
+    /*Disable DSCP Remarking*/
+    if ((retVal = rtl8367c_setAsicRemarkingDscpAbility(DISABLED)) != RT_ERR_OK)
+        return retVal;
+
+    /*Set 1p & DSCP  Priority Remapping & Remarking*/
+    for (priority = 0; priority <= RTL8367C_PRIMAX; priority++)
+    {
+        if ((retVal = rtl8367c_setAsicPriorityDot1qRemapping(priority, g_priorityRemap[priority])) != RT_ERR_OK)
+            return retVal;
+
+        if ((retVal = rtl8367c_setAsicRemarkingDot1pParameter(priority, 0)) != RT_ERR_OK)
+            return retVal;
+
+        if ((retVal = rtl8367c_setAsicRemarkingDscpParameter(priority, 0)) != RT_ERR_OK)
+            return retVal;
+    }
+
+    /*Set DSCP Priority*/
+    for (dscp = 0; dscp <= 63; dscp++)
+    {
+        if ((retVal = rtl8367c_setAsicPriorityDscpBased(dscp, 0)) != RT_ERR_OK)
+            return retVal;
+    }
+
+    /* Finetune B/T value */
+    if((retVal = rtl8367c_setAsicReg(0x1722, 0x1158)) != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_qos_priSel_set(rtk_qos_priDecTbl_t index, rtk_priority_select_t *pPriDec)
+{
+    rtk_api_ret_t retVal;
+    rtk_uint32 port_pow;
+    rtk_uint32 dot1q_pow;
+    rtk_uint32 dscp_pow;
+    rtk_uint32 acl_pow;
+    rtk_uint32 svlan_pow;
+    rtk_uint32 cvlan_pow;
+    rtk_uint32 smac_pow;
+    rtk_uint32 dmac_pow;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if (index < 0 || index >= PRIDECTBL_END)
+        return RT_ERR_ENTRY_INDEX;
+
+    if (pPriDec->port_pri >= 8 || pPriDec->dot1q_pri >= 8 || pPriDec->acl_pri >= 8 || pPriDec->dscp_pri >= 8 ||
+       pPriDec->cvlan_pri >= 8 || pPriDec->svlan_pri >= 8 || pPriDec->dmac_pri >= 8 || pPriDec->smac_pri >= 8)
+        return RT_ERR_QOS_SEL_PRI_SOURCE;
+
+    /* Convert each priority level into its one-hot decision weight (2^pri) */
+    port_pow  = 1 << pPriDec->port_pri;
+    dot1q_pow = 1 << pPriDec->dot1q_pri;
+    acl_pow   = 1 << pPriDec->acl_pri;
+    dscp_pow  = 1 << pPriDec->dscp_pri;
+    svlan_pow = 1 << pPriDec->svlan_pri;
+    cvlan_pow = 1 << pPriDec->cvlan_pri;
+    dmac_pow  = 1 << pPriDec->dmac_pri;
+    smac_pow  = 1 << pPriDec->smac_pri;
+
+    if ((retVal = rtl8367c_setAsicPriorityDecision(index, PRIDEC_PORT, port_pow)) != RT_ERR_OK)
+        return retVal;
+
+    if ((retVal = rtl8367c_setAsicPriorityDecision(index, PRIDEC_ACL, acl_pow)) != RT_ERR_OK)
+        return retVal;
+
+    if ((retVal = rtl8367c_setAsicPriorityDecision(index, PRIDEC_DSCP, dscp_pow)) != RT_ERR_OK)
+        return retVal;
+
+    if ((retVal = rtl8367c_setAsicPriorityDecision(index, PRIDEC_1Q, dot1q_pow)) != RT_ERR_OK)
+        return retVal;
+
+    if ((retVal = rtl8367c_setAsicPriorityDecision(index, PRIDEC_1AD, svlan_pow)) != RT_ERR_OK)
+        return retVal;
+
+    if ((retVal = rtl8367c_setAsicPriorityDecision(index, PRIDEC_CVLAN, cvlan_pow)) != RT_ERR_OK)
+        return retVal;
+
+    if ((retVal = rtl8367c_setAsicPriorityDecision(index, PRIDEC_DA, dmac_pow)) != RT_ERR_OK)
+        return retVal;
+
+    if ((retVal = rtl8367c_setAsicPriorityDecision(index, PRIDEC_SA, smac_pow)) != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_qos_priSel_get(rtk_qos_priDecTbl_t index, rtk_priority_select_t *pPriDec)
+{
+
+    rtk_api_ret_t retVal;
+    rtk_int32 i;
+    rtk_uint32 port_pow;
+    rtk_uint32 dot1q_pow;
+    rtk_uint32 dscp_pow;
+    rtk_uint32 acl_pow;
+    rtk_uint32 svlan_pow;
+    rtk_uint32 cvlan_pow;
+    rtk_uint32 smac_pow;
+    rtk_uint32 dmac_pow;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if (index < 0 || index >= PRIDECTBL_END)
+        return RT_ERR_ENTRY_INDEX;
+
+    if ((retVal = rtl8367c_getAsicPriorityDecision(index, PRIDEC_PORT, &port_pow)) != RT_ERR_OK)
+        return retVal;
+
+    if ((retVal = rtl8367c_getAsicPriorityDecision(index, PRIDEC_ACL, &acl_pow)) != RT_ERR_OK)
+        return retVal;
+
+    if ((retVal = rtl8367c_getAsicPriorityDecision(index, PRIDEC_DSCP, &dscp_pow)) != RT_ERR_OK)
+        return retVal;
+
+    if ((retVal = rtl8367c_getAsicPriorityDecision(index, PRIDEC_1Q, &dot1q_pow)) != RT_ERR_OK)
+        return retVal;
+
+    if ((retVal = rtl8367c_getAsicPriorityDecision(index, PRIDEC_1AD, &svlan_pow)) != RT_ERR_OK)
+        return retVal;
+
+    if ((retVal = rtl8367c_getAsicPriorityDecision(index, PRIDEC_CVLAN, &cvlan_pow)) != RT_ERR_OK)
+        return retVal;
+
+    if ((retVal = rtl8367c_getAsicPriorityDecision(index, PRIDEC_DA, &dmac_pow)) != RT_ERR_OK)
+        return retVal;
+
+    if ((retVal = rtl8367c_getAsicPriorityDecision(index, PRIDEC_SA, &smac_pow)) != RT_ERR_OK)
+        return retVal;
+
+    for (i = 31; i >= 0; i--)
+    {
+        if (port_pow & (1 << i))
+        {
+            pPriDec->port_pri = i;
+            break;
+        }
+    }
+
+    for (i = 31; i >= 0; i--)
+    {
+        if (dot1q_pow & (1 << i))
+        {
+            pPriDec->dot1q_pri = i;
+            break;
+        }
+    }
+
+    for (i = 31; i >= 0; i--)
+    {
+        if (acl_pow & (1 << i))
+        {
+            pPriDec->acl_pri = i;
+            break;
+        }
+    }
+
+    for (i = 31; i >= 0; i--)
+    {
+        if (dscp_pow & (1 << i))
+        {
+            pPriDec->dscp_pri = i;
+            break;
+        }
+    }
+
+    for (i = 31; i >= 0; i--)
+    {
+        if (svlan_pow & (1 << i))
+        {
+            pPriDec->svlan_pri = i;
+            break;
+        }
+    }
+
+    for (i = 31; i >= 0; i--)
+    {
+        if (cvlan_pow & (1 << i))
+        {
+            pPriDec->cvlan_pri = i;
+            break;
+        }
+    }
+
+    for (i = 31; i >= 0; i--)
+    {
+        if (dmac_pow & (1 << i))
+        {
+            pPriDec->dmac_pri = i;
+            break;
+        }
+    }
+
+    for (i = 31; i >= 0; i--)
+    {
+        if (smac_pow & (1 << i))
+        {
+            pPriDec->smac_pri = i;
+            break;
+        }
+    }
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_qos_1pPriRemap_set(rtk_pri_t dot1p_pri, rtk_pri_t int_pri)
+{
+    rtk_api_ret_t retVal;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if (dot1p_pri > RTL8367C_PRIMAX || int_pri > RTL8367C_PRIMAX)
+        return  RT_ERR_VLAN_PRIORITY;
+
+    if ((retVal = rtl8367c_setAsicPriorityDot1qRemapping(dot1p_pri, int_pri)) != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_qos_1pPriRemap_get(rtk_pri_t dot1p_pri, rtk_pri_t *pInt_pri)
+{
+    rtk_api_ret_t retVal;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if (dot1p_pri > RTL8367C_PRIMAX)
+        return  RT_ERR_QOS_INT_PRIORITY;
+
+    if ((retVal = rtl8367c_getAsicPriorityDot1qRemapping(dot1p_pri, pInt_pri)) != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_qos_dscpPriRemap_set(rtk_dscp_t dscp, rtk_pri_t int_pri)
+{
+    rtk_api_ret_t retVal;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if (int_pri > RTL8367C_PRIMAX )
+        return RT_ERR_QOS_INT_PRIORITY;
+
+    if (dscp > RTL8367C_DSCPMAX)
+        return RT_ERR_QOS_DSCP_VALUE;
+
+    if ((retVal = rtl8367c_setAsicPriorityDscpBased(dscp, int_pri)) != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_qos_dscpPriRemap_get(rtk_dscp_t dscp, rtk_pri_t *pInt_pri)
+{
+    rtk_api_ret_t retVal;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if (dscp > RTL8367C_DSCPMAX)
+        return RT_ERR_QOS_DSCP_VALUE;
+
+    if ((retVal = rtl8367c_getAsicPriorityDscpBased(dscp, pInt_pri)) != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_qos_portPri_set(rtk_port_t port, rtk_pri_t int_pri)
+{
+    rtk_api_ret_t retVal;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    /* Check Port Valid */
+    RTK_CHK_PORT_VALID(port);
+
+    if (int_pri > RTL8367C_PRIMAX )
+        return RT_ERR_QOS_INT_PRIORITY;
+
+    if ((retVal = rtl8367c_setAsicPriorityPortBased(rtk_switch_port_L2P_get(port), int_pri)) != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_qos_portPri_get(rtk_port_t port, rtk_pri_t *pInt_pri)
+{
+    rtk_api_ret_t retVal;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    /* Check Port Valid */
+    RTK_CHK_PORT_VALID(port);
+
+    if ((retVal = rtl8367c_getAsicPriorityPortBased(rtk_switch_port_L2P_get(port), pInt_pri)) != RT_ERR_OK)
+        return retVal;
+
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_qos_queueNum_set(rtk_port_t port, rtk_queue_num_t queue_num)
+{
+    rtk_api_ret_t retVal;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    /* Check Port Valid */
+    RTK_CHK_PORT_VALID(port);
+
+    if ((0 == queue_num) || (queue_num > RTK_MAX_NUM_OF_QUEUE))
+        return RT_ERR_FAILED;
+
+    if (RTK_MAX_NUM_OF_QUEUE == queue_num)
+        queue_num = 0;
+
+    if ((retVal = rtl8367c_setAsicOutputQueueMappingIndex(rtk_switch_port_L2P_get(port), queue_num)) != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_qos_queueNum_get(rtk_port_t port, rtk_queue_num_t *pQueue_num)
+{
+    rtk_api_ret_t retVal;
+    rtk_uint32 qidx;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    /* Check Port Valid */
+    RTK_CHK_PORT_VALID(port);
+
+    if ((retVal = rtl8367c_getAsicOutputQueueMappingIndex(rtk_switch_port_L2P_get(port), &qidx)) != RT_ERR_OK)
+        return retVal;
+
+    if (0 == qidx)
+        *pQueue_num = 8;
+    else
+        *pQueue_num = qidx;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_qos_priMap_set(rtk_queue_num_t queue_num, rtk_qos_pri2queue_t *pPri2qid)
+{
+    rtk_api_ret_t retVal;
+    rtk_uint32 pri;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if ((0 == queue_num) || (queue_num > RTK_MAX_NUM_OF_QUEUE))
+        return RT_ERR_QUEUE_NUM;
+
+    for (pri = 0; pri <= RTK_PRIMAX; pri++)
+    {
+        if (pPri2qid->pri2queue[pri] > RTK_QIDMAX)
+            return RT_ERR_QUEUE_ID;
+
+        if ((retVal = rtl8367c_setAsicPriorityToQIDMappingTable(queue_num - 1, pri, pPri2qid->pri2queue[pri])) != RT_ERR_OK)
+            return retVal;
+    }
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_qos_priMap_get(rtk_queue_num_t queue_num, rtk_qos_pri2queue_t *pPri2qid)
+{
+    rtk_api_ret_t retVal;
+    rtk_uint32 pri;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if ((0 == queue_num) || (queue_num > RTK_MAX_NUM_OF_QUEUE))
+        return RT_ERR_QUEUE_NUM;
+
+    for (pri = 0; pri <= RTK_PRIMAX; pri++)
+    {
+        if ((retVal = rtl8367c_getAsicPriorityToQIDMappingTable(queue_num-1, pri, &pPri2qid->pri2queue[pri])) != RT_ERR_OK)
+            return retVal;
+    }
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_qos_schedulingQueue_set(rtk_port_t port, rtk_qos_queue_weights_t *pQweights)
+{
+    rtk_api_ret_t retVal;
+    rtk_uint32 qid;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    /* Check Port Valid */
+    RTK_CHK_PORT_VALID(port);
+
+    for (qid = 0; qid < RTL8367C_QUEUENO; qid++)
+    {
+        if (pQweights->weights[qid] > QOS_WEIGHT_MAX)
+            return RT_ERR_QOS_QUEUE_WEIGHT;
+
+        if (0 == pQweights->weights[qid])
+        {
+            if ((retVal = rtl8367c_setAsicQueueType(rtk_switch_port_L2P_get(port), qid, QTYPE_STRICT)) != RT_ERR_OK)
+                return retVal;
+        }
+        else
+        {
+            if ((retVal = rtl8367c_setAsicQueueType(rtk_switch_port_L2P_get(port), qid, QTYPE_WFQ)) != RT_ERR_OK)
+                return retVal;
+
+            if ((retVal = rtl8367c_setAsicWFQWeight(rtk_switch_port_L2P_get(port),qid, pQweights->weights[qid])) != RT_ERR_OK)
+                return retVal;
+        }
+    }
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_qos_schedulingQueue_get(rtk_port_t port, rtk_qos_queue_weights_t *pQweights)
+{
+    rtk_api_ret_t retVal;
+    rtk_uint32 qid, qtype, qweight;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    /* Check Port Valid */
+    RTK_CHK_PORT_VALID(port);
+
+    for (qid = 0; qid < RTL8367C_QUEUENO; qid++)
+    {
+        if ((retVal = rtl8367c_getAsicQueueType(rtk_switch_port_L2P_get(port), qid, &qtype)) != RT_ERR_OK)
+            return retVal;
+
+        if (QTYPE_STRICT == qtype)
+        {
+            pQweights->weights[qid] = 0;
+        }
+        else if (QTYPE_WFQ == qtype)
+        {
+            if ((retVal = rtl8367c_getAsicWFQWeight(rtk_switch_port_L2P_get(port), qid, &qweight)) != RT_ERR_OK)
+                return retVal;
+            pQweights->weights[qid] = qweight;
+        }
+    }
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_qos_1pRemarkEnable_set(rtk_port_t port, rtk_enable_t enable)
+{
+    rtk_api_ret_t retVal;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    /* Check Port Valid */
+    RTK_CHK_PORT_VALID(port);
+
+    if (enable >= RTK_ENABLE_END)
+        return RT_ERR_INPUT;
+
+    if ((retVal = rtl8367c_setAsicRemarkingDot1pAbility(rtk_switch_port_L2P_get(port), enable)) != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_qos_1pRemarkEnable_get(rtk_port_t port, rtk_enable_t *pEnable)
+{
+    rtk_api_ret_t retVal;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    /* Check Port Valid */
+    RTK_CHK_PORT_VALID(port);
+
+    if ((retVal = rtl8367c_getAsicRemarkingDot1pAbility(rtk_switch_port_L2P_get(port), pEnable)) != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_qos_1pRemark_set(rtk_pri_t int_pri, rtk_pri_t dot1p_pri)
+{
+    rtk_api_ret_t retVal;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if (int_pri > RTL8367C_PRIMAX )
+        return RT_ERR_QOS_INT_PRIORITY;
+
+    if (dot1p_pri > RTL8367C_PRIMAX)
+        return RT_ERR_VLAN_PRIORITY;
+
+    if ((retVal = rtl8367c_setAsicRemarkingDot1pParameter(int_pri, dot1p_pri)) != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_qos_1pRemark_get(rtk_pri_t int_pri, rtk_pri_t *pDot1p_pri)
+{
+    rtk_api_ret_t retVal;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if (int_pri > RTL8367C_PRIMAX )
+        return RT_ERR_QOS_INT_PRIORITY;
+
+    if ((retVal = rtl8367c_getAsicRemarkingDot1pParameter(int_pri, pDot1p_pri)) != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_qos_1pRemarkSrcSel_set(rtk_qos_1pRmkSrc_t type)
+{
+    rtk_api_ret_t retVal;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if (type >= DOT1P_RMK_SRC_END )
+        return RT_ERR_QOS_INT_PRIORITY;
+
+    if ((retVal = rtl8367c_setAsicRemarkingDot1pSrc(type)) != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_qos_1pRemarkSrcSel_get(rtk_qos_1pRmkSrc_t *pType)
+{
+    rtk_api_ret_t retVal;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if ((retVal = rtl8367c_getAsicRemarkingDot1pSrc(pType)) != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_qos_dscpRemarkEnable_set(rtk_port_t port, rtk_enable_t enable)
+{
+    rtk_api_ret_t retVal;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    /*for whole system function, the port value should be 0xFF*/
+    if (port != RTK_WHOLE_SYSTEM)
+        return RT_ERR_PORT_ID;
+
+    if (enable >= RTK_ENABLE_END)
+        return RT_ERR_INPUT;
+
+    if ((retVal = rtl8367c_setAsicRemarkingDscpAbility(enable)) != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_qos_dscpRemarkEnable_get(rtk_port_t port, rtk_enable_t *pEnable)
+{
+    rtk_api_ret_t retVal;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    /*for whole system function, the port value should be 0xFF*/
+    if (port != RTK_WHOLE_SYSTEM)
+        return RT_ERR_PORT_ID;
+
+    if ((retVal = rtl8367c_getAsicRemarkingDscpAbility(pEnable)) != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_qos_dscpRemark_set(rtk_pri_t int_pri, rtk_dscp_t dscp)
+{
+    rtk_api_ret_t retVal;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if (int_pri > RTK_PRIMAX )
+        return RT_ERR_QOS_INT_PRIORITY;
+
+    if (dscp > RTK_DSCPMAX)
+        return RT_ERR_QOS_DSCP_VALUE;
+
+    if ((retVal = rtl8367c_setAsicRemarkingDscpParameter(int_pri, dscp)) != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_qos_dscpRemark_get(rtk_pri_t int_pri, rtk_dscp_t *pDscp)
+{
+    rtk_api_ret_t retVal;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if (int_pri > RTK_PRIMAX )
+        return RT_ERR_QOS_INT_PRIORITY;
+
+    if ((retVal = rtl8367c_getAsicRemarkingDscpParameter(int_pri, pDscp)) != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_qos_dscpRemarkSrcSel_set(rtk_qos_dscpRmkSrc_t type)
+{
+    rtk_api_ret_t retVal;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if (type >= DSCP_RMK_SRC_END )
+        return RT_ERR_QOS_INT_PRIORITY;
+
+    if ((retVal = rtl8367c_setAsicRemarkingDscpSrc(type)) != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_qos_dscpRemarkSrcSel_get(rtk_qos_dscpRmkSrc_t *pType)
+{
+    rtk_api_ret_t retVal;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if ((retVal = rtl8367c_getAsicRemarkingDscpSrc(pType)) != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_qos_dscpRemark2Dscp_set(rtk_dscp_t dscp, rtk_dscp_t rmkDscp)
+{
+    rtk_api_ret_t retVal;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if ((dscp > RTK_DSCPMAX) || (rmkDscp > RTK_DSCPMAX))
+        return RT_ERR_QOS_DSCP_VALUE;
+
+    if ((retVal = rtl8367c_setAsicRemarkingDscp2Dscp(dscp, rmkDscp)) != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_qos_dscpRemark2Dscp_get(rtk_dscp_t dscp, rtk_dscp_t *pDscp)
+{
+    rtk_api_ret_t retVal;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if (dscp > RTK_DSCPMAX)
+        return RT_ERR_QOS_DSCP_VALUE;
+
+    if ((retVal = rtl8367c_getAsicRemarkingDscp2Dscp(dscp, pDscp)) != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_qos_portPriSelIndex_set(rtk_port_t port, rtk_qos_priDecTbl_t index)
+{
+    rtk_api_ret_t retVal;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    /* Check Port Valid */
+    RTK_CHK_PORT_VALID(port);
+
+    if (index >= PRIDECTBL_END )
+        return RT_ERR_ENTRY_INDEX;
+
+    if ((retVal = rtl8367c_setAsicPortPriorityDecisionIndex(rtk_switch_port_L2P_get(port), index)) != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_qos_portPriSelIndex_get(rtk_port_t port, rtk_qos_priDecTbl_t *pIndex)
+{
+    rtk_api_ret_t retVal;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    /* Check Port Valid */
+    RTK_CHK_PORT_VALID(port);
+
+    if ((retVal = rtl8367c_getAsicPortPriorityDecisionIndex(rtk_switch_port_L2P_get(port), pIndex)) != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+/* Function Name:
+ *      rtk_qos_init
+ * Description:
+ *      Configure QoS default settings with queue number assignment to each port.
+ * Input:
+ *      queueNum - Queue number of each port.
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_QUEUE_NUM    - Invalid queue number.
+ *      RT_ERR_INPUT        - Invalid input parameters.
+ * Note:
+ *      This API will initialize related QoS settings with queue number assignment.
+ *      The queue number is from 1 to 8.
+ */
+rtk_api_ret_t rtk_qos_init(rtk_queue_num_t queueNum)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_qos_init(queueNum);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
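+
+/*
+ * Usage sketch (illustrative only, not part of the original sources): a
+ * typical bring-up call once the switch itself is initialized.
+ */
+#if 0 /* example only, not built */
+static rtk_api_ret_t example_qos_bringup(void)
+{
+    /* Use all 8 output queues per port; rtk_qos_init() also programs the
+     * default priority-to-queue table and the priority decision order. */
+    return rtk_qos_init(8);
+}
+#endif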
+
+/* Function Name:
+ *      rtk_qos_priSel_set
+ * Description:
+ *      Configure the priority order among different priority mechanism.
+ * Input:
+ *      index - Priority decision table index (0~1)
+ *      pPriDec - Priority assign for port, dscp, 802.1p, cvlan, svlan, acl based priority decision.
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK                   - OK
+ *      RT_ERR_FAILED               - Failed
+ *      RT_ERR_SMI                  - SMI access error
+ *      RT_ERR_QOS_SEL_PRI_SOURCE   - Invalid priority decision source parameter.
+ * Note:
+ *      The ASIC follows the user-configured ordering of these mechanisms to select the mapped
+ *      queue priority for a receiving frame. If two priority mechanisms are given the same
+ *      order, the ASIC chooses the highest priority among them to assign the queue priority
+ *      to the receiving frame.
+ *      The priority sources are:
+ *      - PRIDEC_PORT
+ *      - PRIDEC_ACL
+ *      - PRIDEC_DSCP
+ *      - PRIDEC_1Q
+ *      - PRIDEC_1AD
+ *      - PRIDEC_CVLAN
+ *      - PRIDEC_DA
+ *      - PRIDEC_SA
+ */
+rtk_api_ret_t rtk_qos_priSel_set(rtk_qos_priDecTbl_t index, rtk_priority_select_t *pPriDec)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_qos_priSel_set(index, pPriDec);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
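+
+/*
+ * Usage sketch (illustrative only; the weights below are arbitrary example
+ * values): fill a rtk_priority_select_t so DSCP outranks the 802.1p tag,
+ * which in turn outranks port-based priority, and program decision table 0.
+ */
+#if 0 /* example only, not built */
+static rtk_api_ret_t example_pri_sel(void)
+{
+    rtk_priority_select_t sel;
+
+    memset(&sel, 0, sizeof(sel));
+    sel.dscp_pri  = 7;  /* highest weight: DSCP decides first */
+    sel.dot1q_pri = 6;  /* then the 802.1p tag                */
+    sel.port_pri  = 1;  /* port-based priority as a fallback  */
+
+    return rtk_qos_priSel_set(PRIDECTBL_IDX0, &sel);
+}
+#endif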
+
+/* Function Name:
+ *      rtk_qos_priSel_get
+ * Description:
+ *      Get the priority order configuration among different priority mechanism.
+ * Input:
+ *      index - Priority decision table index (0~1)
+ * Output:
+ *      pPriDec - Priority assign for port, dscp, 802.1p, cvlan, svlan, acl based priority decision.
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ * Note:
+ *      The ASIC follows the user-configured ordering of these mechanisms to select the mapped
+ *      queue priority for a receiving frame. If two priority mechanisms are given the same
+ *      order, the ASIC chooses the highest priority among them to assign the queue priority
+ *      to the receiving frame.
+ *      The priority sources are:
+ *      - PRIDEC_PORT,
+ *      - PRIDEC_ACL,
+ *      - PRIDEC_DSCP,
+ *      - PRIDEC_1Q,
+ *      - PRIDEC_1AD,
+ *      - PRIDEC_CVLAN,
+ *      - PRIDEC_DA,
+ *      - PRIDEC_SA,
+ */
+rtk_api_ret_t rtk_qos_priSel_get(rtk_qos_priDecTbl_t index, rtk_priority_select_t *pPriDec)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_qos_priSel_get(index, pPriDec);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_qos_1pPriRemap_set
+ * Description:
+ *      Configure 802.1p priority mapping to internal absolute priority.
+ * Input:
+ *      dot1p_pri   - 802.1p priority value.
+ *      int_pri     - internal priority value.
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK               - OK
+ *      RT_ERR_FAILED           - Failed
+ *      RT_ERR_SMI              - SMI access error
+ *      RT_ERR_INPUT            - Invalid input parameters.
+ *      RT_ERR_VLAN_PRIORITY    - Invalid 1p priority.
+ *      RT_ERR_QOS_INT_PRIORITY - Invalid priority.
+ * Note:
+ *      The 802.1p priority is mapped to an internal ASIC priority, which is used for queue selection and packet scheduling.
+ */
+rtk_api_ret_t rtk_qos_1pPriRemap_set(rtk_pri_t dot1p_pri, rtk_pri_t int_pri)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_qos_1pPriRemap_set(dot1p_pri, int_pri);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_qos_1pPriRemap_get
+ * Description:
+ *      Get 802.1p priority mapping to internal absolute priority.
+ * Input:
+ *      dot1p_pri - 802.1p priority value.
+ * Output:
+ *      pInt_pri - internal priority value.
+ * Return:
+ *      RT_ERR_OK               - OK
+ *      RT_ERR_FAILED           - Failed
+ *      RT_ERR_SMI              - SMI access error
+ *      RT_ERR_VLAN_PRIORITY    - Invalid priority.
+ *      RT_ERR_QOS_INT_PRIORITY - Invalid priority.
+ * Note:
+ *      The 802.1p priority is mapped to an internal ASIC priority, which is used for queue selection and packet scheduling.
+ */
+rtk_api_ret_t rtk_qos_1pPriRemap_get(rtk_pri_t dot1p_pri, rtk_pri_t *pInt_pri)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_qos_1pPriRemap_get(dot1p_pri, pInt_pri);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_qos_dscpPriRemap_set
+ * Description:
+ *      Map dscp value to internal priority.
+ * Input:
+ *      dscp    - Dscp value of receiving frame
+ *      int_pri - internal priority value.
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK               - OK
+ *      RT_ERR_FAILED           - Failed
+ *      RT_ERR_SMI              - SMI access error
+ *      RT_ERR_INPUT            - Invalid input parameters.
+ *      RT_ERR_QOS_DSCP_VALUE   - Invalid DSCP value.
+ *      RT_ERR_QOS_INT_PRIORITY - Invalid priority.
+ * Note:
+ *      The Differentiated Services Code Point is a selector for a router's per-hop behaviors.
+ *      As a selector, there is no implication that a numerically greater DSCP implies a better
+ *      network service. The DSCP field completely overlaps the old precedence field of TOS, so
+ *      if DSCP values are chosen carefully, backward compatibility can be achieved.
+ */
+rtk_api_ret_t rtk_qos_dscpPriRemap_set(rtk_dscp_t dscp, rtk_pri_t int_pri)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_qos_dscpPriRemap_set(dscp, int_pri);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_qos_dscpPriRemap_get
+ * Description:
+ *      Get the internal priority mapped from a DSCP value.
+ * Input:
+ *      dscp - Dscp value of receiving frame
+ * Output:
+ *      pInt_pri - internal priority value.
+ * Return:
+ *      RT_ERR_OK               - OK
+ *      RT_ERR_FAILED           - Failed
+ *      RT_ERR_SMI              - SMI access error
+ *      RT_ERR_QOS_DSCP_VALUE   - Invalid DSCP value.
+ * Note:
+ *      The Differentiated Services Code Point is a selector for a router's per-hop behaviors.
+ *      As a selector, there is no implication that a numerically greater DSCP implies a better
+ *      network service. The DSCP field completely overlaps the old precedence field of TOS, so
+ *      if DSCP values are chosen carefully, backward compatibility can be achieved.
+ */
+rtk_api_ret_t rtk_qos_dscpPriRemap_get(rtk_dscp_t dscp, rtk_pri_t *pInt_pri)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_qos_dscpPriRemap_get(dscp, pInt_pri);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
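+
+/*
+ * Usage sketch (illustrative only): map DSCP 46, the conventional Expedited
+ * Forwarding code point, to internal priority 5 so EF traffic lands in a
+ * high queue.
+ */
+#if 0 /* example only, not built */
+static rtk_api_ret_t example_dscp_remap(void)
+{
+    return rtk_qos_dscpPriRemap_set(46, 5);
+}
+#endif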
+
+/* Function Name:
+ *      rtk_qos_portPri_set
+ * Description:
+ *      Configure priority usage to each port.
+ * Input:
+ *      port - Port id.
+ *      int_pri - internal priority value.
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK               - OK
+ *      RT_ERR_FAILED           - Failed
+ *      RT_ERR_SMI              - SMI access error
+ *      RT_ERR_PORT_ID          - Invalid port number.
+ *      RT_ERR_QOS_SEL_PORT_PRI - Invalid port priority.
+ *      RT_ERR_QOS_INT_PRIORITY - Invalid priority.
+ * Note:
+ *      The API can set priority of port assignments for queue usage and packet scheduling.
+ */
+rtk_api_ret_t rtk_qos_portPri_set(rtk_port_t port, rtk_pri_t int_pri)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_qos_portPri_set(port, int_pri);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_qos_portPri_get
+ * Description:
+ *      Get priority usage to each port.
+ * Input:
+ *      port - Port id.
+ * Output:
+ *      pInt_pri - internal priority value.
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_PORT_ID      - Invalid port number.
+ *      RT_ERR_INPUT        - Invalid input parameters.
+ * Note:
+ *      The API can get priority of port assignments for queue usage and packet scheduling.
+ */
+rtk_api_ret_t rtk_qos_portPri_get(rtk_port_t port, rtk_pri_t *pInt_pri)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_qos_portPri_get(port, pInt_pri);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_qos_queueNum_set
+ * Description:
+ *      Set output queue number for each port.
+ * Input:
+ *      port    - Port id.
+ *      queue_num - Mapping queue number (1~8)
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_PORT_ID      - Invalid port number.
+ *      RT_ERR_QUEUE_NUM    - Invalid queue number.
+ * Note:
+ *      The API can set the output queue number of the specified port. The queue number is from 1 to 8.
+ */
+rtk_api_ret_t rtk_qos_queueNum_set(rtk_port_t port, rtk_queue_num_t queue_num)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_qos_queueNum_set(port, queue_num);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_qos_queueNum_get
+ * Description:
+ *      Get output queue number.
+ * Input:
+ *      port - Port id.
+ * Output:
+ *      pQueue_num - Mapping queue number
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_PORT_ID      - Invalid port number.
+ * Note:
+ *      The API will return the output queue number of the specified port. The queue number is from 1 to 8.
+ */
+rtk_api_ret_t rtk_qos_queueNum_get(rtk_port_t port, rtk_queue_num_t *pQueue_num)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_qos_queueNum_get(port, pQueue_num);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_qos_priMap_set
+ * Description:
+ *      Set priority-to-queue ID mapping table parameters.
+ * Input:
+ *      queue_num   - Queue number usage.
+ *      pPri2qid    - Priority mapping to queue ID.
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK               - OK
+ *      RT_ERR_FAILED           - Failed
+ *      RT_ERR_SMI              - SMI access error
+ *      RT_ERR_INPUT            - Invalid input parameters.
+ *      RT_ERR_QUEUE_NUM        - Invalid queue number.
+ *      RT_ERR_QUEUE_ID         - Invalid queue id.
+ *      RT_ERR_PORT_ID          - Invalid port number.
+ *      RT_ERR_QOS_INT_PRIORITY - Invalid priority.
+ * Note:
+ *      ASIC supports priority mapping to queue with different queue number from 1 to 8.
+ *      For different queue numbers usage, ASIC supports different internal available queue IDs.
+ */
+rtk_api_ret_t rtk_qos_priMap_set(rtk_queue_num_t queue_num, rtk_qos_pri2queue_t *pPri2qid)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_qos_priMap_set(queue_num, pPri2qid);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_qos_priMap_get
+ * Description:
+ *      Get priority to queue ID mapping table parameters.
+ * Input:
+ *      queue_num - Queue number usage.
+ * Output:
+ *      pPri2qid - Priority mapping to queue ID.
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_INPUT        - Invalid input parameters.
+ *      RT_ERR_QUEUE_NUM    - Invalid queue number.
+ * Note:
+ *      The API can return the mapping queue id of the specified priority and queue number.
+ *      The queue number is from 1 to 8.
+ */
+rtk_api_ret_t rtk_qos_priMap_get(rtk_queue_num_t queue_num, rtk_qos_pri2queue_t *pPri2qid)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_qos_priMap_get(queue_num, pPri2qid);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
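+
+/*
+ * Usage sketch (illustrative only; the mapping is an arbitrary example):
+ * with a 4-queue configuration, fold the 8 internal priorities onto queues
+ * 0..3 in pairs.
+ */
+#if 0 /* example only, not built */
+static rtk_api_ret_t example_pri_map(void)
+{
+    rtk_qos_pri2queue_t map;
+    rtk_uint32 pri;
+
+    for (pri = 0; pri <= RTK_PRIMAX; pri++)
+        map.pri2queue[pri] = pri / 2;   /* 0,0,1,1,2,2,3,3 */
+
+    return rtk_qos_priMap_set(4, &map);
+}
+#endif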
+
+/* Function Name:
+ *      rtk_qos_schedulingQueue_set
+ * Description:
+ *      Set weight and type of queues in dedicated port.
+ * Input:
+ *      port        - Port id.
+ *      pQweights   - The array of weights for WRR/WFQ queue (0 for STRICT_PRIORITY queue).
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK               - OK
+ *      RT_ERR_FAILED           - Failed
+ *      RT_ERR_SMI              - SMI access error
+ *      RT_ERR_PORT_ID          - Invalid port number.
+ *      RT_ERR_QOS_QUEUE_WEIGHT - Invalid queue weight.
+ * Note:
+ *      The API can set the type (strict priority or weighted fair queue, WFQ) and weight of
+ *      each queue on the dedicated port. If a queue id is not included in the queue usage,
+ *      its type and weight settings have no effect. Strict queues are prioritized by queue
+ *      id: strict queue id 5 carries a higher priority than strict queue id 4. The WFQ queue
+ *      weight is from 1 to 127, and weight 0 selects the strict priority queue type.
+ */
+rtk_api_ret_t rtk_qos_schedulingQueue_set(rtk_port_t port, rtk_qos_queue_weights_t *pQweights)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_qos_schedulingQueue_set(port, pQweights);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_qos_schedulingQueue_get
+ * Description:
+ *      Get weight and type of queues in dedicated port.
+ * Input:
+ *      port - Port id.
+ * Output:
+ *      pQweights - The array of weights for WRR/WFQ queue (0 for STRICT_PRIORITY queue).
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_INPUT        - Invalid input parameters.
+ *      RT_ERR_PORT_ID      - Invalid port number.
+ * Note:
+ *      The API can get the type (strict priority or weighted fair queue, WFQ) and weight of
+ *      each queue on the dedicated port.
+ *      The WFQ queue weight is from 1 to 127, and weight 0 indicates the strict priority queue type.
+ */
+rtk_api_ret_t rtk_qos_schedulingQueue_get(rtk_port_t port, rtk_qos_queue_weights_t *pQweights)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_qos_schedulingQueue_get(port, pQweights);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_qos_1pRemarkEnable_set
+ * Description:
+ *      Set 1p Remarking state
+ * Input:
+ *      port        - Port id.
+ *      enable      - State of per-port 1p Remarking
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_PORT_ID      - Invalid port number.
+ *      RT_ERR_ENABLE       - Invalid enable parameter.
+ * Note:
+ *      The API can enable or disable 802.1p remarking ability for the specified port.
+ *      The status of 802.1p remark:
+ *      - DISABLED
+ *      - ENABLED
+ */
+rtk_api_ret_t rtk_qos_1pRemarkEnable_set(rtk_port_t port, rtk_enable_t enable)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_qos_1pRemarkEnable_set(port, enable);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_qos_1pRemarkEnable_get
+ * Description:
+ *      Get 802.1p remarking ability.
+ * Input:
+ *      port - Port id.
+ * Output:
+ *      pEnable - Status of 802.1p remark.
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_PORT_ID      - Invalid port number.
+ * Note:
+ *      The API can get 802.1p remarking ability.
+ *      The status of 802.1p remark:
+ *      - DISABLED
+ *      - ENABLED
+ */
+rtk_api_ret_t rtk_qos_1pRemarkEnable_get(rtk_port_t port, rtk_enable_t *pEnable)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_qos_1pRemarkEnable_get(port, pEnable);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_qos_1pRemark_set
+ * Description:
+ *      Set 802.1p remarking parameter.
+ * Input:
+ *      int_pri     - Internal priority value.
+ *      dot1p_pri   - 802.1p priority value.
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK               - OK
+ *      RT_ERR_FAILED           - Failed
+ *      RT_ERR_SMI              - SMI access error
+ *      RT_ERR_VLAN_PRIORITY    - Invalid 1p priority.
+ *      RT_ERR_QOS_INT_PRIORITY - Invalid priority.
+ * Note:
+ *      The API can set 802.1p parameters source priority and new priority.
+ */
+rtk_api_ret_t rtk_qos_1pRemark_set(rtk_pri_t int_pri, rtk_pri_t dot1p_pri)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_qos_1pRemark_set(int_pri, dot1p_pri);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_qos_1pRemark_get
+ * Description:
+ *      Get 802.1p remarking parameter.
+ * Input:
+ *      int_pri - Internal priority value.
+ * Output:
+ *      pDot1p_pri - 802.1p priority value.
+ * Return:
+ *      RT_ERR_OK               - OK
+ *      RT_ERR_FAILED           - Failed
+ *      RT_ERR_SMI              - SMI access error
+ *      RT_ERR_QOS_INT_PRIORITY - Invalid priority.
+ * Note:
+ *      The API can get 802.1p remarking parameters. It returns the new 802.1p priority mapped from the given internal priority.
+ */
+rtk_api_ret_t rtk_qos_1pRemark_get(rtk_pri_t int_pri, rtk_pri_t *pDot1p_pri)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_qos_1pRemark_get(int_pri, pDot1p_pri);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
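+
+/*
+ * Usage sketch (illustrative only; assumes ENABLED from rtk_enable_t and a
+ * valid logical "port"): rewrite the 802.1p field on egress so internal
+ * priority 5 becomes 802.1p priority 3 on the wire.
+ */
+#if 0 /* example only, not built */
+static rtk_api_ret_t example_1p_remark(rtk_port_t port)
+{
+    rtk_api_ret_t ret;
+
+    if ((ret = rtk_qos_1pRemark_set(5, 3)) != RT_ERR_OK)
+        return ret;
+
+    return rtk_qos_1pRemarkEnable_set(port, ENABLED);
+}
+#endif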
+
+
+/* Function Name:
+ *      rtk_qos_1pRemarkSrcSel_set
+ * Description:
+ *      Set remarking source of 802.1p remarking.
+ * Input:
+ *      type      - remarking source
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK
+ *      RT_ERR_FAILED
+ *      RT_ERR_NOT_INIT     - The module is not initialized
+ *      RT_ERR_PORT_ID      - invalid port id
+ *      RT_ERR_INPUT        - invalid input parameter
+ * Note:
+ *      The API can configure the 802.1p remark functionality to map the original 802.1p
+ *      value or the internal priority to the TX 802.1p value.
+ */
+rtk_api_ret_t rtk_qos_1pRemarkSrcSel_set(rtk_qos_1pRmkSrc_t type)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_qos_1pRemarkSrcSel_set(type);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_qos_1pRemarkSrcSel_get
+ * Description:
+ *      Get remarking source of 802.1p remarking.
+ * Input:
+ *      None
+ * Output:
+ *      pType      - remarking source
+ * Return:
+ *      RT_ERR_OK
+ *      RT_ERR_FAILED
+ *      RT_ERR_NOT_INIT     - The module is not initialized
+ *      RT_ERR_PORT_ID      - invalid port id
+ *      RT_ERR_INPUT        - invalid input parameter
+ *      RT_ERR_NULL_POINTER - input parameter may be null pointer
+ * Note:
+ *      None
+ */
+rtk_api_ret_t rtk_qos_1pRemarkSrcSel_get(rtk_qos_1pRmkSrc_t *pType)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_qos_1pRemarkSrcSel_get(pType);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_qos_dscpRemarkEnable_set
+ * Description:
+ *      Set DSCP remarking ability.
+ * Input:
+ *      port    - Port id.
+ *      enable  - status of DSCP remark.
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK               - OK
+ *      RT_ERR_FAILED           - Failed
+ *      RT_ERR_SMI              - SMI access error
+ *      RT_ERR_PORT_ID          - Invalid port number.
+ *      RT_ERR_QOS_INT_PRIORITY - Invalid priority.
+ *      RT_ERR_ENABLE           - Invalid enable parameter.
+ * Note:
+ *      The API can enable or disable DSCP remarking ability for whole system.
+ *      The status of DSCP remark:
+ *      - DISABLED
+ *      - ENABLED
+ */
+rtk_api_ret_t rtk_qos_dscpRemarkEnable_set(rtk_port_t port, rtk_enable_t enable)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_qos_dscpRemarkEnable_set(port, enable);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_qos_dscpRemarkEnable_get
+ * Description:
+ *      Get DSCP remarking ability.
+ * Input:
+ *      port - Port id.
+ * Output:
+ *      pEnable - status of DSCP remarking.
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_PORT_ID      - Invalid port number.
+ * Note:
+ *      The API can get DSCP remarking ability.
+ *      The status of DSCP remark:
+ *      - DISABLED
+ *      - ENABLED
+ */
+rtk_api_ret_t rtk_qos_dscpRemarkEnable_get(rtk_port_t port, rtk_enable_t *pEnable)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_qos_dscpRemarkEnable_get(port, pEnable);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_qos_dscpRemark_set
+ * Description:
+ *      Set DSCP remarking parameter.
+ * Input:
+ *      int_pri - Internal priority value.
+ *      dscp    - DSCP value.
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK               - OK
+ *      RT_ERR_FAILED           - Failed
+ *      RT_ERR_SMI              - SMI access error
+ *      RT_ERR_QOS_INT_PRIORITY - Invalid priority.
+ *      RT_ERR_QOS_DSCP_VALUE   - Invalid DSCP value.
+ * Note:
+ *      The API can set DSCP value and mapping priority.
+ */
+rtk_api_ret_t rtk_qos_dscpRemark_set(rtk_pri_t int_pri, rtk_dscp_t dscp)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_qos_dscpRemark_set(int_pri, dscp);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_qos_dscpRemark_get
+ * Description:
+ *      Get DSCP remarking parameter.
+ * Input:
+ *      int_pri - Internal priority value.
+ * Output:
+ *      pDscp   - DSCP value.
+ * Return:
+ *      RT_ERR_OK               - OK
+ *      RT_ERR_FAILED           - Failed
+ *      RT_ERR_SMI              - SMI access error
+ *      RT_ERR_QOS_INT_PRIORITY - Invalid priority.
+ * Note:
+ *      The API can get the DSCP remarking parameters. It returns the DSCP value mapped to the given priority.
+ */
+rtk_api_ret_t rtk_qos_dscpRemark_get(rtk_pri_t int_pri, rtk_dscp_t *pDscp)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_qos_dscpRemark_get(int_pri, pDscp);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
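+
+/*
+ * Usage sketch (illustrative only, excluded from the build via #if 0):
+ * remark internal priority 5 to DSCP 46 (the common EF codepoint) and read
+ * the mapping back. This takes effect when the remark source is the
+ * internal priority; see rtk_qos_dscpRemarkSrcSel_set() below.
+ */
+#if 0
+static rtk_api_ret_t example_dscp_remark_map(void)
+{
+    rtk_api_ret_t retVal;
+    rtk_dscp_t dscp;
+
+    if ((retVal = rtk_qos_dscpRemark_set(5, 46)) != RT_ERR_OK)
+        return retVal;
+
+    if ((retVal = rtk_qos_dscpRemark_get(5, &dscp)) != RT_ERR_OK)
+        return retVal;
+
+    return (dscp == 46) ? RT_ERR_OK : RT_ERR_FAILED;
+}
+#endif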
+
+/* Function Name:
+ *      rtk_qos_dscpRemarkSrcSel_set
+ * Description:
+ *      Set remarking source of DSCP remarking.
+ * Input:
+ *      type      - remarking source
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK
+ *      RT_ERR_FAILED
+ *      RT_ERR_NOT_INIT         - The module is not initialized
+ *      RT_ERR_PORT_ID          - invalid port id
+ *      RT_ERR_INPUT            - invalid input parameter
+ *
+ * Note:
+ *      The API can configure DSCP remark functionality to map original DSCP value or internal
+ *      priority to TX DSCP value.
+ */
+rtk_api_ret_t rtk_qos_dscpRemarkSrcSel_set(rtk_qos_dscpRmkSrc_t type)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_qos_dscpRemarkSrcSel_set(type);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_qos_dscpRemarkSrcSel_get
+ * Description:
+ *      Get remarking source of DSCP remarking.
+ * Input:
+ *      none
+ * Output:
+ *      pType      - remarking source
+ * Return:
+ *      RT_ERR_OK
+ *      RT_ERR_FAILED
+ *      RT_ERR_NOT_INIT         - The module is not initialized
+ *      RT_ERR_PORT_ID          - invalid port id
+ *      RT_ERR_INPUT            - invalid input parameter
+ *      RT_ERR_NULL_POINTER     - input parameter may be null pointer
+ *
+ * Note:
+ *      None
+ */
+rtk_api_ret_t rtk_qos_dscpRemarkSrcSel_get(rtk_qos_dscpRmkSrc_t *pType)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_qos_dscpRemarkSrcSel_get(pType);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_qos_dscpRemark2Dscp_set
+ * Description:
+ *      Set DSCP to remarked DSCP mapping.
+ * Input:
+ *      dscp    - DSCP value
+ *      rmkDscp - remarked DSCP value
+ * Output:
+ *      None.
+ * Return:
+ *      RT_ERR_OK
+ *      RT_ERR_FAILED
+ *      RT_ERR_QOS_DSCP_VALUE   - Invalid dscp value
+ * Note:
+ *      The dscp parameter can be a DSCP value or an internal priority, depending on the
+ *      configuration made through rtk_qos_dscpRemarkSrcSel_set(), because the DSCP remark
+ *      functionality can map either the original DSCP value or the internal priority to
+ *      the TX DSCP value.
+ */
+rtk_api_ret_t rtk_qos_dscpRemark2Dscp_set(rtk_dscp_t dscp, rtk_dscp_t rmkDscp)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_qos_dscpRemark2Dscp_set(dscp, rmkDscp);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_qos_dscpRemark2Dscp_get
+ * Description:
+ *      Get DSCP to remarked DSCP mapping.
+ * Input:
+ *      dscp    - DSCP value
+ * Output:
+ *      pDscp   - remarked DSCP value
+ * Return:
+ *      RT_ERR_OK
+ *      RT_ERR_FAILED
+ *      RT_ERR_QOS_DSCP_VALUE   - Invalid dscp value
+ *      RT_ERR_NULL_POINTER     - NULL pointer
+ * Note:
+ *      None.
+ */
+rtk_api_ret_t rtk_qos_dscpRemark2Dscp_get(rtk_dscp_t dscp, rtk_dscp_t *pDscp)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_qos_dscpRemark2Dscp_get(dscp, pDscp);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
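+
+/*
+ * Usage sketch (illustrative only, excluded from the build via #if 0):
+ * rewrite ingress DSCP 0 to DSCP 8. This table is consulted when the
+ * remark source is DSCP_RMK_SRC_DSCP (see rtk_qos_dscpRemarkSrcSel_set()).
+ */
+#if 0
+static rtk_api_ret_t example_dscp2dscp(void)
+{
+    rtk_api_ret_t retVal;
+    rtk_dscp_t rmkDscp;
+
+    if ((retVal = rtk_qos_dscpRemark2Dscp_set(0, 8)) != RT_ERR_OK)
+        return retVal;
+
+    if ((retVal = rtk_qos_dscpRemark2Dscp_get(0, &rmkDscp)) != RT_ERR_OK)
+        return retVal;
+
+    return (rmkDscp == 8) ? RT_ERR_OK : RT_ERR_FAILED;
+}
+#endif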
+
+/* Function Name:
+ *      rtk_qos_portPriSelIndex_set
+ * Description:
+ *      Configure priority decision index to each port.
+ * Input:
+ *      port - Port id.
+ *      index - priority decision index.
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK               - OK
+ *      RT_ERR_FAILED           - Failed
+ *      RT_ERR_SMI              - SMI access error
+ *      RT_ERR_PORT_ID          - Invalid port number.
+ *      RT_ERR_ENTRY_INDEX      - Invalid entry index.
+ * Note:
+ *      The API can set priority of port assignments for queue usage and packet scheduling.
+ */
+rtk_api_ret_t rtk_qos_portPriSelIndex_set(rtk_port_t port, rtk_qos_priDecTbl_t index)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_qos_portPriSelIndex_set(port, index);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_qos_portPriSelIndex_get
+ * Description:
+ *      Get priority decision index from each port.
+ * Input:
+ *      port - Port id.
+ * Output:
+ *      pIndex - priority decision index.
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_PORT_ID      - Invalid port number.
+ * Note:
+ *      The API can get priority of port assignments for queue usage and packet scheduling.
+ */
+rtk_api_ret_t rtk_qos_portPriSelIndex_get(rtk_port_t port, rtk_qos_priDecTbl_t *pIndex)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_qos_portPriSelIndex_get(port, pIndex);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
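+
+/*
+ * Usage sketch (illustrative only, excluded from the build via #if 0):
+ * point a port at priority decision table 1 (PRIDECTBL_IDX1), which is
+ * assumed to have been filled in earlier with rtk_qos_priSel_set().
+ */
+#if 0
+static rtk_api_ret_t example_pri_dec_index(rtk_port_t port)
+{
+    rtk_api_ret_t retVal;
+    rtk_qos_priDecTbl_t index;
+
+    if ((retVal = rtk_qos_portPriSelIndex_set(port, PRIDECTBL_IDX1)) != RT_ERR_OK)
+        return retVal;
+
+    if ((retVal = rtk_qos_portPriSelIndex_get(port, &index)) != RT_ERR_OK)
+        return retVal;
+
+    return (index == PRIDECTBL_IDX1) ? RT_ERR_OK : RT_ERR_FAILED;
+}
+#endif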
+
+
+
+
diff -Nruw linux-4.4.115-fbx/drivers/misc/freebox./rtlapi/qos.h linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/qos.h
--- linux-4.4.115-fbx/drivers/misc/freebox./rtlapi/qos.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/qos.h	2019-01-22 16:16:24.711257346 +0100
@@ -0,0 +1,783 @@
+/*
+ * Copyright (C) 2013 Realtek Semiconductor Corp.
+ * All Rights Reserved.
+ *
+ * This program is the proprietary software of Realtek Semiconductor
+ * Corporation and/or its licensors, and only be used, duplicated,
+ * modified or distributed under the authorized license from Realtek.
+ *
+ * ANY USE OF THE SOFTWARE OTHER THAN AS AUTHORIZED UNDER
+ * THIS LICENSE OR COPYRIGHT LAW IS PROHIBITED.
+ *
+ * Purpose : RTL8367/RTL8367C switch high-level API
+ *
+ * Feature : The file includes QoS module high-layer API definition
+ *
+ */
+
+#ifndef __RTK_API_QOS_H__
+#define __RTK_API_QOS_H__
+
+/*
+ * Data Type Declaration
+ */
+#define QOS_DEFAULT_TICK_PERIOD                     (19-1)
+#define QOS_DEFAULT_BYTE_PER_TOKEN                  34
+#define QOS_DEFAULT_LK_THRESHOLD                    (34*3) /* Why use 0x400? */
+
+
+#define QOS_DEFAULT_INGRESS_BANDWIDTH               0x3FFF /* 0x3FFF => unlimited */
+#define QOS_DEFAULT_EGRESS_BANDWIDTH                0x3D08 /* (0x3D08 + 1) * 64 kbps => 1 Gbps */
+#define QOS_DEFAULT_PREIFP                          1
+#define QOS_DEFAULT_PACKET_USED_PAGES_FC            0x60
+#define QOS_DEFAULT_PACKET_USED_FC_EN               0
+#define QOS_DEFAULT_QUEUE_BASED_FC_EN               1
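+
+/*
+ * Worked check of the two bandwidth defaults above (illustrative note):
+ * 0x3FFF is the register maximum and means "no ingress limit", while the
+ * egress default 0x3D08 = 15624 gives (15624 + 1) * 64 kbps =
+ * 1,000,000 kbps = 1 Gbps, matching the inline comment.
+ */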
+
+#define QOS_DEFAULT_PRIORITY_SELECT_PORT            8
+#define QOS_DEFAULT_PRIORITY_SELECT_1Q              0
+#define QOS_DEFAULT_PRIORITY_SELECT_ACL             0
+#define QOS_DEFAULT_PRIORITY_SELECT_DSCP            0
+
+#define QOS_DEFAULT_DSCP_MAPPING_PRIORITY           0
+
+#define QOS_DEFAULT_1Q_REMARKING_ABILITY            0
+#define QOS_DEFAULT_DSCP_REMARKING_ABILITY          0
+#define QOS_DEFAULT_QUEUE_GAP                       20
+#define QOS_DEFAULT_QUEUE_NO_MAX                    6
+#define QOS_DEFAULT_AVERAGE_PACKET_RATE             0x3FFF
+#define QOS_DEFAULT_BURST_SIZE_IN_APR               0x3F
+#define QOS_DEFAULT_PEAK_PACKET_RATE                2
+#define QOS_DEFAULT_SCHEDULER_ABILITY_APR           1     /* disabled */
+#define QOS_DEFAULT_SCHEDULER_ABILITY_PPR           1     /* disabled */
+#define QOS_DEFAULT_SCHEDULER_ABILITY_WFQ           1     /* disabled */
+
+#define QOS_WEIGHT_MAX                              127
+
+#define RTK_MAX_NUM_OF_PRIORITY                     8
+#define RTK_MAX_NUM_OF_QUEUE                        8
+
+#define RTK_PRIMAX                                  7
+#define RTK_QIDMAX                                  7
+#define RTK_DSCPMAX                                 63
+
+
+/* enum Priority Selection Index */
+typedef enum rtk_qos_priDecTbl_e
+{
+    PRIDECTBL_IDX0 = 0,
+    PRIDECTBL_IDX1,
+    PRIDECTBL_END,
+} rtk_qos_priDecTbl_t;
+
+
+/* Types of 802.1p remarking source */
+typedef enum rtk_qos_1pRmkSrc_e
+{
+    DOT1P_RMK_SRC_USER_PRI,
+    DOT1P_RMK_SRC_TAG_PRI,
+    DOT1P_RMK_SRC_END
+} rtk_qos_1pRmkSrc_t;
+
+
+/* Types of DSCP remarking source */
+typedef enum rtk_qos_dscpRmkSrc_e
+{
+    DSCP_RMK_SRC_INT_PRI,
+    DSCP_RMK_SRC_DSCP,
+    DSCP_RMK_SRC_USER_PRI,
+    DSCP_RMK_SRC_END
+} rtk_qos_dscpRmkSrc_t;
+
+
+
+
+typedef struct rtk_priority_select_s
+{
+    rtk_uint32 port_pri;
+    rtk_uint32 dot1q_pri;
+    rtk_uint32 acl_pri;
+    rtk_uint32 dscp_pri;
+    rtk_uint32 cvlan_pri;
+    rtk_uint32 svlan_pri;
+    rtk_uint32 dmac_pri;
+    rtk_uint32 smac_pri;
+} rtk_priority_select_t;
+
+typedef struct rtk_qos_pri2queue_s
+{
+    rtk_uint32 pri2queue[RTK_MAX_NUM_OF_PRIORITY];
+} rtk_qos_pri2queue_t;
+
+typedef struct rtk_qos_queue_weights_s
+{
+    rtk_uint32 weights[RTK_MAX_NUM_OF_QUEUE];
+} rtk_qos_queue_weights_t;
+
+typedef enum rtk_qos_scheduling_type_e
+{
+    WFQ = 0,        /* Weighted-Fair-Queue */
+    WRR,            /* Weighted-Round-Robin */
+    SCHEDULING_TYPE_END
+} rtk_qos_scheduling_type_t;
+
+typedef rtk_uint32  rtk_queue_num_t;    /* queue number */
+
+/* Function Name:
+ *      rtk_qos_init
+ * Description:
+ *      Configure QoS default settings with queue number assignment to each port.
+ * Input:
+ *      queueNum - Queue number of each port.
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_QUEUE_NUM    - Invalid queue number.
+ *      RT_ERR_INPUT        - Invalid input parameters.
+ * Note:
+ *      This API will initialize the related QoS settings with the queue number assignment.
+ *      The queue number is from 1 to 8.
+ */
+extern rtk_api_ret_t rtk_qos_init(rtk_queue_num_t queueNum);
+
+/* Function Name:
+ *      rtk_qos_priSel_set
+ * Description:
+ *      Configure the priority order among different priority mechanism.
+ * Input:
+ *      index - Priority decision table index (0~1)
+ *      pPriDec - Priority assign for port, dscp, 802.1p, cvlan, svlan, acl based priority decision.
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK                   - OK
+ *      RT_ERR_FAILED               - Failed
+ *      RT_ERR_SMI                  - SMI access error
+ *      RT_ERR_QOS_SEL_PRI_SOURCE   - Invalid priority decision source parameter.
+ * Note:
+ *      ASIC will follow user priority setting of mechanisms to select mapped queue priority for receiving frame.
+ *      If two priority mechanisms are the same, the ASIC will chose the highest priority from mechanisms to
+ *      assign queue priority to receiving frame.
+ *      The priority sources are:
+ *      - PRIDEC_PORT
+ *      - PRIDEC_ACL
+ *      - PRIDEC_DSCP
+ *      - PRIDEC_1Q
+ *      - PRIDEC_1AD
+ *      - PRIDEC_CVLAN
+ *      - PRIDEC_DA
+ *      - PRIDEC_SA
+ */
+extern rtk_api_ret_t rtk_qos_priSel_set(rtk_qos_priDecTbl_t index, rtk_priority_select_t *pPriDec);
+
+
+/* Function Name:
+ *      rtk_qos_priSel_get
+ * Description:
+ *      Get the priority order configuration among different priority mechanism.
+ * Input:
+ *      index - Priority decision table index (0~1)
+ * Output:
+ *      pPriDec - Priority assign for port, dscp, 802.1p, cvlan, svlan, acl based priority decision.
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ * Note:
+ *      ASIC will follow user priority setting of mechanisms to select mapped queue priority for receiving frame.
+ *      If two priority mechanisms are the same, the ASIC will chose the highest priority from mechanisms to
+ *      assign queue priority to receiving frame.
+ *      The priority sources are:
+ *      - PRIDEC_PORT
+ *      - PRIDEC_ACL
+ *      - PRIDEC_DSCP
+ *      - PRIDEC_1Q
+ *      - PRIDEC_1AD
+ *      - PRIDEC_CVLAN
+ *      - PRIDEC_DA
+ *      - PRIDEC_SA
+ */
+extern rtk_api_ret_t rtk_qos_priSel_get(rtk_qos_priDecTbl_t index, rtk_priority_select_t *pPriDec);
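+
+/*
+ * Usage sketch (illustrative only, excluded from the build via #if 0):
+ * fill decision table 0 so that ACL priority beats DSCP, which beats
+ * 802.1p, which beats port priority. The numeric weights are arbitrary
+ * examples; the assumption here is that a larger value wins.
+ */
+#if 0
+static rtk_api_ret_t example_pri_sel(void)
+{
+    rtk_priority_select_t priDec;
+
+    memset(&priDec, 0, sizeof(priDec));
+    priDec.acl_pri   = 7;
+    priDec.dscp_pri  = 5;
+    priDec.dot1q_pri = 3;
+    priDec.port_pri  = 1;
+
+    return rtk_qos_priSel_set(PRIDECTBL_IDX0, &priDec);
+}
+#endif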
+
+/* Function Name:
+ *      rtk_qos_1pPriRemap_set
+ * Description:
+ *      Configure 1Q priorities mapping to internal absolute priority.
+ * Input:
+ *      dot1p_pri   - 802.1p priority value.
+ *      int_pri     - internal priority value.
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK               - OK
+ *      RT_ERR_FAILED           - Failed
+ *      RT_ERR_SMI              - SMI access error
+ *      RT_ERR_INPUT            - Invalid input parameters.
+ *      RT_ERR_VLAN_PRIORITY    - Invalid 1p priority.
+ *      RT_ERR_QOS_INT_PRIORITY - Invalid priority.
+ * Note:
+ *      Assigns the 802.1Q priority to an internal ASIC priority, which is used for queue usage and packet scheduling.
+ */
+extern rtk_api_ret_t rtk_qos_1pPriRemap_set(rtk_pri_t dot1p_pri, rtk_pri_t int_pri);
+
+/* Function Name:
+ *      rtk_qos_1pPriRemap_get
+ * Description:
+ *      Get 1Q priorities mapping to internal absolute priority.
+ * Input:
+ *      dot1p_pri - 802.1p priority value.
+ * Output:
+ *      pInt_pri - internal priority value.
+ * Return:
+ *      RT_ERR_OK               - OK
+ *      RT_ERR_FAILED           - Failed
+ *      RT_ERR_SMI              - SMI access error
+ *      RT_ERR_VLAN_PRIORITY    - Invalid priority.
+ *      RT_ERR_QOS_INT_PRIORITY - Invalid priority.
+ * Note:
+ *      Assigns the 802.1Q priority to an internal ASIC priority, which is used for queue usage and packet scheduling.
+ */
+extern rtk_api_ret_t rtk_qos_1pPriRemap_get(rtk_pri_t dot1p_pri, rtk_pri_t *pInt_pri);
+
+
+/* Function Name:
+ *      rtk_qos_1pRemarkSrcSel_set
+ * Description:
+ *      Set remarking source of 802.1p remarking.
+ * Input:
+ *      type      - remarking source
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK
+ *      RT_ERR_FAILED
+ *      RT_ERR_NOT_INIT         - The module is not initialized
+ *      RT_ERR_PORT_ID          - invalid port id
+ *      RT_ERR_INPUT            - invalid input parameter
+ *
+ * Note:
+ *      The API configures the 802.1p remark functionality to map the original
+ *      802.1p value or the internal priority to the transmitted 802.1p value.
+ */
+extern rtk_api_ret_t rtk_qos_1pRemarkSrcSel_set(rtk_qos_1pRmkSrc_t type);
+
+/* Function Name:
+ *      rtk_qos_1pRemarkSrcSel_get
+ * Description:
+ *      Get remarking source of 802.1p remarking.
+ * Input:
+ *      none
+ * Output:
+ *      pType      - remarking source
+ * Return:
+ *      RT_ERR_OK
+ *      RT_ERR_FAILED
+ *      RT_ERR_NOT_INIT         - The module is not initialized
+ *      RT_ERR_PORT_ID          - invalid port id
+ *      RT_ERR_INPUT            - invalid input parameter
+ *      RT_ERR_NULL_POINTER     - input parameter may be null pointer
+ *
+ * Note:
+ *      None
+ */
+extern rtk_api_ret_t rtk_qos_1pRemarkSrcSel_get(rtk_qos_1pRmkSrc_t *pType);
+
+/* Function Name:
+ *      rtk_qos_dscpPriRemap_set
+ * Description:
+ *      Map dscp value to internal priority.
+ * Input:
+ *      dscp    - Dscp value of receiving frame
+ *      int_pri - internal priority value.
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK               - OK
+ *      RT_ERR_FAILED           - Failed
+ *      RT_ERR_SMI              - SMI access error
+ *      RT_ERR_INPUT            - Invalid input parameters.
+ *      RT_ERR_QOS_DSCP_VALUE   - Invalid DSCP value.
+ *      RT_ERR_QOS_INT_PRIORITY - Invalid priority.
+ * Note:
+ *      The Differentiated Services Code Point is a selector for a router's per-hop behaviors; as a selector, a
+ *      numerically greater DSCP does not imply a better network service. The DSCP field completely overlaps the
+ *      old precedence field of TOS, so carefully chosen DSCP values preserve backward compatibility.
+ */
+extern rtk_api_ret_t rtk_qos_dscpPriRemap_set(rtk_dscp_t dscp, rtk_pri_t int_pri);
+
+/* Function Name:
+ *      rtk_qos_dscpPriRemap_get
+ * Description:
+ *      Get dscp value to internal priority.
+ * Input:
+ *      dscp - Dscp value of receiving frame
+ * Output:
+ *      pInt_pri - internal priority value.
+ * Return:
+ *      RT_ERR_OK               - OK
+ *      RT_ERR_FAILED           - Failed
+ *      RT_ERR_SMI              - SMI access error
+ *      RT_ERR_QOS_DSCP_VALUE   - Invalid DSCP value.
+ * Note:
+ *      The Differentiated Services Code Point is a selector for a router's per-hop behaviors; as a selector, a
+ *      numerically greater DSCP does not imply a better network service. The DSCP field completely overlaps the
+ *      old precedence field of TOS, so carefully chosen DSCP values preserve backward compatibility.
+ */
+extern rtk_api_ret_t rtk_qos_dscpPriRemap_get(rtk_dscp_t dscp, rtk_pri_t *pInt_pri);
+
+/* Function Name:
+ *      rtk_qos_portPri_set
+ * Description:
+ *      Configure priority usage to each port.
+ * Input:
+ *      port - Port id.
+ *      int_pri - internal priority value.
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK               - OK
+ *      RT_ERR_FAILED           - Failed
+ *      RT_ERR_SMI              - SMI access error
+ *      RT_ERR_PORT_ID          - Invalid port number.
+ *      RT_ERR_QOS_SEL_PORT_PRI - Invalid port priority.
+ *      RT_ERR_QOS_INT_PRIORITY - Invalid priority.
+ * Note:
+ *      The API can set priority of port assignments for queue usage and packet scheduling.
+ */
+extern rtk_api_ret_t rtk_qos_portPri_set(rtk_port_t port, rtk_pri_t int_pri) ;
+
+/* Function Name:
+ *      rtk_qos_portPri_get
+ * Description:
+ *      Get priority usage to each port.
+ * Input:
+ *      port - Port id.
+ * Output:
+ *      pInt_pri - internal priority value.
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_PORT_ID      - Invalid port number.
+ *      RT_ERR_INPUT        - Invalid input parameters.
+ * Note:
+ *      The API can get priority of port assignments for queue usage and packet scheduling.
+ */
+extern rtk_api_ret_t rtk_qos_portPri_get(rtk_port_t port, rtk_pri_t *pInt_pri) ;
+
+/* Function Name:
+ *      rtk_qos_queueNum_set
+ * Description:
+ *      Set output queue number for each port.
+ * Input:
+ *      port    - Port id.
+ *      queue_num - Mapping queue number (1~8)
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_PORT_ID      - Invalid port number.
+ *      RT_ERR_QUEUE_NUM    - Invalid queue number.
+ * Note:
+ *      The API can set the output queue number of the specified port. The queue number is from 1 to 8.
+ */
+extern rtk_api_ret_t rtk_qos_queueNum_set(rtk_port_t port, rtk_queue_num_t queue_num);
+
+/* Function Name:
+ *      rtk_qos_queueNum_get
+ * Description:
+ *      Get output queue number.
+ * Input:
+ *      port - Port id.
+ * Output:
+ *      pQueue_num - Mapping queue number
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_PORT_ID      - Invalid port number.
+ * Note:
+ *      The API will return the output queue number of the specified port. The queue number is from 1 to 8.
+ */
+extern rtk_api_ret_t rtk_qos_queueNum_get(rtk_port_t port, rtk_queue_num_t *pQueue_num);
+
+/* Function Name:
+ *      rtk_qos_priMap_set
+ * Description:
+ *      Set priority to queue ID mapping table parameters.
+ * Input:
+ *      queue_num   - Queue number usage.
+ *      pPri2qid    - Priority mapping to queue ID.
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK               - OK
+ *      RT_ERR_FAILED           - Failed
+ *      RT_ERR_SMI              - SMI access error
+ *      RT_ERR_INPUT            - Invalid input parameters.
+ *      RT_ERR_QUEUE_NUM        - Invalid queue number.
+ *      RT_ERR_QUEUE_ID         - Invalid queue id.
+ *      RT_ERR_PORT_ID          - Invalid port number.
+ *      RT_ERR_QOS_INT_PRIORITY - Invalid priority.
+ * Note:
+ *      ASIC supports priority mapping to queue with different queue number from 1 to 8.
+ *      For different queue numbers usage, ASIC supports different internal available queue IDs.
+ */
+extern rtk_api_ret_t rtk_qos_priMap_set(rtk_queue_num_t queue_num, rtk_qos_pri2queue_t *pPri2qid);
+
+
+/* Function Name:
+ *      rtk_qos_priMap_get
+ * Description:
+ *      Get priority to queue ID mapping table parameters.
+ * Input:
+ *      queue_num - Queue number usage.
+ * Output:
+ *      pPri2qid - Priority mapping to queue ID.
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_INPUT        - Invalid input parameters.
+ *      RT_ERR_QUEUE_NUM    - Invalid queue number.
+ * Note:
+ *      The API can return the mapping queue id of the specified priority and queue number.
+ *      The queue number is from 1 to 8.
+ */
+extern rtk_api_ret_t rtk_qos_priMap_get(rtk_queue_num_t queue_num, rtk_qos_pri2queue_t *pPri2qid);
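+
+/*
+ * Usage sketch (illustrative only, excluded from the build via #if 0):
+ * with 2 queues in use, send internal priorities 0-3 to queue 0 and 4-7 to
+ * queue 1. Which queue ids are legal for a given queue_num is ASIC
+ * dependent, so treat the mapping below as an example only.
+ */
+#if 0
+static rtk_api_ret_t example_pri_map(void)
+{
+    rtk_qos_pri2queue_t pri2qid;
+    rtk_uint32 pri;
+
+    for (pri = 0; pri < RTK_MAX_NUM_OF_PRIORITY; pri++)
+        pri2qid.pri2queue[pri] = (pri < 4) ? 0 : 1;
+
+    return rtk_qos_priMap_set(2, &pri2qid);
+}
+#endif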
+
+/* Function Name:
+ *      rtk_qos_schedulingQueue_set
+ * Description:
+ *      Set weight and type of queues in dedicated port.
+ * Input:
+ *      port        - Port id.
+ *      pQweights   - The array of weights for WRR/WFQ queue (0 for STRICT_PRIORITY queue).
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK               - OK
+ *      RT_ERR_FAILED           - Failed
+ *      RT_ERR_SMI              - SMI access error
+ *      RT_ERR_PORT_ID          - Invalid port number.
+ *      RT_ERR_QOS_QUEUE_WEIGHT - Invalid queue weight.
+ * Note:
+ *      The API sets the type (strict priority or weighted fair queue, WFQ) and
+ *      weight of each queue used on the dedicated port. If a queue id is not
+ *      included in the queue usage, its type and weight settings are ignored.
+ *      Strict queues are prioritized by queue id: strict queue id 5 carries a
+ *      higher priority than strict queue id 4. The WFQ queue weight ranges from
+ *      1 to 128, and weight 0 selects the strict priority queue type.
+ */
+extern rtk_api_ret_t rtk_qos_schedulingQueue_set(rtk_port_t port, rtk_qos_queue_weights_t *pQweights);
+
+/* Function Name:
+ *      rtk_qos_schedulingQueue_get
+ * Description:
+ *      Get weight and type of queues in dedicated port.
+ * Input:
+ *      port - Port id.
+ * Output:
+ *      pQweights - The array of weights for WRR/WFQ queue (0 for STRICT_PRIORITY queue).
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_INPUT        - Invalid input parameters.
+ *      RT_ERR_PORT_ID      - Invalid port number.
+ * Note:
+ *      The API gets the type (strict priority or weighted fair queue, WFQ) and weight of each queue used on the dedicated port.
+ *      The WFQ queue weight ranges from 1 to 128, and weight 0 selects the strict priority queue type.
+ */
+extern rtk_api_ret_t rtk_qos_schedulingQueue_get(rtk_port_t port, rtk_qos_queue_weights_t *pQweights);
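+
+/*
+ * Usage sketch (illustrative only, excluded from the build via #if 0):
+ * make queue 7 strict priority (weight 0) and share the remaining
+ * bandwidth 1:2:4 between queues 0-2, per the weight rules in the notes.
+ */
+#if 0
+static rtk_api_ret_t example_scheduling(rtk_port_t port)
+{
+    rtk_qos_queue_weights_t qWeights;
+
+    memset(&qWeights, 0, sizeof(qWeights));
+    qWeights.weights[0] = 1;
+    qWeights.weights[1] = 2;
+    qWeights.weights[2] = 4;
+    qWeights.weights[7] = 0;    /* weight 0 selects strict priority */
+
+    return rtk_qos_schedulingQueue_set(port, &qWeights);
+}
+#endif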
+
+/* Function Name:
+ *      rtk_qos_1pRemarkEnable_set
+ * Description:
+ *      Set 1p Remarking state
+ * Input:
+ *      port        - Port id.
+ *      enable      - State of per-port 1p Remarking
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_PORT_ID      - Invalid port number.
+ *      RT_ERR_ENABLE       - Invalid enable parameter.
+ * Note:
+ *      The API can enable or disable 802.1p remarking ability for whole system.
+ *      The status of 802.1p remark:
+ *      - DISABLED
+ *      - ENABLED
+ */
+extern rtk_api_ret_t rtk_qos_1pRemarkEnable_set(rtk_port_t port, rtk_enable_t enable);
+
+/* Function Name:
+ *      rtk_qos_1pRemarkEnable_get
+ * Description:
+ *      Get 802.1p remarking ability.
+ * Input:
+ *      port - Port id.
+ * Output:
+ *      pEnable - Status of 802.1p remark.
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_PORT_ID      - Invalid port number.
+ * Note:
+ *      The API can get 802.1p remarking ability.
+ *      The status of 802.1p remark:
+ *      - DISABLED
+ *      - ENABLED
+ */
+extern rtk_api_ret_t rtk_qos_1pRemarkEnable_get(rtk_port_t port, rtk_enable_t *pEnable);
+
+/* Function Name:
+ *      rtk_qos_1pRemark_set
+ * Description:
+ *      Set 802.1p remarking parameter.
+ * Input:
+ *      int_pri     - Internal priority value.
+ *      dot1p_pri   - 802.1p priority value.
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK               - OK
+ *      RT_ERR_FAILED           - Failed
+ *      RT_ERR_SMI              - SMI access error
+ *      RT_ERR_VLAN_PRIORITY    - Invalid 1p priority.
+ *      RT_ERR_QOS_INT_PRIORITY - Invalid priority.
+ * Note:
+ *      The API can set 802.1p parameters source priority and new priority.
+ */
+extern rtk_api_ret_t rtk_qos_1pRemark_set(rtk_pri_t int_pri, rtk_pri_t dot1p_pri);
+
+/* Function Name:
+ *      rtk_qos_1pRemark_get
+ * Description:
+ *      Get 802.1p remarking parameter.
+ * Input:
+ *      int_pri - Internal priority value.
+ * Output:
+ *      pDot1p_pri - 802.1p priority value.
+ * Return:
+ *      RT_ERR_OK               - OK
+ *      RT_ERR_FAILED           - Failed
+ *      RT_ERR_SMI              - SMI access error
+ *      RT_ERR_QOS_INT_PRIORITY - Invalid priority.
+ * Note:
+ *      The API can get the 802.1p remarking parameters. It returns the remarked priority for the given ingress priority.
+ */
+extern rtk_api_ret_t rtk_qos_1pRemark_get(rtk_pri_t int_pri, rtk_pri_t *pDot1p_pri);
+
+/* Function Name:
+ *      rtk_qos_dscpRemarkEnable_set
+ * Description:
+ *      Set DSCP remarking ability.
+ * Input:
+ *      port    - Port id.
+ *      enable  - status of DSCP remark.
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK               - OK
+ *      RT_ERR_FAILED           - Failed
+ *      RT_ERR_SMI              - SMI access error
+ *      RT_ERR_PORT_ID          - Invalid port number.
+ *      RT_ERR_QOS_INT_PRIORITY - Invalid priority.
+ *      RT_ERR_ENABLE           - Invalid enable parameter.
+ * Note:
+ *      The API can enable or disable DSCP remarking ability for whole system.
+ *      The status of DSCP remark:
+ *      - DISABLED
+ *      - ENABLED
+ */
+extern rtk_api_ret_t rtk_qos_dscpRemarkEnable_set(rtk_port_t port, rtk_enable_t enable);
+
+/* Function Name:
+ *      rtk_qos_dscpRemarkEnable_get
+ * Description:
+ *      Get DSCP remarking ability.
+ * Input:
+ *      port - Port id.
+ * Output:
+ *      pEnable - status of DSCP remarking.
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_PORT_ID      - Invalid port number.
+ * Note:
+ *      The API can get DSCP remarking ability.
+ *      The status of DSCP remark:
+ *      - DISABLED
+ *      - ENABLED
+ */
+extern rtk_api_ret_t rtk_qos_dscpRemarkEnable_get(rtk_port_t port, rtk_enable_t *pEnable);
+
+/* Function Name:
+ *      rtk_qos_dscpRemark_set
+ * Description:
+ *      Set DSCP remarking parameter.
+ * Input:
+ *      int_pri - Internal priority value.
+ *      dscp    - DSCP value.
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK               - OK
+ *      RT_ERR_FAILED           - Failed
+ *      RT_ERR_SMI              - SMI access error
+ *      RT_ERR_QOS_INT_PRIORITY - Invalid priority.
+ *      RT_ERR_QOS_DSCP_VALUE   - Invalid DSCP value.
+ * Note:
+ *      The API can set DSCP value and mapping priority.
+ */
+extern rtk_api_ret_t rtk_qos_dscpRemark_set(rtk_pri_t int_pri, rtk_dscp_t dscp);
+
+/* Function Name:
+ *      rtk_qos_dscpRemark_get
+ * Description:
+ *      Get DSCP remarking parameter.
+ * Input:
+ *      int_pri - Internal priority value.
+ * Output:
+ *      pDscp   - DSCP value.
+ * Return:
+ *      RT_ERR_OK               - OK
+ *      RT_ERR_FAILED           - Failed
+ *      RT_ERR_SMI              - SMI access error
+ *      RT_ERR_QOS_INT_PRIORITY - Invalid priority.
+ * Note:
+ *      The API can get the DSCP remarking parameters. It returns the DSCP value mapped to the given priority.
+ */
+extern rtk_api_ret_t rtk_qos_dscpRemark_get(rtk_pri_t int_pri, rtk_dscp_t *pDscp);
+
+/* Function Name:
+ *      rtk_qos_dscpRemarkSrcSel_set
+ * Description:
+ *      Set remarking source of DSCP remarking.
+ * Input:
+ *      type      - remarking source
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK
+ *      RT_ERR_FAILED
+ *      RT_ERR_NOT_INIT         - The module is not initialized
+ *      RT_ERR_PORT_ID          - invalid port id
+ *      RT_ERR_INPUT            - invalid input parameter
+ *
+ * Note:
+ *      The API can configure DSCP remark functionality to map original DSCP value or internal
+ *      priority to TX DSCP value.
+ */
+extern rtk_api_ret_t rtk_qos_dscpRemarkSrcSel_set(rtk_qos_dscpRmkSrc_t type);
+
+
+/* Function Name:
+ *      rtk_qos_dscpRemarkSrcSel_get
+ * Description:
+ *      Get remarking source of DSCP remarking.
+ * Input:
+ *      none
+ * Output:
+ *      pType      - remarking source
+ * Return:
+ *      RT_ERR_OK
+ *      RT_ERR_FAILED
+ *      RT_ERR_NOT_INIT         - The module is not initialized
+ *      RT_ERR_PORT_ID          - invalid port id
+ *      RT_ERR_INPUT            - invalid input parameter
+ *      RT_ERR_NULL_POINTER     - input parameter may be null pointer
+ *
+ * Note:
+ *      None
+ */
+extern rtk_api_ret_t rtk_qos_dscpRemarkSrcSel_get(rtk_qos_dscpRmkSrc_t *pType);
+
+
+/* Function Name:
+ *      rtk_qos_dscpRemark2Dscp_set
+ * Description:
+ *      Set DSCP to remarked DSCP mapping.
+ * Input:
+ *      dscp    - DSCP value
+ *      rmkDscp - remarked DSCP value
+ * Output:
+ *      None.
+ * Return:
+ *      RT_ERR_OK
+ *      RT_ERR_FAILED
+ *      RT_ERR_QOS_DSCP_VALUE   - Invalid dscp value
+ * Note:
+ *      The dscp parameter can be a DSCP value or an internal priority, depending on the
+ *      configuration made through rtk_qos_dscpRemarkSrcSel_set(), because the DSCP remark
+ *      functionality can map either the original DSCP value or the internal priority to
+ *      the TX DSCP value.
+ */
+extern rtk_api_ret_t rtk_qos_dscpRemark2Dscp_set(rtk_dscp_t dscp, rtk_dscp_t rmkDscp);
+
+/* Function Name:
+ *      rtk_qos_dscpRemark2Dscp_get
+ * Description:
+ *      Get DSCP to remarked DSCP mapping.
+ * Input:
+ *      dscp    - DSCP value
+ * Output:
+ *      pDscp   - remarked DSCP value
+ * Return:
+ *      RT_ERR_OK
+ *      RT_ERR_FAILED
+ *      RT_ERR_QOS_DSCP_VALUE   - Invalid dscp value
+ *      RT_ERR_NULL_POINTER     - NULL pointer
+ * Note:
+ *      None.
+ */
+extern rtk_api_ret_t rtk_qos_dscpRemark2Dscp_get(rtk_dscp_t dscp, rtk_dscp_t *pDscp);
+
+/* Function Name:
+ *      rtk_qos_portPriSelIndex_set
+ * Description:
+ *      Configure priority decision index to each port.
+ * Input:
+ *      port - Port id.
+ *      index - priority decision index.
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK               - OK
+ *      RT_ERR_FAILED           - Failed
+ *      RT_ERR_SMI              - SMI access error
+ *      RT_ERR_PORT_ID          - Invalid port number.
+ *      RT_ERR_ENTRY_INDEX      - Invalid entry index.
+ * Note:
+ *      The API can set priority of port assignments for queue usage and packet scheduling.
+ */
+extern rtk_api_ret_t rtk_qos_portPriSelIndex_set(rtk_port_t port, rtk_qos_priDecTbl_t index);
+
+/* Function Name:
+ *      rtk_qos_portPriSelIndex_get
+ * Description:
+ *      Get priority decision index from each port.
+ * Input:
+ *      port - Port id.
+ * Output:
+ *      pIndex - priority decision index.
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_PORT_ID      - Invalid port number.
+ * Note:
+ *      The API can get priority of port assignments for queue usage and packet scheduling.
+ */
+extern rtk_api_ret_t rtk_qos_portPriSelIndex_get(rtk_port_t port, rtk_qos_priDecTbl_t *pIndex);
+
+#endif /* __RTK_API_QOS_H__ */
diff -Nruw linux-4.4.115-fbx/drivers/misc/freebox./rtlapi/rate.c linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/rate.c
--- linux-4.4.115-fbx/drivers/misc/freebox./rtlapi/rate.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/rate.c	2019-01-22 16:16:24.711257346 +0100
@@ -0,0 +1,739 @@
+/*
+ * Copyright (C) 2013 Realtek Semiconductor Corp.
+ * All Rights Reserved.
+ *
+ * This program is the proprietary software of Realtek Semiconductor
+ * Corporation and/or its licensors, and only be used, duplicated,
+ * modified or distributed under the authorized license from Realtek.
+ *
+ * ANY USE OF THE SOFTWARE OTHER THAN AS AUTHORIZED UNDER
+ * THIS LICENSE OR COPYRIGHT LAW IS PROHIBITED.
+ *
+ * $Revision: 79432 $
+ * $Date: 2017-06-06 16:36:50 +0800 (Tue, 06 Jun 2017) $
+ *
+ * Purpose : RTK switch high-level API for RTL8367/RTL8367C
+ * Feature : Here is a list of all functions and variables in rate module.
+ *
+ */
+
+#include <rtk_switch.h>
+#include <rtk_error.h>
+#include <rate.h>
+#include <qos.h>
+#include <string.h>
+
+#include <rtl8367c_asicdrv.h>
+#include <rtl8367c_asicdrv_meter.h>
+#include <rtl8367c_asicdrv_inbwctrl.h>
+#include <rtl8367c_asicdrv_scheduling.h>
+
+static rtk_api_ret_t _rtk_rate_shareMeter_set(rtk_meter_id_t index, rtk_meter_type_t type, rtk_rate_t rate, rtk_enable_t ifg_include)
+{
+    rtk_api_ret_t retVal;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if (index > RTK_MAX_METER_ID)
+        return RT_ERR_FILTER_METER_ID;
+
+    if (type >= METER_TYPE_END)
+        return RT_ERR_INPUT;
+
+    if (ifg_include >= RTK_ENABLE_END)
+        return RT_ERR_INPUT;
+
+    switch (type)
+    {
+        case METER_TYPE_KBPS:
+            if (rate > RTL8367C_QOS_RATE_INPUT_MAX_HSG || rate < RTL8367C_QOS_RATE_INPUT_MIN)
+                return RT_ERR_RATE;
+
+            if ((retVal = rtl8367c_setAsicShareMeter(index, rate >> 3, ifg_include)) != RT_ERR_OK)
+                return retVal;
+
+            break;
+        case METER_TYPE_PPS:
+            if (rate > RTL8367C_QOS_PPS_INPUT_MAX || rate < RTL8367C_QOS_PPS_INPUT_MIN)
+                return RT_ERR_RATE;
+
+            if ((retVal = rtl8367c_setAsicShareMeter(index, rate, ifg_include)) != RT_ERR_OK)
+                return retVal;
+
+            break;
+        default:
+            return RT_ERR_INPUT;
+    }
+
+    /* Set Type */
+    if ((retVal = rtl8367c_setAsicShareMeterType(index, (rtk_uint32)type)) != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_rate_shareMeter_get(rtk_meter_id_t index, rtk_meter_type_t *pType, rtk_rate_t *pRate, rtk_enable_t *pIfg_include)
+{
+    rtk_api_ret_t retVal;
+    rtk_uint32 regData;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if (index > RTK_MAX_METER_ID)
+        return RT_ERR_FILTER_METER_ID;
+
+    if(NULL == pType)
+        return RT_ERR_NULL_POINTER;
+
+    if(NULL == pRate)
+        return RT_ERR_NULL_POINTER;
+
+    if(NULL == pIfg_include)
+        return RT_ERR_NULL_POINTER;
+
+    if ((retVal = rtl8367c_getAsicShareMeter(index, &regData, pIfg_include)) != RT_ERR_OK)
+        return retVal;
+
+    if ((retVal = rtl8367c_getAsicShareMeterType(index, (rtk_uint32 *)pType)) != RT_ERR_OK)
+        return retVal;
+
+    if(*pType == METER_TYPE_KBPS)
+        *pRate = regData<<3;
+    else
+        *pRate = regData;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_rate_shareMeterBucket_set(rtk_meter_id_t index, rtk_uint32 bucket_size)
+{
+    rtk_api_ret_t retVal;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if (index > RTK_MAX_METER_ID)
+        return RT_ERR_FILTER_METER_ID;
+
+    if(bucket_size > RTL8367C_METERBUCKETSIZEMAX)
+        return RT_ERR_INPUT;
+
+    if ((retVal = rtl8367c_setAsicShareMeterBucketSize(index, bucket_size)) != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_rate_shareMeterBucket_get(rtk_meter_id_t index, rtk_uint32 *pBucket_size)
+{
+    rtk_api_ret_t retVal;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if (index > RTK_MAX_METER_ID)
+        return RT_ERR_FILTER_METER_ID;
+
+    if(NULL == pBucket_size)
+        return RT_ERR_NULL_POINTER;
+
+    if ((retVal = rtl8367c_getAsicShareMeterBucketSize(index, pBucket_size)) != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_rate_igrBandwidthCtrlRate_set(rtk_port_t port, rtk_rate_t rate, rtk_enable_t ifg_include, rtk_enable_t fc_enable)
+{
+    rtk_api_ret_t retVal;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    /* Check Port Valid */
+    RTK_CHK_PORT_VALID(port);
+
+    if(ifg_include >= RTK_ENABLE_END)
+        return RT_ERR_INPUT;
+
+    if(fc_enable >= RTK_ENABLE_END)
+        return RT_ERR_INPUT;
+
+    if(rtk_switch_isHsgPort(port) == RT_ERR_OK)
+    {
+        if ((rate > RTL8367C_QOS_RATE_INPUT_MAX_HSG) || (rate < RTL8367C_QOS_RATE_INPUT_MIN))
+            return RT_ERR_QOS_EBW_RATE;
+    }
+    else
+    {
+        if ((rate > RTL8367C_QOS_RATE_INPUT_MAX) || (rate < RTL8367C_QOS_RATE_INPUT_MIN))
+            return RT_ERR_QOS_EBW_RATE;
+    }
+
+    if ((retVal = rtl8367c_setAsicPortIngressBandwidth(rtk_switch_port_L2P_get(port), rate>>3, ifg_include,fc_enable)) != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_rate_igrBandwidthCtrlRate_get(rtk_port_t port, rtk_rate_t *pRate, rtk_enable_t *pIfg_include, rtk_enable_t *pFc_enable)
+{
+    rtk_api_ret_t retVal;
+    rtk_uint32 regData;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    /* Check Port Valid */
+    RTK_CHK_PORT_VALID(port);
+
+    if(NULL == pIfg_include)
+        return RT_ERR_NULL_POINTER;
+
+    if(NULL == pFc_enable)
+        return RT_ERR_NULL_POINTER;
+
+    if ((retVal = rtl8367c_getAsicPortIngressBandwidth(rtk_switch_port_L2P_get(port), &regData, pIfg_include, pFc_enable)) != RT_ERR_OK)
+        return retVal;
+
+    *pRate = regData<<3;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_rate_egrBandwidthCtrlRate_set( rtk_port_t port, rtk_rate_t rate,  rtk_enable_t ifg_include)
+{
+    rtk_api_ret_t retVal;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    /* Check Port Valid */
+    RTK_CHK_PORT_VALID(port);
+
+    if(rtk_switch_isHsgPort(port) == RT_ERR_OK)
+    {
+        if ((rate > RTL8367C_QOS_RATE_INPUT_MAX_HSG) || (rate < RTL8367C_QOS_RATE_INPUT_MIN))
+            return RT_ERR_QOS_EBW_RATE;
+    }
+    else
+    {
+        if ((rate > RTL8367C_QOS_RATE_INPUT_MAX) || (rate < RTL8367C_QOS_RATE_INPUT_MIN))
+            return RT_ERR_QOS_EBW_RATE;
+    }
+
+    if (ifg_include >= RTK_ENABLE_END)
+        return RT_ERR_ENABLE;
+
+    if ((retVal = rtl8367c_setAsicPortEgressRate(rtk_switch_port_L2P_get(port), rate>>3)) != RT_ERR_OK)
+        return retVal;
+
+    if ((retVal = rtl8367c_setAsicPortEgressRateIfg(ifg_include)) != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_rate_egrBandwidthCtrlRate_get(rtk_port_t port, rtk_rate_t *pRate, rtk_enable_t *pIfg_include)
+{
+    rtk_api_ret_t retVal;
+    rtk_uint32 regData;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    /* Check Port Valid */
+    RTK_CHK_PORT_VALID(port);
+
+    if(NULL == pRate)
+        return RT_ERR_NULL_POINTER;
+
+    if(NULL == pIfg_include)
+        return RT_ERR_NULL_POINTER;
+
+    if ((retVal = rtl8367c_getAsicPortEgressRate(rtk_switch_port_L2P_get(port), &regData)) != RT_ERR_OK)
+        return retVal;
+
+    *pRate = regData << 3;
+
+    if ((retVal = rtl8367c_getAsicPortEgressRateIfg((rtk_uint32*)pIfg_include)) != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_rate_egrQueueBwCtrlEnable_get(rtk_port_t port, rtk_qid_t queue, rtk_enable_t *pEnable)
+{
+    rtk_api_ret_t retVal;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    /* Check Port Valid */
+    RTK_CHK_PORT_VALID(port);
+
+    /* For the whole-port function, the queue value must be RTK_WHOLE_SYSTEM (0xFF) */
+    if (queue != RTK_WHOLE_SYSTEM)
+        return RT_ERR_QUEUE_ID;
+
+    if(NULL == pEnable)
+        return RT_ERR_NULL_POINTER;
+
+    if ((retVal = rtl8367c_getAsicAprEnable(rtk_switch_port_L2P_get(port),pEnable))!=RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_rate_egrQueueBwCtrlEnable_set(rtk_port_t port, rtk_qid_t queue, rtk_enable_t enable)
+{
+    rtk_api_ret_t retVal;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    /* Check Port Valid */
+    RTK_CHK_PORT_VALID(port);
+
+    /* For the whole-port function, the queue value must be RTK_WHOLE_SYSTEM (0xFF) */
+    if (queue != RTK_WHOLE_SYSTEM)
+        return RT_ERR_QUEUE_ID;
+
+    if (enable>=RTK_ENABLE_END)
+        return RT_ERR_INPUT;
+
+    if ((retVal = rtl8367c_setAsicAprEnable(rtk_switch_port_L2P_get(port), enable))!=RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_rate_egrQueueBwCtrlRate_get(rtk_port_t port, rtk_qid_t queue, rtk_meter_id_t *pIndex)
+{
+    rtk_api_ret_t retVal;
+    rtk_uint32 offset_idx;
+    rtk_uint32 phy_port;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    /* Check Port Valid */
+    RTK_CHK_PORT_VALID(port);
+
+    if (queue >= RTK_MAX_NUM_OF_QUEUE)
+        return RT_ERR_QUEUE_ID;
+
+    if(NULL == pIndex)
+        return RT_ERR_NULL_POINTER;
+
+    phy_port = rtk_switch_port_L2P_get(port);
+    if ((retVal=rtl8367c_getAsicAprMeter(phy_port, queue,&offset_idx))!=RT_ERR_OK)
+        return retVal;
+
+    *pIndex = offset_idx + ((phy_port%4)*8);
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_rate_egrQueueBwCtrlRate_set(rtk_port_t port, rtk_qid_t queue, rtk_meter_id_t index)
+{
+    rtk_api_ret_t retVal;
+    rtk_uint32 offset_idx;
+    rtk_uint32 phy_port;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    /* Check Port Valid */
+    RTK_CHK_PORT_VALID(port);
+
+    if (queue >= RTK_MAX_NUM_OF_QUEUE)
+        return RT_ERR_QUEUE_ID;
+
+    if (index > RTK_MAX_METER_ID)
+        return RT_ERR_FILTER_METER_ID;
+
+    phy_port = rtk_switch_port_L2P_get(port);
+    if (index < ((phy_port%4)*8) ||  index > (7 + (phy_port%4)*8))
+        return RT_ERR_FILTER_METER_ID;
+
+    offset_idx = index - ((phy_port%4)*8);
+
+    if ((retVal=rtl8367c_setAsicAprMeter(phy_port,queue,offset_idx))!=RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+/* Function Name:
+ *      rtk_rate_shareMeter_set
+ * Description:
+ *      Set meter configuration
+ * Input:
+ *      index       - shared meter index
+ *      type        - shared meter type
+ *      rate        - rate of share meter
+ *      ifg_include - include IFG or not, ENABLE:include DISABLE:exclude
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK               - OK
+ *      RT_ERR_FAILED           - Failed
+ *      RT_ERR_SMI              - SMI access error
+ *      RT_ERR_FILTER_METER_ID  - Invalid meter
+ *      RT_ERR_RATE             - Invalid rate
+ *      RT_ERR_INPUT            - Invalid input parameters
+ * Note:
+ *      The API can set shared meter rate and ifg include for each meter.
+ *      The rate unit is 1 kbps and the range is from 8k to 1048568k if type is METER_TYPE_KBPS and
+ *      the granularity of rate is 8 kbps.
+ *      The rate unit is packets per second and the range is 1 ~ 0x1FFF if type is METER_TYPE_PPS.
+ *      The ifg_include parameter is used
+ *      for rate calculation with/without inter-frame-gap and preamble.
+ */
+rtk_api_ret_t rtk_rate_shareMeter_set(rtk_meter_id_t index, rtk_meter_type_t type, rtk_rate_t rate, rtk_enable_t ifg_include)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_rate_shareMeter_set(index, type, rate, ifg_include);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_rate_shareMeter_get
+ * Description:
+ *      Get meter configuration
+ * Input:
+ *      index        - shared meter index
+ * Output:
+ *      pType        - Meter Type
+ *      pRate        - pointer of rate of share meter
+ *      pIfg_include - include IFG or not, ENABLE:include DISABLE:exclude
+ * Return:
+ *      RT_ERR_OK               - OK
+ *      RT_ERR_FAILED           - Failed
+ *      RT_ERR_SMI              - SMI access error
+ *      RT_ERR_FILTER_METER_ID  - Invalid meter
+ * Note:
+ *
+ */
+rtk_api_ret_t rtk_rate_shareMeter_get(rtk_meter_id_t index, rtk_meter_type_t *pType, rtk_rate_t *pRate, rtk_enable_t *pIfg_include)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_rate_shareMeter_get(index, pType, pRate, pIfg_include);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
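+
+/*
+ * Usage sketch (illustrative only, excluded from the build via #if 0):
+ * program shared meter 0 to 100 Mbps (100000 kbps, a multiple of the
+ * 8 kbps granularity) with IFG and preamble counted, then read it back.
+ * ENABLED is the rtk_enable_t value for "include IFG".
+ */
+#if 0
+static rtk_api_ret_t example_share_meter(void)
+{
+    rtk_api_ret_t retVal;
+    rtk_meter_type_t type;
+    rtk_rate_t rate;
+    rtk_enable_t ifg;
+
+    if ((retVal = rtk_rate_shareMeter_set(0, METER_TYPE_KBPS, 100000, ENABLED)) != RT_ERR_OK)
+        return retVal;
+
+    return rtk_rate_shareMeter_get(0, &type, &rate, &ifg);
+}
+#endif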
+
+/* Function Name:
+ *      rtk_rate_shareMeterBucket_set
+ * Description:
+ *      Set meter Bucket Size
+ * Input:
+ *      index        - shared meter index
+ *      bucket_size  - Bucket Size
+ * Output:
+ *      None.
+ * Return:
+ *      RT_ERR_OK               - OK
+ *      RT_ERR_FAILED           - Failed
+ *      RT_ERR_INPUT            - Error Input
+ *      RT_ERR_SMI              - SMI access error
+ *      RT_ERR_FILTER_METER_ID  - Invalid meter
+ * Note:
+ *      The API can set shared meter bucket size.
+ */
+rtk_api_ret_t rtk_rate_shareMeterBucket_set(rtk_meter_id_t index, rtk_uint32 bucket_size)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_rate_shareMeterBucket_set(index, bucket_size);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_rate_shareMeterBucket_get
+ * Description:
+ *      Get meter Bucket Size
+ * Input:
+ *      index        - shared meter index
+ * Output:
+ *      pBucket_size - Bucket Size
+ * Return:
+ *      RT_ERR_OK               - OK
+ *      RT_ERR_FAILED           - Failed
+ *      RT_ERR_SMI              - SMI access error
+ *      RT_ERR_FILTER_METER_ID  - Invalid meter
+ * Note:
+ *      The API can get shared meter bucket size.
+ */
+rtk_api_ret_t rtk_rate_shareMeterBucket_get(rtk_meter_id_t index, rtk_uint32 *pBucket_size)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_rate_shareMeterBucket_get(index, pBucket_size);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_rate_igrBandwidthCtrlRate_set
+ * Description:
+ *      Set port ingress bandwidth control
+ * Input:
+ *      port        - Port id
+ *      rate        - Rate of share meter
+ *      ifg_include - include IFG or not, ENABLE:include DISABLE:exclude
+ *      fc_enable   - enable flow control or not, ENABLE:use flow control DISABLE:drop
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_PORT_ID      - Invalid port number.
+ *      RT_ERR_ENABLE       - Invalid IFG parameter.
+ *      RT_ERR_INBW_RATE    - Invalid ingress rate parameter.
+ * Note:
+ *      The rate unit is 1 kbps and the range is from 8k to 1048568k. The granularity of rate is 8 kbps.
+ *      The ifg_include parameter is used for rate calculation with/without inter-frame-gap and preamble.
+ */
+rtk_api_ret_t rtk_rate_igrBandwidthCtrlRate_set(rtk_port_t port, rtk_rate_t rate, rtk_enable_t ifg_include, rtk_enable_t fc_enable)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_rate_igrBandwidthCtrlRate_set(port, rate, ifg_include, fc_enable);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_rate_igrBandwidthCtrlRate_get
+ * Description:
+ *      Get port ingress bandwidth control
+ * Input:
+ *      port - Port id
+ * Output:
+ *      pRate           - Rate of share meter
+ *      pIfg_include    - Rate's calculation including IFG, ENABLE:include DISABLE:exclude
+ *      pFc_enable      - enable flow control or not, ENABLE:use flow control DISABLE:drop
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_PORT_ID      - Invalid port number.
+ *      RT_ERR_INPUT        - Invalid input parameters.
+ * Note:
+ *     The rate unit is 1 kbps and the range is from 8k to 1048568k. The granularity of rate is 8 kbps.
+ *     The ifg_include parameter is used for rate calculation with/without inter-frame-gap and preamble.
+ */
+rtk_api_ret_t rtk_rate_igrBandwidthCtrlRate_get(rtk_port_t port, rtk_rate_t *pRate, rtk_enable_t *pIfg_include, rtk_enable_t *pFc_enable)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_rate_igrBandwidthCtrlRate_get(port, pRate, pIfg_include, pFc_enable);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
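+
+/*
+ * Usage sketch (illustrative only, excluded from the build via #if 0):
+ * cap ingress on a port at 50 Mbps (50000 kbps), exclude IFG from the
+ * calculation, and assert flow control instead of dropping. Because the
+ * ASIC stores the rate in 8 kbps units (the rate >> 3 above), a value that
+ * is not a multiple of 8 reads back rounded down.
+ */
+#if 0
+static rtk_api_ret_t example_igr_bw(rtk_port_t port)
+{
+    return rtk_rate_igrBandwidthCtrlRate_set(port, 50000, DISABLED, ENABLED);
+}
+#endif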
+
+/* Function Name:
+ *      rtk_rate_egrBandwidthCtrlRate_set
+ * Description:
+ *      Set port egress bandwidth control
+ * Input:
+ *      port        - Port id
+ *      rate        - Rate of egress bandwidth
+ *      ifg_include - include IFG or not, ENABLE:include DISABLE:exclude
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_PORT_ID      - Invalid port number.
+ *      RT_ERR_INPUT        - Invalid input parameters.
+ *      RT_ERR_QOS_EBW_RATE - Invalid egress bandwidth/rate
+ * Note:
+ *     The rate unit is 1 kbps and the range is from 8k to 1048568k. The granularity of rate is 8 kbps.
+ *     The ifg_include parameter is used for rate calculation with/without inter-frame-gap and preamble.
+ */
+rtk_api_ret_t rtk_rate_egrBandwidthCtrlRate_set( rtk_port_t port, rtk_rate_t rate,  rtk_enable_t ifg_include)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_rate_egrBandwidthCtrlRate_set(port, rate, ifg_include);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_rate_egrBandwidthCtrlRate_get
+ * Description:
+ *      Get port egress bandwidth control
+ * Input:
+ *      port - Port id
+ * Output:
+ *      pRate           - Rate of egress bandwidth
+ *      pIfg_include    - Rate's calculation including IFG, ENABLE:include DISABLE:exclude
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_PORT_ID      - Invalid port number.
+ *      RT_ERR_INPUT        - Invalid input parameters.
+ * Note:
+ *     The rate unit is 1 kbps and the range is from 8k to 1048568k. The granularity of rate is 8 kbps.
+ *     The ifg_include parameter is used for rate calculation with/without inter-frame-gap and preamble.
+ */
+rtk_api_ret_t rtk_rate_egrBandwidthCtrlRate_get(rtk_port_t port, rtk_rate_t *pRate, rtk_enable_t *pIfg_include)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_rate_egrBandwidthCtrlRate_get(port, pRate, pIfg_include);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_rate_egrQueueBwCtrlEnable_get
+ * Description:
+ *      Get enable status of egress bandwidth control on specified queue.
+ * Input:
+ *      port    - port id
+ *      queue   - queue id
+ * Output:
+ *      pEnable - Pointer to enable status of egress queue bandwidth control
+ * Return:
+ *      RT_ERR_OK
+ *      RT_ERR_FAILED
+ *      RT_ERR_PORT_ID          - invalid port id
+ *      RT_ERR_QUEUE_ID         - invalid queue id
+ *      RT_ERR_NULL_POINTER     - input parameter may be null pointer
+ * Note:
+ *      None
+ */
+rtk_api_ret_t rtk_rate_egrQueueBwCtrlEnable_get(rtk_port_t port, rtk_qid_t queue, rtk_enable_t *pEnable)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_rate_egrQueueBwCtrlEnable_get(port, queue, pEnable);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_rate_egrQueueBwCtrlEnable_set
+ * Description:
+ *      Set enable status of egress bandwidth control on specified queue.
+ * Input:
+ *      port   - port id
+ *      queue  - queue id
+ *      enable - enable status of egress queue bandwidth control
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK
+ *      RT_ERR_FAILED
+ *      RT_ERR_PORT_ID          - invalid port id
+ *      RT_ERR_QUEUE_ID         - invalid queue id
+ *      RT_ERR_INPUT            - invalid input parameter
+ * Note:
+ *      None
+ */
+rtk_api_ret_t rtk_rate_egrQueueBwCtrlEnable_set(rtk_port_t port, rtk_qid_t queue, rtk_enable_t enable)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_rate_egrQueueBwCtrlEnable_set(port, queue, enable);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_rate_egrQueueBwCtrlRate_get
+ * Description:
+ *      Get rate of egress bandwidth control on specified queue.
+ * Input:
+ *      port   - port id
+ *      queue  - queue id
+ * Output:
+ *      pIndex - shared meter index bound to the queue
+ * Return:
+ *      RT_ERR_OK
+ *      RT_ERR_FAILED
+ *      RT_ERR_PORT_ID          - invalid port id
+ *      RT_ERR_QUEUE_ID         - invalid queue id
+ *      RT_ERR_FILTER_METER_ID  - Invalid meter id
+ * Note:
+ *    The actual rate control is set in shared meters.
+ *    The unit of granularity is 8 kbps.
+ */
+rtk_api_ret_t rtk_rate_egrQueueBwCtrlRate_get(rtk_port_t port, rtk_qid_t queue, rtk_meter_id_t *pIndex)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_rate_egrQueueBwCtrlRate_get(port, queue, pIndex);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_rate_egrQueueBwCtrlRate_set
+ * Description:
+ *      Set rate of egress bandwidth control on specified queue.
+ * Input:
+ *      port  - port id
+ *      queue - queue id
+ *      index - shared meter index
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK
+ *      RT_ERR_FAILED
+ *      RT_ERR_PORT_ID          - invalid port id
+ *      RT_ERR_QUEUE_ID         - invalid queue id
+ *      RT_ERR_FILTER_METER_ID  - Invalid meter id
+ * Note:
+ *    The actual rate control is set in shared meters.
+ *    The unit of granularity is 8 kbps.
+ */
+rtk_api_ret_t rtk_rate_egrQueueBwCtrlRate_set(rtk_port_t port, rtk_qid_t queue, rtk_meter_id_t index)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_rate_egrQueueBwCtrlRate_set(port, queue, index);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
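+
+/*
+ * Usage sketch (illustrative only, excluded from the build via #if 0):
+ * bind queue 2 of port 1 to shared meter 9, then enable per-queue egress
+ * shaping for the whole port (the enable call requires queue id
+ * RTK_WHOLE_SYSTEM). As the range check above shows, each physical port may
+ * only use the 8-meter window (phy_port % 4) * 8 .. (phy_port % 4) * 8 + 7,
+ * so meter 9 is only legal when the port's physical id satisfies
+ * phy_port % 4 == 1.
+ */
+#if 0
+static rtk_api_ret_t example_egr_queue_bw(void)
+{
+    rtk_api_ret_t retVal;
+
+    if ((retVal = rtk_rate_egrQueueBwCtrlRate_set(1, 2, 9)) != RT_ERR_OK)
+        return retVal;
+
+    return rtk_rate_egrQueueBwCtrlEnable_set(1, RTK_WHOLE_SYSTEM, ENABLED);
+}
+#endif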
+
diff -Nruw linux-4.4.115-fbx/drivers/misc/freebox./rtlapi/rate.h linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/rate.h
--- linux-4.4.115-fbx/drivers/misc/freebox./rtlapi/rate.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/rate.h	2019-01-22 16:16:24.711257346 +0100
@@ -0,0 +1,307 @@
+/*
+ * Copyright (C) 2013 Realtek Semiconductor Corp.
+ * All Rights Reserved.
+ *
+ * This program is the proprietary software of Realtek Semiconductor
+ * Corporation and/or its licensors, and only be used, duplicated,
+ * modified or distributed under the authorized license from Realtek.
+ *
+ * ANY USE OF THE SOFTWARE OTHER THAN AS AUTHORIZED UNDER
+ * THIS LICENSE OR COPYRIGHT LAW IS PROHIBITED.
+ *
+ * Purpose : RTL8367/RTL8367C switch high-level API
+ *
+ * Feature : The file includes rate module high-layer API definition
+ *
+ */
+
+#ifndef __RTK_API_RATE_H__
+#define __RTK_API_RATE_H__
+
+/*
+ * Include Files
+ */
+//#include <rtk_types.h>
+
+/*
+ * Data Type Declaration
+ */
+#define RTK_MAX_METER_ID            (rtk_switch_maxMeterId_get())
+#define RTK_METER_NUM               (RTK_MAX_METER_ID + 1)
+
+typedef enum rtk_meter_type_e{
+    METER_TYPE_KBPS = 0,    /* Kbps */
+    METER_TYPE_PPS,         /* Packet per second */
+    METER_TYPE_END
+} rtk_meter_type_t;
+
+
+/*
+ * Function Declaration
+ */
+
+ /* Rate */
+/* Function Name:
+ *      rtk_rate_shareMeter_set
+ * Description:
+ *      Set meter configuration
+ * Input:
+ *      index       - shared meter index
+ *      type        - shared meter type
+ *      rate        - rate of share meter
+ *      ifg_include - include IFG or not, ENABLE:include DISABLE:exclude
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK               - OK
+ *      RT_ERR_FAILED           - Failed
+ *      RT_ERR_SMI              - SMI access error
+ *      RT_ERR_FILTER_METER_ID  - Invalid meter
+ *      RT_ERR_RATE             - Invalid rate
+ *      RT_ERR_INPUT            - Invalid input parameters
+ * Note:
+ *      The API can set the rate and ifg_include setting of each shared meter.
+ *      If type is METER_TYPE_KBPS, the rate unit is 1 kbps, the valid range is
+ *      8k to 1048568k, and the granularity of the rate is 8 kbps.
+ *      If type is METER_TYPE_PPS, the rate unit is packets per second and the
+ *      valid range is 1 ~ 0x1FFF.
+ *      The ifg_include parameter selects whether the inter-frame gap and
+ *      preamble are included in the rate calculation.
+ */
+rtk_api_ret_t rtk_rate_shareMeter_set(rtk_meter_id_t index, rtk_meter_type_t type, rtk_rate_t rate, rtk_enable_t ifg_include);
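+
+/*
+ * Example (illustrative sketch, not part of the original Realtek sources):
+ * cap shared meter 5 at 100 Mbps (100000 kbps, a multiple of the 8 kbps
+ * granularity), counting IFG and preamble in the rate calculation.
+ *
+ *     rtk_api_ret_t ret;
+ *
+ *     ret = rtk_rate_shareMeter_set(5, METER_TYPE_KBPS, 100000, ENABLED);
+ *     if (ret != RT_ERR_OK)
+ *         return ret;
+ */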
+
+/* Function Name:
+ *      rtk_rate_shareMeter_get
+ * Description:
+ *      Get meter configuration
+ * Input:
+ *      index        - shared meter index
+ * Output:
+ *      pType        - Meter Type
+ *      pRate        - pointer of rate of share meter
+ *      pIfg_include - include IFG or not, ENABLE:include DISABLE:exclude
+ * Return:
+ *      RT_ERR_OK               - OK
+ *      RT_ERR_FAILED           - Failed
+ *      RT_ERR_SMI              - SMI access error
+ *      RT_ERR_FILTER_METER_ID  - Invalid meter
+ * Note:
+ *
+ */
+rtk_api_ret_t rtk_rate_shareMeter_get(rtk_meter_id_t index, rtk_meter_type_t *pType, rtk_rate_t *pRate, rtk_enable_t *pIfg_include);
+
+/* Function Name:
+ *      rtk_rate_shareMeterBucket_set
+ * Description:
+ *      Set meter Bucket Size
+ * Input:
+ *      index        - shared meter index
+ *      bucket_size  - Bucket Size
+ * Output:
+ *      None.
+ * Return:
+ *      RT_ERR_OK               - OK
+ *      RT_ERR_FAILED           - Failed
+ *      RT_ERR_INPUT            - Error Input
+ *      RT_ERR_SMI              - SMI access error
+ *      RT_ERR_FILTER_METER_ID  - Invalid meter
+ * Note:
+ *      The API can set shared meter bucket size.
+ */
+extern rtk_api_ret_t rtk_rate_shareMeterBucket_set(rtk_meter_id_t index, rtk_uint32 bucket_size);
+
+/* Function Name:
+ *      rtk_rate_shareMeterBucket_get
+ * Description:
+ *      Get meter Bucket Size
+ * Input:
+ *      index        - shared meter index
+ * Output:
+ *      pBucket_size - Bucket Size
+ * Return:
+ *      RT_ERR_OK               - OK
+ *      RT_ERR_FAILED           - Failed
+ *      RT_ERR_SMI              - SMI access error
+ *      RT_ERR_FILTER_METER_ID  - Invalid meter
+ * Note:
+ *      The API can get shared meter bucket size.
+ */
+extern rtk_api_ret_t rtk_rate_shareMeterBucket_get(rtk_meter_id_t index, rtk_uint32 *pBucket_size);
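+
+/*
+ * Example (illustrative sketch, not part of the original Realtek sources):
+ * program a burst bucket for meter 5 and read it back to confirm; the bucket
+ * size value is illustrative only.
+ *
+ *     rtk_uint32 bucket;
+ *
+ *     if (rtk_rate_shareMeterBucket_set(5, 0x4000) != RT_ERR_OK)
+ *         return RT_ERR_FAILED;
+ *     if (rtk_rate_shareMeterBucket_get(5, &bucket) != RT_ERR_OK)
+ *         return RT_ERR_FAILED;
+ */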
+
+/* Function Name:
+ *      rtk_rate_igrBandwidthCtrlRate_set
+ * Description:
+ *      Set port ingress bandwidth control
+ * Input:
+ *      port        - Port id
+ *      rate        - Rate of share meter
+ *      ifg_include - include IFG or not, ENABLE:include DISABLE:exclude
+ *      fc_enable   - enable flow control or not, ENABLE:use flow control DISABLE:drop
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_PORT_ID      - Invalid port number.
+ *      RT_ERR_ENABLE       - Invalid IFG parameter.
+ *      RT_ERR_INBW_RATE    - Invalid ingress rate parameter.
+ * Note:
+ *      The rate unit is 1 kbps and the range is from 8k to 1048568k. The granularity of rate is 8 kbps.
+ *      The ifg_include parameter is used for rate calculation with/without inter-frame-gap and preamble.
+ */
+extern rtk_api_ret_t rtk_rate_igrBandwidthCtrlRate_set( rtk_port_t port, rtk_rate_t rate,  rtk_enable_t ifg_include, rtk_enable_t fc_enable);
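+
+/*
+ * Example (illustrative sketch, not part of the original Realtek sources):
+ * limit ingress on logical port UTP_PORT0 to 50 Mbps with IFG included,
+ * asserting flow control instead of dropping when the rate is exceeded.
+ *
+ *     rtk_api_ret_t ret;
+ *
+ *     ret = rtk_rate_igrBandwidthCtrlRate_set(UTP_PORT0, 50000, ENABLED, ENABLED);
+ *     if (ret != RT_ERR_OK)
+ *         return ret;
+ */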
+
+/* Function Name:
+ *      rtk_rate_igrBandwidthCtrlRate_get
+ * Description:
+ *      Get port ingress bandwidth control
+ * Input:
+ *      port - Port id
+ * Output:
+ *      pRate           - Rate of share meter
+ *      pIfg_include    - Rate's calculation including IFG, ENABLE:include DISABLE:exclude
+ *      pFc_enable      - enable flow control or not, ENABLE:use flow control DISABLE:drop
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_PORT_ID      - Invalid port number.
+ *      RT_ERR_INPUT        - Invalid input parameters.
+ * Note:
+ *     The rate unit is 1 kbps and the range is from 8k to 1048568k. The granularity of rate is 8 kbps.
+ *     The ifg_include parameter is used for rate calculation with/without inter-frame-gap and preamble.
+ */
+extern rtk_api_ret_t rtk_rate_igrBandwidthCtrlRate_get(rtk_port_t port, rtk_rate_t *pRate, rtk_enable_t *pIfg_include, rtk_enable_t *pFc_enable);
+
+/* Function Name:
+ *      rtk_rate_egrBandwidthCtrlRate_set
+ * Description:
+ *      Set port egress bandwidth control
+ * Input:
+ *      port        - Port id
+ *      rate        - Rate of egress bandwidth
+ *      ifg_include - include IFG or not, ENABLE:include DISABLE:exclude
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_PORT_ID      - Invalid port number.
+ *      RT_ERR_INPUT        - Invalid input parameters.
+ *      RT_ERR_QOS_EBW_RATE - Invalid egress bandwidth/rate
+ * Note:
+ *     The rate unit is 1 kbps and the range is from 8k to 1048568k. The granularity of rate is 8 kbps.
+ *     The ifg_include parameter is used for rate calculation with/without inter-frame-gap and preamble.
+ */
+extern rtk_api_ret_t rtk_rate_egrBandwidthCtrlRate_set(rtk_port_t port, rtk_rate_t rate, rtk_enable_t ifg_include);
+
+/* Function Name:
+ *      rtk_rate_egrBandwidthCtrlRate_get
+ * Description:
+ *      Get port egress bandwidth control
+ * Input:
+ *      port - Port id
+ * Output:
+ *      pRate           - Rate of egress bandwidth
+ *      pIfg_include    - Rate's calculation including IFG, ENABLE:include DISABLE:exclude
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_PORT_ID      - Invalid port number.
+ *      RT_ERR_INPUT        - Invalid input parameters.
+ * Note:
+ *     The rate unit is 1 kbps and the range is from 8k to 1048568k. The granularity of rate is 8 kbps.
+ *     The ifg_include parameter is used for rate calculation with/without inter-frame-gap and preamble.
+ */
+extern rtk_api_ret_t rtk_rate_egrBandwidthCtrlRate_get(rtk_port_t port, rtk_rate_t *pRate, rtk_enable_t *pIfg_include);
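+
+/*
+ * Example (illustrative sketch, not part of the original Realtek sources):
+ * shape egress on UTP_PORT1 to 20 Mbps with IFG included, then read the
+ * setting back to verify.
+ *
+ *     rtk_rate_t rate;
+ *     rtk_enable_t ifg;
+ *
+ *     if (rtk_rate_egrBandwidthCtrlRate_set(UTP_PORT1, 20000, ENABLED) != RT_ERR_OK)
+ *         return RT_ERR_FAILED;
+ *     if (rtk_rate_egrBandwidthCtrlRate_get(UTP_PORT1, &rate, &ifg) != RT_ERR_OK)
+ *         return RT_ERR_FAILED;
+ */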
+
+/* Function Name:
+ *      rtk_rate_egrQueueBwCtrlEnable_set
+ * Description:
+ *      Set enable status of egress bandwidth control on specified queue.
+ * Input:
+ *      port   - port id
+ *      queue  - queue id
+ *      enable - enable status of egress queue bandwidth control
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK
+ *      RT_ERR_FAILED
+ *      RT_ERR_PORT_ID          - invalid port id
+ *      RT_ERR_QUEUE_ID         - invalid queue id
+ *      RT_ERR_INPUT            - invalid input parameter
+ * Note:
+ *      None
+ */
+extern rtk_api_ret_t rtk_rate_egrQueueBwCtrlEnable_set(rtk_port_t port, rtk_qid_t queue, rtk_enable_t enable);
+
+/* Function Name:
+ *      rtk_rate_egrQueueBwCtrlEnable_get
+ * Description:
+ *      Get enable status of egress bandwidth control on specified queue.
+ * Input:
+ *      port  - port id
+ *      queue - queue id
+ * Output:
+ *      pEnable - pointer to enable status of egress queue bandwidth control
+ * Return:
+ *      RT_ERR_OK
+ *      RT_ERR_FAILED
+ *      RT_ERR_PORT_ID          - invalid port id
+ *      RT_ERR_QUEUE_ID         - invalid queue id
+ *      RT_ERR_INPUT            - invalid input parameter
+ * Note:
+ *    None.
+ */
+extern rtk_api_ret_t rtk_rate_egrQueueBwCtrlEnable_get(rtk_port_t port, rtk_qid_t queue, rtk_enable_t *pEnable);
+
+/* Function Name:
+ *      rtk_rate_egrQueueBwCtrlRate_set
+ * Description:
+ *      Set rate of egress bandwidth control on specified queue.
+ * Input:
+ *      port  - port id
+ *      queue - queue id
+ *      index - shared meter index
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK
+ *      RT_ERR_FAILED
+ *      RT_ERR_PORT_ID          - invalid port id
+ *      RT_ERR_QUEUE_ID         - invalid queue id
+ *      RT_ERR_FILTER_METER_ID  - Invalid meter id
+ * Note:
+ *    The actual rate control is configured in the shared meter;
+ *    the granularity of the rate is 8 kbps.
+ */
+extern rtk_api_ret_t rtk_rate_egrQueueBwCtrlRate_set(rtk_port_t port, rtk_qid_t queue, rtk_meter_id_t index);
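+
+/*
+ * Example (illustrative sketch, not part of the original Realtek sources):
+ * rate-limit queue 2 of UTP_PORT0 through shared meter 5: first bind the
+ * queue to the meter, then enable per-queue egress bandwidth control.
+ *
+ *     if (rtk_rate_egrQueueBwCtrlRate_set(UTP_PORT0, 2, 5) != RT_ERR_OK)
+ *         return RT_ERR_FAILED;
+ *     if (rtk_rate_egrQueueBwCtrlEnable_set(UTP_PORT0, 2, ENABLED) != RT_ERR_OK)
+ *         return RT_ERR_FAILED;
+ */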
+
+/* Function Name:
+ *      rtk_rate_egrQueueBwCtrlRate_get
+ * Description:
+ *      Get rate of egress bandwidth control on specified queue.
+ * Input:
+ *      port  - port id
+ *      queue - queue id
+ * Output:
+ *      pIndex - pointer to shared meter index of egress queue bandwidth control
+ * Return:
+ *      RT_ERR_OK
+ *      RT_ERR_FAILED
+ *      RT_ERR_PORT_ID          - invalid port id
+ *      RT_ERR_QUEUE_ID         - invalid queue id
+ *      RT_ERR_FILTER_METER_ID  - Invalid meter id
+ * Note:
+ *    The actual rate control is configured in the shared meter;
+ *    the granularity of the rate is 8 kbps.
+ */
+extern rtk_api_ret_t rtk_rate_egrQueueBwCtrlRate_get(rtk_port_t port, rtk_qid_t queue, rtk_meter_id_t *pIndex);
+
+#endif /* __RTK_API_RATE_H__ */
+
diff -Nruw linux-4.4.115-fbx/drivers/misc/freebox./rtlapi/rldp.c linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/rldp.c
--- linux-4.4.115-fbx/drivers/misc/freebox./rtlapi/rldp.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/rldp.c	2019-01-22 16:16:24.711257346 +0100
@@ -0,0 +1,555 @@
+/*
+ * Copyright (C) 2012 Realtek Semiconductor Corp.
+ * All Rights Reserved.
+ *
+ * This program is the proprietary software of Realtek Semiconductor
+ * Corporation and/or its licensors, and only be used, duplicated,
+ * modified or distributed under the authorized license from Realtek.
+ *
+ * ANY USE OF THE SOFTWARE OTHER THAN AS AUTHORIZED UNDER
+ * THIS LICENSE OR COPYRIGHT LAW IS PROHIBITED.
+ *
+ * $Revision: $
+ * $Date: $
+ *
+ * Purpose : Declaration of RLDP and RLPP API
+ *
+ * Feature : The file includes the following modules and sub-modules:
+ *           1) RLDP and RLPP configuration and status
+ *
+ */
+
+
+/*
+ * Include Files
+ */
+#include <rtk_switch.h>
+#include <rtk_error.h>
+//#include <rtk_types.h>
+#include <rldp.h>
+
+#include <rtl8367c_asicdrv.h>
+#include <rtl8367c_asicdrv_rldp.h>
+
+/*
+ * Symbol Definition
+ */
+
+
+/*
+ * Data Declaration
+ */
+
+
+/*
+ * Macro Declaration
+ */
+
+
+/*
+ * Function Declaration
+ */
+
+/* Module Name : RLDP */
+
+static rtk_api_ret_t _rtk_rldp_config_set(rtk_rldp_config_t *pConfig)
+{
+    rtk_api_ret_t retVal;
+    ether_addr_t magic;
+    rtk_uint32 pmsk;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if (pConfig->rldp_enable >= RTK_ENABLE_END)
+        return RT_ERR_INPUT;
+
+    if (pConfig->trigger_mode >= RTK_RLDP_TRIGGER_END)
+        return RT_ERR_INPUT;
+
+    if (pConfig->compare_type >= RTK_RLDP_CMPTYPE_END)
+        return RT_ERR_INPUT;
+
+    if (pConfig->num_check >= RTK_RLDP_NUM_MAX)
+        return RT_ERR_INPUT;
+
+    if (pConfig->interval_check >= RTK_RLDP_INTERVAL_MAX)
+        return RT_ERR_INPUT;
+
+    if (pConfig->num_loop >= RTK_RLDP_NUM_MAX)
+        return RT_ERR_INPUT;
+
+    if (pConfig->interval_loop >= RTK_RLDP_INTERVAL_MAX)
+        return RT_ERR_INPUT;
+
+    if ((retVal = rtl8367c_getAsicRldpTxPortmask(&pmsk))!=RT_ERR_OK)
+        return retVal;
+
+    if ((retVal = rtl8367c_setAsicRldpTxPortmask(0x00))!=RT_ERR_OK)
+        return retVal;
+
+    if ((retVal = rtl8367c_setAsicRldpTxPortmask(pmsk))!=RT_ERR_OK)
+        return retVal;
+
+    if ((retVal = rtl8367c_setAsicRldp(pConfig->rldp_enable))!=RT_ERR_OK)
+        return retVal;
+
+    if ((retVal = rtl8367c_setAsicRldpTriggerMode(pConfig->trigger_mode))!=RT_ERR_OK)
+        return retVal;
+
+    memcpy(&magic, &pConfig->magic, sizeof(ether_addr_t));
+    if ((retVal = rtl8367c_setAsicRldpMagicNum(magic))!=RT_ERR_OK)
+        return retVal;
+
+    if ((retVal = rtl8367c_setAsicRldpCompareRandomNumber(pConfig->compare_type))!=RT_ERR_OK)
+        return retVal;
+
+    if ((retVal = rtl8367c_setAsicRldpCheckingStatePara(pConfig->num_check, pConfig->interval_check))!=RT_ERR_OK)
+        return retVal;
+
+    if ((retVal = rtl8367c_setAsicRldpLoopStatePara(pConfig->num_loop, pConfig->interval_loop))!=RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_rldp_config_get(rtk_rldp_config_t *pConfig)
+{
+    rtk_api_ret_t retVal;
+    ether_addr_t magic;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if ((retVal = rtl8367c_getAsicRldp(&pConfig->rldp_enable))!=RT_ERR_OK)
+        return retVal;
+
+    if ((retVal = rtl8367c_getAsicRldpTriggerMode(&pConfig->trigger_mode))!=RT_ERR_OK)
+        return retVal;
+
+    if ((retVal = rtl8367c_getAsicRldpMagicNum(&magic))!=RT_ERR_OK)
+        return retVal;
+    memcpy(&pConfig->magic, &magic, sizeof(ether_addr_t));
+
+    if ((retVal = rtl8367c_getAsicRldpCompareRandomNumber(&pConfig->compare_type))!=RT_ERR_OK)
+        return retVal;
+
+    if ((retVal = rtl8367c_getAsicRldpCheckingStatePara(&pConfig->num_check, &pConfig->interval_check))!=RT_ERR_OK)
+        return retVal;
+
+    if ((retVal = rtl8367c_getAsicRldpLoopStatePara(&pConfig->num_loop, &pConfig->interval_loop))!=RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_rldp_portConfig_set(rtk_port_t port, rtk_rldp_portConfig_t *pPortConfig)
+{
+    rtk_api_ret_t retVal;
+    rtk_uint32 pmsk;
+    rtk_uint32 phy_port;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    /* Check Port Valid */
+    RTK_CHK_PORT_VALID(port);
+
+    if (pPortConfig->tx_enable >= RTK_ENABLE_END)
+        return RT_ERR_INPUT;
+
+    phy_port = rtk_switch_port_L2P_get(port);
+
+    if ((retVal = rtl8367c_getAsicRldpTxPortmask(&pmsk))!=RT_ERR_OK)
+        return retVal;
+
+    if (pPortConfig->tx_enable)
+    {
+         pmsk |=(1<<phy_port);
+    }
+    else
+    {
+         pmsk &= ~(1<<phy_port);
+    }
+
+    if ((retVal = rtl8367c_setAsicRldpTxPortmask(pmsk))!=RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+
+}
+
+static rtk_api_ret_t _rtk_rldp_portConfig_get(rtk_port_t port, rtk_rldp_portConfig_t *pPortConfig)
+{
+    rtk_api_ret_t retVal;
+    rtk_uint32 pmsk;
+    rtk_portmask_t logicalPmask;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    /* Check Port Valid */
+    RTK_CHK_PORT_VALID(port);
+
+    if ((retVal = rtl8367c_getAsicRldpTxPortmask(&pmsk))!=RT_ERR_OK)
+        return retVal;
+
+    if ((retVal = rtk_switch_portmask_P2L_get(pmsk, &logicalPmask)) != RT_ERR_OK)
+        return retVal;
+
+
+    if (logicalPmask.bits[0] & (1<<port))
+    {
+         pPortConfig->tx_enable = ENABLED;
+    }
+    else
+    {
+         pPortConfig->tx_enable = DISABLED;
+    }
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_rldp_status_get(rtk_rldp_status_t *pStatus)
+{
+    rtk_api_ret_t retVal;
+    ether_addr_t seed;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if ((retVal = rtl8367c_getAsicRldpRandomNumber(&seed))!=RT_ERR_OK)
+        return retVal;
+    memcpy(&pStatus->id, &seed, sizeof(ether_addr_t));
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_rldp_portStatus_get(rtk_port_t port, rtk_rldp_portStatus_t *pPortStatus)
+{
+    rtk_api_ret_t retVal;
+    rtk_uint32 pmsk;
+    rtk_portmask_t logicalPmask;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    /* Check Port Valid */
+    RTK_CHK_PORT_VALID(port);
+
+    if ((retVal = rtl8367c_getAsicRldpLoopedPortmask(&pmsk))!=RT_ERR_OK)
+        return retVal;
+    if ((retVal = rtk_switch_portmask_P2L_get(pmsk, &logicalPmask)) != RT_ERR_OK)
+        return retVal;
+
+    if (logicalPmask.bits[0] & (1<<port))
+    {
+         pPortStatus->loop_status = RTK_RLDP_LOOPSTS_LOOPING;
+    }
+    else
+    {
+         pPortStatus->loop_status  = RTK_RLDP_LOOPSTS_NONE;
+    }
+
+    if ((retVal = rtl8367c_getAsicRldpEnterLoopedPortmask(&pmsk))!=RT_ERR_OK)
+        return retVal;
+    if ((retVal = rtk_switch_portmask_P2L_get(pmsk, &logicalPmask)) != RT_ERR_OK)
+        return retVal;
+
+    if (logicalPmask.bits[0] & (1<<port))
+    {
+         pPortStatus->loop_enter = RTK_RLDP_LOOPSTS_LOOPING;
+    }
+    else
+    {
+         pPortStatus->loop_enter  = RTK_RLDP_LOOPSTS_NONE;
+    }
+
+    if ((retVal = rtl8367c_getAsicRldpLeaveLoopedPortmask(&pmsk))!=RT_ERR_OK)
+        return retVal;
+    if ((retVal = rtk_switch_portmask_P2L_get(pmsk, &logicalPmask)) != RT_ERR_OK)
+        return retVal;
+
+    if (logicalPmask.bits[0] & (1<<port))
+    {
+         pPortStatus->loop_leave = RTK_RLDP_LOOPSTS_LOOPING;
+    }
+    else
+    {
+         pPortStatus->loop_leave  = RTK_RLDP_LOOPSTS_NONE;
+    }
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_rldp_portStatus_set(rtk_port_t port, rtk_rldp_portStatus_t *pPortStatus)
+{
+    rtk_api_ret_t retVal;
+    rtk_uint32 pmsk;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    /* Check Port Valid */
+    RTK_CHK_PORT_VALID(port);
+
+    pmsk = (pPortStatus->loop_enter)<<rtk_switch_port_L2P_get(port);
+    if ((retVal = rtl8367c_setAsicRldpEnterLoopedPortmask(pmsk))!=RT_ERR_OK)
+        return retVal;
+
+    pmsk = (pPortStatus->loop_leave)<<rtk_switch_port_L2P_get(port);
+    if ((retVal = rtl8367c_setAsicRldpLeaveLoopedPortmask(pmsk))!=RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_rldp_portLoopPair_get(rtk_port_t port, rtk_portmask_t *pPortmask)
+{
+    rtk_api_ret_t retVal;
+    rtk_uint32 pmsk;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    /* Check Port Valid */
+    RTK_CHK_PORT_VALID(port);
+
+    if ((retVal = rtl8367c_getAsicRldpLoopedPortPair(rtk_switch_port_L2P_get(port), &pmsk))!=RT_ERR_OK)
+        return retVal;
+
+    if ((retVal = rtk_switch_portmask_P2L_get(pmsk, pPortmask)) != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+/* Function Name:
+ *      rtk_rldp_config_set
+ * Description:
+ *      Set RLDP module configuration
+ * Input:
+ *      pConfig - configuration structure of RLDP
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK
+ *      RT_ERR_FAILED
+ *      RT_ERR_INPUT
+ *      RT_ERR_NULL_POINTER
+ * Note:
+ *      None
+ */
+rtk_api_ret_t rtk_rldp_config_set(rtk_rldp_config_t *pConfig)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_rldp_config_set(pConfig);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_rldp_config_get
+ * Description:
+ *      Get RLDP module configuration
+ * Input:
+ *      None
+ * Output:
+ *      pConfig - configuration structure of RLDP
+ * Return:
+ *      RT_ERR_OK
+ *      RT_ERR_FAILED
+ *      RT_ERR_INPUT
+ *      RT_ERR_NULL_POINTER
+ * Note:
+ *      None
+ */
+rtk_api_ret_t rtk_rldp_config_get(rtk_rldp_config_t *pConfig)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_rldp_config_get(pConfig);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_rldp_portConfig_set
+ * Description:
+ *      Set per port RLDP module configuration
+ * Input:
+ *      port   - port number to be configured
+ *      pPortConfig - per port configuration structure of RLDP
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK
+ *      RT_ERR_FAILED
+ *      RT_ERR_INPUT
+ *      RT_ERR_NULL_POINTER
+ * Note:
+ *      None
+ */
+rtk_api_ret_t rtk_rldp_portConfig_set(rtk_port_t port, rtk_rldp_portConfig_t *pPortConfig)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_rldp_portConfig_set(port, pPortConfig);
+    RTK_API_UNLOCK();
+
+    return retVal;
+} /* end of rtk_rldp_portConfig_set */
+
+
+/* Function Name:
+ *      rtk_rldp_portConfig_get
+ * Description:
+ *      Get per port RLDP module configuration
+ * Input:
+ *      port    - port number to get
+ * Output:
+ *      pPortConfig - per port configuration structure of RLDP
+ * Return:
+ *      RT_ERR_OK
+ *      RT_ERR_FAILED
+ *      RT_ERR_INPUT
+ *      RT_ERR_NULL_POINTER
+ * Note:
+ *      None
+ */
+rtk_api_ret_t rtk_rldp_portConfig_get(rtk_port_t port, rtk_rldp_portConfig_t *pPortConfig)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_rldp_portConfig_get(port, pPortConfig);
+    RTK_API_UNLOCK();
+
+    return retVal;
+} /* end of rtk_rldp_portConfig_get */
+
+
+/* Function Name:
+ *      rtk_rldp_status_get
+ * Description:
+ *      Get RLDP module status
+ * Input:
+ *      None
+ * Output:
+ *      pStatus - status structure of RLDP
+ * Return:
+ *      RT_ERR_OK
+ *      RT_ERR_FAILED
+ *      RT_ERR_NULL_POINTER
+ * Note:
+ *      None
+ */
+rtk_api_ret_t rtk_rldp_status_get(rtk_rldp_status_t *pStatus)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_rldp_status_get(pStatus);
+    RTK_API_UNLOCK();
+
+    return retVal;
+} /* end of rtk_rldp_status_get */
+
+
+/* Function Name:
+ *      rtk_rldp_portStatus_get
+ * Description:
+ *      Get per port RLDP module status
+ * Input:
+ *      port    - port number to get
+ * Output:
+ *      pPortStatus - per port status structure of RLDP
+ * Return:
+ *      RT_ERR_OK
+ *      RT_ERR_FAILED
+ *      RT_ERR_INPUT
+ *      RT_ERR_NULL_POINTER
+ * Note:
+ *      None
+ */
+rtk_api_ret_t rtk_rldp_portStatus_get(rtk_port_t port, rtk_rldp_portStatus_t *pPortStatus)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_rldp_portStatus_get(port, pPortStatus);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_rldp_portStatus_set
+ * Description:
+ *      Clear RLDP module status
+ * Input:
+ *      port    - port number to clear
+ *      pPortStatus - per port status structure of RLDP
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK
+ *      RT_ERR_FAILED
+ *      RT_ERR_INPUT
+ *      RT_ERR_NULL_POINTER
+ * Note:
+ *      The clear operation affects loop_enter and loop_leave only; other
+ *      fields in the structure are ignored. The loop status itself can't be cleared.
+ */
+rtk_api_ret_t rtk_rldp_portStatus_set(rtk_port_t port, rtk_rldp_portStatus_t *pPortStatus)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_rldp_portStatus_set(port, pPortStatus);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_rldp_portLoopPair_get
+ * Description:
+ *      Get RLDP port loop pairs
+ * Input:
+ *      port    - port number to get
+ * Output:
+ *      pPortmask - per port related loop ports
+ * Return:
+ *      RT_ERR_OK
+ *      RT_ERR_FAILED
+ *      RT_ERR_INPUT
+ *      RT_ERR_NULL_POINTER
+ * Note:
+ *      None
+ */
+rtk_api_ret_t rtk_rldp_portLoopPair_get(rtk_port_t port, rtk_portmask_t *pPortmask)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_rldp_portLoopPair_get(port, pPortmask);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+
+
diff -Nruw linux-4.4.115-fbx/drivers/misc/freebox./rtlapi/rldp.h linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/rldp.h
--- linux-4.4.115-fbx/drivers/misc/freebox./rtlapi/rldp.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/rldp.h	2019-01-22 16:16:24.711257346 +0100
@@ -0,0 +1,266 @@
+/*
+ * Copyright (C) 2012 Realtek Semiconductor Corp.
+ * All Rights Reserved.
+ *
+ * This program is the proprietary software of Realtek Semiconductor
+ * Corporation and/or its licensors, and only be used, duplicated,
+ * modified or distributed under the authorized license from Realtek.
+ *
+ * ANY USE OF THE SOFTWARE OTHER THAN AS AUTHORIZED UNDER
+ * THIS LICENSE OR COPYRIGHT LAW IS PROHIBITED.
+ *
+ * $Revision: $
+ * $Date: $
+ *
+ * Purpose : Declaration of RLDP and RLPP API
+ *
+ * Feature : The file includes the following modules and sub-modules:
+ *           1) RLDP and RLPP configuration and status
+ *
+ */
+
+
+#ifndef __RTK_RLDP_H__
+#define __RTK_RLDP_H__
+
+
+/*
+ * Include Files
+ */
+
+
+/*
+ * Symbol Definition
+ */
+typedef enum rtk_rldp_trigger_e
+{
+    RTK_RLDP_TRIGGER_SAMOVING = 0,
+    RTK_RLDP_TRIGGER_PERIOD,
+    RTK_RLDP_TRIGGER_END
+} rtk_rldp_trigger_t;
+
+typedef enum rtk_rldp_cmpType_e
+{
+    RTK_RLDP_CMPTYPE_MAGIC = 0,     /* Compare the RLDP with magic only */
+    RTK_RLDP_CMPTYPE_MAGIC_ID,      /* Compare the RLDP with both magic + ID */
+    RTK_RLDP_CMPTYPE_END
+} rtk_rldp_cmpType_t;
+
+typedef enum rtk_rldp_loopStatus_e
+{
+    RTK_RLDP_LOOPSTS_NONE = 0,
+    RTK_RLDP_LOOPSTS_LOOPING,
+    RTK_RLDP_LOOPSTS_END
+} rtk_rldp_loopStatus_t;
+
+typedef enum rtk_rlpp_trapType_e
+{
+    RTK_RLPP_TRAPTYPE_NONE = 0,
+    RTK_RLPP_TRAPTYPE_CPU,
+    RTK_RLPP_TRAPTYPE_END
+} rtk_rlpp_trapType_t;
+
+typedef struct rtk_rldp_config_s
+{
+    rtk_enable_t        rldp_enable;
+    rtk_rldp_trigger_t  trigger_mode;
+    rtk_mac_t           magic;
+    rtk_rldp_cmpType_t  compare_type;
+    rtk_uint32          interval_check; /* Checking interval for check state */
+    rtk_uint32          num_check;      /* Checking number for check state */
+    rtk_uint32          interval_loop;  /* Checking interval for loop state */
+    rtk_uint32          num_loop;       /* Checking number for loop state */
+} rtk_rldp_config_t;
+
+typedef struct rtk_rldp_portConfig_s
+{
+    rtk_enable_t        tx_enable;
+} rtk_rldp_portConfig_t;
+
+typedef struct rtk_rldp_status_s
+{
+    rtk_mac_t           id;
+} rtk_rldp_status_t;
+
+typedef struct rtk_rldp_portStatus_s
+{
+    rtk_rldp_loopStatus_t   loop_status;
+    rtk_rldp_loopStatus_t   loop_enter;
+    rtk_rldp_loopStatus_t   loop_leave;
+} rtk_rldp_portStatus_t;
+
+/*
+ * Data Declaration
+ */
+
+
+/*
+ * Macro Declaration
+ */
+
+#define RTK_RLDP_INTERVAL_MAX  0xffff
+#define RTK_RLDP_NUM_MAX       0xff
+
+
+/*
+ * Function Declaration
+ */
+
+/* Module Name : RLDP */
+
+
+/* Function Name:
+ *      rtk_rldp_config_set
+ * Description:
+ *      Set RLDP module configuration
+ * Input:
+ *      pConfig - configuration structure of RLDP
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK
+ *      RT_ERR_FAILED
+ *      RT_ERR_INPUT
+ *      RT_ERR_NULL_POINTER
+ * Note:
+ *      None
+ */
+extern rtk_api_ret_t rtk_rldp_config_set(rtk_rldp_config_t *pConfig);
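+
+/*
+ * Example (illustrative sketch, not part of the original Realtek sources):
+ * enable RLDP in periodic trigger mode with magic+ID comparison; the check
+ * and loop counters/intervals below are illustrative values only.
+ *
+ *     rtk_rldp_config_t cfg;
+ *
+ *     memset(&cfg, 0, sizeof(cfg));
+ *     cfg.rldp_enable    = ENABLED;
+ *     cfg.trigger_mode   = RTK_RLDP_TRIGGER_PERIOD;
+ *     cfg.compare_type   = RTK_RLDP_CMPTYPE_MAGIC_ID;
+ *     cfg.num_check      = 4;
+ *     cfg.interval_check = 100;
+ *     cfg.num_loop       = 4;
+ *     cfg.interval_loop  = 100;
+ *     if (rtk_rldp_config_set(&cfg) != RT_ERR_OK)
+ *         return RT_ERR_FAILED;
+ */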
+
+
+/* Function Name:
+ *      rtk_rldp_config_get
+ * Description:
+ *      Get RLDP module configuration
+ * Input:
+ *      None
+ * Output:
+ *      pConfig - configuration structure of RLDP
+ * Return:
+ *      RT_ERR_OK
+ *      RT_ERR_FAILED
+ *      RT_ERR_INPUT
+ *      RT_ERR_NULL_POINTER
+ * Note:
+ *      None
+ */
+extern rtk_api_ret_t rtk_rldp_config_get(rtk_rldp_config_t *pConfig);
+
+
+/* Function Name:
+ *      rtk_rldp_portConfig_set
+ * Description:
+ *      Set per port RLDP module configuration
+ * Input:
+ *      port   - port number to be configured
+ *      pPortConfig - per port configuration structure of RLDP
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK
+ *      RT_ERR_FAILED
+ *      RT_ERR_INPUT
+ *      RT_ERR_NULL_POINTER
+ * Note:
+ *      None
+ */
+extern rtk_api_ret_t rtk_rldp_portConfig_set(rtk_port_t port, rtk_rldp_portConfig_t *pPortConfig);
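+
+/*
+ * Example (illustrative sketch, not part of the original Realtek sources):
+ * turn on RLDP probe transmission for logical port UTP_PORT3.
+ *
+ *     rtk_rldp_portConfig_t pcfg;
+ *
+ *     pcfg.tx_enable = ENABLED;
+ *     if (rtk_rldp_portConfig_set(UTP_PORT3, &pcfg) != RT_ERR_OK)
+ *         return RT_ERR_FAILED;
+ */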
+
+
+/* Function Name:
+ *      rtk_rldp_portConfig_get
+ * Description:
+ *      Get per port RLDP module configuration
+ * Input:
+ *      port    - port number to get
+ * Output:
+ *      pPortConfig - per port configuration structure of RLDP
+ * Return:
+ *      RT_ERR_OK
+ *      RT_ERR_FAILED
+ *      RT_ERR_INPUT
+ *      RT_ERR_NULL_POINTER
+ * Note:
+ *      None
+ */
+extern rtk_api_ret_t rtk_rldp_portConfig_get(rtk_port_t port, rtk_rldp_portConfig_t *pPortConfig);
+
+
+/* Function Name:
+ *      rtk_rldp_status_get
+ * Description:
+ *      Get RLDP module status
+ * Input:
+ *      None
+ * Output:
+ *      pStatus - status structure of RLDP
+ * Return:
+ *      RT_ERR_OK
+ *      RT_ERR_FAILED
+ *      RT_ERR_NULL_POINTER
+ * Note:
+ *      None
+ */
+extern rtk_api_ret_t rtk_rldp_status_get(rtk_rldp_status_t *pStatus);
+
+
+/* Function Name:
+ *      rtk_rldp_portStatus_get
+ * Description:
+ *      Get per port RLDP module status
+ * Input:
+ *      port    - port number to get
+ * Output:
+ *      pPortStatus - per port status structure of RLDP
+ * Return:
+ *      RT_ERR_OK
+ *      RT_ERR_FAILED
+ *      RT_ERR_INPUT
+ *      RT_ERR_NULL_POINTER
+ * Note:
+ *      None
+ */
+extern rtk_api_ret_t rtk_rldp_portStatus_get(rtk_port_t port, rtk_rldp_portStatus_t *pPortStatus);
+
+
+/* Function Name:
+ *      rtk_rldp_portStatus_set
+ * Description:
+ *      Clear RLDP module status
+ * Input:
+ *      port    - port number to clear
+ *      pPortStatus - per port status structure of RLDP
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK
+ *      RT_ERR_FAILED
+ *      RT_ERR_INPUT
+ *      RT_ERR_NULL_POINTER
+ * Note:
+ *      The clear operation affects loop_enter and loop_leave only; other
+ *      fields in the structure are ignored
+ */
+extern rtk_api_ret_t rtk_rldp_portStatus_set(rtk_port_t port, rtk_rldp_portStatus_t *pPortStatus);
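+
+/*
+ * Example (illustrative sketch, not part of the original Realtek sources):
+ * acknowledge a loop event on UTP_PORT3 by writing the enter/leave latches;
+ * this assumes the latched bits clear when written, as the per-port masked
+ * write in rldp.c suggests.
+ *
+ *     rtk_rldp_portStatus_t psts;
+ *
+ *     psts.loop_enter = RTK_RLDP_LOOPSTS_LOOPING;
+ *     psts.loop_leave = RTK_RLDP_LOOPSTS_LOOPING;
+ *     if (rtk_rldp_portStatus_set(UTP_PORT3, &psts) != RT_ERR_OK)
+ *         return RT_ERR_FAILED;
+ */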
+
+
+/* Function Name:
+ *      rtk_rldp_portLoopPair_get
+ * Description:
+ *      Get RLDP port loop pairs
+ * Input:
+ *      port    - port number to get
+ * Output:
+ *      pPortmask - per port related loop ports
+ * Return:
+ *      RT_ERR_OK
+ *      RT_ERR_FAILED
+ *      RT_ERR_INPUT
+ *      RT_ERR_NULL_POINTER
+ * Note:
+ *      None
+ */
+extern rtk_api_ret_t rtk_rldp_portLoopPair_get(rtk_port_t port, rtk_portmask_t *pPortmask);
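+
+/*
+ * Example (illustrative sketch, not part of the original Realtek sources):
+ * list the logical ports that form a loop with UTP_PORT3 by scanning the
+ * returned portmask.
+ *
+ *     rtk_portmask_t pair;
+ *     rtk_uint32 p;
+ *
+ *     if (rtk_rldp_portLoopPair_get(UTP_PORT3, &pair) != RT_ERR_OK)
+ *         return RT_ERR_FAILED;
+ *     for (p = 0; p < 32; p++)
+ *         if (pair.bits[0] & (1 << p))
+ *             printk("looped with logical port %u\n", p);
+ */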
+
+#endif /* __RTK_RLDP_H__ */
+
diff -Nruw linux-4.4.115-fbx/drivers/misc/freebox./rtlapi/rtk_error.h linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/rtk_error.h
--- linux-4.4.115-fbx/drivers/misc/freebox./rtlapi/rtk_error.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/rtk_error.h	2019-01-22 16:16:24.711257346 +0100
@@ -0,0 +1,225 @@
+/*
+ * Copyright(c) Realtek Semiconductor Corporation, 2008
+ * All rights reserved.
+ *
+ * $Revision: 38651 $
+ * $Date: 2013-04-17 14:32:56 +0800 (Wed, 17 April 2013) $
+ *
+ * Purpose : Definition the error number in the SDK.
+ *
+ * Feature : error definition
+ *
+ */
+
+#ifndef __COMMON_RT_ERROR_H__
+#define __COMMON_RT_ERROR_H__
+
+/*
+ * Include Files
+ */
+
+/*
+ * Data Type Declaration
+ */
+typedef enum rt_error_code_e
+{
+    RT_ERR_FAILED = -1,                             /* General Error                                                                    */
+
+    /* 0x0000xxxx for common error code */
+    RT_ERR_OK = 0,                                  /* 0x00000000, OK                                                                   */
+    RT_ERR_INPUT,                                   /* 0x00000001, invalid input parameter                                              */
+    RT_ERR_UNIT_ID,                                 /* 0x00000002, invalid unit id                                                      */
+    RT_ERR_PORT_ID,                                 /* 0x00000003, invalid port id                                                      */
+    RT_ERR_PORT_MASK,                               /* 0x00000004, invalid port mask                                                    */
+    RT_ERR_PORT_LINKDOWN,                           /* 0x00000005, link down port status                                                */
+    RT_ERR_ENTRY_INDEX,                             /* 0x00000006, invalid entry index                                                  */
+    RT_ERR_NULL_POINTER,                            /* 0x00000007, input parameter is null pointer                                      */
+    RT_ERR_QUEUE_ID,                                /* 0x00000008, invalid queue id                                                     */
+    RT_ERR_QUEUE_NUM,                               /* 0x00000009, invalid queue number                                                 */
+    RT_ERR_BUSYWAIT_TIMEOUT,                        /* 0x0000000a, busy waiting timed out                                               */
+    RT_ERR_MAC,                                     /* 0x0000000b, invalid mac address                                                  */
+    RT_ERR_OUT_OF_RANGE,                            /* 0x0000000c, input parameter out of range                                         */
+    RT_ERR_CHIP_NOT_SUPPORTED,                      /* 0x0000000d, functions not supported by this chip model                           */
+    RT_ERR_SMI,                                     /* 0x0000000e, SMI error                                                            */
+    RT_ERR_NOT_INIT,                                /* 0x0000000f, The module is not initialized                                        */
+    RT_ERR_CHIP_NOT_FOUND,                          /* 0x00000010, The chip can not be found                                            */
+    RT_ERR_NOT_ALLOWED,                             /* 0x00000011, actions not allowed by the function                                  */
+    RT_ERR_DRIVER_NOT_FOUND,                        /* 0x00000012, The driver can not be found                                          */
+    RT_ERR_SEM_LOCK_FAILED,                         /* 0x00000013, Failed to lock semaphore                                             */
+    RT_ERR_SEM_UNLOCK_FAILED,                       /* 0x00000014, Failed to unlock semaphore                                           */
+    RT_ERR_ENABLE,                                  /* 0x00000015, invalid enable parameter                                             */
+    RT_ERR_TBL_FULL,                                /* 0x00000016, input table full                                                     */
+
+    /* 0x0001xxxx for vlan */
+    RT_ERR_VLAN_VID = 0x00010000,                   /* 0x00010000, invalid vid                                                          */
+    RT_ERR_VLAN_PRIORITY,                           /* 0x00010001, invalid 1p priority                                                  */
+    RT_ERR_VLAN_EMPTY_ENTRY,                        /* 0x00010002, empty entry of vlan table                                            */
+    RT_ERR_VLAN_ACCEPT_FRAME_TYPE,                  /* 0x00010003, invalid accept frame type                                            */
+    RT_ERR_VLAN_EXIST,                              /* 0x00010004, vlan already exists                                                  */
+    RT_ERR_VLAN_ENTRY_NOT_FOUND,                    /* 0x00010005, specified vlan entry not found                                       */
+    RT_ERR_VLAN_PORT_MBR_EXIST,                     /* 0x00010006, member port exist in the specified vlan                              */
+    RT_ERR_VLAN_PROTO_AND_PORT,                     /* 0x00010007, invalid protocol and port based vlan                                 */
+
+    /* 0x0002xxxx for svlan */
+    RT_ERR_SVLAN_ENTRY_INDEX = 0x00020000,          /* 0x00020000, invalid svid entry no                                                */
+    RT_ERR_SVLAN_ETHER_TYPE,                        /* 0x00020001, invalid SVLAN ether type                                             */
+    RT_ERR_SVLAN_TABLE_FULL,                        /* 0x00020002, no empty entry in SVLAN table                                        */
+    RT_ERR_SVLAN_ENTRY_NOT_FOUND,                   /* 0x00020003, specified svlan entry not found                                      */
+    RT_ERR_SVLAN_EXIST,                             /* 0x00020004, SVLAN entry already exists                                           */
+    RT_ERR_SVLAN_VID,                               /* 0x00020005, invalid svid                                                         */
+
+    /* 0x0003xxxx for MSTP */
+    RT_ERR_MSTI = 0x00030000,                       /* 0x00030000, invalid msti                                                         */
+    RT_ERR_MSTP_STATE,                              /* 0x00030001, invalid spanning tree status                                         */
+    RT_ERR_MSTI_EXIST,                              /* 0x00030002, MSTI exist                                                           */
+    RT_ERR_MSTI_NOT_EXIST,                          /* 0x00030003, MSTI not exist                                                       */
+
+    /* 0x0004xxxx for BUCKET */
+    RT_ERR_TIMESLOT = 0x00040000,                   /* 0x00040000, invalid time slot                                                    */
+    RT_ERR_TOKEN,                                   /* 0x00040001, invalid token amount                                                 */
+    RT_ERR_RATE,                                    /* 0x00040002, invalid rate                                                         */
+    RT_ERR_TICK,                                    /* 0x00040003, invalid tick                                                         */
+
+    /* 0x0005xxxx for RMA */
+    RT_ERR_RMA_ADDR = 0x00050000,                   /* 0x00050000, invalid rma mac address                                              */
+    RT_ERR_RMA_ACTION,                              /* 0x00050001, invalid rma action                                                   */
+
+    /* 0x0006xxxx for L2 */
+    RT_ERR_L2_HASH_KEY = 0x00060000,                /* 0x00060000, invalid L2 Hash key                                                  */
+    RT_ERR_L2_HASH_INDEX,                           /* 0x00060001, invalid L2 Hash index                                                */
+    RT_ERR_L2_CAM_INDEX,                            /* 0x00060002, invalid L2 CAM index                                                 */
+    RT_ERR_L2_ENRTYSEL,                             /* 0x00060003, invalid EntrySel                                                     */
+    RT_ERR_L2_INDEXTABLE_INDEX,                     /* 0x00060004, invalid L2 index table(=portMask table) index                        */
+    RT_ERR_LIMITED_L2ENTRY_NUM,                     /* 0x00060005, invalid limited L2 entry number                                      */
+    RT_ERR_L2_AGGREG_PORT,                          /* 0x00060006, this aggregated port is not the lowest physical
+                                                                   port of its aggregation group                                        */
+    RT_ERR_L2_FID,                                  /* 0x00060007, invalid fid                                                          */
+    RT_ERR_L2_VID,                                  /* 0x00060008, invalid cvid                                                         */
+    RT_ERR_L2_NO_EMPTY_ENTRY,                       /* 0x00060009, no empty entry in L2 table                                           */
+    RT_ERR_L2_ENTRY_NOTFOUND,                       /* 0x0006000a, specified entry not found                                            */
+    RT_ERR_L2_INDEXTBL_FULL,                        /* 0x0006000b, the L2 index table is full                                           */
+    RT_ERR_L2_INVALID_FLOWTYPE,                     /* 0x0006000c, invalid L2 flow type                                                 */
+    RT_ERR_L2_L2UNI_PARAM,                          /* 0x0006000d, invalid L2 unicast parameter                                         */
+    RT_ERR_L2_L2MULTI_PARAM,                        /* 0x0006000e, invalid L2 multicast parameter                                       */
+    RT_ERR_L2_IPMULTI_PARAM,                        /* 0x0006000f, invalid L2 ip multicast parameter                                    */
+    RT_ERR_L2_PARTIAL_HASH_KEY,                     /* 0x00060010, invalid L2 partial Hash key                                          */
+    RT_ERR_L2_EMPTY_ENTRY,                          /* 0x00060011, the entry is empty(invalid)                                          */
+    RT_ERR_L2_FLUSH_TYPE,                           /* 0x00060012, the flush type is invalid                                            */
+    RT_ERR_L2_NO_CPU_PORT,                          /* 0x00060013, CPU port not exist                                                   */
+
+    /* 0x0007xxxx for FILTER (PIE) */
+    RT_ERR_FILTER_BLOCKNUM = 0x00070000,            /* 0x00070000, invalid block number                                                 */
+    RT_ERR_FILTER_ENTRYIDX,                         /* 0x00070001, invalid entry index                                                  */
+    RT_ERR_FILTER_CUTLINE,                          /* 0x00070002, invalid cutline value                                                */
+    RT_ERR_FILTER_FLOWTBLBLOCK,                     /* 0x00070003, block belongs to flow table                                          */
+    RT_ERR_FILTER_INACLBLOCK,                       /* 0x00070004, block belongs to ingress ACL                                         */
+    RT_ERR_FILTER_ACTION,                           /* 0x00070005, action doesn't consist to entry type                                 */
+    RT_ERR_FILTER_INACL_RULENUM,                    /* 0x00070006, invalid ACL rulenum                                                  */
+    RT_ERR_FILTER_INACL_TYPE,                       /* 0x00070007, entry type isn't an ingress ACL rule                                 */
+    RT_ERR_FILTER_INACL_EXIST,                      /* 0x00070008, ACL entry already exists                                             */
+    RT_ERR_FILTER_INACL_EMPTY,                      /* 0x00070009, ACL entry is empty                                                   */
+    RT_ERR_FILTER_FLOWTBL_TYPE,                     /* 0x0007000a, entry type isn't an flow table rule                                  */
+    RT_ERR_FILTER_FLOWTBL_RULENUM,                  /* 0x0007000b, invalid flow table rulenum                                           */
+    RT_ERR_FILTER_FLOWTBL_EMPTY,                    /* 0x0007000c, flow table entry is empty                                            */
+    RT_ERR_FILTER_FLOWTBL_EXIST,                    /* 0x0007000d, flow table entry already exists                                      */
+    RT_ERR_FILTER_METER_ID,                         /* 0x0007000e, invalid metering id                                                  */
+    RT_ERR_FILTER_LOG_ID,                           /* 0x0007000f, invalid log id                                                       */
+    RT_ERR_FILTER_INACL_NONE_BEGIN_IDX,             /* 0x00070010, entry index is not starting index of a group of rules                */
+    RT_ERR_FILTER_INACL_ACT_NOT_SUPPORT,            /* 0x00070011, action not supported                                                 */
+    RT_ERR_FILTER_INACL_RULE_NOT_SUPPORT,           /* 0x00070012, rule not supported                                                   */
+
+    /* 0x0008xxxx for ACL Rate Limit */
+    RT_ERR_ACLRL_HTHR = 0x00080000,                 /* 0x00080000, invalid high threshold                                               */
+    RT_ERR_ACLRL_TIMESLOT,                          /* 0x00080001, invalid time slot                                                    */
+    RT_ERR_ACLRL_TOKEN,                             /* 0x00080002, invalid token amount                                                 */
+    RT_ERR_ACLRL_RATE,                              /* 0x00080003, invalid rate                                                         */
+
+    /* 0x0009xxxx for Link aggregation */
+    RT_ERR_LA_CPUPORT = 0x00090000,                 /* 0x00090000, CPU port can not be aggregated port                                  */
+    RT_ERR_LA_TRUNK_ID,                             /* 0x00090001, invalid trunk id                                                     */
+    RT_ERR_LA_PORTMASK,                             /* 0x00090002, invalid port mask                                                    */
+    RT_ERR_LA_HASHMASK,                             /* 0x00090003, invalid hash mask                                                    */
+    RT_ERR_LA_DUMB,                                 /* 0x00090004, this API should be used in 802.1ad dumb mode                         */
+    RT_ERR_LA_PORTNUM_DUMB,                         /* 0x00090005, it can only aggregate at most four ports when 802.1ad dumb mode      */
+    RT_ERR_LA_PORTNUM_NORMAL,                       /* 0x00090006, it can only aggregate at most eight ports when 802.1ad normal mode   */
+    RT_ERR_LA_MEMBER_OVERLAP,                       /* 0x00090007, the specified port mask is overlapped with other group               */
+    RT_ERR_LA_NOT_MEMBER_PORT,                      /* 0x00090008, the port is not a member port of the trunk                           */
+    RT_ERR_LA_TRUNK_NOT_EXIST,                      /* 0x00090009, the trunk doesn't exist                                              */
+
+
+    /* 0x000axxxx for storm filter */
+    RT_ERR_SFC_TICK_PERIOD = 0x000a0000,            /* 0x000a0000, invalid SFC tick period                                              */
+    RT_ERR_SFC_UNKNOWN_GROUP,                       /* 0x000a0001, Unknown Storm filter group                                           */
+
+    /* 0x000bxxxx for pattern match */
+    RT_ERR_PM_MASK = 0x000b0000,                    /* 0x000b0000, invalid pattern match mask, first byte must care                     */
+    RT_ERR_PM_LENGTH,                               /* 0x000b0001, invalid pattern length. Pattern length should be 8                   */
+    RT_ERR_PM_MODE,                                 /* 0x000b0002, invalid pattern match mode                                           */
+
+    /* 0x000cxxxx for input bandwidth control */
+    RT_ERR_INBW_TICK_PERIOD = 0x000c0000,           /* 0x000c0000, invalid tick period for input bandwidth control                      */
+    RT_ERR_INBW_TOKEN_AMOUNT,                       /* 0x000c0001, invalid amount of token for input bandwidth control                  */
+    RT_ERR_INBW_FCON_VALUE,                         /* 0x000c0002, invalid flow control ON threshold value for input bandwidth control  */
+    RT_ERR_INBW_FCOFF_VALUE,                        /* 0x000c0003, invalid flow control OFF threshold value for input bandwidth control */
+    RT_ERR_INBW_FC_ALLOWANCE,                       /* 0x000c0004, invalid allowance of incoming packet for input bandwidth control     */
+    RT_ERR_INBW_RATE,                               /* 0x000c0005, invalid input bandwidth                                              */
+
+    /* 0x000dxxxx for QoS */
+    RT_ERR_QOS_1P_PRIORITY = 0x000d0000,            /* 0x000d0000, invalid 802.1P priority                                              */
+    RT_ERR_QOS_DSCP_VALUE,                          /* 0x000d0001, invalid DSCP value                                                   */
+    RT_ERR_QOS_INT_PRIORITY,                        /* 0x000d0002, invalid internal priority                                            */
+    RT_ERR_QOS_SEL_DSCP_PRI,                        /* 0x000d0003, invalid DSCP selection priority                                      */
+    RT_ERR_QOS_SEL_PORT_PRI,                        /* 0x000d0004, invalid port selection priority                                      */
+    RT_ERR_QOS_SEL_IN_ACL_PRI,                      /* 0x000d0005, invalid ingress ACL selection priority                               */
+    RT_ERR_QOS_SEL_CLASS_PRI,                       /* 0x000d0006, invalid classifier selection priority                                */
+    RT_ERR_QOS_EBW_RATE,                            /* 0x000d0007, invalid egress bandwidth rate                                        */
+    RT_ERR_QOS_SCHE_TYPE,                           /* 0x000d0008, invalid QoS scheduling type                                          */
+    RT_ERR_QOS_QUEUE_WEIGHT,                        /* 0x000d0009, invalid Queue weight                                                 */
+    RT_ERR_QOS_SEL_PRI_SOURCE,                      /* 0x000d000a, invalid selection of priority source                                 */
+
+    /* 0x000exxxx for port ability */
+    RT_ERR_PHY_PAGE_ID = 0x000e0000,                /* 0x000e0000, invalid PHY page id                                                  */
+    RT_ERR_PHY_REG_ID,                              /* 0x000e0001, invalid PHY reg id                                                   */
+    RT_ERR_PHY_DATAMASK,                            /* 0x000e0002, invalid PHY data mask                                                */
+    RT_ERR_PHY_AUTO_NEGO_MODE,                      /* 0x000e0003, invalid PHY auto-negotiation mode                                    */
+    RT_ERR_PHY_SPEED,                               /* 0x000e0004, invalid PHY speed setting                                            */
+    RT_ERR_PHY_DUPLEX,                              /* 0x000e0005, invalid PHY duplex setting                                           */
+    RT_ERR_PHY_FORCE_ABILITY,                       /* 0x000e0006, invalid PHY force mode ability parameter                             */
+    RT_ERR_PHY_FORCE_1000,                          /* 0x000e0007, invalid PHY force mode 1G speed setting                              */
+    RT_ERR_PHY_TXRX,                                /* 0x000e0008, invalid PHY tx/rx                                                    */
+    RT_ERR_PHY_ID,                                  /* 0x000e0009, invalid PHY id                                                       */
+    RT_ERR_PHY_RTCT_NOT_FINISH,                     /* 0x000e000a, PHY RTCT in progress                                                 */
+
+    /* 0x000fxxxx for mirror */
+    RT_ERR_MIRROR_DIRECTION = 0x000f0000,           /* 0x000f0000, invalid error mirror direction                                       */
+    RT_ERR_MIRROR_SESSION_FULL,                     /* 0x000f0001, mirroring session is full                                            */
+    RT_ERR_MIRROR_SESSION_NOEXIST,                  /* 0x000f0002, mirroring session not exist                                          */
+    RT_ERR_MIRROR_PORT_EXIST,                       /* 0x000f0003, mirroring port already exists                                        */
+    RT_ERR_MIRROR_PORT_NOT_EXIST,                   /* 0x000f0004, mirroring port does not exist                                        */
+    RT_ERR_MIRROR_PORT_FULL,                        /* 0x000f0005, Exceeds maximum number of supported mirroring port                   */
+
+    /* 0x0010xxxx for stat */
+    RT_ERR_STAT_INVALID_GLOBAL_CNTR = 0x00100000,   /* 0x00100000, Invalid Global Counter                                               */
+    RT_ERR_STAT_INVALID_PORT_CNTR,                  /* 0x00100001, Invalid Port Counter                                                 */
+    RT_ERR_STAT_GLOBAL_CNTR_FAIL,                   /* 0x00100002, Could not retrieve/reset Global Counter                              */
+    RT_ERR_STAT_PORT_CNTR_FAIL,                     /* 0x00100003, Could not retrieve/reset Port Counter                                */
+    RT_ERR_STAT_INVALID_CNTR,                       /* 0x00100004, Invalid Counter                                                      */
+    RT_ERR_STAT_CNTR_FAIL,                          /* 0x00100005, Could not retrieve/reset Counter                                     */
+
+    /* 0x0011xxxx for dot1x */
+    RT_ERR_DOT1X_INVALID_DIRECTION = 0x00110000,    /* 0x00110000, Invalid Authentication Direction                                     */
+    RT_ERR_DOT1X_PORTBASEDPNEN,                     /* 0x00110001, Port-based enable port error                                         */
+    RT_ERR_DOT1X_PORTBASEDAUTH,                     /* 0x00110002, Port-based auth port error                                           */
+    RT_ERR_DOT1X_PORTBASEDOPDIR,                    /* 0x00110003, Port-based opdir error                                               */
+    RT_ERR_DOT1X_MACBASEDPNEN,                      /* 0x00110004, MAC-based enable port error                                          */
+    RT_ERR_DOT1X_MACBASEDOPDIR,                     /* 0x00110005, MAC-based opdir error                                                */
+    RT_ERR_DOT1X_PROC,                              /* 0x00110006, unauthorized behavior error                                          */
+    RT_ERR_DOT1X_GVLANIDX,                          /* 0x00110007, guest vlan index error                                               */
+    RT_ERR_DOT1X_GVLANTALK,                         /* 0x00110008, guest vlan OPDIR error                                               */
+    RT_ERR_DOT1X_MAC_PORT_MISMATCH,                 /* 0x00110009, Auth MAC and port mismatch error                                     */
+
+    RT_ERR_END                                       /* The symbol marks the end of the error code list; keep it last                    */
+} rt_error_code_t;
+
+
+#endif /* __COMMON_RT_ERROR_H__ */
diff -Nruw linux-4.4.115-fbx/drivers/misc/freebox./rtlapi/rtk_switch.c linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/rtk_switch.c
--- linux-4.4.115-fbx/drivers/misc/freebox./rtlapi/rtk_switch.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/rtk_switch.c	2019-01-22 16:16:24.711257346 +0100
@@ -0,0 +1,1904 @@
+/*
+ * Copyright (C) 2010 Realtek Semiconductor Corp.
+ * All Rights Reserved.
+ *
+ * This program is the proprietary software of Realtek Semiconductor
+ * Corporation and/or its licensors, and only be used, duplicated,
+ * modified or distributed under the authorized license from Realtek.
+ *
+ * ANY USE OF THE SOFTWARE OTHER THAN AS AUTHORIZED UNDER
+ * THIS LICENSE OR COPYRIGHT LAW IS PROHIBITED.
+ *
+ * $Revision: 79923 $
+ * $Date: 2017-06-21 16:40:41 +0800 (Wed, 21 June 2017) $
+ *
+ * Purpose : RTK switch high-level API
+ * Feature : Here is a list of all functions and variables in this module.
+ *
+ */
+#include <rtk_switch.h>
+#include <rtk_error.h>
+#include <string.h>
+
+#include <rate.h>
+#include <rtl8367c_asicdrv.h>
+#include <rtl8367c_asicdrv_misc.h>
+#include <rtl8367c_asicdrv_green.h>
+#include <rtl8367c_asicdrv_lut.h>
+#include <rtl8367c_asicdrv_rma.h>
+#include <rtl8367c_asicdrv_mirror.h>
+#include <rtl8367c_asicdrv_scheduling.h>
+#include <rtl8367c_asicdrv_inbwctrl.h>
+
+#if defined(FORCE_PROBE_RTL8367C)
+static init_state_t    init_state = INIT_COMPLETED;
+#elif defined(FORCE_PROBE_RTL8370B)
+static init_state_t    init_state = INIT_COMPLETED;
+#elif defined(FORCE_PROBE_RTL8364B)
+static init_state_t    init_state = INIT_COMPLETED;
+#elif defined(FORCE_PROBE_RTL8363SC_VB)
+static init_state_t    init_state = INIT_COMPLETED;
+#else
+static init_state_t    init_state = INIT_NOT_COMPLETED;
+#endif
+
+#if defined(RTK_X86_CLE)
+pthread_mutex_t api_mutex = PTHREAD_MUTEX_INITIALIZER;
+#endif
+
+#define AUTO_PROBE (!defined(FORCE_PROBE_RTL8367C) && !defined(FORCE_PROBE_RTL8370B) && !defined(FORCE_PROBE_RTL8364B) && !defined(FORCE_PROBE_RTL8363SC_VB))
+
+#if (AUTO_PROBE || defined(FORCE_PROBE_RTL8367C))
+static rtk_switch_halCtrl_t rtl8367c_hal_Ctrl =
+{
+    /* Switch Chip */
+    CHIP_RTL8367C,
+
+    /* Logical to Physical */
+    {0, 1, 2, 3, 4, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+     6, 7, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF },
+
+    /* Physical to Logical */
+    {UTP_PORT0, UTP_PORT1, UTP_PORT2, UTP_PORT3, UTP_PORT4, UNDEFINE_PORT, EXT_PORT0, EXT_PORT1,
+     UNDEFINE_PORT, UNDEFINE_PORT, UNDEFINE_PORT, UNDEFINE_PORT, UNDEFINE_PORT, UNDEFINE_PORT, UNDEFINE_PORT, UNDEFINE_PORT,
+     UNDEFINE_PORT, UNDEFINE_PORT, UNDEFINE_PORT, UNDEFINE_PORT, UNDEFINE_PORT, UNDEFINE_PORT, UNDEFINE_PORT, UNDEFINE_PORT,
+     UNDEFINE_PORT, UNDEFINE_PORT, UNDEFINE_PORT, UNDEFINE_PORT, UNDEFINE_PORT, UNDEFINE_PORT, UNDEFINE_PORT, UNDEFINE_PORT},
+
+    /* Port Type */
+    {UTP_PORT, UTP_PORT, UTP_PORT, UTP_PORT, UTP_PORT, UNKNOWN_PORT, UNKNOWN_PORT, UNKNOWN_PORT,
+     UNKNOWN_PORT, UNKNOWN_PORT, UNKNOWN_PORT, UNKNOWN_PORT, UNKNOWN_PORT, UNKNOWN_PORT, UNKNOWN_PORT, UNKNOWN_PORT,
+     EXT_PORT, EXT_PORT, UNKNOWN_PORT, UNKNOWN_PORT, UNKNOWN_PORT, UNKNOWN_PORT, UNKNOWN_PORT, UNKNOWN_PORT,
+     UNKNOWN_PORT, UNKNOWN_PORT, UNKNOWN_PORT, UNKNOWN_PORT, UNKNOWN_PORT, UNKNOWN_PORT, UNKNOWN_PORT, UNKNOWN_PORT},
+
+    /* PTP port */
+    {1, 1, 1, 1, 1, 0, 0, 0,
+     0, 0, 0, 0, 0, 0, 0, 0,
+     0, 0, 0, 0, 0, 0, 0, 0,
+     0, 0, 0, 0, 0, 0, 0, 0 },
+
+    /* Valid port mask */
+    ( (0x1 << UTP_PORT0) | (0x1 << UTP_PORT1) | (0x1 << UTP_PORT2) | (0x1 << UTP_PORT3) | (0x1 << UTP_PORT4) | (0x1 << EXT_PORT0) | (0x1 << EXT_PORT1) ),
+
+    /* Valid UTP port mask */
+    ( (0x1 << UTP_PORT0) | (0x1 << UTP_PORT1) | (0x1 << UTP_PORT2) | (0x1 << UTP_PORT3) | (0x1 << UTP_PORT4) ),
+
+    /* Valid EXT port mask */
+    ( (0x1 << EXT_PORT0) | (0x1 << EXT_PORT1) ),
+
+    /* Valid CPU port mask */
+    0x00,
+
+    /* Minimum physical port number */
+    0,
+
+    /* Maximum physical port number */
+    7,
+
+    /* Physical port mask */
+    0xDF,
+
+    /* Combo Logical port ID */
+    4,
+
+    /* HSG Logical port ID */
+    EXT_PORT0,
+
+    /* SGMII Logical portmask */
+    (0x1 << EXT_PORT0),
+
+    /* Max Meter ID */
+    31,
+
+    /* MAX LUT Address Number */
+    2112,
+
+    /* Trunk Group Mask */
+    0x03
+};
+#endif
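+/*
+ * How to read the per-chip tables above and below: l2p_port[] is indexed by
+ * logical port and yields the physical MAC number, p2l_port[] is its
+ * inverse, and 0xFF/UNDEFINE_PORT mark holes in the mapping. On the
+ * RTL8367C, for instance, the EXT_PORT0 entry maps to physical MAC 6 and
+ * p2l_port[6] maps back to EXT_PORT0.
+ */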
+
+#if (AUTO_PROBE || defined(FORCE_PROBE_RTL8370B))
+static rtk_switch_halCtrl_t rtl8370b_hal_Ctrl =
+{
+    /* Switch Chip */
+    CHIP_RTL8370B,
+
+    /* Logical to Physical */
+    {0, 1, 2, 3, 4, 5, 6, 7, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+     8, 9, 10, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF },
+
+    /* Physical to Logical */
+    {UTP_PORT0, UTP_PORT1, UTP_PORT2, UTP_PORT3, UTP_PORT4, UTP_PORT5, UTP_PORT6, UTP_PORT7,
+     EXT_PORT0, EXT_PORT1, EXT_PORT2, UNDEFINE_PORT, UNDEFINE_PORT, UNDEFINE_PORT, UNDEFINE_PORT, UNDEFINE_PORT,
+     UNDEFINE_PORT, UNDEFINE_PORT, UNDEFINE_PORT, UNDEFINE_PORT, UNDEFINE_PORT, UNDEFINE_PORT, UNDEFINE_PORT, UNDEFINE_PORT,
+     UNDEFINE_PORT, UNDEFINE_PORT, UNDEFINE_PORT, UNDEFINE_PORT, UNDEFINE_PORT, UNDEFINE_PORT, UNDEFINE_PORT, UNDEFINE_PORT},
+
+    /* Port Type */
+    {UTP_PORT, UTP_PORT, UTP_PORT, UTP_PORT, UTP_PORT, UTP_PORT, UTP_PORT, UTP_PORT,
+     UNKNOWN_PORT, UNKNOWN_PORT, UNKNOWN_PORT, UNKNOWN_PORT, UNKNOWN_PORT, UNKNOWN_PORT, UNKNOWN_PORT, UNKNOWN_PORT,
+     EXT_PORT, EXT_PORT, EXT_PORT, UNKNOWN_PORT, UNKNOWN_PORT, UNKNOWN_PORT, UNKNOWN_PORT, UNKNOWN_PORT,
+     UNKNOWN_PORT, UNKNOWN_PORT, UNKNOWN_PORT, UNKNOWN_PORT, UNKNOWN_PORT, UNKNOWN_PORT, UNKNOWN_PORT, UNKNOWN_PORT},
+
+    /* PTP port */
+    {1, 1, 1, 1, 1, 1, 1, 1,
+     0, 0, 0, 0, 0, 0, 0, 0,
+     1, 1, 0, 0, 0, 0, 0, 0,
+     0, 0, 0, 0, 0, 0, 0, 0 },
+
+    /* Valid port mask */
+    ( (0x1 << UTP_PORT0) | (0x1 << UTP_PORT1) | (0x1 << UTP_PORT2) | (0x1 << UTP_PORT3) | (0x1 << UTP_PORT4) | (0x1 << UTP_PORT5) | (0x1 << UTP_PORT6) | (0x1 << UTP_PORT7) | (0x1 << EXT_PORT0) | (0x1 << EXT_PORT1) | (0x1 << EXT_PORT2) ),
+
+    /* Valid UTP port mask */
+    ( (0x1 << UTP_PORT0) | (0x1 << UTP_PORT1) | (0x1 << UTP_PORT2) | (0x1 << UTP_PORT3) | (0x1 << UTP_PORT4) | (0x1 << UTP_PORT5) | (0x1 << UTP_PORT6) | (0x1 << UTP_PORT7) ),
+
+    /* Valid EXT port mask */
+    ( (0x1 << EXT_PORT0) | (0x1 << EXT_PORT1) | (0x1 << EXT_PORT2) ),
+
+    /* Valid CPU port mask */
+    (0x1 << EXT_PORT2),
+
+    /* Minimum physical port number */
+    0,
+
+    /* Maximum physical port number */
+    10,
+
+    /* Physical port mask */
+    0x7FF,
+
+    /* Combo Logical port ID */
+    7,
+
+    /* HSG Logical port ID */
+    EXT_PORT1,
+
+    /* SGMII Logical portmask */
+    ( (0x1 << EXT_PORT0) | (0x1 << EXT_PORT1) ),
+
+    /* Max Meter ID */
+    63,
+
+    /* MAX LUT Address Number (4096 + 64) */
+    4160,
+
+    /* Trunk Group Mask */
+    0x07
+};
+#endif
+
+#if (AUTO_PROBE || defined(FORCE_PROBE_RTL8364B))
+static rtk_switch_halCtrl_t rtl8364b_hal_Ctrl =
+{
+    /* Switch Chip */
+    CHIP_RTL8364B,
+
+    /* Logical to Physical */
+    {0xFF, 1, 0xFF, 3, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+     6, 7, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF },
+
+    /* Physical to Logical */
+    {UNDEFINE_PORT, UTP_PORT1, UNDEFINE_PORT, UTP_PORT3, UNDEFINE_PORT, UNDEFINE_PORT, EXT_PORT0, EXT_PORT1,
+     UNDEFINE_PORT, UNDEFINE_PORT, UNDEFINE_PORT, UNDEFINE_PORT, UNDEFINE_PORT, UNDEFINE_PORT, UNDEFINE_PORT, UNDEFINE_PORT,
+     UNDEFINE_PORT, UNDEFINE_PORT, UNDEFINE_PORT, UNDEFINE_PORT, UNDEFINE_PORT, UNDEFINE_PORT, UNDEFINE_PORT, UNDEFINE_PORT,
+     UNDEFINE_PORT, UNDEFINE_PORT, UNDEFINE_PORT, UNDEFINE_PORT, UNDEFINE_PORT, UNDEFINE_PORT, UNDEFINE_PORT, UNDEFINE_PORT},
+
+    /* Port Type */
+    {UNKNOWN_PORT, UTP_PORT, UNKNOWN_PORT, UTP_PORT, UNKNOWN_PORT, UNKNOWN_PORT, UNKNOWN_PORT, UNKNOWN_PORT,
+     UNKNOWN_PORT, UNKNOWN_PORT, UNKNOWN_PORT, UNKNOWN_PORT, UNKNOWN_PORT, UNKNOWN_PORT, UNKNOWN_PORT, UNKNOWN_PORT,
+     EXT_PORT, EXT_PORT, UNKNOWN_PORT, UNKNOWN_PORT, UNKNOWN_PORT, UNKNOWN_PORT, UNKNOWN_PORT, UNKNOWN_PORT,
+     UNKNOWN_PORT, UNKNOWN_PORT, UNKNOWN_PORT, UNKNOWN_PORT, UNKNOWN_PORT, UNKNOWN_PORT, UNKNOWN_PORT, UNKNOWN_PORT},
+
+    /* PTP port */
+    {0, 0, 0, 0, 0, 0, 0, 0,
+     0, 0, 0, 0, 0, 0, 0, 0,
+     0, 0, 0, 0, 0, 0, 0, 0,
+     0, 0, 0, 0, 0, 0, 0, 0 },
+
+    /* Valid port mask */
+    ( (0x1 << UTP_PORT1) | (0x1 << UTP_PORT3) | (0x1 << EXT_PORT0) | (0x1 << EXT_PORT1) ),
+
+    /* Valid UTP port mask */
+    ( (0x1 << UTP_PORT1) | (0x1 << UTP_PORT3) ),
+
+    /* Valid EXT port mask */
+    ( (0x1 << EXT_PORT0) | (0x1 << EXT_PORT1) ),
+
+    /* Valid CPU port mask */
+    0x00,
+
+    /* Minimum physical port number */
+    0,
+
+    /* Maximum physical port number */
+    7,
+
+    /* Physical port mask */
+    0xCA,
+
+    /* Combo Logical port ID */
+    4,
+
+    /* HSG Logical port ID */
+    EXT_PORT0,
+
+    /* SGMII Logical portmask */
+    ( (0x1 << EXT_PORT0) | (0x1 << EXT_PORT1) ),
+
+    /* Max Meter ID */
+    32,
+
+    /* MAX LUT Address Number */
+    2112,
+
+    /* Trunk Group Mask */
+    0x01
+};
+#endif
+
+#if (AUTO_PROBE || defined(FORCE_PROBE_RTL8363SC_VB))
+static rtk_switch_halCtrl_t rtl8363sc_vb_hal_Ctrl =
+{
+    /* Switch Chip */
+    CHIP_RTL8363SC_VB,
+
+    /* Logical to Physical */
+    {0xFF, 0xFF, 1, 3, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+     6, 7, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF },
+
+    /* Physical to Logical */
+    {UNDEFINE_PORT, UTP_PORT2, UNDEFINE_PORT, UTP_PORT3, UNDEFINE_PORT, UNDEFINE_PORT, EXT_PORT0, EXT_PORT1,
+     UNDEFINE_PORT, UNDEFINE_PORT, UNDEFINE_PORT, UNDEFINE_PORT, UNDEFINE_PORT, UNDEFINE_PORT, UNDEFINE_PORT, UNDEFINE_PORT,
+     UNDEFINE_PORT, UNDEFINE_PORT, UNDEFINE_PORT, UNDEFINE_PORT, UNDEFINE_PORT, UNDEFINE_PORT, UNDEFINE_PORT, UNDEFINE_PORT,
+     UNDEFINE_PORT, UNDEFINE_PORT, UNDEFINE_PORT, UNDEFINE_PORT, UNDEFINE_PORT, UNDEFINE_PORT, UNDEFINE_PORT, UNDEFINE_PORT},
+
+    /* Port Type */
+    {UNKNOWN_PORT, UNKNOWN_PORT, UTP_PORT, UTP_PORT, UNKNOWN_PORT, UNKNOWN_PORT, UNKNOWN_PORT, UNKNOWN_PORT,
+     UNKNOWN_PORT, UNKNOWN_PORT, UNKNOWN_PORT, UNKNOWN_PORT, UNKNOWN_PORT, UNKNOWN_PORT, UNKNOWN_PORT, UNKNOWN_PORT,
+     EXT_PORT, EXT_PORT, UNKNOWN_PORT, UNKNOWN_PORT, UNKNOWN_PORT, UNKNOWN_PORT, UNKNOWN_PORT, UNKNOWN_PORT,
+     UNKNOWN_PORT, UNKNOWN_PORT, UNKNOWN_PORT, UNKNOWN_PORT, UNKNOWN_PORT, UNKNOWN_PORT, UNKNOWN_PORT, UNKNOWN_PORT},
+
+    /* PTP port */
+    {0, 0, 0, 0, 0, 0, 0, 0,
+     0, 0, 0, 0, 0, 0, 0, 0,
+     0, 0, 0, 0, 0, 0, 0, 0,
+     0, 0, 0, 0, 0, 0, 0, 0 },
+
+    /* Valid port mask */
+    ( (0x1 << UTP_PORT2) | (0x1 << UTP_PORT3) | (0x1 << EXT_PORT0) | (0x1 << EXT_PORT1) ),
+
+    /* Valid UTP port mask */
+    ( (0x1 << UTP_PORT2) | (0x1 << UTP_PORT3) ),
+
+    /* Valid EXT port mask */
+    ( (0x1 << EXT_PORT0) | (0x1 << EXT_PORT1) ),
+
+    /* Valid CPU port mask */
+    0x00,
+
+    /* Minimum physical port number */
+    0,
+
+    /* Maximum physical port number */
+    7,
+
+    /* Physical port mask */
+    0xCA,
+
+    /* Combo Logical port ID */
+    4,
+
+    /* HSG Logical port ID */
+    EXT_PORT0,
+
+    /* SGMII Logical portmask */
+    ( (0x1 << EXT_PORT0) | (0x1 << EXT_PORT1) ),
+
+    /* Max Meter ID */
+    32,
+
+    /* MAX LUT Address Number */
+    2112,
+
+    /* Trunk Group Mask */
+    0x01
+};
+#endif
+
+#if defined(FORCE_PROBE_RTL8367C)
+static rtk_switch_halCtrl_t *halCtrl = &rtl8367c_hal_Ctrl;
+#elif defined(FORCE_PROBE_RTL8370B)
+static rtk_switch_halCtrl_t *halCtrl = &rtl8370b_hal_Ctrl;
+#elif defined(FORCE_PROBE_RTL8364B)
+static rtk_switch_halCtrl_t *halCtrl = &rtl8364b_hal_Ctrl;
+#elif defined(FORCE_PROBE_RTL8363SC_VB)
+static rtk_switch_halCtrl_t *halCtrl = &rtl8363sc_vb_hal_Ctrl;
+#else
+static rtk_switch_halCtrl_t *halCtrl = NULL;
+#endif
+
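+/*
+ * PHY firmware patch for the RTL8370B: 210 {OCP register, value} pairs.
+ * _rtk_switch_init_8370b() below writes them to PHY ID 0 with the PHY
+ * broadcast window enabled (register 0x1d01), so all eight PHYs receive
+ * the patch in one pass.
+ */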
+static rtk_uint32 PatchChipData[210][2] =
+{
+        {0xa436, 0x8028}, {0xa438, 0x6800}, {0xb82e, 0x0001}, {0xa436, 0xb820}, {0xa438, 0x0090}, {0xa436, 0xa012}, {0xa438, 0x0000}, {0xa436, 0xa014}, {0xa438, 0x2c04}, {0xa438, 0x2c6c},
+        {0xa438, 0x2c75}, {0xa438, 0x2c77}, {0xa438, 0x1414}, {0xa438, 0x1579}, {0xa438, 0x1536}, {0xa438, 0xc432}, {0xa438, 0x32c0}, {0xa438, 0x42d6}, {0xa438, 0x32b5}, {0xa438, 0x003e},
+        {0xa438, 0x614c}, {0xa438, 0x1569}, {0xa438, 0xd705}, {0xa438, 0x318c}, {0xa438, 0x42d6}, {0xa438, 0xd702}, {0xa438, 0x31ef}, {0xa438, 0x42d6}, {0xa438, 0x629c}, {0xa438, 0x2c04},
+        {0xa438, 0x653c}, {0xa438, 0x422a}, {0xa438, 0x5d83}, {0xa438, 0xd06a}, {0xa438, 0xd1b0}, {0xa438, 0x1536}, {0xa438, 0xc43a}, {0xa438, 0x32c0}, {0xa438, 0x42d6}, {0xa438, 0x32b5},
+        {0xa438, 0x003e}, {0xa438, 0x314a}, {0xa438, 0x42fe}, {0xa438, 0x337b}, {0xa438, 0x02d6}, {0xa438, 0x3063}, {0xa438, 0x0c1b}, {0xa438, 0x22fe}, {0xa438, 0xc435}, {0xa438, 0xd0be},
+        {0xa438, 0xd1f7}, {0xa438, 0xe0f0}, {0xa438, 0x1a40}, {0xa438, 0xa320}, {0xa438, 0xd702}, {0xa438, 0x154a}, {0xa438, 0xc434}, {0xa438, 0x32c0}, {0xa438, 0x42d6}, {0xa438, 0x32b5},
+        {0xa438, 0x003e}, {0xa438, 0x60ec}, {0xa438, 0x1569}, {0xa438, 0xd705}, {0xa438, 0x619f}, {0xa438, 0xd702}, {0xa438, 0x414f}, {0xa438, 0x2c2e}, {0xa438, 0x610a}, {0xa438, 0xd705},
+        {0xa438, 0x5e1f}, {0xa438, 0xc43f}, {0xa438, 0xc88b}, {0xa438, 0xd702}, {0xa438, 0x7fe0}, {0xa438, 0x22f3}, {0xa438, 0xd0a0}, {0xa438, 0xd1b2}, {0xa438, 0xd0c3}, {0xa438, 0xd1c3},
+        {0xa438, 0x8d01}, {0xa438, 0x1536}, {0xa438, 0xc438}, {0xa438, 0xe0f0}, {0xa438, 0x1a80}, {0xa438, 0xd706}, {0xa438, 0x60c0}, {0xa438, 0xd710}, {0xa438, 0x409e}, {0xa438, 0xa804},
+        {0xa438, 0xad01}, {0xa438, 0x8804}, {0xa438, 0xd702}, {0xa438, 0x32c0}, {0xa438, 0x42d6}, {0xa438, 0x32b5}, {0xa438, 0x003e}, {0xa438, 0x405b}, {0xa438, 0x1576}, {0xa438, 0x7c9c},
+        {0xa438, 0x60ec}, {0xa438, 0x1569}, {0xa438, 0xd702}, {0xa438, 0x5d43}, {0xa438, 0x31ef}, {0xa438, 0x02fe}, {0xa438, 0x22d6}, {0xa438, 0x590a}, {0xa438, 0xd706}, {0xa438, 0x5c80},
+        {0xa438, 0xd702}, {0xa438, 0x5c44}, {0xa438, 0x3063}, {0xa438, 0x02d6}, {0xa438, 0x5be2}, {0xa438, 0x22fb}, {0xa438, 0xa240}, {0xa438, 0xa104}, {0xa438, 0x8c03}, {0xa438, 0x8178},
+        {0xa438, 0xd701}, {0xa438, 0x31ad}, {0xa438, 0x4917}, {0xa438, 0x8102}, {0xa438, 0x2917}, {0xa438, 0xc302}, {0xa438, 0x268a}, {0xa436, 0xA01A}, {0xa438, 0x0000}, {0xa436, 0xA006},
+        {0xa438, 0x0fff}, {0xa436, 0xA004}, {0xa438, 0x0689}, {0xa436, 0xA002}, {0xa438, 0x0911}, {0xa436, 0xA000}, {0xa438, 0x7302}, {0xa436, 0xB820}, {0xa438, 0x0010}, {0xa436, 0x8412},
+        {0xa438, 0xaf84}, {0xa438, 0x1eaf}, {0xa438, 0x8427}, {0xa438, 0xaf84}, {0xa438, 0x27af}, {0xa438, 0x8427}, {0xa438, 0x0251}, {0xa438, 0x6802}, {0xa438, 0x8427}, {0xa438, 0xaf04},
+        {0xa438, 0x0af8}, {0xa438, 0xf9bf}, {0xa438, 0x5581}, {0xa438, 0x0255}, {0xa438, 0x27ef}, {0xa438, 0x310d}, {0xa438, 0x345b}, {0xa438, 0x0fa3}, {0xa438, 0x032a}, {0xa438, 0xe087},
+        {0xa438, 0xffac}, {0xa438, 0x2040}, {0xa438, 0xbf56}, {0xa438, 0x7402}, {0xa438, 0x5527}, {0xa438, 0xef31}, {0xa438, 0xef20}, {0xa438, 0xe787}, {0xa438, 0xfee6}, {0xa438, 0x87fd},
+        {0xa438, 0xd488}, {0xa438, 0x88bf}, {0xa438, 0x5674}, {0xa438, 0x0254}, {0xa438, 0xe3e0}, {0xa438, 0x87ff}, {0xa438, 0xf720}, {0xa438, 0xe487}, {0xa438, 0xffaf}, {0xa438, 0x847e},
+        {0xa438, 0xe087}, {0xa438, 0xffad}, {0xa438, 0x2016}, {0xa438, 0xe387}, {0xa438, 0xfee2}, {0xa438, 0x87fd}, {0xa438, 0xef45}, {0xa438, 0xbf56}, {0xa438, 0x7402}, {0xa438, 0x54e3},
+        {0xa438, 0xe087}, {0xa438, 0xfff6}, {0xa438, 0x20e4}, {0xa438, 0x87ff}, {0xa438, 0xfdfc}, {0xa438, 0x0400}, {0xa436, 0xb818}, {0xa438, 0x0407}, {0xa436, 0xb81a}, {0xa438, 0xfffd},
+        {0xa436, 0xb81c}, {0xa438, 0xfffd}, {0xa436, 0xb81e}, {0xa438, 0xfffd}, {0xa436, 0xb832}, {0xa438, 0x0001}, {0xb820, 0x0000}, {0xb82e, 0x0000}, {0xa436, 0x8028}, {0xa438, 0x0000}
+};
+
+static rtk_api_ret_t _rtk_switch_init_8367c(void)
+{
+    rtk_port_t port;
+    rtk_uint32 retVal;
+    rtk_uint32 regData;
+    rtk_uint32 regValue;
+
+    if( (retVal = rtl8367c_setAsicReg(0x13c2, 0x0249)) != RT_ERR_OK)
+        return retVal;
+
+    if( (retVal = rtl8367c_getAsicReg(0x1301, &regValue)) != RT_ERR_OK)
+        return retVal;
+
+    if( (retVal = rtl8367c_setAsicReg(0x13c2, 0x0000)) != RT_ERR_OK)
+        return retVal;
+
+    RTK_SCAN_ALL_LOG_PORT(port)
+    {
+         if(rtk_switch_isUtpPort(port) == RT_ERR_OK)
+         {
+             if((retVal = rtl8367c_setAsicRegBit(RTL8367C_REG_PORT0_EEECFG + (0x20 * port), RTL8367C_PORT0_EEECFG_EEE_100M_OFFSET, 1)) != RT_ERR_OK)
+                 return retVal;
+
+             if((retVal = rtl8367c_setAsicRegBit(RTL8367C_REG_PORT0_EEECFG + (0x20 * port), RTL8367C_PORT0_EEECFG_EEE_GIGA_500M_OFFSET, 1)) != RT_ERR_OK)
+                 return retVal;
+
+             if((retVal = rtl8367c_setAsicRegBit(RTL8367C_REG_PORT0_EEECFG + (0x20 * port), RTL8367C_PORT0_EEECFG_EEE_TX_OFFSET, 1)) != RT_ERR_OK)
+                 return retVal;
+
+             if((retVal = rtl8367c_setAsicRegBit(RTL8367C_REG_PORT0_EEECFG + (0x20 * port), RTL8367C_PORT0_EEECFG_EEE_RX_OFFSET, 1)) != RT_ERR_OK)
+                 return retVal;
+
+             if((retVal = rtl8367c_getAsicPHYOCPReg(port, 0xA428, &regData)) != RT_ERR_OK)
+                return retVal;
+
+             regData &= ~(0x0200);
+             if((retVal = rtl8367c_setAsicPHYOCPReg(port, 0xA428, regData)) != RT_ERR_OK)
+                 return retVal;
+
+             if((regValue & 0x00F0) == 0x00A0)
+             {
+                 if((retVal = rtl8367c_getAsicPHYOCPReg(port, 0xA5D0, &regData)) != RT_ERR_OK)
+                     return retVal;
+
+                 regData |= 0x0006;
+                 if((retVal = rtl8367c_setAsicPHYOCPReg(port, 0xA5D0, regData)) != RT_ERR_OK)
+                     return retVal;
+             }
+         }
+    }
+
+    if((retVal = rtl8367c_setAsicReg(RTL8367C_REG_UTP_FIB_DET, 0x15BB)) != RT_ERR_OK)
+        return retVal;
+
+    if((retVal = rtl8367c_setAsicReg(0x1303, 0x06D6)) != RT_ERR_OK)
+        return retVal;
+
+    if((retVal = rtl8367c_setAsicReg(0x1304, 0x0700)) != RT_ERR_OK)
+        return retVal;
+
+    if((retVal = rtl8367c_setAsicReg(0x13E2, 0x003F)) != RT_ERR_OK)
+        return retVal;
+
+    if((retVal = rtl8367c_setAsicReg(0x13F9, 0x0090)) != RT_ERR_OK)
+        return retVal;
+
+    if((retVal = rtl8367c_setAsicReg(0x121e, 0x03CA)) != RT_ERR_OK)
+        return retVal;
+
+    if((retVal = rtl8367c_setAsicReg(0x1233, 0x0352)) != RT_ERR_OK)
+        return retVal;
+
+    if((retVal = rtl8367c_setAsicReg(0x1237, 0x00a0)) != RT_ERR_OK)
+        return retVal;
+
+    if((retVal = rtl8367c_setAsicReg(0x123a, 0x0030)) != RT_ERR_OK)
+        return retVal;
+
+    if((retVal = rtl8367c_setAsicReg(0x1239, 0x0084)) != RT_ERR_OK)
+        return retVal;
+
+    if((retVal = rtl8367c_setAsicReg(0x0301, 0x1000)) != RT_ERR_OK)
+        return retVal;
+
+    if((retVal = rtl8367c_setAsicReg(0x1349, 0x001F)) != RT_ERR_OK)
+        return retVal;
+
+    if((retVal = rtl8367c_setAsicRegBit(0x18e0, 0, 0)) != RT_ERR_OK)
+        return retVal;
+
+    if((retVal = rtl8367c_setAsicRegBit(0x122b, 14, 1)) != RT_ERR_OK)
+        return retVal;
+
+    if((retVal = rtl8367c_setAsicRegBits(0x1305, 0xC000, 3)) != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_switch_init_8370b(void)
+{
+    ret_t retVal;
+    rtk_uint32 regData, tmp = 0;
+    rtk_uint32 i, prf = 0, counter; /* prf is set when any PHY enters the patch sequence */
+    rtk_uint32 long_link[8] = {0x0210, 0x03e8, 0x0218, 0x03f0, 0x0220, 0x03f8, 0x0208, 0x03e0 };
+
+    if((retVal = rtl8367c_setAsicRegBits(0x1205, 0x0300, 3)) != RT_ERR_OK)
+        return retVal;
+
+    for (i = 0; i < 8; i++)
+    {
+        if ((retVal = rtl8367c_getAsicPHYOCPReg(i, 0xa420, &regData)) != RT_ERR_OK)
+            return retVal;
+
+        tmp = regData & 0x7;
+        if (tmp == 0x3)
+        {
+            prf = 1;
+            if ((retVal = rtl8367c_setAsicPHYOCPReg(i, 0xb83e, 0x6fa9)) != RT_ERR_OK)
+                return retVal;
+            if ((retVal = rtl8367c_setAsicPHYOCPReg(i, 0xb840, 0xa9)) != RT_ERR_OK)
+                return retVal;
+            for (counter = 0; counter < 10000; counter++); /* delay */
+
+            if ((retVal = rtl8367c_getAsicPHYOCPReg(i, 0xb820, &regData)) != RT_ERR_OK)
+                return retVal;
+            tmp = regData | 0x10;
+            if ((retVal = rtl8367c_setAsicPHYOCPReg(i, 0xb820, tmp)) != RT_ERR_OK)
+                return retVal;
+            for (counter = 0; counter < 10000; counter++); /* delay */
+
+            counter = 0;
+            do {
+                counter = counter + 1;
+                if ((retVal = rtl8367c_getAsicPHYOCPReg(i, 0xb800, &regData)) != RT_ERR_OK)
+                    return retVal;
+                tmp = regData & 0x40;
+                if (tmp != 0)
+                    break;
+            } while (counter < 20); /* wait for patch ready = 1 */
+        }
+    }
+
+    if ((retVal = rtl8367c_getAsicReg(0x1d01, &regData)) != RT_ERR_OK)
+        return retVal;
+    tmp = regData;
+    tmp = tmp | 0x3BE0; /* Broadcast port enable */
+    tmp = tmp & 0xFFE0; /* Phy_id = 0 */
+    if ((retVal = rtl8367c_setAsicReg(0x1d01, tmp)) != RT_ERR_OK)
+        return retVal;
+
+    for (i = 0; i < 210; i++)
+    {
+        if ((retVal = rtl8367c_setAsicPHYOCPReg(0, PatchChipData[i][0], PatchChipData[i][1])) != RT_ERR_OK)
+            return retVal;
+    }
+
+    if ((retVal = rtl8367c_setAsicReg(0x1d01, regData)) != RT_ERR_OK)
+        return retVal;
+
+    for (i = 0; i < 8; i++)
+    {
+        if ((retVal = rtl8367c_setAsicPHYOCPReg(i, 0xa4b4, long_link[i])) != RT_ERR_OK)
+            return retVal;
+    }
+
+    if (prf == 0x1)
+    {
+        for (i = 0; i < 8; i++)
+        {
+            if ((retVal = rtl8367c_getAsicPHYOCPReg(i, 0xb820, &regData)) != RT_ERR_OK)
+                return retVal;
+            tmp = regData & 0xFFEF;
+            if ((retVal = rtl8367c_setAsicPHYOCPReg(i, 0xb820, tmp)) != RT_ERR_OK)
+                return retVal;
+
+            for (counter = 0; counter < 10000; counter++); /* delay */
+
+            counter = 0;
+            do {
+                counter = counter + 1;
+                if ((retVal = rtl8367c_getAsicPHYOCPReg(i, 0xb800, &regData)) != RT_ERR_OK)
+                    return retVal;
+                tmp = regData & 0x40;
+                if (tmp == 0)
+                    break;
+            } while (counter < 20); /* wait for patch ready = 0 */
+
+            if ((retVal = rtl8367c_setAsicPHYOCPReg(i, 0xb83e, 0x6f48)) != RT_ERR_OK)
+                return retVal;
+            if ((retVal = rtl8367c_setAsicPHYOCPReg(i, 0xb840, 0xfa)) != RT_ERR_OK)
+                return retVal;
+        }
+    }
+
+    /* Check PHY link status */
+    for (i = 0; i < 8; i++)
+    {
+        if ((retVal = rtl8367c_getAsicPHYOCPReg(i, 0xa400, &regData)) != RT_ERR_OK)
+            return retVal;
+        tmp = regData & 0x800;
+        if (tmp == 0x0)
+        {
+            tmp = regData | 0x200;
+            if ((retVal = rtl8367c_setAsicPHYOCPReg(i, 0xa400, tmp)) != RT_ERR_OK)
+                return retVal;
+        }
+    }
+
+    for (counter = 0; counter < 10000; counter++); /* delay */
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_switch_init_8364b(void)
+{
+    ret_t retVal;
+    rtk_uint32 regData;
+
+    /* Enable EEE, both MAC and PHY */
+
+    if ((retVal = rtl8367c_setAsicRegBits(0x38, 0x300, 3)) != RT_ERR_OK)
+        return retVal;
+    if ((retVal = rtl8367c_setAsicRegBits(0x78, 0x300, 3)) != RT_ERR_OK)
+        return retVal;
+    if ((retVal = rtl8367c_setAsicRegBits(0xd8, 0x300, 0)) != RT_ERR_OK)
+        return retVal;
+    if ((retVal = rtl8367c_setAsicRegBits(0xf8, 0x300, 0)) != RT_ERR_OK)
+        return retVal;
+
+    if ((retVal = rtl8367c_setAsicPHYOCPReg(1, 0xa5d0, 6)) != RT_ERR_OK)
+        return retVal;
+    if ((retVal = rtl8367c_setAsicPHYOCPReg(3, 0xa5d0, 6)) != RT_ERR_OK)
+        return retVal;
+
+    /*PAD para*/
+
+    /*EXT1 PAD Para*/
+    if ((retVal = rtl8367c_getAsicReg(0x1303, &regData)) != RT_ERR_OK)
+        return retVal;
+    regData &= 0xFFFFFFFE;
+    regData |= 0x250;
+    if((retVal = rtl8367c_setAsicReg(0x1303, regData)) != RT_ERR_OK)
+        return retVal;
+
+    if ((retVal = rtl8367c_setAsicRegBits(0x1304, 0x7000, 0)) != RT_ERR_OK)
+        return retVal;
+    if ((retVal = rtl8367c_setAsicRegBits(0x1304, 0x700, 7)) != RT_ERR_OK)
+        return retVal;
+    if ((retVal = rtl8367c_setAsicRegBits(0x13f9, 0x38, 0)) != RT_ERR_OK)
+        return retVal;
+
+    /*EXT2 PAD Para*/
+    if ((retVal = rtl8367c_setAsicRegBit(0x1303, 10, 1)) != RT_ERR_OK)
+        return retVal;
+    if ((retVal = rtl8367c_setAsicRegBits(0x13E2, 0x1ff, 0x26)) != RT_ERR_OK)
+        return retVal;
+    if ((retVal = rtl8367c_setAsicRegBits(0x13f9, 0x1c0, 0)) != RT_ERR_OK)
+        return retVal;
+
+
+    /*SDS PATCH*/
+    /*SP_CFG_EN_LINK_FIB1G*/
+    if((retVal = rtl8367c_getAsicSdsReg(0, 4, 0, &regData)) != RT_ERR_OK)
+        return retVal;
+    regData |= 0x4;
+    if((retVal = rtl8367c_setAsicSdsReg(0,4,0, regData)) != RT_ERR_OK)
+        return retVal;
+
+    /*FIB100 Down-speed*/
+    if((retVal = rtl8367c_getAsicSdsReg(0, 1, 0, &regData)) != RT_ERR_OK)
+        return retVal;
+    regData |= 0x20;
+    if((retVal = rtl8367c_setAsicSdsReg(0,1,0, regData)) != RT_ERR_OK)
+        return retVal;
+
+    /*gphy endurance crc patch*/
+    if((retVal = rtl8367c_setAsicPHYSram(1, 0x8016, 0xb00)) != RT_ERR_OK)
+        return retVal;
+    if((retVal = rtl8367c_setAsicPHYSram(3, 0x8016, 0xb00)) != RT_ERR_OK)
+        return retVal;
+    if((retVal = rtl8367c_setAsicPHYSram(1, 0x83a7, 0x160c)) != RT_ERR_OK)
+        return retVal;
+    if((retVal = rtl8367c_setAsicPHYSram(3, 0x83a7, 0x160c)) != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_switch_init_8363sc_vb(void)
+{
+    ret_t retVal;
+    rtk_uint32 regData;
+
+    /* Enable EEE, both MAC and PHY */
+
+    if ((retVal = rtl8367c_setAsicRegBits(0x38, 0x300, 3)) != RT_ERR_OK)
+        return retVal;
+    if ((retVal = rtl8367c_setAsicRegBits(0x78, 0x300, 3)) != RT_ERR_OK)
+        return retVal;
+    if ((retVal = rtl8367c_setAsicRegBits(0xd8, 0x300, 0)) != RT_ERR_OK)
+        return retVal;
+    if ((retVal = rtl8367c_setAsicRegBits(0xf8, 0x300, 0)) != RT_ERR_OK)
+        return retVal;
+
+    if ((retVal = rtl8367c_setAsicPHYOCPReg(1, 0xa5d0, 6)) != RT_ERR_OK)
+        return retVal;
+    if ((retVal = rtl8367c_setAsicPHYOCPReg(3, 0xa5d0, 6)) != RT_ERR_OK)
+        return retVal;
+
+    /*PAD para*/
+
+    /*EXT1 PAD Para*/
+    if ((retVal = rtl8367c_getAsicReg(0x1303, &regData)) != RT_ERR_OK)
+        return retVal;
+    regData &= 0xFFFFFFFE;
+    regData |= 0x250;
+    if((retVal = rtl8367c_setAsicReg(0x1303, regData)) != RT_ERR_OK)
+        return retVal;
+
+    if ((retVal = rtl8367c_setAsicRegBits(0x1304, 0x7000, 0)) != RT_ERR_OK)
+        return retVal;
+    if ((retVal = rtl8367c_setAsicRegBits(0x1304, 0x700, 7)) != RT_ERR_OK)
+        return retVal;
+    if ((retVal = rtl8367c_setAsicRegBits(0x13f9, 0x38, 0)) != RT_ERR_OK)
+        return retVal;
+
+    /*EXT2 PAD Para*/
+    if ((retVal = rtl8367c_setAsicRegBit(0x1303, 10, 1)) != RT_ERR_OK)
+        return retVal;
+    if ((retVal = rtl8367c_setAsicRegBits(0x13E2, 0x1ff, 0x26)) != RT_ERR_OK)
+        return retVal;
+    if ((retVal = rtl8367c_setAsicRegBits(0x13f9, 0x1c0, 0)) != RT_ERR_OK)
+        return retVal;
+
+
+    /*SDS PATCH*/
+    /*SP_CFG_EN_LINK_FIB1G*/
+    if((retVal = rtl8367c_getAsicSdsReg(0, 4, 0, &regData)) != RT_ERR_OK)
+        return retVal;
+    regData |= 0x4;
+    if((retVal = rtl8367c_setAsicSdsReg(0,4,0, regData)) != RT_ERR_OK)
+        return retVal;
+
+    /*FIB100 Down-speed*/
+    if((retVal = rtl8367c_getAsicSdsReg(0, 1, 0, &regData)) != RT_ERR_OK)
+        return retVal;
+    regData |= 0x20;
+    if((retVal = rtl8367c_setAsicSdsReg(0,1,0, regData)) != RT_ERR_OK)
+        return retVal;
+
+    /*gphy endurance crc patch*/
+    if((retVal = rtl8367c_setAsicPHYSram(1, 0x8016, 0xb00)) != RT_ERR_OK)
+        return retVal;
+    if((retVal = rtl8367c_setAsicPHYSram(3, 0x8016, 0xb00)) != RT_ERR_OK)
+        return retVal;
+    if((retVal = rtl8367c_setAsicPHYSram(1, 0x83a7, 0x160c)) != RT_ERR_OK)
+        return retVal;
+    if((retVal = rtl8367c_setAsicPHYSram(3, 0x83a7, 0x160c)) != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_switch_init(void)
+{
+    rtk_uint32  retVal;
+    rtl8367c_rma_t rmaCfg;
+    switch_chip_t   switchChip;
+
+    /* probe switch */
+    if((retVal = rtk_switch_probe(&switchChip)) != RT_ERR_OK)
+        return retVal;
+
+    /* Set initial state */
+
+    if((retVal = rtk_switch_initialState_set(INIT_COMPLETED)) != RT_ERR_OK)
+        return retVal;
+
+    /* Initial */
+    switch(switchChip)
+    {
+        case CHIP_RTL8367C:
+            if((retVal = _rtk_switch_init_8367c()) != RT_ERR_OK)
+                return retVal;
+            break;
+        case CHIP_RTL8370B:
+            if((retVal = _rtk_switch_init_8370b()) != RT_ERR_OK)
+                return retVal;
+            break;
+        case CHIP_RTL8364B:
+            if((retVal = _rtk_switch_init_8364b()) != RT_ERR_OK)
+                return retVal;
+            break;
+        case CHIP_RTL8363SC_VB:
+            if((retVal = _rtk_switch_init_8363sc_vb()) != RT_ERR_OK)
+                return retVal;
+            break;
+        default:
+            return RT_ERR_CHIP_NOT_FOUND;
+    }
+
+    /* Set old max packet length to 16K */
+    if((retVal = rtl8367c_setAsicRegBits(RTL8367C_REG_MAX_LENGTH_LIMINT_IPG, RTL8367C_MAX_LENTH_CTRL_MASK, 3)) != RT_ERR_OK)
+        return retVal;
+
+    if((retVal = rtl8367c_setAsicRegBits(RTL8367C_REG_MAX_LEN_RX_TX, RTL8367C_MAX_LEN_RX_TX_MASK, 3)) != RT_ERR_OK)
+        return retVal;
+
+    /* ACL Mode */
+    if((retVal = rtl8367c_setAsicRegBits(RTL8367C_REG_ACL_ACCESS_MODE, RTL8367C_ACL_ACCESS_MODE_MASK, 1)) != RT_ERR_OK)
+        return retVal;
+
+    /* Max rate */
+    if((retVal = rtl8367c_setAsicPortIngressBandwidth(rtk_switch_port_L2P_get(halCtrl->hsg_logical_port), RTL8367C_QOS_RATE_INPUT_MAX_HSG>>3, DISABLED, ENABLED)) != RT_ERR_OK)
+        return retVal;
+
+    if ((retVal = rtl8367c_setAsicPortEgressRate(rtk_switch_port_L2P_get(halCtrl->hsg_logical_port), RTL8367C_QOS_RATE_INPUT_MAX_HSG>>3)) != RT_ERR_OK)
+        return retVal;
+
+    if ((retVal = rtl8367c_setAsicPortEgressRateIfg(ENABLED)) != RT_ERR_OK)
+        return retVal;
+
+    if((retVal = rtl8367c_setAsicReg(0x03fa, 0x0007)) != RT_ERR_OK)
+        return retVal;
+
+    /* Change unknown DA to per port setting */
+    if((retVal = rtl8367c_setAsicRegBits(RTL8367C_PORT_SECURIT_CTRL_REG, RTL8367C_UNKNOWN_UNICAST_DA_BEHAVE_MASK, 3)) != RT_ERR_OK)
+        return retVal;
+
+    /* LUT lookup OP = 1 */
+    if ((retVal = rtl8367c_setAsicLutIpLookupMethod(1))!=RT_ERR_OK)
+        return retVal;
+
+    /* Set RMA */
+    rmaCfg.portiso_leaky = 0;
+    rmaCfg.vlan_leaky = 0;
+    rmaCfg.keep_format = 0;
+    rmaCfg.trap_priority = 0;
+    rmaCfg.discard_storm_filter = 0;
+    rmaCfg.operation = 0;
+    if ((retVal = rtl8367c_setAsicRma(2, &rmaCfg))!=RT_ERR_OK)
+        return retVal;
+
+    /* Enable TX Mirror isolation leaky */
+    if ((retVal = rtl8367c_setAsicPortMirrorIsolationTxLeaky(ENABLED)) != RT_ERR_OK)
+        return retVal;
+
+    /* INT EN */
+    if((retVal = rtl8367c_setAsicRegBit(RTL8367C_REG_IO_MISC_FUNC, RTL8367C_INT_EN_OFFSET, 1)) != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
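+/*
+ * Usage sketch (illustrative only, excluded from the build): the minimal
+ * bring-up sequence a caller is expected to perform. The probe and the
+ * per-chip defaults above are all driven by rtk_switch_init() below.
+ */
+#if 0
+static int example_switch_bringup(void)
+{
+    /* Probe the chip and apply the per-chip default configuration */
+    if (rtk_switch_init() != RT_ERR_OK)
+        return -1;
+
+    /* From here on, RTK_CHK_INIT_STATE() in every API entry succeeds */
+    return 0;
+}
+#endif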
+
+static rtk_api_ret_t _rtk_switch_portMaxPktLen_set(rtk_port_t port, rtk_switch_maxPktLen_linkSpeed_t speed, rtk_uint32 cfgId)
+{
+    rtk_api_ret_t retVal;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    /* Check Port Valid */
+    RTK_CHK_PORT_VALID(port);
+
+    if(speed >= MAXPKTLEN_LINK_SPEED_END)
+        return RT_ERR_INPUT;
+
+    if(cfgId > MAXPKTLEN_CFG_ID_MAX)
+        return RT_ERR_INPUT;
+
+    if((retVal = rtl8367c_setAsicMaxLength(rtk_switch_port_L2P_get(port), (rtk_uint32)speed, cfgId)) != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_switch_portMaxPktLen_get(rtk_port_t port, rtk_switch_maxPktLen_linkSpeed_t speed, rtk_uint32 *pCfgId)
+{
+    rtk_api_ret_t retVal;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    /* Check Port Valid */
+    RTK_CHK_PORT_VALID(port);
+
+    if(speed >= MAXPKTLEN_LINK_SPEED_END)
+        return RT_ERR_INPUT;
+
+    if(NULL == pCfgId)
+        return RT_ERR_NULL_POINTER;
+
+    if((retVal = rtl8367c_getAsicMaxLength(rtk_switch_port_L2P_get(port), (rtk_uint32)speed, pCfgId)) != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_switch_maxPktLenCfg_set(rtk_uint32 cfgId, rtk_uint32 pktLen)
+{
+    rtk_api_ret_t retVal;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if(cfgId > MAXPKTLEN_CFG_ID_MAX)
+        return RT_ERR_INPUT;
+
+    if(pktLen > RTK_SWITCH_MAX_PKTLEN)
+        return RT_ERR_INPUT;
+
+    if((retVal = rtl8367c_setAsicMaxLengthCfg(cfgId, pktLen)) != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_switch_maxPktLenCfg_get(rtk_uint32 cfgId, rtk_uint32 *pPktLen)
+{
+    rtk_api_ret_t retVal;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if(cfgId > MAXPKTLEN_CFG_ID_MAX)
+        return RT_ERR_INPUT;
+
+    if(NULL == pPktLen)
+        return RT_ERR_NULL_POINTER;
+
+    if((retVal = rtl8367c_getAsicMaxLengthCfg(cfgId, pPktLen)) != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_switch_greenEthernet_set(rtk_enable_t enable)
+{
+    rtk_api_ret_t retVal;
+    rtk_uint32 port;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    RTK_SCAN_ALL_LOG_PORT(port)
+    {
+        if(rtk_switch_isUtpPort(port) == RT_ERR_OK)
+        {
+            if ((retVal = rtl8367c_setAsicPowerSaving(rtk_switch_port_L2P_get(port),enable))!=RT_ERR_OK)
+                return retVal;
+
+            if ((retVal = rtl8367c_setAsicGreenEthernet(rtk_switch_port_L2P_get(port), enable))!=RT_ERR_OK)
+                return retVal;
+        }
+    }
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_switch_greenEthernet_get(rtk_enable_t *pEnable)
+{
+    rtk_api_ret_t retVal;
+    rtk_uint32 port;
+    rtk_uint32 state;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    RTK_SCAN_ALL_LOG_PORT(port)
+    {
+        if(rtk_switch_isUtpPort(port) == RT_ERR_OK)
+        {
+            if ((retVal = rtl8367c_getAsicPowerSaving(rtk_switch_port_L2P_get(port), &state))!=RT_ERR_OK)
+                return retVal;
+
+            if(state == DISABLED)
+            {
+                *pEnable = DISABLED;
+                return RT_ERR_OK;
+            }
+
+            if ((retVal = rtl8367c_getAsicGreenEthernet(rtk_switch_port_L2P_get(port), &state))!=RT_ERR_OK)
+                return retVal;
+
+            if(state == DISABLED)
+            {
+                *pEnable = DISABLED;
+                return RT_ERR_OK;
+            }
+        }
+    }
+
+    *pEnable = ENABLED;
+    return RT_ERR_OK;
+}
+
+/* Function Name:
+ *      rtk_switch_probe
+ * Description:
+ *      Probe switch
+ * Input:
+ *      None
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK       - Switch probed
+ *      RT_ERR_FAILED   - Switch not probed.
+ * Note:
+ *
+ */
+rtk_api_ret_t rtk_switch_probe(switch_chip_t *pSwitchChip)
+{
+#if defined(FORCE_PROBE_RTL8367C)
+
+    *pSwitchChip = CHIP_RTL8367C;
+    halCtrl = &rtl8367c_hal_Ctrl;
+
+#elif defined(FORCE_PROBE_RTL8370B)
+
+    *pSwitchChip = CHIP_RTL8370B;
+    halCtrl = &rtl8370b_hal_Ctrl;
+
+#elif defined(FORCE_PROBE_RTL8364B)
+
+    *pSwitchChip = CHIP_RTL8364B;
+    halCtrl = &rtl8364b_hal_Ctrl;
+
+#elif defined(FORCE_PROBE_RTL8363SC_VB)
+
+    *pSwitchChip = CHIP_RTL8363SC_VB;
+    halCtrl = &rtl8363sc_vb_hal_Ctrl;
+
+#else
+    rtk_uint32 retVal;
+    rtk_uint32 data, regValue;
+
+    if((retVal = rtl8367c_setAsicReg(0x13C2, 0x0249)) != RT_ERR_OK)
+        return retVal;
+
+    if((retVal = rtl8367c_getAsicReg(0x1300, &data)) != RT_ERR_OK)
+        return retVal;
+
+    if((retVal = rtl8367c_getAsicReg(0x1301, &regValue)) != RT_ERR_OK)
+        return retVal;
+
+    if((retVal = rtl8367c_setAsicReg(0x13C2, 0x0000)) != RT_ERR_OK)
+        return retVal;
+
+    switch (data)
+    {
+        case 0x0276:
+        case 0x0597:
+        case 0x6367:
+            *pSwitchChip = CHIP_RTL8367C;
+            halCtrl = &rtl8367c_hal_Ctrl;
+            break;
+        case 0x0652:
+        case 0x6368:
+            *pSwitchChip = CHIP_RTL8370B;
+            halCtrl = &rtl8370b_hal_Ctrl;
+            break;
+        case 0x0801:
+        case 0x6511:
+            if( (regValue & 0x00F0) == 0x0080)
+            {
+                *pSwitchChip = CHIP_RTL8363SC_VB;
+                halCtrl = &rtl8363sc_vb_hal_Ctrl;
+            }
+            else
+            {
+                *pSwitchChip = CHIP_RTL8364B;
+                halCtrl = &rtl8364b_hal_Ctrl;
+            }
+            break;
+        default:
+            return RT_ERR_FAILED;
+    }
+#endif
+
+    return RT_ERR_OK;
+}
+
+/* Function Name:
+ *      rtk_switch_initialState_set
+ * Description:
+ *      Set initial status
+ * Input:
+ *      state   - Initial state
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK       - State set
+ *      RT_ERR_FAILED   - Invalid state
+ * Note:
+ *
+ */
+rtk_api_ret_t rtk_switch_initialState_set(init_state_t state)
+{
+    if(state >= INIT_STATE_END)
+        return RT_ERR_FAILED;
+
+    init_state = state;
+    return RT_ERR_OK;
+}
+
+/* Function Name:
+ *      rtk_switch_initialState_get
+ * Description:
+ *      Get initial status
+ * Input:
+ *      None
+ * Output:
+ *      None
+ * Return:
+ *      INIT_COMPLETED     - Initialized
+ *      INIT_NOT_COMPLETED - Uninitialized
+ * Note:
+ *
+ */
+init_state_t rtk_switch_initialState_get(void)
+{
+    return init_state;
+}
+
+/* Function Name:
+ *      rtk_switch_logicalPortCheck
+ * Description:
+ *      Check logical port ID.
+ * Input:
+ *      logicalPort     - logical port ID
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK       - Port ID is correct
+ *      RT_ERR_FAILED   - Port ID is not correct
+ *      RT_ERR_NOT_INIT - Not initialized
+ * Note:
+ *
+ */
+rtk_api_ret_t rtk_switch_logicalPortCheck(rtk_port_t logicalPort)
+{
+    if(init_state != INIT_COMPLETED)
+        return RT_ERR_NOT_INIT;
+
+    if(logicalPort >= RTK_SWITCH_PORT_NUM)
+        return RT_ERR_FAILED;
+
+    if(halCtrl->l2p_port[logicalPort] == 0xFF)
+        return RT_ERR_FAILED;
+
+    return RT_ERR_OK;
+}
+
+/* Function Name:
+ *      rtk_switch_isUtpPort
+ * Description:
+ *      Check whether a logical port is a UTP port
+ * Input:
+ *      logicalPort     - logical port ID
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK       - Port ID is a UTP port
+ *      RT_ERR_FAILED   - Port ID is not a UTP port
+ *      RT_ERR_NOT_INIT - Not initialized
+ * Note:
+ *
+ */
+rtk_api_ret_t rtk_switch_isUtpPort(rtk_port_t logicalPort)
+{
+    if(init_state != INIT_COMPLETED)
+        return RT_ERR_NOT_INIT;
+
+    if(logicalPort >= RTK_SWITCH_PORT_NUM)
+        return RT_ERR_FAILED;
+
+    if(halCtrl->log_port_type[logicalPort] == UTP_PORT)
+        return RT_ERR_OK;
+    else
+        return RT_ERR_FAILED;
+}
+
+/* Function Name:
+ *      rtk_switch_isExtPort
+ * Description:
+ *      Check whether a logical port is an extension port
+ * Input:
+ *      logicalPort     - logical port ID
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK       - Port ID is an EXT port
+ *      RT_ERR_FAILED   - Port ID is not an EXT port
+ *      RT_ERR_NOT_INIT - Not initialized
+ * Note:
+ *
+ */
+rtk_api_ret_t rtk_switch_isExtPort(rtk_port_t logicalPort)
+{
+    if(init_state != INIT_COMPLETED)
+        return RT_ERR_NOT_INIT;
+
+    if(logicalPort >= RTK_SWITCH_PORT_NUM)
+        return RT_ERR_FAILED;
+
+    if(halCtrl->log_port_type[logicalPort] == EXT_PORT)
+        return RT_ERR_OK;
+    else
+        return RT_ERR_FAILED;
+}
+
+
+/* Function Name:
+ *      rtk_switch_isHsgPort
+ * Description:
+ *      Check whether a logical port is an HSG port
+ * Input:
+ *      logicalPort     - logical port ID
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK       - Port ID is an HSG port
+ *      RT_ERR_FAILED   - Port ID is not an HSG port
+ *      RT_ERR_NOT_INIT - Not initialized
+ * Note:
+ *
+ */
+rtk_api_ret_t rtk_switch_isHsgPort(rtk_port_t logicalPort)
+{
+    if(init_state != INIT_COMPLETED)
+        return RT_ERR_NOT_INIT;
+
+    if(logicalPort >= RTK_SWITCH_PORT_NUM)
+        return RT_ERR_FAILED;
+
+    if(logicalPort == halCtrl->hsg_logical_port)
+        return RT_ERR_OK;
+    else
+        return RT_ERR_FAILED;
+}
+
+/* Function Name:
+ *      rtk_switch_isSgmiiPort
+ * Description:
+ *      Check whether a logical port is an SGMII port
+ * Input:
+ *      logicalPort     - logical port ID
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK       - Port ID is an SGMII port
+ *      RT_ERR_FAILED   - Port ID is not an SGMII port
+ *      RT_ERR_NOT_INIT - Not initialized
+ * Note:
+ *
+ */
+rtk_api_ret_t rtk_switch_isSgmiiPort(rtk_port_t logicalPort)
+{
+    if(init_state != INIT_COMPLETED)
+        return RT_ERR_NOT_INIT;
+
+    if(logicalPort >= RTK_SWITCH_PORT_NUM)
+        return RT_ERR_FAILED;
+
+    if( ((0x01 << logicalPort) & halCtrl->sg_logical_portmask) != 0)
+        return RT_ERR_OK;
+    else
+        return RT_ERR_FAILED;
+}
+
+/* Function Name:
+ *      rtk_switch_isCPUPort
+ * Description:
+ *      Check whether a logical port is a CPU port
+ * Input:
+ *      logicalPort     - logical port ID
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK       - Port ID is a CPU port
+ *      RT_ERR_FAILED   - Port ID is not a CPU port
+ *      RT_ERR_NOT_INIT - Not initialized
+ * Note:
+ *
+ */
+rtk_api_ret_t rtk_switch_isCPUPort(rtk_port_t logicalPort)
+{
+    if(init_state != INIT_COMPLETED)
+        return RT_ERR_NOT_INIT;
+
+    if(logicalPort >= RTK_SWITCH_PORT_NUM)
+        return RT_ERR_FAILED;
+
+    if( ((0x01 << logicalPort) & halCtrl->valid_cpu_portmask) != 0)
+        return RT_ERR_OK;
+    else
+        return RT_ERR_FAILED;
+}
+
+/* Function Name:
+ *      rtk_switch_isComboPort
+ * Description:
+ *      Check whether a logical port is a combo port
+ * Input:
+ *      logicalPort     - logical port ID
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK       - Port ID is a combo port
+ *      RT_ERR_FAILED   - Port ID is not a combo port
+ *      RT_ERR_NOT_INIT - Not initialized
+ * Note:
+ *
+ */
+rtk_api_ret_t rtk_switch_isComboPort(rtk_port_t logicalPort)
+{
+    if(init_state != INIT_COMPLETED)
+        return RT_ERR_NOT_INIT;
+
+    if(logicalPort >= RTK_SWITCH_PORT_NUM)
+        return RT_ERR_FAILED;
+
+    if(halCtrl->combo_logical_port == logicalPort)
+        return RT_ERR_OK;
+    else
+        return RT_ERR_FAILED;
+}
+
+/* Function Name:
+ *      rtk_switch_ComboPort_get
+ * Description:
+ *      Get Combo port ID
+ * Input:
+ *      None
+ * Output:
+ *      None
+ * Return:
+ *      Port ID of combo port
+ * Note:
+ *
+ */
+rtk_uint32 rtk_switch_ComboPort_get(void)
+{
+    return halCtrl->combo_logical_port;
+}
+
+/* Function Name:
+ *      rtk_switch_isPtpPort
+ * Description:
+ *      Check whether a logical port is a PTP port
+ * Input:
+ *      logicalPort     - logical port ID
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK       - Port ID is a PTP port
+ *      RT_ERR_FAILED   - Port ID is not a PTP port
+ *      RT_ERR_NOT_INIT - Not initialized
+ * Note:
+ *
+ */
+rtk_api_ret_t rtk_switch_isPtpPort(rtk_port_t logicalPort)
+{
+    if(init_state != INIT_COMPLETED)
+        return RT_ERR_NOT_INIT;
+
+    if(logicalPort >= RTK_SWITCH_PORT_NUM)
+        return RT_ERR_FAILED;
+
+    if(halCtrl->ptp_port[logicalPort] == 1)
+        return RT_ERR_OK;
+    else
+        return RT_ERR_FAILED;
+}
+
+/* Function Name:
+ *      rtk_switch_port_L2P_get
+ * Description:
+ *      Get physical port ID
+ * Input:
+ *      logicalPort       - logical port ID
+ * Output:
+ *      None
+ * Return:
+ *      Physical port ID
+ * Note:
+ *
+ */
+rtk_uint32 rtk_switch_port_L2P_get(rtk_port_t logicalPort)
+{
+    if(init_state != INIT_COMPLETED)
+        return UNDEFINE_PHY_PORT;
+
+    if(logicalPort >= RTK_SWITCH_PORT_NUM)
+        return UNDEFINE_PHY_PORT;
+
+    return (halCtrl->l2p_port[logicalPort]);
+}
+
+/* Function Name:
+ *      rtk_switch_port_P2L_get
+ * Description:
+ *      Get logical port ID
+ * Input:
+ *      physicalPort       - physical port ID
+ * Output:
+ *      None
+ * Return:
+ *      logical port ID
+ * Note:
+ *
+ */
+rtk_port_t rtk_switch_port_P2L_get(rtk_uint32 physicalPort)
+{
+    if(init_state != INIT_COMPLETED)
+        return UNDEFINE_PORT;
+
+    if(physicalPort >= RTK_SWITCH_PORT_NUM)
+        return UNDEFINE_PORT;
+
+    return (halCtrl->p2l_port[physicalPort]);
+}
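+/*
+ * Illustrative only (excluded from the build): the two translations are
+ * inverses for every defined port; on the RTL8367C tables above, EXT_PORT0
+ * maps to physical MAC 6 and back.
+ */
+#if 0
+static void example_port_translation(void)
+{
+    rtk_uint32 phyPort = rtk_switch_port_L2P_get(EXT_PORT0); /* 6 on RTL8367C */
+    rtk_port_t logPort = rtk_switch_port_P2L_get(phyPort);   /* EXT_PORT0 */
+    (void)logPort;
+}
+#endif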
+
+/* Function Name:
+ *      rtk_switch_isPortMaskValid
+ * Description:
+ *      Check whether a portmask is valid
+ * Input:
+ *      pPmask       - logical port mask
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - port mask is valid
+ *      RT_ERR_FAILED       - port mask is not valid
+ *      RT_ERR_NOT_INIT     - Not initialized
+ *      RT_ERR_NULL_POINTER - Null pointer
+ * Note:
+ *
+ */
+rtk_api_ret_t rtk_switch_isPortMaskValid(rtk_portmask_t *pPmask)
+{
+    if(init_state != INIT_COMPLETED)
+        return RT_ERR_NOT_INIT;
+
+    if(NULL == pPmask)
+        return RT_ERR_NULL_POINTER;
+
+    if( (pPmask->bits[0] | halCtrl->valid_portmask) != halCtrl->valid_portmask )
+        return RT_ERR_FAILED;
+    else
+        return RT_ERR_OK;
+}
+
+/* Function Name:
+ *      rtk_switch_isPortMaskUtp
+ * Description:
+ *      Check that all ports in the portmask are UTP ports
+ * Input:
+ *      pPmask       - logical port mask
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - Only UTP ports in port mask
+ *      RT_ERR_FAILED       - Port mask contains non-UTP ports
+ *      RT_ERR_NOT_INIT     - Not initialized
+ *      RT_ERR_NULL_POINTER - Null pointer
+ * Note:
+ *
+ */
+rtk_api_ret_t rtk_switch_isPortMaskUtp(rtk_portmask_t *pPmask)
+{
+    if(init_state != INIT_COMPLETED)
+        return RT_ERR_NOT_INIT;
+
+    if(NULL == pPmask)
+        return RT_ERR_NULL_POINTER;
+
+    if( (pPmask->bits[0] | halCtrl->valid_utp_portmask) != halCtrl->valid_utp_portmask )
+        return RT_ERR_FAILED;
+    else
+        return RT_ERR_OK;
+}
+
+/* Function Name:
+ *      rtk_switch_isPortMaskExt
+ * Description:
+ *      Check that all ports in the portmask are EXT ports
+ * Input:
+ *      pPmask       - logical port mask
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - Only EXT ports in port mask
+ *      RT_ERR_FAILED       - Port mask contains non-EXT ports
+ *      RT_ERR_NOT_INIT     - Not initialized
+ *      RT_ERR_NULL_POINTER - Null pointer
+ * Note:
+ *
+ */
+rtk_api_ret_t rtk_switch_isPortMaskExt(rtk_portmask_t *pPmask)
+{
+    if(init_state != INIT_COMPLETED)
+        return RT_ERR_NOT_INIT;
+
+    if(NULL == pPmask)
+        return RT_ERR_NULL_POINTER;
+
+    if( (pPmask->bits[0] | halCtrl->valid_ext_portmask) != halCtrl->valid_ext_portmask )
+        return RT_ERR_FAILED;
+    else
+        return RT_ERR_OK;
+}
+
+/* Function Name:
+ *      rtk_switch_portmask_L2P_get
+ * Description:
+ *      Get physical portmask from logical portmask
+ * Input:
+ *      pLogicalPmask       - logical port mask
+ * Output:
+ *      pPhysicalPortmask   - physical port mask
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_NOT_INIT     - Not initialized
+ *      RT_ERR_NULL_POINTER - Null pointer
+ *      RT_ERR_PORT_MASK    - Invalid port mask
+ * Note:
+ *
+ */
+rtk_api_ret_t rtk_switch_portmask_L2P_get(rtk_portmask_t *pLogicalPmask, rtk_uint32 *pPhysicalPortmask)
+{
+    rtk_uint32 log_port, phy_port;
+
+    if(init_state != INIT_COMPLETED)
+        return RT_ERR_NOT_INIT;
+
+    if(NULL == pLogicalPmask)
+        return RT_ERR_NULL_POINTER;
+
+    if(NULL == pPhysicalPortmask)
+        return RT_ERR_NULL_POINTER;
+
+    if(rtk_switch_isPortMaskValid(pLogicalPmask) != RT_ERR_OK)
+        return RT_ERR_PORT_MASK;
+
+    /* reset physical port mask */
+    *pPhysicalPortmask = 0;
+
+    RTK_PORTMASK_SCAN((*pLogicalPmask), log_port)
+    {
+        phy_port = rtk_switch_port_L2P_get((rtk_port_t)log_port);
+        *pPhysicalPortmask |= (0x0001 << phy_port);
+    }
+
+    return RT_ERR_OK;
+}
+
+/* Function Name:
+ *      rtk_switch_portmask_P2L_get
+ * Description:
+ *      Get logical portmask from physical portmask
+ * Input:
+ *      physicalPortmask    - physical port mask
+ * Output:
+ *      pLogicalPmask       - logical port mask
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_NOT_INIT     - Not initialized
+ *      RT_ERR_NULL_POINTER - Null pointer
+ *      RT_ERR_PORT_MASK    - Invalid port mask
+ * Note:
+ *
+ */
+rtk_api_ret_t rtk_switch_portmask_P2L_get(rtk_uint32 physicalPortmask, rtk_portmask_t *pLogicalPmask)
+{
+    rtk_uint32 log_port, phy_port;
+
+    if(init_state != INIT_COMPLETED)
+        return RT_ERR_NOT_INIT;
+
+    if(NULL == pLogicalPmask)
+        return RT_ERR_NULL_POINTER;
+
+    RTK_PORTMASK_CLEAR(*pLogicalPmask);
+
+    for(phy_port = halCtrl->min_phy_port; phy_port <= halCtrl->max_phy_port; phy_port++)
+    {
+        if(physicalPortmask & (0x0001 << phy_port))
+        {
+            log_port = rtk_switch_port_P2L_get(phy_port);
+            if(log_port != UNDEFINE_PORT)
+            {
+                RTK_PORTMASK_PORT_SET(*pLogicalPmask, log_port);
+            }
+        }
+    }
+
+    return RT_ERR_OK;
+}
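+/*
+ * Illustrative only (excluded from the build): building a logical portmask
+ * with the RTK_PORTMASK_* helpers and converting it, as a VLAN or ACL
+ * caller would. On the RTL8367C this yields 0x41: physical MACs 0 and 6.
+ */
+#if 0
+static rtk_api_ret_t example_portmask_conversion(void)
+{
+    rtk_portmask_t lmask;
+    rtk_uint32     pmask;
+
+    RTK_PORTMASK_CLEAR(lmask);
+    RTK_PORTMASK_PORT_SET(lmask, UTP_PORT0);
+    RTK_PORTMASK_PORT_SET(lmask, EXT_PORT0);
+
+    return rtk_switch_portmask_L2P_get(&lmask, &pmask);
+}
+#endif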
+
+/* Function Name:
+ *      rtk_switch_phyPortMask_get
+ * Description:
+ *      Get physical portmask
+ * Input:
+ *      None
+ * Output:
+ *      None
+ * Return:
+ *      0x00                - Not initialized
+ *      Other value         - Physical port mask
+ * Note:
+ *
+ */
+rtk_uint32 rtk_switch_phyPortMask_get(void)
+{
+    if(init_state != INIT_COMPLETED)
+        return 0x00; /* No port in portmask */
+
+    return (halCtrl->phy_portmask);
+}
+
+/* Function Name:
+ *      rtk_switch_logPortMask_get
+ * Description:
+ *      Get Logical portmask
+ * Input:
+ *      None
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_NOT_INIT     - Not initialized
+ *      RT_ERR_NULL_POINTER - Null pointer
+ * Note:
+ *
+ */
+rtk_api_ret_t rtk_switch_logPortMask_get(rtk_portmask_t *pPortmask)
+{
+    if(init_state != INIT_COMPLETED)
+        return RT_ERR_NOT_INIT;
+
+    if(NULL == pPortmask)
+        return RT_ERR_NULL_POINTER;
+
+    pPortmask->bits[0] = halCtrl->valid_portmask;
+    return RT_ERR_OK;
+}
+
+/* Function Name:
+ *      rtk_switch_init
+ * Description:
+ *      Set chip to default configuration environment
+ * Input:
+ *      None
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ * Note:
+ *      The API sets chip registers to the default configuration for the probed chip model.
+ */
+rtk_api_ret_t rtk_switch_init(void)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_switch_init();
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_switch_portMaxPktLen_set
+ * Description:
+ *      Set Max packet length
+ * Input:
+ *      port    - Port ID
+ *      speed   - Speed
+ *      cfgId   - Configuration ID
+ * Output:
+ *      None.
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_INPUT        - Error Input
+ * Note:
+ */
+rtk_api_ret_t rtk_switch_portMaxPktLen_set(rtk_port_t port, rtk_switch_maxPktLen_linkSpeed_t speed, rtk_uint32 cfgId)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_switch_portMaxPktLen_set(port, speed, cfgId);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_switch_portMaxPktLen_get
+ * Description:
+ *      Get Max packet length
+ * Input:
+ *      port    - Port ID
+ *      speed   - Speed
+ * Output:
+ *      pCfgId  - Configuration ID
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_INPUT        - Error Input
+ * Note:
+ */
+rtk_api_ret_t rtk_switch_portMaxPktLen_get(rtk_port_t port, rtk_switch_maxPktLen_linkSpeed_t speed, rtk_uint32 *pCfgId)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_switch_portMaxPktLen_get(port, speed, pCfgId);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_switch_maxPktLenCfg_set
+ * Description:
+ *      Set Max packet length configuration
+ * Input:
+ *      cfgId   - Configuration ID
+ *      pktLen  - Max packet length
+ * Output:
+ *      None.
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_INPUT        - Error Input
+ * Note:
+ */
+rtk_api_ret_t rtk_switch_maxPktLenCfg_set(rtk_uint32 cfgId, rtk_uint32 pktLen)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_switch_maxPktLenCfg_set(cfgId, pktLen);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_switch_maxPktLenCfg_get
+ * Description:
+ *      Get Max packet length configuration
+ * Input:
+ *      cfgId   - Configuration ID
+ * Output:
+ *      pPktLen - Max packet length
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_INPUT        - Error Input
+ * Note:
+ */
+rtk_api_ret_t rtk_switch_maxPktLenCfg_get(rtk_uint32 cfgId, rtk_uint32 *pPktLen)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_switch_maxPktLenCfg_get(cfgId, pPktLen);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_switch_greenEthernet_set
+ * Description:
+ *      Set the Green Ethernet state of all ports.
+ * Input:
+ *      enable - Green Ethernet state.
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK       - OK
+ *      RT_ERR_FAILED   - Failed
+ *      RT_ERR_SMI      - SMI access error
+ *      RT_ERR_ENABLE   - Invalid enable input.
+ * Note:
+ *      This API sets the Green Ethernet state of all ports.
+ *      The configuration is one of the following:
+ *      - DISABLE
+ *      - ENABLE
+ */
+rtk_api_ret_t rtk_switch_greenEthernet_set(rtk_enable_t enable)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_switch_greenEthernet_set(enable);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_switch_greenEthernet_get
+ * Description:
+ *      Get the Green Ethernet state of all ports.
+ * Input:
+ *      None
+ * Output:
+ *      pEnable - Green Ethernet state.
+ * Return:
+ *      RT_ERR_OK              - OK
+ *      RT_ERR_FAILED          - Failed
+ *      RT_ERR_SMI             - SMI access error
+ * Note:
+ *      This API gets the Green Ethernet state (DISABLED if any UTP port has it disabled).
+ */
+rtk_api_ret_t rtk_switch_greenEthernet_get(rtk_enable_t *pEnable)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_switch_greenEthernet_get(pEnable);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_switch_maxLogicalPort_get
+ * Description:
+ *      Get Max logical port ID
+ * Input:
+ *      None
+ * Output:
+ *      None
+ * Return:
+ *      Max logical port
+ * Note:
+ *      This API can get max logical port
+ */
+rtk_port_t rtk_switch_maxLogicalPort_get(void)
+{
+    rtk_port_t port, maxLogicalPort = 0;
+
+    /* Check initialization state */
+    if(rtk_switch_initialState_get() != INIT_COMPLETED)
+    {
+        return UNDEFINE_PORT;
+    }
+
+    for(port = 0; port < RTK_SWITCH_PORT_NUM; port++)
+    {
+        if( (halCtrl->log_port_type[port] == UTP_PORT) || (halCtrl->log_port_type[port] == EXT_PORT) )
+            maxLogicalPort = port;
+    }
+
+    return maxLogicalPort;
+}
+
+/* Function Name:
+ *      rtk_switch_maxMeterId_get
+ * Description:
+ *      Get Max Meter ID
+ * Input:
+ *      None
+ * Output:
+ *      None
+ * Return:
+ *      0x00                - Not initialized
+ *      Other value         - Max Meter ID
+ * Note:
+ *
+ */
+rtk_uint32 rtk_switch_maxMeterId_get(void)
+{
+    if(init_state != INIT_COMPLETED)
+        return 0x00;
+
+    return (halCtrl->max_meter_id);
+}
+
+/* Function Name:
+ *      rtk_switch_maxLutAddrNumber_get
+ * Description:
+ *      Get Max LUT Address number
+ * Input:
+ *      None
+ * Output:
+ *      None
+ * Return:
+ *      0x00                - Not initialized
+ *      Other value         - Max LUT Address number
+ * Note:
+ *
+ */
+rtk_uint32 rtk_switch_maxLutAddrNumber_get(void)
+{
+    if(init_state != INIT_COMPLETED)
+        return 0x00;
+
+    return (halCtrl->max_lut_addr_num);
+}
+
+/* Function Name:
+ *      rtk_switch_isValidTrunkGrpId
+ * Description:
+ *      Check whether a trunk group ID is valid
+ * Input:
+ *      grpId       - Group ID
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - Trunk Group ID is valid
+ *      RT_ERR_LA_TRUNK_ID  - Trunk Group ID is not valid
+ *      RT_ERR_NOT_INIT     - Not initialized
+ * Note:
+ *
+ */
+rtk_uint32 rtk_switch_isValidTrunkGrpId(rtk_uint32 grpId)
+{
+    if(init_state != INIT_COMPLETED)
+        return RT_ERR_NOT_INIT;
+
+    if( (halCtrl->trunk_group_mask & (0x01 << grpId) ) != 0)
+        return RT_ERR_OK;
+    else
+        return RT_ERR_LA_TRUNK_ID;
+
+}
+
diff -Nruw linux-4.4.115-fbx/drivers/misc/freebox./rtlapi/rtk_switch.h linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/rtk_switch.h
--- linux-4.4.115-fbx/drivers/misc/freebox./rtlapi/rtk_switch.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/rtk_switch.h	2019-01-22 16:16:24.711257346 +0100
@@ -0,0 +1,746 @@
+/*
+ * Copyright(c) Realtek Semiconductor Corporation, 2008
+ * All rights reserved.
+ *
+ * $Revision: 79921 $
+ * $Date: 2017-06-21 16:32:10 +0800 (Wed, 21 Jun 2017) $
+ *
+ * Purpose : Definition function prototype of RTK switch API.
+ *
+ * Feature : Function prototype definition
+ *
+ */
+
+#ifndef __RTK_SWITCH_H__
+#define __RTK_SWITCH_H__
+
+#include <rtk_types.h>
+
+#if defined(RTK_X86_CLE)
+#include <pthread.h>
+#endif
+
+#define UNDEFINE_PHY_PORT   (0xFF)
+#define RTK_SWITCH_PORT_NUM (32)
+
+#define MAXPKTLEN_CFG_ID_MAX (1)
+
+#define RTK_SWITCH_MAX_PKTLEN (0x3FFF)
+
+#if defined(RTK_X86_CLE)
+extern pthread_mutex_t api_mutex;
+#define RTK_API_LOCK()      pthread_mutex_lock(&api_mutex)
+#define RTK_API_UNLOCK()    pthread_mutex_unlock(&api_mutex)
+#else
+#define RTK_API_LOCK()
+#define RTK_API_UNLOCK()
+#endif
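+/*
+ * Outside the RTK_X86_CLE test build these compile away, so serialization
+ * of API calls (if needed) is the caller's responsibility; every public
+ * rtk_* entry in rtk_switch.c brackets its _rtk_* worker with this pair.
+ */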
+
+typedef enum init_state_e
+{
+    INIT_NOT_COMPLETED = 0,
+    INIT_COMPLETED,
+    INIT_STATE_END
+} init_state_t;
+
+typedef enum switch_chip_e
+{
+    CHIP_RTL8367C = 0,
+    CHIP_RTL8370B,
+    CHIP_RTL8364B,
+    CHIP_RTL8363SC_VB,
+    CHIP_END
+}switch_chip_t;
+
+typedef enum port_type_e
+{
+    UTP_PORT = 0,
+    EXT_PORT,
+    UNKNOWN_PORT = 0xFF,
+    PORT_TYPE_END
+}port_type_t;
+
+typedef struct rtk_switch_halCtrl_s
+{
+    switch_chip_t   switch_type;
+    rtk_uint32      l2p_port[RTK_SWITCH_PORT_NUM];
+    rtk_uint32      p2l_port[RTK_SWITCH_PORT_NUM];
+    port_type_t     log_port_type[RTK_SWITCH_PORT_NUM];
+    rtk_uint32      ptp_port[RTK_SWITCH_PORT_NUM];
+    rtk_uint32      valid_portmask;
+    rtk_uint32      valid_utp_portmask;
+    rtk_uint32      valid_ext_portmask;
+    rtk_uint32      valid_cpu_portmask;
+    rtk_uint32      min_phy_port;
+    rtk_uint32      max_phy_port;
+    rtk_uint32      phy_portmask;
+    rtk_uint32      combo_logical_port;
+    rtk_uint32      hsg_logical_port;
+    rtk_uint32      sg_logical_portmask;
+    rtk_uint32      max_meter_id;
+    rtk_uint32      max_lut_addr_num;
+    rtk_uint32      trunk_group_mask;
+
+}rtk_switch_halCtrl_t;
+
+typedef enum rtk_switch_maxPktLen_linkSpeed_e {
+     MAXPKTLEN_LINK_SPEED_FE = 0,
+     MAXPKTLEN_LINK_SPEED_GE,
+     MAXPKTLEN_LINK_SPEED_END,
+} rtk_switch_maxPktLen_linkSpeed_t;
+
+
+/* UTIL MACRO */
+#define RTK_CHK_INIT_STATE()                                \
+    do                                                      \
+    {                                                       \
+        if(rtk_switch_initialState_get() != INIT_COMPLETED) \
+        {                                                   \
+            return RT_ERR_NOT_INIT;                         \
+        }                                                   \
+    }while(0)
+
+#define RTK_CHK_PORT_VALID(__port__)                            \
+    do                                                          \
+    {                                                           \
+        if(rtk_switch_logicalPortCheck(__port__) != RT_ERR_OK)  \
+        {                                                       \
+            return RT_ERR_PORT_ID;                              \
+        }                                                       \
+    }while(0)
+
+#define RTK_CHK_PORT_IS_UTP(__port__)                           \
+    do                                                          \
+    {                                                           \
+        if(rtk_switch_isUtpPort(__port__) != RT_ERR_OK)         \
+        {                                                       \
+            return RT_ERR_PORT_ID;                              \
+        }                                                       \
+    }while(0)
+
+#define RTK_CHK_PORT_IS_EXT(__port__)                           \
+    do                                                          \
+    {                                                           \
+        if(rtk_switch_isExtPort(__port__) != RT_ERR_OK)         \
+        {                                                       \
+            return RT_ERR_PORT_ID;                              \
+        }                                                       \
+    }while(0)
+
+#define RTK_CHK_PORT_IS_COMBO(__port__)                         \
+    do                                                          \
+    {                                                           \
+        if(rtk_switch_isComboPort(__port__) != RT_ERR_OK)       \
+        {                                                       \
+            return RT_ERR_PORT_ID;                              \
+        }                                                       \
+    }while(0)
+
+#define RTK_CHK_PORT_IS_PTP(__port__)                           \
+    do                                                          \
+    {                                                           \
+        if(rtk_switch_isPtpPort(__port__) != RT_ERR_OK)         \
+        {                                                       \
+            return RT_ERR_PORT_ID;                              \
+        }                                                       \
+    }while(0)
+
+#define RTK_CHK_PORTMASK_VALID(__portmask__)                        \
+    do                                                              \
+    {                                                               \
+        if(rtk_switch_isPortMaskValid(__portmask__) != RT_ERR_OK)   \
+        {                                                           \
+            return RT_ERR_PORT_MASK;                                \
+        }                                                           \
+    }while(0)
+
+#define RTK_CHK_PORTMASK_VALID_ONLY_UTP(__portmask__)               \
+    do                                                              \
+    {                                                               \
+        if(rtk_switch_isPortMaskUtp(__portmask__) != RT_ERR_OK)     \
+        {                                                           \
+            return RT_ERR_PORT_MASK;                                \
+        }                                                           \
+    }while(0)
+
+#define RTK_CHK_PORTMASK_VALID_ONLY_EXT(__portmask__)               \
+    do                                                              \
+    {                                                               \
+        if(rtk_switch_isPortMaskExt(__portmask__) != RT_ERR_OK)     \
+        {                                                           \
+            return RT_ERR_PORT_MASK;                                \
+        }                                                           \
+    }while(0)
+
+#define RTK_CHK_TRUNK_GROUP_VALID(__grpId__)                        \
+    do                                                              \
+    {                                                               \
+        if(rtk_switch_isValidTrunkGrpId(__grpId__) != RT_ERR_OK)    \
+        {                                                           \
+            return RT_ERR_LA_TRUNK_ID;                              \
+        }                                                           \
+    }while(0)
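+
+/*
+ * Illustrative sketch (not part of the original header; the function below
+ * is hypothetical): the RTK_CHK_* guard macros are meant to run first in
+ * every public API function, turning bad state or arguments into early
+ * error returns.
+ */
+#if 0
+rtk_api_ret_t rtk_example_port_api(rtk_port_t port)
+{
+    RTK_CHK_INIT_STATE();       /* RT_ERR_NOT_INIT if driver is not initialized */
+    RTK_CHK_PORT_VALID(port);   /* RT_ERR_PORT_ID if not a valid logical port */
+
+    /* ... body of the API ... */
+    return RT_ERR_OK;
+}
+#endif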
+
+#define RTK_PORTMASK_IS_PORT_SET(__portmask__, __port__)    (((__portmask__).bits[0] & (0x00000001 << __port__)) ? 1 : 0)
+#define RTK_PORTMASK_IS_EMPTY(__portmask__)                 (((__portmask__).bits[0] == 0) ? 1 : 0)
+#define RTK_PORTMASK_CLEAR(__portmask__)                    ((__portmask__).bits[0] = 0)
+#define RTK_PORTMASK_PORT_SET(__portmask__, __port__)       ((__portmask__).bits[0] |= (0x00000001 << __port__))
+#define RTK_PORTMASK_PORT_CLEAR(__portmask__, __port__)     ((__portmask__).bits[0] &= ~(0x00000001 << __port__))
+#define RTK_PORTMASK_ALLPORT_SET(__portmask__)              (rtk_switch_logPortMask_get(&__portmask__))
+#define RTK_PORTMASK_SCAN(__portmask__, __port__)           for(__port__ = 0; __port__ < RTK_SWITCH_PORT_NUM; __port__++)  if(RTK_PORTMASK_IS_PORT_SET(__portmask__, __port__))
+#define RTK_PORTMASK_COMPARE(__portmask_A__, __portmask_B__)    ((__portmask_A__).bits[0] - (__portmask_B__).bits[0])
+
+#define RTK_SCAN_ALL_PHY_PORTMASK(__port__)                 for(__port__ = 0; __port__ < RTK_SWITCH_PORT_NUM; __port__++)  if( (rtk_switch_phyPortMask_get() & (0x00000001 << __port__)))
+#define RTK_SCAN_ALL_LOG_PORT(__port__)                     for(__port__ = 0; __port__ < RTK_SWITCH_PORT_NUM; __port__++)  if( rtk_switch_logicalPortCheck(__port__) == RT_ERR_OK)
+#define RTK_SCAN_ALL_LOG_PORTMASK(__portmask__)             for((__portmask__).bits[0] = 0; (__portmask__).bits[0] < 0x7FFFF; (__portmask__).bits[0]++)  if( rtk_switch_isPortMaskValid(&__portmask__) == RT_ERR_OK)
+
+/* Port mask definition */
+#define RTK_PHY_PORTMASK_ALL                                (rtk_switch_phyPortMask_get())
+
+/* Port definition */
+#define RTK_MAX_LOGICAL_PORT_ID                             (rtk_switch_maxLogicalPort_get())
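+
+/*
+ * Illustrative sketch (not part of the original header): building and
+ * walking a logical portmask with the macros above. Only bits[0] is used,
+ * matching RTK_TOTAL_NUM_OF_WORD_FOR_1BIT_PORT_LIST == 1 in rtk_types.h.
+ */
+#if 0
+void example_portmask_walk(void)
+{
+    rtk_portmask_t pmask;
+    rtk_uint32 port;
+
+    RTK_PORTMASK_CLEAR(pmask);
+    RTK_PORTMASK_PORT_SET(pmask, UTP_PORT0);    /* sets bit 0 */
+    RTK_PORTMASK_PORT_SET(pmask, EXT_PORT0);    /* sets bit 16 */
+
+    RTK_PORTMASK_SCAN(pmask, port)
+    {
+        /* visits port == 0, then port == 16 */
+    }
+}
+#endif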
+
+/* Function Name:
+ *      rtk_switch_probe
+ * Description:
+ *      Probe switch
+ * Input:
+ *      None
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK       - Switch probed
+ *      RT_ERR_FAILED   - Switch not probed
+ * Note:
+ *
+ */
+extern rtk_api_ret_t rtk_switch_probe(switch_chip_t *pSwitchChip);
+
+/* Function Name:
+ *      rtk_switch_initialState_set
+ * Description:
+ *      Set initial status
+ * Input:
+ *      state   - Initial state
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK       - Initialized
+ *      RT_ERR_FAILED   - Uninitialized
+ * Note:
+ *
+ */
+extern rtk_api_ret_t rtk_switch_initialState_set(init_state_t state);
+
+/* Function Name:
+ *      rtk_switch_initialState_get
+ * Description:
+ *      Get initial status
+ * Input:
+ *      None
+ * Output:
+ *      None
+ * Return:
+ *      INIT_COMPLETED     - Initialized
+ *      INIT_NOT_COMPLETED - Uninitialized
+ * Note:
+ *
+ */
+extern init_state_t rtk_switch_initialState_get(void);
+
+/* Function Name:
+ *      rtk_switch_logicalPortCheck
+ * Description:
+ *      Check logical port ID.
+ * Input:
+ *      logicalPort     - logical port ID
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK       - Port ID is correct
+ *      RT_ERR_FAILED   - Port ID is not correct
+ *      RT_ERR_NOT_INIT - Not initialized
+ * Note:
+ *
+ */
+extern rtk_api_ret_t rtk_switch_logicalPortCheck(rtk_port_t logicalPort);
+
+/* Function Name:
+ *      rtk_switch_isUtpPort
+ * Description:
+ *      Check whether a logical port is a UTP port
+ * Input:
+ *      logicalPort     - logical port ID
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK       - Port ID is a UTP port
+ *      RT_ERR_FAILED   - Port ID is not a UTP port
+ *      RT_ERR_NOT_INIT - Not initialized
+ * Note:
+ *
+ */
+extern rtk_api_ret_t rtk_switch_isUtpPort(rtk_port_t logicalPort);
+
+/* Function Name:
+ *      rtk_switch_isExtPort
+ * Description:
+ *      Check whether a logical port is an extension (EXT) port
+ * Input:
+ *      logicalPort     - logical port ID
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK       - Port ID is an EXT port
+ *      RT_ERR_FAILED   - Port ID is not an EXT port
+ *      RT_ERR_NOT_INIT - Not initialized
+ * Note:
+ *
+ */
+extern rtk_api_ret_t rtk_switch_isExtPort(rtk_port_t logicalPort);
+
+/* Function Name:
+ *      rtk_switch_isHsgPort
+ * Description:
+ *      Check whether a logical port is an HSG port
+ * Input:
+ *      logicalPort     - logical port ID
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK       - Port ID is an HSG port
+ *      RT_ERR_FAILED   - Port ID is not an HSG port
+ *      RT_ERR_NOT_INIT - Not initialized
+ * Note:
+ *
+ */
+extern rtk_api_ret_t rtk_switch_isHsgPort(rtk_port_t logicalPort);
+
+/* Function Name:
+ *      rtk_switch_isSgmiiPort
+ * Description:
+ *      Check whether a logical port is an SGMII port
+ * Input:
+ *      logicalPort     - logical port ID
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK       - Port ID is an SGMII port
+ *      RT_ERR_FAILED   - Port ID is not an SGMII port
+ *      RT_ERR_NOT_INIT - Not initialized
+ * Note:
+ *
+ */
+extern rtk_api_ret_t rtk_switch_isSgmiiPort(rtk_port_t logicalPort);
+
+/* Function Name:
+ *      rtk_switch_isCPUPort
+ * Description:
+ *      Check whether a logical port is a CPU port
+ * Input:
+ *      logicalPort     - logical port ID
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK       - Port ID is a CPU port
+ *      RT_ERR_FAILED   - Port ID is not a CPU port
+ *      RT_ERR_NOT_INIT - Not initialized
+ * Note:
+ *
+ */
+extern rtk_api_ret_t rtk_switch_isCPUPort(rtk_port_t logicalPort);
+
+/* Function Name:
+ *      rtk_switch_isComboPort
+ * Description:
+ *      Check whether a logical port is a combo port
+ * Input:
+ *      logicalPort     - logical port ID
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK       - Port ID is a combo port
+ *      RT_ERR_FAILED   - Port ID is not a combo port
+ *      RT_ERR_NOT_INIT - Not initialized
+ * Note:
+ *
+ */
+extern rtk_api_ret_t rtk_switch_isComboPort(rtk_port_t logicalPort);
+
+/* Function Name:
+ *      rtk_switch_ComboPort_get
+ * Description:
+ *      Get Combo port ID
+ * Input:
+ *      None
+ * Output:
+ *      None
+ * Return:
+ *      Port ID of combo port
+ * Note:
+ *
+ */
+extern rtk_uint32 rtk_switch_ComboPort_get(void);
+
+/* Function Name:
+ *      rtk_switch_isPtpPort
+ * Description:
+ *      Check whether a logical port is a PTP port
+ * Input:
+ *      logicalPort     - logical port ID
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK       - Port ID is a PTP port
+ *      RT_ERR_FAILED   - Port ID is not a PTP port
+ *      RT_ERR_NOT_INIT - Not initialized
+ * Note:
+ *
+ */
+extern rtk_api_ret_t rtk_switch_isPtpPort(rtk_port_t logicalPort);
+
+/* Function Name:
+ *      rtk_switch_port_L2P_get
+ * Description:
+ *      Get physical port ID
+ * Input:
+ *      logicalPort       - logical port ID
+ * Output:
+ *      None
+ * Return:
+ *      Physical port ID
+ * Note:
+ *
+ */
+extern rtk_uint32 rtk_switch_port_L2P_get(rtk_port_t logicalPort);
+
+/* Function Name:
+ *      rtk_switch_port_P2L_get
+ * Description:
+ *      Get logical port ID
+ * Input:
+ *      physicalPort       - physical port ID
+ * Output:
+ *      None
+ * Return:
+ *      logical port ID
+ * Note:
+ *
+ */
+extern rtk_port_t rtk_switch_port_P2L_get(rtk_uint32 physicalPort);
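+
+/*
+ * Illustrative sketch (not part of the original header): the L2P/P2L pair
+ * translates between the chip-independent logical numbering (rtk_port_t)
+ * and the physical MAC numbering of the probed chip, via the l2p_port[] /
+ * p2l_port[] tables of the HAL control structure.
+ */
+#if 0
+void example_port_mapping(void)
+{
+    rtk_uint32 phy;
+    rtk_port_t log;
+
+    phy = rtk_switch_port_L2P_get(UTP_PORT3);   /* presumably UNDEFINE_PHY_PORT when unmapped */
+    log = rtk_switch_port_P2L_get(phy);         /* round-trips back to UTP_PORT3 */
+    (void)log;
+}
+#endif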
+
+/* Function Name:
+ *      rtk_switch_isPortMaskValid
+ * Description:
+ *      Check whether the portmask is valid
+ * Input:
+ *      pPmask       - logical port mask
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - port mask is valid
+ *      RT_ERR_FAILED       - port mask is not valid
+ *      RT_ERR_NOT_INIT     - Not initialized
+ *      RT_ERR_NULL_POINTER - Null pointer
+ * Note:
+ *
+ */
+extern rtk_api_ret_t rtk_switch_isPortMaskValid(rtk_portmask_t *pPmask);
+
+/* Function Name:
+ *      rtk_switch_isPortMaskUtp
+ * Description:
+ *      Check that all ports in the portmask are UTP ports
+ * Input:
+ *      pPmask       - logical port mask
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - Only UTP port in port mask
+ *      RT_ERR_FAILED       - Not only UTP port in port mask
+ *      RT_ERR_NOT_INIT     - Not initialized
+ *      RT_ERR_NULL_POINTER - Null pointer
+ * Note:
+ *
+ */
+extern rtk_api_ret_t rtk_switch_isPortMaskUtp(rtk_portmask_t *pPmask);
+
+/* Function Name:
+ *      rtk_switch_isPortMaskExt
+ * Description:
+ *      Check that all ports in the portmask are EXT ports
+ * Input:
+ *      pPmask       - logical port mask
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - Only EXT port in port mask
+ *      RT_ERR_FAILED       - Not only EXT port in port mask
+ *      RT_ERR_NOT_INIT     - Not initialized
+ *      RT_ERR_NULL_POINTER - Null pointer
+ * Note:
+ *
+ */
+extern rtk_api_ret_t rtk_switch_isPortMaskExt(rtk_portmask_t *pPmask);
+
+/* Function Name:
+ *      rtk_switch_portmask_L2P_get
+ * Description:
+ *      Get physical portmask from logical portmask
+ * Input:
+ *      pLogicalPmask       - logical port mask
+ * Output:
+ *      pPhysicalPortmask   - physical port mask
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_NOT_INIT     - Not initialized
+ *      RT_ERR_NULL_POINTER - Null pointer
+ *      RT_ERR_PORT_MASK    - Error port mask
+ * Note:
+ *
+ */
+extern rtk_api_ret_t rtk_switch_portmask_L2P_get(rtk_portmask_t *pLogicalPmask, rtk_uint32 *pPhysicalPortmask);
+
+/* Function Name:
+ *      rtk_switch_portmask_P2L_get
+ * Description:
+ *      Get logical portmask from physical portmask
+ * Input:
+ *      physicalPortmask    - physical port mask
+ * Output:
+ *      pLogicalPmask       - logical port mask
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_NOT_INIT     - Not initialized
+ *      RT_ERR_NULL_POINTER - Null pointer
+ *      RT_ERR_PORT_MASK    - Error port mask
+ * Note:
+ *
+ */
+extern rtk_api_ret_t rtk_switch_portmask_P2L_get(rtk_uint32 physicalPortmask, rtk_portmask_t *pLogicalPmask);
+
+/* Function Name:
+ *      rtk_switch_phyPortMask_get
+ * Description:
+ *      Get physical portmask
+ * Input:
+ *      None
+ * Output:
+ *      None
+ * Return:
+ *      0x00                - Not initialized
+ *      Other value         - Physical port mask
+ * Note:
+ *
+ */
+rtk_uint32 rtk_switch_phyPortMask_get(void);
+
+/* Function Name:
+ *      rtk_switch_logPortMask_get
+ * Description:
+ *      Get Logical portmask
+ * Input:
+ *      None
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_NOT_INIT     - Not initialized
+ *      RT_ERR_NULL_POINTER - Null pointer
+ * Note:
+ *
+ */
+rtk_api_ret_t rtk_switch_logPortMask_get(rtk_portmask_t *pPortmask);
+
+/* Function Name:
+ *      rtk_switch_init
+ * Description:
+ *      Set chip to default configuration environment
+ * Input:
+ *      None
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ * Note:
+ *      The API sets the chip registers to the default configuration for the detected chip model.
+ */
+extern rtk_api_ret_t rtk_switch_init(void);
+
+/* Function Name:
+ *      rtk_switch_portMaxPktLen_set
+ * Description:
+ *      Set Max packet length
+ * Input:
+ *      port    - Port ID
+ *      speed   - Speed
+ *      cfgId   - Configuration ID
+ * Output:
+ *      None.
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_INPUT        - Error Input
+ * Note:
+ */
+extern rtk_api_ret_t rtk_switch_portMaxPktLen_set(rtk_port_t port, rtk_switch_maxPktLen_linkSpeed_t speed, rtk_uint32 cfgId);
+
+/* Function Name:
+ *      rtk_switch_portMaxPktLen_get
+ * Description:
+ *      Get Max packet length
+ * Input:
+ *      port    - Port ID
+ *      speed   - Speed
+ * Output:
+ *      pCfgId  - Configuration ID
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_INPUT        - Error Input
+ * Note:
+ */
+extern rtk_api_ret_t rtk_switch_portMaxPktLen_get(rtk_port_t port, rtk_switch_maxPktLen_linkSpeed_t speed, rtk_uint32 *pCfgId);
+
+/* Function Name:
+ *      rtk_switch_maxPktLenCfg_set
+ * Description:
+ *      Set Max packet length configuration
+ * Input:
+ *      cfgId   - Configuration ID
+ *      pktLen  - Max packet length
+ * Output:
+ *      None.
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_INPUT        - Error Input
+ * Note:
+ */
+extern rtk_api_ret_t rtk_switch_maxPktLenCfg_set(rtk_uint32 cfgId, rtk_uint32 pktLen);
+
+/* Function Name:
+ *      rtk_switch_maxPktLenCfg_get
+ * Description:
+ *      Get Max packet length configuration
+ * Input:
+ *      cfgId   - Configuration ID
+ * Output:
+ *      pPktLen - Max packet length
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_INPUT        - Error Input
+ * Note:
+ */
+extern rtk_api_ret_t rtk_switch_maxPktLenCfg_get(rtk_uint32 cfgId, rtk_uint32 *pPktLen);
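+
+/*
+ * Illustrative sketch (not part of the original header; values are
+ * arbitrary): max packet length is configured indirectly. A configuration
+ * slot (cfgId 0..MAXPKTLEN_CFG_ID_MAX) holds the length itself, and each
+ * (port, link speed) pair selects a slot.
+ */
+#if 0
+void example_max_pktlen(void)
+{
+    /* Put a 0x2400-byte limit into slot 1 (must not exceed RTK_SWITCH_MAX_PKTLEN) */
+    rtk_switch_maxPktLenCfg_set(1, 0x2400);
+
+    /* UTP port 0 uses slot 1 when linked at gigabit speed */
+    rtk_switch_portMaxPktLen_set(UTP_PORT0, MAXPKTLEN_LINK_SPEED_GE, 1);
+}
+#endif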
+
+/* Function Name:
+ *      rtk_switch_greenEthernet_set
+ * Description:
+ *      Set all Ports Green Ethernet state.
+ * Input:
+ *      enable - Green Ethernet state.
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK       - OK
+ *      RT_ERR_FAILED   - Failed
+ *      RT_ERR_SMI      - SMI access error
+ *      RT_ERR_ENABLE   - Invalid enable input.
+ * Note:
+ *      This API can set all Ports Green Ethernet state.
+ *      The configuration is as following:
+ *      - DISABLE
+ *      - ENABLE
+ */
+extern rtk_api_ret_t rtk_switch_greenEthernet_set(rtk_enable_t enable);
+
+/* Function Name:
+ *      rtk_switch_greenEthernet_get
+ * Description:
+ *      Get all Ports Green Ethernet state.
+ * Input:
+ *      None
+ * Output:
+ *      pEnable - Green Ethernet state.
+ * Return:
+ *      RT_ERR_OK              - OK
+ *      RT_ERR_FAILED          - Failed
+ *      RT_ERR_SMI             - SMI access error
+ * Note:
+ *      This API can get Green Ethernet state.
+ */
+extern rtk_api_ret_t rtk_switch_greenEthernet_get(rtk_enable_t *pEnable);
+
+/* Function Name:
+ *      rtk_switch_maxLogicalPort_get
+ * Description:
+ *      Get Max logical port ID
+ * Input:
+ *      None
+ * Output:
+ *      None
+ * Return:
+ *      Max logical port
+ * Note:
+ *      This API can get max logical port
+ */
+extern rtk_port_t rtk_switch_maxLogicalPort_get(void);
+
+/* Function Name:
+ *      rtk_switch_maxMeterId_get
+ * Description:
+ *      Get Max Meter ID
+ * Input:
+ *      None
+ * Output:
+ *      None
+ * Return:
+ *      0x00                - Not initialized
+ *      Other value         - Max Meter ID
+ * Note:
+ *
+ */
+extern rtk_uint32 rtk_switch_maxMeterId_get(void);
+
+/* Function Name:
+ *      rtk_switch_maxLutAddrNumber_get
+ * Description:
+ *      Get Max LUT Address number
+ * Input:
+ *      None
+ * Output:
+ *      None
+ * Return:
+ *      0x00                - Not initialized
+ *      Other value         - Max LUT Address number
+ * Note:
+ *
+ */
+extern rtk_uint32 rtk_switch_maxLutAddrNumber_get(void);
+
+/* Function Name:
+ *      rtk_switch_isValidTrunkGrpId
+ * Description:
+ *      Check if trunk group is valid or not
+ * Input:
+ *      grpId       - Group ID
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - Trunk Group ID is valid
+ *      RT_ERR_LA_TRUNK_ID  - Trunk Group ID is not valid
+ * Note:
+ *
+ */
+rtk_uint32 rtk_switch_isValidTrunkGrpId(rtk_uint32 grpId);
+
+#endif
diff -Nruw linux-4.4.115-fbx/drivers/misc/freebox./rtlapi/rtk_types.h linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/rtk_types.h
--- linux-4.4.115-fbx/drivers/misc/freebox./rtlapi/rtk_types.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/rtk_types.h	2019-01-22 16:16:24.711257346 +0100
@@ -0,0 +1,136 @@
+#ifndef _RTL8367C_TYPES_H_
+#define _RTL8367C_TYPES_H_
+
+typedef unsigned long long      rtk_uint64;
+typedef long long               rtk_int64;
+typedef unsigned int            rtk_uint32;
+typedef int                     rtk_int32;
+typedef unsigned short          rtk_uint16;
+typedef short                   rtk_int16;
+typedef unsigned char           rtk_uint8;
+typedef char                    rtk_int8;
+
+#define CONST_T     const
+
+#define RTK_TOTAL_NUM_OF_WORD_FOR_1BIT_PORT_LIST    1
+
+#define RTK_MAX_NUM_OF_PORT                         8
+#define RTK_PORT_ID_MAX                             (RTK_MAX_NUM_OF_PORT-1)
+#define RTK_PHY_ID_MAX                              (RTK_MAX_NUM_OF_PORT-4)
+#define RTK_MAX_PORT_MASK                           0xFF
+
+#define RTK_WHOLE_SYSTEM                            0xFF
+
+typedef struct rtk_portmask_s
+{
+    rtk_uint32  bits[RTK_TOTAL_NUM_OF_WORD_FOR_1BIT_PORT_LIST];
+} rtk_portmask_t;
+
+typedef enum rtk_enable_e
+{
+    DISABLED = 0,
+    ENABLED,
+    RTK_ENABLE_END
+} rtk_enable_t;
+
+#ifndef ETHER_ADDR_LEN
+#define ETHER_ADDR_LEN      6
+#endif
+
+/* ethernet address type */
+typedef struct  rtk_mac_s
+{
+    rtk_uint8 octet[ETHER_ADDR_LEN];
+} rtk_mac_t;
+
+typedef rtk_uint32  rtk_pri_t;      /* priority value */
+typedef rtk_uint32  rtk_qid_t;      /* queue id type */
+typedef rtk_uint32  rtk_data_t;
+typedef rtk_uint32  rtk_dscp_t;     /* dscp value */
+typedef rtk_uint32  rtk_fid_t;      /* filter id type */
+typedef rtk_uint32  rtk_vlan_t;     /* vlan id type */
+typedef rtk_uint32  rtk_mac_cnt_t;  /* MAC count type  */
+typedef rtk_uint32  rtk_meter_id_t; /* meter id type  */
+typedef rtk_uint32  rtk_rate_t;     /* rate type  */
+
+typedef enum rtk_port_e
+{
+    UTP_PORT0 = 0,
+    UTP_PORT1,
+    UTP_PORT2,
+    UTP_PORT3,
+    UTP_PORT4,
+    UTP_PORT5,
+    UTP_PORT6,
+    UTP_PORT7,
+
+    EXT_PORT0 = 16,
+    EXT_PORT1,
+    EXT_PORT2,
+
+    UNDEFINE_PORT = 30,
+    RTK_PORT_MAX = 31
+} rtk_port_t;
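+
+/*
+ * Note: logical port IDs double as bit positions in rtk_portmask_t.bits[0];
+ * UTP ports occupy bits 0..7 and EXT ports bits 16..18 (see the
+ * RTK_PORTMASK_* macros in rtk_switch.h).
+ */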
+
+
+#ifndef _RTL_TYPES_H
+
+#if 0
+typedef unsigned long long      uint64;
+typedef long long               int64;
+typedef unsigned int            uint32;
+typedef int                     int32;
+typedef unsigned short          uint16;
+typedef short                   int16;
+typedef unsigned char           uint8;
+typedef char                    int8;
+#endif
+
+typedef rtk_uint32                  ipaddr_t;
+typedef rtk_uint32                  memaddr;
+
+#ifndef ETHER_ADDR_LEN
+#define ETHER_ADDR_LEN      6
+#endif
+
+typedef struct ether_addr_s {
+    rtk_uint8 octet[ETHER_ADDR_LEN];
+} ether_addr_t;
+
+#ifdef __KERNEL__
+#define rtlglue_printf printk
+#else
+#define rtlglue_printf printf
+#endif
+#define PRINT           rtlglue_printf
+#endif /*_RTL_TYPES_H*/
+
+/* type abstraction */
+#ifdef EMBEDDED_SUPPORT
+
+typedef rtk_int16                   rtk_api_ret_t;
+typedef rtk_int16                   ret_t;
+typedef rtk_uint32                  rtk_u_long;
+
+#else
+
+typedef rtk_int32                   rtk_api_ret_t;
+typedef rtk_int32                   ret_t;
+typedef rtk_uint64                  rtk_u_long_t;
+
+#endif
+
+#ifndef NULL
+#define NULL 0
+#endif
+
+#ifndef TRUE
+#define TRUE 1
+#endif
+
+#ifndef FALSE
+#define FALSE 0
+#endif
+
+#define CONST           const
+#endif /* _RTL8367C_TYPES_H_ */
diff -Nruw linux-4.4.115-fbx/drivers/misc/freebox./rtlapi/rtl8367c_asicdrv_acl.c linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/rtl8367c_asicdrv_acl.c
--- linux-4.4.115-fbx/drivers/misc/freebox./rtlapi/rtl8367c_asicdrv_acl.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/rtl8367c_asicdrv_acl.c	2019-01-22 16:16:24.711257346 +0100
@@ -0,0 +1,1175 @@
+/*
+ * Copyright (C) 2013 Realtek Semiconductor Corp.
+ * All Rights Reserved.
+ *
+ * This program is the proprietary software of Realtek Semiconductor
+ * Corporation and/or its licensors, and only be used, duplicated,
+ * modified or distributed under the authorized license from Realtek.
+ *
+ * ANY USE OF THE SOFTWARE OTHER THAN AS AUTHORIZED UNDER
+ * THIS LICENSE OR COPYRIGHT LAW IS PROHIBITED.
+ *
+ * $Revision: 76306 $
+ * $Date: 2017-03-08 15:13:58 +0800 (Wed, 08 Mar 2017) $
+ *
+ * Purpose : RTL8367C switch high-level API for RTL8367C
+ * Feature : ACL related function drivers
+ *
+ */
+#include <rtl8367c_asicdrv_acl.h>
+
+#include <string.h>
+
+#if defined(CONFIG_RTL8367C_ASICDRV_TEST)
+rtl8367c_aclrulesmi Rtl8370sVirtualAclRuleTable[RTL8367C_ACLRULENO];
+rtk_uint16 Rtl8370sVirtualAclActTable[RTL8367C_ACLRULENO][RTL8367C_ACL_ACT_TABLE_LEN];
+#endif
+
+/*
+    Convert the ACL rule structure between the user (MMI) and SMI layouts
+*/
+static void _rtl8367c_aclRuleStSmi2User( rtl8367c_aclrule *pAclUser, rtl8367c_aclrulesmi *pAclSmi)
+{
+    rtk_uint8 *care_ptr, *data_ptr;
+    rtk_uint8 care_tmp, data_tmp;
+    rtk_uint32 i;
+
+    pAclUser->data_bits.active_portmsk = (((pAclSmi->data_bits_ext.rule_info >> 1) & 0x0007) << 8) | ((pAclSmi->data_bits.rule_info >> 8) & 0x00FF);
+    pAclUser->data_bits.type = (pAclSmi->data_bits.rule_info & 0x0007);
+    pAclUser->data_bits.tag_exist = (pAclSmi->data_bits.rule_info & 0x00F8) >> 3;
+
+    care_ptr = (rtk_uint8*)&pAclSmi->care_bits;
+    data_ptr = (rtk_uint8*)&pAclSmi->data_bits;
+
+    for ( i = 0; i < sizeof(struct acl_rule_smi_st); i++)
+    {
+        care_tmp = *(care_ptr + i) ^ (*(data_ptr + i));
+        data_tmp = *(data_ptr + i);
+
+        *(care_ptr + i) = care_tmp;
+        *(data_ptr + i) = data_tmp;
+    }
+
+    care_ptr = (rtk_uint8*)&pAclSmi->care_bits_ext;
+    data_ptr = (rtk_uint8*)&pAclSmi->data_bits_ext;
+    care_tmp = (*care_ptr) ^ (*data_ptr);
+    data_tmp = (*data_ptr);
+    *care_ptr = care_tmp;
+    *data_ptr = data_tmp;
+
+    for(i = 0; i < RTL8367C_ACLRULEFIELDNO; i++)
+        pAclUser->data_bits.field[i] = pAclSmi->data_bits.field[i];
+
+    pAclUser->valid = pAclSmi->valid;
+
+    pAclUser->care_bits.active_portmsk = (((pAclSmi->care_bits_ext.rule_info >> 1) & 0x0007) << 8) | ((pAclSmi->care_bits.rule_info >> 8) & 0x00FF);
+    pAclUser->care_bits.type = (pAclSmi->care_bits.rule_info & 0x0007);
+    pAclUser->care_bits.tag_exist = (pAclSmi->care_bits.rule_info & 0x00F8) >> 3;
+
+    for(i = 0; i < RTL8367C_ACLRULEFIELDNO; i++)
+        pAclUser->care_bits.field[i] = pAclSmi->care_bits.field[i];
+}
+
+/*
+    Convert the ACL rule structure between the user (MMI) and SMI layouts
+*/
+static void _rtl8367c_aclRuleStUser2Smi(rtl8367c_aclrule *pAclUser, rtl8367c_aclrulesmi *pAclSmi)
+{
+    rtk_uint8 *care_ptr, *data_ptr;
+    rtk_uint8 care_tmp, data_tmp;
+    rtk_uint32 i;
+
+    pAclSmi->data_bits_ext.rule_info = ((pAclUser->data_bits.active_portmsk >> 8) & 0x7) << 1;
+    pAclSmi->data_bits.rule_info = ((pAclUser->data_bits.active_portmsk & 0xff) << 8) | ((pAclUser->data_bits.tag_exist & 0x1F) << 3) | (pAclUser->data_bits.type & 0x07);
+
+    for(i = 0;i < RTL8367C_ACLRULEFIELDNO; i++)
+        pAclSmi->data_bits.field[i] = pAclUser->data_bits.field[i];
+
+    pAclSmi->valid = pAclUser->valid;
+
+    pAclSmi->care_bits_ext.rule_info = ((pAclUser->care_bits.active_portmsk >> 8) & 0x7) << 1;
+    pAclSmi->care_bits.rule_info = ((pAclUser->care_bits.active_portmsk & 0xff) << 8) | ((pAclUser->care_bits.tag_exist & 0x1F) << 3) | (pAclUser->care_bits.type & 0x07);
+
+    for(i = 0; i < RTL8367C_ACLRULEFIELDNO; i++)
+        pAclSmi->care_bits.field[i] = pAclUser->care_bits.field[i];
+
+    care_ptr = (rtk_uint8*)&pAclSmi->care_bits;
+    data_ptr = (rtk_uint8*)&pAclSmi->data_bits;
+
+    for ( i = 0; i < sizeof(struct acl_rule_smi_st); i++)
+    {
+        care_tmp = *(care_ptr + i) & ~(*(data_ptr + i));
+        data_tmp = *(care_ptr + i) & *(data_ptr + i);
+
+        *(care_ptr + i) = care_tmp;
+        *(data_ptr + i) = data_tmp;
+    }
+
+    care_ptr = (rtk_uint8*)&pAclSmi->care_bits_ext;
+    data_ptr = (rtk_uint8*)&pAclSmi->data_bits_ext;
+    care_tmp = *care_ptr & ~(*data_ptr);
+    data_tmp = *care_ptr & *data_ptr;
+
+    *care_ptr = care_tmp;
+    *data_ptr = data_tmp;
+}
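+
+/*
+ * Worked example (illustrative, not part of the original sources): the
+ * User2Smi transform above splits each (care, data) bit pair into the
+ * hardware's "must-be-0" / "must-be-1" encoding, and Smi2User reverses it:
+ *
+ *   user: care = 0xF0, data = 0x35
+ *   smi:  data' = care & data  = 0x30   (cared-about bits that must be 1)
+ *         care' = care & ~data = 0xC0   (cared-about bits that must be 0)
+ *   back: care  = care' ^ data' = 0xF0
+ *         data  = data'         = 0x30  (don't-care data bits normalize to 0)
+ */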
+
+/*
+    Convert the ACL action structure between the user (MMI) and SMI layouts
+*/
+static void _rtl8367c_aclActStSmi2User(rtl8367c_acl_act_t *pAclUser, rtk_uint16 *pAclSmi)
+{
+    pAclUser->cact = (pAclSmi[0] & 0x00C0) >> 6;
+    pAclUser->cvidx_cact = (pAclSmi[0] & 0x003F) | (((pAclSmi[3] & 0x0008) >> 3) << 6);
+
+    pAclUser->sact = (pAclSmi[0] & 0xC000) >> 14;
+    pAclUser->svidx_sact = ((pAclSmi[0] & 0x3F00) >> 8) | (((pAclSmi[3] & 0x0010) >> 4) << 6);
+
+    pAclUser->aclmeteridx = (pAclSmi[1] & 0x003F) | (((pAclSmi[3] & 0x0020) >> 5) << 6);
+
+    pAclUser->fwdact = (pAclSmi[1] & 0xC000) >> 14;
+    pAclUser->fwdpmask = ((pAclSmi[1] & 0x3FC0) >> 6) | (((pAclSmi[3] & 0x01C0) >> 6) << 8);
+
+    pAclUser->priact = (pAclSmi[2] & 0x00C0) >> 6;
+    pAclUser->pridx = (pAclSmi[2] & 0x003F) | (((pAclSmi[3] & 0x0200) >> 9) << 6);
+
+    pAclUser->aclint = (pAclSmi[2] & 0x2000) >> 13;
+    pAclUser->gpio_en = (pAclSmi[2] & 0x1000) >> 12;
+    pAclUser->gpio_pin = (pAclSmi[2] & 0x0F00) >> 8;
+
+    pAclUser->cact_ext = (pAclSmi[2] & 0xC000) >> 14;
+    pAclUser->tag_fmt = (pAclSmi[3] & 0x0003);
+    pAclUser->fwdact_ext = (pAclSmi[3] & 0x0004) >> 2;
+}
+
+/*
+    Convert the ACL action structure between the user (MMI) and SMI layouts
+*/
+static void _rtl8367c_aclActStUser2Smi(rtl8367c_acl_act_t *pAclUser, rtk_uint16 *pAclSmi)
+{
+    pAclSmi[0] |= (pAclUser->cvidx_cact & 0x003F);
+    pAclSmi[0] |= (pAclUser->cact & 0x0003) << 6;
+    pAclSmi[0] |= (pAclUser->svidx_sact & 0x003F) << 8;
+    pAclSmi[0] |= (pAclUser->sact & 0x0003) << 14;
+
+    pAclSmi[1] |= (pAclUser->aclmeteridx & 0x003F);
+    pAclSmi[1] |= (pAclUser->fwdpmask & 0x00FF) << 6;
+    pAclSmi[1] |= (pAclUser->fwdact & 0x0003) << 14;
+
+    pAclSmi[2] |= (pAclUser->pridx & 0x003F);
+    pAclSmi[2] |= (pAclUser->priact & 0x0003) << 6;
+    pAclSmi[2] |= (pAclUser->gpio_pin & 0x000F) << 8;
+    pAclSmi[2] |= (pAclUser->gpio_en & 0x0001) << 12;
+    pAclSmi[2] |= (pAclUser->aclint & 0x0001) << 13;
+    pAclSmi[2] |= (pAclUser->cact_ext & 0x0003) << 14;
+
+    pAclSmi[3] |= (pAclUser->tag_fmt & 0x0003);
+    pAclSmi[3] |= (pAclUser->fwdact_ext & 0x0001) << 2;
+    pAclSmi[3] |= ((pAclUser->cvidx_cact & 0x0040) >> 6) << 3;
+    pAclSmi[3] |= ((pAclUser->svidx_sact & 0x0040) >> 6) << 4;
+    pAclSmi[3] |= ((pAclUser->aclmeteridx & 0x0040) >> 6) << 5;
+    pAclSmi[3] |= ((pAclUser->fwdpmask & 0x0700) >> 8) << 6;
+    pAclSmi[3] |= ((pAclUser->pridx & 0x0040) >> 6) << 9;
+}
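+
+/*
+ * Note: the packing above only ORs bits into pAclSmi, so callers must zero
+ * the SMI word array first, as rtl8367c_setAsicAclAct() does with memset().
+ */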
+
+/* Function Name:
+ *      rtl8367c_setAsicAcl
+ * Description:
+ *      Set port acl function enable/disable
+ * Input:
+ *      port    - Physical port number (0~10)
+ *      enabled - 1: enabled, 0: disabled
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK       - Success
+ *      RT_ERR_SMI      - SMI access error
+ *      RT_ERR_PORT_ID  - Invalid port number
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_setAsicAcl(rtk_uint32 port, rtk_uint32 enabled)
+{
+    if(port > RTL8367C_PORTIDMAX)
+        return RT_ERR_PORT_ID;
+
+    return rtl8367c_setAsicRegBit(RTL8367C_ACL_ENABLE_REG, port, enabled);
+}
+/* Function Name:
+ *      rtl8367c_getAsicAcl
+ * Description:
+ *      Get port acl function enable/disable
+ * Input:
+ *      port    - Physical port number (0~10)
+ * Output:
+ *      pEnabled - 1: enabled, 0: disabled
+ * Return:
+ *      RT_ERR_OK       - Success
+ *      RT_ERR_SMI      - SMI access error
+ *      RT_ERR_PORT_ID  - Invalid port number
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicAcl(rtk_uint32 port, rtk_uint32* pEnabled)
+{
+    if(port > RTL8367C_PORTIDMAX)
+        return RT_ERR_PORT_ID;
+
+    return rtl8367c_getAsicRegBit(RTL8367C_ACL_ENABLE_REG, port, pEnabled);
+}
+/* Function Name:
+ *      rtl8367c_setAsicAclUnmatchedPermit
+ * Description:
+ *      Set port acl function unmatched permit action
+ * Input:
+ *      port    - Physical port number (0~10)
+ *      enabled - 1: enabled, 0: disabled
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK       - Success
+ *      RT_ERR_SMI      - SMI access error
+ *      RT_ERR_PORT_ID  - Invalid port number
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_setAsicAclUnmatchedPermit(rtk_uint32 port, rtk_uint32 enabled)
+{
+    if(port > RTL8367C_PORTIDMAX)
+        return RT_ERR_PORT_ID;
+
+    return rtl8367c_setAsicRegBit(RTL8367C_ACL_UNMATCH_PERMIT_REG, port, enabled);
+}
+/* Function Name:
+ *      rtl8367c_getAsicAclUnmatchedPermit
+ * Description:
+ *      Get port acl function unmatched permit action
+ * Input:
+ *      port    - Physical port number (0~10)
+ * Output:
+ *      pEnabled - 1: enabled, 0: disabled
+ * Return:
+ *      RT_ERR_OK       - Success
+ *      RT_ERR_SMI      - SMI access error
+ *      RT_ERR_PORT_ID  - Invalid port number
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicAclUnmatchedPermit(rtk_uint32 port, rtk_uint32* pEnabled)
+{
+    if(port > RTL8367C_PORTIDMAX)
+        return RT_ERR_PORT_ID;
+
+    return rtl8367c_getAsicRegBit(RTL8367C_ACL_UNMATCH_PERMIT_REG, port, pEnabled);
+}
+
+/* Function Name:
+ *      rtl8367c_setAsicAclRule
+ * Description:
+ *      Set acl rule content
+ * Input:
+ *      index   - ACL rule index (0-95) of 96 ACL rules
+ *      pAclRule - ACL rule structure for setting
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK               - Success
+ *      RT_ERR_SMI              - SMI access error
+ *      RT_ERR_OUT_OF_RANGE     - Invalid ACL rule index (0-95)
+ * Note:
+ *      The system supports 96 shared 289-bit ACL ingress rules; the valid index range is 0-95.
+ *      If software wants to modify an ACL rule, the ACL function should be disabled first,
+ *      otherwise an unspecified ACL action may be executed.
+ *      One ACL rule structure has three parts:
+ *      Bit 0-147       Data Bits of this Rule
+ *      Bit 148         Valid Bit
+ *      Bit 149-296     Care Bits of this Rule
+ *      There are four kinds of field in the Data Bits and Care Bits: Active Portmask, Type, Tag Exist, and the 8 rule fields
+ */
+ret_t rtl8367c_setAsicAclRule(rtk_uint32 index, rtl8367c_aclrule* pAclRule)
+{
+    rtl8367c_aclrulesmi aclRuleSmi;
+    rtk_uint16* tableAddr;
+    rtk_uint32 regAddr;
+    rtk_uint32  regData;
+    rtk_uint32 i;
+    ret_t retVal;
+
+    if(index > RTL8367C_ACLRULEMAX)
+        return RT_ERR_OUT_OF_RANGE;
+
+    memset(&aclRuleSmi, 0x00, sizeof(rtl8367c_aclrulesmi));
+
+    _rtl8367c_aclRuleStUser2Smi(pAclRule, &aclRuleSmi);
+
+    /* Write valid bit = 0 */
+    regAddr = RTL8367C_TABLE_ACCESS_ADDR_REG;
+    if(index >= 64)
+        regData = RTL8367C_ACLRULETBADDR2(DATABITS, index);
+    else
+        regData = RTL8367C_ACLRULETBADDR(DATABITS, index);
+    retVal = rtl8367c_setAsicReg(regAddr,regData);
+    if(retVal !=RT_ERR_OK)
+        return retVal;
+
+    retVal = rtl8367c_setAsicRegBits(RTL8367C_TABLE_ACCESS_WRDATA_REG(RTL8367C_ACLRULETBLEN), 0x1, 0);
+    if(retVal !=RT_ERR_OK)
+        return retVal;
+
+    regAddr = RTL8367C_TABLE_ACCESS_CTRL_REG;
+    regData = RTL8367C_TABLE_ACCESS_REG_DATA(TB_OP_WRITE, TB_TARGET_ACLRULE);
+    retVal = rtl8367c_setAsicReg(regAddr, regData);
+    if(retVal !=RT_ERR_OK)
+        return retVal;
+
+
+
+    /* Write ACS_ADR register */
+    regAddr = RTL8367C_TABLE_ACCESS_ADDR_REG;
+    if(index >= 64)
+        regData = RTL8367C_ACLRULETBADDR2(CAREBITS, index);
+    else
+        regData = RTL8367C_ACLRULETBADDR(CAREBITS, index);
+    retVal = rtl8367c_setAsicReg(regAddr, regData);
+    if(retVal != RT_ERR_OK)
+        return retVal;
+
+    /* Write Care Bits to ACS_DATA registers */
+     tableAddr = (rtk_uint16*)&aclRuleSmi.care_bits;
+     regAddr = RTL8367C_TABLE_ACCESS_WRDATA_BASE;
+
+    for(i = 0; i < RTL8367C_ACLRULETBLEN; i++)
+    {
+        regData = *tableAddr;
+        retVal = rtl8367c_setAsicReg(regAddr, regData);
+        if(retVal != RT_ERR_OK)
+            return retVal;
+
+        regAddr++;
+        tableAddr++;
+    }
+    retVal = rtl8367c_setAsicRegBits(RTL8367C_TABLE_ACCESS_WRDATA_REG(RTL8367C_ACLRULETBLEN), (0x0007 << 1), (aclRuleSmi.care_bits_ext.rule_info >> 1) & 0x0007);
+    if(retVal != RT_ERR_OK)
+        return retVal;
+
+    /* Write ACS_CMD register */
+    regAddr = RTL8367C_TABLE_ACCESS_CTRL_REG;
+    regData = RTL8367C_TABLE_ACCESS_REG_DATA(TB_OP_WRITE, TB_TARGET_ACLRULE);
+    retVal = rtl8367c_setAsicRegBits(regAddr, RTL8367C_TABLE_TYPE_MASK | RTL8367C_COMMAND_TYPE_MASK,regData);
+    if(retVal != RT_ERR_OK)
+        return retVal;
+
+
+
+    /* Write ACS_ADR register for data bits */
+    regAddr = RTL8367C_TABLE_ACCESS_ADDR_REG;
+    if(index >= 64)
+        regData = RTL8367C_ACLRULETBADDR2(DATABITS, index);
+    else
+        regData = RTL8367C_ACLRULETBADDR(DATABITS, index);
+
+    retVal = rtl8367c_setAsicReg(regAddr, regData);
+    if(retVal != RT_ERR_OK)
+        return retVal;
+
+    /* Write Data Bits to ACS_DATA registers */
+     tableAddr = (rtk_uint16*)&aclRuleSmi.data_bits;
+     regAddr = RTL8367C_TABLE_ACCESS_WRDATA_BASE;
+
+    for(i = 0; i < RTL8367C_ACLRULETBLEN; i++)
+    {
+        regData = *tableAddr;
+        retVal = rtl8367c_setAsicReg(regAddr, regData);
+        if(retVal != RT_ERR_OK)
+            return retVal;
+
+        regAddr++;
+        tableAddr++;
+    }
+
+    retVal = rtl8367c_setAsicRegBit(RTL8367C_TABLE_ACCESS_WRDATA_REG(RTL8367C_ACLRULETBLEN), 0, aclRuleSmi.valid);
+    if(retVal != RT_ERR_OK)
+        return retVal;
+    retVal = rtl8367c_setAsicRegBits(RTL8367C_TABLE_ACCESS_WRDATA_REG(RTL8367C_ACLRULETBLEN), (0x0007 << 1), (aclRuleSmi.data_bits_ext.rule_info >> 1) & 0x0007);
+    if(retVal != RT_ERR_OK)
+        return retVal;
+
+    /* Write ACS_CMD register for data bits */
+    regAddr = RTL8367C_TABLE_ACCESS_CTRL_REG;
+    regData = RTL8367C_TABLE_ACCESS_REG_DATA(TB_OP_WRITE, TB_TARGET_ACLRULE);
+    retVal = rtl8367c_setAsicRegBits(regAddr, RTL8367C_TABLE_TYPE_MASK | RTL8367C_COMMAND_TYPE_MASK, regData);
+    if(retVal != RT_ERR_OK)
+        return retVal;
+
+#ifdef CONFIG_RTL8367C_ASICDRV_TEST
+    memcpy(&Rtl8370sVirtualAclRuleTable[index], &aclRuleSmi, sizeof(rtl8367c_aclrulesmi));
+#endif
+
+    return RT_ERR_OK;
+}
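+
+/*
+ * Illustrative usage sketch (not part of the original sources; field values
+ * are arbitrary): writing a rule means filling both the data and care
+ * halves, then letting the driver serialize them through the indirect
+ * table-access registers.
+ */
+#if 0
+void example_write_rule(void)
+{
+    rtl8367c_aclrule rule;
+
+    memset(&rule, 0x00, sizeof(rule));
+    rule.valid = 1;
+    rule.data_bits.active_portmsk = 0x001;  /* match frames ingressing port 0 */
+    rule.care_bits.active_portmsk = 0x7FF;  /* ...and care about all 11 port bits */
+
+    rtl8367c_setAsicAclRule(0, &rule);
+}
+#endif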
+/* Function Name:
+ *      rtl8367c_getAsicAclRule
+ * Description:
+ *      Get acl rule content
+ * Input:
+ *      index   - ACL rule index (0-95) of 96 ACL rules
+ * Output:
+ *      pAclRule - ACL rule structure for the result
+ * Return:
+ *      RT_ERR_OK               - Success
+ *      RT_ERR_SMI              - SMI access error
+ *      RT_ERR_OUT_OF_RANGE     - Invalid ACL rule index (0-95)
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicAclRule(rtk_uint32 index, rtl8367c_aclrule *pAclRule)
+{
+    rtl8367c_aclrulesmi aclRuleSmi;
+    rtk_uint32 regAddr, regData;
+    ret_t retVal;
+    rtk_uint16* tableAddr;
+    rtk_uint32 i;
+
+    if(index > RTL8367C_ACLRULEMAX)
+        return RT_ERR_OUT_OF_RANGE;
+
+    memset(&aclRuleSmi, 0x00, sizeof(rtl8367c_aclrulesmi));
+
+    /* Write ACS_ADR register for data bits */
+    regAddr = RTL8367C_TABLE_ACCESS_ADDR_REG;
+    if(index >= 64)
+        regData = RTL8367C_ACLRULETBADDR2(DATABITS, index);
+    else
+        regData = RTL8367C_ACLRULETBADDR(DATABITS, index);
+
+    retVal = rtl8367c_setAsicReg(regAddr, regData);
+    if(retVal != RT_ERR_OK)
+        return retVal;
+
+
+    /* Write ACS_CMD register */
+    regAddr = RTL8367C_TABLE_ACCESS_CTRL_REG;
+    regData = RTL8367C_TABLE_ACCESS_REG_DATA(TB_OP_READ, TB_TARGET_ACLRULE);
+    retVal = rtl8367c_setAsicRegBits(regAddr, RTL8367C_TABLE_TYPE_MASK | RTL8367C_COMMAND_TYPE_MASK, regData);
+    if(retVal != RT_ERR_OK)
+        return retVal;
+
+    /* Read Data Bits */
+    regAddr = RTL8367C_TABLE_ACCESS_RDDATA_BASE;
+    tableAddr = (rtk_uint16*)&aclRuleSmi.data_bits;
+    for(i = 0; i < RTL8367C_ACLRULETBLEN; i++)
+    {
+        retVal = rtl8367c_getAsicReg(regAddr, &regData);
+        if(retVal != RT_ERR_OK)
+            return retVal;
+
+        *tableAddr = regData;
+
+        regAddr ++;
+        tableAddr ++;
+    }
+
+    /* Read Valid Bit */
+    retVal = rtl8367c_getAsicRegBit(RTL8367C_TABLE_ACCESS_RDDATA_REG(RTL8367C_ACLRULETBLEN), 0, &regData);
+    if(retVal != RT_ERR_OK)
+        return retVal;
+    aclRuleSmi.valid = regData & 0x1;
+    /* Read active_portmsk_ext Bits */
+    retVal = rtl8367c_getAsicRegBits(RTL8367C_TABLE_ACCESS_RDDATA_REG(RTL8367C_ACLRULETBLEN), 0x7<<1, &regData);
+    if(retVal != RT_ERR_OK)
+        return retVal;
+    aclRuleSmi.data_bits_ext.rule_info = (regData & 0x0007) << 1;
+
+
+    /* Write ACS_ADR register for care bits */
+    regAddr = RTL8367C_TABLE_ACCESS_ADDR_REG;
+    if(index >= 64)
+        regData = RTL8367C_ACLRULETBADDR2(CAREBITS, index);
+    else
+        regData = RTL8367C_ACLRULETBADDR(CAREBITS, index);
+
+    retVal = rtl8367c_setAsicReg(regAddr, regData);
+    if(retVal != RT_ERR_OK)
+        return retVal;
+
+    /* Write ACS_CMD register */
+    regAddr = RTL8367C_TABLE_ACCESS_CTRL_REG;
+    regData = RTL8367C_TABLE_ACCESS_REG_DATA(TB_OP_READ, TB_TARGET_ACLRULE);
+    retVal = rtl8367c_setAsicRegBits(regAddr, RTL8367C_TABLE_TYPE_MASK | RTL8367C_COMMAND_TYPE_MASK, regData);
+    if(retVal != RT_ERR_OK)
+        return retVal;
+
+    /* Read Care Bits */
+    regAddr = RTL8367C_TABLE_ACCESS_RDDATA_BASE;
+    tableAddr = (rtk_uint16*)&aclRuleSmi.care_bits;
+    for(i = 0; i < RTL8367C_ACLRULETBLEN; i++)
+    {
+        retVal = rtl8367c_getAsicReg(regAddr, &regData);
+        if(retVal != RT_ERR_OK)
+            return retVal;
+
+        *tableAddr = regData;
+
+        regAddr ++;
+        tableAddr ++;
+    }
+    /* Read active_portmsk_ext care Bits */
+    retVal = rtl8367c_getAsicRegBits(RTL8367C_TABLE_ACCESS_RDDATA_REG(RTL8367C_ACLRULETBLEN), 0x7<<1, &regData);
+    if(retVal != RT_ERR_OK)
+        return retVal;
+    aclRuleSmi.care_bits_ext.rule_info = (regData & 0x0007) << 1;
+
+#ifdef CONFIG_RTL8367C_ASICDRV_TEST
+    memcpy(&aclRuleSmi,&Rtl8370sVirtualAclRuleTable[index], sizeof(rtl8367c_aclrulesmi));
+#endif
+
+     _rtl8367c_aclRuleStSmi2User(pAclRule, &aclRuleSmi);
+
+    return RT_ERR_OK;
+}
+/* Function Name:
+ *      rtl8367c_setAsicAclNot
+ * Description:
+ *      Set rule comparison result inversion / no inversion
+ * Input:
+ *      index   - ACL rule index (0-95) of 96 ACL rules
+ *      not     - 1: inverse, 0: don't inverse
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK               - Success
+ *      RT_ERR_SMI              - SMI access error
+ *      RT_ERR_OUT_OF_RANGE     - Invalid ACL rule index (0-95)
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_setAsicAclNot(rtk_uint32 index, rtk_uint32 not)
+{
+    if(index > RTL8367C_ACLRULEMAX)
+        return RT_ERR_OUT_OF_RANGE;
+
+    if(index < 64)
+        return rtl8367c_setAsicRegBit(RTL8367C_ACL_ACTION_CTRL_REG(index), RTL8367C_ACL_OP_NOT_OFFSET(index), not);
+    else
+        return rtl8367c_setAsicRegBit(RTL8367C_ACL_ACTION_CTRL2_REG(index), RTL8367C_ACL_OP_NOT_OFFSET(index), not);
+
+}
+/* Function Name:
+ *      rtl8367c_getAsicAclNot
+ * Description:
+ *      Get rule comparison result inversion / no inversion
+ * Input:
+ *      index   - ACL rule index (0-95) of 96 ACL rules
+ * Output:
+ *      pNot    - 1: inverse, 0: don't inverse
+ * Return:
+ *      RT_ERR_OK               - Success
+ *      RT_ERR_SMI              - SMI access error
+ *      RT_ERR_OUT_OF_RANGE     - Invalid ACL rule index (0-95)
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicAclNot(rtk_uint32 index, rtk_uint32* pNot)
+{
+    if(index > RTL8367C_ACLRULEMAX)
+        return RT_ERR_OUT_OF_RANGE;
+
+    if(index < 64)
+        return rtl8367c_getAsicRegBit(RTL8367C_ACL_ACTION_CTRL_REG(index), RTL8367C_ACL_OP_NOT_OFFSET(index), pNot);
+    else
+        return rtl8367c_getAsicRegBit(RTL8367C_ACL_ACTION_CTRL2_REG(index), RTL8367C_ACL_OP_NOT_OFFSET(index), pNot);
+
+}
+/* Function Name:
+ *      rtl8367c_setAsicAclTemplate
+ * Description:
+ *      Set fields of an ACL template
+ * Input:
+ *      index   - ACL template index(0~4)
+ *      pAclType - ACL type structure for setting
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK               - Success
+ *      RT_ERR_SMI              - SMI access error
+ *      RT_ERR_OUT_OF_RANGE     - Invalid ACL template index(0~4)
+ * Note:
+ *      The API sets the field types of one of the 5 ACL rule templates.
+ *      Each template has 8 fields; a field's type defines how the corresponding field of an ACL rule is interpreted.
+ *      The 8 fields of ACL rules 0~95 are described by the template assigned to each rule's ACL group
+ */
+ret_t rtl8367c_setAsicAclTemplate(rtk_uint32 index, rtl8367c_acltemplate_t* pAclType)
+{
+    ret_t retVal;
+    rtk_uint32 i;
+    rtk_uint32 regAddr, regData;
+
+    if(index >= RTL8367C_ACLTEMPLATENO)
+        return RT_ERR_OUT_OF_RANGE;
+
+    regAddr = RTL8367C_ACL_RULE_TEMPLATE_CTRL_REG(index);
+
+    for(i = 0; i < (RTL8367C_ACLRULEFIELDNO/2); i++)
+    {
+        regData = pAclType->field[i*2+1];
+        regData = regData << 8 | pAclType->field[i*2];
+
+        retVal = rtl8367c_setAsicReg(regAddr + i, regData);
+
+        if(retVal != RT_ERR_OK)
+            return retVal;
+    }
+
+    return retVal;
+}
+/* Function Name:
+ *      rtl8367c_getAsicAclTemplate
+ * Description:
+ *      Get fields of an ACL template
+ * Input:
+ *      index   - ACL template index(0~4)
+ * Output:
+ *      pAclType - ACL type structure for the result
+ * Return:
+ *      RT_ERR_OK               - Success
+ *      RT_ERR_SMI              - SMI access error
+ *      RT_ERR_OUT_OF_RANGE     - Invalid ACL template index(0~4)
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicAclTemplate(rtk_uint32 index, rtl8367c_acltemplate_t *pAclType)
+{
+    ret_t retVal;
+    rtk_uint32 i;
+    rtk_uint32 regData, regAddr;
+
+    if(index >= RTL8367C_ACLTEMPLATENO)
+        return RT_ERR_OUT_OF_RANGE;
+
+    regAddr = RTL8367C_ACL_RULE_TEMPLATE_CTRL_REG(index);
+
+    for(i = 0; i < (RTL8367C_ACLRULEFIELDNO/2); i++)
+    {
+        retVal = rtl8367c_getAsicReg(regAddr + i,&regData);
+        if(retVal != RT_ERR_OK)
+            return retVal;
+
+        pAclType->field[i*2] = regData & 0xFF;
+        pAclType->field[i*2 + 1] = (regData >> 8) & 0xFF;
+    }
+
+    return RT_ERR_OK;
+}
+/* Function Name:
+ *      rtl8367c_setAsicAclAct
+ * Description:
+ *      Set ACL rule matched Action
+ * Input:
+ *      index   - ACL rule index (0-95) of 96 ACL rules
+ *      pAclAct     - ACL action structure for setting
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK               - Success
+ *      RT_ERR_SMI              - SMI access error
+ *      RT_ERR_OUT_OF_RANGE     - Invalid ACL rule index (0-95)
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_setAsicAclAct(rtk_uint32 index, rtl8367c_acl_act_t* pAclAct)
+{
+    rtk_uint16 aclActSmi[RTL8367C_ACL_ACT_TABLE_LEN];
+    ret_t retVal;
+    rtk_uint32 regAddr, regData;
+    rtk_uint16* tableAddr;
+    rtk_uint32 i;
+
+    if(index > RTL8367C_ACLRULEMAX)
+        return RT_ERR_OUT_OF_RANGE;
+
+    memset(aclActSmi, 0x00, sizeof(rtk_uint16) * RTL8367C_ACL_ACT_TABLE_LEN);
+     _rtl8367c_aclActStUser2Smi(pAclAct, aclActSmi);
+
+    /* Write ACS_ADR register for data bits */
+    regAddr = RTL8367C_TABLE_ACCESS_ADDR_REG;
+    regData = index;
+    retVal = rtl8367c_setAsicReg(regAddr, regData);
+    if(retVal != RT_ERR_OK)
+        return retVal;
+
+    /* Write Data Bits to ACS_DATA registers */
+     tableAddr = aclActSmi;
+     regAddr = RTL8367C_TABLE_ACCESS_WRDATA_BASE;
+
+    for(i = 0; i < RTL8367C_ACLACTTBLEN; i++)
+    {
+        regData = *tableAddr;
+        retVal = rtl8367c_setAsicReg(regAddr, regData);
+        if(retVal != RT_ERR_OK)
+            return retVal;
+
+        regAddr++;
+        tableAddr++;
+    }
+
+    /* Write ACS_CMD register */
+    regAddr = RTL8367C_TABLE_ACCESS_CTRL_REG;
+    regData = RTL8367C_TABLE_ACCESS_REG_DATA(TB_OP_WRITE, TB_TARGET_ACLACT);
+    retVal = rtl8367c_setAsicRegBits(regAddr, RTL8367C_TABLE_TYPE_MASK | RTL8367C_COMMAND_TYPE_MASK, regData);
+    if(retVal != RT_ERR_OK)
+        return retVal;
+
+#ifdef CONFIG_RTL8367C_ASICDRV_TEST
+    memcpy(&Rtl8370sVirtualAclActTable[index][0], aclActSmi, sizeof(rtk_uint16) * RTL8367C_ACL_ACT_TABLE_LEN);
+#endif
+
+    return RT_ERR_OK;
+}
+/* Function Name:
+ *      rtl8367c_getAsicAclAct
+ * Description:
+ *      Get ACL rule matched Action
+ * Input:
+ *      index   - ACL rule index (0-95) of 96 ACL rules
+ * Output:
+ *      pAclAct     - ACL action structure for the result
+ * Return:
+ *      RT_ERR_OK               - Success
+ *      RT_ERR_SMI              - SMI access error
+ *      RT_ERR_OUT_OF_RANGE     - Invalid ACL rule index (0-95)
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicAclAct(rtk_uint32 index, rtl8367c_acl_act_t *pAclAct)
+{
+    rtk_uint16 aclActSmi[RTL8367C_ACL_ACT_TABLE_LEN];
+    ret_t retVal;
+    rtk_uint32 regAddr, regData;
+    rtk_uint16 *tableAddr;
+    rtk_uint32 i;
+
+    if(index > RTL8367C_ACLRULEMAX)
+        return RT_ERR_OUT_OF_RANGE;
+
+    memset(aclActSmi, 0x00, sizeof(rtk_uint16) * RTL8367C_ACL_ACT_TABLE_LEN);
+
+    /* Write ACS_ADR register for data bits */
+    regAddr = RTL8367C_TABLE_ACCESS_ADDR_REG;
+    regData = index;
+    retVal = rtl8367c_setAsicReg(regAddr, regData);
+    if(retVal != RT_ERR_OK)
+        return retVal;
+
+    /* Write ACS_CMD register */
+    regAddr = RTL8367C_TABLE_ACCESS_CTRL_REG;
+    regData = RTL8367C_TABLE_ACCESS_REG_DATA(TB_OP_READ, TB_TARGET_ACLACT);
+    retVal = rtl8367c_setAsicRegBits(regAddr, RTL8367C_TABLE_TYPE_MASK | RTL8367C_COMMAND_TYPE_MASK, regData);
+    if(retVal != RT_ERR_OK)
+        return retVal;
+
+    /* Read Data Bits */
+    regAddr = RTL8367C_TABLE_ACCESS_RDDATA_BASE;
+    tableAddr = aclActSmi;
+    for(i = 0; i < RTL8367C_ACLACTTBLEN; i++)
+    {
+        retVal = rtl8367c_getAsicReg(regAddr, &regData);
+        if(retVal != RT_ERR_OK)
+            return retVal;
+
+        *tableAddr = regData;
+
+        regAddr ++;
+        tableAddr ++;
+    }
+
+#ifdef CONFIG_RTL8367C_ASICDRV_TEST
+    memcpy(aclActSmi, &Rtl8370sVirtualAclActTable[index][0], sizeof(rtk_uint16) * RTL8367C_ACL_ACT_TABLE_LEN);
+#endif
+
+     _rtl8367c_aclActStSmi2User(pAclAct, aclActSmi);
+
+    return RT_ERR_OK;
+}
+/* Function Name:
+ *      rtl8367c_setAsicAclActCtrl
+ * Description:
+ *      Set ACL rule matched Action Control Bits
+ * Input:
+ *      index       - ACL rule index (0-95) of 96 ACL rules
+ *      aclActCtrl  - 6 ACL Control Bits
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK               - Success
+ *      RT_ERR_SMI              - SMI access error
+ *      RT_ERR_OUT_OF_RANGE     - Invalid ACL rule index (0-95)
+ * Note:
+ *      The ACL action control bits indicate which actions will be taken when a rule matches
+ */
+ret_t rtl8367c_setAsicAclActCtrl(rtk_uint32 index, rtk_uint32 aclActCtrl)
+{
+    ret_t retVal;
+
+    if(index > RTL8367C_ACLRULEMAX)
+        return RT_ERR_OUT_OF_RANGE;
+
+    if(index >= 64)
+        retVal = rtl8367c_setAsicRegBits(RTL8367C_ACL_ACTION_CTRL2_REG(index), RTL8367C_ACL_OP_ACTION_MASK(index), aclActCtrl);
+    else
+        retVal = rtl8367c_setAsicRegBits(RTL8367C_ACL_ACTION_CTRL_REG(index), RTL8367C_ACL_OP_ACTION_MASK(index), aclActCtrl);
+
+    return retVal;
+}
+/* Function Name:
+ *      rtl8367c_getAsicAclActCtrl
+ * Description:
+ *      Get ACL rule matched Action Control Bits
+ * Input:
+ *      index       - ACL rule index (0-95) of 96 ACL rules
+ * Output:
+ *      pAclActCtrl     - 6 ACL Control Bits
+ * Return:
+ *      RT_ERR_OK               - Success
+ *      RT_ERR_SMI              - SMI access error
+ *      RT_ERR_OUT_OF_RANGE     - Invalid ACL rule index (0-95)
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicAclActCtrl(rtk_uint32 index, rtk_uint32 *pAclActCtrl)
+{
+    ret_t retVal;
+    rtk_uint32 regData;
+
+    if(index > RTL8367C_ACLRULEMAX)
+        return RT_ERR_OUT_OF_RANGE;
+
+    if(index >= 64)
+        retVal = rtl8367c_getAsicRegBits(RTL8367C_ACL_ACTION_CTRL2_REG(index), RTL8367C_ACL_OP_ACTION_MASK(index), &regData);
+    else
+        retVal = rtl8367c_getAsicRegBits(RTL8367C_ACL_ACTION_CTRL_REG(index), RTL8367C_ACL_OP_ACTION_MASK(index), &regData);
+
+    if(retVal != RT_ERR_OK)
+        return retVal;
+
+    *pAclActCtrl = regData;
+
+    return RT_ERR_OK;
+}
+/* Function Name:
+ *      rtl8367c_setAsicAclPortRange
+ * Description:
+ *      Set ACL TCP/UDP range check
+ * Input:
+ *      index       - TCP/UDP port range check table index
+ *      type        - Range check type
+ *      upperPort   - TCP/UDP port range upper bound
+ *      lowerPort   - TCP/UDP port range lower bound
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK               - Success
+ *      RT_ERR_SMI              - SMI access error
+ *      RT_ERR_OUT_OF_RANGE     - Invalid TCP/UDP port range check table index
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_setAsicAclPortRange(rtk_uint32 index, rtk_uint32 type, rtk_uint32 upperPort, rtk_uint32 lowerPort)
+{
+    ret_t retVal;
+
+    if(index > RTL8367C_ACLRANGEMAX)
+        return RT_ERR_OUT_OF_RANGE;
+
+    retVal = rtl8367c_setAsicRegBits(RTL8367C_REG_ACL_SDPORT_RANGE_ENTRY0_CTRL2 + index*3, RTL8367C_ACL_SDPORT_RANGE_ENTRY0_CTRL2_MASK, type);
+    if(retVal != RT_ERR_OK)
+        return retVal;
+
+    retVal = rtl8367c_setAsicReg(RTL8367C_REG_ACL_SDPORT_RANGE_ENTRY0_CTRL1 + index*3, upperPort);
+    if(retVal != RT_ERR_OK)
+        return retVal;
+
+    retVal = rtl8367c_setAsicReg(RTL8367C_REG_ACL_SDPORT_RANGE_ENTRY0_CTRL0 + index*3, lowerPort);
+    if(retVal != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
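+
+/*
+ * Illustrative sketch (not part of the original sources; the type value is
+ * hypothetical - real encodings live in rtl8367c_asicdrv_acl.h): each range
+ * entry occupies three consecutive registers (type, upper bound, lower bound).
+ */
+#if 0
+void example_port_range(void)
+{
+    /* Entry 0: match TCP/UDP ports 1000..2000 (range-check type assumed) */
+    rtl8367c_setAsicAclPortRange(0, 1, 2000, 1000);
+}
+#endif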
+/* Function Name:
+ *      rtl8367c_getAsicAclPortRange
+ * Description:
+ *      Get ACL TCP/UDP range check
+ * Input:
+ *      index       - TCP/UDP port range check table index
+ * Output:
+ *      pType       - Range check type
+ *      pUpperPort  - TCP/UDP port range upper bound
+ *      pLowerPort  - TCP/UDP port range lower bound
+ * Return:
+ *      RT_ERR_OK               - Success
+ *      RT_ERR_SMI              - SMI access error
+ *      RT_ERR_OUT_OF_RANGE     - Invalid TCP/UDP port range check table index
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicAclPortRange(rtk_uint32 index, rtk_uint32* pType, rtk_uint32* pUpperPort, rtk_uint32* pLowerPort)
+{
+    ret_t retVal;
+
+    if(index > RTL8367C_ACLRANGEMAX)
+        return RT_ERR_OUT_OF_RANGE;
+
+    retVal = rtl8367c_getAsicRegBits(RTL8367C_REG_ACL_SDPORT_RANGE_ENTRY0_CTRL2 + index*3, RTL8367C_ACL_SDPORT_RANGE_ENTRY0_CTRL2_MASK, pType);
+    if(retVal != RT_ERR_OK)
+        return retVal;
+
+    retVal = rtl8367c_getAsicReg(RTL8367C_REG_ACL_SDPORT_RANGE_ENTRY0_CTRL1 + index*3, pUpperPort);
+    if(retVal != RT_ERR_OK)
+        return retVal;
+
+    retVal = rtl8367c_getAsicReg(RTL8367C_REG_ACL_SDPORT_RANGE_ENTRY0_CTRL0 + index*3, pLowerPort);
+    if(retVal != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+/* Function Name:
+ *      rtl8367c_setAsicAclVidRange
+ * Description:
+ *      Set ACL VID range check
+ * Input:
+ *      index       - ACL VID range check index(0~15)
+ *      type        - Range check type
+ *      upperVid    - VID range upper bound
+ *      lowerVid    - VID range lower bound
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK               - Success
+ *      RT_ERR_SMI              - SMI access error
+ *      RT_ERR_OUT_OF_RANGE     - Invalid ACL VID range check index(0~15)
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_setAsicAclVidRange(rtk_uint32 index, rtk_uint32 type, rtk_uint32 upperVid, rtk_uint32 lowerVid)
+{
+    ret_t retVal;
+    rtk_uint32 regData;
+
+    if(index > RTL8367C_ACLRANGEMAX)
+        return RT_ERR_OUT_OF_RANGE;
+
+    regData = ((type << RTL8367C_ACL_VID_RANGE_ENTRY0_CTRL1_CHECK0_TYPE_OFFSET) & RTL8367C_ACL_VID_RANGE_ENTRY0_CTRL1_CHECK0_TYPE_MASK) |
+                (upperVid & RTL8367C_ACL_VID_RANGE_ENTRY0_CTRL1_CHECK0_HIGH_MASK);
+
+    retVal = rtl8367c_setAsicReg(RTL8367C_REG_ACL_VID_RANGE_ENTRY0_CTRL1 + index*2, regData);
+    if(retVal != RT_ERR_OK)
+        return retVal;
+
+    retVal = rtl8367c_setAsicReg(RTL8367C_REG_ACL_VID_RANGE_ENTRY0_CTRL0 + index*2, lowerVid);
+    if(retVal != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+/* Function Name:
+ *      rtl8367c_getAsicAclVidRange
+ * Description:
+ *      Get ACL VID range check
+ * Input:
+ *      index       - ACL VID range check index(0~15)
+ *      pType       - Range check type
+ *      pUpperVid   - VID range upper bound
+ *      pLowerVid   - VID range lower bound
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK               - Success
+ *      RT_ERR_SMI              - SMI access error
+ *      RT_ERR_OUT_OF_RANGE     - Invalid ACL VID range check index(0~15)
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicAclVidRange(rtk_uint32 index, rtk_uint32* pType, rtk_uint32* pUpperVid, rtk_uint32* pLowerVid)
+{
+    ret_t retVal;
+    rtk_uint32 regData;
+
+    if(index > RTL8367C_ACLRANGEMAX)
+        return RT_ERR_OUT_OF_RANGE;
+
+    retVal = rtl8367c_getAsicReg(RTL8367C_REG_ACL_VID_RANGE_ENTRY0_CTRL1 + index*2, &regData);
+    if(retVal != RT_ERR_OK)
+        return retVal;
+
+    *pType = (regData & RTL8367C_ACL_VID_RANGE_ENTRY0_CTRL1_CHECK0_TYPE_MASK) >> RTL8367C_ACL_VID_RANGE_ENTRY0_CTRL1_CHECK0_TYPE_OFFSET;
+    *pUpperVid = regData & RTL8367C_ACL_VID_RANGE_ENTRY0_CTRL1_CHECK0_HIGH_MASK;
+
+    retVal = rtl8367c_getAsicReg(RTL8367C_REG_ACL_VID_RANGE_ENTRY0_CTRL0 + index*2, pLowerVid);
+    if(retVal != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
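+/*
+ * Editorial note (not part of the original Realtek sources): each VID
+ * range entry spans two registers. CTRL1 packs the range-check type and
+ * the upper VID through the CHECK0_TYPE and CHECK0_HIGH mask/offset
+ * pairs, while CTRL0 holds the lower VID on its own; the getter above
+ * unpacks CTRL1 with the same masks.
+ */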
+/* Function Name:
+ *      rtl8367c_setAsicAclIpRange
+ * Description:
+ *      Set ACL IP range check
+ * Input:
+ *      index       - ACL IP range check index(0~15)
+ *      type        - Range check type
+ *      upperIp     - IP range upper bound
+ *      lowerIp     - IP range lower bound
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK               - Success
+ *      RT_ERR_SMI              - SMI access error
+ *      RT_ERR_OUT_OF_RANGE     - Invalid ACL IP range check index(0~15)
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_setAsicAclIpRange(rtk_uint32 index, rtk_uint32 type, ipaddr_t upperIp, ipaddr_t lowerIp)
+{
+    ret_t retVal;
+    rtk_uint32 regData;
+    ipaddr_t ipData;
+
+    if(index > RTL8367C_ACLRANGEMAX)
+        return RT_ERR_OUT_OF_RANGE;
+
+    retVal = rtl8367c_setAsicRegBits(RTL8367C_REG_ACL_IP_RANGE_ENTRY0_CTRL4 + index*5, RTL8367C_ACL_IP_RANGE_ENTRY0_CTRL4_MASK, type);
+    if(retVal != RT_ERR_OK)
+        return retVal;
+
+    ipData = upperIp;
+
+    regData = ipData & 0xFFFF;
+    retVal = rtl8367c_setAsicReg(RTL8367C_REG_ACL_IP_RANGE_ENTRY0_CTRL2 + index*5, regData);
+    if(retVal != RT_ERR_OK)
+        return retVal;
+
+    regData = (ipData>>16) & 0xFFFF;
+    retVal = rtl8367c_setAsicReg(RTL8367C_REG_ACL_IP_RANGE_ENTRY0_CTRL3 + index*5, regData);
+    if(retVal != RT_ERR_OK)
+        return retVal;
+
+    ipData = lowerIp;
+
+    regData = ipData & 0xFFFF;
+    retVal = rtl8367c_setAsicReg(RTL8367C_REG_ACL_IP_RANGE_ENTRY0_CTRL0 + index*5, regData);
+    if(retVal != RT_ERR_OK)
+        return retVal;
+
+    regData = (ipData>>16) & 0xFFFF;
+    retVal = rtl8367c_setAsicReg(RTL8367C_REG_ACL_IP_RANGE_ENTRY0_CTRL1 + index*5, regData);
+    if(retVal != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+/* Function Name:
+ *      rtl8367c_getAsicAclIpRange
+ * Description:
+ *      Get ACL IP range check
+ * Input:
+ *      index       - ACL IP range check index(0~15)
+ *      pType       - Range check type
+ *      pUpperIp    - IP range upper bound
+ *      pLowerIp    - IP range lower bound
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK               - Success
+ *      RT_ERR_SMI              - SMI access error
+ *      RT_ERR_OUT_OF_RANGE     - Invalid ACL IP range check index(0~15)
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicAclIpRange(rtk_uint32 index, rtk_uint32* pType, ipaddr_t* pUpperIp, ipaddr_t* pLowerIp)
+{
+    ret_t retVal;
+    rtk_uint32 regData;
+    ipaddr_t ipData;
+
+    if(index > RTL8367C_ACLRANGEMAX)
+        return RT_ERR_OUT_OF_RANGE;
+
+    retVal = rtl8367c_getAsicRegBits(RTL8367C_REG_ACL_IP_RANGE_ENTRY0_CTRL4 + index*5, RTL8367C_ACL_IP_RANGE_ENTRY0_CTRL4_MASK, pType);
+    if(retVal != RT_ERR_OK)
+        return retVal;
+
+    retVal = rtl8367c_getAsicReg(RTL8367C_REG_ACL_IP_RANGE_ENTRY0_CTRL2 + index*5, &regData);
+    if(retVal != RT_ERR_OK)
+        return retVal;
+    ipData = regData;
+
+
+    retVal = rtl8367c_getAsicReg(RTL8367C_REG_ACL_IP_RANGE_ENTRY0_CTRL3 + index*5, &regData);
+    if(retVal != RT_ERR_OK)
+        return retVal;
+
+    ipData = (regData <<16) | ipData;
+    *pUpperIp = ipData;
+
+
+    retVal = rtl8367c_getAsicReg(RTL8367C_REG_ACL_IP_RANGE_ENTRY0_CTRL0 + index*5, &regData);
+    if(retVal != RT_ERR_OK)
+        return retVal;
+    ipData = regData;
+
+
+    retVal = rtl8367c_getAsicReg(RTL8367C_REG_ACL_IP_RANGE_ENTRY0_CTRL1 + index*5, &regData);
+    if(retVal != RT_ERR_OK)
+        return retVal;
+
+    ipData = (regData << 16) | ipData;
+    *pLowerIp = ipData;
+
+    return RT_ERR_OK;
+}
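+/*
+ * Editorial note (not part of the original Realtek sources): each 32-bit
+ * IPv4 bound is split across two 16-bit registers, low half in
+ * CTRL0/CTRL2 and high half in CTRL1/CTRL3. For example, writing
+ * upperIp = 0xC0A80164 (192.168.1.100) stores 0x0164 in CTRL2 and
+ * 0xC0A8 in CTRL3; the getter reassembles it as (high << 16) | low.
+ */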
+
+/* Function Name:
+ *      rtl8367c_setAsicAclGpioPolarity
+ * Description:
+ *      Set ACL GPIO control polarity
+ * Input:
+ *      polarity - 1: High, 0: Low
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK       - Success
+ *      RT_ERR_SMI      - SMI access error
+ * Note:
+ *      none
+ */
+ret_t rtl8367c_setAsicAclGpioPolarity(rtk_uint32 polarity)
+{
+    return rtl8367c_setAsicRegBit(RTL8367C_REG_ACL_GPIO_POLARITY, RTL8367C_ACL_GPIO_POLARITY_OFFSET, polarity);
+}
+/* Function Name:
+ *      rtl8367c_getAsicAclGpioPolarity
+ * Description:
+ *      Get ACL GPIO control polarity
+ * Input:
+ *      pPolarity - 1: High, 0: Low
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK       - Success
+ *      RT_ERR_SMI      - SMI access error
+ * Note:
+ *      none
+ */
+ret_t rtl8367c_getAsicAclGpioPolarity(rtk_uint32* pPolarity)
+{
+    return rtl8367c_getAsicRegBit(RTL8367C_REG_ACL_GPIO_POLARITY, RTL8367C_ACL_GPIO_POLARITY_OFFSET, pPolarity);
+}
+
diff -Nruw linux-4.4.115-fbx/drivers/misc/freebox./rtlapi/rtl8367c_asicdrv_acl.h linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/rtl8367c_asicdrv_acl.h
--- linux-4.4.115-fbx/drivers/misc/freebox./rtlapi/rtl8367c_asicdrv_acl.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/rtl8367c_asicdrv_acl.h	2019-01-22 16:16:24.711257346 +0100
@@ -0,0 +1,214 @@
+#ifndef _RTL8367C_ASICDRV_ACL_H_
+#define _RTL8367C_ASICDRV_ACL_H_
+
+#include <rtl8367c_asicdrv.h>
+
+#define RTL8367C_ACLRULENO                  96
+
+#define RTL8367C_ACLRULEMAX                 (RTL8367C_ACLRULENO-1)
+#define RTL8367C_ACLRULEFIELDNO             8
+#define RTL8367C_ACLTEMPLATENO              5
+#define RTL8367C_ACLTYPEMAX                 (RTL8367C_ACLTEMPLATENO-1)
+
+#define RTL8367C_ACLRULETBLEN               9
+#define RTL8367C_ACLACTTBLEN                4
+#define RTL8367C_ACLRULETBADDR(type, rule)  (((type) << 6) | (rule))
+#define RTL8367C_ACLRULETBADDR2(type, rule) (((type) << 5) | ((rule) + 64))
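+/* Editorial note (not part of the original Realtek sources): the rule
+ * table address packs an access-type selector into the high bits and
+ * the rule index into the low bits; e.g. RTL8367C_ACLRULETBADDR(1, 10)
+ * evaluates to (1 << 6) | 10 == 0x4A. Judging by its (rule + 64)
+ * offset, RTL8367C_ACLRULETBADDR2 appears to be the variant used for
+ * rule indexes 64 and above.
+ */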
+
+#define ACL_ACT_CVLAN_ENABLE_MASK           0x1
+#define ACL_ACT_SVLAN_ENABLE_MASK           0x2
+#define ACL_ACT_PRIORITY_ENABLE_MASK        0x4
+#define ACL_ACT_POLICING_ENABLE_MASK        0x8
+#define ACL_ACT_FWD_ENABLE_MASK             0x10
+#define ACL_ACT_INTGPIO_ENABLE_MASK         0x20
+
+#define RTL8367C_ACLRULETAGBITS             5
+
+#define RTL8367C_ACLRANGENO                 16
+
+#define RTL8367C_ACLRANGEMAX                (RTL8367C_ACLRANGENO-1)
+
+#define RTL8367C_ACL_PORTRANGEMAX           (0xFFFF)
+#define RTL8367C_ACL_ACT_TABLE_LEN          (4)
+
+enum ACLTCAMTYPES
+{
+    CAREBITS= 0,
+    DATABITS
+};
+
+typedef enum aclFwdAct
+{
+    RTL8367C_ACL_FWD_MIRROR = 0,
+    RTL8367C_ACL_FWD_REDIRECT,
+    RTL8367C_ACL_FWD_MIRRORFUNTION,
+    RTL8367C_ACL_FWD_TRAP,
+} rtl8367c_aclFwd_t;
+
+enum ACLFIELDTYPES
+{
+    ACL_UNUSED,
+    ACL_DMAC0,
+    ACL_DMAC1,
+    ACL_DMAC2,
+    ACL_SMAC0,
+    ACL_SMAC1,
+    ACL_SMAC2,
+    ACL_ETHERTYPE,
+    ACL_STAG,
+    ACL_CTAG,
+    ACL_IP4SIP0 = 0x10,
+    ACL_IP4SIP1,
+    ACL_IP4DIP0,
+    ACL_IP4DIP1,
+    ACL_IP6SIP0WITHIPV4 = 0x20,
+    ACL_IP6SIP1WITHIPV4,
+    ACL_IP6DIP0WITHIPV4 = 0x28,
+    ACL_IP6DIP1WITHIPV4,
+    ACL_VIDRANGE = 0x30,
+    ACL_IPRANGE,
+    ACL_PORTRANGE,
+    ACL_FIELD_VALID,
+    ACL_FIELD_SELECT00 = 0x40,
+    ACL_FIELD_SELECT01,
+    ACL_FIELD_SELECT02,
+    ACL_FIELD_SELECT03,
+    ACL_FIELD_SELECT04,
+    ACL_FIELD_SELECT05,
+    ACL_FIELD_SELECT06,
+    ACL_FIELD_SELECT07,
+    ACL_FIELD_SELECT08,
+    ACL_FIELD_SELECT09,
+    ACL_FIELD_SELECT10,
+    ACL_FIELD_SELECT11,
+    ACL_FIELD_SELECT12,
+    ACL_FIELD_SELECT13,
+    ACL_FIELD_SELECT14,
+    ACL_FIELD_SELECT15,
+    ACL_TCPSPORT = 0x80,
+    ACL_TCPDPORT,
+    ACL_TCPFLAG,
+    ACL_UDPSPORT,
+    ACL_UDPDPORT,
+    ACL_ICMPCODETYPE,
+    ACL_IGMPTYPE,
+    ACL_SPORT,
+    ACL_DPORT,
+    ACL_IP4TOSPROTO,
+    ACL_IP4FLAGOFF,
+    ACL_TCNH,
+    ACL_CPUTAG,
+    ACL_L2PAYLOAD,
+    ACL_IP6SIP0,
+    ACL_IP6SIP1,
+    ACL_IP6SIP2,
+    ACL_IP6SIP3,
+    ACL_IP6SIP4,
+    ACL_IP6SIP5,
+    ACL_IP6SIP6,
+    ACL_IP6SIP7,
+    ACL_IP6DIP0,
+    ACL_IP6DIP1,
+    ACL_IP6DIP2,
+    ACL_IP6DIP3,
+    ACL_IP6DIP4,
+    ACL_IP6DIP5,
+    ACL_IP6DIP6,
+    ACL_IP6DIP7,
+    ACL_TYPE_END
+};
+
+struct acl_rule_smi_st{
+    rtk_uint16 rule_info;
+    rtk_uint16 field[RTL8367C_ACLRULEFIELDNO];
+};
+
+struct acl_rule_smi_ext_st{
+    rtk_uint16 rule_info;
+};
+
+typedef struct ACLRULESMI{
+    struct acl_rule_smi_st  care_bits;
+    rtk_uint16      valid:1;
+    struct acl_rule_smi_st  data_bits;
+
+    struct acl_rule_smi_ext_st care_bits_ext;
+    struct acl_rule_smi_ext_st data_bits_ext;
+}rtl8367c_aclrulesmi;
+
+struct acl_rule_st{
+    rtk_uint16 active_portmsk:11;
+    rtk_uint16 type:3;
+    rtk_uint16 tag_exist:5;
+    rtk_uint16 field[RTL8367C_ACLRULEFIELDNO];
+};
+
+typedef struct ACLRULE{
+    struct acl_rule_st  data_bits;
+    rtk_uint16      valid:1;
+    struct acl_rule_st  care_bits;
+}rtl8367c_aclrule;
+
+
+typedef struct rtl8367c_acltemplate_s{
+    rtk_uint8 field[8];
+}rtl8367c_acltemplate_t;
+
+
+typedef struct acl_act_s{
+    rtk_uint16 cvidx_cact:7;
+    rtk_uint16 cact:2;
+    rtk_uint16 svidx_sact:7;
+    rtk_uint16 sact:2;
+
+
+    rtk_uint16 aclmeteridx:7;
+    rtk_uint16 fwdpmask:11;
+    rtk_uint16 fwdact:2;
+
+    rtk_uint16 pridx:7;
+    rtk_uint16 priact:2;
+    rtk_uint16 gpio_pin:4;
+    rtk_uint16 gpio_en:1;
+    rtk_uint16 aclint:1;
+
+    rtk_uint16 cact_ext:2;
+    rtk_uint16 fwdact_ext:1;
+    rtk_uint16 tag_fmt:2;
+}rtl8367c_acl_act_t;
+
+typedef struct acl_rule_union_s
+{
+    rtl8367c_aclrule aclRule;
+    rtl8367c_acl_act_t aclAct;
+    rtk_uint32 aclActCtrl;
+    rtk_uint32 aclNot;
+}rtl8367c_acl_rule_union_t;
+
+
+extern ret_t rtl8367c_setAsicAcl(rtk_uint32 port, rtk_uint32 enabled);
+extern ret_t rtl8367c_getAsicAcl(rtk_uint32 port, rtk_uint32* pEnabled);
+extern ret_t rtl8367c_setAsicAclUnmatchedPermit(rtk_uint32 port, rtk_uint32 enabled);
+extern ret_t rtl8367c_getAsicAclUnmatchedPermit(rtk_uint32 port, rtk_uint32* pEnabled);
+extern ret_t rtl8367c_setAsicAclRule(rtk_uint32 index, rtl8367c_aclrule *pAclRule);
+extern ret_t rtl8367c_getAsicAclRule(rtk_uint32 index, rtl8367c_aclrule *pAclRule);
+extern ret_t rtl8367c_setAsicAclNot(rtk_uint32 index, rtk_uint32 not);
+extern ret_t rtl8367c_getAsicAclNot(rtk_uint32 index, rtk_uint32* pNot);
+extern ret_t rtl8367c_setAsicAclTemplate(rtk_uint32 index, rtl8367c_acltemplate_t* pAclType);
+extern ret_t rtl8367c_getAsicAclTemplate(rtk_uint32 index, rtl8367c_acltemplate_t *pAclType);
+extern ret_t rtl8367c_setAsicAclAct(rtk_uint32 index, rtl8367c_acl_act_t* pAclAct);
+extern ret_t rtl8367c_getAsicAclAct(rtk_uint32 index, rtl8367c_acl_act_t *pAclAct);
+extern ret_t rtl8367c_setAsicAclActCtrl(rtk_uint32 index, rtk_uint32 aclActCtrl);
+extern ret_t rtl8367c_getAsicAclActCtrl(rtk_uint32 index, rtk_uint32 *aclActCtrl);
+extern ret_t rtl8367c_setAsicAclPortRange(rtk_uint32 index, rtk_uint32 type, rtk_uint32 upperPort, rtk_uint32 lowerPort);
+extern ret_t rtl8367c_getAsicAclPortRange(rtk_uint32 index, rtk_uint32* pType, rtk_uint32* pUpperPort, rtk_uint32* pLowerPort);
+extern ret_t rtl8367c_setAsicAclVidRange(rtk_uint32 index, rtk_uint32 type, rtk_uint32 upperVid, rtk_uint32 lowerVid);
+extern ret_t rtl8367c_getAsicAclVidRange(rtk_uint32 index, rtk_uint32* pType, rtk_uint32* pUpperVid, rtk_uint32* pLowerVid);
+extern ret_t rtl8367c_setAsicAclIpRange(rtk_uint32 index, rtk_uint32 type, ipaddr_t upperIp, ipaddr_t lowerIp);
+extern ret_t rtl8367c_getAsicAclIpRange(rtk_uint32 index, rtk_uint32* pType, ipaddr_t* pUpperIp, ipaddr_t* pLowerIp);
+extern ret_t rtl8367c_setAsicAclGpioPolarity(rtk_uint32 polarity);
+extern ret_t rtl8367c_getAsicAclGpioPolarity(rtk_uint32* pPolarity);
+
+#endif /*_RTL8367C_ASICDRV_ACL_H_*/
+
+
diff -Nruw linux-4.4.115-fbx/drivers/misc/freebox./rtlapi/rtl8367c_asicdrv.c linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/rtl8367c_asicdrv.c
--- linux-4.4.115-fbx/drivers/misc/freebox./rtlapi/rtl8367c_asicdrv.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/rtl8367c_asicdrv.c	2019-01-22 16:16:24.711257346 +0100
@@ -0,0 +1,641 @@
+/*
+ * Copyright (C) 2013 Realtek Semiconductor Corp.
+ * All Rights Reserved.
+ *
+ * This program is the proprietary software of Realtek Semiconductor
+ * Corporation and/or its licensors, and only be used, duplicated,
+ * modified or distributed under the authorized license from Realtek.
+ *
+ * ANY USE OF THE SOFTWARE OTHER THAN AS AUTHORIZED UNDER
+ * THIS LICENSE OR COPYRIGHT LAW IS PROHIBITED.
+ *
+ * $Revision: 76306 $
+ * $Date: 2017-03-08 15:13:58 +0800 (Wed, 08 Mar 2017) $
+ *
+ * Purpose : RTL8367C switch high-level API for RTL8367C
+ * Feature :
+ *
+ */
+
+#include <rtl8367c_asicdrv.h>
+
+#if defined(RTK_X86_ASICDRV)
+#include <I2Clib.h>
+#else
+#include <smi.h>
+#endif
+
+/*for driver verify testing only*/
+#ifdef CONFIG_RTL8367C_ASICDRV_TEST
+#define CLE_VIRTUAL_REG_SIZE        0x10000
+rtk_uint16 CleVirtualReg[CLE_VIRTUAL_REG_SIZE];
+#endif
+
+#if defined(CONFIG_RTL865X_CLE) || defined (RTK_X86_CLE)
+rtk_uint32 cleDebuggingDisplay;
+#endif
+
+#ifdef EMBEDDED_SUPPORT
+extern void setReg(rtk_uint16, rtk_uint16);
+extern rtk_uint16 getReg(rtk_uint16);
+#endif
+
+/* Function Name:
+ *      rtl8367c_setAsicRegBit
+ * Description:
+ *      Set a bit value of a specified register
+ * Input:
+ *      reg     - register's address
+ *      bit     - bit location
+ *      value   - value to set. It can be 0 or 1.
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK       - Success
+ *      RT_ERR_SMI      - SMI access error
+ *      RT_ERR_INPUT    - Invalid input parameter
+ * Note:
+ *      Set a bit of a specified register to 1 or 0.
+ */
+ret_t rtl8367c_setAsicRegBit(rtk_uint32 reg, rtk_uint32 bit, rtk_uint32 value)
+{
+
+#if defined(RTK_X86_ASICDRV)
+    rtk_uint32 regData;
+    ret_t retVal;
+
+    if(bit >= RTL8367C_REGBITLENGTH)
+        return RT_ERR_INPUT;
+
+    retVal = Access_Read(reg, 2, &regData);
+    if(TRUE != retVal)
+        return RT_ERR_SMI;
+
+    if(0x8367B == cleDebuggingDisplay)
+        PRINT("R[0x%4.4x]=0x%4.4x\n", reg, regData);
+
+    if(value)
+        regData = regData | (1 << bit);
+    else
+        regData = regData & (~(1 << bit));
+
+    retVal = Access_Write(reg,2, regData);
+    if(TRUE != retVal)
+        return RT_ERR_SMI;
+
+    if(0x8367B == cleDebuggingDisplay)
+        PRINT("W[0x%4.4x]=0x%4.4x\n", reg, regData);
+
+
+#elif defined(CONFIG_RTL8367C_ASICDRV_TEST)
+
+    if(bit >= RTL8367C_REGBITLENGTH)
+        return RT_ERR_INPUT;
+
+    else if(reg >= CLE_VIRTUAL_REG_SIZE)
+        return RT_ERR_OUT_OF_RANGE;
+
+    if(value)
+    {
+        CleVirtualReg[reg] =  CleVirtualReg[reg] | (1 << bit);
+    }
+    else
+    {
+        CleVirtualReg[reg] =  CleVirtualReg[reg] & (~(1 << bit));
+    }
+
+    if(0x8367B == cleDebuggingDisplay)
+        PRINT("W[0x%4.4x]=0x%4.4x\n", reg, CleVirtualReg[reg]);
+
+#elif defined(EMBEDDED_SUPPORT)
+    rtk_uint16 tmp;
+
+    if(reg > RTL8367C_REGDATAMAX || value > 1)
+        return RT_ERR_INPUT;
+
+    tmp = getReg(reg);
+    tmp &= ~(1 << bit);     /* clear the target bit before setting it */
+    tmp |= (value << bit);
+    setReg(reg, tmp);
+
+#else
+    rtk_uint32 regData;
+    ret_t retVal;
+
+    if(bit >= RTL8367C_REGBITLENGTH)
+        return RT_ERR_INPUT;
+
+    retVal = smi_read(reg, &regData);
+    if(retVal != RT_ERR_OK)
+        return RT_ERR_SMI;
+
+  #ifdef CONFIG_RTL865X_CLE
+    if(0x8367B == cleDebuggingDisplay)
+        PRINT("R[0x%4.4x]=0x%4.4x\n", reg, regData);
+  #endif
+    if(value)
+        regData = regData | (1 << bit);
+    else
+        regData = regData & (~(1 << bit));
+
+    retVal = smi_write(reg, regData);
+    if(retVal != RT_ERR_OK)
+        return RT_ERR_SMI;
+
+  #ifdef CONFIG_RTL865X_CLE
+    if(0x8367B == cleDebuggingDisplay)
+        PRINT("W[0x%4.4x]=0x%4.4x\n", reg, regData);
+  #endif
+
+#endif
+    return RT_ERR_OK;
+}
+/* Function Name:
+ *      rtl8367c_getAsicRegBit
+ * Description:
+ *      Get a bit value of a specified register
+ * Input:
+ *      reg     - register's address
+ *      bit     - bit location
+ *      pValue  - buffer for the bit value read
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK       - Success
+ *      RT_ERR_SMI      - SMI access error
+ *      RT_ERR_INPUT    - Invalid input parameter
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicRegBit(rtk_uint32 reg, rtk_uint32 bit, rtk_uint32 *pValue)
+{
+
+#if defined(RTK_X86_ASICDRV)
+
+    rtk_uint32 regData;
+    ret_t retVal;
+
+    if(bit >= RTL8367C_REGBITLENGTH)
+        return RT_ERR_INPUT;
+
+    retVal = Access_Read(reg, 2, &regData);
+    if(TRUE != retVal)
+        return RT_ERR_SMI;
+
+    *pValue = (regData & (0x1 << bit)) >> bit;
+
+    if(0x8367B == cleDebuggingDisplay)
+        PRINT("R[0x%4.4x]=0x%4.4x\n", reg, regData);
+
+#elif defined(CONFIG_RTL8367C_ASICDRV_TEST)
+
+    if(bit >= RTL8367C_REGBITLENGTH)
+        return RT_ERR_INPUT;
+
+    if(reg >= CLE_VIRTUAL_REG_SIZE)
+        return RT_ERR_OUT_OF_RANGE;
+
+    *pValue = (CleVirtualReg[reg] & (0x1 << bit)) >> bit;
+
+    if(0x8367B == cleDebuggingDisplay)
+        PRINT("R[0x%4.4x]=0x%4.4x\n", reg, CleVirtualReg[reg]);
+
+#elif defined(EMBEDDED_SUPPORT)
+    rtk_uint16 tmp;
+
+    if(reg > RTL8367C_REGDATAMAX )
+        return RT_ERR_INPUT;
+
+    tmp = getReg(reg);
+    tmp = tmp >> bit;
+    tmp &= 1;
+    *pValue = tmp;
+#else
+    rtk_uint32 regData;
+    ret_t retVal;
+
+    retVal = smi_read(reg, &regData);
+    if(retVal != RT_ERR_OK)
+        return RT_ERR_SMI;
+
+  #ifdef CONFIG_RTL865X_CLE
+    if(0x8367B == cleDebuggingDisplay)
+        PRINT("R[0x%4.4x]=0x%4.4x\n", reg, regData);
+  #endif
+
+    *pValue = (regData & (0x1 << bit)) >> bit;
+
+#endif
+    return RT_ERR_OK;
+}
+/* Function Name:
+ *      rtl8367c_setAsicRegBits
+ * Description:
+ *      Set bits value of a specified register
+ * Input:
+ *      reg     - register's address
+ *      bits    - bits mask for setting
+ *      value   - bits value for setting
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK       - Success
+ *      RT_ERR_SMI      - SMI access error
+ *      RT_ERR_INPUT    - Invalid input parameter
+ * Note:
+ *      Set the register field selected by the bit-mask bits; value is shifted to the mask's lowest set bit before being written.
+ */
+ret_t rtl8367c_setAsicRegBits(rtk_uint32 reg, rtk_uint32 bits, rtk_uint32 value)
+{
+
+#if defined(RTK_X86_ASICDRV)
+
+    rtk_uint32 regData;
+    ret_t retVal;
+    rtk_uint32 bitsShift;
+    rtk_uint32 valueShifted;
+
+    if(bits >= (1 << RTL8367C_REGBITLENGTH) )
+        return RT_ERR_INPUT;
+
+    bitsShift = 0;
+    while(!(bits & (1 << bitsShift)))
+    {
+        bitsShift++;
+        if(bitsShift >= RTL8367C_REGBITLENGTH)
+            return RT_ERR_INPUT;
+    }
+
+    valueShifted = value << bitsShift;
+    if(valueShifted > RTL8367C_REGDATAMAX)
+        return RT_ERR_INPUT;
+
+    retVal = Access_Read(reg, 2, &regData);
+    if(TRUE != retVal)
+        return RT_ERR_SMI;
+
+    if(0x8367B == cleDebuggingDisplay)
+        PRINT("R[0x%4.4x]=0x%4.4x\n", reg, regData);
+
+    regData = regData & (~bits);
+    regData = regData | (valueShifted & bits);
+
+    retVal = Access_Write(reg,2, regData);
+    if(TRUE != retVal)
+        return RT_ERR_SMI;
+
+    if(0x8367B == cleDebuggingDisplay)
+        PRINT("W[0x%4.4x]=0x%4.4x\n", reg, regData);
+
+#elif defined(CONFIG_RTL8367C_ASICDRV_TEST)
+    rtk_uint32 regData;
+    rtk_uint32 bitsShift;
+    rtk_uint32 valueShifted;
+
+    if(bits >= (1 << RTL8367C_REGBITLENGTH) )
+        return RT_ERR_INPUT;
+
+    bitsShift = 0;
+    while(!(bits & (1 << bitsShift)))
+    {
+        bitsShift++;
+        if(bitsShift >= RTL8367C_REGBITLENGTH)
+            return RT_ERR_INPUT;
+    }
+    valueShifted = value << bitsShift;
+
+    if(valueShifted > RTL8367C_REGDATAMAX)
+        return RT_ERR_INPUT;
+
+    if(reg >= CLE_VIRTUAL_REG_SIZE)
+        return RT_ERR_OUT_OF_RANGE;
+
+    regData = CleVirtualReg[reg] & (~bits);
+    regData = regData | (valueShifted & bits);
+
+    CleVirtualReg[reg] = regData;
+
+    if(0x8367B == cleDebuggingDisplay)
+        PRINT("W[0x%4.4x]=0x%4.4x\n", reg, regData);
+
+#elif defined(EMBEDDED_SUPPORT)
+    rtk_uint32 regData;
+    rtk_uint32 bitsShift;
+    rtk_uint32 valueShifted;
+
+    if(reg > RTL8367C_REGDATAMAX )
+        return RT_ERR_INPUT;
+
+    if(bits >= (1 << RTL8367C_REGBITLENGTH) )
+        return RT_ERR_INPUT;
+
+    bitsShift = 0;
+    while(!(bits & (1 << bitsShift)))
+    {
+        bitsShift++;
+        if(bitsShift >= RTL8367C_REGBITLENGTH)
+            return RT_ERR_INPUT;
+    }
+
+    valueShifted = value << bitsShift;
+    if(valueShifted > RTL8367C_REGDATAMAX)
+        return RT_ERR_INPUT;
+
+    regData = getReg(reg);
+    regData = regData & (~bits);
+    regData = regData | (valueShifted & bits);
+
+    setReg(reg, regData);
+
+#else
+    rtk_uint32 regData;
+    ret_t retVal;
+    rtk_uint32 bitsShift;
+    rtk_uint32 valueShifted;
+
+    if(bits >= (1 << RTL8367C_REGBITLENGTH) )
+        return RT_ERR_INPUT;
+
+    bitsShift = 0;
+    while(!(bits & (1 << bitsShift)))
+    {
+        bitsShift++;
+        if(bitsShift >= RTL8367C_REGBITLENGTH)
+            return RT_ERR_INPUT;
+    }
+    valueShifted = value << bitsShift;
+
+    if(valueShifted > RTL8367C_REGDATAMAX)
+        return RT_ERR_INPUT;
+
+    retVal = smi_read(reg, &regData);
+    if(retVal != RT_ERR_OK)
+        return RT_ERR_SMI;
+  #ifdef CONFIG_RTL865X_CLE
+    if(0x8367B == cleDebuggingDisplay)
+        PRINT("R[0x%4.4x]=0x%4.4x\n", reg, regData);
+  #endif
+
+    regData = regData & (~bits);
+    regData = regData | (valueShifted & bits);
+
+    retVal = smi_write(reg, regData);
+    if(retVal != RT_ERR_OK)
+        return RT_ERR_SMI;
+  #ifdef CONFIG_RTL865X_CLE
+    if(0x8367B == cleDebuggingDisplay)
+        PRINT("W[0x%4.4x]=0x%4.4x\n", reg, regData);
+  #endif
+#endif
+    return RT_ERR_OK;
+}
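+/*
+ * Editorial usage sketch (not part of the original Realtek sources):
+ * callers pass the field value unshifted; the function shifts it to the
+ * lowest set bit of the mask and read-modify-writes the register, so
+ * the other bits are preserved. With bits = 0x00F0 and value = 0x3 the
+ * write reduces to:
+ *
+ *     regData = (regData & ~0x00F0) | (0x3 << 4);   // field becomes 0x0030
+ */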
+/* Function Name:
+ *      rtl8367c_getAsicRegBits
+ * Description:
+ *      Get bits value of a specified register
+ * Input:
+ *      reg     - register's address
+ *      bits    - bit-mask selecting the field to read
+ *      pValue  - buffer for the field value read
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK       - Success
+ *      RT_ERR_SMI      - SMI access error
+ *      RT_ERR_INPUT    - Invalid input parameter
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicRegBits(rtk_uint32 reg, rtk_uint32 bits, rtk_uint32 *pValue)
+{
+
+#if defined(RTK_X86_ASICDRV)
+
+    rtk_uint32 regData;
+    ret_t retVal;
+    rtk_uint32 bitsShift;
+
+    if(bits >= (1 << RTL8367C_REGBITLENGTH) )
+        return RT_ERR_INPUT;
+
+    bitsShift = 0;
+    while(!(bits & (1 << bitsShift)))
+    {
+        bitsShift++;
+        if(bitsShift >= RTL8367C_REGBITLENGTH)
+            return RT_ERR_INPUT;
+    }
+
+    retVal = Access_Read(reg, 2, &regData);
+    if(TRUE != retVal)
+        return RT_ERR_SMI;
+
+    *pValue = (regData & bits) >> bitsShift;
+
+    if(0x8367B == cleDebuggingDisplay)
+        PRINT("R[0x%4.4x]=0x%4.4x\n", reg, regData);
+
+#elif defined(CONFIG_RTL8367C_ASICDRV_TEST)
+    rtk_uint32 bitsShift;
+
+    if(bits >= (1 << RTL8367C_REGBITLENGTH) )
+        return RT_ERR_INPUT;
+
+    bitsShift = 0;
+    while(!(bits & (1 << bitsShift)))
+    {
+        bitsShift++;
+        if(bitsShift >= RTL8367C_REGBITLENGTH)
+            return RT_ERR_INPUT;
+    }
+
+    if(reg >= CLE_VIRTUAL_REG_SIZE)
+        return RT_ERR_OUT_OF_RANGE;
+
+     *pValue = (CleVirtualReg[reg] & bits) >> bitsShift;
+
+    if(0x8367B == cleDebuggingDisplay)
+        PRINT("R[0x%4.4x]=0x%4.4x\n", reg, CleVirtualReg[reg]);
+
+#elif defined(EMBEDDED_SUPPORT)
+    rtk_uint32 regData;
+    rtk_uint32 bitsShift;
+
+    if(reg > RTL8367C_REGDATAMAX )
+        return RT_ERR_INPUT;
+
+    if(bits >= (1UL << RTL8367C_REGBITLENGTH) )
+        return RT_ERR_INPUT;
+
+    bitsShift = 0;
+    while(!(bits & (1UL << bitsShift)))
+    {
+        bitsShift++;
+        if(bitsShift >= RTL8367C_REGBITLENGTH)
+            return RT_ERR_INPUT;
+    }
+
+    regData = getReg(reg);
+    *pValue = (regData & bits) >> bitsShift;
+
+#else
+    rtk_uint32 regData;
+    ret_t retVal;
+    rtk_uint32 bitsShift;
+
+    if(bits>= (1<<RTL8367C_REGBITLENGTH) )
+        return RT_ERR_INPUT;
+
+    bitsShift = 0;
+    while(!(bits & (1 << bitsShift)))
+    {
+        bitsShift++;
+        if(bitsShift >= RTL8367C_REGBITLENGTH)
+            return RT_ERR_INPUT;
+    }
+
+    retVal = smi_read(reg, &regData);
+    if(retVal != RT_ERR_OK) return RT_ERR_SMI;
+
+    *pValue = (regData & bits) >> bitsShift;
+  #ifdef CONFIG_RTL865X_CLE
+    if(0x8367B == cleDebuggingDisplay)
+        PRINT("R[0x%4.4x]=0x%4.4x\n",reg, regData);
+  #endif
+
+#endif
+    return RT_ERR_OK;
+}
+/* Function Name:
+ *      rtl8367c_setAsicReg
+ * Description:
+ *      Set content of asic register
+ * Input:
+ *      reg     - register's address
+ *      value   - Value setting to register
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK       - Success
+ *      RT_ERR_SMI      - SMI access error
+ * Note:
+ *      The value is written only to mapped ASIC addresses; writing an un-mapped address still returns RT_ERR_OK.
+ */
+ret_t rtl8367c_setAsicReg(rtk_uint32 reg, rtk_uint32 value)
+{
+#if defined(RTK_X86_ASICDRV)/*RTK-CNSD2-NickWu-20061222: for x86 compile*/
+
+    ret_t retVal;
+
+    retVal = Access_Write(reg,2,value);
+    if(TRUE != retVal) return RT_ERR_SMI;
+
+    if(0x8367B == cleDebuggingDisplay)
+        PRINT("W[0x%4.4x]=0x%4.4x\n",reg,value);
+
+#elif defined(CONFIG_RTL8367C_ASICDRV_TEST)
+
+    /*MIBs emulating*/
+    if(reg == RTL8367C_REG_MIB_ADDRESS)
+    {
+        CleVirtualReg[RTL8367C_MIB_COUNTER_BASE_REG] = 0x1;
+        CleVirtualReg[RTL8367C_MIB_COUNTER_BASE_REG+1] = 0x2;
+        CleVirtualReg[RTL8367C_MIB_COUNTER_BASE_REG+2] = 0x3;
+        CleVirtualReg[RTL8367C_MIB_COUNTER_BASE_REG+3] = 0x4;
+    }
+
+    if(reg >= CLE_VIRTUAL_REG_SIZE)
+        return RT_ERR_OUT_OF_RANGE;
+
+    CleVirtualReg[reg] = value;
+
+    if(0x8367B == cleDebuggingDisplay)
+        PRINT("W[0x%4.4x]=0x%4.4x\n",reg,CleVirtualReg[reg]);
+
+#elif defined(EMBEDDED_SUPPORT)
+    if(reg > RTL8367C_REGDATAMAX || value > RTL8367C_REGDATAMAX )
+        return RT_ERR_INPUT;
+
+    setReg(reg, value);
+
+#else
+    ret_t retVal;
+
+    retVal = smi_write(reg, value);
+    if(retVal != RT_ERR_OK)
+        return RT_ERR_SMI;
+  #ifdef CONFIG_RTL865X_CLE
+    if(0x8367B == cleDebuggingDisplay)
+        PRINT("W[0x%4.4x]=0x%4.4x\n",reg,value);
+  #endif
+
+#endif
+
+    return RT_ERR_OK;
+}
+/* Function Name:
+ *      rtl8367c_getAsicReg
+ * Description:
+ *      Get content of asic register
+ * Input:
+ *      reg     - register's address
+ *      pValue  - buffer for the register value read
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK       - Success
+ *      RT_ERR_SMI      - SMI access error
+ * Note:
+ *      Value 0x0000 is returned when reading an un-mapped ASIC address
+ */
+ret_t rtl8367c_getAsicReg(rtk_uint32 reg, rtk_uint32 *pValue)
+{
+
+#if defined(RTK_X86_ASICDRV)
+
+    rtk_uint32 regData;
+    ret_t retVal;
+
+    retVal = Access_Read(reg, 2, &regData);
+    if(TRUE != retVal)
+        return RT_ERR_SMI;
+
+    *pValue = regData;
+
+    if(0x8367B == cleDebuggingDisplay)
+        PRINT("R[0x%4.4x]=0x%4.4x\n", reg, regData);
+
+#elif defined(CONFIG_RTL8367C_ASICDRV_TEST)
+    if(reg >= CLE_VIRTUAL_REG_SIZE)
+        return RT_ERR_OUT_OF_RANGE;
+
+    *pValue = CleVirtualReg[reg];
+
+    if(0x8367B == cleDebuggingDisplay)
+        PRINT("R[0x%4.4x]=0x%4.4x\n", reg, CleVirtualReg[reg]);
+
+#elif defined(EMBEDDED_SUPPORT)
+    if(reg > RTL8367C_REGDATAMAX  )
+        return RT_ERR_INPUT;
+
+    *pValue = getReg(reg);
+
+#else
+    rtk_uint32 regData;
+    ret_t retVal;
+
+    retVal = smi_read(reg, &regData);
+    if(retVal != RT_ERR_OK)
+        return RT_ERR_SMI;
+
+    *pValue = regData;
+  #ifdef CONFIG_RTL865X_CLE
+    if(0x8367B == cleDebuggingDisplay)
+        PRINT("R[0x%4.4x]=0x%4.4x\n", reg, regData);
+  #endif
+
+#endif
+
+    return RT_ERR_OK;
+}
+
diff -Nruw linux-4.4.115-fbx/drivers/misc/freebox./rtlapi/rtl8367c_asicdrv_cputag.c linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/rtl8367c_asicdrv_cputag.c
--- linux-4.4.115-fbx/drivers/misc/freebox./rtlapi/rtl8367c_asicdrv_cputag.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/rtl8367c_asicdrv_cputag.c	2019-01-22 16:16:24.711257346 +0100
@@ -0,0 +1,371 @@
+/*
+ * Copyright (C) 2013 Realtek Semiconductor Corp.
+ * All Rights Reserved.
+ *
+ * This program is the proprietary software of Realtek Semiconductor
+ * Corporation and/or its licensors, and only be used, duplicated,
+ * modified or distributed under the authorized license from Realtek.
+ *
+ * ANY USE OF THE SOFTWARE OTHER THAN AS AUTHORIZED UNDER
+ * THIS LICENSE OR COPYRIGHT LAW IS PROHIBITED.
+ *
+ * $Revision: 76306 $
+ * $Date: 2017-03-08 15:13:58 +0800 (Wed, 08 Mar 2017) $
+ *
+ * Purpose : RTL8367C switch high-level API for RTL8367C
+ * Feature : Proprietary CPU-tag related function drivers
+ *
+ */
+#include <rtl8367c_asicdrv_cputag.h>
+/* Function Name:
+ *      rtl8367c_setAsicCputagEnable
+ * Description:
+ *      Set cpu tag function enable/disable
+ * Input:
+ *      enabled - 1: enabled, 0: disabled
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK       - Success
+ *      RT_ERR_SMI      - SMI access error
+ *      RT_ERR_ENABLE   - Invalid enable/disable input
+ * Note:
+ *      If the CPU tag function is disabled, no CPU tag is added to frames
+ *      forwarded to the CPU port, and no port can parse CPU tags.
+ */
+ret_t rtl8367c_setAsicCputagEnable(rtk_uint32 enabled)
+{
+    if(enabled > 1)
+        return RT_ERR_ENABLE;
+
+    return rtl8367c_setAsicRegBit(RTL8367C_REG_CPU_CTRL, RTL8367C_CPU_EN_OFFSET, enabled);
+}
+/* Function Name:
+ *      rtl8367c_getAsicCputagEnable
+ * Description:
+ *      Get cpu tag function enable/disable
+ * Input:
+ *      pEnabled - 1: enabled, 0: disabled
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK       - Success
+ *      RT_ERR_SMI      - SMI access error
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicCputagEnable(rtk_uint32 *pEnabled)
+{
+    return rtl8367c_getAsicRegBit(RTL8367C_REG_CPU_CTRL, RTL8367C_CPU_EN_OFFSET, pEnabled);
+}
+/* Function Name:
+ *      rtl8367c_setAsicCputagTrapPort
+ * Description:
+ *      Set cpu tag trap port
+ * Input:
+ *      port - port number
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK       - Success
+ *      RT_ERR_SMI      - SMI access error
+ *      RT_ERR_PORT_ID  - Invalid port number
+ * Note:
+ *     API can set destination port of trapping frame
+ */
+ret_t rtl8367c_setAsicCputagTrapPort(rtk_uint32 port)
+{
+    ret_t retVal;
+
+    if(port >= RTL8367C_PORTNO)
+        return RT_ERR_PORT_ID;
+
+    retVal = rtl8367c_setAsicRegBits(RTL8367C_REG_CPU_CTRL, RTL8367C_CPU_TRAP_PORT_MASK, port & 7);
+    if(retVal != RT_ERR_OK)
+        return retVal;
+
+    retVal = rtl8367c_setAsicRegBits(RTL8367C_REG_CPU_CTRL, RTL8367C_CPU_TRAP_PORT_EXT_MASK, (port>>3) & 1);
+    if(retVal != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+/* Function Name:
+ *      rtl8367c_getAsicCputagTrapPort
+ * Description:
+ *      Get cpu tag trap port
+ * Input:
+ *      pPort - port number
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK       - Success
+ *      RT_ERR_SMI      - SMI access error
+ * Note:
+ *     None
+ */
+ret_t rtl8367c_getAsicCputagTrapPort(rtk_uint32 *pPort)
+{
+    ret_t retVal;
+    rtk_uint32 tmpPort;
+
+    retVal = rtl8367c_getAsicRegBits(RTL8367C_REG_CPU_CTRL, RTL8367C_CPU_TRAP_PORT_MASK, &tmpPort);
+    if(retVal != RT_ERR_OK)
+        return retVal;
+    *pPort = tmpPort;
+
+    retVal = rtl8367c_getAsicRegBits(RTL8367C_REG_CPU_CTRL, RTL8367C_CPU_TRAP_PORT_EXT_MASK, &tmpPort);
+    if(retVal != RT_ERR_OK)
+        return retVal;
+    *pPort |= (tmpPort & 1) << 3;
+
+    return RT_ERR_OK;
+}
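+/*
+ * Editorial note (not part of the original Realtek sources): the trap
+ * port number is wider than the 3-bit TRAP_PORT field, so the setter
+ * splits it: bits [2:0] go into RTL8367C_CPU_TRAP_PORT_MASK and bit 3
+ * into the EXT mask. The getter reverses this with (ext & 1) << 3.
+ */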
+/* Function Name:
+ *      rtl8367c_setAsicCputagPortmask
+ * Description:
+ *      Set ports that can parse CPU tag
+ * Input:
+ *      portmask - port mask
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - Success
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_PORT_MASK    - Invalid portmask
+ * Note:
+ *     None
+ */
+ret_t rtl8367c_setAsicCputagPortmask(rtk_uint32 portmask)
+{
+    if(portmask > RTL8367C_PORTMASK)
+        return RT_ERR_PORT_MASK;
+
+    return rtl8367c_setAsicReg(RTL8367C_CPU_PORT_MASK_REG, portmask);
+}
+/* Function Name:
+ *      rtl8367c_getAsicCputagPortmask
+ * Description:
+ *      Get ports that can parse CPU tag
+ * Input:
+ *      pPortmask - port mask
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - Success
+ *      RT_ERR_SMI          - SMI access error
+ * Note:
+ *     None
+ */
+ret_t rtl8367c_getAsicCputagPortmask(rtk_uint32 *pPortmask)
+{
+    return rtl8367c_getAsicReg(RTL8367C_CPU_PORT_MASK_REG, pPortmask);
+}
+/* Function Name:
+ *      rtl8367c_setAsicCputagInsertMode
+ * Description:
+ *      Set CPU-tag insert mode
+ * Input:
+ *      mode - 0: insert to all packets; 1: insert to trapped packets; 2: don't insert
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - Success
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_NOT_ALLOWED  - Actions not allowed by the function
+ * Note:
+ *     None
+ */
+ret_t rtl8367c_setAsicCputagInsertMode(rtk_uint32 mode)
+{
+    if(mode >= CPUTAG_INSERT_END)
+        return RT_ERR_NOT_ALLOWED;
+
+    return rtl8367c_setAsicRegBits(RTL8367C_REG_CPU_CTRL, RTL8367C_CPU_INSERTMODE_MASK, mode);
+}
+/* Function Name:
+ *      rtl8367c_getAsicCputagInsertMode
+ * Description:
+ *      Get CPU-tag insert mode
+ * Input:
+ *      pMode - 0: insert to all packets; 1: insert to trapped packets; 2: don't insert
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - Success
+ *      RT_ERR_SMI          - SMI access error
+ * Note:
+ *     None
+ */
+ret_t rtl8367c_getAsicCputagInsertMode(rtk_uint32 *pMode)
+{
+    return rtl8367c_getAsicRegBits(RTL8367C_REG_CPU_CTRL, RTL8367C_CPU_INSERTMODE_MASK, pMode);
+}
+/* Function Name:
+ *      rtl8367c_setAsicCputagPriorityRemapping
+ * Description:
+ *      Set queue assignment of CPU port
+ * Input:
+ *      srcPri - internal priority (0~7)
+ *      newPri - internal priority after remapping (0~7)
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK                   - Success
+ *      RT_ERR_SMI                  - SMI access error
+ *      RT_ERR_QOS_INT_PRIORITY     - Invalid priority
+ * Note:
+ *     None
+ */
+ret_t rtl8367c_setAsicCputagPriorityRemapping(rtk_uint32 srcPri, rtk_uint32 newPri)
+{
+    if((srcPri > RTL8367C_PRIMAX) || (newPri > RTL8367C_PRIMAX))
+        return RT_ERR_QOS_INT_PRIORITY;
+
+    return rtl8367c_setAsicRegBits(RTL8367C_QOS_PRIPORITY_REMAPPING_IN_CPU_REG(srcPri), RTL8367C_QOS_PRIPORITY_REMAPPING_IN_CPU_MASK(srcPri), newPri);
+}
+/* Function Name:
+ *      rtl8367c_getAsicCputagPriorityRemapping
+ * Description:
+ *      Get queue assignment of CPU port
+ * Input:
+ *      srcPri - internal priority (0~7)
+ *      pNewPri - internal priority after remapping (0~7)
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK                   - Success
+ *      RT_ERR_SMI                  - SMI access error
+ *      RT_ERR_QOS_INT_PRIORITY     - Invalid priority
+ * Note:
+ *     None
+ */
+ret_t rtl8367c_getAsicCputagPriorityRemapping(rtk_uint32 srcPri, rtk_uint32 *pNewPri)
+{
+    if(srcPri > RTL8367C_PRIMAX)
+        return RT_ERR_QOS_INT_PRIORITY;
+
+    return rtl8367c_getAsicRegBits(RTL8367C_QOS_PRIPORITY_REMAPPING_IN_CPU_REG(srcPri), RTL8367C_QOS_PRIPORITY_REMAPPING_IN_CPU_MASK(srcPri), pNewPri);
+}
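+/*
+ * Editorial usage sketch (not part of the original Realtek sources):
+ * remapping internal priority 7 to priority 3 on the CPU port and
+ * reading the setting back:
+ *
+ *     rtk_uint32 newPri;
+ *
+ *     if(rtl8367c_setAsicCputagPriorityRemapping(7, 3) != RT_ERR_OK)
+ *         return;
+ *     if(rtl8367c_getAsicCputagPriorityRemapping(7, &newPri) != RT_ERR_OK)
+ *         return;
+ *     // newPri == 3 on success
+ */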
+/* Function Name:
+ *      rtl8367c_setAsicCputagPosition
+ * Description:
+ *      Set cpu tag insert position
+ * Input:
+ *      position - 1: After entire packet (before CRC field), 0: After MAC_SA (Default)
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK                   - Success
+ *      RT_ERR_SMI                  - SMI access error
+ * Note:
+ *     None
+ */
+ret_t rtl8367c_setAsicCputagPosition(rtk_uint32 position)
+{
+    return rtl8367c_setAsicRegBit(RTL8367C_REG_CPU_CTRL, RTL8367C_CPU_TAG_POSITION_OFFSET, position);
+}
+/* Function Name:
+ *      rtl8367c_getAsicCputagPosition
+ * Description:
+ *      Get cpu tag insert position
+ * Input:
+ *      pPosition - 1: After entire packet (before CRC field), 0: After MAC_SA (Default)
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK                   - Success
+ *      RT_ERR_SMI                  - SMI access error
+ * Note:
+ *     None
+ */
+ret_t rtl8367c_getAsicCputagPosition(rtk_uint32* pPosition)
+{
+    return rtl8367c_getAsicRegBit(RTL8367C_REG_CPU_CTRL, RTL8367C_CPU_TAG_POSITION_OFFSET, pPosition);
+}
+
+/* Function Name:
+ *      rtl8367c_setAsicCputagMode
+ * Description:
+ *      Set cpu tag mode
+ * Input:
+ *      mode - 1: 4bytes mode, 0: 8bytes mode
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK       - Success
+ *      RT_ERR_SMI      - SMI access error
+ *      RT_ERR_INPUT    - Invalid input parameters
+ * Note:
+ *      If the CPU tag function is disabled, no CPU tag is added to frames
+ *      forwarded to the CPU port, and no port can parse CPU tags.
+ */
+ret_t rtl8367c_setAsicCputagMode(rtk_uint32 mode)
+{
+    if(mode > 1)
+        return RT_ERR_INPUT;
+
+    return rtl8367c_setAsicRegBit(RTL8367C_REG_CPU_CTRL, RTL8367C_CPU_TAG_FORMAT_OFFSET, mode);
+}
+/* Function Name:
+ *      rtl8367c_getAsicCputagMode
+ * Description:
+ *      Get cpu tag mode
+ * Input:
+ *      pMode - 1: 4bytes mode, 0: 8bytes mode
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK       - Success
+ *      RT_ERR_SMI      - SMI access error
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicCputagMode(rtk_uint32 *pMode)
+{
+    return rtl8367c_getAsicRegBit(RTL8367C_REG_CPU_CTRL, RTL8367C_CPU_TAG_FORMAT_OFFSET, pMode);
+}
+/* Function Name:
+ *      rtl8367c_setAsicCputagRxMinLength
+ * Description:
+ *      Set cpu tag RX minimum packet length
+ * Input:
+ *      mode - 1: 64bytes, 0: 72bytes
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK       - Success
+ *      RT_ERR_SMI      - SMI access error
+ *      RT_ERR_INPUT    - Invalid input parameters
+ * Note:
+ *      If the CPU tag function is disabled, no CPU tag is added to frames
+ *      forwarded to the CPU port, and no port can parse CPU tags.
+ */
+ret_t rtl8367c_setAsicCputagRxMinLength(rtk_uint32 mode)
+{
+    if(mode > 1)
+        return RT_ERR_INPUT;
+
+    return rtl8367c_setAsicRegBit(RTL8367C_REG_CPU_CTRL, RTL8367C_CPU_TAG_RXBYTECOUNT_OFFSET, mode);
+}
+/* Function Name:
+ *      rtl8367c_getAsicCputagRxMinLength
+ * Description:
+ *      Get cpu tag RX minimum packet length
+ * Input:
+ *      pMode - 1: 64bytes, 0: 72bytes
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK       - Success
+ *      RT_ERR_SMI      - SMI access error
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicCputagRxMinLength(rtk_uint32 *pMode)
+{
+    return rtl8367c_getAsicRegBit(RTL8367C_REG_CPU_CTRL, RTL8367C_CPU_TAG_RXBYTECOUNT_OFFSET, pMode);
+}
+
+
+
diff -Nruw linux-4.4.115-fbx/drivers/misc/freebox./rtlapi/rtl8367c_asicdrv_cputag.h linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/rtl8367c_asicdrv_cputag.h
--- linux-4.4.115-fbx/drivers/misc/freebox./rtlapi/rtl8367c_asicdrv_cputag.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/rtl8367c_asicdrv_cputag.h	2019-01-22 16:16:24.711257346 +0100
@@ -0,0 +1,32 @@
+#ifndef _RTL8367C_ASICDRV_CPUTAG_H_
+#define _RTL8367C_ASICDRV_CPUTAG_H_
+
+#include <rtl8367c_asicdrv.h>
+
+enum CPUTAG_INSERT_MODE
+{
+    CPUTAG_INSERT_TO_ALL = 0,
+    CPUTAG_INSERT_TO_TRAPPING,
+    CPUTAG_INSERT_TO_NO,
+    CPUTAG_INSERT_END
+};
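+/* Editorial note (not part of the original Realtek sources): these values
+ * feed rtl8367c_setAsicCputagInsertMode(): 0 inserts the tag into all
+ * packets, 1 only into trapped packets, 2 disables insertion, and
+ * CPUTAG_INSERT_END is the bounds sentinel checked by the setter.
+ */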
+
+extern ret_t rtl8367c_setAsicCputagEnable(rtk_uint32 enabled);
+extern ret_t rtl8367c_getAsicCputagEnable(rtk_uint32 *pEnabled);
+extern ret_t rtl8367c_setAsicCputagTrapPort(rtk_uint32 port);
+extern ret_t rtl8367c_getAsicCputagTrapPort(rtk_uint32 *pPort);
+extern ret_t rtl8367c_setAsicCputagPortmask(rtk_uint32 portmask);
+extern ret_t rtl8367c_getAsicCputagPortmask(rtk_uint32 *pPmsk);
+extern ret_t rtl8367c_setAsicCputagInsertMode(rtk_uint32 mode);
+extern ret_t rtl8367c_getAsicCputagInsertMode(rtk_uint32 *pMode);
+extern ret_t rtl8367c_setAsicCputagPriorityRemapping(rtk_uint32 srcPri, rtk_uint32 newPri);
+extern ret_t rtl8367c_getAsicCputagPriorityRemapping(rtk_uint32 srcPri, rtk_uint32 *pNewPri);
+extern ret_t rtl8367c_setAsicCputagPosition(rtk_uint32 position);
+extern ret_t rtl8367c_getAsicCputagPosition(rtk_uint32* pPosition);
+extern ret_t rtl8367c_setAsicCputagMode(rtk_uint32 mode);
+extern ret_t rtl8367c_getAsicCputagMode(rtk_uint32 *pMode);
+extern ret_t rtl8367c_setAsicCputagRxMinLength(rtk_uint32 mode);
+extern ret_t rtl8367c_getAsicCputagRxMinLength(rtk_uint32 *pMode);
+
+#endif /*#ifndef _RTL8367C_ASICDRV_CPUTAG_H_*/
+
diff -Nruw linux-4.4.115-fbx/drivers/misc/freebox./rtlapi/rtl8367c_asicdrv_dot1x.c linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/rtl8367c_asicdrv_dot1x.c
--- linux-4.4.115-fbx/drivers/misc/freebox./rtlapi/rtl8367c_asicdrv_dot1x.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/rtl8367c_asicdrv_dot1x.c	2019-01-22 16:16:24.711257346 +0100
@@ -0,0 +1,417 @@
+/*
+ * Copyright (C) 2013 Realtek Semiconductor Corp.
+ * All Rights Reserved.
+ *
+ * This program is the proprietary software of Realtek Semiconductor
+ * Corporation and/or its licensors, and only be used, duplicated,
+ * modified or distributed under the authorized license from Realtek.
+ *
+ * ANY USE OF THE SOFTWARE OTHER THAN AS AUTHORIZED UNDER
+ * THIS LICENSE OR COPYRIGHT LAW IS PROHIBITED.
+ *
+ * $Revision: 76306 $
+ * $Date: 2017-03-08 15:13:58 +0800 (Wed, 08 Mar 2017) $
+ *
+ * Purpose : RTL8367C switch high-level API for RTL8367C
+ * Feature : 802.1X related functions
+ *
+ */
+#include <rtl8367c_asicdrv_dot1x.h>
+/* Function Name:
+ *      rtl8367c_setAsic1xPBEnConfig
+ * Description:
+ *      Set 802.1x port-based port enable configuration
+ * Input:
+ *      port    - Physical port number (0~7)
+ *      enabled - 1: enabled, 0: disabled
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK       - Success
+ *      RT_ERR_SMI      - SMI access error
+ *      RT_ERR_PORT_ID  - Invalid port number
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_setAsic1xPBEnConfig(rtk_uint32 port, rtk_uint32 enabled)
+{
+    if(port >= RTL8367C_PORTNO)
+        return RT_ERR_PORT_ID;
+
+    return rtl8367c_setAsicRegBit(RTL8367C_DOT1X_PORT_ENABLE_REG, port, enabled);
+}
+/* Function Name:
+ *      rtl8367c_getAsic1xPBEnConfig
+ * Description:
+ *      Get 802.1x port-based port enable configuration
+ * Input:
+ *      port    - Physical port number (0~7)
+ *      pEnabled - 1: enabled, 0: disabled
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK       - Success
+ *      RT_ERR_SMI      - SMI access error
+ *      RT_ERR_PORT_ID  - Invalid port number
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsic1xPBEnConfig(rtk_uint32 port, rtk_uint32 *pEnabled)
+{
+    if(port >= RTL8367C_PORTNO)
+        return RT_ERR_PORT_ID;
+
+    return rtl8367c_getAsicRegBit(RTL8367C_DOT1X_PORT_ENABLE_REG, port, pEnabled);
+}
+/* Function Name:
+ *      rtl8367c_setAsic1xPBAuthConfig
+ * Description:
+ *      Set 802.1x port-based authorised port configuration
+ * Input:
+ *      port    - Physical port number (0~7)
+ *      auth    - 1: authorised, 0: non-authorised
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK       - Success
+ *      RT_ERR_SMI      - SMI access error
+ *      RT_ERR_PORT_ID  - Invalid port number
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_setAsic1xPBAuthConfig(rtk_uint32 port, rtk_uint32 auth)
+{
+    if(port >= RTL8367C_PORTNO)
+        return RT_ERR_PORT_ID;
+
+    return rtl8367c_setAsicRegBit(RTL8367C_DOT1X_PORT_AUTH_REG, port, auth);
+}
+/* Function Name:
+ *      rtl8367c_getAsic1xPBAuthConfig
+ * Description:
+ *      Get 802.1x port-based authorised port configuration
+ * Input:
+ *      port    - Physical port number (0~7)
+ *      pAuth   - 1: authorised, 0: non-authorised
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK       - Success
+ *      RT_ERR_SMI      - SMI access error
+ *      RT_ERR_PORT_ID  - Invalid port number
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsic1xPBAuthConfig(rtk_uint32 port, rtk_uint32 *pAuth)
+{
+    if(port >= RTL8367C_PORTNO)
+        return RT_ERR_PORT_ID;
+
+    return rtl8367c_getAsicRegBit(RTL8367C_DOT1X_PORT_AUTH_REG, port, pAuth);
+}
+/* Function Name:
+ *      rtl8367c_setAsic1xPBOpdirConfig
+ * Description:
+ *      Set 802.1x port-based operational direction
+ * Input:
+ *      port    - Physical port number (0~7)
+ *      opdir   - Operation direction 1: IN, 0:BOTH
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK       - Success
+ *      RT_ERR_SMI      - SMI access error
+ *      RT_ERR_PORT_ID  - Invalid port number
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_setAsic1xPBOpdirConfig(rtk_uint32 port, rtk_uint32 opdir)
+{
+    if(port >= RTL8367C_PORTNO)
+        return RT_ERR_PORT_ID;
+
+    return rtl8367c_setAsicRegBit(RTL8367C_DOT1X_PORT_OPDIR_REG, port, opdir);
+}
+/* Function Name:
+ *      rtl8367c_getAsic1xPBOpdirConfig
+ * Description:
+ *      Get 802.1x port-based operational direction
+ * Input:
+ *      port    - Physical port number (0~7)
+ *      pOpdir  - Operation direction 1: IN, 0:BOTH
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK       - Success
+ *      RT_ERR_SMI      - SMI access error
+ *      RT_ERR_PORT_ID  - Invalid port number
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsic1xPBOpdirConfig(rtk_uint32 port, rtk_uint32* pOpdir)
+{
+    if(port >= RTL8367C_PORTNO)
+        return RT_ERR_PORT_ID;
+
+    return rtl8367c_getAsicRegBit(RTL8367C_DOT1X_PORT_OPDIR_REG, port, pOpdir);
+}
+/* Function Name:
+ *      rtl8367c_setAsic1xMBEnConfig
+ * Description:
+ *      Set 802.1x mac-based port enable configuration
+ * Input:
+ *      port    - Physical port number (0~7)
+ *      enabled - 1: enabled, 0: disabled
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK       - Success
+ *      RT_ERR_SMI      - SMI access error
+ *      RT_ERR_PORT_ID  - Invalid port number
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_setAsic1xMBEnConfig(rtk_uint32 port, rtk_uint32 enabled)
+{
+    if(port >= RTL8367C_PORTNO)
+        return RT_ERR_PORT_ID;
+
+    return rtl8367c_setAsicRegBit(RTL8367C_DOT1X_MAC_ENABLE_REG, port, enabled);
+}
+/* Function Name:
+ *      rtl8367c_getAsic1xMBEnConfig
+ * Description:
+ *      Get 802.1x mac-based port enable configuration
+ * Input:
+ *      port    - Physical port number (0~7)
+ *      pEnabled - 1: enabled, 0: disabled
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK       - Success
+ *      RT_ERR_SMI      - SMI access error
+ *      RT_ERR_PORT_ID  - Invalid port number
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsic1xMBEnConfig(rtk_uint32 port, rtk_uint32 *pEnabled)
+{
+    if(port >= RTL8367C_PORTNO)
+        return RT_ERR_PORT_ID;
+
+    return rtl8367c_getAsicRegBit(RTL8367C_DOT1X_MAC_ENABLE_REG, port, pEnabled);
+}
+/* Function Name:
+ *      rtl8367c_setAsic1xMBOpdirConfig
+ * Description:
+ *      Set 802.1x mac-based operational direction
+ * Input:
+ *      opdir       - Operation direction 1: IN, 0:BOTH
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK   - Success
+ *      RT_ERR_SMI  - SMI access error
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_setAsic1xMBOpdirConfig(rtk_uint32 opdir)
+{
+    return rtl8367c_setAsicRegBit(RTL8367C_DOT1X_CFG_REG, RTL8367C_DOT1X_MAC_OPDIR_OFFSET, opdir);
+}
+/* Function Name:
+ *      rtl8367c_getAsic1xMBOpdirConfig
+ * Description:
+ *      Get 802.1x mac-based operational direction
+ * Input:
+ *      pOpdir      - Operation direction 1: IN, 0:BOTH
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK   - Success
+ *      RT_ERR_SMI  - SMI access error
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsic1xMBOpdirConfig(rtk_uint32 *pOpdir)
+{
+    return rtl8367c_getAsicRegBit(RTL8367C_DOT1X_CFG_REG, RTL8367C_DOT1X_MAC_OPDIR_OFFSET, pOpdir);
+}
+/* Function Name:
+ *      rtl8367c_setAsic1xProcConfig
+ * Description:
+ *      Set 802.1x unauth. behavior configuration
+ * Input:
+ *      port    - Physical port number (0~7)
+ *      proc    - 802.1x unauth. behavior configuration 0:drop 1:trap to CPU 2:Guest VLAN
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - Success
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_PORT_ID      - Invalid port number
+ *      RT_ERR_DOT1X_PROC   - Unauthorized behavior error
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_setAsic1xProcConfig(rtk_uint32 port, rtk_uint32 proc)
+{
+
+    if(port >= RTL8367C_PORTNO)
+        return RT_ERR_PORT_ID;
+
+    if(proc >= DOT1X_UNAUTH_END)
+        return RT_ERR_DOT1X_PROC;
+
+    if(port < 8)
+    {
+        return rtl8367c_setAsicRegBits(RTL8367C_DOT1X_UNAUTH_ACT_BASE, RTL8367C_DOT1X_UNAUTH_ACT_MASK(port),proc);
+    }
+    else
+    {
+        return rtl8367c_setAsicRegBits(RTL8367C_REG_DOT1X_UNAUTH_ACT_W1, RTL8367C_DOT1X_UNAUTH_ACT_MASK(port),proc);
+    }
+}
+/* Function Name:
+ *      rtl8367c_getAsic1xProcConfig
+ * Description:
+ *      Get 802.1x unauth. behavior configuration
+ * Input:
+ *      port    - Physical port number (0~7)
+ *      pProc   - 802.1x unauth. behavior configuration 0:drop 1:trap to CPU 2:Guest VLAN
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - Success
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_PORT_ID      - Invalid port number
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsic1xProcConfig(rtk_uint32 port, rtk_uint32* pProc)
+{
+    if(port >= RTL8367C_PORTNO)
+        return RT_ERR_PORT_ID;
+
+    if(port < 8)
+        return rtl8367c_getAsicRegBits(RTL8367C_DOT1X_UNAUTH_ACT_BASE, RTL8367C_DOT1X_UNAUTH_ACT_MASK(port), pProc);
+    else
+        return rtl8367c_getAsicRegBits(RTL8367C_REG_DOT1X_UNAUTH_ACT_W1, RTL8367C_DOT1X_UNAUTH_ACT_MASK(port), pProc);
+}
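+/*
+ * Editorial usage sketch (not part of the original Realtek sources):
+ * directing unauthorised traffic on port 2 into the guest VLAN held at
+ * CVLAN index 5 (port and index values are placeholders):
+ *
+ *     if(rtl8367c_setAsic1xProcConfig(2, DOT1X_UNAUTH_GVLAN) != RT_ERR_OK)
+ *         return;
+ *     if(rtl8367c_setAsic1xGuestVidx(5) != RT_ERR_OK)
+ *         return;
+ */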
+/* Function Name:
+ *      rtl8367c_setAsic1xGuestVidx
+ * Description:
+ *      Set 802.1x guest vlan index
+ * Input:
+ *      index   - 802.1x guest vlan index (0~31)
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK               - Success
+ *      RT_ERR_SMI              - SMI access error
+ *      RT_ERR_DOT1X_GVLANIDX   - Invalid cvid index
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_setAsic1xGuestVidx(rtk_uint32 index)
+{
+    if(index >= RTL8367C_CVIDXNO)
+        return RT_ERR_DOT1X_GVLANIDX;
+
+    return rtl8367c_setAsicRegBits(RTL8367C_DOT1X_CFG_REG, RTL8367C_DOT1X_GVIDX_MASK, index);
+}
+/* Function Name:
+ *      rtl8367c_getAsic1xGuestVidx
+ * Description:
+ *      Get 802.1x guest vlan index
+ * Input:
+ *      pIndex  - 802.1x guest vlan index (0~31)
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK   - Success
+ *      RT_ERR_SMI  - SMI access error
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsic1xGuestVidx(rtk_uint32 *pIndex)
+{
+    return rtl8367c_getAsicRegBits(RTL8367C_DOT1X_CFG_REG, RTL8367C_DOT1X_GVIDX_MASK, pIndex);
+}
+/* Function Name:
+ *      rtl8367c_setAsic1xGVOpdir
+ * Description:
+ *      Set 802.1x guest vlan talk to auth. DA
+ * Input:
+ *      enabled     - 0:disable 1:enable
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK   - Success
+ *      RT_ERR_SMI  - SMI access error
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_setAsic1xGVOpdir(rtk_uint32 enabled)
+{
+    return rtl8367c_setAsicRegBit(RTL8367C_DOT1X_CFG_REG, RTL8367C_DOT1X_GVOPDIR_OFFSET, enabled);
+}
+/* Function Name:
+ *      rtl8367c_getAsic1xGVOpdir
+ * Description:
+ *      Get 802.1x guest vlan talk to auth. DA
+ * Input:
+ *      pEnabled        - 0:disable 1:enable
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK   - Success
+ *      RT_ERR_SMI  - SMI access error
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsic1xGVOpdir(rtk_uint32 *pEnabled)
+{
+    return rtl8367c_getAsicRegBit(RTL8367C_DOT1X_CFG_REG, RTL8367C_DOT1X_GVOPDIR_OFFSET, pEnabled);
+}
+/* Function Name:
+ *      rtl8367c_setAsic1xTrapPriority
+ * Description:
+ *      Set 802.1x Trap priority
+ * Input:
+ *      priority    - priority (0~7)
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK               - Success
+ *      RT_ERR_SMI              - SMI access error
+ *      RT_ERR_QOS_INT_PRIORITY - Invalid priority
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_setAsic1xTrapPriority(rtk_uint32 priority)
+{
+    if(priority > RTL8367C_PRIMAX)
+        return RT_ERR_QOS_INT_PRIORITY;
+
+    return rtl8367c_setAsicRegBits(RTL8367C_REG_QOS_TRAP_PRIORITY0, RTL8367C_DOT1X_PRIORTY_MASK,priority);
+}
+/* Function Name:
+ *      rtl8367c_getAsic1xTrapPriority
+ * Description:
+ *      Get 802.1x Trap priority
+ * Input:
+ *      pPriority   - priority (0~7)
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK   - Success
+ *      RT_ERR_SMI  - SMI access error
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsic1xTrapPriority(rtk_uint32 *pPriority)
+{
+    return rtl8367c_getAsicRegBits(RTL8367C_REG_QOS_TRAP_PRIORITY0, RTL8367C_DOT1X_PRIORTY_MASK, pPriority);
+}
+
diff -Nruw linux-4.4.115-fbx/drivers/misc/freebox./rtlapi/rtl8367c_asicdrv_dot1x.h linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/rtl8367c_asicdrv_dot1x.h
--- linux-4.4.115-fbx/drivers/misc/freebox./rtlapi/rtl8367c_asicdrv_dot1x.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/rtl8367c_asicdrv_dot1x.h	2019-01-22 16:16:24.711257346 +0100
@@ -0,0 +1,35 @@
+#ifndef _RTL8367C_ASICDRV_DOT1X_H_
+#define _RTL8367C_ASICDRV_DOT1X_H_
+
+#include <rtl8367c_asicdrv.h>
+
+enum DOT1X_UNAUTH_BEHAV
+{
+    DOT1X_UNAUTH_DROP = 0,
+    DOT1X_UNAUTH_TRAP,
+    DOT1X_UNAUTH_GVLAN,
+    DOT1X_UNAUTH_END
+};
+
+extern ret_t rtl8367c_setAsic1xPBEnConfig(rtk_uint32 port, rtk_uint32 enabled);
+extern ret_t rtl8367c_getAsic1xPBEnConfig(rtk_uint32 port, rtk_uint32 *pEnabled);
+extern ret_t rtl8367c_setAsic1xPBAuthConfig(rtk_uint32 port, rtk_uint32 auth);
+extern ret_t rtl8367c_getAsic1xPBAuthConfig(rtk_uint32 port, rtk_uint32 *pAuth);
+extern ret_t rtl8367c_setAsic1xPBOpdirConfig(rtk_uint32 port, rtk_uint32 opdir);
+extern ret_t rtl8367c_getAsic1xPBOpdirConfig(rtk_uint32 port, rtk_uint32 *pOpdir);
+extern ret_t rtl8367c_setAsic1xMBEnConfig(rtk_uint32 port, rtk_uint32 enabled);
+extern ret_t rtl8367c_getAsic1xMBEnConfig(rtk_uint32 port, rtk_uint32 *pEnabled);
+extern ret_t rtl8367c_setAsic1xMBOpdirConfig(rtk_uint32 opdir);
+extern ret_t rtl8367c_getAsic1xMBOpdirConfig(rtk_uint32 *pOpdir);
+extern ret_t rtl8367c_setAsic1xProcConfig(rtk_uint32 port, rtk_uint32 proc);
+extern ret_t rtl8367c_getAsic1xProcConfig(rtk_uint32 port, rtk_uint32 *pProc);
+extern ret_t rtl8367c_setAsic1xGuestVidx(rtk_uint32 index);
+extern ret_t rtl8367c_getAsic1xGuestVidx(rtk_uint32 *pIndex);
+extern ret_t rtl8367c_setAsic1xGVOpdir(rtk_uint32 enabled);
+extern ret_t rtl8367c_getAsic1xGVOpdir(rtk_uint32 *pEnabled);
+extern ret_t rtl8367c_setAsic1xTrapPriority(rtk_uint32 priority);
+extern ret_t rtl8367c_getAsic1xTrapPriority(rtk_uint32 *pPriority);
+
+
+#endif /*_RTL8367C_ASICDRV_DOT1X_H_*/
+
diff -Nruw linux-4.4.115-fbx/drivers/misc/freebox./rtlapi/rtl8367c_asicdrv_eav.c linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/rtl8367c_asicdrv_eav.c
--- linux-4.4.115-fbx/drivers/misc/freebox./rtlapi/rtl8367c_asicdrv_eav.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/rtl8367c_asicdrv_eav.c	2019-01-22 16:16:24.711257346 +0100
@@ -0,0 +1,871 @@
+/*
+ * Copyright (C) 2013 Realtek Semiconductor Corp.
+ * All Rights Reserved.
+ *
+ * This program is the proprietary software of Realtek Semiconductor
+ * Corporation and/or its licensors, and only be used, duplicated,
+ * modified or distributed under the authorized license from Realtek.
+ *
+ * ANY USE OF THE SOFTWARE OTHER THAN AS AUTHORIZED UNDER
+ * THIS LICENSE OR COPYRIGHT LAW IS PROHIBITED.
+ *
+ * $Revision: 79623 $
+ * $Date: 2017-06-14 17:15:42 +0800 (Wed, 14 Jun 2017) $
+ *
+ * Purpose : RTL8367C switch high-level API for RTL8367C
+ * Feature : Ethernet AV related functions
+ *
+ */
+
+#include <rtl8367c_asicdrv_eav.h>
+/* Function Name:
+ *      rtl8367c_setAsicEavMacAddress
+ * Description:
+ *      Set PTP MAC address
+ * Input:
+ *      mac     - PTP MAC address
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK         - Success
+ *      RT_ERR_SMI      - SMI access error
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_setAsicEavMacAddress(ether_addr_t mac)
+{
+    ret_t retVal;
+    rtk_uint32 regData;
+    rtk_uint8 *accessPtr;
+    rtk_uint32 i;
+
+    accessPtr =  (rtk_uint8*)&mac;
+
+    for(i = 0; i <= 2; i++)
+    {
+        regData = (*(accessPtr + (i*2)) << 8) | *(accessPtr + (i*2) + 1);
+        retVal = rtl8367c_setAsicReg(RTL8367C_REG_MAC_ADDR_H - i, regData);
+        if(retVal != RT_ERR_OK)
+            return retVal;
+    }
+
+    return retVal;
+}
+/* Function Name:
+ *      rtl8367c_getAsicEavMacAddress
+ * Description:
+ *      Get PTP MAC address
+ * Input:
+ *      None
+ * Output:
+ *      pMac    - PTP MAC address
+ * Return:
+ *      RT_ERR_OK         - Success
+ *      RT_ERR_SMI      - SMI access error
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicEavMacAddress(ether_addr_t *pMac)
+{
+    ret_t retVal;
+    rtk_uint32 regData;
+    rtk_uint8 *accessPtr;
+    rtk_uint32 i;
+
+    accessPtr = (rtk_uint8*)pMac;
+
+    for(i = 0; i <= 2; i++)
+    {
+        retVal = rtl8367c_getAsicReg(RTL8367C_REG_MAC_ADDR_H - i, &regData);
+        if(retVal != RT_ERR_OK)
+            return retVal;
+
+        *accessPtr = (regData & 0xFF00) >> 8;
+        accessPtr ++;
+        *accessPtr = regData & 0xFF;
+        accessPtr ++;
+    }
+
+    return retVal;
+}
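+
+/*
+ * Usage sketch (editor's illustration, not vendor code): the MAC is packed
+ * big-endian, two octets per 16-bit register, walking down from
+ * RTL8367C_REG_MAC_ADDR_H, as the loops above show.  A set/get round-trip:
+ */
+#if 0
+static ret_t example_ptp_mac_roundtrip(ether_addr_t mac)
+{
+    ret_t retVal;
+    ether_addr_t readback;
+
+    if((retVal = rtl8367c_setAsicEavMacAddress(mac)) != RT_ERR_OK)
+        return retVal;
+    /* readback should now equal mac, barring SMI errors */
+    return rtl8367c_getAsicEavMacAddress(&readback);
+}
+#endif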
+
+/* Function Name:
+ *      rtl8367c_setAsicEavTpid
+ * Description:
+ *      Set PTP parser tag TPID.
+ * Input:
+ *      outerTag  - outer tag TPID
+ *      innerTag  - inner tag TPID
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK   - Success
+ *      RT_ERR_SMI  - SMI access error
+ * Note:
+ *     None
+ */
+ret_t rtl8367c_setAsicEavTpid(rtk_uint32 outerTag, rtk_uint32 innerTag)
+{
+    ret_t retVal;
+
+    if((retVal = rtl8367c_setAsicReg(RTL8367C_REG_OTAG_TPID, outerTag)) != RT_ERR_OK)
+        return retVal;
+    if((retVal = rtl8367c_setAsicReg(RTL8367C_REG_ITAG_TPID, innerTag)) != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+/* Function Name:
+ *      rtl8367c_getAsicEavTpid
+ * Description:
+ *      Get PTP parser tag TPID.
+ * Input:
+ *      None
+ * Output:
+ *      pOuterTag - outer tag TPID
+ *      pInnerTag - inner tag TPID
+ * Return:
+ *      RT_ERR_OK   - Success
+ *      RT_ERR_SMI  - SMI access error
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicEavTpid(rtk_uint32* pOuterTag, rtk_uint32* pInnerTag)
+{
+    ret_t retVal;
+
+    if((retVal = rtl8367c_getAsicReg(RTL8367C_REG_OTAG_TPID, pOuterTag)) != RT_ERR_OK)
+        return retVal;
+    if((retVal = rtl8367c_getAsicReg(RTL8367C_REG_ITAG_TPID, pInnerTag)) != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+/* Function Name:
+ *      rtl8367c_setAsicEavSysTime
+ * Description:
+ *      Set PTP system time
+ * Input:
+ *      second - seconds
+ *      nanoSecond - nano seconds
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK     - Success
+ *      RT_ERR_SMI  - SMI access error
+ * Note:
+ *      The time granularity is 8 nanoseconds.
+ */
+ret_t rtl8367c_setAsicEavSysTime(rtk_uint32 second, rtk_uint32 nanoSecond)
+{
+    ret_t retVal;
+    rtk_uint32 sec_h, sec_l, nsec8_h, nsec8_l;
+    rtk_uint32 nano_second_8;
+    rtk_uint32 regData, busyFlag, count;
+
+    if(nanoSecond > RTL8367C_EAV_NANOSECONDMAX)
+        return RT_ERR_INPUT;
+
+    regData = 0;
+    sec_h = second >>16;
+    sec_l = second & 0xFFFF;
+    nano_second_8 = nanoSecond >> 3;
+    nsec8_h = (nano_second_8 >>16) & RTL8367C_PTP_TIME_NSEC_H_NSEC_MASK;
+    nsec8_l = nano_second_8 &0xFFFF;
+
+    if((retVal = rtl8367c_setAsicReg(RTL8367C_REG_PTP_TIME_SEC_H_SEC, sec_h)) != RT_ERR_OK)
+        return retVal;
+    if((retVal = rtl8367c_setAsicReg(RTL8367C_REG_PTP_TIME_SEC_L_SEC, sec_l)) != RT_ERR_OK)
+        return retVal;
+    if((retVal = rtl8367c_setAsicReg(RTL8367C_REG_PTP_TIME_NSEC_L_NSEC, nsec8_l)) != RT_ERR_OK)
+        return retVal;
+
+    regData = nsec8_h | (PTP_TIME_WRITE<<RTL8367C_PTP_TIME_NSEC_H_CMD_OFFSET) | RTL8367C_PTP_TIME_NSEC_H_EXEC_MASK;
+
+    if((retVal = rtl8367c_setAsicReg(RTL8367C_REG_PTP_TIME_NSEC_H_NSEC, regData)) != RT_ERR_OK)
+        return retVal;
+
+    count = 0;
+    do {
+        if((retVal = rtl8367c_getAsicRegBit(RTL8367C_REG_PTP_TIME_NSEC_H_NSEC, RTL8367C_PTP_TIME_NSEC_H_EXEC_OFFSET, &busyFlag)) != RT_ERR_OK)
+            return retVal;
+        count++;
+    } while ((busyFlag != 0)&&(count<5));
+
+    if (busyFlag != 0)
+        return RT_ERR_BUSYWAIT_TIMEOUT;
+
+    return RT_ERR_OK;
+}
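+
+/*
+ * Usage sketch (editor's illustration): the hardware keeps time in 8 ns
+ * units, so the low three bits of nanoSecond are truncated on write.
+ * The epoch value below is an arbitrary placeholder.
+ */
+#if 0
+static ret_t example_program_ptp_clock(void)
+{
+    /* 100000007 >> 3 << 3 == 100000000: sub-8ns residue is lost */
+    return rtl8367c_setAsicEavSysTime(1500000000U, 100000007U);
+}
+#endif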
+
+/* Function Name:
+ *      rtl8367c_getAsicEavSysTime
+ * Description:
+ *      Get PTP system time
+ * Input:
+ *      None
+ * Output:
+ *      pSecond - seconds
+ *      pNanoSecond - nanoseconds
+ * Return:
+ *      RT_ERR_OK     - Success
+ *      RT_ERR_SMI  - SMI access error
+ * Note:
+ *      The time granularity is 8 nanoseconds.
+ */
+ret_t rtl8367c_getAsicEavSysTime(rtk_uint32* pSecond, rtk_uint32* pNanoSecond)
+{
+    ret_t retVal;
+    rtk_uint32 sec_h, sec_l, nsec8_h, nsec8_l;
+    rtk_uint32 nano_second_8;
+    rtk_uint32 regData, busyFlag, count;
+
+    regData = 0;
+    regData = (PTP_TIME_READ<<RTL8367C_PTP_TIME_NSEC_H_CMD_OFFSET) | RTL8367C_PTP_TIME_NSEC_H_EXEC_MASK;
+
+    if((retVal = rtl8367c_setAsicReg(RTL8367C_REG_PTP_TIME_NSEC_H_NSEC, regData)) != RT_ERR_OK)
+        return retVal;
+
+    count = 0;
+    do {
+        if((retVal = rtl8367c_getAsicRegBit(RTL8367C_REG_PTP_TIME_NSEC_H_NSEC, RTL8367C_PTP_TIME_NSEC_H_EXEC_OFFSET, &busyFlag)) != RT_ERR_OK)
+            return retVal;
+        count++;
+    } while ((busyFlag != 0)&&(count<5));
+
+    if (busyFlag != 0)
+        return RT_ERR_BUSYWAIT_TIMEOUT;
+
+    if((retVal = rtl8367c_getAsicReg(RTL8367C_REG_PTP_TIME_SEC_H_SEC_RD, &sec_h)) != RT_ERR_OK)
+        return retVal;
+    if((retVal = rtl8367c_getAsicReg(RTL8367C_REG_PTP_TIME_SEC_L_SEC_RD, &sec_l)) != RT_ERR_OK)
+        return retVal;
+    if((retVal = rtl8367c_getAsicRegBits(RTL8367C_REG_PTP_TIME_NSEC_H_NSEC_RD, RTL8367C_PTP_TIME_NSEC_H_NSEC_RD_MASK,&nsec8_h)) != RT_ERR_OK)
+        return retVal;
+    if((retVal = rtl8367c_getAsicReg(RTL8367C_REG_PTP_TIME_NSEC_L_NSEC_RD, &nsec8_l)) != RT_ERR_OK)
+        return retVal;
+
+    *pSecond = (sec_h<<16) | sec_l;
+    nano_second_8 = (nsec8_h<<16) | nsec8_l;
+    *pNanoSecond = nano_second_8<<3;
+
+    return RT_ERR_OK;
+}
+
+
+/* Function Name:
+ *      rtl8367c_setAsicEavSysTimeAdjust
+ * Description:
+ *      Set PTP system time adjust
+ * Input:
+ *      type - increase or decrease
+ *      second - seconds
+ *      nanoSecond - nano seconds
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK     - Success
+ *      RT_ERR_SMI  - SMI access error
+ * Note:
+ *      Ethernet AV second offset of timer for tuning
+ */
+ret_t rtl8367c_setAsicEavSysTimeAdjust(rtk_uint32 type, rtk_uint32 second, rtk_uint32 nanoSecond)
+{
+    ret_t retVal;
+    rtk_uint32 sec_h, sec_l, nsec8_h, nsec8_l;
+    rtk_uint32 nano_second_8;
+    rtk_uint32 regData, busyFlag, count;
+
+    if (type >= PTP_TIME_ADJ_END)
+        return RT_ERR_INPUT;
+    if(nanoSecond > RTL8367C_EAV_NANOSECONDMAX)
+        return RT_ERR_INPUT;
+
+    regData = 0;
+    sec_h = second >>16;
+    sec_l = second & 0xFFFF;
+    nano_second_8 = nanoSecond >> 3;
+    nsec8_h = (nano_second_8 >>16) & RTL8367C_PTP_TIME_NSEC_H_NSEC_MASK;
+    nsec8_l = nano_second_8 &0xFFFF;
+
+    if((retVal = rtl8367c_setAsicReg(RTL8367C_REG_PTP_TIME_SEC_H_SEC, sec_h)) != RT_ERR_OK)
+        return retVal;
+    if((retVal = rtl8367c_setAsicReg(RTL8367C_REG_PTP_TIME_SEC_L_SEC, sec_l)) != RT_ERR_OK)
+        return retVal;
+    if((retVal = rtl8367c_setAsicReg(RTL8367C_REG_PTP_TIME_NSEC_L_NSEC, nsec8_l)) != RT_ERR_OK)
+        return retVal;
+
+    if (PTP_TIME_ADJ_INC == type)
+        regData = nsec8_h | (PTP_TIME_INC<<RTL8367C_PTP_TIME_NSEC_H_CMD_OFFSET) | RTL8367C_PTP_TIME_NSEC_H_EXEC_MASK;
+    else
+        regData = nsec8_h | (PTP_TIME_DEC<<RTL8367C_PTP_TIME_NSEC_H_CMD_OFFSET) | RTL8367C_PTP_TIME_NSEC_H_EXEC_MASK;
+
+    if((retVal = rtl8367c_setAsicReg(RTL8367C_REG_PTP_TIME_NSEC_H_NSEC, regData)) != RT_ERR_OK)
+        return retVal;
+
+    count = 0;
+    do {
+        if((retVal = rtl8367c_getAsicRegBit(RTL8367C_REG_PTP_TIME_NSEC_H_NSEC, RTL8367C_PTP_TIME_NSEC_H_EXEC_OFFSET, &busyFlag)) != RT_ERR_OK)
+            return retVal;
+        count++;
+    } while ((busyFlag != 0)&&(count<5));
+
+    if (busyFlag != 0)
+        return RT_ERR_BUSYWAIT_TIMEOUT;
+
+    return RT_ERR_OK;
+}
+
+/* Function Name:
+ *      rtl8367c_setAsicEavSysTimeCtrl
+ * Description:
+ *      Set PTP system time control
+ * Input:
+ *      control - start or stop
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK     - Success
+ *      RT_ERR_SMI  - SMI access error
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_setAsicEavSysTimeCtrl(rtk_uint32 control)
+{
+    ret_t  retVal;
+    rtk_uint32 regData;
+
+    if (control>=PTP_TIME_CTRL_END)
+         return RT_ERR_INPUT;
+
+    regData = 0;
+    if (PTP_TIME_CTRL_START == control)
+            regData = RTL8367C_CFG_TIMER_EN_FRC_MASK | RTL8367C_CFG_TIMER_1588_EN_MASK;
+    else
+        regData = 0;
+
+    if((retVal = rtl8367c_setAsicReg(RTL8367C_REG_PTP_TIME_CFG, regData)) != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+/* Function Name:
+ *      rtl8367c_getAsicEavSysTimeCtrl
+ * Description:
+ *      Get PTP system time control
+ * Input:
+ *      None
+ * Output:
+ *      pControl - start or stop
+ * Return:
+ *      RT_ERR_OK     - Success
+ *      RT_ERR_SMI  - SMI access error
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicEavSysTimeCtrl(rtk_uint32* pControl)
+{
+    ret_t  retVal;
+    rtk_uint32 regData;
+    rtk_uint32 mask;
+
+    mask = RTL8367C_CFG_TIMER_EN_FRC_MASK | RTL8367C_CFG_TIMER_1588_EN_MASK;
+
+    if((retVal = rtl8367c_getAsicReg(RTL8367C_REG_PTP_TIME_CFG, &regData)) != RT_ERR_OK)
+        return retVal;
+
+    if( (regData & mask) == mask)
+        *pControl = PTP_TIME_CTRL_START;
+    else if( (regData & mask) == 0)
+        *pControl = PTP_TIME_CTRL_STOP;
+    else
+        return RT_ERR_NOT_ALLOWED;
+
+    return RT_ERR_OK;
+}
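+
+/*
+ * Usage sketch (editor's illustration): the free-running 1588 timer must
+ * be started before timestamps are captured; a subsequent get should then
+ * report PTP_TIME_CTRL_START.
+ */
+#if 0
+static ret_t example_start_ptp_timer(void)
+{
+    return rtl8367c_setAsicEavSysTimeCtrl(PTP_TIME_CTRL_START);
+}
+#endif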
+
+/* Function Name:
+ *      rtl8367c_setAsicEavInterruptMask
+ * Description:
+ *      Set PTP interrupt enable mask
+ * Input:
+ *      imr     - Interrupt mask
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK   - Success
+ *      RT_ERR_SMI  - SMI access error
+ * Note:
+ *      [0]:TX_SYNC,
+ *      [1]:TX_DELAY,
+ *      [2]:TX_PDELAY_REQ,
+ *      [3]:TX_PDELAY_RESP,
+ *      [4]:RX_SYNC,
+ *      [5]:RX_DELAY,
+ *      [6]:RX_PDELAY_REQ,
+ *      [7]:RX_PDELAY_RESP,
+ */
+ret_t rtl8367c_setAsicEavInterruptMask(rtk_uint32 imr)
+{
+    if ((imr&(RTL8367C_PTP_INTR_MASK<<8))>0)
+         return RT_ERR_INPUT;
+
+    return rtl8367c_setAsicRegBits(RTL8367C_REG_PTP_TIME_CFG2, RTL8367C_PTP_INTR_MASK, imr);
+}
+/* Function Name:
+ *      rtl8367c_getAsicEavInterruptMask
+ * Description:
+ *      Get PTP interrupt enable mask
+ * Input:
+ *      None
+ * Output:
+ *      pImr    - Interrupt mask
+ * Return:
+ *      RT_ERR_OK   - Success
+ *      RT_ERR_SMI  - SMI access error
+ * Note:
+ *      [0]:TX_SYNC,
+ *      [1]:TX_DELAY,
+ *      [2]:TX_PDELAY_REQ,
+ *      [3]:TX_PDELAY_RESP,
+ *      [4]:RX_SYNC,
+ *      [5]:RX_DELAY,
+ *      [6]:RX_PDELAY_REQ,
+ *      [7]:RX_PDELAY_RESP,
+ */
+ret_t rtl8367c_getAsicEavInterruptMask(rtk_uint32* pImr)
+{
+    return rtl8367c_getAsicRegBits(RTL8367C_REG_PTP_TIME_CFG2, RTL8367C_PTP_INTR_MASK, pImr);
+}
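+
+/*
+ * Usage sketch (editor's illustration): imr is a bitmap over the
+ * RTL8367C_PTP_INTR_IMRS enum declared in rtl8367c_asicdrv_eav.h, so
+ * enabling only the SYNC interrupts in both directions looks like:
+ */
+#if 0
+static ret_t example_enable_sync_interrupts(void)
+{
+    rtk_uint32 imr = (1 << PTP_IMRS_TX_SYNC) | (1 << PTP_IMRS_RX_SYNC);
+
+    return rtl8367c_setAsicEavInterruptMask(imr);
+}
+#endif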
+
+/* Function Name:
+ *      rtl8367c_getAsicEavInterruptStatus
+ * Description:
+ *      Get PTP interrupt port status mask
+ * Input:
+ *      None
+ * Output:
+ *      pIms    - Interrupt port status mask
+ * Return:
+ *      RT_ERR_OK   - Success
+ *      RT_ERR_SMI  - SMI access error
+ * Note:
+ *      [0]:p0 interrupt,
+ *      [1]:p1 interrupt,
+ *      [2]:p2 interrupt,
+ *      [3]:p3 interrupt,
+ *      [4]:p4 interrupt,
+ */
+ret_t rtl8367c_getAsicEavInterruptStatus(rtk_uint32* pIms)
+{
+    return rtl8367c_getAsicRegBits(RTL8367C_REG_PTP_INTERRUPT_CFG, RTL8367C_PTP_PORT_MASK, pIms);
+}
+
+/* Function Name:
+ *      rtl8367c_setAsicEavPortInterruptStatus
+ * Description:
+ *      Clear per-port PTP interrupt status
+ * Input:
+ *      port    - Physical port number (0~9)
+ *      ims     - Interrupt status mask
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK   - Success
+ *      RT_ERR_SMI  - SMI access error
+ * Note:
+ *      This API clears the ASIC interrupt status; status bits are cleared by writing 1.
+ *      [0]:TX_SYNC,
+ *      [1]:TX_DELAY,
+ *      [2]:TX_PDELAY_REQ,
+ *      [3]:TX_PDELAY_RESP,
+ *      [4]:RX_SYNC,
+ *      [5]:RX_DELAY,
+ *      [6]:RX_PDELAY_REQ,
+ *      [7]:RX_PDELAY_RESP,
+ */
+ret_t rtl8367c_setAsicEavPortInterruptStatus(rtk_uint32 port, rtk_uint32 ims)
+{
+
+    if(port > RTL8367C_PORTNO)
+        return RT_ERR_PORT_ID;
+
+    if(port < 5)
+        return rtl8367c_setAsicRegBits(RTL8367C_EAV_PORT_CFG_REG(port), RTL8367C_PTP_INTR_MASK,ims);
+    else if(port == 5)
+        return rtl8367c_setAsicRegBits(RTL8367C_REG_P5_EAV_CFG, RTL8367C_PTP_INTR_MASK,ims);
+    else if(port == 6)
+        return rtl8367c_setAsicRegBits(RTL8367C_REG_P6_EAV_CFG, RTL8367C_PTP_INTR_MASK,ims);
+    else if(port == 7)
+        return rtl8367c_setAsicRegBits(RTL8367C_REG_P7_EAV_CFG, RTL8367C_PTP_INTR_MASK,ims);
+    else if(port == 8)
+        return rtl8367c_setAsicRegBits(RTL8367C_REG_P8_EAV_CFG, RTL8367C_PTP_INTR_MASK,ims);
+    else if(port == 9)
+        return rtl8367c_setAsicRegBits(RTL8367C_REG_P9_EAV_CFG, RTL8367C_PTP_INTR_MASK,ims);
+
+    return RT_ERR_OK;
+}
+/* Function Name:
+ *      rtl8367c_getAsicEavPortInterruptStatus
+ * Description:
+ *      Get per-port PTP interrupt status
+ * Input:
+ *      port    - Physical port number (0~9)
+ * Output:
+ *      pIms    - Interrupt status mask
+ * Return:
+ *      RT_ERR_OK   - Success
+ *      RT_ERR_SMI  - SMI access error
+ * Note:
+ *      [0]:TX_SYNC,
+ *      [1]:TX_DELAY,
+ *      [2]:TX_PDELAY_REQ,
+ *      [3]:TX_PDELAY_RESP,
+ *      [4]:RX_SYNC,
+ *      [5]:RX_DELAY,
+ *      [6]:RX_PDELAY_REQ,
+ *      [7]:RX_PDELAY_RESP,
+ */
+ret_t rtl8367c_getAsicEavPortInterruptStatus(rtk_uint32 port, rtk_uint32* pIms)
+{
+
+    if(port > RTL8367C_PORTNO)
+        return RT_ERR_PORT_ID;
+    if(port < 5)
+        return rtl8367c_getAsicRegBits(RTL8367C_EAV_PORT_CFG_REG(port), RTL8367C_PTP_INTR_MASK, pIms);
+    else if(port == 5)
+        return rtl8367c_getAsicRegBits(RTL8367C_REG_P5_EAV_CFG, RTL8367C_PTP_INTR_MASK, pIms);
+    else if(port == 6)
+        return rtl8367c_getAsicRegBits(RTL8367C_REG_P6_EAV_CFG, RTL8367C_PTP_INTR_MASK,pIms);
+    else if(port == 7)
+        return rtl8367c_getAsicRegBits(RTL8367C_REG_P7_EAV_CFG, RTL8367C_PTP_INTR_MASK,pIms);
+    else if(port == 8)
+        return rtl8367c_getAsicRegBits(RTL8367C_REG_P8_EAV_CFG, RTL8367C_PTP_INTR_MASK,pIms);
+    else if(port == 9)
+        return rtl8367c_getAsicRegBits(RTL8367C_REG_P9_EAV_CFG, RTL8367C_PTP_INTR_MASK,pIms);
+
+    return RT_ERR_OK;
+
+}
+
+
+/* Function Name:
+ *      rtl8367c_setAsicEavPortEnable
+ * Description:
+ *      Set per-port EAV function enable/disable
+ * Input:
+ *      port         - Physical port number (0~9)
+ *      enabled     - 1: enabled, 0: disabled
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK         - Success
+ *      RT_ERR_SMI      - SMI access error
+ *      RT_ERR_PORT_ID  - Invalid port number
+ * Note:
+ *      If the EAV function is enabled, PTP event message packets are timestamped before trapping
+ */
+ret_t rtl8367c_setAsicEavPortEnable(rtk_uint32 port, rtk_uint32 enabled)
+{
+    if(port > RTL8367C_PORTNO)
+        return RT_ERR_PORT_ID;
+
+    if(port < 5)
+        return rtl8367c_setAsicRegBit(RTL8367C_EAV_PORT_CFG_REG(port), RTL8367C_EAV_CFG_PTP_PHY_EN_EN_OFFSET, enabled);
+    else if(port == 5)
+        return rtl8367c_setAsicRegBit(RTL8367C_REG_P5_EAV_CFG, RTL8367C_EAV_CFG_PTP_PHY_EN_EN_OFFSET, enabled);
+    else if(port == 6)
+        return rtl8367c_setAsicRegBit(RTL8367C_REG_P6_EAV_CFG, RTL8367C_EAV_CFG_PTP_PHY_EN_EN_OFFSET, enabled);
+    else if(port == 7)
+        return rtl8367c_setAsicRegBit(RTL8367C_REG_P7_EAV_CFG, RTL8367C_EAV_CFG_PTP_PHY_EN_EN_OFFSET, enabled);
+    else if(port == 8)
+        return rtl8367c_setAsicRegBit(RTL8367C_REG_P8_EAV_CFG, RTL8367C_EAV_CFG_PTP_PHY_EN_EN_OFFSET, enabled);
+    else if(port == 9)
+        return rtl8367c_setAsicRegBit(RTL8367C_REG_P9_EAV_CFG, RTL8367C_EAV_CFG_PTP_PHY_EN_EN_OFFSET, enabled);
+
+
+    return RT_ERR_OK;
+}
+/* Function Name:
+ *      rtl8367c_getAsicEavPortEnable
+ * Description:
+ *      Get per-port EAV function enable/disable
+ * Input:
+ *      port         - Physical port number (0~9)
+ *      pEnabled     - 1: enabled, 0: disabled
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK         - Success
+ *      RT_ERR_SMI      - SMI access error
+ *      RT_ERR_PORT_ID  - Invalid port number
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicEavPortEnable(rtk_uint32 port, rtk_uint32 *pEnabled)
+{
+    if(port > RTL8367C_PORTNO)
+        return RT_ERR_PORT_ID;
+
+    if(port < 5)
+        return rtl8367c_getAsicRegBit(RTL8367C_EAV_PORT_CFG_REG(port), RTL8367C_EAV_CFG_PTP_PHY_EN_EN_OFFSET, pEnabled);
+    else if(port == 5)
+        return rtl8367c_getAsicRegBit(RTL8367C_REG_P5_EAV_CFG, RTL8367C_EAV_CFG_PTP_PHY_EN_EN_OFFSET, pEnabled);
+    else if(port == 6)
+        return rtl8367c_getAsicRegBit(RTL8367C_REG_P6_EAV_CFG, RTL8367C_EAV_CFG_PTP_PHY_EN_EN_OFFSET, pEnabled);
+    else if(port == 7)
+        return rtl8367c_getAsicRegBit(RTL8367C_REG_P7_EAV_CFG, RTL8367C_EAV_CFG_PTP_PHY_EN_EN_OFFSET, pEnabled);
+    else if(port == 8)
+        return rtl8367c_getAsicRegBit(RTL8367C_REG_P8_EAV_CFG, RTL8367C_EAV_CFG_PTP_PHY_EN_EN_OFFSET, pEnabled);
+    else if(port == 9)
+        return rtl8367c_getAsicRegBit(RTL8367C_REG_P9_EAV_CFG, RTL8367C_EAV_CFG_PTP_PHY_EN_EN_OFFSET, pEnabled);
+
+
+    return RT_ERR_OK;
+}
+
+/* Function Name:
+ *      rtl8367c_getAsicEavPortTimeStamp
+ * Description:
+ *      Get PTP port time stamp
+ * Input:
+ *      port         - Physical port number (0~9)
+ *      type     -  PTP packet type
+ * Output:
+ *      timeStamp - timestamp (sequence id, seconds, nanoseconds)
+ * Return:
+ *      RT_ERR_OK     - Success
+ *      RT_ERR_SMI  - SMI access error
+ * Note:
+ *      The time granularity is 8 nanoseconds.
+ */
+ret_t rtl8367c_getAsicEavPortTimeStamp(rtk_uint32 port, rtk_uint32 type, rtl8367c_ptp_time_stamp_t* timeStamp)
+{
+    ret_t retVal;
+    rtk_uint32 sec_h, sec_l, nsec8_h, nsec8_l;
+    rtk_uint32 nano_second_8;
+
+    if(port > 9)
+        return RT_ERR_PORT_ID;
+    if(type >= PTP_PKT_TYPE_END)
+        return RT_ERR_INPUT;
+
+    if(port < 5){
+        if((retVal = rtl8367c_getAsicReg(RTL8367C_REG_SEQ_ID(port, type), &timeStamp->sequence_id))!=  RT_ERR_OK)
+            return retVal;
+        if((retVal = rtl8367c_getAsicReg(RTL8367C_REG_PORT_SEC_H(port) , &sec_h)) != RT_ERR_OK)
+           return retVal;
+        if((retVal = rtl8367c_getAsicReg(RTL8367C_REG_PORT_SEC_L(port), &sec_l)) != RT_ERR_OK)
+           return retVal;
+        if((retVal = rtl8367c_getAsicRegBits(RTL8367C_REG_PORT_NSEC_H(port) , RTL8367C_PORT_NSEC_H_MASK,&nsec8_h)) != RT_ERR_OK)
+           return retVal;
+        if((retVal = rtl8367c_getAsicReg(RTL8367C_REG_PORT_NSEC_L(port) , &nsec8_l)) != RT_ERR_OK)
+           return retVal;
+    }else if(port == 5){
+        if((retVal = rtl8367c_getAsicReg(RTL8367C_REG_P5_TX_SYNC_SEQ_ID+type, &timeStamp->sequence_id))!=  RT_ERR_OK)
+            return retVal;
+        if((retVal = rtl8367c_getAsicReg(RTL8367C_REG_P5_PORT_SEC_31_16, &sec_h)) != RT_ERR_OK)
+           return retVal;
+        if((retVal = rtl8367c_getAsicReg(RTL8367C_REG_P5_PORT_SEC_15_0, &sec_l)) != RT_ERR_OK)
+           return retVal;
+        if((retVal = rtl8367c_getAsicRegBits(RTL8367C_REG_P5_PORT_NSEC_26_16 , RTL8367C_PORT_NSEC_H_MASK,&nsec8_h)) != RT_ERR_OK)
+           return retVal;
+        if((retVal = rtl8367c_getAsicReg(RTL8367C_REG_P5_PORT_NSEC_15_0, &nsec8_l)) != RT_ERR_OK)
+           return retVal;
+    }else if(port == 6){
+        if((retVal = rtl8367c_getAsicReg(RTL8367C_REG_P6_TX_SYNC_SEQ_ID+type, &timeStamp->sequence_id))!=  RT_ERR_OK)
+            return retVal;
+        if((retVal = rtl8367c_getAsicReg(RTL8367C_REG_P6_PORT_SEC_31_16, &sec_h)) != RT_ERR_OK)
+           return retVal;
+        if((retVal = rtl8367c_getAsicReg(RTL8367C_REG_P6_PORT_SEC_15_0, &sec_l)) != RT_ERR_OK)
+           return retVal;
+        if((retVal = rtl8367c_getAsicRegBits(RTL8367C_REG_P6_PORT_NSEC_26_16 , RTL8367C_PORT_NSEC_H_MASK,&nsec8_h)) != RT_ERR_OK)
+           return retVal;
+        if((retVal = rtl8367c_getAsicReg(RTL8367C_REG_P6_PORT_NSEC_15_0, &nsec8_l)) != RT_ERR_OK)
+           return retVal;
+    }else if(port == 7){
+        if((retVal = rtl8367c_getAsicReg(RTL8367C_REG_P7_TX_SYNC_SEQ_ID+type, &timeStamp->sequence_id))!=  RT_ERR_OK)
+            return retVal;
+        if((retVal = rtl8367c_getAsicReg(RTL8367C_REG_P7_PORT_SEC_31_16, &sec_h)) != RT_ERR_OK)
+           return retVal;
+        if((retVal = rtl8367c_getAsicReg(RTL8367C_REG_P7_PORT_SEC_15_0, &sec_l)) != RT_ERR_OK)
+           return retVal;
+        if((retVal = rtl8367c_getAsicRegBits(RTL8367C_REG_P7_PORT_NSEC_26_16 , RTL8367C_PORT_NSEC_H_MASK,&nsec8_h)) != RT_ERR_OK)
+           return retVal;
+        if((retVal = rtl8367c_getAsicReg(RTL8367C_REG_P7_PORT_NSEC_15_0, &nsec8_l)) != RT_ERR_OK)
+           return retVal;
+    }else if(port == 8){
+        if((retVal = rtl8367c_getAsicReg(RTL8367C_REG_P8_TX_SYNC_SEQ_ID+type, &timeStamp->sequence_id))!=  RT_ERR_OK)
+            return retVal;
+        if((retVal = rtl8367c_getAsicReg(RTL8367C_REG_P8_PORT_SEC_31_16, &sec_h)) != RT_ERR_OK)
+           return retVal;
+        if((retVal = rtl8367c_getAsicReg(RTL8367C_REG_P8_PORT_SEC_15_0, &sec_l)) != RT_ERR_OK)
+           return retVal;
+        if((retVal = rtl8367c_getAsicRegBits(RTL8367C_REG_P8_PORT_NSEC_26_16 , RTL8367C_PORT_NSEC_H_MASK,&nsec8_h)) != RT_ERR_OK)
+           return retVal;
+        if((retVal = rtl8367c_getAsicReg(RTL8367C_REG_P8_PORT_NSEC_15_0, &nsec8_l)) != RT_ERR_OK)
+           return retVal;
+    }else if(port == 9){
+        if((retVal = rtl8367c_getAsicReg(RTL8367C_REG_P9_TX_SYNC_SEQ_ID+type, &timeStamp->sequence_id))!=  RT_ERR_OK)
+            return retVal;
+        if((retVal = rtl8367c_getAsicReg(RTL8367C_REG_P9_PORT_SEC_31_16, &sec_h)) != RT_ERR_OK)
+           return retVal;
+        if((retVal = rtl8367c_getAsicReg(RTL8367C_REG_P9_PORT_SEC_15_0, &sec_l)) != RT_ERR_OK)
+           return retVal;
+        if((retVal = rtl8367c_getAsicRegBits(RTL8367C_REG_P9_PORT_NSEC_26_16 , RTL8367C_PORT_NSEC_H_MASK,&nsec8_h)) != RT_ERR_OK)
+           return retVal;
+        if((retVal = rtl8367c_getAsicReg(RTL8367C_REG_P9_PORT_NSEC_15_0, &nsec8_l)) != RT_ERR_OK)
+           return retVal;
+    }
+
+    timeStamp->second = (sec_h<<16) | sec_l;
+    nano_second_8 = (nsec8_h<<16) | nsec8_l;
+    timeStamp->nano_second = nano_second_8<<3;
+
+    return RT_ERR_OK;
+}
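+
+/*
+ * Usage sketch (editor's illustration): a typical interrupt handler path
+ * reads the captured timestamp for the event type, then acknowledges the
+ * per-port status with a write-1-to-clear, per the note above.
+ */
+#if 0
+static ret_t example_handle_rx_sync(rtk_uint32 port)
+{
+    ret_t retVal;
+    rtl8367c_ptp_time_stamp_t stamp;
+
+    if((retVal = rtl8367c_getAsicEavPortTimeStamp(port, PTP_PKT_TYPE_RX_SYNC, &stamp)) != RT_ERR_OK)
+        return retVal;
+    /* stamp.sequence_id / stamp.second / stamp.nano_second are now valid */
+    return rtl8367c_setAsicEavPortInterruptStatus(port, 1 << PTP_IMRS_RX_SYNC);
+}
+#endif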
+
+
+/* Function Name:
+ *      rtl8367c_setAsicEavTrap
+ * Description:
+ *      Set per-port PTP packet trap to CPU
+ * Input:
+ *      port         - Physical port number (0~5)
+ *      enabled     - 1: enabled, 0: disabled
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK         - Success
+ *      RT_ERR_SMI      - SMI access error
+ *      RT_ERR_PORT_ID  - Invalid port number
+ * Note:
+ *      If EAV trap is enabled, the switch traps PTP packets to the CPU
+ */
+ret_t rtl8367c_setAsicEavTrap(rtk_uint32 port, rtk_uint32 enabled)
+{
+    if(port > RTL8367C_PORTNO)
+        return RT_ERR_PORT_ID;
+
+    return rtl8367c_setAsicRegBit(RTL8367C_REG_PTP_PORT0_CFG1 + (port * 0x20), RTL8367C_PTP_PORT0_CFG1_OFFSET, enabled);
+}
+/* Function Name:
+ *      rtl8367c_getAsicEavTrap
+ * Description:
+ *      Get per-port PTP packet trap to CPU
+ * Input:
+ *      port         - Physical port number (0~5)
+ *      pEnabled     - 1: enabled, 0: disabled
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK         - Success
+ *      RT_ERR_SMI      - SMI access error
+ *      RT_ERR_PORT_ID  - Invalid port number
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicEavTrap(rtk_uint32 port, rtk_uint32 *pEnabled)
+{
+    if(port > RTL8367C_PORTNO)
+        return RT_ERR_PORT_ID;
+
+    return rtl8367c_getAsicRegBit(RTL8367C_REG_PTP_PORT0_CFG1 + (port * 0x20), RTL8367C_PTP_PORT0_CFG1_OFFSET, pEnabled);
+}
+
+/* Function Name:
+ *      rtl8367c_setAsicEavEnable
+ * Description:
+ *      Set per-port EAV function enable/disable
+ * Input:
+ *      port         - Physical port number (0~5)
+ *      enabled     - 1: enabled, 0: disabled
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK         - Success
+ *      RT_ERR_SMI      - SMI access error
+ *      RT_ERR_PORT_ID  - Invalid port number
+ * Note:
+ *      If the EAV function is enabled, PTP event message packets are timestamped before trapping
+ */
+ret_t rtl8367c_setAsicEavEnable(rtk_uint32 port, rtk_uint32 enabled)
+{
+    if(port > RTL8367C_PORTNO)
+        return RT_ERR_PORT_ID;
+
+    return rtl8367c_setAsicRegBit(RTL8367C_REG_EAV_CTRL0, port, enabled);
+}
+/* Function Name:
+ *      rtl8367c_getAsicEavEnable
+ * Description:
+ *      Get per-port EAV function enable/disable
+ * Input:
+ *      port         - Physical port number (0~5)
+ *      pEnabled     - 1: enabled, 0: disabled
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK         - Success
+ *      RT_ERR_SMI      - SMI access error
+ *      RT_ERR_PORT_ID  - Invalid port number
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicEavEnable(rtk_uint32 port, rtk_uint32 *pEnabled)
+{
+    if(port > RTL8367C_PORTNO)
+        return RT_ERR_PORT_ID;
+
+    return rtl8367c_getAsicRegBit(RTL8367C_REG_EAV_CTRL0, port, pEnabled);
+}
+/* Function Name:
+ *      rtl8367c_setAsicEavPriRemapping
+ * Description:
+ *      Set non-EAV streaming priority remapping
+ * Input:
+ *      srcpriority - Priority value
+ *      priority     - Absolute priority value
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK                     - Success
+ *      RT_ERR_SMI                  - SMI access error
+ *      RT_ERR_QOS_INT_PRIORITY      - Invalid priority
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_setAsicEavPriRemapping(rtk_uint32 srcpriority, rtk_uint32 priority)
+{
+    if(srcpriority > RTL8367C_PRIMAX || priority > RTL8367C_PRIMAX)
+        return RT_ERR_QOS_INT_PRIORITY;
+
+    return rtl8367c_setAsicRegBits(RTL8367C_EAV_PRIORITY_REMAPPING_REG(srcpriority), RTL8367C_EAV_PRIORITY_REMAPPING_MASK(srcpriority),priority);
+}
+/* Function Name:
+ *      rtl8367c_getAsicEavPriRemapping
+ * Description:
+ *      Get non-EAV streaming priority remapping
+ * Input:
+ *      srcpriority - Priority value
+ *      pPriority     - Absolute priority value
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK                     - Success
+ *      RT_ERR_SMI                  - SMI access error
+ *      RT_ERR_QOS_INT_PRIORITY      - Invalid priority
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicEavPriRemapping(rtk_uint32 srcpriority, rtk_uint32 *pPriority)
+{
+    if(srcpriority > RTL8367C_PRIMAX )
+        return RT_ERR_QOS_INT_PRIORITY;
+
+    return rtl8367c_getAsicRegBits(RTL8367C_EAV_PRIORITY_REMAPPING_REG(srcpriority), RTL8367C_EAV_PRIORITY_REMAPPING_MASK(srcpriority),pPriority);
+}
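+
+/*
+ * Usage sketch (editor's illustration): remap every non-EAV source
+ * priority down to 0 so EAV streams always win arbitration.  Mapping
+ * everything to 0 is an arbitrary policy choice, not a recommendation.
+ */
+#if 0
+static ret_t example_demote_noneav_traffic(void)
+{
+    ret_t retVal;
+    rtk_uint32 pri;
+
+    for(pri = 0; pri <= RTL8367C_PRIMAX; pri++)
+    {
+        if((retVal = rtl8367c_setAsicEavPriRemapping(pri, 0)) != RT_ERR_OK)
+            return retVal;
+    }
+    return RT_ERR_OK;
+}
+#endif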
+
diff -Nruw linux-4.4.115-fbx/drivers/misc/freebox./rtlapi/rtl8367c_asicdrv_eav.h linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/rtl8367c_asicdrv_eav.h
--- linux-4.4.115-fbx/drivers/misc/freebox./rtlapi/rtl8367c_asicdrv_eav.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/rtl8367c_asicdrv_eav.h	2019-01-22 16:16:24.711257346 +0100
@@ -0,0 +1,92 @@
+#ifndef _RTL8367C_ASICDRV_EAV_H_
+#define _RTL8367C_ASICDRV_EAV_H_
+
+#include <rtl8367c_asicdrv.h>
+
+typedef enum RTL8367C_PTP_TIME_CMD_E
+{
+    PTP_TIME_READ = 0,
+    PTP_TIME_WRITE,
+    PTP_TIME_INC,
+    PTP_TIME_DEC,
+    PTP_TIME_CMD_END
+}RTL8367C_PTP_TIME_CMD;
+
+typedef enum RTL8367C_PTP_TIME_ADJ_E
+{
+    PTP_TIME_ADJ_INC = 0,
+    PTP_TIME_ADJ_DEC,
+    PTP_TIME_ADJ_END
+}RTL8367C_PTP_TIME_ADJ;
+
+typedef enum RTL8367C_PTP_TIME_CTRL_E
+{
+    PTP_TIME_CTRL_STOP = 0,
+    PTP_TIME_CTRL_START,
+    PTP_TIME_CTRL_END
+}RTL8367C_PTP_TIME_CTRL;
+
+typedef enum RTL8367C_PTP_INTR_IMRS_E
+{
+    PTP_IMRS_TX_SYNC,
+    PTP_IMRS_TX_DELAY_REQ,
+    PTP_IMRS_TX_PDELAY_REQ,
+    PTP_IMRS_TX_PDELAY_RESP,
+    PTP_IMRS_RX_SYNC,
+    PTP_IMRS_RX_DELAY_REQ,
+    PTP_IMRS_RX_PDELAY_REQ,
+    PTP_IMRS_RX_PDELAY_RESP,
+    PTP_IMRS_END,
+}RTL8367C_PTP_INTR_IMRS;
+
+
+typedef enum RTL8367C_PTP_PKT_TYPE_E
+{
+    PTP_PKT_TYPE_TX_SYNC,
+    PTP_PKT_TYPE_TX_DELAY_REQ,
+    PTP_PKT_TYPE_TX_PDELAY_REQ,
+    PTP_PKT_TYPE_TX_PDELAY_RESP,
+    PTP_PKT_TYPE_RX_SYNC,
+    PTP_PKT_TYPE_RX_DELAY_REQ,
+    PTP_PKT_TYPE_RX_PDELAY_REQ,
+    PTP_PKT_TYPE_RX_PDELAY_RESP,
+    PTP_PKT_TYPE_END,
+}RTL8367C_PTP_PKT_TYPE;
+
+typedef struct  rtl8367c_ptp_time_stamp_s{
+    rtk_uint32 sequence_id;
+    rtk_uint32 second;
+    rtk_uint32 nano_second;
+}rtl8367c_ptp_time_stamp_t;
+
+#define RTL8367C_PTP_INTR_MASK        0xFF
+
+#define RTL8367C_PTP_PORT_MASK        0x3FF
+
+extern ret_t rtl8367c_setAsicEavMacAddress(ether_addr_t mac);
+extern ret_t rtl8367c_getAsicEavMacAddress(ether_addr_t *pMac);
+extern ret_t rtl8367c_setAsicEavTpid(rtk_uint32 outerTag, rtk_uint32 innerTag);
+extern ret_t rtl8367c_getAsicEavTpid(rtk_uint32* pOuterTag, rtk_uint32* pInnerTag);
+extern ret_t rtl8367c_setAsicEavSysTime(rtk_uint32 second, rtk_uint32 nanoSecond);
+extern ret_t rtl8367c_getAsicEavSysTime(rtk_uint32* pSecond, rtk_uint32* pNanoSecond);
+extern ret_t rtl8367c_setAsicEavSysTimeAdjust(rtk_uint32 type, rtk_uint32 second, rtk_uint32 nanoSecond);
+extern ret_t rtl8367c_setAsicEavSysTimeCtrl(rtk_uint32 control);
+extern ret_t rtl8367c_getAsicEavSysTimeCtrl(rtk_uint32* pControl);
+extern ret_t rtl8367c_setAsicEavInterruptMask(rtk_uint32 imr);
+extern ret_t rtl8367c_getAsicEavInterruptMask(rtk_uint32* pImr);
+extern ret_t rtl8367c_getAsicEavInterruptStatus(rtk_uint32* pIms);
+extern ret_t rtl8367c_setAsicEavPortInterruptStatus(rtk_uint32 port, rtk_uint32 ims);
+extern ret_t rtl8367c_getAsicEavPortInterruptStatus(rtk_uint32 port, rtk_uint32* pIms);
+extern ret_t rtl8367c_setAsicEavPortEnable(rtk_uint32 port, rtk_uint32 enabled);
+extern ret_t rtl8367c_getAsicEavPortEnable(rtk_uint32 port, rtk_uint32 *pEnabled);
+extern ret_t rtl8367c_getAsicEavPortTimeStamp(rtk_uint32 port, rtk_uint32 type, rtl8367c_ptp_time_stamp_t* timeStamp);
+
+extern ret_t rtl8367c_setAsicEavTrap(rtk_uint32 port, rtk_uint32 enabled);
+extern ret_t rtl8367c_getAsicEavTrap(rtk_uint32 port, rtk_uint32 *pEnabled);
+extern ret_t rtl8367c_setAsicEavEnable(rtk_uint32 port, rtk_uint32 enabled);
+extern ret_t rtl8367c_getAsicEavEnable(rtk_uint32 port, rtk_uint32 *pEnabled);
+extern ret_t rtl8367c_setAsicEavPriRemapping(rtk_uint32 srcpriority, rtk_uint32 priority);
+extern ret_t rtl8367c_getAsicEavPriRemapping(rtk_uint32 srcpriority, rtk_uint32 *pPriority);
+
+#endif /*#ifndef _RTL8367C_ASICDRV_EAV_H_*/
+
diff -Nruw linux-4.4.115-fbx/drivers/misc/freebox./rtlapi/rtl8367c_asicdrv_eee.c linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/rtl8367c_asicdrv_eee.c
--- linux-4.4.115-fbx/drivers/misc/freebox./rtlapi/rtl8367c_asicdrv_eee.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/rtl8367c_asicdrv_eee.c	2019-01-22 16:16:24.711257346 +0100
@@ -0,0 +1,165 @@
+/*
+ * Copyright (C) 2013 Realtek Semiconductor Corp.
+ * All Rights Reserved.
+ *
+ * This program is the proprietary software of Realtek Semiconductor
+ * Corporation and/or its licensors, and only be used, duplicated,
+ * modified or distributed under the authorized license from Realtek.
+ *
+ * ANY USE OF THE SOFTWARE OTHER THAN AS AUTHORIZED UNDER
+ * THIS LICENSE OR COPYRIGHT LAW IS PROHIBITED.
+ *
+ * $Revision: 80668 $
+ * $Date: 2017-07-19 14:21:43 +0800 (Wed, 19 Jul 2017) $
+ *
+ * Purpose : RTL8367C switch high-level API for RTL8367C
+ * Feature :
+ *
+ */
+
+#include <rtl8367c_asicdrv_eee.h>
+#include <rtl8367c_asicdrv_phy.h>
+
+/*
+@func ret_t | rtl8367c_setAsicEee100M | Set 100M EEE function enable/disable.
+@parm rtk_uint32 | port | The port number.
+@parm rtk_uint32 | enabled | 1: enabled, 0: disabled.
+@rvalue RT_ERR_OK | Success.
+@rvalue RT_ERR_SMI | SMI access error.
+@rvalue RT_ERR_INPUT | Invalid input parameter.
+@comm
+    This API sets the 100M EEE enable function.
+
+*/
+ret_t rtl8367c_setAsicEee100M(rtk_uint32 port, rtk_uint32 enable)
+{
+    rtk_api_ret_t   retVal;
+    rtk_uint32      regData;
+
+    if(port >= RTL8367C_PORTNO)
+        return RT_ERR_PORT_ID;
+
+    if (enable > 1)
+        return RT_ERR_INPUT;
+
+    if((retVal = rtl8367c_getAsicPHYOCPReg(port, EEE_OCP_PHY_ADDR, &regData)) != RT_ERR_OK)
+        return retVal;
+
+    if(enable)
+        regData |= (0x0001 << 1);
+    else
+        regData &= ~(0x0001 << 1);
+
+    if((retVal = rtl8367c_setAsicPHYOCPReg(port, EEE_OCP_PHY_ADDR, regData)) != RT_ERR_OK)
+        return retVal;
+
+    if((retVal = rtl8367c_getAsicReg(RTL8367C_PORT_EEE_CFG_REG(port), &regData)) != RT_ERR_OK)
+        return retVal;
+
+    if(enable)
+        regData |= (0x0001 << 11);
+    else
+        regData &= ~(0x0001 << 11);
+
+    if((retVal = rtl8367c_setAsicReg(RTL8367C_PORT_EEE_CFG_REG(port),regData)) != RT_ERR_OK)
+        return retVal; 
+
+    return RT_ERR_OK;
+}
+
+/*
+@func ret_t | rtl8367c_getAsicEee100M | Get 100M EEE enable/disable.
+@parm rtk_uint32 | port | The port number.
+@parm rtk_uint32* | enabled | 1: enabled, 0: disabled.
+@rvalue RT_ERR_OK | Success.
+@rvalue RT_ERR_SMI | SMI access error.
+@rvalue RT_ERR_INPUT | Invalid input parameter.
+@comm
+    This API gets the 100M EEE enable state.
+*/
+ret_t rtl8367c_getAsicEee100M(rtk_uint32 port, rtk_uint32 *enable)
+{
+    rtk_api_ret_t   retVal;
+    rtk_uint32      regData;
+
+    if(port >= RTL8367C_PORTNO)
+        return RT_ERR_PORT_ID;
+
+    if((retVal = rtl8367c_getAsicPHYOCPReg(port, EEE_OCP_PHY_ADDR, &regData)) != RT_ERR_OK)
+        return retVal;
+
+    *enable = (regData & (0x0001 << 1)) ? ENABLED : DISABLED;
+    return RT_ERR_OK;
+}
+
+/*
+@func ret_t | rtl8367c_setAsicEeeGiga | Set Giga EEE function enable/disable.
+@parm rtk_uint32 | port | The port number.
+@parm rtk_uint32 | enabled | 1: enabled, 0: disabled.
+@rvalue RT_ERR_OK | Success.
+@rvalue RT_ERR_SMI | SMI access error.
+@rvalue RT_ERR_INPUT | Invalid input parameter.
+@comm
+    This API sets the Giga EEE enable function.
+
+*/
+ret_t rtl8367c_setAsicEeeGiga(rtk_uint32 port, rtk_uint32 enable)
+{
+    rtk_api_ret_t   retVal;
+    rtk_uint32      regData;
+
+    if(port >= RTL8367C_PORTNO)
+        return RT_ERR_PORT_ID;
+
+    if (enable > 1)
+        return RT_ERR_INPUT;
+
+    if((retVal = rtl8367c_getAsicPHYOCPReg(port, EEE_OCP_PHY_ADDR, &regData)) != RT_ERR_OK)
+        return retVal;
+
+    if(enable)
+        regData |= (0x0001 << 2);
+    else
+        regData &= ~(0x0001 << 2);
+
+    if((retVal = rtl8367c_setAsicPHYOCPReg(port, EEE_OCP_PHY_ADDR, regData)) != RT_ERR_OK)
+        return retVal;
+
+    if((retVal = rtl8367c_getAsicReg(RTL8367C_PORT_EEE_CFG_REG(port), &regData)) != RT_ERR_OK)
+        return retVal;
+
+    if(enable)
+        regData |= (0x0001 << 10);
+    else
+        regData &= ~(0x0001 << 10);
+
+    if((retVal = rtl8367c_setAsicReg(RTL8367C_PORT_EEE_CFG_REG(port),regData)) != RT_ERR_OK)
+        return retVal; 
+
+    return RT_ERR_OK;
+}
+
+/*
+@func ret_t | rtl8367c_getAsicEeeGiga | Get Giga EEE enable/disable.
+@parm rtk_uint32 | port | The port number.
+@parm rtk_uint32* | enabled | 1: enabled, 0: disabled.
+@rvalue RT_ERR_OK | Success.
+@rvalue RT_ERR_SMI | SMI access error.
+@rvalue RT_ERR_INPUT | Invalid input parameter.
+@comm
+    This API gets the Giga EEE enable state.
+*/
+ret_t rtl8367c_getAsicEeeGiga(rtk_uint32 port, rtk_uint32 *enable)
+{
+    rtk_api_ret_t   retVal;
+    rtk_uint32      regData;
+
+    if(port >= RTL8367C_PORTNO)
+        return RT_ERR_PORT_ID;
+
+    if((retVal = rtl8367c_getAsicPHYOCPReg(port, EEE_OCP_PHY_ADDR, &regData)) != RT_ERR_OK)
+        return retVal;
+
+    *enable = (regData & (0x0001 << 2)) ? ENABLED : DISABLED;
+    return RT_ERR_OK;
+}
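+
+/*
+ * Usage sketch (editor's illustration): EEE is enabled per speed, touching
+ * both the PHY OCP register and the per-port MAC config as the setters
+ * above show; a port usually wants both speeds switched together.
+ */
+#if 0
+static ret_t example_enable_eee(rtk_uint32 port)
+{
+    ret_t retVal;
+
+    if((retVal = rtl8367c_setAsicEee100M(port, 1)) != RT_ERR_OK)
+        return retVal;
+    return rtl8367c_setAsicEeeGiga(port, 1);
+}
+#endif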
diff -Nruw linux-4.4.115-fbx/drivers/misc/freebox./rtlapi/rtl8367c_asicdrv_eee.h linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/rtl8367c_asicdrv_eee.h
--- linux-4.4.115-fbx/drivers/misc/freebox./rtlapi/rtl8367c_asicdrv_eee.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/rtl8367c_asicdrv_eee.h	2019-01-22 16:16:24.711257346 +0100
@@ -0,0 +1,14 @@
+#ifndef _RTL8367C_ASICDRV_EEE_H_
+#define _RTL8367C_ASICDRV_EEE_H_
+
+#include <rtl8367c_asicdrv.h>
+
+#define EEE_OCP_PHY_ADDR    (0xA5D0)
+
+extern ret_t rtl8367c_setAsicEee100M(rtk_uint32 port, rtk_uint32 enable);
+extern ret_t rtl8367c_getAsicEee100M(rtk_uint32 port, rtk_uint32 *enable);
+extern ret_t rtl8367c_setAsicEeeGiga(rtk_uint32 port, rtk_uint32 enable);
+extern ret_t rtl8367c_getAsicEeeGiga(rtk_uint32 port, rtk_uint32 *enable);
+
+
+#endif /*_RTL8367C_ASICDRV_EEE_H_*/
diff -Nruw linux-4.4.115-fbx/drivers/misc/freebox./rtlapi/rtl8367c_asicdrv_fc.c linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/rtl8367c_asicdrv_fc.c
--- linux-4.4.115-fbx/drivers/misc/freebox./rtlapi/rtl8367c_asicdrv_fc.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/rtl8367c_asicdrv_fc.c	2019-01-22 16:16:24.711257346 +0100
@@ -0,0 +1,1356 @@
+/*
+ * Copyright (C) 2013 Realtek Semiconductor Corp.
+ * All Rights Reserved.
+ *
+ * This program is the proprietary software of Realtek Semiconductor
+ * Corporation and/or its licensors, and only be used, duplicated,
+ * modified or distributed under the authorized license from Realtek.
+ *
+ * ANY USE OF THE SOFTWARE OTHER THAN AS AUTHORIZED UNDER
+ * THIS LICENSE OR COPYRIGHT LAW IS PROHIBITED.
+ *
+ * $Revision: 76306 $
+ * $Date: 2017-03-08 15:13:58 +0800 (Wed, 08 Mar 2017) $
+ *
+ * Purpose : RTL8367C switch high-level API for RTL8367C
+ * Feature : Flow control related functions
+ *
+ */
+
+#include <rtl8367c_asicdrv_fc.h>
+/* Function Name:
+ *      rtl8367c_setAsicFlowControlSelect
+ * Description:
+ *      Set system flow control type
+ * Input:
+ *      select      - System flow control type 1: Ingress flow control 0:Egress flow control
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK   - Success
+ *      RT_ERR_SMI  - SMI access error
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_setAsicFlowControlSelect(rtk_uint32 select)
+{
+    return rtl8367c_setAsicRegBit(RTL8367C_REG_FLOWCTRL_CTRL0, RTL8367C_FLOWCTRL_TYPE_OFFSET, select);
+}
+/* Function Name:
+ *      rtl8367c_getAsicFlowControlSelect
+ * Description:
+ *      Get system flow control type
+ * Input:
+ *      pSelect         - System flow control type 1: Ingress flow control 0:Egress flow control
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK   - Success
+ *      RT_ERR_SMI  - SMI access error
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicFlowControlSelect(rtk_uint32 *pSelect)
+{
+    return rtl8367c_getAsicRegBit(RTL8367C_REG_FLOWCTRL_CTRL0, RTL8367C_FLOWCTRL_TYPE_OFFSET, pSelect);
+}
+/* Function Name:
+ *      rtl8367c_setAsicFlowControlJumboMode
+ * Description:
+ *      Set Jumbo threshold for flow control
+ * Input:
+ *      enabled         - Jumbo mode flow control 1: Enable 0:Disable
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK   - Success
+ *      RT_ERR_SMI  - SMI access error
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_setAsicFlowControlJumboMode(rtk_uint32 enabled)
+{
+    return rtl8367c_setAsicRegBit(RTL8367C_REG_FLOWCTRL_JUMBO_SIZE, RTL8367C_JUMBO_MODE_OFFSET, enabled);
+}
+/* Function Name:
+ *      rtl8367c_getAsicFlowControlJumboMode
+ * Description:
+ *      Get Jumbo threshold for flow control
+ * Input:
+ *      pEnabled        - Jumbo mode flow control 1: Enable 0:Disable
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK   - Success
+ *      RT_ERR_SMI  - SMI access error
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicFlowControlJumboMode(rtk_uint32* pEnabled)
+{
+    return rtl8367c_getAsicRegBit(RTL8367C_REG_FLOWCTRL_JUMBO_SIZE, RTL8367C_JUMBO_MODE_OFFSET, pEnabled);
+}
+/* Function Name:
+ *      rtl8367c_setAsicFlowControlJumboModeSize
+ * Description:
+ *      Set Jumbo size for Jumbo mode flow control
+ * Input:
+ *      size        - Jumbo size 0:3Kbytes 1:4Kbytes 2:6Kbytes 3:9Kbytes
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - Success
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_OUT_OF_RANGE - input parameter out of range
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_setAsicFlowControlJumboModeSize(rtk_uint32 size)
+{
+    if(size >= FC_JUMBO_SIZE_END)
+        return RT_ERR_OUT_OF_RANGE;
+
+    return rtl8367c_setAsicRegBits(RTL8367C_REG_FLOWCTRL_JUMBO_SIZE, RTL8367C_JUMBO_SIZE_MASK, size);
+}
+/* Function Name:
+ *      rtl8367c_getAsicFlowControlJumboModeSize
+ * Description:
+ *      Get Jumbo size for Jumbo mode flow control
+ * Input:
+ *      pSize       - Jumbo size 0:3Kbytes 1:4Kbytes 2:6Kbytes 3:9Kbytes
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK   - Success
+ *      RT_ERR_SMI  - SMI access error
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicFlowControlJumboModeSize(rtk_uint32* pSize)
+{
+    return rtl8367c_getAsicRegBits(RTL8367C_REG_FLOWCTRL_JUMBO_SIZE, RTL8367C_JUMBO_SIZE_MASK, pSize);
+}
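+
+/*
+ * Usage sketch (editor's illustration): jumbo-mode flow control is a mode
+ * bit plus a frame-size encoding (0:3K 1:4K 2:6K 3:9K bytes); selecting
+ * 9 Kbyte frames looks like:
+ */
+#if 0
+static ret_t example_enable_jumbo_flowctrl(void)
+{
+    ret_t retVal;
+
+    if((retVal = rtl8367c_setAsicFlowControlJumboMode(1)) != RT_ERR_OK)
+        return retVal;
+    return rtl8367c_setAsicFlowControlJumboModeSize(3);    /* 3 = 9 Kbytes */
+}
+#endif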
+
+/* Function Name:
+ *      rtl8367c_setAsicFlowControlQueueEgressEnable
+ * Description:
+ *      Set flow control ability for each queue
+ * Input:
+ *      port    - Physical port number (0~7)
+ *      qid     - Queue id
+ *      enabled - 1: enabled, 0: disabled
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK       - Success
+ *      RT_ERR_SMI      - SMI access error
+ *      RT_ERR_PORT_ID  - Invalid port number
+ *      RT_ERR_QUEUE_ID - Invalid queue id
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_setAsicFlowControlQueueEgressEnable(rtk_uint32 port, rtk_uint32 qid, rtk_uint32 enabled)
+{
+    if(port > RTL8367C_PORTIDMAX)
+        return RT_ERR_PORT_ID;
+
+    if(qid > RTL8367C_QIDMAX)
+        return RT_ERR_QUEUE_ID;
+
+    return rtl8367c_setAsicRegBit(RTL8367C_FLOWCRTL_EGRESS_QUEUE_ENABLE_REG(port), RTL8367C_FLOWCRTL_EGRESS_QUEUE_ENABLE_REG_OFFSET(port)+ qid, enabled);
+}
+/* Function Name:
+ *      rtl8367c_getAsicFlowControlQueueEgressEnable
+ * Description:
+ *      Get flow control ability for each queue
+ * Input:
+ *      port    - Physical port number (0~7)
+ *      qid     - Queue id
+ *      pEnabled - 1: enabled, 0: disabled
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK       - Success
+ *      RT_ERR_SMI      - SMI access error
+ *      RT_ERR_PORT_ID  - Invalid port number
+ *      RT_ERR_QUEUE_ID - Invalid queue id
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicFlowControlQueueEgressEnable(rtk_uint32 port, rtk_uint32 qid, rtk_uint32* pEnabled)
+{
+    if(port > RTL8367C_PORTIDMAX)
+        return RT_ERR_PORT_ID;
+
+    if(qid > RTL8367C_QIDMAX)
+        return RT_ERR_QUEUE_ID;
+
+    return  rtl8367c_getAsicRegBit(RTL8367C_FLOWCRTL_EGRESS_QUEUE_ENABLE_REG(port), RTL8367C_FLOWCRTL_EGRESS_QUEUE_ENABLE_REG_OFFSET(port)+ qid, pEnabled);
+}
+/* Function Name:
+ *      rtl8367c_setAsicFlowControlDropAll
+ * Description:
+ *      Set system-based drop parameters
+ * Input:
+ *      dropall     - Whole system drop threshold
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - Success
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_OUT_OF_RANGE - input parameter out of range
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_setAsicFlowControlDropAll(rtk_uint32 dropall)
+{
+    if(dropall >= RTL8367C_PAGE_NUMBER)
+        return RT_ERR_OUT_OF_RANGE;
+
+    return rtl8367c_setAsicRegBits(RTL8367C_REG_FLOWCTRL_CTRL0, RTL8367C_DROP_ALL_THRESHOLD_MASK, dropall);
+}
+/* Function Name:
+ *      rtl8367c_getAsicFlowControlDropAll
+ * Description:
+ *      Get system-based drop parameters
+ * Input:
+ *      pDropall    - Whole system drop threshold
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK   - Success
+ *      RT_ERR_SMI  - SMI access error
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicFlowControlDropAll(rtk_uint32* pDropall)
+{
+    return rtl8367c_getAsicRegBits(RTL8367C_REG_FLOWCTRL_CTRL0, RTL8367C_DROP_ALL_THRESHOLD_MASK, pDropall);
+}
+/* Function Name:
+ *      rtl8367c_setAsicFlowControlPauseAll
+ * Description:
+ *      Set system-based all ports enable flow control parameters
+ * Input:
+ *      threshold   - Whole system pause all threshold
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - Success
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_OUT_OF_RANGE - input parameter out of range
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_setAsicFlowControlPauseAllThreshold(rtk_uint32 threshold)
+{
+    if(threshold >= RTL8367C_PAGE_NUMBER)
+        return RT_ERR_OUT_OF_RANGE;
+
+    return rtl8367c_setAsicRegBits(RTL8367C_REG_FLOWCTRL_ALL_ON, RTL8367C_FLOWCTRL_ALL_ON_THRESHOLD_MASK, threshold);
+}
+/* Function Name:
+ *      rtl8367c_getAsicFlowControlPauseAllThreshold
+ * Description:
+ *      Get system-based all ports enable flow control parameters
+ * Input:
+ *      pThreshold  - Whole system pause all threshold
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK   - Success
+ *      RT_ERR_SMI  - SMI access error
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicFlowControlPauseAllThreshold(rtk_uint32 *pThreshold)
+{
+    return rtl8367c_getAsicRegBits(RTL8367C_REG_FLOWCTRL_ALL_ON, RTL8367C_FLOWCTRL_ALL_ON_THRESHOLD_MASK, pThreshold);
+}
+/* Function Name:
+ *      rtl8367c_setAsicFlowControlSystemThreshold
+ * Description:
+ *      Set system-based flow control parameters
+ * Input:
+ *      onThreshold     - Flow control turn ON threshold
+ *      offThreshold    - Flow control turn OFF threshold
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - Success
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_OUT_OF_RANGE - input parameter out of range
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_setAsicFlowControlSystemThreshold(rtk_uint32 onThreshold, rtk_uint32 offThreshold)
+{
+    ret_t retVal;
+
+    if((onThreshold >= RTL8367C_PAGE_NUMBER) || (offThreshold >= RTL8367C_PAGE_NUMBER))
+        return RT_ERR_OUT_OF_RANGE;
+
+    retVal = rtl8367c_setAsicRegBits(RTL8367C_REG_FLOWCTRL_SYS_OFF, RTL8367C_FLOWCTRL_SYS_OFF_MASK, offThreshold);
+
+    if(retVal != RT_ERR_OK)
+        return retVal;
+
+    retVal = rtl8367c_setAsicRegBits(RTL8367C_REG_FLOWCTRL_SYS_ON, RTL8367C_FLOWCTRL_SYS_ON_MASK, onThreshold);
+
+    return retVal;
+}
+/* Function Name:
+ *      rtl8367c_getAsicFlowControlSystemThreshold
+ * Description:
+ *      Get system-based flow control parameters
+ * Input:
+ *      pOnThreshold    - Flow control turn ON threshold
+ *      pOffThreshold   - Flow control turn OFF threshold
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK   - Success
+ *      RT_ERR_SMI  - SMI access error
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicFlowControlSystemThreshold(rtk_uint32 *pOnThreshold, rtk_uint32 *pOffThreshold)
+{
+    ret_t retVal;
+
+    retVal = rtl8367c_getAsicRegBits(RTL8367C_REG_FLOWCTRL_SYS_OFF, RTL8367C_FLOWCTRL_SYS_OFF_MASK, pOffThreshold);
+
+    if(retVal != RT_ERR_OK)
+        return retVal;
+
+    retVal = rtl8367c_getAsicRegBits(RTL8367C_REG_FLOWCTRL_SYS_ON, RTL8367C_FLOWCTRL_SYS_ON_MASK, pOnThreshold);
+
+    return retVal;
+}
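+
+/*
+ * Usage sketch (editor's illustration): thresholds are counted in buffer
+ * pages and the ON/OFF pair reads as hysteresis: pause starts once usage
+ * reaches the ON threshold and stops when it falls below the OFF
+ * threshold.  The page counts below are arbitrary placeholders, not tuned
+ * values.
+ */
+#if 0
+static ret_t example_system_fc_hysteresis(void)
+{
+    return rtl8367c_setAsicFlowControlSystemThreshold(0x120, 0x100);
+}
+#endif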
+/* Function Name:
+ *      rtl8367c_setAsicFlowControlSharedThreshold
+ * Description:
+ *      Set share-based flow control parameters
+ * Input:
+ *      onThreshold     - Flow control turn ON threshold
+ *      offThreshold    - Flow control turn OFF threshold
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - Success
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_OUT_OF_RANGE - input parameter out of range
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_setAsicFlowControlSharedThreshold(rtk_uint32 onThreshold, rtk_uint32 offThreshold)
+{
+    ret_t retVal;
+
+    if((onThreshold >= RTL8367C_PAGE_NUMBER) || (offThreshold >= RTL8367C_PAGE_NUMBER))
+        return RT_ERR_OUT_OF_RANGE;
+
+    retVal = rtl8367c_setAsicRegBits(RTL8367C_REG_FLOWCTRL_SHARE_OFF, RTL8367C_FLOWCTRL_SHARE_OFF_MASK, offThreshold);
+
+    if(retVal != RT_ERR_OK)
+        return retVal;
+
+    retVal = rtl8367c_setAsicRegBits(RTL8367C_REG_FLOWCTRL_SHARE_ON, RTL8367C_FLOWCTRL_SHARE_ON_MASK, onThreshold);
+
+    return retVal;
+}
+/* Function Name:
+ *      rtl8367c_getAsicFlowControlSharedThreshold
+ * Description:
+ *      Get share-based flow control parameters
+ * Input:
+ *      pOnThreshold    - Flow control turn ON threshold
+ *      pOffThreshold   - Flow control turn OFF threshold
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK   - Success
+ *      RT_ERR_SMI  - SMI access error
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicFlowControlSharedThreshold(rtk_uint32 *pOnThreshold, rtk_uint32 *pOffThreshold)
+{
+    ret_t retVal;
+
+    retVal = rtl8367c_getAsicRegBits(RTL8367C_REG_FLOWCTRL_SHARE_OFF, RTL8367C_FLOWCTRL_SHARE_OFF_MASK, pOffThreshold);
+
+    if(retVal != RT_ERR_OK)
+        return retVal;
+
+    retVal = rtl8367c_getAsicRegBits(RTL8367C_REG_FLOWCTRL_SHARE_ON, RTL8367C_FLOWCTRL_SHARE_ON_MASK, pOnThreshold);
+
+    return retVal;
+}
+/* Function Name:
+ *      rtl8367c_setAsicFlowControlPortThreshold
+ * Description:
+ *      Set Port-based flow control parameters
+ * Input:
+ *      onThreshold     - Flow control turn ON threshold
+ *      offThreshold    - Flow control turn OFF threshold
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - Success
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_OUT_OF_RANGE - input parameter out of range
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_setAsicFlowControlPortThreshold(rtk_uint32 onThreshold, rtk_uint32 offThreshold)
+{
+    ret_t retVal;
+
+    if((onThreshold >= RTL8367C_PAGE_NUMBER) || (offThreshold >= RTL8367C_PAGE_NUMBER))
+        return RT_ERR_OUT_OF_RANGE;
+
+    retVal = rtl8367c_setAsicRegBits(RTL8367C_REG_FLOWCTRL_PORT_OFF, RTL8367C_FLOWCTRL_PORT_OFF_MASK, offThreshold);
+
+    if(retVal != RT_ERR_OK)
+        return retVal;
+
+    retVal = rtl8367c_setAsicRegBits(RTL8367C_REG_FLOWCTRL_PORT_ON, RTL8367C_FLOWCTRL_PORT_ON_MASK, onThreshold);
+
+    return retVal;
+}
+/* Function Name:
+ *      rtl8367c_getAsicFlowControlPortThreshold
+ * Description:
+ *      Get Port-based flow control parameters
+ * Input:
+ *      pOnThreshold    - Flow control turn ON threshold
+ *      pOffThreshold   - Flow control turn OFF threshold
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK   - Success
+ *      RT_ERR_SMI  - SMI access error
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicFlowControlPortThreshold(rtk_uint32 *pOnThreshold, rtk_uint32 *pOffThreshold)
+{
+    ret_t retVal;
+
+    retVal = rtl8367c_getAsicRegBits(RTL8367C_REG_FLOWCTRL_PORT_OFF, RTL8367C_FLOWCTRL_PORT_OFF_MASK, pOffThreshold);
+
+    if(retVal != RT_ERR_OK)
+        return retVal;
+
+    retVal = rtl8367c_getAsicRegBits(RTL8367C_REG_FLOWCTRL_PORT_ON, RTL8367C_FLOWCTRL_PORT_ON_MASK, pOnThreshold);
+
+    return retVal;
+}
+/* Function Name:
+ *      rtl8367c_setAsicFlowControlPortPrivateThreshold
+ * Description:
+ *      Set Port-private-based flow control parameters
+ * Input:
+ *      onThreshold     - Flow control turn ON threshold
+ *      offThreshold    - Flow control turn OFF threshold
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - Success
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_OUT_OF_RANGE - input parameter out of range
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_setAsicFlowControlPortPrivateThreshold(rtk_uint32 onThreshold, rtk_uint32 offThreshold)
+{
+    ret_t retVal;
+
+    if((onThreshold >= RTL8367C_PAGE_NUMBER) || (offThreshold >= RTL8367C_PAGE_NUMBER))
+        return RT_ERR_OUT_OF_RANGE;
+
+    retVal = rtl8367c_setAsicRegBits(RTL8367C_REG_FLOWCTRL_PORT_PRIVATE_OFF, RTL8367C_FLOWCTRL_PORT_PRIVATE_OFF_MASK, offThreshold);
+
+    if(retVal != RT_ERR_OK)
+        return retVal;
+
+    retVal = rtl8367c_setAsicRegBits(RTL8367C_REG_FLOWCTRL_PORT_PRIVATE_ON, RTL8367C_FLOWCTRL_PORT_PRIVATE_ON_MASK, onThreshold);
+
+    return retVal;
+}
+/* Function Name:
+ *      rtl8367c_getAsicFlowControlPortPrivateThreshold
+ * Description:
+ *      Get Port-private-based flow control parameters
+ * Input:
+ *      pOnThreshold    - Flow control turn ON threshold
+ *      pOffThreshold   - Flow control turn OFF threshold
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK   - Success
+ *      RT_ERR_SMI  - SMI access error
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicFlowControlPortPrivateThreshold(rtk_uint32 *pOnThreshold, rtk_uint32 *pOffThreshold)
+{
+    ret_t retVal;
+
+    retVal = rtl8367c_getAsicRegBits(RTL8367C_REG_FLOWCTRL_PORT_PRIVATE_OFF, RTL8367C_FLOWCTRL_PORT_PRIVATE_OFF_MASK, pOffThreshold);
+
+    if(retVal != RT_ERR_OK)
+        return retVal;
+
+    retVal = rtl8367c_getAsicRegBits(RTL8367C_REG_FLOWCTRL_PORT_PRIVATE_ON, RTL8367C_FLOWCTRL_PORT_PRIVATE_ON_MASK, pOnThreshold);
+
+    return retVal;
+}
+/* Function Name:
+ *      rtl8367c_setAsicFlowControlSystemDropThreshold
+ * Description:
+ *      Set system-based drop parameters
+ * Input:
+ *      onThreshold     - Drop turn ON threshold
+ *      offThreshold    - Drop turn OFF threshold
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - Success
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_OUT_OF_RANGE - input parameter out of range
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_setAsicFlowControlSystemDropThreshold(rtk_uint32 onThreshold, rtk_uint32 offThreshold)
+{
+    ret_t retVal;
+
+    if((onThreshold >= RTL8367C_PAGE_NUMBER) || (offThreshold >= RTL8367C_PAGE_NUMBER))
+        return RT_ERR_OUT_OF_RANGE;
+
+    retVal = rtl8367c_setAsicRegBits(RTL8367C_REG_FLOWCTRL_FCOFF_SYS_OFF, RTL8367C_FLOWCTRL_FCOFF_SYS_OFF_MASK, offThreshold);
+
+    if(retVal != RT_ERR_OK)
+        return retVal;
+
+    retVal = rtl8367c_setAsicRegBits(RTL8367C_REG_FLOWCTRL_FCOFF_SYS_ON, RTL8367C_FLOWCTRL_FCOFF_SYS_ON_MASK, onThreshold);
+
+    return retVal;
+}
+/* Function Name:
+ *      rtl8367c_getAsicFlowControlSystemDropThreshold
+ * Description:
+ *      Get system-based drop parameters
+ * Input:
+ *      pOnThreshold    - Drop turn ON threshold
+ *      pOffThreshold   - Drop turn OFF threshold
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK   - Success
+ *      RT_ERR_SMI  - SMI access error
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicFlowControlSystemDropThreshold(rtk_uint32 *pOnThreshold, rtk_uint32 *pOffThreshold)
+{
+    ret_t retVal;
+
+    retVal = rtl8367c_getAsicRegBits(RTL8367C_REG_FLOWCTRL_FCOFF_SYS_OFF, RTL8367C_FLOWCTRL_FCOFF_SYS_OFF_MASK, pOffThreshold);
+
+    if(retVal != RT_ERR_OK)
+        return retVal;
+
+    retVal = rtl8367c_getAsicRegBits(RTL8367C_REG_FLOWCTRL_FCOFF_SYS_ON, RTL8367C_FLOWCTRL_FCOFF_SYS_ON_MASK, pOnThreshold);
+
+    return retVal;
+}
+/* Function Name:
+ *      rtl8367c_setAsicFlowControlSharedDropThreshold
+ * Description:
+ *      Set share-based drop parameters
+ * Input:
+ *      onThreshold     - Drop turn ON threshold
+ *      offThreshold    - Drop turn OFF threshold
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - Success
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_OUT_OF_RANGE - input parameter out of range
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_setAsicFlowControlSharedDropThreshold(rtk_uint32 onThreshold, rtk_uint32 offThreshold)
+{
+    ret_t retVal;
+
+    if((onThreshold >= RTL8367C_PAGE_NUMBER) || (offThreshold >= RTL8367C_PAGE_NUMBER))
+        return RT_ERR_OUT_OF_RANGE;
+
+    retVal = rtl8367c_setAsicRegBits(RTL8367C_REG_FLOWCTRL_FCOFF_SHARE_OFF, RTL8367C_FLOWCTRL_FCOFF_SHARE_OFF_MASK, offThreshold);
+    if(retVal != RT_ERR_OK)
+        return retVal;
+
+    retVal = rtl8367c_setAsicRegBits(RTL8367C_REG_FLOWCTRL_FCOFF_SHARE_ON, RTL8367C_FLOWCTRL_FCOFF_SHARE_ON_MASK, onThreshold);
+
+    return retVal;
+}
+/* Function Name:
+ *      rtl8367c_getAsicFlowControlSharedDropThreshold
+ * Description:
+ *      Get share-based drop parameters
+ * Input:
+ *      None
+ * Output:
+ *      pOnThreshold    - Drop turn ON threshold
+ *      pOffThreshold   - Drop turn OFF threshold
+ * Return:
+ *      RT_ERR_OK   - Success
+ *      RT_ERR_SMI  - SMI access error
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicFlowControlSharedDropThreshold(rtk_uint32 *pOnThreshold, rtk_uint32 *pOffThreshold)
+{
+    ret_t retVal;
+
+    retVal = rtl8367c_getAsicRegBits(RTL8367C_REG_FLOWCTRL_FCOFF_SHARE_OFF, RTL8367C_FLOWCTRL_FCOFF_SHARE_OFF_MASK, pOffThreshold);
+    if(retVal != RT_ERR_OK)
+        return retVal;
+
+    retVal = rtl8367c_getAsicRegBits(RTL8367C_REG_FLOWCTRL_FCOFF_SHARE_ON, RTL8367C_FLOWCTRL_FCOFF_SHARE_ON_MASK, pOnThreshold);
+
+    return retVal;
+}
+/* Function Name:
+ *      rtl8367c_setAsicFlowControlPortDropThreshold
+ * Description:
+ *      Set Port-based drop parameters
+ * Input:
+ *      onThreshold     - Drop turn ON threshold
+ *      offThreshold    - Drop turn OFF threshold
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - Success
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_OUT_OF_RANGE - input parameter out of range
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_setAsicFlowControlPortDropThreshold(rtk_uint32 onThreshold, rtk_uint32 offThreshold)
+{
+    ret_t retVal;
+
+    if((onThreshold >= RTL8367C_PAGE_NUMBER) || (offThreshold >= RTL8367C_PAGE_NUMBER))
+        return RT_ERR_OUT_OF_RANGE;
+
+    retVal = rtl8367c_setAsicRegBits(RTL8367C_REG_FLOWCTRL_FCOFF_PORT_OFF, RTL8367C_FLOWCTRL_FCOFF_PORT_OFF_MASK, offThreshold);
+
+    if(retVal != RT_ERR_OK)
+        return retVal;
+
+    retVal = rtl8367c_setAsicRegBits(RTL8367C_REG_FLOWCTRL_FCOFF_PORT_ON, RTL8367C_FLOWCTRL_FCOFF_PORT_ON_MASK, onThreshold);
+
+    return retVal;
+}
+/* Function Name:
+ *      rtl8367c_getAsicFlowControlPortDropThreshold
+ * Description:
+ *      Get Port-based drop parameters
+ * Input:
+ *      None
+ * Output:
+ *      pOnThreshold    - Drop turn ON threshold
+ *      pOffThreshold   - Drop turn OFF threshold
+ * Return:
+ *      RT_ERR_OK   - Success
+ *      RT_ERR_SMI  - SMI access error
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicFlowControlPortDropThreshold(rtk_uint32 *pOnThreshold, rtk_uint32 *pOffThreshold)
+{
+    ret_t retVal;
+
+    retVal = rtl8367c_getAsicRegBits(RTL8367C_REG_FLOWCTRL_FCOFF_PORT_OFF, RTL8367C_FLOWCTRL_FCOFF_PORT_OFF_MASK, pOffThreshold);
+    if(retVal != RT_ERR_OK)
+        return retVal;
+
+    retVal = rtl8367c_getAsicRegBits(RTL8367C_REG_FLOWCTRL_FCOFF_PORT_ON, RTL8367C_FLOWCTRL_FCOFF_PORT_ON_MASK, pOnThreshold);
+
+    return retVal;
+}
+/* Function Name:
+ *      rtl8367c_setAsicFlowControlPortPrivateDropThreshold
+ * Description:
+ *      Set Port-private-based drop parameters
+ * Input:
+ *      onThreshold     - Drop turn ON threshold
+ *      offThreshold    - Drop turn OFF threshold
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - Success
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_OUT_OF_RANGE - input parameter out of range
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_setAsicFlowControlPortPrivateDropThreshold(rtk_uint32 onThreshold, rtk_uint32 offThreshold)
+{
+    ret_t retVal;
+
+    if((onThreshold >= RTL8367C_PAGE_NUMBER) || (offThreshold >= RTL8367C_PAGE_NUMBER))
+        return RT_ERR_OUT_OF_RANGE;
+
+    retVal = rtl8367c_setAsicRegBits(RTL8367C_REG_FLOWCTRL_FCOFF_PORT_PRIVATE_OFF, RTL8367C_FLOWCTRL_FCOFF_PORT_PRIVATE_OFF_MASK, offThreshold);
+
+    if(retVal != RT_ERR_OK)
+        return retVal;
+
+    retVal = rtl8367c_setAsicRegBits(RTL8367C_REG_FLOWCTRL_FCOFF_PORT_PRIVATE_ON, RTL8367C_FLOWCTRL_FCOFF_PORT_PRIVATE_ON_MASK, onThreshold);
+
+    return retVal;
+}
+/* Function Name:
+ *      rtl8367c_getAsicFlowControlPortPrivateDropThreshold
+ * Description:
+ *      Get Port-private-based drop parameters
+ * Input:
+ *      None
+ * Output:
+ *      pOnThreshold    - Drop turn ON threshold
+ *      pOffThreshold   - Drop turn OFF threshold
+ * Return:
+ *      RT_ERR_OK   - Success
+ *      RT_ERR_SMI  - SMI access error
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicFlowControlPortPrivateDropThreshold(rtk_uint32 *pOnThreshold, rtk_uint32 *pOffThreshold)
+{
+    ret_t retVal;
+
+    retVal = rtl8367c_getAsicRegBits(RTL8367C_REG_FLOWCTRL_FCOFF_PORT_PRIVATE_OFF, RTL8367C_FLOWCTRL_FCOFF_PORT_PRIVATE_OFF_MASK, pOffThreshold);
+    if(retVal != RT_ERR_OK)
+        return retVal;
+
+    retVal = rtl8367c_getAsicRegBits(RTL8367C_REG_FLOWCTRL_FCOFF_PORT_PRIVATE_ON, RTL8367C_FLOWCTRL_FCOFF_PORT_PRIVATE_ON_MASK, pOnThreshold);
+
+    return retVal;
+}
+/* Function Name:
+ *      rtl8367c_setAsicFlowControlSystemJumboThreshold
+ * Description:
+ *      Set Jumbo system-based flow control parameters
+ * Input:
+ *      onThreshold     - Flow control turn ON threshold
+ *      offThreshold    - Flow control turn OFF threshold
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - Success
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_OUT_OF_RANGE - input parameter out of range
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_setAsicFlowControlSystemJumboThreshold(rtk_uint32 onThreshold, rtk_uint32 offThreshold)
+{
+    ret_t retVal;
+
+    if((onThreshold >= RTL8367C_PAGE_NUMBER) || (offThreshold >= RTL8367C_PAGE_NUMBER))
+        return RT_ERR_OUT_OF_RANGE;
+
+    retVal = rtl8367c_setAsicRegBits(RTL8367C_REG_FLOWCTRL_JUMBO_SYS_OFF, RTL8367C_FLOWCTRL_JUMBO_SYS_OFF_MASK, offThreshold);
+
+    if(retVal != RT_ERR_OK)
+        return retVal;
+
+    retVal = rtl8367c_setAsicRegBits(RTL8367C_REG_FLOWCTRL_JUMBO_SYS_ON, RTL8367C_FLOWCTRL_JUMBO_SYS_ON_MASK, onThreshold);
+
+    return retVal;
+}
+/* Function Name:
+ *      rtl8367c_getAsicFlowControlSystemJumboThreshold
+ * Description:
+ *      Get Jumbo system-based flow control parameters
+ * Input:
+ *      None
+ * Output:
+ *      pOnThreshold    - Flow control turn ON threshold
+ *      pOffThreshold   - Flow control turn OFF threshold
+ * Return:
+ *      RT_ERR_OK   - Success
+ *      RT_ERR_SMI  - SMI access error
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicFlowControlSystemJumboThreshold(rtk_uint32 *pOnThreshold, rtk_uint32 *pOffThreshold)
+{
+    ret_t retVal;
+
+    retVal = rtl8367c_getAsicRegBits(RTL8367C_REG_FLOWCTRL_JUMBO_SYS_OFF, RTL8367C_FLOWCTRL_JUMBO_SYS_OFF_MASK, pOffThreshold);
+
+    if(retVal != RT_ERR_OK)
+        return retVal;
+
+    retVal = rtl8367c_getAsicRegBits(RTL8367C_REG_FLOWCTRL_JUMBO_SYS_ON, RTL8367C_FLOWCTRL_JUMBO_SYS_ON_MASK, pOnThreshold);
+
+    return retVal;
+}
+/* Function Name:
+ *      rtl8367c_setAsicFlowControlSharedJumboThreshold
+ * Description:
+ *      Set Jumbo share-based flow control parameters
+ * Input:
+ *      onThreshold     - Flow control turn ON threshold
+ *      offThreshold    - Flow control turn OFF threshold
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - Success
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_OUT_OF_RANGE - input parameter out of range
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_setAsicFlowControlSharedJumboThreshold(rtk_uint32 onThreshold, rtk_uint32 offThreshold)
+{
+    ret_t retVal;
+
+    if((onThreshold >= RTL8367C_PAGE_NUMBER) || (offThreshold >= RTL8367C_PAGE_NUMBER))
+        return RT_ERR_OUT_OF_RANGE;
+
+    retVal = rtl8367c_setAsicRegBits(RTL8367C_REG_FLOWCTRL_JUMBO_SHARE_OFF, RTL8367C_FLOWCTRL_JUMBO_SHARE_OFF_MASK, offThreshold);
+
+    if(retVal != RT_ERR_OK)
+        return retVal;
+
+    retVal = rtl8367c_setAsicRegBits(RTL8367C_REG_FLOWCTRL_JUMBO_SHARE_ON, RTL8367C_FLOWCTRL_JUMBO_SHARE_ON_MASK, onThreshold);
+
+    return retVal;
+}
+/* Function Name:
+ *      rtl8367c_getAsicFlowControlSharedJumboThreshold
+ * Description:
+ *      Get Jumbo share-based flow control parameters
+ * Input:
+ *      None
+ * Output:
+ *      pOnThreshold    - Flow control turn ON threshold
+ *      pOffThreshold   - Flow control turn OFF threshold
+ * Return:
+ *      RT_ERR_OK   - Success
+ *      RT_ERR_SMI  - SMI access error
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicFlowControlSharedJumboThreshold(rtk_uint32 *pOnThreshold, rtk_uint32 *pOffThreshold)
+{
+    ret_t retVal;
+
+    retVal = rtl8367c_getAsicRegBits(RTL8367C_REG_FLOWCTRL_JUMBO_SHARE_OFF, RTL8367C_FLOWCTRL_JUMBO_SHARE_OFF_MASK, pOffThreshold);
+
+    if(retVal != RT_ERR_OK)
+        return retVal;
+
+    retVal = rtl8367c_getAsicRegBits(RTL8367C_REG_FLOWCTRL_JUMBO_SHARE_ON, RTL8367C_FLOWCTRL_JUMBO_SHARE_ON_MASK, pOnThreshold);
+
+    return retVal;
+}
+/* Function Name:
+ *      rtl8367c_setAsicFlowControlPortJumboThreshold
+ * Description:
+ *      Set Jumbo Port-based flow control parameters
+ * Input:
+ *      onThreshold     - Flow control turn ON threshold
+ *      offThreshold    - Flow control turn OFF threshold
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - Success
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_OUT_OF_RANGE - input parameter out of range
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_setAsicFlowControlPortJumboThreshold(rtk_uint32 onThreshold, rtk_uint32 offThreshold)
+{
+    ret_t retVal;
+
+    if((onThreshold >= RTL8367C_PAGE_NUMBER) || (offThreshold >= RTL8367C_PAGE_NUMBER))
+        return RT_ERR_OUT_OF_RANGE;
+
+    retVal = rtl8367c_setAsicRegBits(RTL8367C_REG_FLOWCTRL_JUMBO_PORT_OFF, RTL8367C_FLOWCTRL_JUMBO_PORT_OFF_MASK, offThreshold);
+
+    if(retVal != RT_ERR_OK)
+        return retVal;
+
+    retVal = rtl8367c_setAsicRegBits(RTL8367C_REG_FLOWCTRL_JUMBO_PORT_ON, RTL8367C_FLOWCTRL_JUMBO_PORT_ON_MASK, onThreshold);
+
+    return retVal;
+}
+/* Function Name:
+ *      rtl8367c_getAsicFlowControlPortJumboThreshold
+ * Description:
+ *      Get Jumbo Port-based flow control parameters
+ * Input:
+ *      None
+ * Output:
+ *      pOnThreshold    - Flow control turn ON threshold
+ *      pOffThreshold   - Flow control turn OFF threshold
+ * Return:
+ *      RT_ERR_OK   - Success
+ *      RT_ERR_SMI  - SMI access error
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicFlowControlPortJumboThreshold(rtk_uint32 *pOnThreshold, rtk_uint32 *pOffThreshold)
+{
+    ret_t retVal;
+
+    retVal = rtl8367c_getAsicRegBits(RTL8367C_REG_FLOWCTRL_JUMBO_PORT_OFF, RTL8367C_FLOWCTRL_JUMBO_PORT_OFF_MASK, pOffThreshold);
+
+    if(retVal != RT_ERR_OK)
+        return retVal;
+
+    retVal = rtl8367c_getAsicRegBits(RTL8367C_REG_FLOWCTRL_JUMBO_PORT_ON, RTL8367C_FLOWCTRL_JUMBO_PORT_ON_MASK, pOnThreshold);
+
+    return retVal;
+}
+/* Function Name:
+ *      rtl8367c_setAsicFlowControlPortPrivateJumboThreshold
+ * Description:
+ *      Set Jumbo Port-private-based flow control parameters
+ * Input:
+ *      onThreshold     - Flow control turn ON threshold
+ *      offThreshold    - Flow control turn OFF threshold
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - Success
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_OUT_OF_RANGE - input parameter out of range
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_setAsicFlowControlPortPrivateJumboThreshold(rtk_uint32 onThreshold, rtk_uint32 offThreshold)
+{
+    ret_t retVal;
+
+    if((onThreshold >= RTL8367C_PAGE_NUMBER) || (offThreshold >= RTL8367C_PAGE_NUMBER))
+        return RT_ERR_OUT_OF_RANGE;
+
+    retVal = rtl8367c_setAsicRegBits(RTL8367C_REG_FLOWCTRL_JUMBO_PORT_PRIVATE_OFF, RTL8367C_FLOWCTRL_JUMBO_PORT_PRIVATE_OFF_MASK, offThreshold);
+
+    if(retVal != RT_ERR_OK)
+        return retVal;
+
+    retVal = rtl8367c_setAsicRegBits(RTL8367C_REG_FLOWCTRL_JUMBO_PORT_PRIVATE_ON, RTL8367C_FLOWCTRL_JUMBO_PORT_PRIVATE_ON_MASK, onThreshold);
+
+    return retVal;
+}
+/* Function Name:
+ *      rtl8367c_getAsicFlowControlPortPrivateJumboThreshold
+ * Description:
+ *      Get Jumbo Port-private-based flow control parameters
+ * Input:
+ *      None
+ * Output:
+ *      pOnThreshold    - Flow control turn ON threshold
+ *      pOffThreshold   - Flow control turn OFF threshold
+ * Return:
+ *      RT_ERR_OK   - Success
+ *      RT_ERR_SMI  - SMI access error
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicFlowControlPortPrivateJumboThreshold(rtk_uint32 *pOnThreshold, rtk_uint32 *pOffThreshold)
+{
+    ret_t retVal;
+
+    retVal = rtl8367c_getAsicRegBits(RTL8367C_REG_FLOWCTRL_JUMBO_PORT_PRIVATE_OFF, RTL8367C_FLOWCTRL_JUMBO_PORT_PRIVATE_OFF_MASK, pOffThreshold);
+
+    if(retVal != RT_ERR_OK)
+        return retVal;
+
+    retVal = rtl8367c_getAsicRegBits(RTL8367C_REG_FLOWCTRL_JUMBO_PORT_PRIVATE_ON, RTL8367C_FLOWCTRL_JUMBO_PORT_PRIVATE_ON_MASK, pOnThreshold);
+
+    return retVal;
+}
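+
+/*
+ * Illustrative usage sketch (not part of the Realtek SDK): every setter in
+ * this threshold family programs an ON/OFF pair that acts as a hysteresis
+ * band, so callers normally pick offThreshold < onThreshold, with both below
+ * RTL8367C_PAGE_NUMBER (0x600 pages). The page counts here are placeholders.
+ */
+static ret_t example_setup_jumbo_fc_hysteresis(void)
+{
+    ret_t retVal;
+
+    /* Assert flow control at 0x1C0 pages, release it again at 0x1A0. */
+    retVal = rtl8367c_setAsicFlowControlSystemJumboThreshold(0x1C0, 0x1A0);
+    if(retVal != RT_ERR_OK)
+        return retVal;
+
+    /* The shared pool uses a lower band than the system-wide one. */
+    return rtl8367c_setAsicFlowControlSharedJumboThreshold(0x180, 0x160);
+}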
+
+
+
+/* Function Name:
+ *      rtl8367c_setAsicEgressFlowControlQueueDropThreshold
+ * Description:
+ *      Set Queue-based egress flow control turn on or ingress flow control drop on threshold
+ * Input:
+ *      qid         - The queue id
+ *      threshold   - Queue-based flow control/drop turn ON threshold
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - Success
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_OUT_OF_RANGE - input parameter out of range
+ *      RT_ERR_QUEUE_ID     - Invalid queue id
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_setAsicEgressFlowControlQueueDropThreshold(rtk_uint32 qid, rtk_uint32 threshold)
+{
+    if(threshold >= RTL8367C_PAGE_NUMBER)
+        return RT_ERR_OUT_OF_RANGE;
+
+    if(qid > RTL8367C_QIDMAX)
+        return RT_ERR_QUEUE_ID;
+
+    return rtl8367c_setAsicRegBits(RTL8367C_FLOWCTRL_QUEUE_DROP_ON_REG(qid), RTL8367C_FLOWCTRL_QUEUE_DROP_ON_MASK, threshold);
+}
+/* Function Name:
+ *      rtl8367c_getAsicEgressFlowControlQueueDropThreshold
+ * Description:
+ *      Get Queue-based egress flow control turn on or ingress flow control drop on threshold
+ * Input:
+ *      qid         - The queue id
+ * Output:
+ *      pThreshold  - Queue-based flow control/drop turn ON threshold
+ * Return:
+ *      RT_ERR_OK           - Success
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_QUEUE_ID     - Invalid queue id
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicEgressFlowControlQueueDropThreshold(rtk_uint32 qid, rtk_uint32 *pThreshold)
+{
+    if(qid > RTL8367C_QIDMAX)
+        return RT_ERR_QUEUE_ID;
+
+    return rtl8367c_getAsicRegBits(RTL8367C_FLOWCTRL_QUEUE_DROP_ON_REG(qid), RTL8367C_FLOWCTRL_QUEUE_DROP_ON_MASK, pThreshold);
+}
+/* Function Name:
+ *      rtl8367c_setAsicEgressFlowControlPortDropThreshold
+ * Description:
+ *      Set port-based egress flow control turn on or ingress flow control drop on threshold
+ * Input:
+ *      port        - Physical port number (0~7)
+ *      threshold   - Port-based flow control/drop turn ON threshold
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - Success
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_PORT_ID      - Invalid port number
+ *      RT_ERR_OUT_OF_RANGE - input parameter out of range
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_setAsicEgressFlowControlPortDropThreshold(rtk_uint32 port, rtk_uint32 threshold)
+{
+    if(port > RTL8367C_PORTIDMAX)
+        return RT_ERR_PORT_ID;
+
+    if(threshold >= RTL8367C_PAGE_NUMBER)
+        return RT_ERR_OUT_OF_RANGE;
+
+    return rtl8367c_setAsicRegBits(RTL8367C_FLOWCTRL_PORT_DROP_ON_REG(port), RTL8367C_FLOWCTRL_PORT_DROP_ON_MASK, threshold);
+}
+/* Function Name:
+ *      rtl8367c_getAsicEgressFlowControlPortDropThreshold
+ * Description:
+ *      Get port-based egress flow control turn on or ingress flow control drop on threshold
+ * Input:
+ *      port        - Physical port number (0~7)
+ * Output:
+ *      pThreshold  - Port-based flow control/drop turn ON threshold
+ * Return:
+ *      RT_ERR_OK           - Success
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_PORT_ID      - Invalid port number
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicEgressFlowControlPortDropThreshold(rtk_uint32 port, rtk_uint32 *pThreshold)
+{
+    if(port > RTL8367C_PORTIDMAX)
+        return RT_ERR_PORT_ID;
+
+    return rtl8367c_getAsicRegBits(RTL8367C_FLOWCTRL_PORT_DROP_ON_REG(port), RTL8367C_FLOWCTRL_PORT_DROP_ON_MASK, pThreshold);
+}
+/* Function Name:
+ *      rtl8367c_setAsicEgressFlowControlPortDropGap
+ * Description:
+ *      Set port-based egress flow control turn off or ingress flow control drop off gap
+ * Input:
+ *      gap     - Flow control/drop turn OFF threshold = turn ON threshold - gap
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - Success
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_OUT_OF_RANGE - input parameter out of range
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_setAsicEgressFlowControlPortDropGap(rtk_uint32 gap)
+{
+    if(gap >= RTL8367C_PAGE_NUMBER)
+        return RT_ERR_OUT_OF_RANGE;
+
+    return rtl8367c_setAsicRegBits(RTL8367C_REG_FLOWCTRL_PORT_GAP, RTL8367C_FLOWCTRL_PORT_GAP_MASK, gap);
+}
+/* Function Name:
+ *      rtl8367c_getAsicEgressFlowControlPortDropGap
+ * Description:
+ *      Get port-based egress flow control turn off or ingress flow control drop off gap
+ * Input:
+ *      None
+ * Output:
+ *      pGap    - Flow control/drop turn OFF threshold = turn ON threshold - gap
+ * Return:
+ *      RT_ERR_OK   - Success
+ *      RT_ERR_SMI  - SMI access error
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicEgressFlowControlPortDropGap(rtk_uint32 *pGap)
+{
+    return rtl8367c_getAsicRegBits(RTL8367C_REG_FLOWCTRL_PORT_GAP, RTL8367C_FLOWCTRL_PORT_GAP_MASK, pGap);
+}
+/* Function Name:
+ *      rtl8367c_setAsicEgressFlowControlQueueDropGap
+ * Description:
+ *      Set Queue-based egress flow control turn off or ingress flow control drop off gap
+ * Input:
+ *      gap     - Flow control/drop turn OFF threshold = turn ON threshold - gap
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - Success
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_OUT_OF_RANGE - input parameter out of range
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_setAsicEgressFlowControlQueueDropGap(rtk_uint32 gap)
+{
+    if(gap >= RTL8367C_PAGE_NUMBER)
+        return RT_ERR_OUT_OF_RANGE;
+
+    return rtl8367c_setAsicRegBits(RTL8367C_REG_FLOWCTRL_QUEUE_GAP, RTL8367C_FLOWCTRL_QUEUE_GAP_MASK, gap);
+}
+/* Function Name:
+ *      rtl8367c_getAsicEgressFlowControlQueueDropGap
+ * Description:
+ *      Get Queue-based egress flow control turn off or ingress flow control drop off gap
+ * Input:
+ *      None
+ * Output:
+ *      pGap    - Flow control/drop turn OFF threshold = turn ON threshold - gap
+ * Return:
+ *      RT_ERR_OK   - Success
+ *      RT_ERR_SMI  - SMI access error
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicEgressFlowControlQueueDropGap(rtk_uint32 *pGap)
+{
+    return rtl8367c_getAsicRegBits(RTL8367C_REG_FLOWCTRL_QUEUE_GAP, RTL8367C_FLOWCTRL_QUEUE_GAP_MASK, pGap);
+}
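+
+/*
+ * Illustrative usage sketch (not part of the Realtek SDK): the egress side
+ * keeps only a turn-ON threshold per queue/port plus one global gap, and the
+ * effective turn-OFF point is (ON threshold - gap), as documented above.
+ * Queue id and page counts are placeholders.
+ */
+static ret_t example_setup_egress_queue_drop(void)
+{
+    ret_t retVal;
+
+    /* Queue 0 starts dropping at 0x120 pages... */
+    retVal = rtl8367c_setAsicEgressFlowControlQueueDropThreshold(0, 0x120);
+    if(retVal != RT_ERR_OK)
+        return retVal;
+
+    /* ...and stops again at 0x120 - 0x20 = 0x100 pages. */
+    return rtl8367c_setAsicEgressFlowControlQueueDropGap(0x20);
+}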
+/* Function Name:
+ *      rtl8367c_getAsicEgressQueueEmptyPortMask
+ * Description:
+ *      Get queue empty port mask
+ * Input:
+ *      None
+ * Output:
+ *      pPortmask   -  Queue empty port mask
+ * Return:
+ *      RT_ERR_OK   - Success
+ *      RT_ERR_SMI  - SMI access error
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicEgressQueueEmptyPortMask(rtk_uint32 *pPortmask)
+{
+    return rtl8367c_getAsicReg(RTL8367C_REG_PORT_QEMPTY, pPortmask);
+}
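+
+/*
+ * Illustrative sketch (not part of the Realtek SDK): RTL8367C_REG_PORT_QEMPTY
+ * carries one bit per port, so "all queues empty on this port" is a mask test.
+ */
+static ret_t example_port_queues_empty(rtk_uint32 port, rtk_uint32 *pEmpty)
+{
+    ret_t retVal;
+    rtk_uint32 pmask;
+
+    if((retVal = rtl8367c_getAsicEgressQueueEmptyPortMask(&pmask)) != RT_ERR_OK)
+        return retVal;
+
+    *pEmpty = (pmask >> port) & 1;
+    return RT_ERR_OK;
+}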
+/* Function Name:
+ *      rtl8367c_getAsicTotalPage
+ * Description:
+ *      Get system total page usage number
+ * Input:
+ *      None
+ * Output:
+ *      pPageCount  -  page usage number
+ * Return:
+ *      RT_ERR_OK   - Success
+ *      RT_ERR_SMI  - SMI access error
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicTotalPage(rtk_uint32 *pPageCount)
+{
+    return rtl8367c_getAsicRegBits(RTL8367C_REG_FLOWCTRL_TOTAL_PAGE_COUNTER, RTL8367C_FLOWCTRL_TOTAL_PAGE_COUNTER_MASK, pPageCount);
+}
+/* Function Name:
+ *      rtl8367c_getAsicPulbicPage
+ * Description:
+ *      Get system public page usage number
+ * Input:
+ *      None
+ * Output:
+ *      pPageCount  -  page usage number
+ * Return:
+ *      RT_ERR_OK   - Success
+ *      RT_ERR_SMI  - SMI access error
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicPulbicPage(rtk_uint32 *pPageCount)
+{
+    return rtl8367c_getAsicRegBits(RTL8367C_REG_FLOWCTRL_PUBLIC_PAGE_COUNTER, RTL8367C_FLOWCTRL_PUBLIC_PAGE_COUNTER_MASK, pPageCount);
+}
+/* Function Name:
+ *      rtl8367c_getAsicMaxTotalPage
+ * Description:
+ *      Get system total page max usage number
+ * Input:
+ *      None
+ * Output:
+ *      pPageCount  -  page usage number
+ * Return:
+ *      RT_ERR_OK   - Success
+ *      RT_ERR_SMI  - SMI access error
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicMaxTotalPage(rtk_uint32 *pPageCount)
+{
+    return rtl8367c_getAsicRegBits(RTL8367C_REG_FLOWCTRL_TOTAL_PAGE_MAX, RTL8367C_FLOWCTRL_TOTAL_PAGE_MAX_MASK, pPageCount);
+}
+/* Function Name:
+ *      rtl8367c_getAsicMaxPulbicPage
+ * Description:
+ *      Get system public page max usage number
+ * Input:
+ *      None
+ * Output:
+ *      pPageCount  -  page usage number
+ * Return:
+ *      RT_ERR_OK   - Success
+ *      RT_ERR_SMI  - SMI access error
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicMaxPulbicPage(rtk_uint32 *pPageCount)
+{
+    return rtl8367c_getAsicRegBits(RTL8367C_REG_FLOWCTRL_PUBLIC_PAGE_MAX, RTL8367C_FLOWCTRL_PUBLIC_PAGE_MAX_MASK, pPageCount);
+}
+/* Function Name:
+ *      rtl8367c_getAsicPortPage
+ * Description:
+ *      Get per-port page usage number
+ * Input:
+ *      port        -  Physical port number (0~7)
+ * Output:
+ *      pPageCount  -  page usage number
+ * Return:
+ *      RT_ERR_OK       - Success
+ *      RT_ERR_SMI      - SMI access error
+ *      RT_ERR_PORT_ID  - Invalid port number
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicPortPage(rtk_uint32 port, rtk_uint32 *pPageCount)
+{
+    if(port > RTL8367C_PORTIDMAX)
+        return RT_ERR_PORT_ID;
+
+    if(port < 8)
+        return rtl8367c_getAsicRegBits(RTL8367C_FLOWCTRL_PORT_PAGE_COUNTER_REG(port), RTL8367C_FLOWCTRL_PORT_PAGE_COUNTER_MASK, pPageCount);
+    else
+        return rtl8367c_getAsicRegBits(RTL8367C_REG_FLOWCTRL_PORT8_PAGE_COUNTER + port - 8, RTL8367C_FLOWCTRL_PORT_PAGE_COUNTER_MASK, pPageCount);
+}
+/* Function Name:
+ *      rtl8367c_getAsicPortPageMax
+ * Description:
+ *      Get per-port page max usage number
+ * Input:
+ *      port        -  Physical port number (0~7)
+ * Output:
+ *      pPageCount  -  page usage number
+ * Return:
+ *      RT_ERR_OK       - Success
+ *      RT_ERR_SMI      - SMI access error
+ *      RT_ERR_PORT_ID  - Invalid port number
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicPortPageMax(rtk_uint32 port, rtk_uint32 *pPageCount)
+{
+    if(port > RTL8367C_PORTIDMAX)
+        return RT_ERR_PORT_ID;
+    if(port < 8)
+        return rtl8367c_getAsicRegBits(RTL8367C_FLOWCTRL_PORT_PAGE_MAX_REG(port), RTL8367C_FLOWCTRL_PORT_PAGE_MAX_MASK, pPageCount);
+    else
+        /* Ports 8~10 sit in the extension-port register block, mirroring
+         * rtl8367c_getAsicPortPage() above. */
+        return rtl8367c_getAsicRegBits(RTL8367C_REG_FLOWCTRL_PORT8_PAGE_MAX + port - 8, RTL8367C_FLOWCTRL_PORT_PAGE_MAX_MASK, pPageCount);
+}
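+
+/*
+ * Illustrative sketch (not part of the Realtek SDK): the page counters above
+ * can be polled to watch buffer pressure; the high-water mark from
+ * rtl8367c_getAsicPortPageMax() is what the drop/flow-control thresholds
+ * earlier in this file are typically tuned against.
+ */
+static ret_t example_read_port_buffer_usage(rtk_uint32 port, rtk_uint32 *pCur, rtk_uint32 *pMax)
+{
+    ret_t retVal;
+
+    if((retVal = rtl8367c_getAsicPortPage(port, pCur)) != RT_ERR_OK)
+        return retVal;
+
+    return rtl8367c_getAsicPortPageMax(port, pMax);
+}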
+
+/* Function Name:
+ *      rtl8367c_setAsicFlowControlEgressPortIndep
+ * Description:
+ *      Set per-port egress flow control independent
+ * Input:
+ *      port        - Physical port number (0~7)
+ *      enable      - Egress port flow control usage 1:enable 0:disable.
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK       - Success
+ *      RT_ERR_SMI      - SMI access error
+ *      RT_ERR_PORT_ID  - Invalid port number
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_setAsicFlowControlEgressPortIndep(rtk_uint32 port, rtk_uint32 enable)
+{
+    if(port > RTL8367C_PORTIDMAX)
+        return RT_ERR_PORT_ID;
+
+    return rtl8367c_setAsicRegBit(RTL8367C_REG_PORT0_MISC_CFG + (port * 0x20), RTL8367C_PORT0_MISC_CFG_FLOWCTRL_INDEP_OFFSET, enable);
+}
+
+/* Function Name:
+ *      rtl8367c_getAsicFlowControlEgressPortIndep
+ * Description:
+ *      Get per-port egress flow control independent
+ * Input:
+ *      port        - Physical port number (0~7)
+ * Output:
+ *      pEnable     - Egress port flow control usage 1:enable 0:disable.
+ * Return:
+ *      RT_ERR_OK       - Success
+ *      RT_ERR_SMI      - SMI access error
+ *      RT_ERR_PORT_ID  - Invalid port number
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicFlowControlEgressPortIndep(rtk_uint32 port, rtk_uint32 *pEnable)
+{
+    if(port > RTL8367C_PORTIDMAX)
+        return RT_ERR_PORT_ID;
+
+    return rtl8367c_getAsicRegBit(RTL8367C_REG_PORT0_MISC_CFG + (port * 0x20), RTL8367C_PORT0_MISC_CFG_FLOWCTRL_INDEP_OFFSET, pEnable);
+}
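+
+/*
+ * Illustrative sketch (not part of the Realtek SDK): decoupling one port's
+ * egress flow control via the accessor pair above; the read-back is only a
+ * sanity check.
+ */
+static ret_t example_enable_indep_egress_fc(rtk_uint32 port)
+{
+    ret_t retVal;
+    rtk_uint32 enable;
+
+    if((retVal = rtl8367c_setAsicFlowControlEgressPortIndep(port, 1)) != RT_ERR_OK)
+        return retVal;
+
+    return rtl8367c_getAsicFlowControlEgressPortIndep(port, &enable);
+}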
diff -Nruw linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/rtl8367c_asicdrv_fc.h linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/rtl8367c_asicdrv_fc.h
--- linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/rtl8367c_asicdrv_fc.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/rtl8367c_asicdrv_fc.h	2019-01-22 16:16:24.711257346 +0100
@@ -0,0 +1,82 @@
+#ifndef _RTL8367C_ASICDRV_FC_H_
+#define _RTL8367C_ASICDRV_FC_H_
+
+#include <rtl8367c_asicdrv.h>
+
+#define RTL8367C_PAGE_NUMBER    0x600
+
+
+enum FLOW_CONTROL_TYPE
+{
+    FC_EGRESS = 0,
+    FC_INGRESS,
+};
+
+enum FC_JUMBO_SIZE
+{
+    FC_JUMBO_SIZE_3K = 0,
+    FC_JUMBO_SIZE_4K,
+    FC_JUMBO_SIZE_6K,
+    FC_JUMBO_SIZE_9K,
+    FC_JUMBO_SIZE_END,
+
+};
+
+
+extern ret_t rtl8367c_setAsicFlowControlSelect(rtk_uint32 select);
+extern ret_t rtl8367c_getAsicFlowControlSelect(rtk_uint32 *pSelect);
+extern ret_t rtl8367c_setAsicFlowControlJumboMode(rtk_uint32 enabled);
+extern ret_t rtl8367c_getAsicFlowControlJumboMode(rtk_uint32* pEnabled);
+extern ret_t rtl8367c_setAsicFlowControlJumboModeSize(rtk_uint32 size);
+extern ret_t rtl8367c_getAsicFlowControlJumboModeSize(rtk_uint32* pSize);
+extern ret_t rtl8367c_setAsicFlowControlQueueEgressEnable(rtk_uint32 port, rtk_uint32 qid, rtk_uint32 enabled);
+extern ret_t rtl8367c_getAsicFlowControlQueueEgressEnable(rtk_uint32 port, rtk_uint32 qid, rtk_uint32* pEnabled);
+extern ret_t rtl8367c_setAsicFlowControlDropAll(rtk_uint32 dropall);
+extern ret_t rtl8367c_getAsicFlowControlDropAll(rtk_uint32* pDropall);
+extern ret_t rtl8367c_setAsicFlowControlPauseAllThreshold(rtk_uint32 threshold);
+extern ret_t rtl8367c_getAsicFlowControlPauseAllThreshold(rtk_uint32 *pThreshold);
+extern ret_t rtl8367c_setAsicFlowControlSystemThreshold(rtk_uint32 onThreshold, rtk_uint32 offThreshold);
+extern ret_t rtl8367c_getAsicFlowControlSystemThreshold(rtk_uint32 *pOnThreshold, rtk_uint32 *pOffThreshold);
+extern ret_t rtl8367c_setAsicFlowControlSharedThreshold(rtk_uint32 onThreshold, rtk_uint32 offThreshold);
+extern ret_t rtl8367c_getAsicFlowControlSharedThreshold(rtk_uint32 *pOnThreshold, rtk_uint32 *pOffThreshold);
+extern ret_t rtl8367c_setAsicFlowControlPortThreshold(rtk_uint32 onThreshold, rtk_uint32 offThreshold);
+extern ret_t rtl8367c_getAsicFlowControlPortThreshold(rtk_uint32 *pOnThreshold, rtk_uint32 *pOffThreshold);
+extern ret_t rtl8367c_setAsicFlowControlPortPrivateThreshold(rtk_uint32 onThreshold, rtk_uint32 offThreshold);
+extern ret_t rtl8367c_getAsicFlowControlPortPrivateThreshold(rtk_uint32 *pOnThreshold, rtk_uint32 *pOffThreshold);
+extern ret_t rtl8367c_setAsicFlowControlSystemDropThreshold(rtk_uint32 onThreshold, rtk_uint32 offThreshold);
+extern ret_t rtl8367c_getAsicFlowControlSystemDropThreshold(rtk_uint32 *pOnThreshold, rtk_uint32 *pOffThreshold);
+extern ret_t rtl8367c_setAsicFlowControlSharedDropThreshold(rtk_uint32 onThreshold, rtk_uint32 offThreshold);
+extern ret_t rtl8367c_getAsicFlowControlSharedDropThreshold(rtk_uint32 *pOnThreshold, rtk_uint32 *pOffThreshold);
+extern ret_t rtl8367c_setAsicFlowControlPortDropThreshold(rtk_uint32 onThreshold, rtk_uint32 offThreshold);
+extern ret_t rtl8367c_getAsicFlowControlPortDropThreshold(rtk_uint32 *pOnThreshold, rtk_uint32 *pOffThreshold);
+extern ret_t rtl8367c_setAsicFlowControlPortPrivateDropThreshold(rtk_uint32 onThreshold, rtk_uint32 offThreshold);
+extern ret_t rtl8367c_getAsicFlowControlPortPrivateDropThreshold(rtk_uint32 *pOnThreshold, rtk_uint32 *pOffThreshold);
+extern ret_t rtl8367c_setAsicFlowControlSystemJumboThreshold(rtk_uint32 onThreshold, rtk_uint32 offThreshold);
+extern ret_t rtl8367c_getAsicFlowControlSystemJumboThreshold(rtk_uint32 *pOnThreshold, rtk_uint32 *pOffThreshold);
+extern ret_t rtl8367c_setAsicFlowControlSharedJumboThreshold(rtk_uint32 onThreshold, rtk_uint32 offThreshold);
+extern ret_t rtl8367c_getAsicFlowControlSharedJumboThreshold(rtk_uint32 *pOnThreshold, rtk_uint32 *pOffThreshold);
+extern ret_t rtl8367c_setAsicFlowControlPortJumboThreshold(rtk_uint32 onThreshold, rtk_uint32 offThreshold);
+extern ret_t rtl8367c_getAsicFlowControlPortJumboThreshold(rtk_uint32 *pOnThreshold, rtk_uint32 *pOffThreshold);
+extern ret_t rtl8367c_setAsicFlowControlPortPrivateJumboThreshold(rtk_uint32 onThreshold, rtk_uint32 offThreshold);
+extern ret_t rtl8367c_getAsicFlowControlPortPrivateJumboThreshold(rtk_uint32 *pOnThreshold, rtk_uint32 *pOffThreshold);
+
+extern ret_t rtl8367c_setAsicEgressFlowControlPortDropGap(rtk_uint32 gap);
+extern ret_t rtl8367c_getAsicEgressFlowControlPortDropGap(rtk_uint32 *pGap);
+extern ret_t rtl8367c_setAsicEgressFlowControlQueueDropGap(rtk_uint32 gap);
+extern ret_t rtl8367c_getAsicEgressFlowControlQueueDropGap(rtk_uint32 *pGap);
+extern ret_t rtl8367c_setAsicEgressFlowControlPortDropThreshold(rtk_uint32 port, rtk_uint32 threshold);
+extern ret_t rtl8367c_getAsicEgressFlowControlPortDropThreshold(rtk_uint32 port, rtk_uint32 *pThreshold);
+extern ret_t rtl8367c_setAsicEgressFlowControlQueueDropThreshold(rtk_uint32 qid, rtk_uint32 threshold);
+extern ret_t rtl8367c_getAsicEgressFlowControlQueueDropThreshold(rtk_uint32 qid, rtk_uint32 *pThreshold);
+extern ret_t rtl8367c_getAsicEgressQueueEmptyPortMask(rtk_uint32 *pPortmask);
+extern ret_t rtl8367c_getAsicTotalPage(rtk_uint32 *pPageCount);
+extern ret_t rtl8367c_getAsicPulbicPage(rtk_uint32 *pPageCount);
+extern ret_t rtl8367c_getAsicMaxTotalPage(rtk_uint32 *pPageCount);
+extern ret_t rtl8367c_getAsicMaxPulbicPage(rtk_uint32 *pPageCount);
+extern ret_t rtl8367c_getAsicPortPage(rtk_uint32 port, rtk_uint32 *pPageCount);
+extern ret_t rtl8367c_getAsicPortPageMax(rtk_uint32 port, rtk_uint32 *pPageCount);
+extern ret_t rtl8367c_setAsicFlowControlEgressPortIndep(rtk_uint32 port, rtk_uint32 enable);
+extern ret_t rtl8367c_getAsicFlowControlEgressPortIndep(rtk_uint32 port, rtk_uint32 *pEnable);
+
+#endif /*_RTL8367C_ASICDRV_FC_H_*/
+
diff -Nruw linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/rtl8367c_asicdrv_green.c linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/rtl8367c_asicdrv_green.c
--- linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/rtl8367c_asicdrv_green.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/rtl8367c_asicdrv_green.c	2019-01-22 16:16:24.711257346 +0100
@@ -0,0 +1,625 @@
+/*
+ * Copyright (C) 2013 Realtek Semiconductor Corp.
+ * All Rights Reserved.
+ *
+ * This program is the proprietary software of Realtek Semiconductor
+ * Corporation and/or its licensors, and only be used, duplicated,
+ * modified or distributed under the authorized license from Realtek.
+ *
+ * ANY USE OF THE SOFTWARE OTHER THAN AS AUTHORIZED UNDER
+ * THIS LICENSE OR COPYRIGHT LAW IS PROHIBITED.
+ *
+ * $Revision: 80668 $
+ * $Date: 2017-07-19 14:21:43 +0800 (Wed, 19 Jul 2017) $
+ *
+ * Purpose : RTL8367C switch high-level API for RTL8367C
+ * Feature : Green ethernet related functions
+ *
+ */
+#include <rtl8367c_asicdrv_green.h>
+
+/* Function Name:
+ *      rtl8367c_getAsicGreenPortPage
+ * Description:
+ *      Get per-Port ingress page usage per second
+ * Input:
+ *      port    - Physical port number (0~7)
+ * Output:
+ *      pPage   - page number occupied by ingress packets per second
+ * Return:
+ *      RT_ERR_OK       - Success
+ *      RT_ERR_SMI      - SMI access error
+ *      RT_ERR_PORT_ID  - Invalid port number
+ * Note:
+ *      Page count occupied by ingress traffic per second, for higher-layer green feature usage
+ */
+ret_t rtl8367c_getAsicGreenPortPage(rtk_uint32 port, rtk_uint32* pPage)
+{
+    ret_t retVal;
+    rtk_uint32 regData;
+    rtk_uint32 pageMeter;
+
+    if(port > RTL8367C_PORTIDMAX)
+        return RT_ERR_PORT_ID;
+
+    retVal = rtl8367c_getAsicReg(RTL8367C_PAGEMETER_PORT_REG(port), &regData);
+    if(retVal != RT_ERR_OK)
+        return retVal;
+
+    pageMeter = regData;
+
+    retVal = rtl8367c_getAsicReg(RTL8367C_PAGEMETER_PORT_REG(port) + 1, &regData);
+    if(retVal != RT_ERR_OK)
+        return retVal;
+
+    pageMeter = pageMeter + (regData << 16);
+
+    *pPage = pageMeter;
+    return RT_ERR_OK;
+}
+/* Function Name:
+ *      rtl8367c_setAsicGreenTrafficType
+ * Description:
+ *      Set traffic type for each priority
+ * Input:
+ *      priority    - internal priority (0~7)
+ *      traffictype - high/low traffic type, 1:high priority traffic type, 0:low priority traffic type
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK               - Success
+ *      RT_ERR_SMI              - SMI access error
+ *      RT_ERR_QOS_INT_PRIORITY - Invalid priority
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_setAsicGreenTrafficType(rtk_uint32 priority, rtk_uint32 traffictype)
+{
+
+    if(priority > RTL8367C_PRIMAX)
+        return RT_ERR_QOS_INT_PRIORITY;
+
+    return rtl8367c_setAsicRegBit(RTL8367C_REG_HIGHPRI_CFG, priority, (traffictype?1:0));
+}
+/* Function Name:
+ *      rtl8367c_getAsicGreenTrafficType
+ * Description:
+ *      Get traffic type for each priority
+ * Input:
+ *      priority    - internal priority (0~7)
+ * Output:
+ *      pTraffictype - high/low traffic type, 1:high priority traffic type, 0:low priority traffic type
+ * Return:
+ *      RT_ERR_OK               - Success
+ *      RT_ERR_SMI              - SMI access error
+ *      RT_ERR_QOS_INT_PRIORITY - Invalid priority
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicGreenTrafficType(rtk_uint32 priority, rtk_uint32* pTraffictype)
+{
+    if(priority > RTL8367C_PRIMAX)
+        return RT_ERR_QOS_INT_PRIORITY;
+
+    return rtl8367c_getAsicRegBit(RTL8367C_REG_HIGHPRI_CFG, priority, pTraffictype);
+}
+
+/* Function Name:
+ *      rtl8367c_setAsicGreenHighPriorityTraffic
+ * Description:
+ *      Set indicator which ASIC had received high priority traffic
+ * Input:
+ *      port            - Physical port number (0~7)
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK       - Success
+ *      RT_ERR_SMI      - SMI access error
+ *      RT_ERR_PORT_ID  - Invalid port number
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_setAsicGreenHighPriorityTraffic(rtk_uint32 port)
+{
+    if(port > RTL8367C_PORTIDMAX)
+        return RT_ERR_PORT_ID;
+
+    return rtl8367c_setAsicRegBit(RTL8367C_REG_HIGHPRI_INDICATOR, port, 1);
+}
+
+
+/* Function Name:
+ *      rtl8367c_getAsicGreenHighPriorityTraffic
+ * Description:
+ *      Get indicator which ASIC had received high priority traffic or not
+ * Input:
+ *      port        - Physical port number (0~7)
+ * Output:
+ *      pIndicator  - High priority traffic indicator; 1 means the ASIC received high priority traffic within the 1-second checking period
+ * Return:
+ *      RT_ERR_OK       - Success
+ *      RT_ERR_SMI      - SMI access error
+ *      RT_ERR_PORT_ID  - Invalid port number
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicGreenHighPriorityTraffic(rtk_uint32 port, rtk_uint32* pIndicator)
+{
+    if(port > RTL8367C_PORTIDMAX)
+        return RT_ERR_PORT_ID;
+
+    return rtl8367c_getAsicRegBit(RTL8367C_REG_HIGHPRI_INDICATOR, port, pIndicator);
+}
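+
+/*
+ * Illustrative sketch (not part of the Realtek SDK): a higher layer would
+ * typically sample the per-port page meter together with the high-priority
+ * indicator once per one-second window, as the comments above suggest.
+ */
+static ret_t example_poll_green_state(rtk_uint32 port, rtk_uint32 *pPages, rtk_uint32 *pHighPri)
+{
+    ret_t retVal;
+
+    if((retVal = rtl8367c_getAsicGreenPortPage(port, pPages)) != RT_ERR_OK)
+        return retVal;
+
+    return rtl8367c_getAsicGreenHighPriorityTraffic(port, pHighPri);
+}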
+
+/*
+@func rtk_int32 | rtl8367c_setAsicGreenEthernet | Set green ethernet function.
+@parm rtk_uint32 | port | The port number.
+@parm rtk_uint32 | green | Green feature function usage 1:enable 0:disable.
+@rvalue RT_ERR_OK | Success.
+@rvalue RT_ERR_SMI | SMI access error.
+@comm
+    The API can set the Green Ethernet function to reduce power consumption. While the green feature is enabled, the ASIC will
+ automatically detect the cable length and then select a different power mode for best performance with minimum power consumption.
+ Link-down ports will enter power-saving mode within 10 seconds after the cable is disconnected if the power saving function is enabled.
+*/
+ret_t rtl8367c_setAsicGreenEthernet(rtk_uint32 port, rtk_uint32 green)
+{
+    ret_t retVal;
+    rtk_uint32 checkCounter;
+    rtk_uint32 regData;
+    rtk_uint32 phy_status;
+    rtk_uint32 patchData[6][2] = { {0x809A, 0x8911}, {0x80A3, 0x9233}, {0x80AC, 0xA444}, {0x809F, 0x6B20}, {0x80A8, 0x6B22}, {0x80B1, 0x6B23} };
+    rtk_uint32 idx;
+    rtk_uint32 data;
+
+    if (green > 1)
+        return RT_ERR_INPUT;
+
+    if((retVal = rtl8367c_setAsicReg(0x13C2, 0x0249)) != RT_ERR_OK)
+        return retVal;
+
+    if((retVal = rtl8367c_getAsicReg(0x1300, &data)) != RT_ERR_OK)
+        return retVal;
+
+    if((retVal = rtl8367c_setAsicReg(0x13C2, 0x0000)) != RT_ERR_OK)
+        return retVal;
+
+    /* Stop Watchdog */
+    switch (data)
+    {
+        case 0x0652:
+        case 0x6368:
+        case 0x0801:
+        case 0x6511:
+            if((retVal = rtl8367c_getAsicPHYOCPReg(port, 0xB83E, &regData)) != RT_ERR_OK)
+                return retVal;
+
+            regData &= 0xFF00;
+            regData |= 0x00A9;
+            if((retVal = rtl8367c_setAsicPHYOCPReg(port, 0xB83E, regData)) != RT_ERR_OK)
+                return retVal;
+
+            if((retVal = rtl8367c_getAsicPHYOCPReg(port, 0xB840, &regData)) != RT_ERR_OK)
+                return retVal;
+
+            regData &= 0xFF00;
+            regData |= 0x00A9;
+            if((retVal = rtl8367c_setAsicPHYOCPReg(port, 0xB840, regData)) != RT_ERR_OK)
+                return retVal;
+            break;
+        default:
+            break;
+    }
+
+    /* 0xa420[2:0] */
+    if((retVal = rtl8367c_getAsicPHYOCPReg(port, 0xA420, &regData)) != RT_ERR_OK)
+        return retVal;
+    phy_status = (regData & 0x0007);
+
+    if(phy_status == 3)
+    {
+        /* 0xb820[4] = 1 */
+        if((retVal = rtl8367c_getAsicPHYOCPReg(port, 0xB820, &regData)) != RT_ERR_OK)
+            return retVal;
+
+        regData |= (0x0001 << 4);
+
+        if((retVal = rtl8367c_setAsicPHYOCPReg(port, 0xB820, regData)) != RT_ERR_OK)
+            return retVal;
+
+        /* wait 0xb800[6] = 1 */
+        checkCounter = 100;
+        while(checkCounter)
+        {
+            retVal = rtl8367c_getAsicPHYOCPReg(port, 0xB800, &regData);
+            if( (retVal != RT_ERR_OK) || ((regData & 0x0040) != 0x0040) )
+            {
+                checkCounter --;
+                if(0 == checkCounter)
+                    return RT_ERR_BUSYWAIT_TIMEOUT;
+            }
+            else
+                checkCounter = 0;
+        }
+    }
+
+    switch (data)
+    {
+        case 0x0276:
+        case 0x0597:
+        case 0x6367:
+            if(green)
+            {
+                for(idx = 0; idx < 6; idx++ )
+                {
+                    if((retVal = rtl8367c_setAsicPHYOCPReg(port, 0xA436, patchData[idx][0])) != RT_ERR_OK)
+                        return retVal;
+
+                    if((retVal = rtl8367c_setAsicPHYOCPReg(port, 0xA438, patchData[idx][1])) != RT_ERR_OK)
+                        return retVal;
+                }
+            }
+            break;
+        default:
+            break;
+    }
+
+
+
+    /* 0xa436 = 0x8011 */
+    if((retVal = rtl8367c_setAsicPHYOCPReg(port, 0xA436, 0x8011)) != RT_ERR_OK)
+        return retVal;
+
+    /* wr 0xa438[15] = 0: disable, 1: enable */
+    if((retVal = rtl8367c_getAsicPHYOCPReg(port, 0xA438, &regData)) != RT_ERR_OK)
+        return retVal;
+
+    if(green)
+        regData |= 0x8000;
+    else
+        regData &= 0x7FFF;
+
+    if((retVal = rtl8367c_setAsicPHYOCPReg(port, 0xA438, regData)) != RT_ERR_OK)
+        return retVal;
+
+    if(green == 0)
+    {
+        /* Disable Green, write analog parameter as long cable parameter */
+        /* 0xBCC2   [14:12]     0x4
+           0xBCC2   [10:8]      0x4
+           0xBCC2   [7:4]       0x4
+           0xBCC2   [3:0]       0x4 */
+        if((retVal = rtl8367c_getAsicPHYOCPReg(port, 0xBCC2, &regData)) != RT_ERR_OK)
+            return retVal;
+
+        regData &= 0x8800;
+        regData |= 0x4444;
+        if((retVal = rtl8367c_setAsicPHYOCPReg(port, 0xBCC2, regData)) != RT_ERR_OK)
+            return retVal;
+
+        /* 0xBCCA   [9:8]       0x3
+           0xBCCA   [7]         0x0 */
+        if((retVal = rtl8367c_getAsicPHYOCPReg(port, 0xBCCA, &regData)) != RT_ERR_OK)
+            return retVal;
+
+        regData &= 0xFF7F;
+        regData |= 0x0300;
+        if((retVal = rtl8367c_setAsicPHYOCPReg(port, 0xBCCA, regData)) != RT_ERR_OK)
+            return retVal;
+
+        /* 0xBCC0   [2:0]       0x0 */
+        if((retVal = rtl8367c_getAsicPHYOCPReg(port, 0xBCC0, &regData)) != RT_ERR_OK)
+            return retVal;
+
+        regData &= 0xFFF8;
+        if((retVal = rtl8367c_setAsicPHYOCPReg(port, 0xBCC0, regData)) != RT_ERR_OK)
+            return retVal;
+
+        /* 0xA80E   [11:10]     0x2 */
+        if((retVal = rtl8367c_getAsicPHYOCPReg(port, 0xA80E, &regData)) != RT_ERR_OK)
+            return retVal;
+
+        regData &= 0xF3FF;
+        regData |= 0x0800;
+        if((retVal = rtl8367c_setAsicPHYOCPReg(port, 0xA80E, regData)) != RT_ERR_OK)
+            return retVal;
+
+        /* 0xBCD6   [2:0]       0x3 */
+        if((retVal = rtl8367c_getAsicPHYOCPReg(port, 0xBCD6, &regData)) != RT_ERR_OK)
+            return retVal;
+
+        regData &= 0xFFF8;
+        regData |= 0x0003;
+        if((retVal = rtl8367c_setAsicPHYOCPReg(port, 0xBCD6, regData)) != RT_ERR_OK)
+            return retVal;
+    }
+
+    /* Re-nway */
+    if((retVal = rtl8367c_getAsicPHYReg(port, 0, &regData)) != RT_ERR_OK)
+        return retVal;
+
+    regData |= 0x0200;
+    if((retVal = rtl8367c_setAsicPHYReg(port, 0, regData)) != RT_ERR_OK)
+        return retVal;
+
+    if(phy_status == 3)
+    {
+        /* 0xb820[4] = 0  */
+        if((retVal = rtl8367c_getAsicPHYOCPReg(port, 0xB820, &regData)) != RT_ERR_OK)
+            return retVal;
+
+        regData &= ~(0x0001 << 4);
+
+        if((retVal = rtl8367c_setAsicPHYOCPReg(port, 0xB820, regData)) != RT_ERR_OK)
+            return retVal;
+
+        /* wait 0xb800[6] = 0 */
+        checkCounter = 100;
+        while(checkCounter)
+        {
+            retVal = rtl8367c_getAsicPHYOCPReg(port, 0xB800, &regData);
+            if( (retVal != RT_ERR_OK) || ((regData & 0x0040) != 0x0000) )
+            {
+                checkCounter --;
+                if(0 == checkCounter)
+                    return RT_ERR_BUSYWAIT_TIMEOUT;
+            }
+            else
+                checkCounter = 0;
+        }
+    }
+
+    /* Start Watchdog */
+    switch (data)
+    {
+        case 0x0652:
+        case 0x6368:
+        case 0x0801:
+        case 0x6511:
+            if((retVal = rtl8367c_getAsicPHYOCPReg(port, 0xB83E, &regData)) != RT_ERR_OK)
+                return retVal;
+
+            regData &= 0xFF00;
+            regData |= 0x0048;
+            if((retVal = rtl8367c_setAsicPHYOCPReg(port, 0xB83E, regData)) != RT_ERR_OK)
+                return retVal;
+
+            if((retVal = rtl8367c_getAsicPHYOCPReg(port, 0xB840, &regData)) != RT_ERR_OK)
+                return retVal;
+
+            regData &= 0xFF00;
+            regData |= 0x00FA;
+            if((retVal = rtl8367c_setAsicPHYOCPReg(port, 0xB840, regData)) != RT_ERR_OK)
+                return retVal;
+            break;
+        default:
+            break;
+    }
+
+    return RT_ERR_OK;
+}
+
+/*
+@func rtk_int32 | rtl8367c_getAsicGreenEthernet | Get green ethernet function.
+@parm rtk_uint32 | port | The port number.
+@parm rtk_uint32 | *green | Green feature function usage 1:enable 0:disable.
+@rvalue RT_ERR_OK | Success.
+@rvalue RT_ERR_SMI | SMI access error.
+@comm
+    The API gets the Green Ethernet function state. While the green feature is enabled, the ASIC will automatically detect the
+ cable length and then select a different power mode for best performance with minimum power consumption. Link-down ports will
+ enter power-saving mode within 10 seconds after the cable is disconnected if the power saving function is enabled.
+*/
+ret_t rtl8367c_getAsicGreenEthernet(rtk_uint32 port, rtk_uint32* green)
+{
+    ret_t retVal;
+    rtk_uint32 regData;
+
+    /* 0xa436 = 0x8011 */
+    if((retVal = rtl8367c_setAsicPHYOCPReg(port, 0xA436, 0x8011)) != RT_ERR_OK)
+        return retVal;
+
+    /* wr 0xa438[15] = 0: disable, 1: enable */
+    if((retVal = rtl8367c_getAsicPHYOCPReg(port, 0xA438, &regData)) != RT_ERR_OK)
+        return retVal;
+
+    if(regData & 0x8000)
+        *green = ENABLED;
+    else
+        *green = DISABLED;
+
+    return RT_ERR_OK;
+}
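+
+/*
+ * Illustrative sketch (not part of the Realtek SDK): enabling the green
+ * feature on each PHY-backed port, assuming ports 0..RTL8367C_PHYNO-1 (from
+ * rtl8367c_asicdrv.h) are the UTP ports with a PHY behind them.
+ */
+static ret_t example_enable_green_all_phys(void)
+{
+    ret_t retVal;
+    rtk_uint32 port;
+
+    for(port = 0; port < RTL8367C_PHYNO; port++)
+    {
+        if((retVal = rtl8367c_setAsicGreenEthernet(port, 1)) != RT_ERR_OK)
+            return retVal;
+    }
+
+    return RT_ERR_OK;
+}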
+
+
+/*
+@func ret_t | rtl8367c_setAsicPowerSaving | Set power saving mode
+@parm rtk_uint32 | phy | phy number
+@parm rtk_uint32 | enable | enable power saving mode.
+@rvalue RT_ERR_OK | Success.
+@rvalue RT_ERR_SMI | SMI access error.
+@rvalue RT_ERR_PORT_ID | Invalid port number.
+@comm
+    The API can set power saving mode per phy.
+*/
+ret_t rtl8367c_setAsicPowerSaving(rtk_uint32 phy, rtk_uint32 enable)
+{
+    rtk_api_ret_t retVal;
+    rtk_uint32 phyData;
+    rtk_uint32 regData;
+    rtk_uint32 phy_status;
+    rtk_uint32 checkCounter;
+    rtk_uint32 data;
+
+    if (enable > 1)
+        return RT_ERR_INPUT;
+
+    if((retVal = rtl8367c_setAsicReg(0x13C2, 0x0249)) != RT_ERR_OK)
+        return retVal;
+
+    if((retVal = rtl8367c_getAsicReg(0x1300, &data)) != RT_ERR_OK)
+        return retVal;
+
+    if((retVal = rtl8367c_setAsicReg(0x13C2, 0x0000)) != RT_ERR_OK)
+        return retVal;
+
+    /* Stop Watchdog */
+    switch (data)
+    {
+        case 0x0652:
+        case 0x6368:
+        case 0x0801:
+        case 0x6511:
+            if((retVal = rtl8367c_getAsicPHYOCPReg(phy, 0xB83E, &regData)) != RT_ERR_OK)
+                return retVal;
+
+            regData &= 0xFF00;
+            regData |= 0x00A9;
+            if((retVal = rtl8367c_setAsicPHYOCPReg(phy, 0xB83E, regData)) != RT_ERR_OK)
+                return retVal;
+
+            if((retVal = rtl8367c_getAsicPHYOCPReg(phy, 0xB840, &regData)) != RT_ERR_OK)
+                return retVal;
+
+            regData &= 0xFF00;
+            regData |= 0x00A9;
+            if((retVal = rtl8367c_setAsicPHYOCPReg(phy, 0xB840, regData)) != RT_ERR_OK)
+                return retVal;
+            break;
+        default:
+            break;
+    }
+
+    /* 0xa420[2:0] */
+    if((retVal = rtl8367c_getAsicPHYOCPReg(phy, 0xA420, &regData)) != RT_ERR_OK)
+        return retVal;
+
+    phy_status = (regData & 0x0007);
+
+    if(phy_status == 3)
+    {
+        /* 0xb820[4] = 1 */
+        if((retVal = rtl8367c_getAsicPHYOCPReg(phy, 0xB820, &regData)) != RT_ERR_OK)
+            return retVal;
+
+        regData |= (0x0001 << 4);
+
+        if((retVal = rtl8367c_setAsicPHYOCPReg(phy, 0xB820, regData)) != RT_ERR_OK)
+            return retVal;
+
+        /* wait 0xb800[6] = 1 */
+        checkCounter = 100;
+        while(checkCounter)
+        {
+            retVal = rtl8367c_getAsicPHYOCPReg(phy, 0xB800, &regData);
+            if( (retVal != RT_ERR_OK) || ((regData & 0x0040) != 0x0040) )
+            {
+                checkCounter --;
+                if(0 == checkCounter)
+                {
+                     return RT_ERR_BUSYWAIT_TIMEOUT;
+                }
+            }
+            else
+                checkCounter = 0;
+        }
+    }
+
+    if ((retVal = rtl8367c_getAsicPHYReg(phy,PHY_POWERSAVING_REG,&phyData))!=RT_ERR_OK)
+        return retVal;
+
+    phyData = phyData & ~(0x0001 << 2);
+    phyData = phyData | (enable << 2);
+
+    if ((retVal = rtl8367c_setAsicPHYReg(phy,PHY_POWERSAVING_REG,phyData))!=RT_ERR_OK)
+        return retVal;
+
+    if(phy_status == 3)
+    {
+        /* 0xb820[4] = 0  */
+        if((retVal = rtl8367c_getAsicPHYOCPReg(phy, 0xB820, &regData)) != RT_ERR_OK)
+            return retVal;
+
+        regData &= ~(0x0001 << 4);
+
+        if((retVal = rtl8367c_setAsicPHYOCPReg(phy, 0xB820, regData)) != RT_ERR_OK)
+            return retVal;
+
+        /* wait 0xb800[6] = 0 */
+        checkCounter = 100;
+        while(checkCounter)
+        {
+            retVal = rtl8367c_getAsicPHYOCPReg(phy, 0xB800, &regData);
+            if( (retVal != RT_ERR_OK) || ((regData & 0x0040) != 0x0000) )
+            {
+                checkCounter --;
+                if(0 == checkCounter)
+                {
+                    return RT_ERR_BUSYWAIT_TIMEOUT;
+                }
+            }
+            else
+                checkCounter = 0;
+        }
+    }
+
+    /* Start Watchdog */
+    switch (data)
+    {
+        case 0x0652:
+        case 0x6368:
+        case 0x0801:
+        case 0x6511:
+            if((retVal = rtl8367c_getAsicPHYOCPReg(phy, 0xB83E, &regData)) != RT_ERR_OK)
+                return retVal;
+
+            regData &= 0xFF00;
+            regData |= 0x0048;
+            if((retVal = rtl8367c_setAsicPHYOCPReg(phy, 0xB83E, regData)) != RT_ERR_OK)
+                return retVal;
+
+            if((retVal = rtl8367c_getAsicPHYOCPReg(phy, 0xB840, &regData)) != RT_ERR_OK)
+                return retVal;
+
+            regData &= 0xFF00;
+            regData |= 0x00FA;
+            if((retVal = rtl8367c_setAsicPHYOCPReg(phy, 0xB840, regData)) != RT_ERR_OK)
+                return retVal;
+            break;
+        default:
+            break;
+    }
+
+    return RT_ERR_OK;
+}
+
+/*
+@func ret_t | rtl8367c_getAsicPowerSaving | Get power saving mode
+@parm rtk_uint32 | phy | The PHY number
+@parm rtk_uint32* | enable | enable power saving mode.
+@rvalue RT_ERR_OK | Success.
+@rvalue RT_ERR_SMI | SMI access error.
+@rvalue RT_ERR_PORT_ID | Invalid port number.
+@comm
+    The API can get power saving mode per phy.
+*/
+ret_t rtl8367c_getAsicPowerSaving(rtk_uint32 phy, rtk_uint32* enable)
+{
+    rtk_api_ret_t retVal;
+    rtk_uint32 phyData;
+
+    if(NULL == enable)
+        return RT_ERR_NULL_POINTER;
+
+    if ((retVal = rtl8367c_getAsicPHYReg(phy,PHY_POWERSAVING_REG,&phyData))!=RT_ERR_OK)
+        return retVal;
+
+    if ((phyData & 0x0004) > 0)
+        *enable = 1;
+    else
+        *enable = 0;
+
+    return RT_ERR_OK;
+}
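+
+/*
+ * Illustrative sketch (not part of the Realtek SDK): bit 2 of
+ * PHY_POWERSAVING_REG (PHY register 24) holds the setting, so toggling it is
+ * a read then a write through the accessor pair above.
+ */
+static ret_t example_toggle_power_saving(rtk_uint32 phy)
+{
+    ret_t retVal;
+    rtk_uint32 enable;
+
+    if((retVal = rtl8367c_getAsicPowerSaving(phy, &enable)) != RT_ERR_OK)
+        return retVal;
+
+    return rtl8367c_setAsicPowerSaving(phy, enable ? 0 : 1);
+}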
+
diff -Nruw linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/rtl8367c_asicdrv_green.h linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/rtl8367c_asicdrv_green.h
--- linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/rtl8367c_asicdrv_green.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/rtl8367c_asicdrv_green.h	2019-01-22 16:16:24.711257346 +0100
@@ -0,0 +1,19 @@
+#ifndef _RTL8367C_ASICDRV_GREEN_H_
+#define _RTL8367C_ASICDRV_GREEN_H_
+
+#include <rtl8367c_asicdrv.h>
+#include <rtl8367c_asicdrv_phy.h>
+
+#define PHY_POWERSAVING_REG                         24
+
+extern ret_t rtl8367c_setAsicGreenTrafficType(rtk_uint32 priority, rtk_uint32 traffictype);
+extern ret_t rtl8367c_getAsicGreenTrafficType(rtk_uint32 priority, rtk_uint32* pTraffictype);
+extern ret_t rtl8367c_getAsicGreenPortPage(rtk_uint32 port, rtk_uint32* pPage);
+extern ret_t rtl8367c_getAsicGreenHighPriorityTraffic(rtk_uint32 port, rtk_uint32* pIndicator);
+extern ret_t rtl8367c_setAsicGreenHighPriorityTraffic(rtk_uint32 port);
+extern ret_t rtl8367c_setAsicGreenEthernet(rtk_uint32 port, rtk_uint32 green);
+extern ret_t rtl8367c_getAsicGreenEthernet(rtk_uint32 port, rtk_uint32* green);
+extern ret_t rtl8367c_setAsicPowerSaving(rtk_uint32 phy, rtk_uint32 enable);
+extern ret_t rtl8367c_getAsicPowerSaving(rtk_uint32 phy, rtk_uint32* enable);
+#endif /*#ifndef _RTL8367C_ASICDRV_GREEN_H_*/
+
diff -Nruw linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/rtl8367c_asicdrv.h linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/rtl8367c_asicdrv.h
--- linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/rtl8367c_asicdrv.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/rtl8367c_asicdrv.h	2019-01-22 16:16:24.711257346 +0100
@@ -0,0 +1,111 @@
+#ifndef _RTL8367C_ASICDRV_H_
+#define _RTL8367C_ASICDRV_H_
+
+#include <rtk_types.h>
+#include <rtk_error.h>
+#include <rtl8367c_reg.h>
+#include <rtl8367c_base.h>
+
+#define RTL8367C_REGBITLENGTH               16
+#define RTL8367C_REGDATAMAX                 0xFFFF
+
+#define RTL8367C_VIDMAX                     0xFFF
+#define RTL8367C_EVIDMAX                    0x1FFF
+#define RTL8367C_CVIDXNO                    32
+#define RTL8367C_CVIDXMAX                   (RTL8367C_CVIDXNO-1)
+
+#define RTL8367C_PRIMAX                     7
+#define RTL8367C_DSCPMAX                    63
+
+#define RTL8367C_PORTNO                     11
+#define RTL8367C_PORTIDMAX                  (RTL8367C_PORTNO-1)
+#define RTL8367C_PMSKMAX                    ((1<<(RTL8367C_PORTNO))-1)
+#define RTL8367C_PORTMASK                   0x7FF
+
+#define RTL8367C_PHYNO                      5
+#define RTL8367C_PHYIDMAX                  (RTL8367C_PHYNO-1)
+
+#define RTL8367C_SVIDXNO                    64
+#define RTL8367C_SVIDXMAX                   (RTL8367C_SVIDXNO-1)
+#define RTL8367C_MSTIMAX                    15
+
+#define RTL8367C_METERNO                    64
+#define RTL8367C_METERMAX                   (RTL8367C_METERNO-1)
+#define RTL8367C_METERBUCKETSIZEMAX         0xFFFF
+
+#define RTL8367C_QUEUENO                    8
+#define RTL8367C_QIDMAX                     (RTL8367C_QUEUENO-1)
+
+#define RTL8367C_PHY_BUSY_CHECK_COUNTER     1000
+
+#define RTL8367C_QOS_GRANULARTY_MAX         0x7FFFF
+#define RTL8367C_QOS_GRANULARTY_LSB_MASK    0xFFFF
+#define RTL8367C_QOS_GRANULARTY_LSB_OFFSET  0
+#define RTL8367C_QOS_GRANULARTY_MSB_MASK    0x70000
+#define RTL8367C_QOS_GRANULARTY_MSB_OFFSET  16
+
+#define RTL8367C_QOS_GRANULARTY_UNIT_KBPS   8
+
+#define RTL8367C_QOS_RATE_INPUT_MAX         (0x1FFFF * 8)
+#define RTL8367C_QOS_RATE_INPUT_MAX_HSG     (0x7FFFF * 8)
+#define RTL8367C_QOS_RATE_INPUT_MIN         8
+#define RTL8367C_QOS_PPS_INPUT_MAX          (0x7FFFF)
+#define RTL8367C_QOS_PPS_INPUT_MIN          1
+
+#define RTL8367C_QUEUE_MASK                 0xFF
+
+#define RTL8367C_EFIDMAX                    0x7
+#define RTL8367C_FIDMAX                     0xF
+
+#define RTL8367C_EAV_SECONDMAX                  0xFFFFFFFF
+#define RTL8367C_EAV_NANOSECONDMAX          0x3B9AC9FF
+
+
+/* the above macro is generated by genDotH */
+#define RTL8367C_VALID_REG_NO               3869
+
+/*=======================================================================
+ *  Enum
+ *========================================================================*/
+enum RTL8367C_TABLE_ACCESS_OP
+{
+    TB_OP_READ = 0,
+    TB_OP_WRITE
+};
+
+enum RTL8367C_TABLE_ACCESS_TARGET
+{
+    TB_TARGET_ACLRULE = 1,
+    TB_TARGET_ACLACT,
+    TB_TARGET_CVLAN,
+    TB_TARGET_L2,
+    TB_TARGET_IGMP_GROUP
+};
+
+#define RTL8367C_TABLE_ACCESS_REG_DATA(op, target)    ((op << 3) | target)
+
+/*=======================================================================
+ *  Structures
+ *========================================================================*/
+
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+extern ret_t rtl8367c_setAsicRegBit(rtk_uint32 reg, rtk_uint32 bit, rtk_uint32 value);
+extern ret_t rtl8367c_getAsicRegBit(rtk_uint32 reg, rtk_uint32 bit, rtk_uint32 *pValue);
+
+extern ret_t rtl8367c_setAsicRegBits(rtk_uint32 reg, rtk_uint32 bits, rtk_uint32 value);
+extern ret_t rtl8367c_getAsicRegBits(rtk_uint32 reg, rtk_uint32 bits, rtk_uint32 *pValue);
+
+extern ret_t rtl8367c_setAsicReg(rtk_uint32 reg, rtk_uint32 value);
+extern ret_t rtl8367c_getAsicReg(rtk_uint32 reg, rtk_uint32 *pValue);
+
+#ifdef __cplusplus
+}
+#endif
+
+
+
+#endif /*#ifndef _RTL8367C_ASICDRV_H_*/
+
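
For reference, a minimal usage sketch of the accessors and the table-access command macro declared above. Only the macro, the enums, and the accessor signatures come from this header; the control-register address is a made-up placeholder, since the real table-access register lives in rtl8367c_reg.h.

/* Editor's sketch -- not part of the patch. */
#include <rtl8367c_asicdrv.h>

#define EXAMPLE_TABLE_ACCESS_CTRL  0x0500   /* hypothetical register address */

static ret_t example_issue_l2_read(void)
{
    /* op lands at bit 3, target in bits [2:0], per the macro above */
    rtk_uint32 cmd = RTL8367C_TABLE_ACCESS_REG_DATA(TB_OP_READ, TB_TARGET_L2);

    return rtl8367c_setAsicReg(EXAMPLE_TABLE_ACCESS_CTRL, cmd);
}
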
diff -Nruw linux-4.4.115/drivers/misc/freebox/rtlapi/rtl8367c_asicdrv_hsb.c linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/rtl8367c_asicdrv_hsb.c
--- linux-4.4.115/drivers/misc/freebox/rtlapi/rtl8367c_asicdrv_hsb.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/rtl8367c_asicdrv_hsb.c	2019-01-22 16:16:24.711257346 +0100
@@ -0,0 +1,83 @@
+/*
+ * Copyright (C) 2013 Realtek Semiconductor Corp.
+ * All Rights Reserved.
+ *
+ * This program is the proprietary software of Realtek Semiconductor
+ * Corporation and/or its licensors, and only be used, duplicated,
+ * modified or distributed under the authorized license from Realtek.
+ *
+ * ANY USE OF THE SOFTWARE OTHER THAN AS AUTHORIZED UNDER
+ * THIS LICENSE OR COPYRIGHT LAW IS PROHIBITED.
+ *
+ * $Revision: 76306 $
+ * $Date: 2017-03-08 15:13:58 +0800 (Wed, 08 Mar 2017) $
+ *
+ * Purpose : RTL8367C switch high-level API for RTL8367C
+ * Feature : Field selector related functions
+ *
+ */
+#include <rtl8367c_asicdrv_hsb.h>
+/* Function Name:
+ *      rtl8367c_setAsicFieldSelector
+ * Description:
+ *      Set user defined field selectors in HSB
+ * Input:
+ *      index       - index of field selector 0-15
+ *      format      - Format of field selector
+ *      offset      - Retrieving data offset
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - Success
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_OUT_OF_RANGE - input parameter out of range
+ * Note:
+ *      The system supports 16 user-defined field selectors.
+ *      Each selector can be enabled or disabled, and retrieves 16 bits from one of many predefined
+ *      standard L2/L3/L4 payload formats.
+ */
+ret_t rtl8367c_setAsicFieldSelector(rtk_uint32 index, rtk_uint32 format, rtk_uint32 offset)
+{
+    rtk_uint32 regData;
+
+    if(index >= RTL8367C_FIELDSEL_FORMAT_NUMBER)
+        return RT_ERR_OUT_OF_RANGE;
+
+    if(format >= FIELDSEL_FORMAT_END)
+        return RT_ERR_OUT_OF_RANGE;
+
+    regData = (((format << RTL8367C_FIELD_SELECTOR_FORMAT_OFFSET) & RTL8367C_FIELD_SELECTOR_FORMAT_MASK ) |
+               ((offset << RTL8367C_FIELD_SELECTOR_OFFSET_OFFSET) & RTL8367C_FIELD_SELECTOR_OFFSET_MASK ));
+
+    return rtl8367c_setAsicReg(RTL8367C_FIELD_SELECTOR_REG(index), regData);
+}
+/* Function Name:
+ *      rtl8367c_getAsicFieldSelector
+ * Description:
+ *      Get user defined field selectors in HSB
+ * Input:
+ *      index       - index of field selector 0-15
+ * Output:
+ *      pFormat     - Format of field selector
+ *      pOffset     - Retrieving data offset
+ * Return:
+ *      RT_ERR_OK           - Success
+ *      RT_ERR_SMI          - SMI access error
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicFieldSelector(rtk_uint32 index, rtk_uint32* pFormat, rtk_uint32* pOffset)
+{
+    ret_t retVal;
+    rtk_uint32 regData;
+
+    retVal = rtl8367c_getAsicReg(RTL8367C_FIELD_SELECTOR_REG(index), &regData);
+    if(retVal != RT_ERR_OK)
+        return retVal;
+
+    *pFormat    = ((regData & RTL8367C_FIELD_SELECTOR_FORMAT_MASK) >> RTL8367C_FIELD_SELECTOR_FORMAT_OFFSET);
+    *pOffset    = ((regData & RTL8367C_FIELD_SELECTOR_OFFSET_MASK) >> RTL8367C_FIELD_SELECTOR_OFFSET_OFFSET);
+
+    return RT_ERR_OK;
+}
diff -Nruw linux-4.4.115/drivers/misc/freebox/rtlapi/rtl8367c_asicdrv_hsb.h linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/rtl8367c_asicdrv_hsb.h
--- linux-4.4.115/drivers/misc/freebox/rtlapi/rtl8367c_asicdrv_hsb.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/rtl8367c_asicdrv_hsb.h	2019-01-22 16:16:24.711257346 +0100
@@ -0,0 +1,26 @@
+#ifndef _RTL8367C_ASICDRV__HSB_H_
+#define _RTL8367C_ASICDRV__HSB_H_
+
+#include <rtl8367c_asicdrv.h>
+
+#define RTL8367C_FIELDSEL_FORMAT_NUMBER      (16)
+#define RTL8367C_FIELDSEL_MAX_OFFSET         (255)
+
+enum FIELDSEL_FORMAT_FORMAT
+{
+    FIELDSEL_FORMAT_DEFAULT = 0,
+    FIELDSEL_FORMAT_RAW,
+    FIELDSEL_FORMAT_LLC,
+    FIELDSEL_FORMAT_IPV4,
+    FIELDSEL_FORMAT_ARP,
+    FIELDSEL_FORMAT_IPV6,
+    FIELDSEL_FORMAT_IPPAYLOAD,
+    FIELDSEL_FORMAT_L4PAYLOAD,
+    FIELDSEL_FORMAT_END
+};
+
+extern ret_t rtl8367c_setAsicFieldSelector(rtk_uint32 index, rtk_uint32 format, rtk_uint32 offset);
+extern ret_t rtl8367c_getAsicFieldSelector(rtk_uint32 index, rtk_uint32* pFormat, rtk_uint32* pOffset);
+
+#endif /*_RTL8367C_ASICDRV__HSB_H_*/
+
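
The set/get pair in rtl8367c_asicdrv_hsb.c combines with the format enum in this header. The sketch below programs one selector and reads it back; the choice of selector 0, IPv4 format, and offset 12 is purely illustrative, and the offset units (bytes vs. words) are not documented in this file, so treat them as an assumption.

/* Editor's sketch -- not part of the patch. */
#include <rtl8367c_asicdrv_hsb.h>

static ret_t example_program_selector0(void)
{
    ret_t ret;
    rtk_uint32 fmt, off;

    /* extract 16 bits at offset 12 within the IPv4 header */
    ret = rtl8367c_setAsicFieldSelector(0, FIELDSEL_FORMAT_IPV4, 12);
    if (ret != RT_ERR_OK)
        return ret;

    /* read back to confirm what the hardware latched */
    ret = rtl8367c_getAsicFieldSelector(0, &fmt, &off);
    if (ret != RT_ERR_OK)
        return ret;

    return (fmt == FIELDSEL_FORMAT_IPV4 && off == 12) ? RT_ERR_OK : RT_ERR_FAILED;
}
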
diff -Nruw linux-4.4.115/drivers/misc/freebox/rtlapi/rtl8367c_asicdrv_i2c.c linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/rtl8367c_asicdrv_i2c.c
--- linux-4.4.115/drivers/misc/freebox/rtlapi/rtl8367c_asicdrv_i2c.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/rtl8367c_asicdrv_i2c.c	2019-01-22 16:16:24.711257346 +0100
@@ -0,0 +1,476 @@
+/*
+ * Copyright (C) 2013 Realtek Semiconductor Corp.
+ * All Rights Reserved.
+ *
+ * This program is the proprietary software of Realtek Semiconductor
+ * Corporation and/or its licensors, and only be used, duplicated,
+ * modified or distributed under the authorized license from Realtek.
+ *
+ * ANY USE OF THE SOFTWARE OTHER THAN AS AUTHORIZED UNDER
+ * THIS LICENSE OR COPYRIGHT LAW IS PROHIBITED.
+ *
+ * $Revision: 38651 $
+ * $Date: 2016-02-27 14:32:56 +0800 (Wed, 17 Apr 2016) $
+ *
+ * Purpose : RTL8367C switch high-level API for RTL8367C
+ * Feature : I2C related functions
+ *
+ */
+
+
+#include <rtl8367c_asicdrv_i2c.h>
+#include <rtk_error.h>
+#include <rtk_types.h>
+
+
+
+/* Function Name:
+ *      rtl8367c_setAsicI2C_checkBusIdle
+ * Description:
+ *      Check whether the I2C bus is idle
+ * Input:
+ *      None
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK                - Success
+ *      RT_ERR_BUSYWAIT_TIMEOUT  - i2c bus is busy
+ * Note:
+ *      This API checks the I2C bus status.
+ */
+ret_t rtl8367c_setAsicI2C_checkBusIdle(void)
+{
+    rtk_uint32 regData;
+    ret_t retVal;
+
+    if ((retVal = rtl8367c_getAsicRegBit(RTL8367C_REG_M_I2C_CTL_STA_REG, RTL8367C_M_I2C_BUS_IDLE_OFFSET, &regData)) != RT_ERR_OK)
+        return retVal;
+
+    if(regData == 0x0001)
+        return RT_ERR_OK; /*i2c is idle*/
+    else
+        return RT_ERR_BUSYWAIT_TIMEOUT; /*i2c is busy*/
+}
+
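+/*
+ * Command encoding used throughout this file: bits [4:1] of
+ * RTL8367C_REG_M_I2C_CTL_STA_REG select the command (0 = start, 1 = stop,
+ * 2 = tx char, 3 = rx char, 4 = tx ACK, 5 = tx NACK, 6 = soft reset) and
+ * bit [0] triggers it; each helper then polls the trigger bit until the
+ * hardware clears it.
+ */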
+
+/* Function Name:
+ *      rtl8367c_setAsicI2CStartCmd
+ * Description:
+ *      Set I2C start command
+ * Input:
+ *      None
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK                - Success
+ * Note:
+ *      This API issues an I2C start command, starting an I2C transaction.
+ */
+ret_t rtl8367c_setAsicI2CStartCmd(void)
+{
+    rtk_uint32 regData;
+    ret_t retVal;
+
+    /* Bits [4-1] = 0b0000, Start Command; Bit [0] = 1, Trigger the Command */
+    if ((retVal = rtl8367c_getAsicReg(RTL8367C_REG_M_I2C_CTL_STA_REG, &regData)) != RT_ERR_OK)
+        return retVal;
+    regData &= 0xFFE0;
+    regData |= 0x0001;
+
+    if ((retVal = rtl8367c_setAsicReg(RTL8367C_REG_M_I2C_CTL_STA_REG, regData)) != RT_ERR_OK)
+        return retVal;
+
+    /* wait for command finished */
+    do{
+       if ((retVal = rtl8367c_getAsicRegBit(RTL8367C_REG_M_I2C_CTL_STA_REG, RTL8367C_I2C_CMD_EXEC_OFFSET, &regData)) != RT_ERR_OK)
+            return retVal;
+    }while( regData != 0x0);
+
+    return RT_ERR_OK ;
+}
+
+/* Function Name:
+ *      rtl8367c_setAsicI2CStopCmd
+ * Description:
+ *      Set I2C stop command
+ * Input:
+ *      None
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK                - Success
+ * Note:
+ *      This API issues an I2C stop command, ending an I2C transaction.
+ */
+ret_t rtl8367c_setAsicI2CStopCmd(void)
+{
+
+    rtk_uint32 regData;
+    ret_t retVal;
+
+    /* Bits [4-1] = 0b0001, Stop Command; Bit [0] = 1, Trigger the Command */
+    if ((retVal = rtl8367c_getAsicReg(RTL8367C_REG_M_I2C_CTL_STA_REG, &regData)) != RT_ERR_OK)
+        return retVal;
+    regData &= 0xFFE0;
+    regData |= 0x0003;
+
+    if ((retVal = rtl8367c_setAsicReg(RTL8367C_REG_M_I2C_CTL_STA_REG, regData)) != RT_ERR_OK)
+        return retVal;
+
+
+    /* wait for command finished */
+    do{
+       if ((retVal = rtl8367c_getAsicRegBit(RTL8367C_REG_M_I2C_CTL_STA_REG, RTL8367C_I2C_CMD_EXEC_OFFSET, &regData)) != RT_ERR_OK)
+            return retVal;
+    }while( regData != 0x0);
+
+    return RT_ERR_OK ;
+}
+
+/* Function Name:
+ *      rtl8367c_setAsicI2CTxOneCharCmd
+ * Description:
+ *      Set I2C Tx one-char command with an 8-bit data byte
+ * Input:
+ *      oneChar - 8-bit data
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK                - Success
+ * Note:
+ *      This API issues an I2C Tx command carrying an 8-bit data byte.
+ */
+ret_t rtl8367c_setAsicI2CTxOneCharCmd(rtk_uint8 oneChar)
+{
+    rtk_uint32 regData;
+    ret_t retVal;
+
+    /* Bits [4-1] = 0b0010, tx one char; Bit [0] = 1, Trigger the Command */
+    if ((retVal = rtl8367c_getAsicReg(RTL8367C_REG_M_I2C_CTL_STA_REG, &regData)) != RT_ERR_OK)
+        return retVal;
+
+    regData &= 0xFFE0;
+    regData |= 0x0005;
+    regData &= 0x00FF;
+    regData |= (rtk_uint16) (oneChar << 8);
+
+    if ((retVal = rtl8367c_setAsicReg(RTL8367C_REG_M_I2C_CTL_STA_REG, regData)) != RT_ERR_OK)
+        return retVal;
+
+
+   /* wait for command finished */
+    do{
+       if ((retVal = rtl8367c_getAsicRegBit(RTL8367C_REG_M_I2C_CTL_STA_REG, RTL8367C_I2C_CMD_EXEC_OFFSET, &regData)) != RT_ERR_OK)
+            return retVal;
+    }while( regData != 0x0);
+
+    return RT_ERR_OK ;
+}
+
+
+/* Function Name:
+ *      rtl8367c_setAsicI2CcheckRxAck
+ * Description:
+ *      Check whether an ACK was received
+ * Input:
+ *      None
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK                - Success
+ * Note:
+ *      This API checks whether an ACK was received from the I2C slave.
+ */
+ret_t rtl8367c_setAsicI2CcheckRxAck(void)
+{
+    rtk_uint32 regData;
+    ret_t retVal;
+    rtk_uint32 count = 0;
+
+    do{
+         count++;
+         if ((retVal = rtl8367c_getAsicRegBit(RTL8367C_REG_M_I2C_CTL_STA_REG, RTL8367C_SLV_ACK_FLAG_OFFSET, &regData)) != RT_ERR_OK)
+            return retVal;
+    }while( (regData != 0x1) && (count < TIMEROUT_FOR_MICROSEMI) );
+
+    if(regData != 0x1)
+        return RT_ERR_FAILED;
+    else
+        return RT_ERR_OK;
+}
+
+
+/* Function Name:
+ *      rtl8367c_setAsicI2CRxOneCharCmd
+ * Description:
+ *      Set I2C Rx command and get 8-bit data
+ * Input:
+ *      None
+ * Output:
+ *      pValue - 8-bit data
+ * Return:
+ *      RT_ERR_OK                - Success
+ * Note:
+ *      This API can set I2C Rx command and get 8-bit data.
+ */
+ret_t rtl8367c_setAsicI2CRxOneCharCmd(rtk_uint8 *pValue)
+{
+    rtk_uint32 regData;
+    ret_t retVal;
+
+    /* Bits [4-1] = 0b0011, Rx one char; Bit [0] = 1, Trigger the Command */
+    if ((retVal = rtl8367c_getAsicReg(RTL8367C_REG_M_I2C_CTL_STA_REG, &regData)) != RT_ERR_OK)
+        return retVal;
+    regData &= 0xFFE0;
+    regData |= 0x0007;
+    if ((retVal = rtl8367c_setAsicReg(RTL8367C_REG_M_I2C_CTL_STA_REG, regData)) != RT_ERR_OK)
+        return retVal;
+
+    /* wait for command finished */
+     do{
+        if ((retVal = rtl8367c_getAsicReg(RTL8367C_REG_M_I2C_CTL_STA_REG, &regData)) != RT_ERR_OK)
+             return retVal;
+     }while( (regData & 0x1) != 0x0);
+
+    *pValue = (rtk_uint8)(regData >> 8);
+     return RT_ERR_OK ;
+
+}
+
+/* Function Name:
+ *      rtl8367c_setAsicI2CTxAckCmd
+ * Description:
+ *      Set I2C Tx ACK command
+ * Input:
+ *      None
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK                - Success
+ * Note:
+ *      This API can set I2C Tx ack command.
+ */
+ret_t rtl8367c_setAsicI2CTxAckCmd(void)
+{
+    rtk_uint32 regData;
+    ret_t retVal;
+
+    /* Bits [4-1] = 0b0100, tx ACK Command; Bit [0] = 1, Trigger the Command */
+    if ((retVal = rtl8367c_getAsicReg(RTL8367C_REG_M_I2C_CTL_STA_REG, &regData)) != RT_ERR_OK)
+        return retVal;
+    regData &= 0xFFE0;
+    regData |= 0x0009;
+    if ((retVal = rtl8367c_setAsicReg(RTL8367C_REG_M_I2C_CTL_STA_REG, regData)) != RT_ERR_OK)
+        return retVal;
+
+     /* wait for command finished */
+    do{
+       if ((retVal = rtl8367c_getAsicRegBit(RTL8367C_REG_M_I2C_CTL_STA_REG, RTL8367C_I2C_CMD_EXEC_OFFSET, &regData)) != RT_ERR_OK)
+            return retVal;
+    }while( regData != 0x0);
+
+    return RT_ERR_OK ;
+
+}
+
+
+/* Function Name:
+ *      rtl8367c_setAsicI2CTxNoAckCmd
+ * Description:
+ *      Set I2C master Tx noACK command
+ * Input:
+ *      None
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK                - Success
+ * Note:
+ *      This API can set I2C master Tx noACK command.
+ */
+ret_t rtl8367c_setAsicI2CTxNoAckCmd(void)
+{
+    rtk_uint32 regData;
+    ret_t retVal;
+
+    /* Bits [4-1] = 0b0101, tx noACK Command; Bit [0] = 1, Trigger the Command */
+    if ((retVal = rtl8367c_getAsicReg(RTL8367C_REG_M_I2C_CTL_STA_REG, &regData)) != RT_ERR_OK)
+        return retVal;
+    regData &= 0xFFE0;
+    regData |= 0x000b;
+    if ((retVal = rtl8367c_setAsicReg(RTL8367C_REG_M_I2C_CTL_STA_REG, regData)) != RT_ERR_OK)
+        return retVal;
+
+     /* wait for command finished */
+    do{
+       if ((retVal = rtl8367c_getAsicRegBit(RTL8367C_REG_M_I2C_CTL_STA_REG, RTL8367C_I2C_CMD_EXEC_OFFSET, &regData)) != RT_ERR_OK)
+            return retVal;
+    }while( regData != 0x0);
+
+    return RT_ERR_OK ;
+
+}
+
+/* Function Name:
+ *      rtl8367c_setAsicI2CSoftRSTseqCmd
+ * Description:
+ *      set I2C master tx soft reset command
+ * Input:
+ *      None
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK                - Success
+ * Note:
+ *      This API can set I2C master tx soft reset command.
+ */
+ret_t rtl8367c_setAsicI2CSoftRSTseqCmd(void)
+{
+
+    rtk_uint32 regData;
+    ret_t retVal;
+
+    /*Bits [4-1] = 0b0110, tx soft reset Command;  Bit [0] = 1, Trigger the Command */
+    if ((retVal = rtl8367c_getAsicReg(RTL8367C_REG_M_I2C_CTL_STA_REG, &regData)) != RT_ERR_OK)
+        return retVal;
+    regData &= 0xFFE0;
+    regData |= 0x000d;
+
+    if ((retVal = rtl8367c_setAsicReg(RTL8367C_REG_M_I2C_CTL_STA_REG, regData)) != RT_ERR_OK)
+        return retVal;
+
+
+    /* wait for command finished */
+    do{
+       if ((retVal = rtl8367c_getAsicRegBit(RTL8367C_REG_M_I2C_CTL_STA_REG, RTL8367C_I2C_CMD_EXEC_OFFSET, &regData)) != RT_ERR_OK)
+            return retVal;
+    }while( regData != 0x0);
+
+    return RT_ERR_OK ;
+}
+
+
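+/*
+ * Pin-group encoding: bits [15:12] of RTL8367C_REG_M_I2C_SYS_CTL select
+ * the pin group (0x5 = group 0, 0xA = group 1, 0xF = group 2); the two
+ * functions below write and decode this nibble.
+ */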
+/* Function Name:
+ *      rtl8367c_setAsicI2CGpioPinGroup
+ * Description:
+ *      Select the GPIO pin group used by the I2C function
+ * Input:
+ *      pinGroup_ID - gpio pins group
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK                - Success
+ *      RT_ERR_INPUT             - Invalid input parameter
+ * Note:
+ *      This API selects the GPIO pin group used by the I2C function.
+ *      There are three GPIO pin groups.
+ */
+ret_t rtl8367c_setAsicI2CGpioPinGroup(rtk_uint32 pinGroup_ID)
+{
+    rtk_uint32 regData;
+    ret_t retVal;
+
+    if ((retVal = rtl8367c_getAsicReg(RTL8367C_REG_M_I2C_SYS_CTL, &regData)) != RT_ERR_OK)
+         return retVal;
+    if( pinGroup_ID==0 )
+    {
+        regData &= 0x0FFF;
+        regData |= 0x5000;
+
+        if ((retVal = rtl8367c_setAsicReg(RTL8367C_REG_M_I2C_SYS_CTL, regData)) != RT_ERR_OK)
+             return retVal;
+    }
+
+    else if( pinGroup_ID==1 )
+    {
+        regData &= 0x0FFF;
+        regData |= 0xA000;
+
+        if ((retVal = rtl8367c_setAsicReg(RTL8367C_REG_M_I2C_SYS_CTL, regData)) != RT_ERR_OK)
+             return retVal;
+    }
+
+    else if( pinGroup_ID==2 )
+    {
+        regData &= 0x0FFF;
+        regData |= 0xF000;
+
+        if ((retVal = rtl8367c_setAsicReg(RTL8367C_REG_M_I2C_SYS_CTL, regData)) != RT_ERR_OK)
+             return retVal;
+    }
+    else
+        return RT_ERR_INPUT;
+
+    return RT_ERR_OK ;
+
+}
+
+/* Function Name:
+ *      rtl8367c_getAsicI2CGpioPinGroup
+ * Description:
+ *      Get the GPIO pin group used by the I2C function
+ * Input:
+ *      None
+ * Output:
+ *      pPinGroup_ID - GPIO pin group
+ * Return:
+ *      RT_ERR_OK                - Success
+ *      RT_ERR_FAILED            - Register holds an unknown pin-group encoding
+ * Note:
+ *      This API reads back which of the three GPIO pin groups the I2C function uses.
+ */
+ret_t rtl8367c_getAsicI2CGpioPinGroup(rtk_uint32 * pPinGroup_ID)
+{
+
+    rtk_uint32 regData;
+    ret_t retVal;
+    if( (retVal = rtl8367c_getAsicReg(RTL8367C_REG_M_I2C_SYS_CTL, &regData)) != RT_ERR_OK)
+        return retVal;
+    regData &= 0xF000 ;
+    regData = (regData >> 12);
+
+    if( regData == 0x5 )
+        *pPinGroup_ID = 0;
+    else if(regData == 0xA)
+        *pPinGroup_ID = 1;
+    else if(regData == 0xF)
+        *pPinGroup_ID = 2;
+    else
+       return RT_ERR_FAILED;
+    return RT_ERR_OK ;
+}
+
diff -Nruw linux-4.4.115/drivers/misc/freebox/rtlapi/rtl8367c_asicdrv_i2c.h linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/rtl8367c_asicdrv_i2c.h
--- linux-4.4.115/drivers/misc/freebox/rtlapi/rtl8367c_asicdrv_i2c.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/rtl8367c_asicdrv_i2c.h	2019-01-22 16:16:24.711257346 +0100
@@ -0,0 +1,29 @@
+#ifndef _RTL8367C_ASICDRV_I2C_H_
+#define _RTL8367C_ASICDRV_I2C_H_
+#include <rtk_types.h>
+#include <rtl8367c_asicdrv.h>
+
+
+#define TIMEROUT_FOR_MICROSEMI (0x400)
+
+#define GPIO_INPUT 1
+#define GPIO_OUTPUT 2
+
+extern ret_t rtl8367c_setAsicI2C_checkBusIdle(void);
+extern ret_t rtl8367c_setAsicI2CStartCmd(void);
+extern ret_t rtl8367c_setAsicI2CStopCmd(void);
+extern ret_t rtl8367c_setAsicI2CTxOneCharCmd(rtk_uint8 oneChar);
+extern ret_t rtl8367c_setAsicI2CcheckRxAck(void);
+extern ret_t rtl8367c_setAsicI2CRxOneCharCmd(rtk_uint8 *pValue);
+extern ret_t rtl8367c_setAsicI2CTxAckCmd(void);
+extern ret_t rtl8367c_setAsicI2CTxNoAckCmd(void);
+extern ret_t rtl8367c_setAsicI2CSoftRSTseqCmd(void);
+extern ret_t rtl8367c_setAsicI2CGpioPinGroup(rtk_uint32 pinGroup_ID);
+extern ret_t rtl8367c_getAsicI2CGpioPinGroup(rtk_uint32 * pPinGroup_ID);
+
+
+
+
+
+#endif /*#ifndef _RTL8367C_ASICDRV_I2C_H_*/
+
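
Taken together, the primitives above implement a bit-banged I2C master driven through switch registers. The sketch below composes them into a single-byte read; the 7-bit address framing (address shifted left, read bit set) is standard I2C, and the early-return error style mirrors the driver's own. This is an editor's illustration, not code from the patch.

#include <rtl8367c_asicdrv_i2c.h>

static ret_t example_i2c_read_byte(rtk_uint8 slave_addr, rtk_uint8 *pData)
{
    ret_t ret;

    if ((ret = rtl8367c_setAsicI2C_checkBusIdle()) != RT_ERR_OK)
        return ret;                                   /* bus still busy */

    if ((ret = rtl8367c_setAsicI2CStartCmd()) != RT_ERR_OK)
        return ret;

    /* address byte: 7-bit slave address plus read bit */
    if ((ret = rtl8367c_setAsicI2CTxOneCharCmd((slave_addr << 1) | 1)) != RT_ERR_OK)
        return ret;

    if ((ret = rtl8367c_setAsicI2CcheckRxAck()) != RT_ERR_OK)
        return ret;                                   /* slave did not ACK */

    if ((ret = rtl8367c_setAsicI2CRxOneCharCmd(pData)) != RT_ERR_OK)
        return ret;

    /* last byte read: master answers NACK, then releases the bus */
    if ((ret = rtl8367c_setAsicI2CTxNoAckCmd()) != RT_ERR_OK)
        return ret;

    return rtl8367c_setAsicI2CStopCmd();
}
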
diff -Nruw linux-4.4.115/drivers/misc/freebox/rtlapi/rtl8367c_asicdrv_igmp.c linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/rtl8367c_asicdrv_igmp.c
--- linux-4.4.115/drivers/misc/freebox/rtlapi/rtl8367c_asicdrv_igmp.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/rtl8367c_asicdrv_igmp.c	2019-01-22 16:16:24.715257382 +0100
@@ -0,0 +1,2111 @@
+/*
+ * Copyright (C) 2013 Realtek Semiconductor Corp.
+ * All Rights Reserved.
+ *
+ * This program is the proprietary software of Realtek Semiconductor
+ * Corporation and/or its licensors, and only be used, duplicated,
+ * modified or distributed under the authorized license from Realtek.
+ *
+ * ANY USE OF THE SOFTWARE OTHER THAN AS AUTHORIZED UNDER
+ * THIS LICENSE OR COPYRIGHT LAW IS PROHIBITED.
+ *
+ * $Revision: 76306 $
+ * $Date: 2017-03-08 15:13:58 +0800 (Wed, 08 Mar 2017) $
+ *
+ * Purpose : RTL8367C switch high-level API for RTL8367C
+ * Feature : IGMP related functions
+ *
+ */
+#include <rtl8367c_asicdrv_igmp.h>
+/* Function Name:
+ *      rtl8367c_setAsicIgmp
+ * Description:
+ *      Set IGMP/MLD state
+ * Input:
+ *      enabled     - 1: enabled, 0: disabled
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK   - Success
+ *      RT_ERR_SMI  - SMI access error
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_setAsicIgmp(rtk_uint32 enabled)
+{
+    ret_t retVal;
+
+    /* Enable/Disable H/W IGMP/MLD */
+    retVal = rtl8367c_setAsicRegBit(RTL8367C_REG_IGMP_MLD_CFG0, RTL8367C_IGMP_MLD_EN_OFFSET, enabled);
+
+    return retVal;
+}
+/* Function Name:
+ *      rtl8367c_getAsicIgmp
+ * Description:
+ *      Get IGMP/MLD state
+ * Input:
+ *      None
+ * Output:
+ *      ptr_enabled - 1: enabled, 0: disabled
+ * Return:
+ *      RT_ERR_OK   - Success
+ *      RT_ERR_SMI  - SMI access error
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicIgmp(rtk_uint32 *ptr_enabled)
+{
+    ret_t retVal;
+
+    retVal = rtl8367c_getAsicRegBit(RTL8367C_REG_IGMP_MLD_CFG0, RTL8367C_IGMP_MLD_EN_OFFSET, ptr_enabled);
+    return retVal;
+}
+/* Function Name:
+ *      rtl8367c_setAsicIpMulticastVlanLeaky
+ * Description:
+ *      Set IP multicast VLAN Leaky function
+ * Input:
+ *      port        - Physical port number (0~10)
+ *      enabled     - 1: enabled, 0: disabled
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK       - Success
+ *      RT_ERR_SMI      - SMI access error
+ *      RT_ERR_PORT_ID  - Invalid port number
+ * Note:
+ *      When this function is enabled, an IP multicast packet whose lookup result
+ *      (forwarding portmap) crosses the VLAN boundary can be forwarded
+ *      across VLANs
+ */
+ret_t rtl8367c_setAsicIpMulticastVlanLeaky(rtk_uint32 port, rtk_uint32 enabled)
+{
+    ret_t  retVal;
+
+    if(port > RTL8367C_PORTIDMAX)
+        return RT_ERR_PORT_ID;
+
+    retVal = rtl8367c_setAsicRegBit(RTL8367C_REG_IPMCAST_VLAN_LEAKY, port, enabled);
+
+    return retVal;
+}
+/* Function Name:
+ *      rtl8367c_getAsicIpMulticastVlanLeaky
+ * Description:
+ *      Get IP multicast VLAN Leaky function
+ * Input:
+ *      port        - Physical port number (0~10)
+ * Output:
+ *      ptr_enabled - 1: enabled, 0: disabled
+ * Return:
+ *      RT_ERR_OK       - Success
+ *      RT_ERR_SMI      - SMI access error
+ *      RT_ERR_PORT_ID  - Invalid port number
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicIpMulticastVlanLeaky(rtk_uint32 port, rtk_uint32 *ptr_enabled)
+{
+    ret_t  retVal;
+
+    if(port > RTL8367C_PORTIDMAX)
+        return RT_ERR_PORT_ID;
+
+    retVal = rtl8367c_getAsicRegBit(RTL8367C_REG_IPMCAST_VLAN_LEAKY, port, ptr_enabled);
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtl8367c_setAsicIGMPTableFullOP
+ * Description:
+ *      Set Table Full operation
+ * Input:
+ *      operation   - The operation to take when the IGMP table is full.
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - Success
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_OUT_OF_RANGE - input parameter is out of range
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_setAsicIGMPTableFullOP(rtk_uint32 operation)
+{
+    ret_t  retVal;
+
+    if(operation >= TABLE_FULL_OP_END)
+        return RT_ERR_OUT_OF_RANGE;
+
+    /* Table full Operation */
+    retVal = rtl8367c_setAsicRegBits(RTL8367C_REG_IGMP_MLD_CFG1, RTL8367C_TABLE_FULL_OP_MASK, operation);
+    if(retVal != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+/* Function Name:
+ *      rtl8367c_getAsicIGMPTableFullOP
+ * Description:
+ *      Get Table Full operation
+ * Input:
+ *      None
+ * Output:
+ *      poperation  - The operation to take when the IGMP table is full.
+ * Return:
+ *      RT_ERR_OK       - Success
+ *      RT_ERR_SMI      - SMI access error
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicIGMPTableFullOP(rtk_uint32 *poperation)
+{
+    ret_t   retVal;
+    rtk_uint32  value;
+
+    /* Table full Operation */
+    retVal = rtl8367c_getAsicRegBits(RTL8367C_REG_IGMP_MLD_CFG1, RTL8367C_TABLE_FULL_OP_MASK, &value);
+    if(retVal != RT_ERR_OK)
+        return retVal;
+
+    *poperation = value;
+
+    return RT_ERR_OK;
+}
+
+/* Function Name:
+ *      rtl8367c_setAsicIGMPCRCErrOP
+ * Description:
+ *      Set the operation taken when the ASIC receives a checksum-error packet
+ * Input:
+ *      operation   - The operation taken when the ASIC receives a checksum-error packet
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - Success
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_OUT_OF_RANGE - input parameter is out of range
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_setAsicIGMPCRCErrOP(rtk_uint32 operation)
+{
+    ret_t  retVal;
+
+    if(operation >= CRC_ERR_OP_END)
+        return RT_ERR_OUT_OF_RANGE;
+
+    /* CRC Error Operation */
+    retVal = rtl8367c_setAsicRegBits(RTL8367C_REG_IGMP_MLD_CFG0, RTL8367C_CKS_ERR_OP_MASK, operation);
+    if(retVal != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+/* Function Name:
+ *      rtl8367c_getAsicIGMPCRCErrOP
+ * Description:
+ *      Get the operation taken when the ASIC receives a checksum-error packet
+ * Input:
+ *      None
+ * Output:
+ *      poperation  - The operation taken on checksum-error packets
+ * Return:
+ *      RT_ERR_OK       - Success
+ *      RT_ERR_SMI      - SMI access error
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicIGMPCRCErrOP(rtk_uint32 *poperation)
+{
+    ret_t   retVal;
+    rtk_uint32  value;
+
+    /* CRC Error Operation */
+    retVal = rtl8367c_getAsicRegBits(RTL8367C_REG_IGMP_MLD_CFG0, RTL8367C_CKS_ERR_OP_MASK, &value);
+    if(retVal != RT_ERR_OK)
+        return retVal;
+
+    *poperation = value;
+
+    return RT_ERR_OK;
+}
+
+/* Function Name:
+ *      rtl8367c_setAsicIGMPFastLeaveEn
+ * Description:
+ *      Enable/Disable Fast Leave
+ * Input:
+ *      enabled - 1:enable Fast Leave; 0:disable Fast Leave
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK       - Success
+ *      RT_ERR_SMI      - SMI access error
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_setAsicIGMPFastLeaveEn(rtk_uint32 enabled)
+{
+    ret_t  retVal;
+
+    /* Fast Leave */
+    retVal = rtl8367c_setAsicRegBits(RTL8367C_REG_IGMP_MLD_CFG0, RTL8367C_FAST_LEAVE_EN_MASK, (enabled >= 1) ? 1 : 0);
+    if(retVal != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+/* Function Name:
+ *      rtl8367c_getAsicIGMPFastLeaveEn
+ * Description:
+ *      Get Fast Leave state
+ * Input:
+ *      None
+ * Output:
+ *      penabled        - 1:enable Fast Leave; 0:disable Fast Leave
+ * Return:
+ *      RT_ERR_OK       - Success
+ *      RT_ERR_SMI      - SMI access error
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicIGMPFastLeaveEn(rtk_uint32 *penabled)
+{
+    ret_t   retVal;
+    rtk_uint32  value;
+
+    /* Fast Leave */
+    retVal = rtl8367c_getAsicRegBits(RTL8367C_REG_IGMP_MLD_CFG0, RTL8367C_FAST_LEAVE_EN_MASK, &value);
+    if(retVal != RT_ERR_OK)
+        return retVal;
+
+    *penabled = value;
+
+    return RT_ERR_OK;
+}
+
+/* Function Name:
+ *      rtl8367c_setAsicIGMPLeaveTimer
+ * Description:
+ *      Set the Leave timer of IGMP/MLD
+ * Input:
+ *      leave_timer     - Leave timer
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - Success
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_OUT_OF_RANGE - input parameter is out of range
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_setAsicIGMPLeaveTimer(rtk_uint32 leave_timer)
+{
+    ret_t  retVal;
+
+    if(leave_timer > RTL8367C_MAX_LEAVE_TIMER)
+        return RT_ERR_OUT_OF_RANGE;
+
+    /* Leave timer */
+    retVal = rtl8367c_setAsicRegBits(RTL8367C_REG_IGMP_MLD_CFG0, RTL8367C_LEAVE_TIMER_MASK, leave_timer);
+    if(retVal != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+/* Function Name:
+ *      rtl8367c_getAsicIGMPLeaveTimer
+ * Description:
+ *      Get the Leave timer of IGMP/MLD
+ * Input:
+ *      None
+ * Output:
+ *      pleave_timer    - Leave timer
+ * Return:
+ *      RT_ERR_OK       - Success
+ *      RT_ERR_SMI      - SMI access error
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicIGMPLeaveTimer(rtk_uint32 *pleave_timer)
+{
+    ret_t   retVal;
+    rtk_uint32  value;
+
+    /* Leave timer */
+    retVal = rtl8367c_getAsicRegBits(RTL8367C_REG_IGMP_MLD_CFG0, RTL8367C_LEAVE_TIMER_MASK, &value);
+    if(retVal != RT_ERR_OK)
+        return retVal;
+
+    *pleave_timer = value;
+
+    return RT_ERR_OK;
+}
+
+/* Function Name:
+ *      rtl8367c_setAsicIGMPQueryInterval
+ * Description:
+ *      Set Query Interval of IGMP/MLD
+ * Input:
+ *      interval    - Query Interval
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - Success
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_OUT_OF_RANGE - input parameter is out of range
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_setAsicIGMPQueryInterval(rtk_uint32 interval)
+{
+    ret_t  retVal;
+
+    if(interval > RTL8367C_MAX_QUERY_INT)
+        return RT_ERR_OUT_OF_RANGE;
+
+    /* Query Interval */
+    retVal = rtl8367c_setAsicReg(RTL8367C_REG_IGMP_MLD_CFG2, interval);
+    if(retVal != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+/* Function Name:
+ *      rtl8367c_getAsicIGMPQueryInterval
+ * Description:
+ *      Get Query Interval of IGMP/MLD
+ * Input:
+ *      None
+ * Output:
+ *      pinterval       - Query Interval
+ * Return:
+ *      RT_ERR_OK       - Success
+ *      RT_ERR_SMI      - SMI access error
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicIGMPQueryInterval(rtk_uint32 *pinterval)
+{
+    ret_t   retVal;
+    rtk_uint32  value;
+
+    /* Query Interval */
+    retVal = rtl8367c_getAsicReg(RTL8367C_REG_IGMP_MLD_CFG2, &value);
+    if(retVal != RT_ERR_OK)
+        return retVal;
+
+    *pinterval = value;
+
+    return RT_ERR_OK;
+}
+
+/* Function Name:
+ *      rtl8367c_setAsicIGMPRobVar
+ * Description:
+ *      Set Robustness Variable of IGMP/MLD
+ * Input:
+ *      rob_var     - Robustness Variable
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - Success
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_OUT_OF_RANGE - input parameter is out of range
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_setAsicIGMPRobVar(rtk_uint32 rob_var)
+{
+    ret_t  retVal;
+
+    if(rob_var > RTL8367C_MAX_ROB_VAR)
+        return RT_ERR_OUT_OF_RANGE;
+
+    /* Robustness variable */
+    retVal = rtl8367c_setAsicRegBits(RTL8367C_REG_IGMP_MLD_CFG0, RTL8367C_ROBURSTNESS_VAR_MASK, rob_var);
+    if(retVal != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+/* Function Name:
+ *      rtl8367c_getAsicIGMPRobVar
+ * Description:
+ *      Get Robustness Variable of IGMP/MLD
+ * Input:
+ *      none
+ * Output:
+ *      prob_var     - Robustness Variable
+ * Return:
+ *      RT_ERR_OK       - Success
+ *      RT_ERR_SMI      - SMI access error
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicIGMPRobVar(rtk_uint32 *prob_var)
+{
+    ret_t   retVal;
+    rtk_uint32  value;
+
+    /* Robustness variable */
+    retVal = rtl8367c_getAsicRegBits(RTL8367C_REG_IGMP_MLD_CFG0, RTL8367C_ROBURSTNESS_VAR_MASK, &value);
+    if(retVal != RT_ERR_OK)
+        return retVal;
+
+    *prob_var = value;
+
+    return RT_ERR_OK;
+}
+
+/* Function Name:
+ *      rtl8367c_setAsicIGMPStaticRouterPort
+ * Description:
+ *      Set IGMP static router port mask
+ * Input:
+ *      pmsk    - Static portmask
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - Success
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_PORT_MASK    - Invalid port mask
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_setAsicIGMPStaticRouterPort(rtk_uint32 pmsk)
+{
+    if(pmsk > RTL8367C_PORTMASK)
+        return RT_ERR_PORT_MASK;
+
+    return rtl8367c_setAsicRegBits(RTL8367C_REG_IGMP_STATIC_ROUTER_PORT, RTL8367C_IGMP_STATIC_ROUTER_PORT_MASK, pmsk);
+}
+
+/* Function Name:
+ *      rtl8367c_getAsicIGMPStaticRouterPort
+ * Description:
+ *      Get IGMP static router port mask
+ * Input:
+ *      None
+ * Output:
+ *      pmsk    - Static portmask
+ * Return:
+ *      RT_ERR_OK   - Success
+ *      RT_ERR_SMI  - SMI access error
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicIGMPStaticRouterPort(rtk_uint32 *pmsk)
+{
+    return rtl8367c_getAsicRegBits(RTL8367C_REG_IGMP_STATIC_ROUTER_PORT, RTL8367C_IGMP_STATIC_ROUTER_PORT_MASK, pmsk);
+}
+
+/* Function Name:
+ *      rtl8367c_setAsicIGMPAllowDynamicRouterPort
+ * Description:
+ *      Set IGMP dynamic router port allow mask
+ * Input:
+ *      pmsk    - Allow dynamic router port mask
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - Success
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_PORT_MASK    - Invalid port mask
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_setAsicIGMPAllowDynamicRouterPort(rtk_uint32 pmsk)
+{
+    if(pmsk > RTL8367C_PORTMASK)
+        return RT_ERR_PORT_MASK;
+
+    return rtl8367c_setAsicReg(RTL8367C_REG_IGMP_MLD_CFG4, pmsk);
+}
+
+/* Function Name:
+ *      rtl8367c_getAsicIGMPAllowDynamicRouterPort
+ * Description:
+ *      Get IGMP dynamic router port allow mask
+ * Input:
+ *      None.
+ * Output:
+ *      pPmsk   - Allow dynamic router port mask
+ * Return:
+ *      RT_ERR_OK           - Success
+ *      RT_ERR_SMI          - SMI access error
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicIGMPAllowDynamicRouterPort(rtk_uint32 *pPmsk)
+{
+    return rtl8367c_getAsicReg(RTL8367C_REG_IGMP_MLD_CFG4, pPmsk);
+}
+
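+/*
+ * The switch tracks at most two dynamic router ports, each with its own
+ * timer; the two getters below read slot 1 and slot 2 respectively.
+ */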
+/* Function Name:
+ *      rtl8367c_getAsicIGMPdynamicRouterPort1
+ * Description:
+ *      Get 1st dynamic router port and timer
+ * Input:
+ *      None
+ * Output:
+ *      port    - Dynamic router port number
+ *      timer   - Router port timer
+ * Return:
+ *      RT_ERR_OK   - Success
+ *      RT_ERR_SMI  - SMI access error
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicIGMPdynamicRouterPort1(rtk_uint32 *port, rtk_uint32 *timer)
+{
+    ret_t   retVal;
+
+    retVal = rtl8367c_getAsicRegBits(RTL8367C_REG_IGMP_DYNAMIC_ROUTER_PORT, RTL8367C_D_ROUTER_PORT_1_MASK, port);
+    if(retVal != RT_ERR_OK)
+        return retVal;
+
+    retVal = rtl8367c_getAsicRegBits(RTL8367C_REG_IGMP_DYNAMIC_ROUTER_PORT, RTL8367C_D_ROUTER_PORT_TMR_1_MASK, timer);
+    if(retVal != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+/* Function Name:
+ *      rtl8367c_getAsicIGMPdynamicRouterPort2
+ * Description:
+ *      Get 2nd dynamic router port and timer
+ * Input:
+ *      None
+ * Output:
+ *      port    - Dynamic router port number
+ *      timer   - Router port timer
+ * Return:
+ *      RT_ERR_OK   - Success
+ *      RT_ERR_SMI  - SMI access error
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicIGMPdynamicRouterPort2(rtk_uint32 *port, rtk_uint32 *timer)
+{
+    ret_t   retVal;
+
+    retVal = rtl8367c_getAsicRegBits(RTL8367C_REG_IGMP_DYNAMIC_ROUTER_PORT, RTL8367C_D_ROUTER_PORT_2_MASK, port);
+    if(retVal != RT_ERR_OK)
+        return retVal;
+
+    retVal = rtl8367c_getAsicRegBits(RTL8367C_REG_IGMP_DYNAMIC_ROUTER_PORT, RTL8367C_D_ROUTER_PORT_TMR_2_MASK, timer);
+    if(retVal != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+/* Function Name:
+ *      rtl8367c_setAsicIGMPSuppression
+ * Description:
+ *      Set the suppression function
+ * Input:
+ *      report_supp_enabled     - Report suppression, 1:Enable, 0:disable
+ *      leave_supp_enabled      - Leave suppression, 1:Enable, 0:disable
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK   - Success
+ *      RT_ERR_SMI  - SMI access error
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_setAsicIGMPSuppression(rtk_uint32 report_supp_enabled, rtk_uint32 leave_supp_enabled)
+{
+    ret_t   retVal;
+
+    retVal = rtl8367c_setAsicRegBits(RTL8367C_REG_IGMP_MLD_CFG0, RTL8367C_REPORT_SUPPRESSION_MASK, report_supp_enabled);
+    if(retVal != RT_ERR_OK)
+        return retVal;
+
+    retVal = rtl8367c_setAsicRegBits(RTL8367C_REG_IGMP_MLD_CFG0, RTL8367C_LEAVE_SUPPRESSION_MASK, leave_supp_enabled);
+    if(retVal != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+/* Function Name:
+ *      rtl8367c_getAsicIGMPSuppression
+ * Description:
+ *      Get the suppression function
+ * Input:
+ *      None
+ * Output:
+ *      report_supp_enabled     - Report suppression, 1:Enable, 0:disable
+ *      leave_supp_enabled      - Leave suppression, 1:Enable, 0:disable
+ * Return:
+ *      RT_ERR_OK   - Success
+ *      RT_ERR_SMI  - SMI access error
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicIGMPSuppression(rtk_uint32 *report_supp_enabled, rtk_uint32 *leave_supp_enabled)
+{
+    ret_t   retVal;
+
+    retVal = rtl8367c_getAsicRegBits(RTL8367C_REG_IGMP_MLD_CFG0, RTL8367C_REPORT_SUPPRESSION_MASK, report_supp_enabled);
+    if(retVal != RT_ERR_OK)
+        return retVal;
+
+    retVal = rtl8367c_getAsicRegBits(RTL8367C_REG_IGMP_MLD_CFG0, RTL8367C_LEAVE_SUPPRESSION_MASK, leave_supp_enabled);
+    if(retVal != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
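+/*
+ * The per-port controls below are banked across two register blocks:
+ * ports 0-7 are addressed as RTL8367C_REG_IGMP_PORT0_CONTROL + port,
+ * ports 8-10 as RTL8367C_REG_IGMP_PORT8_CONTROL + (port - 8). Every
+ * accessor from here on repeats this split.
+ */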
+/* Function Name:
+ *      rtl8367c_setAsicIGMPQueryRX
+ * Description:
+ *      Set port-based Query packet RX allowance
+ * Input:
+ *      port            - port number
+ *      allow_query     - allowance of Query packet RX, 1:Allow, 0:Drop
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK       - Success
+ *      RT_ERR_PORT_ID  - Error PORT ID
+ *      RT_ERR_SMI      - SMI access error
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_setAsicIGMPQueryRX(rtk_uint32 port, rtk_uint32 allow_query)
+{
+    ret_t   retVal;
+
+    if(port > RTL8367C_PORTIDMAX)
+        return RT_ERR_PORT_ID;
+
+    /* Allow Query */
+    if (port < 8)
+    {
+        retVal = rtl8367c_setAsicRegBits(RTL8367C_REG_IGMP_PORT0_CONTROL + port, RTL8367C_IGMP_PORT0_CONTROL_ALLOW_QUERY_MASK, allow_query);
+        if(retVal != RT_ERR_OK)
+            return retVal;
+    }
+    else
+    {
+        retVal = rtl8367c_setAsicRegBits(RTL8367C_REG_IGMP_PORT8_CONTROL + port - 8, RTL8367C_IGMP_PORT0_CONTROL_ALLOW_QUERY_MASK, allow_query);
+        if(retVal != RT_ERR_OK)
+            return retVal;
+    }
+
+    return RT_ERR_OK;
+}
+
+/* Function Name:
+ *      rtl8367c_getAsicIGMPQueryRX
+ * Description:
+ *      Get port-based Query packet RX allowance
+ * Input:
+ *      port            - port number
+ * Output:
+ *      allow_query     - allowance of Query packet RX, 1:Allow, 0:Drop
+ * Return:
+ *      RT_ERR_OK       - Success
+ *      RT_ERR_PORT_ID  - Error PORT ID
+ *      RT_ERR_SMI      - SMI access error
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicIGMPQueryRX(rtk_uint32 port, rtk_uint32 *allow_query)
+{
+    ret_t   retVal;
+    rtk_uint32  value;
+
+    if(port > RTL8367C_PORTIDMAX)
+        return RT_ERR_PORT_ID;
+
+    /* Allow Query */
+    if (port < 8)
+    {
+        retVal = rtl8367c_getAsicRegBits(RTL8367C_REG_IGMP_PORT0_CONTROL + port, RTL8367C_IGMP_PORT0_CONTROL_ALLOW_QUERY_MASK, &value);
+        if(retVal != RT_ERR_OK)
+            return retVal;
+    }
+    else
+    {
+        retVal = rtl8367c_getAsicRegBits(RTL8367C_REG_IGMP_PORT8_CONTROL + port - 8, RTL8367C_IGMP_PORT0_CONTROL_ALLOW_QUERY_MASK, &value);
+        if(retVal != RT_ERR_OK)
+            return retVal;
+    }
+    *allow_query = value;
+
+    return RT_ERR_OK;
+}
+
+/* Function Name:
+ *      rtl8367c_setAsicIGMPReportRX
+ * Description:
+ *      Set port-based Report packet RX allowance
+ * Input:
+ *      port            - port number
+ *      allow_report    - allowance of Report packet RX, 1:Allow, 0:Drop
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK       - Success
+ *      RT_ERR_PORT_ID  - Error PORT ID
+ *      RT_ERR_SMI      - SMI access error
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_setAsicIGMPReportRX(rtk_uint32 port, rtk_uint32 allow_report)
+{
+    ret_t   retVal;
+
+    if(port > RTL8367C_PORTIDMAX)
+        return RT_ERR_PORT_ID;
+
+    if(port < 8)
+    {
+    /* Allow Report */
+        retVal = rtl8367c_setAsicRegBits(RTL8367C_REG_IGMP_PORT0_CONTROL + port, RTL8367C_IGMP_PORT0_CONTROL_ALLOW_REPORT_MASK, allow_report);
+        if(retVal != RT_ERR_OK)
+            return retVal;
+    }
+    else
+    {
+        retVal = rtl8367c_setAsicRegBits(RTL8367C_REG_IGMP_PORT8_CONTROL + port - 8, RTL8367C_IGMP_PORT0_CONTROL_ALLOW_REPORT_MASK, allow_report);
+        if(retVal != RT_ERR_OK)
+            return retVal;
+    }
+    return RT_ERR_OK;
+}
+
+/* Function Name:
+ *      rtl8367c_getAsicIGMPReportRX
+ * Description:
+ *      Get port-based Report packet RX allowance
+ * Input:
+ *      port            - port number
+ * Output:
+ *      allow_report    - allowance of Report packet RX, 1:Allow, 0:Drop
+ * Return:
+ *      RT_ERR_OK       - Success
+ *      RT_ERR_PORT_ID  - Error PORT ID
+ *      RT_ERR_SMI      - SMI access error
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicIGMPReportRX(rtk_uint32 port, rtk_uint32 *allow_report)
+{
+    ret_t   retVal;
+    rtk_uint32  value;
+
+    if(port > RTL8367C_PORTIDMAX)
+        return RT_ERR_PORT_ID;
+
+    if(port < 8)
+    {
+        /* Allow Report */
+        retVal = rtl8367c_getAsicRegBits(RTL8367C_REG_IGMP_PORT0_CONTROL + port, RTL8367C_IGMP_PORT0_CONTROL_ALLOW_REPORT_MASK, &value);
+        if(retVal != RT_ERR_OK)
+            return retVal;
+    }
+    else
+    {
+        retVal = rtl8367c_getAsicRegBits(RTL8367C_REG_IGMP_PORT8_CONTROL + port - 8, RTL8367C_IGMP_PORT0_CONTROL_ALLOW_REPORT_MASK, &value);
+        if(retVal != RT_ERR_OK)
+            return retVal;
+    }
+    *allow_report = value;
+
+    return RT_ERR_OK;
+}
+
+/* Function Name:
+ *      rtl8367c_setAsicIGMPLeaveRX
+ * Description:
+ *      Set port-based Leave packet RX allowance
+ * Input:
+ *      port            - port number
+ *      allow_leave     - allowance of Leave packet RX, 1:Allow, 0:Drop
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK       - Success
+ *      RT_ERR_PORT_ID  - Error PORT ID
+ *      RT_ERR_SMI      - SMI access error
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_setAsicIGMPLeaveRX(rtk_uint32 port, rtk_uint32 allow_leave)
+{
+    ret_t   retVal;
+
+    if(port > RTL8367C_PORTIDMAX)
+        return RT_ERR_PORT_ID;
+
+    if(port < 8)
+    {
+        /* Allow Leave */
+        retVal = rtl8367c_setAsicRegBits(RTL8367C_REG_IGMP_PORT0_CONTROL + port, RTL8367C_IGMP_PORT0_CONTROL_ALLOW_LEAVE_MASK, allow_leave);
+        if(retVal != RT_ERR_OK)
+            return retVal;
+    }
+    else
+    {
+        retVal = rtl8367c_setAsicRegBits(RTL8367C_REG_IGMP_PORT8_CONTROL + port - 8, RTL8367C_IGMP_PORT0_CONTROL_ALLOW_LEAVE_MASK, allow_leave);
+        if(retVal != RT_ERR_OK)
+            return retVal;
+    }
+    return RT_ERR_OK;
+}
+
+/* Function Name:
+ *      rtl8367c_getAsicIGMPLeaveRX
+ * Description:
+ *      Get port-based Leave packet RX allowance
+ * Input:
+ *      port            - port number
+ * Output:
+ *      allow_leave     - allowance of Leave packet RX, 1:Allow, 0:Drop
+ * Return:
+ *      RT_ERR_OK       - Success
+ *      RT_ERR_PORT_ID  - Error PORT ID
+ *      RT_ERR_SMI      - SMI access error
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicIGMPLeaveRX(rtk_uint32 port, rtk_uint32 *allow_leave)
+{
+    ret_t   retVal;
+    rtk_uint32  value;
+
+    if(port > RTL8367C_PORTIDMAX)
+        return RT_ERR_PORT_ID;
+
+    if(port < 8)
+    {
+    /* Allow Leave */
+        retVal = rtl8367c_getAsicRegBits(RTL8367C_REG_IGMP_PORT0_CONTROL + port, RTL8367C_IGMP_PORT0_CONTROL_ALLOW_LEAVE_MASK, &value);
+        if(retVal != RT_ERR_OK)
+            return retVal;
+    }
+    else
+    {
+        retVal = rtl8367c_getAsicRegBits(RTL8367C_REG_IGMP_PORT8_CONTROL + port - 8, RTL8367C_IGMP_PORT0_CONTROL_ALLOW_LEAVE_MASK, &value);
+        if(retVal != RT_ERR_OK)
+            return retVal;
+    }
+
+    *allow_leave = value;
+
+    return RT_ERR_OK;
+}
+
+/* Function Name:
+ *      rtl8367c_setAsicIGMPMRPRX
+ * Description:
+ *      Set port-based Multicast Routing Protocol packet RX allowance
+ * Input:
+ *      port            - port number
+ *      allow_mrp       - allowance of Multicast Routing Protocol packet RX, 1:Allow, 0:Drop
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK       - Success
+ *      RT_ERR_PORT_ID  - Error PORT ID
+ *      RT_ERR_SMI      - SMI access error
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_setAsicIGMPMRPRX(rtk_uint32 port, rtk_uint32 allow_mrp)
+{
+    ret_t   retVal;
+
+    if(port > RTL8367C_PORTIDMAX)
+        return RT_ERR_PORT_ID;
+
+    if(port < 8)
+    {
+    /* Allow Multicast Routing Protocol */
+        retVal = rtl8367c_setAsicRegBits(RTL8367C_REG_IGMP_PORT0_CONTROL + port, RTL8367C_IGMP_PORT0_CONTROL_ALLOW_MRP_MASK, allow_mrp);
+        if(retVal != RT_ERR_OK)
+            return retVal;
+    }
+    else
+    {
+        retVal = rtl8367c_setAsicRegBits(RTL8367C_REG_IGMP_PORT8_CONTROL + port - 8, RTL8367C_IGMP_PORT0_CONTROL_ALLOW_MRP_MASK, allow_mrp);
+        if(retVal != RT_ERR_OK)
+            return retVal;
+    }
+    return RT_ERR_OK;
+}
+
+/* Function Name:
+ *      rtl8367c_getAsicIGMPMRPRX
+ * Description:
+ *      Get port-based Multicast Routing Protocol packet RX allowance
+ * Input:
+ *      port            - port number
+ * Output:
+ *      allow_mrp       - allowance of Multicast Routing Protocol packet RX, 1:Allow, 0:Drop
+ * Return:
+ *      RT_ERR_OK       - Success
+ *      RT_ERR_PORT_ID  - Error PORT ID
+ *      RT_ERR_SMI      - SMI access error
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicIGMPMRPRX(rtk_uint32 port, rtk_uint32 *allow_mrp)
+{
+    ret_t   retVal;
+    rtk_uint32  value;
+
+    if(port > RTL8367C_PORTIDMAX)
+        return RT_ERR_PORT_ID;
+
+    /* Allow Multicast Routing Protocol */
+    if(port < 8)
+    {
+        retVal = rtl8367c_getAsicRegBits(RTL8367C_REG_IGMP_PORT0_CONTROL + port, RTL8367C_IGMP_PORT0_CONTROL_ALLOW_MRP_MASK, &value);
+        if(retVal != RT_ERR_OK)
+            return retVal;
+    }
+    else
+    {
+        retVal = rtl8367c_getAsicRegBits(RTL8367C_REG_IGMP_PORT8_CONTROL + port - 8, RTL8367C_IGMP_PORT0_CONTROL_ALLOW_MRP_MASK, &value);
+        if(retVal != RT_ERR_OK)
+            return retVal;
+    }
+    *allow_mrp = value;
+
+    return RT_ERR_OK;
+}
+
+/* Function Name:
+ *      rtl8367c_setAsicIGMPMcDataRX
+ * Description:
+ *      Set port-based Multicast data packet RX allowance
+ * Input:
+ *      port            - port number
+ *      allow_mcdata    - allowance of Multicast data packet RX, 1:Allow, 0:Drop
+ * Output:
+ *      none
+ * Return:
+ *      RT_ERR_OK       - Success
+ *      RT_ERR_PORT_ID  - Error PORT ID
+ *      RT_ERR_SMI      - SMI access error
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_setAsicIGMPMcDataRX(rtk_uint32 port, rtk_uint32 allow_mcdata)
+{
+    ret_t   retVal;
+
+    if(port > RTL8367C_PORTIDMAX)
+        return RT_ERR_PORT_ID;
+
+    /* Allow Multicast Data */
+    if(port < 8)
+    {
+        retVal = rtl8367c_setAsicRegBits(RTL8367C_REG_IGMP_PORT0_CONTROL + port, RTL8367C_IGMP_PORT0_CONTROL_ALLOW_MC_DATA_MASK, allow_mcdata);
+        if(retVal != RT_ERR_OK)
+            return retVal;
+    }
+    else
+    {
+        retVal = rtl8367c_setAsicRegBits(RTL8367C_REG_IGMP_PORT8_CONTROL + port - 8, RTL8367C_IGMP_PORT0_CONTROL_ALLOW_MC_DATA_MASK, allow_mcdata);
+        if(retVal != RT_ERR_OK)
+            return retVal;
+    }
+    return RT_ERR_OK;
+}
+
+/* Function Name:
+ *      rtl8367c_getAsicIGMPMcDataRX
+ * Description:
+ *      Get port-based Multicast data packet RX allowance
+ * Input:
+ *      port            - port number
+ * Output:
+ *      allow_mcdata    - allowance of Multicast data packet RX, 1:Allow, 0:Drop
+ * Return:
+ *      RT_ERR_OK       - Success
+ *      RT_ERR_PORT_ID  - Error PORT ID
+ *      RT_ERR_SMI      - SMI access error
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicIGMPMcDataRX(rtk_uint32 port, rtk_uint32 *allow_mcdata)
+{
+    ret_t   retVal;
+    rtk_uint32  value;
+
+    if(port > RTL8367C_PORTIDMAX)
+        return RT_ERR_PORT_ID;
+
+    /* Allow Multicast data */
+    if(port < 8)
+    {
+        retVal = rtl8367c_getAsicRegBits(RTL8367C_REG_IGMP_PORT0_CONTROL + port, RTL8367C_IGMP_PORT0_CONTROL_ALLOW_MC_DATA_MASK, &value);
+        if(retVal != RT_ERR_OK)
+            return retVal;
+    }
+    else
+    {
+        retVal = rtl8367c_getAsicRegBits(RTL8367C_REG_IGMP_PORT8_CONTROL + port - 8, RTL8367C_IGMP_PORT0_CONTROL_ALLOW_MC_DATA_MASK, &value);
+        if(retVal != RT_ERR_OK)
+            return retVal;
+    }
+
+    *allow_mcdata = value;
+
+    return RT_ERR_OK;
+}
+
+/* Function Name:
+ *      rtl8367c_setAsicIGMPv1Opeartion
+ * Description:
+ *      Set port-based IGMPv1 Control packet action
+ * Input:
+ *      port            - port number
+ *      igmpv1_op       - IGMPv1 control packet action
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK       - Success
+ *      RT_ERR_PORT_ID  - Error PORT ID
+ *      RT_ERR_SMI      - SMI access error
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_setAsicIGMPv1Opeartion(rtk_uint32 port, rtk_uint32 igmpv1_op)
+{
+    ret_t   retVal;
+
+    if(port > RTL8367C_PORTIDMAX)
+        return RT_ERR_PORT_ID;
+
+    if(igmpv1_op >= PROTOCOL_OP_END)
+        return RT_ERR_INPUT;
+
+    /* IGMPv1 operation */
+    if(port < 8)
+    {
+        retVal = rtl8367c_setAsicRegBits(RTL8367C_REG_IGMP_PORT0_CONTROL + port, RTL8367C_IGMP_PORT0_CONTROL_IGMPV1_OP_MASK, igmpv1_op);
+        if(retVal != RT_ERR_OK)
+            return retVal;
+    }
+    else
+    {
+        retVal = rtl8367c_setAsicRegBits(RTL8367C_REG_IGMP_PORT8_CONTROL + port - 8, RTL8367C_IGMP_PORT0_CONTROL_IGMPV1_OP_MASK, igmpv1_op);
+        if(retVal != RT_ERR_OK)
+            return retVal;
+    }
+
+    return RT_ERR_OK;
+}
+
+/* Function Name:
+ *      rtl8367c_getAsicIGMPv1Opeartion
+ * Description:
+ *      Get port-based IGMPv1 Control packet action
+ * Input:
+ *      port            - port number
+ * Output:
+ *      igmpv1_op       - IGMPv1 control packet action
+ * Return:
+ *      RT_ERR_OK       - Success
+ *      RT_ERR_PORT_ID  - Error PORT ID
+ *      RT_ERR_SMI      - SMI access error
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicIGMPv1Opeartion(rtk_uint32 port, rtk_uint32 *igmpv1_op)
+{
+    ret_t   retVal;
+    rtk_uint32  value;
+
+    if(port > RTL8367C_PORTIDMAX)
+        return RT_ERR_PORT_ID;
+
+    /* IGMPv1 operation */
+    if(port < 8)
+    {
+        retVal = rtl8367c_getAsicRegBits(RTL8367C_REG_IGMP_PORT0_CONTROL + port, RTL8367C_IGMP_PORT0_CONTROL_IGMPV1_OP_MASK, &value);
+        if(retVal != RT_ERR_OK)
+            return retVal;
+    }
+    else
+    {
+        retVal = rtl8367c_getAsicRegBits(RTL8367C_REG_IGMP_PORT8_CONTROL + port - 8, RTL8367C_IGMP_PORT0_CONTROL_IGMPV1_OP_MASK, &value);
+        if(retVal != RT_ERR_OK)
+            return retVal;
+    }
+
+    *igmpv1_op = value;
+
+    return RT_ERR_OK;
+}
+
+/* Function Name:
+ *      rtl8367c_setAsicIGMPv2Opeartion
+ * Description:
+ *      Set port-based IGMPv2 Control packet action
+ * Input:
+ *      port            - port number
+ *      igmpv2_op       - IGMPv2 control packet action
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK       - Success
+ *      RT_ERR_PORT_ID  - Error PORT ID
+ *      RT_ERR_SMI      - SMI access error
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_setAsicIGMPv2Opeartion(rtk_uint32 port, rtk_uint32 igmpv2_op)
+{
+    ret_t   retVal;
+
+    if(port > RTL8367C_PORTIDMAX)
+        return RT_ERR_PORT_ID;
+
+    if(igmpv2_op >= PROTOCOL_OP_END)
+        return RT_ERR_INPUT;
+
+    /* IGMPv2 operation */
+    if(port < 8)
+    {
+        retVal = rtl8367c_setAsicRegBits(RTL8367C_REG_IGMP_PORT0_CONTROL + port, RTL8367C_IGMP_PORT0_CONTROL_IGMPV2_OP_MASK, igmpv2_op);
+        if(retVal != RT_ERR_OK)
+            return retVal;
+    }
+    else
+    {
+        retVal = rtl8367c_setAsicRegBits(RTL8367C_REG_IGMP_PORT8_CONTROL + port - 8, RTL8367C_IGMP_PORT0_CONTROL_IGMPV2_OP_MASK, igmpv2_op);
+        if(retVal != RT_ERR_OK)
+            return retVal;
+    }
+
+    return RT_ERR_OK;
+}
+
+/* Function Name:
+ *      rtl8367c_getAsicIGMPv2Opeartion
+ * Description:
+ *      Get port-based IGMPv2 Control packet action
+ * Input:
+ *      port            - port number
+ * Output:
+ *      igmpv2_op       - IGMPv2 control packet action
+ * Return:
+ *      RT_ERR_OK       - Success
+ *      RT_ERR_PORT_ID  - Error PORT ID
+ *      RT_ERR_SMI      - SMI access error
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicIGMPv2Opeartion(rtk_uint32 port, rtk_uint32 *igmpv2_op)
+{
+    ret_t   retVal;
+    rtk_uint32  value;
+
+    if(port > RTL8367C_PORTIDMAX)
+        return RT_ERR_PORT_ID;
+
+    /* IGMPv2 operation */
+    if(port < 8)
+    {
+        retVal = rtl8367c_getAsicRegBits(RTL8367C_REG_IGMP_PORT0_CONTROL + port, RTL8367C_IGMP_PORT0_CONTROL_IGMPV2_OP_MASK, &value);
+        if(retVal != RT_ERR_OK)
+            return retVal;
+    }
+    else
+    {
+        retVal = rtl8367c_getAsicRegBits(RTL8367C_REG_IGMP_PORT8_CONTROL + port - 8, RTL8367C_IGMP_PORT0_CONTROL_IGMPV2_OP_MASK, &value);
+        if(retVal != RT_ERR_OK)
+            return retVal;
+    }
+
+    *igmpv2_op = value;
+
+    return RT_ERR_OK;
+}
+
+/* Function Name:
+ *      rtl8367c_setAsicIGMPv3Opeartion
+ * Description:
+ *      Set port-based IGMPv3 Control packet action
+ * Input:
+ *      port            - port number
+ *      igmpv3_op       - IGMPv3 control packet action
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK       - Success
+ *      RT_ERR_PORT_ID  - Error PORT ID
+ *      RT_ERR_SMI      - SMI access error
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_setAsicIGMPv3Opeartion(rtk_uint32 port, rtk_uint32 igmpv3_op)
+{
+    ret_t   retVal;
+
+    if(port > RTL8367C_PORTIDMAX)
+        return RT_ERR_PORT_ID;
+
+    if(igmpv3_op >= PROTOCOL_OP_END)
+        return RT_ERR_INPUT;
+
+    /* IGMPv3 operation */
+    if(port < 8)
+    {
+        retVal = rtl8367c_setAsicRegBits(RTL8367C_REG_IGMP_PORT0_CONTROL + port, RTL8367C_IGMP_PORT0_CONTROL_IGMPV3_OP_MASK, igmpv3_op);
+        if(retVal != RT_ERR_OK)
+            return retVal;
+    }
+    else
+    {
+        retVal = rtl8367c_setAsicRegBits(RTL8367C_REG_IGMP_PORT8_CONTROL + port - 8, RTL8367C_IGMP_PORT0_CONTROL_IGMPV3_OP_MASK, igmpv3_op);
+        if(retVal != RT_ERR_OK)
+            return retVal;
+    }
+
+    return RT_ERR_OK;
+}
+
+/* Function Name:
+ *      rtl8367c_getAsicIGMPv3Opeartion
+ * Description:
+ *      Get port-based IGMPv3 Control packet action
+ * Input:
+ *      port            - port number
+ * Output:
+ *      igmpv3_op       - IGMPv3 control packet action
+ * Return:
+ *      RT_ERR_OK       - Success
+ *      RT_ERR_PORT_ID  - Error PORT ID
+ *      RT_ERR_SMI      - SMI access error
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicIGMPv3Opeartion(rtk_uint32 port, rtk_uint32 *igmpv3_op)
+{
+    ret_t   retVal;
+    rtk_uint32  value;
+
+    if(port > RTL8367C_PORTIDMAX)
+        return RT_ERR_PORT_ID;
+
+    /* IGMPv3 operation */
+    if(port < 8)
+    {
+        retVal = rtl8367c_getAsicRegBits(RTL8367C_REG_IGMP_PORT0_CONTROL + port, RTL8367C_IGMP_PORT0_CONTROL_IGMPV3_OP_MASK, &value);
+        if(retVal != RT_ERR_OK)
+            return retVal;
+    }
+    else
+    {
+        retVal = rtl8367c_getAsicRegBits(RTL8367C_REG_IGMP_PORT8_CONTROL + port - 8, RTL8367C_IGMP_PORT0_CONTROL_IGMPV3_OP_MASK, &value);
+        if(retVal != RT_ERR_OK)
+            return retVal;
+    }
+
+    *igmpv3_op = value;
+
+    return RT_ERR_OK;
+}
+
+/* Function Name:
+ *      rtl8367c_setAsicMLDv1Opeartion
+ * Description:
+ *      Set port-based MLDv1 Control packet action
+ * Input:
+ *      port            - port number
+ *      mldv1_op        - MLDv1 control packet action
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK       - Success
+ *      RT_ERR_PORT_ID  - Error PORT ID
+ *      RT_ERR_SMI      - SMI access error
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_setAsicMLDv1Opeartion(rtk_uint32 port, rtk_uint32 mldv1_op)
+{
+    ret_t   retVal;
+
+    if(port > RTL8367C_PORTIDMAX)
+        return RT_ERR_PORT_ID;
+
+    if(mldv1_op >= PROTOCOL_OP_END)
+        return RT_ERR_INPUT;
+
+    /* MLDv1 operation */
+    if(port < 8)
+    {
+        retVal = rtl8367c_setAsicRegBits(RTL8367C_REG_IGMP_PORT0_CONTROL + port, RTL8367C_IGMP_PORT0_CONTROL_MLDv1_OP_MASK, mldv1_op);
+        if(retVal != RT_ERR_OK)
+            return retVal;
+    }
+    else
+    {
+        retVal = rtl8367c_setAsicRegBits(RTL8367C_REG_IGMP_PORT8_CONTROL + port - 8, RTL8367C_IGMP_PORT0_CONTROL_MLDv1_OP_MASK, mldv1_op);
+        if(retVal != RT_ERR_OK)
+            return retVal;
+    }
+
+    return RT_ERR_OK;
+}
+
+/* Function Name:
+ *      rtl8367c_getAsicMLDv1Opeartion
+ * Description:
+ *      Get port-based MLDv1 Control packet action
+ * Input:
+ *      port            - port number
+ * Output:
+ *      mldv1_op        - MLDv1 control packet action
+ * Return:
+ *      RT_ERR_OK       - Success
+ *      RT_ERR_PORT_ID  - Error PORT ID
+ *      RT_ERR_SMI      - SMI access error
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicMLDv1Opeartion(rtk_uint32 port, rtk_uint32 *mldv1_op)
+{
+    ret_t   retVal;
+    rtk_uint32  value;
+
+    if(port > RTL8367C_PORTIDMAX)
+        return RT_ERR_PORT_ID;
+
+    /* MLDv1 operation */
+    if(port < 8)
+    {
+        retVal = rtl8367c_getAsicRegBits(RTL8367C_REG_IGMP_PORT0_CONTROL + port, RTL8367C_IGMP_PORT0_CONTROL_MLDv1_OP_MASK, &value);
+        if(retVal != RT_ERR_OK)
+            return retVal;
+    }
+    else
+    {
+        retVal = rtl8367c_getAsicRegBits(RTL8367C_REG_IGMP_PORT8_CONTROL + port - 8, RTL8367C_IGMP_PORT0_CONTROL_MLDv1_OP_MASK, &value);
+        if(retVal != RT_ERR_OK)
+            return retVal;
+    }
+
+    *mldv1_op = value;
+
+    return RT_ERR_OK;
+}
+
+/* Function Name:
+ *      rtl8367c_setAsicMLDv2Opeartion
+ * Description:
+ *      Set port-based MLDv2 Control packet action
+ * Input:
+ *      port            - port number
+ *      mldv2_op        - MLDv2 control packet action
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK       - Success
+ *      RT_ERR_PORT_ID  - Error PORT ID
+ *      RT_ERR_SMI      - SMI access error
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_setAsicMLDv2Opeartion(rtk_uint32 port, rtk_uint32 mldv2_op)
+{
+    ret_t   retVal;
+
+    if(port > RTL8367C_PORTIDMAX)
+        return RT_ERR_PORT_ID;
+
+    if(mldv2_op >= PROTOCOL_OP_END)
+        return RT_ERR_INPUT;
+
+    /* MLDv2 operation */
+    if(port < 8)
+    {
+        retVal = rtl8367c_setAsicRegBits(RTL8367C_REG_IGMP_PORT0_CONTROL + port, RTL8367C_IGMP_PORT0_CONTROL_MLDv2_OP_MASK, mldv2_op);
+        if(retVal != RT_ERR_OK)
+            return retVal;
+    }
+    else
+    {
+        retVal = rtl8367c_setAsicRegBits(RTL8367C_REG_IGMP_PORT8_CONTROL + port - 8, RTL8367C_IGMP_PORT0_CONTROL_MLDv2_OP_MASK, mldv2_op);
+        if(retVal != RT_ERR_OK)
+            return retVal;
+    }
+
+    return RT_ERR_OK;
+}
+
+/* Function Name:
+ *      rtl8367c_getAsicMLDv2Opeartion
+ * Description:
+ *      Get port-based MLDv2 Control packet action
+ * Input:
+ *      port            - port number
+ * Output:
+ *      mldv2_op        - MLDv2 control packet action
+ * Return:
+ *      RT_ERR_OK       - Success
+ *      RT_ERR_PORT_ID  - Error PORT ID
+ *      RT_ERR_SMI      - SMI access error
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicMLDv2Opeartion(rtk_uint32 port, rtk_uint32 *mldv2_op)
+{
+    ret_t   retVal;
+    rtk_uint32  value;
+
+    if(port > RTL8367C_PORTIDMAX)
+        return RT_ERR_PORT_ID;
+
+    /* MLDv2 operation */
+    if(port < 8)
+    {
+        retVal = rtl8367c_getAsicRegBits(RTL8367C_REG_IGMP_PORT0_CONTROL + port, RTL8367C_IGMP_PORT0_CONTROL_MLDv2_OP_MASK, &value);
+        if(retVal != RT_ERR_OK)
+            return retVal;
+    }
+    else
+    {
+        retVal = rtl8367c_getAsicRegBits(RTL8367C_REG_IGMP_PORT8_CONTROL + port - 8, RTL8367C_IGMP_PORT0_CONTROL_MLDv2_OP_MASK, &value);
+        if(retVal != RT_ERR_OK)
+            return retVal;
+    }
+
+    *mldv2_op = value;
+
+    return RT_ERR_OK;
+}
+
+/* Function Name:
+ *      rtl8367c_setAsicIGMPPortMAXGroup
+ * Description:
+ *      Set per-port Max group number
+ * Input:
+ *      port        - Physical port number (0~10)
+ *      max_group   - max IGMP group
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - Success
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_PORT_ID      - Invalid port number
+ *      RT_ERR_OUT_OF_RANGE - input parameter out of range
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_setAsicIGMPPortMAXGroup(rtk_uint32 port, rtk_uint32 max_group)
+{
+    ret_t retVal;
+
+    if(port > RTL8367C_PORTIDMAX)
+        return RT_ERR_PORT_ID;
+
+    if(max_group > RTL8367C_IGMP_MAX_GOUP)
+        return RT_ERR_OUT_OF_RANGE;
+
+    if(port < 8)
+    {
+        retVal = rtl8367c_setAsicRegBits(RTL8367C_REG_IGMP_PORT01_MAX_GROUP + (port/2), RTL8367C_PORT0_MAX_GROUP_MASK << (RTL8367C_PORT1_MAX_GROUP_OFFSET * (port%2)), max_group);
+        if(retVal != RT_ERR_OK)
+            return retVal;
+    }
+    else
+    {
+        retVal = rtl8367c_setAsicRegBits(RTL8367C_REG_IGMP_PORT89_MAX_GROUP + ((port - 8)/2), RTL8367C_PORT0_MAX_GROUP_MASK << (RTL8367C_PORT1_MAX_GROUP_OFFSET * (port%2)), max_group);
+        if(retVal != RT_ERR_OK)
+            return retVal;
+    }
+
+    return RT_ERR_OK;
+}
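+/*
+ * Addressing note (editorial sketch): the max-group limits are packed two
+ * ports per register, so the register offset is port/2 and the field is
+ * selected by port%2. Capping port 5 (register base + 2, upper field) at
+ * 16 groups, for example:
+ */
+#if 0 /* example only */
+static ret_t example_limit_port5_groups(void)
+{
+    return rtl8367c_setAsicIGMPPortMAXGroup(5, 16);
+}
+#endif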
+/* Function Name:
+ *      rtl8367c_getAsicIGMPPortMAXGroup
+ * Description:
+ *      Get per-port Max group number
+ * Input:
+ *      port        - Physical port number (0~10)
+ * Output:
+ *      max_group   - max IGMP group
+ * Return:
+ *      RT_ERR_OK           - Success
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_PORT_ID      - Invalid port number
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicIGMPPortMAXGroup(rtk_uint32 port, rtk_uint32 *max_group)
+{
+    ret_t   retVal;
+    rtk_uint32  value;
+
+    if(port > RTL8367C_PORTIDMAX)
+        return RT_ERR_PORT_ID;
+
+    if(port < 8)
+    {
+        retVal = rtl8367c_getAsicRegBits(RTL8367C_REG_IGMP_PORT01_MAX_GROUP + (port/2), RTL8367C_PORT0_MAX_GROUP_MASK << (RTL8367C_PORT1_MAX_GROUP_OFFSET * (port%2)), &value);
+        if(retVal != RT_ERR_OK)
+            return retVal;
+    }
+    else
+    {
+        retVal = rtl8367c_getAsicRegBits(RTL8367C_REG_IGMP_PORT89_MAX_GROUP + ((port - 8)/2), RTL8367C_PORT0_MAX_GROUP_MASK << (RTL8367C_PORT1_MAX_GROUP_OFFSET * (port%2)), &value);
+        if(retVal != RT_ERR_OK)
+            return retVal;
+    }
+
+    *max_group = value;
+    return RT_ERR_OK;
+}
+/* Function Name:
+ *      rtl8367c_getAsicIGMPPortCurrentGroup
+ * Description:
+ *      Get per-port current group number
+ * Input:
+ *      port            - Physical port number (0~10)
+ * Output:
+ *      current_group   - current IGMP group
+ * Return:
+ *      RT_ERR_OK           - Success
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_PORT_ID      - Invalid port number
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicIGMPPortCurrentGroup(rtk_uint32 port, rtk_uint32 *current_group)
+{
+    ret_t   retVal;
+    rtk_uint32  value;
+
+    if(port > RTL8367C_PORTIDMAX)
+        return RT_ERR_PORT_ID;
+
+    if(port < 8)
+    {
+        retVal = rtl8367c_getAsicRegBits(RTL8367C_REG_IGMP_PORT01_CURRENT_GROUP + (port/2), RTL8367C_PORT0_CURRENT_GROUP_MASK << (RTL8367C_PORT1_CURRENT_GROUP_OFFSET * (port%2)), &value);
+        if(retVal != RT_ERR_OK)
+            return retVal;
+    }
+    else
+    {
+        retVal = rtl8367c_getAsicRegBits(RTL8367C_REG_IGMP_PORT89_CURRENT_GROUP + ((port - 8)/2), RTL8367C_PORT0_CURRENT_GROUP_MASK << (RTL8367C_PORT1_CURRENT_GROUP_OFFSET * (port%2)), &value);
+        if(retVal != RT_ERR_OK)
+            return retVal;
+    }
+
+    *current_group = value;
+    return RT_ERR_OK;
+}
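+/*
+ * Example only: the current-group counter pairs naturally with the
+ * per-port limit above, e.g. to poll how much headroom a port has left:
+ */
+#if 0 /* example only */
+static ret_t example_port_group_headroom(rtk_uint32 port, rtk_uint32 *pFree)
+{
+    ret_t retVal;
+    rtk_uint32 cur, limit;
+
+    if((retVal = rtl8367c_getAsicIGMPPortCurrentGroup(port, &cur)) != RT_ERR_OK)
+        return retVal;
+    if((retVal = rtl8367c_getAsicIGMPPortMAXGroup(port, &limit)) != RT_ERR_OK)
+        return retVal;
+
+    *pFree = (limit > cur) ? (limit - cur) : 0;
+    return RT_ERR_OK;
+}
+#endif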
+/* Function Name:
+ *      rtl8367c_getAsicIGMPGroup
+ * Description:
+ *      Get IGMP group
+ * Input:
+ *      idx     - Group index (0~255)
+ * Output:
+ *      valid   - valid bit
+ *      grp     - IGMP group
+ * Return:
+ *      RT_ERR_OK               - Success
+ *      RT_ERR_SMI              - SMI access error
+ *      RT_ERR_OUT_OF_RANGE     - Group index is out of range
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicIGMPGroup(rtk_uint32 idx, rtk_uint32 *valid, rtl8367c_igmpgroup *grp)
+{
+    ret_t   retVal;
+    rtk_uint32  regAddr, regData;
+    rtk_uint32  i;
+    rtk_uint32  groupInfo = 0;
+
+    if(idx > RTL8367C_IGMP_MAX_GOUP)
+        return RT_ERR_OUT_OF_RANGE;
+
+    /* Write ACS_ADR register for data bits */
+    regAddr = RTL8367C_TABLE_ACCESS_ADDR_REG;
+    regData = idx;
+    retVal = rtl8367c_setAsicReg(regAddr, regData);
+    if(retVal != RT_ERR_OK)
+        return retVal;
+
+    /* Write ACS_CMD register */
+    regAddr = RTL8367C_TABLE_ACCESS_CTRL_REG;
+    regData = RTL8367C_TABLE_ACCESS_REG_DATA(TB_OP_READ, TB_TARGET_IGMP_GROUP);
+    retVal = rtl8367c_setAsicRegBits(regAddr, RTL8367C_TABLE_TYPE_MASK | RTL8367C_COMMAND_TYPE_MASK, regData);
+    if(retVal != RT_ERR_OK)
+        return retVal;
+
+    /* Read Data Bits */
+    regAddr = RTL8367C_TABLE_ACCESS_RDDATA_BASE;
+    for(i = 0 ;i <= 1; i++)
+    {
+        retVal = rtl8367c_getAsicReg(regAddr, &regData);
+        if(retVal != RT_ERR_OK)
+            return retVal;
+
+        groupInfo |= ((regData & 0xFFFF) << (i * 16));
+        regAddr ++;
+    }
+
+    grp->p0_timer = groupInfo & 0x00000007;
+    grp->p1_timer = (groupInfo >> 3) & 0x00000007;
+    grp->p2_timer = (groupInfo >> 6) & 0x00000007;
+    grp->p3_timer = (groupInfo >> 9) & 0x00000007;
+    grp->p4_timer = (groupInfo >> 12) & 0x00000007;
+    grp->p5_timer = (groupInfo >> 15) & 0x00000007;
+    grp->p6_timer = (groupInfo >> 18) & 0x00000007;
+    grp->p7_timer = (groupInfo >> 21) & 0x00000007;
+    grp->report_supp_flag = (groupInfo >> 24) & 0x00000001;
+    grp->p8_timer = (groupInfo >> 25) & 0x00000007;
+    grp->p9_timer = (groupInfo >> 28) & 0x00000007;
+    grp->p10_timer = (groupInfo >> 31) & 0x00000001;
+
+    regAddr = RTL8367C_TABLE_ACCESS_RDDATA_BASE + 2;
+    retVal = rtl8367c_getAsicReg(regAddr, &regData);
+    if(retVal != RT_ERR_OK)
+        return retVal;
+
+    grp->p10_timer |= (regData & 0x00000003) << 1;
+
+    /* Valid bit */
+    retVal = rtl8367c_getAsicReg(RTL8367C_IGMP_GROUP_USAGE_REG(idx), &regData);
+    if(retVal != RT_ERR_OK)
+        return retVal;
+
+    *valid = ((regData & (0x0001 << (idx %16))) != 0) ? 1 : 0;
+
+    return RT_ERR_OK;
+}
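+/*
+ * The group table is read through the indirect table-access interface:
+ * the entry index goes into ACS_ADR, a READ command for
+ * TB_TARGET_IGMP_GROUP into ACS_CMD, and the 34 bits of group data (3-bit
+ * member timers for ports 0~10 plus the report-suppression flag) come
+ * back through the RDDATA registers. A hypothetical scan of all 256
+ * entries (example only):
+ */
+#if 0 /* example only */
+static rtk_uint32 example_count_active_groups(void)
+{
+    rtk_uint32 idx, valid, count = 0;
+    rtl8367c_igmpgroup grp;
+
+    for(idx = 0; idx <= RTL8367C_IGMP_MAX_GOUP; idx++)
+    {
+        if(rtl8367c_getAsicIGMPGroup(idx, &valid, &grp) != RT_ERR_OK)
+            break;
+        if(valid)
+            count++;    /* non-zero p*_timer fields mark member ports */
+    }
+    return count;
+}
+#endif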
+/* Function Name:
+ *      rtl8367c_setAsicIpMulticastPortIsoLeaky
+ * Description:
+ *      Set IP multicast Port Isolation leaky
+ * Input:
+ *      port        - Physical port number (0~10)
+ *      enabled     - 1: enabled, 0: disabled
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK       - Success
+ *      RT_ERR_SMI      - SMI access error
+ *      RT_ERR_PORT_ID  - Invalid port number
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_setAsicIpMulticastPortIsoLeaky(rtk_uint32 port, rtk_uint32 enabled)
+{
+    ret_t   retVal;
+
+    if(port > RTL8367C_PORTIDMAX)
+        return RT_ERR_PORT_ID;
+
+    retVal = rtl8367c_setAsicRegBits(RTL8367C_IPMCAST_PORTISO_LEAKY_REG, (0x0001 << port), enabled);
+    if(retVal != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+/* Function Name:
+ *      rtl8367c_getAsicIpMulticastPortIsoLeaky
+ * Description:
+ *      Get IP multicast Port Isolation leaky
+ * Input:
+ *      port        - Physical port number (0~10)
+ * Output:
+ *      enabled     - 1: enabled, 0: disabled
+ * Return:
+ *      RT_ERR_OK       - Success
+ *      RT_ERR_SMI      - SMI access error
+ *      RT_ERR_PORT_ID  - Invalid port number
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicIpMulticastPortIsoLeaky(rtk_uint32 port, rtk_uint32 *enabled)
+{
+    ret_t   retVal;
+    rtk_uint32  regData;
+
+    if(port > RTL8367C_PORTIDMAX)
+        return RT_ERR_PORT_ID;
+
+    retVal = rtl8367c_getAsicRegBits(RTL8367C_IPMCAST_PORTISO_LEAKY_REG, (0x0001 << port), &regData);
+    if(retVal != RT_ERR_OK)
+        return retVal;
+
+    *enabled = regData;
+    return RT_ERR_OK;
+}
+
+/* Function Name:
+ *      rtl8367c_setAsicIGMPReportLeaveFlood
+ * Description:
+ *      Set IGMP/MLD Report/Leave flood
+ * Input:
+ *      flood   - 0: Reserved, 1: flooding to router ports, 2: flooding to all ports, 3: flooding to router port or to all ports if there is no router port
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK       - Success
+ *      RT_ERR_SMI      - SMI access error
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_setAsicIGMPReportLeaveFlood(rtk_uint32 flood)
+{
+    ret_t   retVal;
+
+    retVal = rtl8367c_setAsicRegBits(RTL8367C_REG_IGMP_MLD_CFG3, RTL8367C_REPORT_LEAVE_FORWARD_MASK, flood);
+    if(retVal != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+/* Function Name:
+ *      rtl8367c_getAsicIGMPReportLeaveFlood
+ * Description:
+ *      Get IGMP/MLD Report/Leave flood
+ * Input:
+ *      None
+ * Output:
+ *      pflood  - 0: Reserved, 1: flooding to router ports, 2: flooding to all ports, 3: flooding to router port or to all ports if there is no router port
+ * Return:
+ *      RT_ERR_OK       - Success
+ *      RT_ERR_SMI      - SMI access error
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicIGMPReportLeaveFlood(rtk_uint32 *pFlood)
+{
+    ret_t   retVal;
+    rtk_uint32  regData;
+
+    retVal = rtl8367c_getAsicRegBits(RTL8367C_REG_IGMP_MLD_CFG3, RTL8367C_REPORT_LEAVE_FORWARD_MASK, &regData);
+    if(retVal != RT_ERR_OK)
+        return retVal;
+
+    *pFlood = regData;
+    return RT_ERR_OK;
+}
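+/*
+ * Example only: mode 1 is the usual snooping setup - membership reports
+ * and leaves are forwarded to router ports only, instead of being
+ * flooded to every member port:
+ */
+#if 0 /* example only */
+static ret_t example_reports_to_router_ports(void)
+{
+    return rtl8367c_setAsicIGMPReportLeaveFlood(1);
+}
+#endif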
+
+/* Function Name:
+ *      rtl8367c_setAsicIGMPDropLeaveZero
+ * Description:
+ *      Set the function of dropping Leave packets with group IP = 0.0.0.0
+ * Input:
+ *      drop    - 1: Drop, 0:Bypass
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK       - Success
+ *      RT_ERR_SMI      - SMI access error
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_setAsicIGMPDropLeaveZero(rtk_uint32 drop)
+{
+    ret_t   retVal;
+
+    retVal = rtl8367c_setAsicRegBit(RTL8367C_REG_IGMP_MLD_CFG1, RTL8367C_DROP_LEAVE_ZERO_OFFSET, drop);
+    if(retVal != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+/* Function Name:
+ *      rtl8367c_getAsicIGMPDropLeaveZero
+ * Description:
+ *      Get the function of dropping Leave packets with group IP = 0.0.0.0
+ * Input:
+ *      None
+ * Output:
+ *      pDrop    - 1: Drop, 0:Bypass
+ * Return:
+ *      RT_ERR_OK       - Success
+ *      RT_ERR_SMI      - SMI access error
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicIGMPDropLeaveZero(rtk_uint32 *pDrop)
+{
+    ret_t   retVal;
+    rtk_uint32  regData;
+
+    retVal = rtl8367c_getAsicRegBit(RTL8367C_REG_IGMP_MLD_CFG1, RTL8367C_DROP_LEAVE_ZERO_OFFSET, &regData);
+    if(retVal != RT_ERR_OK)
+        return retVal;
+
+    *pDrop = regData;
+    return RT_ERR_OK;
+}
+
+/* Function Name:
+ *      rtl8367c_setAsicIGMPBypassStormCTRL
+ * Description:
+ *      Set the function of bypassing storm control for IGMP/MLD packets
+ * Input:
+ *      bypass    - 1: Bypass, 0:not bypass
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK       - Success
+ *      RT_ERR_SMI      - SMI access error
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_setAsicIGMPBypassStormCTRL(rtk_uint32 bypass)
+{
+    ret_t   retVal;
+
+    retVal = rtl8367c_setAsicRegBit(RTL8367C_REG_IGMP_MLD_CFG0, RTL8367C_IGMP_MLD_DISCARD_STORM_FILTER_OFFSET, bypass);
+    if(retVal != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+/* Function Name:
+ *      rtl8367c_getAsicIGMPBypassStormCTRL
+ * Description:
+ *      Get the function of bypassing storm control for IGMP/MLD packets
+ * Input:
+ *      None
+ * Output:
+ *      pBypass    - 1: Bypass, 0:not bypass
+ * Return:
+ *      RT_ERR_OK       - Success
+ *      RT_ERR_SMI      - SMI access error
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicIGMPBypassStormCTRL(rtk_uint32 *pBypass)
+{
+    ret_t   retVal;
+    rtk_uint32  regData;
+
+    retVal = rtl8367c_getAsicRegBit(RTL8367C_REG_IGMP_MLD_CFG0, RTL8367C_IGMP_MLD_DISCARD_STORM_FILTER_OFFSET, &regData);
+    if(retVal != RT_ERR_OK)
+        return retVal;
+
+    *pBypass = regData;
+    return RT_ERR_OK;
+}
+
+/* Function Name:
+ *      rtl8367c_setAsicIGMPIsoLeaky
+ * Description:
+ *      Set Port Isolation leaky for IGMP/MLD packet
+ * Input:
+ *      leaky    - 1: Leaky, 0:not leaky
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK       - Success
+ *      RT_ERR_SMI      - SMI access error
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_setAsicIGMPIsoLeaky(rtk_uint32 leaky)
+{
+    ret_t   retVal;
+
+    retVal = rtl8367c_setAsicRegBit(RTL8367C_REG_IGMP_MLD_CFG0, RTL8367C_IGMP_MLD_PORTISO_LEAKY_OFFSET, leaky);
+    if(retVal != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+/* Function Name:
+ *      rtl8367c_getAsicIGMPIsoLeaky
+ * Description:
+ *      Get Port Isolation leaky for IGMP/MLD packet
+ * Input:
+ *      None
+ * Output:
+ *      pLeaky    - 1: Leaky, 0:not leaky
+ * Return:
+ *      RT_ERR_OK       - Success
+ *      RT_ERR_SMI      - SMI access error
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicIGMPIsoLeaky(rtk_uint32 *pLeaky)
+{
+    ret_t   retVal;
+    rtk_uint32  regData;
+
+    retVal = rtl8367c_getAsicRegBit(RTL8367C_REG_IGMP_MLD_CFG0, RTL8367C_IGMP_MLD_PORTISO_LEAKY_OFFSET, &regData);
+    if(retVal != RT_ERR_OK)
+        return retVal;
+
+    *pLeaky = regData;
+    return RT_ERR_OK;
+}
+
+/* Function Name:
+ *      rtl8367c_setAsicIGMPVLANLeaky
+ * Description:
+ *      Set VLAN leaky for IGMP/MLD packet
+ * Input:
+ *      leaky    - 1: Leaky, 0:not leaky
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK       - Success
+ *      RT_ERR_SMI      - SMI access error
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_setAsicIGMPVLANLeaky(rtk_uint32 leaky)
+{
+    ret_t   retVal;
+
+    retVal = rtl8367c_setAsicRegBit(RTL8367C_REG_IGMP_MLD_CFG0, RTL8367C_IGMP_MLD_VLAN_LEAKY_OFFSET, leaky);
+    if(retVal != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+/* Function Name:
+ *      rtl8367c_getAsicIGMPVLANLeaky
+ * Description:
+ *      Get VLAN leaky for IGMP/MLD packet
+ * Input:
+ *      None
+ * Output:
+ *      pLeaky    - 1: Leaky, 0:not leaky
+ * Return:
+ *      RT_ERR_OK       - Success
+ *      RT_ERR_SMI      - SMI access error
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicIGMPVLANLeaky(rtk_uint32 *pLeaky)
+{
+    ret_t   retVal;
+    rtk_uint32  regData;
+
+    retVal = rtl8367c_getAsicRegBit(RTL8367C_REG_IGMP_MLD_CFG0, RTL8367C_IGMP_MLD_VLAN_LEAKY_OFFSET, &regData);
+    if(retVal != RT_ERR_OK)
+        return retVal;
+
+    *pLeaky = regData;
+    return RT_ERR_OK;
+}
+
+/* Function Name:
+ *      rtl8367c_setAsicIGMPBypassGroup
+ * Description:
+ *      Set IGMP/MLD Bypass group
+ * Input:
+ *      bypassType  - Bypass type
+ *      enabled     - 1: enabled, 0: disabled
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK       - Success
+ *      RT_ERR_SMI      - SMI access error
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_setAsicIGMPBypassGroup(rtk_uint32 bypassType, rtk_uint32 enabled)
+{
+    ret_t   retVal;
+    rtk_uint32 offset;
+
+    switch(bypassType)
+    {
+        case BYPASS_224_0_0_X:
+            offset = RTL8367C_IGMP_MLD_IP4_BYPASS_224_0_0_OFFSET;
+            break;
+        case BYPASS_224_0_1_X:
+            offset = RTL8367C_IGMP_MLD_IP4_BYPASS_224_0_1_OFFSET;
+            break;
+        case BYPASS_239_255_255_X:
+            offset = RTL8367C_IGMP_MLD_IP4_BYPASS_239_255_255_OFFSET;
+            break;
+        case BYPASS_IPV6_00XX:
+            offset = RTL8367C_IGMP_MLD_IP6_BYPASS_OFFSET;
+            break;
+        default:
+            return RT_ERR_INPUT;
+    }
+
+    retVal = rtl8367c_setAsicRegBit(RTL8367C_REG_IGMP_MLD_CFG3, offset, enabled);
+    if(retVal != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+/* Function Name:
+ *      rtl8367c_getAsicIGMPBypassGroup
+ * Description:
+ *      Get IGMP/MLD Bypass group
+ * Input:
+ *      bypassType  - Bypass type
+ * Output:
+ *      pEnabled    - 1: enabled, 0: disabled
+ * Return:
+ *      RT_ERR_OK       - Success
+ *      RT_ERR_SMI      - SMI access error
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicIGMPBypassGroup(rtk_uint32 bypassType, rtk_uint32 *pEnabled)
+{
+    ret_t   retVal;
+    rtk_uint32 offset;
+
+    switch(bypassType)
+    {
+        case BYPASS_224_0_0_X:
+            offset = RTL8367C_IGMP_MLD_IP4_BYPASS_224_0_0_OFFSET;
+            break;
+        case BYPASS_224_0_1_X:
+            offset = RTL8367C_IGMP_MLD_IP4_BYPASS_224_0_1_OFFSET;
+            break;
+        case BYPASS_239_255_255_X:
+            offset = RTL8367C_IGMP_MLD_IP4_BYPASS_239_255_255_OFFSET;
+            break;
+        case BYPASS_IPV6_00XX:
+            offset = RTL8367C_IGMP_MLD_IP6_BYPASS_OFFSET;
+            break;
+        default:
+            return RT_ERR_INPUT;
+    }
+
+    retVal = rtl8367c_getAsicRegBit(RTL8367C_REG_IGMP_MLD_CFG3, offset, pEnabled);
+    if(retVal != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
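+/*
+ * Example only: link-local control ranges such as 224.0.0.x are rarely
+ * worth snooping; they can be exempted from IGMP processing with the
+ * bypass switch above:
+ */
+#if 0 /* example only */
+static ret_t example_bypass_link_local_groups(void)
+{
+    return rtl8367c_setAsicIGMPBypassGroup(BYPASS_224_0_0_X, 1);
+}
+#endif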
+
diff -Nruw linux-4.4.115-fbx/drivers/misc/freebox./rtlapi/rtl8367c_asicdrv_igmp.h linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/rtl8367c_asicdrv_igmp.h
--- linux-4.4.115-fbx/drivers/misc/freebox./rtlapi/rtl8367c_asicdrv_igmp.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/rtl8367c_asicdrv_igmp.h	2019-01-22 16:16:24.715257382 +0100
@@ -0,0 +1,134 @@
+#ifndef _RTL8367C_ASICDRV_IGMP_H_
+#define _RTL8367C_ASICDRV_IGMP_H_
+
+/****************************************************************/
+/* Header File inclusion                                        */
+/****************************************************************/
+#include <rtl8367c_asicdrv.h>
+
+#define RTL8367C_MAX_LEAVE_TIMER        (7)
+#define RTL8367C_MAX_QUERY_INT          (0xFFFF)
+#define RTL8367C_MAX_ROB_VAR            (7)
+
+#define RTL8367C_IGMP_GOUP_NO           (256)
+#define RTL8367C_IGMP_MAX_GOUP          (0xFF)
+#define RTL8367C_IGMP_GRP_BLEN          (3)
+#define RTL8367C_ROUTER_PORT_INVALID    (0xF)
+
+enum RTL8367C_IGMPTABLE_FULL_OP
+{
+    TABLE_FULL_FORWARD = 0,
+    TABLE_FULL_DROP,
+    TABLE_FULL_TRAP,
+    TABLE_FULL_OP_END
+};
+
+enum RTL8367C_CRC_ERR_OP
+{
+    CRC_ERR_DROP = 0,
+    CRC_ERR_TRAP,
+    CRC_ERR_FORWARD,
+    CRC_ERR_OP_END
+};
+
+enum RTL8367C_IGMP_MLD_PROTOCOL_OP
+{
+    PROTOCOL_OP_ASIC = 0,
+    PROTOCOL_OP_FLOOD,
+    PROTOCOL_OP_TRAP,
+    PROTOCOL_OP_DROP,
+    PROTOCOL_OP_END
+};
+
+enum RTL8367C_IGMP_MLD_BYPASS_GROUP
+{
+    BYPASS_224_0_0_X = 0,
+    BYPASS_224_0_1_X,
+    BYPASS_239_255_255_X,
+    BYPASS_IPV6_00XX,
+    BYPASS_GROUP_END
+};
+
+typedef struct
+{
+    rtk_uint32 p0_timer;
+    rtk_uint32 p1_timer;
+    rtk_uint32 p2_timer;
+    rtk_uint32 p3_timer;
+    rtk_uint32 p4_timer;
+    rtk_uint32 p5_timer;
+    rtk_uint32 p6_timer;
+    rtk_uint32 p7_timer;
+    rtk_uint32 p8_timer;
+    rtk_uint32 p9_timer;
+    rtk_uint32 p10_timer;
+    rtk_uint32 report_supp_flag;
+
+}rtl8367c_igmpgroup;
+
+
+ret_t rtl8367c_setAsicIgmp(rtk_uint32 enabled);
+ret_t rtl8367c_getAsicIgmp(rtk_uint32 *pEnabled);
+ret_t rtl8367c_setAsicIpMulticastVlanLeaky(rtk_uint32 port, rtk_uint32 enabled );
+ret_t rtl8367c_getAsicIpMulticastVlanLeaky(rtk_uint32 port, rtk_uint32 *pEnabled );
+ret_t rtl8367c_setAsicIGMPTableFullOP(rtk_uint32 operation);
+ret_t rtl8367c_getAsicIGMPTableFullOP(rtk_uint32 *pOperation);
+ret_t rtl8367c_setAsicIGMPCRCErrOP(rtk_uint32 operation);
+ret_t rtl8367c_getAsicIGMPCRCErrOP(rtk_uint32 *pOperation);
+ret_t rtl8367c_setAsicIGMPFastLeaveEn(rtk_uint32 enabled);
+ret_t rtl8367c_getAsicIGMPFastLeaveEn(rtk_uint32 *pEnabled);
+ret_t rtl8367c_setAsicIGMPLeaveTimer(rtk_uint32 leave_timer);
+ret_t rtl8367c_getAsicIGMPLeaveTimer(rtk_uint32 *pLeave_timer);
+ret_t rtl8367c_setAsicIGMPQueryInterval(rtk_uint32 interval);
+ret_t rtl8367c_getAsicIGMPQueryInterval(rtk_uint32 *pInterval);
+ret_t rtl8367c_setAsicIGMPRobVar(rtk_uint32 rob_var);
+ret_t rtl8367c_getAsicIGMPRobVar(rtk_uint32 *pRob_var);
+ret_t rtl8367c_setAsicIGMPStaticRouterPort(rtk_uint32 pmsk);
+ret_t rtl8367c_getAsicIGMPStaticRouterPort(rtk_uint32 *pMsk);
+ret_t rtl8367c_setAsicIGMPAllowDynamicRouterPort(rtk_uint32 pmsk);
+ret_t rtl8367c_getAsicIGMPAllowDynamicRouterPort(rtk_uint32 *pPmsk);
+ret_t rtl8367c_getAsicIGMPdynamicRouterPort1(rtk_uint32 *pPort, rtk_uint32 *pTimer);
+ret_t rtl8367c_getAsicIGMPdynamicRouterPort2(rtk_uint32 *pPort, rtk_uint32 *pTimer);
+ret_t rtl8367c_setAsicIGMPSuppression(rtk_uint32 report_supp_enabled, rtk_uint32 leave_supp_enabled);
+ret_t rtl8367c_getAsicIGMPSuppression(rtk_uint32 *pReport_supp_enabled, rtk_uint32 *pLeave_supp_enabled);
+ret_t rtl8367c_setAsicIGMPQueryRX(rtk_uint32 port, rtk_uint32 allow_query);
+ret_t rtl8367c_getAsicIGMPQueryRX(rtk_uint32 port, rtk_uint32 *pAllow_query);
+ret_t rtl8367c_setAsicIGMPReportRX(rtk_uint32 port, rtk_uint32 allow_report);
+ret_t rtl8367c_getAsicIGMPReportRX(rtk_uint32 port, rtk_uint32 *pAllow_report);
+ret_t rtl8367c_setAsicIGMPLeaveRX(rtk_uint32 port, rtk_uint32 allow_leave);
+ret_t rtl8367c_getAsicIGMPLeaveRX(rtk_uint32 port, rtk_uint32 *pAllow_leave);
+ret_t rtl8367c_setAsicIGMPMRPRX(rtk_uint32 port, rtk_uint32 allow_mrp);
+ret_t rtl8367c_getAsicIGMPMRPRX(rtk_uint32 port, rtk_uint32 *pAllow_mrp);
+ret_t rtl8367c_setAsicIGMPMcDataRX(rtk_uint32 port, rtk_uint32 allow_mcdata);
+ret_t rtl8367c_getAsicIGMPMcDataRX(rtk_uint32 port, rtk_uint32 *pAllow_mcdata);
+ret_t rtl8367c_setAsicIGMPv1Opeartion(rtk_uint32 port, rtk_uint32 igmpv1_op);
+ret_t rtl8367c_getAsicIGMPv1Opeartion(rtk_uint32 port, rtk_uint32 *pIgmpv1_op);
+ret_t rtl8367c_setAsicIGMPv2Opeartion(rtk_uint32 port, rtk_uint32 igmpv2_op);
+ret_t rtl8367c_getAsicIGMPv2Opeartion(rtk_uint32 port, rtk_uint32 *pIgmpv2_op);
+ret_t rtl8367c_setAsicIGMPv3Opeartion(rtk_uint32 port, rtk_uint32 igmpv3_op);
+ret_t rtl8367c_getAsicIGMPv3Opeartion(rtk_uint32 port, rtk_uint32 *pIgmpv3_op);
+ret_t rtl8367c_setAsicMLDv1Opeartion(rtk_uint32 port, rtk_uint32 mldv1_op);
+ret_t rtl8367c_getAsicMLDv1Opeartion(rtk_uint32 port, rtk_uint32 *pMldv1_op);
+ret_t rtl8367c_setAsicMLDv2Opeartion(rtk_uint32 port, rtk_uint32 mldv2_op);
+ret_t rtl8367c_getAsicMLDv2Opeartion(rtk_uint32 port, rtk_uint32 *pMldv2_op);
+ret_t rtl8367c_setAsicIGMPPortMAXGroup(rtk_uint32 port, rtk_uint32 max_group);
+ret_t rtl8367c_getAsicIGMPPortMAXGroup(rtk_uint32 port, rtk_uint32 *pMax_group);
+ret_t rtl8367c_getAsicIGMPPortCurrentGroup(rtk_uint32 port, rtk_uint32 *pCurrent_group);
+ret_t rtl8367c_getAsicIGMPGroup(rtk_uint32 idx, rtk_uint32 *pValid, rtl8367c_igmpgroup *pGrp);
+ret_t rtl8367c_setAsicIpMulticastPortIsoLeaky(rtk_uint32 port, rtk_uint32 enabled);
+ret_t rtl8367c_getAsicIpMulticastPortIsoLeaky(rtk_uint32 port, rtk_uint32 *pEnabled);
+ret_t rtl8367c_setAsicIGMPReportLeaveFlood(rtk_uint32 flood);
+ret_t rtl8367c_getAsicIGMPReportLeaveFlood(rtk_uint32 *pFlood);
+ret_t rtl8367c_setAsicIGMPDropLeaveZero(rtk_uint32 drop);
+ret_t rtl8367c_getAsicIGMPDropLeaveZero(rtk_uint32 *pDrop);
+ret_t rtl8367c_setAsicIGMPBypassStormCTRL(rtk_uint32 bypass);
+ret_t rtl8367c_getAsicIGMPBypassStormCTRL(rtk_uint32 *pBypass);
+ret_t rtl8367c_setAsicIGMPIsoLeaky(rtk_uint32 leaky);
+ret_t rtl8367c_getAsicIGMPIsoLeaky(rtk_uint32 *pLeaky);
+ret_t rtl8367c_setAsicIGMPVLANLeaky(rtk_uint32 leaky);
+ret_t rtl8367c_getAsicIGMPVLANLeaky(rtk_uint32 *pLeaky);
+ret_t rtl8367c_setAsicIGMPBypassGroup(rtk_uint32 bypassType, rtk_uint32 enabled);
+ret_t rtl8367c_getAsicIGMPBypassGroup(rtk_uint32 bypassType, rtk_uint32 *pEnabled);
+
+#endif /*#ifndef _RTL8367C_ASICDRV_IGMP_H_*/
+
diff -Nruw linux-4.4.115-fbx/drivers/misc/freebox./rtlapi/rtl8367c_asicdrv_inbwctrl.c linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/rtl8367c_asicdrv_inbwctrl.c
--- linux-4.4.115-fbx/drivers/misc/freebox./rtlapi/rtl8367c_asicdrv_inbwctrl.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/rtl8367c_asicdrv_inbwctrl.c	2019-01-22 16:16:24.715257382 +0100
@@ -0,0 +1,166 @@
+/*
+ * Copyright (C) 2013 Realtek Semiconductor Corp.
+ * All Rights Reserved.
+ *
+ * This program is the proprietary software of Realtek Semiconductor
+ * Corporation and/or its licensors, and only be used, duplicated,
+ * modified or distributed under the authorized license from Realtek.
+ *
+ * ANY USE OF THE SOFTWARE OTHER THAN AS AUTHORIZED UNDER
+ * THIS LICENSE OR COPYRIGHT LAW IS PROHIBITED.
+ *
+ * $Revision: 76306 $
+ * $Date: 2017-03-08 15:13:58 +0800 (Wed, 08 Mar 2017) $
+ *
+ * Purpose : RTL8367C switch high-level API for RTL8367C
+ * Feature : Ingress bandwidth control related functions
+ *
+ */
+#include <rtl8367c_asicdrv_inbwctrl.h>
+/* Function Name:
+ *      rtl8367c_setAsicPortIngressBandwidth
+ * Description:
+ *      Set per-port total ingress bandwidth
+ * Input:
+ *      port        - Physical port number (0~7)
+ *      bandwidth   - The total ingress bandwidth (unit: 8Kbps), 0x1FFFF:disable
+ *      preifg      - Include preamble and IFG, 0:Exclude, 1:Include
+ *      enableFC    - Action when input rate exceeds. 0: Drop   1: Flow Control
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - Success
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_PORT_ID      - Invalid port number
+ *      RT_ERR_OUT_OF_RANGE - input parameter out of range
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_setAsicPortIngressBandwidth(rtk_uint32 port, rtk_uint32 bandwidth, rtk_uint32 preifg, rtk_uint32 enableFC)
+{
+    ret_t retVal;
+    rtk_uint32 regData;
+    rtk_uint32 regAddr;
+
+    /* Invalid input parameter */
+    if(port >= RTL8367C_PORTNO)
+        return RT_ERR_PORT_ID;
+
+    if(bandwidth > RTL8367C_QOS_GRANULARTY_MAX)
+        return RT_ERR_OUT_OF_RANGE;
+
+    regAddr = RTL8367C_INGRESSBW_PORT_RATE_LSB_REG(port);
+    regData = bandwidth & RTL8367C_QOS_GRANULARTY_LSB_MASK;
+    retVal = rtl8367c_setAsicReg(regAddr, regData);
+    if(retVal != RT_ERR_OK)
+        return retVal;
+
+    regAddr += 1;
+    regData = (bandwidth & RTL8367C_QOS_GRANULARTY_MSB_MASK) >> RTL8367C_QOS_GRANULARTY_MSB_OFFSET;
+    retVal = rtl8367c_setAsicRegBits(regAddr, RTL8367C_INGRESSBW_PORT0_RATE_CTRL1_INGRESSBW_RATE16_MASK, regData);
+    if(retVal != RT_ERR_OK)
+        return retVal;
+
+    regAddr = RTL8367C_PORT_MISC_CFG_REG(port);
+    retVal = rtl8367c_setAsicRegBit(regAddr, RTL8367C_PORT0_MISC_CFG_INGRESSBW_IFG_OFFSET, preifg);
+    if(retVal != RT_ERR_OK)
+        return retVal;
+
+    regAddr = RTL8367C_PORT_MISC_CFG_REG(port);
+    retVal = rtl8367c_setAsicRegBit(regAddr, RTL8367C_PORT0_MISC_CFG_INGRESSBW_FLOWCTRL_OFFSET, enableFC);
+    if(retVal != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
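+/*
+ * Rate arithmetic (editorial sketch): the field is in units of 8Kbps and
+ * 17 bits wide, so 0x1FFFF disables the limiter. A 100Mbps ingress cap
+ * on port 3 with flow control and preamble/IFG excluded programs
+ * 100000/8 = 12500:
+ */
+#if 0 /* example only */
+static ret_t example_cap_port3_at_100mbps(void)
+{
+    return rtl8367c_setAsicPortIngressBandwidth(3, 100000 / 8, 0, 1);
+}
+#endif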
+/* Function Name:
+ *      rtl8367c_getAsicPortIngressBandwidth
+ * Description:
+ *      Get per-port total ingress bandwidth
+ * Input:
+ *      port        - Physical port number (0~7)
+ * Output:
+ *      pBandwidth  - The total ingress bandwidth (unit: 8Kbps), 0x1FFFF:disable
+ *      pPreifg     - Include preamble and IFG, 0:Exclude, 1:Include
+ *      pEnableFC   - Action when input rate exceeds. 0: Drop   1: Flow Control
+ * Return:
+ *      RT_ERR_OK           - Success
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_PORT_ID      - Invalid port number
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicPortIngressBandwidth(rtk_uint32 port, rtk_uint32* pBandwidth, rtk_uint32* pPreifg, rtk_uint32* pEnableFC)
+{
+    ret_t retVal;
+    rtk_uint32 regData;
+    rtk_uint32 regAddr;
+
+    /* Invalid input parameter */
+    if(port >= RTL8367C_PORTNO)
+        return RT_ERR_PORT_ID;
+
+    regAddr = RTL8367C_INGRESSBW_PORT_RATE_LSB_REG(port);
+    retVal = rtl8367c_getAsicReg(regAddr, &regData);
+    if(retVal != RT_ERR_OK)
+        return retVal;
+
+    *pBandwidth = regData;
+
+    regAddr += 1;
+    retVal = rtl8367c_getAsicRegBits(regAddr, RTL8367C_INGRESSBW_PORT0_RATE_CTRL1_INGRESSBW_RATE16_MASK, &regData);
+    if(retVal != RT_ERR_OK)
+        return retVal;
+
+    *pBandwidth |= (regData << RTL8367C_QOS_GRANULARTY_MSB_OFFSET);
+
+    regAddr = RTL8367C_PORT_MISC_CFG_REG(port);
+    retVal = rtl8367c_getAsicRegBit(regAddr, RTL8367C_PORT0_MISC_CFG_INGRESSBW_IFG_OFFSET, pPreifg);
+    if(retVal != RT_ERR_OK)
+        return retVal;
+
+    regAddr = RTL8367C_PORT_MISC_CFG_REG(port);
+    retVal = rtl8367c_getAsicRegBit(regAddr, RTL8367C_PORT0_MISC_CFG_INGRESSBW_FLOWCTRL_OFFSET, pEnableFC);
+    if(retVal != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+/* Function Name:
+ *      rtl8367c_setAsicPortIngressBandwidthBypass
+ * Description:
+ *      Set ingress bandwidth control bypass for 0x8899, RMA 01-80-C2-00-00-xx and IGMP packets
+ * Input:
+ *      enabled - 1: enabled, 0: disabled
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK   - Success
+ *      RT_ERR_SMI  - SMI access error
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_setAsicPortIngressBandwidthBypass(rtk_uint32 enabled)
+{
+    return rtl8367c_setAsicRegBit(RTL8367C_REG_SW_DUMMY0, RTL8367C_INGRESSBW_BYPASS_EN_OFFSET, enabled);
+}
+/* Function Name:
+ *      rtl8367c_getAsicPortIngressBandwidthBypass
+ * Description:
+ *      Get ingress bandwidth control bypass for 0x8899, RMA 01-80-C2-00-00-xx and IGMP packets
+ * Input:
+ *      None
+ * Output:
+ *      pEnabled - 1: enabled, 0: disabled
+ * Return:
+ *      RT_ERR_OK   - Success
+ *      RT_ERR_SMI  - SMI access error
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicPortIngressBandwidthBypass(rtk_uint32* pEnabled)
+{
+    return rtl8367c_getAsicRegBit(RTL8367C_REG_SW_DUMMY0, RTL8367C_INGRESSBW_BYPASS_EN_OFFSET, pEnabled);
+}
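+/*
+ * Example only: exempting control frames (ethertype 0x8899, RMA
+ * 01-80-C2-00-00-xx and IGMP) from the ingress meter keeps protocol
+ * traffic alive under a tight bandwidth cap:
+ */
+#if 0 /* example only */
+static ret_t example_exempt_control_frames(void)
+{
+    return rtl8367c_setAsicPortIngressBandwidthBypass(1);
+}
+#endif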
+
diff -Nruw linux-4.4.115-fbx/drivers/misc/freebox./rtlapi/rtl8367c_asicdrv_inbwctrl.h linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/rtl8367c_asicdrv_inbwctrl.h
--- linux-4.4.115-fbx/drivers/misc/freebox./rtlapi/rtl8367c_asicdrv_inbwctrl.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/rtl8367c_asicdrv_inbwctrl.h	2019-01-22 16:16:24.715257382 +0100
@@ -0,0 +1,13 @@
+#ifndef _RTL8367C_ASICDRV_INBWCTRL_H_
+#define _RTL8367C_ASICDRV_INBWCTRL_H_
+
+#include <rtl8367c_asicdrv.h>
+
+extern ret_t rtl8367c_setAsicPortIngressBandwidth(rtk_uint32 port, rtk_uint32 bandwidth, rtk_uint32 preifg, rtk_uint32 enableFC);
+extern ret_t rtl8367c_getAsicPortIngressBandwidth(rtk_uint32 port, rtk_uint32* pBandwidth, rtk_uint32* pPreifg, rtk_uint32* pEnableFC );
+extern ret_t rtl8367c_setAsicPortIngressBandwidthBypass(rtk_uint32 enabled);
+extern ret_t rtl8367c_getAsicPortIngressBandwidthBypass(rtk_uint32* pEnabled);
+
+
+#endif /*_RTL8367C_ASICDRV_INBWCTRL_H_*/
+
diff -Nruw linux-4.4.115-fbx/drivers/misc/freebox./rtlapi/rtl8367c_asicdrv_interrupt.c linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/rtl8367c_asicdrv_interrupt.c
--- linux-4.4.115-fbx/drivers/misc/freebox./rtlapi/rtl8367c_asicdrv_interrupt.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/rtl8367c_asicdrv_interrupt.c	2019-01-22 16:16:24.715257382 +0100
@@ -0,0 +1,207 @@
+/*
+ * Copyright (C) 2013 Realtek Semiconductor Corp.
+ * All Rights Reserved.
+ *
+ * This program is the proprietary software of Realtek Semiconductor
+ * Corporation and/or its licensors, and only be used, duplicated,
+ * modified or distributed under the authorized license from Realtek.
+ *
+ * ANY USE OF THE SOFTWARE OTHER THAN AS AUTHORIZED UNDER
+ * THIS LICENSE OR COPYRIGHT LAW IS PROHIBITED.
+ *
+ * $Revision: 76306 $
+ * $Date: 2017-03-08 15:13:58 +0800 (Wed, 08 Mar 2017) $
+ *
+ * Purpose : RTL8367C switch high-level API for RTL8367C
+ * Feature : Interrupt related functions
+ *
+ */
+#include <rtl8367c_asicdrv_interrupt.h>
+/* Function Name:
+ *      rtl8367c_setAsicInterruptPolarity
+ * Description:
+ *      Set interrupt trigger polarity
+ * Input:
+ *      polarity    - 0: pull high, 1: pull low
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK   - Success
+ *      RT_ERR_SMI  - SMI access error
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_setAsicInterruptPolarity(rtk_uint32 polarity)
+{
+    return rtl8367c_setAsicRegBit(RTL8367C_REG_INTR_CTRL, RTL8367C_INTR_CTRL_OFFSET, polarity);
+}
+/* Function Name:
+ *      rtl8367c_getAsicInterruptPolarity
+ * Description:
+ *      Get interrupt trigger polarity
+ * Input:
+ *      None
+ * Output:
+ *      pPolarity   - 0: pull high, 1: pull low
+ * Return:
+ *      RT_ERR_OK   - Success
+ *      RT_ERR_SMI  - SMI access error
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicInterruptPolarity(rtk_uint32* pPolarity)
+{
+    return rtl8367c_getAsicRegBit(RTL8367C_REG_INTR_CTRL, RTL8367C_INTR_CTRL_OFFSET, pPolarity);
+}
+/* Function Name:
+ *      rtl8367c_setAsicInterruptMask
+ * Description:
+ *      Set interrupt enable mask
+ * Input:
+ *      imr     - Interrupt mask
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK   - Success
+ *      RT_ERR_SMI  - SMI access error
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_setAsicInterruptMask(rtk_uint32 imr)
+{
+    return rtl8367c_setAsicReg(RTL8367C_REG_INTR_IMR, imr);
+}
+/* Function Name:
+ *      rtl8367c_getAsicInterruptMask
+ * Description:
+ *      Get interrupt enable mask
+ * Input:
+ *      None
+ * Output:
+ *      pImr    - Interrupt mask
+ * Return:
+ *      RT_ERR_OK   - Success
+ *      RT_ERR_SMI  - SMI access error
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicInterruptMask(rtk_uint32* pImr)
+{
+    return rtl8367c_getAsicReg(RTL8367C_REG_INTR_IMR, pImr);
+}
+/* Function Name:
+ *      rtl8367c_setAsicInterruptStatus
+ * Description:
+ *      Clear interrupt status
+ * Input:
+ *      ims     - Interrupt status mask
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK   - Success
+ *      RT_ERR_SMI  - SMI access error
+ * Note:
+ *      This API can be used to clear the ASIC interrupt status; each status bit is cleared by writing 1.
+ *      [0]:Link change,
+ *      [1]:Share meter exceed,
+ *      [2]:Learn number exceeded,
+ *      [3]:Speed Change,
+ *      [4]:Tx special congestion
+ *      [5]:1 second green feature
+ *      [6]:loop detection
+ *      [7]:interrupt from 8051
+ *      [8]:Cable diagnostic finish
+ *      [9]:ACL action interrupt trigger
+ *      [11]: Silent Start
+ */
+ret_t rtl8367c_setAsicInterruptStatus(rtk_uint32 ims)
+{
+    return rtl8367c_setAsicReg(RTL8367C_REG_INTR_IMS, ims);
+}
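+/*
+ * Write-1-to-clear usage sketch (example only): a handler reads IMS,
+ * dispatches on the bits documented above, then writes the same bits
+ * back to acknowledge them.
+ */
+#if 0 /* example only */
+static ret_t example_ack_pending_interrupts(void)
+{
+    ret_t retVal;
+    rtk_uint32 ims;
+
+    if((retVal = rtl8367c_getAsicInterruptStatus(&ims)) != RT_ERR_OK)
+        return retVal;
+
+    /* ... dispatch on bits [0]..[11] here ... */
+
+    return rtl8367c_setAsicInterruptStatus(ims);
+}
+#endif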
+/* Function Name:
+ *      rtl8367c_getAsicInterruptStatus
+ * Description:
+ *      Get interrupt status
+ * Input:
+ *      None
+ * Output:
+ *      pIms    - Interrupt status mask
+ * Return:
+ *      RT_ERR_OK   - Success
+ *      RT_ERR_SMI  - SMI access error
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicInterruptStatus(rtk_uint32* pIms)
+{
+    return rtl8367c_getAsicReg(RTL8367C_REG_INTR_IMS, pIms);
+}
+/* Function Name:
+ *      rtl8367c_setAsicInterruptRelatedStatus
+ * Description:
+ *      Clear interrupt status
+ * Input:
+ *      type    - per port Learn over, per-port speed change, per-port special congest, share meter exceed status
+ *      status  - exceed status, write 1 to clear
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - Success
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_OUT_OF_RANGE - input parameter out of range
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_setAsicInterruptRelatedStatus(rtk_uint32 type, rtk_uint32 status)
+{
+    CONST rtk_uint32 indicatorAddress[INTRST_END] = {RTL8367C_REG_LEARN_OVER_INDICATOR,
+                                                    RTL8367C_REG_SPEED_CHANGE_INDICATOR,
+                                                    RTL8367C_REG_SPECIAL_CONGEST_INDICATOR,
+                                                    RTL8367C_REG_PORT_LINKDOWN_INDICATOR,
+                                                    RTL8367C_REG_PORT_LINKUP_INDICATOR,
+                                                    RTL8367C_REG_METER_OVERRATE_INDICATOR0,
+                                                    RTL8367C_REG_METER_OVERRATE_INDICATOR1,
+                                                    RTL8367C_REG_RLDP_LOOPED_INDICATOR,
+                                                    RTL8367C_REG_RLDP_RELEASED_INDICATOR,
+                                                    RTL8367C_REG_SYSTEM_LEARN_OVER_INDICATOR};
+
+    if(type >= INTRST_END )
+        return RT_ERR_OUT_OF_RANGE;
+
+    return rtl8367c_setAsicReg(indicatorAddress[type], status);
+}
+/* Function Name:
+ *      rtl8367c_getAsicInterruptRelatedStatus
+ * Description:
+ *      Get interrupt status
+ * Input:
+ *      type    - per port Learn over, per-port speed change, per-port special congest, share meter exceed status
+ * Output:
+ *      pStatus - indicator status
+ * Return:
+ *      RT_ERR_OK           - Success
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_OUT_OF_RANGE - input parameter out of range
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicInterruptRelatedStatus(rtk_uint32 type, rtk_uint32* pStatus)
+{
+    CONST rtk_uint32 indicatorAddress[INTRST_END] = {RTL8367C_REG_LEARN_OVER_INDICATOR,
+                                                    RTL8367C_REG_SPEED_CHANGE_INDICATOR,
+                                                    RTL8367C_REG_SPECIAL_CONGEST_INDICATOR,
+                                                    RTL8367C_REG_PORT_LINKDOWN_INDICATOR,
+                                                    RTL8367C_REG_PORT_LINKUP_INDICATOR,
+                                                    RTL8367C_REG_METER_OVERRATE_INDICATOR0,
+                                                    RTL8367C_REG_METER_OVERRATE_INDICATOR1,
+                                                    RTL8367C_REG_RLDP_LOOPED_INDICATOR,
+                                                    RTL8367C_REG_RLDP_RELEASED_INDICATOR,
+                                                    RTL8367C_REG_SYSTEM_LEARN_OVER_INDICATOR};
+
+    if(type >= INTRST_END )
+        return RT_ERR_OUT_OF_RANGE;
+
+    return rtl8367c_getAsicReg(indicatorAddress[type], pStatus);
+}
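+/*
+ * Example only: after a link-change interrupt, the per-port detail sits
+ * in the indicator registers; reading INTRST_PORT_LINKDOWN and writing
+ * the observed bits back clears them.
+ */
+#if 0 /* example only */
+static ret_t example_handle_linkdown(rtk_uint32 *pDownMask)
+{
+    ret_t retVal;
+
+    retVal = rtl8367c_getAsicInterruptRelatedStatus(INTRST_PORT_LINKDOWN, pDownMask);
+    if(retVal != RT_ERR_OK)
+        return retVal;
+
+    return rtl8367c_setAsicInterruptRelatedStatus(INTRST_PORT_LINKDOWN, *pDownMask);
+}
+#endif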
+
diff -Nruw linux-4.4.115-fbx/drivers/misc/freebox./rtlapi/rtl8367c_asicdrv_interrupt.h linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/rtl8367c_asicdrv_interrupt.h
--- linux-4.4.115-fbx/drivers/misc/freebox./rtlapi/rtl8367c_asicdrv_interrupt.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/rtl8367c_asicdrv_interrupt.h	2019-01-22 16:16:24.715257382 +0100
@@ -0,0 +1,49 @@
+#ifndef _RTL8367C_ASICDRV_INTERRUPT_H_
+#define _RTL8367C_ASICDRV_INTERRUPT_H_
+
+#include <rtl8367c_asicdrv.h>
+
+typedef enum RTL8367C_INTR_IMRS_E
+{
+    IMRS_LINK_CHANGE,
+    IMRS_METER_EXCEED,
+    IMRS_L2_LEARN,
+    IMRS_SPEED_CHANGE,
+    IMRS_SPECIAL_CONGESTION,
+    IMRS_GREEN_FEATURE,
+    IMRS_LOOP_DETECTION,
+    IMRS_8051,
+    IMRS_CABLE_DIAG,
+    IMRS_ACL,
+    IMRS_RESERVED, /* Unused */
+    IMRS_SLIENT,
+    IMRS_END,
+}RTL8367C_INTR_IMRS;
+
+typedef enum RTL8367C_INTR_INDICATOR_E
+{
+    INTRST_L2_LEARN = 0,
+    INTRST_SPEED_CHANGE,
+    INTRST_SPECIAL_CONGESTION,
+    INTRST_PORT_LINKDOWN,
+    INTRST_PORT_LINKUP,
+    INTRST_METER0_15,
+    INTRST_METER16_31,
+    INTRST_RLDP_LOOPED,
+    INTRST_RLDP_RELEASED,
+    INTRST_SYS_LEARN,
+    INTRST_END,
+}RTL8367C_INTR_INDICATOR;
+
+extern ret_t rtl8367c_setAsicInterruptPolarity(rtk_uint32 polarity);
+extern ret_t rtl8367c_getAsicInterruptPolarity(rtk_uint32* pPolarity);
+extern ret_t rtl8367c_setAsicInterruptMask(rtk_uint32 imr);
+extern ret_t rtl8367c_getAsicInterruptMask(rtk_uint32* pImr);
+extern ret_t rtl8367c_setAsicInterruptStatus(rtk_uint32 ims);
+extern ret_t rtl8367c_getAsicInterruptStatus(rtk_uint32* pIms);
+extern ret_t rtl8367c_setAsicInterruptRelatedStatus(rtk_uint32 type, rtk_uint32 status);
+extern ret_t rtl8367c_getAsicInterruptRelatedStatus(rtk_uint32 type, rtk_uint32* pStatus);
+
+
+#endif /*#ifndef _RTL8367C_ASICDRV_INTERRUPT_H_*/
+
diff -Nruw linux-4.4.115-fbx/drivers/misc/freebox./rtlapi/rtl8367c_asicdrv_led.c linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/rtl8367c_asicdrv_led.c
--- linux-4.4.115-fbx/drivers/misc/freebox./rtlapi/rtl8367c_asicdrv_led.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/rtl8367c_asicdrv_led.c	2019-01-22 16:16:24.715257382 +0100
@@ -0,0 +1,729 @@
+/*
+ * Copyright (C) 2013 Realtek Semiconductor Corp.
+ * All Rights Reserved.
+ *
+ * This program is the proprietary software of Realtek Semiconductor
+ * Corporation and/or its licensors, and only be used, duplicated,
+ * modified or distributed under the authorized license from Realtek.
+ *
+ * ANY USE OF THE SOFTWARE OTHER THAN AS AUTHORIZED UNDER
+ * THIS LICENSE OR COPYRIGHT LAW IS PROHIBITED.
+ *
+ * $Revision: 76306 $
+ * $Date: 2017-03-08 15:13:58 +0800 (Wed, 08 Mar 2017) $
+ *
+ * Purpose : RTL8367C switch high-level API for RTL8367C
+ * Feature : LED related functions
+ *
+ */
+#include <rtl8367c_asicdrv_led.h>
+/* Function Name:
+ *      rtl8367c_setAsicLedIndicateInfoConfig
+ * Description:
+ *      Set LED indicated information mode
+ * Input:
+ *      ledno   - LED group number. There is a 1-to-1 LED mapping to each port in each LED group
+ *      config  - Supports 16 configuration types
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - Success
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_OUT_OF_RANGE - input parameter out of range
+ * Note:
+ *      The API can set LED indicated information configuration for each LED group with 1 to 1 led mapping to each port.
+ *      Definition        LED Statuses            Description
+ *      0000        LED_Off                LED pin Tri-State.
+ *      0001        Dup/Col                Collision, Full duplex Indicator. Blinking every 43ms when collision happens. Low for full duplex, and high for half duplex mode.
+ *      0010        Link/Act               Link, Activity Indicator. Low for link established. Link/Act Blinks every 43ms when the corresponding port is transmitting or receiving.
+ *      0011        Spd1000                1000Mb/s Speed Indicator. Low for 1000Mb/s.
+ *      0100        Spd100                 100Mb/s Speed Indicator. Low for 100Mb/s.
+ *      0101        Spd10                  10Mb/s Speed Indicator. Low for 10Mb/s.
+ *      0110        Spd1000/Act            1000Mb/s Speed/Activity Indicator. Low for 1000Mb/s. Blinks every 43ms when the corresponding port is transmitting or receiving.
+ *      0111        Spd100/Act             100Mb/s Speed/Activity Indicator. Low for 100Mb/s. Blinks every 43ms when the corresponding port is transmitting or receiving.
+ *      1000        Spd10/Act              10Mb/s Speed/Activity Indicator. Low for 10Mb/s. Blinks every 43ms when the corresponding port is transmitting or receiving.
+ *      1001        Spd100 (10)/Act        10/100Mb/s Speed/Activity Indicator. Low for 10/100Mb/s. Blinks every 43ms when the corresponding port is transmitting or receiving.
+ *      1010        Fiber                  Fiber link Indicator. Low for Fiber.
+ *      1011        Fault                  Auto-negotiation     Fault Indicator. Low for Fault.
+ *      1100        Link/Rx                Link, Activity Indicator. Low for link established. Link/Rx Blinks every 43ms when the corresponding port is transmitting.
+ *      1101        Link/Tx                Link, Activity Indicator. Low for link established. Link/Tx Blinks every 43ms when the corresponding port is receiving.
+ *      1110        Master                 Link on Master Indicator. Low for link Master established.
+ *      1111        LED_Force              Force LED output, LED output value reference
+ */
+ret_t rtl8367c_setAsicLedIndicateInfoConfig(rtk_uint32 ledno, rtk_uint32 config)
+{
+    ret_t   retVal;
+    CONST rtk_uint16 bits[RTL8367C_LEDGROUPNO] = {RTL8367C_LED0_CFG_MASK, RTL8367C_LED1_CFG_MASK, RTL8367C_LED2_CFG_MASK};
+
+    if(ledno >= RTL8367C_LEDGROUPNO)
+        return RT_ERR_OUT_OF_RANGE;
+
+    if(config >= LEDCONF_END)
+        return RT_ERR_OUT_OF_RANGE;
+
+    retVal = rtl8367c_setAsicRegBit(RTL8367C_REG_LED_CONFIGURATION, RTL8367C_LED_CONFIG_SEL_OFFSET, 0);
+    if(retVal != RT_ERR_OK)
+        return retVal;
+
+    return rtl8367c_setAsicRegBits(RTL8367C_REG_LED_CONFIGURATION, bits[ledno], config);
+}
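+/*
+ * Example only: per the table above, configuration 0b0010 is Link/Act.
+ * The LEDCONF_* enum lives in rtl8367c_asicdrv_led.h (not shown in this
+ * hunk), so the raw value is used below purely for illustration.
+ */
+#if 0 /* example only */
+static ret_t example_led_group0_link_act(void)
+{
+    return rtl8367c_setAsicLedIndicateInfoConfig(0, 0x2 /* Link/Act */);
+}
+#endif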
+/* Function Name:
+ *      rtl8367c_getAsicLedIndicateInfoConfig
+ * Description:
+ *      Get LED indicated information mode
+ * Input:
+ *      ledno   - LED group number. There is a 1-to-1 LED mapping to each port in each LED group
+ * Output:
+ *      pConfig - Supports 16 configuration types
+ * Return:
+ *      RT_ERR_OK           - Success
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_OUT_OF_RANGE - input parameter out of range
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicLedIndicateInfoConfig(rtk_uint32 ledno, rtk_uint32* pConfig)
+{
+    CONST rtk_uint16 bits[RTL8367C_LEDGROUPNO]= {RTL8367C_LED0_CFG_MASK, RTL8367C_LED1_CFG_MASK, RTL8367C_LED2_CFG_MASK};
+
+    if(ledno >= RTL8367C_LEDGROUPNO)
+        return RT_ERR_OUT_OF_RANGE;
+
+    /* Get register value */
+    return rtl8367c_getAsicRegBits(RTL8367C_REG_LED_CONFIGURATION, bits[ledno], pConfig);
+}
+/* Function Name:
+ *      rtl8367c_setAsicLedGroupMode
+ * Description:
+ *      Set Led Group mode
+ * Input:
+ *      mode    - LED mode
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - Success
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_OUT_OF_RANGE - input parameter out of range
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_setAsicLedGroupMode(rtk_uint32 mode)
+{
+    ret_t retVal;
+
+    /* Invalid input parameter */
+    if(mode >= RTL8367C_LED_MODE_END)
+        return RT_ERR_OUT_OF_RANGE;
+
+    retVal = rtl8367c_setAsicRegBit(RTL8367C_REG_LED_CONFIGURATION, RTL8367C_LED_CONFIG_SEL_OFFSET, 1);
+    if(retVal != RT_ERR_OK)
+        return retVal;
+
+    return rtl8367c_setAsicRegBits(RTL8367C_REG_LED_CONFIGURATION, RTL8367C_DATA_LED_MASK, mode);
+}
+/* Function Name:
+ *      rtl8367c_getAsicLedGroupMode
+ * Description:
+ *      Get Led Group mode
+ * Input:
+ *      None
+ * Output:
+ *      pMode   - LED mode
+ * Return:
+ *      RT_ERR_OK   - Success
+ *      RT_ERR_SMI  - SMI access error
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicLedGroupMode(rtk_uint32* pMode)
+{
+    ret_t retVal;
+    rtk_uint32 regData;
+
+    retVal = rtl8367c_getAsicRegBit(RTL8367C_REG_LED_CONFIGURATION, RTL8367C_LED_CONFIG_SEL_OFFSET, &regData);
+    if(retVal != RT_ERR_OK)
+        return retVal;
+
+    if(regData!=1)
+        return RT_ERR_FAILED;
+
+    return rtl8367c_getAsicRegBits(RTL8367C_REG_LED_CONFIGURATION, RTL8367C_DATA_LED_MASK, pMode);
+}
+/* Function Name:
+ *      rtl8367c_setAsicForceLed
+ * Description:
+ *      Set group LED mode
+ * Input:
+ *      port    - Physical port number (0~10)
+ *      group   - LED group number
+ *      mode    - LED mode
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - Success
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_PORT_ID      - Invalid port number
+ *      RT_ERR_OUT_OF_RANGE - input parameter out of range
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_setAsicForceLed(rtk_uint32 port, rtk_uint32 group, rtk_uint32 mode)
+{
+    rtk_uint16 regAddr;
+    ret_t retVal;
+
+    /* Invalid input parameter */
+    if(port > RTL8367C_PORTIDMAX)
+        return RT_ERR_PORT_ID;
+
+    if(group >= RTL8367C_LEDGROUPNO)
+        return RT_ERR_OUT_OF_RANGE;
+
+    if(mode >= LEDFORCEMODE_END)
+        return RT_ERR_OUT_OF_RANGE;
+    /* Set Related Registers */
+    if(port < 8){
+        regAddr = RTL8367C_LED_FORCE_MODE_BASE + (group << 1);
+        if((retVal = rtl8367c_setAsicRegBits(regAddr, 0x3 << (port * 2), mode)) != RT_ERR_OK)
+            return retVal;
+    }else if(port >= 8){
+        regAddr = RTL8367C_REG_CPU_FORCE_LED0_CFG1 + (group << 1);
+        if((retVal = rtl8367c_setAsicRegBits(regAddr, 0x3 << ((port-8) * 2), mode)) != RT_ERR_OK)
+            return retVal;
+    }
+
+    return RT_ERR_OK;
+}
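+/*
+ * Addressing note (editorial sketch): each port owns a 2-bit force-mode
+ * field, eight ports per register, so port 5 of group 1 lands in
+ * RTL8367C_LED_FORCE_MODE_BASE + 2 at bit offset 10. Forcing that LED on
+ * (0b11 per the force-mode encoding documented below) could look like:
+ */
+#if 0 /* example only */
+static ret_t example_force_led_on(void)
+{
+    /* 0x3 = force on; the LEDFORCEMODE_* names are assumed from the header */
+    return rtl8367c_setAsicForceLed(5, 1, 0x3);
+}
+#endif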
+/* Function Name:
+ *      rtl8367c_getAsicForceLed
+ * Description:
+ *      Get group LED mode
+ * Input:
+ *      port    - Physical port number (0~10)
+ *      group   - LED group number
+ * Output:
+ *      pMode   - LED mode
+ * Return:
+ *      RT_ERR_OK           - Success
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_PORT_ID      - Invalid port number
+ *      RT_ERR_OUT_OF_RANGE - input parameter out of range
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicForceLed(rtk_uint32 port, rtk_uint32 group, rtk_uint32* pMode)
+{
+    rtk_uint16 regAddr;
+    ret_t retVal;
+
+    /* Invalid input parameter */
+    if(port > RTL8367C_PORTIDMAX)
+        return RT_ERR_PORT_ID;
+
+    if(group >= RTL8367C_LEDGROUPNO)
+        return RT_ERR_INPUT;
+
+    /* Get Related Registers */
+    if(port < 8){
+        regAddr = RTL8367C_LED_FORCE_MODE_BASE + (group << 1);
+        if((retVal = rtl8367c_getAsicRegBits(regAddr, 0x3 << (port * 2), pMode)) != RT_ERR_OK)
+            return retVal;
+    }else if(port >= 8){
+        regAddr = RTL8367C_REG_CPU_FORCE_LED0_CFG1 + (group << 1);
+        if((retVal = rtl8367c_getAsicRegBits(regAddr, 0x3 << ((port-8) * 2), pMode)) != RT_ERR_OK)
+            return retVal;
+    }
+
+    return RT_ERR_OK;
+}
+/* Function Name:
+ *      rtl8367c_setAsicForceGroupLed
+ * Description:
+ *      Turn on/off Led of all ports
+ * Input:
+ *      groupmask - LED group bit mask
+ *      mode    - 0b00:normal mode, 0b01:force blink, 0b10:force off, 0b11:force on
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - Success
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_OUT_OF_RANGE - input parameter out of range
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_setAsicForceGroupLed(rtk_uint32 groupmask, rtk_uint32 mode)
+{
+    ret_t retVal;
+    rtk_uint32 i,bitmask;
+    CONST rtk_uint16 bits[3]= {0x0004,0x0010,0x0040};
+
+    /* Invalid input parameter */
+    if(groupmask > RTL8367C_LEDGROUPMASK)
+        return RT_ERR_OUT_OF_RANGE;
+
+    if(mode >= LEDFORCEMODE_END)
+        return RT_ERR_OUT_OF_RANGE;
+
+    bitmask = 0;
+    for(i = 0; i <  RTL8367C_LEDGROUPNO; i++)
+    {
+        if(groupmask & (1 << i))
+        {
+            bitmask = bitmask | bits[i];
+        }
+
+    }
+
+    if((retVal = rtl8367c_setAsicRegBits(RTL8367C_LED_FORCE_CTRL, RTL8367C_LED_FORCE_MODE_MASK, bitmask)) != RT_ERR_OK)
+        return retVal;
+
+    if((retVal = rtl8367c_setAsicRegBits(RTL8367C_LED_FORCE_CTRL, RTL8367C_FORCE_MODE_MASK, mode)) != RT_ERR_OK)
+        return retVal;
+
+    if(LEDFORCEMODE_NORMAL == mode)
+        retVal = rtl8367c_setAsicRegBits(RTL8367C_LED_FORCE_CTRL, RTL8367C_LED_FORCE_MODE_MASK, 0);
+
+    return retVal;
+}
+/* Function Name:
+ *      rtl8367c_getAsicForceGroupLed
+ * Description:
+ *      Get forced LED mode and group mask of all ports
+ * Input:
+ *      None
+ * Output:
+ *      groupmask - LED group bit mask
+ *      pMode   - 0b00:normal mode, 0b01:force blink, 0b10:force off, 0b11:force on
+ * Return:
+ *      RT_ERR_OK           - Success
+ *      RT_ERR_SMI          - SMI access error
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicForceGroupLed(rtk_uint32* groupmask, rtk_uint32* pMode)
+{
+    ret_t retVal;
+    rtk_uint32 i,regData;
+    CONST rtk_uint16 bits[3] = {0x0004,0x0010,0x0040};
+
+    /* Get Related Registers */
+    if((retVal = rtl8367c_getAsicRegBits(RTL8367C_LED_FORCE_CTRL, RTL8367C_LED_FORCE_MODE_MASK, &regData)) != RT_ERR_OK)
+        return retVal;
+
+    *groupmask = 0;
+    for(i = 0; i < RTL8367C_LEDGROUPNO; i++)
+    {
+        if((regData & bits[i]) == bits[i])
+        {
+            *groupmask = *groupmask | (1 << i);
+        }
+    }
+
+    return rtl8367c_getAsicRegBits(RTL8367C_LED_FORCE_CTRL, RTL8367C_FORCE_MODE_MASK, pMode);
+}
+/* Function Name:
+ *      rtl8367c_setAsicLedBlinkRate
+ * Description:
+ *      Set led blinking rate at mode 0 to mode 3
+ * Input:
+ *      blinkRate   - Support 6 blink rates
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - Success
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_OUT_OF_RANGE - input parameter out of range
+ * Note:
+ *      LED blink rate can be at 43ms, 84ms, 120ms, 170ms, 340ms and 670ms
+ */
+ret_t rtl8367c_setAsicLedBlinkRate(rtk_uint32 blinkRate)
+{
+    if(blinkRate >= LEDBLINKRATE_END)
+        return RT_ERR_OUT_OF_RANGE;
+
+    return rtl8367c_setAsicRegBits(RTL8367C_REG_LED_MODE, RTL8367C_SEL_LEDRATE_MASK, blinkRate);
+}
+/* Function Name:
+ *      rtl8367c_getAsicLedBlinkRate
+ * Description:
+ *      Get led blinking rate at mode 0 to mode 3
+ * Input:
+ *      None
+ * Output:
+ *      pBlinkRate  - Supports 6 blink rates
+ * Return:
+ *      RT_ERR_OK           - Success
+ *      RT_ERR_SMI          - SMI access error
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicLedBlinkRate(rtk_uint32* pBlinkRate)
+{
+    return rtl8367c_getAsicRegBits(RTL8367C_REG_LED_MODE, RTL8367C_SEL_LEDRATE_MASK, pBlinkRate);
+}
+/* Function Name:
+ *      rtl8367c_setAsicLedForceBlinkRate
+ * Description:
+ *      Set LED blinking rate for force-mode LED
+ * Input:
+ *      blinkRate   - Support 6 blink rates
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - Success
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_OUT_OF_RANGE - input parameter out of range
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_setAsicLedForceBlinkRate(rtk_uint32 blinkRate)
+{
+    if(blinkRate >= LEDFORCERATE_END)
+        return RT_ERR_OUT_OF_RANGE;
+
+    return rtl8367c_setAsicRegBits(RTL8367C_REG_LED_MODE, RTL8367C_FORCE_RATE_MASK, blinkRate);
+}
+/* Function Name:
+ *      rtl8367c_getAsicLedForceBlinkRate
+ * Description:
+ *      Get LED blinking rate for force-mode LED
+ * Input:
+ *      None
+ * Output:
+ *      pBlinkRate  - Supports 6 blink rates
+ * Return:
+ *      RT_ERR_OK           - Success
+ *      RT_ERR_SMI          - SMI access error
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicLedForceBlinkRate(rtk_uint32* pBlinkRate)
+{
+     return rtl8367c_getAsicRegBits(RTL8367C_REG_LED_MODE, RTL8367C_FORCE_RATE_MASK, pBlinkRate);
+}
+
+/*
+@func ret_t | rtl8367c_setAsicLedGroupEnable | Turn on/off LEDs of all system ports in one LED group
+@parm rtk_uint32 | group | LED group id.
+@parm rtk_uint32 | portmask | LED port mask.
+@rvalue RT_ERR_OK | Success.
+@rvalue RT_ERR_SMI | SMI access error.
+@rvalue RT_ERR_INPUT | Invalid input value.
+@comm
+    The API turns the per-port LEDs of the given LED group on or off while the group is configured in force mode.
+ */
+ret_t rtl8367c_setAsicLedGroupEnable(rtk_uint32 group, rtk_uint32 portmask)
+{
+    ret_t retVal;
+    rtk_uint32 regAddr;
+    rtk_uint32 regDataMask;
+
+    if ( group >= RTL8367C_LEDGROUPNO )
+        return RT_ERR_INPUT;
+
+    regAddr = RTL8367C_REG_PARA_LED_IO_EN1 + group/2;
+    regDataMask = 0xFF << ((group%2)*8);
+    retVal = rtl8367c_setAsicRegBits(regAddr, regDataMask, portmask&0xff);
+    if(retVal != RT_ERR_OK)
+        return retVal;
+
+    regAddr = RTL8367C_REG_PARA_LED_IO_EN3;
+    regDataMask = 0x3 << (group*2);
+    retVal = rtl8367c_setAsicRegBits(regAddr, regDataMask, (portmask>>8)&0x7);
+    if(retVal != RT_ERR_OK)
+        return retVal;
+
+
+    return RT_ERR_OK;
+}
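+
+/*
+ * Register layout note (editor's sketch, compiled out): each 16-bit
+ * PARA_LED_IO_EN register holds two 8-bit group fields for ports 0-7, and
+ * PARA_LED_IO_EN3 holds the remaining high-port bits, two per group.  The
+ * values below only illustrate the address/mask arithmetic used above.
+ */
+#if 0 /* example only */
+static void example_led_group_enable_math(void)
+{
+    rtk_uint32 group = 1, portmask = 0x13F;
+
+    /* ports 0-7 go to RTL8367C_REG_PARA_LED_IO_EN1 + group/2 (here EN1),
+     * field 0xFF << ((group % 2) * 8) = 0xFF00, value portmask & 0xFF = 0x3F;
+     * ports 8+ go to PARA_LED_IO_EN3, field 0x3 << (group * 2) = 0x0C,
+     * value (portmask >> 8) & 0x7 = 0x1 */
+    rtl8367c_setAsicLedGroupEnable(group, portmask);
+}
+#endif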
+
+/*
+@func ret_t | rtl8367c_getAsicLedGroupEnable | Get on/off status of LEDs of all system ports in one LED group
+@parm rtk_uint32 | group | LED group id.
+@parm rtk_uint32 | *portmask | LED port mask.
+@rvalue RT_ERR_OK | Success.
+@rvalue RT_ERR_SMI | SMI access error.
+@rvalue RT_ERR_INPUT | Invalid input value.
+@comm
+    The API reads back which port LEDs of the given LED group are turned on while the group is configured in force mode.
+ */
+ret_t rtl8367c_getAsicLedGroupEnable(rtk_uint32 group, rtk_uint32 *portmask)
+{
+    ret_t retVal;
+    rtk_uint32 regAddr;
+    rtk_uint32 regDataMask,regData;
+
+    if ( group >= RTL8367C_LEDGROUPNO )
+        return RT_ERR_INPUT;
+
+    regAddr = RTL8367C_REG_PARA_LED_IO_EN1 + group/2;
+    regDataMask = 0xFF << ((group%2)*8);
+    retVal = rtl8367c_getAsicRegBits(regAddr, regDataMask, portmask);
+    if(retVal != RT_ERR_OK)
+        return retVal;
+
+
+    regAddr = RTL8367C_REG_PARA_LED_IO_EN3;
+    regDataMask = 0x3 << (group*2);
+    retVal = rtl8367c_getAsicRegBits(regAddr, regDataMask, &regData);
+    if(retVal != RT_ERR_OK)
+        return retVal;
+
+    *portmask = (regData << 8) | *portmask;
+
+    return RT_ERR_OK;
+}
+
+/*
+@func ret_t | rtl8367c_setAsicLedOperationMode | Set LED operation mode
+@parm rtk_uint32 | mode | LED operation mode. 0:scan mode 0, 1:scan mode 1, 2:parallel mode, 3:serial mode
+@rvalue RT_ERR_OK | Success.
+@rvalue RT_ERR_SMI | SMI access error.
+@rvalue RT_ERR_INPUT | Invalid input value.
+@comm
+    The API sets the LED operation mode; only parallel and serial modes are accepted, other values return RT_ERR_INPUT.
+ */
+ret_t rtl8367c_setAsicLedOperationMode(rtk_uint32 mode)
+{
+    ret_t retVal;
+
+    /* Invalid input parameter */
+    if( mode >= LEDOP_END)
+        return RT_ERR_INPUT;
+
+    switch(mode)
+    {
+        case LEDOP_PARALLEL:
+            if((retVal = rtl8367c_setAsicRegBit(RTL8367C_REG_LED_SYS_CONFIG, RTL8367C_LED_SELECT_OFFSET, 0))!=  RT_ERR_OK)
+                return retVal;
+            /*Disable serial CLK mode*/
+            if((retVal = rtl8367c_setAsicRegBit(RTL8367C_REG_SCAN0_LED_IO_EN1,RTL8367C_LED_SERI_CLK_EN_OFFSET, 0))!=  RT_ERR_OK)
+                return retVal;
+            /*Disable serial DATA mode*/
+            if((retVal = rtl8367c_setAsicRegBit(RTL8367C_REG_SCAN0_LED_IO_EN1,RTL8367C_LED_SERI_DATA_EN_OFFSET, 0))!=  RT_ERR_OK)
+                return retVal;
+            break;
+        case LEDOP_SERIAL:
+            if((retVal = rtl8367c_setAsicRegBit(RTL8367C_REG_LED_SYS_CONFIG, RTL8367C_LED_SELECT_OFFSET, 1))!=  RT_ERR_OK)
+                return retVal;
+            /*Enable serial CLK mode*/
+            if((retVal = rtl8367c_setAsicRegBit(RTL8367C_REG_SCAN0_LED_IO_EN1,RTL8367C_LED_SERI_CLK_EN_OFFSET, 1))!=  RT_ERR_OK)
+                return retVal;
+            /*Enable serial DATA mode*/
+            if((retVal = rtl8367c_setAsicRegBit(RTL8367C_REG_SCAN0_LED_IO_EN1,RTL8367C_LED_SERI_DATA_EN_OFFSET, 1))!=  RT_ERR_OK)
+                return retVal;
+            break;
+        default:
+            return RT_ERR_INPUT;
+            break;
+    }
+
+    return RT_ERR_OK;
+}
+
+
+/*
+@func ret_t | rtl8367c_getAsicLedOperationMode | Get LED OP mode setup
+@parm rtk_uint32*| mode | LED operation mode. 0:scan mode 0, 1:scan mode 1, 2:parallel mode, 3:serial mode
+@rvalue RT_ERR_OK | Success.
+@rvalue RT_ERR_SMI | SMI access error.
+@rvalue RT_ERR_INPUT | Invalid input value.
+@comm
+    The API gets the current LED operation mode (parallel or serial).
+ */
+ret_t rtl8367c_getAsicLedOperationMode(rtk_uint32 *mode)
+{
+    ret_t retVal;
+    rtk_uint32 regData;
+
+    if((retVal = rtl8367c_getAsicRegBit(RTL8367C_REG_LED_SYS_CONFIG, RTL8367C_LED_SELECT_OFFSET, &regData))!=  RT_ERR_OK)
+        return retVal;
+
+    if (regData == 1)
+        *mode = LEDOP_SERIAL;
+    else if (regData == 0)
+        *mode = LEDOP_PARALLEL;
+    else
+        return RT_ERR_FAILED;
+
+    return RT_ERR_OK;
+}
+
+/*
+@func ret_t | rtl8367c_setAsicLedSerialModeConfig | Set LED serial mode
+@parm rtk_uint32 | active | Active High or Low.
+@parm rtk_uint32 | serimode | Serial output mode (see enum RTL8367C_LEDSER).
+@rvalue RT_ERR_OK | Success.
+@rvalue RT_ERR_SMI | SMI access error.
+@rvalue RT_ERR_INPUT | Invalid input value.
+@comm
+    The API sets the serial-LED active level and the serial output mode.
+ */
+ret_t rtl8367c_setAsicLedSerialModeConfig(rtk_uint32 active, rtk_uint32 serimode)
+{
+    ret_t retVal;
+
+    /* Invalid input parameter */
+    if( active >= LEDSERACT_MAX)
+        return RT_ERR_INPUT;
+    if( serimode >= LEDSER_MAX)
+        return RT_ERR_INPUT;
+
+    /* Set Active High or Low */
+    if((retVal = rtl8367c_setAsicRegBit(RTL8367C_REG_LED_SYS_CONFIG, RTL8367C_SERI_LED_ACT_LOW_OFFSET, active)) !=  RT_ERR_OK)
+        return retVal;
+
+    /* Select serial output mode: 16G or 8G (DLINK_TIME field) */
+    if((retVal = rtl8367c_setAsicRegBit(RTL8367C_REG_LED_MODE, RTL8367C_DLINK_TIME_OFFSET, serimode))!=  RT_ERR_OK)
+        return retVal;
+
+
+    return RT_ERR_OK;
+}
+
+
+/*
+@func ret_t | rtl8367c_getAsicLedSerialModeConfig | Get LED serial mode setup
+@parm rtk_uint32*| active | Active High or Low.
+@parm rtk_uint32*| serimode | Serial output mode (see enum RTL8367C_LEDSER).
+@rvalue RT_ERR_OK | Success.
+@rvalue RT_ERR_SMI | SMI access error.
+@rvalue RT_ERR_INPUT | Invalid input value.
+@comm
+    The API gets the serial-LED active level and the serial output mode.
+ */
+ret_t rtl8367c_getAsicLedSerialModeConfig(rtk_uint32 *active, rtk_uint32 *serimode)
+{
+    ret_t retVal;
+
+    if((retVal = rtl8367c_getAsicRegBit(RTL8367C_REG_LED_SYS_CONFIG, RTL8367C_SERI_LED_ACT_LOW_OFFSET, active))!=  RT_ERR_OK)
+        return retVal;
+
+    /* Get serial output mode: 16G or 8G (DLINK_TIME field) */
+    if((retVal = rtl8367c_getAsicRegBit(RTL8367C_REG_LED_MODE, RTL8367C_DLINK_TIME_OFFSET, serimode))!=  RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+
+/*
+@func ret_t | rtl8367c_setAsicLedOutputEnable | Set LED output enable
+@parm rtk_uint32 | enabled | 1: enable, 0: disable.
+@rvalue RT_ERR_OK | Success.
+@rvalue RT_ERR_SMI | SMI access error.
+@rvalue RT_ERR_INPUT | Invalid input value.
+@comm
+    The API enables or disables the LED IO output.
+ */
+ret_t rtl8367c_setAsicLedOutputEnable(rtk_uint32 enabled)
+{
+    ret_t retVal;
+    rtk_uint32 regdata;
+
+    if (enabled == 1)
+        regdata = 0;
+    else
+        regdata = 1;
+
+    /* LED_IO_DISABLE is active low: write 0 to enable the LED output */
+    retVal = rtl8367c_setAsicRegBit(RTL8367C_REG_LED_SYS_CONFIG, RTL8367C_LED_IO_DISABLE_OFFSET, regdata);
+
+    return retVal;
+}
+
+
+/*
+@func ret_t | rtl8367c_getAsicLedOutputEnable | Get LED output enable
+@parm rtk_uint32*| ptr_enabled | 1: enabled, 0: disabled.
+@rvalue RT_ERR_OK | Success.
+@rvalue RT_ERR_SMI | SMI access error.
+@rvalue RT_ERR_INPUT | Invalid input value.
+@comm
+    The API gets the LED IO output enable status.
+ */
+ret_t rtl8367c_getAsicLedOutputEnable(rtk_uint32 *ptr_enabled)
+{
+    ret_t retVal;
+    rtk_uint32 regdata;
+
+    retVal = rtl8367c_getAsicRegBit(RTL8367C_REG_LED_SYS_CONFIG, RTL8367C_LED_IO_DISABLE_OFFSET, &regdata);
+    if (retVal != RT_ERR_OK)
+        return retVal;
+
+    if (regdata == 1)
+        *ptr_enabled = 0;
+    else
+        *ptr_enabled = 1;
+
+    return RT_ERR_OK;
+}
+
+/* Function Name:
+ *      rtl8367c_setAsicLedSerialOutput
+ * Description:
+ *      Set serial LED output group and portmask.
+ * Input:
+ *      output      - Serial LED output group
+ *      pmask       - Serial LED output portmask
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - Success
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_OUT_OF_RANGE - input parameter out of range
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_setAsicLedSerialOutput(rtk_uint32 output, rtk_uint32 pmask)
+{
+    ret_t retVal;
+
+    retVal = rtl8367c_setAsicRegBits(RTL8367C_REG_SERIAL_LED_CTRL, RTL8367C_SERIAL_LED_GROUP_NUM_MASK, output);
+    if (retVal != RT_ERR_OK)
+        return retVal;
+
+    retVal = rtl8367c_setAsicRegBits(RTL8367C_REG_SERIAL_LED_CTRL, RTL8367C_SERIAL_LED_PORT_EN_MASK, pmask);
+    if (retVal != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+/* Function Name:
+ *      rtl8367c_getAsicLedSerialOutput
+ * Description:
+ *      Get serial LED output group and portmask.
+ * Input:
+ *      None
+ * Output:
+ *      pOutput      - Serial LED output group
+ *      pPmask       - Serial LED output portmask
+ * Return:
+ *      RT_ERR_OK           - Success
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_OUT_OF_RANGE - input parameter out of range
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicLedSerialOutput(rtk_uint32 *pOutput, rtk_uint32 *pPmask)
+{
+    ret_t retVal;
+
+    if(pOutput == NULL)
+        return RT_ERR_NULL_POINTER;
+
+    if(pPmask == NULL)
+        return RT_ERR_NULL_POINTER;
+
+    retVal = rtl8367c_getAsicRegBits(RTL8367C_REG_SERIAL_LED_CTRL, RTL8367C_SERIAL_LED_GROUP_NUM_MASK, pOutput);
+    if (retVal != RT_ERR_OK)
+        return retVal;
+
+    retVal = rtl8367c_getAsicRegBits(RTL8367C_REG_SERIAL_LED_CTRL, RTL8367C_SERIAL_LED_PORT_EN_MASK, pPmask);
+    if (retVal != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
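+
+/*
+ * Bring-up sketch (editor's example, compiled out): a plausible serial-LED
+ * configuration sequence combining the APIs above.  The chosen values
+ * (active-low, 8G serial mode) are illustrative only.
+ */
+#if 0 /* example only */
+static ret_t example_serial_led_setup(void)
+{
+    ret_t retVal;
+
+    /* Switch the LED block to serial output */
+    if ((retVal = rtl8367c_setAsicLedOperationMode(LEDOP_SERIAL)) != RT_ERR_OK)
+        return retVal;
+
+    /* Active-low data line, 8G serial framing */
+    if ((retVal = rtl8367c_setAsicLedSerialModeConfig(LEDSERACT_LOW, LEDSER_8G)) != RT_ERR_OK)
+        return retVal;
+
+    /* Enable the LED IO pads */
+    return rtl8367c_setAsicLedOutputEnable(1);
+}
+#endif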
+
diff -Nruw linux-4.4.115-fbx/drivers/misc/freebox./rtlapi/rtl8367c_asicdrv_led.h linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/rtl8367c_asicdrv_led.h
--- linux-4.4.115-fbx/drivers/misc/freebox./rtlapi/rtl8367c_asicdrv_led.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/rtl8367c_asicdrv_led.h	2019-01-22 16:16:24.715257382 +0100
@@ -0,0 +1,121 @@
+#ifndef _RTL8367C_ASICDRV_LED_H_
+#define _RTL8367C_ASICDRV_LED_H_
+
+#include <rtl8367c_asicdrv.h>
+
+#define RTL8367C_LEDGROUPNO                 3
+#define RTL8367C_LEDGROUPMASK               0x7
+#define RTL8367C_LED_FORCE_MODE_BASE        RTL8367C_REG_CPU_FORCE_LED0_CFG0
+#define RTL8367C_LED_FORCE_CTRL             RTL8367C_REG_CPU_FORCE_LED_CFG
+
+enum RTL8367C_LEDOP{
+
+    LEDOP_SCAN0=0,
+    LEDOP_SCAN1,
+    LEDOP_PARALLEL,
+    LEDOP_SERIAL,
+    LEDOP_END,
+};
+
+enum RTL8367C_LEDSERACT{
+
+    LEDSERACT_HIGH=0,
+    LEDSERACT_LOW,
+    LEDSERACT_MAX,
+};
+
+enum RTL8367C_LEDSER{
+
+    LEDSER_16G=0,
+    LEDSER_8G,
+    LEDSER_MAX,
+};
+
+enum RTL8367C_LEDCONF{
+
+    LEDCONF_LEDOFF=0,
+    LEDCONF_DUPCOL,
+    LEDCONF_LINK_ACT,
+    LEDCONF_SPD1000,
+    LEDCONF_SPD100,
+    LEDCONF_SPD10,
+    LEDCONF_SPD1000ACT,
+    LEDCONF_SPD100ACT,
+    LEDCONF_SPD10ACT,
+    LEDCONF_SPD10010ACT,
+    LEDCONF_LOOPDETECT,
+    LEDCONF_EEE,
+    LEDCONF_LINKRX,
+    LEDCONF_LINKTX,
+    LEDCONF_MASTER,
+    LEDCONF_ACT,
+    LEDCONF_END
+};
+
+enum RTL8367C_LEDBLINKRATE{
+
+    LEDBLINKRATE_32MS=0,
+    LEDBLINKRATE_64MS,
+    LEDBLINKRATE_128MS,
+    LEDBLINKRATE_256MS,
+    LEDBLINKRATE_512MS,
+    LEDBLINKRATE_1024MS,
+    LEDBLINKRATE_48MS,
+    LEDBLINKRATE_96MS,
+    LEDBLINKRATE_END,
+};
+
+enum RTL8367C_LEDFORCEMODE{
+
+    LEDFORCEMODE_NORMAL=0,
+    LEDFORCEMODE_BLINK,
+    LEDFORCEMODE_OFF,
+    LEDFORCEMODE_ON,
+    LEDFORCEMODE_END,
+};
+
+enum RTL8367C_LEDFORCERATE{
+
+    LEDFORCERATE_512MS=0,
+    LEDFORCERATE_1024MS,
+    LEDFORCERATE_2048MS,
+    LEDFORCERATE_NORMAL,
+    LEDFORCERATE_END,
+
+};
+
+enum RTL8367C_LEDMODE
+{
+    RTL8367C_LED_MODE_0 = 0,
+    RTL8367C_LED_MODE_1,
+    RTL8367C_LED_MODE_2,
+    RTL8367C_LED_MODE_3,
+    RTL8367C_LED_MODE_END
+};
+
+extern ret_t rtl8367c_setAsicLedIndicateInfoConfig(rtk_uint32 ledno, rtk_uint32 config);
+extern ret_t rtl8367c_getAsicLedIndicateInfoConfig(rtk_uint32 ledno, rtk_uint32* pConfig);
+extern ret_t rtl8367c_setAsicForceLed(rtk_uint32 port, rtk_uint32 group, rtk_uint32 mode);
+extern ret_t rtl8367c_getAsicForceLed(rtk_uint32 port, rtk_uint32 group, rtk_uint32* pMode);
+extern ret_t rtl8367c_setAsicForceGroupLed(rtk_uint32 groupmask, rtk_uint32 mode);
+extern ret_t rtl8367c_getAsicForceGroupLed(rtk_uint32* groupmask, rtk_uint32* pMode);
+extern ret_t rtl8367c_setAsicLedBlinkRate(rtk_uint32 blinkRate);
+extern ret_t rtl8367c_getAsicLedBlinkRate(rtk_uint32* pBlinkRate);
+extern ret_t rtl8367c_setAsicLedForceBlinkRate(rtk_uint32 blinkRate);
+extern ret_t rtl8367c_getAsicLedForceBlinkRate(rtk_uint32* pBlinkRate);
+extern ret_t rtl8367c_setAsicLedGroupMode(rtk_uint32 mode);
+extern ret_t rtl8367c_getAsicLedGroupMode(rtk_uint32* pMode);
+extern ret_t rtl8367c_setAsicLedGroupEnable(rtk_uint32 group, rtk_uint32 portmask);
+extern ret_t rtl8367c_getAsicLedGroupEnable(rtk_uint32 group, rtk_uint32 *portmask);
+extern ret_t rtl8367c_setAsicLedOperationMode(rtk_uint32 mode);
+extern ret_t rtl8367c_getAsicLedOperationMode(rtk_uint32 *mode);
+extern ret_t rtl8367c_setAsicLedSerialModeConfig(rtk_uint32 active, rtk_uint32 serimode);
+extern ret_t rtl8367c_getAsicLedSerialModeConfig(rtk_uint32 *active, rtk_uint32 *serimode);
+extern ret_t rtl8367c_setAsicLedOutputEnable(rtk_uint32 enabled);
+extern ret_t rtl8367c_getAsicLedOutputEnable(rtk_uint32 *ptr_enabled);
+extern ret_t rtl8367c_setAsicLedSerialOutput(rtk_uint32 output, rtk_uint32 pmask);
+extern ret_t rtl8367c_getAsicLedSerialOutput(rtk_uint32 *pOutput, rtk_uint32 *pPmask);
+
+
+#endif /*#ifndef _RTL8367C_ASICDRV_LED_H_*/
+
diff -Nruw linux-4.4.115-fbx/drivers/misc/freebox./rtlapi/rtl8367c_asicdrv_lut.c linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/rtl8367c_asicdrv_lut.c
--- linux-4.4.115-fbx/drivers/misc/freebox./rtlapi/rtl8367c_asicdrv_lut.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/rtl8367c_asicdrv_lut.c	2019-01-22 16:16:24.715257382 +0100
@@ -0,0 +1,1548 @@
+/*
+ * Copyright (C) 2013 Realtek Semiconductor Corp.
+ * All Rights Reserved.
+ *
+ * This program is the proprietary software of Realtek Semiconductor
+ * Corporation and/or its licensors, and only be used, duplicated,
+ * modified or distributed under the authorized license from Realtek.
+ *
+ * ANY USE OF THE SOFTWARE OTHER THAN AS AUTHORIZED UNDER
+ * THIS LICENSE OR COPYRIGHT LAW IS PROHIBITED.
+ *
+ * $Revision: 79623 $
+ * $Date: 2017-06-14 17:15:42 +0800 (Wed, 14 Jun 2017) $
+ *
+ * Purpose : RTL8367C switch high-level API for RTL8367C
+ * Feature : LUT related functions
+ *
+ */
+
+#include <rtl8367c_asicdrv_lut.h>
+
+#include <string.h>
+
+static void _rtl8367c_fdbStUser2Smi( rtl8367c_luttb *pLutSt, rtk_uint16 *pFdbSmi)
+{
+    /* L3 lookup */
+    if(pLutSt->l3lookup)
+    {
+        if(pLutSt->l3vidlookup)
+        {
+            pFdbSmi[0] = (pLutSt->sip & 0x0000FFFF);
+            pFdbSmi[1] = (pLutSt->sip & 0xFFFF0000) >> 16;
+
+            pFdbSmi[2] = (pLutSt->dip & 0x0000FFFF);
+            pFdbSmi[3] = (pLutSt->dip & 0x0FFF0000) >> 16;
+
+            pFdbSmi[3] |= (pLutSt->l3lookup & 0x0001) << 12;
+            pFdbSmi[3] |= (pLutSt->l3vidlookup & 0x0001) << 13;
+            pFdbSmi[3] |= ((pLutSt->mbr & 0x0300) >> 8) << 14;
+
+            pFdbSmi[4] |= (pLutSt->mbr & 0x00FF);
+            pFdbSmi[4] |= (pLutSt->l3_vid & 0x00FF) << 8;
+
+            pFdbSmi[5] |= ((pLutSt->l3_vid & 0x0F00) >> 8);
+            pFdbSmi[5] |= (pLutSt->nosalearn & 0x0001) << 5;
+            pFdbSmi[5] |= ((pLutSt->mbr & 0x0400) >> 10) << 7;
+        }
+        else
+        {
+            pFdbSmi[0] = (pLutSt->sip & 0x0000FFFF);
+            pFdbSmi[1] = (pLutSt->sip & 0xFFFF0000) >> 16;
+
+            pFdbSmi[2] = (pLutSt->dip & 0x0000FFFF);
+            pFdbSmi[3] = (pLutSt->dip & 0x0FFF0000) >> 16;
+
+            pFdbSmi[3] |= (pLutSt->l3lookup & 0x0001) << 12;
+            pFdbSmi[3] |= (pLutSt->l3vidlookup & 0x0001) << 13;
+            pFdbSmi[3] |= ((pLutSt->mbr & 0x0300) >> 8) << 14;
+
+            pFdbSmi[4] |= (pLutSt->mbr & 0x00FF);
+            pFdbSmi[4] |= (pLutSt->igmpidx & 0x00FF) << 8;
+
+            pFdbSmi[5] |= (pLutSt->igmp_asic & 0x0001);
+            pFdbSmi[5] |= (pLutSt->lut_pri & 0x0007) << 1;
+            pFdbSmi[5] |= (pLutSt->fwd_en & 0x0001) << 4;
+            pFdbSmi[5] |= (pLutSt->nosalearn & 0x0001) << 5;
+            pFdbSmi[5] |= ((pLutSt->mbr & 0x0400) >> 10) << 7;
+        }
+    }
+    else if(pLutSt->mac.octet[0] & 0x01) /*Multicast L2 Lookup*/
+    {
+        pFdbSmi[0] |= pLutSt->mac.octet[5];
+        pFdbSmi[0] |= pLutSt->mac.octet[4] << 8;
+
+        pFdbSmi[1] |= pLutSt->mac.octet[3];
+        pFdbSmi[1] |= pLutSt->mac.octet[2] << 8;
+
+        pFdbSmi[2] |= pLutSt->mac.octet[1];
+        pFdbSmi[2] |= pLutSt->mac.octet[0] << 8;
+
+        pFdbSmi[3] |= pLutSt->cvid_fid;
+        pFdbSmi[3] |= (pLutSt->l3lookup & 0x0001) << 12;
+        pFdbSmi[3] |= (pLutSt->ivl_svl & 0x0001) << 13;
+        pFdbSmi[3] |= ((pLutSt->mbr & 0x0300) >> 8) << 14;
+
+        pFdbSmi[4] |= (pLutSt->mbr & 0x00FF);
+        pFdbSmi[4] |= (pLutSt->igmpidx & 0x00FF) << 8;
+
+        pFdbSmi[5] |= pLutSt->igmp_asic;
+        pFdbSmi[5] |= (pLutSt->lut_pri & 0x0007) << 1;
+        pFdbSmi[5] |= (pLutSt->fwd_en & 0x0001) << 4;
+        pFdbSmi[5] |= (pLutSt->nosalearn & 0x0001) << 5;
+        pFdbSmi[5] |= ((pLutSt->mbr & 0x0400) >> 10) << 7;
+    }
+    else /*Asic auto-learning*/
+    {
+        pFdbSmi[0] |= pLutSt->mac.octet[5];
+        pFdbSmi[0] |= pLutSt->mac.octet[4] << 8;
+
+        pFdbSmi[1] |= pLutSt->mac.octet[3];
+        pFdbSmi[1] |= pLutSt->mac.octet[2] << 8;
+
+        pFdbSmi[2] |= pLutSt->mac.octet[1];
+        pFdbSmi[2] |= pLutSt->mac.octet[0] << 8;
+
+        pFdbSmi[3] |= pLutSt->cvid_fid;
+        pFdbSmi[3] |= (pLutSt->l3lookup & 0x0001) << 12;
+        pFdbSmi[3] |= (pLutSt->ivl_svl & 0x0001) << 13;
+        pFdbSmi[3] |= ((pLutSt->spa & 0x0008) >> 3) << 15;
+
+        pFdbSmi[4] |= pLutSt->efid;
+        pFdbSmi[4] |= (pLutSt->fid & 0x000F) << 3;
+        pFdbSmi[4] |= (pLutSt->sa_en & 0x0001) << 7;
+        pFdbSmi[4] |= (pLutSt->spa & 0x0007) << 8;
+        pFdbSmi[4] |= (pLutSt->age & 0x0007) << 11;
+        pFdbSmi[4] |= (pLutSt->auth & 0x0001) << 14;
+        pFdbSmi[4] |= (pLutSt->sa_block & 0x0001) << 15;
+
+        pFdbSmi[5] |= pLutSt->da_block;
+        pFdbSmi[5] |= (pLutSt->lut_pri & 0x0007) << 1;
+        pFdbSmi[5] |= (pLutSt->fwd_en & 0x0001) << 4;
+        pFdbSmi[5] |= (pLutSt->nosalearn & 0x0001) << 5;
+    }
+}
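+
+/*
+ * Layout note (editor's summary of the encoding above): a LUT entry is six
+ * 16-bit SMI words.  Words 0-2 carry either the MAC address (octet 5 in the
+ * low byte of word 0) or the 32-bit SIP plus the DIP low word; word 3 carries
+ * CVID/FID or the DIP high bits, with bit 12 (l3lookup) selecting the L3
+ * format; words 4-5 carry the member/learning fields and flags.
+ */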
+
+
+static void _rtl8367c_fdbStSmi2User( rtl8367c_luttb *pLutSt, rtk_uint16 *pFdbSmi)
+{
+    /*L3 lookup*/
+    if(pFdbSmi[3] & 0x1000)
+    {
+        if(pFdbSmi[3] & 0x2000)
+        {
+            pLutSt->sip             = pFdbSmi[0] | (pFdbSmi[1] << 16);
+            pLutSt->dip             = 0xE0000000 | pFdbSmi[2] | ((pFdbSmi[3] & 0x0FFF) << 16);
+
+            pLutSt->mbr             = (pFdbSmi[4] & 0x00FF) | (((pFdbSmi[3] & 0xC000) >> 14) << 8) | (((pFdbSmi[5] & 0x0080) >> 7) << 10);
+            pLutSt->l3_vid          = ((pFdbSmi[4] & 0xFF00) >> 8) | ((pFdbSmi[5] & 0x000F) << 8); /* mirrors the encode path: word 5 bits 0-3 hold VID bits 8-11 */
+
+            pLutSt->l3lookup        = (pFdbSmi[3] & 0x1000) >> 12;
+            pLutSt->l3vidlookup     = (pFdbSmi[3] & 0x2000) >> 13;
+            pLutSt->nosalearn       = (pFdbSmi[5] & 0x0020) >> 5;
+        }
+        else
+        {
+            pLutSt->sip             = pFdbSmi[0] | (pFdbSmi[1] << 16);
+            pLutSt->dip             = 0xE0000000 | pFdbSmi[2] | ((pFdbSmi[3] & 0x0FFF) << 16);
+
+            pLutSt->lut_pri         = (pFdbSmi[5] & 0x000E) >> 1;
+            pLutSt->fwd_en          = (pFdbSmi[5] & 0x0010) >> 4;
+
+            pLutSt->mbr             = (pFdbSmi[4] & 0x00FF) | (((pFdbSmi[3] & 0xC000) >> 14) << 8) | (((pFdbSmi[5] & 0x0080) >> 7) << 10);
+            pLutSt->igmpidx         = (pFdbSmi[4] & 0xFF00) >> 8;
+
+            pLutSt->igmp_asic       = (pFdbSmi[5] & 0x0001);
+            pLutSt->l3lookup        = (pFdbSmi[3] & 0x1000) >> 12;
+            pLutSt->nosalearn       = (pFdbSmi[5] & 0x0020) >> 5;
+        }
+    }
+    else if(pFdbSmi[2] & 0x0100) /*Multicast L2 Lookup*/
+    {
+        pLutSt->mac.octet[0]    = (pFdbSmi[2] & 0xFF00) >> 8;
+        pLutSt->mac.octet[1]    = (pFdbSmi[2] & 0x00FF);
+        pLutSt->mac.octet[2]    = (pFdbSmi[1] & 0xFF00) >> 8;
+        pLutSt->mac.octet[3]    = (pFdbSmi[1] & 0x00FF);
+        pLutSt->mac.octet[4]    = (pFdbSmi[0] & 0xFF00) >> 8;
+        pLutSt->mac.octet[5]    = (pFdbSmi[0] & 0x00FF);
+
+        pLutSt->cvid_fid        = pFdbSmi[3] & 0x0FFF;
+        pLutSt->lut_pri         = (pFdbSmi[5] & 0x000E) >> 1;
+        pLutSt->fwd_en          = (pFdbSmi[5] & 0x0010) >> 4;
+
+        pLutSt->mbr             = (pFdbSmi[4] & 0x00FF) | (((pFdbSmi[3] & 0xC000) >> 14) << 8) | (((pFdbSmi[5] & 0x0080) >> 7) << 10);
+        pLutSt->igmpidx         = (pFdbSmi[4] & 0xFF00) >> 8;
+
+        pLutSt->igmp_asic       = (pFdbSmi[5] & 0x0001);
+        pLutSt->l3lookup        = (pFdbSmi[3] & 0x1000) >> 12;
+        pLutSt->ivl_svl         = (pFdbSmi[3] & 0x2000) >> 13;
+        pLutSt->nosalearn       = (pFdbSmi[5] & 0x0020) >> 5;
+    }
+    else /*Asic auto-learning*/
+    {
+        pLutSt->mac.octet[0]    = (pFdbSmi[2] & 0xFF00) >> 8;
+        pLutSt->mac.octet[1]    = (pFdbSmi[2] & 0x00FF);
+        pLutSt->mac.octet[2]    = (pFdbSmi[1] & 0xFF00) >> 8;
+        pLutSt->mac.octet[3]    = (pFdbSmi[1] & 0x00FF);
+        pLutSt->mac.octet[4]    = (pFdbSmi[0] & 0xFF00) >> 8;
+        pLutSt->mac.octet[5]    = (pFdbSmi[0] & 0x00FF);
+
+        pLutSt->cvid_fid        = pFdbSmi[3] & 0x0FFF;
+        pLutSt->lut_pri         = (pFdbSmi[5] & 0x000E) >> 1;
+        pLutSt->fwd_en          = (pFdbSmi[5] & 0x0010) >> 4;
+
+        pLutSt->sa_en           = (pFdbSmi[4] & 0x0080) >> 7;
+        pLutSt->auth            = (pFdbSmi[4] & 0x4000) >> 14;
+        pLutSt->spa             = ((pFdbSmi[4] & 0x0700) >> 8) | (((pFdbSmi[3] & 0x8000) >> 15) << 3);
+        pLutSt->age             = (pFdbSmi[4] & 0x3800) >> 11;
+        pLutSt->fid             = (pFdbSmi[4] & 0x0078) >> 3;
+        pLutSt->efid            = (pFdbSmi[4] & 0x0007);
+        pLutSt->sa_block        = (pFdbSmi[4] & 0x8000) >> 15;
+
+        pLutSt->da_block        = (pFdbSmi[5] & 0x0001);
+        pLutSt->l3lookup        = (pFdbSmi[3] & 0x1000) >> 12;
+        pLutSt->ivl_svl         = (pFdbSmi[3] & 0x2000) >> 13;
+        pLutSt->nosalearn       = (pFdbSmi[5] & 0x0020) >> 5;  /* word 5 bit 5, mirroring the encode path above */
+    }
+}
+
+/* Function Name:
+ *      rtl8367c_setAsicLutIpMulticastLookup
+ * Description:
+ *      Set Lut IP multicast lookup function
+ * Input:
+ *      enabled - 1: enabled, 0: disabled
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK   - Success
+ *      RT_ERR_SMI  - SMI access error
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_setAsicLutIpMulticastLookup(rtk_uint32 enabled)
+{
+    return rtl8367c_setAsicRegBit(RTL8367C_REG_LUT_CFG, RTL8367C_LUT_IPMC_HASH_OFFSET, enabled);
+}
+/* Function Name:
+ *      rtl8367c_getAsicLutIpMulticastLookup
+ * Description:
+ *      Get Lut IP multicast lookup function
+ * Input:
+ *      None
+ * Output:
+ *      pEnabled - 1: enabled, 0: disabled
+ * Return:
+ *      RT_ERR_OK   - Success
+ *      RT_ERR_SMI  - SMI access error
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicLutIpMulticastLookup(rtk_uint32* pEnabled)
+{
+    return rtl8367c_getAsicRegBit(RTL8367C_REG_LUT_CFG, RTL8367C_LUT_IPMC_HASH_OFFSET, pEnabled);
+}
+
+/* Function Name:
+ *      rtl8367c_setAsicLutIpMulticastVidLookup
+ * Description:
+ *      Set Lut IP multicast + VID lookup function
+ * Input:
+ *      enabled - 1: enabled, 0: disabled
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK   - Success
+ *      RT_ERR_SMI  - SMI access error
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_setAsicLutIpMulticastVidLookup(rtk_uint32 enabled)
+{
+    return rtl8367c_setAsicRegBit(RTL8367C_REG_LUT_CFG2, RTL8367C_LUT_IPMC_VID_HASH_OFFSET, enabled);
+}
+
+/* Function Name:
+ *      rtl8367c_getAsicLutIpMulticastVidLookup
+ * Description:
+ *      Get Lut IP multicast + VID lookup function
+ * Input:
+ *      None
+ * Output:
+ *      pEnabled - 1: enabled, 0: disabled
+ * Return:
+ *      RT_ERR_OK   - Success
+ *      RT_ERR_SMI  - SMI access error
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicLutIpMulticastVidLookup(rtk_uint32* pEnabled)
+{
+    return rtl8367c_getAsicRegBit(RTL8367C_REG_LUT_CFG2, RTL8367C_LUT_IPMC_VID_HASH_OFFSET, pEnabled);
+}
+
+/* Function Name:
+ *      rtl8367c_setAsicLutIpLookupMethod
+ * Description:
+ *      Set Lut IP lookup hash with DIP or {DIP,SIP} pair
+ * Input:
+ *      type - 1: When DIP can be found in IPMC_GROUP_TABLE, use DIP+SIP Hash, otherwise, use DIP+(SIP=0.0.0.0) Hash.
+ *             0: When DIP can be found in IPMC_GROUP_TABLE, use DIP+(SIP=0.0.0.0) Hash, otherwise use DIP+SIP Hash.
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK   - Success
+ *      RT_ERR_SMI  - SMI access error
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_setAsicLutIpLookupMethod(rtk_uint32 type)
+{
+    return rtl8367c_setAsicRegBit(RTL8367C_REG_LUT_CFG, RTL8367C_LUT_IPMC_LOOKUP_OP_OFFSET, type);
+}
+/* Function Name:
+ *      rtl8367c_getAsicLutIpLookupMethod
+ * Description:
+ *      Get Lut IP lookup hash with DIP or {DIP,SIP} pair
+ * Input:
+ *      None
+ * Output:
+ *      pType - 1: When DIP can be found in IPMC_GROUP_TABLE, use DIP+SIP Hash, otherwise, use DIP+(SIP=0.0.0.0) Hash.
+ *              0: When DIP can be found in IPMC_GROUP_TABLE, use DIP+(SIP=0.0.0.0) Hash, otherwise use DIP+SIP Hash.
+ * Return:
+ *      RT_ERR_OK   - Success
+ *      RT_ERR_SMI  - SMI access error
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicLutIpLookupMethod(rtk_uint32* pType)
+{
+    return rtl8367c_getAsicRegBit(RTL8367C_REG_LUT_CFG, RTL8367C_LUT_IPMC_LOOKUP_OP_OFFSET, pType);
+}
+/* Function Name:
+ *      rtl8367c_setAsicLutAgeTimerSpeed
+ * Description:
+ *      Set LUT aging-out timer and speed
+ * Input:
+ *      timer - Aging-out timer, 0: has been aged out
+ *      speed - Aging-out speed, 0: fastest ~ 3: slowest
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - Success
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_OUT_OF_RANGE - input parameter out of range
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_setAsicLutAgeTimerSpeed(rtk_uint32 timer, rtk_uint32 speed)
+{
+    if(timer>RTL8367C_LUT_AGETIMERMAX)
+        return RT_ERR_OUT_OF_RANGE;
+
+    if(speed >RTL8367C_LUT_AGESPEEDMAX)
+        return RT_ERR_OUT_OF_RANGE;
+
+    return rtl8367c_setAsicRegBits(RTL8367C_REG_LUT_CFG, RTL8367C_AGE_TIMER_MASK | RTL8367C_AGE_SPEED_MASK, (timer << RTL8367C_AGE_TIMER_OFFSET) | (speed << RTL8367C_AGE_SPEED_OFFSET));
+}
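+
+/*
+ * Note (editor's addition): timer and speed are two fields of the same
+ * LUT_CFG register, so they are combined into a single masked write above.
+ * For example, timer=2 and speed=1 become
+ * (2 << RTL8367C_AGE_TIMER_OFFSET) | (1 << RTL8367C_AGE_SPEED_OFFSET).
+ */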
+/* Function Name:
+ *      rtl8367c_getAsicLutAgeTimerSpeed
+ * Description:
+ *      Get LUT aging-out timer and speed
+ * Input:
+ *      None
+ * Output:
+ *      pTimer - Aging-out timer, 0: has been aged out
+ *      pSpeed - Aging-out speed, 0: fastest ~ 3: slowest
+ * Return:
+ *      RT_ERR_OK           - Success
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_OUT_OF_RANGE - input parameter out of range
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicLutAgeTimerSpeed(rtk_uint32* pTimer, rtk_uint32* pSpeed)
+{
+    rtk_uint32 regData;
+    ret_t retVal;
+
+    retVal = rtl8367c_getAsicReg(RTL8367C_REG_LUT_CFG, &regData);
+    if(retVal != RT_ERR_OK)
+        return retVal;
+
+    *pTimer =  (regData & RTL8367C_AGE_TIMER_MASK) >> RTL8367C_AGE_TIMER_OFFSET;
+
+    *pSpeed =  (regData & RTL8367C_AGE_SPEED_MASK) >> RTL8367C_AGE_SPEED_OFFSET;
+
+    return RT_ERR_OK;
+
+}
+/* Function Name:
+ *      rtl8367c_setAsicLutCamTbUsage
+ * Description:
+ *      Configure Lut CAM table usage
+ * Input:
+ *      enabled - L2 CAM table usage 1: enabled, 0: disabled
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK   - Success
+ *      RT_ERR_SMI  - SMI access error
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_setAsicLutCamTbUsage(rtk_uint32 enabled)
+{
+    ret_t retVal;
+
+    retVal = rtl8367c_setAsicRegBit(RTL8367C_REG_LUT_CFG, RTL8367C_BCAM_DISABLE_OFFSET, enabled ? 0 : 1);
+
+    return retVal;
+}
+/* Function Name:
+ *      rtl8367c_getAsicLutCamTbUsage
+ * Description:
+ *      Get Lut CAM table usage
+ * Input:
+ *      None
+ * Output:
+ *      pEnabled - L2 CAM table usage 1: enabled, 0: disabled
+ * Return:
+ *      RT_ERR_OK   - Success
+ *      RT_ERR_SMI  - SMI access error
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicLutCamTbUsage(rtk_uint32* pEnabled)
+{
+    ret_t       retVal;
+    rtk_uint32  regData;
+
+    if ((retVal = rtl8367c_getAsicRegBit(RTL8367C_REG_LUT_CFG, RTL8367C_BCAM_DISABLE_OFFSET, &regData)) != RT_ERR_OK)
+        return retVal;
+
+    *pEnabled = regData ? 0 : 1;
+    return RT_ERR_OK;
+}
+/* Function Name:
+ *      rtl8367c_setAsicLutLearnLimitNo
+ * Description:
+ *      Set per-Port auto learning limit number
+ * Input:
+ *      port    - Physical port number (0 ~ RTL8367C_PORTIDMAX)
+ *      number  - ASIC auto learning entries limit number
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK                   - Success
+ *      RT_ERR_SMI                  - SMI access error
+ *      RT_ERR_PORT_ID              - Invalid port number
+ *      RT_ERR_LIMITED_L2ENTRY_NUM  - Invalid auto learning limit number
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_setAsicLutLearnLimitNo(rtk_uint32 port, rtk_uint32 number)
+{
+    if(port > RTL8367C_PORTIDMAX)
+        return RT_ERR_PORT_ID;
+
+    if(number > RTL8367C_LUT_LEARNLIMITMAX)
+        return RT_ERR_LIMITED_L2ENTRY_NUM;
+
+    if(port < 8)
+        return rtl8367c_setAsicReg(RTL8367C_LUT_PORT_LEARN_LIMITNO_REG(port), number);
+    else
+        return rtl8367c_setAsicReg(RTL8367C_REG_LUT_PORT8_LEARN_LIMITNO + port - 8, number);
+
+}
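+
+/*
+ * Note (editor's addition): ports 0-7 have per-port limit registers reached
+ * through RTL8367C_LUT_PORT_LEARN_LIMITNO_REG(port); higher ports continue
+ * linearly from RTL8367C_REG_LUT_PORT8_LEARN_LIMITNO (e.g. port 9 maps to
+ * RTL8367C_REG_LUT_PORT8_LEARN_LIMITNO + 1).
+ */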
+/* Function Name:
+ *      rtl8367c_getAsicLutLearnLimitNo
+ * Description:
+ *      Get per-Port auto learning limit number
+ * Input:
+ *      port    - Physical port number (0 ~ RTL8367C_PORTIDMAX)
+ * Output:
+ *      pNumber - ASIC auto learning entries limit number
+ * Return:
+ *      RT_ERR_OK       - Success
+ *      RT_ERR_SMI      - SMI access error
+ *      RT_ERR_PORT_ID  - Invalid port number
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicLutLearnLimitNo(rtk_uint32 port, rtk_uint32* pNumber)
+{
+    if(port > RTL8367C_PORTIDMAX)
+        return RT_ERR_PORT_ID;
+
+    if(port < 8)
+        return rtl8367c_getAsicReg(RTL8367C_LUT_PORT_LEARN_LIMITNO_REG(port), pNumber);
+    else
+        return rtl8367c_getAsicReg(RTL8367C_REG_LUT_PORT8_LEARN_LIMITNO + port - 8, pNumber);
+}
+
+/* Function Name:
+ *      rtl8367c_setAsicSystemLutLearnLimitNo
+ * Description:
+ *      Set system auto learning limit number
+ * Input:
+ *      number  - ASIC auto learning entries limit number
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK                   - Success
+ *      RT_ERR_SMI                  - SMI access error
+ *      RT_ERR_PORT_ID              - Invalid port number
+ *      RT_ERR_LIMITED_L2ENTRY_NUM  - Invalid auto learning limit number
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_setAsicSystemLutLearnLimitNo(rtk_uint32 number)
+{
+    if(number > RTL8367C_LUT_LEARNLIMITMAX)
+        return RT_ERR_LIMITED_L2ENTRY_NUM;
+
+    return rtl8367c_setAsicReg(RTL8367C_REG_LUT_SYS_LEARN_LIMITNO, number);
+}
+
+/* Function Name:
+ *      rtl8367c_getAsicSystemLutLearnLimitNo
+ * Description:
+ *      Get system auto learning limit number
+ * Input:
+ *      None
+ * Output:
+ *      pNumber - ASIC auto learning entries limit number
+ * Return:
+ *      RT_ERR_OK           - Success
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_NULL_POINTER - NULL pointer
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicSystemLutLearnLimitNo(rtk_uint32 *pNumber)
+{
+    if(NULL == pNumber)
+        return RT_ERR_NULL_POINTER;
+
+    return rtl8367c_getAsicReg(RTL8367C_REG_LUT_SYS_LEARN_LIMITNO, pNumber);
+}
+
+/* Function Name:
+ *      rtl8367c_setAsicLutLearnOverAct
+ * Description:
+ *      Set auto learn over limit number action
+ * Input:
+ *      action  - Learn over action 0:normal, 1:drop 2:trap
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - Success
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_NOT_ALLOWED  - Invalid learn over action
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_setAsicLutLearnOverAct(rtk_uint32 action)
+{
+    if(action >= LRNOVERACT_END)
+        return RT_ERR_NOT_ALLOWED;
+
+    return rtl8367c_setAsicRegBits(RTL8367C_REG_PORT_SECURITY_CTRL, RTL8367C_LUT_LEARN_OVER_ACT_MASK, action);
+}
+/* Function Name:
+ *      rtl8367c_getAsicLutLearnOverAct
+ * Description:
+ *      Get auto learn over limit number action
+ * Input:
+ *      None
+ * Output:
+ *      pAction     - Learn over action 0:normal, 1:drop, 2:trap
+ * Return:
+ *      RT_ERR_OK           - Success
+ *      RT_ERR_SMI          - SMI access error
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicLutLearnOverAct(rtk_uint32* pAction)
+{
+    return rtl8367c_getAsicRegBits(RTL8367C_REG_PORT_SECURITY_CTRL, RTL8367C_LUT_LEARN_OVER_ACT_MASK, pAction);
+}
+
+/* Function Name:
+ *      rtl8367c_setAsicSystemLutLearnOverAct
+ * Description:
+ *      Set system auto learn over limit number action
+ * Input:
+ *      action  - Learn over action 0:normal, 1:drop, 2:trap
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - Success
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_NOT_ALLOWED  - Invalid learn over action
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_setAsicSystemLutLearnOverAct(rtk_uint32 action)
+{
+    if(action >= LRNOVERACT_END)
+        return RT_ERR_NOT_ALLOWED;
+
+    return rtl8367c_setAsicRegBits(RTL8367C_REG_LUT_LRN_SYS_LMT_CTRL, RTL8367C_LUT_SYSTEM_LEARN_OVER_ACT_MASK, action);
+}
+
+/* Function Name:
+ *      rtl8367c_getAsicSystemLutLearnOverAct
+ * Description:
+ *      Get system auto learn over limit number action
+ * Input:
+ *      None
+ * Output:
+ *      pAction     - Learn over action 0:normal, 1:drop, 2:trap
+ * Return:
+ *      RT_ERR_OK           - Success
+ *      RT_ERR_SMI          - SMI access error
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicSystemLutLearnOverAct(rtk_uint32 *pAction)
+{
+    if(NULL == pAction)
+        return RT_ERR_NULL_POINTER;
+
+    return rtl8367c_getAsicRegBits(RTL8367C_REG_LUT_LRN_SYS_LMT_CTRL, RTL8367C_LUT_SYSTEM_LEARN_OVER_ACT_MASK, pAction);
+}
+
+/* Function Name:
+ *      rtl8367c_setAsicSystemLutLearnPortMask
+ * Description:
+ *      Set system auto learn limit port mask
+ * Input:
+ *      portmask    - port mask of system learning limit
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - Success
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_PORT_MASK    - Error port mask
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_setAsicSystemLutLearnPortMask(rtk_uint32 portmask)
+{
+    ret_t retVal;
+
+    if(portmask > RTL8367C_PORTMASK)
+        return RT_ERR_PORT_MASK;
+
+    retVal = rtl8367c_setAsicRegBits(RTL8367C_REG_LUT_LRN_SYS_LMT_CTRL, RTL8367C_LUT_SYSTEM_LEARN_PMASK_MASK, portmask & 0xff);
+    if(retVal != RT_ERR_OK)
+        return retVal;
+    retVal = rtl8367c_setAsicRegBits(RTL8367C_REG_LUT_LRN_SYS_LMT_CTRL, RTL8367C_LUT_SYSTEM_LEARN_PMASK1_MASK, (portmask>>8) & 0x7);
+    if(retVal != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+
+}
+
+/* Function Name:
+ *      rtl8367c_getAsicSystemLutLearnPortMask
+ * Description:
+ *      Get system auto learn limit port mask
+ * Input:
+ *      None
+ * Output:
+ *      pPortmask   - port mask of system learning limit
+ * Return:
+ *      RT_ERR_OK           - Success
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_NULL_POINTER - NULL pointer
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicSystemLutLearnPortMask(rtk_uint32 *pPortmask)
+{
+    rtk_uint32 tmpmask;
+    ret_t retVal;
+
+    if(NULL == pPortmask)
+        return RT_ERR_NULL_POINTER;
+
+    retVal = rtl8367c_getAsicRegBits(RTL8367C_REG_LUT_LRN_SYS_LMT_CTRL, RTL8367C_LUT_SYSTEM_LEARN_PMASK_MASK, &tmpmask);
+    if(retVal != RT_ERR_OK)
+        return retVal;
+    *pPortmask = tmpmask & 0xff;
+    retVal = rtl8367c_getAsicRegBits(RTL8367C_REG_LUT_LRN_SYS_LMT_CTRL, RTL8367C_LUT_SYSTEM_LEARN_PMASK1_MASK, &tmpmask);
+    if(retVal != RT_ERR_OK)
+        return retVal;
+    *pPortmask |= (tmpmask & 0x7) << 8;
+
+    return RT_ERR_OK;
+}
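+
+/*
+ * Note (editor's addition): as in the setter above, the 11-port mask is split
+ * across two register fields: ports 0-7 in LUT_SYSTEM_LEARN_PMASK and ports
+ * 8-10 in LUT_SYSTEM_LEARN_PMASK1, recombined here as (high << 8) | low.
+ */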
+
+/* Function Name:
+ *      rtl8367c_setAsicL2LookupTb
+ * Description:
+ *      Set filtering database entry
+ * Input:
+ *      pL2Table    - L2 table entry writing to 8K+64 filtering database
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK   - Success
+ *      RT_ERR_SMI  - SMI access error
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_setAsicL2LookupTb(rtl8367c_luttb *pL2Table)
+{
+    ret_t retVal;
+    rtk_uint32 regData;
+    rtk_uint16 *accessPtr;
+    rtk_uint32 i;
+    rtk_uint16 smil2Table[RTL8367C_LUT_TABLE_SIZE];
+    rtk_uint32 tblCmd;
+    rtk_uint32 busyCounter;
+
+    memset(smil2Table, 0x00, sizeof(rtk_uint16) * RTL8367C_LUT_TABLE_SIZE);
+    _rtl8367c_fdbStUser2Smi(pL2Table, smil2Table);
+
+    if(pL2Table->wait_time == 0)
+        busyCounter = RTL8367C_LUT_BUSY_CHECK_NO;
+    else
+        busyCounter = pL2Table->wait_time;
+
+    while(busyCounter)
+    {
+        retVal = rtl8367c_getAsicRegBit(RTL8367C_TABLE_ACCESS_STATUS_REG, RTL8367C_TABLE_LUT_ADDR_BUSY_FLAG_OFFSET,&regData);
+        if(retVal != RT_ERR_OK)
+            return retVal;
+
+        pL2Table->lookup_busy = regData;
+        if(!regData)
+            break;
+
+        busyCounter --;
+        if(busyCounter == 0)
+            return RT_ERR_BUSYWAIT_TIMEOUT;
+    }
+
+    accessPtr = smil2Table;
+
+    for(i = 0; i < RTL8367C_LUT_ENTRY_SIZE; i++)
+    {
+        regData = *(accessPtr + i);
+        retVal = rtl8367c_setAsicReg(RTL8367C_TABLE_ACCESS_WRDATA_BASE + i, regData);
+        if(retVal != RT_ERR_OK)
+            return retVal;
+    }
+
+    tblCmd = (RTL8367C_TABLE_ACCESS_REG_DATA(TB_OP_WRITE,TB_TARGET_L2)) & (RTL8367C_TABLE_TYPE_MASK  | RTL8367C_COMMAND_TYPE_MASK);
+    /* Write Command */
+    retVal = rtl8367c_setAsicReg(RTL8367C_TABLE_ACCESS_CTRL_REG, tblCmd);
+    if(retVal != RT_ERR_OK)
+        return retVal;
+
+    if(pL2Table->wait_time == 0)
+        busyCounter = RTL8367C_LUT_BUSY_CHECK_NO;
+    else
+        busyCounter = pL2Table->wait_time;
+
+    while(busyCounter)
+    {
+        retVal = rtl8367c_getAsicRegBit(RTL8367C_TABLE_ACCESS_STATUS_REG, RTL8367C_TABLE_LUT_ADDR_BUSY_FLAG_OFFSET,&regData);
+        if(retVal != RT_ERR_OK)
+            return retVal;
+
+        pL2Table->lookup_busy = regData;
+        if(!regData)
+            break;
+
+        busyCounter --;
+        if(busyCounter == 0)
+            return RT_ERR_BUSYWAIT_TIMEOUT;
+    }
+
+    /*Read access status*/
+    retVal = rtl8367c_getAsicRegBit(RTL8367C_TABLE_ACCESS_STATUS_REG, RTL8367C_HIT_STATUS_OFFSET, &regData);
+    if(retVal != RT_ERR_OK)
+        return retVal;
+
+    pL2Table->lookup_hit = regData;
+    if(!pL2Table->lookup_hit)
+        return RT_ERR_FAILED;
+
+    /*Read access address*/
+    /*
+    retVal = rtl8367c_getAsicRegBits(RTL8367C_TABLE_ACCESS_STATUS_REG, RTL8367C_TABLE_LUT_ADDR_TYPE_MASK | RTL8367C_TABLE_LUT_ADDR_ADDRESS_MASK,&regData);
+    if(retVal != RT_ERR_OK)
+        return retVal;
+
+    pL2Table->address = regData;*/
+
+    retVal = rtl8367c_getAsicReg(RTL8367C_TABLE_ACCESS_STATUS_REG, &regData);
+    if(retVal != RT_ERR_OK)
+        return retVal;
+
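+    /* Reassemble the 13-bit entry address from the scattered status bits:
+     * status[10:0] -> address[10:0], status[14] -> address[11],
+     * status[11] -> address[12] */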
+    pL2Table->address = (regData & 0x7ff) | ((regData & 0x4000) >> 3) | ((regData & 0x800) << 1);
+    pL2Table->lookup_busy = 0;
+
+    return RT_ERR_OK;
+}
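+
+/*
+ * Usage sketch (editor's example, compiled out): writing a static multicast
+ * L2 entry.  The MAC value and member mask are illustrative only; wait_time 0
+ * falls back to the driver's default busy-poll count.
+ */
+#if 0 /* example only */
+static ret_t example_add_static_mc_entry(void)
+{
+    rtl8367c_luttb entry;
+
+    memset(&entry, 0, sizeof(entry));
+    entry.mac.octet[0] = 0x01;          /* multicast bit set */
+    entry.mac.octet[1] = 0x00;
+    entry.mac.octet[2] = 0x5E;
+    entry.mac.octet[3] = 0x00;
+    entry.mac.octet[4] = 0x00;
+    entry.mac.octet[5] = 0x01;
+    entry.cvid_fid  = 1;                /* VLAN 1 */
+    entry.mbr       = 0x0F;             /* forward to ports 0-3 */
+    entry.nosalearn = 1;                /* static entry */
+
+    return rtl8367c_setAsicL2LookupTb(&entry);
+}
+#endif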
+/* Function Name:
+ *      rtl8367c_getAsicL2LookupTb
+ * Description:
+ *      Get filtering database entry
+ * Input:
+ *      method      - LUT read method (LUTREADMETHOD_*)
+ *      pL2Table    - L2 table entry read back from the filtering database
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK               - Success
+ *      RT_ERR_SMI              - SMI access error
+ *      RT_ERR_INPUT            - Invalid input parameter
+ *      RT_ERR_BUSYWAIT_TIMEOUT - LUT is busy at retrieving
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicL2LookupTb(rtk_uint32 method, rtl8367c_luttb *pL2Table)
+{
+    ret_t retVal;
+    rtk_uint32 regData;
+    rtk_uint16* accessPtr;
+    rtk_uint32 i;
+    rtk_uint16 smil2Table[RTL8367C_LUT_TABLE_SIZE];
+    rtk_uint32 busyCounter;
+    rtk_uint32 tblCmd;
+
+    if(pL2Table->wait_time == 0)
+        busyCounter = RTL8367C_LUT_BUSY_CHECK_NO;
+    else
+        busyCounter = pL2Table->wait_time;
+
+    while(busyCounter)
+    {
+        retVal = rtl8367c_getAsicRegBit(RTL8367C_TABLE_ACCESS_STATUS_REG, RTL8367C_TABLE_LUT_ADDR_BUSY_FLAG_OFFSET,&regData);
+        if(retVal != RT_ERR_OK)
+            return retVal;
+
+        pL2Table->lookup_busy = regData;
+        if(!pL2Table->lookup_busy)
+            break;
+
+        busyCounter --;
+        if(busyCounter == 0)
+            return RT_ERR_BUSYWAIT_TIMEOUT;
+    }
+
+
+    tblCmd = (method << RTL8367C_ACCESS_METHOD_OFFSET) & RTL8367C_ACCESS_METHOD_MASK;
+
+    switch(method)
+    {
+        case LUTREADMETHOD_ADDRESS:
+        case LUTREADMETHOD_NEXT_ADDRESS:
+        case LUTREADMETHOD_NEXT_L2UC:
+        case LUTREADMETHOD_NEXT_L2MC:
+        case LUTREADMETHOD_NEXT_L3MC:
+        case LUTREADMETHOD_NEXT_L2L3MC:
+            retVal = rtl8367c_setAsicReg(RTL8367C_TABLE_ACCESS_ADDR_REG, pL2Table->address);
+            if(retVal != RT_ERR_OK)
+                return retVal;
+            break;
+        case LUTREADMETHOD_MAC:
+            memset(smil2Table, 0x00, sizeof(rtk_uint16) * RTL8367C_LUT_TABLE_SIZE);
+            _rtl8367c_fdbStUser2Smi(pL2Table, smil2Table);
+
+            accessPtr = smil2Table;
+            regData = *accessPtr;
+            for(i=0; i<RTL8367C_LUT_ENTRY_SIZE; i++)
+            {
+                retVal = rtl8367c_setAsicReg(RTL8367C_TABLE_ACCESS_WRDATA_BASE + i, regData);
+                if(retVal != RT_ERR_OK)
+                    return retVal;
+
+                accessPtr ++;
+                regData = *accessPtr;
+
+            }
+            break;
+        case LUTREADMETHOD_NEXT_L2UCSPA:
+            retVal = rtl8367c_setAsicReg(RTL8367C_TABLE_ACCESS_ADDR_REG, pL2Table->address);
+            if(retVal != RT_ERR_OK)
+                return retVal;
+
+            tblCmd = tblCmd | ((pL2Table->spa << RTL8367C_TABLE_ACCESS_CTRL_SPA_OFFSET) & RTL8367C_TABLE_ACCESS_CTRL_SPA_MASK);
+
+            break;
+        default:
+            return RT_ERR_INPUT;
+    }
+
+    tblCmd = tblCmd | ((RTL8367C_TABLE_ACCESS_REG_DATA(TB_OP_READ,TB_TARGET_L2)) & (RTL8367C_TABLE_TYPE_MASK  | RTL8367C_COMMAND_TYPE_MASK));
+    /* Read Command */
+    retVal = rtl8367c_setAsicReg(RTL8367C_TABLE_ACCESS_CTRL_REG, tblCmd);
+    if(retVal != RT_ERR_OK)
+        return retVal;
+
+    if(pL2Table->wait_time == 0)
+        busyCounter = RTL8367C_LUT_BUSY_CHECK_NO;
+    else
+        busyCounter = pL2Table->wait_time;
+
+    while(busyCounter)
+    {
+        retVal = rtl8367c_getAsicRegBit(RTL8367C_TABLE_ACCESS_STATUS_REG, RTL8367C_TABLE_LUT_ADDR_BUSY_FLAG_OFFSET,&regData);
+        if(retVal != RT_ERR_OK)
+            return retVal;
+
+        pL2Table->lookup_busy = regData;
+        if(!pL2Table->lookup_busy)
+            break;
+
+        busyCounter --;
+        if(busyCounter == 0)
+            return RT_ERR_BUSYWAIT_TIMEOUT;
+    }
+
+    retVal = rtl8367c_getAsicRegBit(RTL8367C_TABLE_ACCESS_STATUS_REG, RTL8367C_HIT_STATUS_OFFSET,&regData);
+    if(retVal != RT_ERR_OK)
+            return retVal;
+    pL2Table->lookup_hit = regData;
+    if(!pL2Table->lookup_hit)
+        return RT_ERR_L2_ENTRY_NOTFOUND;
+
+    /*Read access address*/
+    //retVal = rtl8367c_getAsicRegBits(RTL8367C_TABLE_ACCESS_STATUS_REG, RTL8367C_TABLE_LUT_ADDR_TYPE_MASK | RTL8367C_TABLE_LUT_ADDR_ADDRESS_MASK,&regData);
+    retVal = rtl8367c_getAsicReg(RTL8367C_TABLE_ACCESS_STATUS_REG, &regData);
+    if(retVal != RT_ERR_OK)
+        return retVal;
+
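+    /* Reassemble the entry address from the scattered status bits, as in the
+     * write path above */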
+    pL2Table->address = (regData & 0x7ff) | ((regData & 0x4000) >> 3) | ((regData & 0x800) << 1);
+
+    /*read L2 entry */
+    memset(smil2Table, 0x00, sizeof(rtk_uint16) * RTL8367C_LUT_TABLE_SIZE);
+
+    accessPtr = smil2Table;
+
+    for(i = 0; i < RTL8367C_LUT_ENTRY_SIZE; i++)
+    {
+        retVal = rtl8367c_getAsicReg(RTL8367C_TABLE_ACCESS_RDDATA_BASE + i, &regData);
+        if(retVal != RT_ERR_OK)
+            return retVal;
+
+        *accessPtr = regData;
+
+        accessPtr ++;
+    }
+
+    _rtl8367c_fdbStSmi2User(pL2Table, smil2Table);
+
+    return RT_ERR_OK;
+}
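+
+/*
+ * Usage sketch (editor's example, compiled out): one plausible way to walk
+ * the filtering database with LUTREADMETHOD_NEXT_ADDRESS.  The scan stops
+ * when the ASIC reports no further hit (RT_ERR_L2_ENTRY_NOTFOUND).
+ */
+#if 0 /* example only */
+static void example_walk_l2_table(void)
+{
+    rtl8367c_luttb entry;
+
+    memset(&entry, 0, sizeof(entry));
+    entry.address = 0;
+
+    while (rtl8367c_getAsicL2LookupTb(LUTREADMETHOD_NEXT_ADDRESS, &entry) == RT_ERR_OK)
+    {
+        /* entry now holds the decoded LUT entry found at entry.address */
+        entry.address++;    /* continue the scan from the next slot */
+    }
+}
+#endif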
+/* Function Name:
+ *      rtl8367c_getAsicLutLearnNo
+ * Description:
+ *      Get per-Port auto learning number
+ * Input:
+ *      port    - Physical port number (0 ~ RTL8367C_PORTIDMAX)
+ * Output:
+ *      pNumber - number of entries the ASIC auto-learned on this port
+ * Return:
+ *      RT_ERR_OK       - Success
+ *      RT_ERR_SMI      - SMI access error
+ *      RT_ERR_PORT_ID  - Invalid port number
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicLutLearnNo(rtk_uint32 port, rtk_uint32* pNumber)
+{
+    ret_t retVal;
+
+    if(port > RTL8367C_PORTIDMAX)
+        return RT_ERR_PORT_ID;
+
+    if(port < 10)
+    {
+        retVal = rtl8367c_getAsicReg(RTL8367C_REG_L2_LRN_CNT_REG(port), pNumber);
+        if (retVal != RT_ERR_OK)
+            return retVal;
+    }
+    else
+    {
+        retVal = rtl8367c_getAsicReg(RTL8367C_REG_L2_LRN_CNT_CTRL10, pNumber);
+        if (retVal != RT_ERR_OK)
+            return retVal;
+    }
+
+    return RT_ERR_OK;
+}
+
+/* Function Name:
+ *      rtl8367c_setAsicLutFlushAll
+ * Description:
+ *      Flush all entries in LUT. Includes static & dynamic entries
+ * Input:
+ *      None
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK       - Success
+ *      RT_ERR_SMI      - SMI access error
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_setAsicLutFlushAll(void)
+{
+    return rtl8367c_setAsicRegBit(RTL8367C_REG_L2_FLUSH_CTRL3, RTL8367C_L2_FLUSH_CTRL3_OFFSET, 1);
+}
+
+/* Function Name:
+ *      rtl8367c_getAsicLutFlushAllStatus
+ * Description:
+ *      Get flush-all status, 1: busy, 0: normal
+ * Input:
+ *      None
+ * Output:
+ *      pBusyStatus - Busy state
+ * Return:
+ *      RT_ERR_OK           - Success
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_NULL_POINTER - Null pointer
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicLutFlushAllStatus(rtk_uint32 *pBusyStatus)
+{
+    if(NULL == pBusyStatus)
+        return RT_ERR_NULL_POINTER;
+
+    return rtl8367c_getAsicRegBit(RTL8367C_REG_L2_FLUSH_CTRL3, RTL8367C_L2_FLUSH_CTRL3_OFFSET, pBusyStatus);
+}
+
+/* Function Name:
+ *      rtl8367c_setAsicLutForceFlush
+ * Description:
+ *      Set per port force flush setting
+ * Input:
+ *      portmask    - portmask (0 ~ RTL8367C_PORTMASK)
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - Success
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_PORT_MASK    - Invalid portmask
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_setAsicLutForceFlush(rtk_uint32 portmask)
+{
+    ret_t retVal;
+
+    if(portmask > RTL8367C_PORTMASK)
+        return RT_ERR_PORT_MASK;
+
+    retVal = rtl8367c_setAsicRegBits(RTL8367C_FORCE_FLUSH_REG, RTL8367C_FORCE_FLUSH_PORTMASK_MASK, portmask & 0xff);
+    if(retVal != RT_ERR_OK)
+        return retVal;
+
+    retVal = rtl8367c_setAsicRegBits(RTL8367C_REG_FORCE_FLUSH1, RTL8367C_PORTMASK1_MASK, (portmask >> 8) & 0x7);
+    if(retVal != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+/* Function Name:
+ *      rtl8367c_getAsicLutForceFlushStatus
+ * Description:
+ *      Get per port force flush status
+ * Input:
+ *      None
+ * Output:
+ *      pPortmask   - per-port force-flush busy status portmask
+ * Return:
+ *      RT_ERR_OK   - Success
+ *      RT_ERR_SMI  - SMI access error
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicLutForceFlushStatus(rtk_uint32 *pPortmask)
+{
+    rtk_uint32 tmpMask;
+    ret_t retVal;
+
+    retVal = rtl8367c_getAsicRegBits(RTL8367C_FORCE_FLUSH_REG, RTL8367C_BUSY_STATUS_MASK,&tmpMask);
+    if(retVal != RT_ERR_OK)
+        return retVal;
+    *pPortmask = tmpMask & 0xff;
+
+    retVal = rtl8367c_getAsicRegBits(RTL8367C_REG_FORCE_FLUSH1, RTL8367C_BUSY_STATUS1_MASK,&tmpMask);
+    if(retVal != RT_ERR_OK)
+        return retVal;
+    *pPortmask |= (tmpMask & 7) << 8;
+
+    return RT_ERR_OK;
+}
+/* Function Name:
+ *      rtl8367c_setAsicLutFlushMode
+ * Description:
+ *      Set user-forced L2 LUT table flush mode
+ * Input:
+ *      mode    - 0:Port based 1: Port + VLAN based 2:Port + FID/MSTI based
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - Success
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_NOT_ALLOWED  - Actions not allowed by the function
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_setAsicLutFlushMode(rtk_uint32 mode)
+{
+    if( mode >= FLUSHMDOE_END )
+        return RT_ERR_NOT_ALLOWED;
+
+    return rtl8367c_setAsicRegBits(RTL8367C_REG_L2_FLUSH_CTRL2, RTL8367C_LUT_FLUSH_MODE_MASK, mode);
+}
+/* Function Name:
+ *      rtl8367c_getAsicLutFlushMode
+ * Description:
+ *      Get user-forced L2 LUT table flush mode
+ * Input:
+ *      None
+ * Output:
+ *      pMode   - 0:Port based 1: Port + VLAN based 2:Port + FID/MSTI based
+ * Return:
+ *      RT_ERR_OK   - Success
+ *      RT_ERR_SMI  - SMI access error
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicLutFlushMode(rtk_uint32* pMode)
+{
+    return rtl8367c_getAsicRegBits(RTL8367C_REG_L2_FLUSH_CTRL2, RTL8367C_LUT_FLUSH_MODE_MASK, pMode);
+}
+/* Function Name:
+ *      rtl8367c_setAsicLutFlushType
+ * Description:
+ *      Set L2 LUT flush type
+ * Input:
+ *      type    - 0: dynamic unicast; 1: both dynamic and static unicast entry
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK       - Success
+ *      RT_ERR_SMI      - SMI access error
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_setAsicLutFlushType(rtk_uint32 type)
+{
+    return rtl8367c_setAsicRegBit(RTL8367C_REG_L2_FLUSH_CTRL2, RTL8367C_LUT_FLUSH_TYPE_OFFSET,type);
+}
+/* Function Name:
+ *      rtl8367c_getAsicLutFlushType
+ * Description:
+ *      Get L2 LUT flush type
+ * Input:
+ *      None
+ * Output:
+ *      pType   - 0: dynamic unicast; 1: both dynamic and static unicast entry
+ * Return:
+ *      RT_ERR_OK       - Success
+ *      RT_ERR_SMI      - SMI access error
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicLutFlushType(rtk_uint32* pType)
+{
+    return rtl8367c_getAsicRegBit(RTL8367C_REG_L2_FLUSH_CTRL2, RTL8367C_LUT_FLUSH_TYPE_OFFSET,pType);
+}
+
+
+/* Function Name:
+ *      rtl8367c_setAsicLutFlushVid
+ * Description:
+ *      Set VID of the Port + VID based flush mode
+ * Input:
+ *      vid     - Vid (0~4095)
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK       - Success
+ *      RT_ERR_SMI      - SMI access error
+ *      RT_ERR_VLAN_VID - Invalid VID parameter (0~4095)
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_setAsicLutFlushVid(rtk_uint32 vid)
+{
+    if( vid > RTL8367C_VIDMAX )
+        return RT_ERR_VLAN_VID;
+
+    return rtl8367c_setAsicRegBits(RTL8367C_REG_L2_FLUSH_CTRL1, RTL8367C_LUT_FLUSH_VID_MASK, vid);
+}
+/* Function Name:
+ *      rtl8367c_getAsicLutFlushVid
+ * Description:
+ *      Get VID of the Port + VID based flush mode
+ * Input:
+ *      None
+ * Output:
+ *      pVid    - Vid (0~4095)
+ * Return:
+ *      RT_ERR_OK   - Success
+ *      RT_ERR_SMI  - SMI access error
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicLutFlushVid(rtk_uint32* pVid)
+{
+    return rtl8367c_getAsicRegBits(RTL8367C_REG_L2_FLUSH_CTRL1, RTL8367C_LUT_FLUSH_VID_MASK, pVid);
+}
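+
+/*
+ * Usage sketch (editor's example, compiled out): flushing the dynamic L2
+ * entries learned on port 2 for VLAN 100.  Mode value 1 selects Port + VLAN
+ * based flushing per the documentation above.
+ */
+#if 0 /* example only */
+static ret_t example_flush_port_vlan(void)
+{
+    ret_t retVal;
+
+    if ((retVal = rtl8367c_setAsicLutFlushMode(1)) != RT_ERR_OK)    /* Port + VLAN based */
+        return retVal;
+    if ((retVal = rtl8367c_setAsicLutFlushType(0)) != RT_ERR_OK)    /* dynamic entries only */
+        return retVal;
+    if ((retVal = rtl8367c_setAsicLutFlushVid(100)) != RT_ERR_OK)
+        return retVal;
+
+    return rtl8367c_setAsicLutForceFlush(1 << 2);                   /* trigger on port 2 */
+}
+#endif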
+/* Function Name:
+ *      rtl8367c_setAsicLutFlushFid
+ * Description:
+ *      Set FID of the Port + FID based flush mode
+ * Input:
+ *      fid     - FID/MSTI for force flush
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK       - Success
+ *      RT_ERR_SMI      - SMI access error
+ *      RT_ERR_L2_FID   - Invalid FID (0~15)
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_setAsicLutFlushFid(rtk_uint32 fid)
+{
+    if( fid > RTL8367C_FIDMAX )
+        return RT_ERR_L2_FID;
+
+    return rtl8367c_setAsicRegBits(RTL8367C_REG_L2_FLUSH_CTRL1, RTL8367C_LUT_FLUSH_FID_MASK, fid);
+}
+/* Function Name:
+ *      rtl8367c_getAsicLutFlushFid
+ * Description:
+ *      Get FID of Port + FID pLutSt flush mode
+ * Input:
+ *      pFid    - FID/MSTI for force flush
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK       - Success
+ *      RT_ERR_SMI      - SMI access error
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicLutFlushFid(rtk_uint32* pFid)
+{
+    return rtl8367c_getAsicRegBits(RTL8367C_REG_L2_FLUSH_CTRL1, RTL8367C_LUT_FLUSH_FID_MASK, pFid);
+}
+/* Function Name:
+ *      rtl8367c_setAsicLutDisableAging
+ * Description:
+ *      Set L2 LUT aging per port setting
+ * Input:
+ *      port    - Physical port number (0 ~ RTL8367C_PORTIDMAX)
+ *      disabled    - 0: enable aging; 1: disable aging
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK       - Success
+ *      RT_ERR_SMI      - SMI access error
+ *      RT_ERR_PORT_ID  - Invalid port number
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_setAsicLutDisableAging(rtk_uint32 port, rtk_uint32 disabled)
+{
+    if(port > RTL8367C_PORTIDMAX)
+        return RT_ERR_PORT_ID;
+
+    return rtl8367c_setAsicRegBit(RTL8367C_LUT_AGEOUT_CTRL_REG, port, disabled);
+}
+/* Function Name:
+ *      rtl8367c_getAsicLutDisableAging
+ * Description:
+ *      Get L2 LUT aging per port setting
+ * Input:
+ *      port    - Physical port number (0 ~ RTL8367C_PORTIDMAX)
+ * Output:
+ *      pDisabled - 0: aging enabled; 1: aging disabled
+ * Return:
+ *      RT_ERR_OK       - Success
+ *      RT_ERR_SMI      - SMI access error
+ *      RT_ERR_PORT_ID  - Invalid port number
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicLutDisableAging(rtk_uint32 port, rtk_uint32 *pDisabled)
+{
+    if(port > RTL8367C_PORTIDMAX)
+        return RT_ERR_PORT_ID;
+
+    return rtl8367c_getAsicRegBit(RTL8367C_LUT_AGEOUT_CTRL_REG, port, pDisabled);
+}
+
+/* Function Name:
+ *      rtl8367c_setAsicLutIPMCGroup
+ * Description:
+ *      Set IPMC Group Table
+ * Input:
+ *      index       - the entry index in table (0 ~ 63)
+ *      group_addr  - the multicast group address (224.0.0.0 ~ 239.255.255.255)
+ *      vid         - VLAN ID
+ *      pmask       - portmask
+ *      valid       - valid bit
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK       - Success
+ *      RT_ERR_SMI      - SMI access error
+ *      RT_ERR_INPUT    - Invalid parameter
+ *      RT_ERR_VLAN_VID - Invalid VID parameter (0~4095)
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_setAsicLutIPMCGroup(rtk_uint32 index, ipaddr_t group_addr, rtk_uint32 vid, rtk_uint32 pmask, rtk_uint32 valid)
+{
+    rtk_uint32  regAddr, regData, bitoffset;
+    ipaddr_t    ipData;
+    ret_t       retVal;
+
+    if(index > RTL8367C_LUT_IPMCGRP_TABLE_MAX)
+        return RT_ERR_INPUT;
+
+    if (vid > RTL8367C_VIDMAX)
+        return RT_ERR_VLAN_VID;
+
+    ipData = group_addr;
+
+    if( (ipData & 0xF0000000) != 0xE0000000)    /* not in 224.0.0.0 ~ 239.255.255.255 */
+        return RT_ERR_INPUT;
+
+    /* Group Address */
+    regAddr = RTL8367C_REG_IPMC_GROUP_ENTRY0_H + (index * 2);
+    regData = ((ipData & 0x0FFFFFFF) >> 16);
+
+    if( (retVal = rtl8367c_setAsicReg(regAddr, regData)) != RT_ERR_OK)
+        return retVal;
+
+    regAddr++;
+    regData = (ipData & 0x0000FFFF);
+
+    if( (retVal = rtl8367c_setAsicReg(regAddr, regData)) != RT_ERR_OK)
+        return retVal;
+
+    /* VID */
+    regAddr = RTL8367C_REG_IPMC_GROUP_VID_00 + index;
+    regData = vid;
+
+    if( (retVal = rtl8367c_setAsicReg(regAddr, regData)) != RT_ERR_OK)
+        return retVal;
+
+    /* portmask */
+    regAddr = RTL8367C_REG_IPMC_GROUP_PMSK_00 + index;
+    regData = pmask;
+
+    if( (retVal = rtl8367c_setAsicReg(regAddr, regData)) != RT_ERR_OK)
+        return retVal;
+
+    /* valid */
+    regAddr = RTL8367C_REG_IPMC_GROUP_VALID_15_0 + (index / 16);
+    bitoffset = index % 16;
+    if( (retVal = rtl8367c_setAsicRegBit(regAddr, bitoffset, valid)) != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
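+
+/*
+ * Usage sketch (editor's example, compiled out): programming IPMC group entry
+ * 0 with 239.1.1.1 on VLAN 1, forwarded to ports 0-3.  The address must fall
+ * inside 224.0.0.0/4 or the API rejects it.
+ */
+#if 0 /* example only */
+static ret_t example_add_ipmc_group(void)
+{
+    ipaddr_t group = 0xEF010101;    /* 239.1.1.1 */
+
+    return rtl8367c_setAsicLutIPMCGroup(0, group, 1, 0x0F, 1);
+}
+#endif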
+
+/* Function Name:
+ *      rtl8367c_getAsicLutIPMCGroup
+ * Description:
+ *      Get IPMC Group Table
+ * Input:
+ *      index       - the entry index in table (0 ~ 63)
+ * Output:
+ *      pGroup_addr - the multicast group address (224.0.0.0 ~ 239.255.255.255)
+ *      pVid        - VLAN ID
+ *      pPmask      - portmask
+ *      pValid      - Valid bit
+ * Return:
+ *      RT_ERR_OK       - Success
+ *      RT_ERR_SMI      - SMI access error
+ *      RT_ERR_INPUT        - Invalid parameter
+ *      RT_ERR_NULL_POINTER - Null pointer
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicLutIPMCGroup(rtk_uint32 index, ipaddr_t *pGroup_addr, rtk_uint32 *pVid, rtk_uint32 *pPmask, rtk_uint32 *pValid)
+{
+    rtk_uint32      regAddr, regData, bitoffset;
+    ipaddr_t    ipData;
+    ret_t       retVal;
+
+    if(index > RTL8367C_LUT_IPMCGRP_TABLE_MAX)
+        return RT_ERR_INPUT;
+
+    if (NULL == pGroup_addr)
+        return RT_ERR_NULL_POINTER;
+
+    if (NULL == pVid)
+        return RT_ERR_NULL_POINTER;
+
+    if (NULL == pPmask)
+        return RT_ERR_NULL_POINTER;
+
+    if (NULL == pValid)
+        return RT_ERR_NULL_POINTER;
+
+    /* Group address */
+    regAddr = RTL8367C_REG_IPMC_GROUP_ENTRY0_H + (index * 2);
+    if( (retVal = rtl8367c_getAsicReg(regAddr, &regData)) != RT_ERR_OK)
+        return retVal;
+
+    *pGroup_addr = (((regData & 0x00000FFF) << 16) | 0xE0000000);
+
+    regAddr++;
+    if( (retVal = rtl8367c_getAsicReg(regAddr, &regData)) != RT_ERR_OK)
+        return retVal;
+
+    ipData = (*pGroup_addr | (regData & 0x0000FFFF));
+    *pGroup_addr = ipData;
+
+    /* VID */
+    regAddr = RTL8367C_REG_IPMC_GROUP_VID_00 + index;
+    if( (retVal = rtl8367c_getAsicReg(regAddr, &regData)) != RT_ERR_OK)
+        return retVal;
+
+    *pVid = regData;
+
+    /* portmask */
+    regAddr = RTL8367C_REG_IPMC_GROUP_PMSK_00 + index;
+    if( (retVal = rtl8367c_getAsicReg(regAddr, &regData)) != RT_ERR_OK)
+        return retVal;
+
+    *pPmask = regData;
+
+    /* valid */
+    regAddr = RTL8367C_REG_IPMC_GROUP_VALID_15_0 + (index / 16);
+    bitoffset = index % 16;
+    if( (retVal = rtl8367c_getAsicRegBit(regAddr, bitoffset, &regData)) != RT_ERR_OK)
+        return retVal;
+
+    *pValid = regData;
+
+    return RT_ERR_OK;
+}
+
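+/*
+ * Editor's example sketch (not part of the Realtek sources): program IPMC
+ * group entry 0 so that 239.1.1.1 on VLAN 100 floods to ports 0~3, then
+ * read the entry back. Entry index, VID and portmask are illustrative.
+ */
+static ret_t example_lut_ipmc_group(void)
+{
+    ipaddr_t    grp = 0xEF010101;   /* 239.1.1.1, inside 224.0.0.0 ~ 239.255.255.255 */
+    ipaddr_t    rd_grp;
+    rtk_uint32  rd_vid, rd_pmsk, rd_valid;
+    ret_t       retVal;
+
+    /* index 0, VID 100, portmask 0x0F (ports 0~3), valid = 1 */
+    if((retVal = rtl8367c_setAsicLutIPMCGroup(0, grp, 100, 0x0F, 1)) != RT_ERR_OK)
+        return retVal;
+
+    return rtl8367c_getAsicLutIPMCGroup(0, &rd_grp, &rd_vid, &rd_pmsk, &rd_valid);
+}
+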
+/* Function Name:
+ *      rtl8367c_setAsicLutLinkDownForceAging
+ * Description:
+ *      Set LUT link down aging setting.
+ * Input:
+ *      enable      - 1: enable link down force aging, 0: disable
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK       - Success
+ *      RT_ERR_SMI      - SMI access error
+ *      RT_ERR_ENABLE   - Invalid parameter
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_setAsicLutLinkDownForceAging(rtk_uint32 enable)
+{
+    if(enable > 1)
+        return RT_ERR_ENABLE;
+
+    return rtl8367c_setAsicRegBit(RTL8367C_REG_LUT_CFG, RTL8367C_LINKDOWN_AGEOUT_OFFSET, enable ? 0 : 1);
+}
+
+/* Function Name:
+ *      rtl8367c_getAsicLutLinkDownForceAging
+ * Description:
+ *      Get LUT link down aging setting.
+ * Input:
+ *      None
+ * Output:
+ *      pEnable         - 1: link down force aging enabled, 0: disabled
+ * Return:
+ *      RT_ERR_OK       - Success
+ *      RT_ERR_SMI      - SMI access error
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicLutLinkDownForceAging(rtk_uint32 *pEnable)
+{
+    rtk_uint32  value;
+    ret_t   retVal;
+
+    if ((retVal = rtl8367c_getAsicRegBit(RTL8367C_REG_LUT_CFG, RTL8367C_LINKDOWN_AGEOUT_OFFSET, &value)) != RT_ERR_OK)
+        return retVal;
+
+    *pEnable = value ? 0 : 1;
+    return RT_ERR_OK;
+}
+
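+/*
+ * Editor's example sketch (not part of the Realtek sources): the
+ * LINKDOWN_AGEOUT register bit is active-low, so the setter writes 0 to
+ * enable and the getter re-inverts the bit before returning it.
+ */
+static ret_t example_lut_linkdown_aging(void)
+{
+    rtk_uint32 enabled;
+    ret_t retVal;
+
+    /* Force LUT entries of a port to age out as soon as its link drops */
+    if((retVal = rtl8367c_setAsicLutLinkDownForceAging(1)) != RT_ERR_OK)
+        return retVal;
+
+    /* Reads back 1 even though the register bit itself now holds 0 */
+    return rtl8367c_getAsicLutLinkDownForceAging(&enabled);
+}
+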
+/* Function Name:
+ *      rtl8367c_setAsicLutIpmcFwdRouterPort
+ * Description:
+ *      Set whether IPMC packets are also forwarded to the router port
+ * Input:
+ *      enable      - 1: include router port, 0: exclude router port
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK       - Success
+ *      RT_ERR_SMI      - SMI access error
+ *      RT_ERR_ENABLE   - Invalid parameter
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_setAsicLutIpmcFwdRouterPort(rtk_uint32 enable)
+{
+    if(enable > 1)
+        return RT_ERR_ENABLE;
+
+    return rtl8367c_setAsicRegBit(RTL8367C_REG_LUT_CFG2, RTL8367C_LUT_IPMC_FWD_RPORT_OFFSET, enable);
+}
+
+/* Function Name:
+ *      rtl8367c_getAsicLutIpmcFwdRouterPort
+ * Description:
+ *      Get whether IPMC packets are also forwarded to the router port
+ * Input:
+ *      None
+ * Output:
+ *      pEnable         - 1: include router port, 0: exclude router port
+ * Return:
+ *      RT_ERR_OK               - Success
+ *      RT_ERR_SMI              - SMI access error
+ *      RT_ERR_NULL_POINTER     - Null pointer
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicLutIpmcFwdRouterPort(rtk_uint32 *pEnable)
+{
+    if(NULL == pEnable)
+        return RT_ERR_NULL_POINTER;
+
+    return rtl8367c_getAsicRegBit(RTL8367C_REG_LUT_CFG2, RTL8367C_LUT_IPMC_FWD_RPORT_OFFSET, pEnable);
+}
+
diff -Nruw linux-4.4.115-fbx/drivers/misc/freebox./rtlapi/rtl8367c_asicdrv_lut.h linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/rtl8367c_asicdrv_lut.h
--- linux-4.4.115-fbx/drivers/misc/freebox./rtlapi/rtl8367c_asicdrv_lut.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/rtl8367c_asicdrv_lut.h	2019-01-22 16:16:24.715257382 +0100
@@ -0,0 +1,141 @@
+#ifndef _RTL8367C_ASICDRV_LUT_H_
+#define _RTL8367C_ASICDRV_LUT_H_
+
+#include <rtl8367c_asicdrv.h>
+
+#define RTL8367C_LUT_AGETIMERMAX        (7)
+#define RTL8367C_LUT_AGESPEEDMAX        (3)
+#define RTL8367C_LUT_LEARNLIMITMAX      (0x1040)
+#define RTL8367C_LUT_ADDRMAX            (0x103F)
+#define RTL8367C_LUT_IPMCGRP_TABLE_MAX  (0x3F)
+#define RTL8367C_LUT_ENTRY_SIZE         (6)
+#define RTL8367C_LUT_BUSY_CHECK_NO      (10)
+
+#define RTL8367C_LUT_TABLE_SIZE         (6)
+
+enum RTL8367C_LUTHASHMETHOD{
+
+    LUTHASHMETHOD_SVL=0,
+    LUTHASHMETHOD_IVL,
+    LUTHASHMETHOD_END,
+};
+
+
+enum RTL8367C_LRNOVERACT{
+
+    LRNOVERACT_FORWARD=0,
+    LRNOVERACT_DROP,
+    LRNOVERACT_TRAP,
+    LRNOVERACT_END,
+};
+
+enum RTL8367C_LUTREADMETHOD{
+
+    LUTREADMETHOD_MAC =0,
+    LUTREADMETHOD_ADDRESS,
+    LUTREADMETHOD_NEXT_ADDRESS,
+    LUTREADMETHOD_NEXT_L2UC,
+    LUTREADMETHOD_NEXT_L2MC,
+    LUTREADMETHOD_NEXT_L3MC,
+    LUTREADMETHOD_NEXT_L2L3MC,
+    LUTREADMETHOD_NEXT_L2UCSPA,
+};
+
+enum RTL8367C_FLUSHMODE
+{
+    FLUSHMDOE_PORT = 0,
+    FLUSHMDOE_VID,
+    FLUSHMDOE_FID,
+    FLUSHMDOE_END,
+};
+
+enum RTL8367C_FLUSHTYPE
+{
+    FLUSHTYPE_DYNAMIC = 0,
+    FLUSHTYPE_BOTH,
+    FLUSHTYPE_END,
+};
+
+
+typedef struct LUTTABLE{
+
+    ipaddr_t sip;
+    ipaddr_t dip;
+    ether_addr_t mac;
+    rtk_uint16 ivl_svl:1;
+    rtk_uint16 cvid_fid:12;
+    rtk_uint16 fid:4;
+    rtk_uint16 efid:3;
+
+    rtk_uint16 nosalearn:1;
+    rtk_uint16 da_block:1;
+    rtk_uint16 sa_block:1;
+    rtk_uint16 auth:1;
+    rtk_uint16 lut_pri:3;
+    rtk_uint16 sa_en:1;
+    rtk_uint16 fwd_en:1;
+    rtk_uint16 mbr:11;
+    rtk_uint16 spa:4;
+    rtk_uint16 age:3;
+    rtk_uint16 l3lookup:1;
+    rtk_uint16 igmp_asic:1;
+    rtk_uint16 igmpidx:8;
+
+    rtk_uint16 lookup_hit:1;
+    rtk_uint16 lookup_busy:1;
+    rtk_uint16 address:13;
+
+    rtk_uint16 l3vidlookup:1;
+    rtk_uint16 l3_vid:12;
+
+    rtk_uint16 wait_time;
+
+}rtl8367c_luttb;
+
+extern ret_t rtl8367c_setAsicLutIpMulticastLookup(rtk_uint32 enabled);
+extern ret_t rtl8367c_getAsicLutIpMulticastLookup(rtk_uint32* pEnabled);
+extern ret_t rtl8367c_setAsicLutIpMulticastVidLookup(rtk_uint32 enabled);
+extern ret_t rtl8367c_getAsicLutIpMulticastVidLookup(rtk_uint32* pEnabled);
+extern ret_t rtl8367c_setAsicLutAgeTimerSpeed(rtk_uint32 timer, rtk_uint32 speed);
+extern ret_t rtl8367c_getAsicLutAgeTimerSpeed(rtk_uint32* pTimer, rtk_uint32* pSpeed);
+extern ret_t rtl8367c_setAsicLutCamTbUsage(rtk_uint32 enabled);
+extern ret_t rtl8367c_getAsicLutCamTbUsage(rtk_uint32* pEnabled);
+extern ret_t rtl8367c_getAsicLutCamType(rtk_uint32* pType);
+extern ret_t rtl8367c_setAsicLutLearnLimitNo(rtk_uint32 port, rtk_uint32 number);
+extern ret_t rtl8367c_getAsicLutLearnLimitNo(rtk_uint32 port, rtk_uint32* pNumber);
+extern ret_t rtl8367c_setAsicSystemLutLearnLimitNo(rtk_uint32 number);
+extern ret_t rtl8367c_getAsicSystemLutLearnLimitNo(rtk_uint32 *pNumber);
+extern ret_t rtl8367c_setAsicLutLearnOverAct(rtk_uint32 action);
+extern ret_t rtl8367c_getAsicLutLearnOverAct(rtk_uint32* pAction);
+extern ret_t rtl8367c_setAsicSystemLutLearnOverAct(rtk_uint32 action);
+extern ret_t rtl8367c_getAsicSystemLutLearnOverAct(rtk_uint32 *pAction);
+extern ret_t rtl8367c_setAsicSystemLutLearnPortMask(rtk_uint32 portmask);
+extern ret_t rtl8367c_getAsicSystemLutLearnPortMask(rtk_uint32 *pPortmask);
+extern ret_t rtl8367c_setAsicL2LookupTb(rtl8367c_luttb *pL2Table);
+extern ret_t rtl8367c_getAsicL2LookupTb(rtk_uint32 method, rtl8367c_luttb *pL2Table);
+extern ret_t rtl8367c_getAsicLutLearnNo(rtk_uint32 port, rtk_uint32* pNumber);
+extern ret_t rtl8367c_setAsicLutIpLookupMethod(rtk_uint32 type);
+extern ret_t rtl8367c_getAsicLutIpLookupMethod(rtk_uint32* pType);
+extern ret_t rtl8367c_setAsicLutForceFlush(rtk_uint32 portmask);
+extern ret_t rtl8367c_getAsicLutForceFlushStatus(rtk_uint32 *pPortmask);
+extern ret_t rtl8367c_setAsicLutFlushMode(rtk_uint32 mode);
+extern ret_t rtl8367c_getAsicLutFlushMode(rtk_uint32* pMode);
+extern ret_t rtl8367c_setAsicLutFlushType(rtk_uint32 type);
+extern ret_t rtl8367c_getAsicLutFlushType(rtk_uint32* pType);
+extern ret_t rtl8367c_setAsicLutFlushVid(rtk_uint32 vid);
+extern ret_t rtl8367c_getAsicLutFlushVid(rtk_uint32* pVid);
+extern ret_t rtl8367c_setAsicLutFlushFid(rtk_uint32 fid);
+extern ret_t rtl8367c_getAsicLutFlushFid(rtk_uint32* pFid);
+extern ret_t rtl8367c_setAsicLutDisableAging(rtk_uint32 port, rtk_uint32 disabled);
+extern ret_t rtl8367c_getAsicLutDisableAging(rtk_uint32 port, rtk_uint32 *pDisabled);
+extern ret_t rtl8367c_setAsicLutIPMCGroup(rtk_uint32 index, ipaddr_t group_addr, rtk_uint32 vid, rtk_uint32 pmask, rtk_uint32 valid);
+extern ret_t rtl8367c_getAsicLutIPMCGroup(rtk_uint32 index, ipaddr_t *pGroup_addr, rtk_uint32 *pVid, rtk_uint32 *pPmask, rtk_uint32 *pValid);
+extern ret_t rtl8367c_setAsicLutLinkDownForceAging(rtk_uint32 enable);
+extern ret_t rtl8367c_getAsicLutLinkDownForceAging(rtk_uint32 *pEnable);
+extern ret_t rtl8367c_setAsicLutFlushAll(void);
+extern ret_t rtl8367c_getAsicLutFlushAllStatus(rtk_uint32 *pBusyStatus);
+extern ret_t rtl8367c_setAsicLutIpmcFwdRouterPort(rtk_uint32 enable);
+extern ret_t rtl8367c_getAsicLutIpmcFwdRouterPort(rtk_uint32 *pEnable);
+
+#endif /*_RTL8367C_ASICDRV_LUT_H_*/
+
diff -Nruw linux-4.4.115-fbx/drivers/misc/freebox./rtlapi/rtl8367c_asicdrv_meter.c linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/rtl8367c_asicdrv_meter.c
--- linux-4.4.115-fbx/drivers/misc/freebox./rtlapi/rtl8367c_asicdrv_meter.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/rtl8367c_asicdrv_meter.c	2019-01-22 16:16:24.715257382 +0100
@@ -0,0 +1,307 @@
+/*
+ * Copyright (C) 2013 Realtek Semiconductor Corp.
+ * All Rights Reserved.
+ *
+ * This program is the proprietary software of Realtek Semiconductor
+ * Corporation and/or its licensors, and only be used, duplicated,
+ * modified or distributed under the authorized license from Realtek.
+ *
+ * ANY USE OF THE SOFTWARE OTHER THAN AS AUTHORIZED UNDER
+ * THIS LICENSE OR COPYRIGHT LAW IS PROHIBITED.
+ *
+ * $Revision: 76306 $
+ * $Date: 2017-03-08 15:13:58 +0800 (Wed, 08 Mar 2017) $
+ *
+ * Purpose : RTL8367C switch high-level API for RTL8367C
+ * Feature : Shared meter related functions
+ *
+ */
+#include <rtl8367c_asicdrv_meter.h>
+/* Function Name:
+ *      rtl8367c_setAsicShareMeter
+ * Description:
+ *      Set meter configuration
+ * Input:
+ *      index   - shared meter index (0 ~ RTL8367C_METERMAX)
+ *      rate    - 19-bit rate of shared meter, unit is 8Kbps
+ *      ifg     - include IFG in rate calculation, 1: include, 0: exclude
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK               - Success
+ *      RT_ERR_SMI              - SMI access error
+ *      RT_ERR_FILTER_METER_ID  - Invalid meter
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_setAsicShareMeter(rtk_uint32 index, rtk_uint32 rate, rtk_uint32 ifg)
+{
+    ret_t retVal;
+
+    if(index > RTL8367C_METERMAX)
+        return RT_ERR_FILTER_METER_ID;
+
+    if(index < 32)
+    {
+        /* 19-bit rate */
+        retVal = rtl8367c_setAsicReg(RTL8367C_METER_RATE_REG(index), rate&0xFFFF);
+        if(retVal != RT_ERR_OK)
+            return retVal;
+
+        retVal = rtl8367c_setAsicReg(RTL8367C_METER_RATE_REG(index) + 1, (rate &0x70000) >> 16);
+        if(retVal != RT_ERR_OK)
+            return retVal;
+
+        retVal = rtl8367c_setAsicRegBit(RTL8367C_METER_IFG_CTRL_REG(index), RTL8367C_METER_IFG_OFFSET(index), ifg);
+        if(retVal != RT_ERR_OK)
+            return retVal;
+    }
+    else
+    {
+        /* 19-bit rate */
+        retVal = rtl8367c_setAsicReg(RTL8367C_REG_METER32_RATE_CTRL0 + ((index-32) << 1), rate&0xFFFF);
+        if(retVal != RT_ERR_OK)
+            return retVal;
+
+        retVal = rtl8367c_setAsicReg(RTL8367C_REG_METER32_RATE_CTRL0 + ((index-32) << 1) + 1, (rate &0x70000) >> 16);
+        if(retVal != RT_ERR_OK)
+            return retVal;
+
+        retVal = rtl8367c_setAsicRegBit(RTL8367C_REG_METER_IFG_CTRL2 + ((index-32) >> 4), RTL8367C_METER_IFG_OFFSET(index), ifg);
+        if(retVal != RT_ERR_OK)
+            return retVal;
+    }
+
+    return RT_ERR_OK;
+}
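+
+/*
+ * Editor's example sketch (not part of the Realtek sources): the rate field
+ * is programmed in units of 8Kbps, so a 100Mbps limit is 100000 / 8 = 12500.
+ * The meter index and IFG choice below are illustrative.
+ */
+static ret_t example_share_meter_100mbps(void)
+{
+    /* meter 0, rate = 100000Kbps / 8Kbps = 12500, IFG included */
+    return rtl8367c_setAsicShareMeter(0, 12500, 1);
+}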
+/* Function Name:
+ *      rtl8367c_getAsicShareMeter
+ * Description:
+ *      Get meter configuration
+ * Input:
+ *      index   - shared meter index (0 ~ RTL8367C_METERMAX)
+ * Output:
+ *      pRate   - 19-bit rate of shared meter, unit is 8Kbps
+ *      pIfg    - include IFG in rate calculation, 1: include, 0: exclude
+ * Return:
+ *      RT_ERR_OK               - Success
+ *      RT_ERR_SMI              - SMI access error
+ *      RT_ERR_FILTER_METER_ID  - Invalid meter
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicShareMeter(rtk_uint32 index, rtk_uint32 *pRate, rtk_uint32 *pIfg)
+{
+    rtk_uint32 regData;
+    rtk_uint32 regData2;
+    ret_t retVal;
+
+    if(index > RTL8367C_METERMAX)
+        return RT_ERR_FILTER_METER_ID;
+
+    if(index < 32)
+    {
+        /* 19-bit rate */
+        retVal = rtl8367c_getAsicReg(RTL8367C_METER_RATE_REG(index), &regData);
+        if(retVal != RT_ERR_OK)
+            return retVal;
+
+        retVal = rtl8367c_getAsicReg(RTL8367C_METER_RATE_REG(index) + 1, &regData2);
+        if(retVal != RT_ERR_OK)
+            return retVal;
+
+        *pRate = ((regData2 << 16) & 0x70000) | regData;
+
+        /* IFG */
+        retVal = rtl8367c_getAsicRegBit(RTL8367C_METER_IFG_CTRL_REG(index), RTL8367C_METER_IFG_OFFSET(index), pIfg);
+        return retVal;
+    }
+    else
+    {
+        /* 19-bit rate */
+        retVal = rtl8367c_getAsicReg(RTL8367C_REG_METER32_RATE_CTRL0 + ((index-32) << 1), &regData);
+        if(retVal != RT_ERR_OK)
+            return retVal;
+
+        retVal = rtl8367c_getAsicReg(RTL8367C_REG_METER32_RATE_CTRL0 + ((index-32) << 1) + 1, &regData2);
+        if(retVal != RT_ERR_OK)
+            return retVal;
+
+        *pRate = ((regData2 << 16) & 0x70000) | regData;
+
+        /* IFG */
+        retVal = rtl8367c_getAsicRegBit(RTL8367C_REG_METER_IFG_CTRL2 + ((index-32) >> 4), RTL8367C_METER_IFG_OFFSET(index), pIfg);
+        return retVal;
+    }
+}
+/* Function Name:
+ *      rtl8367c_setAsicShareMeterBucketSize
+ * Description:
+ *      Set meter related leaky bucket threshold
+ * Input:
+ *      index       - shared meter index (0 ~ RTL8367C_METERMAX)
+ *      lbthreshold - Leaky bucket threshold of meter
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK               - Success
+ *      RT_ERR_SMI              - SMI access error
+ *      RT_ERR_FILTER_METER_ID  - Invalid meter
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_setAsicShareMeterBucketSize(rtk_uint32 index, rtk_uint32 lbthreshold)
+{
+    if(index > RTL8367C_METERMAX)
+        return RT_ERR_FILTER_METER_ID;
+
+    if(index < 32)
+        return rtl8367c_setAsicReg(RTL8367C_METER_BUCKET_SIZE_REG(index), lbthreshold);
+    else
+        return rtl8367c_setAsicReg(RTL8367C_REG_METER32_BUCKET_SIZE + index - 32, lbthreshold);
+}
+/* Function Name:
+ *      rtl8367c_getAsicShareMeterBucketSize
+ * Description:
+ *      Get meter related leaky bucket threshold
+ * Input:
+ *      index       - shared meter index (0 ~ RTL8367C_METERMAX)
+ * Output:
+ *      pLbthreshold - Leaky bucket threshold of meter
+ * Return:
+ *      RT_ERR_OK               - Success
+ *      RT_ERR_SMI              - SMI access error
+ *      RT_ERR_FILTER_METER_ID  - Invalid meter
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicShareMeterBucketSize(rtk_uint32 index, rtk_uint32 *pLbthreshold)
+{
+    if(index > RTL8367C_METERMAX)
+        return RT_ERR_FILTER_METER_ID;
+
+    if(index < 32)
+        return rtl8367c_getAsicReg(RTL8367C_METER_BUCKET_SIZE_REG(index), pLbthreshold);
+    else
+        return rtl8367c_getAsicReg(RTL8367C_REG_METER32_BUCKET_SIZE + index - 32, pLbthreshold);
+}
+
+/* Function Name:
+ *      rtl8367c_setAsicShareMeterType
+ * Description:
+ *      Set meter Type
+ * Input:
+ *      index       - shared meter index (0 ~ RTL8367C_METERMAX)
+ *      type        - 0: kbps, 1: pps
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK               - Success
+ *      RT_ERR_SMI              - SMI access error
+ *      RT_ERR_FILTER_METER_ID  - Invalid meter
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_setAsicShareMeterType(rtk_uint32 index, rtk_uint32 type)
+{
+    rtk_uint32 reg;
+
+    if(index > RTL8367C_METERMAX)
+        return RT_ERR_FILTER_METER_ID;
+
+    if(index < 32)
+        reg = RTL8367C_REG_METER_MODE_SETTING0 + (index / 16);
+    else
+        reg = RTL8367C_REG_METER_MODE_SETTING2 + ((index - 32) / 16);
+    return rtl8367c_setAsicRegBit(reg, index % 16, type);
+}
+
+/* Function Name:
+ *      rtl8367c_getAsicShareMeterType
+ * Description:
+ *      Get meter Type
+ * Input:
+ *      index       - shared meter index (0 ~ RTL8367C_METERMAX)
+ * Output:
+ *      pType       - 0: kbps, 1: pps
+ * Return:
+ *      RT_ERR_OK               - Success
+ *      RT_ERR_SMI              - SMI access error
+ *      RT_ERR_FILTER_METER_ID  - Invalid meter
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicShareMeterType(rtk_uint32 index, rtk_uint32 *pType)
+{
+    rtk_uint32 reg;
+
+    if(index > RTL8367C_METERMAX)
+        return RT_ERR_FILTER_METER_ID;
+
+    if(NULL == pType)
+        return RT_ERR_NULL_POINTER;
+
+    if(index < 32)
+        reg = RTL8367C_REG_METER_MODE_SETTING0 + (index / 16);
+    else
+        reg = RTL8367C_REG_METER_MODE_SETTING2 + ((index - 32) / 16);
+    return rtl8367c_getAsicRegBit(reg, index % 16, pType);
+}
+
+
+/* Function Name:
+ *      rtl8367c_setAsicMeterExceedStatus
+ * Description:
+ *      Clear shared meter status
+ * Input:
+ *      index       - shared meter index (0 ~ RTL8367C_METERMAX)
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK               - Success
+ *      RT_ERR_SMI              - SMI access error
+ *      RT_ERR_FILTER_METER_ID  - Invalid meter
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_setAsicMeterExceedStatus(rtk_uint32 index)
+{
+    if(index > RTL8367C_METERMAX)
+        return RT_ERR_FILTER_METER_ID;
+
+    if(index < 32)
+        return rtl8367c_setAsicRegBit(RTL8367C_METER_OVERRATE_INDICATOR_REG(index), RTL8367C_METER_EXCEED_OFFSET(index), 1);
+    else
+        return rtl8367c_setAsicRegBit(RTL8367C_REG_METER_OVERRATE_INDICATOR2 + ((index - 32) >> 4), RTL8367C_METER_EXCEED_OFFSET(index), 1);
+
+}
+/* Function Name:
+ *      rtl8367c_getAsicMeterExceedStatus
+ * Description:
+ *      Get shared meter status
+ * Input:
+ *      index   - shared meter index (0 ~ RTL8367C_METERMAX)
+ * Output:
+ *      pStatus - 0: rate not exceeded, 1: rate exceeded
+ * Return:
+ *      RT_ERR_OK               - Success
+ *      RT_ERR_SMI              - SMI access error
+ *      RT_ERR_FILTER_METER_ID  - Invalid meter
+ * Note:
+ *      If traffic exceeds the configured rate (rate * 8Kbps) of a meter, the status bit of this meter is set to 1.
+ */
+ret_t rtl8367c_getAsicMeterExceedStatus(rtk_uint32 index, rtk_uint32* pStatus)
+{
+    if(index > RTL8367C_METERMAX)
+        return RT_ERR_FILTER_METER_ID;
+
+    if(index < 32)
+        return rtl8367c_getAsicRegBit(RTL8367C_METER_OVERRATE_INDICATOR_REG(index), RTL8367C_METER_EXCEED_OFFSET(index), pStatus);
+    else
+        return rtl8367c_getAsicRegBit(RTL8367C_REG_METER_OVERRATE_INDICATOR2 + ((index - 32) >> 4), RTL8367C_METER_EXCEED_OFFSET(index), pStatus);
+}
+
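+/*
+ * Editor's example sketch (not part of the Realtek sources): poll a meter's
+ * exceed flag and clear it when set. The indicator is sticky and, as the
+ * setter above shows, is cleared by writing 1 to the same bit.
+ */
+static ret_t example_meter_exceed(rtk_uint32 meter)
+{
+    rtk_uint32 status;
+    ret_t retVal;
+
+    if((retVal = rtl8367c_getAsicMeterExceedStatus(meter, &status)) != RT_ERR_OK)
+        return retVal;
+
+    if(status)  /* rate was exceeded: clear the sticky indicator */
+        return rtl8367c_setAsicMeterExceedStatus(meter);
+
+    return RT_ERR_OK;
+}
+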
diff -Nruw linux-4.4.115-fbx/drivers/misc/freebox./rtlapi/rtl8367c_asicdrv_meter.h linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/rtl8367c_asicdrv_meter.h
--- linux-4.4.115-fbx/drivers/misc/freebox./rtlapi/rtl8367c_asicdrv_meter.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/rtl8367c_asicdrv_meter.h	2019-01-22 16:16:24.715257382 +0100
@@ -0,0 +1,17 @@
+#ifndef _RTL8367C_ASICDRV_METER_H_
+#define _RTL8367C_ASICDRV_METER_H_
+
+#include <rtl8367c_asicdrv.h>
+
+
+extern ret_t rtl8367c_setAsicShareMeter(rtk_uint32 index, rtk_uint32 rate, rtk_uint32 ifg);
+extern ret_t rtl8367c_getAsicShareMeter(rtk_uint32 index, rtk_uint32 *pRate, rtk_uint32 *pIfg);
+extern ret_t rtl8367c_setAsicShareMeterBucketSize(rtk_uint32 index, rtk_uint32 lbThreshold);
+extern ret_t rtl8367c_getAsicShareMeterBucketSize(rtk_uint32 index, rtk_uint32 *pLbThreshold);
+extern ret_t rtl8367c_setAsicShareMeterType(rtk_uint32 index, rtk_uint32 type);
+extern ret_t rtl8367c_getAsicShareMeterType(rtk_uint32 index, rtk_uint32 *pType);
+extern ret_t rtl8367c_setAsicMeterExceedStatus(rtk_uint32 index);
+extern ret_t rtl8367c_getAsicMeterExceedStatus(rtk_uint32 index, rtk_uint32* pStatus);
+
+#endif /*_RTL8367C_ASICDRV_METER_H_*/
+
diff -Nruw linux-4.4.115-fbx/drivers/misc/freebox./rtlapi/rtl8367c_asicdrv_mib.c linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/rtl8367c_asicdrv_mib.c
--- linux-4.4.115-fbx/drivers/misc/freebox./rtlapi/rtl8367c_asicdrv_mib.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/rtl8367c_asicdrv_mib.c	2019-01-22 16:16:24.715257382 +0100
@@ -0,0 +1,637 @@
+/*
+ * Copyright (C) 2013 Realtek Semiconductor Corp.
+ * All Rights Reserved.
+ *
+ * This program is the proprietary software of Realtek Semiconductor
+ * Corporation and/or its licensors, and only be used, duplicated,
+ * modified or distributed under the authorized license from Realtek.
+ *
+ * ANY USE OF THE SOFTWARE OTHER THAN AS AUTHORIZED UNDER
+ * THIS LICENSE OR COPYRIGHT LAW IS PROHIBITED.
+ *
+ * $Revision: 80485 $
+ * $Date: 2017-07-11 11:34:22 +0800 (Tue, 11 Jul 2017) $
+ *
+ * Purpose : RTL8367C switch high-level API for RTL8367C
+ * Feature : MIB related functions
+ *
+ */
+#include <rtl8367c_asicdrv_mib.h>
+/* Function Name:
+ *      rtl8367c_setAsicMIBsCounterReset
+ * Description:
+ *      Reset global, queue management, or per-port MIB counters
+ * Input:
+ *      greset  - Global reset
+ *      qmreset - Queue management reset
+ *      portmask    - Port reset mask
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK               - Success
+ *      RT_ERR_SMI              - SMI access error
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_setAsicMIBsCounterReset(rtk_uint32 greset, rtk_uint32 qmreset, rtk_uint32 portmask)
+{
+    ret_t retVal;
+    rtk_uint32 regData;
+    rtk_uint32 regBits;
+
+    regBits = RTL8367C_GLOBAL_RESET_MASK |
+                RTL8367C_QM_RESET_MASK |
+                    RTL8367C_MIB_PORT07_MASK |
+                    ((rtk_uint32)0x7 << 13);
+    regData = ((greset << RTL8367C_GLOBAL_RESET_OFFSET) & RTL8367C_GLOBAL_RESET_MASK) |
+                ((qmreset << RTL8367C_QM_RESET_OFFSET) & RTL8367C_QM_RESET_MASK) |
+                (((portmask & 0xFF) << RTL8367C_PORT0_RESET_OFFSET) & RTL8367C_MIB_PORT07_MASK) |
+                (((portmask >> 8)&0x7) << 13);
+
+
+    retVal = rtl8367c_setAsicRegBits(RTL8367C_REG_MIB_CTRL0, regBits, (regData >> RTL8367C_PORT0_RESET_OFFSET));
+
+    return retVal;
+}
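+
+/*
+ * Editor's example sketch (not part of the Realtek sources): reset only the
+ * per-port MIB counters. The 0x7FF mask assumes 11 ports, matching the
+ * 8 + 3 port bits handled by the function above.
+ */
+static ret_t example_mib_reset_ports(void)
+{
+    /* greset = 0, qmreset = 0, reset per-port counters of ports 0~10 */
+    return rtl8367c_setAsicMIBsCounterReset(0, 0, 0x7FF);
+}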
+/* Function Name:
+ *      rtl8367c_getAsicMIBsCounter
+ * Description:
+ *      Get MIBs counter
+ * Input:
+ *      port        - Physical port number (0 ~ RTL8367C_PORTIDMAX)
+ *      mibIdx      - MIB counter index
+ * Output:
+ *      pCounter    - MIB retrieved counter
+ * Return:
+ *      RT_ERR_OK               - Success
+ *      RT_ERR_SMI              - SMI access error
+ *      RT_ERR_PORT_ID          - Invalid port number
+ *      RT_ERR_BUSYWAIT_TIMEOUT - MIB is busy at retrieving
+ *      RT_ERR_STAT_CNTR_FAIL   - MIB is resetting
+ * Note:
+ *      Before retrieving a MIB counter, the access address is written to the ASIC and the MIB
+ *      control register status is checked. If the busy bit of the MIB control register is set,
+ *      the counter is still being prepared and software must wait until the ASIC clears the
+ *      busy flag. This driver does not retry the read itself; software must call it again if
+ *      the return value is not RT_ERR_OK.
+ */
+ret_t rtl8367c_getAsicMIBsCounter(rtk_uint32 port, RTL8367C_MIBCOUNTER mibIdx, rtk_uint64* pCounter)
+{
+    ret_t retVal;
+    rtk_uint32 regAddr;
+    rtk_uint32 regData;
+    rtk_uint32 mibAddr;
+    rtk_uint32 mibOff=0;
+
+    /* address offset to MIBs counter */
+    CONST rtk_uint16 mibLength[RTL8367C_MIBS_NUMBER]= {
+        4,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,
+        4,2,2,2,2,2,2,2,2,
+        4,2,2,2,2,2,2,2,2,2,2,2,2,2,2,
+        2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2};
+
+    rtk_uint16 i;
+    rtk_uint64 mibCounter;
+
+
+    if(port > RTL8367C_PORTIDMAX)
+        return RT_ERR_PORT_ID;
+
+    if(mibIdx >= RTL8367C_MIBS_NUMBER)
+        return RT_ERR_STAT_INVALID_CNTR;
+
+    if(dot1dTpLearnedEntryDiscards == mibIdx)
+    {
+        mibAddr = RTL8367C_MIB_LEARNENTRYDISCARD_OFFSET;
+    }
+    else
+    {
+        i = 0;
+        mibOff = RTL8367C_MIB_PORT_OFFSET * port;
+
+        if(port > 7)
+            mibOff = mibOff + 68;
+
+        while(i < mibIdx)
+        {
+            mibOff += mibLength[i];
+            i++;
+        }
+
+        mibAddr = mibOff;
+    }
+
+    /* Read MIB addr before writing */
+    retVal = rtl8367c_getAsicReg(RTL8367C_REG_MIB_ADDRESS, &regData);
+    if(retVal != RT_ERR_OK)
+        return retVal;
+
+    if (regData == (mibAddr >> 2))
+    {
+        /* Write MIB addr to an alternate value */
+        retVal = rtl8367c_setAsicReg(RTL8367C_REG_MIB_ADDRESS, (mibAddr >> 2) + 1);
+        if(retVal != RT_ERR_OK)
+            return retVal;
+
+        while(1)
+        {
+            retVal = rtl8367c_getAsicReg(RTL8367C_REG_MIB_ADDRESS, &regData);
+            if(retVal != RT_ERR_OK)
+                return retVal;
+
+            if(regData == ((mibAddr >> 2) + 1))
+            {
+                break;
+            }
+
+            retVal = rtl8367c_setAsicReg(RTL8367C_REG_MIB_ADDRESS, (mibAddr >> 2) + 1);
+            if(retVal != RT_ERR_OK)
+                return retVal;
+        }
+
+        /* polling busy flag */
+        i = 100;
+        while(i > 0)
+        {
+            /*read MIB control register*/
+            retVal = rtl8367c_getAsicReg(RTL8367C_MIB_CTRL_REG,&regData);
+            if(retVal != RT_ERR_OK)
+                return retVal;
+
+            if((regData & RTL8367C_MIB_CTRL0_BUSY_FLAG_MASK) == 0)
+            {
+                break;
+            }
+
+            i--;
+        }
+
+        if(regData & RTL8367C_MIB_CTRL0_BUSY_FLAG_MASK)
+            return RT_ERR_BUSYWAIT_TIMEOUT;
+
+        if(regData & RTL8367C_RESET_FLAG_MASK)
+            return RT_ERR_STAT_CNTR_FAIL;
+    }
+
+    /* Write the access counter address first. This is an SRAM address,
+     * and SRAM address = MIB register address >> 2. The ASIC then prepares
+     * the 64-bit counter to be retrieved via the access control register. */
+    retVal = rtl8367c_setAsicReg(RTL8367C_REG_MIB_ADDRESS, (mibAddr >> 2));
+    if(retVal != RT_ERR_OK)
+        return retVal;
+
+    /* polling MIB Addr register */
+    while(1)
+    {
+        retVal = rtl8367c_getAsicReg(RTL8367C_REG_MIB_ADDRESS, &regData);
+        if(retVal != RT_ERR_OK)
+            return retVal;
+
+        if(regData == (mibAddr >> 2))
+        {
+            break;
+        }
+
+        retVal = rtl8367c_setAsicReg(RTL8367C_REG_MIB_ADDRESS, (mibAddr >> 2));
+        if(retVal != RT_ERR_OK)
+            return retVal;
+    }
+
+    /* polling busy flag */
+    i = 100;
+    while(i > 0)
+    {
+        /*read MIB control register*/
+        retVal = rtl8367c_getAsicReg(RTL8367C_MIB_CTRL_REG,&regData);
+        if(retVal != RT_ERR_OK)
+            return retVal;
+
+        if((regData & RTL8367C_MIB_CTRL0_BUSY_FLAG_MASK) == 0)
+        {
+            break;
+        }
+
+        i--;
+    }
+
+    if(regData & RTL8367C_MIB_CTRL0_BUSY_FLAG_MASK)
+        return RT_ERR_BUSYWAIT_TIMEOUT;
+
+    if(regData & RTL8367C_RESET_FLAG_MASK)
+        return RT_ERR_STAT_CNTR_FAIL;
+
+    mibCounter = 0;
+    i = mibLength[mibIdx];
+    if(4 == i)
+        regAddr = RTL8367C_MIB_COUNTER_BASE_REG + 3;
+    else
+        regAddr = RTL8367C_MIB_COUNTER_BASE_REG + ((mibOff + 1) % 4);
+
+    while(i)
+    {
+        retVal = rtl8367c_getAsicReg(regAddr, &regData);
+        if(retVal != RT_ERR_OK)
+            return retVal;
+
+        mibCounter = (mibCounter << 16) | (regData & 0xFFFF);
+
+        regAddr --;
+        i --;
+
+    }
+
+    *pCounter = mibCounter;
+
+    return RT_ERR_OK;
+}
+
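+/*
+ * Editor's example sketch (not part of the Realtek sources): read the
+ * 64-bit ifInOctets counter of port 0, retrying once on a busy timeout as
+ * the note above suggests.
+ */
+static ret_t example_read_ifInOctets(rtk_uint64 *pOctets)
+{
+    ret_t retVal;
+
+    retVal = rtl8367c_getAsicMIBsCounter(0, ifInOctets, pOctets);
+    if(retVal == RT_ERR_BUSYWAIT_TIMEOUT)
+        retVal = rtl8367c_getAsicMIBsCounter(0, ifInOctets, pOctets);
+
+    return retVal;
+}
+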
+/* Function Name:
+ *      rtl8367c_getAsicMIBsLogCounter
+ * Description:
+ *      Get MIBs logging counter
+ * Input:
+ *      index       - The index of the 32 logging counters (0 ~ 31)
+ * Output:
+ *      pCounter    - Retrieved logging counter value
+ * Return:
+ *      RT_ERR_OK               - Success
+ *      RT_ERR_SMI              - SMI access error
+ *      RT_ERR_ENTRY_INDEX      - Wrong index
+ *      RT_ERR_BUSYWAIT_TIMEOUT - MIB is busy at retrieving
+ *      RT_ERR_STAT_CNTR_FAIL   - MIB is resetting
+ * Note:
+ *      This API gets one of the 32 logging counters
+ */
+ret_t rtl8367c_getAsicMIBsLogCounter(rtk_uint32 index, rtk_uint32 *pCounter)
+{
+    ret_t retVal;
+    rtk_uint32 regAddr;
+    rtk_uint32 regData;
+    rtk_uint32 mibAddr;
+    rtk_uint16 i;
+    rtk_uint64 mibCounter;
+
+    if(index > RTL8367C_MIB_MAX_LOG_CNT_IDX)
+        return RT_ERR_ENTRY_INDEX;
+
+    mibAddr = RTL8367C_MIB_LOG_CNT_OFFSET + ((index / 2) * 4);
+
+    retVal = rtl8367c_setAsicReg(RTL8367C_REG_MIB_ADDRESS, (mibAddr >> 2));
+    if(retVal != RT_ERR_OK)
+        return retVal;
+
+    /*read MIB control register*/
+    retVal = rtl8367c_getAsicReg(RTL8367C_MIB_CTRL_REG, &regData);
+    if(retVal != RT_ERR_OK)
+        return retVal;
+
+    if(regData & RTL8367C_MIB_CTRL0_BUSY_FLAG_MASK)
+        return RT_ERR_BUSYWAIT_TIMEOUT;
+
+    if(regData & RTL8367C_RESET_FLAG_MASK)
+        return RT_ERR_STAT_CNTR_FAIL;
+
+    mibCounter = 0;
+    if((index % 2) == 1)
+        regAddr = RTL8367C_MIB_COUNTER_BASE_REG + 3;
+    else
+        regAddr = RTL8367C_MIB_COUNTER_BASE_REG + 1;
+
+    for(i = 0; i <= 1; i++)
+    {
+        retVal = rtl8367c_getAsicReg(regAddr, &regData);
+
+        if(retVal != RT_ERR_OK)
+            return retVal;
+
+        mibCounter = (mibCounter << 16) | (regData & 0xFFFF);
+
+        regAddr --;
+    }
+
+    *pCounter = mibCounter;
+    return RT_ERR_OK;
+}
+
+/* Function Name:
+ *      rtl8367c_getAsicMIBsControl
+ * Description:
+ *      Get MIB control register
+ * Input:
+ *      None
+ * Output:
+ *      pMask       - MIB control status mask, bit[0]: busy, bit[1]: reset
+ * Return:
+ *      RT_ERR_OK               - Success
+ *      RT_ERR_SMI              - SMI access error
+ * Note:
+ *      Software needs to check this control register after a per-port or global reset
+ */
+ret_t rtl8367c_getAsicMIBsControl(rtk_uint32* pMask)
+{
+    ret_t retVal;
+    rtk_uint32 regData;
+
+    retVal = rtl8367c_getAsicReg(RTL8367C_MIB_CTRL_REG, &regData);
+    if(retVal != RT_ERR_OK)
+        return retVal;
+
+    *pMask = regData & (RTL8367C_MIB_CTRL0_BUSY_FLAG_MASK | RTL8367C_RESET_FLAG_MASK);
+
+    return RT_ERR_OK;
+}
+/* Function Name:
+ *      rtl8367c_setAsicMIBsResetValue
+ * Description:
+ *      Set the value (0 or 1) that MIB counters are reset to
+ * Input:
+ *      value           - Reset to value 0 or 1
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK       - Success
+ *      RT_ERR_SMI      - SMI access error
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_setAsicMIBsResetValue(rtk_uint32 value)
+{
+    return rtl8367c_setAsicRegBit(RTL8367C_REG_MIB_CTRL0, RTL8367C_RESET_VALUE_OFFSET, value);
+}
+/* Function Name:
+ *      rtl8367c_getAsicMIBsResetValue
+ * Description:
+ *      Get the value (0 or 1) that MIB counters are reset to
+ * Input:
+ *      None
+ * Output:
+ *      value           - Reset value, 0 or 1
+ * Return:
+ *      RT_ERR_OK       - Success
+ *      RT_ERR_SMI      - SMI access error
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicMIBsResetValue(rtk_uint32* value)
+{
+    return rtl8367c_getAsicRegBit(RTL8367C_REG_MIB_CTRL0, RTL8367C_RESET_VALUE_OFFSET, value);
+}
+
+/* Function Name:
+ *      rtl8367c_setAsicMIBsUsageMode
+ * Description:
+ *      MIB update mode
+ * Input:
+ *      mode            - 1: latch all MIBs by timer, 0: normal free-run counting
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK       - Success
+ *      RT_ERR_SMI      - SMI access error
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_setAsicMIBsUsageMode(rtk_uint32 mode)
+{
+    return rtl8367c_setAsicRegBit(RTL8367C_REG_MIB_CTRL4, RTL8367C_MIB_USAGE_MODE_OFFSET, mode);
+}
+/* Function Name:
+ *      rtl8367c_getAsicMIBsUsageMode
+ * Description:
+ *      MIB update mode
+ * Input:
+ *      None
+ * Output:
+ *      pMode           - 1: latch all MIBs by timer, 0: normal free-run counting
+ * Return:
+ *      RT_ERR_OK       - Success
+ *      RT_ERR_SMI      - SMI access error
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicMIBsUsageMode(rtk_uint32* pMode)
+{
+    return rtl8367c_getAsicRegBit(RTL8367C_REG_MIB_CTRL4, RTL8367C_MIB_USAGE_MODE_OFFSET, pMode);
+}
+
+/* Function Name:
+ *      rtl8367c_setAsicMIBsTimer
+ * Description:
+ *      MIB latching timer
+ * Input:
+ *      timer           - latch timer, unit 1 second
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK       - Success
+ *      RT_ERR_SMI      - SMI access error
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_setAsicMIBsTimer(rtk_uint32 timer)
+{
+    return rtl8367c_setAsicRegBits(RTL8367C_REG_MIB_CTRL4, RTL8367C_MIB_TIMER_MASK, timer);
+}
+/* Function Name:
+ *      rtl8367c_getAsicMIBsTimer
+ * Description:
+ *      MIB latching timer
+ * Input:
+ *      None
+ * Output:
+ *      pTimer          - latch timer, unit 1 second
+ * Return:
+ *      RT_ERR_OK       - Success
+ *      RT_ERR_SMI      - SMI access error
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicMIBsTimer(rtk_uint32* pTimer)
+{
+    return rtl8367c_getAsicRegBits(RTL8367C_REG_MIB_CTRL4, RTL8367C_MIB_TIMER_MASK, pTimer);
+}
+/* Function Name:
+ *      rtl8367c_setAsicMIBsLoggingMode
+ * Description:
+ *      MIB logging counter mode
+ * Input:
+ *      index   - logging counter mode index (0~15)
+ *      mode    - 0: 32-bit mode, 1: 64-bit mode
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK               - Success
+ *      RT_ERR_SMI              - SMI access error
+ *      RT_ERR_OUT_OF_RANGE     - input parameter out of range
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_setAsicMIBsLoggingMode(rtk_uint32 index, rtk_uint32 mode)
+{
+    if(index > RTL8367C_MIB_MAX_LOG_MODE_IDX)
+        return RT_ERR_OUT_OF_RANGE;
+
+    return rtl8367c_setAsicRegBit(RTL8367C_REG_MIB_CTRL3, index,mode);
+}
+/* Function Name:
+ *      rtl8367c_getAsicMIBsLoggingMode
+ * Description:
+ *      MIB logging counter mode
+ * Input:
+ *      index   - logging counter mode index (0~15)
+ * Output:
+ *      pMode   - 0: 32-bit mode, 1: 64-bit mode
+ * Return:
+ *      RT_ERR_OK               - Success
+ *      RT_ERR_SMI              - SMI access error
+ *      RT_ERR_OUT_OF_RANGE     - input parameter out of range
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicMIBsLoggingMode(rtk_uint32 index, rtk_uint32* pMode)
+{
+    if(index > RTL8367C_MIB_MAX_LOG_MODE_IDX)
+        return RT_ERR_OUT_OF_RANGE;
+
+    return rtl8367c_getAsicRegBit(RTL8367C_REG_MIB_CTRL3, index,pMode);
+}
+
+/* Function Name:
+ *      rtl8367c_setAsicMIBsLoggingType
+ * Description:
+ *      MIB logging counter type
+ * Input:
+ *      index   - logging counter mode index (0~15)
+ *      type    - 0: packet count, 1: byte count
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK               - Success
+ *      RT_ERR_SMI              - SMI access error
+ *      RT_ERR_OUT_OF_RANGE     - input parameter out of range
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_setAsicMIBsLoggingType(rtk_uint32 index, rtk_uint32 type)
+{
+    if(index > RTL8367C_MIB_MAX_LOG_MODE_IDX)
+        return RT_ERR_OUT_OF_RANGE;
+
+    return rtl8367c_setAsicRegBit(RTL8367C_REG_MIB_CTRL5, index,type);
+}
+
+/* Function Name:
+ *      rtl8367c_getAsicMIBsLoggingType
+ * Description:
+ *      MIB logging counter type
+ * Input:
+ *      index   - logging counter mode index (0~15)
+ * Output:
+ *      pType   - 0: packet count, 1: byte count
+ * Return:
+ *      RT_ERR_OK               - Success
+ *      RT_ERR_SMI              - SMI access error
+ *      RT_ERR_OUT_OF_RANGE     - input parameter out of range
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicMIBsLoggingType(rtk_uint32 index, rtk_uint32* pType)
+{
+    if(index > RTL8367C_MIB_MAX_LOG_MODE_IDX)
+        return RT_ERR_OUT_OF_RANGE;
+
+    return rtl8367c_getAsicRegBit(RTL8367C_REG_MIB_CTRL5, index,pType);
+}
+
+/* Function Name:
+ *      rtl8367c_setAsicMIBsResetLoggingCounter
+ * Description:
+ *      Reset MIB logging counter
+ * Input:
+ *      index   - logging counter index (0~31)
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK               - Success
+ *      RT_ERR_SMI              - SMI access error
+ *      RT_ERR_OUT_OF_RANGE     - input parameter out of range
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_setAsicMIBsResetLoggingCounter(rtk_uint32 index)
+{
+    ret_t retVal;
+
+    if(index > RTL8367C_MIB_MAX_LOG_CNT_IDX)
+        return RT_ERR_OUT_OF_RANGE;
+
+    if(index < 16)
+        retVal = rtl8367c_setAsicReg(RTL8367C_REG_MIB_CTRL1, 1<<index);
+    else
+        retVal = rtl8367c_setAsicReg(RTL8367C_REG_MIB_CTRL2, 1<<(index-16));
+
+    return retVal;
+}
+
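+/*
+ * Editor's example sketch (not part of the Realtek sources): configure
+ * logging mode index 0 as a 64-bit byte counter and reset logging counter 0.
+ * The assumption that mode index 0 governs the first counter pair is the
+ * editor's, based on the 16 mode bits covering 32 counters.
+ */
+static ret_t example_logging_counter(void)
+{
+    ret_t retVal;
+
+    if((retVal = rtl8367c_setAsicMIBsLoggingMode(0, 1)) != RT_ERR_OK)   /* 64-bit mode */
+        return retVal;
+
+    if((retVal = rtl8367c_setAsicMIBsLoggingType(0, 1)) != RT_ERR_OK)   /* byte count */
+        return retVal;
+
+    return rtl8367c_setAsicMIBsResetLoggingCounter(0);
+}
+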
+/* Function Name:
+ *      rtl8367c_setAsicMIBsLength
+ * Description:
+ *      Set MIB length counting mode
+ * Input:
+ *      txLengthMode    - 0: tag length is not counted, 1: tag length is counted
+ *      rxLengthMode    - 0: tag length is not counted, 1: tag length is counted
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK               - Success
+ *      RT_ERR_SMI              - SMI access error
+ *      RT_ERR_OUT_OF_RANGE     - input parameter out of range
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_setAsicMIBsLength(rtk_uint32 txLengthMode, rtk_uint32 rxLengthMode)
+{
+    ret_t retVal;
+
+    if( (retVal = rtl8367c_setAsicRegBit(RTL8367C_REG_MIB_RMON_LEN_CTRL, RTL8367C_TX_LENGTH_CTRL_OFFSET, txLengthMode)) != RT_ERR_OK)
+        return retVal;
+
+    if( (retVal = rtl8367c_setAsicRegBit(RTL8367C_REG_MIB_RMON_LEN_CTRL, RTL8367C_RX_LENGTH_CTRL_OFFSET, rxLengthMode)) != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+/* Function Name:
+ *      rtl8367c_getAsicMIBsLength
+ * Description:
+ *      Get MIB length counting mode
+ * Input:
+ *      None
+ * Output:
+ *      pTxLengthMode - 0: tag length is not counted, 1: tag length is counted
+ *      pRxLengthMode - 0: tag length is not counted, 1: tag length is counted
+ * Return:
+ *      RT_ERR_OK               - Success
+ *      RT_ERR_SMI              - SMI access error
+ *      RT_ERR_OUT_OF_RANGE     - input parameter out of range
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicMIBsLength(rtk_uint32 *pTxLengthMode, rtk_uint32 *pRxLengthMode)
+{
+    ret_t retVal;
+
+    if( (retVal = rtl8367c_getAsicRegBit(RTL8367C_REG_MIB_RMON_LEN_CTRL, RTL8367C_TX_LENGTH_CTRL_OFFSET, pTxLengthMode)) != RT_ERR_OK)
+        return retVal;
+
+    if( (retVal = rtl8367c_getAsicRegBit(RTL8367C_REG_MIB_RMON_LEN_CTRL, RTL8367C_RX_LENGTH_CTRL_OFFSET, pRxLengthMode)) != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
diff -Nruw linux-4.4.115-fbx/drivers/misc/freebox./rtlapi/rtl8367c_asicdrv_mib.h linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/rtl8367c_asicdrv_mib.h
--- linux-4.4.115-fbx/drivers/misc/freebox./rtlapi/rtl8367c_asicdrv_mib.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/rtl8367c_asicdrv_mib.h	2019-01-22 16:16:24.715257382 +0100
@@ -0,0 +1,116 @@
+#ifndef _RTL8367C_ASICDRV_MIB_H_
+#define _RTL8367C_ASICDRV_MIB_H_
+
+#include <rtl8367c_asicdrv.h>
+
+#define RTL8367C_MIB_PORT_OFFSET                (0x7C)
+#define RTL8367C_MIB_LEARNENTRYDISCARD_OFFSET   (0x420)
+
+#define RTL8367C_MAX_LOG_CNT_NUM                (32)
+#define RTL8367C_MIB_MAX_LOG_CNT_IDX            (RTL8367C_MAX_LOG_CNT_NUM - 1)
+#define RTL8367C_MIB_LOG_CNT_OFFSET             (0x3E0)
+#define RTL8367C_MIB_MAX_LOG_MODE_IDX           (16-1)
+
+typedef enum RTL8367C_MIBCOUNTER_E{
+
+    /* RX */
+    ifInOctets = 0,
+
+    dot3StatsFCSErrors,
+    dot3StatsSymbolErrors,
+    dot3InPauseFrames,
+    dot3ControlInUnknownOpcodes,
+
+    etherStatsFragments,
+    etherStatsJabbers,
+    ifInUcastPkts,
+    etherStatsDropEvents,
+
+    ifInMulticastPkts,
+    ifInBroadcastPkts,
+    inMldChecksumError,
+    inIgmpChecksumError,
+    inMldSpecificQuery,
+    inMldGeneralQuery,
+    inIgmpSpecificQuery,
+    inIgmpGeneralQuery,
+    inMldLeaves,
+    inIgmpLeaves,
+
+    /* TX/RX */
+    etherStatsOctets,
+
+    etherStatsUnderSizePkts,
+    etherOversizeStats,
+    etherStatsPkts64Octets,
+    etherStatsPkts65to127Octets,
+    etherStatsPkts128to255Octets,
+    etherStatsPkts256to511Octets,
+    etherStatsPkts512to1023Octets,
+    etherStatsPkts1024to1518Octets,
+
+    /* TX */
+    ifOutOctets,
+
+    dot3StatsSingleCollisionFrames,
+    dot3StatMultipleCollisionFrames,
+    dot3sDeferredTransmissions,
+    dot3StatsLateCollisions,
+    etherStatsCollisions,
+    dot3StatsExcessiveCollisions,
+    dot3OutPauseFrames,
+    ifOutDiscards,
+
+    /* ALE */
+    dot1dTpPortInDiscards,
+    ifOutUcastPkts,
+    ifOutMulticastPkts,
+    ifOutBroadcastPkts,
+    outOampduPkts,
+    inOampduPkts,
+
+    inIgmpJoinsSuccess,
+    inIgmpJoinsFail,
+    inMldJoinsSuccess,
+    inMldJoinsFail,
+    inReportSuppressionDrop,
+    inLeaveSuppressionDrop,
+    outIgmpReports,
+    outIgmpLeaves,
+    outIgmpGeneralQuery,
+    outIgmpSpecificQuery,
+    outMldReports,
+    outMldLeaves,
+    outMldGeneralQuery,
+    outMldSpecificQuery,
+    inKnownMulticastPkts,
+
+    /*Device only */
+    dot1dTpLearnedEntryDiscards,
+    RTL8367C_MIBS_NUMBER,
+
+}RTL8367C_MIBCOUNTER;
+
+
+extern ret_t rtl8367c_setAsicMIBsCounterReset(rtk_uint32 greset, rtk_uint32 qmreset, rtk_uint32 pmask);
+extern ret_t rtl8367c_getAsicMIBsCounter(rtk_uint32 port,RTL8367C_MIBCOUNTER mibIdx, rtk_uint64* pCounter);
+extern ret_t rtl8367c_getAsicMIBsLogCounter(rtk_uint32 index, rtk_uint32 *pCounter);
+extern ret_t rtl8367c_getAsicMIBsControl(rtk_uint32* pMask);
+
+extern ret_t rtl8367c_setAsicMIBsResetValue(rtk_uint32 value);
+extern ret_t rtl8367c_getAsicMIBsResetValue(rtk_uint32* value);
+
+extern ret_t rtl8367c_setAsicMIBsUsageMode(rtk_uint32 mode);
+extern ret_t rtl8367c_getAsicMIBsUsageMode(rtk_uint32* pMode);
+extern ret_t rtl8367c_setAsicMIBsTimer(rtk_uint32 timer);
+extern ret_t rtl8367c_getAsicMIBsTimer(rtk_uint32* pTimer);
+extern ret_t rtl8367c_setAsicMIBsLoggingMode(rtk_uint32 index, rtk_uint32 mode);
+extern ret_t rtl8367c_getAsicMIBsLoggingMode(rtk_uint32 index, rtk_uint32* pMode);
+extern ret_t rtl8367c_setAsicMIBsLoggingType(rtk_uint32 index, rtk_uint32 type);
+extern ret_t rtl8367c_getAsicMIBsLoggingType(rtk_uint32 index, rtk_uint32* pType);
+extern ret_t rtl8367c_setAsicMIBsResetLoggingCounter(rtk_uint32 index);
+extern ret_t rtl8367c_setAsicMIBsLength(rtk_uint32 txLengthMode, rtk_uint32 rxLengthMode);
+extern ret_t rtl8367c_getAsicMIBsLength(rtk_uint32 *pTxLengthMode, rtk_uint32 *pRxLengthMode);
+
+#endif /*#ifndef _RTL8367C_ASICDRV_MIB_H_*/
+
diff -Nruw linux-4.4.115-fbx/drivers/misc/freebox./rtlapi/rtl8367c_asicdrv_mirror.c linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/rtl8367c_asicdrv_mirror.c
--- linux-4.4.115-fbx/drivers/misc/freebox./rtlapi/rtl8367c_asicdrv_mirror.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/rtl8367c_asicdrv_mirror.c	2019-01-22 16:16:24.715257382 +0100
@@ -0,0 +1,474 @@
+/*
+ * Copyright (C) 2013 Realtek Semiconductor Corp.
+ * All Rights Reserved.
+ *
+ * This program is the proprietary software of Realtek Semiconductor
+ * Corporation and/or its licensors, and only be used, duplicated,
+ * modified or distributed under the authorized license from Realtek.
+ *
+ * ANY USE OF THE SOFTWARE OTHER THAN AS AUTHORIZED UNDER
+ * THIS LICENSE OR COPYRIGHT LAW IS PROHIBITED.
+ *
+ * $Revision: 76306 $
+ * $Date: 2017-03-08 15:13:58 +0800 (Wed, 08 Mar 2017) $
+ *
+ * Purpose : RTL8367C switch high-level API for RTL8367C
+ * Feature : Port mirror related functions
+ *
+ */
+#include <rtl8367c_asicdrv_mirror.h>
+/* Function Name:
+ *      rtl8367c_setAsicPortMirror
+ * Description:
+ *      Set port mirror function
+ * Input:
+ *      source  - Source port
+ *      monitor - Monitor (destination) port
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK       - Success
+ *      RT_ERR_SMI      - SMI access error
+ *      RT_ERR_PORT_ID  - Invalid port number
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_setAsicPortMirror(rtk_uint32 source, rtk_uint32 monitor)
+{
+    ret_t retVal;
+
+    if((source > RTL8367C_PORTIDMAX) || (monitor > RTL8367C_PORTIDMAX))
+        return RT_ERR_PORT_ID;
+
+    retVal = rtl8367c_setAsicRegBits(RTL8367C_MIRROR_CTRL_REG, RTL8367C_MIRROR_SOURCE_PORT_MASK, source);
+    if(retVal != RT_ERR_OK)
+        return retVal;
+
+
+    return rtl8367c_setAsicRegBits(RTL8367C_MIRROR_CTRL_REG, RTL8367C_MIRROR_MONITOR_PORT_MASK, monitor);
+}
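+
+/*
+ * Editor's example sketch (not part of the Realtek sources): mirror both
+ * directions of port 1 to monitor port 5. Both the single source field and
+ * the source portmask are programmed; port numbers are illustrative.
+ */
+static ret_t example_mirror_port1_to_port5(void)
+{
+    ret_t retVal;
+
+    if((retVal = rtl8367c_setAsicPortMirror(1, 5)) != RT_ERR_OK)
+        return retVal;
+
+    if((retVal = rtl8367c_setAsicPortMirrorMask(1 << 1)) != RT_ERR_OK)
+        return retVal;
+
+    if((retVal = rtl8367c_setAsicPortMirrorRxFunction(1)) != RT_ERR_OK)
+        return retVal;
+
+    return rtl8367c_setAsicPortMirrorTxFunction(1);
+}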
+/* Function Name:
+ *      rtl8367c_getAsicPortMirror
+ * Description:
+ *      Get port mirror function
+ * Input:
+ *      None
+ * Output:
+ *      pSource     - Source port
+ *      pMonitor    - Monitor (destination) port
+ * Return:
+ *      RT_ERR_OK       - Success
+ *      RT_ERR_SMI      - SMI access error
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicPortMirror(rtk_uint32 *pSource, rtk_uint32 *pMonitor)
+{
+    ret_t retVal;
+
+    retVal = rtl8367c_getAsicRegBits(RTL8367C_MIRROR_CTRL_REG, RTL8367C_MIRROR_SOURCE_PORT_MASK, pSource);
+    if(retVal != RT_ERR_OK)
+        return retVal;
+
+    return rtl8367c_getAsicRegBits(RTL8367C_MIRROR_CTRL_REG, RTL8367C_MIRROR_MONITOR_PORT_MASK, pMonitor);
+}
+/* Function Name:
+ *      rtl8367c_setAsicPortMirrorRxFunction
+ * Description:
+ *      Set the mirror function on RX of the mirrored port
+ * Input:
+ *      enabled     - 1: enabled, 0: disabled
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK       - Success
+ *      RT_ERR_SMI      - SMI access error
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_setAsicPortMirrorRxFunction(rtk_uint32 enabled)
+{
+    return rtl8367c_setAsicRegBit(RTL8367C_MIRROR_CTRL_REG, RTL8367C_MIRROR_RX_OFFSET, enabled);
+}
+/* Function Name:
+ *      rtl8367c_getAsicPortMirrorRxFunction
+ * Description:
+ *      Get the mirror function on RX of the mirrored port
+ * Input:
+ *      None
+ * Output:
+ *      pEnabled    - 1: enabled, 0: disabled
+ * Return:
+ *      RT_ERR_OK       - Success
+ *      RT_ERR_SMI      - SMI access error
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicPortMirrorRxFunction(rtk_uint32* pEnabled)
+{
+    return rtl8367c_getAsicRegBit(RTL8367C_MIRROR_CTRL_REG, RTL8367C_MIRROR_RX_OFFSET, pEnabled);
+}
+/* Function Name:
+ *      rtl8367c_setAsicPortMirrorTxFunction
+ * Description:
+ *      Set the mirror function on TX of the mirrored port
+ * Input:
+ *      enabled     - 1: enabled, 0: disabled
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK       - Success
+ *      RT_ERR_SMI      - SMI access error
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_setAsicPortMirrorTxFunction(rtk_uint32 enabled)
+{
+    return rtl8367c_setAsicRegBit(RTL8367C_MIRROR_CTRL_REG, RTL8367C_MIRROR_TX_OFFSET, enabled);
+}
+/* Function Name:
+ *      rtl8367c_getAsicPortMirrorTxFunction
+ * Description:
+ *      Get the mirror function on TX of the mirrored port
+ * Input:
+ *      None
+ * Output:
+ *      pEnabled    - 1: enabled, 0: disabled
+ * Return:
+ *      RT_ERR_OK       - Success
+ *      RT_ERR_SMI      - SMI access error
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicPortMirrorTxFunction(rtk_uint32* pEnabled)
+{
+    return rtl8367c_getAsicRegBit(RTL8367C_MIRROR_CTRL_REG, RTL8367C_MIRROR_TX_OFFSET, pEnabled);
+}
+/* Function Name:
+ *      rtl8367c_setAsicPortMirrorIsolation
+ * Description:
+ *      Set the traffic isolation on monitor port
+ * Input:
+ *      enabled     - 1: enabled, 0: disabled
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK       - Success
+ *      RT_ERR_SMI      - SMI access error
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_setAsicPortMirrorIsolation(rtk_uint32 enabled)
+{
+    return rtl8367c_setAsicRegBit(RTL8367C_MIRROR_CTRL_REG, RTL8367C_MIRROR_ISO_OFFSET, enabled);
+}
+/* Function Name:
+ *      rtl8367c_getAsicPortMirrorIsolation
+ * Description:
+ *      Get the traffic isolation on monitor port
+ * Input:
+ *      None
+ * Output:
+ *      pEnabled    - 1: enabled, 0: disabled
+ * Return:
+ *      RT_ERR_OK       - Success
+ *      RT_ERR_SMI      - SMI access error
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicPortMirrorIsolation(rtk_uint32* pEnabled)
+{
+    return rtl8367c_getAsicRegBit(RTL8367C_MIRROR_CTRL_REG, RTL8367C_MIRROR_ISO_OFFSET, pEnabled);
+}
+
+/* Function Name:
+ *      rtl8367c_setAsicPortMirrorMask
+ * Description:
+ *      Set mirror source port mask
+ * Input:
+ *      SourcePortmask  - Source Portmask
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK       - Success
+ *      RT_ERR_SMI      - SMI access error
+ *      RT_ERR_PORT_MASK- Port Mask Error
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_setAsicPortMirrorMask(rtk_uint32 SourcePortmask)
+{
+    if( SourcePortmask > RTL8367C_PORTMASK)
+        return RT_ERR_PORT_MASK;
+
+    return rtl8367c_setAsicRegBits(RTL8367C_REG_MIRROR_SRC_PMSK, RTL8367C_MIRROR_SRC_PMSK_MASK, SourcePortmask);
+}
+
+/* Function Name:
+ *      rtl8367c_getAsicPortMirrorMask
+ * Description:
+ *      Get mirror source port mask
+ * Input:
+ *      None
+ * Output:
+ *      pSourcePortmask     - Source Portmask
+ * Return:
+ *      RT_ERR_OK       - Success
+ *      RT_ERR_SMI      - SMI access error
+ *      RT_ERR_PORT_MASK- Port Mask Error
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicPortMirrorMask(rtk_uint32 *pSourcePortmask)
+{
+    return rtl8367c_getAsicRegBits(RTL8367C_REG_MIRROR_SRC_PMSK, RTL8367C_MIRROR_SRC_PMSK_MASK, pSourcePortmask);
+}
+
+/* Function Name:
+ *      rtl8367c_setAsicPortMirrorVlanRxLeaky
+ * Description:
+ *      Set the mirror function of VLAN RX leaky
+ * Input:
+ *      enabled     - 1: enabled, 0: disabled
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK       - Success
+ *      RT_ERR_SMI      - SMI access error
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_setAsicPortMirrorVlanRxLeaky(rtk_uint32 enabled)
+{
+    return rtl8367c_setAsicRegBit(RTL8367C_REG_MIRROR_CTRL2, RTL8367C_MIRROR_RX_VLAN_LEAKY_OFFSET, enabled);
+}
+/* Function Name:
+ *      rtl8367c_getAsicPortMirrorVlanRxLeaky
+ * Description:
+ *      Get the mirror function of VLAN RX leaky
+ * Input:
+ *      None
+ * Output:
+ *      pEnabled    - 1: enabled, 0: disabled
+ * Return:
+ *      RT_ERR_OK       - Success
+ *      RT_ERR_SMI      - SMI access error
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicPortMirrorVlanRxLeaky(rtk_uint32* pEnabled)
+{
+    return rtl8367c_getAsicRegBit(RTL8367C_REG_MIRROR_CTRL2, RTL8367C_MIRROR_RX_VLAN_LEAKY_OFFSET, pEnabled);
+}
+
+/* Function Name:
+ *      rtl8367c_setAsicPortMirrorVlanTxLeaky
+ * Description:
+ *      Set the mirror function of VLAN TX leaky
+ * Input:
+ *      enabled     - 1: enabled, 0: disabled
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK       - Success
+ *      RT_ERR_SMI      - SMI access error
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_setAsicPortMirrorVlanTxLeaky(rtk_uint32 enabled)
+{
+    return rtl8367c_setAsicRegBit(RTL8367C_REG_MIRROR_CTRL2, RTL8367C_MIRROR_TX_VLAN_LEAKY_OFFSET, enabled);
+}
+/* Function Name:
+ *      rtl8367c_getAsicPortMirrorVlanTxLeaky
+ * Description:
+ *      Get the mirror function of VLAN TX leaky
+ * Input:
+ *      None
+ * Output:
+ *      pEnabled    - 1: enabled, 0: disabled
+ * Return:
+ *      RT_ERR_OK       - Success
+ *      RT_ERR_SMI      - SMI access error
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicPortMirrorVlanTxLeaky(rtk_uint32* pEnabled)
+{
+    return rtl8367c_getAsicRegBit(RTL8367C_REG_MIRROR_CTRL2, RTL8367C_MIRROR_TX_VLAN_LEAKY_OFFSET, pEnabled);
+}
+
+/* Function Name:
+ *      rtl8367c_setAsicPortMirrorIsolationRxLeaky
+ * Description:
+ *      Set the mirror function of Isolation RX leaky
+ * Input:
+ *      enabled     - 1: enabled, 0: disabled
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK       - Success
+ *      RT_ERR_SMI      - SMI access error
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_setAsicPortMirrorIsolationRxLeaky(rtk_uint32 enabled)
+{
+    return rtl8367c_setAsicRegBit(RTL8367C_REG_MIRROR_CTRL2, RTL8367C_MIRROR_RX_ISOLATION_LEAKY_OFFSET, enabled);
+}
+/* Function Name:
+ *      rtl8367c_getAsicPortMirrorIsolationRxLeaky
+ * Description:
+ *      Get the mirror function of Isolation RX leaky
+ * Input:
+ *      None
+ * Output:
+ *      pEnabled    - 1: enabled, 0: disabled
+ * Return:
+ *      RT_ERR_OK       - Success
+ *      RT_ERR_SMI      - SMI access error
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicPortMirrorIsolationRxLeaky(rtk_uint32* pEnabled)
+{
+    return rtl8367c_getAsicRegBit(RTL8367C_REG_MIRROR_CTRL2, RTL8367C_MIRROR_RX_ISOLATION_LEAKY_OFFSET, pEnabled);
+}
+
+/* Function Name:
+ *      rtl8367c_setAsicPortMirrorIsolationTxLeaky
+ * Description:
+ *      Set the mirror function of Isolation TX leaky
+ * Input:
+ *      enabled     - 1: enabled, 0: disabled
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK       - Success
+ *      RT_ERR_SMI      - SMI access error
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_setAsicPortMirrorIsolationTxLeaky(rtk_uint32 enabled)
+{
+    return rtl8367c_setAsicRegBit(RTL8367C_REG_MIRROR_CTRL2, RTL8367C_MIRROR_TX_ISOLATION_LEAKY_OFFSET, enabled);
+}
+/* Function Name:
+ *      rtl8367c_getAsicPortMirrorIsolationTxLeaky
+ * Description:
+ *      Get the mirror function of Isolation TX leaky
+ * Input:
+ *      None
+ * Output:
+ *      pEnabled    - 1: enabled, 0: disabled
+ * Return:
+ *      RT_ERR_OK       - Success
+ *      RT_ERR_SMI      - SMI access error
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicPortMirrorIsolationTxLeaky(rtk_uint32* pEnabled)
+{
+    return rtl8367c_getAsicRegBit(RTL8367C_REG_MIRROR_CTRL2, RTL8367C_MIRROR_TX_ISOLATION_LEAKY_OFFSET, pEnabled);
+}
+
+/* Function Name:
+ *      rtl8367c_setAsicPortMirrorRealKeep
+ * Description:
+ *      Set the mirror function of keep format
+ * Input:
+ *      mode    - 1: keep original format, 0: follow VLAN config
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK       - Success
+ *      RT_ERR_SMI      - SMI access error
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_setAsicPortMirrorRealKeep(rtk_uint32 mode)
+{
+    return rtl8367c_setAsicRegBit(RTL8367C_REG_MIRROR_CTRL2, RTL8367C_MIRROR_REALKEEP_EN_OFFSET, mode);
+}
+/* Function Name:
+ *      rtl8367c_getAsicPortMirrorRealKeep
+ * Description:
+ *      Get the mirror function of keep format
+ * Input:
+ *      None
+ * Output:
+ *      pMode   - 1: keep original format, 0: follow VLAN config
+ * Return:
+ *      RT_ERR_OK       - Success
+ *      RT_ERR_SMI      - SMI access error
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicPortMirrorRealKeep(rtk_uint32* pMode)
+{
+    return rtl8367c_getAsicRegBit(RTL8367C_REG_MIRROR_CTRL2, RTL8367C_MIRROR_REALKEEP_EN_OFFSET, pMode);
+}
+
+/* Function Name:
+ *      rtl8367c_setAsicPortMirrorOverride
+ * Description:
+ *      Set the mirror function of override
+ * Input:
+ *      rxMirror    - 1: output rx Mirror format, 0: output forward format
+ *      txMirror    - 1: output tx Mirror format, 0: output forward format
+ *      aclMirror   - 1: output ACL Mirror format, 0: output forward format
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK       - Success
+ *      RT_ERR_SMI      - SMI access error
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_setAsicPortMirrorOverride(rtk_uint32 rxMirror, rtk_uint32 txMirror, rtk_uint32 aclMirror)
+{
+    ret_t retVal;
+
+    if((retVal = rtl8367c_setAsicRegBit(RTL8367C_REG_MIRROR_CTRL3, RTL8367C_MIRROR_RX_OVERRIDE_EN_OFFSET, rxMirror)) != RT_ERR_OK)
+        return retVal;
+
+    if((retVal = rtl8367c_setAsicRegBit(RTL8367C_REG_MIRROR_CTRL3, RTL8367C_MIRROR_TX_OVERRIDE_EN_OFFSET, txMirror)) != RT_ERR_OK)
+        return retVal;
+
+    if((retVal = rtl8367c_setAsicRegBit(RTL8367C_REG_MIRROR_CTRL3, RTL8367C_MIRROR_ACL_OVERRIDE_EN_OFFSET, aclMirror)) != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+/* Function Name:
+ *      rtl8367c_getAsicPortMirrorOverride
+ * Description:
+ *      Get the mirror function of override
+ * Input:
+ *      None
+ * Output:
+ *      pRxMirror   - 1: output rx Mirror format, 0: output forward format
+ *      pTxMirror   - 1: output tx Mirror format, 0: output forward format
+ *      pAclMirror  - 1: output ACL Mirror format, 0: output forward format
+ * Return:
+ *      RT_ERR_OK       - Success
+ *      RT_ERR_SMI      - SMI access error
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicPortMirrorOverride(rtk_uint32 *pRxMirror, rtk_uint32 *pTxMirror, rtk_uint32 *pAclMirror)
+{
+    ret_t retVal;
+
+    if((retVal = rtl8367c_getAsicRegBit(RTL8367C_REG_MIRROR_CTRL3, RTL8367C_MIRROR_RX_OVERRIDE_EN_OFFSET, pRxMirror)) != RT_ERR_OK)
+        return retVal;
+
+    if((retVal = rtl8367c_getAsicRegBit(RTL8367C_REG_MIRROR_CTRL3, RTL8367C_MIRROR_TX_OVERRIDE_EN_OFFSET, pTxMirror)) != RT_ERR_OK)
+        return retVal;
+
+    if((retVal = rtl8367c_getAsicRegBit(RTL8367C_REG_MIRROR_CTRL3, RTL8367C_MIRROR_ACL_OVERRIDE_EN_OFFSET, pAclMirror)) != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
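+
+/*
+ * Usage sketch (editor's illustration, not part of the vendor sources):
+ * a typical mirror setup chains the calls in this file -- pick the
+ * source/monitor pair, enable both directions, then force mirror-format
+ * output via the override bits.  The port numbers are hypothetical.
+ *
+ *    ret_t mirror_port3_to_port0(void)
+ *    {
+ *        ret_t ret;
+ *
+ *        if ((ret = rtl8367c_setAsicPortMirror(3, 0)) != RT_ERR_OK)
+ *            return ret;
+ *        if ((ret = rtl8367c_setAsicPortMirrorRxFunction(1)) != RT_ERR_OK)
+ *            return ret;
+ *        if ((ret = rtl8367c_setAsicPortMirrorTxFunction(1)) != RT_ERR_OK)
+ *            return ret;
+ *        return rtl8367c_setAsicPortMirrorOverride(1, 1, 0);
+ *    }
+ */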
diff -Nruw linux-4.4.115-fbx/drivers/misc/freebox./rtlapi/rtl8367c_asicdrv_mirror.h linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/rtl8367c_asicdrv_mirror.h
--- linux-4.4.115-fbx/drivers/misc/freebox./rtlapi/rtl8367c_asicdrv_mirror.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/rtl8367c_asicdrv_mirror.h	2019-01-22 16:16:24.715257382 +0100
@@ -0,0 +1,32 @@
+#ifndef _RTL8367C_ASICDRV_MIRROR_H_
+#define _RTL8367C_ASICDRV_MIRROR_H_
+
+#include <rtl8367c_asicdrv.h>
+
+extern ret_t rtl8367c_setAsicPortMirror(rtk_uint32 source, rtk_uint32 monitor);
+extern ret_t rtl8367c_getAsicPortMirror(rtk_uint32 *pSource, rtk_uint32 *pMonitor);
+extern ret_t rtl8367c_setAsicPortMirrorRxFunction(rtk_uint32 enabled);
+extern ret_t rtl8367c_getAsicPortMirrorRxFunction(rtk_uint32* pEnabled);
+extern ret_t rtl8367c_setAsicPortMirrorTxFunction(rtk_uint32 enabled);
+extern ret_t rtl8367c_getAsicPortMirrorTxFunction(rtk_uint32* pEnabled);
+extern ret_t rtl8367c_setAsicPortMirrorIsolation(rtk_uint32 enabled);
+extern ret_t rtl8367c_getAsicPortMirrorIsolation(rtk_uint32* pEnabled);
+extern ret_t rtl8367c_setAsicPortMirrorPriority(rtk_uint32 priority);
+extern ret_t rtl8367c_getAsicPortMirrorPriority(rtk_uint32* pPriority);
+extern ret_t rtl8367c_setAsicPortMirrorMask(rtk_uint32 SourcePortmask);
+extern ret_t rtl8367c_getAsicPortMirrorMask(rtk_uint32 *pSourcePortmask);
+extern ret_t rtl8367c_setAsicPortMirrorVlanRxLeaky(rtk_uint32 enabled);
+extern ret_t rtl8367c_getAsicPortMirrorVlanRxLeaky(rtk_uint32* pEnabled);
+extern ret_t rtl8367c_setAsicPortMirrorVlanTxLeaky(rtk_uint32 enabled);
+extern ret_t rtl8367c_getAsicPortMirrorVlanTxLeaky(rtk_uint32* pEnabled);
+extern ret_t rtl8367c_setAsicPortMirrorIsolationRxLeaky(rtk_uint32 enabled);
+extern ret_t rtl8367c_getAsicPortMirrorIsolationRxLeaky(rtk_uint32* pEnabled);
+extern ret_t rtl8367c_setAsicPortMirrorIsolationTxLeaky(rtk_uint32 enabled);
+extern ret_t rtl8367c_getAsicPortMirrorIsolationTxLeaky(rtk_uint32* pEnabled);
+extern ret_t rtl8367c_setAsicPortMirrorRealKeep(rtk_uint32 mode);
+extern ret_t rtl8367c_getAsicPortMirrorRealKeep(rtk_uint32* pMode);
+extern ret_t rtl8367c_setAsicPortMirrorOverride(rtk_uint32 rxMirror, rtk_uint32 txMirror, rtk_uint32 aclMirror);
+extern ret_t rtl8367c_getAsicPortMirrorOverride(rtk_uint32 *pRxMirror, rtk_uint32 *pTxMirror, rtk_uint32 *pAclMirror);
+
+#endif /*#ifndef _RTL8367C_ASICDRV_MIRROR_H_*/
+
diff -Nruw linux-4.4.115-fbx/drivers/misc/freebox./rtlapi/rtl8367c_asicdrv_misc.c linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/rtl8367c_asicdrv_misc.c
--- linux-4.4.115-fbx/drivers/misc/freebox./rtlapi/rtl8367c_asicdrv_misc.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/rtl8367c_asicdrv_misc.c	2019-01-22 16:16:24.715257382 +0100
@@ -0,0 +1,262 @@
+/*
+ * Copyright (C) 2013 Realtek Semiconductor Corp.
+ * All Rights Reserved.
+ *
+ * This program is the proprietary software of Realtek Semiconductor
+ * Corporation and/or its licensors, and only be used, duplicated,
+ * modified or distributed under the authorized license from Realtek.
+ *
+ * ANY USE OF THE SOFTWARE OTHER THAN AS AUTHORIZED UNDER
+ * THIS LICENSE OR COPYRIGHT LAW IS PROHIBITED.
+ *
+ * $Revision: 79623 $
+ * $Date: 2017-06-14 17:15:42 +0800 (Wed, 14 Jun 2017) $
+ *
+ * Purpose : RTL8367C switch high-level API for RTL8367C
+ * Feature : Miscellaneous functions
+ *
+ */
+
+#include <rtl8367c_asicdrv_misc.h>
+/* Function Name:
+ *      rtl8367c_setAsicMacAddress
+ * Description:
+ *      Set switch MAC address
+ * Input:
+ *      mac     - switch mac
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK       - Success
+ *      RT_ERR_SMI      - SMI access error
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_setAsicMacAddress(ether_addr_t mac)
+{
+    ret_t retVal;
+    rtk_uint32 regData;
+    rtk_uint8 *accessPtr;
+    rtk_uint32 i;
+
+    accessPtr = (rtk_uint8*)&mac;
+
+    for(i = 0; i <= 2; i++)
+    {
+        regData = (*(accessPtr + (i*2)) << 8) | *(accessPtr + (i*2) + 1);
+        retVal = rtl8367c_setAsicReg(RTL8367C_REG_SWITCH_MAC2 - i, regData);
+        if(retVal != RT_ERR_OK)
+            return retVal;
+    }
+
+    return retVal;
+}
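+
+/*
+ * Editor's note: the loop above packs the 6-byte MAC into three 16-bit
+ * registers, high byte first, at descending register addresses.  As a
+ * sketch (hypothetical MAC, not vendor documentation), 00:11:22:33:44:55
+ * is written as:
+ *
+ *    RTL8367C_REG_SWITCH_MAC2     <- 0x0011   (i = 0)
+ *    RTL8367C_REG_SWITCH_MAC2 - 1 <- 0x2233   (i = 1)
+ *    RTL8367C_REG_SWITCH_MAC2 - 2 <- 0x4455   (i = 2)
+ */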
+/* Function Name:
+ *      rtl8367c_getAsicMacAddress
+ * Description:
+ *      Get switch MAC address
+ * Input:
+ *      None
+ * Output:
+ *      pMac    - switch mac
+ * Return:
+ *      RT_ERR_OK       - Success
+ *      RT_ERR_SMI      - SMI access error
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicMacAddress(ether_addr_t *pMac)
+{
+    ret_t retVal;
+    rtk_uint32 regData;
+    rtk_uint8 *accessPtr;
+    rtk_uint32 i;
+
+    accessPtr = (rtk_uint8*)pMac;
+
+    for(i = 0; i <= 2; i++)
+    {
+        retVal = rtl8367c_getAsicReg(RTL8367C_REG_SWITCH_MAC2 - i, &regData);
+        if(retVal != RT_ERR_OK)
+            return retVal;
+
+        *accessPtr = (regData & 0xFF00) >> 8;
+        accessPtr ++;
+        *accessPtr = regData & 0xFF;
+        accessPtr ++;
+    }
+
+    return retVal;
+}
+/* Function Name:
+ *      rtl8367c_getAsicDebugInfo
+ * Description:
+ *      Get per-port packet forward debugging information
+ * Input:
+ *      port        - Physical port number (0~7)
+ * Output:
+ *      pDebugInfo  - per-port packet trap/drop/forward reason
+ * Return:
+ *      RT_ERR_OK       - Success
+ *      RT_ERR_SMI      - SMI access error
+ *      RT_ERR_PORT_ID  - Invalid port number
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicDebugInfo(rtk_uint32 port, rtk_uint32 *pDebugInfo)
+{
+    if(port > RTL8367C_PORTIDMAX)
+        return RT_ERR_PORT_ID;
+
+    return rtl8367c_getAsicRegBits(RTL8367C_DEBUG_INFO_REG(port), RTL8367C_DEBUG_INFO_MASK(port), pDebugInfo);
+}
+/* Function Name:
+ *      rtl8367c_setAsicPortJamMode
+ * Description:
+ *      Set half duplex flow control setting
+ * Input:
+ *      mode    - 0: Back-Pressure, 1: DEFER
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK       - Success
+ *      RT_ERR_SMI      - SMI access error
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_setAsicPortJamMode(rtk_uint32 mode)
+{
+    return rtl8367c_setAsicRegBit(RTL8367C_REG_CFG_BACKPRESSURE, RTL8367C_LONGTXE_OFFSET, mode);
+}
+/* Function Name:
+ *      rtl8367c_getAsicPortJamMode
+ * Description:
+ *      Get half duplex flow control setting
+ * Input:
+ *      None
+ * Output:
+ *      pMode   - 0: Back-Pressure, 1: DEFER
+ * Return:
+ *      RT_ERR_OK       - Success
+ *      RT_ERR_SMI      - SMI access error
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicPortJamMode(rtk_uint32* pMode)
+{
+    return rtl8367c_getAsicRegBit(RTL8367C_REG_CFG_BACKPRESSURE, RTL8367C_LONGTXE_OFFSET, pMode);
+}
+
+/* Function Name:
+ *      rtl8367c_setAsicMaxLengthCfg
+ * Description:
+ *      Set Max packet length configuration
+ * Input:
+ *      cfgId       - Configuration ID
+ *      maxLength   - Max Length
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK       - Success
+ *      RT_ERR_SMI      - SMI access error
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_setAsicMaxLengthCfg(rtk_uint32 cfgId, rtk_uint32 maxLength)
+{
+    return rtl8367c_setAsicRegBits(RTL8367C_REG_MAX_LEN_RX_TX_CFG0 + cfgId, RTL8367C_MAX_LEN_RX_TX_CFG0_MASK, maxLength);
+}
+
+/* Function Name:
+ *      rtl8367c_getAsicMaxLengthCfg
+ * Description:
+ *      Get Max packet length configuration
+ * Input:
+ *      cfgId       - Configuration ID
+ * Output:
+ *      pMaxLength  - Max Length
+ * Return:
+ *      RT_ERR_OK       - Success
+ *      RT_ERR_SMI      - SMI access error
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicMaxLengthCfg(rtk_uint32 cfgId, rtk_uint32 *pMaxLength)
+{
+    return rtl8367c_getAsicRegBits(RTL8367C_REG_MAX_LEN_RX_TX_CFG0 + cfgId, RTL8367C_MAX_LEN_RX_TX_CFG0_MASK, pMaxLength);
+}
+
+/* Function Name:
+ *      rtl8367c_setAsicMaxLength
+ * Description:
+ *      Set Max packet length
+ * Input:
+ *      port        - port ID
+ *      type        - 0: 10M/100M speed, 1: giga speed
+ *      cfgId       - Configuration ID
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK       - Success
+ *      RT_ERR_SMI      - SMI access error
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_setAsicMaxLength(rtk_uint32 port, rtk_uint32 type, rtk_uint32 cfgId)
+{
+    ret_t retVal;
+
+    if(port < 8)
+    {
+        retVal = rtl8367c_setAsicRegBit(RTL8367C_REG_MAX_LENGTH_CFG, (type * 8) + port, cfgId);
+        if(retVal != RT_ERR_OK)
+            return retVal;
+    }
+    else
+    {
+        retVal = rtl8367c_setAsicRegBit(RTL8367C_REG_MAX_LENGTH_CFG_EXT, (type * 3) + port - 8, cfgId);
+        if(retVal != RT_ERR_OK)
+            return retVal;
+    }
+
+    return RT_ERR_OK;
+}
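+
+/*
+ * Editor's note on the bit layout, inferred from the code above rather
+ * than from vendor documentation: RTL8367C_REG_MAX_LENGTH_CFG holds one
+ * selector bit per UTP port and speed class at bit (type * 8) + port,
+ * while the _EXT register covers the three extension ports at bit
+ * (type * 3) + (port - 8).  For example, selecting cfgId 1 for giga
+ * traffic on port 5 sets bit 13 (1 * 8 + 5):
+ *
+ *    ret = rtl8367c_setAsicMaxLength(5, 1, 1);
+ */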
+
+/* Function Name:
+ *      rtl8367c_getAsicMaxLength
+ * Description:
+ *      Get Max packet length
+ * Input:
+ *      port        - port ID
+ *      type        - 0: 10M/100M speed, 1: giga speed
+ * Output:
+ *      pCfgId      - Configuration ID
+ * Return:
+ *      RT_ERR_OK       - Success
+ *      RT_ERR_SMI      - SMI access error
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicMaxLength(rtk_uint32 port, rtk_uint32 type, rtk_uint32 *pCfgId)
+{
+    ret_t retVal;
+
+    if(port < 8)
+    {
+        retVal = rtl8367c_getAsicRegBit(RTL8367C_REG_MAX_LENGTH_CFG, (type * 8) + port, pCfgId);
+        if(retVal != RT_ERR_OK)
+            return retVal;
+    }
+    else
+    {
+        retVal = rtl8367c_getAsicRegBit(RTL8367C_REG_MAX_LENGTH_CFG_EXT, (type * 3) + port - 8, pCfgId);
+        if(retVal != RT_ERR_OK)
+            return retVal;
+    }
+    return RT_ERR_OK;
+}
+
diff -Nruw linux-4.4.115-fbx/drivers/misc/freebox./rtlapi/rtl8367c_asicdrv_misc.h linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/rtl8367c_asicdrv_misc.h
--- linux-4.4.115-fbx/drivers/misc/freebox./rtlapi/rtl8367c_asicdrv_misc.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/rtl8367c_asicdrv_misc.h	2019-01-22 16:16:24.715257382 +0100
@@ -0,0 +1,17 @@
+#ifndef _RTL8367C_ASICDRV_MISC_H_
+#define _RTL8367C_ASICDRV_MISC_H_
+
+#include <rtl8367c_asicdrv.h>
+
+extern ret_t rtl8367c_setAsicMacAddress(ether_addr_t mac);
+extern ret_t rtl8367c_getAsicMacAddress(ether_addr_t *pMac);
+extern ret_t rtl8367c_getAsicDebugInfo(rtk_uint32 port, rtk_uint32 *pDebugInfo);
+extern ret_t rtl8367c_setAsicPortJamMode(rtk_uint32 mode);
+extern ret_t rtl8367c_getAsicPortJamMode(rtk_uint32* pMode);
+extern ret_t rtl8367c_setAsicMaxLengthCfg(rtk_uint32 cfgId, rtk_uint32 maxLength);
+extern ret_t rtl8367c_getAsicMaxLengthCfg(rtk_uint32 cfgId, rtk_uint32 *pMaxLength);
+extern ret_t rtl8367c_setAsicMaxLength(rtk_uint32 port, rtk_uint32 type, rtk_uint32 cfgId);
+extern ret_t rtl8367c_getAsicMaxLength(rtk_uint32 port, rtk_uint32 type, rtk_uint32 *pCfgId);
+
+#endif /*_RTL8367C_ASICDRV_MISC_H_*/
+
diff -Nruw linux-4.4.115-fbx/drivers/misc/freebox./rtlapi/rtl8367c_asicdrv_oam.c linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/rtl8367c_asicdrv_oam.c
--- linux-4.4.115-fbx/drivers/misc/freebox./rtlapi/rtl8367c_asicdrv_oam.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/rtl8367c_asicdrv_oam.c	2019-01-22 16:16:24.715257382 +0100
@@ -0,0 +1,196 @@
+/*
+ * Copyright (C) 2013 Realtek Semiconductor Corp.
+ * All Rights Reserved.
+ *
+ * This program is the proprietary software of Realtek Semiconductor
+ * Corporation and/or its licensors, and only be used, duplicated,
+ * modified or distributed under the authorized license from Realtek.
+ *
+ * ANY USE OF THE SOFTWARE OTHER THAN AS AUTHORIZED UNDER
+ * THIS LICENSE OR COPYRIGHT LAW IS PROHIBITED.
+ *
+ * $Revision: 42321 $
+ * $Date: 2013-08-26 13:51:29 +0800 (Mon, 26 Aug 2013) $
+ *
+ * Purpose : RTL8367C switch high-level API for RTL8367C
+ * Feature : OAM related functions
+ *
+ */
+
+#include <rtl8367c_asicdrv_oam.h>
+/* Function Name:
+ *      rtl8367c_setAsicOamParser
+ * Description:
+ *      Set OAM parser state
+ * Input:
+ *      port    - Physical port number (0~7)
+ *      parser  - Per-Port OAM parser state
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - Success
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_PORT_ID      - Invalid port number
+ *      RT_ERR_NOT_ALLOWED  - Invalid parser state
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_setAsicOamParser(rtk_uint32 port, rtk_uint32 parser)
+{
+    if(port >= RTL8367C_PORTNO)
+        return RT_ERR_PORT_ID;
+
+    if(parser > OAM_PARFWDCPU)
+        return RT_ERR_NOT_ALLOWED;
+
+    return rtl8367c_setAsicRegBits(RTL8367C_REG_OAM_PARSER_CTRL0 + port/8, RTL8367C_OAM_PARSER_MASK(port % 8), parser);
+}
+/* Function Name:
+ *      rtl8367c_getAsicOamParser
+ * Description:
+ *      Get OAM parser state
+ * Input:
+ *      port    - Physical port number (0~7)
+ * Output:
+ *      pParser - Per-Port OAM parser state
+ * Return:
+ *      RT_ERR_OK       - Success
+ *      RT_ERR_SMI      - SMI access error
+ *      RT_ERR_PORT_ID  - Invalid port number
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicOamParser(rtk_uint32 port, rtk_uint32* pParser)
+{
+    if(port >= RTL8367C_PORTNO)
+        return RT_ERR_PORT_ID;
+
+    return rtl8367c_getAsicRegBits(RTL8367C_REG_OAM_PARSER_CTRL0 + port/8, RTL8367C_OAM_PARSER_MASK(port%8), pParser);
+}
+/* Function Name:
+ *      rtl8367c_setAsicOamMultiplexer
+ * Description:
+ *      Set OAM multiplexer state
+ * Input:
+ *      port        - Physical port number (0~7)
+ *      multiplexer - Per-Port OAM multiplexer state
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - Success
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_PORT_ID      - Invalid port number
+ *      RT_ERR_NOT_ALLOWED  - Invalid multiplexer state
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_setAsicOamMultiplexer(rtk_uint32 port, rtk_uint32 multiplexer)
+{
+    if(port >= RTL8367C_PORTNO)
+        return RT_ERR_PORT_ID;
+
+    if(multiplexer > OAM_MULCPU)
+        return RT_ERR_NOT_ALLOWED;
+
+    return rtl8367c_setAsicRegBits(RTL8367C_REG_OAM_MULTIPLEXER_CTRL0 + port/8, RTL8367C_OAM_MULTIPLEXER_MASK(port%8), multiplexer);
+}
+/* Function Name:
+ *      rtl8367c_getAsicOamMultiplexer
+ * Description:
+ *      Get OAM multiplexer state
+ * Input:
+ *      port         - Physical port number (0~7)
+ * Output:
+ *      pMultiplexer - Per-Port OAM multiplexer state
+ * Return:
+ *      RT_ERR_OK       - Success
+ *      RT_ERR_SMI      - SMI access error
+ *      RT_ERR_PORT_ID  - Invalid port number
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicOamMultiplexer(rtk_uint32 port, rtk_uint32* pMultiplexer)
+{
+    if(port >= RTL8367C_PORTNO)
+        return RT_ERR_PORT_ID;
+
+    return rtl8367c_getAsicRegBits(RTL8367C_REG_OAM_MULTIPLEXER_CTRL0 + port/8, RTL8367C_OAM_MULTIPLEXER_MASK(port%8), pMultiplexer);
+}
+/* Function Name:
+ *      rtl8367c_setAsicOamCpuPri
+ * Description:
+ *      Set trap priority for OAM packet
+ * Input:
+ *      priority    - priority (0~7)
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK                   - Success
+ *      RT_ERR_SMI                  - SMI access error
+ *      RT_ERR_QOS_INT_PRIORITY     - Invalid priority
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_setAsicOamCpuPri(rtk_uint32 priority)
+{
+    if(priority > RTL8367C_PRIMAX)
+        return RT_ERR_QOS_INT_PRIORITY;
+
+    return rtl8367c_setAsicRegBits(RTL8367C_REG_QOS_TRAP_PRIORITY0, RTL8367C_OAM_PRIOIRTY_MASK, priority);
+}
+/* Function Name:
+ *      rtl8367c_getAsicOamCpuPri
+ * Description:
+ *      Get trap priority for OAM packet
+ * Input:
+ *      None
+ * Output:
+ *      pPriority   - priority (0~7)
+ * Return:
+ *      RT_ERR_OK   - Success
+ *      RT_ERR_SMI  - SMI access error
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicOamCpuPri(rtk_uint32 *pPriority)
+{
+    return rtl8367c_getAsicRegBits(RTL8367C_REG_QOS_TRAP_PRIORITY0, RTL8367C_OAM_PRIOIRTY_MASK, pPriority);
+}
+/* Function Name:
+ *      rtl8367c_setAsicOamEnable
+ * Description:
+ *      Set OAM function state
+ * Input:
+ *      enabled     - OAM function state, 1: enabled, 0: disabled
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK   - Success
+ *      RT_ERR_SMI  - SMI access error
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_setAsicOamEnable(rtk_uint32 enabled)
+{
+    return rtl8367c_setAsicRegBit(RTL8367C_REG_OAM_CTRL, RTL8367C_OAM_CTRL_OFFSET, enabled);
+}
+/* Function Name:
+ *      rtl8367c_getAsicOamEnable
+ * Description:
+ *      Get OAM function state
+ * Input:
+ *      None
+ * Output:
+ *      pEnabled    - OAM function state, 1: enabled, 0: disabled
+ * Return:
+ *      RT_ERR_OK   - Success
+ *      RT_ERR_SMI  - SMI access error
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicOamEnable(rtk_uint32 *pEnabled)
+{
+    return rtl8367c_getAsicRegBit(RTL8367C_REG_OAM_CTRL, RTL8367C_OAM_CTRL_OFFSET, pEnabled);
+}
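+
+/*
+ * Usage sketch (editor's illustration, hypothetical port number): to
+ * trap 802.3ah OAMPDUs received on port 4 to the CPU at priority 7,
+ * point both the parser and the multiplexer at the CPU, then enable
+ * the OAM block:
+ *
+ *    ret_t trap_oam_on_port4(void)
+ *    {
+ *        ret_t ret;
+ *
+ *        if ((ret = rtl8367c_setAsicOamParser(4, OAM_PARFWDCPU)) != RT_ERR_OK)
+ *            return ret;
+ *        if ((ret = rtl8367c_setAsicOamMultiplexer(4, OAM_MULCPU)) != RT_ERR_OK)
+ *            return ret;
+ *        if ((ret = rtl8367c_setAsicOamCpuPri(7)) != RT_ERR_OK)
+ *            return ret;
+ *        return rtl8367c_setAsicOamEnable(1);
+ *    }
+ */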
diff -Nruw linux-4.4.115-fbx/drivers/misc/freebox./rtlapi/rtl8367c_asicdrv_oam.h linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/rtl8367c_asicdrv_oam.h
--- linux-4.4.115-fbx/drivers/misc/freebox./rtlapi/rtl8367c_asicdrv_oam.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/rtl8367c_asicdrv_oam.h	2019-01-22 16:16:24.715257382 +0100
@@ -0,0 +1,30 @@
+#ifndef _RTL8367C_ASICDRV_OAM_H_
+#define _RTL8367C_ASICDRV_OAM_H_
+
+#include <rtl8367c_asicdrv.h>
+
+enum OAMPARACT
+{
+    OAM_PARFWD = 0,
+    OAM_PARLB,
+    OAM_PARDISCARD,
+    OAM_PARFWDCPU
+};
+
+enum OAMMULACT
+{
+    OAM_MULFWD = 0,
+    OAM_MULDISCARD,
+    OAM_MULCPU
+};
+
+extern ret_t rtl8367c_setAsicOamParser(rtk_uint32 port, rtk_uint32 parser);
+extern ret_t rtl8367c_getAsicOamParser(rtk_uint32 port, rtk_uint32* pParser);
+extern ret_t rtl8367c_setAsicOamMultiplexer(rtk_uint32 port, rtk_uint32 multiplexer);
+extern ret_t rtl8367c_getAsicOamMultiplexer(rtk_uint32 port, rtk_uint32* pMultiplexer);
+extern ret_t rtl8367c_setAsicOamCpuPri(rtk_uint32 priority);
+extern ret_t rtl8367c_getAsicOamCpuPri(rtk_uint32 *pPriority);
+extern ret_t rtl8367c_setAsicOamEnable(rtk_uint32 enabled);
+extern ret_t rtl8367c_getAsicOamEnable(rtk_uint32 *pEnabled);
+#endif /*_RTL8367C_ASICDRV_OAM_H_*/
+
diff -Nruw linux-4.4.115-fbx/drivers/misc/freebox./rtlapi/rtl8367c_asicdrv_phy.c linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/rtl8367c_asicdrv_phy.c
--- linux-4.4.115-fbx/drivers/misc/freebox./rtlapi/rtl8367c_asicdrv_phy.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/rtl8367c_asicdrv_phy.c	2019-01-22 16:16:24.715257382 +0100
@@ -0,0 +1,429 @@
+/*
+ * Copyright (C) 2013 Realtek Semiconductor Corp.
+ * All Rights Reserved.
+ *
+ * This program is the proprietary software of Realtek Semiconductor
+ * Corporation and/or its licensors, and only be used, duplicated,
+ * modified or distributed under the authorized license from Realtek.
+ *
+ * ANY USE OF THE SOFTWARE OTHER THAN AS AUTHORIZED UNDER
+ * THIS LICENSE OR COPYRIGHT LAW IS PROHIBITED.
+ *
+ * $Revision: 77915 $
+ * $Date: 2017-04-18 14:22:19 +0800 (Tue, 18 Apr 2017) $
+ *
+ * Purpose : RTL8367C switch high-level API for RTL8367C
+ * Feature : PHY related functions
+ *
+ */
+#include <rtl8367c_asicdrv_phy.h>
+
+#if defined(MDC_MDIO_OPERATION)
+/* Function Name:
+ *      rtl8367c_setAsicPHYOCPReg
+ * Description:
+ *      Set PHY OCP registers
+ * Input:
+ *      phyNo   - Physical port number (0~7)
+ *      ocpAddr - OCP address
+ *      ocpData - Writing data
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK               - Success
+ *      RT_ERR_SMI              - SMI access error
+ *      RT_ERR_PHY_REG_ID       - invalid PHY address
+ *      RT_ERR_PHY_ID           - invalid PHY no
+ *      RT_ERR_BUSYWAIT_TIMEOUT - PHY access busy
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_setAsicPHYOCPReg(rtk_uint32 phyNo, rtk_uint32 ocpAddr, rtk_uint32 ocpData )
+{
+    ret_t retVal;
+    rtk_uint32 regAddr;
+    rtk_uint32 ocpAddrPrefix, ocpAddr9_6, ocpAddr5_1;
+
+    /* OCP prefix */
+    ocpAddrPrefix = ((ocpAddr & 0xFC00) >> 10);
+    if((retVal = rtl8367c_setAsicRegBits(RTL8367C_REG_GPHY_OCP_MSB_0, RTL8367C_CFG_CPU_OCPADR_MSB_MASK, ocpAddrPrefix)) != RT_ERR_OK)
+        return retVal;
+
+    /*prepare access address*/
+    ocpAddr9_6 = ((ocpAddr >> 6) & 0x000F);
+    ocpAddr5_1 = ((ocpAddr >> 1) & 0x001F);
+    regAddr = RTL8367C_PHY_BASE | (ocpAddr9_6 << 8) | (phyNo << RTL8367C_PHY_OFFSET) | ocpAddr5_1;
+    if((retVal = rtl8367c_setAsicReg(regAddr, ocpData)) != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
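+
+/*
+ * Editor's note: a worked example of the address split above, derived
+ * from the code rather than from vendor documentation.  For
+ * ocpAddr = 0xA400 (the base of the standard MII register window used
+ * by rtl8367c_setAsicPHYReg below):
+ *
+ *    prefix   = 0xA400 >> 10         = 0x29   (written to GPHY_OCP_MSB_0)
+ *    bits 9:6 = (0xA400 >> 6) & 0xF  = 0x0
+ *    bits 5:1 = (0xA400 >> 1) & 0x1F = 0x0
+ *    regAddr  = 0x2000 | (0x0 << 8) | (phyNo << 5) | 0x0
+ *
+ * so MII register 0 of PHY n is reached at switch register 0x2000 + n*0x20.
+ */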
+
+/* Function Name:
+ *      rtl8367c_getAsicPHYOCPReg
+ * Description:
+ *      Get PHY OCP registers
+ * Input:
+ *      phyNo   - Physical port number (0~7)
+ *      ocpAddr - OCP address
+ * Output:
+ *      pRegData - read data
+ * Return:
+ *      RT_ERR_OK               - Success
+ *      RT_ERR_SMI              - SMI access error
+ *      RT_ERR_PHY_REG_ID       - invalid PHY address
+ *      RT_ERR_PHY_ID           - invalid PHY no
+ *      RT_ERR_BUSYWAIT_TIMEOUT - PHY access busy
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicPHYOCPReg(rtk_uint32 phyNo, rtk_uint32 ocpAddr, rtk_uint32 *pRegData )
+{
+    ret_t retVal;
+    rtk_uint32 regAddr;
+    rtk_uint32 ocpAddrPrefix, ocpAddr9_6, ocpAddr5_1;
+    /* OCP prefix */
+    ocpAddrPrefix = ((ocpAddr & 0xFC00) >> 10);
+    if((retVal = rtl8367c_setAsicRegBits(RTL8367C_REG_GPHY_OCP_MSB_0, RTL8367C_CFG_CPU_OCPADR_MSB_MASK, ocpAddrPrefix)) != RT_ERR_OK)
+        return retVal;
+
+    /*prepare access address*/
+    ocpAddr9_6 = ((ocpAddr >> 6) & 0x000F);
+    ocpAddr5_1 = ((ocpAddr >> 1) & 0x001F);
+    regAddr = RTL8367C_PHY_BASE | (ocpAddr9_6 << 8) | (phyNo << RTL8367C_PHY_OFFSET) | ocpAddr5_1;
+    if((retVal = rtl8367c_getAsicReg(regAddr, pRegData)) != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+#else
+
+/* Function Name:
+ *      rtl8367c_setAsicPHYOCPReg
+ * Description:
+ *      Set PHY OCP registers
+ * Input:
+ *      phyNo   - Physical port number (0~7)
+ *      ocpAddr - OCP address
+ *      ocpData - Writing data
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK               - Success
+ *      RT_ERR_SMI              - SMI access error
+ *      RT_ERR_PHY_REG_ID       - invalid PHY address
+ *      RT_ERR_PHY_ID           - invalid PHY no
+ *      RT_ERR_BUSYWAIT_TIMEOUT - PHY access busy
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_setAsicPHYOCPReg(rtk_uint32 phyNo, rtk_uint32 ocpAddr, rtk_uint32 ocpData )
+{
+    ret_t retVal;
+    rtk_uint32 regData;
+    rtk_uint32 busyFlag, checkCounter;
+    rtk_uint32 ocpAddrPrefix, ocpAddr9_6, ocpAddr5_1;
+
+    /*Check internal phy access busy or not*/
+    /*retVal = rtl8367c_getAsicRegBit(RTL8367C_REG_INDRECT_ACCESS_STATUS, RTL8367C_INDRECT_ACCESS_STATUS_OFFSET,&busyFlag);*/
+    retVal = rtl8367c_getAsicReg(RTL8367C_REG_INDRECT_ACCESS_STATUS,&busyFlag);
+    if(retVal != RT_ERR_OK)
+        return retVal;
+
+    if(busyFlag)
+        return RT_ERR_BUSYWAIT_TIMEOUT;
+
+    /* OCP prefix */
+    ocpAddrPrefix = ((ocpAddr & 0xFC00) >> 10);
+    if((retVal = rtl8367c_setAsicRegBits(RTL8367C_REG_GPHY_OCP_MSB_0, RTL8367C_CFG_CPU_OCPADR_MSB_MASK, ocpAddrPrefix)) != RT_ERR_OK)
+        return retVal;
+
+    /*prepare access data*/
+    retVal = rtl8367c_setAsicReg(RTL8367C_REG_INDRECT_ACCESS_WRITE_DATA, ocpData);
+    if(retVal != RT_ERR_OK)
+        return retVal;
+
+    /*prepare access address*/
+    ocpAddr9_6 = ((ocpAddr >> 6) & 0x000F);
+    ocpAddr5_1 = ((ocpAddr >> 1) & 0x001F);
+    regData = RTL8367C_PHY_BASE | (ocpAddr9_6 << 8) | (phyNo << RTL8367C_PHY_OFFSET) | ocpAddr5_1;
+    retVal = rtl8367c_setAsicReg(RTL8367C_REG_INDRECT_ACCESS_ADDRESS, regData);
+    if(retVal != RT_ERR_OK)
+        return retVal;
+
+    /*Set WRITE Command*/
+    retVal = rtl8367c_setAsicReg(RTL8367C_REG_INDRECT_ACCESS_CTRL, RTL8367C_CMD_MASK | RTL8367C_RW_MASK);
+
+    checkCounter = 100;
+    while(checkCounter)
+    {
+        retVal = rtl8367c_getAsicReg(RTL8367C_REG_INDRECT_ACCESS_STATUS,&busyFlag);
+        if((retVal != RT_ERR_OK) || busyFlag)
+        {
+            checkCounter --;
+            if(0 == checkCounter)
+                return RT_ERR_BUSYWAIT_TIMEOUT;
+        }
+        else
+        {
+            checkCounter = 0;
+        }
+    }
+
+    return retVal;
+}
+/* Function Name:
+ *      rtl8367c_getAsicPHYOCPReg
+ * Description:
+ *      Get PHY OCP registers
+ * Input:
+ *      phyNo   - Physical port number (0~7)
+ *      ocpAddr - OCP address
+ * Output:
+ *      pRegData - read data
+ * Return:
+ *      RT_ERR_OK               - Success
+ *      RT_ERR_SMI              - SMI access error
+ *      RT_ERR_PHY_REG_ID       - invalid PHY address
+ *      RT_ERR_PHY_ID           - invalid PHY no
+ *      RT_ERR_BUSYWAIT_TIMEOUT - PHY access busy
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicPHYOCPReg(rtk_uint32 phyNo, rtk_uint32 ocpAddr, rtk_uint32 *pRegData )
+{
+    ret_t retVal;
+    rtk_uint32 regData;
+    rtk_uint32 busyFlag,checkCounter;
+    rtk_uint32 ocpAddrPrefix, ocpAddr9_6, ocpAddr5_1;
+    /*Check internal phy access busy or not*/
+    /*retVal = rtl8367c_getAsicRegBit(RTL8367C_REG_INDRECT_ACCESS_STATUS, RTL8367C_INDRECT_ACCESS_STATUS_OFFSET,&busyFlag);*/
+    retVal = rtl8367c_getAsicReg(RTL8367C_REG_INDRECT_ACCESS_STATUS,&busyFlag);
+    if(retVal != RT_ERR_OK)
+        return retVal;
+
+    if(busyFlag)
+        return RT_ERR_BUSYWAIT_TIMEOUT;
+
+    /* OCP prefix */
+    ocpAddrPrefix = ((ocpAddr & 0xFC00) >> 10);
+    if((retVal = rtl8367c_setAsicRegBits(RTL8367C_REG_GPHY_OCP_MSB_0, RTL8367C_CFG_CPU_OCPADR_MSB_MASK, ocpAddrPrefix)) != RT_ERR_OK)
+        return retVal;
+
+    /*prepare access address*/
+    ocpAddr9_6 = ((ocpAddr >> 6) & 0x000F);
+    ocpAddr5_1 = ((ocpAddr >> 1) & 0x001F);
+    regData = RTL8367C_PHY_BASE | (ocpAddr9_6 << 8) | (phyNo << RTL8367C_PHY_OFFSET) | ocpAddr5_1;
+    retVal = rtl8367c_setAsicReg(RTL8367C_REG_INDRECT_ACCESS_ADDRESS, regData);
+    if(retVal != RT_ERR_OK)
+        return retVal;
+
+    /*Set READ Command*/
+    retVal = rtl8367c_setAsicReg(RTL8367C_REG_INDRECT_ACCESS_CTRL, RTL8367C_CMD_MASK );
+    if(retVal != RT_ERR_OK)
+        return retVal;
+
+    checkCounter = 100;
+    while(checkCounter)
+    {
+        retVal = rtl8367c_getAsicReg(RTL8367C_REG_INDRECT_ACCESS_STATUS,&busyFlag);
+        if((retVal != RT_ERR_OK) || busyFlag)
+        {
+            checkCounter --;
+            if(0 == checkCounter)
+                return RT_ERR_FAILED;
+        }
+        else
+        {
+            checkCounter = 0;
+        }
+    }
+
+    /*get PHY register*/
+    retVal = rtl8367c_getAsicReg(RTL8367C_REG_INDRECT_ACCESS_READ_DATA, &regData);
+    if(retVal != RT_ERR_OK)
+        return retVal;
+
+    *pRegData = regData;
+
+    return RT_ERR_OK;
+}
+
+#endif
+
+/* Function Name:
+ *      rtl8367c_setAsicPHYReg
+ * Description:
+ *      Set PHY registers
+ * Input:
+ *      phyNo   - Physical port number (0~7)
+ *      phyAddr - PHY address (0~31)
+ *      phyData - Writing data
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK               - Success
+ *      RT_ERR_SMI              - SMI access error
+ *      RT_ERR_PHY_REG_ID       - invalid PHY address
+ *      RT_ERR_PHY_ID           - invalid PHY no
+ *      RT_ERR_BUSYWAIT_TIMEOUT - PHY access busy
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_setAsicPHYReg(rtk_uint32 phyNo, rtk_uint32 phyAddr, rtk_uint32 phyData )
+{
+    rtk_uint32 ocp_addr;
+
+    if(phyAddr > RTL8367C_PHY_REGNOMAX)
+        return RT_ERR_PHY_REG_ID;
+
+    ocp_addr = 0xa400 + phyAddr*2;
+
+    return rtl8367c_setAsicPHYOCPReg(phyNo, ocp_addr, phyData);
+}
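+
+/*
+ * Usage sketch (editor's illustration): the 0xA400 OCP window maps the
+ * standard MII registers at two OCP bytes per register, so resetting
+ * PHY 2 through MII register 0 (BMCR, reset = bit 15) would be:
+ *
+ *    ret = rtl8367c_setAsicPHYReg(2, 0, 0x8000);
+ */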
+/* Function Name:
+ *      rtl8367c_getAsicPHYReg
+ * Description:
+ *      Get PHY registers
+ * Input:
+ *      phyNo   - Physical port number (0~7)
+ *      phyAddr - PHY address (0~31)
+ * Output:
+ *      pRegData - read data
+ * Return:
+ *      RT_ERR_OK               - Success
+ *      RT_ERR_SMI              - SMI access error
+ *      RT_ERR_PHY_REG_ID       - invalid PHY address
+ *      RT_ERR_PHY_ID           - invalid PHY no
+ *      RT_ERR_BUSYWAIT_TIMEOUT - PHY access busy
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicPHYReg(rtk_uint32 phyNo, rtk_uint32 phyAddr, rtk_uint32 *pRegData )
+{
+    rtk_uint32 ocp_addr;
+
+    if(phyAddr > RTL8367C_PHY_REGNOMAX)
+        return RT_ERR_PHY_REG_ID;
+
+    ocp_addr = 0xa400 + phyAddr*2;
+
+    return rtl8367c_getAsicPHYOCPReg(phyNo, ocp_addr, pRegData);
+}
+
+
+/* Function Name:
+ *      rtl8367c_setAsicSdsReg
+ * Description:
+ *      Set Serdes registers
+ * Input:
+ *      sdsId   - Serdes ID (0~1)
+ *      sdsReg  - register address (0~31)
+ *      sdsPage - register page
+ *      value   - data to write
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK               - Success
+ * Note:
+ *      None
+ */
+
+ret_t rtl8367c_setAsicSdsReg(rtk_uint32 sdsId, rtk_uint32 sdsReg, rtk_uint32 sdsPage, rtk_uint32 value)
+{
+    rtk_uint32 retVal;
+
+    if( (retVal = rtl8367c_setAsicReg(RTL8367C_REG_SDS_INDACS_DATA, value)) != RT_ERR_OK)
+        return retVal;
+
+    if( (retVal = rtl8367c_setAsicReg(RTL8367C_REG_SDS_INDACS_ADR, (sdsPage<<5) | sdsReg)) != RT_ERR_OK)
+        return retVal;
+
+    if( (retVal = rtl8367c_setAsicReg(RTL8367C_REG_SDS_INDACS_CMD, 0x00C0|sdsId)) != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
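+
+/*
+ * Editor's note, inferred from the constants used above and in the get
+ * function below: serdes access is indirect -- write the data, then the
+ * address (page << 5 | reg), then a command word where 0x0080 starts
+ * the access, 0x0040 selects a write, and the low bits pick the serdes.
+ * A sketch with hypothetical values:
+ *
+ *    ret = rtl8367c_setAsicSdsReg(0, 4, 0, 0x1140);
+ */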
+
+/* Function Name:
+ *      rtl8367c_getAsicSdsReg
+ * Description:
+ *      Get Serdes registers
+ * Input:
+ *      sdsId   - Serdes ID (0~1)
+ *      sdsReg  - register address (0~31)
+ *      sdsPage - register page
+ * Output:
+ *      value   - read data
+ * Return:
+ *      RT_ERR_OK               - Success
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicSdsReg(rtk_uint32 sdsId, rtk_uint32 sdsReg, rtk_uint32 sdsPage, rtk_uint32 *value)
+{
+    rtk_uint32 retVal, busy;
+
+    if( (retVal = rtl8367c_setAsicReg(RTL8367C_REG_SDS_INDACS_ADR, (sdsPage<<5) | sdsReg)) != RT_ERR_OK)
+        return retVal;
+
+    if( (retVal = rtl8367c_setAsicReg(RTL8367C_REG_SDS_INDACS_CMD, 0x0080|sdsId)) != RT_ERR_OK)
+        return retVal;
+
+    while(1)
+    {
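+        /*
+         * Poll the command register until the busy flag (bit 8, 0x100)
+         * clears.  Editor's note: unlike the PHY OCP path above, this
+         * loop has no bounded retry, so a wedged serdes block would
+         * spin here forever.
+         */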
+        if ((retVal = rtl8367c_getAsicReg(RTL8367C_REG_SDS_INDACS_CMD, &busy))!=RT_ERR_OK)
+            return retVal;
+
+        if ((busy & 0x100) == 0)
+            break;
+    }
+
+    if ((retVal = rtl8367c_getAsicReg(RTL8367C_REG_SDS_INDACS_DATA, value))!=RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+
+/* Function Name:
+ *      rtl8367c_setAsicPHYSram
+ * Description:
+ *      Set PHY SRAM
+ * Input:
+ *      phyNo   - Physical port number (0~7)
+ *      sramAddr - SRAM address
+ *      sramData - Writing data
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK               - Success
+ *      RT_ERR_SMI              - SMI access error
+ *      RT_ERR_PHY_REG_ID       - invalid PHY address
+ *      RT_ERR_PHY_ID           - invalid PHY no
+ *      RT_ERR_BUSYWAIT_TIMEOUT - PHY access busy
+ * Note:
+ *      None
+ */
+
+ret_t rtl8367c_setAsicPHYSram(rtk_uint32 phyNo, rtk_uint32 sramAddr, rtk_uint32 sramData )
+{
+    rtk_uint32 retVal;
+
+    if ((retVal = rtl8367c_setAsicPHYOCPReg(phyNo, 0xa436, sramAddr)) != RT_ERR_OK)
+        return retVal;
+
+    if ((retVal = rtl8367c_setAsicPHYOCPReg(phyNo, 0xa438, sramData)) != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
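+
+/*
+ * Editor's note: PHY SRAM access is a two-step OCP sequence -- the SRAM
+ * address goes to OCP register 0xA436 and the data to 0xA438, which
+ * appears to follow the usual Realtek PHY parameter-window convention.
+ * Which SRAM addresses are meaningful is firmware-specific and is not
+ * documented here.
+ */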
+
+
+
diff -Nruw linux-4.4.115-fbx/drivers/misc/freebox./rtlapi/rtl8367c_asicdrv_phy.h linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/rtl8367c_asicdrv_phy.h
--- linux-4.4.115-fbx/drivers/misc/freebox./rtlapi/rtl8367c_asicdrv_phy.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/rtl8367c_asicdrv_phy.h	2019-01-22 16:16:24.715257382 +0100
@@ -0,0 +1,28 @@
+#ifndef _RTL8367C_ASICDRV_PHY_H_
+#define _RTL8367C_ASICDRV_PHY_H_
+
+#include <rtl8367c_asicdrv.h>
+
+#define RTL8367C_PHY_REGNOMAX           0x1F
+#define RTL8367C_PHY_EXTERNALMAX        0x7
+
+#define RTL8367C_PHY_BASE               0x2000
+#define RTL8367C_PHY_EXT_BASE           0xA000
+
+#define RTL8367C_PHY_OFFSET             5
+#define RTL8367C_PHY_EXT_OFFSET         9
+
+#define RTL8367C_PHY_PAGE_ADDRESS       31
+
+
+extern ret_t rtl8367c_setAsicPHYReg(rtk_uint32 phyNo, rtk_uint32 phyAddr, rtk_uint32 regData );
+extern ret_t rtl8367c_getAsicPHYReg(rtk_uint32 phyNo, rtk_uint32 phyAddr, rtk_uint32* pRegData );
+extern ret_t rtl8367c_setAsicPHYOCPReg(rtk_uint32 phyNo, rtk_uint32 ocpAddr, rtk_uint32 ocpData );
+extern ret_t rtl8367c_getAsicPHYOCPReg(rtk_uint32 phyNo, rtk_uint32 ocpAddr, rtk_uint32 *pRegData );
+extern ret_t rtl8367c_setAsicSdsReg(rtk_uint32 sdsId, rtk_uint32 sdsReg, rtk_uint32 sdsPage,  rtk_uint32 value);
+extern ret_t rtl8367c_getAsicSdsReg(rtk_uint32 sdsId, rtk_uint32 sdsReg, rtk_uint32 sdsPage, rtk_uint32 *value);
+extern ret_t rtl8367c_setAsicPHYSram(rtk_uint32 phyNo, rtk_uint32 sramAddr, rtk_uint32 sramData );
+
+
+#endif /*#ifndef _RTL8367C_ASICDRV_PHY_H_*/
+
diff -Nruw linux-4.4.115-fbx/drivers/misc/freebox./rtlapi/rtl8367c_asicdrv_port.c linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/rtl8367c_asicdrv_port.c
--- linux-4.4.115-fbx/drivers/misc/freebox./rtlapi/rtl8367c_asicdrv_port.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/rtl8367c_asicdrv_port.c	2019-01-22 16:16:24.715257382 +0100
@@ -0,0 +1,5918 @@
+/*
+ * Copyright (C) 2013 Realtek Semiconductor Corp.
+ * All Rights Reserved.
+ *
+ * This program is the proprietary software of Realtek Semiconductor
+ * Corporation and/or its licensors, and only be used, duplicated,
+ * modified or distributed under the authorized license from Realtek.
+ *
+ * ANY USE OF THE SOFTWARE OTHER THAN AS AUTHORIZED UNDER
+ * THIS LICENSE OR COPYRIGHT LAW IS PROHIBITED.
+ *
+ * $Revision: 83681 $
+ * $Date: 2017-11-23 09:03:51 +0800 (Thu, 23 Nov 2017) $
+ *
+ * Purpose : RTL8367C switch high-level API for RTL8367C
+ * Feature : Port security related functions
+ *
+ */
+
+#include <rtl8367c_asicdrv_port.h>
+
+#include <string.h>
+
+
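+/*
+ * Editor's note: the Fiber2_* arrays below are opaque binary blobs --
+ * judging by their naming and size, most likely 8051 firmware images
+ * loaded into the switch to run the fiber port in auto-negotiation,
+ * forced-1G and forced-100M modes respectively.  They are reproduced
+ * verbatim and are not meant to be hand-edited.
+ */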
+#define FIBER2_AUTO_INIT_SIZE 2038
+rtk_uint8 Fiber2_Auto[FIBER2_AUTO_INIT_SIZE] = {
+0x02,0x05,0x8F,0xE4,0xF5,0xA8,0xD2,0xAF,
+0x22,0x00,0x00,0x02,0x07,0x2C,0xC5,0xF0,
+0xF8,0xA3,0xE0,0x28,0xF0,0xC5,0xF0,0xF8,
+0xE5,0x82,0x15,0x82,0x70,0x02,0x15,0x83,
+0xE0,0x38,0xF0,0x22,0x75,0xF0,0x08,0x75,
+0x82,0x00,0xEF,0x2F,0xFF,0xEE,0x33,0xFE,
+0xCD,0x33,0xCD,0xCC,0x33,0xCC,0xC5,0x82,
+0x33,0xC5,0x82,0x9B,0xED,0x9A,0xEC,0x99,
+0xE5,0x82,0x98,0x40,0x0C,0xF5,0x82,0xEE,
+0x9B,0xFE,0xED,0x9A,0xFD,0xEC,0x99,0xFC,
+0x0F,0xD5,0xF0,0xD6,0xE4,0xCE,0xFB,0xE4,
+0xCD,0xFA,0xE4,0xCC,0xF9,0xA8,0x82,0x22,
+0xB8,0x00,0xC1,0xB9,0x00,0x59,0xBA,0x00,
+0x2D,0xEC,0x8B,0xF0,0x84,0xCF,0xCE,0xCD,
+0xFC,0xE5,0xF0,0xCB,0xF9,0x78,0x18,0xEF,
+0x2F,0xFF,0xEE,0x33,0xFE,0xED,0x33,0xFD,
+0xEC,0x33,0xFC,0xEB,0x33,0xFB,0x10,0xD7,
+0x03,0x99,0x40,0x04,0xEB,0x99,0xFB,0x0F,
+0xD8,0xE5,0xE4,0xF9,0xFA,0x22,0x78,0x18,
+0xEF,0x2F,0xFF,0xEE,0x33,0xFE,0xED,0x33,
+0xFD,0xEC,0x33,0xFC,0xC9,0x33,0xC9,0x10,
+0xD7,0x05,0x9B,0xE9,0x9A,0x40,0x07,0xEC,
+0x9B,0xFC,0xE9,0x9A,0xF9,0x0F,0xD8,0xE0,
+0xE4,0xC9,0xFA,0xE4,0xCC,0xFB,0x22,0x75,
+0xF0,0x10,0xEF,0x2F,0xFF,0xEE,0x33,0xFE,
+0xED,0x33,0xFD,0xCC,0x33,0xCC,0xC8,0x33,
+0xC8,0x10,0xD7,0x07,0x9B,0xEC,0x9A,0xE8,
+0x99,0x40,0x0A,0xED,0x9B,0xFD,0xEC,0x9A,
+0xFC,0xE8,0x99,0xF8,0x0F,0xD5,0xF0,0xDA,
+0xE4,0xCD,0xFB,0xE4,0xCC,0xFA,0xE4,0xC8,
+0xF9,0x22,0xEB,0x9F,0xF5,0xF0,0xEA,0x9E,
+0x42,0xF0,0xE9,0x9D,0x42,0xF0,0xE8,0x9C,
+0x45,0xF0,0x22,0xE0,0xFC,0xA3,0xE0,0xFD,
+0xA3,0xE0,0xFE,0xA3,0xE0,0xFF,0x22,0xE0,
+0xF8,0xA3,0xE0,0xF9,0xA3,0xE0,0xFA,0xA3,
+0xE0,0xFB,0x22,0xEC,0xF0,0xA3,0xED,0xF0,
+0xA3,0xEE,0xF0,0xA3,0xEF,0xF0,0x22,0x7D,
+0xD7,0x7C,0x04,0x7F,0x02,0x7E,0x66,0x12,
+0x07,0xAB,0x7D,0x80,0x7C,0x04,0x7F,0x01,
+0x7E,0x66,0x12,0x07,0xAB,0x7D,0xC0,0x7C,
+0x00,0x7F,0x00,0x7E,0x66,0x12,0x07,0xAB,
+0x7D,0x94,0x7C,0xF9,0x7F,0x02,0x7E,0x66,
+0x12,0x07,0xAB,0x7D,0x81,0x7C,0x04,0x7F,
+0x01,0x7E,0x66,0x12,0x07,0xAB,0x7D,0xC0,
+0x7C,0x00,0x7F,0x00,0x7E,0x66,0x12,0x07,
+0xAB,0x7D,0xA2,0x7C,0x31,0x7F,0x02,0x7E,
+0x66,0x12,0x07,0xAB,0x7D,0x82,0x7C,0x04,
+0x7F,0x01,0x7E,0x66,0x12,0x07,0xAB,0x7D,
+0xC0,0x7C,0x00,0x7F,0x00,0x7E,0x66,0x12,
+0x07,0xAB,0x7D,0x60,0x7C,0x69,0x7F,0x02,
+0x7E,0x66,0x12,0x07,0xAB,0x7D,0x83,0x7C,
+0x04,0x7F,0x01,0x7E,0x66,0x12,0x07,0xAB,
+0x7D,0xC0,0x7C,0x00,0x7F,0x00,0x7E,0x66,
+0x12,0x07,0xAB,0x7D,0x28,0x7C,0x97,0x7F,
+0x02,0x7E,0x66,0x12,0x07,0xAB,0x7D,0x84,
+0x7C,0x04,0x7F,0x01,0x7E,0x66,0x12,0x07,
+0xAB,0x7D,0xC0,0x7C,0x00,0x7F,0x00,0x7E,
+0x66,0x12,0x07,0xAB,0x7D,0x85,0x7C,0x9D,
+0x7F,0x02,0x7E,0x66,0x12,0x07,0xAB,0x7D,
+0x23,0x7C,0x04,0x7F,0x01,0x7E,0x66,0x12,
+0x07,0xAB,0x7D,0xC0,0x7C,0x00,0x7F,0x00,
+0x7E,0x66,0x12,0x07,0xAB,0x7D,0x10,0x7C,
+0xD8,0x7F,0x02,0x7E,0x66,0x12,0x07,0xAB,
+0x7D,0x24,0x7C,0x04,0x7F,0x01,0x7E,0x66,
+0x12,0x07,0xAB,0x7D,0xC0,0x7C,0x00,0x7F,
+0x00,0x7E,0x66,0x12,0x07,0xAB,0x7D,0x00,
+0x7C,0x04,0x7F,0x02,0x7E,0x66,0x12,0x07,
+0xAB,0x7D,0x2F,0x7C,0x00,0x7F,0x01,0x7E,
+0x66,0x12,0x07,0xAB,0x7D,0xC0,0x7C,0x00,
+0x7F,0x00,0x7E,0x66,0x02,0x07,0xAB,0x7D,
+0x03,0x7C,0x00,0x7F,0x01,0x7E,0x66,0x12,
+0x07,0xAB,0x7D,0x80,0x7C,0x00,0x7F,0x00,
+0x7E,0x66,0x12,0x07,0xAB,0x7F,0x02,0x7E,
+0x66,0x12,0x07,0x66,0xEF,0x44,0x40,0xFD,
+0xAC,0x06,0x7F,0x02,0x7E,0x66,0x12,0x07,
+0xAB,0x7D,0x03,0x7C,0x00,0x7F,0x01,0x7E,
+0x66,0x12,0x07,0xAB,0x7D,0xC0,0x7C,0x00,
+0x7F,0x00,0x7E,0x66,0x12,0x07,0xAB,0x7D,
+0x03,0x7C,0x00,0x7F,0x01,0x7E,0x66,0x12,
+0x07,0xAB,0x7D,0x80,0x7C,0x00,0x7F,0x00,
+0x7E,0x66,0x12,0x07,0xAB,0x7F,0x02,0x7E,
+0x66,0x12,0x07,0x66,0xEF,0x54,0xBF,0xFD,
+0xAC,0x06,0x7F,0x02,0x7E,0x66,0x12,0x07,
+0xAB,0x7D,0x03,0x7C,0x00,0x7F,0x01,0x7E,
+0x66,0x12,0x07,0xAB,0x7D,0xC0,0x7C,0x00,
+0x7F,0x00,0x7E,0x66,0x12,0x07,0xAB,0xE4,
+0xFD,0xFC,0x7F,0x01,0x7E,0x66,0x12,0x07,
+0xAB,0x7D,0x80,0x7C,0x00,0x7F,0x00,0x7E,
+0x66,0x12,0x07,0xAB,0x7F,0x02,0x7E,0x66,
+0x12,0x07,0x66,0xEF,0x54,0xFD,0x54,0xFE,
+0xFD,0xAC,0x06,0x7F,0x02,0x7E,0x66,0x12,
+0x07,0xAB,0xE4,0xFD,0xFC,0x7F,0x01,0x7E,
+0x66,0x12,0x07,0xAB,0x7D,0xC0,0x7C,0x00,
+0x7F,0x00,0x7E,0x66,0x12,0x07,0xAB,0xE4,
+0xFD,0xFC,0x7F,0x01,0x7E,0x66,0x12,0x07,
+0xAB,0x7D,0x80,0x7C,0x00,0x7F,0x00,0x7E,
+0x66,0x12,0x07,0xAB,0x7F,0x02,0x7E,0x66,
+0x12,0x07,0x66,0xEF,0x44,0x02,0x44,0x01,
+0xFD,0xAC,0x06,0x7F,0x02,0x7E,0x66,0x12,
+0x07,0xAB,0xE4,0xFD,0xFC,0x7F,0x01,0x7E,
+0x66,0x12,0x07,0xAB,0x7D,0xC0,0x7C,0x00,
+0x7F,0x00,0x7E,0x66,0x02,0x07,0xAB,0xE4,
+0x90,0x06,0x2C,0xF0,0xFD,0x7C,0x01,0x7F,
+0x3F,0x7E,0x1D,0x12,0x07,0xAB,0x7D,0x40,
+0x7C,0x00,0x7F,0x36,0x7E,0x13,0x12,0x07,
+0xAB,0xE4,0xFF,0xFE,0xFD,0x80,0x25,0xE4,
+0x7F,0xFF,0x7E,0xFF,0xFD,0xFC,0x90,0x06,
+0x24,0x12,0x01,0x0F,0xC3,0x12,0x00,0xF2,
+0x50,0x1B,0x90,0x06,0x24,0x12,0x01,0x03,
+0xEF,0x24,0x01,0xFF,0xE4,0x3E,0xFE,0xE4,
+0x3D,0xFD,0xE4,0x3C,0xFC,0x90,0x06,0x24,
+0x12,0x01,0x1B,0x80,0xD2,0xE4,0xF5,0xA8,
+0xD2,0xAF,0x7D,0x1F,0xFC,0x7F,0x49,0x7E,
+0x13,0x12,0x07,0xAB,0x12,0x07,0xDB,0x12,
+0x01,0x27,0x12,0x06,0x1B,0x12,0x07,0x8A,
+0x12,0x06,0xEA,0x7D,0x41,0x7C,0x00,0x7F,
+0x36,0x7E,0x13,0x12,0x07,0xAB,0xE4,0xFF,
+0xFE,0xFD,0x80,0x26,0x7F,0xFF,0x7E,0xFF,
+0x7D,0x05,0x7C,0x00,0x90,0x06,0x24,0x12,
+0x01,0x0F,0xC3,0x12,0x00,0xF2,0x50,0x1B,
+0x90,0x06,0x24,0x12,0x01,0x03,0xEF,0x24,
+0x01,0xFF,0xE4,0x3E,0xFE,0xE4,0x3D,0xFD,
+0xE4,0x3C,0xFC,0x90,0x06,0x24,0x12,0x01,
+0x1B,0x80,0xD1,0xC2,0x00,0xC2,0x01,0xD2,
+0xA9,0xD2,0x8C,0x7F,0x01,0x7E,0x62,0x12,
+0x07,0x66,0xEF,0x30,0xE2,0x07,0xE4,0x90,
+0x06,0x2C,0xF0,0x80,0xEE,0x90,0x06,0x2C,
+0xE0,0x70,0x12,0x12,0x04,0xF0,0x90,0x06,
+0x2C,0x74,0x01,0xF0,0xE4,0x90,0x06,0x33,
+0xF0,0xA3,0xF0,0x80,0xD6,0xC3,0x90,0x06,
+0x34,0xE0,0x94,0x62,0x90,0x06,0x33,0xE0,
+0x94,0x00,0x40,0xC7,0xE4,0xF0,0xA3,0xF0,
+0x12,0x04,0xF0,0x90,0x06,0x2C,0x74,0x01,
+0xF0,0x80,0xB8,0x75,0x0F,0x80,0x75,0x0E,
+0x7E,0x75,0x0D,0xAA,0x75,0x0C,0x83,0xE4,
+0xF5,0x10,0x7F,0x36,0x7E,0x13,0x12,0x07,
+0x66,0xEE,0xC4,0xF8,0x54,0xF0,0xC8,0xEF,
+0xC4,0x54,0x0F,0x48,0x54,0x07,0xFB,0x7A,
+0x00,0xEA,0x70,0x4A,0xEB,0x14,0x60,0x1C,
+0x14,0x60,0x27,0x24,0xFE,0x60,0x31,0x14,
+0x60,0x3C,0x24,0x05,0x70,0x38,0x75,0x0B,
+0x00,0x75,0x0A,0xC2,0x75,0x09,0xEB,0x75,
+0x08,0x0B,0x80,0x36,0x75,0x0B,0x40,0x75,
+0x0A,0x59,0x75,0x09,0x73,0x75,0x08,0x07,
+0x80,0x28,0x75,0x0B,0x00,0x75,0x0A,0xE1,
+0x75,0x09,0xF5,0x75,0x08,0x05,0x80,0x1A,
+0x75,0x0B,0xA0,0x75,0x0A,0xAC,0x75,0x09,
+0xB9,0x75,0x08,0x03,0x80,0x0C,0x75,0x0B,
+0x00,0x75,0x0A,0x62,0x75,0x09,0x3D,0x75,
+0x08,0x01,0x75,0x89,0x11,0xE4,0x7B,0x60,
+0x7A,0x09,0xF9,0xF8,0xAF,0x0B,0xAE,0x0A,
+0xAD,0x09,0xAC,0x08,0x12,0x00,0x60,0xAA,
+0x06,0xAB,0x07,0xC3,0xE4,0x9B,0xFB,0xE4,
+0x9A,0xFA,0x78,0x17,0xF6,0xAF,0x03,0xEF,
+0x08,0xF6,0x18,0xE6,0xF5,0x8C,0x08,0xE6,
+0xF5,0x8A,0x74,0x0D,0x2B,0xFB,0xE4,0x3A,
+0x18,0xF6,0xAF,0x03,0xEF,0x08,0xF6,0x75,
+0x88,0x10,0x53,0x8E,0xC7,0xD2,0xA9,0x22,
+0x7F,0x10,0x7E,0x13,0x12,0x07,0x66,0x90,
+0x06,0x2D,0xEE,0xF0,0xA3,0xEF,0xF0,0xEE,
+0x44,0x10,0xFE,0x90,0x06,0x2D,0xF0,0xA3,
+0xEF,0xF0,0x54,0xEF,0xFF,0x90,0x06,0x2D,
+0xEE,0xF0,0xFC,0xA3,0xEF,0xF0,0xFD,0x7F,
+0x10,0x7E,0x13,0x12,0x07,0xAB,0xE4,0xFF,
+0xFE,0x0F,0xBF,0x00,0x01,0x0E,0xEF,0x64,
+0x64,0x4E,0x70,0xF5,0x7D,0x04,0x7C,0x00,
+0x7F,0x02,0x7E,0x66,0x12,0x07,0xAB,0x7D,
+0x00,0x7C,0x04,0x7F,0x01,0x7E,0x66,0x12,
+0x07,0xAB,0x7D,0xC0,0x7C,0x00,0x7F,0x00,
+0x7E,0x66,0x12,0x07,0xAB,0xE4,0xFD,0xFC,
+0x7F,0x02,0x7E,0x66,0x12,0x07,0xAB,0x7D,
+0x00,0x7C,0x04,0x7F,0x01,0x7E,0x66,0x12,
+0x07,0xAB,0x7D,0xC0,0x7C,0x00,0x7F,0x00,
+0x7E,0x66,0x12,0x07,0xAB,0x7F,0x10,0x7E,
+0x13,0x12,0x07,0x66,0x90,0x06,0x2D,0xEE,
+0xF0,0xA3,0xEF,0xF0,0xEE,0x54,0xEF,0x90,
+0x06,0x2D,0xF0,0xFC,0xA3,0xEF,0xF0,0xFD,
+0x7F,0x10,0x7E,0x13,0x02,0x07,0xAB,0x78,
+0x7F,0xE4,0xF6,0xD8,0xFD,0x75,0x81,0x3C,
+0x02,0x05,0xD6,0x02,0x03,0x2F,0xE4,0x93,
+0xA3,0xF8,0xE4,0x93,0xA3,0x40,0x03,0xF6,
+0x80,0x01,0xF2,0x08,0xDF,0xF4,0x80,0x29,
+0xE4,0x93,0xA3,0xF8,0x54,0x07,0x24,0x0C,
+0xC8,0xC3,0x33,0xC4,0x54,0x0F,0x44,0x20,
+0xC8,0x83,0x40,0x04,0xF4,0x56,0x80,0x01,
+0x46,0xF6,0xDF,0xE4,0x80,0x0B,0x01,0x02,
+0x04,0x08,0x10,0x20,0x40,0x80,0x90,0x07,
+0xE7,0xE4,0x7E,0x01,0x93,0x60,0xBC,0xA3,
+0xFF,0x54,0x3F,0x30,0xE5,0x09,0x54,0x1F,
+0xFE,0xE4,0x93,0xA3,0x60,0x01,0x0E,0xCF,
+0x54,0xC0,0x25,0xE0,0x60,0xA8,0x40,0xB8,
+0xE4,0x93,0xA3,0xFA,0xE4,0x93,0xA3,0xF8,
+0xE4,0x93,0xA3,0xC8,0xC5,0x82,0xC8,0xCA,
+0xC5,0x83,0xCA,0xF0,0xA3,0xC8,0xC5,0x82,
+0xC8,0xCA,0xC5,0x83,0xCA,0xDF,0xE9,0xDE,
+0xE7,0x80,0xBE,0x7D,0x40,0x7C,0x17,0x7F,
+0x11,0x7E,0x1D,0x12,0x07,0xAB,0x7F,0x41,
+0x7E,0x1D,0x12,0x07,0x66,0xEF,0x44,0x20,
+0x44,0x80,0xFD,0xAC,0x06,0x7F,0x41,0x7E,
+0x1D,0x12,0x07,0xAB,0x7D,0xBB,0x7C,0x15,
+0x7F,0xEB,0x7E,0x13,0x12,0x07,0xAB,0x7D,
+0x07,0x7C,0x00,0x7F,0xE7,0x7E,0x13,0x12,
+0x07,0xAB,0x7D,0x40,0x7C,0x11,0x7F,0x00,
+0x7E,0x62,0x12,0x07,0xAB,0x02,0x02,0x2F,
+0x7D,0xC0,0x7C,0x16,0x7F,0x11,0x7E,0x1D,
+0x12,0x07,0xAB,0x7D,0xBB,0x7C,0x15,0x7F,
+0xEB,0x7E,0x13,0x12,0x07,0xAB,0x7D,0x0D,
+0x7C,0x00,0x7F,0xE7,0x7E,0x13,0x12,0x07,
+0xAB,0x7F,0x41,0x7E,0x1D,0x12,0x07,0x66,
+0xEF,0x44,0x20,0x44,0x80,0xFD,0xAC,0x06,
+0x7F,0x41,0x7E,0x1D,0x12,0x07,0xAB,0x7D,
+0x00,0x7C,0x21,0x7F,0x00,0x7E,0x62,0x12,
+0x07,0xAB,0x02,0x02,0x2F,0x7D,0x40,0x7C,
+0x17,0x7F,0x11,0x7E,0x1D,0x12,0x07,0xAB,
+0x7D,0xBB,0x7C,0x15,0x7F,0xEB,0x7E,0x13,
+0x12,0x07,0xAB,0x7D,0x0C,0x7C,0x00,0x7F,
+0xE7,0x7E,0x13,0x12,0x07,0xAB,0x7F,0x41,
+0x7E,0x1D,0x12,0x07,0x66,0xEF,0x44,0x20,
+0x44,0x80,0xFD,0xAC,0x06,0x7F,0x41,0x7E,
+0x1D,0x12,0x07,0xAB,0x7D,0x40,0x7C,0x11,
+0x7F,0x00,0x7E,0x62,0x12,0x07,0xAB,0x02,
+0x02,0x2F,0x7D,0x04,0x7C,0x00,0x7F,0x01,
+0x7E,0x66,0x12,0x07,0xAB,0x7D,0x80,0x7C,
+0x00,0x7F,0x00,0x7E,0x66,0x12,0x07,0xAB,
+0x7F,0x02,0x7E,0x66,0x12,0x07,0x66,0xEF,
+0x44,0x02,0x44,0x04,0xFD,0xAC,0x06,0x7F,
+0x02,0x7E,0x66,0x12,0x07,0xAB,0x7D,0x04,
+0x7C,0x00,0x7F,0x01,0x7E,0x66,0x12,0x07,
+0xAB,0x7D,0xC0,0x7C,0x00,0x7F,0x00,0x7E,
+0x66,0x02,0x07,0xAB,0xC0,0xE0,0xC0,0xF0,
+0xC0,0x83,0xC0,0x82,0xC0,0xD0,0x75,0xD0,
+0x00,0xC0,0x00,0x78,0x17,0xE6,0xF5,0x8C,
+0x78,0x18,0xE6,0xF5,0x8A,0x90,0x06,0x31,
+0xE4,0x75,0xF0,0x01,0x12,0x00,0x0E,0x90,
+0x06,0x33,0xE4,0x75,0xF0,0x01,0x12,0x00,
+0x0E,0xD0,0x00,0xD0,0xD0,0xD0,0x82,0xD0,
+0x83,0xD0,0xF0,0xD0,0xE0,0x32,0xC2,0xAF,
+0xAD,0x07,0xAC,0x06,0x8C,0xA2,0x8D,0xA3,
+0x75,0xA0,0x01,0x00,0x00,0x00,0x00,0x00,
+0x00,0x00,0x00,0x00,0x00,0x00,0xAE,0xA1,
+0xBE,0x00,0xF0,0xAE,0xA6,0xAF,0xA7,0xD2,
+0xAF,0x22,0x7D,0x20,0x7C,0x0F,0x7F,0x02,
+0x7E,0x66,0x12,0x07,0xAB,0x7D,0x01,0x7C,
+0x00,0x7F,0x01,0x7E,0x66,0x12,0x07,0xAB,
+0x7D,0xC0,0x7C,0x00,0x7F,0x00,0x7E,0x66,
+0x02,0x07,0xAB,0xC2,0xAF,0xAB,0x07,0xAA,
+0x06,0x8A,0xA2,0x8B,0xA3,0x8C,0xA4,0x8D,
+0xA5,0x75,0xA0,0x03,0x00,0x00,0x00,0xAA,
+0xA1,0xBA,0x00,0xF8,0xD2,0xAF,0x22,0x7F,
+0x0C,0x7E,0x13,0x12,0x07,0x66,0xEF,0x44,
+0x50,0xFD,0xAC,0x06,0x7F,0x0C,0x7E,0x13,
+0x02,0x07,0xAB,0x12,0x07,0xC7,0x12,0x07,
+0xF2,0x12,0x04,0x2B,0x02,0x00,0x03,0x42,
+0x06,0x33,0x00,0x00,0x42,0x06,0x31,0x00,
+0x00,0x00,0xE4,0xF5,0x8E,0x22,};
+
+#define FIBER2_1G_INIT_SIZE 2032
+rtk_uint8 Fiber2_1G[FIBER2_1G_INIT_SIZE] = {
+0x02,0x05,0x89,0xE4,0xF5,0xA8,0xD2,0xAF,
+0x22,0x00,0x00,0x02,0x07,0x26,0xC5,0xF0,
+0xF8,0xA3,0xE0,0x28,0xF0,0xC5,0xF0,0xF8,
+0xE5,0x82,0x15,0x82,0x70,0x02,0x15,0x83,
+0xE0,0x38,0xF0,0x22,0x75,0xF0,0x08,0x75,
+0x82,0x00,0xEF,0x2F,0xFF,0xEE,0x33,0xFE,
+0xCD,0x33,0xCD,0xCC,0x33,0xCC,0xC5,0x82,
+0x33,0xC5,0x82,0x9B,0xED,0x9A,0xEC,0x99,
+0xE5,0x82,0x98,0x40,0x0C,0xF5,0x82,0xEE,
+0x9B,0xFE,0xED,0x9A,0xFD,0xEC,0x99,0xFC,
+0x0F,0xD5,0xF0,0xD6,0xE4,0xCE,0xFB,0xE4,
+0xCD,0xFA,0xE4,0xCC,0xF9,0xA8,0x82,0x22,
+0xB8,0x00,0xC1,0xB9,0x00,0x59,0xBA,0x00,
+0x2D,0xEC,0x8B,0xF0,0x84,0xCF,0xCE,0xCD,
+0xFC,0xE5,0xF0,0xCB,0xF9,0x78,0x18,0xEF,
+0x2F,0xFF,0xEE,0x33,0xFE,0xED,0x33,0xFD,
+0xEC,0x33,0xFC,0xEB,0x33,0xFB,0x10,0xD7,
+0x03,0x99,0x40,0x04,0xEB,0x99,0xFB,0x0F,
+0xD8,0xE5,0xE4,0xF9,0xFA,0x22,0x78,0x18,
+0xEF,0x2F,0xFF,0xEE,0x33,0xFE,0xED,0x33,
+0xFD,0xEC,0x33,0xFC,0xC9,0x33,0xC9,0x10,
+0xD7,0x05,0x9B,0xE9,0x9A,0x40,0x07,0xEC,
+0x9B,0xFC,0xE9,0x9A,0xF9,0x0F,0xD8,0xE0,
+0xE4,0xC9,0xFA,0xE4,0xCC,0xFB,0x22,0x75,
+0xF0,0x10,0xEF,0x2F,0xFF,0xEE,0x33,0xFE,
+0xED,0x33,0xFD,0xCC,0x33,0xCC,0xC8,0x33,
+0xC8,0x10,0xD7,0x07,0x9B,0xEC,0x9A,0xE8,
+0x99,0x40,0x0A,0xED,0x9B,0xFD,0xEC,0x9A,
+0xFC,0xE8,0x99,0xF8,0x0F,0xD5,0xF0,0xDA,
+0xE4,0xCD,0xFB,0xE4,0xCC,0xFA,0xE4,0xC8,
+0xF9,0x22,0xEB,0x9F,0xF5,0xF0,0xEA,0x9E,
+0x42,0xF0,0xE9,0x9D,0x42,0xF0,0xE8,0x9C,
+0x45,0xF0,0x22,0xE0,0xFC,0xA3,0xE0,0xFD,
+0xA3,0xE0,0xFE,0xA3,0xE0,0xFF,0x22,0xE0,
+0xF8,0xA3,0xE0,0xF9,0xA3,0xE0,0xFA,0xA3,
+0xE0,0xFB,0x22,0xEC,0xF0,0xA3,0xED,0xF0,
+0xA3,0xEE,0xF0,0xA3,0xEF,0xF0,0x22,0x7D,
+0xD7,0x7C,0x04,0x7F,0x02,0x7E,0x66,0x12,
+0x07,0xA5,0x7D,0x80,0x7C,0x04,0x7F,0x01,
+0x7E,0x66,0x12,0x07,0xA5,0x7D,0xC0,0x7C,
+0x00,0x7F,0x00,0x7E,0x66,0x12,0x07,0xA5,
+0x7D,0x94,0x7C,0xF9,0x7F,0x02,0x7E,0x66,
+0x12,0x07,0xA5,0x7D,0x81,0x7C,0x04,0x7F,
+0x01,0x7E,0x66,0x12,0x07,0xA5,0x7D,0xC0,
+0x7C,0x00,0x7F,0x00,0x7E,0x66,0x12,0x07,
+0xA5,0x7D,0xA2,0x7C,0x31,0x7F,0x02,0x7E,
+0x66,0x12,0x07,0xA5,0x7D,0x82,0x7C,0x04,
+0x7F,0x01,0x7E,0x66,0x12,0x07,0xA5,0x7D,
+0xC0,0x7C,0x00,0x7F,0x00,0x7E,0x66,0x12,
+0x07,0xA5,0x7D,0x60,0x7C,0x69,0x7F,0x02,
+0x7E,0x66,0x12,0x07,0xA5,0x7D,0x83,0x7C,
+0x04,0x7F,0x01,0x7E,0x66,0x12,0x07,0xA5,
+0x7D,0xC0,0x7C,0x00,0x7F,0x00,0x7E,0x66,
+0x12,0x07,0xA5,0x7D,0x28,0x7C,0x97,0x7F,
+0x02,0x7E,0x66,0x12,0x07,0xA5,0x7D,0x84,
+0x7C,0x04,0x7F,0x01,0x7E,0x66,0x12,0x07,
+0xA5,0x7D,0xC0,0x7C,0x00,0x7F,0x00,0x7E,
+0x66,0x12,0x07,0xA5,0x7D,0x85,0x7C,0x9D,
+0x7F,0x02,0x7E,0x66,0x12,0x07,0xA5,0x7D,
+0x23,0x7C,0x04,0x7F,0x01,0x7E,0x66,0x12,
+0x07,0xA5,0x7D,0xC0,0x7C,0x00,0x7F,0x00,
+0x7E,0x66,0x12,0x07,0xA5,0x7D,0x10,0x7C,
+0xD8,0x7F,0x02,0x7E,0x66,0x12,0x07,0xA5,
+0x7D,0x24,0x7C,0x04,0x7F,0x01,0x7E,0x66,
+0x12,0x07,0xA5,0x7D,0xC0,0x7C,0x00,0x7F,
+0x00,0x7E,0x66,0x12,0x07,0xA5,0x7D,0x00,
+0x7C,0x04,0x7F,0x02,0x7E,0x66,0x12,0x07,
+0xA5,0x7D,0x2F,0x7C,0x00,0x7F,0x01,0x7E,
+0x66,0x12,0x07,0xA5,0x7D,0xC0,0x7C,0x00,
+0x7F,0x00,0x7E,0x66,0x02,0x07,0xA5,0x7D,
+0x03,0x7C,0x00,0x7F,0x01,0x7E,0x66,0x12,
+0x07,0xA5,0x7D,0x80,0x7C,0x00,0x7F,0x00,
+0x7E,0x66,0x12,0x07,0xA5,0x7F,0x02,0x7E,
+0x66,0x12,0x07,0x60,0xEF,0x44,0x40,0xFD,
+0xAC,0x06,0x7F,0x02,0x7E,0x66,0x12,0x07,
+0xA5,0x7D,0x03,0x7C,0x00,0x7F,0x01,0x7E,
+0x66,0x12,0x07,0xA5,0x7D,0xC0,0x7C,0x00,
+0x7F,0x00,0x7E,0x66,0x12,0x07,0xA5,0x7D,
+0x03,0x7C,0x00,0x7F,0x01,0x7E,0x66,0x12,
+0x07,0xA5,0x7D,0x80,0x7C,0x00,0x7F,0x00,
+0x7E,0x66,0x12,0x07,0xA5,0x7F,0x02,0x7E,
+0x66,0x12,0x07,0x60,0xEF,0x54,0xBF,0xFD,
+0xAC,0x06,0x7F,0x02,0x7E,0x66,0x12,0x07,
+0xA5,0x7D,0x03,0x7C,0x00,0x7F,0x01,0x7E,
+0x66,0x12,0x07,0xA5,0x7D,0xC0,0x7C,0x00,
+0x7F,0x00,0x7E,0x66,0x12,0x07,0xA5,0xE4,
+0xFD,0xFC,0x7F,0x01,0x7E,0x66,0x12,0x07,
+0xA5,0x7D,0x80,0x7C,0x00,0x7F,0x00,0x7E,
+0x66,0x12,0x07,0xA5,0x7F,0x02,0x7E,0x66,
+0x12,0x07,0x60,0xEF,0x54,0xFD,0x54,0xFE,
+0xFD,0xAC,0x06,0x7F,0x02,0x7E,0x66,0x12,
+0x07,0xA5,0xE4,0xFD,0xFC,0x7F,0x01,0x7E,
+0x66,0x12,0x07,0xA5,0x7D,0xC0,0x7C,0x00,
+0x7F,0x00,0x7E,0x66,0x12,0x07,0xA5,0xE4,
+0xFD,0xFC,0x7F,0x01,0x7E,0x66,0x12,0x07,
+0xA5,0x7D,0x80,0x7C,0x00,0x7F,0x00,0x7E,
+0x66,0x12,0x07,0xA5,0x7F,0x02,0x7E,0x66,
+0x12,0x07,0x60,0xEF,0x44,0x02,0x44,0x01,
+0xFD,0xAC,0x06,0x7F,0x02,0x7E,0x66,0x12,
+0x07,0xA5,0xE4,0xFD,0xFC,0x7F,0x01,0x7E,
+0x66,0x12,0x07,0xA5,0x7D,0xC0,0x7C,0x00,
+0x7F,0x00,0x7E,0x66,0x02,0x07,0xA5,0xE4,
+0x90,0x06,0x2C,0xF0,0xFD,0x7C,0x01,0x7F,
+0x3F,0x7E,0x1D,0x12,0x07,0xA5,0x7D,0x40,
+0x7C,0x00,0x7F,0x36,0x7E,0x13,0x12,0x07,
+0xA5,0xE4,0xFF,0xFE,0xFD,0x80,0x25,0xE4,
+0x7F,0xFF,0x7E,0xFF,0xFD,0xFC,0x90,0x06,
+0x24,0x12,0x01,0x0F,0xC3,0x12,0x00,0xF2,
+0x50,0x1B,0x90,0x06,0x24,0x12,0x01,0x03,
+0xEF,0x24,0x01,0xFF,0xE4,0x3E,0xFE,0xE4,
+0x3D,0xFD,0xE4,0x3C,0xFC,0x90,0x06,0x24,
+0x12,0x01,0x1B,0x80,0xD2,0xE4,0xF5,0xA8,
+0xD2,0xAF,0x7D,0x1F,0xFC,0x7F,0x49,0x7E,
+0x13,0x12,0x07,0xA5,0x12,0x07,0xD5,0x12,
+0x01,0x27,0x12,0x06,0x9F,0x7D,0x41,0x7C,
+0x00,0x7F,0x36,0x7E,0x13,0x12,0x07,0xA5,
+0xE4,0xFF,0xFE,0xFD,0x80,0x26,0x7F,0xFF,
+0x7E,0xFF,0x7D,0x05,0x7C,0x00,0x90,0x06,
+0x24,0x12,0x01,0x0F,0xC3,0x12,0x00,0xF2,
+0x50,0x1B,0x90,0x06,0x24,0x12,0x01,0x03,
+0xEF,0x24,0x01,0xFF,0xE4,0x3E,0xFE,0xE4,
+0x3D,0xFD,0xE4,0x3C,0xFC,0x90,0x06,0x24,
+0x12,0x01,0x1B,0x80,0xD1,0xC2,0x00,0xC2,
+0x01,0xD2,0xA9,0xD2,0x8C,0x7F,0x01,0x7E,
+0x62,0x12,0x07,0x60,0xEF,0x30,0xE2,0x07,
+0xE4,0x90,0x06,0x2C,0xF0,0x80,0xEE,0x90,
+0x06,0x2C,0xE0,0x70,0x12,0x12,0x04,0xEA,
+0x90,0x06,0x2C,0x74,0x01,0xF0,0xE4,0x90,
+0x06,0x33,0xF0,0xA3,0xF0,0x80,0xD6,0xC3,
+0x90,0x06,0x34,0xE0,0x94,0x62,0x90,0x06,
+0x33,0xE0,0x94,0x00,0x40,0xC7,0xE4,0xF0,
+0xA3,0xF0,0x12,0x04,0xEA,0x90,0x06,0x2C,
+0x74,0x01,0xF0,0x80,0xB8,0x75,0x0F,0x80,
+0x75,0x0E,0x7E,0x75,0x0D,0xAA,0x75,0x0C,
+0x83,0xE4,0xF5,0x10,0x7F,0x36,0x7E,0x13,
+0x12,0x07,0x60,0xEE,0xC4,0xF8,0x54,0xF0,
+0xC8,0xEF,0xC4,0x54,0x0F,0x48,0x54,0x07,
+0xFB,0x7A,0x00,0xEA,0x70,0x4A,0xEB,0x14,
+0x60,0x1C,0x14,0x60,0x27,0x24,0xFE,0x60,
+0x31,0x14,0x60,0x3C,0x24,0x05,0x70,0x38,
+0x75,0x0B,0x00,0x75,0x0A,0xC2,0x75,0x09,
+0xEB,0x75,0x08,0x0B,0x80,0x36,0x75,0x0B,
+0x40,0x75,0x0A,0x59,0x75,0x09,0x73,0x75,
+0x08,0x07,0x80,0x28,0x75,0x0B,0x00,0x75,
+0x0A,0xE1,0x75,0x09,0xF5,0x75,0x08,0x05,
+0x80,0x1A,0x75,0x0B,0xA0,0x75,0x0A,0xAC,
+0x75,0x09,0xB9,0x75,0x08,0x03,0x80,0x0C,
+0x75,0x0B,0x00,0x75,0x0A,0x62,0x75,0x09,
+0x3D,0x75,0x08,0x01,0x75,0x89,0x11,0xE4,
+0x7B,0x60,0x7A,0x09,0xF9,0xF8,0xAF,0x0B,
+0xAE,0x0A,0xAD,0x09,0xAC,0x08,0x12,0x00,
+0x60,0xAA,0x06,0xAB,0x07,0xC3,0xE4,0x9B,
+0xFB,0xE4,0x9A,0xFA,0x78,0x17,0xF6,0xAF,
+0x03,0xEF,0x08,0xF6,0x18,0xE6,0xF5,0x8C,
+0x08,0xE6,0xF5,0x8A,0x74,0x0D,0x2B,0xFB,
+0xE4,0x3A,0x18,0xF6,0xAF,0x03,0xEF,0x08,
+0xF6,0x75,0x88,0x10,0x53,0x8E,0xC7,0xD2,
+0xA9,0x22,0x7F,0x10,0x7E,0x13,0x12,0x07,
+0x60,0x90,0x06,0x2D,0xEE,0xF0,0xA3,0xEF,
+0xF0,0xEE,0x44,0x10,0xFE,0x90,0x06,0x2D,
+0xF0,0xA3,0xEF,0xF0,0x54,0xEF,0xFF,0x90,
+0x06,0x2D,0xEE,0xF0,0xFC,0xA3,0xEF,0xF0,
+0xFD,0x7F,0x10,0x7E,0x13,0x12,0x07,0xA5,
+0xE4,0xFF,0xFE,0x0F,0xBF,0x00,0x01,0x0E,
+0xEF,0x64,0x64,0x4E,0x70,0xF5,0x7D,0x04,
+0x7C,0x00,0x7F,0x02,0x7E,0x66,0x12,0x07,
+0xA5,0x7D,0x00,0x7C,0x04,0x7F,0x01,0x7E,
+0x66,0x12,0x07,0xA5,0x7D,0xC0,0x7C,0x00,
+0x7F,0x00,0x7E,0x66,0x12,0x07,0xA5,0xE4,
+0xFD,0xFC,0x7F,0x02,0x7E,0x66,0x12,0x07,
+0xA5,0x7D,0x00,0x7C,0x04,0x7F,0x01,0x7E,
+0x66,0x12,0x07,0xA5,0x7D,0xC0,0x7C,0x00,
+0x7F,0x00,0x7E,0x66,0x12,0x07,0xA5,0x7F,
+0x10,0x7E,0x13,0x12,0x07,0x60,0x90,0x06,
+0x2D,0xEE,0xF0,0xA3,0xEF,0xF0,0xEE,0x54,
+0xEF,0x90,0x06,0x2D,0xF0,0xFC,0xA3,0xEF,
+0xF0,0xFD,0x7F,0x10,0x7E,0x13,0x02,0x07,
+0xA5,0x78,0x7F,0xE4,0xF6,0xD8,0xFD,0x75,
+0x81,0x3C,0x02,0x05,0xD0,0x02,0x03,0x2F,
+0xE4,0x93,0xA3,0xF8,0xE4,0x93,0xA3,0x40,
+0x03,0xF6,0x80,0x01,0xF2,0x08,0xDF,0xF4,
+0x80,0x29,0xE4,0x93,0xA3,0xF8,0x54,0x07,
+0x24,0x0C,0xC8,0xC3,0x33,0xC4,0x54,0x0F,
+0x44,0x20,0xC8,0x83,0x40,0x04,0xF4,0x56,
+0x80,0x01,0x46,0xF6,0xDF,0xE4,0x80,0x0B,
+0x01,0x02,0x04,0x08,0x10,0x20,0x40,0x80,
+0x90,0x07,0xE1,0xE4,0x7E,0x01,0x93,0x60,
+0xBC,0xA3,0xFF,0x54,0x3F,0x30,0xE5,0x09,
+0x54,0x1F,0xFE,0xE4,0x93,0xA3,0x60,0x01,
+0x0E,0xCF,0x54,0xC0,0x25,0xE0,0x60,0xA8,
+0x40,0xB8,0xE4,0x93,0xA3,0xFA,0xE4,0x93,
+0xA3,0xF8,0xE4,0x93,0xA3,0xC8,0xC5,0x82,
+0xC8,0xCA,0xC5,0x83,0xCA,0xF0,0xA3,0xC8,
+0xC5,0x82,0xC8,0xCA,0xC5,0x83,0xCA,0xDF,
+0xE9,0xDE,0xE7,0x80,0xBE,0x7D,0x40,0x7C,
+0x17,0x7F,0x11,0x7E,0x1D,0x12,0x07,0xA5,
+0x7F,0x41,0x7E,0x1D,0x12,0x07,0x60,0xEF,
+0x44,0x20,0x44,0x80,0xFD,0xAC,0x06,0x7F,
+0x41,0x7E,0x1D,0x12,0x07,0xA5,0x7D,0xBB,
+0x7C,0x15,0x7F,0xEB,0x7E,0x13,0x12,0x07,
+0xA5,0x7D,0x07,0x7C,0x00,0x7F,0xE7,0x7E,
+0x13,0x12,0x07,0xA5,0x7D,0x40,0x7C,0x11,
+0x7F,0x00,0x7E,0x62,0x12,0x07,0xA5,0x02,
+0x02,0x2F,0x7D,0xC0,0x7C,0x16,0x7F,0x11,
+0x7E,0x1D,0x12,0x07,0xA5,0x7D,0xBB,0x7C,
+0x15,0x7F,0xEB,0x7E,0x13,0x12,0x07,0xA5,
+0x7D,0x0D,0x7C,0x00,0x7F,0xE7,0x7E,0x13,
+0x12,0x07,0xA5,0x7F,0x41,0x7E,0x1D,0x12,
+0x07,0x60,0xEF,0x44,0x20,0x44,0x80,0xFD,
+0xAC,0x06,0x7F,0x41,0x7E,0x1D,0x12,0x07,
+0xA5,0x7D,0x00,0x7C,0x21,0x7F,0x00,0x7E,
+0x62,0x12,0x07,0xA5,0x02,0x02,0x2F,0x7D,
+0x40,0x7C,0x17,0x7F,0x11,0x7E,0x1D,0x12,
+0x07,0xA5,0x7D,0xBB,0x7C,0x15,0x7F,0xEB,
+0x7E,0x13,0x12,0x07,0xA5,0x7D,0x0C,0x7C,
+0x00,0x7F,0xE7,0x7E,0x13,0x12,0x07,0xA5,
+0x7F,0x41,0x7E,0x1D,0x12,0x07,0x60,0xEF,
+0x44,0x20,0x44,0x80,0xFD,0xAC,0x06,0x7F,
+0x41,0x7E,0x1D,0x12,0x07,0xA5,0x7D,0x40,
+0x7C,0x11,0x7F,0x00,0x7E,0x62,0x12,0x07,
+0xA5,0x02,0x02,0x2F,0x7D,0x04,0x7C,0x00,
+0x7F,0x01,0x7E,0x66,0x12,0x07,0xA5,0x7D,
+0x80,0x7C,0x00,0x7F,0x00,0x7E,0x66,0x12,
+0x07,0xA5,0x7F,0x02,0x7E,0x66,0x12,0x07,
+0x60,0xEF,0x44,0x02,0x44,0x04,0xFD,0xAC,
+0x06,0x7F,0x02,0x7E,0x66,0x12,0x07,0xA5,
+0x7D,0x04,0x7C,0x00,0x7F,0x01,0x7E,0x66,
+0x12,0x07,0xA5,0x7D,0xC0,0x7C,0x00,0x7F,
+0x00,0x7E,0x66,0x02,0x07,0xA5,0xC0,0xE0,
+0xC0,0xF0,0xC0,0x83,0xC0,0x82,0xC0,0xD0,
+0x75,0xD0,0x00,0xC0,0x00,0x78,0x17,0xE6,
+0xF5,0x8C,0x78,0x18,0xE6,0xF5,0x8A,0x90,
+0x06,0x31,0xE4,0x75,0xF0,0x01,0x12,0x00,
+0x0E,0x90,0x06,0x33,0xE4,0x75,0xF0,0x01,
+0x12,0x00,0x0E,0xD0,0x00,0xD0,0xD0,0xD0,
+0x82,0xD0,0x83,0xD0,0xF0,0xD0,0xE0,0x32,
+0xC2,0xAF,0xAD,0x07,0xAC,0x06,0x8C,0xA2,
+0x8D,0xA3,0x75,0xA0,0x01,0x00,0x00,0x00,
+0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+0xAE,0xA1,0xBE,0x00,0xF0,0xAE,0xA6,0xAF,
+0xA7,0xD2,0xAF,0x22,0x7D,0x20,0x7C,0x0F,
+0x7F,0x02,0x7E,0x66,0x12,0x07,0xA5,0x7D,
+0x01,0x7C,0x00,0x7F,0x01,0x7E,0x66,0x12,
+0x07,0xA5,0x7D,0xC0,0x7C,0x00,0x7F,0x00,
+0x7E,0x66,0x02,0x07,0xA5,0xC2,0xAF,0xAB,
+0x07,0xAA,0x06,0x8A,0xA2,0x8B,0xA3,0x8C,
+0xA4,0x8D,0xA5,0x75,0xA0,0x03,0x00,0x00,
+0x00,0xAA,0xA1,0xBA,0x00,0xF8,0xD2,0xAF,
+0x22,0x7F,0x0C,0x7E,0x13,0x12,0x07,0x60,
+0xEF,0x44,0x50,0xFD,0xAC,0x06,0x7F,0x0C,
+0x7E,0x13,0x02,0x07,0xA5,0x12,0x07,0xC1,
+0x12,0x07,0xEC,0x12,0x04,0x25,0x02,0x00,
+0x03,0x42,0x06,0x33,0x00,0x00,0x42,0x06,
+0x31,0x00,0x00,0x00,0xE4,0xF5,0x8E,0x22,};
+
+#define FIBER2_100M_INIT_SIZE 2032
+rtk_uint8 Fiber2_100M[FIBER2_100M_INIT_SIZE] = {
+0x02,0x05,0x89,0xE4,0xF5,0xA8,0xD2,0xAF,
+0x22,0x00,0x00,0x02,0x07,0x26,0xC5,0xF0,
+0xF8,0xA3,0xE0,0x28,0xF0,0xC5,0xF0,0xF8,
+0xE5,0x82,0x15,0x82,0x70,0x02,0x15,0x83,
+0xE0,0x38,0xF0,0x22,0x75,0xF0,0x08,0x75,
+0x82,0x00,0xEF,0x2F,0xFF,0xEE,0x33,0xFE,
+0xCD,0x33,0xCD,0xCC,0x33,0xCC,0xC5,0x82,
+0x33,0xC5,0x82,0x9B,0xED,0x9A,0xEC,0x99,
+0xE5,0x82,0x98,0x40,0x0C,0xF5,0x82,0xEE,
+0x9B,0xFE,0xED,0x9A,0xFD,0xEC,0x99,0xFC,
+0x0F,0xD5,0xF0,0xD6,0xE4,0xCE,0xFB,0xE4,
+0xCD,0xFA,0xE4,0xCC,0xF9,0xA8,0x82,0x22,
+0xB8,0x00,0xC1,0xB9,0x00,0x59,0xBA,0x00,
+0x2D,0xEC,0x8B,0xF0,0x84,0xCF,0xCE,0xCD,
+0xFC,0xE5,0xF0,0xCB,0xF9,0x78,0x18,0xEF,
+0x2F,0xFF,0xEE,0x33,0xFE,0xED,0x33,0xFD,
+0xEC,0x33,0xFC,0xEB,0x33,0xFB,0x10,0xD7,
+0x03,0x99,0x40,0x04,0xEB,0x99,0xFB,0x0F,
+0xD8,0xE5,0xE4,0xF9,0xFA,0x22,0x78,0x18,
+0xEF,0x2F,0xFF,0xEE,0x33,0xFE,0xED,0x33,
+0xFD,0xEC,0x33,0xFC,0xC9,0x33,0xC9,0x10,
+0xD7,0x05,0x9B,0xE9,0x9A,0x40,0x07,0xEC,
+0x9B,0xFC,0xE9,0x9A,0xF9,0x0F,0xD8,0xE0,
+0xE4,0xC9,0xFA,0xE4,0xCC,0xFB,0x22,0x75,
+0xF0,0x10,0xEF,0x2F,0xFF,0xEE,0x33,0xFE,
+0xED,0x33,0xFD,0xCC,0x33,0xCC,0xC8,0x33,
+0xC8,0x10,0xD7,0x07,0x9B,0xEC,0x9A,0xE8,
+0x99,0x40,0x0A,0xED,0x9B,0xFD,0xEC,0x9A,
+0xFC,0xE8,0x99,0xF8,0x0F,0xD5,0xF0,0xDA,
+0xE4,0xCD,0xFB,0xE4,0xCC,0xFA,0xE4,0xC8,
+0xF9,0x22,0xEB,0x9F,0xF5,0xF0,0xEA,0x9E,
+0x42,0xF0,0xE9,0x9D,0x42,0xF0,0xE8,0x9C,
+0x45,0xF0,0x22,0xE0,0xFC,0xA3,0xE0,0xFD,
+0xA3,0xE0,0xFE,0xA3,0xE0,0xFF,0x22,0xE0,
+0xF8,0xA3,0xE0,0xF9,0xA3,0xE0,0xFA,0xA3,
+0xE0,0xFB,0x22,0xEC,0xF0,0xA3,0xED,0xF0,
+0xA3,0xEE,0xF0,0xA3,0xEF,0xF0,0x22,0x7D,
+0xD7,0x7C,0x04,0x7F,0x02,0x7E,0x66,0x12,
+0x07,0xA5,0x7D,0x80,0x7C,0x04,0x7F,0x01,
+0x7E,0x66,0x12,0x07,0xA5,0x7D,0xC0,0x7C,
+0x00,0x7F,0x00,0x7E,0x66,0x12,0x07,0xA5,
+0x7D,0x94,0x7C,0xF9,0x7F,0x02,0x7E,0x66,
+0x12,0x07,0xA5,0x7D,0x81,0x7C,0x04,0x7F,
+0x01,0x7E,0x66,0x12,0x07,0xA5,0x7D,0xC0,
+0x7C,0x00,0x7F,0x00,0x7E,0x66,0x12,0x07,
+0xA5,0x7D,0xA2,0x7C,0x31,0x7F,0x02,0x7E,
+0x66,0x12,0x07,0xA5,0x7D,0x82,0x7C,0x04,
+0x7F,0x01,0x7E,0x66,0x12,0x07,0xA5,0x7D,
+0xC0,0x7C,0x00,0x7F,0x00,0x7E,0x66,0x12,
+0x07,0xA5,0x7D,0x60,0x7C,0x69,0x7F,0x02,
+0x7E,0x66,0x12,0x07,0xA5,0x7D,0x83,0x7C,
+0x04,0x7F,0x01,0x7E,0x66,0x12,0x07,0xA5,
+0x7D,0xC0,0x7C,0x00,0x7F,0x00,0x7E,0x66,
+0x12,0x07,0xA5,0x7D,0x28,0x7C,0x97,0x7F,
+0x02,0x7E,0x66,0x12,0x07,0xA5,0x7D,0x84,
+0x7C,0x04,0x7F,0x01,0x7E,0x66,0x12,0x07,
+0xA5,0x7D,0xC0,0x7C,0x00,0x7F,0x00,0x7E,
+0x66,0x12,0x07,0xA5,0x7D,0x85,0x7C,0x9D,
+0x7F,0x02,0x7E,0x66,0x12,0x07,0xA5,0x7D,
+0x23,0x7C,0x04,0x7F,0x01,0x7E,0x66,0x12,
+0x07,0xA5,0x7D,0xC0,0x7C,0x00,0x7F,0x00,
+0x7E,0x66,0x12,0x07,0xA5,0x7D,0x10,0x7C,
+0xD8,0x7F,0x02,0x7E,0x66,0x12,0x07,0xA5,
+0x7D,0x24,0x7C,0x04,0x7F,0x01,0x7E,0x66,
+0x12,0x07,0xA5,0x7D,0xC0,0x7C,0x00,0x7F,
+0x00,0x7E,0x66,0x12,0x07,0xA5,0x7D,0x00,
+0x7C,0x04,0x7F,0x02,0x7E,0x66,0x12,0x07,
+0xA5,0x7D,0x2F,0x7C,0x00,0x7F,0x01,0x7E,
+0x66,0x12,0x07,0xA5,0x7D,0xC0,0x7C,0x00,
+0x7F,0x00,0x7E,0x66,0x02,0x07,0xA5,0x7D,
+0x03,0x7C,0x00,0x7F,0x01,0x7E,0x66,0x12,
+0x07,0xA5,0x7D,0x80,0x7C,0x00,0x7F,0x00,
+0x7E,0x66,0x12,0x07,0xA5,0x7F,0x02,0x7E,
+0x66,0x12,0x07,0x60,0xEF,0x44,0x40,0xFD,
+0xAC,0x06,0x7F,0x02,0x7E,0x66,0x12,0x07,
+0xA5,0x7D,0x03,0x7C,0x00,0x7F,0x01,0x7E,
+0x66,0x12,0x07,0xA5,0x7D,0xC0,0x7C,0x00,
+0x7F,0x00,0x7E,0x66,0x12,0x07,0xA5,0x7D,
+0x03,0x7C,0x00,0x7F,0x01,0x7E,0x66,0x12,
+0x07,0xA5,0x7D,0x80,0x7C,0x00,0x7F,0x00,
+0x7E,0x66,0x12,0x07,0xA5,0x7F,0x02,0x7E,
+0x66,0x12,0x07,0x60,0xEF,0x54,0xBF,0xFD,
+0xAC,0x06,0x7F,0x02,0x7E,0x66,0x12,0x07,
+0xA5,0x7D,0x03,0x7C,0x00,0x7F,0x01,0x7E,
+0x66,0x12,0x07,0xA5,0x7D,0xC0,0x7C,0x00,
+0x7F,0x00,0x7E,0x66,0x12,0x07,0xA5,0xE4,
+0xFD,0xFC,0x7F,0x01,0x7E,0x66,0x12,0x07,
+0xA5,0x7D,0x80,0x7C,0x00,0x7F,0x00,0x7E,
+0x66,0x12,0x07,0xA5,0x7F,0x02,0x7E,0x66,
+0x12,0x07,0x60,0xEF,0x54,0xFD,0x54,0xFE,
+0xFD,0xAC,0x06,0x7F,0x02,0x7E,0x66,0x12,
+0x07,0xA5,0xE4,0xFD,0xFC,0x7F,0x01,0x7E,
+0x66,0x12,0x07,0xA5,0x7D,0xC0,0x7C,0x00,
+0x7F,0x00,0x7E,0x66,0x12,0x07,0xA5,0xE4,
+0xFD,0xFC,0x7F,0x01,0x7E,0x66,0x12,0x07,
+0xA5,0x7D,0x80,0x7C,0x00,0x7F,0x00,0x7E,
+0x66,0x12,0x07,0xA5,0x7F,0x02,0x7E,0x66,
+0x12,0x07,0x60,0xEF,0x44,0x02,0x44,0x01,
+0xFD,0xAC,0x06,0x7F,0x02,0x7E,0x66,0x12,
+0x07,0xA5,0xE4,0xFD,0xFC,0x7F,0x01,0x7E,
+0x66,0x12,0x07,0xA5,0x7D,0xC0,0x7C,0x00,
+0x7F,0x00,0x7E,0x66,0x02,0x07,0xA5,0xE4,
+0x90,0x06,0x2C,0xF0,0xFD,0x7C,0x01,0x7F,
+0x3F,0x7E,0x1D,0x12,0x07,0xA5,0x7D,0x40,
+0x7C,0x00,0x7F,0x36,0x7E,0x13,0x12,0x07,
+0xA5,0xE4,0xFF,0xFE,0xFD,0x80,0x25,0xE4,
+0x7F,0xFF,0x7E,0xFF,0xFD,0xFC,0x90,0x06,
+0x24,0x12,0x01,0x0F,0xC3,0x12,0x00,0xF2,
+0x50,0x1B,0x90,0x06,0x24,0x12,0x01,0x03,
+0xEF,0x24,0x01,0xFF,0xE4,0x3E,0xFE,0xE4,
+0x3D,0xFD,0xE4,0x3C,0xFC,0x90,0x06,0x24,
+0x12,0x01,0x1B,0x80,0xD2,0xE4,0xF5,0xA8,
+0xD2,0xAF,0x7D,0x1F,0xFC,0x7F,0x49,0x7E,
+0x13,0x12,0x07,0xA5,0x12,0x07,0xD5,0x12,
+0x01,0x27,0x12,0x06,0x5A,0x7D,0x41,0x7C,
+0x00,0x7F,0x36,0x7E,0x13,0x12,0x07,0xA5,
+0xE4,0xFF,0xFE,0xFD,0x80,0x26,0x7F,0xFF,
+0x7E,0xFF,0x7D,0x05,0x7C,0x00,0x90,0x06,
+0x24,0x12,0x01,0x0F,0xC3,0x12,0x00,0xF2,
+0x50,0x1B,0x90,0x06,0x24,0x12,0x01,0x03,
+0xEF,0x24,0x01,0xFF,0xE4,0x3E,0xFE,0xE4,
+0x3D,0xFD,0xE4,0x3C,0xFC,0x90,0x06,0x24,
+0x12,0x01,0x1B,0x80,0xD1,0xC2,0x00,0xC2,
+0x01,0xD2,0xA9,0xD2,0x8C,0x7F,0x01,0x7E,
+0x62,0x12,0x07,0x60,0xEF,0x30,0xE2,0x07,
+0xE4,0x90,0x06,0x2C,0xF0,0x80,0xEE,0x90,
+0x06,0x2C,0xE0,0x70,0x12,0x12,0x04,0xEA,
+0x90,0x06,0x2C,0x74,0x01,0xF0,0xE4,0x90,
+0x06,0x33,0xF0,0xA3,0xF0,0x80,0xD6,0xC3,
+0x90,0x06,0x34,0xE0,0x94,0x62,0x90,0x06,
+0x33,0xE0,0x94,0x00,0x40,0xC7,0xE4,0xF0,
+0xA3,0xF0,0x12,0x04,0xEA,0x90,0x06,0x2C,
+0x74,0x01,0xF0,0x80,0xB8,0x75,0x0F,0x80,
+0x75,0x0E,0x7E,0x75,0x0D,0xAA,0x75,0x0C,
+0x83,0xE4,0xF5,0x10,0x7F,0x36,0x7E,0x13,
+0x12,0x07,0x60,0xEE,0xC4,0xF8,0x54,0xF0,
+0xC8,0xEF,0xC4,0x54,0x0F,0x48,0x54,0x07,
+0xFB,0x7A,0x00,0xEA,0x70,0x4A,0xEB,0x14,
+0x60,0x1C,0x14,0x60,0x27,0x24,0xFE,0x60,
+0x31,0x14,0x60,0x3C,0x24,0x05,0x70,0x38,
+0x75,0x0B,0x00,0x75,0x0A,0xC2,0x75,0x09,
+0xEB,0x75,0x08,0x0B,0x80,0x36,0x75,0x0B,
+0x40,0x75,0x0A,0x59,0x75,0x09,0x73,0x75,
+0x08,0x07,0x80,0x28,0x75,0x0B,0x00,0x75,
+0x0A,0xE1,0x75,0x09,0xF5,0x75,0x08,0x05,
+0x80,0x1A,0x75,0x0B,0xA0,0x75,0x0A,0xAC,
+0x75,0x09,0xB9,0x75,0x08,0x03,0x80,0x0C,
+0x75,0x0B,0x00,0x75,0x0A,0x62,0x75,0x09,
+0x3D,0x75,0x08,0x01,0x75,0x89,0x11,0xE4,
+0x7B,0x60,0x7A,0x09,0xF9,0xF8,0xAF,0x0B,
+0xAE,0x0A,0xAD,0x09,0xAC,0x08,0x12,0x00,
+0x60,0xAA,0x06,0xAB,0x07,0xC3,0xE4,0x9B,
+0xFB,0xE4,0x9A,0xFA,0x78,0x17,0xF6,0xAF,
+0x03,0xEF,0x08,0xF6,0x18,0xE6,0xF5,0x8C,
+0x08,0xE6,0xF5,0x8A,0x74,0x0D,0x2B,0xFB,
+0xE4,0x3A,0x18,0xF6,0xAF,0x03,0xEF,0x08,
+0xF6,0x75,0x88,0x10,0x53,0x8E,0xC7,0xD2,
+0xA9,0x22,0x7F,0x10,0x7E,0x13,0x12,0x07,
+0x60,0x90,0x06,0x2D,0xEE,0xF0,0xA3,0xEF,
+0xF0,0xEE,0x44,0x10,0xFE,0x90,0x06,0x2D,
+0xF0,0xA3,0xEF,0xF0,0x54,0xEF,0xFF,0x90,
+0x06,0x2D,0xEE,0xF0,0xFC,0xA3,0xEF,0xF0,
+0xFD,0x7F,0x10,0x7E,0x13,0x12,0x07,0xA5,
+0xE4,0xFF,0xFE,0x0F,0xBF,0x00,0x01,0x0E,
+0xEF,0x64,0x64,0x4E,0x70,0xF5,0x7D,0x04,
+0x7C,0x00,0x7F,0x02,0x7E,0x66,0x12,0x07,
+0xA5,0x7D,0x00,0x7C,0x04,0x7F,0x01,0x7E,
+0x66,0x12,0x07,0xA5,0x7D,0xC0,0x7C,0x00,
+0x7F,0x00,0x7E,0x66,0x12,0x07,0xA5,0xE4,
+0xFD,0xFC,0x7F,0x02,0x7E,0x66,0x12,0x07,
+0xA5,0x7D,0x00,0x7C,0x04,0x7F,0x01,0x7E,
+0x66,0x12,0x07,0xA5,0x7D,0xC0,0x7C,0x00,
+0x7F,0x00,0x7E,0x66,0x12,0x07,0xA5,0x7F,
+0x10,0x7E,0x13,0x12,0x07,0x60,0x90,0x06,
+0x2D,0xEE,0xF0,0xA3,0xEF,0xF0,0xEE,0x54,
+0xEF,0x90,0x06,0x2D,0xF0,0xFC,0xA3,0xEF,
+0xF0,0xFD,0x7F,0x10,0x7E,0x13,0x02,0x07,
+0xA5,0x78,0x7F,0xE4,0xF6,0xD8,0xFD,0x75,
+0x81,0x3C,0x02,0x05,0xD0,0x02,0x03,0x2F,
+0xE4,0x93,0xA3,0xF8,0xE4,0x93,0xA3,0x40,
+0x03,0xF6,0x80,0x01,0xF2,0x08,0xDF,0xF4,
+0x80,0x29,0xE4,0x93,0xA3,0xF8,0x54,0x07,
+0x24,0x0C,0xC8,0xC3,0x33,0xC4,0x54,0x0F,
+0x44,0x20,0xC8,0x83,0x40,0x04,0xF4,0x56,
+0x80,0x01,0x46,0xF6,0xDF,0xE4,0x80,0x0B,
+0x01,0x02,0x04,0x08,0x10,0x20,0x40,0x80,
+0x90,0x07,0xE1,0xE4,0x7E,0x01,0x93,0x60,
+0xBC,0xA3,0xFF,0x54,0x3F,0x30,0xE5,0x09,
+0x54,0x1F,0xFE,0xE4,0x93,0xA3,0x60,0x01,
+0x0E,0xCF,0x54,0xC0,0x25,0xE0,0x60,0xA8,
+0x40,0xB8,0xE4,0x93,0xA3,0xFA,0xE4,0x93,
+0xA3,0xF8,0xE4,0x93,0xA3,0xC8,0xC5,0x82,
+0xC8,0xCA,0xC5,0x83,0xCA,0xF0,0xA3,0xC8,
+0xC5,0x82,0xC8,0xCA,0xC5,0x83,0xCA,0xDF,
+0xE9,0xDE,0xE7,0x80,0xBE,0x7D,0x40,0x7C,
+0x17,0x7F,0x11,0x7E,0x1D,0x12,0x07,0xA5,
+0x7F,0x41,0x7E,0x1D,0x12,0x07,0x60,0xEF,
+0x44,0x20,0x44,0x80,0xFD,0xAC,0x06,0x7F,
+0x41,0x7E,0x1D,0x12,0x07,0xA5,0x7D,0xBB,
+0x7C,0x15,0x7F,0xEB,0x7E,0x13,0x12,0x07,
+0xA5,0x7D,0x07,0x7C,0x00,0x7F,0xE7,0x7E,
+0x13,0x12,0x07,0xA5,0x7D,0x40,0x7C,0x11,
+0x7F,0x00,0x7E,0x62,0x12,0x07,0xA5,0x02,
+0x02,0x2F,0x7D,0xC0,0x7C,0x16,0x7F,0x11,
+0x7E,0x1D,0x12,0x07,0xA5,0x7D,0xBB,0x7C,
+0x15,0x7F,0xEB,0x7E,0x13,0x12,0x07,0xA5,
+0x7D,0x0D,0x7C,0x00,0x7F,0xE7,0x7E,0x13,
+0x12,0x07,0xA5,0x7F,0x41,0x7E,0x1D,0x12,
+0x07,0x60,0xEF,0x44,0x20,0x44,0x80,0xFD,
+0xAC,0x06,0x7F,0x41,0x7E,0x1D,0x12,0x07,
+0xA5,0x7D,0x00,0x7C,0x21,0x7F,0x00,0x7E,
+0x62,0x12,0x07,0xA5,0x02,0x02,0x2F,0x7D,
+0x40,0x7C,0x17,0x7F,0x11,0x7E,0x1D,0x12,
+0x07,0xA5,0x7D,0xBB,0x7C,0x15,0x7F,0xEB,
+0x7E,0x13,0x12,0x07,0xA5,0x7D,0x0C,0x7C,
+0x00,0x7F,0xE7,0x7E,0x13,0x12,0x07,0xA5,
+0x7F,0x41,0x7E,0x1D,0x12,0x07,0x60,0xEF,
+0x44,0x20,0x44,0x80,0xFD,0xAC,0x06,0x7F,
+0x41,0x7E,0x1D,0x12,0x07,0xA5,0x7D,0x40,
+0x7C,0x11,0x7F,0x00,0x7E,0x62,0x12,0x07,
+0xA5,0x02,0x02,0x2F,0x7D,0x04,0x7C,0x00,
+0x7F,0x01,0x7E,0x66,0x12,0x07,0xA5,0x7D,
+0x80,0x7C,0x00,0x7F,0x00,0x7E,0x66,0x12,
+0x07,0xA5,0x7F,0x02,0x7E,0x66,0x12,0x07,
+0x60,0xEF,0x44,0x02,0x44,0x04,0xFD,0xAC,
+0x06,0x7F,0x02,0x7E,0x66,0x12,0x07,0xA5,
+0x7D,0x04,0x7C,0x00,0x7F,0x01,0x7E,0x66,
+0x12,0x07,0xA5,0x7D,0xC0,0x7C,0x00,0x7F,
+0x00,0x7E,0x66,0x02,0x07,0xA5,0xC0,0xE0,
+0xC0,0xF0,0xC0,0x83,0xC0,0x82,0xC0,0xD0,
+0x75,0xD0,0x00,0xC0,0x00,0x78,0x17,0xE6,
+0xF5,0x8C,0x78,0x18,0xE6,0xF5,0x8A,0x90,
+0x06,0x31,0xE4,0x75,0xF0,0x01,0x12,0x00,
+0x0E,0x90,0x06,0x33,0xE4,0x75,0xF0,0x01,
+0x12,0x00,0x0E,0xD0,0x00,0xD0,0xD0,0xD0,
+0x82,0xD0,0x83,0xD0,0xF0,0xD0,0xE0,0x32,
+0xC2,0xAF,0xAD,0x07,0xAC,0x06,0x8C,0xA2,
+0x8D,0xA3,0x75,0xA0,0x01,0x00,0x00,0x00,
+0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+0xAE,0xA1,0xBE,0x00,0xF0,0xAE,0xA6,0xAF,
+0xA7,0xD2,0xAF,0x22,0x7D,0x20,0x7C,0x0F,
+0x7F,0x02,0x7E,0x66,0x12,0x07,0xA5,0x7D,
+0x01,0x7C,0x00,0x7F,0x01,0x7E,0x66,0x12,
+0x07,0xA5,0x7D,0xC0,0x7C,0x00,0x7F,0x00,
+0x7E,0x66,0x02,0x07,0xA5,0xC2,0xAF,0xAB,
+0x07,0xAA,0x06,0x8A,0xA2,0x8B,0xA3,0x8C,
+0xA4,0x8D,0xA5,0x75,0xA0,0x03,0x00,0x00,
+0x00,0xAA,0xA1,0xBA,0x00,0xF8,0xD2,0xAF,
+0x22,0x7F,0x0C,0x7E,0x13,0x12,0x07,0x60,
+0xEF,0x44,0x50,0xFD,0xAC,0x06,0x7F,0x0C,
+0x7E,0x13,0x02,0x07,0xA5,0x12,0x07,0xC1,
+0x12,0x07,0xEC,0x12,0x04,0x25,0x02,0x00,
+0x03,0x42,0x06,0x33,0x00,0x00,0x42,0x06,
+0x31,0x00,0x00,0x00,0xE4,0xF5,0x8E,0x22,};
+
+
+#define SGMII_INIT_SIZE 1205
+rtk_uint8 Sgmii_Init[SGMII_INIT_SIZE] = {
+0x02,0x03,0x97,0xE4,0xF5,0xA8,0xD2,0xAF,
+0x22,0x00,0x00,0x02,0x04,0x23,0xC5,0xF0,
+0xF8,0xA3,0xE0,0x28,0xF0,0xC5,0xF0,0xF8,
+0xE5,0x82,0x15,0x82,0x70,0x02,0x15,0x83,
+0xE0,0x38,0xF0,0x22,0x75,0xF0,0x08,0x75,
+0x82,0x00,0xEF,0x2F,0xFF,0xEE,0x33,0xFE,
+0xCD,0x33,0xCD,0xCC,0x33,0xCC,0xC5,0x82,
+0x33,0xC5,0x82,0x9B,0xED,0x9A,0xEC,0x99,
+0xE5,0x82,0x98,0x40,0x0C,0xF5,0x82,0xEE,
+0x9B,0xFE,0xED,0x9A,0xFD,0xEC,0x99,0xFC,
+0x0F,0xD5,0xF0,0xD6,0xE4,0xCE,0xFB,0xE4,
+0xCD,0xFA,0xE4,0xCC,0xF9,0xA8,0x82,0x22,
+0xB8,0x00,0xC1,0xB9,0x00,0x59,0xBA,0x00,
+0x2D,0xEC,0x8B,0xF0,0x84,0xCF,0xCE,0xCD,
+0xFC,0xE5,0xF0,0xCB,0xF9,0x78,0x18,0xEF,
+0x2F,0xFF,0xEE,0x33,0xFE,0xED,0x33,0xFD,
+0xEC,0x33,0xFC,0xEB,0x33,0xFB,0x10,0xD7,
+0x03,0x99,0x40,0x04,0xEB,0x99,0xFB,0x0F,
+0xD8,0xE5,0xE4,0xF9,0xFA,0x22,0x78,0x18,
+0xEF,0x2F,0xFF,0xEE,0x33,0xFE,0xED,0x33,
+0xFD,0xEC,0x33,0xFC,0xC9,0x33,0xC9,0x10,
+0xD7,0x05,0x9B,0xE9,0x9A,0x40,0x07,0xEC,
+0x9B,0xFC,0xE9,0x9A,0xF9,0x0F,0xD8,0xE0,
+0xE4,0xC9,0xFA,0xE4,0xCC,0xFB,0x22,0x75,
+0xF0,0x10,0xEF,0x2F,0xFF,0xEE,0x33,0xFE,
+0xED,0x33,0xFD,0xCC,0x33,0xCC,0xC8,0x33,
+0xC8,0x10,0xD7,0x07,0x9B,0xEC,0x9A,0xE8,
+0x99,0x40,0x0A,0xED,0x9B,0xFD,0xEC,0x9A,
+0xFC,0xE8,0x99,0xF8,0x0F,0xD5,0xF0,0xDA,
+0xE4,0xCD,0xFB,0xE4,0xCC,0xFA,0xE4,0xC8,
+0xF9,0x22,0xEB,0x9F,0xF5,0xF0,0xEA,0x9E,
+0x42,0xF0,0xE9,0x9D,0x42,0xF0,0xE8,0x9C,
+0x45,0xF0,0x22,0xE0,0xFC,0xA3,0xE0,0xFD,
+0xA3,0xE0,0xFE,0xA3,0xE0,0xFF,0x22,0xE0,
+0xF8,0xA3,0xE0,0xF9,0xA3,0xE0,0xFA,0xA3,
+0xE0,0xFB,0x22,0xEC,0xF0,0xA3,0xED,0xF0,
+0xA3,0xEE,0xF0,0xA3,0xEF,0xF0,0x22,0xE4,
+0x90,0x06,0x28,0xF0,0xFD,0x7C,0x01,0x7F,
+0x3F,0x7E,0x1D,0x12,0x04,0x81,0x7D,0x40,
+0x7C,0x00,0x7F,0x36,0x7E,0x13,0x12,0x04,
+0x81,0xE4,0xFF,0xFE,0xFD,0x80,0x25,0xE4,
+0x7F,0xFF,0x7E,0xFF,0xFD,0xFC,0x90,0x06,
+0x24,0x12,0x01,0x0F,0xC3,0x12,0x00,0xF2,
+0x50,0x1B,0x90,0x06,0x24,0x12,0x01,0x03,
+0xEF,0x24,0x01,0xFF,0xE4,0x3E,0xFE,0xE4,
+0x3D,0xFD,0xE4,0x3C,0xFC,0x90,0x06,0x24,
+0x12,0x01,0x1B,0x80,0xD2,0xE4,0xF5,0xA8,
+0xD2,0xAF,0x7D,0x1F,0xFC,0x7F,0x49,0x7E,
+0x13,0x12,0x04,0x81,0x12,0x04,0xA8,0x7D,
+0x41,0x7C,0x00,0x7F,0x36,0x7E,0x13,0x12,
+0x04,0x81,0xE4,0xFF,0xFE,0xFD,0x80,0x25,
+0xE4,0x7F,0x20,0x7E,0x4E,0xFD,0xFC,0x90,
+0x06,0x24,0x12,0x01,0x0F,0xC3,0x12,0x00,
+0xF2,0x50,0x1B,0x90,0x06,0x24,0x12,0x01,
+0x03,0xEF,0x24,0x01,0xFF,0xE4,0x3E,0xFE,
+0xE4,0x3D,0xFD,0xE4,0x3C,0xFC,0x90,0x06,
+0x24,0x12,0x01,0x1B,0x80,0xD2,0xC2,0x00,
+0xC2,0x01,0xD2,0xA9,0xD2,0x8C,0x7D,0x3D,
+0x7C,0x00,0x7F,0x01,0x7E,0x66,0x12,0x04,
+0x81,0x7D,0x80,0x7C,0x00,0x7F,0x00,0x7E,
+0x66,0x12,0x04,0x81,0x7F,0x02,0x7E,0x66,
+0x12,0x04,0x5D,0xEF,0x30,0xE4,0x07,0xE4,
+0x90,0x06,0x28,0xF0,0x80,0xD8,0x90,0x06,
+0x28,0xE0,0x70,0x12,0x12,0x02,0xF1,0x90,
+0x06,0x28,0x74,0x01,0xF0,0xE4,0x90,0x06,
+0x2B,0xF0,0xA3,0xF0,0x80,0xC0,0xC3,0x90,
+0x06,0x2C,0xE0,0x94,0x62,0x90,0x06,0x2B,
+0xE0,0x94,0x00,0x40,0xB1,0xE4,0xF0,0xA3,
+0xF0,0x12,0x02,0xF1,0x90,0x06,0x28,0x74,
+0x01,0xF0,0x80,0xA2,0x75,0x0F,0x80,0x75,
+0x0E,0x7E,0x75,0x0D,0xAA,0x75,0x0C,0x83,
+0xE4,0xF5,0x10,0x7F,0x36,0x7E,0x13,0x12,
+0x04,0x5D,0xEE,0xC4,0xF8,0x54,0xF0,0xC8,
+0xEF,0xC4,0x54,0x0F,0x48,0x54,0x07,0xFB,
+0x7A,0x00,0xEA,0x70,0x4A,0xEB,0x14,0x60,
+0x1C,0x14,0x60,0x27,0x24,0xFE,0x60,0x31,
+0x14,0x60,0x3C,0x24,0x05,0x70,0x38,0x75,
+0x0B,0x00,0x75,0x0A,0xC2,0x75,0x09,0xEB,
+0x75,0x08,0x0B,0x80,0x36,0x75,0x0B,0x40,
+0x75,0x0A,0x59,0x75,0x09,0x73,0x75,0x08,
+0x07,0x80,0x28,0x75,0x0B,0x00,0x75,0x0A,
+0xE1,0x75,0x09,0xF5,0x75,0x08,0x05,0x80,
+0x1A,0x75,0x0B,0xA0,0x75,0x0A,0xAC,0x75,
+0x09,0xB9,0x75,0x08,0x03,0x80,0x0C,0x75,
+0x0B,0x00,0x75,0x0A,0x62,0x75,0x09,0x3D,
+0x75,0x08,0x01,0x75,0x89,0x11,0xE4,0x7B,
+0x60,0x7A,0x09,0xF9,0xF8,0xAF,0x0B,0xAE,
+0x0A,0xAD,0x09,0xAC,0x08,0x12,0x00,0x60,
+0xAA,0x06,0xAB,0x07,0xC3,0xE4,0x9B,0xFB,
+0xE4,0x9A,0xFA,0x78,0x17,0xF6,0xAF,0x03,
+0xEF,0x08,0xF6,0x18,0xE6,0xF5,0x8C,0x08,
+0xE6,0xF5,0x8A,0x74,0x0D,0x2B,0xFB,0xE4,
+0x3A,0x18,0xF6,0xAF,0x03,0xEF,0x08,0xF6,
+0x75,0x88,0x10,0x53,0x8E,0xC7,0xD2,0xA9,
+0x22,0x7D,0x02,0x7C,0x00,0x7F,0x4A,0x7E,
+0x13,0x12,0x04,0x81,0x7D,0x46,0x7C,0x71,
+0x7F,0x02,0x7E,0x66,0x12,0x04,0x81,0x7D,
+0x03,0x7C,0x00,0x7F,0x01,0x7E,0x66,0x12,
+0x04,0x81,0x7D,0xC0,0x7C,0x00,0x7F,0x00,
+0x7E,0x66,0x12,0x04,0x81,0xE4,0xFF,0xFE,
+0x0F,0xBF,0x00,0x01,0x0E,0xEF,0x64,0x64,
+0x4E,0x70,0xF5,0x7D,0x04,0x7C,0x00,0x7F,
+0x02,0x7E,0x66,0x12,0x04,0x81,0x7D,0x00,
+0x7C,0x04,0x7F,0x01,0x7E,0x66,0x12,0x04,
+0x81,0x7D,0xC0,0x7C,0x00,0x7F,0x00,0x7E,
+0x66,0x12,0x04,0x81,0xE4,0xFD,0xFC,0x7F,
+0x02,0x7E,0x66,0x12,0x04,0x81,0x7D,0x00,
+0x7C,0x04,0x7F,0x01,0x7E,0x66,0x12,0x04,
+0x81,0x7D,0xC0,0x7C,0x00,0x7F,0x00,0x7E,
+0x66,0x12,0x04,0x81,0xE4,0xFD,0xFC,0x7F,
+0x4A,0x7E,0x13,0x12,0x04,0x81,0x7D,0x06,
+0x7C,0x71,0x7F,0x02,0x7E,0x66,0x12,0x04,
+0x81,0x7D,0x03,0x7C,0x00,0x7F,0x01,0x7E,
+0x66,0x12,0x04,0x81,0x7D,0xC0,0x7C,0x00,
+0x7F,0x00,0x7E,0x66,0x02,0x04,0x81,0x78,
+0x7F,0xE4,0xF6,0xD8,0xFD,0x75,0x81,0x3C,
+0x02,0x03,0xDE,0x02,0x01,0x27,0xE4,0x93,
+0xA3,0xF8,0xE4,0x93,0xA3,0x40,0x03,0xF6,
+0x80,0x01,0xF2,0x08,0xDF,0xF4,0x80,0x29,
+0xE4,0x93,0xA3,0xF8,0x54,0x07,0x24,0x0C,
+0xC8,0xC3,0x33,0xC4,0x54,0x0F,0x44,0x20,
+0xC8,0x83,0x40,0x04,0xF4,0x56,0x80,0x01,
+0x46,0xF6,0xDF,0xE4,0x80,0x0B,0x01,0x02,
+0x04,0x08,0x10,0x20,0x40,0x80,0x90,0x04,
+0x9D,0xE4,0x7E,0x01,0x93,0x60,0xBC,0xA3,
+0xFF,0x54,0x3F,0x30,0xE5,0x09,0x54,0x1F,
+0xFE,0xE4,0x93,0xA3,0x60,0x01,0x0E,0xCF,
+0x54,0xC0,0x25,0xE0,0x60,0xA8,0x40,0xB8,
+0xE4,0x93,0xA3,0xFA,0xE4,0x93,0xA3,0xF8,
+0xE4,0x93,0xA3,0xC8,0xC5,0x82,0xC8,0xCA,
+0xC5,0x83,0xCA,0xF0,0xA3,0xC8,0xC5,0x82,
+0xC8,0xCA,0xC5,0x83,0xCA,0xDF,0xE9,0xDE,
+0xE7,0x80,0xBE,0xC0,0xE0,0xC0,0xF0,0xC0,
+0x83,0xC0,0x82,0xC0,0xD0,0x75,0xD0,0x00,
+0xC0,0x00,0x78,0x17,0xE6,0xF5,0x8C,0x78,
+0x18,0xE6,0xF5,0x8A,0x90,0x06,0x29,0xE4,
+0x75,0xF0,0x01,0x12,0x00,0x0E,0x90,0x06,
+0x2B,0xE4,0x75,0xF0,0x01,0x12,0x00,0x0E,
+0xD0,0x00,0xD0,0xD0,0xD0,0x82,0xD0,0x83,
+0xD0,0xF0,0xD0,0xE0,0x32,0xC2,0xAF,0xAD,
+0x07,0xAC,0x06,0x8C,0xA2,0x8D,0xA3,0x75,
+0xA0,0x01,0x00,0x00,0x00,0x00,0x00,0x00,
+0x00,0x00,0x00,0x00,0x00,0xAE,0xA1,0xBE,
+0x00,0xF0,0xAE,0xA6,0xAF,0xA7,0xD2,0xAF,
+0x22,0xC2,0xAF,0xAB,0x07,0xAA,0x06,0x8A,
+0xA2,0x8B,0xA3,0x8C,0xA4,0x8D,0xA5,0x75,
+0xA0,0x03,0x00,0x00,0x00,0xAA,0xA1,0xBA,
+0x00,0xF8,0xD2,0xAF,0x22,0x42,0x06,0x2B,
+0x00,0x00,0x42,0x06,0x29,0x00,0x00,0x00,
+0x12,0x04,0xB1,0x12,0x02,0x2C,0x02,0x00,
+0x03,0xE4,0xF5,0x8E,0x22,};
+
+#define FIBER1_2_INIT_SIZE 940
+rtk_uint8 Fiber1_2_Init[FIBER1_2_INIT_SIZE] = {
+0x02,0x03,0xA0,0x90,0x00,0x0A,0xEE,0xF0,
+0xA3,0xEF,0xF0,0xE4,0x7F,0x11,0x7E,0x62,
+0x12,0x03,0x85,0x7F,0x11,0x7E,0x62,0x12,
+0x03,0x85,0xEF,0x30,0xE2,0x03,0x02,0x00,
+0xEF,0x90,0x00,0x0B,0xE0,0x04,0xF0,0x70,
+0x06,0x90,0x00,0x0A,0xE0,0x04,0xF0,0x90,
+0x00,0x0A,0xE0,0xFC,0xA3,0xE0,0xFD,0x7F,
+0xA4,0x7E,0x0B,0x12,0x03,0x69,0x7D,0x66,
+0x7C,0x00,0x7F,0x11,0x7E,0x13,0x12,0x03,
+0x69,0x7D,0x66,0x7C,0x10,0x7F,0x11,0x7E,
+0x13,0x12,0x03,0x69,0x7F,0x9D,0x7E,0x1D,
+0x12,0x03,0x85,0xEE,0x30,0xE0,0xF5,0xE4,
+0xFF,0xFE,0x0F,0xBF,0x00,0x01,0x0E,0xBE,
+0x0B,0xF8,0xBF,0xB8,0xF5,0x7D,0x02,0x7C,
+0x00,0x7F,0x3D,0x7E,0x13,0x12,0x03,0x69,
+0xE4,0xFE,0xFF,0x0F,0xBF,0x00,0x01,0x0E,
+0xBE,0x0B,0xF8,0xBF,0xB8,0xF5,0x90,0x00,
+0x12,0x74,0x14,0xF0,0xA3,0x74,0x01,0xF0,
+0xE4,0xFB,0xFA,0xFD,0xFC,0x7F,0x01,0xFE,
+0x12,0x02,0xBB,0x90,0x00,0x12,0x74,0x14,
+0xF0,0xA3,0x74,0x03,0xF0,0xE4,0xFB,0xFA,
+0xFD,0xFC,0x7F,0x01,0xFE,0x12,0x02,0xBB,
+0xE4,0xFE,0xFF,0x0F,0xBF,0x00,0x01,0x0E,
+0xBE,0x75,0xF8,0xBF,0x30,0xF5,0xE4,0xFE,
+0xFF,0x0F,0xBF,0x00,0x01,0x0E,0xBE,0x75,
+0xF8,0xBF,0x30,0xF5,0xE4,0xFD,0xFC,0x7F,
+0x3D,0x7E,0x13,0x12,0x03,0x69,0xE4,0xFE,
+0xFF,0x0F,0xBF,0x00,0x01,0x0E,0xBE,0x0B,
+0xF8,0xBF,0xB8,0xF5,0x7D,0x66,0x7C,0x00,
+0x7F,0x11,0x7E,0x13,0x12,0x03,0x69,0x22,
+0x90,0x00,0x0A,0xEE,0xF0,0xA3,0xEF,0xF0,
+0xE4,0xFF,0x0F,0x7E,0x62,0x12,0x03,0x85,
+0x7F,0x01,0x7E,0x62,0x12,0x03,0x85,0xEF,
+0x30,0xE2,0x03,0x02,0x01,0xDA,0x90,0x00,
+0x0B,0xE0,0x04,0xF0,0x70,0x06,0x90,0x00,
+0x0A,0xE0,0x04,0xF0,0x90,0x00,0x0A,0xE0,
+0xFC,0xA3,0xE0,0xFD,0x7F,0xA3,0x7E,0x0B,
+0x12,0x03,0x69,0x7D,0x66,0x7C,0x00,0x7F,
+0xC4,0x7E,0x13,0x12,0x03,0x69,0x7D,0x66,
+0x7C,0x10,0x7F,0xC4,0x7E,0x13,0x12,0x03,
+0x69,0x7F,0x9D,0x7E,0x1D,0x12,0x03,0x85,
+0xEE,0x30,0xE1,0xF5,0xE4,0xFF,0xFE,0x0F,
+0xBF,0x00,0x01,0x0E,0xBE,0x0B,0xF8,0xBF,
+0xB8,0xF5,0x7D,0x02,0x7C,0x00,0x7F,0x3D,
+0x7E,0x13,0x12,0x03,0x69,0xE4,0xFE,0xFF,
+0x0F,0xBF,0x00,0x01,0x0E,0xBE,0x0B,0xF8,
+0xBF,0xB8,0xF5,0x90,0x00,0x12,0x74,0x14,
+0xF0,0xA3,0x74,0x01,0xF0,0xE4,0xFB,0xFA,
+0xFD,0xFC,0xFF,0xFE,0x12,0x02,0xBB,0x90,
+0x00,0x12,0x74,0x14,0xF0,0xA3,0x74,0x03,
+0xF0,0xE4,0xFB,0xFA,0xFD,0xFC,0xFF,0xFE,
+0x12,0x02,0xBB,0xE4,0xFE,0xFF,0x0F,0xBF,
+0x00,0x01,0x0E,0xBE,0x75,0xF8,0xBF,0x30,
+0xF5,0xE4,0xFE,0xFF,0x0F,0xBF,0x00,0x01,
+0x0E,0xBE,0x75,0xF8,0xBF,0x30,0xF5,0xE4,
+0xFD,0xFC,0x7F,0x3D,0x7E,0x13,0x12,0x03,
+0x69,0xE4,0xFE,0xFF,0x0F,0xBF,0x00,0x01,
+0x0E,0xBE,0x0B,0xF8,0xBF,0xB8,0xF5,0x7D,
+0x66,0x7C,0x00,0x7F,0xC4,0x7E,0x13,0x12,
+0x03,0x69,0x22,0xE4,0x90,0x00,0x00,0xF0,
+0xA3,0xF0,0xA3,0xF0,0xA3,0xF0,0xA3,0xF0,
+0xA3,0xF0,0xA3,0xF0,0xA3,0xF0,0x7D,0x51,
+0xFC,0x7F,0x36,0x7E,0x13,0x12,0x03,0x69,
+0xE4,0x90,0x00,0x08,0xF0,0xA3,0xF0,0x90,
+0x00,0x09,0xE0,0x04,0xF0,0x70,0x06,0x90,
+0x00,0x08,0xE0,0x04,0xF0,0x90,0x00,0x08,
+0xE0,0x70,0x04,0xA3,0xE0,0x64,0x64,0x70,
+0xE6,0xE4,0x90,0x00,0x08,0xF0,0xA3,0xF0,
+0xE4,0xFF,0xFE,0x0F,0xBF,0x00,0x01,0x0E,
+0xEF,0x64,0x32,0x4E,0x70,0xF5,0x90,0x00,
+0x09,0xE0,0x04,0xF0,0x70,0x06,0x90,0x00,
+0x08,0xE0,0x04,0xF0,0x90,0x00,0x08,0xE0,
+0xB4,0x75,0xDD,0xA3,0xE0,0xB4,0x30,0xD8,
+0x7F,0x59,0x7E,0x1B,0x12,0x03,0x85,0xEF,
+0x4E,0x70,0xC6,0x7F,0x92,0x7E,0x1D,0x12,
+0x03,0x85,0x90,0x00,0x06,0xEE,0xF0,0xA3,
+0xEF,0xF0,0x64,0x07,0x60,0x0A,0xEF,0x64,
+0x05,0x60,0x05,0xEF,0x64,0x04,0x70,0x19,
+0x90,0x00,0x01,0xE0,0x04,0xF0,0x70,0x06,
+0x90,0x00,0x00,0xE0,0x04,0xF0,0x90,0x00,
+0x00,0xE0,0xFE,0xA3,0xE0,0xFF,0x12,0x00,
+0xF0,0x90,0x00,0x06,0xE0,0xFF,0x64,0x07,
+0x60,0x0D,0xEF,0x64,0x05,0x60,0x08,0xEF,
+0x64,0x04,0x60,0x03,0x02,0x02,0x19,0x90,
+0x00,0x03,0xE0,0x04,0xF0,0x70,0x06,0x90,
+0x00,0x02,0xE0,0x04,0xF0,0x90,0x00,0x02,
+0xE0,0xFE,0xA3,0xE0,0xFF,0x12,0x00,0x03,
+0x02,0x02,0x19,0x90,0x00,0x0C,0xEE,0xF0,
+0xA3,0xEF,0xF0,0xE4,0x90,0x00,0x14,0xF0,
+0xA3,0xF0,0xA3,0xF0,0xA3,0xF0,0xAE,0x02,
+0xEB,0x78,0x05,0xC3,0x33,0xCE,0x33,0xCE,
+0xD8,0xF9,0xFF,0xEE,0x4C,0xFE,0xEF,0x4D,
+0xFF,0x90,0x00,0x14,0xEE,0xF0,0xA3,0xEF,
+0xF0,0x90,0x00,0x0C,0xE0,0xFF,0xA3,0xE0,
+0x44,0xC0,0x90,0x00,0x16,0xCF,0xF0,0xA3,
+0xEF,0xF0,0x90,0x00,0x12,0xE0,0xFC,0xA3,
+0xE0,0xFD,0x7F,0x02,0x7E,0x66,0x12,0x03,
+0x69,0x90,0x00,0x14,0xE0,0xFC,0xA3,0xE0,
+0xFD,0x7F,0x01,0x7E,0x66,0x12,0x03,0x69,
+0x90,0x00,0x16,0xE0,0xFC,0xA3,0xE0,0xFD,
+0x7F,0x00,0x7E,0x66,0x12,0x03,0x69,0x90,
+0x00,0x18,0xE4,0xF0,0xA3,0x74,0x64,0xF0,
+0x90,0x00,0x18,0xE0,0x70,0x02,0xA3,0xE0,
+0x60,0x2C,0x7F,0x00,0x7E,0x66,0x12,0x03,
+0x85,0xEE,0x30,0xE0,0x18,0x90,0x00,0x19,
+0xE0,0x24,0xFF,0xF0,0x90,0x00,0x18,0xE0,
+0x34,0xFF,0xF0,0xE0,0x70,0x02,0xA3,0xE0,
+0x70,0xD6,0x7F,0xFF,0x22,0xE4,0x90,0x00,
+0x18,0xF0,0xA3,0xF0,0x80,0xCA,0x7F,0x00,
+0x22,0xAB,0x07,0xAA,0x06,0x8A,0xA2,0x00,
+0x8B,0xA3,0x00,0x8C,0xA4,0x00,0x8D,0xA5,
+0x00,0x75,0xA0,0x03,0x00,0x00,0xAA,0xA1,
+0x00,0xBA,0x00,0xF9,0x22,0xAD,0x07,0xAC,
+0x06,0x8C,0xA2,0x00,0x8D,0xA3,0x00,0x75,
+0xA0,0x01,0x00,0xAE,0xA1,0x00,0xBE,0x00,
+0xF9,0x00,0xAE,0xA6,0x00,0xAF,0xA7,0x22,
+0x78,0x7F,0xE4,0xF6,0xD8,0xFD,0x75,0x81,
+0x09,0x02,0x01,0xDB,};
+
+
+/* Function Name:
+ *      rtl8367c_setAsicPortUnknownDaBehavior
+ * Description:
+ *      Set UNDA behavior
+ * Input:
+ *      port        - port ID
+ *      behavior    - 0: flooding to unknown DA portmask; 1: drop; 2: trap; 3: flooding
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - Success
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_NOT_ALLOWED  - Invalid behavior
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_setAsicPortUnknownDaBehavior(rtk_uint32 port, rtk_uint32 behavior)
+{
+    if(port >= RTL8367C_PORTNO)
+        return RT_ERR_PORT_ID;
+
+    if(behavior >= L2_UNDA_BEHAVE_END)
+        return RT_ERR_NOT_ALLOWED;
+
+    if(port < 8)
+        return rtl8367c_setAsicRegBits(RTL8367C_REG_UNKNOWN_UNICAST_DA_PORT_BEHAVE, RTL8367C_Port0_ACTION_MASK << (port * 2), behavior);
+    else
+        return rtl8367c_setAsicRegBits(RTL8367C_REG_UNKNOWN_UNICAST_DA_PORT_BEHAVE_EXT, RTL8367C_PORT8_ACTION_MASK << ((port-8) * 2), behavior);
+}
+/* Function Name:
+ *      rtl8367c_getAsicPortUnknownDaBehavior
+ * Description:
+ *      Get UNDA behavior
+ * Input:
+ *      port        - port ID
+ * Output:
+ *      pBehavior   - 0: flooding to unknown DA portmask; 1: drop; 2: trap; 3: flooding
+ * Return:
+ *      RT_ERR_OK   - Success
+ *      RT_ERR_SMI  - SMI access error
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicPortUnknownDaBehavior(rtk_uint32 port, rtk_uint32 *pBehavior)
+{
+    if(port >= RTL8367C_PORTNO)
+        return RT_ERR_PORT_ID;
+
+    if(port < 8)
+        return rtl8367c_getAsicRegBits(RTL8367C_REG_UNKNOWN_UNICAST_DA_PORT_BEHAVE, RTL8367C_Port0_ACTION_MASK << (port * 2), pBehavior);
+    else
+        return rtl8367c_getAsicRegBits(RTL8367C_REG_UNKNOWN_UNICAST_DA_PORT_BEHAVE_EXT, RTL8367C_PORT8_ACTION_MASK << ((port-8) * 2), pBehavior);
+}
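+/*
+ * Usage sketch (illustrative only, not part of the vendor SDK): make port 3
+ * drop frames with an unknown unicast DA, then read the setting back. The
+ * behavior codes follow the mapping documented above (0: flood to the UNDA
+ * portmask, 1: drop, 2: trap, 3: flood).
+ *
+ *   rtk_uint32 behavior;
+ *   if (rtl8367c_setAsicPortUnknownDaBehavior(3, 1) != RT_ERR_OK)
+ *       return RT_ERR_FAILED;
+ *   if (rtl8367c_getAsicPortUnknownDaBehavior(3, &behavior) != RT_ERR_OK)
+ *       return RT_ERR_FAILED;
+ */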
+/* Function Name:
+ *      rtl8367c_setAsicPortUnknownSaBehavior
+ * Description:
+ *      Set UNSA behavior
+ * Input:
+ *      behavior    - 0: flooding; 1: drop; 2: trap
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - Success
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_NOT_ALLOWED  - Invalid behavior
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_setAsicPortUnknownSaBehavior(rtk_uint32 behavior)
+{
+    if(behavior >= L2_BEHAVE_SA_END)
+        return RT_ERR_NOT_ALLOWED;
+
+    return rtl8367c_setAsicRegBits(RTL8367C_PORT_SECURIT_CTRL_REG, RTL8367C_UNKNOWN_SA_BEHAVE_MASK, behavior);
+}
+/* Function Name:
+ *      rtl8367c_getAsicPortUnknownSaBehavior
+ * Description:
+ *      Get UNSA behavior
+ * Input:
+ *      None
+ * Output:
+ *      pBehavior   - 0: flooding; 1: drop; 2: trap
+ * Return:
+ *      RT_ERR_OK   - Success
+ *      RT_ERR_SMI  - SMI access error
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicPortUnknownSaBehavior(rtk_uint32 *pBehavior)
+{
+    return rtl8367c_getAsicRegBits(RTL8367C_PORT_SECURIT_CTRL_REG, RTL8367C_UNKNOWN_SA_BEHAVE_MASK, pBehavior);
+}
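+/*
+ * Usage sketch (illustrative only, not part of the vendor SDK): trap frames
+ * carrying an unknown source MAC to the CPU, using code 2 from the mapping
+ * documented above. The setting is global rather than per port, as the
+ * function takes no port argument.
+ *
+ *   if (rtl8367c_setAsicPortUnknownSaBehavior(2) != RT_ERR_OK)
+ *       return RT_ERR_FAILED;
+ */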
+/* Function Name:
+ *      rtl8367c_setAsicPortUnmatchedSaBehavior
+ * Description:
+ *      Set Unmatched SA behavior
+ * Input:
+ *      behavior    - 0: flooding; 1: drop; 2: trap
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - Success
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_NOT_ALLOWED  - Invalid behavior
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_setAsicPortUnmatchedSaBehavior(rtk_uint32 behavior)
+{
+    if(behavior >= L2_BEHAVE_SA_END)
+        return RT_ERR_NOT_ALLOWED;
+
+    return rtl8367c_setAsicRegBits(RTL8367C_PORT_SECURIT_CTRL_REG, RTL8367C_UNMATCHED_SA_BEHAVE_MASK, behavior);
+}
+/* Function Name:
+ *      rtl8367c_getAsicPortUnmatchedSaBehavior
+ * Description:
+ *      Get Unmatched SA behavior
+ * Input:
+ *      None
+ * Output:
+ *      pBehavior   - 0: flooding; 1: drop; 2: trap
+ * Return:
+ *      RT_ERR_OK   - Success
+ *      RT_ERR_SMI  - SMI access error
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicPortUnmatchedSaBehavior(rtk_uint32 *pBehavior)
+{
+    return rtl8367c_getAsicRegBits(RTL8367C_PORT_SECURIT_CTRL_REG, RTL8367C_UNMATCHED_SA_BEHAVE_MASK, pBehavior);
+}
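+/*
+ * Usage sketch (illustrative only, not part of the vendor SDK): a simple
+ * port-security policy that drops frames whose source MAC was learned on a
+ * different port, using code 1 (drop) from the mapping documented above.
+ *
+ *   if (rtl8367c_setAsicPortUnmatchedSaBehavior(1) != RT_ERR_OK)
+ *       return RT_ERR_FAILED;
+ */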
+
+/* Function Name:
+ *      rtl8367c_setAsicPortUnmatchedSaMoving
+ * Description:
+ *      Set Unmatched SA moving state
+ * Input:
+ *      port        - Port ID
+ *      enabled     - 0: can't move to new port; 1: can move to new port
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - Success
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_PORT_ID      - Error Port ID
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_setAsicPortUnmatchedSaMoving(rtk_uint32 port, rtk_uint32 enabled)
+{
+    if(port >= RTL8367C_PORTNO)
+        return RT_ERR_PORT_ID;
+
+    return rtl8367c_setAsicRegBit(RTL8367C_REG_L2_SA_MOVING_FORBID, port, (enabled == 1) ? 0 : 1);
+}
+
+/* Function Name:
+ *      rtl8367c_getAsicPortUnmatchedSaMoving
+ * Description:
+ *      Get Unmatched SA moving state
+ * Input:
+ *      port        - Port ID
+ * Output:
+ *      pEnabled    - 0: can't move to new port; 1: can move to new port
+ * Return:
+ *      RT_ERR_OK           - Success
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_PORT_ID      - Error Port ID
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicPortUnmatchedSaMoving(rtk_uint32 port, rtk_uint32 *pEnabled)
+{
+    rtk_uint32 data;
+    ret_t retVal;
+
+    if(port >= RTL8367C_PORTNO)
+        return RT_ERR_PORT_ID;
+
+    if((retVal = rtl8367c_getAsicRegBit(RTL8367C_REG_L2_SA_MOVING_FORBID, port, &data)) != RT_ERR_OK)
+        return retVal;
+
+    *pEnabled = (data == 1) ? 0 : 1;
+    return RT_ERR_OK;
+}
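+/*
+ * Usage sketch (illustrative only, not part of the vendor SDK): pin learned
+ * MAC addresses to port 2 by forbidding station movement, then read the
+ * state back. Note that the setter inverts the value before writing the
+ * FORBID register, so callers always pass 1 for "may move".
+ *
+ *   rtk_uint32 moving;
+ *   if (rtl8367c_setAsicPortUnmatchedSaMoving(2, 0) != RT_ERR_OK)
+ *       return RT_ERR_FAILED;
+ *   if (rtl8367c_getAsicPortUnmatchedSaMoving(2, &moving) != RT_ERR_OK)
+ *       return RT_ERR_FAILED;
+ */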
+
+/* Function Name:
+ *      rtl8367c_setAsicPortUnknownDaFloodingPortmask
+ * Description:
+ *      Set UNDA flooding portmask
+ * Input:
+ *      portmask    - portmask (0~0xFF)
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - Success
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_PORT_MASK    - Invalid portmask
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_setAsicPortUnknownDaFloodingPortmask(rtk_uint32 portmask)
+{
+    if(portmask > RTL8367C_PORTMASK)
+        return RT_ERR_PORT_MASK;
+
+    return rtl8367c_setAsicReg(RTL8367C_UNUCAST_FLOADING_PMSK_REG, portmask);
+}
+/* Function Name:
+ *      rtl8367c_getAsicPortUnknownDaFloodingPortmask
+ * Description:
+ *      Get UNDA flooding portmask
+ * Input:
+ *      None
+ * Output:
+ *      pPortmask   - portmask (0~0xFF)
+ * Return:
+ *      RT_ERR_OK   - Success
+ *      RT_ERR_SMI  - SMI access error
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicPortUnknownDaFloodingPortmask(rtk_uint32 *pPortmask)
+{
+    return rtl8367c_getAsicReg(RTL8367C_UNUCAST_FLOADING_PMSK_REG, pPortmask);
+}
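+/*
+ * Usage sketch (illustrative only, not part of the vendor SDK): flood
+ * unknown-unicast traffic to ports 0-3 only. The mask is one bit per port
+ * and must not exceed RTL8367C_PORTMASK.
+ *
+ *   if (rtl8367c_setAsicPortUnknownDaFloodingPortmask(0x0F) != RT_ERR_OK)
+ *       return RT_ERR_FAILED;
+ */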
+/* Function Name:
+ *      rtl8367c_setAsicPortUnknownMulticastFloodingPortmask
+ * Description:
+ *      Set UNMC flooding portmask
+ * Input:
+ *      portmask    - portmask (0~0xFF)
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - Success
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_PORT_MASK    - Invalid portmask
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_setAsicPortUnknownMulticastFloodingPortmask(rtk_uint32 portmask)
+{
+    if(portmask > RTL8367C_PORTMASK)
+        return RT_ERR_PORT_MASK;
+
+    return rtl8367c_setAsicReg(RTL8367C_UNMCAST_FLOADING_PMSK_REG, portmask);
+}
+/* Function Name:
+ *      rtl8367c_getAsicPortUnknownMulticastFloodingPortmask
+ * Description:
+ *      Get UNMC flooding portmask
+ * Input:
+ *      None
+ * Output:
+ *      pPortmask   - portmask (0~0xFF)
+ * Return:
+ *      RT_ERR_OK   - Success
+ *      RT_ERR_SMI  - SMI access error
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicPortUnknownMulticastFloodingPortmask(rtk_uint32 *pPortmask)
+{
+    return rtl8367c_getAsicReg(RTL8367C_UNMCAST_FLOADING_PMSK_REG, pPortmask);
+}
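+/*
+ * Usage sketch (illustrative only, not part of the vendor SDK): restrict
+ * unknown-multicast flooding to ports 4 and 5 (bitmap 0x30).
+ *
+ *   if (rtl8367c_setAsicPortUnknownMulticastFloodingPortmask(0x30) != RT_ERR_OK)
+ *       return RT_ERR_FAILED;
+ */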
+/* Function Name:
+ *      rtl8367c_setAsicPortBcastFloodingPortmask
+ * Description:
+ *      Set Bcast flooding portmask
+ * Input:
+ *      portmask    - portmask (0~0xFF)
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - Success
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_PORT_MASK    - Invalid portmask
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_setAsicPortBcastFloodingPortmask(rtk_uint32 portmask)
+{
+    if(portmask > RTL8367C_PORTMASK)
+        return RT_ERR_PORT_MASK;
+
+    return rtl8367c_setAsicReg(RTL8367C_BCAST_FLOADING_PMSK_REG, portmask);
+}
+/* Function Name:
+ *      rtl8367c_getAsicPortBcastFloodingPortmask
+ * Description:
+ *      Get Bcast flooding portmask
+ * Input:
+ *      None
+ * Output:
+ *      pPortmask   - portmask (0~0xFF)
+ * Return:
+ *      RT_ERR_OK   - Success
+ *      RT_ERR_SMI  - SMI access error
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicPortBcastFloodingPortmask(rtk_uint32 *pPortmask)
+{
+    return rtl8367c_getAsicReg(RTL8367C_BCAST_FLOADING_PMSK_REG, pPortmask);
+}
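+/*
+ * Usage sketch (illustrative only, not part of the vendor SDK): allow
+ * broadcast flooding on all eight ports (bitmap 0xFF).
+ *
+ *   if (rtl8367c_setAsicPortBcastFloodingPortmask(0xFF) != RT_ERR_OK)
+ *       return RT_ERR_FAILED;
+ */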
+/* Function Name:
+ *      rtl8367c_setAsicPortBlockSpa
+ * Description:
+ *      Set whether a frame is blocked or permitted when its source port and destination port are the same
+ * Input:
+ *      port    - Physical port number (0~7)
+ *      permit  - 0: block; 1: permit
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK       - Success
+ *      RT_ERR_SMI      - SMI access error
+ *      RT_ERR_PORT_ID  - Invalid port number
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_setAsicPortBlockSpa(rtk_uint32 port, rtk_uint32 permit)
+{
+    if(port >= RTL8367C_PORTNO)
+        return RT_ERR_PORT_ID;
+
+    return rtl8367c_setAsicRegBit(RTL8367C_SOURCE_PORT_BLOCK_REG, port, permit);
+}
+/* Function Name:
+ *      rtl8367c_getAsicPortBlockSpa
+ * Description:
+ *      Get whether a frame is blocked or permitted when its source port and destination port are the same
+ * Input:
+ *      port    - Physical port number (0~7)
+ * Output:
+ *      pPermit - 0: block; 1: permit
+ * Return:
+ *      RT_ERR_OK       - Success
+ *      RT_ERR_SMI      - SMI access error
+ *      RT_ERR_PORT_ID  - Invalid port number
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicPortBlockSpa(rtk_uint32 port, rtk_uint32* pPermit)
+{
+    if(port >= RTL8367C_PORTNO)
+        return RT_ERR_PORT_ID;
+
+    return rtl8367c_getAsicRegBit(RTL8367C_SOURCE_PORT_BLOCK_REG, port, pPermit);
+}
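+/*
+ * Usage sketch (illustrative only, not part of the vendor SDK): permit
+ * hairpin forwarding on port 0, i.e. a frame may be sent back out of the
+ * port it arrived on (1 = permit per the mapping documented above).
+ *
+ *   if (rtl8367c_setAsicPortBlockSpa(0, 1) != RT_ERR_OK)
+ *       return RT_ERR_FAILED;
+ */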
+/* Function Name:
+ *      rtl8367c_setAsicPortDos
+ * Description:
+ *      Set DoS (Denial of Service) attack prevention function
+ * Input:
+ *      type    - DOS type
+ *      drop    - 0: permit; 1: drop
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - Success
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_OUT_OF_RANGE - Invalid DOS type
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_setAsicPortDos(rtk_uint32 type, rtk_uint32 drop)
+{
+    if(type >= DOS_END)
+        return RT_ERR_OUT_OF_RANGE;
+
+    return rtl8367c_setAsicRegBit(RTL8367C_REG_DOS_CFG, RTL8367C_DROP_DAEQSA_OFFSET + type, drop);
+}
+/* Function Name:
+ *      rtl8367c_getAsicPortDos
+ * Description:
+ *      Get DoS (Denial of Service) attack prevention function
+ * Input:
+ *      type    - DOS type
+ * Output:
+ *      pDrop   - 0: permit; 1: drop
+ * Return:
+ *      RT_ERR_OK           - Success
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_OUT_OF_RANGE - Invalid DOS type
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicPortDos(rtk_uint32 type, rtk_uint32* pDrop)
+{
+    if(type >= DOS_END)
+        return RT_ERR_OUT_OF_RANGE;
+
+    return rtl8367c_getAsicRegBit(RTL8367C_REG_DOS_CFG, RTL8367C_DROP_DAEQSA_OFFSET + type, pDrop);
+}
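+/*
+ * Usage sketch (illustrative only, not part of the vendor SDK): enable one
+ * DoS check. Type 0 is assumed here to select the DA-equals-SA check, since
+ * the bit offsets above are based at RTL8367C_DROP_DAEQSA_OFFSET.
+ *
+ *   if (rtl8367c_setAsicPortDos(0, 1) != RT_ERR_OK)
+ *       return RT_ERR_FAILED;
+ */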
+/* Function Name:
+ *      rtl8367c_setAsicPortForceLink
+ * Description:
+ *      Set port force linking configuration
+ * Input:
+ *      port        - Physical port number (0~7)
+ *      pPortAbility - port ability configuration
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK       - Success
+ *      RT_ERR_SMI      - SMI access error
+ *      RT_ERR_PORT_ID  - Invalid port number
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_setAsicPortForceLink(rtk_uint32 port, rtl8367c_port_ability_t *pPortAbility)
+{
+    rtk_uint32 regData = 0;
+
+    /* Invalid input parameter */
+    if(port >= RTL8367C_PORTNO)
+        return RT_ERR_PORT_ID;
+
+    regData |= pPortAbility->forcemode << 12;
+    regData |= pPortAbility->mstfault << 9;
+    regData |= pPortAbility->mstmode << 8;
+    regData |= pPortAbility->nway << 7;
+    regData |= pPortAbility->txpause << 6;
+    regData |= pPortAbility->rxpause << 5;
+    regData |= pPortAbility->link << 4;
+    regData |= pPortAbility->duplex << 2;
+    regData |= pPortAbility->speed;
+
+    return rtl8367c_setAsicReg(RTL8367C_REG_MAC0_FORCE_SELECT+port, regData);
+}
+/* Function Name:
+ *      rtl8367c_getAsicPortForceLink
+ * Description:
+ *      Get port force linking configuration
+ * Input:
+ *      port        - Physical port number (0~7)
+ * Output:
+ *      pPortAbility - port ability configuration
+ * Return:
+ *      RT_ERR_OK       - Success
+ *      RT_ERR_SMI      - SMI access error
+ *      RT_ERR_PORT_ID  - Invalid port number
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicPortForceLink(rtk_uint32 port, rtl8367c_port_ability_t *pPortAbility)
+{
+    ret_t retVal;
+    rtk_uint32 regData;
+
+    /* Invalid input parameter */
+    if(port >= RTL8367C_PORTNO)
+        return RT_ERR_PORT_ID;
+
+    retVal = rtl8367c_getAsicReg(RTL8367C_REG_MAC0_FORCE_SELECT + port, &regData);
+    if(retVal != RT_ERR_OK)
+        return retVal;
+
+    pPortAbility->forcemode = (regData >> 12) & 0x0001;
+    pPortAbility->mstfault  = (regData >> 9) & 0x0001;
+    pPortAbility->mstmode   = (regData >> 8) & 0x0001;
+    pPortAbility->nway      = (regData >> 7) & 0x0001;
+    pPortAbility->txpause   = (regData >> 6) & 0x0001;
+    pPortAbility->rxpause   = (regData >> 5) & 0x0001;
+    pPortAbility->link      = (regData >> 4) & 0x0001;
+    pPortAbility->duplex    = (regData >> 2) & 0x0001;
+    pPortAbility->speed     = regData & 0x0003;
+
+    return RT_ERR_OK;
+}
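+/*
+ * Usage sketch (illustrative only, not part of the vendor SDK): force port 5
+ * into a fixed 1000M full-duplex link-up state with pause in both
+ * directions, a typical CPU-port setting. The field values are assumptions
+ * based on the bit layout encoded above (speed code 2 taken as 1000M,
+ * duplex 1 as full).
+ *
+ *   rtl8367c_port_ability_t ability;
+ *   memset(&ability, 0, sizeof(ability));
+ *   ability.forcemode = 1;
+ *   ability.link      = 1;
+ *   ability.duplex    = 1;
+ *   ability.speed     = 2;
+ *   ability.txpause   = 1;
+ *   ability.rxpause   = 1;
+ *   if (rtl8367c_setAsicPortForceLink(5, &ability) != RT_ERR_OK)
+ *       return RT_ERR_FAILED;
+ */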
+/* Function Name:
+ *      rtl8367c_getAsicPortStatus
+ * Description:
+ *      Get port link status
+ * Input:
+ *      port        - Physical port number (0~7)
+ *      pPortAbility - port ability configuration
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK       - Success
+ *      RT_ERR_SMI      - SMI access error
+ *      RT_ERR_PORT_ID  - Invalid port number
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicPortStatus(rtk_uint32 port, rtl8367c_port_status_t *pPortStatus)
+{
+    ret_t retVal;
+    rtk_uint32 regData;
+
+    /* Invalid input parameter */
+    if(port >= RTL8367C_PORTNO)
+        return RT_ERR_PORT_ID;
+
+    retVal = rtl8367c_getAsicReg(RTL8367C_REG_PORT0_STATUS+port,&regData);
+    if(retVal != RT_ERR_OK)
+        return retVal;
+
+    pPortStatus->lpi1000  = (regData >> 11) & 0x0001;
+    pPortStatus->lpi100   = (regData >> 10) & 0x0001;
+    pPortStatus->mstfault = (regData >> 9) & 0x0001;
+    pPortStatus->mstmode  = (regData >> 8) & 0x0001;
+    pPortStatus->nway     = (regData >> 7) & 0x0001;
+    pPortStatus->txpause  = (regData >> 6) & 0x0001;
+    pPortStatus->rxpause  = (regData >> 5) & 0x0001;
+    pPortStatus->link     = (regData >> 4) & 0x0001;
+    pPortStatus->duplex   = (regData >> 2) & 0x0001;
+    pPortStatus->speed    = regData  & 0x0003;
+
+    return RT_ERR_OK;
+}
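+/*
+ * Usage sketch (illustrative only, not part of the vendor SDK): poll the
+ * live state of port 1. setup_mac_for_speed() is a hypothetical caller-side
+ * helper, and the speed field is the raw 2-bit code decoded above.
+ *
+ *   rtl8367c_port_status_t status;
+ *   if (rtl8367c_getAsicPortStatus(1, &status) != RT_ERR_OK)
+ *       return RT_ERR_FAILED;
+ *   if (status.link)
+ *       setup_mac_for_speed(status.speed, status.duplex);
+ */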
+/* Function Name:
+ *      rtl8367c_setAsicPortForceLinkExt
+ * Description:
+ *      Set external interface force linking configuration
+ * Input:
+ *      id          - external interface id (0~2)
+ *      pPortAbility - port ability configuration
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - Success
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_OUT_OF_RANGE - input parameter out of range
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_setAsicPortForceLinkExt(rtk_uint32 id, rtl8367c_port_ability_t *pPortAbility)
+{
+    rtk_uint32 retVal, regValue, regValue2, type, sgmiibit, hisgmiibit;
+    rtk_uint32 reg_data = 0;
+    rtk_uint32 i = 0;
+
+    /* Invalid input parameter */
+    if(id >= RTL8367C_EXTNO)
+        return RT_ERR_OUT_OF_RANGE;
+
+    reg_data |= pPortAbility->forcemode << 12;
+    reg_data |= pPortAbility->mstfault << 9;
+    reg_data |= pPortAbility->mstmode << 8;
+    reg_data |= pPortAbility->nway << 7;
+    reg_data |= pPortAbility->txpause << 6;
+    reg_data |= pPortAbility->rxpause << 5;
+    reg_data |= pPortAbility->link << 4;
+    reg_data |= pPortAbility->duplex << 2;
+    reg_data |= pPortAbility->speed;
+
+    if((retVal = rtl8367c_setAsicReg(0x13C2, 0x0249)) != RT_ERR_OK)
+        return retVal;
+    /* get chip ID */
+    if((retVal = rtl8367c_getAsicReg(0x1300, &regValue)) != RT_ERR_OK)
+        return retVal;
+
+    if((retVal = rtl8367c_setAsicReg(0x13C2, 0x0000)) != RT_ERR_OK)
+        return retVal;
+
+    type = 0;
+
+    switch (regValue)
+    {
+        case 0x0276:
+        case 0x0597:
+        case 0x6367:
+            type = 1;
+            break;
+        case 0x0652:
+        case 0x6368:
+            type = 2;
+            break;
+        case 0x0801:
+        case 0x6511:
+            type = 3;
+            break;
+        default:
+            return RT_ERR_FAILED;
+    }
+
+    if (1 == type)
+    {
+        if(1 == id)
+        {
+            if ((retVal = rtl8367c_getAsicReg(RTL8367C_REG_REG_TO_ECO4, &regValue)) != RT_ERR_OK)
+                return retVal;
+
+            if((regValue & (0x0001 << 5)) && (regValue & (0x0001 << 7)))
+            {
+                return RT_ERR_OK;
+            }
+
+            if((retVal = rtl8367c_setAsicRegBit(RTL8367C_REG_SDS_MISC, RTL8367C_CFG_SGMII_FDUP_OFFSET, pPortAbility->duplex)) != RT_ERR_OK)
+                return retVal;
+
+            if((retVal = rtl8367c_setAsicRegBits(RTL8367C_REG_SDS_MISC, RTL8367C_CFG_SGMII_SPD_MASK, pPortAbility->speed)) != RT_ERR_OK)
+                return retVal;
+
+            if((retVal = rtl8367c_setAsicRegBit(RTL8367C_REG_SDS_MISC, RTL8367C_CFG_SGMII_LINK_OFFSET, pPortAbility->link)) != RT_ERR_OK)
+                return retVal;
+
+            if((retVal = rtl8367c_setAsicRegBit(RTL8367C_REG_SDS_MISC, RTL8367C_CFG_SGMII_TXFC_OFFSET, pPortAbility->txpause)) != RT_ERR_OK)
+                return retVal;
+
+            if((retVal = rtl8367c_setAsicRegBit(RTL8367C_REG_SDS_MISC, RTL8367C_CFG_SGMII_RXFC_OFFSET, pPortAbility->rxpause)) != RT_ERR_OK)
+                return retVal;
+        }
+
+        if(0 == id || 1 == id)
+            return rtl8367c_setAsicReg(RTL8367C_REG_DIGITAL_INTERFACE0_FORCE + id, reg_data);
+        else
+            return rtl8367c_setAsicReg(RTL8367C_REG_DIGITAL_INTERFACE2_FORCE, reg_data);
+    }
+    else if (2 == type)
+    {
+        if (1 == id)
+        {
+             if((retVal = rtl8367c_setAsicRegBit(0x1311, 2, pPortAbility->duplex)) != RT_ERR_OK)
+                return retVal;
+
+            if((retVal = rtl8367c_setAsicRegBits(0x1311, 0x3, pPortAbility->speed)) != RT_ERR_OK)
+                return retVal;
+
+            if((retVal = rtl8367c_setAsicRegBit(0x1311, 4, pPortAbility->link)) != RT_ERR_OK)
+                return retVal;
+
+            if((retVal = rtl8367c_setAsicRegBit(0x1311, 6, pPortAbility->txpause)) != RT_ERR_OK)
+                return retVal;
+
+            if((retVal = rtl8367c_setAsicRegBit(0x1311, 5, pPortAbility->rxpause)) != RT_ERR_OK)
+                return retVal;
+
+            if((retVal = rtl8367c_setAsicRegBit(0x1311, 12, pPortAbility->forcemode)) != RT_ERR_OK)
+                return retVal;
+
+            if (pPortAbility->link == 1)
+            {
+                if((retVal = rtl8367c_setAsicRegBit(0x1311, 4, 0)) != RT_ERR_OK)
+                    return retVal;
+
+                if((retVal = rtl8367c_setAsicRegBit(0x1311, 4, 1)) != RT_ERR_OK)
+                    return retVal;
+            }
+            else
+            {
+                if((retVal = rtl8367c_setAsicRegBits(0x1311, 0x3, 2)) != RT_ERR_OK)
+                    return retVal;
+            }
+
+            if((retVal = rtl8367c_setAsicRegBit(RTL8367C_REG_SDS_MISC, RTL8367C_CFG_SGMII_FDUP_OFFSET, pPortAbility->duplex)) != RT_ERR_OK)
+                return retVal;
+
+            if((retVal = rtl8367c_setAsicRegBits(RTL8367C_REG_SDS_MISC, RTL8367C_CFG_SGMII_SPD_MASK, pPortAbility->speed)) != RT_ERR_OK)
+                return retVal;
+
+            if((retVal = rtl8367c_setAsicRegBit(RTL8367C_REG_SDS_MISC, RTL8367C_CFG_SGMII_LINK_OFFSET, pPortAbility->link)) != RT_ERR_OK)
+                return retVal;
+
+            if((retVal = rtl8367c_setAsicRegBit(RTL8367C_REG_SDS_MISC, RTL8367C_CFG_SGMII_TXFC_OFFSET, pPortAbility->txpause)) != RT_ERR_OK)
+                return retVal;
+
+            if((retVal = rtl8367c_setAsicRegBit(RTL8367C_REG_SDS_MISC, RTL8367C_CFG_SGMII_RXFC_OFFSET, pPortAbility->rxpause)) != RT_ERR_OK)
+                return retVal;
+        }
+        else if (2 == id)
+        {
+            if((retVal = rtl8367c_setAsicRegBit(0x13c4, 2, pPortAbility->duplex)) != RT_ERR_OK)
+                return retVal;
+
+            if((retVal = rtl8367c_setAsicRegBits(0x13c4, 0x3, pPortAbility->speed)) != RT_ERR_OK)
+                return retVal;
+
+            if((retVal = rtl8367c_setAsicRegBit(0x13c4, 4, pPortAbility->link)) != RT_ERR_OK)
+                return retVal;
+
+            if((retVal = rtl8367c_setAsicRegBit(0x13c4, 6, pPortAbility->txpause)) != RT_ERR_OK)
+                return retVal;
+
+            if((retVal = rtl8367c_setAsicRegBit(0x13c4, 5, pPortAbility->rxpause)) != RT_ERR_OK)
+                return retVal;
+
+            if((retVal = rtl8367c_setAsicRegBit(0x13c4, 12, pPortAbility->forcemode)) != RT_ERR_OK)
+                return retVal;
+
+            if (pPortAbility->link == 1)
+            {
+                if((retVal = rtl8367c_setAsicRegBit(0x13c4, 4, 0)) != RT_ERR_OK)
+                    return retVal;
+
+                if((retVal = rtl8367c_setAsicRegBit(0x13c4, 4, 1)) != RT_ERR_OK)
+                    return retVal;
+            }
+            else
+            {
+                if((retVal = rtl8367c_setAsicRegBits(0x13c4, 0x3, 2)) != RT_ERR_OK)
+                    return retVal;
+            }
+
+            if((retVal = rtl8367c_setAsicRegBit(0x1dc1, RTL8367C_CFG_SGMII_FDUP_OFFSET, pPortAbility->duplex)) != RT_ERR_OK)
+                return retVal;
+
+            if((retVal = rtl8367c_setAsicRegBits(0x1dc1, RTL8367C_CFG_SGMII_SPD_MASK, pPortAbility->speed)) != RT_ERR_OK)
+                return retVal;
+
+            if((retVal = rtl8367c_setAsicRegBit(0x1dc1, RTL8367C_CFG_SGMII_LINK_OFFSET, pPortAbility->link)) != RT_ERR_OK)
+                return retVal;
+
+            if((retVal = rtl8367c_setAsicRegBit(0x1dc1, RTL8367C_CFG_SGMII_TXFC_OFFSET, pPortAbility->txpause)) != RT_ERR_OK)
+                return retVal;
+
+            if((retVal = rtl8367c_setAsicRegBit(0x1dc1, RTL8367C_CFG_SGMII_RXFC_OFFSET, pPortAbility->rxpause)) != RT_ERR_OK)
+                return retVal;
+        }
+
+    }
+    else if(3 == type)
+    {
+        if(1 == id)
+        {
+            if((retVal = rtl8367c_getAsicRegBit(0x1d11, 6, &sgmiibit)) != RT_ERR_OK)
+                return retVal;
+            if((retVal = rtl8367c_getAsicRegBit(0x1d11, 11, &hisgmiibit)) != RT_ERR_OK)
+                return retVal;
+
+            if ((sgmiibit == 1) || (hisgmiibit == 1))
+            {
+                /* for 1000X/100FX/1000X_100FX, parameters have to be written to the SerDes registers */
+                if((retVal = rtl8367c_getAsicReg(0x1d41, &regValue)) != RT_ERR_OK)
+                    return retVal;
+
+                if((regValue & 0xa0) == 0xa0)
+                {
+
+                    if((retVal = rtl8367c_getAsicRegBits(0x1d95, 0x1f00, &regValue2)) != RT_ERR_OK)
+                        return retVal;
+
+                     /*1000X*/
+                    if(regValue2 == 0x4)
+                    {
+#if 0
+                        /* new_cfg_sds_mode:reset mode */
+                        if((retVal = rtl8367c_setAsicRegBits(0x1d95, 0x1f00, 0x1f)) != RT_ERR_OK)
+                            return retVal;
+#endif
+                        /* Enable new sds mode config */
+                        if((retVal = rtl8367c_setAsicRegBit(0x1d95, 13, 1)) != RT_ERR_OK)
+                            return retVal;
+
+                        /* 0 4 0  bit 12  set 1,  bit15~13 = 4*/
+                        if((retVal = rtl8367c_getAsicSdsReg(0, 4, 0, &reg_data)) != RT_ERR_OK)
+                            return retVal;
+                        reg_data &= 0xFFFF0FFF;
+                        reg_data |= 0x9000;
+                        if((retVal = rtl8367c_setAsicSdsReg(0,4,0, reg_data)) != RT_ERR_OK)
+                            return retVal;
+
+                        /* 0 0 2  bit 6  set 1,  bit13 set to 0, bit12 nway_en*/
+                        if((retVal = rtl8367c_getAsicSdsReg(0, 0, 2, &reg_data)) != RT_ERR_OK)
+                            return retVal;
+                        reg_data &= 0xFFFFDFFF;
+                        reg_data |= 0x40;
+                        if(pPortAbility->forcemode)
+                            reg_data &= 0xffffefff;
+                        else
+                            reg_data |= 0x1000;
+
+                        if((retVal = rtl8367c_setAsicSdsReg(0,0,2, reg_data)) != RT_ERR_OK)
+                            return retVal;
+
+                        /* 0 4 2  bit 8  rx pause,  bit7 tx pause*/
+                        if((retVal = rtl8367c_getAsicSdsReg(0, 4, 2, &reg_data)) != RT_ERR_OK)
+                            return retVal;
+
+                        if (pPortAbility->txpause)
+                            reg_data |= 0x80;
+                        else
+                            reg_data &= (~0x80);
+
+                        if (pPortAbility->rxpause)
+                            reg_data |= 0x100;
+                        else
+                            reg_data &= (~0x100);
+
+                        if((retVal = rtl8367c_setAsicSdsReg(0,4,2, reg_data)) != RT_ERR_OK)
+                            return retVal;
+
+                         /* 0 4 0  bit 12  set 0*/
+                        if((retVal = rtl8367c_getAsicSdsReg(0, 4, 0, &reg_data)) != RT_ERR_OK)
+                            return retVal;
+                        reg_data &= 0xFFFFEFFF;
+                        if((retVal = rtl8367c_setAsicSdsReg(0,4,0, reg_data)) != RT_ERR_OK)
+                            return retVal;
+
+                        /*new_cfg_sds_mode=1000x*/
+                        if((retVal = rtl8367c_setAsicRegBits(0x1d95, 0x1f00, 0x4)) != RT_ERR_OK)
+                            return retVal;
+
+                    }
+                    else if(regValue2 == 0x5)
+                    {
+#if 0
+                        /*100FX*/
+                        if((retVal = rtl8367c_setAsicRegBits(0x1d95, 0x1f00, 0x1f)) != RT_ERR_OK)
+                            return retVal;
+#endif
+
+                        if((retVal = rtl8367c_setAsicRegBit(0x1d95, 13, 1)) != RT_ERR_OK)
+                            return retVal;
+
+                        /* 0 4 0  bit 12  set 1,  bit15~13 = 5*/
+                        if((retVal = rtl8367c_getAsicSdsReg(0, 4, 0, &reg_data)) != RT_ERR_OK)
+                            return retVal;
+                        reg_data &= 0xFFFF0FFF;
+                        reg_data |= 0xB000;
+                        if((retVal = rtl8367c_setAsicSdsReg(0,4,0, reg_data)) != RT_ERR_OK)
+                            return retVal;
+
+                        /* 0 0 2  bit 6  set 0,  bit13 set to 1, bit12 0*/
+                        if((retVal = rtl8367c_getAsicSdsReg(0, 0, 2, &reg_data)) != RT_ERR_OK)
+                            return retVal;
+                        reg_data &= 0xFFFFFFBF;
+                        reg_data |= 0x2000;
+                        reg_data &= 0xffffefff;
+
+                        if((retVal = rtl8367c_setAsicSdsReg(0,0,2, reg_data)) != RT_ERR_OK)
+                            return retVal;
+
+                        /* 0 4 2  bit 8  rx pause,  bit7 tx pause*/
+                        if((retVal = rtl8367c_getAsicSdsReg(0, 4, 2, &reg_data)) != RT_ERR_OK)
+                            return retVal;
+                        if (pPortAbility->txpause)
+                            reg_data |= 0x80;
+                        else
+                            reg_data &= (~0x80);
+                        if (pPortAbility->rxpause)
+                            reg_data |= 0x100;
+                        else
+                            reg_data &= (~0x100);
+                        if((retVal = rtl8367c_setAsicSdsReg(0,4,2, reg_data)) != RT_ERR_OK)
+                            return retVal;
+
+                         /* 0 4 0  bit 12  set 0*/
+                        if((retVal = rtl8367c_getAsicSdsReg(0, 4, 0, &reg_data)) != RT_ERR_OK)
+                            return retVal;
+                        reg_data &= 0xFFFFEFFF;
+                        if((retVal = rtl8367c_setAsicSdsReg(0,4,0, reg_data)) != RT_ERR_OK)
+                            return retVal;
+                        /* new_cfg_sds_mode=100fx */
+                        if((retVal = rtl8367c_setAsicRegBits(0x1d95, 0x1f00, 0x5)) != RT_ERR_OK)
+                            return retVal;
+
+                    }
+                    else if(regValue2 == 0x7)
+                    {
+#if 0
+                        /*1000X/100FX auto*/
+                        if((retVal = rtl8367c_setAsicRegBits(0x1d95, 0x1f00, 0x1f)) != RT_ERR_OK)
+                            return retVal;
+#endif
+                        if((retVal = rtl8367c_setAsicRegBit(0x1d95, 13, 1)) != RT_ERR_OK)
+                            return retVal;
+
+                        /* 0 4 0  bit 12  set 1,  bit15~13 = 4*/
+                        if((retVal = rtl8367c_getAsicSdsReg(0, 4, 0, &reg_data)) != RT_ERR_OK)
+                            return retVal;
+                        reg_data &= 0xFFFF0FFF;
+                        reg_data |= 0x9000;
+                        if((retVal = rtl8367c_setAsicSdsReg(0,4,0, reg_data)) != RT_ERR_OK)
+                            return retVal;
+
+                        /* 0 0 2  bit 6  set 1,  bit13 set to 0, bit12 nway_en*/
+                        if((retVal = rtl8367c_getAsicSdsReg(0, 0, 2, &reg_data)) != RT_ERR_OK)
+                            return retVal;
+                        reg_data &= 0xFFFFDFFF;
+                        reg_data |= 0x40;
+                        if(pPortAbility->forcemode)
+                            reg_data &= 0xffffefff;
+                        else
+                            reg_data |= 0x1000;
+
+                        if((retVal = rtl8367c_setAsicSdsReg(0,0,2, reg_data)) != RT_ERR_OK)
+                            return retVal;
+
+                        /* 0 4 2  bit 8  rx pause,  bit7 tx pause*/
+                        if((retVal = rtl8367c_getAsicSdsReg(0, 4, 2, &reg_data)) != RT_ERR_OK)
+                            return retVal;
+                        if (pPortAbility->txpause)
+                            reg_data |= 0x80;
+                        else
+                            reg_data &= (~0x80);
+                        if (pPortAbility->rxpause)
+                            reg_data |= 0x100;
+                        else
+                            reg_data &= (~0x100);
+                        if((retVal = rtl8367c_setAsicSdsReg(0,4,2, reg_data)) != RT_ERR_OK)
+                            return retVal;
+
+                         /* 0 4 0  bit 12  set 0*/
+                        if((retVal = rtl8367c_getAsicSdsReg(0, 4, 0, &reg_data)) != RT_ERR_OK)
+                            return retVal;
+                        reg_data &= 0xFFFFEFFF;
+                        if((retVal = rtl8367c_setAsicSdsReg(0,4,0, reg_data)) != RT_ERR_OK)
+                            return retVal;
+
+                        /* 0 4 0  bit 12  set 1,  bit15~13 = 5*/
+                        if((retVal = rtl8367c_getAsicSdsReg(0, 4, 0, &reg_data)) != RT_ERR_OK)
+                            return retVal;
+                        reg_data &= 0xFFFF0FFF;
+                        reg_data |= 0xB000;
+                        if((retVal = rtl8367c_setAsicSdsReg(0,4,0, reg_data)) != RT_ERR_OK)
+                            return retVal;
+
+                        /* 0 0 2  bit 6  set 0,  bit13 set to 1, bit12 0*/
+                        if((retVal = rtl8367c_getAsicSdsReg(0, 0, 2, &reg_data)) != RT_ERR_OK)
+                            return retVal;
+                        reg_data &= 0xFFFFFFBF;
+                        reg_data |= 0x2000;
+                        reg_data &= 0xffffefff;
+
+                        if((retVal = rtl8367c_setAsicSdsReg(0,0,2, reg_data)) != RT_ERR_OK)
+                            return retVal;
+
+                        /* 0 4 2  bit 8  rx pause,  bit7 tx pause*/
+                        if((retVal = rtl8367c_getAsicSdsReg(0, 4, 2, &reg_data)) != RT_ERR_OK)
+                            return retVal;
+                        if (pPortAbility->txpause)
+                            reg_data |= 0x80;
+                        else
+                            reg_data &= 0xffffff7f;
+                        if (pPortAbility->rxpause)
+                            reg_data |= 0x100;
+                        else
+                            reg_data &= 0xfffffeff;
+                        if((retVal = rtl8367c_setAsicSdsReg(0,4,2, reg_data)) != RT_ERR_OK)
+                            return retVal;
+
+                         /* 0 4 0  bit 12  set 0*/
+                        if((retVal = rtl8367c_getAsicSdsReg(0, 4, 0, &reg_data)) != RT_ERR_OK)
+                            return retVal;
+                        reg_data &= 0xFFFFEFFF;
+                        if((retVal = rtl8367c_setAsicSdsReg(0,4,0, reg_data)) != RT_ERR_OK)
+                            return retVal;
+                        /* new_cfg_sds_mode=0x7 (1000X/100FX auto) */
+                        if((retVal = rtl8367c_setAsicRegBits(0x1d95, 0x1f00, 0x7)) != RT_ERR_OK)
+                            return retVal;
+
+                    }
+
+                    /* disable force ability */
+                    if((retVal = rtl8367c_setAsicRegBit(0x137c, 12, 0)) != RT_ERR_OK)
+                        return retVal;
+                    return RT_ERR_OK;
+
+                }
+
+                /* new_cfg_sds_mode */
+                if((retVal = rtl8367c_getAsicRegBits(0x1d95, 0x1f00, &regValue2)) != RT_ERR_OK)
+                    return retVal;
+                if(regValue2 == 0x2)
+                {
+#if 0
+                    /*SGMII*/
+                    if((retVal = rtl8367c_setAsicRegBits(0x1d95, 0x1f00, 0x1f)) != RT_ERR_OK)
+                        return retVal;
+#endif
+                    if((retVal = rtl8367c_setAsicRegBit(0x1d95, 13, 1)) != RT_ERR_OK)
+                        return retVal;
+
+                    for (i = 0; i < 0xfff; i++);    /* busy-wait delay */
+
+                    if((retVal = rtl8367c_setAsicRegBits(0x1d95, 0x1f00, 0x2)) != RT_ERR_OK)
+                        return retVal;
+
+                    for (i = 0; i < 0xfff; i++);    /* busy-wait delay */
+
+                    if((retVal = rtl8367c_setAsicRegBit(RTL8367C_REG_SDS_MISC, RTL8367C_CFG_SGMII_FDUP_OFFSET, pPortAbility->duplex)) != RT_ERR_OK)
+                        return retVal;
+
+                    if((retVal = rtl8367c_setAsicRegBits(RTL8367C_REG_SDS_MISC, RTL8367C_CFG_SGMII_SPD_MASK, pPortAbility->speed)) != RT_ERR_OK)
+                        return retVal;
+
+                    if((retVal = rtl8367c_setAsicRegBit(RTL8367C_REG_SDS_MISC, RTL8367C_CFG_SGMII_TXFC_OFFSET, pPortAbility->txpause)) != RT_ERR_OK)
+                        return retVal;
+
+                    if((retVal = rtl8367c_setAsicRegBit(RTL8367C_REG_SDS_MISC, RTL8367C_CFG_SGMII_RXFC_OFFSET, pPortAbility->rxpause)) != RT_ERR_OK)
+                        return retVal;
+
+                    if((retVal = rtl8367c_setAsicRegBit(RTL8367C_REG_SDS_MISC, RTL8367C_CFG_SGMII_LINK_OFFSET, pPortAbility->link)) != RT_ERR_OK)
+                        return retVal;
+
+                    /* disable force ability */
+                    if((retVal = rtl8367c_setAsicRegBit(0x137c, 12, 0)) != RT_ERR_OK)
+                        return retVal;
+                    return RT_ERR_OK;
+                }
+                else if(regValue2 == 0x12)
+                {
+#if 0
+                    /*HiSGMII*/
+                    if((retVal = rtl8367c_setAsicRegBits(0x1d95, 0x1f00, 0x1f)) != RT_ERR_OK)
+                        return retVal;
+#endif
+                    if((retVal = rtl8367c_setAsicRegBit(0x1d95, 13, 1)) != RT_ERR_OK)
+                        return retVal;
+
+                    for (i = 0; i < 0xfff; i++);    /* busy-wait delay */
+
+                    if((retVal = rtl8367c_setAsicRegBits(0x1d95, 0x1f00, 0x12)) != RT_ERR_OK)
+                        return retVal;
+
+                    for (i = 0; i < 0xfff; i++);    /* busy-wait delay */
+
+                    if((retVal = rtl8367c_setAsicRegBit(0x1d11, 11, 0x1)) != RT_ERR_OK)
+                        return retVal;
+
+                    if((retVal = rtl8367c_setAsicRegBit(RTL8367C_REG_SDS_MISC, RTL8367C_CFG_SGMII_FDUP_OFFSET, pPortAbility->duplex)) != RT_ERR_OK)
+                        return retVal;
+
+                    if((retVal = rtl8367c_setAsicRegBits(RTL8367C_REG_SDS_MISC, RTL8367C_CFG_SGMII_SPD_MASK, pPortAbility->speed)) != RT_ERR_OK)
+                        return retVal;
+
+                    if((retVal = rtl8367c_setAsicRegBit(RTL8367C_REG_SDS_MISC, RTL8367C_CFG_SGMII_TXFC_OFFSET, pPortAbility->txpause)) != RT_ERR_OK)
+                        return retVal;
+
+                    if((retVal = rtl8367c_setAsicRegBit(RTL8367C_REG_SDS_MISC, RTL8367C_CFG_SGMII_RXFC_OFFSET, pPortAbility->rxpause)) != RT_ERR_OK)
+                        return retVal;
+
+                    if((retVal = rtl8367c_setAsicRegBit(RTL8367C_REG_SDS_MISC, RTL8367C_CFG_SGMII_LINK_OFFSET, pPortAbility->link)) != RT_ERR_OK)
+                        return retVal;
+
+                    /* disable force ability */
+                    if((retVal = rtl8367c_setAsicRegBit(0x137c, 12, 0)) != RT_ERR_OK)
+                        return retVal;
+                    return RT_ERR_OK;
+
+                }
+            }
+            else
+            {
+                if((retVal = rtl8367c_getAsicRegBit(0x1d3d, 10, &regValue2)) != RT_ERR_OK)
+                    return retVal;
+                if (regValue2 == 0)
+                {
+                    /*ext1_force_ablty*/
+                    if((retVal = rtl8367c_setAsicRegBit(0x1311, 2, pPortAbility->duplex)) != RT_ERR_OK)
+                        return retVal;
+
+                    if((retVal = rtl8367c_setAsicRegBits(0x1311, 0x3, pPortAbility->speed)) != RT_ERR_OK)
+                        return retVal;
+
+                    if((retVal = rtl8367c_setAsicRegBit(0x1311, 4, pPortAbility->link)) != RT_ERR_OK)
+                        return retVal;
+
+                    if((retVal = rtl8367c_setAsicRegBit(0x1311, 6, pPortAbility->txpause)) != RT_ERR_OK)
+                        return retVal;
+
+                    if((retVal = rtl8367c_setAsicRegBit(0x1311, 5, pPortAbility->rxpause)) != RT_ERR_OK)
+                        return retVal;
+
+                    /*force mode for ext1*/
+                    if((retVal = rtl8367c_setAsicRegBit(0x1311, 12, pPortAbility->forcemode)) != RT_ERR_OK)
+                        return retVal;
+
+                    if (pPortAbility->link == 1)
+                    {
+                        if((retVal = rtl8367c_setAsicRegBit(0x1311, 4, 0)) != RT_ERR_OK)
+                            return retVal;
+
+                        if((retVal = rtl8367c_setAsicRegBit(0x1311, 4, 1)) != RT_ERR_OK)
+                            return retVal;
+                    }
+                    else
+                    {
+                        if((retVal = rtl8367c_setAsicRegBits(0x1311, 0x3, 2)) != RT_ERR_OK)
+                            return retVal;
+                    }
+
+                    /* disable force ability */
+                    if((retVal = rtl8367c_setAsicRegBit(0x137c, 12, 0)) != RT_ERR_OK)
+                        return retVal;
+                    return RT_ERR_OK;
+                }
+            }
+
+
+        }
+        else if (2 == id)
+        {
+
+            if((retVal = rtl8367c_getAsicRegBit(0x1d95, 0, &sgmiibit)) != RT_ERR_OK)
+                return retVal;
+            if (sgmiibit == 1)
+            {
+                /* for 1000x/100fx/1000x_100fx the parameters must be written to the serdes registers */
+                if((retVal = rtl8367c_getAsicReg(0x1d95, &regValue)) != RT_ERR_OK)
+                    return retVal;
+                /* cfg_mac7_sel_sgmii == 1 && cfg_mac7_fib == 1 */
+                if((regValue & 0x3) == 0x3)
+                {
+                    if((retVal = rtl8367c_getAsicRegBits(0x1d95, 0x1f00, &regValue2)) != RT_ERR_OK)
+                        return retVal;
+
+                    if(regValue2 == 0x4)
+                    {
+                        /*1000X*/
+#if 0
+                        if((retVal = rtl8367c_setAsicRegBits(0x1d95, 0x1f00, 0x1f)) != RT_ERR_OK)
+                            return retVal;
+#endif
+                        if((retVal = rtl8367c_setAsicRegBit(0x1d95, 13, 1)) != RT_ERR_OK)
+                            return retVal;
+
+                        /* 0 4 0  bit 12  set 1,  bit15~13 = 4*/
+                        if((retVal = rtl8367c_getAsicSdsReg(0, 4, 0, &reg_data)) != RT_ERR_OK)
+                            return retVal;
+                        reg_data &= 0xFFFF0FFF;
+                        reg_data |= 0x9000;
+                        if((retVal = rtl8367c_setAsicSdsReg(0,4,0, reg_data)) != RT_ERR_OK)
+                            return retVal;
+
+                        /* 0 0 2  bit 6  set 1,  bit13 set to 0, bit12 nway_en*/
+                        if((retVal = rtl8367c_getAsicSdsReg(0, 0, 2, &reg_data)) != RT_ERR_OK)
+                            return retVal;
+                        reg_data &= 0xFFFFDFFF;
+                        reg_data |= 0x40;
+                        if(pPortAbility->forcemode)
+                            reg_data &= 0xffffefff;
+                        else
+                            reg_data |= 0x1000;
+
+                        if((retVal = rtl8367c_setAsicSdsReg(0,0,2, reg_data)) != RT_ERR_OK)
+                            return retVal;
+
+                        /* 0 4 2  bit 8 rx pause, bit 7 tx pause */
+                        if((retVal = rtl8367c_getAsicSdsReg(0, 4, 2, &reg_data)) != RT_ERR_OK)
+                            return retVal;
+                        if (pPortAbility->txpause)
+                            reg_data |= 0x80;
+                        else
+                            reg_data &= 0xffffff7f;
+                        if (pPortAbility->rxpause)
+                            reg_data |= 0x100;
+                        else
+                            reg_data &= 0xfffffeff;
+                        if((retVal = rtl8367c_setAsicSdsReg(0,4,2, reg_data)) != RT_ERR_OK)
+                            return retVal;
+
+                        /* 0 4 0  bit 12  set 0 */
+                        if((retVal = rtl8367c_getAsicSdsReg(0, 4, 0, &reg_data)) != RT_ERR_OK)
+                            return retVal;
+                        reg_data &= 0xFFFFEFFF;
+                        if((retVal = rtl8367c_setAsicSdsReg(0,4,0, reg_data)) != RT_ERR_OK)
+                            return retVal;
+
+                        if((retVal = rtl8367c_setAsicRegBits(0x1d95, 0x1f00, 0x4)) != RT_ERR_OK)
+                            return retVal;
+
+                    }
+                    else if(regValue2 == 0x5)
+                    {
+                        /*100FX*/
+#if 0
+                        if((retVal = rtl8367c_setAsicRegBits(0x1d95, 0x1f00, 0x1f)) != RT_ERR_OK)
+                            return retVal;
+#endif
+                        if((retVal = rtl8367c_setAsicRegBit(0x1d95, 13, 1)) != RT_ERR_OK)
+                            return retVal;
+
+                        /* 0 4 0  bit 12  set 1,  bit15~13 = 5*/
+                        if((retVal = rtl8367c_getAsicSdsReg(0, 4, 0, &reg_data)) != RT_ERR_OK)
+                            return retVal;
+                        reg_data &= 0xFFFF0FFF;
+                        reg_data |= 0xB000;
+                        if((retVal = rtl8367c_setAsicSdsReg(0,4,0, reg_data)) != RT_ERR_OK)
+                            return retVal;
+
+                        /* 0 0 2  bit 6  set 0,  bit13 set to 1, bit12 0*/
+                        if((retVal = rtl8367c_getAsicSdsReg(0, 0, 2, &reg_data)) != RT_ERR_OK)
+                            return retVal;
+                        reg_data &= 0xFFFFFFBF;
+                        reg_data |= 0x2000;
+                        reg_data &= 0xffffefff;
+
+                        if((retVal = rtl8367c_setAsicSdsReg(0,0,2, reg_data)) != RT_ERR_OK)
+                            return retVal;
+
+                        /* 0 4 2  bit 8 rx pause, bit 7 tx pause */
+                        if((retVal = rtl8367c_getAsicSdsReg(0, 4, 2, &reg_data)) != RT_ERR_OK)
+                            return retVal;
+                        if (pPortAbility->txpause)
+                            reg_data |= 0x80;
+                        else
+                            reg_data &= 0xffffff7f;
+                        if (pPortAbility->rxpause)
+                            reg_data |= 0x100;
+                        else
+                            reg_data &= 0xfffffeff;
+                        if((retVal = rtl8367c_setAsicSdsReg(0,4,2, reg_data)) != RT_ERR_OK)
+                            return retVal;
+
+                        /* 0 4 0  bit 12  set 0 */
+                        if((retVal = rtl8367c_getAsicSdsReg(0, 4, 0, &reg_data)) != RT_ERR_OK)
+                            return retVal;
+                        reg_data &= 0xFFFFEFFF;
+                        if((retVal = rtl8367c_setAsicSdsReg(0,4,0, reg_data)) != RT_ERR_OK)
+                            return retVal;
+
+                        if((retVal = rtl8367c_setAsicRegBits(0x1d95, 0x1f00, 0x5)) != RT_ERR_OK)
+                            return retVal;
+
+                    }
+                    else if(regValue2 == 0x7)
+                    {
+                        /*1000X/100FX auto*/
+#if 0
+                        if((retVal = rtl8367c_setAsicRegBits(0x1d95, 0x1f00, 0x1f)) != RT_ERR_OK)
+                            return retVal;
+#endif
+                        if((retVal = rtl8367c_setAsicRegBit(0x1d95, 13, 1)) != RT_ERR_OK)
+                            return retVal;
+
+                        /* 0 4 0  bit 12  set 1,  bit15~13 = 4*/
+                        if((retVal = rtl8367c_getAsicSdsReg(0, 4, 0, &reg_data)) != RT_ERR_OK)
+                            return retVal;
+                        reg_data &= 0xFFFF0FFF;
+                        reg_data |= 0x9000;
+                        if((retVal = rtl8367c_setAsicSdsReg(0,4,0, reg_data)) != RT_ERR_OK)
+                            return retVal;
+
+                        /* 0 0 2  bit 6  set 1,  bit13 set to 0, bit12 nway_en*/
+                        if((retVal = rtl8367c_getAsicSdsReg(0, 0, 2, &reg_data)) != RT_ERR_OK)
+                            return retVal;
+                        reg_data &= 0xFFFFDFFF;
+                        reg_data |= 0x40;
+                        if(pPortAbility->forcemode)
+                            reg_data &= 0xffffefff;
+                        else
+                            reg_data |= 0x1000;
+
+                        if((retVal = rtl8367c_setAsicSdsReg(0,0,2, reg_data)) != RT_ERR_OK)
+                            return retVal;
+
+                        /* 0 4 2  bit 8 rx pause, bit 7 tx pause */
+                        if((retVal = rtl8367c_getAsicSdsReg(0, 4, 2, &reg_data)) != RT_ERR_OK)
+                            return retVal;
+                        if (pPortAbility->txpause)
+                            reg_data |= 0x80;
+                        else
+                            reg_data &= 0xffffff7f;
+                        if (pPortAbility->rxpause)
+                            reg_data |= 0x100;
+                        else
+                            reg_data &= 0xfffffeff;
+                        if((retVal = rtl8367c_setAsicSdsReg(0,4,2, reg_data)) != RT_ERR_OK)
+                            return retVal;
+
+                        /* 0 4 0  bit 12  set 0 */
+                        if((retVal = rtl8367c_getAsicSdsReg(0, 4, 0, &reg_data)) != RT_ERR_OK)
+                            return retVal;
+                        reg_data &= 0xFFFFEFFF;
+                        if((retVal = rtl8367c_setAsicSdsReg(0,4,0, reg_data)) != RT_ERR_OK)
+                            return retVal;
+
+                        /* 0 4 0  bit 12  set 1,  bit15~13 = 5*/
+                        if((retVal = rtl8367c_getAsicSdsReg(0, 4, 0, &reg_data)) != RT_ERR_OK)
+                            return retVal;
+                        reg_data &= 0xFFFF0FFF;
+                        reg_data |= 0xB000;
+                        if((retVal = rtl8367c_setAsicSdsReg(0,4,0, reg_data)) != RT_ERR_OK)
+                            return retVal;
+
+                        /* 0 0 2  bit 6  set 0,  bit13 set to 1, bit12 0*/
+                        if((retVal = rtl8367c_getAsicSdsReg(0, 0, 2, &reg_data)) != RT_ERR_OK)
+                            return retVal;
+                        reg_data &= 0xFFFFFFBF;
+                        reg_data |= 0x2000;
+                        reg_data &= 0xffffefff;
+
+                        if((retVal = rtl8367c_setAsicSdsReg(0,0,2, reg_data)) != RT_ERR_OK)
+                            return retVal;
+
+                        /* 0 4 2  bit 8 rx pause, bit 7 tx pause */
+                        if((retVal = rtl8367c_getAsicSdsReg(0, 4, 2, &reg_data)) != RT_ERR_OK)
+                            return retVal;
+                        if (pPortAbility->txpause)
+                            reg_data |= 0x80;
+                        else
+                            reg_data &= 0xffffff7f;
+                        if (pPortAbility->rxpause)
+                            reg_data |= 0x100;
+                        else
+                            reg_data &= 0xfffffeff;
+                        if((retVal = rtl8367c_setAsicSdsReg(0,4,2, reg_data)) != RT_ERR_OK)
+                            return retVal;
+
+                        /* 0 4 0  bit 12  set 0 */
+                        if((retVal = rtl8367c_getAsicSdsReg(0, 4, 0, &reg_data)) != RT_ERR_OK)
+                            return retVal;
+                        reg_data &= 0xFFFFEFFF;
+                        if((retVal = rtl8367c_setAsicSdsReg(0,4,0, reg_data)) != RT_ERR_OK)
+                            return retVal;
+
+                        if((retVal = rtl8367c_setAsicRegBits(0x1d95, 0x1f00, 0x7)) != RT_ERR_OK)
+                            return retVal;
+
+                    }
+
+                    if((retVal = rtl8367c_setAsicRegBit(0x137d, 12, 0)) != RT_ERR_OK)
+                        return retVal;
+                    return RT_ERR_OK;
+
+                }
+
+                if((retVal = rtl8367c_getAsicRegBits(0x1d95, 0x1f00, &regValue2)) != RT_ERR_OK)
+                    return retVal;
+                if(regValue2 == 0x2)
+                {
+                    /*SGMII*/
+#if 0
+                    if((retVal = rtl8367c_setAsicRegBits(0x1d95, 0x1f00, 0x1f)) != RT_ERR_OK)
+                        return retVal;
+#endif
+                    if((retVal = rtl8367c_setAsicRegBit(0x1d95, 13, 1)) != RT_ERR_OK)
+                        return retVal;
+
+                    for (i = 0; i < 0xfff; i++);    /* busy-wait delay */
+
+                    /* 0 2 0  bits 9:8  nway: 00 = auto-negotiation enabled */
+                    if((retVal = rtl8367c_getAsicSdsReg(0, 2, 0, &reg_data)) != RT_ERR_OK)
+                        return retVal;
+                    reg_data &= 0xfffffcff;
+                    if (!pPortAbility->nway)
+                        reg_data |= 0x100;      /* nway off: force the link */
+                    if((retVal = rtl8367c_setAsicSdsReg(0,2,0, reg_data)) != RT_ERR_OK)
+                        return retVal;
+
+                    if((retVal = rtl8367c_setAsicRegBits(0x1d95, 0x1f00, 0x2)) != RT_ERR_OK)
+                        return retVal;
+
+                    for (i = 0; i < 0xfff; i++);    /* busy-wait delay */
+
+                    if((retVal = rtl8367c_setAsicRegBit(RTL8367C_REG_SDS_MISC, RTL8367C_CFG_SGMII_FDUP_OFFSET, pPortAbility->duplex)) != RT_ERR_OK)
+                        return retVal;
+
+                    if((retVal = rtl8367c_setAsicRegBits(RTL8367C_REG_SDS_MISC, RTL8367C_CFG_SGMII_SPD_MASK, pPortAbility->speed)) != RT_ERR_OK)
+                        return retVal;
+
+                    if((retVal = rtl8367c_setAsicRegBit(RTL8367C_REG_SDS_MISC, RTL8367C_CFG_SGMII_TXFC_OFFSET, pPortAbility->txpause)) != RT_ERR_OK)
+                        return retVal;
+
+                    if((retVal = rtl8367c_setAsicRegBit(RTL8367C_REG_SDS_MISC, RTL8367C_CFG_SGMII_RXFC_OFFSET, pPortAbility->rxpause)) != RT_ERR_OK)
+                        return retVal;
+
+                    if((retVal = rtl8367c_setAsicRegBit(RTL8367C_REG_SDS_MISC, RTL8367C_CFG_SGMII_LINK_OFFSET, pPortAbility->link)) != RT_ERR_OK)
+                        return retVal;
+
+                    if((retVal = rtl8367c_setAsicRegBit(0x137d, 12, 0)) != RT_ERR_OK)
+                        return retVal;
+                    return RT_ERR_OK;
+                }
+            }
+            else
+            {
+
+                /*ext2_force_ablty*/
+                if((retVal = rtl8367c_setAsicRegBit(0x13c4, 2, pPortAbility->duplex)) != RT_ERR_OK)
+                    return retVal;
+
+                if((retVal = rtl8367c_setAsicRegBits(0x13c4, 0x3, pPortAbility->speed)) != RT_ERR_OK)
+                    return retVal;
+
+                if((retVal = rtl8367c_setAsicRegBit(0x13c4, 4, pPortAbility->link)) != RT_ERR_OK)
+                    return retVal;
+
+                if((retVal = rtl8367c_setAsicRegBit(0x13c4, 6, pPortAbility->txpause)) != RT_ERR_OK)
+                    return retVal;
+
+                if((retVal = rtl8367c_setAsicRegBit(0x13c4, 5, pPortAbility->rxpause)) != RT_ERR_OK)
+                    return retVal;
+
+                /*force mode for ext2*/
+                if((retVal = rtl8367c_setAsicRegBit(0x13c4, 12, pPortAbility->forcemode)) != RT_ERR_OK)
+                    return retVal;
+
+                if (pPortAbility->link == 1)
+                {
+                    if((retVal = rtl8367c_setAsicRegBit(0x13c4, 4, 0)) != RT_ERR_OK)
+                        return retVal;
+
+                    if((retVal = rtl8367c_setAsicRegBit(0x13c4, 4, 1)) != RT_ERR_OK)
+                        return retVal;
+                }
+                else
+                {
+                    if((retVal = rtl8367c_setAsicRegBits(0x13c4, 0x3, 2)) != RT_ERR_OK)
+                        return retVal;
+                }
+
+
+                if((retVal = rtl8367c_getAsicRegBit(0x1d3d, 10, &reg_data)) != RT_ERR_OK)
+                    return retVal;
+                if(reg_data == 1)
+                {
+                    if((retVal = rtl8367c_setAsicRegBit(0x1311, 2, pPortAbility->duplex)) != RT_ERR_OK)
+                        return retVal;
+
+                    if((retVal = rtl8367c_setAsicRegBits(0x1311, 0x3, pPortAbility->speed)) != RT_ERR_OK)
+                        return retVal;
+
+                    if((retVal = rtl8367c_setAsicRegBit(0x1311, 4, pPortAbility->link)) != RT_ERR_OK)
+                        return retVal;
+
+                    if((retVal = rtl8367c_setAsicRegBit(0x1311, 6, pPortAbility->txpause)) != RT_ERR_OK)
+                        return retVal;
+
+                    if((retVal = rtl8367c_setAsicRegBit(0x1311, 5, pPortAbility->rxpause)) != RT_ERR_OK)
+                        return retVal;
+
+                    /*force mode for ext1*/
+                    if((retVal = rtl8367c_setAsicRegBit(0x1311, 12, pPortAbility->forcemode)) != RT_ERR_OK)
+                        return retVal;
+
+                    if (pPortAbility->link == 1)
+                    {
+                        if((retVal = rtl8367c_setAsicRegBit(0x1311, 4, 0)) != RT_ERR_OK)
+                            return retVal;
+
+                        if((retVal = rtl8367c_setAsicRegBit(0x1311, 4, 1)) != RT_ERR_OK)
+                            return retVal;
+                    }
+                    else
+                    {
+                        if((retVal = rtl8367c_setAsicRegBits(0x1311, 0x3, 2)) != RT_ERR_OK)
+                            return retVal;
+                    }
+                }
+
+
+            }
+
+            /*disable force ability   ---      */
+            if((retVal = rtl8367c_setAsicRegBit(0x137d, 12, 0)) != RT_ERR_OK)
+                return retVal;
+        }
+#if 0
+        if((retVal = rtl8367c_setAsicRegBit(RTL8367C_REG_SDS_MISC, RTL8367C_CFG_SGMII_FDUP_OFFSET, pPortAbility->duplex)) != RT_ERR_OK)
+            return retVal;
+
+        if((retVal = rtl8367c_setAsicRegBits(RTL8367C_REG_SDS_MISC, RTL8367C_CFG_SGMII_SPD_MASK, pPortAbility->speed)) != RT_ERR_OK)
+            return retVal;
+
+        if((retVal = rtl8367c_setAsicRegBit(RTL8367C_REG_SDS_MISC, RTL8367C_CFG_SGMII_TXFC_OFFSET, pPortAbility->txpause)) != RT_ERR_OK)
+            return retVal;
+
+        if((retVal = rtl8367c_setAsicRegBit(RTL8367C_REG_SDS_MISC, RTL8367C_CFG_SGMII_RXFC_OFFSET, pPortAbility->rxpause)) != RT_ERR_OK)
+            return retVal;
+
+        if((retVal = rtl8367c_setAsicRegBit(RTL8367C_REG_SDS_MISC, RTL8367C_CFG_SGMII_LINK_OFFSET, pPortAbility->link)) != RT_ERR_OK)
+            return retVal;
+#endif
+    }
+
+    return RT_ERR_OK;
+}
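+
+/*
+ * Illustrative usage sketch (not part of the original driver): forcing EXT
+ * interface 1 up at 1000M full duplex with both pause directions enabled.
+ * The rtl8367c_port_ability_t field names follow the decode logic in
+ * rtl8367c_getAsicPortForceLinkExt() below; the speed encoding (2 = 1000M)
+ * is an assumption inferred from the link-down default written above and
+ * should be checked against the SDK headers before use.
+ */
+#if 0
+static ret_t example_force_ext1_1000m(void)
+{
+    rtl8367c_port_ability_t ability;
+
+    memset(&ability, 0x00, sizeof(rtl8367c_port_ability_t));
+    ability.forcemode = 1;      /* force, do not auto-negotiate */
+    ability.speed     = 2;      /* assumed encoding: 2 = 1000M */
+    ability.duplex    = 1;      /* full duplex */
+    ability.link      = 1;      /* force link up */
+    ability.txpause   = 1;
+    ability.rxpause   = 1;
+
+    return rtl8367c_setAsicPortForceLinkExt(1, &ability);
+}
+#endif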
+/* Function Name:
+ *      rtl8367c_getAsicPortForceLinkExt
+ * Description:
+ *      Get external interface force linking configuration
+ * Input:
+ *      id          - external interface id (0~2)
+ * Output:
+ *      pPortAbility - port ability configuration
+ * Return:
+ *      RT_ERR_OK           - Success
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_OUT_OF_RANGE - input parameter out of range
+ *      RT_ERR_FAILED       - unknown chip id
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicPortForceLinkExt(rtk_uint32 id, rtl8367c_port_ability_t *pPortAbility)
+{
+    rtk_uint32  reg_data, regValue, type;
+    rtk_uint32  sgmiiSel;
+    rtk_uint32  hsgmiiSel;
+    rtk_uint32  Mode;
+    ret_t       retVal;
+
+
+    if(id >= RTL8367C_EXTNO)
+        return RT_ERR_OUT_OF_RANGE;
+
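+    /* Chip identification: writing 0x0249 to 0x13C2 opens the ID window,
+       register 0x1300 then holds the chip ID, and writing 0 closes the
+       window again. */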
+    if((retVal = rtl8367c_setAsicReg(0x13C2, 0x0249)) != RT_ERR_OK)
+        return retVal;
+
+    if((retVal = rtl8367c_getAsicReg(0x1300, &regValue)) != RT_ERR_OK)
+        return retVal;
+
+    if((retVal = rtl8367c_setAsicReg(0x13C2, 0x0000)) != RT_ERR_OK)
+        return retVal;
+
+    type = 0;
+
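+    /* Group the probed chip ID into one of three register-layout families;
+       the branches below use different force-ability registers per family. */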
+    switch (regValue)
+    {
+        case 0x0276:
+        case 0x0597:
+        case 0x6367:
+            type = 1;
+            break;
+        case 0x0652:
+        case 0x6368:
+            type = 2;
+            break;
+        case 0x0801:
+        case 0x6511:
+            type = 3;
+            break;
+        default:
+            return RT_ERR_FAILED;
+    }
+
+    if (1 == type)
+    {
+        if(1 == id)
+        {
+            if((retVal = rtl8367c_getAsicRegBit(RTL8367C_REG_SDS_MISC, RTL8367C_CFG_MAC8_SEL_SGMII_OFFSET, &sgmiiSel)) != RT_ERR_OK)
+                return retVal;
+
+            if((retVal = rtl8367c_getAsicRegBit(RTL8367C_REG_SDS_MISC, RTL8367C_CFG_MAC8_SEL_HSGMII_OFFSET, &hsgmiiSel)) != RT_ERR_OK)
+                return retVal;
+
+            if( (sgmiiSel == 1) || (hsgmiiSel == 1) )
+            {
+                memset(pPortAbility, 0x00, sizeof(rtl8367c_port_ability_t));
+                pPortAbility->forcemode = 1;
+
+                if((retVal = rtl8367c_getAsicRegBit(RTL8367C_REG_SDS_MISC, RTL8367C_CFG_SGMII_FDUP_OFFSET, &reg_data)) != RT_ERR_OK)
+                    return retVal;
+
+                pPortAbility->duplex = reg_data;
+
+                if((retVal = rtl8367c_getAsicRegBits(RTL8367C_REG_SDS_MISC, RTL8367C_CFG_SGMII_SPD_MASK, &reg_data)) != RT_ERR_OK)
+                    return retVal;
+
+                pPortAbility->speed = reg_data;
+
+                if((retVal = rtl8367c_getAsicRegBit(RTL8367C_REG_SDS_MISC, RTL8367C_CFG_SGMII_LINK_OFFSET, &reg_data)) != RT_ERR_OK)
+                    return retVal;
+
+                pPortAbility->link = reg_data;
+
+                if((retVal = rtl8367c_getAsicRegBit(RTL8367C_REG_SDS_MISC, RTL8367C_CFG_SGMII_TXFC_OFFSET, &reg_data)) != RT_ERR_OK)
+                    return retVal;
+
+                pPortAbility->txpause = reg_data;
+
+                if((retVal = rtl8367c_getAsicRegBit(RTL8367C_REG_SDS_MISC, RTL8367C_CFG_SGMII_RXFC_OFFSET, &reg_data)) != RT_ERR_OK)
+                    return retVal;
+
+                pPortAbility->rxpause = reg_data;
+
+                return RT_ERR_OK;
+            }
+        }
+
+        if(0 == id || 1 == id)
+            retVal = rtl8367c_getAsicReg(RTL8367C_REG_DIGITAL_INTERFACE0_FORCE+id, &reg_data);
+        else
+            retVal = rtl8367c_getAsicReg(RTL8367C_REG_DIGITAL_INTERFACE2_FORCE, &reg_data);
+
+        if(retVal != RT_ERR_OK)
+            return retVal;
+
+        pPortAbility->forcemode = (reg_data >> 12) & 0x0001;
+        pPortAbility->mstfault  = (reg_data >> 9) & 0x0001;
+        pPortAbility->mstmode   = (reg_data >> 8) & 0x0001;
+        pPortAbility->nway      = (reg_data >> 7) & 0x0001;
+        pPortAbility->txpause   = (reg_data >> 6) & 0x0001;
+        pPortAbility->rxpause   = (reg_data >> 5) & 0x0001;
+        pPortAbility->link      = (reg_data >> 4) & 0x0001;
+        pPortAbility->duplex    = (reg_data >> 2) & 0x0001;
+        pPortAbility->speed     = reg_data & 0x0003;
+    }
+    else if (2 == type)
+    {
+        if (id == 1)
+        {
+            if ((retVal = rtl8367c_getAsicReg(0x1311, &reg_data))!=RT_ERR_OK)
+                return retVal;
+
+            pPortAbility->forcemode = (reg_data >> 12) & 1;
+            pPortAbility->duplex = (reg_data >> 2) & 1;
+            pPortAbility->link = (reg_data >> 4) & 1;
+            pPortAbility->speed = reg_data & 3;
+            pPortAbility->rxpause = (reg_data >> 5) & 1;
+            pPortAbility->txpause = (reg_data >> 6) & 1;
+        }
+        else if (2 == id)
+        {
+            if ((retVal = rtl8367c_getAsicReg(0x13c4, &reg_data))!=RT_ERR_OK)
+                return retVal;
+
+            pPortAbility->forcemode = (reg_data >> 12) & 1;
+            pPortAbility->duplex = (reg_data >> 2) & 1;
+            pPortAbility->link = (reg_data >> 4) & 1;
+            pPortAbility->speed = reg_data & 3;
+            pPortAbility->rxpause = (reg_data >> 5) & 1;
+            pPortAbility->txpause = (reg_data >> 6) & 1;
+        }
+    }
+    else if (3 == type)
+    {
+        if (id == 1)
+        {
+
+            if((retVal = rtl8367c_getAsicPortExtMode(id, &Mode))!=RT_ERR_OK)
+                return retVal;
+            if(Mode < EXT_SGMII)
+            {
+
+                if ((retVal = rtl8367c_getAsicReg(0x1311, &reg_data))!=RT_ERR_OK)
+                    return retVal;
+
+                pPortAbility->forcemode = (reg_data >> 12) & 1;
+                pPortAbility->duplex = (reg_data >> 2) & 1;
+                pPortAbility->link = (reg_data >> 4) & 1;
+                pPortAbility->speed = reg_data & 3;
+                pPortAbility->rxpause = (reg_data >> 5) & 1;
+                pPortAbility->txpause = (reg_data >> 6) & 1;
+            }
+            else if(Mode < EXT_1000X_100FX)
+            {
+                if ((retVal = rtl8367c_getAsicReg(0x1d11, &reg_data))!=RT_ERR_OK)
+                    return retVal;
+
+                //pPortAbility->forcemode = (reg_data >> 12) & 1;
+                pPortAbility->duplex = (reg_data >> 10) & 1;
+                pPortAbility->link = (reg_data >> 9) & 1;
+                pPortAbility->speed = (reg_data >> 7) & 3;
+                pPortAbility->rxpause = (reg_data >> 14) & 1;
+                pPortAbility->txpause = (reg_data >> 13) & 1;
+            }
+            else if(Mode < EXT_RGMII_2)
+            {
+                if ((retVal = rtl8367c_getAsicReg(0x1358, &reg_data))!=RT_ERR_OK)
+                    return retVal;
+
+                //pPortAbility->forcemode = (reg_data >> 12) & 1;
+                pPortAbility->duplex = (reg_data >> 2) & 1;
+                pPortAbility->link = (reg_data >> 4) & 1;
+                pPortAbility->speed = reg_data & 3;
+                pPortAbility->rxpause = (reg_data >> 5) & 1;
+                pPortAbility->txpause = (reg_data >> 6) & 1;
+            }
+
+        }
+        else if (2 == id)
+        {
+            if((retVal = rtl8367c_getAsicPortExtMode(id, &Mode))!=RT_ERR_OK)
+                return retVal;
+            if(Mode < EXT_SGMII)
+            {
+
+                if ((retVal = rtl8367c_getAsicReg(0x13c4, &reg_data))!=RT_ERR_OK)
+                    return retVal;
+
+                pPortAbility->forcemode = (reg_data >> 12) & 1;
+                pPortAbility->duplex = (reg_data >> 2) & 1;
+                pPortAbility->link = (reg_data >> 4) & 1;
+                pPortAbility->speed = reg_data & 3;
+                pPortAbility->rxpause = (reg_data >> 5) & 1;
+                pPortAbility->txpause = (reg_data >> 6) & 1;
+            }
+            else if(Mode < EXT_1000X_100FX)
+            {
+                if ((retVal = rtl8367c_getAsicReg(0x1d11, &reg_data))!=RT_ERR_OK)
+                    return retVal;
+
+                //pPortAbility->forcemode = (reg_data >> 12) & 1;
+                pPortAbility->duplex = (reg_data >> 10) & 1;
+                pPortAbility->link = (reg_data >> 9) & 1;
+                pPortAbility->speed = (reg_data >> 7) & 3;
+                pPortAbility->rxpause = (reg_data >> 14) & 1;
+                pPortAbility->txpause = (reg_data >> 13) & 1;
+            }
+            else if(Mode < EXT_RGMII_2)
+            {
+                if ((retVal = rtl8367c_getAsicReg(0x1359, &reg_data))!=RT_ERR_OK)
+                    return retVal;
+
+                //pPortAbility->forcemode = (reg_data >> 12) & 1;
+                pPortAbility->duplex = (reg_data >> 2) & 1;
+                pPortAbility->link = (reg_data >> 4) & 1;
+                pPortAbility->speed = reg_data & 3;
+                pPortAbility->rxpause = (reg_data >> 5) & 1;
+                pPortAbility->txpause = (reg_data >> 6) & 1;
+            }
+            else if(Mode < EXT_END)
+            {
+
+                if ((retVal = rtl8367c_getAsicReg(0x1311, &reg_data))!=RT_ERR_OK)
+                    return retVal;
+
+                pPortAbility->forcemode = (reg_data >> 12) & 1;
+                pPortAbility->duplex = (reg_data >> 2) & 1;
+                pPortAbility->link = (reg_data >> 4) & 1;
+                pPortAbility->speed = reg_data & 3;
+                pPortAbility->rxpause = (reg_data >> 5) & 1;
+                pPortAbility->txpause = (reg_data >> 6) & 1;
+            }
+        }
+    }
+    return RT_ERR_OK;
+}
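+
+/*
+ * Illustrative read-back sketch (not part of the original driver): fetch
+ * the forced-link state of EXT interface 1 and verify it is forced up.
+ * Field names match the register decode above.
+ */
+#if 0
+static ret_t example_check_ext1_forced(void)
+{
+    rtl8367c_port_ability_t ability;
+    ret_t retVal;
+
+    if ((retVal = rtl8367c_getAsicPortForceLinkExt(1, &ability)) != RT_ERR_OK)
+        return retVal;
+
+    if (!ability.forcemode || !ability.link)
+        return RT_ERR_FAILED;   /* not forced up as expected */
+
+    return RT_ERR_OK;
+}
+#endif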
+/* Function Name:
+ *      rtl8367c_setAsicPortExtMode
+ * Description:
+ *      Set external interface mode configuration
+ * Input:
+ *      id      - external interface id (0~2)
+ *      mode    - external interface mode
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - Success
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_OUT_OF_RANGE - input parameter out of range
+ *      RT_ERR_PORT_ID      - invalid port id for the requested mode
+ *      RT_ERR_FAILED       - unknown chip id
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_setAsicPortExtMode(rtk_uint32 id, rtk_uint32 mode)
+{
+    ret_t   retVal;
+    rtk_uint32 i, regValue, type, option, reg_data;
+    rtk_uint32 idx;
+    rtk_uint32 redData[][2] =   { {0x04D7, 0x0480}, {0xF994, 0x0481}, {0x21A2, 0x0482}, {0x6960, 0x0483}, {0x9728, 0x0484}, {0x9D85, 0x0423}, {0xD810, 0x0424}, {0x83F2, 0x002E} };
+    rtk_uint32 redDataSB[][2] = { {0x04D7, 0x0480}, {0xF994, 0x0481}, {0x31A2, 0x0482}, {0x6960, 0x0483}, {0x9728, 0x0484}, {0x9D85, 0x0423}, {0xD810, 0x0424}, {0x83F2, 0x002E} };
+    rtk_uint32 redData1[][2] =  { {0x82F1, 0x0500}, {0xF195, 0x0501}, {0x31A2, 0x0502}, {0x796C, 0x0503}, {0x9728, 0x0504}, {0x9D85, 0x0423}, {0xD810, 0x0424}, {0x0F80, 0x0001}, {0x83F2, 0x002E} };
+    rtk_uint32 redData5[][2] =  { {0x82F1, 0x0500}, {0xF195, 0x0501}, {0x31A2, 0x0502}, {0x796C, 0x0503}, {0x9728, 0x0504}, {0x9D85, 0x0423}, {0xD810, 0x0424}, {0x0F80, 0x0001}, {0x83F2, 0x002E} };
+    rtk_uint32 redData6[][2] =  { {0x82F1, 0x0500}, {0xF195, 0x0501}, {0x31A2, 0x0502}, {0x796C, 0x0503}, {0x9728, 0x0504}, {0x9D85, 0x0423}, {0xD810, 0x0424}, {0x0F80, 0x0001}, {0x83F2, 0x002E} };
+    rtk_uint32 redData8[][2] =  { {0x82F1, 0x0500}, {0xF995, 0x0501}, {0x31A2, 0x0502}, {0x796C, 0x0503}, {0x9728, 0x0504}, {0x9D85, 0x0423}, {0xD810, 0x0424}, {0x0F80, 0x0001}, {0x83F2, 0x002E} };
+    rtk_uint32 redData9[][2] =  { {0x82F1, 0x0500}, {0xF995, 0x0501}, {0x31A2, 0x0502}, {0x796C, 0x0503}, {0x9728, 0x0504}, {0x9D85, 0x0423}, {0xD810, 0x0424}, {0x0F80, 0x0001}, {0x83F2, 0x002E} };
+    rtk_uint32 redDataHB[][2] = { {0x82F0, 0x0500}, {0xF195, 0x0501}, {0x31A2, 0x0502}, {0x7960, 0x0503}, {0x9728, 0x0504}, {0x9D85, 0x0423}, {0xD810, 0x0424}, {0x0F80, 0x0001}, {0x83F2, 0x002E} };
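+
+    /* Serdes patch tables: each entry is a {data, indirect address} pair
+       streamed through the SDS indirect-access registers below. The table
+       used depends on the board option read from 0x13C1 and, for HSGMII,
+       on the revision nibble of 0x1301. */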
+
+    if(id >= RTL8367C_EXTNO)
+        return RT_ERR_OUT_OF_RANGE;
+
+    if(mode >= EXT_END)
+        return RT_ERR_OUT_OF_RANGE;
+
+
+    if((retVal = rtl8367c_setAsicReg(0x13C2, 0x0249)) != RT_ERR_OK)
+        return retVal;
+
+    if((retVal = rtl8367c_getAsicReg(0x1300, &regValue)) != RT_ERR_OK)
+        return retVal;
+
+    if((retVal = rtl8367c_setAsicReg(0x13C2, 0x0000)) != RT_ERR_OK)
+        return retVal;
+
+    type = 0;
+
+    switch (regValue)
+    {
+        case 0x0276:
+        case 0x0597:
+        case 0x6367:
+            type = 1;
+            break;
+        case 0x0652:
+        case 0x6368:
+            type = 2;
+            break;
+        case 0x0801:
+        case 0x6511:
+            type = 3;
+            break;
+        default:
+            return RT_ERR_FAILED;
+    }
+
+
+    if (1==type)
+    {
+        if((mode == EXT_1000X_100FX) || (mode == EXT_1000X) || (mode == EXT_100FX))
+        {
+            if((retVal = rtl8367c_setAsicRegBit(RTL8367C_REG_REG_TO_ECO4, 5, 1)) != RT_ERR_OK)
+                return retVal;
+
+            if((retVal = rtl8367c_setAsicRegBit(RTL8367C_REG_REG_TO_ECO4, 7, 1)) != RT_ERR_OK)
+                return retVal;
+
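+            /* Download the fiber-mode firmware into the embedded 8051:
+               hold the 8051 in reset, enable it, open the instruction-ROM
+               window at 0xE000, stream the selected image, then close the
+               window and release the reset. */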
+            if ((retVal = rtl8367c_setAsicRegBit(RTL8367C_REG_CHIP_RESET, RTL8367C_DW8051_RST_OFFSET, 1)) != RT_ERR_OK)
+                return retVal;
+
+            if ((retVal = rtl8367c_setAsicRegBit(RTL8367C_REG_MISCELLANEOUS_CONFIGURE0, RTL8367C_DW8051_EN_OFFSET, 1)) != RT_ERR_OK)
+                return retVal;
+
+            if ((retVal = rtl8367c_setAsicRegBit(RTL8367C_REG_DW8051_RDY, RTL8367C_ACS_IROM_ENABLE_OFFSET, 1)) != RT_ERR_OK)
+                return retVal;
+
+            if ((retVal = rtl8367c_setAsicRegBit(RTL8367C_REG_DW8051_RDY, RTL8367C_IROM_MSB_OFFSET, 0)) != RT_ERR_OK)
+                return retVal;
+
+            if(mode == EXT_1000X_100FX)
+            {
+                for(idx = 0; idx < FIBER2_AUTO_INIT_SIZE; idx++)
+                {
+                    if ((retVal = rtl8367c_setAsicReg(0xE000 + idx, (rtk_uint32)Fiber2_Auto[idx])) != RT_ERR_OK)
+                        return retVal;
+                }
+            }
+
+            if(mode == EXT_1000X)
+            {
+                for(idx = 0; idx < FIBER2_1G_INIT_SIZE; idx++)
+                {
+                    if ((retVal = rtl8367c_setAsicReg(0xE000 + idx, (rtk_uint32)Fiber2_1G[idx])) != RT_ERR_OK)
+                        return retVal;
+                }
+            }
+
+            if(mode == EXT_100FX)
+            {
+                for(idx = 0; idx < FIBER2_100M_INIT_SIZE; idx++)
+                {
+                    if ((retVal = rtl8367c_setAsicReg(0xE000 + idx, (rtk_uint32)Fiber2_100M[idx])) != RT_ERR_OK)
+                        return retVal;
+                }
+            }
+
+            if ((retVal = rtl8367c_setAsicRegBit(RTL8367C_REG_DW8051_RDY, RTL8367C_IROM_MSB_OFFSET, 0)) != RT_ERR_OK)
+                return retVal;
+
+            if ((retVal = rtl8367c_setAsicRegBit(RTL8367C_REG_DW8051_RDY, RTL8367C_ACS_IROM_ENABLE_OFFSET, 0)) != RT_ERR_OK)
+                return retVal;
+
+            if ((retVal = rtl8367c_setAsicRegBit(RTL8367C_REG_CHIP_RESET, RTL8367C_DW8051_RST_OFFSET, 0)) != RT_ERR_OK)
+                return retVal;
+        }
+
+        if(mode == EXT_GMII)
+        {
+            if( (retVal = rtl8367c_setAsicRegBit(RTL8367C_REG_EXT0_RGMXF, RTL8367C_EXT0_RGTX_INV_OFFSET, 1)) != RT_ERR_OK)
+                return retVal;
+
+            if( (retVal = rtl8367c_setAsicRegBit(RTL8367C_REG_EXT1_RGMXF, RTL8367C_EXT1_RGTX_INV_OFFSET, 1)) != RT_ERR_OK)
+                return retVal;
+
+            if( (retVal = rtl8367c_setAsicRegBits(RTL8367C_REG_EXT_TXC_DLY, RTL8367C_EXT1_GMII_TX_DELAY_MASK, 5)) != RT_ERR_OK)
+                return retVal;
+
+            if( (retVal = rtl8367c_setAsicRegBits(RTL8367C_REG_EXT_TXC_DLY, RTL8367C_EXT0_GMII_TX_DELAY_MASK, 6)) != RT_ERR_OK)
+                return retVal;
+        }
+
+        /* Serdes reset */
+        if( (mode == EXT_TMII_MAC) || (mode == EXT_TMII_PHY) )
+        {
+            if( (retVal = rtl8367c_setAsicRegBit(RTL8367C_REG_BYPASS_LINE_RATE, id, 1)) != RT_ERR_OK)
+                return retVal;
+        }
+        else
+        {
+            if( (retVal = rtl8367c_setAsicRegBit(RTL8367C_REG_BYPASS_LINE_RATE, id, 0)) != RT_ERR_OK)
+                return retVal;
+        }
+
+        if( (mode == EXT_SGMII) || (mode == EXT_HSGMII) )
+        {
+            if(id != 1)
+                return RT_ERR_PORT_ID;
+
+            if((retVal = rtl8367c_setAsicReg(0x13C0, 0x0249)) != RT_ERR_OK)
+                return retVal;
+
+            if((retVal = rtl8367c_getAsicReg(0x13C1, &option)) != RT_ERR_OK)
+                return retVal;
+
+            if((retVal = rtl8367c_setAsicReg(0x13C0, 0x0000)) != RT_ERR_OK)
+                return retVal;
+        }
+
+        if(mode == EXT_SGMII)
+        {
+            if(option == 0)
+            {
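+                /* Indirect serdes write: load INDACS_DATA, then the target
+                   INDACS_ADR, then trigger the access with INDACS_CMD = 0x00C0. */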
+                for(i = 0; i <= 7; i++)
+                {
+                    if( (retVal = rtl8367c_setAsicReg(RTL8367C_REG_SDS_INDACS_DATA, redData[i][0])) != RT_ERR_OK)
+                        return retVal;
+
+                    if( (retVal = rtl8367c_setAsicReg(RTL8367C_REG_SDS_INDACS_ADR, redData[i][1])) != RT_ERR_OK)
+                        return retVal;
+
+                    if( (retVal = rtl8367c_setAsicReg(RTL8367C_REG_SDS_INDACS_CMD, 0x00C0)) != RT_ERR_OK)
+                        return retVal;
+                }
+            }
+            else
+            {
+                for(i = 0; i <= 7; i++)
+                {
+                    if( (retVal = rtl8367c_setAsicReg(RTL8367C_REG_SDS_INDACS_DATA, redDataSB[i][0])) != RT_ERR_OK)
+                        return retVal;
+
+                    if( (retVal = rtl8367c_setAsicReg(RTL8367C_REG_SDS_INDACS_ADR, redDataSB[i][1])) != RT_ERR_OK)
+                        return retVal;
+
+                    if( (retVal = rtl8367c_setAsicReg(RTL8367C_REG_SDS_INDACS_CMD, 0x00C0)) != RT_ERR_OK)
+                        return retVal;
+                }
+            }
+        }
+
+        if(mode == EXT_HSGMII)
+        {
+            if(option == 0)
+            {
+                if( (retVal = rtl8367c_setAsicReg(0x13c2, 0x0249)) != RT_ERR_OK)
+                    return retVal;
+
+                if( (retVal = rtl8367c_getAsicReg(0x1301, &regValue)) != RT_ERR_OK)
+                    return retVal;
+
+                if( (retVal = rtl8367c_setAsicReg(0x13c2, 0x0000)) != RT_ERR_OK)
+                    return retVal;
+
+                if ( ((regValue & 0x00F0) >> 4) == 0x0001)
+                {
+                    for(i = 0; i <= 8; i++)
+                    {
+                        if( (retVal = rtl8367c_setAsicReg(RTL8367C_REG_SDS_INDACS_DATA, redData1[i][0])) != RT_ERR_OK)
+                            return retVal;
+
+                        if( (retVal = rtl8367c_setAsicReg(RTL8367C_REG_SDS_INDACS_ADR, redData1[i][1])) != RT_ERR_OK)
+                            return retVal;
+
+                        if( (retVal = rtl8367c_setAsicReg(RTL8367C_REG_SDS_INDACS_CMD, 0x00C0)) != RT_ERR_OK)
+                            return retVal;
+                    }
+                }
+                else if ( ((regValue & 0x00F0) >> 4) == 0x0005)
+                {
+                    for(i = 0; i <= 8; i++)
+                    {
+                        if( (retVal = rtl8367c_setAsicReg(RTL8367C_REG_SDS_INDACS_DATA, redData5[i][0])) != RT_ERR_OK)
+                            return retVal;
+
+                        if( (retVal = rtl8367c_setAsicReg(RTL8367C_REG_SDS_INDACS_ADR, redData5[i][1])) != RT_ERR_OK)
+                            return retVal;
+
+                        if( (retVal = rtl8367c_setAsicReg(RTL8367C_REG_SDS_INDACS_CMD, 0x00C0)) != RT_ERR_OK)
+                            return retVal;
+                    }
+                }
+                else if ( ((regValue & 0x00F0) >> 4) == 0x0006)
+                {
+                    for(i = 0; i <= 8; i++)
+                    {
+                        if( (retVal = rtl8367c_setAsicReg(RTL8367C_REG_SDS_INDACS_DATA, redData6[i][0])) != RT_ERR_OK)
+                            return retVal;
+
+                        if( (retVal = rtl8367c_setAsicReg(RTL8367C_REG_SDS_INDACS_ADR, redData6[i][1])) != RT_ERR_OK)
+                            return retVal;
+
+                        if( (retVal = rtl8367c_setAsicReg(RTL8367C_REG_SDS_INDACS_CMD, 0x00C0)) != RT_ERR_OK)
+                            return retVal;
+                    }
+                }
+                else if ( ((regValue & 0x00F0) >> 4) == 0x0008)
+                {
+                    for(i = 0; i <= 8; i++)
+                    {
+                        if( (retVal = rtl8367c_setAsicReg(RTL8367C_REG_SDS_INDACS_DATA, redData8[i][0])) != RT_ERR_OK)
+                            return retVal;
+
+                        if( (retVal = rtl8367c_setAsicReg(RTL8367C_REG_SDS_INDACS_ADR, redData8[i][1])) != RT_ERR_OK)
+                            return retVal;
+
+                        if( (retVal = rtl8367c_setAsicReg(RTL8367C_REG_SDS_INDACS_CMD, 0x00C0)) != RT_ERR_OK)
+                            return retVal;
+                    }
+                }
+                else if ( ((regValue & 0x00F0) >> 4) == 0x0009)
+                {
+                    for(i = 0; i <= 8; i++)
+                    {
+                        if( (retVal = rtl8367c_setAsicReg(RTL8367C_REG_SDS_INDACS_DATA, redData9[i][0])) != RT_ERR_OK)
+                            return retVal;
+
+                        if( (retVal = rtl8367c_setAsicReg(RTL8367C_REG_SDS_INDACS_ADR, redData9[i][1])) != RT_ERR_OK)
+                            return retVal;
+
+                        if( (retVal = rtl8367c_setAsicReg(RTL8367C_REG_SDS_INDACS_CMD, 0x00C0)) != RT_ERR_OK)
+                            return retVal;
+                    }
+                }
+            }
+            else
+            {
+                for(i = 0; i <= 8; i++)
+                {
+                    if( (retVal = rtl8367c_setAsicReg(RTL8367C_REG_SDS_INDACS_DATA, redDataHB[i][0])) != RT_ERR_OK)
+                        return retVal;
+
+                    if( (retVal = rtl8367c_setAsicReg(RTL8367C_REG_SDS_INDACS_ADR, redDataHB[i][1])) != RT_ERR_OK)
+                        return retVal;
+
+                    if( (retVal = rtl8367c_setAsicReg(RTL8367C_REG_SDS_INDACS_CMD, 0x00C0)) != RT_ERR_OK)
+                        return retVal;
+                }
+            }
+        }
+
+        /* Only ext port 1 is wired to the serdes, so only it updates the SGMII/HSGMII selects */
+        if(id == 1)
+        {
+
+            if(mode == EXT_SGMII)
+            {
+                if( (retVal = rtl8367c_setAsicRegBit(RTL8367C_REG_SDS_MISC, RTL8367C_CFG_MAC8_SEL_SGMII_OFFSET, 1)) != RT_ERR_OK)
+                    return retVal;
+
+                if( (retVal = rtl8367c_setAsicRegBit(RTL8367C_REG_SDS_MISC, RTL8367C_CFG_MAC8_SEL_HSGMII_OFFSET, 0)) != RT_ERR_OK)
+                    return retVal;
+            }
+            else if(mode == EXT_HSGMII)
+            {
+                if( (retVal = rtl8367c_setAsicRegBit(RTL8367C_REG_SDS_MISC, RTL8367C_CFG_MAC8_SEL_SGMII_OFFSET, 0)) != RT_ERR_OK)
+                    return retVal;
+
+                if( (retVal = rtl8367c_setAsicRegBit(RTL8367C_REG_SDS_MISC, RTL8367C_CFG_MAC8_SEL_HSGMII_OFFSET, 1)) != RT_ERR_OK)
+                    return retVal;
+            }
+            else
+            {
+
+                if((mode != EXT_1000X_100FX) && (mode != EXT_1000X) && (mode != EXT_100FX))
+                {
+                    if( (retVal = rtl8367c_setAsicRegBit(RTL8367C_REG_SDS_MISC, RTL8367C_CFG_MAC8_SEL_SGMII_OFFSET, 0)) != RT_ERR_OK)
+                        return retVal;
+
+                    if( (retVal = rtl8367c_setAsicRegBit(RTL8367C_REG_SDS_MISC, RTL8367C_CFG_MAC8_SEL_HSGMII_OFFSET, 0)) != RT_ERR_OK)
+                        return retVal;
+                }
+            }
+        }
+
+        if(0 == id || 1 == id)
+        {
+            if((retVal = rtl8367c_setAsicRegBits(RTL8367C_REG_DIGITAL_INTERFACE_SELECT, RTL8367C_SELECT_GMII_0_MASK << (id * RTL8367C_SELECT_GMII_1_OFFSET), mode)) != RT_ERR_OK)
+                return retVal;
+        }
+        else
+        {
+            if((retVal = rtl8367c_setAsicRegBits(RTL8367C_REG_DIGITAL_INTERFACE_SELECT_1, RTL8367C_SELECT_GMII_2_MASK, mode)) != RT_ERR_OK)
+                return retVal;
+        }
+
+        /* Serdes not reset */
+        if( (mode == EXT_SGMII) || (mode == EXT_HSGMII) )
+        {
+            if( (retVal = rtl8367c_setAsicReg(RTL8367C_REG_SDS_INDACS_DATA, 0x7106)) != RT_ERR_OK)
+                return retVal;
+
+            if( (retVal = rtl8367c_setAsicReg(RTL8367C_REG_SDS_INDACS_ADR, 0x0003)) != RT_ERR_OK)
+                return retVal;
+
+            if( (retVal = rtl8367c_setAsicReg(RTL8367C_REG_SDS_INDACS_CMD, 0x00C0)) != RT_ERR_OK)
+                return retVal;
+        }
+
+        if( (mode == EXT_SGMII) || (mode == EXT_HSGMII) )
+        {
+            if ((retVal = rtl8367c_setAsicRegBit(RTL8367C_REG_CHIP_RESET, RTL8367C_DW8051_RST_OFFSET, 1)) != RT_ERR_OK)
+                return retVal;
+
+            if ((retVal = rtl8367c_setAsicRegBit(RTL8367C_REG_MISCELLANEOUS_CONFIGURE0, RTL8367C_DW8051_EN_OFFSET, 1)) != RT_ERR_OK)
+                return retVal;
+
+            if ((retVal = rtl8367c_setAsicRegBit(RTL8367C_REG_DW8051_RDY, RTL8367C_ACS_IROM_ENABLE_OFFSET, 1)) != RT_ERR_OK)
+                return retVal;
+
+            if ((retVal = rtl8367c_setAsicRegBit(RTL8367C_REG_DW8051_RDY, RTL8367C_IROM_MSB_OFFSET, 0)) != RT_ERR_OK)
+                return retVal;
+
+            for(idx = 0; idx < SGMII_INIT_SIZE; idx++)
+            {
+                if ((retVal = rtl8367c_setAsicReg(0xE000 + idx, (rtk_uint32)Sgmii_Init[idx])) != RT_ERR_OK)
+                    return retVal;
+            }
+
+            if ((retVal = rtl8367c_setAsicRegBit(RTL8367C_REG_DW8051_RDY, RTL8367C_IROM_MSB_OFFSET, 0)) != RT_ERR_OK)
+                return retVal;
+
+            if ((retVal = rtl8367c_setAsicRegBit(RTL8367C_REG_DW8051_RDY, RTL8367C_ACS_IROM_ENABLE_OFFSET, 0)) != RT_ERR_OK)
+                return retVal;
+
+            if ((retVal = rtl8367c_setAsicRegBit(RTL8367C_REG_CHIP_RESET, RTL8367C_DW8051_RST_OFFSET, 0)) != RT_ERR_OK)
+                return retVal;
+        }
+    }
+    else if (2 == type)
+    {
+
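+        /* Same 8051 firmware-download handshake as in the type-1 path,
+           here loading the Fiber1_2_Init image for this chip family. */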
+        if ((retVal = rtl8367c_setAsicRegBit(RTL8367C_REG_CHIP_RESET, RTL8367C_DW8051_RST_OFFSET, 1)) != RT_ERR_OK)
+            return retVal;
+
+        if ((retVal = rtl8367c_setAsicRegBit(RTL8367C_REG_MISCELLANEOUS_CONFIGURE0, RTL8367C_DW8051_EN_OFFSET, 1)) != RT_ERR_OK)
+            return retVal;
+
+        if ((retVal = rtl8367c_setAsicRegBit(RTL8367C_REG_DW8051_RDY, RTL8367C_ACS_IROM_ENABLE_OFFSET, 1)) != RT_ERR_OK)
+            return retVal;
+
+        if ((retVal = rtl8367c_setAsicRegBit(RTL8367C_REG_DW8051_RDY, RTL8367C_IROM_MSB_OFFSET, 0)) != RT_ERR_OK)
+            return retVal;
+        
+        for(idx = 0; idx < FIBER1_2_INIT_SIZE; idx++)
+        {
+            if ((retVal = rtl8367c_setAsicReg(0xE000 + idx, (rtk_uint32)Fiber1_2_Init[idx])) != RT_ERR_OK)
+                return retVal;
+        }
+
+
+        if ((retVal = rtl8367c_setAsicRegBit(RTL8367C_REG_DW8051_RDY, RTL8367C_IROM_MSB_OFFSET, 0)) != RT_ERR_OK)
+            return retVal;
+
+        if ((retVal = rtl8367c_setAsicRegBit(RTL8367C_REG_DW8051_RDY, RTL8367C_ACS_IROM_ENABLE_OFFSET, 0)) != RT_ERR_OK)
+            return retVal;
+
+        if ((retVal = rtl8367c_setAsicRegBit(RTL8367C_REG_CHIP_RESET, RTL8367C_DW8051_RST_OFFSET, 0)) != RT_ERR_OK)
+            return retVal;
+
+
+        if( (mode == EXT_TMII_MAC) || (mode == EXT_TMII_PHY) )
+        {
+            if( (retVal = rtl8367c_setAsicRegBit(RTL8367C_REG_BYPASS_LINE_RATE, id+2, 1)) != RT_ERR_OK)
+                return retVal;
+        }
+        else
+        {
+            if( (retVal = rtl8367c_setAsicRegBit(RTL8367C_REG_BYPASS_LINE_RATE, id+2, 0)) != RT_ERR_OK)
+                return retVal;
+        }
+
+
+        if (id == 1)
+        {
+            if(mode == EXT_HSGMII)
+                return RT_ERR_PORT_ID;
+
+            if (mode == EXT_SGMII)
+            {
+
+                if ((retVal = rtl8367c_setAsicRegBits(0x1305, 0xf0, 0)) != RT_ERR_OK)
+                    return retVal;
+
+
+                if ((retVal = rtl8367c_setAsicRegBit(0x1d92, 14, 1)) != RT_ERR_OK)
+                    return retVal;
+            }
+            else if (mode == EXT_1000X || mode == EXT_100FX || mode == EXT_1000X_100FX)
+            {
+
+                if ((retVal = rtl8367c_setAsicRegBits(0x1305, 0xf0, 0)) != RT_ERR_OK)
+                    return retVal;
+
+
+                if ((retVal = rtl8367c_setAsicRegBit(0x1d92, 14, 0)) != RT_ERR_OK)
+                    return retVal;
+
+
+                if((retVal = rtl8367c_setAsicRegBit(0x6210, 11, 0)) != RT_ERR_OK)
+                    return retVal;
+            }
+            else
+            {
+
+                if ((retVal = rtl8367c_setAsicRegBits(0x1305, 0xf0, mode)) != RT_ERR_OK)
+                    return retVal;
+
+
+                if ((retVal = rtl8367c_setAsicRegBit(0x1d92, 14, 0)) != RT_ERR_OK)
+                    return retVal;
+            }
+
+            if ((retVal = rtl8367c_setAsicRegBits(0x1d92, 0x1f00, 0x1f)) != RT_ERR_OK)
+                return retVal;
+        }
+        else if(id == 2)
+        {
+            if (mode == EXT_HSGMII)
+            {
+                if ((retVal = rtl8367c_setAsicReg(0x130, 7)) != RT_ERR_OK)
+                    return retVal;
+
+                if ((retVal = rtl8367c_setAsicReg(0x39f, 7)) != RT_ERR_OK)
+                    return retVal;
+
+                if ((retVal = rtl8367c_setAsicReg(0x3fa, 7)) != RT_ERR_OK)
+                    return retVal;
+            }
+            else
+            {
+                if ((retVal = rtl8367c_setAsicReg(0x130, 1)) != RT_ERR_OK)
+                    return retVal;
+
+                if ((retVal = rtl8367c_setAsicReg(0x39f, 1)) != RT_ERR_OK)
+                    return retVal;
+
+                if ((retVal = rtl8367c_setAsicReg(0x3fa, 4)) != RT_ERR_OK)
+                    return retVal;
+
+            }
+
+
+            if (mode == EXT_SGMII)
+            {
+
+                if ((retVal = rtl8367c_setAsicRegBits(0x13c3, 0xf, 0)) != RT_ERR_OK)
+                    return retVal;
+
+
+                if ((retVal = rtl8367c_setAsicRegBit(0x1d92, 6, 1)) != RT_ERR_OK)
+                    return retVal;
+
+
+                if ((retVal = rtl8367c_setAsicRegBit(0x1d92, 7, 0)) != RT_ERR_OK)
+                    return retVal;
+            }
+            else if (mode == EXT_HSGMII)
+            {
+
+                if ((retVal = rtl8367c_setAsicRegBits(0x13c3, 0xf, 0)) != RT_ERR_OK)
+                    return retVal;
+
+
+                if ((retVal = rtl8367c_setAsicRegBit(0x1d92, 6, 0)) != RT_ERR_OK)
+                    return retVal;
+
+
+                if ((retVal = rtl8367c_setAsicRegBit(0x1d92, 7, 1)) != RT_ERR_OK)
+                    return retVal;
+            }
+            else if (mode == EXT_1000X || mode == EXT_100FX || mode == EXT_1000X_100FX)
+            {
+
+                if ((retVal = rtl8367c_setAsicRegBits(0x13c3, 0xf, 0)) != RT_ERR_OK)
+                    return retVal;
+
+
+                if ((retVal = rtl8367c_setAsicRegBit(0x1d92, 6, 0)) != RT_ERR_OK)
+                    return retVal;
+
+
+                if ((retVal = rtl8367c_setAsicRegBit(0x1d92, 7, 0)) != RT_ERR_OK)
+                    return retVal;
+
+
+                if((retVal = rtl8367c_setAsicRegBit(0x6200, 11, 0)) != RT_ERR_OK)
+                    return retVal;
+            }
+            else
+            {
+
+                if ((retVal = rtl8367c_setAsicRegBits(0x13c3, 0xf, mode)) != RT_ERR_OK)
+                    return retVal;
+
+
+                if ((retVal = rtl8367c_setAsicRegBit(0x1d92, 6, 0)) != RT_ERR_OK)
+                    return retVal;
+
+
+                if ((retVal = rtl8367c_setAsicRegBit(0x1d92, 7, 0)) != RT_ERR_OK)
+                    return retVal;
+            }
+
+            if ((retVal = rtl8367c_setAsicRegBits(0x1d92, 0x1f, 0x1f)) != RT_ERR_OK)
+                return retVal;
+        }
+
+
+        if (mode == EXT_RGMII)
+        {
+
+            if ((retVal = rtl8367c_setAsicReg(RTL8367C_REG_PARA_LED_IO_EN3, 0)) != RT_ERR_OK)
+                return retVal;
+
+            if ((retVal = rtl8367c_setAsicReg(RTL8367C_REG_PARA_LED_IO_EN1, 0)) != RT_ERR_OK)
+                return retVal;
+
+            if ((retVal = rtl8367c_setAsicReg(RTL8367C_REG_PARA_LED_IO_EN2, 0)) != RT_ERR_OK)
+                return retVal;
+
+
+            if (id == 1)
+            {
+
+                if ((retVal = rtl8367c_setAsicRegBit(0x1303, 9, 1)) != RT_ERR_OK)
+                    return retVal;
+
+
+                if ((retVal = rtl8367c_setAsicRegBit(0x1303, 6, 1)) != RT_ERR_OK)
+                    return retVal;
+
+
+                if ((retVal = rtl8367c_setAsicRegBit(0x1303, 4, 1)) != RT_ERR_OK)
+                    return retVal;
+
+
+                if ((retVal = rtl8367c_setAsicRegBit(0x1303, 1, 0)) != RT_ERR_OK)
+                    return retVal;
+
+
+                if ((retVal = rtl8367c_setAsicRegBit(0x1307, 3, 1)) != RT_ERR_OK)
+                    return retVal;
+
+
+                if ((retVal = rtl8367c_setAsicRegBits(0x13f9, 0x38, 0)) != RT_ERR_OK)
+                    return retVal;
+
+
+                if ((retVal = rtl8367c_setAsicRegBits(0x1307, 0x7, 0)) != RT_ERR_OK)
+                    return retVal;
+
+
+                if ((retVal = rtl8367c_setAsicRegBits(0x1304, 0x7000, 4)) != RT_ERR_OK)
+                    return retVal;
+
+
+                if ((retVal = rtl8367c_setAsicRegBits(0x13f9, 0x700, 4)) != RT_ERR_OK)
+                    return retVal;
+            }
+            else if (id == 2)
+            {
+
+                if ((retVal = rtl8367c_setAsicRegBit(0x1303, 10, 1)) != RT_ERR_OK)
+                    return retVal;
+
+                /* driving 1 */
+                if ((retVal = rtl8367c_setAsicRegBit(0x13e2, 2, 1)) != RT_ERR_OK)
+                    return retVal;
+
+                /* driving 1 */
+                if ((retVal = rtl8367c_setAsicRegBit(0x13e2, 1, 1)) != RT_ERR_OK)
+                    return retVal;
+
+
+                if ((retVal = rtl8367c_setAsicRegBit(0x13e2, 0, 0)) != RT_ERR_OK)
+                    return retVal;
+
+
+                if ((retVal = rtl8367c_setAsicRegBit(0x13c5, 3, 1)) != RT_ERR_OK)
+                    return retVal;
+
+
+                if ((retVal = rtl8367c_setAsicRegBits(0x13f9, 0x1c0, 0)) != RT_ERR_OK)
+                    return retVal;
+
+
+                if ((retVal = rtl8367c_setAsicRegBits(0x13c5, 0x7, 0)) != RT_ERR_OK)
+                    return retVal;
+
+
+                if ((retVal = rtl8367c_setAsicRegBits(0x13e2, 0x1c0, 4)) != RT_ERR_OK)
+                    return retVal;
+
+
+                if ((retVal = rtl8367c_setAsicRegBits(0x13e2, 0x38, 4)) != RT_ERR_OK)
+                    return retVal;
+            }
+        }
+        else if (mode == EXT_SGMII)
+        {
+            if (id == 1)
+            {
+                /*sds 1     reg 1    page 0x21     write value  0xec91*/
+                if( (retVal = rtl8367c_setAsicReg(RTL8367C_REG_SDS_INDACS_DATA, 0xec91)) != RT_ERR_OK)
+                    return retVal;
+
+                if( (retVal = rtl8367c_setAsicReg(RTL8367C_REG_SDS_INDACS_ADR, (0x21<<5) | 1)) != RT_ERR_OK)
+                    return retVal;
+
+                if( (retVal = rtl8367c_setAsicReg(RTL8367C_REG_SDS_INDACS_CMD, 0x00C1)) != RT_ERR_OK)
+                    return retVal;
+
+                /*sds 1     reg 5    page 0x24     write value  0x5825*/
+                if( (retVal = rtl8367c_setAsicReg(RTL8367C_REG_SDS_INDACS_DATA, 0x5825)) != RT_ERR_OK)
+                    return retVal;
+
+                if( (retVal = rtl8367c_setAsicReg(RTL8367C_REG_SDS_INDACS_ADR, (0x24<<5) | 5)) != RT_ERR_OK)
+                    return retVal;
+
+                if( (retVal = rtl8367c_setAsicReg(RTL8367C_REG_SDS_INDACS_CMD, 0x00C1)) != RT_ERR_OK)
+                    return retVal;
+
+                if ((retVal = rtl8367c_setAsicRegBits(0x1d92, 0x1f00, 2)) != RT_ERR_OK)
+                    return retVal;
+
+            }
+            else if (id == 2)
+            {
+                /*sds 0     reg 0    page 0x28     write value  0x942c*/
+                if( (retVal = rtl8367c_setAsicReg(RTL8367C_REG_SDS_INDACS_DATA, 0x942c)) != RT_ERR_OK)
+                    return retVal;
+
+                if( (retVal = rtl8367c_setAsicReg(RTL8367C_REG_SDS_INDACS_ADR, (0x28<<5) | 0)) != RT_ERR_OK)
+                    return retVal;
+
+                if( (retVal = rtl8367c_setAsicReg(RTL8367C_REG_SDS_INDACS_CMD, 0x00C0)) != RT_ERR_OK)
+                    return retVal;
+
+                /*sds 0     reg 0    page 0x24     write value  0x942c*/
+                if( (retVal = rtl8367c_setAsicReg(RTL8367C_REG_SDS_INDACS_DATA, 0x942c)) != RT_ERR_OK)
+                    return retVal;
+
+                if( (retVal = rtl8367c_setAsicReg(RTL8367C_REG_SDS_INDACS_ADR, (0x24<<5) | 0)) != RT_ERR_OK)
+                    return retVal;
+
+                if( (retVal = rtl8367c_setAsicReg(RTL8367C_REG_SDS_INDACS_CMD, 0x00C0)) != RT_ERR_OK)
+                    return retVal;
+
+                /*sds 0     reg 5    page 0x21     write value  0x8dc3*/
+                if( (retVal = rtl8367c_setAsicReg(RTL8367C_REG_SDS_INDACS_DATA, 0x8dc3)) != RT_ERR_OK)
+                    return retVal;
+
+                if( (retVal = rtl8367c_setAsicReg(RTL8367C_REG_SDS_INDACS_ADR, (0x21<<5) | 5)) != RT_ERR_OK)
+                    return retVal;
+
+                if( (retVal = rtl8367c_setAsicReg(RTL8367C_REG_SDS_INDACS_CMD, 0x00C0)) != RT_ERR_OK)
+                    return retVal;
+
+                if ((retVal = rtl8367c_setAsicRegBits(0x1d92, 0x1f, 2)) != RT_ERR_OK)
+                    return retVal;
+
+            }
+        }
+        else if (mode == EXT_HSGMII)
+        {
+            if (id == 2)
+            {
+                /*sds 0     reg 0    page 0x28     write value  0x942c*/
+                if( (retVal = rtl8367c_setAsicReg(RTL8367C_REG_SDS_INDACS_DATA, 0x942c)) != RT_ERR_OK)
+                    return retVal;
+
+                if( (retVal = rtl8367c_setAsicReg(RTL8367C_REG_SDS_INDACS_ADR, (0x28<<5) | 0)) != RT_ERR_OK)
+                    return retVal;
+
+                if( (retVal = rtl8367c_setAsicReg(RTL8367C_REG_SDS_INDACS_CMD, 0x00C0)) != RT_ERR_OK)
+                    return retVal;
+
+                /*sds 0     reg 0    page 0x24     write value  0x942c*/
+                if( (retVal = rtl8367c_setAsicReg(RTL8367C_REG_SDS_INDACS_DATA, 0x942c)) != RT_ERR_OK)
+                    return retVal;
+
+                if( (retVal = rtl8367c_setAsicReg(RTL8367C_REG_SDS_INDACS_ADR, (0x24<<5) | 0)) != RT_ERR_OK)
+                    return retVal;
+
+                if( (retVal = rtl8367c_setAsicReg(RTL8367C_REG_SDS_INDACS_CMD, 0x00C0)) != RT_ERR_OK)
+                    return retVal;
+
+                /*sds 0     reg 5    page 0x21     write value  0x8dc3*/
+                if( (retVal = rtl8367c_setAsicReg(RTL8367C_REG_SDS_INDACS_DATA, 0x8dc3)) != RT_ERR_OK)
+                    return retVal;
+
+                if( (retVal = rtl8367c_setAsicReg(RTL8367C_REG_SDS_INDACS_ADR, (0x21<<5) | 5)) != RT_ERR_OK)
+                    return retVal;
+
+                if( (retVal = rtl8367c_setAsicReg(RTL8367C_REG_SDS_INDACS_CMD, 0x00C0)) != RT_ERR_OK)
+                    return retVal;
+
+
+                /* optimize HSGMII performance while RGMII is in use */
+                /*sds 0     reg 9     page 0x21     write value 0x3931*/
+                if( (retVal = rtl8367c_setAsicReg(RTL8367C_REG_SDS_INDACS_DATA, 0x3931)) != RT_ERR_OK)
+                    return retVal;
+
+                if( (retVal = rtl8367c_setAsicReg(RTL8367C_REG_SDS_INDACS_ADR, (0x21<<5) | 9)) != RT_ERR_OK)
+                    return retVal;
+
+                if( (retVal = rtl8367c_setAsicReg(RTL8367C_REG_SDS_INDACS_CMD, 0x00C0)) != RT_ERR_OK)
+                    return retVal;
+
+
+                if ((retVal = rtl8367c_setAsicRegBits(0x1d92, 0x1f, 0x12)) != RT_ERR_OK)
+                    return retVal;
+
+            }
+        }
+        else if (mode == EXT_1000X)
+        {
+            if (id == 1)
+            {
+
+                if( (retVal = rtl8367c_setAsicSdsReg(1, 1, 0x21, 0xec91)) != RT_ERR_OK)
+                    return retVal;
+                if( (retVal = rtl8367c_setAsicSdsReg(1, 5, 0x24, 0x5825)) != RT_ERR_OK)
+                    return retVal;
+                if ((retVal = rtl8367c_setAsicRegBits(0x1d92, 0x1f00, 4)) != RT_ERR_OK)
+                    return retVal;
+
+                if ((retVal = rtl8367c_setAsicRegBits(0x1d92, 0x1f00, 0x1f)) != RT_ERR_OK)
+                    return retVal;
+
+                /*patch speed change sds1 1000M*/
+                if( (retVal = rtl8367c_getAsicSdsReg(1, 4, 0, &regValue)) != RT_ERR_OK)
+                    return retVal;
+                regValue &= 0xFFFF0FFF;
+                regValue |= 0x9000;
+                if( (retVal = rtl8367c_setAsicSdsReg(1, 4, 0, regValue)) != RT_ERR_OK)
+                    return retVal;
+
+                if( (retVal = rtl8367c_getAsicSdsReg(1, 0, 2, &regValue)) != RT_ERR_OK)
+                    return retVal;
+                regValue &= 0xFFFFdFFF;
+                regValue |= 0x40;
+                if( (retVal = rtl8367c_setAsicSdsReg(1, 0, 2, regValue)) != RT_ERR_OK)
+                    return retVal;
+
+
+                if( (retVal = rtl8367c_getAsicSdsReg(1, 4, 0, &regValue)) != RT_ERR_OK)
+                    return retVal;
+                regValue &= 0xFFFFEFFF;
+                if( (retVal = rtl8367c_setAsicSdsReg(1, 4, 0, regValue)) != RT_ERR_OK)
+                    return retVal;
+
+                if ((retVal = rtl8367c_setAsicRegBits(0x1d92, 0x1f00, 4)) != RT_ERR_OK)
+                    return retVal;
+                if ((retVal = rtl8367c_setAsicRegBits(0x1d92, 0x6000, 0)) != RT_ERR_OK)
+                    return retVal;
+
+            }
+            else if (id == 2)
+            {
+                if( (retVal = rtl8367c_setAsicSdsReg(0, 0, 0x28, 0x942c)) != RT_ERR_OK)
+                    return retVal;
+                if( (retVal = rtl8367c_setAsicSdsReg(0, 0, 0x24, 0x942c)) != RT_ERR_OK)
+                    return retVal;
+                if( (retVal = rtl8367c_setAsicSdsReg(0, 5, 0x21, 0x8dc3)) != RT_ERR_OK)
+                    return retVal;
+
+                if ((retVal = rtl8367c_setAsicRegBits(0x1d92, 0x1f, 4)) != RT_ERR_OK)
+                    return retVal;
+
+                /*patch speed change sds0 1000M*/
+                if ((retVal = rtl8367c_setAsicRegBits(0x1d92, 0x1f, 0x1f)) != RT_ERR_OK)
+                    return retVal;
+
+                if( (retVal = rtl8367c_getAsicSdsReg(0, 4, 0, &regValue)) != RT_ERR_OK)
+                    return retVal;
+                regValue &= 0xFFFF0FFF;
+                regValue |= 0x9000;
+                if( (retVal = rtl8367c_setAsicSdsReg(0, 4, 0, regValue)) != RT_ERR_OK)
+                    return retVal;
+
+                if( (retVal = rtl8367c_getAsicSdsReg(0, 0, 2, &regValue)) != RT_ERR_OK)
+                    return retVal;
+                regValue &= 0xFFFFDFFF;
+                regValue |= 0x40;
+                if( (retVal = rtl8367c_setAsicSdsReg(0, 0, 2, regValue)) != RT_ERR_OK)
+                    return retVal;
+
+                if( (retVal = rtl8367c_getAsicSdsReg(0, 4, 0, &regValue)) != RT_ERR_OK)
+                    return retVal;
+                regValue &= 0xFFFFEFFF;
+                if( (retVal = rtl8367c_setAsicSdsReg(0, 4, 0, regValue)) != RT_ERR_OK)
+                    return retVal;
+
+                if ((retVal = rtl8367c_setAsicRegBits(0x1d92, 0x1f, 4)) != RT_ERR_OK)
+                    return retVal;
+                if ((retVal = rtl8367c_setAsicRegBits(0x1d92, 0xe0, 0)) != RT_ERR_OK)
+                    return retVal;
+
+            }
+        }
+        else if (mode == EXT_100FX)
+        {
+            if (id == 1)
+            {
+                if( (retVal = rtl8367c_setAsicSdsReg(1, 1, 0x21, 0xec91)) != RT_ERR_OK)
+                    return retVal;
+                if( (retVal = rtl8367c_setAsicSdsReg(1, 5, 0x24, 0x5825)) != RT_ERR_OK)
+                    return retVal;
+                if ((retVal = rtl8367c_setAsicRegBits(0x1d92, 0x1f00, 5)) != RT_ERR_OK)
+                    return retVal;
+
+                /*patch speed change sds1 100M*/
+                if ((retVal = rtl8367c_setAsicRegBits(0x1d92, 0x1f00, 0x1f)) != RT_ERR_OK)
+                    return retVal;
+                if( (retVal = rtl8367c_getAsicSdsReg(1, 4, 0, &regValue)) != RT_ERR_OK)
+                    return retVal;
+                regValue &= 0xFFFF0FFF;
+                regValue |= 0xb000;
+                if( (retVal = rtl8367c_setAsicSdsReg(1, 4, 0, regValue)) != RT_ERR_OK)
+                    return retVal;
+
+                if( (retVal = rtl8367c_getAsicSdsReg(1, 0, 2, &regValue)) != RT_ERR_OK)
+                    return retVal;
+                regValue &= 0xFFFFFFBF;
+                regValue |= 0x2000;
+                if( (retVal = rtl8367c_setAsicSdsReg(1, 0, 2, regValue)) != RT_ERR_OK)
+                    return retVal;
+#if 0
+                if( (retVal = rtl8367c_setAsicReg(0x6214, 0x1a0)) != RT_ERR_OK)
+                    return retVal;
+#endif
+                if( (retVal = rtl8367c_getAsicSdsReg(1, 4, 0, &regValue)) != RT_ERR_OK)
+                    return retVal;
+                regValue &= 0xFFFFEFFF;
+                if( (retVal = rtl8367c_setAsicSdsReg(1, 4, 0, regValue)) != RT_ERR_OK)
+                    return retVal;
+
+                if ((retVal = rtl8367c_setAsicRegBits(0x1d92, 0x1f00, 5)) != RT_ERR_OK)
+                    return retVal;
+                if ((retVal = rtl8367c_setAsicRegBits(0x1d92, 0x6000, 0)) != RT_ERR_OK)
+                    return retVal;
+            }
+            else if (id == 2)
+            {
+                if( (retVal = rtl8367c_setAsicSdsReg(0, 0, 0x28, 0x942c)) != RT_ERR_OK)
+                    return retVal;
+                if( (retVal = rtl8367c_setAsicSdsReg(0, 0, 0x24, 0x942c)) != RT_ERR_OK)
+                    return retVal;
+                if( (retVal = rtl8367c_setAsicSdsReg(0, 5, 0x21, 0x8dc3)) != RT_ERR_OK)
+                    return retVal;
+                if ((retVal = rtl8367c_setAsicRegBits(0x1d92, 0x1f, 5)) != RT_ERR_OK)
+                    return retVal;
+
+                /*patch speed change sds0 100M*/
+                if ((retVal = rtl8367c_setAsicRegBits(0x1d92, 0x1f, 0x1f)) != RT_ERR_OK)
+                    return retVal;
+
+                if( (retVal = rtl8367c_getAsicSdsReg(0, 4, 0, &regValue)) != RT_ERR_OK)
+                    return retVal;
+                regValue &= 0xFFFF0FFF;
+                regValue |= 0xb000;
+                if( (retVal = rtl8367c_setAsicSdsReg(0, 4, 0, regValue)) != RT_ERR_OK)
+                    return retVal;
+
+                if( (retVal = rtl8367c_getAsicSdsReg(0, 0, 2, &regValue)) != RT_ERR_OK)
+                    return retVal;
+                regValue &= 0xFFFFFFBF;
+                regValue |= 0x2000;
+                if( (retVal = rtl8367c_setAsicSdsReg(0, 0, 2, regValue)) != RT_ERR_OK)
+                    return retVal;
+
+                if( (retVal = rtl8367c_getAsicSdsReg(0, 4, 0, &regValue)) != RT_ERR_OK)
+                    return retVal;
+                regValue &= 0xFFFFEFFF;
+                if( (retVal = rtl8367c_setAsicSdsReg(0, 4, 0, regValue)) != RT_ERR_OK)
+                    return retVal;
+
+                if ((retVal = rtl8367c_setAsicRegBits(0x1d92, 0x1f, 5)) != RT_ERR_OK)
+                    return retVal;
+                if ((retVal = rtl8367c_setAsicRegBits(0x1d92, 0xe0, 0)) != RT_ERR_OK)
+                    return retVal;
+            }
+        }
+        else if (mode == EXT_1000X_100FX)
+        {
+            if (id == 1)
+            {
+                if( (retVal = rtl8367c_setAsicSdsReg(1, 1, 0x21, 0xec91)) != RT_ERR_OK)
+                    return retVal;
+                if( (retVal = rtl8367c_setAsicSdsReg(1, 5, 0x24, 0x5825)) != RT_ERR_OK)
+                    return retVal;
+                if( (retVal = rtl8367c_setAsicSdsReg(1, 13, 0, 0x4616)) != RT_ERR_OK)
+                    return retVal;
+                if( (retVal = rtl8367c_setAsicSdsReg(1, 1, 0, 0xf20)) != RT_ERR_OK)
+                    return retVal;
+                if ((retVal = rtl8367c_setAsicRegBits(0x1d92, 0x1f00, 7)) != RT_ERR_OK)
+                    return retVal;
+            }
+            else if (id == 2)
+            {
+                if( (retVal = rtl8367c_setAsicSdsReg(0, 0, 0x28, 0x942c)) != RT_ERR_OK)
+                    return retVal;
+                if( (retVal = rtl8367c_setAsicSdsReg(0, 0, 0x24, 0x942c)) != RT_ERR_OK)
+                    return retVal;
+                if( (retVal = rtl8367c_setAsicSdsReg(0, 5, 0x21, 0x8dc3)) != RT_ERR_OK)
+                    return retVal;
+                if( (retVal = rtl8367c_setAsicSdsReg(0, 13, 0, 0x4616)) != RT_ERR_OK)
+                    return retVal;
+                if( (retVal = rtl8367c_setAsicSdsReg(0, 1, 0, 0xf20)) != RT_ERR_OK)
+                    return retVal;
+                if ((retVal = rtl8367c_setAsicRegBits(0x1d92, 0x1f, 7)) != RT_ERR_OK)
+                    return retVal;
+            }
+        }
+
+    }
+    else if (3 == type)
+    {
+
+        /* restore designer-provided patch for the Tx FIFO issue when not in
+         * HSGMII 2.5G mode: sds0, page 1, reg 1, bit4 = 0 */
+        if( (retVal = rtl8367c_getAsicSdsReg(0, 1, 1, &regValue)) != RT_ERR_OK)
+            return retVal;
+        regValue &= 0xFFFFFFEF;
+        if( (retVal = rtl8367c_setAsicSdsReg(0, 1, 1, regValue)) != RT_ERR_OK)
+            return retVal;
+
+        /*set for mac 6*/
+        if (1 == id)
+        {
+
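+            /* force port6 linkdown before reconfiguring (mirrors the
+             * "force port7 linkdown" path below); assumed from the
+             * parallel id == 2 branch */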
+            if ((retVal = rtl8367c_setAsicReg(0x137c, 0x1000)) != RT_ERR_OK)
+                    return retVal;
+
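+            /* spin until reg 0x1d9d bit 6 reads 1 (status for the forced
+             * port6 link-down above); note: no timeout, so this can hang */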
+            if ((retVal = rtl8367c_getAsicRegBit(0x1d9d, 6, &reg_data)) != RT_ERR_OK)
+                    return retVal;
+            while(reg_data == 0)
+            {
+                if ((retVal = rtl8367c_getAsicRegBit(0x1d9d, 6, &reg_data)) != RT_ERR_OK)
+                    return retVal;
+            }
+
+            if (mode == EXT_SGMII)
+            {
+
+                if ((retVal = rtl8367c_getAsicRegBit(0x1d3d, 10, &reg_data)) != RT_ERR_OK)
+                    return retVal;
+                if(reg_data == 0)
+                {
+                    if ((retVal = rtl8367c_setAsicRegBits(0x1305, 0xf0, 0)) != RT_ERR_OK)
+                        return retVal;
+                }
+
+                if ((retVal = rtl8367c_setAsicRegBit(0x3f7, 1, 0)) != RT_ERR_OK)
+                    return retVal;
+
+
+
+
+                if ((retVal = rtl8367c_setAsicRegBit(0x1d41, 5, 0)) != RT_ERR_OK)
+                    return retVal;
+                if ((retVal = rtl8367c_setAsicRegBit(0x1d41, 7, 0)) != RT_ERR_OK)
+                    return retVal;
+
+
+                if ((retVal = rtl8367c_setAsicRegBit(0x1d95, 1, 0)) != RT_ERR_OK)
+                    return retVal;
+
+
+                if ((retVal = rtl8367c_setAsicRegBit(0x1d95, 0, 0)) != RT_ERR_OK)
+                    return retVal;
+
+
+                if ((retVal = rtl8367c_setAsicRegBit(0x1d11, 9, 0)) != RT_ERR_OK)
+                    return retVal;
+
+
+                if ((retVal = rtl8367c_setAsicRegBit(0x1d11, 11, 0)) != RT_ERR_OK)
+                    return retVal;
+
+
+
+                if ((retVal = rtl8367c_setAsicRegBit(0x1d95, 13, 1)) != RT_ERR_OK)
+                    return retVal;
+
+
+                if ((retVal = rtl8367c_setAsicRegBits(0x1d95, 0x1f00, 0x1f)) != RT_ERR_OK)
+                    return retVal;
+
+
+                if ((retVal = rtl8367c_setAsicRegBit(0x1d11, 6, 1)) != RT_ERR_OK)
+                    return retVal;
+
+
+                if ((retVal = rtl8367c_setAsicRegBits(0x1d95, 0x1f00, 0x2)) != RT_ERR_OK)
+                    return retVal;
+
+                if ((retVal = rtl8367c_setAsicRegBit(0x1d95, 2, 0)) != RT_ERR_OK)
+                    return retVal;
+
+
+            }
+            else if (mode == EXT_HSGMII)
+            {
+
+                /* designer-provided patch for the Tx FIFO issue when in
+                 * HSGMII 2.5G mode: sds0, page 1, reg 1, bit4 = 1 */
+                if( (retVal = rtl8367c_getAsicSdsReg(0, 1, 1, &regValue)) != RT_ERR_OK)
+                    return retVal;
+                regValue |= 0x10;
+                if( (retVal = rtl8367c_setAsicSdsReg(0, 1, 1, regValue)) != RT_ERR_OK)
+                    return retVal;
+
+
+                if ((retVal = rtl8367c_getAsicRegBit(0x1d3d, 10, &reg_data)) != RT_ERR_OK)
+                    return retVal;
+                if(reg_data == 0)
+                {
+                    if ((retVal = rtl8367c_setAsicRegBits(0x1305, 0xf0, 0)) != RT_ERR_OK)
+                        return retVal;
+                }
+
+                if ((retVal = rtl8367c_setAsicRegBit(0x3f7, 1, 0)) != RT_ERR_OK)
+                    return retVal;
+
+
+                if ((retVal = rtl8367c_setAsicRegBit(0x1d41, 5, 0)) != RT_ERR_OK)
+                    return retVal;
+                if ((retVal = rtl8367c_setAsicRegBit(0x1d41, 7, 0)) != RT_ERR_OK)
+                    return retVal;
+
+
+                if ((retVal = rtl8367c_setAsicRegBit(0x1d95, 1, 0)) != RT_ERR_OK)
+                    return retVal;
+
+
+                if ((retVal = rtl8367c_setAsicRegBit(0x1d95, 0, 0)) != RT_ERR_OK)
+                    return retVal;
+
+
+                if ((retVal = rtl8367c_setAsicRegBit(0x1d11, 9, 0)) != RT_ERR_OK)
+                    return retVal;
+
+
+                if ((retVal = rtl8367c_setAsicRegBit(0x1d11, 11, 1)) != RT_ERR_OK)
+                    return retVal;
+
+                if ((retVal = rtl8367c_setAsicRegBit(0x1d11, 6, 0)) != RT_ERR_OK)
+                    return retVal;
+
+                if ((retVal = rtl8367c_setAsicReg(0xd0, 7)) != RT_ERR_OK)
+                    return retVal;
+                if ((retVal = rtl8367c_setAsicReg(0x399, 7)) != RT_ERR_OK)
+                    return retVal;
+                if ((retVal = rtl8367c_setAsicReg(0x3fa, 7)) != RT_ERR_OK)
+                    return retVal;
+
+
+                if ((retVal = rtl8367c_setAsicRegBit(0x1d95, 13, 1)) != RT_ERR_OK)
+                    return retVal;
+
+                if ((retVal = rtl8367c_setAsicRegBits(0x1d95, 0x1f00, 0x12)) != RT_ERR_OK)
+                    return retVal;
+
+                if ((retVal = rtl8367c_setAsicRegBit(0x1d95, 2, 0)) != RT_ERR_OK)
+                    return retVal;
+
+            }
+            else if(mode == EXT_1000X)
+            {
+
+                if((retVal = rtl8367c_getAsicSdsReg(0, 2, 0, &reg_data)) != RT_ERR_OK)
+                    return retVal;
+                reg_data &= 0xFFFFFCFF;
+                if((retVal = rtl8367c_setAsicSdsReg(0, 2, 0, reg_data)) != RT_ERR_OK)
+                    return retVal;
+
+
+                if ((retVal = rtl8367c_getAsicRegBit(0x1d3d, 10, &reg_data)) != RT_ERR_OK)
+                    return retVal;
+                if(reg_data == 0)
+                {
+                    if ((retVal = rtl8367c_setAsicRegBits(0x1305, 0xf0, 0)) != RT_ERR_OK)
+                        return retVal;
+                }
+
+                if ((retVal = rtl8367c_setAsicRegBit(0x3f7, 1, 0)) != RT_ERR_OK)
+                    return retVal;
+
+                if ((retVal = rtl8367c_setAsicReg(0x1d11, 0x1500)) != RT_ERR_OK)
+                    return retVal;
+
+
+                if ((retVal = rtl8367c_setAsicRegBits(0x1d95, 0x1f00, 0x1f)) != RT_ERR_OK)
+                    return retVal;
+                if ((retVal = rtl8367c_setAsicRegBit(0x1d95, 13, 1)) != RT_ERR_OK)
+                    return retVal;
+                if ((retVal = rtl8367c_setAsicRegBits(0x1d95, 3, 0)) != RT_ERR_OK)
+                    return retVal;
+
+
+                if ((retVal = rtl8367c_setAsicReg(0x13eb, 0x15bb)) != RT_ERR_OK)
+                    return retVal;
+
+
+                if ((retVal = rtl8367c_setAsicReg(0x13e7, 0xc)) != RT_ERR_OK)
+                    return retVal;
+
+
+                if ((retVal = rtl8367c_setAsicRegBit(0x1d41, 5, 1)) != RT_ERR_OK)
+                    return retVal;
+                if ((retVal = rtl8367c_setAsicRegBit(0x1d41, 7, 1)) != RT_ERR_OK)
+                    return retVal;
+
+
+                if ((retVal = rtl8367c_setAsicRegBit(0x1d11, 11, 0)) != RT_ERR_OK)
+                    return retVal;
+
+
+                if ((retVal = rtl8367c_setAsicRegBit(0x1d11, 6, 1)) != RT_ERR_OK)
+                    return retVal;
+
+                if ((retVal = rtl8367c_setAsicRegBits(0x1d95, 0x1f00, 0x4)) != RT_ERR_OK)
+                    return retVal;
+            }
+            else if(mode == EXT_100FX)
+            {
+
+                if((retVal = rtl8367c_getAsicSdsReg(0, 2, 0, &reg_data)) != RT_ERR_OK)
+                    return retVal;
+                reg_data &= 0xFFFFFCFF;
+                if((retVal = rtl8367c_setAsicSdsReg(0, 2, 0, reg_data)) != RT_ERR_OK)
+                    return retVal;
+
+
+                if ((retVal = rtl8367c_getAsicRegBit(0x1d3d, 10, &reg_data)) != RT_ERR_OK)
+                    return retVal;
+                if(reg_data == 0)
+                {
+                    if ((retVal = rtl8367c_setAsicRegBits(0x1305, 0xf0, 0)) != RT_ERR_OK)
+                        return retVal;
+                }
+
+                if ((retVal = rtl8367c_setAsicRegBit(0x3f7, 1, 0)) != RT_ERR_OK)
+                    return retVal;
+
+                if ((retVal = rtl8367c_setAsicReg(0x1d11, 0x1500)) != RT_ERR_OK)
+                    return retVal;
+
+
+                if ((retVal = rtl8367c_setAsicRegBits(0x1d95, 0x1f00, 0x1f)) != RT_ERR_OK)
+                    return retVal;
+                if ((retVal = rtl8367c_setAsicRegBit(0x1d95, 13, 1)) != RT_ERR_OK)
+                    return retVal;
+                if ((retVal = rtl8367c_setAsicRegBits(0x1d95, 3, 0)) != RT_ERR_OK)
+                    return retVal;
+
+
+                if ((retVal = rtl8367c_setAsicReg(0x13eb, 0x15bb)) != RT_ERR_OK)
+                    return retVal;
+
+
+                if ((retVal = rtl8367c_setAsicReg(0x13e7, 0xc)) != RT_ERR_OK)
+                    return retVal;
+
+
+
+                if ((retVal = rtl8367c_setAsicRegBit(0x1d41, 5, 1)) != RT_ERR_OK)
+                    return retVal;
+                if ((retVal = rtl8367c_setAsicRegBit(0x1d41, 7, 1)) != RT_ERR_OK)
+                    return retVal;
+
+
+                if ((retVal = rtl8367c_setAsicRegBit(0x1d11, 11, 0)) != RT_ERR_OK)
+                    return retVal;
+
+
+                if ((retVal = rtl8367c_setAsicRegBit(0x1d11, 6, 1)) != RT_ERR_OK)
+                    return retVal;
+
+
+
+                if ((retVal = rtl8367c_setAsicRegBits(0x1d95, 0x1f00, 0x5)) != RT_ERR_OK)
+                    return retVal;
+            }
+            else if(mode == EXT_1000X_100FX)
+            {
+                /* sds0, reg 2, page 0: clear bits 8~9 to force N-way */
+                if((retVal = rtl8367c_getAsicSdsReg(0, 2, 0, &reg_data)) != RT_ERR_OK)
+                    return retVal;
+                reg_data &= 0xFFFFFCFF;
+                if((retVal = rtl8367c_setAsicSdsReg(0, 2, 0, reg_data)) != RT_ERR_OK)
+                    return retVal;
+
+
+                if ((retVal = rtl8367c_getAsicRegBit(0x1d3d, 10, &reg_data)) != RT_ERR_OK)
+                    return retVal;
+                if(reg_data == 0)
+                {
+                    if ((retVal = rtl8367c_setAsicRegBits(0x1305, 0xf0, 0)) != RT_ERR_OK)
+                        return retVal;
+                }
+
+                if ((retVal = rtl8367c_setAsicRegBit(0x3f7, 1, 0)) != RT_ERR_OK)
+                    return retVal;
+
+                if ((retVal = rtl8367c_setAsicReg(0x1d11, 0x1500)) != RT_ERR_OK)
+                    return retVal;
+
+
+                if ((retVal = rtl8367c_setAsicRegBits(0x1d95, 0x1f00, 0x1f)) != RT_ERR_OK)
+                    return retVal;
+                if ((retVal = rtl8367c_setAsicRegBit(0x1d95, 13, 0)) != RT_ERR_OK)
+                    return retVal;
+                if ((retVal = rtl8367c_setAsicRegBits(0x1d95, 3, 0)) != RT_ERR_OK)
+                    return retVal;
+
+
+                if ((retVal = rtl8367c_setAsicReg(0x13eb, 0x15bb)) != RT_ERR_OK)
+                    return retVal;
+
+
+                if ((retVal = rtl8367c_setAsicReg(0x13e7, 0xc)) != RT_ERR_OK)
+                    return retVal;
+
+
+                if ((retVal = rtl8367c_setAsicRegBit(0x1d41, 5, 1)) != RT_ERR_OK)
+                    return retVal;
+                if ((retVal = rtl8367c_setAsicRegBit(0x1d41, 7, 1)) != RT_ERR_OK)
+                    return retVal;
+
+
+                if ((retVal = rtl8367c_setAsicRegBit(0x1d11, 11, 0)) != RT_ERR_OK)
+                    return retVal;
+
+
+                if ((retVal = rtl8367c_setAsicRegBit(0x1d11, 6, 1)) != RT_ERR_OK)
+                    return retVal;
+
+
+                if ((retVal = rtl8367c_setAsicRegBits(0x1d95, 0x1f00, 0x7)) != RT_ERR_OK)
+                    return retVal;
+            }
+            else if(mode < EXT_SGMII)
+            {
+                if ((retVal = rtl8367c_setAsicRegBit(0x1d3d, 10, 0)) != RT_ERR_OK)
+                    return retVal;
+
+                if ((retVal = rtl8367c_setAsicRegBit(0x3f7, 1, 0)) != RT_ERR_OK)
+                    return retVal;
+
+
+                if ((retVal = rtl8367c_setAsicRegBit(0x1d11, 11, 0)) != RT_ERR_OK)
+                    return retVal;
+                if ((retVal = rtl8367c_setAsicRegBit(0x1d11, 6, 0)) != RT_ERR_OK)
+                    return retVal;
+
+
+                if ((retVal = rtl8367c_setAsicRegBit(0x1d41, 5, 0)) != RT_ERR_OK)
+                    return retVal;
+                if ((retVal = rtl8367c_setAsicRegBit(0x1d41, 7, 0)) != RT_ERR_OK)
+                    return retVal;
+
+                if (mode < EXT_GMII)
+                {
+                    /* set mac6 mode*/
+                    if ((retVal = rtl8367c_setAsicRegBits(0x1305, 0xf0, mode)) != RT_ERR_OK)
+                        return retVal;
+                }
+                else if(mode == EXT_RMII_MAC)
+                {
+
+                    if ((retVal = rtl8367c_setAsicRegBits(0x1305, 0xf0, 7)) != RT_ERR_OK)
+                        return retVal;
+                }
+                else if(mode == EXT_RMII_PHY)
+                {
+                    if ((retVal = rtl8367c_setAsicRegBits(0x1305, 0xf0, 8)) != RT_ERR_OK)
+                        return retVal;
+                }
+
+                if ((mode == EXT_TMII_MAC) || (mode == EXT_TMII_PHY))
+                {
+                    if ((retVal = rtl8367c_setAsicRegBit(0x3f7, 1, 1)) != RT_ERR_OK)
+                        return retVal;
+                }
+            }
+
+        }
+        else if (2 == id)
+        {
+
+            /*force port7 linkdown*/
+            if ((retVal = rtl8367c_setAsicReg(0x137d, 0x1000)) != RT_ERR_OK)
+                    return retVal;
+
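+            /* spin until reg 0x1d9d bit 7 reads 1 (status for the forced
+             * port7 link-down above); note: no timeout, so this can hang */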
+            if ((retVal = rtl8367c_getAsicRegBit(0x1d9d, 7, &reg_data)) != RT_ERR_OK)
+                    return retVal;
+            while(reg_data == 0)
+            {
+                if ((retVal = rtl8367c_getAsicRegBit(0x1d9d, 7, &reg_data)) != RT_ERR_OK)
+                    return retVal;
+            }
+
+            if (mode == EXT_SGMII)
+            {
+
+                if ((retVal = rtl8367c_setAsicRegBits(0x13c3, 0xf, 0)) != RT_ERR_OK)
+                    return retVal;
+
+
+                if ((retVal = rtl8367c_setAsicReg(0x13c4, 0)) != RT_ERR_OK)
+                    return retVal;
+
+                if ((retVal = rtl8367c_setAsicRegBit(0x3f7, 2, 0)) != RT_ERR_OK)
+                    return retVal;
+
+
+                if ((retVal = rtl8367c_setAsicRegBit(0x1d41, 5, 0)) != RT_ERR_OK)
+                    return retVal;
+                if ((retVal = rtl8367c_setAsicRegBit(0x1d41, 7, 0)) != RT_ERR_OK)
+                    return retVal;
+
+
+                if ((retVal = rtl8367c_setAsicRegBits(0x1d95, 3, 0)) != RT_ERR_OK)
+                    return retVal;
+
+
+                if ((retVal = rtl8367c_setAsicRegBit(0x1d11, 11, 0)) != RT_ERR_OK)
+                    return retVal;
+
+
+                if ((retVal = rtl8367c_setAsicRegBit(0x1d11, 6, 0)) != RT_ERR_OK)
+                    return retVal;
+
+
+                if ((retVal = rtl8367c_setAsicRegBit(0x1d95, 13, 1)) != RT_ERR_OK)
+                    return retVal;
+
+
+                if ((retVal = rtl8367c_setAsicRegBits(0x1d95, 0x1f00, 0x1f)) != RT_ERR_OK)
+                    return retVal;
+
+
+                if ((retVal = rtl8367c_setAsicRegBit(0x1d95, 0, 1)) != RT_ERR_OK)
+                    return retVal;
+
+
+                if ((retVal = rtl8367c_setAsicRegBits(0x1d95, 0x1f00, 0x2)) != RT_ERR_OK)
+                    return retVal;
+
+
+                if ((retVal = rtl8367c_setAsicRegBit(0x1d95, 2, 0)) != RT_ERR_OK)
+                    return retVal;
+            }
+            else if (mode == EXT_1000X)
+            {
+
+                if ((retVal = rtl8367c_setAsicRegBits(0x13c3, 0xf, 0)) != RT_ERR_OK)
+                    return retVal;
+
+
+                if ((retVal = rtl8367c_setAsicReg(0x13c4, 0)) != RT_ERR_OK)
+                    return retVal;
+
+
+                if((retVal = rtl8367c_getAsicSdsReg(0, 2, 0, &reg_data)) != RT_ERR_OK)
+                    return retVal;
+                reg_data &= 0xFFFFFCFF;
+                if((retVal = rtl8367c_setAsicSdsReg(0, 2, 0, reg_data)) != RT_ERR_OK)
+                    return retVal;
+
+
+                if ((retVal = rtl8367c_setAsicRegBit(0x3f7, 2, 0)) != RT_ERR_OK)
+                    return retVal;
+
+
+                if ((retVal = rtl8367c_setAsicReg(0x1d11, 0x1500)) != RT_ERR_OK)
+                    return retVal;
+
+                if ((retVal = rtl8367c_setAsicRegBit(0x1d95, 13, 1)) != RT_ERR_OK)
+                    return retVal;
+
+
+                if ((retVal = rtl8367c_setAsicRegBits(0x1d95, 0x1f00, 0x1f)) != RT_ERR_OK)
+                    return retVal;
+
+
+                if ((retVal = rtl8367c_setAsicRegBit(0x1d41, 5, 0)) != RT_ERR_OK)
+                    return retVal;
+                if ((retVal = rtl8367c_setAsicRegBit(0x1d41, 7, 0)) != RT_ERR_OK)
+                    return retVal;
+
+
+                if ((retVal = rtl8367c_setAsicRegBits(0x1d95, 3, 3)) != RT_ERR_OK)
+                    return retVal;
+
+
+                if ((retVal = rtl8367c_setAsicRegBits(0x1d95, 0x1f00, 0x4)) != RT_ERR_OK)
+                    return retVal;
+
+            }
+            else if (mode == EXT_100FX)
+            {
+
+                if ((retVal = rtl8367c_setAsicRegBits(0x13c3, 0xf, 0)) != RT_ERR_OK)
+                    return retVal;
+
+
+                if ((retVal = rtl8367c_setAsicReg(0x13c4, 0)) != RT_ERR_OK)
+                    return retVal;
+
+
+                if((retVal = rtl8367c_getAsicSdsReg(0, 2, 0, &reg_data)) != RT_ERR_OK)
+                    return retVal;
+                reg_data &= 0xFFFFFCFF;
+                if((retVal = rtl8367c_setAsicSdsReg(0, 2, 0, reg_data)) != RT_ERR_OK)
+                    return retVal;
+
+
+                if ((retVal = rtl8367c_setAsicRegBit(0x3f7, 2, 0)) != RT_ERR_OK)
+                    return retVal;
+
+
+                if ((retVal = rtl8367c_setAsicReg(0x1d11, 0x1500)) != RT_ERR_OK)
+                    return retVal;
+
+
+                if ((retVal = rtl8367c_setAsicRegBit(0x1d95, 13, 1)) != RT_ERR_OK)
+                    return retVal;
+
+
+                if ((retVal = rtl8367c_setAsicRegBits(0x1d95, 0x1f00, 0x1f)) != RT_ERR_OK)
+                    return retVal;
+
+
+                if ((retVal = rtl8367c_setAsicRegBit(0x1d41, 5, 0)) != RT_ERR_OK)
+                    return retVal;
+                if ((retVal = rtl8367c_setAsicRegBit(0x1d41, 7, 0)) != RT_ERR_OK)
+                    return retVal;
+
+
+                if ((retVal = rtl8367c_setAsicRegBits(0x1d95, 3, 3)) != RT_ERR_OK)
+                    return retVal;
+
+
+                if ((retVal = rtl8367c_setAsicRegBits(0x1d95, 0x1f00, 0x5)) != RT_ERR_OK)
+                    return retVal;
+            }
+            else if (mode == EXT_1000X_100FX)
+            {
+                /* disable mac7 MII/TMII/RMII/GMII/RGMII mode, mode_ext2 = disable */
+                if ((retVal = rtl8367c_setAsicRegBits(0x13c3, 0xf, 0)) != RT_ERR_OK)
+                    return retVal;
+
+
+                if ((retVal = rtl8367c_setAsicReg(0x13c4, 0)) != RT_ERR_OK)
+                    return retVal;
+
+
+                if((retVal = rtl8367c_getAsicSdsReg(0, 2, 0, &reg_data)) != RT_ERR_OK)
+                    return retVal;
+                reg_data &= 0xFFFFFCFF;
+                if((retVal = rtl8367c_setAsicSdsReg(0, 2, 0, reg_data)) != RT_ERR_OK)
+                    return retVal;
+
+
+                if ((retVal = rtl8367c_setAsicRegBit(0x3f7, 2, 0)) != RT_ERR_OK)
+                    return retVal;
+
+
+                if ((retVal = rtl8367c_setAsicReg(0x1d11, 0x1500)) != RT_ERR_OK)
+                    return retVal;
+
+
+                if ((retVal = rtl8367c_setAsicRegBit(0x1d95, 13, 1)) != RT_ERR_OK)
+                    return retVal;
+
+
+                if ((retVal = rtl8367c_setAsicRegBits(0x1d95, 0x1f00, 0x1f)) != RT_ERR_OK)
+                    return retVal;
+
+
+                if ((retVal = rtl8367c_setAsicRegBit(0x1d41, 5, 0)) != RT_ERR_OK)
+                    return retVal;
+                if ((retVal = rtl8367c_setAsicRegBit(0x1d41, 7, 0)) != RT_ERR_OK)
+                    return retVal;
+
+
+                if ((retVal = rtl8367c_setAsicRegBits(0x1d95, 3, 3)) != RT_ERR_OK)
+                    return retVal;
+
+
+                if ((retVal = rtl8367c_setAsicRegBits(0x1d95, 0x1f00, 0x7)) != RT_ERR_OK)
+                    return retVal;
+            }
+            else if (mode < EXT_SGMII)
+            {
+                if ((retVal = rtl8367c_setAsicRegBit(0x1d3d, 10, 0)) != RT_ERR_OK)
+                    return retVal;
+
+                if ((retVal = rtl8367c_setAsicRegBit(0x3f7, 2, 0)) != RT_ERR_OK)
+                    return retVal;
+
+
+                if ((retVal = rtl8367c_setAsicReg(0x1d95, 0x1f00)) != RT_ERR_OK)
+                    return retVal;
+
+
+                if ((retVal = rtl8367c_setAsicRegBits(0x1d95, 3, 0)) != RT_ERR_OK)
+                    return retVal;
+
+
+                if ((retVal = rtl8367c_setAsicRegBits(0x13c3, 0xf, mode)) != RT_ERR_OK)
+                    return retVal;
+
+                if ((mode == EXT_TMII_MAC) || (mode == EXT_TMII_PHY))
+                {
+                    if ((retVal = rtl8367c_setAsicRegBit(0x3f7, 2, 1)) != RT_ERR_OK)
+                        return retVal;
+                }
+
+            }
+            else if ((mode < EXT_END) && (mode > EXT_100FX))
+            {
+                if ((retVal = rtl8367c_setAsicRegBits(0x13C3, 0xf, 0)) != RT_ERR_OK)
+                    return retVal;
+
+                if ((retVal = rtl8367c_setAsicRegBit(0x3f7, 2, 0)) != RT_ERR_OK)
+                    return retVal;
+
+
+                if ((retVal = rtl8367c_setAsicRegBits(0x1d95, 3, 0)) != RT_ERR_OK)
+                    return retVal;
+
+                if ((retVal = rtl8367c_setAsicRegBit(0x1d3d, 10, 1)) != RT_ERR_OK)
+                    return retVal;
+
+                if ((retVal = rtl8367c_getAsicRegBit(0x1d11, 11, &reg_data)) != RT_ERR_OK)
+                    return retVal;
+                if(reg_data == 0)
+                {
+                    if ((retVal = rtl8367c_setAsicRegBit(0x1d11, 6, 1)) != RT_ERR_OK)
+                        return retVal;
+                }
+
+
+                if (mode < EXT_RMII_MAC_2)
+                {
+                    if ((retVal = rtl8367c_setAsicRegBits(0x1305, 0xf0, (mode-13))) != RT_ERR_OK)
+                        return retVal;
+                }
+                else
+                {
+                    if ((retVal = rtl8367c_setAsicRegBits(0x1305, 0xf0, (mode-12))) != RT_ERR_OK)
+                        return retVal;
+                }
+
+                if ((mode == EXT_TMII_MAC_2) || (mode == EXT_TMII_PHY_2))
+                {
+                    if ((retVal = rtl8367c_setAsicRegBit(0x3f7, 2, 1)) != RT_ERR_OK)
+                        return retVal;
+                }
+            }
+
+        }
+
+    }
+    return RT_ERR_OK;
+}
+/* Function Name:
+ *      rtl8367c_getAsicPortExtMode
+ * Description:
+ *      Get external interface mode configuration
+ * Input:
+ *      id      - external interface id (0~1)
+ * Output:
+ *      pMode   - external interface mode
+ * Return:
+ *      RT_ERR_OK           - Success
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_OUT_OF_RANGE - input parameter out of range
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicPortExtMode(rtk_uint32 id, rtk_uint32 *pMode)
+{
+    ret_t   retVal;
+    rtk_uint32 regData, regValue, type;
+
+    if(id >= RTL8367C_EXTNO)
+        return RT_ERR_OUT_OF_RANGE;
+
+    if((retVal = rtl8367c_setAsicReg(0x13C2, 0x0249)) != RT_ERR_OK)
+        return retVal;
+
+    if((retVal = rtl8367c_getAsicReg(0x1300, &regValue)) != RT_ERR_OK)
+        return retVal;
+
+    if((retVal = rtl8367c_setAsicReg(0x13C2, 0x0000)) != RT_ERR_OK)
+        return retVal;
+
+    type = 0;
+
+    switch (regValue)
+    {
+        case 0x0276:
+        case 0x0597:
+        case 0x6367:
+            type = 1;
+            break;
+        case 0x0652:
+        case 0x6368:
+            type = 2;
+            break;
+        case 0x0801:
+        case 0x6511:
+            type = 3;
+            break;
+        default:
+            return RT_ERR_FAILED;
+    }
+
+
+    if (1 == type)
+    {
+
+        if (1 == id)
+        {
+            if( (retVal = rtl8367c_getAsicRegBit(RTL8367C_REG_SDS_MISC, RTL8367C_CFG_MAC8_SEL_SGMII_OFFSET, &regData)) != RT_ERR_OK)
+                return retVal;
+
+            if(1 == regData)
+            {
+                *pMode = EXT_SGMII;
+                return RT_ERR_OK;
+            }
+
+            if( (retVal = rtl8367c_getAsicRegBit(RTL8367C_REG_SDS_MISC, RTL8367C_CFG_MAC8_SEL_HSGMII_OFFSET, &regData)) != RT_ERR_OK)
+                return retVal;
+
+            if(1 == regData)
+            {
+                *pMode = EXT_HSGMII;
+                return RT_ERR_OK;
+            }
+        }
+
+        if(0 == id || 1 == id)
+            return rtl8367c_getAsicRegBits(RTL8367C_REG_DIGITAL_INTERFACE_SELECT, RTL8367C_SELECT_GMII_0_MASK << (id * RTL8367C_SELECT_GMII_1_OFFSET), pMode);
+        else
+            return rtl8367c_getAsicRegBits(RTL8367C_REG_DIGITAL_INTERFACE_SELECT_1, RTL8367C_SELECT_GMII_2_MASK, pMode);
+
+    }
+    else if (2 == type)
+    {
+        if (1 == id)
+        {
+            if ((retVal = rtl8367c_getAsicReg(0x1d92, &regData))!=RT_ERR_OK)
+                return retVal;
+
+            if (regData & 0x4000)
+            {
+                *pMode = EXT_SGMII;
+                return RT_ERR_OK;
+            }
+
+            else if (((regData >> 8) & 0x1f) == 4)
+            {
+                *pMode = EXT_1000X;
+                return RT_ERR_OK;
+            }
+            else if (((regData >> 8) & 0x1f) == 5)
+            {
+                *pMode = EXT_100FX;
+                return RT_ERR_OK;
+            }
+            else if (((regData >> 8) & 0x1f) == 7)
+            {
+                *pMode = EXT_1000X_100FX;
+                return RT_ERR_OK;
+            }
+
+            return rtl8367c_getAsicRegBits(0x1305, 0xf0, pMode);
+        }
+        else if (2 == id)
+        {
+#if 0
+            if ((retVal = rtl8367c_getAsicRegBit(0x1d92, 6, &regData))!=RT_ERR_OK)
+                return retVal;
+
+            if (regData == 1)
+            {
+                *pMode = EXT_SGMII;
+                return RT_ERR_OK;
+            }
+
+            if ((retVal = rtl8367c_getAsicRegBit(0x1d92, 7, &regData))!=RT_ERR_OK)
+                return retVal;
+
+            if (regData == 1)
+            {
+                *pMode = EXT_HSGMII;
+                return RT_ERR_OK;
+            }
+#endif
+            if ((retVal = rtl8367c_getAsicReg(0x1d92, &regData))!=RT_ERR_OK)
+                return retVal;
+
+            if (regData & 0x40)
+            {
+                *pMode = EXT_SGMII;
+                return RT_ERR_OK;
+            }
+            else if (regData & 0x80)
+            {
+                *pMode = EXT_HSGMII;
+                return RT_ERR_OK;
+            }
+            else if ((regData & 0x1f) == 4)
+            {
+                *pMode = EXT_1000X;
+                return RT_ERR_OK;
+            }
+            else if ((regData & 0x1f) == 5)
+            {
+                *pMode = EXT_100FX;
+                return RT_ERR_OK;
+            }
+            else if ((regData & 0x1f) == 7)
+            {
+                *pMode = EXT_1000X_100FX;
+                return RT_ERR_OK;
+            }
+
+            return rtl8367c_getAsicRegBits(0x1305, 0xf, pMode);
+        }
+    }
+    else if(3 == type)
+    {
+        if (1 == id)
+        {
+            /* SDS_CFG_NEW */
+            if ((retVal = rtl8367c_getAsicReg(0x1d95, &regData))!=RT_ERR_OK)
+                return retVal;
+
+            if ((retVal = rtl8367c_getAsicReg(0x1d41, &regValue))!=RT_ERR_OK)
+                return retVal;
+
+
+            if((regValue & 0xa0) == 0xa0)
+            {
+                regData = regData >> 8;
+                if((regData & 0x1f) == 4)
+                {
+                    *pMode = EXT_1000X;
+                    return RT_ERR_OK;
+                }
+                else if((regData & 0x1f) == 5)
+                {
+                    *pMode = EXT_100FX;
+                    return RT_ERR_OK;
+                }
+                else if((regData & 0x1f) == 7)
+                {
+                    *pMode = EXT_1000X_100FX;
+                    return RT_ERR_OK;
+                }
+
+            }
+
+
+            if ((retVal = rtl8367c_getAsicReg(0x1d11, &regData))!=RT_ERR_OK)
+                return retVal;
+
+            /* check cfg_mac6_sel_sgmii */
+            if((regData >> 6) & 1)
+            {
+                *pMode = EXT_SGMII;
+                return RT_ERR_OK;
+            }
+            else if((regData >> 11) & 1)
+            {
+                *pMode = EXT_HSGMII;
+                return RT_ERR_OK;
+            }
+            else
+            {
+                /* check port6 MAC mode */
+                if ((retVal = rtl8367c_getAsicRegBits(0x1305, 0xf0, &regData))!=RT_ERR_OK)
+                    return retVal;
+
+                *pMode = regData;
+                return RT_ERR_OK;
+            }
+        }
+        else if (2 == id)
+        {
+            if ((retVal = rtl8367c_getAsicReg(0x1d95, &regData))!=RT_ERR_OK)
+                return retVal;
+
+
+            if(((regData & 0x3) == 3) && (((regData >> 8) & 0x1f) == 0x4))
+            {
+                *pMode = EXT_1000X;
+                return RT_ERR_OK;
+            }
+            else if (((regData & 0x3) == 3) && (((regData >> 8) & 0x1f) == 0x5))
+            {
+                *pMode = EXT_100FX;
+                return RT_ERR_OK;
+            }
+            else if (((regData & 0x3) == 3) && (((regData >> 8) & 0x1f) == 0x7))
+            {
+                *pMode = EXT_1000X_100FX;
+                return RT_ERR_OK;
+            }
+            else if(regData & 1)
+            {
+                *pMode = EXT_SGMII;
+                return RT_ERR_OK;
+            }
+            else
+            {
+
+                if ((retVal = rtl8367c_getAsicRegBits(0x13c3, 0xf, &regData))!=RT_ERR_OK)
+                    return retVal;
+
+                *pMode = regData;
+
+                return RT_ERR_OK;
+            }
+        }
+    }
+
+    return RT_ERR_OK;
+}
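+
+/*
+ * Illustrative usage sketch (not part of the original SDK sources): read
+ * back the mode configured on an external interface via the accessor
+ * above. The helper name example_dump_ext_mode is hypothetical.
+ */
+#if 0
+static ret_t example_dump_ext_mode(rtk_uint32 id)
+{
+    ret_t retVal;
+    rtk_uint32 mode;
+
+    if ((retVal = rtl8367c_getAsicPortExtMode(id, &mode)) != RT_ERR_OK)
+        return retVal;
+
+    /* mode is one of EXT_SGMII, EXT_HSGMII, EXT_1000X, EXT_100FX, ... */
+    return RT_ERR_OK;
+}
+#endif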
+
+/* Function Name:
+ *      rtl8367c_setAsicPortEnableAll
+ * Description:
+ *      Set ALL ports enable.
+ * Input:
+ *      enable - enable all ports.
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - Success
+ *      RT_ERR_SMI          - SMI access error
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_setAsicPortEnableAll(rtk_uint32 enable)
+{
+    if(enable >= 2)
+        return RT_ERR_INPUT;
+
+    return rtl8367c_setAsicRegBit(RTL8367C_REG_PHY_AD, RTL8367C_PDNPHY_OFFSET, !enable);
+}
+
+/* Function Name:
+ *      rtl8367c_getAsicPortEnableAll
+ * Description:
+ *      Get ALL ports enable state.
+ * Input:
+ *      None
+ * Output:
+ *      pEnable - 1: all ports enabled, 0: all ports disabled
+ * Return:
+ *      RT_ERR_OK           - Success
+ *      RT_ERR_SMI          - SMI access error
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicPortEnableAll(rtk_uint32 *pEnable)
+{
+    ret_t retVal;
+    rtk_uint32 regData;
+
+    retVal = rtl8367c_getAsicRegBit(RTL8367C_REG_PHY_AD, RTL8367C_PDNPHY_OFFSET, &regData);
+    if(retVal !=  RT_ERR_OK)
+        return retVal;
+
+    if (regData==0)
+        *pEnable = 1;
+    else
+        *pEnable = 0;
+
+    return RT_ERR_OK;
+}
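+
+/*
+ * Illustrative sketch (not part of the original SDK sources): power all
+ * PHYs down and back up through the enable-all pair above, then verify
+ * with the getter. The helper name example_toggle_all_ports is
+ * hypothetical.
+ */
+#if 0
+static ret_t example_toggle_all_ports(void)
+{
+    ret_t retVal;
+    rtk_uint32 enabled;
+
+    if ((retVal = rtl8367c_setAsicPortEnableAll(0)) != RT_ERR_OK)
+        return retVal;
+    if ((retVal = rtl8367c_setAsicPortEnableAll(1)) != RT_ERR_OK)
+        return retVal;
+
+    if ((retVal = rtl8367c_getAsicPortEnableAll(&enabled)) != RT_ERR_OK)
+        return retVal;
+
+    return (enabled == 1) ? RT_ERR_OK : RT_ERR_FAILED;
+}
+#endif
+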
+/* Function Name:
+ *      rtl8367c_setAsicPortSmallIpg
+ * Description:
+ *      Set small ipg egress mode
+ * Input:
+ *      port    - Physical port number (0~7)
+ *      enable  - 0: normal, 1: small
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK       - Success
+ *      RT_ERR_SMI      - SMI access error
+ *      RT_ERR_PORT_ID  - Invalid port number
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_setAsicPortSmallIpg(rtk_uint32 port, rtk_uint32 enable)
+{
+    if(port >= RTL8367C_PORTNO)
+        return RT_ERR_PORT_ID;
+
+    return rtl8367c_setAsicRegBit(RTL8367C_PORT_SMALL_IPG_REG(port), RTL8367C_PORT0_MISC_CFG_SMALL_TAG_IPG_OFFSET, enable);
+}
+
+/* Function Name:
+ *      rtl8367c_getAsicPortSmallIpg
+ * Description:
+ *      Get small ipg egress mode
+ * Input:
+ *      port    - Physical port number (0~7)
+ * Output:
+ *      pEnable - 0: normal, 1: small
+ * Return:
+ *      RT_ERR_OK       - Success
+ *      RT_ERR_SMI      - SMI access error
+ *      RT_ERR_PORT_ID  - Invalid port number
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicPortSmallIpg(rtk_uint32 port, rtk_uint32* pEnable)
+{
+    if(port >= RTL8367C_PORTNO)
+        return RT_ERR_PORT_ID;
+
+    return rtl8367c_getAsicRegBit(RTL8367C_PORT_SMALL_IPG_REG(port), RTL8367C_PORT0_MISC_CFG_SMALL_TAG_IPG_OFFSET, pEnable);
+}
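+
+/*
+ * Illustrative sketch (not part of the original SDK sources): enable
+ * small-IPG egress on one port and read the setting back. The helper
+ * name example_set_small_ipg is hypothetical.
+ */
+#if 0
+static ret_t example_set_small_ipg(rtk_uint32 port)
+{
+    ret_t retVal;
+    rtk_uint32 enable;
+
+    if ((retVal = rtl8367c_setAsicPortSmallIpg(port, 1)) != RT_ERR_OK)
+        return retVal;
+
+    if ((retVal = rtl8367c_getAsicPortSmallIpg(port, &enable)) != RT_ERR_OK)
+        return retVal;
+
+    return (enable == 1) ? RT_ERR_OK : RT_ERR_FAILED;
+}
+#endif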
+
+/* Function Name:
+ *      rtl8367c_setAsicPortLoopback
+ * Description:
+ *      Set MAC loopback
+ * Input:
+ *      port    - Physical port number (0~7)
+ *      enable  - 0: Disable, 1: enable
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK       - Success
+ *      RT_ERR_SMI      - SMI access error
+ *      RT_ERR_PORT_ID  - Invalid port number
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_setAsicPortLoopback(rtk_uint32 port, rtk_uint32 enable)
+{
+    if(port >= RTL8367C_PORTNO)
+        return RT_ERR_PORT_ID;
+
+    return rtl8367c_setAsicRegBit(RTL8367C_PORT_MISC_CFG_REG(port), RTL8367C_PORT0_MISC_CFG_MAC_LOOPBACK_OFFSET, enable);
+}
+
+/* Function Name:
+ *      rtl8367c_getAsicPortLoopback
+ * Description:
+ *      Get MAC loopback
+ * Input:
+ *      port    - Physical port number (0~7)
+ * Output:
+ *      pEnable - 0: Disable, 1: enable
+ * Return:
+ *      RT_ERR_OK       - Success
+ *      RT_ERR_SMI      - SMI access error
+ *      RT_ERR_PORT_ID  - Invalid port number
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicPortLoopback(rtk_uint32 port, rtk_uint32 *pEnable)
+{
+    if(port >= RTL8367C_PORTNO)
+        return RT_ERR_PORT_ID;
+
+    return rtl8367c_getAsicRegBit(RTL8367C_PORT_MISC_CFG_REG(port), RTL8367C_PORT0_MISC_CFG_MAC_LOOPBACK_OFFSET, pEnable);
+}
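+
+/*
+ * Illustrative sketch (not part of the original SDK sources): put a port
+ * into MAC loopback for a self-test and restore it afterwards. The helper
+ * name example_mac_loopback is hypothetical.
+ */
+#if 0
+static ret_t example_mac_loopback(rtk_uint32 port)
+{
+    ret_t retVal;
+
+    if ((retVal = rtl8367c_setAsicPortLoopback(port, 1)) != RT_ERR_OK)
+        return retVal;
+
+    /* ... inject and verify test frames here ... */
+
+    return rtl8367c_setAsicPortLoopback(port, 0);
+}
+#endif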
+
+/* Function Name:
+ *      rtl8367c_setAsicPortRTCTEnable
+ * Description:
+ *      Enable RTCT (echo response mode)
+ * Input:
+ *      portmask    - Port mask of RTCT enabled (0-4)
+ * Output:
+ *      None.
+ * Return:
+ *      RT_ERR_OK           - Success
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_PORT_MASK    - Invalid port mask
+ * Note:
+ *      RTCT test takes 4.8 seconds at most.
+ */
+ret_t rtl8367c_setAsicPortRTCTEnable(rtk_uint32 portmask)
+{
+    ret_t       retVal;
+    rtk_uint32  regData;
+    rtk_uint32  port;
+
+    if((retVal = rtl8367c_setAsicReg(0x13C2, 0x0249)) != RT_ERR_OK)
+        return retVal;
+
+    if((retVal = rtl8367c_getAsicReg(0x1300, &regData)) != RT_ERR_OK)
+        return retVal;
+
+    if( (regData == 0x0276) || (regData == 0x0597) )
+        return RT_ERR_CHIP_NOT_SUPPORTED;
+
+    for(port = 0; port <= 10; port++)
+    {
+        if(portmask & (0x0001 << port))
+        {
+             if((retVal = rtl8367c_getAsicPHYOCPReg(port, 0xa422, &regData)) != RT_ERR_OK)
+                 return retVal;
+
+             regData &= 0x7FFF;
+             if((retVal = rtl8367c_setAsicPHYOCPReg(port, 0xa422, regData)) != RT_ERR_OK)
+                 return retVal;
+
+             regData |= 0x00F2; /* set RTCT to echo response mode */
+             if((retVal = rtl8367c_setAsicPHYOCPReg(port, 0xa422, regData)) != RT_ERR_OK)
+                 return retVal;
+
+             regData |= 0x0001;
+             if((retVal = rtl8367c_setAsicPHYOCPReg(port, 0xa422, regData)) != RT_ERR_OK)
+                 return retVal;
+        }
+    }
+
+    return RT_ERR_OK;
+}
+
+/* Function Name:
+ *      rtl8367c_setAsicPortRTCTDisable
+ * Description:
+ *      Disable RTCT
+ * Input:
+ *      portmask    - Port mask of RTCT enabled (0-4)
+ * Output:
+ *      None.
+ * Return:
+ *      RT_ERR_OK           - Success
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_PORT_MASK    - Invalid port mask
+ * Note:
+ *      RTCT test takes 4.8 seconds at most.
+ */
+ret_t rtl8367c_setAsicPortRTCTDisable(rtk_uint32 portmask)
+{
+    ret_t       retVal;
+    rtk_uint32  regData;
+    rtk_uint32  port;
+
+    if((retVal = rtl8367c_setAsicReg(0x13C2, 0x0249)) != RT_ERR_OK)
+        return retVal;
+
+    if((retVal = rtl8367c_getAsicReg(0x1300, &regData)) != RT_ERR_OK)
+        return retVal;
+
+    if( (regData == 0x0276) || (regData == 0x0597) )
+        return RT_ERR_CHIP_NOT_SUPPORTED;
+
+    for(port = 0; port <= 10; port++)
+    {
+        if(portmask & (0x0001 << port))
+        {
+             if((retVal = rtl8367c_getAsicPHYOCPReg(port, 0xa422, &regData)) != RT_ERR_OK)
+                 return retVal;
+
+             regData &= 0x7FFF;
+             if((retVal = rtl8367c_setAsicPHYOCPReg(port, 0xa422, regData)) != RT_ERR_OK)
+                 return retVal;
+
+             regData |= 0x00F0;
+             if((retVal = rtl8367c_setAsicPHYOCPReg(port, 0xa422, regData)) != RT_ERR_OK)
+                 return retVal;
+
+             regData &= ~0x0001;
+             if((retVal = rtl8367c_setAsicPHYOCPReg(port, 0xa422, regData)) != RT_ERR_OK)
+                 return retVal;
+        }
+    }
+
+    return RT_ERR_OK;
+}
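+
+/*
+ * Illustrative RTCT sketch (not part of the original SDK sources): start
+ * cable diagnostics on port 0 and poll for the result via
+ * rtl8367c_getAsicPortRTCTResult() defined below. Per the notes above, a
+ * test can take up to 4.8 seconds, so the caller retries while
+ * RT_ERR_PHY_RTCT_NOT_FINISH is returned; the delay between polls is
+ * assumed to come from the caller's environment (e.g. msleep in kernel
+ * context). The helper name example_run_rtct is hypothetical.
+ */
+#if 0
+static ret_t example_run_rtct(rtl8367c_port_rtct_result_t *pResult)
+{
+    ret_t retVal;
+
+    if ((retVal = rtl8367c_setAsicPortRTCTEnable(1 << 0)) != RT_ERR_OK)
+        return retVal;
+
+    do
+    {
+        /* delay here, e.g. msleep(100), omitted in this sketch */
+        retVal = rtl8367c_getAsicPortRTCTResult(0, pResult);
+    } while (retVal == RT_ERR_PHY_RTCT_NOT_FINISH);
+
+    return retVal;
+}
+#endif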
+
+
+/* Function Name:
+ *      rtl8367c_getAsicPortRTCTResult
+ * Description:
+ *      Get RTCT result
+ * Input:
+ *      port    - Port ID of RTCT result
+ * Output:
+ *      pResult - The result of port ID
+ * Return:
+ *      RT_ERR_OK                   - Success
+ *      RT_ERR_SMI                  - SMI access error
+ *      RT_ERR_PORT_MASK            - Invalid port mask
+ *      RT_ERR_PHY_RTCT_NOT_FINISH  - RTCT test doesn't finish.
+ * Note:
+ *      RTCT test takes 4.8 seconds at most.
+ *      If this API returns RT_ERR_PHY_RTCT_NOT_FINISH,
+ *      users should wait a while and then read it again.
+ */
+ret_t rtl8367c_getAsicPortRTCTResult(rtk_uint32 port, rtl8367c_port_rtct_result_t *pResult)
+{
+    ret_t       retVal;
+    rtk_uint32  regData, finish = 1;
+
+    if((retVal = rtl8367c_setAsicReg(0x13C2, 0x0249)) != RT_ERR_OK)
+        return retVal;
+
+    if((retVal = rtl8367c_getAsicReg(0x1300, &regData)) != RT_ERR_OK)
+        return retVal;
+
+    if( (regData == 0x6367) )
+    {
+        if((retVal = rtl8367c_getAsicPHYOCPReg(port, 0xa422, &regData)) != RT_ERR_OK)
+            return retVal;
+
+        if((regData & 0x8000) == 0x8000)
+        {
+            /* Channel A */
+            if((retVal = rtl8367c_setAsicPHYOCPReg(port, 0xa436, 0x802a)) != RT_ERR_OK)
+                return retVal;
+
+            if((retVal = rtl8367c_getAsicPHYOCPReg(port, 0xa438, &regData)) != RT_ERR_OK)
+                return retVal;
+
+            pResult->channelAOpen       = (regData == 0x0048) ? 1 : 0;
+            pResult->channelAShort      = (regData == 0x0050) ? 1 : 0;
+            pResult->channelAMismatch   = ((regData == 0x0042) || (regData == 0x0044)) ? 1 : 0;
+            pResult->channelALinedriver = (regData == 0x0041) ? 1 : 0;
+
+            /* Channel B */
+            if((retVal = rtl8367c_setAsicPHYOCPReg(port, 0xa436, 0x802e)) != RT_ERR_OK)
+                return retVal;
+
+            if((retVal = rtl8367c_getAsicPHYOCPReg(port, 0xa438, &regData)) != RT_ERR_OK)
+                return retVal;
+
+            pResult->channelBOpen       = (regData == 0x0048) ? 1 : 0;
+            pResult->channelBShort      = (regData == 0x0050) ? 1 : 0;
+            pResult->channelBMismatch   = ((regData == 0x0042) || (regData == 0x0044)) ? 1 : 0;
+            pResult->channelBLinedriver = (regData == 0x0041) ? 1 : 0;
+
+            /* Channel C */
+            if((retVal = rtl8367c_setAsicPHYOCPReg(port, 0xa436, 0x8032)) != RT_ERR_OK)
+                return retVal;
+
+            if((retVal = rtl8367c_getAsicPHYOCPReg(port, 0xa438, &regData)) != RT_ERR_OK)
+                return retVal;
+
+            pResult->channelCOpen       = (regData == 0x0048) ? 1 : 0;
+            pResult->channelCShort      = (regData == 0x0050) ? 1 : 0;
+            pResult->channelCMismatch   = ((regData == 0x0042) || (regData == 0x0044)) ? 1 : 0;
+            pResult->channelCLinedriver = (regData == 0x0041) ? 1 : 0;
+
+            /* Channel D */
+            if((retVal = rtl8367c_setAsicPHYOCPReg(port, 0xa436, 0x8036)) != RT_ERR_OK)
+                return retVal;
+
+            if((retVal = rtl8367c_getAsicPHYOCPReg(port, 0xa438, &regData)) != RT_ERR_OK)
+                return retVal;
+
+            pResult->channelDOpen       = (regData == 0x0048) ? 1 : 0;
+            pResult->channelDShort      = (regData == 0x0050) ? 1 : 0;
+            pResult->channelDMismatch   = ((regData == 0x0042) || (regData == 0x0044)) ? 1 : 0;
+            pResult->channelDLinedriver = (regData == 0x0041) ? 1 : 0;
+
+            /* Channel A Length */
+            if((retVal = rtl8367c_setAsicPHYOCPReg(port, 0xa436, 0x802c)) != RT_ERR_OK)
+                return retVal;
+
+            if((retVal = rtl8367c_getAsicPHYOCPReg(port, 0xa438, &regData)) != RT_ERR_OK)
+                return retVal;
+
+            pResult->channelALen = (regData / 2);
+
+            /* Channel B Length */
+            if((retVal = rtl8367c_setAsicPHYOCPReg(port, 0xa436, 0x8030)) != RT_ERR_OK)
+                return retVal;
+
+            if((retVal = rtl8367c_getAsicPHYOCPReg(port, 0xa438, &regData)) != RT_ERR_OK)
+                return retVal;
+
+            pResult->channelBLen = (regData / 2);
+
+            /* Channel C Length */
+            if((retVal = rtl8367c_setAsicPHYOCPReg(port, 0xa436, 0x8034)) != RT_ERR_OK)
+                return retVal;
+
+            if((retVal = rtl8367c_getAsicPHYOCPReg(port, 0xa438, &regData)) != RT_ERR_OK)
+                return retVal;
+
+            pResult->channelCLen = (regData / 2);
+
+            /* Channel D Length */
+            if((retVal = rtl8367c_setAsicPHYOCPReg(port, 0xa436, 0x8038)) != RT_ERR_OK)
+                return retVal;
+
+            if((retVal = rtl8367c_getAsicPHYOCPReg(port, 0xa438, &regData)) != RT_ERR_OK)
+                return retVal;
+
+            pResult->channelDLen = (regData / 2);
+        }
+        else
+            finish = 0;
+    }
+    else if(regData == 0x6368)
+    {
+        if((retVal = rtl8367c_getAsicPHYOCPReg(port, 0xa422, &regData)) != RT_ERR_OK)
+            return retVal;
+
+        if((regData & 0x8000) == 0x8000)
+        {
+            /* Channel A */
+            if((retVal = rtl8367c_setAsicPHYOCPReg(port, 0xa436, 0x802b)) != RT_ERR_OK)
+                return retVal;
+
+            if((retVal = rtl8367c_getAsicPHYOCPReg(port, 0xa438, &regData)) != RT_ERR_OK)
+                return retVal;
+
+            pResult->channelAOpen       = (regData == 0x0048) ? 1 : 0;
+            pResult->channelAShort      = (regData == 0x0050) ? 1 : 0;
+            pResult->channelAMismatch   = ((regData == 0x0042) || (regData == 0x0044)) ? 1 : 0;
+            pResult->channelALinedriver = (regData == 0x0041) ? 1 : 0;
+
+            /* Channel B */
+            if((retVal = rtl8367c_setAsicPHYOCPReg(port, 0xa436, 0x802f)) != RT_ERR_OK)
+                return retVal;
+
+            if((retVal = rtl8367c_getAsicPHYOCPReg(port, 0xa438, &regData)) != RT_ERR_OK)
+                return retVal;
+
+            pResult->channelBOpen       = (regData == 0x0048) ? 1 : 0;
+            pResult->channelBShort      = (regData == 0x0050) ? 1 : 0;
+            pResult->channelBMismatch   = ((regData == 0x0042) || (regData == 0x0044)) ? 1 : 0;
+            pResult->channelBLinedriver = (regData == 0x0041) ? 1 : 0;
+
+            /* Channel C */
+            if((retVal = rtl8367c_setAsicPHYOCPReg(port, 0xa436, 0x8033)) != RT_ERR_OK)
+                return retVal;
+
+            if((retVal = rtl8367c_getAsicPHYOCPReg(port, 0xa438, &regData)) != RT_ERR_OK)
+                return retVal;
+
+            pResult->channelCOpen       = (regData == 0x0048) ? 1 : 0;
+            pResult->channelCShort      = (regData == 0x0050) ? 1 : 0;
+            pResult->channelCMismatch   = ((regData == 0x0042) || (regData == 0x0044)) ? 1 : 0;
+            pResult->channelCLinedriver = (regData == 0x0041) ? 1 : 0;
+
+            /* Channel D */
+            if((retVal = rtl8367c_setAsicPHYOCPReg(port, 0xa436, 0x8037)) != RT_ERR_OK)
+                return retVal;
+
+            if((retVal = rtl8367c_getAsicPHYOCPReg(port, 0xa438, &regData)) != RT_ERR_OK)
+                return retVal;
+
+            pResult->channelDOpen       = (regData == 0x0048) ? 1 : 0;
+            pResult->channelDShort      = (regData == 0x0050) ? 1 : 0;
+            pResult->channelDMismatch   = ((regData == 0x0042) || (regData == 0x0044)) ? 1 : 0;
+            pResult->channelDLinedriver = (regData == 0x0041) ? 1 : 0;
+
+            /* Channel A Length */
+            if((retVal = rtl8367c_setAsicPHYOCPReg(port, 0xa436, 0x802d)) != RT_ERR_OK)
+                return retVal;
+
+            if((retVal = rtl8367c_getAsicPHYOCPReg(port, 0xa438, &regData)) != RT_ERR_OK)
+                return retVal;
+
+            pResult->channelALen = (regData / 2);
+
+            /* Channel B Length */
+            if((retVal = rtl8367c_setAsicPHYOCPReg(port, 0xa436, 0x8031)) != RT_ERR_OK)
+                return retVal;
+
+            if((retVal = rtl8367c_getAsicPHYOCPReg(port, 0xa438, &regData)) != RT_ERR_OK)
+                return retVal;
+
+            pResult->channelBLen = (regData / 2);
+
+            /* Channel C Length */
+            if((retVal = rtl8367c_setAsicPHYOCPReg(port, 0xa436, 0x8035)) != RT_ERR_OK)
+                return retVal;
+
+            if((retVal = rtl8367c_getAsicPHYOCPReg(port, 0xa438, &regData)) != RT_ERR_OK)
+                return retVal;
+
+            pResult->channelCLen = (regData / 2);
+
+            /* Channel D Length */
+            if((retVal = rtl8367c_setAsicPHYOCPReg(port, 0xa436, 0x8039)) != RT_ERR_OK)
+                return retVal;
+
+            if((retVal = rtl8367c_getAsicPHYOCPReg(port, 0xa438, &regData)) != RT_ERR_OK)
+                return retVal;
+
+            pResult->channelDLen = (regData / 2);
+        }
+        else
+            finish = 0;
+
+    }
+    else if((regData == 0x6511) || (regData == 0x0801))
+    {
+        if((retVal = rtl8367c_getAsicPHYOCPReg(port, 0xa422, &regData)) != RT_ERR_OK)
+            return retVal;
+
+        if((regData & 0x8000) == 0x8000)
+        {
+            /* Channel A */
+            if((retVal = rtl8367c_setAsicPHYOCPReg(port, 0xa436, 0x802a)) != RT_ERR_OK)
+                return retVal;
+
+            if((retVal = rtl8367c_getAsicPHYOCPReg(port, 0xa438, &regData)) != RT_ERR_OK)
+                return retVal;
+
+            pResult->channelAOpen       = (regData == 0x0048) ? 1 : 0;
+            pResult->channelAShort      = (regData == 0x0050) ? 1 : 0;
+            pResult->channelAMismatch   = ((regData == 0x0042) || (regData == 0x0044)) ? 1 : 0;
+            pResult->channelALinedriver = (regData == 0x0041) ? 1 : 0;
+
+            /* Channel B */
+            if((retVal = rtl8367c_setAsicPHYOCPReg(port, 0xa436, 0x802e)) != RT_ERR_OK)
+                return retVal;
+
+            if((retVal = rtl8367c_getAsicPHYOCPReg(port, 0xa438, &regData)) != RT_ERR_OK)
+                return retVal;
+
+            pResult->channelBOpen       = (regData == 0x0048) ? 1 : 0;
+            pResult->channelBShort      = (regData == 0x0050) ? 1 : 0;
+            pResult->channelBMismatch   = ((regData == 0x0042) || (regData == 0x0044)) ? 1 : 0;
+            pResult->channelBLinedriver = (regData == 0x0041) ? 1 : 0;
+
+            /* Channel C */
+            if((retVal = rtl8367c_setAsicPHYOCPReg(port, 0xa436, 0x8032)) != RT_ERR_OK)
+                return retVal;
+
+            if((retVal = rtl8367c_getAsicPHYOCPReg(port, 0xa438, &regData)) != RT_ERR_OK)
+                return retVal;
+
+            pResult->channelCOpen       = (regData == 0x0048) ? 1 : 0;
+            pResult->channelCShort      = (regData == 0x0050) ? 1 : 0;
+            pResult->channelCMismatch   = ((regData == 0x0042) || (regData == 0x0044)) ? 1 : 0;
+            pResult->channelCLinedriver = (regData == 0x0041) ? 1 : 0;
+
+            /* Channel D */
+            if((retVal = rtl8367c_setAsicPHYOCPReg(port, 0xa436, 0x8036)) != RT_ERR_OK)
+                return retVal;
+
+            if((retVal = rtl8367c_getAsicPHYOCPReg(port, 0xa438, &regData)) != RT_ERR_OK)
+                return retVal;
+
+            pResult->channelDOpen       = (regData == 0x0048) ? 1 : 0;
+            pResult->channelDShort      = (regData == 0x0050) ? 1 : 0;
+            pResult->channelDMismatch   = ((regData == 0x0042) || (regData == 0x0044)) ? 1 : 0;
+            pResult->channelDLinedriver = (regData == 0x0041) ? 1 : 0;
+
+            /* Channel A Length */
+            if((retVal = rtl8367c_setAsicPHYOCPReg(port, 0xa436, 0x802c)) != RT_ERR_OK)
+                return retVal;
+
+            if((retVal = rtl8367c_getAsicPHYOCPReg(port, 0xa438, &regData)) != RT_ERR_OK)
+                return retVal;
+
+            pResult->channelALen = (regData / 2);
+
+            /* Channel B Length */
+            if((retVal = rtl8367c_setAsicPHYOCPReg(port, 0xa436, 0x8030)) != RT_ERR_OK)
+                return retVal;
+
+            if((retVal = rtl8367c_getAsicPHYOCPReg(port, 0xa438, &regData)) != RT_ERR_OK)
+                return retVal;
+
+            pResult->channelBLen = (regData / 2);
+
+            /* Channel C Length */
+            if((retVal = rtl8367c_setAsicPHYOCPReg(port, 0xa436, 0x8034)) != RT_ERR_OK)
+                return retVal;
+
+            if((retVal = rtl8367c_getAsicPHYOCPReg(port, 0xa438, &regData)) != RT_ERR_OK)
+                return retVal;
+
+            pResult->channelCLen = (regData / 2);
+
+            /* Channel D Length */
+            if((retVal = rtl8367c_setAsicPHYOCPReg(port, 0xa436, 0x8038)) != RT_ERR_OK)
+                return retVal;
+
+            if((retVal = rtl8367c_getAsicPHYOCPReg(port, 0xa438, &regData)) != RT_ERR_OK)
+                return retVal;
+
+            pResult->channelDLen = (regData / 2);
+        }
+        else
+            finish = 0;
+
+    }
+    else
+        return RT_ERR_CHIP_NOT_SUPPORTED;
+
+    if(finish == 0)
+        return RT_ERR_PHY_RTCT_NOT_FINISH;
+    else
+        return RT_ERR_OK;
+}
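+
+/*
+ * Usage sketch (editorial addition, not vendor code): start RTCT on a single
+ * port and poll until the result is ready. msleep() assumes a kernel
+ * context; substitute whatever delay primitive the caller has.
+ *
+ *    rtl8367c_port_rtct_result_t result;
+ *    ret_t ret;
+ *
+ *    rtl8367c_setAsicPortRTCTEnable(1 << port);
+ *    do {
+ *        msleep(500);    // RTCT takes up to 4.8 seconds
+ *        ret = rtl8367c_getAsicPortRTCTResult(port, &result);
+ *    } while (ret == RT_ERR_PHY_RTCT_NOT_FINISH);
+ */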
+
+/* Function Name:
+ *      rtl8367c_sdsReset
+ * Description:
+ *      Reset Serdes
+ * Input:
+ *      id  - EXT ID
+ * Output:
+ *      None.
+ * Return:
+ *      RT_ERR_OK                   - Success
+ *      RT_ERR_SMI                  - SMI access error
+ * Note:
+ *      None.
+ */
+ret_t rtl8367c_sdsReset(rtk_uint32 id)
+{
+    rtk_uint32 retVal, regValue, state, i, option, running = 0, retVal2;
+
+    if((retVal = rtl8367c_setAsicReg(0x13C2, 0x0249)) != RT_ERR_OK)
+        return retVal;
+
+    if((retVal = rtl8367c_getAsicReg(0x1300, &regValue)) != RT_ERR_OK)
+        return retVal;
+
+    if((retVal = rtl8367c_setAsicReg(0x13C2, 0x0000)) != RT_ERR_OK)
+        return retVal;
+
+    switch (regValue)
+    {
+        case 0x0276:
+        case 0x0597:
+        case 0x6367:
+            option = 0;
+            break;
+        case 0x0652:
+        case 0x6368:
+            option = 1;
+            break;
+        case 0x0801:
+        case 0x6511:
+            option = 2;
+            break;
+        default:
+            return RT_ERR_FAILED;
+    }
+
+    if(option == 0)
+    {
+        if (1 == id)
+        {
+            if ((retVal = rtl8367c_getAsicRegBit(0x130c, 5, &running))!=RT_ERR_OK)
+                return retVal;
+
+            if(running == 1)
+            {
+                if ((retVal = rtl8367c_setAsicRegBit(0x130c, 5, 0))!=RT_ERR_OK)
+                    return retVal;
+            }
+
+            retVal = rtl8367c_setAsicReg(0x6601, 0x0000);
+
+            if(retVal == RT_ERR_OK)
+                retVal = rtl8367c_setAsicReg(0x6602, 0x1401);
+
+            if(retVal == RT_ERR_OK)
+                retVal = rtl8367c_setAsicReg(0x6600, 0x00C0);
+
+            if(retVal == RT_ERR_OK)
+                retVal = rtl8367c_setAsicReg(0x6601, 0x0000);
+
+            if(retVal == RT_ERR_OK)
+                retVal = rtl8367c_setAsicReg(0x6602, 0x1403);
+
+            if(retVal == RT_ERR_OK)
+                retVal = rtl8367c_setAsicReg(0x6600, 0x00C0);
+
+            if(running == 1)
+            {
+                if ((retVal2 = rtl8367c_setAsicRegBit(0x130c, 5, 1))!=RT_ERR_OK)
+                    return retVal2;
+            }
+
+            if(retVal != RT_ERR_OK)
+                return retVal;
+        }
+        else
+            return RT_ERR_PORT_ID;
+    }
+    else if(option == 1)
+    {
+        if (1 == id)
+        {
+            if((retVal = rtl8367c_getAsicReg(0x1311, &state)) != RT_ERR_OK)
+                return retVal;
+
+            if((retVal = rtl8367c_setAsicReg(0x1311, 0x66)) != RT_ERR_OK)
+                return retVal;
+
+            if((retVal = rtl8367c_setAsicReg(0x1311, 0x1066)) != RT_ERR_OK)
+                return retVal;
+
+            while(1)
+            {
+                if((retVal = rtl8367c_getAsicReg(0x1d9d, &regValue)) != RT_ERR_OK)
+                    return retVal;
+                if((regValue >> 8) & 1)
+                    break;
+            }
+
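+            /* crude software busy-wait delay; the empty loop body is
+             * intentional (the same idiom recurs throughout this file) */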
+            for (i=0; i<0xffff; i++);
+
+            if((retVal = rtl8367c_setAsicReg(0x133d, 0x2)) != RT_ERR_OK)
+                return retVal;
+
+            for (i=0; i<0xffff; i++);
+
+            if((retVal = rtl8367c_setAsicReg(0x6601, 0x0)) != RT_ERR_OK)
+                return retVal;
+            if((retVal = rtl8367c_setAsicReg(0x6602, 0x1401)) != RT_ERR_OK)
+                return retVal;
+            if((retVal = rtl8367c_setAsicReg(0x6600, 0xc1)) != RT_ERR_OK)
+                return retVal;
+            if((retVal = rtl8367c_setAsicReg(0x6601, 0x0)) != RT_ERR_OK)
+                return retVal;
+            if((retVal = rtl8367c_setAsicReg(0x6602, 0x1403)) != RT_ERR_OK)
+                return retVal;
+            if((retVal = rtl8367c_setAsicReg(0x6600, 0xc1)) != RT_ERR_OK)
+                return retVal;
+
+            if((retVal = rtl8367c_setAsicReg(0x133d, 0x0)) != RT_ERR_OK)
+                return retVal;
+
+            for (i=0; i<0xffff; i++);
+
+            if((retVal = rtl8367c_setAsicReg(0x1311, state)) != RT_ERR_OK)
+                return retVal;
+
+
+        }
+        else if (2 == id)
+        {
+            if((retVal = rtl8367c_getAsicReg(0x13c4, &state)) != RT_ERR_OK)
+                return retVal;
+
+            if((retVal = rtl8367c_setAsicReg(0x13c4, 0x66)) != RT_ERR_OK)
+                return retVal;
+
+            if((retVal = rtl8367c_setAsicReg(0x13c4, 0x1066)) != RT_ERR_OK)
+                return retVal;
+
+            while(1)
+            {
+                if((retVal = rtl8367c_getAsicReg(0x1d9d, &regValue)) != RT_ERR_OK)
+                    return retVal;
+                if((regValue >> 9) & 1)
+                    break;
+            }
+
+            for (i=0; i<0xffff; i++);
+
+            if((retVal = rtl8367c_setAsicReg(0x133d, 0x2)) != RT_ERR_OK)
+                return retVal;
+
+            for (i=0; i<0xffff; i++);
+
+            if((retVal = rtl8367c_setAsicReg(0x6601, 0x0)) != RT_ERR_OK)
+                return retVal;
+            if((retVal = rtl8367c_setAsicReg(0x6602, 0x1401)) != RT_ERR_OK)
+                return retVal;
+            if((retVal = rtl8367c_setAsicReg(0x6600, 0xc0)) != RT_ERR_OK)
+                return retVal;
+            if((retVal = rtl8367c_setAsicReg(0x6601, 0x0)) != RT_ERR_OK)
+                return retVal;
+            if((retVal = rtl8367c_setAsicReg(0x6602, 0x1403)) != RT_ERR_OK)
+                return retVal;
+            if((retVal = rtl8367c_setAsicReg(0x6600, 0xc0)) != RT_ERR_OK)
+                return retVal;
+
+            if((retVal = rtl8367c_setAsicReg(0x133d, 0x0)) != RT_ERR_OK)
+                return retVal;
+
+            for (i=0; i<0xffff; i++);
+
+            if((retVal = rtl8367c_setAsicReg(0x13c4, state)) != RT_ERR_OK)
+                return retVal;
+        }
+        else
+            return RT_ERR_PORT_ID;
+    }
+    else if(option == 2)
+    {
+        if ((retVal = rtl8367c_getAsicSdsReg(0, 3, 0, &regValue))!=RT_ERR_OK)
+            return retVal;
+
+        regValue |= 0x40;
+        if ((retVal = rtl8367c_setAsicSdsReg(0, 3, 0, regValue))!=RT_ERR_OK)
+            return retVal;
+
+        for (i=0; i<0xffff; i++);
+
+        regValue &= ~(0x40);
+        if ((retVal = rtl8367c_setAsicSdsReg(0, 3, 0, regValue))!=RT_ERR_OK)
+            return retVal;
+    }
+
+    return RT_ERR_OK;
+}
+
+/* Function Name:
+ *      rtl8367c_getSdsLinkStatus
+ * Description:
+ *      Get SGMII status
+ * Input:
+ *      ext_id          - EXT ID
+ * Output:
+ *      pSignalDetect   - Signal detect state (1: detected)
+ *      pSync           - Sync state (1: in sync)
+ *      pLink           - Link state (1: link up)
+ * Return:
+ *      RT_ERR_OK                   - Success
+ *      RT_ERR_SMI                  - SMI access error
+ * Note:
+ *      None.
+ */
+ret_t rtl8367c_getSdsLinkStatus(rtk_uint32 ext_id, rtk_uint32 *pSignalDetect, rtk_uint32 *pSync, rtk_uint32 *pLink)
+{
+    rtk_uint32 retVal, regValue, type, running = 0, retVal2;
+
+
+    if((retVal = rtl8367c_setAsicReg(0x13C2, 0x0249)) != RT_ERR_OK)
+        return retVal;
+
+    if((retVal = rtl8367c_getAsicReg(0x1300, &regValue)) != RT_ERR_OK)
+        return retVal;
+
+    if((retVal = rtl8367c_setAsicReg(0x13C2, 0x0000)) != RT_ERR_OK)
+        return retVal;
+
+    switch (regValue)
+    {
+        case 0x0276:
+        case 0x0597:
+        case 0x6367:
+            type = 0;
+            break;
+        case 0x0652:
+        case 0x6368:
+            type = 1;
+            break;
+        case 0x0801:
+        case 0x6511:
+            type = 2;
+            break;
+        default:
+            return RT_ERR_FAILED;
+    }
+
+    if(type == 0)
+    {
+        if (1 == ext_id)
+        {
+            if ((retVal = rtl8367c_getAsicRegBit(0x130c, 5, &running))!=RT_ERR_OK)
+                return retVal;
+
+            if(running == 1)
+            {
+                if ((retVal = rtl8367c_setAsicRegBit(0x130c, 5, 0))!=RT_ERR_OK)
+                    return retVal;
+            }
+
+            retVal = rtl8367c_setAsicReg(0x6601, 0x003D);
+
+            if(retVal == RT_ERR_OK)
+                retVal = rtl8367c_setAsicReg(0x6600, 0x0080);
+
+            if(retVal == RT_ERR_OK)
+                retVal = rtl8367c_getAsicReg(0x6602, &regValue);
+
+            if(running == 1)
+            {
+                if ((retVal2 = rtl8367c_setAsicRegBit(0x130c, 5, 1))!=RT_ERR_OK)
+                    return retVal2;
+            }
+
+            if(retVal != RT_ERR_OK)
+                return retVal;
+
+            *pSignalDetect = (regValue & 0x0100) ? 1 : 0;
+            *pSync = (regValue & 0x0001) ? 1 : 0;
+            *pLink = (regValue & 0x0010) ? 1 : 0;
+        }
+        else
+            return RT_ERR_PORT_ID;
+    }
+    else if(type == 1)
+    {
+        if (1 == ext_id)
+        {
+            if ((retVal = rtl8367c_setAsicReg(0x6601, 0x003D))!=RT_ERR_OK)
+                return retVal;
+            if ((retVal = rtl8367c_setAsicReg(0x6600, 0x0081))!=RT_ERR_OK)
+                return retVal;
+            if ((retVal = rtl8367c_getAsicReg(0x6602, &regValue))!=RT_ERR_OK)
+                return retVal;
+
+            *pSignalDetect = (regValue & 0x0100) ? 1 : 0;
+            *pSync = (regValue & 0x0001) ? 1 : 0;
+            *pLink = (regValue & 0x0010) ? 1 : 0;
+        }
+        else if (2 == ext_id)
+        {
+            if ((retVal = rtl8367c_setAsicReg(0x6601, 0x003D))!=RT_ERR_OK)
+                return retVal;
+            if ((retVal = rtl8367c_setAsicReg(0x6600, 0x0080))!=RT_ERR_OK)
+                return retVal;
+            if ((retVal = rtl8367c_getAsicReg(0x6602, &regValue))!=RT_ERR_OK)
+                return retVal;
+
+            *pSignalDetect = (regValue & 0x0100) ? 1 : 0;
+            *pSync = (regValue & 0x0001) ? 1 : 0;
+            *pLink = (regValue & 0x0010) ? 1 : 0;
+        }
+        else
+            return RT_ERR_PORT_ID;
+    }
+    else if(type == 2)
+    {
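+        /* the status register is read twice and only the second value is
+         * kept; presumably the first read flushes latched (sticky) status
+         * bits, though the vendor code gives no rationale */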
+        if((retVal = rtl8367c_getAsicSdsReg(0, 30, 1, &regValue)) != RT_ERR_OK)
+            return retVal;
+        if((retVal = rtl8367c_getAsicSdsReg(0, 30, 1, &regValue)) != RT_ERR_OK)
+            return retVal;
+
+        *pSignalDetect = (regValue & 0x0100) ? 1 : 0;
+        *pSync = (regValue & 0x0001) ? 1 : 0;
+        *pLink = (regValue & 0x0010) ? 1 : 0;
+
+    }
+
+    return RT_ERR_OK;
+}
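+
+/*
+ * Usage sketch (editorial addition): query the SerDes/SGMII link state of
+ * EXT port 1 and log it. printk() assumes a kernel context.
+ *
+ *    rtk_uint32 sd, sync, link;
+ *
+ *    if (rtl8367c_getSdsLinkStatus(1, &sd, &sync, &link) == RT_ERR_OK)
+ *        printk("sgmii: signal_detect=%u sync=%u link=%u\n", sd, sync, link);
+ */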
+
+/* Function Name:
+ *      rtl8367c_setSgmiiNway
+ * Description:
+ *      Set SGMII Nway
+ * Input:
+ *      ext_id      - EXT ID
+ *      state       - SGMII Nway state
+ * Output:
+ *      None.
+ * Return:
+ *      RT_ERR_OK                   - Success
+ *      RT_ERR_SMI                  - SMI access error
+ * Note:
+ *      None.
+ */
+ret_t rtl8367c_setSgmiiNway(rtk_uint32 ext_id, rtk_uint32 state)
+{
+    rtk_uint32 retVal, regValue, type, running = 0, retVal2;
+
+    if((retVal = rtl8367c_setAsicReg(0x13C2, 0x0249)) != RT_ERR_OK)
+        return retVal;
+
+    if((retVal = rtl8367c_getAsicReg(0x1300, &regValue)) != RT_ERR_OK)
+        return retVal;
+
+    if((retVal = rtl8367c_setAsicReg(0x13C2, 0x0000)) != RT_ERR_OK)
+        return retVal;
+
+    switch (regValue)
+    {
+        case 0x0276:
+        case 0x0597:
+        case 0x6367:
+            type = 0;
+            break;
+        case 0x0652:
+        case 0x6368:
+            type = 1;
+            break;
+        case 0x0801:
+        case 0x6511:
+            type = 2;
+            break;
+        default:
+            return RT_ERR_FAILED;
+    }
+
+    if(type == 0)
+    {
+        if (1 == ext_id)
+        {
+            if ((retVal = rtl8367c_getAsicRegBit(0x130c, 5, &running))!=RT_ERR_OK)
+                return retVal;
+
+            if(running == 1)
+            {
+                if ((retVal = rtl8367c_setAsicRegBit(0x130c, 5, 0))!=RT_ERR_OK)
+                    return retVal;
+            }
+
+            retVal = rtl8367c_setAsicReg(0x6601, 0x0002);
+
+            if(retVal == RT_ERR_OK)
+                retVal = rtl8367c_setAsicReg(0x6600, 0x0080);
+
+            if(retVal == RT_ERR_OK)
+                retVal = rtl8367c_getAsicReg(0x6602, &regValue);
+
+            if(retVal == RT_ERR_OK)
+            {
+                if(state)
+                      regValue |= 0x0200;
+                else
+                      regValue &= ~0x0200;
+
+                regValue |= 0x0100;
+            }
+
+            if(retVal == RT_ERR_OK)
+                retVal = rtl8367c_setAsicReg(0x6602, regValue);
+
+            if(retVal == RT_ERR_OK)
+                retVal = rtl8367c_setAsicReg(0x6601, 0x0002);
+
+            if(retVal == RT_ERR_OK)
+                retVal = rtl8367c_setAsicReg(0x6600, 0x00C0);
+
+            if(running == 1)
+            {
+                if ((retVal2 = rtl8367c_setAsicRegBit(0x130c, 5, 1))!=RT_ERR_OK)
+                    return retVal2;
+            }
+
+            if(retVal != RT_ERR_OK)
+                return retVal;
+        }
+        else
+            return RT_ERR_PORT_ID;
+    }
+    else if(type == 1)
+    {
+        if (1 == ext_id)
+        {
+            if ((retVal = rtl8367c_setAsicReg(0x6601, 0x0002))!=RT_ERR_OK)
+                   return retVal;
+            if ((retVal = rtl8367c_setAsicReg(0x6600, 0x0081))!=RT_ERR_OK)
+                   return retVal;
+            if ((retVal = rtl8367c_getAsicReg(0x6602, &regValue))!=RT_ERR_OK)
+                   return retVal;
+
+            if(state)
+                  regValue |= 0x0200;
+            else
+                  regValue &= ~0x0200;
+
+            regValue |= 0x0100;
+
+            if ((retVal = rtl8367c_setAsicReg(0x6602, regValue))!=RT_ERR_OK)
+                   return retVal;
+            if ((retVal = rtl8367c_setAsicReg(0x6601, 0x0002))!=RT_ERR_OK)
+                   return retVal;
+            if ((retVal = rtl8367c_setAsicReg(0x6600, 0x00C1))!=RT_ERR_OK)
+                   return retVal;
+        }
+        else if (2 == ext_id)
+        {
+            if ((retVal = rtl8367c_setAsicReg(0x6601, 0x0002))!=RT_ERR_OK)
+                return retVal;
+            if ((retVal = rtl8367c_setAsicReg(0x6600, 0x0080))!=RT_ERR_OK)
+                return retVal;
+            if ((retVal = rtl8367c_getAsicReg(0x6602, &regValue))!=RT_ERR_OK)
+                return retVal;
+
+            if(state)
+                regValue |= 0x0200;
+            else
+                regValue &= ~0x0200;
+
+            regValue |= 0x0100;
+
+            if ((retVal = rtl8367c_setAsicReg(0x6602, regValue))!=RT_ERR_OK)
+                return retVal;
+            if ((retVal = rtl8367c_setAsicReg(0x6601, 0x0002))!=RT_ERR_OK)
+                return retVal;
+            if ((retVal = rtl8367c_setAsicReg(0x6600, 0x00C0))!=RT_ERR_OK)
+                return retVal;
+        }
+        else
+            return RT_ERR_PORT_ID;
+    }
+    else if(type == 2)
+    {
+        if ((retVal = rtl8367c_getAsicSdsReg(0, 2, 0, &regValue))!=RT_ERR_OK)
+            return retVal;
+
+        if(state & 1)
+            regValue &= ~0x100;
+        else
+            regValue |= 0x100;
+
+        if ((retVal = rtl8367c_setAsicSdsReg(0, 2, 0, regValue))!=RT_ERR_OK)
+            return retVal;
+    }
+
+    return RT_ERR_OK;
+}
+
+/* Function Name:
+ *      rtl8367c_getSgmiiNway
+ * Description:
+ *      Get SGMII Nway
+ * Input:
+ *      ext_id      - EXT ID
+ * Output:
+ *      pState      - SGMII Nway state (1: enabled, 0: disabled)
+ * Return:
+ *      RT_ERR_OK                   - Success
+ *      RT_ERR_SMI                  - SMI access error
+ * Note:
+ *      None.
+ */
+ret_t rtl8367c_getSgmiiNway(rtk_uint32 ext_id, rtk_uint32 *pState)
+{
+    rtk_uint32 retVal, regValue, type, running = 0, retVal2;
+
+    if((retVal = rtl8367c_setAsicReg(0x13C2, 0x0249)) != RT_ERR_OK)
+        return retVal;
+
+    if((retVal = rtl8367c_getAsicReg(0x1300, &regValue)) != RT_ERR_OK)
+        return retVal;
+
+    if((retVal = rtl8367c_setAsicReg(0x13C2, 0x0000)) != RT_ERR_OK)
+        return retVal;
+
+    switch (regValue)
+    {
+        case 0x0276:
+        case 0x0597:
+        case 0x6367:
+            type = 0;
+            break;
+        case 0x0652:
+        case 0x6368:
+            type = 1;
+            break;
+        case 0x0801:
+        case 0x6511:
+            type = 2;
+            break;
+        default:
+            return RT_ERR_FAILED;
+    }
+
+    if(type == 0)
+    {
+        if (1 == ext_id)
+        {
+            if ((retVal = rtl8367c_getAsicRegBit(0x130c, 5, &running))!=RT_ERR_OK)
+                return retVal;
+
+            if(running == 1)
+            {
+                if ((retVal = rtl8367c_setAsicRegBit(0x130c, 5, 0))!=RT_ERR_OK)
+                    return retVal;
+            }
+
+            retVal = rtl8367c_setAsicReg(0x6601, 0x0002);
+
+            if(retVal == RT_ERR_OK)
+                retVal = rtl8367c_setAsicReg(0x6600, 0x0080);
+
+            if(retVal == RT_ERR_OK)
+                retVal = rtl8367c_getAsicReg(0x6602, &regValue);
+
+            if(running == 1)
+            {
+                if ((retVal2 = rtl8367c_setAsicRegBit(0x130c, 5, 1))!=RT_ERR_OK)
+                    return retVal2;
+            }
+
+            if(retVal != RT_ERR_OK)
+                return retVal;
+
+            if(regValue & 0x0200)
+                *pState = 1;
+            else
+                *pState = 0;
+        }
+        else
+            return RT_ERR_PORT_ID;
+    }
+    else if(type == 1)
+    {
+        if (1 == ext_id)
+        {
+                if ((retVal = rtl8367c_setAsicReg(0x6601, 0x0002))!=RT_ERR_OK)
+                    return retVal;
+                if ((retVal = rtl8367c_setAsicReg(0x6600, 0x0081))!=RT_ERR_OK)
+                    return retVal;
+                if ((retVal = rtl8367c_getAsicReg(0x6602, &regValue))!=RT_ERR_OK)
+                    return retVal;
+
+                if(regValue & 0x0200)
+                    *pState = 1;
+                else
+                    *pState = 0;
+        }
+        else if (2 == ext_id)
+        {
+            if ((retVal = rtl8367c_setAsicReg(0x6601, 0x0002))!=RT_ERR_OK)
+                return retVal;
+            if ((retVal = rtl8367c_setAsicReg(0x6600, 0x0080))!=RT_ERR_OK)
+                return retVal;
+            if ((retVal = rtl8367c_getAsicReg(0x6602, &regValue))!=RT_ERR_OK)
+                return retVal;
+
+            if(regValue & 0x0200)
+                *pState = 1;
+            else
+                *pState = 0;
+        }
+        else
+            return RT_ERR_PORT_ID;
+    }
+    else if(type == 2)
+    {
+        if ((retVal = rtl8367c_getAsicSdsReg(0, 2, 0, &regValue))!=RT_ERR_OK)
+            return retVal;
+
+        if(regValue & 0x100)
+            *pState = 0;
+        else
+            *pState = 1;
+    }
+
+    return RT_ERR_OK;
+}
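+
+/*
+ * Usage sketch (editorial addition): enable SGMII auto-negotiation on EXT
+ * port 1, then read the setting back as a sanity check.
+ *
+ *    rtk_uint32 nway = 0;
+ *
+ *    rtl8367c_setSgmiiNway(1, 1);
+ *    rtl8367c_getSgmiiNway(1, &nway);    // nway == 1 on success
+ */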
diff -Nruw linux-4.4.115-fbx/drivers/misc/freebox./rtlapi/rtl8367c_asicdrv_port.h linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/rtl8367c_asicdrv_port.h
--- linux-4.4.115-fbx/drivers/misc/freebox./rtlapi/rtl8367c_asicdrv_port.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/rtl8367c_asicdrv_port.h	2019-01-22 16:16:24.715257382 +0100
@@ -0,0 +1,220 @@
+#ifndef _RTL8367C_ASICDRV_PORTSECURITY_H_
+#define _RTL8367C_ASICDRV_PORTSECURITY_H_
+
+#include <rtl8367c_asicdrv.h>
+#include <rtl8367c_asicdrv_unknownMulticast.h>
+#include <rtl8367c_asicdrv_phy.h>
+
+/****************************************************************/
+/* Type Definition                                              */
+/****************************************************************/
+
+#define RTL8367C_MAC7       7
+#define RTL8367C_EXTNO       3
+
+#define RTL8367C_RTCT_PAGE          (11)
+#define RTL8367C_RTCT_RESULT_A_REG  (27)
+#define RTL8367C_RTCT_RESULT_B_REG  (28)
+#define RTL8367C_RTCT_RESULT_C_REG  (29)
+#define RTL8367C_RTCT_RESULT_D_REG  (30)
+#define RTL8367C_RTCT_STATUS_REG    (26)
+
+enum L2_SECURITY_BEHAVE
+{
+    L2_BEHAVE_FLOODING = 0,
+    L2_BEHAVE_DROP,
+    L2_BEHAVE_TRAP,
+    L2_BEHAVE_END
+};
+
+enum L2_UNDA_BEHAVE
+{
+    L2_UNDA_BEHAVE_FLOODING_PMASK = 0,
+    L2_UNDA_BEHAVE_DROP,
+    L2_UNDA_BEHAVE_TRAP,
+    L2_UNDA_BEHAVE_FLOODING,
+    L2_UNDA_BEHAVE_END
+};
+
+enum L2_SECURITY_SA_BEHAVE
+{
+    L2_BEHAVE_SA_FLOODING = 0,
+    L2_BEHAVE_SA_DROP,
+    L2_BEHAVE_SA_TRAP,
+    L2_BEHAVE_SA_COPY28051,
+    L2_BEHAVE_SA_END
+};
+
+/* enum for port current link speed */
+enum SPEEDMODE
+{
+    SPD_10M = 0,
+    SPD_100M,
+    SPD_1000M,
+    SPD_2500M
+};
+
+/* enum for mac link mode */
+enum LINKMODE
+{
+    MAC_NORMAL = 0,
+    MAC_FORCE,
+};
+
+/* enum for port current link duplex mode */
+enum DUPLEXMODE
+{
+    HALF_DUPLEX = 0,
+    FULL_DUPLEX
+};
+
+/* enum for port current MST mode */
+enum MSTMODE
+{
+    SLAVE_MODE = 0,
+    MASTER_MODE
+};
+
+
+enum EXTMODE
+{
+    EXT_DISABLE = 0,
+    EXT_RGMII,
+    EXT_MII_MAC,
+    EXT_MII_PHY,
+    EXT_TMII_MAC,
+    EXT_TMII_PHY,
+    EXT_GMII,
+    EXT_RMII_MAC,
+    EXT_RMII_PHY,
+    EXT_SGMII,
+    EXT_HSGMII,
+    EXT_1000X_100FX,
+    EXT_1000X,
+    EXT_100FX,
+    EXT_RGMII_2,
+    EXT_MII_MAC_2,
+    EXT_MII_PHY_2,
+    EXT_TMII_MAC_2,
+    EXT_TMII_PHY_2,
+    EXT_RMII_MAC_2,
+    EXT_RMII_PHY_2,
+    EXT_END
+};
+
+enum DOSTYPE
+{
+    DOS_DAEQSA = 0,
+    DOS_LANDATTACKS,
+    DOS_BLATATTACKS,
+    DOS_SYNFINSCAN,
+    DOS_XMASCAN,
+    DOS_NULLSCAN,
+    DOS_SYN1024,
+    DOS_TCPSHORTHDR,
+    DOS_TCPFRAGERROR,
+    DOS_ICMPFRAGMENT,
+    DOS_END,
+
+};
+
+typedef struct  rtl8367c_port_ability_s{
+    rtk_uint16 forcemode;
+    rtk_uint16 mstfault;
+    rtk_uint16 mstmode;
+    rtk_uint16 nway;
+    rtk_uint16 txpause;
+    rtk_uint16 rxpause;
+    rtk_uint16 link;
+    rtk_uint16 duplex;
+    rtk_uint16 speed;
+}rtl8367c_port_ability_t;
+
+typedef struct  rtl8367c_port_status_s{
+
+    rtk_uint16 lpi1000;
+    rtk_uint16 lpi100;
+    rtk_uint16 mstfault;
+    rtk_uint16 mstmode;
+    rtk_uint16 nway;
+    rtk_uint16 txpause;
+    rtk_uint16 rxpause;
+    rtk_uint16 link;
+    rtk_uint16 duplex;
+    rtk_uint16 speed;
+
+}rtl8367c_port_status_t;
+
+typedef struct rtct_result_s
+{
+    rtk_uint32      channelAShort;
+    rtk_uint32      channelBShort;
+    rtk_uint32      channelCShort;
+    rtk_uint32      channelDShort;
+
+    rtk_uint32      channelAOpen;
+    rtk_uint32      channelBOpen;
+    rtk_uint32      channelCOpen;
+    rtk_uint32      channelDOpen;
+
+    rtk_uint32      channelAMismatch;
+    rtk_uint32      channelBMismatch;
+    rtk_uint32      channelCMismatch;
+    rtk_uint32      channelDMismatch;
+
+    rtk_uint32      channelALinedriver;
+    rtk_uint32      channelBLinedriver;
+    rtk_uint32      channelCLinedriver;
+    rtk_uint32      channelDLinedriver;
+
+    rtk_uint32      channelALen;
+    rtk_uint32      channelBLen;
+    rtk_uint32      channelCLen;
+    rtk_uint32      channelDLen;
+} rtl8367c_port_rtct_result_t;
+
+
+/****************************************************************/
+/* Driver Proto Type Definition                                 */
+/****************************************************************/
+extern ret_t rtl8367c_setAsicPortUnknownDaBehavior(rtk_uint32 port, rtk_uint32 behavior);
+extern ret_t rtl8367c_getAsicPortUnknownDaBehavior(rtk_uint32 port, rtk_uint32 *pBehavior);
+extern ret_t rtl8367c_setAsicPortUnknownSaBehavior(rtk_uint32 behavior);
+extern ret_t rtl8367c_getAsicPortUnknownSaBehavior(rtk_uint32 *pBehavior);
+extern ret_t rtl8367c_setAsicPortUnmatchedSaBehavior(rtk_uint32 behavior);
+extern ret_t rtl8367c_getAsicPortUnmatchedSaBehavior(rtk_uint32 *pBehavior);
+extern ret_t rtl8367c_setAsicPortUnmatchedSaMoving(rtk_uint32 port, rtk_uint32 enabled);
+extern ret_t rtl8367c_getAsicPortUnmatchedSaMoving(rtk_uint32 port, rtk_uint32 *pEnabled);
+extern ret_t rtl8367c_setAsicPortUnknownDaFloodingPortmask(rtk_uint32 portmask);
+extern ret_t rtl8367c_getAsicPortUnknownDaFloodingPortmask(rtk_uint32 *pPortmask);
+extern ret_t rtl8367c_setAsicPortUnknownMulticastFloodingPortmask(rtk_uint32 portmask);
+extern ret_t rtl8367c_getAsicPortUnknownMulticastFloodingPortmask(rtk_uint32 *pPortmask);
+extern ret_t rtl8367c_setAsicPortBcastFloodingPortmask(rtk_uint32 portmask);
+extern ret_t rtl8367c_getAsicPortBcastFloodingPortmask(rtk_uint32 *pPortmask);
+extern ret_t rtl8367c_setAsicPortBlockSpa(rtk_uint32 port, rtk_uint32 block);
+extern ret_t rtl8367c_getAsicPortBlockSpa(rtk_uint32 port, rtk_uint32 *pBlock);
+extern ret_t rtl8367c_setAsicPortForceLink(rtk_uint32 port, rtl8367c_port_ability_t *pPortAbility);
+extern ret_t rtl8367c_getAsicPortForceLink(rtk_uint32 port, rtl8367c_port_ability_t *pPortAbility);
+extern ret_t rtl8367c_getAsicPortStatus(rtk_uint32 port, rtl8367c_port_status_t *pPortStatus);
+extern ret_t rtl8367c_setAsicPortForceLinkExt(rtk_uint32 id, rtl8367c_port_ability_t *pPortAbility);
+extern ret_t rtl8367c_getAsicPortForceLinkExt(rtk_uint32 id, rtl8367c_port_ability_t *pPortAbility);
+extern ret_t rtl8367c_setAsicPortExtMode(rtk_uint32 id, rtk_uint32 mode);
+extern ret_t rtl8367c_getAsicPortExtMode(rtk_uint32 id, rtk_uint32 *pMode);
+extern ret_t rtl8367c_setAsicPortDos(rtk_uint32 type, rtk_uint32 drop);
+extern ret_t rtl8367c_getAsicPortDos(rtk_uint32 type, rtk_uint32* pDrop);
+extern ret_t rtl8367c_setAsicPortEnableAll(rtk_uint32 enable);
+extern ret_t rtl8367c_getAsicPortEnableAll(rtk_uint32 *pEnable);
+extern ret_t rtl8367c_setAsicPortSmallIpg(rtk_uint32 port, rtk_uint32 enable);
+extern ret_t rtl8367c_getAsicPortSmallIpg(rtk_uint32 port, rtk_uint32* pEnable);
+extern ret_t rtl8367c_setAsicPortLoopback(rtk_uint32 port, rtk_uint32 enable);
+extern ret_t rtl8367c_getAsicPortLoopback(rtk_uint32 port, rtk_uint32 *pEnable);
+extern ret_t rtl8367c_setAsicPortRTCTEnable(rtk_uint32 portmask);
+extern ret_t rtl8367c_setAsicPortRTCTDisable(rtk_uint32 portmask);
+extern ret_t rtl8367c_getAsicPortRTCTResult(rtk_uint32 port, rtl8367c_port_rtct_result_t *pResult);
+extern ret_t rtl8367c_sdsReset(rtk_uint32 id);
+extern ret_t rtl8367c_getSdsLinkStatus(rtk_uint32 ext_id, rtk_uint32 *pSignalDetect, rtk_uint32 *pSync, rtk_uint32 *pLink);
+extern ret_t rtl8367c_setSgmiiNway(rtk_uint32 ext_id, rtk_uint32 state);
+extern ret_t rtl8367c_getSgmiiNway(rtk_uint32 ext_id, rtk_uint32 *pState);
+
+#endif /*_RTL8367C_ASICDRV_PORTSECURITY_H_*/
+
diff -Nruw linux-4.4.115-fbx/drivers/misc/freebox./rtlapi/rtl8367c_asicdrv_portIsolation.c linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/rtl8367c_asicdrv_portIsolation.c
--- linux-4.4.115-fbx/drivers/misc/freebox./rtlapi/rtl8367c_asicdrv_portIsolation.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/rtl8367c_asicdrv_portIsolation.c	2019-01-22 16:16:24.715257382 +0100
@@ -0,0 +1,121 @@
+/*
+ * Copyright (C) 2013 Realtek Semiconductor Corp.
+ * All Rights Reserved.
+ *
+ * This program is the proprietary software of Realtek Semiconductor
+ * Corporation and/or its licensors, and may only be used, duplicated,
+ * modified or distributed under the authorized license from Realtek.
+ *
+ * ANY USE OF THE SOFTWARE OTHER THAN AS AUTHORIZED UNDER
+ * THIS LICENSE OR COPYRIGHT LAW IS PROHIBITED.
+ *
+ * $Revision: 76306 $
+ * $Date: 2017-03-08 15:13:58 +0800 (Wed, 08 Mar 2017) $
+ *
+ * Purpose : RTL8367C switch high-level API for RTL8367C
+ * Feature : Port isolation related functions
+ *
+ */
+
+#include <rtl8367c_asicdrv_portIsolation.h>
+/* Function Name:
+ *      rtl8367c_setAsicPortIsolationPermittedPortmask
+ * Description:
+ *      Set permitted port isolation portmask
+ * Input:
+ *      port            - Physical port number (0~10)
+ *      permitPortmask  - port mask
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - Success
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_PORT_ID      - Invalid port number
+ *      RT_ERR_PORT_MASK    - Invalid portmask
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_setAsicPortIsolationPermittedPortmask(rtk_uint32 port, rtk_uint32 permitPortmask)
+{
+    if(port >= RTL8367C_PORTNO)
+        return RT_ERR_PORT_ID;
+
+    if( permitPortmask > RTL8367C_PORTMASK)
+        return RT_ERR_PORT_MASK;
+
+    return rtl8367c_setAsicReg(RTL8367C_PORT_ISOLATION_PORT_MASK_REG(port), permitPortmask);
+}
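+
+/*
+ * Usage sketch (editorial addition): isolate port 0 so that it may only
+ * forward to port 5. Which physical port plays which role is board-specific;
+ * port 5 here is purely illustrative.
+ *
+ *    rtl8367c_setAsicPortIsolationPermittedPortmask(0, 1 << 5);
+ */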
+/* Function Name:
+ *      rtl8367c_getAsicPortIsolationPermittedPortmask
+ * Description:
+ *      Get permitted port isolation portmask
+ * Input:
+ *      port                - Physical port number (0~10)
+ * Output:
+ *      pPermitPortmask     - port mask
+ * Return:
+ *      RT_ERR_OK           - Success
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_PORT_ID      - Invalid port number
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicPortIsolationPermittedPortmask(rtk_uint32 port, rtk_uint32 *pPermitPortmask)
+{
+    if(port >= RTL8367C_PORTNO)
+        return RT_ERR_PORT_ID;
+
+    return rtl8367c_getAsicReg(RTL8367C_PORT_ISOLATION_PORT_MASK_REG(port), pPermitPortmask);
+}
+/* Function Name:
+ *      rtl8367c_setAsicPortIsolationEfid
+ * Description:
+ *      Set port isolation EFID
+ * Input:
+ *      port    - Physical port number (0~10)
+ *      efid    - EFID (0~7)
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - Success
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_PORT_ID      - Invalid port number
+ *      RT_ERR_OUT_OF_RANGE - Input parameter out of range
+ * Note:
+ *      EFID is used in individual learning in filtering database
+ */
+ret_t rtl8367c_setAsicPortIsolationEfid(rtk_uint32 port, rtk_uint32 efid)
+{
+    if(port >= RTL8367C_PORTNO)
+        return RT_ERR_PORT_ID;
+
+    if( efid > RTL8367C_EFIDMAX)
+        return RT_ERR_OUT_OF_RANGE;
+
+    return rtl8367c_setAsicRegBits(RTL8367C_PORT_EFID_REG(port), RTL8367C_PORT_EFID_MASK(port), efid);
+}
+/* Function Name:
+ *      rtl8367c_getAsicPortIsolationEfid
+ * Description:
+ *      Get port isolation EFID
+ * Input:
+ *      port    - Physical port number (0~10)
+ *      pEfid   - EFID (0~7)
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - Success
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_PORT_ID      - Invalid port number
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicPortIsolationEfid(rtk_uint32 port, rtk_uint32 *pEfid)
+{
+    if(port >= RTL8367C_PORTNO)
+        return RT_ERR_PORT_ID;
+
+    return rtl8367c_getAsicRegBits(RTL8367C_PORT_EFID_REG(port), RTL8367C_PORT_EFID_MASK(port), pEfid);
+}
+
diff -Nruw linux-4.4.115-fbx/drivers/misc/freebox./rtlapi/rtl8367c_asicdrv_portIsolation.h linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/rtl8367c_asicdrv_portIsolation.h
--- linux-4.4.115-fbx/drivers/misc/freebox./rtlapi/rtl8367c_asicdrv_portIsolation.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/rtl8367c_asicdrv_portIsolation.h	2019-01-22 16:16:24.715257382 +0100
@@ -0,0 +1,11 @@
+#ifndef _RTL8367C_ASICDRV_PORTISOLATION_H_
+#define _RTL8367C_ASICDRV_PORTISOLATION_H_
+
+#include <rtl8367c_asicdrv.h>
+
+extern ret_t rtl8367c_setAsicPortIsolationPermittedPortmask(rtk_uint32 port, rtk_uint32 permitPortmask);
+extern ret_t rtl8367c_getAsicPortIsolationPermittedPortmask(rtk_uint32 port, rtk_uint32 *pPermitPortmask);
+extern ret_t rtl8367c_setAsicPortIsolationEfid(rtk_uint32 port, rtk_uint32 efid);
+extern ret_t rtl8367c_getAsicPortIsolationEfid(rtk_uint32 port, rtk_uint32 *pEfid);
+
+#endif /*_RTL8367C_ASICDRV_PORTISOLATION_H_*/
diff -Nruw linux-4.4.115-fbx/drivers/misc/freebox./rtlapi/rtl8367c_asicdrv_qos.c linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/rtl8367c_asicdrv_qos.c
--- linux-4.4.115-fbx/drivers/misc/freebox./rtlapi/rtl8367c_asicdrv_qos.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/rtl8367c_asicdrv_qos.c	2019-01-22 16:16:24.715257382 +0100
@@ -0,0 +1,780 @@
+/*
+ * Copyright (C) 2013 Realtek Semiconductor Corp.
+ * All Rights Reserved.
+ *
+ * This program is the proprietary software of Realtek Semiconductor
+ * Corporation and/or its licensors, and may only be used, duplicated,
+ * modified or distributed under the authorized license from Realtek.
+ *
+ * ANY USE OF THE SOFTWARE OTHER THAN AS AUTHORIZED UNDER
+ * THIS LICENSE OR COPYRIGHT LAW IS PROHIBITED.
+ *
+ * $Revision: 76306 $
+ * $Date: 2017-03-08 15:13:58 +0800 (Wed, 08 Mar 2017) $
+ *
+ * Purpose : RTL8367C switch high-level API for RTL8367C
+ * Feature : Qos related functions
+ *
+ */
+
+#include <rtl8367c_asicdrv_qos.h>
+/* Function Name:
+ *      rtl8367c_setAsicPriorityDot1qRemapping
+ * Description:
+ *      Set 802.1Q absolute priority
+ * Input:
+ *      srcpriority - Priority value
+ *      priority     - Absolute priority value
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK                 - Success
+ *      RT_ERR_SMI              - SMI access error
+ *      RT_ERR_QOS_INT_PRIORITY    - Invalid priority
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_setAsicPriorityDot1qRemapping(rtk_uint32 srcpriority, rtk_uint32 priority )
+{
+    if((srcpriority > RTL8367C_PRIMAX) || (priority > RTL8367C_PRIMAX))
+        return RT_ERR_QOS_INT_PRIORITY;
+
+    return rtl8367c_setAsicRegBits(RTL8367C_QOS_1Q_PRIORITY_REMAPPING_REG(srcpriority), RTL8367C_QOS_1Q_PRIORITY_REMAPPING_MASK(srcpriority),priority);
+}
+/* Function Name:
+ *      rtl8367c_getAsicPriorityDot1qRemapping
+ * Description:
+ *      Get 802.1Q absolute priority
+ * Input:
+ *      srcpriority - Priority value
+ * Output:
+ *      pPriority   - Absolute priority value
+ * Return:
+ *      RT_ERR_OK     - Success
+ *      RT_ERR_SMI  - SMI access error
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicPriorityDot1qRemapping(rtk_uint32 srcpriority, rtk_uint32 *pPriority )
+{
+    if(srcpriority > RTL8367C_PRIMAX )
+        return RT_ERR_QOS_INT_PRIORITY;
+
+    return rtl8367c_getAsicRegBits(RTL8367C_QOS_1Q_PRIORITY_REMAPPING_REG(srcpriority), RTL8367C_QOS_1Q_PRIORITY_REMAPPING_MASK(srcpriority), pPriority);
+}
+/* Function Name:
+ *      rtl8367c_setAsicPriorityPortBased
+ * Description:
+ *      Set port based priority
+ * Input:
+ *      port         - Physical port number (0~10)
+ *      priority     - Priority value
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK                 - Success
+ *      RT_ERR_SMI              - SMI access error
+ *      RT_ERR_PORT_ID          - Invalid port number
+ *      RT_ERR_QOS_INT_PRIORITY    - Invalid priority
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_setAsicPriorityPortBased(rtk_uint32 port, rtk_uint32 priority )
+{
+    ret_t retVal;
+
+    if(port > RTL8367C_PORTIDMAX)
+        return RT_ERR_PORT_ID;
+
+    if(priority > RTL8367C_PRIMAX )
+        return RT_ERR_QOS_INT_PRIORITY;
+
+    if(port < 8)
+    {
+        retVal = rtl8367c_setAsicRegBits(RTL8367C_QOS_PORTBASED_PRIORITY_REG(port), RTL8367C_QOS_PORTBASED_PRIORITY_MASK(port), priority);
+        if(retVal != RT_ERR_OK)
+            return retVal;
+    }
+    else
+    {
+        retVal = rtl8367c_setAsicRegBits(RTL8367C_REG_QOS_PORTBASED_PRIORITY_CTRL2, 0x7 << ((port - 8) << 2), priority);
+        if(retVal != RT_ERR_OK)
+            return retVal;
+    }
+
+    return RT_ERR_OK;
+}
+/* Function Name:
+ *      rtl8367c_getAsicPriorityPortBased
+ * Description:
+ *      Get port based priority
+ * Input:
+ *      port        - Physical port number (0~10)
+ * Output:
+ *      pPriority   - Priority value
+ * Return:
+ *      RT_ERR_OK         - Success
+ *      RT_ERR_SMI      - SMI access error
+ *      RT_ERR_PORT_ID  - Invalid port number
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicPriorityPortBased(rtk_uint32 port, rtk_uint32 *pPriority )
+{
+    ret_t retVal;
+
+    if(port > RTL8367C_PORTIDMAX)
+        return RT_ERR_PORT_ID;
+
+    if(port < 8)
+    {
+        retVal = rtl8367c_getAsicRegBits(RTL8367C_QOS_PORTBASED_PRIORITY_REG(port), RTL8367C_QOS_PORTBASED_PRIORITY_MASK(port), pPriority);
+        if(retVal != RT_ERR_OK)
+            return retVal;
+    }
+    else
+    {
+        retVal = rtl8367c_getAsicRegBits(RTL8367C_REG_QOS_PORTBASED_PRIORITY_CTRL2, 0x7 << ((port - 8) << 2), pPriority);
+        if(retVal != RT_ERR_OK)
+            return retVal;
+    }
+
+    return RT_ERR_OK;
+}
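+
+/*
+ * Usage sketch (editorial addition): assign internal priority 5 to all
+ * traffic ingressing port 2, then read it back.
+ *
+ *    rtk_uint32 pri = 0;
+ *
+ *    rtl8367c_setAsicPriorityPortBased(2, 5);
+ *    rtl8367c_getAsicPriorityPortBased(2, &pri);    // pri == 5 on success
+ */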
+/* Function Name:
+ *      rtl8367c_setAsicPriorityDscpBased
+ * Description:
+ *      Set DSCP-based priority
+ * Input:
+ *      dscp         - DSCP value
+ *      priority     - Priority value
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK                 - Success
+ *      RT_ERR_SMI              - SMI access error
+ *      RT_ERR_QOS_DSCP_VALUE    - Invalid DSCP value
+ *      RT_ERR_QOS_INT_PRIORITY    - Invalid priority
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_setAsicPriorityDscpBased(rtk_uint32 dscp, rtk_uint32 priority )
+{
+    if(priority > RTL8367C_PRIMAX )
+        return RT_ERR_QOS_INT_PRIORITY;
+
+    if(dscp > RTL8367C_DSCPMAX)
+        return RT_ERR_QOS_DSCP_VALUE;
+
+    return rtl8367c_setAsicRegBits(RTL8367C_QOS_DSCP_TO_PRIORITY_REG(dscp), RTL8367C_QOS_DSCP_TO_PRIORITY_MASK(dscp), priority);
+}
+/* Function Name:
+ *      rtl8367c_getAsicPriorityDscpBased
+ * Description:
+ *      Get DSCP-based priority
+ * Input:
+ *      dscp         - DSCP value
+ * Output:
+ *      pPriority    - Priority value
+ * Return:
+ *      RT_ERR_OK                 - Success
+ *      RT_ERR_SMI                - SMI access error
+ *      RT_ERR_QOS_DSCP_VALUE     - Invalid DSCP value
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicPriorityDscpBased(rtk_uint32 dscp, rtk_uint32 *pPriority )
+{
+    if(dscp > RTL8367C_DSCPMAX)
+        return RT_ERR_QOS_DSCP_VALUE;
+
+    return rtl8367c_getAsicRegBits(RTL8367C_QOS_DSCP_TO_PRIORITY_REG(dscp), RTL8367C_QOS_DSCP_TO_PRIORITY_MASK(dscp), pPriority);
+}
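+
+/*
+ * Usage sketch (editorial addition): map DSCP 46 (EF, commonly used for
+ * voice) to the highest internal priority; this assumes an 8-level priority
+ * space (RTL8367C_PRIMAX == 7), as the 3-bit register fields suggest.
+ *
+ *    rtl8367c_setAsicPriorityDscpBased(46, 7);
+ */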
+/* Function Name:
+ *      rtl8367c_setAsicPriorityDecision
+ * Description:
+ *      Set priority decision table
+ * Input:
+ *      index       - Table index
+ *      prisrc      - Priority decision source
+ *      decisionPri - Decision priority assignment
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK                     - Success
+ *      RT_ERR_SMI                  - SMI access error
+ *      RT_ERR_QOS_INT_PRIORITY        - Invalid priority
+ *      RT_ERR_QOS_SEL_PRI_SOURCE    - Invalid priority decision source parameter
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_setAsicPriorityDecision(rtk_uint32 index, rtk_uint32 prisrc, rtk_uint32 decisionPri)
+{
+    ret_t retVal;
+
+    if(index >= PRIDEC_IDX_END )
+        return RT_ERR_ENTRY_INDEX;
+
+    if(prisrc >= PRIDEC_END )
+        return RT_ERR_QOS_SEL_PRI_SOURCE;
+
+    if(decisionPri > RTL8367C_DECISIONPRIMAX )
+        return RT_ERR_QOS_INT_PRIORITY;
+
+    switch(index)
+    {
+        case PRIDEC_IDX0:
+            if((retVal = rtl8367c_setAsicRegBits(RTL8367C_QOS_INTERNAL_PRIORITY_DECISION_REG(prisrc), RTL8367C_QOS_INTERNAL_PRIORITY_DECISION_MASK(prisrc), decisionPri))!=  RT_ERR_OK)
+                return retVal;
+            break;
+        case PRIDEC_IDX1:
+            if((retVal = rtl8367c_setAsicRegBits(RTL8367C_QOS_INTERNAL_PRIORITY_DECISION2_REG(prisrc), RTL8367C_QOS_INTERNAL_PRIORITY_DECISION2_MASK(prisrc), decisionPri))!=  RT_ERR_OK)
+                return retVal;
+            break;
+        default:
+            break;
+    };
+
+    return RT_ERR_OK;
+
+
+}
+
+/* Function Name:
+ *      rtl8367c_getAsicPriorityDecision
+ * Description:
+ *      Get priority decision table
+ * Input:
+ *      index        - Table index
+ *      prisrc       - Priority decision source
+ * Output:
+ *      pDecisionPri - Decision priority assignment
+ * Return:
+ *      RT_ERR_OK                     - Success
+ *      RT_ERR_SMI                  - SMI access error
+ *      RT_ERR_QOS_SEL_PRI_SOURCE    - Invalid priority decision source parameter
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicPriorityDecision(rtk_uint32 index, rtk_uint32 prisrc, rtk_uint32* pDecisionPri)
+{
+    ret_t retVal;
+
+    if(index >= PRIDEC_IDX_END )
+        return RT_ERR_ENTRY_INDEX;
+
+    if(prisrc >= PRIDEC_END )
+        return RT_ERR_QOS_SEL_PRI_SOURCE;
+
+    switch(index)
+    {
+        case PRIDEC_IDX0:
+            if((retVal = rtl8367c_getAsicRegBits(RTL8367C_QOS_INTERNAL_PRIORITY_DECISION_REG(prisrc), RTL8367C_QOS_INTERNAL_PRIORITY_DECISION_MASK(prisrc), pDecisionPri))!=  RT_ERR_OK)
+                return retVal;
+            break;
+        case PRIDEC_IDX1:
+            if((retVal = rtl8367c_getAsicRegBits(RTL8367C_QOS_INTERNAL_PRIORITY_DECISION2_REG(prisrc), RTL8367C_QOS_INTERNAL_PRIORITY_DECISION2_MASK(prisrc), pDecisionPri))!=  RT_ERR_OK)
+                return retVal;
+            break;
+        default:
+            break;
+    };
+
+    return RT_ERR_OK;
+
+}
+
+/* Function Name:
+ *      rtl8367c_setAsicPortPriorityDecisionIndex
+ * Description:
+ *      Set priority decision index for each port
+ * Input:
+ *      port     - Physical port number (0~7)
+ *      index     - Table index
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK             - Success
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_PORT_ID      - Invalid port number
+ *      RT_ERR_ENTRY_INDEX    - Invalid table index
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_setAsicPortPriorityDecisionIndex(rtk_uint32 port, rtk_uint32 index )
+{
+    if(port > RTL8367C_PORTIDMAX)
+        return RT_ERR_PORT_ID;
+
+    if(index >= PRIDEC_IDX_END)
+        return RT_ERR_ENTRY_INDEX;
+
+    return rtl8367c_setAsicRegBit(RTL8367C_QOS_INTERNAL_PRIORITY_DECISION_IDX_CTRL, port, index);
+}
+/* Function Name:
+ *      rtl8367c_getAsicPortPriorityDecisionIndex
+ * Description:
+ *      Get priority decision index for each port
+ * Input:
+ *      port     - Physical port number (0~7)
+ * Output:
+ *      pIndex   - Table index
+ * Return:
+ *      RT_ERR_OK             - Success
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_PORT_ID      - Invalid port number
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicPortPriorityDecisionIndex(rtk_uint32 port, rtk_uint32 *pIndex )
+{
+    if(port > RTL8367C_PORTIDMAX)
+        return RT_ERR_PORT_ID;
+
+    return rtl8367c_getAsicRegBit(RTL8367C_QOS_INTERNAL_PRIORITY_DECISION_IDX_CTRL, port, pIndex);
+}
+
+/* Function Name:
+ *      rtl8367c_setAsicOutputQueueMappingIndex
+ * Description:
+ *      Set output queue number for each port
+ * Input:
+ *      port     - Physical port number (0~7)
+ *      index     - Mapping table index
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK             - Success
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_PORT_ID      - Invalid port number
+ *      RT_ERR_QUEUE_NUM      - Invalid queue number
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_setAsicOutputQueueMappingIndex(rtk_uint32 port, rtk_uint32 index )
+{
+    if(port > RTL8367C_PORTIDMAX)
+        return RT_ERR_PORT_ID;
+
+    if(index >= RTL8367C_QUEUENO)
+        return RT_ERR_QUEUE_NUM;
+
+    return rtl8367c_setAsicRegBits(RTL8367C_QOS_PORT_QUEUE_NUMBER_REG(port), RTL8367C_QOS_PORT_QUEUE_NUMBER_MASK(port), index);
+}
+/* Function Name:
+ *      rtl8367c_getAsicOutputQueueMappingIndex
+ * Description:
+ *      Get output queue number for each port
+ * Input:
+ *      port     - Physical port number (0~7)
+ * Output:
+ *      pIndex   - Mapping table index
+ * Return:
+ *      RT_ERR_OK             - Success
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_PORT_ID      - Invalid port number
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicOutputQueueMappingIndex(rtk_uint32 port, rtk_uint32 *pIndex )
+{
+    if(port > RTL8367C_PORTIDMAX)
+        return RT_ERR_PORT_ID;
+
+    return rtl8367c_getAsicRegBits(RTL8367C_QOS_PORT_QUEUE_NUMBER_REG(port), RTL8367C_QOS_PORT_QUEUE_NUMBER_MASK(port), pIndex);
+}
+/* Function Name:
+ *      rtl8367c_setAsicPriorityToQIDMappingTable
+ * Description:
+ *      Set priority to QID mapping table parameters
+ * Input:
+ *      index         - Mapping table index
+ *      priority     - The priority value
+ *      qid         - Queue id
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK                 - Success
+ *      RT_ERR_SMI              - SMI access error
+ *      RT_ERR_QUEUE_ID          - Invalid queue id
+ *      RT_ERR_QUEUE_NUM          - Invalid queue number
+ *      RT_ERR_QOS_INT_PRIORITY - Invalid priority
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_setAsicPriorityToQIDMappingTable(rtk_uint32 index, rtk_uint32 priority, rtk_uint32 qid )
+{
+    if(index >= RTL8367C_QUEUENO)
+        return RT_ERR_QUEUE_NUM;
+
+    if(priority > RTL8367C_PRIMAX)
+        return RT_ERR_QOS_INT_PRIORITY;
+
+    if(qid > RTL8367C_QIDMAX)
+        return RT_ERR_QUEUE_ID;
+
+    return rtl8367c_setAsicRegBits(RTL8367C_QOS_1Q_PRIORITY_TO_QID_REG(index, priority), RTL8367C_QOS_1Q_PRIORITY_TO_QID_MASK(priority), qid);
+}
+/* Function Name:
+ *      rtl8367c_getAsicPriorityToQIDMappingTable
+ * Description:
+ *      Get priority to QID mapping table parameters
+ * Input:
+ *      index        - Mapping table index
+ *      priority     - The priority value
+ * Output:
+ *      pQid         - Queue id
+ * Return:
+ *      RT_ERR_OK                 - Success
+ *      RT_ERR_SMI              - SMI access error
+ *      RT_ERR_QUEUE_NUM          - Invalid queue number
+ *      RT_ERR_QOS_INT_PRIORITY - Invalid priority
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicPriorityToQIDMappingTable(rtk_uint32 index, rtk_uint32 priority, rtk_uint32* pQid)
+{
+    if(index >= RTL8367C_QUEUENO)
+        return RT_ERR_QUEUE_NUM;
+
+    if(priority > RTL8367C_PRIMAX)
+        return RT_ERR_QOS_INT_PRIORITY;
+
+    return rtl8367c_getAsicRegBits(RTL8367C_QOS_1Q_PRIORITY_TO_QID_REG(index, priority), RTL8367C_QOS_1Q_PRIORITY_TO_QID_MASK(priority), pQid);
+}
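+
+/*
+ * Usage sketch (editorial addition): for mapping-table index 1, send
+ * priorities 0-3 to queue 0 and priorities 4-7 to queue 1 (again assuming an
+ * 8-level priority space, RTL8367C_PRIMAX == 7).
+ *
+ *    rtk_uint32 pri;
+ *
+ *    for (pri = 0; pri <= 7; pri++)
+ *        rtl8367c_setAsicPriorityToQIDMappingTable(1, pri, (pri < 4) ? 0 : 1);
+ */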
+/* Function Name:
+ *      rtl8367c_setAsicRemarkingDot1pAbility
+ * Description:
+ *      Set 802.1p remarking ability
+ * Input:
+ *      port     - Physical port number (0~7)
+ *      enabled - 1: enabled, 0: disabled
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK             - Success
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_PORT_ID      - Invalid port number
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_setAsicRemarkingDot1pAbility(rtk_uint32 port, rtk_uint32 enabled)
+{
+    if(port > RTL8367C_PORTIDMAX)
+        return RT_ERR_PORT_ID;
+
+    return rtl8367c_setAsicRegBit(RTL8367C_PORT_MISC_CFG_REG(port), RTL8367C_1QREMARK_ENABLE_OFFSET, enabled);
+}
+/* Function Name:
+ *      rtl8367c_getAsicRemarkingDot1pAbility
+ * Description:
+ *      Get 802.1p remarking ability
+ * Input:
+ *      port     - Physical port number (0~7)
+ * Output:
+ *      pEnabled - 1: enabled, 0: disabled
+ * Return:
+ *      RT_ERR_OK     - Success
+ *      RT_ERR_SMI  - SMI access error
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicRemarkingDot1pAbility(rtk_uint32 port, rtk_uint32* pEnabled)
+{
+    return rtl8367c_getAsicRegBit(RTL8367C_PORT_MISC_CFG_REG(port), RTL8367C_1QREMARK_ENABLE_OFFSET, pEnabled);
+}
+/* Function Name:
+ *      rtl8367c_setAsicRemarkingDot1pParameter
+ * Description:
+ *      Set 802.1p remarking parameter
+ * Input:
+ *      priority     - Priority value
+ *      newPriority - New priority value
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK                 - Success
+ *      RT_ERR_SMI              - SMI access error
+ *      RT_ERR_QOS_INT_PRIORITY - Invalid priority
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_setAsicRemarkingDot1pParameter(rtk_uint32 priority, rtk_uint32 newPriority )
+{
+    if(priority > RTL8367C_PRIMAX || newPriority > RTL8367C_PRIMAX)
+        return RT_ERR_QOS_INT_PRIORITY;
+
+    return rtl8367c_setAsicRegBits(RTL8367C_QOS_1Q_REMARK_REG(priority), RTL8367C_QOS_1Q_REMARK_MASK(priority), newPriority);
+}
+/* Function Name:
+ *      rtl8367c_getAsicRemarkingDot1pParameter
+ * Description:
+ *      Get 802.1p remarking parameter
+ * Input:
+ *      priority     - Priority value
+ * Output:
+ *      pNewPriority - New priority value
+ * Return:
+ *      RT_ERR_OK                 - Success
+ *      RT_ERR_SMI              - SMI access error
+ *      RT_ERR_QOS_INT_PRIORITY - Invalid priority
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicRemarkingDot1pParameter(rtk_uint32 priority, rtk_uint32 *pNewPriority )
+{
+    if(priority > RTL8367C_PRIMAX )
+        return RT_ERR_QOS_INT_PRIORITY;
+
+    return rtl8367c_getAsicRegBits(RTL8367C_QOS_1Q_REMARK_REG(priority), RTL8367C_QOS_1Q_REMARK_MASK(priority), pNewPriority);
+}
+
+/* Function Name:
+ *      rtl8367c_setAsicRemarkingDot1pSrc
+ * Description:
+ *      Set remarking source of 802.1p remarking.
+ * Input:
+ *      type      - remarking source
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK                  - Success
+ *      RT_ERR_QOS_SEL_PRI_SOURCE  - Invalid remarking source
+ * Note:
+ *      The API configures the 802.1p remark function to take either the
+ *      user-defined remark parameter or the original tag priority as the
+ *      remarking source.
+ */
+ret_t rtl8367c_setAsicRemarkingDot1pSrc(rtk_uint32 type)
+{
+    if(type >= DOT1P_PRISEL_END)
+        return RT_ERR_QOS_SEL_PRI_SOURCE;
+
+    return rtl8367c_setAsicRegBit(RTL8367C_REG_RMK_CFG_SEL_CTRL, RTL8367C_RMK_1Q_CFG_SEL_OFFSET, type);
+}
+
+/* Function Name:
+ *      rtl8367c_getAsicRemarkingDot1pSrc
+ * Description:
+ *      Get remarking source of 802.1p remarking.
+ * Input:
+ *      None
+ * Output:
+ *      pType      - remarking source
+ * Return:
+ *      RT_ERR_OK   - Success
+ *      RT_ERR_SMI  - SMI access error
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicRemarkingDot1pSrc(rtk_uint32 *pType)
+{
+    return rtl8367c_getAsicRegBit(RTL8367C_REG_RMK_CFG_SEL_CTRL, RTL8367C_RMK_1Q_CFG_SEL_OFFSET, pType);
+}
+
+/* Function Name:
+ *      rtl8367c_setAsicRemarkingDscpAbility
+ * Description:
+ *      Set DSCP remarking ability
+ * Input:
+ *      enabled     - 1: enabled, 0: disabled
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK     - Success
+ *      RT_ERR_SMI  - SMI access error
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_setAsicRemarkingDscpAbility(rtk_uint32 enabled)
+{
+    return rtl8367c_setAsicRegBit(RTL8367C_REMARKING_CTRL_REG, RTL8367C_REMARKING_DSCP_ENABLE_OFFSET, enabled);
+}
+/* Function Name:
+ *      rtl8367c_getAsicRemarkingDscpAbility
+ * Description:
+ *      Get DSCP remarking ability
+ * Input:
+ *      None
+ * Output:
+ *      pEnabled    - 1: enabled, 0: disabled
+ * Return:
+ *      RT_ERR_OK     - Success
+ *      RT_ERR_SMI  - SMI access error
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicRemarkingDscpAbility(rtk_uint32* pEnabled)
+{
+    return rtl8367c_getAsicRegBit(RTL8367C_REMARKING_CTRL_REG, RTL8367C_REMARKING_DSCP_ENABLE_OFFSET, pEnabled);
+}
+/* Function Name:
+ *      rtl8367c_setAsicRemarkingDscpParameter
+ * Description:
+ *      Set DSCP remarking parameter
+ * Input:
+ *      priority     - Priority value
+ *      newDscp     - New DSCP value
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK                 - Success
+ *      RT_ERR_SMI              - SMI access error
+ *      RT_ERR_QOS_DSCP_VALUE    - Invalid DSCP value
+ *      RT_ERR_QOS_INT_PRIORITY    - Invalid priority
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_setAsicRemarkingDscpParameter(rtk_uint32 priority, rtk_uint32 newDscp )
+{
+    if(priority > RTL8367C_PRIMAX )
+        return RT_ERR_QOS_INT_PRIORITY;
+
+    if(newDscp > RTL8367C_DSCPMAX)
+        return RT_ERR_QOS_DSCP_VALUE;
+
+    return rtl8367c_setAsicRegBits(RTL8367C_QOS_DSCP_REMARK_REG(priority), RTL8367C_QOS_DSCP_REMARK_MASK(priority), newDscp);
+}
+/* Function Name:
+ *      rtl8367c_getAsicRemarkingDscpParameter
+ * Description:
+ *      Get DSCP remarking parameter
+ * Input:
+ *      priority     - Priority value
+ * Output:
+ *      pNewDscp     - New DSCP value
+ * Return:
+ *      RT_ERR_OK                 - Success
+ *      RT_ERR_SMI              - SMI access error
+ *      RT_ERR_QOS_INT_PRIORITY    - Invalid priority
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicRemarkingDscpParameter(rtk_uint32 priority, rtk_uint32* pNewDscp )
+{
+    if(priority > RTL8367C_PRIMAX )
+        return RT_ERR_QOS_INT_PRIORITY;
+
+    return rtl8367c_getAsicRegBits(RTL8367C_QOS_DSCP_REMARK_REG(priority), RTL8367C_QOS_DSCP_REMARK_MASK(priority), pNewDscp);
+}
+
+/* Function Name:
+ *      rtl8367c_setAsicRemarkingDscpSrc
+ * Description:
+ *      Set remarking source of DSCP remarking.
+ * Input:
+ *      type      - remarking source
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK                  - Success
+ *      RT_ERR_QOS_SEL_PRI_SOURCE  - Invalid remarking source
+ * Note:
+ *      The API can configure the DSCP remark function to map either the
+ *      original DSCP value or the internal priority to the TX DSCP value.
+ */
+ret_t rtl8367c_setAsicRemarkingDscpSrc(rtk_uint32 type)
+{
+    if(type >= DSCP_PRISEL_END)
+        return RT_ERR_QOS_SEL_PRI_SOURCE;
+
+    return rtl8367c_setAsicRegBits(RTL8367C_REG_RMK_CFG_SEL_CTRL, RTL8367C_RMK_DSCP_CFG_SEL_MASK, type);
+}
+
+/* Function Name:
+ *      rtl8367c_getAsicRemarkingDscpSrc
+ * Description:
+ *      Get remarking source of DSCP remarking.
+ * Input:
+ *      None
+ * Output:
+ *      pType      - remarking source
+ * Return:
+ *      RT_ERR_OK   - Success
+ *      RT_ERR_SMI  - SMI access error
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicRemarkingDscpSrc(rtk_uint32 *pType)
+{
+    return rtl8367c_getAsicRegBits(RTL8367C_REG_RMK_CFG_SEL_CTRL, RTL8367C_RMK_DSCP_CFG_SEL_MASK, pType);
+}
+
+/* Function Name:
+ *      rtl8367c_setAsicRemarkingDscp2Dscp
+ * Description:
+ *      Set DSCP to remarked DSCP mapping.
+ * Input:
+ *      dscp    - DSCP value
+ *      rmkDscp - remarked DSCP value
+ * Output:
+ *      None.
+ * Return:
+ *      RT_ERR_OK
+ *      RT_ERR_FAILED
+ *      RT_ERR_UNIT_ID          - Invalid unit id
+ *      RT_ERR_QOS_DSCP_VALUE   - Invalid dscp value
+ * Note:
+ *      The dscp parameter is either a DSCP value or an internal priority,
+ *      depending on the source selected with rtl8367c_setAsicRemarkingDscpSrc(),
+ *      because the DSCP remark function can map either the original DSCP
+ *      value or the internal priority to the TX DSCP value.
+ */
+ret_t rtl8367c_setAsicRemarkingDscp2Dscp(rtk_uint32 dscp, rtk_uint32 rmkDscp)
+{
+    if((dscp > RTL8367C_DSCPMAX) || (rmkDscp > RTL8367C_DSCPMAX))
+        return RT_ERR_QOS_DSCP_VALUE;
+
+    return rtl8367c_setAsicRegBits(RTL8367C_QOS_DSCP_TO_DSCP_REG(dscp), RTL8367C_QOS_DSCP_TO_DSCP_MASK(dscp), rmkDscp);
+}
+
+/* Function Name:
+ *      rtl8367c_getAsicRemarkingDscp2Dscp
+ * Description:
+ *      Get DSCP to remarked DSCP mapping.
+ * Input:
+ *      dscp    - DSCP value
+ * Output:
+ *      pRmkDscp   - remarked DSCP value
+ * Return:
+ *      RT_ERR_OK
+ *      RT_ERR_FAILED
+ *      RT_ERR_QOS_DSCP_VALUE   - Invalid dscp value
+ *      RT_ERR_NULL_POINTER     - NULL pointer
+ * Note:
+ *      None.
+ */
+ret_t rtl8367c_getAsicRemarkingDscp2Dscp(rtk_uint32 dscp, rtk_uint32 *pRmkDscp)
+{
+    if(dscp > RTL8367C_DSCPMAX)
+        return RT_ERR_QOS_DSCP_VALUE;
+
+    return rtl8367c_getAsicRegBits(RTL8367C_QOS_DSCP_TO_DSCP_REG(dscp), RTL8367C_QOS_DSCP_TO_DSCP_MASK(dscp), pRmkDscp);
+}
+
diff -Nruw linux-4.4.115-fbx/drivers/misc/freebox./rtlapi/rtl8367c_asicdrv_qos.h linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/rtl8367c_asicdrv_qos.h
--- linux-4.4.115-fbx/drivers/misc/freebox./rtlapi/rtl8367c_asicdrv_qos.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/rtl8367c_asicdrv_qos.h	2019-01-22 16:16:24.715257382 +0100
@@ -0,0 +1,79 @@
+#ifndef _RTL8367C_ASICDRV_QOS_H_
+#define _RTL8367C_ASICDRV_QOS_H_
+
+#include <rtl8367c_asicdrv.h>
+
+#define RTL8367C_DECISIONPRIMAX    0xFF
+
+/* enum Priority Selection Types */
+enum PRIDECISION
+{
+    PRIDEC_PORT = 0,
+    PRIDEC_ACL,
+    PRIDEC_DSCP,
+    PRIDEC_1Q,
+    PRIDEC_1AD,
+    PRIDEC_CVLAN,
+    PRIDEC_DA,
+    PRIDEC_SA,
+    PRIDEC_END,
+};
+
+/* enum Priority Selection Index */
+enum RTL8367C_PRIDEC_TABLE
+{
+    PRIDEC_IDX0 = 0,
+    PRIDEC_IDX1,
+    PRIDEC_IDX_END,
+};
+
+enum RTL8367C_DOT1P_PRISEL
+{
+    DOT1P_PRISEL_USER =  0,
+    DOT1P_PRISEL_TAG,
+    DOT1P_PRISEL_END
+};
+
+enum RTL8367C_DSCP_PRISEL
+{
+    DSCP_PRISEL_INTERNAL =  0,
+    DSCP_PRISEL_DSCP,
+    DSCP_PRISEL_USER,
+    DSCP_PRISEL_END
+};
+
+
+extern ret_t rtl8367c_setAsicRemarkingDot1pAbility(rtk_uint32 port, rtk_uint32 enabled);
+extern ret_t rtl8367c_getAsicRemarkingDot1pAbility(rtk_uint32 port, rtk_uint32* pEnabled);
+extern ret_t rtl8367c_setAsicRemarkingDot1pParameter(rtk_uint32 priority, rtk_uint32 newPriority );
+extern ret_t rtl8367c_getAsicRemarkingDot1pParameter(rtk_uint32 priority, rtk_uint32 *pNewPriority );
+extern ret_t rtl8367c_setAsicRemarkingDot1pSrc(rtk_uint32 type);
+extern ret_t rtl8367c_getAsicRemarkingDot1pSrc(rtk_uint32 *pType);
+extern ret_t rtl8367c_setAsicRemarkingDscpAbility(rtk_uint32 enabled);
+extern ret_t rtl8367c_getAsicRemarkingDscpAbility(rtk_uint32* pEnabled);
+extern ret_t rtl8367c_setAsicRemarkingDscpParameter(rtk_uint32 priority, rtk_uint32 newDscp );
+extern ret_t rtl8367c_getAsicRemarkingDscpParameter(rtk_uint32 priority, rtk_uint32* pNewDscp );
+
+extern ret_t rtl8367c_setAsicPriorityDot1qRemapping(rtk_uint32 srcpriority, rtk_uint32 priority );
+extern ret_t rtl8367c_getAsicPriorityDot1qRemapping(rtk_uint32 srcpriority, rtk_uint32 *pPriority );
+extern ret_t rtl8367c_setAsicPriorityDscpBased(rtk_uint32 dscp, rtk_uint32 priority );
+extern ret_t rtl8367c_getAsicPriorityDscpBased(rtk_uint32 dscp, rtk_uint32 *pPriority );
+extern ret_t rtl8367c_setAsicPriorityPortBased(rtk_uint32 port, rtk_uint32 priority );
+extern ret_t rtl8367c_getAsicPriorityPortBased(rtk_uint32 port, rtk_uint32 *pPriority );
+extern ret_t rtl8367c_setAsicPriorityDecision(rtk_uint32 index, rtk_uint32 prisrc, rtk_uint32 decisionPri);
+extern ret_t rtl8367c_getAsicPriorityDecision(rtk_uint32 index, rtk_uint32 prisrc, rtk_uint32* pDecisionPri);
+extern ret_t rtl8367c_setAsicPriorityToQIDMappingTable(rtk_uint32 index, rtk_uint32 priority, rtk_uint32 qid );
+extern ret_t rtl8367c_getAsicPriorityToQIDMappingTable(rtk_uint32 index, rtk_uint32 priority, rtk_uint32* pQid);
+extern ret_t rtl8367c_setAsicOutputQueueMappingIndex(rtk_uint32 port, rtk_uint32 qnum );
+extern ret_t rtl8367c_getAsicOutputQueueMappingIndex(rtk_uint32 port, rtk_uint32 *pQnum );
+
+extern ret_t rtl8367c_setAsicRemarkingDscpSrc(rtk_uint32 type);
+extern ret_t rtl8367c_getAsicRemarkingDscpSrc(rtk_uint32 *pType);
+extern ret_t rtl8367c_setAsicRemarkingDscp2Dscp(rtk_uint32 dscp, rtk_uint32 rmkDscp);
+extern ret_t rtl8367c_getAsicRemarkingDscp2Dscp(rtk_uint32 dscp, rtk_uint32 *pRmkDscp);
+
+extern ret_t rtl8367c_setAsicPortPriorityDecisionIndex(rtk_uint32 port, rtk_uint32 index );
+extern ret_t rtl8367c_getAsicPortPriorityDecisionIndex(rtk_uint32 port, rtk_uint32 *pIndex );
+
+#endif /*#ifndef _RTL8367C_ASICDRV_QOS_H_*/
+
diff -Nruw linux-4.4.115-fbx/drivers/misc/freebox./rtlapi/rtl8367c_asicdrv_rldp.c linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/rtl8367c_asicdrv_rldp.c
--- linux-4.4.115-fbx/drivers/misc/freebox./rtlapi/rtl8367c_asicdrv_rldp.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/rtl8367c_asicdrv_rldp.c	2019-01-22 16:16:24.715257382 +0100
@@ -0,0 +1,676 @@
+/*
+ * Copyright (C) 2013 Realtek Semiconductor Corp.
+ * All Rights Reserved.
+ *
+ * This program is the proprietary software of Realtek Semiconductor
+ * Corporation and/or its licensors, and only be used, duplicated,
+ * modified or distributed under the authorized license from Realtek.
+ *
+ * ANY USE OF THE SOFTWARE OTHER THAN AS AUTHORIZED UNDER
+ * THIS LICENSE OR COPYRIGHT LAW IS PROHIBITED.
+ *
+ * $Revision: 42321 $
+ * $Date: 2013-08-26 13:51:29 +0800 (Mon, 26 Aug 2013) $
+ *
+ * Purpose : RTL8367C switch high-level API for RTL8367C
+ * Feature : RLDP related functions
+ *
+ */
+
+#include <rtl8367c_asicdrv_rldp.h>
+/* Function Name:
+ *      rtl8367c_setAsicRldp
+ * Description:
+ *      Set RLDP function enable/disable
+ * Input:
+ *      enabled     - 1: enabled, 0: disabled
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK   - Success
+ *      RT_ERR_SMI  - SMI access error
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_setAsicRldp(rtk_uint32 enabled)
+{
+    return rtl8367c_setAsicRegBit(RTL8367C_REG_RLDP_CTRL0, RTL8367C_RLDP_ENABLE_OFFSET, enabled);
+}
+/* Function Name:
+ *      rtl8367c_getAsicRldp
+ * Description:
+ *      Get RLDP function enable/disable
+ * Input:
+ *      None
+ * Output:
+ *      pEnabled    - 1: enabled, 0: disabled
+ * Return:
+ *      RT_ERR_OK   - Success
+ *      RT_ERR_SMI  - SMI access error
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicRldp(rtk_uint32 *pEnabled)
+{
+    return rtl8367c_getAsicRegBit(RTL8367C_REG_RLDP_CTRL0, RTL8367C_RLDP_ENABLE_OFFSET, pEnabled);
+}
+/* Function Name:
+ *      rtl8367c_setAsicRldpEnable8051
+ * Description:
+ *      Set RLDP function handled by ASIC or 8051
+ * Input:
+ *      enabled     - 1: enabled 8051, 0: disabled 8051 (RLDP is handled by ASIC)
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK   - Success
+ *      RT_ERR_SMI  - SMI access error
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_setAsicRldpEnable8051(rtk_uint32 enabled)
+{
+    return rtl8367c_setAsicRegBit(RTL8367C_REG_RLDP_CTRL0, RTL8367C_RLDP_8051_ENABLE_OFFSET, enabled);
+}
+/* Function Name:
+ *      rtl8367c_getAsicRldpEnable8051
+ * Description:
+ *      Get RLDP function handled by ASIC or 8051
+ * Input:
+ *      None
+ * Output:
+ *      pEnabled    - 1: enabled 8051, 0: disabled 8051 (RLDP is handled by ASIC)
+ * Return:
+ *      RT_ERR_OK   - Success
+ *      RT_ERR_SMI  - SMI access error
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicRldpEnable8051(rtk_uint32 *pEnabled)
+{
+    return rtl8367c_getAsicRegBit(RTL8367C_REG_RLDP_CTRL0, RTL8367C_RLDP_8051_ENABLE_OFFSET, pEnabled);
+}
+/* Function Name:
+ *      rtl8367c_setAsicRldpCompareRandomNumber
+ * Description:
+ *      Set enable compare the random number field and seed field of RLDP frame
+ * Input:
+ *      enabled     - 1: enabled comparing random number, 0: disabled comparing random number
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK   - Success
+ *      RT_ERR_SMI  - SMI access error
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_setAsicRldpCompareRandomNumber(rtk_uint32 enabled)
+{
+    return rtl8367c_setAsicRegBit(RTL8367C_REG_RLDP_CTRL0, RTL8367C_RLDP_COMP_ID_OFFSET, enabled);
+}
+/* Function Name:
+ *      rtl8367c_getAsicRldpCompareRandomNumber
+ * Description:
+ *      Get enable compare the random number field and seed field of RLDP frame
+ * Input:
+ *      None
+ * Output:
+ *      pEnabled    - 1: enabled comparing random number, 0: disabled comparing random number
+ * Return:
+ *      RT_ERR_OK   - Success
+ *      RT_ERR_SMI  - SMI access error
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicRldpCompareRandomNumber(rtk_uint32 *pEnabled)
+{
+    return rtl8367c_getAsicRegBit(RTL8367C_REG_RLDP_CTRL0, RTL8367C_RLDP_COMP_ID_OFFSET, pEnabled);
+}
+/* Function Name:
+ *      rtl8367c_setAsicRldpIndicatorSource
+ * Description:
+ *      Set buzzer and LED source when detecting a loop
+ * Input:
+ *      src     - 0: ASIC, 1: 8051
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK   - Success
+ *      RT_ERR_SMI  - SMI access error
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_setAsicRldpIndicatorSource(rtk_uint32 src)
+{
+    return rtl8367c_setAsicRegBit(RTL8367C_REG_RLDP_CTRL0, RTL8367C_RLDP_INDICATOR_SOURCE_OFFSET, src);
+}
+/* Function Name:
+ *      rtl8367c_getAsicRldpIndicatorSource
+ * Description:
+ *      Get buzzer and LED source when detecting a loop
+ * Input:
+ *      None
+ * Output:
+ *      pSrc    - 0: ASIC, 1: 8051
+ * Return:
+ *      RT_ERR_OK   - Success
+ *      RT_ERR_SMI  - SMI access error
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicRldpIndicatorSource(rtk_uint32 *pSrc)
+{
+    return rtl8367c_getAsicRegBit(RTL8367C_REG_RLDP_CTRL0, RTL8367C_RLDP_INDICATOR_SOURCE_OFFSET, pSrc);
+}
+/* Function Name:
+ *      rtl8367c_setAsicRldpCheckingStatePara
+ * Description:
+ *      Set retry count and retry period of checking state
+ * Input:
+ *      retryCount  - 0~0xFF (times)
+ *      retryPeriod - 0~0xFFFF (ms)
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - Success
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_OUT_OF_RANGE - input parameter out of range
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_setAsicRldpCheckingStatePara(rtk_uint32 retryCount, rtk_uint32 retryPeriod)
+{
+    ret_t retVal;
+
+    if(retryCount > 0xFF)
+        return RT_ERR_OUT_OF_RANGE;
+    if(retryPeriod > RTL8367C_REGDATAMAX)
+        return RT_ERR_OUT_OF_RANGE;
+
+    retVal = rtl8367c_setAsicRegBits(RTL8367C_RLDP_RETRY_COUNT_REG, RTL8367C_RLDP_RETRY_COUNT_CHKSTATE_MASK, retryCount);
+    if(retVal != RT_ERR_OK)
+        return retVal;
+
+    return rtl8367c_setAsicReg(RTL8367C_RLDP_RETRY_PERIOD_CHKSTATE_REG, retryPeriod);
+}
+/* Function Name:
+ *      rtl8367c_getAsicRldpCheckingStatePara
+ * Description:
+ *      Get retry count and retry period of checking state
+ * Input:
+ *      None
+ * Output:
+ *      pRetryCount     - 0~0xFF (times)
+ *      pRetryPeriod    - 0~0xFFFF (ms)
+ * Return:
+ *      RT_ERR_OK           - Success
+ *      RT_ERR_SMI          - SMI access error
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicRldpCheckingStatePara(rtk_uint32 *pRetryCount, rtk_uint32 *pRetryPeriod)
+{
+    ret_t retVal;
+
+    retVal = rtl8367c_getAsicRegBits(RTL8367C_RLDP_RETRY_COUNT_REG, RTL8367C_RLDP_RETRY_COUNT_CHKSTATE_MASK, pRetryCount);
+    if(retVal != RT_ERR_OK)
+        return retVal;
+
+    return rtl8367c_getAsicReg(RTL8367C_RLDP_RETRY_PERIOD_CHKSTATE_REG, pRetryPeriod);
+}
+/* Function Name:
+ *      rtl8367c_setAsicRldpLoopStatePara
+ * Description:
+ *      Set retry count and retry period of loop state
+ * Input:
+ *      retryCount  - 0~0xFF (times)
+ *      retryPeriod - 0~0xFFFF (ms)
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - Success
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_OUT_OF_RANGE - input parameter out of range
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_setAsicRldpLoopStatePara(rtk_uint32 retryCount, rtk_uint32 retryPeriod)
+{
+    ret_t retVal;
+
+    if(retryCount > 0xFF)
+        return RT_ERR_OUT_OF_RANGE;
+
+    if(retryPeriod > RTL8367C_REGDATAMAX)
+        return RT_ERR_OUT_OF_RANGE;
+
+    retVal = rtl8367c_setAsicRegBits(RTL8367C_RLDP_RETRY_COUNT_REG, RTL8367C_RLDP_RETRY_COUNT_LOOPSTATE_MASK, retryCount);
+    if(retVal != RT_ERR_OK)
+        return retVal;
+
+    return rtl8367c_setAsicReg(RTL8367C_RLDP_RETRY_PERIOD_LOOPSTATE_REG, retryPeriod);
+}
+/* Function Name:
+ *      rtl8367c_getAsicRldpLoopStatePara
+ * Description:
+ *      Get retry count and retry period of loop state
+ * Input:
+ *      None
+ * Output:
+ *      pRetryCount     - 0~0xFF (times)
+ *      pRetryPeriod    - 0~0xFFFF (ms)
+ * Return:
+ *      RT_ERR_OK           - Success
+ *      RT_ERR_SMI          - SMI access error
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicRldpLoopStatePara(rtk_uint32 *pRetryCount, rtk_uint32 *pRetryPeriod)
+{
+    ret_t retVal;
+
+    retVal = rtl8367c_getAsicRegBits(RTL8367C_RLDP_RETRY_COUNT_REG, RTL8367C_RLDP_RETRY_COUNT_LOOPSTATE_MASK, pRetryCount);
+    if(retVal != RT_ERR_OK)
+        return retVal;
+
+    return rtl8367c_getAsicReg(RTL8367C_RLDP_RETRY_PERIOD_LOOPSTATE_REG, pRetryPeriod);
+}
+/* Function Name:
+ *      rtl8367c_setAsicRldpTxPortmask
+ * Description:
+ *      Set portmask that send/forward RLDP frame
+ * Input:
+ *      portmask    - 0~0xFF
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - Success
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_PORT_MASK    - Invalid portmask
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_setAsicRldpTxPortmask(rtk_uint32 portmask)
+{
+    if(portmask > RTL8367C_PORTMASK)
+        return RT_ERR_PORT_MASK;
+
+    return rtl8367c_setAsicReg(RTL8367C_RLDP_TX_PMSK_REG, portmask);
+}
+/* Function Name:
+ *      rtl8367c_getAsicRldpTxPortmask
+ * Description:
+ *      Get portmask that send/forward RLDP frame
+ * Input:
+ *      None
+ * Output:
+ *      pPortmask   - 0~0xFF
+ * Return:
+ *      RT_ERR_OK   - Success
+ *      RT_ERR_SMI  - SMI access error
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicRldpTxPortmask(rtk_uint32 *pPortmask)
+{
+    return rtl8367c_getAsicReg(RTL8367C_RLDP_TX_PMSK_REG, pPortmask);
+}
+/* Function Name:
+ *      rtl8367c_setAsicRldpMagicNum
+ * Description:
+ *      Set Random seed of RLDP
+ * Input:
+ *      seed    - MAC
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK   - Success
+ *      RT_ERR_SMI  - SMI access error
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_setAsicRldpMagicNum(ether_addr_t seed)
+{
+    ret_t retVal;
+    rtk_uint32 regData;
+    rtk_uint16 *accessPtr;
+    rtk_uint32 i;
+
+    accessPtr = (rtk_uint16*)&seed;
+
+    for (i = 0; i < 3; i++)
+    {
+        regData = *accessPtr;
+        retVal = rtl8367c_setAsicReg(RTL8367C_RLDP_MAGIC_NUM_REG_BASE + i, regData);
+        if(retVal != RT_ERR_OK)
+            return retVal;
+
+        accessPtr++;
+    }
+
+    return retVal;
+}
+/* Function Name:
+ *      rtl8367c_getAsicRldpMagicNum
+ * Description:
+ *      Get Random seed of RLDP
+ * Input:
+ *      None
+ * Output:
+ *      pSeed   - MAC
+ * Return:
+ *      RT_ERR_OK   - Success
+ *      RT_ERR_SMI  - SMI access error
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicRldpMagicNum(ether_addr_t *pSeed)
+{
+    ret_t retVal;
+    rtk_uint32 regData;
+    rtk_uint16 *accessPtr;
+    rtk_uint32 i;
+
+    accessPtr = (rtk_uint16*)pSeed;
+
+    for(i = 0; i < 3; i++)
+    {
+        retVal = rtl8367c_getAsicReg(RTL8367C_RLDP_MAGIC_NUM_REG_BASE + i, &regData);
+        if(retVal != RT_ERR_OK)
+            return retVal;
+
+        *accessPtr = regData;
+        accessPtr++;
+    }
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtl8367c_getAsicRldpLoopedPortmask
+ * Description:
+ *      Get looped portmask
+ * Input:
+ *      None
+ * Output:
+ *      pPortmask   - 0~0xFF
+ * Return:
+ *      RT_ERR_OK   - Success
+ *      RT_ERR_SMI  - SMI access error
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicRldpLoopedPortmask(rtk_uint32 *pPortmask)
+{
+    return rtl8367c_getAsicReg(RTL8367C_RLDP_LOOP_PMSK_REG, pPortmask);
+}
+/* Function Name:
+ *      rtl8367c_getAsicRldpRandomNumber
+ * Description:
+ *      Get Random number of RLDP
+ * Input:
+ *      None
+ * Output:
+ *      pRandNumber     - MAC
+ * Return:
+ *      RT_ERR_OK   - Success
+ *      RT_ERR_SMI  - SMI access error
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicRldpRandomNumber(ether_addr_t *pRandNumber)
+{
+    ret_t retVal;
+    rtk_uint32 regData;
+    rtk_uint16 accessPtr[3];
+    rtk_uint32 i;
+
+    for(i = 0; i < 3; i++)
+    {
+        retVal = rtl8367c_getAsicReg(RTL8367C_RLDP_RAND_NUM_REG_BASE + i, &regData);
+        if(retVal != RT_ERR_OK)
+            return retVal;
+
+        accessPtr[i] = regData;
+    }
+
+    memcpy(pRandNumber, accessPtr, 6);
+    return retVal;
+}
+/* Function Name:
+ *      rtl8367c_getAsicRldpLoopedPortPair
+ * Description:
+ *      Get port number of looped pair
+ * Input:
+ *      port        - Physical port number (0~7)
+ * Output:
+ *      pLoopedPair - Port number of the looped peer (0~7)
+ * Return:
+ *      RT_ERR_OK       - Success
+ *      RT_ERR_SMI      - SMI access error
+ *      RT_ERR_PORT_ID  - Invalid port number
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicRldpLoopedPortPair(rtk_uint32 port, rtk_uint32 *pLoopedPair)
+{
+    if(port > RTL8367C_PORTIDMAX)
+        return RT_ERR_PORT_ID;
+
+    if(port < 8)
+        return rtl8367c_getAsicRegBits(RTL8367C_RLDP_LOOP_PORT_REG(port), RTL8367C_RLDP_LOOP_PORT_MASK(port), pLoopedPair);
+    else
+        return rtl8367c_getAsicRegBits(RTL8367C_REG_RLDP_LOOP_PORT_REG4 + ((port - 8) >> 1), RTL8367C_RLDP_LOOP_PORT_MASK(port), pLoopedPair);
+}
+/* Function Name:
+ *      rtl8367c_setAsicRlppTrap8051
+ * Description:
+ *      Set trap RLPP packet to 8051
+ * Input:
+ *      enabled     - 1: enabled, 0: disabled
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK   - Success
+ *      RT_ERR_SMI  - SMI access error
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_setAsicRlppTrap8051(rtk_uint32 enabled)
+{
+    return rtl8367c_setAsicRegBit(RTL8367C_REG_RLDP_CTRL0, RTL8367C_RLPP_8051_TRAP_OFFSET, enabled);
+}
+/* Function Name:
+ *      rtl8367c_getAsicRlppTrap8051
+ * Description:
+ *      Get trap RLPP packet to 8051
+ * Input:
+ *      None
+ * Output:
+ *      pEnabled    - 1: enabled, 0: disabled
+ * Return:
+ *      RT_ERR_OK   - Success
+ *      RT_ERR_SMI  - SMI access error
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicRlppTrap8051(rtk_uint32 *pEnabled)
+{
+    return rtl8367c_getAsicRegBit(RTL8367C_REG_RLDP_CTRL0, RTL8367C_RLPP_8051_TRAP_OFFSET, pEnabled);
+}
+/* Function Name:
+ *      rtl8367c_setAsicRldpLeaveLoopedPortmask
+ * Description:
+ *      Clear the loop-released (leave-loop) portmask
+ * Input:
+ *      portmask    - 0~0xFF
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK   - Success
+ *      RT_ERR_SMI  - SMI access error
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_setAsicRldpLeaveLoopedPortmask(rtk_uint32 portmask)
+{
+    return rtl8367c_setAsicReg(RTL8367C_REG_RLDP_RELEASED_INDICATOR, portmask);
+}
+/* Function Name:
+ *      rtl8367c_getAsicRldpLeaveLoopedPortmask
+ * Description:
+ *      Get the loop-released (leave-loop) portmask
+ * Input:
+ *      None
+ * Output:
+ *      pPortmask   - 0~0xFF
+ * Return:
+ *      RT_ERR_OK   - Success
+ *      RT_ERR_SMI  - SMI access error
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicRldpLeaveLoopedPortmask(rtk_uint32 *pPortmask)
+{
+    return rtl8367c_getAsicReg(RTL8367C_REG_RLDP_RELEASED_INDICATOR, pPortmask);
+}
+/* Function Name:
+ *      rtl8367c_setAsicRldpEnterLoopedPortmask
+ * Description:
+ *      Clear the enter-loop portmask
+ * Input:
+ *      portmask    - 0~0xFF
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK   - Success
+ *      RT_ERR_SMI  - SMI access error
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_setAsicRldpEnterLoopedPortmask(rtk_uint32 portmask)
+{
+    return rtl8367c_setAsicReg(RTL8367C_REG_RLDP_LOOPED_INDICATOR, portmask);
+}
+/* Function Name:
+ *      rtl8367c_getAsicRldpEnterLoopedPortmask
+ * Description:
+ *      Get the enter-loop portmask
+ * Input:
+ *      None
+ * Output:
+ *      pPortmask   - 0~0xFF
+ * Return:
+ *      RT_ERR_OK   - Success
+ *      RT_ERR_SMI  - SMI access error
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicRldpEnterLoopedPortmask(rtk_uint32 *pPortmask)
+{
+    return rtl8367c_getAsicReg(RTL8367C_REG_RLDP_LOOPED_INDICATOR, pPortmask);
+}
+
+/* Function Name:
+ *      rtl8367c_setAsicRldpTriggerMode
+ * Description:
+ *      Set trigger RLDP mode
+ * Input:
+ *      mode    - 1: Periodically, 0: SA moving
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK   - Success
+ *      RT_ERR_SMI  - SMI access error
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_setAsicRldpTriggerMode(rtk_uint32 mode)
+{
+    return rtl8367c_setAsicRegBit(RTL8367C_REG_RLDP_CTRL0, RTL8367C_RLDP_TRIGGER_MODE_OFFSET, mode);
+}
+/* Function Name:
+ *      rtl8367c_getAsicRldpTriggerMode
+ * Description:
+ *      Get trigger RLDP mode
+ * Input:
+ *      None
+ * Output:
+ *      pMode   - 1: Periodically, 0: SA moving
+ * Return:
+ *      RT_ERR_OK   - Success
+ *      RT_ERR_SMI  - SMI access error
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicRldpTriggerMode(rtk_uint32 *pMode)
+{
+    return rtl8367c_getAsicRegBit(RTL8367C_REG_RLDP_CTRL0, RTL8367C_RLDP_TRIGGER_MODE_OFFSET, pMode);
+}
+
+/* Function Name:
+ *      rtl8367c_setAsicRldp8051Portmask
+ * Description:
+ *      Set 8051/CPU configured looped portmask
+ * Input:
+ *      portmask    - 0~0x7FF
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - Success
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_PORT_MASK    - Invalid portmask
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_setAsicRldp8051Portmask(rtk_uint32 portmask)
+{
+    ret_t retVal;
+
+    if(portmask > RTL8367C_PORTMASK)
+        return RT_ERR_PORT_MASK;
+
+    retVal = rtl8367c_setAsicRegBits(RTL8367C_RLDP_CTRL0_REG, RTL8367C_RLDP_8051_LOOP_PORTMSK_MASK, portmask & 0xff);
+    if(retVal != RT_ERR_OK)
+        return retVal;
+
+    retVal = rtl8367c_setAsicRegBits(RTL8367C_REG_RLDP_CTRL5, RTL8367C_RLDP_CTRL5_MASK, (portmask >> 8) & 7);
+    if(retVal != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+/* Function Name:
+ *      rtl8367c_getAsicRldp8051Portmask
+ * Description:
+ *      Get 8051/CPU configured looped portmask
+ * Input:
+ *      None
+ * Output:
+ *      pPortmask   - 0~0x7FF
+ * Return:
+ *      RT_ERR_OK   - Success
+ *      RT_ERR_SMI  - SMI access error
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicRldp8051Portmask(rtk_uint32 *pPortmask)
+{
+    rtk_uint32 tmpPmsk;
+    ret_t retVal;
+
+    retVal = rtl8367c_getAsicRegBits(RTL8367C_RLDP_CTRL0_REG, RTL8367C_RLDP_8051_LOOP_PORTMSK_MASK, &tmpPmsk);
+    if(retVal != RT_ERR_OK)
+        return retVal;
+    *pPortmask = tmpPmsk & 0xff;
+
+    retVal = rtl8367c_getAsicRegBits(RTL8367C_REG_RLDP_CTRL5, RTL8367C_RLDP_CTRL5_MASK, &tmpPmsk);
+    if(retVal != RT_ERR_OK)
+        return retVal;
+    *pPortmask |= (tmpPmsk & 7) << 8;
+
+    return RT_ERR_OK;
+}
+
diff -Nruw linux-4.4.115-fbx/drivers/misc/freebox./rtlapi/rtl8367c_asicdrv_rldp.h linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/rtl8367c_asicdrv_rldp.h
--- linux-4.4.115-fbx/drivers/misc/freebox./rtlapi/rtl8367c_asicdrv_rldp.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/rtl8367c_asicdrv_rldp.h	2019-01-22 16:16:24.715257382 +0100
@@ -0,0 +1,43 @@
+#ifndef _RTL8367C_ASICDRV_RLDP_H_
+#define _RTL8367C_ASICDRV_RLDP_H_
+
+#include <rtl8367c_asicdrv.h>
+#include <string.h>
+
+extern ret_t rtl8367c_setAsicRldp(rtk_uint32 enabled);
+extern ret_t rtl8367c_getAsicRldp(rtk_uint32 *pEnabled);
+extern ret_t rtl8367c_setAsicRldpEnable8051(rtk_uint32 enabled);
+extern ret_t rtl8367c_getAsicRldpEnable8051(rtk_uint32 *pEnabled);
+extern ret_t rtl8367c_setAsicRldpCompareRandomNumber(rtk_uint32 enabled);
+extern ret_t rtl8367c_getAsicRldpCompareRandomNumber(rtk_uint32 *pEnabled);
+extern ret_t rtl8367c_setAsicRldpIndicatorSource(rtk_uint32 src);
+extern ret_t rtl8367c_getAsicRldpIndicatorSource(rtk_uint32 *pSrc);
+extern ret_t rtl8367c_setAsicRldpCheckingStatePara(rtk_uint32 retryCount, rtk_uint32 retryPeriod);
+extern ret_t rtl8367c_getAsicRldpCheckingStatePara(rtk_uint32 *pRetryCount, rtk_uint32 *pRetryPeriod);
+extern ret_t rtl8367c_setAsicRldpLoopStatePara(rtk_uint32 retryCount, rtk_uint32 retryPeriod);
+extern ret_t rtl8367c_getAsicRldpLoopStatePara(rtk_uint32 *pRetryCount, rtk_uint32 *pRetryPeriod);
+extern ret_t rtl8367c_setAsicRldpTxPortmask(rtk_uint32 portmask);
+extern ret_t rtl8367c_getAsicRldpTxPortmask(rtk_uint32 *pPortmask);
+extern ret_t rtl8367c_setAsicRldpMagicNum(ether_addr_t seed);
+extern ret_t rtl8367c_getAsicRldpMagicNum(ether_addr_t *pSeed);
+extern ret_t rtl8367c_getAsicRldpLoopedPortmask(rtk_uint32 *pPortmask);
+extern ret_t rtl8367c_setAsicRldp8051Portmask(rtk_uint32 portmask);
+extern ret_t rtl8367c_getAsicRldp8051Portmask(rtk_uint32 *pPortmask);
+
+
+extern ret_t rtl8367c_getAsicRldpRandomNumber(ether_addr_t *pRandNumber);
+extern ret_t rtl8367c_getAsicRldpLoopedPortPair(rtk_uint32 port, rtk_uint32 *pLoopedPair);
+extern ret_t rtl8367c_setAsicRlppTrap8051(rtk_uint32 enabled);
+extern ret_t rtl8367c_getAsicRlppTrap8051(rtk_uint32 *pEnabled);
+
+extern ret_t rtl8367c_setAsicRldpLeaveLoopedPortmask(rtk_uint32 portmask);
+extern ret_t rtl8367c_getAsicRldpLeaveLoopedPortmask(rtk_uint32 *pPortmask);
+
+extern ret_t rtl8367c_setAsicRldpEnterLoopedPortmask(rtk_uint32 portmask);
+extern ret_t rtl8367c_getAsicRldpEnterLoopedPortmask(rtk_uint32 *pPortmask);
+
+extern ret_t rtl8367c_setAsicRldpTriggerMode(rtk_uint32 mode);
+extern ret_t rtl8367c_getAsicRldpTriggerMode(rtk_uint32 *pMode);
+
+#endif /*_RTL8367C_ASICDRV_RLDP_H_*/
+
diff -Nruw linux-4.4.115-fbx/drivers/misc/freebox./rtlapi/rtl8367c_asicdrv_rma.c linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/rtl8367c_asicdrv_rma.c
--- linux-4.4.115-fbx/drivers/misc/freebox./rtlapi/rtl8367c_asicdrv_rma.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/rtl8367c_asicdrv_rma.c	2019-01-22 16:16:24.715257382 +0100
@@ -0,0 +1,363 @@
+/*
+ * Copyright (C) 2013 Realtek Semiconductor Corp.
+ * All Rights Reserved.
+ *
+ * This program is the proprietary software of Realtek Semiconductor
+ * Corporation and/or its licensors, and only be used, duplicated,
+ * modified or distributed under the authorized license from Realtek.
+ *
+ * ANY USE OF THE SOFTWARE OTHER THAN AS AUTHORIZED UNDER
+ * THIS LICENSE OR COPYRIGHT LAW IS PROHIBITED.
+ *
+ * $Revision: 64716 $
+ * $Date: 2015-12-31 16:31:55 +0800 (Thu, 31 Dec 2015) $
+ *
+ * Purpose : RTL8367C switch high-level API for RTL8367C
+ * Feature : RMA related functions
+ *
+ */
+#include <rtl8367c_asicdrv_rma.h>
+/* Function Name:
+ *      rtl8367c_setAsicRma
+ * Description:
+ *      Set reserved multicast address for CPU trapping
+ * Input:
+ *      index       - reserved multicast address LSB byte; valid values are 0x00~0x2F
+ *      pRmacfg     - type of RMA for trapping frame type setting
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK         - Success
+ *      RT_ERR_SMI      - SMI access error
+ *      RT_ERR_RMA_ADDR - Invalid RMA address index
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_setAsicRma(rtk_uint32 index, rtl8367c_rma_t* pRmacfg)
+{
+    rtk_uint32 regData = 0;
+    ret_t retVal;
+
+    if(index > RTL8367C_RMAMAX)
+        return RT_ERR_RMA_ADDR;
+
+    regData |= (pRmacfg->portiso_leaky & 0x0001);
+    regData |= ((pRmacfg->vlan_leaky & 0x0001) << 1);
+    regData |= ((pRmacfg->keep_format & 0x0001) << 2);
+    regData |= ((pRmacfg->trap_priority & 0x0007) << 3);
+    regData |= ((pRmacfg->discard_storm_filter & 0x0001) << 6);
+    regData |= ((pRmacfg->operation & 0x0003) << 7);
+
+    if( (index >= 0x4 && index <= 0x7) || (index >= 0x9 && index <= 0x0C) || (0x0F == index))
+        index = 0x04;
+    else if((index >= 0x13 && index <= 0x17) || (0x19 == index) || (index >= 0x1B && index <= 0x1f))
+        index = 0x13;
+    else if(index >= 0x22 && index <= 0x2F)
+        index = 0x22;
+
+    retVal = rtl8367c_setAsicRegBits(RTL8367C_REG_RMA_CTRL00, RTL8367C_TRAP_PRIORITY_MASK, pRmacfg->trap_priority);
+    if(retVal != RT_ERR_OK)
+        return retVal;
+
+    return rtl8367c_setAsicReg(RTL8367C_REG_RMA_CTRL00+index, regData);
+}
+/* Function Name:
+ *      rtl8367c_getAsicRma
+ * Description:
+ *      Get reserved multicast address for CPU trapping
+ * Input:
+ *      index       - reserved multicast address LSB byte; valid values are 0x00~0x2F
+ * Output:
+ *      pRmacfg     - type of RMA for trapping frame type setting
+ * Return:
+ *      RT_ERR_OK         - Success
+ *      RT_ERR_SMI      - SMI access error
+ *      RT_ERR_RMA_ADDR - Invalid RMA address index
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicRma(rtk_uint32 index, rtl8367c_rma_t* pRmacfg)
+{
+    ret_t retVal;
+    rtk_uint32 regData;
+
+    if(index > RTL8367C_RMAMAX)
+        return RT_ERR_RMA_ADDR;
+
+    if( (index >= 0x4 && index <= 0x7) || (index >= 0x9 && index <= 0x0C) || (0x0F == index))
+        index = 0x04;
+    else if((index >= 0x13 && index <= 0x17) || (0x19 == index) || (index >= 0x1B && index <= 0x1f))
+        index = 0x13;
+    else if(index >= 0x22 && index <= 0x2F)
+        index = 0x22;
+
+    retVal = rtl8367c_getAsicReg(RTL8367C_REG_RMA_CTRL00+index, &regData);
+    if(retVal != RT_ERR_OK)
+        return retVal;
+
+    pRmacfg->operation = ((regData >> 7) & 0x0003);
+    pRmacfg->discard_storm_filter = ((regData >> 6) & 0x0001);
+    pRmacfg->trap_priority = ((regData >> 3) & 0x0007);
+    pRmacfg->keep_format = ((regData >> 2) & 0x0001);
+    pRmacfg->vlan_leaky = ((regData >> 1) & 0x0001);
+    pRmacfg->portiso_leaky = (regData & 0x0001);
+
+    retVal = rtl8367c_getAsicRegBits(RTL8367C_REG_RMA_CTRL00, RTL8367C_TRAP_PRIORITY_MASK, &regData);
+    if(retVal != RT_ERR_OK)
+        return retVal;
+
+    pRmacfg->trap_priority = regData;
+
+    return RT_ERR_OK;
+}
+
+/* Function Name:
+ *      rtl8367c_setAsicRmaCdp
+ * Description:
+ *      Set CDP(Cisco Discovery Protocol) for CPU trapping
+ * Input:
+ *      pRmacfg     - type of RMA for trapping frame type setting
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK                 - Success
+ *      RT_ERR_SMI                - SMI access error
+ *      RT_ERR_RMA_ACTION         - Invalid RMA action
+ *      RT_ERR_QOS_INT_PRIORITY   - Invalid priority
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_setAsicRmaCdp(rtl8367c_rma_t* pRmacfg)
+{
+    rtk_uint32 regData = 0;
+    ret_t retVal;
+
+    if(pRmacfg->operation >= RMAOP_END)
+        return RT_ERR_RMA_ACTION;
+
+    if(pRmacfg->trap_priority > RTL8367C_PRIMAX)
+        return RT_ERR_QOS_INT_PRIORITY;
+
+    regData |= (pRmacfg->portiso_leaky & 0x0001);
+    regData |= ((pRmacfg->vlan_leaky & 0x0001) << 1);
+    regData |= ((pRmacfg->keep_format & 0x0001) << 2);
+    regData |= ((pRmacfg->trap_priority & 0x0007) << 3);
+    regData |= ((pRmacfg->discard_storm_filter & 0x0001) << 6);
+    regData |= ((pRmacfg->operation & 0x0003) << 7);
+
+    retVal = rtl8367c_setAsicRegBits(RTL8367C_REG_RMA_CTRL00, RTL8367C_TRAP_PRIORITY_MASK, pRmacfg->trap_priority);
+    if(retVal != RT_ERR_OK)
+        return retVal;
+
+    return rtl8367c_setAsicReg(RTL8367C_REG_RMA_CTRL_CDP, regData);
+}
+/* Function Name:
+ *      rtl8367c_getAsicRmaCdp
+ * Description:
+ *      Get CDP(Cisco Discovery Protocol) for CPU trapping
+ * Input:
+ *      None
+ * Output:
+ *      pRmacfg     - type of RMA for trapping frame type setting
+ * Return:
+ *      RT_ERR_OK         - Success
+ *      RT_ERR_SMI      - SMI access error
+ *      RT_ERR_RMA_ADDR - Invalid RMA address index
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicRmaCdp(rtl8367c_rma_t* pRmacfg)
+{
+    ret_t retVal;
+    rtk_uint32 regData;
+
+    retVal = rtl8367c_getAsicReg(RTL8367C_REG_RMA_CTRL_CDP, &regData);
+    if(retVal != RT_ERR_OK)
+        return retVal;
+
+    pRmacfg->operation = ((regData >> 7) & 0x0003);
+    pRmacfg->discard_storm_filter = ((regData >> 6) & 0x0001);
+    pRmacfg->trap_priority = ((regData >> 3) & 0x0007);
+    pRmacfg->keep_format = ((regData >> 2) & 0x0001);
+    pRmacfg->vlan_leaky = ((regData >> 1) & 0x0001);
+    pRmacfg->portiso_leaky = (regData & 0x0001);
+
+    retVal = rtl8367c_getAsicRegBits(RTL8367C_REG_RMA_CTRL00, RTL8367C_TRAP_PRIORITY_MASK, &regData);
+    if(retVal != RT_ERR_OK)
+        return retVal;
+
+    pRmacfg->trap_priority = regData;
+
+    return RT_ERR_OK;
+}
+
+/* Function Name:
+ *      rtl8367c_setAsicRmaCsstp
+ * Description:
+ *      Set CSSTP(Cisco Shared Spanning Tree Protocol) for CPU trapping
+ * Input:
+ *      pRmacfg     - type of RMA for trapping frame type setting
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK                 - Success
+ *      RT_ERR_SMI                - SMI access error
+ *      RT_ERR_RMA_ACTION         - Invalid RMA action
+ *      RT_ERR_QOS_INT_PRIORITY   - Invalid priority
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_setAsicRmaCsstp(rtl8367c_rma_t* pRmacfg)
+{
+    rtk_uint32 regData = 0;
+    ret_t retVal;
+
+    if(pRmacfg->operation >= RMAOP_END)
+        return RT_ERR_RMA_ACTION;
+
+    if(pRmacfg->trap_priority > RTL8367C_PRIMAX)
+        return RT_ERR_QOS_INT_PRIORITY;
+
+    regData |= (pRmacfg->portiso_leaky & 0x0001);
+    regData |= ((pRmacfg->vlan_leaky & 0x0001) << 1);
+    regData |= ((pRmacfg->keep_format & 0x0001) << 2);
+    regData |= ((pRmacfg->trap_priority & 0x0007) << 3);
+    regData |= ((pRmacfg->discard_storm_filter & 0x0001) << 6);
+    regData |= ((pRmacfg->operation & 0x0003) << 7);
+
+    retVal = rtl8367c_setAsicRegBits(RTL8367C_REG_RMA_CTRL00, RTL8367C_TRAP_PRIORITY_MASK, pRmacfg->trap_priority);
+    if(retVal != RT_ERR_OK)
+        return retVal;
+
+    return rtl8367c_setAsicReg(RTL8367C_REG_RMA_CTRL_CSSTP, regData);
+}
+/* Function Name:
+ *      rtl8367c_getAsicRmaCsstp
+ * Description:
+ *      Get CSSTP(Cisco Shared Spanning Tree Protocol) for CPU trapping
+ * Input:
+ *      None
+ * Output:
+ *      pRmacfg     - type of RMA for trapping frame type setting
+ * Return:
+ *      RT_ERR_OK         - Success
+ *      RT_ERR_SMI      - SMI access error
+ *      RT_ERR_RMA_ADDR - Invalid RMA address index
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicRmaCsstp(rtl8367c_rma_t* pRmacfg)
+{
+    ret_t retVal;
+    rtk_uint32 regData;
+
+    retVal = rtl8367c_getAsicReg(RTL8367C_REG_RMA_CTRL_CSSTP, &regData);
+    if(retVal != RT_ERR_OK)
+        return retVal;
+
+    pRmacfg->operation = ((regData >> 7) & 0x0003);
+    pRmacfg->discard_storm_filter = ((regData >> 6) & 0x0001);
+    pRmacfg->trap_priority = ((regData >> 3) & 0x0007);
+    pRmacfg->keep_format = ((regData >> 2) & 0x0001);
+    pRmacfg->vlan_leaky = ((regData >> 1) & 0x0001);
+    pRmacfg->portiso_leaky = (regData & 0x0001);
+
+    retVal = rtl8367c_getAsicRegBits(RTL8367C_REG_RMA_CTRL00, RTL8367C_TRAP_PRIORITY_MASK, &regData);
+    if(retVal != RT_ERR_OK)
+        return retVal;
+
+    pRmacfg->trap_priority = regData;
+
+    return RT_ERR_OK;
+}
+
+/* Function Name:
+ *      rtl8367c_setAsicRmaLldp
+ * Description:
+ *      Set LLDP for CPU trapping
+ * Input:
+ *      enabled     - 1: enabled, 0: disabled
+ *      pRmacfg     - type of RMA for trapping frame type setting
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK                 - Success
+ *      RT_ERR_SMI                - SMI access error
+ *      RT_ERR_ENABLE             - Invalid enable input
+ *      RT_ERR_RMA_ACTION         - Invalid RMA action
+ *      RT_ERR_QOS_INT_PRIORITY   - Invalid priority
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_setAsicRmaLldp(rtk_uint32 enabled, rtl8367c_rma_t* pRmacfg)
+{
+    rtk_uint32 regData = 0;
+    ret_t retVal;
+
+    if(enabled > 1)
+        return RT_ERR_ENABLE;
+
+    if(pRmacfg->operation >= RMAOP_END)
+        return RT_ERR_RMA_ACTION;
+
+    if(pRmacfg->trap_priority > RTL8367C_PRIMAX)
+        return RT_ERR_QOS_INT_PRIORITY;
+
+    retVal = rtl8367c_setAsicRegBit(RTL8367C_REG_RMA_LLDP_EN, RTL8367C_RMA_LLDP_EN_OFFSET,enabled);
+    if(retVal != RT_ERR_OK)
+        return retVal;
+
+    regData |= (pRmacfg->portiso_leaky & 0x0001);
+    regData |= ((pRmacfg->vlan_leaky & 0x0001) << 1);
+    regData |= ((pRmacfg->keep_format & 0x0001) << 2);
+    regData |= ((pRmacfg->trap_priority & 0x0007) << 3);
+    regData |= ((pRmacfg->discard_storm_filter & 0x0001) << 6);
+    regData |= ((pRmacfg->operation & 0x0003) << 7);
+
+    retVal = rtl8367c_setAsicRegBits(RTL8367C_REG_RMA_CTRL00, RTL8367C_TRAP_PRIORITY_MASK, pRmacfg->trap_priority);
+    if(retVal != RT_ERR_OK)
+        return retVal;
+
+    return rtl8367c_setAsicReg(RTL8367C_REG_RMA_CTRL_LLDP, regData);
+}
+/* Function Name:
+ *      rtl8367c_getAsicRmaLldp
+ * Description:
+ *      Get LLDP for CPU trapping
+ * Input:
+ *      None
+ * Output:
+ *      pEnabled    - 1: enabled, 0: disabled
+ *      pRmacfg     - type of RMA for trapping frame type setting
+ * Return:
+ *      RT_ERR_OK         - Success
+ *      RT_ERR_SMI      - SMI access error
+ *      RT_ERR_RMA_ADDR - Invalid RMA address index
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicRmaLldp(rtk_uint32 *pEnabled, rtl8367c_rma_t* pRmacfg)
+{
+    ret_t retVal;
+    rtk_uint32 regData;
+
+    retVal = rtl8367c_getAsicRegBit(RTL8367C_REG_RMA_LLDP_EN, RTL8367C_RMA_LLDP_EN_OFFSET,pEnabled);
+    if(retVal != RT_ERR_OK)
+        return retVal;
+
+    retVal = rtl8367c_getAsicReg(RTL8367C_REG_RMA_CTRL_LLDP, &regData);
+    if(retVal != RT_ERR_OK)
+        return retVal;
+
+    pRmacfg->operation = ((regData >> 7) & 0x0003);
+    pRmacfg->discard_storm_filter = ((regData >> 6) & 0x0001);
+    pRmacfg->trap_priority = ((regData >> 3) & 0x0007);
+    pRmacfg->keep_format = ((regData >> 2) & 0x0001);
+    pRmacfg->vlan_leaky = ((regData >> 1) & 0x0001);
+    pRmacfg->portiso_leaky = (regData & 0x0001);
+
+    retVal = rtl8367c_getAsicRegBits(RTL8367C_REG_RMA_CTRL00, RTL8367C_TRAP_PRIORITY_MASK, &regData);
+    if(retVal != RT_ERR_OK)
+        return retVal;
+
+    pRmacfg->trap_priority = regData;
+
+    return RT_ERR_OK;
+}
+
diff -Nruw linux-4.4.115-fbx/drivers/misc/freebox./rtlapi/rtl8367c_asicdrv_rma.h linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/rtl8367c_asicdrv_rma.h
--- linux-4.4.115-fbx/drivers/misc/freebox./rtlapi/rtl8367c_asicdrv_rma.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/rtl8367c_asicdrv_rma.h	2019-01-22 16:16:24.715257382 +0100
@@ -0,0 +1,40 @@
+#ifndef _RTL8367C_ASICDRV_RMA_H_
+#define _RTL8367C_ASICDRV_RMA_H_
+
+#include <rtl8367c_asicdrv.h>
+
+#define RTL8367C_RMAMAX                     0x2F
+
+enum RTL8367C_RMAOP
+{
+    RMAOP_FORWARD = 0,
+    RMAOP_TRAP_TO_CPU,
+    RMAOP_DROP,
+    RMAOP_FORWARD_EXCLUDE_CPU,
+    RMAOP_END
+};
+
+
+typedef struct  rtl8367c_rma_s{
+
+    rtk_uint16 operation;
+    rtk_uint16 discard_storm_filter;
+    rtk_uint16 trap_priority;
+    rtk_uint16 keep_format;
+    rtk_uint16 vlan_leaky;
+    rtk_uint16 portiso_leaky;
+
+}rtl8367c_rma_t;
+
+
+extern ret_t rtl8367c_setAsicRma(rtk_uint32 index, rtl8367c_rma_t* pRmacfg);
+extern ret_t rtl8367c_getAsicRma(rtk_uint32 index, rtl8367c_rma_t* pRmacfg);
+extern ret_t rtl8367c_setAsicRmaCdp(rtl8367c_rma_t* pRmacfg);
+extern ret_t rtl8367c_getAsicRmaCdp(rtl8367c_rma_t* pRmacfg);
+extern ret_t rtl8367c_setAsicRmaCsstp(rtl8367c_rma_t* pRmacfg);
+extern ret_t rtl8367c_getAsicRmaCsstp(rtl8367c_rma_t* pRmacfg);
+extern ret_t rtl8367c_setAsicRmaLldp(rtk_uint32 enabled, rtl8367c_rma_t* pRmacfg);
+extern ret_t rtl8367c_getAsicRmaLldp(rtk_uint32 *pEnabled, rtl8367c_rma_t* pRmacfg);
+
+#endif /*#ifndef _RTL8367C_ASICDRV_RMA_H_*/
+
diff -Nruw linux-4.4.115-fbx/drivers/misc/freebox./rtlapi/rtl8367c_asicdrv_scheduling.c linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/rtl8367c_asicdrv_scheduling.c
--- linux-4.4.115-fbx/drivers/misc/freebox./rtlapi/rtl8367c_asicdrv_scheduling.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/rtl8367c_asicdrv_scheduling.c	2019-01-22 16:16:24.715257382 +0100
@@ -0,0 +1,527 @@
+/*
+ * Copyright (C) 2013 Realtek Semiconductor Corp.
+ * All Rights Reserved.
+ *
+ * This program is the proprietary software of Realtek Semiconductor
+ * Corporation and/or its licensors, and only be used, duplicated,
+ * modified or distributed under the authorized license from Realtek.
+ *
+ * ANY USE OF THE SOFTWARE OTHER THAN AS AUTHORIZED UNDER
+ * THIS LICENSE OR COPYRIGHT LAW IS PROHIBITED.
+ *
+ * $Revision: 76306 $
+ * $Date: 2017-03-08 15:13:58 +0800 (Wed, 08 Mar 2017) $
+ *
+ * Purpose : RTL8367C switch high-level API for RTL8367C
+ * Feature : Packet Scheduling related functions
+ *
+ */
+
+#include <rtl8367c_asicdrv_scheduling.h>
+/* Function Name:
+ *      rtl8367c_setAsicLeakyBucketParameter
+ * Description:
+ *      Set Leaky Bucket Parameters
+ * Input:
+ *      tick    - Tick is used for time slot size unit
+ *      token   - Token is used for adding budget in each time slot
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK       - Success
+ *      RT_ERR_SMI      - SMI access error
+ *      RT_ERR_TICK     - Invalid TICK
+ *      RT_ERR_TOKEN    - Invalid TOKEN
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_setAsicLeakyBucketParameter(rtk_uint32 tick, rtk_uint32 token)
+{
+    ret_t retVal;
+
+    if(tick > 0xFF)
+        return RT_ERR_TICK;
+
+    if(token > 0xFF)
+        return RT_ERR_TOKEN;
+
+    retVal = rtl8367c_setAsicRegBits(RTL8367C_LEAKY_BUCKET_TICK_REG, RTL8367C_LEAKY_BUCKET_TICK_MASK, tick);
+
+    if(retVal != RT_ERR_OK)
+        return retVal;
+
+    retVal = rtl8367c_setAsicRegBits(RTL8367C_LEAKY_BUCKET_TOKEN_REG, RTL8367C_LEAKY_BUCKET_TOKEN_MASK, token);
+
+    return retVal;
+}
+/* Function Name:
+ *      rtl8367c_getAsicLeakyBucketParameter
+ * Description:
+ *      Get Leaky Bucket Parameters
+ * Input:
+ *      None
+ * Output:
+ *      tick    - Tick is used for time slot size unit
+ *      token   - Token is used for adding budget in each time slot
+ * Return:
+ *      RT_ERR_OK   - Success
+ *      RT_ERR_SMI  - SMI access error
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicLeakyBucketParameter(rtk_uint32 *tick, rtk_uint32 *token)
+{
+    ret_t retVal;
+
+    retVal = rtl8367c_getAsicRegBits(RTL8367C_LEAKY_BUCKET_TICK_REG, RTL8367C_LEAKY_BUCKET_TICK_MASK, tick);
+
+    if(retVal != RT_ERR_OK)
+        return retVal;
+
+    retVal = rtl8367c_getAsicRegBits(RTL8367C_LEAKY_BUCKET_TOKEN_REG, RTL8367C_LEAKY_BUCKET_TOKEN_MASK, token);
+
+    return retVal;
+}
+/* Function Name:
+ *      rtl8367c_setAsicAprMeter
+ * Description:
+ *      Set per-port per-queue APR shared meter index
+ * Input:
+ *      port    - Physical port number (0~10)
+ *      qid     - Queue id
+ *      apridx  - dedicated shared meter index for APR (0~7)
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK               - Success
+ *      RT_ERR_SMI              - SMI access error
+ *      RT_ERR_PORT_ID          - Invalid port number
+ *      RT_ERR_QUEUE_ID         - Invalid queue id
+ *      RT_ERR_FILTER_METER_ID  - Invalid meter
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_setAsicAprMeter(rtk_uint32 port, rtk_uint32 qid, rtk_uint32 apridx)
+{
+    ret_t retVal;
+    rtk_uint32 regAddr;
+
+    if(port > RTL8367C_PORTIDMAX)
+        return RT_ERR_PORT_ID;
+
+    if(qid > RTL8367C_QIDMAX)
+        return RT_ERR_QUEUE_ID;
+
+    if(apridx > RTL8367C_PORT_QUEUE_METER_INDEX_MAX)
+        return RT_ERR_FILTER_METER_ID;
+
+    if(port < 8)
+        retVal = rtl8367c_setAsicRegBits(RTL8367C_SCHEDULE_PORT_APR_METER_REG(port, qid), RTL8367C_SCHEDULE_PORT_APR_METER_MASK(qid), apridx);
+    else {
+        regAddr = RTL8367C_REG_SCHEDULE_PORT8_APR_METER_CTRL0 + ((port-8) << 1) + (qid / 5);
+        retVal = rtl8367c_setAsicRegBits(regAddr, RTL8367C_SCHEDULE_PORT_APR_METER_MASK(qid), apridx);
+    }
+
+    return retVal;
+}
+/* Function Name:
+ *      rtl8367c_getAsicAprMeter
+ * Description:
+ *      Get per-port per-queue APR shared meter index
+ * Input:
+ *      port    - Physical port number (0~10)
+ *      qid     - Queue id
+ * Output:
+ *      apridx  - dedicated shared meter index for APR (0~7)
+ * Return:
+ *      RT_ERR_OK       - Success
+ *      RT_ERR_SMI      - SMI access error
+ *      RT_ERR_PORT_ID  - Invalid port number
+ *      RT_ERR_QUEUE_ID - Invalid queue id
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicAprMeter(rtk_uint32 port, rtk_uint32 qid, rtk_uint32 *apridx)
+{
+    ret_t retVal;
+    rtk_uint32 regAddr;
+
+    if(port > RTL8367C_PORTIDMAX)
+        return RT_ERR_PORT_ID;
+
+    if(qid > RTL8367C_QIDMAX)
+        return RT_ERR_QUEUE_ID;
+
+    if(port < 8)
+        retVal = rtl8367c_getAsicRegBits(RTL8367C_SCHEDULE_PORT_APR_METER_REG(port, qid), RTL8367C_SCHEDULE_PORT_APR_METER_MASK(qid), apridx);
+    else {
+        regAddr = RTL8367C_REG_SCHEDULE_PORT8_APR_METER_CTRL0 + ((port-8) << 1) + (qid / 5);
+        retVal = rtl8367c_getAsicRegBits(regAddr, RTL8367C_SCHEDULE_PORT_APR_METER_MASK(qid), apridx);
+    }
+
+    return retVal;
+}
+/* Function Name:
+ *      rtl8367c_setAsicAprEnable
+ * Description:
+ *      Set per-port APR enable
+ * Input:
+ *      port        - Physical port number (0~7)
+ *      aprEnable   - APR enable setting. 1: enable, 0: disable
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK       - Success
+ *      RT_ERR_SMI      - SMI access error
+ *      RT_ERR_PORT_ID  - Invalid port number
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_setAsicAprEnable(rtk_uint32 port, rtk_uint32 aprEnable)
+{
+    ret_t retVal;
+
+    if(port > RTL8367C_PORTIDMAX)
+        return RT_ERR_PORT_ID;
+
+    retVal = rtl8367c_setAsicRegBit(RTL8367C_SCHEDULE_APR_CTRL_REG, RTL8367C_SCHEDULE_APR_CTRL_OFFSET(port), aprEnable);
+
+    return retVal;
+}
+/* Function Name:
+ *      rtl8367c_getAsicAprEnable
+ * Description:
+ *      Get per-port APR enable
+ * Input:
+ *      port        - Physical port number (0~7)
+ * Output:
+ *      aprEnable   - APR enable setting. 1: enable, 0: disable
+ * Return:
+ *      RT_ERR_OK       - Success
+ *      RT_ERR_SMI      - SMI access error
+ *      RT_ERR_PORT_ID  - Invalid port number
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicAprEnable(rtk_uint32 port, rtk_uint32 *aprEnable)
+{
+    ret_t retVal;
+
+    if(port > RTL8367C_PORTIDMAX)
+        return RT_ERR_PORT_ID;
+
+    retVal = rtl8367c_getAsicRegBit(RTL8367C_SCHEDULE_APR_CTRL_REG, RTL8367C_SCHEDULE_APR_CTRL_OFFSET(port), aprEnable);
+
+    return retVal;
+}
+/* Function Name:
+ *      rtl8367c_setAsicWFQWeight
+ * Description:
+ *      Set weight of a queue
+ * Input:
+ *      port    - Physical port number (0~10)
+ *      qid     - The queue ID wanted to set
+ *      qWeight - The weight value wanted to set (valid:0~127)
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK               - Success
+ *      RT_ERR_SMI              - SMI access error
+ *      RT_ERR_PORT_ID          - Invalid port number
+ *      RT_ERR_QUEUE_ID         - Invalid queue id
+ *      RT_ERR_QOS_QUEUE_WEIGHT - Invalid queue weight
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_setAsicWFQWeight(rtk_uint32 port, rtk_uint32 qid, rtk_uint32 qWeight)
+{
+    ret_t retVal;
+
+    /* Invalid input parameter */
+    if(port > RTL8367C_PORTIDMAX)
+        return RT_ERR_PORT_ID;
+
+    if(qid > RTL8367C_QIDMAX)
+        return RT_ERR_QUEUE_ID;
+
+    if(qWeight > RTL8367C_QWEIGHTMAX && qid > 0)
+        return RT_ERR_QOS_QUEUE_WEIGHT;
+
+    retVal = rtl8367c_setAsicReg(RTL8367C_SCHEDULE_PORT_QUEUE_WFQ_WEIGHT_REG(port, qid), qWeight);
+
+    return retVal;
+}
+/* Function Name:
+ *      rtl8367c_getAsicWFQWeight
+ * Description:
+ *      Get weight of a queue
+ * Input:
+ *      port    - Physical port number (0~10)
+ *      qid     - The queue ID wanted to get
+ * Output:
+ *      qWeight - The weight value of the queue (valid: 0~127)
+ * Return:
+ *      RT_ERR_OK               - Success
+ *      RT_ERR_SMI              - SMI access error
+ *      RT_ERR_PORT_ID          - Invalid port number
+ *      RT_ERR_QUEUE_ID         - Invalid queue id
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicWFQWeight(rtk_uint32 port, rtk_uint32 qid, rtk_uint32 *qWeight)
+{
+    ret_t retVal;
+
+    /* Invalid input parameter */
+    if(port > RTL8367C_PORTIDMAX)
+        return RT_ERR_PORT_ID;
+
+    if(qid > RTL8367C_QIDMAX)
+        return RT_ERR_QUEUE_ID;
+
+    retVal = rtl8367c_getAsicReg(RTL8367C_SCHEDULE_PORT_QUEUE_WFQ_WEIGHT_REG(port, qid), qWeight);
+
+    return retVal;
+}
+/* Function Name:
+ *      rtl8367c_setAsicWFQBurstSize
+ * Description:
+ *      Set WFQ leaky bucket burst size
+ * Input:
+ *      burstsize   - Leaky bucket burst size, unit byte
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK   - Success
+ *      RT_ERR_SMI  - SMI access error
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_setAsicWFQBurstSize(rtk_uint32 burstsize)
+{
+    ret_t retVal;
+
+    retVal = rtl8367c_setAsicReg(RTL8367C_SCHEDULE_WFQ_BURST_SIZE_REG, burstsize);
+
+    return retVal;
+}
+/* Function Name:
+ *      rtl8367c_getAsicWFQBurstSize
+ * Description:
+ *      Get WFQ leaky bucket burst size
+ * Input:
+ *      burstsize   - Leaky bucket burst size, unit byte
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK   - Success
+ *      RT_ERR_SMI  - SMI access error
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicWFQBurstSize(rtk_uint32 *burstsize)
+{
+    ret_t retVal;
+
+    retVal = rtl8367c_getAsicReg(RTL8367C_SCHEDULE_WFQ_BURST_SIZE_REG, burstsize);
+
+    return retVal;
+}
+/* Function Name:
+ *      rtl8367c_setAsicQueueType
+ * Description:
+ *      Set type of a queue
+ * Input:
+ *      port        - Physical port number (0~10)
+ *      qid         - The queue ID wanted to set
+ *      queueType   - The specified queue type. 0b0: Strict priority, 0b1: WFQ
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK       - Success
+ *      RT_ERR_SMI      - SMI access error
+ *      RT_ERR_PORT_ID  - Invalid port number
+ *      RT_ERR_QUEUE_ID - Invalid queue id
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_setAsicQueueType(rtk_uint32 port, rtk_uint32 qid, rtk_uint32 queueType)
+{
+    ret_t retVal;
+
+    /* Invalid input parameter */
+    if(port > RTL8367C_PORTIDMAX)
+        return RT_ERR_PORT_ID;
+
+    if(qid > RTL8367C_QIDMAX)
+        return RT_ERR_QUEUE_ID;
+
+    /* Set Related Registers */
+    retVal = rtl8367c_setAsicRegBit(RTL8367C_SCHEDULE_QUEUE_TYPE_REG(port), RTL8367C_SCHEDULE_QUEUE_TYPE_OFFSET(port, qid),queueType);
+
+    return retVal;
+}
+/* Function Name:
+ *      rtl8367c_getAsicQueueType
+ * Description:
+ *      Get type of a queue
+ * Input:
+ *      port        - Physical port number (0~10)
+ *      qid         - The queue ID wanted to set
+ *      queueType   - The specified queue type. 0b0: Strict priority, 0b1: WFQ
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK       - Success
+ *      RT_ERR_SMI      - SMI access error
+ *      RT_ERR_PORT_ID  - Invalid port number
+ *      RT_ERR_QUEUE_ID - Invalid queue id
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicQueueType(rtk_uint32 port, rtk_uint32 qid, rtk_uint32 *queueType)
+{
+    ret_t retVal;
+
+    /* Invalid input parameter */
+    if(port > RTL8367C_PORTIDMAX)
+        return RT_ERR_PORT_ID;
+
+    if(qid > RTL8367C_QIDMAX)
+        return RT_ERR_QUEUE_ID;
+
+    retVal = rtl8367c_getAsicRegBit(RTL8367C_SCHEDULE_QUEUE_TYPE_REG(port), RTL8367C_SCHEDULE_QUEUE_TYPE_OFFSET(port, qid),queueType);
+
+    return retVal;
+}
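+/*
+ * Illustrative sketch (editorial addition): a hybrid scheduler on one
+ * port -- the top queue strict-priority, the lower queues WFQ with
+ * weights 1:2:4. Assumes the usual convention that higher queue IDs
+ * have higher scheduling precedence; all numbers are examples.
+ */
+static ret_t example_hybridScheduler(rtk_uint32 port)
+{
+    ret_t retVal;
+    rtk_uint32 qid;
+
+    /* Queue 3 drains first */
+    retVal = rtl8367c_setAsicQueueType(port, 3, QTYPE_STRICT);
+    if(retVal != RT_ERR_OK)
+        return retVal;
+
+    /* Queues 0~2 share the remaining bandwidth by weight */
+    for(qid = 0; qid <= 2; qid++)
+    {
+        retVal = rtl8367c_setAsicQueueType(port, qid, QTYPE_WFQ);
+        if(retVal != RT_ERR_OK)
+            return retVal;
+
+        retVal = rtl8367c_setAsicWFQWeight(port, qid, 1 << qid);
+        if(retVal != RT_ERR_OK)
+            return retVal;
+    }
+
+    return RT_ERR_OK;
+}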
+/* Function Name:
+ *      rtl8367c_setAsicPortEgressRate
+ * Description:
+ *      Set per-port egress rate
+ * Input:
+ *      port        - Physical port number (0~10)
+ *      rate        - Egress rate
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - Success
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_PORT_ID      - Invalid port number
+ *      RT_ERR_QOS_EBW_RATE - Invalid bandwidth/rate
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_setAsicPortEgressRate(rtk_uint32 port, rtk_uint32 rate)
+{
+    ret_t retVal;
+    rtk_uint32 regAddr, regData;
+
+    if(port > RTL8367C_PORTIDMAX)
+        return RT_ERR_PORT_ID;
+
+    if(rate > RTL8367C_QOS_GRANULARTY_MAX)
+        return RT_ERR_QOS_EBW_RATE;
+
+    regAddr = RTL8367C_PORT_EGRESSBW_LSB_REG(port);
+    regData = RTL8367C_QOS_GRANULARTY_LSB_MASK & rate;
+
+    retVal = rtl8367c_setAsicReg(regAddr, regData);
+
+    if(retVal != RT_ERR_OK)
+        return retVal;
+
+    regAddr = RTL8367C_PORT_EGRESSBW_MSB_REG(port);
+    regData = (RTL8367C_QOS_GRANULARTY_MSB_MASK & rate) >> RTL8367C_QOS_GRANULARTY_MSB_OFFSET;
+
+    retVal = rtl8367c_setAsicRegBits(regAddr, RTL8367C_PORT6_EGRESSBW_CTRL1_MASK, regData);
+
+    return retVal;
+}
+/* Function Name:
+ *      rtl8367c_getAsicPortEgressRate
+ * Description:
+ *      Get per-port egress rate
+ * Input:
+ *      port        - Physical port number (0~10)
+ *      rate        - Egress rate
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - Success
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_PORT_ID      - Invalid port number
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicPortEgressRate(rtk_uint32 port, rtk_uint32 *rate)
+{
+    ret_t retVal;
+    rtk_uint32 regAddr, regData,regData2;
+
+    if(port > RTL8367C_PORTIDMAX)
+        return RT_ERR_PORT_ID;
+
+    regAddr = RTL8367C_PORT_EGRESSBW_LSB_REG(port);
+
+    retVal = rtl8367c_getAsicReg(regAddr, &regData);
+    if(retVal != RT_ERR_OK)
+        return retVal;
+
+    regAddr = RTL8367C_PORT_EGRESSBW_MSB_REG(port);
+    retVal = rtl8367c_getAsicRegBits(regAddr, RTL8367C_PORT6_EGRESSBW_CTRL1_MASK, &regData2);
+    if(retVal != RT_ERR_OK)
+        return retVal;
+
+    *rate = regData | (regData2 << RTL8367C_QOS_GRANULARTY_MSB_OFFSET);
+
+    return RT_ERR_OK;
+}
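+/*
+ * Illustrative sketch (editorial addition): program a port's egress rate
+ * and read it back to confirm both register halves were written -- the
+ * rate field spans a per-port LSB register plus a shared MSB field, as
+ * the two functions above show. The rate is in ASIC granularity units;
+ * RT_ERR_FAILED is assumed to be the generic failure code of the RTK
+ * error enum.
+ */
+static ret_t example_capEgressRate(rtk_uint32 port, rtk_uint32 rate)
+{
+    ret_t retVal;
+    rtk_uint32 readback;
+
+    retVal = rtl8367c_setAsicPortEgressRate(port, rate);
+    if(retVal != RT_ERR_OK)
+        return retVal;
+
+    retVal = rtl8367c_getAsicPortEgressRate(port, &readback);
+    if(retVal != RT_ERR_OK)
+        return retVal;
+
+    return (readback == rate) ? RT_ERR_OK : RT_ERR_FAILED;
+}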
+/* Function Name:
+ *      rtl8367c_setAsicPortEgressRateIfg
+ * Description:
+ *      Set whether per-port egress rate calculation includes or excludes the IFG
+ * Input:
+ *      ifg     - 1:include IFG 0:exclude IFG
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK   - Success
+ *      RT_ERR_SMI  - SMI access error
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_setAsicPortEgressRateIfg(rtk_uint32 ifg)
+{
+    ret_t retVal;
+
+    retVal = rtl8367c_setAsicRegBit(RTL8367C_REG_SCHEDULE_WFQ_CTRL, RTL8367C_SCHEDULE_WFQ_CTRL_OFFSET, ifg);
+
+    return retVal;
+}
+/* Function Name:
+ *      rtl8367c_getAsicPortEgressRateIfg
+ * Description:
+ *      Get whether per-port egress rate calculation includes or excludes the IFG
+ * Input:
+ *      ifg     - 1:include IFG 0:exclude IFG
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK   - Success
+ *      RT_ERR_SMI  - SMI access error
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicPortEgressRateIfg(rtk_uint32 *ifg)
+{
+    ret_t retVal;
+
+    retVal = rtl8367c_getAsicRegBit(RTL8367C_REG_SCHEDULE_WFQ_CTRL, RTL8367C_SCHEDULE_WFQ_CTRL_OFFSET, ifg);
+
+    return retVal;
+}
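+/*
+ * Illustrative sketch (editorial addition): include the per-frame
+ * preamble + inter-frame gap overhead (20 bytes on standard Ethernet)
+ * in the egress rate calculation, so the configured limit tracks the
+ * wire rate rather than the L2 payload rate.
+ */
+static ret_t example_shapeToWireRate(void)
+{
+    return rtl8367c_setAsicPortEgressRateIfg(1);
+}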
diff -Nruw linux-4.4.115-fbx/drivers/misc/freebox./rtlapi/rtl8367c_asicdrv_scheduling.h linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/rtl8367c_asicdrv_scheduling.h
--- linux-4.4.115-fbx/drivers/misc/freebox./rtlapi/rtl8367c_asicdrv_scheduling.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/rtl8367c_asicdrv_scheduling.h	2019-01-22 16:16:24.715257382 +0100
@@ -0,0 +1,41 @@
+#ifndef _RTL8367C_ASICDRV_SCHEDULING_H_
+#define _RTL8367C_ASICDRV_SCHEDULING_H_
+
+#include <rtl8367c_asicdrv.h>
+
+#define RTL8367C_QWEIGHTMAX    0x7F
+#define RTL8367C_PORT_QUEUE_METER_INDEX_MAX    7
+
+/* enum for queue type */
+enum QUEUETYPE
+{
+    QTYPE_STRICT = 0,
+    QTYPE_WFQ,
+};
+extern ret_t rtl8367c_setAsicLeakyBucketParameter(rtk_uint32 tick, rtk_uint32 token);
+extern ret_t rtl8367c_getAsicLeakyBucketParameter(rtk_uint32 *tick, rtk_uint32 *token);
+extern ret_t rtl8367c_setAsicAprMeter(rtk_uint32 port, rtk_uint32 qid, rtk_uint32 apridx);
+extern ret_t rtl8367c_getAsicAprMeter(rtk_uint32 port, rtk_uint32 qid, rtk_uint32 *apridx);
+extern ret_t rtl8367c_setAsicPprMeter(rtk_uint32 port, rtk_uint32 qid, rtk_uint32 ppridx);
+extern ret_t rtl8367c_getAsicPprMeter(rtk_uint32 port, rtk_uint32 qid, rtk_uint32 *ppridx);
+extern ret_t rtl8367c_setAsicAprEnable(rtk_uint32 port, rtk_uint32 aprEnable);
+extern ret_t rtl8367c_getAsicAprEnable(rtk_uint32 port, rtk_uint32 *aprEnable);
+extern ret_t rtl8367c_setAsicPprEnable(rtk_uint32 port, rtk_uint32 pprEnable);
+extern ret_t rtl8367c_getAsicPprEnable(rtk_uint32 port, rtk_uint32 *pprEnable);
+
+extern ret_t rtl8367c_setAsicWFQWeight(rtk_uint32 port, rtk_uint32 queueid, rtk_uint32 weight);
+extern ret_t rtl8367c_getAsicWFQWeight(rtk_uint32 port, rtk_uint32 queueid, rtk_uint32 *weight);
+extern ret_t rtl8367c_setAsicWFQBurstSize(rtk_uint32 burstsize);
+extern ret_t rtl8367c_getAsicWFQBurstSize(rtk_uint32 *burstsize);
+
+extern ret_t rtl8367c_setAsicQueueType(rtk_uint32 port, rtk_uint32 qid, rtk_uint32 queueType);
+extern ret_t rtl8367c_getAsicQueueType(rtk_uint32 port, rtk_uint32 qid, rtk_uint32 *queueType);
+extern ret_t rtl8367c_setAsicQueueRate(rtk_uint32 port, rtk_uint32 qid, rtk_uint32 ppridx, rtk_uint32 apridx );
+extern ret_t rtl8367c_getAsicQueueRate(rtk_uint32 port, rtk_uint32 qid, rtk_uint32* ppridx, rtk_uint32* apridx );
+extern ret_t rtl8367c_setAsicPortEgressRate(rtk_uint32 port, rtk_uint32 rate);
+extern ret_t rtl8367c_getAsicPortEgressRate(rtk_uint32 port, rtk_uint32 *rate);
+extern ret_t rtl8367c_setAsicPortEgressRateIfg(rtk_uint32 ifg);
+extern ret_t rtl8367c_getAsicPortEgressRateIfg(rtk_uint32 *ifg);
+
+#endif /*_RTL8367C_ASICDRV_SCHEDULING_H_*/
+
diff -Nruw linux-4.4.115-fbx/drivers/misc/freebox./rtlapi/rtl8367c_asicdrv_storm.c linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/rtl8367c_asicdrv_storm.c
--- linux-4.4.115-fbx/drivers/misc/freebox./rtlapi/rtl8367c_asicdrv_storm.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/rtl8367c_asicdrv_storm.c	2019-01-22 16:16:24.715257382 +0100
@@ -0,0 +1,853 @@
+/*
+ * Copyright (C) 2013 Realtek Semiconductor Corp.
+ * All Rights Reserved.
+ *
+ * This program is the proprietary software of Realtek Semiconductor
+ * Corporation and/or its licensors, and only be used, duplicated,
+ * modified or distributed under the authorized license from Realtek.
+ *
+ * ANY USE OF THE SOFTWARE OTHER THAN AS AUTHORIZED UNDER
+ * THIS LICENSE OR COPYRIGHT LAW IS PROHIBITED.
+ *
+ * $Revision: 76306 $
+ * $Date: 2017-03-08 15:13:58 +0800 (週三, 08 三月 2017) $
+ *
+ * Purpose : RTL8367C switch high-level API for RTL8367C
+ * Feature : Storm control filtering related functions
+ *
+ */
+
+#include <rtl8367c_asicdrv_storm.h>
+/* Function Name:
+ *      rtl8367c_setAsicStormFilterBroadcastEnable
+ * Description:
+ *      Set per-port broadcast storm filter enable/disable
+ * Input:
+ *      port    - Physical port number (0~7)
+ *      enabled - 1: enabled, 0: disabled
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK       - Success
+ *      RT_ERR_SMI      - SMI access error
+ *      RT_ERR_PORT_ID  - Invalid port number
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_setAsicStormFilterBroadcastEnable(rtk_uint32 port, rtk_uint32 enabled)
+{
+    if(port >= RTL8367C_PORTNO)
+        return RT_ERR_PORT_ID;
+
+    return rtl8367c_setAsicRegBit(RTL8367C_STORM_BCAST_REG, port, enabled);
+}
+/* Function Name:
+ *      rtl8367c_getAsicStormFilterBroadcastEnable
+ * Description:
+ *      Get per-port broadcast storm filter enable/disable
+ * Input:
+ *      port    - Physical port number (0~7)
+ *      pEnabled - 1: enabled, 0: disabled
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK       - Success
+ *      RT_ERR_SMI      - SMI access error
+ *      RT_ERR_PORT_ID  - Invalid port number
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicStormFilterBroadcastEnable(rtk_uint32 port, rtk_uint32 *pEnabled)
+{
+    if(port >= RTL8367C_PORTNO)
+        return RT_ERR_PORT_ID;
+
+    return rtl8367c_getAsicRegBit(RTL8367C_STORM_BCAST_REG, port, pEnabled);
+}
+/* Function Name:
+ *      rtl8367c_setAsicStormFilterBroadcastMeter
+ * Description:
+ *      Set per-port broadcast storm filter meter
+ * Input:
+ *      port    - Physical port number (0~7)
+ *      meter   - meter index (0~31)
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK               - Success
+ *      RT_ERR_SMI              - SMI access error
+ *      RT_ERR_PORT_ID          - Invalid port number
+ *      RT_ERR_FILTER_METER_ID  - Invalid meter index
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_setAsicStormFilterBroadcastMeter(rtk_uint32 port, rtk_uint32 meter)
+{
+    if(port >= RTL8367C_PORTNO)
+        return RT_ERR_PORT_ID;
+
+    if(meter > RTL8367C_METERMAX)
+        return RT_ERR_FILTER_METER_ID;
+
+    return rtl8367c_setAsicRegBits(RTL8367C_STORM_BCAST_METER_CTRL_REG(port), RTL8367C_STORM_BCAST_METER_CTRL_MASK(port), meter);
+}
+/* Function Name:
+ *      rtl8367c_getAsicStormFilterBroadcastMeter
+ * Description:
+ *      Get per-port broadcast storm filter meter
+ * Input:
+ *      port    - Physical port number (0~7)
+ *      pMeter  - meter index (0~31)
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK       - Success
+ *      RT_ERR_SMI      - SMI access error
+ *      RT_ERR_PORT_ID  - Invalid port number
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicStormFilterBroadcastMeter(rtk_uint32 port, rtk_uint32 *pMeter)
+{
+    if(port >= RTL8367C_PORTNO)
+        return RT_ERR_PORT_ID;
+
+    return rtl8367c_getAsicRegBits(RTL8367C_STORM_BCAST_METER_CTRL_REG(port), RTL8367C_STORM_BCAST_METER_CTRL_MASK(port), pMeter);
+}
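+/*
+ * Illustrative sketch (editorial addition): arm broadcast storm control
+ * on a port -- bind the port to a meter, then enable the filter. The
+ * meter's rate/burst are assumed to be configured through the meter API.
+ */
+static ret_t example_armBcastStormFilter(rtk_uint32 port, rtk_uint32 meter)
+{
+    ret_t retVal;
+
+    retVal = rtl8367c_setAsicStormFilterBroadcastMeter(port, meter);
+    if(retVal != RT_ERR_OK)
+        return retVal;
+
+    return rtl8367c_setAsicStormFilterBroadcastEnable(port, 1);
+}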
+/* Function Name:
+ *      rtl8367c_setAsicStormFilterMulticastEnable
+ * Description:
+ *      Set per-port multicast storm filter enable/disable
+ * Input:
+ *      port    - Physical port number (0~7)
+ *      enabled - 1: enabled, 0: disabled
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK       - Success
+ *      RT_ERR_SMI      - SMI access error
+ *      RT_ERR_PORT_ID  - Invalid port number
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_setAsicStormFilterMulticastEnable(rtk_uint32 port, rtk_uint32 enabled)
+{
+    if(port >= RTL8367C_PORTNO)
+        return RT_ERR_PORT_ID;
+
+    return rtl8367c_setAsicRegBit(RTL8367C_STORM_MCAST_REG, port, enabled);
+}
+/* Function Name:
+ *      rtl8367c_getAsicStormFilterMulticastEnable
+ * Description:
+ *      Get per-port multicast storm filter enable/disable
+ * Input:
+ *      port    - Physical port number (0~7)
+ *      pEnabled - 1: enabled, 0: disabled
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK       - Success
+ *      RT_ERR_SMI      - SMI access error
+ *      RT_ERR_PORT_ID  - Invalid port number
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicStormFilterMulticastEnable(rtk_uint32 port, rtk_uint32 *pEnabled)
+{
+    if(port >= RTL8367C_PORTNO)
+        return RT_ERR_PORT_ID;
+
+    return rtl8367c_getAsicRegBit(RTL8367C_STORM_MCAST_REG, port, pEnabled);
+}
+/* Function Name:
+ *      rtl8367c_setAsicStormFilterMulticastMeter
+ * Description:
+ *      Set per-port multicast storm filter meter
+ * Input:
+ *      port    - Physical port number (0~7)
+ *      meter   - meter index (0~31)
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK               - Success
+ *      RT_ERR_SMI              - SMI access error
+ *      RT_ERR_PORT_ID          - Invalid port number
+ *      RT_ERR_FILTER_METER_ID  - Invalid meter index
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_setAsicStormFilterMulticastMeter(rtk_uint32 port, rtk_uint32 meter)
+{
+    if(port >= RTL8367C_PORTNO)
+        return RT_ERR_PORT_ID;
+
+    if(meter > RTL8367C_METERMAX)
+        return RT_ERR_FILTER_METER_ID;
+
+    return rtl8367c_setAsicRegBits(RTL8367C_STORM_MCAST_METER_CTRL_REG(port), RTL8367C_STORM_MCAST_METER_CTRL_MASK(port), meter);
+}
+/* Function Name:
+ *      rtl8367c_getAsicStormFilterMulticastMeter
+ * Description:
+ *      Get per-port multicast storm filter meter
+ * Input:
+ *      port    - Physical port number (0~7)
+ *      pMeter  - meter index (0~31)
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK       - Success
+ *      RT_ERR_SMI      - SMI access error
+ *      RT_ERR_PORT_ID  - Invalid port number
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicStormFilterMulticastMeter(rtk_uint32 port, rtk_uint32 *pMeter)
+{
+    if(port >= RTL8367C_PORTNO)
+        return RT_ERR_PORT_ID;
+
+    return rtl8367c_getAsicRegBits(RTL8367C_STORM_MCAST_METER_CTRL_REG(port), RTL8367C_STORM_MCAST_METER_CTRL_MASK(port), pMeter);
+}
+/* Function Name:
+ *      rtl8367c_setAsicStormFilterUnknownMulticastEnable
+ * Description:
+ *      Set per-port unknown multicast storm filter enable/disable
+ * Input:
+ *      port    - Physical port number (0~7)
+ *      enabled - 1: enabled, 0: disabled
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK       - Success
+ *      RT_ERR_SMI      - SMI access error
+ *      RT_ERR_PORT_ID  - Invalid port number
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_setAsicStormFilterUnknownMulticastEnable(rtk_uint32 port, rtk_uint32 enabled)
+{
+    if(port >= RTL8367C_PORTNO)
+        return RT_ERR_PORT_ID;
+
+    return rtl8367c_setAsicRegBit(RTL8367C_STORM_UNKNOWN_MCAST_REG, port, enabled);
+}
+/* Function Name:
+ *      rtl8367c_getAsicStormFilterUnknownMulticastEnable
+ * Description:
+ *      Get per-port unknown multicast storm filter enable/disable
+ * Input:
+ *      port    - Physical port number (0~7)
+ *      pEnabled - 1: enabled, 0: disabled
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK       - Success
+ *      RT_ERR_SMI      - SMI access error
+ *      RT_ERR_PORT_ID  - Invalid port number
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicStormFilterUnknownMulticastEnable(rtk_uint32 port, rtk_uint32 *pEnabled)
+{
+    if(port >= RTL8367C_PORTNO)
+        return RT_ERR_PORT_ID;
+
+    return rtl8367c_getAsicRegBit(RTL8367C_STORM_UNKNOWN_MCAST_REG, port, pEnabled);
+}
+/* Function Name:
+ *      rtl8367c_setAsicStormFilterUnknownMulticastMeter
+ * Description:
+ *      Set per-port unknown multicast storm filter meter
+ * Input:
+ *      port    - Physical port number (0~7)
+ *      meter   - meter index (0~31)
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK               - Success
+ *      RT_ERR_SMI              - SMI access error
+ *      RT_ERR_PORT_ID          - Invalid port number
+ *      RT_ERR_FILTER_METER_ID  - Invalid meter index
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_setAsicStormFilterUnknownMulticastMeter(rtk_uint32 port, rtk_uint32 meter)
+{
+    ret_t retVal;
+
+    if(port >= RTL8367C_PORTNO)
+        return RT_ERR_PORT_ID;
+
+    if(meter > RTL8367C_METERMAX)
+        return RT_ERR_FILTER_METER_ID;
+
+    if(port < 8)
+    {
+        retVal = rtl8367c_setAsicRegBits(RTL8367C_STORM_UNMC_METER_CTRL_REG(port), RTL8367C_STORM_UNMC_METER_CTRL_MASK(port), meter);
+        if(retVal != RT_ERR_OK)
+            return retVal;
+    }
+    else
+    {
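+        /* Ports 8~10 sit in a second register bank: two ports per 16-bit
+         * register starting at STORM_UNMC_METER_CTRL4, hence the
+         * ((port - 8) >> 1) word offset; the per-port mask macro selects
+         * the matching field within the register. */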
+        retVal = rtl8367c_setAsicRegBits(RTL8367C_REG_STORM_UNMC_METER_CTRL4 + ((port - 8) >> 1), RTL8367C_STORM_UNMC_METER_CTRL_MASK(port), meter);
+        if(retVal != RT_ERR_OK)
+            return retVal;
+    }
+
+    return RT_ERR_OK;
+}
+/* Function Name:
+ *      rtl8367c_getAsicStormFilterUnknownMulticastMeter
+ * Description:
+ *      Get per-port unknown multicast storm filter meter
+ * Input:
+ *      port    - Physical port number (0~7)
+ *      pMeter  - meter index (0~31)
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK       - Success
+ *      RT_ERR_SMI      - SMI access error
+ *      RT_ERR_PORT_ID  - Invalid port number
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicStormFilterUnknownMulticastMeter(rtk_uint32 port, rtk_uint32 *pMeter)
+{
+    ret_t retVal;
+
+    if(port >= RTL8367C_PORTNO)
+        return RT_ERR_PORT_ID;
+
+    if(port < 8)
+    {
+        retVal = rtl8367c_getAsicRegBits(RTL8367C_STORM_UNMC_METER_CTRL_REG(port), RTL8367C_STORM_UNMC_METER_CTRL_MASK(port), pMeter);
+        if(retVal != RT_ERR_OK)
+            return retVal;
+    }
+    else
+    {
+        retVal = rtl8367c_getAsicRegBits(RTL8367C_REG_STORM_UNMC_METER_CTRL4 + ((port - 8) >> 1), RTL8367C_STORM_UNMC_METER_CTRL_MASK(port), pMeter);
+        if(retVal != RT_ERR_OK)
+            return retVal;
+    }
+
+    return RT_ERR_OK;
+}
+/* Function Name:
+ *      rtl8367c_setAsicStormFilterUnknownUnicastEnable
+ * Description:
+ *      Set per-port unknown unicast storm filter enable/disable
+ * Input:
+ *      port    - Physical port number (0~7)
+ *      enabled - 1: enabled, 0: disabled
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK       - Success
+ *      RT_ERR_SMI      - SMI access error
+ *      RT_ERR_PORT_ID  - Invalid port number
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_setAsicStormFilterUnknownUnicastEnable(rtk_uint32 port, rtk_uint32 enabled)
+{
+    if(port >= RTL8367C_PORTNO)
+        return RT_ERR_PORT_ID;
+
+    return rtl8367c_setAsicRegBit(RTL8367C_STORM_UNKNOWN_UCAST_REG, port, enabled);
+}
+/* Function Name:
+ *      rtl8367c_getAsicStormFilterUnknownUnicastEnable
+ * Description:
+ *      Get per-port unknown unicast storm filter enable/disable
+ * Input:
+ *      port    - Physical port number (0~7)
+ *      pEnabled - 1: enabled, 0: disabled
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK       - Success
+ *      RT_ERR_SMI      - SMI access error
+ *      RT_ERR_PORT_ID  - Invalid port number
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicStormFilterUnknownUnicastEnable(rtk_uint32 port, rtk_uint32 *pEnabled)
+{
+    if(port >= RTL8367C_PORTNO)
+        return RT_ERR_PORT_ID;
+
+    return rtl8367c_getAsicRegBit(RTL8367C_STORM_UNKNOWN_UCAST_REG, port, pEnabled);
+}
+/* Function Name:
+ *      rtl8367c_setAsicStormFilterUnknownUnicastMeter
+ * Description:
+ *      Set per-port unknown unicast storm filter meter
+ * Input:
+ *      port    - Physical port number (0~7)
+ *      meter   - meter index (0~31)
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK               - Success
+ *      RT_ERR_SMI              - SMI access error
+ *      RT_ERR_PORT_ID          - Invalid port number
+ *      RT_ERR_FILTER_METER_ID  - Invalid meter index
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_setAsicStormFilterUnknownUnicastMeter(rtk_uint32 port, rtk_uint32 meter)
+{
+    if(port >= RTL8367C_PORTNO)
+        return RT_ERR_PORT_ID;
+
+    if(meter > RTL8367C_METERMAX)
+        return RT_ERR_FILTER_METER_ID;
+
+    return rtl8367c_setAsicRegBits(RTL8367C_STORM_UNDA_METER_CTRL_REG(port), RTL8367C_STORM_UNDA_METER_CTRL_MASK(port), meter);
+}
+/* Function Name:
+ *      rtl8367c_getAsicStormFilterUnknownUnicastMeter
+ * Description:
+ *      Get per-port unknown unicast storm filter meter
+ * Input:
+ *      port    - Physical port number (0~7)
+ *      pMeter  - meter index (0~31)
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK       - Success
+ *      RT_ERR_SMI      - SMI access error
+ *      RT_ERR_PORT_ID  - Invalid port number
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicStormFilterUnknownUnicastMeter(rtk_uint32 port, rtk_uint32 *pMeter)
+{
+    if(port >= RTL8367C_PORTNO)
+        return RT_ERR_PORT_ID;
+
+    return rtl8367c_getAsicRegBits(RTL8367C_STORM_UNDA_METER_CTRL_REG(port), RTL8367C_STORM_UNDA_METER_CTRL_MASK(port), pMeter);
+}
+
+/* Function Name:
+ *      rtl8367c_setAsicStormFilterExtBroadcastMeter
+ * Description:
+ *      Set extension broadcast storm filter meter
+ * Input:
+ *      meter   - meter index (0~31)
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK               - Success
+ *      RT_ERR_SMI              - SMI access error
+ *      RT_ERR_FILTER_METER_ID  - Invalid meter index
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_setAsicStormFilterExtBroadcastMeter(rtk_uint32 meter)
+{
+    if(meter > RTL8367C_METERMAX)
+        return RT_ERR_FILTER_METER_ID;
+
+    return rtl8367c_setAsicRegBits(RTL8367C_REG_STORM_EXT_MTRIDX_CFG0, RTL8367C_BC_STORM_EXT_METERIDX_MASK, meter);
+}
+
+/* Function Name:
+ *      rtl8367c_getAsicStormFilterExtBroadcastMeter
+ * Description:
+ *      Get extension broadcast storm filter meter
+ * Input:
+ *      None
+ * Output:
+ *      pMeter  - meter index (0~31)
+ * Return:
+ *      RT_ERR_OK               - Success
+ *      RT_ERR_SMI              - SMI access error
+ *      RT_ERR_NULL_POINTER     - Null pointer
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicStormFilterExtBroadcastMeter(rtk_uint32 *pMeter)
+{
+    if(NULL == pMeter)
+        return RT_ERR_NULL_POINTER;
+
+    return rtl8367c_getAsicRegBits(RTL8367C_REG_STORM_EXT_MTRIDX_CFG0, RTL8367C_BC_STORM_EXT_METERIDX_MASK, pMeter);
+}
+
+/* Function Name:
+ *      rtl8367c_setAsicStormFilterExtMulticastMeter
+ * Description:
+ *      Set extension multicast storm filter meter
+ * Input:
+ *      meter   - meter index (0~31)
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK               - Success
+ *      RT_ERR_SMI              - SMI access error
+ *      RT_ERR_FILTER_METER_ID  - Invalid meter index
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_setAsicStormFilterExtMulticastMeter(rtk_uint32 meter)
+{
+    if(meter > RTL8367C_METERMAX)
+        return RT_ERR_FILTER_METER_ID;
+
+    return rtl8367c_setAsicRegBits(RTL8367C_REG_STORM_EXT_MTRIDX_CFG0, RTL8367C_MC_STORM_EXT_METERIDX_MASK, meter);
+}
+
+/* Function Name:
+ *      rtl8367c_getAsicStormFilterExtMulticastMeter
+ * Description:
+ *      Get extension multicast storm filter meter
+ * Input:
+ *      None
+ * Output:
+ *      pMeter  - meter index (0~31)
+ * Return:
+ *      RT_ERR_OK               - Success
+ *      RT_ERR_SMI              - SMI access error
+ *      RT_ERR_NULL_POINTER     - Null pointer
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicStormFilterExtMulticastMeter(rtk_uint32 *pMeter)
+{
+    if(NULL == pMeter)
+        return RT_ERR_NULL_POINTER;
+
+    return rtl8367c_getAsicRegBits(RTL8367C_REG_STORM_EXT_MTRIDX_CFG0, RTL8367C_MC_STORM_EXT_METERIDX_MASK, pMeter);
+}
+
+/* Function Name:
+ *      rtl8367c_setAsicStormFilterExtUnknownMulticastMeter
+ * Description:
+ *      Set extension unknown multicast storm filter meter
+ * Input:
+ *      meter   - meter index (0~31)
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK               - Success
+ *      RT_ERR_SMI              - SMI access error
+ *      RT_ERR_FILTER_METER_ID  - Invalid meter index
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_setAsicStormFilterExtUnknownMulticastMeter(rtk_uint32 meter)
+{
+    if(meter > RTL8367C_METERMAX)
+        return RT_ERR_FILTER_METER_ID;
+
+    return rtl8367c_setAsicRegBits(RTL8367C_REG_STORM_EXT_MTRIDX_CFG1, RTL8367C_UNMC_STORM_EXT_METERIDX_MASK, meter);
+}
+
+/* Function Name:
+ *      rtl8367c_getAsicStormFilterExtUnknownMulticastMeter
+ * Description:
+ *      Get extension unknown multicast storm filter meter
+ * Input:
+ *      None
+ * Output:
+ *      pMeter  - meter index (0~31)
+ * Return:
+ *      RT_ERR_OK               - Success
+ *      RT_ERR_SMI              - SMI access error
+ *      RT_ERR_NULL_POINTER     - Null pointer
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicStormFilterExtUnknownMulticastMeter(rtk_uint32 *pMeter)
+{
+    if(NULL == pMeter)
+        return RT_ERR_NULL_POINTER;
+
+    return rtl8367c_getAsicRegBits(RTL8367C_REG_STORM_EXT_MTRIDX_CFG1, RTL8367C_UNMC_STORM_EXT_METERIDX_MASK, pMeter);
+}
+
+/* Function Name:
+ *      rtl8367c_setAsicStormFilterExtUnknownUnicastMeter
+ * Description:
+ *      Set extension unknown unicast storm filter meter
+ * Input:
+ *      meter   - meter index (0~31)
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK               - Success
+ *      RT_ERR_SMI              - SMI access error
+ *      RT_ERR_FILTER_METER_ID  - Invalid meter index
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_setAsicStormFilterExtUnknownUnicastMeter(rtk_uint32 meter)
+{
+    if(meter > RTL8367C_METERMAX)
+        return RT_ERR_FILTER_METER_ID;
+
+    return rtl8367c_setAsicRegBits(RTL8367C_REG_STORM_EXT_MTRIDX_CFG1, RTL8367C_UNUC_STORM_EXT_METERIDX_MASK, meter);
+}
+
+/* Function Name:
+ *      rtl8367c_getAsicStormFilterExtUnknownUnicastMeter
+ * Description:
+ *      Get extension unknown unicast storm filter meter
+ * Input:
+ *      None
+ * Output:
+ *      pMeter  - meter index (0~31)
+ * Return:
+ *      RT_ERR_OK               - Success
+ *      RT_ERR_SMI              - SMI access error
+ *      RT_ERR_NULL_POINTER     - Null pointer
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicStormFilterExtUnknownUnicastMeter(rtk_uint32 *pMeter)
+{
+    if(NULL == pMeter)
+        return RT_ERR_NULL_POINTER;
+
+    return rtl8367c_getAsicRegBits(RTL8367C_REG_STORM_EXT_MTRIDX_CFG1, RTL8367C_UNUC_STORM_EXT_METERIDX_MASK, pMeter);
+}
+
+/* Function Name:
+ *      rtl8367c_setAsicStormFilterExtBroadcastEnable
+ * Description:
+ *      Set extension broadcast storm filter state
+ * Input:
+ *      enabled     - state
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK               - Success
+ *      RT_ERR_SMI              - SMI access error
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_setAsicStormFilterExtBroadcastEnable(rtk_uint32 enabled)
+{
+    return rtl8367c_setAsicRegBit(RTL8367C_REG_STORM_EXT_CFG, RTL8367C_STORM_BCAST_EXT_EN_OFFSET, enabled);
+}
+
+/* Function Name:
+ *      rtl8367c_getAsicStormFilterExtBroadcastEnable
+ * Description:
+ *      Get extension broadcast storm filter state
+ * Input:
+ *      None
+ * Output:
+ *      pEnabled    - state
+ * Return:
+ *      RT_ERR_OK               - Success
+ *      RT_ERR_SMI              - SMI access error
+ *      RT_ERR_NULL_POINTER     - Null pointer
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicStormFilterExtBroadcastEnable(rtk_uint32 *pEnabled)
+{
+    if(NULL == pEnabled)
+        return RT_ERR_NULL_POINTER;
+
+    return rtl8367c_getAsicRegBit(RTL8367C_REG_STORM_EXT_CFG, RTL8367C_STORM_BCAST_EXT_EN_OFFSET, pEnabled);
+}
+
+/* Function Name:
+ *      rtl8367c_setAsicStormFilterExtMulticastEnable
+ * Description:
+ *      Set extension multicast storm filter state
+ * Input:
+ *      enabled     - state
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK               - Success
+ *      RT_ERR_SMI              - SMI access error
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_setAsicStormFilterExtMulticastEnable(rtk_uint32 enabled)
+{
+    return rtl8367c_setAsicRegBit(RTL8367C_REG_STORM_EXT_CFG, RTL8367C_STORM_MCAST_EXT_EN_OFFSET, enabled);
+}
+
+/* Function Name:
+ *      rtl8367c_getAsicStormFilterExtMulticastEnable
+ * Description:
+ *      Get extension multicast storm filter state
+ * Input:
+ *      None
+ * Output:
+ *      pEnabled    - state
+ * Return:
+ *      RT_ERR_OK               - Success
+ *      RT_ERR_SMI              - SMI access error
+ *      RT_ERR_NULL_POINTER     - Null pointer
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicStormFilterExtMulticastEnable(rtk_uint32 *pEnabled)
+{
+    if(NULL == pEnabled)
+        return RT_ERR_NULL_POINTER;
+
+    return rtl8367c_getAsicRegBit(RTL8367C_REG_STORM_EXT_CFG, RTL8367C_STORM_MCAST_EXT_EN_OFFSET, pEnabled);
+}
+
+/* Function Name:
+ *      rtl8367c_setAsicStormFilterExtUnknownMulticastEnable
+ * Description:
+ *      Set extension unknown multicast storm filter state
+ * Input:
+ *      enabled     - state
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK               - Success
+ *      RT_ERR_SMI              - SMI access error
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_setAsicStormFilterExtUnknownMulticastEnable(rtk_uint32 enabled)
+{
+    return rtl8367c_setAsicRegBit(RTL8367C_REG_STORM_EXT_CFG, RTL8367C_STORM_UNKNOWN_MCAST_EXT_EN_OFFSET, enabled);
+}
+
+/* Function Name:
+ *      rtl8367c_getAsicStormFilterExtUnknownMulticastEnable
+ * Description:
+ *      Get extension unknown multicast storm filter state
+ * Input:
+ *      None
+ * Output:
+ *      pEnabled    - state
+ * Return:
+ *      RT_ERR_OK               - Success
+ *      RT_ERR_SMI              - SMI access error
+ *      RT_ERR_NULL_POINTER     - Null pointer
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicStormFilterExtUnknownMulticastEnable(rtk_uint32 *pEnabled)
+{
+    if(NULL == pEnabled)
+        return RT_ERR_NULL_POINTER;
+
+    return rtl8367c_getAsicRegBit(RTL8367C_REG_STORM_EXT_CFG, RTL8367C_STORM_UNKNOWN_MCAST_EXT_EN_OFFSET, pEnabled);
+}
+
+/* Function Name:
+ *      rtl8367c_setAsicStormFilterExtUnknownUnicastEnable
+ * Description:
+ *      Set extension unknown unicast storm filter state
+ * Input:
+ *      enabled     - state
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK               - Success
+ *      RT_ERR_SMI              - SMI access error
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_setAsicStormFilterExtUnknownUnicastEnable(rtk_uint32 enabled)
+{
+    return rtl8367c_setAsicRegBit(RTL8367C_REG_STORM_EXT_CFG, RTL8367C_STORM_UNKNOWN_UCAST_EXT_EN_OFFSET, enabled);
+}
+
+/* Function Name:
+ *      rtl8367c_getAsicStormFilterExtUnknownUnicastEnable
+ * Description:
+ *      Get extension unknown unicast storm filter state
+ * Input:
+ *      None
+ * Output:
+ *      pEnabled    - state
+ * Return:
+ *      RT_ERR_OK               - Success
+ *      RT_ERR_SMI              - SMI access error
+ *      RT_ERR_NULL_POINTER     - Null pointer
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicStormFilterExtUnknownUnicastEnable(rtk_uint32 *pEnabled)
+{
+    if(NULL == pEnabled)
+        return RT_ERR_NULL_POINTER;
+
+    return rtl8367c_getAsicRegBit(RTL8367C_REG_STORM_EXT_CFG, RTL8367C_STORM_UNKNOWN_UCAST_EXT_EN_OFFSET, pEnabled);
+}
+
+/* Function Name:
+ *      rtl8367c_setAsicStormFilterExtEnablePortMask
+ * Description:
+ *      Set extension storm filter port mask
+ * Input:
+ *      portmask    - port mask
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK               - Success
+ *      RT_ERR_SMI              - SMI access error
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_setAsicStormFilterExtEnablePortMask(rtk_uint32 portmask)
+{
+    ret_t retVal;
+
+    retVal = rtl8367c_setAsicRegBits(RTL8367C_REG_STORM_EXT_CFG, RTL8367C_STORM_EXT_EN_PORTMASK_MASK, portmask & 0x3FF);
+    if(retVal != RT_ERR_OK)
+        return retVal;
+
+    retVal = rtl8367c_setAsicRegBits(RTL8367C_REG_STORM_EXT_CFG, RTL8367C_STORM_EXT_EN_PORTMASK_EXT_MASK, (portmask >> 10)&1);
+    if(retVal != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+/* Function Name:
+ *      rtl8367c_getAsicStormFilterExtEnablePortMask
+ * Description:
+ *      Get extension storm filter port mask
+ * Input:
+ *      None
+ * Output:
+ *      pPortmask   - port mask
+ * Return:
+ *      RT_ERR_OK               - Success
+ *      RT_ERR_SMI              - SMI access error
+ *      RT_ERR_NULL_POINTER     - Null pointer
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicStormFilterExtEnablePortMask(rtk_uint32 *pPortmask)
+{
+    rtk_uint32 tmpPmsk;
+    ret_t retVal;
+
+    if(NULL == pPortmask)
+        return RT_ERR_NULL_POINTER;
+
+    retVal = rtl8367c_getAsicRegBits(RTL8367C_REG_STORM_EXT_CFG, RTL8367C_STORM_EXT_EN_PORTMASK_MASK, &tmpPmsk);
+    if(retVal != RT_ERR_OK)
+        return retVal;
+    *pPortmask = tmpPmsk & 0x3ff;
+
+    retVal = rtl8367c_getAsicRegBits(RTL8367C_REG_STORM_EXT_CFG, RTL8367C_STORM_EXT_EN_PORTMASK_EXT_MASK, &tmpPmsk);
+    if(retVal != RT_ERR_OK)
+        return retVal;
+    *pPortmask |= (tmpPmsk & 1) << 10;
+
+    return RT_ERR_OK;
+}
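+/*
+ * Illustrative sketch (editorial addition): the "extension" storm filter
+ * is global rather than per-port -- one meter and one enable bit per
+ * traffic class, gated by a single 11-bit port mask. Broadcast-only
+ * wiring, with an arbitrary example meter index:
+ */
+static ret_t example_armExtBcastStormFilter(rtk_uint32 portmask)
+{
+    ret_t retVal;
+
+    retVal = rtl8367c_setAsicStormFilterExtBroadcastMeter(5);
+    if(retVal != RT_ERR_OK)
+        return retVal;
+
+    retVal = rtl8367c_setAsicStormFilterExtBroadcastEnable(1);
+    if(retVal != RT_ERR_OK)
+        return retVal;
+
+    /* Bits 0~9 plus the extension bit for port 10 */
+    return rtl8367c_setAsicStormFilterExtEnablePortMask(portmask);
+}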
+
+
diff -Nruw linux-4.4.115-fbx/drivers/misc/freebox./rtlapi/rtl8367c_asicdrv_storm.h linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/rtl8367c_asicdrv_storm.h
--- linux-4.4.115-fbx/drivers/misc/freebox./rtlapi/rtl8367c_asicdrv_storm.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/rtl8367c_asicdrv_storm.h	2019-01-22 16:16:24.715257382 +0100
@@ -0,0 +1,44 @@
+#ifndef _RTL8367C_ASICDRV_STORM_H_
+#define _RTL8367C_ASICDRV_STORM_H_
+
+#include <rtl8367c_asicdrv.h>
+
+extern ret_t rtl8367c_setAsicStormFilterBroadcastEnable(rtk_uint32 port, rtk_uint32 enabled);
+extern ret_t rtl8367c_getAsicStormFilterBroadcastEnable(rtk_uint32 port, rtk_uint32 *pEnabled);
+extern ret_t rtl8367c_setAsicStormFilterBroadcastMeter(rtk_uint32 port, rtk_uint32 meter);
+extern ret_t rtl8367c_getAsicStormFilterBroadcastMeter(rtk_uint32 port, rtk_uint32 *pMeter);
+extern ret_t rtl8367c_setAsicStormFilterMulticastEnable(rtk_uint32 port, rtk_uint32 enabled);
+extern ret_t rtl8367c_getAsicStormFilterMulticastEnable(rtk_uint32 port, rtk_uint32 *pEnabled);
+extern ret_t rtl8367c_setAsicStormFilterMulticastMeter(rtk_uint32 port, rtk_uint32 meter);
+extern ret_t rtl8367c_getAsicStormFilterMulticastMeter(rtk_uint32 port, rtk_uint32 *pMeter);
+extern ret_t rtl8367c_setAsicStormFilterUnknownMulticastEnable(rtk_uint32 port, rtk_uint32 enabled);
+extern ret_t rtl8367c_getAsicStormFilterUnknownMulticastEnable(rtk_uint32 port, rtk_uint32 *pEnabled);
+extern ret_t rtl8367c_setAsicStormFilterUnknownMulticastMeter(rtk_uint32 port, rtk_uint32 meter);
+extern ret_t rtl8367c_getAsicStormFilterUnknownMulticastMeter(rtk_uint32 port, rtk_uint32 *pMeter);
+extern ret_t rtl8367c_setAsicStormFilterUnknownUnicastEnable(rtk_uint32 port, rtk_uint32 enabled);
+extern ret_t rtl8367c_getAsicStormFilterUnknownUnicastEnable(rtk_uint32 port, rtk_uint32 *pEnabled);
+extern ret_t rtl8367c_setAsicStormFilterUnknownUnicastMeter(rtk_uint32 port, rtk_uint32 meter);
+extern ret_t rtl8367c_getAsicStormFilterUnknownUnicastMeter(rtk_uint32 port, rtk_uint32 *pMeter);
+extern ret_t rtl8367c_setAsicStormFilterExtBroadcastMeter(rtk_uint32 meter);
+extern ret_t rtl8367c_getAsicStormFilterExtBroadcastMeter(rtk_uint32 *pMeter);
+extern ret_t rtl8367c_setAsicStormFilterExtMulticastMeter(rtk_uint32 meter);
+extern ret_t rtl8367c_getAsicStormFilterExtMulticastMeter(rtk_uint32 *pMeter);
+extern ret_t rtl8367c_setAsicStormFilterExtUnknownMulticastMeter(rtk_uint32 meter);
+extern ret_t rtl8367c_getAsicStormFilterExtUnknownMulticastMeter(rtk_uint32 *pMeter);
+extern ret_t rtl8367c_setAsicStormFilterExtUnknownUnicastMeter(rtk_uint32 meter);
+extern ret_t rtl8367c_getAsicStormFilterExtUnknownUnicastMeter(rtk_uint32 *pMeter);
+extern ret_t rtl8367c_setAsicStormFilterExtBroadcastEnable(rtk_uint32 enabled);
+extern ret_t rtl8367c_getAsicStormFilterExtBroadcastEnable(rtk_uint32 *pEnabled);
+extern ret_t rtl8367c_setAsicStormFilterExtMulticastEnable(rtk_uint32 enabled);
+extern ret_t rtl8367c_getAsicStormFilterExtMulticastEnable(rtk_uint32 *pEnabled);
+extern ret_t rtl8367c_setAsicStormFilterExtUnknownMulticastEnable(rtk_uint32 enabled);
+extern ret_t rtl8367c_getAsicStormFilterExtUnknownMulticastEnable(rtk_uint32 *pEnabled);
+extern ret_t rtl8367c_setAsicStormFilterExtUnknownUnicastEnable(rtk_uint32 enabled);
+extern ret_t rtl8367c_getAsicStormFilterExtUnknownUnicastEnable(rtk_uint32 *pEnabled);
+extern ret_t rtl8367c_setAsicStormFilterExtEnablePortMask(rtk_uint32 portmask);
+extern ret_t rtl8367c_getAsicStormFilterExtEnablePortMask(rtk_uint32 *pPortmask);
+
+
+#endif /*_RTL8367C_ASICDRV_STORM_H_*/
+
+
diff -Nruw linux-4.4.115-fbx/drivers/misc/freebox./rtlapi/rtl8367c_asicdrv_svlan.c linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/rtl8367c_asicdrv_svlan.c
--- linux-4.4.115-fbx/drivers/misc/freebox./rtlapi/rtl8367c_asicdrv_svlan.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/rtl8367c_asicdrv_svlan.c	2019-01-22 16:16:24.715257382 +0100
@@ -0,0 +1,999 @@
+/*
+ * Copyright (C) 2013 Realtek Semiconductor Corp.
+ * All Rights Reserved.
+ *
+ * This program is the proprietary software of Realtek Semiconductor
+ * Corporation and/or its licensors, and only be used, duplicated,
+ * modified or distributed under the authorized license from Realtek.
+ *
+ * ANY USE OF THE SOFTWARE OTHER THAN AS AUTHORIZED UNDER
+ * THIS LICENSE OR COPYRIGHT LAW IS PROHIBITED.
+ *
+ * $Revision: 79623 $
+ * $Date: 2017-06-14 17:15:42 +0800 (週三, 14 六月 2017) $
+ *
+ * Purpose : RTL8367C switch high-level API for RTL8367C
+ * Feature : SVLAN related functions
+ *
+ */
+#include <rtl8367c_asicdrv_svlan.h>
+
+#include <string.h>
+
+static void _rtl8367c_svlanConfStUser2Smi( rtl8367c_svlan_memconf_t *pUserSt, rtk_uint16 *pSmiSt)
+{
+    pSmiSt[0] |= (pUserSt->vs_member & 0x00FF);
+    pSmiSt[0] |= (pUserSt->vs_untag & 0x00FF) << 8;
+
+    pSmiSt[1] |= (pUserSt->vs_fid_msti & 0x000F);
+    pSmiSt[1] |= (pUserSt->vs_priority & 0x0007) << 4;
+    pSmiSt[1] |= (pUserSt->vs_force_fid & 0x0001) << 7;
+
+    pSmiSt[2] |= (pUserSt->vs_svid & 0x0FFF);
+    pSmiSt[2] |= (pUserSt->vs_efiden & 0x0001) << 12;
+    pSmiSt[2] |= (pUserSt->vs_efid & 0x0007) << 13;
+
+    pSmiSt[3] |= ((pUserSt->vs_member & 0x0700) >> 8);
+    pSmiSt[3] |= ((pUserSt->vs_untag & 0x0700) >> 8) << 3;
+}
+
+static void _rtl8367c_svlanConfStSmi2User( rtl8367c_svlan_memconf_t *pUserSt, rtk_uint16 *pSmiSt)
+{
+
+    pUserSt->vs_member = (pSmiSt[0] & 0x00FF) | ((pSmiSt[3] & 0x0007) << 8);
+    pUserSt->vs_untag = ((pSmiSt[0] & 0xFF00) >> 8) | (((pSmiSt[3] & 0x0038) >> 3) << 8);
+
+    pUserSt->vs_fid_msti = (pSmiSt[1] & 0x000F);
+    pUserSt->vs_priority = (pSmiSt[1] & 0x0070) >> 4;
+    pUserSt->vs_force_fid = (pSmiSt[1] & 0x0080) >> 7;
+
+    pUserSt->vs_svid = (pSmiSt[2] & 0x0FFF);
+    pUserSt->vs_efiden = (pSmiSt[2] & 0x1000) >> 12;
+    pUserSt->vs_efid = (pSmiSt[2] & 0xE000) >> 13;
+}
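+/*
+ * Illustrative sketch (editorial addition): the two helpers above pack a
+ * member-config entry into four 16-bit SMI words and back. The split
+ * fields make the round-trip worth sanity-checking -- vs_member and
+ * vs_untag are 11 bits wide, low byte in word 0, top 3 bits in word 3.
+ */
+static rtk_uint32 example_memconfRoundTrip(rtl8367c_svlan_memconf_t *pCfg)
+{
+    rtk_uint16 smi[RTL8367C_SVLAN_MEMCONF_LEN];
+    rtl8367c_svlan_memconf_t back;
+
+    memset(smi, 0x00, sizeof(smi));
+    memset(&back, 0x00, sizeof(back));
+
+    _rtl8367c_svlanConfStUser2Smi(pCfg, smi);
+    _rtl8367c_svlanConfStSmi2User(&back, smi);
+
+    /* 1 if the packed/unpacked entry survives intact */
+    return (back.vs_member == pCfg->vs_member &&
+            back.vs_svid == pCfg->vs_svid) ? 1 : 0;
+}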
+
+static void _rtl8367c_svlanMc2sStUser2Smi(rtl8367c_svlan_mc2s_t *pUserSt, rtk_uint16 *pSmiSt)
+{
+    pSmiSt[0] |= (pUserSt->svidx & 0x003F);
+    pSmiSt[0] |= (pUserSt->format & 0x0001) << 6;
+    pSmiSt[0] |= (pUserSt->valid & 0x0001) << 7;
+
+    pSmiSt[1] = (rtk_uint16)(pUserSt->smask & 0x0000FFFF);
+    pSmiSt[2] = (rtk_uint16)((pUserSt->smask & 0xFFFF0000) >> 16);
+
+    pSmiSt[3] = (rtk_uint16)(pUserSt->sdata & 0x0000FFFF);
+    pSmiSt[4] = (rtk_uint16)((pUserSt->sdata & 0xFFFF0000) >> 16);
+}
+
+static void _rtl8367c_svlanMc2sStSmi2User(rtl8367c_svlan_mc2s_t *pUserSt, rtk_uint16 *pSmiSt)
+{
+    pUserSt->svidx = (pSmiSt[0] & 0x003F);
+    pUserSt->format = (pSmiSt[0] & 0x0040) >> 6;
+    pUserSt->valid = (pSmiSt[0] & 0x0080) >> 7;
+
+    pUserSt->smask = pSmiSt[1] | (pSmiSt[2] << 16);
+    pUserSt->sdata = pSmiSt[3] | (pSmiSt[4] << 16);
+}
+
+static void _rtl8367c_svlanSp2cStUser2Smi(rtl8367c_svlan_s2c_t *pUserSt, rtk_uint16 *pSmiSt)
+{
+    pSmiSt[0] |= (pUserSt->dstport & 0x0007);
+    pSmiSt[0] |= (pUserSt->svidx & 0x003F) << 3;
+    pSmiSt[0] |= ((pUserSt->dstport & 0x0008) >> 3) << 9;
+
+    pSmiSt[1] |= (pUserSt->vid & 0x0FFF);
+    pSmiSt[1] |= (pUserSt->valid & 0x0001) << 12;
+}
+
+static void _rtl8367c_svlanSp2cStSmi2User(rtl8367c_svlan_s2c_t *pUserSt, rtk_uint16 *pSmiSt)
+{
+    pUserSt->dstport = (((pSmiSt[0] & 0x0200) >> 9) << 3) | (pSmiSt[0] & 0x0007);
+    pUserSt->svidx   = (pSmiSt[0] & 0x01F8) >> 3;
+    pUserSt->vid     = (pSmiSt[1] & 0x0FFF);
+    pUserSt->valid   = (pSmiSt[1] & 0x1000) >> 12;
+}
+
+/* Function Name:
+ *      rtl8367c_setAsicSvlanUplinkPortMask
+ * Description:
+ *      Set uplink ports mask
+ * Input:
+ *      portMask    - Uplink port mask setting
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK   - Success
+ *      RT_ERR_SMI  - SMI access error
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_setAsicSvlanUplinkPortMask(rtk_uint32 portMask)
+{
+    return rtl8367c_setAsicReg(RTL8367C_REG_SVLAN_UPLINK_PORTMASK, portMask);
+}
+/* Function Name:
+ *      rtl8367c_getAsicSvlanUplinkPortMask
+ * Description:
+ *      Get uplink ports mask
+ * Input:
+ *      pPortmask   - Uplink port mask setting
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK   - Success
+ *      RT_ERR_SMI  - SMI access error
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicSvlanUplinkPortMask(rtk_uint32* pPortmask)
+{
+    return rtl8367c_getAsicReg(RTL8367C_REG_SVLAN_UPLINK_PORTMASK, pPortmask);
+}
+/* Function Name:
+ *      rtl8367c_setAsicSvlanTpid
+ * Description:
+ *      Set accepted S-VLAN ether type. The default ether type of S-VLAN is 0x88a8
+ * Input:
+ *      protocolType    - Ether type of S-tag frame parsing in uplink ports
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK   - Success
+ *      RT_ERR_SMI  - SMI access error
+ * Note:
+ *      Ether type of S-tag in 802.1ad is 0x88a8, and ether types 0x9100 and 0x9200 are
+ *      also used in legacy Q-in-Q SVLAN designs. Users can set the matched ether type to
+ *      the protocol supported by the service provider.
+ */
+ret_t rtl8367c_setAsicSvlanTpid(rtk_uint32 protocolType)
+{
+    return rtl8367c_setAsicReg(RTL8367C_REG_VS_TPID, protocolType);
+}
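+/*
+ * Illustrative sketch (editorial addition): switch S-tag parsing from
+ * the 802.1ad default 0x88a8 to the legacy Q-in-Q ether type 0x9100
+ * mentioned in the note above.
+ */
+static ret_t example_useLegacyTpid(void)
+{
+    return rtl8367c_setAsicSvlanTpid(0x9100);
+}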
+/* Function Name:
+ *      rtl8367c_getAsicSvlanTpid
+ * Description:
+ *      Get accepted S-VLAN ether type. The default ether type of S-VLAN is 0x88a8
+ * Input:
+ *      pProtocolType   - Ether type of S-tag frame parsing in uplink ports
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK   - Success
+ *      RT_ERR_SMI  - SMI access error
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicSvlanTpid(rtk_uint32* pProtocolType)
+{
+    return rtl8367c_getAsicReg(RTL8367C_REG_VS_TPID, pProtocolType);
+}
+/* Function Name:
+ *      rtl8367c_setAsicSvlanPrioritySel
+ * Description:
+ *      Set SVLAN priority field setting
+ * Input:
+ *      priSel  - S-priority assignment method, 0:internal priority 1:C-tag priority 2:using Svlan member configuration
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK       - Success
+ *      RT_ERR_SMI      - SMI access error
+ *      RT_ERR_INPUT    - Invalid input parameter
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_setAsicSvlanPrioritySel(rtk_uint32 priSel)
+{
+    if(priSel >= SPRISEL_END)
+        return RT_ERR_QOS_INT_PRIORITY;
+
+    return rtl8367c_setAsicRegBits(RTL8367C_REG_SVLAN_CFG, RTL8367C_VS_SPRISEL_MASK, priSel);
+}
+/* Function Name:
+ *      rtl8367c_getAsicSvlanPrioritySel
+ * Description:
+ *      Get SVLAN priority field setting
+ * Input:
+ *      pPriSel     - S-priority assignment method, 0:internal priority 1:C-tag priority 2:using Svlan member configuration
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK   - Success
+ *      RT_ERR_SMI  - SMI access error
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicSvlanPrioritySel(rtk_uint32* pPriSel)
+{
+    return rtl8367c_getAsicRegBits(RTL8367C_REG_SVLAN_CFG, RTL8367C_VS_SPRISEL_MASK, pPriSel);
+}
+/* Function Name:
+ *      rtl8367c_setAsicSvlanTrapPriority
+ * Description:
+ *      Set trap to CPU priority assignment
+ * Input:
+ *      priority    - Priority assignment
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK               - Success
+ *      RT_ERR_SMI              - SMI access error
+ *      RT_ERR_QOS_INT_PRIORITY - Invalid priority
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_setAsicSvlanTrapPriority(rtk_uint32 priority)
+{
+    if(priority > RTL8367C_PRIMAX)
+        return RT_ERR_QOS_INT_PRIORITY;
+
+    return rtl8367c_setAsicRegBits(RTL8367C_REG_QOS_TRAP_PRIORITY0, RTL8367C_SVLAN_PRIOIRTY_MASK, priority);
+}
+/* Function Name:
+ *      rtl8367c_getAsicSvlanTrapPriority
+ * Description:
+ *      Get trap to CPU priority assignment
+ * Input:
+ *      pPriority   - Priority assignment
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK   - Success
+ *      RT_ERR_SMI  - SMI access error
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicSvlanTrapPriority(rtk_uint32* pPriority)
+{
+    return rtl8367c_getAsicRegBits(RTL8367C_REG_QOS_TRAP_PRIORITY0, RTL8367C_SVLAN_PRIOIRTY_MASK, pPriority);
+}
+/* Function Name:
+ *      rtl8367c_setAsicSvlanDefaultVlan
+ * Description:
+ *      Set default egress SVLAN
+ * Input:
+ *      port    - Physical port number (0~10)
+ *      index   - index SVLAN member configuration
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK                   - Success
+ *      RT_ERR_SMI                  - SMI access error
+ *      RT_ERR_PORT_ID              - Invalid port number
+ *      RT_ERR_SVLAN_ENTRY_INDEX    - Invalid SVLAN index parameter
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_setAsicSvlanDefaultVlan(rtk_uint32 port, rtk_uint32 index)
+{
+    ret_t retVal;
+
+    if(port > RTL8367C_PORTIDMAX)
+        return RT_ERR_PORT_ID;
+
+    if(index > RTL8367C_SVIDXMAX)
+        return RT_ERR_SVLAN_ENTRY_INDEX;
+
+    if(port < 8){
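+        /* Ports 0~7 pack two 6-bit SVIDX fields into each 16-bit register
+         * at base + (port >> 1): even ports use the PORT0 field, odd
+         * ports the PORT1 field. */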
+        if(port & 1)
+            retVal = rtl8367c_setAsicRegBits(RTL8367C_REG_SVLAN_PORTBASED_SVIDX_CTRL0 + (port >> 1), RTL8367C_VS_PORT1_SVIDX_MASK,index);
+        else
+            retVal = rtl8367c_setAsicRegBits(RTL8367C_REG_SVLAN_PORTBASED_SVIDX_CTRL0 + (port >> 1), RTL8367C_VS_PORT0_SVIDX_MASK,index);
+    }else{
+        switch(port){
+            case 8:
+                retVal = rtl8367c_setAsicRegBits(RTL8367C_REG_SVLAN_PORTBASED_SVIDX_CTRL4, RTL8367C_VS_PORT8_SVIDX_MASK,index);
+                break;
+
+            case 9:
+                retVal = rtl8367c_setAsicRegBits(RTL8367C_REG_SVLAN_PORTBASED_SVIDX_CTRL4, RTL8367C_VS_PORT9_SVIDX_MASK,index);
+                break;
+
+            case 10:
+                retVal = rtl8367c_setAsicRegBits(RTL8367C_REG_SVLAN_PORTBASED_SVIDX_CTRL5, RTL8367C_SVLAN_PORTBASED_SVIDX_CTRL5_MASK,index);
+                break;
+        }
+    }
+
+    return retVal;
+}
+/* Function Name:
+ *      rtl8367c_getAsicSvlanDefaultVlan
+ * Description:
+ *      Get default egress SVLAN
+ * Input:
+ *      port    - Physical port number (0~10)
+ *      pIndex  - index SVLAN member configuration
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK       - Success
+ *      RT_ERR_SMI      - SMI access error
+ *      RT_ERR_PORT_ID  - Invalid port number
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicSvlanDefaultVlan(rtk_uint32 port, rtk_uint32* pIndex)
+{
+    ret_t retVal;
+
+    if(port > RTL8367C_PORTIDMAX)
+        return RT_ERR_PORT_ID;
+
+    if(port < 8){
+        if(port & 1)
+            retVal = rtl8367c_getAsicRegBits(RTL8367C_REG_SVLAN_PORTBASED_SVIDX_CTRL0 + (port >> 1), RTL8367C_VS_PORT1_SVIDX_MASK,pIndex);
+        else
+            retVal = rtl8367c_getAsicRegBits(RTL8367C_REG_SVLAN_PORTBASED_SVIDX_CTRL0 + (port >> 1), RTL8367C_VS_PORT0_SVIDX_MASK,pIndex);
+    }else{
+        switch(port){
+            case 8:
+                retVal = rtl8367c_getAsicRegBits(RTL8367C_REG_SVLAN_PORTBASED_SVIDX_CTRL4, RTL8367C_VS_PORT8_SVIDX_MASK,pIndex);
+                break;
+
+            case 9:
+                retVal = rtl8367c_getAsicRegBits(RTL8367C_REG_SVLAN_PORTBASED_SVIDX_CTRL4, RTL8367C_VS_PORT9_SVIDX_MASK,pIndex);
+                break;
+
+            case 10:
+                retVal = rtl8367c_getAsicRegBits(RTL8367C_REG_SVLAN_PORTBASED_SVIDX_CTRL5, RTL8367C_SVLAN_PORTBASED_SVIDX_CTRL5_MASK,pIndex);
+                break;
+        }
+    }
+
+    return retVal;
+}
+/* Function Name:
+ *      rtl8367c_setAsicSvlanIngressUntag
+ * Description:
+ *      Set action for un-S-tagged frames received from uplink ports
+ * Input:
+ *      mode        - 0:Drop 1:Trap 2:Assign SVLAN
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK   - Success
+ *      RT_ERR_SMI  - SMI access error
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_setAsicSvlanIngressUntag(rtk_uint32 mode)
+{
+    return rtl8367c_setAsicRegBits(RTL8367C_REG_SVLAN_CFG, RTL8367C_VS_UNTAG_MASK, mode);
+}
+/* Function Name:
+ *      rtl8367c_getAsicSvlanIngressUntag
+ * Description:
+ *      Get action for un-S-tagged frames received from uplink ports
+ * Input:
+ *      pMode       - 0:Drop 1:Trap 2:Assign SVLAN
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK   - Success
+ *      RT_ERR_SMI  - SMI access error
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicSvlanIngressUntag(rtk_uint32* pMode)
+{
+    return rtl8367c_getAsicRegBits(RTL8367C_REG_SVLAN_CFG, RTL8367C_VS_UNTAG_MASK, pMode);
+}
+/* Function Name:
+ *      rtl8367c_setAsicSvlanIngressUnmatch
+ * Description:
+ *      Set action for unmatched S-tagged frames received from uplink ports
+ * Input:
+ *      mode        - 0:Drop 1:Trap 2:Assign SVLAN
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK   - Success
+ *      RT_ERR_SMI  - SMI access error
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_setAsicSvlanIngressUnmatch(rtk_uint32 mode)
+{
+    return rtl8367c_setAsicRegBits(RTL8367C_REG_SVLAN_CFG, RTL8367C_VS_UNMAT_MASK, mode);
+}
+/* Function Name:
+ *      rtl8367c_getAsicSvlanIngressUnmatch
+ * Description:
+ *      Get action for unmatched S-tagged frames received from uplink ports
+ * Input:
+ *      pMode       - 0:Drop 1:Trap 2:Assign SVLAN
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK   - Success
+ *      RT_ERR_SMI  - SMI access error
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicSvlanIngressUnmatch(rtk_uint32* pMode)
+{
+    return rtl8367c_getAsicRegBits(RTL8367C_REG_SVLAN_CFG, RTL8367C_VS_UNMAT_MASK, pMode);
+}
+/* Function Name:
+ *      rtl8367c_setAsicSvlanEgressUnassign
+ * Description:
+ *      Set action for uplink streams without an assigned egress SVID
+ * Input:
+ *      enabled     - 1:Trap egress unassigned frames to CPU, 0: Use SVLAN setup in VS_CPSVIDX as egress SVID
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK   - Success
+ *      RT_ERR_SMI  - SMI access error
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_setAsicSvlanEgressUnassign(rtk_uint32 enabled)
+{
+    return rtl8367c_setAsicRegBit(RTL8367C_REG_SVLAN_CFG, RTL8367C_VS_UIFSEG_OFFSET, enabled);
+}
+/* Function Name:
+ *      rtl8367c_getAsicSvlanEgressUnassign
+ * Description:
+ *      Get action for uplink streams without an assigned egress SVID
+ * Input:
+ *      pEnabled    - 1:Trap egress unassigned frames to CPU, 0: Use SVLAN setup in VS_CPSVIDX as egress SVID
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK   - Success
+ *      RT_ERR_SMI  - SMI access error
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicSvlanEgressUnassign(rtk_uint32* pEnabled)
+{
+    return rtl8367c_getAsicRegBit(RTL8367C_REG_SVLAN_CFG, RTL8367C_VS_UIFSEG_OFFSET, pEnabled);
+}
+
+
+/* Function Name:
+ *      rtl8367c_setAsicSvlanMemberConfiguration
+ * Description:
+ *      Set system 64 S-tag content
+ * Input:
+ *      index           - index of 64 s-tag configuration
+ *      pSvlanMemCfg    - SVLAN member configuration
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK                   - Success
+ *      RT_ERR_SMI                  - SMI access error
+ *      RT_ERR_SVLAN_ENTRY_INDEX    - Invalid SVLAN index parameter
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_setAsicSvlanMemberConfiguration(rtk_uint32 index, rtl8367c_svlan_memconf_t* pSvlanMemCfg)
+{
+    ret_t retVal;
+    rtk_uint32 regAddr, regData;
+    rtk_uint16 *accessPtr;
+    rtk_uint32 i;
+    rtk_uint16 smiSvlanMemConf[RTL8367C_SVLAN_MEMCONF_LEN];
+
+    if(index > RTL8367C_SVIDXMAX)
+        return RT_ERR_SVLAN_ENTRY_INDEX;
+
+    memset(smiSvlanMemConf, 0x00, sizeof(rtk_uint16) * RTL8367C_SVLAN_MEMCONF_LEN);
+    _rtl8367c_svlanConfStUser2Smi(pSvlanMemCfg, smiSvlanMemConf);
+
+    accessPtr = smiSvlanMemConf;
+
+    regData = *accessPtr;
+    for(i = 0; i < 3; i++)
+    {
+        retVal = rtl8367c_setAsicReg(RTL8367C_SVLAN_MEMBERCFG_BASE_REG(index) + i, regData);
+        if(retVal != RT_ERR_OK)
+            return retVal;
+
+        accessPtr ++;
+        regData = *accessPtr;
+    }
+
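+    /* The first three SMI words of an entry are contiguous at the entry's
+     * base register; the fourth word lives in a separate CTRL4 bank, and
+     * entry 63's word has its own discontiguous address, handled below. */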
+    if(index < 63)
+        regAddr = RTL8367C_REG_SVLAN_MEMBERCFG0_CTRL4+index;
+    else if(index == 63)
+        regAddr = RTL8367C_REG_SVLAN_MEMBERCFG63_CTRL4;
+
+    retVal = rtl8367c_setAsicReg(regAddr, regData);
+    if(retVal != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+/* Function Name:
+ *      rtl8367c_getAsicSvlanMemberConfiguration
+ * Description:
+ *      Get system 64 S-tag content
+ * Input:
+ *      index           - index of 64 s-tag configuration
+ *      pSvlanMemCfg    - SVLAN member configuration
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK                   - Success
+ *      RT_ERR_SMI                  - SMI access error
+ *      RT_ERR_SVLAN_ENTRY_INDEX    - Invalid SVLAN index parameter
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicSvlanMemberConfiguration(rtk_uint32 index,rtl8367c_svlan_memconf_t* pSvlanMemCfg)
+{
+    ret_t retVal;
+    rtk_uint32 regAddr,regData;
+    rtk_uint16 *accessPtr;
+    rtk_uint32 i;
+    rtk_uint16 smiSvlanMemConf[RTL8367C_SVLAN_MEMCONF_LEN];
+
+    if(index > RTL8367C_SVIDXMAX)
+        return RT_ERR_SVLAN_ENTRY_INDEX;
+
+    memset(smiSvlanMemConf, 0x00, sizeof(rtk_uint16) * RTL8367C_SVLAN_MEMCONF_LEN);
+
+    accessPtr = smiSvlanMemConf;
+
+    for(i = 0; i < 3; i++)
+    {
+        retVal = rtl8367c_getAsicReg(RTL8367C_SVLAN_MEMBERCFG_BASE_REG(index) + i, &regData);
+        if(retVal != RT_ERR_OK)
+            return retVal;
+
+        *accessPtr = regData;
+
+        accessPtr ++;
+    }
+
+    if(index < 63)
+        regAddr = RTL8367C_REG_SVLAN_MEMBERCFG0_CTRL4+index;
+    else if(index == 63)
+        regAddr = RTL8367C_REG_SVLAN_MEMBERCFG63_CTRL4;
+
+    retVal = rtl8367c_getAsicReg(regAddr, &regData);
+    if(retVal != RT_ERR_OK)
+        return retVal;
+
+    *accessPtr = regData;
+
+    _rtl8367c_svlanConfStSmi2User(pSvlanMemCfg, smiSvlanMemConf);
+
+    return RT_ERR_OK;
+}
+/* Function Name:
+ *      rtl8367c_setAsicSvlanC2SConf
+ * Description:
+ *      Set SVLAN C2S table
+ * Input:
+ *      index   - index of 128 Svlan C2S configuration
+ *      evid    - Enhanced VID
+ *      portmask    - available c2s port mask
+ *      svidx   - index of 64 Svlan member configuration
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - Success
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_ENTRY_INDEX  - Invalid entry index
+ * Note:
+ *      The ASIC checks the upstream packet's VID and assigns the related SVID to matched packets (see the sketch after this function)
+ */
+ret_t rtl8367c_setAsicSvlanC2SConf(rtk_uint32 index, rtk_uint32 evid, rtk_uint32 portmask, rtk_uint32 svidx)
+{
+    ret_t retVal;
+
+    if(index > RTL8367C_C2SIDXMAX)
+        return RT_ERR_ENTRY_INDEX;
+
+    retVal = rtl8367c_setAsicReg(RTL8367C_SVLAN_C2SCFG_BASE_REG(index), svidx);
+    if(retVal != RT_ERR_OK)
+        return retVal;
+
+    retVal = rtl8367c_setAsicReg(RTL8367C_SVLAN_C2SCFG_BASE_REG(index) + 1, portmask);
+    if(retVal != RT_ERR_OK)
+        return retVal;
+
+    retVal = rtl8367c_setAsicReg(RTL8367C_SVLAN_C2SCFG_BASE_REG(index) + 2, evid);
+    if(retVal != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
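+
+/*
+ * Example (editor's sketch, not part of the Realtek SDK): program C2S entry 0
+ * so that upstream traffic with customer VID 100 on ports 0-3 (mask 0x0F) is
+ * remapped to the SVLAN member configuration at index 1. The index, VID and
+ * mask values are illustrative only.
+ */
+static ret_t example_svlan_c2s_setup(void)
+{
+    return rtl8367c_setAsicSvlanC2SConf(0,      /* C2S entry index (0~127) */
+                                        100,    /* customer (enhanced) VID to match */
+                                        0x0F,   /* ports on which the rule applies */
+                                        1);     /* SVLAN member configuration index */
+}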
+/* Function Name:
+ *      rtl8367c_getAsicSvlanC2SConf
+ * Description:
+ *      Get SVLAN C2S table
+ * Input:
+ *      index   - index of 128 Svlan C2S configuration
+ *      pEvid   - Enhanced VID
+ *      pPortmask   - available c2s port mask
+ *      pSvidx  - index of 64 Svlan member configuration
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - Success
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_ENTRY_INDEX  - Invalid entry index
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicSvlanC2SConf(rtk_uint32 index, rtk_uint32* pEvid, rtk_uint32* pPortmask, rtk_uint32* pSvidx)
+{
+    ret_t retVal;
+
+    if(index > RTL8367C_C2SIDXMAX)
+        return RT_ERR_ENTRY_INDEX;
+
+    retVal = rtl8367c_getAsicReg(RTL8367C_SVLAN_C2SCFG_BASE_REG(index), pSvidx);
+    if(retVal != RT_ERR_OK)
+        return retVal;
+
+    retVal = rtl8367c_getAsicReg(RTL8367C_SVLAN_C2SCFG_BASE_REG(index) + 1, pPortmask);
+    if(retVal != RT_ERR_OK)
+        return retVal;
+
+    retVal = rtl8367c_getAsicReg(RTL8367C_SVLAN_C2SCFG_BASE_REG(index) + 2, pEvid);
+    if(retVal != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+/* Function Name:
+ *      rtl8367c_setAsicSvlanMC2SConf
+ * Description:
+ *      Set system MC2S content
+ * Input:
+ *      index           - index of 32 SVLAN MC2S configurations
+ *      pSvlanMc2sCfg   - SVLAN Multicast to SVLAN member configuration
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - Success
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_ENTRY_INDEX  - Invalid entry index
+ * Note:
+ *      If an upstream packet is an L2 or IPv4 multicast packet and its DMAC/DIP
+ *      matches an MC2S configuration, the ASIC assigns the egress SVID to the
+ *      packet (see the sketch after this function)
+ */
+ret_t rtl8367c_setAsicSvlanMC2SConf(rtk_uint32 index,rtl8367c_svlan_mc2s_t* pSvlanMc2sCfg)
+{
+    ret_t retVal;
+    rtk_uint32 regData;
+    rtk_uint16 *accessPtr;
+    rtk_uint32 i;
+    rtk_uint16 smiSvlanMC2S[RTL8367C_SVLAN_MC2S_LEN];
+
+    if(index > RTL8367C_MC2SIDXMAX)
+        return RT_ERR_ENTRY_INDEX;
+
+    memset(smiSvlanMC2S, 0x00, sizeof(rtk_uint16) * RTL8367C_SVLAN_MC2S_LEN);
+    _rtl8367c_svlanMc2sStUser2Smi(pSvlanMc2sCfg, smiSvlanMC2S);
+
+    accessPtr = smiSvlanMC2S;
+
+    for(i = 0; i < 5; i++)
+    {
+        regData = *(accessPtr + i);
+        retVal = rtl8367c_setAsicReg(RTL8367C_SVLAN_MCAST2S_ENTRY_BASE_REG(index) + i, regData);
+        if(retVal != RT_ERR_OK)
+            return retVal;
+    }
+
+    return retVal;
+}
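+
+/*
+ * Example (editor's sketch, not part of the Realtek SDK): bind an IPv4
+ * multicast group to SVLAN member configuration 2 via MC2S entry 0. Field
+ * semantics follow the struct in rtl8367c_asicdrv_svlan.h; the group address
+ * 239.1.1.1 and the exact-match mask are illustrative assumptions.
+ */
+static ret_t example_svlan_mc2s_setup(void)
+{
+    rtl8367c_svlan_mc2s_t mc2s;
+
+    memset(&mc2s, 0x00, sizeof(rtl8367c_svlan_mc2s_t));
+    mc2s.valid  = 1;
+    mc2s.format = SVLAN_MC2S_MODE_IP;   /* match on DIP rather than DMAC */
+    mc2s.svidx  = 2;                    /* SVLAN member configuration index */
+    mc2s.sdata  = 0xEF010101;           /* 239.1.1.1 */
+    mc2s.smask  = 0xFFFFFFFF;           /* exact-match mask */
+
+    return rtl8367c_setAsicSvlanMC2SConf(0, &mc2s);
+}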
+/* Function Name:
+ *      rtl8367c_getAsicSvlanMC2SConf
+ * Description:
+ *      Get system MC2S content
+ * Input:
+ *      index           - index of 32 SVLAN MC2S configurations
+ *      pSvlanMc2sCfg   - SVLAN Multicast to SVLAN member configuration
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - Success
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_ENTRY_INDEX  - Invalid entry index
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicSvlanMC2SConf(rtk_uint32 index, rtl8367c_svlan_mc2s_t* pSvlanMc2sCfg)
+{
+    ret_t retVal;
+    rtk_uint32 regData;
+    rtk_uint16 *accessPtr;
+    rtk_uint32 i;
+    rtk_uint16 smiSvlanMC2S[RTL8367C_SVLAN_MC2S_LEN];
+
+    if(index > RTL8367C_MC2SIDXMAX)
+        return RT_ERR_ENTRY_INDEX;
+
+    memset(smiSvlanMC2S, 0x00, sizeof(rtk_uint16) * RTL8367C_SVLAN_MC2S_LEN);
+
+    accessPtr = smiSvlanMC2S;
+
+    for(i = 0; i < 5; i++)
+    {
+        retVal = rtl8367c_getAsicReg(RTL8367C_SVLAN_MCAST2S_ENTRY_BASE_REG(index) + i, &regData);
+        if(retVal != RT_ERR_OK)
+            return retVal;
+
+        *accessPtr = regData;
+        accessPtr ++;
+    }
+
+    _rtl8367c_svlanMc2sStSmi2User(pSvlanMc2sCfg, smiSvlanMC2S);
+
+    return RT_ERR_OK;
+}
+
+/* Function Name:
+ *      rtl8367c_setAsicSvlanSP2CConf
+ * Description:
+ *      Set system 128 SP2C content
+ * Input:
+ *      index           - index of 128 SVLAN & Port to CVLAN configuration
+ *      pSvlanSp2cCfg   - SVLAN & Port to CVLAN configuration
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - Success
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_ENTRY_INDEX  - Invalid entry index
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_setAsicSvlanSP2CConf(rtk_uint32 index, rtl8367c_svlan_s2c_t* pSvlanSp2cCfg)
+{
+    ret_t retVal;
+    rtk_uint32 regData;
+    rtk_uint16 *accessPtr;
+    rtk_uint32 i;
+    rtk_uint16 smiSvlanSP2C[RTL8367C_SVLAN_SP2C_LEN];
+
+    if(index > RTL8367C_SP2CMAX)
+        return RT_ERR_ENTRY_INDEX;
+
+    memset(smiSvlanSP2C, 0x00, sizeof(rtk_uint16) * RTL8367C_SVLAN_SP2C_LEN);
+    _rtl8367c_svlanSp2cStUser2Smi(pSvlanSp2cCfg,smiSvlanSP2C);
+
+    accessPtr = smiSvlanSP2C;
+
+    for(i = 0; i < 2; i++)
+    {
+        regData = *(accessPtr + i);
+        retVal = rtl8367c_setAsicReg(RTL8367C_SVLAN_S2C_ENTRY_BASE_REG(index) + i, regData);
+        if(retVal != RT_ERR_OK)
+            return retVal;
+    }
+
+    return retVal;
+}
+/* Function Name:
+ *      rtl8367c_getAsicSvlanSP2CConf
+ * Description:
+ *      Get system 128 SP2C content
+ * Input:
+ *      index           - index of 128 SVLAN & Port to CVLAN configuration
+ *      pSvlanSp2cCfg   - SVLAN & Port to CVLAN configuration
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - Success
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_ENTRY_INDEX  - Invalid entry index
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicSvlanSP2CConf(rtk_uint32 index,rtl8367c_svlan_s2c_t* pSvlanSp2cCfg)
+{
+    ret_t retVal;
+    rtk_uint32 regData;
+    rtk_uint16 *accessPtr;
+    rtk_uint32 i;
+    rtk_uint16 smiSvlanSP2C[RTL8367C_SVLAN_SP2C_LEN];
+
+    if(index > RTL8367C_SP2CMAX)
+        return RT_ERR_ENTRY_INDEX;
+
+    memset(smiSvlanSP2C, 0x00, sizeof(rtk_uint16) * RTL8367C_SVLAN_SP2C_LEN);
+
+    accessPtr = smiSvlanSP2C;
+
+    for(i = 0; i < 2; i++)
+    {
+        retVal = rtl8367c_getAsicReg(RTL8367C_SVLAN_S2C_ENTRY_BASE_REG(index) + i, &regData);
+        if(retVal != RT_ERR_OK)
+            return retVal;
+
+        *accessPtr = regData;
+
+        accessPtr ++;
+    }
+
+    _rtl8367c_svlanSp2cStSmi2User(pSvlanSp2cCfg, smiSvlanSP2C);
+
+    return RT_ERR_OK;
+}
+/* Function Name:
+ *      rtl8367c_setAsicSvlanDmacCvidSel
+ * Description:
+ *      Set downstream CVID decision by DMAC
+ * Input:
+ *      port        - Physical port number (0~10)
+ *      enabled     - 0:disabled, 1:enabled
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK       - Success
+ *      RT_ERR_SMI      - SMI access error
+ *      RT_ERR_PORT_ID  - Invalid port number
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_setAsicSvlanDmacCvidSel(rtk_uint32 port, rtk_uint32 enabled)
+{
+    if(port > RTL8367C_PORTIDMAX)
+        return RT_ERR_PORT_ID;
+
+    if(port < 8)
+        return rtl8367c_setAsicRegBit(RTL8367C_REG_SVLAN_CFG, RTL8367C_VS_PORT0_DMACVIDSEL_OFFSET + port, enabled);
+    else
+        return rtl8367c_setAsicRegBit(RTL8367C_REG_SVLAN_CFG_EXT, RTL8367C_VS_PORT8_DMACVIDSEL_OFFSET + (port-8), enabled);
+}
+/* Function Name:
+ *      rtl8367c_getAsicSvlanDmacCvidSel
+ * Description:
+ *      Get downstream CVID decision by DMAC
+ * Input:
+ *      port        - Physical port number (0~10)
+ *      pEnabled    - 0:disabled, 1:enabled
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK       - Success
+ *      RT_ERR_SMI      - SMI access error
+ *      RT_ERR_PORT_ID  - Invalid port number
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicSvlanDmacCvidSel(rtk_uint32 port, rtk_uint32* pEnabled)
+{
+    if(port > RTL8367C_PORTIDMAX)
+        return RT_ERR_PORT_ID;
+
+    if(port < 8)
+        return rtl8367c_getAsicRegBit(RTL8367C_REG_SVLAN_CFG, RTL8367C_VS_PORT0_DMACVIDSEL_OFFSET + port, pEnabled);
+    else
+        return rtl8367c_getAsicRegBit(RTL8367C_REG_SVLAN_CFG_EXT, RTL8367C_VS_PORT8_DMACVIDSEL_OFFSET + (port-8), pEnabled);
+}
+/* Function Name:
+ *      rtl8367c_setAsicSvlanUntagVlan
+ * Description:
+ *      Set default ingress untag SVLAN
+ * Input:
+ *      index   - index SVLAN member configuration
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK                   - Success
+ *      RT_ERR_SMI                  - SMI access error
+ *      RT_ERR_SVLAN_ENTRY_INDEX    - Invalid SVLAN index parameter
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_setAsicSvlanUntagVlan(rtk_uint32 index)
+{
+    if(index > RTL8367C_SVIDXMAX)
+        return RT_ERR_SVLAN_ENTRY_INDEX;
+
+    return rtl8367c_setAsicRegBits(RTL8367C_REG_SVLAN_UNTAG_UNMAT_CFG, RTL8367C_VS_UNTAG_SVIDX_MASK, index);
+}
+/* Function Name:
+ *      rtl8367c_getAsicSvlanUntagVlan
+ * Description:
+ *      Get default ingress untag SVLAN
+ * Input:
+ *      pIndex  - index SVLAN member configuration
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK       - Success
+ *      RT_ERR_SMI      - SMI access error
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicSvlanUntagVlan(rtk_uint32* pIndex)
+{
+    return rtl8367c_getAsicRegBits(RTL8367C_REG_SVLAN_UNTAG_UNMAT_CFG, RTL8367C_VS_UNTAG_SVIDX_MASK, pIndex);
+}
+
+/* Function Name:
+ *      rtl8367c_setAsicSvlanUnmatchVlan
+ * Description:
+ *      Set default ingress unmatch SVLAN
+ * Input:
+ *      index   - index SVLAN member configuration
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK                   - Success
+ *      RT_ERR_SMI                  - SMI access error
+ *      RT_ERR_SVLAN_ENTRY_INDEX    - Invalid SVLAN index parameter
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_setAsicSvlanUnmatchVlan(rtk_uint32 index)
+{
+    if(index > RTL8367C_SVIDXMAX)
+        return RT_ERR_SVLAN_ENTRY_INDEX;
+
+    return rtl8367c_setAsicRegBits(RTL8367C_REG_SVLAN_UNTAG_UNMAT_CFG, RTL8367C_VS_UNMAT_SVIDX_MASK, index);
+}
+/* Function Name:
+ *      rtl8367c_getAsicSvlanUnmatchVlan
+ * Description:
+ *      Get default ingress unmatch SVLAN
+ * Input:
+ *      pIndex  - index SVLAN member configuration
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK       - Success
+ *      RT_ERR_SMI      - SMI access error
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicSvlanUnmatchVlan(rtk_uint32* pIndex)
+{
+    return rtl8367c_getAsicRegBits(RTL8367C_REG_SVLAN_UNTAG_UNMAT_CFG, RTL8367C_VS_UNMAT_SVIDX_MASK, pIndex);
+}
+
+
+/* Function Name:
+ *      rtl8367c_setAsicSvlanLookupType
+ * Description:
+ *      Set svlan lookup table selection
+ * Input:
+ *      type    - lookup type
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK                   - Success
+ *      RT_ERR_SMI                  - SMI access error
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_setAsicSvlanLookupType(rtk_uint32 type)
+{
+    return rtl8367c_setAsicRegBit(RTL8367C_REG_SVLAN_LOOKUP_TYPE, RTL8367C_SVLAN_LOOKUP_TYPE_OFFSET, type);
+}
+
+/* Function Name:
+ *      rtl8367c_getAsicSvlanLookupType
+ * Description:
+ *      Get svlan lookup table selection
+ * Input:
+ *      pType   - lookup type
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK                   - Success
+ *      RT_ERR_SMI                  - SMI access error
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicSvlanLookupType(rtk_uint32* pType)
+{
+    return rtl8367c_getAsicRegBit(RTL8367C_REG_SVLAN_LOOKUP_TYPE, RTL8367C_SVLAN_LOOKUP_TYPE_OFFSET, pType);
+}
+
+
diff -Nruw linux-4.4.115-fbx/drivers/misc/freebox./rtlapi/rtl8367c_asicdrv_svlan.h linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/rtl8367c_asicdrv_svlan.h
--- linux-4.4.115-fbx/drivers/misc/freebox./rtlapi/rtl8367c_asicdrv_svlan.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/rtl8367c_asicdrv_svlan.h	2019-01-22 16:16:24.715257382 +0100
@@ -0,0 +1,115 @@
+#ifndef _RTL8367C_ASICDRV_SVLAN_H_
+#define _RTL8367C_ASICDRV_SVLAN_H_
+
+#include <rtl8367c_asicdrv.h>
+
+#define RTL8367C_C2SIDXNO               128
+#define RTL8367C_C2SIDXMAX              (RTL8367C_C2SIDXNO-1)
+#define RTL8367C_MC2SIDXNO              32
+#define RTL8367C_MC2SIDXMAX             (RTL8367C_MC2SIDXNO-1)
+#define RTL8367C_SP2CIDXNO              128
+#define RTL8367C_SP2CMAX                (RTL8367C_SP2CIDXNO-1)
+
+#define RTL8367C_SVLAN_MEMCONF_LEN      4
+#define RTL8367C_SVLAN_MC2S_LEN         5
+#define RTL8367C_SVLAN_SP2C_LEN         2
+
+enum RTL8367C_SPRISEL
+{
+    SPRISEL_INTERNALPRI =  0,
+    SPRISEL_CTAGPRI,
+    SPRISEL_VSPRI,
+    SPRISEL_PBPRI,
+    SPRISEL_END
+};
+
+enum RTL8367C_SUNACCEPT
+{
+    SUNACCEPT_DROP =  0,
+    SUNACCEPT_TRAP,
+    SUNACCEPT_SVLAN,
+    SUNACCEPT_END
+};
+
+enum RTL8367C_SVLAN_MC2S_MODE
+{
+    SVLAN_MC2S_MODE_MAC =  0,
+    SVLAN_MC2S_MODE_IP,
+    SVLAN_MC2S_MODE_END
+};
+
+
+typedef struct  rtl8367c_svlan_memconf_s{
+
+    rtk_uint16 vs_member:11;
+    rtk_uint16 vs_untag:11;
+
+    rtk_uint16 vs_fid_msti:4;
+    rtk_uint16 vs_priority:3;
+    rtk_uint16 vs_force_fid:1;
+    rtk_uint16 reserved:8;
+
+    rtk_uint16 vs_svid:12;
+    rtk_uint16 vs_efiden:1;
+    rtk_uint16 vs_efid:3;
+
+
+}rtl8367c_svlan_memconf_t;
+
+
+typedef struct  rtl8367c_svlan_mc2s_s{
+
+    rtk_uint16 valid:1;
+    rtk_uint16 format:1;
+    rtk_uint16 svidx:6;
+    rtk_uint32 sdata;
+    rtk_uint32 smask;
+}rtl8367c_svlan_mc2s_t;
+
+
+typedef struct  rtl8367c_svlan_s2c_s{
+
+    rtk_uint16 valid:1;
+    rtk_uint16 svidx:6;
+    rtk_uint16 dstport:4;
+    rtk_uint32 vid:12;
+}rtl8367c_svlan_s2c_t;
+
+extern ret_t rtl8367c_setAsicSvlanIngressUntag(rtk_uint32 mode);
+extern ret_t rtl8367c_getAsicSvlanIngressUntag(rtk_uint32* pMode);
+extern ret_t rtl8367c_setAsicSvlanIngressUnmatch(rtk_uint32 mode);
+extern ret_t rtl8367c_getAsicSvlanIngressUnmatch(rtk_uint32* pMode);
+extern ret_t rtl8367c_setAsicSvlanTrapPriority(rtk_uint32 priority);
+extern ret_t rtl8367c_getAsicSvlanTrapPriority(rtk_uint32* pPriority);
+extern ret_t rtl8367c_setAsicSvlanDefaultVlan(rtk_uint32 port, rtk_uint32 index);
+extern ret_t rtl8367c_getAsicSvlanDefaultVlan(rtk_uint32 port, rtk_uint32* pIndex);
+
+extern ret_t rtl8367c_setAsicSvlanMemberConfiguration(rtk_uint32 index,rtl8367c_svlan_memconf_t* pSvlanMemCfg);
+extern ret_t rtl8367c_getAsicSvlanMemberConfiguration(rtk_uint32 index,rtl8367c_svlan_memconf_t* pSvlanMemCfg);
+
+extern ret_t rtl8367c_setAsicSvlanPrioritySel(rtk_uint32 priSel);
+extern ret_t rtl8367c_getAsicSvlanPrioritySel(rtk_uint32* pPriSel);
+extern ret_t rtl8367c_setAsicSvlanTpid(rtk_uint32 protocolType);
+extern ret_t rtl8367c_getAsicSvlanTpid(rtk_uint32* pProtocolType);
+extern ret_t rtl8367c_setAsicSvlanUplinkPortMask(rtk_uint32 portMask);
+extern ret_t rtl8367c_getAsicSvlanUplinkPortMask(rtk_uint32* pPortmask);
+extern ret_t rtl8367c_setAsicSvlanEgressUnassign(rtk_uint32 enabled);
+extern ret_t rtl8367c_getAsicSvlanEgressUnassign(rtk_uint32* pEnabled);
+extern ret_t rtl8367c_setAsicSvlanC2SConf(rtk_uint32 index, rtk_uint32 evid, rtk_uint32 portmask, rtk_uint32 svidx);
+extern ret_t rtl8367c_getAsicSvlanC2SConf(rtk_uint32 index, rtk_uint32* pEvid, rtk_uint32* pPortmask, rtk_uint32* pSvidx);
+extern ret_t rtl8367c_setAsicSvlanMC2SConf(rtk_uint32 index,rtl8367c_svlan_mc2s_t* pSvlanMc2sCfg);
+extern ret_t rtl8367c_getAsicSvlanMC2SConf(rtk_uint32 index,rtl8367c_svlan_mc2s_t* pSvlanMc2sCfg);
+extern ret_t rtl8367c_setAsicSvlanSP2CConf(rtk_uint32 index,rtl8367c_svlan_s2c_t* pSvlanSp2cCfg);
+extern ret_t rtl8367c_getAsicSvlanSP2CConf(rtk_uint32 index,rtl8367c_svlan_s2c_t* pSvlanSp2cCfg);
+extern ret_t rtl8367c_setAsicSvlanDmacCvidSel(rtk_uint32 port, rtk_uint32 enabled);
+extern ret_t rtl8367c_getAsicSvlanDmacCvidSel(rtk_uint32 port, rtk_uint32* pEnabled);
+extern ret_t rtl8367c_setAsicSvlanUntagVlan(rtk_uint32 index);
+extern ret_t rtl8367c_getAsicSvlanUntagVlan(rtk_uint32* pIndex);
+extern ret_t rtl8367c_setAsicSvlanUnmatchVlan(rtk_uint32 index);
+extern ret_t rtl8367c_getAsicSvlanUnmatchVlan(rtk_uint32* pIndex);
+extern ret_t rtl8367c_setAsicSvlanLookupType(rtk_uint32 type);
+extern ret_t rtl8367c_getAsicSvlanLookupType(rtk_uint32* pType);
+
+
+#endif /*#ifndef _RTL8367C_ASICDRV_SVLAN_H_*/
+
diff -Nruw linux-4.4.115-fbx/drivers/misc/freebox./rtlapi/rtl8367c_asicdrv_trunking.c linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/rtl8367c_asicdrv_trunking.c
--- linux-4.4.115-fbx/drivers/misc/freebox./rtlapi/rtl8367c_asicdrv_trunking.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/rtl8367c_asicdrv_trunking.c	2019-01-22 16:16:24.719257418 +0100
@@ -0,0 +1,358 @@
+/*
+ * Copyright (C) 2013 Realtek Semiconductor Corp.
+ * All Rights Reserved.
+ *
+ * This program is the proprietary software of Realtek Semiconductor
+ * Corporation and/or its licensors, and may only be used, duplicated,
+ * modified or distributed under the authorized license from Realtek.
+ *
+ * ANY USE OF THE SOFTWARE OTHER THAN AS AUTHORIZED UNDER
+ * THIS LICENSE OR COPYRIGHT LAW IS PROHIBITED.
+ *
+ * $Revision: 76306 $
+ * $Date: 2017-03-08 15:13:58 +0800 (Wed, 08 Mar 2017) $
+ *
+ * Purpose : RTL8367C switch high-level API for RTL8367C
+ * Feature : Port trunking related functions
+ *
+ */
+
+#include <rtl8367c_asicdrv_trunking.h>
+/* Function Name:
+ *      rtl8367c_setAsicTrunkingMode
+ * Description:
+ *      Set port trunking mode
+ * Input:
+ *      mode    - 1:dumb 0:user defined
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK   - Success
+ *      RT_ERR_SMI  - SMI access error
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_setAsicTrunkingMode(rtk_uint32 mode)
+{
+    return rtl8367c_setAsicRegBit(RTL8367C_REG_PORT_TRUNK_CTRL, RTL8367C_PORT_TRUNK_DUMB_OFFSET, mode);
+}
+/* Function Name:
+ *      rtl8367c_getAsicTrunkingMode
+ * Description:
+ *      Get port trunking mode
+ * Input:
+ *      pMode   - 1:dumb 0:user defined
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK   - Success
+ *      RT_ERR_SMI  - SMI access error
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicTrunkingMode(rtk_uint32* pMode)
+{
+    return rtl8367c_getAsicRegBit(RTL8367C_REG_PORT_TRUNK_CTRL, RTL8367C_PORT_TRUNK_DUMB_OFFSET, pMode);
+}
+/* Function Name:
+ *      rtl8367c_setAsicTrunkingFc
+ * Description:
+ *      Set port trunking flow control
+ * Input:
+ *      group       - Trunk Group ID
+ *      enabled     - 0:disable, 1:enable
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK   - Success
+ *      RT_ERR_SMI  - SMI access error
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_setAsicTrunkingFc(rtk_uint32 group, rtk_uint32 enabled)
+{
+    ret_t       retVal;
+
+    if(group > RTL8367C_MAX_TRUNK_GID)
+        return RT_ERR_LA_TRUNK_ID;
+
+    if((retVal = rtl8367c_setAsicRegBit(RTL8367C_REG_PORT_TRUNK_DROP_CTRL, RTL8367C_PORT_TRUNK_DROP_CTRL_OFFSET, ENABLED)) != RT_ERR_OK)
+        return retVal;
+
+    return rtl8367c_setAsicRegBit(RTL8367C_REG_PORT_TRUNK_FLOWCTRL, (RTL8367C_EN_FLOWCTRL_TG0_OFFSET + group), enabled);
+}
+/* Function Name:
+ *      rtl8367c_getAsicTrunkingFc
+ * Description:
+ *      Get port trunking flow control
+ * Input:
+ *      group       - Trunk Group ID
+ *      pEnabled    - 0:disable, 1:enable
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK   - Success
+ *      RT_ERR_SMI  - SMI access error
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicTrunkingFc(rtk_uint32 group, rtk_uint32* pEnabled)
+{
+    if(group > RTL8367C_MAX_TRUNK_GID)
+        return RT_ERR_LA_TRUNK_ID;
+
+    return rtl8367c_getAsicRegBit(RTL8367C_REG_PORT_TRUNK_FLOWCTRL, (RTL8367C_EN_FLOWCTRL_TG0_OFFSET + group), pEnabled);
+}
+/* Function Name:
+ *      rtl8367c_setAsicTrunkingGroup
+ * Description:
+ *      Set trunking group available port mask
+ * Input:
+ *      group       - Trunk Group ID
+ *      portmask    - Logic trunking enable port mask, max 4 ports
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK   - Success
+ *      RT_ERR_SMI  - SMI access error
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_setAsicTrunkingGroup(rtk_uint32 group, rtk_uint32 portmask)
+{
+    if(group > RTL8367C_MAX_TRUNK_GID)
+        return RT_ERR_LA_TRUNK_ID;
+    return rtl8367c_setAsicRegBits(RTL8367C_REG_PORT_TRUNK_GROUP_MASK, RTL8367C_PORT_TRUNK_GROUP0_MASK_MASK << (group * 4), portmask);
+}
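+
+/*
+ * Example (editor's sketch, not part of the Realtek SDK): aggregate ports 0
+ * and 1 into trunk group 0. Each group occupies a 4-bit field in
+ * RTL8367C_REG_PORT_TRUNK_GROUP_MASK, which is why the mask above is shifted
+ * by (group * 4).
+ */
+static ret_t example_trunk_group_setup(void)
+{
+    return rtl8367c_setAsicTrunkingGroup(0, 0x3);   /* group 0, port mask 0b0011 */
+}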
+/* Function Name:
+ *      rtl8367c_getAsicTrunkingGroup
+ * Description:
+ *      Get trunking group available port mask
+ * Input:
+ *      group       - Trunk Group ID
+ * Output:
+ *      pPortmask   - Logic trunking enable port mask, max 4 ports
+ * Return:
+ *      RT_ERR_OK   - Success
+ *      RT_ERR_SMI  - SMI access error
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicTrunkingGroup(rtk_uint32 group, rtk_uint32* pPortmask)
+{
+    if(group > RTL8367C_MAX_TRUNK_GID)
+        return RT_ERR_LA_TRUNK_ID;
+
+    return rtl8367c_getAsicRegBits(RTL8367C_REG_PORT_TRUNK_GROUP_MASK, RTL8367C_PORT_TRUNK_GROUP0_MASK_MASK << (group * 4), pPortmask);
+}
+/* Function Name:
+ *      rtl8367c_setAsicTrunkingFlood
+ * Description:
+ *      Set port trunking flood function
+ * Input:
+ *      enabled     - Port trunking flooding function 0:disable 1:enable
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK   - Success
+ *      RT_ERR_SMI  - SMI access error
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_setAsicTrunkingFlood(rtk_uint32 enabled)
+{
+    return rtl8367c_setAsicRegBit(RTL8367C_REG_PORT_TRUNK_CTRL, RTL8367C_PORT_TRUNK_FLOOD_OFFSET, enabled);
+}
+/* Function Name:
+ *      rtl8367c_getAsicTrunkingFlood
+ * Description:
+ *      Get port trunking flood function
+ * Input:
+ *      pEnabled    - Port trunking flooding function 0:disable 1:enable
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK   - Success
+ *      RT_ERR_SMI  - SMI access error
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicTrunkingFlood(rtk_uint32* pEnabled)
+{
+    return rtl8367c_getAsicRegBit(RTL8367C_REG_PORT_TRUNK_CTRL, RTL8367C_PORT_TRUNK_FLOOD_OFFSET, pEnabled);
+}
+/* Function Name:
+ *      rtl8367c_setAsicTrunkingHashSelect
+ * Description:
+ *      Set port trunking hash select sources
+ * Input:
+ *      hashsel     - hash sources mask
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK   - Success
+ *      RT_ERR_SMI  - SMI access error
+ * Note:
+ *      7-bit mask for link aggregation group hash parameter selection {TCP/UDP ports, DIP, SIP, DMAC, SMAC, SPA}
+ *      0b0000001: SPA
+ *      0b0000010: SMAC
+ *      0b0000100: DMAC
+ *      0b0001000: SIP
+ *      0b0010000: DIP
+ *      0b0100000: TCP/UDP Source Port
+ *      0b1000000: TCP/UDP Destination Port
+ */
+ret_t rtl8367c_setAsicTrunkingHashSelect(rtk_uint32 hashsel)
+{
+    return rtl8367c_setAsicRegBits(RTL8367C_REG_PORT_TRUNK_CTRL, RTL8367C_PORT_TRUNK_HASH_MASK, hashsel);
+}
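+
+/*
+ * Example (editor's sketch, not part of the Realtek SDK): hash on SMAC+DMAC
+ * for flow distribution. Bit positions follow the 7-bit encoding documented
+ * above (SMAC = 0b0000010, DMAC = 0b0000100), so the combined selector is
+ * 0b0000110.
+ */
+static ret_t example_trunk_hash_setup(void)
+{
+    return rtl8367c_setAsicTrunkingHashSelect(0x02 | 0x04);
+}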
+/* Function Name:
+ *      rtl8367c_getAsicTrunkingHashSelect
+ * Description:
+ *      Get port trunking hash select sources
+ * Input:
+ *      pHashsel    - hash sources mask
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK   - Success
+ *      RT_ERR_SMI  - SMI access error
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicTrunkingHashSelect(rtk_uint32* pHashsel)
+{
+    return rtl8367c_getAsicRegBits(RTL8367C_REG_PORT_TRUNK_CTRL, RTL8367C_PORT_TRUNK_HASH_MASK, pHashsel);
+}
+/* Function Name:
+ *      rtl8367c_getAsicQeueuEmptyStatus
+ * Description:
+ *      Get current output queue empty status
+ * Input:
+ *      portmask    - queue empty port mask
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK   - Success
+ *      RT_ERR_SMI  - SMI access error
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicQeueuEmptyStatus(rtk_uint32* portmask)
+{
+    return rtl8367c_getAsicReg(RTL8367C_REG_PORT_QEMPTY, portmask);
+}
+/* Function Name:
+ *      rtl8367c_setAsicTrunkingHashTable
+ * Description:
+ *      Set port trunking hash value mapping table
+ * Input:
+ *      hashval     - hashing value 0-15
+ *      portId      - trunking port id 0-3
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - Success
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_PORT_ID      - Invalid port number
+ *      RT_ERR_OUT_OF_RANGE - Invalid hashing value (0-15)
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_setAsicTrunkingHashTable(rtk_uint32 hashval, rtk_uint32 portId)
+{
+    if(hashval > RTL8367C_TRUNKING_HASHVALUE_MAX)
+        return RT_ERR_OUT_OF_RANGE;
+
+    if(portId >= RTL8367C_TRUNKING_PORTNO)
+        return RT_ERR_PORT_ID;
+
+    if(hashval >= 8)
+        return rtl8367c_setAsicRegBits(RTL8367C_REG_PORT_TRUNK_HASH_MAPPING_CTRL1, RTL8367C_PORT_TRUNK_HASH_MAPPING_CTRL1_HASH8_MASK<<((hashval-8)*2), portId);
+    else
+        return rtl8367c_setAsicRegBits(RTL8367C_REG_PORT_TRUNK_HASH_MAPPING_CTRL0, RTL8367C_PORT_TRUNK_HASH_MAPPING_CTRL0_HASH0_MASK<<(hashval*2), portId);
+}
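+
+/*
+ * Example (editor's sketch, not part of the Realtek SDK): spread the 16 hash
+ * values round-robin over the 4 trunk member ports. Hash values 0~7 live in
+ * MAPPING_CTRL0 and 8~15 in MAPPING_CTRL1; the function above selects the
+ * register automatically.
+ */
+static ret_t example_trunk_hash_table_setup(void)
+{
+    rtk_uint32 hashval;
+    ret_t retVal;
+
+    for(hashval = 0; hashval <= RTL8367C_TRUNKING_HASHVALUE_MAX; hashval++)
+    {
+        retVal = rtl8367c_setAsicTrunkingHashTable(hashval, hashval % RTL8367C_TRUNKING_PORTNO);
+        if(retVal != RT_ERR_OK)
+            return retVal;
+    }
+
+    return RT_ERR_OK;
+}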
+/* Function Name:
+ *      rtl8367c_getAsicTrunkingHashTable
+ * Description:
+ *      Get port trunking hash value mapping table
+ * Input:
+ *      hashval     - hashing value 0-15
+ *      pPortId         - trunking port id 0-3
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - Success
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_OUT_OF_RANGE - Invalid hashing value (0-15)
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicTrunkingHashTable(rtk_uint32 hashval, rtk_uint32* pPortId)
+{
+    if(hashval > RTL8367C_TRUNKING_HASHVALUE_MAX)
+        return RT_ERR_OUT_OF_RANGE;
+
+    if(hashval >= 8)
+        return rtl8367c_getAsicRegBits(RTL8367C_REG_PORT_TRUNK_HASH_MAPPING_CTRL1, RTL8367C_PORT_TRUNK_HASH_MAPPING_CTRL1_HASH8_MASK<<((hashval-8)*2), pPortId);
+    else
+        return rtl8367c_getAsicRegBits(RTL8367C_REG_PORT_TRUNK_HASH_MAPPING_CTRL0, RTL8367C_PORT_TRUNK_HASH_MAPPING_CTRL0_HASH0_MASK<<(hashval*2), pPortId);
+}
+
+/* Function Name:
+ *      rtl8367c_setAsicTrunkingHashTable1
+ * Description:
+ *      Set port trunking hash value mapping table
+ * Input:
+ *      hashval     - hashing value 0-15
+ *      portId      - trunking port id 0-1
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - Success
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_PORT_ID      - Invalid port number
+ *      RT_ERR_OUT_OF_RANGE - Invalid hashing value (0-15)
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_setAsicTrunkingHashTable1(rtk_uint32 hashval, rtk_uint32 portId)
+{
+    if(hashval > RTL8367C_TRUNKING_HASHVALUE_MAX)
+        return RT_ERR_OUT_OF_RANGE;
+
+    if(portId >= RTL8367C_TRUNKING1_PORTN0)
+        return RT_ERR_PORT_ID;
+
+    if(hashval >= 8)
+        return rtl8367c_setAsicRegBits(RTL8367C_REG_PORT_TRUNK_HASH_MAPPING_CTRL3, RTL8367C_PORT_TRUNK_HASH_MAPPING_CTRL3_HASH8_MASK<<((hashval-8)*2), portId);
+    else
+        return rtl8367c_setAsicRegBits(RTL8367C_REG_PORT_TRUNK_HASH_MAPPING_CTRL2, RTL8367C_PORT_TRUNK_HASH_MAPPING_CTRL2_HASH0_MASK<<(hashval*2), portId);
+}
+/* Function Name:
+ *      rtl8367c_getAsicTrunkingHashTable1
+ * Description:
+ *      Get port trunking hash value mapping table
+ * Input:
+ *      hashval     - hashing value 0-15
+ *      pPortId     - trunking port id 0-1
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - Success
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_OUT_OF_RANGE - Invalid hashing value (0-15)
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicTrunkingHashTable1(rtk_uint32 hashval, rtk_uint32* pPortId)
+{
+    if(hashval > RTL8367C_TRUNKING_HASHVALUE_MAX)
+        return RT_ERR_OUT_OF_RANGE;
+
+    if(hashval >= 8)
+        return rtl8367c_getAsicRegBits(RTL8367C_REG_PORT_TRUNK_HASH_MAPPING_CTRL3, RTL8367C_PORT_TRUNK_HASH_MAPPING_CTRL3_HASH8_MASK<<((hashval-8)*2), pPortId);
+    else
+        return rtl8367c_getAsicRegBits(RTL8367C_REG_PORT_TRUNK_HASH_MAPPING_CTRL2, RTL8367C_PORT_TRUNK_HASH_MAPPING_CTRL2_HASH0_MASK<<(hashval*2), pPortId);
+}
+
diff -Nruw linux-4.4.115-fbx/drivers/misc/freebox./rtlapi/rtl8367c_asicdrv_trunking.h linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/rtl8367c_asicdrv_trunking.h
--- linux-4.4.115-fbx/drivers/misc/freebox./rtlapi/rtl8367c_asicdrv_trunking.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/rtl8367c_asicdrv_trunking.h	2019-01-22 16:16:24.719257418 +0100
@@ -0,0 +1,30 @@
+#ifndef _RTL8367C_ASICDRV_TRUNKING_H_
+#define _RTL8367C_ASICDRV_TRUNKING_H_
+
+#include <rtl8367c_asicdrv.h>
+
+#define RTL8367C_MAX_TRUNK_GID              (2)
+#define RTL8367C_TRUNKING_PORTNO            (4)
+#define RTL8367C_TRUNKING1_PORTN0           (2)
+#define RTL8367C_TRUNKING_HASHVALUE_MAX     (15)
+
+extern ret_t rtl8367c_setAsicTrunkingGroup(rtk_uint32 group, rtk_uint32 portmask);
+extern ret_t rtl8367c_getAsicTrunkingGroup(rtk_uint32 group, rtk_uint32* pPortmask);
+extern ret_t rtl8367c_setAsicTrunkingFlood(rtk_uint32 enabled);
+extern ret_t rtl8367c_getAsicTrunkingFlood(rtk_uint32* pEnabled);
+extern ret_t rtl8367c_setAsicTrunkingHashSelect(rtk_uint32 hashsel);
+extern ret_t rtl8367c_getAsicTrunkingHashSelect(rtk_uint32* pHashsel);
+
+extern ret_t rtl8367c_getAsicQeueuEmptyStatus(rtk_uint32* pPortmask);
+
+extern ret_t rtl8367c_setAsicTrunkingMode(rtk_uint32 mode);
+extern ret_t rtl8367c_getAsicTrunkingMode(rtk_uint32* pMode);
+extern ret_t rtl8367c_setAsicTrunkingFc(rtk_uint32 group, rtk_uint32 enabled);
+extern ret_t rtl8367c_getAsicTrunkingFc(rtk_uint32 group, rtk_uint32* pEnabled);
+extern ret_t rtl8367c_setAsicTrunkingHashTable(rtk_uint32 hashval, rtk_uint32 portId);
+extern ret_t rtl8367c_getAsicTrunkingHashTable(rtk_uint32 hashval, rtk_uint32* pPortId);
+extern ret_t rtl8367c_setAsicTrunkingHashTable1(rtk_uint32 hashval, rtk_uint32 portId);
+extern ret_t rtl8367c_getAsicTrunkingHashTable1(rtk_uint32 hashval, rtk_uint32* pPortId);
+
+#endif /*_RTL8367C_ASICDRV_TRUNKING_H_*/
+
diff -Nruw linux-4.4.115-fbx/drivers/misc/freebox./rtlapi/rtl8367c_asicdrv_unknownMulticast.c linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/rtl8367c_asicdrv_unknownMulticast.c
--- linux-4.4.115-fbx/drivers/misc/freebox./rtlapi/rtl8367c_asicdrv_unknownMulticast.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/rtl8367c_asicdrv_unknownMulticast.c	2019-01-22 16:16:24.719257418 +0100
@@ -0,0 +1,238 @@
+/*
+ * Copyright (C) 2013 Realtek Semiconductor Corp.
+ * All Rights Reserved.
+ *
+ * This program is the proprietary software of Realtek Semiconductor
+ * Corporation and/or its licensors, and may only be used, duplicated,
+ * modified or distributed under the authorized license from Realtek.
+ *
+ * ANY USE OF THE SOFTWARE OTHER THAN AS AUTHORIZED UNDER
+ * THIS LICENSE OR COPYRIGHT LAW IS PROHIBITED.
+ *
+ * $Revision: 76306 $
+ * $Date: 2017-03-08 15:13:58 +0800 (Wed, 08 Mar 2017) $
+ *
+ * Purpose : RTL8367C switch high-level API for RTL8367C
+ * Feature : Unknown multicast related functions
+ *
+ */
+#include <rtl8367c_asicdrv_unknownMulticast.h>
+/* Function Name:
+ *      rtl8367c_setAsicUnknownL2MulticastBehavior
+ * Description:
+ *      Set behavior of L2 multicast
+ * Input:
+ *      port    - Physical port number (0~10)
+ *      behave  - 0: flooding, 1: drop, 2: trap, 3: drop (exclude RMA)
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - Success
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_PORT_ID      - Invalid port number
+ *      RT_ERR_NOT_ALLOWED  - Invalid operation
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_setAsicUnknownL2MulticastBehavior(rtk_uint32 port, rtk_uint32 behave)
+{
+    ret_t retVal;
+
+    if(port >  RTL8367C_PORTIDMAX)
+        return RT_ERR_PORT_ID;
+
+    if(behave >= L2_UNKOWN_MULTICAST_END)
+        return RT_ERR_NOT_ALLOWED;
+    if(port < 8)
+    {
+        retVal = rtl8367c_setAsicRegBits(RTL8367C_UNKNOWN_L2_MULTICAST_REG(port), RTL8367C_UNKNOWN_L2_MULTICAST_MASK(port), behave);
+        if(retVal != RT_ERR_OK)
+            return retVal;
+    }
+    else
+    {
+        retVal = rtl8367c_setAsicRegBits(RTL8367C_REG_UNKNOWN_L2_MULTICAST_CTRL1, 3 << ((port - 8) << 1), behave);
+        if(retVal != RT_ERR_OK)
+            return retVal;
+    }
+
+    return RT_ERR_OK;
+}
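+
+/*
+ * Example (editor's sketch, not part of the Realtek SDK): drop unknown L2
+ * multicast on every port. RTL8367C_PORTIDMAX comes from rtl8367c_asicdrv.h;
+ * the behavior enum is declared in rtl8367c_asicdrv_unknownMulticast.h below.
+ */
+static ret_t example_drop_unknown_l2_mcast(void)
+{
+    rtk_uint32 port;
+    ret_t retVal;
+
+    for(port = 0; port <= RTL8367C_PORTIDMAX; port++)
+    {
+        retVal = rtl8367c_setAsicUnknownL2MulticastBehavior(port, L2_UNKOWN_MULTICAST_DROP);
+        if(retVal != RT_ERR_OK)
+            return retVal;
+    }
+
+    return RT_ERR_OK;
+}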
+/* Function Name:
+ *      rtl8367c_getAsicUnknownL2MulticastBehavior
+ * Description:
+ *      Get behavior of L2 multicast
+ * Input:
+ *      port    - Physical port number (0~10)
+ *      pBehave     - 0: flooding, 1: drop, 2: trap, 3: drop (exclude RMA)
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK       - Success
+ *      RT_ERR_SMI      - SMI access error
+ *      RT_ERR_PORT_ID  - Invalid port number
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicUnknownL2MulticastBehavior(rtk_uint32 port, rtk_uint32 *pBehave)
+{
+    ret_t retVal;
+
+    if(port >  RTL8367C_PORTIDMAX)
+        return RT_ERR_PORT_ID;
+
+    if(port < 8)
+    {
+        retVal = rtl8367c_getAsicRegBits(RTL8367C_UNKNOWN_L2_MULTICAST_REG(port), RTL8367C_UNKNOWN_L2_MULTICAST_MASK(port), pBehave);
+        if (retVal != RT_ERR_OK)
+            return retVal;
+    }
+    else
+    {
+        retVal = rtl8367c_getAsicRegBits(RTL8367C_REG_UNKNOWN_L2_MULTICAST_CTRL1, 3 << ((port - 8) << 1), pBehave);
+        if (retVal != RT_ERR_OK)
+            return retVal;
+    }
+
+    return RT_ERR_OK;
+}
+/* Function Name:
+ *      rtl8367c_setAsicUnknownIPv4MulticastBehavior
+ * Description:
+ *      Set behavior of IPv4 multicast
+ * Input:
+ *      port    - Physical port number (0~10)
+ *      behave  - 0: flooding, 1: drop, 2: trap, 3: router
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - Success
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_PORT_ID      - Invalid port number
+ *      RT_ERR_NOT_ALLOWED  - Invalid operation
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_setAsicUnknownIPv4MulticastBehavior(rtk_uint32 port, rtk_uint32 behave)
+{
+    if(port >  RTL8367C_PORTIDMAX)
+        return RT_ERR_PORT_ID;
+
+    if(behave >= L3_UNKOWN_MULTICAST_END)
+        return RT_ERR_NOT_ALLOWED;
+
+    return rtl8367c_setAsicRegBits(RTL8367C_UNKNOWN_IPV4_MULTICAST_REG(port), RTL8367C_UNKNOWN_IPV4_MULTICAST_MASK(port), behave);
+}
+/* Function Name:
+ *      rtl8367c_getAsicUnknownIPv4MulticastBehavior
+ * Description:
+ *      Get behavior of IPv4 multicast
+ * Input:
+ *      port    - Physical port number (0~10)
+ *      pBehave     - 0: flooding, 1: drop, 2: trap, 3: router
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK       - Success
+ *      RT_ERR_SMI      - SMI access error
+ *      RT_ERR_PORT_ID  - Invalid port number
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicUnknownIPv4MulticastBehavior(rtk_uint32 port, rtk_uint32 *pBehave)
+{
+    if(port >  RTL8367C_PORTIDMAX)
+        return RT_ERR_PORT_ID;
+
+    return rtl8367c_getAsicRegBits(RTL8367C_UNKNOWN_IPV4_MULTICAST_REG(port), RTL8367C_UNKNOWN_IPV4_MULTICAST_MASK(port), pBehave);
+}
+/* Function Name:
+ *      rtl8367c_setAsicUnknownIPv6MulticastBehavior
+ * Description:
+ *      Set behavior of IPv6 multicast
+ * Input:
+ *      port    - Physical port number (0~10)
+ *      behave  - 0: flooding, 1: drop, 2: trap, 3: router
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - Success
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_PORT_ID      - Invalid port number
+ *      RT_ERR_NOT_ALLOWED  - Invalid operation
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_setAsicUnknownIPv6MulticastBehavior(rtk_uint32 port, rtk_uint32 behave)
+{
+    if(port >  RTL8367C_PORTIDMAX)
+        return RT_ERR_PORT_ID;
+
+    if(behave >= L3_UNKOWN_MULTICAST_END)
+        return RT_ERR_NOT_ALLOWED;
+
+    return rtl8367c_setAsicRegBits(RTL8367C_UNKNOWN_IPV6_MULTICAST_REG(port), RTL8367C_UNKNOWN_IPV6_MULTICAST_MASK(port), behave);
+}
+/* Function Name:
+ *      rtl8367c_getAsicUnknownIPv6MulticastBehavior
+ * Description:
+ *      Get behavior of IPv6 multicast
+ * Input:
+ *      port    - Physical port number (0~10)
+ *      pBehave     - 0: flooding, 1: drop, 2: trap, 3: router
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK       - Success
+ *      RT_ERR_SMI      - SMI access error
+ *      RT_ERR_PORT_ID  - Invalid port number
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicUnknownIPv6MulticastBehavior(rtk_uint32 port, rtk_uint32 *pBehave)
+{
+    if(port >  RTL8367C_PORTIDMAX)
+        return RT_ERR_PORT_ID;
+
+    return rtl8367c_getAsicRegBits(RTL8367C_UNKNOWN_IPV6_MULTICAST_REG(port), RTL8367C_UNKNOWN_IPV6_MULTICAST_MASK(port), pBehave);
+}
+/* Function Name:
+ *      rtl8367c_setAsicUnknownMulticastTrapPriority
+ * Description:
+ *      Set trap priority of unknown multicast frame
+ * Input:
+ *      priority    - priority (0~7)
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK               - Success
+ *      RT_ERR_SMI              - SMI access error
+ *      RT_ERR_QOS_INT_PRIORITY - Invalid priority
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_setAsicUnknownMulticastTrapPriority(rtk_uint32 priority)
+{
+    if(priority > RTL8367C_PRIMAX)
+        return RT_ERR_QOS_INT_PRIORITY;
+
+    return rtl8367c_setAsicRegBits(RTL8367C_QOS_TRAP_PRIORITY_CTRL0_REG, RTL8367C_UNKNOWN_MC_PRIORTY_MASK, priority);
+}
+/* Function Name:
+ *      rtl8367c_getAsicUnknownMulticastTrapPriority
+ * Description:
+ *      Get trap priority of unknown multicast frame
+ * Input:
+ *      pPriority   - priority (0~7)
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK   - Success
+ *      RT_ERR_SMI  - SMI access error
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicUnknownMulticastTrapPriority(rtk_uint32 *pPriority)
+{
+    return rtl8367c_getAsicRegBits(RTL8367C_QOS_TRAP_PRIORITY_CTRL0_REG, RTL8367C_UNKNOWN_MC_PRIORTY_MASK, pPriority);
+}
diff -Nruw linux-4.4.115-fbx/drivers/misc/freebox./rtlapi/rtl8367c_asicdrv_unknownMulticast.h linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/rtl8367c_asicdrv_unknownMulticast.h
--- linux-4.4.115-fbx/drivers/misc/freebox./rtlapi/rtl8367c_asicdrv_unknownMulticast.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/rtl8367c_asicdrv_unknownMulticast.h	2019-01-22 16:16:24.719257418 +0100
@@ -0,0 +1,42 @@
+#ifndef _RTL8367C_ASICDRV_UNKNOWNMULTICAST_H_
+#define _RTL8367C_ASICDRV_UNKNOWNMULTICAST_H_
+
+#include <rtl8367c_asicdrv.h>
+
+enum L2_UNKOWN_MULTICAST_BEHAVE
+{
+    L2_UNKOWN_MULTICAST_FLOODING = 0,
+    L2_UNKOWN_MULTICAST_DROP,
+    L2_UNKOWN_MULTICAST_TRAP,
+    L2_UNKOWN_MULTICAST_DROP_EXCLUDE_RMA,
+    L2_UNKOWN_MULTICAST_END
+};
+
+enum L3_UNKOWN_MULTICAST_BEHAVE
+{
+    L3_UNKOWN_MULTICAST_FLOODING = 0,
+    L3_UNKOWN_MULTICAST_DROP,
+    L3_UNKOWN_MULTICAST_TRAP,
+    L3_UNKOWN_MULTICAST_ROUTER,
+    L3_UNKOWN_MULTICAST_END
+};
+
+enum MULTICASTTYPE{
+    MULTICAST_TYPE_IPV4 = 0,
+    MULTICAST_TYPE_IPV6,
+    MULTICAST_TYPE_L2,
+    MULTICAST_TYPE_END
+};
+
+extern ret_t rtl8367c_setAsicUnknownL2MulticastBehavior(rtk_uint32 port, rtk_uint32 behave);
+extern ret_t rtl8367c_getAsicUnknownL2MulticastBehavior(rtk_uint32 port, rtk_uint32 *pBehave);
+extern ret_t rtl8367c_setAsicUnknownIPv4MulticastBehavior(rtk_uint32 port, rtk_uint32 behave);
+extern ret_t rtl8367c_getAsicUnknownIPv4MulticastBehavior(rtk_uint32 port, rtk_uint32 *pBehave);
+extern ret_t rtl8367c_setAsicUnknownIPv6MulticastBehavior(rtk_uint32 port, rtk_uint32 behave);
+extern ret_t rtl8367c_getAsicUnknownIPv6MulticastBehavior(rtk_uint32 port, rtk_uint32 *pBehave);
+extern ret_t rtl8367c_setAsicUnknownMulticastTrapPriority(rtk_uint32 priority);
+extern ret_t rtl8367c_getAsicUnknownMulticastTrapPriority(rtk_uint32 *pPriority);
+
+#endif /*_RTL8367C_ASICDRV_UNKNOWNMULTICAST_H_*/
+
+
diff -Nruw linux-4.4.115-fbx/drivers/misc/freebox./rtlapi/rtl8367c_asicdrv_vlan.c linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/rtl8367c_asicdrv_vlan.c
--- linux-4.4.115-fbx/drivers/misc/freebox./rtlapi/rtl8367c_asicdrv_vlan.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/rtl8367c_asicdrv_vlan.c	2019-01-22 16:16:24.719257418 +0100
@@ -0,0 +1,1507 @@
+/*
+ * Copyright (C) 2013 Realtek Semiconductor Corp.
+ * All Rights Reserved.
+ *
+ * This program is the proprietary software of Realtek Semiconductor
+ * Corporation and/or its licensors, and may only be used, duplicated,
+ * modified or distributed under the authorized license from Realtek.
+ *
+ * ANY USE OF THE SOFTWARE OTHER THAN AS AUTHORIZED UNDER
+ * THIS LICENSE OR COPYRIGHT LAW IS PROHIBITED.
+ *
+ * $Revision: 76306 $
+ * $Date: 2017-03-08 15:13:58 +0800 (Wed, 08 Mar 2017) $
+ *
+ * Purpose : RTL8367C switch high-level API for RTL8367C
+ * Feature : VLAN related functions
+ *
+ */
+#include <rtl8367c_asicdrv_vlan.h>
+
+#include <string.h>
+
+#if defined(CONFIG_RTL8367C_ASICDRV_TEST)
+rtl8367c_user_vlan4kentry Rtl8370sVirtualVlanTable[RTL8367C_VIDMAX + 1];
+#endif
+
+static void _rtl8367c_VlanMCStUser2Smi(rtl8367c_vlanconfiguser *pVlanCg, rtk_uint16 *pSmiVlanCfg)
+{
+    pSmiVlanCfg[0] |= pVlanCg->mbr & 0x07FF;
+
+    pSmiVlanCfg[1] |= pVlanCg->fid_msti & 0x000F;
+
+    pSmiVlanCfg[2] |= pVlanCg->vbpen & 0x0001;
+    pSmiVlanCfg[2] |= (pVlanCg->vbpri & 0x0007) << 1;
+    pSmiVlanCfg[2] |= (pVlanCg->envlanpol & 0x0001) << 4;
+    pSmiVlanCfg[2] |= (pVlanCg->meteridx & 0x003F) << 5;
+
+    pSmiVlanCfg[3] |= pVlanCg->evid & 0x1FFF;
+}
+
+static void _rtl8367c_VlanMCStSmi2User(rtk_uint16 *pSmiVlanCfg, rtl8367c_vlanconfiguser *pVlanCg)
+{
+    pVlanCg->mbr            = pSmiVlanCfg[0] & 0x07FF;
+    pVlanCg->fid_msti       = pSmiVlanCfg[1] & 0x000F;
+    pVlanCg->meteridx       = (pSmiVlanCfg[2] >> 5) & 0x003F;
+    pVlanCg->envlanpol      = (pSmiVlanCfg[2] >> 4) & 0x0001;
+    pVlanCg->vbpri          = (pSmiVlanCfg[2] >> 1) & 0x0007;
+    pVlanCg->vbpen          = pSmiVlanCfg[2] & 0x0001;
+    pVlanCg->evid           = pSmiVlanCfg[3] & 0x1FFF;
+}
+
+static void _rtl8367c_Vlan4kStUser2Smi(rtl8367c_user_vlan4kentry *pUserVlan4kEntry, rtk_uint16 *pSmiVlan4kEntry)
+{
+    pSmiVlan4kEntry[0] |= (pUserVlan4kEntry->mbr & 0x00FF);
+    pSmiVlan4kEntry[0] |= (pUserVlan4kEntry->untag & 0x00FF) << 8;
+
+    pSmiVlan4kEntry[1] |= (pUserVlan4kEntry->fid_msti & 0x000F);
+    pSmiVlan4kEntry[1] |= (pUserVlan4kEntry->vbpen & 0x0001) << 4;
+    pSmiVlan4kEntry[1] |= (pUserVlan4kEntry->vbpri & 0x0007) << 5;
+    pSmiVlan4kEntry[1] |= (pUserVlan4kEntry->envlanpol & 0x0001) << 8;
+    pSmiVlan4kEntry[1] |= (pUserVlan4kEntry->meteridx & 0x001F) << 9;
+    pSmiVlan4kEntry[1] |= (pUserVlan4kEntry->ivl_svl & 0x0001) << 14;
+
+    pSmiVlan4kEntry[2] |= ((pUserVlan4kEntry->mbr & 0x0700) >> 8);
+    pSmiVlan4kEntry[2] |= ((pUserVlan4kEntry->untag & 0x0700) >> 8) << 3;
+    pSmiVlan4kEntry[2] |= ((pUserVlan4kEntry->meteridx & 0x0020) >> 5) << 6;
+}
+
+
+static void _rtl8367c_Vlan4kStSmi2User(rtk_uint16 *pSmiVlan4kEntry, rtl8367c_user_vlan4kentry *pUserVlan4kEntry)
+{
+    pUserVlan4kEntry->mbr = (pSmiVlan4kEntry[0] & 0x00FF) | ((pSmiVlan4kEntry[2] & 0x0007) << 8);
+    pUserVlan4kEntry->untag = ((pSmiVlan4kEntry[0] & 0xFF00) >> 8) | (((pSmiVlan4kEntry[2] & 0x0038) >> 3) << 8);
+    pUserVlan4kEntry->fid_msti = pSmiVlan4kEntry[1] & 0x000F;
+    pUserVlan4kEntry->vbpen = (pSmiVlan4kEntry[1] & 0x0010) >> 4;
+    pUserVlan4kEntry->vbpri = (pSmiVlan4kEntry[1] & 0x00E0) >> 5;
+    pUserVlan4kEntry->envlanpol = (pSmiVlan4kEntry[1] & 0x0100) >> 8;
+    pUserVlan4kEntry->meteridx = ((pSmiVlan4kEntry[1] & 0x3E00) >> 9) | (((pSmiVlan4kEntry[2] & 0x0040) >> 6) << 5);
+    pUserVlan4kEntry->ivl_svl = (pSmiVlan4kEntry[1] & 0x4000) >> 14;
+}
+
+/* Function Name:
+ *      rtl8367c_setAsicVlanMemberConfig
+ * Description:
+ *      Set 32 VLAN member configurations
+ * Input:
+ *      index       - VLAN member configuration index (0~31)
+ *      pVlanCg - VLAN member configuration
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK                   - Success
+ *      RT_ERR_SMI                  - SMI access error
+ *      RT_ERR_INPUT                - Invalid input parameter
+ *      RT_ERR_L2_FID               - Invalid FID
+ *      RT_ERR_PORT_MASK            - Invalid portmask
+ *      RT_ERR_FILTER_METER_ID      - Invalid meter
+ *      RT_ERR_QOS_INT_PRIORITY     - Invalid priority
+ *      RT_ERR_VLAN_ENTRY_NOT_FOUND - Invalid VLAN member configuration index
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_setAsicVlanMemberConfig(rtk_uint32 index, rtl8367c_vlanconfiguser *pVlanCg)
+{
+    ret_t  retVal;
+    rtk_uint32 regAddr;
+    rtk_uint32 regData;
+    rtk_uint16 *tableAddr;
+    rtk_uint32 page_idx;
+    rtk_uint16 smi_vlancfg[RTL8367C_VLAN_MBRCFG_LEN];
+
+    /* Error Checking  */
+    if(index > RTL8367C_CVIDXMAX)
+        return RT_ERR_VLAN_ENTRY_NOT_FOUND;
+
+    if(pVlanCg->evid > RTL8367C_EVIDMAX)
+        return RT_ERR_INPUT;
+
+
+    if(pVlanCg->mbr > RTL8367C_PORTMASK)
+        return RT_ERR_PORT_MASK;
+
+    if(pVlanCg->fid_msti > RTL8367C_FIDMAX)
+        return RT_ERR_L2_FID;
+
+    if(pVlanCg->meteridx > RTL8367C_METERMAX)
+        return RT_ERR_FILTER_METER_ID;
+
+    if(pVlanCg->vbpri > RTL8367C_PRIMAX)
+        return RT_ERR_QOS_INT_PRIORITY;
+
+    memset(smi_vlancfg, 0x00, sizeof(rtk_uint16) * RTL8367C_VLAN_MBRCFG_LEN);
+    _rtl8367c_VlanMCStUser2Smi(pVlanCg, smi_vlancfg);
+    tableAddr = smi_vlancfg;
+
+    for(page_idx = 0; page_idx < 4; page_idx++)  /* 4 pages per VLAN Member Config */
+    {
+        regAddr = RTL8367C_VLAN_MEMBER_CONFIGURATION_BASE + (index * 4) + page_idx;
+        regData = *tableAddr;
+
+        retVal = rtl8367c_setAsicReg(regAddr, regData);
+        if(retVal != RT_ERR_OK)
+            return retVal;
+
+        tableAddr++;
+    }
+
+    return RT_ERR_OK;
+}
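+
+/*
+ * Example (editor's sketch, not part of the Realtek SDK): member
+ * configuration 1 carries VLAN 10 on ports 0-4 plus port 8 (mask 0x11F,
+ * port 8 assumed to be the CPU port here), with no VLAN-based priority or
+ * policing; unused fields stay zeroed. All values are illustrative.
+ */
+static ret_t example_vlan_membercfg_setup(void)
+{
+    rtl8367c_vlanconfiguser cfg;
+
+    memset(&cfg, 0x00, sizeof(rtl8367c_vlanconfiguser));
+    cfg.evid     = 10;      /* enhanced VID */
+    cfg.mbr      = 0x11F;   /* member port mask */
+    cfg.fid_msti = 0;       /* shared filtering database */
+
+    return rtl8367c_setAsicVlanMemberConfig(1, &cfg);
+}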
+/* Function Name:
+ *      rtl8367c_getAsicVlanMemberConfig
+ * Description:
+ *      Get 32 VLAN member configurations
+ * Input:
+ *      index       - VLAN member configuration index (0~31)
+ *      pVlanCg - VLAN member configuration
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK                   - Success
+ *      RT_ERR_SMI                  - SMI access error
+ *      RT_ERR_INPUT                - Invalid input parameter
+ *      RT_ERR_VLAN_ENTRY_NOT_FOUND - Invalid VLAN member configuration index
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicVlanMemberConfig(rtk_uint32 index, rtl8367c_vlanconfiguser *pVlanCg)
+{
+    ret_t  retVal;
+    rtk_uint32 page_idx;
+    rtk_uint32 regAddr;
+    rtk_uint32 regData;
+    rtk_uint16 *tableAddr;
+    rtk_uint16 smi_vlancfg[RTL8367C_VLAN_MBRCFG_LEN];
+
+    if(index > RTL8367C_CVIDXMAX)
+        return RT_ERR_VLAN_ENTRY_NOT_FOUND;
+
+    memset(smi_vlancfg, 0x00, sizeof(rtk_uint16) * RTL8367C_VLAN_MBRCFG_LEN);
+    tableAddr  = smi_vlancfg;
+
+    for(page_idx = 0; page_idx < 4; page_idx++)  /* 4 pages per VLAN Member Config */
+    {
+        regAddr = RTL8367C_VLAN_MEMBER_CONFIGURATION_BASE + (index * 4) + page_idx;
+
+        retVal = rtl8367c_getAsicReg(regAddr, &regData);
+        if(retVal != RT_ERR_OK)
+            return retVal;
+
+        *tableAddr = (rtk_uint16)regData;
+        tableAddr++;
+    }
+
+    _rtl8367c_VlanMCStSmi2User(smi_vlancfg, pVlanCg);
+    return RT_ERR_OK;
+}
+/* Function Name:
+ *      rtl8367c_setAsicVlan4kEntry
+ * Description:
+ *      Set VID mapped entry to 4K VLAN table
+ * Input:
+ *      pVlan4kEntry - 4K VLAN configuration
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK                   - Success
+ *      RT_ERR_SMI                  - SMI access error
+ *      RT_ERR_INPUT                - Invalid input parameter
+ *      RT_ERR_L2_FID               - Invalid FID
+ *      RT_ERR_VLAN_VID             - Invalid VID parameter (0~4095)
+ *      RT_ERR_PORT_MASK            - Invalid portmask
+ *      RT_ERR_FILTER_METER_ID      - Invalid meter
+ *      RT_ERR_QOS_INT_PRIORITY     - Invalid priority
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_setAsicVlan4kEntry(rtl8367c_user_vlan4kentry *pVlan4kEntry )
+{
+    rtk_uint16              vlan_4k_entry[RTL8367C_VLAN_4KTABLE_LEN];
+    rtk_uint32                  page_idx;
+    rtk_uint16                  *tableAddr;
+    ret_t                   retVal;
+    rtk_uint32                  regData;
+
+    if(pVlan4kEntry->vid > RTL8367C_VIDMAX)
+        return RT_ERR_VLAN_VID;
+
+    if(pVlan4kEntry->mbr > RTL8367C_PORTMASK)
+        return RT_ERR_PORT_MASK;
+
+    if(pVlan4kEntry->untag > RTL8367C_PORTMASK)
+        return RT_ERR_PORT_MASK;
+
+    if(pVlan4kEntry->fid_msti > RTL8367C_FIDMAX)
+        return RT_ERR_L2_FID;
+
+    if(pVlan4kEntry->meteridx > RTL8367C_METERMAX)
+        return RT_ERR_FILTER_METER_ID;
+
+    if(pVlan4kEntry->vbpri > RTL8367C_PRIMAX)
+        return RT_ERR_QOS_INT_PRIORITY;
+
+    memset(vlan_4k_entry, 0x00, sizeof(rtk_uint16) * RTL8367C_VLAN_4KTABLE_LEN);
+    _rtl8367c_Vlan4kStUser2Smi(pVlan4kEntry, vlan_4k_entry);
+
+    /* Prepare Data */
+    tableAddr = vlan_4k_entry;
+    for(page_idx = 0; page_idx < RTL8367C_VLAN_4KTABLE_LEN; page_idx++)
+    {
+        regData = *tableAddr;
+        retVal = rtl8367c_setAsicReg(RTL8367C_TABLE_ACCESS_WRDATA_BASE + page_idx, regData);
+        if(retVal != RT_ERR_OK)
+            return retVal;
+
+        tableAddr++;
+    }
+
+    /* Write Address (VLAN_ID) */
+    regData = pVlan4kEntry->vid;
+    retVal = rtl8367c_setAsicReg(RTL8367C_TABLE_ACCESS_ADDR_REG, regData);
+    if(retVal != RT_ERR_OK)
+        return retVal;
+
+    /* Write Command */
+    retVal = rtl8367c_setAsicRegBits(RTL8367C_TABLE_ACCESS_CTRL_REG, RTL8367C_TABLE_TYPE_MASK | RTL8367C_COMMAND_TYPE_MASK,RTL8367C_TABLE_ACCESS_REG_DATA(TB_OP_WRITE,TB_TARGET_CVLAN));
+    if(retVal != RT_ERR_OK)
+        return retVal;
+
+#if defined(CONFIG_RTL8367C_ASICDRV_TEST)
+    memcpy(&Rtl8370sVirtualVlanTable[pVlan4kEntry->vid], pVlan4kEntry, sizeof(rtl8367c_user_vlan4kentry));
+#endif
+
+    return RT_ERR_OK;
+}
+/* Function Name:
+ *      rtl8367c_getAsicVlan4kEntry
+ * Description:
+ *      Get VID mapped entry to 4K VLAN table
+ * Input:
+ *      pVlan4kEntry - 4K VLAN configuration
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK               - Success
+ *      RT_ERR_SMI              - SMI access error
+ *      RT_ERR_VLAN_VID         - Invalid VID parameter (0~4095)
+ *      RT_ERR_BUSYWAIT_TIMEOUT - LUT is busy at retrieving
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicVlan4kEntry(rtl8367c_user_vlan4kentry *pVlan4kEntry )
+{
+    rtk_uint16                  vlan_4k_entry[RTL8367C_VLAN_4KTABLE_LEN];
+    rtk_uint32                  page_idx;
+    rtk_uint16                  *tableAddr;
+    ret_t                       retVal;
+    rtk_uint32                  regData;
+    rtk_uint32                  busyCounter;
+
+    if(pVlan4kEntry->vid > RTL8367C_VIDMAX)
+        return RT_ERR_VLAN_VID;
+
+    /* Polling status */
+    busyCounter = RTL8367C_VLAN_BUSY_CHECK_NO;
+    while(busyCounter)
+    {
+        retVal = rtl8367c_getAsicRegBit(RTL8367C_TABLE_ACCESS_STATUS_REG, RTL8367C_TABLE_LUT_ADDR_BUSY_FLAG_OFFSET,&regData);
+        if(retVal != RT_ERR_OK)
+            return retVal;
+
+        if(regData == 0)
+            break;
+
+        busyCounter --;
+        if(busyCounter == 0)
+            return RT_ERR_BUSYWAIT_TIMEOUT;
+    }
+
+    /* Write Address (VLAN_ID) */
+    regData = pVlan4kEntry->vid;
+    retVal = rtl8367c_setAsicReg(RTL8367C_TABLE_ACCESS_ADDR_REG, regData);
+    if(retVal != RT_ERR_OK)
+        return retVal;
+
+    /* Read Command */
+    retVal = rtl8367c_setAsicRegBits(RTL8367C_TABLE_ACCESS_CTRL_REG, RTL8367C_TABLE_TYPE_MASK | RTL8367C_COMMAND_TYPE_MASK, RTL8367C_TABLE_ACCESS_REG_DATA(TB_OP_READ,TB_TARGET_CVLAN));
+    if(retVal != RT_ERR_OK)
+        return retVal;
+
+    /* Polling status */
+    busyCounter = RTL8367C_VLAN_BUSY_CHECK_NO;
+    while(busyCounter)
+    {
+        retVal = rtl8367c_getAsicRegBit(RTL8367C_TABLE_ACCESS_STATUS_REG, RTL8367C_TABLE_LUT_ADDR_BUSY_FLAG_OFFSET,&regData);
+        if(retVal != RT_ERR_OK)
+            return retVal;
+
+        if(regData == 0)
+            break;
+
+        busyCounter --;
+        if(busyCounter == 0)
+            return RT_ERR_BUSYWAIT_TIMEOUT;
+    }
+
+    /* Read VLAN data from register */
+    tableAddr = vlan_4k_entry;
+    for(page_idx = 0; page_idx < RTL8367C_VLAN_4KTABLE_LEN; page_idx++)
+    {
+        retVal = rtl8367c_getAsicReg(RTL8367C_TABLE_ACCESS_RDDATA_BASE + page_idx, &regData);
+        if(retVal != RT_ERR_OK)
+            return retVal;
+
+        *tableAddr = regData;
+        tableAddr++;
+    }
+
+    _rtl8367c_Vlan4kStSmi2User(vlan_4k_entry, pVlan4kEntry);
+
+#if defined(CONFIG_RTL8367C_ASICDRV_TEST)
+    memcpy(pVlan4kEntry, &Rtl8370sVirtualVlanTable[pVlan4kEntry->vid], sizeof(rtl8367c_user_vlan4kentry));
+#endif
+
+    return RT_ERR_OK;
+}
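+
+/*
+ * Example (editor's sketch, not part of the Realtek SDK): read-modify-write
+ * of a 4K table entry. The vid field doubles as the lookup key on read, so
+ * it must be filled in before calling the getter; here port 5 is added to
+ * VLAN 20 as a tagged member. VID and port are illustrative.
+ */
+static ret_t example_vlan4k_add_port(void)
+{
+    rtl8367c_user_vlan4kentry entry;
+    ret_t retVal;
+
+    memset(&entry, 0x00, sizeof(rtl8367c_user_vlan4kentry));
+    entry.vid = 20;                     /* key for the 4K table lookup */
+
+    retVal = rtl8367c_getAsicVlan4kEntry(&entry);
+    if(retVal != RT_ERR_OK)
+        return retVal;
+
+    entry.mbr   |= (1 << 5);            /* add port 5 to the member set */
+    entry.untag &= ~(1 << 5);           /* keep the tag on egress */
+
+    return rtl8367c_setAsicVlan4kEntry(&entry);
+}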
+/* Function Name:
+ *      rtl8367c_setAsicVlanAccpetFrameType
+ * Description:
+ *      Set per-port acceptable frame type
+ * Input:
+ *      port        - Physical port number (0~10)
+ *      frameType   - The acceptable frame type
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK                       - Success
+ *      RT_ERR_SMI                      - SMI access error
+ *      RT_ERR_PORT_ID                  - Invalid port number
+ *      RT_ERR_VLAN_ACCEPT_FRAME_TYPE   - Invalid frame type
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_setAsicVlanAccpetFrameType(rtk_uint32 port, rtl8367c_accframetype frameType)
+{
+    if(port > RTL8367C_PORTIDMAX)
+        return RT_ERR_PORT_ID;
+
+    if(frameType >= FRAME_TYPE_MAX_BOUND)
+        return RT_ERR_VLAN_ACCEPT_FRAME_TYPE;
+
+    return rtl8367c_setAsicRegBits(RTL8367C_VLAN_ACCEPT_FRAME_TYPE_REG(port), RTL8367C_VLAN_ACCEPT_FRAME_TYPE_MASK(port), frameType);
+}
+/* Function Name:
+ *      rtl8367c_getAsicVlanAccpetFrameType
+ * Description:
+ *      Get per-port acceptable frame type
+ * Input:
+ *      port        - Physical port number (0~10)
+ *      pFrameType  - The acceptable frame type
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK                       - Success
+ *      RT_ERR_SMI                      - SMI access error
+ *      RT_ERR_PORT_ID                  - Invalid port number
+ *      RT_ERR_VLAN_ACCEPT_FRAME_TYPE   - Invalid frame type
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicVlanAccpetFrameType(rtk_uint32 port, rtl8367c_accframetype *pFrameType)
+{
+    rtk_uint32 regData;
+    ret_t  retVal;
+
+    if(port > RTL8367C_PORTIDMAX)
+        return RT_ERR_PORT_ID;
+
+    if((retVal = rtl8367c_getAsicRegBits(RTL8367C_VLAN_ACCEPT_FRAME_TYPE_REG(port), RTL8367C_VLAN_ACCEPT_FRAME_TYPE_MASK(port), &regData)) != RT_ERR_OK)
+        return retVal;
+
+    *pFrameType = (rtl8367c_accframetype)regData;
+    return RT_ERR_OK;
+}
+/* Function Name:
+ *      rtl8367c_setAsicVlanIngressFilter
+ * Description:
+ *      Set VLAN Ingress Filter
+ * Input:
+ *      port        - Physical port number (0~10)
+ *      enabled     - Enable or disable Ingress filter
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK       - Success
+ *      RT_ERR_SMI      - SMI access error
+ *      RT_ERR_PORT_ID  - Invalid port number
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_setAsicVlanIngressFilter(rtk_uint32 port, rtk_uint32 enabled)
+{
+    if(port > RTL8367C_PORTIDMAX)
+        return RT_ERR_PORT_ID;
+
+    return rtl8367c_setAsicRegBit(RTL8367C_VLAN_INGRESS_REG, port, enabled);
+}
+/* Function Name:
+ *      rtl8367c_getAsicVlanIngressFilter
+ * Description:
+ *      Get VLAN Ingress Filter
+ * Input:
+ *      port        - Physical port number (0~10)
+ *      pEnable     - Enable or disable Ingress filter
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK       - Success
+ *      RT_ERR_SMI      - SMI access error
+ *      RT_ERR_PORT_ID  - Invalid port number
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicVlanIngressFilter(rtk_uint32 port, rtk_uint32 *pEnable)
+{
+    if(port > RTL8367C_PORTIDMAX)
+        return RT_ERR_PORT_ID;
+
+    return rtl8367c_getAsicRegBit(RTL8367C_VLAN_INGRESS_REG, port, pEnable);
+}
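+
+/*
+ * Usage sketch (illustrative, not part of the vendor SDK): enable ingress
+ * filtering on every physical port, so frames whose VLAN does not include
+ * the receiving port are dropped.
+ */
+static ret_t example_enable_ingress_filter_all(void)
+{
+    rtk_uint32 port;
+    ret_t retVal;
+
+    for(port = 0; port <= RTL8367C_PORTIDMAX; port++)
+    {
+        if((retVal = rtl8367c_setAsicVlanIngressFilter(port, 1)) != RT_ERR_OK)
+            return retVal;
+    }
+
+    return RT_ERR_OK;
+}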
+/* Function Name:
+ *      rtl8367c_setAsicVlanEgressTagMode
+ * Description:
+ *      Set CVLAN egress tag mode
+ * Input:
+ *      port        - Physical port number (0~10)
+ *      tagMode     - The egress tag mode: Original, Keep tag, Priority tag or Real keep mode
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK       - Success
+ *      RT_ERR_SMI      - SMI access error
+ *      RT_ERR_INPUT    - Invalid input parameter
+ *      RT_ERR_PORT_ID  - Invalid port number
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_setAsicVlanEgressTagMode(rtk_uint32 port, rtl8367c_egtagmode tagMode)
+{
+    if(port > RTL8367C_PORTIDMAX)
+        return RT_ERR_PORT_ID;
+
+    if(tagMode >= EG_TAG_MODE_END)
+        return RT_ERR_INPUT;
+
+    return rtl8367c_setAsicRegBits(RTL8367C_PORT_MISC_CFG_REG(port), RTL8367C_VLAN_EGRESS_MDOE_MASK, tagMode);
+}
+/* Function Name:
+ *      rtl8367c_getAsicVlanEgressTagMode
+ * Description:
+ *      Get CVLAN egress tag mode
+ * Input:
+ *      port        - Physical port number (0~10)
+ * Output:
+ *      pTagMode    - The egress tag mode: Original, Keep tag, Priority tag or Real keep mode
+ * Return:
+ *      RT_ERR_OK       - Success
+ *      RT_ERR_SMI      - SMI access error
+ *      RT_ERR_PORT_ID  - Invalid port number
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicVlanEgressTagMode(rtk_uint32 port, rtl8367c_egtagmode *pTagMode)
+{
+    rtk_uint32 regData;
+    ret_t  retVal;
+
+    if(port > RTL8367C_PORTIDMAX)
+        return RT_ERR_PORT_ID;
+
+    if((retVal = rtl8367c_getAsicRegBits(RTL8367C_PORT_MISC_CFG_REG(port), RTL8367C_VLAN_EGRESS_MDOE_MASK, &regData)) != RT_ERR_OK)
+        return retVal;
+
+    *pTagMode = (rtl8367c_egtagmode)regData;
+    return RT_ERR_OK;
+}
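+
+/*
+ * Usage sketch (illustrative, not part of the vendor SDK): keep the original
+ * tag on an uplink port so frames egress exactly as they arrived, then read
+ * the mode back.
+ */
+static ret_t example_keep_tags_on_uplink(rtk_uint32 uplink_port)
+{
+    rtl8367c_egtagmode mode;
+    ret_t retVal;
+
+    if((retVal = rtl8367c_setAsicVlanEgressTagMode(uplink_port, EG_TAG_MODE_KEEP)) != RT_ERR_OK)
+        return retVal;
+
+    return rtl8367c_getAsicVlanEgressTagMode(uplink_port, &mode);
+}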
+/* Function Name:
+ *      rtl8367c_setAsicVlanPortBasedVID
+ * Description:
+ *      Set port based VID which is indexed to 32 VLAN member configurations
+ * Input:
+ *      port    - Physical port number (0~10)
+ *      index   - Index to VLAN member configuration
+ *      pri     - 1Q Port based VLAN priority
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK                   - Success
+ *      RT_ERR_SMI                  - SMI access error
+ *      RT_ERR_PORT_ID              - Invalid port number
+ *      RT_ERR_QOS_INT_PRIORITY     - Invalid priority
+ *      RT_ERR_VLAN_ENTRY_NOT_FOUND - Invalid VLAN member configuration index
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_setAsicVlanPortBasedVID(rtk_uint32 port, rtk_uint32 index, rtk_uint32 pri)
+{
+    rtk_uint32 regAddr, bit_mask;
+    ret_t  retVal;
+
+    if(port > RTL8367C_PORTIDMAX)
+        return RT_ERR_PORT_ID;
+
+    if(index > RTL8367C_CVIDXMAX)
+        return RT_ERR_VLAN_ENTRY_NOT_FOUND;
+
+    if(pri > RTL8367C_PRIMAX)
+        return RT_ERR_QOS_INT_PRIORITY;
+
+    regAddr = RTL8367C_VLAN_PVID_CTRL_REG(port);
+    bit_mask = RTL8367C_PORT_VIDX_MASK(port);
+    retVal = rtl8367c_setAsicRegBits(regAddr, bit_mask, index);
+    if(retVal != RT_ERR_OK)
+        return retVal;
+
+    regAddr = RTL8367C_VLAN_PORTBASED_PRIORITY_REG(port);
+    bit_mask = RTL8367C_VLAN_PORTBASED_PRIORITY_MASK(port);
+    retVal = rtl8367c_setAsicRegBits(regAddr, bit_mask, pri);
+    if(retVal != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+/* Function Name:
+ *      rtl8367c_getAsicVlanPortBasedVID
+ * Description:
+ *      Get port based VID which is indexed to 32 VLAN member configurations
+ * Input:
+ *      port    - Physical port number (0~10)
+ * Output:
+ *      pIndex  - Index to VLAN member configuration
+ *      pPri    - 1Q Port based VLAN priority
+ * Return:
+ *      RT_ERR_OK       - Success
+ *      RT_ERR_SMI      - SMI access error
+ *      RT_ERR_PORT_ID  - Invalid port number
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicVlanPortBasedVID(rtk_uint32 port, rtk_uint32 *pIndex, rtk_uint32 *pPri)
+{
+    rtk_uint32 regAddr,bit_mask;
+    ret_t  retVal;
+
+    if(port > RTL8367C_PORTIDMAX)
+        return RT_ERR_PORT_ID;
+
+    regAddr = RTL8367C_VLAN_PVID_CTRL_REG(port);
+    bit_mask = RTL8367C_PORT_VIDX_MASK(port);
+    retVal = rtl8367c_getAsicRegBits(regAddr, bit_mask, pIndex);
+    if(retVal != RT_ERR_OK)
+        return retVal;
+
+    regAddr = RTL8367C_VLAN_PORTBASED_PRIORITY_REG(port);
+    bit_mask = RTL8367C_VLAN_PORTBASED_PRIORITY_MASK(port);
+    retVal = rtl8367c_getAsicRegBits(regAddr, bit_mask, pPri);
+    if(retVal != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
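+
+/*
+ * Usage sketch (illustrative, not part of the vendor SDK): the PVID written
+ * here is an index into the 32 VLAN member configurations, not a raw VID.
+ * This binds a port to member configuration 2 with 802.1p priority 5 and
+ * reads both values back.
+ */
+static ret_t example_set_pvid_index(rtk_uint32 port)
+{
+    rtk_uint32 index, pri;
+    ret_t retVal;
+
+    if((retVal = rtl8367c_setAsicVlanPortBasedVID(port, 2, 5)) != RT_ERR_OK)
+        return retVal;
+
+    return rtl8367c_getAsicVlanPortBasedVID(port, &index, &pri);
+}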
+/* Function Name:
+ *      rtl8367c_setAsicVlanProtocolBasedGroupData
+ * Description:
+ *      Set protocol and port based group database
+ * Input:
+ *      index       - Index of protocol-based group database (0~3)
+ *      pPbCfg      - Protocol-based group database entry
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK                   - Success
+ *      RT_ERR_SMI                  - SMI access error
+ *      RT_ERR_INPUT                - Invalid input parameter
+ *      RT_ERR_VLAN_PROTO_AND_PORT  - Invalid protocol base group database index
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_setAsicVlanProtocolBasedGroupData(rtk_uint32 index, rtl8367c_protocolgdatacfg *pPbCfg)
+{
+    rtk_uint32  frameType;
+    rtk_uint32  etherType;
+    ret_t   retVal;
+
+    /* Error Checking */
+    if(index > RTL8367C_PROTOVLAN_GIDX_MAX)
+        return RT_ERR_VLAN_PROTO_AND_PORT;
+
+    if(pPbCfg->frameType >= PPVLAN_FRAME_TYPE_END )
+        return RT_ERR_INPUT;
+
+    frameType = pPbCfg->frameType;
+    etherType = pPbCfg->etherType;
+
+    /* Frame type */
+    retVal = rtl8367c_setAsicRegBits(RTL8367C_VLAN_PPB_FRAMETYPE_REG(index), RTL8367C_VLAN_PPB_FRAMETYPE_MASK, frameType);
+    if(retVal != RT_ERR_OK)
+        return retVal;
+
+    /* Ether type */
+    retVal = rtl8367c_setAsicReg(RTL8367C_VLAN_PPB_ETHERTYPR_REG(index), etherType);
+    if(retVal != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+/* Function Name:
+ *      rtl8367c_getAsicVlanProtocolBasedGroupData
+ * Description:
+ *      Get protocol and port based group database
+ * Input:
+ *      index       - Index of protocol-based group database (0~3)
+ * Output:
+ *      pPbCfg      - Protocol-based group database entry
+ * Return:
+ *      RT_ERR_OK                   - Success
+ *      RT_ERR_SMI                  - SMI access error
+ *      RT_ERR_INPUT                - Invalid input parameter
+ *      RT_ERR_VLAN_PROTO_AND_PORT  - Invalid protocol base group database index
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicVlanProtocolBasedGroupData(rtk_uint32 index, rtl8367c_protocolgdatacfg *pPbCfg)
+{
+    rtk_uint32  frameType;
+    rtk_uint32  etherType;
+    ret_t   retVal;
+
+    /* Error Checking */
+    if(index > RTL8367C_PROTOVLAN_GIDX_MAX)
+        return RT_ERR_VLAN_PROTO_AND_PORT;
+
+    /* Read Frame type */
+    retVal = rtl8367c_getAsicRegBits(RTL8367C_VLAN_PPB_FRAMETYPE_REG(index), RTL8367C_VLAN_PPB_FRAMETYPE_MASK, &frameType);
+    if(retVal != RT_ERR_OK)
+        return retVal;
+
+    /* Read Ether type */
+    retVal = rtl8367c_getAsicReg(RTL8367C_VLAN_PPB_ETHERTYPR_REG(index), &etherType);
+    if(retVal != RT_ERR_OK)
+        return retVal;
+
+    pPbCfg->frameType = frameType;
+    pPbCfg->etherType = etherType;
+    return RT_ERR_OK;
+}
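+
+/*
+ * Usage sketch (illustrative, not part of the vendor SDK): program protocol
+ * group 0 to match Ethernet-II frames carrying the PPPoE session EtherType
+ * (0x8864). Ports are then bound to the group with
+ * rtl8367c_setAsicVlanPortAndProtocolBased().
+ */
+static ret_t example_program_pppoe_group(void)
+{
+    rtl8367c_protocolgdatacfg cfg;
+
+    cfg.frameType = PPVLAN_FRAME_TYPE_ETHERNET;
+    cfg.etherType = 0x8864;
+
+    return rtl8367c_setAsicVlanProtocolBasedGroupData(0, &cfg);
+}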
+/* Function Name:
+ *      rtl8367c_setAsicVlanPortAndProtocolBased
+ * Description:
+ *      Set protocol and port based VLAN configuration
+ * Input:
+ *      port        - Physical port number (0~10)
+ *      index       - Index of protocol and port based database entry
+ *      pPpbCfg     - Protocol and port based VLAN configuration
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK                   - Success
+ *      RT_ERR_SMI                  - SMI access error
+ *      RT_ERR_INPUT                - Invalid input parameter
+ *      RT_ERR_PORT_ID              - Invalid port number
+ *      RT_ERR_QOS_INT_PRIORITY     - Invalid priority
+ *      RT_ERR_VLAN_PROTO_AND_PORT  - Invalid protocol base group database index
+ *      RT_ERR_VLAN_ENTRY_NOT_FOUND - Invalid VLAN member configuration index
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_setAsicVlanPortAndProtocolBased(rtk_uint32 port, rtk_uint32 index, rtl8367c_protocolvlancfg *pPpbCfg)
+{
+    rtk_uint32  reg_addr, bit_mask, bit_value;
+    ret_t   retVal;
+
+    /* Error Checking */
+    if(port > RTL8367C_PORTIDMAX)
+        return RT_ERR_PORT_ID;
+
+    if(index > RTL8367C_PROTOVLAN_GIDX_MAX)
+        return RT_ERR_VLAN_PROTO_AND_PORT;
+
+    if(pPpbCfg == NULL)
+        return RT_ERR_INPUT;
+
+    if( (pPpbCfg->valid != FALSE) && (pPpbCfg->valid != TRUE) )
+        return RT_ERR_INPUT;
+
+    if(pPpbCfg->vlan_idx > RTL8367C_CVIDXMAX)
+        return RT_ERR_VLAN_ENTRY_NOT_FOUND;
+
+    if(pPpbCfg->priority > RTL8367C_PRIMAX)
+        return RT_ERR_QOS_INT_PRIORITY;
+
+    /* Valid bit */
+    reg_addr  = RTL8367C_VLAN_PPB_VALID_REG(index);
+    bit_mask  = 0x0001 << port;
+    bit_value = ((TRUE == pPpbCfg->valid) ? 0x1 : 0x0);
+    retVal    = rtl8367c_setAsicRegBits(reg_addr, bit_mask, bit_value);
+    if(retVal != RT_ERR_OK)
+        return retVal;
+
+    /* Calculate the actual register address for CVLAN index*/
+    if(port < 8)
+    {
+        reg_addr = RTL8367C_VLAN_PPB_CTRL_REG(index, port);
+        bit_mask = RTL8367C_VLAN_PPB_CTRL_MASK(port);
+    }
+    else if(port == 8)
+    {
+        reg_addr = RTL8367C_REG_VLAN_PPB0_CTRL4;
+        bit_mask = RTL8367C_VLAN_PPB0_CTRL4_PORT8_INDEX_MASK;
+    }
+    else if(port == 9)
+    {
+        reg_addr = RTL8367C_REG_VLAN_PPB0_CTRL4;
+        bit_mask = RTL8367C_VLAN_PPB0_CTRL4_PORT9_INDEX_MASK;
+    }
+    else    /* port == 10, guaranteed by the RTL8367C_PORTIDMAX check above */
+    {
+        reg_addr = RTL8367C_REG_VLAN_PPB0_CTRL4;
+        bit_mask = RTL8367C_VLAN_PPB0_CTRL4_PORT10_INDEX_MASK;
+    }
+
+    bit_value = pPpbCfg->vlan_idx;
+    retVal  = rtl8367c_setAsicRegBits(reg_addr, bit_mask, bit_value);
+    if(retVal != RT_ERR_OK)
+        return retVal;
+
+    /* write priority */
+    reg_addr  = RTL8367C_VLAN_PPB_PRIORITY_ITEM_REG(port, index);
+    bit_mask  = RTL8367C_VLAN_PPB_PRIORITY_ITEM_MASK(port);
+    bit_value = pPpbCfg->priority;
+    retVal    = rtl8367c_setAsicRegBits(reg_addr, bit_mask, bit_value);
+    if(retVal != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+/* Function Name:
+ *      rtl8367c_getAsicVlanPortAndProtocolBased
+ * Description:
+ *      Get protocol and port based VLAN configuration
+ * Input:
+ *      port        - Physical port number (0~10)
+ *      index       - Index of protocol and port based database entry
+ * Output:
+ *      pPpbCfg     - Protocol and port based VLAN configuration
+ * Return:
+ *      RT_ERR_OK                   - Success
+ *      RT_ERR_SMI                  - SMI access error
+ *      RT_ERR_INPUT                - Invalid input parameter
+ *      RT_ERR_PORT_ID              - Invalid port number
+ *      RT_ERR_VLAN_PROTO_AND_PORT  - Invalid protocol base group database index
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicVlanPortAndProtocolBased(rtk_uint32 port, rtk_uint32 index, rtl8367c_protocolvlancfg *pPpbCfg)
+{
+    rtk_uint32  reg_addr, bit_mask, bit_value;
+    ret_t   retVal;
+
+    /* Error Checking */
+    if(port > RTL8367C_PORTIDMAX)
+        return RT_ERR_PORT_ID;
+
+    if(index > RTL8367C_PROTOVLAN_GIDX_MAX)
+        return RT_ERR_VLAN_PROTO_AND_PORT;
+
+    if(pPpbCfg == NULL)
+        return RT_ERR_INPUT;
+
+    /* Valid bit */
+    reg_addr  = RTL8367C_VLAN_PPB_VALID_REG(index);
+    bit_mask  = 0x0001 << port;
+    retVal    = rtl8367c_getAsicRegBits(reg_addr, bit_mask, &bit_value);
+    if(retVal != RT_ERR_OK)
+        return retVal;
+
+    pPpbCfg->valid = bit_value;
+
+    /* CVLAN index */
+    if(port < 8)
+    {
+        reg_addr = RTL8367C_VLAN_PPB_CTRL_REG(index, port);
+        bit_mask = RTL8367C_VLAN_PPB_CTRL_MASK(port);
+    }
+    else if(port == 8)
+    {
+        reg_addr = RTL8367C_REG_VLAN_PPB0_CTRL4;
+        bit_mask = RTL8367C_VLAN_PPB0_CTRL4_PORT8_INDEX_MASK;
+    }
+    else if(port == 9)
+    {
+        reg_addr = RTL8367C_REG_VLAN_PPB0_CTRL4;
+        bit_mask = RTL8367C_VLAN_PPB0_CTRL4_PORT9_INDEX_MASK;
+    }
+    else    /* port == 10, guaranteed by the RTL8367C_PORTIDMAX check above */
+    {
+        reg_addr = RTL8367C_REG_VLAN_PPB0_CTRL4;
+        bit_mask = RTL8367C_VLAN_PPB0_CTRL4_PORT10_INDEX_MASK;
+    }
+
+    retVal = rtl8367c_getAsicRegBits(reg_addr, bit_mask, &bit_value);
+    if(retVal != RT_ERR_OK)
+        return retVal;
+
+    pPpbCfg->vlan_idx = bit_value;
+
+    /* priority */
+    reg_addr = RTL8367C_VLAN_PPB_PRIORITY_ITEM_REG(port,index);
+    bit_mask = RTL8367C_VLAN_PPB_PRIORITY_ITEM_MASK(port);
+    retVal = rtl8367c_getAsicRegBits(reg_addr, bit_mask, &bit_value);
+    if(retVal != RT_ERR_OK)
+        return retVal;
+
+    pPpbCfg->priority = bit_value;
+    return RT_ERR_OK;
+}
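+
+/*
+ * Usage sketch (illustrative, not part of the vendor SDK): attach a port to
+ * protocol group 0 so that matching frames are classified into VLAN member
+ * configuration 3 with priority 0.
+ */
+static ret_t example_bind_port_to_group(rtk_uint32 port)
+{
+    rtl8367c_protocolvlancfg cfg;
+
+    cfg.valid    = TRUE;
+    cfg.vlan_idx = 3;
+    cfg.priority = 0;
+
+    return rtl8367c_setAsicVlanPortAndProtocolBased(port, 0, &cfg);
+}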
+/* Function Name:
+ *      rtl8367c_setAsicVlanFilter
+ * Description:
+ *      Enable or disable the CVLAN filtering function
+ * Input:
+ *      enabled - 1: enabled, 0: disabled
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK       - Success
+ *      RT_ERR_SMI      - SMI access error
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_setAsicVlanFilter(rtk_uint32 enabled)
+{
+    return rtl8367c_setAsicRegBit(RTL8367C_REG_VLAN_CTRL, RTL8367C_VLAN_CTRL_OFFSET, enabled);
+}
+/* Function Name:
+ *      rtl8367c_getAsicVlanFilter
+ * Description:
+ *      Get CVLAN filtering function state
+ * Input:
+ *      None
+ * Output:
+ *      pEnabled - 1: enabled, 0: disabled
+ * Return:
+ *      RT_ERR_OK       - Success
+ *      RT_ERR_SMI      - SMI access error
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicVlanFilter(rtk_uint32* pEnabled)
+{
+    return rtl8367c_getAsicRegBit(RTL8367C_REG_VLAN_CTRL, RTL8367C_VLAN_CTRL_OFFSET, pEnabled);
+}
+/* Function Name:
+ *      rtl8367c_setAsicVlanUntagDscpPriorityEn
+ * Description:
+ *      Enable or disable DSCP to 1Q priority mapping for untagged packets
+ * Input:
+ *      enabled - 1: enabled, 0: disabled
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK       - Success
+ *      RT_ERR_SMI      - SMI access error
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_setAsicVlanUntagDscpPriorityEn(rtk_uint32 enabled)
+{
+    return rtl8367c_setAsicRegBit(RTL8367C_REG_UNTAG_DSCP_PRI_CFG, RTL8367C_UNTAG_DSCP_PRI_CFG_OFFSET, enabled);
+}
+/* Function Name:
+ *      rtl8367c_getAsicVlanUntagDscpPriorityEn
+ * Description:
+ *      Get DSCP to 1Q priority mapping state for untagged packets
+ * Input:
+ *      None
+ * Output:
+ *      enabled - 1: enabled, 0: disabled
+ * Return:
+ *      RT_ERR_OK       - Success
+ *      RT_ERR_SMI      - SMI access error
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicVlanUntagDscpPriorityEn(rtk_uint32* enabled)
+{
+    return rtl8367c_getAsicRegBit(RTL8367C_REG_UNTAG_DSCP_PRI_CFG, RTL8367C_UNTAG_DSCP_PRI_CFG_OFFSET, enabled);
+}
+/* Function Name:
+ *      rtl8367c_setAsicPortBasedFid
+ * Description:
+ *      Set port based FID
+ * Input:
+ *      port    - Physical port number (0~10)
+ *      fid     - Port based fid
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK       - Success
+ *      RT_ERR_SMI      - SMI access error
+ *      RT_ERR_L2_FID   - Invalid FID
+ *      RT_ERR_PORT_ID  - Invalid port number
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_setAsicPortBasedFid(rtk_uint32 port, rtk_uint32 fid)
+{
+    rtk_uint32  reg_addr;
+
+    if(port > RTL8367C_PORTIDMAX)
+        return RT_ERR_PORT_ID;
+
+    if(fid > RTL8367C_FIDMAX)
+        return RT_ERR_L2_FID;
+
+    if(port < 8)
+        return rtl8367c_setAsicReg(RTL8367C_PORT_PBFID_REG(port),fid);
+    else {
+        reg_addr = RTL8367C_REG_PORT8_PBFID + port-8;
+        return rtl8367c_setAsicReg(reg_addr, fid);
+    }
+
+}
+/* Function Name:
+ *      rtl8367c_getAsicPortBasedFid
+ * Description:
+ *      Get port based FID
+ * Input:
+ *      port    - Physical port number (0~10)
+ * Output:
+ *      pFid    - Port based fid
+ * Return:
+ *      RT_ERR_OK       - Success
+ *      RT_ERR_SMI      - SMI access error
+ *      RT_ERR_PORT_ID  - Invalid port number
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicPortBasedFid(rtk_uint32 port, rtk_uint32* pFid)
+{
+    rtk_uint32  reg_addr;
+
+    if(port > RTL8367C_PORTIDMAX)
+        return RT_ERR_PORT_ID;
+
+    if(port < 8)
+        return rtl8367c_getAsicReg(RTL8367C_PORT_PBFID_REG(port), pFid);
+    else{
+        reg_addr = RTL8367C_REG_PORT8_PBFID + port-8;
+        return rtl8367c_getAsicReg(reg_addr, pFid);
+    }
+}
+/* Function Name:
+ *      rtl8367c_setAsicPortBasedFidEn
+ * Description:
+ *      Set port based FID selection enable
+ * Input:
+ *      port    - Physical port number (0~10)
+ *      enabled - 1: enabled, 0: disabled
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK       - Success
+ *      RT_ERR_SMI      - SMI access error
+ *      RT_ERR_PORT_ID  - Invalid port number
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_setAsicPortBasedFidEn(rtk_uint32 port, rtk_uint32 enabled)
+{
+    if(port > RTL8367C_PORTIDMAX)
+        return RT_ERR_PORT_ID;
+
+    return rtl8367c_setAsicRegBit(RTL8367C_REG_PORT_PBFIDEN,port, enabled);
+}
+/* Function Name:
+ *      rtl8367c_getAsicPortBasedFidEn
+ * Description:
+ *      Get port based FID selection enable
+ * Input:
+ *      port    - Physical port number (0~10)
+ * Output:
+ *      pEnabled - 1: enabled, 0: disabled
+ * Return:
+ *      RT_ERR_OK       - Success
+ *      RT_ERR_SMI      - SMI access error
+ *      RT_ERR_PORT_ID  - Invalid port number
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicPortBasedFidEn(rtk_uint32 port, rtk_uint32* pEnabled)
+{
+    if(port > RTL8367C_PORTIDMAX)
+        return RT_ERR_PORT_ID;
+
+    return rtl8367c_getAsicRegBit(RTL8367C_REG_PORT_PBFIDEN,port, pEnabled);
+}
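+
+/*
+ * Usage sketch (illustrative, not part of the vendor SDK): place a port in
+ * its own filtering database (FID 1) and enable port-based FID selection so
+ * the L2 lookup actually uses it.
+ */
+static ret_t example_isolate_port_fid(rtk_uint32 port)
+{
+    ret_t retVal;
+
+    if((retVal = rtl8367c_setAsicPortBasedFid(port, 1)) != RT_ERR_OK)
+        return retVal;
+
+    return rtl8367c_setAsicPortBasedFidEn(port, 1);
+}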
+/* Function Name:
+ *      rtl8367c_setAsicSpanningTreeStatus
+ * Description:
+ *      Set spanning tree state per each port
+ * Input:
+ *      port    - Physical port number (0~10)
+ *      msti    - Multiple spanning tree instance
+ *      state   - Spanning tree state for msti
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - Success
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_MSTI         - Invalid msti parameter
+ *      RT_ERR_PORT_ID      - Invalid port number
+ *      RT_ERR_MSTP_STATE   - Invalid spanning tree state
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_setAsicSpanningTreeStatus(rtk_uint32 port, rtk_uint32 msti, rtk_uint32 state)
+{
+    rtk_uint32  reg_addr,bits_msk;
+
+    if(port > RTL8367C_PORTIDMAX)
+        return RT_ERR_PORT_ID;
+
+    if(msti > RTL8367C_MSTIMAX)
+        return RT_ERR_MSTI;
+
+    if(state > STPST_FORWARDING)
+        return RT_ERR_MSTP_STATE;
+
+    if(port < 8)
+        return rtl8367c_setAsicRegBits(RTL8367C_VLAN_MSTI_REG(msti,port), RTL8367C_VLAN_MSTI_MASK(port),state);
+    else{
+        reg_addr = RTL8367C_VLAN_MSTI_REG(msti,port);
+        switch(port){
+            case 8: bits_msk = RTL8367C_VLAN_MSTI0_CTRL1_PORT8_STATE_MASK;break;
+            case 9: bits_msk = RTL8367C_VLAN_MSTI0_CTRL1_PORT9_STATE_MASK;break;
+            case 10: bits_msk = RTL8367C_VLAN_MSTI0_CTRL1_PORT10_STATE_MASK;break;
+        }
+        return rtl8367c_setAsicRegBits(reg_addr, bits_msk,state);
+    }
+}
+/* Function Name:
+ *      rtl8367c_getAsicSpanningTreeStatus
+ * Description:
+ *      Get spanning tree state per each port
+ * Input:
+ *      port    - Physical port number (0~10)
+ *      msti    - Multiple spanning tree instance
+ * Output:
+ *      pState  - Spanning tree state for msti
+ * Return:
+ *      RT_ERR_OK           - Success
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_MSTI         - Invalid msti parameter
+ *      RT_ERR_PORT_ID      - Invalid port number
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicSpanningTreeStatus(rtk_uint32 port, rtk_uint32 msti, rtk_uint32* pState)
+{
+    rtk_uint32  reg_addr,bits_msk;
+
+    if(port > RTL8367C_PORTIDMAX)
+        return RT_ERR_PORT_ID;
+
+    if(msti > RTL8367C_MSTIMAX)
+        return RT_ERR_MSTI;
+
+    if(port < 8)
+        return rtl8367c_getAsicRegBits(RTL8367C_VLAN_MSTI_REG(msti,port), RTL8367C_VLAN_MSTI_MASK(port), pState);
+    else{
+        reg_addr = RTL8367C_VLAN_MSTI_REG(msti,port);
+        switch(port){
+            case 8: bits_msk = RTL8367C_VLAN_MSTI0_CTRL1_PORT8_STATE_MASK;break;
+            case 9: bits_msk = RTL8367C_VLAN_MSTI0_CTRL1_PORT9_STATE_MASK;break;
+            case 10: bits_msk = RTL8367C_VLAN_MSTI0_CTRL1_PORT10_STATE_MASK;break;
+        }
+        return rtl8367c_getAsicRegBits(reg_addr, bits_msk, pState);
+    }
+
+}
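+
+/*
+ * Usage sketch (illustrative, not part of the vendor SDK): put a port into
+ * the blocking state for MSTI 0 and read the state back.
+ */
+static ret_t example_block_port_msti0(rtk_uint32 port)
+{
+    rtk_uint32 state;
+    ret_t retVal;
+
+    if((retVal = rtl8367c_setAsicSpanningTreeStatus(port, 0, STPST_BLOCKING)) != RT_ERR_OK)
+        return retVal;
+
+    return rtl8367c_getAsicSpanningTreeStatus(port, 0, &state);
+}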
+
+/* Function Name:
+ *      rtl8367c_setAsicVlanTransparent
+ * Description:
+ *      Set VLAN transparent
+ * Input:
+ *      port        - Physical port number (0~10)
+ *      portmask    - portmask (0 ~ RTL8367C_PORTMASK)
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - Success
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_PORT_MASK    - Invalid portmask
+ *      RT_ERR_PORT_ID      - Invalid port number
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_setAsicVlanTransparent(rtk_uint32 port, rtk_uint32 portmask)
+{
+    if(port > RTL8367C_PORTIDMAX)
+        return RT_ERR_PORT_ID;
+
+    if(portmask > RTL8367C_PORTMASK)
+        return RT_ERR_PORT_MASK;
+
+    return rtl8367c_setAsicRegBits(RTL8367C_REG_VLAN_EGRESS_TRANS_CTRL0 + port, RTL8367C_VLAN_EGRESS_TRANS_CTRL0_MASK, portmask);
+}
+
+/* Function Name:
+ *      rtl8367c_getAsicVlanTransparent
+ * Description:
+ *      Get VLAN transparent
+ * Input:
+ *      port        - Physical port number (0~10)
+ * Output:
+ *      pPortmask   - Ingress port mask
+ * Return:
+ *      RT_ERR_OK           - Success
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_PORT_MASK    - Invalid portmask
+ *      RT_ERR_PORT_ID      - Invalid port number
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicVlanTransparent(rtk_uint32 port, rtk_uint32 *pPortmask)
+{
+    if(port > RTL8367C_PORTIDMAX)
+        return RT_ERR_PORT_ID;
+
+    return rtl8367c_getAsicRegBits(RTL8367C_REG_VLAN_EGRESS_TRANS_CTRL0 + port, RTL8367C_VLAN_EGRESS_TRANS_CTRL0_MASK, pPortmask);
+}
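+
+/*
+ * Usage sketch (illustrative, not part of the vendor SDK): let frames
+ * received on a given source port egress a given port without any VLAN
+ * modification (transparent mode).
+ */
+static ret_t example_transparent_from(rtk_uint32 egress_port, rtk_uint32 src_port)
+{
+    return rtl8367c_setAsicVlanTransparent(egress_port, 1 << src_port);
+}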
+
+/* Function Name:
+ *      rtl8367c_setAsicVlanEgressKeep
+ * Description:
+ *      Set per egress port VLAN keep mode
+ * Input:
+ *      port        - Physical port number (0~10)
+ *      portmask    - portmask (0 ~ RTL8367C_PORTMASK)
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - Success
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_PORT_MASK    - Invalid portmask
+ *      RT_ERR_PORT_ID      - Invalid port number
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_setAsicVlanEgressKeep(rtk_uint32 port, rtk_uint32 portmask)
+{
+    rtk_uint32 regAddr, bit_mask;
+    ret_t  retVal;
+
+    if(port > RTL8367C_PORTIDMAX)
+        return RT_ERR_PORT_ID;
+
+    if(portmask > RTL8367C_PORTMASK)
+        return RT_ERR_PORT_MASK;
+
+    if(port < 8){
+        retVal = rtl8367c_setAsicRegBits(RTL8367C_REG_VLAN_EGRESS_KEEP_CTRL0 + (port>>1),RTL8367C_PORT0_VLAN_KEEP_MASK_MASK<<((port&1)*8),portmask & 0xff);
+        if(retVal != RT_ERR_OK)
+            return retVal;
+        regAddr = RTL8367C_REG_VLAN_EGRESS_KEEP_CTRL0_EXT + (port>>1);
+        bit_mask = RTL8367C_PORT0_VLAN_KEEP_MASK_EXT_MASK;
+        bit_mask <<= (port&1)*3;
+        retVal = rtl8367c_setAsicRegBits(regAddr, bit_mask, (portmask>>8)&0x7);
+        if(retVal != RT_ERR_OK)
+            return retVal;
+    }
+    else{
+        switch(port){
+            case 8:
+                regAddr = RTL8367C_REG_VLAN_EGRESS_KEEP_CTRL4;
+                bit_mask = RTL8367C_PORT8_VLAN_KEEP_MASK_MASK;
+                retVal = rtl8367c_setAsicRegBits(regAddr, bit_mask, portmask & 0xff);
+                if(retVal != RT_ERR_OK)
+                    return retVal;
+                regAddr = RTL8367C_REG_VLAN_EGRESS_KEEP_CTRL4_EXT;
+                bit_mask = RTL8367C_PORT8_VLAN_KEEP_MASK_EXT_MASK;
+                retVal = rtl8367c_setAsicRegBits(regAddr, bit_mask, (portmask>>8)&0x7);
+                if(retVal != RT_ERR_OK)
+                    return retVal;
+                break;
+
+            case 9:
+                regAddr = RTL8367C_REG_VLAN_EGRESS_KEEP_CTRL4;
+                bit_mask = RTL8367C_PORT9_VLAN_KEEP_MASK_MASK;
+                retVal = rtl8367c_setAsicRegBits(regAddr, bit_mask, portmask & 0xff);
+                if(retVal != RT_ERR_OK)
+                    return retVal;
+                regAddr = RTL8367C_REG_VLAN_EGRESS_KEEP_CTRL4_EXT;
+                bit_mask = RTL8367C_PORT9_VLAN_KEEP_MASK_EXT_MASK;
+                retVal = rtl8367c_setAsicRegBits(regAddr, bit_mask, (portmask>>8)&0x7);
+                if(retVal != RT_ERR_OK)
+                    return retVal;
+                break;
+
+            case 10:
+                regAddr = RTL8367C_REG_VLAN_EGRESS_KEEP_CTRL5;
+                bit_mask = RTL8367C_VLAN_EGRESS_KEEP_CTRL5_MASK;
+                retVal = rtl8367c_setAsicRegBits(regAddr, bit_mask, portmask & 0xff);
+                if(retVal != RT_ERR_OK)
+                    return retVal;
+                regAddr = RTL8367C_REG_VLAN_EGRESS_KEEP_CTRL5_EXT;
+                bit_mask = RTL8367C_VLAN_EGRESS_KEEP_CTRL5_EXT_MASK;
+                retVal = rtl8367c_setAsicRegBits(regAddr, bit_mask, (portmask>>8)&0x7);
+                if(retVal != RT_ERR_OK)
+                    return retVal;
+                break;
+        }
+    }
+
+    return RT_ERR_OK;
+}
+
+/* Function Name:
+ *      rtl8367c_getAsicVlanEgressKeep
+ * Description:
+ *      Get per egress port VLAN keep mode
+ * Input:
+ *      port        - Physical port number (0~10)
+ * Output:
+ *      pPortmask   - portmask (0 ~ RTL8367C_PORTMASK)
+ * Return:
+ *      RT_ERR_OK       - Success
+ *      RT_ERR_SMI      - SMI access error
+ *      RT_ERR_PORT_ID  - Invalid port number
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getAsicVlanEgressKeep(rtk_uint32 port, rtk_uint32* pPortmask)
+{
+    rtk_uint32 regAddr, bit_mask, regval_l, regval_h;
+    ret_t  retVal;
+
+    if(port > RTL8367C_PORTIDMAX)
+        return RT_ERR_PORT_ID;
+
+    if(port < 8){
+        retVal = rtl8367c_getAsicRegBits(RTL8367C_REG_VLAN_EGRESS_KEEP_CTRL0 + (port>>1),RTL8367C_PORT0_VLAN_KEEP_MASK_MASK<<((port&1)*8),&regval_l);
+        if(retVal != RT_ERR_OK)
+            return retVal;
+        regAddr = RTL8367C_REG_VLAN_EGRESS_KEEP_CTRL0_EXT + (port>>1);
+        bit_mask = RTL8367C_PORT0_VLAN_KEEP_MASK_EXT_MASK;
+        bit_mask <<= (port&1)*3;
+        retVal = rtl8367c_getAsicRegBits(regAddr, bit_mask, &regval_h);
+        if(retVal != RT_ERR_OK)
+            return retVal;
+        *pPortmask = (regval_h << 8) | regval_l;
+    }
+    else{
+        switch(port){
+            case 8:
+                regAddr = RTL8367C_REG_VLAN_EGRESS_KEEP_CTRL4;
+                bit_mask = RTL8367C_PORT8_VLAN_KEEP_MASK_MASK;
+                retVal = rtl8367c_getAsicRegBits(regAddr, bit_mask, &regval_l);
+                if(retVal != RT_ERR_OK)
+                    return retVal;
+                regAddr = RTL8367C_REG_VLAN_EGRESS_KEEP_CTRL4_EXT;
+                bit_mask = RTL8367C_PORT8_VLAN_KEEP_MASK_EXT_MASK;
+                retVal = rtl8367c_getAsicRegBits(regAddr, bit_mask, &regval_h);
+                if(retVal != RT_ERR_OK)
+                    return retVal;
+
+                *pPortmask = (regval_h << 8) | regval_l;
+                break;
+
+            case 9:
+                regAddr = RTL8367C_REG_VLAN_EGRESS_KEEP_CTRL4;
+                bit_mask = RTL8367C_PORT9_VLAN_KEEP_MASK_MASK;
+                retVal = rtl8367c_getAsicRegBits(regAddr, bit_mask, &regval_l);
+                if(retVal != RT_ERR_OK)
+                    return retVal;
+                regAddr = RTL8367C_REG_VLAN_EGRESS_KEEP_CTRL4_EXT;
+                bit_mask = RTL8367C_PORT9_VLAN_KEEP_MASK_EXT_MASK;
+                retVal = rtl8367c_getAsicRegBits(regAddr, bit_mask, &regval_h);
+                if(retVal != RT_ERR_OK)
+                    return retVal;
+
+                *pPortmask = (regval_h << 8) | regval_l;
+                break;
+
+            case 10:
+                regAddr = RTL8367C_REG_VLAN_EGRESS_KEEP_CTRL5;
+                bit_mask = RTL8367C_VLAN_EGRESS_KEEP_CTRL5_MASK;
+                retVal = rtl8367c_getAsicRegBits(regAddr, bit_mask, &regval_l);
+                if(retVal != RT_ERR_OK)
+                    return retVal;
+                regAddr = RTL8367C_REG_VLAN_EGRESS_KEEP_CTRL5_EXT;
+                bit_mask = RTL8367C_VLAN_EGRESS_KEEP_CTRL5_EXT_MASK;
+                retVal = rtl8367c_getAsicRegBits(regAddr, bit_mask, &regval_h);
+                if(retVal != RT_ERR_OK)
+                    return retVal;
+
+                *pPortmask = (regval_h << 8) | regval_l;
+                break;
+        }
+    }
+
+    return RT_ERR_OK;
+}
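+
+/*
+ * Usage sketch (illustrative, not part of the vendor SDK): the keep mask is
+ * an 11-bit source-port mask that the hardware stores split across a low
+ * byte and a 3-bit extension register; the accessors above hide that split.
+ * This keeps VLAN tags intact between two ports in both directions.
+ */
+static ret_t example_keep_between(rtk_uint32 portA, rtk_uint32 portB)
+{
+    ret_t retVal;
+
+    if((retVal = rtl8367c_setAsicVlanEgressKeep(portA, 1 << portB)) != RT_ERR_OK)
+        return retVal;
+
+    return rtl8367c_setAsicVlanEgressKeep(portB, 1 << portA);
+}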
+
+/* Function Name:
+ *      rtl8367c_setReservedVidAction
+ * Description:
+ *      Set reserved VID action
+ * Input:
+ *      vid0Action      - VID 0 action
+ *      vid4095Action   - VID 4095 action
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK       - Success
+ *      RT_ERR_SMI      - SMI access error
+ *      RT_ERR_INPUT    - Error input
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_setReservedVidAction(rtk_uint32 vid0Action, rtk_uint32 vid4095Action)
+{
+    ret_t   retVal;
+
+    if(vid0Action >= RES_VID_ACT_END)
+        return RT_ERR_INPUT;
+
+    if(vid4095Action >= RES_VID_ACT_END)
+        return RT_ERR_INPUT;
+
+    if((retVal = rtl8367c_setAsicRegBit(RTL8367C_REG_VLAN_EXT_CTRL, RTL8367C_VLAN_VID0_TYPE_OFFSET, vid0Action)) != RT_ERR_OK)
+        return retVal;
+
+    if((retVal = rtl8367c_setAsicRegBit(RTL8367C_REG_VLAN_EXT_CTRL, RTL8367C_VLAN_VID4095_TYPE_OFFSET, vid4095Action)) != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+/* Function Name:
+ *      rtl8367c_getReservedVidAction
+ * Description:
+ *      Get reserved VID action
+ * Input:
+ *      None
+ * Output:
+ *      pVid0Action     - VID 0 action
+ *      pVid4095Action  - VID 4095 action
+ * Return:
+ *      RT_ERR_OK           - Success
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_NULL_POINTER - Null pointer
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getReservedVidAction(rtk_uint32 *pVid0Action, rtk_uint32 *pVid4095Action)
+{
+    ret_t   retVal;
+
+    if(pVid0Action == NULL)
+        return RT_ERR_NULL_POINTER;
+
+    if(pVid4095Action == NULL)
+        return RT_ERR_NULL_POINTER;
+
+    if((retVal = rtl8367c_getAsicRegBit(RTL8367C_REG_VLAN_EXT_CTRL, RTL8367C_VLAN_VID0_TYPE_OFFSET, pVid0Action)) != RT_ERR_OK)
+        return retVal;
+
+    if((retVal = rtl8367c_getAsicRegBit(RTL8367C_REG_VLAN_EXT_CTRL, RTL8367C_VLAN_VID4095_TYPE_OFFSET, pVid4095Action)) != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+
+}
+
+/* Function Name:
+ *      rtl8367c_setRealKeepRemarkEn
+ * Description:
+ *      Set Real Keep Remark
+ * Input:
+ *      enabled         - 0: 802.1p remarking is forbidden for real keep packets, 1: 802.1p remarking is enabled for real keep packets
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK       - Success
+ *      RT_ERR_SMI      - SMI access error
+ *      RT_ERR_INPUT    - Error input
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_setRealKeepRemarkEn(rtk_uint32 enabled)
+{
+    ret_t   retVal;
+
+    if((retVal = rtl8367c_setAsicRegBit(RTL8367C_REG_VLAN_EXT_CTRL, RTL8367C_VLAN_1P_REMARK_BYPASS_REALKEEP_OFFSET, enabled)) != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+/* Function Name:
+ *      rtl8367c_getRealKeepRemarkEn
+ * Description:
+ *      Get Real Keep Remark
+ * Input:
+ *      None
+ * Output:
+ *      pEnabled        - 0: 802.1p remarking is forbidden for real keep packets, 1: 802.1p remarking is enabled for real keep packets
+ * Return:
+ *      RT_ERR_OK       - Success
+ *      RT_ERR_SMI      - SMI access error
+ *      RT_ERR_INPUT    - Error input
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_getRealKeepRemarkEn(rtk_uint32 *pEnabled)
+{
+    ret_t   retVal;
+
+    if((retVal = rtl8367c_getAsicRegBit(RTL8367C_REG_VLAN_EXT_CTRL, RTL8367C_VLAN_1P_REMARK_BYPASS_REALKEEP_OFFSET, pEnabled)) != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+/* Function Name:
+ *      rtl8367c_resetVlan
+ * Description:
+ *      Reset VLAN table
+ * Input:
+ *      None.
+ * Output:
+ *      None.
+ * Return:
+ *      RT_ERR_OK       - Success
+ *      RT_ERR_SMI      - SMI access error
+ * Note:
+ *      None
+ */
+ret_t rtl8367c_resetVlan(void)
+{
+    ret_t   retVal;
+
+    if((retVal = rtl8367c_setAsicRegBit(RTL8367C_REG_VLAN_EXT_CTRL2, RTL8367C_VLAN_EXT_CTRL2_OFFSET, 1)) != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
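+
+/*
+ * Usage sketch (illustrative, not part of the vendor SDK): a minimal VLAN
+ * bring-up order -- reset the tables first, then enable CVLAN filtering,
+ * and treat the reserved VIDs 0 and 4095 as untagged traffic.
+ */
+static ret_t example_vlan_bringup(void)
+{
+    ret_t retVal;
+
+    if((retVal = rtl8367c_resetVlan()) != RT_ERR_OK)
+        return retVal;
+
+    if((retVal = rtl8367c_setAsicVlanFilter(1)) != RT_ERR_OK)
+        return retVal;
+
+    return rtl8367c_setReservedVidAction(RES_VID_ACT_UNTAG, RES_VID_ACT_UNTAG);
+}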
+
diff -Nruw linux-4.4.115-fbx/drivers/misc/freebox./rtlapi/rtl8367c_asicdrv_vlan.h linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/rtl8367c_asicdrv_vlan.h
--- linux-4.4.115-fbx/drivers/misc/freebox./rtlapi/rtl8367c_asicdrv_vlan.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/rtl8367c_asicdrv_vlan.h	2019-01-22 16:16:24.719257418 +0100
@@ -0,0 +1,139 @@
+#ifndef _RTL8367C_ASICDRV_VLAN_H_
+#define _RTL8367C_ASICDRV_VLAN_H_
+
+/****************************************************************/
+/* Header File inclusion                                        */
+/****************************************************************/
+#include <rtl8367c_asicdrv.h>
+
+/****************************************************************/
+/* Constant Definition                                          */
+/****************************************************************/
+#define RTL8367C_PROTOVLAN_GIDX_MAX 3
+#define RTL8367C_PROTOVLAN_GROUPNO  4
+
+#define RTL8367C_VLAN_BUSY_CHECK_NO     (10)
+
+#define RTL8367C_VLAN_MBRCFG_LEN    (4)
+#define RTL8367C_VLAN_4KTABLE_LEN   (3)
+
+/****************************************************************/
+/* Type Definition                                              */
+/****************************************************************/
+typedef struct  VLANCONFIGUSER
+{
+    rtk_uint16  evid;
+    rtk_uint16  mbr;
+    rtk_uint16  fid_msti;
+    rtk_uint16  envlanpol;
+    rtk_uint16  meteridx;
+    rtk_uint16  vbpen;
+    rtk_uint16  vbpri;
+}rtl8367c_vlanconfiguser;
+
+typedef struct  USER_VLANTABLE{
+
+    rtk_uint16  vid;
+    rtk_uint16  mbr;
+    rtk_uint16  untag;
+    rtk_uint16  fid_msti;
+    rtk_uint16  envlanpol;
+    rtk_uint16  meteridx;
+    rtk_uint16  vbpen;
+    rtk_uint16  vbpri;
+    rtk_uint16  ivl_svl;
+
+}rtl8367c_user_vlan4kentry;
+
+typedef enum
+{
+    FRAME_TYPE_BOTH = 0,
+    FRAME_TYPE_TAGGED_ONLY,
+    FRAME_TYPE_UNTAGGED_ONLY,
+    FRAME_TYPE_MAX_BOUND
+} rtl8367c_accframetype;
+
+typedef enum
+{
+    EG_TAG_MODE_ORI = 0,
+    EG_TAG_MODE_KEEP,
+    EG_TAG_MODE_PRI_TAG,
+    EG_TAG_MODE_REAL_KEEP,
+    EG_TAG_MODE_END
+} rtl8367c_egtagmode;
+
+typedef enum
+{
+    PPVLAN_FRAME_TYPE_ETHERNET = 0,
+    PPVLAN_FRAME_TYPE_LLC,
+    PPVLAN_FRAME_TYPE_RFC1042,
+    PPVLAN_FRAME_TYPE_END
+} rtl8367c_provlan_frametype;
+
+enum RTL8367C_STPST
+{
+    STPST_DISABLED = 0,
+    STPST_BLOCKING,
+    STPST_LEARNING,
+    STPST_FORWARDING
+};
+
+enum RTL8367C_RESVIDACT
+{
+    RES_VID_ACT_UNTAG = 0,
+    RES_VID_ACT_TAG,
+    RES_VID_ACT_END
+};
+
+typedef struct
+{
+    rtl8367c_provlan_frametype  frameType;
+    rtk_uint32                      etherType;
+} rtl8367c_protocolgdatacfg;
+
+typedef struct
+{
+    rtk_uint32 valid;
+    rtk_uint32 vlan_idx;
+    rtk_uint32 priority;
+} rtl8367c_protocolvlancfg;
+
+extern ret_t rtl8367c_setAsicVlanMemberConfig(rtk_uint32 index, rtl8367c_vlanconfiguser *pVlanCg);
+extern ret_t rtl8367c_getAsicVlanMemberConfig(rtk_uint32 index, rtl8367c_vlanconfiguser *pVlanCg);
+extern ret_t rtl8367c_setAsicVlan4kEntry(rtl8367c_user_vlan4kentry *pVlan4kEntry );
+extern ret_t rtl8367c_getAsicVlan4kEntry(rtl8367c_user_vlan4kentry *pVlan4kEntry );
+extern ret_t rtl8367c_setAsicVlanAccpetFrameType(rtk_uint32 port, rtl8367c_accframetype frameType);
+extern ret_t rtl8367c_getAsicVlanAccpetFrameType(rtk_uint32 port, rtl8367c_accframetype *pFrameType);
+extern ret_t rtl8367c_setAsicVlanIngressFilter(rtk_uint32 port, rtk_uint32 enabled);
+extern ret_t rtl8367c_getAsicVlanIngressFilter(rtk_uint32 port, rtk_uint32 *pEnable);
+extern ret_t rtl8367c_setAsicVlanEgressTagMode(rtk_uint32 port, rtl8367c_egtagmode tagMode);
+extern ret_t rtl8367c_getAsicVlanEgressTagMode(rtk_uint32 port, rtl8367c_egtagmode *pTagMode);
+extern ret_t rtl8367c_setAsicVlanPortBasedVID(rtk_uint32 port, rtk_uint32 index, rtk_uint32 pri);
+extern ret_t rtl8367c_getAsicVlanPortBasedVID(rtk_uint32 port, rtk_uint32 *pIndex, rtk_uint32 *pPri);
+extern ret_t rtl8367c_setAsicVlanProtocolBasedGroupData(rtk_uint32 index, rtl8367c_protocolgdatacfg *pPbCfg);
+extern ret_t rtl8367c_getAsicVlanProtocolBasedGroupData(rtk_uint32 index, rtl8367c_protocolgdatacfg *pPbCfg);
+extern ret_t rtl8367c_setAsicVlanPortAndProtocolBased(rtk_uint32 port, rtk_uint32 index, rtl8367c_protocolvlancfg *pPpbCfg);
+extern ret_t rtl8367c_getAsicVlanPortAndProtocolBased(rtk_uint32 port, rtk_uint32 index, rtl8367c_protocolvlancfg *pPpbCfg);
+extern ret_t rtl8367c_setAsicVlanFilter(rtk_uint32 enabled);
+extern ret_t rtl8367c_getAsicVlanFilter(rtk_uint32* pEnabled);
+
+extern ret_t rtl8367c_setAsicPortBasedFid(rtk_uint32 port, rtk_uint32 fid);
+extern ret_t rtl8367c_getAsicPortBasedFid(rtk_uint32 port, rtk_uint32* pFid);
+extern ret_t rtl8367c_setAsicPortBasedFidEn(rtk_uint32 port, rtk_uint32 enabled);
+extern ret_t rtl8367c_getAsicPortBasedFidEn(rtk_uint32 port, rtk_uint32* pEnabled);
+extern ret_t rtl8367c_setAsicSpanningTreeStatus(rtk_uint32 port, rtk_uint32 msti, rtk_uint32 state);
+extern ret_t rtl8367c_getAsicSpanningTreeStatus(rtk_uint32 port, rtk_uint32 msti, rtk_uint32* pState);
+extern ret_t rtl8367c_setAsicVlanUntagDscpPriorityEn(rtk_uint32 enabled);
+extern ret_t rtl8367c_getAsicVlanUntagDscpPriorityEn(rtk_uint32* enabled);
+extern ret_t rtl8367c_setAsicVlanTransparent(rtk_uint32 port, rtk_uint32 portmask);
+extern ret_t rtl8367c_getAsicVlanTransparent(rtk_uint32 port, rtk_uint32 *pPortmask);
+extern ret_t rtl8367c_setAsicVlanEgressKeep(rtk_uint32 port, rtk_uint32 portmask);
+extern ret_t rtl8367c_getAsicVlanEgressKeep(rtk_uint32 port, rtk_uint32* pPortmask);
+extern ret_t rtl8367c_setReservedVidAction(rtk_uint32 vid0Action, rtk_uint32 vid4095Action);
+extern ret_t rtl8367c_getReservedVidAction(rtk_uint32 *pVid0Action, rtk_uint32 *pVid4095Action);
+extern ret_t rtl8367c_setRealKeepRemarkEn(rtk_uint32 enabled);
+extern ret_t rtl8367c_getRealKeepRemarkEn(rtk_uint32 *pEnabled);
+extern ret_t rtl8367c_resetVlan(void);
+
+#endif /*#ifndef _RTL8367C_ASICDRV_VLAN_H_*/
+
diff -Nruw linux-4.4.115-fbx/drivers/misc/freebox./rtlapi/rtl8367c_base.h linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/rtl8367c_base.h
--- linux-4.4.115-fbx/drivers/misc/freebox./rtlapi/rtl8367c_base.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/rtl8367c_base.h	2019-01-22 16:16:24.719257418 +0100
@@ -0,0 +1,579 @@
+#ifndef _RTL8367C_BASE_H_
+#define _RTL8367C_BASE_H_
+
+#include <rtl8367c_reg.h>
+
+/* (16'h0000) port_reg */
+
+#define    RTL8367C_PORT_SPECIAL_CONGEST_MODE_TIMER_BASE        RTL8367C_REG_PKTGEN_PORT0_TIMER
+#define    RTL8367C_PORT_SPECIAL_CONGEST_MODE_TIMER_REG(port)    (RTL8367C_PORT_SPECIAL_CONGEST_MODE_TIMER_BASE + (port << 5))
+
+#define    RTL8367C_PORT_MISC_CFG_BASE                            RTL8367C_REG_PORT0_MISC_CFG
+#define    RTL8367C_PORT_MISC_CFG_REG(port)                        (RTL8367C_PORT_MISC_CFG_BASE + (port << 5))
+#define    RTL8367C_1QREMARK_ENABLE_OFFSET                         RTL8367C_PORT0_MISC_CFG_DOT1Q_REMARK_ENABLE_OFFSET
+#define    RTL8367C_1QREMARK_ENABLE_MASK                        RTL8367C_PORT0_MISC_CFG_DOT1Q_REMARK_ENABLE_MASK
+
+#define    RTL8367C_INGRESSBW_PORT_IFG_MASK                        RTL8367C_PORT0_MISC_CFG_INGRESSBW_IFG_MASK
+#define    RTL8367C_VLAN_EGRESS_MDOE_MASK                        RTL8367C_PORT0_MISC_CFG_VLAN_EGRESS_MODE_MASK
+#define    RTL8367C_SPECIALCONGEST_SUSTAIN_TIMER_MASK            RTL8367C_PORT0_MISC_CFG_CONGESTION_SUSTAIN_TIME_MASK
+
+#define    RTL8367C_INGRESSBW_PORT_RATE_LSB_BASE                RTL8367C_REG_INGRESSBW_PORT0_RATE_CTRL0
+#define    RTL8367C_INGRESSBW_PORT_RATE_LSB_REG(port)            (RTL8367C_INGRESSBW_PORT_RATE_LSB_BASE + (port << 5))
+
+#define    RTL8367C_PORT_SMALL_IPG_REG(port)                    (RTL8367C_REG_PORT0_MISC_CFG + (port*0x20))
+
+#define    RTL8367C_PORT_EEE_CFG_BASE                           RTL8367C_REG_PORT0_EEECFG
+#define    RTL8367C_PORT_EEE_CFG_REG(port)                      (RTL8367C_REG_PORT0_EEECFG + (port << 5))
+#define    RTL8367C_PORT_EEE_100M_OFFSET                        RTL8367C_PORT0_EEECFG_EEE_100M_OFFSET
+#define    RTL8367C_PORT_EEE_100M_MASK                          RTL8367C_PORT0_EEECFG_EEE_100M_MASK
+#define    RTL8367C_PORT_EEE_GIGA_OFFSET                        RTL8367C_PORT0_EEECFG_EEE_GIGA_500M_OFFSET
+#define    RTL8367C_PORT_EEE_GIGA_MASK                          RTL8367C_PORT0_EEECFG_EEE_GIGA_500M_MASK
+
+
+/* (16'h0200) outq_reg */
+
+#define    RTL8367C_FLOWCTRL_QUEUE_DROP_ON_BASE                    RTL8367C_REG_FLOWCTRL_QUEUE0_DROP_ON
+#define    RTL8367C_FLOWCTRL_QUEUE_DROP_ON_REG(queue)            (RTL8367C_FLOWCTRL_QUEUE_DROP_ON_BASE + queue)
+#define    RTL8367C_FLOWCTRL_QUEUE_DROP_ON_MASK                    RTL8367C_FLOWCTRL_QUEUE0_DROP_ON_MASK
+
+#define    RTL8367C_FLOWCTRL_PORT_DROP_ON_BASE                    RTL8367C_REG_FLOWCTRL_PORT0_DROP_ON
+#define    RTL8367C_FLOWCTRL_PORT_DROP_ON_REG(PORT)                (RTL8367C_FLOWCTRL_PORT_DROP_ON_BASE + PORT)
+#define    RTL8367C_FLOWCTRL_PORT_DROP_ON_MASK                    RTL8367C_FLOWCTRL_PORT0_DROP_ON_MASK
+
+#define    RTL8367C_FLOWCTRL_PORT_GAP_REG                        RTL8367C_REG_FLOWCTRL_PORT_GAP
+#define    RTL8367C_FLOWCTRL_QUEUE_GAP_REG                        RTL8367C_REG_FLOWCTRL_QUEUE_GAP
+#define    RTL8367C_FLOWCTRL_PORT_QEMPTY_REG                    RTL8367C_REG_PORT_QEMPTY
+
+/* (16'h0300) sch_reg */
+
+#define    RTL8367C_SCHEDULE_WFQ_BURST_SIZE_REG                    RTL8367C_REG_SCHEDULE_WFQ_BURST_SIZE
+
+#define    RTL8367C_SCHEDULE_QUEUE_TYPE_BASE                    RTL8367C_REG_SCHEDULE_QUEUE_TYPE_CTRL0
+#define    RTL8367C_SCHEDULE_QUEUE_TYPE_REG(port)                (RTL8367C_SCHEDULE_QUEUE_TYPE_BASE + (port >> 1))
+#define    RTL8367C_SCHEDULE_QUEUE_TYPE_OFFSET(port, queue)        (((port & 0x1) << 3) + queue)
+#define    RTL8367C_SCHEDULE_QUEUE_TYPE_MASK(port, queue)         RTL8367C_SCHEDULE_QUEUE_TYPE_OFFSET(port, queue)
+
+#define    RTL8367C_SCHEDULE_PORT_QUEUE_WFQ_WEIGHT_BASE            RTL8367C_REG_SCHEDULE_PORT0_QUEUE0_WFQ_WEIGHT
+#define    RTL8367C_SCHEDULE_PORT_QUEUE_WFQ_WEIGHT_REG(port, queue)    (RTL8367C_SCHEDULE_PORT_QUEUE_WFQ_WEIGHT_BASE + (port << 3) + queue)
+
+#define    RTL8367C_SCHEDULE_APR_CTRL_REG                       RTL8367C_REG_SCHEDULE_APR_CTRL0
+#define    RTL8367C_SCHEDULE_APR_CTRL_OFFSET(port)                (port)
+#define    RTL8367C_SCHEDULE_APR_CTRL_MASK(port)                (1 << RTL8367C_SCHEDULE_APR_CTRL_OFFSET(port))
+
+#define    RTL8367C_SCHEDULE_PORT_APR_METER_BASE                RTL8367C_REG_SCHEDULE_PORT0_APR_METER_CTRL0
+#define    RTL8367C_SCHEDULE_PORT_APR_METER_REG(port, queue)    (RTL8367C_SCHEDULE_PORT_APR_METER_BASE + (port << 2) + (queue / 5))
+#define    RTL8367C_SCHEDULE_PORT_APR_METER_OFFSET(queue)        (3 * (queue % 5))
+#define    RTL8367C_SCHEDULE_PORT_APR_METER_MASK(queue)            (RTL8367C_SCHEDULE_PORT0_APR_METER_CTRL0_QUEUE0_APR_METER_MASK << RTL8367C_SCHEDULE_PORT_APR_METER_OFFSET(queue))
+
+#define    RTL8367C_PORT_EGRESSBW_LSB_BASE                        RTL8367C_REG_PORT0_EGRESSBW_CTRL0
+#define    RTL8367C_PORT_EGRESSBW_LSB_REG(port)                    (RTL8367C_PORT_EGRESSBW_LSB_BASE + (port << 1))
+
+#define    RTL8367C_PORT_EGRESSBW_MSB_BASE                        RTL8367C_REG_PORT0_EGRESSBW_CTRL1
+#define    RTL8367C_PORT_EGRESSBW_MSB_REG(port)                    (RTL8367C_PORT_EGRESSBW_MSB_BASE + (port << 1))
+
+/* (16'h0500) table_reg */
+
+#define    RTL8367C_TABLE_ACCESS_CTRL_REG                        RTL8367C_REG_TABLE_ACCESS_CTRL
+
+#define    RTL8367C_TABLE_ACCESS_ADDR_REG                        RTL8367C_REG_TABLE_ACCESS_ADDR
+
+#define    RTL8367C_TABLE_ACCESS_STATUS_REG                        RTL8367C_REG_TABLE_LUT_ADDR
+
+#define    RTL8367C_TABLE_ACCESS_WRDATA_BASE                    RTL8367C_REG_TABLE_WRITE_DATA0
+#define    RTL8367C_TABLE_ACCESS_WRDATA_REG(index)                (RTL8367C_TABLE_ACCESS_WRDATA_BASE + index)
+
+#define    RTL8367C_TABLE_ACCESS_RDDATA_BASE                    RTL8367C_REG_TABLE_READ_DATA0
+#define    RTL8367C_TABLE_ACCESS_RDDATA_REG(index)                (RTL8367C_TABLE_ACCESS_RDDATA_BASE + index)
+
+
+
+/* (16'h0600) acl_reg */
+
+#define    RTL8367C_ACL_RULE_TEMPLATE_CTRL_BASE                    RTL8367C_REG_ACL_RULE_TEMPLATE0_CTRL0
+#define    RTL8367C_ACL_RULE_TEMPLATE_CTRL_REG(template)        (RTL8367C_ACL_RULE_TEMPLATE_CTRL_BASE + template * 0x4)
+#define    RTL8367C_ACL_TEMPLATE_FIELD_OFFSET(field)            ((field & 0x01) <<3)
+#define    RTL8367C_ACL_TEMPLATE_FIELD_MASK(field)                (0x3F << RTL8367C_ACL_TEMPLATE_FIELD_OFFSET(field))
+
+#define    RTL8367C_ACL_ACTION_CTRL_BASE                        RTL8367C_REG_ACL_ACTION_CTRL0
+#define    RTL8367C_ACL_ACTION_CTRL_REG(rule)                   (RTL8367C_ACL_ACTION_CTRL_BASE + (rule >> 1))
+#define    RTL8367C_ACL_ACTION_CTRL2_BASE                        RTL8367C_REG_ACL_ACTION_CTRL32
+#define    RTL8367C_ACL_ACTION_CTRL2_REG(rule)                  (RTL8367C_ACL_ACTION_CTRL2_BASE + ((rule-64) >> 1))
+
+#define    RTL8367C_ACL_OP_NOT_OFFSET(rule)                        (6 + ((rule & 0x1) << 3))
+#define    RTL8367C_ACL_OP_NOT_MASK(rule)                        (1 << RTL8367C_ACL_OP_NOT_OFFSET(rule))
+#define    RTL8367C_ACL_OP_ACTION_OFFSET(rule)                    ((rule & 0x1) << 3)
+#define    RTL8367C_ACL_OP_ACTION_MASK(rule)                    (0x3F << RTL8367C_ACL_OP_ACTION_OFFSET(rule))
+
+#define    RTL8367C_ACL_ENABLE_REG                                RTL8367C_REG_ACL_ENABLE
+#define    RTL8367C_ACL_UNMATCH_PERMIT_REG                        RTL8367C_REG_ACL_UNMATCH_PERMIT
+
+/* (16'h0700) cvlan_reg */
+
+#define    RTL8367C_VLAN_PVID_CTRL_BASE                            RTL8367C_REG_VLAN_PVID_CTRL0
+#define    RTL8367C_VLAN_PVID_CTRL_REG(port)                    (RTL8367C_VLAN_PVID_CTRL_BASE + (port >> 1))
+#define    RTL8367C_PORT_VIDX_OFFSET(port)                        ((port &1)<<3)
+#define    RTL8367C_PORT_VIDX_MASK(port)                        (RTL8367C_PORT0_VIDX_MASK << RTL8367C_PORT_VIDX_OFFSET(port))
+
+#define    RTL8367C_VLAN_PPB_VALID_BASE                            RTL8367C_REG_VLAN_PPB0_VALID
+#define    RTL8367C_VLAN_PPB_VALID_REG(item)                    (RTL8367C_VLAN_PPB_VALID_BASE + (item << 3))
+
+#define    RTL8367C_VLAN_PPB_CTRL_BASE                            RTL8367C_REG_VLAN_PPB0_CTRL0
+#define    RTL8367C_VLAN_PPB_CTRL_REG(item, port)               (RTL8367C_VLAN_PPB_CTRL_BASE + (item << 3) + (port / 3) )
+#define    RTL8367C_VLAN_PPB_CTRL_OFFSET(port)                    ((port % 3) * 5)
+#define    RTL8367C_VLAN_PPB_CTRL_MASK(port)                    (RTL8367C_VLAN_PPB0_CTRL0_PORT0_INDEX_MASK << RTL8367C_VLAN_PPB_CTRL_OFFSET(port))
+
+#define    RTL8367C_VLAN_PPB_FRAMETYPE_BASE                    RTL8367C_REG_VLAN_PPB0_CTRL2
+#define    RTL8367C_VLAN_PPB_FRAMETYPE_REG(item)               (RTL8367C_VLAN_PPB_FRAMETYPE_BASE + (item << 3))
+#define    RTL8367C_VLAN_PPB_FRAMETYPE_MASK                    RTL8367C_VLAN_PPB0_CTRL2_FRAME_TYPE_MASK
+
+#define    RTL8367C_VLAN_PPB_ETHERTYPR_BASE                        RTL8367C_REG_VLAN_PPB0_CTRL3
+#define    RTL8367C_VLAN_PPB_ETHERTYPR_REG(item)                (RTL8367C_VLAN_PPB_ETHERTYPR_BASE + (item << 3))
+
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION_BASE                RTL8367C_REG_VLAN_MEMBER_CONFIGURATION0_CTRL0
+
+
+#define    RTL8367C_VLAN_CTRL_REG                                RTL8367C_REG_VLAN_CTRL
+
+#define    RTL8367C_VLAN_INGRESS_REG                            RTL8367C_REG_VLAN_INGRESS
+
+#define    RTL8367C_VLAN_ACCEPT_FRAME_TYPE_BASE                    RTL8367C_REG_VLAN_ACCEPT_FRAME_TYPE_CTRL0
+#define    RTL8367C_VLAN_ACCEPT_FRAME_TYPE_REG(port)            (RTL8367C_VLAN_ACCEPT_FRAME_TYPE_BASE + (port >> 3))
+#define    RTL8367C_VLAN_ACCEPT_FRAME_TYPE_MASK(port)           (RTL8367C_PORT0_FRAME_TYPE_MASK << ((port & 0x7) << 1))
+
+#define    RTL8367C_PORT_EFID_BASE                                RTL8367C_REG_PORT_EFID_CTRL0
+#define    RTL8367C_PORT_EFID_REG(port)                            (RTL8367C_PORT_EFID_BASE + (port >> 2))
+#define    RTL8367C_PORT_EFID_OFFSET(port)                         ((port & 0x3) << 2)
+#define    RTL8367C_PORT_EFID_MASK(port)                        (RTL8367C_PORT0_EFID_MASK << RTL8367C_PORT_EFID_OFFSET(port))
+
+#define    RTL8367C_PORT_PBFIDEN_REG                            RTL8367C_REG_PORT_PBFIDEN
+
+#define    RTL8367C_PORT_PBFID_BASE                             RTL8367C_REG_PORT0_PBFID
+#define    RTL8367C_PORT_PBFID_REG(port)                        (RTL8367C_PORT_PBFID_BASE + port)
+
+/* (16'h0800) dpm_reg */
+
+#define    RTL8367C_RMA_CTRL_BASE                                RTL8367C_REG_RMA_CTRL00
+
+
+#define    RTL8367C_VLAN_PORTBASED_PRIORITY_BASE                RTL8367C_REG_VLAN_PORTBASED_PRIORITY_CTRL0
+#define    RTL8367C_VLAN_PORTBASED_PRIORITY_REG(port)            (RTL8367C_VLAN_PORTBASED_PRIORITY_BASE + (port >> 2))
+#define    RTL8367C_VLAN_PORTBASED_PRIORITY_OFFSET(port)        ((port & 0x3) << 2)
+#define    RTL8367C_VLAN_PORTBASED_PRIORITY_MASK(port)            (0x7 << RTL8367C_VLAN_PORTBASED_PRIORITY_OFFSET(port))
+
+#define    RTL8367C_VLAN_PPB_PRIORITY_ITEM_BASE                    RTL8367C_REG_VLAN_PPB_PRIORITY_ITEM0_CTRL0
+#define    RTL8367C_VLAN_PPB_PRIORITY_ITEM_REG(port, item)        (RTL8367C_VLAN_PPB_PRIORITY_ITEM_BASE + (item << 2)+ (port>>2))
+#define    RTL8367C_VLAN_PPB_PRIORITY_ITEM_OFFSET(port)            ((port & 0x3) <<2)
+#define    RTL8367C_VLAN_PPB_PRIORITY_ITEM_MASK(port)            (RTL8367C_VLAN_PPB_PRIORITY_ITEM0_CTRL0_PORT0_PRIORITY_MASK << RTL8367C_VLAN_PPB_PRIORITY_ITEM_OFFSET(port))
+
+#define    RTL8367C_QOS_1Q_PRIORITY_REMAPPING_BASE                RTL8367C_REG_QOS_1Q_PRIORITY_REMAPPING_CTRL0
+#define    RTL8367C_QOS_1Q_PRIORITY_REMAPPING_REG(pri)            (RTL8367C_QOS_1Q_PRIORITY_REMAPPING_BASE + (pri >> 2))
+#define    RTL8367C_QOS_1Q_PRIORITY_REMAPPING_OFFSET(pri)        ((pri & 0x3) << 2)
+#define    RTL8367C_QOS_1Q_PRIORITY_REMAPPING_MASK(pri)            (0x7 << RTL8367C_QOS_1Q_PRIORITY_REMAPPING_OFFSET(pri))
+
+#define    RTL8367C_QOS_DSCP_TO_PRIORITY_BASE                    RTL8367C_REG_QOS_DSCP_TO_PRIORITY_CTRL0
+#define    RTL8367C_QOS_DSCP_TO_PRIORITY_REG(dscp)                (RTL8367C_QOS_DSCP_TO_PRIORITY_BASE + (dscp >> 2))
+#define    RTL8367C_QOS_DSCP_TO_PRIORITY_OFFSET(dscp)            ((dscp & 0x3) << 2)
+#define    RTL8367C_QOS_DSCP_TO_PRIORITY_MASK(dscp)                (0x7 << RTL8367C_QOS_DSCP_TO_PRIORITY_OFFSET(dscp))
+
+#define    RTL8367C_QOS_PORTBASED_PRIORITY_BASE                    RTL8367C_REG_QOS_PORTBASED_PRIORITY_CTRL0
+#define    RTL8367C_QOS_PORTBASED_PRIORITY_REG(port)            (RTL8367C_QOS_PORTBASED_PRIORITY_BASE + (port >> 2))
+#define    RTL8367C_QOS_PORTBASED_PRIORITY_OFFSET(port)            ((port & 0x3) << 2)
+#define    RTL8367C_QOS_PORTBASED_PRIORITY_MASK(port)            (0x7 << RTL8367C_QOS_PORTBASED_PRIORITY_OFFSET(port))
+
+#define    RTL8367C_QOS_INTERNAL_PRIORITY_DECISION_BASE            RTL8367C_REG_QOS_INTERNAL_PRIORITY_DECISION_CTRL0
+#define    RTL8367C_QOS_INTERNAL_PRIORITY_DECISION_REG(src)        (RTL8367C_QOS_INTERNAL_PRIORITY_DECISION_BASE + (src >> 1))
+#define    RTL8367C_QOS_INTERNAL_PRIORITY_DECISION_OFFSET(src)  ((src & 1) << 3)
+#define    RTL8367C_QOS_INTERNAL_PRIORITY_DECISION_MASK(src)    (RTL8367C_QOS_INTERNAL_PRIORITY_DECISION_CTRL0_QOS_PORT_WEIGHT_MASK << RTL8367C_QOS_INTERNAL_PRIORITY_DECISION_OFFSET(src))
+
+#define    RTL8367C_QOS_INTERNAL_PRIORITY_DECISION2_BASE            RTL8367C_REG_QOS_INTERNAL_PRIORITY_DECISION2_CTRL0
+#define    RTL8367C_QOS_INTERNAL_PRIORITY_DECISION2_REG(src)        (RTL8367C_QOS_INTERNAL_PRIORITY_DECISION2_BASE + (src >> 1))
+#define    RTL8367C_QOS_INTERNAL_PRIORITY_DECISION2_OFFSET(src)  ((src & 1) << 3)
+#define    RTL8367C_QOS_INTERNAL_PRIORITY_DECISION2_MASK(src)    (RTL8367C_QOS_INTERNAL_PRIORITY_DECISION2_CTRL0_QOS_PORT_WEIGHT_MASK << RTL8367C_QOS_INTERNAL_PRIORITY_DECISION2_OFFSET(src))
+
+#define    RTL8367C_QOS_INTERNAL_PRIORITY_DECISION_IDX_CTRL            RTL8367C_REG_QOS_INTERNAL_PRIORITY_DECISION_IDX
+#define    RTL8367C_QOS_INTERNAL_PRIORITY_DECISION_IDX(port)  (1 << port)
+
+#define    RTL8367C_QOS_PRIPORITY_REMAPPING_IN_CPU_BASE            RTL8367C_REG_QOS_PRIORITY_REMAPPING_IN_CPU_CTRL0
+#define    RTL8367C_QOS_PRIPORITY_REMAPPING_IN_CPU_REG(pri)        (RTL8367C_QOS_PRIPORITY_REMAPPING_IN_CPU_BASE + (pri >> 2))
+#define    RTL8367C_QOS_PRIPORITY_REMAPPING_IN_CPU_OFFSET(pri)  ((pri & 0x3) << 2)
+#define    RTL8367C_QOS_PRIPORITY_REMAPPING_IN_CPU_MASK(pri)    (RTL8367C_QOS_PRIORITY_REMAPPING_IN_CPU_CTRL0_PRIORITY0_MASK << RTL8367C_QOS_PRIPORITY_REMAPPING_IN_CPU_OFFSET(pri))
+
+#define    RTL8367C_QOS_TRAP_PRIORITY_CTRL0_REG                RTL8367C_REG_QOS_TRAP_PRIORITY0
+
+#define    RTL8367C_QOS_TRAP_PRIORITY_CTRL1_REG                RTL8367C_REG_QOS_TRAP_PRIORITY1
+
+#define    RTL8367C_QOS_DSCP_TO_DSCP_BASE                             RTL8367C_REG_QOS_DSCP_REMARK_DSCP_CTRL0
+#define    RTL8367C_QOS_DSCP_TO_DSCP_REG(dscp)                     (RTL8367C_REG_QOS_DSCP_REMARK_DSCP_CTRL0 + (dscp >> 1))
+#define    RTL8367C_QOS_DSCP_TO_DSCP_OFFSET(dscp)                ((dscp & 0x1) << 8)
+#define    RTL8367C_QOS_DSCP_TO_DSCP_MASK(dscp)                   (0x3F << RTL8367C_QOS_DSCP_TO_DSCP_OFFSET(dscp))
+
+#define    RTL8367C_UNUCAST_FLOADING_PMSK_REG                    RTL8367C_REG_UNDA_FLOODING_PMSK
+
+#define    RTL8367C_UNMCAST_FLOADING_PMSK_REG                    RTL8367C_REG_UNMCAST_FLOADING_PMSK
+
+#define    RTL8367C_BCAST_FLOADING_PMSK_REG                        RTL8367C_REG_BCAST_FLOADING_PMSK
+
+#define    RTL8367C_PORT_ISOLATION_PORT_MASK_BASE                RTL8367C_REG_PORT_ISOLATION_PORT0_MASK
+#define    RTL8367C_PORT_ISOLATION_PORT_MASK_REG(port)            (RTL8367C_PORT_ISOLATION_PORT_MASK_BASE + port)
+
+#define    RTL8367C_FORCE_CTRL_REG                                RTL8367C_REG_FORCE_CTRL
+
+#define    RTL8367C_SOURCE_PORT_BLOCK_REG                        RTL8367C_REG_SOURCE_PORT_PERMIT
+
+#define    RTL8367C_IPMCAST_VLAN_LEAKY_REG                        RTL8367C_REG_IPMCAST_VLAN_LEAKY
+
+#define    RTL8367C_IPMCAST_PORTISO_LEAKY_REG                    RTL8367C_REG_IPMCAST_PORTISO_LEAKY
+
+#define    RTL8367C_PORT_SECURIT_CTRL_REG                        RTL8367C_REG_PORT_SECURITY_CTRL
+
+#define    RTL8367C_UNKNOWN_IPV4_MULTICAST_BASE                    RTL8367C_REG_UNKNOWN_IPV4_MULTICAST_CTRL0
+#define    RTL8367C_UNKNOWN_IPV4_MULTICAST_REG(port)            (RTL8367C_UNKNOWN_IPV4_MULTICAST_BASE + (port >> 3))
+#define    RTL8367C_UNKNOWN_IPV4_MULTICAST_OFFSET(port)            ((port & 0x7) << 1)
+#define    RTL8367C_UNKNOWN_IPV4_MULTICAST_MASK(port)            (RTL8367C_PORT0_UNKNOWN_IP4_MCAST_MASK << RTL8367C_UNKNOWN_IPV4_MULTICAST_OFFSET(port))
+
+#define    RTL8367C_UNKNOWN_IPV6_MULTICAST_BASE                    RTL8367C_REG_UNKNOWN_IPV6_MULTICAST_CTRL0
+#define    RTL8367C_UNKNOWN_IPV6_MULTICAST_REG(port)            (RTL8367C_UNKNOWN_IPV6_MULTICAST_BASE + (port >> 3))
+#define    RTL8367C_UNKNOWN_IPV6_MULTICAST_OFFSET(port)            ((port & 0x7) << 1)
+#define    RTL8367C_UNKNOWN_IPV6_MULTICAST_MASK(port)            (RTL8367C_PORT0_UNKNOWN_IP4_MCAST_MASK << RTL8367C_UNKNOWN_IPV6_MULTICAST_OFFSET(port))
+
+#define    RTL8367C_UNKNOWN_L2_MULTICAST_BASE                    RTL8367C_REG_UNKNOWN_L2_MULTICAST_CTRL0
+#define    RTL8367C_UNKNOWN_L2_MULTICAST_REG(port)                (RTL8367C_UNKNOWN_L2_MULTICAST_BASE + (port >> 3))
+#define    RTL8367C_UNKNOWN_L2_MULTICAST_OFFSET(port)            ((port & 0x7) << 1)
+#define    RTL8367C_UNKNOWN_L2_MULTICAST_MASK(port)                (RTL8367C_PORT0_UNKNOWN_L2_MCAST_MASK << RTL8367C_UNKNOWN_L2_MULTICAST_OFFSET(port))
+
+#define    RTL8367C_PORT_TRUNK_CTRL_REG                            RTL8367C_REG_PORT_TRUNK_CTRL
+#define    RTL8367C_PORT_TRUNK_HASH_MASK                           0x007F
+
+#define    RTL8367C_PORT_TRUNK_GROUP_MASK_REG    RTL8367C_REG_PORT_TRUNK_GROUP_MASK
+#define    RTL8367C_PORT_TRUNK_GROUP_MASK_OFFSET(group)    (group << 2)
+#define    RTL8367C_PORT_TRUNK_GROUP_MASK_MASK(group)    (RTL8367C_PORT_TRUNK_GROUP0_MASK_MASK << RTL8367C_PORT_TRUNK_GROUP_MASK_OFFSET(group))
+
+#define    RTL8367C_PORT_TRUNK_FLOWCTRL_REG                        RTL8367C_REG_PORT_TRUNK_FLOWCTRL
+
+#define    RTL8367C_QOS_PORT_QUEUE_NUMBER_BASE                    RTL8367C_REG_QOS_PORT_QUEUE_NUMBER_CTRL0
+#define    RTL8367C_QOS_PORT_QUEUE_NUMBER_REG(port)                (RTL8367C_QOS_PORT_QUEUE_NUMBER_BASE + (port >> 2))
+#define    RTL8367C_QOS_PORT_QUEUE_NUMBER_OFFSET(port)            ((port & 0x3) << 2)
+#define    RTL8367C_QOS_PORT_QUEUE_NUMBER_MASK(port)            (0x7 << RTL8367C_QOS_PORT_QUEUE_NUMBER_OFFSET(port))
+
+#define    RTL8367C_QOS_1Q_PRIORITY_TO_QID_BASE                    RTL8367C_REG_QOS_1Q_PRIORITY_TO_QID_CTRL0
+#define    RTL8367C_QOS_1Q_PRIORITY_TO_QID_REG(index, pri)        (RTL8367C_QOS_1Q_PRIORITY_TO_QID_BASE + (index << 1) + (pri >> 2))
+#define    RTL8367C_QOS_1Q_PRIORITY_TO_QID_OFFSET(pri)            ((pri & 0x3) << 2)
+#define    RTL8367C_QOS_1Q_PRIORITY_TO_QID_MASK(pri)            (RTL8367C_QOS_1Q_PRIORITY_TO_QID_CTRL0_PRIORITY0_TO_QID_MASK << RTL8367C_QOS_1Q_PRIORITY_TO_QID_OFFSET(pri))
+
+#define    RTL8367C_DEBUG_INFO_BASE                                RTL8367C_REG_PORT_DEBUG_INFO_CTRL0
+#define    RTL8367C_DEBUG_INFO_REG(port)                        (RTL8367C_DEBUG_INFO_BASE + (port >> 1))
+#define    RTL8367C_DEBUG_INFO_OFFSET(port)                        ((port & 0x1) << 3)
+#define    RTL8367C_DEBUG_INFO_MASK(port)                        (RTL8367C_PORT0_DEBUG_INFO_MASK << RTL8367C_DEBUG_INFO_OFFSET(port))
+
+/* (16'h0a00) l2_reg */
+
+#define    RTL8367C_VLAN_MSTI_BASE                                RTL8367C_REG_VLAN_MSTI0_CTRL0
+#define    RTL8367C_VLAN_MSTI_REG(tree, port)                    (RTL8367C_VLAN_MSTI_BASE + (tree << 1) + (port >> 3))
+#define    RTL8367C_VLAN_MSTI_OFFSET(port)                        ((port & 0x7) << 1)
+#define    RTL8367C_VLAN_MSTI_MASK(port)                        (RTL8367C_VLAN_MSTI0_CTRL0_PORT0_STATE_MASK << RTL8367C_VLAN_MSTI_OFFSET(port))
+
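The MSTI macros add a second index: each spanning-tree instance occupies two consecutive registers, each packing eight 2-bit port states. A sketch of setting one port's state for a given tree, reusing the accessor pair assumed in the DSCP sketch above:

    /* Hypothetical helper: write the 2-bit STP state of 'port' in instance 'tree'. */
    static int msti_state_set(unsigned int tree, unsigned int port, unsigned int state)
    {
        unsigned int val;
        int ret;

        ret = rtl8367c_getAsicReg(RTL8367C_VLAN_MSTI_REG(tree, port), &val);
        if (ret)
            return ret;
        val &= ~RTL8367C_VLAN_MSTI_MASK(port);
        val |= (state & 0x3) << RTL8367C_VLAN_MSTI_OFFSET(port);
        return rtl8367c_setAsicReg(RTL8367C_VLAN_MSTI_REG(tree, port), val);
    }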
+#define    RTL8367C_LUT_PORT_LEARN_LIMITNO_BASE                    RTL8367C_REG_LUT_PORT0_LEARN_LIMITNO
+#define    RTL8367C_LUT_PORT_LEARN_LIMITNO_REG(port)            (RTL8367C_LUT_PORT_LEARN_LIMITNO_BASE + port)
+
+#define    RTL8367C_LUT_CFG_REG                                    RTL8367C_REG_LUT_CFG
+
+#define    RTL8367C_LUT_AGEOUT_CTRL_REG                            RTL8367C_REG_LUT_AGEOUT_CTRL
+
+#define    RTL8367C_FORCE_FLUSH_REG                                RTL8367C_REG_FORCE_FLUSH
+
+#define    RTL8367C_STORM_BCAST_REG                                RTL8367C_REG_STORM_BCAST
+
+#define    RTL8367C_STORM_MCAST_REG                                RTL8367C_REG_STORM_MCAST
+
+#define    RTL8367C_STORM_UNKNOWN_UCAST_REG                        RTL8367C_REG_STORM_UNKOWN_UCAST
+
+#define    RTL8367C_STORM_UNKNOWN_MCAST_REG                        RTL8367C_REG_STORM_UNKOWN_MCAST
+
+#define    RTL8367C_STORM_BCAST_METER_CTRL_BASE                    RTL8367C_REG_STORM_BCAST_METER_CTRL0
+#define    RTL8367C_STORM_BCAST_METER_CTRL_REG(port)            (RTL8367C_STORM_BCAST_METER_CTRL_BASE + (port >> 1))
+#define    RTL8367C_STORM_BCAST_METER_CTRL_OFFSET(port)            ((port & 0x1) << 3)
+#define    RTL8367C_STORM_BCAST_METER_CTRL_MASK(port)            (0xFF << RTL8367C_STORM_BCAST_METER_CTRL_OFFSET(port))
+
+#define    RTL8367C_STORM_MCAST_METER_CTRL_BASE                    RTL8367C_REG_STORM_MCAST_METER_CTRL0
+#define    RTL8367C_STORM_MCAST_METER_CTRL_REG(port)            (RTL8367C_STORM_MCAST_METER_CTRL_BASE + (port >> 1))
+#define    RTL8367C_STORM_MCAST_METER_CTRL_OFFSET(port)            ((port & 0x1) << 3)
+#define    RTL8367C_STORM_MCAST_METER_CTRL_MASK(port)            (0xFF << RTL8367C_STORM_MCAST_METER_CTRL_OFFSET(port))
+
+#define    RTL8367C_STORM_UNDA_METER_CTRL_BASE                    RTL8367C_REG_STORM_UNDA_METER_CTRL0
+#define    RTL8367C_STORM_UNDA_METER_CTRL_REG(port)                (RTL8367C_STORM_UNDA_METER_CTRL_BASE + (port >> 1))
+#define    RTL8367C_STORM_UNDA_METER_CTRL_OFFSET(port)            ((port & 0x1) << 3)
+#define    RTL8367C_STORM_UNDA_METER_CTRL_MASK(port)            (0xFF << RTL8367C_STORM_UNDA_METER_CTRL_OFFSET(port))
+
+#define    RTL8367C_STORM_UNMC_METER_CTRL_BASE                    RTL8367C_REG_STORM_UNMC_METER_CTRL0
+#define    RTL8367C_STORM_UNMC_METER_CTRL_REG(port)                (RTL8367C_STORM_UNMC_METER_CTRL_BASE + (port >> 1))
+#define    RTL8367C_STORM_UNMC_METER_CTRL_OFFSET(port)            ((port & 0x1) << 3)
+#define    RTL8367C_STORM_UNMC_METER_CTRL_MASK(port)            (0xFF << RTL8367C_STORM_UNMC_METER_CTRL_OFFSET(port))
+
+#define    RTL8367C_OAM_PARSER_OFFSET(port)                        (port*2)
+#define    RTL8367C_OAM_PARSER_MASK(port)                        (RTL8367C_PORT0_PARACT_MASK << RTL8367C_OAM_PARSER_OFFSET(port))
+
+#define    RTL8367C_OAM_MULTIPLEXER_OFFSET(port)                (port*2)
+#define    RTL8367C_OAM_MULTIPLEXER_MASK(port)                    (RTL8367C_PORT0_PARACT_MASK << RTL8367C_OAM_MULTIPLEXER_OFFSET(port))
+
+#define    RTL8367C_OAM_CTRL_REG                                RTL8367C_REG_OAM_CTRL
+
+#define    RTL8367C_DOT1X_PORT_ENABLE_REG                        RTL8367C_REG_DOT1X_PORT_ENABLE
+
+#define    RTL8367C_DOT1X_MAC_ENABLE_REG                        RTL8367C_REG_DOT1X_MAC_ENABLE
+
+#define    RTL8367C_DOT1X_PORT_AUTH_REG                            RTL8367C_REG_DOT1X_PORT_AUTH
+
+#define    RTL8367C_DOT1X_PORT_OPDIR_REG                        RTL8367C_REG_DOT1X_PORT_OPDIR
+
+#define    RTL8367C_DOT1X_UNAUTH_ACT_BASE                        RTL8367C_REG_DOT1X_UNAUTH_ACT_W0
+#define    RTL8367C_DOT1X_UNAUTH_ACT_OFFSET(port)                ((port & 0x7) << 1)
+#define    RTL8367C_DOT1X_UNAUTH_ACT_MASK(port)                    (RTL8367C_DOT1X_PORT0_UNAUTHBH_MASK << RTL8367C_DOT1X_UNAUTH_ACT_OFFSET(port))
+
+#define    RTL8367C_DOT1X_CFG_REG                                RTL8367C_REG_DOT1X_CFG
+
+#define    RTL8367C_REG_L2_LRN_CNT_BASE                            RTL8367C_REG_L2_LRN_CNT_CTRL0
+#define    RTL8367C_REG_L2_LRN_CNT_REG(port)                    (RTL8367C_REG_L2_LRN_CNT_BASE + port)
+
+/* (16'h0b00) mltvlan_reg */
+
+#define    RTL8367C_SVLAN_MCAST2S_ENTRY_BASE_REG(index)        (RTL8367C_REG_SVLAN_MCAST2S_ENTRY0_CTRL0 + index*5)
+
+/* (16'h0c00) svlan_reg */
+
+#define    RTL8367C_SVLAN_MEMBERCFG_BASE_REG(index)                (RTL8367C_REG_SVLAN_MEMBERCFG0_CTRL1 + index*3)
+#define    RTL8367C_SVLAN_C2SCFG_BASE_REG(index)                  (RTL8367C_REG_SVLAN_C2SCFG0_CTRL0 + index*3)
+#define    RTL8367C_SVLAN_CFG_REG                                RTL8367C_REG_SVLAN_CFG
+
+/* (16'h0f00) hsactrl_reg */
+
+#define    RTL8367C_SVLAN_S2C_ENTRY_BASE_REG(index)                (RTL8367C_REG_SVLAN_SP2C_ENTRY0_CTRL0 + index*2)
+
+/* (16'h1000) mib_reg */
+
+#define    RTL8367C_MIB_COUNTER_BASE_REG                        RTL8367C_REG_MIB_COUNTER0
+
+#define    RTL8367C_MIB_ADDRESS_REG                                RTL8367C_REG_MIB_ADDRESS
+
+#define    RTL8367C_MIB_CTRL_REG                                RTL8367C_REG_MIB_CTRL0
+#define    RTL8367C_MIB_PORT07_MASK                                (0xFF << RTL8367C_PORT0_RESET_OFFSET)
+
+/* (16'h1100) intrpt_reg */
+
+#define    RTL8367C_INTR_CTRL_REG                                RTL8367C_REG_INTR_CTRL
+
+#define    RTL8367C_INTR_IMR_REG                                RTL8367C_REG_INTR_IMR
+
+#define    RTL8367C_INTR_IMS_REG                                RTL8367C_REG_INTR_IMS
+
+#define    RTL8367C_INTR_INDICATOR_BASED                        RTL8367C_REG_LEARN_OVER_INDICATOR
+#define    RTL8367C_LEARN_OVER_INDICATOR_REG                    RTL8367C_REG_LEARN_OVER_INDICATOR
+
+#define    RTL8367C_SPEED_CHANGE_INDICATOR_REG                    RTL8367C_REG_SPEED_CHANGE_INDICATOR
+
+#define    RTL8367C_PORT_LINKDOWN_INDICATOR_REG                    RTL8367C_REG_PORT_LINKDOWN_INDICATOR
+
+#define    RTL8367C_PORT_LINKUP_INDICATOR_REG                    RTL8367C_REG_PORT_LINKUP_INDICATOR
+
+#define    RTL8367C_REG_METER_EXCEED_INDICATOR_BASE                RTL8367C_REG_METER_EXCEED_INDICATOR0
+#define    RTL8367C_REG_METER_EXCEED_INDICATOR_REG(meter)        (RTL8367C_REG_METER_EXCEED_INDICATOR_BASE + (meter >> 4))
+#define    RTL8367C_REG_METER_EXCEED_INDICATOR_OFFSET(meter)    (meter & 0xF)
+
+/* (16'h1200) swcore_reg */
+
+#define    RTL8367C_VS_TPID_REG                                    RTL8367C_REG_VS_TPID
+
+#define    RTL8367C_SWITCH_MAC_BASE                                RTL8367C_REG_SWITCH_MAC0
+
+#define    RTL8367C_REMARKING_CTRL_REG                            RTL8367C_REG_SWITCH_CTRL0
+
+#define    RTL8367C_QOS_DSCP_REMARK_BASE                        RTL8367C_REG_QOS_DSCP_REMARK_CTRL0
+#define    RTL8367C_QOS_DSCP_REMARK_REG(pri)                    (RTL8367C_QOS_DSCP_REMARK_BASE + (pri >> 1))
+#define    RTL8367C_QOS_DSCP_REMARK_OFFSET(pri)                    (((pri) & 0x1) << 3)
+#define    RTL8367C_QOS_DSCP_REMARK_MASK(pri)                    (0x3F << RTL8367C_QOS_DSCP_REMARK_OFFSET(pri))
+
+#define    RTL8367C_QOS_1Q_REMARK_BASE                            RTL8367C_REG_QOS_1Q_REMARK_CTRL0
+#define    RTL8367C_QOS_1Q_REMARK_REG(pri)                        (RTL8367C_QOS_1Q_REMARK_BASE + (pri >> 2))
+#define    RTL8367C_QOS_1Q_REMARK_OFFSET(pri)                    ((pri & 0x3) << 2)
+#define    RTL8367C_QOS_1Q_REMARK_MASK(pri)                        (0x7 << RTL8367C_QOS_1Q_REMARK_OFFSET(pri))
+
+#define    RTL8367C_PTKGEN_PAYLOAD_CTRL0_REG                    RTL8367C_REG_PTKGEN_PAYLOAD_CTRL0
+
+#define    RTL8367C_PTKGEN_PAYLOAD_CTRL1_REG                    RTL8367C_REG_PTKGEN_PAYLOAD_CTRL1
+
+#define    RTL8367C_SVLAN_UPLINK_PORTMASK_REG                    RTL8367C_REG_SVLAN_UPLINK_PORTMASK
+
+#define    RTL8367C_CPU_PORT_MASK_REG                            RTL8367C_REG_CPU_PORT_MASK
+
+#define    RTL8367C_CPU_CTRL_REG                                RTL8367C_REG_CPU_CTRL
+
+#define    RTL8367C_MIRROR_CTRL_REG                                RTL8367C_REG_MIRROR_CTRL
+
+
+#define    RTL8367C_FLOWCRTL_EGRESS_QUEUE_ENABLE_BASE            RTL8367C_REG_FLOWCRTL_EGRESS_QUEUE_ENABLE_CTRL0
+#define    RTL8367C_FLOWCRTL_EGRESS_QUEUE_ENABLE_REG(port)        (RTL8367C_FLOWCRTL_EGRESS_QUEUE_ENABLE_BASE + (port >> 1))
+#define    RTL8367C_FLOWCRTL_EGRESS_QUEUE_ENABLE_REG_OFFSET(port)    ((port & 0x1) << 3)
+#define    RTL8367C_FLOWCRTL_EGRESS_QUEUE_ENABLE_REG_MASK(port)    (RTL8367C_PORT0_QUEUE_MASK_MASK << RTL8367C_FLOWCRTL_EGRESS_QUEUE_ENABLE_REG_OFFSET(port))
+
+
+#define    RTL8367C_FLOWCTRL_PORT_PAGE_COUNTER_BASE                RTL8367C_REG_FLOWCTRL_PORT0_PAGE_COUNTER
+#define    RTL8367C_FLOWCTRL_PORT_PAGE_COUNTER_REG(port)        (RTL8367C_FLOWCTRL_PORT_PAGE_COUNTER_BASE + port)
+#define    RTL8367C_FLOWCTRL_PORT_PAGE_COUNTER_MASK                RTL8367C_FLOWCTRL_PORT0_PAGE_COUNTER_MASK
+
+#define    RTL8367C_FLOWCTRL_PORT_PAGE_MAX_BASE                    RTL8367C_REG_FLOWCTRL_PORT0_PAGE_MAX
+#define    RTL8367C_FLOWCTRL_PORT_PAGE_MAX_REG(port)            (RTL8367C_FLOWCTRL_PORT_PAGE_MAX_BASE + port)
+#define    RTL8367C_FLOWCTRL_PORT_PAGE_MAX_MASK                    RTL8367C_FLOWCTRL_PORT0_PAGE_MAX_MASK
+
+#define    RTL8367C_FIELD_SELECTOR_REG(index)                    (RTL8367C_REG_FIELD_SELECTOR0 + index)
+#define    RTL8367C_FIELD_SELECTOR_ENABLE_OFFSET                 RTL8367C_FIELD_SELECTOR0_ENABLE_OFFSET
+#define    RTL8367C_FIELD_SELECTOR_ENABLE_MASK                    RTL8367C_FIELD_SELECTOR0_ENABLE_MASK
+#define    RTL8367C_FIELD_SELECTOR_FORMAT_OFFSET                RTL8367C_FIELD_SELECTOR0_FORMAT_OFFSET
+#define    RTL8367C_FIELD_SELECTOR_FORMAT_MASK                    RTL8367C_FIELD_SELECTOR0_FORMAT_MASK
+#define    RTL8367C_FIELD_SELECTOR_OFFSET_OFFSET                  RTL8367C_FIELD_SELECTOR0_OFFSET_OFFSET
+#define    RTL8367C_FIELD_SELECTOR_OFFSET_MASK                    RTL8367C_FIELD_SELECTOR0_OFFSET_MASK
+
+/* (16'h1300) chip_reg */
+
+/* (16'h1400) mtrpool_reg */
+#define    RTL8367C_METER_RATE_BASE                                RTL8367C_REG_METER0_RATE_CTRL0
+#define    RTL8367C_METER_RATE_REG(meter)                        ((meter << 1) + RTL8367C_METER_RATE_BASE)
+
+#define    RTL8367C_METER_BUCKET_SIZE_BASE                        RTL8367C_REG_METER0_BUCKET_SIZE
+#define    RTL8367C_METER_BUCKET_SIZE_REG(meter)                (RTL8367C_METER_BUCKET_SIZE_BASE + meter)
+
+#define    RTL8367C_LEAKY_BUCKET_TICK_REG                        RTL8367C_REG_METER_CTRL0
+#define    RTL8367C_LEAKY_BUCKET_TICK_OFFSET                    RTL8367C_METER_TICK_OFFSET
+#define    RTL8367C_LEAKY_BUCKET_TICK_MASK                        RTL8367C_METER_TICK_MASK
+
+#define    RTL8367C_LEAKY_BUCKET_TOKEN_REG                        RTL8367C_REG_METER_CTRL1
+#define    RTL8367C_LEAKY_BUCKET_TOKEN_OFFSET                    RTL8367C_METER_CTRL1_OFFSET
+#define    RTL8367C_LEAKY_BUCKET_TOKEN_MASK                        RTL8367C_METER_CTRL1_MASK
+
+#define    RTL8367C_METER_OVERRATE_INDICATOR_BASE                RTL8367C_REG_METER_OVERRATE_INDICATOR0
+#define    RTL8367C_METER_OVERRATE_INDICATOR_REG(meter)            (RTL8367C_METER_OVERRATE_INDICATOR_BASE + (meter >> 4))
+#define    RTL8367C_METER_EXCEED_OFFSET(meter)                    (meter & 0xF)
+#define    RTL8367C_METER_EXCEED_MASK(meter)                    (1 << RTL8367C_METER_EXCEED_OFFSET(meter))
+
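Each METER_OVERRATE_INDICATOR register carries sixteen one-bit flags, so meter >> 4 picks the register and meter & 0xF the bit within it. A polling sketch, with the same assumed accessors:

    /* Report whether meter 'meter' has exceeded its configured rate. */
    static int meter_over_rate(unsigned int meter, unsigned int *over)
    {
        unsigned int val;
        int ret;

        ret = rtl8367c_getAsicReg(RTL8367C_METER_OVERRATE_INDICATOR_REG(meter), &val);
        if (ret)
            return ret;
        *over = (val & RTL8367C_METER_EXCEED_MASK(meter)) ? 1 : 0;
        return 0;
    }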
+#define    RTL8367C_METER_IFG_CTRL_BASE                            RTL8367C_REG_METER_IFG_CTRL0
+#define    RTL8367C_METER_IFG_CTRL_REG(meter)                    (RTL8367C_METER_IFG_CTRL_BASE + (meter >> 4))
+#define    RTL8367C_METER_IFG_OFFSET(meter)                        (meter & 0xF)
+#define    RTL8367C_METER_IFG_MASK(meter)                        (1 << RTL8367C_METER_IFG_OFFSET(meter))
+
+#define    RTL8367C_FLOWCTRL_CTRL_REG                            RTL8367C_REG_FLOWCTRL_CTRL0
+
+/* (16'h1800) 8051_RLDP_EEE_reg */
+#define    RTL8367C_EEELLDP_CTRL0_REG                            RTL8367C_REG_EEELLDP_CTRL0
+
+#define    RTL8367C_EEELLDP_CTRL1_REG                            RTL8367C_REG_EEELLDP_CTRL1
+
+#define    RTL8367C_EEELLDP_PMSK_REG                            RTL8367C_REG_EEELLDP_PMSK
+
+#define    RTL8367C_EEELLDP_TX_FRAMEU_REG_BASE                    RTL8367C_REG_EEELLDP_FRAMEU00
+
+#define    RTL8367C_EEELLDP_TX_CAP_FRAMEL_REG_BASE                RTL8367C_REG_EEELLDP_CAP_FRAMEL00
+
+#define    RTL8367C_EEELLDP_RX_VALUE_PORT_BASE                    RTL8367C_REG_EEELLDP_RX_VALUE_P00_00
+#define    RTL8367C_EEELLDP_RX_VALUE_PORT_REG(port)                (RTL8367C_EEELLDP_RX_VALUE_PORT_BASE + (port * 9))
+
+#define    RTL8367C_RLDP_CTRL0_REG                                RTL8367C_REG_RLDP_CTRL0
+#define    RTL8367C_RLDP_MODE_OFFSET    14
+
+#define    RTL8367C_RLDP_RETRY_COUNT_REG                        RTL8367C_REG_RLDP_CTRL1
+
+#define    RTL8367C_RLDP_RETRY_PERIOD_LOOPSTATE_REG                RTL8367C_REG_RLDP_CTRL2
+
+#define    RTL8367C_RLDP_RETRY_PERIOD_CHKSTATE_REG                RTL8367C_REG_RLDP_CTRL3
+
+#define    RTL8367C_RLDP_TX_PMSK_REG                            RTL8367C_REG_RLDP_CTRL4
+
+#define    RTL8367C_RLDP_RAND_NUM_REG_BASE                        RTL8367C_REG_RLDP_RAND_NUM0
+
+#define    RTL8367C_RLDP_MAGIC_NUM_REG_BASE                        RTL8367C_REG_RLDP_MAGIC_NUM0
+
+#define    RTL8367C_RLDP_LOOP_PMSK_REG                            RTL8367C_REG_RLDP_LOOPSTATUS_INDICATOR
+
+#define    RTL8367C_RLDP_LOOP_PORT_BASE                            RTL8367C_REG_RLDP_LOOP_PORT_REG0
+#define    RTL8367C_RLDP_LOOP_PORT_REG(port)                    (RTL8367C_RLDP_LOOP_PORT_BASE + (port >> 1))
+#define    RTL8367C_RLDP_LOOP_PORT_OFFSET(port)                    ((port & 0x1) << 3)
+#define    RTL8367C_RLDP_LOOP_PORT_MASK(port)                    (RTL8367C_RLDP_LOOP_PORT_00_MASK << RTL8367C_RLDP_LOOP_PORT_OFFSET(port))
+
+#define    RTL8367C_PAGEMETER_PORT_BASE                            RTL8367C_REG_PAGEMETER_PORT0_CTRL0
+#define    RTL8367C_PAGEMETER_PORT_REG(port)                    (RTL8367C_PAGEMETER_PORT_BASE + 0x20*port)
+
+#define    RTL8367C_HIGHPRI_INDICATOR_REG                        RTL8367C_REG_HIGHPRI_INDICATOR
+#define    RTL8367C_PORT_INDICATOR_OFFSET(port)                    (port)
+#define    RTL8367C_PORT_INDICATOR_MASK(port)                    (RTL8367C_PORT0_INDICATOR_MASK << RTL8367C_PORT_INDICATOR_OFFSET(port))
+
+#define    RTL8367C_HIGHPRI_CFG_REG                                RTL8367C_REG_HIGHPRI_CFG
+
+#define    RTL8367C_EAV_PRIORITY_REMAPPING_BASE                    RTL8367C_REG_EAV_CTRL1
+#define    RTL8367C_EAV_PRIORITY_REMAPPING_REG(pri)                (RTL8367C_EAV_PRIORITY_REMAPPING_BASE + (pri >> 2))
+#define    RTL8367C_EAV_PRIORITY_REMAPPING_OFFSET(pri)            ((pri & 0x3) * RTL8367C_REMAP_EAV_PRI1_REGEN_OFFSET)
+#define    RTL8367C_EAV_PRIORITY_REMAPPING_MASK(pri)            (RTL8367C_REMAP_EAV_PRI0_REGEN_MASK << RTL8367C_EAV_PRIORITY_REMAPPING_OFFSET(pri))
+
+#define    RTL8367C_EEEP_CFG_BASE                                RTL8367C_REG_PORT0_EEECFG
+#define    RTL8367C_EEEP_CFG_REG(port)                            (RTL8367C_EEEP_CFG_BASE + (port*0x20))
+
+#define    RTL8367C_PKG_CFG_BASE                                RTL8367C_REG_PKTGEN_PORT0_CTRL
+#define    RTL8367C_PKG_CFG_REG(port)                            (RTL8367C_PKG_CFG_BASE + (port*0x20))
+
+#define    RTL8367C_PKG_DA_BASE                                    RTL8367C_REG_PKTGEN_PORT0_DA0
+#define    RTL8367C_PKG_DA_REG(port)                            (RTL8367C_PKG_DA_BASE + (port*0x20))
+
+#define    RTL8367C_PKG_SA_BASE                                    RTL8367C_REG_PKTGEN_PORT0_SA0
+#define    RTL8367C_PKG_SA_REG(port)                            (RTL8367C_PKG_SA_BASE + (port*0x20))
+
+#define    RTL8367C_PKG_NUM_BASE                                RTL8367C_REG_PKTGEN_PORT0_COUNTER0
+#define    RTL8367C_PKG_NUM_REG(port)                            (RTL8367C_PKG_NUM_BASE + (port*0x20))
+
+#define    RTL8367C_PKG_LENGTH_BASE                                RTL8367C_REG_PKTGEN_PORT0_TX_LENGTH
+#define    RTL8367C_PKG_LENGTH_REG(port)                        (RTL8367C_PKG_LENGTH_BASE + (port*0x20))
+
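The packet-generator block repeats on a 0x20 per-port stride, so these _REG(port) macros are plain address arithmetic; RTL8367C_PKG_LENGTH_REG(2), for instance, resolves to RTL8367C_REG_PKTGEN_PORT0_TX_LENGTH + 0x40. A sketch that programs one port's TX length, with the same assumed accessors (the 14-bit limit matches RTL8367C_PKTGEN_PORT0_TX_LENGTH_MASK in rtl8367c_reg.h below):

    /* Program the pktgen TX length for 'port'; the field is 14 bits wide. */
    static int pktgen_tx_length_set(unsigned int port, unsigned int len)
    {
        if (len > 0x3FFF)
            return -1;
        return rtl8367c_setAsicReg(RTL8367C_PKG_LENGTH_REG(port), len);
    }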
+/* (16'h1c00)IGMP_MLD_reg */
+#define    RTL8367C_IGMP_GROUP_USAGE_BASE                       RTL8367C_REG_IGMP_GROUP_USAGE_LIST0
+#define    RTL8367C_IGMP_GROUP_USAGE_REG(idx)                   (RTL8367C_IGMP_GROUP_USAGE_BASE + (idx / 16))
+
+#define    RTL8367C_FALLBACK_BASE                               RTL8367C_REG_FALLBACK_PORT0_CFG0
+#define    RTL8367C_FALLBACK_PORT_CFG_REG(port)                 (RTL8367C_FALLBACK_BASE + (port * 4))
+#define    RTL8367C_FALLBACK_PORT_MON_CNT_REG(port)             (RTL8367C_FALLBACK_BASE + 1 + (port * 4))
+#define    RTL8367C_FALLBACK_PORT_ERR_CNT_REG(port)             (RTL8367C_FALLBACK_BASE + 3 + (port * 4))
+
+
+/* (16'h6400) timer_1588 */
+#define    RTL8367C_EAV_CFG_BASE                                RTL8367C_REG_P0_EAV_CFG
+#define    RTL8367C_EAV_PORT_CFG_REG(port)                        (RTL8367C_EAV_CFG_BASE + (port * 0x10))
+#define    RTL8367C_EAV_CFG_PTP_PHY_EN_EN_OFFSET                RTL8367C_P0_EAV_CFG_PTP_PHY_EN_EN_OFFSET
+#define    RTL8367C_EAV_CFG_RX_PDELAY_RESP_OFFSET                RTL8367C_P0_EAV_CFG_RX_PDELAY_RESP_OFFSET
+#define    RTL8367C_EAV_CFG_RX_PDELAY_REQ_OFFSET                RTL8367C_P0_EAV_CFG_RX_PDELAY_REQ_OFFSET
+#define    RTL8367C_EAV_CFG_RX_DELAY_REQ_OFFSET                RTL8367C_P0_EAV_CFG_RX_DELAY_REQ_OFFSET
+#define    RTL8367C_EAV_CFG_RX_SYNC_OFFSET                        RTL8367C_P0_EAV_CFG_RX_SYNC_OFFSET
+#define    RTL8367C_EAV_CFG_TX_PDELAY_RESP_OFFSET                RTL8367C_P0_EAV_CFG_TX_PDELAY_RESP_OFFSET
+#define    RTL8367C_EAV_CFG_TX_PDELAY_REQ_OFFSET                RTL8367C_P0_EAV_CFG_TX_PDELAY_REQ_OFFSET
+#define    RTL8367C_EAV_CFG_TX_DELAY_REQ_OFFSET                RTL8367C_P0_EAV_CFG_TX_DELAY_REQ_OFFSET
+#define    RTL8367C_EAV_CFG_TX_SYNC_OFFSET                        RTL8367C_P0_EAV_CFG_TX_SYNC_OFFSET
+
+#define    RTL8367C_REG_TX_SYNC_SEQ_ID_BASE                    RTL8367C_REG_P0_TX_SYNC_SEQ_ID
+#define    RTL8367C_REG_TX_SYNC_SEQ_ID(port)                    (RTL8367C_REG_TX_SYNC_SEQ_ID_BASE + (port * 0x10))
+#define    RTL8367C_REG_SEQ_ID(port, type)                        (RTL8367C_REG_TX_SYNC_SEQ_ID_BASE + type + (port * 0x10))
+
+#define    RTL8367C_REG_TX_DELAY_REQ_SEQ_ID_BASE                RTL8367C_REG_P0_TX_DELAY_REQ_SEQ_ID
+#define    RTL8367C_REG_TX_PDELAY_REQ_SEQ_ID_BASE                RTL8367C_REG_P0_TX_PDELAY_REQ_SEQ_ID
+#define    RTL8367C_REG_TX_PDELAY_RESP_SEQ_ID_BASE                RTL8367C_REG_P0_TX_PDELAY_RESP_SEQ_ID
+#define    RTL8367C_REG_RX_SYNC_SEQ_ID_BASE                    RTL8367C_REG_P0_RX_SYNC_SEQ_ID
+#define    RTL8367C_REG_RX_DELAY_REQ_SEQ_ID_BASE                RTL8367C_REG_P0_RX_DELAY_REQ_SEQ_ID
+#define    RTL8367C_REG_RX_PDELAY_REQ_SEQ_ID_BASE                RTL8367C_REG_P0_RX_PDELAY_REQ_SEQ_ID
+#define    RTL8367C_REG_RX_PDELAY_RESP_SEQ_ID_BASE                RTL8367C_REG_P0_RX_PDELAY_RESP_SEQ_ID
+
+#define    RTL8367C_REG_PORT_NSEC_L_BASE                        RTL8367C_REG_P0_PORT_NSEC_15_0
+#define    RTL8367C_REG_PORT_NSEC_L(port)                        (RTL8367C_REG_PORT_NSEC_L_BASE + (port * 0x10))
+#define    RTL8367C_REG_PORT_NSEC_H_BASE                        RTL8367C_REG_P0_PORT_NSEC_26_16
+#define    RTL8367C_REG_PORT_NSEC_H(port)                        (RTL8367C_REG_PORT_NSEC_H_BASE + (port * 0x10))
+#define    RTL8367C_PORT_NSEC_H_OFFSET                            RTL8367C_P0_PORT_NSEC_26_16_OFFSET
+#define    RTL8367C_PORT_NSEC_H_MASK                            RTL8367C_P0_PORT_NSEC_26_16_MASK
+
+#define    RTL8367C_REG_PORT_SEC_L_BASE                        RTL8367C_REG_P0_PORT_SEC_15_0
+#define    RTL8367C_REG_PORT_SEC_L(port)                        (RTL8367C_REG_PORT_SEC_L_BASE + (port * 0x10))
+#define    RTL8367C_REG_PORT_SEC_H_BASE                        RTL8367C_REG_P0_PORT_SEC_31_16
+#define    RTL8367C_REG_PORT_SEC_H(port)                        (RTL8367C_REG_PORT_SEC_H_BASE + (port * 0x10))
+
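A port's 1588 timestamp is split across four 16-bit registers: seconds in SEC_15_0/SEC_31_16 and a 27-bit nanosecond count in NSEC_15_0/NSEC_26_16. A sketch that reassembles it, with the same assumed accessors (error handling elided for brevity):

    /* Read back one port's 1588 timestamp from the four 16-bit halves. */
    static void port_timestamp_get(unsigned int port, unsigned int *sec, unsigned int *nsec)
    {
        unsigned int lo, hi;

        rtl8367c_getAsicReg(RTL8367C_REG_PORT_SEC_L(port), &lo);
        rtl8367c_getAsicReg(RTL8367C_REG_PORT_SEC_H(port), &hi);
        *sec = (hi << 16) | lo;                 /* seconds, bits 31:0 */

        rtl8367c_getAsicReg(RTL8367C_REG_PORT_NSEC_L(port), &lo);
        rtl8367c_getAsicReg(RTL8367C_REG_PORT_NSEC_H(port), &hi);
        *nsec = ((hi & 0x7FF) << 16) | lo;      /* nanoseconds, bits 26:0 */
    }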
+#endif /*#ifndef _RTL8367C_BASE_H_*/
+
diff -Nruw linux-4.4.115-fbx/drivers/misc/freebox./rtlapi/rtl8367c_reg.h linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/rtl8367c_reg.h
--- linux-4.4.115-fbx/drivers/misc/freebox./rtlapi/rtl8367c_reg.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/rtl8367c_reg.h	2019-01-22 16:16:24.723257454 +0100
@@ -0,0 +1,22819 @@
+#ifndef _RTL8367C_REG_H_
+#define _RTL8367C_REG_H_
+
+/************************************************************
+auto-generated register address and field data
+*************************************************************/
+
+/* (16'h0000)port_reg */
+
+#define    RTL8367C_REG_PORT0_CGST_HALF_CFG    0x0000
+#define    RTL8367C_PORT0_CGST_HALF_CFG_CONGESTION_TIME_OFFSET    4
+#define    RTL8367C_PORT0_CGST_HALF_CFG_CONGESTION_TIME_MASK    0xF0
+#define    RTL8367C_PORT0_CGST_HALF_CFG_CONGESTION_SUSTAIN_TIME_OFFSET    0
+#define    RTL8367C_PORT0_CGST_HALF_CFG_CONGESTION_SUSTAIN_TIME_MASK    0xF
+
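Every entry in this auto-generated file follows the same scheme: REG_<NAME> is the register address, and each <NAME>_<FIELD>_OFFSET/_MASK pair locates one field within the 16-bit value. For example, extracting the congestion-time field of the register above (accessor pair assumed, as in the rtl8367c_base.h sketches earlier):

    /* Extract the 4-bit congestion-time field from PORT0_CGST_HALF_CFG. */
    static int port0_congestion_time_get(unsigned int *time)
    {
        unsigned int val;
        int ret;

        ret = rtl8367c_getAsicReg(RTL8367C_REG_PORT0_CGST_HALF_CFG, &val);
        if (ret)
            return ret;
        *time = (val & RTL8367C_PORT0_CGST_HALF_CFG_CONGESTION_TIME_MASK)
                    >> RTL8367C_PORT0_CGST_HALF_CFG_CONGESTION_TIME_OFFSET;
        return 0;
    }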
+#define    RTL8367C_REG_PKTGEN_PORT0_CTRL    0x0001
+#define    RTL8367C_PKTGEN_PORT0_CTRL_STATUS_OFFSET    15
+#define    RTL8367C_PKTGEN_PORT0_CTRL_STATUS_MASK    0x8000
+#define    RTL8367C_PKTGEN_PORT0_CTRL_PKTGEN_STS_OFFSET    13
+#define    RTL8367C_PKTGEN_PORT0_CTRL_PKTGEN_STS_MASK    0x2000
+#define    RTL8367C_PKTGEN_PORT0_CTRL_CRC_NO_ERROR_OFFSET    4
+#define    RTL8367C_PKTGEN_PORT0_CTRL_CRC_NO_ERROR_MASK    0x10
+#define    RTL8367C_PKTGEN_PORT0_CTRL_CMD_START_OFFSET    0
+#define    RTL8367C_PKTGEN_PORT0_CTRL_CMD_START_MASK    0x1
+
+#define    RTL8367C_REG_TX_ERR_CNT_PORT0    0x0002
+#define    RTL8367C_TX_ERR_CNT_PORT0_OFFSET    0
+#define    RTL8367C_TX_ERR_CNT_PORT0_MASK    0x7
+
+#define    RTL8367C_REG_PKTGEN_PORT0_DA0    0x0003
+
+#define    RTL8367C_REG_PKTGEN_PORT0_DA1    0x0004
+
+#define    RTL8367C_REG_PKTGEN_PORT0_DA2    0x0005
+
+#define    RTL8367C_REG_PKTGEN_PORT0_SA0    0x0006
+
+#define    RTL8367C_REG_PKTGEN_PORT0_SA1    0x0007
+
+#define    RTL8367C_REG_PKTGEN_PORT0_SA2    0x0008
+
+#define    RTL8367C_REG_PKTGEN_PORT0_COUNTER0    0x0009
+
+#define    RTL8367C_REG_PKTGEN_PORT0_COUNTER1    0x000a
+#define    RTL8367C_PKTGEN_PORT0_COUNTER1_OFFSET    0
+#define    RTL8367C_PKTGEN_PORT0_COUNTER1_MASK    0xFF
+
+#define    RTL8367C_REG_PKTGEN_PORT0_TX_LENGTH    0x000b
+#define    RTL8367C_PKTGEN_PORT0_TX_LENGTH_OFFSET    0
+#define    RTL8367C_PKTGEN_PORT0_TX_LENGTH_MASK    0x3FFF
+
+#define    RTL8367C_REG_PKTGEN_PORT0_TIMER    0x000d
+#define    RTL8367C_PKTGEN_PORT0_TIMER_TIMER_OFFSET    4
+#define    RTL8367C_PKTGEN_PORT0_TIMER_TIMER_MASK    0xF0
+#define    RTL8367C_PKTGEN_PORT0_TIMER_RX_DMA_ERR_FLAG_OFFSET    3
+#define    RTL8367C_PKTGEN_PORT0_TIMER_RX_DMA_ERR_FLAG_MASK    0x8
+
+#define    RTL8367C_REG_PORT0_MISC_CFG    0x000e
+#define    RTL8367C_PORT0_MISC_CFG_SMALL_TAG_IPG_OFFSET    15
+#define    RTL8367C_PORT0_MISC_CFG_SMALL_TAG_IPG_MASK    0x8000
+#define    RTL8367C_PORT0_MISC_CFG_TX_ITFSP_MODE_OFFSET    14
+#define    RTL8367C_PORT0_MISC_CFG_TX_ITFSP_MODE_MASK    0x4000
+#define    RTL8367C_PORT0_MISC_CFG_FLOWCTRL_INDEP_OFFSET    13
+#define    RTL8367C_PORT0_MISC_CFG_FLOWCTRL_INDEP_MASK    0x2000
+#define    RTL8367C_PORT0_MISC_CFG_DOT1Q_REMARK_ENABLE_OFFSET    12
+#define    RTL8367C_PORT0_MISC_CFG_DOT1Q_REMARK_ENABLE_MASK    0x1000
+#define    RTL8367C_PORT0_MISC_CFG_INGRESSBW_FLOWCTRL_OFFSET    11
+#define    RTL8367C_PORT0_MISC_CFG_INGRESSBW_FLOWCTRL_MASK    0x800
+#define    RTL8367C_PORT0_MISC_CFG_INGRESSBW_IFG_OFFSET    10
+#define    RTL8367C_PORT0_MISC_CFG_INGRESSBW_IFG_MASK    0x400
+#define    RTL8367C_PORT0_MISC_CFG_RX_SPC_OFFSET    9
+#define    RTL8367C_PORT0_MISC_CFG_RX_SPC_MASK    0x200
+#define    RTL8367C_PORT0_MISC_CFG_CRC_SKIP_OFFSET    8
+#define    RTL8367C_PORT0_MISC_CFG_CRC_SKIP_MASK    0x100
+#define    RTL8367C_PORT0_MISC_CFG_PKTGEN_TX_FIRST_OFFSET    7
+#define    RTL8367C_PORT0_MISC_CFG_PKTGEN_TX_FIRST_MASK    0x80
+#define    RTL8367C_PORT0_MISC_CFG_MAC_LOOPBACK_OFFSET    6
+#define    RTL8367C_PORT0_MISC_CFG_MAC_LOOPBACK_MASK    0x40
+#define    RTL8367C_PORT0_MISC_CFG_VLAN_EGRESS_MODE_OFFSET    4
+#define    RTL8367C_PORT0_MISC_CFG_VLAN_EGRESS_MODE_MASK    0x30
+#define    RTL8367C_PORT0_MISC_CFG_CONGESTION_SUSTAIN_TIME_OFFSET    0
+#define    RTL8367C_PORT0_MISC_CFG_CONGESTION_SUSTAIN_TIME_MASK    0xF
+
+#define    RTL8367C_REG_INGRESSBW_PORT0_RATE_CTRL0    0x000f
+
+#define    RTL8367C_REG_INGRESSBW_PORT0_RATE_CTRL1    0x0010
+#define    RTL8367C_INGRESSBW_PORT0_RATE_CTRL1_DUMMY_OFFSET    3
+#define    RTL8367C_INGRESSBW_PORT0_RATE_CTRL1_DUMMY_MASK    0xFFF8
+#define    RTL8367C_INGRESSBW_PORT0_RATE_CTRL1_INGRESSBW_RATE16_OFFSET    0
+#define    RTL8367C_INGRESSBW_PORT0_RATE_CTRL1_INGRESSBW_RATE16_MASK    0x7
+
+#define    RTL8367C_REG_PORT0_FORCE_RATE0    0x0011
+
+#define    RTL8367C_REG_PORT0_FORCE_RATE1    0x0012
+
+#define    RTL8367C_REG_PORT0_CURENT_RATE0    0x0013
+
+#define    RTL8367C_REG_PORT0_CURENT_RATE1    0x0014
+
+#define    RTL8367C_REG_PORT0_PAGE_COUNTER    0x0015
+#define    RTL8367C_PORT0_PAGE_COUNTER_OFFSET    0
+#define    RTL8367C_PORT0_PAGE_COUNTER_MASK    0x7F
+
+#define    RTL8367C_REG_PAGEMETER_PORT0_CTRL0    0x0016
+
+#define    RTL8367C_REG_PAGEMETER_PORT0_CTRL1    0x0017
+
+#define    RTL8367C_REG_PORT0_EEECFG    0x0018
+#define    RTL8367C_PORT0_EEECFG_EEEP_ENABLE_TX_OFFSET    14
+#define    RTL8367C_PORT0_EEECFG_EEEP_ENABLE_TX_MASK    0x4000
+#define    RTL8367C_PORT0_EEECFG_EEEP_ENABLE_RX_OFFSET    13
+#define    RTL8367C_PORT0_EEECFG_EEEP_ENABLE_RX_MASK    0x2000
+#define    RTL8367C_PORT0_EEECFG_EEE_FORCE_OFFSET    12
+#define    RTL8367C_PORT0_EEECFG_EEE_FORCE_MASK    0x1000
+#define    RTL8367C_PORT0_EEECFG_EEE_100M_OFFSET    11
+#define    RTL8367C_PORT0_EEECFG_EEE_100M_MASK    0x800
+#define    RTL8367C_PORT0_EEECFG_EEE_GIGA_500M_OFFSET    10
+#define    RTL8367C_PORT0_EEECFG_EEE_GIGA_500M_MASK    0x400
+#define    RTL8367C_PORT0_EEECFG_EEE_TX_OFFSET    9
+#define    RTL8367C_PORT0_EEECFG_EEE_TX_MASK    0x200
+#define    RTL8367C_PORT0_EEECFG_EEE_RX_OFFSET    8
+#define    RTL8367C_PORT0_EEECFG_EEE_RX_MASK    0x100
+#define    RTL8367C_PORT0_EEECFG_EEE_DSP_RX_OFFSET    6
+#define    RTL8367C_PORT0_EEECFG_EEE_DSP_RX_MASK    0x40
+#define    RTL8367C_PORT0_EEECFG_EEE_LPI_OFFSET    5
+#define    RTL8367C_PORT0_EEECFG_EEE_LPI_MASK    0x20
+#define    RTL8367C_PORT0_EEECFG_EEE_TX_LPI_OFFSET    4
+#define    RTL8367C_PORT0_EEECFG_EEE_TX_LPI_MASK    0x10
+#define    RTL8367C_PORT0_EEECFG_EEE_RX_LPI_OFFSET    3
+#define    RTL8367C_PORT0_EEECFG_EEE_RX_LPI_MASK    0x8
+#define    RTL8367C_PORT0_EEECFG_EEE_PAUSE_INDICATOR_OFFSET    2
+#define    RTL8367C_PORT0_EEECFG_EEE_PAUSE_INDICATOR_MASK    0x4
+#define    RTL8367C_PORT0_EEECFG_EEE_WAKE_REQ_OFFSET    1
+#define    RTL8367C_PORT0_EEECFG_EEE_WAKE_REQ_MASK    0x2
+#define    RTL8367C_PORT0_EEECFG_EEE_SLEEP_REQ_OFFSET    0
+#define    RTL8367C_PORT0_EEECFG_EEE_SLEEP_REQ_MASK    0x1
+
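PORT0_EEECFG mixes writable EEE enables with what appear to be read-only status bits (LPI, pause indicator, wake/sleep requests), so updates should be read-modify-write. A sketch that turns on EEE for TX and RX on port 0, with the same assumed accessors:

    /* Enable EEE on TX and RX for port 0 without disturbing the other bits. */
    static int port0_eee_enable(void)
    {
        unsigned int val;
        int ret;

        ret = rtl8367c_getAsicReg(RTL8367C_REG_PORT0_EEECFG, &val);
        if (ret)
            return ret;
        val |= RTL8367C_PORT0_EEECFG_EEE_TX_MASK | RTL8367C_PORT0_EEECFG_EEE_RX_MASK;
        return rtl8367c_setAsicReg(RTL8367C_REG_PORT0_EEECFG, val);
    }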
+#define    RTL8367C_REG_PORT0_EEETXMTR    0x0019
+
+#define    RTL8367C_REG_PORT0_EEERXMTR    0x001a
+
+#define    RTL8367C_REG_PORT0_EEEPTXMTR    0x001b
+
+#define    RTL8367C_REG_PORT0_EEEPRXMTR    0x001c
+
+#define    RTL8367C_REG_PTP_PORT0_CFG1    0x001e
+#define    RTL8367C_PTP_PORT0_CFG1_OFFSET    7
+#define    RTL8367C_PTP_PORT0_CFG1_MASK    0xFF
+
+#define    RTL8367C_REG_P0_MSIC1    0x001f
+#define    RTL8367C_P0_MSIC1_OFFSET    0
+#define    RTL8367C_P0_MSIC1_MASK    0x1
+
+#define    RTL8367C_REG_PORT1_CGST_HALF_CFG    0x0020
+#define    RTL8367C_PORT1_CGST_HALF_CFG_CONGESTION_TIME_OFFSET    4
+#define    RTL8367C_PORT1_CGST_HALF_CFG_CONGESTION_TIME_MASK    0xF0
+#define    RTL8367C_PORT1_CGST_HALF_CFG_CONGESTION_SUSTAIN_TIME_OFFSET    0
+#define    RTL8367C_PORT1_CGST_HALF_CFG_CONGESTION_SUSTAIN_TIME_MASK    0xF
+
+#define    RTL8367C_REG_PKTGEN_PORT1_CTRL    0x0021
+#define    RTL8367C_PKTGEN_PORT1_CTRL_STATUS_OFFSET    15
+#define    RTL8367C_PKTGEN_PORT1_CTRL_STATUS_MASK    0x8000
+#define    RTL8367C_PKTGEN_PORT1_CTRL_PKTGEN_STS_OFFSET    13
+#define    RTL8367C_PKTGEN_PORT1_CTRL_PKTGEN_STS_MASK    0x2000
+#define    RTL8367C_PKTGEN_PORT1_CTRL_CRC_NO_ERROR_OFFSET    4
+#define    RTL8367C_PKTGEN_PORT1_CTRL_CRC_NO_ERROR_MASK    0x10
+#define    RTL8367C_PKTGEN_PORT1_CTRL_CMD_START_OFFSET    0
+#define    RTL8367C_PKTGEN_PORT1_CTRL_CMD_START_MASK    0x1
+
+#define    RTL8367C_REG_TX_ERR_CNT_PORT1    0x0022
+#define    RTL8367C_TX_ERR_CNT_PORT1_OFFSET    0
+#define    RTL8367C_TX_ERR_CNT_PORT1_MASK    0x7
+
+#define    RTL8367C_REG_PKTGEN_PORT1_DA0    0x0023
+
+#define    RTL8367C_REG_PKTGEN_PORT1_DA1    0x0024
+
+#define    RTL8367C_REG_PKTGEN_PORT1_DA2    0x0025
+
+#define    RTL8367C_REG_PKTGEN_PORT1_SA0    0x0026
+
+#define    RTL8367C_REG_PKTGEN_PORT1_SA1    0x0027
+
+#define    RTL8367C_REG_PKTGEN_PORT1_SA2    0x0028
+
+#define    RTL8367C_REG_PKTGEN_PORT1_COUNTER0    0x0029
+
+#define    RTL8367C_REG_PKTGEN_PORT1_COUNTER1    0x002a
+#define    RTL8367C_PKTGEN_PORT1_COUNTER1_OFFSET    0
+#define    RTL8367C_PKTGEN_PORT1_COUNTER1_MASK    0xFF
+
+#define    RTL8367C_REG_PKTGEN_PORT1_TX_LENGTH    0x002b
+#define    RTL8367C_PKTGEN_PORT1_TX_LENGTH_OFFSET    0
+#define    RTL8367C_PKTGEN_PORT1_TX_LENGTH_MASK    0x3FFF
+
+#define    RTL8367C_REG_PKTGEN_PORT1_TIMER    0x002d
+#define    RTL8367C_PKTGEN_PORT1_TIMER_TIMER_OFFSET    4
+#define    RTL8367C_PKTGEN_PORT1_TIMER_TIMER_MASK    0xF0
+#define    RTL8367C_PKTGEN_PORT1_TIMER_RX_DMA_ERR_FLAG_OFFSET    3
+#define    RTL8367C_PKTGEN_PORT1_TIMER_RX_DMA_ERR_FLAG_MASK    0x8
+
+#define    RTL8367C_REG_PORT1_MISC_CFG    0x002e
+#define    RTL8367C_PORT1_MISC_CFG_SMALL_TAG_IPG_OFFSET    15
+#define    RTL8367C_PORT1_MISC_CFG_SMALL_TAG_IPG_MASK    0x8000
+#define    RTL8367C_PORT1_MISC_CFG_TX_ITFSP_MODE_OFFSET    14
+#define    RTL8367C_PORT1_MISC_CFG_TX_ITFSP_MODE_MASK    0x4000
+#define    RTL8367C_PORT1_MISC_CFG_FLOWCTRL_INDEP_OFFSET    13
+#define    RTL8367C_PORT1_MISC_CFG_FLOWCTRL_INDEP_MASK    0x2000
+#define    RTL8367C_PORT1_MISC_CFG_DOT1Q_REMARK_ENABLE_OFFSET    12
+#define    RTL8367C_PORT1_MISC_CFG_DOT1Q_REMARK_ENABLE_MASK    0x1000
+#define    RTL8367C_PORT1_MISC_CFG_INGRESSBW_FLOWCTRL_OFFSET    11
+#define    RTL8367C_PORT1_MISC_CFG_INGRESSBW_FLOWCTRL_MASK    0x800
+#define    RTL8367C_PORT1_MISC_CFG_INGRESSBW_IFG_OFFSET    10
+#define    RTL8367C_PORT1_MISC_CFG_INGRESSBW_IFG_MASK    0x400
+#define    RTL8367C_PORT1_MISC_CFG_RX_SPC_OFFSET    9
+#define    RTL8367C_PORT1_MISC_CFG_RX_SPC_MASK    0x200
+#define    RTL8367C_PORT1_MISC_CFG_CRC_SKIP_OFFSET    8
+#define    RTL8367C_PORT1_MISC_CFG_CRC_SKIP_MASK    0x100
+#define    RTL8367C_PORT1_MISC_CFG_PKTGEN_TX_FIRST_OFFSET    7
+#define    RTL8367C_PORT1_MISC_CFG_PKTGEN_TX_FIRST_MASK    0x80
+#define    RTL8367C_PORT1_MISC_CFG_MAC_LOOPBACK_OFFSET    6
+#define    RTL8367C_PORT1_MISC_CFG_MAC_LOOPBACK_MASK    0x40
+#define    RTL8367C_PORT1_MISC_CFG_VLAN_EGRESS_MODE_OFFSET    4
+#define    RTL8367C_PORT1_MISC_CFG_VLAN_EGRESS_MODE_MASK    0x30
+#define    RTL8367C_PORT1_MISC_CFG_CONGESTION_SUSTAIN_TIME_OFFSET    0
+#define    RTL8367C_PORT1_MISC_CFG_CONGESTION_SUSTAIN_TIME_MASK    0xF
+
+#define    RTL8367C_REG_INGRESSBW_PORT1_RATE_CTRL0    0x002f
+
+#define    RTL8367C_REG_INGRESSBW_PORT1_RATE_CTRL1    0x0030
+#define    RTL8367C_INGRESSBW_PORT1_RATE_CTRL1_DUMMY_OFFSET    3
+#define    RTL8367C_INGRESSBW_PORT1_RATE_CTRL1_DUMMY_MASK    0xFFF8
+#define    RTL8367C_INGRESSBW_PORT1_RATE_CTRL1_INGRESSBW_RATE16_OFFSET    0
+#define    RTL8367C_INGRESSBW_PORT1_RATE_CTRL1_INGRESSBW_RATE16_MASK    0x7
+
+#define    RTL8367C_REG_PORT1_FORCE_RATE0    0x0031
+
+#define    RTL8367C_REG_PORT1_FORCE_RATE1    0x0032
+
+#define    RTL8367C_REG_PORT1_CURENT_RATE0    0x0033
+
+#define    RTL8367C_REG_PORT1_CURENT_RATE1    0x0034
+
+#define    RTL8367C_REG_PORT1_PAGE_COUNTER    0x0035
+#define    RTL8367C_PORT1_PAGE_COUNTER_OFFSET    0
+#define    RTL8367C_PORT1_PAGE_COUNTER_MASK    0x7F
+
+#define    RTL8367C_REG_PAGEMETER_PORT1_CTRL0    0x0036
+
+#define    RTL8367C_REG_PAGEMETER_PORT1_CTRL1    0x0037
+
+#define    RTL8367C_REG_PORT1_EEECFG    0x0038
+#define    RTL8367C_PORT1_EEECFG_EEEP_ENABLE_TX_OFFSET    14
+#define    RTL8367C_PORT1_EEECFG_EEEP_ENABLE_TX_MASK    0x4000
+#define    RTL8367C_PORT1_EEECFG_EEEP_ENABLE_RX_OFFSET    13
+#define    RTL8367C_PORT1_EEECFG_EEEP_ENABLE_RX_MASK    0x2000
+#define    RTL8367C_PORT1_EEECFG_EEE_FORCE_OFFSET    12
+#define    RTL8367C_PORT1_EEECFG_EEE_FORCE_MASK    0x1000
+#define    RTL8367C_PORT1_EEECFG_EEE_100M_OFFSET    11
+#define    RTL8367C_PORT1_EEECFG_EEE_100M_MASK    0x800
+#define    RTL8367C_PORT1_EEECFG_EEE_GIGA_500M_OFFSET    10
+#define    RTL8367C_PORT1_EEECFG_EEE_GIGA_500M_MASK    0x400
+#define    RTL8367C_PORT1_EEECFG_EEE_TX_OFFSET    9
+#define    RTL8367C_PORT1_EEECFG_EEE_TX_MASK    0x200
+#define    RTL8367C_PORT1_EEECFG_EEE_RX_OFFSET    8
+#define    RTL8367C_PORT1_EEECFG_EEE_RX_MASK    0x100
+#define    RTL8367C_PORT1_EEECFG_EEE_DSP_RX_OFFSET    6
+#define    RTL8367C_PORT1_EEECFG_EEE_DSP_RX_MASK    0x40
+#define    RTL8367C_PORT1_EEECFG_EEE_LPI_OFFSET    5
+#define    RTL8367C_PORT1_EEECFG_EEE_LPI_MASK    0x20
+#define    RTL8367C_PORT1_EEECFG_EEE_TX_LPI_OFFSET    4
+#define    RTL8367C_PORT1_EEECFG_EEE_TX_LPI_MASK    0x10
+#define    RTL8367C_PORT1_EEECFG_EEE_RX_LPI_OFFSET    3
+#define    RTL8367C_PORT1_EEECFG_EEE_RX_LPI_MASK    0x8
+#define    RTL8367C_PORT1_EEECFG_EEE_PAUSE_INDICATOR_OFFSET    2
+#define    RTL8367C_PORT1_EEECFG_EEE_PAUSE_INDICATOR_MASK    0x4
+#define    RTL8367C_PORT1_EEECFG_EEE_WAKE_REQ_OFFSET    1
+#define    RTL8367C_PORT1_EEECFG_EEE_WAKE_REQ_MASK    0x2
+#define    RTL8367C_PORT1_EEECFG_EEE_SLEEP_REQ_OFFSET    0
+#define    RTL8367C_PORT1_EEECFG_EEE_SLEEP_REQ_MASK    0x1
+
+#define    RTL8367C_REG_PORT1_EEETXMTR    0x0039
+
+#define    RTL8367C_REG_PORT1_EEERXMTR    0x003a
+
+#define    RTL8367C_REG_PORT1_EEEPTXMTR    0x003b
+
+#define    RTL8367C_REG_PORT1_EEEPRXMTR    0x003c
+
+#define    RTL8367C_REG_PTP_PORT1_CFG1    0x003e
+#define    RTL8367C_PTP_PORT1_CFG1_OFFSET    7
+#define    RTL8367C_PTP_PORT1_CFG1_MASK    0xFF
+
+#define    RTL8367C_REG_P1_MSIC1    0x003f
+#define    RTL8367C_P1_MSIC1_OFFSET    0
+#define    RTL8367C_P1_MSIC1_MASK    0x1
+
+#define    RTL8367C_REG_PORT2_CGST_HALF_CFG    0x0040
+#define    RTL8367C_PORT2_CGST_HALF_CFG_CONGESTION_TIME_OFFSET    4
+#define    RTL8367C_PORT2_CGST_HALF_CFG_CONGESTION_TIME_MASK    0xF0
+#define    RTL8367C_PORT2_CGST_HALF_CFG_CONGESTION_SUSTAIN_TIME_OFFSET    0
+#define    RTL8367C_PORT2_CGST_HALF_CFG_CONGESTION_SUSTAIN_TIME_MASK    0xF
+
+#define    RTL8367C_REG_PKTGEN_PORT2_CTRL    0x0041
+#define    RTL8367C_PKTGEN_PORT2_CTRL_STATUS_OFFSET    15
+#define    RTL8367C_PKTGEN_PORT2_CTRL_STATUS_MASK    0x8000
+#define    RTL8367C_PKTGEN_PORT2_CTRL_PKTGEN_STS_OFFSET    13
+#define    RTL8367C_PKTGEN_PORT2_CTRL_PKTGEN_STS_MASK    0x2000
+#define    RTL8367C_PKTGEN_PORT2_CTRL_CRC_NO_ERROR_OFFSET    4
+#define    RTL8367C_PKTGEN_PORT2_CTRL_CRC_NO_ERROR_MASK    0x10
+#define    RTL8367C_PKTGEN_PORT2_CTRL_CMD_START_OFFSET    0
+#define    RTL8367C_PKTGEN_PORT2_CTRL_CMD_START_MASK    0x1
+
+#define    RTL8367C_REG_TX_ERR_CNT_PORT2    0x0042
+#define    RTL8367C_TX_ERR_CNT_PORT2_OFFSET    0
+#define    RTL8367C_TX_ERR_CNT_PORT2_MASK    0x7
+
+#define    RTL8367C_REG_PKTGEN_PORT2_DA0    0x0043
+
+#define    RTL8367C_REG_PKTGEN_PORT2_DA1    0x0044
+
+#define    RTL8367C_REG_PKTGEN_PORT2_DA2    0x0045
+
+#define    RTL8367C_REG_PKTGEN_PORT2_SA0    0x0046
+
+#define    RTL8367C_REG_PKTGEN_PORT2_SA1    0x0047
+
+#define    RTL8367C_REG_PKTGEN_PORT2_SA2    0x0048
+
+#define    RTL8367C_REG_PKTGEN_PORT2_COUNTER0    0x0049
+
+#define    RTL8367C_REG_PKTGEN_PORT2_COUNTER1    0x004a
+#define    RTL8367C_PKTGEN_PORT2_COUNTER1_OFFSET    0
+#define    RTL8367C_PKTGEN_PORT2_COUNTER1_MASK    0xFF
+
+#define    RTL8367C_REG_PKTGEN_PORT2_TX_LENGTH    0x004b
+#define    RTL8367C_PKTGEN_PORT2_TX_LENGTH_OFFSET    0
+#define    RTL8367C_PKTGEN_PORT2_TX_LENGTH_MASK    0x3FFF
+
+#define    RTL8367C_REG_PKTGEN_PORT2_TIMER    0x004d
+#define    RTL8367C_PKTGEN_PORT2_TIMER_TIMER_OFFSET    4
+#define    RTL8367C_PKTGEN_PORT2_TIMER_TIMER_MASK    0xF0
+#define    RTL8367C_PKTGEN_PORT2_TIMER_RX_DMA_ERR_FLAG_OFFSET    3
+#define    RTL8367C_PKTGEN_PORT2_TIMER_RX_DMA_ERR_FLAG_MASK    0x8
+
+#define    RTL8367C_REG_PORT2_MISC_CFG    0x004e
+#define    RTL8367C_PORT2_MISC_CFG_SMALL_TAG_IPG_OFFSET    15
+#define    RTL8367C_PORT2_MISC_CFG_SMALL_TAG_IPG_MASK    0x8000
+#define    RTL8367C_PORT2_MISC_CFG_TX_ITFSP_MODE_OFFSET    14
+#define    RTL8367C_PORT2_MISC_CFG_TX_ITFSP_MODE_MASK    0x4000
+#define    RTL8367C_PORT2_MISC_CFG_FLOWCTRL_INDEP_OFFSET    13
+#define    RTL8367C_PORT2_MISC_CFG_FLOWCTRL_INDEP_MASK    0x2000
+#define    RTL8367C_PORT2_MISC_CFG_DOT1Q_REMARK_ENABLE_OFFSET    12
+#define    RTL8367C_PORT2_MISC_CFG_DOT1Q_REMARK_ENABLE_MASK    0x1000
+#define    RTL8367C_PORT2_MISC_CFG_INGRESSBW_FLOWCTRL_OFFSET    11
+#define    RTL8367C_PORT2_MISC_CFG_INGRESSBW_FLOWCTRL_MASK    0x800
+#define    RTL8367C_PORT2_MISC_CFG_INGRESSBW_IFG_OFFSET    10
+#define    RTL8367C_PORT2_MISC_CFG_INGRESSBW_IFG_MASK    0x400
+#define    RTL8367C_PORT2_MISC_CFG_RX_SPC_OFFSET    9
+#define    RTL8367C_PORT2_MISC_CFG_RX_SPC_MASK    0x200
+#define    RTL8367C_PORT2_MISC_CFG_CRC_SKIP_OFFSET    8
+#define    RTL8367C_PORT2_MISC_CFG_CRC_SKIP_MASK    0x100
+#define    RTL8367C_PORT2_MISC_CFG_PKTGEN_TX_FIRST_OFFSET    7
+#define    RTL8367C_PORT2_MISC_CFG_PKTGEN_TX_FIRST_MASK    0x80
+#define    RTL8367C_PORT2_MISC_CFG_MAC_LOOPBACK_OFFSET    6
+#define    RTL8367C_PORT2_MISC_CFG_MAC_LOOPBACK_MASK    0x40
+#define    RTL8367C_PORT2_MISC_CFG_VLAN_EGRESS_MODE_OFFSET    4
+#define    RTL8367C_PORT2_MISC_CFG_VLAN_EGRESS_MODE_MASK    0x30
+#define    RTL8367C_PORT2_MISC_CFG_CONGESTION_SUSTAIN_TIME_OFFSET    0
+#define    RTL8367C_PORT2_MISC_CFG_CONGESTION_SUSTAIN_TIME_MASK    0xF
+
+#define    RTL8367C_REG_INGRESSBW_PORT2_RATE_CTRL0    0x004f
+
+#define    RTL8367C_REG_INGRESSBW_PORT2_RATE_CTRL1    0x0050
+#define    RTL8367C_INGRESSBW_PORT2_RATE_CTRL1_DUMMY_OFFSET    3
+#define    RTL8367C_INGRESSBW_PORT2_RATE_CTRL1_DUMMY_MASK    0xFFF8
+#define    RTL8367C_INGRESSBW_PORT2_RATE_CTRL1_INGRESSBW_RATE16_OFFSET    0
+#define    RTL8367C_INGRESSBW_PORT2_RATE_CTRL1_INGRESSBW_RATE16_MASK    0x7
+
+#define    RTL8367C_REG_PORT2_FORCE_RATE0    0x0051
+
+#define    RTL8367C_REG_PORT2_FORCE_RATE1    0x0052
+
+#define    RTL8367C_REG_PORT2_CURENT_RATE0    0x0053
+
+#define    RTL8367C_REG_PORT2_CURENT_RATE1    0x0054
+
+#define    RTL8367C_REG_PORT2_PAGE_COUNTER    0x0055
+#define    RTL8367C_PORT2_PAGE_COUNTER_OFFSET    0
+#define    RTL8367C_PORT2_PAGE_COUNTER_MASK    0x7F
+
+#define    RTL8367C_REG_PAGEMETER_PORT2_CTRL0    0x0056
+
+#define    RTL8367C_REG_PAGEMETER_PORT2_CTRL1    0x0057
+
+#define    RTL8367C_REG_PORT2_EEECFG    0x0058
+#define    RTL8367C_PORT2_EEECFG_EEEP_ENABLE_TX_OFFSET    14
+#define    RTL8367C_PORT2_EEECFG_EEEP_ENABLE_TX_MASK    0x4000
+#define    RTL8367C_PORT2_EEECFG_EEEP_ENABLE_RX_OFFSET    13
+#define    RTL8367C_PORT2_EEECFG_EEEP_ENABLE_RX_MASK    0x2000
+#define    RTL8367C_PORT2_EEECFG_EEE_FORCE_OFFSET    12
+#define    RTL8367C_PORT2_EEECFG_EEE_FORCE_MASK    0x1000
+#define    RTL8367C_PORT2_EEECFG_EEE_100M_OFFSET    11
+#define    RTL8367C_PORT2_EEECFG_EEE_100M_MASK    0x800
+#define    RTL8367C_PORT2_EEECFG_EEE_GIGA_500M_OFFSET    10
+#define    RTL8367C_PORT2_EEECFG_EEE_GIGA_500M_MASK    0x400
+#define    RTL8367C_PORT2_EEECFG_EEE_TX_OFFSET    9
+#define    RTL8367C_PORT2_EEECFG_EEE_TX_MASK    0x200
+#define    RTL8367C_PORT2_EEECFG_EEE_RX_OFFSET    8
+#define    RTL8367C_PORT2_EEECFG_EEE_RX_MASK    0x100
+#define    RTL8367C_PORT2_EEECFG_EEE_DSP_RX_OFFSET    6
+#define    RTL8367C_PORT2_EEECFG_EEE_DSP_RX_MASK    0x40
+#define    RTL8367C_PORT2_EEECFG_EEE_LPI_OFFSET    5
+#define    RTL8367C_PORT2_EEECFG_EEE_LPI_MASK    0x20
+#define    RTL8367C_PORT2_EEECFG_EEE_TX_LPI_OFFSET    4
+#define    RTL8367C_PORT2_EEECFG_EEE_TX_LPI_MASK    0x10
+#define    RTL8367C_PORT2_EEECFG_EEE_RX_LPI_OFFSET    3
+#define    RTL8367C_PORT2_EEECFG_EEE_RX_LPI_MASK    0x8
+#define    RTL8367C_PORT2_EEECFG_EEE_PAUSE_INDICATOR_OFFSET    2
+#define    RTL8367C_PORT2_EEECFG_EEE_PAUSE_INDICATOR_MASK    0x4
+#define    RTL8367C_PORT2_EEECFG_EEE_WAKE_REQ_OFFSET    1
+#define    RTL8367C_PORT2_EEECFG_EEE_WAKE_REQ_MASK    0x2
+#define    RTL8367C_PORT2_EEECFG_EEE_SLEEP_REQ_OFFSET    0
+#define    RTL8367C_PORT2_EEECFG_EEE_SLEEP_REQ_MASK    0x1
+
+#define    RTL8367C_REG_PORT2_EEETXMTR    0x0059
+
+#define    RTL8367C_REG_PORT2_EEERXMTR    0x005a
+
+#define    RTL8367C_REG_PORT2_EEEPTXMTR    0x005b
+
+#define    RTL8367C_REG_PORT2_EEEPRXMTR    0x005c
+
+#define    RTL8367C_REG_PTP_PORT2_CFG1    0x005e
+#define    RTL8367C_PTP_PORT2_CFG1_OFFSET    7
+#define    RTL8367C_PTP_PORT2_CFG1_MASK    0xFF
+
+#define    RTL8367C_REG_P2_MSIC1    0x005f
+#define    RTL8367C_P2_MSIC1_OFFSET    0
+#define    RTL8367C_P2_MSIC1_MASK    0x1
+
+#define    RTL8367C_REG_PORT3_CGST_HALF_CFG    0x0060
+#define    RTL8367C_PORT3_CGST_HALF_CFG_CONGESTION_TIME_OFFSET    4
+#define    RTL8367C_PORT3_CGST_HALF_CFG_CONGESTION_TIME_MASK    0xF0
+#define    RTL8367C_PORT3_CGST_HALF_CFG_CONGESTION_SUSTAIN_TIME_OFFSET    0
+#define    RTL8367C_PORT3_CGST_HALF_CFG_CONGESTION_SUSTAIN_TIME_MASK    0xF
+
+#define    RTL8367C_REG_PKTGEN_PORT3_CTRL    0x0061
+#define    RTL8367C_PKTGEN_PORT3_CTRL_STATUS_OFFSET    15
+#define    RTL8367C_PKTGEN_PORT3_CTRL_STATUS_MASK    0x8000
+#define    RTL8367C_PKTGEN_PORT3_CTRL_PKTGEN_STS_OFFSET    13
+#define    RTL8367C_PKTGEN_PORT3_CTRL_PKTGEN_STS_MASK    0x2000
+#define    RTL8367C_PKTGEN_PORT3_CTRL_CRC_NO_ERROR_OFFSET    4
+#define    RTL8367C_PKTGEN_PORT3_CTRL_CRC_NO_ERROR_MASK    0x10
+#define    RTL8367C_PKTGEN_PORT3_CTRL_CMD_START_OFFSET    0
+#define    RTL8367C_PKTGEN_PORT3_CTRL_CMD_START_MASK    0x1
+
+#define    RTL8367C_REG_TX_ERR_CNT_PORT3    0x0062
+#define    RTL8367C_TX_ERR_CNT_PORT3_OFFSET    0
+#define    RTL8367C_TX_ERR_CNT_PORT3_MASK    0x7
+
+#define    RTL8367C_REG_PKTGEN_PORT3_DA0    0x0063
+
+#define    RTL8367C_REG_PKTGEN_PORT3_DA1    0x0064
+
+#define    RTL8367C_REG_PKTGEN_PORT3_DA2    0x0065
+
+#define    RTL8367C_REG_PKTGEN_PORT3_SA0    0x0066
+
+#define    RTL8367C_REG_PKTGEN_PORT3_SA1    0x0067
+
+#define    RTL8367C_REG_PKTGEN_PORT3_SA2    0x0068
+
+#define    RTL8367C_REG_PKTGEN_PORT3_COUNTER0    0x0069
+
+#define    RTL8367C_REG_PKTGEN_PORT3_COUNTER1    0x006a
+#define    RTL8367C_PKTGEN_PORT3_COUNTER1_OFFSET    0
+#define    RTL8367C_PKTGEN_PORT3_COUNTER1_MASK    0xFF
+
+#define    RTL8367C_REG_PKTGEN_PORT3_TX_LENGTH    0x006b
+#define    RTL8367C_PKTGEN_PORT3_TX_LENGTH_OFFSET    0
+#define    RTL8367C_PKTGEN_PORT3_TX_LENGTH_MASK    0x3FFF
+
+#define    RTL8367C_REG_PKTGEN_PORT3_TIMER    0x006d
+#define    RTL8367C_PKTGEN_PORT3_TIMER_TIMER_OFFSET    4
+#define    RTL8367C_PKTGEN_PORT3_TIMER_TIMER_MASK    0xF0
+#define    RTL8367C_PKTGEN_PORT3_TIMER_RX_DMA_ERR_FLAG_OFFSET    3
+#define    RTL8367C_PKTGEN_PORT3_TIMER_RX_DMA_ERR_FLAG_MASK    0x8
+
+#define    RTL8367C_REG_PORT3_MISC_CFG    0x006e
+#define    RTL8367C_PORT3_MISC_CFG_SMALL_TAG_IPG_OFFSET    15
+#define    RTL8367C_PORT3_MISC_CFG_SMALL_TAG_IPG_MASK    0x8000
+#define    RTL8367C_PORT3_MISC_CFG_TX_ITFSP_MODE_OFFSET    14
+#define    RTL8367C_PORT3_MISC_CFG_TX_ITFSP_MODE_MASK    0x4000
+#define    RTL8367C_PORT3_MISC_CFG_FLOWCTRL_INDEP_OFFSET    13
+#define    RTL8367C_PORT3_MISC_CFG_FLOWCTRL_INDEP_MASK    0x2000
+#define    RTL8367C_PORT3_MISC_CFG_DOT1Q_REMARK_ENABLE_OFFSET    12
+#define    RTL8367C_PORT3_MISC_CFG_DOT1Q_REMARK_ENABLE_MASK    0x1000
+#define    RTL8367C_PORT3_MISC_CFG_INGRESSBW_FLOWCTRL_OFFSET    11
+#define    RTL8367C_PORT3_MISC_CFG_INGRESSBW_FLOWCTRL_MASK    0x800
+#define    RTL8367C_PORT3_MISC_CFG_INGRESSBW_IFG_OFFSET    10
+#define    RTL8367C_PORT3_MISC_CFG_INGRESSBW_IFG_MASK    0x400
+#define    RTL8367C_PORT3_MISC_CFG_RX_SPC_OFFSET    9
+#define    RTL8367C_PORT3_MISC_CFG_RX_SPC_MASK    0x200
+#define    RTL8367C_PORT3_MISC_CFG_CRC_SKIP_OFFSET    8
+#define    RTL8367C_PORT3_MISC_CFG_CRC_SKIP_MASK    0x100
+#define    RTL8367C_PORT3_MISC_CFG_PKTGEN_TX_FIRST_OFFSET    7
+#define    RTL8367C_PORT3_MISC_CFG_PKTGEN_TX_FIRST_MASK    0x80
+#define    RTL8367C_PORT3_MISC_CFG_MAC_LOOPBACK_OFFSET    6
+#define    RTL8367C_PORT3_MISC_CFG_MAC_LOOPBACK_MASK    0x40
+#define    RTL8367C_PORT3_MISC_CFG_VLAN_EGRESS_MODE_OFFSET    4
+#define    RTL8367C_PORT3_MISC_CFG_VLAN_EGRESS_MODE_MASK    0x30
+#define    RTL8367C_PORT3_MISC_CFG_CONGESTION_SUSTAIN_TIME_OFFSET    0
+#define    RTL8367C_PORT3_MISC_CFG_CONGESTION_SUSTAIN_TIME_MASK    0xF
+
+#define    RTL8367C_REG_INGRESSBW_PORT3_RATE_CTRL0    0x006f
+
+#define    RTL8367C_REG_INGRESSBW_PORT3_RATE_CTRL1    0x0070
+#define    RTL8367C_INGRESSBW_PORT3_RATE_CTRL1_DUMMY_OFFSET    3
+#define    RTL8367C_INGRESSBW_PORT3_RATE_CTRL1_DUMMY_MASK    0xFFF8
+#define    RTL8367C_INGRESSBW_PORT3_RATE_CTRL1_INGRESSBW_RATE16_OFFSET    0
+#define    RTL8367C_INGRESSBW_PORT3_RATE_CTRL1_INGRESSBW_RATE16_MASK    0x7
+
+#define    RTL8367C_REG_PORT3_FORCE_RATE0    0x0071
+
+#define    RTL8367C_REG_PORT3_FORCE_RATE1    0x0072
+
+#define    RTL8367C_REG_PORT3_CURENT_RATE0    0x0073
+
+#define    RTL8367C_REG_PORT3_CURENT_RATE1    0x0074
+
+#define    RTL8367C_REG_PORT3_PAGE_COUNTER    0x0075
+#define    RTL8367C_PORT3_PAGE_COUNTER_OFFSET    0
+#define    RTL8367C_PORT3_PAGE_COUNTER_MASK    0x7F
+
+#define    RTL8367C_REG_PAGEMETER_PORT3_CTRL0    0x0076
+
+#define    RTL8367C_REG_PAGEMETER_PORT3_CTRL1    0x0077
+
+#define    RTL8367C_REG_PORT3_EEECFG    0x0078
+#define    RTL8367C_PORT3_EEECFG_EEEP_ENABLE_TX_OFFSET    14
+#define    RTL8367C_PORT3_EEECFG_EEEP_ENABLE_TX_MASK    0x4000
+#define    RTL8367C_PORT3_EEECFG_EEEP_ENABLE_RX_OFFSET    13
+#define    RTL8367C_PORT3_EEECFG_EEEP_ENABLE_RX_MASK    0x2000
+#define    RTL8367C_PORT3_EEECFG_EEE_FORCE_OFFSET    12
+#define    RTL8367C_PORT3_EEECFG_EEE_FORCE_MASK    0x1000
+#define    RTL8367C_PORT3_EEECFG_EEE_100M_OFFSET    11
+#define    RTL8367C_PORT3_EEECFG_EEE_100M_MASK    0x800
+#define    RTL8367C_PORT3_EEECFG_EEE_GIGA_500M_OFFSET    10
+#define    RTL8367C_PORT3_EEECFG_EEE_GIGA_500M_MASK    0x400
+#define    RTL8367C_PORT3_EEECFG_EEE_TX_OFFSET    9
+#define    RTL8367C_PORT3_EEECFG_EEE_TX_MASK    0x200
+#define    RTL8367C_PORT3_EEECFG_EEE_RX_OFFSET    8
+#define    RTL8367C_PORT3_EEECFG_EEE_RX_MASK    0x100
+#define    RTL8367C_PORT3_EEECFG_EEE_DSP_RX_OFFSET    6
+#define    RTL8367C_PORT3_EEECFG_EEE_DSP_RX_MASK    0x40
+#define    RTL8367C_PORT3_EEECFG_EEE_LPI_OFFSET    5
+#define    RTL8367C_PORT3_EEECFG_EEE_LPI_MASK    0x20
+#define    RTL8367C_PORT3_EEECFG_EEE_TX_LPI_OFFSET    4
+#define    RTL8367C_PORT3_EEECFG_EEE_TX_LPI_MASK    0x10
+#define    RTL8367C_PORT3_EEECFG_EEE_RX_LPI_OFFSET    3
+#define    RTL8367C_PORT3_EEECFG_EEE_RX_LPI_MASK    0x8
+#define    RTL8367C_PORT3_EEECFG_EEE_PAUSE_INDICATOR_OFFSET    2
+#define    RTL8367C_PORT3_EEECFG_EEE_PAUSE_INDICATOR_MASK    0x4
+#define    RTL8367C_PORT3_EEECFG_EEE_WAKE_REQ_OFFSET    1
+#define    RTL8367C_PORT3_EEECFG_EEE_WAKE_REQ_MASK    0x2
+#define    RTL8367C_PORT3_EEECFG_EEE_SLEEP_REQ_OFFSET    0
+#define    RTL8367C_PORT3_EEECFG_EEE_SLEEP_REQ_MASK    0x1
+
+#define    RTL8367C_REG_PORT3_EEETXMTR    0x0079
+
+#define    RTL8367C_REG_PORT3_EEERXMTR    0x007a
+
+#define    RTL8367C_REG_PORT3_EEEPTXMTR    0x007b
+
+#define    RTL8367C_REG_PORT3_EEEPRXMTR    0x007c
+
+#define    RTL8367C_REG_PTP_PORT3_CFG1    0x007e
+#define    RTL8367C_PTP_PORT3_CFG1_OFFSET    7
+#define    RTL8367C_PTP_PORT3_CFG1_MASK    0xFF
+
+#define    RTL8367C_REG_P3_MSIC1    0x007f
+#define    RTL8367C_P3_MSIC1_OFFSET    0
+#define    RTL8367C_P3_MSIC1_MASK    0x1
+
+#define    RTL8367C_REG_PORT4_CGST_HALF_CFG    0x0080
+#define    RTL8367C_PORT4_CGST_HALF_CFG_CONGESTION_TIME_OFFSET    4
+#define    RTL8367C_PORT4_CGST_HALF_CFG_CONGESTION_TIME_MASK    0xF0
+#define    RTL8367C_PORT4_CGST_HALF_CFG_CONGESTION_SUSTAIN_TIME_OFFSET    0
+#define    RTL8367C_PORT4_CGST_HALF_CFG_CONGESTION_SUSTAIN_TIME_MASK    0xF
+
+#define    RTL8367C_REG_PKTGEN_PORT4_CTRL    0x0081
+#define    RTL8367C_PKTGEN_PORT4_CTRL_STATUS_OFFSET    15
+#define    RTL8367C_PKTGEN_PORT4_CTRL_STATUS_MASK    0x8000
+#define    RTL8367C_PKTGEN_PORT4_CTRL_PKTGEN_STS_OFFSET    13
+#define    RTL8367C_PKTGEN_PORT4_CTRL_PKTGEN_STS_MASK    0x2000
+#define    RTL8367C_PKTGEN_PORT4_CTRL_CRC_NO_ERROR_OFFSET    4
+#define    RTL8367C_PKTGEN_PORT4_CTRL_CRC_NO_ERROR_MASK    0x10
+#define    RTL8367C_PKTGEN_PORT4_CTRL_CMD_START_OFFSET    0
+#define    RTL8367C_PKTGEN_PORT4_CTRL_CMD_START_MASK    0x1
+
+#define    RTL8367C_REG_TX_ERR_CNT_PORT4    0x0082
+#define    RTL8367C_TX_ERR_CNT_PORT4_OFFSET    0
+#define    RTL8367C_TX_ERR_CNT_PORT4_MASK    0x7
+
+#define    RTL8367C_REG_PKTGEN_PORT4_DA0    0x0083
+
+#define    RTL8367C_REG_PKTGEN_PORT4_DA1    0x0084
+
+#define    RTL8367C_REG_PKTGEN_PORT4_DA2    0x0085
+
+#define    RTL8367C_REG_PKTGEN_PORT4_SA0    0x0086
+
+#define    RTL8367C_REG_PKTGEN_PORT4_SA1    0x0087
+
+#define    RTL8367C_REG_PKTGEN_PORT4_SA2    0x0088
+
+#define    RTL8367C_REG_PKTGEN_PORT4_COUNTER0    0x0089
+
+#define    RTL8367C_REG_PKTGEN_PORT4_COUNTER1    0x008a
+#define    RTL8367C_PKTGEN_PORT4_COUNTER1_OFFSET    0
+#define    RTL8367C_PKTGEN_PORT4_COUNTER1_MASK    0xFF
+
+#define    RTL8367C_REG_PKTGEN_PORT4_TX_LENGTH    0x008b
+#define    RTL8367C_PKTGEN_PORT4_TX_LENGTH_OFFSET    0
+#define    RTL8367C_PKTGEN_PORT4_TX_LENGTH_MASK    0x3FFF
+
+#define    RTL8367C_REG_PKTGEN_PORT4_TIMER    0x008d
+#define    RTL8367C_PKTGEN_PORT4_TIMER_TIMER_OFFSET    4
+#define    RTL8367C_PKTGEN_PORT4_TIMER_TIMER_MASK    0xF0
+#define    RTL8367C_PKTGEN_PORT4_TIMER_RX_DMA_ERR_FLAG_OFFSET    3
+#define    RTL8367C_PKTGEN_PORT4_TIMER_RX_DMA_ERR_FLAG_MASK    0x8
+
+#define    RTL8367C_REG_PORT4_MISC_CFG    0x008e
+#define    RTL8367C_PORT4_MISC_CFG_SMALL_TAG_IPG_OFFSET    15
+#define    RTL8367C_PORT4_MISC_CFG_SMALL_TAG_IPG_MASK    0x8000
+#define    RTL8367C_PORT4_MISC_CFG_TX_ITFSP_MODE_OFFSET    14
+#define    RTL8367C_PORT4_MISC_CFG_TX_ITFSP_MODE_MASK    0x4000
+#define    RTL8367C_PORT4_MISC_CFG_FLOWCTRL_INDEP_OFFSET    13
+#define    RTL8367C_PORT4_MISC_CFG_FLOWCTRL_INDEP_MASK    0x2000
+#define    RTL8367C_PORT4_MISC_CFG_DOT1Q_REMARK_ENABLE_OFFSET    12
+#define    RTL8367C_PORT4_MISC_CFG_DOT1Q_REMARK_ENABLE_MASK    0x1000
+#define    RTL8367C_PORT4_MISC_CFG_INGRESSBW_FLOWCTRL_OFFSET    11
+#define    RTL8367C_PORT4_MISC_CFG_INGRESSBW_FLOWCTRL_MASK    0x800
+#define    RTL8367C_PORT4_MISC_CFG_INGRESSBW_IFG_OFFSET    10
+#define    RTL8367C_PORT4_MISC_CFG_INGRESSBW_IFG_MASK    0x400
+#define    RTL8367C_PORT4_MISC_CFG_RX_SPC_OFFSET    9
+#define    RTL8367C_PORT4_MISC_CFG_RX_SPC_MASK    0x200
+#define    RTL8367C_PORT4_MISC_CFG_CRC_SKIP_OFFSET    8
+#define    RTL8367C_PORT4_MISC_CFG_CRC_SKIP_MASK    0x100
+#define    RTL8367C_PORT4_MISC_CFG_PKTGEN_TX_FIRST_OFFSET    7
+#define    RTL8367C_PORT4_MISC_CFG_PKTGEN_TX_FIRST_MASK    0x80
+#define    RTL8367C_PORT4_MISC_CFG_MAC_LOOPBACK_OFFSET    6
+#define    RTL8367C_PORT4_MISC_CFG_MAC_LOOPBACK_MASK    0x40
+#define    RTL8367C_PORT4_MISC_CFG_VLAN_EGRESS_MODE_OFFSET    4
+#define    RTL8367C_PORT4_MISC_CFG_VLAN_EGRESS_MODE_MASK    0x30
+#define    RTL8367C_PORT4_MISC_CFG_CONGESTION_SUSTAIN_TIME_OFFSET    0
+#define    RTL8367C_PORT4_MISC_CFG_CONGESTION_SUSTAIN_TIME_MASK    0xF
+
+#define    RTL8367C_REG_INGRESSBW_PORT4_RATE_CTRL0    0x008f
+
+#define    RTL8367C_REG_INGRESSBW_PORT4_RATE_CTRL1    0x0090
+#define    RTL8367C_INGRESSBW_PORT4_RATE_CTRL1_DUMMY_OFFSET    3
+#define    RTL8367C_INGRESSBW_PORT4_RATE_CTRL1_DUMMY_MASK    0xFFF8
+#define    RTL8367C_INGRESSBW_PORT4_RATE_CTRL1_INGRESSBW_RATE16_OFFSET    0
+#define    RTL8367C_INGRESSBW_PORT4_RATE_CTRL1_INGRESSBW_RATE16_MASK    0x7
+
+#define    RTL8367C_REG_PORT4_FORCE_RATE0    0x0091
+
+#define    RTL8367C_REG_PORT4_FORCE_RATE1    0x0092
+
+#define    RTL8367C_REG_PORT4_CURENT_RATE0    0x0093
+
+#define    RTL8367C_REG_PORT4_CURENT_RATE1    0x0094
+
+#define    RTL8367C_REG_PORT4_PAGE_COUNTER    0x0095
+#define    RTL8367C_PORT4_PAGE_COUNTER_OFFSET    0
+#define    RTL8367C_PORT4_PAGE_COUNTER_MASK    0x7F
+
+#define    RTL8367C_REG_PAGEMETER_PORT4_CTRL0    0x0096
+
+#define    RTL8367C_REG_PAGEMETER_PORT4_CTRL1    0x0097
+
+#define    RTL8367C_REG_PORT4_EEECFG    0x0098
+#define    RTL8367C_PORT4_EEECFG_EEEP_ENABLE_TX_OFFSET    14
+#define    RTL8367C_PORT4_EEECFG_EEEP_ENABLE_TX_MASK    0x4000
+#define    RTL8367C_PORT4_EEECFG_EEEP_ENABLE_RX_OFFSET    13
+#define    RTL8367C_PORT4_EEECFG_EEEP_ENABLE_RX_MASK    0x2000
+#define    RTL8367C_PORT4_EEECFG_EEE_FORCE_OFFSET    12
+#define    RTL8367C_PORT4_EEECFG_EEE_FORCE_MASK    0x1000
+#define    RTL8367C_PORT4_EEECFG_EEE_100M_OFFSET    11
+#define    RTL8367C_PORT4_EEECFG_EEE_100M_MASK    0x800
+#define    RTL8367C_PORT4_EEECFG_EEE_GIGA_500M_OFFSET    10
+#define    RTL8367C_PORT4_EEECFG_EEE_GIGA_500M_MASK    0x400
+#define    RTL8367C_PORT4_EEECFG_EEE_TX_OFFSET    9
+#define    RTL8367C_PORT4_EEECFG_EEE_TX_MASK    0x200
+#define    RTL8367C_PORT4_EEECFG_EEE_RX_OFFSET    8
+#define    RTL8367C_PORT4_EEECFG_EEE_RX_MASK    0x100
+#define    RTL8367C_PORT4_EEECFG_EEE_DSP_RX_OFFSET    6
+#define    RTL8367C_PORT4_EEECFG_EEE_DSP_RX_MASK    0x40
+#define    RTL8367C_PORT4_EEECFG_EEE_LPI_OFFSET    5
+#define    RTL8367C_PORT4_EEECFG_EEE_LPI_MASK    0x20
+#define    RTL8367C_PORT4_EEECFG_EEE_TX_LPI_OFFSET    4
+#define    RTL8367C_PORT4_EEECFG_EEE_TX_LPI_MASK    0x10
+#define    RTL8367C_PORT4_EEECFG_EEE_RX_LPI_OFFSET    3
+#define    RTL8367C_PORT4_EEECFG_EEE_RX_LPI_MASK    0x8
+#define    RTL8367C_PORT4_EEECFG_EEE_PAUSE_INDICATOR_OFFSET    2
+#define    RTL8367C_PORT4_EEECFG_EEE_PAUSE_INDICATOR_MASK    0x4
+#define    RTL8367C_PORT4_EEECFG_EEE_WAKE_REQ_OFFSET    1
+#define    RTL8367C_PORT4_EEECFG_EEE_WAKE_REQ_MASK    0x2
+#define    RTL8367C_PORT4_EEECFG_EEE_SLEEP_REQ_OFFSET    0
+#define    RTL8367C_PORT4_EEECFG_EEE_SLEEP_REQ_MASK    0x1
+
+#define    RTL8367C_REG_PORT4_EEETXMTR    0x0099
+
+#define    RTL8367C_REG_PORT4_EEERXMTR    0x009a
+
+#define    RTL8367C_REG_PORT4_EEEPTXMTR    0x009b
+
+#define    RTL8367C_REG_PORT4_EEEPRXMTR    0x009c
+
+#define    RTL8367C_REG_PTP_PORT4_CFG1    0x009e
+#define    RTL8367C_PTP_PORT4_CFG1_OFFSET    7
+#define    RTL8367C_PTP_PORT4_CFG1_MASK    0xFF
+
+#define    RTL8367C_REG_P4_MSIC1    0x009f
+#define    RTL8367C_P4_MSIC1_OFFSET    0
+#define    RTL8367C_P4_MSIC1_MASK    0x1
+
+#define    RTL8367C_REG_PORT5_CGST_HALF_CFG    0x00a0
+#define    RTL8367C_PORT5_CGST_HALF_CFG_CONGESTION_TIME_OFFSET    4
+#define    RTL8367C_PORT5_CGST_HALF_CFG_CONGESTION_TIME_MASK    0xF0
+#define    RTL8367C_PORT5_CGST_HALF_CFG_CONGESTION_SUSTAIN_TIME_OFFSET    0
+#define    RTL8367C_PORT5_CGST_HALF_CFG_CONGESTION_SUSTAIN_TIME_MASK    0xF
+
+#define    RTL8367C_REG_PKTGEN_PORT5_CTRL    0x00a1
+#define    RTL8367C_PKTGEN_PORT5_CTRL_STATUS_OFFSET    15
+#define    RTL8367C_PKTGEN_PORT5_CTRL_STATUS_MASK    0x8000
+#define    RTL8367C_PKTGEN_PORT5_CTRL_PKTGEN_STS_OFFSET    13
+#define    RTL8367C_PKTGEN_PORT5_CTRL_PKTGEN_STS_MASK    0x2000
+#define    RTL8367C_PKTGEN_PORT5_CTRL_CRC_NO_ERROR_OFFSET    4
+#define    RTL8367C_PKTGEN_PORT5_CTRL_CRC_NO_ERROR_MASK    0x10
+#define    RTL8367C_PKTGEN_PORT5_CTRL_CMD_START_OFFSET    0
+#define    RTL8367C_PKTGEN_PORT5_CTRL_CMD_START_MASK    0x1
+
+#define    RTL8367C_REG_TX_ERR_CNT_PORT5    0x00a2
+#define    RTL8367C_TX_ERR_CNT_PORT5_OFFSET    0
+#define    RTL8367C_TX_ERR_CNT_PORT5_MASK    0x7
+
+#define    RTL8367C_REG_PKTGEN_PORT5_DA0    0x00a3
+
+#define    RTL8367C_REG_PKTGEN_PORT5_DA1    0x00a4
+
+#define    RTL8367C_REG_PKTGEN_PORT5_DA2    0x00a5
+
+#define    RTL8367C_REG_PKTGEN_PORT5_SA0    0x00a6
+
+#define    RTL8367C_REG_PKTGEN_PORT5_SA1    0x00a7
+
+#define    RTL8367C_REG_PKTGEN_PORT5_SA2    0x00a8
+
+#define    RTL8367C_REG_PKTGEN_PORT5_COUNTER0    0x00a9
+
+#define    RTL8367C_REG_PKTGEN_PORT5_COUNTER1    0x00aa
+#define    RTL8367C_PKTGEN_PORT5_COUNTER1_OFFSET    0
+#define    RTL8367C_PKTGEN_PORT5_COUNTER1_MASK    0xFF
+
+#define    RTL8367C_REG_PKTGEN_PORT5_TX_LENGTH    0x00ab
+#define    RTL8367C_PKTGEN_PORT5_TX_LENGTH_OFFSET    0
+#define    RTL8367C_PKTGEN_PORT5_TX_LENGTH_MASK    0x3FFF
+
+#define    RTL8367C_REG_PKTGEN_PORT5_TIMER    0x00ad
+#define    RTL8367C_PKTGEN_PORT5_TIMER_TIMER_OFFSET    4
+#define    RTL8367C_PKTGEN_PORT5_TIMER_TIMER_MASK    0xF0
+#define    RTL8367C_PKTGEN_PORT5_TIMER_RX_DMA_ERR_FLAG_OFFSET    3
+#define    RTL8367C_PKTGEN_PORT5_TIMER_RX_DMA_ERR_FLAG_MASK    0x8
+
+#define    RTL8367C_REG_PORT5_MISC_CFG    0x00ae
+#define    RTL8367C_PORT5_MISC_CFG_SMALL_TAG_IPG_OFFSET    15
+#define    RTL8367C_PORT5_MISC_CFG_SMALL_TAG_IPG_MASK    0x8000
+#define    RTL8367C_PORT5_MISC_CFG_TX_ITFSP_MODE_OFFSET    14
+#define    RTL8367C_PORT5_MISC_CFG_TX_ITFSP_MODE_MASK    0x4000
+#define    RTL8367C_PORT5_MISC_CFG_FLOWCTRL_INDEP_OFFSET    13
+#define    RTL8367C_PORT5_MISC_CFG_FLOWCTRL_INDEP_MASK    0x2000
+#define    RTL8367C_PORT5_MISC_CFG_DOT1Q_REMARK_ENABLE_OFFSET    12
+#define    RTL8367C_PORT5_MISC_CFG_DOT1Q_REMARK_ENABLE_MASK    0x1000
+#define    RTL8367C_PORT5_MISC_CFG_INGRESSBW_FLOWCTRL_OFFSET    11
+#define    RTL8367C_PORT5_MISC_CFG_INGRESSBW_FLOWCTRL_MASK    0x800
+#define    RTL8367C_PORT5_MISC_CFG_INGRESSBW_IFG_OFFSET    10
+#define    RTL8367C_PORT5_MISC_CFG_INGRESSBW_IFG_MASK    0x400
+#define    RTL8367C_PORT5_MISC_CFG_RX_SPC_OFFSET    9
+#define    RTL8367C_PORT5_MISC_CFG_RX_SPC_MASK    0x200
+#define    RTL8367C_PORT5_MISC_CFG_CRC_SKIP_OFFSET    8
+#define    RTL8367C_PORT5_MISC_CFG_CRC_SKIP_MASK    0x100
+#define    RTL8367C_PORT5_MISC_CFG_PKTGEN_TX_FIRST_OFFSET    7
+#define    RTL8367C_PORT5_MISC_CFG_PKTGEN_TX_FIRST_MASK    0x80
+#define    RTL8367C_PORT5_MISC_CFG_MAC_LOOPBACK_OFFSET    6
+#define    RTL8367C_PORT5_MISC_CFG_MAC_LOOPBACK_MASK    0x40
+#define    RTL8367C_PORT5_MISC_CFG_VLAN_EGRESS_MODE_OFFSET    4
+#define    RTL8367C_PORT5_MISC_CFG_VLAN_EGRESS_MODE_MASK    0x30
+#define    RTL8367C_PORT5_MISC_CFG_CONGESTION_SUSTAIN_TIME_OFFSET    0
+#define    RTL8367C_PORT5_MISC_CFG_CONGESTION_SUSTAIN_TIME_MASK    0xF
+
+#define    RTL8367C_REG_INGRESSBW_PORT5_RATE_CTRL0    0x00af
+
+#define    RTL8367C_REG_INGRESSBW_PORT5_RATE_CTRL1    0x00b0
+#define    RTL8367C_INGRESSBW_PORT5_RATE_CTRL1_DUMMY_OFFSET    3
+#define    RTL8367C_INGRESSBW_PORT5_RATE_CTRL1_DUMMY_MASK    0xFFF8
+#define    RTL8367C_INGRESSBW_PORT5_RATE_CTRL1_INGRESSBW_RATE16_OFFSET    0
+#define    RTL8367C_INGRESSBW_PORT5_RATE_CTRL1_INGRESSBW_RATE16_MASK    0x7
+
+#define    RTL8367C_REG_PORT5_FORCE_RATE0    0x00b1
+
+#define    RTL8367C_REG_PORT5_FORCE_RATE1    0x00b2
+
+#define    RTL8367C_REG_PORT5_CURENT_RATE0    0x00b3
+
+#define    RTL8367C_REG_PORT5_CURENT_RATE1    0x00b4
+
+#define    RTL8367C_REG_PORT5_PAGE_COUNTER    0x00b5
+#define    RTL8367C_PORT5_PAGE_COUNTER_OFFSET    0
+#define    RTL8367C_PORT5_PAGE_COUNTER_MASK    0x7F
+
+#define    RTL8367C_REG_PAGEMETER_PORT5_CTRL0    0x00b6
+
+#define    RTL8367C_REG_PAGEMETER_PORT5_CTRL1    0x00b7
+
+#define    RTL8367C_REG_PORT5_EEECFG    0x00b8
+#define    RTL8367C_PORT5_EEECFG_EEEP_ENABLE_TX_OFFSET    14
+#define    RTL8367C_PORT5_EEECFG_EEEP_ENABLE_TX_MASK    0x4000
+#define    RTL8367C_PORT5_EEECFG_EEEP_ENABLE_RX_OFFSET    13
+#define    RTL8367C_PORT5_EEECFG_EEEP_ENABLE_RX_MASK    0x2000
+#define    RTL8367C_PORT5_EEECFG_EEE_FORCE_OFFSET    12
+#define    RTL8367C_PORT5_EEECFG_EEE_FORCE_MASK    0x1000
+#define    RTL8367C_PORT5_EEECFG_EEE_100M_OFFSET    11
+#define    RTL8367C_PORT5_EEECFG_EEE_100M_MASK    0x800
+#define    RTL8367C_PORT5_EEECFG_EEE_GIGA_500M_OFFSET    10
+#define    RTL8367C_PORT5_EEECFG_EEE_GIGA_500M_MASK    0x400
+#define    RTL8367C_PORT5_EEECFG_EEE_TX_OFFSET    9
+#define    RTL8367C_PORT5_EEECFG_EEE_TX_MASK    0x200
+#define    RTL8367C_PORT5_EEECFG_EEE_RX_OFFSET    8
+#define    RTL8367C_PORT5_EEECFG_EEE_RX_MASK    0x100
+#define    RTL8367C_PORT5_EEECFG_EEE_DSP_RX_OFFSET    6
+#define    RTL8367C_PORT5_EEECFG_EEE_DSP_RX_MASK    0x40
+#define    RTL8367C_PORT5_EEECFG_EEE_LPI_OFFSET    5
+#define    RTL8367C_PORT5_EEECFG_EEE_LPI_MASK    0x20
+#define    RTL8367C_PORT5_EEECFG_EEE_TX_LPI_OFFSET    4
+#define    RTL8367C_PORT5_EEECFG_EEE_TX_LPI_MASK    0x10
+#define    RTL8367C_PORT5_EEECFG_EEE_RX_LPI_OFFSET    3
+#define    RTL8367C_PORT5_EEECFG_EEE_RX_LPI_MASK    0x8
+#define    RTL8367C_PORT5_EEECFG_EEE_PAUSE_INDICATOR_OFFSET    2
+#define    RTL8367C_PORT5_EEECFG_EEE_PAUSE_INDICATOR_MASK    0x4
+#define    RTL8367C_PORT5_EEECFG_EEE_WAKE_REQ_OFFSET    1
+#define    RTL8367C_PORT5_EEECFG_EEE_WAKE_REQ_MASK    0x2
+#define    RTL8367C_PORT5_EEECFG_EEE_SLEEP_REQ_OFFSET    0
+#define    RTL8367C_PORT5_EEECFG_EEE_SLEEP_REQ_MASK    0x1
+
+#define    RTL8367C_REG_PORT5_EEETXMTR    0x00b9
+
+#define    RTL8367C_REG_PORT5_EEERXMTR    0x00ba
+
+#define    RTL8367C_REG_PORT5_EEEPTXMTR    0x00bb
+
+#define    RTL8367C_REG_PORT5_EEEPRXMTR    0x00bc
+
+#define    RTL8367C_REG_PTP_PORT5_CFG1    0x00be
+#define    RTL8367C_PTP_PORT5_CFG1_OFFSET    7
+#define    RTL8367C_PTP_PORT5_CFG1_MASK    0xFF
+
+#define    RTL8367C_REG_P5_MSIC1    0x00bf
+#define    RTL8367C_P5_MSIC1_OFFSET    0
+#define    RTL8367C_P5_MSIC1_MASK    0x1
+
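+/*
+ * The per-port register banks repeat with a fixed stride of 0x20: port 5
+ * starts at 0x00a0, port 6 at 0x00c0, ... port 10 at 0x0140, all with the
+ * same internal layout, so a port-n register can be computed as e.g.
+ *
+ *   reg = RTL8367C_REG_PORT5_MISC_CFG + 0x20 * (n - 5);
+ */
+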
+#define    RTL8367C_REG_PORT6_CGST_HALF_CFG    0x00c0
+#define    RTL8367C_PORT6_CGST_HALF_CFG_CONGESTION_TIME_OFFSET    4
+#define    RTL8367C_PORT6_CGST_HALF_CFG_CONGESTION_TIME_MASK    0xF0
+#define    RTL8367C_PORT6_CGST_HALF_CFG_CONGESTION_SUSTAIN_TIME_OFFSET    0
+#define    RTL8367C_PORT6_CGST_HALF_CFG_CONGESTION_SUSTAIN_TIME_MASK    0xF
+
+#define    RTL8367C_REG_PKTGEN_PORT6_CTRL    0x00c1
+#define    RTL8367C_PKTGEN_PORT6_CTRL_STATUS_OFFSET    15
+#define    RTL8367C_PKTGEN_PORT6_CTRL_STATUS_MASK    0x8000
+#define    RTL8367C_PKTGEN_PORT6_CTRL_PKTGEN_STS_OFFSET    13
+#define    RTL8367C_PKTGEN_PORT6_CTRL_PKTGEN_STS_MASK    0x2000
+#define    RTL8367C_PKTGEN_PORT6_CTRL_CRC_NO_ERROR_OFFSET    4
+#define    RTL8367C_PKTGEN_PORT6_CTRL_CRC_NO_ERROR_MASK    0x10
+#define    RTL8367C_PKTGEN_PORT6_CTRL_CMD_START_OFFSET    0
+#define    RTL8367C_PKTGEN_PORT6_CTRL_CMD_START_MASK    0x1
+
+#define    RTL8367C_REG_TX_ERR_CNT_PORT6    0x00c2
+#define    RTL8367C_TX_ERR_CNT_PORT6_OFFSET    0
+#define    RTL8367C_TX_ERR_CNT_PORT6_MASK    0x7
+
+#define    RTL8367C_REG_PKTGEN_PORT6_DA0    0x00c3
+
+#define    RTL8367C_REG_PKTGEN_PORT6_DA1    0x00c4
+
+#define    RTL8367C_REG_PKTGEN_PORT6_DA2    0x00c5
+
+#define    RTL8367C_REG_PKTGEN_PORT6_SA0    0x00c6
+
+#define    RTL8367C_REG_PKTGEN_PORT6_SA1    0x00c7
+
+#define    RTL8367C_REG_PKTGEN_PORT6_SA2    0x00c8
+
+#define    RTL8367C_REG_PKTGEN_PORT6_COUNTER0    0x00c9
+
+#define    RTL8367C_REG_PKTGEN_PORT6_COUNTER1    0x00ca
+#define    RTL8367C_PKTGEN_PORT6_COUNTER1_OFFSET    0
+#define    RTL8367C_PKTGEN_PORT6_COUNTER1_MASK    0xFF
+
+#define    RTL8367C_REG_PKTGEN_PORT6_TX_LENGTH    0x00cb
+#define    RTL8367C_PKTGEN_PORT6_TX_LENGTH_OFFSET    0
+#define    RTL8367C_PKTGEN_PORT6_TX_LENGTH_MASK    0x3FFF
+
+#define    RTL8367C_REG_PKTGEN_PORT6_TIMER    0x00cd
+#define    RTL8367C_PKTGEN_PORT6_TIMER_TIMER_OFFSET    4
+#define    RTL8367C_PKTGEN_PORT6_TIMER_TIMER_MASK    0xF0
+#define    RTL8367C_PKTGEN_PORT6_TIMER_RX_DMA_ERR_FLAG_OFFSET    3
+#define    RTL8367C_PKTGEN_PORT6_TIMER_RX_DMA_ERR_FLAG_MASK    0x8
+
+#define    RTL8367C_REG_PORT6_MISC_CFG    0x00ce
+#define    RTL8367C_PORT6_MISC_CFG_SMALL_TAG_IPG_OFFSET    15
+#define    RTL8367C_PORT6_MISC_CFG_SMALL_TAG_IPG_MASK    0x8000
+#define    RTL8367C_PORT6_MISC_CFG_TX_ITFSP_MODE_OFFSET    14
+#define    RTL8367C_PORT6_MISC_CFG_TX_ITFSP_MODE_MASK    0x4000
+#define    RTL8367C_PORT6_MISC_CFG_FLOWCTRL_INDEP_OFFSET    13
+#define    RTL8367C_PORT6_MISC_CFG_FLOWCTRL_INDEP_MASK    0x2000
+#define    RTL8367C_PORT6_MISC_CFG_DOT1Q_REMARK_ENABLE_OFFSET    12
+#define    RTL8367C_PORT6_MISC_CFG_DOT1Q_REMARK_ENABLE_MASK    0x1000
+#define    RTL8367C_PORT6_MISC_CFG_INGRESSBW_FLOWCTRL_OFFSET    11
+#define    RTL8367C_PORT6_MISC_CFG_INGRESSBW_FLOWCTRL_MASK    0x800
+#define    RTL8367C_PORT6_MISC_CFG_INGRESSBW_IFG_OFFSET    10
+#define    RTL8367C_PORT6_MISC_CFG_INGRESSBW_IFG_MASK    0x400
+#define    RTL8367C_PORT6_MISC_CFG_RX_SPC_OFFSET    9
+#define    RTL8367C_PORT6_MISC_CFG_RX_SPC_MASK    0x200
+#define    RTL8367C_PORT6_MISC_CFG_CRC_SKIP_OFFSET    8
+#define    RTL8367C_PORT6_MISC_CFG_CRC_SKIP_MASK    0x100
+#define    RTL8367C_PORT6_MISC_CFG_PKTGEN_TX_FIRST_OFFSET    7
+#define    RTL8367C_PORT6_MISC_CFG_PKTGEN_TX_FIRST_MASK    0x80
+#define    RTL8367C_PORT6_MISC_CFG_MAC_LOOPBACK_OFFSET    6
+#define    RTL8367C_PORT6_MISC_CFG_MAC_LOOPBACK_MASK    0x40
+#define    RTL8367C_PORT6_MISC_CFG_VLAN_EGRESS_MODE_OFFSET    4
+#define    RTL8367C_PORT6_MISC_CFG_VLAN_EGRESS_MODE_MASK    0x30
+#define    RTL8367C_PORT6_MISC_CFG_CONGESTION_SUSTAIN_TIME_OFFSET    0
+#define    RTL8367C_PORT6_MISC_CFG_CONGESTION_SUSTAIN_TIME_MASK    0xF
+
+#define    RTL8367C_REG_INGRESSBW_PORT6_RATE_CTRL0    0x00cf
+
+#define    RTL8367C_REG_INGRESSBW_PORT6_RATE_CTRL1    0x00d0
+#define    RTL8367C_INGRESSBW_PORT6_RATE_CTRL1_DUMMY_OFFSET    3
+#define    RTL8367C_INGRESSBW_PORT6_RATE_CTRL1_DUMMY_MASK    0xFFF8
+#define    RTL8367C_INGRESSBW_PORT6_RATE_CTRL1_INGRESSBW_RATE16_OFFSET    0
+#define    RTL8367C_INGRESSBW_PORT6_RATE_CTRL1_INGRESSBW_RATE16_MASK    0x7
+
+#define    RTL8367C_REG_PORT6_FORCE_RATE0    0x00d1
+
+#define    RTL8367C_REG_PORT6_FORCE_RATE1    0x00d2
+
+#define    RTL8367C_REG_PORT6_CURENT_RATE0    0x00d3
+
+#define    RTL8367C_REG_PORT6_CURENT_RATE1    0x00d4
+
+#define    RTL8367C_REG_PORT6_PAGE_COUNTER    0x00d5
+#define    RTL8367C_PORT6_PAGE_COUNTER_OFFSET    0
+#define    RTL8367C_PORT6_PAGE_COUNTER_MASK    0x7F
+
+#define    RTL8367C_REG_PAGEMETER_PORT6_CTRL0    0x00d6
+
+#define    RTL8367C_REG_PAGEMETER_PORT6_CTRL1    0x00d7
+
+#define    RTL8367C_REG_PORT6_EEECFG    0x00d8
+#define    RTL8367C_PORT6_EEECFG_EEEP_ENABLE_TX_OFFSET    14
+#define    RTL8367C_PORT6_EEECFG_EEEP_ENABLE_TX_MASK    0x4000
+#define    RTL8367C_PORT6_EEECFG_EEEP_ENABLE_RX_OFFSET    13
+#define    RTL8367C_PORT6_EEECFG_EEEP_ENABLE_RX_MASK    0x2000
+#define    RTL8367C_PORT6_EEECFG_EEE_FORCE_OFFSET    12
+#define    RTL8367C_PORT6_EEECFG_EEE_FORCE_MASK    0x1000
+#define    RTL8367C_PORT6_EEECFG_EEE_100M_OFFSET    11
+#define    RTL8367C_PORT6_EEECFG_EEE_100M_MASK    0x800
+#define    RTL8367C_PORT6_EEECFG_EEE_GIGA_500M_OFFSET    10
+#define    RTL8367C_PORT6_EEECFG_EEE_GIGA_500M_MASK    0x400
+#define    RTL8367C_PORT6_EEECFG_EEE_TX_OFFSET    9
+#define    RTL8367C_PORT6_EEECFG_EEE_TX_MASK    0x200
+#define    RTL8367C_PORT6_EEECFG_EEE_RX_OFFSET    8
+#define    RTL8367C_PORT6_EEECFG_EEE_RX_MASK    0x100
+#define    RTL8367C_PORT6_EEECFG_EEE_DSP_RX_OFFSET    6
+#define    RTL8367C_PORT6_EEECFG_EEE_DSP_RX_MASK    0x40
+#define    RTL8367C_PORT6_EEECFG_EEE_LPI_OFFSET    5
+#define    RTL8367C_PORT6_EEECFG_EEE_LPI_MASK    0x20
+#define    RTL8367C_PORT6_EEECFG_EEE_TX_LPI_OFFSET    4
+#define    RTL8367C_PORT6_EEECFG_EEE_TX_LPI_MASK    0x10
+#define    RTL8367C_PORT6_EEECFG_EEE_RX_LPI_OFFSET    3
+#define    RTL8367C_PORT6_EEECFG_EEE_RX_LPI_MASK    0x8
+#define    RTL8367C_PORT6_EEECFG_EEE_PAUSE_INDICATOR_OFFSET    2
+#define    RTL8367C_PORT6_EEECFG_EEE_PAUSE_INDICATOR_MASK    0x4
+#define    RTL8367C_PORT6_EEECFG_EEE_WAKE_REQ_OFFSET    1
+#define    RTL8367C_PORT6_EEECFG_EEE_WAKE_REQ_MASK    0x2
+#define    RTL8367C_PORT6_EEECFG_EEE_SLEEP_REQ_OFFSET    0
+#define    RTL8367C_PORT6_EEECFG_EEE_SLEEP_REQ_MASK    0x1
+
+#define    RTL8367C_REG_PORT6_EEETXMTR    0x00d9
+
+#define    RTL8367C_REG_PORT6_EEERXMTR    0x00da
+
+#define    RTL8367C_REG_PORT6_EEEPTXMTR    0x00db
+
+#define    RTL8367C_REG_PORT6_EEEPRXMTR    0x00dc
+
+#define    RTL8367C_REG_PTP_PORT6_CFG1    0x00de
+#define    RTL8367C_PTP_PORT6_CFG1_OFFSET    7
+#define    RTL8367C_PTP_PORT6_CFG1_MASK    0xFF
+
+#define    RTL8367C_REG_P6_MSIC1    0x00df
+#define    RTL8367C_P6_MSIC1_OFFSET    0
+#define    RTL8367C_P6_MSIC1_MASK    0x1
+
+#define    RTL8367C_REG_PORT7_CGST_HALF_CFG    0x00e0
+#define    RTL8367C_PORT7_CGST_HALF_CFG_CONGESTION_TIME_OFFSET    4
+#define    RTL8367C_PORT7_CGST_HALF_CFG_CONGESTION_TIME_MASK    0xF0
+#define    RTL8367C_PORT7_CGST_HALF_CFG_CONGESTION_SUSTAIN_TIME_OFFSET    0
+#define    RTL8367C_PORT7_CGST_HALF_CFG_CONGESTION_SUSTAIN_TIME_MASK    0xF
+
+#define    RTL8367C_REG_PKTGEN_PORT7_CTRL    0x00e1
+#define    RTL8367C_PKTGEN_PORT7_CTRL_STATUS_OFFSET    15
+#define    RTL8367C_PKTGEN_PORT7_CTRL_STATUS_MASK    0x8000
+#define    RTL8367C_PKTGEN_PORT7_CTRL_PKTGEN_STS_OFFSET    13
+#define    RTL8367C_PKTGEN_PORT7_CTRL_PKTGEN_STS_MASK    0x2000
+#define    RTL8367C_PKTGEN_PORT7_CTRL_CRC_NO_ERROR_OFFSET    4
+#define    RTL8367C_PKTGEN_PORT7_CTRL_CRC_NO_ERROR_MASK    0x10
+#define    RTL8367C_PKTGEN_PORT7_CTRL_CMD_START_OFFSET    0
+#define    RTL8367C_PKTGEN_PORT7_CTRL_CMD_START_MASK    0x1
+
+#define    RTL8367C_REG_TX_ERR_CNT_PORT7    0x00e2
+#define    RTL8367C_TX_ERR_CNT_PORT7_OFFSET    0
+#define    RTL8367C_TX_ERR_CNT_PORT7_MASK    0x7
+
+#define    RTL8367C_REG_PKTGEN_PORT7_DA0    0x00e3
+
+#define    RTL8367C_REG_PKTGEN_PORT7_DA1    0x00e4
+
+#define    RTL8367C_REG_PKTGEN_PORT7_DA2    0x00e5
+
+#define    RTL8367C_REG_PKTGEN_PORT7_SA0    0x00e6
+
+#define    RTL8367C_REG_PKTGEN_PORT7_SA1    0x00e7
+
+#define    RTL8367C_REG_PKTGEN_PORT7_SA2    0x00e8
+
+#define    RTL8367C_REG_PKTGEN_PORT7_COUNTER0    0x00e9
+
+#define    RTL8367C_REG_PKTGEN_PORT7_COUNTER1    0x00ea
+#define    RTL8367C_PKTGEN_PORT7_COUNTER1_OFFSET    0
+#define    RTL8367C_PKTGEN_PORT7_COUNTER1_MASK    0xFF
+
+#define    RTL8367C_REG_PKTGEN_PORT7_TX_LENGTH    0x00eb
+#define    RTL8367C_PKTGEN_PORT7_TX_LENGTH_OFFSET    0
+#define    RTL8367C_PKTGEN_PORT7_TX_LENGTH_MASK    0x3FFF
+
+#define    RTL8367C_REG_PKTGEN_PORT7_TIMER    0x00ed
+#define    RTL8367C_PKTGEN_PORT7_TIMER_TIMER_OFFSET    4
+#define    RTL8367C_PKTGEN_PORT7_TIMER_TIMER_MASK    0xF0
+#define    RTL8367C_PKTGEN_PORT7_TIMER_RX_DMA_ERR_FLAG_OFFSET    3
+#define    RTL8367C_PKTGEN_PORT7_TIMER_RX_DMA_ERR_FLAG_MASK    0x8
+
+#define    RTL8367C_REG_PORT7_MISC_CFG    0x00ee
+#define    RTL8367C_PORT7_MISC_CFG_SMALL_TAG_IPG_OFFSET    15
+#define    RTL8367C_PORT7_MISC_CFG_SMALL_TAG_IPG_MASK    0x8000
+#define    RTL8367C_PORT7_MISC_CFG_TX_ITFSP_MODE_OFFSET    14
+#define    RTL8367C_PORT7_MISC_CFG_TX_ITFSP_MODE_MASK    0x4000
+#define    RTL8367C_PORT7_MISC_CFG_FLOWCTRL_INDEP_OFFSET    13
+#define    RTL8367C_PORT7_MISC_CFG_FLOWCTRL_INDEP_MASK    0x2000
+#define    RTL8367C_PORT7_MISC_CFG_DOT1Q_REMARK_ENABLE_OFFSET    12
+#define    RTL8367C_PORT7_MISC_CFG_DOT1Q_REMARK_ENABLE_MASK    0x1000
+#define    RTL8367C_PORT7_MISC_CFG_INGRESSBW_FLOWCTRL_OFFSET    11
+#define    RTL8367C_PORT7_MISC_CFG_INGRESSBW_FLOWCTRL_MASK    0x800
+#define    RTL8367C_PORT7_MISC_CFG_INGRESSBW_IFG_OFFSET    10
+#define    RTL8367C_PORT7_MISC_CFG_INGRESSBW_IFG_MASK    0x400
+#define    RTL8367C_PORT7_MISC_CFG_RX_SPC_OFFSET    9
+#define    RTL8367C_PORT7_MISC_CFG_RX_SPC_MASK    0x200
+#define    RTL8367C_PORT7_MISC_CFG_CRC_SKIP_OFFSET    8
+#define    RTL8367C_PORT7_MISC_CFG_CRC_SKIP_MASK    0x100
+#define    RTL8367C_PORT7_MISC_CFG_PKTGEN_TX_FIRST_OFFSET    7
+#define    RTL8367C_PORT7_MISC_CFG_PKTGEN_TX_FIRST_MASK    0x80
+#define    RTL8367C_PORT7_MISC_CFG_MAC_LOOPBACK_OFFSET    6
+#define    RTL8367C_PORT7_MISC_CFG_MAC_LOOPBACK_MASK    0x40
+#define    RTL8367C_PORT7_MISC_CFG_VLAN_EGRESS_MODE_OFFSET    4
+#define    RTL8367C_PORT7_MISC_CFG_VLAN_EGRESS_MODE_MASK    0x30
+#define    RTL8367C_PORT7_MISC_CFG_CONGESTION_SUSTAIN_TIME_OFFSET    0
+#define    RTL8367C_PORT7_MISC_CFG_CONGESTION_SUSTAIN_TIME_MASK    0xF
+
+#define    RTL8367C_REG_INGRESSBW_PORT7_RATE_CTRL0    0x00ef
+
+#define    RTL8367C_REG_INGRESSBW_PORT7_RATE_CTRL1    0x00f0
+#define    RTL8367C_INGRESSBW_PORT7_RATE_CTRL1_DUMMY_OFFSET    3
+#define    RTL8367C_INGRESSBW_PORT7_RATE_CTRL1_DUMMY_MASK    0xFFF8
+#define    RTL8367C_INGRESSBW_PORT7_RATE_CTRL1_INGRESSBW_RATE16_OFFSET    0
+#define    RTL8367C_INGRESSBW_PORT7_RATE_CTRL1_INGRESSBW_RATE16_MASK    0x7
+
+#define    RTL8367C_REG_PORT7_FORCE_RATE0    0x00f1
+
+#define    RTL8367C_REG_PORT7_FORCE_RATE1    0x00f2
+
+#define    RTL8367C_REG_PORT7_CURENT_RATE0    0x00f3
+
+#define    RTL8367C_REG_PORT7_CURENT_RATE1    0x00f4
+
+#define    RTL8367C_REG_PORT7_PAGE_COUNTER    0x00f5
+#define    RTL8367C_PORT7_PAGE_COUNTER_OFFSET    0
+#define    RTL8367C_PORT7_PAGE_COUNTER_MASK    0x7F
+
+#define    RTL8367C_REG_PAGEMETER_PORT7_CTRL0    0x00f6
+
+#define    RTL8367C_REG_PAGEMETER_PORT7_CTRL1    0x00f7
+
+#define    RTL8367C_REG_PORT7_EEECFG    0x00f8
+#define    RTL8367C_PORT7_EEECFG_EEEP_ENABLE_TX_OFFSET    14
+#define    RTL8367C_PORT7_EEECFG_EEEP_ENABLE_TX_MASK    0x4000
+#define    RTL8367C_PORT7_EEECFG_EEEP_ENABLE_RX_OFFSET    13
+#define    RTL8367C_PORT7_EEECFG_EEEP_ENABLE_RX_MASK    0x2000
+#define    RTL8367C_PORT7_EEECFG_EEE_FORCE_OFFSET    12
+#define    RTL8367C_PORT7_EEECFG_EEE_FORCE_MASK    0x1000
+#define    RTL8367C_PORT7_EEECFG_EEE_100M_OFFSET    11
+#define    RTL8367C_PORT7_EEECFG_EEE_100M_MASK    0x800
+#define    RTL8367C_PORT7_EEECFG_EEE_GIGA_500M_OFFSET    10
+#define    RTL8367C_PORT7_EEECFG_EEE_GIGA_500M_MASK    0x400
+#define    RTL8367C_PORT7_EEECFG_EEE_TX_OFFSET    9
+#define    RTL8367C_PORT7_EEECFG_EEE_TX_MASK    0x200
+#define    RTL8367C_PORT7_EEECFG_EEE_RX_OFFSET    8
+#define    RTL8367C_PORT7_EEECFG_EEE_RX_MASK    0x100
+#define    RTL8367C_PORT7_EEECFG_EEE_DSP_RX_OFFSET    6
+#define    RTL8367C_PORT7_EEECFG_EEE_DSP_RX_MASK    0x40
+#define    RTL8367C_PORT7_EEECFG_EEE_LPI_OFFSET    5
+#define    RTL8367C_PORT7_EEECFG_EEE_LPI_MASK    0x20
+#define    RTL8367C_PORT7_EEECFG_EEE_TX_LPI_OFFSET    4
+#define    RTL8367C_PORT7_EEECFG_EEE_TX_LPI_MASK    0x10
+#define    RTL8367C_PORT7_EEECFG_EEE_RX_LPI_OFFSET    3
+#define    RTL8367C_PORT7_EEECFG_EEE_RX_LPI_MASK    0x8
+#define    RTL8367C_PORT7_EEECFG_EEE_PAUSE_INDICATOR_OFFSET    2
+#define    RTL8367C_PORT7_EEECFG_EEE_PAUSE_INDICATOR_MASK    0x4
+#define    RTL8367C_PORT7_EEECFG_EEE_WAKE_REQ_OFFSET    1
+#define    RTL8367C_PORT7_EEECFG_EEE_WAKE_REQ_MASK    0x2
+#define    RTL8367C_PORT7_EEECFG_EEE_SLEEP_REQ_OFFSET    0
+#define    RTL8367C_PORT7_EEECFG_EEE_SLEEP_REQ_MASK    0x1
+
+#define    RTL8367C_REG_PORT7_EEETXMTR    0x00f9
+
+#define    RTL8367C_REG_PORT7_EEERXMTR    0x00fa
+
+#define    RTL8367C_REG_PORT7_EEEPTXMTR    0x00fb
+
+#define    RTL8367C_REG_PORT7_EEEPRXMTR    0x00fc
+
+#define    RTL8367C_REG_PTP_PORT7_CFG1    0x00fe
+#define    RTL8367C_PTP_PORT7_CFG1_OFFSET    7
+#define    RTL8367C_PTP_PORT7_CFG1_MASK    0xFF
+
+#define    RTL8367C_REG_P7_MSIC1    0x00ff
+#define    RTL8367C_P7_MSIC1_OFFSET    0
+#define    RTL8367C_P7_MSIC1_MASK    0x1
+
+#define    RTL8367C_REG_PORT8_CGST_HALF_CFG    0x0100
+#define    RTL8367C_PORT8_CGST_HALF_CFG_CONGESTION_TIME_OFFSET    4
+#define    RTL8367C_PORT8_CGST_HALF_CFG_CONGESTION_TIME_MASK    0xF0
+#define    RTL8367C_PORT8_CGST_HALF_CFG_CONGESTION_SUSTAIN_TIME_OFFSET    0
+#define    RTL8367C_PORT8_CGST_HALF_CFG_CONGESTION_SUSTAIN_TIME_MASK    0xF
+
+#define    RTL8367C_REG_PKTGEN_PORT8_CTRL    0x0101
+#define    RTL8367C_PKTGEN_PORT8_CTRL_STATUS_OFFSET    15
+#define    RTL8367C_PKTGEN_PORT8_CTRL_STATUS_MASK    0x8000
+#define    RTL8367C_PKTGEN_PORT8_CTRL_PKTGEN_STS_OFFSET    13
+#define    RTL8367C_PKTGEN_PORT8_CTRL_PKTGEN_STS_MASK    0x2000
+#define    RTL8367C_PKTGEN_PORT8_CTRL_CRC_NO_ERROR_OFFSET    4
+#define    RTL8367C_PKTGEN_PORT8_CTRL_CRC_NO_ERROR_MASK    0x10
+#define    RTL8367C_PKTGEN_PORT8_CTRL_CMD_START_OFFSET    0
+#define    RTL8367C_PKTGEN_PORT8_CTRL_CMD_START_MASK    0x1
+
+#define    RTL8367C_REG_TX_ERR_CNT_PORT8    0x0102
+#define    RTL8367C_TX_ERR_CNT_PORT8_OFFSET    0
+#define    RTL8367C_TX_ERR_CNT_PORT8_MASK    0x7
+
+#define    RTL8367C_REG_PKTGEN_PORT8_DA0    0x0103
+
+#define    RTL8367C_REG_PKTGEN_PORT8_DA1    0x0104
+
+#define    RTL8367C_REG_PKTGEN_PORT8_DA2    0x0105
+
+#define    RTL8367C_REG_PKTGEN_PORT8_SA0    0x0106
+
+#define    RTL8367C_REG_PKTGEN_PORT8_SA1    0x0107
+
+#define    RTL8367C_REG_PKTGEN_PORT8_SA2    0x0108
+
+#define    RTL8367C_REG_PKTGEN_PORT8_COUNTER0    0x0109
+
+#define    RTL8367C_REG_PKTGEN_PORT8_COUNTER1    0x010a
+#define    RTL8367C_PKTGEN_PORT8_COUNTER1_OFFSET    0
+#define    RTL8367C_PKTGEN_PORT8_COUNTER1_MASK    0xFF
+
+#define    RTL8367C_REG_PKTGEN_PORT8_TX_LENGTH    0x010b
+#define    RTL8367C_PKTGEN_PORT8_TX_LENGTH_OFFSET    0
+#define    RTL8367C_PKTGEN_PORT8_TX_LENGTH_MASK    0x3FFF
+
+#define    RTL8367C_REG_PKTGEN_PORT8_TIMER    0x010d
+#define    RTL8367C_PKTGEN_PORT8_TIMER_TIMER_OFFSET    4
+#define    RTL8367C_PKTGEN_PORT8_TIMER_TIMER_MASK    0xF0
+#define    RTL8367C_PKTGEN_PORT8_TIMER_RX_DMA_ERR_FLAG_OFFSET    3
+#define    RTL8367C_PKTGEN_PORT8_TIMER_RX_DMA_ERR_FLAG_MASK    0x8
+
+#define    RTL8367C_REG_PORT8_MISC_CFG    0x010e
+#define    RTL8367C_PORT8_MISC_CFG_SMALL_TAG_IPG_OFFSET    15
+#define    RTL8367C_PORT8_MISC_CFG_SMALL_TAG_IPG_MASK    0x8000
+#define    RTL8367C_PORT8_MISC_CFG_TX_ITFSP_MODE_OFFSET    14
+#define    RTL8367C_PORT8_MISC_CFG_TX_ITFSP_MODE_MASK    0x4000
+#define    RTL8367C_PORT8_MISC_CFG_FLOWCTRL_INDEP_OFFSET    13
+#define    RTL8367C_PORT8_MISC_CFG_FLOWCTRL_INDEP_MASK    0x2000
+#define    RTL8367C_PORT8_MISC_CFG_DOT1Q_REMARK_ENABLE_OFFSET    12
+#define    RTL8367C_PORT8_MISC_CFG_DOT1Q_REMARK_ENABLE_MASK    0x1000
+#define    RTL8367C_PORT8_MISC_CFG_INGRESSBW_FLOWCTRL_OFFSET    11
+#define    RTL8367C_PORT8_MISC_CFG_INGRESSBW_FLOWCTRL_MASK    0x800
+#define    RTL8367C_PORT8_MISC_CFG_INGRESSBW_IFG_OFFSET    10
+#define    RTL8367C_PORT8_MISC_CFG_INGRESSBW_IFG_MASK    0x400
+#define    RTL8367C_PORT8_MISC_CFG_RX_SPC_OFFSET    9
+#define    RTL8367C_PORT8_MISC_CFG_RX_SPC_MASK    0x200
+#define    RTL8367C_PORT8_MISC_CFG_CRC_SKIP_OFFSET    8
+#define    RTL8367C_PORT8_MISC_CFG_CRC_SKIP_MASK    0x100
+#define    RTL8367C_PORT8_MISC_CFG_PKTGEN_TX_FIRST_OFFSET    7
+#define    RTL8367C_PORT8_MISC_CFG_PKTGEN_TX_FIRST_MASK    0x80
+#define    RTL8367C_PORT8_MISC_CFG_MAC_LOOPBACK_OFFSET    6
+#define    RTL8367C_PORT8_MISC_CFG_MAC_LOOPBACK_MASK    0x40
+#define    RTL8367C_PORT8_MISC_CFG_VLAN_EGRESS_MODE_OFFSET    4
+#define    RTL8367C_PORT8_MISC_CFG_VLAN_EGRESS_MODE_MASK    0x30
+#define    RTL8367C_PORT8_MISC_CFG_CONGESTION_SUSTAIN_TIME_OFFSET    0
+#define    RTL8367C_PORT8_MISC_CFG_CONGESTION_SUSTAIN_TIME_MASK    0xF
+
+#define    RTL8367C_REG_INGRESSBW_PORT8_RATE_CTRL0    0x010f
+
+#define    RTL8367C_REG_INGRESSBW_PORT8_RATE_CTRL1    0x0110
+#define    RTL8367C_INGRESSBW_PORT8_RATE_CTRL1_DUMMY_OFFSET    3
+#define    RTL8367C_INGRESSBW_PORT8_RATE_CTRL1_DUMMY_MASK    0xFFF8
+#define    RTL8367C_INGRESSBW_PORT8_RATE_CTRL1_INGRESSBW_RATE16_OFFSET    0
+#define    RTL8367C_INGRESSBW_PORT8_RATE_CTRL1_INGRESSBW_RATE16_MASK    0x7
+
+#define    RTL8367C_REG_PORT8_FORCE_RATE0    0x0111
+
+#define    RTL8367C_REG_PORT8_FORCE_RATE1    0x0112
+
+#define    RTL8367C_REG_PORT8_CURENT_RATE0    0x0113
+
+#define    RTL8367C_REG_PORT8_CURENT_RATE1    0x0114
+
+#define    RTL8367C_REG_PORT8_PAGE_COUNTER    0x0115
+#define    RTL8367C_PORT8_PAGE_COUNTER_OFFSET    0
+#define    RTL8367C_PORT8_PAGE_COUNTER_MASK    0x7F
+
+#define    RTL8367C_REG_PAGEMETER_PORT8_CTRL0    0x0116
+
+#define    RTL8367C_REG_PAGEMETER_PORT8_CTRL1    0x0117
+
+#define    RTL8367C_REG_PORT8_EEECFG    0x0118
+#define    RTL8367C_PORT8_EEECFG_EEEP_ENABLE_TX_OFFSET    14
+#define    RTL8367C_PORT8_EEECFG_EEEP_ENABLE_TX_MASK    0x4000
+#define    RTL8367C_PORT8_EEECFG_EEEP_ENABLE_RX_OFFSET    13
+#define    RTL8367C_PORT8_EEECFG_EEEP_ENABLE_RX_MASK    0x2000
+#define    RTL8367C_PORT8_EEECFG_EEE_FORCE_OFFSET    12
+#define    RTL8367C_PORT8_EEECFG_EEE_FORCE_MASK    0x1000
+#define    RTL8367C_PORT8_EEECFG_EEE_100M_OFFSET    11
+#define    RTL8367C_PORT8_EEECFG_EEE_100M_MASK    0x800
+#define    RTL8367C_PORT8_EEECFG_EEE_GIGA_500M_OFFSET    10
+#define    RTL8367C_PORT8_EEECFG_EEE_GIGA_500M_MASK    0x400
+#define    RTL8367C_PORT8_EEECFG_EEE_TX_OFFSET    9
+#define    RTL8367C_PORT8_EEECFG_EEE_TX_MASK    0x200
+#define    RTL8367C_PORT8_EEECFG_EEE_RX_OFFSET    8
+#define    RTL8367C_PORT8_EEECFG_EEE_RX_MASK    0x100
+#define    RTL8367C_PORT8_EEECFG_EEE_DSP_RX_OFFSET    6
+#define    RTL8367C_PORT8_EEECFG_EEE_DSP_RX_MASK    0x40
+#define    RTL8367C_PORT8_EEECFG_EEE_LPI_OFFSET    5
+#define    RTL8367C_PORT8_EEECFG_EEE_LPI_MASK    0x20
+#define    RTL8367C_PORT8_EEECFG_EEE_TX_LPI_OFFSET    4
+#define    RTL8367C_PORT8_EEECFG_EEE_TX_LPI_MASK    0x10
+#define    RTL8367C_PORT8_EEECFG_EEE_RX_LPI_OFFSET    3
+#define    RTL8367C_PORT8_EEECFG_EEE_RX_LPI_MASK    0x8
+#define    RTL8367C_PORT8_EEECFG_EEE_PAUSE_INDICATOR_OFFSET    2
+#define    RTL8367C_PORT8_EEECFG_EEE_PAUSE_INDICATOR_MASK    0x4
+#define    RTL8367C_PORT8_EEECFG_EEE_WAKE_REQ_OFFSET    1
+#define    RTL8367C_PORT8_EEECFG_EEE_WAKE_REQ_MASK    0x2
+#define    RTL8367C_PORT8_EEECFG_EEE_SLEEP_REQ_OFFSET    0
+#define    RTL8367C_PORT8_EEECFG_EEE_SLEEP_REQ_MASK    0x1
+
+#define    RTL8367C_REG_PORT8_EEETXMTR    0x0119
+
+#define    RTL8367C_REG_PORT8_EEERXMTR    0x011a
+
+#define    RTL8367C_REG_PORT8_EEEPTXMTR    0x011b
+
+#define    RTL8367C_REG_PORT8_EEEPRXMTR    0x011c
+
+#define    RTL8367C_REG_PTP_PORT8_CFG1    0x011e
+#define    RTL8367C_PTP_PORT8_CFG1_OFFSET    7
+#define    RTL8367C_PTP_PORT8_CFG1_MASK    0xFF
+
+#define    RTL8367C_REG_P8_MSIC1    0x011f
+#define    RTL8367C_P8_MSIC1_OFFSET    0
+#define    RTL8367C_P8_MSIC1_MASK    0x1
+
+#define    RTL8367C_REG_PORT9_CGST_HALF_CFG    0x0120
+#define    RTL8367C_PORT9_CGST_HALF_CFG_CONGESTION_TIME_OFFSET    4
+#define    RTL8367C_PORT9_CGST_HALF_CFG_CONGESTION_TIME_MASK    0xF0
+#define    RTL8367C_PORT9_CGST_HALF_CFG_CONGESTION_SUSTAIN_TIME_OFFSET    0
+#define    RTL8367C_PORT9_CGST_HALF_CFG_CONGESTION_SUSTAIN_TIME_MASK    0xF
+
+#define    RTL8367C_REG_PKTGEN_PORT9_CTRL    0x0121
+#define    RTL8367C_PKTGEN_PORT9_CTRL_STATUS_OFFSET    15
+#define    RTL8367C_PKTGEN_PORT9_CTRL_STATUS_MASK    0x8000
+#define    RTL8367C_PKTGEN_PORT9_CTRL_PKTGEN_STS_OFFSET    13
+#define    RTL8367C_PKTGEN_PORT9_CTRL_PKTGEN_STS_MASK    0x2000
+#define    RTL8367C_PKTGEN_PORT9_CTRL_CRC_NO_ERROR_OFFSET    4
+#define    RTL8367C_PKTGEN_PORT9_CTRL_CRC_NO_ERROR_MASK    0x10
+#define    RTL8367C_PKTGEN_PORT9_CTRL_CMD_START_OFFSET    0
+#define    RTL8367C_PKTGEN_PORT9_CTRL_CMD_START_MASK    0x1
+
+#define    RTL8367C_REG_TX_ERR_CNT_PORT9    0x0122
+#define    RTL8367C_TX_ERR_CNT_PORT9_OFFSET    0
+#define    RTL8367C_TX_ERR_CNT_PORT9_MASK    0x7
+
+#define    RTL8367C_REG_PKTGEN_PORT9_DA0    0x0123
+
+#define    RTL8367C_REG_PKTGEN_PORT9_DA1    0x0124
+
+#define    RTL8367C_REG_PKTGEN_PORT9_DA2    0x0125
+
+#define    RTL8367C_REG_PKTGEN_PORT9_SA0    0x0126
+
+#define    RTL8367C_REG_PKTGEN_PORT9_SA1    0x0127
+
+#define    RTL8367C_REG_PKTGEN_PORT9_SA2    0x0128
+
+#define    RTL8367C_REG_PKTGEN_PORT9_COUNTER0    0x0129
+
+#define    RTL8367C_REG_PKTGEN_PORT9_COUNTER1    0x012a
+#define    RTL8367C_PKTGEN_PORT9_COUNTER1_OFFSET    0
+#define    RTL8367C_PKTGEN_PORT9_COUNTER1_MASK    0xFF
+
+#define    RTL8367C_REG_PKTGEN_PORT9_TX_LENGTH    0x012b
+#define    RTL8367C_PKTGEN_PORT9_TX_LENGTH_OFFSET    0
+#define    RTL8367C_PKTGEN_PORT9_TX_LENGTH_MASK    0x3FFF
+
+#define    RTL8367C_REG_PKTGEN_PORT9_TIMER    0x012d
+#define    RTL8367C_PKTGEN_PORT9_TIMER_TIMER_OFFSET    4
+#define    RTL8367C_PKTGEN_PORT9_TIMER_TIMER_MASK    0xF0
+#define    RTL8367C_PKTGEN_PORT9_TIMER_RX_DMA_ERR_FLAG_OFFSET    3
+#define    RTL8367C_PKTGEN_PORT9_TIMER_RX_DMA_ERR_FLAG_MASK    0x8
+
+#define    RTL8367C_REG_PORT9_MISC_CFG    0x012e
+#define    RTL8367C_PORT9_MISC_CFG_SMALL_TAG_IPG_OFFSET    15
+#define    RTL8367C_PORT9_MISC_CFG_SMALL_TAG_IPG_MASK    0x8000
+#define    RTL8367C_PORT9_MISC_CFG_TX_ITFSP_MODE_OFFSET    14
+#define    RTL8367C_PORT9_MISC_CFG_TX_ITFSP_MODE_MASK    0x4000
+#define    RTL8367C_PORT9_MISC_CFG_FLOWCTRL_INDEP_OFFSET    13
+#define    RTL8367C_PORT9_MISC_CFG_FLOWCTRL_INDEP_MASK    0x2000
+#define    RTL8367C_PORT9_MISC_CFG_DOT1Q_REMARK_ENABLE_OFFSET    12
+#define    RTL8367C_PORT9_MISC_CFG_DOT1Q_REMARK_ENABLE_MASK    0x1000
+#define    RTL8367C_PORT9_MISC_CFG_INGRESSBW_FLOWCTRL_OFFSET    11
+#define    RTL8367C_PORT9_MISC_CFG_INGRESSBW_FLOWCTRL_MASK    0x800
+#define    RTL8367C_PORT9_MISC_CFG_INGRESSBW_IFG_OFFSET    10
+#define    RTL8367C_PORT9_MISC_CFG_INGRESSBW_IFG_MASK    0x400
+#define    RTL8367C_PORT9_MISC_CFG_RX_SPC_OFFSET    9
+#define    RTL8367C_PORT9_MISC_CFG_RX_SPC_MASK    0x200
+#define    RTL8367C_PORT9_MISC_CFG_CRC_SKIP_OFFSET    8
+#define    RTL8367C_PORT9_MISC_CFG_CRC_SKIP_MASK    0x100
+#define    RTL8367C_PORT9_MISC_CFG_PKTGEN_TX_FIRST_OFFSET    7
+#define    RTL8367C_PORT9_MISC_CFG_PKTGEN_TX_FIRST_MASK    0x80
+#define    RTL8367C_PORT9_MISC_CFG_MAC_LOOPBACK_OFFSET    6
+#define    RTL8367C_PORT9_MISC_CFG_MAC_LOOPBACK_MASK    0x40
+#define    RTL8367C_PORT9_MISC_CFG_VLAN_EGRESS_MODE_OFFSET    4
+#define    RTL8367C_PORT9_MISC_CFG_VLAN_EGRESS_MODE_MASK    0x30
+#define    RTL8367C_PORT9_MISC_CFG_CONGESTION_SUSTAIN_TIME_OFFSET    0
+#define    RTL8367C_PORT9_MISC_CFG_CONGESTION_SUSTAIN_TIME_MASK    0xF
+
+#define    RTL8367C_REG_INGRESSBW_PORT9_RATE_CTRL0    0x012f
+
+#define    RTL8367C_REG_INGRESSBW_PORT9_RATE_CTRL1    0x0130
+#define    RTL8367C_INGRESSBW_PORT9_RATE_CTRL1_DUMMY_OFFSET    3
+#define    RTL8367C_INGRESSBW_PORT9_RATE_CTRL1_DUMMY_MASK    0xFFF8
+#define    RTL8367C_INGRESSBW_PORT9_RATE_CTRL1_INGRESSBW_RATE16_OFFSET    0
+#define    RTL8367C_INGRESSBW_PORT9_RATE_CTRL1_INGRESSBW_RATE16_MASK    0x7
+
+#define    RTL8367C_REG_PORT9_FORCE_RATE0    0x0131
+
+#define    RTL8367C_REG_PORT9_FORCE_RATE1    0x0132
+
+#define    RTL8367C_REG_PORT9_CURENT_RATE0    0x0133
+
+#define    RTL8367C_REG_PORT9_CURENT_RATE1    0x0134
+
+#define    RTL8367C_REG_PORT9_PAGE_COUNTER    0x0135
+#define    RTL8367C_PORT9_PAGE_COUNTER_OFFSET    0
+#define    RTL8367C_PORT9_PAGE_COUNTER_MASK    0x7F
+
+#define    RTL8367C_REG_PAGEMETER_PORT9_CTRL0    0x0136
+
+#define    RTL8367C_REG_PAGEMETER_PORT9_CTRL1    0x0137
+
+#define    RTL8367C_REG_PORT9_EEECFG    0x0138
+#define    RTL8367C_PORT9_EEECFG_EEEP_ENABLE_TX_OFFSET    14
+#define    RTL8367C_PORT9_EEECFG_EEEP_ENABLE_TX_MASK    0x4000
+#define    RTL8367C_PORT9_EEECFG_EEEP_ENABLE_RX_OFFSET    13
+#define    RTL8367C_PORT9_EEECFG_EEEP_ENABLE_RX_MASK    0x2000
+#define    RTL8367C_PORT9_EEECFG_EEE_FORCE_OFFSET    12
+#define    RTL8367C_PORT9_EEECFG_EEE_FORCE_MASK    0x1000
+#define    RTL8367C_PORT9_EEECFG_EEE_100M_OFFSET    11
+#define    RTL8367C_PORT9_EEECFG_EEE_100M_MASK    0x800
+#define    RTL8367C_PORT9_EEECFG_EEE_GIGA_500M_OFFSET    10
+#define    RTL8367C_PORT9_EEECFG_EEE_GIGA_500M_MASK    0x400
+#define    RTL8367C_PORT9_EEECFG_EEE_TX_OFFSET    9
+#define    RTL8367C_PORT9_EEECFG_EEE_TX_MASK    0x200
+#define    RTL8367C_PORT9_EEECFG_EEE_RX_OFFSET    8
+#define    RTL8367C_PORT9_EEECFG_EEE_RX_MASK    0x100
+#define    RTL8367C_PORT9_EEECFG_EEE_DSP_RX_OFFSET    6
+#define    RTL8367C_PORT9_EEECFG_EEE_DSP_RX_MASK    0x40
+#define    RTL8367C_PORT9_EEECFG_EEE_LPI_OFFSET    5
+#define    RTL8367C_PORT9_EEECFG_EEE_LPI_MASK    0x20
+#define    RTL8367C_PORT9_EEECFG_EEE_TX_LPI_OFFSET    4
+#define    RTL8367C_PORT9_EEECFG_EEE_TX_LPI_MASK    0x10
+#define    RTL8367C_PORT9_EEECFG_EEE_RX_LPI_OFFSET    3
+#define    RTL8367C_PORT9_EEECFG_EEE_RX_LPI_MASK    0x8
+#define    RTL8367C_PORT9_EEECFG_EEE_PAUSE_INDICATOR_OFFSET    2
+#define    RTL8367C_PORT9_EEECFG_EEE_PAUSE_INDICATOR_MASK    0x4
+#define    RTL8367C_PORT9_EEECFG_EEE_WAKE_REQ_OFFSET    1
+#define    RTL8367C_PORT9_EEECFG_EEE_WAKE_REQ_MASK    0x2
+#define    RTL8367C_PORT9_EEECFG_EEE_SLEEP_REQ_OFFSET    0
+#define    RTL8367C_PORT9_EEECFG_EEE_SLEEP_REQ_MASK    0x1
+
+#define    RTL8367C_REG_PORT9_EEETXMTR    0x0139
+
+#define    RTL8367C_REG_PORT9_EEERXMTR    0x013a
+
+#define    RTL8367C_REG_PORT9_EEEPTXMTR    0x013b
+
+#define    RTL8367C_REG_PORT9_EEEPRXMTR    0x013c
+
+#define    RTL8367C_REG_PTP_PORT9_CFG1    0x013e
+#define    RTL8367C_PTP_PORT9_CFG1_OFFSET    7
+#define    RTL8367C_PTP_PORT9_CFG1_MASK    0xFF
+
+#define    RTL8367C_REG_P9_MSIC1    0x013f
+#define    RTL8367C_P9_MSIC1_OFFSET    0
+#define    RTL8367C_P9_MSIC1_MASK    0x1
+
+#define    RTL8367C_REG_PORT10_CGST_HALF_CFG    0x0140
+#define    RTL8367C_PORT10_CGST_HALF_CFG_CONGESTION_TIME_OFFSET    4
+#define    RTL8367C_PORT10_CGST_HALF_CFG_CONGESTION_TIME_MASK    0xF0
+#define    RTL8367C_PORT10_CGST_HALF_CFG_CONGESTION_SUSTAIN_TIME_OFFSET    0
+#define    RTL8367C_PORT10_CGST_HALF_CFG_CONGESTION_SUSTAIN_TIME_MASK    0xF
+
+#define    RTL8367C_REG_PKTGEN_PORT10_CTRL    0x0141
+#define    RTL8367C_PKTGEN_PORT10_CTRL_STATUS_OFFSET    15
+#define    RTL8367C_PKTGEN_PORT10_CTRL_STATUS_MASK    0x8000
+#define    RTL8367C_PKTGEN_PORT10_CTRL_PKTGEN_STS_OFFSET    13
+#define    RTL8367C_PKTGEN_PORT10_CTRL_PKTGEN_STS_MASK    0x2000
+#define    RTL8367C_PKTGEN_PORT10_CTRL_CRC_NO_ERROR_OFFSET    4
+#define    RTL8367C_PKTGEN_PORT10_CTRL_CRC_NO_ERROR_MASK    0x10
+#define    RTL8367C_PKTGEN_PORT10_CTRL_CMD_START_OFFSET    0
+#define    RTL8367C_PKTGEN_PORT10_CTRL_CMD_START_MASK    0x1
+
+#define    RTL8367C_REG_TX_ERR_CNT_PORT10    0x0142
+#define    RTL8367C_TX_ERR_CNT_PORT10_OFFSET    0
+#define    RTL8367C_TX_ERR_CNT_PORT10_MASK    0x7
+
+#define    RTL8367C_REG_PKTGEN_PORT10_DA0    0x0143
+
+#define    RTL8367C_REG_PKTGEN_PORT10_DA1    0x0144
+
+#define    RTL8367C_REG_PKTGEN_PORT10_DA2    0x0145
+
+#define    RTL8367C_REG_PKTGEN_PORT10_SA0    0x0146
+
+#define    RTL8367C_REG_PKTGEN_PORT10_SA1    0x0147
+
+#define    RTL8367C_REG_PKTGEN_PORT10_SA2    0x0148
+
+#define    RTL8367C_REG_PKTGEN_PORT10_COUNTER0    0x0149
+
+#define    RTL8367C_REG_PKTGEN_PORT10_COUNTER1    0x014a
+#define    RTL8367C_PKTGEN_PORT10_COUNTER1_OFFSET    0
+#define    RTL8367C_PKTGEN_PORT10_COUNTER1_MASK    0xFF
+
+#define    RTL8367C_REG_PKTGEN_PORT10_TX_LENGTH    0x014b
+#define    RTL8367C_PKTGEN_PORT10_TX_LENGTH_OFFSET    0
+#define    RTL8367C_PKTGEN_PORT10_TX_LENGTH_MASK    0x3FFF
+
+#define    RTL8367C_REG_PKTGEN_PORT10_TIMER    0x014d
+#define    RTL8367C_PKTGEN_PORT10_TIMER_TIMER_OFFSET    4
+#define    RTL8367C_PKTGEN_PORT10_TIMER_TIMER_MASK    0xF0
+#define    RTL8367C_PKTGEN_PORT10_TIMER_RX_DMA_ERR_FLAG_OFFSET    3
+#define    RTL8367C_PKTGEN_PORT10_TIMER_RX_DMA_ERR_FLAG_MASK    0x8
+
+#define    RTL8367C_REG_PORT10_MISC_CFG    0x014e
+#define    RTL8367C_PORT10_MISC_CFG_SMALL_TAG_IPG_OFFSET    15
+#define    RTL8367C_PORT10_MISC_CFG_SMALL_TAG_IPG_MASK    0x8000
+#define    RTL8367C_PORT10_MISC_CFG_TX_ITFSP_MODE_OFFSET    14
+#define    RTL8367C_PORT10_MISC_CFG_TX_ITFSP_MODE_MASK    0x4000
+#define    RTL8367C_PORT10_MISC_CFG_FLOWCTRL_INDEP_OFFSET    13
+#define    RTL8367C_PORT10_MISC_CFG_FLOWCTRL_INDEP_MASK    0x2000
+#define    RTL8367C_PORT10_MISC_CFG_DOT1Q_REMARK_ENABLE_OFFSET    12
+#define    RTL8367C_PORT10_MISC_CFG_DOT1Q_REMARK_ENABLE_MASK    0x1000
+#define    RTL8367C_PORT10_MISC_CFG_INGRESSBW_FLOWCTRL_OFFSET    11
+#define    RTL8367C_PORT10_MISC_CFG_INGRESSBW_FLOWCTRL_MASK    0x800
+#define    RTL8367C_PORT10_MISC_CFG_INGRESSBW_IFG_OFFSET    10
+#define    RTL8367C_PORT10_MISC_CFG_INGRESSBW_IFG_MASK    0x400
+#define    RTL8367C_PORT10_MISC_CFG_RX_SPC_OFFSET    9
+#define    RTL8367C_PORT10_MISC_CFG_RX_SPC_MASK    0x200
+#define    RTL8367C_PORT10_MISC_CFG_CRC_SKIP_OFFSET    8
+#define    RTL8367C_PORT10_MISC_CFG_CRC_SKIP_MASK    0x100
+#define    RTL8367C_PORT10_MISC_CFG_PKTGEN_TX_FIRST_OFFSET    7
+#define    RTL8367C_PORT10_MISC_CFG_PKTGEN_TX_FIRST_MASK    0x80
+#define    RTL8367C_PORT10_MISC_CFG_MAC_LOOPBACK_OFFSET    6
+#define    RTL8367C_PORT10_MISC_CFG_MAC_LOOPBACK_MASK    0x40
+#define    RTL8367C_PORT10_MISC_CFG_VLAN_EGRESS_MODE_OFFSET    4
+#define    RTL8367C_PORT10_MISC_CFG_VLAN_EGRESS_MODE_MASK    0x30
+#define    RTL8367C_PORT10_MISC_CFG_CONGESTION_SUSTAIN_TIME_OFFSET    0
+#define    RTL8367C_PORT10_MISC_CFG_CONGESTION_SUSTAIN_TIME_MASK    0xF
+
+#define    RTL8367C_REG_INGRESSBW_PORT10_RATE_CTRL0    0x014f
+
+#define    RTL8367C_REG_INGRESSBW_PORT10_RATE_CTRL1    0x0150
+#define    RTL8367C_INGRESSBW_PORT10_RATE_CTRL1_DUMMY_OFFSET    3
+#define    RTL8367C_INGRESSBW_PORT10_RATE_CTRL1_DUMMY_MASK    0xFFF8
+#define    RTL8367C_INGRESSBW_PORT10_RATE_CTRL1_INGRESSBW_RATE16_OFFSET    0
+#define    RTL8367C_INGRESSBW_PORT10_RATE_CTRL1_INGRESSBW_RATE16_MASK    0x7
+
+#define    RTL8367C_REG_PORT10_FORCE_RATE0    0x0151
+
+#define    RTL8367C_REG_PORT10_FORCE_RATE1    0x0152
+
+#define    RTL8367C_REG_PORT10_CURENT_RATE0    0x0153
+
+#define    RTL8367C_REG_PORT10_CURENT_RATE1    0x0154
+
+#define    RTL8367C_REG_PORT10_PAGE_COUNTER    0x0155
+#define    RTL8367C_PORT10_PAGE_COUNTER_OFFSET    0
+#define    RTL8367C_PORT10_PAGE_COUNTER_MASK    0x7F
+
+#define    RTL8367C_REG_PAGEMETER_PORT10_CTRL0    0x0156
+
+#define    RTL8367C_REG_PAGEMETER_PORT10_CTRL1    0x0157
+
+#define    RTL8367C_REG_PORT10_EEECFG    0x0158
+#define    RTL8367C_PORT10_EEECFG_EEEP_ENABLE_TX_OFFSET    14
+#define    RTL8367C_PORT10_EEECFG_EEEP_ENABLE_TX_MASK    0x4000
+#define    RTL8367C_PORT10_EEECFG_EEEP_ENABLE_RX_OFFSET    13
+#define    RTL8367C_PORT10_EEECFG_EEEP_ENABLE_RX_MASK    0x2000
+#define    RTL8367C_PORT10_EEECFG_EEE_FORCE_OFFSET    12
+#define    RTL8367C_PORT10_EEECFG_EEE_FORCE_MASK    0x1000
+#define    RTL8367C_PORT10_EEECFG_EEE_100M_OFFSET    11
+#define    RTL8367C_PORT10_EEECFG_EEE_100M_MASK    0x800
+#define    RTL8367C_PORT10_EEECFG_EEE_GIGA_500M_OFFSET    10
+#define    RTL8367C_PORT10_EEECFG_EEE_GIGA_500M_MASK    0x400
+#define    RTL8367C_PORT10_EEECFG_EEE_TX_OFFSET    9
+#define    RTL8367C_PORT10_EEECFG_EEE_TX_MASK    0x200
+#define    RTL8367C_PORT10_EEECFG_EEE_RX_OFFSET    8
+#define    RTL8367C_PORT10_EEECFG_EEE_RX_MASK    0x100
+#define    RTL8367C_PORT10_EEECFG_EEE_DSP_RX_OFFSET    6
+#define    RTL8367C_PORT10_EEECFG_EEE_DSP_RX_MASK    0x40
+#define    RTL8367C_PORT10_EEECFG_EEE_LPI_OFFSET    5
+#define    RTL8367C_PORT10_EEECFG_EEE_LPI_MASK    0x20
+#define    RTL8367C_PORT10_EEECFG_EEE_TX_LPI_OFFSET    4
+#define    RTL8367C_PORT10_EEECFG_EEE_TX_LPI_MASK    0x10
+#define    RTL8367C_PORT10_EEECFG_EEE_RX_LPI_OFFSET    3
+#define    RTL8367C_PORT10_EEECFG_EEE_RX_LPI_MASK    0x8
+#define    RTL8367C_PORT10_EEECFG_EEE_PAUSE_INDICATOR_OFFSET    2
+#define    RTL8367C_PORT10_EEECFG_EEE_PAUSE_INDICATOR_MASK    0x4
+#define    RTL8367C_PORT10_EEECFG_EEE_WAKE_REQ_OFFSET    1
+#define    RTL8367C_PORT10_EEECFG_EEE_WAKE_REQ_MASK    0x2
+#define    RTL8367C_PORT10_EEECFG_EEE_SLEEP_REQ_OFFSET    0
+#define    RTL8367C_PORT10_EEECFG_EEE_SLEEP_REQ_MASK    0x1
+
+#define    RTL8367C_REG_PORT10_EEETXMTR    0x0159
+
+#define    RTL8367C_REG_PORT10_EEERXMTR    0x015a
+
+#define    RTL8367C_REG_PORT10_EEEPTXMTR    0x015b
+
+#define    RTL8367C_REG_PORT10_EEEPRXMTR    0x015c
+
+#define    RTL8367C_REG_PTP_PORT10_CFG1    0x015e
+#define    RTL8367C_PTP_PORT10_CFG1_OFFSET    7
+#define    RTL8367C_PTP_PORT10_CFG1_MASK    0xFF
+
+#define    RTL8367C_REG_P10_MSIC1    0x015f
+#define    RTL8367C_P10_MSIC1_OFFSET    0
+#define    RTL8367C_P10_MSIC1_MASK    0x1
+
+/* (16'h0200)outq_reg */
+
+#define    RTL8367C_REG_FLOWCTRL_QUEUE0_DROP_ON    0x0200
+#define    RTL8367C_FLOWCTRL_QUEUE0_DROP_ON_OFFSET    0
+#define    RTL8367C_FLOWCTRL_QUEUE0_DROP_ON_MASK    0x7FF
+
+#define    RTL8367C_REG_FLOWCTRL_QUEUE1_DROP_ON    0x0201
+#define    RTL8367C_FLOWCTRL_QUEUE1_DROP_ON_OFFSET    0
+#define    RTL8367C_FLOWCTRL_QUEUE1_DROP_ON_MASK    0x7FF
+
+#define    RTL8367C_REG_FLOWCTRL_QUEUE2_DROP_ON    0x0202
+#define    RTL8367C_FLOWCTRL_QUEUE2_DROP_ON_OFFSET    0
+#define    RTL8367C_FLOWCTRL_QUEUE2_DROP_ON_MASK    0x7FF
+
+#define    RTL8367C_REG_FLOWCTRL_QUEUE3_DROP_ON    0x0203
+#define    RTL8367C_FLOWCTRL_QUEUE3_DROP_ON_OFFSET    0
+#define    RTL8367C_FLOWCTRL_QUEUE3_DROP_ON_MASK    0x7FF
+
+#define    RTL8367C_REG_FLOWCTRL_QUEUE4_DROP_ON    0x0204
+#define    RTL8367C_FLOWCTRL_QUEUE4_DROP_ON_OFFSET    0
+#define    RTL8367C_FLOWCTRL_QUEUE4_DROP_ON_MASK    0x7FF
+
+#define    RTL8367C_REG_FLOWCTRL_QUEUE5_DROP_ON    0x0205
+#define    RTL8367C_FLOWCTRL_QUEUE5_DROP_ON_OFFSET    0
+#define    RTL8367C_FLOWCTRL_QUEUE5_DROP_ON_MASK    0x7FF
+
+#define    RTL8367C_REG_FLOWCTRL_QUEUE6_DROP_ON    0x0206
+#define    RTL8367C_FLOWCTRL_QUEUE6_DROP_ON_OFFSET    0
+#define    RTL8367C_FLOWCTRL_QUEUE6_DROP_ON_MASK    0x7FF
+
+#define    RTL8367C_REG_FLOWCTRL_QUEUE7_DROP_ON    0x0207
+#define    RTL8367C_FLOWCTRL_QUEUE7_DROP_ON_OFFSET    0
+#define    RTL8367C_FLOWCTRL_QUEUE7_DROP_ON_MASK    0x7FF
+
+#define    RTL8367C_REG_FLOWCTRL_PORT0_DROP_ON    0x0208
+#define    RTL8367C_FLOWCTRL_PORT0_DROP_ON_OFFSET    0
+#define    RTL8367C_FLOWCTRL_PORT0_DROP_ON_MASK    0x7FF
+
+#define    RTL8367C_REG_FLOWCTRL_PORT1_DROP_ON    0x0209
+#define    RTL8367C_FLOWCTRL_PORT1_DROP_ON_OFFSET    0
+#define    RTL8367C_FLOWCTRL_PORT1_DROP_ON_MASK    0x7FF
+
+#define    RTL8367C_REG_FLOWCTRL_PORT2_DROP_ON    0x020a
+#define    RTL8367C_FLOWCTRL_PORT2_DROP_ON_OFFSET    0
+#define    RTL8367C_FLOWCTRL_PORT2_DROP_ON_MASK    0x7FF
+
+#define    RTL8367C_REG_FLOWCTRL_PORT3_DROP_ON    0x020b
+#define    RTL8367C_FLOWCTRL_PORT3_DROP_ON_OFFSET    0
+#define    RTL8367C_FLOWCTRL_PORT3_DROP_ON_MASK    0x7FF
+
+#define    RTL8367C_REG_FLOWCTRL_PORT4_DROP_ON    0x020c
+#define    RTL8367C_FLOWCTRL_PORT4_DROP_ON_OFFSET    0
+#define    RTL8367C_FLOWCTRL_PORT4_DROP_ON_MASK    0x7FF
+
+#define    RTL8367C_REG_FLOWCTRL_PORT5_DROP_ON    0x020d
+#define    RTL8367C_FLOWCTRL_PORT5_DROP_ON_OFFSET    0
+#define    RTL8367C_FLOWCTRL_PORT5_DROP_ON_MASK    0x7FF
+
+#define    RTL8367C_REG_FLOWCTRL_PORT6_DROP_ON    0x020e
+#define    RTL8367C_FLOWCTRL_PORT6_DROP_ON_OFFSET    0
+#define    RTL8367C_FLOWCTRL_PORT6_DROP_ON_MASK    0x7FF
+
+#define    RTL8367C_REG_FLOWCTRL_PORT7_DROP_ON    0x020f
+#define    RTL8367C_FLOWCTRL_PORT7_DROP_ON_OFFSET    0
+#define    RTL8367C_FLOWCTRL_PORT7_DROP_ON_MASK    0x7FF
+
+#define    RTL8367C_REG_FLOWCTRL_PORT8_DROP_ON    0x0210
+#define    RTL8367C_FLOWCTRL_PORT8_DROP_ON_OFFSET    0
+#define    RTL8367C_FLOWCTRL_PORT8_DROP_ON_MASK    0x7FF
+
+#define    RTL8367C_REG_FLOWCTRL_PORT9_DROP_ON    0x0211
+#define    RTL8367C_FLOWCTRL_PORT9_DROP_ON_OFFSET    0
+#define    RTL8367C_FLOWCTRL_PORT9_DROP_ON_MASK    0x7FF
+
+#define    RTL8367C_REG_FLOWCTRL_PORT10_DROP_ON    0x0212
+#define    RTL8367C_FLOWCTRL_PORT10_DROP_ON_OFFSET    0
+#define    RTL8367C_FLOWCTRL_PORT10_DROP_ON_MASK    0x7FF
+
+#define    RTL8367C_REG_FLOWCTRL_PORT_GAP    0x0218
+#define    RTL8367C_FLOWCTRL_PORT_GAP_OFFSET    0
+#define    RTL8367C_FLOWCTRL_PORT_GAP_MASK    0x7FF
+
+#define    RTL8367C_REG_FLOWCTRL_QUEUE_GAP    0x0219
+#define    RTL8367C_FLOWCTRL_QUEUE_GAP_OFFSET    0
+#define    RTL8367C_FLOWCTRL_QUEUE_GAP_MASK    0x7FF
+
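+/*
+ * The DROP_ON registers hold high-water marks in buffer pages; PORT_GAP
+ * and QUEUE_GAP presumably provide the hysteresis, i.e. dropping stops
+ * once the page count falls back below (DROP_ON - GAP).
+ */
+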
+#define    RTL8367C_REG_PORT_QEMPTY    0x022d
+#define    RTL8367C_PORT_QEMPTY_OFFSET    0
+#define    RTL8367C_PORT_QEMPTY_MASK    0x7FF
+
+#define    RTL8367C_REG_FLOWCTRL_DEBUG_CTRL0    0x022e
+#define    RTL8367C_FLOWCTRL_DEBUG_CTRL0_OFFSET    0
+#define    RTL8367C_FLOWCTRL_DEBUG_CTRL0_MASK    0xF
+
+#define    RTL8367C_REG_FLOWCTRL_DEBUG_CTRL1    0x022f
+#define    RTL8367C_TOTAL_OFFSET    9
+#define    RTL8367C_TOTAL_MASK    0x200
+#define    RTL8367C_PORT_MAX_OFFSET    8
+#define    RTL8367C_PORT_MAX_MASK    0x100
+#define    RTL8367C_QMAX_MASK_OFFSET    0
+#define    RTL8367C_QMAX_MASK_MASK    0xFF
+
+#define    RTL8367C_REG_FLOWCTRL_QUEUE0_PAGE_COUNT    0x0230
+#define    RTL8367C_FLOWCTRL_QUEUE0_PAGE_COUNT_OFFSET    0
+#define    RTL8367C_FLOWCTRL_QUEUE0_PAGE_COUNT_MASK    0x7FF
+
+#define    RTL8367C_REG_FLOWCTRL_QUEUE1_PAGE_COUNT    0x0231
+#define    RTL8367C_FLOWCTRL_QUEUE1_PAGE_COUNT_OFFSET    0
+#define    RTL8367C_FLOWCTRL_QUEUE1_PAGE_COUNT_MASK    0x7FF
+
+#define    RTL8367C_REG_FLOWCTRL_QUEUE2_PAGE_COUNT    0x0232
+#define    RTL8367C_FLOWCTRL_QUEUE2_PAGE_COUNT_OFFSET    0
+#define    RTL8367C_FLOWCTRL_QUEUE2_PAGE_COUNT_MASK    0x7FF
+
+#define    RTL8367C_REG_FLOWCTRL_QUEUE3_PAGE_COUNT    0x0233
+#define    RTL8367C_FLOWCTRL_QUEUE3_PAGE_COUNT_OFFSET    0
+#define    RTL8367C_FLOWCTRL_QUEUE3_PAGE_COUNT_MASK    0x7FF
+
+#define    RTL8367C_REG_FLOWCTRL_QUEUE4_PAGE_COUNT    0x0234
+#define    RTL8367C_FLOWCTRL_QUEUE4_PAGE_COUNT_OFFSET    0
+#define    RTL8367C_FLOWCTRL_QUEUE4_PAGE_COUNT_MASK    0x7FF
+
+#define    RTL8367C_REG_FLOWCTRL_QUEUE5_PAGE_COUNT    0x0235
+#define    RTL8367C_FLOWCTRL_QUEUE5_PAGE_COUNT_OFFSET    0
+#define    RTL8367C_FLOWCTRL_QUEUE5_PAGE_COUNT_MASK    0x7FF
+
+#define    RTL8367C_REG_FLOWCTRL_QUEUE6_PAGE_COUNT    0x0236
+#define    RTL8367C_FLOWCTRL_QUEUE6_PAGE_COUNT_OFFSET    0
+#define    RTL8367C_FLOWCTRL_QUEUE6_PAGE_COUNT_MASK    0x7FF
+
+#define    RTL8367C_REG_FLOWCTRL_QUEUE7_PAGE_COUNT    0x0237
+#define    RTL8367C_FLOWCTRL_QUEUE7_PAGE_COUNT_OFFSET    0
+#define    RTL8367C_FLOWCTRL_QUEUE7_PAGE_COUNT_MASK    0x7FF
+
+#define    RTL8367C_REG_FLOWCTRL_PORT_PAGE_COUNT    0x0238
+#define    RTL8367C_FLOWCTRL_PORT_PAGE_COUNT_OFFSET    0
+#define    RTL8367C_FLOWCTRL_PORT_PAGE_COUNT_MASK    0x7FF
+
+#define    RTL8367C_REG_FLOWCTRL_QUEUE0_MAX_PAGE_COUNT    0x0239
+#define    RTL8367C_FLOWCTRL_QUEUE0_MAX_PAGE_COUNT_OFFSET    0
+#define    RTL8367C_FLOWCTRL_QUEUE0_MAX_PAGE_COUNT_MASK    0x7FF
+
+#define    RTL8367C_REG_FLOWCTRL_QUEUE1_MAX_PAGE_COUNT    0x023a
+#define    RTL8367C_FLOWCTRL_QUEUE1_MAX_PAGE_COUNT_OFFSET    0
+#define    RTL8367C_FLOWCTRL_QUEUE1_MAX_PAGE_COUNT_MASK    0x7FF
+
+#define    RTL8367C_REG_FLOWCTRL_QUEUE2_MAX_PAGE_COUNT    0x023b
+#define    RTL8367C_FLOWCTRL_QUEUE2_MAX_PAGE_COUNT_OFFSET    0
+#define    RTL8367C_FLOWCTRL_QUEUE2_MAX_PAGE_COUNT_MASK    0x7FF
+
+#define    RTL8367C_REG_FLOWCTRL_QUEUE3_MAX_PAGE_COUNT    0x023c
+#define    RTL8367C_FLOWCTRL_QUEUE3_MAX_PAGE_COUNT_OFFSET    0
+#define    RTL8367C_FLOWCTRL_QUEUE3_MAX_PAGE_COUNT_MASK    0x7FF
+
+#define    RTL8367C_REG_FLOWCTRL_QUEUE4_MAX_PAGE_COUNT    0x023d
+#define    RTL8367C_FLOWCTRL_QUEUE4_MAX_PAGE_COUNT_OFFSET    0
+#define    RTL8367C_FLOWCTRL_QUEUE4_MAX_PAGE_COUNT_MASK    0x7FF
+
+#define    RTL8367C_REG_FLOWCTRL_QUEUE5_MAX_PAGE_COUNT    0x023e
+#define    RTL8367C_FLOWCTRL_QUEUE5_MAX_PAGE_COUNT_OFFSET    0
+#define    RTL8367C_FLOWCTRL_QUEUE5_MAX_PAGE_COUNT_MASK    0x7FF
+
+#define    RTL8367C_REG_FLOWCTRL_QUEUE6_MAX_PAGE_COUNT    0x023f
+#define    RTL8367C_FLOWCTRL_QUEUE6_MAX_PAGE_COUNT_OFFSET    0
+#define    RTL8367C_FLOWCTRL_QUEUE6_MAX_PAGE_COUNT_MASK    0x7FF
+
+#define    RTL8367C_REG_FLOWCTRL_QUEUE7_MAX_PAGE_COUNT    0x0240
+#define    RTL8367C_FLOWCTRL_QUEUE7_MAX_PAGE_COUNT_OFFSET    0
+#define    RTL8367C_FLOWCTRL_QUEUE7_MAX_PAGE_COUNT_MASK    0x7FF
+
+#define    RTL8367C_REG_FLOWCTRL_PORT_MAX_PAGE_COUNT    0x0241
+#define    RTL8367C_FLOWCTRL_PORT_MAX_PAGE_COUNT_OFFSET    0
+#define    RTL8367C_FLOWCTRL_PORT_MAX_PAGE_COUNT_MASK    0x7FF
+
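+/*
+ * The *_PAGE_COUNT registers read back current shared-buffer usage in
+ * pages (same 0x7FF granularity as the drop thresholds above), while the
+ * *_MAX_PAGE_COUNT variants appear to latch the high-water mark.
+ */
+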
+#define    RTL8367C_REG_FLOWCTRL_TOTAL_PACKET_COUNT    0x0243
+
+#define    RTL8367C_REG_HIGH_QUEUE_MASK0    0x0244
+#define    RTL8367C_PORT1_HIGH_QUEUE_MASK_OFFSET    8
+#define    RTL8367C_PORT1_HIGH_QUEUE_MASK_MASK    0xFF00
+#define    RTL8367C_PORT0_HIGH_QUEUE_MASK_OFFSET    0
+#define    RTL8367C_PORT0_HIGH_QUEUE_MASK_MASK    0xFF
+
+#define    RTL8367C_REG_HIGH_QUEUE_MASK1    0x0245
+#define    RTL8367C_PORT3_HIGH_QUEUE_MASK_OFFSET    8
+#define    RTL8367C_PORT3_HIGH_QUEUE_MASK_MASK    0xFF00
+#define    RTL8367C_PORT2_HIGH_QUEUE_MASK_OFFSET    0
+#define    RTL8367C_PORT2_HIGH_QUEUE_MASK_MASK    0xFF
+
+#define    RTL8367C_REG_HIGH_QUEUE_MASK2    0x0246
+#define    RTL8367C_PORT5_HIGH_QUEUE_MASK_OFFSET    8
+#define    RTL8367C_PORT5_HIGH_QUEUE_MASK_MASK    0xFF00
+#define    RTL8367C_PORT4_HIGH_QUEUE_MASK_OFFSET    0
+#define    RTL8367C_PORT4_HIGH_QUEUE_MASK_MASK    0xFF
+
+#define    RTL8367C_REG_HIGH_QUEUE_MASK3    0x0247
+#define    RTL8367C_PORT7_HIGH_QUEUE_MASK_OFFSET    8
+#define    RTL8367C_PORT7_HIGH_QUEUE_MASK_MASK    0xFF00
+#define    RTL8367C_PORT6_HIGH_QUEUE_MASK_OFFSET    0
+#define    RTL8367C_PORT6_HIGH_QUEUE_MASK_MASK    0xFF
+
+#define    RTL8367C_REG_HIGH_QUEUE_MASK4    0x0248
+#define    RTL8367C_PORT9_HIGH_QUEUE_MASK_OFFSET    8
+#define    RTL8367C_PORT9_HIGH_QUEUE_MASK_MASK    0xFF00
+#define    RTL8367C_PORT8_HIGH_QUEUE_MASK_OFFSET    0
+#define    RTL8367C_PORT8_HIGH_QUEUE_MASK_MASK    0xFF
+
+#define    RTL8367C_REG_HIGH_QUEUE_MASK5    0x0249
+#define    RTL8367C_HIGH_QUEUE_MASK5_OFFSET    0
+#define    RTL8367C_HIGH_QUEUE_MASK5_MASK    0xFF
+
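+/*
+ * The high-queue masks pack two ports per register, one byte per port and
+ * one bit per queue (port 1 in bits 15:8 of MASK0, port 0 in bits 7:0);
+ * HIGH_QUEUE_MASK5 would then hold the single byte for port 10.
+ */
+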
+#define    RTL8367C_REG_LOW_QUEUE_TH    0x024c
+#define    RTL8367C_LOW_QUEUE_TH_OFFSET    0
+#define    RTL8367C_LOW_QUEUE_TH_MASK    0x7FF
+
+#define    RTL8367C_REG_TH_TX_PREFET    0x0250
+#define    RTL8367C_TH_TX_PREFET_OFFSET    0
+#define    RTL8367C_TH_TX_PREFET_MASK    0xFF
+
+#define    RTL8367C_REG_DUMMY_0251    0x0251
+
+#define    RTL8367C_REG_DUMMY_0252    0x0252
+
+#define    RTL8367C_REG_DUMMY_0253    0x0253
+
+#define    RTL8367C_REG_DUMMY_0254    0x0254
+
+#define    RTL8367C_REG_DUMMY_0255    0x0255
+
+#define    RTL8367C_REG_DUMMY_0256    0x0256
+
+#define    RTL8367C_REG_DUMMY_0257    0x0257
+
+#define    RTL8367C_REG_DUMMY_0258    0x0258
+
+#define    RTL8367C_REG_DUMMY_0259    0x0259
+
+#define    RTL8367C_REG_DUMMY_025A    0x025A
+
+#define    RTL8367C_REG_DUMMY_025B    0x025B
+
+#define    RTL8367C_REG_DUMMY_025C    0x025C
+
+#define    RTL8367C_REG_Q_TXPKT_CNT_CTL    0x025d
+#define    RTL8367C_QUEUE_PKT_CNT_CLR_OFFSET    4
+#define    RTL8367C_QUEUE_PKT_CNT_CLR_MASK    0x10
+#define    RTL8367C_PORT_ID_QUEUE_PKT_CNT_OFFSET    0
+#define    RTL8367C_PORT_ID_QUEUE_PKT_CNT_MASK    0xF
+
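+/*
+ * The per-queue TX packet counters below are banked by port: select the
+ * port via PORT_ID_QUEUE_PKT_CNT (QUEUE_PKT_CNT_CLR clears), then read
+ * each 32-bit count from its L/H pair, roughly:
+ *
+ *   cnt = ((unsigned long)q0_h << 16) | q0_l;
+ */
+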
+#define    RTL8367C_REG_Q0_TXPKT_CNT_L    0x025e
+
+#define    RTL8367C_REG_Q0_TXPKT_CNT_H    0x025f
+
+#define    RTL8367C_REG_Q1_TXPKT_CNT_L    0x0260
+
+#define    RTL8367C_REG_Q1_TXPKT_CNT_H    0x0261
+
+#define    RTL8367C_REG_Q2_TXPKT_CNT_L    0x0262
+
+#define    RTL8367C_REG_Q2_TXPKT_CNT_H    0x0263
+
+#define    RTL8367C_REG_Q3_TXPKT_CNT_L    0x0264
+
+#define    RTL8367C_REG_Q3_TXPKT_CNT_H    0x0265
+
+#define    RTL8367C_REG_Q4_TXPKT_CNT_L    0x0266
+
+#define    RTL8367C_REG_Q4_TXPKT_CNT_H    0x0267
+
+#define    RTL8367C_REG_Q5_TXPKT_CNT_L    0x0268
+
+#define    RTL8367C_REG_Q5_TXPKT_CNT_H    0x0269
+
+#define    RTL8367C_REG_Q6_TXPKT_CNT_L    0x026a
+
+#define    RTL8367C_REG_Q6_TXPKT_CNT_H    0x026b
+
+#define    RTL8367C_REG_Q7_TXPKT_CNT_L    0x026c
+
+#define    RTL8367C_REG_Q7_TXPKT_CNT_H    0x026d
+
+/* (16'h0300)sch_reg */
+
+#define    RTL8367C_REG_SCHEDULE_WFQ_CTRL    0x0300
+#define    RTL8367C_SCHEDULE_WFQ_CTRL_OFFSET    0
+#define    RTL8367C_SCHEDULE_WFQ_CTRL_MASK    0x1
+
+#define    RTL8367C_REG_SCHEDULE_WFQ_BURST_SIZE    0x0301
+
+#define    RTL8367C_REG_SCHEDULE_QUEUE_TYPE_CTRL0    0x0302
+#define    RTL8367C_PORT1_QUEUE7_TYPE_OFFSET    15
+#define    RTL8367C_PORT1_QUEUE7_TYPE_MASK    0x8000
+#define    RTL8367C_PORT1_QUEUE6_TYPE_OFFSET    14
+#define    RTL8367C_PORT1_QUEUE6_TYPE_MASK    0x4000
+#define    RTL8367C_PORT1_QUEUE5_TYPE_OFFSET    13
+#define    RTL8367C_PORT1_QUEUE5_TYPE_MASK    0x2000
+#define    RTL8367C_PORT1_QUEUE4_TYPE_OFFSET    12
+#define    RTL8367C_PORT1_QUEUE4_TYPE_MASK    0x1000
+#define    RTL8367C_PORT1_QUEUE3_TYPE_OFFSET    11
+#define    RTL8367C_PORT1_QUEUE3_TYPE_MASK    0x800
+#define    RTL8367C_PORT1_QUEUE2_TYPE_OFFSET    10
+#define    RTL8367C_PORT1_QUEUE2_TYPE_MASK    0x400
+#define    RTL8367C_PORT1_QUEUE1_TYPE_OFFSET    9
+#define    RTL8367C_PORT1_QUEUE1_TYPE_MASK    0x200
+#define    RTL8367C_PORT1_QUEUE0_TYPE_OFFSET    8
+#define    RTL8367C_PORT1_QUEUE0_TYPE_MASK    0x100
+#define    RTL8367C_PORT0_QUEUE7_TYPE_OFFSET    7
+#define    RTL8367C_PORT0_QUEUE7_TYPE_MASK    0x80
+#define    RTL8367C_PORT0_QUEUE6_TYPE_OFFSET    6
+#define    RTL8367C_PORT0_QUEUE6_TYPE_MASK    0x40
+#define    RTL8367C_PORT0_QUEUE5_TYPE_OFFSET    5
+#define    RTL8367C_PORT0_QUEUE5_TYPE_MASK    0x20
+#define    RTL8367C_PORT0_QUEUE4_TYPE_OFFSET    4
+#define    RTL8367C_PORT0_QUEUE4_TYPE_MASK    0x10
+#define    RTL8367C_PORT0_QUEUE3_TYPE_OFFSET    3
+#define    RTL8367C_PORT0_QUEUE3_TYPE_MASK    0x8
+#define    RTL8367C_PORT0_QUEUE2_TYPE_OFFSET    2
+#define    RTL8367C_PORT0_QUEUE2_TYPE_MASK    0x4
+#define    RTL8367C_PORT0_QUEUE1_TYPE_OFFSET    1
+#define    RTL8367C_PORT0_QUEUE1_TYPE_MASK    0x2
+#define    RTL8367C_PORT0_QUEUE0_TYPE_OFFSET    0
+#define    RTL8367C_PORT0_QUEUE0_TYPE_MASK    0x1
+
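+/*
+ * Queue-type bits (strict-priority vs. WFQ selection, judging by the WFQ
+ * control above) pack two ports per register, one bit per queue: for port
+ * p and queue q the register is SCHEDULE_QUEUE_TYPE_CTRL0 + p / 2 and the
+ * bit position is (p % 2) * 8 + q, matching the fields above and below.
+ */
+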
+#define    RTL8367C_REG_SCHEDULE_QUEUE_TYPE_CTRL1    0x0303
+#define    RTL8367C_PORT3_QUEUE7_TYPE_OFFSET    15
+#define    RTL8367C_PORT3_QUEUE7_TYPE_MASK    0x8000
+#define    RTL8367C_PORT3_QUEUE6_TYPE_OFFSET    14
+#define    RTL8367C_PORT3_QUEUE6_TYPE_MASK    0x4000
+#define    RTL8367C_PORT3_QUEUE5_TYPE_OFFSET    13
+#define    RTL8367C_PORT3_QUEUE5_TYPE_MASK    0x2000
+#define    RTL8367C_PORT3_QUEUE4_TYPE_OFFSET    12
+#define    RTL8367C_PORT3_QUEUE4_TYPE_MASK    0x1000
+#define    RTL8367C_PORT3_QUEUE3_TYPE_OFFSET    11
+#define    RTL8367C_PORT3_QUEUE3_TYPE_MASK    0x800
+#define    RTL8367C_PORT3_QUEUE2_TYPE_OFFSET    10
+#define    RTL8367C_PORT3_QUEUE2_TYPE_MASK    0x400
+#define    RTL8367C_PORT3_QUEUE1_TYPE_OFFSET    9
+#define    RTL8367C_PORT3_QUEUE1_TYPE_MASK    0x200
+#define    RTL8367C_PORT3_QUEUE0_TYPE_OFFSET    8
+#define    RTL8367C_PORT3_QUEUE0_TYPE_MASK    0x100
+#define    RTL8367C_PORT2_QUEUE7_TYPE_OFFSET    7
+#define    RTL8367C_PORT2_QUEUE7_TYPE_MASK    0x80
+#define    RTL8367C_PORT2_QUEUE6_TYPE_OFFSET    6
+#define    RTL8367C_PORT2_QUEUE6_TYPE_MASK    0x40
+#define    RTL8367C_PORT2_QUEUE5_TYPE_OFFSET    5
+#define    RTL8367C_PORT2_QUEUE5_TYPE_MASK    0x20
+#define    RTL8367C_PORT2_QUEUE4_TYPE_OFFSET    4
+#define    RTL8367C_PORT2_QUEUE4_TYPE_MASK    0x10
+#define    RTL8367C_PORT2_QUEUE3_TYPE_OFFSET    3
+#define    RTL8367C_PORT2_QUEUE3_TYPE_MASK    0x8
+#define    RTL8367C_PORT2_QUEUE2_TYPE_OFFSET    2
+#define    RTL8367C_PORT2_QUEUE2_TYPE_MASK    0x4
+#define    RTL8367C_PORT2_QUEUE1_TYPE_OFFSET    1
+#define    RTL8367C_PORT2_QUEUE1_TYPE_MASK    0x2
+#define    RTL8367C_PORT2_QUEUE0_TYPE_OFFSET    0
+#define    RTL8367C_PORT2_QUEUE0_TYPE_MASK    0x1
+
+#define    RTL8367C_REG_SCHEDULE_QUEUE_TYPE_CTRL2    0x0304
+#define    RTL8367C_PORT5_QUEUE7_TYPE_OFFSET    15
+#define    RTL8367C_PORT5_QUEUE7_TYPE_MASK    0x8000
+#define    RTL8367C_PORT5_QUEUE6_TYPE_OFFSET    14
+#define    RTL8367C_PORT5_QUEUE6_TYPE_MASK    0x4000
+#define    RTL8367C_PORT5_QUEUE5_TYPE_OFFSET    13
+#define    RTL8367C_PORT5_QUEUE5_TYPE_MASK    0x2000
+#define    RTL8367C_PORT5_QUEUE4_TYPE_OFFSET    12
+#define    RTL8367C_PORT5_QUEUE4_TYPE_MASK    0x1000
+#define    RTL8367C_PORT5_QUEUE3_TYPE_OFFSET    11
+#define    RTL8367C_PORT5_QUEUE3_TYPE_MASK    0x800
+#define    RTL8367C_PORT5_QUEUE2_TYPE_OFFSET    10
+#define    RTL8367C_PORT5_QUEUE2_TYPE_MASK    0x400
+#define    RTL8367C_PORT5_QUEUE1_TYPE_OFFSET    9
+#define    RTL8367C_PORT5_QUEUE1_TYPE_MASK    0x200
+#define    RTL8367C_PORT5_QUEUE0_TYPE_OFFSET    8
+#define    RTL8367C_PORT5_QUEUE0_TYPE_MASK    0x100
+#define    RTL8367C_PORT4_QUEUE7_TYPE_OFFSET    7
+#define    RTL8367C_PORT4_QUEUE7_TYPE_MASK    0x80
+#define    RTL8367C_PORT4_QUEUE6_TYPE_OFFSET    6
+#define    RTL8367C_PORT4_QUEUE6_TYPE_MASK    0x40
+#define    RTL8367C_PORT4_QUEUE5_TYPE_OFFSET    5
+#define    RTL8367C_PORT4_QUEUE5_TYPE_MASK    0x20
+#define    RTL8367C_PORT4_QUEUE4_TYPE_OFFSET    4
+#define    RTL8367C_PORT4_QUEUE4_TYPE_MASK    0x10
+#define    RTL8367C_PORT4_QUEUE3_TYPE_OFFSET    3
+#define    RTL8367C_PORT4_QUEUE3_TYPE_MASK    0x8
+#define    RTL8367C_PORT4_QUEUE2_TYPE_OFFSET    2
+#define    RTL8367C_PORT4_QUEUE2_TYPE_MASK    0x4
+#define    RTL8367C_PORT4_QUEUE1_TYPE_OFFSET    1
+#define    RTL8367C_PORT4_QUEUE1_TYPE_MASK    0x2
+#define    RTL8367C_PORT4_QUEUE0_TYPE_OFFSET    0
+#define    RTL8367C_PORT4_QUEUE0_TYPE_MASK    0x1
+
+#define    RTL8367C_REG_SCHEDULE_QUEUE_TYPE_CTRL3    0x0305
+#define    RTL8367C_PORT7_QUEUE7_TYPE_OFFSET    15
+#define    RTL8367C_PORT7_QUEUE7_TYPE_MASK    0x8000
+#define    RTL8367C_PORT7_QUEUE6_TYPE_OFFSET    14
+#define    RTL8367C_PORT7_QUEUE6_TYPE_MASK    0x4000
+#define    RTL8367C_PORT7_QUEUE5_TYPE_OFFSET    13
+#define    RTL8367C_PORT7_QUEUE5_TYPE_MASK    0x2000
+#define    RTL8367C_PORT7_QUEUE4_TYPE_OFFSET    12
+#define    RTL8367C_PORT7_QUEUE4_TYPE_MASK    0x1000
+#define    RTL8367C_PORT7_QUEUE3_TYPE_OFFSET    11
+#define    RTL8367C_PORT7_QUEUE3_TYPE_MASK    0x800
+#define    RTL8367C_PORT7_QUEUE2_TYPE_OFFSET    10
+#define    RTL8367C_PORT7_QUEUE2_TYPE_MASK    0x400
+#define    RTL8367C_PORT7_QUEUE1_TYPE_OFFSET    9
+#define    RTL8367C_PORT7_QUEUE1_TYPE_MASK    0x200
+#define    RTL8367C_PORT7_QUEUE0_TYPE_OFFSET    8
+#define    RTL8367C_PORT7_QUEUE0_TYPE_MASK    0x100
+#define    RTL8367C_PORT6_QUEUE7_TYPE_OFFSET    7
+#define    RTL8367C_PORT6_QUEUE7_TYPE_MASK    0x80
+#define    RTL8367C_PORT6_QUEUE6_TYPE_OFFSET    6
+#define    RTL8367C_PORT6_QUEUE6_TYPE_MASK    0x40
+#define    RTL8367C_PORT6_QUEUE5_TYPE_OFFSET    5
+#define    RTL8367C_PORT6_QUEUE5_TYPE_MASK    0x20
+#define    RTL8367C_PORT6_QUEUE4_TYPE_OFFSET    4
+#define    RTL8367C_PORT6_QUEUE4_TYPE_MASK    0x10
+#define    RTL8367C_PORT6_QUEUE3_TYPE_OFFSET    3
+#define    RTL8367C_PORT6_QUEUE3_TYPE_MASK    0x8
+#define    RTL8367C_PORT6_QUEUE2_TYPE_OFFSET    2
+#define    RTL8367C_PORT6_QUEUE2_TYPE_MASK    0x4
+#define    RTL8367C_PORT6_QUEUE1_TYPE_OFFSET    1
+#define    RTL8367C_PORT6_QUEUE1_TYPE_MASK    0x2
+#define    RTL8367C_SCHEDULE_QUEUE_TYPE_CTRL3_PORT6_QUEUE0_TYPE_OFFSET    0
+#define    RTL8367C_SCHEDULE_QUEUE_TYPE_CTRL3_PORT6_QUEUE0_TYPE_MASK    0x1
+
+#define    RTL8367C_REG_SCHEDULE_QUEUE_TYPE_CTRL4    0x0306
+#define    RTL8367C_PORT9_QUEUE7_TYPE_OFFSET    15
+#define    RTL8367C_PORT9_QUEUE7_TYPE_MASK    0x8000
+#define    RTL8367C_PORT9_QUEUE6_TYPE_OFFSET    14
+#define    RTL8367C_PORT9_QUEUE6_TYPE_MASK    0x4000
+#define    RTL8367C_PORT9_QUEUE5_TYPE_OFFSET    13
+#define    RTL8367C_PORT9_QUEUE5_TYPE_MASK    0x2000
+#define    RTL8367C_PORT9_QUEUE4_TYPE_OFFSET    12
+#define    RTL8367C_PORT9_QUEUE4_TYPE_MASK    0x1000
+#define    RTL8367C_PORT9_QUEUE3_TYPE_OFFSET    11
+#define    RTL8367C_PORT9_QUEUE3_TYPE_MASK    0x800
+#define    RTL8367C_PORT9_QUEUE2_TYPE_OFFSET    10
+#define    RTL8367C_PORT9_QUEUE2_TYPE_MASK    0x400
+#define    RTL8367C_PORT9_QUEUE1_TYPE_OFFSET    9
+#define    RTL8367C_PORT9_QUEUE1_TYPE_MASK    0x200
+#define    RTL8367C_PORT9_QUEUE0_TYPE_OFFSET    8
+#define    RTL8367C_PORT9_QUEUE0_TYPE_MASK    0x100
+#define    RTL8367C_PORT8_QUEUE7_TYPE_OFFSET    7
+#define    RTL8367C_PORT8_QUEUE7_TYPE_MASK    0x80
+#define    RTL8367C_PORT8_QUEUE6_TYPE_OFFSET    6
+#define    RTL8367C_PORT8_QUEUE6_TYPE_MASK    0x40
+#define    RTL8367C_PORT8_QUEUE5_TYPE_OFFSET    5
+#define    RTL8367C_PORT8_QUEUE5_TYPE_MASK    0x20
+#define    RTL8367C_PORT8_QUEUE4_TYPE_OFFSET    4
+#define    RTL8367C_PORT8_QUEUE4_TYPE_MASK    0x10
+#define    RTL8367C_PORT8_QUEUE3_TYPE_OFFSET    3
+#define    RTL8367C_PORT8_QUEUE3_TYPE_MASK    0x8
+#define    RTL8367C_PORT8_QUEUE2_TYPE_OFFSET    2
+#define    RTL8367C_PORT8_QUEUE2_TYPE_MASK    0x4
+#define    RTL8367C_PORT8_QUEUE1_TYPE_OFFSET    1
+#define    RTL8367C_PORT8_QUEUE1_TYPE_MASK    0x2
+#define    RTL8367C_SCHEDULE_QUEUE_TYPE_CTRL4_PORT6_QUEUE0_TYPE_OFFSET    0
+#define    RTL8367C_SCHEDULE_QUEUE_TYPE_CTRL4_PORT6_QUEUE0_TYPE_MASK    0x1
+
+#define    RTL8367C_REG_SCHEDULE_QUEUE_TYPE_CTRL5    0x0307
+#define    RTL8367C_PORT10_QUEUE7_TYPE_OFFSET    7
+#define    RTL8367C_PORT10_QUEUE7_TYPE_MASK    0x80
+#define    RTL8367C_PORT10_QUEUE6_TYPE_OFFSET    6
+#define    RTL8367C_PORT10_QUEUE6_TYPE_MASK    0x40
+#define    RTL8367C_PORT10_QUEUE5_TYPE_OFFSET    5
+#define    RTL8367C_PORT10_QUEUE5_TYPE_MASK    0x20
+#define    RTL8367C_PORT10_QUEUE4_TYPE_OFFSET    4
+#define    RTL8367C_PORT10_QUEUE4_TYPE_MASK    0x10
+#define    RTL8367C_PORT10_QUEUE3_TYPE_OFFSET    3
+#define    RTL8367C_PORT10_QUEUE3_TYPE_MASK    0x8
+#define    RTL8367C_PORT10_QUEUE2_TYPE_OFFSET    2
+#define    RTL8367C_PORT10_QUEUE2_TYPE_MASK    0x4
+#define    RTL8367C_PORT10_QUEUE1_TYPE_OFFSET    1
+#define    RTL8367C_PORT10_QUEUE1_TYPE_MASK    0x2
+#define    RTL8367C_PORT10_QUEUE0_TYPE_OFFSET    0
+#define    RTL8367C_PORT10_QUEUE0_TYPE_MASK    0x1
+
+#define    RTL8367C_REG_SCHEDULE_APR_CTRL0    0x030a
+#define    RTL8367C_PORT10_APR_ENABLE_OFFSET    10
+#define    RTL8367C_PORT10_APR_ENABLE_MASK    0x400
+#define    RTL8367C_PORT9_APR_ENABLE_OFFSET    9
+#define    RTL8367C_PORT9_APR_ENABLE_MASK    0x200
+#define    RTL8367C_PORT8_APR_ENABLE_OFFSET    8
+#define    RTL8367C_PORT8_APR_ENABLE_MASK    0x100
+#define    RTL8367C_PORT7_APR_ENABLE_OFFSET    7
+#define    RTL8367C_PORT7_APR_ENABLE_MASK    0x80
+#define    RTL8367C_PORT6_APR_ENABLE_OFFSET    6
+#define    RTL8367C_PORT6_APR_ENABLE_MASK    0x40
+#define    RTL8367C_PORT5_APR_ENABLE_OFFSET    5
+#define    RTL8367C_PORT5_APR_ENABLE_MASK    0x20
+#define    RTL8367C_PORT4_APR_ENABLE_OFFSET    4
+#define    RTL8367C_PORT4_APR_ENABLE_MASK    0x10
+#define    RTL8367C_PORT3_APR_ENABLE_OFFSET    3
+#define    RTL8367C_PORT3_APR_ENABLE_MASK    0x8
+#define    RTL8367C_PORT2_APR_ENABLE_OFFSET    2
+#define    RTL8367C_PORT2_APR_ENABLE_MASK    0x4
+#define    RTL8367C_PORT1_APR_ENABLE_OFFSET    1
+#define    RTL8367C_PORT1_APR_ENABLE_MASK    0x2
+#define    RTL8367C_PORT0_APR_ENABLE_OFFSET    0
+#define    RTL8367C_PORT0_APR_ENABLE_MASK    0x1
+
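+/*
+ * Illustrative sketch (not part of the vendor header): every field above is
+ * described by an _OFFSET/_MASK pair, so a read-modify-write of a single
+ * field follows the usual pattern below.  rtl8367c_smi_read() and
+ * rtl8367c_smi_write() are hypothetical accessors standing in for the
+ * driver's real register I/O routines, hence the #if 0 guard.
+ */
+#if 0
+static int rtl8367c_setbits(u32 reg, u32 mask, u32 offset, u32 val)
+{
+	u32 data;
+	int ret;
+
+	ret = rtl8367c_smi_read(reg, &data);
+	if (ret)
+		return ret;
+	/* clear the field, then insert the new value at its offset */
+	data = (data & ~mask) | ((val << offset) & mask);
+	return rtl8367c_smi_write(reg, data);
+}
+
+/*
+ * e.g. enabling APR on port 3:
+ *	rtl8367c_setbits(RTL8367C_REG_SCHEDULE_APR_CTRL0,
+ *			 RTL8367C_PORT3_APR_ENABLE_MASK,
+ *			 RTL8367C_PORT3_APR_ENABLE_OFFSET, 1);
+ */
+#endif
+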
+#define    RTL8367C_REG_SCHEDULE_PORT0_QUEUE0_WFQ_WEIGHT    0x030c
+
+#define    RTL8367C_REG_SCHEDULE_PORT0_QUEUE1_WFQ_WEIGHT    0x030d
+#define    RTL8367C_SCHEDULE_PORT0_QUEUE1_WFQ_WEIGHT_OFFSET    0
+#define    RTL8367C_SCHEDULE_PORT0_QUEUE1_WFQ_WEIGHT_MASK    0x7F
+
+#define    RTL8367C_REG_SCHEDULE_PORT0_QUEUE2_WFQ_WEIGHT    0x030e
+#define    RTL8367C_SCHEDULE_PORT0_QUEUE2_WFQ_WEIGHT_OFFSET    0
+#define    RTL8367C_SCHEDULE_PORT0_QUEUE2_WFQ_WEIGHT_MASK    0x7F
+
+#define    RTL8367C_REG_SCHEDULE_PORT0_QUEUE3_WFQ_WEIGHT    0x030f
+#define    RTL8367C_SCHEDULE_PORT0_QUEUE3_WFQ_WEIGHT_OFFSET    0
+#define    RTL8367C_SCHEDULE_PORT0_QUEUE3_WFQ_WEIGHT_MASK    0x7F
+
+#define    RTL8367C_REG_SCHEDULE_PORT0_QUEUE4_WFQ_WEIGHT    0x0310
+#define    RTL8367C_SCHEDULE_PORT0_QUEUE4_WFQ_WEIGHT_OFFSET    0
+#define    RTL8367C_SCHEDULE_PORT0_QUEUE4_WFQ_WEIGHT_MASK    0x7F
+
+#define    RTL8367C_REG_SCHEDULE_PORT0_QUEUE5_WFQ_WEIGHT    0x0311
+#define    RTL8367C_SCHEDULE_PORT0_QUEUE5_WFQ_WEIGHT_OFFSET    0
+#define    RTL8367C_SCHEDULE_PORT0_QUEUE5_WFQ_WEIGHT_MASK    0x7F
+
+#define    RTL8367C_REG_SCHEDULE_PORT0_QUEUE6_WFQ_WEIGHT    0x0312
+#define    RTL8367C_SCHEDULE_PORT0_QUEUE6_WFQ_WEIGHT_OFFSET    0
+#define    RTL8367C_SCHEDULE_PORT0_QUEUE6_WFQ_WEIGHT_MASK    0x7F
+
+#define    RTL8367C_REG_SCHEDULE_PORT0_QUEUE7_WFQ_WEIGHT    0x0313
+#define    RTL8367C_SCHEDULE_PORT0_QUEUE7_WFQ_WEIGHT_OFFSET    0
+#define    RTL8367C_SCHEDULE_PORT0_QUEUE7_WFQ_WEIGHT_MASK    0x7F
+
+#define    RTL8367C_REG_SCHEDULE_PORT1_QUEUE0_WFQ_WEIGHT    0x0314
+
+#define    RTL8367C_REG_SCHEDULE_PORT1_QUEUE1_WFQ_WEIGHT    0x0315
+#define    RTL8367C_SCHEDULE_PORT1_QUEUE1_WFQ_WEIGHT_OFFSET    0
+#define    RTL8367C_SCHEDULE_PORT1_QUEUE1_WFQ_WEIGHT_MASK    0x7F
+
+#define    RTL8367C_REG_SCHEDULE_PORT1_QUEUE2_WFQ_WEIGHT    0x0316
+#define    RTL8367C_SCHEDULE_PORT1_QUEUE2_WFQ_WEIGHT_OFFSET    0
+#define    RTL8367C_SCHEDULE_PORT1_QUEUE2_WFQ_WEIGHT_MASK    0x7F
+
+#define    RTL8367C_REG_SCHEDULE_PORT1_QUEUE3_WFQ_WEIGHT    0x0317
+#define    RTL8367C_SCHEDULE_PORT1_QUEUE3_WFQ_WEIGHT_OFFSET    0
+#define    RTL8367C_SCHEDULE_PORT1_QUEUE3_WFQ_WEIGHT_MASK    0x7F
+
+#define    RTL8367C_REG_SCHEDULE_PORT1_QUEUE4_WFQ_WEIGHT    0x0318
+#define    RTL8367C_SCHEDULE_PORT1_QUEUE4_WFQ_WEIGHT_OFFSET    0
+#define    RTL8367C_SCHEDULE_PORT1_QUEUE4_WFQ_WEIGHT_MASK    0x7F
+
+#define    RTL8367C_REG_SCHEDULE_PORT1_QUEUE5_WFQ_WEIGHT    0x0319
+#define    RTL8367C_SCHEDULE_PORT1_QUEUE5_WFQ_WEIGHT_OFFSET    0
+#define    RTL8367C_SCHEDULE_PORT1_QUEUE5_WFQ_WEIGHT_MASK    0x7F
+
+#define    RTL8367C_REG_SCHEDULE_PORT1_QUEUE6_WFQ_WEIGHT    0x031a
+#define    RTL8367C_SCHEDULE_PORT1_QUEUE6_WFQ_WEIGHT_OFFSET    0
+#define    RTL8367C_SCHEDULE_PORT1_QUEUE6_WFQ_WEIGHT_MASK    0x7F
+
+#define    RTL8367C_REG_SCHEDULE_PORT1_QUEUE7_WFQ_WEIGHT    0x031b
+#define    RTL8367C_SCHEDULE_PORT1_QUEUE7_WFQ_WEIGHT_OFFSET    0
+#define    RTL8367C_SCHEDULE_PORT1_QUEUE7_WFQ_WEIGHT_MASK    0x7F
+
+#define    RTL8367C_REG_SCHEDULE_PORT2_QUEUE0_WFQ_WEIGHT    0x031c
+
+#define    RTL8367C_REG_SCHEDULE_PORT2_QUEUE1_WFQ_WEIGHT    0x031d
+#define    RTL8367C_SCHEDULE_PORT2_QUEUE1_WFQ_WEIGHT_OFFSET    0
+#define    RTL8367C_SCHEDULE_PORT2_QUEUE1_WFQ_WEIGHT_MASK    0x7F
+
+#define    RTL8367C_REG_SCHEDULE_PORT2_QUEUE2_WFQ_WEIGHT    0x031e
+#define    RTL8367C_SCHEDULE_PORT2_QUEUE2_WFQ_WEIGHT_OFFSET    0
+#define    RTL8367C_SCHEDULE_PORT2_QUEUE2_WFQ_WEIGHT_MASK    0x7F
+
+#define    RTL8367C_REG_SCHEDULE_PORT2_QUEUE3_WFQ_WEIGHT    0x031f
+#define    RTL8367C_SCHEDULE_PORT2_QUEUE3_WFQ_WEIGHT_OFFSET    0
+#define    RTL8367C_SCHEDULE_PORT2_QUEUE3_WFQ_WEIGHT_MASK    0x7F
+
+#define    RTL8367C_REG_SCHEDULE_PORT2_QUEUE4_WFQ_WEIGHT    0x0320
+#define    RTL8367C_SCHEDULE_PORT2_QUEUE4_WFQ_WEIGHT_OFFSET    0
+#define    RTL8367C_SCHEDULE_PORT2_QUEUE4_WFQ_WEIGHT_MASK    0x7F
+
+#define    RTL8367C_REG_SCHEDULE_PORT2_QUEUE5_WFQ_WEIGHT    0x0321
+#define    RTL8367C_SCHEDULE_PORT2_QUEUE5_WFQ_WEIGHT_OFFSET    0
+#define    RTL8367C_SCHEDULE_PORT2_QUEUE5_WFQ_WEIGHT_MASK    0x7F
+
+#define    RTL8367C_REG_SCHEDULE_PORT2_QUEUE6_WFQ_WEIGHT    0x0322
+#define    RTL8367C_SCHEDULE_PORT2_QUEUE6_WFQ_WEIGHT_OFFSET    0
+#define    RTL8367C_SCHEDULE_PORT2_QUEUE6_WFQ_WEIGHT_MASK    0x7F
+
+#define    RTL8367C_REG_SCHEDULE_PORT2_QUEUE7_WFQ_WEIGHT    0x0323
+#define    RTL8367C_SCHEDULE_PORT2_QUEUE7_WFQ_WEIGHT_OFFSET    0
+#define    RTL8367C_SCHEDULE_PORT2_QUEUE7_WFQ_WEIGHT_MASK    0x7F
+
+#define    RTL8367C_REG_SCHEDULE_PORT3_QUEUE0_WFQ_WEIGHT    0x0324
+
+#define    RTL8367C_REG_SCHEDULE_PORT3_QUEUE1_WFQ_WEIGHT    0x0325
+#define    RTL8367C_SCHEDULE_PORT3_QUEUE1_WFQ_WEIGHT_OFFSET    0
+#define    RTL8367C_SCHEDULE_PORT3_QUEUE1_WFQ_WEIGHT_MASK    0x7F
+
+#define    RTL8367C_REG_SCHEDULE_PORT3_QUEUE2_WFQ_WEIGHT    0x0326
+#define    RTL8367C_SCHEDULE_PORT3_QUEUE2_WFQ_WEIGHT_OFFSET    0
+#define    RTL8367C_SCHEDULE_PORT3_QUEUE2_WFQ_WEIGHT_MASK    0x7F
+
+#define    RTL8367C_REG_SCHEDULE_PORT3_QUEUE3_WFQ_WEIGHT    0x0327
+#define    RTL8367C_SCHEDULE_PORT3_QUEUE3_WFQ_WEIGHT_OFFSET    0
+#define    RTL8367C_SCHEDULE_PORT3_QUEUE3_WFQ_WEIGHT_MASK    0x7F
+
+#define    RTL8367C_REG_SCHEDULE_PORT3_QUEUE4_WFQ_WEIGHT    0x0328
+#define    RTL8367C_SCHEDULE_PORT3_QUEUE4_WFQ_WEIGHT_OFFSET    0
+#define    RTL8367C_SCHEDULE_PORT3_QUEUE4_WFQ_WEIGHT_MASK    0x7F
+
+#define    RTL8367C_REG_SCHEDULE_PORT3_QUEUE5_WFQ_WEIGHT    0x0329
+#define    RTL8367C_SCHEDULE_PORT3_QUEUE5_WFQ_WEIGHT_OFFSET    0
+#define    RTL8367C_SCHEDULE_PORT3_QUEUE5_WFQ_WEIGHT_MASK    0x7F
+
+#define    RTL8367C_REG_SCHEDULE_PORT3_QUEUE6_WFQ_WEIGHT    0x032a
+#define    RTL8367C_SCHEDULE_PORT3_QUEUE6_WFQ_WEIGHT_OFFSET    0
+#define    RTL8367C_SCHEDULE_PORT3_QUEUE6_WFQ_WEIGHT_MASK    0x7F
+
+#define    RTL8367C_REG_SCHEDULE_PORT3_QUEUE7_WFQ_WEIGHT    0x032b
+#define    RTL8367C_SCHEDULE_PORT3_QUEUE7_WFQ_WEIGHT_OFFSET    0
+#define    RTL8367C_SCHEDULE_PORT3_QUEUE7_WFQ_WEIGHT_MASK    0x7F
+
+#define    RTL8367C_REG_SCHEDULE_PORT4_QUEUE0_WFQ_WEIGHT    0x032c
+
+#define    RTL8367C_REG_SCHEDULE_PORT4_QUEUE1_WFQ_WEIGHT    0x032d
+#define    RTL8367C_SCHEDULE_PORT4_QUEUE1_WFQ_WEIGHT_OFFSET    0
+#define    RTL8367C_SCHEDULE_PORT4_QUEUE1_WFQ_WEIGHT_MASK    0x7F
+
+#define    RTL8367C_REG_SCHEDULE_PORT4_QUEUE2_WFQ_WEIGHT    0x032e
+#define    RTL8367C_SCHEDULE_PORT4_QUEUE2_WFQ_WEIGHT_OFFSET    0
+#define    RTL8367C_SCHEDULE_PORT4_QUEUE2_WFQ_WEIGHT_MASK    0x7F
+
+#define    RTL8367C_REG_SCHEDULE_PORT4_QUEUE3_WFQ_WEIGHT    0x032f
+#define    RTL8367C_SCHEDULE_PORT4_QUEUE3_WFQ_WEIGHT_OFFSET    0
+#define    RTL8367C_SCHEDULE_PORT4_QUEUE3_WFQ_WEIGHT_MASK    0x7F
+
+#define    RTL8367C_REG_SCHEDULE_PORT4_QUEUE4_WFQ_WEIGHT    0x0330
+#define    RTL8367C_SCHEDULE_PORT4_QUEUE4_WFQ_WEIGHT_OFFSET    0
+#define    RTL8367C_SCHEDULE_PORT4_QUEUE4_WFQ_WEIGHT_MASK    0x7F
+
+#define    RTL8367C_REG_SCHEDULE_PORT4_QUEUE5_WFQ_WEIGHT    0x0331
+#define    RTL8367C_SCHEDULE_PORT4_QUEUE5_WFQ_WEIGHT_OFFSET    0
+#define    RTL8367C_SCHEDULE_PORT4_QUEUE5_WFQ_WEIGHT_MASK    0x7F
+
+#define    RTL8367C_REG_SCHEDULE_PORT4_QUEUE6_WFQ_WEIGHT    0x0332
+#define    RTL8367C_SCHEDULE_PORT4_QUEUE6_WFQ_WEIGHT_OFFSET    0
+#define    RTL8367C_SCHEDULE_PORT4_QUEUE6_WFQ_WEIGHT_MASK    0x7F
+
+#define    RTL8367C_REG_SCHEDULE_PORT4_QUEUE7_WFQ_WEIGHT    0x0333
+#define    RTL8367C_SCHEDULE_PORT4_QUEUE7_WFQ_WEIGHT_OFFSET    0
+#define    RTL8367C_SCHEDULE_PORT4_QUEUE7_WFQ_WEIGHT_MASK    0x7F
+
+#define    RTL8367C_REG_SCHEDULE_PORT5_QUEUE0_WFQ_WEIGHT    0x0334
+
+#define    RTL8367C_REG_SCHEDULE_PORT5_QUEUE1_WFQ_WEIGHT    0x0335
+#define    RTL8367C_SCHEDULE_PORT5_QUEUE1_WFQ_WEIGHT_OFFSET    0
+#define    RTL8367C_SCHEDULE_PORT5_QUEUE1_WFQ_WEIGHT_MASK    0x7F
+
+#define    RTL8367C_REG_SCHEDULE_PORT5_QUEUE2_WFQ_WEIGHT    0x0336
+#define    RTL8367C_SCHEDULE_PORT5_QUEUE2_WFQ_WEIGHT_OFFSET    0
+#define    RTL8367C_SCHEDULE_PORT5_QUEUE2_WFQ_WEIGHT_MASK    0x7F
+
+#define    RTL8367C_REG_SCHEDULE_PORT5_QUEUE3_WFQ_WEIGHT    0x0337
+#define    RTL8367C_SCHEDULE_PORT5_QUEUE3_WFQ_WEIGHT_OFFSET    0
+#define    RTL8367C_SCHEDULE_PORT5_QUEUE3_WFQ_WEIGHT_MASK    0x7F
+
+#define    RTL8367C_REG_SCHEDULE_PORT5_QUEUE4_WFQ_WEIGHT    0x0338
+#define    RTL8367C_SCHEDULE_PORT5_QUEUE4_WFQ_WEIGHT_OFFSET    0
+#define    RTL8367C_SCHEDULE_PORT5_QUEUE4_WFQ_WEIGHT_MASK    0x7F
+
+#define    RTL8367C_REG_SCHEDULE_PORT5_QUEUE5_WFQ_WEIGHT    0x0339
+#define    RTL8367C_SCHEDULE_PORT5_QUEUE5_WFQ_WEIGHT_OFFSET    0
+#define    RTL8367C_SCHEDULE_PORT5_QUEUE5_WFQ_WEIGHT_MASK    0x7F
+
+#define    RTL8367C_REG_SCHEDULE_PORT5_QUEUE6_WFQ_WEIGHT    0x033a
+#define    RTL8367C_SCHEDULE_PORT5_QUEUE6_WFQ_WEIGHT_OFFSET    0
+#define    RTL8367C_SCHEDULE_PORT5_QUEUE6_WFQ_WEIGHT_MASK    0x7F
+
+#define    RTL8367C_REG_SCHEDULE_PORT5_QUEUE7_WFQ_WEIGHT    0x033b
+#define    RTL8367C_SCHEDULE_PORT5_QUEUE7_WFQ_WEIGHT_OFFSET    0
+#define    RTL8367C_SCHEDULE_PORT5_QUEUE7_WFQ_WEIGHT_MASK    0x7F
+
+#define    RTL8367C_REG_SCHEDULE_PORT6_QUEUE0_WFQ_WEIGHT    0x033c
+
+#define    RTL8367C_REG_SCHEDULE_PORT6_QUEUE1_WFQ_WEIGHT    0x033d
+#define    RTL8367C_SCHEDULE_PORT6_QUEUE1_WFQ_WEIGHT_OFFSET    0
+#define    RTL8367C_SCHEDULE_PORT6_QUEUE1_WFQ_WEIGHT_MASK    0x7F
+
+#define    RTL8367C_REG_SCHEDULE_PORT6_QUEUE2_WFQ_WEIGHT    0x033e
+#define    RTL8367C_SCHEDULE_PORT6_QUEUE2_WFQ_WEIGHT_OFFSET    0
+#define    RTL8367C_SCHEDULE_PORT6_QUEUE2_WFQ_WEIGHT_MASK    0x7F
+
+#define    RTL8367C_REG_SCHEDULE_PORT6_QUEUE3_WFQ_WEIGHT    0x033f
+#define    RTL8367C_SCHEDULE_PORT6_QUEUE3_WFQ_WEIGHT_OFFSET    0
+#define    RTL8367C_SCHEDULE_PORT6_QUEUE3_WFQ_WEIGHT_MASK    0x7F
+
+#define    RTL8367C_REG_SCHEDULE_PORT6_QUEUE4_WFQ_WEIGHT    0x0340
+#define    RTL8367C_SCHEDULE_PORT6_QUEUE4_WFQ_WEIGHT_OFFSET    0
+#define    RTL8367C_SCHEDULE_PORT6_QUEUE4_WFQ_WEIGHT_MASK    0x7F
+
+#define    RTL8367C_REG_SCHEDULE_PORT6_QUEUE5_WFQ_WEIGHT    0x0341
+#define    RTL8367C_SCHEDULE_PORT6_QUEUE5_WFQ_WEIGHT_OFFSET    0
+#define    RTL8367C_SCHEDULE_PORT6_QUEUE5_WFQ_WEIGHT_MASK    0x7F
+
+#define    RTL8367C_REG_SCHEDULE_PORT6_QUEUE6_WFQ_WEIGHT    0x0342
+#define    RTL8367C_SCHEDULE_PORT6_QUEUE6_WFQ_WEIGHT_OFFSET    0
+#define    RTL8367C_SCHEDULE_PORT6_QUEUE6_WFQ_WEIGHT_MASK    0x7F
+
+#define    RTL8367C_REG_SCHEDULE_PORT6_QUEUE7_WFQ_WEIGHT    0x0343
+#define    RTL8367C_SCHEDULE_PORT6_QUEUE7_WFQ_WEIGHT_OFFSET    0
+#define    RTL8367C_SCHEDULE_PORT6_QUEUE7_WFQ_WEIGHT_MASK    0x7F
+
+#define    RTL8367C_REG_SCHEDULE_PORT7_QUEUE0_WFQ_WEIGHT    0x0344
+
+#define    RTL8367C_REG_SCHEDULE_PORT7_QUEUE1_WFQ_WEIGHT    0x0345
+#define    RTL8367C_SCHEDULE_PORT7_QUEUE1_WFQ_WEIGHT_OFFSET    0
+#define    RTL8367C_SCHEDULE_PORT7_QUEUE1_WFQ_WEIGHT_MASK    0x7F
+
+#define    RTL8367C_REG_SCHEDULE_PORT7_QUEUE2_WFQ_WEIGHT    0x0346
+#define    RTL8367C_SCHEDULE_PORT7_QUEUE2_WFQ_WEIGHT_OFFSET    0
+#define    RTL8367C_SCHEDULE_PORT7_QUEUE2_WFQ_WEIGHT_MASK    0x7F
+
+#define    RTL8367C_REG_SCHEDULE_PORT7_QUEUE3_WFQ_WEIGHT    0x0347
+#define    RTL8367C_SCHEDULE_PORT7_QUEUE3_WFQ_WEIGHT_OFFSET    0
+#define    RTL8367C_SCHEDULE_PORT7_QUEUE3_WFQ_WEIGHT_MASK    0x7F
+
+#define    RTL8367C_REG_SCHEDULE_PORT7_QUEUE4_WFQ_WEIGHT    0x0348
+#define    RTL8367C_SCHEDULE_PORT7_QUEUE4_WFQ_WEIGHT_OFFSET    0
+#define    RTL8367C_SCHEDULE_PORT7_QUEUE4_WFQ_WEIGHT_MASK    0x7F
+
+#define    RTL8367C_REG_SCHEDULE_PORT7_QUEUE5_WFQ_WEIGHT    0x0349
+#define    RTL8367C_SCHEDULE_PORT7_QUEUE5_WFQ_WEIGHT_OFFSET    0
+#define    RTL8367C_SCHEDULE_PORT7_QUEUE5_WFQ_WEIGHT_MASK    0x7F
+
+#define    RTL8367C_REG_SCHEDULE_PORT7_QUEUE6_WFQ_WEIGHT    0x034a
+#define    RTL8367C_SCHEDULE_PORT7_QUEUE6_WFQ_WEIGHT_OFFSET    0
+#define    RTL8367C_SCHEDULE_PORT7_QUEUE6_WFQ_WEIGHT_MASK    0x7F
+
+#define    RTL8367C_REG_SCHEDULE_PORT7_QUEUE7_WFQ_WEIGHT    0x034b
+#define    RTL8367C_SCHEDULE_PORT7_QUEUE7_WFQ_WEIGHT_OFFSET    0
+#define    RTL8367C_SCHEDULE_PORT7_QUEUE7_WFQ_WEIGHT_MASK    0x7F
+
+#define    RTL8367C_REG_SCHEDULE_PORT8_QUEUE0_WFQ_WEIGHT    0x034c
+
+#define    RTL8367C_REG_SCHEDULE_PORT8_QUEUE1_WFQ_WEIGHT    0x034d
+#define    RTL8367C_SCHEDULE_PORT8_QUEUE1_WFQ_WEIGHT_OFFSET    0
+#define    RTL8367C_SCHEDULE_PORT8_QUEUE1_WFQ_WEIGHT_MASK    0x7F
+
+#define    RTL8367C_REG_SCHEDULE_PORT8_QUEUE2_WFQ_WEIGHT    0x034e
+#define    RTL8367C_SCHEDULE_PORT8_QUEUE2_WFQ_WEIGHT_OFFSET    0
+#define    RTL8367C_SCHEDULE_PORT8_QUEUE2_WFQ_WEIGHT_MASK    0x7F
+
+#define    RTL8367C_REG_SCHEDULE_PORT8_QUEUE3_WFQ_WEIGHT    0x034f
+#define    RTL8367C_SCHEDULE_PORT8_QUEUE3_WFQ_WEIGHT_OFFSET    0
+#define    RTL8367C_SCHEDULE_PORT8_QUEUE3_WFQ_WEIGHT_MASK    0x7F
+
+#define    RTL8367C_REG_SCHEDULE_PORT8_QUEUE4_WFQ_WEIGHT    0x0350
+#define    RTL8367C_SCHEDULE_PORT8_QUEUE4_WFQ_WEIGHT_OFFSET    0
+#define    RTL8367C_SCHEDULE_PORT8_QUEUE4_WFQ_WEIGHT_MASK    0x7F
+
+#define    RTL8367C_REG_SCHEDULE_PORT8_QUEUE5_WFQ_WEIGHT    0x0351
+#define    RTL8367C_SCHEDULE_PORT8_QUEUE5_WFQ_WEIGHT_OFFSET    0
+#define    RTL8367C_SCHEDULE_PORT8_QUEUE5_WFQ_WEIGHT_MASK    0x7F
+
+#define    RTL8367C_REG_SCHEDULE_PORT8_QUEUE6_WFQ_WEIGHT    0x0352
+#define    RTL8367C_SCHEDULE_PORT8_QUEUE6_WFQ_WEIGHT_OFFSET    0
+#define    RTL8367C_SCHEDULE_PORT8_QUEUE6_WFQ_WEIGHT_MASK    0x7F
+
+#define    RTL8367C_REG_SCHEDULE_PORT8_QUEUE7_WFQ_WEIGHT    0x0353
+#define    RTL8367C_SCHEDULE_PORT8_QUEUE7_WFQ_WEIGHT_OFFSET    0
+#define    RTL8367C_SCHEDULE_PORT8_QUEUE7_WFQ_WEIGHT_MASK    0x7F
+
+#define    RTL8367C_REG_SCHEDULE_PORT9_QUEUE0_WFQ_WEIGHT    0x0354
+
+#define    RTL8367C_REG_SCHEDULE_PORT9_QUEUE1_WFQ_WEIGHT    0x0355
+#define    RTL8367C_SCHEDULE_PORT9_QUEUE1_WFQ_WEIGHT_OFFSET    0
+#define    RTL8367C_SCHEDULE_PORT9_QUEUE1_WFQ_WEIGHT_MASK    0x7F
+
+#define    RTL8367C_REG_SCHEDULE_PORT9_QUEUE2_WFQ_WEIGHT    0x0356
+#define    RTL8367C_SCHEDULE_PORT9_QUEUE2_WFQ_WEIGHT_OFFSET    0
+#define    RTL8367C_SCHEDULE_PORT9_QUEUE2_WFQ_WEIGHT_MASK    0x7F
+
+#define    RTL8367C_REG_SCHEDULE_PORT9_QUEUE3_WFQ_WEIGHT    0x0357
+#define    RTL8367C_SCHEDULE_PORT9_QUEUE3_WFQ_WEIGHT_OFFSET    0
+#define    RTL8367C_SCHEDULE_PORT9_QUEUE3_WFQ_WEIGHT_MASK    0x7F
+
+#define    RTL8367C_REG_SCHEDULE_PORT9_QUEUE4_WFQ_WEIGHT    0x0358
+#define    RTL8367C_SCHEDULE_PORT9_QUEUE4_WFQ_WEIGHT_OFFSET    0
+#define    RTL8367C_SCHEDULE_PORT9_QUEUE4_WFQ_WEIGHT_MASK    0x7F
+
+#define    RTL8367C_REG_SCHEDULE_PORT9_QUEUE5_WFQ_WEIGHT    0x0359
+#define    RTL8367C_SCHEDULE_PORT9_QUEUE5_WFQ_WEIGHT_OFFSET    0
+#define    RTL8367C_SCHEDULE_PORT9_QUEUE5_WFQ_WEIGHT_MASK    0x7F
+
+#define    RTL8367C_REG_SCHEDULE_PORT9_QUEUE6_WFQ_WEIGHT    0x035a
+#define    RTL8367C_SCHEDULE_PORT9_QUEUE6_WFQ_WEIGHT_OFFSET    0
+#define    RTL8367C_SCHEDULE_PORT9_QUEUE6_WFQ_WEIGHT_MASK    0x7F
+
+#define    RTL8367C_REG_SCHEDULE_PORT9_QUEUE7_WFQ_WEIGHT    0x035b
+#define    RTL8367C_SCHEDULE_PORT9_QUEUE7_WFQ_WEIGHT_OFFSET    0
+#define    RTL8367C_SCHEDULE_PORT9_QUEUE7_WFQ_WEIGHT_MASK    0x7F
+
+#define    RTL8367C_REG_SCHEDULE_PORT10_QUEUE0_WFQ_WEIGHT    0x035c
+
+#define    RTL8367C_REG_SCHEDULE_PORT10_QUEUE1_WFQ_WEIGHT    0x035d
+#define    RTL8367C_SCHEDULE_PORT10_QUEUE1_WFQ_WEIGHT_OFFSET    0
+#define    RTL8367C_SCHEDULE_PORT10_QUEUE1_WFQ_WEIGHT_MASK    0x7F
+
+#define    RTL8367C_REG_SCHEDULE_PORT10_QUEUE2_WFQ_WEIGHT    0x035e
+#define    RTL8367C_SCHEDULE_PORT10_QUEUE2_WFQ_WEIGHT_OFFSET    0
+#define    RTL8367C_SCHEDULE_PORT10_QUEUE2_WFQ_WEIGHT_MASK    0x7F
+
+#define    RTL8367C_REG_SCHEDULE_PORT10_QUEUE3_WFQ_WEIGHT    0x035f
+#define    RTL8367C_SCHEDULE_PORT10_QUEUE3_WFQ_WEIGHT_OFFSET    0
+#define    RTL8367C_SCHEDULE_PORT10_QUEUE3_WFQ_WEIGHT_MASK    0x7F
+
+#define    RTL8367C_REG_SCHEDULE_PORT10_QUEUE4_WFQ_WEIGHT    0x0360
+#define    RTL8367C_SCHEDULE_PORT10_QUEUE4_WFQ_WEIGHT_OFFSET    0
+#define    RTL8367C_SCHEDULE_PORT10_QUEUE4_WFQ_WEIGHT_MASK    0x7F
+
+#define    RTL8367C_REG_SCHEDULE_PORT10_QUEUE5_WFQ_WEIGHT    0x0361
+#define    RTL8367C_SCHEDULE_PORT10_QUEUE5_WFQ_WEIGHT_OFFSET    0
+#define    RTL8367C_SCHEDULE_PORT10_QUEUE5_WFQ_WEIGHT_MASK    0x7F
+
+#define    RTL8367C_REG_SCHEDULE_PORT10_QUEUE6_WFQ_WEIGHT    0x0362
+#define    RTL8367C_SCHEDULE_PORT10_QUEUE6_WFQ_WEIGHT_OFFSET    0
+#define    RTL8367C_SCHEDULE_PORT10_QUEUE6_WFQ_WEIGHT_MASK    0x7F
+
+#define    RTL8367C_REG_SCHEDULE_PORT10_QUEUE7_WFQ_WEIGHT    0x0363
+#define    RTL8367C_SCHEDULE_PORT10_QUEUE7_WFQ_WEIGHT_OFFSET    0
+#define    RTL8367C_SCHEDULE_PORT10_QUEUE7_WFQ_WEIGHT_MASK    0x7F
+
+#define    RTL8367C_REG_PORT0_EGRESSBW_CTRL0    0x038c
+
+#define    RTL8367C_REG_PORT0_EGRESSBW_CTRL1    0x038d
+#define    RTL8367C_PORT0_EGRESSBW_CTRL1_OFFSET    0
+#define    RTL8367C_PORT0_EGRESSBW_CTRL1_MASK    0x1
+
+#define    RTL8367C_REG_PORT1_EGRESSBW_CTRL0    0x038e
+
+#define    RTL8367C_REG_PORT1_EGRESSBW_CTRL1    0x038f
+#define    RTL8367C_PORT1_EGRESSBW_CTRL1_OFFSET    0
+#define    RTL8367C_PORT1_EGRESSBW_CTRL1_MASK    0x1
+
+#define    RTL8367C_REG_PORT2_EGRESSBW_CTRL0    0x0390
+
+#define    RTL8367C_REG_PORT2_EGRESSBW_CTRL1    0x0391
+#define    RTL8367C_PORT2_EGRESSBW_CTRL1_OFFSET    0
+#define    RTL8367C_PORT2_EGRESSBW_CTRL1_MASK    0x1
+
+#define    RTL8367C_REG_PORT3_EGRESSBW_CTRL0    0x0392
+
+#define    RTL8367C_REG_PORT3_EGRESSBW_CTRL1    0x0393
+#define    RTL8367C_PORT3_EGRESSBW_CTRL1_OFFSET    0
+#define    RTL8367C_PORT3_EGRESSBW_CTRL1_MASK    0x1
+
+#define    RTL8367C_REG_PORT4_EGRESSBW_CTRL0    0x0394
+
+#define    RTL8367C_REG_PORT4_EGRESSBW_CTRL1    0x0395
+#define    RTL8367C_PORT4_EGRESSBW_CTRL1_OFFSET    0
+#define    RTL8367C_PORT4_EGRESSBW_CTRL1_MASK    0x1
+
+#define    RTL8367C_REG_PORT5_EGRESSBW_CTRL0    0x0396
+
+#define    RTL8367C_REG_PORT5_EGRESSBW_CTRL1    0x0397
+#define    RTL8367C_PORT5_EGRESSBW_CTRL1_OFFSET    0
+#define    RTL8367C_PORT5_EGRESSBW_CTRL1_MASK    0x1
+
+#define    RTL8367C_REG_PORT6_EGRESSBW_CTRL0    0x0398
+
+#define    RTL8367C_REG_PORT6_EGRESSBW_CTRL1    0x0399
+#define    RTL8367C_PORT6_EGRESSBW_CTRL1_OFFSET    0
+#define    RTL8367C_PORT6_EGRESSBW_CTRL1_MASK    0x7
+
+#define    RTL8367C_REG_PORT7_EGRESSBW_CTRL0    0x039a
+
+#define    RTL8367C_REG_PORT7_EGRESSBW_CTRL1    0x039b
+#define    RTL8367C_PORT7_EGRESSBW_CTRL1_OFFSET    0
+#define    RTL8367C_PORT7_EGRESSBW_CTRL1_MASK    0x1
+
+#define    RTL8367C_REG_PORT8_EGRESSBW_CTRL0    0x039c
+
+#define    RTL8367C_REG_PORT8_EGRESSBW_CTRL1    0x039d
+#define    RTL8367C_PORT8_EGRESSBW_CTRL1_OFFSET    0
+#define    RTL8367C_PORT8_EGRESSBW_CTRL1_MASK    0x1
+
+#define    RTL8367C_REG_PORT9_EGRESSBW_CTRL0    0x039e
+
+#define    RTL8367C_REG_PORT9_EGRESSBW_CTRL1    0x039f
+#define    RTL8367C_PORT9_EGRESSBW_CTRL1_OFFSET    0
+#define    RTL8367C_PORT9_EGRESSBW_CTRL1_MASK    0x7
+
+#define    RTL8367C_REG_PORT10_EGRESSBW_CTRL0    0x03a0
+
+#define    RTL8367C_REG_PORT10_EGRESSBW_CTRL1    0x03a1
+#define    RTL8367C_PORT10_EGRESSBW_CTRL1_OFFSET    0
+#define    RTL8367C_PORT10_EGRESSBW_CTRL1_MASK    0x1
+
+#define    RTL8367C_REG_SCHEDULE_PORT0_APR_METER_CTRL0    0x03ac
+#define    RTL8367C_SCHEDULE_PORT0_APR_METER_CTRL0_QUEUE4_APR_METER_OFFSET    12
+#define    RTL8367C_SCHEDULE_PORT0_APR_METER_CTRL0_QUEUE4_APR_METER_MASK    0x7000
+#define    RTL8367C_SCHEDULE_PORT0_APR_METER_CTRL0_QUEUE3_APR_METER_OFFSET    9
+#define    RTL8367C_SCHEDULE_PORT0_APR_METER_CTRL0_QUEUE3_APR_METER_MASK    0xE00
+#define    RTL8367C_SCHEDULE_PORT0_APR_METER_CTRL0_QUEUE2_APR_METER_OFFSET    6
+#define    RTL8367C_SCHEDULE_PORT0_APR_METER_CTRL0_QUEUE2_APR_METER_MASK    0x1C0
+#define    RTL8367C_SCHEDULE_PORT0_APR_METER_CTRL0_QUEUE1_APR_METER_OFFSET    3
+#define    RTL8367C_SCHEDULE_PORT0_APR_METER_CTRL0_QUEUE1_APR_METER_MASK    0x38
+#define    RTL8367C_SCHEDULE_PORT0_APR_METER_CTRL0_QUEUE0_APR_METER_OFFSET    0
+#define    RTL8367C_SCHEDULE_PORT0_APR_METER_CTRL0_QUEUE0_APR_METER_MASK    0x7
+
+#define    RTL8367C_REG_SCHEDULE_PORT0_APR_METER_CTRL1    0x03ad
+#define    RTL8367C_SCHEDULE_PORT0_APR_METER_CTRL1_QUEUE7_APR_METER_OFFSET    6
+#define    RTL8367C_SCHEDULE_PORT0_APR_METER_CTRL1_QUEUE7_APR_METER_MASK    0x1C0
+#define    RTL8367C_SCHEDULE_PORT0_APR_METER_CTRL1_QUEUE6_APR_METER_OFFSET    3
+#define    RTL8367C_SCHEDULE_PORT0_APR_METER_CTRL1_QUEUE6_APR_METER_MASK    0x38
+#define    RTL8367C_SCHEDULE_PORT0_APR_METER_CTRL1_QUEUE5_APR_METER_OFFSET    0
+#define    RTL8367C_SCHEDULE_PORT0_APR_METER_CTRL1_QUEUE5_APR_METER_MASK    0x7
+
+#define    RTL8367C_REG_SCHEDULE_PORT1_APR_METER_CTRL0    0x03b0
+#define    RTL8367C_SCHEDULE_PORT1_APR_METER_CTRL0_QUEUE4_APR_METER_OFFSET    12
+#define    RTL8367C_SCHEDULE_PORT1_APR_METER_CTRL0_QUEUE4_APR_METER_MASK    0x7000
+#define    RTL8367C_SCHEDULE_PORT1_APR_METER_CTRL0_QUEUE3_APR_METER_OFFSET    9
+#define    RTL8367C_SCHEDULE_PORT1_APR_METER_CTRL0_QUEUE3_APR_METER_MASK    0xE00
+#define    RTL8367C_SCHEDULE_PORT1_APR_METER_CTRL0_QUEUE2_APR_METER_OFFSET    6
+#define    RTL8367C_SCHEDULE_PORT1_APR_METER_CTRL0_QUEUE2_APR_METER_MASK    0x1C0
+#define    RTL8367C_SCHEDULE_PORT1_APR_METER_CTRL0_QUEUE1_APR_METER_OFFSET    3
+#define    RTL8367C_SCHEDULE_PORT1_APR_METER_CTRL0_QUEUE1_APR_METER_MASK    0x38
+#define    RTL8367C_SCHEDULE_PORT1_APR_METER_CTRL0_QUEUE0_APR_METER_OFFSET    0
+#define    RTL8367C_SCHEDULE_PORT1_APR_METER_CTRL0_QUEUE0_APR_METER_MASK    0x7
+
+#define    RTL8367C_REG_SCHEDULE_PORT1_APR_METER_CTRL1    0x03b1
+#define    RTL8367C_SCHEDULE_PORT1_APR_METER_CTRL1_QUEUE7_APR_METER_OFFSET    6
+#define    RTL8367C_SCHEDULE_PORT1_APR_METER_CTRL1_QUEUE7_APR_METER_MASK    0x1C0
+#define    RTL8367C_SCHEDULE_PORT1_APR_METER_CTRL1_QUEUE6_APR_METER_OFFSET    3
+#define    RTL8367C_SCHEDULE_PORT1_APR_METER_CTRL1_QUEUE6_APR_METER_MASK    0x38
+#define    RTL8367C_SCHEDULE_PORT1_APR_METER_CTRL1_QUEUE5_APR_METER_OFFSET    0
+#define    RTL8367C_SCHEDULE_PORT1_APR_METER_CTRL1_QUEUE5_APR_METER_MASK    0x7
+
+#define    RTL8367C_REG_SCHEDULE_PORT2_APR_METER_CTRL0    0x03b4
+#define    RTL8367C_SCHEDULE_PORT2_APR_METER_CTRL0_QUEUE4_APR_METER_OFFSET    12
+#define    RTL8367C_SCHEDULE_PORT2_APR_METER_CTRL0_QUEUE4_APR_METER_MASK    0x7000
+#define    RTL8367C_SCHEDULE_PORT2_APR_METER_CTRL0_QUEUE3_APR_METER_OFFSET    9
+#define    RTL8367C_SCHEDULE_PORT2_APR_METER_CTRL0_QUEUE3_APR_METER_MASK    0xE00
+#define    RTL8367C_SCHEDULE_PORT2_APR_METER_CTRL0_QUEUE2_APR_METER_OFFSET    6
+#define    RTL8367C_SCHEDULE_PORT2_APR_METER_CTRL0_QUEUE2_APR_METER_MASK    0x1C0
+#define    RTL8367C_SCHEDULE_PORT2_APR_METER_CTRL0_QUEUE1_APR_METER_OFFSET    3
+#define    RTL8367C_SCHEDULE_PORT2_APR_METER_CTRL0_QUEUE1_APR_METER_MASK    0x38
+#define    RTL8367C_SCHEDULE_PORT2_APR_METER_CTRL0_QUEUE0_APR_METER_OFFSET    0
+#define    RTL8367C_SCHEDULE_PORT2_APR_METER_CTRL0_QUEUE0_APR_METER_MASK    0x7
+
+#define    RTL8367C_REG_SCHEDULE_PORT2_APR_METER_CTRL1    0x03b5
+#define    RTL8367C_SCHEDULE_PORT2_APR_METER_CTRL1_QUEUE7_APR_METER_OFFSET    6
+#define    RTL8367C_SCHEDULE_PORT2_APR_METER_CTRL1_QUEUE7_APR_METER_MASK    0x1C0
+#define    RTL8367C_SCHEDULE_PORT2_APR_METER_CTRL1_QUEUE6_APR_METER_OFFSET    3
+#define    RTL8367C_SCHEDULE_PORT2_APR_METER_CTRL1_QUEUE6_APR_METER_MASK    0x38
+#define    RTL8367C_SCHEDULE_PORT2_APR_METER_CTRL1_QUEUE5_APR_METER_OFFSET    0
+#define    RTL8367C_SCHEDULE_PORT2_APR_METER_CTRL1_QUEUE5_APR_METER_MASK    0x7
+
+#define    RTL8367C_REG_SCHEDULE_PORT3_APR_METER_CTRL0    0x03b8
+#define    RTL8367C_SCHEDULE_PORT3_APR_METER_CTRL0_QUEUE4_APR_METER_OFFSET    12
+#define    RTL8367C_SCHEDULE_PORT3_APR_METER_CTRL0_QUEUE4_APR_METER_MASK    0x7000
+#define    RTL8367C_SCHEDULE_PORT3_APR_METER_CTRL0_QUEUE3_APR_METER_OFFSET    9
+#define    RTL8367C_SCHEDULE_PORT3_APR_METER_CTRL0_QUEUE3_APR_METER_MASK    0xE00
+#define    RTL8367C_SCHEDULE_PORT3_APR_METER_CTRL0_QUEUE2_APR_METER_OFFSET    6
+#define    RTL8367C_SCHEDULE_PORT3_APR_METER_CTRL0_QUEUE2_APR_METER_MASK    0x1C0
+#define    RTL8367C_SCHEDULE_PORT3_APR_METER_CTRL0_QUEUE1_APR_METER_OFFSET    3
+#define    RTL8367C_SCHEDULE_PORT3_APR_METER_CTRL0_QUEUE1_APR_METER_MASK    0x38
+#define    RTL8367C_SCHEDULE_PORT3_APR_METER_CTRL0_QUEUE0_APR_METER_OFFSET    0
+#define    RTL8367C_SCHEDULE_PORT3_APR_METER_CTRL0_QUEUE0_APR_METER_MASK    0x7
+
+#define    RTL8367C_REG_SCHEDULE_PORT3_APR_METER_CTRL1    0x03b9
+#define    RTL8367C_SCHEDULE_PORT3_APR_METER_CTRL1_QUEUE7_APR_METER_OFFSET    6
+#define    RTL8367C_SCHEDULE_PORT3_APR_METER_CTRL1_QUEUE7_APR_METER_MASK    0x1C0
+#define    RTL8367C_SCHEDULE_PORT3_APR_METER_CTRL1_QUEUE6_APR_METER_OFFSET    3
+#define    RTL8367C_SCHEDULE_PORT3_APR_METER_CTRL1_QUEUE6_APR_METER_MASK    0x38
+#define    RTL8367C_SCHEDULE_PORT3_APR_METER_CTRL1_QUEUE5_APR_METER_OFFSET    0
+#define    RTL8367C_SCHEDULE_PORT3_APR_METER_CTRL1_QUEUE5_APR_METER_MASK    0x7
+
+#define    RTL8367C_REG_SCHEDULE_PORT4_APR_METER_CTRL0    0x03bc
+#define    RTL8367C_SCHEDULE_PORT4_APR_METER_CTRL0_QUEUE4_APR_METER_OFFSET    12
+#define    RTL8367C_SCHEDULE_PORT4_APR_METER_CTRL0_QUEUE4_APR_METER_MASK    0x7000
+#define    RTL8367C_SCHEDULE_PORT4_APR_METER_CTRL0_QUEUE3_APR_METER_OFFSET    9
+#define    RTL8367C_SCHEDULE_PORT4_APR_METER_CTRL0_QUEUE3_APR_METER_MASK    0xE00
+#define    RTL8367C_SCHEDULE_PORT4_APR_METER_CTRL0_QUEUE2_APR_METER_OFFSET    6
+#define    RTL8367C_SCHEDULE_PORT4_APR_METER_CTRL0_QUEUE2_APR_METER_MASK    0x1C0
+#define    RTL8367C_SCHEDULE_PORT4_APR_METER_CTRL0_QUEUE1_APR_METER_OFFSET    3
+#define    RTL8367C_SCHEDULE_PORT4_APR_METER_CTRL0_QUEUE1_APR_METER_MASK    0x38
+#define    RTL8367C_SCHEDULE_PORT4_APR_METER_CTRL0_QUEUE0_APR_METER_OFFSET    0
+#define    RTL8367C_SCHEDULE_PORT4_APR_METER_CTRL0_QUEUE0_APR_METER_MASK    0x7
+
+#define    RTL8367C_REG_SCHEDULE_PORT4_APR_METER_CTRL1    0x03bd
+#define    RTL8367C_SCHEDULE_PORT4_APR_METER_CTRL1_QUEUE7_APR_METER_OFFSET    6
+#define    RTL8367C_SCHEDULE_PORT4_APR_METER_CTRL1_QUEUE7_APR_METER_MASK    0x1C0
+#define    RTL8367C_SCHEDULE_PORT4_APR_METER_CTRL1_QUEUE6_APR_METER_OFFSET    3
+#define    RTL8367C_SCHEDULE_PORT4_APR_METER_CTRL1_QUEUE6_APR_METER_MASK    0x38
+#define    RTL8367C_SCHEDULE_PORT4_APR_METER_CTRL1_QUEUE5_APR_METER_OFFSET    0
+#define    RTL8367C_SCHEDULE_PORT4_APR_METER_CTRL1_QUEUE5_APR_METER_MASK    0x7
+
+#define    RTL8367C_REG_SCHEDULE_PORT5_APR_METER_CTRL0    0x03c0
+#define    RTL8367C_SCHEDULE_PORT5_APR_METER_CTRL0_QUEUE4_APR_METER_OFFSET    12
+#define    RTL8367C_SCHEDULE_PORT5_APR_METER_CTRL0_QUEUE4_APR_METER_MASK    0x7000
+#define    RTL8367C_SCHEDULE_PORT5_APR_METER_CTRL0_QUEUE3_APR_METER_OFFSET    9
+#define    RTL8367C_SCHEDULE_PORT5_APR_METER_CTRL0_QUEUE3_APR_METER_MASK    0xE00
+#define    RTL8367C_SCHEDULE_PORT5_APR_METER_CTRL0_QUEUE2_APR_METER_OFFSET    6
+#define    RTL8367C_SCHEDULE_PORT5_APR_METER_CTRL0_QUEUE2_APR_METER_MASK    0x1C0
+#define    RTL8367C_SCHEDULE_PORT5_APR_METER_CTRL0_QUEUE1_APR_METER_OFFSET    3
+#define    RTL8367C_SCHEDULE_PORT5_APR_METER_CTRL0_QUEUE1_APR_METER_MASK    0x38
+#define    RTL8367C_SCHEDULE_PORT5_APR_METER_CTRL0_QUEUE0_APR_METER_OFFSET    0
+#define    RTL8367C_SCHEDULE_PORT5_APR_METER_CTRL0_QUEUE0_APR_METER_MASK    0x7
+
+#define    RTL8367C_REG_SCHEDULE_PORT5_APR_METER_CTRL1    0x03c1
+#define    RTL8367C_SCHEDULE_PORT5_APR_METER_CTRL1_QUEUE7_APR_METER_OFFSET    6
+#define    RTL8367C_SCHEDULE_PORT5_APR_METER_CTRL1_QUEUE7_APR_METER_MASK    0x1C0
+#define    RTL8367C_SCHEDULE_PORT5_APR_METER_CTRL1_QUEUE6_APR_METER_OFFSET    3
+#define    RTL8367C_SCHEDULE_PORT5_APR_METER_CTRL1_QUEUE6_APR_METER_MASK    0x38
+#define    RTL8367C_SCHEDULE_PORT5_APR_METER_CTRL1_QUEUE5_APR_METER_OFFSET    0
+#define    RTL8367C_SCHEDULE_PORT5_APR_METER_CTRL1_QUEUE5_APR_METER_MASK    0x7
+
+#define    RTL8367C_REG_SCHEDULE_PORT6_APR_METER_CTRL0    0x03c4
+#define    RTL8367C_SCHEDULE_PORT6_APR_METER_CTRL0_QUEUE4_APR_METER_OFFSET    12
+#define    RTL8367C_SCHEDULE_PORT6_APR_METER_CTRL0_QUEUE4_APR_METER_MASK    0x7000
+#define    RTL8367C_SCHEDULE_PORT6_APR_METER_CTRL0_QUEUE3_APR_METER_OFFSET    9
+#define    RTL8367C_SCHEDULE_PORT6_APR_METER_CTRL0_QUEUE3_APR_METER_MASK    0xE00
+#define    RTL8367C_SCHEDULE_PORT6_APR_METER_CTRL0_QUEUE2_APR_METER_OFFSET    6
+#define    RTL8367C_SCHEDULE_PORT6_APR_METER_CTRL0_QUEUE2_APR_METER_MASK    0x1C0
+#define    RTL8367C_SCHEDULE_PORT6_APR_METER_CTRL0_QUEUE1_APR_METER_OFFSET    3
+#define    RTL8367C_SCHEDULE_PORT6_APR_METER_CTRL0_QUEUE1_APR_METER_MASK    0x38
+#define    RTL8367C_SCHEDULE_PORT6_APR_METER_CTRL0_QUEUE0_APR_METER_OFFSET    0
+#define    RTL8367C_SCHEDULE_PORT6_APR_METER_CTRL0_QUEUE0_APR_METER_MASK    0x7
+
+#define    RTL8367C_REG_SCHEDULE_PORT6_APR_METER_CTRL1    0x03c5
+#define    RTL8367C_SCHEDULE_PORT6_APR_METER_CTRL1_QUEUE7_APR_METER_OFFSET    6
+#define    RTL8367C_SCHEDULE_PORT6_APR_METER_CTRL1_QUEUE7_APR_METER_MASK    0x1C0
+#define    RTL8367C_SCHEDULE_PORT6_APR_METER_CTRL1_QUEUE6_APR_METER_OFFSET    3
+#define    RTL8367C_SCHEDULE_PORT6_APR_METER_CTRL1_QUEUE6_APR_METER_MASK    0x38
+#define    RTL8367C_SCHEDULE_PORT6_APR_METER_CTRL1_QUEUE5_APR_METER_OFFSET    0
+#define    RTL8367C_SCHEDULE_PORT6_APR_METER_CTRL1_QUEUE5_APR_METER_MASK    0x7
+
+#define    RTL8367C_REG_SCHEDULE_PORT7_APR_METER_CTRL0    0x03c8
+#define    RTL8367C_SCHEDULE_PORT7_APR_METER_CTRL0_QUEUE4_APR_METER_OFFSET    12
+#define    RTL8367C_SCHEDULE_PORT7_APR_METER_CTRL0_QUEUE4_APR_METER_MASK    0x7000
+#define    RTL8367C_SCHEDULE_PORT7_APR_METER_CTRL0_QUEUE3_APR_METER_OFFSET    9
+#define    RTL8367C_SCHEDULE_PORT7_APR_METER_CTRL0_QUEUE3_APR_METER_MASK    0xE00
+#define    RTL8367C_SCHEDULE_PORT7_APR_METER_CTRL0_QUEUE2_APR_METER_OFFSET    6
+#define    RTL8367C_SCHEDULE_PORT7_APR_METER_CTRL0_QUEUE2_APR_METER_MASK    0x1C0
+#define    RTL8367C_SCHEDULE_PORT7_APR_METER_CTRL0_QUEUE1_APR_METER_OFFSET    3
+#define    RTL8367C_SCHEDULE_PORT7_APR_METER_CTRL0_QUEUE1_APR_METER_MASK    0x38
+#define    RTL8367C_SCHEDULE_PORT7_APR_METER_CTRL0_QUEUE0_APR_METER_OFFSET    0
+#define    RTL8367C_SCHEDULE_PORT7_APR_METER_CTRL0_QUEUE0_APR_METER_MASK    0x7
+
+#define    RTL8367C_REG_SCHEDULE_PORT7_APR_METER_CTRL1    0x03c9
+#define    RTL8367C_SCHEDULE_PORT7_APR_METER_CTRL1_QUEUE7_APR_METER_OFFSET    6
+#define    RTL8367C_SCHEDULE_PORT7_APR_METER_CTRL1_QUEUE7_APR_METER_MASK    0x1C0
+#define    RTL8367C_SCHEDULE_PORT7_APR_METER_CTRL1_QUEUE6_APR_METER_OFFSET    3
+#define    RTL8367C_SCHEDULE_PORT7_APR_METER_CTRL1_QUEUE6_APR_METER_MASK    0x38
+#define    RTL8367C_SCHEDULE_PORT7_APR_METER_CTRL1_QUEUE5_APR_METER_OFFSET    0
+#define    RTL8367C_SCHEDULE_PORT7_APR_METER_CTRL1_QUEUE5_APR_METER_MASK    0x7
+
+#define    RTL8367C_REG_SCHEDULE_PORT8_APR_METER_CTRL0    0x03ca
+#define    RTL8367C_SCHEDULE_PORT8_APR_METER_CTRL0_QUEUE4_APR_METER_OFFSET    12
+#define    RTL8367C_SCHEDULE_PORT8_APR_METER_CTRL0_QUEUE4_APR_METER_MASK    0x7000
+#define    RTL8367C_SCHEDULE_PORT8_APR_METER_CTRL0_QUEUE3_APR_METER_OFFSET    9
+#define    RTL8367C_SCHEDULE_PORT8_APR_METER_CTRL0_QUEUE3_APR_METER_MASK    0xE00
+#define    RTL8367C_SCHEDULE_PORT8_APR_METER_CTRL0_QUEUE2_APR_METER_OFFSET    6
+#define    RTL8367C_SCHEDULE_PORT8_APR_METER_CTRL0_QUEUE2_APR_METER_MASK    0x1C0
+#define    RTL8367C_SCHEDULE_PORT8_APR_METER_CTRL0_QUEUE1_APR_METER_OFFSET    3
+#define    RTL8367C_SCHEDULE_PORT8_APR_METER_CTRL0_QUEUE1_APR_METER_MASK    0x38
+#define    RTL8367C_SCHEDULE_PORT8_APR_METER_CTRL0_QUEUE0_APR_METER_OFFSET    0
+#define    RTL8367C_SCHEDULE_PORT8_APR_METER_CTRL0_QUEUE0_APR_METER_MASK    0x7
+
+#define    RTL8367C_REG_SCHEDULE_PORT8_APR_METER_CTRL1    0x03cb
+#define    RTL8367C_SCHEDULE_PORT8_APR_METER_CTRL1_QUEUE7_APR_METER_OFFSET    6
+#define    RTL8367C_SCHEDULE_PORT8_APR_METER_CTRL1_QUEUE7_APR_METER_MASK    0x1C0
+#define    RTL8367C_SCHEDULE_PORT8_APR_METER_CTRL1_QUEUE6_APR_METER_OFFSET    3
+#define    RTL8367C_SCHEDULE_PORT8_APR_METER_CTRL1_QUEUE6_APR_METER_MASK    0x38
+#define    RTL8367C_SCHEDULE_PORT8_APR_METER_CTRL1_QUEUE5_APR_METER_OFFSET    0
+#define    RTL8367C_SCHEDULE_PORT8_APR_METER_CTRL1_QUEUE5_APR_METER_MASK    0x7
+
+#define    RTL8367C_REG_SCHEDULE_PORT9_APR_METER_CTRL0    0x03cc
+#define    RTL8367C_SCHEDULE_PORT9_APR_METER_CTRL0_QUEUE4_APR_METER_OFFSET    12
+#define    RTL8367C_SCHEDULE_PORT9_APR_METER_CTRL0_QUEUE4_APR_METER_MASK    0x7000
+#define    RTL8367C_SCHEDULE_PORT9_APR_METER_CTRL0_QUEUE3_APR_METER_OFFSET    9
+#define    RTL8367C_SCHEDULE_PORT9_APR_METER_CTRL0_QUEUE3_APR_METER_MASK    0xE00
+#define    RTL8367C_SCHEDULE_PORT9_APR_METER_CTRL0_QUEUE2_APR_METER_OFFSET    6
+#define    RTL8367C_SCHEDULE_PORT9_APR_METER_CTRL0_QUEUE2_APR_METER_MASK    0x1C0
+#define    RTL8367C_SCHEDULE_PORT9_APR_METER_CTRL0_QUEUE1_APR_METER_OFFSET    3
+#define    RTL8367C_SCHEDULE_PORT9_APR_METER_CTRL0_QUEUE1_APR_METER_MASK    0x38
+#define    RTL8367C_SCHEDULE_PORT9_APR_METER_CTRL0_QUEUE0_APR_METER_OFFSET    0
+#define    RTL8367C_SCHEDULE_PORT9_APR_METER_CTRL0_QUEUE0_APR_METER_MASK    0x7
+
+#define    RTL8367C_REG_SCHEDULE_PORT9_APR_METER_CTRL1    0x03cd
+#define    RTL8367C_SCHEDULE_PORT9_APR_METER_CTRL1_QUEUE7_APR_METER_OFFSET    6
+#define    RTL8367C_SCHEDULE_PORT9_APR_METER_CTRL1_QUEUE7_APR_METER_MASK    0x1C0
+#define    RTL8367C_SCHEDULE_PORT9_APR_METER_CTRL1_QUEUE6_APR_METER_OFFSET    3
+#define    RTL8367C_SCHEDULE_PORT9_APR_METER_CTRL1_QUEUE6_APR_METER_MASK    0x38
+#define    RTL8367C_SCHEDULE_PORT9_APR_METER_CTRL1_QUEUE5_APR_METER_OFFSET    0
+#define    RTL8367C_SCHEDULE_PORT9_APR_METER_CTRL1_QUEUE5_APR_METER_MASK    0x7
+
+#define    RTL8367C_REG_SCHEDULE_PORT10_APR_METER_CTRL0    0x03ce
+#define    RTL8367C_SCHEDULE_PORT10_APR_METER_CTRL0_QUEUE4_APR_METER_OFFSET    12
+#define    RTL8367C_SCHEDULE_PORT10_APR_METER_CTRL0_QUEUE4_APR_METER_MASK    0x7000
+#define    RTL8367C_SCHEDULE_PORT10_APR_METER_CTRL0_QUEUE3_APR_METER_OFFSET    9
+#define    RTL8367C_SCHEDULE_PORT10_APR_METER_CTRL0_QUEUE3_APR_METER_MASK    0xE00
+#define    RTL8367C_SCHEDULE_PORT10_APR_METER_CTRL0_QUEUE2_APR_METER_OFFSET    6
+#define    RTL8367C_SCHEDULE_PORT10_APR_METER_CTRL0_QUEUE2_APR_METER_MASK    0x1C0
+#define    RTL8367C_SCHEDULE_PORT10_APR_METER_CTRL0_QUEUE1_APR_METER_OFFSET    3
+#define    RTL8367C_SCHEDULE_PORT10_APR_METER_CTRL0_QUEUE1_APR_METER_MASK    0x38
+#define    RTL8367C_SCHEDULE_PORT10_APR_METER_CTRL0_QUEUE0_APR_METER_OFFSET    0
+#define    RTL8367C_SCHEDULE_PORT10_APR_METER_CTRL0_QUEUE0_APR_METER_MASK    0x7
+
+#define    RTL8367C_REG_SCHEDULE_PORT10_APR_METER_CTRL1    0x03cf
+#define    RTL8367C_SCHEDULE_PORT10_APR_METER_CTRL1_QUEUE7_APR_METER_OFFSET    6
+#define    RTL8367C_SCHEDULE_PORT10_APR_METER_CTRL1_QUEUE7_APR_METER_MASK    0x1C0
+#define    RTL8367C_SCHEDULE_PORT10_APR_METER_CTRL1_QUEUE6_APR_METER_OFFSET    3
+#define    RTL8367C_SCHEDULE_PORT10_APR_METER_CTRL1_QUEUE6_APR_METER_MASK    0x38
+#define    RTL8367C_SCHEDULE_PORT10_APR_METER_CTRL1_QUEUE5_APR_METER_OFFSET    0
+#define    RTL8367C_SCHEDULE_PORT10_APR_METER_CTRL1_QUEUE5_APR_METER_MASK    0x7
+
+#define    RTL8367C_REG_LINE_RATE_1G_L    0x03ec
+
+#define    RTL8367C_REG_LINE_RATE_1G_H    0x03ed
+#define    RTL8367C_LINE_RATE_1G_H_OFFSET    0
+#define    RTL8367C_LINE_RATE_1G_H_MASK    0x1
+
+#define    RTL8367C_REG_LINE_RATE_100_L    0x03ee
+
+#define    RTL8367C_REG_LINE_RATE_100_H    0x03ef
+#define    RTL8367C_LINE_RATE_100_H_OFFSET    0
+#define    RTL8367C_LINE_RATE_100_H_MASK    0x1
+
+#define    RTL8367C_REG_LINE_RATE_10_L    0x03f0
+
+#define    RTL8367C_REG_LINE_RATE_10_H    0x03f1
+#define    RTL8367C_LINE_RATE_10_H_OFFSET    0
+#define    RTL8367C_LINE_RATE_10_H_MASK    0x1
+
+#define    RTL8367C_REG_DUMMY_03f2    0x03f2
+
+#define    RTL8367C_REG_DUMMY_03f3    0x03f3
+
+#define    RTL8367C_REG_DUMMY_03f4    0x03f4
+
+#define    RTL8367C_REG_DUMMY_03f5    0x03f5
+
+#define    RTL8367C_REG_DUMMY_03f6    0x03f6
+
+#define    RTL8367C_REG_BYPASS_LINE_RATE    0x03f7
+#define    RTL8367C_BYPASS_PORT10_CONSTRAINT_OFFSET    5
+#define    RTL8367C_BYPASS_PORT10_CONSTRAINT_MASK    0x20
+#define    RTL8367C_BYPASS_PORT9_CONSTRAINT_OFFSET    4
+#define    RTL8367C_BYPASS_PORT9_CONSTRAINT_MASK    0x10
+#define    RTL8367C_BYPASS_PORT8_CONSTRAINT_OFFSET    3
+#define    RTL8367C_BYPASS_PORT8_CONSTRAINT_MASK    0x8
+#define    RTL8367C_BYPASS_PORT7_CONSTRAINT_OFFSET    2
+#define    RTL8367C_BYPASS_PORT7_CONSTRAINT_MASK    0x4
+#define    RTL8367C_BYPASS_PORT6_CONSTRAINT_OFFSET    1
+#define    RTL8367C_BYPASS_PORT6_CONSTRAINT_MASK    0x2
+#define    RTL8367C_BYPASS_PORT5_CONSTRAINT_OFFSET    0
+#define    RTL8367C_BYPASS_PORT5_CONSTRAINT_MASK    0x1
+
+#define    RTL8367C_REG_LINE_RATE_500_H    0x03f8
+#define    RTL8367C_LINE_RATE_500_H_OFFSET    0
+#define    RTL8367C_LINE_RATE_500_H_MASK    0x7
+
+#define    RTL8367C_REG_LINE_RATE_500_L    0x03f9
+
+#define    RTL8367C_REG_LINE_RATE_HSG_H    0x03fa
+#define    RTL8367C_LINE_RATE_HSG_H_OFFSET    0
+#define    RTL8367C_LINE_RATE_HSG_H_MASK    0x7
+
+#define    RTL8367C_REG_LINE_RATE_HSG_L    0x03fb
+
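+/*
+ * Illustrative sketch: each LINE_RATE value is wider than 16 bits, so it is
+ * split across an _L register (low 16 bits) and an _H register holding the
+ * remaining high bit(s) (mask 0x1 or 0x7 above).  Reassembling the 1G line
+ * rate would look like the sketch below; rtl8367c_smi_read() is again a
+ * hypothetical accessor.
+ */
+#if 0
+static int rtl8367c_get_line_rate_1g(u32 *rate)
+{
+	u32 lo, hi;
+	int ret;
+
+	ret = rtl8367c_smi_read(RTL8367C_REG_LINE_RATE_1G_L, &lo);
+	if (ret)
+		return ret;
+	ret = rtl8367c_smi_read(RTL8367C_REG_LINE_RATE_1G_H, &hi);
+	if (ret)
+		return ret;
+	/* high bit(s) above bit 15, low word below */
+	*rate = ((hi & RTL8367C_LINE_RATE_1G_H_MASK) << 16) | (lo & 0xFFFF);
+	return 0;
+}
+#endif
+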
+/* (16'h0500)table_reg */
+
+#define    RTL8367C_REG_TABLE_ACCESS_CTRL    0x0500
+#define    RTL8367C_TABLE_ACCESS_CTRL_SPA_OFFSET    8
+#define    RTL8367C_TABLE_ACCESS_CTRL_SPA_MASK    0xF00
+#define    RTL8367C_ACCESS_METHOD_OFFSET    4
+#define    RTL8367C_ACCESS_METHOD_MASK    0x70
+#define    RTL8367C_COMMAND_TYPE_OFFSET    3
+#define    RTL8367C_COMMAND_TYPE_MASK    0x8
+#define    RTL8367C_TABLE_TYPE_OFFSET    0
+#define    RTL8367C_TABLE_TYPE_MASK    0x7
+
+#define    RTL8367C_REG_TABLE_ACCESS_ADDR    0x0501
+#define    RTL8367C_TABLE_ACCESS_ADDR_OFFSET    0
+#define    RTL8367C_TABLE_ACCESS_ADDR_MASK    0x1FFF
+
+#define    RTL8367C_REG_TABLE_LUT_ADDR    0x0502
+#define    RTL8367C_ADDRESS2_OFFSET    14
+#define    RTL8367C_ADDRESS2_MASK    0x4000
+#define    RTL8367C_TABLE_LUT_ADDR_BUSY_FLAG_OFFSET    13
+#define    RTL8367C_TABLE_LUT_ADDR_BUSY_FLAG_MASK    0x2000
+#define    RTL8367C_HIT_STATUS_OFFSET    12
+#define    RTL8367C_HIT_STATUS_MASK    0x1000
+#define    RTL8367C_TABLE_LUT_ADDR_TYPE_OFFSET    11
+#define    RTL8367C_TABLE_LUT_ADDR_TYPE_MASK    0x800
+#define    RTL8367C_TABLE_LUT_ADDR_ADDRESS_OFFSET    0
+#define    RTL8367C_TABLE_LUT_ADDR_ADDRESS_MASK    0x7FF
+
+#define    RTL8367C_REG_HSA_HSB_LATCH    0x0503
+#define    RTL8367C_LATCH_ALWAYS_OFFSET    15
+#define    RTL8367C_LATCH_ALWAYS_MASK    0x8000
+#define    RTL8367C_LATCH_FIRST_OFFSET    14
+#define    RTL8367C_LATCH_FIRST_MASK    0x4000
+#define    RTL8367C_SPA_EN_OFFSET    13
+#define    RTL8367C_SPA_EN_MASK    0x2000
+#define    RTL8367C_FORWARD_EN_OFFSET    12
+#define    RTL8367C_FORWARD_EN_MASK    0x1000
+#define    RTL8367C_REASON_EN_OFFSET    11
+#define    RTL8367C_REASON_EN_MASK    0x800
+#define    RTL8367C_HSA_HSB_LATCH_SPA_OFFSET    8
+#define    RTL8367C_HSA_HSB_LATCH_SPA_MASK    0x700
+#define    RTL8367C_FORWARD_OFFSET    6
+#define    RTL8367C_FORWARD_MASK    0xC0
+#define    RTL8367C_REASON_OFFSET    0
+#define    RTL8367C_REASON_MASK    0x3F
+
+#define    RTL8367C_REG_HSA_HSB_LATCH2    0x0504
+#define    RTL8367C_HSA_HSB_LATCH2_Reserved_OFFSET    1
+#define    RTL8367C_HSA_HSB_LATCH2_Reserved_MASK    0xFFFE
+#define    RTL8367C_SPA2_OFFSET    0
+#define    RTL8367C_SPA2_MASK    0x1
+
+#define    RTL8367C_REG_TABLE_WRITE_DATA0    0x0510
+
+#define    RTL8367C_REG_TABLE_WRITE_DATA1    0x0511
+
+#define    RTL8367C_REG_TABLE_WRITE_DATA2    0x0512
+
+#define    RTL8367C_REG_TABLE_WRITE_DATA3    0x0513
+
+#define    RTL8367C_REG_TABLE_WRITE_DATA4    0x0514
+
+#define    RTL8367C_REG_TABLE_WRITE_DATA5    0x0515
+
+#define    RTL8367C_REG_TABLE_WRITE_DATA6    0x0516
+
+#define    RTL8367C_REG_TABLE_WRITE_DATA7    0x0517
+
+#define    RTL8367C_REG_TABLE_WRITE_DATA8    0x0518
+
+#define    RTL8367C_REG_TABLE_WRITE_DATA9    0x0519
+#define    RTL8367C_TABLE_WRITE_DATA9_OFFSET    0
+#define    RTL8367C_TABLE_WRITE_DATA9_MASK    0xF
+
+#define    RTL8367C_REG_TABLE_READ_DATA0    0x0520
+
+#define    RTL8367C_REG_TABLE_READ_DATA1    0x0521
+
+#define    RTL8367C_REG_TABLE_READ_DATA2    0x0522
+
+#define    RTL8367C_REG_TABLE_READ_DATA3    0x0523
+
+#define    RTL8367C_REG_TABLE_READ_DATA4    0x0524
+
+#define    RTL8367C_REG_TABLE_READ_DATA5    0x0525
+
+#define    RTL8367C_REG_TABLE_READ_DATA6    0x0526
+
+#define    RTL8367C_REG_TABLE_READ_DATA7    0x0527
+
+#define    RTL8367C_REG_TABLE_READ_DATA8    0x0528
+
+#define    RTL8367C_REG_TABLE_READ_DATA9    0x0529
+#define    RTL8367C_TABLE_READ_DATA9_OFFSET    0
+#define    RTL8367C_TABLE_READ_DATA9_MASK    0xF
+
+#define    RTL8367C_REG_TBL_DUMMY00    0x0550
+
+#define    RTL8367C_REG_TBL_DUMMY01    0x0551
+
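+/*
+ * Illustrative sketch: the table_reg block above implements indirect table
+ * access.  A write cycle loads TABLE_WRITE_DATA0..9, sets the entry address
+ * in TABLE_ACCESS_ADDR, then kicks the command through TABLE_ACCESS_CTRL;
+ * for LUT operations completion can be polled via the busy flag in
+ * TABLE_LUT_ADDR.  This assumes COMMAND_TYPE=1 selects a write cycle and
+ * that the rtl8367c_smi_*() accessors exist; both are placeholders, hence
+ * the #if 0 guard.
+ */
+#if 0
+static int rtl8367c_table_write(u32 table, u32 addr, const u32 *data, int n)
+{
+	u32 busy;
+	int i, ret;
+
+	/* TABLE_WRITE_DATA0..9 occupy consecutive addresses from 0x0510 */
+	for (i = 0; i < n; i++) {
+		ret = rtl8367c_smi_write(RTL8367C_REG_TABLE_WRITE_DATA0 + i,
+					 data[i]);
+		if (ret)
+			return ret;
+	}
+	ret = rtl8367c_smi_write(RTL8367C_REG_TABLE_ACCESS_ADDR,
+				 addr & RTL8367C_TABLE_ACCESS_ADDR_MASK);
+	if (ret)
+		return ret;
+	/* assumed: COMMAND_TYPE bit set = write, low bits select the table */
+	ret = rtl8367c_smi_write(RTL8367C_REG_TABLE_ACCESS_CTRL,
+				 RTL8367C_COMMAND_TYPE_MASK |
+				 (table & RTL8367C_TABLE_TYPE_MASK));
+	if (ret)
+		return ret;
+	/* wait for the hardware to finish the indirect access */
+	do {
+		ret = rtl8367c_smi_read(RTL8367C_REG_TABLE_LUT_ADDR, &busy);
+		if (ret)
+			return ret;
+	} while (busy & RTL8367C_TABLE_LUT_ADDR_BUSY_FLAG_MASK);
+	return 0;
+}
+#endif
+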
+/* (16'h0600)acl_reg */
+
+#define    RTL8367C_REG_ACL_RULE_TEMPLATE0_CTRL0    0x0600
+#define    RTL8367C_ACL_RULE_TEMPLATE0_CTRL0_FIELD1_OFFSET    8
+#define    RTL8367C_ACL_RULE_TEMPLATE0_CTRL0_FIELD1_MASK    0x7F00
+#define    RTL8367C_ACL_RULE_TEMPLATE0_CTRL0_FIELD0_OFFSET    0
+#define    RTL8367C_ACL_RULE_TEMPLATE0_CTRL0_FIELD0_MASK    0x7F
+
+#define    RTL8367C_REG_ACL_RULE_TEMPLATE0_CTRL1    0x0601
+#define    RTL8367C_ACL_RULE_TEMPLATE0_CTRL1_FIELD3_OFFSET    8
+#define    RTL8367C_ACL_RULE_TEMPLATE0_CTRL1_FIELD3_MASK    0x7F00
+#define    RTL8367C_ACL_RULE_TEMPLATE0_CTRL1_FIELD2_OFFSET    0
+#define    RTL8367C_ACL_RULE_TEMPLATE0_CTRL1_FIELD2_MASK    0x7F
+
+#define    RTL8367C_REG_ACL_RULE_TEMPLATE0_CTRL2    0x0602
+#define    RTL8367C_ACL_RULE_TEMPLATE0_CTRL2_FIELD5_OFFSET    8
+#define    RTL8367C_ACL_RULE_TEMPLATE0_CTRL2_FIELD5_MASK    0x7F00
+#define    RTL8367C_ACL_RULE_TEMPLATE0_CTRL2_FIELD4_OFFSET    0
+#define    RTL8367C_ACL_RULE_TEMPLATE0_CTRL2_FIELD4_MASK    0x7F
+
+#define    RTL8367C_REG_ACL_RULE_TEMPLATE0_CTRL3    0x0603
+#define    RTL8367C_ACL_RULE_TEMPLATE0_CTRL3_FIELD7_OFFSET    8
+#define    RTL8367C_ACL_RULE_TEMPLATE0_CTRL3_FIELD7_MASK    0x7F00
+#define    RTL8367C_ACL_RULE_TEMPLATE0_CTRL3_FIELD6_OFFSET    0
+#define    RTL8367C_ACL_RULE_TEMPLATE0_CTRL3_FIELD6_MASK    0x7F
+
+#define    RTL8367C_REG_ACL_RULE_TEMPLATE1_CTRL0    0x0604
+#define    RTL8367C_ACL_RULE_TEMPLATE1_CTRL0_FIELD1_OFFSET    8
+#define    RTL8367C_ACL_RULE_TEMPLATE1_CTRL0_FIELD1_MASK    0x7F00
+#define    RTL8367C_ACL_RULE_TEMPLATE1_CTRL0_FIELD0_OFFSET    0
+#define    RTL8367C_ACL_RULE_TEMPLATE1_CTRL0_FIELD0_MASK    0x7F
+
+#define    RTL8367C_REG_ACL_RULE_TEMPLATE1_CTRL1    0x0605
+#define    RTL8367C_ACL_RULE_TEMPLATE1_CTRL1_FIELD3_OFFSET    8
+#define    RTL8367C_ACL_RULE_TEMPLATE1_CTRL1_FIELD3_MASK    0x7F00
+#define    RTL8367C_ACL_RULE_TEMPLATE1_CTRL1_FIELD2_OFFSET    0
+#define    RTL8367C_ACL_RULE_TEMPLATE1_CTRL1_FIELD2_MASK    0x7F
+
+#define    RTL8367C_REG_ACL_RULE_TEMPLATE1_CTRL2    0x0606
+#define    RTL8367C_ACL_RULE_TEMPLATE1_CTRL2_FIELD5_OFFSET    8
+#define    RTL8367C_ACL_RULE_TEMPLATE1_CTRL2_FIELD5_MASK    0x7F00
+#define    RTL8367C_ACL_RULE_TEMPLATE1_CTRL2_FIELD4_OFFSET    0
+#define    RTL8367C_ACL_RULE_TEMPLATE1_CTRL2_FIELD4_MASK    0x7F
+
+#define    RTL8367C_REG_ACL_RULE_TEMPLATE1_CTRL3    0x0607
+#define    RTL8367C_ACL_RULE_TEMPLATE1_CTRL3_FIELD7_OFFSET    8
+#define    RTL8367C_ACL_RULE_TEMPLATE1_CTRL3_FIELD7_MASK    0x7F00
+#define    RTL8367C_ACL_RULE_TEMPLATE1_CTRL3_FIELD6_OFFSET    0
+#define    RTL8367C_ACL_RULE_TEMPLATE1_CTRL3_FIELD6_MASK    0x7F
+
+#define    RTL8367C_REG_ACL_RULE_TEMPLATE2_CTRL0    0x0608
+#define    RTL8367C_ACL_RULE_TEMPLATE2_CTRL0_FIELD1_OFFSET    8
+#define    RTL8367C_ACL_RULE_TEMPLATE2_CTRL0_FIELD1_MASK    0x7F00
+#define    RTL8367C_ACL_RULE_TEMPLATE2_CTRL0_FIELD0_OFFSET    0
+#define    RTL8367C_ACL_RULE_TEMPLATE2_CTRL0_FIELD0_MASK    0x7F
+
+#define    RTL8367C_REG_ACL_RULE_TEMPLATE2_CTRL1    0x0609
+#define    RTL8367C_ACL_RULE_TEMPLATE2_CTRL1_FIELD3_OFFSET    8
+#define    RTL8367C_ACL_RULE_TEMPLATE2_CTRL1_FIELD3_MASK    0x7F00
+#define    RTL8367C_ACL_RULE_TEMPLATE2_CTRL1_FIELD2_OFFSET    0
+#define    RTL8367C_ACL_RULE_TEMPLATE2_CTRL1_FIELD2_MASK    0x7F
+
+#define    RTL8367C_REG_ACL_RULE_TEMPLATE2_CTRL2    0x060a
+#define    RTL8367C_ACL_RULE_TEMPLATE2_CTRL2_FIELD5_OFFSET    8
+#define    RTL8367C_ACL_RULE_TEMPLATE2_CTRL2_FIELD5_MASK    0x7F00
+#define    RTL8367C_ACL_RULE_TEMPLATE2_CTRL2_FIELD4_OFFSET    0
+#define    RTL8367C_ACL_RULE_TEMPLATE2_CTRL2_FIELD4_MASK    0x7F
+
+#define    RTL8367C_REG_ACL_RULE_TEMPLATE2_CTRL3    0x060b
+#define    RTL8367C_ACL_RULE_TEMPLATE2_CTRL3_FIELD7_OFFSET    8
+#define    RTL8367C_ACL_RULE_TEMPLATE2_CTRL3_FIELD7_MASK    0x7F00
+#define    RTL8367C_ACL_RULE_TEMPLATE2_CTRL3_FIELD6_OFFSET    0
+#define    RTL8367C_ACL_RULE_TEMPLATE2_CTRL3_FIELD6_MASK    0x7F
+
+#define    RTL8367C_REG_ACL_RULE_TEMPLATE3_CTRL0    0x060c
+#define    RTL8367C_ACL_RULE_TEMPLATE3_CTRL0_FIELD1_OFFSET    8
+#define    RTL8367C_ACL_RULE_TEMPLATE3_CTRL0_FIELD1_MASK    0x7F00
+#define    RTL8367C_ACL_RULE_TEMPLATE3_CTRL0_FIELD0_OFFSET    0
+#define    RTL8367C_ACL_RULE_TEMPLATE3_CTRL0_FIELD0_MASK    0x7F
+
+#define    RTL8367C_REG_ACL_RULE_TEMPLATE3_CTRL1    0x060d
+#define    RTL8367C_ACL_RULE_TEMPLATE3_CTRL1_FIELD3_OFFSET    8
+#define    RTL8367C_ACL_RULE_TEMPLATE3_CTRL1_FIELD3_MASK    0x7F00
+#define    RTL8367C_ACL_RULE_TEMPLATE3_CTRL1_FIELD2_OFFSET    0
+#define    RTL8367C_ACL_RULE_TEMPLATE3_CTRL1_FIELD2_MASK    0x7F
+
+#define    RTL8367C_REG_ACL_RULE_TEMPLATE3_CTRL2    0x060e
+#define    RTL8367C_ACL_RULE_TEMPLATE3_CTRL2_FIELD5_OFFSET    8
+#define    RTL8367C_ACL_RULE_TEMPLATE3_CTRL2_FIELD5_MASK    0x7F00
+#define    RTL8367C_ACL_RULE_TEMPLATE3_CTRL2_FIELD4_OFFSET    0
+#define    RTL8367C_ACL_RULE_TEMPLATE3_CTRL2_FIELD4_MASK    0x7F
+
+#define    RTL8367C_REG_ACL_RULE_TEMPLATE3_CTRL3    0x060f
+#define    RTL8367C_ACL_RULE_TEMPLATE3_CTRL3_FIELD7_OFFSET    8
+#define    RTL8367C_ACL_RULE_TEMPLATE3_CTRL3_FIELD7_MASK    0x7F00
+#define    RTL8367C_ACL_RULE_TEMPLATE3_CTRL3_FIELD6_OFFSET    0
+#define    RTL8367C_ACL_RULE_TEMPLATE3_CTRL3_FIELD6_MASK    0x7F
+
+#define    RTL8367C_REG_ACL_RULE_TEMPLATE4_CTRL0    0x0610
+#define    RTL8367C_ACL_RULE_TEMPLATE4_CTRL0_FIELD1_OFFSET    8
+#define    RTL8367C_ACL_RULE_TEMPLATE4_CTRL0_FIELD1_MASK    0x7F00
+#define    RTL8367C_ACL_RULE_TEMPLATE4_CTRL0_FIELD0_OFFSET    0
+#define    RTL8367C_ACL_RULE_TEMPLATE4_CTRL0_FIELD0_MASK    0x7F
+
+#define    RTL8367C_REG_ACL_RULE_TEMPLATE4_CTRL1    0x0611
+#define    RTL8367C_ACL_RULE_TEMPLATE4_CTRL1_FIELD3_OFFSET    8
+#define    RTL8367C_ACL_RULE_TEMPLATE4_CTRL1_FIELD3_MASK    0x7F00
+#define    RTL8367C_ACL_RULE_TEMPLATE4_CTRL1_FIELD2_OFFSET    0
+#define    RTL8367C_ACL_RULE_TEMPLATE4_CTRL1_FIELD2_MASK    0x7F
+
+#define    RTL8367C_REG_ACL_RULE_TEMPLATE4_CTRL2    0x0612
+#define    RTL8367C_ACL_RULE_TEMPLATE4_CTRL2_FIELD5_OFFSET    8
+#define    RTL8367C_ACL_RULE_TEMPLATE4_CTRL2_FIELD5_MASK    0x7F00
+#define    RTL8367C_ACL_RULE_TEMPLATE4_CTRL2_FIELD4_OFFSET    0
+#define    RTL8367C_ACL_RULE_TEMPLATE4_CTRL2_FIELD4_MASK    0x7F
+
+#define    RTL8367C_REG_ACL_RULE_TEMPLATE4_CTRL3    0x0613
+#define    RTL8367C_ACL_RULE_TEMPLATE4_CTRL3_FIELD7_OFFSET    8
+#define    RTL8367C_ACL_RULE_TEMPLATE4_CTRL3_FIELD7_MASK    0x7F00
+#define    RTL8367C_ACL_RULE_TEMPLATE4_CTRL3_FIELD6_OFFSET    0
+#define    RTL8367C_ACL_RULE_TEMPLATE4_CTRL3_FIELD6_MASK    0x7F
+
+#define    RTL8367C_REG_ACL_ACTION_CTRL0    0x0614
+#define    RTL8367C_OP1_NOT_OFFSET    14
+#define    RTL8367C_OP1_NOT_MASK    0x4000
+#define    RTL8367C_ACT1_GPIO_OFFSET    13
+#define    RTL8367C_ACT1_GPIO_MASK    0x2000
+#define    RTL8367C_ACT1_FORWARD_OFFSET    12
+#define    RTL8367C_ACT1_FORWARD_MASK    0x1000
+#define    RTL8367C_ACT1_POLICING_OFFSET    11
+#define    RTL8367C_ACT1_POLICING_MASK    0x800
+#define    RTL8367C_ACT1_PRIORITY_OFFSET    10
+#define    RTL8367C_ACT1_PRIORITY_MASK    0x400
+#define    RTL8367C_ACT1_SVID_OFFSET    9
+#define    RTL8367C_ACT1_SVID_MASK    0x200
+#define    RTL8367C_ACT1_CVID_OFFSET    8
+#define    RTL8367C_ACT1_CVID_MASK    0x100
+#define    RTL8367C_OP0_NOT_OFFSET    6
+#define    RTL8367C_OP0_NOT_MASK    0x40
+#define    RTL8367C_ACT0_GPIO_OFFSET    5
+#define    RTL8367C_ACT0_GPIO_MASK    0x20
+#define    RTL8367C_ACT0_FORWARD_OFFSET    4
+#define    RTL8367C_ACT0_FORWARD_MASK    0x10
+#define    RTL8367C_ACT0_POLICING_OFFSET    3
+#define    RTL8367C_ACT0_POLICING_MASK    0x8
+#define    RTL8367C_ACT0_PRIORITY_OFFSET    2
+#define    RTL8367C_ACT0_PRIORITY_MASK    0x4
+#define    RTL8367C_ACT0_SVID_OFFSET    1
+#define    RTL8367C_ACT0_SVID_MASK    0x2
+#define    RTL8367C_ACT0_CVID_OFFSET    0
+#define    RTL8367C_ACT0_CVID_MASK    0x1
+
+#define    RTL8367C_REG_ACL_ACTION_CTRL1    0x0615
+#define    RTL8367C_OP3_NOT_OFFSET    14
+#define    RTL8367C_OP3_NOT_MASK    0x4000
+#define    RTL8367C_ACT3_GPIO_OFFSET    13
+#define    RTL8367C_ACT3_GPIO_MASK    0x2000
+#define    RTL8367C_ACT3_FORWARD_OFFSET    12
+#define    RTL8367C_ACT3_FORWARD_MASK    0x1000
+#define    RTL8367C_ACT3_POLICING_OFFSET    11
+#define    RTL8367C_ACT3_POLICING_MASK    0x800
+#define    RTL8367C_ACT3_PRIORITY_OFFSET    10
+#define    RTL8367C_ACT3_PRIORITY_MASK    0x400
+#define    RTL8367C_ACT3_SVID_OFFSET    9
+#define    RTL8367C_ACT3_SVID_MASK    0x200
+#define    RTL8367C_ACT3_CVID_OFFSET    8
+#define    RTL8367C_ACT3_CVID_MASK    0x100
+#define    RTL8367C_OP2_NOT_OFFSET    6
+#define    RTL8367C_OP2_NOT_MASK    0x40
+#define    RTL8367C_ACT2_GPIO_OFFSET    5
+#define    RTL8367C_ACT2_GPIO_MASK    0x20
+#define    RTL8367C_ACT2_FORWARD_OFFSET    4
+#define    RTL8367C_ACT2_FORWARD_MASK    0x10
+#define    RTL8367C_ACT2_POLICING_OFFSET    3
+#define    RTL8367C_ACT2_POLICING_MASK    0x8
+#define    RTL8367C_ACT2_PRIORITY_OFFSET    2
+#define    RTL8367C_ACT2_PRIORITY_MASK    0x4
+#define    RTL8367C_ACT2_SVID_OFFSET    1
+#define    RTL8367C_ACT2_SVID_MASK    0x2
+#define    RTL8367C_ACT2_CVID_OFFSET    0
+#define    RTL8367C_ACT2_CVID_MASK    0x1
+
+#define    RTL8367C_REG_ACL_ACTION_CTRL2    0x0616
+#define    RTL8367C_OP5_NOT_OFFSET    14
+#define    RTL8367C_OP5_NOT_MASK    0x4000
+#define    RTL8367C_ACT5_GPIO_OFFSET    13
+#define    RTL8367C_ACT5_GPIO_MASK    0x2000
+#define    RTL8367C_ACT5_FORWARD_OFFSET    12
+#define    RTL8367C_ACT5_FORWARD_MASK    0x1000
+#define    RTL8367C_ACT5_POLICING_OFFSET    11
+#define    RTL8367C_ACT5_POLICING_MASK    0x800
+#define    RTL8367C_ACT5_PRIORITY_OFFSET    10
+#define    RTL8367C_ACT5_PRIORITY_MASK    0x400
+#define    RTL8367C_ACT5_SVID_OFFSET    9
+#define    RTL8367C_ACT5_SVID_MASK    0x200
+#define    RTL8367C_ACT5_CVID_OFFSET    8
+#define    RTL8367C_ACT5_CVID_MASK    0x100
+#define    RTL8367C_OP4_NOT_OFFSET    6
+#define    RTL8367C_OP4_NOT_MASK    0x40
+#define    RTL8367C_ACT4_GPIO_OFFSET    5
+#define    RTL8367C_ACT4_GPIO_MASK    0x20
+#define    RTL8367C_ACT4_FORWARD_OFFSET    4
+#define    RTL8367C_ACT4_FORWARD_MASK    0x10
+#define    RTL8367C_ACT4_POLICING_OFFSET    3
+#define    RTL8367C_ACT4_POLICING_MASK    0x8
+#define    RTL8367C_ACT4_PRIORITY_OFFSET    2
+#define    RTL8367C_ACT4_PRIORITY_MASK    0x4
+#define    RTL8367C_ACT4_SVID_OFFSET    1
+#define    RTL8367C_ACT4_SVID_MASK    0x2
+#define    RTL8367C_ACT4_CVID_OFFSET    0
+#define    RTL8367C_ACT4_CVID_MASK    0x1
+
+#define    RTL8367C_REG_ACL_ACTION_CTRL3    0x0617
+#define    RTL8367C_OP7_NOT_OFFSET    14
+#define    RTL8367C_OP7_NOT_MASK    0x4000
+#define    RTL8367C_ACT7_GPIO_OFFSET    13
+#define    RTL8367C_ACT7_GPIO_MASK    0x2000
+#define    RTL8367C_ACT7_FORWARD_OFFSET    12
+#define    RTL8367C_ACT7_FORWARD_MASK    0x1000
+#define    RTL8367C_ACT7_POLICING_OFFSET    11
+#define    RTL8367C_ACT7_POLICING_MASK    0x800
+#define    RTL8367C_ACT7_PRIORITY_OFFSET    10
+#define    RTL8367C_ACT7_PRIORITY_MASK    0x400
+#define    RTL8367C_ACT7_SVID_OFFSET    9
+#define    RTL8367C_ACT7_SVID_MASK    0x200
+#define    RTL8367C_ACT7_CVID_OFFSET    8
+#define    RTL8367C_ACT7_CVID_MASK    0x100
+#define    RTL8367C_OP6_NOT_OFFSET    6
+#define    RTL8367C_OP6_NOT_MASK    0x40
+#define    RTL8367C_ACT6_GPIO_OFFSET    5
+#define    RTL8367C_ACT6_GPIO_MASK    0x20
+#define    RTL8367C_ACT6_FORWARD_OFFSET    4
+#define    RTL8367C_ACT6_FORWARD_MASK    0x10
+#define    RTL8367C_ACT6_POLICING_OFFSET    3
+#define    RTL8367C_ACT6_POLICING_MASK    0x8
+#define    RTL8367C_ACT6_PRIORITY_OFFSET    2
+#define    RTL8367C_ACT6_PRIORITY_MASK    0x4
+#define    RTL8367C_ACT6_SVID_OFFSET    1
+#define    RTL8367C_ACT6_SVID_MASK    0x2
+#define    RTL8367C_ACT6_CVID_OFFSET    0
+#define    RTL8367C_ACT6_CVID_MASK    0x1
+
+#define    RTL8367C_REG_ACL_ACTION_CTRL4    0x0618
+#define    RTL8367C_OP9_NOT_OFFSET    14
+#define    RTL8367C_OP9_NOT_MASK    0x4000
+#define    RTL8367C_ACT9_GPIO_OFFSET    13
+#define    RTL8367C_ACT9_GPIO_MASK    0x2000
+#define    RTL8367C_ACT9_FORWARD_OFFSET    12
+#define    RTL8367C_ACT9_FORWARD_MASK    0x1000
+#define    RTL8367C_ACT9_POLICING_OFFSET    11
+#define    RTL8367C_ACT9_POLICING_MASK    0x800
+#define    RTL8367C_ACT9_PRIORITY_OFFSET    10
+#define    RTL8367C_ACT9_PRIORITY_MASK    0x400
+#define    RTL8367C_ACT9_SVID_OFFSET    9
+#define    RTL8367C_ACT9_SVID_MASK    0x200
+#define    RTL8367C_ACT9_CVID_OFFSET    8
+#define    RTL8367C_ACT9_CVID_MASK    0x100
+#define    RTL8367C_OP8_NOT_OFFSET    6
+#define    RTL8367C_OP8_NOT_MASK    0x40
+#define    RTL8367C_ACT8_GPIO_OFFSET    5
+#define    RTL8367C_ACT8_GPIO_MASK    0x20
+#define    RTL8367C_ACT8_FORWARD_OFFSET    4
+#define    RTL8367C_ACT8_FORWARD_MASK    0x10
+#define    RTL8367C_ACT8_POLICING_OFFSET    3
+#define    RTL8367C_ACT8_POLICING_MASK    0x8
+#define    RTL8367C_ACT8_PRIORITY_OFFSET    2
+#define    RTL8367C_ACT8_PRIORITY_MASK    0x4
+#define    RTL8367C_ACT8_SVID_OFFSET    1
+#define    RTL8367C_ACT8_SVID_MASK    0x2
+#define    RTL8367C_ACT8_CVID_OFFSET    0
+#define    RTL8367C_ACT8_CVID_MASK    0x1
+
+#define    RTL8367C_REG_ACL_ACTION_CTRL5    0x0619
+#define    RTL8367C_OP11_NOT_OFFSET    14
+#define    RTL8367C_OP11_NOT_MASK    0x4000
+#define    RTL8367C_ACT11_GPIO_OFFSET    13
+#define    RTL8367C_ACT11_GPIO_MASK    0x2000
+#define    RTL8367C_ACT11_FORWARD_OFFSET    12
+#define    RTL8367C_ACT11_FORWARD_MASK    0x1000
+#define    RTL8367C_ACT11_POLICING_OFFSET    11
+#define    RTL8367C_ACT11_POLICING_MASK    0x800
+#define    RTL8367C_ACT11_PRIORITY_OFFSET    10
+#define    RTL8367C_ACT11_PRIORITY_MASK    0x400
+#define    RTL8367C_ACT11_SVID_OFFSET    9
+#define    RTL8367C_ACT11_SVID_MASK    0x200
+#define    RTL8367C_ACT11_CVID_OFFSET    8
+#define    RTL8367C_ACT11_CVID_MASK    0x100
+#define    RTL8367C_OP10_NOT_OFFSET    6
+#define    RTL8367C_OP10_NOT_MASK    0x40
+#define    RTL8367C_ACT10_GPIO_OFFSET    5
+#define    RTL8367C_ACT10_GPIO_MASK    0x20
+#define    RTL8367C_ACT10_FORWARD_OFFSET    4
+#define    RTL8367C_ACT10_FORWARD_MASK    0x10
+#define    RTL8367C_ACT10_POLICING_OFFSET    3
+#define    RTL8367C_ACT10_POLICING_MASK    0x8
+#define    RTL8367C_ACT10_PRIORITY_OFFSET    2
+#define    RTL8367C_ACT10_PRIORITY_MASK    0x4
+#define    RTL8367C_ACT10_SVID_OFFSET    1
+#define    RTL8367C_ACT10_SVID_MASK    0x2
+#define    RTL8367C_ACT10_CVID_OFFSET    0
+#define    RTL8367C_ACT10_CVID_MASK    0x1
+
+#define    RTL8367C_REG_ACL_ACTION_CTRL6    0x061a
+#define    RTL8367C_OP13_NOT_OFFSET    14
+#define    RTL8367C_OP13_NOT_MASK    0x4000
+#define    RTL8367C_ACT13_GPIO_OFFSET    13
+#define    RTL8367C_ACT13_GPIO_MASK    0x2000
+#define    RTL8367C_ACT13_FORWARD_OFFSET    12
+#define    RTL8367C_ACT13_FORWARD_MASK    0x1000
+#define    RTL8367C_ACT13_POLICING_OFFSET    11
+#define    RTL8367C_ACT13_POLICING_MASK    0x800
+#define    RTL8367C_ACT13_PRIORITY_OFFSET    10
+#define    RTL8367C_ACT13_PRIORITY_MASK    0x400
+#define    RTL8367C_ACT13_SVID_OFFSET    9
+#define    RTL8367C_ACT13_SVID_MASK    0x200
+#define    RTL8367C_ACT13_CVID_OFFSET    8
+#define    RTL8367C_ACT13_CVID_MASK    0x100
+#define    RTL8367C_OP12_NOT_OFFSET    6
+#define    RTL8367C_OP12_NOT_MASK    0x40
+#define    RTL8367C_ACT12_GPIO_OFFSET    5
+#define    RTL8367C_ACT12_GPIO_MASK    0x20
+#define    RTL8367C_ACT12_FORWARD_OFFSET    4
+#define    RTL8367C_ACT12_FORWARD_MASK    0x10
+#define    RTL8367C_ACT12_POLICING_OFFSET    3
+#define    RTL8367C_ACT12_POLICING_MASK    0x8
+#define    RTL8367C_ACT12_PRIORITY_OFFSET    2
+#define    RTL8367C_ACT12_PRIORITY_MASK    0x4
+#define    RTL8367C_ACT12_SVID_OFFSET    1
+#define    RTL8367C_ACT12_SVID_MASK    0x2
+#define    RTL8367C_ACT12_CVID_OFFSET    0
+#define    RTL8367C_ACT12_CVID_MASK    0x1
+
+#define    RTL8367C_REG_ACL_ACTION_CTRL7    0x061b
+#define    RTL8367C_OP15_NOT_OFFSET    14
+#define    RTL8367C_OP15_NOT_MASK    0x4000
+#define    RTL8367C_ACT15_GPIO_OFFSET    13
+#define    RTL8367C_ACT15_GPIO_MASK    0x2000
+#define    RTL8367C_ACT15_FORWARD_OFFSET    12
+#define    RTL8367C_ACT15_FORWARD_MASK    0x1000
+#define    RTL8367C_ACT15_POLICING_OFFSET    11
+#define    RTL8367C_ACT15_POLICING_MASK    0x800
+#define    RTL8367C_ACT15_PRIORITY_OFFSET    10
+#define    RTL8367C_ACT15_PRIORITY_MASK    0x400
+#define    RTL8367C_ACT15_SVID_OFFSET    9
+#define    RTL8367C_ACT15_SVID_MASK    0x200
+#define    RTL8367C_ACT15_CVID_OFFSET    8
+#define    RTL8367C_ACT15_CVID_MASK    0x100
+#define    RTL8367C_OP14_NOT_OFFSET    6
+#define    RTL8367C_OP14_NOT_MASK    0x40
+#define    RTL8367C_ACT14_GPIO_OFFSET    5
+#define    RTL8367C_ACT14_GPIO_MASK    0x20
+#define    RTL8367C_ACT14_FORWARD_OFFSET    4
+#define    RTL8367C_ACT14_FORWARD_MASK    0x10
+#define    RTL8367C_ACT14_POLICING_OFFSET    3
+#define    RTL8367C_ACT14_POLICING_MASK    0x8
+#define    RTL8367C_ACT14_PRIORITY_OFFSET    2
+#define    RTL8367C_ACT14_PRIORITY_MASK    0x4
+#define    RTL8367C_ACT14_SVID_OFFSET    1
+#define    RTL8367C_ACT14_SVID_MASK    0x2
+#define    RTL8367C_ACT14_CVID_OFFSET    0
+#define    RTL8367C_ACT14_CVID_MASK    0x1
+
+#define    RTL8367C_REG_ACL_ACTION_CTRL8    0x061c
+#define    RTL8367C_OP17_NOT_OFFSET    14
+#define    RTL8367C_OP17_NOT_MASK    0x4000
+#define    RTL8367C_ACT17_GPIO_OFFSET    13
+#define    RTL8367C_ACT17_GPIO_MASK    0x2000
+#define    RTL8367C_ACT17_FORWARD_OFFSET    12
+#define    RTL8367C_ACT17_FORWARD_MASK    0x1000
+#define    RTL8367C_ACT17_POLICING_OFFSET    11
+#define    RTL8367C_ACT17_POLICING_MASK    0x800
+#define    RTL8367C_ACT17_PRIORITY_OFFSET    10
+#define    RTL8367C_ACT17_PRIORITY_MASK    0x400
+#define    RTL8367C_ACT17_SVID_OFFSET    9
+#define    RTL8367C_ACT17_SVID_MASK    0x200
+#define    RTL8367C_ACT17_CVID_OFFSET    8
+#define    RTL8367C_ACT17_CVID_MASK    0x100
+#define    RTL8367C_OP16_NOT_OFFSET    6
+#define    RTL8367C_OP16_NOT_MASK    0x40
+#define    RTL8367C_ACT16_GPIO_OFFSET    5
+#define    RTL8367C_ACT16_GPIO_MASK    0x20
+#define    RTL8367C_ACT16_FORWARD_OFFSET    4
+#define    RTL8367C_ACT16_FORWARD_MASK    0x10
+#define    RTL8367C_ACT16_POLICING_OFFSET    3
+#define    RTL8367C_ACT16_POLICING_MASK    0x8
+#define    RTL8367C_ACT16_PRIORITY_OFFSET    2
+#define    RTL8367C_ACT16_PRIORITY_MASK    0x4
+#define    RTL8367C_ACT16_SVID_OFFSET    1
+#define    RTL8367C_ACT16_SVID_MASK    0x2
+#define    RTL8367C_ACT16_CVID_OFFSET    0
+#define    RTL8367C_ACT16_CVID_MASK    0x1
+
+#define    RTL8367C_REG_ACL_ACTION_CTRL9    0x061d
+#define    RTL8367C_OP19_NOT_OFFSET    14
+#define    RTL8367C_OP19_NOT_MASK    0x4000
+#define    RTL8367C_ACT19_GPIO_OFFSET    13
+#define    RTL8367C_ACT19_GPIO_MASK    0x2000
+#define    RTL8367C_ACT19_FORWARD_OFFSET    12
+#define    RTL8367C_ACT19_FORWARD_MASK    0x1000
+#define    RTL8367C_ACT19_POLICING_OFFSET    11
+#define    RTL8367C_ACT19_POLICING_MASK    0x800
+#define    RTL8367C_ACT19_PRIORITY_OFFSET    10
+#define    RTL8367C_ACT19_PRIORITY_MASK    0x400
+#define    RTL8367C_ACT19_SVID_OFFSET    9
+#define    RTL8367C_ACT19_SVID_MASK    0x200
+#define    RTL8367C_ACT19_CVID_OFFSET    8
+#define    RTL8367C_ACT19_CVID_MASK    0x100
+#define    RTL8367C_OP18_NOT_OFFSET    6
+#define    RTL8367C_OP18_NOT_MASK    0x40
+#define    RTL8367C_ACT18_GPIO_OFFSET    5
+#define    RTL8367C_ACT18_GPIO_MASK    0x20
+#define    RTL8367C_ACT18_FORWARD_OFFSET    4
+#define    RTL8367C_ACT18_FORWARD_MASK    0x10
+#define    RTL8367C_ACT18_POLICING_OFFSET    3
+#define    RTL8367C_ACT18_POLICING_MASK    0x8
+#define    RTL8367C_ACT18_PRIORITY_OFFSET    2
+#define    RTL8367C_ACT18_PRIORITY_MASK    0x4
+#define    RTL8367C_ACT18_SVID_OFFSET    1
+#define    RTL8367C_ACT18_SVID_MASK    0x2
+#define    RTL8367C_ACT18_CVID_OFFSET    0
+#define    RTL8367C_ACT18_CVID_MASK    0x1
+
+#define    RTL8367C_REG_ACL_ACTION_CTRL10    0x061e
+#define    RTL8367C_OP21_NOT_OFFSET    14
+#define    RTL8367C_OP21_NOT_MASK    0x4000
+#define    RTL8367C_ACT21_GPIO_OFFSET    13
+#define    RTL8367C_ACT21_GPIO_MASK    0x2000
+#define    RTL8367C_ACT21_FORWARD_OFFSET    12
+#define    RTL8367C_ACT21_FORWARD_MASK    0x1000
+#define    RTL8367C_ACT21_POLICING_OFFSET    11
+#define    RTL8367C_ACT21_POLICING_MASK    0x800
+#define    RTL8367C_ACT21_PRIORITY_OFFSET    10
+#define    RTL8367C_ACT21_PRIORITY_MASK    0x400
+#define    RTL8367C_ACT21_SVID_OFFSET    9
+#define    RTL8367C_ACT21_SVID_MASK    0x200
+#define    RTL8367C_ACT21_CVID_OFFSET    8
+#define    RTL8367C_ACT21_CVID_MASK    0x100
+#define    RTL8367C_OP20_NOT_OFFSET    6
+#define    RTL8367C_OP20_NOT_MASK    0x40
+#define    RTL8367C_ACT20_GPIO_OFFSET    5
+#define    RTL8367C_ACT20_GPIO_MASK    0x20
+#define    RTL8367C_ACT20_FORWARD_OFFSET    4
+#define    RTL8367C_ACT20_FORWARD_MASK    0x10
+#define    RTL8367C_ACT20_POLICING_OFFSET    3
+#define    RTL8367C_ACT20_POLICING_MASK    0x8
+#define    RTL8367C_ACT20_PRIORITY_OFFSET    2
+#define    RTL8367C_ACT20_PRIORITY_MASK    0x4
+#define    RTL8367C_ACT20_SVID_OFFSET    1
+#define    RTL8367C_ACT20_SVID_MASK    0x2
+#define    RTL8367C_ACT20_CVID_OFFSET    0
+#define    RTL8367C_ACT20_CVID_MASK    0x1
+
+#define    RTL8367C_REG_ACL_ACTION_CTRL11    0x061f
+#define    RTL8367C_OP23_NOT_OFFSET    14
+#define    RTL8367C_OP23_NOT_MASK    0x4000
+#define    RTL8367C_ACT23_GPIO_OFFSET    13
+#define    RTL8367C_ACT23_GPIO_MASK    0x2000
+#define    RTL8367C_ACT23_FORWARD_OFFSET    12
+#define    RTL8367C_ACT23_FORWARD_MASK    0x1000
+#define    RTL8367C_ACT23_POLICING_OFFSET    11
+#define    RTL8367C_ACT23_POLICING_MASK    0x800
+#define    RTL8367C_ACT23_PRIORITY_OFFSET    10
+#define    RTL8367C_ACT23_PRIORITY_MASK    0x400
+#define    RTL8367C_ACT23_SVID_OFFSET    9
+#define    RTL8367C_ACT23_SVID_MASK    0x200
+#define    RTL8367C_ACT23_CVID_OFFSET    8
+#define    RTL8367C_ACT23_CVID_MASK    0x100
+#define    RTL8367C_OP22_NOT_OFFSET    6
+#define    RTL8367C_OP22_NOT_MASK    0x40
+#define    RTL8367C_ACT22_GPIO_OFFSET    5
+#define    RTL8367C_ACT22_GPIO_MASK    0x20
+#define    RTL8367C_ACT22_FORWARD_OFFSET    4
+#define    RTL8367C_ACT22_FORWARD_MASK    0x10
+#define    RTL8367C_ACT22_POLICING_OFFSET    3
+#define    RTL8367C_ACT22_POLICING_MASK    0x8
+#define    RTL8367C_ACT22_PRIORITY_OFFSET    2
+#define    RTL8367C_ACT22_PRIORITY_MASK    0x4
+#define    RTL8367C_ACT22_SVID_OFFSET    1
+#define    RTL8367C_ACT22_SVID_MASK    0x2
+#define    RTL8367C_ACT22_CVID_OFFSET    0
+#define    RTL8367C_ACT22_CVID_MASK    0x1
+
+#define    RTL8367C_REG_ACL_ACTION_CTRL12    0x0620
+#define    RTL8367C_OP25_NOT_OFFSET    14
+#define    RTL8367C_OP25_NOT_MASK    0x4000
+#define    RTL8367C_ACT25_GPIO_OFFSET    13
+#define    RTL8367C_ACT25_GPIO_MASK    0x2000
+#define    RTL8367C_ACT25_FORWARD_OFFSET    12
+#define    RTL8367C_ACT25_FORWARD_MASK    0x1000
+#define    RTL8367C_ACT25_POLICING_OFFSET    11
+#define    RTL8367C_ACT25_POLICING_MASK    0x800
+#define    RTL8367C_ACT25_PRIORITY_OFFSET    10
+#define    RTL8367C_ACT25_PRIORITY_MASK    0x400
+#define    RTL8367C_ACT25_SVID_OFFSET    9
+#define    RTL8367C_ACT25_SVID_MASK    0x200
+#define    RTL8367C_ACT25_CVID_OFFSET    8
+#define    RTL8367C_ACT25_CVID_MASK    0x100
+#define    RTL8367C_OP24_NOT_OFFSET    6
+#define    RTL8367C_OP24_NOT_MASK    0x40
+#define    RTL8367C_ACT24_GPIO_OFFSET    5
+#define    RTL8367C_ACT24_GPIO_MASK    0x20
+#define    RTL8367C_ACT24_FORWARD_OFFSET    4
+#define    RTL8367C_ACT24_FORWARD_MASK    0x10
+#define    RTL8367C_ACT24_POLICING_OFFSET    3
+#define    RTL8367C_ACT24_POLICING_MASK    0x8
+#define    RTL8367C_ACT24_PRIORITY_OFFSET    2
+#define    RTL8367C_ACT24_PRIORITY_MASK    0x4
+#define    RTL8367C_ACT24_SVID_OFFSET    1
+#define    RTL8367C_ACT24_SVID_MASK    0x2
+#define    RTL8367C_ACT24_CVID_OFFSET    0
+#define    RTL8367C_ACT24_CVID_MASK    0x1
+
+#define    RTL8367C_REG_ACL_ACTION_CTRL13    0x0621
+#define    RTL8367C_OP27_NOT_OFFSET    14
+#define    RTL8367C_OP27_NOT_MASK    0x4000
+#define    RTL8367C_ACT27_GPIO_OFFSET    13
+#define    RTL8367C_ACT27_GPIO_MASK    0x2000
+#define    RTL8367C_ACT27_FORWARD_OFFSET    12
+#define    RTL8367C_ACT27_FORWARD_MASK    0x1000
+#define    RTL8367C_ACT27_POLICING_OFFSET    11
+#define    RTL8367C_ACT27_POLICING_MASK    0x800
+#define    RTL8367C_ACT27_PRIORITY_OFFSET    10
+#define    RTL8367C_ACT27_PRIORITY_MASK    0x400
+#define    RTL8367C_ACT27_SVID_OFFSET    9
+#define    RTL8367C_ACT27_SVID_MASK    0x200
+#define    RTL8367C_ACT27_CVID_OFFSET    8
+#define    RTL8367C_ACT27_CVID_MASK    0x100
+#define    RTL8367C_OP26_NOT_OFFSET    6
+#define    RTL8367C_OP26_NOT_MASK    0x40
+#define    RTL8367C_ACT26_GPIO_OFFSET    5
+#define    RTL8367C_ACT26_GPIO_MASK    0x20
+#define    RTL8367C_ACT26_FORWARD_OFFSET    4
+#define    RTL8367C_ACT26_FORWARD_MASK    0x10
+#define    RTL8367C_ACT26_POLICING_OFFSET    3
+#define    RTL8367C_ACT26_POLICING_MASK    0x8
+#define    RTL8367C_ACT26_PRIORITY_OFFSET    2
+#define    RTL8367C_ACT26_PRIORITY_MASK    0x4
+#define    RTL8367C_ACT26_SVID_OFFSET    1
+#define    RTL8367C_ACT26_SVID_MASK    0x2
+#define    RTL8367C_ACT26_CVID_OFFSET    0
+#define    RTL8367C_ACT26_CVID_MASK    0x1
+
+#define    RTL8367C_REG_ACL_ACTION_CTRL14    0x0622
+#define    RTL8367C_OP29_NOT_OFFSET    14
+#define    RTL8367C_OP29_NOT_MASK    0x4000
+#define    RTL8367C_ACT29_GPIO_OFFSET    13
+#define    RTL8367C_ACT29_GPIO_MASK    0x2000
+#define    RTL8367C_ACT29_FORWARD_OFFSET    12
+#define    RTL8367C_ACT29_FORWARD_MASK    0x1000
+#define    RTL8367C_ACT29_POLICING_OFFSET    11
+#define    RTL8367C_ACT29_POLICING_MASK    0x800
+#define    RTL8367C_ACT29_PRIORITY_OFFSET    10
+#define    RTL8367C_ACT29_PRIORITY_MASK    0x400
+#define    RTL8367C_ACT29_SVID_OFFSET    9
+#define    RTL8367C_ACT29_SVID_MASK    0x200
+#define    RTL8367C_ACT29_CVID_OFFSET    8
+#define    RTL8367C_ACT29_CVID_MASK    0x100
+#define    RTL8367C_OP28_NOT_OFFSET    6
+#define    RTL8367C_OP28_NOT_MASK    0x40
+#define    RTL8367C_ACT28_GPIO_OFFSET    5
+#define    RTL8367C_ACT28_GPIO_MASK    0x20
+#define    RTL8367C_ACT28_FORWARD_OFFSET    4
+#define    RTL8367C_ACT28_FORWARD_MASK    0x10
+#define    RTL8367C_ACT28_POLICING_OFFSET    3
+#define    RTL8367C_ACT28_POLICING_MASK    0x8
+#define    RTL8367C_ACT28_PRIORITY_OFFSET    2
+#define    RTL8367C_ACT28_PRIORITY_MASK    0x4
+#define    RTL8367C_ACT28_SVID_OFFSET    1
+#define    RTL8367C_ACT28_SVID_MASK    0x2
+#define    RTL8367C_ACT28_CVID_OFFSET    0
+#define    RTL8367C_ACT28_CVID_MASK    0x1
+
+#define    RTL8367C_REG_ACL_ACTION_CTRL15    0x0623
+#define    RTL8367C_OP31_NOT_OFFSET    14
+#define    RTL8367C_OP31_NOT_MASK    0x4000
+#define    RTL8367C_ACT31_GPIO_OFFSET    13
+#define    RTL8367C_ACT31_GPIO_MASK    0x2000
+#define    RTL8367C_ACT31_FORWARD_OFFSET    12
+#define    RTL8367C_ACT31_FORWARD_MASK    0x1000
+#define    RTL8367C_ACT31_POLICING_OFFSET    11
+#define    RTL8367C_ACT31_POLICING_MASK    0x800
+#define    RTL8367C_ACT31_PRIORITY_OFFSET    10
+#define    RTL8367C_ACT31_PRIORITY_MASK    0x400
+#define    RTL8367C_ACT31_SVID_OFFSET    9
+#define    RTL8367C_ACT31_SVID_MASK    0x200
+#define    RTL8367C_ACT31_CVID_OFFSET    8
+#define    RTL8367C_ACT31_CVID_MASK    0x100
+#define    RTL8367C_OP30_NOT_OFFSET    6
+#define    RTL8367C_OP30_NOT_MASK    0x40
+#define    RTL8367C_ACT30_GPIO_OFFSET    5
+#define    RTL8367C_ACT30_GPIO_MASK    0x20
+#define    RTL8367C_ACT30_FORWARD_OFFSET    4
+#define    RTL8367C_ACT30_FORWARD_MASK    0x10
+#define    RTL8367C_ACT30_POLICING_OFFSET    3
+#define    RTL8367C_ACT30_POLICING_MASK    0x8
+#define    RTL8367C_ACT30_PRIORITY_OFFSET    2
+#define    RTL8367C_ACT30_PRIORITY_MASK    0x4
+#define    RTL8367C_ACT30_SVID_OFFSET    1
+#define    RTL8367C_ACT30_SVID_MASK    0x2
+#define    RTL8367C_ACT30_CVID_OFFSET    0
+#define    RTL8367C_ACT30_CVID_MASK    0x1
+
+#define    RTL8367C_REG_ACL_ACTION_CTRL16    0x0624
+#define    RTL8367C_OP33_NOT_OFFSET    14
+#define    RTL8367C_OP33_NOT_MASK    0x4000
+#define    RTL8367C_ACT33_GPIO_OFFSET    13
+#define    RTL8367C_ACT33_GPIO_MASK    0x2000
+#define    RTL8367C_ACT33_FORWARD_OFFSET    12
+#define    RTL8367C_ACT33_FORWARD_MASK    0x1000
+#define    RTL8367C_ACT33_POLICING_OFFSET    11
+#define    RTL8367C_ACT33_POLICING_MASK    0x800
+#define    RTL8367C_ACT33_PRIORITY_OFFSET    10
+#define    RTL8367C_ACT33_PRIORITY_MASK    0x400
+#define    RTL8367C_ACT33_SVID_OFFSET    9
+#define    RTL8367C_ACT33_SVID_MASK    0x200
+#define    RTL8367C_ACT33_CVID_OFFSET    8
+#define    RTL8367C_ACT33_CVID_MASK    0x100
+#define    RTL8367C_OP32_NOT_OFFSET    6
+#define    RTL8367C_OP32_NOT_MASK    0x40
+#define    RTL8367C_ACT32_GPIO_OFFSET    5
+#define    RTL8367C_ACT32_GPIO_MASK    0x20
+#define    RTL8367C_ACT32_FORWARD_OFFSET    4
+#define    RTL8367C_ACT32_FORWARD_MASK    0x10
+#define    RTL8367C_ACT32_POLICING_OFFSET    3
+#define    RTL8367C_ACT32_POLICING_MASK    0x8
+#define    RTL8367C_ACT32_PRIORITY_OFFSET    2
+#define    RTL8367C_ACT32_PRIORITY_MASK    0x4
+#define    RTL8367C_ACT32_SVID_OFFSET    1
+#define    RTL8367C_ACT32_SVID_MASK    0x2
+#define    RTL8367C_ACT32_CVID_OFFSET    0
+#define    RTL8367C_ACT32_CVID_MASK    0x1
+
+#define    RTL8367C_REG_ACL_ACTION_CTRL17    0x0625
+#define    RTL8367C_OP35_NOT_OFFSET    14
+#define    RTL8367C_OP35_NOT_MASK    0x4000
+#define    RTL8367C_ACT35_GPIO_OFFSET    13
+#define    RTL8367C_ACT35_GPIO_MASK    0x2000
+#define    RTL8367C_ACT35_FORWARD_OFFSET    12
+#define    RTL8367C_ACT35_FORWARD_MASK    0x1000
+#define    RTL8367C_ACT35_POLICING_OFFSET    11
+#define    RTL8367C_ACT35_POLICING_MASK    0x800
+#define    RTL8367C_ACT35_PRIORITY_OFFSET    10
+#define    RTL8367C_ACT35_PRIORITY_MASK    0x400
+#define    RTL8367C_ACT35_SVID_OFFSET    9
+#define    RTL8367C_ACT35_SVID_MASK    0x200
+#define    RTL8367C_ACT35_CVID_OFFSET    8
+#define    RTL8367C_ACT35_CVID_MASK    0x100
+#define    RTL8367C_OP34_NOT_OFFSET    6
+#define    RTL8367C_OP34_NOT_MASK    0x40
+#define    RTL8367C_ACT34_GPIO_OFFSET    5
+#define    RTL8367C_ACT34_GPIO_MASK    0x20
+#define    RTL8367C_ACT34_FORWARD_OFFSET    4
+#define    RTL8367C_ACT34_FORWARD_MASK    0x10
+#define    RTL8367C_ACT34_POLICING_OFFSET    3
+#define    RTL8367C_ACT34_POLICING_MASK    0x8
+#define    RTL8367C_ACT34_PRIORITY_OFFSET    2
+#define    RTL8367C_ACT34_PRIORITY_MASK    0x4
+#define    RTL8367C_ACT34_SVID_OFFSET    1
+#define    RTL8367C_ACT34_SVID_MASK    0x2
+#define    RTL8367C_ACT34_CVID_OFFSET    0
+#define    RTL8367C_ACT34_CVID_MASK    0x1
+
+#define    RTL8367C_REG_ACL_ACTION_CTRL18    0x0626
+#define    RTL8367C_OP37_NOT_OFFSET    14
+#define    RTL8367C_OP37_NOT_MASK    0x4000
+#define    RTL8367C_ACT37_GPIO_OFFSET    13
+#define    RTL8367C_ACT37_GPIO_MASK    0x2000
+#define    RTL8367C_ACT37_FORWARD_OFFSET    12
+#define    RTL8367C_ACT37_FORWARD_MASK    0x1000
+#define    RTL8367C_ACT37_POLICING_OFFSET    11
+#define    RTL8367C_ACT37_POLICING_MASK    0x800
+#define    RTL8367C_ACT37_PRIORITY_OFFSET    10
+#define    RTL8367C_ACT37_PRIORITY_MASK    0x400
+#define    RTL8367C_ACT37_SVID_OFFSET    9
+#define    RTL8367C_ACT37_SVID_MASK    0x200
+#define    RTL8367C_ACT37_CVID_OFFSET    8
+#define    RTL8367C_ACT37_CVID_MASK    0x100
+#define    RTL8367C_OP36_NOT_OFFSET    6
+#define    RTL8367C_OP36_NOT_MASK    0x40
+#define    RTL8367C_ACT36_GPIO_OFFSET    5
+#define    RTL8367C_ACT36_GPIO_MASK    0x20
+#define    RTL8367C_ACT36_FORWARD_OFFSET    4
+#define    RTL8367C_ACT36_FORWARD_MASK    0x10
+#define    RTL8367C_ACT36_POLICING_OFFSET    3
+#define    RTL8367C_ACT36_POLICING_MASK    0x8
+#define    RTL8367C_ACT36_PRIORITY_OFFSET    2
+#define    RTL8367C_ACT36_PRIORITY_MASK    0x4
+#define    RTL8367C_ACT36_SVID_OFFSET    1
+#define    RTL8367C_ACT36_SVID_MASK    0x2
+#define    RTL8367C_ACT36_CVID_OFFSET    0
+#define    RTL8367C_ACT36_CVID_MASK    0x1
+
+#define    RTL8367C_REG_ACL_ACTION_CTRL19    0x0627
+#define    RTL8367C_OP39_NOT_OFFSET    14
+#define    RTL8367C_OP39_NOT_MASK    0x4000
+#define    RTL8367C_ACT39_GPIO_OFFSET    13
+#define    RTL8367C_ACT39_GPIO_MASK    0x2000
+#define    RTL8367C_ACT39_FORWARD_OFFSET    12
+#define    RTL8367C_ACT39_FORWARD_MASK    0x1000
+#define    RTL8367C_ACT39_POLICING_OFFSET    11
+#define    RTL8367C_ACT39_POLICING_MASK    0x800
+#define    RTL8367C_ACT39_PRIORITY_OFFSET    10
+#define    RTL8367C_ACT39_PRIORITY_MASK    0x400
+#define    RTL8367C_ACT39_SVID_OFFSET    9
+#define    RTL8367C_ACT39_SVID_MASK    0x200
+#define    RTL8367C_ACT39_CVID_OFFSET    8
+#define    RTL8367C_ACT39_CVID_MASK    0x100
+#define    RTL8367C_OP38_NOT_OFFSET    6
+#define    RTL8367C_OP38_NOT_MASK    0x40
+#define    RTL8367C_ACT38_GPIO_OFFSET    5
+#define    RTL8367C_ACT38_GPIO_MASK    0x20
+#define    RTL8367C_ACT38_FORWARD_OFFSET    4
+#define    RTL8367C_ACT38_FORWARD_MASK    0x10
+#define    RTL8367C_ACT38_POLICING_OFFSET    3
+#define    RTL8367C_ACT38_POLICING_MASK    0x8
+#define    RTL8367C_ACT38_PRIORITY_OFFSET    2
+#define    RTL8367C_ACT38_PRIORITY_MASK    0x4
+#define    RTL8367C_ACT38_SVID_OFFSET    1
+#define    RTL8367C_ACT38_SVID_MASK    0x2
+#define    RTL8367C_ACT38_CVID_OFFSET    0
+#define    RTL8367C_ACT38_CVID_MASK    0x1
+
+#define    RTL8367C_REG_ACL_ACTION_CTRL20    0x0628
+#define    RTL8367C_OP41_NOT_OFFSET    14
+#define    RTL8367C_OP41_NOT_MASK    0x4000
+#define    RTL8367C_ACT41_GPIO_OFFSET    13
+#define    RTL8367C_ACT41_GPIO_MASK    0x2000
+#define    RTL8367C_ACT41_FORWARD_OFFSET    12
+#define    RTL8367C_ACT41_FORWARD_MASK    0x1000
+#define    RTL8367C_ACT41_POLICING_OFFSET    11
+#define    RTL8367C_ACT41_POLICING_MASK    0x800
+#define    RTL8367C_ACT41_PRIORITY_OFFSET    10
+#define    RTL8367C_ACT41_PRIORITY_MASK    0x400
+#define    RTL8367C_ACT41_SVID_OFFSET    9
+#define    RTL8367C_ACT41_SVID_MASK    0x200
+#define    RTL8367C_ACT41_CVID_OFFSET    8
+#define    RTL8367C_ACT41_CVID_MASK    0x100
+#define    RTL8367C_OP40_NOT_OFFSET    6
+#define    RTL8367C_OP40_NOT_MASK    0x40
+#define    RTL8367C_ACT40_GPIO_OFFSET    5
+#define    RTL8367C_ACT40_GPIO_MASK    0x20
+#define    RTL8367C_ACT40_FORWARD_OFFSET    4
+#define    RTL8367C_ACT40_FORWARD_MASK    0x10
+#define    RTL8367C_ACT40_POLICING_OFFSET    3
+#define    RTL8367C_ACT40_POLICING_MASK    0x8
+#define    RTL8367C_ACT40_PRIORITY_OFFSET    2
+#define    RTL8367C_ACT40_PRIORITY_MASK    0x4
+#define    RTL8367C_ACT40_SVID_OFFSET    1
+#define    RTL8367C_ACT40_SVID_MASK    0x2
+#define    RTL8367C_ACT40_CVID_OFFSET    0
+#define    RTL8367C_ACT40_CVID_MASK    0x1
+
+#define    RTL8367C_REG_ACL_ACTION_CTRL21    0x0629
+#define    RTL8367C_OP43_NOT_OFFSET    14
+#define    RTL8367C_OP43_NOT_MASK    0x4000
+#define    RTL8367C_ACT43_GPIO_OFFSET    13
+#define    RTL8367C_ACT43_GPIO_MASK    0x2000
+#define    RTL8367C_ACT43_FORWARD_OFFSET    12
+#define    RTL8367C_ACT43_FORWARD_MASK    0x1000
+#define    RTL8367C_ACT43_POLICING_OFFSET    11
+#define    RTL8367C_ACT43_POLICING_MASK    0x800
+#define    RTL8367C_ACT43_PRIORITY_OFFSET    10
+#define    RTL8367C_ACT43_PRIORITY_MASK    0x400
+#define    RTL8367C_ACT43_SVID_OFFSET    9
+#define    RTL8367C_ACT43_SVID_MASK    0x200
+#define    RTL8367C_ACT43_CVID_OFFSET    8
+#define    RTL8367C_ACT43_CVID_MASK    0x100
+#define    RTL8367C_OP42_NOT_OFFSET    6
+#define    RTL8367C_OP42_NOT_MASK    0x40
+#define    RTL8367C_ACT42_GPIO_OFFSET    5
+#define    RTL8367C_ACT42_GPIO_MASK    0x20
+#define    RTL8367C_ACT42_FORWARD_OFFSET    4
+#define    RTL8367C_ACT42_FORWARD_MASK    0x10
+#define    RTL8367C_ACT42_POLICING_OFFSET    3
+#define    RTL8367C_ACT42_POLICING_MASK    0x8
+#define    RTL8367C_ACT42_PRIORITY_OFFSET    2
+#define    RTL8367C_ACT42_PRIORITY_MASK    0x4
+#define    RTL8367C_ACT42_SVID_OFFSET    1
+#define    RTL8367C_ACT42_SVID_MASK    0x2
+#define    RTL8367C_ACT42_CVID_OFFSET    0
+#define    RTL8367C_ACT42_CVID_MASK    0x1
+
+#define    RTL8367C_REG_ACL_ACTION_CTRL22    0x062a
+#define    RTL8367C_OP45_NOT_OFFSET    14
+#define    RTL8367C_OP45_NOT_MASK    0x4000
+#define    RTL8367C_ACT45_GPIO_OFFSET    13
+#define    RTL8367C_ACT45_GPIO_MASK    0x2000
+#define    RTL8367C_ACT45_FORWARD_OFFSET    12
+#define    RTL8367C_ACT45_FORWARD_MASK    0x1000
+#define    RTL8367C_ACT45_POLICING_OFFSET    11
+#define    RTL8367C_ACT45_POLICING_MASK    0x800
+#define    RTL8367C_ACT45_PRIORITY_OFFSET    10
+#define    RTL8367C_ACT45_PRIORITY_MASK    0x400
+#define    RTL8367C_ACT45_SVID_OFFSET    9
+#define    RTL8367C_ACT45_SVID_MASK    0x200
+#define    RTL8367C_ACT45_CVID_OFFSET    8
+#define    RTL8367C_ACT45_CVID_MASK    0x100
+#define    RTL8367C_OP44_NOT_OFFSET    6
+#define    RTL8367C_OP44_NOT_MASK    0x40
+#define    RTL8367C_ACT44_GPIO_OFFSET    5
+#define    RTL8367C_ACT44_GPIO_MASK    0x20
+#define    RTL8367C_ACT44_FORWARD_OFFSET    4
+#define    RTL8367C_ACT44_FORWARD_MASK    0x10
+#define    RTL8367C_ACT44_POLICING_OFFSET    3
+#define    RTL8367C_ACT44_POLICING_MASK    0x8
+#define    RTL8367C_ACT44_PRIORITY_OFFSET    2
+#define    RTL8367C_ACT44_PRIORITY_MASK    0x4
+#define    RTL8367C_ACT44_SVID_OFFSET    1
+#define    RTL8367C_ACT44_SVID_MASK    0x2
+#define    RTL8367C_ACT44_CVID_OFFSET    0
+#define    RTL8367C_ACT44_CVID_MASK    0x1
+
+#define    RTL8367C_REG_ACL_ACTION_CTRL23    0x062b
+#define    RTL8367C_OP47_NOT_OFFSET    14
+#define    RTL8367C_OP47_NOT_MASK    0x4000
+#define    RTL8367C_ACT47_GPIO_OFFSET    13
+#define    RTL8367C_ACT47_GPIO_MASK    0x2000
+#define    RTL8367C_ACT47_FORWARD_OFFSET    12
+#define    RTL8367C_ACT47_FORWARD_MASK    0x1000
+#define    RTL8367C_ACT47_POLICING_OFFSET    11
+#define    RTL8367C_ACT47_POLICING_MASK    0x800
+#define    RTL8367C_ACT47_PRIORITY_OFFSET    10
+#define    RTL8367C_ACT47_PRIORITY_MASK    0x400
+#define    RTL8367C_ACT47_SVID_OFFSET    9
+#define    RTL8367C_ACT47_SVID_MASK    0x200
+#define    RTL8367C_ACT47_CVID_OFFSET    8
+#define    RTL8367C_ACT47_CVID_MASK    0x100
+#define    RTL8367C_OP46_NOT_OFFSET    6
+#define    RTL8367C_OP46_NOT_MASK    0x40
+#define    RTL8367C_ACT46_GPIO_OFFSET    5
+#define    RTL8367C_ACT46_GPIO_MASK    0x20
+#define    RTL8367C_ACT46_FORWARD_OFFSET    4
+#define    RTL8367C_ACT46_FORWARD_MASK    0x10
+#define    RTL8367C_ACT46_POLICING_OFFSET    3
+#define    RTL8367C_ACT46_POLICING_MASK    0x8
+#define    RTL8367C_ACT46_PRIORITY_OFFSET    2
+#define    RTL8367C_ACT46_PRIORITY_MASK    0x4
+#define    RTL8367C_ACT46_SVID_OFFSET    1
+#define    RTL8367C_ACT46_SVID_MASK    0x2
+#define    RTL8367C_ACT46_CVID_OFFSET    0
+#define    RTL8367C_ACT46_CVID_MASK    0x1
+
+#define    RTL8367C_REG_ACL_ACTION_CTRL24    0x062c
+#define    RTL8367C_OP49_NOT_OFFSET    14
+#define    RTL8367C_OP49_NOT_MASK    0x4000
+#define    RTL8367C_ACT49_GPIO_OFFSET    13
+#define    RTL8367C_ACT49_GPIO_MASK    0x2000
+#define    RTL8367C_ACT49_FORWARD_OFFSET    12
+#define    RTL8367C_ACT49_FORWARD_MASK    0x1000
+#define    RTL8367C_ACT49_POLICING_OFFSET    11
+#define    RTL8367C_ACT49_POLICING_MASK    0x800
+#define    RTL8367C_ACT49_PRIORITY_OFFSET    10
+#define    RTL8367C_ACT49_PRIORITY_MASK    0x400
+#define    RTL8367C_ACT49_SVID_OFFSET    9
+#define    RTL8367C_ACT49_SVID_MASK    0x200
+#define    RTL8367C_ACT49_CVID_OFFSET    8
+#define    RTL8367C_ACT49_CVID_MASK    0x100
+#define    RTL8367C_OP48_NOT_OFFSET    6
+#define    RTL8367C_OP48_NOT_MASK    0x40
+#define    RTL8367C_ACT48_GPIO_OFFSET    5
+#define    RTL8367C_ACT48_GPIO_MASK    0x20
+#define    RTL8367C_ACT48_FORWARD_OFFSET    4
+#define    RTL8367C_ACT48_FORWARD_MASK    0x10
+#define    RTL8367C_ACT48_POLICING_OFFSET    3
+#define    RTL8367C_ACT48_POLICING_MASK    0x8
+#define    RTL8367C_ACT48_PRIORITY_OFFSET    2
+#define    RTL8367C_ACT48_PRIORITY_MASK    0x4
+#define    RTL8367C_ACT48_SVID_OFFSET    1
+#define    RTL8367C_ACT48_SVID_MASK    0x2
+#define    RTL8367C_ACT48_CVID_OFFSET    0
+#define    RTL8367C_ACT48_CVID_MASK    0x1
+
+#define    RTL8367C_REG_ACL_ACTION_CTRL25    0x062d
+#define    RTL8367C_OP51_NOT_OFFSET    14
+#define    RTL8367C_OP51_NOT_MASK    0x4000
+#define    RTL8367C_ACT51_GPIO_OFFSET    13
+#define    RTL8367C_ACT51_GPIO_MASK    0x2000
+#define    RTL8367C_ACT51_FORWARD_OFFSET    12
+#define    RTL8367C_ACT51_FORWARD_MASK    0x1000
+#define    RTL8367C_ACT51_POLICING_OFFSET    11
+#define    RTL8367C_ACT51_POLICING_MASK    0x800
+#define    RTL8367C_ACT51_PRIORITY_OFFSET    10
+#define    RTL8367C_ACT51_PRIORITY_MASK    0x400
+#define    RTL8367C_ACT51_SVID_OFFSET    9
+#define    RTL8367C_ACT51_SVID_MASK    0x200
+#define    RTL8367C_ACT51_CVID_OFFSET    8
+#define    RTL8367C_ACT51_CVID_MASK    0x100
+#define    RTL8367C_OP50_NOT_OFFSET    6
+#define    RTL8367C_OP50_NOT_MASK    0x40
+#define    RTL8367C_ACT50_GPIO_OFFSET    5
+#define    RTL8367C_ACT50_GPIO_MASK    0x20
+#define    RTL8367C_ACT50_FORWARD_OFFSET    4
+#define    RTL8367C_ACT50_FORWARD_MASK    0x10
+#define    RTL8367C_ACT50_POLICING_OFFSET    3
+#define    RTL8367C_ACT50_POLICING_MASK    0x8
+#define    RTL8367C_ACT50_PRIORITY_OFFSET    2
+#define    RTL8367C_ACT50_PRIORITY_MASK    0x4
+#define    RTL8367C_ACT50_SVID_OFFSET    1
+#define    RTL8367C_ACT50_SVID_MASK    0x2
+#define    RTL8367C_ACT50_CVID_OFFSET    0
+#define    RTL8367C_ACT50_CVID_MASK    0x1
+
+#define    RTL8367C_REG_ACL_ACTION_CTRL26    0x062e
+#define    RTL8367C_OP53_NOT_OFFSET    14
+#define    RTL8367C_OP53_NOT_MASK    0x4000
+#define    RTL8367C_ACT53_GPIO_OFFSET    13
+#define    RTL8367C_ACT53_GPIO_MASK    0x2000
+#define    RTL8367C_ACT53_FORWARD_OFFSET    12
+#define    RTL8367C_ACT53_FORWARD_MASK    0x1000
+#define    RTL8367C_ACT53_POLICING_OFFSET    11
+#define    RTL8367C_ACT53_POLICING_MASK    0x800
+#define    RTL8367C_ACT53_PRIORITY_OFFSET    10
+#define    RTL8367C_ACT53_PRIORITY_MASK    0x400
+#define    RTL8367C_ACT53_SVID_OFFSET    9
+#define    RTL8367C_ACT53_SVID_MASK    0x200
+#define    RTL8367C_ACT53_CVID_OFFSET    8
+#define    RTL8367C_ACT53_CVID_MASK    0x100
+#define    RTL8367C_OP52_NOT_OFFSET    6
+#define    RTL8367C_OP52_NOT_MASK    0x40
+#define    RTL8367C_ACT52_GPIO_OFFSET    5
+#define    RTL8367C_ACT52_GPIO_MASK    0x20
+#define    RTL8367C_ACT52_FORWARD_OFFSET    4
+#define    RTL8367C_ACT52_FORWARD_MASK    0x10
+#define    RTL8367C_ACT52_POLICING_OFFSET    3
+#define    RTL8367C_ACT52_POLICING_MASK    0x8
+#define    RTL8367C_ACT52_PRIORITY_OFFSET    2
+#define    RTL8367C_ACT52_PRIORITY_MASK    0x4
+#define    RTL8367C_ACT52_SVID_OFFSET    1
+#define    RTL8367C_ACT52_SVID_MASK    0x2
+#define    RTL8367C_ACT52_CVID_OFFSET    0
+#define    RTL8367C_ACT52_CVID_MASK    0x1
+
+#define    RTL8367C_REG_ACL_ACTION_CTRL27    0x062f
+#define    RTL8367C_OP55_NOT_OFFSET    14
+#define    RTL8367C_OP55_NOT_MASK    0x4000
+#define    RTL8367C_ACT55_GPIO_OFFSET    13
+#define    RTL8367C_ACT55_GPIO_MASK    0x2000
+#define    RTL8367C_ACT55_FORWARD_OFFSET    12
+#define    RTL8367C_ACT55_FORWARD_MASK    0x1000
+#define    RTL8367C_ACT55_POLICING_OFFSET    11
+#define    RTL8367C_ACT55_POLICING_MASK    0x800
+#define    RTL8367C_ACT55_PRIORITY_OFFSET    10
+#define    RTL8367C_ACT55_PRIORITY_MASK    0x400
+#define    RTL8367C_ACT55_SVID_OFFSET    9
+#define    RTL8367C_ACT55_SVID_MASK    0x200
+#define    RTL8367C_ACT55_CVID_OFFSET    8
+#define    RTL8367C_ACT55_CVID_MASK    0x100
+#define    RTL8367C_OP54_NOT_OFFSET    6
+#define    RTL8367C_OP54_NOT_MASK    0x40
+#define    RTL8367C_ACT54_GPIO_OFFSET    5
+#define    RTL8367C_ACT54_GPIO_MASK    0x20
+#define    RTL8367C_ACT54_FORWARD_OFFSET    4
+#define    RTL8367C_ACT54_FORWARD_MASK    0x10
+#define    RTL8367C_ACT54_POLICING_OFFSET    3
+#define    RTL8367C_ACT54_POLICING_MASK    0x8
+#define    RTL8367C_ACT54_PRIORITY_OFFSET    2
+#define    RTL8367C_ACT54_PRIORITY_MASK    0x4
+#define    RTL8367C_ACT54_SVID_OFFSET    1
+#define    RTL8367C_ACT54_SVID_MASK    0x2
+#define    RTL8367C_ACT54_CVID_OFFSET    0
+#define    RTL8367C_ACT54_CVID_MASK    0x1
+
+#define    RTL8367C_REG_ACL_ACTION_CTRL28    0x0630
+#define    RTL8367C_OP57_NOT_OFFSET    14
+#define    RTL8367C_OP57_NOT_MASK    0x4000
+#define    RTL8367C_ACT57_GPIO_OFFSET    13
+#define    RTL8367C_ACT57_GPIO_MASK    0x2000
+#define    RTL8367C_ACT57_FORWARD_OFFSET    12
+#define    RTL8367C_ACT57_FORWARD_MASK    0x1000
+#define    RTL8367C_ACT57_POLICING_OFFSET    11
+#define    RTL8367C_ACT57_POLICING_MASK    0x800
+#define    RTL8367C_ACT57_PRIORITY_OFFSET    10
+#define    RTL8367C_ACT57_PRIORITY_MASK    0x400
+#define    RTL8367C_ACT57_SVID_OFFSET    9
+#define    RTL8367C_ACT57_SVID_MASK    0x200
+#define    RTL8367C_ACT57_CVID_OFFSET    8
+#define    RTL8367C_ACT57_CVID_MASK    0x100
+#define    RTL8367C_OP56_NOT_OFFSET    6
+#define    RTL8367C_OP56_NOT_MASK    0x40
+#define    RTL8367C_ACT56_GPIO_OFFSET    5
+#define    RTL8367C_ACT56_GPIO_MASK    0x20
+#define    RTL8367C_ACT56_FORWARD_OFFSET    4
+#define    RTL8367C_ACT56_FORWARD_MASK    0x10
+#define    RTL8367C_ACT56_POLICING_OFFSET    3
+#define    RTL8367C_ACT56_POLICING_MASK    0x8
+#define    RTL8367C_ACT56_PRIORITY_OFFSET    2
+#define    RTL8367C_ACT56_PRIORITY_MASK    0x4
+#define    RTL8367C_ACT56_SVID_OFFSET    1
+#define    RTL8367C_ACT56_SVID_MASK    0x2
+#define    RTL8367C_ACT56_CVID_OFFSET    0
+#define    RTL8367C_ACT56_CVID_MASK    0x1
+
+#define    RTL8367C_REG_ACL_ACTION_CTRL29    0x0631
+#define    RTL8367C_OP59_NOT_OFFSET    14
+#define    RTL8367C_OP59_NOT_MASK    0x4000
+#define    RTL8367C_ACT59_GPIO_OFFSET    13
+#define    RTL8367C_ACT59_GPIO_MASK    0x2000
+#define    RTL8367C_ACT59_FORWARD_OFFSET    12
+#define    RTL8367C_ACT59_FORWARD_MASK    0x1000
+#define    RTL8367C_ACT59_POLICING_OFFSET    11
+#define    RTL8367C_ACT59_POLICING_MASK    0x800
+#define    RTL8367C_ACT59_PRIORITY_OFFSET    10
+#define    RTL8367C_ACT59_PRIORITY_MASK    0x400
+#define    RTL8367C_ACT59_SVID_OFFSET    9
+#define    RTL8367C_ACT59_SVID_MASK    0x200
+#define    RTL8367C_ACT59_CVID_OFFSET    8
+#define    RTL8367C_ACT59_CVID_MASK    0x100
+#define    RTL8367C_OP58_NOT_OFFSET    6
+#define    RTL8367C_OP58_NOT_MASK    0x40
+#define    RTL8367C_ACT58_GPIO_OFFSET    5
+#define    RTL8367C_ACT58_GPIO_MASK    0x20
+#define    RTL8367C_ACT58_FORWARD_OFFSET    4
+#define    RTL8367C_ACT58_FORWARD_MASK    0x10
+#define    RTL8367C_ACT58_POLICING_OFFSET    3
+#define    RTL8367C_ACT58_POLICING_MASK    0x8
+#define    RTL8367C_ACT58_PRIORITY_OFFSET    2
+#define    RTL8367C_ACT58_PRIORITY_MASK    0x4
+#define    RTL8367C_ACT58_SVID_OFFSET    1
+#define    RTL8367C_ACT58_SVID_MASK    0x2
+#define    RTL8367C_ACT58_CVID_OFFSET    0
+#define    RTL8367C_ACT58_CVID_MASK    0x1
+
+#define    RTL8367C_REG_ACL_ACTION_CTRL30    0x0632
+#define    RTL8367C_OP61_NOT_OFFSET    14
+#define    RTL8367C_OP61_NOT_MASK    0x4000
+#define    RTL8367C_ACT61_GPIO_OFFSET    13
+#define    RTL8367C_ACT61_GPIO_MASK    0x2000
+#define    RTL8367C_ACT61_FORWARD_OFFSET    12
+#define    RTL8367C_ACT61_FORWARD_MASK    0x1000
+#define    RTL8367C_ACT61_POLICING_OFFSET    11
+#define    RTL8367C_ACT61_POLICING_MASK    0x800
+#define    RTL8367C_ACT61_PRIORITY_OFFSET    10
+#define    RTL8367C_ACT61_PRIORITY_MASK    0x400
+#define    RTL8367C_ACT61_SVID_OFFSET    9
+#define    RTL8367C_ACT61_SVID_MASK    0x200
+#define    RTL8367C_ACT61_CVID_OFFSET    8
+#define    RTL8367C_ACT61_CVID_MASK    0x100
+#define    RTL8367C_OP60_NOT_OFFSET    6
+#define    RTL8367C_OP60_NOT_MASK    0x40
+#define    RTL8367C_ACT60_GPIO_OFFSET    5
+#define    RTL8367C_ACT60_GPIO_MASK    0x20
+#define    RTL8367C_ACT60_FORWARD_OFFSET    4
+#define    RTL8367C_ACT60_FORWARD_MASK    0x10
+#define    RTL8367C_ACT60_POLICING_OFFSET    3
+#define    RTL8367C_ACT60_POLICING_MASK    0x8
+#define    RTL8367C_ACT60_PRIORITY_OFFSET    2
+#define    RTL8367C_ACT60_PRIORITY_MASK    0x4
+#define    RTL8367C_ACT60_SVID_OFFSET    1
+#define    RTL8367C_ACT60_SVID_MASK    0x2
+#define    RTL8367C_ACT60_CVID_OFFSET    0
+#define    RTL8367C_ACT60_CVID_MASK    0x1
+
+#define    RTL8367C_REG_ACL_ACTION_CTRL31    0x0633
+#define    RTL8367C_OP63_NOT_OFFSET    14
+#define    RTL8367C_OP63_NOT_MASK    0x4000
+#define    RTL8367C_ACT63_GPIO_OFFSET    13
+#define    RTL8367C_ACT63_GPIO_MASK    0x2000
+#define    RTL8367C_ACT63_FORWARD_OFFSET    12
+#define    RTL8367C_ACT63_FORWARD_MASK    0x1000
+#define    RTL8367C_ACT63_POLICING_OFFSET    11
+#define    RTL8367C_ACT63_POLICING_MASK    0x800
+#define    RTL8367C_ACT63_PRIORITY_OFFSET    10
+#define    RTL8367C_ACT63_PRIORITY_MASK    0x400
+#define    RTL8367C_ACT63_SVID_OFFSET    9
+#define    RTL8367C_ACT63_SVID_MASK    0x200
+#define    RTL8367C_ACT63_CVID_OFFSET    8
+#define    RTL8367C_ACT63_CVID_MASK    0x100
+#define    RTL8367C_OP62_NOT_OFFSET    6
+#define    RTL8367C_OP62_NOT_MASK    0x40
+#define    RTL8367C_ACT62_GPIO_OFFSET    5
+#define    RTL8367C_ACT62_GPIO_MASK    0x20
+#define    RTL8367C_ACT62_FORWARD_OFFSET    4
+#define    RTL8367C_ACT62_FORWARD_MASK    0x10
+#define    RTL8367C_ACT62_POLICING_OFFSET    3
+#define    RTL8367C_ACT62_POLICING_MASK    0x8
+#define    RTL8367C_ACT62_PRIORITY_OFFSET    2
+#define    RTL8367C_ACT62_PRIORITY_MASK    0x4
+#define    RTL8367C_ACT62_SVID_OFFSET    1
+#define    RTL8367C_ACT62_SVID_MASK    0x2
+#define    RTL8367C_ACT62_CVID_OFFSET    0
+#define    RTL8367C_ACT62_CVID_MASK    0x1
+
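+/*
+ * ACL source/destination-port range entries.  Each of the 16 entries
+ * occupies three consecutive registers (stride 3 from entry 0 at 0x0635):
+ * CTRL0 and CTRL1 take a full 16-bit value each (presumably the range's
+ * two port bounds; no sub-fields are defined for them here), while CTRL2
+ * holds a 2-bit check-type field.
+ *
+ * Illustrative helper (an editor's sketch, not part of the vendor
+ * header), assuming entry 0..15 and ctrl 0..2:
+ */
+#define    RTL8367C_ACL_SDPORT_RANGE_REG(entry, ctrl) \
+	(RTL8367C_REG_ACL_SDPORT_RANGE_ENTRY0_CTRL0 + 3 * (entry) + (ctrl))
+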
+#define    RTL8367C_REG_ACL_SDPORT_RANGE_ENTRY0_CTRL0    0x0635
+
+#define    RTL8367C_REG_ACL_SDPORT_RANGE_ENTRY0_CTRL1    0x0636
+
+#define    RTL8367C_REG_ACL_SDPORT_RANGE_ENTRY0_CTRL2    0x0637
+#define    RTL8367C_ACL_SDPORT_RANGE_ENTRY0_CTRL2_OFFSET    0
+#define    RTL8367C_ACL_SDPORT_RANGE_ENTRY0_CTRL2_MASK    0x3
+
+#define    RTL8367C_REG_ACL_SDPORT_RANGE_ENTRY1_CTRL0    0x0638
+
+#define    RTL8367C_REG_ACL_SDPORT_RANGE_ENTRY1_CTRL1    0x0639
+
+#define    RTL8367C_REG_ACL_SDPORT_RANGE_ENTRY1_CTRL2    0x063a
+#define    RTL8367C_ACL_SDPORT_RANGE_ENTRY1_CTRL2_OFFSET    0
+#define    RTL8367C_ACL_SDPORT_RANGE_ENTRY1_CTRL2_MASK    0x3
+
+#define    RTL8367C_REG_ACL_SDPORT_RANGE_ENTRY2_CTRL0    0x063b
+
+#define    RTL8367C_REG_ACL_SDPORT_RANGE_ENTRY2_CTRL1    0x063c
+
+#define    RTL8367C_REG_ACL_SDPORT_RANGE_ENTRY2_CTRL2    0x063d
+#define    RTL8367C_ACL_SDPORT_RANGE_ENTRY2_CTRL2_OFFSET    0
+#define    RTL8367C_ACL_SDPORT_RANGE_ENTRY2_CTRL2_MASK    0x3
+
+#define    RTL8367C_REG_ACL_SDPORT_RANGE_ENTRY3_CTRL0    0x063e
+
+#define    RTL8367C_REG_ACL_SDPORT_RANGE_ENTRY3_CTRL1    0x063f
+
+#define    RTL8367C_REG_ACL_SDPORT_RANGE_ENTRY3_CTRL2    0x0640
+#define    RTL8367C_ACL_SDPORT_RANGE_ENTRY3_CTRL2_OFFSET    0
+#define    RTL8367C_ACL_SDPORT_RANGE_ENTRY3_CTRL2_MASK    0x3
+
+#define    RTL8367C_REG_ACL_SDPORT_RANGE_ENTRY4_CTRL0    0x0641
+
+#define    RTL8367C_REG_ACL_SDPORT_RANGE_ENTRY4_CTRL1    0x0642
+
+#define    RTL8367C_REG_ACL_SDPORT_RANGE_ENTRY4_CTRL2    0x0643
+#define    RTL8367C_ACL_SDPORT_RANGE_ENTRY4_CTRL2_OFFSET    0
+#define    RTL8367C_ACL_SDPORT_RANGE_ENTRY4_CTRL2_MASK    0x3
+
+#define    RTL8367C_REG_ACL_SDPORT_RANGE_ENTRY5_CTRL0    0x0644
+
+#define    RTL8367C_REG_ACL_SDPORT_RANGE_ENTRY5_CTRL1    0x0645
+
+#define    RTL8367C_REG_ACL_SDPORT_RANGE_ENTRY5_CTRL2    0x0646
+#define    RTL8367C_ACL_SDPORT_RANGE_ENTRY5_CTRL2_OFFSET    0
+#define    RTL8367C_ACL_SDPORT_RANGE_ENTRY5_CTRL2_MASK    0x3
+
+#define    RTL8367C_REG_ACL_SDPORT_RANGE_ENTRY6_CTRL0    0x0647
+
+#define    RTL8367C_REG_ACL_SDPORT_RANGE_ENTRY6_CTRL1    0x0648
+
+#define    RTL8367C_REG_ACL_SDPORT_RANGE_ENTRY6_CTRL2    0x0649
+#define    RTL8367C_ACL_SDPORT_RANGE_ENTRY6_CTRL2_OFFSET    0
+#define    RTL8367C_ACL_SDPORT_RANGE_ENTRY6_CTRL2_MASK    0x3
+
+#define    RTL8367C_REG_ACL_SDPORT_RANGE_ENTRY7_CTRL0    0x064a
+
+#define    RTL8367C_REG_ACL_SDPORT_RANGE_ENTRY7_CTRL1    0x064b
+
+#define    RTL8367C_REG_ACL_SDPORT_RANGE_ENTRY7_CTRL2    0x064c
+#define    RTL8367C_ACL_SDPORT_RANGE_ENTRY7_CTRL2_OFFSET    0
+#define    RTL8367C_ACL_SDPORT_RANGE_ENTRY7_CTRL2_MASK    0x3
+
+#define    RTL8367C_REG_ACL_SDPORT_RANGE_ENTRY8_CTRL0    0x064d
+
+#define    RTL8367C_REG_ACL_SDPORT_RANGE_ENTRY8_CTRL1    0x064e
+
+#define    RTL8367C_REG_ACL_SDPORT_RANGE_ENTRY8_CTRL2    0x064f
+#define    RTL8367C_ACL_SDPORT_RANGE_ENTRY8_CTRL2_OFFSET    0
+#define    RTL8367C_ACL_SDPORT_RANGE_ENTRY8_CTRL2_MASK    0x3
+
+#define    RTL8367C_REG_ACL_SDPORT_RANGE_ENTRY9_CTRL0    0x0650
+
+#define    RTL8367C_REG_ACL_SDPORT_RANGE_ENTRY9_CTRL1    0x0651
+
+#define    RTL8367C_REG_ACL_SDPORT_RANGE_ENTRY9_CTRL2    0x0652
+#define    RTL8367C_ACL_SDPORT_RANGE_ENTRY9_CTRL2_OFFSET    0
+#define    RTL8367C_ACL_SDPORT_RANGE_ENTRY9_CTRL2_MASK    0x3
+
+#define    RTL8367C_REG_ACL_SDPORT_RANGE_ENTRY10_CTRL0    0x0653
+
+#define    RTL8367C_REG_ACL_SDPORT_RANGE_ENTRY10_CTRL1    0x0654
+
+#define    RTL8367C_REG_ACL_SDPORT_RANGE_ENTRY10_CTRL2    0x0655
+#define    RTL8367C_ACL_SDPORT_RANGE_ENTRY10_CTRL2_OFFSET    0
+#define    RTL8367C_ACL_SDPORT_RANGE_ENTRY10_CTRL2_MASK    0x3
+
+#define    RTL8367C_REG_ACL_SDPORT_RANGE_ENTRY11_CTRL0    0x0656
+
+#define    RTL8367C_REG_ACL_SDPORT_RANGE_ENTRY11_CTRL1    0x0657
+
+#define    RTL8367C_REG_ACL_SDPORT_RANGE_ENTRY11_CTRL2    0x0658
+#define    RTL8367C_ACL_SDPORT_RANGE_ENTRY11_CTRL2_OFFSET    0
+#define    RTL8367C_ACL_SDPORT_RANGE_ENTRY11_CTRL2_MASK    0x3
+
+#define    RTL8367C_REG_ACL_SDPORT_RANGE_ENTRY12_CTRL0    0x0659
+
+#define    RTL8367C_REG_ACL_SDPORT_RANGE_ENTRY12_CTRL1    0x065a
+
+#define    RTL8367C_REG_ACL_SDPORT_RANGE_ENTRY12_CTRL2    0x065b
+#define    RTL8367C_ACL_SDPORT_RANGE_ENTRY12_CTRL2_OFFSET    0
+#define    RTL8367C_ACL_SDPORT_RANGE_ENTRY12_CTRL2_MASK    0x3
+
+#define    RTL8367C_REG_ACL_SDPORT_RANGE_ENTRY13_CTRL0    0x065c
+
+#define    RTL8367C_REG_ACL_SDPORT_RANGE_ENTRY13_CTRL1    0x065d
+
+#define    RTL8367C_REG_ACL_SDPORT_RANGE_ENTRY13_CTRL2    0x065e
+#define    RTL8367C_ACL_SDPORT_RANGE_ENTRY13_CTRL2_OFFSET    0
+#define    RTL8367C_ACL_SDPORT_RANGE_ENTRY13_CTRL2_MASK    0x3
+
+#define    RTL8367C_REG_ACL_SDPORT_RANGE_ENTRY14_CTRL0    0x065f
+
+#define    RTL8367C_REG_ACL_SDPORT_RANGE_ENTRY14_CTRL1    0x0660
+
+#define    RTL8367C_REG_ACL_SDPORT_RANGE_ENTRY14_CTRL2    0x0661
+#define    RTL8367C_ACL_SDPORT_RANGE_ENTRY14_CTRL2_OFFSET    0
+#define    RTL8367C_ACL_SDPORT_RANGE_ENTRY14_CTRL2_MASK    0x3
+
+#define    RTL8367C_REG_ACL_SDPORT_RANGE_ENTRY15_CTRL0    0x0662
+
+#define    RTL8367C_REG_ACL_SDPORT_RANGE_ENTRY15_CTRL1    0x0663
+
+#define    RTL8367C_REG_ACL_SDPORT_RANGE_ENTRY15_CTRL2    0x0664
+#define    RTL8367C_ACL_SDPORT_RANGE_ENTRY15_CTRL2_OFFSET    0
+#define    RTL8367C_ACL_SDPORT_RANGE_ENTRY15_CTRL2_MASK    0x3
+
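+/*
+ * ACL VID range entries.  Each of the 16 entries occupies two consecutive
+ * registers (stride 2 from entry 0 at 0x0665): CTRL0 holds a 12-bit VID
+ * (presumably the range's low bound), and CTRL1 holds the 2-bit check
+ * type in bits [13:12] plus the 12-bit high bound in bits [11:0].
+ *
+ * Illustrative helper (an editor's sketch, not part of the vendor
+ * header), assuming entry 0..15 and ctrl 0..1:
+ */
+#define    RTL8367C_ACL_VID_RANGE_REG(entry, ctrl) \
+	(RTL8367C_REG_ACL_VID_RANGE_ENTRY0_CTRL0 + 2 * (entry) + (ctrl))
+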
+#define    RTL8367C_REG_ACL_VID_RANGE_ENTRY0_CTRL0    0x0665
+#define    RTL8367C_ACL_VID_RANGE_ENTRY0_CTRL0_OFFSET    0
+#define    RTL8367C_ACL_VID_RANGE_ENTRY0_CTRL0_MASK    0xFFF
+
+#define    RTL8367C_REG_ACL_VID_RANGE_ENTRY0_CTRL1    0x0666
+#define    RTL8367C_ACL_VID_RANGE_ENTRY0_CTRL1_CHECK0_TYPE_OFFSET    12
+#define    RTL8367C_ACL_VID_RANGE_ENTRY0_CTRL1_CHECK0_TYPE_MASK    0x3000
+#define    RTL8367C_ACL_VID_RANGE_ENTRY0_CTRL1_CHECK0_HIGH_OFFSET    0
+#define    RTL8367C_ACL_VID_RANGE_ENTRY0_CTRL1_CHECK0_HIGH_MASK    0xFFF
+
+#define    RTL8367C_REG_ACL_VID_RANGE_ENTRY1_CTRL0    0x0667
+#define    RTL8367C_ACL_VID_RANGE_ENTRY1_CTRL0_OFFSET    0
+#define    RTL8367C_ACL_VID_RANGE_ENTRY1_CTRL0_MASK    0xFFF
+
+#define    RTL8367C_REG_ACL_VID_RANGE_ENTRY1_CTRL1    0x0668
+#define    RTL8367C_ACL_VID_RANGE_ENTRY1_CTRL1_CHECK1_TYPE_OFFSET    12
+#define    RTL8367C_ACL_VID_RANGE_ENTRY1_CTRL1_CHECK1_TYPE_MASK    0x3000
+#define    RTL8367C_ACL_VID_RANGE_ENTRY1_CTRL1_CHECK1_HIGH_OFFSET    0
+#define    RTL8367C_ACL_VID_RANGE_ENTRY1_CTRL1_CHECK1_HIGH_MASK    0xFFF
+
+#define    RTL8367C_REG_ACL_VID_RANGE_ENTRY2_CTRL0    0x0669
+#define    RTL8367C_ACL_VID_RANGE_ENTRY2_CTRL0_OFFSET    0
+#define    RTL8367C_ACL_VID_RANGE_ENTRY2_CTRL0_MASK    0xFFF
+
+#define    RTL8367C_REG_ACL_VID_RANGE_ENTRY2_CTRL1    0x066a
+#define    RTL8367C_ACL_VID_RANGE_ENTRY2_CTRL1_CHECK2_TYPE_OFFSET    12
+#define    RTL8367C_ACL_VID_RANGE_ENTRY2_CTRL1_CHECK2_TYPE_MASK    0x3000
+#define    RTL8367C_ACL_VID_RANGE_ENTRY2_CTRL1_CHECK2_HIGH_OFFSET    0
+#define    RTL8367C_ACL_VID_RANGE_ENTRY2_CTRL1_CHECK2_HIGH_MASK    0xFFF
+
+#define    RTL8367C_REG_ACL_VID_RANGE_ENTRY3_CTRL0    0x066b
+#define    RTL8367C_ACL_VID_RANGE_ENTRY3_CTRL0_OFFSET    0
+#define    RTL8367C_ACL_VID_RANGE_ENTRY3_CTRL0_MASK    0xFFF
+
+#define    RTL8367C_REG_ACL_VID_RANGE_ENTRY3_CTRL1    0x066c
+#define    RTL8367C_ACL_VID_RANGE_ENTRY3_CTRL1_CHECK3_TYPE_OFFSET    12
+#define    RTL8367C_ACL_VID_RANGE_ENTRY3_CTRL1_CHECK3_TYPE_MASK    0x3000
+#define    RTL8367C_ACL_VID_RANGE_ENTRY3_CTRL1_CHECK3_HIGH_OFFSET    0
+#define    RTL8367C_ACL_VID_RANGE_ENTRY3_CTRL1_CHECK3_HIGH_MASK    0xFFF
+
+#define    RTL8367C_REG_ACL_VID_RANGE_ENTRY4_CTRL0    0x066d
+#define    RTL8367C_ACL_VID_RANGE_ENTRY4_CTRL0_OFFSET    0
+#define    RTL8367C_ACL_VID_RANGE_ENTRY4_CTRL0_MASK    0xFFF
+
+#define    RTL8367C_REG_ACL_VID_RANGE_ENTRY4_CTRL1    0x066e
+#define    RTL8367C_ACL_VID_RANGE_ENTRY4_CTRL1_CHECK4_TYPE_OFFSET    12
+#define    RTL8367C_ACL_VID_RANGE_ENTRY4_CTRL1_CHECK4_TYPE_MASK    0x3000
+#define    RTL8367C_ACL_VID_RANGE_ENTRY4_CTRL1_CHECK4_HIGH_OFFSET    0
+#define    RTL8367C_ACL_VID_RANGE_ENTRY4_CTRL1_CHECK4_HIGH_MASK    0xFFF
+
+#define    RTL8367C_REG_ACL_VID_RANGE_ENTRY5_CTRL0    0x066f
+#define    RTL8367C_ACL_VID_RANGE_ENTRY5_CTRL0_OFFSET    0
+#define    RTL8367C_ACL_VID_RANGE_ENTRY5_CTRL0_MASK    0xFFF
+
+#define    RTL8367C_REG_ACL_VID_RANGE_ENTRY5_CTRL1    0x0670
+#define    RTL8367C_ACL_VID_RANGE_ENTRY5_CTRL1_CHECK5_TYPE_OFFSET    12
+#define    RTL8367C_ACL_VID_RANGE_ENTRY5_CTRL1_CHECK5_TYPE_MASK    0x3000
+#define    RTL8367C_ACL_VID_RANGE_ENTRY5_CTRL1_CHECK5_HIGH_OFFSET    0
+#define    RTL8367C_ACL_VID_RANGE_ENTRY5_CTRL1_CHECK5_HIGH_MASK    0xFFF
+
+#define    RTL8367C_REG_ACL_VID_RANGE_ENTRY6_CTRL0    0x0671
+#define    RTL8367C_ACL_VID_RANGE_ENTRY6_CTRL0_OFFSET    0
+#define    RTL8367C_ACL_VID_RANGE_ENTRY6_CTRL0_MASK    0xFFF
+
+#define    RTL8367C_REG_ACL_VID_RANGE_ENTRY6_CTRL1    0x0672
+#define    RTL8367C_ACL_VID_RANGE_ENTRY6_CTRL1_CHECK6_TYPE_OFFSET    12
+#define    RTL8367C_ACL_VID_RANGE_ENTRY6_CTRL1_CHECK6_TYPE_MASK    0x3000
+#define    RTL8367C_ACL_VID_RANGE_ENTRY6_CTRL1_CHECK6_HIGH_OFFSET    0
+#define    RTL8367C_ACL_VID_RANGE_ENTRY6_CTRL1_CHECK6_HIGH_MASK    0xFFF
+
+#define    RTL8367C_REG_ACL_VID_RANGE_ENTRY7_CTRL0    0x0673
+#define    RTL8367C_ACL_VID_RANGE_ENTRY7_CTRL0_OFFSET    0
+#define    RTL8367C_ACL_VID_RANGE_ENTRY7_CTRL0_MASK    0xFFF
+
+#define    RTL8367C_REG_ACL_VID_RANGE_ENTRY7_CTRL1    0x0674
+#define    RTL8367C_ACL_VID_RANGE_ENTRY7_CTRL1_CHECK7_TYPE_OFFSET    12
+#define    RTL8367C_ACL_VID_RANGE_ENTRY7_CTRL1_CHECK7_TYPE_MASK    0x3000
+#define    RTL8367C_ACL_VID_RANGE_ENTRY7_CTRL1_CHECK7_HIGH_OFFSET    0
+#define    RTL8367C_ACL_VID_RANGE_ENTRY7_CTRL1_CHECK7_HIGH_MASK    0xFFF
+
+#define    RTL8367C_REG_ACL_VID_RANGE_ENTRY8_CTRL0    0x0675
+#define    RTL8367C_ACL_VID_RANGE_ENTRY8_CTRL0_OFFSET    0
+#define    RTL8367C_ACL_VID_RANGE_ENTRY8_CTRL0_MASK    0xFFF
+
+#define    RTL8367C_REG_ACL_VID_RANGE_ENTRY8_CTRL1    0x0676
+#define    RTL8367C_ACL_VID_RANGE_ENTRY8_CTRL1_CHECK8_TYPE_OFFSET    12
+#define    RTL8367C_ACL_VID_RANGE_ENTRY8_CTRL1_CHECK8_TYPE_MASK    0x3000
+#define    RTL8367C_ACL_VID_RANGE_ENTRY8_CTRL1_CHECK8_HIGH_OFFSET    0
+#define    RTL8367C_ACL_VID_RANGE_ENTRY8_CTRL1_CHECK8_HIGH_MASK    0xFFF
+
+#define    RTL8367C_REG_ACL_VID_RANGE_ENTRY9_CTRL0    0x0677
+#define    RTL8367C_ACL_VID_RANGE_ENTRY9_CTRL0_OFFSET    0
+#define    RTL8367C_ACL_VID_RANGE_ENTRY9_CTRL0_MASK    0xFFF
+
+#define    RTL8367C_REG_ACL_VID_RANGE_ENTRY9_CTRL1    0x0678
+#define    RTL8367C_ACL_VID_RANGE_ENTRY9_CTRL1_CHECK9_TYPE_OFFSET    12
+#define    RTL8367C_ACL_VID_RANGE_ENTRY9_CTRL1_CHECK9_TYPE_MASK    0x3000
+#define    RTL8367C_ACL_VID_RANGE_ENTRY9_CTRL1_CHECK9_HIGH_OFFSET    0
+#define    RTL8367C_ACL_VID_RANGE_ENTRY9_CTRL1_CHECK9_HIGH_MASK    0xFFF
+
+#define    RTL8367C_REG_ACL_VID_RANGE_ENTRY10_CTRL0    0x0679
+#define    RTL8367C_ACL_VID_RANGE_ENTRY10_CTRL0_OFFSET    0
+#define    RTL8367C_ACL_VID_RANGE_ENTRY10_CTRL0_MASK    0xFFF
+
+#define    RTL8367C_REG_ACL_VID_RANGE_ENTRY10_CTRL1    0x067a
+#define    RTL8367C_ACL_VID_RANGE_ENTRY10_CTRL1_CHECK10_TYPE_OFFSET    12
+#define    RTL8367C_ACL_VID_RANGE_ENTRY10_CTRL1_CHECK10_TYPE_MASK    0x3000
+#define    RTL8367C_ACL_VID_RANGE_ENTRY10_CTRL1_CHECK10_HIGH_OFFSET    0
+#define    RTL8367C_ACL_VID_RANGE_ENTRY10_CTRL1_CHECK10_HIGH_MASK    0xFFF
+
+#define    RTL8367C_REG_ACL_VID_RANGE_ENTRY11_CTRL0    0x067b
+#define    RTL8367C_ACL_VID_RANGE_ENTRY11_CTRL0_OFFSET    0
+#define    RTL8367C_ACL_VID_RANGE_ENTRY11_CTRL0_MASK    0xFFF
+
+#define    RTL8367C_REG_ACL_VID_RANGE_ENTRY11_CTRL1    0x067c
+#define    RTL8367C_ACL_VID_RANGE_ENTRY11_CTRL1_CHECK11_TYPE_OFFSET    12
+#define    RTL8367C_ACL_VID_RANGE_ENTRY11_CTRL1_CHECK11_TYPE_MASK    0x3000
+#define    RTL8367C_ACL_VID_RANGE_ENTRY11_CTRL1_CHECK11_HIGH_OFFSET    0
+#define    RTL8367C_ACL_VID_RANGE_ENTRY11_CTRL1_CHECK11_HIGH_MASK    0xFFF
+
+#define    RTL8367C_REG_ACL_VID_RANGE_ENTRY12_CTRL0    0x067d
+#define    RTL8367C_ACL_VID_RANGE_ENTRY12_CTRL0_OFFSET    0
+#define    RTL8367C_ACL_VID_RANGE_ENTRY12_CTRL0_MASK    0xFFF
+
+#define    RTL8367C_REG_ACL_VID_RANGE_ENTRY12_CTRL1    0x067e
+#define    RTL8367C_ACL_VID_RANGE_ENTRY12_CTRL1_CHECK12_TYPE_OFFSET    12
+#define    RTL8367C_ACL_VID_RANGE_ENTRY12_CTRL1_CHECK12_TYPE_MASK    0x3000
+#define    RTL8367C_ACL_VID_RANGE_ENTRY12_CTRL1_CHECK12_HIGH_OFFSET    0
+#define    RTL8367C_ACL_VID_RANGE_ENTRY12_CTRL1_CHECK12_HIGH_MASK    0xFFF
+
+#define    RTL8367C_REG_ACL_VID_RANGE_ENTRY13_CTRL0    0x067f
+#define    RTL8367C_ACL_VID_RANGE_ENTRY13_CTRL0_OFFSET    0
+#define    RTL8367C_ACL_VID_RANGE_ENTRY13_CTRL0_MASK    0xFFF
+
+#define    RTL8367C_REG_ACL_VID_RANGE_ENTRY13_CTRL1    0x0680
+#define    RTL8367C_ACL_VID_RANGE_ENTRY13_CTRL1_CHECK13_TYPE_OFFSET    12
+#define    RTL8367C_ACL_VID_RANGE_ENTRY13_CTRL1_CHECK13_TYPE_MASK    0x3000
+#define    RTL8367C_ACL_VID_RANGE_ENTRY13_CTRL1_CHECK13_HIGH_OFFSET    0
+#define    RTL8367C_ACL_VID_RANGE_ENTRY13_CTRL1_CHECK13_HIGH_MASK    0xFFF
+
+#define    RTL8367C_REG_ACL_VID_RANGE_ENTRY14_CTRL0    0x0681
+#define    RTL8367C_ACL_VID_RANGE_ENTRY14_CTRL0_OFFSET    0
+#define    RTL8367C_ACL_VID_RANGE_ENTRY14_CTRL0_MASK    0xFFF
+
+#define    RTL8367C_REG_ACL_VID_RANGE_ENTRY14_CTRL1    0x0682
+#define    RTL8367C_ACL_VID_RANGE_ENTRY14_CTRL1_CHECK14_TYPE_OFFSET    12
+#define    RTL8367C_ACL_VID_RANGE_ENTRY14_CTRL1_CHECK14_TYPE_MASK    0x3000
+#define    RTL8367C_ACL_VID_RANGE_ENTRY14_CTRL1_CHECK14_HIGH_OFFSET    0
+#define    RTL8367C_ACL_VID_RANGE_ENTRY14_CTRL1_CHECK14_HIGH_MASK    0xFFF
+
+#define    RTL8367C_REG_ACL_VID_RANGE_ENTRY15_CTRL0    0x0683
+#define    RTL8367C_ACL_VID_RANGE_ENTRY15_CTRL0_OFFSET    0
+#define    RTL8367C_ACL_VID_RANGE_ENTRY15_CTRL0_MASK    0xFFF
+
+#define    RTL8367C_REG_ACL_VID_RANGE_ENTRY15_CTRL1    0x0684
+#define    RTL8367C_ACL_VID_RANGE_ENTRY15_CTRL1_CHECK15_TYPE_OFFSET    12
+#define    RTL8367C_ACL_VID_RANGE_ENTRY15_CTRL1_CHECK15_TYPE_MASK    0x3000
+#define    RTL8367C_ACL_VID_RANGE_ENTRY15_CTRL1_CHECK15_HIGH_OFFSET    0
+#define    RTL8367C_ACL_VID_RANGE_ENTRY15_CTRL1_CHECK15_HIGH_MASK    0xFFF
+
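+/*
+ * ACL IP range entries.  Each of the 16 entries occupies five consecutive
+ * registers (stride 5 from entry 0 at 0x0685): CTRL0..CTRL3 take a full
+ * 16-bit value each (presumably two 32-bit IPv4 bounds split into
+ * high/low halves; no sub-fields are defined for them here), and CTRL4
+ * holds a 3-bit check-type field.
+ *
+ * Illustrative helper (an editor's sketch, not part of the vendor
+ * header), assuming entry 0..15 and ctrl 0..4:
+ */
+#define    RTL8367C_ACL_IP_RANGE_REG(entry, ctrl) \
+	(RTL8367C_REG_ACL_IP_RANGE_ENTRY0_CTRL0 + 5 * (entry) + (ctrl))
+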
+#define    RTL8367C_REG_ACL_IP_RANGE_ENTRY0_CTRL0    0x0685
+
+#define    RTL8367C_REG_ACL_IP_RANGE_ENTRY0_CTRL1    0x0686
+
+#define    RTL8367C_REG_ACL_IP_RANGE_ENTRY0_CTRL2    0x0687
+
+#define    RTL8367C_REG_ACL_IP_RANGE_ENTRY0_CTRL3    0x0688
+
+#define    RTL8367C_REG_ACL_IP_RANGE_ENTRY0_CTRL4    0x0689
+#define    RTL8367C_ACL_IP_RANGE_ENTRY0_CTRL4_OFFSET    0
+#define    RTL8367C_ACL_IP_RANGE_ENTRY0_CTRL4_MASK    0x7
+
+#define    RTL8367C_REG_ACL_IP_RANGE_ENTRY1_CTRL0    0x068a
+
+#define    RTL8367C_REG_ACL_IP_RANGE_ENTRY1_CTRL1    0x068b
+
+#define    RTL8367C_REG_ACL_IP_RANGE_ENTRY1_CTRL2    0x068c
+
+#define    RTL8367C_REG_ACL_IP_RANGE_ENTRY1_CTRL3    0x068d
+
+#define    RTL8367C_REG_ACL_IP_RANGE_ENTRY1_CTRL4    0x068e
+#define    RTL8367C_ACL_IP_RANGE_ENTRY1_CTRL4_OFFSET    0
+#define    RTL8367C_ACL_IP_RANGE_ENTRY1_CTRL4_MASK    0x7
+
+#define    RTL8367C_REG_ACL_IP_RANGE_ENTRY2_CTRL0    0x068f
+
+#define    RTL8367C_REG_ACL_IP_RANGE_ENTRY2_CTRL1    0x0690
+
+#define    RTL8367C_REG_ACL_IP_RANGE_ENTRY2_CTRL2    0x0691
+
+#define    RTL8367C_REG_ACL_IP_RANGE_ENTRY2_CTRL3    0x0692
+
+#define    RTL8367C_REG_ACL_IP_RANGE_ENTRY2_CTRL4    0x0693
+#define    RTL8367C_ACL_IP_RANGE_ENTRY2_CTRL4_OFFSET    0
+#define    RTL8367C_ACL_IP_RANGE_ENTRY2_CTRL4_MASK    0x7
+
+#define    RTL8367C_REG_ACL_IP_RANGE_ENTRY3_CTRL0    0x0694
+
+#define    RTL8367C_REG_ACL_IP_RANGE_ENTRY3_CTRL1    0x0695
+
+#define    RTL8367C_REG_ACL_IP_RANGE_ENTRY3_CTRL2    0x0696
+
+#define    RTL8367C_REG_ACL_IP_RANGE_ENTRY3_CTRL3    0x0697
+
+#define    RTL8367C_REG_ACL_IP_RANGE_ENTRY3_CTRL4    0x0698
+#define    RTL8367C_ACL_IP_RANGE_ENTRY3_CTRL4_OFFSET    0
+#define    RTL8367C_ACL_IP_RANGE_ENTRY3_CTRL4_MASK    0x7
+
+#define    RTL8367C_REG_ACL_IP_RANGE_ENTRY4_CTRL0    0x0699
+
+#define    RTL8367C_REG_ACL_IP_RANGE_ENTRY4_CTRL1    0x069a
+
+#define    RTL8367C_REG_ACL_IP_RANGE_ENTRY4_CTRL2    0x069b
+
+#define    RTL8367C_REG_ACL_IP_RANGE_ENTRY4_CTRL3    0x069c
+
+#define    RTL8367C_REG_ACL_IP_RANGE_ENTRY4_CTRL4    0x069d
+#define    RTL8367C_ACL_IP_RANGE_ENTRY4_CTRL4_OFFSET    0
+#define    RTL8367C_ACL_IP_RANGE_ENTRY4_CTRL4_MASK    0x7
+
+#define    RTL8367C_REG_ACL_IP_RANGE_ENTRY5_CTRL0    0x069e
+
+#define    RTL8367C_REG_ACL_IP_RANGE_ENTRY5_CTRL1    0x069f
+
+#define    RTL8367C_REG_ACL_IP_RANGE_ENTRY5_CTRL2    0x06a0
+
+#define    RTL8367C_REG_ACL_IP_RANGE_ENTRY5_CTRL3    0x06a1
+
+#define    RTL8367C_REG_ACL_IP_RANGE_ENTRY5_CTRL4    0x06a2
+#define    RTL8367C_ACL_IP_RANGE_ENTRY5_CTRL4_OFFSET    0
+#define    RTL8367C_ACL_IP_RANGE_ENTRY5_CTRL4_MASK    0x7
+
+#define    RTL8367C_REG_ACL_IP_RANGE_ENTRY6_CTRL0    0x06a3
+
+#define    RTL8367C_REG_ACL_IP_RANGE_ENTRY6_CTRL1    0x06a4
+
+#define    RTL8367C_REG_ACL_IP_RANGE_ENTRY6_CTRL2    0x06a5
+
+#define    RTL8367C_REG_ACL_IP_RANGE_ENTRY6_CTRL3    0x06a6
+
+#define    RTL8367C_REG_ACL_IP_RANGE_ENTRY6_CTRL4    0x06a7
+#define    RTL8367C_ACL_IP_RANGE_ENTRY6_CTRL4_OFFSET    0
+#define    RTL8367C_ACL_IP_RANGE_ENTRY6_CTRL4_MASK    0x7
+
+#define    RTL8367C_REG_ACL_IP_RANGE_ENTRY7_CTRL0    0x06a8
+
+#define    RTL8367C_REG_ACL_IP_RANGE_ENTRY7_CTRL1    0x06a9
+
+#define    RTL8367C_REG_ACL_IP_RANGE_ENTRY7_CTRL2    0x06aa
+
+#define    RTL8367C_REG_ACL_IP_RANGE_ENTRY7_CTRL3    0x06ab
+
+#define    RTL8367C_REG_ACL_IP_RANGE_ENTRY7_CTRL4    0x06ac
+#define    RTL8367C_ACL_IP_RANGE_ENTRY7_CTRL4_OFFSET    0
+#define    RTL8367C_ACL_IP_RANGE_ENTRY7_CTRL4_MASK    0x7
+
+#define    RTL8367C_REG_ACL_IP_RANGE_ENTRY8_CTRL0    0x06ad
+
+#define    RTL8367C_REG_ACL_IP_RANGE_ENTRY8_CTRL1    0x06ae
+
+#define    RTL8367C_REG_ACL_IP_RANGE_ENTRY8_CTRL2    0x06af
+
+#define    RTL8367C_REG_ACL_IP_RANGE_ENTRY8_CTRL3    0x06b0
+
+#define    RTL8367C_REG_ACL_IP_RANGE_ENTRY8_CTRL4    0x06b1
+#define    RTL8367C_ACL_IP_RANGE_ENTRY8_CTRL4_OFFSET    0
+#define    RTL8367C_ACL_IP_RANGE_ENTRY8_CTRL4_MASK    0x7
+
+#define    RTL8367C_REG_ACL_IP_RANGE_ENTRY9_CTRL0    0x06b2
+
+#define    RTL8367C_REG_ACL_IP_RANGE_ENTRY9_CTRL1    0x06b3
+
+#define    RTL8367C_REG_ACL_IP_RANGE_ENTRY9_CTRL2    0x06b4
+
+#define    RTL8367C_REG_ACL_IP_RANGE_ENTRY9_CTRL3    0x06b5
+
+#define    RTL8367C_REG_ACL_IP_RANGE_ENTRY9_CTRL4    0x06b6
+#define    RTL8367C_ACL_IP_RANGE_ENTRY9_CTRL4_OFFSET    0
+#define    RTL8367C_ACL_IP_RANGE_ENTRY9_CTRL4_MASK    0x7
+
+#define    RTL8367C_REG_ACL_IP_RANGE_ENTRY10_CTRL0    0x06b7
+
+#define    RTL8367C_REG_ACL_IP_RANGE_ENTRY10_CTRL1    0x06b8
+
+#define    RTL8367C_REG_ACL_IP_RANGE_ENTRY10_CTRL2    0x06b9
+
+#define    RTL8367C_REG_ACL_IP_RANGE_ENTRY10_CTRL3    0x06ba
+
+#define    RTL8367C_REG_ACL_IP_RANGE_ENTRY10_CTRL4    0x06bb
+#define    RTL8367C_ACL_IP_RANGE_ENTRY10_CTRL4_OFFSET    0
+#define    RTL8367C_ACL_IP_RANGE_ENTRY10_CTRL4_MASK    0x7
+
+#define    RTL8367C_REG_ACL_IP_RANGE_ENTRY11_CTRL0    0x06bc
+
+#define    RTL8367C_REG_ACL_IP_RANGE_ENTRY11_CTRL1    0x06bd
+
+#define    RTL8367C_REG_ACL_IP_RANGE_ENTRY11_CTRL2    0x06be
+
+#define    RTL8367C_REG_ACL_IP_RANGE_ENTRY11_CTRL3    0x06bf
+
+#define    RTL8367C_REG_ACL_IP_RANGE_ENTRY11_CTRL4    0x06c0
+#define    RTL8367C_ACL_IP_RANGE_ENTRY11_CTRL4_OFFSET    0
+#define    RTL8367C_ACL_IP_RANGE_ENTRY11_CTRL4_MASK    0x7
+
+#define    RTL8367C_REG_ACL_IP_RANGE_ENTRY12_CTRL0    0x06c1
+
+#define    RTL8367C_REG_ACL_IP_RANGE_ENTRY12_CTRL1    0x06c2
+
+#define    RTL8367C_REG_ACL_IP_RANGE_ENTRY12_CTRL2    0x06c3
+
+#define    RTL8367C_REG_ACL_IP_RANGE_ENTRY12_CTRL3    0x06c4
+
+#define    RTL8367C_REG_ACL_IP_RANGE_ENTRY12_CTRL4    0x06c5
+#define    RTL8367C_ACL_IP_RANGE_ENTRY12_CTRL4_OFFSET    0
+#define    RTL8367C_ACL_IP_RANGE_ENTRY12_CTRL4_MASK    0x7
+
+#define    RTL8367C_REG_ACL_IP_RANGE_ENTRY13_CTRL0    0x06c6
+
+#define    RTL8367C_REG_ACL_IP_RANGE_ENTRY13_CTRL1    0x06c7
+
+#define    RTL8367C_REG_ACL_IP_RANGE_ENTRY13_CTRL2    0x06c8
+
+#define    RTL8367C_REG_ACL_IP_RANGE_ENTRY13_CTRL3    0x06c9
+
+#define    RTL8367C_REG_ACL_IP_RANGE_ENTRY13_CTRL4    0x06ca
+#define    RTL8367C_ACL_IP_RANGE_ENTRY13_CTRL4_OFFSET    0
+#define    RTL8367C_ACL_IP_RANGE_ENTRY13_CTRL4_MASK    0x7
+
+#define    RTL8367C_REG_ACL_IP_RANGE_ENTRY14_CTRL0    0x06cb
+
+#define    RTL8367C_REG_ACL_IP_RANGE_ENTRY14_CTRL1    0x06cc
+
+#define    RTL8367C_REG_ACL_IP_RANGE_ENTRY14_CTRL2    0x06cd
+
+#define    RTL8367C_REG_ACL_IP_RANGE_ENTRY14_CTRL3    0x06ce
+
+#define    RTL8367C_REG_ACL_IP_RANGE_ENTRY14_CTRL4    0x06cf
+#define    RTL8367C_ACL_IP_RANGE_ENTRY14_CTRL4_OFFSET    0
+#define    RTL8367C_ACL_IP_RANGE_ENTRY14_CTRL4_MASK    0x7
+
+#define    RTL8367C_REG_ACL_IP_RANGE_ENTRY15_CTRL0    0x06d0
+
+#define    RTL8367C_REG_ACL_IP_RANGE_ENTRY15_CTRL1    0x06d1
+
+#define    RTL8367C_REG_ACL_IP_RANGE_ENTRY15_CTRL2    0x06d2
+
+#define    RTL8367C_REG_ACL_IP_RANGE_ENTRY15_CTRL3    0x06d3
+
+#define    RTL8367C_REG_ACL_IP_RANGE_ENTRY15_CTRL4    0x06d4
+#define    RTL8367C_ACL_IP_RANGE_ENTRY15_CTRL4_OFFSET    0
+#define    RTL8367C_ACL_IP_RANGE_ENTRY15_CTRL4_MASK    0x7
+
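+/*
+ * Illustrative helpers, assuming the usual convention of this header that
+ * each field's _OFFSET is its LSB position and each _MASK is the bit mask
+ * already in place.  These two functions are a minimal sketch, not part of
+ * the generated register list:
+ */
+static inline unsigned int rtl8367c_field_get(unsigned int reg_val,
+                                              unsigned int mask,
+                                              unsigned int offset)
+{
+    /* isolate the field and shift it down to bit 0 */
+    return (reg_val & mask) >> offset;
+}
+
+static inline unsigned int rtl8367c_field_set(unsigned int reg_val,
+                                              unsigned int mask,
+                                              unsigned int offset,
+                                              unsigned int field_val)
+{
+    /* clear the field, then merge the new value in place */
+    return (reg_val & ~mask) | ((field_val << offset) & mask);
+}
+/*
+ * e.g. the 3-bit field of IP range entry 0, CTRL4:
+ *   v = rtl8367c_field_set(v, RTL8367C_ACL_IP_RANGE_ENTRY0_CTRL4_MASK,
+ *                          RTL8367C_ACL_IP_RANGE_ENTRY0_CTRL4_OFFSET, type);
+ */
+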
+#define    RTL8367C_REG_ACL_ENABLE    0x06d5
+#define    RTL8367C_PORT10_ENABLE_OFFSET    10
+#define    RTL8367C_PORT10_ENABLE_MASK    0x400
+#define    RTL8367C_PORT9_ENABLE_OFFSET    9
+#define    RTL8367C_PORT9_ENABLE_MASK    0x200
+#define    RTL8367C_PORT8_ENABLE_OFFSET    8
+#define    RTL8367C_PORT8_ENABLE_MASK    0x100
+#define    RTL8367C_PORT7_ENABLE_OFFSET    7
+#define    RTL8367C_PORT7_ENABLE_MASK    0x80
+#define    RTL8367C_PORT6_ENABLE_OFFSET    6
+#define    RTL8367C_PORT6_ENABLE_MASK    0x40
+#define    RTL8367C_PORT5_ENABLE_OFFSET    5
+#define    RTL8367C_PORT5_ENABLE_MASK    0x20
+#define    RTL8367C_PORT4_ENABLE_OFFSET    4
+#define    RTL8367C_PORT4_ENABLE_MASK    0x10
+#define    RTL8367C_PORT3_ENABLE_OFFSET    3
+#define    RTL8367C_PORT3_ENABLE_MASK    0x8
+#define    RTL8367C_PORT2_ENABLE_OFFSET    2
+#define    RTL8367C_PORT2_ENABLE_MASK    0x4
+#define    RTL8367C_PORT1_ENABLE_OFFSET    1
+#define    RTL8367C_PORT1_ENABLE_MASK    0x2
+#define    RTL8367C_PORT0_ENABLE_OFFSET    0
+#define    RTL8367C_PORT0_ENABLE_MASK    0x1
+
+#define    RTL8367C_REG_ACL_UNMATCH_PERMIT    0x06d6
+#define    RTL8367C_PORT10_PERMIT_OFFSET    10
+#define    RTL8367C_PORT10_PERMIT_MASK    0x400
+#define    RTL8367C_PORT9_PERMIT_OFFSET    9
+#define    RTL8367C_PORT9_PERMIT_MASK    0x200
+#define    RTL8367C_PORT8_PERMIT_OFFSET    8
+#define    RTL8367C_PORT8_PERMIT_MASK    0x100
+#define    RTL8367C_PORT7_PERMIT_OFFSET    7
+#define    RTL8367C_PORT7_PERMIT_MASK    0x80
+#define    RTL8367C_PORT6_PERMIT_OFFSET    6
+#define    RTL8367C_PORT6_PERMIT_MASK    0x40
+#define    RTL8367C_PORT5_PERMIT_OFFSET    5
+#define    RTL8367C_PORT5_PERMIT_MASK    0x20
+#define    RTL8367C_PORT4_PERMIT_OFFSET    4
+#define    RTL8367C_PORT4_PERMIT_MASK    0x10
+#define    RTL8367C_PORT3_PERMIT_OFFSET    3
+#define    RTL8367C_PORT3_PERMIT_MASK    0x8
+#define    RTL8367C_PORT2_PERMIT_OFFSET    2
+#define    RTL8367C_PORT2_PERMIT_MASK    0x4
+#define    RTL8367C_PORT1_PERMIT_OFFSET    1
+#define    RTL8367C_PORT1_PERMIT_MASK    0x2
+#define    RTL8367C_PORT0_PERMIT_OFFSET    0
+#define    RTL8367C_PORT0_PERMIT_MASK    0x1
+
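+/*
+ * Illustrative note: ACL_ENABLE and ACL_UNMATCH_PERMIT above are plain
+ * per-port bitmaps with bit N controlling port N, so a port's mask can
+ * also be derived generically (hypothetical macro, not in the generated
+ * list):
+ */
+#define    RTL8367C_ACL_PORT_BIT(port)    (0x1 << (port))    /* port 0..10 */
+/* e.g. RTL8367C_ACL_PORT_BIT(5) == RTL8367C_PORT5_ENABLE_MASK (0x20) */
+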
+#define    RTL8367C_REG_ACL_GPIO_POLARITY    0x06d7
+#define    RTL8367C_ACL_GPIO_POLARITY_OFFSET    0
+#define    RTL8367C_ACL_GPIO_POLARITY_MASK    0x1
+
+#define    RTL8367C_REG_ACL_LOG_CNT_TYPE    0x06d8
+#define    RTL8367C_ACL_LOG_CNT_TYPE_COUNTER15_TYPE_OFFSET    15
+#define    RTL8367C_ACL_LOG_CNT_TYPE_COUNTER15_TYPE_MASK    0x8000
+#define    RTL8367C_ACL_LOG_CNT_TYPE_COUNTER14_TYPE_OFFSET    14
+#define    RTL8367C_ACL_LOG_CNT_TYPE_COUNTER14_TYPE_MASK    0x4000
+#define    RTL8367C_ACL_LOG_CNT_TYPE_COUNTER13_TYPE_OFFSET    13
+#define    RTL8367C_ACL_LOG_CNT_TYPE_COUNTER13_TYPE_MASK    0x2000
+#define    RTL8367C_ACL_LOG_CNT_TYPE_COUNTER12_TYPE_OFFSET    12
+#define    RTL8367C_ACL_LOG_CNT_TYPE_COUNTER12_TYPE_MASK    0x1000
+#define    RTL8367C_ACL_LOG_CNT_TYPE_COUNTER11_TYPE_OFFSET    11
+#define    RTL8367C_ACL_LOG_CNT_TYPE_COUNTER11_TYPE_MASK    0x800
+#define    RTL8367C_ACL_LOG_CNT_TYPE_COUNTER10_TYPE_OFFSET    10
+#define    RTL8367C_ACL_LOG_CNT_TYPE_COUNTER10_TYPE_MASK    0x400
+#define    RTL8367C_ACL_LOG_CNT_TYPE_COUNTER9_TYPE_OFFSET    9
+#define    RTL8367C_ACL_LOG_CNT_TYPE_COUNTER9_TYPE_MASK    0x200
+#define    RTL8367C_ACL_LOG_CNT_TYPE_COUNTER8_TYPE_OFFSET    8
+#define    RTL8367C_ACL_LOG_CNT_TYPE_COUNTER8_TYPE_MASK    0x100
+#define    RTL8367C_ACL_LOG_CNT_TYPE_COUNTER7_TYPE_OFFSET    7
+#define    RTL8367C_ACL_LOG_CNT_TYPE_COUNTER7_TYPE_MASK    0x80
+#define    RTL8367C_ACL_LOG_CNT_TYPE_COUNTER6_TYPE_OFFSET    6
+#define    RTL8367C_ACL_LOG_CNT_TYPE_COUNTER6_TYPE_MASK    0x40
+#define    RTL8367C_ACL_LOG_CNT_TYPE_COUNTER5_TYPE_OFFSET    5
+#define    RTL8367C_ACL_LOG_CNT_TYPE_COUNTER5_TYPE_MASK    0x20
+#define    RTL8367C_ACL_LOG_CNT_TYPE_COUNTER4_TYPE_OFFSET    4
+#define    RTL8367C_ACL_LOG_CNT_TYPE_COUNTER4_TYPE_MASK    0x10
+#define    RTL8367C_ACL_LOG_CNT_TYPE_COUNTER3_TYPE_OFFSET    3
+#define    RTL8367C_ACL_LOG_CNT_TYPE_COUNTER3_TYPE_MASK    0x8
+#define    RTL8367C_ACL_LOG_CNT_TYPE_COUNTER2_TYPE_OFFSET    2
+#define    RTL8367C_ACL_LOG_CNT_TYPE_COUNTER2_TYPE_MASK    0x4
+#define    RTL8367C_ACL_LOG_CNT_TYPE_COUNTER1_TYPE_OFFSET    1
+#define    RTL8367C_ACL_LOG_CNT_TYPE_COUNTER1_TYPE_MASK    0x2
+#define    RTL8367C_ACL_LOG_CNT_TYPE_COUNTER0_TYPE_OFFSET    0
+#define    RTL8367C_ACL_LOG_CNT_TYPE_COUNTER0_TYPE_MASK    0x1
+
+#define    RTL8367C_REG_ACL_RESET_CFG    0x06d9
+#define    RTL8367C_ACL_RESET_CFG_OFFSET    0
+#define    RTL8367C_ACL_RESET_CFG_MASK    0x1
+
+#define    RTL8367C_REG_ACL_DUMMY00    0x06E0
+
+#define    RTL8367C_REG_ACL_DUMMY01    0x06E1
+
+#define    RTL8367C_REG_ACL_DUMMY02    0x06E2
+
+#define    RTL8367C_REG_ACL_DUMMY03    0x06E3
+
+#define    RTL8367C_REG_ACL_DUMMY04    0x06E4
+
+#define    RTL8367C_REG_ACL_DUMMY05    0x06E5
+
+#define    RTL8367C_REG_ACL_DUMMY06    0x06E6
+
+#define    RTL8367C_REG_ACL_DUMMY07    0x06E7
+
+#define    RTL8367C_REG_ACL_REASON_01    0x06E8
+#define    RTL8367C_ACL_ACT_1_OFFSET    8
+#define    RTL8367C_ACL_ACT_1_MASK    0xFF00
+#define    RTL8367C_ACL_ACT_0_OFFSET    0
+#define    RTL8367C_ACL_ACT_0_MASK    0xFF
+
+#define    RTL8367C_REG_ACL_REASON_23    0x06E9
+#define    RTL8367C_ACL_ACT_3_OFFSET    8
+#define    RTL8367C_ACL_ACT_3_MASK    0xFF00
+#define    RTL8367C_ACL_ACT_2_OFFSET    0
+#define    RTL8367C_ACL_ACT_2_MASK    0xFF
+
+#define    RTL8367C_REG_ACL_REASON_45    0x06EA
+#define    RTL8367C_ACL_ACT_5_OFFSET    8
+#define    RTL8367C_ACL_ACT_5_MASK    0xFF00
+#define    RTL8367C_ACL_ACT_4_OFFSET    0
+#define    RTL8367C_ACL_ACT_4_MASK    0xFF
+
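+/*
+ * Illustrative note: each ACL_REASON register above packs two 8-bit codes,
+ * the even index in bits 7:0 and the odd index in bits 15:8.  Hypothetical
+ * address/shift helpers for the codes 0..5 listed above (not part of the
+ * generated list):
+ */
+#define    RTL8367C_ACL_REASON_ADDR(n)    (RTL8367C_REG_ACL_REASON_01 + ((n) >> 1))
+#define    RTL8367C_ACL_REASON_SHIFT(n)    (((n) & 0x1) ? 8 : 0)
+/* e.g. code 4 -> register 0x06EA, shift 0, matching the ACL_ACT_4_* masks */
+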
+#define    RTL8367C_REG_ACL_ACCESS_MODE    0x06EB
+#define    RTL8367C_ACL_ACCESS_MODE_OFFSET    0
+#define    RTL8367C_ACL_ACCESS_MODE_MASK    0x1
+
+#define    RTL8367C_REG_ACL_ACTION_CTRL32    0x06F0
+#define    RTL8367C_OP65_NOT_OFFSET    14
+#define    RTL8367C_OP65_NOT_MASK    0x4000
+#define    RTL8367C_ACT65_GPIO_OFFSET    13
+#define    RTL8367C_ACT65_GPIO_MASK    0x2000
+#define    RTL8367C_ACT65_FORWARD_OFFSET    12
+#define    RTL8367C_ACT65_FORWARD_MASK    0x1000
+#define    RTL8367C_ACT65_POLICING_OFFSET    11
+#define    RTL8367C_ACT65_POLICING_MASK    0x800
+#define    RTL8367C_ACT65_PRIORITY_OFFSET    10
+#define    RTL8367C_ACT65_PRIORITY_MASK    0x400
+#define    RTL8367C_ACT65_SVID_OFFSET    9
+#define    RTL8367C_ACT65_SVID_MASK    0x200
+#define    RTL8367C_ACT65_CVID_OFFSET    8
+#define    RTL8367C_ACT65_CVID_MASK    0x100
+#define    RTL8367C_OP64_NOT_OFFSET    6
+#define    RTL8367C_OP64_NOT_MASK    0x40
+#define    RTL8367C_ACT64_GPIO_OFFSET    5
+#define    RTL8367C_ACT64_GPIO_MASK    0x20
+#define    RTL8367C_ACT64_FORWARD_OFFSET    4
+#define    RTL8367C_ACT64_FORWARD_MASK    0x10
+#define    RTL8367C_ACT64_POLICING_OFFSET    3
+#define    RTL8367C_ACT64_POLICING_MASK    0x8
+#define    RTL8367C_ACT64_PRIORITY_OFFSET    2
+#define    RTL8367C_ACT64_PRIORITY_MASK    0x4
+#define    RTL8367C_ACT64_SVID_OFFSET    1
+#define    RTL8367C_ACT64_SVID_MASK    0x2
+#define    RTL8367C_ACT64_CVID_OFFSET    0
+#define    RTL8367C_ACT64_CVID_MASK    0x1
+
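+/*
+ * Illustrative note: each ACL_ACTION_CTRL register carries the action
+ * flags of two rules, the even rule in bits 6:0 and the odd rule in bits
+ * 14:8, so ACTION_CTRL32..47 in this block cover rules 64..95.
+ * Hypothetical helpers for that range (not part of the generated list):
+ */
+#define    RTL8367C_ACL_ACTION_CTRL_ADDR(rule) \
+    (RTL8367C_REG_ACL_ACTION_CTRL32 + (((rule) - 64) >> 1))    /* rules 64..95 */
+#define    RTL8367C_ACL_ACTION_CTRL_SHIFT(rule)    (((rule) & 0x1) ? 8 : 0)
+/* e.g. rule 67 -> register 0x06F1, shift 8, matching the ACT67_* masks */
+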
+#define    RTL8367C_REG_ACL_ACTION_CTRL33    0x06F1
+#define    RTL8367C_OP67_NOT_OFFSET    14
+#define    RTL8367C_OP67_NOT_MASK    0x4000
+#define    RTL8367C_ACT67_GPIO_OFFSET    13
+#define    RTL8367C_ACT67_GPIO_MASK    0x2000
+#define    RTL8367C_ACT67_FORWARD_OFFSET    12
+#define    RTL8367C_ACT67_FORWARD_MASK    0x1000
+#define    RTL8367C_ACT67_POLICING_OFFSET    11
+#define    RTL8367C_ACT67_POLICING_MASK    0x800
+#define    RTL8367C_ACT67_PRIORITY_OFFSET    10
+#define    RTL8367C_ACT67_PRIORITY_MASK    0x400
+#define    RTL8367C_ACT67_SVID_OFFSET    9
+#define    RTL8367C_ACT67_SVID_MASK    0x200
+#define    RTL8367C_ACT67_CVID_OFFSET    8
+#define    RTL8367C_ACT67_CVID_MASK    0x100
+#define    RTL8367C_OP66_NOT_OFFSET    6
+#define    RTL8367C_OP66_NOT_MASK    0x40
+#define    RTL8367C_ACT66_GPIO_OFFSET    5
+#define    RTL8367C_ACT66_GPIO_MASK    0x20
+#define    RTL8367C_ACT66_FORWARD_OFFSET    4
+#define    RTL8367C_ACT66_FORWARD_MASK    0x10
+#define    RTL8367C_ACT66_POLICING_OFFSET    3
+#define    RTL8367C_ACT66_POLICING_MASK    0x8
+#define    RTL8367C_ACT66_PRIORITY_OFFSET    2
+#define    RTL8367C_ACT66_PRIORITY_MASK    0x4
+#define    RTL8367C_ACT66_SVID_OFFSET    1
+#define    RTL8367C_ACT66_SVID_MASK    0x2
+#define    RTL8367C_ACT66_CVID_OFFSET    0
+#define    RTL8367C_ACT66_CVID_MASK    0x1
+
+#define    RTL8367C_REG_ACL_ACTION_CTRL34    0x06F2
+#define    RTL8367C_OP69_NOT_OFFSET    14
+#define    RTL8367C_OP69_NOT_MASK    0x4000
+#define    RTL8367C_ACT69_GPIO_OFFSET    13
+#define    RTL8367C_ACT69_GPIO_MASK    0x2000
+#define    RTL8367C_ACT69_FORWARD_OFFSET    12
+#define    RTL8367C_ACT69_FORWARD_MASK    0x1000
+#define    RTL8367C_ACT69_POLICING_OFFSET    11
+#define    RTL8367C_ACT69_POLICING_MASK    0x800
+#define    RTL8367C_ACT69_PRIORITY_OFFSET    10
+#define    RTL8367C_ACT69_PRIORITY_MASK    0x400
+#define    RTL8367C_ACT69_SVID_OFFSET    9
+#define    RTL8367C_ACT69_SVID_MASK    0x200
+#define    RTL8367C_ACT69_CVID_OFFSET    8
+#define    RTL8367C_ACT69_CVID_MASK    0x100
+#define    RTL8367C_OP68_NOT_OFFSET    6
+#define    RTL8367C_OP68_NOT_MASK    0x40
+#define    RTL8367C_ACT68_GPIO_OFFSET    5
+#define    RTL8367C_ACT68_GPIO_MASK    0x20
+#define    RTL8367C_ACT68_FORWARD_OFFSET    4
+#define    RTL8367C_ACT68_FORWARD_MASK    0x10
+#define    RTL8367C_ACT68_POLICING_OFFSET    3
+#define    RTL8367C_ACT68_POLICING_MASK    0x8
+#define    RTL8367C_ACT68_PRIORITY_OFFSET    2
+#define    RTL8367C_ACT68_PRIORITY_MASK    0x4
+#define    RTL8367C_ACT68_SVID_OFFSET    1
+#define    RTL8367C_ACT68_SVID_MASK    0x2
+#define    RTL8367C_ACT68_CVID_OFFSET    0
+#define    RTL8367C_ACT68_CVID_MASK    0x1
+
+#define    RTL8367C_REG_ACL_ACTION_CTRL35    0x06F3
+#define    RTL8367C_OP71_NOT_OFFSET    14
+#define    RTL8367C_OP71_NOT_MASK    0x4000
+#define    RTL8367C_ACT71_GPIO_OFFSET    13
+#define    RTL8367C_ACT71_GPIO_MASK    0x2000
+#define    RTL8367C_ACT71_FORWARD_OFFSET    12
+#define    RTL8367C_ACT71_FORWARD_MASK    0x1000
+#define    RTL8367C_ACT71_POLICING_OFFSET    11
+#define    RTL8367C_ACT71_POLICING_MASK    0x800
+#define    RTL8367C_ACT71_PRIORITY_OFFSET    10
+#define    RTL8367C_ACT71_PRIORITY_MASK    0x400
+#define    RTL8367C_ACT71_SVID_OFFSET    9
+#define    RTL8367C_ACT71_SVID_MASK    0x200
+#define    RTL8367C_ACT71_CVID_OFFSET    8
+#define    RTL8367C_ACT71_CVID_MASK    0x100
+#define    RTL8367C_OP70_NOT_OFFSET    6
+#define    RTL8367C_OP70_NOT_MASK    0x40
+#define    RTL8367C_ACT70_GPIO_OFFSET    5
+#define    RTL8367C_ACT70_GPIO_MASK    0x20
+#define    RTL8367C_ACT70_FORWARD_OFFSET    4
+#define    RTL8367C_ACT70_FORWARD_MASK    0x10
+#define    RTL8367C_ACT70_POLICING_OFFSET    3
+#define    RTL8367C_ACT70_POLICING_MASK    0x8
+#define    RTL8367C_ACT70_PRIORITY_OFFSET    2
+#define    RTL8367C_ACT70_PRIORITY_MASK    0x4
+#define    RTL8367C_ACT70_SVID_OFFSET    1
+#define    RTL8367C_ACT70_SVID_MASK    0x2
+#define    RTL8367C_ACT70_CVID_OFFSET    0
+#define    RTL8367C_ACT70_CVID_MASK    0x1
+
+#define    RTL8367C_REG_ACL_ACTION_CTRL36    0x06F4
+#define    RTL8367C_OP73_NOT_OFFSET    14
+#define    RTL8367C_OP73_NOT_MASK    0x4000
+#define    RTL8367C_ACT73_GPIO_OFFSET    13
+#define    RTL8367C_ACT73_GPIO_MASK    0x2000
+#define    RTL8367C_ACT73_FORWARD_OFFSET    12
+#define    RTL8367C_ACT73_FORWARD_MASK    0x1000
+#define    RTL8367C_ACT73_POLICING_OFFSET    11
+#define    RTL8367C_ACT73_POLICING_MASK    0x800
+#define    RTL8367C_ACT73_PRIORITY_OFFSET    10
+#define    RTL8367C_ACT73_PRIORITY_MASK    0x400
+#define    RTL8367C_ACT73_SVID_OFFSET    9
+#define    RTL8367C_ACT73_SVID_MASK    0x200
+#define    RTL8367C_ACT73_CVID_OFFSET    8
+#define    RTL8367C_ACT73_CVID_MASK    0x100
+#define    RTL8367C_OP72_NOT_OFFSET    6
+#define    RTL8367C_OP72_NOT_MASK    0x40
+#define    RTL8367C_ACT72_GPIO_OFFSET    5
+#define    RTL8367C_ACT72_GPIO_MASK    0x20
+#define    RTL8367C_ACT72_FORWARD_OFFSET    4
+#define    RTL8367C_ACT72_FORWARD_MASK    0x10
+#define    RTL8367C_ACT72_POLICING_OFFSET    3
+#define    RTL8367C_ACT72_POLICING_MASK    0x8
+#define    RTL8367C_ACT72_PRIORITY_OFFSET    2
+#define    RTL8367C_ACT72_PRIORITY_MASK    0x4
+#define    RTL8367C_ACT72_SVID_OFFSET    1
+#define    RTL8367C_ACT72_SVID_MASK    0x2
+#define    RTL8367C_ACT72_CVID_OFFSET    0
+#define    RTL8367C_ACT72_CVID_MASK    0x1
+
+#define    RTL8367C_REG_ACL_ACTION_CTRL37    0x06F5
+#define    RTL8367C_OP75_NOT_OFFSET    14
+#define    RTL8367C_OP75_NOT_MASK    0x4000
+#define    RTL8367C_ACT75_GPIO_OFFSET    13
+#define    RTL8367C_ACT75_GPIO_MASK    0x2000
+#define    RTL8367C_ACT75_FORWARD_OFFSET    12
+#define    RTL8367C_ACT75_FORWARD_MASK    0x1000
+#define    RTL8367C_ACT75_POLICING_OFFSET    11
+#define    RTL8367C_ACT75_POLICING_MASK    0x800
+#define    RTL8367C_ACT75_PRIORITY_OFFSET    10
+#define    RTL8367C_ACT75_PRIORITY_MASK    0x400
+#define    RTL8367C_ACT75_SVID_OFFSET    9
+#define    RTL8367C_ACT75_SVID_MASK    0x200
+#define    RTL8367C_ACT75_CVID_OFFSET    8
+#define    RTL8367C_ACT75_CVID_MASK    0x100
+#define    RTL8367C_OP74_NOT_OFFSET    6
+#define    RTL8367C_OP74_NOT_MASK    0x40
+#define    RTL8367C_ACT74_GPIO_OFFSET    5
+#define    RTL8367C_ACT74_GPIO_MASK    0x20
+#define    RTL8367C_ACT74_FORWARD_OFFSET    4
+#define    RTL8367C_ACT74_FORWARD_MASK    0x10
+#define    RTL8367C_ACT74_POLICING_OFFSET    3
+#define    RTL8367C_ACT74_POLICING_MASK    0x8
+#define    RTL8367C_ACT74_PRIORITY_OFFSET    2
+#define    RTL8367C_ACT74_PRIORITY_MASK    0x4
+#define    RTL8367C_ACT74_SVID_OFFSET    1
+#define    RTL8367C_ACT74_SVID_MASK    0x2
+#define    RTL8367C_ACT74_CVID_OFFSET    0
+#define    RTL8367C_ACT74_CVID_MASK    0x1
+
+#define    RTL8367C_REG_ACL_ACTION_CTRL38    0x06F6
+#define    RTL8367C_OP77_NOT_OFFSET    14
+#define    RTL8367C_OP77_NOT_MASK    0x4000
+#define    RTL8367C_ACT77_GPIO_OFFSET    13
+#define    RTL8367C_ACT77_GPIO_MASK    0x2000
+#define    RTL8367C_ACT77_FORWARD_OFFSET    12
+#define    RTL8367C_ACT77_FORWARD_MASK    0x1000
+#define    RTL8367C_ACT77_POLICING_OFFSET    11
+#define    RTL8367C_ACT77_POLICING_MASK    0x800
+#define    RTL8367C_ACT77_PRIORITY_OFFSET    10
+#define    RTL8367C_ACT77_PRIORITY_MASK    0x400
+#define    RTL8367C_ACT77_SVID_OFFSET    9
+#define    RTL8367C_ACT77_SVID_MASK    0x200
+#define    RTL8367C_ACT77_CVID_OFFSET    8
+#define    RTL8367C_ACT77_CVID_MASK    0x100
+#define    RTL8367C_OP76_NOT_OFFSET    6
+#define    RTL8367C_OP76_NOT_MASK    0x40
+#define    RTL8367C_ACT76_GPIO_OFFSET    5
+#define    RTL8367C_ACT76_GPIO_MASK    0x20
+#define    RTL8367C_ACT76_FORWARD_OFFSET    4
+#define    RTL8367C_ACT76_FORWARD_MASK    0x10
+#define    RTL8367C_ACT76_POLICING_OFFSET    3
+#define    RTL8367C_ACT76_POLICING_MASK    0x8
+#define    RTL8367C_ACT76_PRIORITY_OFFSET    2
+#define    RTL8367C_ACT76_PRIORITY_MASK    0x4
+#define    RTL8367C_ACT76_SVID_OFFSET    1
+#define    RTL8367C_ACT76_SVID_MASK    0x2
+#define    RTL8367C_ACT76_CVID_OFFSET    0
+#define    RTL8367C_ACT76_CVID_MASK    0x1
+
+#define    RTL8367C_REG_ACL_ACTION_CTRL39    0x06F7
+#define    RTL8367C_OP79_NOT_OFFSET    14
+#define    RTL8367C_OP79_NOT_MASK    0x4000
+#define    RTL8367C_ACT79_GPIO_OFFSET    13
+#define    RTL8367C_ACT79_GPIO_MASK    0x2000
+#define    RTL8367C_ACT79_FORWARD_OFFSET    12
+#define    RTL8367C_ACT79_FORWARD_MASK    0x1000
+#define    RTL8367C_ACT79_POLICING_OFFSET    11
+#define    RTL8367C_ACT79_POLICING_MASK    0x800
+#define    RTL8367C_ACT79_PRIORITY_OFFSET    10
+#define    RTL8367C_ACT79_PRIORITY_MASK    0x400
+#define    RTL8367C_ACT79_SVID_OFFSET    9
+#define    RTL8367C_ACT79_SVID_MASK    0x200
+#define    RTL8367C_ACT79_CVID_OFFSET    8
+#define    RTL8367C_ACT79_CVID_MASK    0x100
+#define    RTL8367C_OP78_NOT_OFFSET    6
+#define    RTL8367C_OP78_NOT_MASK    0x40
+#define    RTL8367C_ACT78_GPIO_OFFSET    5
+#define    RTL8367C_ACT78_GPIO_MASK    0x20
+#define    RTL8367C_ACT78_FORWARD_OFFSET    4
+#define    RTL8367C_ACT78_FORWARD_MASK    0x10
+#define    RTL8367C_ACT78_POLICING_OFFSET    3
+#define    RTL8367C_ACT78_POLICING_MASK    0x8
+#define    RTL8367C_ACT78_PRIORITY_OFFSET    2
+#define    RTL8367C_ACT78_PRIORITY_MASK    0x4
+#define    RTL8367C_ACT78_SVID_OFFSET    1
+#define    RTL8367C_ACT78_SVID_MASK    0x2
+#define    RTL8367C_ACT78_CVID_OFFSET    0
+#define    RTL8367C_ACT78_CVID_MASK    0x1
+
+#define    RTL8367C_REG_ACL_ACTION_CTRL40    0x06F8
+#define    RTL8367C_OP81_NOT_OFFSET    14
+#define    RTL8367C_OP81_NOT_MASK    0x4000
+#define    RTL8367C_ACT81_GPIO_OFFSET    13
+#define    RTL8367C_ACT81_GPIO_MASK    0x2000
+#define    RTL8367C_ACT81_FORWARD_OFFSET    12
+#define    RTL8367C_ACT81_FORWARD_MASK    0x1000
+#define    RTL8367C_ACT81_POLICING_OFFSET    11
+#define    RTL8367C_ACT81_POLICING_MASK    0x800
+#define    RTL8367C_ACT81_PRIORITY_OFFSET    10
+#define    RTL8367C_ACT81_PRIORITY_MASK    0x400
+#define    RTL8367C_ACT81_SVID_OFFSET    9
+#define    RTL8367C_ACT81_SVID_MASK    0x200
+#define    RTL8367C_ACT81_CVID_OFFSET    8
+#define    RTL8367C_ACT81_CVID_MASK    0x100
+#define    RTL8367C_OP80_NOT_OFFSET    6
+#define    RTL8367C_OP80_NOT_MASK    0x40
+#define    RTL8367C_ACT80_GPIO_OFFSET    5
+#define    RTL8367C_ACT80_GPIO_MASK    0x20
+#define    RTL8367C_ACT80_FORWARD_OFFSET    4
+#define    RTL8367C_ACT80_FORWARD_MASK    0x10
+#define    RTL8367C_ACT80_POLICING_OFFSET    3
+#define    RTL8367C_ACT80_POLICING_MASK    0x8
+#define    RTL8367C_ACT80_PRIORITY_OFFSET    2
+#define    RTL8367C_ACT80_PRIORITY_MASK    0x4
+#define    RTL8367C_ACT80_SVID_OFFSET    1
+#define    RTL8367C_ACT80_SVID_MASK    0x2
+#define    RTL8367C_ACT80_CVID_OFFSET    0
+#define    RTL8367C_ACT80_CVID_MASK    0x1
+
+#define    RTL8367C_REG_ACL_ACTION_CTRL41    0x06F9
+#define    RTL8367C_OP83_NOT_OFFSET    14
+#define    RTL8367C_OP83_NOT_MASK    0x4000
+#define    RTL8367C_ACT83_GPIO_OFFSET    13
+#define    RTL8367C_ACT83_GPIO_MASK    0x2000
+#define    RTL8367C_ACT83_FORWARD_OFFSET    12
+#define    RTL8367C_ACT83_FORWARD_MASK    0x1000
+#define    RTL8367C_ACT83_POLICING_OFFSET    11
+#define    RTL8367C_ACT83_POLICING_MASK    0x800
+#define    RTL8367C_ACT83_PRIORITY_OFFSET    10
+#define    RTL8367C_ACT83_PRIORITY_MASK    0x400
+#define    RTL8367C_ACT83_SVID_OFFSET    9
+#define    RTL8367C_ACT83_SVID_MASK    0x200
+#define    RTL8367C_ACT83_CVID_OFFSET    8
+#define    RTL8367C_ACT83_CVID_MASK    0x100
+#define    RTL8367C_OP82_NOT_OFFSET    6
+#define    RTL8367C_OP82_NOT_MASK    0x40
+#define    RTL8367C_ACT82_GPIO_OFFSET    5
+#define    RTL8367C_ACT82_GPIO_MASK    0x20
+#define    RTL8367C_ACT82_FORWARD_OFFSET    4
+#define    RTL8367C_ACT82_FORWARD_MASK    0x10
+#define    RTL8367C_ACT82_POLICING_OFFSET    3
+#define    RTL8367C_ACT82_POLICING_MASK    0x8
+#define    RTL8367C_ACT82_PRIORITY_OFFSET    2
+#define    RTL8367C_ACT82_PRIORITY_MASK    0x4
+#define    RTL8367C_ACT82_SVID_OFFSET    1
+#define    RTL8367C_ACT82_SVID_MASK    0x2
+#define    RTL8367C_ACT82_CVID_OFFSET    0
+#define    RTL8367C_ACT82_CVID_MASK    0x1
+
+#define    RTL8367C_REG_ACL_ACTION_CTRL42    0x06FA
+#define    RTL8367C_OP85_NOT_OFFSET    14
+#define    RTL8367C_OP85_NOT_MASK    0x4000
+#define    RTL8367C_ACT85_GPIO_OFFSET    13
+#define    RTL8367C_ACT85_GPIO_MASK    0x2000
+#define    RTL8367C_ACT85_FORWARD_OFFSET    12
+#define    RTL8367C_ACT85_FORWARD_MASK    0x1000
+#define    RTL8367C_ACT85_POLICING_OFFSET    11
+#define    RTL8367C_ACT85_POLICING_MASK    0x800
+#define    RTL8367C_ACT85_PRIORITY_OFFSET    10
+#define    RTL8367C_ACT85_PRIORITY_MASK    0x400
+#define    RTL8367C_ACT85_SVID_OFFSET    9
+#define    RTL8367C_ACT85_SVID_MASK    0x200
+#define    RTL8367C_ACT85_CVID_OFFSET    8
+#define    RTL8367C_ACT85_CVID_MASK    0x100
+#define    RTL8367C_OP84_NOT_OFFSET    6
+#define    RTL8367C_OP84_NOT_MASK    0x40
+#define    RTL8367C_ACT84_GPIO_OFFSET    5
+#define    RTL8367C_ACT84_GPIO_MASK    0x20
+#define    RTL8367C_ACT84_FORWARD_OFFSET    4
+#define    RTL8367C_ACT84_FORWARD_MASK    0x10
+#define    RTL8367C_ACT84_POLICING_OFFSET    3
+#define    RTL8367C_ACT84_POLICING_MASK    0x8
+#define    RTL8367C_ACT84_PRIORITY_OFFSET    2
+#define    RTL8367C_ACT84_PRIORITY_MASK    0x4
+#define    RTL8367C_ACT84_SVID_OFFSET    1
+#define    RTL8367C_ACT84_SVID_MASK    0x2
+#define    RTL8367C_ACT84_CVID_OFFSET    0
+#define    RTL8367C_ACT84_CVID_MASK    0x1
+
+#define    RTL8367C_REG_ACL_ACTION_CTRL43    0x06FB
+#define    RTL8367C_OP87_NOT_OFFSET    14
+#define    RTL8367C_OP87_NOT_MASK    0x4000
+#define    RTL8367C_ACT87_GPIO_OFFSET    13
+#define    RTL8367C_ACT87_GPIO_MASK    0x2000
+#define    RTL8367C_ACT87_FORWARD_OFFSET    12
+#define    RTL8367C_ACT87_FORWARD_MASK    0x1000
+#define    RTL8367C_ACT87_POLICING_OFFSET    11
+#define    RTL8367C_ACT87_POLICING_MASK    0x800
+#define    RTL8367C_ACT87_PRIORITY_OFFSET    10
+#define    RTL8367C_ACT87_PRIORITY_MASK    0x400
+#define    RTL8367C_ACT87_SVID_OFFSET    9
+#define    RTL8367C_ACT87_SVID_MASK    0x200
+#define    RTL8367C_ACT87_CVID_OFFSET    8
+#define    RTL8367C_ACT87_CVID_MASK    0x100
+#define    RTL8367C_OP86_NOT_OFFSET    6
+#define    RTL8367C_OP86_NOT_MASK    0x40
+#define    RTL8367C_ACT86_GPIO_OFFSET    5
+#define    RTL8367C_ACT86_GPIO_MASK    0x20
+#define    RTL8367C_ACT86_FORWARD_OFFSET    4
+#define    RTL8367C_ACT86_FORWARD_MASK    0x10
+#define    RTL8367C_ACT86_POLICING_OFFSET    3
+#define    RTL8367C_ACT86_POLICING_MASK    0x8
+#define    RTL8367C_ACT86_PRIORITY_OFFSET    2
+#define    RTL8367C_ACT86_PRIORITY_MASK    0x4
+#define    RTL8367C_ACT86_SVID_OFFSET    1
+#define    RTL8367C_ACT86_SVID_MASK    0x2
+#define    RTL8367C_ACT86_CVID_OFFSET    0
+#define    RTL8367C_ACT86_CVID_MASK    0x1
+
+#define    RTL8367C_REG_ACL_ACTION_CTRL44    0x06FC
+#define    RTL8367C_OP89_NOT_OFFSET    14
+#define    RTL8367C_OP89_NOT_MASK    0x4000
+#define    RTL8367C_ACT89_GPIO_OFFSET    13
+#define    RTL8367C_ACT89_GPIO_MASK    0x2000
+#define    RTL8367C_ACT89_FORWARD_OFFSET    12
+#define    RTL8367C_ACT89_FORWARD_MASK    0x1000
+#define    RTL8367C_ACT89_POLICING_OFFSET    11
+#define    RTL8367C_ACT89_POLICING_MASK    0x800
+#define    RTL8367C_ACT89_PRIORITY_OFFSET    10
+#define    RTL8367C_ACT89_PRIORITY_MASK    0x400
+#define    RTL8367C_ACT89_SVID_OFFSET    9
+#define    RTL8367C_ACT89_SVID_MASK    0x200
+#define    RTL8367C_ACT89_CVID_OFFSET    8
+#define    RTL8367C_ACT89_CVID_MASK    0x100
+#define    RTL8367C_OP88_NOT_OFFSET    6
+#define    RTL8367C_OP88_NOT_MASK    0x40
+#define    RTL8367C_ACT88_GPIO_OFFSET    5
+#define    RTL8367C_ACT88_GPIO_MASK    0x20
+#define    RTL8367C_ACT88_FORWARD_OFFSET    4
+#define    RTL8367C_ACT88_FORWARD_MASK    0x10
+#define    RTL8367C_ACT88_POLICING_OFFSET    3
+#define    RTL8367C_ACT88_POLICING_MASK    0x8
+#define    RTL8367C_ACT88_PRIORITY_OFFSET    2
+#define    RTL8367C_ACT88_PRIORITY_MASK    0x4
+#define    RTL8367C_ACT88_SVID_OFFSET    1
+#define    RTL8367C_ACT88_SVID_MASK    0x2
+#define    RTL8367C_ACT88_CVID_OFFSET    0
+#define    RTL8367C_ACT88_CVID_MASK    0x1
+
+#define    RTL8367C_REG_ACL_ACTION_CTRL45    0x06FD
+#define    RTL8367C_OP91_NOT_OFFSET    14
+#define    RTL8367C_OP91_NOT_MASK    0x4000
+#define    RTL8367C_ACT91_GPIO_OFFSET    13
+#define    RTL8367C_ACT91_GPIO_MASK    0x2000
+#define    RTL8367C_ACT91_FORWARD_OFFSET    12
+#define    RTL8367C_ACT91_FORWARD_MASK    0x1000
+#define    RTL8367C_ACT91_POLICING_OFFSET    11
+#define    RTL8367C_ACT91_POLICING_MASK    0x800
+#define    RTL8367C_ACT91_PRIORITY_OFFSET    10
+#define    RTL8367C_ACT91_PRIORITY_MASK    0x400
+#define    RTL8367C_ACT91_SVID_OFFSET    9
+#define    RTL8367C_ACT91_SVID_MASK    0x200
+#define    RTL8367C_ACT91_CVID_OFFSET    8
+#define    RTL8367C_ACT91_CVID_MASK    0x100
+#define    RTL8367C_OP90_NOT_OFFSET    6
+#define    RTL8367C_OP90_NOT_MASK    0x40
+#define    RTL8367C_ACT90_GPIO_OFFSET    5
+#define    RTL8367C_ACT90_GPIO_MASK    0x20
+#define    RTL8367C_ACT90_FORWARD_OFFSET    4
+#define    RTL8367C_ACT90_FORWARD_MASK    0x10
+#define    RTL8367C_ACT90_POLICING_OFFSET    3
+#define    RTL8367C_ACT90_POLICING_MASK    0x8
+#define    RTL8367C_ACT90_PRIORITY_OFFSET    2
+#define    RTL8367C_ACT90_PRIORITY_MASK    0x4
+#define    RTL8367C_ACT90_SVID_OFFSET    1
+#define    RTL8367C_ACT90_SVID_MASK    0x2
+#define    RTL8367C_ACT90_CVID_OFFSET    0
+#define    RTL8367C_ACT90_CVID_MASK    0x1
+
+#define    RTL8367C_REG_ACL_ACTION_CTRL46    0x06FE
+#define    RTL8367C_OP93_NOT_OFFSET    14
+#define    RTL8367C_OP93_NOT_MASK    0x4000
+#define    RTL8367C_ACT93_GPIO_OFFSET    13
+#define    RTL8367C_ACT93_GPIO_MASK    0x2000
+#define    RTL8367C_ACT93_FORWARD_OFFSET    12
+#define    RTL8367C_ACT93_FORWARD_MASK    0x1000
+#define    RTL8367C_ACT93_POLICING_OFFSET    11
+#define    RTL8367C_ACT93_POLICING_MASK    0x800
+#define    RTL8367C_ACT93_PRIORITY_OFFSET    10
+#define    RTL8367C_ACT93_PRIORITY_MASK    0x400
+#define    RTL8367C_ACT93_SVID_OFFSET    9
+#define    RTL8367C_ACT93_SVID_MASK    0x200
+#define    RTL8367C_ACT93_CVID_OFFSET    8
+#define    RTL8367C_ACT93_CVID_MASK    0x100
+#define    RTL8367C_OP92_NOT_OFFSET    6
+#define    RTL8367C_OP92_NOT_MASK    0x40
+#define    RTL8367C_ACT92_GPIO_OFFSET    5
+#define    RTL8367C_ACT92_GPIO_MASK    0x20
+#define    RTL8367C_ACT92_FORWARD_OFFSET    4
+#define    RTL8367C_ACT92_FORWARD_MASK    0x10
+#define    RTL8367C_ACT92_POLICING_OFFSET    3
+#define    RTL8367C_ACT92_POLICING_MASK    0x8
+#define    RTL8367C_ACT92_PRIORITY_OFFSET    2
+#define    RTL8367C_ACT92_PRIORITY_MASK    0x4
+#define    RTL8367C_ACT92_SVID_OFFSET    1
+#define    RTL8367C_ACT92_SVID_MASK    0x2
+#define    RTL8367C_ACT92_CVID_OFFSET    0
+#define    RTL8367C_ACT92_CVID_MASK    0x1
+
+#define    RTL8367C_REG_ACL_ACTION_CTRL47    0x06FF
+#define    RTL8367C_OP95_NOT_OFFSET    14
+#define    RTL8367C_OP95_NOT_MASK    0x4000
+#define    RTL8367C_ACT95_GPIO_OFFSET    13
+#define    RTL8367C_ACT95_GPIO_MASK    0x2000
+#define    RTL8367C_ACT95_FORWARD_OFFSET    12
+#define    RTL8367C_ACT95_FORWARD_MASK    0x1000
+#define    RTL8367C_ACT95_POLICING_OFFSET    11
+#define    RTL8367C_ACT95_POLICING_MASK    0x800
+#define    RTL8367C_ACT95_PRIORITY_OFFSET    10
+#define    RTL8367C_ACT95_PRIORITY_MASK    0x400
+#define    RTL8367C_ACT95_SVID_OFFSET    9
+#define    RTL8367C_ACT95_SVID_MASK    0x200
+#define    RTL8367C_ACT95_CVID_OFFSET    8
+#define    RTL8367C_ACT95_CVID_MASK    0x100
+#define    RTL8367C_OP94_NOT_OFFSET    6
+#define    RTL8367C_OP94_NOT_MASK    0x40
+#define    RTL8367C_ACT94_GPIO_OFFSET    5
+#define    RTL8367C_ACT94_GPIO_MASK    0x20
+#define    RTL8367C_ACT94_FORWARD_OFFSET    4
+#define    RTL8367C_ACT94_FORWARD_MASK    0x10
+#define    RTL8367C_ACT94_POLICING_OFFSET    3
+#define    RTL8367C_ACT94_POLICING_MASK    0x8
+#define    RTL8367C_ACT94_PRIORITY_OFFSET    2
+#define    RTL8367C_ACT94_PRIORITY_MASK    0x4
+#define    RTL8367C_ACT94_SVID_OFFSET    1
+#define    RTL8367C_ACT94_SVID_MASK    0x2
+#define    RTL8367C_ACT94_CVID_OFFSET    0
+#define    RTL8367C_ACT94_CVID_MASK    0x1
+
+/* (16'h0700) cvlan_reg: C-VLAN register block, base address 0x0700 */
+
+#define    RTL8367C_REG_VLAN_PVID_CTRL0    0x0700
+#define    RTL8367C_PORT1_VIDX_OFFSET    8
+#define    RTL8367C_PORT1_VIDX_MASK    0x1F00
+#define    RTL8367C_PORT0_VIDX_OFFSET    0
+#define    RTL8367C_PORT0_VIDX_MASK    0x1F
+
+#define    RTL8367C_REG_VLAN_PVID_CTRL1    0x0701
+#define    RTL8367C_PORT3_VIDX_OFFSET    8
+#define    RTL8367C_PORT3_VIDX_MASK    0x1F00
+#define    RTL8367C_PORT2_VIDX_OFFSET    0
+#define    RTL8367C_PORT2_VIDX_MASK    0x1F
+
+#define    RTL8367C_REG_VLAN_PVID_CTRL2    0x0702
+#define    RTL8367C_PORT5_VIDX_OFFSET    8
+#define    RTL8367C_PORT5_VIDX_MASK    0x1F00
+#define    RTL8367C_PORT4_VIDX_OFFSET    0
+#define    RTL8367C_PORT4_VIDX_MASK    0x1F
+
+#define    RTL8367C_REG_VLAN_PVID_CTRL3    0x0703
+#define    RTL8367C_PORT7_VIDX_OFFSET    8
+#define    RTL8367C_PORT7_VIDX_MASK    0x1F00
+#define    RTL8367C_PORT6_VIDX_OFFSET    0
+#define    RTL8367C_PORT6_VIDX_MASK    0x1F
+
+#define    RTL8367C_REG_VLAN_PVID_CTRL4    0x0704
+#define    RTL8367C_PORT9_VIDX_OFFSET    8
+#define    RTL8367C_PORT9_VIDX_MASK    0x1F00
+#define    RTL8367C_PORT8_VIDX_OFFSET    0
+#define    RTL8367C_PORT8_VIDX_MASK    0x1F
+
+#define    RTL8367C_REG_VLAN_PVID_CTRL5    0x0705
+#define    RTL8367C_VLAN_PVID_CTRL5_OFFSET    0
+#define    RTL8367C_VLAN_PVID_CTRL5_MASK    0x1F
+
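+/*
+ * Illustrative note: the PVID control registers pack two 5-bit member-
+ * configuration indexes per word, port N at register 0x0700 + N/2 and bit
+ * offset (N & 1) * 8; PVID_CTRL5 holds the single index of port 10.
+ * Hypothetical helpers (not part of the generated list):
+ */
+#define    RTL8367C_VLAN_PVID_CTRL_ADDR(port) \
+    (RTL8367C_REG_VLAN_PVID_CTRL0 + ((port) >> 1))
+#define    RTL8367C_VLAN_PVID_CTRL_OFFSET(port)    (((port) & 0x1) << 3)
+/* e.g. port 9 -> register 0x0704, offset 8, matching the PORT9_VIDX_* masks */
+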
+#define    RTL8367C_REG_VLAN_PPB0_VALID    0x0708
+#define    RTL8367C_VLAN_PPB0_VALID_VALID_EXT_OFFSET    8
+#define    RTL8367C_VLAN_PPB0_VALID_VALID_EXT_MASK    0x700
+#define    RTL8367C_VLAN_PPB0_VALID_VALID_OFFSET    0
+#define    RTL8367C_VLAN_PPB0_VALID_VALID_MASK    0xFF
+
+#define    RTL8367C_REG_VLAN_PPB0_CTRL0    0x0709
+#define    RTL8367C_VLAN_PPB0_CTRL0_PORT2_INDEX_OFFSET    10
+#define    RTL8367C_VLAN_PPB0_CTRL0_PORT2_INDEX_MASK    0x7C00
+#define    RTL8367C_VLAN_PPB0_CTRL0_PORT1_INDEX_OFFSET    5
+#define    RTL8367C_VLAN_PPB0_CTRL0_PORT1_INDEX_MASK    0x3E0
+#define    RTL8367C_VLAN_PPB0_CTRL0_PORT0_INDEX_OFFSET    0
+#define    RTL8367C_VLAN_PPB0_CTRL0_PORT0_INDEX_MASK    0x1F
+
+#define    RTL8367C_REG_VLAN_PPB0_CTRL1    0x070a
+#define    RTL8367C_VLAN_PPB0_CTRL1_PORT5_INDEX_OFFSET    10
+#define    RTL8367C_VLAN_PPB0_CTRL1_PORT5_INDEX_MASK    0x7C00
+#define    RTL8367C_VLAN_PPB0_CTRL1_PORT4_INDEX_OFFSET    5
+#define    RTL8367C_VLAN_PPB0_CTRL1_PORT4_INDEX_MASK    0x3E0
+#define    RTL8367C_VLAN_PPB0_CTRL1_PORT3_INDEX_OFFSET    0
+#define    RTL8367C_VLAN_PPB0_CTRL1_PORT3_INDEX_MASK    0x1F
+
+#define    RTL8367C_REG_VLAN_PPB0_CTRL2    0x070b
+#define    RTL8367C_VLAN_PPB0_CTRL2_FRAME_TYPE_OFFSET    10
+#define    RTL8367C_VLAN_PPB0_CTRL2_FRAME_TYPE_MASK    0xC00
+#define    RTL8367C_VLAN_PPB0_CTRL2_PORT7_INDEX_OFFSET    5
+#define    RTL8367C_VLAN_PPB0_CTRL2_PORT7_INDEX_MASK    0x3E0
+#define    RTL8367C_VLAN_PPB0_CTRL2_PORT6_INDEX_OFFSET    0
+#define    RTL8367C_VLAN_PPB0_CTRL2_PORT6_INDEX_MASK    0x1F
+
+#define    RTL8367C_REG_VLAN_PPB0_CTRL4    0x070c
+#define    RTL8367C_VLAN_PPB0_CTRL4_PORT10_INDEX_OFFSET    10
+#define    RTL8367C_VLAN_PPB0_CTRL4_PORT10_INDEX_MASK    0x7C00
+#define    RTL8367C_VLAN_PPB0_CTRL4_PORT9_INDEX_OFFSET    5
+#define    RTL8367C_VLAN_PPB0_CTRL4_PORT9_INDEX_MASK    0x3E0
+#define    RTL8367C_VLAN_PPB0_CTRL4_PORT8_INDEX_OFFSET    0
+#define    RTL8367C_VLAN_PPB0_CTRL4_PORT8_INDEX_MASK    0x1F
+
+#define    RTL8367C_REG_VLAN_PPB0_CTRL3    0x070f
+
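+/*
+ * Illustrative note: the four VLAN_PPB blocks repeat on a stride of 8
+ * registers (0x0708, 0x0710, 0x0718, 0x0720), each with a valid bitmap and
+ * 5-bit per-port index fields packed three per CTRL word (CTRL2 trades one
+ * for the 2-bit FRAME_TYPE field).  A hypothetical base-address helper,
+ * not part of the generated list:
+ */
+#define    RTL8367C_VLAN_PPB_BASE(n)    (RTL8367C_REG_VLAN_PPB0_VALID + ((n) << 3))
+/* e.g. block 2 -> 0x0718 == RTL8367C_REG_VLAN_PPB2_VALID */
+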
+#define    RTL8367C_REG_VLAN_PPB1_VALID    0x0710
+#define    RTL8367C_VLAN_PPB1_VALID_VALID_EXT_OFFSET    8
+#define    RTL8367C_VLAN_PPB1_VALID_VALID_EXT_MASK    0x700
+#define    RTL8367C_VLAN_PPB1_VALID_VALID_OFFSET    0
+#define    RTL8367C_VLAN_PPB1_VALID_VALID_MASK    0xFF
+
+#define    RTL8367C_REG_VLAN_PPB1_CTRL0    0x0711
+#define    RTL8367C_VLAN_PPB1_CTRL0_PORT2_INDEX_OFFSET    10
+#define    RTL8367C_VLAN_PPB1_CTRL0_PORT2_INDEX_MASK    0x7C00
+#define    RTL8367C_VLAN_PPB1_CTRL0_PORT1_INDEX_OFFSET    5
+#define    RTL8367C_VLAN_PPB1_CTRL0_PORT1_INDEX_MASK    0x3E0
+#define    RTL8367C_VLAN_PPB1_CTRL0_PORT0_INDEX_OFFSET    0
+#define    RTL8367C_VLAN_PPB1_CTRL0_PORT0_INDEX_MASK    0x1F
+
+#define    RTL8367C_REG_VLAN_PPB1_CTRL1    0x0712
+#define    RTL8367C_VLAN_PPB1_CTRL1_PORT5_INDEX_OFFSET    10
+#define    RTL8367C_VLAN_PPB1_CTRL1_PORT5_INDEX_MASK    0x7C00
+#define    RTL8367C_VLAN_PPB1_CTRL1_PORT4_INDEX_OFFSET    5
+#define    RTL8367C_VLAN_PPB1_CTRL1_PORT4_INDEX_MASK    0x3E0
+#define    RTL8367C_VLAN_PPB1_CTRL1_PORT3_INDEX_OFFSET    0
+#define    RTL8367C_VLAN_PPB1_CTRL1_PORT3_INDEX_MASK    0x1F
+
+#define    RTL8367C_REG_VLAN_PPB1_CTRL2    0x0713
+#define    RTL8367C_VLAN_PPB1_CTRL2_FRAME_TYPE_OFFSET    10
+#define    RTL8367C_VLAN_PPB1_CTRL2_FRAME_TYPE_MASK    0xC00
+#define    RTL8367C_VLAN_PPB1_CTRL2_PORT7_INDEX_OFFSET    5
+#define    RTL8367C_VLAN_PPB1_CTRL2_PORT7_INDEX_MASK    0x3E0
+#define    RTL8367C_VLAN_PPB1_CTRL2_PORT6_INDEX_OFFSET    0
+#define    RTL8367C_VLAN_PPB1_CTRL2_PORT6_INDEX_MASK    0x1F
+
+#define    RTL8367C_REG_VLAN_PPB1_CTRL4    0x0714
+#define    RTL8367C_VLAN_PPB1_CTRL4_PORT10_INDEX_OFFSET    10
+#define    RTL8367C_VLAN_PPB1_CTRL4_PORT10_INDEX_MASK    0x7C00
+#define    RTL8367C_VLAN_PPB1_CTRL4_PORT9_INDEX_OFFSET    5
+#define    RTL8367C_VLAN_PPB1_CTRL4_PORT9_INDEX_MASK    0x3E0
+#define    RTL8367C_VLAN_PPB1_CTRL4_PORT8_INDEX_OFFSET    0
+#define    RTL8367C_VLAN_PPB1_CTRL4_PORT8_INDEX_MASK    0x1F
+
+#define    RTL8367C_REG_VLAN_PPB1_CTRL3    0x0717
+
+#define    RTL8367C_REG_VLAN_PPB2_VALID    0x0718
+#define    RTL8367C_VLAN_PPB2_VALID_VALID_EXT_OFFSET    8
+#define    RTL8367C_VLAN_PPB2_VALID_VALID_EXT_MASK    0x700
+#define    RTL8367C_VLAN_PPB2_VALID_VALID_OFFSET    0
+#define    RTL8367C_VLAN_PPB2_VALID_VALID_MASK    0xFF
+
+#define    RTL8367C_REG_VLAN_PPB2_CTRL0    0x0719
+#define    RTL8367C_VLAN_PPB2_CTRL0_PORT2_INDEX_OFFSET    10
+#define    RTL8367C_VLAN_PPB2_CTRL0_PORT2_INDEX_MASK    0x7C00
+#define    RTL8367C_VLAN_PPB2_CTRL0_PORT1_INDEX_OFFSET    5
+#define    RTL8367C_VLAN_PPB2_CTRL0_PORT1_INDEX_MASK    0x3E0
+#define    RTL8367C_VLAN_PPB2_CTRL0_PORT0_INDEX_OFFSET    0
+#define    RTL8367C_VLAN_PPB2_CTRL0_PORT0_INDEX_MASK    0x1F
+
+#define    RTL8367C_REG_VLAN_PPB2_CTRL1    0x071a
+#define    RTL8367C_VLAN_PPB2_CTRL1_PORT5_INDEX_OFFSET    10
+#define    RTL8367C_VLAN_PPB2_CTRL1_PORT5_INDEX_MASK    0x7C00
+#define    RTL8367C_VLAN_PPB2_CTRL1_PORT4_INDEX_OFFSET    5
+#define    RTL8367C_VLAN_PPB2_CTRL1_PORT4_INDEX_MASK    0x3E0
+#define    RTL8367C_VLAN_PPB2_CTRL1_PORT3_INDEX_OFFSET    0
+#define    RTL8367C_VLAN_PPB2_CTRL1_PORT3_INDEX_MASK    0x1F
+
+#define    RTL8367C_REG_VLAN_PPB2_CTRL2    0x071b
+#define    RTL8367C_VLAN_PPB2_CTRL2_FRAME_TYPE_OFFSET    10
+#define    RTL8367C_VLAN_PPB2_CTRL2_FRAME_TYPE_MASK    0xC00
+#define    RTL8367C_VLAN_PPB2_CTRL2_PORT7_INDEX_OFFSET    5
+#define    RTL8367C_VLAN_PPB2_CTRL2_PORT7_INDEX_MASK    0x3E0
+#define    RTL8367C_VLAN_PPB2_CTRL2_PORT6_INDEX_OFFSET    0
+#define    RTL8367C_VLAN_PPB2_CTRL2_PORT6_INDEX_MASK    0x1F
+
+#define    RTL8367C_REG_VLAN_PPB2_CTRL4    0x071c
+#define    RTL8367C_VLAN_PPB2_CTRL4_PORT10_INDEX_OFFSET    10
+#define    RTL8367C_VLAN_PPB2_CTRL4_PORT10_INDEX_MASK    0x7C00
+#define    RTL8367C_VLAN_PPB2_CTRL4_PORT9_INDEX_OFFSET    5
+#define    RTL8367C_VLAN_PPB2_CTRL4_PORT9_INDEX_MASK    0x3E0
+#define    RTL8367C_VLAN_PPB2_CTRL4_PORT8_INDEX_OFFSET    0
+#define    RTL8367C_VLAN_PPB2_CTRL4_PORT8_INDEX_MASK    0x1F
+
+#define    RTL8367C_REG_VLAN_PPB2_CTRL3    0x071f
+
+#define    RTL8367C_REG_VLAN_PPB3_VALID    0x0720
+#define    RTL8367C_VLAN_PPB3_VALID_VALID_EXT_OFFSET    8
+#define    RTL8367C_VLAN_PPB3_VALID_VALID_EXT_MASK    0x700
+#define    RTL8367C_VLAN_PPB3_VALID_VALID_OFFSET    0
+#define    RTL8367C_VLAN_PPB3_VALID_VALID_MASK    0xFF
+
+#define    RTL8367C_REG_VLAN_PPB3_CTRL0    0x0721
+#define    RTL8367C_VLAN_PPB3_CTRL0_PORT2_INDEX_OFFSET    10
+#define    RTL8367C_VLAN_PPB3_CTRL0_PORT2_INDEX_MASK    0x7C00
+#define    RTL8367C_VLAN_PPB3_CTRL0_PORT1_INDEX_OFFSET    5
+#define    RTL8367C_VLAN_PPB3_CTRL0_PORT1_INDEX_MASK    0x3E0
+#define    RTL8367C_VLAN_PPB3_CTRL0_PORT0_INDEX_OFFSET    0
+#define    RTL8367C_VLAN_PPB3_CTRL0_PORT0_INDEX_MASK    0x1F
+
+#define    RTL8367C_REG_VLAN_PPB3_CTRL1    0x0722
+#define    RTL8367C_VLAN_PPB3_CTRL1_PORT5_INDEX_OFFSET    10
+#define    RTL8367C_VLAN_PPB3_CTRL1_PORT5_INDEX_MASK    0x7C00
+#define    RTL8367C_VLAN_PPB3_CTRL1_PORT4_INDEX_OFFSET    5
+#define    RTL8367C_VLAN_PPB3_CTRL1_PORT4_INDEX_MASK    0x3E0
+#define    RTL8367C_VLAN_PPB3_CTRL1_PORT3_INDEX_OFFSET    0
+#define    RTL8367C_VLAN_PPB3_CTRL1_PORT3_INDEX_MASK    0x1F
+
+#define    RTL8367C_REG_VLAN_PPB3_CTRL2    0x0723
+#define    RTL8367C_VLAN_PPB3_CTRL2_FRAME_TYPE_OFFSET    10
+#define    RTL8367C_VLAN_PPB3_CTRL2_FRAME_TYPE_MASK    0xC00
+#define    RTL8367C_VLAN_PPB3_CTRL2_PORT7_INDEX_OFFSET    5
+#define    RTL8367C_VLAN_PPB3_CTRL2_PORT7_INDEX_MASK    0x3E0
+#define    RTL8367C_VLAN_PPB3_CTRL2_PORT6_INDEX_OFFSET    0
+#define    RTL8367C_VLAN_PPB3_CTRL2_PORT6_INDEX_MASK    0x1F
+
+#define    RTL8367C_REG_VLAN_PPB3_CTRL4    0x0724
+#define    RTL8367C_VLAN_PPB3_CTRL4_PORT10_INDEX_OFFSET    10
+#define    RTL8367C_VLAN_PPB3_CTRL4_PORT10_INDEX_MASK    0x7C00
+#define    RTL8367C_VLAN_PPB3_CTRL4_PORT9_INDEX_OFFSET    5
+#define    RTL8367C_VLAN_PPB3_CTRL4_PORT9_INDEX_MASK    0x3E0
+#define    RTL8367C_VLAN_PPB3_CTRL4_PORT8_INDEX_OFFSET    0
+#define    RTL8367C_VLAN_PPB3_CTRL4_PORT8_INDEX_MASK    0x1F
+
+#define    RTL8367C_REG_VLAN_PPB3_CTRL3    0x0727
+
+#define    RTL8367C_REG_VLAN_MEMBER_CONFIGURATION0_CTRL0    0x0728
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION0_CTRL0_MBR_EXT_OFFSET    8
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION0_CTRL0_MBR_EXT_MASK    0x700
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION0_CTRL0_MBR_OFFSET    0
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION0_CTRL0_MBR_MASK    0xFF
+
+#define    RTL8367C_REG_VLAN_MEMBER_CONFIGURATION0_CTRL1    0x0729
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION0_CTRL1_OFFSET    0
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION0_CTRL1_MASK    0xF
+
+#define    RTL8367C_REG_VLAN_MEMBER_CONFIGURATION0_CTRL2    0x072a
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION0_CTRL2_METERIDX_EXT_OFFSET    10
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION0_CTRL2_METERIDX_EXT_MASK    0x400
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION0_CTRL2_METERIDX_OFFSET    5
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION0_CTRL2_METERIDX_MASK    0x3E0
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION0_CTRL2_ENVLANPOL_OFFSET    4
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION0_CTRL2_ENVLANPOL_MASK    0x10
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION0_CTRL2_VBPRI_OFFSET    1
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION0_CTRL2_VBPRI_MASK    0xE
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION0_CTRL2_VBPEN_OFFSET    0
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION0_CTRL2_VBPEN_MASK    0x1
+
+#define    RTL8367C_REG_VLAN_MEMBER_CONFIGURATION0_CTRL3    0x072b
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION0_CTRL3_OFFSET    0
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION0_CTRL3_MASK    0x1FFF
+
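+/*
+ * Illustrative note: each VLAN member configuration entry occupies four
+ * consecutive registers (CTRL0..CTRL3 at 0x0728 + 4 * index).  The 11-port
+ * member set is contiguous across MBR (bits 7:0) and MBR_EXT (bits 10:8),
+ * so it composes as (pmask & 0x7FF); the 6-bit meter index, by contrast,
+ * is split, with METERIDX holding bits 4:0 at offset 5 and METERIDX_EXT
+ * holding bit 5 at offset 10.  Hypothetical helpers (not part of the
+ * generated list):
+ */
+#define    RTL8367C_VLAN_MEMBER_CFG_BASE(idx) \
+    (RTL8367C_REG_VLAN_MEMBER_CONFIGURATION0_CTRL0 + ((idx) << 2))
+/* compose the CTRL2 meter-index fields from a 6-bit meter index */
+#define    RTL8367C_VLAN_MEMBER_CFG_METER(m) \
+    ((((m) & 0x1F) << 5) | ((((m) >> 5) & 0x1) << 10))
+/* e.g. index 12 -> base 0x0758 == RTL8367C_REG_VLAN_MEMBER_CONFIGURATION12_CTRL0 */
+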
+#define    RTL8367C_REG_VLAN_MEMBER_CONFIGURATION1_CTRL0    0x072c
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION1_CTRL0_MBR_EXT_OFFSET    8
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION1_CTRL0_MBR_EXT_MASK    0x700
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION1_CTRL0_MBR_OFFSET    0
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION1_CTRL0_MBR_MASK    0xFF
+
+#define    RTL8367C_REG_VLAN_MEMBER_CONFIGURATION1_CTRL1    0x072d
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION1_CTRL1_OFFSET    0
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION1_CTRL1_MASK    0xF
+
+#define    RTL8367C_REG_VLAN_MEMBER_CONFIGURATION1_CTRL2    0x072e
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION1_CTRL2_METERIDX_EXT_OFFSET    10
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION1_CTRL2_METERIDX_EXT_MASK    0x400
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION1_CTRL2_METERIDX_OFFSET    5
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION1_CTRL2_METERIDX_MASK    0x3E0
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION1_CTRL2_ENVLANPOL_OFFSET    4
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION1_CTRL2_ENVLANPOL_MASK    0x10
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION1_CTRL2_VBPRI_OFFSET    1
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION1_CTRL2_VBPRI_MASK    0xE
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION1_CTRL2_VBPEN_OFFSET    0
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION1_CTRL2_VBPEN_MASK    0x1
+
+#define    RTL8367C_REG_VLAN_MEMBER_CONFIGURATION1_CTRL3    0x072f
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION1_CTRL3_OFFSET    0
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION1_CTRL3_MASK    0x1FFF
+
+#define    RTL8367C_REG_VLAN_MEMBER_CONFIGURATION2_CTRL0    0x0730
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION2_CTRL0_MBR_EXT_OFFSET    8
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION2_CTRL0_MBR_EXT_MASK    0x700
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION2_CTRL0_MBR_OFFSET    0
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION2_CTRL0_MBR_MASK    0xFF
+
+#define    RTL8367C_REG_VLAN_MEMBER_CONFIGURATION2_CTRL1    0x0731
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION2_CTRL1_OFFSET    0
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION2_CTRL1_MASK    0xF
+
+#define    RTL8367C_REG_VLAN_MEMBER_CONFIGURATION2_CTRL2    0x0732
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION2_CTRL2_METERIDX_EXT_OFFSET    10
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION2_CTRL2_METERIDX_EXT_MASK    0x400
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION2_CTRL2_METERIDX_OFFSET    5
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION2_CTRL2_METERIDX_MASK    0x3E0
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION2_CTRL2_ENVLANPOL_OFFSET    4
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION2_CTRL2_ENVLANPOL_MASK    0x10
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION2_CTRL2_VBPRI_OFFSET    1
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION2_CTRL2_VBPRI_MASK    0xE
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION2_CTRL2_VBPEN_OFFSET    0
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION2_CTRL2_VBPEN_MASK    0x1
+
+#define    RTL8367C_REG_VLAN_MEMBER_CONFIGURATION2_CTRL3    0x0733
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION2_CTRL3_OFFSET    0
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION2_CTRL3_MASK    0x1FFF
+
+#define    RTL8367C_REG_VLAN_MEMBER_CONFIGURATION3_CTRL0    0x0734
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION3_CTRL0_MBR_EXT_OFFSET    8
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION3_CTRL0_MBR_EXT_MASK    0x700
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION3_CTRL0_MBR_OFFSET    0
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION3_CTRL0_MBR_MASK    0xFF
+
+#define    RTL8367C_REG_VLAN_MEMBER_CONFIGURATION3_CTRL1    0x0735
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION3_CTRL1_OFFSET    0
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION3_CTRL1_MASK    0xF
+
+#define    RTL8367C_REG_VLAN_MEMBER_CONFIGURATION3_CTRL2    0x0736
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION3_CTRL2_METERIDX_EXT_OFFSET    10
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION3_CTRL2_METERIDX_EXT_MASK    0x400
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION3_CTRL2_METERIDX_OFFSET    5
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION3_CTRL2_METERIDX_MASK    0x3E0
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION3_CTRL2_ENVLANPOL_OFFSET    4
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION3_CTRL2_ENVLANPOL_MASK    0x10
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION3_CTRL2_VBPRI_OFFSET    1
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION3_CTRL2_VBPRI_MASK    0xE
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION3_CTRL2_VBPEN_OFFSET    0
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION3_CTRL2_VBPEN_MASK    0x1
+
+#define    RTL8367C_REG_VLAN_MEMBER_CONFIGURATION3_CTRL3    0x0737
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION3_CTRL3_OFFSET    0
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION3_CTRL3_MASK    0x1FFF
+
+#define    RTL8367C_REG_VLAN_MEMBER_CONFIGURATION4_CTRL0    0x0738
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION4_CTRL0_MBR_EXT_OFFSET    8
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION4_CTRL0_MBR_EXT_MASK    0x700
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION4_CTRL0_MBR_OFFSET    0
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION4_CTRL0_MBR_MASK    0xFF
+
+#define    RTL8367C_REG_VLAN_MEMBER_CONFIGURATION4_CTRL1    0x0739
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION4_CTRL1_OFFSET    0
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION4_CTRL1_MASK    0xF
+
+#define    RTL8367C_REG_VLAN_MEMBER_CONFIGURATION4_CTRL2    0x073a
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION4_CTRL2_METERIDX_EXT_OFFSET    10
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION4_CTRL2_METERIDX_EXT_MASK    0x400
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION4_CTRL2_METERIDX_OFFSET    5
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION4_CTRL2_METERIDX_MASK    0x3E0
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION4_CTRL2_ENVLANPOL_OFFSET    4
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION4_CTRL2_ENVLANPOL_MASK    0x10
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION4_CTRL2_VBPRI_OFFSET    1
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION4_CTRL2_VBPRI_MASK    0xE
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION4_CTRL2_VBPEN_OFFSET    0
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION4_CTRL2_VBPEN_MASK    0x1
+
+#define    RTL8367C_REG_VLAN_MEMBER_CONFIGURATION4_CTRL3    0x073b
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION4_CTRL3_OFFSET    0
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION4_CTRL3_MASK    0x1FFF
+
+#define    RTL8367C_REG_VLAN_MEMBER_CONFIGURATION5_CTRL0    0x073c
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION5_CTRL0_MBR_EXT_OFFSET    8
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION5_CTRL0_MBR_EXT_MASK    0x700
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION5_CTRL0_MBR_OFFSET    0
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION5_CTRL0_MBR_MASK    0xFF
+
+#define    RTL8367C_REG_VLAN_MEMBER_CONFIGURATION5_CTRL1    0x073d
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION5_CTRL1_OFFSET    0
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION5_CTRL1_MASK    0xF
+
+#define    RTL8367C_REG_VLAN_MEMBER_CONFIGURATION5_CTRL2    0x073e
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION5_CTRL2_METERIDX_EXT_OFFSET    10
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION5_CTRL2_METERIDX_EXT_MASK    0x400
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION5_CTRL2_METERIDX_OFFSET    5
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION5_CTRL2_METERIDX_MASK    0x3E0
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION5_CTRL2_ENVLANPOL_OFFSET    4
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION5_CTRL2_ENVLANPOL_MASK    0x10
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION5_CTRL2_VBPRI_OFFSET    1
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION5_CTRL2_VBPRI_MASK    0xE
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION5_CTRL2_VBPEN_OFFSET    0
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION5_CTRL2_VBPEN_MASK    0x1
+
+#define    RTL8367C_REG_VLAN_MEMBER_CONFIGURATION5_CTRL3    0x073f
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION5_CTRL3_OFFSET    0
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION5_CTRL3_MASK    0x1FFF
+
+#define    RTL8367C_REG_VLAN_MEMBER_CONFIGURATION6_CTRL0    0x0740
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION6_CTRL0_MBR_EXT_OFFSET    8
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION6_CTRL0_MBR_EXT_MASK    0x700
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION6_CTRL0_MBR_OFFSET    0
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION6_CTRL0_MBR_MASK    0xFF
+
+#define    RTL8367C_REG_VLAN_MEMBER_CONFIGURATION6_CTRL1    0x0741
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION6_CTRL1_OFFSET    0
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION6_CTRL1_MASK    0xF
+
+#define    RTL8367C_REG_VLAN_MEMBER_CONFIGURATION6_CTRL2    0x0742
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION6_CTRL2_METERIDX_EXT_OFFSET    10
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION6_CTRL2_METERIDX_EXT_MASK    0x400
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION6_CTRL2_METERIDX_OFFSET    5
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION6_CTRL2_METERIDX_MASK    0x3E0
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION6_CTRL2_ENVLANPOL_OFFSET    4
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION6_CTRL2_ENVLANPOL_MASK    0x10
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION6_CTRL2_VBPRI_OFFSET    1
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION6_CTRL2_VBPRI_MASK    0xE
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION6_CTRL2_VBPEN_OFFSET    0
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION6_CTRL2_VBPEN_MASK    0x1
+
+#define    RTL8367C_REG_VLAN_MEMBER_CONFIGURATION6_CTRL3    0x0743
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION6_CTRL3_OFFSET    0
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION6_CTRL3_MASK    0x1FFF
+
+#define    RTL8367C_REG_VLAN_MEMBER_CONFIGURATION7_CTRL0    0x0744
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION7_CTRL0_MBR_EXT_OFFSET    8
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION7_CTRL0_MBR_EXT_MASK    0x700
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION7_CTRL0_MBR_OFFSET    0
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION7_CTRL0_MBR_MASK    0xFF
+
+#define    RTL8367C_REG_VLAN_MEMBER_CONFIGURATION7_CTRL1    0x0745
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION7_CTRL1_OFFSET    0
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION7_CTRL1_MASK    0xF
+
+#define    RTL8367C_REG_VLAN_MEMBER_CONFIGURATION7_CTRL2    0x0746
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION7_CTRL2_METERIDX_EXT_OFFSET    10
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION7_CTRL2_METERIDX_EXT_MASK    0x400
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION7_CTRL2_METERIDX_OFFSET    5
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION7_CTRL2_METERIDX_MASK    0x3E0
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION7_CTRL2_ENVLANPOL_OFFSET    4
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION7_CTRL2_ENVLANPOL_MASK    0x10
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION7_CTRL2_VBPRI_OFFSET    1
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION7_CTRL2_VBPRI_MASK    0xE
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION7_CTRL2_VBPEN_OFFSET    0
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION7_CTRL2_VBPEN_MASK    0x1
+
+#define    RTL8367C_REG_VLAN_MEMBER_CONFIGURATION7_CTRL3    0x0747
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION7_CTRL3_OFFSET    0
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION7_CTRL3_MASK    0x1FFF
+
+#define    RTL8367C_REG_VLAN_MEMBER_CONFIGURATION8_CTRL0    0x0748
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION8_CTRL0_MBR_EXT_OFFSET    8
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION8_CTRL0_MBR_EXT_MASK    0x700
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION8_CTRL0_MBR_OFFSET    0
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION8_CTRL0_MBR_MASK    0xFF
+
+#define    RTL8367C_REG_VLAN_MEMBER_CONFIGURATION8_CTRL1    0x0749
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION8_CTRL1_OFFSET    0
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION8_CTRL1_MASK    0xF
+
+#define    RTL8367C_REG_VLAN_MEMBER_CONFIGURATION8_CTRL2    0x074a
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION8_CTRL2_METERIDX_EXT_OFFSET    10
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION8_CTRL2_METERIDX_EXT_MASK    0x400
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION8_CTRL2_METERIDX_OFFSET    5
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION8_CTRL2_METERIDX_MASK    0x3E0
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION8_CTRL2_ENVLANPOL_OFFSET    4
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION8_CTRL2_ENVLANPOL_MASK    0x10
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION8_CTRL2_VBPRI_OFFSET    1
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION8_CTRL2_VBPRI_MASK    0xE
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION8_CTRL2_VBPEN_OFFSET    0
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION8_CTRL2_VBPEN_MASK    0x1
+
+#define    RTL8367C_REG_VLAN_MEMBER_CONFIGURATION8_CTRL3    0x074b
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION8_CTRL3_OFFSET    0
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION8_CTRL3_MASK    0x1FFF
+
+#define    RTL8367C_REG_VLAN_MEMBER_CONFIGURATION9_CTRL0    0x074c
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION9_CTRL0_MBR_EXT_OFFSET    8
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION9_CTRL0_MBR_EXT_MASK    0x700
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION9_CTRL0_MBR_OFFSET    0
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION9_CTRL0_MBR_MASK    0xFF
+
+#define    RTL8367C_REG_VLAN_MEMBER_CONFIGURATION9_CTRL1    0x074d
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION9_CTRL1_OFFSET    0
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION9_CTRL1_MASK    0xF
+
+#define    RTL8367C_REG_VLAN_MEMBER_CONFIGURATION9_CTRL2    0x074e
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION9_CTRL2_METERIDX_EXT_OFFSET    10
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION9_CTRL2_METERIDX_EXT_MASK    0x400
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION9_CTRL2_METERIDX_OFFSET    5
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION9_CTRL2_METERIDX_MASK    0x3E0
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION9_CTRL2_ENVLANPOL_OFFSET    4
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION9_CTRL2_ENVLANPOL_MASK    0x10
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION9_CTRL2_VBPRI_OFFSET    1
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION9_CTRL2_VBPRI_MASK    0xE
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION9_CTRL2_VBPEN_OFFSET    0
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION9_CTRL2_VBPEN_MASK    0x1
+
+#define    RTL8367C_REG_VLAN_MEMBER_CONFIGURATION9_CTRL3    0x074f
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION9_CTRL3_OFFSET    0
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION9_CTRL3_MASK    0x1FFF
+
+#define    RTL8367C_REG_VLAN_MEMBER_CONFIGURATION10_CTRL0    0x0750
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION10_CTRL0_MBR_EXT_OFFSET    8
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION10_CTRL0_MBR_EXT_MASK    0x700
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION10_CTRL0_MBR_OFFSET    0
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION10_CTRL0_MBR_MASK    0xFF
+
+#define    RTL8367C_REG_VLAN_MEMBER_CONFIGURATION10_CTRL1    0x0751
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION10_CTRL1_OFFSET    0
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION10_CTRL1_MASK    0xF
+
+#define    RTL8367C_REG_VLAN_MEMBER_CONFIGURATION10_CTRL2    0x0752
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION10_CTRL2_METERIDX_EXT_OFFSET    10
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION10_CTRL2_METERIDX_EXT_MASK    0x400
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION10_CTRL2_METERIDX_OFFSET    5
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION10_CTRL2_METERIDX_MASK    0x3E0
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION10_CTRL2_ENVLANPOL_OFFSET    4
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION10_CTRL2_ENVLANPOL_MASK    0x10
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION10_CTRL2_VBPRI_OFFSET    1
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION10_CTRL2_VBPRI_MASK    0xE
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION10_CTRL2_VBPEN_OFFSET    0
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION10_CTRL2_VBPEN_MASK    0x1
+
+#define    RTL8367C_REG_VLAN_MEMBER_CONFIGURATION10_CTRL3    0x0753
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION10_CTRL3_OFFSET    0
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION10_CTRL3_MASK    0x1FFF
+
+#define    RTL8367C_REG_VLAN_MEMBER_CONFIGURATION11_CTRL0    0x0754
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION11_CTRL0_MBR_EXT_OFFSET    8
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION11_CTRL0_MBR_EXT_MASK    0x700
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION11_CTRL0_MBR_OFFSET    0
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION11_CTRL0_MBR_MASK    0xFF
+
+#define    RTL8367C_REG_VLAN_MEMBER_CONFIGURATION11_CTRL1    0x0755
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION11_CTRL1_OFFSET    0
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION11_CTRL1_MASK    0xF
+
+#define    RTL8367C_REG_VLAN_MEMBER_CONFIGURATION11_CTRL2    0x0756
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION11_CTRL2_METERIDX_EXT_OFFSET    10
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION11_CTRL2_METERIDX_EXT_MASK    0x400
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION11_CTRL2_METERIDX_OFFSET    5
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION11_CTRL2_METERIDX_MASK    0x3E0
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION11_CTRL2_ENVLANPOL_OFFSET    4
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION11_CTRL2_ENVLANPOL_MASK    0x10
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION11_CTRL2_VBPRI_OFFSET    1
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION11_CTRL2_VBPRI_MASK    0xE
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION11_CTRL2_VBPEN_OFFSET    0
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION11_CTRL2_VBPEN_MASK    0x1
+
+#define    RTL8367C_REG_VLAN_MEMBER_CONFIGURATION11_CTRL3    0x0757
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION11_CTRL3_OFFSET    0
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION11_CTRL3_MASK    0x1FFF
+
+#define    RTL8367C_REG_VLAN_MEMBER_CONFIGURATION12_CTRL0    0x0758
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION12_CTRL0_MBR_EXT_OFFSET    8
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION12_CTRL0_MBR_EXT_MASK    0x700
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION12_CTRL0_MBR_OFFSET    0
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION12_CTRL0_MBR_MASK    0xFF
+
+#define    RTL8367C_REG_VLAN_MEMBER_CONFIGURATION12_CTRL1    0x0759
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION12_CTRL1_OFFSET    0
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION12_CTRL1_MASK    0xF
+
+#define    RTL8367C_REG_VLAN_MEMBER_CONFIGURATION12_CTRL2    0x075a
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION12_CTRL2_METERIDX_EXT_OFFSET    10
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION12_CTRL2_METERIDX_EXT_MASK    0x400
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION12_CTRL2_METERIDX_OFFSET    5
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION12_CTRL2_METERIDX_MASK    0x3E0
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION12_CTRL2_ENVLANPOL_OFFSET    4
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION12_CTRL2_ENVLANPOL_MASK    0x10
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION12_CTRL2_VBPRI_OFFSET    1
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION12_CTRL2_VBPRI_MASK    0xE
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION12_CTRL2_VBPEN_OFFSET    0
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION12_CTRL2_VBPEN_MASK    0x1
+
+#define    RTL8367C_REG_VLAN_MEMBER_CONFIGURATION12_CTRL3    0x075b
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION12_CTRL3_OFFSET    0
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION12_CTRL3_MASK    0x1FFF
+
+#define    RTL8367C_REG_VLAN_MEMBER_CONFIGURATION13_CTRL0    0x075c
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION13_CTRL0_MBR_EXT_OFFSET    8
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION13_CTRL0_MBR_EXT_MASK    0x700
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION13_CTRL0_MBR_OFFSET    0
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION13_CTRL0_MBR_MASK    0xFF
+
+#define    RTL8367C_REG_VLAN_MEMBER_CONFIGURATION13_CTRL1    0x075d
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION13_CTRL1_OFFSET    0
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION13_CTRL1_MASK    0xF
+
+#define    RTL8367C_REG_VLAN_MEMBER_CONFIGURATION13_CTRL2    0x075e
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION13_CTRL2_METERIDX_EXT_OFFSET    10
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION13_CTRL2_METERIDX_EXT_MASK    0x400
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION13_CTRL2_METERIDX_OFFSET    5
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION13_CTRL2_METERIDX_MASK    0x3E0
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION13_CTRL2_ENVLANPOL_OFFSET    4
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION13_CTRL2_ENVLANPOL_MASK    0x10
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION13_CTRL2_VBPRI_OFFSET    1
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION13_CTRL2_VBPRI_MASK    0xE
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION13_CTRL2_VBPEN_OFFSET    0
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION13_CTRL2_VBPEN_MASK    0x1
+
+#define    RTL8367C_REG_VLAN_MEMBER_CONFIGURATION13_CTRL3    0x075f
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION13_CTRL3_OFFSET    0
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION13_CTRL3_MASK    0x1FFF
+
+#define    RTL8367C_REG_VLAN_MEMBER_CONFIGURATION14_CTRL0    0x0760
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION14_CTRL0_MBR_EXT_OFFSET    8
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION14_CTRL0_MBR_EXT_MASK    0x700
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION14_CTRL0_MBR_OFFSET    0
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION14_CTRL0_MBR_MASK    0xFF
+
+#define    RTL8367C_REG_VLAN_MEMBER_CONFIGURATION14_CTRL1    0x0761
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION14_CTRL1_OFFSET    0
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION14_CTRL1_MASK    0xF
+
+#define    RTL8367C_REG_VLAN_MEMBER_CONFIGURATION14_CTRL2    0x0762
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION14_CTRL2_METERIDX_EXT_OFFSET    10
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION14_CTRL2_METERIDX_EXT_MASK    0x400
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION14_CTRL2_METERIDX_OFFSET    5
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION14_CTRL2_METERIDX_MASK    0x3E0
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION14_CTRL2_ENVLANPOL_OFFSET    4
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION14_CTRL2_ENVLANPOL_MASK    0x10
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION14_CTRL2_VBPRI_OFFSET    1
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION14_CTRL2_VBPRI_MASK    0xE
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION14_CTRL2_VBPEN_OFFSET    0
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION14_CTRL2_VBPEN_MASK    0x1
+
+#define    RTL8367C_REG_VLAN_MEMBER_CONFIGURATION14_CTRL3    0x0763
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION14_CTRL3_OFFSET    0
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION14_CTRL3_MASK    0x1FFF
+
+#define    RTL8367C_REG_VLAN_MEMBER_CONFIGURATION15_CTRL0    0x0764
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION15_CTRL0_MBR_EXT_OFFSET    8
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION15_CTRL0_MBR_EXT_MASK    0x700
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION15_CTRL0_MBR_OFFSET    0
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION15_CTRL0_MBR_MASK    0xFF
+
+#define    RTL8367C_REG_VLAN_MEMBER_CONFIGURATION15_CTRL1    0x0765
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION15_CTRL1_OFFSET    0
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION15_CTRL1_MASK    0xF
+
+#define    RTL8367C_REG_VLAN_MEMBER_CONFIGURATION15_CTRL2    0x0766
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION15_CTRL2_METERIDX_EXT_OFFSET    10
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION15_CTRL2_METERIDX_EXT_MASK    0x400
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION15_CTRL2_METERIDX_OFFSET    5
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION15_CTRL2_METERIDX_MASK    0x3E0
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION15_CTRL2_ENVLANPOL_OFFSET    4
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION15_CTRL2_ENVLANPOL_MASK    0x10
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION15_CTRL2_VBPRI_OFFSET    1
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION15_CTRL2_VBPRI_MASK    0xE
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION15_CTRL2_VBPEN_OFFSET    0
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION15_CTRL2_VBPEN_MASK    0x1
+
+#define    RTL8367C_REG_VLAN_MEMBER_CONFIGURATION15_CTRL3    0x0767
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION15_CTRL3_OFFSET    0
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION15_CTRL3_MASK    0x1FFF
+
+#define    RTL8367C_REG_VLAN_MEMBER_CONFIGURATION16_CTRL0    0x0768
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION16_CTRL0_MBR_EXT_OFFSET    8
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION16_CTRL0_MBR_EXT_MASK    0x700
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION16_CTRL0_MBR_OFFSET    0
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION16_CTRL0_MBR_MASK    0xFF
+
+#define    RTL8367C_REG_VLAN_MEMBER_CONFIGURATION16_CTRL1    0x0769
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION16_CTRL1_OFFSET    0
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION16_CTRL1_MASK    0xF
+
+#define    RTL8367C_REG_VLAN_MEMBER_CONFIGURATION16_CTRL2    0x076a
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION16_CTRL2_METERIDX_EXT_OFFSET    10
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION16_CTRL2_METERIDX_EXT_MASK    0x400
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION16_CTRL2_METERIDX_OFFSET    5
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION16_CTRL2_METERIDX_MASK    0x3E0
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION16_CTRL2_ENVLANPOL_OFFSET    4
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION16_CTRL2_ENVLANPOL_MASK    0x10
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION16_CTRL2_VBPRI_OFFSET    1
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION16_CTRL2_VBPRI_MASK    0xE
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION16_CTRL2_VBPEN_OFFSET    0
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION16_CTRL2_VBPEN_MASK    0x1
+
+#define    RTL8367C_REG_VLAN_MEMBER_CONFIGURATION16_CTRL3    0x076b
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION16_CTRL3_OFFSET    0
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION16_CTRL3_MASK    0x1FFF
+
+#define    RTL8367C_REG_VLAN_MEMBER_CONFIGURATION17_CTRL0    0x076c
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION17_CTRL0_MBR_EXT_OFFSET    8
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION17_CTRL0_MBR_EXT_MASK    0x700
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION17_CTRL0_MBR_OFFSET    0
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION17_CTRL0_MBR_MASK    0xFF
+
+#define    RTL8367C_REG_VLAN_MEMBER_CONFIGURATION17_CTRL1    0x076d
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION17_CTRL1_OFFSET    0
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION17_CTRL1_MASK    0xF
+
+#define    RTL8367C_REG_VLAN_MEMBER_CONFIGURATION17_CTRL2    0x076e
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION17_CTRL2_METERIDX_EXT_OFFSET    10
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION17_CTRL2_METERIDX_EXT_MASK    0x400
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION17_CTRL2_METERIDX_OFFSET    5
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION17_CTRL2_METERIDX_MASK    0x3E0
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION17_CTRL2_ENVLANPOL_OFFSET    4
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION17_CTRL2_ENVLANPOL_MASK    0x10
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION17_CTRL2_VBPRI_OFFSET    1
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION17_CTRL2_VBPRI_MASK    0xE
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION17_CTRL2_VBPEN_OFFSET    0
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION17_CTRL2_VBPEN_MASK    0x1
+
+#define    RTL8367C_REG_VLAN_MEMBER_CONFIGURATION17_CTRL3    0x076f
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION17_CTRL3_OFFSET    0
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION17_CTRL3_MASK    0x1FFF
+
+#define    RTL8367C_REG_VLAN_MEMBER_CONFIGURATION18_CTRL0    0x0770
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION18_CTRL0_MBR_EXT_OFFSET    8
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION18_CTRL0_MBR_EXT_MASK    0x700
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION18_CTRL0_MBR_OFFSET    0
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION18_CTRL0_MBR_MASK    0xFF
+
+#define    RTL8367C_REG_VLAN_MEMBER_CONFIGURATION18_CTRL1    0x0771
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION18_CTRL1_OFFSET    0
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION18_CTRL1_MASK    0xF
+
+#define    RTL8367C_REG_VLAN_MEMBER_CONFIGURATION18_CTRL2    0x0772
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION18_CTRL2_METERIDX_EXT_OFFSET    10
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION18_CTRL2_METERIDX_EXT_MASK    0x400
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION18_CTRL2_METERIDX_OFFSET    5
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION18_CTRL2_METERIDX_MASK    0x3E0
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION18_CTRL2_ENVLANPOL_OFFSET    4
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION18_CTRL2_ENVLANPOL_MASK    0x10
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION18_CTRL2_VBPRI_OFFSET    1
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION18_CTRL2_VBPRI_MASK    0xE
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION18_CTRL2_VBPEN_OFFSET    0
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION18_CTRL2_VBPEN_MASK    0x1
+
+#define    RTL8367C_REG_VLAN_MEMBER_CONFIGURATION18_CTRL3    0x0773
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION18_CTRL3_OFFSET    0
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION18_CTRL3_MASK    0x1FFF
+
+#define    RTL8367C_REG_VLAN_MEMBER_CONFIGURATION19_CTRL0    0x0774
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION19_CTRL0_MBR_EXT_OFFSET    8
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION19_CTRL0_MBR_EXT_MASK    0x700
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION19_CTRL0_MBR_OFFSET    0
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION19_CTRL0_MBR_MASK    0xFF
+
+#define    RTL8367C_REG_VLAN_MEMBER_CONFIGURATION19_CTRL1    0x0775
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION19_CTRL1_OFFSET    0
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION19_CTRL1_MASK    0xF
+
+#define    RTL8367C_REG_VLAN_MEMBER_CONFIGURATION19_CTRL2    0x0776
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION19_CTRL2_METERIDX_EXT_OFFSET    10
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION19_CTRL2_METERIDX_EXT_MASK    0x400
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION19_CTRL2_METERIDX_OFFSET    5
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION19_CTRL2_METERIDX_MASK    0x3E0
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION19_CTRL2_ENVLANPOL_OFFSET    4
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION19_CTRL2_ENVLANPOL_MASK    0x10
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION19_CTRL2_VBPRI_OFFSET    1
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION19_CTRL2_VBPRI_MASK    0xE
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION19_CTRL2_VBPEN_OFFSET    0
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION19_CTRL2_VBPEN_MASK    0x1
+
+#define    RTL8367C_REG_VLAN_MEMBER_CONFIGURATION19_CTRL3    0x0777
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION19_CTRL3_OFFSET    0
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION19_CTRL3_MASK    0x1FFF
+
+#define    RTL8367C_REG_VLAN_MEMBER_CONFIGURATION20_CTRL0    0x0778
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION20_CTRL0_MBR_EXT_OFFSET    8
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION20_CTRL0_MBR_EXT_MASK    0x700
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION20_CTRL0_MBR_OFFSET    0
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION20_CTRL0_MBR_MASK    0xFF
+
+#define    RTL8367C_REG_VLAN_MEMBER_CONFIGURATION20_CTRL1    0x0779
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION20_CTRL1_OFFSET    0
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION20_CTRL1_MASK    0xF
+
+#define    RTL8367C_REG_VLAN_MEMBER_CONFIGURATION20_CTRL2    0x077a
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION20_CTRL2_METERIDX_EXT_OFFSET    10
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION20_CTRL2_METERIDX_EXT_MASK    0x400
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION20_CTRL2_METERIDX_OFFSET    5
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION20_CTRL2_METERIDX_MASK    0x3E0
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION20_CTRL2_ENVLANPOL_OFFSET    4
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION20_CTRL2_ENVLANPOL_MASK    0x10
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION20_CTRL2_VBPRI_OFFSET    1
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION20_CTRL2_VBPRI_MASK    0xE
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION20_CTRL2_VBPEN_OFFSET    0
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION20_CTRL2_VBPEN_MASK    0x1
+
+#define    RTL8367C_REG_VLAN_MEMBER_CONFIGURATION20_CTRL3    0x077b
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION20_CTRL3_OFFSET    0
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION20_CTRL3_MASK    0x1FFF
+
+#define    RTL8367C_REG_VLAN_MEMBER_CONFIGURATION21_CTRL0    0x077c
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION21_CTRL0_MBR_EXT_OFFSET    8
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION21_CTRL0_MBR_EXT_MASK    0x700
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION21_CTRL0_MBR_OFFSET    0
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION21_CTRL0_MBR_MASK    0xFF
+
+#define    RTL8367C_REG_VLAN_MEMBER_CONFIGURATION21_CTRL1    0x077d
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION21_CTRL1_OFFSET    0
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION21_CTRL1_MASK    0xF
+
+#define    RTL8367C_REG_VLAN_MEMBER_CONFIGURATION21_CTRL2    0x077e
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION21_CTRL2_METERIDX_EXT_OFFSET    10
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION21_CTRL2_METERIDX_EXT_MASK    0x400
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION21_CTRL2_METERIDX_OFFSET    5
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION21_CTRL2_METERIDX_MASK    0x3E0
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION21_CTRL2_ENVLANPOL_OFFSET    4
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION21_CTRL2_ENVLANPOL_MASK    0x10
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION21_CTRL2_VBPRI_OFFSET    1
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION21_CTRL2_VBPRI_MASK    0xE
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION21_CTRL2_VBPEN_OFFSET    0
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION21_CTRL2_VBPEN_MASK    0x1
+
+#define    RTL8367C_REG_VLAN_MEMBER_CONFIGURATION21_CTRL3    0x077f
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION21_CTRL3_OFFSET    0
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION21_CTRL3_MASK    0x1FFF
+
+#define    RTL8367C_REG_VLAN_MEMBER_CONFIGURATION22_CTRL0    0x0780
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION22_CTRL0_MBR_EXT_OFFSET    8
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION22_CTRL0_MBR_EXT_MASK    0x700
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION22_CTRL0_MBR_OFFSET    0
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION22_CTRL0_MBR_MASK    0xFF
+
+#define    RTL8367C_REG_VLAN_MEMBER_CONFIGURATION22_CTRL1    0x0781
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION22_CTRL1_OFFSET    0
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION22_CTRL1_MASK    0xF
+
+#define    RTL8367C_REG_VLAN_MEMBER_CONFIGURATION22_CTRL2    0x0782
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION22_CTRL2_METERIDX_EXT_OFFSET    10
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION22_CTRL2_METERIDX_EXT_MASK    0x400
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION22_CTRL2_METERIDX_OFFSET    5
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION22_CTRL2_METERIDX_MASK    0x3E0
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION22_CTRL2_ENVLANPOL_OFFSET    4
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION22_CTRL2_ENVLANPOL_MASK    0x10
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION22_CTRL2_VBPRI_OFFSET    1
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION22_CTRL2_VBPRI_MASK    0xE
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION22_CTRL2_VBPEN_OFFSET    0
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION22_CTRL2_VBPEN_MASK    0x1
+
+#define    RTL8367C_REG_VLAN_MEMBER_CONFIGURATION22_CTRL3    0x0783
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION22_CTRL3_OFFSET    0
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION22_CTRL3_MASK    0x1FFF
+
+#define    RTL8367C_REG_VLAN_MEMBER_CONFIGURATION23_CTRL0    0x0784
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION23_CTRL0_MBR_EXT_OFFSET    8
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION23_CTRL0_MBR_EXT_MASK    0x700
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION23_CTRL0_MBR_OFFSET    0
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION23_CTRL0_MBR_MASK    0xFF
+
+#define    RTL8367C_REG_VLAN_MEMBER_CONFIGURATION23_CTRL1    0x0785
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION23_CTRL1_OFFSET    0
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION23_CTRL1_MASK    0xF
+
+#define    RTL8367C_REG_VLAN_MEMBER_CONFIGURATION23_CTRL2    0x0786
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION23_CTRL2_METERIDX_EXT_OFFSET    10
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION23_CTRL2_METERIDX_EXT_MASK    0x400
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION23_CTRL2_METERIDX_OFFSET    5
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION23_CTRL2_METERIDX_MASK    0x3E0
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION23_CTRL2_ENVLANPOL_OFFSET    4
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION23_CTRL2_ENVLANPOL_MASK    0x10
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION23_CTRL2_VBPRI_OFFSET    1
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION23_CTRL2_VBPRI_MASK    0xE
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION23_CTRL2_VBPEN_OFFSET    0
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION23_CTRL2_VBPEN_MASK    0x1
+
+#define    RTL8367C_REG_VLAN_MEMBER_CONFIGURATION23_CTRL3    0x0787
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION23_CTRL3_OFFSET    0
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION23_CTRL3_MASK    0x1FFF
+
+#define    RTL8367C_REG_VLAN_MEMBER_CONFIGURATION24_CTRL0    0x0788
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION24_CTRL0_MBR_EXT_OFFSET    8
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION24_CTRL0_MBR_EXT_MASK    0x700
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION24_CTRL0_MBR_OFFSET    0
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION24_CTRL0_MBR_MASK    0xFF
+
+#define    RTL8367C_REG_VLAN_MEMBER_CONFIGURATION24_CTRL1    0x0789
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION24_CTRL1_OFFSET    0
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION24_CTRL1_MASK    0xF
+
+#define    RTL8367C_REG_VLAN_MEMBER_CONFIGURATION24_CTRL2    0x078a
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION24_CTRL2_METERIDX_EXT_OFFSET    10
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION24_CTRL2_METERIDX_EXT_MASK    0x400
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION24_CTRL2_METERIDX_OFFSET    5
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION24_CTRL2_METERIDX_MASK    0x3E0
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION24_CTRL2_ENVLANPOL_OFFSET    4
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION24_CTRL2_ENVLANPOL_MASK    0x10
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION24_CTRL2_VBPRI_OFFSET    1
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION24_CTRL2_VBPRI_MASK    0xE
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION24_CTRL2_VBPEN_OFFSET    0
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION24_CTRL2_VBPEN_MASK    0x1
+
+#define    RTL8367C_REG_VLAN_MEMBER_CONFIGURATION24_CTRL3    0x078b
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION24_CTRL3_OFFSET    0
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION24_CTRL3_MASK    0x1FFF
+
+#define    RTL8367C_REG_VLAN_MEMBER_CONFIGURATION25_CTRL0    0x078c
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION25_CTRL0_MBR_EXT_OFFSET    8
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION25_CTRL0_MBR_EXT_MASK    0x700
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION25_CTRL0_MBR_OFFSET    0
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION25_CTRL0_MBR_MASK    0xFF
+
+#define    RTL8367C_REG_VLAN_MEMBER_CONFIGURATION25_CTRL1    0x078d
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION25_CTRL1_OFFSET    0
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION25_CTRL1_MASK    0xF
+
+#define    RTL8367C_REG_VLAN_MEMBER_CONFIGURATION25_CTRL2    0x078e
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION25_CTRL2_METERIDX_EXT_OFFSET    10
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION25_CTRL2_METERIDX_EXT_MASK    0x400
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION25_CTRL2_METERIDX_OFFSET    5
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION25_CTRL2_METERIDX_MASK    0x3E0
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION25_CTRL2_ENVLANPOL_OFFSET    4
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION25_CTRL2_ENVLANPOL_MASK    0x10
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION25_CTRL2_VBPRI_OFFSET    1
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION25_CTRL2_VBPRI_MASK    0xE
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION25_CTRL2_VBPEN_OFFSET    0
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION25_CTRL2_VBPEN_MASK    0x1
+
+#define    RTL8367C_REG_VLAN_MEMBER_CONFIGURATION25_CTRL3    0x078f
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION25_CTRL3_OFFSET    0
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION25_CTRL3_MASK    0x1FFF
+
+#define    RTL8367C_REG_VLAN_MEMBER_CONFIGURATION26_CTRL0    0x0790
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION26_CTRL0_MBR_EXT_OFFSET    8
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION26_CTRL0_MBR_EXT_MASK    0x700
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION26_CTRL0_MBR_OFFSET    0
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION26_CTRL0_MBR_MASK    0xFF
+
+#define    RTL8367C_REG_VLAN_MEMBER_CONFIGURATION26_CTRL1    0x0791
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION26_CTRL1_OFFSET    0
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION26_CTRL1_MASK    0xF
+
+#define    RTL8367C_REG_VLAN_MEMBER_CONFIGURATION26_CTRL2    0x0792
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION26_CTRL2_METERIDX_EXT_OFFSET    10
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION26_CTRL2_METERIDX_EXT_MASK    0x400
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION26_CTRL2_METERIDX_OFFSET    5
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION26_CTRL2_METERIDX_MASK    0x3E0
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION26_CTRL2_ENVLANPOL_OFFSET    4
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION26_CTRL2_ENVLANPOL_MASK    0x10
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION26_CTRL2_VBPRI_OFFSET    1
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION26_CTRL2_VBPRI_MASK    0xE
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION26_CTRL2_VBPEN_OFFSET    0
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION26_CTRL2_VBPEN_MASK    0x1
+
+#define    RTL8367C_REG_VLAN_MEMBER_CONFIGURATION26_CTRL3    0x0793
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION26_CTRL3_OFFSET    0
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION26_CTRL3_MASK    0x1FFF
+
+#define    RTL8367C_REG_VLAN_MEMBER_CONFIGURATION27_CTRL0    0x0794
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION27_CTRL0_MBR_EXT_OFFSET    8
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION27_CTRL0_MBR_EXT_MASK    0x700
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION27_CTRL0_MBR_OFFSET    0
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION27_CTRL0_MBR_MASK    0xFF
+
+#define    RTL8367C_REG_VLAN_MEMBER_CONFIGURATION27_CTRL1    0x0795
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION27_CTRL1_OFFSET    0
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION27_CTRL1_MASK    0xF
+
+#define    RTL8367C_REG_VLAN_MEMBER_CONFIGURATION27_CTRL2    0x0796
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION27_CTRL2_METERIDX_EXT_OFFSET    10
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION27_CTRL2_METERIDX_EXT_MASK    0x400
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION27_CTRL2_METERIDX_OFFSET    5
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION27_CTRL2_METERIDX_MASK    0x3E0
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION27_CTRL2_ENVLANPOL_OFFSET    4
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION27_CTRL2_ENVLANPOL_MASK    0x10
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION27_CTRL2_VBPRI_OFFSET    1
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION27_CTRL2_VBPRI_MASK    0xE
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION27_CTRL2_VBPEN_OFFSET    0
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION27_CTRL2_VBPEN_MASK    0x1
+
+#define    RTL8367C_REG_VLAN_MEMBER_CONFIGURATION27_CTRL3    0x0797
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION27_CTRL3_OFFSET    0
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION27_CTRL3_MASK    0x1FFF
+
+#define    RTL8367C_REG_VLAN_MEMBER_CONFIGURATION28_CTRL0    0x0798
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION28_CTRL0_MBR_EXT_OFFSET    8
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION28_CTRL0_MBR_EXT_MASK    0x700
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION28_CTRL0_MBR_OFFSET    0
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION28_CTRL0_MBR_MASK    0xFF
+
+#define    RTL8367C_REG_VLAN_MEMBER_CONFIGURATION28_CTRL1    0x0799
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION28_CTRL1_OFFSET    0
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION28_CTRL1_MASK    0xF
+
+#define    RTL8367C_REG_VLAN_MEMBER_CONFIGURATION28_CTRL2    0x079a
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION28_CTRL2_METERIDX_EXT_OFFSET    10
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION28_CTRL2_METERIDX_EXT_MASK    0x400
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION28_CTRL2_METERIDX_OFFSET    5
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION28_CTRL2_METERIDX_MASK    0x3E0
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION28_CTRL2_ENVLANPOL_OFFSET    4
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION28_CTRL2_ENVLANPOL_MASK    0x10
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION28_CTRL2_VBPRI_OFFSET    1
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION28_CTRL2_VBPRI_MASK    0xE
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION28_CTRL2_VBPEN_OFFSET    0
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION28_CTRL2_VBPEN_MASK    0x1
+
+#define    RTL8367C_REG_VLAN_MEMBER_CONFIGURATION28_CTRL3    0x079b
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION28_CTRL3_OFFSET    0
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION28_CTRL3_MASK    0x1FFF
+
+#define    RTL8367C_REG_VLAN_MEMBER_CONFIGURATION29_CTRL0    0x079c
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION29_CTRL0_MBR_EXT_OFFSET    8
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION29_CTRL0_MBR_EXT_MASK    0x700
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION29_CTRL0_MBR_OFFSET    0
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION29_CTRL0_MBR_MASK    0xFF
+
+#define    RTL8367C_REG_VLAN_MEMBER_CONFIGURATION29_CTRL1    0x079d
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION29_CTRL1_OFFSET    0
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION29_CTRL1_MASK    0xF
+
+#define    RTL8367C_REG_VLAN_MEMBER_CONFIGURATION29_CTRL2    0x079e
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION29_CTRL2_METERIDX_EXT_OFFSET    10
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION29_CTRL2_METERIDX_EXT_MASK    0x400
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION29_CTRL2_METERIDX_OFFSET    5
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION29_CTRL2_METERIDX_MASK    0x3E0
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION29_CTRL2_ENVLANPOL_OFFSET    4
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION29_CTRL2_ENVLANPOL_MASK    0x10
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION29_CTRL2_VBPRI_OFFSET    1
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION29_CTRL2_VBPRI_MASK    0xE
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION29_CTRL2_VBPEN_OFFSET    0
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION29_CTRL2_VBPEN_MASK    0x1
+
+#define    RTL8367C_REG_VLAN_MEMBER_CONFIGURATION29_CTRL3    0x079f
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION29_CTRL3_OFFSET    0
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION29_CTRL3_MASK    0x1FFF
+
+#define    RTL8367C_REG_VLAN_MEMBER_CONFIGURATION30_CTRL0    0x07a0
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION30_CTRL0_MBR_EXT_OFFSET    8
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION30_CTRL0_MBR_EXT_MASK    0x700
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION30_CTRL0_MBR_OFFSET    0
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION30_CTRL0_MBR_MASK    0xFF
+
+#define    RTL8367C_REG_VLAN_MEMBER_CONFIGURATION30_CTRL1    0x07a1
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION30_CTRL1_OFFSET    0
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION30_CTRL1_MASK    0xF
+
+#define    RTL8367C_REG_VLAN_MEMBER_CONFIGURATION30_CTRL2    0x07a2
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION30_CTRL2_METERIDX_EXT_OFFSET    10
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION30_CTRL2_METERIDX_EXT_MASK    0x400
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION30_CTRL2_METERIDX_OFFSET    5
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION30_CTRL2_METERIDX_MASK    0x3E0
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION30_CTRL2_ENVLANPOL_OFFSET    4
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION30_CTRL2_ENVLANPOL_MASK    0x10
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION30_CTRL2_VBPRI_OFFSET    1
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION30_CTRL2_VBPRI_MASK    0xE
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION30_CTRL2_VBPEN_OFFSET    0
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION30_CTRL2_VBPEN_MASK    0x1
+
+#define    RTL8367C_REG_VLAN_MEMBER_CONFIGURATION30_CTRL3    0x07a3
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION30_CTRL3_OFFSET    0
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION30_CTRL3_MASK    0x1FFF
+
+#define    RTL8367C_REG_VLAN_MEMBER_CONFIGURATION31_CTRL0    0x07a4
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION31_CTRL0_MBR_EXT_OFFSET    8
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION31_CTRL0_MBR_EXT_MASK    0x700
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION31_CTRL0_MBR_OFFSET    0
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION31_CTRL0_MBR_MASK    0xFF
+
+#define    RTL8367C_REG_VLAN_MEMBER_CONFIGURATION31_CTRL1    0x07a5
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION31_CTRL1_OFFSET    0
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION31_CTRL1_MASK    0xF
+
+#define    RTL8367C_REG_VLAN_MEMBER_CONFIGURATION31_CTRL2    0x07a6
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION31_CTRL2_METERIDX_EXT_OFFSET    10
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION31_CTRL2_METERIDX_EXT_MASK    0x400
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION31_CTRL2_METERIDX_OFFSET    5
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION31_CTRL2_METERIDX_MASK    0x3E0
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION31_CTRL2_ENVLANPOL_OFFSET    4
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION31_CTRL2_ENVLANPOL_MASK    0x10
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION31_CTRL2_VBPRI_OFFSET    1
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION31_CTRL2_VBPRI_MASK    0xE
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION31_CTRL2_VBPEN_OFFSET    0
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION31_CTRL2_VBPEN_MASK    0x1
+
+#define    RTL8367C_REG_VLAN_MEMBER_CONFIGURATION31_CTRL3    0x07a7
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION31_CTRL3_OFFSET    0
+#define    RTL8367C_VLAN_MEMBER_CONFIGURATION31_CTRL3_MASK    0x1FFF
+
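+/*
+ * Editorial sketch (assumption, not part of the vendor sources): every VLAN
+ * member configuration entry above occupies four consecutive 16-bit
+ * registers (CTRL0..CTRL3, stride 4), and every field is described by an
+ * OFFSET/MASK pair, so generic accessors reduce to the hypothetical
+ * helpers below:
+ */
+static inline unsigned int rtl8367c_vlan_mc_reg(unsigned int idx,
+						unsigned int ctrl)
+{
+	/* entry 8 starts at 0x0748; each entry spans 4 registers */
+	return RTL8367C_REG_VLAN_MEMBER_CONFIGURATION8_CTRL0 +
+	       ((idx - 8) * 4) + ctrl;
+}
+
+static inline unsigned int rtl8367c_field_get(unsigned int reg_val,
+					      unsigned int mask,
+					      unsigned int offset)
+{
+	/* extract a field: mask it out, shift it down */
+	return (reg_val & mask) >> offset;
+}
+
+static inline unsigned int rtl8367c_field_set(unsigned int reg_val,
+					      unsigned int mask,
+					      unsigned int offset,
+					      unsigned int field)
+{
+	/* read-modify-write of one field within a register value */
+	return (reg_val & ~mask) | ((field << offset) & mask);
+}
+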
+#define    RTL8367C_REG_VLAN_CTRL    0x07a8
+#define    RTL8367C_VLAN_CTRL_OFFSET    0
+#define    RTL8367C_VLAN_CTRL_MASK    0x1
+
+#define    RTL8367C_REG_VLAN_INGRESS    0x07a9
+#define    RTL8367C_VLAN_INGRESS_OFFSET    0
+#define    RTL8367C_VLAN_INGRESS_MASK    0x7FF
+
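+/*
+ * Illustrative usage (editorial assumption): judging by the masks above,
+ * RTL8367C_REG_VLAN_CTRL holds a single VLAN-enable bit and
+ * RTL8367C_REG_VLAN_INGRESS an 11-bit per-port ingress-filter bitmap
+ * (ports 0..10). With a low-level write helper such as the SDK's
+ * rtl8367c_setAsicReg(), enabling VLAN plus ingress filtering on
+ * ports 0-4 would look like:
+ *
+ *	rtl8367c_setAsicReg(RTL8367C_REG_VLAN_CTRL, 0x1);
+ *	rtl8367c_setAsicReg(RTL8367C_REG_VLAN_INGRESS, 0x1F);
+ */
+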
+#define    RTL8367C_REG_VLAN_ACCEPT_FRAME_TYPE_CTRL0    0x07aa
+#define    RTL8367C_PORT7_FRAME_TYPE_OFFSET    14
+#define    RTL8367C_PORT7_FRAME_TYPE_MASK    0xC000
+#define    RTL8367C_PORT6_FRAME_TYPE_OFFSET    12
+#define    RTL8367C_PORT6_FRAME_TYPE_MASK    0x3000
+#define    RTL8367C_PORT5_FRAME_TYPE_OFFSET    10
+#define    RTL8367C_PORT5_FRAME_TYPE_MASK    0xC00
+#define    RTL8367C_PORT4_FRAME_TYPE_OFFSET    8
+#define    RTL8367C_PORT4_FRAME_TYPE_MASK    0x300
+#define    RTL8367C_PORT3_FRAME_TYPE_OFFSET    6
+#define    RTL8367C_PORT3_FRAME_TYPE_MASK    0xC0
+#define    RTL8367C_PORT2_FRAME_TYPE_OFFSET    4
+#define    RTL8367C_PORT2_FRAME_TYPE_MASK    0x30
+#define    RTL8367C_PORT1_FRAME_TYPE_OFFSET    2
+#define    RTL8367C_PORT1_FRAME_TYPE_MASK    0xC
+#define    RTL8367C_PORT0_FRAME_TYPE_OFFSET    0
+#define    RTL8367C_PORT0_FRAME_TYPE_MASK    0x3
+
+#define    RTL8367C_REG_VLAN_ACCEPT_FRAME_TYPE_CTRL1    0x07ab
+#define    RTL8367C_PORT10_FRAME_TYPE_OFFSET    4
+#define    RTL8367C_PORT10_FRAME_TYPE_MASK    0x30
+#define    RTL8367C_PORT9_FRAME_TYPE_OFFSET    2
+#define    RTL8367C_PORT9_FRAME_TYPE_MASK    0xC
+#define    RTL8367C_PORT8_FRAME_TYPE_OFFSET    0
+#define    RTL8367C_PORT8_FRAME_TYPE_MASK    0x3
+
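+/*
+ * Editorial sketch (assumption): the accept-frame-type field is 2 bits per
+ * port, eight ports per register (ports 0-7 in CTRL0, 8-10 in CTRL1), so a
+ * port's register and bit offset can be derived instead of switch-casing
+ * over the per-port macros:
+ */
+static inline unsigned int rtl8367c_frame_type_reg(unsigned int port)
+{
+	return RTL8367C_REG_VLAN_ACCEPT_FRAME_TYPE_CTRL0 + (port >> 3);
+}
+
+static inline unsigned int rtl8367c_frame_type_offset(unsigned int port)
+{
+	return (port & 0x7) << 1;
+}
+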
+#define    RTL8367C_REG_PORT_PBFIDEN    0x07ac
+#define    RTL8367C_PORT_PBFIDEN_OFFSET    0
+#define    RTL8367C_PORT_PBFIDEN_MASK    0x7FF
+
+#define    RTL8367C_REG_PORT0_PBFID    0x07ad
+#define    RTL8367C_PORT0_PBFID_OFFSET    0
+#define    RTL8367C_PORT0_PBFID_MASK    0xF
+
+#define    RTL8367C_REG_PORT1_PBFID    0x07ae
+#define    RTL8367C_PORT1_PBFID_OFFSET    0
+#define    RTL8367C_PORT1_PBFID_MASK    0xF
+
+#define    RTL8367C_REG_PORT2_PBFID    0x07af
+#define    RTL8367C_PORT2_PBFID_OFFSET    0
+#define    RTL8367C_PORT2_PBFID_MASK    0xF
+
+#define    RTL8367C_REG_PORT3_PBFID    0x07b0
+#define    RTL8367C_PORT3_PBFID_OFFSET    0
+#define    RTL8367C_PORT3_PBFID_MASK    0xF
+
+#define    RTL8367C_REG_PORT4_PBFID    0x07b1
+#define    RTL8367C_PORT4_PBFID_OFFSET    0
+#define    RTL8367C_PORT4_PBFID_MASK    0xF
+
+#define    RTL8367C_REG_PORT5_PBFID    0x07b2
+#define    RTL8367C_PORT5_PBFID_OFFSET    0
+#define    RTL8367C_PORT5_PBFID_MASK    0xF
+
+#define    RTL8367C_REG_PORT6_PBFID    0x07b3
+#define    RTL8367C_PORT6_PBFID_OFFSET    0
+#define    RTL8367C_PORT6_PBFID_MASK    0xF
+
+#define    RTL8367C_REG_PORT7_PBFID    0x07b4
+#define    RTL8367C_PORT7_PBFID_OFFSET    0
+#define    RTL8367C_PORT7_PBFID_MASK    0xF
+
+#define    RTL8367C_REG_VLAN_EXT_CTRL    0x07b5
+#define    RTL8367C_VLAN_1P_REMARK_BYPASS_REALKEEP_OFFSET    2
+#define    RTL8367C_VLAN_1P_REMARK_BYPASS_REALKEEP_MASK    0x4
+#define    RTL8367C_VLAN_VID4095_TYPE_OFFSET    1
+#define    RTL8367C_VLAN_VID4095_TYPE_MASK    0x2
+#define    RTL8367C_VLAN_VID0_TYPE_OFFSET    0
+#define    RTL8367C_VLAN_VID0_TYPE_MASK    0x1
+
+#define    RTL8367C_REG_VLAN_EXT_CTRL2    0x07b6
+#define    RTL8367C_VLAN_EXT_CTRL2_OFFSET    0
+#define    RTL8367C_VLAN_EXT_CTRL2_MASK    0x1
+
+#define    RTL8367C_REG_PORT8_PBFID    0x07b7
+#define    RTL8367C_PORT8_PBFID_OFFSET    0
+#define    RTL8367C_PORT8_PBFID_MASK    0xF
+
+#define    RTL8367C_REG_PORT9_PBFID    0x07b8
+#define    RTL8367C_PORT9_PBFID_OFFSET    0
+#define    RTL8367C_PORT9_PBFID_MASK    0xF
+
+#define    RTL8367C_REG_PORT10_PBFID    0x07b9
+#define    RTL8367C_PORT10_PBFID_OFFSET    0
+#define    RTL8367C_PORT10_PBFID_MASK    0xF
+
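+/*
+ * Editorial note (assumption): the 4-bit per-port filtering-database IDs
+ * are not fully contiguous -- ports 0-7 sit at 0x07ad..0x07b4, while ports
+ * 8-10 were appended after the VLAN_EXT registers at 0x07b7..0x07b9. A
+ * hypothetical lookup therefore needs the split:
+ */
+static inline unsigned int rtl8367c_pbfid_reg(unsigned int port)
+{
+	return (port < 8) ? (RTL8367C_REG_PORT0_PBFID + port)
+			  : (RTL8367C_REG_PORT8_PBFID + (port - 8));
+}
+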
+#define    RTL8367C_REG_CVLAN_DUMMY00    0x07E0
+
+#define    RTL8367C_REG_CVLAN_DUMMY01    0x07E1
+
+#define    RTL8367C_REG_CVLAN_DUMMY02    0x07E2
+
+#define    RTL8367C_REG_CVLAN_DUMMY03    0x07E3
+
+#define    RTL8367C_REG_CVLAN_DUMMY04    0x07E4
+
+#define    RTL8367C_REG_CVLAN_DUMMY05    0x07E5
+
+#define    RTL8367C_REG_CVLAN_DUMMY06    0x07E6
+
+#define    RTL8367C_REG_CVLAN_DUMMY07    0x07E7
+
+#define    RTL8367C_REG_CVLAN_DUMMY08    0x07E8
+
+#define    RTL8367C_REG_CVLAN_DUMMY09    0x07E9
+
+#define    RTL8367C_REG_CVLAN_DUMMY10    0x07EA
+
+#define    RTL8367C_REG_CVLAN_DUMMY11    0x07EB
+
+#define    RTL8367C_REG_CVLAN_DUMMY12    0x07EC
+
+#define    RTL8367C_REG_CVLAN_DUMMY13    0x07ED
+
+#define    RTL8367C_REG_CVLAN_DUMMY14    0x07EE
+
+#define    RTL8367C_REG_CVLAN_DUMMY15    0x07EF
+
+/* (16'h0800)dpm_reg */
+
+#define    RTL8367C_REG_RMA_CTRL00    0x0800
+#define    RTL8367C_RMA_CTRL00_OPERATION_OFFSET    7
+#define    RTL8367C_RMA_CTRL00_OPERATION_MASK    0x180
+#define    RTL8367C_RMA_CTRL00_DISCARD_STORM_FILTER_OFFSET    6
+#define    RTL8367C_RMA_CTRL00_DISCARD_STORM_FILTER_MASK    0x40
+#define    RTL8367C_TRAP_PRIORITY_OFFSET    3
+#define    RTL8367C_TRAP_PRIORITY_MASK    0x38
+#define    RTL8367C_RMA_CTRL00_KEEP_FORMAT_OFFSET    2
+#define    RTL8367C_RMA_CTRL00_KEEP_FORMAT_MASK    0x4
+#define    RTL8367C_RMA_CTRL00_VLAN_LEAKY_OFFSET    1
+#define    RTL8367C_RMA_CTRL00_VLAN_LEAKY_MASK    0x2
+#define    RTL8367C_RMA_CTRL00_PORTISO_LEAKY_OFFSET    0
+#define    RTL8367C_RMA_CTRL00_PORTISO_LEAKY_MASK    0x1
+
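+/*
+ * Illustrative usage (editorial assumption): each RMA_CTRL register uses
+ * the same layout -- a 2-bit OPERATION field at bits 8:7 plus the
+ * discard/keep/leaky flags below it. Assuming the SDK's usual encoding
+ * (0 = forward, 1 = trap to CPU, 2 = discard), trapping the RMA covered by
+ * CTRL00 could be sketched with the generic field helper above:
+ *
+ *	val = rtl8367c_field_set(val, RTL8367C_RMA_CTRL00_OPERATION_MASK,
+ *				 RTL8367C_RMA_CTRL00_OPERATION_OFFSET, 1);
+ */
+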
+#define    RTL8367C_REG_RMA_CTRL01    0x0801
+#define    RTL8367C_RMA_CTRL01_OPERATION_OFFSET    7
+#define    RTL8367C_RMA_CTRL01_OPERATION_MASK    0x180
+#define    RTL8367C_RMA_CTRL01_DISCARD_STORM_FILTER_OFFSET    6
+#define    RTL8367C_RMA_CTRL01_DISCARD_STORM_FILTER_MASK    0x40
+#define    RTL8367C_RMA_CTRL01_KEEP_FORMAT_OFFSET    2
+#define    RTL8367C_RMA_CTRL01_KEEP_FORMAT_MASK    0x4
+#define    RTL8367C_RMA_CTRL01_VLAN_LEAKY_OFFSET    1
+#define    RTL8367C_RMA_CTRL01_VLAN_LEAKY_MASK    0x2
+#define    RTL8367C_RMA_CTRL01_PORTISO_LEAKY_OFFSET    0
+#define    RTL8367C_RMA_CTRL01_PORTISO_LEAKY_MASK    0x1
+
+#define    RTL8367C_REG_RMA_CTRL02    0x0802
+#define    RTL8367C_RMA_CTRL02_OPERATION_OFFSET    7
+#define    RTL8367C_RMA_CTRL02_OPERATION_MASK    0x180
+#define    RTL8367C_RMA_CTRL02_DISCARD_STORM_FILTER_OFFSET    6
+#define    RTL8367C_RMA_CTRL02_DISCARD_STORM_FILTER_MASK    0x40
+#define    RTL8367C_RMA_CTRL02_KEEP_FORMAT_OFFSET    2
+#define    RTL8367C_RMA_CTRL02_KEEP_FORMAT_MASK    0x4
+#define    RTL8367C_RMA_CTRL02_VLAN_LEAKY_OFFSET    1
+#define    RTL8367C_RMA_CTRL02_VLAN_LEAKY_MASK    0x2
+#define    RTL8367C_RMA_CTRL02_PORTISO_LEAKY_OFFSET    0
+#define    RTL8367C_RMA_CTRL02_PORTISO_LEAKY_MASK    0x1
+
+#define    RTL8367C_REG_RMA_CTRL03    0x0803
+#define    RTL8367C_RMA_CTRL03_OPERATION_OFFSET    7
+#define    RTL8367C_RMA_CTRL03_OPERATION_MASK    0x180
+#define    RTL8367C_RMA_CTRL03_DISCARD_STORM_FILTER_OFFSET    6
+#define    RTL8367C_RMA_CTRL03_DISCARD_STORM_FILTER_MASK    0x40
+#define    RTL8367C_RMA_CTRL03_KEEP_FORMAT_OFFSET    2
+#define    RTL8367C_RMA_CTRL03_KEEP_FORMAT_MASK    0x4
+#define    RTL8367C_RMA_CTRL03_VLAN_LEAKY_OFFSET    1
+#define    RTL8367C_RMA_CTRL03_VLAN_LEAKY_MASK    0x2
+#define    RTL8367C_RMA_CTRL03_PORTISO_LEAKY_OFFSET    0
+#define    RTL8367C_RMA_CTRL03_PORTISO_LEAKY_MASK    0x1
+
+#define    RTL8367C_REG_RMA_CTRL04    0x0804
+#define    RTL8367C_RMA_CTRL04_OPERATION_OFFSET    7
+#define    RTL8367C_RMA_CTRL04_OPERATION_MASK    0x180
+#define    RTL8367C_RMA_CTRL04_DISCARD_STORM_FILTER_OFFSET    6
+#define    RTL8367C_RMA_CTRL04_DISCARD_STORM_FILTER_MASK    0x40
+#define    RTL8367C_RMA_CTRL04_KEEP_FORMAT_OFFSET    2
+#define    RTL8367C_RMA_CTRL04_KEEP_FORMAT_MASK    0x4
+#define    RTL8367C_RMA_CTRL04_VLAN_LEAKY_OFFSET    1
+#define    RTL8367C_RMA_CTRL04_VLAN_LEAKY_MASK    0x2
+#define    RTL8367C_RMA_CTRL04_PORTISO_LEAKY_OFFSET    0
+#define    RTL8367C_RMA_CTRL04_PORTISO_LEAKY_MASK    0x1
+
+#define    RTL8367C_REG_RMA_CTRL08    0x0808
+#define    RTL8367C_RMA_CTRL08_OPERATION_OFFSET    7
+#define    RTL8367C_RMA_CTRL08_OPERATION_MASK    0x180
+#define    RTL8367C_RMA_CTRL08_DISCARD_STORM_FILTER_OFFSET    6
+#define    RTL8367C_RMA_CTRL08_DISCARD_STORM_FILTER_MASK    0x40
+#define    RTL8367C_RMA_CTRL08_KEEP_FORMAT_OFFSET    2
+#define    RTL8367C_RMA_CTRL08_KEEP_FORMAT_MASK    0x4
+#define    RTL8367C_RMA_CTRL08_VLAN_LEAKY_OFFSET    1
+#define    RTL8367C_RMA_CTRL08_VLAN_LEAKY_MASK    0x2
+#define    RTL8367C_RMA_CTRL08_PORTISO_LEAKY_OFFSET    0
+#define    RTL8367C_RMA_CTRL08_PORTISO_LEAKY_MASK    0x1
+
+#define    RTL8367C_REG_RMA_CTRL0D    0x080d
+#define    RTL8367C_RMA_CTRL0D_OPERATION_OFFSET    7
+#define    RTL8367C_RMA_CTRL0D_OPERATION_MASK    0x180
+#define    RTL8367C_RMA_CTRL0D_DISCARD_STORM_FILTER_OFFSET    6
+#define    RTL8367C_RMA_CTRL0D_DISCARD_STORM_FILTER_MASK    0x40
+#define    RTL8367C_RMA_CTRL0D_KEEP_FORMAT_OFFSET    2
+#define    RTL8367C_RMA_CTRL0D_KEEP_FORMAT_MASK    0x4
+#define    RTL8367C_RMA_CTRL0D_VLAN_LEAKY_OFFSET    1
+#define    RTL8367C_RMA_CTRL0D_VLAN_LEAKY_MASK    0x2
+#define    RTL8367C_RMA_CTRL0D_PORTISO_LEAKY_OFFSET    0
+#define    RTL8367C_RMA_CTRL0D_PORTISO_LEAKY_MASK    0x1
+
+#define    RTL8367C_REG_RMA_CTRL0E    0x080e
+#define    RTL8367C_RMA_CTRL0E_OPERATION_OFFSET    7
+#define    RTL8367C_RMA_CTRL0E_OPERATION_MASK    0x180
+#define    RTL8367C_RMA_CTRL0E_DISCARD_STORM_FILTER_OFFSET    6
+#define    RTL8367C_RMA_CTRL0E_DISCARD_STORM_FILTER_MASK    0x40
+#define    RTL8367C_RMA_CTRL0E_KEEP_FORMAT_OFFSET    2
+#define    RTL8367C_RMA_CTRL0E_KEEP_FORMAT_MASK    0x4
+#define    RTL8367C_RMA_CTRL0E_VLAN_LEAKY_OFFSET    1
+#define    RTL8367C_RMA_CTRL0E_VLAN_LEAKY_MASK    0x2
+#define    RTL8367C_RMA_CTRL0E_PORTISO_LEAKY_OFFSET    0
+#define    RTL8367C_RMA_CTRL0E_PORTISO_LEAKY_MASK    0x1
+
+#define    RTL8367C_REG_RMA_CTRL10    0x0810
+#define    RTL8367C_RMA_CTRL10_OPERATION_OFFSET    7
+#define    RTL8367C_RMA_CTRL10_OPERATION_MASK    0x180
+#define    RTL8367C_RMA_CTRL10_DISCARD_STORM_FILTER_OFFSET    6
+#define    RTL8367C_RMA_CTRL10_DISCARD_STORM_FILTER_MASK    0x40
+#define    RTL8367C_RMA_CTRL10_KEEP_FORMAT_OFFSET    2
+#define    RTL8367C_RMA_CTRL10_KEEP_FORMAT_MASK    0x4
+#define    RTL8367C_RMA_CTRL10_VLAN_LEAKY_OFFSET    1
+#define    RTL8367C_RMA_CTRL10_VLAN_LEAKY_MASK    0x2
+#define    RTL8367C_RMA_CTRL10_PORTISO_LEAKY_OFFSET    0
+#define    RTL8367C_RMA_CTRL10_PORTISO_LEAKY_MASK    0x1
+
+#define    RTL8367C_REG_RMA_CTRL11    0x0811
+#define    RTL8367C_RMA_CTRL11_OPERATION_OFFSET    7
+#define    RTL8367C_RMA_CTRL11_OPERATION_MASK    0x180
+#define    RTL8367C_RMA_CTRL11_DISCARD_STORM_FILTER_OFFSET    6
+#define    RTL8367C_RMA_CTRL11_DISCARD_STORM_FILTER_MASK    0x40
+#define    RTL8367C_RMA_CTRL11_KEEP_FORMAT_OFFSET    2
+#define    RTL8367C_RMA_CTRL11_KEEP_FORMAT_MASK    0x4
+#define    RTL8367C_RMA_CTRL11_VLAN_LEAKY_OFFSET    1
+#define    RTL8367C_RMA_CTRL11_VLAN_LEAKY_MASK    0x2
+#define    RTL8367C_RMA_CTRL11_PORTISO_LEAKY_OFFSET    0
+#define    RTL8367C_RMA_CTRL11_PORTISO_LEAKY_MASK    0x1
+
+#define    RTL8367C_REG_RMA_CTRL12    0x0812
+#define    RTL8367C_RMA_CTRL12_OPERATION_OFFSET    7
+#define    RTL8367C_RMA_CTRL12_OPERATION_MASK    0x180
+#define    RTL8367C_RMA_CTRL12_DISCARD_STORM_FILTER_OFFSET    6
+#define    RTL8367C_RMA_CTRL12_DISCARD_STORM_FILTER_MASK    0x40
+#define    RTL8367C_RMA_CTRL12_KEEP_FORMAT_OFFSET    2
+#define    RTL8367C_RMA_CTRL12_KEEP_FORMAT_MASK    0x4
+#define    RTL8367C_RMA_CTRL12_VLAN_LEAKY_OFFSET    1
+#define    RTL8367C_RMA_CTRL12_VLAN_LEAKY_MASK    0x2
+#define    RTL8367C_RMA_CTRL12_PORTISO_LEAKY_OFFSET    0
+#define    RTL8367C_RMA_CTRL12_PORTISO_LEAKY_MASK    0x1
+
+#define    RTL8367C_REG_RMA_CTRL13    0x0813
+#define    RTL8367C_RMA_CTRL13_OPERATION_OFFSET    7
+#define    RTL8367C_RMA_CTRL13_OPERATION_MASK    0x180
+#define    RTL8367C_RMA_CTRL13_DISCARD_STORM_FILTER_OFFSET    6
+#define    RTL8367C_RMA_CTRL13_DISCARD_STORM_FILTER_MASK    0x40
+#define    RTL8367C_RMA_CTRL13_KEEP_FORMAT_OFFSET    2
+#define    RTL8367C_RMA_CTRL13_KEEP_FORMAT_MASK    0x4
+#define    RTL8367C_RMA_CTRL13_VLAN_LEAKY_OFFSET    1
+#define    RTL8367C_RMA_CTRL13_VLAN_LEAKY_MASK    0x2
+#define    RTL8367C_RMA_CTRL13_PORTISO_LEAKY_OFFSET    0
+#define    RTL8367C_RMA_CTRL13_PORTISO_LEAKY_MASK    0x1
+
+#define    RTL8367C_REG_RMA_CTRL18    0x0818
+#define    RTL8367C_RMA_CTRL18_OPERATION_OFFSET    7
+#define    RTL8367C_RMA_CTRL18_OPERATION_MASK    0x180
+#define    RTL8367C_RMA_CTRL18_DISCARD_STORM_FILTER_OFFSET    6
+#define    RTL8367C_RMA_CTRL18_DISCARD_STORM_FILTER_MASK    0x40
+#define    RTL8367C_RMA_CTRL18_KEEP_FORMAT_OFFSET    2
+#define    RTL8367C_RMA_CTRL18_KEEP_FORMAT_MASK    0x4
+#define    RTL8367C_RMA_CTRL18_VLAN_LEAKY_OFFSET    1
+#define    RTL8367C_RMA_CTRL18_VLAN_LEAKY_MASK    0x2
+#define    RTL8367C_RMA_CTRL18_PORTISO_LEAKY_OFFSET    0
+#define    RTL8367C_RMA_CTRL18_PORTISO_LEAKY_MASK    0x1
+
+#define    RTL8367C_REG_RMA_CTRL1A    0x081a
+#define    RTL8367C_RMA_CTRL1A_OPERATION_OFFSET    7
+#define    RTL8367C_RMA_CTRL1A_OPERATION_MASK    0x180
+#define    RTL8367C_RMA_CTRL1A_DISCARD_STORM_FILTER_OFFSET    6
+#define    RTL8367C_RMA_CTRL1A_DISCARD_STORM_FILTER_MASK    0x40
+#define    RTL8367C_RMA_CTRL1A_KEEP_FORMAT_OFFSET    2
+#define    RTL8367C_RMA_CTRL1A_KEEP_FORMAT_MASK    0x4
+#define    RTL8367C_RMA_CTRL1A_VLAN_LEAKY_OFFSET    1
+#define    RTL8367C_RMA_CTRL1A_VLAN_LEAKY_MASK    0x2
+#define    RTL8367C_RMA_CTRL1A_PORTISO_LEAKY_OFFSET    0
+#define    RTL8367C_RMA_CTRL1A_PORTISO_LEAKY_MASK    0x1
+
+#define    RTL8367C_REG_RMA_CTRL20    0x0820
+#define    RTL8367C_RMA_CTRL20_OPERATION_OFFSET    7
+#define    RTL8367C_RMA_CTRL20_OPERATION_MASK    0x180
+#define    RTL8367C_RMA_CTRL20_DISCARD_STORM_FILTER_OFFSET    6
+#define    RTL8367C_RMA_CTRL20_DISCARD_STORM_FILTER_MASK    0x40
+#define    RTL8367C_RMA_CTRL20_KEEP_FORMAT_OFFSET    2
+#define    RTL8367C_RMA_CTRL20_KEEP_FORMAT_MASK    0x4
+#define    RTL8367C_RMA_CTRL20_VLAN_LEAKY_OFFSET    1
+#define    RTL8367C_RMA_CTRL20_VLAN_LEAKY_MASK    0x2
+#define    RTL8367C_RMA_CTRL20_PORTISO_LEAKY_OFFSET    0
+#define    RTL8367C_RMA_CTRL20_PORTISO_LEAKY_MASK    0x1
+
+#define    RTL8367C_REG_RMA_CTRL21    0x0821
+#define    RTL8367C_RMA_CTRL21_OPERATION_OFFSET    7
+#define    RTL8367C_RMA_CTRL21_OPERATION_MASK    0x180
+#define    RTL8367C_RMA_CTRL21_DISCARD_STORM_FILTER_OFFSET    6
+#define    RTL8367C_RMA_CTRL21_DISCARD_STORM_FILTER_MASK    0x40
+#define    RTL8367C_RMA_CTRL21_KEEP_FORMAT_OFFSET    2
+#define    RTL8367C_RMA_CTRL21_KEEP_FORMAT_MASK    0x4
+#define    RTL8367C_RMA_CTRL21_VLAN_LEAKY_OFFSET    1
+#define    RTL8367C_RMA_CTRL21_VLAN_LEAKY_MASK    0x2
+#define    RTL8367C_RMA_CTRL21_PORTISO_LEAKY_OFFSET    0
+#define    RTL8367C_RMA_CTRL21_PORTISO_LEAKY_MASK    0x1
+
+#define    RTL8367C_REG_RMA_CTRL22    0x0822
+#define    RTL8367C_RMA_CTRL22_OPERATION_OFFSET    7
+#define    RTL8367C_RMA_CTRL22_OPERATION_MASK    0x180
+#define    RTL8367C_RMA_CTRL22_DISCARD_STORM_FILTER_OFFSET    6
+#define    RTL8367C_RMA_CTRL22_DISCARD_STORM_FILTER_MASK    0x40
+#define    RTL8367C_RMA_CTRL22_KEEP_FORMAT_OFFSET    2
+#define    RTL8367C_RMA_CTRL22_KEEP_FORMAT_MASK    0x4
+#define    RTL8367C_RMA_CTRL22_VLAN_LEAKY_OFFSET    1
+#define    RTL8367C_RMA_CTRL22_VLAN_LEAKY_MASK    0x2
+#define    RTL8367C_RMA_CTRL22_PORTISO_LEAKY_OFFSET    0
+#define    RTL8367C_RMA_CTRL22_PORTISO_LEAKY_MASK    0x1
+
+#define    RTL8367C_REG_RMA_CTRL_CDP    0x0830
+#define    RTL8367C_RMA_CTRL_CDP_OPERATION_OFFSET    7
+#define    RTL8367C_RMA_CTRL_CDP_OPERATION_MASK    0x180
+#define    RTL8367C_RMA_CTRL_CDP_DISCARD_STORM_FILTER_OFFSET    6
+#define    RTL8367C_RMA_CTRL_CDP_DISCARD_STORM_FILTER_MASK    0x40
+#define    RTL8367C_RMA_CTRL_CDP_KEEP_FORMAT_OFFSET    2
+#define    RTL8367C_RMA_CTRL_CDP_KEEP_FORMAT_MASK    0x4
+#define    RTL8367C_RMA_CTRL_CDP_VLAN_LEAKY_OFFSET    1
+#define    RTL8367C_RMA_CTRL_CDP_VLAN_LEAKY_MASK    0x2
+#define    RTL8367C_RMA_CTRL_CDP_PORTISO_LEAKY_OFFSET    0
+#define    RTL8367C_RMA_CTRL_CDP_PORTISO_LEAKY_MASK    0x1
+
+#define    RTL8367C_REG_RMA_CTRL_CSSTP    0x0831
+#define    RTL8367C_RMA_CTRL_CSSTP_OPERATION_OFFSET    7
+#define    RTL8367C_RMA_CTRL_CSSTP_OPERATION_MASK    0x180
+#define    RTL8367C_RMA_CTRL_CSSTP_DISCARD_STORM_FILTER_OFFSET    6
+#define    RTL8367C_RMA_CTRL_CSSTP_DISCARD_STORM_FILTER_MASK    0x40
+#define    RTL8367C_RMA_CTRL_CSSTP_KEEP_FORMAT_OFFSET    2
+#define    RTL8367C_RMA_CTRL_CSSTP_KEEP_FORMAT_MASK    0x4
+#define    RTL8367C_RMA_CTRL_CSSTP_VLAN_LEAKY_OFFSET    1
+#define    RTL8367C_RMA_CTRL_CSSTP_VLAN_LEAKY_MASK    0x2
+#define    RTL8367C_RMA_CTRL_CSSTP_PORTISO_LEAKY_OFFSET    0
+#define    RTL8367C_RMA_CTRL_CSSTP_PORTISO_LEAKY_MASK    0x1
+
+#define    RTL8367C_REG_RMA_CTRL_LLDP    0x0832
+#define    RTL8367C_RMA_CTRL_LLDP_OPERATION_OFFSET    7
+#define    RTL8367C_RMA_CTRL_LLDP_OPERATION_MASK    0x180
+#define    RTL8367C_RMA_CTRL_LLDP_DISCARD_STORM_FILTER_OFFSET    6
+#define    RTL8367C_RMA_CTRL_LLDP_DISCARD_STORM_FILTER_MASK    0x40
+#define    RTL8367C_RMA_CTRL_LLDP_KEEP_FORMAT_OFFSET    2
+#define    RTL8367C_RMA_CTRL_LLDP_KEEP_FORMAT_MASK    0x4
+#define    RTL8367C_RMA_CTRL_LLDP_VLAN_LEAKY_OFFSET    1
+#define    RTL8367C_RMA_CTRL_LLDP_VLAN_LEAKY_MASK    0x2
+#define    RTL8367C_RMA_CTRL_LLDP_PORTISO_LEAKY_OFFSET    0
+#define    RTL8367C_RMA_CTRL_LLDP_PORTISO_LEAKY_MASK    0x1
+
+#define    RTL8367C_REG_RMA_LLDP_EN    0x0833
+#define    RTL8367C_RMA_LLDP_EN_OFFSET    0
+#define    RTL8367C_RMA_LLDP_EN_MASK    0x1
+
+#define    RTL8367C_REG_VLAN_PORTBASED_PRIORITY_CTRL0    0x0851
+#define    RTL8367C_VLAN_PORTBASED_PRIORITY_CTRL0_PORT3_PRIORITY_OFFSET    12
+#define    RTL8367C_VLAN_PORTBASED_PRIORITY_CTRL0_PORT3_PRIORITY_MASK    0x7000
+#define    RTL8367C_VLAN_PORTBASED_PRIORITY_CTRL0_PORT2_PRIORITY_OFFSET    8
+#define    RTL8367C_VLAN_PORTBASED_PRIORITY_CTRL0_PORT2_PRIORITY_MASK    0x700
+#define    RTL8367C_VLAN_PORTBASED_PRIORITY_CTRL0_PORT1_PRIORITY_OFFSET    4
+#define    RTL8367C_VLAN_PORTBASED_PRIORITY_CTRL0_PORT1_PRIORITY_MASK    0x70
+#define    RTL8367C_VLAN_PORTBASED_PRIORITY_CTRL0_PORT0_PRIORITY_OFFSET    0
+#define    RTL8367C_VLAN_PORTBASED_PRIORITY_CTRL0_PORT0_PRIORITY_MASK    0x7
+
+#define    RTL8367C_REG_VLAN_PORTBASED_PRIORITY_CTRL1    0x0852
+#define    RTL8367C_VLAN_PORTBASED_PRIORITY_CTRL1_PORT7_PRIORITY_OFFSET    12
+#define    RTL8367C_VLAN_PORTBASED_PRIORITY_CTRL1_PORT7_PRIORITY_MASK    0x7000
+#define    RTL8367C_VLAN_PORTBASED_PRIORITY_CTRL1_PORT6_PRIORITY_OFFSET    8
+#define    RTL8367C_VLAN_PORTBASED_PRIORITY_CTRL1_PORT6_PRIORITY_MASK    0x700
+#define    RTL8367C_VLAN_PORTBASED_PRIORITY_CTRL1_PORT5_PRIORITY_OFFSET    4
+#define    RTL8367C_VLAN_PORTBASED_PRIORITY_CTRL1_PORT5_PRIORITY_MASK    0x70
+#define    RTL8367C_VLAN_PORTBASED_PRIORITY_CTRL1_PORT4_PRIORITY_OFFSET    0
+#define    RTL8367C_VLAN_PORTBASED_PRIORITY_CTRL1_PORT4_PRIORITY_MASK    0x7
+
+#define    RTL8367C_REG_VLAN_PORTBASED_PRIORITY_CTRL2    0x0853
+#define    RTL8367C_VLAN_PORTBASED_PRIORITY_CTRL2_PORT10_PRIORITY_OFFSET    8
+#define    RTL8367C_VLAN_PORTBASED_PRIORITY_CTRL2_PORT10_PRIORITY_MASK    0x700
+#define    RTL8367C_VLAN_PORTBASED_PRIORITY_CTRL2_PORT9_PRIORITY_OFFSET    4
+#define    RTL8367C_VLAN_PORTBASED_PRIORITY_CTRL2_PORT9_PRIORITY_MASK    0x70
+#define    RTL8367C_VLAN_PORTBASED_PRIORITY_CTRL2_PORT8_PRIORITY_OFFSET    0
+#define    RTL8367C_VLAN_PORTBASED_PRIORITY_CTRL2_PORT8_PRIORITY_MASK    0x7
+
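+/*
+ * Editorial sketch (assumption): port-based priorities are 3-bit fields
+ * packed four ports per register on nibble boundaries, so register and
+ * offset for a given port follow directly:
+ */
+static inline unsigned int rtl8367c_pb_prio_reg(unsigned int port)
+{
+	return RTL8367C_REG_VLAN_PORTBASED_PRIORITY_CTRL0 + (port >> 2);
+}
+
+static inline unsigned int rtl8367c_pb_prio_offset(unsigned int port)
+{
+	return (port & 0x3) << 2;
+}
+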
+#define    RTL8367C_REG_VLAN_PPB_PRIORITY_ITEM0_CTRL0    0x0855
+#define    RTL8367C_VLAN_PPB_PRIORITY_ITEM0_CTRL0_PORT3_PRIORITY_OFFSET    12
+#define    RTL8367C_VLAN_PPB_PRIORITY_ITEM0_CTRL0_PORT3_PRIORITY_MASK    0x7000
+#define    RTL8367C_VLAN_PPB_PRIORITY_ITEM0_CTRL0_PORT2_PRIORITY_OFFSET    8
+#define    RTL8367C_VLAN_PPB_PRIORITY_ITEM0_CTRL0_PORT2_PRIORITY_MASK    0x700
+#define    RTL8367C_VLAN_PPB_PRIORITY_ITEM0_CTRL0_PORT1_PRIORITY_OFFSET    4
+#define    RTL8367C_VLAN_PPB_PRIORITY_ITEM0_CTRL0_PORT1_PRIORITY_MASK    0x70
+#define    RTL8367C_VLAN_PPB_PRIORITY_ITEM0_CTRL0_PORT0_PRIORITY_OFFSET    0
+#define    RTL8367C_VLAN_PPB_PRIORITY_ITEM0_CTRL0_PORT0_PRIORITY_MASK    0x7
+
+#define    RTL8367C_REG_VLAN_PPB_PRIORITY_ITEM0_CTRL1    0x0856
+#define    RTL8367C_VLAN_PPB_PRIORITY_ITEM0_CTRL1_PORT7_PRIORITY_OFFSET    12
+#define    RTL8367C_VLAN_PPB_PRIORITY_ITEM0_CTRL1_PORT7_PRIORITY_MASK    0x7000
+#define    RTL8367C_VLAN_PPB_PRIORITY_ITEM0_CTRL1_PORT6_PRIORITY_OFFSET    8
+#define    RTL8367C_VLAN_PPB_PRIORITY_ITEM0_CTRL1_PORT6_PRIORITY_MASK    0x700
+#define    RTL8367C_VLAN_PPB_PRIORITY_ITEM0_CTRL1_PORT5_PRIORITY_OFFSET    4
+#define    RTL8367C_VLAN_PPB_PRIORITY_ITEM0_CTRL1_PORT5_PRIORITY_MASK    0x70
+#define    RTL8367C_VLAN_PPB_PRIORITY_ITEM0_CTRL1_PORT4_PRIORITY_OFFSET    0
+#define    RTL8367C_VLAN_PPB_PRIORITY_ITEM0_CTRL1_PORT4_PRIORITY_MASK    0x7
+
+#define    RTL8367C_REG_VLAN_PPB_PRIORITY_ITEM0_CTRL2    0x0857
+#define    RTL8367C_VLAN_PPB_PRIORITY_ITEM0_CTRL2_PORT10_PRIORITY_OFFSET    8
+#define    RTL8367C_VLAN_PPB_PRIORITY_ITEM0_CTRL2_PORT10_PRIORITY_MASK    0x700
+#define    RTL8367C_VLAN_PPB_PRIORITY_ITEM0_CTRL2_PORT9_PRIORITY_OFFSET    4
+#define    RTL8367C_VLAN_PPB_PRIORITY_ITEM0_CTRL2_PORT9_PRIORITY_MASK    0x70
+#define    RTL8367C_VLAN_PPB_PRIORITY_ITEM0_CTRL2_PORT8_PRIORITY_OFFSET    0
+#define    RTL8367C_VLAN_PPB_PRIORITY_ITEM0_CTRL2_PORT8_PRIORITY_MASK    0x7
+
+#define    RTL8367C_REG_VLAN_PPB_PRIORITY_ITEM1_CTRL0    0x0859
+#define    RTL8367C_VLAN_PPB_PRIORITY_ITEM1_CTRL0_PORT3_PRIORITY_OFFSET    12
+#define    RTL8367C_VLAN_PPB_PRIORITY_ITEM1_CTRL0_PORT3_PRIORITY_MASK    0x7000
+#define    RTL8367C_VLAN_PPB_PRIORITY_ITEM1_CTRL0_PORT2_PRIORITY_OFFSET    8
+#define    RTL8367C_VLAN_PPB_PRIORITY_ITEM1_CTRL0_PORT2_PRIORITY_MASK    0x700
+#define    RTL8367C_VLAN_PPB_PRIORITY_ITEM1_CTRL0_PORT1_PRIORITY_OFFSET    4
+#define    RTL8367C_VLAN_PPB_PRIORITY_ITEM1_CTRL0_PORT1_PRIORITY_MASK    0x70
+#define    RTL8367C_VLAN_PPB_PRIORITY_ITEM1_CTRL0_PORT0_PRIORITY_OFFSET    0
+#define    RTL8367C_VLAN_PPB_PRIORITY_ITEM1_CTRL0_PORT0_PRIORITY_MASK    0x7
+
+#define    RTL8367C_REG_VLAN_PPB_PRIORITY_ITEM1_CTRL1    0x085a
+#define    RTL8367C_VLAN_PPB_PRIORITY_ITEM1_CTRL1_PORT7_PRIORITY_OFFSET    12
+#define    RTL8367C_VLAN_PPB_PRIORITY_ITEM1_CTRL1_PORT7_PRIORITY_MASK    0x7000
+#define    RTL8367C_VLAN_PPB_PRIORITY_ITEM1_CTRL1_PORT6_PRIORITY_OFFSET    8
+#define    RTL8367C_VLAN_PPB_PRIORITY_ITEM1_CTRL1_PORT6_PRIORITY_MASK    0x700
+#define    RTL8367C_VLAN_PPB_PRIORITY_ITEM1_CTRL1_PORT5_PRIORITY_OFFSET    4
+#define    RTL8367C_VLAN_PPB_PRIORITY_ITEM1_CTRL1_PORT5_PRIORITY_MASK    0x70
+#define    RTL8367C_VLAN_PPB_PRIORITY_ITEM1_CTRL1_PORT4_PRIORITY_OFFSET    0
+#define    RTL8367C_VLAN_PPB_PRIORITY_ITEM1_CTRL1_PORT4_PRIORITY_MASK    0x7
+
+#define    RTL8367C_REG_VLAN_PPB_PRIORITY_ITEM1_CTRL2    0x085b
+#define    RTL8367C_VLAN_PPB_PRIORITY_ITEM1_CTRL2_PORT10_PRIORITY_OFFSET    8
+#define    RTL8367C_VLAN_PPB_PRIORITY_ITEM1_CTRL2_PORT10_PRIORITY_MASK    0x700
+#define    RTL8367C_VLAN_PPB_PRIORITY_ITEM1_CTRL2_PORT9_PRIORITY_OFFSET    4
+#define    RTL8367C_VLAN_PPB_PRIORITY_ITEM1_CTRL2_PORT9_PRIORITY_MASK    0x70
+#define    RTL8367C_VLAN_PPB_PRIORITY_ITEM1_CTRL2_PORT8_PRIORITY_OFFSET    0
+#define    RTL8367C_VLAN_PPB_PRIORITY_ITEM1_CTRL2_PORT8_PRIORITY_MASK    0x7
+
+#define    RTL8367C_REG_VLAN_PPB_PRIORITY_ITEM2_CTRL0    0x085d
+#define    RTL8367C_VLAN_PPB_PRIORITY_ITEM2_CTRL0_PORT3_PRIORITY_OFFSET    12
+#define    RTL8367C_VLAN_PPB_PRIORITY_ITEM2_CTRL0_PORT3_PRIORITY_MASK    0x7000
+#define    RTL8367C_VLAN_PPB_PRIORITY_ITEM2_CTRL0_PORT2_PRIORITY_OFFSET    8
+#define    RTL8367C_VLAN_PPB_PRIORITY_ITEM2_CTRL0_PORT2_PRIORITY_MASK    0x700
+#define    RTL8367C_VLAN_PPB_PRIORITY_ITEM2_CTRL0_PORT1_PRIORITY_OFFSET    4
+#define    RTL8367C_VLAN_PPB_PRIORITY_ITEM2_CTRL0_PORT1_PRIORITY_MASK    0x70
+#define    RTL8367C_VLAN_PPB_PRIORITY_ITEM2_CTRL0_PORT0_PRIORITY_OFFSET    0
+#define    RTL8367C_VLAN_PPB_PRIORITY_ITEM2_CTRL0_PORT0_PRIORITY_MASK    0x7
+
+#define    RTL8367C_REG_VLAN_PPB_PRIORITY_ITEM2_CTRL1    0x085e
+#define    RTL8367C_VLAN_PPB_PRIORITY_ITEM2_CTRL1_PORT7_PRIORITY_OFFSET    12
+#define    RTL8367C_VLAN_PPB_PRIORITY_ITEM2_CTRL1_PORT7_PRIORITY_MASK    0x7000
+#define    RTL8367C_VLAN_PPB_PRIORITY_ITEM2_CTRL1_PORT6_PRIORITY_OFFSET    8
+#define    RTL8367C_VLAN_PPB_PRIORITY_ITEM2_CTRL1_PORT6_PRIORITY_MASK    0x700
+#define    RTL8367C_VLAN_PPB_PRIORITY_ITEM2_CTRL1_PORT5_PRIORITY_OFFSET    4
+#define    RTL8367C_VLAN_PPB_PRIORITY_ITEM2_CTRL1_PORT5_PRIORITY_MASK    0x70
+#define    RTL8367C_VLAN_PPB_PRIORITY_ITEM2_CTRL1_PORT4_PRIORITY_OFFSET    0
+#define    RTL8367C_VLAN_PPB_PRIORITY_ITEM2_CTRL1_PORT4_PRIORITY_MASK    0x7
+
+#define    RTL8367C_REG_VLAN_PPB_PRIORITY_ITEM2_CTRL2    0x085f
+#define    RTL8367C_VLAN_PPB_PRIORITY_ITEM2_CTRL2_PORT10_PRIORITY_OFFSET    8
+#define    RTL8367C_VLAN_PPB_PRIORITY_ITEM2_CTRL2_PORT10_PRIORITY_MASK    0x700
+#define    RTL8367C_VLAN_PPB_PRIORITY_ITEM2_CTRL2_PORT9_PRIORITY_OFFSET    4
+#define    RTL8367C_VLAN_PPB_PRIORITY_ITEM2_CTRL2_PORT9_PRIORITY_MASK    0x70
+#define    RTL8367C_VLAN_PPB_PRIORITY_ITEM2_CTRL2_PORT8_PRIORITY_OFFSET    0
+#define    RTL8367C_VLAN_PPB_PRIORITY_ITEM2_CTRL2_PORT8_PRIORITY_MASK    0x7
+
+#define    RTL8367C_REG_VLAN_PPB_PRIORITY_ITEM3_CTRL0    0x0861
+#define    RTL8367C_VLAN_PPB_PRIORITY_ITEM3_CTRL0_PORT3_PRIORITY_OFFSET    12
+#define    RTL8367C_VLAN_PPB_PRIORITY_ITEM3_CTRL0_PORT3_PRIORITY_MASK    0x7000
+#define    RTL8367C_VLAN_PPB_PRIORITY_ITEM3_CTRL0_PORT2_PRIORITY_OFFSET    8
+#define    RTL8367C_VLAN_PPB_PRIORITY_ITEM3_CTRL0_PORT2_PRIORITY_MASK    0x700
+#define    RTL8367C_VLAN_PPB_PRIORITY_ITEM3_CTRL0_PORT1_PRIORITY_OFFSET    4
+#define    RTL8367C_VLAN_PPB_PRIORITY_ITEM3_CTRL0_PORT1_PRIORITY_MASK    0x70
+#define    RTL8367C_VLAN_PPB_PRIORITY_ITEM3_CTRL0_PORT0_PRIORITY_OFFSET    0
+#define    RTL8367C_VLAN_PPB_PRIORITY_ITEM3_CTRL0_PORT0_PRIORITY_MASK    0x7
+
+#define    RTL8367C_REG_VLAN_PPB_PRIORITY_ITEM3_CTRL1    0x0862
+#define    RTL8367C_VLAN_PPB_PRIORITY_ITEM3_CTRL1_PORT7_PRIORITY_OFFSET    12
+#define    RTL8367C_VLAN_PPB_PRIORITY_ITEM3_CTRL1_PORT7_PRIORITY_MASK    0x7000
+#define    RTL8367C_VLAN_PPB_PRIORITY_ITEM3_CTRL1_PORT6_PRIORITY_OFFSET    8
+#define    RTL8367C_VLAN_PPB_PRIORITY_ITEM3_CTRL1_PORT6_PRIORITY_MASK    0x700
+#define    RTL8367C_VLAN_PPB_PRIORITY_ITEM3_CTRL1_PORT5_PRIORITY_OFFSET    4
+#define    RTL8367C_VLAN_PPB_PRIORITY_ITEM3_CTRL1_PORT5_PRIORITY_MASK    0x70
+#define    RTL8367C_VLAN_PPB_PRIORITY_ITEM3_CTRL1_PORT4_PRIORITY_OFFSET    0
+#define    RTL8367C_VLAN_PPB_PRIORITY_ITEM3_CTRL1_PORT4_PRIORITY_MASK    0x7
+
+#define    RTL8367C_REG_VLAN_PPB_PRIORITY_ITEM3_CTRL2    0x0863
+#define    RTL8367C_VLAN_PPB_PRIORITY_ITEM3_CTRL2_PORT10_PRIORITY_OFFSET    8
+#define    RTL8367C_VLAN_PPB_PRIORITY_ITEM3_CTRL2_PORT10_PRIORITY_MASK    0x700
+#define    RTL8367C_VLAN_PPB_PRIORITY_ITEM3_CTRL2_PORT9_PRIORITY_OFFSET    4
+#define    RTL8367C_VLAN_PPB_PRIORITY_ITEM3_CTRL2_PORT9_PRIORITY_MASK    0x70
+#define    RTL8367C_VLAN_PPB_PRIORITY_ITEM3_CTRL2_PORT8_PRIORITY_OFFSET    0
+#define    RTL8367C_VLAN_PPB_PRIORITY_ITEM3_CTRL2_PORT8_PRIORITY_MASK    0x7
+
+#define    RTL8367C_REG_QOS_1Q_PRIORITY_REMAPPING_CTRL0    0x0865
+#define    RTL8367C_QOS_1Q_PRIORITY_REMAPPING_CTRL0_PRIORITY3_OFFSET    12
+#define    RTL8367C_QOS_1Q_PRIORITY_REMAPPING_CTRL0_PRIORITY3_MASK    0x7000
+#define    RTL8367C_QOS_1Q_PRIORITY_REMAPPING_CTRL0_PRIORITY2_OFFSET    8
+#define    RTL8367C_QOS_1Q_PRIORITY_REMAPPING_CTRL0_PRIORITY2_MASK    0x700
+#define    RTL8367C_QOS_1Q_PRIORITY_REMAPPING_CTRL0_PRIORITY1_OFFSET    4
+#define    RTL8367C_QOS_1Q_PRIORITY_REMAPPING_CTRL0_PRIORITY1_MASK    0x70
+#define    RTL8367C_QOS_1Q_PRIORITY_REMAPPING_CTRL0_PRIORITY0_OFFSET    0
+#define    RTL8367C_QOS_1Q_PRIORITY_REMAPPING_CTRL0_PRIORITY0_MASK    0x7
+
+#define    RTL8367C_REG_QOS_1Q_PRIORITY_REMAPPING_CTRL1    0x0866
+#define    RTL8367C_QOS_1Q_PRIORITY_REMAPPING_CTRL1_PRIORITY7_OFFSET    12
+#define    RTL8367C_QOS_1Q_PRIORITY_REMAPPING_CTRL1_PRIORITY7_MASK    0x7000
+#define    RTL8367C_QOS_1Q_PRIORITY_REMAPPING_CTRL1_PRIORITY6_OFFSET    8
+#define    RTL8367C_QOS_1Q_PRIORITY_REMAPPING_CTRL1_PRIORITY6_MASK    0x700
+#define    RTL8367C_QOS_1Q_PRIORITY_REMAPPING_CTRL1_PRIORITY5_OFFSET    4
+#define    RTL8367C_QOS_1Q_PRIORITY_REMAPPING_CTRL1_PRIORITY5_MASK    0x70
+#define    RTL8367C_QOS_1Q_PRIORITY_REMAPPING_CTRL1_PRIORITY4_OFFSET    0
+#define    RTL8367C_QOS_1Q_PRIORITY_REMAPPING_CTRL1_PRIORITY4_MASK    0x7
+
+#define    RTL8367C_REG_QOS_DSCP_TO_PRIORITY_CTRL0    0x0867
+#define    RTL8367C_DSCP3_PRIORITY_OFFSET    12
+#define    RTL8367C_DSCP3_PRIORITY_MASK    0x7000
+#define    RTL8367C_DSCP2_PRIORITY_OFFSET    8
+#define    RTL8367C_DSCP2_PRIORITY_MASK    0x700
+#define    RTL8367C_DSCP1_PRIORITY_OFFSET    4
+#define    RTL8367C_DSCP1_PRIORITY_MASK    0x70
+#define    RTL8367C_DSCP0_PRIORITY_OFFSET    0
+#define    RTL8367C_DSCP0_PRIORITY_MASK    0x7
+
+#define    RTL8367C_REG_QOS_DSCP_TO_PRIORITY_CTRL1    0x0868
+#define    RTL8367C_DSCP7_PRIORITY_OFFSET    12
+#define    RTL8367C_DSCP7_PRIORITY_MASK    0x7000
+#define    RTL8367C_DSCP6_PRIORITY_OFFSET    8
+#define    RTL8367C_DSCP6_PRIORITY_MASK    0x700
+#define    RTL8367C_DSCP5_PRIORITY_OFFSET    4
+#define    RTL8367C_DSCP5_PRIORITY_MASK    0x70
+#define    RTL8367C_DSCP4_PRIORITY_OFFSET    0
+#define    RTL8367C_DSCP4_PRIORITY_MASK    0x7
+
+#define    RTL8367C_REG_QOS_DSCP_TO_PRIORITY_CTRL2    0x0869
+#define    RTL8367C_DSCP11_PRIORITY_OFFSET    12
+#define    RTL8367C_DSCP11_PRIORITY_MASK    0x7000
+#define    RTL8367C_DSCP10_PRIORITY_OFFSET    8
+#define    RTL8367C_DSCP10_PRIORITY_MASK    0x700
+#define    RTL8367C_DSCP9_PRIORITY_OFFSET    4
+#define    RTL8367C_DSCP9_PRIORITY_MASK    0x70
+#define    RTL8367C_DSCP8_PRIORITY_OFFSET    0
+#define    RTL8367C_DSCP8_PRIORITY_MASK    0x7
+
+#define    RTL8367C_REG_QOS_DSCP_TO_PRIORITY_CTRL3    0x086a
+#define    RTL8367C_DSCP15_PRIORITY_OFFSET    12
+#define    RTL8367C_DSCP15_PRIORITY_MASK    0x7000
+#define    RTL8367C_DSCP14_PRIORITY_OFFSET    8
+#define    RTL8367C_DSCP14_PRIORITY_MASK    0x700
+#define    RTL8367C_DSCP13_PRIORITY_OFFSET    4
+#define    RTL8367C_DSCP13_PRIORITY_MASK    0x70
+#define    RTL8367C_DSCP12_PRIORITY_OFFSET    0
+#define    RTL8367C_DSCP12_PRIORITY_MASK    0x7
+
+#define    RTL8367C_REG_QOS_DSCP_TO_PRIORITY_CTRL4    0x086b
+#define    RTL8367C_DSCP19_PRIORITY_OFFSET    12
+#define    RTL8367C_DSCP19_PRIORITY_MASK    0x7000
+#define    RTL8367C_DSCP18_PRIORITY_OFFSET    8
+#define    RTL8367C_DSCP18_PRIORITY_MASK    0x700
+#define    RTL8367C_DSCP17_PRIORITY_OFFSET    4
+#define    RTL8367C_DSCP17_PRIORITY_MASK    0x70
+#define    RTL8367C_DSCP16_PRIORITY_OFFSET    0
+#define    RTL8367C_DSCP16_PRIORITY_MASK    0x7
+
+#define    RTL8367C_REG_QOS_DSCP_TO_PRIORITY_CTRL5    0x086c
+#define    RTL8367C_DSCP23_PRIORITY_OFFSET    12
+#define    RTL8367C_DSCP23_PRIORITY_MASK    0x7000
+#define    RTL8367C_DSCP22_PRIORITY_OFFSET    8
+#define    RTL8367C_DSCP22_PRIORITY_MASK    0x700
+#define    RTL8367C_DSCP21_PRIORITY_OFFSET    4
+#define    RTL8367C_DSCP21_PRIORITY_MASK    0x70
+#define    RTL8367C_DSCP20_PRIORITY_OFFSET    0
+#define    RTL8367C_DSCP20_PRIORITY_MASK    0x7
+
+#define    RTL8367C_REG_QOS_DSCP_TO_PRIORITY_CTRL6    0x086d
+#define    RTL8367C_DSCP27_PRIORITY_OFFSET    12
+#define    RTL8367C_DSCP27_PRIORITY_MASK    0x7000
+#define    RTL8367C_DSCP26_PRIORITY_OFFSET    8
+#define    RTL8367C_DSCP26_PRIORITY_MASK    0x700
+#define    RTL8367C_DSCP25_PRIORITY_OFFSET    4
+#define    RTL8367C_DSCP25_PRIORITY_MASK    0x70
+#define    RTL8367C_DSCP24_PRIORITY_OFFSET    0
+#define    RTL8367C_DSCP24_PRIORITY_MASK    0x7
+
+#define    RTL8367C_REG_QOS_DSCP_TO_PRIORITY_CTRL7    0x086e
+#define    RTL8367C_DSCP31_PRIORITY_OFFSET    12
+#define    RTL8367C_DSCP31_PRIORITY_MASK    0x7000
+#define    RTL8367C_DSCP30_PRIORITY_OFFSET    8
+#define    RTL8367C_DSCP30_PRIORITY_MASK    0x700
+#define    RTL8367C_DSCP29_PRIORITY_OFFSET    4
+#define    RTL8367C_DSCP29_PRIORITY_MASK    0x70
+#define    RTL8367C_DSCP28_PRIORITY_OFFSET    0
+#define    RTL8367C_DSCP28_PRIORITY_MASK    0x7
+
+#define    RTL8367C_REG_QOS_DSCP_TO_PRIORITY_CTRL8    0x086f
+#define    RTL8367C_DSCP35_PRIORITY_OFFSET    12
+#define    RTL8367C_DSCP35_PRIORITY_MASK    0x7000
+#define    RTL8367C_DSCP34_PRIORITY_OFFSET    8
+#define    RTL8367C_DSCP34_PRIORITY_MASK    0x700
+#define    RTL8367C_DSCP33_PRIORITY_OFFSET    4
+#define    RTL8367C_DSCP33_PRIORITY_MASK    0x70
+#define    RTL8367C_DSCP32_PRIORITY_OFFSET    0
+#define    RTL8367C_DSCP32_PRIORITY_MASK    0x7
+
+#define    RTL8367C_REG_QOS_DSCP_TO_PRIORITY_CTRL9    0x0870
+#define    RTL8367C_DSCP39_PRIORITY_OFFSET    12
+#define    RTL8367C_DSCP39_PRIORITY_MASK    0x7000
+#define    RTL8367C_DSCP38_PRIORITY_OFFSET    8
+#define    RTL8367C_DSCP38_PRIORITY_MASK    0x700
+#define    RTL8367C_DSCP37_PRIORITY_OFFSET    4
+#define    RTL8367C_DSCP37_PRIORITY_MASK    0x70
+#define    RTL8367C_DSCP36_PRIORITY_OFFSET    0
+#define    RTL8367C_DSCP36_PRIORITY_MASK    0x7
+
+#define    RTL8367C_REG_QOS_DSCP_TO_PRIORITY_CTRL10    0x0871
+#define    RTL8367C_DSCP43_PRIORITY_OFFSET    12
+#define    RTL8367C_DSCP43_PRIORITY_MASK    0x7000
+#define    RTL8367C_DSCP42_PRIORITY_OFFSET    8
+#define    RTL8367C_DSCP42_PRIORITY_MASK    0x700
+#define    RTL8367C_DSCP41_PRIORITY_OFFSET    4
+#define    RTL8367C_DSCP41_PRIORITY_MASK    0x70
+#define    RTL8367C_DSCP40_PRIORITY_OFFSET    0
+#define    RTL8367C_DSCP40_PRIORITY_MASK    0x7
+
+#define    RTL8367C_REG_QOS_DSCP_TO_PRIORITY_CTRL11    0x0872
+#define    RTL8367C_DSCP47_PRIORITY_OFFSET    12
+#define    RTL8367C_DSCP47_PRIORITY_MASK    0x7000
+#define    RTL8367C_DSCP46_PRIORITY_OFFSET    8
+#define    RTL8367C_DSCP46_PRIORITY_MASK    0x700
+#define    RTL8367C_DSCP45_PRIORITY_OFFSET    4
+#define    RTL8367C_DSCP45_PRIORITY_MASK    0x70
+#define    RTL8367C_DSCP44_PRIORITY_OFFSET    0
+#define    RTL8367C_DSCP44_PRIORITY_MASK    0x7
+
+#define    RTL8367C_REG_QOS_DSCP_TO_PRIORITY_CTRL12    0x0873
+#define    RTL8367C_DSCP51_PRIORITY_OFFSET    12
+#define    RTL8367C_DSCP51_PRIORITY_MASK    0x7000
+#define    RTL8367C_DSCP50_PRIORITY_OFFSET    8
+#define    RTL8367C_DSCP50_PRIORITY_MASK    0x700
+#define    RTL8367C_DSCP49_PRIORITY_OFFSET    4
+#define    RTL8367C_DSCP49_PRIORITY_MASK    0x70
+#define    RTL8367C_DSCP48_PRIORITY_OFFSET    0
+#define    RTL8367C_DSCP48_PRIORITY_MASK    0x7
+
+#define    RTL8367C_REG_QOS_DSCP_TO_PRIORITY_CTRL13    0x0874
+#define    RTL8367C_DSCP55_PRIORITY_OFFSET    12
+#define    RTL8367C_DSCP55_PRIORITY_MASK    0x7000
+#define    RTL8367C_DSCP54_PRIORITY_OFFSET    8
+#define    RTL8367C_DSCP54_PRIORITY_MASK    0x700
+#define    RTL8367C_DSCP53_PRIORITY_OFFSET    4
+#define    RTL8367C_DSCP53_PRIORITY_MASK    0x70
+#define    RTL8367C_DSCP52_PRIORITY_OFFSET    0
+#define    RTL8367C_DSCP52_PRIORITY_MASK    0x7
+
+#define    RTL8367C_REG_QOS_DSCP_TO_PRIORITY_CTRL14    0x0875
+#define    RTL8367C_DSCP59_PRIORITY_OFFSET    12
+#define    RTL8367C_DSCP59_PRIORITY_MASK    0x7000
+#define    RTL8367C_DSCP58_PRIORITY_OFFSET    8
+#define    RTL8367C_DSCP58_PRIORITY_MASK    0x700
+#define    RTL8367C_DSCP57_PRIORITY_OFFSET    4
+#define    RTL8367C_DSCP57_PRIORITY_MASK    0x70
+#define    RTL8367C_DSCP56_PRIORITY_OFFSET    0
+#define    RTL8367C_DSCP56_PRIORITY_MASK    0x7
+
+#define    RTL8367C_REG_QOS_DSCP_TO_PRIORITY_CTRL15    0x0876
+#define    RTL8367C_DSCP63_PRIORITY_OFFSET    12
+#define    RTL8367C_DSCP63_PRIORITY_MASK    0x7000
+#define    RTL8367C_DSCP62_PRIORITY_OFFSET    8
+#define    RTL8367C_DSCP62_PRIORITY_MASK    0x700
+#define    RTL8367C_DSCP61_PRIORITY_OFFSET    4
+#define    RTL8367C_DSCP61_PRIORITY_MASK    0x70
+#define    RTL8367C_DSCP60_PRIORITY_OFFSET    0
+#define    RTL8367C_DSCP60_PRIORITY_MASK    0x7
+
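The sixteen QOS_DSCP_TO_PRIORITY registers above sit at consecutive addresses (0x0867-0x0876) and each packs four 3-bit priority fields, one per DSCP code, every 4 bits. A minimal sketch of the addressing arithmetic this layout implies, using the macros above (the helper name is illustrative, not part of this header):

#include <stdint.h>

/* Locate the 3-bit internal-priority field for a DSCP code (0-63). */
static void dscp_prio_field(uint8_t dscp, uint16_t *reg, uint16_t *offset, uint16_t *mask)
{
    *reg    = (uint16_t)(RTL8367C_REG_QOS_DSCP_TO_PRIORITY_CTRL0 + (dscp >> 2)); /* CTRL0..CTRL15 */
    *offset = (uint16_t)((dscp & 0x3) * 4);                                      /* 0, 4, 8 or 12 */
    *mask   = (uint16_t)(0x7 << *offset);                                        /* 3-bit field   */
}

As a cross-check against the definitions above, DSCP 46 resolves to register 0x0872 (CTRL11), offset 8, mask 0x700, matching the RTL8367C_DSCP46_PRIORITY_* macros.
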
+#define    RTL8367C_REG_QOS_PORTBASED_PRIORITY_CTRL0    0x0877
+#define    RTL8367C_QOS_PORTBASED_PRIORITY_CTRL0_PORT3_PRIORITY_OFFSET    12
+#define    RTL8367C_QOS_PORTBASED_PRIORITY_CTRL0_PORT3_PRIORITY_MASK    0x7000
+#define    RTL8367C_QOS_PORTBASED_PRIORITY_CTRL0_PORT2_PRIORITY_OFFSET    8
+#define    RTL8367C_QOS_PORTBASED_PRIORITY_CTRL0_PORT2_PRIORITY_MASK    0x700
+#define    RTL8367C_QOS_PORTBASED_PRIORITY_CTRL0_PORT1_PRIORITY_OFFSET    4
+#define    RTL8367C_QOS_PORTBASED_PRIORITY_CTRL0_PORT1_PRIORITY_MASK    0x70
+#define    RTL8367C_QOS_PORTBASED_PRIORITY_CTRL0_PORT0_PRIORITY_OFFSET    0
+#define    RTL8367C_QOS_PORTBASED_PRIORITY_CTRL0_PORT0_PRIORITY_MASK    0x7
+
+#define    RTL8367C_REG_QOS_PORTBASED_PRIORITY_CTRL1    0x0878
+#define    RTL8367C_QOS_PORTBASED_PRIORITY_CTRL1_PORT7_PRIORITY_OFFSET    12
+#define    RTL8367C_QOS_PORTBASED_PRIORITY_CTRL1_PORT7_PRIORITY_MASK    0x7000
+#define    RTL8367C_QOS_PORTBASED_PRIORITY_CTRL1_PORT6_PRIORITY_OFFSET    8
+#define    RTL8367C_QOS_PORTBASED_PRIORITY_CTRL1_PORT6_PRIORITY_MASK    0x700
+#define    RTL8367C_QOS_PORTBASED_PRIORITY_CTRL1_PORT5_PRIORITY_OFFSET    4
+#define    RTL8367C_QOS_PORTBASED_PRIORITY_CTRL1_PORT5_PRIORITY_MASK    0x70
+#define    RTL8367C_QOS_PORTBASED_PRIORITY_CTRL1_PORT4_PRIORITY_OFFSET    0
+#define    RTL8367C_QOS_PORTBASED_PRIORITY_CTRL1_PORT4_PRIORITY_MASK    0x7
+
+#define    RTL8367C_REG_DUMMY0879    0x0879
+#define    RTL8367C_DUMMY0879_OFFSET    0
+#define    RTL8367C_DUMMY0879_MASK    0x1
+
+#define    RTL8367C_REG_QOS_PORTBASED_PRIORITY_CTRL2    0x087a
+#define    RTL8367C_QOS_PORTBASED_PRIORITY_CTRL2_PORT10_PRIORITY_OFFSET    8
+#define    RTL8367C_QOS_PORTBASED_PRIORITY_CTRL2_PORT10_PRIORITY_MASK    0x700
+#define    RTL8367C_QOS_PORTBASED_PRIORITY_CTRL2_PORT9_PRIORITY_OFFSET    4
+#define    RTL8367C_QOS_PORTBASED_PRIORITY_CTRL2_PORT9_PRIORITY_MASK    0x70
+#define    RTL8367C_QOS_PORTBASED_PRIORITY_CTRL2_PORT8_PRIORITY_OFFSET    0
+#define    RTL8367C_QOS_PORTBASED_PRIORITY_CTRL2_PORT8_PRIORITY_MASK    0x7
+
+#define    RTL8367C_REG_QOS_INTERNAL_PRIORITY_DECISION_CTRL0    0x087b
+#define    RTL8367C_QOS_INTERNAL_PRIORITY_DECISION_CTRL0_QOS_ACL_WEIGHT_OFFSET    8
+#define    RTL8367C_QOS_INTERNAL_PRIORITY_DECISION_CTRL0_QOS_ACL_WEIGHT_MASK    0xFF00
+#define    RTL8367C_QOS_INTERNAL_PRIORITY_DECISION_CTRL0_QOS_PORT_WEIGHT_OFFSET    0
+#define    RTL8367C_QOS_INTERNAL_PRIORITY_DECISION_CTRL0_QOS_PORT_WEIGHT_MASK    0xFF
+
+#define    RTL8367C_REG_QOS_INTERNAL_PRIORITY_DECISION_CTRL1    0x087c
+#define    RTL8367C_QOS_INTERNAL_PRIORITY_DECISION_CTRL1_QOS_DOT1Q_WEIGHT_OFFSET    8
+#define    RTL8367C_QOS_INTERNAL_PRIORITY_DECISION_CTRL1_QOS_DOT1Q_WEIGHT_MASK    0xFF00
+#define    RTL8367C_QOS_INTERNAL_PRIORITY_DECISION_CTRL1_QOS_DSCP_WEIGHT_OFFSET    0
+#define    RTL8367C_QOS_INTERNAL_PRIORITY_DECISION_CTRL1_QOS_DSCP_WEIGHT_MASK    0xFF
+
+#define    RTL8367C_REG_QOS_INTERNAL_PRIORITY_DECISION_CTRL2    0x087d
+#define    RTL8367C_QOS_INTERNAL_PRIORITY_DECISION_CTRL2_QOS_CVLAN_WEIGHT_OFFSET    8
+#define    RTL8367C_QOS_INTERNAL_PRIORITY_DECISION_CTRL2_QOS_CVLAN_WEIGHT_MASK    0xFF00
+#define    RTL8367C_QOS_INTERNAL_PRIORITY_DECISION_CTRL2_QOS_SVLAN_WEIGHT_OFFSET    0
+#define    RTL8367C_QOS_INTERNAL_PRIORITY_DECISION_CTRL2_QOS_SVLAN_WEIGHT_MASK    0xFF
+
+#define    RTL8367C_REG_QOS_INTERNAL_PRIORITY_DECISION_CTRL3    0x087e
+#define    RTL8367C_QOS_INTERNAL_PRIORITY_DECISION_CTRL3_QOS_SA_WEIGHT_OFFSET    8
+#define    RTL8367C_QOS_INTERNAL_PRIORITY_DECISION_CTRL3_QOS_SA_WEIGHT_MASK    0xFF00
+#define    RTL8367C_QOS_INTERNAL_PRIORITY_DECISION_CTRL3_QOS_LUTFWD_WEIGHT_OFFSET    0
+#define    RTL8367C_QOS_INTERNAL_PRIORITY_DECISION_CTRL3_QOS_LUTFWD_WEIGHT_MASK    0xFF
+
+#define    RTL8367C_REG_QOS_PRIORITY_REMAPPING_IN_CPU_CTRL0    0x087f
+#define    RTL8367C_QOS_PRIORITY_REMAPPING_IN_CPU_CTRL0_PRIORITY3_OFFSET    12
+#define    RTL8367C_QOS_PRIORITY_REMAPPING_IN_CPU_CTRL0_PRIORITY3_MASK    0x7000
+#define    RTL8367C_QOS_PRIORITY_REMAPPING_IN_CPU_CTRL0_PRIORITY2_OFFSET    8
+#define    RTL8367C_QOS_PRIORITY_REMAPPING_IN_CPU_CTRL0_PRIORITY2_MASK    0x700
+#define    RTL8367C_QOS_PRIORITY_REMAPPING_IN_CPU_CTRL0_PRIORITY1_OFFSET    4
+#define    RTL8367C_QOS_PRIORITY_REMAPPING_IN_CPU_CTRL0_PRIORITY1_MASK    0x70
+#define    RTL8367C_QOS_PRIORITY_REMAPPING_IN_CPU_CTRL0_PRIORITY0_OFFSET    0
+#define    RTL8367C_QOS_PRIORITY_REMAPPING_IN_CPU_CTRL0_PRIORITY0_MASK    0x7
+
+#define    RTL8367C_REG_QOS_PRIORITY_REMAPPING_IN_CPU_CTRL1    0x0880
+#define    RTL8367C_QOS_PRIORITY_REMAPPING_IN_CPU_CTRL1_PRIORITY7_OFFSET    12
+#define    RTL8367C_QOS_PRIORITY_REMAPPING_IN_CPU_CTRL1_PRIORITY7_MASK    0x7000
+#define    RTL8367C_QOS_PRIORITY_REMAPPING_IN_CPU_CTRL1_PRIORITY6_OFFSET    8
+#define    RTL8367C_QOS_PRIORITY_REMAPPING_IN_CPU_CTRL1_PRIORITY6_MASK    0x700
+#define    RTL8367C_QOS_PRIORITY_REMAPPING_IN_CPU_CTRL1_PRIORITY5_OFFSET    4
+#define    RTL8367C_QOS_PRIORITY_REMAPPING_IN_CPU_CTRL1_PRIORITY5_MASK    0x70
+#define    RTL8367C_QOS_PRIORITY_REMAPPING_IN_CPU_CTRL1_PRIORITY4_OFFSET    0
+#define    RTL8367C_QOS_PRIORITY_REMAPPING_IN_CPU_CTRL1_PRIORITY4_MASK    0x7
+
+#define    RTL8367C_REG_QOS_TRAP_PRIORITY0    0x0881
+#define    RTL8367C_UNKNOWN_MC_PRIORTY_OFFSET    12
+#define    RTL8367C_UNKNOWN_MC_PRIORTY_MASK    0x7000
+#define    RTL8367C_SVLAN_PRIOIRTY_OFFSET    8
+#define    RTL8367C_SVLAN_PRIOIRTY_MASK    0x700
+#define    RTL8367C_OAM_PRIOIRTY_OFFSET    4
+#define    RTL8367C_OAM_PRIOIRTY_MASK    0x70
+#define    RTL8367C_DOT1X_PRIORTY_OFFSET    0
+#define    RTL8367C_DOT1X_PRIORTY_MASK    0x7
+
+#define    RTL8367C_REG_QOS_TRAP_PRIORITY1    0x0882
+#define    RTL8367C_DW8051_TRAP_PRI_OFFSET    4
+#define    RTL8367C_DW8051_TRAP_PRI_MASK    0x70
+#define    RTL8367C_EEELLDP_TRAP_PRI_OFFSET    0
+#define    RTL8367C_EEELLDP_TRAP_PRI_MASK    0x7
+
+#define    RTL8367C_REG_MAX_LENGTH_CFG    0x0883
+#define    RTL8367C_MAX_LENGTH_GIGA_OFFSET    8
+#define    RTL8367C_MAX_LENGTH_GIGA_MASK    0xFF00
+#define    RTL8367C_MAX_LENGTH_10_100M_OFFSET    0
+#define    RTL8367C_MAX_LENGTH_10_100M_MASK    0xFF
+
+#define    RTL8367C_REG_MAX_LEN_RX_TX    0x0884
+#define    RTL8367C_MAX_LEN_RX_TX_OFFSET    0
+#define    RTL8367C_MAX_LEN_RX_TX_MASK    0x3
+
+#define    RTL8367C_REG_QOS_INTERNAL_PRIORITY_DECISION2_CTRL0    0x0885
+#define    RTL8367C_QOS_INTERNAL_PRIORITY_DECISION2_CTRL0_QOS_ACL_WEIGHT_OFFSET    8
+#define    RTL8367C_QOS_INTERNAL_PRIORITY_DECISION2_CTRL0_QOS_ACL_WEIGHT_MASK    0xFF00
+#define    RTL8367C_QOS_INTERNAL_PRIORITY_DECISION2_CTRL0_QOS_PORT_WEIGHT_OFFSET    0
+#define    RTL8367C_QOS_INTERNAL_PRIORITY_DECISION2_CTRL0_QOS_PORT_WEIGHT_MASK    0xFF
+
+#define    RTL8367C_REG_QOS_INTERNAL_PRIORITY_DECISION2_CTRL1    0x0886
+#define    RTL8367C_QOS_INTERNAL_PRIORITY_DECISION2_CTRL1_QOS_DOT1Q_WEIGHT_OFFSET    8
+#define    RTL8367C_QOS_INTERNAL_PRIORITY_DECISION2_CTRL1_QOS_DOT1Q_WEIGHT_MASK    0xFF00
+#define    RTL8367C_QOS_INTERNAL_PRIORITY_DECISION2_CTRL1_QOS_DSCP_WEIGHT_OFFSET    0
+#define    RTL8367C_QOS_INTERNAL_PRIORITY_DECISION2_CTRL1_QOS_DSCP_WEIGHT_MASK    0xFF
+
+#define    RTL8367C_REG_QOS_INTERNAL_PRIORITY_DECISION2_CTRL2    0x0887
+#define    RTL8367C_QOS_INTERNAL_PRIORITY_DECISION2_CTRL2_QOS_CVLAN_WEIGHT_OFFSET    8
+#define    RTL8367C_QOS_INTERNAL_PRIORITY_DECISION2_CTRL2_QOS_CVLAN_WEIGHT_MASK    0xFF00
+#define    RTL8367C_QOS_INTERNAL_PRIORITY_DECISION2_CTRL2_QOS_SVLAN_WEIGHT_OFFSET    0
+#define    RTL8367C_QOS_INTERNAL_PRIORITY_DECISION2_CTRL2_QOS_SVLAN_WEIGHT_MASK    0xFF
+
+#define    RTL8367C_REG_QOS_INTERNAL_PRIORITY_DECISION2_CTRL3    0x0888
+#define    RTL8367C_QOS_INTERNAL_PRIORITY_DECISION2_CTRL3_QOS_SA_WEIGHT_OFFSET    8
+#define    RTL8367C_QOS_INTERNAL_PRIORITY_DECISION2_CTRL3_QOS_SA_WEIGHT_MASK    0xFF00
+#define    RTL8367C_QOS_INTERNAL_PRIORITY_DECISION2_CTRL3_QOS_LUTFWD_WEIGHT_OFFSET    0
+#define    RTL8367C_QOS_INTERNAL_PRIORITY_DECISION2_CTRL3_QOS_LUTFWD_WEIGHT_MASK    0xFF
+
+#define    RTL8367C_REG_QOS_INTERNAL_PRIORITY_DECISION_IDX    0x0889
+#define    RTL8367C_QOS_INTERNAL_PRIORITY_DECISION_IDX_OFFSET    0
+#define    RTL8367C_QOS_INTERNAL_PRIORITY_DECISION_IDX_MASK    0x7FF
+
+#define    RTL8367C_REG_MAX_LENGTH_CFG_EXT    0x088a
+#define    RTL8367C_MAX_LENGTH_GIGA_EXT_OFFSET    3
+#define    RTL8367C_MAX_LENGTH_GIGA_EXT_MASK    0x38
+#define    RTL8367C_MAX_LENGTH_10_100M_EXT_OFFSET    0
+#define    RTL8367C_MAX_LENGTH_10_100M_EXT_MASK    0x7
+
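MAX_LENGTH_CFG provides 8-bit per-speed maximum frame length fields, and MAX_LENGTH_CFG_EXT adds 3-bit *_EXT fields. Assuming the _EXT bits are the upper bits of an 11-bit length value (an assumption; the register list alone does not state how the two combine), the composition would look like:

#include <stdint.h>

/* Assumption: GIGA_EXT supplies bits [10:8] above the 8-bit GIGA field. */
static uint16_t giga_max_length(uint16_t cfg, uint16_t cfg_ext)
{
    uint16_t low  = (cfg & RTL8367C_MAX_LENGTH_GIGA_MASK) >> RTL8367C_MAX_LENGTH_GIGA_OFFSET;
    uint16_t high = (cfg_ext & RTL8367C_MAX_LENGTH_GIGA_EXT_MASK) >> RTL8367C_MAX_LENGTH_GIGA_EXT_OFFSET;
    return (uint16_t)((high << 8) | low);
}
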
+#define    RTL8367C_REG_MAX_LEN_RX_TX_CFG0    0x088c
+#define    RTL8367C_MAX_LEN_RX_TX_CFG0_OFFSET    0
+#define    RTL8367C_MAX_LEN_RX_TX_CFG0_MASK    0x3FFF
+
+#define    RTL8367C_REG_MAX_LEN_RX_TX_CFG1    0x088d
+#define    RTL8367C_MAX_LEN_RX_TX_CFG1_OFFSET    0
+#define    RTL8367C_MAX_LEN_RX_TX_CFG1_MASK    0x3FFF
+
+#define    RTL8367C_REG_UNDA_FLOODING_PMSK    0x0890
+#define    RTL8367C_UNDA_FLOODING_PMSK_OFFSET    0
+#define    RTL8367C_UNDA_FLOODING_PMSK_MASK    0x7FF
+
+#define    RTL8367C_REG_UNMCAST_FLOADING_PMSK    0x0891
+#define    RTL8367C_UNMCAST_FLOADING_PMSK_OFFSET    0
+#define    RTL8367C_UNMCAST_FLOADING_PMSK_MASK    0x7FF
+
+#define    RTL8367C_REG_BCAST_FLOADING_PMSK    0x0892
+#define    RTL8367C_BCAST_FLOADING_PMSK_OFFSET    0
+#define    RTL8367C_BCAST_FLOADING_PMSK_MASK    0x7FF
+
+#define    RTL8367C_REG_PORT_TRUNK_HASH_MAPPING_CTRL2    0x08a0
+#define    RTL8367C_PORT_TRUNK_HASH_MAPPING_CTRL2_HASH7_OFFSET    14
+#define    RTL8367C_PORT_TRUNK_HASH_MAPPING_CTRL2_HASH7_MASK    0xC000
+#define    RTL8367C_PORT_TRUNK_HASH_MAPPING_CTRL2_HASH6_OFFSET    12
+#define    RTL8367C_PORT_TRUNK_HASH_MAPPING_CTRL2_HASH6_MASK    0x3000
+#define    RTL8367C_PORT_TRUNK_HASH_MAPPING_CTRL2_HASH5_OFFSET    10
+#define    RTL8367C_PORT_TRUNK_HASH_MAPPING_CTRL2_HASH5_MASK    0xC00
+#define    RTL8367C_PORT_TRUNK_HASH_MAPPING_CTRL2_HASH4_OFFSET    8
+#define    RTL8367C_PORT_TRUNK_HASH_MAPPING_CTRL2_HASH4_MASK    0x300
+#define    RTL8367C_PORT_TRUNK_HASH_MAPPING_CTRL2_HASH3_OFFSET    6
+#define    RTL8367C_PORT_TRUNK_HASH_MAPPING_CTRL2_HASH3_MASK    0xC0
+#define    RTL8367C_PORT_TRUNK_HASH_MAPPING_CTRL2_HASH2_OFFSET    4
+#define    RTL8367C_PORT_TRUNK_HASH_MAPPING_CTRL2_HASH2_MASK    0x30
+#define    RTL8367C_PORT_TRUNK_HASH_MAPPING_CTRL2_HASH1_OFFSET    2
+#define    RTL8367C_PORT_TRUNK_HASH_MAPPING_CTRL2_HASH1_MASK    0xC
+#define    RTL8367C_PORT_TRUNK_HASH_MAPPING_CTRL2_HASH0_OFFSET    0
+#define    RTL8367C_PORT_TRUNK_HASH_MAPPING_CTRL2_HASH0_MASK    0x3
+
+#define    RTL8367C_REG_PORT_TRUNK_HASH_MAPPING_CTRL3    0x08a1
+#define    RTL8367C_PORT_TRUNK_HASH_MAPPING_CTRL3_HASH15_OFFSET    14
+#define    RTL8367C_PORT_TRUNK_HASH_MAPPING_CTRL3_HASH15_MASK    0xC000
+#define    RTL8367C_PORT_TRUNK_HASH_MAPPING_CTRL3_HASH14_OFFSET    12
+#define    RTL8367C_PORT_TRUNK_HASH_MAPPING_CTRL3_HASH14_MASK    0x3000
+#define    RTL8367C_PORT_TRUNK_HASH_MAPPING_CTRL3_HASH13_OFFSET    10
+#define    RTL8367C_PORT_TRUNK_HASH_MAPPING_CTRL3_HASH13_MASK    0xC00
+#define    RTL8367C_PORT_TRUNK_HASH_MAPPING_CTRL3_HASH12_OFFSET    8
+#define    RTL8367C_PORT_TRUNK_HASH_MAPPING_CTRL3_HASH12_MASK    0x300
+#define    RTL8367C_PORT_TRUNK_HASH_MAPPING_CTRL3_HASH11_OFFSET    6
+#define    RTL8367C_PORT_TRUNK_HASH_MAPPING_CTRL3_HASH11_MASK    0xC0
+#define    RTL8367C_PORT_TRUNK_HASH_MAPPING_CTRL3_HASH10_OFFSET    4
+#define    RTL8367C_PORT_TRUNK_HASH_MAPPING_CTRL3_HASH10_MASK    0x30
+#define    RTL8367C_PORT_TRUNK_HASH_MAPPING_CTRL3_HASH9_OFFSET    2
+#define    RTL8367C_PORT_TRUNK_HASH_MAPPING_CTRL3_HASH9_MASK    0xC
+#define    RTL8367C_PORT_TRUNK_HASH_MAPPING_CTRL3_HASH8_OFFSET    0
+#define    RTL8367C_PORT_TRUNK_HASH_MAPPING_CTRL3_HASH8_MASK    0x3
+
+#define    RTL8367C_REG_PORT_ISOLATION_PORT0_MASK    0x08a2
+#define    RTL8367C_PORT_ISOLATION_PORT0_MASK_OFFSET    0
+#define    RTL8367C_PORT_ISOLATION_PORT0_MASK_MASK    0x7FF
+
+#define    RTL8367C_REG_PORT_ISOLATION_PORT1_MASK    0x08a3
+#define    RTL8367C_PORT_ISOLATION_PORT1_MASK_OFFSET    0
+#define    RTL8367C_PORT_ISOLATION_PORT1_MASK_MASK    0x7FF
+
+#define    RTL8367C_REG_PORT_ISOLATION_PORT2_MASK    0x08a4
+#define    RTL8367C_PORT_ISOLATION_PORT2_MASK_OFFSET    0
+#define    RTL8367C_PORT_ISOLATION_PORT2_MASK_MASK    0x7FF
+
+#define    RTL8367C_REG_PORT_ISOLATION_PORT3_MASK    0x08a5
+#define    RTL8367C_PORT_ISOLATION_PORT3_MASK_OFFSET    0
+#define    RTL8367C_PORT_ISOLATION_PORT3_MASK_MASK    0x7FF
+
+#define    RTL8367C_REG_PORT_ISOLATION_PORT4_MASK    0x08a6
+#define    RTL8367C_PORT_ISOLATION_PORT4_MASK_OFFSET    0
+#define    RTL8367C_PORT_ISOLATION_PORT4_MASK_MASK    0x7FF
+
+#define    RTL8367C_REG_PORT_ISOLATION_PORT5_MASK    0x08a7
+#define    RTL8367C_PORT_ISOLATION_PORT5_MASK_OFFSET    0
+#define    RTL8367C_PORT_ISOLATION_PORT5_MASK_MASK    0x7FF
+
+#define    RTL8367C_REG_PORT_ISOLATION_PORT6_MASK    0x08a8
+#define    RTL8367C_PORT_ISOLATION_PORT6_MASK_OFFSET    0
+#define    RTL8367C_PORT_ISOLATION_PORT6_MASK_MASK    0x7FF
+
+#define    RTL8367C_REG_PORT_ISOLATION_PORT7_MASK    0x08a9
+#define    RTL8367C_PORT_ISOLATION_PORT7_MASK_OFFSET    0
+#define    RTL8367C_PORT_ISOLATION_PORT7_MASK_MASK    0x7FF
+
+#define    RTL8367C_REG_PORT_ISOLATION_PORT8_MASK    0x08aa
+#define    RTL8367C_PORT_ISOLATION_PORT8_MASK_OFFSET    0
+#define    RTL8367C_PORT_ISOLATION_PORT8_MASK_MASK    0x7FF
+
+#define    RTL8367C_REG_PORT_ISOLATION_PORT9_MASK    0x08ab
+#define    RTL8367C_PORT_ISOLATION_PORT9_MASK_OFFSET    0
+#define    RTL8367C_PORT_ISOLATION_PORT9_MASK_MASK    0x7FF
+
+#define    RTL8367C_REG_PORT_ISOLATION_PORT10_MASK    0x08ac
+#define    RTL8367C_PORT_ISOLATION_PORT10_MASK_OFFSET    0
+#define    RTL8367C_PORT_ISOLATION_PORT10_MASK_MASK    0x7FF
+
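The eleven PORT_ISOLATION registers occupy consecutive addresses (0x08a2-0x08ac), one 11-bit egress port mask per source port. A sketch of a per-port setter under that layout; rtl_smi_write() is a hypothetical register-write primitive, not defined in this header:

#include <stdint.h>

extern int rtl_smi_write(uint16_t reg, uint16_t val); /* hypothetical SMI accessor */

/* Restrict which egress ports traffic from 'port' may reach (ports 0-10). */
static int set_port_isolation(unsigned int port, uint16_t egress_mask)
{
    if (port > 10)
        return -1;
    return rtl_smi_write((uint16_t)(RTL8367C_REG_PORT_ISOLATION_PORT0_MASK + port),
                         egress_mask & RTL8367C_PORT_ISOLATION_PORT0_MASK_MASK);
}
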
+#define    RTL8367C_REG_FORCE_CTRL    0x08b4
+#define    RTL8367C_FORCE_CTRL_OFFSET    0
+#define    RTL8367C_FORCE_CTRL_MASK    0x7FF
+
+#define    RTL8367C_REG_FORCE_PORT0_MASK    0x08b5
+#define    RTL8367C_FORCE_PORT0_MASK_OFFSET    0
+#define    RTL8367C_FORCE_PORT0_MASK_MASK    0x7FF
+
+#define    RTL8367C_REG_FORCE_PORT1_MASK    0x08b6
+#define    RTL8367C_FORCE_PORT1_MASK_OFFSET    0
+#define    RTL8367C_FORCE_PORT1_MASK_MASK    0x7FF
+
+#define    RTL8367C_REG_FORCE_PORT2_MASK    0x08b7
+#define    RTL8367C_FORCE_PORT2_MASK_OFFSET    0
+#define    RTL8367C_FORCE_PORT2_MASK_MASK    0x7FF
+
+#define    RTL8367C_REG_FORCE_PORT3_MASK    0x08b8
+#define    RTL8367C_FORCE_PORT3_MASK_OFFSET    0
+#define    RTL8367C_FORCE_PORT3_MASK_MASK    0x7FF
+
+#define    RTL8367C_REG_FORCE_PORT4_MASK    0x08b9
+#define    RTL8367C_FORCE_PORT4_MASK_OFFSET    0
+#define    RTL8367C_FORCE_PORT4_MASK_MASK    0x7FF
+
+#define    RTL8367C_REG_FORCE_PORT5_MASK    0x08ba
+#define    RTL8367C_FORCE_PORT5_MASK_OFFSET    0
+#define    RTL8367C_FORCE_PORT5_MASK_MASK    0x7FF
+
+#define    RTL8367C_REG_FORCE_PORT6_MASK    0x08bb
+#define    RTL8367C_FORCE_PORT6_MASK_OFFSET    0
+#define    RTL8367C_FORCE_PORT6_MASK_MASK    0x7FF
+
+#define    RTL8367C_REG_FORCE_PORT7_MASK    0x08bc
+#define    RTL8367C_FORCE_PORT7_MASK_OFFSET    0
+#define    RTL8367C_FORCE_PORT7_MASK_MASK    0x7FF
+
+#define    RTL8367C_REG_FORCE_PORT8_MASK    0x08bd
+#define    RTL8367C_FORCE_PORT8_MASK_OFFSET    0
+#define    RTL8367C_FORCE_PORT8_MASK_MASK    0x7FF
+
+#define    RTL8367C_REG_FORCE_PORT9_MASK    0x08be
+#define    RTL8367C_FORCE_PORT9_MASK_OFFSET    0
+#define    RTL8367C_FORCE_PORT9_MASK_MASK    0x7FF
+
+#define    RTL8367C_REG_FORCE_PORT10_MASK    0x08bf
+#define    RTL8367C_FORCE_PORT10_MASK_OFFSET    0
+#define    RTL8367C_FORCE_PORT10_MASK_MASK    0x7FF
+
+#define    RTL8367C_REG_SOURCE_PORT_PERMIT    0x08c5
+#define    RTL8367C_SOURCE_PORT_PERMIT_OFFSET    0
+#define    RTL8367C_SOURCE_PORT_PERMIT_MASK    0x7FF
+
+#define    RTL8367C_REG_IPMCAST_VLAN_LEAKY    0x08c6
+#define    RTL8367C_IPMCAST_VLAN_LEAKY_OFFSET    0
+#define    RTL8367C_IPMCAST_VLAN_LEAKY_MASK    0x7FF
+
+#define    RTL8367C_REG_IPMCAST_PORTISO_LEAKY    0x08c7
+#define    RTL8367C_IPMCAST_PORTISO_LEAKY_OFFSET    0
+#define    RTL8367C_IPMCAST_PORTISO_LEAKY_MASK    0x7FF
+
+#define    RTL8367C_REG_PORT_SECURITY_CTRL    0x08c8
+#define    RTL8367C_UNKNOWN_UNICAST_DA_BEHAVE_OFFSET    6
+#define    RTL8367C_UNKNOWN_UNICAST_DA_BEHAVE_MASK    0xC0
+#define    RTL8367C_LUT_LEARN_OVER_ACT_OFFSET    4
+#define    RTL8367C_LUT_LEARN_OVER_ACT_MASK    0x30
+#define    RTL8367C_UNMATCHED_SA_BEHAVE_OFFSET    2
+#define    RTL8367C_UNMATCHED_SA_BEHAVE_MASK    0xC
+#define    RTL8367C_UNKNOWN_SA_BEHAVE_OFFSET    0
+#define    RTL8367C_UNKNOWN_SA_BEHAVE_MASK    0x3
+
+#define    RTL8367C_REG_UNKNOWN_IPV4_MULTICAST_CTRL0    0x08c9
+#define    RTL8367C_PORT7_UNKNOWN_IP4_MCAST_OFFSET    14
+#define    RTL8367C_PORT7_UNKNOWN_IP4_MCAST_MASK    0xC000
+#define    RTL8367C_PORT6_UNKNOWN_IP4_MCAST_OFFSET    12
+#define    RTL8367C_PORT6_UNKNOWN_IP4_MCAST_MASK    0x3000
+#define    RTL8367C_PORT5_UNKNOWN_IP4_MCAST_OFFSET    10
+#define    RTL8367C_PORT5_UNKNOWN_IP4_MCAST_MASK    0xC00
+#define    RTL8367C_PORT4_UNKNOWN_IP4_MCAST_OFFSET    8
+#define    RTL8367C_PORT4_UNKNOWN_IP4_MCAST_MASK    0x300
+#define    RTL8367C_PORT3_UNKNOWN_IP4_MCAST_OFFSET    6
+#define    RTL8367C_PORT3_UNKNOWN_IP4_MCAST_MASK    0xC0
+#define    RTL8367C_PORT2_UNKNOWN_IP4_MCAST_OFFSET    4
+#define    RTL8367C_PORT2_UNKNOWN_IP4_MCAST_MASK    0x30
+#define    RTL8367C_PORT1_UNKNOWN_IP4_MCAST_OFFSET    2
+#define    RTL8367C_PORT1_UNKNOWN_IP4_MCAST_MASK    0xC
+#define    RTL8367C_PORT0_UNKNOWN_IP4_MCAST_OFFSET    0
+#define    RTL8367C_PORT0_UNKNOWN_IP4_MCAST_MASK    0x3
+
+#define    RTL8367C_REG_UNKNOWN_IPV4_MULTICAST_CTRL1    0x08ca
+#define    RTL8367C_PORT10_UNKNOWN_IP4_MCAST_OFFSET    4
+#define    RTL8367C_PORT10_UNKNOWN_IP4_MCAST_MASK    0x30
+#define    RTL8367C_PORT9_UNKNOWN_IP4_MCAST_OFFSET    2
+#define    RTL8367C_PORT9_UNKNOWN_IP4_MCAST_MASK    0xC
+#define    RTL8367C_PORT8_UNKNOWN_IP4_MCAST_OFFSET    0
+#define    RTL8367C_PORT8_UNKNOWN_IP4_MCAST_MASK    0x3
+
+#define    RTL8367C_REG_UNKNOWN_IPV6_MULTICAST_CTRL0    0x08cb
+#define    RTL8367C_PORT7_UNKNOWN_IP6_MCAST_OFFSET    14
+#define    RTL8367C_PORT7_UNKNOWN_IP6_MCAST_MASK    0xC000
+#define    RTL8367C_PORT6_UNKNOWN_IP6_MCAST_OFFSET    12
+#define    RTL8367C_PORT6_UNKNOWN_IP6_MCAST_MASK    0x3000
+#define    RTL8367C_PORT5_UNKNOWN_IP6_MCAST_OFFSET    10
+#define    RTL8367C_PORT5_UNKNOWN_IP6_MCAST_MASK    0xC00
+#define    RTL8367C_PORT4_UNKNOWN_IP6_MCAST_OFFSET    8
+#define    RTL8367C_PORT4_UNKNOWN_IP6_MCAST_MASK    0x300
+#define    RTL8367C_PORT3_UNKNOWN_IP6_MCAST_OFFSET    6
+#define    RTL8367C_PORT3_UNKNOWN_IP6_MCAST_MASK    0xC0
+#define    RTL8367C_PORT2_UNKNOWN_IP6_MCAST_OFFSET    4
+#define    RTL8367C_PORT2_UNKNOWN_IP6_MCAST_MASK    0x30
+#define    RTL8367C_PORT1_UNKNOWN_IP6_MCAST_OFFSET    2
+#define    RTL8367C_PORT1_UNKNOWN_IP6_MCAST_MASK    0xC
+#define    RTL8367C_PORT0_UNKNOWN_IP6_MCAST_OFFSET    0
+#define    RTL8367C_PORT0_UNKNOWN_IP6_MCAST_MASK    0x3
+
+#define    RTL8367C_REG_UNKNOWN_IPV6_MULTICAST_CTRL1    0x08cc
+#define    RTL8367C_PORT10_UNKNOWN_IP6_MCAST_OFFSET    4
+#define    RTL8367C_PORT10_UNKNOWN_IP6_MCAST_MASK    0x30
+#define    RTL8367C_PORT9_UNKNOWN_IP6_MCAST_OFFSET    2
+#define    RTL8367C_PORT9_UNKNOWN_IP6_MCAST_MASK    0xC
+#define    RTL8367C_PORT8_UNKNOWN_IP6_MCAST_OFFSET    0
+#define    RTL8367C_PORT8_UNKNOWN_IP6_MCAST_MASK    0x3
+
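The unknown-multicast action fields are 2-bit per-port codes, ports 0-7 in each CTRL0 register and ports 8-10 in CTRL1. Note the asymmetry: the IPv4 and IPv6 register pairs are adjacent (0x08c9/0x08ca and 0x08cb/0x08cc), but the L2 pair is not (CTRL0 at 0x08cd, CTRL1 at 0x08d5 further down), so the simple base-plus-port/8 arithmetic sketched below only holds for the IP variants:

#include <stdint.h>

/* Locate the 2-bit unknown-multicast action field for 'port' (0-10),
 * given the IPv4 or IPv6 CTRL0 address; not valid for the L2 pair. */
static void unknown_ip_mcast_field(uint16_t ctrl0, unsigned int port,
                                   uint16_t *reg, uint16_t *offset, uint16_t *mask)
{
    *reg    = (uint16_t)(ctrl0 + port / 8);
    *offset = (uint16_t)((port % 8) * 2);
    *mask   = (uint16_t)(0x3 << *offset);
}
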
+#define    RTL8367C_REG_UNKNOWN_L2_MULTICAST_CTRL0    0x08cd
+#define    RTL8367C_PORT7_UNKNOWN_L2_MCAST_OFFSET    14
+#define    RTL8367C_PORT7_UNKNOWN_L2_MCAST_MASK    0xC000
+#define    RTL8367C_PORT6_UNKNOWN_L2_MCAST_OFFSET    12
+#define    RTL8367C_PORT6_UNKNOWN_L2_MCAST_MASK    0x3000
+#define    RTL8367C_PORT5_UNKNOWN_L2_MCAST_OFFSET    10
+#define    RTL8367C_PORT5_UNKNOWN_L2_MCAST_MASK    0xC00
+#define    RTL8367C_PORT4_UNKNOWN_L2_MCAST_OFFSET    8
+#define    RTL8367C_PORT4_UNKNOWN_L2_MCAST_MASK    0x300
+#define    RTL8367C_PORT3_UNKNOWN_L2_MCAST_OFFSET    6
+#define    RTL8367C_PORT3_UNKNOWN_L2_MCAST_MASK    0xC0
+#define    RTL8367C_PORT2_UNKNOWN_L2_MCAST_OFFSET    4
+#define    RTL8367C_PORT2_UNKNOWN_L2_MCAST_MASK    0x30
+#define    RTL8367C_PORT1_UNKNOWN_L2_MCAST_OFFSET    2
+#define    RTL8367C_PORT1_UNKNOWN_L2_MCAST_MASK    0xC
+#define    RTL8367C_PORT0_UNKNOWN_L2_MCAST_OFFSET    0
+#define    RTL8367C_PORT0_UNKNOWN_L2_MCAST_MASK    0x3
+
+#define    RTL8367C_REG_PORT_TRUNK_DROP_CTRL    0x08ce
+#define    RTL8367C_PORT_TRUNK_DROP_CTRL_OFFSET    0
+#define    RTL8367C_PORT_TRUNK_DROP_CTRL_MASK    0x1
+
+#define    RTL8367C_REG_PORT_TRUNK_CTRL    0x08cf
+#define    RTL8367C_PORT_TRUNK_DUMB_OFFSET    8
+#define    RTL8367C_PORT_TRUNK_DUMB_MASK    0x100
+#define    RTL8367C_PORT_TRUNK_FLOOD_OFFSET    7
+#define    RTL8367C_PORT_TRUNK_FLOOD_MASK    0x80
+#define    RTL8367C_DPORT_HASH_OFFSET    6
+#define    RTL8367C_DPORT_HASH_MASK    0x40
+#define    RTL8367C_SPORT_HASH_OFFSET    5
+#define    RTL8367C_SPORT_HASH_MASK    0x20
+#define    RTL8367C_DIP_HASH_OFFSET    4
+#define    RTL8367C_DIP_HASH_MASK    0x10
+#define    RTL8367C_SIP_HASH_OFFSET    3
+#define    RTL8367C_SIP_HASH_MASK    0x8
+#define    RTL8367C_DMAC_HASH_OFFSET    2
+#define    RTL8367C_DMAC_HASH_MASK    0x4
+#define    RTL8367C_SMAC_HASH_OFFSET    1
+#define    RTL8367C_SMAC_HASH_MASK    0x2
+#define    RTL8367C_SPA_HASH_OFFSET    0
+#define    RTL8367C_SPA_HASH_MASK    0x1
+
+#define    RTL8367C_REG_PORT_TRUNK_GROUP_MASK    0x08d0
+#define    RTL8367C_PORT_TRUNK_GROUP2_MASK_OFFSET    8
+#define    RTL8367C_PORT_TRUNK_GROUP2_MASK_MASK    0x300
+#define    RTL8367C_PORT_TRUNK_GROUP1_MASK_OFFSET    4
+#define    RTL8367C_PORT_TRUNK_GROUP1_MASK_MASK    0xF0
+#define    RTL8367C_PORT_TRUNK_GROUP0_MASK_OFFSET    0
+#define    RTL8367C_PORT_TRUNK_GROUP0_MASK_MASK    0xF
+
+#define    RTL8367C_REG_PORT_TRUNK_FLOWCTRL    0x08d1
+#define    RTL8367C_EN_FLOWCTRL_TG2_OFFSET    2
+#define    RTL8367C_EN_FLOWCTRL_TG2_MASK    0x4
+#define    RTL8367C_EN_FLOWCTRL_TG1_OFFSET    1
+#define    RTL8367C_EN_FLOWCTRL_TG1_MASK    0x2
+#define    RTL8367C_EN_FLOWCTRL_TG0_OFFSET    0
+#define    RTL8367C_EN_FLOWCTRL_TG0_MASK    0x1
+
+#define    RTL8367C_REG_PORT_TRUNK_HASH_MAPPING_CTRL0    0x08d2
+#define    RTL8367C_PORT_TRUNK_HASH_MAPPING_CTRL0_HASH7_OFFSET    14
+#define    RTL8367C_PORT_TRUNK_HASH_MAPPING_CTRL0_HASH7_MASK    0xC000
+#define    RTL8367C_PORT_TRUNK_HASH_MAPPING_CTRL0_HASH6_OFFSET    12
+#define    RTL8367C_PORT_TRUNK_HASH_MAPPING_CTRL0_HASH6_MASK    0x3000
+#define    RTL8367C_PORT_TRUNK_HASH_MAPPING_CTRL0_HASH5_OFFSET    10
+#define    RTL8367C_PORT_TRUNK_HASH_MAPPING_CTRL0_HASH5_MASK    0xC00
+#define    RTL8367C_PORT_TRUNK_HASH_MAPPING_CTRL0_HASH4_OFFSET    8
+#define    RTL8367C_PORT_TRUNK_HASH_MAPPING_CTRL0_HASH4_MASK    0x300
+#define    RTL8367C_PORT_TRUNK_HASH_MAPPING_CTRL0_HASH3_OFFSET    6
+#define    RTL8367C_PORT_TRUNK_HASH_MAPPING_CTRL0_HASH3_MASK    0xC0
+#define    RTL8367C_PORT_TRUNK_HASH_MAPPING_CTRL0_HASH2_OFFSET    4
+#define    RTL8367C_PORT_TRUNK_HASH_MAPPING_CTRL0_HASH2_MASK    0x30
+#define    RTL8367C_PORT_TRUNK_HASH_MAPPING_CTRL0_HASH1_OFFSET    2
+#define    RTL8367C_PORT_TRUNK_HASH_MAPPING_CTRL0_HASH1_MASK    0xC
+#define    RTL8367C_PORT_TRUNK_HASH_MAPPING_CTRL0_HASH0_OFFSET    0
+#define    RTL8367C_PORT_TRUNK_HASH_MAPPING_CTRL0_HASH0_MASK    0x3
+
+#define    RTL8367C_REG_PORT_TRUNK_HASH_MAPPING_CTRL1    0x08d3
+#define    RTL8367C_PORT_TRUNK_HASH_MAPPING_CTRL1_HASH15_OFFSET    14
+#define    RTL8367C_PORT_TRUNK_HASH_MAPPING_CTRL1_HASH15_MASK    0xC000
+#define    RTL8367C_PORT_TRUNK_HASH_MAPPING_CTRL1_HASH14_OFFSET    12
+#define    RTL8367C_PORT_TRUNK_HASH_MAPPING_CTRL1_HASH14_MASK    0x3000
+#define    RTL8367C_PORT_TRUNK_HASH_MAPPING_CTRL1_HASH13_OFFSET    10
+#define    RTL8367C_PORT_TRUNK_HASH_MAPPING_CTRL1_HASH13_MASK    0xC00
+#define    RTL8367C_PORT_TRUNK_HASH_MAPPING_CTRL1_HASH12_OFFSET    8
+#define    RTL8367C_PORT_TRUNK_HASH_MAPPING_CTRL1_HASH12_MASK    0x300
+#define    RTL8367C_PORT_TRUNK_HASH_MAPPING_CTRL1_HASH11_OFFSET    6
+#define    RTL8367C_PORT_TRUNK_HASH_MAPPING_CTRL1_HASH11_MASK    0xC0
+#define    RTL8367C_PORT_TRUNK_HASH_MAPPING_CTRL1_HASH10_OFFSET    4
+#define    RTL8367C_PORT_TRUNK_HASH_MAPPING_CTRL1_HASH10_MASK    0x30
+#define    RTL8367C_PORT_TRUNK_HASH_MAPPING_CTRL1_HASH9_OFFSET    2
+#define    RTL8367C_PORT_TRUNK_HASH_MAPPING_CTRL1_HASH9_MASK    0xC
+#define    RTL8367C_PORT_TRUNK_HASH_MAPPING_CTRL1_HASH8_OFFSET    0
+#define    RTL8367C_PORT_TRUNK_HASH_MAPPING_CTRL1_HASH8_MASK    0x3
+
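PORT_TRUNK_HASH_MAPPING_CTRL0/CTRL1 (0x08d2/0x08d3) pack eight 2-bit member-select entries each, covering hash values 0-15; CTRL2/CTRL3 earlier in the map (0x08a0/0x08a1) repeat the same field layout. A sketch of the lookup for the CTRL0/CTRL1 pair:

#include <stdint.h>

/* Locate the 2-bit trunk-member field for a hash value (0-15). */
static void trunk_hash_field(unsigned int hash, uint16_t *reg, uint16_t *offset, uint16_t *mask)
{
    *reg    = (uint16_t)(RTL8367C_REG_PORT_TRUNK_HASH_MAPPING_CTRL0 + hash / 8);
    *offset = (uint16_t)((hash % 8) * 2);
    *mask   = (uint16_t)(0x3 << *offset);
}
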
+#define    RTL8367C_REG_DOS_CFG    0x08d4
+#define    RTL8367C_DROP_ICMPFRAGMENT_OFFSET    9
+#define    RTL8367C_DROP_ICMPFRAGMENT_MASK    0x200
+#define    RTL8367C_DROP_TCPFRAGERROR_OFFSET    8
+#define    RTL8367C_DROP_TCPFRAGERROR_MASK    0x100
+#define    RTL8367C_DROP_TCPSHORTHDR_OFFSET    7
+#define    RTL8367C_DROP_TCPSHORTHDR_MASK    0x80
+#define    RTL8367C_DROP_SYN1024_OFFSET    6
+#define    RTL8367C_DROP_SYN1024_MASK    0x40
+#define    RTL8367C_DROP_NULLSCAN_OFFSET    5
+#define    RTL8367C_DROP_NULLSCAN_MASK    0x20
+#define    RTL8367C_DROP_XMASCAN_OFFSET    4
+#define    RTL8367C_DROP_XMASCAN_MASK    0x10
+#define    RTL8367C_DROP_SYNFINSCAN_OFFSET    3
+#define    RTL8367C_DROP_SYNFINSCAN_MASK    0x8
+#define    RTL8367C_DROP_BLATATTACKS_OFFSET    2
+#define    RTL8367C_DROP_BLATATTACKS_MASK    0x4
+#define    RTL8367C_DROP_LANDATTACKS_OFFSET    1
+#define    RTL8367C_DROP_LANDATTACKS_MASK    0x2
+#define    RTL8367C_DROP_DAEQSA_OFFSET    0
+#define    RTL8367C_DROP_DAEQSA_MASK    0x1
+
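DOS_CFG appears to be a straightforward bit-per-check enable register, so a typical hardening configuration just ORs the desired _MASK values together. A sketch (the write helper remains hypothetical):

#include <stdint.h>

/* Enable a set of DoS drop checks: port scans plus trivially spoofed frames. */
static const uint16_t dos_cfg_drops =
    RTL8367C_DROP_DAEQSA_MASK      |  /* drop frames with DA == SA */
    RTL8367C_DROP_LANDATTACKS_MASK |  /* drop LAND attack frames   */
    RTL8367C_DROP_SYNFINSCAN_MASK  |  /* drop SYN+FIN scan packets */
    RTL8367C_DROP_XMASCAN_MASK     |  /* drop Xmas scan packets    */
    RTL8367C_DROP_NULLSCAN_MASK;      /* drop NULL scan packets    */
/* rtl_smi_write(RTL8367C_REG_DOS_CFG, dos_cfg_drops); -- hypothetical helper */
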
+#define    RTL8367C_REG_UNKNOWN_L2_MULTICAST_CTRL1    0x08d5
+#define    RTL8367C_PORT10_UNKNOWN_L2_MCAST_OFFSET    4
+#define    RTL8367C_PORT10_UNKNOWN_L2_MCAST_MASK    0x30
+#define    RTL8367C_PORT9_UNKNOWN_L2_MCAST_OFFSET    2
+#define    RTL8367C_PORT9_UNKNOWN_L2_MCAST_MASK    0xC
+#define    RTL8367C_PORT8_UNKNOWN_L2_MCAST_OFFSET    0
+#define    RTL8367C_PORT8_UNKNOWN_L2_MCAST_MASK    0x3
+
+#define    RTL8367C_REG_VLAN_EGRESS_KEEP_CTRL4    0x08d6
+#define    RTL8367C_PORT9_VLAN_KEEP_MASK_OFFSET    8
+#define    RTL8367C_PORT9_VLAN_KEEP_MASK_MASK    0xFF00
+#define    RTL8367C_PORT8_VLAN_KEEP_MASK_OFFSET    0
+#define    RTL8367C_PORT8_VLAN_KEEP_MASK_MASK    0xFF
+
+#define    RTL8367C_REG_VLAN_EGRESS_KEEP_CTRL5    0x08d7
+#define    RTL8367C_VLAN_EGRESS_KEEP_CTRL5_OFFSET    0
+#define    RTL8367C_VLAN_EGRESS_KEEP_CTRL5_MASK    0xFF
+
+#define    RTL8367C_REG_VLAN_EGRESS_KEEP_CTRL0_EXT    0x08d8
+#define    RTL8367C_PORT1_VLAN_KEEP_MASK_EXT_OFFSET    3
+#define    RTL8367C_PORT1_VLAN_KEEP_MASK_EXT_MASK    0x38
+#define    RTL8367C_PORT0_VLAN_KEEP_MASK_EXT_OFFSET    0
+#define    RTL8367C_PORT0_VLAN_KEEP_MASK_EXT_MASK    0x7
+
+#define    RTL8367C_REG_VLAN_EGRESS_KEEP_CTRL1_EXT    0x08d9
+#define    RTL8367C_PORT3_VLAN_KEEP_MASK_EXT_OFFSET    3
+#define    RTL8367C_PORT3_VLAN_KEEP_MASK_EXT_MASK    0x38
+#define    RTL8367C_PORT2_VLAN_KEEP_MASK_EXT_OFFSET    0
+#define    RTL8367C_PORT2_VLAN_KEEP_MASK_EXT_MASK    0x7
+
+#define    RTL8367C_REG_VLAN_EGRESS_KEEP_CTRL2_EXT    0x08da
+#define    RTL8367C_PORT5_VLAN_KEEP_MASK_EXT_OFFSET    3
+#define    RTL8367C_PORT5_VLAN_KEEP_MASK_EXT_MASK    0x38
+#define    RTL8367C_PORT4_VLAN_KEEP_MASK_EXT_OFFSET    0
+#define    RTL8367C_PORT4_VLAN_KEEP_MASK_EXT_MASK    0x7
+
+#define    RTL8367C_REG_VLAN_EGRESS_KEEP_CTRL3_EXT    0x08db
+#define    RTL8367C_PORT7_VLAN_KEEP_MASK_EXT_OFFSET    3
+#define    RTL8367C_PORT7_VLAN_KEEP_MASK_EXT_MASK    0x38
+#define    RTL8367C_PORT6_VLAN_KEEP_MASK_EXT_OFFSET    0
+#define    RTL8367C_PORT6_VLAN_KEEP_MASK_EXT_MASK    0x7
+
+#define    RTL8367C_REG_VLAN_EGRESS_KEEP_CTRL4_EXT    0x08dc
+#define    RTL8367C_PORT9_VLAN_KEEP_MASK_EXT_OFFSET    3
+#define    RTL8367C_PORT9_VLAN_KEEP_MASK_EXT_MASK    0x38
+#define    RTL8367C_PORT8_VLAN_KEEP_MASK_EXT_OFFSET    0
+#define    RTL8367C_PORT8_VLAN_KEEP_MASK_EXT_MASK    0x7
+
+#define    RTL8367C_REG_VLAN_EGRESS_KEEP_CTRL5_EXT    0x08dd
+#define    RTL8367C_VLAN_EGRESS_KEEP_CTRL5_EXT_OFFSET    0
+#define    RTL8367C_VLAN_EGRESS_KEEP_CTRL5_EXT_MASK    0x7
+
+#define    RTL8367C_REG_VLAN_EGRESS_TRANS_CTRL10    0x08de
+#define    RTL8367C_VLAN_EGRESS_TRANS_CTRL10_OFFSET    0
+#define    RTL8367C_VLAN_EGRESS_TRANS_CTRL10_MASK    0x7FF
+
+#define    RTL8367C_REG_FPGA_VER_CEN    0x08e0
+
+#define    RTL8367C_REG_FPGA_TIME_CEN    0x08e1
+
+#define    RTL8367C_REG_FPGA_DATE_CEN    0x08e2
+
+#define    RTL8367C_REG_QOS_PORT_QUEUE_NUMBER_CTRL0    0x0900
+#define    RTL8367C_PORT3_NUMBER_OFFSET    12
+#define    RTL8367C_PORT3_NUMBER_MASK    0x7000
+#define    RTL8367C_PORT2_NUMBER_OFFSET    8
+#define    RTL8367C_PORT2_NUMBER_MASK    0x700
+#define    RTL8367C_PORT1_NUMBER_OFFSET    4
+#define    RTL8367C_PORT1_NUMBER_MASK    0x70
+#define    RTL8367C_PORT0_NUMBER_OFFSET    0
+#define    RTL8367C_PORT0_NUMBER_MASK    0x7
+
+#define    RTL8367C_REG_QOS_PORT_QUEUE_NUMBER_CTRL1    0x0901
+#define    RTL8367C_PORT7_NUMBER_OFFSET    12
+#define    RTL8367C_PORT7_NUMBER_MASK    0x7000
+#define    RTL8367C_PORT6_NUMBER_OFFSET    8
+#define    RTL8367C_PORT6_NUMBER_MASK    0x700
+#define    RTL8367C_PORT5_NUMBER_OFFSET    4
+#define    RTL8367C_PORT5_NUMBER_MASK    0x70
+#define    RTL8367C_PORT4_NUMBER_OFFSET    0
+#define    RTL8367C_PORT4_NUMBER_MASK    0x7
+
+#define    RTL8367C_REG_QOS_PORT_QUEUE_NUMBER_CTRL2    0x0902
+#define    RTL8367C_PORT10_NUMBER_OFFSET    8
+#define    RTL8367C_PORT10_NUMBER_MASK    0x700
+#define    RTL8367C_PORT9_NUMBER_OFFSET    4
+#define    RTL8367C_PORT9_NUMBER_MASK    0x70
+#define    RTL8367C_PORT8_NUMBER_OFFSET    0
+#define    RTL8367C_PORT8_NUMBER_MASK    0x7
+
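The per-port queue-number fields follow the usual packing: four ports per register, one 3-bit field every 4 bits, ports 0-10 across CTRL0-CTRL2. Sketch:

#include <stdint.h>

/* Locate the 3-bit output-queue-count field for 'port' (0-10). */
static void port_queue_num_field(unsigned int port, uint16_t *reg, uint16_t *offset, uint16_t *mask)
{
    *reg    = (uint16_t)(RTL8367C_REG_QOS_PORT_QUEUE_NUMBER_CTRL0 + port / 4);
    *offset = (uint16_t)((port % 4) * 4);
    *mask   = (uint16_t)(0x7 << *offset);
}
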
+#define    RTL8367C_REG_QOS_1Q_PRIORITY_TO_QID_CTRL0    0x0904
+#define    RTL8367C_QOS_1Q_PRIORITY_TO_QID_CTRL0_PRIORITY3_TO_QID_OFFSET    12
+#define    RTL8367C_QOS_1Q_PRIORITY_TO_QID_CTRL0_PRIORITY3_TO_QID_MASK    0x7000
+#define    RTL8367C_QOS_1Q_PRIORITY_TO_QID_CTRL0_PRIORITY2_TO_QID_OFFSET    8
+#define    RTL8367C_QOS_1Q_PRIORITY_TO_QID_CTRL0_PRIORITY2_TO_QID_MASK    0x700
+#define    RTL8367C_QOS_1Q_PRIORITY_TO_QID_CTRL0_PRIORITY1_TO_QID_OFFSET    4
+#define    RTL8367C_QOS_1Q_PRIORITY_TO_QID_CTRL0_PRIORITY1_TO_QID_MASK    0x70
+#define    RTL8367C_QOS_1Q_PRIORITY_TO_QID_CTRL0_PRIORITY0_TO_QID_OFFSET    0
+#define    RTL8367C_QOS_1Q_PRIORITY_TO_QID_CTRL0_PRIORITY0_TO_QID_MASK    0x7
+
+#define    RTL8367C_REG_QOS_1Q_PRIORITY_TO_QID_CTRL1    0x0905
+#define    RTL8367C_QOS_1Q_PRIORITY_TO_QID_CTRL1_PRIORITY7_TO_QID_OFFSET    12
+#define    RTL8367C_QOS_1Q_PRIORITY_TO_QID_CTRL1_PRIORITY7_TO_QID_MASK    0x7000
+#define    RTL8367C_QOS_1Q_PRIORITY_TO_QID_CTRL1_PRIORITY6_TO_QID_OFFSET    8
+#define    RTL8367C_QOS_1Q_PRIORITY_TO_QID_CTRL1_PRIORITY6_TO_QID_MASK    0x700
+#define    RTL8367C_QOS_1Q_PRIORITY_TO_QID_CTRL1_PRIORITY5_TO_QID_OFFSET    4
+#define    RTL8367C_QOS_1Q_PRIORITY_TO_QID_CTRL1_PRIORITY5_TO_QID_MASK    0x70
+#define    RTL8367C_QOS_1Q_PRIORITY_TO_QID_CTRL1_PRIORITY4_TO_QID_OFFSET    0
+#define    RTL8367C_QOS_1Q_PRIORITY_TO_QID_CTRL1_PRIORITY4_TO_QID_MASK    0x7
+
+#define    RTL8367C_REG_QOS_2Q_PRIORITY_TO_QID_CTRL0    0x0906
+#define    RTL8367C_QOS_2Q_PRIORITY_TO_QID_CTRL0_PRIORITY3_TO_QID_OFFSET    12
+#define    RTL8367C_QOS_2Q_PRIORITY_TO_QID_CTRL0_PRIORITY3_TO_QID_MASK    0x7000
+#define    RTL8367C_QOS_2Q_PRIORITY_TO_QID_CTRL0_PRIORITY2_TO_QID_OFFSET    8
+#define    RTL8367C_QOS_2Q_PRIORITY_TO_QID_CTRL0_PRIORITY2_TO_QID_MASK    0x700
+#define    RTL8367C_QOS_2Q_PRIORITY_TO_QID_CTRL0_PRIORITY1_TO_QID_OFFSET    4
+#define    RTL8367C_QOS_2Q_PRIORITY_TO_QID_CTRL0_PRIORITY1_TO_QID_MASK    0x70
+#define    RTL8367C_QOS_2Q_PRIORITY_TO_QID_CTRL0_PRIORITY0_TO_QID_OFFSET    0
+#define    RTL8367C_QOS_2Q_PRIORITY_TO_QID_CTRL0_PRIORITY0_TO_QID_MASK    0x7
+
+#define    RTL8367C_REG_QOS_2Q_PRIORITY_TO_QID_CTRL1    0x0907
+#define    RTL8367C_QOS_2Q_PRIORITY_TO_QID_CTRL1_PRIORITY7_TO_QID_OFFSET    12
+#define    RTL8367C_QOS_2Q_PRIORITY_TO_QID_CTRL1_PRIORITY7_TO_QID_MASK    0x7000
+#define    RTL8367C_QOS_2Q_PRIORITY_TO_QID_CTRL1_PRIORITY6_TO_QID_OFFSET    8
+#define    RTL8367C_QOS_2Q_PRIORITY_TO_QID_CTRL1_PRIORITY6_TO_QID_MASK    0x700
+#define    RTL8367C_QOS_2Q_PRIORITY_TO_QID_CTRL1_PRIORITY5_TO_QID_OFFSET    4
+#define    RTL8367C_QOS_2Q_PRIORITY_TO_QID_CTRL1_PRIORITY5_TO_QID_MASK    0x70
+#define    RTL8367C_QOS_2Q_PRIORITY_TO_QID_CTRL1_PRIORITY4_TO_QID_OFFSET    0
+#define    RTL8367C_QOS_2Q_PRIORITY_TO_QID_CTRL1_PRIORITY4_TO_QID_MASK    0x7
+
+#define    RTL8367C_REG_QOS_3Q_PRIORITY_TO_QID_CTRL0    0x0908
+#define    RTL8367C_QOS_3Q_PRIORITY_TO_QID_CTRL0_PRIORITY3_TO_QID_OFFSET    12
+#define    RTL8367C_QOS_3Q_PRIORITY_TO_QID_CTRL0_PRIORITY3_TO_QID_MASK    0x7000
+#define    RTL8367C_QOS_3Q_PRIORITY_TO_QID_CTRL0_PRIORITY2_TO_QID_OFFSET    8
+#define    RTL8367C_QOS_3Q_PRIORITY_TO_QID_CTRL0_PRIORITY2_TO_QID_MASK    0x700
+#define    RTL8367C_QOS_3Q_PRIORITY_TO_QID_CTRL0_PRIORITY1_TO_QID_OFFSET    4
+#define    RTL8367C_QOS_3Q_PRIORITY_TO_QID_CTRL0_PRIORITY1_TO_QID_MASK    0x70
+#define    RTL8367C_QOS_3Q_PRIORITY_TO_QID_CTRL0_PRIORITY0_TO_QID_OFFSET    0
+#define    RTL8367C_QOS_3Q_PRIORITY_TO_QID_CTRL0_PRIORITY0_TO_QID_MASK    0x7
+
+#define    RTL8367C_REG_QOS_3Q_PRIORITY_TO_QID_CTRL1    0x0909
+#define    RTL8367C_QOS_3Q_PRIORITY_TO_QID_CTRL1_PRIORITY7_TO_QID_OFFSET    12
+#define    RTL8367C_QOS_3Q_PRIORITY_TO_QID_CTRL1_PRIORITY7_TO_QID_MASK    0x7000
+#define    RTL8367C_QOS_3Q_PRIORITY_TO_QID_CTRL1_PRIORITY6_TO_QID_OFFSET    8
+#define    RTL8367C_QOS_3Q_PRIORITY_TO_QID_CTRL1_PRIORITY6_TO_QID_MASK    0x700
+#define    RTL8367C_QOS_3Q_PRIORITY_TO_QID_CTRL1_PRIORITY5_TO_QID_OFFSET    4
+#define    RTL8367C_QOS_3Q_PRIORITY_TO_QID_CTRL1_PRIORITY5_TO_QID_MASK    0x70
+#define    RTL8367C_QOS_3Q_PRIORITY_TO_QID_CTRL1_PRIORITY4_TO_QID_OFFSET    0
+#define    RTL8367C_QOS_3Q_PRIORITY_TO_QID_CTRL1_PRIORITY4_TO_QID_MASK    0x7
+
+#define    RTL8367C_REG_QOS_4Q_PRIORITY_TO_QID_CTRL0    0x090a
+#define    RTL8367C_QOS_4Q_PRIORITY_TO_QID_CTRL0_PRIORITY3_TO_QID_OFFSET    12
+#define    RTL8367C_QOS_4Q_PRIORITY_TO_QID_CTRL0_PRIORITY3_TO_QID_MASK    0x7000
+#define    RTL8367C_QOS_4Q_PRIORITY_TO_QID_CTRL0_PRIORITY2_TO_QID_OFFSET    8
+#define    RTL8367C_QOS_4Q_PRIORITY_TO_QID_CTRL0_PRIORITY2_TO_QID_MASK    0x700
+#define    RTL8367C_QOS_4Q_PRIORITY_TO_QID_CTRL0_PRIORITY1_TO_QID_OFFSET    4
+#define    RTL8367C_QOS_4Q_PRIORITY_TO_QID_CTRL0_PRIORITY1_TO_QID_MASK    0x70
+#define    RTL8367C_QOS_4Q_PRIORITY_TO_QID_CTRL0_PRIORITY0_TO_QID_OFFSET    0
+#define    RTL8367C_QOS_4Q_PRIORITY_TO_QID_CTRL0_PRIORITY0_TO_QID_MASK    0x7
+
+#define    RTL8367C_REG_QOS_4Q_PRIORITY_TO_QID_CTRL1    0x090b
+#define    RTL8367C_QOS_4Q_PRIORITY_TO_QID_CTRL1_PRIORITY7_TO_QID_OFFSET    12
+#define    RTL8367C_QOS_4Q_PRIORITY_TO_QID_CTRL1_PRIORITY7_TO_QID_MASK    0x7000
+#define    RTL8367C_QOS_4Q_PRIORITY_TO_QID_CTRL1_PRIORITY6_TO_QID_OFFSET    8
+#define    RTL8367C_QOS_4Q_PRIORITY_TO_QID_CTRL1_PRIORITY6_TO_QID_MASK    0x700
+#define    RTL8367C_QOS_4Q_PRIORITY_TO_QID_CTRL1_PRIORITY5_TO_QID_OFFSET    4
+#define    RTL8367C_QOS_4Q_PRIORITY_TO_QID_CTRL1_PRIORITY5_TO_QID_MASK    0x70
+#define    RTL8367C_QOS_4Q_PRIORITY_TO_QID_CTRL1_PRIORITY4_TO_QID_OFFSET    0
+#define    RTL8367C_QOS_4Q_PRIORITY_TO_QID_CTRL1_PRIORITY4_TO_QID_MASK    0x7
+
+#define    RTL8367C_REG_QOS_5Q_PRIORITY_TO_QID_CTRL0    0x090c
+#define    RTL8367C_QOS_5Q_PRIORITY_TO_QID_CTRL0_PRIORITY3_TO_QID_OFFSET    12
+#define    RTL8367C_QOS_5Q_PRIORITY_TO_QID_CTRL0_PRIORITY3_TO_QID_MASK    0x7000
+#define    RTL8367C_QOS_5Q_PRIORITY_TO_QID_CTRL0_PRIORITY2_TO_QID_OFFSET    8
+#define    RTL8367C_QOS_5Q_PRIORITY_TO_QID_CTRL0_PRIORITY2_TO_QID_MASK    0x700
+#define    RTL8367C_QOS_5Q_PRIORITY_TO_QID_CTRL0_PRIORITY1_TO_QID_OFFSET    4
+#define    RTL8367C_QOS_5Q_PRIORITY_TO_QID_CTRL0_PRIORITY1_TO_QID_MASK    0x70
+#define    RTL8367C_QOS_5Q_PRIORITY_TO_QID_CTRL0_PRIORITY0_TO_QID_OFFSET    0
+#define    RTL8367C_QOS_5Q_PRIORITY_TO_QID_CTRL0_PRIORITY0_TO_QID_MASK    0x7
+
+#define    RTL8367C_REG_QOS_5Q_PRIORITY_TO_QID_CTRL1    0x090d
+#define    RTL8367C_QOS_5Q_PRIORITY_TO_QID_CTRL1_PRIORITY7_TO_QID_OFFSET    12
+#define    RTL8367C_QOS_5Q_PRIORITY_TO_QID_CTRL1_PRIORITY7_TO_QID_MASK    0x7000
+#define    RTL8367C_QOS_5Q_PRIORITY_TO_QID_CTRL1_PRIORITY6_TO_QID_OFFSET    8
+#define    RTL8367C_QOS_5Q_PRIORITY_TO_QID_CTRL1_PRIORITY6_TO_QID_MASK    0x700
+#define    RTL8367C_QOS_5Q_PRIORITY_TO_QID_CTRL1_PRIORITY5_TO_QID_OFFSET    4
+#define    RTL8367C_QOS_5Q_PRIORITY_TO_QID_CTRL1_PRIORITY5_TO_QID_MASK    0x70
+#define    RTL8367C_QOS_5Q_PRIORITY_TO_QID_CTRL1_PRIORITY4_TO_QID_OFFSET    0
+#define    RTL8367C_QOS_5Q_PRIORITY_TO_QID_CTRL1_PRIORITY4_TO_QID_MASK    0x7
+
+#define    RTL8367C_REG_QOS_6Q_PRIORITY_TO_QID_CTRL0    0x090e
+#define    RTL8367C_QOS_6Q_PRIORITY_TO_QID_CTRL0_PRIORITY3_TO_QID_OFFSET    12
+#define    RTL8367C_QOS_6Q_PRIORITY_TO_QID_CTRL0_PRIORITY3_TO_QID_MASK    0x7000
+#define    RTL8367C_QOS_6Q_PRIORITY_TO_QID_CTRL0_PRIORITY2_TO_QID_OFFSET    8
+#define    RTL8367C_QOS_6Q_PRIORITY_TO_QID_CTRL0_PRIORITY2_TO_QID_MASK    0x700
+#define    RTL8367C_QOS_6Q_PRIORITY_TO_QID_CTRL0_PRIORITY1_TO_QID_OFFSET    4
+#define    RTL8367C_QOS_6Q_PRIORITY_TO_QID_CTRL0_PRIORITY1_TO_QID_MASK    0x70
+#define    RTL8367C_QOS_6Q_PRIORITY_TO_QID_CTRL0_PRIORITY0_TO_QID_OFFSET    0
+#define    RTL8367C_QOS_6Q_PRIORITY_TO_QID_CTRL0_PRIORITY0_TO_QID_MASK    0x7
+
+#define    RTL8367C_REG_QOS_6Q_PRIORITY_TO_QID_CTRL1    0x090f
+#define    RTL8367C_QOS_6Q_PRIORITY_TO_QID_CTRL1_PRIORITY7_TO_QID_OFFSET    12
+#define    RTL8367C_QOS_6Q_PRIORITY_TO_QID_CTRL1_PRIORITY7_TO_QID_MASK    0x7000
+#define    RTL8367C_QOS_6Q_PRIORITY_TO_QID_CTRL1_PRIORITY6_TO_QID_OFFSET    8
+#define    RTL8367C_QOS_6Q_PRIORITY_TO_QID_CTRL1_PRIORITY6_TO_QID_MASK    0x700
+#define    RTL8367C_QOS_6Q_PRIORITY_TO_QID_CTRL1_PRIORITY5_TO_QID_OFFSET    4
+#define    RTL8367C_QOS_6Q_PRIORITY_TO_QID_CTRL1_PRIORITY5_TO_QID_MASK    0x70
+#define    RTL8367C_QOS_6Q_PRIORITY_TO_QID_CTRL1_PRIORITY4_TO_QID_OFFSET    0
+#define    RTL8367C_QOS_6Q_PRIORITY_TO_QID_CTRL1_PRIORITY4_TO_QID_MASK    0x7
+
+#define    RTL8367C_REG_QOS_7Q_PRIORITY_TO_QID_CTRL0    0x0910
+#define    RTL8367C_QOS_7Q_PRIORITY_TO_QID_CTRL0_PRIORITY3_TO_QID_OFFSET    12
+#define    RTL8367C_QOS_7Q_PRIORITY_TO_QID_CTRL0_PRIORITY3_TO_QID_MASK    0x7000
+#define    RTL8367C_QOS_7Q_PRIORITY_TO_QID_CTRL0_PRIORITY2_TO_QID_OFFSET    8
+#define    RTL8367C_QOS_7Q_PRIORITY_TO_QID_CTRL0_PRIORITY2_TO_QID_MASK    0x700
+#define    RTL8367C_QOS_7Q_PRIORITY_TO_QID_CTRL0_PRIORITY1_TO_QID_OFFSET    4
+#define    RTL8367C_QOS_7Q_PRIORITY_TO_QID_CTRL0_PRIORITY1_TO_QID_MASK    0x70
+#define    RTL8367C_QOS_7Q_PRIORITY_TO_QID_CTRL0_PRIORITY0_TO_QID_OFFSET    0
+#define    RTL8367C_QOS_7Q_PRIORITY_TO_QID_CTRL0_PRIORITY0_TO_QID_MASK    0x7
+
+#define    RTL8367C_REG_QOS_7Q_PRIORITY_TO_QID_CTRL1    0x0911
+#define    RTL8367C_QOS_7Q_PRIORITY_TO_QID_CTRL1_PRIORITY7_TO_QID_OFFSET    12
+#define    RTL8367C_QOS_7Q_PRIORITY_TO_QID_CTRL1_PRIORITY7_TO_QID_MASK    0x7000
+#define    RTL8367C_QOS_7Q_PRIORITY_TO_QID_CTRL1_PRIORITY6_TO_QID_OFFSET    8
+#define    RTL8367C_QOS_7Q_PRIORITY_TO_QID_CTRL1_PRIORITY6_TO_QID_MASK    0x700
+#define    RTL8367C_QOS_7Q_PRIORITY_TO_QID_CTRL1_PRIORITY5_TO_QID_OFFSET    4
+#define    RTL8367C_QOS_7Q_PRIORITY_TO_QID_CTRL1_PRIORITY5_TO_QID_MASK    0x70
+#define    RTL8367C_QOS_7Q_PRIORITY_TO_QID_CTRL1_PRIORITY4_TO_QID_OFFSET    0
+#define    RTL8367C_QOS_7Q_PRIORITY_TO_QID_CTRL1_PRIORITY4_TO_QID_MASK    0x7
+
+#define    RTL8367C_REG_QOS_8Q_PRIORITY_TO_QID_CTRL0    0x0912
+#define    RTL8367C_QOS_8Q_PRIORITY_TO_QID_CTRL0_PRIORITY3_TO_QID_OFFSET    12
+#define    RTL8367C_QOS_8Q_PRIORITY_TO_QID_CTRL0_PRIORITY3_TO_QID_MASK    0x7000
+#define    RTL8367C_QOS_8Q_PRIORITY_TO_QID_CTRL0_PRIORITY2_TO_QID_OFFSET    8
+#define    RTL8367C_QOS_8Q_PRIORITY_TO_QID_CTRL0_PRIORITY2_TO_QID_MASK    0x700
+#define    RTL8367C_QOS_8Q_PRIORITY_TO_QID_CTRL0_PRIORITY1_TO_QID_OFFSET    4
+#define    RTL8367C_QOS_8Q_PRIORITY_TO_QID_CTRL0_PRIORITY1_TO_QID_MASK    0x70
+#define    RTL8367C_QOS_8Q_PRIORITY_TO_QID_CTRL0_PRIORITY0_TO_QID_OFFSET    0
+#define    RTL8367C_QOS_8Q_PRIORITY_TO_QID_CTRL0_PRIORITY0_TO_QID_MASK    0x7
+
+#define    RTL8367C_REG_QOS_8Q_PRIORITY_TO_QID_CTRL1    0x0913
+#define    RTL8367C_QOS_8Q_PRIORITY_TO_QID_CTRL1_PRIORITY7_TO_QID_OFFSET    12
+#define    RTL8367C_QOS_8Q_PRIORITY_TO_QID_CTRL1_PRIORITY7_TO_QID_MASK    0x7000
+#define    RTL8367C_QOS_8Q_PRIORITY_TO_QID_CTRL1_PRIORITY6_TO_QID_OFFSET    8
+#define    RTL8367C_QOS_8Q_PRIORITY_TO_QID_CTRL1_PRIORITY6_TO_QID_MASK    0x700
+#define    RTL8367C_QOS_8Q_PRIORITY_TO_QID_CTRL1_PRIORITY5_TO_QID_OFFSET    4
+#define    RTL8367C_QOS_8Q_PRIORITY_TO_QID_CTRL1_PRIORITY5_TO_QID_MASK    0x70
+#define    RTL8367C_QOS_8Q_PRIORITY_TO_QID_CTRL1_PRIORITY4_TO_QID_OFFSET    0
+#define    RTL8367C_QOS_8Q_PRIORITY_TO_QID_CTRL1_PRIORITY4_TO_QID_MASK    0x7
+
+#define    RTL8367C_REG_HIGHPRI_INDICATOR    0x0915
+#define    RTL8367C_PORT10_INDICATOR_OFFSET    10
+#define    RTL8367C_PORT10_INDICATOR_MASK    0x400
+#define    RTL8367C_PORT9_INDICATOR_OFFSET    9
+#define    RTL8367C_PORT9_INDICATOR_MASK    0x200
+#define    RTL8367C_PORT8_INDICATOR_OFFSET    8
+#define    RTL8367C_PORT8_INDICATOR_MASK    0x100
+#define    RTL8367C_PORT7_INDICATOR_OFFSET    7
+#define    RTL8367C_PORT7_INDICATOR_MASK    0x80
+#define    RTL8367C_PORT6_INDICATOR_OFFSET    6
+#define    RTL8367C_PORT6_INDICATOR_MASK    0x40
+#define    RTL8367C_PORT5_INDICATOR_OFFSET    5
+#define    RTL8367C_PORT5_INDICATOR_MASK    0x20
+#define    RTL8367C_PORT4_INDICATOR_OFFSET    4
+#define    RTL8367C_PORT4_INDICATOR_MASK    0x10
+#define    RTL8367C_PORT3_INDICATOR_OFFSET    3
+#define    RTL8367C_PORT3_INDICATOR_MASK    0x8
+#define    RTL8367C_PORT2_INDICATOR_OFFSET    2
+#define    RTL8367C_PORT2_INDICATOR_MASK    0x4
+#define    RTL8367C_PORT1_INDICATOR_OFFSET    1
+#define    RTL8367C_PORT1_INDICATOR_MASK    0x2
+#define    RTL8367C_PORT0_INDICATOR_OFFSET    0
+#define    RTL8367C_PORT0_INDICATOR_MASK    0x1
+
+#define    RTL8367C_REG_HIGHPRI_CFG    0x0916
+#define    RTL8367C_HIGHPRI_CFG_OFFSET    0
+#define    RTL8367C_HIGHPRI_CFG_MASK    0xFF
+
+#define    RTL8367C_REG_PORT_DEBUG_INFO_CTRL0    0x0917
+#define    RTL8367C_PORT1_DEBUG_INFO_OFFSET    8
+#define    RTL8367C_PORT1_DEBUG_INFO_MASK    0xFF00
+#define    RTL8367C_PORT0_DEBUG_INFO_OFFSET    0
+#define    RTL8367C_PORT0_DEBUG_INFO_MASK    0xFF
+
+#define    RTL8367C_REG_PORT_DEBUG_INFO_CTRL1    0x0918
+#define    RTL8367C_PORT3_DEBUG_INFO_OFFSET    8
+#define    RTL8367C_PORT3_DEBUG_INFO_MASK    0xFF00
+#define    RTL8367C_PORT2_DEBUG_INFO_OFFSET    0
+#define    RTL8367C_PORT2_DEBUG_INFO_MASK    0xFF
+
+#define    RTL8367C_REG_PORT_DEBUG_INFO_CTRL2    0x0919
+#define    RTL8367C_PORT5_DEBUG_INFO_OFFSET    8
+#define    RTL8367C_PORT5_DEBUG_INFO_MASK    0xFF00
+#define    RTL8367C_PORT4_DEBUG_INFO_OFFSET    0
+#define    RTL8367C_PORT4_DEBUG_INFO_MASK    0xFF
+
+#define    RTL8367C_REG_PORT_DEBUG_INFO_CTRL3    0x091a
+#define    RTL8367C_PORT7_DEBUG_INFO_OFFSET    8
+#define    RTL8367C_PORT7_DEBUG_INFO_MASK    0xFF00
+#define    RTL8367C_PORT6_DEBUG_INFO_OFFSET    0
+#define    RTL8367C_PORT6_DEBUG_INFO_MASK    0xFF
+
+#define    RTL8367C_REG_PORT_DEBUG_INFO_CTRL4    0x091b
+#define    RTL8367C_PORT9_DEBUG_INFO_OFFSET    8
+#define    RTL8367C_PORT9_DEBUG_INFO_MASK    0xFF00
+#define    RTL8367C_PORT8_DEBUG_INFO_OFFSET    0
+#define    RTL8367C_PORT8_DEBUG_INFO_MASK    0xFF
+
+#define    RTL8367C_REG_PORT_DEBUG_INFO_CTRL5    0x091c
+#define    RTL8367C_PORT10_DEBUG_INFO_OFFSET    0
+#define    RTL8367C_PORT10_DEBUG_INFO_MASK    0xFF
+
+#define    RTL8367C_REG_PORT_DEBUG_INFO_CTRL6    0x091d
+#define    RTL8367C_PORT7_DEBUG_INDICATOR_OFFSET    14
+#define    RTL8367C_PORT7_DEBUG_INDICATOR_MASK    0xC000
+#define    RTL8367C_PORT6_DEBUG_INDICATOR_OFFSET    12
+#define    RTL8367C_PORT6_DEBUG_INDICATOR_MASK    0x3000
+#define    RTL8367C_PORT5_DEBUG_INDICATOR_OFFSET    10
+#define    RTL8367C_PORT5_DEBUG_INDICATOR_MASK    0xC00
+#define    RTL8367C_PORT4_DEBUG_INDICATOR_OFFSET    8
+#define    RTL8367C_PORT4_DEBUG_INDICATOR_MASK    0x300
+#define    RTL8367C_PORT3_DEBUG_INDICATOR_OFFSET    6
+#define    RTL8367C_PORT3_DEBUG_INDICATOR_MASK    0xC0
+#define    RTL8367C_PORT2_DEBUG_INDICATOR_OFFSET    4
+#define    RTL8367C_PORT2_DEBUG_INDICATOR_MASK    0x30
+#define    RTL8367C_PORT1_DEBUG_INDICATOR_OFFSET    2
+#define    RTL8367C_PORT1_DEBUG_INDICATOR_MASK    0xC
+#define    RTL8367C_PORT0_DEBUG_INDICATOR_OFFSET    0
+#define    RTL8367C_PORT0_DEBUG_INDICATOR_MASK    0x3
+
+#define    RTL8367C_REG_PORT_DEBUG_INFO_CTRL7    0x091e
+#define    RTL8367C_PORT10_DEBUG_INDICATOR_OFFSET    4
+#define    RTL8367C_PORT10_DEBUG_INDICATOR_MASK    0x30
+#define    RTL8367C_PORT9_DEBUG_INDICATOR_OFFSET    2
+#define    RTL8367C_PORT9_DEBUG_INDICATOR_MASK    0xC
+#define    RTL8367C_PORT8_DEBUG_INDICATOR_OFFSET    0
+#define    RTL8367C_PORT8_DEBUG_INDICATOR_MASK    0x3
+
+#define    RTL8367C_REG_FLOWCRTL_EGRESS_QUEUE_ENABLE_CTRL0    0x0930
+#define    RTL8367C_PORT1_QUEUE_MASK_OFFSET    8
+#define    RTL8367C_PORT1_QUEUE_MASK_MASK    0xFF00
+#define    RTL8367C_PORT0_QUEUE_MASK_OFFSET    0
+#define    RTL8367C_PORT0_QUEUE_MASK_MASK    0xFF
+
+#define    RTL8367C_REG_FLOWCRTL_EGRESS_QUEUE_ENABLE_CTRL1    0x0931
+#define    RTL8367C_PORT3_QUEUE_MASK_OFFSET    8
+#define    RTL8367C_PORT3_QUEUE_MASK_MASK    0xFF00
+#define    RTL8367C_PORT2_QUEUE_MASK_OFFSET    0
+#define    RTL8367C_PORT2_QUEUE_MASK_MASK    0xFF
+
+#define    RTL8367C_REG_FLOWCRTL_EGRESS_QUEUE_ENABLE_CTRL2    0x0932
+#define    RTL8367C_PORT5_QUEUE_MASK_OFFSET    8
+#define    RTL8367C_PORT5_QUEUE_MASK_MASK    0xFF00
+#define    RTL8367C_PORT4_QUEUE_MASK_OFFSET    0
+#define    RTL8367C_PORT4_QUEUE_MASK_MASK    0xFF
+
+#define    RTL8367C_REG_FLOWCRTL_EGRESS_QUEUE_ENABLE_CTRL3    0x0933
+#define    RTL8367C_PORT7_QUEUE_MASK_OFFSET    8
+#define    RTL8367C_PORT7_QUEUE_MASK_MASK    0xFF00
+#define    RTL8367C_PORT6_QUEUE_MASK_OFFSET    0
+#define    RTL8367C_PORT6_QUEUE_MASK_MASK    0xFF
+
+#define    RTL8367C_REG_FLOWCRTL_EGRESS_QUEUE_ENABLE_CTRL4    0x0934
+#define    RTL8367C_PORT9_QUEUE_MASK_OFFSET    8
+#define    RTL8367C_PORT9_QUEUE_MASK_MASK    0xFF00
+#define    RTL8367C_PORT8_QUEUE_MASK_OFFSET    0
+#define    RTL8367C_PORT8_QUEUE_MASK_MASK    0xFF
+
+#define    RTL8367C_REG_FLOWCRTL_EGRESS_QUEUE_ENABLE_CTRL5    0x0935
+#define    RTL8367C_FLOWCRTL_EGRESS_QUEUE_ENABLE_CTRL5_OFFSET    0
+#define    RTL8367C_FLOWCRTL_EGRESS_QUEUE_ENABLE_CTRL5_MASK    0xFF
+
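+/*
+ * Editor's illustrative sketch (assumption, not vendor code): the
+ * egress-queue flow-control masks above are packed two ports per register,
+ * one 8-bit queue mask each, so port n maps to CTRL0 + n / 2 with odd
+ * ports in the high byte (CTRL5 would then carry port 10).
+ */
+static inline void rtl8367c_egress_qmask_field(unsigned int port,
+					       unsigned int *reg,
+					       unsigned int *offset)
+{
+	*reg = RTL8367C_REG_FLOWCRTL_EGRESS_QUEUE_ENABLE_CTRL0 + port / 2;
+	*offset = (port & 1) ? 8 : 0;	/* odd ports use the high byte */
+}
+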
+#define    RTL8367C_REG_FLOWCRTL_EGRESS_PORT_ENABLE    0x0938
+#define    RTL8367C_FLOWCRTL_EGRESS_PORT_ENABLE_OFFSET    0
+#define    RTL8367C_FLOWCRTL_EGRESS_PORT_ENABLE_MASK    0xFF
+
+#define    RTL8367C_REG_EAV_CTRL    0x0939
+#define    RTL8367C_EAV_TRAP_CPU_OFFSET    1
+#define    RTL8367C_EAV_TRAP_CPU_MASK    0x2
+#define    RTL8367C_EAV_TRAP_8051_OFFSET    0
+#define    RTL8367C_EAV_TRAP_8051_MASK    0x1
+
+#define    RTL8367C_REG_UNTAG_DSCP_PRI_CFG    0x093a
+#define    RTL8367C_UNTAG_DSCP_PRI_CFG_OFFSET    0
+#define    RTL8367C_UNTAG_DSCP_PRI_CFG_MASK    0x1
+
+#define    RTL8367C_REG_VLAN_EGRESS_KEEP_CTRL0    0x093b
+#define    RTL8367C_PORT1_VLAN_KEEP_MASK_OFFSET    8
+#define    RTL8367C_PORT1_VLAN_KEEP_MASK_MASK    0xFF00
+#define    RTL8367C_PORT0_VLAN_KEEP_MASK_OFFSET    0
+#define    RTL8367C_PORT0_VLAN_KEEP_MASK_MASK    0xFF
+
+#define    RTL8367C_REG_VLAN_EGRESS_KEEP_CTRL1    0x093c
+#define    RTL8367C_PORT3_VLAN_KEEP_MASK_OFFSET    8
+#define    RTL8367C_PORT3_VLAN_KEEP_MASK_MASK    0xFF00
+#define    RTL8367C_PORT2_VLAN_KEEP_MASK_OFFSET    0
+#define    RTL8367C_PORT2_VLAN_KEEP_MASK_MASK    0xFF
+
+#define    RTL8367C_REG_VLAN_EGRESS_KEEP_CTRL2    0x093d
+#define    RTL8367C_PORT5_VLAN_KEEP_MASK_OFFSET    8
+#define    RTL8367C_PORT5_VLAN_KEEP_MASK_MASK    0xFF00
+#define    RTL8367C_PORT4_VLAN_KEEP_MASK_OFFSET    0
+#define    RTL8367C_PORT4_VLAN_KEEP_MASK_MASK    0xFF
+
+#define    RTL8367C_REG_VLAN_EGRESS_KEEP_CTRL3    0x093e
+#define    RTL8367C_PORT7_VLAN_KEEP_MASK_OFFSET    8
+#define    RTL8367C_PORT7_VLAN_KEEP_MASK_MASK    0xFF00
+#define    RTL8367C_PORT6_VLAN_KEEP_MASK_OFFSET    0
+#define    RTL8367C_PORT6_VLAN_KEEP_MASK_MASK    0xFF
+
+#define    RTL8367C_REG_VLAN_TRANSPARENT_EN_CFG    0x093f
+#define    RTL8367C_VLAN_TRANSPARENT_EN_CFG_OFFSET    0
+#define    RTL8367C_VLAN_TRANSPARENT_EN_CFG_MASK    0x1
+
+#define    RTL8367C_REG_IPMC_GROUP_ENTRY0_H    0x0940
+#define    RTL8367C_IPMC_GROUP_ENTRY0_H_OFFSET    0
+#define    RTL8367C_IPMC_GROUP_ENTRY0_H_MASK    0xFFF
+
+#define    RTL8367C_REG_IPMC_GROUP_ENTRY0_L    0x0941
+
+#define    RTL8367C_REG_IPMC_GROUP_ENTRY1_H    0x0942
+#define    RTL8367C_IPMC_GROUP_ENTRY1_H_OFFSET    0
+#define    RTL8367C_IPMC_GROUP_ENTRY1_H_MASK    0xFFF
+
+#define    RTL8367C_REG_IPMC_GROUP_ENTRY1_L    0x0943
+
+#define    RTL8367C_REG_IPMC_GROUP_ENTRY2_H    0x0944
+#define    RTL8367C_IPMC_GROUP_ENTRY2_H_OFFSET    0
+#define    RTL8367C_IPMC_GROUP_ENTRY2_H_MASK    0xFFF
+
+#define    RTL8367C_REG_IPMC_GROUP_ENTRY2_L    0x0945
+
+#define    RTL8367C_REG_IPMC_GROUP_ENTRY3_H    0x0946
+#define    RTL8367C_IPMC_GROUP_ENTRY3_H_OFFSET    0
+#define    RTL8367C_IPMC_GROUP_ENTRY3_H_MASK    0xFFF
+
+#define    RTL8367C_REG_IPMC_GROUP_ENTRY3_L    0x0947
+
+#define    RTL8367C_REG_IPMC_GROUP_ENTRY4_H    0x0948
+#define    RTL8367C_IPMC_GROUP_ENTRY4_H_OFFSET    0
+#define    RTL8367C_IPMC_GROUP_ENTRY4_H_MASK    0xFFF
+
+#define    RTL8367C_REG_IPMC_GROUP_ENTRY4_L    0x0949
+
+#define    RTL8367C_REG_IPMC_GROUP_ENTRY5_H    0x094a
+#define    RTL8367C_IPMC_GROUP_ENTRY5_H_OFFSET    0
+#define    RTL8367C_IPMC_GROUP_ENTRY5_H_MASK    0xFFF
+
+#define    RTL8367C_REG_IPMC_GROUP_ENTRY5_L    0x094b
+
+#define    RTL8367C_REG_IPMC_GROUP_ENTRY6_H    0x094c
+#define    RTL8367C_IPMC_GROUP_ENTRY6_H_OFFSET    0
+#define    RTL8367C_IPMC_GROUP_ENTRY6_H_MASK    0xFFF
+
+#define    RTL8367C_REG_IPMC_GROUP_ENTRY6_L    0x094d
+
+#define    RTL8367C_REG_IPMC_GROUP_ENTRY7_H    0x094e
+#define    RTL8367C_IPMC_GROUP_ENTRY7_H_OFFSET    0
+#define    RTL8367C_IPMC_GROUP_ENTRY7_H_MASK    0xFFF
+
+#define    RTL8367C_REG_IPMC_GROUP_ENTRY7_L    0x094f
+
+#define    RTL8367C_REG_IPMC_GROUP_ENTRY8_H    0x0950
+#define    RTL8367C_IPMC_GROUP_ENTRY8_H_OFFSET    0
+#define    RTL8367C_IPMC_GROUP_ENTRY8_H_MASK    0xFFF
+
+#define    RTL8367C_REG_IPMC_GROUP_ENTRY8_L    0x0951
+
+#define    RTL8367C_REG_IPMC_GROUP_ENTRY9_H    0x0952
+#define    RTL8367C_IPMC_GROUP_ENTRY9_H_OFFSET    0
+#define    RTL8367C_IPMC_GROUP_ENTRY9_H_MASK    0xFFF
+
+#define    RTL8367C_REG_IPMC_GROUP_ENTRY9_L    0x0953
+
+#define    RTL8367C_REG_IPMC_GROUP_ENTRY10_H    0x0954
+#define    RTL8367C_IPMC_GROUP_ENTRY10_H_OFFSET    0
+#define    RTL8367C_IPMC_GROUP_ENTRY10_H_MASK    0xFFF
+
+#define    RTL8367C_REG_IPMC_GROUP_ENTRY10_L    0x0955
+
+#define    RTL8367C_REG_IPMC_GROUP_ENTRY11_H    0x0956
+#define    RTL8367C_IPMC_GROUP_ENTRY11_H_OFFSET    0
+#define    RTL8367C_IPMC_GROUP_ENTRY11_H_MASK    0xFFF
+
+#define    RTL8367C_REG_IPMC_GROUP_ENTRY11_L    0x0957
+
+#define    RTL8367C_REG_IPMC_GROUP_ENTRY12_H    0x0958
+#define    RTL8367C_IPMC_GROUP_ENTRY12_H_OFFSET    0
+#define    RTL8367C_IPMC_GROUP_ENTRY12_H_MASK    0xFFF
+
+#define    RTL8367C_REG_IPMC_GROUP_ENTRY12_L    0x0959
+
+#define    RTL8367C_REG_IPMC_GROUP_ENTRY13_H    0x095a
+#define    RTL8367C_IPMC_GROUP_ENTRY13_H_OFFSET    0
+#define    RTL8367C_IPMC_GROUP_ENTRY13_H_MASK    0xFFF
+
+#define    RTL8367C_REG_IPMC_GROUP_ENTRY13_L    0x095b
+
+#define    RTL8367C_REG_IPMC_GROUP_ENTRY14_H    0x095c
+#define    RTL8367C_IPMC_GROUP_ENTRY14_H_OFFSET    0
+#define    RTL8367C_IPMC_GROUP_ENTRY14_H_MASK    0xFFF
+
+#define    RTL8367C_REG_IPMC_GROUP_ENTRY14_L    0x095d
+
+#define    RTL8367C_REG_IPMC_GROUP_ENTRY15_H    0x095e
+#define    RTL8367C_IPMC_GROUP_ENTRY15_H_OFFSET    0
+#define    RTL8367C_IPMC_GROUP_ENTRY15_H_MASK    0xFFF
+
+#define    RTL8367C_REG_IPMC_GROUP_ENTRY15_L    0x095f
+
+#define    RTL8367C_REG_IPMC_GROUP_ENTRY16_H    0x0960
+#define    RTL8367C_IPMC_GROUP_ENTRY16_H_OFFSET    0
+#define    RTL8367C_IPMC_GROUP_ENTRY16_H_MASK    0xFFF
+
+#define    RTL8367C_REG_IPMC_GROUP_ENTRY16_L    0x0961
+
+#define    RTL8367C_REG_IPMC_GROUP_ENTRY17_H    0x0962
+#define    RTL8367C_IPMC_GROUP_ENTRY17_H_OFFSET    0
+#define    RTL8367C_IPMC_GROUP_ENTRY17_H_MASK    0xFFF
+
+#define    RTL8367C_REG_IPMC_GROUP_ENTRY17_L    0x0963
+
+#define    RTL8367C_REG_IPMC_GROUP_ENTRY18_H    0x0964
+#define    RTL8367C_IPMC_GROUP_ENTRY18_H_OFFSET    0
+#define    RTL8367C_IPMC_GROUP_ENTRY18_H_MASK    0xFFF
+
+#define    RTL8367C_REG_IPMC_GROUP_ENTRY18_L    0x0965
+
+#define    RTL8367C_REG_IPMC_GROUP_ENTRY19_H    0x0966
+#define    RTL8367C_IPMC_GROUP_ENTRY19_H_OFFSET    0
+#define    RTL8367C_IPMC_GROUP_ENTRY19_H_MASK    0xFFF
+
+#define    RTL8367C_REG_IPMC_GROUP_ENTRY19_L    0x0967
+
+#define    RTL8367C_REG_IPMC_GROUP_ENTRY20_H    0x0968
+#define    RTL8367C_IPMC_GROUP_ENTRY20_H_OFFSET    0
+#define    RTL8367C_IPMC_GROUP_ENTRY20_H_MASK    0xFFF
+
+#define    RTL8367C_REG_IPMC_GROUP_ENTRY20_L    0x0969
+
+#define    RTL8367C_REG_IPMC_GROUP_ENTRY21_H    0x096a
+#define    RTL8367C_IPMC_GROUP_ENTRY21_H_OFFSET    0
+#define    RTL8367C_IPMC_GROUP_ENTRY21_H_MASK    0xFFF
+
+#define    RTL8367C_REG_IPMC_GROUP_ENTRY21_L    0x096b
+
+#define    RTL8367C_REG_IPMC_GROUP_ENTRY22_H    0x096c
+#define    RTL8367C_IPMC_GROUP_ENTRY22_H_OFFSET    0
+#define    RTL8367C_IPMC_GROUP_ENTRY22_H_MASK    0xFFF
+
+#define    RTL8367C_REG_IPMC_GROUP_ENTRY22_L    0x096d
+
+#define    RTL8367C_REG_IPMC_GROUP_ENTRY23_H    0x096e
+#define    RTL8367C_IPMC_GROUP_ENTRY23_H_OFFSET    0
+#define    RTL8367C_IPMC_GROUP_ENTRY23_H_MASK    0xFFF
+
+#define    RTL8367C_REG_IPMC_GROUP_ENTRY23_L    0x096f
+
+#define    RTL8367C_REG_IPMC_GROUP_ENTRY24_H    0x0970
+#define    RTL8367C_IPMC_GROUP_ENTRY24_H_OFFSET    0
+#define    RTL8367C_IPMC_GROUP_ENTRY24_H_MASK    0xFFF
+
+#define    RTL8367C_REG_IPMC_GROUP_ENTRY24_L    0x0971
+
+#define    RTL8367C_REG_IPMC_GROUP_ENTRY25_H    0x0972
+#define    RTL8367C_IPMC_GROUP_ENTRY25_H_OFFSET    0
+#define    RTL8367C_IPMC_GROUP_ENTRY25_H_MASK    0xFFF
+
+#define    RTL8367C_REG_IPMC_GROUP_ENTRY25_L    0x0973
+
+#define    RTL8367C_REG_IPMC_GROUP_ENTRY26_H    0x0974
+#define    RTL8367C_IPMC_GROUP_ENTRY26_H_OFFSET    0
+#define    RTL8367C_IPMC_GROUP_ENTRY26_H_MASK    0xFFF
+
+#define    RTL8367C_REG_IPMC_GROUP_ENTRY26_L    0x0975
+
+#define    RTL8367C_REG_IPMC_GROUP_ENTRY27_H    0x0976
+#define    RTL8367C_IPMC_GROUP_ENTRY27_H_OFFSET    0
+#define    RTL8367C_IPMC_GROUP_ENTRY27_H_MASK    0xFFF
+
+#define    RTL8367C_REG_IPMC_GROUP_ENTRY27_L    0x0977
+
+#define    RTL8367C_REG_IPMC_GROUP_ENTRY28_H    0x0978
+#define    RTL8367C_IPMC_GROUP_ENTRY28_H_OFFSET    0
+#define    RTL8367C_IPMC_GROUP_ENTRY28_H_MASK    0xFFF
+
+#define    RTL8367C_REG_IPMC_GROUP_ENTRY28_L    0x0979
+
+#define    RTL8367C_REG_IPMC_GROUP_ENTRY29_H    0x097a
+#define    RTL8367C_IPMC_GROUP_ENTRY29_H_OFFSET    0
+#define    RTL8367C_IPMC_GROUP_ENTRY29_H_MASK    0xFFF
+
+#define    RTL8367C_REG_IPMC_GROUP_ENTRY29_L    0x097b
+
+#define    RTL8367C_REG_IPMC_GROUP_ENTRY30_H    0x097c
+#define    RTL8367C_IPMC_GROUP_ENTRY30_H_OFFSET    0
+#define    RTL8367C_IPMC_GROUP_ENTRY30_H_MASK    0xFFF
+
+#define    RTL8367C_REG_IPMC_GROUP_ENTRY30_L    0x097d
+
+#define    RTL8367C_REG_IPMC_GROUP_ENTRY31_H    0x097e
+#define    RTL8367C_IPMC_GROUP_ENTRY31_H_OFFSET    0
+#define    RTL8367C_IPMC_GROUP_ENTRY31_H_MASK    0xFFF
+
+#define    RTL8367C_REG_IPMC_GROUP_ENTRY31_L    0x097f
+
+#define    RTL8367C_REG_IPMC_GROUP_ENTRY32_H    0x0980
+#define    RTL8367C_IPMC_GROUP_ENTRY32_H_OFFSET    0
+#define    RTL8367C_IPMC_GROUP_ENTRY32_H_MASK    0xFFF
+
+#define    RTL8367C_REG_IPMC_GROUP_ENTRY32_L    0x0981
+
+#define    RTL8367C_REG_IPMC_GROUP_ENTRY33_H    0x0982
+#define    RTL8367C_IPMC_GROUP_ENTRY33_H_OFFSET    0
+#define    RTL8367C_IPMC_GROUP_ENTRY33_H_MASK    0xFFF
+
+#define    RTL8367C_REG_IPMC_GROUP_ENTRY33_L    0x0983
+
+#define    RTL8367C_REG_IPMC_GROUP_ENTRY34_H    0x0984
+#define    RTL8367C_IPMC_GROUP_ENTRY34_H_OFFSET    0
+#define    RTL8367C_IPMC_GROUP_ENTRY34_H_MASK    0xFFF
+
+#define    RTL8367C_REG_IPMC_GROUP_ENTRY34_L    0x0985
+
+#define    RTL8367C_REG_IPMC_GROUP_ENTRY35_H    0x0986
+#define    RTL8367C_IPMC_GROUP_ENTRY35_H_OFFSET    0
+#define    RTL8367C_IPMC_GROUP_ENTRY35_H_MASK    0xFFF
+
+#define    RTL8367C_REG_IPMC_GROUP_ENTRY35_L    0x0987
+
+#define    RTL8367C_REG_IPMC_GROUP_ENTRY36_H    0x0988
+#define    RTL8367C_IPMC_GROUP_ENTRY36_H_OFFSET    0
+#define    RTL8367C_IPMC_GROUP_ENTRY36_H_MASK    0xFFF
+
+#define    RTL8367C_REG_IPMC_GROUP_ENTRY36_L    0x0989
+
+#define    RTL8367C_REG_IPMC_GROUP_ENTRY37_H    0x098a
+#define    RTL8367C_IPMC_GROUP_ENTRY37_H_OFFSET    0
+#define    RTL8367C_IPMC_GROUP_ENTRY37_H_MASK    0xFFF
+
+#define    RTL8367C_REG_IPMC_GROUP_ENTRY37_L    0x098b
+
+#define    RTL8367C_REG_IPMC_GROUP_ENTRY38_H    0x098c
+#define    RTL8367C_IPMC_GROUP_ENTRY38_H_OFFSET    0
+#define    RTL8367C_IPMC_GROUP_ENTRY38_H_MASK    0xFFF
+
+#define    RTL8367C_REG_IPMC_GROUP_ENTRY38_L    0x098d
+
+#define    RTL8367C_REG_IPMC_GROUP_ENTRY39_H    0x098e
+#define    RTL8367C_IPMC_GROUP_ENTRY39_H_OFFSET    0
+#define    RTL8367C_IPMC_GROUP_ENTRY39_H_MASK    0xFFF
+
+#define    RTL8367C_REG_IPMC_GROUP_ENTRY39_L    0x098f
+
+#define    RTL8367C_REG_IPMC_GROUP_ENTRY40_H    0x0990
+#define    RTL8367C_IPMC_GROUP_ENTRY40_H_OFFSET    0
+#define    RTL8367C_IPMC_GROUP_ENTRY40_H_MASK    0xFFF
+
+#define    RTL8367C_REG_IPMC_GROUP_ENTRY40_L    0x0991
+
+#define    RTL8367C_REG_IPMC_GROUP_ENTRY41_H    0x0992
+#define    RTL8367C_IPMC_GROUP_ENTRY41_H_OFFSET    0
+#define    RTL8367C_IPMC_GROUP_ENTRY41_H_MASK    0xFFF
+
+#define    RTL8367C_REG_IPMC_GROUP_ENTRY41_L    0x0993
+
+#define    RTL8367C_REG_IPMC_GROUP_ENTRY42_H    0x0994
+#define    RTL8367C_IPMC_GROUP_ENTRY42_H_OFFSET    0
+#define    RTL8367C_IPMC_GROUP_ENTRY42_H_MASK    0xFFF
+
+#define    RTL8367C_REG_IPMC_GROUP_ENTRY42_L    0x0995
+
+#define    RTL8367C_REG_IPMC_GROUP_ENTRY43_H    0x0996
+#define    RTL8367C_IPMC_GROUP_ENTRY43_H_OFFSET    0
+#define    RTL8367C_IPMC_GROUP_ENTRY43_H_MASK    0xFFF
+
+#define    RTL8367C_REG_IPMC_GROUP_ENTRY43_L    0x0997
+
+#define    RTL8367C_REG_IPMC_GROUP_ENTRY44_H    0x0998
+#define    RTL8367C_IPMC_GROUP_ENTRY44_H_OFFSET    0
+#define    RTL8367C_IPMC_GROUP_ENTRY44_H_MASK    0xFFF
+
+#define    RTL8367C_REG_IPMC_GROUP_ENTRY44_L    0x0999
+
+#define    RTL8367C_REG_IPMC_GROUP_ENTRY45_H    0x099a
+#define    RTL8367C_IPMC_GROUP_ENTRY45_H_OFFSET    0
+#define    RTL8367C_IPMC_GROUP_ENTRY45_H_MASK    0xFFF
+
+#define    RTL8367C_REG_IPMC_GROUP_ENTRY45_L    0x099b
+
+#define    RTL8367C_REG_IPMC_GROUP_ENTRY46_H    0x099c
+#define    RTL8367C_IPMC_GROUP_ENTRY46_H_OFFSET    0
+#define    RTL8367C_IPMC_GROUP_ENTRY46_H_MASK    0xFFF
+
+#define    RTL8367C_REG_IPMC_GROUP_ENTRY46_L    0x099d
+
+#define    RTL8367C_REG_IPMC_GROUP_ENTRY47_H    0x099e
+#define    RTL8367C_IPMC_GROUP_ENTRY47_H_OFFSET    0
+#define    RTL8367C_IPMC_GROUP_ENTRY47_H_MASK    0xFFF
+
+#define    RTL8367C_REG_IPMC_GROUP_ENTRY47_L    0x099f
+
+#define    RTL8367C_REG_IPMC_GROUP_ENTRY48_H    0x09a0
+#define    RTL8367C_IPMC_GROUP_ENTRY48_H_OFFSET    0
+#define    RTL8367C_IPMC_GROUP_ENTRY48_H_MASK    0xFFF
+
+#define    RTL8367C_REG_IPMC_GROUP_ENTRY48_L    0x09a1
+
+#define    RTL8367C_REG_IPMC_GROUP_ENTRY49_H    0x09a2
+#define    RTL8367C_IPMC_GROUP_ENTRY49_H_OFFSET    0
+#define    RTL8367C_IPMC_GROUP_ENTRY49_H_MASK    0xFFF
+
+#define    RTL8367C_REG_IPMC_GROUP_ENTRY49_L    0x09a3
+
+#define    RTL8367C_REG_IPMC_GROUP_ENTRY50_H    0x09a4
+#define    RTL8367C_IPMC_GROUP_ENTRY50_H_OFFSET    0
+#define    RTL8367C_IPMC_GROUP_ENTRY50_H_MASK    0xFFF
+
+#define    RTL8367C_REG_IPMC_GROUP_ENTRY50_L    0x09a5
+
+#define    RTL8367C_REG_IPMC_GROUP_ENTRY51_H    0x09a6
+#define    RTL8367C_IPMC_GROUP_ENTRY51_H_OFFSET    0
+#define    RTL8367C_IPMC_GROUP_ENTRY51_H_MASK    0xFFF
+
+#define    RTL8367C_REG_IPMC_GROUP_ENTRY51_L    0x09a7
+
+#define    RTL8367C_REG_IPMC_GROUP_ENTRY52_H    0x09a8
+#define    RTL8367C_IPMC_GROUP_ENTRY52_H_OFFSET    0
+#define    RTL8367C_IPMC_GROUP_ENTRY52_H_MASK    0xFFF
+
+#define    RTL8367C_REG_IPMC_GROUP_ENTRY52_L    0x09a9
+
+#define    RTL8367C_REG_IPMC_GROUP_ENTRY53_H    0x09aa
+#define    RTL8367C_IPMC_GROUP_ENTRY53_H_OFFSET    0
+#define    RTL8367C_IPMC_GROUP_ENTRY53_H_MASK    0xFFF
+
+#define    RTL8367C_REG_IPMC_GROUP_ENTRY53_L    0x09ab
+
+#define    RTL8367C_REG_IPMC_GROUP_ENTRY54_H    0x09ac
+#define    RTL8367C_IPMC_GROUP_ENTRY54_H_OFFSET    0
+#define    RTL8367C_IPMC_GROUP_ENTRY54_H_MASK    0xFFF
+
+#define    RTL8367C_REG_IPMC_GROUP_ENTRY54_L    0x09ad
+
+#define    RTL8367C_REG_IPMC_GROUP_ENTRY55_H    0x09ae
+#define    RTL8367C_IPMC_GROUP_ENTRY55_H_OFFSET    0
+#define    RTL8367C_IPMC_GROUP_ENTRY55_H_MASK    0xFFF
+
+#define    RTL8367C_REG_IPMC_GROUP_ENTRY55_L    0x09af
+
+#define    RTL8367C_REG_IPMC_GROUP_ENTRY56_H    0x09b0
+#define    RTL8367C_IPMC_GROUP_ENTRY56_H_OFFSET    0
+#define    RTL8367C_IPMC_GROUP_ENTRY56_H_MASK    0xFFF
+
+#define    RTL8367C_REG_IPMC_GROUP_ENTRY56_L    0x09b1
+
+#define    RTL8367C_REG_IPMC_GROUP_ENTRY57_H    0x09b2
+#define    RTL8367C_IPMC_GROUP_ENTRY57_H_OFFSET    0
+#define    RTL8367C_IPMC_GROUP_ENTRY57_H_MASK    0xFFF
+
+#define    RTL8367C_REG_IPMC_GROUP_ENTRY57_L    0x09b3
+
+#define    RTL8367C_REG_IPMC_GROUP_ENTRY58_H    0x09b4
+#define    RTL8367C_IPMC_GROUP_ENTRY58_H_OFFSET    0
+#define    RTL8367C_IPMC_GROUP_ENTRY58_H_MASK    0xFFF
+
+#define    RTL8367C_REG_IPMC_GROUP_ENTRY58_L    0x09b5
+
+#define    RTL8367C_REG_IPMC_GROUP_ENTRY59_H    0x09b6
+#define    RTL8367C_IPMC_GROUP_ENTRY59_H_OFFSET    0
+#define    RTL8367C_IPMC_GROUP_ENTRY59_H_MASK    0xFFF
+
+#define    RTL8367C_REG_IPMC_GROUP_ENTRY59_L    0x09b7
+
+#define    RTL8367C_REG_IPMC_GROUP_ENTRY60_H    0x09b8
+#define    RTL8367C_IPMC_GROUP_ENTRY60_H_OFFSET    0
+#define    RTL8367C_IPMC_GROUP_ENTRY60_H_MASK    0xFFF
+
+#define    RTL8367C_REG_IPMC_GROUP_ENTRY60_L    0x09b9
+
+#define    RTL8367C_REG_IPMC_GROUP_ENTRY61_H    0x09ba
+#define    RTL8367C_IPMC_GROUP_ENTRY61_H_OFFSET    0
+#define    RTL8367C_IPMC_GROUP_ENTRY61_H_MASK    0xFFF
+
+#define    RTL8367C_REG_IPMC_GROUP_ENTRY61_L    0x09bb
+
+#define    RTL8367C_REG_IPMC_GROUP_ENTRY62_H    0x09bc
+#define    RTL8367C_IPMC_GROUP_ENTRY62_H_OFFSET    0
+#define    RTL8367C_IPMC_GROUP_ENTRY62_H_MASK    0xFFF
+
+#define    RTL8367C_REG_IPMC_GROUP_ENTRY62_L    0x09bd
+
+#define    RTL8367C_REG_IPMC_GROUP_ENTRY63_H    0x09be
+#define    RTL8367C_IPMC_GROUP_ENTRY63_H_OFFSET    0
+#define    RTL8367C_IPMC_GROUP_ENTRY63_H_MASK    0xFFF
+
+#define    RTL8367C_REG_IPMC_GROUP_ENTRY63_L    0x09bf
+
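+/*
+ * Editor's illustrative sketch (not part of the original header): each
+ * IPMC group entry is split across an _L register (a full 16-bit word)
+ * and an _H register (12 more bits), giving 28 bits in total, enough for
+ * the variable low bits of an IPv4 multicast address (224.0.0.0/4); that
+ * reading is an assumption. Entry n sits at ENTRY0_H + 2n / ENTRY0_L + 2n.
+ */
+static inline void rtl8367c_ipmc_entry_regs(unsigned int entry,
+					    unsigned int group28,
+					    unsigned int *reg_h,
+					    unsigned int *val_h,
+					    unsigned int *reg_l,
+					    unsigned int *val_l)
+{
+	*reg_h = RTL8367C_REG_IPMC_GROUP_ENTRY0_H + 2 * entry;
+	*reg_l = RTL8367C_REG_IPMC_GROUP_ENTRY0_L + 2 * entry;
+	*val_h = (group28 >> 16) & RTL8367C_IPMC_GROUP_ENTRY0_H_MASK;
+	*val_l = group28 & 0xFFFF;	/* low word of the group address */
+}
+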
+#define    RTL8367C_REG_UNKNOWN_UNICAST_DA_PORT_BEHAVE    0x09C0
+#define    RTL8367C_Port7_ACTION_OFFSET    14
+#define    RTL8367C_Port7_ACTION_MASK    0xC000
+#define    RTL8367C_Port6_ACTION_OFFSET    12
+#define    RTL8367C_Port6_ACTION_MASK    0x3000
+#define    RTL8367C_Port5_ACTION_OFFSET    10
+#define    RTL8367C_Port5_ACTION_MASK    0xC00
+#define    RTL8367C_Port4_ACTION_OFFSET    8
+#define    RTL8367C_Port4_ACTION_MASK    0x300
+#define    RTL8367C_Port3_ACTION_OFFSET    6
+#define    RTL8367C_Port3_ACTION_MASK    0xC0
+#define    RTL8367C_Port2_ACTION_OFFSET    4
+#define    RTL8367C_Port2_ACTION_MASK    0x30
+#define    RTL8367C_Port1_ACTION_OFFSET    2
+#define    RTL8367C_Port1_ACTION_MASK    0xC
+#define    RTL8367C_Port0_ACTION_OFFSET    0
+#define    RTL8367C_Port0_ACTION_MASK    0x3
+
+#define    RTL8367C_REG_MIRROR_CTRL3    0x09C1
+#define    RTL8367C_MIRROR_ACL_OVERRIDE_EN_OFFSET    2
+#define    RTL8367C_MIRROR_ACL_OVERRIDE_EN_MASK    0x4
+#define    RTL8367C_MIRROR_TX_OVERRIDE_EN_OFFSET    1
+#define    RTL8367C_MIRROR_TX_OVERRIDE_EN_MASK    0x2
+#define    RTL8367C_MIRROR_RX_OVERRIDE_EN_OFFSET    0
+#define    RTL8367C_MIRROR_RX_OVERRIDE_EN_MASK    0x1
+
+#define    RTL8367C_REG_DPM_DUMMY02    0x09C2
+
+#define    RTL8367C_REG_DPM_DUMMY03    0x09C3
+
+#define    RTL8367C_REG_DPM_DUMMY04    0x09C4
+
+#define    RTL8367C_REG_DPM_DUMMY05    0x09C5
+
+#define    RTL8367C_REG_DPM_DUMMY06    0x09C6
+
+#define    RTL8367C_REG_DPM_DUMMY07    0x09C7
+
+#define    RTL8367C_REG_DPM_DUMMY08    0x09C8
+
+#define    RTL8367C_REG_DPM_DUMMY09    0x09C9
+
+#define    RTL8367C_REG_DPM_DUMMY10    0x09CA
+
+#define    RTL8367C_REG_DPM_DUMMY11    0x09CB
+
+#define    RTL8367C_REG_DPM_DUMMY12    0x09CC
+
+#define    RTL8367C_REG_DPM_DUMMY13    0x09CD
+
+#define    RTL8367C_REG_DPM_DUMMY14    0x09CE
+
+#define    RTL8367C_REG_DPM_DUMMY15    0x09CF
+
+#define    RTL8367C_REG_VLAN_EGRESS_TRANS_CTRL0    0x09D0
+#define    RTL8367C_VLAN_EGRESS_TRANS_CTRL0_OFFSET    0
+#define    RTL8367C_VLAN_EGRESS_TRANS_CTRL0_MASK    0x7FF
+
+#define    RTL8367C_REG_VLAN_EGRESS_TRANS_CTRL1    0x09D1
+#define    RTL8367C_VLAN_EGRESS_TRANS_CTRL1_OFFSET    0
+#define    RTL8367C_VLAN_EGRESS_TRANS_CTRL1_MASK    0x7FF
+
+#define    RTL8367C_REG_VLAN_EGRESS_TRANS_CTRL2    0x09D2
+#define    RTL8367C_VLAN_EGRESS_TRANS_CTRL2_OFFSET    0
+#define    RTL8367C_VLAN_EGRESS_TRANS_CTRL2_MASK    0x7FF
+
+#define    RTL8367C_REG_VLAN_EGRESS_TRANS_CTRL3    0x09D3
+#define    RTL8367C_VLAN_EGRESS_TRANS_CTRL3_OFFSET    0
+#define    RTL8367C_VLAN_EGRESS_TRANS_CTRL3_MASK    0x7FF
+
+#define    RTL8367C_REG_VLAN_EGRESS_TRANS_CTRL4    0x09D4
+#define    RTL8367C_VLAN_EGRESS_TRANS_CTRL4_OFFSET    0
+#define    RTL8367C_VLAN_EGRESS_TRANS_CTRL4_MASK    0x7FF
+
+#define    RTL8367C_REG_VLAN_EGRESS_TRANS_CTRL5    0x09D5
+#define    RTL8367C_VLAN_EGRESS_TRANS_CTRL5_OFFSET    0
+#define    RTL8367C_VLAN_EGRESS_TRANS_CTRL5_MASK    0x7FF
+
+#define    RTL8367C_REG_VLAN_EGRESS_TRANS_CTRL6    0x09D6
+#define    RTL8367C_VLAN_EGRESS_TRANS_CTRL6_OFFSET    0
+#define    RTL8367C_VLAN_EGRESS_TRANS_CTRL6_MASK    0x7FF
+
+#define    RTL8367C_REG_VLAN_EGRESS_TRANS_CTRL7    0x09D7
+#define    RTL8367C_VLAN_EGRESS_TRANS_CTRL7_OFFSET    0
+#define    RTL8367C_VLAN_EGRESS_TRANS_CTRL7_MASK    0x7FF
+
+#define    RTL8367C_REG_VLAN_EGRESS_TRANS_CTRL8    0x09D8
+#define    RTL8367C_VLAN_EGRESS_TRANS_CTRL8_OFFSET    0
+#define    RTL8367C_VLAN_EGRESS_TRANS_CTRL8_MASK    0x7FF
+
+#define    RTL8367C_REG_VLAN_EGRESS_TRANS_CTRL9    0x09D9
+#define    RTL8367C_VLAN_EGRESS_TRANS_CTRL9_OFFSET    0
+#define    RTL8367C_VLAN_EGRESS_TRANS_CTRL9_MASK    0x7FF
+
+#define    RTL8367C_REG_MIRROR_CTRL2    0x09DA
+#define    RTL8367C_MIRROR_REALKEEP_EN_OFFSET    4
+#define    RTL8367C_MIRROR_REALKEEP_EN_MASK    0x10
+#define    RTL8367C_MIRROR_RX_ISOLATION_LEAKY_OFFSET    3
+#define    RTL8367C_MIRROR_RX_ISOLATION_LEAKY_MASK    0x8
+#define    RTL8367C_MIRROR_TX_ISOLATION_LEAKY_OFFSET    2
+#define    RTL8367C_MIRROR_TX_ISOLATION_LEAKY_MASK    0x4
+#define    RTL8367C_MIRROR_RX_VLAN_LEAKY_OFFSET    1
+#define    RTL8367C_MIRROR_RX_VLAN_LEAKY_MASK    0x2
+#define    RTL8367C_MIRROR_TX_VLAN_LEAKY_OFFSET    0
+#define    RTL8367C_MIRROR_TX_VLAN_LEAKY_MASK    0x1
+
+#define    RTL8367C_REG_OUTPUT_DROP_CFG    0x09DB
+#define    RTL8367C_ENABLE_PMASK_EXT_OFFSET    13
+#define    RTL8367C_ENABLE_PMASK_EXT_MASK    0xE000
+#define    RTL8367C_ENABLE_BC_OFFSET    12
+#define    RTL8367C_ENABLE_BC_MASK    0x1000
+#define    RTL8367C_ENABLE_MC_OFFSET    11
+#define    RTL8367C_ENABLE_MC_MASK    0x800
+#define    RTL8367C_ENABLE_UC_OFFSET    10
+#define    RTL8367C_ENABLE_UC_MASK    0x400
+#define    RTL8367C_ENABLE_PMASK_OFFSET    0
+#define    RTL8367C_ENABLE_PMASK_MASK    0xFF
+
+#define    RTL8367C_REG_UNKNOWN_UNICAST_DA_PORT_BEHAVE_EXT    0x09DC
+#define    RTL8367C_PORT10_ACTION_OFFSET    4
+#define    RTL8367C_PORT10_ACTION_MASK    0x30
+#define    RTL8367C_PORT9_ACTION_OFFSET    2
+#define    RTL8367C_PORT9_ACTION_MASK    0xC
+#define    RTL8367C_PORT8_ACTION_OFFSET    0
+#define    RTL8367C_PORT8_ACTION_MASK    0x3
+
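+/*
+ * Editor's illustrative sketch (assumption, not vendor code): the per-port
+ * unknown-unicast action is a 2-bit field, ports 0-7 in
+ * UNKNOWN_UNICAST_DA_PORT_BEHAVE (0x09C0) and ports 8-10 in the _EXT
+ * register (0x09DC). The value itself is (read(*reg) >> *offset) & 0x3.
+ */
+static inline void rtl8367c_unknown_uc_field(unsigned int port,
+					     unsigned int *reg,
+					     unsigned int *offset)
+{
+	if (port < 8) {
+		*reg = RTL8367C_REG_UNKNOWN_UNICAST_DA_PORT_BEHAVE;
+		*offset = 2 * port;		/* PortN_ACTION field */
+	} else {
+		*reg = RTL8367C_REG_UNKNOWN_UNICAST_DA_PORT_BEHAVE_EXT;
+		*offset = 2 * (port - 8);	/* PORT8..10_ACTION field */
+	}
+}
+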
+#define    RTL8367C_REG_RMK_CFG_SEL_CTRL    0x09DF
+#define    RTL8367C_RMK_1Q_CFG_SEL_OFFSET    2
+#define    RTL8367C_RMK_1Q_CFG_SEL_MASK    0x4
+#define    RTL8367C_RMK_DSCP_CFG_SEL_OFFSET    0
+#define    RTL8367C_RMK_DSCP_CFG_SEL_MASK    0x3
+
+#define    RTL8367C_REG_QOS_DSCP_REMARK_DSCP_CTRL0    0x09E0
+#define    RTL8367C_DSCP1_DSCP_OFFSET    8
+#define    RTL8367C_DSCP1_DSCP_MASK    0x3F00
+#define    RTL8367C_DSCP0_DSCP_OFFSET    0
+#define    RTL8367C_DSCP0_DSCP_MASK    0x3F
+
+#define    RTL8367C_REG_QOS_DSCP_REMARK_DSCP_CTRL1    0x09E1
+#define    RTL8367C_DSCP3_DSCP_OFFSET    8
+#define    RTL8367C_DSCP3_DSCP_MASK    0x3F00
+#define    RTL8367C_DSCP2_DSCP_OFFSET    0
+#define    RTL8367C_DSCP2_DSCP_MASK    0x3F
+
+#define    RTL8367C_REG_QOS_DSCP_REMARK_DSCP_CTRL2    0x09E2
+#define    RTL8367C_DSCP5_DSCP_OFFSET    8
+#define    RTL8367C_DSCP5_DSCP_MASK    0x3F00
+#define    RTL8367C_DSCP4_DSCP_OFFSET    0
+#define    RTL8367C_DSCP4_DSCP_MASK    0x3F
+
+#define    RTL8367C_REG_QOS_DSCP_REMARK_DSCP_CTRL3    0x09E3
+#define    RTL8367C_DSCP7_DSCP_OFFSET    8
+#define    RTL8367C_DSCP7_DSCP_MASK    0x3F00
+#define    RTL8367C_DSCP6_DSCP_OFFSET    0
+#define    RTL8367C_DSCP6_DSCP_MASK    0x3F
+
+#define    RTL8367C_REG_QOS_DSCP_REMARK_DSCP_CTRL4    0x09E4
+#define    RTL8367C_DSCP9_DSCP_OFFSET    8
+#define    RTL8367C_DSCP9_DSCP_MASK    0x3F00
+#define    RTL8367C_DSCP8_DSCP_OFFSET    0
+#define    RTL8367C_DSCP8_DSCP_MASK    0x3F
+
+#define    RTL8367C_REG_QOS_DSCP_REMARK_DSCP_CTRL5    0x09E5
+#define    RTL8367C_DSCP11_DSCP_OFFSET    8
+#define    RTL8367C_DSCP11_DSCP_MASK    0x3F00
+#define    RTL8367C_DSCP10_DSCP_OFFSET    0
+#define    RTL8367C_DSCP10_DSCP_MASK    0x3F
+
+#define    RTL8367C_REG_QOS_DSCP_REMARK_DSCP_CTRL6    0x09E6
+#define    RTL8367C_DSCP13_DSCP_OFFSET    8
+#define    RTL8367C_DSCP13_DSCP_MASK    0x3F00
+#define    RTL8367C_DSCP12_DSCP_OFFSET    0
+#define    RTL8367C_DSCP12_DSCP_MASK    0x3F
+
+#define    RTL8367C_REG_QOS_DSCP_REMARK_DSCP_CTRL7    0x09E7
+#define    RTL8367C_DSCP15_DSCP_OFFSET    8
+#define    RTL8367C_DSCP15_DSCP_MASK    0x3F00
+#define    RTL8367C_DSCP14_DSCP_OFFSET    0
+#define    RTL8367C_DSCP14_DSCP_MASK    0x3F
+
+#define    RTL8367C_REG_QOS_DSCP_REMARK_DSCP_CTRL8    0x09E8
+#define    RTL8367C_DSCP17_DSCP_OFFSET    8
+#define    RTL8367C_DSCP17_DSCP_MASK    0x3F00
+#define    RTL8367C_DSCP16_DSCP_OFFSET    0
+#define    RTL8367C_DSCP16_DSCP_MASK    0x3F
+
+#define    RTL8367C_REG_QOS_DSCP_REMARK_DSCP_CTRL9    0x09E9
+#define    RTL8367C_DSCP19_DSCP_OFFSET    8
+#define    RTL8367C_DSCP19_DSCP_MASK    0x3F00
+#define    RTL8367C_DSCP18_DSCP_OFFSET    0
+#define    RTL8367C_DSCP18_DSCP_MASK    0x3F
+
+#define    RTL8367C_REG_QOS_DSCP_REMARK_DSCP_CTRL10    0x09EA
+#define    RTL8367C_DSCP21_DSCP_OFFSET    8
+#define    RTL8367C_DSCP21_DSCP_MASK    0x3F00
+#define    RTL8367C_DSCP20_DSCP_OFFSET    0
+#define    RTL8367C_DSCP20_DSCP_MASK    0x3F
+
+#define    RTL8367C_REG_QOS_DSCP_REMARK_DSCP_CTRL11    0x09EB
+#define    RTL8367C_DSCP23_DSCP_OFFSET    8
+#define    RTL8367C_DSCP23_DSCP_MASK    0x3F00
+#define    RTL8367C_DSCP22_DSCP_OFFSET    0
+#define    RTL8367C_DSCP22_DSCP_MASK    0x3F
+
+#define    RTL8367C_REG_QOS_DSCP_REMARK_DSCP_CTRL12    0x09EC
+#define    RTL8367C_DSCP25_DSCP_OFFSET    8
+#define    RTL8367C_DSCP25_DSCP_MASK    0x3F00
+#define    RTL8367C_DSCP24_DSCP_OFFSET    0
+#define    RTL8367C_DSCP24_DSCP_MASK    0x3F
+
+#define    RTL8367C_REG_QOS_DSCP_REMARK_DSCP_CTRL13    0x09ED
+#define    RTL8367C_DSCP27_DSCP_OFFSET    8
+#define    RTL8367C_DSCP27_DSCP_MASK    0x3F00
+#define    RTL8367C_DSCP26_DSCP_OFFSET    0
+#define    RTL8367C_DSCP26_DSCP_MASK    0x3F
+
+#define    RTL8367C_REG_QOS_DSCP_REMARK_DSCP_CTRL14    0x09EE
+#define    RTL8367C_DSCP29_DSCP_OFFSET    8
+#define    RTL8367C_DSCP29_DSCP_MASK    0x3F00
+#define    RTL8367C_DSCP28_DSCP_OFFSET    0
+#define    RTL8367C_DSCP28_DSCP_MASK    0x3F
+
+#define    RTL8367C_REG_QOS_DSCP_REMARK_DSCP_CTRL15    0x09EF
+#define    RTL8367C_DSCP31_DSCP_OFFSET    8
+#define    RTL8367C_DSCP31_DSCP_MASK    0x3F00
+#define    RTL8367C_DSCP30_DSCP_OFFSET    0
+#define    RTL8367C_DSCP30_DSCP_MASK    0x3F
+
+#define    RTL8367C_REG_QOS_DSCP_REMARK_DSCP_CTRL16    0x09F0
+#define    RTL8367C_DSCP33_DSCP_OFFSET    8
+#define    RTL8367C_DSCP33_DSCP_MASK    0x3F00
+#define    RTL8367C_DSCP32_DSCP_OFFSET    0
+#define    RTL8367C_DSCP32_DSCP_MASK    0x3F
+
+#define    RTL8367C_REG_QOS_DSCP_REMARK_DSCP_CTRL17    0x09F1
+#define    RTL8367C_DSCP35_DSCP_OFFSET    8
+#define    RTL8367C_DSCP35_DSCP_MASK    0x3F00
+#define    RTL8367C_DSCP34_DSCP_OFFSET    0
+#define    RTL8367C_DSCP34_DSCP_MASK    0x3F
+
+#define    RTL8367C_REG_QOS_DSCP_REMARK_DSCP_CTRL18    0x09F2
+#define    RTL8367C_DSCP37_DSCP_OFFSET    8
+#define    RTL8367C_DSCP37_DSCP_MASK    0x3F00
+#define    RTL8367C_DSCP36_DSCP_OFFSET    0
+#define    RTL8367C_DSCP36_DSCP_MASK    0x3F
+
+#define    RTL8367C_REG_QOS_DSCP_REMARK_DSCP_CTRL19    0x09F3
+#define    RTL8367C_DSCP39_DSCP_OFFSET    8
+#define    RTL8367C_DSCP39_DSCP_MASK    0x3F00
+#define    RTL8367C_DSCP38_DSCP_OFFSET    0
+#define    RTL8367C_DSCP38_DSCP_MASK    0x3F
+
+#define    RTL8367C_REG_QOS_DSCP_REMARK_DSCP_CTRL20    0x09F4
+#define    RTL8367C_DSCP41_DSCP_OFFSET    8
+#define    RTL8367C_DSCP41_DSCP_MASK    0x3F00
+#define    RTL8367C_DSCP40_DSCP_OFFSET    0
+#define    RTL8367C_DSCP40_DSCP_MASK    0x3F
+
+#define    RTL8367C_REG_QOS_DSCP_REMARK_DSCP_CTRL21    0x09F5
+#define    RTL8367C_DSCP43_DSCP_OFFSET    8
+#define    RTL8367C_DSCP43_DSCP_MASK    0x3F00
+#define    RTL8367C_DSCP42_DSCP_OFFSET    0
+#define    RTL8367C_DSCP42_DSCP_MASK    0x3F
+
+#define    RTL8367C_REG_QOS_DSCP_REMARK_DSCP_CTRL22    0x09F6
+#define    RTL8367C_DSCP45_DSCP_OFFSET    8
+#define    RTL8367C_DSCP45_DSCP_MASK    0x3F00
+#define    RTL8367C_DSCP44_DSCP_OFFSET    0
+#define    RTL8367C_DSCP44_DSCP_MASK    0x3F
+
+#define    RTL8367C_REG_QOS_DSCP_REMARK_DSCP_CTRL23    0x09F7
+#define    RTL8367C_DSCP47_DSCP_OFFSET    8
+#define    RTL8367C_DSCP47_DSCP_MASK    0x3F00
+#define    RTL8367C_DSCP46_DSCP_OFFSET    0
+#define    RTL8367C_DSCP46_DSCP_MASK    0x3F
+
+#define    RTL8367C_REG_QOS_DSCP_REMARK_DSCP_CTRL24    0x09F8
+#define    RTL8367C_DSCP49_DSCP_OFFSET    8
+#define    RTL8367C_DSCP49_DSCP_MASK    0x3F00
+#define    RTL8367C_DSCP48_DSCP_OFFSET    0
+#define    RTL8367C_DSCP48_DSCP_MASK    0x3F
+
+#define    RTL8367C_REG_QOS_DSCP_REMARK_DSCP_CTRL25    0x09F9
+#define    RTL8367C_DSCP51_DSCP_OFFSET    8
+#define    RTL8367C_DSCP51_DSCP_MASK    0x3F00
+#define    RTL8367C_DSCP50_DSCP_OFFSET    0
+#define    RTL8367C_DSCP50_DSCP_MASK    0x3F
+
+#define    RTL8367C_REG_QOS_DSCP_REMARK_DSCP_CTRL26    0x09FA
+#define    RTL8367C_DSCP53_DSCP_OFFSET    8
+#define    RTL8367C_DSCP53_DSCP_MASK    0x3F00
+#define    RTL8367C_DSCP52_DSCP_OFFSET    0
+#define    RTL8367C_DSCP52_DSCP_MASK    0x3F
+
+#define    RTL8367C_REG_QOS_DSCP_REMARK_DSCP_CTRL27    0x09FB
+#define    RTL8367C_DSCP55_DSCP_OFFSET    8
+#define    RTL8367C_DSCP55_DSCP_MASK    0x3F00
+#define    RTL8367C_DSCP54_DSCP_OFFSET    0
+#define    RTL8367C_DSCP54_DSCP_MASK    0x3F
+
+#define    RTL8367C_REG_QOS_DSCP_REMARK_DSCP_CTRL28    0x09FC
+#define    RTL8367C_DSCP57_DSCP_OFFSET    8
+#define    RTL8367C_DSCP57_DSCP_MASK    0x3F00
+#define    RTL8367C_DSCP56_DSCP_OFFSET    0
+#define    RTL8367C_DSCP56_DSCP_MASK    0x3F
+
+#define    RTL8367C_REG_QOS_DSCP_REMARK_DSCP_CTRL29    0x09FD
+#define    RTL8367C_DSCP59_DSCP_OFFSET    8
+#define    RTL8367C_DSCP59_DSCP_MASK    0x3F00
+#define    RTL8367C_DSCP58_DSCP_OFFSET    0
+#define    RTL8367C_DSCP58_DSCP_MASK    0x3F
+
+#define    RTL8367C_REG_QOS_DSCP_REMARK_DSCP_CTRL30    0x09FE
+#define    RTL8367C_DSCP61_DSCP_OFFSET    8
+#define    RTL8367C_DSCP61_DSCP_MASK    0x3F00
+#define    RTL8367C_DSCP60_DSCP_OFFSET    0
+#define    RTL8367C_DSCP60_DSCP_MASK    0x3F
+
+#define    RTL8367C_REG_QOS_DSCP_REMARK_DSCP_CTRL31    0x09FF
+#define    RTL8367C_DSCP63_DSCP_OFFSET    8
+#define    RTL8367C_DSCP63_DSCP_MASK    0x3F00
+#define    RTL8367C_DSCP62_DSCP_OFFSET    0
+#define    RTL8367C_DSCP62_DSCP_MASK    0x3F
+
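+/*
+ * Editor's illustrative sketch (not part of the vendor header): the 64
+ * DSCP remark codes above are packed two per register, DSCP 2n in bits 5:0
+ * and DSCP 2n+1 in bits 13:8 of QOS_DSCP_REMARK_DSCP_CTRL<n>.
+ */
+static inline void rtl8367c_dscp_remark_field(unsigned int dscp,
+					      unsigned int *reg,
+					      unsigned int *offset)
+{
+	*reg = RTL8367C_REG_QOS_DSCP_REMARK_DSCP_CTRL0 + dscp / 2;
+	*offset = (dscp & 1) ? 8 : 0;	/* odd codes use the high field */
+}
+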
+/* (16'h0a00) l2_reg: MSTI port states and L2 lookup-table (LUT) registers */
+
+#define    RTL8367C_REG_VLAN_MSTI0_CTRL0    0x0a00
+#define    RTL8367C_VLAN_MSTI0_CTRL0_PORT7_STATE_OFFSET    14
+#define    RTL8367C_VLAN_MSTI0_CTRL0_PORT7_STATE_MASK    0xC000
+#define    RTL8367C_VLAN_MSTI0_CTRL0_PORT6_STATE_OFFSET    12
+#define    RTL8367C_VLAN_MSTI0_CTRL0_PORT6_STATE_MASK    0x3000
+#define    RTL8367C_VLAN_MSTI0_CTRL0_PORT5_STATE_OFFSET    10
+#define    RTL8367C_VLAN_MSTI0_CTRL0_PORT5_STATE_MASK    0xC00
+#define    RTL8367C_VLAN_MSTI0_CTRL0_PORT4_STATE_OFFSET    8
+#define    RTL8367C_VLAN_MSTI0_CTRL0_PORT4_STATE_MASK    0x300
+#define    RTL8367C_VLAN_MSTI0_CTRL0_PORT3_STATE_OFFSET    6
+#define    RTL8367C_VLAN_MSTI0_CTRL0_PORT3_STATE_MASK    0xC0
+#define    RTL8367C_VLAN_MSTI0_CTRL0_PORT2_STATE_OFFSET    4
+#define    RTL8367C_VLAN_MSTI0_CTRL0_PORT2_STATE_MASK    0x30
+#define    RTL8367C_VLAN_MSTI0_CTRL0_PORT1_STATE_OFFSET    2
+#define    RTL8367C_VLAN_MSTI0_CTRL0_PORT1_STATE_MASK    0xC
+#define    RTL8367C_VLAN_MSTI0_CTRL0_PORT0_STATE_OFFSET    0
+#define    RTL8367C_VLAN_MSTI0_CTRL0_PORT0_STATE_MASK    0x3
+
+#define    RTL8367C_REG_VLAN_MSTI0_CTRL1    0x0a01
+#define    RTL8367C_VLAN_MSTI0_CTRL1_PORT10_STATE_OFFSET    4
+#define    RTL8367C_VLAN_MSTI0_CTRL1_PORT10_STATE_MASK    0x30
+#define    RTL8367C_VLAN_MSTI0_CTRL1_PORT9_STATE_OFFSET    2
+#define    RTL8367C_VLAN_MSTI0_CTRL1_PORT9_STATE_MASK    0xC
+#define    RTL8367C_VLAN_MSTI0_CTRL1_PORT8_STATE_OFFSET    0
+#define    RTL8367C_VLAN_MSTI0_CTRL1_PORT8_STATE_MASK    0x3
+
+#define    RTL8367C_REG_VLAN_MSTI1_CTRL0    0x0a02
+#define    RTL8367C_VLAN_MSTI1_CTRL0_PORT7_STATE_OFFSET    14
+#define    RTL8367C_VLAN_MSTI1_CTRL0_PORT7_STATE_MASK    0xC000
+#define    RTL8367C_VLAN_MSTI1_CTRL0_PORT6_STATE_OFFSET    12
+#define    RTL8367C_VLAN_MSTI1_CTRL0_PORT6_STATE_MASK    0x3000
+#define    RTL8367C_VLAN_MSTI1_CTRL0_PORT5_STATE_OFFSET    10
+#define    RTL8367C_VLAN_MSTI1_CTRL0_PORT5_STATE_MASK    0xC00
+#define    RTL8367C_VLAN_MSTI1_CTRL0_PORT4_STATE_OFFSET    8
+#define    RTL8367C_VLAN_MSTI1_CTRL0_PORT4_STATE_MASK    0x300
+#define    RTL8367C_VLAN_MSTI1_CTRL0_PORT3_STATE_OFFSET    6
+#define    RTL8367C_VLAN_MSTI1_CTRL0_PORT3_STATE_MASK    0xC0
+#define    RTL8367C_VLAN_MSTI1_CTRL0_PORT2_STATE_OFFSET    4
+#define    RTL8367C_VLAN_MSTI1_CTRL0_PORT2_STATE_MASK    0x30
+#define    RTL8367C_VLAN_MSTI1_CTRL0_PORT1_STATE_OFFSET    2
+#define    RTL8367C_VLAN_MSTI1_CTRL0_PORT1_STATE_MASK    0xC
+#define    RTL8367C_VLAN_MSTI1_CTRL0_PORT0_STATE_OFFSET    0
+#define    RTL8367C_VLAN_MSTI1_CTRL0_PORT0_STATE_MASK    0x3
+
+#define    RTL8367C_REG_VLAN_MSTI1_CTRL1    0x0a03
+#define    RTL8367C_VLAN_MSTI1_CTRL1_PORT10_STATE_OFFSET    4
+#define    RTL8367C_VLAN_MSTI1_CTRL1_PORT10_STATE_MASK    0x30
+#define    RTL8367C_VLAN_MSTI1_CTRL1_PORT9_STATE_OFFSET    2
+#define    RTL8367C_VLAN_MSTI1_CTRL1_PORT9_STATE_MASK    0xC
+#define    RTL8367C_VLAN_MSTI1_CTRL1_PORT8_STATE_OFFSET    0
+#define    RTL8367C_VLAN_MSTI1_CTRL1_PORT8_STATE_MASK    0x3
+
+#define    RTL8367C_REG_VLAN_MSTI2_CTRL0    0x0a04
+#define    RTL8367C_VLAN_MSTI2_CTRL0_PORT7_STATE_OFFSET    14
+#define    RTL8367C_VLAN_MSTI2_CTRL0_PORT7_STATE_MASK    0xC000
+#define    RTL8367C_VLAN_MSTI2_CTRL0_PORT6_STATE_OFFSET    12
+#define    RTL8367C_VLAN_MSTI2_CTRL0_PORT6_STATE_MASK    0x3000
+#define    RTL8367C_VLAN_MSTI2_CTRL0_PORT5_STATE_OFFSET    10
+#define    RTL8367C_VLAN_MSTI2_CTRL0_PORT5_STATE_MASK    0xC00
+#define    RTL8367C_VLAN_MSTI2_CTRL0_PORT4_STATE_OFFSET    8
+#define    RTL8367C_VLAN_MSTI2_CTRL0_PORT4_STATE_MASK    0x300
+#define    RTL8367C_VLAN_MSTI2_CTRL0_PORT3_STATE_OFFSET    6
+#define    RTL8367C_VLAN_MSTI2_CTRL0_PORT3_STATE_MASK    0xC0
+#define    RTL8367C_VLAN_MSTI2_CTRL0_PORT2_STATE_OFFSET    4
+#define    RTL8367C_VLAN_MSTI2_CTRL0_PORT2_STATE_MASK    0x30
+#define    RTL8367C_VLAN_MSTI2_CTRL0_PORT1_STATE_OFFSET    2
+#define    RTL8367C_VLAN_MSTI2_CTRL0_PORT1_STATE_MASK    0xC
+#define    RTL8367C_VLAN_MSTI2_CTRL0_PORT0_STATE_OFFSET    0
+#define    RTL8367C_VLAN_MSTI2_CTRL0_PORT0_STATE_MASK    0x3
+
+#define    RTL8367C_REG_VLAN_MSTI2_CTRL1    0x0a05
+#define    RTL8367C_VLAN_MSTI2_CTRL1_PORT10_STATE_OFFSET    4
+#define    RTL8367C_VLAN_MSTI2_CTRL1_PORT10_STATE_MASK    0x30
+#define    RTL8367C_VLAN_MSTI2_CTRL1_PORT9_STATE_OFFSET    2
+#define    RTL8367C_VLAN_MSTI2_CTRL1_PORT9_STATE_MASK    0xC
+#define    RTL8367C_VLAN_MSTI2_CTRL1_PORT8_STATE_OFFSET    0
+#define    RTL8367C_VLAN_MSTI2_CTRL1_PORT8_STATE_MASK    0x3
+
+#define    RTL8367C_REG_VLAN_MSTI3_CTRL0    0x0a06
+#define    RTL8367C_VLAN_MSTI3_CTRL0_PORT7_STATE_OFFSET    14
+#define    RTL8367C_VLAN_MSTI3_CTRL0_PORT7_STATE_MASK    0xC000
+#define    RTL8367C_VLAN_MSTI3_CTRL0_PORT6_STATE_OFFSET    12
+#define    RTL8367C_VLAN_MSTI3_CTRL0_PORT6_STATE_MASK    0x3000
+#define    RTL8367C_VLAN_MSTI3_CTRL0_PORT5_STATE_OFFSET    10
+#define    RTL8367C_VLAN_MSTI3_CTRL0_PORT5_STATE_MASK    0xC00
+#define    RTL8367C_VLAN_MSTI3_CTRL0_PORT4_STATE_OFFSET    8
+#define    RTL8367C_VLAN_MSTI3_CTRL0_PORT4_STATE_MASK    0x300
+#define    RTL8367C_VLAN_MSTI3_CTRL0_PORT3_STATE_OFFSET    6
+#define    RTL8367C_VLAN_MSTI3_CTRL0_PORT3_STATE_MASK    0xC0
+#define    RTL8367C_VLAN_MSTI3_CTRL0_PORT2_STATE_OFFSET    4
+#define    RTL8367C_VLAN_MSTI3_CTRL0_PORT2_STATE_MASK    0x30
+#define    RTL8367C_VLAN_MSTI3_CTRL0_PORT1_STATE_OFFSET    2
+#define    RTL8367C_VLAN_MSTI3_CTRL0_PORT1_STATE_MASK    0xC
+#define    RTL8367C_VLAN_MSTI3_CTRL0_PORT0_STATE_OFFSET    0
+#define    RTL8367C_VLAN_MSTI3_CTRL0_PORT0_STATE_MASK    0x3
+
+#define    RTL8367C_REG_VLAN_MSTI3_CTRL1    0x0a07
+#define    RTL8367C_VLAN_MSTI3_CTRL1_PORT10_STATE_OFFSET    4
+#define    RTL8367C_VLAN_MSTI3_CTRL1_PORT10_STATE_MASK    0x30
+#define    RTL8367C_VLAN_MSTI3_CTRL1_PORT9_STATE_OFFSET    2
+#define    RTL8367C_VLAN_MSTI3_CTRL1_PORT9_STATE_MASK    0xC
+#define    RTL8367C_VLAN_MSTI3_CTRL1_PORT8_STATE_OFFSET    0
+#define    RTL8367C_VLAN_MSTI3_CTRL1_PORT8_STATE_MASK    0x3
+
+#define    RTL8367C_REG_VLAN_MSTI4_CTRL0    0x0a08
+#define    RTL8367C_VLAN_MSTI4_CTRL0_PORT7_STATE_OFFSET    14
+#define    RTL8367C_VLAN_MSTI4_CTRL0_PORT7_STATE_MASK    0xC000
+#define    RTL8367C_VLAN_MSTI4_CTRL0_PORT6_STATE_OFFSET    12
+#define    RTL8367C_VLAN_MSTI4_CTRL0_PORT6_STATE_MASK    0x3000
+#define    RTL8367C_VLAN_MSTI4_CTRL0_PORT5_STATE_OFFSET    10
+#define    RTL8367C_VLAN_MSTI4_CTRL0_PORT5_STATE_MASK    0xC00
+#define    RTL8367C_VLAN_MSTI4_CTRL0_PORT4_STATE_OFFSET    8
+#define    RTL8367C_VLAN_MSTI4_CTRL0_PORT4_STATE_MASK    0x300
+#define    RTL8367C_VLAN_MSTI4_CTRL0_PORT3_STATE_OFFSET    6
+#define    RTL8367C_VLAN_MSTI4_CTRL0_PORT3_STATE_MASK    0xC0
+#define    RTL8367C_VLAN_MSTI4_CTRL0_PORT2_STATE_OFFSET    4
+#define    RTL8367C_VLAN_MSTI4_CTRL0_PORT2_STATE_MASK    0x30
+#define    RTL8367C_VLAN_MSTI4_CTRL0_PORT1_STATE_OFFSET    2
+#define    RTL8367C_VLAN_MSTI4_CTRL0_PORT1_STATE_MASK    0xC
+#define    RTL8367C_VLAN_MSTI4_CTRL0_PORT0_STATE_OFFSET    0
+#define    RTL8367C_VLAN_MSTI4_CTRL0_PORT0_STATE_MASK    0x3
+
+#define    RTL8367C_REG_VLAN_MSTI4_CTRL1    0x0a09
+#define    RTL8367C_VLAN_MSTI4_CTRL1_PORT10_STATE_OFFSET    4
+#define    RTL8367C_VLAN_MSTI4_CTRL1_PORT10_STATE_MASK    0x30
+#define    RTL8367C_VLAN_MSTI4_CTRL1_PORT9_STATE_OFFSET    2
+#define    RTL8367C_VLAN_MSTI4_CTRL1_PORT9_STATE_MASK    0xC
+#define    RTL8367C_VLAN_MSTI4_CTRL1_PORT8_STATE_OFFSET    0
+#define    RTL8367C_VLAN_MSTI4_CTRL1_PORT8_STATE_MASK    0x3
+
+#define    RTL8367C_REG_VLAN_MSTI5_CTRL0    0x0a0a
+#define    RTL8367C_VLAN_MSTI5_CTRL0_PORT7_STATE_OFFSET    14
+#define    RTL8367C_VLAN_MSTI5_CTRL0_PORT7_STATE_MASK    0xC000
+#define    RTL8367C_VLAN_MSTI5_CTRL0_PORT6_STATE_OFFSET    12
+#define    RTL8367C_VLAN_MSTI5_CTRL0_PORT6_STATE_MASK    0x3000
+#define    RTL8367C_VLAN_MSTI5_CTRL0_PORT5_STATE_OFFSET    10
+#define    RTL8367C_VLAN_MSTI5_CTRL0_PORT5_STATE_MASK    0xC00
+#define    RTL8367C_VLAN_MSTI5_CTRL0_PORT4_STATE_OFFSET    8
+#define    RTL8367C_VLAN_MSTI5_CTRL0_PORT4_STATE_MASK    0x300
+#define    RTL8367C_VLAN_MSTI5_CTRL0_PORT3_STATE_OFFSET    6
+#define    RTL8367C_VLAN_MSTI5_CTRL0_PORT3_STATE_MASK    0xC0
+#define    RTL8367C_VLAN_MSTI5_CTRL0_PORT2_STATE_OFFSET    4
+#define    RTL8367C_VLAN_MSTI5_CTRL0_PORT2_STATE_MASK    0x30
+#define    RTL8367C_VLAN_MSTI5_CTRL0_PORT1_STATE_OFFSET    2
+#define    RTL8367C_VLAN_MSTI5_CTRL0_PORT1_STATE_MASK    0xC
+#define    RTL8367C_VLAN_MSTI5_CTRL0_PORT0_STATE_OFFSET    0
+#define    RTL8367C_VLAN_MSTI5_CTRL0_PORT0_STATE_MASK    0x3
+
+#define    RTL8367C_REG_VLAN_MSTI5_CTRL1    0x0a0b
+#define    RTL8367C_VLAN_MSTI5_CTRL1_PORT10_STATE_OFFSET    4
+#define    RTL8367C_VLAN_MSTI5_CTRL1_PORT10_STATE_MASK    0x30
+#define    RTL8367C_VLAN_MSTI5_CTRL1_PORT9_STATE_OFFSET    2
+#define    RTL8367C_VLAN_MSTI5_CTRL1_PORT9_STATE_MASK    0xC
+#define    RTL8367C_VLAN_MSTI5_CTRL1_PORT8_STATE_OFFSET    0
+#define    RTL8367C_VLAN_MSTI5_CTRL1_PORT8_STATE_MASK    0x3
+
+#define    RTL8367C_REG_VLAN_MSTI6_CTRL0    0x0a0c
+#define    RTL8367C_VLAN_MSTI6_CTRL0_PORT7_STATE_OFFSET    14
+#define    RTL8367C_VLAN_MSTI6_CTRL0_PORT7_STATE_MASK    0xC000
+#define    RTL8367C_VLAN_MSTI6_CTRL0_PORT6_STATE_OFFSET    12
+#define    RTL8367C_VLAN_MSTI6_CTRL0_PORT6_STATE_MASK    0x3000
+#define    RTL8367C_VLAN_MSTI6_CTRL0_PORT5_STATE_OFFSET    10
+#define    RTL8367C_VLAN_MSTI6_CTRL0_PORT5_STATE_MASK    0xC00
+#define    RTL8367C_VLAN_MSTI6_CTRL0_PORT4_STATE_OFFSET    8
+#define    RTL8367C_VLAN_MSTI6_CTRL0_PORT4_STATE_MASK    0x300
+#define    RTL8367C_VLAN_MSTI6_CTRL0_PORT3_STATE_OFFSET    6
+#define    RTL8367C_VLAN_MSTI6_CTRL0_PORT3_STATE_MASK    0xC0
+#define    RTL8367C_VLAN_MSTI6_CTRL0_PORT2_STATE_OFFSET    4
+#define    RTL8367C_VLAN_MSTI6_CTRL0_PORT2_STATE_MASK    0x30
+#define    RTL8367C_VLAN_MSTI6_CTRL0_PORT1_STATE_OFFSET    2
+#define    RTL8367C_VLAN_MSTI6_CTRL0_PORT1_STATE_MASK    0xC
+#define    RTL8367C_VLAN_MSTI6_CTRL0_PORT0_STATE_OFFSET    0
+#define    RTL8367C_VLAN_MSTI6_CTRL0_PORT0_STATE_MASK    0x3
+
+#define    RTL8367C_REG_VLAN_MSTI6_CTRL1    0x0a0d
+#define    RTL8367C_VLAN_MSTI6_CTRL1_PORT10_STATE_OFFSET    4
+#define    RTL8367C_VLAN_MSTI6_CTRL1_PORT10_STATE_MASK    0x30
+#define    RTL8367C_VLAN_MSTI6_CTRL1_PORT9_STATE_OFFSET    2
+#define    RTL8367C_VLAN_MSTI6_CTRL1_PORT9_STATE_MASK    0xC
+#define    RTL8367C_VLAN_MSTI6_CTRL1_PORT8_STATE_OFFSET    0
+#define    RTL8367C_VLAN_MSTI6_CTRL1_PORT8_STATE_MASK    0x3
+
+#define    RTL8367C_REG_VLAN_MSTI7_CTRL0    0x0a0e
+#define    RTL8367C_VLAN_MSTI7_CTRL0_PORT7_STATE_OFFSET    14
+#define    RTL8367C_VLAN_MSTI7_CTRL0_PORT7_STATE_MASK    0xC000
+#define    RTL8367C_VLAN_MSTI7_CTRL0_PORT6_STATE_OFFSET    12
+#define    RTL8367C_VLAN_MSTI7_CTRL0_PORT6_STATE_MASK    0x3000
+#define    RTL8367C_VLAN_MSTI7_CTRL0_PORT5_STATE_OFFSET    10
+#define    RTL8367C_VLAN_MSTI7_CTRL0_PORT5_STATE_MASK    0xC00
+#define    RTL8367C_VLAN_MSTI7_CTRL0_PORT4_STATE_OFFSET    8
+#define    RTL8367C_VLAN_MSTI7_CTRL0_PORT4_STATE_MASK    0x300
+#define    RTL8367C_VLAN_MSTI7_CTRL0_PORT3_STATE_OFFSET    6
+#define    RTL8367C_VLAN_MSTI7_CTRL0_PORT3_STATE_MASK    0xC0
+#define    RTL8367C_VLAN_MSTI7_CTRL0_PORT2_STATE_OFFSET    4
+#define    RTL8367C_VLAN_MSTI7_CTRL0_PORT2_STATE_MASK    0x30
+#define    RTL8367C_VLAN_MSTI7_CTRL0_PORT1_STATE_OFFSET    2
+#define    RTL8367C_VLAN_MSTI7_CTRL0_PORT1_STATE_MASK    0xC
+#define    RTL8367C_VLAN_MSTI7_CTRL0_PORT0_STATE_OFFSET    0
+#define    RTL8367C_VLAN_MSTI7_CTRL0_PORT0_STATE_MASK    0x3
+
+#define    RTL8367C_REG_VLAN_MSTI7_CTRL1    0x0a0f
+#define    RTL8367C_VLAN_MSTI7_CTRL1_PORT10_STATE_OFFSET    4
+#define    RTL8367C_VLAN_MSTI7_CTRL1_PORT10_STATE_MASK    0x30
+#define    RTL8367C_VLAN_MSTI7_CTRL1_PORT9_STATE_OFFSET    2
+#define    RTL8367C_VLAN_MSTI7_CTRL1_PORT9_STATE_MASK    0xC
+#define    RTL8367C_VLAN_MSTI7_CTRL1_PORT8_STATE_OFFSET    0
+#define    RTL8367C_VLAN_MSTI7_CTRL1_PORT8_STATE_MASK    0x3
+
+#define    RTL8367C_REG_VLAN_MSTI8_CTRL0    0x0a10
+#define    RTL8367C_VLAN_MSTI8_CTRL0_PORT7_STATE_OFFSET    14
+#define    RTL8367C_VLAN_MSTI8_CTRL0_PORT7_STATE_MASK    0xC000
+#define    RTL8367C_VLAN_MSTI8_CTRL0_PORT6_STATE_OFFSET    12
+#define    RTL8367C_VLAN_MSTI8_CTRL0_PORT6_STATE_MASK    0x3000
+#define    RTL8367C_VLAN_MSTI8_CTRL0_PORT5_STATE_OFFSET    10
+#define    RTL8367C_VLAN_MSTI8_CTRL0_PORT5_STATE_MASK    0xC00
+#define    RTL8367C_VLAN_MSTI8_CTRL0_PORT4_STATE_OFFSET    8
+#define    RTL8367C_VLAN_MSTI8_CTRL0_PORT4_STATE_MASK    0x300
+#define    RTL8367C_VLAN_MSTI8_CTRL0_PORT3_STATE_OFFSET    6
+#define    RTL8367C_VLAN_MSTI8_CTRL0_PORT3_STATE_MASK    0xC0
+#define    RTL8367C_VLAN_MSTI8_CTRL0_PORT2_STATE_OFFSET    4
+#define    RTL8367C_VLAN_MSTI8_CTRL0_PORT2_STATE_MASK    0x30
+#define    RTL8367C_VLAN_MSTI8_CTRL0_PORT1_STATE_OFFSET    2
+#define    RTL8367C_VLAN_MSTI8_CTRL0_PORT1_STATE_MASK    0xC
+#define    RTL8367C_VLAN_MSTI8_CTRL0_PORT0_STATE_OFFSET    0
+#define    RTL8367C_VLAN_MSTI8_CTRL0_PORT0_STATE_MASK    0x3
+
+#define    RTL8367C_REG_VLAN_MSTI8_CTRL1    0x0a11
+#define    RTL8367C_VLAN_MSTI8_CTRL1_PORT10_STATE_OFFSET    4
+#define    RTL8367C_VLAN_MSTI8_CTRL1_PORT10_STATE_MASK    0x30
+#define    RTL8367C_VLAN_MSTI8_CTRL1_PORT9_STATE_OFFSET    2
+#define    RTL8367C_VLAN_MSTI8_CTRL1_PORT9_STATE_MASK    0xC
+#define    RTL8367C_VLAN_MSTI8_CTRL1_PORT8_STATE_OFFSET    0
+#define    RTL8367C_VLAN_MSTI8_CTRL1_PORT8_STATE_MASK    0x3
+
+#define    RTL8367C_REG_VLAN_MSTI9_CTRL0    0x0a12
+#define    RTL8367C_VLAN_MSTI9_CTRL0_PORT7_STATE_OFFSET    14
+#define    RTL8367C_VLAN_MSTI9_CTRL0_PORT7_STATE_MASK    0xC000
+#define    RTL8367C_VLAN_MSTI9_CTRL0_PORT6_STATE_OFFSET    12
+#define    RTL8367C_VLAN_MSTI9_CTRL0_PORT6_STATE_MASK    0x3000
+#define    RTL8367C_VLAN_MSTI9_CTRL0_PORT5_STATE_OFFSET    10
+#define    RTL8367C_VLAN_MSTI9_CTRL0_PORT5_STATE_MASK    0xC00
+#define    RTL8367C_VLAN_MSTI9_CTRL0_PORT4_STATE_OFFSET    8
+#define    RTL8367C_VLAN_MSTI9_CTRL0_PORT4_STATE_MASK    0x300
+#define    RTL8367C_VLAN_MSTI9_CTRL0_PORT3_STATE_OFFSET    6
+#define    RTL8367C_VLAN_MSTI9_CTRL0_PORT3_STATE_MASK    0xC0
+#define    RTL8367C_VLAN_MSTI9_CTRL0_PORT2_STATE_OFFSET    4
+#define    RTL8367C_VLAN_MSTI9_CTRL0_PORT2_STATE_MASK    0x30
+#define    RTL8367C_VLAN_MSTI9_CTRL0_PORT1_STATE_OFFSET    2
+#define    RTL8367C_VLAN_MSTI9_CTRL0_PORT1_STATE_MASK    0xC
+#define    RTL8367C_VLAN_MSTI9_CTRL0_PORT0_STATE_OFFSET    0
+#define    RTL8367C_VLAN_MSTI9_CTRL0_PORT0_STATE_MASK    0x3
+
+#define    RTL8367C_REG_VLAN_MSTI9_CTRL1    0x0a13
+#define    RTL8367C_VLAN_MSTI9_CTRL1_PORT10_STATE_OFFSET    4
+#define    RTL8367C_VLAN_MSTI9_CTRL1_PORT10_STATE_MASK    0x30
+#define    RTL8367C_VLAN_MSTI9_CTRL1_PORT9_STATE_OFFSET    2
+#define    RTL8367C_VLAN_MSTI9_CTRL1_PORT9_STATE_MASK    0xC
+#define    RTL8367C_VLAN_MSTI9_CTRL1_PORT8_STATE_OFFSET    0
+#define    RTL8367C_VLAN_MSTI9_CTRL1_PORT8_STATE_MASK    0x3
+
+#define    RTL8367C_REG_VLAN_MSTI10_CTRL0    0x0a14
+#define    RTL8367C_VLAN_MSTI10_CTRL0_PORT7_STATE_OFFSET    14
+#define    RTL8367C_VLAN_MSTI10_CTRL0_PORT7_STATE_MASK    0xC000
+#define    RTL8367C_VLAN_MSTI10_CTRL0_PORT6_STATE_OFFSET    12
+#define    RTL8367C_VLAN_MSTI10_CTRL0_PORT6_STATE_MASK    0x3000
+#define    RTL8367C_VLAN_MSTI10_CTRL0_PORT5_STATE_OFFSET    10
+#define    RTL8367C_VLAN_MSTI10_CTRL0_PORT5_STATE_MASK    0xC00
+#define    RTL8367C_VLAN_MSTI10_CTRL0_PORT4_STATE_OFFSET    8
+#define    RTL8367C_VLAN_MSTI10_CTRL0_PORT4_STATE_MASK    0x300
+#define    RTL8367C_VLAN_MSTI10_CTRL0_PORT3_STATE_OFFSET    6
+#define    RTL8367C_VLAN_MSTI10_CTRL0_PORT3_STATE_MASK    0xC0
+#define    RTL8367C_VLAN_MSTI10_CTRL0_PORT2_STATE_OFFSET    4
+#define    RTL8367C_VLAN_MSTI10_CTRL0_PORT2_STATE_MASK    0x30
+#define    RTL8367C_VLAN_MSTI10_CTRL0_PORT1_STATE_OFFSET    2
+#define    RTL8367C_VLAN_MSTI10_CTRL0_PORT1_STATE_MASK    0xC
+#define    RTL8367C_VLAN_MSTI10_CTRL0_PORT0_STATE_OFFSET    0
+#define    RTL8367C_VLAN_MSTI10_CTRL0_PORT0_STATE_MASK    0x3
+
+#define    RTL8367C_REG_VLAN_MSTI10_CTRL1    0x0a15
+#define    RTL8367C_VLAN_MSTI10_CTRL1_PORT10_STATE_OFFSET    4
+#define    RTL8367C_VLAN_MSTI10_CTRL1_PORT10_STATE_MASK    0x30
+#define    RTL8367C_VLAN_MSTI10_CTRL1_PORT9_STATE_OFFSET    2
+#define    RTL8367C_VLAN_MSTI10_CTRL1_PORT9_STATE_MASK    0xC
+#define    RTL8367C_VLAN_MSTI10_CTRL1_PORT8_STATE_OFFSET    0
+#define    RTL8367C_VLAN_MSTI10_CTRL1_PORT8_STATE_MASK    0x3
+
+#define    RTL8367C_REG_VLAN_MSTI11_CTRL0    0x0a16
+#define    RTL8367C_VLAN_MSTI11_CTRL0_PORT7_STATE_OFFSET    14
+#define    RTL8367C_VLAN_MSTI11_CTRL0_PORT7_STATE_MASK    0xC000
+#define    RTL8367C_VLAN_MSTI11_CTRL0_PORT6_STATE_OFFSET    12
+#define    RTL8367C_VLAN_MSTI11_CTRL0_PORT6_STATE_MASK    0x3000
+#define    RTL8367C_VLAN_MSTI11_CTRL0_PORT5_STATE_OFFSET    10
+#define    RTL8367C_VLAN_MSTI11_CTRL0_PORT5_STATE_MASK    0xC00
+#define    RTL8367C_VLAN_MSTI11_CTRL0_PORT4_STATE_OFFSET    8
+#define    RTL8367C_VLAN_MSTI11_CTRL0_PORT4_STATE_MASK    0x300
+#define    RTL8367C_VLAN_MSTI11_CTRL0_PORT3_STATE_OFFSET    6
+#define    RTL8367C_VLAN_MSTI11_CTRL0_PORT3_STATE_MASK    0xC0
+#define    RTL8367C_VLAN_MSTI11_CTRL0_PORT2_STATE_OFFSET    4
+#define    RTL8367C_VLAN_MSTI11_CTRL0_PORT2_STATE_MASK    0x30
+#define    RTL8367C_VLAN_MSTI11_CTRL0_PORT1_STATE_OFFSET    2
+#define    RTL8367C_VLAN_MSTI11_CTRL0_PORT1_STATE_MASK    0xC
+#define    RTL8367C_VLAN_MSTI11_CTRL0_PORT0_STATE_OFFSET    0
+#define    RTL8367C_VLAN_MSTI11_CTRL0_PORT0_STATE_MASK    0x3
+
+#define    RTL8367C_REG_VLAN_MSTI11_CTRL1    0x0a17
+#define    RTL8367C_VLAN_MSTI11_CTRL1_PORT10_STATE_OFFSET    4
+#define    RTL8367C_VLAN_MSTI11_CTRL1_PORT10_STATE_MASK    0x30
+#define    RTL8367C_VLAN_MSTI11_CTRL1_PORT9_STATE_OFFSET    2
+#define    RTL8367C_VLAN_MSTI11_CTRL1_PORT9_STATE_MASK    0xC
+#define    RTL8367C_VLAN_MSTI11_CTRL1_PORT8_STATE_OFFSET    0
+#define    RTL8367C_VLAN_MSTI11_CTRL1_PORT8_STATE_MASK    0x3
+
+#define    RTL8367C_REG_VLAN_MSTI12_CTRL0    0x0a18
+#define    RTL8367C_VLAN_MSTI12_CTRL0_PORT7_STATE_OFFSET    14
+#define    RTL8367C_VLAN_MSTI12_CTRL0_PORT7_STATE_MASK    0xC000
+#define    RTL8367C_VLAN_MSTI12_CTRL0_PORT6_STATE_OFFSET    12
+#define    RTL8367C_VLAN_MSTI12_CTRL0_PORT6_STATE_MASK    0x3000
+#define    RTL8367C_VLAN_MSTI12_CTRL0_PORT5_STATE_OFFSET    10
+#define    RTL8367C_VLAN_MSTI12_CTRL0_PORT5_STATE_MASK    0xC00
+#define    RTL8367C_VLAN_MSTI12_CTRL0_PORT4_STATE_OFFSET    8
+#define    RTL8367C_VLAN_MSTI12_CTRL0_PORT4_STATE_MASK    0x300
+#define    RTL8367C_VLAN_MSTI12_CTRL0_PORT3_STATE_OFFSET    6
+#define    RTL8367C_VLAN_MSTI12_CTRL0_PORT3_STATE_MASK    0xC0
+#define    RTL8367C_VLAN_MSTI12_CTRL0_PORT2_STATE_OFFSET    4
+#define    RTL8367C_VLAN_MSTI12_CTRL0_PORT2_STATE_MASK    0x30
+#define    RTL8367C_VLAN_MSTI12_CTRL0_PORT1_STATE_OFFSET    2
+#define    RTL8367C_VLAN_MSTI12_CTRL0_PORT1_STATE_MASK    0xC
+#define    RTL8367C_VLAN_MSTI12_CTRL0_PORT0_STATE_OFFSET    0
+#define    RTL8367C_VLAN_MSTI12_CTRL0_PORT0_STATE_MASK    0x3
+
+#define    RTL8367C_REG_VLAN_MSTI12_CTRL1    0x0a19
+#define    RTL8367C_VLAN_MSTI12_CTRL1_PORT10_STATE_OFFSET    4
+#define    RTL8367C_VLAN_MSTI12_CTRL1_PORT10_STATE_MASK    0x30
+#define    RTL8367C_VLAN_MSTI12_CTRL1_PORT9_STATE_OFFSET    2
+#define    RTL8367C_VLAN_MSTI12_CTRL1_PORT9_STATE_MASK    0xC
+#define    RTL8367C_VLAN_MSTI12_CTRL1_PORT8_STATE_OFFSET    0
+#define    RTL8367C_VLAN_MSTI12_CTRL1_PORT8_STATE_MASK    0x3
+
+#define    RTL8367C_REG_VLAN_MSTI13_CTRL0    0x0a1a
+#define    RTL8367C_VLAN_MSTI13_CTRL0_PORT7_STATE_OFFSET    14
+#define    RTL8367C_VLAN_MSTI13_CTRL0_PORT7_STATE_MASK    0xC000
+#define    RTL8367C_VLAN_MSTI13_CTRL0_PORT6_STATE_OFFSET    12
+#define    RTL8367C_VLAN_MSTI13_CTRL0_PORT6_STATE_MASK    0x3000
+#define    RTL8367C_VLAN_MSTI13_CTRL0_PORT5_STATE_OFFSET    10
+#define    RTL8367C_VLAN_MSTI13_CTRL0_PORT5_STATE_MASK    0xC00
+#define    RTL8367C_VLAN_MSTI13_CTRL0_PORT4_STATE_OFFSET    8
+#define    RTL8367C_VLAN_MSTI13_CTRL0_PORT4_STATE_MASK    0x300
+#define    RTL8367C_VLAN_MSTI13_CTRL0_PORT3_STATE_OFFSET    6
+#define    RTL8367C_VLAN_MSTI13_CTRL0_PORT3_STATE_MASK    0xC0
+#define    RTL8367C_VLAN_MSTI13_CTRL0_PORT2_STATE_OFFSET    4
+#define    RTL8367C_VLAN_MSTI13_CTRL0_PORT2_STATE_MASK    0x30
+#define    RTL8367C_VLAN_MSTI13_CTRL0_PORT1_STATE_OFFSET    2
+#define    RTL8367C_VLAN_MSTI13_CTRL0_PORT1_STATE_MASK    0xC
+#define    RTL8367C_VLAN_MSTI13_CTRL0_PORT0_STATE_OFFSET    0
+#define    RTL8367C_VLAN_MSTI13_CTRL0_PORT0_STATE_MASK    0x3
+
+#define    RTL8367C_REG_VLAN_MSTI13_CTRL1    0x0a1b
+#define    RTL8367C_VLAN_MSTI13_CTRL1_PORT10_STATE_OFFSET    4
+#define    RTL8367C_VLAN_MSTI13_CTRL1_PORT10_STATE_MASK    0x30
+#define    RTL8367C_VLAN_MSTI13_CTRL1_PORT9_STATE_OFFSET    2
+#define    RTL8367C_VLAN_MSTI13_CTRL1_PORT9_STATE_MASK    0xC
+#define    RTL8367C_VLAN_MSTI13_CTRL1_PORT8_STATE_OFFSET    0
+#define    RTL8367C_VLAN_MSTI13_CTRL1_PORT8_STATE_MASK    0x3
+
+#define    RTL8367C_REG_VLAN_MSTI14_CTRL0    0x0a1c
+#define    RTL8367C_VLAN_MSTI14_CTRL0_PORT7_STATE_OFFSET    14
+#define    RTL8367C_VLAN_MSTI14_CTRL0_PORT7_STATE_MASK    0xC000
+#define    RTL8367C_VLAN_MSTI14_CTRL0_PORT6_STATE_OFFSET    12
+#define    RTL8367C_VLAN_MSTI14_CTRL0_PORT6_STATE_MASK    0x3000
+#define    RTL8367C_VLAN_MSTI14_CTRL0_PORT5_STATE_OFFSET    10
+#define    RTL8367C_VLAN_MSTI14_CTRL0_PORT5_STATE_MASK    0xC00
+#define    RTL8367C_VLAN_MSTI14_CTRL0_PORT4_STATE_OFFSET    8
+#define    RTL8367C_VLAN_MSTI14_CTRL0_PORT4_STATE_MASK    0x300
+#define    RTL8367C_VLAN_MSTI14_CTRL0_PORT3_STATE_OFFSET    6
+#define    RTL8367C_VLAN_MSTI14_CTRL0_PORT3_STATE_MASK    0xC0
+#define    RTL8367C_VLAN_MSTI14_CTRL0_PORT2_STATE_OFFSET    4
+#define    RTL8367C_VLAN_MSTI14_CTRL0_PORT2_STATE_MASK    0x30
+#define    RTL8367C_VLAN_MSTI14_CTRL0_PORT1_STATE_OFFSET    2
+#define    RTL8367C_VLAN_MSTI14_CTRL0_PORT1_STATE_MASK    0xC
+#define    RTL8367C_VLAN_MSTI14_CTRL0_PORT0_STATE_OFFSET    0
+#define    RTL8367C_VLAN_MSTI14_CTRL0_PORT0_STATE_MASK    0x3
+
+#define    RTL8367C_REG_VLAN_MSTI14_CTRL1    0x0a1d
+#define    RTL8367C_VLAN_MSTI14_CTRL1_PORT10_STATE_OFFSET    4
+#define    RTL8367C_VLAN_MSTI14_CTRL1_PORT10_STATE_MASK    0x30
+#define    RTL8367C_VLAN_MSTI14_CTRL1_PORT9_STATE_OFFSET    2
+#define    RTL8367C_VLAN_MSTI14_CTRL1_PORT9_STATE_MASK    0xC
+#define    RTL8367C_VLAN_MSTI14_CTRL1_PORT8_STATE_OFFSET    0
+#define    RTL8367C_VLAN_MSTI14_CTRL1_PORT8_STATE_MASK    0x3
+
+#define    RTL8367C_REG_VLAN_MSTI15_CTRL0    0x0a1e
+#define    RTL8367C_VLAN_MSTI15_CTRL0_PORT7_STATE_OFFSET    14
+#define    RTL8367C_VLAN_MSTI15_CTRL0_PORT7_STATE_MASK    0xC000
+#define    RTL8367C_VLAN_MSTI15_CTRL0_PORT6_STATE_OFFSET    12
+#define    RTL8367C_VLAN_MSTI15_CTRL0_PORT6_STATE_MASK    0x3000
+#define    RTL8367C_VLAN_MSTI15_CTRL0_PORT5_STATE_OFFSET    10
+#define    RTL8367C_VLAN_MSTI15_CTRL0_PORT5_STATE_MASK    0xC00
+#define    RTL8367C_VLAN_MSTI15_CTRL0_PORT4_STATE_OFFSET    8
+#define    RTL8367C_VLAN_MSTI15_CTRL0_PORT4_STATE_MASK    0x300
+#define    RTL8367C_VLAN_MSTI15_CTRL0_PORT3_STATE_OFFSET    6
+#define    RTL8367C_VLAN_MSTI15_CTRL0_PORT3_STATE_MASK    0xC0
+#define    RTL8367C_VLAN_MSTI15_CTRL0_PORT2_STATE_OFFSET    4
+#define    RTL8367C_VLAN_MSTI15_CTRL0_PORT2_STATE_MASK    0x30
+#define    RTL8367C_VLAN_MSTI15_CTRL0_PORT1_STATE_OFFSET    2
+#define    RTL8367C_VLAN_MSTI15_CTRL0_PORT1_STATE_MASK    0xC
+#define    RTL8367C_VLAN_MSTI15_CTRL0_PORT0_STATE_OFFSET    0
+#define    RTL8367C_VLAN_MSTI15_CTRL0_PORT0_STATE_MASK    0x3
+
+#define    RTL8367C_REG_VLAN_MSTI15_CTRL1    0x0a1f
+#define    RTL8367C_VLAN_MSTI15_CTRL1_PORT10_STATE_OFFSET    4
+#define    RTL8367C_VLAN_MSTI15_CTRL1_PORT10_STATE_MASK    0x30
+#define    RTL8367C_VLAN_MSTI15_CTRL1_PORT9_STATE_OFFSET    2
+#define    RTL8367C_VLAN_MSTI15_CTRL1_PORT9_STATE_MASK    0xC
+#define    RTL8367C_VLAN_MSTI15_CTRL1_PORT8_STATE_OFFSET    0
+#define    RTL8367C_VLAN_MSTI15_CTRL1_PORT8_STATE_MASK    0x3
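+
+/*
+ * Every MSTI instance above follows the same layout: a 2-bit state
+ * field per port, ports 0..7 packed into *_CTRL0 and ports 8..10 into
+ * *_CTRL1.  A minimal read sketch for MSTI15, port 5 --
+ * rtl8367c_getAsicReg() stands in for a raw-register accessor and is
+ * not defined in this header:
+ *
+ *	unsigned int val, state;
+ *	rtl8367c_getAsicReg(RTL8367C_REG_VLAN_MSTI15_CTRL0, &val);
+ *	state = (val & RTL8367C_VLAN_MSTI15_CTRL0_PORT5_STATE_MASK)
+ *		>> RTL8367C_VLAN_MSTI15_CTRL0_PORT5_STATE_OFFSET;
+ */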
+
+#define    RTL8367C_REG_LUT_PORT0_LEARN_LIMITNO    0x0a20
+#define    RTL8367C_LUT_PORT0_LEARN_LIMITNO_OFFSET    0
+#define    RTL8367C_LUT_PORT0_LEARN_LIMITNO_MASK    0x1FFF
+
+#define    RTL8367C_REG_LUT_PORT1_LEARN_LIMITNO    0x0a21
+#define    RTL8367C_LUT_PORT1_LEARN_LIMITNO_OFFSET    0
+#define    RTL8367C_LUT_PORT1_LEARN_LIMITNO_MASK    0x1FFF
+
+#define    RTL8367C_REG_LUT_PORT2_LEARN_LIMITNO    0x0a22
+#define    RTL8367C_LUT_PORT2_LEARN_LIMITNO_OFFSET    0
+#define    RTL8367C_LUT_PORT2_LEARN_LIMITNO_MASK    0x1FFF
+
+#define    RTL8367C_REG_LUT_PORT3_LEARN_LIMITNO    0x0a23
+#define    RTL8367C_LUT_PORT3_LEARN_LIMITNO_OFFSET    0
+#define    RTL8367C_LUT_PORT3_LEARN_LIMITNO_MASK    0x1FFF
+
+#define    RTL8367C_REG_LUT_PORT4_LEARN_LIMITNO    0x0a24
+#define    RTL8367C_LUT_PORT4_LEARN_LIMITNO_OFFSET    0
+#define    RTL8367C_LUT_PORT4_LEARN_LIMITNO_MASK    0x1FFF
+
+#define    RTL8367C_REG_LUT_PORT5_LEARN_LIMITNO    0x0a25
+#define    RTL8367C_LUT_PORT5_LEARN_LIMITNO_OFFSET    0
+#define    RTL8367C_LUT_PORT5_LEARN_LIMITNO_MASK    0x1FFF
+
+#define    RTL8367C_REG_LUT_PORT6_LEARN_LIMITNO    0x0a26
+#define    RTL8367C_LUT_PORT6_LEARN_LIMITNO_OFFSET    0
+#define    RTL8367C_LUT_PORT6_LEARN_LIMITNO_MASK    0x1FFF
+
+#define    RTL8367C_REG_LUT_PORT7_LEARN_LIMITNO    0x0a27
+#define    RTL8367C_LUT_PORT7_LEARN_LIMITNO_OFFSET    0
+#define    RTL8367C_LUT_PORT7_LEARN_LIMITNO_MASK    0x1FFF
+
+#define    RTL8367C_REG_LUT_SYS_LEARN_LIMITNO    0x0a28
+#define    RTL8367C_LUT_SYS_LEARN_LIMITNO_OFFSET    0
+#define    RTL8367C_LUT_SYS_LEARN_LIMITNO_MASK    0x1FFF
+
+#define    RTL8367C_REG_LUT_LRN_SYS_LMT_CTRL    0x0a29
+#define    RTL8367C_LUT_SYSTEM_LEARN_PMASK1_OFFSET    12
+#define    RTL8367C_LUT_SYSTEM_LEARN_PMASK1_MASK    0x7000
+#define    RTL8367C_LUT_SYSTEM_LEARN_OVER_ACT_OFFSET    10
+#define    RTL8367C_LUT_SYSTEM_LEARN_OVER_ACT_MASK    0xC00
+#define    RTL8367C_LUT_SYSTEM_LEARN_PMASK_OFFSET    0
+#define    RTL8367C_LUT_SYSTEM_LEARN_PMASK_MASK    0xFF
+
+#define    RTL8367C_REG_LUT_PORT8_LEARN_LIMITNO    0x0a2a
+#define    RTL8367C_LUT_PORT8_LEARN_LIMITNO_OFFSET    0
+#define    RTL8367C_LUT_PORT8_LEARN_LIMITNO_MASK    0x1FFF
+
+#define    RTL8367C_REG_LUT_PORT9_LEARN_LIMITNO    0x0a2b
+#define    RTL8367C_LUT_PORT9_LEARN_LIMITNO_OFFSET    0
+#define    RTL8367C_LUT_PORT9_LEARN_LIMITNO_MASK    0x1FFF
+
+#define    RTL8367C_REG_LUT_PORT10_LEARN_LIMITNO    0x0a2c
+#define    RTL8367C_LUT_PORT10_LEARN_LIMITNO_OFFSET    0
+#define    RTL8367C_LUT_PORT10_LEARN_LIMITNO_MASK    0x1FFF
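+
+/*
+ * The per-port L2 learn limits (13-bit counts) are contiguous for
+ * ports 0..7, but ports 8..10 sit after the system-limit registers at
+ * 0x0a28/0x0a29.  A hedged address helper (name hypothetical):
+ *
+ *	unsigned int lut_learn_limit_reg(unsigned int port)
+ *	{
+ *		return (port < 8) ?
+ *			RTL8367C_REG_LUT_PORT0_LEARN_LIMITNO + port :
+ *			RTL8367C_REG_LUT_PORT8_LEARN_LIMITNO + (port - 8);
+ *	}
+ */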
+
+#define    RTL8367C_REG_LUT_CFG    0x0a30
+#define    RTL8367C_AGE_SPEED_OFFSET    8
+#define    RTL8367C_AGE_SPEED_MASK    0x300
+#define    RTL8367C_BCAM_DISABLE_OFFSET    6
+#define    RTL8367C_BCAM_DISABLE_MASK    0x40
+#define    RTL8367C_LINKDOWN_AGEOUT_OFFSET    5
+#define    RTL8367C_LINKDOWN_AGEOUT_MASK    0x20
+#define    RTL8367C_LUT_IPMC_HASH_OFFSET    4
+#define    RTL8367C_LUT_IPMC_HASH_MASK    0x10
+#define    RTL8367C_LUT_IPMC_LOOKUP_OP_OFFSET    3
+#define    RTL8367C_LUT_IPMC_LOOKUP_OP_MASK    0x8
+#define    RTL8367C_AGE_TIMER_OFFSET    0
+#define    RTL8367C_AGE_TIMER_MASK    0x7
+
+#define    RTL8367C_REG_LUT_AGEOUT_CTRL    0x0a31
+#define    RTL8367C_LUT_AGEOUT_CTRL_OFFSET    0
+#define    RTL8367C_LUT_AGEOUT_CTRL_MASK    0x7FF
+
+#define    RTL8367C_REG_PORT_EFID_CTRL0    0x0a32
+#define    RTL8367C_PORT3_EFID_OFFSET    12
+#define    RTL8367C_PORT3_EFID_MASK    0x7000
+#define    RTL8367C_PORT2_EFID_OFFSET    8
+#define    RTL8367C_PORT2_EFID_MASK    0x700
+#define    RTL8367C_PORT1_EFID_OFFSET    4
+#define    RTL8367C_PORT1_EFID_MASK    0x70
+#define    RTL8367C_PORT0_EFID_OFFSET    0
+#define    RTL8367C_PORT0_EFID_MASK    0x7
+
+#define    RTL8367C_REG_PORT_EFID_CTRL1    0x0a33
+#define    RTL8367C_PORT7_EFID_OFFSET    12
+#define    RTL8367C_PORT7_EFID_MASK    0x7000
+#define    RTL8367C_PORT6_EFID_OFFSET    8
+#define    RTL8367C_PORT6_EFID_MASK    0x700
+#define    RTL8367C_PORT5_EFID_OFFSET    4
+#define    RTL8367C_PORT5_EFID_MASK    0x70
+#define    RTL8367C_PORT4_EFID_OFFSET    0
+#define    RTL8367C_PORT4_EFID_MASK    0x7
+
+#define    RTL8367C_REG_PORT_EFID_CTRL2    0x0a34
+#define    RTL8367C_PORT10_EFID_OFFSET    8
+#define    RTL8367C_PORT10_EFID_MASK    0x700
+#define    RTL8367C_PORT9_EFID_OFFSET    4
+#define    RTL8367C_PORT9_EFID_MASK    0x70
+#define    RTL8367C_PORT8_EFID_OFFSET    0
+#define    RTL8367C_PORT8_EFID_MASK    0x7
+
+#define    RTL8367C_REG_FORCE_FLUSH1    0x0a35
+#define    RTL8367C_BUSY_STATUS1_OFFSET    3
+#define    RTL8367C_BUSY_STATUS1_MASK    0x38
+#define    RTL8367C_PORTMASK1_OFFSET    0
+#define    RTL8367C_PORTMASK1_MASK    0x7
+
+#define    RTL8367C_REG_FORCE_FLUSH    0x0a36
+#define    RTL8367C_BUSY_STATUS_OFFSET    8
+#define    RTL8367C_BUSY_STATUS_MASK    0xFF00
+#define    RTL8367C_FORCE_FLUSH_PORTMASK_OFFSET    0
+#define    RTL8367C_FORCE_FLUSH_PORTMASK_MASK    0xFF
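+
+/*
+ * Forcing an L2 table flush looks like a write-then-poll operation:
+ * set the bits for ports 0..7 in FORCE_FLUSH (ports 8..10 go in
+ * FORCE_FLUSH1) and wait for the matching BUSY_STATUS bits to clear.
+ * A sketch, assuming hypothetical accessors and that the busy bits are
+ * hardware-cleared once the flush completes:
+ *
+ *	unsigned int busy;
+ *	rtl8367c_setAsicReg(RTL8367C_REG_FORCE_FLUSH, 1 << port);
+ *	do {
+ *		rtl8367c_getAsicReg(RTL8367C_REG_FORCE_FLUSH, &busy);
+ *	} while (busy & RTL8367C_BUSY_STATUS_MASK);
+ */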
+
+#define    RTL8367C_REG_L2_FLUSH_CTRL1    0x0a37
+#define    RTL8367C_LUT_FLUSH_FID_OFFSET    12
+#define    RTL8367C_LUT_FLUSH_FID_MASK    0xF000
+#define    RTL8367C_LUT_FLUSH_VID_OFFSET    0
+#define    RTL8367C_LUT_FLUSH_VID_MASK    0xFFF
+
+#define    RTL8367C_REG_L2_FLUSH_CTRL2    0x0a38
+#define    RTL8367C_LUT_FLUSH_TYPE_OFFSET    2
+#define    RTL8367C_LUT_FLUSH_TYPE_MASK    0x4
+#define    RTL8367C_LUT_FLUSH_MODE_OFFSET    0
+#define    RTL8367C_LUT_FLUSH_MODE_MASK    0x3
+
+#define    RTL8367C_REG_L2_FLUSH_CTRL3    0x0a39
+#define    RTL8367C_L2_FLUSH_CTRL3_OFFSET    0
+#define    RTL8367C_L2_FLUSH_CTRL3_MASK    0x1
+
+#define    RTL8367C_REG_LUT_CFG2    0x0a3a
+#define    RTL8367C_LUT_IPMC_FWD_RPORT_OFFSET    1
+#define    RTL8367C_LUT_IPMC_FWD_RPORT_MASK    0x2
+#define    RTL8367C_LUT_IPMC_VID_HASH_OFFSET    0
+#define    RTL8367C_LUT_IPMC_VID_HASH_MASK    0x1
+
+#define    RTL8367C_REG_FLUSH_STATUS    0x0a3f
+#define    RTL8367C_FLUSH_STATUS_OFFSET    0
+#define    RTL8367C_FLUSH_STATUS_MASK    0x1
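+
+/*
+ * A VID/FID-scoped flush appears to be programmed across three
+ * registers: the key in L2_FLUSH_CTRL1, the match type and mode in
+ * L2_FLUSH_CTRL2, the trigger in L2_FLUSH_CTRL3, with FLUSH_STATUS
+ * readable as a busy flag.  One plausible sequence (field-write
+ * helpers hypothetical; exact semantics are per the RTL8367C
+ * datasheet, not this header):
+ *
+ *	rtl8367c_setAsicRegBits(RTL8367C_REG_L2_FLUSH_CTRL1,
+ *				RTL8367C_LUT_FLUSH_VID_MASK, vid);
+ *	rtl8367c_setAsicRegBit(RTL8367C_REG_L2_FLUSH_CTRL3,
+ *			       RTL8367C_L2_FLUSH_CTRL3_OFFSET, 1);
+ */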
+
+#define    RTL8367C_REG_STORM_BCAST    0x0a40
+#define    RTL8367C_STORM_BCAST_OFFSET    0
+#define    RTL8367C_STORM_BCAST_MASK    0x7FF
+
+#define    RTL8367C_REG_STORM_MCAST    0x0a41
+#define    RTL8367C_STORM_MCAST_OFFSET    0
+#define    RTL8367C_STORM_MCAST_MASK    0x7FF
+
+/*
+ * "UNKOWN" below is the vendor SDK's own spelling (the *_EXT_EN bits
+ * at 0x0a60 spell "UNKNOWN"); the identifiers are kept as-is since
+ * other files in this patch presumably reference them by this name.
+ */
+#define    RTL8367C_REG_STORM_UNKOWN_UCAST    0x0a42
+#define    RTL8367C_STORM_UNKOWN_UCAST_OFFSET    0
+#define    RTL8367C_STORM_UNKOWN_UCAST_MASK    0x7FF
+
+#define    RTL8367C_REG_STORM_UNKOWN_MCAST    0x0a43
+#define    RTL8367C_STORM_UNKOWN_MCAST_OFFSET    0
+#define    RTL8367C_STORM_UNKOWN_MCAST_MASK    0x7FF
+
+#define    RTL8367C_REG_STORM_BCAST_METER_CTRL0    0x0a44
+#define    RTL8367C_STORM_BCAST_METER_CTRL0_PORT1_METERIDX_OFFSET    8
+#define    RTL8367C_STORM_BCAST_METER_CTRL0_PORT1_METERIDX_MASK    0x3F00
+#define    RTL8367C_STORM_BCAST_METER_CTRL0_PORT0_METERIDX_OFFSET    0
+#define    RTL8367C_STORM_BCAST_METER_CTRL0_PORT0_METERIDX_MASK    0x3F
+
+#define    RTL8367C_REG_STORM_BCAST_METER_CTRL1    0x0a45
+#define    RTL8367C_STORM_BCAST_METER_CTRL1_PORT3_METERIDX_OFFSET    8
+#define    RTL8367C_STORM_BCAST_METER_CTRL1_PORT3_METERIDX_MASK    0x3F00
+#define    RTL8367C_STORM_BCAST_METER_CTRL1_PORT2_METERIDX_OFFSET    0
+#define    RTL8367C_STORM_BCAST_METER_CTRL1_PORT2_METERIDX_MASK    0x3F
+
+#define    RTL8367C_REG_STORM_BCAST_METER_CTRL2    0x0a46
+#define    RTL8367C_STORM_BCAST_METER_CTRL2_PORT5_METERIDX_OFFSET    8
+#define    RTL8367C_STORM_BCAST_METER_CTRL2_PORT5_METERIDX_MASK    0x3F00
+#define    RTL8367C_STORM_BCAST_METER_CTRL2_PORT4_METERIDX_OFFSET    0
+#define    RTL8367C_STORM_BCAST_METER_CTRL2_PORT4_METERIDX_MASK    0x3F
+
+#define    RTL8367C_REG_STORM_BCAST_METER_CTRL3    0x0a47
+#define    RTL8367C_STORM_BCAST_METER_CTRL3_PORT7_METERIDX_OFFSET    8
+#define    RTL8367C_STORM_BCAST_METER_CTRL3_PORT7_METERIDX_MASK    0x3F00
+#define    RTL8367C_STORM_BCAST_METER_CTRL3_PORT6_METERIDX_OFFSET    0
+#define    RTL8367C_STORM_BCAST_METER_CTRL3_PORT6_METERIDX_MASK    0x3F
+
+#define    RTL8367C_REG_STORM_BCAST_METER_CTRL4    0x0a48
+#define    RTL8367C_STORM_BCAST_METER_CTRL4_PORT9_METERIDX_OFFSET    8
+#define    RTL8367C_STORM_BCAST_METER_CTRL4_PORT9_METERIDX_MASK    0x3F00
+#define    RTL8367C_STORM_BCAST_METER_CTRL4_PORT8_METERIDX_OFFSET    0
+#define    RTL8367C_STORM_BCAST_METER_CTRL4_PORT8_METERIDX_MASK    0x3F
+
+#define    RTL8367C_REG_STORM_BCAST_METER_CTRL5    0x0a49
+#define    RTL8367C_STORM_BCAST_METER_CTRL5_OFFSET    0
+#define    RTL8367C_STORM_BCAST_METER_CTRL5_MASK    0x3F
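+
+/*
+ * Storm meter indices are 6-bit fields packed two ports per register,
+ * in port order, with port 10 alone in *_CTRL5.  For the broadcast
+ * family this gives, for ports 0..9:
+ *
+ *	reg    = RTL8367C_REG_STORM_BCAST_METER_CTRL0 + (port >> 1);
+ *	offset = (port & 1) ? 8 : 0;
+ *
+ * The MCAST and UNDA families below follow the same stride; note the
+ * UNMC family's CTRL4/CTRL5 sit after the storm-ext registers, so the
+ * stride does not hold across that gap.  This is an address-pattern
+ * observation from the list, not a documented formula.
+ */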
+
+#define    RTL8367C_REG_STORM_MCAST_METER_CTRL0    0x0a4c
+#define    RTL8367C_STORM_MCAST_METER_CTRL0_PORT1_METERIDX_OFFSET    8
+#define    RTL8367C_STORM_MCAST_METER_CTRL0_PORT1_METERIDX_MASK    0x3F00
+#define    RTL8367C_STORM_MCAST_METER_CTRL0_PORT0_METERIDX_OFFSET    0
+#define    RTL8367C_STORM_MCAST_METER_CTRL0_PORT0_METERIDX_MASK    0x3F
+
+#define    RTL8367C_REG_STORM_MCAST_METER_CTRL1    0x0a4d
+#define    RTL8367C_STORM_MCAST_METER_CTRL1_PORT3_METERIDX_OFFSET    8
+#define    RTL8367C_STORM_MCAST_METER_CTRL1_PORT3_METERIDX_MASK    0x3F00
+#define    RTL8367C_STORM_MCAST_METER_CTRL1_PORT2_METERIDX_OFFSET    0
+#define    RTL8367C_STORM_MCAST_METER_CTRL1_PORT2_METERIDX_MASK    0x3F
+
+#define    RTL8367C_REG_STORM_MCAST_METER_CTRL2    0x0a4e
+#define    RTL8367C_STORM_MCAST_METER_CTRL2_PORT5_METERIDX_OFFSET    8
+#define    RTL8367C_STORM_MCAST_METER_CTRL2_PORT5_METERIDX_MASK    0x3F00
+#define    RTL8367C_STORM_MCAST_METER_CTRL2_PORT4_METERIDX_OFFSET    0
+#define    RTL8367C_STORM_MCAST_METER_CTRL2_PORT4_METERIDX_MASK    0x3F
+
+#define    RTL8367C_REG_STORM_MCAST_METER_CTRL3    0x0a4f
+#define    RTL8367C_STORM_MCAST_METER_CTRL3_PORT7_METERIDX_OFFSET    8
+#define    RTL8367C_STORM_MCAST_METER_CTRL3_PORT7_METERIDX_MASK    0x3F00
+#define    RTL8367C_STORM_MCAST_METER_CTRL3_PORT6_METERIDX_OFFSET    0
+#define    RTL8367C_STORM_MCAST_METER_CTRL3_PORT6_METERIDX_MASK    0x3F
+
+#define    RTL8367C_REG_STORM_MCAST_METER_CTRL4    0x0a50
+#define    RTL8367C_STORM_MCAST_METER_CTRL4_PORT9_METERIDX_OFFSET    8
+#define    RTL8367C_STORM_MCAST_METER_CTRL4_PORT9_METERIDX_MASK    0x3F00
+#define    RTL8367C_STORM_MCAST_METER_CTRL4_PORT8_METERIDX_OFFSET    0
+#define    RTL8367C_STORM_MCAST_METER_CTRL4_PORT8_METERIDX_MASK    0x3F
+
+#define    RTL8367C_REG_STORM_MCAST_METER_CTRL5    0x0a51
+#define    RTL8367C_STORM_MCAST_METER_CTRL5_OFFSET    0
+#define    RTL8367C_STORM_MCAST_METER_CTRL5_MASK    0x3F
+
+#define    RTL8367C_REG_STORM_UNDA_METER_CTRL0    0x0a54
+#define    RTL8367C_STORM_UNDA_METER_CTRL0_PORT1_METERIDX_OFFSET    8
+#define    RTL8367C_STORM_UNDA_METER_CTRL0_PORT1_METERIDX_MASK    0x3F00
+#define    RTL8367C_STORM_UNDA_METER_CTRL0_PORT0_METERIDX_OFFSET    0
+#define    RTL8367C_STORM_UNDA_METER_CTRL0_PORT0_METERIDX_MASK    0x3F
+
+#define    RTL8367C_REG_STORM_UNDA_METER_CTRL1    0x0a55
+#define    RTL8367C_STORM_UNDA_METER_CTRL1_PORT3_METERIDX_OFFSET    8
+#define    RTL8367C_STORM_UNDA_METER_CTRL1_PORT3_METERIDX_MASK    0x3F00
+#define    RTL8367C_STORM_UNDA_METER_CTRL1_PORT2_METERIDX_OFFSET    0
+#define    RTL8367C_STORM_UNDA_METER_CTRL1_PORT2_METERIDX_MASK    0x3F
+
+#define    RTL8367C_REG_STORM_UNDA_METER_CTRL2    0x0a56
+#define    RTL8367C_STORM_UNDA_METER_CTRL2_PORT5_METERIDX_OFFSET    8
+#define    RTL8367C_STORM_UNDA_METER_CTRL2_PORT5_METERIDX_MASK    0x3F00
+#define    RTL8367C_STORM_UNDA_METER_CTRL2_PORT4_METERIDX_OFFSET    0
+#define    RTL8367C_STORM_UNDA_METER_CTRL2_PORT4_METERIDX_MASK    0x3F
+
+#define    RTL8367C_REG_STORM_UNDA_METER_CTRL3    0x0a57
+#define    RTL8367C_STORM_UNDA_METER_CTRL3_PORT7_METERIDX_OFFSET    8
+#define    RTL8367C_STORM_UNDA_METER_CTRL3_PORT7_METERIDX_MASK    0x3F00
+#define    RTL8367C_STORM_UNDA_METER_CTRL3_PORT6_METERIDX_OFFSET    0
+#define    RTL8367C_STORM_UNDA_METER_CTRL3_PORT6_METERIDX_MASK    0x3F
+
+#define    RTL8367C_REG_STORM_UNDA_METER_CTRL4    0x0a58
+#define    RTL8367C_STORM_UNDA_METER_CTRL4_PORT9_METERIDX_OFFSET    8
+#define    RTL8367C_STORM_UNDA_METER_CTRL4_PORT9_METERIDX_MASK    0x3F00
+#define    RTL8367C_STORM_UNDA_METER_CTRL4_PORT8_METERIDX_OFFSET    0
+#define    RTL8367C_STORM_UNDA_METER_CTRL4_PORT8_METERIDX_MASK    0x3F
+
+#define    RTL8367C_REG_STORM_UNDA_METER_CTRL5    0x0a59
+#define    RTL8367C_STORM_UNDA_METER_CTRL5_OFFSET    0
+#define    RTL8367C_STORM_UNDA_METER_CTRL5_MASK    0x3F
+
+#define    RTL8367C_REG_STORM_UNMC_METER_CTRL0    0x0a5c
+#define    RTL8367C_STORM_UNMC_METER_CTRL0_PORT1_METERIDX_OFFSET    8
+#define    RTL8367C_STORM_UNMC_METER_CTRL0_PORT1_METERIDX_MASK    0x3F00
+#define    RTL8367C_STORM_UNMC_METER_CTRL0_PORT0_METERIDX_OFFSET    0
+#define    RTL8367C_STORM_UNMC_METER_CTRL0_PORT0_METERIDX_MASK    0x3F
+
+#define    RTL8367C_REG_STORM_UNMC_METER_CTRL1    0x0a5d
+#define    RTL8367C_STORM_UNMC_METER_CTRL1_PORT3_METERIDX_OFFSET    8
+#define    RTL8367C_STORM_UNMC_METER_CTRL1_PORT3_METERIDX_MASK    0x3F00
+#define    RTL8367C_STORM_UNMC_METER_CTRL1_PORT2_METERIDX_OFFSET    0
+#define    RTL8367C_STORM_UNMC_METER_CTRL1_PORT2_METERIDX_MASK    0x3F
+
+#define    RTL8367C_REG_STORM_UNMC_METER_CTRL2    0x0a5e
+#define    RTL8367C_STORM_UNMC_METER_CTRL2_PORT5_METERIDX_OFFSET    8
+#define    RTL8367C_STORM_UNMC_METER_CTRL2_PORT5_METERIDX_MASK    0x3F00
+#define    RTL8367C_STORM_UNMC_METER_CTRL2_PORT4_METERIDX_OFFSET    0
+#define    RTL8367C_STORM_UNMC_METER_CTRL2_PORT4_METERIDX_MASK    0x3F
+
+#define    RTL8367C_REG_STORM_UNMC_METER_CTRL3    0x0a5f
+#define    RTL8367C_STORM_UNMC_METER_CTRL3_PORT7_METERIDX_OFFSET    8
+#define    RTL8367C_STORM_UNMC_METER_CTRL3_PORT7_METERIDX_MASK    0x3F00
+#define    RTL8367C_STORM_UNMC_METER_CTRL3_PORT6_METERIDX_OFFSET    0
+#define    RTL8367C_STORM_UNMC_METER_CTRL3_PORT6_METERIDX_MASK    0x3F
+
+#define    RTL8367C_REG_STORM_EXT_CFG    0x0a60
+#define    RTL8367C_STORM_EXT_EN_PORTMASK_EXT_OFFSET    14
+#define    RTL8367C_STORM_EXT_EN_PORTMASK_EXT_MASK    0x4000
+#define    RTL8367C_STORM_UNKNOWN_MCAST_EXT_EN_OFFSET    13
+#define    RTL8367C_STORM_UNKNOWN_MCAST_EXT_EN_MASK    0x2000
+#define    RTL8367C_STORM_UNKNOWN_UCAST_EXT_EN_OFFSET    12
+#define    RTL8367C_STORM_UNKNOWN_UCAST_EXT_EN_MASK    0x1000
+#define    RTL8367C_STORM_MCAST_EXT_EN_OFFSET    11
+#define    RTL8367C_STORM_MCAST_EXT_EN_MASK    0x800
+#define    RTL8367C_STORM_BCAST_EXT_EN_OFFSET    10
+#define    RTL8367C_STORM_BCAST_EXT_EN_MASK    0x400
+#define    RTL8367C_STORM_EXT_EN_PORTMASK_OFFSET    0
+#define    RTL8367C_STORM_EXT_EN_PORTMASK_MASK    0x3FF
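+
+/*
+ * STORM_EXT_EN_PORTMASK covers ports 0..9 (mask 0x3FF); the separate
+ * *_PORTMASK_EXT bit at offset 14 presumably extends enablement to
+ * port 10, so building the full 11-port enable takes two fields.  A
+ * hedged sketch with hypothetical field-write helpers:
+ *
+ *	rtl8367c_setAsicRegBits(RTL8367C_REG_STORM_EXT_CFG,
+ *				RTL8367C_STORM_EXT_EN_PORTMASK_MASK,
+ *				pmask & 0x3FF);
+ *	rtl8367c_setAsicRegBit(RTL8367C_REG_STORM_EXT_CFG,
+ *			       RTL8367C_STORM_EXT_EN_PORTMASK_EXT_OFFSET,
+ *			       (pmask >> 10) & 1);
+ */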
+
+#define    RTL8367C_REG_STORM_EXT_MTRIDX_CFG0    0x0a61
+#define    RTL8367C_MC_STORM_EXT_METERIDX_OFFSET    8
+#define    RTL8367C_MC_STORM_EXT_METERIDX_MASK    0x3F00
+#define    RTL8367C_BC_STORM_EXT_METERIDX_OFFSET    0
+#define    RTL8367C_BC_STORM_EXT_METERIDX_MASK    0x3F
+
+#define    RTL8367C_REG_STORM_EXT_MTRIDX_CFG1    0x0a62
+#define    RTL8367C_UNMC_STORM_EXT_METERIDX_OFFSET    8
+#define    RTL8367C_UNMC_STORM_EXT_METERIDX_MASK    0x3F00
+#define    RTL8367C_UNUC_STORM_EXT_METERIDX_OFFSET    0
+#define    RTL8367C_UNUC_STORM_EXT_METERIDX_MASK    0x3F
+
+#define    RTL8367C_REG_STORM_UNMC_METER_CTRL4    0x0a63
+#define    RTL8367C_STORM_UNMC_METER_CTRL4_PORT9_METERIDX_OFFSET    8
+#define    RTL8367C_STORM_UNMC_METER_CTRL4_PORT9_METERIDX_MASK    0x3F00
+#define    RTL8367C_STORM_UNMC_METER_CTRL4_PORT8_METERIDX_OFFSET    0
+#define    RTL8367C_STORM_UNMC_METER_CTRL4_PORT8_METERIDX_MASK    0x3F
+
+#define    RTL8367C_REG_STORM_UNMC_METER_CTRL5    0x0a64
+#define    RTL8367C_STORM_UNMC_METER_CTRL5_OFFSET    0
+#define    RTL8367C_STORM_UNMC_METER_CTRL5_MASK    0x3F
+
+#define    RTL8367C_REG_OAM_PARSER_CTRL0    0x0a70
+#define    RTL8367C_PORT7_PARACT_OFFSET    14
+#define    RTL8367C_PORT7_PARACT_MASK    0xC000
+#define    RTL8367C_PORT6_PARACT_OFFSET    12
+#define    RTL8367C_PORT6_PARACT_MASK    0x3000
+#define    RTL8367C_PORT5_PARACT_OFFSET    10
+#define    RTL8367C_PORT5_PARACT_MASK    0xC00
+#define    RTL8367C_PORT4_PARACT_OFFSET    8
+#define    RTL8367C_PORT4_PARACT_MASK    0x300
+#define    RTL8367C_PORT3_PARACT_OFFSET    6
+#define    RTL8367C_PORT3_PARACT_MASK    0xC0
+#define    RTL8367C_PORT2_PARACT_OFFSET    4
+#define    RTL8367C_PORT2_PARACT_MASK    0x30
+#define    RTL8367C_PORT1_PARACT_OFFSET    2
+#define    RTL8367C_PORT1_PARACT_MASK    0xC
+#define    RTL8367C_PORT0_PARACT_OFFSET    0
+#define    RTL8367C_PORT0_PARACT_MASK    0x3
+
+#define    RTL8367C_REG_OAM_PARSER_CTRL1    0x0a71
+#define    RTL8367C_PORT10_PARACT_OFFSET    4
+#define    RTL8367C_PORT10_PARACT_MASK    0x30
+#define    RTL8367C_PORT9_PARACT_OFFSET    2
+#define    RTL8367C_PORT9_PARACT_MASK    0xC
+#define    RTL8367C_PORT8_PARACT_OFFSET    0
+#define    RTL8367C_PORT8_PARACT_MASK    0x3
+
+#define    RTL8367C_REG_OAM_MULTIPLEXER_CTRL0    0x0a72
+#define    RTL8367C_PORT7_MULACT_OFFSET    14
+#define    RTL8367C_PORT7_MULACT_MASK    0xC000
+#define    RTL8367C_PORT6_MULACT_OFFSET    12
+#define    RTL8367C_PORT6_MULACT_MASK    0x3000
+#define    RTL8367C_PORT5_MULACT_OFFSET    10
+#define    RTL8367C_PORT5_MULACT_MASK    0xC00
+#define    RTL8367C_PORT4_MULACT_OFFSET    8
+#define    RTL8367C_PORT4_MULACT_MASK    0x300
+#define    RTL8367C_PORT3_MULACT_OFFSET    6
+#define    RTL8367C_PORT3_MULACT_MASK    0xC0
+#define    RTL8367C_PORT2_MULACT_OFFSET    4
+#define    RTL8367C_PORT2_MULACT_MASK    0x30
+#define    RTL8367C_PORT1_MULACT_OFFSET    2
+#define    RTL8367C_PORT1_MULACT_MASK    0xC
+#define    RTL8367C_PORT0_MULACT_OFFSET    0
+#define    RTL8367C_PORT0_MULACT_MASK    0x3
+
+#define    RTL8367C_REG_OAM_MULTIPLEXER_CTRL1    0x0a73
+#define    RTL8367C_PORT10_MULACT_OFFSET    4
+#define    RTL8367C_PORT10_MULACT_MASK    0x30
+#define    RTL8367C_PORT9_MULACT_OFFSET    2
+#define    RTL8367C_PORT9_MULACT_MASK    0xC
+#define    RTL8367C_PORT8_MULACT_OFFSET    0
+#define    RTL8367C_PORT8_MULACT_MASK    0x3
+
+#define    RTL8367C_REG_OAM_CTRL    0x0a74
+#define    RTL8367C_OAM_CTRL_OFFSET    0
+#define    RTL8367C_OAM_CTRL_MASK    0x1
+
+#define    RTL8367C_REG_DOT1X_PORT_ENABLE    0x0a80
+#define    RTL8367C_DOT1X_PORT_ENABLE_OFFSET    0
+#define    RTL8367C_DOT1X_PORT_ENABLE_MASK    0x7FF
+
+#define    RTL8367C_REG_DOT1X_MAC_ENABLE    0x0a81
+#define    RTL8367C_DOT1X_MAC_ENABLE_OFFSET    0
+#define    RTL8367C_DOT1X_MAC_ENABLE_MASK    0x7FF
+
+#define    RTL8367C_REG_DOT1X_PORT_AUTH    0x0a82
+#define    RTL8367C_DOT1X_PORT_AUTH_OFFSET    0
+#define    RTL8367C_DOT1X_PORT_AUTH_MASK    0x7FF
+
+#define    RTL8367C_REG_DOT1X_PORT_OPDIR    0x0a83
+#define    RTL8367C_DOT1X_PORT_OPDIR_OFFSET    0
+#define    RTL8367C_DOT1X_PORT_OPDIR_MASK    0x7FF
+
+#define    RTL8367C_REG_DOT1X_UNAUTH_ACT_W0    0x0a84
+#define    RTL8367C_DOT1X_PORT7_UNAUTHBH_OFFSET    14
+#define    RTL8367C_DOT1X_PORT7_UNAUTHBH_MASK    0xC000
+#define    RTL8367C_DOT1X_PORT6_UNAUTHBH_OFFSET    12
+#define    RTL8367C_DOT1X_PORT6_UNAUTHBH_MASK    0x3000
+#define    RTL8367C_DOT1X_PORT5_UNAUTHBH_OFFSET    10
+#define    RTL8367C_DOT1X_PORT5_UNAUTHBH_MASK    0xC00
+#define    RTL8367C_DOT1X_PORT4_UNAUTHBH_OFFSET    8
+#define    RTL8367C_DOT1X_PORT4_UNAUTHBH_MASK    0x300
+#define    RTL8367C_DOT1X_PORT3_UNAUTHBH_OFFSET    6
+#define    RTL8367C_DOT1X_PORT3_UNAUTHBH_MASK    0xC0
+#define    RTL8367C_DOT1X_PORT2_UNAUTHBH_OFFSET    4
+#define    RTL8367C_DOT1X_PORT2_UNAUTHBH_MASK    0x30
+#define    RTL8367C_DOT1X_PORT1_UNAUTHBH_OFFSET    2
+#define    RTL8367C_DOT1X_PORT1_UNAUTHBH_MASK    0xC
+#define    RTL8367C_DOT1X_PORT0_UNAUTHBH_OFFSET    0
+#define    RTL8367C_DOT1X_PORT0_UNAUTHBH_MASK    0x3
+
+#define    RTL8367C_REG_DOT1X_UNAUTH_ACT_W1    0x0a85
+#define    RTL8367C_DOT1X_PORT10_UNAUTHBH_OFFSET    4
+#define    RTL8367C_DOT1X_PORT10_UNAUTHBH_MASK    0x30
+#define    RTL8367C_DOT1X_PORT9_UNAUTHBH_OFFSET    2
+#define    RTL8367C_DOT1X_PORT9_UNAUTHBH_MASK    0xC
+#define    RTL8367C_DOT1X_PORT8_UNAUTHBH_OFFSET    0
+#define    RTL8367C_DOT1X_PORT8_UNAUTHBH_MASK    0x3
+
+#define    RTL8367C_REG_DOT1X_CFG    0x0a86
+#define    RTL8367C_DOT1X_GVOPDIR_OFFSET    6
+#define    RTL8367C_DOT1X_GVOPDIR_MASK    0x40
+#define    RTL8367C_DOT1X_MAC_OPDIR_OFFSET    5
+#define    RTL8367C_DOT1X_MAC_OPDIR_MASK    0x20
+#define    RTL8367C_DOT1X_GVIDX_OFFSET    0
+#define    RTL8367C_DOT1X_GVIDX_MASK    0x1F
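+
+/*
+ * The 802.1X registers are 11-bit port masks: PORT_ENABLE appears to
+ * select port-based authentication, PORT_AUTH to mark a port
+ * authorized and PORT_OPDIR to choose the controlled direction.
+ * Authorizing port n would then be two bit sets (helper hypothetical):
+ *
+ *	rtl8367c_setAsicRegBit(RTL8367C_REG_DOT1X_PORT_ENABLE, n, 1);
+ *	rtl8367c_setAsicRegBit(RTL8367C_REG_DOT1X_PORT_AUTH, n, 1);
+ */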
+
+#define    RTL8367C_REG_L2_LRN_CNT_CTRL0    0x0a87
+#define    RTL8367C_L2_LRN_CNT_CTRL0_OFFSET    0
+#define    RTL8367C_L2_LRN_CNT_CTRL0_MASK    0x1FFF
+
+#define    RTL8367C_REG_L2_LRN_CNT_CTRL1    0x0a88
+#define    RTL8367C_L2_LRN_CNT_CTRL1_OFFSET    0
+#define    RTL8367C_L2_LRN_CNT_CTRL1_MASK    0x1FFF
+
+#define    RTL8367C_REG_L2_LRN_CNT_CTRL2    0x0a89
+#define    RTL8367C_L2_LRN_CNT_CTRL2_OFFSET    0
+#define    RTL8367C_L2_LRN_CNT_CTRL2_MASK    0x1FFF
+
+#define    RTL8367C_REG_L2_LRN_CNT_CTRL3    0x0a8a
+#define    RTL8367C_L2_LRN_CNT_CTRL3_OFFSET    0
+#define    RTL8367C_L2_LRN_CNT_CTRL3_MASK    0x1FFF
+
+#define    RTL8367C_REG_L2_LRN_CNT_CTRL4    0x0a8b
+#define    RTL8367C_L2_LRN_CNT_CTRL4_OFFSET    0
+#define    RTL8367C_L2_LRN_CNT_CTRL4_MASK    0x1FFF
+
+#define    RTL8367C_REG_L2_LRN_CNT_CTRL5    0x0a8c
+#define    RTL8367C_L2_LRN_CNT_CTRL5_OFFSET    0
+#define    RTL8367C_L2_LRN_CNT_CTRL5_MASK    0x1FFF
+
+#define    RTL8367C_REG_L2_LRN_CNT_CTRL6    0x0a8d
+#define    RTL8367C_L2_LRN_CNT_CTRL6_OFFSET    0
+#define    RTL8367C_L2_LRN_CNT_CTRL6_MASK    0x1FFF
+
+#define    RTL8367C_REG_L2_LRN_CNT_CTRL7    0x0a8e
+#define    RTL8367C_L2_LRN_CNT_CTRL7_OFFSET    0
+#define    RTL8367C_L2_LRN_CNT_CTRL7_MASK    0x1FFF
+
+#define    RTL8367C_REG_L2_LRN_CNT_CTRL8    0x0a8f
+#define    RTL8367C_L2_LRN_CNT_CTRL8_OFFSET    0
+#define    RTL8367C_L2_LRN_CNT_CTRL8_MASK    0x1FFF
+
+#define    RTL8367C_REG_L2_LRN_CNT_CTRL9    0x0a90
+#define    RTL8367C_L2_LRN_CNT_CTRL9_OFFSET    0
+#define    RTL8367C_L2_LRN_CNT_CTRL9_MASK    0x1FFF
+
+#define    RTL8367C_REG_LUT_LRN_UNDER_STATUS    0x0a91
+#define    RTL8367C_LUT_LRN_UNDER_STATUS_OFFSET    0
+#define    RTL8367C_LUT_LRN_UNDER_STATUS_MASK    0x7FF
+
+#define    RTL8367C_REG_L2_LRN_CNT_CTRL10    0x0a92
+#define    RTL8367C_L2_LRN_CNT_CTRL10_OFFSET    0
+#define    RTL8367C_L2_LRN_CNT_CTRL10_MASK    0x1FFF
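+
+/*
+ * L2_LRN_CNT_CTRL0..9 at 0x0a87..0x0a90 appear to expose per-port
+ * learned-entry counters for ports 0..9; port 10 uses 0x0a92 because
+ * 0x0a91 is occupied by LUT_LRN_UNDER_STATUS.  Reading port 10's
+ * count (accessor hypothetical):
+ *
+ *	unsigned int cnt;
+ *	rtl8367c_getAsicReg(RTL8367C_REG_L2_LRN_CNT_CTRL10, &cnt);
+ *	cnt &= RTL8367C_L2_LRN_CNT_CTRL10_MASK;
+ */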
+
+#define    RTL8367C_REG_L2_SA_MOVING_FORBID    0x0aa0
+#define    RTL8367C_L2_SA_MOVING_FORBID_OFFSET    0
+#define    RTL8367C_L2_SA_MOVING_FORBID_MASK    0x7FF
+
+#define    RTL8367C_REG_DRPORT_LEARN_CTRL    0x0aa1
+#define    RTL8367C_FORBID1_OFFSET    1
+#define    RTL8367C_FORBID1_MASK    0x2
+#define    RTL8367C_FORBID0_OFFSET    0
+#define    RTL8367C_FORBID0_MASK    0x1
+
+#define    RTL8367C_REG_L2_DUMMY02    0x0aa2
+
+#define    RTL8367C_REG_L2_DUMMY03    0x0aa3
+
+#define    RTL8367C_REG_L2_DUMMY04    0x0aa4
+
+#define    RTL8367C_REG_L2_DUMMY05    0x0aa5
+
+#define    RTL8367C_REG_L2_DUMMY06    0x0aa6
+
+#define    RTL8367C_REG_L2_DUMMY07    0x0aa7
+
+#define    RTL8367C_REG_IPMC_GROUP_PMSK_00    0x0AC0
+#define    RTL8367C_IPMC_GROUP_PMSK_00_OFFSET    0
+#define    RTL8367C_IPMC_GROUP_PMSK_00_MASK    0x7FF
+
+#define    RTL8367C_REG_IPMC_GROUP_PMSK_01    0x0AC1
+#define    RTL8367C_IPMC_GROUP_PMSK_01_OFFSET    0
+#define    RTL8367C_IPMC_GROUP_PMSK_01_MASK    0x7FF
+
+#define    RTL8367C_REG_IPMC_GROUP_PMSK_02    0x0AC2
+#define    RTL8367C_IPMC_GROUP_PMSK_02_OFFSET    0
+#define    RTL8367C_IPMC_GROUP_PMSK_02_MASK    0x7FF
+
+#define    RTL8367C_REG_IPMC_GROUP_PMSK_03    0x0AC3
+#define    RTL8367C_IPMC_GROUP_PMSK_03_OFFSET    0
+#define    RTL8367C_IPMC_GROUP_PMSK_03_MASK    0x7FF
+
+#define    RTL8367C_REG_IPMC_GROUP_PMSK_04    0x0AC4
+#define    RTL8367C_IPMC_GROUP_PMSK_04_OFFSET    0
+#define    RTL8367C_IPMC_GROUP_PMSK_04_MASK    0x7FF
+
+#define    RTL8367C_REG_IPMC_GROUP_PMSK_05    0x0AC5
+#define    RTL8367C_IPMC_GROUP_PMSK_05_OFFSET    0
+#define    RTL8367C_IPMC_GROUP_PMSK_05_MASK    0x7FF
+
+#define    RTL8367C_REG_IPMC_GROUP_PMSK_06    0x0AC6
+#define    RTL8367C_IPMC_GROUP_PMSK_06_OFFSET    0
+#define    RTL8367C_IPMC_GROUP_PMSK_06_MASK    0x7FF
+
+#define    RTL8367C_REG_IPMC_GROUP_PMSK_07    0x0AC7
+#define    RTL8367C_IPMC_GROUP_PMSK_07_OFFSET    0
+#define    RTL8367C_IPMC_GROUP_PMSK_07_MASK    0x7FF
+
+#define    RTL8367C_REG_IPMC_GROUP_PMSK_08    0x0AC8
+#define    RTL8367C_IPMC_GROUP_PMSK_08_OFFSET    0
+#define    RTL8367C_IPMC_GROUP_PMSK_08_MASK    0x7FF
+
+#define    RTL8367C_REG_IPMC_GROUP_PMSK_09    0x0AC9
+#define    RTL8367C_IPMC_GROUP_PMSK_09_OFFSET    0
+#define    RTL8367C_IPMC_GROUP_PMSK_09_MASK    0x7FF
+
+#define    RTL8367C_REG_IPMC_GROUP_PMSK_10    0x0ACA
+#define    RTL8367C_IPMC_GROUP_PMSK_10_OFFSET    0
+#define    RTL8367C_IPMC_GROUP_PMSK_10_MASK    0x7FF
+
+#define    RTL8367C_REG_IPMC_GROUP_PMSK_11    0x0ACB
+#define    RTL8367C_IPMC_GROUP_PMSK_11_OFFSET    0
+#define    RTL8367C_IPMC_GROUP_PMSK_11_MASK    0x7FF
+
+#define    RTL8367C_REG_IPMC_GROUP_PMSK_12    0x0ACC
+#define    RTL8367C_IPMC_GROUP_PMSK_12_OFFSET    0
+#define    RTL8367C_IPMC_GROUP_PMSK_12_MASK    0x7FF
+
+#define    RTL8367C_REG_IPMC_GROUP_PMSK_13    0x0ACD
+#define    RTL8367C_IPMC_GROUP_PMSK_13_OFFSET    0
+#define    RTL8367C_IPMC_GROUP_PMSK_13_MASK    0x7FF
+
+#define    RTL8367C_REG_IPMC_GROUP_PMSK_14    0x0ACE
+#define    RTL8367C_IPMC_GROUP_PMSK_14_OFFSET    0
+#define    RTL8367C_IPMC_GROUP_PMSK_14_MASK    0x7FF
+
+#define    RTL8367C_REG_IPMC_GROUP_PMSK_15    0x0ACF
+#define    RTL8367C_IPMC_GROUP_PMSK_15_OFFSET    0
+#define    RTL8367C_IPMC_GROUP_PMSK_15_MASK    0x7FF
+
+#define    RTL8367C_REG_IPMC_GROUP_PMSK_16    0x0AD0
+#define    RTL8367C_IPMC_GROUP_PMSK_16_OFFSET    0
+#define    RTL8367C_IPMC_GROUP_PMSK_16_MASK    0x7FF
+
+#define    RTL8367C_REG_IPMC_GROUP_PMSK_17    0x0AD1
+#define    RTL8367C_IPMC_GROUP_PMSK_17_OFFSET    0
+#define    RTL8367C_IPMC_GROUP_PMSK_17_MASK    0x7FF
+
+#define    RTL8367C_REG_IPMC_GROUP_PMSK_18    0x0AD2
+#define    RTL8367C_IPMC_GROUP_PMSK_18_OFFSET    0
+#define    RTL8367C_IPMC_GROUP_PMSK_18_MASK    0x7FF
+
+#define    RTL8367C_REG_IPMC_GROUP_PMSK_19    0x0AD3
+#define    RTL8367C_IPMC_GROUP_PMSK_19_OFFSET    0
+#define    RTL8367C_IPMC_GROUP_PMSK_19_MASK    0x7FF
+
+#define    RTL8367C_REG_IPMC_GROUP_PMSK_20    0x0AD4
+#define    RTL8367C_IPMC_GROUP_PMSK_20_OFFSET    0
+#define    RTL8367C_IPMC_GROUP_PMSK_20_MASK    0x7FF
+
+#define    RTL8367C_REG_IPMC_GROUP_PMSK_21    0x0AD5
+#define    RTL8367C_IPMC_GROUP_PMSK_21_OFFSET    0
+#define    RTL8367C_IPMC_GROUP_PMSK_21_MASK    0x7FF
+
+#define    RTL8367C_REG_IPMC_GROUP_PMSK_22    0x0AD6
+#define    RTL8367C_IPMC_GROUP_PMSK_22_OFFSET    0
+#define    RTL8367C_IPMC_GROUP_PMSK_22_MASK    0x7FF
+
+#define    RTL8367C_REG_IPMC_GROUP_PMSK_23    0x0AD7
+#define    RTL8367C_IPMC_GROUP_PMSK_23_OFFSET    0
+#define    RTL8367C_IPMC_GROUP_PMSK_23_MASK    0x7FF
+
+#define    RTL8367C_REG_IPMC_GROUP_PMSK_24    0x0AD8
+#define    RTL8367C_IPMC_GROUP_PMSK_24_OFFSET    0
+#define    RTL8367C_IPMC_GROUP_PMSK_24_MASK    0x7FF
+
+#define    RTL8367C_REG_IPMC_GROUP_PMSK_25    0x0AD9
+#define    RTL8367C_IPMC_GROUP_PMSK_25_OFFSET    0
+#define    RTL8367C_IPMC_GROUP_PMSK_25_MASK    0x7FF
+
+#define    RTL8367C_REG_IPMC_GROUP_PMSK_26    0x0ADA
+#define    RTL8367C_IPMC_GROUP_PMSK_26_OFFSET    0
+#define    RTL8367C_IPMC_GROUP_PMSK_26_MASK    0x7FF
+
+#define    RTL8367C_REG_IPMC_GROUP_PMSK_27    0x0ADB
+#define    RTL8367C_IPMC_GROUP_PMSK_27_OFFSET    0
+#define    RTL8367C_IPMC_GROUP_PMSK_27_MASK    0x7FF
+
+#define    RTL8367C_REG_IPMC_GROUP_PMSK_28    0x0ADC
+#define    RTL8367C_IPMC_GROUP_PMSK_28_OFFSET    0
+#define    RTL8367C_IPMC_GROUP_PMSK_28_MASK    0x7FF
+
+#define    RTL8367C_REG_IPMC_GROUP_PMSK_29    0x0ADD
+#define    RTL8367C_IPMC_GROUP_PMSK_29_OFFSET    0
+#define    RTL8367C_IPMC_GROUP_PMSK_29_MASK    0x7FF
+
+#define    RTL8367C_REG_IPMC_GROUP_PMSK_30    0x0ADE
+#define    RTL8367C_IPMC_GROUP_PMSK_30_OFFSET    0
+#define    RTL8367C_IPMC_GROUP_PMSK_30_MASK    0x7FF
+
+#define    RTL8367C_REG_IPMC_GROUP_PMSK_31    0x0ADF
+#define    RTL8367C_IPMC_GROUP_PMSK_31_OFFSET    0
+#define    RTL8367C_IPMC_GROUP_PMSK_31_MASK    0x7FF
+
+#define    RTL8367C_REG_IPMC_GROUP_PMSK_32    0x0AE0
+#define    RTL8367C_IPMC_GROUP_PMSK_32_OFFSET    0
+#define    RTL8367C_IPMC_GROUP_PMSK_32_MASK    0x7FF
+
+#define    RTL8367C_REG_IPMC_GROUP_PMSK_33    0x0AE1
+#define    RTL8367C_IPMC_GROUP_PMSK_33_OFFSET    0
+#define    RTL8367C_IPMC_GROUP_PMSK_33_MASK    0x7FF
+
+#define    RTL8367C_REG_IPMC_GROUP_PMSK_34    0x0AE2
+#define    RTL8367C_IPMC_GROUP_PMSK_34_OFFSET    0
+#define    RTL8367C_IPMC_GROUP_PMSK_34_MASK    0x7FF
+
+#define    RTL8367C_REG_IPMC_GROUP_PMSK_35    0x0AE3
+#define    RTL8367C_IPMC_GROUP_PMSK_35_OFFSET    0
+#define    RTL8367C_IPMC_GROUP_PMSK_35_MASK    0x7FF
+
+#define    RTL8367C_REG_IPMC_GROUP_PMSK_36    0x0AE4
+#define    RTL8367C_IPMC_GROUP_PMSK_36_OFFSET    0
+#define    RTL8367C_IPMC_GROUP_PMSK_36_MASK    0x7FF
+
+#define    RTL8367C_REG_IPMC_GROUP_PMSK_37    0x0AE5
+#define    RTL8367C_IPMC_GROUP_PMSK_37_OFFSET    0
+#define    RTL8367C_IPMC_GROUP_PMSK_37_MASK    0x7FF
+
+#define    RTL8367C_REG_IPMC_GROUP_PMSK_38    0x0AE6
+#define    RTL8367C_IPMC_GROUP_PMSK_38_OFFSET    0
+#define    RTL8367C_IPMC_GROUP_PMSK_38_MASK    0x7FF
+
+#define    RTL8367C_REG_IPMC_GROUP_PMSK_39    0x0AE7
+#define    RTL8367C_IPMC_GROUP_PMSK_39_OFFSET    0
+#define    RTL8367C_IPMC_GROUP_PMSK_39_MASK    0x7FF
+
+#define    RTL8367C_REG_IPMC_GROUP_PMSK_40    0x0AE8
+#define    RTL8367C_IPMC_GROUP_PMSK_40_OFFSET    0
+#define    RTL8367C_IPMC_GROUP_PMSK_40_MASK    0x7FF
+
+#define    RTL8367C_REG_IPMC_GROUP_PMSK_41    0x0AE9
+#define    RTL8367C_IPMC_GROUP_PMSK_41_OFFSET    0
+#define    RTL8367C_IPMC_GROUP_PMSK_41_MASK    0x7FF
+
+#define    RTL8367C_REG_IPMC_GROUP_PMSK_42    0x0AEA
+#define    RTL8367C_IPMC_GROUP_PMSK_42_OFFSET    0
+#define    RTL8367C_IPMC_GROUP_PMSK_42_MASK    0x7FF
+
+#define    RTL8367C_REG_IPMC_GROUP_PMSK_43    0x0AEB
+#define    RTL8367C_IPMC_GROUP_PMSK_43_OFFSET    0
+#define    RTL8367C_IPMC_GROUP_PMSK_43_MASK    0x7FF
+
+#define    RTL8367C_REG_IPMC_GROUP_PMSK_44    0x0AEC
+#define    RTL8367C_IPMC_GROUP_PMSK_44_OFFSET    0
+#define    RTL8367C_IPMC_GROUP_PMSK_44_MASK    0x7FF
+
+#define    RTL8367C_REG_IPMC_GROUP_PMSK_45    0x0AED
+#define    RTL8367C_IPMC_GROUP_PMSK_45_OFFSET    0
+#define    RTL8367C_IPMC_GROUP_PMSK_45_MASK    0x7FF
+
+#define    RTL8367C_REG_IPMC_GROUP_PMSK_46    0x0AEE
+#define    RTL8367C_IPMC_GROUP_PMSK_46_OFFSET    0
+#define    RTL8367C_IPMC_GROUP_PMSK_46_MASK    0x7FF
+
+#define    RTL8367C_REG_IPMC_GROUP_PMSK_47    0x0AEF
+#define    RTL8367C_IPMC_GROUP_PMSK_47_OFFSET    0
+#define    RTL8367C_IPMC_GROUP_PMSK_47_MASK    0x7FF
+
+#define    RTL8367C_REG_IPMC_GROUP_PMSK_48    0x0AF0
+#define    RTL8367C_IPMC_GROUP_PMSK_48_OFFSET    0
+#define    RTL8367C_IPMC_GROUP_PMSK_48_MASK    0x7FF
+
+#define    RTL8367C_REG_IPMC_GROUP_PMSK_49    0x0AF1
+#define    RTL8367C_IPMC_GROUP_PMSK_49_OFFSET    0
+#define    RTL8367C_IPMC_GROUP_PMSK_49_MASK    0x7FF
+
+#define    RTL8367C_REG_IPMC_GROUP_PMSK_50    0x0AF2
+#define    RTL8367C_IPMC_GROUP_PMSK_50_OFFSET    0
+#define    RTL8367C_IPMC_GROUP_PMSK_50_MASK    0x7FF
+
+#define    RTL8367C_REG_IPMC_GROUP_PMSK_51    0x0AF3
+#define    RTL8367C_IPMC_GROUP_PMSK_51_OFFSET    0
+#define    RTL8367C_IPMC_GROUP_PMSK_51_MASK    0x7FF
+
+#define    RTL8367C_REG_IPMC_GROUP_PMSK_52    0x0AF4
+#define    RTL8367C_IPMC_GROUP_PMSK_52_OFFSET    0
+#define    RTL8367C_IPMC_GROUP_PMSK_52_MASK    0x7FF
+
+#define    RTL8367C_REG_IPMC_GROUP_PMSK_53    0x0AF5
+#define    RTL8367C_IPMC_GROUP_PMSK_53_OFFSET    0
+#define    RTL8367C_IPMC_GROUP_PMSK_53_MASK    0x7FF
+
+#define    RTL8367C_REG_IPMC_GROUP_PMSK_54    0x0AF6
+#define    RTL8367C_IPMC_GROUP_PMSK_54_OFFSET    0
+#define    RTL8367C_IPMC_GROUP_PMSK_54_MASK    0x7FF
+
+#define    RTL8367C_REG_IPMC_GROUP_PMSK_55    0x0AF7
+#define    RTL8367C_IPMC_GROUP_PMSK_55_OFFSET    0
+#define    RTL8367C_IPMC_GROUP_PMSK_55_MASK    0x7FF
+
+#define    RTL8367C_REG_IPMC_GROUP_PMSK_56    0x0AF8
+#define    RTL8367C_IPMC_GROUP_PMSK_56_OFFSET    0
+#define    RTL8367C_IPMC_GROUP_PMSK_56_MASK    0x7FF
+
+#define    RTL8367C_REG_IPMC_GROUP_PMSK_57    0x0AF9
+#define    RTL8367C_IPMC_GROUP_PMSK_57_OFFSET    0
+#define    RTL8367C_IPMC_GROUP_PMSK_57_MASK    0x7FF
+
+#define    RTL8367C_REG_IPMC_GROUP_PMSK_58    0x0AFA
+#define    RTL8367C_IPMC_GROUP_PMSK_58_OFFSET    0
+#define    RTL8367C_IPMC_GROUP_PMSK_58_MASK    0x7FF
+
+#define    RTL8367C_REG_IPMC_GROUP_PMSK_59    0x0AFB
+#define    RTL8367C_IPMC_GROUP_PMSK_59_OFFSET    0
+#define    RTL8367C_IPMC_GROUP_PMSK_59_MASK    0x7FF
+
+#define    RTL8367C_REG_IPMC_GROUP_PMSK_60    0x0AFC
+#define    RTL8367C_IPMC_GROUP_PMSK_60_OFFSET    0
+#define    RTL8367C_IPMC_GROUP_PMSK_60_MASK    0x7FF
+
+#define    RTL8367C_REG_IPMC_GROUP_PMSK_61    0x0AFD
+#define    RTL8367C_IPMC_GROUP_PMSK_61_OFFSET    0
+#define    RTL8367C_IPMC_GROUP_PMSK_61_MASK    0x7FF
+
+#define    RTL8367C_REG_IPMC_GROUP_PMSK_62    0x0AFE
+#define    RTL8367C_IPMC_GROUP_PMSK_62_OFFSET    0
+#define    RTL8367C_IPMC_GROUP_PMSK_62_MASK    0x7FF
+
+#define    RTL8367C_REG_IPMC_GROUP_PMSK_63    0x0AFF
+#define    RTL8367C_IPMC_GROUP_PMSK_63_OFFSET    0
+#define    RTL8367C_IPMC_GROUP_PMSK_63_MASK    0x7FF
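+
+/*
+ * The 64 IPMC group port masks are one register each, 11 bits wide,
+ * at consecutive addresses, so the register for group g is simply:
+ *
+ *	reg = RTL8367C_REG_IPMC_GROUP_PMSK_00 + g;	for g in 0..63
+ *
+ * (an address-pattern observation from the list above).
+ */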
+
+/* (16'h0b00)mltvlan_reg */
+
+#define    RTL8367C_REG_SVLAN_MCAST2S_ENTRY0_CTRL0    0x0b00
+#define    RTL8367C_SVLAN_MCAST2S_ENTRY0_CTRL0_VALID_OFFSET    7
+#define    RTL8367C_SVLAN_MCAST2S_ENTRY0_CTRL0_VALID_MASK    0x80
+#define    RTL8367C_SVLAN_MCAST2S_ENTRY0_CTRL0_FORMAT_OFFSET    6
+#define    RTL8367C_SVLAN_MCAST2S_ENTRY0_CTRL0_FORMAT_MASK    0x40
+#define    RTL8367C_SVLAN_MCAST2S_ENTRY0_CTRL0_SVIDX_OFFSET    0
+#define    RTL8367C_SVLAN_MCAST2S_ENTRY0_CTRL0_SVIDX_MASK    0x3F
+
+#define    RTL8367C_REG_SVLAN_MCAST2S_ENTRY0_CTRL1    0x0b01
+
+#define    RTL8367C_REG_SVLAN_MCAST2S_ENTRY0_CTRL2    0x0b02
+
+#define    RTL8367C_REG_SVLAN_MCAST2S_ENTRY0_CTRL3    0x0b03
+
+#define    RTL8367C_REG_SVLAN_MCAST2S_ENTRY0_CTRL4    0x0b04
+
+#define    RTL8367C_REG_SVLAN_MCAST2S_ENTRY1_CTRL0    0x0b05
+#define    RTL8367C_SVLAN_MCAST2S_ENTRY1_CTRL0_VALID_OFFSET    7
+#define    RTL8367C_SVLAN_MCAST2S_ENTRY1_CTRL0_VALID_MASK    0x80
+#define    RTL8367C_SVLAN_MCAST2S_ENTRY1_CTRL0_FORMAT_OFFSET    6
+#define    RTL8367C_SVLAN_MCAST2S_ENTRY1_CTRL0_FORMAT_MASK    0x40
+#define    RTL8367C_SVLAN_MCAST2S_ENTRY1_CTRL0_SVIDX_OFFSET    0
+#define    RTL8367C_SVLAN_MCAST2S_ENTRY1_CTRL0_SVIDX_MASK    0x3F
+
+#define    RTL8367C_REG_SVLAN_MCAST2S_ENTRY1_CTRL1    0x0b06
+
+#define    RTL8367C_REG_SVLAN_MCAST2S_ENTRY1_CTRL2    0x0b07
+
+#define    RTL8367C_REG_SVLAN_MCAST2S_ENTRY1_CTRL3    0x0b08
+
+#define    RTL8367C_REG_SVLAN_MCAST2S_ENTRY1_CTRL4    0x0b09
+
+#define    RTL8367C_REG_SVLAN_MCAST2S_ENTRY2_CTRL0    0x0b0a
+#define    RTL8367C_SVLAN_MCAST2S_ENTRY2_CTRL0_VALID_OFFSET    7
+#define    RTL8367C_SVLAN_MCAST2S_ENTRY2_CTRL0_VALID_MASK    0x80
+#define    RTL8367C_SVLAN_MCAST2S_ENTRY2_CTRL0_FORMAT_OFFSET    6
+#define    RTL8367C_SVLAN_MCAST2S_ENTRY2_CTRL0_FORMAT_MASK    0x40
+#define    RTL8367C_SVLAN_MCAST2S_ENTRY2_CTRL0_SVIDX_OFFSET    0
+#define    RTL8367C_SVLAN_MCAST2S_ENTRY2_CTRL0_SVIDX_MASK    0x3F
+
+#define    RTL8367C_REG_SVLAN_MCAST2S_ENTRY2_CTRL1    0x0b0b
+
+#define    RTL8367C_REG_SVLAN_MCAST2S_ENTRY2_CTRL2    0x0b0c
+
+#define    RTL8367C_REG_SVLAN_MCAST2S_ENTRY2_CTRL3    0x0b0d
+
+#define    RTL8367C_REG_SVLAN_MCAST2S_ENTRY2_CTRL4    0x0b0e
+
+#define    RTL8367C_REG_SVLAN_MCAST2S_ENTRY3_CTRL0    0x0b0f
+#define    RTL8367C_SVLAN_MCAST2S_ENTRY3_CTRL0_VALID_OFFSET    7
+#define    RTL8367C_SVLAN_MCAST2S_ENTRY3_CTRL0_VALID_MASK    0x80
+#define    RTL8367C_SVLAN_MCAST2S_ENTRY3_CTRL0_FORMAT_OFFSET    6
+#define    RTL8367C_SVLAN_MCAST2S_ENTRY3_CTRL0_FORMAT_MASK    0x40
+#define    RTL8367C_SVLAN_MCAST2S_ENTRY3_CTRL0_SVIDX_OFFSET    0
+#define    RTL8367C_SVLAN_MCAST2S_ENTRY3_CTRL0_SVIDX_MASK    0x3F
+
+#define    RTL8367C_REG_SVLAN_MCAST2S_ENTRY3_CTRL1    0x0b10
+
+#define    RTL8367C_REG_SVLAN_MCAST2S_ENTRY3_CTRL2    0x0b11
+
+#define    RTL8367C_REG_SVLAN_MCAST2S_ENTRY3_CTRL3    0x0b12
+
+#define    RTL8367C_REG_SVLAN_MCAST2S_ENTRY3_CTRL4    0x0b13
+
+#define    RTL8367C_REG_SVLAN_MCAST2S_ENTRY4_CTRL0    0x0b14
+#define    RTL8367C_SVLAN_MCAST2S_ENTRY4_CTRL0_VALID_OFFSET    7
+#define    RTL8367C_SVLAN_MCAST2S_ENTRY4_CTRL0_VALID_MASK    0x80
+#define    RTL8367C_SVLAN_MCAST2S_ENTRY4_CTRL0_FORMAT_OFFSET    6
+#define    RTL8367C_SVLAN_MCAST2S_ENTRY4_CTRL0_FORMAT_MASK    0x40
+#define    RTL8367C_SVLAN_MCAST2S_ENTRY4_CTRL0_SVIDX_OFFSET    0
+#define    RTL8367C_SVLAN_MCAST2S_ENTRY4_CTRL0_SVIDX_MASK    0x3F
+
+#define    RTL8367C_REG_SVLAN_MCAST2S_ENTRY4_CTRL1    0x0b15
+
+#define    RTL8367C_REG_SVLAN_MCAST2S_ENTRY4_CTRL2    0x0b16
+
+#define    RTL8367C_REG_SVLAN_MCAST2S_ENTRY4_CTRL3    0x0b17
+
+#define    RTL8367C_REG_SVLAN_MCAST2S_ENTRY4_CTRL4    0x0b18
+
+#define    RTL8367C_REG_SVLAN_MCAST2S_ENTRY5_CTRL0    0x0b19
+#define    RTL8367C_SVLAN_MCAST2S_ENTRY5_CTRL0_VALID_OFFSET    7
+#define    RTL8367C_SVLAN_MCAST2S_ENTRY5_CTRL0_VALID_MASK    0x80
+#define    RTL8367C_SVLAN_MCAST2S_ENTRY5_CTRL0_FORMAT_OFFSET    6
+#define    RTL8367C_SVLAN_MCAST2S_ENTRY5_CTRL0_FORMAT_MASK    0x40
+#define    RTL8367C_SVLAN_MCAST2S_ENTRY5_CTRL0_SVIDX_OFFSET    0
+#define    RTL8367C_SVLAN_MCAST2S_ENTRY5_CTRL0_SVIDX_MASK    0x3F
+
+#define    RTL8367C_REG_SVLAN_MCAST2S_ENTRY5_CTRL1    0x0b1a
+
+#define    RTL8367C_REG_SVLAN_MCAST2S_ENTRY5_CTRL2    0x0b1b
+
+#define    RTL8367C_REG_SVLAN_MCAST2S_ENTRY5_CTRL3    0x0b1c
+
+#define    RTL8367C_REG_SVLAN_MCAST2S_ENTRY5_CTRL4    0x0b1d
+
+#define    RTL8367C_REG_SVLAN_MCAST2S_ENTRY6_CTRL0    0x0b1e
+#define    RTL8367C_SVLAN_MCAST2S_ENTRY6_CTRL0_VALID_OFFSET    7
+#define    RTL8367C_SVLAN_MCAST2S_ENTRY6_CTRL0_VALID_MASK    0x80
+#define    RTL8367C_SVLAN_MCAST2S_ENTRY6_CTRL0_FORMAT_OFFSET    6
+#define    RTL8367C_SVLAN_MCAST2S_ENTRY6_CTRL0_FORMAT_MASK    0x40
+#define    RTL8367C_SVLAN_MCAST2S_ENTRY6_CTRL0_SVIDX_OFFSET    0
+#define    RTL8367C_SVLAN_MCAST2S_ENTRY6_CTRL0_SVIDX_MASK    0x3F
+
+#define    RTL8367C_REG_SVLAN_MCAST2S_ENTRY6_CTRL1    0x0b1f
+
+#define    RTL8367C_REG_SVLAN_MCAST2S_ENTRY6_CTRL2    0x0b20
+
+#define    RTL8367C_REG_SVLAN_MCAST2S_ENTRY6_CTRL3    0x0b21
+
+#define    RTL8367C_REG_SVLAN_MCAST2S_ENTRY6_CTRL4    0x0b22
+
+#define    RTL8367C_REG_SVLAN_MCAST2S_ENTRY7_CTRL0    0x0b23
+#define    RTL8367C_SVLAN_MCAST2S_ENTRY7_CTRL0_VALID_OFFSET    7
+#define    RTL8367C_SVLAN_MCAST2S_ENTRY7_CTRL0_VALID_MASK    0x80
+#define    RTL8367C_SVLAN_MCAST2S_ENTRY7_CTRL0_FORMAT_OFFSET    6
+#define    RTL8367C_SVLAN_MCAST2S_ENTRY7_CTRL0_FORMAT_MASK    0x40
+#define    RTL8367C_SVLAN_MCAST2S_ENTRY7_CTRL0_SVIDX_OFFSET    0
+#define    RTL8367C_SVLAN_MCAST2S_ENTRY7_CTRL0_SVIDX_MASK    0x3F
+
+#define    RTL8367C_REG_SVLAN_MCAST2S_ENTRY7_CTRL1    0x0b24
+
+#define    RTL8367C_REG_SVLAN_MCAST2S_ENTRY7_CTRL2    0x0b25
+
+#define    RTL8367C_REG_SVLAN_MCAST2S_ENTRY7_CTRL3    0x0b26
+
+#define    RTL8367C_REG_SVLAN_MCAST2S_ENTRY7_CTRL4    0x0b27
+
+#define    RTL8367C_REG_SVLAN_MCAST2S_ENTRY8_CTRL0    0x0b28
+#define    RTL8367C_SVLAN_MCAST2S_ENTRY8_CTRL0_VALID_OFFSET    7
+#define    RTL8367C_SVLAN_MCAST2S_ENTRY8_CTRL0_VALID_MASK    0x80
+#define    RTL8367C_SVLAN_MCAST2S_ENTRY8_CTRL0_FORMAT_OFFSET    6
+#define    RTL8367C_SVLAN_MCAST2S_ENTRY8_CTRL0_FORMAT_MASK    0x40
+#define    RTL8367C_SVLAN_MCAST2S_ENTRY8_CTRL0_SVIDX_OFFSET    0
+#define    RTL8367C_SVLAN_MCAST2S_ENTRY8_CTRL0_SVIDX_MASK    0x3F
+
+#define    RTL8367C_REG_SVLAN_MCAST2S_ENTRY8_CTRL1    0x0b29
+
+#define    RTL8367C_REG_SVLAN_MCAST2S_ENTRY8_CTRL2    0x0b2a
+
+#define    RTL8367C_REG_SVLAN_MCAST2S_ENTRY8_CTRL3    0x0b2b
+
+#define    RTL8367C_REG_SVLAN_MCAST2S_ENTRY8_CTRL4    0x0b2c
+
+#define    RTL8367C_REG_SVLAN_MCAST2S_ENTRY9_CTRL0    0x0b2d
+#define    RTL8367C_SVLAN_MCAST2S_ENTRY9_CTRL0_VALID_OFFSET    7
+#define    RTL8367C_SVLAN_MCAST2S_ENTRY9_CTRL0_VALID_MASK    0x80
+#define    RTL8367C_SVLAN_MCAST2S_ENTRY9_CTRL0_FORMAT_OFFSET    6
+#define    RTL8367C_SVLAN_MCAST2S_ENTRY9_CTRL0_FORMAT_MASK    0x40
+#define    RTL8367C_SVLAN_MCAST2S_ENTRY9_CTRL0_SVIDX_OFFSET    0
+#define    RTL8367C_SVLAN_MCAST2S_ENTRY9_CTRL0_SVIDX_MASK    0x3F
+
+#define    RTL8367C_REG_SVLAN_MCAST2S_ENTRY9_CTRL1    0x0b2e
+
+#define    RTL8367C_REG_SVLAN_MCAST2S_ENTRY9_CTRL2    0x0b2f
+
+#define    RTL8367C_REG_SVLAN_MCAST2S_ENTRY9_CTRL3    0x0b30
+
+#define    RTL8367C_REG_SVLAN_MCAST2S_ENTRY9_CTRL4    0x0b31
+
+#define    RTL8367C_REG_SVLAN_MCAST2S_ENTRY10_CTRL0    0x0b32
+#define    RTL8367C_SVLAN_MCAST2S_ENTRY10_CTRL0_VALID_OFFSET    7
+#define    RTL8367C_SVLAN_MCAST2S_ENTRY10_CTRL0_VALID_MASK    0x80
+#define    RTL8367C_SVLAN_MCAST2S_ENTRY10_CTRL0_FORMAT_OFFSET    6
+#define    RTL8367C_SVLAN_MCAST2S_ENTRY10_CTRL0_FORMAT_MASK    0x40
+#define    RTL8367C_SVLAN_MCAST2S_ENTRY10_CTRL0_SVIDX_OFFSET    0
+#define    RTL8367C_SVLAN_MCAST2S_ENTRY10_CTRL0_SVIDX_MASK    0x3F
+
+#define    RTL8367C_REG_SVLAN_MCAST2S_ENTRY10_CTRL1    0x0b33
+
+#define    RTL8367C_REG_SVLAN_MCAST2S_ENTRY10_CTRL2    0x0b34
+
+#define    RTL8367C_REG_SVLAN_MCAST2S_ENTRY10_CTRL3    0x0b35
+
+#define    RTL8367C_REG_SVLAN_MCAST2S_ENTRY10_CTRL4    0x0b36
+
+#define    RTL8367C_REG_SVLAN_MCAST2S_ENTRY11_CTRL0    0x0b37
+#define    RTL8367C_SVLAN_MCAST2S_ENTRY11_CTRL0_VALID_OFFSET    7
+#define    RTL8367C_SVLAN_MCAST2S_ENTRY11_CTRL0_VALID_MASK    0x80
+#define    RTL8367C_SVLAN_MCAST2S_ENTRY11_CTRL0_FORMAT_OFFSET    6
+#define    RTL8367C_SVLAN_MCAST2S_ENTRY11_CTRL0_FORMAT_MASK    0x40
+#define    RTL8367C_SVLAN_MCAST2S_ENTRY11_CTRL0_SVIDX_OFFSET    0
+#define    RTL8367C_SVLAN_MCAST2S_ENTRY11_CTRL0_SVIDX_MASK    0x3F
+
+#define    RTL8367C_REG_SVLAN_MCAST2S_ENTRY11_CTRL1    0x0b38
+
+#define    RTL8367C_REG_SVLAN_MCAST2S_ENTRY11_CTRL2    0x0b39
+
+#define    RTL8367C_REG_SVLAN_MCAST2S_ENTRY11_CTRL3    0x0b3a
+
+#define    RTL8367C_REG_SVLAN_MCAST2S_ENTRY11_CTRL4    0x0b3b
+
+#define    RTL8367C_REG_SVLAN_MCAST2S_ENTRY12_CTRL0    0x0b3c
+#define    RTL8367C_SVLAN_MCAST2S_ENTRY12_CTRL0_VALID_OFFSET    7
+#define    RTL8367C_SVLAN_MCAST2S_ENTRY12_CTRL0_VALID_MASK    0x80
+#define    RTL8367C_SVLAN_MCAST2S_ENTRY12_CTRL0_FORMAT_OFFSET    6
+#define    RTL8367C_SVLAN_MCAST2S_ENTRY12_CTRL0_FORMAT_MASK    0x40
+#define    RTL8367C_SVLAN_MCAST2S_ENTRY12_CTRL0_SVIDX_OFFSET    0
+#define    RTL8367C_SVLAN_MCAST2S_ENTRY12_CTRL0_SVIDX_MASK    0x3F
+
+#define    RTL8367C_REG_SVLAN_MCAST2S_ENTRY12_CTRL1    0x0b3d
+
+#define    RTL8367C_REG_SVLAN_MCAST2S_ENTRY12_CTRL2    0x0b3e
+
+#define    RTL8367C_REG_SVLAN_MCAST2S_ENTRY12_CTRL3    0x0b3f
+
+#define    RTL8367C_REG_SVLAN_MCAST2S_ENTRY12_CTRL4    0x0b40
+
+#define    RTL8367C_REG_SVLAN_MCAST2S_ENTRY13_CTRL0    0x0b41
+#define    RTL8367C_SVLAN_MCAST2S_ENTRY13_CTRL0_VALID_OFFSET    7
+#define    RTL8367C_SVLAN_MCAST2S_ENTRY13_CTRL0_VALID_MASK    0x80
+#define    RTL8367C_SVLAN_MCAST2S_ENTRY13_CTRL0_FORMAT_OFFSET    6
+#define    RTL8367C_SVLAN_MCAST2S_ENTRY13_CTRL0_FORMAT_MASK    0x40
+#define    RTL8367C_SVLAN_MCAST2S_ENTRY13_CTRL0_SVIDX_OFFSET    0
+#define    RTL8367C_SVLAN_MCAST2S_ENTRY13_CTRL0_SVIDX_MASK    0x3F
+
+#define    RTL8367C_REG_SVLAN_MCAST2S_ENTRY13_CTRL1    0x0b42
+
+#define    RTL8367C_REG_SVLAN_MCAST2S_ENTRY13_CTRL2    0x0b43
+
+#define    RTL8367C_REG_SVLAN_MCAST2S_ENTRY13_CTRL3    0x0b44
+
+#define    RTL8367C_REG_SVLAN_MCAST2S_ENTRY13_CTRL4    0x0b45
+
+#define    RTL8367C_REG_SVLAN_MCAST2S_ENTRY14_CTRL0    0x0b46
+#define    RTL8367C_SVLAN_MCAST2S_ENTRY14_CTRL0_VALID_OFFSET    7
+#define    RTL8367C_SVLAN_MCAST2S_ENTRY14_CTRL0_VALID_MASK    0x80
+#define    RTL8367C_SVLAN_MCAST2S_ENTRY14_CTRL0_FORMAT_OFFSET    6
+#define    RTL8367C_SVLAN_MCAST2S_ENTRY14_CTRL0_FORMAT_MASK    0x40
+#define    RTL8367C_SVLAN_MCAST2S_ENTRY14_CTRL0_SVIDX_OFFSET    0
+#define    RTL8367C_SVLAN_MCAST2S_ENTRY14_CTRL0_SVIDX_MASK    0x3F
+
+#define    RTL8367C_REG_SVLAN_MCAST2S_ENTRY14_CTRL1    0x0b47
+
+#define    RTL8367C_REG_SVLAN_MCAST2S_ENTRY14_CTRL2    0x0b48
+
+#define    RTL8367C_REG_SVLAN_MCAST2S_ENTRY14_CTRL3    0x0b49
+
+#define    RTL8367C_REG_SVLAN_MCAST2S_ENTRY14_CTRL4    0x0b4a
+
+#define    RTL8367C_REG_SVLAN_MCAST2S_ENTRY15_CTRL0    0x0b4b
+#define    RTL8367C_SVLAN_MCAST2S_ENTRY15_CTRL0_VALID_OFFSET    7
+#define    RTL8367C_SVLAN_MCAST2S_ENTRY15_CTRL0_VALID_MASK    0x80
+#define    RTL8367C_SVLAN_MCAST2S_ENTRY15_CTRL0_FORMAT_OFFSET    6
+#define    RTL8367C_SVLAN_MCAST2S_ENTRY15_CTRL0_FORMAT_MASK    0x40
+#define    RTL8367C_SVLAN_MCAST2S_ENTRY15_CTRL0_SVIDX_OFFSET    0
+#define    RTL8367C_SVLAN_MCAST2S_ENTRY15_CTRL0_SVIDX_MASK    0x3F
+
+#define    RTL8367C_REG_SVLAN_MCAST2S_ENTRY15_CTRL1    0x0b4c
+
+#define    RTL8367C_REG_SVLAN_MCAST2S_ENTRY15_CTRL2    0x0b4d
+
+#define    RTL8367C_REG_SVLAN_MCAST2S_ENTRY15_CTRL3    0x0b4e
+
+#define    RTL8367C_REG_SVLAN_MCAST2S_ENTRY15_CTRL4    0x0b4f
+
+#define    RTL8367C_REG_SVLAN_MCAST2S_ENTRY16_CTRL0    0x0b50
+#define    RTL8367C_SVLAN_MCAST2S_ENTRY16_CTRL0_VALID_OFFSET    7
+#define    RTL8367C_SVLAN_MCAST2S_ENTRY16_CTRL0_VALID_MASK    0x80
+#define    RTL8367C_SVLAN_MCAST2S_ENTRY16_CTRL0_FORMAT_OFFSET    6
+#define    RTL8367C_SVLAN_MCAST2S_ENTRY16_CTRL0_FORMAT_MASK    0x40
+#define    RTL8367C_SVLAN_MCAST2S_ENTRY16_CTRL0_SVIDX_OFFSET    0
+#define    RTL8367C_SVLAN_MCAST2S_ENTRY16_CTRL0_SVIDX_MASK    0x3F
+
+#define    RTL8367C_REG_SVLAN_MCAST2S_ENTRY16_CTRL1    0x0b51
+
+#define    RTL8367C_REG_SVLAN_MCAST2S_ENTRY16_CTRL2    0x0b52
+
+#define    RTL8367C_REG_SVLAN_MCAST2S_ENTRY16_CTRL3    0x0b53
+
+#define    RTL8367C_REG_SVLAN_MCAST2S_ENTRY16_CTRL4    0x0b54
+
+#define    RTL8367C_REG_SVLAN_MCAST2S_ENTRY17_CTRL0    0x0b55
+#define    RTL8367C_SVLAN_MCAST2S_ENTRY17_CTRL0_VALID_OFFSET    7
+#define    RTL8367C_SVLAN_MCAST2S_ENTRY17_CTRL0_VALID_MASK    0x80
+#define    RTL8367C_SVLAN_MCAST2S_ENTRY17_CTRL0_FORMAT_OFFSET    6
+#define    RTL8367C_SVLAN_MCAST2S_ENTRY17_CTRL0_FORMAT_MASK    0x40
+#define    RTL8367C_SVLAN_MCAST2S_ENTRY17_CTRL0_SVIDX_OFFSET    0
+#define    RTL8367C_SVLAN_MCAST2S_ENTRY17_CTRL0_SVIDX_MASK    0x3F
+
+#define    RTL8367C_REG_SVLAN_MCAST2S_ENTRY17_CTRL1    0x0b56
+
+#define    RTL8367C_REG_SVLAN_MCAST2S_ENTRY17_CTRL2    0x0b57
+
+#define    RTL8367C_REG_SVLAN_MCAST2S_ENTRY17_CTRL3    0x0b58
+
+#define    RTL8367C_REG_SVLAN_MCAST2S_ENTRY17_CTRL4    0x0b59
+
+#define    RTL8367C_REG_SVLAN_MCAST2S_ENTRY18_CTRL0    0x0b5a
+#define    RTL8367C_SVLAN_MCAST2S_ENTRY18_CTRL0_VALID_OFFSET    7
+#define    RTL8367C_SVLAN_MCAST2S_ENTRY18_CTRL0_VALID_MASK    0x80
+#define    RTL8367C_SVLAN_MCAST2S_ENTRY18_CTRL0_FORMAT_OFFSET    6
+#define    RTL8367C_SVLAN_MCAST2S_ENTRY18_CTRL0_FORMAT_MASK    0x40
+#define    RTL8367C_SVLAN_MCAST2S_ENTRY18_CTRL0_SVIDX_OFFSET    0
+#define    RTL8367C_SVLAN_MCAST2S_ENTRY18_CTRL0_SVIDX_MASK    0x3F
+
+#define    RTL8367C_REG_SVLAN_MCAST2S_ENTRY18_CTRL1    0x0b5b
+
+#define    RTL8367C_REG_SVLAN_MCAST2S_ENTRY18_CTRL2    0x0b5c
+
+#define    RTL8367C_REG_SVLAN_MCAST2S_ENTRY18_CTRL3    0x0b5d
+
+#define    RTL8367C_REG_SVLAN_MCAST2S_ENTRY18_CTRL4    0x0b5e
+
+#define    RTL8367C_REG_SVLAN_MCAST2S_ENTRY19_CTRL0    0x0b5f
+#define    RTL8367C_SVLAN_MCAST2S_ENTRY19_CTRL0_VALID_OFFSET    7
+#define    RTL8367C_SVLAN_MCAST2S_ENTRY19_CTRL0_VALID_MASK    0x80
+#define    RTL8367C_SVLAN_MCAST2S_ENTRY19_CTRL0_FORMAT_OFFSET    6
+#define    RTL8367C_SVLAN_MCAST2S_ENTRY19_CTRL0_FORMAT_MASK    0x40
+#define    RTL8367C_SVLAN_MCAST2S_ENTRY19_CTRL0_SVIDX_OFFSET    0
+#define    RTL8367C_SVLAN_MCAST2S_ENTRY19_CTRL0_SVIDX_MASK    0x3F
+
+#define    RTL8367C_REG_SVLAN_MCAST2S_ENTRY19_CTRL1    0x0b60
+
+#define    RTL8367C_REG_SVLAN_MCAST2S_ENTRY19_CTRL2    0x0b61
+
+#define    RTL8367C_REG_SVLAN_MCAST2S_ENTRY19_CTRL3    0x0b62
+
+#define    RTL8367C_REG_SVLAN_MCAST2S_ENTRY19_CTRL4    0x0b63
+
+#define    RTL8367C_REG_SVLAN_MCAST2S_ENTRY20_CTRL0    0x0b64
+#define    RTL8367C_SVLAN_MCAST2S_ENTRY20_CTRL0_VALID_OFFSET    7
+#define    RTL8367C_SVLAN_MCAST2S_ENTRY20_CTRL0_VALID_MASK    0x80
+#define    RTL8367C_SVLAN_MCAST2S_ENTRY20_CTRL0_FORMAT_OFFSET    6
+#define    RTL8367C_SVLAN_MCAST2S_ENTRY20_CTRL0_FORMAT_MASK    0x40
+#define    RTL8367C_SVLAN_MCAST2S_ENTRY20_CTRL0_SVIDX_OFFSET    0
+#define    RTL8367C_SVLAN_MCAST2S_ENTRY20_CTRL0_SVIDX_MASK    0x3F
+
+#define    RTL8367C_REG_SVLAN_MCAST2S_ENTRY20_CTRL1    0x0b65
+
+#define    RTL8367C_REG_SVLAN_MCAST2S_ENTRY20_CTRL2    0x0b66
+
+#define    RTL8367C_REG_SVLAN_MCAST2S_ENTRY20_CTRL3    0x0b67
+
+#define    RTL8367C_REG_SVLAN_MCAST2S_ENTRY20_CTRL4    0x0b68
+
+#define    RTL8367C_REG_SVLAN_MCAST2S_ENTRY21_CTRL0    0x0b69
+#define    RTL8367C_SVLAN_MCAST2S_ENTRY21_CTRL0_VALID_OFFSET    7
+#define    RTL8367C_SVLAN_MCAST2S_ENTRY21_CTRL0_VALID_MASK    0x80
+#define    RTL8367C_SVLAN_MCAST2S_ENTRY21_CTRL0_FORMAT_OFFSET    6
+#define    RTL8367C_SVLAN_MCAST2S_ENTRY21_CTRL0_FORMAT_MASK    0x40
+#define    RTL8367C_SVLAN_MCAST2S_ENTRY21_CTRL0_SVIDX_OFFSET    0
+#define    RTL8367C_SVLAN_MCAST2S_ENTRY21_CTRL0_SVIDX_MASK    0x3F
+
+#define    RTL8367C_REG_SVLAN_MCAST2S_ENTRY21_CTRL1    0x0b6a
+
+#define    RTL8367C_REG_SVLAN_MCAST2S_ENTRY21_CTRL2    0x0b6b
+
+#define    RTL8367C_REG_SVLAN_MCAST2S_ENTRY21_CTRL3    0x0b6c
+
+#define    RTL8367C_REG_SVLAN_MCAST2S_ENTRY21_CTRL4    0x0b6d
+
+#define    RTL8367C_REG_SVLAN_MCAST2S_ENTRY22_CTRL0    0x0b6e
+#define    RTL8367C_SVLAN_MCAST2S_ENTRY22_CTRL0_VALID_OFFSET    7
+#define    RTL8367C_SVLAN_MCAST2S_ENTRY22_CTRL0_VALID_MASK    0x80
+#define    RTL8367C_SVLAN_MCAST2S_ENTRY22_CTRL0_FORMAT_OFFSET    6
+#define    RTL8367C_SVLAN_MCAST2S_ENTRY22_CTRL0_FORMAT_MASK    0x40
+#define    RTL8367C_SVLAN_MCAST2S_ENTRY22_CTRL0_SVIDX_OFFSET    0
+#define    RTL8367C_SVLAN_MCAST2S_ENTRY22_CTRL0_SVIDX_MASK    0x3F
+
+#define    RTL8367C_REG_SVLAN_MCAST2S_ENTRY22_CTRL1    0x0b6f
+
+#define    RTL8367C_REG_SVLAN_MCAST2S_ENTRY22_CTRL2    0x0b70
+
+#define    RTL8367C_REG_SVLAN_MCAST2S_ENTRY22_CTRL3    0x0b71
+
+#define    RTL8367C_REG_SVLAN_MCAST2S_ENTRY22_CTRL4    0x0b72
+
+#define    RTL8367C_REG_SVLAN_MCAST2S_ENTRY23_CTRL0    0x0b73
+#define    RTL8367C_SVLAN_MCAST2S_ENTRY23_CTRL0_VALID_OFFSET    7
+#define    RTL8367C_SVLAN_MCAST2S_ENTRY23_CTRL0_VALID_MASK    0x80
+#define    RTL8367C_SVLAN_MCAST2S_ENTRY23_CTRL0_FORMAT_OFFSET    6
+#define    RTL8367C_SVLAN_MCAST2S_ENTRY23_CTRL0_FORMAT_MASK    0x40
+#define    RTL8367C_SVLAN_MCAST2S_ENTRY23_CTRL0_SVIDX_OFFSET    0
+#define    RTL8367C_SVLAN_MCAST2S_ENTRY23_CTRL0_SVIDX_MASK    0x3F
+
+#define    RTL8367C_REG_SVLAN_MCAST2S_ENTRY23_CTRL1    0x0b74
+
+#define    RTL8367C_REG_SVLAN_MCAST2S_ENTRY23_CTRL2    0x0b75
+
+#define    RTL8367C_REG_SVLAN_MCAST2S_ENTRY23_CTRL3    0x0b76
+
+#define    RTL8367C_REG_SVLAN_MCAST2S_ENTRY23_CTRL4    0x0b77
+
+#define    RTL8367C_REG_SVLAN_MCAST2S_ENTRY24_CTRL0    0x0b78
+#define    RTL8367C_SVLAN_MCAST2S_ENTRY24_CTRL0_VALID_OFFSET    7
+#define    RTL8367C_SVLAN_MCAST2S_ENTRY24_CTRL0_VALID_MASK    0x80
+#define    RTL8367C_SVLAN_MCAST2S_ENTRY24_CTRL0_FORMAT_OFFSET    6
+#define    RTL8367C_SVLAN_MCAST2S_ENTRY24_CTRL0_FORMAT_MASK    0x40
+#define    RTL8367C_SVLAN_MCAST2S_ENTRY24_CTRL0_SVIDX_OFFSET    0
+#define    RTL8367C_SVLAN_MCAST2S_ENTRY24_CTRL0_SVIDX_MASK    0x3F
+
+#define    RTL8367C_REG_SVLAN_MCAST2S_ENTRY24_CTRL1    0x0b79
+
+#define    RTL8367C_REG_SVLAN_MCAST2S_ENTRY24_CTRL2    0x0b7a
+
+#define    RTL8367C_REG_SVLAN_MCAST2S_ENTRY24_CTRL3    0x0b7b
+
+#define    RTL8367C_REG_SVLAN_MCAST2S_ENTRY24_CTRL4    0x0b7c
+
+#define    RTL8367C_REG_SVLAN_MCAST2S_ENTRY25_CTRL0    0x0b7d
+#define    RTL8367C_SVLAN_MCAST2S_ENTRY25_CTRL0_VALID_OFFSET    7
+#define    RTL8367C_SVLAN_MCAST2S_ENTRY25_CTRL0_VALID_MASK    0x80
+#define    RTL8367C_SVLAN_MCAST2S_ENTRY25_CTRL0_FORMAT_OFFSET    6
+#define    RTL8367C_SVLAN_MCAST2S_ENTRY25_CTRL0_FORMAT_MASK    0x40
+#define    RTL8367C_SVLAN_MCAST2S_ENTRY25_CTRL0_SVIDX_OFFSET    0
+#define    RTL8367C_SVLAN_MCAST2S_ENTRY25_CTRL0_SVIDX_MASK    0x3F
+
+#define    RTL8367C_REG_SVLAN_MCAST2S_ENTRY25_CTRL1    0x0b7e
+
+#define    RTL8367C_REG_SVLAN_MCAST2S_ENTRY25_CTRL2    0x0b7f
+
+#define    RTL8367C_REG_SVLAN_MCAST2S_ENTRY25_CTRL3    0x0b80
+
+#define    RTL8367C_REG_SVLAN_MCAST2S_ENTRY25_CTRL4    0x0b81
+
+#define    RTL8367C_REG_SVLAN_MCAST2S_ENTRY26_CTRL0    0x0b82
+#define    RTL8367C_SVLAN_MCAST2S_ENTRY26_CTRL0_VALID_OFFSET    7
+#define    RTL8367C_SVLAN_MCAST2S_ENTRY26_CTRL0_VALID_MASK    0x80
+#define    RTL8367C_SVLAN_MCAST2S_ENTRY26_CTRL0_FORMAT_OFFSET    6
+#define    RTL8367C_SVLAN_MCAST2S_ENTRY26_CTRL0_FORMAT_MASK    0x40
+#define    RTL8367C_SVLAN_MCAST2S_ENTRY26_CTRL0_SVIDX_OFFSET    0
+#define    RTL8367C_SVLAN_MCAST2S_ENTRY26_CTRL0_SVIDX_MASK    0x3F
+
+#define    RTL8367C_REG_SVLAN_MCAST2S_ENTRY26_CTRL1    0x0b83
+
+#define    RTL8367C_REG_SVLAN_MCAST2S_ENTRY26_CTRL2    0x0b84
+
+#define    RTL8367C_REG_SVLAN_MCAST2S_ENTRY26_CTRL3    0x0b85
+
+#define    RTL8367C_REG_SVLAN_MCAST2S_ENTRY26_CTRL4    0x0b86
+
+#define    RTL8367C_REG_SVLAN_MCAST2S_ENTRY27_CTRL0    0x0b87
+#define    RTL8367C_SVLAN_MCAST2S_ENTRY27_CTRL0_VALID_OFFSET    7
+#define    RTL8367C_SVLAN_MCAST2S_ENTRY27_CTRL0_VALID_MASK    0x80
+#define    RTL8367C_SVLAN_MCAST2S_ENTRY27_CTRL0_FORMAT_OFFSET    6
+#define    RTL8367C_SVLAN_MCAST2S_ENTRY27_CTRL0_FORMAT_MASK    0x40
+#define    RTL8367C_SVLAN_MCAST2S_ENTRY27_CTRL0_SVIDX_OFFSET    0
+#define    RTL8367C_SVLAN_MCAST2S_ENTRY27_CTRL0_SVIDX_MASK    0x3F
+
+#define    RTL8367C_REG_SVLAN_MCAST2S_ENTRY27_CTRL1    0x0b88
+
+#define    RTL8367C_REG_SVLAN_MCAST2S_ENTRY27_CTRL2    0x0b89
+
+#define    RTL8367C_REG_SVLAN_MCAST2S_ENTRY27_CTRL3    0x0b8a
+
+#define    RTL8367C_REG_SVLAN_MCAST2S_ENTRY27_CTRL4    0x0b8b
+
+#define    RTL8367C_REG_SVLAN_MCAST2S_ENTRY28_CTRL0    0x0b8c
+#define    RTL8367C_SVLAN_MCAST2S_ENTRY28_CTRL0_VALID_OFFSET    7
+#define    RTL8367C_SVLAN_MCAST2S_ENTRY28_CTRL0_VALID_MASK    0x80
+#define    RTL8367C_SVLAN_MCAST2S_ENTRY28_CTRL0_FORMAT_OFFSET    6
+#define    RTL8367C_SVLAN_MCAST2S_ENTRY28_CTRL0_FORMAT_MASK    0x40
+#define    RTL8367C_SVLAN_MCAST2S_ENTRY28_CTRL0_SVIDX_OFFSET    0
+#define    RTL8367C_SVLAN_MCAST2S_ENTRY28_CTRL0_SVIDX_MASK    0x3F
+
+#define    RTL8367C_REG_SVLAN_MCAST2S_ENTRY28_CTRL1    0x0b8d
+
+#define    RTL8367C_REG_SVLAN_MCAST2S_ENTRY28_CTRL2    0x0b8e
+
+#define    RTL8367C_REG_SVLAN_MCAST2S_ENTRY28_CTRL3    0x0b8f
+
+#define    RTL8367C_REG_SVLAN_MCAST2S_ENTRY28_CTRL4    0x0b90
+
+#define    RTL8367C_REG_SVLAN_MCAST2S_ENTRY29_CTRL0    0x0b91
+#define    RTL8367C_SVLAN_MCAST2S_ENTRY29_CTRL0_VALID_OFFSET    7
+#define    RTL8367C_SVLAN_MCAST2S_ENTRY29_CTRL0_VALID_MASK    0x80
+#define    RTL8367C_SVLAN_MCAST2S_ENTRY29_CTRL0_FORMAT_OFFSET    6
+#define    RTL8367C_SVLAN_MCAST2S_ENTRY29_CTRL0_FORMAT_MASK    0x40
+#define    RTL8367C_SVLAN_MCAST2S_ENTRY29_CTRL0_SVIDX_OFFSET    0
+#define    RTL8367C_SVLAN_MCAST2S_ENTRY29_CTRL0_SVIDX_MASK    0x3F
+
+#define    RTL8367C_REG_SVLAN_MCAST2S_ENTRY29_CTRL1    0x0b92
+
+#define    RTL8367C_REG_SVLAN_MCAST2S_ENTRY29_CTRL2    0x0b93
+
+#define    RTL8367C_REG_SVLAN_MCAST2S_ENTRY29_CTRL3    0x0b94
+
+#define    RTL8367C_REG_SVLAN_MCAST2S_ENTRY29_CTRL4    0x0b95
+
+#define    RTL8367C_REG_SVLAN_MCAST2S_ENTRY30_CTRL0    0x0b96
+#define    RTL8367C_SVLAN_MCAST2S_ENTRY30_CTRL0_VALID_OFFSET    7
+#define    RTL8367C_SVLAN_MCAST2S_ENTRY30_CTRL0_VALID_MASK    0x80
+#define    RTL8367C_SVLAN_MCAST2S_ENTRY30_CTRL0_FORMAT_OFFSET    6
+#define    RTL8367C_SVLAN_MCAST2S_ENTRY30_CTRL0_FORMAT_MASK    0x40
+#define    RTL8367C_SVLAN_MCAST2S_ENTRY30_CTRL0_SVIDX_OFFSET    0
+#define    RTL8367C_SVLAN_MCAST2S_ENTRY30_CTRL0_SVIDX_MASK    0x3F
+
+#define    RTL8367C_REG_SVLAN_MCAST2S_ENTRY30_CTRL1    0x0b97
+
+#define    RTL8367C_REG_SVLAN_MCAST2S_ENTRY30_CTRL2    0x0b98
+
+#define    RTL8367C_REG_SVLAN_MCAST2S_ENTRY30_CTRL3    0x0b99
+
+#define    RTL8367C_REG_SVLAN_MCAST2S_ENTRY30_CTRL4    0x0b9a
+
+#define    RTL8367C_REG_SVLAN_MCAST2S_ENTRY31_CTRL0    0x0b9b
+#define    RTL8367C_SVLAN_MCAST2S_ENTRY31_CTRL0_VALID_OFFSET    7
+#define    RTL8367C_SVLAN_MCAST2S_ENTRY31_CTRL0_VALID_MASK    0x80
+#define    RTL8367C_SVLAN_MCAST2S_ENTRY31_CTRL0_FORMAT_OFFSET    6
+#define    RTL8367C_SVLAN_MCAST2S_ENTRY31_CTRL0_FORMAT_MASK    0x40
+#define    RTL8367C_SVLAN_MCAST2S_ENTRY31_CTRL0_SVIDX_OFFSET    0
+#define    RTL8367C_SVLAN_MCAST2S_ENTRY31_CTRL0_SVIDX_MASK    0x3F
+
+#define    RTL8367C_REG_SVLAN_MCAST2S_ENTRY31_CTRL1    0x0b9c
+
+#define    RTL8367C_REG_SVLAN_MCAST2S_ENTRY31_CTRL2    0x0b9d
+
+#define    RTL8367C_REG_SVLAN_MCAST2S_ENTRY31_CTRL3    0x0b9e
+
+#define    RTL8367C_REG_SVLAN_MCAST2S_ENTRY31_CTRL4    0x0b9f
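+
+/*
+ * Each MCAST2S entry occupies five consecutive registers (CTRL0..4),
+ * so entry e's CTRL0 sits at 0x0b00 + 5 * e for e in 0..31.  Only
+ * CTRL0 is bit-decoded here (VALID, FORMAT, SVIDX); CTRL1..4 carry the
+ * entry's match data and are left undecoded in this header.  Pattern:
+ *
+ *	ctrl0 = RTL8367C_REG_SVLAN_MCAST2S_ENTRY0_CTRL0 + 5 * e;
+ */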
+
+#define    RTL8367C_REG_MLTVLAN_DUMMY_0    0x0ba0
+
+#define    RTL8367C_REG_MLTVLAN_DUMMY_1    0x0ba1
+
+#define    RTL8367C_REG_MLTVLAN_DUMMY_2    0x0ba2
+
+#define    RTL8367C_REG_MLTVLAN_DUMMY_3    0x0ba3
+
+#define    RTL8367C_REG_MLTVLAN_DUMMY_4    0x0ba4
+
+#define    RTL8367C_REG_MLTVLAN_DUMMY_5    0x0ba5
+
+#define    RTL8367C_REG_MLTVLAN_DUMMY_6    0x0ba6
+
+#define    RTL8367C_REG_MLTVLAN_DUMMY_7    0x0ba7
+
+/* (16'h0c00)svlan_reg */
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG0_CTRL1    0x0c01
+#define    RTL8367C_SVLAN_MEMBERCFG0_CTRL1_VS_UNTAGSET_OFFSET    8
+#define    RTL8367C_SVLAN_MEMBERCFG0_CTRL1_VS_UNTAGSET_MASK    0xFF00
+#define    RTL8367C_SVLAN_MEMBERCFG0_CTRL1_VS_SMBR_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG0_CTRL1_VS_SMBR_MASK    0xFF
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG0_CTRL2    0x0c02
+#define    RTL8367C_SVLAN_MEMBERCFG0_CTRL2_VS_FIDEN_OFFSET    7
+#define    RTL8367C_SVLAN_MEMBERCFG0_CTRL2_VS_FIDEN_MASK    0x80
+#define    RTL8367C_SVLAN_MEMBERCFG0_CTRL2_VS_SPRI_OFFSET    4
+#define    RTL8367C_SVLAN_MEMBERCFG0_CTRL2_VS_SPRI_MASK    0x70
+#define    RTL8367C_SVLAN_MEMBERCFG0_CTRL2_VS_FID_MSTI_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG0_CTRL2_VS_FID_MSTI_MASK    0xF
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG0_CTRL3    0x0c03
+#define    RTL8367C_SVLAN_MEMBERCFG0_CTRL3_VS_EFID_OFFSET    13
+#define    RTL8367C_SVLAN_MEMBERCFG0_CTRL3_VS_EFID_MASK    0xE000
+#define    RTL8367C_SVLAN_MEMBERCFG0_CTRL3_VS_EFIDEN_OFFSET    12
+#define    RTL8367C_SVLAN_MEMBERCFG0_CTRL3_VS_EFIDEN_MASK    0x1000
+#define    RTL8367C_SVLAN_MEMBERCFG0_CTRL3_VS_SVID_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG0_CTRL3_VS_SVID_MASK    0xFFF
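
The repeated _OFFSET/_MASK pairs describe bit fields inside each 16-bit register: a field is read by masking then shifting down, and written by the reverse. A minimal sketch of that conventional pattern using the MEMBERCFG0 names above (the helper functions are hypothetical, not part of the vendor API):

#include <stdint.h>

/* Extract VS_SVID (bits 11:0) from a CTRL3 register value. */
static inline uint16_t svlan_cfg0_svid(uint16_t ctrl3)
{
	return (ctrl3 & RTL8367C_SVLAN_MEMBERCFG0_CTRL3_VS_SVID_MASK)
	       >> RTL8367C_SVLAN_MEMBERCFG0_CTRL3_VS_SVID_OFFSET;
}

/* Write a new VS_SVID, leaving the EFID/EFIDEN bits untouched. */
static inline uint16_t svlan_cfg0_set_svid(uint16_t ctrl3, uint16_t svid)
{
	ctrl3 &= ~RTL8367C_SVLAN_MEMBERCFG0_CTRL3_VS_SVID_MASK;
	ctrl3 |= (svid << RTL8367C_SVLAN_MEMBERCFG0_CTRL3_VS_SVID_OFFSET)
	         & RTL8367C_SVLAN_MEMBERCFG0_CTRL3_VS_SVID_MASK;
	return ctrl3;
}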
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG1_CTRL1    0x0c04
+#define    RTL8367C_SVLAN_MEMBERCFG1_CTRL1_VS_UNTAGSET_OFFSET    8
+#define    RTL8367C_SVLAN_MEMBERCFG1_CTRL1_VS_UNTAGSET_MASK    0xFF00
+#define    RTL8367C_SVLAN_MEMBERCFG1_CTRL1_VS_SMBR_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG1_CTRL1_VS_SMBR_MASK    0xFF
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG1_CTRL2    0x0c05
+#define    RTL8367C_SVLAN_MEMBERCFG1_CTRL2_VS_FIDEN_OFFSET    7
+#define    RTL8367C_SVLAN_MEMBERCFG1_CTRL2_VS_FIDEN_MASK    0x80
+#define    RTL8367C_SVLAN_MEMBERCFG1_CTRL2_VS_SPRI_OFFSET    4
+#define    RTL8367C_SVLAN_MEMBERCFG1_CTRL2_VS_SPRI_MASK    0x70
+#define    RTL8367C_SVLAN_MEMBERCFG1_CTRL2_VS_FID_MSTI_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG1_CTRL2_VS_FID_MSTI_MASK    0xF
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG1_CTRL3    0x0c06
+#define    RTL8367C_SVLAN_MEMBERCFG1_CTRL3_VS_EFID_OFFSET    13
+#define    RTL8367C_SVLAN_MEMBERCFG1_CTRL3_VS_EFID_MASK    0xE000
+#define    RTL8367C_SVLAN_MEMBERCFG1_CTRL3_VS_EFIDEN_OFFSET    12
+#define    RTL8367C_SVLAN_MEMBERCFG1_CTRL3_VS_EFIDEN_MASK    0x1000
+#define    RTL8367C_SVLAN_MEMBERCFG1_CTRL3_VS_SVID_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG1_CTRL3_VS_SVID_MASK    0xFFF
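
Each member configuration occupies three consecutive registers (CTRL1 through CTRL3), so the addresses step by 3 per index: CFG0_CTRL1 is 0x0c01, CFG1_CTRL1 is 0x0c04, and so on through CFG47 at the end of this block. A sketch of the address arithmetic (illustrative macro, not in this header):

/* SVLAN_MEMBERCFGn_CTRLc = 0x0c01 + n * 3 + (c - 1), c = 1..3.
 * Checks against this hunk: n = 47, c = 3 gives 0x0c90. */
#define SVLAN_MEMBERCFG_REG(n, c)    (0x0c01 + (n) * 3 + ((c) - 1))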
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG2_CTRL1    0x0c07
+#define    RTL8367C_SVLAN_MEMBERCFG2_CTRL1_VS_UNTAGSET_OFFSET    8
+#define    RTL8367C_SVLAN_MEMBERCFG2_CTRL1_VS_UNTAGSET_MASK    0xFF00
+#define    RTL8367C_SVLAN_MEMBERCFG2_CTRL1_VS_SMBR_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG2_CTRL1_VS_SMBR_MASK    0xFF
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG2_CTRL2    0x0c08
+#define    RTL8367C_SVLAN_MEMBERCFG2_CTRL2_VS_FIDEN_OFFSET    7
+#define    RTL8367C_SVLAN_MEMBERCFG2_CTRL2_VS_FIDEN_MASK    0x80
+#define    RTL8367C_SVLAN_MEMBERCFG2_CTRL2_VS_SPRI_OFFSET    4
+#define    RTL8367C_SVLAN_MEMBERCFG2_CTRL2_VS_SPRI_MASK    0x70
+#define    RTL8367C_SVLAN_MEMBERCFG2_CTRL2_VS_FID_MSTI_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG2_CTRL2_VS_FID_MSTI_MASK    0xF
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG2_CTRL3    0x0c09
+#define    RTL8367C_SVLAN_MEMBERCFG2_CTRL3_VS_EFID_OFFSET    13
+#define    RTL8367C_SVLAN_MEMBERCFG2_CTRL3_VS_EFID_MASK    0xE000
+#define    RTL8367C_SVLAN_MEMBERCFG2_CTRL3_VS_EFIDEN_OFFSET    12
+#define    RTL8367C_SVLAN_MEMBERCFG2_CTRL3_VS_EFIDEN_MASK    0x1000
+#define    RTL8367C_SVLAN_MEMBERCFG2_CTRL3_VS_SVID_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG2_CTRL3_VS_SVID_MASK    0xFFF
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG3_CTRL1    0x0c0a
+#define    RTL8367C_SVLAN_MEMBERCFG3_CTRL1_VS_UNTAGSET_OFFSET    8
+#define    RTL8367C_SVLAN_MEMBERCFG3_CTRL1_VS_UNTAGSET_MASK    0xFF00
+#define    RTL8367C_SVLAN_MEMBERCFG3_CTRL1_VS_SMBR_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG3_CTRL1_VS_SMBR_MASK    0xFF
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG3_CTRL2    0x0c0b
+#define    RTL8367C_SVLAN_MEMBERCFG3_CTRL2_VS_FIDEN_OFFSET    7
+#define    RTL8367C_SVLAN_MEMBERCFG3_CTRL2_VS_FIDEN_MASK    0x80
+#define    RTL8367C_SVLAN_MEMBERCFG3_CTRL2_VS_SPRI_OFFSET    4
+#define    RTL8367C_SVLAN_MEMBERCFG3_CTRL2_VS_SPRI_MASK    0x70
+#define    RTL8367C_SVLAN_MEMBERCFG3_CTRL2_VS_FID_MSTI_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG3_CTRL2_VS_FID_MSTI_MASK    0xF
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG3_CTRL3    0x0c0c
+#define    RTL8367C_SVLAN_MEMBERCFG3_CTRL3_VS_EFID_OFFSET    13
+#define    RTL8367C_SVLAN_MEMBERCFG3_CTRL3_VS_EFID_MASK    0xE000
+#define    RTL8367C_SVLAN_MEMBERCFG3_CTRL3_VS_EFIDEN_OFFSET    12
+#define    RTL8367C_SVLAN_MEMBERCFG3_CTRL3_VS_EFIDEN_MASK    0x1000
+#define    RTL8367C_SVLAN_MEMBERCFG3_CTRL3_VS_SVID_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG3_CTRL3_VS_SVID_MASK    0xFFF
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG4_CTRL1    0x0c0d
+#define    RTL8367C_SVLAN_MEMBERCFG4_CTRL1_VS_UNTAGSET_OFFSET    8
+#define    RTL8367C_SVLAN_MEMBERCFG4_CTRL1_VS_UNTAGSET_MASK    0xFF00
+#define    RTL8367C_SVLAN_MEMBERCFG4_CTRL1_VS_SMBR_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG4_CTRL1_VS_SMBR_MASK    0xFF
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG4_CTRL2    0x0c0e
+#define    RTL8367C_SVLAN_MEMBERCFG4_CTRL2_VS_FIDEN_OFFSET    7
+#define    RTL8367C_SVLAN_MEMBERCFG4_CTRL2_VS_FIDEN_MASK    0x80
+#define    RTL8367C_SVLAN_MEMBERCFG4_CTRL2_VS_SPRI_OFFSET    4
+#define    RTL8367C_SVLAN_MEMBERCFG4_CTRL2_VS_SPRI_MASK    0x70
+#define    RTL8367C_SVLAN_MEMBERCFG4_CTRL2_VS_FID_MSTI_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG4_CTRL2_VS_FID_MSTI_MASK    0xF
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG4_CTRL3    0x0c0f
+#define    RTL8367C_SVLAN_MEMBERCFG4_CTRL3_VS_EFID_OFFSET    13
+#define    RTL8367C_SVLAN_MEMBERCFG4_CTRL3_VS_EFID_MASK    0xE000
+#define    RTL8367C_SVLAN_MEMBERCFG4_CTRL3_VS_EFIDEN_OFFSET    12
+#define    RTL8367C_SVLAN_MEMBERCFG4_CTRL3_VS_EFIDEN_MASK    0x1000
+#define    RTL8367C_SVLAN_MEMBERCFG4_CTRL3_VS_SVID_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG4_CTRL3_VS_SVID_MASK    0xFFF
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG5_CTRL1    0x0c10
+#define    RTL8367C_SVLAN_MEMBERCFG5_CTRL1_VS_UNTAGSET_OFFSET    8
+#define    RTL8367C_SVLAN_MEMBERCFG5_CTRL1_VS_UNTAGSET_MASK    0xFF00
+#define    RTL8367C_SVLAN_MEMBERCFG5_CTRL1_VS_SMBR_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG5_CTRL1_VS_SMBR_MASK    0xFF
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG5_CTRL2    0x0c11
+#define    RTL8367C_SVLAN_MEMBERCFG5_CTRL2_VS_FIDEN_OFFSET    7
+#define    RTL8367C_SVLAN_MEMBERCFG5_CTRL2_VS_FIDEN_MASK    0x80
+#define    RTL8367C_SVLAN_MEMBERCFG5_CTRL2_VS_SPRI_OFFSET    4
+#define    RTL8367C_SVLAN_MEMBERCFG5_CTRL2_VS_SPRI_MASK    0x70
+#define    RTL8367C_SVLAN_MEMBERCFG5_CTRL2_VS_FID_MSTI_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG5_CTRL2_VS_FID_MSTI_MASK    0xF
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG5_CTRL3    0x0c12
+#define    RTL8367C_SVLAN_MEMBERCFG5_CTRL3_VS_EFID_OFFSET    13
+#define    RTL8367C_SVLAN_MEMBERCFG5_CTRL3_VS_EFID_MASK    0xE000
+#define    RTL8367C_SVLAN_MEMBERCFG5_CTRL3_VS_EFIDEN_OFFSET    12
+#define    RTL8367C_SVLAN_MEMBERCFG5_CTRL3_VS_EFIDEN_MASK    0x1000
+#define    RTL8367C_SVLAN_MEMBERCFG5_CTRL3_VS_SVID_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG5_CTRL3_VS_SVID_MASK    0xFFF
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG6_CTRL1    0x0c13
+#define    RTL8367C_SVLAN_MEMBERCFG6_CTRL1_VS_UNTAGSET_OFFSET    8
+#define    RTL8367C_SVLAN_MEMBERCFG6_CTRL1_VS_UNTAGSET_MASK    0xFF00
+#define    RTL8367C_SVLAN_MEMBERCFG6_CTRL1_VS_SMBR_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG6_CTRL1_VS_SMBR_MASK    0xFF
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG6_CTRL2    0x0c14
+#define    RTL8367C_SVLAN_MEMBERCFG6_CTRL2_VS_FIDEN_OFFSET    7
+#define    RTL8367C_SVLAN_MEMBERCFG6_CTRL2_VS_FIDEN_MASK    0x80
+#define    RTL8367C_SVLAN_MEMBERCFG6_CTRL2_VS_SPRI_OFFSET    4
+#define    RTL8367C_SVLAN_MEMBERCFG6_CTRL2_VS_SPRI_MASK    0x70
+#define    RTL8367C_SVLAN_MEMBERCFG6_CTRL2_VS_FID_MSTI_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG6_CTRL2_VS_FID_MSTI_MASK    0xF
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG6_CTRL3    0x0c15
+#define    RTL8367C_SVLAN_MEMBERCFG6_CTRL3_VS_EFID_OFFSET    13
+#define    RTL8367C_SVLAN_MEMBERCFG6_CTRL3_VS_EFID_MASK    0xE000
+#define    RTL8367C_SVLAN_MEMBERCFG6_CTRL3_VS_EFIDEN_OFFSET    12
+#define    RTL8367C_SVLAN_MEMBERCFG6_CTRL3_VS_EFIDEN_MASK    0x1000
+#define    RTL8367C_SVLAN_MEMBERCFG6_CTRL3_VS_SVID_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG6_CTRL3_VS_SVID_MASK    0xFFF
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG7_CTRL1    0x0c16
+#define    RTL8367C_SVLAN_MEMBERCFG7_CTRL1_VS_UNTAGSET_OFFSET    8
+#define    RTL8367C_SVLAN_MEMBERCFG7_CTRL1_VS_UNTAGSET_MASK    0xFF00
+#define    RTL8367C_SVLAN_MEMBERCFG7_CTRL1_VS_SMBR_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG7_CTRL1_VS_SMBR_MASK    0xFF
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG7_CTRL2    0x0c17
+#define    RTL8367C_SVLAN_MEMBERCFG7_CTRL2_VS_FIDEN_OFFSET    7
+#define    RTL8367C_SVLAN_MEMBERCFG7_CTRL2_VS_FIDEN_MASK    0x80
+#define    RTL8367C_SVLAN_MEMBERCFG7_CTRL2_VS_SPRI_OFFSET    4
+#define    RTL8367C_SVLAN_MEMBERCFG7_CTRL2_VS_SPRI_MASK    0x70
+#define    RTL8367C_SVLAN_MEMBERCFG7_CTRL2_VS_FID_MSTI_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG7_CTRL2_VS_FID_MSTI_MASK    0xF
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG7_CTRL3    0x0c18
+#define    RTL8367C_SVLAN_MEMBERCFG7_CTRL3_VS_EFID_OFFSET    13
+#define    RTL8367C_SVLAN_MEMBERCFG7_CTRL3_VS_EFID_MASK    0xE000
+#define    RTL8367C_SVLAN_MEMBERCFG7_CTRL3_VS_EFIDEN_OFFSET    12
+#define    RTL8367C_SVLAN_MEMBERCFG7_CTRL3_VS_EFIDEN_MASK    0x1000
+#define    RTL8367C_SVLAN_MEMBERCFG7_CTRL3_VS_SVID_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG7_CTRL3_VS_SVID_MASK    0xFFF
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG8_CTRL1    0x0c19
+#define    RTL8367C_SVLAN_MEMBERCFG8_CTRL1_VS_UNTAGSET_OFFSET    8
+#define    RTL8367C_SVLAN_MEMBERCFG8_CTRL1_VS_UNTAGSET_MASK    0xFF00
+#define    RTL8367C_SVLAN_MEMBERCFG8_CTRL1_VS_SMBR_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG8_CTRL1_VS_SMBR_MASK    0xFF
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG8_CTRL2    0x0c1a
+#define    RTL8367C_SVLAN_MEMBERCFG8_CTRL2_VS_FIDEN_OFFSET    7
+#define    RTL8367C_SVLAN_MEMBERCFG8_CTRL2_VS_FIDEN_MASK    0x80
+#define    RTL8367C_SVLAN_MEMBERCFG8_CTRL2_VS_SPRI_OFFSET    4
+#define    RTL8367C_SVLAN_MEMBERCFG8_CTRL2_VS_SPRI_MASK    0x70
+#define    RTL8367C_SVLAN_MEMBERCFG8_CTRL2_VS_FID_MSTI_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG8_CTRL2_VS_FID_MSTI_MASK    0xF
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG8_CTRL3    0x0c1b
+#define    RTL8367C_SVLAN_MEMBERCFG8_CTRL3_VS_EFID_OFFSET    13
+#define    RTL8367C_SVLAN_MEMBERCFG8_CTRL3_VS_EFID_MASK    0xE000
+#define    RTL8367C_SVLAN_MEMBERCFG8_CTRL3_VS_EFIDEN_OFFSET    12
+#define    RTL8367C_SVLAN_MEMBERCFG8_CTRL3_VS_EFIDEN_MASK    0x1000
+#define    RTL8367C_SVLAN_MEMBERCFG8_CTRL3_VS_SVID_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG8_CTRL3_VS_SVID_MASK    0xFFF
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG9_CTRL1    0x0c1c
+#define    RTL8367C_SVLAN_MEMBERCFG9_CTRL1_VS_UNTAGSET_OFFSET    8
+#define    RTL8367C_SVLAN_MEMBERCFG9_CTRL1_VS_UNTAGSET_MASK    0xFF00
+#define    RTL8367C_SVLAN_MEMBERCFG9_CTRL1_VS_SMBR_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG9_CTRL1_VS_SMBR_MASK    0xFF
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG9_CTRL2    0x0c1d
+#define    RTL8367C_SVLAN_MEMBERCFG9_CTRL2_VS_FIDEN_OFFSET    7
+#define    RTL8367C_SVLAN_MEMBERCFG9_CTRL2_VS_FIDEN_MASK    0x80
+#define    RTL8367C_SVLAN_MEMBERCFG9_CTRL2_VS_SPRI_OFFSET    4
+#define    RTL8367C_SVLAN_MEMBERCFG9_CTRL2_VS_SPRI_MASK    0x70
+#define    RTL8367C_SVLAN_MEMBERCFG9_CTRL2_VS_FID_MSTI_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG9_CTRL2_VS_FID_MSTI_MASK    0xF
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG9_CTRL3    0x0c1e
+#define    RTL8367C_SVLAN_MEMBERCFG9_CTRL3_VS_EFID_OFFSET    13
+#define    RTL8367C_SVLAN_MEMBERCFG9_CTRL3_VS_EFID_MASK    0xE000
+#define    RTL8367C_SVLAN_MEMBERCFG9_CTRL3_VS_EFIDEN_OFFSET    12
+#define    RTL8367C_SVLAN_MEMBERCFG9_CTRL3_VS_EFIDEN_MASK    0x1000
+#define    RTL8367C_SVLAN_MEMBERCFG9_CTRL3_VS_SVID_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG9_CTRL3_VS_SVID_MASK    0xFFF
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG10_CTRL1    0x0c1f
+#define    RTL8367C_SVLAN_MEMBERCFG10_CTRL1_VS_UNTAGSET_OFFSET    8
+#define    RTL8367C_SVLAN_MEMBERCFG10_CTRL1_VS_UNTAGSET_MASK    0xFF00
+#define    RTL8367C_SVLAN_MEMBERCFG10_CTRL1_VS_SMBR_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG10_CTRL1_VS_SMBR_MASK    0xFF
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG10_CTRL2    0x0c20
+#define    RTL8367C_SVLAN_MEMBERCFG10_CTRL2_VS_FIDEN_OFFSET    7
+#define    RTL8367C_SVLAN_MEMBERCFG10_CTRL2_VS_FIDEN_MASK    0x80
+#define    RTL8367C_SVLAN_MEMBERCFG10_CTRL2_VS_SPRI_OFFSET    4
+#define    RTL8367C_SVLAN_MEMBERCFG10_CTRL2_VS_SPRI_MASK    0x70
+#define    RTL8367C_SVLAN_MEMBERCFG10_CTRL2_VS_FID_MSTI_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG10_CTRL2_VS_FID_MSTI_MASK    0xF
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG10_CTRL3    0x0c21
+#define    RTL8367C_SVLAN_MEMBERCFG10_CTRL3_VS_EFID_OFFSET    13
+#define    RTL8367C_SVLAN_MEMBERCFG10_CTRL3_VS_EFID_MASK    0xE000
+#define    RTL8367C_SVLAN_MEMBERCFG10_CTRL3_VS_EFIDEN_OFFSET    12
+#define    RTL8367C_SVLAN_MEMBERCFG10_CTRL3_VS_EFIDEN_MASK    0x1000
+#define    RTL8367C_SVLAN_MEMBERCFG10_CTRL3_VS_SVID_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG10_CTRL3_VS_SVID_MASK    0xFFF
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG11_CTRL1    0x0c22
+#define    RTL8367C_SVLAN_MEMBERCFG11_CTRL1_VS_UNTAGSET_OFFSET    8
+#define    RTL8367C_SVLAN_MEMBERCFG11_CTRL1_VS_UNTAGSET_MASK    0xFF00
+#define    RTL8367C_SVLAN_MEMBERCFG11_CTRL1_VS_SMBR_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG11_CTRL1_VS_SMBR_MASK    0xFF
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG11_CTRL2    0x0c23
+#define    RTL8367C_SVLAN_MEMBERCFG11_CTRL2_VS_FIDEN_OFFSET    7
+#define    RTL8367C_SVLAN_MEMBERCFG11_CTRL2_VS_FIDEN_MASK    0x80
+#define    RTL8367C_SVLAN_MEMBERCFG11_CTRL2_VS_SPRI_OFFSET    4
+#define    RTL8367C_SVLAN_MEMBERCFG11_CTRL2_VS_SPRI_MASK    0x70
+#define    RTL8367C_SVLAN_MEMBERCFG11_CTRL2_VS_FID_MSTI_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG11_CTRL2_VS_FID_MSTI_MASK    0xF
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG11_CTRL3    0x0c24
+#define    RTL8367C_SVLAN_MEMBERCFG11_CTRL3_VS_EFID_OFFSET    13
+#define    RTL8367C_SVLAN_MEMBERCFG11_CTRL3_VS_EFID_MASK    0xE000
+#define    RTL8367C_SVLAN_MEMBERCFG11_CTRL3_VS_EFIDEN_OFFSET    12
+#define    RTL8367C_SVLAN_MEMBERCFG11_CTRL3_VS_EFIDEN_MASK    0x1000
+#define    RTL8367C_SVLAN_MEMBERCFG11_CTRL3_VS_SVID_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG11_CTRL3_VS_SVID_MASK    0xFFF
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG12_CTRL1    0x0c25
+#define    RTL8367C_SVLAN_MEMBERCFG12_CTRL1_VS_UNTAGSET_OFFSET    8
+#define    RTL8367C_SVLAN_MEMBERCFG12_CTRL1_VS_UNTAGSET_MASK    0xFF00
+#define    RTL8367C_SVLAN_MEMBERCFG12_CTRL1_VS_SMBR_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG12_CTRL1_VS_SMBR_MASK    0xFF
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG12_CTRL2    0x0c26
+#define    RTL8367C_SVLAN_MEMBERCFG12_CTRL2_VS_FIDEN_OFFSET    7
+#define    RTL8367C_SVLAN_MEMBERCFG12_CTRL2_VS_FIDEN_MASK    0x80
+#define    RTL8367C_SVLAN_MEMBERCFG12_CTRL2_VS_SPRI_OFFSET    4
+#define    RTL8367C_SVLAN_MEMBERCFG12_CTRL2_VS_SPRI_MASK    0x70
+#define    RTL8367C_SVLAN_MEMBERCFG12_CTRL2_VS_FID_MSTI_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG12_CTRL2_VS_FID_MSTI_MASK    0xF
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG12_CTRL3    0x0c27
+#define    RTL8367C_SVLAN_MEMBERCFG12_CTRL3_VS_EFID_OFFSET    13
+#define    RTL8367C_SVLAN_MEMBERCFG12_CTRL3_VS_EFID_MASK    0xE000
+#define    RTL8367C_SVLAN_MEMBERCFG12_CTRL3_VS_EFIDEN_OFFSET    12
+#define    RTL8367C_SVLAN_MEMBERCFG12_CTRL3_VS_EFIDEN_MASK    0x1000
+#define    RTL8367C_SVLAN_MEMBERCFG12_CTRL3_VS_SVID_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG12_CTRL3_VS_SVID_MASK    0xFFF
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG13_CTRL1    0x0c28
+#define    RTL8367C_SVLAN_MEMBERCFG13_CTRL1_VS_UNTAGSET_OFFSET    8
+#define    RTL8367C_SVLAN_MEMBERCFG13_CTRL1_VS_UNTAGSET_MASK    0xFF00
+#define    RTL8367C_SVLAN_MEMBERCFG13_CTRL1_VS_SMBR_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG13_CTRL1_VS_SMBR_MASK    0xFF
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG13_CTRL2    0x0c29
+#define    RTL8367C_SVLAN_MEMBERCFG13_CTRL2_VS_FIDEN_OFFSET    7
+#define    RTL8367C_SVLAN_MEMBERCFG13_CTRL2_VS_FIDEN_MASK    0x80
+#define    RTL8367C_SVLAN_MEMBERCFG13_CTRL2_VS_SPRI_OFFSET    4
+#define    RTL8367C_SVLAN_MEMBERCFG13_CTRL2_VS_SPRI_MASK    0x70
+#define    RTL8367C_SVLAN_MEMBERCFG13_CTRL2_VS_FID_MSTI_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG13_CTRL2_VS_FID_MSTI_MASK    0xF
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG13_CTRL3    0x0c2a
+#define    RTL8367C_SVLAN_MEMBERCFG13_CTRL3_VS_EFID_OFFSET    13
+#define    RTL8367C_SVLAN_MEMBERCFG13_CTRL3_VS_EFID_MASK    0xE000
+#define    RTL8367C_SVLAN_MEMBERCFG13_CTRL3_VS_EFIDEN_OFFSET    12
+#define    RTL8367C_SVLAN_MEMBERCFG13_CTRL3_VS_EFIDEN_MASK    0x1000
+#define    RTL8367C_SVLAN_MEMBERCFG13_CTRL3_VS_SVID_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG13_CTRL3_VS_SVID_MASK    0xFFF
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG14_CTRL1    0x0c2b
+#define    RTL8367C_SVLAN_MEMBERCFG14_CTRL1_VS_UNTAGSET_OFFSET    8
+#define    RTL8367C_SVLAN_MEMBERCFG14_CTRL1_VS_UNTAGSET_MASK    0xFF00
+#define    RTL8367C_SVLAN_MEMBERCFG14_CTRL1_VS_SMBR_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG14_CTRL1_VS_SMBR_MASK    0xFF
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG14_CTRL2    0x0c2c
+#define    RTL8367C_SVLAN_MEMBERCFG14_CTRL2_VS_FIDEN_OFFSET    7
+#define    RTL8367C_SVLAN_MEMBERCFG14_CTRL2_VS_FIDEN_MASK    0x80
+#define    RTL8367C_SVLAN_MEMBERCFG14_CTRL2_VS_SPRI_OFFSET    4
+#define    RTL8367C_SVLAN_MEMBERCFG14_CTRL2_VS_SPRI_MASK    0x70
+#define    RTL8367C_SVLAN_MEMBERCFG14_CTRL2_VS_FID_MSTI_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG14_CTRL2_VS_FID_MSTI_MASK    0xF
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG14_CTRL3    0x0c2d
+#define    RTL8367C_SVLAN_MEMBERCFG14_CTRL3_VS_EFID_OFFSET    13
+#define    RTL8367C_SVLAN_MEMBERCFG14_CTRL3_VS_EFID_MASK    0xE000
+#define    RTL8367C_SVLAN_MEMBERCFG14_CTRL3_VS_EFIDEN_OFFSET    12
+#define    RTL8367C_SVLAN_MEMBERCFG14_CTRL3_VS_EFIDEN_MASK    0x1000
+#define    RTL8367C_SVLAN_MEMBERCFG14_CTRL3_VS_SVID_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG14_CTRL3_VS_SVID_MASK    0xFFF
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG15_CTRL1    0x0c2e
+#define    RTL8367C_SVLAN_MEMBERCFG15_CTRL1_VS_UNTAGSET_OFFSET    8
+#define    RTL8367C_SVLAN_MEMBERCFG15_CTRL1_VS_UNTAGSET_MASK    0xFF00
+#define    RTL8367C_SVLAN_MEMBERCFG15_CTRL1_VS_SMBR_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG15_CTRL1_VS_SMBR_MASK    0xFF
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG15_CTRL2    0x0c2f
+#define    RTL8367C_SVLAN_MEMBERCFG15_CTRL2_VS_FIDEN_OFFSET    7
+#define    RTL8367C_SVLAN_MEMBERCFG15_CTRL2_VS_FIDEN_MASK    0x80
+#define    RTL8367C_SVLAN_MEMBERCFG15_CTRL2_VS_SPRI_OFFSET    4
+#define    RTL8367C_SVLAN_MEMBERCFG15_CTRL2_VS_SPRI_MASK    0x70
+#define    RTL8367C_SVLAN_MEMBERCFG15_CTRL2_VS_FID_MSTI_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG15_CTRL2_VS_FID_MSTI_MASK    0xF
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG15_CTRL3    0x0c30
+#define    RTL8367C_SVLAN_MEMBERCFG15_CTRL3_VS_EFID_OFFSET    13
+#define    RTL8367C_SVLAN_MEMBERCFG15_CTRL3_VS_EFID_MASK    0xE000
+#define    RTL8367C_SVLAN_MEMBERCFG15_CTRL3_VS_EFIDEN_OFFSET    12
+#define    RTL8367C_SVLAN_MEMBERCFG15_CTRL3_VS_EFIDEN_MASK    0x1000
+#define    RTL8367C_SVLAN_MEMBERCFG15_CTRL3_VS_SVID_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG15_CTRL3_VS_SVID_MASK    0xFFF
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG16_CTRL1    0x0c31
+#define    RTL8367C_SVLAN_MEMBERCFG16_CTRL1_VS_UNTAGSET_OFFSET    8
+#define    RTL8367C_SVLAN_MEMBERCFG16_CTRL1_VS_UNTAGSET_MASK    0xFF00
+#define    RTL8367C_SVLAN_MEMBERCFG16_CTRL1_VS_SMBR_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG16_CTRL1_VS_SMBR_MASK    0xFF
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG16_CTRL2    0x0c32
+#define    RTL8367C_SVLAN_MEMBERCFG16_CTRL2_VS_FIDEN_OFFSET    7
+#define    RTL8367C_SVLAN_MEMBERCFG16_CTRL2_VS_FIDEN_MASK    0x80
+#define    RTL8367C_SVLAN_MEMBERCFG16_CTRL2_VS_SPRI_OFFSET    4
+#define    RTL8367C_SVLAN_MEMBERCFG16_CTRL2_VS_SPRI_MASK    0x70
+#define    RTL8367C_SVLAN_MEMBERCFG16_CTRL2_VS_FID_MSTI_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG16_CTRL2_VS_FID_MSTI_MASK    0xF
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG16_CTRL3    0x0c33
+#define    RTL8367C_SVLAN_MEMBERCFG16_CTRL3_VS_EFID_OFFSET    13
+#define    RTL8367C_SVLAN_MEMBERCFG16_CTRL3_VS_EFID_MASK    0xE000
+#define    RTL8367C_SVLAN_MEMBERCFG16_CTRL3_VS_EFIDEN_OFFSET    12
+#define    RTL8367C_SVLAN_MEMBERCFG16_CTRL3_VS_EFIDEN_MASK    0x1000
+#define    RTL8367C_SVLAN_MEMBERCFG16_CTRL3_VS_SVID_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG16_CTRL3_VS_SVID_MASK    0xFFF
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG17_CTRL1    0x0c34
+#define    RTL8367C_SVLAN_MEMBERCFG17_CTRL1_VS_UNTAGSET_OFFSET    8
+#define    RTL8367C_SVLAN_MEMBERCFG17_CTRL1_VS_UNTAGSET_MASK    0xFF00
+#define    RTL8367C_SVLAN_MEMBERCFG17_CTRL1_VS_SMBR_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG17_CTRL1_VS_SMBR_MASK    0xFF
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG17_CTRL2    0x0c35
+#define    RTL8367C_SVLAN_MEMBERCFG17_CTRL2_VS_FIDEN_OFFSET    7
+#define    RTL8367C_SVLAN_MEMBERCFG17_CTRL2_VS_FIDEN_MASK    0x80
+#define    RTL8367C_SVLAN_MEMBERCFG17_CTRL2_VS_SPRI_OFFSET    4
+#define    RTL8367C_SVLAN_MEMBERCFG17_CTRL2_VS_SPRI_MASK    0x70
+#define    RTL8367C_SVLAN_MEMBERCFG17_CTRL2_VS_FID_MSTI_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG17_CTRL2_VS_FID_MSTI_MASK    0xF
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG17_CTRL3    0x0c36
+#define    RTL8367C_SVLAN_MEMBERCFG17_CTRL3_VS_EFID_OFFSET    13
+#define    RTL8367C_SVLAN_MEMBERCFG17_CTRL3_VS_EFID_MASK    0xE000
+#define    RTL8367C_SVLAN_MEMBERCFG17_CTRL3_VS_EFIDEN_OFFSET    12
+#define    RTL8367C_SVLAN_MEMBERCFG17_CTRL3_VS_EFIDEN_MASK    0x1000
+#define    RTL8367C_SVLAN_MEMBERCFG17_CTRL3_VS_SVID_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG17_CTRL3_VS_SVID_MASK    0xFFF
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG18_CTRL1    0x0c37
+#define    RTL8367C_SVLAN_MEMBERCFG18_CTRL1_VS_UNTAGSET_OFFSET    8
+#define    RTL8367C_SVLAN_MEMBERCFG18_CTRL1_VS_UNTAGSET_MASK    0xFF00
+#define    RTL8367C_SVLAN_MEMBERCFG18_CTRL1_VS_SMBR_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG18_CTRL1_VS_SMBR_MASK    0xFF
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG18_CTRL2    0x0c38
+#define    RTL8367C_SVLAN_MEMBERCFG18_CTRL2_VS_FIDEN_OFFSET    7
+#define    RTL8367C_SVLAN_MEMBERCFG18_CTRL2_VS_FIDEN_MASK    0x80
+#define    RTL8367C_SVLAN_MEMBERCFG18_CTRL2_VS_SPRI_OFFSET    4
+#define    RTL8367C_SVLAN_MEMBERCFG18_CTRL2_VS_SPRI_MASK    0x70
+#define    RTL8367C_SVLAN_MEMBERCFG18_CTRL2_VS_FID_MSTI_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG18_CTRL2_VS_FID_MSTI_MASK    0xF
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG18_CTRL3    0x0c39
+#define    RTL8367C_SVLAN_MEMBERCFG18_CTRL3_VS_EFID_OFFSET    13
+#define    RTL8367C_SVLAN_MEMBERCFG18_CTRL3_VS_EFID_MASK    0xE000
+#define    RTL8367C_SVLAN_MEMBERCFG18_CTRL3_VS_EFIDEN_OFFSET    12
+#define    RTL8367C_SVLAN_MEMBERCFG18_CTRL3_VS_EFIDEN_MASK    0x1000
+#define    RTL8367C_SVLAN_MEMBERCFG18_CTRL3_VS_SVID_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG18_CTRL3_VS_SVID_MASK    0xFFF
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG19_CTRL1    0x0c3a
+#define    RTL8367C_SVLAN_MEMBERCFG19_CTRL1_VS_UNTAGSET_OFFSET    8
+#define    RTL8367C_SVLAN_MEMBERCFG19_CTRL1_VS_UNTAGSET_MASK    0xFF00
+#define    RTL8367C_SVLAN_MEMBERCFG19_CTRL1_VS_SMBR_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG19_CTRL1_VS_SMBR_MASK    0xFF
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG19_CTRL2    0x0c3b
+#define    RTL8367C_SVLAN_MEMBERCFG19_CTRL2_VS_FIDEN_OFFSET    7
+#define    RTL8367C_SVLAN_MEMBERCFG19_CTRL2_VS_FIDEN_MASK    0x80
+#define    RTL8367C_SVLAN_MEMBERCFG19_CTRL2_VS_SPRI_OFFSET    4
+#define    RTL8367C_SVLAN_MEMBERCFG19_CTRL2_VS_SPRI_MASK    0x70
+#define    RTL8367C_SVLAN_MEMBERCFG19_CTRL2_VS_FID_MSTI_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG19_CTRL2_VS_FID_MSTI_MASK    0xF
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG19_CTRL3    0x0c3c
+#define    RTL8367C_SVLAN_MEMBERCFG19_CTRL3_VS_EFID_OFFSET    13
+#define    RTL8367C_SVLAN_MEMBERCFG19_CTRL3_VS_EFID_MASK    0xE000
+#define    RTL8367C_SVLAN_MEMBERCFG19_CTRL3_VS_EFIDEN_OFFSET    12
+#define    RTL8367C_SVLAN_MEMBERCFG19_CTRL3_VS_EFIDEN_MASK    0x1000
+#define    RTL8367C_SVLAN_MEMBERCFG19_CTRL3_VS_SVID_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG19_CTRL3_VS_SVID_MASK    0xFFF
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG20_CTRL1    0x0c3d
+#define    RTL8367C_SVLAN_MEMBERCFG20_CTRL1_VS_UNTAGSET_OFFSET    8
+#define    RTL8367C_SVLAN_MEMBERCFG20_CTRL1_VS_UNTAGSET_MASK    0xFF00
+#define    RTL8367C_SVLAN_MEMBERCFG20_CTRL1_VS_SMBR_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG20_CTRL1_VS_SMBR_MASK    0xFF
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG20_CTRL2    0x0c3e
+#define    RTL8367C_SVLAN_MEMBERCFG20_CTRL2_VS_FIDEN_OFFSET    7
+#define    RTL8367C_SVLAN_MEMBERCFG20_CTRL2_VS_FIDEN_MASK    0x80
+#define    RTL8367C_SVLAN_MEMBERCFG20_CTRL2_VS_SPRI_OFFSET    4
+#define    RTL8367C_SVLAN_MEMBERCFG20_CTRL2_VS_SPRI_MASK    0x70
+#define    RTL8367C_SVLAN_MEMBERCFG20_CTRL2_VS_FID_MSTI_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG20_CTRL2_VS_FID_MSTI_MASK    0xF
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG20_CTRL3    0x0c3f
+#define    RTL8367C_SVLAN_MEMBERCFG20_CTRL3_VS_EFID_OFFSET    13
+#define    RTL8367C_SVLAN_MEMBERCFG20_CTRL3_VS_EFID_MASK    0xE000
+#define    RTL8367C_SVLAN_MEMBERCFG20_CTRL3_VS_EFIDEN_OFFSET    12
+#define    RTL8367C_SVLAN_MEMBERCFG20_CTRL3_VS_EFIDEN_MASK    0x1000
+#define    RTL8367C_SVLAN_MEMBERCFG20_CTRL3_VS_SVID_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG20_CTRL3_VS_SVID_MASK    0xFFF
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG21_CTRL1    0x0c40
+#define    RTL8367C_SVLAN_MEMBERCFG21_CTRL1_VS_UNTAGSET_OFFSET    8
+#define    RTL8367C_SVLAN_MEMBERCFG21_CTRL1_VS_UNTAGSET_MASK    0xFF00
+#define    RTL8367C_SVLAN_MEMBERCFG21_CTRL1_VS_SMBR_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG21_CTRL1_VS_SMBR_MASK    0xFF
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG21_CTRL2    0x0c41
+#define    RTL8367C_SVLAN_MEMBERCFG21_CTRL2_VS_FIDEN_OFFSET    7
+#define    RTL8367C_SVLAN_MEMBERCFG21_CTRL2_VS_FIDEN_MASK    0x80
+#define    RTL8367C_SVLAN_MEMBERCFG21_CTRL2_VS_SPRI_OFFSET    4
+#define    RTL8367C_SVLAN_MEMBERCFG21_CTRL2_VS_SPRI_MASK    0x70
+#define    RTL8367C_SVLAN_MEMBERCFG21_CTRL2_VS_FID_MSTI_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG21_CTRL2_VS_FID_MSTI_MASK    0xF
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG21_CTRL3    0x0c42
+#define    RTL8367C_SVLAN_MEMBERCFG21_CTRL3_VS_EFID_OFFSET    13
+#define    RTL8367C_SVLAN_MEMBERCFG21_CTRL3_VS_EFID_MASK    0xE000
+#define    RTL8367C_SVLAN_MEMBERCFG21_CTRL3_VS_EFIDEN_OFFSET    12
+#define    RTL8367C_SVLAN_MEMBERCFG21_CTRL3_VS_EFIDEN_MASK    0x1000
+#define    RTL8367C_SVLAN_MEMBERCFG21_CTRL3_VS_SVID_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG21_CTRL3_VS_SVID_MASK    0xFFF
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG22_CTRL1    0x0c43
+#define    RTL8367C_SVLAN_MEMBERCFG22_CTRL1_VS_UNTAGSET_OFFSET    8
+#define    RTL8367C_SVLAN_MEMBERCFG22_CTRL1_VS_UNTAGSET_MASK    0xFF00
+#define    RTL8367C_SVLAN_MEMBERCFG22_CTRL1_VS_SMBR_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG22_CTRL1_VS_SMBR_MASK    0xFF
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG22_CTRL2    0x0c44
+#define    RTL8367C_SVLAN_MEMBERCFG22_CTRL2_VS_FIDEN_OFFSET    7
+#define    RTL8367C_SVLAN_MEMBERCFG22_CTRL2_VS_FIDEN_MASK    0x80
+#define    RTL8367C_SVLAN_MEMBERCFG22_CTRL2_VS_SPRI_OFFSET    4
+#define    RTL8367C_SVLAN_MEMBERCFG22_CTRL2_VS_SPRI_MASK    0x70
+#define    RTL8367C_SVLAN_MEMBERCFG22_CTRL2_VS_FID_MSTI_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG22_CTRL2_VS_FID_MSTI_MASK    0xF
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG22_CTRL3    0x0c45
+#define    RTL8367C_SVLAN_MEMBERCFG22_CTRL3_VS_EFID_OFFSET    13
+#define    RTL8367C_SVLAN_MEMBERCFG22_CTRL3_VS_EFID_MASK    0xE000
+#define    RTL8367C_SVLAN_MEMBERCFG22_CTRL3_VS_EFIDEN_OFFSET    12
+#define    RTL8367C_SVLAN_MEMBERCFG22_CTRL3_VS_EFIDEN_MASK    0x1000
+#define    RTL8367C_SVLAN_MEMBERCFG22_CTRL3_VS_SVID_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG22_CTRL3_VS_SVID_MASK    0xFFF
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG23_CTRL1    0x0c46
+#define    RTL8367C_SVLAN_MEMBERCFG23_CTRL1_VS_UNTAGSET_OFFSET    8
+#define    RTL8367C_SVLAN_MEMBERCFG23_CTRL1_VS_UNTAGSET_MASK    0xFF00
+#define    RTL8367C_SVLAN_MEMBERCFG23_CTRL1_VS_SMBR_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG23_CTRL1_VS_SMBR_MASK    0xFF
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG23_CTRL2    0x0c47
+#define    RTL8367C_SVLAN_MEMBERCFG23_CTRL2_VS_FIDEN_OFFSET    7
+#define    RTL8367C_SVLAN_MEMBERCFG23_CTRL2_VS_FIDEN_MASK    0x80
+#define    RTL8367C_SVLAN_MEMBERCFG23_CTRL2_VS_SPRI_OFFSET    4
+#define    RTL8367C_SVLAN_MEMBERCFG23_CTRL2_VS_SPRI_MASK    0x70
+#define    RTL8367C_SVLAN_MEMBERCFG23_CTRL2_VS_FID_MSTI_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG23_CTRL2_VS_FID_MSTI_MASK    0xF
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG23_CTRL3    0x0c48
+#define    RTL8367C_SVLAN_MEMBERCFG23_CTRL3_VS_EFID_OFFSET    13
+#define    RTL8367C_SVLAN_MEMBERCFG23_CTRL3_VS_EFID_MASK    0xE000
+#define    RTL8367C_SVLAN_MEMBERCFG23_CTRL3_VS_EFIDEN_OFFSET    12
+#define    RTL8367C_SVLAN_MEMBERCFG23_CTRL3_VS_EFIDEN_MASK    0x1000
+#define    RTL8367C_SVLAN_MEMBERCFG23_CTRL3_VS_SVID_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG23_CTRL3_VS_SVID_MASK    0xFFF
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG24_CTRL1    0x0c49
+#define    RTL8367C_SVLAN_MEMBERCFG24_CTRL1_VS_UNTAGSET_OFFSET    8
+#define    RTL8367C_SVLAN_MEMBERCFG24_CTRL1_VS_UNTAGSET_MASK    0xFF00
+#define    RTL8367C_SVLAN_MEMBERCFG24_CTRL1_VS_SMBR_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG24_CTRL1_VS_SMBR_MASK    0xFF
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG24_CTRL2    0x0c4a
+#define    RTL8367C_SVLAN_MEMBERCFG24_CTRL2_VS_FIDEN_OFFSET    7
+#define    RTL8367C_SVLAN_MEMBERCFG24_CTRL2_VS_FIDEN_MASK    0x80
+#define    RTL8367C_SVLAN_MEMBERCFG24_CTRL2_VS_SPRI_OFFSET    4
+#define    RTL8367C_SVLAN_MEMBERCFG24_CTRL2_VS_SPRI_MASK    0x70
+#define    RTL8367C_SVLAN_MEMBERCFG24_CTRL2_VS_FID_MSTI_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG24_CTRL2_VS_FID_MSTI_MASK    0xF
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG24_CTRL3    0x0c4b
+#define    RTL8367C_SVLAN_MEMBERCFG24_CTRL3_VS_EFID_OFFSET    13
+#define    RTL8367C_SVLAN_MEMBERCFG24_CTRL3_VS_EFID_MASK    0xE000
+#define    RTL8367C_SVLAN_MEMBERCFG24_CTRL3_VS_EFIDEN_OFFSET    12
+#define    RTL8367C_SVLAN_MEMBERCFG24_CTRL3_VS_EFIDEN_MASK    0x1000
+#define    RTL8367C_SVLAN_MEMBERCFG24_CTRL3_VS_SVID_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG24_CTRL3_VS_SVID_MASK    0xFFF
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG25_CTRL1    0x0c4c
+#define    RTL8367C_SVLAN_MEMBERCFG25_CTRL1_VS_UNTAGSET_OFFSET    8
+#define    RTL8367C_SVLAN_MEMBERCFG25_CTRL1_VS_UNTAGSET_MASK    0xFF00
+#define    RTL8367C_SVLAN_MEMBERCFG25_CTRL1_VS_SMBR_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG25_CTRL1_VS_SMBR_MASK    0xFF
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG25_CTRL2    0x0c4d
+#define    RTL8367C_SVLAN_MEMBERCFG25_CTRL2_VS_FIDEN_OFFSET    7
+#define    RTL8367C_SVLAN_MEMBERCFG25_CTRL2_VS_FIDEN_MASK    0x80
+#define    RTL8367C_SVLAN_MEMBERCFG25_CTRL2_VS_SPRI_OFFSET    4
+#define    RTL8367C_SVLAN_MEMBERCFG25_CTRL2_VS_SPRI_MASK    0x70
+#define    RTL8367C_SVLAN_MEMBERCFG25_CTRL2_VS_FID_MSTI_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG25_CTRL2_VS_FID_MSTI_MASK    0xF
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG25_CTRL3    0x0c4e
+#define    RTL8367C_SVLAN_MEMBERCFG25_CTRL3_VS_EFID_OFFSET    13
+#define    RTL8367C_SVLAN_MEMBERCFG25_CTRL3_VS_EFID_MASK    0xE000
+#define    RTL8367C_SVLAN_MEMBERCFG25_CTRL3_VS_EFIDEN_OFFSET    12
+#define    RTL8367C_SVLAN_MEMBERCFG25_CTRL3_VS_EFIDEN_MASK    0x1000
+#define    RTL8367C_SVLAN_MEMBERCFG25_CTRL3_VS_SVID_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG25_CTRL3_VS_SVID_MASK    0xFFF
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG26_CTRL1    0x0c4f
+#define    RTL8367C_SVLAN_MEMBERCFG26_CTRL1_VS_UNTAGSET_OFFSET    8
+#define    RTL8367C_SVLAN_MEMBERCFG26_CTRL1_VS_UNTAGSET_MASK    0xFF00
+#define    RTL8367C_SVLAN_MEMBERCFG26_CTRL1_VS_SMBR_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG26_CTRL1_VS_SMBR_MASK    0xFF
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG26_CTRL2    0x0c50
+#define    RTL8367C_SVLAN_MEMBERCFG26_CTRL2_VS_FIDEN_OFFSET    7
+#define    RTL8367C_SVLAN_MEMBERCFG26_CTRL2_VS_FIDEN_MASK    0x80
+#define    RTL8367C_SVLAN_MEMBERCFG26_CTRL2_VS_SPRI_OFFSET    4
+#define    RTL8367C_SVLAN_MEMBERCFG26_CTRL2_VS_SPRI_MASK    0x70
+#define    RTL8367C_SVLAN_MEMBERCFG26_CTRL2_VS_FID_MSTI_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG26_CTRL2_VS_FID_MSTI_MASK    0xF
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG26_CTRL3    0x0c51
+#define    RTL8367C_SVLAN_MEMBERCFG26_CTRL3_VS_EFID_OFFSET    13
+#define    RTL8367C_SVLAN_MEMBERCFG26_CTRL3_VS_EFID_MASK    0xE000
+#define    RTL8367C_SVLAN_MEMBERCFG26_CTRL3_VS_EFIDEN_OFFSET    12
+#define    RTL8367C_SVLAN_MEMBERCFG26_CTRL3_VS_EFIDEN_MASK    0x1000
+#define    RTL8367C_SVLAN_MEMBERCFG26_CTRL3_VS_SVID_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG26_CTRL3_VS_SVID_MASK    0xFFF
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG27_CTRL1    0x0c52
+#define    RTL8367C_SVLAN_MEMBERCFG27_CTRL1_VS_UNTAGSET_OFFSET    8
+#define    RTL8367C_SVLAN_MEMBERCFG27_CTRL1_VS_UNTAGSET_MASK    0xFF00
+#define    RTL8367C_SVLAN_MEMBERCFG27_CTRL1_VS_SMBR_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG27_CTRL1_VS_SMBR_MASK    0xFF
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG27_CTRL2    0x0c53
+#define    RTL8367C_SVLAN_MEMBERCFG27_CTRL2_VS_FIDEN_OFFSET    7
+#define    RTL8367C_SVLAN_MEMBERCFG27_CTRL2_VS_FIDEN_MASK    0x80
+#define    RTL8367C_SVLAN_MEMBERCFG27_CTRL2_VS_SPRI_OFFSET    4
+#define    RTL8367C_SVLAN_MEMBERCFG27_CTRL2_VS_SPRI_MASK    0x70
+#define    RTL8367C_SVLAN_MEMBERCFG27_CTRL2_VS_FID_MSTI_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG27_CTRL2_VS_FID_MSTI_MASK    0xF
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG27_CTRL3    0x0c54
+#define    RTL8367C_SVLAN_MEMBERCFG27_CTRL3_VS_EFID_OFFSET    13
+#define    RTL8367C_SVLAN_MEMBERCFG27_CTRL3_VS_EFID_MASK    0xE000
+#define    RTL8367C_SVLAN_MEMBERCFG27_CTRL3_VS_EFIDEN_OFFSET    12
+#define    RTL8367C_SVLAN_MEMBERCFG27_CTRL3_VS_EFIDEN_MASK    0x1000
+#define    RTL8367C_SVLAN_MEMBERCFG27_CTRL3_VS_SVID_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG27_CTRL3_VS_SVID_MASK    0xFFF
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG28_CTRL1    0x0c55
+#define    RTL8367C_SVLAN_MEMBERCFG28_CTRL1_VS_UNTAGSET_OFFSET    8
+#define    RTL8367C_SVLAN_MEMBERCFG28_CTRL1_VS_UNTAGSET_MASK    0xFF00
+#define    RTL8367C_SVLAN_MEMBERCFG28_CTRL1_VS_SMBR_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG28_CTRL1_VS_SMBR_MASK    0xFF
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG28_CTRL2    0x0c56
+#define    RTL8367C_SVLAN_MEMBERCFG28_CTRL2_VS_FIDEN_OFFSET    7
+#define    RTL8367C_SVLAN_MEMBERCFG28_CTRL2_VS_FIDEN_MASK    0x80
+#define    RTL8367C_SVLAN_MEMBERCFG28_CTRL2_VS_SPRI_OFFSET    4
+#define    RTL8367C_SVLAN_MEMBERCFG28_CTRL2_VS_SPRI_MASK    0x70
+#define    RTL8367C_SVLAN_MEMBERCFG28_CTRL2_VS_FID_MSTI_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG28_CTRL2_VS_FID_MSTI_MASK    0xF
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG28_CTRL3    0x0c57
+#define    RTL8367C_SVLAN_MEMBERCFG28_CTRL3_VS_EFID_OFFSET    13
+#define    RTL8367C_SVLAN_MEMBERCFG28_CTRL3_VS_EFID_MASK    0xE000
+#define    RTL8367C_SVLAN_MEMBERCFG28_CTRL3_VS_EFIDEN_OFFSET    12
+#define    RTL8367C_SVLAN_MEMBERCFG28_CTRL3_VS_EFIDEN_MASK    0x1000
+#define    RTL8367C_SVLAN_MEMBERCFG28_CTRL3_VS_SVID_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG28_CTRL3_VS_SVID_MASK    0xFFF
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG29_CTRL1    0x0c58
+#define    RTL8367C_SVLAN_MEMBERCFG29_CTRL1_VS_UNTAGSET_OFFSET    8
+#define    RTL8367C_SVLAN_MEMBERCFG29_CTRL1_VS_UNTAGSET_MASK    0xFF00
+#define    RTL8367C_SVLAN_MEMBERCFG29_CTRL1_VS_SMBR_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG29_CTRL1_VS_SMBR_MASK    0xFF
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG29_CTRL2    0x0c59
+#define    RTL8367C_SVLAN_MEMBERCFG29_CTRL2_VS_FIDEN_OFFSET    7
+#define    RTL8367C_SVLAN_MEMBERCFG29_CTRL2_VS_FIDEN_MASK    0x80
+#define    RTL8367C_SVLAN_MEMBERCFG29_CTRL2_VS_SPRI_OFFSET    4
+#define    RTL8367C_SVLAN_MEMBERCFG29_CTRL2_VS_SPRI_MASK    0x70
+#define    RTL8367C_SVLAN_MEMBERCFG29_CTRL2_VS_FID_MSTI_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG29_CTRL2_VS_FID_MSTI_MASK    0xF
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG29_CTRL3    0x0c5a
+#define    RTL8367C_SVLAN_MEMBERCFG29_CTRL3_VS_EFID_OFFSET    13
+#define    RTL8367C_SVLAN_MEMBERCFG29_CTRL3_VS_EFID_MASK    0xE000
+#define    RTL8367C_SVLAN_MEMBERCFG29_CTRL3_VS_EFIDEN_OFFSET    12
+#define    RTL8367C_SVLAN_MEMBERCFG29_CTRL3_VS_EFIDEN_MASK    0x1000
+#define    RTL8367C_SVLAN_MEMBERCFG29_CTRL3_VS_SVID_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG29_CTRL3_VS_SVID_MASK    0xFFF
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG30_CTRL1    0x0c5b
+#define    RTL8367C_SVLAN_MEMBERCFG30_CTRL1_VS_UNTAGSET_OFFSET    8
+#define    RTL8367C_SVLAN_MEMBERCFG30_CTRL1_VS_UNTAGSET_MASK    0xFF00
+#define    RTL8367C_SVLAN_MEMBERCFG30_CTRL1_VS_SMBR_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG30_CTRL1_VS_SMBR_MASK    0xFF
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG30_CTRL2    0x0c5c
+#define    RTL8367C_SVLAN_MEMBERCFG30_CTRL2_VS_FIDEN_OFFSET    7
+#define    RTL8367C_SVLAN_MEMBERCFG30_CTRL2_VS_FIDEN_MASK    0x80
+#define    RTL8367C_SVLAN_MEMBERCFG30_CTRL2_VS_SPRI_OFFSET    4
+#define    RTL8367C_SVLAN_MEMBERCFG30_CTRL2_VS_SPRI_MASK    0x70
+#define    RTL8367C_SVLAN_MEMBERCFG30_CTRL2_VS_FID_MSTI_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG30_CTRL2_VS_FID_MSTI_MASK    0xF
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG30_CTRL3    0x0c5d
+#define    RTL8367C_SVLAN_MEMBERCFG30_CTRL3_VS_EFID_OFFSET    13
+#define    RTL8367C_SVLAN_MEMBERCFG30_CTRL3_VS_EFID_MASK    0xE000
+#define    RTL8367C_SVLAN_MEMBERCFG30_CTRL3_VS_EFIDEN_OFFSET    12
+#define    RTL8367C_SVLAN_MEMBERCFG30_CTRL3_VS_EFIDEN_MASK    0x1000
+#define    RTL8367C_SVLAN_MEMBERCFG30_CTRL3_VS_SVID_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG30_CTRL3_VS_SVID_MASK    0xFFF
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG31_CTRL1    0x0c5e
+#define    RTL8367C_SVLAN_MEMBERCFG31_CTRL1_VS_UNTAGSET_OFFSET    8
+#define    RTL8367C_SVLAN_MEMBERCFG31_CTRL1_VS_UNTAGSET_MASK    0xFF00
+#define    RTL8367C_SVLAN_MEMBERCFG31_CTRL1_VS_SMBR_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG31_CTRL1_VS_SMBR_MASK    0xFF
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG31_CTRL2    0x0c5f
+#define    RTL8367C_SVLAN_MEMBERCFG31_CTRL2_VS_FIDEN_OFFSET    7
+#define    RTL8367C_SVLAN_MEMBERCFG31_CTRL2_VS_FIDEN_MASK    0x80
+#define    RTL8367C_SVLAN_MEMBERCFG31_CTRL2_VS_SPRI_OFFSET    4
+#define    RTL8367C_SVLAN_MEMBERCFG31_CTRL2_VS_SPRI_MASK    0x70
+#define    RTL8367C_SVLAN_MEMBERCFG31_CTRL2_VS_FID_MSTI_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG31_CTRL2_VS_FID_MSTI_MASK    0xF
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG31_CTRL3    0x0c60
+#define    RTL8367C_SVLAN_MEMBERCFG31_CTRL3_VS_EFID_OFFSET    13
+#define    RTL8367C_SVLAN_MEMBERCFG31_CTRL3_VS_EFID_MASK    0xE000
+#define    RTL8367C_SVLAN_MEMBERCFG31_CTRL3_VS_EFIDEN_OFFSET    12
+#define    RTL8367C_SVLAN_MEMBERCFG31_CTRL3_VS_EFIDEN_MASK    0x1000
+#define    RTL8367C_SVLAN_MEMBERCFG31_CTRL3_VS_SVID_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG31_CTRL3_VS_SVID_MASK    0xFFF
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG32_CTRL1    0x0c61
+#define    RTL8367C_SVLAN_MEMBERCFG32_CTRL1_VS_UNTAGSET_OFFSET    8
+#define    RTL8367C_SVLAN_MEMBERCFG32_CTRL1_VS_UNTAGSET_MASK    0xFF00
+#define    RTL8367C_SVLAN_MEMBERCFG32_CTRL1_VS_SMBR_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG32_CTRL1_VS_SMBR_MASK    0xFF
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG32_CTRL2    0x0c62
+#define    RTL8367C_SVLAN_MEMBERCFG32_CTRL2_VS_FIDEN_OFFSET    7
+#define    RTL8367C_SVLAN_MEMBERCFG32_CTRL2_VS_FIDEN_MASK    0x80
+#define    RTL8367C_SVLAN_MEMBERCFG32_CTRL2_VS_SPRI_OFFSET    4
+#define    RTL8367C_SVLAN_MEMBERCFG32_CTRL2_VS_SPRI_MASK    0x70
+#define    RTL8367C_SVLAN_MEMBERCFG32_CTRL2_VS_FID_MSTI_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG32_CTRL2_VS_FID_MSTI_MASK    0xF
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG32_CTRL3    0x0c63
+#define    RTL8367C_SVLAN_MEMBERCFG32_CTRL3_VS_EFID_OFFSET    13
+#define    RTL8367C_SVLAN_MEMBERCFG32_CTRL3_VS_EFID_MASK    0xE000
+#define    RTL8367C_SVLAN_MEMBERCFG32_CTRL3_VS_EFIDEN_OFFSET    12
+#define    RTL8367C_SVLAN_MEMBERCFG32_CTRL3_VS_EFIDEN_MASK    0x1000
+#define    RTL8367C_SVLAN_MEMBERCFG32_CTRL3_VS_SVID_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG32_CTRL3_VS_SVID_MASK    0xFFF
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG33_CTRL1    0x0c64
+#define    RTL8367C_SVLAN_MEMBERCFG33_CTRL1_VS_UNTAGSET_OFFSET    8
+#define    RTL8367C_SVLAN_MEMBERCFG33_CTRL1_VS_UNTAGSET_MASK    0xFF00
+#define    RTL8367C_SVLAN_MEMBERCFG33_CTRL1_VS_SMBR_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG33_CTRL1_VS_SMBR_MASK    0xFF
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG33_CTRL2    0x0c65
+#define    RTL8367C_SVLAN_MEMBERCFG33_CTRL2_VS_FIDEN_OFFSET    7
+#define    RTL8367C_SVLAN_MEMBERCFG33_CTRL2_VS_FIDEN_MASK    0x80
+#define    RTL8367C_SVLAN_MEMBERCFG33_CTRL2_VS_SPRI_OFFSET    4
+#define    RTL8367C_SVLAN_MEMBERCFG33_CTRL2_VS_SPRI_MASK    0x70
+#define    RTL8367C_SVLAN_MEMBERCFG33_CTRL2_VS_FID_MSTI_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG33_CTRL2_VS_FID_MSTI_MASK    0xF
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG33_CTRL3    0x0c66
+#define    RTL8367C_SVLAN_MEMBERCFG33_CTRL3_VS_EFID_OFFSET    13
+#define    RTL8367C_SVLAN_MEMBERCFG33_CTRL3_VS_EFID_MASK    0xE000
+#define    RTL8367C_SVLAN_MEMBERCFG33_CTRL3_VS_EFIDEN_OFFSET    12
+#define    RTL8367C_SVLAN_MEMBERCFG33_CTRL3_VS_EFIDEN_MASK    0x1000
+#define    RTL8367C_SVLAN_MEMBERCFG33_CTRL3_VS_SVID_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG33_CTRL3_VS_SVID_MASK    0xFFF
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG34_CTRL1    0x0c67
+#define    RTL8367C_SVLAN_MEMBERCFG34_CTRL1_VS_UNTAGSET_OFFSET    8
+#define    RTL8367C_SVLAN_MEMBERCFG34_CTRL1_VS_UNTAGSET_MASK    0xFF00
+#define    RTL8367C_SVLAN_MEMBERCFG34_CTRL1_VS_SMBR_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG34_CTRL1_VS_SMBR_MASK    0xFF
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG34_CTRL2    0x0c68
+#define    RTL8367C_SVLAN_MEMBERCFG34_CTRL2_VS_FIDEN_OFFSET    7
+#define    RTL8367C_SVLAN_MEMBERCFG34_CTRL2_VS_FIDEN_MASK    0x80
+#define    RTL8367C_SVLAN_MEMBERCFG34_CTRL2_VS_SPRI_OFFSET    4
+#define    RTL8367C_SVLAN_MEMBERCFG34_CTRL2_VS_SPRI_MASK    0x70
+#define    RTL8367C_SVLAN_MEMBERCFG34_CTRL2_VS_FID_MSTI_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG34_CTRL2_VS_FID_MSTI_MASK    0xF
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG34_CTRL3    0x0c69
+#define    RTL8367C_SVLAN_MEMBERCFG34_CTRL3_VS_EFID_OFFSET    13
+#define    RTL8367C_SVLAN_MEMBERCFG34_CTRL3_VS_EFID_MASK    0xE000
+#define    RTL8367C_SVLAN_MEMBERCFG34_CTRL3_VS_EFIDEN_OFFSET    12
+#define    RTL8367C_SVLAN_MEMBERCFG34_CTRL3_VS_EFIDEN_MASK    0x1000
+#define    RTL8367C_SVLAN_MEMBERCFG34_CTRL3_VS_SVID_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG34_CTRL3_VS_SVID_MASK    0xFFF
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG35_CTRL1    0x0c6a
+#define    RTL8367C_SVLAN_MEMBERCFG35_CTRL1_VS_UNTAGSET_OFFSET    8
+#define    RTL8367C_SVLAN_MEMBERCFG35_CTRL1_VS_UNTAGSET_MASK    0xFF00
+#define    RTL8367C_SVLAN_MEMBERCFG35_CTRL1_VS_SMBR_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG35_CTRL1_VS_SMBR_MASK    0xFF
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG35_CTRL2    0x0c6b
+#define    RTL8367C_SVLAN_MEMBERCFG35_CTRL2_VS_FIDEN_OFFSET    7
+#define    RTL8367C_SVLAN_MEMBERCFG35_CTRL2_VS_FIDEN_MASK    0x80
+#define    RTL8367C_SVLAN_MEMBERCFG35_CTRL2_VS_SPRI_OFFSET    4
+#define    RTL8367C_SVLAN_MEMBERCFG35_CTRL2_VS_SPRI_MASK    0x70
+#define    RTL8367C_SVLAN_MEMBERCFG35_CTRL2_VS_FID_MSTI_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG35_CTRL2_VS_FID_MSTI_MASK    0xF
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG35_CTRL3    0x0c6c
+#define    RTL8367C_SVLAN_MEMBERCFG35_CTRL3_VS_EFID_OFFSET    13
+#define    RTL8367C_SVLAN_MEMBERCFG35_CTRL3_VS_EFID_MASK    0xE000
+#define    RTL8367C_SVLAN_MEMBERCFG35_CTRL3_VS_EFIDEN_OFFSET    12
+#define    RTL8367C_SVLAN_MEMBERCFG35_CTRL3_VS_EFIDEN_MASK    0x1000
+#define    RTL8367C_SVLAN_MEMBERCFG35_CTRL3_VS_SVID_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG35_CTRL3_VS_SVID_MASK    0xFFF
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG36_CTRL1    0x0c6d
+#define    RTL8367C_SVLAN_MEMBERCFG36_CTRL1_VS_UNTAGSET_OFFSET    8
+#define    RTL8367C_SVLAN_MEMBERCFG36_CTRL1_VS_UNTAGSET_MASK    0xFF00
+#define    RTL8367C_SVLAN_MEMBERCFG36_CTRL1_VS_SMBR_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG36_CTRL1_VS_SMBR_MASK    0xFF
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG36_CTRL2    0x0c6e
+#define    RTL8367C_SVLAN_MEMBERCFG36_CTRL2_VS_FIDEN_OFFSET    7
+#define    RTL8367C_SVLAN_MEMBERCFG36_CTRL2_VS_FIDEN_MASK    0x80
+#define    RTL8367C_SVLAN_MEMBERCFG36_CTRL2_VS_SPRI_OFFSET    4
+#define    RTL8367C_SVLAN_MEMBERCFG36_CTRL2_VS_SPRI_MASK    0x70
+#define    RTL8367C_SVLAN_MEMBERCFG36_CTRL2_VS_FID_MSTI_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG36_CTRL2_VS_FID_MSTI_MASK    0xF
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG36_CTRL3    0x0c6f
+#define    RTL8367C_SVLAN_MEMBERCFG36_CTRL3_VS_EFID_OFFSET    13
+#define    RTL8367C_SVLAN_MEMBERCFG36_CTRL3_VS_EFID_MASK    0xE000
+#define    RTL8367C_SVLAN_MEMBERCFG36_CTRL3_VS_EFIDEN_OFFSET    12
+#define    RTL8367C_SVLAN_MEMBERCFG36_CTRL3_VS_EFIDEN_MASK    0x1000
+#define    RTL8367C_SVLAN_MEMBERCFG36_CTRL3_VS_SVID_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG36_CTRL3_VS_SVID_MASK    0xFFF
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG37_CTRL1    0x0c70
+#define    RTL8367C_SVLAN_MEMBERCFG37_CTRL1_VS_UNTAGSET_OFFSET    8
+#define    RTL8367C_SVLAN_MEMBERCFG37_CTRL1_VS_UNTAGSET_MASK    0xFF00
+#define    RTL8367C_SVLAN_MEMBERCFG37_CTRL1_VS_SMBR_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG37_CTRL1_VS_SMBR_MASK    0xFF
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG37_CTRL2    0x0c71
+#define    RTL8367C_SVLAN_MEMBERCFG37_CTRL2_VS_FIDEN_OFFSET    7
+#define    RTL8367C_SVLAN_MEMBERCFG37_CTRL2_VS_FIDEN_MASK    0x80
+#define    RTL8367C_SVLAN_MEMBERCFG37_CTRL2_VS_SPRI_OFFSET    4
+#define    RTL8367C_SVLAN_MEMBERCFG37_CTRL2_VS_SPRI_MASK    0x70
+#define    RTL8367C_SVLAN_MEMBERCFG37_CTRL2_VS_FID_MSTI_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG37_CTRL2_VS_FID_MSTI_MASK    0xF
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG37_CTRL3    0x0c72
+#define    RTL8367C_SVLAN_MEMBERCFG37_CTRL3_VS_EFID_OFFSET    13
+#define    RTL8367C_SVLAN_MEMBERCFG37_CTRL3_VS_EFID_MASK    0xE000
+#define    RTL8367C_SVLAN_MEMBERCFG37_CTRL3_VS_EFIDEN_OFFSET    12
+#define    RTL8367C_SVLAN_MEMBERCFG37_CTRL3_VS_EFIDEN_MASK    0x1000
+#define    RTL8367C_SVLAN_MEMBERCFG37_CTRL3_VS_SVID_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG37_CTRL3_VS_SVID_MASK    0xFFF
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG38_CTRL1    0x0c73
+#define    RTL8367C_SVLAN_MEMBERCFG38_CTRL1_VS_UNTAGSET_OFFSET    8
+#define    RTL8367C_SVLAN_MEMBERCFG38_CTRL1_VS_UNTAGSET_MASK    0xFF00
+#define    RTL8367C_SVLAN_MEMBERCFG38_CTRL1_VS_SMBR_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG38_CTRL1_VS_SMBR_MASK    0xFF
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG38_CTRL2    0x0c74
+#define    RTL8367C_SVLAN_MEMBERCFG38_CTRL2_VS_FIDEN_OFFSET    7
+#define    RTL8367C_SVLAN_MEMBERCFG38_CTRL2_VS_FIDEN_MASK    0x80
+#define    RTL8367C_SVLAN_MEMBERCFG38_CTRL2_VS_SPRI_OFFSET    4
+#define    RTL8367C_SVLAN_MEMBERCFG38_CTRL2_VS_SPRI_MASK    0x70
+#define    RTL8367C_SVLAN_MEMBERCFG38_CTRL2_VS_FID_MSTI_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG38_CTRL2_VS_FID_MSTI_MASK    0xF
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG38_CTRL3    0x0c75
+#define    RTL8367C_SVLAN_MEMBERCFG38_CTRL3_VS_EFID_OFFSET    13
+#define    RTL8367C_SVLAN_MEMBERCFG38_CTRL3_VS_EFID_MASK    0xE000
+#define    RTL8367C_SVLAN_MEMBERCFG38_CTRL3_VS_EFIDEN_OFFSET    12
+#define    RTL8367C_SVLAN_MEMBERCFG38_CTRL3_VS_EFIDEN_MASK    0x1000
+#define    RTL8367C_SVLAN_MEMBERCFG38_CTRL3_VS_SVID_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG38_CTRL3_VS_SVID_MASK    0xFFF
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG39_CTRL1    0x0c76
+#define    RTL8367C_SVLAN_MEMBERCFG39_CTRL1_VS_UNTAGSET_OFFSET    8
+#define    RTL8367C_SVLAN_MEMBERCFG39_CTRL1_VS_UNTAGSET_MASK    0xFF00
+#define    RTL8367C_SVLAN_MEMBERCFG39_CTRL1_VS_SMBR_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG39_CTRL1_VS_SMBR_MASK    0xFF
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG39_CTRL2    0x0c77
+#define    RTL8367C_SVLAN_MEMBERCFG39_CTRL2_VS_FIDEN_OFFSET    7
+#define    RTL8367C_SVLAN_MEMBERCFG39_CTRL2_VS_FIDEN_MASK    0x80
+#define    RTL8367C_SVLAN_MEMBERCFG39_CTRL2_VS_SPRI_OFFSET    4
+#define    RTL8367C_SVLAN_MEMBERCFG39_CTRL2_VS_SPRI_MASK    0x70
+#define    RTL8367C_SVLAN_MEMBERCFG39_CTRL2_VS_FID_MSTI_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG39_CTRL2_VS_FID_MSTI_MASK    0xF
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG39_CTRL3    0x0c78
+#define    RTL8367C_SVLAN_MEMBERCFG39_CTRL3_VS_EFID_OFFSET    13
+#define    RTL8367C_SVLAN_MEMBERCFG39_CTRL3_VS_EFID_MASK    0xE000
+#define    RTL8367C_SVLAN_MEMBERCFG39_CTRL3_VS_EFIDEN_OFFSET    12
+#define    RTL8367C_SVLAN_MEMBERCFG39_CTRL3_VS_EFIDEN_MASK    0x1000
+#define    RTL8367C_SVLAN_MEMBERCFG39_CTRL3_VS_SVID_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG39_CTRL3_VS_SVID_MASK    0xFFF
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG40_CTRL1    0x0c79
+#define    RTL8367C_SVLAN_MEMBERCFG40_CTRL1_VS_UNTAGSET_OFFSET    8
+#define    RTL8367C_SVLAN_MEMBERCFG40_CTRL1_VS_UNTAGSET_MASK    0xFF00
+#define    RTL8367C_SVLAN_MEMBERCFG40_CTRL1_VS_SMBR_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG40_CTRL1_VS_SMBR_MASK    0xFF
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG40_CTRL2    0x0c7a
+#define    RTL8367C_SVLAN_MEMBERCFG40_CTRL2_VS_FIDEN_OFFSET    7
+#define    RTL8367C_SVLAN_MEMBERCFG40_CTRL2_VS_FIDEN_MASK    0x80
+#define    RTL8367C_SVLAN_MEMBERCFG40_CTRL2_VS_SPRI_OFFSET    4
+#define    RTL8367C_SVLAN_MEMBERCFG40_CTRL2_VS_SPRI_MASK    0x70
+#define    RTL8367C_SVLAN_MEMBERCFG40_CTRL2_VS_FID_MSTI_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG40_CTRL2_VS_FID_MSTI_MASK    0xF
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG40_CTRL3    0x0c7b
+#define    RTL8367C_SVLAN_MEMBERCFG40_CTRL3_VS_EFID_OFFSET    13
+#define    RTL8367C_SVLAN_MEMBERCFG40_CTRL3_VS_EFID_MASK    0xE000
+#define    RTL8367C_SVLAN_MEMBERCFG40_CTRL3_VS_EFIDEN_OFFSET    12
+#define    RTL8367C_SVLAN_MEMBERCFG40_CTRL3_VS_EFIDEN_MASK    0x1000
+#define    RTL8367C_SVLAN_MEMBERCFG40_CTRL3_VS_SVID_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG40_CTRL3_VS_SVID_MASK    0xFFF
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG41_CTRL1    0x0c7c
+#define    RTL8367C_SVLAN_MEMBERCFG41_CTRL1_VS_UNTAGSET_OFFSET    8
+#define    RTL8367C_SVLAN_MEMBERCFG41_CTRL1_VS_UNTAGSET_MASK    0xFF00
+#define    RTL8367C_SVLAN_MEMBERCFG41_CTRL1_VS_SMBR_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG41_CTRL1_VS_SMBR_MASK    0xFF
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG41_CTRL2    0x0c7d
+#define    RTL8367C_SVLAN_MEMBERCFG41_CTRL2_VS_FIDEN_OFFSET    7
+#define    RTL8367C_SVLAN_MEMBERCFG41_CTRL2_VS_FIDEN_MASK    0x80
+#define    RTL8367C_SVLAN_MEMBERCFG41_CTRL2_VS_SPRI_OFFSET    4
+#define    RTL8367C_SVLAN_MEMBERCFG41_CTRL2_VS_SPRI_MASK    0x70
+#define    RTL8367C_SVLAN_MEMBERCFG41_CTRL2_VS_FID_MSTI_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG41_CTRL2_VS_FID_MSTI_MASK    0xF
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG41_CTRL3    0x0c7e
+#define    RTL8367C_SVLAN_MEMBERCFG41_CTRL3_VS_EFID_OFFSET    13
+#define    RTL8367C_SVLAN_MEMBERCFG41_CTRL3_VS_EFID_MASK    0xE000
+#define    RTL8367C_SVLAN_MEMBERCFG41_CTRL3_VS_EFIDEN_OFFSET    12
+#define    RTL8367C_SVLAN_MEMBERCFG41_CTRL3_VS_EFIDEN_MASK    0x1000
+#define    RTL8367C_SVLAN_MEMBERCFG41_CTRL3_VS_SVID_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG41_CTRL3_VS_SVID_MASK    0xFFF
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG42_CTRL1    0x0c7f
+#define    RTL8367C_SVLAN_MEMBERCFG42_CTRL1_VS_UNTAGSET_OFFSET    8
+#define    RTL8367C_SVLAN_MEMBERCFG42_CTRL1_VS_UNTAGSET_MASK    0xFF00
+#define    RTL8367C_SVLAN_MEMBERCFG42_CTRL1_VS_SMBR_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG42_CTRL1_VS_SMBR_MASK    0xFF
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG42_CTRL2    0x0c80
+#define    RTL8367C_SVLAN_MEMBERCFG42_CTRL2_VS_FIDEN_OFFSET    7
+#define    RTL8367C_SVLAN_MEMBERCFG42_CTRL2_VS_FIDEN_MASK    0x80
+#define    RTL8367C_SVLAN_MEMBERCFG42_CTRL2_VS_SPRI_OFFSET    4
+#define    RTL8367C_SVLAN_MEMBERCFG42_CTRL2_VS_SPRI_MASK    0x70
+#define    RTL8367C_SVLAN_MEMBERCFG42_CTRL2_VS_FID_MSTI_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG42_CTRL2_VS_FID_MSTI_MASK    0xF
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG42_CTRL3    0x0c81
+#define    RTL8367C_SVLAN_MEMBERCFG42_CTRL3_VS_EFID_OFFSET    13
+#define    RTL8367C_SVLAN_MEMBERCFG42_CTRL3_VS_EFID_MASK    0xE000
+#define    RTL8367C_SVLAN_MEMBERCFG42_CTRL3_VS_EFIDEN_OFFSET    12
+#define    RTL8367C_SVLAN_MEMBERCFG42_CTRL3_VS_EFIDEN_MASK    0x1000
+#define    RTL8367C_SVLAN_MEMBERCFG42_CTRL3_VS_SVID_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG42_CTRL3_VS_SVID_MASK    0xFFF
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG43_CTRL1    0x0c82
+#define    RTL8367C_SVLAN_MEMBERCFG43_CTRL1_VS_UNTAGSET_OFFSET    8
+#define    RTL8367C_SVLAN_MEMBERCFG43_CTRL1_VS_UNTAGSET_MASK    0xFF00
+#define    RTL8367C_SVLAN_MEMBERCFG43_CTRL1_VS_SMBR_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG43_CTRL1_VS_SMBR_MASK    0xFF
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG43_CTRL2    0x0c83
+#define    RTL8367C_SVLAN_MEMBERCFG43_CTRL2_VS_FIDEN_OFFSET    7
+#define    RTL8367C_SVLAN_MEMBERCFG43_CTRL2_VS_FIDEN_MASK    0x80
+#define    RTL8367C_SVLAN_MEMBERCFG43_CTRL2_VS_SPRI_OFFSET    4
+#define    RTL8367C_SVLAN_MEMBERCFG43_CTRL2_VS_SPRI_MASK    0x70
+#define    RTL8367C_SVLAN_MEMBERCFG43_CTRL2_VS_FID_MSTI_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG43_CTRL2_VS_FID_MSTI_MASK    0xF
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG43_CTRL3    0x0c84
+#define    RTL8367C_SVLAN_MEMBERCFG43_CTRL3_VS_EFID_OFFSET    13
+#define    RTL8367C_SVLAN_MEMBERCFG43_CTRL3_VS_EFID_MASK    0xE000
+#define    RTL8367C_SVLAN_MEMBERCFG43_CTRL3_VS_EFIDEN_OFFSET    12
+#define    RTL8367C_SVLAN_MEMBERCFG43_CTRL3_VS_EFIDEN_MASK    0x1000
+#define    RTL8367C_SVLAN_MEMBERCFG43_CTRL3_VS_SVID_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG43_CTRL3_VS_SVID_MASK    0xFFF
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG44_CTRL1    0x0c85
+#define    RTL8367C_SVLAN_MEMBERCFG44_CTRL1_VS_UNTAGSET_OFFSET    8
+#define    RTL8367C_SVLAN_MEMBERCFG44_CTRL1_VS_UNTAGSET_MASK    0xFF00
+#define    RTL8367C_SVLAN_MEMBERCFG44_CTRL1_VS_SMBR_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG44_CTRL1_VS_SMBR_MASK    0xFF
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG44_CTRL2    0x0c86
+#define    RTL8367C_SVLAN_MEMBERCFG44_CTRL2_VS_FIDEN_OFFSET    7
+#define    RTL8367C_SVLAN_MEMBERCFG44_CTRL2_VS_FIDEN_MASK    0x80
+#define    RTL8367C_SVLAN_MEMBERCFG44_CTRL2_VS_SPRI_OFFSET    4
+#define    RTL8367C_SVLAN_MEMBERCFG44_CTRL2_VS_SPRI_MASK    0x70
+#define    RTL8367C_SVLAN_MEMBERCFG44_CTRL2_VS_FID_MSTI_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG44_CTRL2_VS_FID_MSTI_MASK    0xF
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG44_CTRL3    0x0c87
+#define    RTL8367C_SVLAN_MEMBERCFG44_CTRL3_VS_EFID_OFFSET    13
+#define    RTL8367C_SVLAN_MEMBERCFG44_CTRL3_VS_EFID_MASK    0xE000
+#define    RTL8367C_SVLAN_MEMBERCFG44_CTRL3_VS_EFIDEN_OFFSET    12
+#define    RTL8367C_SVLAN_MEMBERCFG44_CTRL3_VS_EFIDEN_MASK    0x1000
+#define    RTL8367C_SVLAN_MEMBERCFG44_CTRL3_VS_SVID_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG44_CTRL3_VS_SVID_MASK    0xFFF
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG45_CTRL1    0x0c88
+#define    RTL8367C_SVLAN_MEMBERCFG45_CTRL1_VS_UNTAGSET_OFFSET    8
+#define    RTL8367C_SVLAN_MEMBERCFG45_CTRL1_VS_UNTAGSET_MASK    0xFF00
+#define    RTL8367C_SVLAN_MEMBERCFG45_CTRL1_VS_SMBR_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG45_CTRL1_VS_SMBR_MASK    0xFF
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG45_CTRL2    0x0c89
+#define    RTL8367C_SVLAN_MEMBERCFG45_CTRL2_VS_FIDEN_OFFSET    7
+#define    RTL8367C_SVLAN_MEMBERCFG45_CTRL2_VS_FIDEN_MASK    0x80
+#define    RTL8367C_SVLAN_MEMBERCFG45_CTRL2_VS_SPRI_OFFSET    4
+#define    RTL8367C_SVLAN_MEMBERCFG45_CTRL2_VS_SPRI_MASK    0x70
+#define    RTL8367C_SVLAN_MEMBERCFG45_CTRL2_VS_FID_MSTI_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG45_CTRL2_VS_FID_MSTI_MASK    0xF
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG45_CTRL3    0x0c8a
+#define    RTL8367C_SVLAN_MEMBERCFG45_CTRL3_VS_EFID_OFFSET    13
+#define    RTL8367C_SVLAN_MEMBERCFG45_CTRL3_VS_EFID_MASK    0xE000
+#define    RTL8367C_SVLAN_MEMBERCFG45_CTRL3_VS_EFIDEN_OFFSET    12
+#define    RTL8367C_SVLAN_MEMBERCFG45_CTRL3_VS_EFIDEN_MASK    0x1000
+#define    RTL8367C_SVLAN_MEMBERCFG45_CTRL3_VS_SVID_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG45_CTRL3_VS_SVID_MASK    0xFFF
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG46_CTRL1    0x0c8b
+#define    RTL8367C_SVLAN_MEMBERCFG46_CTRL1_VS_UNTAGSET_OFFSET    8
+#define    RTL8367C_SVLAN_MEMBERCFG46_CTRL1_VS_UNTAGSET_MASK    0xFF00
+#define    RTL8367C_SVLAN_MEMBERCFG46_CTRL1_VS_SMBR_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG46_CTRL1_VS_SMBR_MASK    0xFF
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG46_CTRL2    0x0c8c
+#define    RTL8367C_SVLAN_MEMBERCFG46_CTRL2_VS_FIDEN_OFFSET    7
+#define    RTL8367C_SVLAN_MEMBERCFG46_CTRL2_VS_FIDEN_MASK    0x80
+#define    RTL8367C_SVLAN_MEMBERCFG46_CTRL2_VS_SPRI_OFFSET    4
+#define    RTL8367C_SVLAN_MEMBERCFG46_CTRL2_VS_SPRI_MASK    0x70
+#define    RTL8367C_SVLAN_MEMBERCFG46_CTRL2_VS_FID_MSTI_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG46_CTRL2_VS_FID_MSTI_MASK    0xF
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG46_CTRL3    0x0c8d
+#define    RTL8367C_SVLAN_MEMBERCFG46_CTRL3_VS_EFID_OFFSET    13
+#define    RTL8367C_SVLAN_MEMBERCFG46_CTRL3_VS_EFID_MASK    0xE000
+#define    RTL8367C_SVLAN_MEMBERCFG46_CTRL3_VS_EFIDEN_OFFSET    12
+#define    RTL8367C_SVLAN_MEMBERCFG46_CTRL3_VS_EFIDEN_MASK    0x1000
+#define    RTL8367C_SVLAN_MEMBERCFG46_CTRL3_VS_SVID_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG46_CTRL3_VS_SVID_MASK    0xFFF
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG47_CTRL1    0x0c8e
+#define    RTL8367C_SVLAN_MEMBERCFG47_CTRL1_VS_UNTAGSET_OFFSET    8
+#define    RTL8367C_SVLAN_MEMBERCFG47_CTRL1_VS_UNTAGSET_MASK    0xFF00
+#define    RTL8367C_SVLAN_MEMBERCFG47_CTRL1_VS_SMBR_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG47_CTRL1_VS_SMBR_MASK    0xFF
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG47_CTRL2    0x0c8f
+#define    RTL8367C_SVLAN_MEMBERCFG47_CTRL2_VS_FIDEN_OFFSET    7
+#define    RTL8367C_SVLAN_MEMBERCFG47_CTRL2_VS_FIDEN_MASK    0x80
+#define    RTL8367C_SVLAN_MEMBERCFG47_CTRL2_VS_SPRI_OFFSET    4
+#define    RTL8367C_SVLAN_MEMBERCFG47_CTRL2_VS_SPRI_MASK    0x70
+#define    RTL8367C_SVLAN_MEMBERCFG47_CTRL2_VS_FID_MSTI_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG47_CTRL2_VS_FID_MSTI_MASK    0xF
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG47_CTRL3    0x0c90
+#define    RTL8367C_SVLAN_MEMBERCFG47_CTRL3_VS_EFID_OFFSET    13
+#define    RTL8367C_SVLAN_MEMBERCFG47_CTRL3_VS_EFID_MASK    0xE000
+#define    RTL8367C_SVLAN_MEMBERCFG47_CTRL3_VS_EFIDEN_OFFSET    12
+#define    RTL8367C_SVLAN_MEMBERCFG47_CTRL3_VS_EFIDEN_MASK    0x1000
+#define    RTL8367C_SVLAN_MEMBERCFG47_CTRL3_VS_SVID_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG47_CTRL3_VS_SVID_MASK    0xFFF
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG48_CTRL1    0x0c91
+#define    RTL8367C_SVLAN_MEMBERCFG48_CTRL1_VS_UNTAGSET_OFFSET    8
+#define    RTL8367C_SVLAN_MEMBERCFG48_CTRL1_VS_UNTAGSET_MASK    0xFF00
+#define    RTL8367C_SVLAN_MEMBERCFG48_CTRL1_VS_SMBR_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG48_CTRL1_VS_SMBR_MASK    0xFF
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG48_CTRL2    0x0c92
+#define    RTL8367C_SVLAN_MEMBERCFG48_CTRL2_VS_FIDEN_OFFSET    7
+#define    RTL8367C_SVLAN_MEMBERCFG48_CTRL2_VS_FIDEN_MASK    0x80
+#define    RTL8367C_SVLAN_MEMBERCFG48_CTRL2_VS_SPRI_OFFSET    4
+#define    RTL8367C_SVLAN_MEMBERCFG48_CTRL2_VS_SPRI_MASK    0x70
+#define    RTL8367C_SVLAN_MEMBERCFG48_CTRL2_VS_FID_MSTI_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG48_CTRL2_VS_FID_MSTI_MASK    0xF
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG48_CTRL3    0x0c93
+#define    RTL8367C_SVLAN_MEMBERCFG48_CTRL3_VS_EFID_OFFSET    13
+#define    RTL8367C_SVLAN_MEMBERCFG48_CTRL3_VS_EFID_MASK    0xE000
+#define    RTL8367C_SVLAN_MEMBERCFG48_CTRL3_VS_EFIDEN_OFFSET    12
+#define    RTL8367C_SVLAN_MEMBERCFG48_CTRL3_VS_EFIDEN_MASK    0x1000
+#define    RTL8367C_SVLAN_MEMBERCFG48_CTRL3_VS_SVID_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG48_CTRL3_VS_SVID_MASK    0xFFF
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG49_CTRL1    0x0c94
+#define    RTL8367C_SVLAN_MEMBERCFG49_CTRL1_VS_UNTAGSET_OFFSET    8
+#define    RTL8367C_SVLAN_MEMBERCFG49_CTRL1_VS_UNTAGSET_MASK    0xFF00
+#define    RTL8367C_SVLAN_MEMBERCFG49_CTRL1_VS_SMBR_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG49_CTRL1_VS_SMBR_MASK    0xFF
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG49_CTRL2    0x0c95
+#define    RTL8367C_SVLAN_MEMBERCFG49_CTRL2_VS_FIDEN_OFFSET    7
+#define    RTL8367C_SVLAN_MEMBERCFG49_CTRL2_VS_FIDEN_MASK    0x80
+#define    RTL8367C_SVLAN_MEMBERCFG49_CTRL2_VS_SPRI_OFFSET    4
+#define    RTL8367C_SVLAN_MEMBERCFG49_CTRL2_VS_SPRI_MASK    0x70
+#define    RTL8367C_SVLAN_MEMBERCFG49_CTRL2_VS_FID_MSTI_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG49_CTRL2_VS_FID_MSTI_MASK    0xF
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG49_CTRL3    0x0c96
+#define    RTL8367C_SVLAN_MEMBERCFG49_CTRL3_VS_EFID_OFFSET    13
+#define    RTL8367C_SVLAN_MEMBERCFG49_CTRL3_VS_EFID_MASK    0xE000
+#define    RTL8367C_SVLAN_MEMBERCFG49_CTRL3_VS_EFIDEN_OFFSET    12
+#define    RTL8367C_SVLAN_MEMBERCFG49_CTRL3_VS_EFIDEN_MASK    0x1000
+#define    RTL8367C_SVLAN_MEMBERCFG49_CTRL3_VS_SVID_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG49_CTRL3_VS_SVID_MASK    0xFFF
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG50_CTRL1    0x0c97
+#define    RTL8367C_SVLAN_MEMBERCFG50_CTRL1_VS_UNTAGSET_OFFSET    8
+#define    RTL8367C_SVLAN_MEMBERCFG50_CTRL1_VS_UNTAGSET_MASK    0xFF00
+#define    RTL8367C_SVLAN_MEMBERCFG50_CTRL1_VS_SMBR_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG50_CTRL1_VS_SMBR_MASK    0xFF
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG50_CTRL2    0x0c98
+#define    RTL8367C_SVLAN_MEMBERCFG50_CTRL2_VS_FIDEN_OFFSET    7
+#define    RTL8367C_SVLAN_MEMBERCFG50_CTRL2_VS_FIDEN_MASK    0x80
+#define    RTL8367C_SVLAN_MEMBERCFG50_CTRL2_VS_SPRI_OFFSET    4
+#define    RTL8367C_SVLAN_MEMBERCFG50_CTRL2_VS_SPRI_MASK    0x70
+#define    RTL8367C_SVLAN_MEMBERCFG50_CTRL2_VS_FID_MSTI_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG50_CTRL2_VS_FID_MSTI_MASK    0xF
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG50_CTRL3    0x0c99
+#define    RTL8367C_SVLAN_MEMBERCFG50_CTRL3_VS_EFID_OFFSET    13
+#define    RTL8367C_SVLAN_MEMBERCFG50_CTRL3_VS_EFID_MASK    0xE000
+#define    RTL8367C_SVLAN_MEMBERCFG50_CTRL3_VS_EFIDEN_OFFSET    12
+#define    RTL8367C_SVLAN_MEMBERCFG50_CTRL3_VS_EFIDEN_MASK    0x1000
+#define    RTL8367C_SVLAN_MEMBERCFG50_CTRL3_VS_SVID_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG50_CTRL3_VS_SVID_MASK    0xFFF
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG51_CTRL1    0x0c9a
+#define    RTL8367C_SVLAN_MEMBERCFG51_CTRL1_VS_UNTAGSET_OFFSET    8
+#define    RTL8367C_SVLAN_MEMBERCFG51_CTRL1_VS_UNTAGSET_MASK    0xFF00
+#define    RTL8367C_SVLAN_MEMBERCFG51_CTRL1_VS_SMBR_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG51_CTRL1_VS_SMBR_MASK    0xFF
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG51_CTRL2    0x0c9b
+#define    RTL8367C_SVLAN_MEMBERCFG51_CTRL2_VS_FIDEN_OFFSET    7
+#define    RTL8367C_SVLAN_MEMBERCFG51_CTRL2_VS_FIDEN_MASK    0x80
+#define    RTL8367C_SVLAN_MEMBERCFG51_CTRL2_VS_SPRI_OFFSET    4
+#define    RTL8367C_SVLAN_MEMBERCFG51_CTRL2_VS_SPRI_MASK    0x70
+#define    RTL8367C_SVLAN_MEMBERCFG51_CTRL2_VS_FID_MSTI_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG51_CTRL2_VS_FID_MSTI_MASK    0xF
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG51_CTRL3    0x0c9c
+#define    RTL8367C_SVLAN_MEMBERCFG51_CTRL3_VS_EFID_OFFSET    13
+#define    RTL8367C_SVLAN_MEMBERCFG51_CTRL3_VS_EFID_MASK    0xE000
+#define    RTL8367C_SVLAN_MEMBERCFG51_CTRL3_VS_EFIDEN_OFFSET    12
+#define    RTL8367C_SVLAN_MEMBERCFG51_CTRL3_VS_EFIDEN_MASK    0x1000
+#define    RTL8367C_SVLAN_MEMBERCFG51_CTRL3_VS_SVID_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG51_CTRL3_VS_SVID_MASK    0xFFF
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG52_CTRL1    0x0c9d
+#define    RTL8367C_SVLAN_MEMBERCFG52_CTRL1_VS_UNTAGSET_OFFSET    8
+#define    RTL8367C_SVLAN_MEMBERCFG52_CTRL1_VS_UNTAGSET_MASK    0xFF00
+#define    RTL8367C_SVLAN_MEMBERCFG52_CTRL1_VS_SMBR_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG52_CTRL1_VS_SMBR_MASK    0xFF
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG52_CTRL2    0x0c9e
+#define    RTL8367C_SVLAN_MEMBERCFG52_CTRL2_VS_FIDEN_OFFSET    7
+#define    RTL8367C_SVLAN_MEMBERCFG52_CTRL2_VS_FIDEN_MASK    0x80
+#define    RTL8367C_SVLAN_MEMBERCFG52_CTRL2_VS_SPRI_OFFSET    4
+#define    RTL8367C_SVLAN_MEMBERCFG52_CTRL2_VS_SPRI_MASK    0x70
+#define    RTL8367C_SVLAN_MEMBERCFG52_CTRL2_VS_FID_MSTI_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG52_CTRL2_VS_FID_MSTI_MASK    0xF
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG52_CTRL3    0x0c9f
+#define    RTL8367C_SVLAN_MEMBERCFG52_CTRL3_VS_EFID_OFFSET    13
+#define    RTL8367C_SVLAN_MEMBERCFG52_CTRL3_VS_EFID_MASK    0xE000
+#define    RTL8367C_SVLAN_MEMBERCFG52_CTRL3_VS_EFIDEN_OFFSET    12
+#define    RTL8367C_SVLAN_MEMBERCFG52_CTRL3_VS_EFIDEN_MASK    0x1000
+#define    RTL8367C_SVLAN_MEMBERCFG52_CTRL3_VS_SVID_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG52_CTRL3_VS_SVID_MASK    0xFFF
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG53_CTRL1    0x0ca0
+#define    RTL8367C_SVLAN_MEMBERCFG53_CTRL1_VS_UNTAGSET_OFFSET    8
+#define    RTL8367C_SVLAN_MEMBERCFG53_CTRL1_VS_UNTAGSET_MASK    0xFF00
+#define    RTL8367C_SVLAN_MEMBERCFG53_CTRL1_VS_SMBR_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG53_CTRL1_VS_SMBR_MASK    0xFF
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG53_CTRL2    0x0ca1
+#define    RTL8367C_SVLAN_MEMBERCFG53_CTRL2_VS_FIDEN_OFFSET    7
+#define    RTL8367C_SVLAN_MEMBERCFG53_CTRL2_VS_FIDEN_MASK    0x80
+#define    RTL8367C_SVLAN_MEMBERCFG53_CTRL2_VS_SPRI_OFFSET    4
+#define    RTL8367C_SVLAN_MEMBERCFG53_CTRL2_VS_SPRI_MASK    0x70
+#define    RTL8367C_SVLAN_MEMBERCFG53_CTRL2_VS_FID_MSTI_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG53_CTRL2_VS_FID_MSTI_MASK    0xF
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG53_CTRL3    0x0ca2
+#define    RTL8367C_SVLAN_MEMBERCFG53_CTRL3_VS_EFID_OFFSET    13
+#define    RTL8367C_SVLAN_MEMBERCFG53_CTRL3_VS_EFID_MASK    0xE000
+#define    RTL8367C_SVLAN_MEMBERCFG53_CTRL3_VS_EFIDEN_OFFSET    12
+#define    RTL8367C_SVLAN_MEMBERCFG53_CTRL3_VS_EFIDEN_MASK    0x1000
+#define    RTL8367C_SVLAN_MEMBERCFG53_CTRL3_VS_SVID_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG53_CTRL3_VS_SVID_MASK    0xFFF
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG54_CTRL1    0x0ca3
+#define    RTL8367C_SVLAN_MEMBERCFG54_CTRL1_VS_UNTAGSET_OFFSET    8
+#define    RTL8367C_SVLAN_MEMBERCFG54_CTRL1_VS_UNTAGSET_MASK    0xFF00
+#define    RTL8367C_SVLAN_MEMBERCFG54_CTRL1_VS_SMBR_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG54_CTRL1_VS_SMBR_MASK    0xFF
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG54_CTRL2    0x0ca4
+#define    RTL8367C_SVLAN_MEMBERCFG54_CTRL2_VS_FIDEN_OFFSET    7
+#define    RTL8367C_SVLAN_MEMBERCFG54_CTRL2_VS_FIDEN_MASK    0x80
+#define    RTL8367C_SVLAN_MEMBERCFG54_CTRL2_VS_SPRI_OFFSET    4
+#define    RTL8367C_SVLAN_MEMBERCFG54_CTRL2_VS_SPRI_MASK    0x70
+#define    RTL8367C_SVLAN_MEMBERCFG54_CTRL2_VS_FID_MSTI_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG54_CTRL2_VS_FID_MSTI_MASK    0xF
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG54_CTRL3    0x0ca5
+#define    RTL8367C_SVLAN_MEMBERCFG54_CTRL3_VS_EFID_OFFSET    13
+#define    RTL8367C_SVLAN_MEMBERCFG54_CTRL3_VS_EFID_MASK    0xE000
+#define    RTL8367C_SVLAN_MEMBERCFG54_CTRL3_VS_EFIDEN_OFFSET    12
+#define    RTL8367C_SVLAN_MEMBERCFG54_CTRL3_VS_EFIDEN_MASK    0x1000
+#define    RTL8367C_SVLAN_MEMBERCFG54_CTRL3_VS_SVID_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG54_CTRL3_VS_SVID_MASK    0xFFF
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG55_CTRL1    0x0ca6
+#define    RTL8367C_SVLAN_MEMBERCFG55_CTRL1_VS_UNTAGSET_OFFSET    8
+#define    RTL8367C_SVLAN_MEMBERCFG55_CTRL1_VS_UNTAGSET_MASK    0xFF00
+#define    RTL8367C_SVLAN_MEMBERCFG55_CTRL1_VS_SMBR_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG55_CTRL1_VS_SMBR_MASK    0xFF
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG55_CTRL2    0x0ca7
+#define    RTL8367C_SVLAN_MEMBERCFG55_CTRL2_VS_FIDEN_OFFSET    7
+#define    RTL8367C_SVLAN_MEMBERCFG55_CTRL2_VS_FIDEN_MASK    0x80
+#define    RTL8367C_SVLAN_MEMBERCFG55_CTRL2_VS_SPRI_OFFSET    4
+#define    RTL8367C_SVLAN_MEMBERCFG55_CTRL2_VS_SPRI_MASK    0x70
+#define    RTL8367C_SVLAN_MEMBERCFG55_CTRL2_VS_FID_MSTI_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG55_CTRL2_VS_FID_MSTI_MASK    0xF
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG55_CTRL3    0x0ca8
+#define    RTL8367C_SVLAN_MEMBERCFG55_CTRL3_VS_EFID_OFFSET    13
+#define    RTL8367C_SVLAN_MEMBERCFG55_CTRL3_VS_EFID_MASK    0xE000
+#define    RTL8367C_SVLAN_MEMBERCFG55_CTRL3_VS_EFIDEN_OFFSET    12
+#define    RTL8367C_SVLAN_MEMBERCFG55_CTRL3_VS_EFIDEN_MASK    0x1000
+#define    RTL8367C_SVLAN_MEMBERCFG55_CTRL3_VS_SVID_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG55_CTRL3_VS_SVID_MASK    0xFFF
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG56_CTRL1    0x0ca9
+#define    RTL8367C_SVLAN_MEMBERCFG56_CTRL1_VS_UNTAGSET_OFFSET    8
+#define    RTL8367C_SVLAN_MEMBERCFG56_CTRL1_VS_UNTAGSET_MASK    0xFF00
+#define    RTL8367C_SVLAN_MEMBERCFG56_CTRL1_VS_SMBR_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG56_CTRL1_VS_SMBR_MASK    0xFF
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG56_CTRL2    0x0caa
+#define    RTL8367C_SVLAN_MEMBERCFG56_CTRL2_VS_FIDEN_OFFSET    7
+#define    RTL8367C_SVLAN_MEMBERCFG56_CTRL2_VS_FIDEN_MASK    0x80
+#define    RTL8367C_SVLAN_MEMBERCFG56_CTRL2_VS_SPRI_OFFSET    4
+#define    RTL8367C_SVLAN_MEMBERCFG56_CTRL2_VS_SPRI_MASK    0x70
+#define    RTL8367C_SVLAN_MEMBERCFG56_CTRL2_VS_FID_MSTI_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG56_CTRL2_VS_FID_MSTI_MASK    0xF
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG56_CTRL3    0x0cab
+#define    RTL8367C_SVLAN_MEMBERCFG56_CTRL3_VS_EFID_OFFSET    13
+#define    RTL8367C_SVLAN_MEMBERCFG56_CTRL3_VS_EFID_MASK    0xE000
+#define    RTL8367C_SVLAN_MEMBERCFG56_CTRL3_VS_EFIDEN_OFFSET    12
+#define    RTL8367C_SVLAN_MEMBERCFG56_CTRL3_VS_EFIDEN_MASK    0x1000
+#define    RTL8367C_SVLAN_MEMBERCFG56_CTRL3_VS_SVID_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG56_CTRL3_VS_SVID_MASK    0xFFF
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG57_CTRL1    0x0cac
+#define    RTL8367C_SVLAN_MEMBERCFG57_CTRL1_VS_UNTAGSET_OFFSET    8
+#define    RTL8367C_SVLAN_MEMBERCFG57_CTRL1_VS_UNTAGSET_MASK    0xFF00
+#define    RTL8367C_SVLAN_MEMBERCFG57_CTRL1_VS_SMBR_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG57_CTRL1_VS_SMBR_MASK    0xFF
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG57_CTRL2    0x0cad
+#define    RTL8367C_SVLAN_MEMBERCFG57_CTRL2_VS_FIDEN_OFFSET    7
+#define    RTL8367C_SVLAN_MEMBERCFG57_CTRL2_VS_FIDEN_MASK    0x80
+#define    RTL8367C_SVLAN_MEMBERCFG57_CTRL2_VS_SPRI_OFFSET    4
+#define    RTL8367C_SVLAN_MEMBERCFG57_CTRL2_VS_SPRI_MASK    0x70
+#define    RTL8367C_SVLAN_MEMBERCFG57_CTRL2_VS_FID_MSTI_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG57_CTRL2_VS_FID_MSTI_MASK    0xF
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG57_CTRL3    0x0cae
+#define    RTL8367C_SVLAN_MEMBERCFG57_CTRL3_VS_EFID_OFFSET    13
+#define    RTL8367C_SVLAN_MEMBERCFG57_CTRL3_VS_EFID_MASK    0xE000
+#define    RTL8367C_SVLAN_MEMBERCFG57_CTRL3_VS_EFIDEN_OFFSET    12
+#define    RTL8367C_SVLAN_MEMBERCFG57_CTRL3_VS_EFIDEN_MASK    0x1000
+#define    RTL8367C_SVLAN_MEMBERCFG57_CTRL3_VS_SVID_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG57_CTRL3_VS_SVID_MASK    0xFFF
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG58_CTRL1    0x0caf
+#define    RTL8367C_SVLAN_MEMBERCFG58_CTRL1_VS_UNTAGSET_OFFSET    8
+#define    RTL8367C_SVLAN_MEMBERCFG58_CTRL1_VS_UNTAGSET_MASK    0xFF00
+#define    RTL8367C_SVLAN_MEMBERCFG58_CTRL1_VS_SMBR_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG58_CTRL1_VS_SMBR_MASK    0xFF
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG58_CTRL2    0x0cb0
+#define    RTL8367C_SVLAN_MEMBERCFG58_CTRL2_VS_FIDEN_OFFSET    7
+#define    RTL8367C_SVLAN_MEMBERCFG58_CTRL2_VS_FIDEN_MASK    0x80
+#define    RTL8367C_SVLAN_MEMBERCFG58_CTRL2_VS_SPRI_OFFSET    4
+#define    RTL8367C_SVLAN_MEMBERCFG58_CTRL2_VS_SPRI_MASK    0x70
+#define    RTL8367C_SVLAN_MEMBERCFG58_CTRL2_VS_FID_MSTI_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG58_CTRL2_VS_FID_MSTI_MASK    0xF
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG58_CTRL3    0x0cb1
+#define    RTL8367C_SVLAN_MEMBERCFG58_CTRL3_VS_EFID_OFFSET    13
+#define    RTL8367C_SVLAN_MEMBERCFG58_CTRL3_VS_EFID_MASK    0xE000
+#define    RTL8367C_SVLAN_MEMBERCFG58_CTRL3_VS_EFIDEN_OFFSET    12
+#define    RTL8367C_SVLAN_MEMBERCFG58_CTRL3_VS_EFIDEN_MASK    0x1000
+#define    RTL8367C_SVLAN_MEMBERCFG58_CTRL3_VS_SVID_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG58_CTRL3_VS_SVID_MASK    0xFFF
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG59_CTRL1    0x0cb2
+#define    RTL8367C_SVLAN_MEMBERCFG59_CTRL1_VS_UNTAGSET_OFFSET    8
+#define    RTL8367C_SVLAN_MEMBERCFG59_CTRL1_VS_UNTAGSET_MASK    0xFF00
+#define    RTL8367C_SVLAN_MEMBERCFG59_CTRL1_VS_SMBR_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG59_CTRL1_VS_SMBR_MASK    0xFF
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG59_CTRL2    0x0cb3
+#define    RTL8367C_SVLAN_MEMBERCFG59_CTRL2_VS_FIDEN_OFFSET    7
+#define    RTL8367C_SVLAN_MEMBERCFG59_CTRL2_VS_FIDEN_MASK    0x80
+#define    RTL8367C_SVLAN_MEMBERCFG59_CTRL2_VS_SPRI_OFFSET    4
+#define    RTL8367C_SVLAN_MEMBERCFG59_CTRL2_VS_SPRI_MASK    0x70
+#define    RTL8367C_SVLAN_MEMBERCFG59_CTRL2_VS_FID_MSTI_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG59_CTRL2_VS_FID_MSTI_MASK    0xF
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG59_CTRL3    0x0cb4
+#define    RTL8367C_SVLAN_MEMBERCFG59_CTRL3_VS_EFID_OFFSET    13
+#define    RTL8367C_SVLAN_MEMBERCFG59_CTRL3_VS_EFID_MASK    0xE000
+#define    RTL8367C_SVLAN_MEMBERCFG59_CTRL3_VS_EFIDEN_OFFSET    12
+#define    RTL8367C_SVLAN_MEMBERCFG59_CTRL3_VS_EFIDEN_MASK    0x1000
+#define    RTL8367C_SVLAN_MEMBERCFG59_CTRL3_VS_SVID_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG59_CTRL3_VS_SVID_MASK    0xFFF
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG60_CTRL1    0x0cb5
+#define    RTL8367C_SVLAN_MEMBERCFG60_CTRL1_VS_UNTAGSET_OFFSET    8
+#define    RTL8367C_SVLAN_MEMBERCFG60_CTRL1_VS_UNTAGSET_MASK    0xFF00
+#define    RTL8367C_SVLAN_MEMBERCFG60_CTRL1_VS_SMBR_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG60_CTRL1_VS_SMBR_MASK    0xFF
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG60_CTRL2    0x0cb6
+#define    RTL8367C_SVLAN_MEMBERCFG60_CTRL2_VS_FIDEN_OFFSET    7
+#define    RTL8367C_SVLAN_MEMBERCFG60_CTRL2_VS_FIDEN_MASK    0x80
+#define    RTL8367C_SVLAN_MEMBERCFG60_CTRL2_VS_SPRI_OFFSET    4
+#define    RTL8367C_SVLAN_MEMBERCFG60_CTRL2_VS_SPRI_MASK    0x70
+#define    RTL8367C_SVLAN_MEMBERCFG60_CTRL2_VS_FID_MSTI_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG60_CTRL2_VS_FID_MSTI_MASK    0xF
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG60_CTRL3    0x0cb7
+#define    RTL8367C_SVLAN_MEMBERCFG60_CTRL3_VS_EFID_OFFSET    13
+#define    RTL8367C_SVLAN_MEMBERCFG60_CTRL3_VS_EFID_MASK    0xE000
+#define    RTL8367C_SVLAN_MEMBERCFG60_CTRL3_VS_EFIDEN_OFFSET    12
+#define    RTL8367C_SVLAN_MEMBERCFG60_CTRL3_VS_EFIDEN_MASK    0x1000
+#define    RTL8367C_SVLAN_MEMBERCFG60_CTRL3_VS_SVID_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG60_CTRL3_VS_SVID_MASK    0xFFF
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG61_CTRL1    0x0cb8
+#define    RTL8367C_SVLAN_MEMBERCFG61_CTRL1_VS_UNTAGSET_OFFSET    8
+#define    RTL8367C_SVLAN_MEMBERCFG61_CTRL1_VS_UNTAGSET_MASK    0xFF00
+#define    RTL8367C_SVLAN_MEMBERCFG61_CTRL1_VS_SMBR_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG61_CTRL1_VS_SMBR_MASK    0xFF
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG61_CTRL2    0x0cb9
+#define    RTL8367C_SVLAN_MEMBERCFG61_CTRL2_VS_FIDEN_OFFSET    7
+#define    RTL8367C_SVLAN_MEMBERCFG61_CTRL2_VS_FIDEN_MASK    0x80
+#define    RTL8367C_SVLAN_MEMBERCFG61_CTRL2_VS_SPRI_OFFSET    4
+#define    RTL8367C_SVLAN_MEMBERCFG61_CTRL2_VS_SPRI_MASK    0x70
+#define    RTL8367C_SVLAN_MEMBERCFG61_CTRL2_VS_FID_MSTI_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG61_CTRL2_VS_FID_MSTI_MASK    0xF
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG61_CTRL3    0x0cba
+#define    RTL8367C_SVLAN_MEMBERCFG61_CTRL3_VS_EFID_OFFSET    13
+#define    RTL8367C_SVLAN_MEMBERCFG61_CTRL3_VS_EFID_MASK    0xE000
+#define    RTL8367C_SVLAN_MEMBERCFG61_CTRL3_VS_EFIDEN_OFFSET    12
+#define    RTL8367C_SVLAN_MEMBERCFG61_CTRL3_VS_EFIDEN_MASK    0x1000
+#define    RTL8367C_SVLAN_MEMBERCFG61_CTRL3_VS_SVID_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG61_CTRL3_VS_SVID_MASK    0xFFF
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG62_CTRL1    0x0cbb
+#define    RTL8367C_SVLAN_MEMBERCFG62_CTRL1_VS_UNTAGSET_OFFSET    8
+#define    RTL8367C_SVLAN_MEMBERCFG62_CTRL1_VS_UNTAGSET_MASK    0xFF00
+#define    RTL8367C_SVLAN_MEMBERCFG62_CTRL1_VS_SMBR_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG62_CTRL1_VS_SMBR_MASK    0xFF
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG62_CTRL2    0x0cbc
+#define    RTL8367C_SVLAN_MEMBERCFG62_CTRL2_VS_FIDEN_OFFSET    7
+#define    RTL8367C_SVLAN_MEMBERCFG62_CTRL2_VS_FIDEN_MASK    0x80
+#define    RTL8367C_SVLAN_MEMBERCFG62_CTRL2_VS_SPRI_OFFSET    4
+#define    RTL8367C_SVLAN_MEMBERCFG62_CTRL2_VS_SPRI_MASK    0x70
+#define    RTL8367C_SVLAN_MEMBERCFG62_CTRL2_VS_FID_MSTI_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG62_CTRL2_VS_FID_MSTI_MASK    0xF
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG62_CTRL3    0x0cbd
+#define    RTL8367C_SVLAN_MEMBERCFG62_CTRL3_VS_EFID_OFFSET    13
+#define    RTL8367C_SVLAN_MEMBERCFG62_CTRL3_VS_EFID_MASK    0xE000
+#define    RTL8367C_SVLAN_MEMBERCFG62_CTRL3_VS_EFIDEN_OFFSET    12
+#define    RTL8367C_SVLAN_MEMBERCFG62_CTRL3_VS_EFIDEN_MASK    0x1000
+#define    RTL8367C_SVLAN_MEMBERCFG62_CTRL3_VS_SVID_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG62_CTRL3_VS_SVID_MASK    0xFFF
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG63_CTRL1    0x0cbe
+#define    RTL8367C_SVLAN_MEMBERCFG63_CTRL1_VS_UNTAGSET_OFFSET    8
+#define    RTL8367C_SVLAN_MEMBERCFG63_CTRL1_VS_UNTAGSET_MASK    0xFF00
+#define    RTL8367C_SVLAN_MEMBERCFG63_CTRL1_VS_SMBR_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG63_CTRL1_VS_SMBR_MASK    0xFF
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG63_CTRL2    0x0cbf
+#define    RTL8367C_SVLAN_MEMBERCFG63_CTRL2_VS_FIDEN_OFFSET    7
+#define    RTL8367C_SVLAN_MEMBERCFG63_CTRL2_VS_FIDEN_MASK    0x80
+#define    RTL8367C_SVLAN_MEMBERCFG63_CTRL2_VS_SPRI_OFFSET    4
+#define    RTL8367C_SVLAN_MEMBERCFG63_CTRL2_VS_SPRI_MASK    0x70
+#define    RTL8367C_SVLAN_MEMBERCFG63_CTRL2_VS_FID_MSTI_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG63_CTRL2_VS_FID_MSTI_MASK    0xF
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG63_CTRL3    0x0cc0
+#define    RTL8367C_SVLAN_MEMBERCFG63_CTRL3_VS_EFID_OFFSET    13
+#define    RTL8367C_SVLAN_MEMBERCFG63_CTRL3_VS_EFID_MASK    0xE000
+#define    RTL8367C_SVLAN_MEMBERCFG63_CTRL3_VS_EFIDEN_OFFSET    12
+#define    RTL8367C_SVLAN_MEMBERCFG63_CTRL3_VS_EFIDEN_MASK    0x1000
+#define    RTL8367C_SVLAN_MEMBERCFG63_CTRL3_VS_SVID_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG63_CTRL3_VS_SVID_MASK    0xFFF
+
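+/*
+ * Extension registers for SVLAN member configurations 0-62. The EXT field
+ * masks (0x700 for VS_UNTAGSET_EXT, 0x7 for VS_SMBR_EXT) suggest these
+ * extend the 8-bit untag/member port sets held in each CTRL1 register by
+ * three additional ports. Note that no MEMBERCFG63_CTRL4 is defined in
+ * this range: the address after MEMBERCFG62_CTRL4 (0x0cff) is where the
+ * C2S configuration block begins at 0x0d00.
+ */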
+#define    RTL8367C_REG_SVLAN_MEMBERCFG0_CTRL4    0x0cc1
+#define    RTL8367C_SVLAN_MEMBERCFG0_CTRL4_VS_UNTAGSET_EXT_OFFSET    8
+#define    RTL8367C_SVLAN_MEMBERCFG0_CTRL4_VS_UNTAGSET_EXT_MASK    0x700
+#define    RTL8367C_SVLAN_MEMBERCFG0_CTRL4_VS_SMBR_EXT_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG0_CTRL4_VS_SMBR_EXT_MASK    0x7
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG1_CTRL4    0x0cc2
+#define    RTL8367C_SVLAN_MEMBERCFG1_CTRL4_VS_UNTAGSET_EXT_OFFSET    8
+#define    RTL8367C_SVLAN_MEMBERCFG1_CTRL4_VS_UNTAGSET_EXT_MASK    0x700
+#define    RTL8367C_SVLAN_MEMBERCFG1_CTRL4_VS_SMBR_EXT_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG1_CTRL4_VS_SMBR_EXT_MASK    0x7
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG2_CTRL4    0x0cc3
+#define    RTL8367C_SVLAN_MEMBERCFG2_CTRL4_VS_UNTAGSET_EXT_OFFSET    8
+#define    RTL8367C_SVLAN_MEMBERCFG2_CTRL4_VS_UNTAGSET_EXT_MASK    0x700
+#define    RTL8367C_SVLAN_MEMBERCFG2_CTRL4_VS_SMBR_EXT_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG2_CTRL4_VS_SMBR_EXT_MASK    0x7
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG3_CTRL4    0x0cc4
+#define    RTL8367C_SVLAN_MEMBERCFG3_CTRL4_VS_UNTAGSET_EXT_OFFSET    8
+#define    RTL8367C_SVLAN_MEMBERCFG3_CTRL4_VS_UNTAGSET_EXT_MASK    0x700
+#define    RTL8367C_SVLAN_MEMBERCFG3_CTRL4_VS_SMBR_EXT_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG3_CTRL4_VS_SMBR_EXT_MASK    0x7
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG4_CTRL4    0x0cc5
+#define    RTL8367C_SVLAN_MEMBERCFG4_CTRL4_VS_UNTAGSET_EXT_OFFSET    8
+#define    RTL8367C_SVLAN_MEMBERCFG4_CTRL4_VS_UNTAGSET_EXT_MASK    0x700
+#define    RTL8367C_SVLAN_MEMBERCFG4_CTRL4_VS_SMBR_EXT_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG4_CTRL4_VS_SMBR_EXT_MASK    0x7
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG5_CTRL4    0x0cc6
+#define    RTL8367C_SVLAN_MEMBERCFG5_CTRL4_VS_UNTAGSET_EXT_OFFSET    8
+#define    RTL8367C_SVLAN_MEMBERCFG5_CTRL4_VS_UNTAGSET_EXT_MASK    0x700
+#define    RTL8367C_SVLAN_MEMBERCFG5_CTRL4_VS_SMBR_EXT_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG5_CTRL4_VS_SMBR_EXT_MASK    0x7
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG6_CTRL4    0x0cc7
+#define    RTL8367C_SVLAN_MEMBERCFG6_CTRL4_VS_UNTAGSET_EXT_OFFSET    8
+#define    RTL8367C_SVLAN_MEMBERCFG6_CTRL4_VS_UNTAGSET_EXT_MASK    0x700
+#define    RTL8367C_SVLAN_MEMBERCFG6_CTRL4_VS_SMBR_EXT_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG6_CTRL4_VS_SMBR_EXT_MASK    0x7
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG7_CTRL4    0x0cc8
+#define    RTL8367C_SVLAN_MEMBERCFG7_CTRL4_VS_UNTAGSET_EXT_OFFSET    8
+#define    RTL8367C_SVLAN_MEMBERCFG7_CTRL4_VS_UNTAGSET_EXT_MASK    0x700
+#define    RTL8367C_SVLAN_MEMBERCFG7_CTRL4_VS_SMBR_EXT_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG7_CTRL4_VS_SMBR_EXT_MASK    0x7
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG8_CTRL4    0x0cc9
+#define    RTL8367C_SVLAN_MEMBERCFG8_CTRL4_VS_UNTAGSET_EXT_OFFSET    8
+#define    RTL8367C_SVLAN_MEMBERCFG8_CTRL4_VS_UNTAGSET_EXT_MASK    0x700
+#define    RTL8367C_SVLAN_MEMBERCFG8_CTRL4_VS_SMBR_EXT_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG8_CTRL4_VS_SMBR_EXT_MASK    0x7
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG9_CTRL4    0x0cca
+#define    RTL8367C_SVLAN_MEMBERCFG9_CTRL4_VS_UNTAGSET_EXT_OFFSET    8
+#define    RTL8367C_SVLAN_MEMBERCFG9_CTRL4_VS_UNTAGSET_EXT_MASK    0x700
+#define    RTL8367C_SVLAN_MEMBERCFG9_CTRL4_VS_SMBR_EXT_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG9_CTRL4_VS_SMBR_EXT_MASK    0x7
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG10_CTRL4    0x0ccb
+#define    RTL8367C_SVLAN_MEMBERCFG10_CTRL4_VS_UNTAGSET_EXT_OFFSET    8
+#define    RTL8367C_SVLAN_MEMBERCFG10_CTRL4_VS_UNTAGSET_EXT_MASK    0x700
+#define    RTL8367C_SVLAN_MEMBERCFG10_CTRL4_VS_SMBR_EXT_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG10_CTRL4_VS_SMBR_EXT_MASK    0x7
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG11_CTRL4    0x0ccc
+#define    RTL8367C_SVLAN_MEMBERCFG11_CTRL4_VS_UNTAGSET_EXT_OFFSET    8
+#define    RTL8367C_SVLAN_MEMBERCFG11_CTRL4_VS_UNTAGSET_EXT_MASK    0x700
+#define    RTL8367C_SVLAN_MEMBERCFG11_CTRL4_VS_SMBR_EXT_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG11_CTRL4_VS_SMBR_EXT_MASK    0x7
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG12_CTRL4    0x0ccd
+#define    RTL8367C_SVLAN_MEMBERCFG12_CTRL4_VS_UNTAGSET_EXT_OFFSET    8
+#define    RTL8367C_SVLAN_MEMBERCFG12_CTRL4_VS_UNTAGSET_EXT_MASK    0x700
+#define    RTL8367C_SVLAN_MEMBERCFG12_CTRL4_VS_SMBR_EXT_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG12_CTRL4_VS_SMBR_EXT_MASK    0x7
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG13_CTRL4    0x0cce
+#define    RTL8367C_SVLAN_MEMBERCFG13_CTRL4_VS_UNTAGSET_EXT_OFFSET    8
+#define    RTL8367C_SVLAN_MEMBERCFG13_CTRL4_VS_UNTAGSET_EXT_MASK    0x700
+#define    RTL8367C_SVLAN_MEMBERCFG13_CTRL4_VS_SMBR_EXT_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG13_CTRL4_VS_SMBR_EXT_MASK    0x7
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG14_CTRL4    0x0ccf
+#define    RTL8367C_SVLAN_MEMBERCFG14_CTRL4_VS_UNTAGSET_EXT_OFFSET    8
+#define    RTL8367C_SVLAN_MEMBERCFG14_CTRL4_VS_UNTAGSET_EXT_MASK    0x700
+#define    RTL8367C_SVLAN_MEMBERCFG14_CTRL4_VS_SMBR_EXT_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG14_CTRL4_VS_SMBR_EXT_MASK    0x7
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG15_CTRL4    0x0cd0
+#define    RTL8367C_SVLAN_MEMBERCFG15_CTRL4_VS_UNTAGSET_EXT_OFFSET    8
+#define    RTL8367C_SVLAN_MEMBERCFG15_CTRL4_VS_UNTAGSET_EXT_MASK    0x700
+#define    RTL8367C_SVLAN_MEMBERCFG15_CTRL4_VS_SMBR_EXT_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG15_CTRL4_VS_SMBR_EXT_MASK    0x7
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG16_CTRL4    0x0cd1
+#define    RTL8367C_SVLAN_MEMBERCFG16_CTRL4_VS_UNTAGSET_EXT_OFFSET    8
+#define    RTL8367C_SVLAN_MEMBERCFG16_CTRL4_VS_UNTAGSET_EXT_MASK    0x700
+#define    RTL8367C_SVLAN_MEMBERCFG16_CTRL4_VS_SMBR_EXT_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG16_CTRL4_VS_SMBR_EXT_MASK    0x7
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG17_CTRL4    0x0cd2
+#define    RTL8367C_SVLAN_MEMBERCFG17_CTRL4_VS_UNTAGSET_EXT_OFFSET    8
+#define    RTL8367C_SVLAN_MEMBERCFG17_CTRL4_VS_UNTAGSET_EXT_MASK    0x700
+#define    RTL8367C_SVLAN_MEMBERCFG17_CTRL4_VS_SMBR_EXT_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG17_CTRL4_VS_SMBR_EXT_MASK    0x7
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG18_CTRL4    0x0cd3
+#define    RTL8367C_SVLAN_MEMBERCFG18_CTRL4_VS_UNTAGSET_EXT_OFFSET    8
+#define    RTL8367C_SVLAN_MEMBERCFG18_CTRL4_VS_UNTAGSET_EXT_MASK    0x700
+#define    RTL8367C_SVLAN_MEMBERCFG18_CTRL4_VS_SMBR_EXT_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG18_CTRL4_VS_SMBR_EXT_MASK    0x7
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG19_CTRL4    0x0cd4
+#define    RTL8367C_SVLAN_MEMBERCFG19_CTRL4_VS_UNTAGSET_EXT_OFFSET    8
+#define    RTL8367C_SVLAN_MEMBERCFG19_CTRL4_VS_UNTAGSET_EXT_MASK    0x700
+#define    RTL8367C_SVLAN_MEMBERCFG19_CTRL4_VS_SMBR_EXT_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG19_CTRL4_VS_SMBR_EXT_MASK    0x7
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG20_CTRL4    0x0cd5
+#define    RTL8367C_SVLAN_MEMBERCFG20_CTRL4_VS_UNTAGSET_EXT_OFFSET    8
+#define    RTL8367C_SVLAN_MEMBERCFG20_CTRL4_VS_UNTAGSET_EXT_MASK    0x700
+#define    RTL8367C_SVLAN_MEMBERCFG20_CTRL4_VS_SMBR_EXT_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG20_CTRL4_VS_SMBR_EXT_MASK    0x7
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG21_CTRL4    0x0cd6
+#define    RTL8367C_SVLAN_MEMBERCFG21_CTRL4_VS_UNTAGSET_EXT_OFFSET    8
+#define    RTL8367C_SVLAN_MEMBERCFG21_CTRL4_VS_UNTAGSET_EXT_MASK    0x700
+#define    RTL8367C_SVLAN_MEMBERCFG21_CTRL4_VS_SMBR_EXT_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG21_CTRL4_VS_SMBR_EXT_MASK    0x7
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG22_CTRL4    0x0cd7
+#define    RTL8367C_SVLAN_MEMBERCFG22_CTRL4_VS_UNTAGSET_EXT_OFFSET    8
+#define    RTL8367C_SVLAN_MEMBERCFG22_CTRL4_VS_UNTAGSET_EXT_MASK    0x700
+#define    RTL8367C_SVLAN_MEMBERCFG22_CTRL4_VS_SMBR_EXT_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG22_CTRL4_VS_SMBR_EXT_MASK    0x7
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG23_CTRL4    0x0cd8
+#define    RTL8367C_SVLAN_MEMBERCFG23_CTRL4_VS_UNTAGSET_EXT_OFFSET    8
+#define    RTL8367C_SVLAN_MEMBERCFG23_CTRL4_VS_UNTAGSET_EXT_MASK    0x700
+#define    RTL8367C_SVLAN_MEMBERCFG23_CTRL4_VS_SMBR_EXT_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG23_CTRL4_VS_SMBR_EXT_MASK    0x7
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG24_CTRL4    0x0cd9
+#define    RTL8367C_SVLAN_MEMBERCFG24_CTRL4_VS_UNTAGSET_EXT_OFFSET    8
+#define    RTL8367C_SVLAN_MEMBERCFG24_CTRL4_VS_UNTAGSET_EXT_MASK    0x700
+#define    RTL8367C_SVLAN_MEMBERCFG24_CTRL4_VS_SMBR_EXT_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG24_CTRL4_VS_SMBR_EXT_MASK    0x7
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG25_CTRL4    0x0cda
+#define    RTL8367C_SVLAN_MEMBERCFG25_CTRL4_VS_UNTAGSET_EXT_OFFSET    8
+#define    RTL8367C_SVLAN_MEMBERCFG25_CTRL4_VS_UNTAGSET_EXT_MASK    0x700
+#define    RTL8367C_SVLAN_MEMBERCFG25_CTRL4_VS_SMBR_EXT_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG25_CTRL4_VS_SMBR_EXT_MASK    0x7
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG26_CTRL4    0x0cdb
+#define    RTL8367C_SVLAN_MEMBERCFG26_CTRL4_VS_UNTAGSET_EXT_OFFSET    8
+#define    RTL8367C_SVLAN_MEMBERCFG26_CTRL4_VS_UNTAGSET_EXT_MASK    0x700
+#define    RTL8367C_SVLAN_MEMBERCFG26_CTRL4_VS_SMBR_EXT_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG26_CTRL4_VS_SMBR_EXT_MASK    0x7
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG27_CTRL4    0x0cdc
+#define    RTL8367C_SVLAN_MEMBERCFG27_CTRL4_VS_UNTAGSET_EXT_OFFSET    8
+#define    RTL8367C_SVLAN_MEMBERCFG27_CTRL4_VS_UNTAGSET_EXT_MASK    0x700
+#define    RTL8367C_SVLAN_MEMBERCFG27_CTRL4_VS_SMBR_EXT_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG27_CTRL4_VS_SMBR_EXT_MASK    0x7
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG28_CTRL4    0x0cdd
+#define    RTL8367C_SVLAN_MEMBERCFG28_CTRL4_VS_UNTAGSET_EXT_OFFSET    8
+#define    RTL8367C_SVLAN_MEMBERCFG28_CTRL4_VS_UNTAGSET_EXT_MASK    0x700
+#define    RTL8367C_SVLAN_MEMBERCFG28_CTRL4_VS_SMBR_EXT_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG28_CTRL4_VS_SMBR_EXT_MASK    0x7
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG29_CTRL4    0x0cde
+#define    RTL8367C_SVLAN_MEMBERCFG29_CTRL4_VS_UNTAGSET_EXT_OFFSET    8
+#define    RTL8367C_SVLAN_MEMBERCFG29_CTRL4_VS_UNTAGSET_EXT_MASK    0x700
+#define    RTL8367C_SVLAN_MEMBERCFG29_CTRL4_VS_SMBR_EXT_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG29_CTRL4_VS_SMBR_EXT_MASK    0x7
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG30_CTRL4    0x0cdf
+#define    RTL8367C_SVLAN_MEMBERCFG30_CTRL4_VS_UNTAGSET_EXT_OFFSET    8
+#define    RTL8367C_SVLAN_MEMBERCFG30_CTRL4_VS_UNTAGSET_EXT_MASK    0x700
+#define    RTL8367C_SVLAN_MEMBERCFG30_CTRL4_VS_SMBR_EXT_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG30_CTRL4_VS_SMBR_EXT_MASK    0x7
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG31_CTRL4    0x0ce0
+#define    RTL8367C_SVLAN_MEMBERCFG31_CTRL4_VS_UNTAGSET_EXT_OFFSET    8
+#define    RTL8367C_SVLAN_MEMBERCFG31_CTRL4_VS_UNTAGSET_EXT_MASK    0x700
+#define    RTL8367C_SVLAN_MEMBERCFG31_CTRL4_VS_SMBR_EXT_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG31_CTRL4_VS_SMBR_EXT_MASK    0x7
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG32_CTRL4    0x0ce1
+#define    RTL8367C_SVLAN_MEMBERCFG32_CTRL4_VS_UNTAGSET_EXT_OFFSET    8
+#define    RTL8367C_SVLAN_MEMBERCFG32_CTRL4_VS_UNTAGSET_EXT_MASK    0x700
+#define    RTL8367C_SVLAN_MEMBERCFG32_CTRL4_VS_SMBR_EXT_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG32_CTRL4_VS_SMBR_EXT_MASK    0x7
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG33_CTRL4    0x0ce2
+#define    RTL8367C_SVLAN_MEMBERCFG33_CTRL4_VS_UNTAGSET_EXT_OFFSET    8
+#define    RTL8367C_SVLAN_MEMBERCFG33_CTRL4_VS_UNTAGSET_EXT_MASK    0x700
+#define    RTL8367C_SVLAN_MEMBERCFG33_CTRL4_VS_SMBR_EXT_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG33_CTRL4_VS_SMBR_EXT_MASK    0x7
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG34_CTRL4    0x0ce3
+#define    RTL8367C_SVLAN_MEMBERCFG34_CTRL4_VS_UNTAGSET_EXT_OFFSET    8
+#define    RTL8367C_SVLAN_MEMBERCFG34_CTRL4_VS_UNTAGSET_EXT_MASK    0x700
+#define    RTL8367C_SVLAN_MEMBERCFG34_CTRL4_VS_SMBR_EXT_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG34_CTRL4_VS_SMBR_EXT_MASK    0x7
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG35_CTRL4    0x0ce4
+#define    RTL8367C_SVLAN_MEMBERCFG35_CTRL4_VS_UNTAGSET_EXT_OFFSET    8
+#define    RTL8367C_SVLAN_MEMBERCFG35_CTRL4_VS_UNTAGSET_EXT_MASK    0x700
+#define    RTL8367C_SVLAN_MEMBERCFG35_CTRL4_VS_SMBR_EXT_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG35_CTRL4_VS_SMBR_EXT_MASK    0x7
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG36_CTRL4    0x0ce5
+#define    RTL8367C_SVLAN_MEMBERCFG36_CTRL4_VS_UNTAGSET_EXT_OFFSET    8
+#define    RTL8367C_SVLAN_MEMBERCFG36_CTRL4_VS_UNTAGSET_EXT_MASK    0x700
+#define    RTL8367C_SVLAN_MEMBERCFG36_CTRL4_VS_SMBR_EXT_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG36_CTRL4_VS_SMBR_EXT_MASK    0x7
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG37_CTRL4    0x0ce6
+#define    RTL8367C_SVLAN_MEMBERCFG37_CTRL4_VS_UNTAGSET_EXT_OFFSET    8
+#define    RTL8367C_SVLAN_MEMBERCFG37_CTRL4_VS_UNTAGSET_EXT_MASK    0x700
+#define    RTL8367C_SVLAN_MEMBERCFG37_CTRL4_VS_SMBR_EXT_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG37_CTRL4_VS_SMBR_EXT_MASK    0x7
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG38_CTRL4    0x0ce7
+#define    RTL8367C_SVLAN_MEMBERCFG38_CTRL4_VS_UNTAGSET_EXT_OFFSET    8
+#define    RTL8367C_SVLAN_MEMBERCFG38_CTRL4_VS_UNTAGSET_EXT_MASK    0x700
+#define    RTL8367C_SVLAN_MEMBERCFG38_CTRL4_VS_SMBR_EXT_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG38_CTRL4_VS_SMBR_EXT_MASK    0x7
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG39_CTRL4    0x0ce8
+#define    RTL8367C_SVLAN_MEMBERCFG39_CTRL4_VS_UNTAGSET_EXT_OFFSET    8
+#define    RTL8367C_SVLAN_MEMBERCFG39_CTRL4_VS_UNTAGSET_EXT_MASK    0x700
+#define    RTL8367C_SVLAN_MEMBERCFG39_CTRL4_VS_SMBR_EXT_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG39_CTRL4_VS_SMBR_EXT_MASK    0x7
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG40_CTRL4    0x0ce9
+#define    RTL8367C_SVLAN_MEMBERCFG40_CTRL4_VS_UNTAGSET_EXT_OFFSET    8
+#define    RTL8367C_SVLAN_MEMBERCFG40_CTRL4_VS_UNTAGSET_EXT_MASK    0x700
+#define    RTL8367C_SVLAN_MEMBERCFG40_CTRL4_VS_SMBR_EXT_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG40_CTRL4_VS_SMBR_EXT_MASK    0x7
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG41_CTRL4    0x0cea
+#define    RTL8367C_SVLAN_MEMBERCFG41_CTRL4_VS_UNTAGSET_EXT_OFFSET    8
+#define    RTL8367C_SVLAN_MEMBERCFG41_CTRL4_VS_UNTAGSET_EXT_MASK    0x700
+#define    RTL8367C_SVLAN_MEMBERCFG41_CTRL4_VS_SMBR_EXT_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG41_CTRL4_VS_SMBR_EXT_MASK    0x7
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG42_CTRL4    0x0ceb
+#define    RTL8367C_SVLAN_MEMBERCFG42_CTRL4_VS_UNTAGSET_EXT_OFFSET    8
+#define    RTL8367C_SVLAN_MEMBERCFG42_CTRL4_VS_UNTAGSET_EXT_MASK    0x700
+#define    RTL8367C_SVLAN_MEMBERCFG42_CTRL4_VS_SMBR_EXT_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG42_CTRL4_VS_SMBR_EXT_MASK    0x7
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG43_CTRL4    0x0cec
+#define    RTL8367C_SVLAN_MEMBERCFG43_CTRL4_VS_UNTAGSET_EXT_OFFSET    8
+#define    RTL8367C_SVLAN_MEMBERCFG43_CTRL4_VS_UNTAGSET_EXT_MASK    0x700
+#define    RTL8367C_SVLAN_MEMBERCFG43_CTRL4_VS_SMBR_EXT_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG43_CTRL4_VS_SMBR_EXT_MASK    0x7
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG44_CTRL4    0x0ced
+#define    RTL8367C_SVLAN_MEMBERCFG44_CTRL4_VS_UNTAGSET_EXT_OFFSET    8
+#define    RTL8367C_SVLAN_MEMBERCFG44_CTRL4_VS_UNTAGSET_EXT_MASK    0x700
+#define    RTL8367C_SVLAN_MEMBERCFG44_CTRL4_VS_SMBR_EXT_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG44_CTRL4_VS_SMBR_EXT_MASK    0x7
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG45_CTRL4    0x0cee
+#define    RTL8367C_SVLAN_MEMBERCFG45_CTRL4_VS_UNTAGSET_EXT_OFFSET    8
+#define    RTL8367C_SVLAN_MEMBERCFG45_CTRL4_VS_UNTAGSET_EXT_MASK    0x700
+#define    RTL8367C_SVLAN_MEMBERCFG45_CTRL4_VS_SMBR_EXT_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG45_CTRL4_VS_SMBR_EXT_MASK    0x7
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG46_CTRL4    0x0cef
+#define    RTL8367C_SVLAN_MEMBERCFG46_CTRL4_VS_UNTAGSET_EXT_OFFSET    8
+#define    RTL8367C_SVLAN_MEMBERCFG46_CTRL4_VS_UNTAGSET_EXT_MASK    0x700
+#define    RTL8367C_SVLAN_MEMBERCFG46_CTRL4_VS_SMBR_EXT_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG46_CTRL4_VS_SMBR_EXT_MASK    0x7
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG47_CTRL4    0x0cf0
+#define    RTL8367C_SVLAN_MEMBERCFG47_CTRL4_VS_UNTAGSET_EXT_OFFSET    8
+#define    RTL8367C_SVLAN_MEMBERCFG47_CTRL4_VS_UNTAGSET_EXT_MASK    0x700
+#define    RTL8367C_SVLAN_MEMBERCFG47_CTRL4_VS_SMBR_EXT_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG47_CTRL4_VS_SMBR_EXT_MASK    0x7
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG48_CTRL4    0x0cf1
+#define    RTL8367C_SVLAN_MEMBERCFG48_CTRL4_VS_UNTAGSET_EXT_OFFSET    8
+#define    RTL8367C_SVLAN_MEMBERCFG48_CTRL4_VS_UNTAGSET_EXT_MASK    0x700
+#define    RTL8367C_SVLAN_MEMBERCFG48_CTRL4_VS_SMBR_EXT_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG48_CTRL4_VS_SMBR_EXT_MASK    0x7
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG49_CTRL4    0x0cf2
+#define    RTL8367C_SVLAN_MEMBERCFG49_CTRL4_VS_UNTAGSET_EXT_OFFSET    8
+#define    RTL8367C_SVLAN_MEMBERCFG49_CTRL4_VS_UNTAGSET_EXT_MASK    0x700
+#define    RTL8367C_SVLAN_MEMBERCFG49_CTRL4_VS_SMBR_EXT_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG49_CTRL4_VS_SMBR_EXT_MASK    0x7
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG50_CTRL4    0x0cf3
+#define    RTL8367C_SVLAN_MEMBERCFG50_CTRL4_VS_UNTAGSET_EXT_OFFSET    8
+#define    RTL8367C_SVLAN_MEMBERCFG50_CTRL4_VS_UNTAGSET_EXT_MASK    0x700
+#define    RTL8367C_SVLAN_MEMBERCFG50_CTRL4_VS_SMBR_EXT_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG50_CTRL4_VS_SMBR_EXT_MASK    0x7
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG51_CTRL4    0x0cf4
+#define    RTL8367C_SVLAN_MEMBERCFG51_CTRL4_VS_UNTAGSET_EXT_OFFSET    8
+#define    RTL8367C_SVLAN_MEMBERCFG51_CTRL4_VS_UNTAGSET_EXT_MASK    0x700
+#define    RTL8367C_SVLAN_MEMBERCFG51_CTRL4_VS_SMBR_EXT_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG51_CTRL4_VS_SMBR_EXT_MASK    0x7
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG52_CTRL4    0x0cf5
+#define    RTL8367C_SVLAN_MEMBERCFG52_CTRL4_VS_UNTAGSET_EXT_OFFSET    8
+#define    RTL8367C_SVLAN_MEMBERCFG52_CTRL4_VS_UNTAGSET_EXT_MASK    0x700
+#define    RTL8367C_SVLAN_MEMBERCFG52_CTRL4_VS_SMBR_EXT_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG52_CTRL4_VS_SMBR_EXT_MASK    0x7
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG53_CTRL4    0x0cf6
+#define    RTL8367C_SVLAN_MEMBERCFG53_CTRL4_VS_UNTAGSET_EXT_OFFSET    8
+#define    RTL8367C_SVLAN_MEMBERCFG53_CTRL4_VS_UNTAGSET_EXT_MASK    0x700
+#define    RTL8367C_SVLAN_MEMBERCFG53_CTRL4_VS_SMBR_EXT_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG53_CTRL4_VS_SMBR_EXT_MASK    0x7
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG54_CTRL4    0x0cf7
+#define    RTL8367C_SVLAN_MEMBERCFG54_CTRL4_VS_UNTAGSET_EXT_OFFSET    8
+#define    RTL8367C_SVLAN_MEMBERCFG54_CTRL4_VS_UNTAGSET_EXT_MASK    0x700
+#define    RTL8367C_SVLAN_MEMBERCFG54_CTRL4_VS_SMBR_EXT_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG54_CTRL4_VS_SMBR_EXT_MASK    0x7
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG55_CTRL4    0x0cf8
+#define    RTL8367C_SVLAN_MEMBERCFG55_CTRL4_VS_UNTAGSET_EXT_OFFSET    8
+#define    RTL8367C_SVLAN_MEMBERCFG55_CTRL4_VS_UNTAGSET_EXT_MASK    0x700
+#define    RTL8367C_SVLAN_MEMBERCFG55_CTRL4_VS_SMBR_EXT_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG55_CTRL4_VS_SMBR_EXT_MASK    0x7
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG56_CTRL4    0x0cf9
+#define    RTL8367C_SVLAN_MEMBERCFG56_CTRL4_VS_UNTAGSET_EXT_OFFSET    8
+#define    RTL8367C_SVLAN_MEMBERCFG56_CTRL4_VS_UNTAGSET_EXT_MASK    0x700
+#define    RTL8367C_SVLAN_MEMBERCFG56_CTRL4_VS_SMBR_EXT_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG56_CTRL4_VS_SMBR_EXT_MASK    0x7
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG57_CTRL4    0x0cfa
+#define    RTL8367C_SVLAN_MEMBERCFG57_CTRL4_VS_UNTAGSET_EXT_OFFSET    8
+#define    RTL8367C_SVLAN_MEMBERCFG57_CTRL4_VS_UNTAGSET_EXT_MASK    0x700
+#define    RTL8367C_SVLAN_MEMBERCFG57_CTRL4_VS_SMBR_EXT_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG57_CTRL4_VS_SMBR_EXT_MASK    0x7
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG58_CTRL4    0x0cfb
+#define    RTL8367C_SVLAN_MEMBERCFG58_CTRL4_VS_UNTAGSET_EXT_OFFSET    8
+#define    RTL8367C_SVLAN_MEMBERCFG58_CTRL4_VS_UNTAGSET_EXT_MASK    0x700
+#define    RTL8367C_SVLAN_MEMBERCFG58_CTRL4_VS_SMBR_EXT_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG58_CTRL4_VS_SMBR_EXT_MASK    0x7
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG59_CTRL4    0x0cfc
+#define    RTL8367C_SVLAN_MEMBERCFG59_CTRL4_VS_UNTAGSET_EXT_OFFSET    8
+#define    RTL8367C_SVLAN_MEMBERCFG59_CTRL4_VS_UNTAGSET_EXT_MASK    0x700
+#define    RTL8367C_SVLAN_MEMBERCFG59_CTRL4_VS_SMBR_EXT_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG59_CTRL4_VS_SMBR_EXT_MASK    0x7
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG60_CTRL4    0x0cfd
+#define    RTL8367C_SVLAN_MEMBERCFG60_CTRL4_VS_UNTAGSET_EXT_OFFSET    8
+#define    RTL8367C_SVLAN_MEMBERCFG60_CTRL4_VS_UNTAGSET_EXT_MASK    0x700
+#define    RTL8367C_SVLAN_MEMBERCFG60_CTRL4_VS_SMBR_EXT_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG60_CTRL4_VS_SMBR_EXT_MASK    0x7
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG61_CTRL4    0x0cfe
+#define    RTL8367C_SVLAN_MEMBERCFG61_CTRL4_VS_UNTAGSET_EXT_OFFSET    8
+#define    RTL8367C_SVLAN_MEMBERCFG61_CTRL4_VS_UNTAGSET_EXT_MASK    0x700
+#define    RTL8367C_SVLAN_MEMBERCFG61_CTRL4_VS_SMBR_EXT_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG61_CTRL4_VS_SMBR_EXT_MASK    0x7
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG62_CTRL4    0x0cff
+#define    RTL8367C_SVLAN_MEMBERCFG62_CTRL4_VS_UNTAGSET_EXT_OFFSET    8
+#define    RTL8367C_SVLAN_MEMBERCFG62_CTRL4_VS_UNTAGSET_EXT_MASK    0x700
+#define    RTL8367C_SVLAN_MEMBERCFG62_CTRL4_VS_SMBR_EXT_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG62_CTRL4_VS_SMBR_EXT_MASK    0x7
+
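+/*
+ * C2S (customer-to-service VLAN) lookup entries, three registers each:
+ * CTRL0 holds a 6-bit value (presumably an SVLAN member configuration
+ * index), CTRL1 a per-port enable mask with a 3-bit extension, and CTRL2
+ * a 13-bit value (presumably the customer-side VID). Field roles are
+ * inferred from the register names and mask widths.
+ */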
+#define    RTL8367C_REG_SVLAN_C2SCFG0_CTRL0    0x0d00
+#define    RTL8367C_SVLAN_C2SCFG0_CTRL0_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG0_CTRL0_MASK    0x3F
+
+#define    RTL8367C_REG_SVLAN_C2SCFG0_CTRL1    0x0d01
+#define    RTL8367C_SVLAN_C2SCFG0_CTRL1_C2SENPMSK_EXT_OFFSET    8
+#define    RTL8367C_SVLAN_C2SCFG0_CTRL1_C2SENPMSK_EXT_MASK    0x700
+#define    RTL8367C_SVLAN_C2SCFG0_CTRL1_C2SENPMSK_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG0_CTRL1_C2SENPMSK_MASK    0xFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG0_CTRL2    0x0d02
+#define    RTL8367C_SVLAN_C2SCFG0_CTRL2_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG0_CTRL2_MASK    0x1FFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG1_CTRL0    0x0d03
+#define    RTL8367C_SVLAN_C2SCFG1_CTRL0_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG1_CTRL0_MASK    0x3F
+
+#define    RTL8367C_REG_SVLAN_C2SCFG1_CTRL1    0x0d04
+#define    RTL8367C_SVLAN_C2SCFG1_CTRL1_C2SENPMSK_EXT_OFFSET    8
+#define    RTL8367C_SVLAN_C2SCFG1_CTRL1_C2SENPMSK_EXT_MASK    0x700
+#define    RTL8367C_SVLAN_C2SCFG1_CTRL1_C2SENPMSK_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG1_CTRL1_C2SENPMSK_MASK    0xFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG1_CTRL2    0x0d05
+#define    RTL8367C_SVLAN_C2SCFG1_CTRL2_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG1_CTRL2_MASK    0x1FFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG2_CTRL0    0x0d06
+#define    RTL8367C_SVLAN_C2SCFG2_CTRL0_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG2_CTRL0_MASK    0x3F
+
+#define    RTL8367C_REG_SVLAN_C2SCFG2_CTRL1    0x0d07
+#define    RTL8367C_SVLAN_C2SCFG2_CTRL1_C2SENPMSK_EXT_OFFSET    8
+#define    RTL8367C_SVLAN_C2SCFG2_CTRL1_C2SENPMSK_EXT_MASK    0x700
+#define    RTL8367C_SVLAN_C2SCFG2_CTRL1_C2SENPMSK_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG2_CTRL1_C2SENPMSK_MASK    0xFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG2_CTRL2    0x0d08
+#define    RTL8367C_SVLAN_C2SCFG2_CTRL2_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG2_CTRL2_MASK    0x1FFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG3_CTRL0    0x0d09
+#define    RTL8367C_SVLAN_C2SCFG3_CTRL0_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG3_CTRL0_MASK    0x3F
+
+#define    RTL8367C_REG_SVLAN_C2SCFG3_CTRL1    0x0d0a
+#define    RTL8367C_SVLAN_C2SCFG3_CTRL1_C2SENPMSK_EXT_OFFSET    8
+#define    RTL8367C_SVLAN_C2SCFG3_CTRL1_C2SENPMSK_EXT_MASK    0x700
+#define    RTL8367C_SVLAN_C2SCFG3_CTRL1_C2SENPMSK_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG3_CTRL1_C2SENPMSK_MASK    0xFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG3_CTRL2    0x0d0b
+#define    RTL8367C_SVLAN_C2SCFG3_CTRL2_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG3_CTRL2_MASK    0x1FFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG4_CTRL0    0x0d0c
+#define    RTL8367C_SVLAN_C2SCFG4_CTRL0_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG4_CTRL0_MASK    0x3F
+
+#define    RTL8367C_REG_SVLAN_C2SCFG4_CTRL1    0x0d0d
+#define    RTL8367C_SVLAN_C2SCFG4_CTRL1_C2SENPMSK_EXT_OFFSET    8
+#define    RTL8367C_SVLAN_C2SCFG4_CTRL1_C2SENPMSK_EXT_MASK    0x700
+#define    RTL8367C_SVLAN_C2SCFG4_CTRL1_C2SENPMSK_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG4_CTRL1_C2SENPMSK_MASK    0xFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG4_CTRL2    0x0d0e
+#define    RTL8367C_SVLAN_C2SCFG4_CTRL2_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG4_CTRL2_MASK    0x1FFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG5_CTRL0    0x0d0f
+#define    RTL8367C_SVLAN_C2SCFG5_CTRL0_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG5_CTRL0_MASK    0x3F
+
+#define    RTL8367C_REG_SVLAN_C2SCFG5_CTRL1    0x0d10
+#define    RTL8367C_SVLAN_C2SCFG5_CTRL1_C2SENPMSK_EXT_OFFSET    8
+#define    RTL8367C_SVLAN_C2SCFG5_CTRL1_C2SENPMSK_EXT_MASK    0x700
+#define    RTL8367C_SVLAN_C2SCFG5_CTRL1_C2SENPMSK_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG5_CTRL1_C2SENPMSK_MASK    0xFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG5_CTRL2    0x0d11
+#define    RTL8367C_SVLAN_C2SCFG5_CTRL2_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG5_CTRL2_MASK    0x1FFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG6_CTRL0    0x0d12
+#define    RTL8367C_SVLAN_C2SCFG6_CTRL0_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG6_CTRL0_MASK    0x3F
+
+#define    RTL8367C_REG_SVLAN_C2SCFG6_CTRL1    0x0d13
+#define    RTL8367C_SVLAN_C2SCFG6_CTRL1_C2SENPMSK_EXT_OFFSET    8
+#define    RTL8367C_SVLAN_C2SCFG6_CTRL1_C2SENPMSK_EXT_MASK    0x700
+#define    RTL8367C_SVLAN_C2SCFG6_CTRL1_C2SENPMSK_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG6_CTRL1_C2SENPMSK_MASK    0xFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG6_CTRL2    0x0d14
+#define    RTL8367C_SVLAN_C2SCFG6_CTRL2_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG6_CTRL2_MASK    0x1FFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG7_CTRL0    0x0d15
+#define    RTL8367C_SVLAN_C2SCFG7_CTRL0_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG7_CTRL0_MASK    0x3F
+
+#define    RTL8367C_REG_SVLAN_C2SCFG7_CTRL1    0x0d16
+#define    RTL8367C_SVLAN_C2SCFG7_CTRL1_C2SENPMSK_EXT_OFFSET    8
+#define    RTL8367C_SVLAN_C2SCFG7_CTRL1_C2SENPMSK_EXT_MASK    0x700
+#define    RTL8367C_SVLAN_C2SCFG7_CTRL1_C2SENPMSK_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG7_CTRL1_C2SENPMSK_MASK    0xFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG7_CTRL2    0x0d17
+#define    RTL8367C_SVLAN_C2SCFG7_CTRL2_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG7_CTRL2_MASK    0x1FFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG8_CTRL0    0x0d18
+#define    RTL8367C_SVLAN_C2SCFG8_CTRL0_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG8_CTRL0_MASK    0x3F
+
+#define    RTL8367C_REG_SVLAN_C2SCFG8_CTRL1    0x0d19
+#define    RTL8367C_SVLAN_C2SCFG8_CTRL1_C2SENPMSK_EXT_OFFSET    8
+#define    RTL8367C_SVLAN_C2SCFG8_CTRL1_C2SENPMSK_EXT_MASK    0x700
+#define    RTL8367C_SVLAN_C2SCFG8_CTRL1_C2SENPMSK_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG8_CTRL1_C2SENPMSK_MASK    0xFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG8_CTRL2    0x0d1a
+#define    RTL8367C_SVLAN_C2SCFG8_CTRL2_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG8_CTRL2_MASK    0x1FFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG9_CTRL0    0x0d1b
+#define    RTL8367C_SVLAN_C2SCFG9_CTRL0_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG9_CTRL0_MASK    0x3F
+
+#define    RTL8367C_REG_SVLAN_C2SCFG9_CTRL1    0x0d1c
+#define    RTL8367C_SVLAN_C2SCFG9_CTRL1_C2SENPMSK_EXT_OFFSET    8
+#define    RTL8367C_SVLAN_C2SCFG9_CTRL1_C2SENPMSK_EXT_MASK    0x700
+#define    RTL8367C_SVLAN_C2SCFG9_CTRL1_C2SENPMSK_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG9_CTRL1_C2SENPMSK_MASK    0xFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG9_CTRL2    0x0d1d
+#define    RTL8367C_SVLAN_C2SCFG9_CTRL2_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG9_CTRL2_MASK    0x1FFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG10_CTRL0    0x0d1e
+#define    RTL8367C_SVLAN_C2SCFG10_CTRL0_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG10_CTRL0_MASK    0x3F
+
+#define    RTL8367C_REG_SVLAN_C2SCFG10_CTRL1    0x0d1f
+#define    RTL8367C_SVLAN_C2SCFG10_CTRL1_C2SENPMSK_EXT_OFFSET    8
+#define    RTL8367C_SVLAN_C2SCFG10_CTRL1_C2SENPMSK_EXT_MASK    0x700
+#define    RTL8367C_SVLAN_C2SCFG10_CTRL1_C2SENPMSK_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG10_CTRL1_C2SENPMSK_MASK    0xFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG10_CTRL2    0x0d20
+#define    RTL8367C_SVLAN_C2SCFG10_CTRL2_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG10_CTRL2_MASK    0x1FFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG11_CTRL0    0x0d21
+#define    RTL8367C_SVLAN_C2SCFG11_CTRL0_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG11_CTRL0_MASK    0x3F
+
+#define    RTL8367C_REG_SVLAN_C2SCFG11_CTRL1    0x0d22
+#define    RTL8367C_SVLAN_C2SCFG11_CTRL1_C2SENPMSK_EXT_OFFSET    8
+#define    RTL8367C_SVLAN_C2SCFG11_CTRL1_C2SENPMSK_EXT_MASK    0x700
+#define    RTL8367C_SVLAN_C2SCFG11_CTRL1_C2SENPMSK_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG11_CTRL1_C2SENPMSK_MASK    0xFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG11_CTRL2    0x0d23
+#define    RTL8367C_SVLAN_C2SCFG11_CTRL2_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG11_CTRL2_MASK    0x1FFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG12_CTRL0    0x0d24
+#define    RTL8367C_SVLAN_C2SCFG12_CTRL0_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG12_CTRL0_MASK    0x3F
+
+#define    RTL8367C_REG_SVLAN_C2SCFG12_CTRL1    0x0d25
+#define    RTL8367C_SVLAN_C2SCFG12_CTRL1_C2SENPMSK_EXT_OFFSET    8
+#define    RTL8367C_SVLAN_C2SCFG12_CTRL1_C2SENPMSK_EXT_MASK    0x700
+#define    RTL8367C_SVLAN_C2SCFG12_CTRL1_C2SENPMSK_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG12_CTRL1_C2SENPMSK_MASK    0xFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG12_CTRL2    0x0d26
+#define    RTL8367C_SVLAN_C2SCFG12_CTRL2_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG12_CTRL2_MASK    0x1FFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG13_CTRL0    0x0d27
+#define    RTL8367C_SVLAN_C2SCFG13_CTRL0_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG13_CTRL0_MASK    0x3F
+
+#define    RTL8367C_REG_SVLAN_C2SCFG13_CTRL1    0x0d28
+#define    RTL8367C_SVLAN_C2SCFG13_CTRL1_C2SENPMSK_EXT_OFFSET    8
+#define    RTL8367C_SVLAN_C2SCFG13_CTRL1_C2SENPMSK_EXT_MASK    0x700
+#define    RTL8367C_SVLAN_C2SCFG13_CTRL1_C2SENPMSK_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG13_CTRL1_C2SENPMSK_MASK    0xFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG13_CTRL2    0x0d29
+#define    RTL8367C_SVLAN_C2SCFG13_CTRL2_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG13_CTRL2_MASK    0x1FFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG14_CTRL0    0x0d2a
+#define    RTL8367C_SVLAN_C2SCFG14_CTRL0_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG14_CTRL0_MASK    0x3F
+
+#define    RTL8367C_REG_SVLAN_C2SCFG14_CTRL1    0x0d2b
+#define    RTL8367C_SVLAN_C2SCFG14_CTRL1_C2SENPMSK_EXT_OFFSET    8
+#define    RTL8367C_SVLAN_C2SCFG14_CTRL1_C2SENPMSK_EXT_MASK    0x700
+#define    RTL8367C_SVLAN_C2SCFG14_CTRL1_C2SENPMSK_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG14_CTRL1_C2SENPMSK_MASK    0xFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG14_CTRL2    0x0d2c
+#define    RTL8367C_SVLAN_C2SCFG14_CTRL2_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG14_CTRL2_MASK    0x1FFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG15_CTRL0    0x0d2d
+#define    RTL8367C_SVLAN_C2SCFG15_CTRL0_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG15_CTRL0_MASK    0x3F
+
+#define    RTL8367C_REG_SVLAN_C2SCFG15_CTRL1    0x0d2e
+#define    RTL8367C_SVLAN_C2SCFG15_CTRL1_C2SENPMSK_EXT_OFFSET    8
+#define    RTL8367C_SVLAN_C2SCFG15_CTRL1_C2SENPMSK_EXT_MASK    0x700
+#define    RTL8367C_SVLAN_C2SCFG15_CTRL1_C2SENPMSK_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG15_CTRL1_C2SENPMSK_MASK    0xFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG15_CTRL2    0x0d2f
+#define    RTL8367C_SVLAN_C2SCFG15_CTRL2_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG15_CTRL2_MASK    0x1FFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG16_CTRL0    0x0d30
+#define    RTL8367C_SVLAN_C2SCFG16_CTRL0_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG16_CTRL0_MASK    0x3F
+
+#define    RTL8367C_REG_SVLAN_C2SCFG16_CTRL1    0x0d31
+#define    RTL8367C_SVLAN_C2SCFG16_CTRL1_C2SENPMSK_EXT_OFFSET    8
+#define    RTL8367C_SVLAN_C2SCFG16_CTRL1_C2SENPMSK_EXT_MASK    0x700
+#define    RTL8367C_SVLAN_C2SCFG16_CTRL1_C2SENPMSK_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG16_CTRL1_C2SENPMSK_MASK    0xFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG16_CTRL2    0x0d32
+#define    RTL8367C_SVLAN_C2SCFG16_CTRL2_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG16_CTRL2_MASK    0x1FFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG17_CTRL0    0x0d33
+#define    RTL8367C_SVLAN_C2SCFG17_CTRL0_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG17_CTRL0_MASK    0x3F
+
+#define    RTL8367C_REG_SVLAN_C2SCFG17_CTRL1    0x0d34
+#define    RTL8367C_SVLAN_C2SCFG17_CTRL1_C2SENPMSK_EXT_OFFSET    8
+#define    RTL8367C_SVLAN_C2SCFG17_CTRL1_C2SENPMSK_EXT_MASK    0x700
+#define    RTL8367C_SVLAN_C2SCFG17_CTRL1_C2SENPMSK_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG17_CTRL1_C2SENPMSK_MASK    0xFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG17_CTRL2    0x0d35
+#define    RTL8367C_SVLAN_C2SCFG17_CTRL2_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG17_CTRL2_MASK    0x1FFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG18_CTRL0    0x0d36
+#define    RTL8367C_SVLAN_C2SCFG18_CTRL0_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG18_CTRL0_MASK    0x3F
+
+#define    RTL8367C_REG_SVLAN_C2SCFG18_CTRL1    0x0d37
+#define    RTL8367C_SVLAN_C2SCFG18_CTRL1_C2SENPMSK_EXT_OFFSET    8
+#define    RTL8367C_SVLAN_C2SCFG18_CTRL1_C2SENPMSK_EXT_MASK    0x700
+#define    RTL8367C_SVLAN_C2SCFG18_CTRL1_C2SENPMSK_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG18_CTRL1_C2SENPMSK_MASK    0xFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG18_CTRL2    0x0d38
+#define    RTL8367C_SVLAN_C2SCFG18_CTRL2_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG18_CTRL2_MASK    0x1FFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG19_CTRL0    0x0d39
+#define    RTL8367C_SVLAN_C2SCFG19_CTRL0_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG19_CTRL0_MASK    0x3F
+
+#define    RTL8367C_REG_SVLAN_C2SCFG19_CTRL1    0x0d3a
+#define    RTL8367C_SVLAN_C2SCFG19_CTRL1_C2SENPMSK_EXT_OFFSET    8
+#define    RTL8367C_SVLAN_C2SCFG19_CTRL1_C2SENPMSK_EXT_MASK    0x700
+#define    RTL8367C_SVLAN_C2SCFG19_CTRL1_C2SENPMSK_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG19_CTRL1_C2SENPMSK_MASK    0xFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG19_CTRL2    0x0d3b
+#define    RTL8367C_SVLAN_C2SCFG19_CTRL2_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG19_CTRL2_MASK    0x1FFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG20_CTRL0    0x0d3c
+#define    RTL8367C_SVLAN_C2SCFG20_CTRL0_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG20_CTRL0_MASK    0x3F
+
+#define    RTL8367C_REG_SVLAN_C2SCFG20_CTRL1    0x0d3d
+#define    RTL8367C_SVLAN_C2SCFG20_CTRL1_C2SENPMSK_EXT_OFFSET    8
+#define    RTL8367C_SVLAN_C2SCFG20_CTRL1_C2SENPMSK_EXT_MASK    0x700
+#define    RTL8367C_SVLAN_C2SCFG20_CTRL1_C2SENPMSK_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG20_CTRL1_C2SENPMSK_MASK    0xFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG20_CTRL2    0x0d3e
+#define    RTL8367C_SVLAN_C2SCFG20_CTRL2_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG20_CTRL2_MASK    0x1FFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG21_CTRL0    0x0d3f
+#define    RTL8367C_SVLAN_C2SCFG21_CTRL0_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG21_CTRL0_MASK    0x3F
+
+#define    RTL8367C_REG_SVLAN_C2SCFG21_CTRL1    0x0d40
+#define    RTL8367C_SVLAN_C2SCFG21_CTRL1_C2SENPMSK_EXT_OFFSET    8
+#define    RTL8367C_SVLAN_C2SCFG21_CTRL1_C2SENPMSK_EXT_MASK    0x700
+#define    RTL8367C_SVLAN_C2SCFG21_CTRL1_C2SENPMSK_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG21_CTRL1_C2SENPMSK_MASK    0xFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG21_CTRL2    0x0d41
+#define    RTL8367C_SVLAN_C2SCFG21_CTRL2_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG21_CTRL2_MASK    0x1FFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG22_CTRL0    0x0d42
+#define    RTL8367C_SVLAN_C2SCFG22_CTRL0_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG22_CTRL0_MASK    0x3F
+
+#define    RTL8367C_REG_SVLAN_C2SCFG22_CTRL1    0x0d43
+#define    RTL8367C_SVLAN_C2SCFG22_CTRL1_C2SENPMSK_EXT_OFFSET    8
+#define    RTL8367C_SVLAN_C2SCFG22_CTRL1_C2SENPMSK_EXT_MASK    0x700
+#define    RTL8367C_SVLAN_C2SCFG22_CTRL1_C2SENPMSK_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG22_CTRL1_C2SENPMSK_MASK    0xFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG22_CTRL2    0x0d44
+#define    RTL8367C_SVLAN_C2SCFG22_CTRL2_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG22_CTRL2_MASK    0x1FFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG23_CTRL0    0x0d45
+#define    RTL8367C_SVLAN_C2SCFG23_CTRL0_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG23_CTRL0_MASK    0x3F
+
+#define    RTL8367C_REG_SVLAN_C2SCFG23_CTRL1    0x0d46
+#define    RTL8367C_SVLAN_C2SCFG23_CTRL1_C2SENPMSK_EXT_OFFSET    8
+#define    RTL8367C_SVLAN_C2SCFG23_CTRL1_C2SENPMSK_EXT_MASK    0x700
+#define    RTL8367C_SVLAN_C2SCFG23_CTRL1_C2SENPMSK_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG23_CTRL1_C2SENPMSK_MASK    0xFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG23_CTRL2    0x0d47
+#define    RTL8367C_SVLAN_C2SCFG23_CTRL2_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG23_CTRL2_MASK    0x1FFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG24_CTRL0    0x0d48
+#define    RTL8367C_SVLAN_C2SCFG24_CTRL0_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG24_CTRL0_MASK    0x3F
+
+#define    RTL8367C_REG_SVLAN_C2SCFG24_CTRL1    0x0d49
+#define    RTL8367C_SVLAN_C2SCFG24_CTRL1_C2SENPMSK_EXT_OFFSET    8
+#define    RTL8367C_SVLAN_C2SCFG24_CTRL1_C2SENPMSK_EXT_MASK    0x700
+#define    RTL8367C_SVLAN_C2SCFG24_CTRL1_C2SENPMSK_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG24_CTRL1_C2SENPMSK_MASK    0xFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG24_CTRL2    0x0d4a
+#define    RTL8367C_SVLAN_C2SCFG24_CTRL2_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG24_CTRL2_MASK    0x1FFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG25_CTRL0    0x0d4b
+#define    RTL8367C_SVLAN_C2SCFG25_CTRL0_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG25_CTRL0_MASK    0x3F
+
+#define    RTL8367C_REG_SVLAN_C2SCFG25_CTRL1    0x0d4c
+#define    RTL8367C_SVLAN_C2SCFG25_CTRL1_C2SENPMSK_EXT_OFFSET    8
+#define    RTL8367C_SVLAN_C2SCFG25_CTRL1_C2SENPMSK_EXT_MASK    0x700
+#define    RTL8367C_SVLAN_C2SCFG25_CTRL1_C2SENPMSK_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG25_CTRL1_C2SENPMSK_MASK    0xFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG25_CTRL2    0x0d4d
+#define    RTL8367C_SVLAN_C2SCFG25_CTRL2_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG25_CTRL2_MASK    0x1FFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG26_CTRL0    0x0d4e
+#define    RTL8367C_SVLAN_C2SCFG26_CTRL0_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG26_CTRL0_MASK    0x3F
+
+#define    RTL8367C_REG_SVLAN_C2SCFG26_CTRL1    0x0d4f
+#define    RTL8367C_SVLAN_C2SCFG26_CTRL1_C2SENPMSK_EXT_OFFSET    8
+#define    RTL8367C_SVLAN_C2SCFG26_CTRL1_C2SENPMSK_EXT_MASK    0x700
+#define    RTL8367C_SVLAN_C2SCFG26_CTRL1_C2SENPMSK_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG26_CTRL1_C2SENPMSK_MASK    0xFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG26_CTRL2    0x0d50
+#define    RTL8367C_SVLAN_C2SCFG26_CTRL2_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG26_CTRL2_MASK    0x1FFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG27_CTRL0    0x0d51
+#define    RTL8367C_SVLAN_C2SCFG27_CTRL0_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG27_CTRL0_MASK    0x3F
+
+#define    RTL8367C_REG_SVLAN_C2SCFG27_CTRL1    0x0d52
+#define    RTL8367C_SVLAN_C2SCFG27_CTRL1_C2SENPMSK_EXT_OFFSET    8
+#define    RTL8367C_SVLAN_C2SCFG27_CTRL1_C2SENPMSK_EXT_MASK    0x700
+#define    RTL8367C_SVLAN_C2SCFG27_CTRL1_C2SENPMSK_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG27_CTRL1_C2SENPMSK_MASK    0xFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG27_CTRL2    0x0d53
+#define    RTL8367C_SVLAN_C2SCFG27_CTRL2_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG27_CTRL2_MASK    0x1FFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG28_CTRL0    0x0d54
+#define    RTL8367C_SVLAN_C2SCFG28_CTRL0_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG28_CTRL0_MASK    0x3F
+
+#define    RTL8367C_REG_SVLAN_C2SCFG28_CTRL1    0x0d55
+#define    RTL8367C_SVLAN_C2SCFG28_CTRL1_C2SENPMSK_EXT_OFFSET    8
+#define    RTL8367C_SVLAN_C2SCFG28_CTRL1_C2SENPMSK_EXT_MASK    0x700
+#define    RTL8367C_SVLAN_C2SCFG28_CTRL1_C2SENPMSK_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG28_CTRL1_C2SENPMSK_MASK    0xFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG28_CTRL2    0x0d56
+#define    RTL8367C_SVLAN_C2SCFG28_CTRL2_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG28_CTRL2_MASK    0x1FFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG29_CTRL0    0x0d57
+#define    RTL8367C_SVLAN_C2SCFG29_CTRL0_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG29_CTRL0_MASK    0x3F
+
+#define    RTL8367C_REG_SVLAN_C2SCFG29_CTRL1    0x0d58
+#define    RTL8367C_SVLAN_C2SCFG29_CTRL1_C2SENPMSK_EXT_OFFSET    8
+#define    RTL8367C_SVLAN_C2SCFG29_CTRL1_C2SENPMSK_EXT_MASK    0x700
+#define    RTL8367C_SVLAN_C2SCFG29_CTRL1_C2SENPMSK_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG29_CTRL1_C2SENPMSK_MASK    0xFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG29_CTRL2    0x0d59
+#define    RTL8367C_SVLAN_C2SCFG29_CTRL2_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG29_CTRL2_MASK    0x1FFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG30_CTRL0    0x0d5a
+#define    RTL8367C_SVLAN_C2SCFG30_CTRL0_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG30_CTRL0_MASK    0x3F
+
+#define    RTL8367C_REG_SVLAN_C2SCFG30_CTRL1    0x0d5b
+#define    RTL8367C_SVLAN_C2SCFG30_CTRL1_C2SENPMSK_EXT_OFFSET    8
+#define    RTL8367C_SVLAN_C2SCFG30_CTRL1_C2SENPMSK_EXT_MASK    0x700
+#define    RTL8367C_SVLAN_C2SCFG30_CTRL1_C2SENPMSK_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG30_CTRL1_C2SENPMSK_MASK    0xFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG30_CTRL2    0x0d5c
+#define    RTL8367C_SVLAN_C2SCFG30_CTRL2_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG30_CTRL2_MASK    0x1FFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG31_CTRL0    0x0d5d
+#define    RTL8367C_SVLAN_C2SCFG31_CTRL0_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG31_CTRL0_MASK    0x3F
+
+#define    RTL8367C_REG_SVLAN_C2SCFG31_CTRL1    0x0d5e
+#define    RTL8367C_SVLAN_C2SCFG31_CTRL1_C2SENPMSK_EXT_OFFSET    8
+#define    RTL8367C_SVLAN_C2SCFG31_CTRL1_C2SENPMSK_EXT_MASK    0x700
+#define    RTL8367C_SVLAN_C2SCFG31_CTRL1_C2SENPMSK_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG31_CTRL1_C2SENPMSK_MASK    0xFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG31_CTRL2    0x0d5f
+#define    RTL8367C_SVLAN_C2SCFG31_CTRL2_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG31_CTRL2_MASK    0x1FFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG32_CTRL0    0x0d60
+#define    RTL8367C_SVLAN_C2SCFG32_CTRL0_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG32_CTRL0_MASK    0x3F
+
+#define    RTL8367C_REG_SVLAN_C2SCFG32_CTRL1    0x0d61
+#define    RTL8367C_SVLAN_C2SCFG32_CTRL1_C2SENPMSK_EXT_OFFSET    8
+#define    RTL8367C_SVLAN_C2SCFG32_CTRL1_C2SENPMSK_EXT_MASK    0x700
+#define    RTL8367C_SVLAN_C2SCFG32_CTRL1_C2SENPMSK_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG32_CTRL1_C2SENPMSK_MASK    0xFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG32_CTRL2    0x0d62
+#define    RTL8367C_SVLAN_C2SCFG32_CTRL2_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG32_CTRL2_MASK    0x1FFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG33_CTRL0    0x0d63
+#define    RTL8367C_SVLAN_C2SCFG33_CTRL0_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG33_CTRL0_MASK    0x3F
+
+#define    RTL8367C_REG_SVLAN_C2SCFG33_CTRL1    0x0d64
+#define    RTL8367C_SVLAN_C2SCFG33_CTRL1_C2SENPMSK_EXT_OFFSET    8
+#define    RTL8367C_SVLAN_C2SCFG33_CTRL1_C2SENPMSK_EXT_MASK    0x700
+#define    RTL8367C_SVLAN_C2SCFG33_CTRL1_C2SENPMSK_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG33_CTRL1_C2SENPMSK_MASK    0xFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG33_CTRL2    0x0d65
+#define    RTL8367C_SVLAN_C2SCFG33_CTRL2_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG33_CTRL2_MASK    0x1FFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG34_CTRL0    0x0d66
+#define    RTL8367C_SVLAN_C2SCFG34_CTRL0_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG34_CTRL0_MASK    0x3F
+
+#define    RTL8367C_REG_SVLAN_C2SCFG34_CTRL1    0x0d67
+#define    RTL8367C_SVLAN_C2SCFG34_CTRL1_C2SENPMSK_EXT_OFFSET    8
+#define    RTL8367C_SVLAN_C2SCFG34_CTRL1_C2SENPMSK_EXT_MASK    0x700
+#define    RTL8367C_SVLAN_C2SCFG34_CTRL1_C2SENPMSK_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG34_CTRL1_C2SENPMSK_MASK    0xFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG34_CTRL2    0x0d68
+#define    RTL8367C_SVLAN_C2SCFG34_CTRL2_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG34_CTRL2_MASK    0x1FFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG35_CTRL0    0x0d69
+#define    RTL8367C_SVLAN_C2SCFG35_CTRL0_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG35_CTRL0_MASK    0x3F
+
+#define    RTL8367C_REG_SVLAN_C2SCFG35_CTRL1    0x0d6a
+#define    RTL8367C_SVLAN_C2SCFG35_CTRL1_C2SENPMSK_EXT_OFFSET    8
+#define    RTL8367C_SVLAN_C2SCFG35_CTRL1_C2SENPMSK_EXT_MASK    0x700
+#define    RTL8367C_SVLAN_C2SCFG35_CTRL1_C2SENPMSK_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG35_CTRL1_C2SENPMSK_MASK    0xFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG35_CTRL2    0x0d6b
+#define    RTL8367C_SVLAN_C2SCFG35_CTRL2_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG35_CTRL2_MASK    0x1FFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG36_CTRL0    0x0d6c
+#define    RTL8367C_SVLAN_C2SCFG36_CTRL0_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG36_CTRL0_MASK    0x3F
+
+#define    RTL8367C_REG_SVLAN_C2SCFG36_CTRL1    0x0d6d
+#define    RTL8367C_SVLAN_C2SCFG36_CTRL1_C2SENPMSK_EXT_OFFSET    8
+#define    RTL8367C_SVLAN_C2SCFG36_CTRL1_C2SENPMSK_EXT_MASK    0x700
+#define    RTL8367C_SVLAN_C2SCFG36_CTRL1_C2SENPMSK_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG36_CTRL1_C2SENPMSK_MASK    0xFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG36_CTRL2    0x0d6e
+#define    RTL8367C_SVLAN_C2SCFG36_CTRL2_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG36_CTRL2_MASK    0x1FFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG37_CTRL0    0x0d6f
+#define    RTL8367C_SVLAN_C2SCFG37_CTRL0_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG37_CTRL0_MASK    0x3F
+
+#define    RTL8367C_REG_SVLAN_C2SCFG37_CTRL1    0x0d70
+#define    RTL8367C_SVLAN_C2SCFG37_CTRL1_C2SENPMSK_EXT_OFFSET    8
+#define    RTL8367C_SVLAN_C2SCFG37_CTRL1_C2SENPMSK_EXT_MASK    0x700
+#define    RTL8367C_SVLAN_C2SCFG37_CTRL1_C2SENPMSK_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG37_CTRL1_C2SENPMSK_MASK    0xFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG37_CTRL2    0x0d71
+#define    RTL8367C_SVLAN_C2SCFG37_CTRL2_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG37_CTRL2_MASK    0x1FFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG38_CTRL0    0x0d72
+#define    RTL8367C_SVLAN_C2SCFG38_CTRL0_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG38_CTRL0_MASK    0x3F
+
+#define    RTL8367C_REG_SVLAN_C2SCFG38_CTRL1    0x0d73
+#define    RTL8367C_SVLAN_C2SCFG38_CTRL1_C2SENPMSK_EXT_OFFSET    8
+#define    RTL8367C_SVLAN_C2SCFG38_CTRL1_C2SENPMSK_EXT_MASK    0x700
+#define    RTL8367C_SVLAN_C2SCFG38_CTRL1_C2SENPMSK_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG38_CTRL1_C2SENPMSK_MASK    0xFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG38_CTRL2    0x0d74
+#define    RTL8367C_SVLAN_C2SCFG38_CTRL2_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG38_CTRL2_MASK    0x1FFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG39_CTRL0    0x0d75
+#define    RTL8367C_SVLAN_C2SCFG39_CTRL0_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG39_CTRL0_MASK    0x3F
+
+#define    RTL8367C_REG_SVLAN_C2SCFG39_CTRL1    0x0d76
+#define    RTL8367C_SVLAN_C2SCFG39_CTRL1_C2SENPMSK_EXT_OFFSET    8
+#define    RTL8367C_SVLAN_C2SCFG39_CTRL1_C2SENPMSK_EXT_MASK    0x700
+#define    RTL8367C_SVLAN_C2SCFG39_CTRL1_C2SENPMSK_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG39_CTRL1_C2SENPMSK_MASK    0xFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG39_CTRL2    0x0d77
+#define    RTL8367C_SVLAN_C2SCFG39_CTRL2_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG39_CTRL2_MASK    0x1FFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG40_CTRL0    0x0d78
+#define    RTL8367C_SVLAN_C2SCFG40_CTRL0_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG40_CTRL0_MASK    0x3F
+
+#define    RTL8367C_REG_SVLAN_C2SCFG40_CTRL1    0x0d79
+#define    RTL8367C_SVLAN_C2SCFG40_CTRL1_C2SENPMSK_EXT_OFFSET    8
+#define    RTL8367C_SVLAN_C2SCFG40_CTRL1_C2SENPMSK_EXT_MASK    0x700
+#define    RTL8367C_SVLAN_C2SCFG40_CTRL1_C2SENPMSK_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG40_CTRL1_C2SENPMSK_MASK    0xFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG40_CTRL2    0x0d7a
+#define    RTL8367C_SVLAN_C2SCFG40_CTRL2_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG40_CTRL2_MASK    0x1FFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG41_CTRL0    0x0d7b
+#define    RTL8367C_SVLAN_C2SCFG41_CTRL0_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG41_CTRL0_MASK    0x3F
+
+#define    RTL8367C_REG_SVLAN_C2SCFG41_CTRL1    0x0d7c
+#define    RTL8367C_SVLAN_C2SCFG41_CTRL1_C2SENPMSK_EXT_OFFSET    8
+#define    RTL8367C_SVLAN_C2SCFG41_CTRL1_C2SENPMSK_EXT_MASK    0x700
+#define    RTL8367C_SVLAN_C2SCFG41_CTRL1_C2SENPMSK_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG41_CTRL1_C2SENPMSK_MASK    0xFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG41_CTRL2    0x0d7d
+#define    RTL8367C_SVLAN_C2SCFG41_CTRL2_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG41_CTRL2_MASK    0x1FFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG42_CTRL0    0x0d7e
+#define    RTL8367C_SVLAN_C2SCFG42_CTRL0_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG42_CTRL0_MASK    0x3F
+
+#define    RTL8367C_REG_SVLAN_C2SCFG42_CTRL1    0x0d7f
+#define    RTL8367C_SVLAN_C2SCFG42_CTRL1_C2SENPMSK_EXT_OFFSET    8
+#define    RTL8367C_SVLAN_C2SCFG42_CTRL1_C2SENPMSK_EXT_MASK    0x700
+#define    RTL8367C_SVLAN_C2SCFG42_CTRL1_C2SENPMSK_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG42_CTRL1_C2SENPMSK_MASK    0xFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG42_CTRL2    0x0d80
+#define    RTL8367C_SVLAN_C2SCFG42_CTRL2_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG42_CTRL2_MASK    0x1FFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG43_CTRL0    0x0d81
+#define    RTL8367C_SVLAN_C2SCFG43_CTRL0_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG43_CTRL0_MASK    0x3F
+
+#define    RTL8367C_REG_SVLAN_C2SCFG43_CTRL1    0x0d82
+#define    RTL8367C_SVLAN_C2SCFG43_CTRL1_C2SENPMSK_EXT_OFFSET    8
+#define    RTL8367C_SVLAN_C2SCFG43_CTRL1_C2SENPMSK_EXT_MASK    0x700
+#define    RTL8367C_SVLAN_C2SCFG43_CTRL1_C2SENPMSK_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG43_CTRL1_C2SENPMSK_MASK    0xFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG43_CTRL2    0x0d83
+#define    RTL8367C_SVLAN_C2SCFG43_CTRL2_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG43_CTRL2_MASK    0x1FFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG44_CTRL0    0x0d84
+#define    RTL8367C_SVLAN_C2SCFG44_CTRL0_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG44_CTRL0_MASK    0x3F
+
+#define    RTL8367C_REG_SVLAN_C2SCFG44_CTRL1    0x0d85
+#define    RTL8367C_SVLAN_C2SCFG44_CTRL1_C2SENPMSK_EXT_OFFSET    8
+#define    RTL8367C_SVLAN_C2SCFG44_CTRL1_C2SENPMSK_EXT_MASK    0x700
+#define    RTL8367C_SVLAN_C2SCFG44_CTRL1_C2SENPMSK_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG44_CTRL1_C2SENPMSK_MASK    0xFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG44_CTRL2    0x0d86
+#define    RTL8367C_SVLAN_C2SCFG44_CTRL2_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG44_CTRL2_MASK    0x1FFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG45_CTRL0    0x0d87
+#define    RTL8367C_SVLAN_C2SCFG45_CTRL0_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG45_CTRL0_MASK    0x3F
+
+#define    RTL8367C_REG_SVLAN_C2SCFG45_CTRL1    0x0d88
+#define    RTL8367C_SVLAN_C2SCFG45_CTRL1_C2SENPMSK_EXT_OFFSET    8
+#define    RTL8367C_SVLAN_C2SCFG45_CTRL1_C2SENPMSK_EXT_MASK    0x700
+#define    RTL8367C_SVLAN_C2SCFG45_CTRL1_C2SENPMSK_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG45_CTRL1_C2SENPMSK_MASK    0xFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG45_CTRL2    0x0d89
+#define    RTL8367C_SVLAN_C2SCFG45_CTRL2_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG45_CTRL2_MASK    0x1FFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG46_CTRL0    0x0d8a
+#define    RTL8367C_SVLAN_C2SCFG46_CTRL0_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG46_CTRL0_MASK    0x3F
+
+#define    RTL8367C_REG_SVLAN_C2SCFG46_CTRL1    0x0d8b
+#define    RTL8367C_SVLAN_C2SCFG46_CTRL1_C2SENPMSK_EXT_OFFSET    8
+#define    RTL8367C_SVLAN_C2SCFG46_CTRL1_C2SENPMSK_EXT_MASK    0x700
+#define    RTL8367C_SVLAN_C2SCFG46_CTRL1_C2SENPMSK_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG46_CTRL1_C2SENPMSK_MASK    0xFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG46_CTRL2    0x0d8c
+#define    RTL8367C_SVLAN_C2SCFG46_CTRL2_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG46_CTRL2_MASK    0x1FFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG47_CTRL0    0x0d8d
+#define    RTL8367C_SVLAN_C2SCFG47_CTRL0_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG47_CTRL0_MASK    0x3F
+
+#define    RTL8367C_REG_SVLAN_C2SCFG47_CTRL1    0x0d8e
+#define    RTL8367C_SVLAN_C2SCFG47_CTRL1_C2SENPMSK_EXT_OFFSET    8
+#define    RTL8367C_SVLAN_C2SCFG47_CTRL1_C2SENPMSK_EXT_MASK    0x700
+#define    RTL8367C_SVLAN_C2SCFG47_CTRL1_C2SENPMSK_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG47_CTRL1_C2SENPMSK_MASK    0xFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG47_CTRL2    0x0d8f
+#define    RTL8367C_SVLAN_C2SCFG47_CTRL2_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG47_CTRL2_MASK    0x1FFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG48_CTRL0    0x0d90
+#define    RTL8367C_SVLAN_C2SCFG48_CTRL0_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG48_CTRL0_MASK    0x3F
+
+#define    RTL8367C_REG_SVLAN_C2SCFG48_CTRL1    0x0d91
+#define    RTL8367C_SVLAN_C2SCFG48_CTRL1_C2SENPMSK_EXT_OFFSET    8
+#define    RTL8367C_SVLAN_C2SCFG48_CTRL1_C2SENPMSK_EXT_MASK    0x700
+#define    RTL8367C_SVLAN_C2SCFG48_CTRL1_C2SENPMSK_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG48_CTRL1_C2SENPMSK_MASK    0xFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG48_CTRL2    0x0d92
+#define    RTL8367C_SVLAN_C2SCFG48_CTRL2_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG48_CTRL2_MASK    0x1FFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG49_CTRL0    0x0d93
+#define    RTL8367C_SVLAN_C2SCFG49_CTRL0_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG49_CTRL0_MASK    0x3F
+
+#define    RTL8367C_REG_SVLAN_C2SCFG49_CTRL1    0x0d94
+#define    RTL8367C_SVLAN_C2SCFG49_CTRL1_C2SENPMSK_EXT_OFFSET    8
+#define    RTL8367C_SVLAN_C2SCFG49_CTRL1_C2SENPMSK_EXT_MASK    0x700
+#define    RTL8367C_SVLAN_C2SCFG49_CTRL1_C2SENPMSK_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG49_CTRL1_C2SENPMSK_MASK    0xFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG49_CTRL2    0x0d95
+#define    RTL8367C_SVLAN_C2SCFG49_CTRL2_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG49_CTRL2_MASK    0x1FFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG50_CTRL0    0x0d96
+#define    RTL8367C_SVLAN_C2SCFG50_CTRL0_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG50_CTRL0_MASK    0x3F
+
+#define    RTL8367C_REG_SVLAN_C2SCFG50_CTRL1    0x0d97
+#define    RTL8367C_SVLAN_C2SCFG50_CTRL1_C2SENPMSK_EXT_OFFSET    8
+#define    RTL8367C_SVLAN_C2SCFG50_CTRL1_C2SENPMSK_EXT_MASK    0x700
+#define    RTL8367C_SVLAN_C2SCFG50_CTRL1_C2SENPMSK_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG50_CTRL1_C2SENPMSK_MASK    0xFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG50_CTRL2    0x0d98
+#define    RTL8367C_SVLAN_C2SCFG50_CTRL2_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG50_CTRL2_MASK    0x1FFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG51_CTRL0    0x0d99
+#define    RTL8367C_SVLAN_C2SCFG51_CTRL0_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG51_CTRL0_MASK    0x3F
+
+#define    RTL8367C_REG_SVLAN_C2SCFG51_CTRL1    0x0d9a
+#define    RTL8367C_SVLAN_C2SCFG51_CTRL1_C2SENPMSK_EXT_OFFSET    8
+#define    RTL8367C_SVLAN_C2SCFG51_CTRL1_C2SENPMSK_EXT_MASK    0x700
+#define    RTL8367C_SVLAN_C2SCFG51_CTRL1_C2SENPMSK_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG51_CTRL1_C2SENPMSK_MASK    0xFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG51_CTRL2    0x0d9b
+#define    RTL8367C_SVLAN_C2SCFG51_CTRL2_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG51_CTRL2_MASK    0x1FFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG52_CTRL0    0x0d9c
+#define    RTL8367C_SVLAN_C2SCFG52_CTRL0_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG52_CTRL0_MASK    0x3F
+
+#define    RTL8367C_REG_SVLAN_C2SCFG52_CTRL1    0x0d9d
+#define    RTL8367C_SVLAN_C2SCFG52_CTRL1_C2SENPMSK_EXT_OFFSET    8
+#define    RTL8367C_SVLAN_C2SCFG52_CTRL1_C2SENPMSK_EXT_MASK    0x700
+#define    RTL8367C_SVLAN_C2SCFG52_CTRL1_C2SENPMSK_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG52_CTRL1_C2SENPMSK_MASK    0xFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG52_CTRL2    0x0d9e
+#define    RTL8367C_SVLAN_C2SCFG52_CTRL2_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG52_CTRL2_MASK    0x1FFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG53_CTRL0    0x0d9f
+#define    RTL8367C_SVLAN_C2SCFG53_CTRL0_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG53_CTRL0_MASK    0x3F
+
+#define    RTL8367C_REG_SVLAN_C2SCFG53_CTRL1    0x0da0
+#define    RTL8367C_SVLAN_C2SCFG53_CTRL1_C2SENPMSK_EXT_OFFSET    8
+#define    RTL8367C_SVLAN_C2SCFG53_CTRL1_C2SENPMSK_EXT_MASK    0x700
+#define    RTL8367C_SVLAN_C2SCFG53_CTRL1_C2SENPMSK_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG53_CTRL1_C2SENPMSK_MASK    0xFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG53_CTRL2    0x0da1
+#define    RTL8367C_SVLAN_C2SCFG53_CTRL2_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG53_CTRL2_MASK    0x1FFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG54_CTRL0    0x0da2
+#define    RTL8367C_SVLAN_C2SCFG54_CTRL0_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG54_CTRL0_MASK    0x3F
+
+#define    RTL8367C_REG_SVLAN_C2SCFG54_CTRL1    0x0da3
+#define    RTL8367C_SVLAN_C2SCFG54_CTRL1_C2SENPMSK_EXT_OFFSET    8
+#define    RTL8367C_SVLAN_C2SCFG54_CTRL1_C2SENPMSK_EXT_MASK    0x700
+#define    RTL8367C_SVLAN_C2SCFG54_CTRL1_C2SENPMSK_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG54_CTRL1_C2SENPMSK_MASK    0xFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG54_CTRL2    0x0da4
+#define    RTL8367C_SVLAN_C2SCFG54_CTRL2_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG54_CTRL2_MASK    0x1FFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG55_CTRL0    0x0da5
+#define    RTL8367C_SVLAN_C2SCFG55_CTRL0_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG55_CTRL0_MASK    0x3F
+
+#define    RTL8367C_REG_SVLAN_C2SCFG55_CTRL1    0x0da6
+#define    RTL8367C_SVLAN_C2SCFG55_CTRL1_C2SENPMSK_EXT_OFFSET    8
+#define    RTL8367C_SVLAN_C2SCFG55_CTRL1_C2SENPMSK_EXT_MASK    0x700
+#define    RTL8367C_SVLAN_C2SCFG55_CTRL1_C2SENPMSK_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG55_CTRL1_C2SENPMSK_MASK    0xFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG55_CTRL2    0x0da7
+#define    RTL8367C_SVLAN_C2SCFG55_CTRL2_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG55_CTRL2_MASK    0x1FFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG56_CTRL0    0x0da8
+#define    RTL8367C_SVLAN_C2SCFG56_CTRL0_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG56_CTRL0_MASK    0x3F
+
+#define    RTL8367C_REG_SVLAN_C2SCFG56_CTRL1    0x0da9
+#define    RTL8367C_SVLAN_C2SCFG56_CTRL1_C2SENPMSK_EXT_OFFSET    8
+#define    RTL8367C_SVLAN_C2SCFG56_CTRL1_C2SENPMSK_EXT_MASK    0x700
+#define    RTL8367C_SVLAN_C2SCFG56_CTRL1_C2SENPMSK_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG56_CTRL1_C2SENPMSK_MASK    0xFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG56_CTRL2    0x0daa
+#define    RTL8367C_SVLAN_C2SCFG56_CTRL2_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG56_CTRL2_MASK    0x1FFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG57_CTRL0    0x0dab
+#define    RTL8367C_SVLAN_C2SCFG57_CTRL0_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG57_CTRL0_MASK    0x3F
+
+#define    RTL8367C_REG_SVLAN_C2SCFG57_CTRL1    0x0dac
+#define    RTL8367C_SVLAN_C2SCFG57_CTRL1_C2SENPMSK_EXT_OFFSET    8
+#define    RTL8367C_SVLAN_C2SCFG57_CTRL1_C2SENPMSK_EXT_MASK    0x700
+#define    RTL8367C_SVLAN_C2SCFG57_CTRL1_C2SENPMSK_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG57_CTRL1_C2SENPMSK_MASK    0xFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG57_CTRL2    0x0dad
+#define    RTL8367C_SVLAN_C2SCFG57_CTRL2_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG57_CTRL2_MASK    0x1FFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG58_CTRL0    0x0dae
+#define    RTL8367C_SVLAN_C2SCFG58_CTRL0_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG58_CTRL0_MASK    0x3F
+
+#define    RTL8367C_REG_SVLAN_C2SCFG58_CTRL1    0x0daf
+#define    RTL8367C_SVLAN_C2SCFG58_CTRL1_C2SENPMSK_EXT_OFFSET    8
+#define    RTL8367C_SVLAN_C2SCFG58_CTRL1_C2SENPMSK_EXT_MASK    0x700
+#define    RTL8367C_SVLAN_C2SCFG58_CTRL1_C2SENPMSK_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG58_CTRL1_C2SENPMSK_MASK    0xFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG58_CTRL2    0x0db0
+#define    RTL8367C_SVLAN_C2SCFG58_CTRL2_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG58_CTRL2_MASK    0x1FFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG59_CTRL0    0x0db1
+#define    RTL8367C_SVLAN_C2SCFG59_CTRL0_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG59_CTRL0_MASK    0x3F
+
+#define    RTL8367C_REG_SVLAN_C2SCFG59_CTRL1    0x0db2
+#define    RTL8367C_SVLAN_C2SCFG59_CTRL1_C2SENPMSK_EXT_OFFSET    8
+#define    RTL8367C_SVLAN_C2SCFG59_CTRL1_C2SENPMSK_EXT_MASK    0x700
+#define    RTL8367C_SVLAN_C2SCFG59_CTRL1_C2SENPMSK_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG59_CTRL1_C2SENPMSK_MASK    0xFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG59_CTRL2    0x0db3
+#define    RTL8367C_SVLAN_C2SCFG59_CTRL2_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG59_CTRL2_MASK    0x1FFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG60_CTRL0    0x0db4
+#define    RTL8367C_SVLAN_C2SCFG60_CTRL0_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG60_CTRL0_MASK    0x3F
+
+#define    RTL8367C_REG_SVLAN_C2SCFG60_CTRL1    0x0db5
+#define    RTL8367C_SVLAN_C2SCFG60_CTRL1_C2SENPMSK_EXT_OFFSET    8
+#define    RTL8367C_SVLAN_C2SCFG60_CTRL1_C2SENPMSK_EXT_MASK    0x700
+#define    RTL8367C_SVLAN_C2SCFG60_CTRL1_C2SENPMSK_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG60_CTRL1_C2SENPMSK_MASK    0xFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG60_CTRL2    0x0db6
+#define    RTL8367C_SVLAN_C2SCFG60_CTRL2_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG60_CTRL2_MASK    0x1FFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG61_CTRL0    0x0db7
+#define    RTL8367C_SVLAN_C2SCFG61_CTRL0_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG61_CTRL0_MASK    0x3F
+
+#define    RTL8367C_REG_SVLAN_C2SCFG61_CTRL1    0x0db8
+#define    RTL8367C_SVLAN_C2SCFG61_CTRL1_C2SENPMSK_EXT_OFFSET    8
+#define    RTL8367C_SVLAN_C2SCFG61_CTRL1_C2SENPMSK_EXT_MASK    0x700
+#define    RTL8367C_SVLAN_C2SCFG61_CTRL1_C2SENPMSK_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG61_CTRL1_C2SENPMSK_MASK    0xFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG61_CTRL2    0x0db9
+#define    RTL8367C_SVLAN_C2SCFG61_CTRL2_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG61_CTRL2_MASK    0x1FFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG62_CTRL0    0x0dba
+#define    RTL8367C_SVLAN_C2SCFG62_CTRL0_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG62_CTRL0_MASK    0x3F
+
+#define    RTL8367C_REG_SVLAN_C2SCFG62_CTRL1    0x0dbb
+#define    RTL8367C_SVLAN_C2SCFG62_CTRL1_C2SENPMSK_EXT_OFFSET    8
+#define    RTL8367C_SVLAN_C2SCFG62_CTRL1_C2SENPMSK_EXT_MASK    0x700
+#define    RTL8367C_SVLAN_C2SCFG62_CTRL1_C2SENPMSK_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG62_CTRL1_C2SENPMSK_MASK    0xFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG62_CTRL2    0x0dbc
+#define    RTL8367C_SVLAN_C2SCFG62_CTRL2_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG62_CTRL2_MASK    0x1FFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG63_CTRL0    0x0dbd
+#define    RTL8367C_SVLAN_C2SCFG63_CTRL0_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG63_CTRL0_MASK    0x3F
+
+#define    RTL8367C_REG_SVLAN_C2SCFG63_CTRL1    0x0dbe
+#define    RTL8367C_SVLAN_C2SCFG63_CTRL1_C2SENPMSK_EXT_OFFSET    8
+#define    RTL8367C_SVLAN_C2SCFG63_CTRL1_C2SENPMSK_EXT_MASK    0x700
+#define    RTL8367C_SVLAN_C2SCFG63_CTRL1_C2SENPMSK_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG63_CTRL1_C2SENPMSK_MASK    0xFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG63_CTRL2    0x0dbf
+#define    RTL8367C_SVLAN_C2SCFG63_CTRL2_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG63_CTRL2_MASK    0x1FFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG64_CTRL0    0x0dc0
+#define    RTL8367C_SVLAN_C2SCFG64_CTRL0_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG64_CTRL0_MASK    0x3F
+
+#define    RTL8367C_REG_SVLAN_C2SCFG64_CTRL1    0x0dc1
+#define    RTL8367C_SVLAN_C2SCFG64_CTRL1_C2SENPMSK_EXT_OFFSET    8
+#define    RTL8367C_SVLAN_C2SCFG64_CTRL1_C2SENPMSK_EXT_MASK    0x700
+#define    RTL8367C_SVLAN_C2SCFG64_CTRL1_C2SENPMSK_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG64_CTRL1_C2SENPMSK_MASK    0xFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG64_CTRL2    0x0dc2
+#define    RTL8367C_SVLAN_C2SCFG64_CTRL2_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG64_CTRL2_MASK    0x1FFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG65_CTRL0    0x0dc3
+#define    RTL8367C_SVLAN_C2SCFG65_CTRL0_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG65_CTRL0_MASK    0x3F
+
+#define    RTL8367C_REG_SVLAN_C2SCFG65_CTRL1    0x0dc4
+#define    RTL8367C_SVLAN_C2SCFG65_CTRL1_C2SENPMSK_EXT_OFFSET    8
+#define    RTL8367C_SVLAN_C2SCFG65_CTRL1_C2SENPMSK_EXT_MASK    0x700
+#define    RTL8367C_SVLAN_C2SCFG65_CTRL1_C2SENPMSK_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG65_CTRL1_C2SENPMSK_MASK    0xFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG65_CTRL2    0x0dc5
+#define    RTL8367C_SVLAN_C2SCFG65_CTRL2_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG65_CTRL2_MASK    0x1FFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG66_CTRL0    0x0dc6
+#define    RTL8367C_SVLAN_C2SCFG66_CTRL0_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG66_CTRL0_MASK    0x3F
+
+#define    RTL8367C_REG_SVLAN_C2SCFG66_CTRL1    0x0dc7
+#define    RTL8367C_SVLAN_C2SCFG66_CTRL1_C2SENPMSK_EXT_OFFSET    8
+#define    RTL8367C_SVLAN_C2SCFG66_CTRL1_C2SENPMSK_EXT_MASK    0x700
+#define    RTL8367C_SVLAN_C2SCFG66_CTRL1_C2SENPMSK_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG66_CTRL1_C2SENPMSK_MASK    0xFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG66_CTRL2    0x0dc8
+#define    RTL8367C_SVLAN_C2SCFG66_CTRL2_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG66_CTRL2_MASK    0x1FFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG67_CTRL0    0x0dc9
+#define    RTL8367C_SVLAN_C2SCFG67_CTRL0_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG67_CTRL0_MASK    0x3F
+
+#define    RTL8367C_REG_SVLAN_C2SCFG67_CTRL1    0x0dca
+#define    RTL8367C_SVLAN_C2SCFG67_CTRL1_C2SENPMSK_EXT_OFFSET    8
+#define    RTL8367C_SVLAN_C2SCFG67_CTRL1_C2SENPMSK_EXT_MASK    0x700
+#define    RTL8367C_SVLAN_C2SCFG67_CTRL1_C2SENPMSK_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG67_CTRL1_C2SENPMSK_MASK    0xFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG67_CTRL2    0x0dcb
+#define    RTL8367C_SVLAN_C2SCFG67_CTRL2_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG67_CTRL2_MASK    0x1FFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG68_CTRL0    0x0dcc
+#define    RTL8367C_SVLAN_C2SCFG68_CTRL0_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG68_CTRL0_MASK    0x3F
+
+#define    RTL8367C_REG_SVLAN_C2SCFG68_CTRL1    0x0dcd
+#define    RTL8367C_SVLAN_C2SCFG68_CTRL1_C2SENPMSK_EXT_OFFSET    8
+#define    RTL8367C_SVLAN_C2SCFG68_CTRL1_C2SENPMSK_EXT_MASK    0x700
+#define    RTL8367C_SVLAN_C2SCFG68_CTRL1_C2SENPMSK_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG68_CTRL1_C2SENPMSK_MASK    0xFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG68_CTRL2    0x0dce
+#define    RTL8367C_SVLAN_C2SCFG68_CTRL2_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG68_CTRL2_MASK    0x1FFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG69_CTRL0    0x0dcf
+#define    RTL8367C_SVLAN_C2SCFG69_CTRL0_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG69_CTRL0_MASK    0x3F
+
+#define    RTL8367C_REG_SVLAN_C2SCFG69_CTRL1    0x0dd0
+#define    RTL8367C_SVLAN_C2SCFG69_CTRL1_C2SENPMSK_EXT_OFFSET    8
+#define    RTL8367C_SVLAN_C2SCFG69_CTRL1_C2SENPMSK_EXT_MASK    0x700
+#define    RTL8367C_SVLAN_C2SCFG69_CTRL1_C2SENPMSK_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG69_CTRL1_C2SENPMSK_MASK    0xFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG69_CTRL2    0x0dd1
+#define    RTL8367C_SVLAN_C2SCFG69_CTRL2_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG69_CTRL2_MASK    0x1FFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG70_CTRL0    0x0dd2
+#define    RTL8367C_SVLAN_C2SCFG70_CTRL0_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG70_CTRL0_MASK    0x3F
+
+#define    RTL8367C_REG_SVLAN_C2SCFG70_CTRL1    0x0dd3
+#define    RTL8367C_SVLAN_C2SCFG70_CTRL1_C2SENPMSK_EXT_OFFSET    8
+#define    RTL8367C_SVLAN_C2SCFG70_CTRL1_C2SENPMSK_EXT_MASK    0x700
+#define    RTL8367C_SVLAN_C2SCFG70_CTRL1_C2SENPMSK_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG70_CTRL1_C2SENPMSK_MASK    0xFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG70_CTRL2    0x0dd4
+#define    RTL8367C_SVLAN_C2SCFG70_CTRL2_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG70_CTRL2_MASK    0x1FFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG71_CTRL0    0x0dd5
+#define    RTL8367C_SVLAN_C2SCFG71_CTRL0_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG71_CTRL0_MASK    0x3F
+
+#define    RTL8367C_REG_SVLAN_C2SCFG71_CTRL1    0x0dd6
+#define    RTL8367C_SVLAN_C2SCFG71_CTRL1_C2SENPMSK_EXT_OFFSET    8
+#define    RTL8367C_SVLAN_C2SCFG71_CTRL1_C2SENPMSK_EXT_MASK    0x700
+#define    RTL8367C_SVLAN_C2SCFG71_CTRL1_C2SENPMSK_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG71_CTRL1_C2SENPMSK_MASK    0xFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG71_CTRL2    0x0dd7
+#define    RTL8367C_SVLAN_C2SCFG71_CTRL2_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG71_CTRL2_MASK    0x1FFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG72_CTRL0    0x0dd8
+#define    RTL8367C_SVLAN_C2SCFG72_CTRL0_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG72_CTRL0_MASK    0x3F
+
+#define    RTL8367C_REG_SVLAN_C2SCFG72_CTRL1    0x0dd9
+#define    RTL8367C_SVLAN_C2SCFG72_CTRL1_C2SENPMSK_EXT_OFFSET    8
+#define    RTL8367C_SVLAN_C2SCFG72_CTRL1_C2SENPMSK_EXT_MASK    0x700
+#define    RTL8367C_SVLAN_C2SCFG72_CTRL1_C2SENPMSK_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG72_CTRL1_C2SENPMSK_MASK    0xFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG72_CTRL2    0x0dda
+#define    RTL8367C_SVLAN_C2SCFG72_CTRL2_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG72_CTRL2_MASK    0x1FFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG73_CTRL0    0x0ddb
+#define    RTL8367C_SVLAN_C2SCFG73_CTRL0_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG73_CTRL0_MASK    0x3F
+
+#define    RTL8367C_REG_SVLAN_C2SCFG73_CTRL1    0x0ddc
+#define    RTL8367C_SVLAN_C2SCFG73_CTRL1_C2SENPMSK_EXT_OFFSET    8
+#define    RTL8367C_SVLAN_C2SCFG73_CTRL1_C2SENPMSK_EXT_MASK    0x700
+#define    RTL8367C_SVLAN_C2SCFG73_CTRL1_C2SENPMSK_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG73_CTRL1_C2SENPMSK_MASK    0xFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG73_CTRL2    0x0ddd
+#define    RTL8367C_SVLAN_C2SCFG73_CTRL2_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG73_CTRL2_MASK    0x1FFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG74_CTRL0    0x0dde
+#define    RTL8367C_SVLAN_C2SCFG74_CTRL0_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG74_CTRL0_MASK    0x3F
+
+#define    RTL8367C_REG_SVLAN_C2SCFG74_CTRL1    0x0ddf
+#define    RTL8367C_SVLAN_C2SCFG74_CTRL1_C2SENPMSK_EXT_OFFSET    8
+#define    RTL8367C_SVLAN_C2SCFG74_CTRL1_C2SENPMSK_EXT_MASK    0x700
+#define    RTL8367C_SVLAN_C2SCFG74_CTRL1_C2SENPMSK_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG74_CTRL1_C2SENPMSK_MASK    0xFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG74_CTRL2    0x0de0
+#define    RTL8367C_SVLAN_C2SCFG74_CTRL2_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG74_CTRL2_MASK    0x1FFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG75_CTRL0    0x0de1
+#define    RTL8367C_SVLAN_C2SCFG75_CTRL0_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG75_CTRL0_MASK    0x3F
+
+#define    RTL8367C_REG_SVLAN_C2SCFG75_CTRL1    0x0de2
+#define    RTL8367C_SVLAN_C2SCFG75_CTRL1_C2SENPMSK_EXT_OFFSET    8
+#define    RTL8367C_SVLAN_C2SCFG75_CTRL1_C2SENPMSK_EXT_MASK    0x700
+#define    RTL8367C_SVLAN_C2SCFG75_CTRL1_C2SENPMSK_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG75_CTRL1_C2SENPMSK_MASK    0xFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG75_CTRL2    0x0de3
+#define    RTL8367C_SVLAN_C2SCFG75_CTRL2_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG75_CTRL2_MASK    0x1FFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG76_CTRL0    0x0de4
+#define    RTL8367C_SVLAN_C2SCFG76_CTRL0_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG76_CTRL0_MASK    0x3F
+
+#define    RTL8367C_REG_SVLAN_C2SCFG76_CTRL1    0x0de5
+#define    RTL8367C_SVLAN_C2SCFG76_CTRL1_C2SENPMSK_EXT_OFFSET    8
+#define    RTL8367C_SVLAN_C2SCFG76_CTRL1_C2SENPMSK_EXT_MASK    0x700
+#define    RTL8367C_SVLAN_C2SCFG76_CTRL1_C2SENPMSK_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG76_CTRL1_C2SENPMSK_MASK    0xFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG76_CTRL2    0x0de6
+#define    RTL8367C_SVLAN_C2SCFG76_CTRL2_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG76_CTRL2_MASK    0x1FFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG77_CTRL0    0x0de7
+#define    RTL8367C_SVLAN_C2SCFG77_CTRL0_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG77_CTRL0_MASK    0x3F
+
+#define    RTL8367C_REG_SVLAN_C2SCFG77_CTRL1    0x0de8
+#define    RTL8367C_SVLAN_C2SCFG77_CTRL1_C2SENPMSK_EXT_OFFSET    8
+#define    RTL8367C_SVLAN_C2SCFG77_CTRL1_C2SENPMSK_EXT_MASK    0x700
+#define    RTL8367C_SVLAN_C2SCFG77_CTRL1_C2SENPMSK_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG77_CTRL1_C2SENPMSK_MASK    0xFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG77_CTRL2    0x0de9
+#define    RTL8367C_SVLAN_C2SCFG77_CTRL2_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG77_CTRL2_MASK    0x1FFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG78_CTRL0    0x0dea
+#define    RTL8367C_SVLAN_C2SCFG78_CTRL0_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG78_CTRL0_MASK    0x3F
+
+#define    RTL8367C_REG_SVLAN_C2SCFG78_CTRL1    0x0deb
+#define    RTL8367C_SVLAN_C2SCFG78_CTRL1_C2SENPMSK_EXT_OFFSET    8
+#define    RTL8367C_SVLAN_C2SCFG78_CTRL1_C2SENPMSK_EXT_MASK    0x700
+#define    RTL8367C_SVLAN_C2SCFG78_CTRL1_C2SENPMSK_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG78_CTRL1_C2SENPMSK_MASK    0xFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG78_CTRL2    0x0dec
+#define    RTL8367C_SVLAN_C2SCFG78_CTRL2_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG78_CTRL2_MASK    0x1FFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG79_CTRL0    0x0ded
+#define    RTL8367C_SVLAN_C2SCFG79_CTRL0_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG79_CTRL0_MASK    0x3F
+
+#define    RTL8367C_REG_SVLAN_C2SCFG79_CTRL1    0x0dee
+#define    RTL8367C_SVLAN_C2SCFG79_CTRL1_C2SENPMSK_EXT_OFFSET    8
+#define    RTL8367C_SVLAN_C2SCFG79_CTRL1_C2SENPMSK_EXT_MASK    0x700
+#define    RTL8367C_SVLAN_C2SCFG79_CTRL1_C2SENPMSK_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG79_CTRL1_C2SENPMSK_MASK    0xFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG79_CTRL2    0x0def
+#define    RTL8367C_SVLAN_C2SCFG79_CTRL2_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG79_CTRL2_MASK    0x1FFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG80_CTRL0    0x0df0
+#define    RTL8367C_SVLAN_C2SCFG80_CTRL0_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG80_CTRL0_MASK    0x3F
+
+#define    RTL8367C_REG_SVLAN_C2SCFG80_CTRL1    0x0df1
+#define    RTL8367C_SVLAN_C2SCFG80_CTRL1_C2SENPMSK_EXT_OFFSET    8
+#define    RTL8367C_SVLAN_C2SCFG80_CTRL1_C2SENPMSK_EXT_MASK    0x700
+#define    RTL8367C_SVLAN_C2SCFG80_CTRL1_C2SENPMSK_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG80_CTRL1_C2SENPMSK_MASK    0xFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG80_CTRL2    0x0df2
+#define    RTL8367C_SVLAN_C2SCFG80_CTRL2_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG80_CTRL2_MASK    0x1FFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG81_CTRL0    0x0df3
+#define    RTL8367C_SVLAN_C2SCFG81_CTRL0_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG81_CTRL0_MASK    0x3F
+
+#define    RTL8367C_REG_SVLAN_C2SCFG81_CTRL1    0x0df4
+#define    RTL8367C_SVLAN_C2SCFG81_CTRL1_C2SENPMSK_EXT_OFFSET    8
+#define    RTL8367C_SVLAN_C2SCFG81_CTRL1_C2SENPMSK_EXT_MASK    0x700
+#define    RTL8367C_SVLAN_C2SCFG81_CTRL1_C2SENPMSK_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG81_CTRL1_C2SENPMSK_MASK    0xFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG81_CTRL2    0x0df5
+#define    RTL8367C_SVLAN_C2SCFG81_CTRL2_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG81_CTRL2_MASK    0x1FFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG82_CTRL0    0x0df6
+#define    RTL8367C_SVLAN_C2SCFG82_CTRL0_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG82_CTRL0_MASK    0x3F
+
+#define    RTL8367C_REG_SVLAN_C2SCFG82_CTRL1    0x0df7
+#define    RTL8367C_SVLAN_C2SCFG82_CTRL1_C2SENPMSK_EXT_OFFSET    8
+#define    RTL8367C_SVLAN_C2SCFG82_CTRL1_C2SENPMSK_EXT_MASK    0x700
+#define    RTL8367C_SVLAN_C2SCFG82_CTRL1_C2SENPMSK_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG82_CTRL1_C2SENPMSK_MASK    0xFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG82_CTRL2    0x0df8
+#define    RTL8367C_SVLAN_C2SCFG82_CTRL2_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG82_CTRL2_MASK    0x1FFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG83_CTRL0    0x0df9
+#define    RTL8367C_SVLAN_C2SCFG83_CTRL0_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG83_CTRL0_MASK    0x3F
+
+#define    RTL8367C_REG_SVLAN_C2SCFG83_CTRL1    0x0dfa
+#define    RTL8367C_SVLAN_C2SCFG83_CTRL1_C2SENPMSK_EXT_OFFSET    8
+#define    RTL8367C_SVLAN_C2SCFG83_CTRL1_C2SENPMSK_EXT_MASK    0x700
+#define    RTL8367C_SVLAN_C2SCFG83_CTRL1_C2SENPMSK_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG83_CTRL1_C2SENPMSK_MASK    0xFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG83_CTRL2    0x0dfb
+#define    RTL8367C_SVLAN_C2SCFG83_CTRL2_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG83_CTRL2_MASK    0x1FFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG84_CTRL0    0x0dfc
+#define    RTL8367C_SVLAN_C2SCFG84_CTRL0_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG84_CTRL0_MASK    0x3F
+
+#define    RTL8367C_REG_SVLAN_C2SCFG84_CTRL1    0x0dfd
+#define    RTL8367C_SVLAN_C2SCFG84_CTRL1_C2SENPMSK_EXT_OFFSET    8
+#define    RTL8367C_SVLAN_C2SCFG84_CTRL1_C2SENPMSK_EXT_MASK    0x700
+#define    RTL8367C_SVLAN_C2SCFG84_CTRL1_C2SENPMSK_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG84_CTRL1_C2SENPMSK_MASK    0xFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG84_CTRL2    0x0dfe
+#define    RTL8367C_SVLAN_C2SCFG84_CTRL2_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG84_CTRL2_MASK    0x1FFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG85_CTRL0    0x0dff
+#define    RTL8367C_SVLAN_C2SCFG85_CTRL0_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG85_CTRL0_MASK    0x3F
+
+#define    RTL8367C_REG_SVLAN_C2SCFG85_CTRL1    0x0e00
+#define    RTL8367C_SVLAN_C2SCFG85_CTRL1_C2SENPMSK_EXT_OFFSET    8
+#define    RTL8367C_SVLAN_C2SCFG85_CTRL1_C2SENPMSK_EXT_MASK    0x700
+#define    RTL8367C_SVLAN_C2SCFG85_CTRL1_C2SENPMSK_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG85_CTRL1_C2SENPMSK_MASK    0xFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG85_CTRL2    0x0e01
+#define    RTL8367C_SVLAN_C2SCFG85_CTRL2_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG85_CTRL2_MASK    0x1FFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG86_CTRL0    0x0e02
+#define    RTL8367C_SVLAN_C2SCFG86_CTRL0_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG86_CTRL0_MASK    0x3F
+
+#define    RTL8367C_REG_SVLAN_C2SCFG86_CTRL1    0x0e03
+#define    RTL8367C_SVLAN_C2SCFG86_CTRL1_C2SENPMSK_EXT_OFFSET    8
+#define    RTL8367C_SVLAN_C2SCFG86_CTRL1_C2SENPMSK_EXT_MASK    0x700
+#define    RTL8367C_SVLAN_C2SCFG86_CTRL1_C2SENPMSK_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG86_CTRL1_C2SENPMSK_MASK    0xFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG86_CTRL2    0x0e04
+#define    RTL8367C_SVLAN_C2SCFG86_CTRL2_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG86_CTRL2_MASK    0x1FFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG87_CTRL0    0x0e05
+#define    RTL8367C_SVLAN_C2SCFG87_CTRL0_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG87_CTRL0_MASK    0x3F
+
+#define    RTL8367C_REG_SVLAN_C2SCFG87_CTRL1    0x0e06
+#define    RTL8367C_SVLAN_C2SCFG87_CTRL1_C2SENPMSK_EXT_OFFSET    8
+#define    RTL8367C_SVLAN_C2SCFG87_CTRL1_C2SENPMSK_EXT_MASK    0x700
+#define    RTL8367C_SVLAN_C2SCFG87_CTRL1_C2SENPMSK_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG87_CTRL1_C2SENPMSK_MASK    0xFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG87_CTRL2    0x0e07
+#define    RTL8367C_SVLAN_C2SCFG87_CTRL2_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG87_CTRL2_MASK    0x1FFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG88_CTRL0    0x0e08
+#define    RTL8367C_SVLAN_C2SCFG88_CTRL0_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG88_CTRL0_MASK    0x3F
+
+#define    RTL8367C_REG_SVLAN_C2SCFG88_CTRL1    0x0e09
+#define    RTL8367C_SVLAN_C2SCFG88_CTRL1_C2SENPMSK_EXT_OFFSET    8
+#define    RTL8367C_SVLAN_C2SCFG88_CTRL1_C2SENPMSK_EXT_MASK    0x700
+#define    RTL8367C_SVLAN_C2SCFG88_CTRL1_C2SENPMSK_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG88_CTRL1_C2SENPMSK_MASK    0xFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG88_CTRL2    0x0e0a
+#define    RTL8367C_SVLAN_C2SCFG88_CTRL2_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG88_CTRL2_MASK    0x1FFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG89_CTRL0    0x0e0b
+#define    RTL8367C_SVLAN_C2SCFG89_CTRL0_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG89_CTRL0_MASK    0x3F
+
+#define    RTL8367C_REG_SVLAN_C2SCFG89_CTRL1    0x0e0c
+#define    RTL8367C_SVLAN_C2SCFG89_CTRL1_C2SENPMSK_EXT_OFFSET    8
+#define    RTL8367C_SVLAN_C2SCFG89_CTRL1_C2SENPMSK_EXT_MASK    0x700
+#define    RTL8367C_SVLAN_C2SCFG89_CTRL1_C2SENPMSK_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG89_CTRL1_C2SENPMSK_MASK    0xFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG89_CTRL2    0x0e0d
+#define    RTL8367C_SVLAN_C2SCFG89_CTRL2_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG89_CTRL2_MASK    0x1FFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG90_CTRL0    0x0e0e
+#define    RTL8367C_SVLAN_C2SCFG90_CTRL0_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG90_CTRL0_MASK    0x3F
+
+#define    RTL8367C_REG_SVLAN_C2SCFG90_CTRL1    0x0e0f
+#define    RTL8367C_SVLAN_C2SCFG90_CTRL1_C2SENPMSK_EXT_OFFSET    8
+#define    RTL8367C_SVLAN_C2SCFG90_CTRL1_C2SENPMSK_EXT_MASK    0x700
+#define    RTL8367C_SVLAN_C2SCFG90_CTRL1_C2SENPMSK_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG90_CTRL1_C2SENPMSK_MASK    0xFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG90_CTRL2    0x0e10
+#define    RTL8367C_SVLAN_C2SCFG90_CTRL2_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG90_CTRL2_MASK    0x1FFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG91_CTRL0    0x0e11
+#define    RTL8367C_SVLAN_C2SCFG91_CTRL0_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG91_CTRL0_MASK    0x3F
+
+#define    RTL8367C_REG_SVLAN_C2SCFG91_CTRL1    0x0e12
+#define    RTL8367C_SVLAN_C2SCFG91_CTRL1_C2SENPMSK_EXT_OFFSET    8
+#define    RTL8367C_SVLAN_C2SCFG91_CTRL1_C2SENPMSK_EXT_MASK    0x700
+#define    RTL8367C_SVLAN_C2SCFG91_CTRL1_C2SENPMSK_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG91_CTRL1_C2SENPMSK_MASK    0xFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG91_CTRL2    0x0e13
+#define    RTL8367C_SVLAN_C2SCFG91_CTRL2_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG91_CTRL2_MASK    0x1FFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG92_CTRL0    0x0e14
+#define    RTL8367C_SVLAN_C2SCFG92_CTRL0_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG92_CTRL0_MASK    0x3F
+
+#define    RTL8367C_REG_SVLAN_C2SCFG92_CTRL1    0x0e15
+#define    RTL8367C_SVLAN_C2SCFG92_CTRL1_C2SENPMSK_EXT_OFFSET    8
+#define    RTL8367C_SVLAN_C2SCFG92_CTRL1_C2SENPMSK_EXT_MASK    0x700
+#define    RTL8367C_SVLAN_C2SCFG92_CTRL1_C2SENPMSK_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG92_CTRL1_C2SENPMSK_MASK    0xFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG92_CTRL2    0x0e16
+#define    RTL8367C_SVLAN_C2SCFG92_CTRL2_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG92_CTRL2_MASK    0x1FFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG93_CTRL0    0x0e17
+#define    RTL8367C_SVLAN_C2SCFG93_CTRL0_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG93_CTRL0_MASK    0x3F
+
+#define    RTL8367C_REG_SVLAN_C2SCFG93_CTRL1    0x0e18
+#define    RTL8367C_SVLAN_C2SCFG93_CTRL1_C2SENPMSK_EXT_OFFSET    8
+#define    RTL8367C_SVLAN_C2SCFG93_CTRL1_C2SENPMSK_EXT_MASK    0x700
+#define    RTL8367C_SVLAN_C2SCFG93_CTRL1_C2SENPMSK_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG93_CTRL1_C2SENPMSK_MASK    0xFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG93_CTRL2    0x0e19
+#define    RTL8367C_SVLAN_C2SCFG93_CTRL2_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG93_CTRL2_MASK    0x1FFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG94_CTRL0    0x0e1a
+#define    RTL8367C_SVLAN_C2SCFG94_CTRL0_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG94_CTRL0_MASK    0x3F
+
+#define    RTL8367C_REG_SVLAN_C2SCFG94_CTRL1    0x0e1b
+#define    RTL8367C_SVLAN_C2SCFG94_CTRL1_C2SENPMSK_EXT_OFFSET    8
+#define    RTL8367C_SVLAN_C2SCFG94_CTRL1_C2SENPMSK_EXT_MASK    0x700
+#define    RTL8367C_SVLAN_C2SCFG94_CTRL1_C2SENPMSK_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG94_CTRL1_C2SENPMSK_MASK    0xFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG94_CTRL2    0x0e1c
+#define    RTL8367C_SVLAN_C2SCFG94_CTRL2_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG94_CTRL2_MASK    0x1FFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG95_CTRL0    0x0e1d
+#define    RTL8367C_SVLAN_C2SCFG95_CTRL0_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG95_CTRL0_MASK    0x3F
+
+#define    RTL8367C_REG_SVLAN_C2SCFG95_CTRL1    0x0e1e
+#define    RTL8367C_SVLAN_C2SCFG95_CTRL1_C2SENPMSK_EXT_OFFSET    8
+#define    RTL8367C_SVLAN_C2SCFG95_CTRL1_C2SENPMSK_EXT_MASK    0x700
+#define    RTL8367C_SVLAN_C2SCFG95_CTRL1_C2SENPMSK_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG95_CTRL1_C2SENPMSK_MASK    0xFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG95_CTRL2    0x0e1f
+#define    RTL8367C_SVLAN_C2SCFG95_CTRL2_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG95_CTRL2_MASK    0x1FFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG96_CTRL0    0x0e20
+#define    RTL8367C_SVLAN_C2SCFG96_CTRL0_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG96_CTRL0_MASK    0x3F
+
+#define    RTL8367C_REG_SVLAN_C2SCFG96_CTRL1    0x0e21
+#define    RTL8367C_SVLAN_C2SCFG96_CTRL1_C2SENPMSK_EXT_OFFSET    8
+#define    RTL8367C_SVLAN_C2SCFG96_CTRL1_C2SENPMSK_EXT_MASK    0x700
+#define    RTL8367C_SVLAN_C2SCFG96_CTRL1_C2SENPMSK_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG96_CTRL1_C2SENPMSK_MASK    0xFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG96_CTRL2    0x0e22
+#define    RTL8367C_SVLAN_C2SCFG96_CTRL2_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG96_CTRL2_MASK    0x1FFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG97_CTRL0    0x0e23
+#define    RTL8367C_SVLAN_C2SCFG97_CTRL0_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG97_CTRL0_MASK    0x3F
+
+#define    RTL8367C_REG_SVLAN_C2SCFG97_CTRL1    0x0e24
+#define    RTL8367C_SVLAN_C2SCFG97_CTRL1_C2SENPMSK_EXT_OFFSET    8
+#define    RTL8367C_SVLAN_C2SCFG97_CTRL1_C2SENPMSK_EXT_MASK    0x700
+#define    RTL8367C_SVLAN_C2SCFG97_CTRL1_C2SENPMSK_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG97_CTRL1_C2SENPMSK_MASK    0xFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG97_CTRL2    0x0e25
+#define    RTL8367C_SVLAN_C2SCFG97_CTRL2_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG97_CTRL2_MASK    0x1FFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG98_CTRL0    0x0e26
+#define    RTL8367C_SVLAN_C2SCFG98_CTRL0_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG98_CTRL0_MASK    0x3F
+
+#define    RTL8367C_REG_SVLAN_C2SCFG98_CTRL1    0x0e27
+#define    RTL8367C_SVLAN_C2SCFG98_CTRL1_C2SENPMSK_EXT_OFFSET    8
+#define    RTL8367C_SVLAN_C2SCFG98_CTRL1_C2SENPMSK_EXT_MASK    0x700
+#define    RTL8367C_SVLAN_C2SCFG98_CTRL1_C2SENPMSK_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG98_CTRL1_C2SENPMSK_MASK    0xFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG98_CTRL2    0x0e28
+#define    RTL8367C_SVLAN_C2SCFG98_CTRL2_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG98_CTRL2_MASK    0x1FFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG99_CTRL0    0x0e29
+#define    RTL8367C_SVLAN_C2SCFG99_CTRL0_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG99_CTRL0_MASK    0x3F
+
+#define    RTL8367C_REG_SVLAN_C2SCFG99_CTRL1    0x0e2a
+#define    RTL8367C_SVLAN_C2SCFG99_CTRL1_C2SENPMSK_EXT_OFFSET    8
+#define    RTL8367C_SVLAN_C2SCFG99_CTRL1_C2SENPMSK_EXT_MASK    0x700
+#define    RTL8367C_SVLAN_C2SCFG99_CTRL1_C2SENPMSK_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG99_CTRL1_C2SENPMSK_MASK    0xFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG99_CTRL2    0x0e2b
+#define    RTL8367C_SVLAN_C2SCFG99_CTRL2_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG99_CTRL2_MASK    0x1FFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG100_CTRL0    0x0e2c
+#define    RTL8367C_SVLAN_C2SCFG100_CTRL0_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG100_CTRL0_MASK    0x3F
+
+#define    RTL8367C_REG_SVLAN_C2SCFG100_CTRL1    0x0e2d
+#define    RTL8367C_SVLAN_C2SCFG100_CTRL1_C2SENPMSK_EXT_OFFSET    8
+#define    RTL8367C_SVLAN_C2SCFG100_CTRL1_C2SENPMSK_EXT_MASK    0x700
+#define    RTL8367C_SVLAN_C2SCFG100_CTRL1_C2SENPMSK_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG100_CTRL1_C2SENPMSK_MASK    0xFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG100_CTRL2    0x0e2e
+#define    RTL8367C_SVLAN_C2SCFG100_CTRL2_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG100_CTRL2_MASK    0x1FFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG101_CTRL0    0x0e2f
+#define    RTL8367C_SVLAN_C2SCFG101_CTRL0_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG101_CTRL0_MASK    0x3F
+
+#define    RTL8367C_REG_SVLAN_C2SCFG101_CTRL1    0x0e30
+#define    RTL8367C_SVLAN_C2SCFG101_CTRL1_C2SENPMSK_EXT_OFFSET    8
+#define    RTL8367C_SVLAN_C2SCFG101_CTRL1_C2SENPMSK_EXT_MASK    0x700
+#define    RTL8367C_SVLAN_C2SCFG101_CTRL1_C2SENPMSK_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG101_CTRL1_C2SENPMSK_MASK    0xFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG101_CTRL2    0x0e31
+#define    RTL8367C_SVLAN_C2SCFG101_CTRL2_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG101_CTRL2_MASK    0x1FFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG102_CTRL0    0x0e32
+#define    RTL8367C_SVLAN_C2SCFG102_CTRL0_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG102_CTRL0_MASK    0x3F
+
+#define    RTL8367C_REG_SVLAN_C2SCFG102_CTRL1    0x0e33
+#define    RTL8367C_SVLAN_C2SCFG102_CTRL1_C2SENPMSK_EXT_OFFSET    8
+#define    RTL8367C_SVLAN_C2SCFG102_CTRL1_C2SENPMSK_EXT_MASK    0x700
+#define    RTL8367C_SVLAN_C2SCFG102_CTRL1_C2SENPMSK_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG102_CTRL1_C2SENPMSK_MASK    0xFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG102_CTRL2    0x0e34
+#define    RTL8367C_SVLAN_C2SCFG102_CTRL2_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG102_CTRL2_MASK    0x1FFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG103_CTRL0    0x0e35
+#define    RTL8367C_SVLAN_C2SCFG103_CTRL0_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG103_CTRL0_MASK    0x3F
+
+#define    RTL8367C_REG_SVLAN_C2SCFG103_CTRL1    0x0e36
+#define    RTL8367C_SVLAN_C2SCFG103_CTRL1_C2SENPMSK_EXT_OFFSET    8
+#define    RTL8367C_SVLAN_C2SCFG103_CTRL1_C2SENPMSK_EXT_MASK    0x700
+#define    RTL8367C_SVLAN_C2SCFG103_CTRL1_C2SENPMSK_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG103_CTRL1_C2SENPMSK_MASK    0xFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG103_CTRL2    0x0e37
+#define    RTL8367C_SVLAN_C2SCFG103_CTRL2_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG103_CTRL2_MASK    0x1FFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG104_CTRL0    0x0e38
+#define    RTL8367C_SVLAN_C2SCFG104_CTRL0_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG104_CTRL0_MASK    0x3F
+
+#define    RTL8367C_REG_SVLAN_C2SCFG104_CTRL1    0x0e39
+#define    RTL8367C_SVLAN_C2SCFG104_CTRL1_C2SENPMSK_EXT_OFFSET    8
+#define    RTL8367C_SVLAN_C2SCFG104_CTRL1_C2SENPMSK_EXT_MASK    0x700
+#define    RTL8367C_SVLAN_C2SCFG104_CTRL1_C2SENPMSK_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG104_CTRL1_C2SENPMSK_MASK    0xFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG104_CTRL2    0x0e3a
+#define    RTL8367C_SVLAN_C2SCFG104_CTRL2_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG104_CTRL2_MASK    0x1FFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG105_CTRL0    0x0e3b
+#define    RTL8367C_SVLAN_C2SCFG105_CTRL0_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG105_CTRL0_MASK    0x3F
+
+#define    RTL8367C_REG_SVLAN_C2SCFG105_CTRL1    0x0e3c
+#define    RTL8367C_SVLAN_C2SCFG105_CTRL1_C2SENPMSK_EXT_OFFSET    8
+#define    RTL8367C_SVLAN_C2SCFG105_CTRL1_C2SENPMSK_EXT_MASK    0x700
+#define    RTL8367C_SVLAN_C2SCFG105_CTRL1_C2SENPMSK_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG105_CTRL1_C2SENPMSK_MASK    0xFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG105_CTRL2    0x0e3d
+#define    RTL8367C_SVLAN_C2SCFG105_CTRL2_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG105_CTRL2_MASK    0x1FFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG106_CTRL0    0x0e3e
+#define    RTL8367C_SVLAN_C2SCFG106_CTRL0_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG106_CTRL0_MASK    0x3F
+
+#define    RTL8367C_REG_SVLAN_C2SCFG106_CTRL1    0x0e3f
+#define    RTL8367C_SVLAN_C2SCFG106_CTRL1_C2SENPMSK_EXT_OFFSET    8
+#define    RTL8367C_SVLAN_C2SCFG106_CTRL1_C2SENPMSK_EXT_MASK    0x700
+#define    RTL8367C_SVLAN_C2SCFG106_CTRL1_C2SENPMSK_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG106_CTRL1_C2SENPMSK_MASK    0xFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG106_CTRL2    0x0e40
+#define    RTL8367C_SVLAN_C2SCFG106_CTRL2_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG106_CTRL2_MASK    0x1FFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG107_CTRL0    0x0e41
+#define    RTL8367C_SVLAN_C2SCFG107_CTRL0_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG107_CTRL0_MASK    0x3F
+
+#define    RTL8367C_REG_SVLAN_C2SCFG107_CTRL1    0x0e42
+#define    RTL8367C_SVLAN_C2SCFG107_CTRL1_C2SENPMSK_EXT_OFFSET    8
+#define    RTL8367C_SVLAN_C2SCFG107_CTRL1_C2SENPMSK_EXT_MASK    0x700
+#define    RTL8367C_SVLAN_C2SCFG107_CTRL1_C2SENPMSK_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG107_CTRL1_C2SENPMSK_MASK    0xFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG107_CTRL2    0x0e43
+#define    RTL8367C_SVLAN_C2SCFG107_CTRL2_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG107_CTRL2_MASK    0x1FFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG108_CTRL0    0x0e44
+#define    RTL8367C_SVLAN_C2SCFG108_CTRL0_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG108_CTRL0_MASK    0x3F
+
+#define    RTL8367C_REG_SVLAN_C2SCFG108_CTRL1    0x0e45
+#define    RTL8367C_SVLAN_C2SCFG108_CTRL1_C2SENPMSK_EXT_OFFSET    8
+#define    RTL8367C_SVLAN_C2SCFG108_CTRL1_C2SENPMSK_EXT_MASK    0x700
+#define    RTL8367C_SVLAN_C2SCFG108_CTRL1_C2SENPMSK_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG108_CTRL1_C2SENPMSK_MASK    0xFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG108_CTRL2    0x0e46
+#define    RTL8367C_SVLAN_C2SCFG108_CTRL2_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG108_CTRL2_MASK    0x1FFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG109_CTRL0    0x0e47
+#define    RTL8367C_SVLAN_C2SCFG109_CTRL0_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG109_CTRL0_MASK    0x3F
+
+#define    RTL8367C_REG_SVLAN_C2SCFG109_CTRL1    0x0e48
+#define    RTL8367C_SVLAN_C2SCFG109_CTRL1_C2SENPMSK_EXT_OFFSET    8
+#define    RTL8367C_SVLAN_C2SCFG109_CTRL1_C2SENPMSK_EXT_MASK    0x700
+#define    RTL8367C_SVLAN_C2SCFG109_CTRL1_C2SENPMSK_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG109_CTRL1_C2SENPMSK_MASK    0xFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG109_CTRL2    0x0e49
+#define    RTL8367C_SVLAN_C2SCFG109_CTRL2_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG109_CTRL2_MASK    0x1FFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG110_CTRL0    0x0e4a
+#define    RTL8367C_SVLAN_C2SCFG110_CTRL0_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG110_CTRL0_MASK    0x3F
+
+#define    RTL8367C_REG_SVLAN_C2SCFG110_CTRL1    0x0e4b
+#define    RTL8367C_SVLAN_C2SCFG110_CTRL1_C2SENPMSK_EXT_OFFSET    8
+#define    RTL8367C_SVLAN_C2SCFG110_CTRL1_C2SENPMSK_EXT_MASK    0x700
+#define    RTL8367C_SVLAN_C2SCFG110_CTRL1_C2SENPMSK_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG110_CTRL1_C2SENPMSK_MASK    0xFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG110_CTRL2    0x0e4c
+#define    RTL8367C_SVLAN_C2SCFG110_CTRL2_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG110_CTRL2_MASK    0x1FFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG111_CTRL0    0x0e4d
+#define    RTL8367C_SVLAN_C2SCFG111_CTRL0_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG111_CTRL0_MASK    0x3F
+
+#define    RTL8367C_REG_SVLAN_C2SCFG111_CTRL1    0x0e4e
+#define    RTL8367C_SVLAN_C2SCFG111_CTRL1_C2SENPMSK_EXT_OFFSET    8
+#define    RTL8367C_SVLAN_C2SCFG111_CTRL1_C2SENPMSK_EXT_MASK    0x700
+#define    RTL8367C_SVLAN_C2SCFG111_CTRL1_C2SENPMSK_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG111_CTRL1_C2SENPMSK_MASK    0xFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG111_CTRL2    0x0e4f
+#define    RTL8367C_SVLAN_C2SCFG111_CTRL2_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG111_CTRL2_MASK    0x1FFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG112_CTRL0    0x0e50
+#define    RTL8367C_SVLAN_C2SCFG112_CTRL0_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG112_CTRL0_MASK    0x3F
+
+#define    RTL8367C_REG_SVLAN_C2SCFG112_CTRL1    0x0e51
+#define    RTL8367C_SVLAN_C2SCFG112_CTRL1_C2SENPMSK_EXT_OFFSET    8
+#define    RTL8367C_SVLAN_C2SCFG112_CTRL1_C2SENPMSK_EXT_MASK    0x700
+#define    RTL8367C_SVLAN_C2SCFG112_CTRL1_C2SENPMSK_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG112_CTRL1_C2SENPMSK_MASK    0xFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG112_CTRL2    0x0e52
+#define    RTL8367C_SVLAN_C2SCFG112_CTRL2_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG112_CTRL2_MASK    0x1FFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG113_CTRL0    0x0e53
+#define    RTL8367C_SVLAN_C2SCFG113_CTRL0_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG113_CTRL0_MASK    0x3F
+
+#define    RTL8367C_REG_SVLAN_C2SCFG113_CTRL1    0x0e54
+#define    RTL8367C_SVLAN_C2SCFG113_CTRL1_C2SENPMSK_EXT_OFFSET    8
+#define    RTL8367C_SVLAN_C2SCFG113_CTRL1_C2SENPMSK_EXT_MASK    0x700
+#define    RTL8367C_SVLAN_C2SCFG113_CTRL1_C2SENPMSK_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG113_CTRL1_C2SENPMSK_MASK    0xFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG113_CTRL2    0x0e55
+#define    RTL8367C_SVLAN_C2SCFG113_CTRL2_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG113_CTRL2_MASK    0x1FFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG114_CTRL0    0x0e56
+#define    RTL8367C_SVLAN_C2SCFG114_CTRL0_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG114_CTRL0_MASK    0x3F
+
+#define    RTL8367C_REG_SVLAN_C2SCFG114_CTRL1    0x0e57
+#define    RTL8367C_SVLAN_C2SCFG114_CTRL1_C2SENPMSK_EXT_OFFSET    8
+#define    RTL8367C_SVLAN_C2SCFG114_CTRL1_C2SENPMSK_EXT_MASK    0x700
+#define    RTL8367C_SVLAN_C2SCFG114_CTRL1_C2SENPMSK_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG114_CTRL1_C2SENPMSK_MASK    0xFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG114_CTRL2    0x0e58
+#define    RTL8367C_SVLAN_C2SCFG114_CTRL2_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG114_CTRL2_MASK    0x1FFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG115_CTRL0    0x0e59
+#define    RTL8367C_SVLAN_C2SCFG115_CTRL0_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG115_CTRL0_MASK    0x3F
+
+#define    RTL8367C_REG_SVLAN_C2SCFG115_CTRL1    0x0e5a
+#define    RTL8367C_SVLAN_C2SCFG115_CTRL1_C2SENPMSK_EXT_OFFSET    8
+#define    RTL8367C_SVLAN_C2SCFG115_CTRL1_C2SENPMSK_EXT_MASK    0x700
+#define    RTL8367C_SVLAN_C2SCFG115_CTRL1_C2SENPMSK_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG115_CTRL1_C2SENPMSK_MASK    0xFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG115_CTRL2    0x0e5b
+#define    RTL8367C_SVLAN_C2SCFG115_CTRL2_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG115_CTRL2_MASK    0x1FFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG116_CTRL0    0x0e5c
+#define    RTL8367C_SVLAN_C2SCFG116_CTRL0_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG116_CTRL0_MASK    0x3F
+
+#define    RTL8367C_REG_SVLAN_C2SCFG116_CTRL1    0x0e5d
+#define    RTL8367C_SVLAN_C2SCFG116_CTRL1_C2SENPMSK_EXT_OFFSET    8
+#define    RTL8367C_SVLAN_C2SCFG116_CTRL1_C2SENPMSK_EXT_MASK    0x700
+#define    RTL8367C_SVLAN_C2SCFG116_CTRL1_C2SENPMSK_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG116_CTRL1_C2SENPMSK_MASK    0xFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG116_CTRL2    0x0e5e
+#define    RTL8367C_SVLAN_C2SCFG116_CTRL2_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG116_CTRL2_MASK    0x1FFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG117_CTRL0    0x0e5f
+#define    RTL8367C_SVLAN_C2SCFG117_CTRL0_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG117_CTRL0_MASK    0x3F
+
+#define    RTL8367C_REG_SVLAN_C2SCFG117_CTRL1    0x0e60
+#define    RTL8367C_SVLAN_C2SCFG117_CTRL1_C2SENPMSK_EXT_OFFSET    8
+#define    RTL8367C_SVLAN_C2SCFG117_CTRL1_C2SENPMSK_EXT_MASK    0x700
+#define    RTL8367C_SVLAN_C2SCFG117_CTRL1_C2SENPMSK_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG117_CTRL1_C2SENPMSK_MASK    0xFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG117_CTRL2    0x0e61
+#define    RTL8367C_SVLAN_C2SCFG117_CTRL2_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG117_CTRL2_MASK    0x1FFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG118_CTRL0    0x0e62
+#define    RTL8367C_SVLAN_C2SCFG118_CTRL0_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG118_CTRL0_MASK    0x3F
+
+#define    RTL8367C_REG_SVLAN_C2SCFG118_CTRL1    0x0e63
+#define    RTL8367C_SVLAN_C2SCFG118_CTRL1_C2SENPMSK_EXT_OFFSET    8
+#define    RTL8367C_SVLAN_C2SCFG118_CTRL1_C2SENPMSK_EXT_MASK    0x700
+#define    RTL8367C_SVLAN_C2SCFG118_CTRL1_C2SENPMSK_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG118_CTRL1_C2SENPMSK_MASK    0xFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG118_CTRL2    0x0e64
+#define    RTL8367C_SVLAN_C2SCFG118_CTRL2_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG118_CTRL2_MASK    0x1FFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG119_CTRL0    0x0e65
+#define    RTL8367C_SVLAN_C2SCFG119_CTRL0_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG119_CTRL0_MASK    0x3F
+
+#define    RTL8367C_REG_SVLAN_C2SCFG119_CTRL1    0x0e66
+#define    RTL8367C_SVLAN_C2SCFG119_CTRL1_C2SENPMSK_EXT_OFFSET    8
+#define    RTL8367C_SVLAN_C2SCFG119_CTRL1_C2SENPMSK_EXT_MASK    0x700
+#define    RTL8367C_SVLAN_C2SCFG119_CTRL1_C2SENPMSK_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG119_CTRL1_C2SENPMSK_MASK    0xFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG119_CTRL2    0x0e67
+#define    RTL8367C_SVLAN_C2SCFG119_CTRL2_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG119_CTRL2_MASK    0x1FFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG120_CTRL0    0x0e68
+#define    RTL8367C_SVLAN_C2SCFG120_CTRL0_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG120_CTRL0_MASK    0x3F
+
+#define    RTL8367C_REG_SVLAN_C2SCFG120_CTRL1    0x0e69
+#define    RTL8367C_SVLAN_C2SCFG120_CTRL1_C2SENPMSK_EXT_OFFSET    8
+#define    RTL8367C_SVLAN_C2SCFG120_CTRL1_C2SENPMSK_EXT_MASK    0x700
+#define    RTL8367C_SVLAN_C2SCFG120_CTRL1_C2SENPMSK_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG120_CTRL1_C2SENPMSK_MASK    0xFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG120_CTRL2    0x0e6a
+#define    RTL8367C_SVLAN_C2SCFG120_CTRL2_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG120_CTRL2_MASK    0x1FFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG121_CTRL0    0x0e6b
+#define    RTL8367C_SVLAN_C2SCFG121_CTRL0_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG121_CTRL0_MASK    0x3F
+
+#define    RTL8367C_REG_SVLAN_C2SCFG121_CTRL1    0x0e6c
+#define    RTL8367C_SVLAN_C2SCFG121_CTRL1_C2SENPMSK_EXT_OFFSET    8
+#define    RTL8367C_SVLAN_C2SCFG121_CTRL1_C2SENPMSK_EXT_MASK    0x700
+#define    RTL8367C_SVLAN_C2SCFG121_CTRL1_C2SENPMSK_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG121_CTRL1_C2SENPMSK_MASK    0xFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG121_CTRL2    0x0e6d
+#define    RTL8367C_SVLAN_C2SCFG121_CTRL2_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG121_CTRL2_MASK    0x1FFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG122_CTRL0    0x0e6e
+#define    RTL8367C_SVLAN_C2SCFG122_CTRL0_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG122_CTRL0_MASK    0x3F
+
+#define    RTL8367C_REG_SVLAN_C2SCFG122_CTRL1    0x0e6f
+#define    RTL8367C_SVLAN_C2SCFG122_CTRL1_C2SENPMSK_EXT_OFFSET    8
+#define    RTL8367C_SVLAN_C2SCFG122_CTRL1_C2SENPMSK_EXT_MASK    0x700
+#define    RTL8367C_SVLAN_C2SCFG122_CTRL1_C2SENPMSK_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG122_CTRL1_C2SENPMSK_MASK    0xFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG122_CTRL2    0x0e70
+#define    RTL8367C_SVLAN_C2SCFG122_CTRL2_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG122_CTRL2_MASK    0x1FFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG123_CTRL0    0x0e71
+#define    RTL8367C_SVLAN_C2SCFG123_CTRL0_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG123_CTRL0_MASK    0x3F
+
+#define    RTL8367C_REG_SVLAN_C2SCFG123_CTRL1    0x0e72
+#define    RTL8367C_SVLAN_C2SCFG123_CTRL1_C2SENPMSK_EXT_OFFSET    8
+#define    RTL8367C_SVLAN_C2SCFG123_CTRL1_C2SENPMSK_EXT_MASK    0x700
+#define    RTL8367C_SVLAN_C2SCFG123_CTRL1_C2SENPMSK_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG123_CTRL1_C2SENPMSK_MASK    0xFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG123_CTRL2    0x0e73
+#define    RTL8367C_SVLAN_C2SCFG123_CTRL2_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG123_CTRL2_MASK    0x1FFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG124_CTRL0    0x0e74
+#define    RTL8367C_SVLAN_C2SCFG124_CTRL0_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG124_CTRL0_MASK    0x3F
+
+#define    RTL8367C_REG_SVLAN_C2SCFG124_CTRL1    0x0e75
+#define    RTL8367C_SVLAN_C2SCFG124_CTRL1_C2SENPMSK_EXT_OFFSET    8
+#define    RTL8367C_SVLAN_C2SCFG124_CTRL1_C2SENPMSK_EXT_MASK    0x700
+#define    RTL8367C_SVLAN_C2SCFG124_CTRL1_C2SENPMSK_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG124_CTRL1_C2SENPMSK_MASK    0xFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG124_CTRL2    0x0e76
+#define    RTL8367C_SVLAN_C2SCFG124_CTRL2_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG124_CTRL2_MASK    0x1FFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG125_CTRL0    0x0e77
+#define    RTL8367C_SVLAN_C2SCFG125_CTRL0_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG125_CTRL0_MASK    0x3F
+
+#define    RTL8367C_REG_SVLAN_C2SCFG125_CTRL1    0x0e78
+#define    RTL8367C_SVLAN_C2SCFG125_CTRL1_C2SENPMSK_EXT_OFFSET    8
+#define    RTL8367C_SVLAN_C2SCFG125_CTRL1_C2SENPMSK_EXT_MASK    0x700
+#define    RTL8367C_SVLAN_C2SCFG125_CTRL1_C2SENPMSK_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG125_CTRL1_C2SENPMSK_MASK    0xFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG125_CTRL2    0x0e79
+#define    RTL8367C_SVLAN_C2SCFG125_CTRL2_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG125_CTRL2_MASK    0x1FFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG126_CTRL0    0x0e7a
+#define    RTL8367C_SVLAN_C2SCFG126_CTRL0_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG126_CTRL0_MASK    0x3F
+
+#define    RTL8367C_REG_SVLAN_C2SCFG126_CTRL1    0x0e7b
+#define    RTL8367C_SVLAN_C2SCFG126_CTRL1_C2SENPMSK_EXT_OFFSET    8
+#define    RTL8367C_SVLAN_C2SCFG126_CTRL1_C2SENPMSK_EXT_MASK    0x700
+#define    RTL8367C_SVLAN_C2SCFG126_CTRL1_C2SENPMSK_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG126_CTRL1_C2SENPMSK_MASK    0xFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG126_CTRL2    0x0e7c
+#define    RTL8367C_SVLAN_C2SCFG126_CTRL2_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG126_CTRL2_MASK    0x1FFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG127_CTRL0    0x0e7d
+#define    RTL8367C_SVLAN_C2SCFG127_CTRL0_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG127_CTRL0_MASK    0x3F
+
+#define    RTL8367C_REG_SVLAN_C2SCFG127_CTRL1    0x0e7e
+#define    RTL8367C_SVLAN_C2SCFG127_CTRL1_C2SENPMSK_EXT_OFFSET    8
+#define    RTL8367C_SVLAN_C2SCFG127_CTRL1_C2SENPMSK_EXT_MASK    0x700
+#define    RTL8367C_SVLAN_C2SCFG127_CTRL1_C2SENPMSK_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG127_CTRL1_C2SENPMSK_MASK    0xFF
+
+#define    RTL8367C_REG_SVLAN_C2SCFG127_CTRL2    0x0e7f
+#define    RTL8367C_SVLAN_C2SCFG127_CTRL2_OFFSET    0
+#define    RTL8367C_SVLAN_C2SCFG127_CTRL2_MASK    0x1FFF
+
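+/*
+ * Illustrative note, not part of the vendor header: in each C2SCFG CTRL1
+ * register above, C2SENPMSK (bits 7:0) and C2SENPMSK_EXT (bits 10:8) together
+ * form one 11-bit per-port enable mask. A minimal decode sketch, assuming a
+ * hypothetical 16-bit accessor rtl8367c_read_reg():
+ *
+ *	u16 r    = rtl8367c_read_reg(RTL8367C_REG_SVLAN_C2SCFG97_CTRL1);
+ *	u16 pmsk = (r & RTL8367C_SVLAN_C2SCFG97_CTRL1_C2SENPMSK_MASK)
+ *	           >> RTL8367C_SVLAN_C2SCFG97_CTRL1_C2SENPMSK_OFFSET;
+ *	u16 ext  = (r & RTL8367C_SVLAN_C2SCFG97_CTRL1_C2SENPMSK_EXT_MASK)
+ *	           >> RTL8367C_SVLAN_C2SCFG97_CTRL1_C2SENPMSK_EXT_OFFSET;
+ *	u16 ports = (ext << 8) | pmsk;
+ *
+ * which yields the 11-bit port mask (ports 10..0).
+ */
+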
+#define    RTL8367C_REG_SVLAN_CFG    0x0e80
+#define    RTL8367C_VS_PORT7_DMACVIDSEL_OFFSET    14
+#define    RTL8367C_VS_PORT7_DMACVIDSEL_MASK    0x4000
+#define    RTL8367C_VS_PORT6_DMACVIDSEL_OFFSET    13
+#define    RTL8367C_VS_PORT6_DMACVIDSEL_MASK    0x2000
+#define    RTL8367C_VS_PORT5_DMACVIDSEL_OFFSET    12
+#define    RTL8367C_VS_PORT5_DMACVIDSEL_MASK    0x1000
+#define    RTL8367C_VS_PORT4_DMACVIDSEL_OFFSET    11
+#define    RTL8367C_VS_PORT4_DMACVIDSEL_MASK    0x800
+#define    RTL8367C_VS_PORT3_DMACVIDSEL_OFFSET    10
+#define    RTL8367C_VS_PORT3_DMACVIDSEL_MASK    0x400
+#define    RTL8367C_VS_PORT2_DMACVIDSEL_OFFSET    9
+#define    RTL8367C_VS_PORT2_DMACVIDSEL_MASK    0x200
+#define    RTL8367C_VS_PORT1_DMACVIDSEL_OFFSET    8
+#define    RTL8367C_VS_PORT1_DMACVIDSEL_MASK    0x100
+#define    RTL8367C_VS_PORT0_DMACVIDSEL_OFFSET    7
+#define    RTL8367C_VS_PORT0_DMACVIDSEL_MASK    0x80
+#define    RTL8367C_VS_UIFSEG_OFFSET    6
+#define    RTL8367C_VS_UIFSEG_MASK    0x40
+#define    RTL8367C_VS_UNMAT_OFFSET    4
+#define    RTL8367C_VS_UNMAT_MASK    0x30
+#define    RTL8367C_VS_UNTAG_OFFSET    2
+#define    RTL8367C_VS_UNTAG_MASK    0xC
+#define    RTL8367C_VS_SPRISEL_OFFSET    0
+#define    RTL8367C_VS_SPRISEL_MASK    0x3
+
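+/*
+ * Illustrative sketch, not part of the vendor header: every *_OFFSET/*_MASK
+ * pair in this file describes one field of a 16-bit register. Assuming the
+ * same hypothetical accessor rtl8367c_read_reg(), reading e.g. the
+ * unmatched-packet action field of SVLAN_CFG looks like:
+ *
+ *	u16 cfg   = rtl8367c_read_reg(RTL8367C_REG_SVLAN_CFG);
+ *	u16 unmat = (cfg & RTL8367C_VS_UNMAT_MASK) >> RTL8367C_VS_UNMAT_OFFSET;
+ */
+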
+#define    RTL8367C_REG_SVLAN_PORTBASED_SVIDX_CTRL0    0x0e81
+#define    RTL8367C_VS_PORT1_SVIDX_OFFSET    8
+#define    RTL8367C_VS_PORT1_SVIDX_MASK    0x3F00
+#define    RTL8367C_VS_PORT0_SVIDX_OFFSET    0
+#define    RTL8367C_VS_PORT0_SVIDX_MASK    0x3F
+
+#define    RTL8367C_REG_SVLAN_PORTBASED_SVIDX_CTRL1    0x0e82
+#define    RTL8367C_VS_PORT3_SVIDX_OFFSET    8
+#define    RTL8367C_VS_PORT3_SVIDX_MASK    0x3F00
+#define    RTL8367C_VS_PORT2_SVIDX_OFFSET    0
+#define    RTL8367C_VS_PORT2_SVIDX_MASK    0x3F
+
+#define    RTL8367C_REG_SVLAN_PORTBASED_SVIDX_CTRL2    0x0e83
+#define    RTL8367C_VS_PORT5_SVIDX_OFFSET    8
+#define    RTL8367C_VS_PORT5_SVIDX_MASK    0x3F00
+#define    RTL8367C_VS_PORT4_SVIDX_OFFSET    0
+#define    RTL8367C_VS_PORT4_SVIDX_MASK    0x3F
+
+#define    RTL8367C_REG_SVLAN_PORTBASED_SVIDX_CTRL3    0x0e84
+#define    RTL8367C_VS_PORT7_SVIDX_OFFSET    8
+#define    RTL8367C_VS_PORT7_SVIDX_MASK    0x3F00
+#define    RTL8367C_VS_PORT6_SVIDX_OFFSET    0
+#define    RTL8367C_VS_PORT6_SVIDX_MASK    0x3F
+
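+/*
+ * Illustrative note, not part of the vendor header: the port-based SVIDX
+ * registers pack two 6-bit indices per 16-bit register (even port in bits
+ * 5:0, odd port in bits 13:8). For ports 0..7 the register address would be
+ * computed as, e.g.:
+ *
+ *	u16 addr = RTL8367C_REG_SVLAN_PORTBASED_SVIDX_CTRL0 + (port >> 1);
+ *	u16 off  = (port & 1) ? 8 : 0;
+ *
+ * Ports 8/9 are not contiguous with this block; they live in
+ * SVLAN_PORTBASED_SVIDX_CTRL4 (0x0e8b) further below.
+ */
+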
+#define    RTL8367C_REG_SVLAN_UNTAG_UNMAT_CFG    0x0e85
+#define    RTL8367C_VS_UNTAG_SVIDX_OFFSET    8
+#define    RTL8367C_VS_UNTAG_SVIDX_MASK    0x3F00
+#define    RTL8367C_VS_UNMAT_SVIDX_OFFSET    0
+#define    RTL8367C_VS_UNMAT_SVIDX_MASK    0x3F
+
+#define    RTL8367C_REG_SVLAN_LOOKUP_TYPE    0x0e86
+#define    RTL8367C_SVLAN_LOOKUP_TYPE_OFFSET    0
+#define    RTL8367C_SVLAN_LOOKUP_TYPE_MASK    0x1
+
+#define    RTL8367C_REG_IPMC_GROUP_VALID_15_0    0x0e87
+
+#define    RTL8367C_REG_IPMC_GROUP_VALID_31_16    0x0e88
+
+#define    RTL8367C_REG_IPMC_GROUP_VALID_47_32    0x0e89
+
+#define    RTL8367C_REG_IPMC_GROUP_VALID_63_48    0x0e8a
+
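+/*
+ * Illustrative note, not part of the vendor header: the four IPMC_GROUP_VALID
+ * registers hold one valid flag per IP multicast group, 16 groups per
+ * register (group 0 presumably in bit 0 of VALID_15_0, and so on), so a
+ * group's flag would be located as:
+ *
+ *	u16 addr = RTL8367C_REG_IPMC_GROUP_VALID_15_0 + (group >> 4);
+ *	u16 bit  = group & 0xF;
+ */
+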
+#define    RTL8367C_REG_SVLAN_PORTBASED_SVIDX_CTRL4    0x0e8b
+#define    RTL8367C_VS_PORT9_SVIDX_OFFSET    8
+#define    RTL8367C_VS_PORT9_SVIDX_MASK    0x3F00
+#define    RTL8367C_VS_PORT8_SVIDX_OFFSET    0
+#define    RTL8367C_VS_PORT8_SVIDX_MASK    0x3F
+
+#define    RTL8367C_REG_SVLAN_PORTBASED_SVIDX_CTRL5    0x0e8c
+#define    RTL8367C_SVLAN_PORTBASED_SVIDX_CTRL5_OFFSET    0
+#define    RTL8367C_SVLAN_PORTBASED_SVIDX_CTRL5_MASK    0x3F
+
+#define    RTL8367C_REG_SVLAN_CFG_EXT    0x0e8d
+#define    RTL8367C_VS_PORT10_DMACVIDSEL_OFFSET    2
+#define    RTL8367C_VS_PORT10_DMACVIDSEL_MASK    0x4
+#define    RTL8367C_VS_PORT9_DMACVIDSEL_OFFSET    1
+#define    RTL8367C_VS_PORT9_DMACVIDSEL_MASK    0x2
+#define    RTL8367C_VS_PORT8_DMACVIDSEL_OFFSET    0
+#define    RTL8367C_VS_PORT8_DMACVIDSEL_MASK    0x1
+
+#define    RTL8367C_REG_SVLAN_MEMBERCFG63_CTRL4    0x0e8e
+#define    RTL8367C_SVLAN_MEMBERCFG63_CTRL4_VS_UNTAGSET_EXT_OFFSET    8
+#define    RTL8367C_SVLAN_MEMBERCFG63_CTRL4_VS_UNTAGSET_EXT_MASK    0x700
+#define    RTL8367C_SVLAN_MEMBERCFG63_CTRL4_VS_SMBR_EXT_OFFSET    0
+#define    RTL8367C_SVLAN_MEMBERCFG63_CTRL4_VS_SMBR_EXT_MASK    0x7
+
+#define    RTL8367C_REG_SVLAN_DUMMY_0    0x0e90
+
+#define    RTL8367C_REG_SVLAN_DUMMY_1    0x0e91
+
+#define    RTL8367C_REG_SVLAN_DUMMY_2    0x0e92
+
+#define    RTL8367C_REG_SVLAN_DUMMY_3    0x0e93
+
+#define    RTL8367C_REG_SVLAN_DUMMY_4    0x0e94
+
+#define    RTL8367C_REG_SVLAN_DUMMY_5    0x0e95
+
+#define    RTL8367C_REG_SVLAN_DUMMY_6    0x0e96
+
+#define    RTL8367C_REG_SVLAN_DUMMY_7    0x0e97
+
+#define    RTL8367C_REG_SVLAN_DUMMY_8    0x0e98
+
+#define    RTL8367C_REG_SVLAN_DUMMY_9    0x0e99
+
+#define    RTL8367C_REG_SVLAN_DUMMY_10    0x0e9a
+
+#define    RTL8367C_REG_SVLAN_DUMMY_11    0x0e9b
+
+#define    RTL8367C_REG_SVLAN_DUMMY_12    0x0e9c
+
+#define    RTL8367C_REG_SVLAN_DUMMY_13    0x0e9d
+
+#define    RTL8367C_REG_SVLAN_DUMMY_14    0x0e9e
+
+#define    RTL8367C_REG_SVLAN_DUMMY_15    0x0e9f
+
+#define    RTL8367C_REG_SVLAN_DUMMY_16    0x0ea0
+
+#define    RTL8367C_REG_SVLAN_DUMMY_17    0x0ea1
+
+#define    RTL8367C_REG_SVLAN_DUMMY_18    0x0ea2
+
+#define    RTL8367C_REG_SVLAN_DUMMY_19    0x0ea3
+
+#define    RTL8367C_REG_SVLAN_DUMMY_20    0x0ea4
+
+#define    RTL8367C_REG_SVLAN_DUMMY_21    0x0ea5
+
+#define    RTL8367C_REG_SVLAN_DUMMY_22    0x0ea6
+
+#define    RTL8367C_REG_SVLAN_DUMMY_23    0x0ea7
+
+#define    RTL8367C_REG_SVLAN_DUMMY_24    0x0ea8
+
+#define    RTL8367C_REG_SVLAN_DUMMY_25    0x0ea9
+
+#define    RTL8367C_REG_SVLAN_DUMMY_26    0x0eaa
+
+#define    RTL8367C_REG_SVLAN_DUMMY_27    0x0eab
+
+#define    RTL8367C_REG_SVLAN_DUMMY_28    0x0eac
+
+#define    RTL8367C_REG_SVLAN_DUMMY_29    0x0ead
+
+#define    RTL8367C_REG_SVLAN_DUMMY_30    0x0eae
+
+#define    RTL8367C_REG_SVLAN_DUMMY_31    0x0eaf
+
+#define    RTL8367C_REG_IPMC_GROUP_VID_00    0x0eb0
+#define    RTL8367C_IPMC_GROUP_VID_00_OFFSET    0
+#define    RTL8367C_IPMC_GROUP_VID_00_MASK    0xFFF
+
+#define    RTL8367C_REG_IPMC_GROUP_VID_01    0x0eb1
+#define    RTL8367C_IPMC_GROUP_VID_01_OFFSET    0
+#define    RTL8367C_IPMC_GROUP_VID_01_MASK    0xFFF
+
+#define    RTL8367C_REG_IPMC_GROUP_VID_02    0x0eb2
+#define    RTL8367C_IPMC_GROUP_VID_02_OFFSET    0
+#define    RTL8367C_IPMC_GROUP_VID_02_MASK    0xFFF
+
+#define    RTL8367C_REG_IPMC_GROUP_VID_03    0x0eb3
+#define    RTL8367C_IPMC_GROUP_VID_03_OFFSET    0
+#define    RTL8367C_IPMC_GROUP_VID_03_MASK    0xFFF
+
+#define    RTL8367C_REG_IPMC_GROUP_VID_04    0x0eb4
+#define    RTL8367C_IPMC_GROUP_VID_04_OFFSET    0
+#define    RTL8367C_IPMC_GROUP_VID_04_MASK    0xFFF
+
+#define    RTL8367C_REG_IPMC_GROUP_VID_05    0x0eb5
+#define    RTL8367C_IPMC_GROUP_VID_05_OFFSET    0
+#define    RTL8367C_IPMC_GROUP_VID_05_MASK    0xFFF
+
+#define    RTL8367C_REG_IPMC_GROUP_VID_06    0x0eb6
+#define    RTL8367C_IPMC_GROUP_VID_06_OFFSET    0
+#define    RTL8367C_IPMC_GROUP_VID_06_MASK    0xFFF
+
+#define    RTL8367C_REG_IPMC_GROUP_VID_07    0x0eb7
+#define    RTL8367C_IPMC_GROUP_VID_07_OFFSET    0
+#define    RTL8367C_IPMC_GROUP_VID_07_MASK    0xFFF
+
+#define    RTL8367C_REG_IPMC_GROUP_VID_08    0x0eb8
+#define    RTL8367C_IPMC_GROUP_VID_08_OFFSET    0
+#define    RTL8367C_IPMC_GROUP_VID_08_MASK    0xFFF
+
+#define    RTL8367C_REG_IPMC_GROUP_VID_09    0x0eb9
+#define    RTL8367C_IPMC_GROUP_VID_09_OFFSET    0
+#define    RTL8367C_IPMC_GROUP_VID_09_MASK    0xFFF
+
+#define    RTL8367C_REG_IPMC_GROUP_VID_10    0x0eba
+#define    RTL8367C_IPMC_GROUP_VID_10_OFFSET    0
+#define    RTL8367C_IPMC_GROUP_VID_10_MASK    0xFFF
+
+#define    RTL8367C_REG_IPMC_GROUP_VID_11    0x0ebb
+#define    RTL8367C_IPMC_GROUP_VID_11_OFFSET    0
+#define    RTL8367C_IPMC_GROUP_VID_11_MASK    0xFFF
+
+#define    RTL8367C_REG_IPMC_GROUP_VID_12    0x0ebc
+#define    RTL8367C_IPMC_GROUP_VID_12_OFFSET    0
+#define    RTL8367C_IPMC_GROUP_VID_12_MASK    0xFFF
+
+#define    RTL8367C_REG_IPMC_GROUP_VID_13    0x0ebd
+#define    RTL8367C_IPMC_GROUP_VID_13_OFFSET    0
+#define    RTL8367C_IPMC_GROUP_VID_13_MASK    0xFFF
+
+#define    RTL8367C_REG_IPMC_GROUP_VID_14    0x0ebe
+#define    RTL8367C_IPMC_GROUP_VID_14_OFFSET    0
+#define    RTL8367C_IPMC_GROUP_VID_14_MASK    0xFFF
+
+#define    RTL8367C_REG_IPMC_GROUP_VID_15    0x0ebf
+#define    RTL8367C_IPMC_GROUP_VID_15_OFFSET    0
+#define    RTL8367C_IPMC_GROUP_VID_15_MASK    0xFFF
+
+#define    RTL8367C_REG_IPMC_GROUP_VID_16    0x0ec0
+#define    RTL8367C_IPMC_GROUP_VID_16_OFFSET    0
+#define    RTL8367C_IPMC_GROUP_VID_16_MASK    0xFFF
+
+#define    RTL8367C_REG_IPMC_GROUP_VID_17    0x0ec1
+#define    RTL8367C_IPMC_GROUP_VID_17_OFFSET    0
+#define    RTL8367C_IPMC_GROUP_VID_17_MASK    0xFFF
+
+#define    RTL8367C_REG_IPMC_GROUP_VID_18    0x0ec2
+#define    RTL8367C_IPMC_GROUP_VID_18_OFFSET    0
+#define    RTL8367C_IPMC_GROUP_VID_18_MASK    0xFFF
+
+#define    RTL8367C_REG_IPMC_GROUP_VID_19    0x0ec3
+#define    RTL8367C_IPMC_GROUP_VID_19_OFFSET    0
+#define    RTL8367C_IPMC_GROUP_VID_19_MASK    0xFFF
+
+#define    RTL8367C_REG_IPMC_GROUP_VID_20    0x0ec4
+#define    RTL8367C_IPMC_GROUP_VID_20_OFFSET    0
+#define    RTL8367C_IPMC_GROUP_VID_20_MASK    0xFFF
+
+#define    RTL8367C_REG_IPMC_GROUP_VID_21    0x0ec5
+#define    RTL8367C_IPMC_GROUP_VID_21_OFFSET    0
+#define    RTL8367C_IPMC_GROUP_VID_21_MASK    0xFFF
+
+#define    RTL8367C_REG_IPMC_GROUP_VID_22    0x0ec6
+#define    RTL8367C_IPMC_GROUP_VID_22_OFFSET    0
+#define    RTL8367C_IPMC_GROUP_VID_22_MASK    0xFFF
+
+#define    RTL8367C_REG_IPMC_GROUP_VID_23    0x0ec7
+#define    RTL8367C_IPMC_GROUP_VID_23_OFFSET    0
+#define    RTL8367C_IPMC_GROUP_VID_23_MASK    0xFFF
+
+#define    RTL8367C_REG_IPMC_GROUP_VID_24    0x0ec8
+#define    RTL8367C_IPMC_GROUP_VID_24_OFFSET    0
+#define    RTL8367C_IPMC_GROUP_VID_24_MASK    0xFFF
+
+#define    RTL8367C_REG_IPMC_GROUP_VID_25    0x0ec9
+#define    RTL8367C_IPMC_GROUP_VID_25_OFFSET    0
+#define    RTL8367C_IPMC_GROUP_VID_25_MASK    0xFFF
+
+#define    RTL8367C_REG_IPMC_GROUP_VID_26    0x0eca
+#define    RTL8367C_IPMC_GROUP_VID_26_OFFSET    0
+#define    RTL8367C_IPMC_GROUP_VID_26_MASK    0xFFF
+
+#define    RTL8367C_REG_IPMC_GROUP_VID_27    0x0ecb
+#define    RTL8367C_IPMC_GROUP_VID_27_OFFSET    0
+#define    RTL8367C_IPMC_GROUP_VID_27_MASK    0xFFF
+
+#define    RTL8367C_REG_IPMC_GROUP_VID_28    0x0ecc
+#define    RTL8367C_IPMC_GROUP_VID_28_OFFSET    0
+#define    RTL8367C_IPMC_GROUP_VID_28_MASK    0xFFF
+
+#define    RTL8367C_REG_IPMC_GROUP_VID_29    0x0ecd
+#define    RTL8367C_IPMC_GROUP_VID_29_OFFSET    0
+#define    RTL8367C_IPMC_GROUP_VID_29_MASK    0xFFF
+
+#define    RTL8367C_REG_IPMC_GROUP_VID_30    0x0ece
+#define    RTL8367C_IPMC_GROUP_VID_30_OFFSET    0
+#define    RTL8367C_IPMC_GROUP_VID_30_MASK    0xFFF
+
+#define    RTL8367C_REG_IPMC_GROUP_VID_31    0x0ecf
+#define    RTL8367C_IPMC_GROUP_VID_31_OFFSET    0
+#define    RTL8367C_IPMC_GROUP_VID_31_MASK    0xFFF
+
+#define    RTL8367C_REG_IPMC_GROUP_VID_32    0x0ed0
+#define    RTL8367C_IPMC_GROUP_VID_32_OFFSET    0
+#define    RTL8367C_IPMC_GROUP_VID_32_MASK    0xFFF
+
+#define    RTL8367C_REG_IPMC_GROUP_VID_33    0x0ed1
+#define    RTL8367C_IPMC_GROUP_VID_33_OFFSET    0
+#define    RTL8367C_IPMC_GROUP_VID_33_MASK    0xFFF
+
+#define    RTL8367C_REG_IPMC_GROUP_VID_34    0x0ed2
+#define    RTL8367C_IPMC_GROUP_VID_34_OFFSET    0
+#define    RTL8367C_IPMC_GROUP_VID_34_MASK    0xFFF
+
+#define    RTL8367C_REG_IPMC_GROUP_VID_35    0x0ed3
+#define    RTL8367C_IPMC_GROUP_VID_35_OFFSET    0
+#define    RTL8367C_IPMC_GROUP_VID_35_MASK    0xFFF
+
+#define    RTL8367C_REG_IPMC_GROUP_VID_36    0x0ed4
+#define    RTL8367C_IPMC_GROUP_VID_36_OFFSET    0
+#define    RTL8367C_IPMC_GROUP_VID_36_MASK    0xFFF
+
+#define    RTL8367C_REG_IPMC_GROUP_VID_37    0x0ed5
+#define    RTL8367C_IPMC_GROUP_VID_37_OFFSET    0
+#define    RTL8367C_IPMC_GROUP_VID_37_MASK    0xFFF
+
+#define    RTL8367C_REG_IPMC_GROUP_VID_38    0x0ed6
+#define    RTL8367C_IPMC_GROUP_VID_38_OFFSET    0
+#define    RTL8367C_IPMC_GROUP_VID_38_MASK    0xFFF
+
+#define    RTL8367C_REG_IPMC_GROUP_VID_39    0x0ed7
+#define    RTL8367C_IPMC_GROUP_VID_39_OFFSET    0
+#define    RTL8367C_IPMC_GROUP_VID_39_MASK    0xFFF
+
+#define    RTL8367C_REG_IPMC_GROUP_VID_40    0x0ed8
+#define    RTL8367C_IPMC_GROUP_VID_40_OFFSET    0
+#define    RTL8367C_IPMC_GROUP_VID_40_MASK    0xFFF
+
+#define    RTL8367C_REG_IPMC_GROUP_VID_41    0x0ed9
+#define    RTL8367C_IPMC_GROUP_VID_41_OFFSET    0
+#define    RTL8367C_IPMC_GROUP_VID_41_MASK    0xFFF
+
+#define    RTL8367C_REG_IPMC_GROUP_VID_42    0x0eda
+#define    RTL8367C_IPMC_GROUP_VID_42_OFFSET    0
+#define    RTL8367C_IPMC_GROUP_VID_42_MASK    0xFFF
+
+#define    RTL8367C_REG_IPMC_GROUP_VID_43    0x0edb
+#define    RTL8367C_IPMC_GROUP_VID_43_OFFSET    0
+#define    RTL8367C_IPMC_GROUP_VID_43_MASK    0xFFF
+
+#define    RTL8367C_REG_IPMC_GROUP_VID_44    0x0edc
+#define    RTL8367C_IPMC_GROUP_VID_44_OFFSET    0
+#define    RTL8367C_IPMC_GROUP_VID_44_MASK    0xFFF
+
+#define    RTL8367C_REG_IPMC_GROUP_VID_45    0x0edd
+#define    RTL8367C_IPMC_GROUP_VID_45_OFFSET    0
+#define    RTL8367C_IPMC_GROUP_VID_45_MASK    0xFFF
+
+#define    RTL8367C_REG_IPMC_GROUP_VID_46    0x0ede
+#define    RTL8367C_IPMC_GROUP_VID_46_OFFSET    0
+#define    RTL8367C_IPMC_GROUP_VID_46_MASK    0xFFF
+
+#define    RTL8367C_REG_IPMC_GROUP_VID_47    0x0edf
+#define    RTL8367C_IPMC_GROUP_VID_47_OFFSET    0
+#define    RTL8367C_IPMC_GROUP_VID_47_MASK    0xFFF
+
+#define    RTL8367C_REG_IPMC_GROUP_VID_48    0x0ef0
+#define    RTL8367C_IPMC_GROUP_VID_48_OFFSET    0
+#define    RTL8367C_IPMC_GROUP_VID_48_MASK    0xFFF
+
+#define    RTL8367C_REG_IPMC_GROUP_VID_49    0x0ef1
+#define    RTL8367C_IPMC_GROUP_VID_49_OFFSET    0
+#define    RTL8367C_IPMC_GROUP_VID_49_MASK    0xFFF
+
+#define    RTL8367C_REG_IPMC_GROUP_VID_50    0x0ef2
+#define    RTL8367C_IPMC_GROUP_VID_50_OFFSET    0
+#define    RTL8367C_IPMC_GROUP_VID_50_MASK    0xFFF
+
+#define    RTL8367C_REG_IPMC_GROUP_VID_51    0x0ef3
+#define    RTL8367C_IPMC_GROUP_VID_51_OFFSET    0
+#define    RTL8367C_IPMC_GROUP_VID_51_MASK    0xFFF
+
+#define    RTL8367C_REG_IPMC_GROUP_VID_52    0x0ef4
+#define    RTL8367C_IPMC_GROUP_VID_52_OFFSET    0
+#define    RTL8367C_IPMC_GROUP_VID_52_MASK    0xFFF
+
+#define    RTL8367C_REG_IPMC_GROUP_VID_53    0x0ef5
+#define    RTL8367C_IPMC_GROUP_VID_53_OFFSET    0
+#define    RTL8367C_IPMC_GROUP_VID_53_MASK    0xFFF
+
+#define    RTL8367C_REG_IPMC_GROUP_VID_54    0x0ef6
+#define    RTL8367C_IPMC_GROUP_VID_54_OFFSET    0
+#define    RTL8367C_IPMC_GROUP_VID_54_MASK    0xFFF
+
+#define    RTL8367C_REG_IPMC_GROUP_VID_55    0x0ef7
+#define    RTL8367C_IPMC_GROUP_VID_55_OFFSET    0
+#define    RTL8367C_IPMC_GROUP_VID_55_MASK    0xFFF
+
+#define    RTL8367C_REG_IPMC_GROUP_VID_56    0x0ef8
+#define    RTL8367C_IPMC_GROUP_VID_56_OFFSET    0
+#define    RTL8367C_IPMC_GROUP_VID_56_MASK    0xFFF
+
+#define    RTL8367C_REG_IPMC_GROUP_VID_57    0x0ef9
+#define    RTL8367C_IPMC_GROUP_VID_57_OFFSET    0
+#define    RTL8367C_IPMC_GROUP_VID_57_MASK    0xFFF
+
+#define    RTL8367C_REG_IPMC_GROUP_VID_58    0x0efa
+#define    RTL8367C_IPMC_GROUP_VID_58_OFFSET    0
+#define    RTL8367C_IPMC_GROUP_VID_58_MASK    0xFFF
+
+#define    RTL8367C_REG_IPMC_GROUP_VID_59    0x0efb
+#define    RTL8367C_IPMC_GROUP_VID_59_OFFSET    0
+#define    RTL8367C_IPMC_GROUP_VID_59_MASK    0xFFF
+
+#define    RTL8367C_REG_IPMC_GROUP_VID_60    0x0efc
+#define    RTL8367C_IPMC_GROUP_VID_60_OFFSET    0
+#define    RTL8367C_IPMC_GROUP_VID_60_MASK    0xFFF
+
+#define    RTL8367C_REG_IPMC_GROUP_VID_61    0x0efd
+#define    RTL8367C_IPMC_GROUP_VID_61_OFFSET    0
+#define    RTL8367C_IPMC_GROUP_VID_61_MASK    0xFFF
+
+#define    RTL8367C_REG_IPMC_GROUP_VID_62    0x0efe
+#define    RTL8367C_IPMC_GROUP_VID_62_OFFSET    0
+#define    RTL8367C_IPMC_GROUP_VID_62_MASK    0xFFF
+
+#define    RTL8367C_REG_IPMC_GROUP_VID_63    0x0eff
+#define    RTL8367C_IPMC_GROUP_VID_63_OFFSET    0
+#define    RTL8367C_IPMC_GROUP_VID_63_MASK    0xFFF
+
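+/*
+ * Illustrative note, not part of the vendor header: IPMC_GROUP_VID_00..47 sit
+ * contiguously at 0x0eb0..0x0edf, but VID_48..63 continue at 0x0ef0..0x0eff,
+ * so indexed access has to skip the 0x0ee0..0x0eef hole:
+ *
+ *	u16 addr = (idx < 48) ? RTL8367C_REG_IPMC_GROUP_VID_00 + idx
+ *	                      : RTL8367C_REG_IPMC_GROUP_VID_48 + (idx - 48);
+ */
+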
+/* (16'h0f00)hsactrl_reg */
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY0_CTRL0    0x0f00
+#define    RTL8367C_SVLAN_SP2C_ENTRY0_CTRL0_DST_PORT1_OFFSET    9
+#define    RTL8367C_SVLAN_SP2C_ENTRY0_CTRL0_DST_PORT1_MASK    0x200
+#define    RTL8367C_SVLAN_SP2C_ENTRY0_CTRL0_SVIDX_OFFSET    3
+#define    RTL8367C_SVLAN_SP2C_ENTRY0_CTRL0_SVIDX_MASK    0x1F8
+#define    RTL8367C_SVLAN_SP2C_ENTRY0_CTRL0_DST_PORT_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY0_CTRL0_DST_PORT_MASK    0x7
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY0_CTRL1    0x0f01
+#define    RTL8367C_SVLAN_SP2C_ENTRY0_CTRL1_VALID_OFFSET    12
+#define    RTL8367C_SVLAN_SP2C_ENTRY0_CTRL1_VALID_MASK    0x1000
+#define    RTL8367C_SVLAN_SP2C_ENTRY0_CTRL1_VID_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY0_CTRL1_VID_MASK    0xFFF
+
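+/*
+ * Illustrative note, not part of the vendor header: each SP2C (service-port
+ * to customer-port) translation entry spans two registers. CTRL0 carries the
+ * 6-bit SVIDX plus a destination port number split between DST_PORT (bits
+ * 2:0) and DST_PORT1 (bit 9, presumably the port number's MSB); CTRL1 carries
+ * the VALID flag (bit 12) and the 12-bit customer VID. A minimal decode
+ * sketch for entry 0, again assuming the hypothetical rtl8367c_read_reg():
+ *
+ *	u16 c0  = rtl8367c_read_reg(RTL8367C_REG_SVLAN_SP2C_ENTRY0_CTRL0);
+ *	u16 msb = (c0 & RTL8367C_SVLAN_SP2C_ENTRY0_CTRL0_DST_PORT1_MASK)
+ *	          >> RTL8367C_SVLAN_SP2C_ENTRY0_CTRL0_DST_PORT1_OFFSET;
+ *	u16 port = (msb << 3)
+ *	           | (c0 & RTL8367C_SVLAN_SP2C_ENTRY0_CTRL0_DST_PORT_MASK);
+ */
+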
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY1_CTRL0    0x0f02
+#define    RTL8367C_SVLAN_SP2C_ENTRY1_CTRL0_DST_PORT1_OFFSET    9
+#define    RTL8367C_SVLAN_SP2C_ENTRY1_CTRL0_DST_PORT1_MASK    0x200
+#define    RTL8367C_SVLAN_SP2C_ENTRY1_CTRL0_SVIDX_OFFSET    3
+#define    RTL8367C_SVLAN_SP2C_ENTRY1_CTRL0_SVIDX_MASK    0x1F8
+#define    RTL8367C_SVLAN_SP2C_ENTRY1_CTRL0_DST_PORT_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY1_CTRL0_DST_PORT_MASK    0x7
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY1_CTRL1    0x0f03
+#define    RTL8367C_SVLAN_SP2C_ENTRY1_CTRL1_VALID_OFFSET    12
+#define    RTL8367C_SVLAN_SP2C_ENTRY1_CTRL1_VALID_MASK    0x1000
+#define    RTL8367C_SVLAN_SP2C_ENTRY1_CTRL1_VID_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY1_CTRL1_VID_MASK    0xFFF
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY2_CTRL0    0x0f04
+#define    RTL8367C_SVLAN_SP2C_ENTRY2_CTRL0_DST_PORT1_OFFSET    9
+#define    RTL8367C_SVLAN_SP2C_ENTRY2_CTRL0_DST_PORT1_MASK    0x200
+#define    RTL8367C_SVLAN_SP2C_ENTRY2_CTRL0_SVIDX_OFFSET    3
+#define    RTL8367C_SVLAN_SP2C_ENTRY2_CTRL0_SVIDX_MASK    0x1F8
+#define    RTL8367C_SVLAN_SP2C_ENTRY2_CTRL0_DST_PORT_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY2_CTRL0_DST_PORT_MASK    0x7
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY2_CTRL1    0x0f05
+#define    RTL8367C_SVLAN_SP2C_ENTRY2_CTRL1_VALID_OFFSET    12
+#define    RTL8367C_SVLAN_SP2C_ENTRY2_CTRL1_VALID_MASK    0x1000
+#define    RTL8367C_SVLAN_SP2C_ENTRY2_CTRL1_VID_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY2_CTRL1_VID_MASK    0xFFF
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY3_CTRL0    0x0f06
+#define    RTL8367C_SVLAN_SP2C_ENTRY3_CTRL0_DST_PORT1_OFFSET    9
+#define    RTL8367C_SVLAN_SP2C_ENTRY3_CTRL0_DST_PORT1_MASK    0x200
+#define    RTL8367C_SVLAN_SP2C_ENTRY3_CTRL0_SVIDX_OFFSET    3
+#define    RTL8367C_SVLAN_SP2C_ENTRY3_CTRL0_SVIDX_MASK    0x1F8
+#define    RTL8367C_SVLAN_SP2C_ENTRY3_CTRL0_DST_PORT_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY3_CTRL0_DST_PORT_MASK    0x7
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY3_CTRL1    0x0f07
+#define    RTL8367C_SVLAN_SP2C_ENTRY3_CTRL1_VALID_OFFSET    12
+#define    RTL8367C_SVLAN_SP2C_ENTRY3_CTRL1_VALID_MASK    0x1000
+#define    RTL8367C_SVLAN_SP2C_ENTRY3_CTRL1_VID_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY3_CTRL1_VID_MASK    0xFFF
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY4_CTRL0    0x0f08
+#define    RTL8367C_SVLAN_SP2C_ENTRY4_CTRL0_DST_PORT1_OFFSET    9
+#define    RTL8367C_SVLAN_SP2C_ENTRY4_CTRL0_DST_PORT1_MASK    0x200
+#define    RTL8367C_SVLAN_SP2C_ENTRY4_CTRL0_SVIDX_OFFSET    3
+#define    RTL8367C_SVLAN_SP2C_ENTRY4_CTRL0_SVIDX_MASK    0x1F8
+#define    RTL8367C_SVLAN_SP2C_ENTRY4_CTRL0_DST_PORT_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY4_CTRL0_DST_PORT_MASK    0x7
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY4_CTRL1    0x0f09
+#define    RTL8367C_SVLAN_SP2C_ENTRY4_CTRL1_VALID_OFFSET    12
+#define    RTL8367C_SVLAN_SP2C_ENTRY4_CTRL1_VALID_MASK    0x1000
+#define    RTL8367C_SVLAN_SP2C_ENTRY4_CTRL1_VID_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY4_CTRL1_VID_MASK    0xFFF
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY5_CTRL0    0x0f0a
+#define    RTL8367C_SVLAN_SP2C_ENTRY5_CTRL0_DST_PORT1_OFFSET    9
+#define    RTL8367C_SVLAN_SP2C_ENTRY5_CTRL0_DST_PORT1_MASK    0x200
+#define    RTL8367C_SVLAN_SP2C_ENTRY5_CTRL0_SVIDX_OFFSET    3
+#define    RTL8367C_SVLAN_SP2C_ENTRY5_CTRL0_SVIDX_MASK    0x1F8
+#define    RTL8367C_SVLAN_SP2C_ENTRY5_CTRL0_DST_PORT_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY5_CTRL0_DST_PORT_MASK    0x7
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY5_CTRL1    0x0f0b
+#define    RTL8367C_SVLAN_SP2C_ENTRY5_CTRL1_VALID_OFFSET    12
+#define    RTL8367C_SVLAN_SP2C_ENTRY5_CTRL1_VALID_MASK    0x1000
+#define    RTL8367C_SVLAN_SP2C_ENTRY5_CTRL1_VID_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY5_CTRL1_VID_MASK    0xFFF
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY6_CTRL0    0x0f0c
+#define    RTL8367C_SVLAN_SP2C_ENTRY6_CTRL0_DST_PORT1_OFFSET    9
+#define    RTL8367C_SVLAN_SP2C_ENTRY6_CTRL0_DST_PORT1_MASK    0x200
+#define    RTL8367C_SVLAN_SP2C_ENTRY6_CTRL0_SVIDX_OFFSET    3
+#define    RTL8367C_SVLAN_SP2C_ENTRY6_CTRL0_SVIDX_MASK    0x1F8
+#define    RTL8367C_SVLAN_SP2C_ENTRY6_CTRL0_DST_PORT_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY6_CTRL0_DST_PORT_MASK    0x7
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY6_CTRL1    0x0f0d
+#define    RTL8367C_SVLAN_SP2C_ENTRY6_CTRL1_VALID_OFFSET    12
+#define    RTL8367C_SVLAN_SP2C_ENTRY6_CTRL1_VALID_MASK    0x1000
+#define    RTL8367C_SVLAN_SP2C_ENTRY6_CTRL1_VID_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY6_CTRL1_VID_MASK    0xFFF
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY7_CTRL0    0x0f0e
+#define    RTL8367C_SVLAN_SP2C_ENTRY7_CTRL0_DST_PORT1_OFFSET    9
+#define    RTL8367C_SVLAN_SP2C_ENTRY7_CTRL0_DST_PORT1_MASK    0x200
+#define    RTL8367C_SVLAN_SP2C_ENTRY7_CTRL0_SVIDX_OFFSET    3
+#define    RTL8367C_SVLAN_SP2C_ENTRY7_CTRL0_SVIDX_MASK    0x1F8
+#define    RTL8367C_SVLAN_SP2C_ENTRY7_CTRL0_DST_PORT_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY7_CTRL0_DST_PORT_MASK    0x7
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY7_CTRL1    0x0f0f
+#define    RTL8367C_SVLAN_SP2C_ENTRY7_CTRL1_VALID_OFFSET    12
+#define    RTL8367C_SVLAN_SP2C_ENTRY7_CTRL1_VALID_MASK    0x1000
+#define    RTL8367C_SVLAN_SP2C_ENTRY7_CTRL1_VID_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY7_CTRL1_VID_MASK    0xFFF
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY8_CTRL0    0x0f10
+#define    RTL8367C_SVLAN_SP2C_ENTRY8_CTRL0_DST_PORT1_OFFSET    9
+#define    RTL8367C_SVLAN_SP2C_ENTRY8_CTRL0_DST_PORT1_MASK    0x200
+#define    RTL8367C_SVLAN_SP2C_ENTRY8_CTRL0_SVIDX_OFFSET    3
+#define    RTL8367C_SVLAN_SP2C_ENTRY8_CTRL0_SVIDX_MASK    0x1F8
+#define    RTL8367C_SVLAN_SP2C_ENTRY8_CTRL0_DST_PORT_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY8_CTRL0_DST_PORT_MASK    0x7
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY8_CTRL1    0x0f11
+#define    RTL8367C_SVLAN_SP2C_ENTRY8_CTRL1_VALID_OFFSET    12
+#define    RTL8367C_SVLAN_SP2C_ENTRY8_CTRL1_VALID_MASK    0x1000
+#define    RTL8367C_SVLAN_SP2C_ENTRY8_CTRL1_VID_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY8_CTRL1_VID_MASK    0xFFF
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY9_CTRL0    0x0f12
+#define    RTL8367C_SVLAN_SP2C_ENTRY9_CTRL0_DST_PORT1_OFFSET    9
+#define    RTL8367C_SVLAN_SP2C_ENTRY9_CTRL0_DST_PORT1_MASK    0x200
+#define    RTL8367C_SVLAN_SP2C_ENTRY9_CTRL0_SVIDX_OFFSET    3
+#define    RTL8367C_SVLAN_SP2C_ENTRY9_CTRL0_SVIDX_MASK    0x1F8
+#define    RTL8367C_SVLAN_SP2C_ENTRY9_CTRL0_DST_PORT_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY9_CTRL0_DST_PORT_MASK    0x7
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY9_CTRL1    0x0f13
+#define    RTL8367C_SVLAN_SP2C_ENTRY9_CTRL1_VALID_OFFSET    12
+#define    RTL8367C_SVLAN_SP2C_ENTRY9_CTRL1_VALID_MASK    0x1000
+#define    RTL8367C_SVLAN_SP2C_ENTRY9_CTRL1_VID_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY9_CTRL1_VID_MASK    0xFFF
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY10_CTRL0    0x0f14
+#define    RTL8367C_SVLAN_SP2C_ENTRY10_CTRL0_DST_PORT1_OFFSET    9
+#define    RTL8367C_SVLAN_SP2C_ENTRY10_CTRL0_DST_PORT1_MASK    0x200
+#define    RTL8367C_SVLAN_SP2C_ENTRY10_CTRL0_SVIDX_OFFSET    3
+#define    RTL8367C_SVLAN_SP2C_ENTRY10_CTRL0_SVIDX_MASK    0x1F8
+#define    RTL8367C_SVLAN_SP2C_ENTRY10_CTRL0_DST_PORT_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY10_CTRL0_DST_PORT_MASK    0x7
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY10_CTRL1    0x0f15
+#define    RTL8367C_SVLAN_SP2C_ENTRY10_CTRL1_VALID_OFFSET    12
+#define    RTL8367C_SVLAN_SP2C_ENTRY10_CTRL1_VALID_MASK    0x1000
+#define    RTL8367C_SVLAN_SP2C_ENTRY10_CTRL1_VID_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY10_CTRL1_VID_MASK    0xFFF
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY11_CTRL0    0x0f16
+#define    RTL8367C_SVLAN_SP2C_ENTRY11_CTRL0_DST_PORT1_OFFSET    9
+#define    RTL8367C_SVLAN_SP2C_ENTRY11_CTRL0_DST_PORT1_MASK    0x200
+#define    RTL8367C_SVLAN_SP2C_ENTRY11_CTRL0_SVIDX_OFFSET    3
+#define    RTL8367C_SVLAN_SP2C_ENTRY11_CTRL0_SVIDX_MASK    0x1F8
+#define    RTL8367C_SVLAN_SP2C_ENTRY11_CTRL0_DST_PORT_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY11_CTRL0_DST_PORT_MASK    0x7
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY11_CTRL1    0x0f17
+#define    RTL8367C_SVLAN_SP2C_ENTRY11_CTRL1_VALID_OFFSET    12
+#define    RTL8367C_SVLAN_SP2C_ENTRY11_CTRL1_VALID_MASK    0x1000
+#define    RTL8367C_SVLAN_SP2C_ENTRY11_CTRL1_VID_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY11_CTRL1_VID_MASK    0xFFF
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY12_CTRL0    0x0f18
+#define    RTL8367C_SVLAN_SP2C_ENTRY12_CTRL0_DST_PORT1_OFFSET    9
+#define    RTL8367C_SVLAN_SP2C_ENTRY12_CTRL0_DST_PORT1_MASK    0x200
+#define    RTL8367C_SVLAN_SP2C_ENTRY12_CTRL0_SVIDX_OFFSET    3
+#define    RTL8367C_SVLAN_SP2C_ENTRY12_CTRL0_SVIDX_MASK    0x1F8
+#define    RTL8367C_SVLAN_SP2C_ENTRY12_CTRL0_DST_PORT_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY12_CTRL0_DST_PORT_MASK    0x7
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY12_CTRL1    0x0f19
+#define    RTL8367C_SVLAN_SP2C_ENTRY12_CTRL1_VALID_OFFSET    12
+#define    RTL8367C_SVLAN_SP2C_ENTRY12_CTRL1_VALID_MASK    0x1000
+#define    RTL8367C_SVLAN_SP2C_ENTRY12_CTRL1_VID_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY12_CTRL1_VID_MASK    0xFFF
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY13_CTRL0    0x0f1a
+#define    RTL8367C_SVLAN_SP2C_ENTRY13_CTRL0_DST_PORT1_OFFSET    9
+#define    RTL8367C_SVLAN_SP2C_ENTRY13_CTRL0_DST_PORT1_MASK    0x200
+#define    RTL8367C_SVLAN_SP2C_ENTRY13_CTRL0_SVIDX_OFFSET    3
+#define    RTL8367C_SVLAN_SP2C_ENTRY13_CTRL0_SVIDX_MASK    0x1F8
+#define    RTL8367C_SVLAN_SP2C_ENTRY13_CTRL0_DST_PORT_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY13_CTRL0_DST_PORT_MASK    0x7
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY13_CTRL1    0x0f1b
+#define    RTL8367C_SVLAN_SP2C_ENTRY13_CTRL1_VALID_OFFSET    12
+#define    RTL8367C_SVLAN_SP2C_ENTRY13_CTRL1_VALID_MASK    0x1000
+#define    RTL8367C_SVLAN_SP2C_ENTRY13_CTRL1_VID_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY13_CTRL1_VID_MASK    0xFFF
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY14_CTRL0    0x0f1c
+#define    RTL8367C_SVLAN_SP2C_ENTRY14_CTRL0_DST_PORT1_OFFSET    9
+#define    RTL8367C_SVLAN_SP2C_ENTRY14_CTRL0_DST_PORT1_MASK    0x200
+#define    RTL8367C_SVLAN_SP2C_ENTRY14_CTRL0_SVIDX_OFFSET    3
+#define    RTL8367C_SVLAN_SP2C_ENTRY14_CTRL0_SVIDX_MASK    0x1F8
+#define    RTL8367C_SVLAN_SP2C_ENTRY14_CTRL0_DST_PORT_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY14_CTRL0_DST_PORT_MASK    0x7
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY14_CTRL1    0x0f1d
+#define    RTL8367C_SVLAN_SP2C_ENTRY14_CTRL1_VALID_OFFSET    12
+#define    RTL8367C_SVLAN_SP2C_ENTRY14_CTRL1_VALID_MASK    0x1000
+#define    RTL8367C_SVLAN_SP2C_ENTRY14_CTRL1_VID_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY14_CTRL1_VID_MASK    0xFFF
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY15_CTRL0    0x0f1e
+#define    RTL8367C_SVLAN_SP2C_ENTRY15_CTRL0_DST_PORT1_OFFSET    9
+#define    RTL8367C_SVLAN_SP2C_ENTRY15_CTRL0_DST_PORT1_MASK    0x200
+#define    RTL8367C_SVLAN_SP2C_ENTRY15_CTRL0_SVIDX_OFFSET    3
+#define    RTL8367C_SVLAN_SP2C_ENTRY15_CTRL0_SVIDX_MASK    0x1F8
+#define    RTL8367C_SVLAN_SP2C_ENTRY15_CTRL0_DST_PORT_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY15_CTRL0_DST_PORT_MASK    0x7
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY15_CTRL1    0x0f1f
+#define    RTL8367C_SVLAN_SP2C_ENTRY15_CTRL1_VALID_OFFSET    12
+#define    RTL8367C_SVLAN_SP2C_ENTRY15_CTRL1_VALID_MASK    0x1000
+#define    RTL8367C_SVLAN_SP2C_ENTRY15_CTRL1_VID_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY15_CTRL1_VID_MASK    0xFFF
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY16_CTRL0    0x0f20
+#define    RTL8367C_SVLAN_SP2C_ENTRY16_CTRL0_DST_PORT1_OFFSET    9
+#define    RTL8367C_SVLAN_SP2C_ENTRY16_CTRL0_DST_PORT1_MASK    0x200
+#define    RTL8367C_SVLAN_SP2C_ENTRY16_CTRL0_SVIDX_OFFSET    3
+#define    RTL8367C_SVLAN_SP2C_ENTRY16_CTRL0_SVIDX_MASK    0x1F8
+#define    RTL8367C_SVLAN_SP2C_ENTRY16_CTRL0_DST_PORT_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY16_CTRL0_DST_PORT_MASK    0x7
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY16_CTRL1    0x0f21
+#define    RTL8367C_SVLAN_SP2C_ENTRY16_CTRL1_VALID_OFFSET    12
+#define    RTL8367C_SVLAN_SP2C_ENTRY16_CTRL1_VALID_MASK    0x1000
+#define    RTL8367C_SVLAN_SP2C_ENTRY16_CTRL1_VID_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY16_CTRL1_VID_MASK    0xFFF
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY17_CTRL0    0x0f22
+#define    RTL8367C_SVLAN_SP2C_ENTRY17_CTRL0_DST_PORT1_OFFSET    9
+#define    RTL8367C_SVLAN_SP2C_ENTRY17_CTRL0_DST_PORT1_MASK    0x200
+#define    RTL8367C_SVLAN_SP2C_ENTRY17_CTRL0_SVIDX_OFFSET    3
+#define    RTL8367C_SVLAN_SP2C_ENTRY17_CTRL0_SVIDX_MASK    0x1F8
+#define    RTL8367C_SVLAN_SP2C_ENTRY17_CTRL0_DST_PORT_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY17_CTRL0_DST_PORT_MASK    0x7
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY17_CTRL1    0x0f23
+#define    RTL8367C_SVLAN_SP2C_ENTRY17_CTRL1_VALID_OFFSET    12
+#define    RTL8367C_SVLAN_SP2C_ENTRY17_CTRL1_VALID_MASK    0x1000
+#define    RTL8367C_SVLAN_SP2C_ENTRY17_CTRL1_VID_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY17_CTRL1_VID_MASK    0xFFF
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY18_CTRL0    0x0f24
+#define    RTL8367C_SVLAN_SP2C_ENTRY18_CTRL0_DST_PORT1_OFFSET    9
+#define    RTL8367C_SVLAN_SP2C_ENTRY18_CTRL0_DST_PORT1_MASK    0x200
+#define    RTL8367C_SVLAN_SP2C_ENTRY18_CTRL0_SVIDX_OFFSET    3
+#define    RTL8367C_SVLAN_SP2C_ENTRY18_CTRL0_SVIDX_MASK    0x1F8
+#define    RTL8367C_SVLAN_SP2C_ENTRY18_CTRL0_DST_PORT_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY18_CTRL0_DST_PORT_MASK    0x7
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY18_CTRL1    0x0f25
+#define    RTL8367C_SVLAN_SP2C_ENTRY18_CTRL1_VALID_OFFSET    12
+#define    RTL8367C_SVLAN_SP2C_ENTRY18_CTRL1_VALID_MASK    0x1000
+#define    RTL8367C_SVLAN_SP2C_ENTRY18_CTRL1_VID_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY18_CTRL1_VID_MASK    0xFFF
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY19_CTRL0    0x0f26
+#define    RTL8367C_SVLAN_SP2C_ENTRY19_CTRL0_DST_PORT1_OFFSET    9
+#define    RTL8367C_SVLAN_SP2C_ENTRY19_CTRL0_DST_PORT1_MASK    0x200
+#define    RTL8367C_SVLAN_SP2C_ENTRY19_CTRL0_SVIDX_OFFSET    3
+#define    RTL8367C_SVLAN_SP2C_ENTRY19_CTRL0_SVIDX_MASK    0x1F8
+#define    RTL8367C_SVLAN_SP2C_ENTRY19_CTRL0_DST_PORT_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY19_CTRL0_DST_PORT_MASK    0x7
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY19_CTRL1    0x0f27
+#define    RTL8367C_SVLAN_SP2C_ENTRY19_CTRL1_VALID_OFFSET    12
+#define    RTL8367C_SVLAN_SP2C_ENTRY19_CTRL1_VALID_MASK    0x1000
+#define    RTL8367C_SVLAN_SP2C_ENTRY19_CTRL1_VID_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY19_CTRL1_VID_MASK    0xFFF
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY20_CTRL0    0x0f28
+#define    RTL8367C_SVLAN_SP2C_ENTRY20_CTRL0_DST_PORT1_OFFSET    9
+#define    RTL8367C_SVLAN_SP2C_ENTRY20_CTRL0_DST_PORT1_MASK    0x200
+#define    RTL8367C_SVLAN_SP2C_ENTRY20_CTRL0_SVIDX_OFFSET    3
+#define    RTL8367C_SVLAN_SP2C_ENTRY20_CTRL0_SVIDX_MASK    0x1F8
+#define    RTL8367C_SVLAN_SP2C_ENTRY20_CTRL0_DST_PORT_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY20_CTRL0_DST_PORT_MASK    0x7
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY20_CTRL1    0x0f29
+#define    RTL8367C_SVLAN_SP2C_ENTRY20_CTRL1_VALID_OFFSET    12
+#define    RTL8367C_SVLAN_SP2C_ENTRY20_CTRL1_VALID_MASK    0x1000
+#define    RTL8367C_SVLAN_SP2C_ENTRY20_CTRL1_VID_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY20_CTRL1_VID_MASK    0xFFF
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY21_CTRL0    0x0f2a
+#define    RTL8367C_SVLAN_SP2C_ENTRY21_CTRL0_DST_PORT1_OFFSET    9
+#define    RTL8367C_SVLAN_SP2C_ENTRY21_CTRL0_DST_PORT1_MASK    0x200
+#define    RTL8367C_SVLAN_SP2C_ENTRY21_CTRL0_SVIDX_OFFSET    3
+#define    RTL8367C_SVLAN_SP2C_ENTRY21_CTRL0_SVIDX_MASK    0x1F8
+#define    RTL8367C_SVLAN_SP2C_ENTRY21_CTRL0_DST_PORT_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY21_CTRL0_DST_PORT_MASK    0x7
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY21_CTRL1    0x0f2b
+#define    RTL8367C_SVLAN_SP2C_ENTRY21_CTRL1_VALID_OFFSET    12
+#define    RTL8367C_SVLAN_SP2C_ENTRY21_CTRL1_VALID_MASK    0x1000
+#define    RTL8367C_SVLAN_SP2C_ENTRY21_CTRL1_VID_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY21_CTRL1_VID_MASK    0xFFF
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY22_CTRL0    0x0f2c
+#define    RTL8367C_SVLAN_SP2C_ENTRY22_CTRL0_DST_PORT1_OFFSET    9
+#define    RTL8367C_SVLAN_SP2C_ENTRY22_CTRL0_DST_PORT1_MASK    0x200
+#define    RTL8367C_SVLAN_SP2C_ENTRY22_CTRL0_SVIDX_OFFSET    3
+#define    RTL8367C_SVLAN_SP2C_ENTRY22_CTRL0_SVIDX_MASK    0x1F8
+#define    RTL8367C_SVLAN_SP2C_ENTRY22_CTRL0_DST_PORT_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY22_CTRL0_DST_PORT_MASK    0x7
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY22_CTRL1    0x0f2d
+#define    RTL8367C_SVLAN_SP2C_ENTRY22_CTRL1_VALID_OFFSET    12
+#define    RTL8367C_SVLAN_SP2C_ENTRY22_CTRL1_VALID_MASK    0x1000
+#define    RTL8367C_SVLAN_SP2C_ENTRY22_CTRL1_VID_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY22_CTRL1_VID_MASK    0xFFF
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY23_CTRL0    0x0f2e
+#define    RTL8367C_SVLAN_SP2C_ENTRY23_CTRL0_DST_PORT1_OFFSET    9
+#define    RTL8367C_SVLAN_SP2C_ENTRY23_CTRL0_DST_PORT1_MASK    0x200
+#define    RTL8367C_SVLAN_SP2C_ENTRY23_CTRL0_SVIDX_OFFSET    3
+#define    RTL8367C_SVLAN_SP2C_ENTRY23_CTRL0_SVIDX_MASK    0x1F8
+#define    RTL8367C_SVLAN_SP2C_ENTRY23_CTRL0_DST_PORT_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY23_CTRL0_DST_PORT_MASK    0x7
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY23_CTRL1    0x0f2f
+#define    RTL8367C_SVLAN_SP2C_ENTRY23_CTRL1_VALID_OFFSET    12
+#define    RTL8367C_SVLAN_SP2C_ENTRY23_CTRL1_VALID_MASK    0x1000
+#define    RTL8367C_SVLAN_SP2C_ENTRY23_CTRL1_VID_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY23_CTRL1_VID_MASK    0xFFF
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY24_CTRL0    0x0f30
+#define    RTL8367C_SVLAN_SP2C_ENTRY24_CTRL0_DST_PORT1_OFFSET    9
+#define    RTL8367C_SVLAN_SP2C_ENTRY24_CTRL0_DST_PORT1_MASK    0x200
+#define    RTL8367C_SVLAN_SP2C_ENTRY24_CTRL0_SVIDX_OFFSET    3
+#define    RTL8367C_SVLAN_SP2C_ENTRY24_CTRL0_SVIDX_MASK    0x1F8
+#define    RTL8367C_SVLAN_SP2C_ENTRY24_CTRL0_DST_PORT_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY24_CTRL0_DST_PORT_MASK    0x7
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY24_CTRL1    0x0f31
+#define    RTL8367C_SVLAN_SP2C_ENTRY24_CTRL1_VALID_OFFSET    12
+#define    RTL8367C_SVLAN_SP2C_ENTRY24_CTRL1_VALID_MASK    0x1000
+#define    RTL8367C_SVLAN_SP2C_ENTRY24_CTRL1_VID_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY24_CTRL1_VID_MASK    0xFFF
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY25_CTRL0    0x0f32
+#define    RTL8367C_SVLAN_SP2C_ENTRY25_CTRL0_DST_PORT1_OFFSET    9
+#define    RTL8367C_SVLAN_SP2C_ENTRY25_CTRL0_DST_PORT1_MASK    0x200
+#define    RTL8367C_SVLAN_SP2C_ENTRY25_CTRL0_SVIDX_OFFSET    3
+#define    RTL8367C_SVLAN_SP2C_ENTRY25_CTRL0_SVIDX_MASK    0x1F8
+#define    RTL8367C_SVLAN_SP2C_ENTRY25_CTRL0_DST_PORT_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY25_CTRL0_DST_PORT_MASK    0x7
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY25_CTRL1    0x0f33
+#define    RTL8367C_SVLAN_SP2C_ENTRY25_CTRL1_VALID_OFFSET    12
+#define    RTL8367C_SVLAN_SP2C_ENTRY25_CTRL1_VALID_MASK    0x1000
+#define    RTL8367C_SVLAN_SP2C_ENTRY25_CTRL1_VID_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY25_CTRL1_VID_MASK    0xFFF
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY26_CTRL0    0x0f34
+#define    RTL8367C_SVLAN_SP2C_ENTRY26_CTRL0_DST_PORT1_OFFSET    9
+#define    RTL8367C_SVLAN_SP2C_ENTRY26_CTRL0_DST_PORT1_MASK    0x200
+#define    RTL8367C_SVLAN_SP2C_ENTRY26_CTRL0_SVIDX_OFFSET    3
+#define    RTL8367C_SVLAN_SP2C_ENTRY26_CTRL0_SVIDX_MASK    0x1F8
+#define    RTL8367C_SVLAN_SP2C_ENTRY26_CTRL0_DST_PORT_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY26_CTRL0_DST_PORT_MASK    0x7
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY26_CTRL1    0x0f35
+#define    RTL8367C_SVLAN_SP2C_ENTRY26_CTRL1_VALID_OFFSET    12
+#define    RTL8367C_SVLAN_SP2C_ENTRY26_CTRL1_VALID_MASK    0x1000
+#define    RTL8367C_SVLAN_SP2C_ENTRY26_CTRL1_VID_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY26_CTRL1_VID_MASK    0xFFF
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY27_CTRL0    0x0f36
+#define    RTL8367C_SVLAN_SP2C_ENTRY27_CTRL0_DST_PORT1_OFFSET    9
+#define    RTL8367C_SVLAN_SP2C_ENTRY27_CTRL0_DST_PORT1_MASK    0x200
+#define    RTL8367C_SVLAN_SP2C_ENTRY27_CTRL0_SVIDX_OFFSET    3
+#define    RTL8367C_SVLAN_SP2C_ENTRY27_CTRL0_SVIDX_MASK    0x1F8
+#define    RTL8367C_SVLAN_SP2C_ENTRY27_CTRL0_DST_PORT_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY27_CTRL0_DST_PORT_MASK    0x7
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY27_CTRL1    0x0f37
+#define    RTL8367C_SVLAN_SP2C_ENTRY27_CTRL1_VALID_OFFSET    12
+#define    RTL8367C_SVLAN_SP2C_ENTRY27_CTRL1_VALID_MASK    0x1000
+#define    RTL8367C_SVLAN_SP2C_ENTRY27_CTRL1_VID_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY27_CTRL1_VID_MASK    0xFFF
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY28_CTRL0    0x0f38
+#define    RTL8367C_SVLAN_SP2C_ENTRY28_CTRL0_DST_PORT1_OFFSET    9
+#define    RTL8367C_SVLAN_SP2C_ENTRY28_CTRL0_DST_PORT1_MASK    0x200
+#define    RTL8367C_SVLAN_SP2C_ENTRY28_CTRL0_SVIDX_OFFSET    3
+#define    RTL8367C_SVLAN_SP2C_ENTRY28_CTRL0_SVIDX_MASK    0x1F8
+#define    RTL8367C_SVLAN_SP2C_ENTRY28_CTRL0_DST_PORT_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY28_CTRL0_DST_PORT_MASK    0x7
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY28_CTRL1    0x0f39
+#define    RTL8367C_SVLAN_SP2C_ENTRY28_CTRL1_VALID_OFFSET    12
+#define    RTL8367C_SVLAN_SP2C_ENTRY28_CTRL1_VALID_MASK    0x1000
+#define    RTL8367C_SVLAN_SP2C_ENTRY28_CTRL1_VID_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY28_CTRL1_VID_MASK    0xFFF
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY29_CTRL0    0x0f3a
+#define    RTL8367C_SVLAN_SP2C_ENTRY29_CTRL0_DST_PORT1_OFFSET    9
+#define    RTL8367C_SVLAN_SP2C_ENTRY29_CTRL0_DST_PORT1_MASK    0x200
+#define    RTL8367C_SVLAN_SP2C_ENTRY29_CTRL0_SVIDX_OFFSET    3
+#define    RTL8367C_SVLAN_SP2C_ENTRY29_CTRL0_SVIDX_MASK    0x1F8
+#define    RTL8367C_SVLAN_SP2C_ENTRY29_CTRL0_DST_PORT_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY29_CTRL0_DST_PORT_MASK    0x7
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY29_CTRL1    0x0f3b
+#define    RTL8367C_SVLAN_SP2C_ENTRY29_CTRL1_VALID_OFFSET    12
+#define    RTL8367C_SVLAN_SP2C_ENTRY29_CTRL1_VALID_MASK    0x1000
+#define    RTL8367C_SVLAN_SP2C_ENTRY29_CTRL1_VID_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY29_CTRL1_VID_MASK    0xFFF
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY30_CTRL0    0x0f3c
+#define    RTL8367C_SVLAN_SP2C_ENTRY30_CTRL0_DST_PORT1_OFFSET    9
+#define    RTL8367C_SVLAN_SP2C_ENTRY30_CTRL0_DST_PORT1_MASK    0x200
+#define    RTL8367C_SVLAN_SP2C_ENTRY30_CTRL0_SVIDX_OFFSET    3
+#define    RTL8367C_SVLAN_SP2C_ENTRY30_CTRL0_SVIDX_MASK    0x1F8
+#define    RTL8367C_SVLAN_SP2C_ENTRY30_CTRL0_DST_PORT_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY30_CTRL0_DST_PORT_MASK    0x7
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY30_CTRL1    0x0f3d
+#define    RTL8367C_SVLAN_SP2C_ENTRY30_CTRL1_VALID_OFFSET    12
+#define    RTL8367C_SVLAN_SP2C_ENTRY30_CTRL1_VALID_MASK    0x1000
+#define    RTL8367C_SVLAN_SP2C_ENTRY30_CTRL1_VID_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY30_CTRL1_VID_MASK    0xFFF
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY31_CTRL0    0x0f3e
+#define    RTL8367C_SVLAN_SP2C_ENTRY31_CTRL0_DST_PORT1_OFFSET    9
+#define    RTL8367C_SVLAN_SP2C_ENTRY31_CTRL0_DST_PORT1_MASK    0x200
+#define    RTL8367C_SVLAN_SP2C_ENTRY31_CTRL0_SVIDX_OFFSET    3
+#define    RTL8367C_SVLAN_SP2C_ENTRY31_CTRL0_SVIDX_MASK    0x1F8
+#define    RTL8367C_SVLAN_SP2C_ENTRY31_CTRL0_DST_PORT_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY31_CTRL0_DST_PORT_MASK    0x7
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY31_CTRL1    0x0f3f
+#define    RTL8367C_SVLAN_SP2C_ENTRY31_CTRL1_VALID_OFFSET    12
+#define    RTL8367C_SVLAN_SP2C_ENTRY31_CTRL1_VALID_MASK    0x1000
+#define    RTL8367C_SVLAN_SP2C_ENTRY31_CTRL1_VID_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY31_CTRL1_VID_MASK    0xFFF
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY32_CTRL0    0x0f40
+#define    RTL8367C_SVLAN_SP2C_ENTRY32_CTRL0_DST_PORT1_OFFSET    9
+#define    RTL8367C_SVLAN_SP2C_ENTRY32_CTRL0_DST_PORT1_MASK    0x200
+#define    RTL8367C_SVLAN_SP2C_ENTRY32_CTRL0_SVIDX_OFFSET    3
+#define    RTL8367C_SVLAN_SP2C_ENTRY32_CTRL0_SVIDX_MASK    0x1F8
+#define    RTL8367C_SVLAN_SP2C_ENTRY32_CTRL0_DST_PORT_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY32_CTRL0_DST_PORT_MASK    0x7
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY32_CTRL1    0x0f41
+#define    RTL8367C_SVLAN_SP2C_ENTRY32_CTRL1_VALID_OFFSET    12
+#define    RTL8367C_SVLAN_SP2C_ENTRY32_CTRL1_VALID_MASK    0x1000
+#define    RTL8367C_SVLAN_SP2C_ENTRY32_CTRL1_VID_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY32_CTRL1_VID_MASK    0xFFF
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY33_CTRL0    0x0f42
+#define    RTL8367C_SVLAN_SP2C_ENTRY33_CTRL0_DST_PORT1_OFFSET    9
+#define    RTL8367C_SVLAN_SP2C_ENTRY33_CTRL0_DST_PORT1_MASK    0x200
+#define    RTL8367C_SVLAN_SP2C_ENTRY33_CTRL0_SVIDX_OFFSET    3
+#define    RTL8367C_SVLAN_SP2C_ENTRY33_CTRL0_SVIDX_MASK    0x1F8
+#define    RTL8367C_SVLAN_SP2C_ENTRY33_CTRL0_DST_PORT_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY33_CTRL0_DST_PORT_MASK    0x7
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY33_CTRL1    0x0f43
+#define    RTL8367C_SVLAN_SP2C_ENTRY33_CTRL1_VALID_OFFSET    12
+#define    RTL8367C_SVLAN_SP2C_ENTRY33_CTRL1_VALID_MASK    0x1000
+#define    RTL8367C_SVLAN_SP2C_ENTRY33_CTRL1_VID_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY33_CTRL1_VID_MASK    0xFFF
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY34_CTRL0    0x0f44
+#define    RTL8367C_SVLAN_SP2C_ENTRY34_CTRL0_DST_PORT1_OFFSET    9
+#define    RTL8367C_SVLAN_SP2C_ENTRY34_CTRL0_DST_PORT1_MASK    0x200
+#define    RTL8367C_SVLAN_SP2C_ENTRY34_CTRL0_SVIDX_OFFSET    3
+#define    RTL8367C_SVLAN_SP2C_ENTRY34_CTRL0_SVIDX_MASK    0x1F8
+#define    RTL8367C_SVLAN_SP2C_ENTRY34_CTRL0_DST_PORT_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY34_CTRL0_DST_PORT_MASK    0x7
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY34_CTRL1    0x0f45
+#define    RTL8367C_SVLAN_SP2C_ENTRY34_CTRL1_VALID_OFFSET    12
+#define    RTL8367C_SVLAN_SP2C_ENTRY34_CTRL1_VALID_MASK    0x1000
+#define    RTL8367C_SVLAN_SP2C_ENTRY34_CTRL1_VID_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY34_CTRL1_VID_MASK    0xFFF
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY35_CTRL0    0x0f46
+#define    RTL8367C_SVLAN_SP2C_ENTRY35_CTRL0_DST_PORT1_OFFSET    9
+#define    RTL8367C_SVLAN_SP2C_ENTRY35_CTRL0_DST_PORT1_MASK    0x200
+#define    RTL8367C_SVLAN_SP2C_ENTRY35_CTRL0_SVIDX_OFFSET    3
+#define    RTL8367C_SVLAN_SP2C_ENTRY35_CTRL0_SVIDX_MASK    0x1F8
+#define    RTL8367C_SVLAN_SP2C_ENTRY35_CTRL0_DST_PORT_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY35_CTRL0_DST_PORT_MASK    0x7
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY35_CTRL1    0x0f47
+#define    RTL8367C_SVLAN_SP2C_ENTRY35_CTRL1_VALID_OFFSET    12
+#define    RTL8367C_SVLAN_SP2C_ENTRY35_CTRL1_VALID_MASK    0x1000
+#define    RTL8367C_SVLAN_SP2C_ENTRY35_CTRL1_VID_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY35_CTRL1_VID_MASK    0xFFF
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY36_CTRL0    0x0f48
+#define    RTL8367C_SVLAN_SP2C_ENTRY36_CTRL0_DST_PORT1_OFFSET    9
+#define    RTL8367C_SVLAN_SP2C_ENTRY36_CTRL0_DST_PORT1_MASK    0x200
+#define    RTL8367C_SVLAN_SP2C_ENTRY36_CTRL0_SVIDX_OFFSET    3
+#define    RTL8367C_SVLAN_SP2C_ENTRY36_CTRL0_SVIDX_MASK    0x1F8
+#define    RTL8367C_SVLAN_SP2C_ENTRY36_CTRL0_DST_PORT_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY36_CTRL0_DST_PORT_MASK    0x7
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY36_CTRL1    0x0f49
+#define    RTL8367C_SVLAN_SP2C_ENTRY36_CTRL1_VALID_OFFSET    12
+#define    RTL8367C_SVLAN_SP2C_ENTRY36_CTRL1_VALID_MASK    0x1000
+#define    RTL8367C_SVLAN_SP2C_ENTRY36_CTRL1_VID_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY36_CTRL1_VID_MASK    0xFFF
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY37_CTRL0    0x0f4a
+#define    RTL8367C_SVLAN_SP2C_ENTRY37_CTRL0_DST_PORT1_OFFSET    9
+#define    RTL8367C_SVLAN_SP2C_ENTRY37_CTRL0_DST_PORT1_MASK    0x200
+#define    RTL8367C_SVLAN_SP2C_ENTRY37_CTRL0_SVIDX_OFFSET    3
+#define    RTL8367C_SVLAN_SP2C_ENTRY37_CTRL0_SVIDX_MASK    0x1F8
+#define    RTL8367C_SVLAN_SP2C_ENTRY37_CTRL0_DST_PORT_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY37_CTRL0_DST_PORT_MASK    0x7
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY37_CTRL1    0x0f4b
+#define    RTL8367C_SVLAN_SP2C_ENTRY37_CTRL1_VALID_OFFSET    12
+#define    RTL8367C_SVLAN_SP2C_ENTRY37_CTRL1_VALID_MASK    0x1000
+#define    RTL8367C_SVLAN_SP2C_ENTRY37_CTRL1_VID_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY37_CTRL1_VID_MASK    0xFFF
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY38_CTRL0    0x0f4c
+#define    RTL8367C_SVLAN_SP2C_ENTRY38_CTRL0_DST_PORT1_OFFSET    9
+#define    RTL8367C_SVLAN_SP2C_ENTRY38_CTRL0_DST_PORT1_MASK    0x200
+#define    RTL8367C_SVLAN_SP2C_ENTRY38_CTRL0_SVIDX_OFFSET    3
+#define    RTL8367C_SVLAN_SP2C_ENTRY38_CTRL0_SVIDX_MASK    0x1F8
+#define    RTL8367C_SVLAN_SP2C_ENTRY38_CTRL0_DST_PORT_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY38_CTRL0_DST_PORT_MASK    0x7
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY38_CTRL1    0x0f4d
+#define    RTL8367C_SVLAN_SP2C_ENTRY38_CTRL1_VALID_OFFSET    12
+#define    RTL8367C_SVLAN_SP2C_ENTRY38_CTRL1_VALID_MASK    0x1000
+#define    RTL8367C_SVLAN_SP2C_ENTRY38_CTRL1_VID_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY38_CTRL1_VID_MASK    0xFFF
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY39_CTRL0    0x0f4e
+#define    RTL8367C_SVLAN_SP2C_ENTRY39_CTRL0_DST_PORT1_OFFSET    9
+#define    RTL8367C_SVLAN_SP2C_ENTRY39_CTRL0_DST_PORT1_MASK    0x200
+#define    RTL8367C_SVLAN_SP2C_ENTRY39_CTRL0_SVIDX_OFFSET    3
+#define    RTL8367C_SVLAN_SP2C_ENTRY39_CTRL0_SVIDX_MASK    0x1F8
+#define    RTL8367C_SVLAN_SP2C_ENTRY39_CTRL0_DST_PORT_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY39_CTRL0_DST_PORT_MASK    0x7
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY39_CTRL1    0x0f4f
+#define    RTL8367C_SVLAN_SP2C_ENTRY39_CTRL1_VALID_OFFSET    12
+#define    RTL8367C_SVLAN_SP2C_ENTRY39_CTRL1_VALID_MASK    0x1000
+#define    RTL8367C_SVLAN_SP2C_ENTRY39_CTRL1_VID_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY39_CTRL1_VID_MASK    0xFFF
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY40_CTRL0    0x0f50
+#define    RTL8367C_SVLAN_SP2C_ENTRY40_CTRL0_DST_PORT1_OFFSET    9
+#define    RTL8367C_SVLAN_SP2C_ENTRY40_CTRL0_DST_PORT1_MASK    0x200
+#define    RTL8367C_SVLAN_SP2C_ENTRY40_CTRL0_SVIDX_OFFSET    3
+#define    RTL8367C_SVLAN_SP2C_ENTRY40_CTRL0_SVIDX_MASK    0x1F8
+#define    RTL8367C_SVLAN_SP2C_ENTRY40_CTRL0_DST_PORT_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY40_CTRL0_DST_PORT_MASK    0x7
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY40_CTRL1    0x0f51
+#define    RTL8367C_SVLAN_SP2C_ENTRY40_CTRL1_VALID_OFFSET    12
+#define    RTL8367C_SVLAN_SP2C_ENTRY40_CTRL1_VALID_MASK    0x1000
+#define    RTL8367C_SVLAN_SP2C_ENTRY40_CTRL1_VID_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY40_CTRL1_VID_MASK    0xFFF
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY41_CTRL0    0x0f52
+#define    RTL8367C_SVLAN_SP2C_ENTRY41_CTRL0_DST_PORT1_OFFSET    9
+#define    RTL8367C_SVLAN_SP2C_ENTRY41_CTRL0_DST_PORT1_MASK    0x200
+#define    RTL8367C_SVLAN_SP2C_ENTRY41_CTRL0_SVIDX_OFFSET    3
+#define    RTL8367C_SVLAN_SP2C_ENTRY41_CTRL0_SVIDX_MASK    0x1F8
+#define    RTL8367C_SVLAN_SP2C_ENTRY41_CTRL0_DST_PORT_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY41_CTRL0_DST_PORT_MASK    0x7
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY41_CTRL1    0x0f53
+#define    RTL8367C_SVLAN_SP2C_ENTRY41_CTRL1_VALID_OFFSET    12
+#define    RTL8367C_SVLAN_SP2C_ENTRY41_CTRL1_VALID_MASK    0x1000
+#define    RTL8367C_SVLAN_SP2C_ENTRY41_CTRL1_VID_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY41_CTRL1_VID_MASK    0xFFF
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY42_CTRL0    0x0f54
+#define    RTL8367C_SVLAN_SP2C_ENTRY42_CTRL0_DST_PORT1_OFFSET    9
+#define    RTL8367C_SVLAN_SP2C_ENTRY42_CTRL0_DST_PORT1_MASK    0x200
+#define    RTL8367C_SVLAN_SP2C_ENTRY42_CTRL0_SVIDX_OFFSET    3
+#define    RTL8367C_SVLAN_SP2C_ENTRY42_CTRL0_SVIDX_MASK    0x1F8
+#define    RTL8367C_SVLAN_SP2C_ENTRY42_CTRL0_DST_PORT_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY42_CTRL0_DST_PORT_MASK    0x7
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY42_CTRL1    0x0f55
+#define    RTL8367C_SVLAN_SP2C_ENTRY42_CTRL1_VALID_OFFSET    12
+#define    RTL8367C_SVLAN_SP2C_ENTRY42_CTRL1_VALID_MASK    0x1000
+#define    RTL8367C_SVLAN_SP2C_ENTRY42_CTRL1_VID_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY42_CTRL1_VID_MASK    0xFFF
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY43_CTRL0    0x0f56
+#define    RTL8367C_SVLAN_SP2C_ENTRY43_CTRL0_DST_PORT1_OFFSET    9
+#define    RTL8367C_SVLAN_SP2C_ENTRY43_CTRL0_DST_PORT1_MASK    0x200
+#define    RTL8367C_SVLAN_SP2C_ENTRY43_CTRL0_SVIDX_OFFSET    3
+#define    RTL8367C_SVLAN_SP2C_ENTRY43_CTRL0_SVIDX_MASK    0x1F8
+#define    RTL8367C_SVLAN_SP2C_ENTRY43_CTRL0_DST_PORT_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY43_CTRL0_DST_PORT_MASK    0x7
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY43_CTRL1    0x0f57
+#define    RTL8367C_SVLAN_SP2C_ENTRY43_CTRL1_VALID_OFFSET    12
+#define    RTL8367C_SVLAN_SP2C_ENTRY43_CTRL1_VALID_MASK    0x1000
+#define    RTL8367C_SVLAN_SP2C_ENTRY43_CTRL1_VID_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY43_CTRL1_VID_MASK    0xFFF
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY44_CTRL0    0x0f58
+#define    RTL8367C_SVLAN_SP2C_ENTRY44_CTRL0_DST_PORT1_OFFSET    9
+#define    RTL8367C_SVLAN_SP2C_ENTRY44_CTRL0_DST_PORT1_MASK    0x200
+#define    RTL8367C_SVLAN_SP2C_ENTRY44_CTRL0_SVIDX_OFFSET    3
+#define    RTL8367C_SVLAN_SP2C_ENTRY44_CTRL0_SVIDX_MASK    0x1F8
+#define    RTL8367C_SVLAN_SP2C_ENTRY44_CTRL0_DST_PORT_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY44_CTRL0_DST_PORT_MASK    0x7
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY44_CTRL1    0x0f59
+#define    RTL8367C_SVLAN_SP2C_ENTRY44_CTRL1_VALID_OFFSET    12
+#define    RTL8367C_SVLAN_SP2C_ENTRY44_CTRL1_VALID_MASK    0x1000
+#define    RTL8367C_SVLAN_SP2C_ENTRY44_CTRL1_VID_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY44_CTRL1_VID_MASK    0xFFF
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY45_CTRL0    0x0f5a
+#define    RTL8367C_SVLAN_SP2C_ENTRY45_CTRL0_DST_PORT1_OFFSET    9
+#define    RTL8367C_SVLAN_SP2C_ENTRY45_CTRL0_DST_PORT1_MASK    0x200
+#define    RTL8367C_SVLAN_SP2C_ENTRY45_CTRL0_SVIDX_OFFSET    3
+#define    RTL8367C_SVLAN_SP2C_ENTRY45_CTRL0_SVIDX_MASK    0x1F8
+#define    RTL8367C_SVLAN_SP2C_ENTRY45_CTRL0_DST_PORT_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY45_CTRL0_DST_PORT_MASK    0x7
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY45_CTRL1    0x0f5b
+#define    RTL8367C_SVLAN_SP2C_ENTRY45_CTRL1_VALID_OFFSET    12
+#define    RTL8367C_SVLAN_SP2C_ENTRY45_CTRL1_VALID_MASK    0x1000
+#define    RTL8367C_SVLAN_SP2C_ENTRY45_CTRL1_VID_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY45_CTRL1_VID_MASK    0xFFF
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY46_CTRL0    0x0f5c
+#define    RTL8367C_SVLAN_SP2C_ENTRY46_CTRL0_DST_PORT1_OFFSET    9
+#define    RTL8367C_SVLAN_SP2C_ENTRY46_CTRL0_DST_PORT1_MASK    0x200
+#define    RTL8367C_SVLAN_SP2C_ENTRY46_CTRL0_SVIDX_OFFSET    3
+#define    RTL8367C_SVLAN_SP2C_ENTRY46_CTRL0_SVIDX_MASK    0x1F8
+#define    RTL8367C_SVLAN_SP2C_ENTRY46_CTRL0_DST_PORT_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY46_CTRL0_DST_PORT_MASK    0x7
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY46_CTRL1    0x0f5d
+#define    RTL8367C_SVLAN_SP2C_ENTRY46_CTRL1_VALID_OFFSET    12
+#define    RTL8367C_SVLAN_SP2C_ENTRY46_CTRL1_VALID_MASK    0x1000
+#define    RTL8367C_SVLAN_SP2C_ENTRY46_CTRL1_VID_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY46_CTRL1_VID_MASK    0xFFF
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY47_CTRL0    0x0f5e
+#define    RTL8367C_SVLAN_SP2C_ENTRY47_CTRL0_DST_PORT1_OFFSET    9
+#define    RTL8367C_SVLAN_SP2C_ENTRY47_CTRL0_DST_PORT1_MASK    0x200
+#define    RTL8367C_SVLAN_SP2C_ENTRY47_CTRL0_SVIDX_OFFSET    3
+#define    RTL8367C_SVLAN_SP2C_ENTRY47_CTRL0_SVIDX_MASK    0x1F8
+#define    RTL8367C_SVLAN_SP2C_ENTRY47_CTRL0_DST_PORT_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY47_CTRL0_DST_PORT_MASK    0x7
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY47_CTRL1    0x0f5f
+#define    RTL8367C_SVLAN_SP2C_ENTRY47_CTRL1_VALID_OFFSET    12
+#define    RTL8367C_SVLAN_SP2C_ENTRY47_CTRL1_VALID_MASK    0x1000
+#define    RTL8367C_SVLAN_SP2C_ENTRY47_CTRL1_VID_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY47_CTRL1_VID_MASK    0xFFF
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY48_CTRL0    0x0f60
+#define    RTL8367C_SVLAN_SP2C_ENTRY48_CTRL0_DST_PORT1_OFFSET    9
+#define    RTL8367C_SVLAN_SP2C_ENTRY48_CTRL0_DST_PORT1_MASK    0x200
+#define    RTL8367C_SVLAN_SP2C_ENTRY48_CTRL0_SVIDX_OFFSET    3
+#define    RTL8367C_SVLAN_SP2C_ENTRY48_CTRL0_SVIDX_MASK    0x1F8
+#define    RTL8367C_SVLAN_SP2C_ENTRY48_CTRL0_DST_PORT_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY48_CTRL0_DST_PORT_MASK    0x7
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY48_CTRL1    0x0f61
+#define    RTL8367C_SVLAN_SP2C_ENTRY48_CTRL1_VALID_OFFSET    12
+#define    RTL8367C_SVLAN_SP2C_ENTRY48_CTRL1_VALID_MASK    0x1000
+#define    RTL8367C_SVLAN_SP2C_ENTRY48_CTRL1_VID_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY48_CTRL1_VID_MASK    0xFFF
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY49_CTRL0    0x0f62
+#define    RTL8367C_SVLAN_SP2C_ENTRY49_CTRL0_DST_PORT1_OFFSET    9
+#define    RTL8367C_SVLAN_SP2C_ENTRY49_CTRL0_DST_PORT1_MASK    0x200
+#define    RTL8367C_SVLAN_SP2C_ENTRY49_CTRL0_SVIDX_OFFSET    3
+#define    RTL8367C_SVLAN_SP2C_ENTRY49_CTRL0_SVIDX_MASK    0x1F8
+#define    RTL8367C_SVLAN_SP2C_ENTRY49_CTRL0_DST_PORT_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY49_CTRL0_DST_PORT_MASK    0x7
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY49_CTRL1    0x0f63
+#define    RTL8367C_SVLAN_SP2C_ENTRY49_CTRL1_VALID_OFFSET    12
+#define    RTL8367C_SVLAN_SP2C_ENTRY49_CTRL1_VALID_MASK    0x1000
+#define    RTL8367C_SVLAN_SP2C_ENTRY49_CTRL1_VID_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY49_CTRL1_VID_MASK    0xFFF
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY50_CTRL0    0x0f64
+#define    RTL8367C_SVLAN_SP2C_ENTRY50_CTRL0_DST_PORT1_OFFSET    9
+#define    RTL8367C_SVLAN_SP2C_ENTRY50_CTRL0_DST_PORT1_MASK    0x200
+#define    RTL8367C_SVLAN_SP2C_ENTRY50_CTRL0_SVIDX_OFFSET    3
+#define    RTL8367C_SVLAN_SP2C_ENTRY50_CTRL0_SVIDX_MASK    0x1F8
+#define    RTL8367C_SVLAN_SP2C_ENTRY50_CTRL0_DST_PORT_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY50_CTRL0_DST_PORT_MASK    0x7
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY50_CTRL1    0x0f65
+#define    RTL8367C_SVLAN_SP2C_ENTRY50_CTRL1_VALID_OFFSET    12
+#define    RTL8367C_SVLAN_SP2C_ENTRY50_CTRL1_VALID_MASK    0x1000
+#define    RTL8367C_SVLAN_SP2C_ENTRY50_CTRL1_VID_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY50_CTRL1_VID_MASK    0xFFF
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY51_CTRL0    0x0f66
+#define    RTL8367C_SVLAN_SP2C_ENTRY51_CTRL0_DST_PORT1_OFFSET    9
+#define    RTL8367C_SVLAN_SP2C_ENTRY51_CTRL0_DST_PORT1_MASK    0x200
+#define    RTL8367C_SVLAN_SP2C_ENTRY51_CTRL0_SVIDX_OFFSET    3
+#define    RTL8367C_SVLAN_SP2C_ENTRY51_CTRL0_SVIDX_MASK    0x1F8
+#define    RTL8367C_SVLAN_SP2C_ENTRY51_CTRL0_DST_PORT_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY51_CTRL0_DST_PORT_MASK    0x7
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY51_CTRL1    0x0f67
+#define    RTL8367C_SVLAN_SP2C_ENTRY51_CTRL1_VALID_OFFSET    12
+#define    RTL8367C_SVLAN_SP2C_ENTRY51_CTRL1_VALID_MASK    0x1000
+#define    RTL8367C_SVLAN_SP2C_ENTRY51_CTRL1_VID_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY51_CTRL1_VID_MASK    0xFFF
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY52_CTRL0    0x0f68
+#define    RTL8367C_SVLAN_SP2C_ENTRY52_CTRL0_DST_PORT1_OFFSET    9
+#define    RTL8367C_SVLAN_SP2C_ENTRY52_CTRL0_DST_PORT1_MASK    0x200
+#define    RTL8367C_SVLAN_SP2C_ENTRY52_CTRL0_SVIDX_OFFSET    3
+#define    RTL8367C_SVLAN_SP2C_ENTRY52_CTRL0_SVIDX_MASK    0x1F8
+#define    RTL8367C_SVLAN_SP2C_ENTRY52_CTRL0_DST_PORT_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY52_CTRL0_DST_PORT_MASK    0x7
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY52_CTRL1    0x0f69
+#define    RTL8367C_SVLAN_SP2C_ENTRY52_CTRL1_VALID_OFFSET    12
+#define    RTL8367C_SVLAN_SP2C_ENTRY52_CTRL1_VALID_MASK    0x1000
+#define    RTL8367C_SVLAN_SP2C_ENTRY52_CTRL1_VID_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY52_CTRL1_VID_MASK    0xFFF
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY53_CTRL0    0x0f6a
+#define    RTL8367C_SVLAN_SP2C_ENTRY53_CTRL0_DST_PORT1_OFFSET    9
+#define    RTL8367C_SVLAN_SP2C_ENTRY53_CTRL0_DST_PORT1_MASK    0x200
+#define    RTL8367C_SVLAN_SP2C_ENTRY53_CTRL0_SVIDX_OFFSET    3
+#define    RTL8367C_SVLAN_SP2C_ENTRY53_CTRL0_SVIDX_MASK    0x1F8
+#define    RTL8367C_SVLAN_SP2C_ENTRY53_CTRL0_DST_PORT_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY53_CTRL0_DST_PORT_MASK    0x7
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY53_CTRL1    0x0f6b
+#define    RTL8367C_SVLAN_SP2C_ENTRY53_CTRL1_VALID_OFFSET    12
+#define    RTL8367C_SVLAN_SP2C_ENTRY53_CTRL1_VALID_MASK    0x1000
+#define    RTL8367C_SVLAN_SP2C_ENTRY53_CTRL1_VID_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY53_CTRL1_VID_MASK    0xFFF
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY54_CTRL0    0x0f6c
+#define    RTL8367C_SVLAN_SP2C_ENTRY54_CTRL0_DST_PORT1_OFFSET    9
+#define    RTL8367C_SVLAN_SP2C_ENTRY54_CTRL0_DST_PORT1_MASK    0x200
+#define    RTL8367C_SVLAN_SP2C_ENTRY54_CTRL0_SVIDX_OFFSET    3
+#define    RTL8367C_SVLAN_SP2C_ENTRY54_CTRL0_SVIDX_MASK    0x1F8
+#define    RTL8367C_SVLAN_SP2C_ENTRY54_CTRL0_DST_PORT_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY54_CTRL0_DST_PORT_MASK    0x7
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY54_CTRL1    0x0f6d
+#define    RTL8367C_SVLAN_SP2C_ENTRY54_CTRL1_VALID_OFFSET    12
+#define    RTL8367C_SVLAN_SP2C_ENTRY54_CTRL1_VALID_MASK    0x1000
+#define    RTL8367C_SVLAN_SP2C_ENTRY54_CTRL1_VID_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY54_CTRL1_VID_MASK    0xFFF
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY55_CTRL0    0x0f6e
+#define    RTL8367C_SVLAN_SP2C_ENTRY55_CTRL0_DST_PORT1_OFFSET    9
+#define    RTL8367C_SVLAN_SP2C_ENTRY55_CTRL0_DST_PORT1_MASK    0x200
+#define    RTL8367C_SVLAN_SP2C_ENTRY55_CTRL0_SVIDX_OFFSET    3
+#define    RTL8367C_SVLAN_SP2C_ENTRY55_CTRL0_SVIDX_MASK    0x1F8
+#define    RTL8367C_SVLAN_SP2C_ENTRY55_CTRL0_DST_PORT_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY55_CTRL0_DST_PORT_MASK    0x7
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY55_CTRL1    0x0f6f
+#define    RTL8367C_SVLAN_SP2C_ENTRY55_CTRL1_VALID_OFFSET    12
+#define    RTL8367C_SVLAN_SP2C_ENTRY55_CTRL1_VALID_MASK    0x1000
+#define    RTL8367C_SVLAN_SP2C_ENTRY55_CTRL1_VID_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY55_CTRL1_VID_MASK    0xFFF
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY56_CTRL0    0x0f70
+#define    RTL8367C_SVLAN_SP2C_ENTRY56_CTRL0_DST_PORT1_OFFSET    9
+#define    RTL8367C_SVLAN_SP2C_ENTRY56_CTRL0_DST_PORT1_MASK    0x200
+#define    RTL8367C_SVLAN_SP2C_ENTRY56_CTRL0_SVIDX_OFFSET    3
+#define    RTL8367C_SVLAN_SP2C_ENTRY56_CTRL0_SVIDX_MASK    0x1F8
+#define    RTL8367C_SVLAN_SP2C_ENTRY56_CTRL0_DST_PORT_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY56_CTRL0_DST_PORT_MASK    0x7
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY56_CTRL1    0x0f71
+#define    RTL8367C_SVLAN_SP2C_ENTRY56_CTRL1_VALID_OFFSET    12
+#define    RTL8367C_SVLAN_SP2C_ENTRY56_CTRL1_VALID_MASK    0x1000
+#define    RTL8367C_SVLAN_SP2C_ENTRY56_CTRL1_VID_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY56_CTRL1_VID_MASK    0xFFF
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY57_CTRL0    0x0f72
+#define    RTL8367C_SVLAN_SP2C_ENTRY57_CTRL0_DST_PORT1_OFFSET    9
+#define    RTL8367C_SVLAN_SP2C_ENTRY57_CTRL0_DST_PORT1_MASK    0x200
+#define    RTL8367C_SVLAN_SP2C_ENTRY57_CTRL0_SVIDX_OFFSET    3
+#define    RTL8367C_SVLAN_SP2C_ENTRY57_CTRL0_SVIDX_MASK    0x1F8
+#define    RTL8367C_SVLAN_SP2C_ENTRY57_CTRL0_DST_PORT_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY57_CTRL0_DST_PORT_MASK    0x7
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY57_CTRL1    0x0f73
+#define    RTL8367C_SVLAN_SP2C_ENTRY57_CTRL1_VALID_OFFSET    12
+#define    RTL8367C_SVLAN_SP2C_ENTRY57_CTRL1_VALID_MASK    0x1000
+#define    RTL8367C_SVLAN_SP2C_ENTRY57_CTRL1_VID_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY57_CTRL1_VID_MASK    0xFFF
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY58_CTRL0    0x0f74
+#define    RTL8367C_SVLAN_SP2C_ENTRY58_CTRL0_DST_PORT1_OFFSET    9
+#define    RTL8367C_SVLAN_SP2C_ENTRY58_CTRL0_DST_PORT1_MASK    0x200
+#define    RTL8367C_SVLAN_SP2C_ENTRY58_CTRL0_SVIDX_OFFSET    3
+#define    RTL8367C_SVLAN_SP2C_ENTRY58_CTRL0_SVIDX_MASK    0x1F8
+#define    RTL8367C_SVLAN_SP2C_ENTRY58_CTRL0_DST_PORT_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY58_CTRL0_DST_PORT_MASK    0x7
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY58_CTRL1    0x0f75
+#define    RTL8367C_SVLAN_SP2C_ENTRY58_CTRL1_VALID_OFFSET    12
+#define    RTL8367C_SVLAN_SP2C_ENTRY58_CTRL1_VALID_MASK    0x1000
+#define    RTL8367C_SVLAN_SP2C_ENTRY58_CTRL1_VID_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY58_CTRL1_VID_MASK    0xFFF
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY59_CTRL0    0x0f76
+#define    RTL8367C_SVLAN_SP2C_ENTRY59_CTRL0_DST_PORT1_OFFSET    9
+#define    RTL8367C_SVLAN_SP2C_ENTRY59_CTRL0_DST_PORT1_MASK    0x200
+#define    RTL8367C_SVLAN_SP2C_ENTRY59_CTRL0_SVIDX_OFFSET    3
+#define    RTL8367C_SVLAN_SP2C_ENTRY59_CTRL0_SVIDX_MASK    0x1F8
+#define    RTL8367C_SVLAN_SP2C_ENTRY59_CTRL0_DST_PORT_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY59_CTRL0_DST_PORT_MASK    0x7
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY59_CTRL1    0x0f77
+#define    RTL8367C_SVLAN_SP2C_ENTRY59_CTRL1_VALID_OFFSET    12
+#define    RTL8367C_SVLAN_SP2C_ENTRY59_CTRL1_VALID_MASK    0x1000
+#define    RTL8367C_SVLAN_SP2C_ENTRY59_CTRL1_VID_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY59_CTRL1_VID_MASK    0xFFF
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY60_CTRL0    0x0f78
+#define    RTL8367C_SVLAN_SP2C_ENTRY60_CTRL0_DST_PORT1_OFFSET    9
+#define    RTL8367C_SVLAN_SP2C_ENTRY60_CTRL0_DST_PORT1_MASK    0x200
+#define    RTL8367C_SVLAN_SP2C_ENTRY60_CTRL0_SVIDX_OFFSET    3
+#define    RTL8367C_SVLAN_SP2C_ENTRY60_CTRL0_SVIDX_MASK    0x1F8
+#define    RTL8367C_SVLAN_SP2C_ENTRY60_CTRL0_DST_PORT_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY60_CTRL0_DST_PORT_MASK    0x7
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY60_CTRL1    0x0f79
+#define    RTL8367C_SVLAN_SP2C_ENTRY60_CTRL1_VALID_OFFSET    12
+#define    RTL8367C_SVLAN_SP2C_ENTRY60_CTRL1_VALID_MASK    0x1000
+#define    RTL8367C_SVLAN_SP2C_ENTRY60_CTRL1_VID_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY60_CTRL1_VID_MASK    0xFFF
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY61_CTRL0    0x0f7a
+#define    RTL8367C_SVLAN_SP2C_ENTRY61_CTRL0_DST_PORT1_OFFSET    9
+#define    RTL8367C_SVLAN_SP2C_ENTRY61_CTRL0_DST_PORT1_MASK    0x200
+#define    RTL8367C_SVLAN_SP2C_ENTRY61_CTRL0_SVIDX_OFFSET    3
+#define    RTL8367C_SVLAN_SP2C_ENTRY61_CTRL0_SVIDX_MASK    0x1F8
+#define    RTL8367C_SVLAN_SP2C_ENTRY61_CTRL0_DST_PORT_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY61_CTRL0_DST_PORT_MASK    0x7
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY61_CTRL1    0x0f7b
+#define    RTL8367C_SVLAN_SP2C_ENTRY61_CTRL1_VALID_OFFSET    12
+#define    RTL8367C_SVLAN_SP2C_ENTRY61_CTRL1_VALID_MASK    0x1000
+#define    RTL8367C_SVLAN_SP2C_ENTRY61_CTRL1_VID_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY61_CTRL1_VID_MASK    0xFFF
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY62_CTRL0    0x0f7c
+#define    RTL8367C_SVLAN_SP2C_ENTRY62_CTRL0_DST_PORT1_OFFSET    9
+#define    RTL8367C_SVLAN_SP2C_ENTRY62_CTRL0_DST_PORT1_MASK    0x200
+#define    RTL8367C_SVLAN_SP2C_ENTRY62_CTRL0_SVIDX_OFFSET    3
+#define    RTL8367C_SVLAN_SP2C_ENTRY62_CTRL0_SVIDX_MASK    0x1F8
+#define    RTL8367C_SVLAN_SP2C_ENTRY62_CTRL0_DST_PORT_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY62_CTRL0_DST_PORT_MASK    0x7
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY62_CTRL1    0x0f7d
+#define    RTL8367C_SVLAN_SP2C_ENTRY62_CTRL1_VALID_OFFSET    12
+#define    RTL8367C_SVLAN_SP2C_ENTRY62_CTRL1_VALID_MASK    0x1000
+#define    RTL8367C_SVLAN_SP2C_ENTRY62_CTRL1_VID_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY62_CTRL1_VID_MASK    0xFFF
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY63_CTRL0    0x0f7e
+#define    RTL8367C_SVLAN_SP2C_ENTRY63_CTRL0_DST_PORT1_OFFSET    9
+#define    RTL8367C_SVLAN_SP2C_ENTRY63_CTRL0_DST_PORT1_MASK    0x200
+#define    RTL8367C_SVLAN_SP2C_ENTRY63_CTRL0_SVIDX_OFFSET    3
+#define    RTL8367C_SVLAN_SP2C_ENTRY63_CTRL0_SVIDX_MASK    0x1F8
+#define    RTL8367C_SVLAN_SP2C_ENTRY63_CTRL0_DST_PORT_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY63_CTRL0_DST_PORT_MASK    0x7
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY63_CTRL1    0x0f7f
+#define    RTL8367C_SVLAN_SP2C_ENTRY63_CTRL1_VALID_OFFSET    12
+#define    RTL8367C_SVLAN_SP2C_ENTRY63_CTRL1_VALID_MASK    0x1000
+#define    RTL8367C_SVLAN_SP2C_ENTRY63_CTRL1_VID_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY63_CTRL1_VID_MASK    0xFFF
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY64_CTRL0    0x0f80
+#define    RTL8367C_SVLAN_SP2C_ENTRY64_CTRL0_DST_PORT1_OFFSET    9
+#define    RTL8367C_SVLAN_SP2C_ENTRY64_CTRL0_DST_PORT1_MASK    0x200
+#define    RTL8367C_SVLAN_SP2C_ENTRY64_CTRL0_SVIDX_OFFSET    3
+#define    RTL8367C_SVLAN_SP2C_ENTRY64_CTRL0_SVIDX_MASK    0x1F8
+#define    RTL8367C_SVLAN_SP2C_ENTRY64_CTRL0_DST_PORT_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY64_CTRL0_DST_PORT_MASK    0x7
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY64_CTRL1    0x0f81
+#define    RTL8367C_SVLAN_SP2C_ENTRY64_CTRL1_VALID_OFFSET    12
+#define    RTL8367C_SVLAN_SP2C_ENTRY64_CTRL1_VALID_MASK    0x1000
+#define    RTL8367C_SVLAN_SP2C_ENTRY64_CTRL1_VID_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY64_CTRL1_VID_MASK    0xFFF
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY65_CTRL0    0x0f82
+#define    RTL8367C_SVLAN_SP2C_ENTRY65_CTRL0_DST_PORT1_OFFSET    9
+#define    RTL8367C_SVLAN_SP2C_ENTRY65_CTRL0_DST_PORT1_MASK    0x200
+#define    RTL8367C_SVLAN_SP2C_ENTRY65_CTRL0_SVIDX_OFFSET    3
+#define    RTL8367C_SVLAN_SP2C_ENTRY65_CTRL0_SVIDX_MASK    0x1F8
+#define    RTL8367C_SVLAN_SP2C_ENTRY65_CTRL0_DST_PORT_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY65_CTRL0_DST_PORT_MASK    0x7
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY65_CTRL1    0x0f83
+#define    RTL8367C_SVLAN_SP2C_ENTRY65_CTRL1_VALID_OFFSET    12
+#define    RTL8367C_SVLAN_SP2C_ENTRY65_CTRL1_VALID_MASK    0x1000
+#define    RTL8367C_SVLAN_SP2C_ENTRY65_CTRL1_VID_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY65_CTRL1_VID_MASK    0xFFF
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY66_CTRL0    0x0f84
+#define    RTL8367C_SVLAN_SP2C_ENTRY66_CTRL0_DST_PORT1_OFFSET    9
+#define    RTL8367C_SVLAN_SP2C_ENTRY66_CTRL0_DST_PORT1_MASK    0x200
+#define    RTL8367C_SVLAN_SP2C_ENTRY66_CTRL0_SVIDX_OFFSET    3
+#define    RTL8367C_SVLAN_SP2C_ENTRY66_CTRL0_SVIDX_MASK    0x1F8
+#define    RTL8367C_SVLAN_SP2C_ENTRY66_CTRL0_DST_PORT_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY66_CTRL0_DST_PORT_MASK    0x7
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY66_CTRL1    0x0f85
+#define    RTL8367C_SVLAN_SP2C_ENTRY66_CTRL1_VALID_OFFSET    12
+#define    RTL8367C_SVLAN_SP2C_ENTRY66_CTRL1_VALID_MASK    0x1000
+#define    RTL8367C_SVLAN_SP2C_ENTRY66_CTRL1_VID_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY66_CTRL1_VID_MASK    0xFFF
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY67_CTRL0    0x0f86
+#define    RTL8367C_SVLAN_SP2C_ENTRY67_CTRL0_DST_PORT1_OFFSET    9
+#define    RTL8367C_SVLAN_SP2C_ENTRY67_CTRL0_DST_PORT1_MASK    0x200
+#define    RTL8367C_SVLAN_SP2C_ENTRY67_CTRL0_SVIDX_OFFSET    3
+#define    RTL8367C_SVLAN_SP2C_ENTRY67_CTRL0_SVIDX_MASK    0x1F8
+#define    RTL8367C_SVLAN_SP2C_ENTRY67_CTRL0_DST_PORT_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY67_CTRL0_DST_PORT_MASK    0x7
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY67_CTRL1    0x0f87
+#define    RTL8367C_SVLAN_SP2C_ENTRY67_CTRL1_VALID_OFFSET    12
+#define    RTL8367C_SVLAN_SP2C_ENTRY67_CTRL1_VALID_MASK    0x1000
+#define    RTL8367C_SVLAN_SP2C_ENTRY67_CTRL1_VID_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY67_CTRL1_VID_MASK    0xFFF
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY68_CTRL0    0x0f88
+#define    RTL8367C_SVLAN_SP2C_ENTRY68_CTRL0_DST_PORT1_OFFSET    9
+#define    RTL8367C_SVLAN_SP2C_ENTRY68_CTRL0_DST_PORT1_MASK    0x200
+#define    RTL8367C_SVLAN_SP2C_ENTRY68_CTRL0_SVIDX_OFFSET    3
+#define    RTL8367C_SVLAN_SP2C_ENTRY68_CTRL0_SVIDX_MASK    0x1F8
+#define    RTL8367C_SVLAN_SP2C_ENTRY68_CTRL0_DST_PORT_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY68_CTRL0_DST_PORT_MASK    0x7
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY68_CTRL1    0x0f89
+#define    RTL8367C_SVLAN_SP2C_ENTRY68_CTRL1_VALID_OFFSET    12
+#define    RTL8367C_SVLAN_SP2C_ENTRY68_CTRL1_VALID_MASK    0x1000
+#define    RTL8367C_SVLAN_SP2C_ENTRY68_CTRL1_VID_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY68_CTRL1_VID_MASK    0xFFF
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY69_CTRL0    0x0f8a
+#define    RTL8367C_SVLAN_SP2C_ENTRY69_CTRL0_DST_PORT1_OFFSET    9
+#define    RTL8367C_SVLAN_SP2C_ENTRY69_CTRL0_DST_PORT1_MASK    0x200
+#define    RTL8367C_SVLAN_SP2C_ENTRY69_CTRL0_SVIDX_OFFSET    3
+#define    RTL8367C_SVLAN_SP2C_ENTRY69_CTRL0_SVIDX_MASK    0x1F8
+#define    RTL8367C_SVLAN_SP2C_ENTRY69_CTRL0_DST_PORT_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY69_CTRL0_DST_PORT_MASK    0x7
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY69_CTRL1    0x0f8b
+#define    RTL8367C_SVLAN_SP2C_ENTRY69_CTRL1_VALID_OFFSET    12
+#define    RTL8367C_SVLAN_SP2C_ENTRY69_CTRL1_VALID_MASK    0x1000
+#define    RTL8367C_SVLAN_SP2C_ENTRY69_CTRL1_VID_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY69_CTRL1_VID_MASK    0xFFF
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY70_CTRL0    0x0f8c
+#define    RTL8367C_SVLAN_SP2C_ENTRY70_CTRL0_DST_PORT1_OFFSET    9
+#define    RTL8367C_SVLAN_SP2C_ENTRY70_CTRL0_DST_PORT1_MASK    0x200
+#define    RTL8367C_SVLAN_SP2C_ENTRY70_CTRL0_SVIDX_OFFSET    3
+#define    RTL8367C_SVLAN_SP2C_ENTRY70_CTRL0_SVIDX_MASK    0x1F8
+#define    RTL8367C_SVLAN_SP2C_ENTRY70_CTRL0_DST_PORT_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY70_CTRL0_DST_PORT_MASK    0x7
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY70_CTRL1    0x0f8d
+#define    RTL8367C_SVLAN_SP2C_ENTRY70_CTRL1_VALID_OFFSET    12
+#define    RTL8367C_SVLAN_SP2C_ENTRY70_CTRL1_VALID_MASK    0x1000
+#define    RTL8367C_SVLAN_SP2C_ENTRY70_CTRL1_VID_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY70_CTRL1_VID_MASK    0xFFF
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY71_CTRL0    0x0f8e
+#define    RTL8367C_SVLAN_SP2C_ENTRY71_CTRL0_DST_PORT1_OFFSET    9
+#define    RTL8367C_SVLAN_SP2C_ENTRY71_CTRL0_DST_PORT1_MASK    0x200
+#define    RTL8367C_SVLAN_SP2C_ENTRY71_CTRL0_SVIDX_OFFSET    3
+#define    RTL8367C_SVLAN_SP2C_ENTRY71_CTRL0_SVIDX_MASK    0x1F8
+#define    RTL8367C_SVLAN_SP2C_ENTRY71_CTRL0_DST_PORT_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY71_CTRL0_DST_PORT_MASK    0x7
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY71_CTRL1    0x0f8f
+#define    RTL8367C_SVLAN_SP2C_ENTRY71_CTRL1_VALID_OFFSET    12
+#define    RTL8367C_SVLAN_SP2C_ENTRY71_CTRL1_VALID_MASK    0x1000
+#define    RTL8367C_SVLAN_SP2C_ENTRY71_CTRL1_VID_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY71_CTRL1_VID_MASK    0xFFF
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY72_CTRL0    0x0f90
+#define    RTL8367C_SVLAN_SP2C_ENTRY72_CTRL0_DST_PORT1_OFFSET    9
+#define    RTL8367C_SVLAN_SP2C_ENTRY72_CTRL0_DST_PORT1_MASK    0x200
+#define    RTL8367C_SVLAN_SP2C_ENTRY72_CTRL0_SVIDX_OFFSET    3
+#define    RTL8367C_SVLAN_SP2C_ENTRY72_CTRL0_SVIDX_MASK    0x1F8
+#define    RTL8367C_SVLAN_SP2C_ENTRY72_CTRL0_DST_PORT_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY72_CTRL0_DST_PORT_MASK    0x7
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY72_CTRL1    0x0f91
+#define    RTL8367C_SVLAN_SP2C_ENTRY72_CTRL1_VALID_OFFSET    12
+#define    RTL8367C_SVLAN_SP2C_ENTRY72_CTRL1_VALID_MASK    0x1000
+#define    RTL8367C_SVLAN_SP2C_ENTRY72_CTRL1_VID_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY72_CTRL1_VID_MASK    0xFFF
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY73_CTRL0    0x0f92
+#define    RTL8367C_SVLAN_SP2C_ENTRY73_CTRL0_DST_PORT1_OFFSET    9
+#define    RTL8367C_SVLAN_SP2C_ENTRY73_CTRL0_DST_PORT1_MASK    0x200
+#define    RTL8367C_SVLAN_SP2C_ENTRY73_CTRL0_SVIDX_OFFSET    3
+#define    RTL8367C_SVLAN_SP2C_ENTRY73_CTRL0_SVIDX_MASK    0x1F8
+#define    RTL8367C_SVLAN_SP2C_ENTRY73_CTRL0_DST_PORT_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY73_CTRL0_DST_PORT_MASK    0x7
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY73_CTRL1    0x0f93
+#define    RTL8367C_SVLAN_SP2C_ENTRY73_CTRL1_VALID_OFFSET    12
+#define    RTL8367C_SVLAN_SP2C_ENTRY73_CTRL1_VALID_MASK    0x1000
+#define    RTL8367C_SVLAN_SP2C_ENTRY73_CTRL1_VID_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY73_CTRL1_VID_MASK    0xFFF
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY74_CTRL0    0x0f94
+#define    RTL8367C_SVLAN_SP2C_ENTRY74_CTRL0_DST_PORT1_OFFSET    9
+#define    RTL8367C_SVLAN_SP2C_ENTRY74_CTRL0_DST_PORT1_MASK    0x200
+#define    RTL8367C_SVLAN_SP2C_ENTRY74_CTRL0_SVIDX_OFFSET    3
+#define    RTL8367C_SVLAN_SP2C_ENTRY74_CTRL0_SVIDX_MASK    0x1F8
+#define    RTL8367C_SVLAN_SP2C_ENTRY74_CTRL0_DST_PORT_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY74_CTRL0_DST_PORT_MASK    0x7
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY74_CTRL1    0x0f95
+#define    RTL8367C_SVLAN_SP2C_ENTRY74_CTRL1_VALID_OFFSET    12
+#define    RTL8367C_SVLAN_SP2C_ENTRY74_CTRL1_VALID_MASK    0x1000
+#define    RTL8367C_SVLAN_SP2C_ENTRY74_CTRL1_VID_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY74_CTRL1_VID_MASK    0xFFF
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY75_CTRL0    0x0f96
+#define    RTL8367C_SVLAN_SP2C_ENTRY75_CTRL0_DST_PORT1_OFFSET    9
+#define    RTL8367C_SVLAN_SP2C_ENTRY75_CTRL0_DST_PORT1_MASK    0x200
+#define    RTL8367C_SVLAN_SP2C_ENTRY75_CTRL0_SVIDX_OFFSET    3
+#define    RTL8367C_SVLAN_SP2C_ENTRY75_CTRL0_SVIDX_MASK    0x1F8
+#define    RTL8367C_SVLAN_SP2C_ENTRY75_CTRL0_DST_PORT_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY75_CTRL0_DST_PORT_MASK    0x7
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY75_CTRL1    0x0f97
+#define    RTL8367C_SVLAN_SP2C_ENTRY75_CTRL1_VALID_OFFSET    12
+#define    RTL8367C_SVLAN_SP2C_ENTRY75_CTRL1_VALID_MASK    0x1000
+#define    RTL8367C_SVLAN_SP2C_ENTRY75_CTRL1_VID_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY75_CTRL1_VID_MASK    0xFFF
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY76_CTRL0    0x0f98
+#define    RTL8367C_SVLAN_SP2C_ENTRY76_CTRL0_DST_PORT1_OFFSET    9
+#define    RTL8367C_SVLAN_SP2C_ENTRY76_CTRL0_DST_PORT1_MASK    0x200
+#define    RTL8367C_SVLAN_SP2C_ENTRY76_CTRL0_SVIDX_OFFSET    3
+#define    RTL8367C_SVLAN_SP2C_ENTRY76_CTRL0_SVIDX_MASK    0x1F8
+#define    RTL8367C_SVLAN_SP2C_ENTRY76_CTRL0_DST_PORT_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY76_CTRL0_DST_PORT_MASK    0x7
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY76_CTRL1    0x0f99
+#define    RTL8367C_SVLAN_SP2C_ENTRY76_CTRL1_VALID_OFFSET    12
+#define    RTL8367C_SVLAN_SP2C_ENTRY76_CTRL1_VALID_MASK    0x1000
+#define    RTL8367C_SVLAN_SP2C_ENTRY76_CTRL1_VID_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY76_CTRL1_VID_MASK    0xFFF
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY77_CTRL0    0x0f9a
+#define    RTL8367C_SVLAN_SP2C_ENTRY77_CTRL0_DST_PORT1_OFFSET    9
+#define    RTL8367C_SVLAN_SP2C_ENTRY77_CTRL0_DST_PORT1_MASK    0x200
+#define    RTL8367C_SVLAN_SP2C_ENTRY77_CTRL0_SVIDX_OFFSET    3
+#define    RTL8367C_SVLAN_SP2C_ENTRY77_CTRL0_SVIDX_MASK    0x1F8
+#define    RTL8367C_SVLAN_SP2C_ENTRY77_CTRL0_DST_PORT_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY77_CTRL0_DST_PORT_MASK    0x7
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY77_CTRL1    0x0f9b
+#define    RTL8367C_SVLAN_SP2C_ENTRY77_CTRL1_VALID_OFFSET    12
+#define    RTL8367C_SVLAN_SP2C_ENTRY77_CTRL1_VALID_MASK    0x1000
+#define    RTL8367C_SVLAN_SP2C_ENTRY77_CTRL1_VID_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY77_CTRL1_VID_MASK    0xFFF
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY78_CTRL0    0x0f9c
+#define    RTL8367C_SVLAN_SP2C_ENTRY78_CTRL0_DST_PORT1_OFFSET    9
+#define    RTL8367C_SVLAN_SP2C_ENTRY78_CTRL0_DST_PORT1_MASK    0x200
+#define    RTL8367C_SVLAN_SP2C_ENTRY78_CTRL0_SVIDX_OFFSET    3
+#define    RTL8367C_SVLAN_SP2C_ENTRY78_CTRL0_SVIDX_MASK    0x1F8
+#define    RTL8367C_SVLAN_SP2C_ENTRY78_CTRL0_DST_PORT_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY78_CTRL0_DST_PORT_MASK    0x7
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY78_CTRL1    0x0f9d
+#define    RTL8367C_SVLAN_SP2C_ENTRY78_CTRL1_VALID_OFFSET    12
+#define    RTL8367C_SVLAN_SP2C_ENTRY78_CTRL1_VALID_MASK    0x1000
+#define    RTL8367C_SVLAN_SP2C_ENTRY78_CTRL1_VID_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY78_CTRL1_VID_MASK    0xFFF
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY79_CTRL0    0x0f9e
+#define    RTL8367C_SVLAN_SP2C_ENTRY79_CTRL0_DST_PORT1_OFFSET    9
+#define    RTL8367C_SVLAN_SP2C_ENTRY79_CTRL0_DST_PORT1_MASK    0x200
+#define    RTL8367C_SVLAN_SP2C_ENTRY79_CTRL0_SVIDX_OFFSET    3
+#define    RTL8367C_SVLAN_SP2C_ENTRY79_CTRL0_SVIDX_MASK    0x1F8
+#define    RTL8367C_SVLAN_SP2C_ENTRY79_CTRL0_DST_PORT_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY79_CTRL0_DST_PORT_MASK    0x7
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY79_CTRL1    0x0f9f
+#define    RTL8367C_SVLAN_SP2C_ENTRY79_CTRL1_VALID_OFFSET    12
+#define    RTL8367C_SVLAN_SP2C_ENTRY79_CTRL1_VALID_MASK    0x1000
+#define    RTL8367C_SVLAN_SP2C_ENTRY79_CTRL1_VID_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY79_CTRL1_VID_MASK    0xFFF
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY80_CTRL0    0x0fa0
+#define    RTL8367C_SVLAN_SP2C_ENTRY80_CTRL0_DST_PORT1_OFFSET    9
+#define    RTL8367C_SVLAN_SP2C_ENTRY80_CTRL0_DST_PORT1_MASK    0x200
+#define    RTL8367C_SVLAN_SP2C_ENTRY80_CTRL0_SVIDX_OFFSET    3
+#define    RTL8367C_SVLAN_SP2C_ENTRY80_CTRL0_SVIDX_MASK    0x1F8
+#define    RTL8367C_SVLAN_SP2C_ENTRY80_CTRL0_DST_PORT_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY80_CTRL0_DST_PORT_MASK    0x7
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY80_CTRL1    0x0fa1
+#define    RTL8367C_SVLAN_SP2C_ENTRY80_CTRL1_VALID_OFFSET    12
+#define    RTL8367C_SVLAN_SP2C_ENTRY80_CTRL1_VALID_MASK    0x1000
+#define    RTL8367C_SVLAN_SP2C_ENTRY80_CTRL1_VID_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY80_CTRL1_VID_MASK    0xFFF
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY81_CTRL0    0x0fa2
+#define    RTL8367C_SVLAN_SP2C_ENTRY81_CTRL0_DST_PORT1_OFFSET    9
+#define    RTL8367C_SVLAN_SP2C_ENTRY81_CTRL0_DST_PORT1_MASK    0x200
+#define    RTL8367C_SVLAN_SP2C_ENTRY81_CTRL0_SVIDX_OFFSET    3
+#define    RTL8367C_SVLAN_SP2C_ENTRY81_CTRL0_SVIDX_MASK    0x1F8
+#define    RTL8367C_SVLAN_SP2C_ENTRY81_CTRL0_DST_PORT_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY81_CTRL0_DST_PORT_MASK    0x7
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY81_CTRL1    0x0fa3
+#define    RTL8367C_SVLAN_SP2C_ENTRY81_CTRL1_VALID_OFFSET    12
+#define    RTL8367C_SVLAN_SP2C_ENTRY81_CTRL1_VALID_MASK    0x1000
+#define    RTL8367C_SVLAN_SP2C_ENTRY81_CTRL1_VID_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY81_CTRL1_VID_MASK    0xFFF
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY82_CTRL0    0x0fa4
+#define    RTL8367C_SVLAN_SP2C_ENTRY82_CTRL0_DST_PORT1_OFFSET    9
+#define    RTL8367C_SVLAN_SP2C_ENTRY82_CTRL0_DST_PORT1_MASK    0x200
+#define    RTL8367C_SVLAN_SP2C_ENTRY82_CTRL0_SVIDX_OFFSET    3
+#define    RTL8367C_SVLAN_SP2C_ENTRY82_CTRL0_SVIDX_MASK    0x1F8
+#define    RTL8367C_SVLAN_SP2C_ENTRY82_CTRL0_DST_PORT_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY82_CTRL0_DST_PORT_MASK    0x7
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY82_CTRL1    0x0fa5
+#define    RTL8367C_SVLAN_SP2C_ENTRY82_CTRL1_VALID_OFFSET    12
+#define    RTL8367C_SVLAN_SP2C_ENTRY82_CTRL1_VALID_MASK    0x1000
+#define    RTL8367C_SVLAN_SP2C_ENTRY82_CTRL1_VID_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY82_CTRL1_VID_MASK    0xFFF
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY83_CTRL0    0x0fa6
+#define    RTL8367C_SVLAN_SP2C_ENTRY83_CTRL0_DST_PORT1_OFFSET    9
+#define    RTL8367C_SVLAN_SP2C_ENTRY83_CTRL0_DST_PORT1_MASK    0x200
+#define    RTL8367C_SVLAN_SP2C_ENTRY83_CTRL0_SVIDX_OFFSET    3
+#define    RTL8367C_SVLAN_SP2C_ENTRY83_CTRL0_SVIDX_MASK    0x1F8
+#define    RTL8367C_SVLAN_SP2C_ENTRY83_CTRL0_DST_PORT_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY83_CTRL0_DST_PORT_MASK    0x7
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY83_CTRL1    0x0fa7
+#define    RTL8367C_SVLAN_SP2C_ENTRY83_CTRL1_VALID_OFFSET    12
+#define    RTL8367C_SVLAN_SP2C_ENTRY83_CTRL1_VALID_MASK    0x1000
+#define    RTL8367C_SVLAN_SP2C_ENTRY83_CTRL1_VID_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY83_CTRL1_VID_MASK    0xFFF
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY84_CTRL0    0x0fa8
+#define    RTL8367C_SVLAN_SP2C_ENTRY84_CTRL0_DST_PORT1_OFFSET    9
+#define    RTL8367C_SVLAN_SP2C_ENTRY84_CTRL0_DST_PORT1_MASK    0x200
+#define    RTL8367C_SVLAN_SP2C_ENTRY84_CTRL0_SVIDX_OFFSET    3
+#define    RTL8367C_SVLAN_SP2C_ENTRY84_CTRL0_SVIDX_MASK    0x1F8
+#define    RTL8367C_SVLAN_SP2C_ENTRY84_CTRL0_DST_PORT_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY84_CTRL0_DST_PORT_MASK    0x7
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY84_CTRL1    0x0fa9
+#define    RTL8367C_SVLAN_SP2C_ENTRY84_CTRL1_VALID_OFFSET    12
+#define    RTL8367C_SVLAN_SP2C_ENTRY84_CTRL1_VALID_MASK    0x1000
+#define    RTL8367C_SVLAN_SP2C_ENTRY84_CTRL1_VID_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY84_CTRL1_VID_MASK    0xFFF
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY85_CTRL0    0x0faa
+#define    RTL8367C_SVLAN_SP2C_ENTRY85_CTRL0_DST_PORT1_OFFSET    9
+#define    RTL8367C_SVLAN_SP2C_ENTRY85_CTRL0_DST_PORT1_MASK    0x200
+#define    RTL8367C_SVLAN_SP2C_ENTRY85_CTRL0_SVIDX_OFFSET    3
+#define    RTL8367C_SVLAN_SP2C_ENTRY85_CTRL0_SVIDX_MASK    0x1F8
+#define    RTL8367C_SVLAN_SP2C_ENTRY85_CTRL0_DST_PORT_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY85_CTRL0_DST_PORT_MASK    0x7
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY85_CTRL1    0x0fab
+#define    RTL8367C_SVLAN_SP2C_ENTRY85_CTRL1_VALID_OFFSET    12
+#define    RTL8367C_SVLAN_SP2C_ENTRY85_CTRL1_VALID_MASK    0x1000
+#define    RTL8367C_SVLAN_SP2C_ENTRY85_CTRL1_VID_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY85_CTRL1_VID_MASK    0xFFF
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY86_CTRL0    0x0fac
+#define    RTL8367C_SVLAN_SP2C_ENTRY86_CTRL0_DST_PORT1_OFFSET    9
+#define    RTL8367C_SVLAN_SP2C_ENTRY86_CTRL0_DST_PORT1_MASK    0x200
+#define    RTL8367C_SVLAN_SP2C_ENTRY86_CTRL0_SVIDX_OFFSET    3
+#define    RTL8367C_SVLAN_SP2C_ENTRY86_CTRL0_SVIDX_MASK    0x1F8
+#define    RTL8367C_SVLAN_SP2C_ENTRY86_CTRL0_DST_PORT_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY86_CTRL0_DST_PORT_MASK    0x7
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY86_CTRL1    0x0fad
+#define    RTL8367C_SVLAN_SP2C_ENTRY86_CTRL1_VALID_OFFSET    12
+#define    RTL8367C_SVLAN_SP2C_ENTRY86_CTRL1_VALID_MASK    0x1000
+#define    RTL8367C_SVLAN_SP2C_ENTRY86_CTRL1_VID_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY86_CTRL1_VID_MASK    0xFFF
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY87_CTRL0    0x0fae
+#define    RTL8367C_SVLAN_SP2C_ENTRY87_CTRL0_DST_PORT1_OFFSET    9
+#define    RTL8367C_SVLAN_SP2C_ENTRY87_CTRL0_DST_PORT1_MASK    0x200
+#define    RTL8367C_SVLAN_SP2C_ENTRY87_CTRL0_SVIDX_OFFSET    3
+#define    RTL8367C_SVLAN_SP2C_ENTRY87_CTRL0_SVIDX_MASK    0x1F8
+#define    RTL8367C_SVLAN_SP2C_ENTRY87_CTRL0_DST_PORT_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY87_CTRL0_DST_PORT_MASK    0x7
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY87_CTRL1    0x0faf
+#define    RTL8367C_SVLAN_SP2C_ENTRY87_CTRL1_VALID_OFFSET    12
+#define    RTL8367C_SVLAN_SP2C_ENTRY87_CTRL1_VALID_MASK    0x1000
+#define    RTL8367C_SVLAN_SP2C_ENTRY87_CTRL1_VID_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY87_CTRL1_VID_MASK    0xFFF
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY88_CTRL0    0x0fb0
+#define    RTL8367C_SVLAN_SP2C_ENTRY88_CTRL0_DST_PORT1_OFFSET    9
+#define    RTL8367C_SVLAN_SP2C_ENTRY88_CTRL0_DST_PORT1_MASK    0x200
+#define    RTL8367C_SVLAN_SP2C_ENTRY88_CTRL0_SVIDX_OFFSET    3
+#define    RTL8367C_SVLAN_SP2C_ENTRY88_CTRL0_SVIDX_MASK    0x1F8
+#define    RTL8367C_SVLAN_SP2C_ENTRY88_CTRL0_DST_PORT_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY88_CTRL0_DST_PORT_MASK    0x7
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY88_CTRL1    0x0fb1
+#define    RTL8367C_SVLAN_SP2C_ENTRY88_CTRL1_VALID_OFFSET    12
+#define    RTL8367C_SVLAN_SP2C_ENTRY88_CTRL1_VALID_MASK    0x1000
+#define    RTL8367C_SVLAN_SP2C_ENTRY88_CTRL1_VID_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY88_CTRL1_VID_MASK    0xFFF
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY89_CTRL0    0x0fb2
+#define    RTL8367C_SVLAN_SP2C_ENTRY89_CTRL0_DST_PORT1_OFFSET    9
+#define    RTL8367C_SVLAN_SP2C_ENTRY89_CTRL0_DST_PORT1_MASK    0x200
+#define    RTL8367C_SVLAN_SP2C_ENTRY89_CTRL0_SVIDX_OFFSET    3
+#define    RTL8367C_SVLAN_SP2C_ENTRY89_CTRL0_SVIDX_MASK    0x1F8
+#define    RTL8367C_SVLAN_SP2C_ENTRY89_CTRL0_DST_PORT_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY89_CTRL0_DST_PORT_MASK    0x7
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY89_CTRL1    0x0fb3
+#define    RTL8367C_SVLAN_SP2C_ENTRY89_CTRL1_VALID_OFFSET    12
+#define    RTL8367C_SVLAN_SP2C_ENTRY89_CTRL1_VALID_MASK    0x1000
+#define    RTL8367C_SVLAN_SP2C_ENTRY89_CTRL1_VID_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY89_CTRL1_VID_MASK    0xFFF
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY90_CTRL0    0x0fb4
+#define    RTL8367C_SVLAN_SP2C_ENTRY90_CTRL0_DST_PORT1_OFFSET    9
+#define    RTL8367C_SVLAN_SP2C_ENTRY90_CTRL0_DST_PORT1_MASK    0x200
+#define    RTL8367C_SVLAN_SP2C_ENTRY90_CTRL0_SVIDX_OFFSET    3
+#define    RTL8367C_SVLAN_SP2C_ENTRY90_CTRL0_SVIDX_MASK    0x1F8
+#define    RTL8367C_SVLAN_SP2C_ENTRY90_CTRL0_DST_PORT_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY90_CTRL0_DST_PORT_MASK    0x7
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY90_CTRL1    0x0fb5
+#define    RTL8367C_SVLAN_SP2C_ENTRY90_CTRL1_VALID_OFFSET    12
+#define    RTL8367C_SVLAN_SP2C_ENTRY90_CTRL1_VALID_MASK    0x1000
+#define    RTL8367C_SVLAN_SP2C_ENTRY90_CTRL1_VID_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY90_CTRL1_VID_MASK    0xFFF
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY91_CTRL0    0x0fb6
+#define    RTL8367C_SVLAN_SP2C_ENTRY91_CTRL0_DST_PORT1_OFFSET    9
+#define    RTL8367C_SVLAN_SP2C_ENTRY91_CTRL0_DST_PORT1_MASK    0x200
+#define    RTL8367C_SVLAN_SP2C_ENTRY91_CTRL0_SVIDX_OFFSET    3
+#define    RTL8367C_SVLAN_SP2C_ENTRY91_CTRL0_SVIDX_MASK    0x1F8
+#define    RTL8367C_SVLAN_SP2C_ENTRY91_CTRL0_DST_PORT_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY91_CTRL0_DST_PORT_MASK    0x7
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY91_CTRL1    0x0fb7
+#define    RTL8367C_SVLAN_SP2C_ENTRY91_CTRL1_VALID_OFFSET    12
+#define    RTL8367C_SVLAN_SP2C_ENTRY91_CTRL1_VALID_MASK    0x1000
+#define    RTL8367C_SVLAN_SP2C_ENTRY91_CTRL1_VID_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY91_CTRL1_VID_MASK    0xFFF
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY92_CTRL0    0x0fb8
+#define    RTL8367C_SVLAN_SP2C_ENTRY92_CTRL0_DST_PORT1_OFFSET    9
+#define    RTL8367C_SVLAN_SP2C_ENTRY92_CTRL0_DST_PORT1_MASK    0x200
+#define    RTL8367C_SVLAN_SP2C_ENTRY92_CTRL0_SVIDX_OFFSET    3
+#define    RTL8367C_SVLAN_SP2C_ENTRY92_CTRL0_SVIDX_MASK    0x1F8
+#define    RTL8367C_SVLAN_SP2C_ENTRY92_CTRL0_DST_PORT_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY92_CTRL0_DST_PORT_MASK    0x7
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY92_CTRL1    0x0fb9
+#define    RTL8367C_SVLAN_SP2C_ENTRY92_CTRL1_VALID_OFFSET    12
+#define    RTL8367C_SVLAN_SP2C_ENTRY92_CTRL1_VALID_MASK    0x1000
+#define    RTL8367C_SVLAN_SP2C_ENTRY92_CTRL1_VID_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY92_CTRL1_VID_MASK    0xFFF
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY93_CTRL0    0x0fba
+#define    RTL8367C_SVLAN_SP2C_ENTRY93_CTRL0_DST_PORT1_OFFSET    9
+#define    RTL8367C_SVLAN_SP2C_ENTRY93_CTRL0_DST_PORT1_MASK    0x200
+#define    RTL8367C_SVLAN_SP2C_ENTRY93_CTRL0_SVIDX_OFFSET    3
+#define    RTL8367C_SVLAN_SP2C_ENTRY93_CTRL0_SVIDX_MASK    0x1F8
+#define    RTL8367C_SVLAN_SP2C_ENTRY93_CTRL0_DST_PORT_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY93_CTRL0_DST_PORT_MASK    0x7
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY93_CTRL1    0x0fbb
+#define    RTL8367C_SVLAN_SP2C_ENTRY93_CTRL1_VALID_OFFSET    12
+#define    RTL8367C_SVLAN_SP2C_ENTRY93_CTRL1_VALID_MASK    0x1000
+#define    RTL8367C_SVLAN_SP2C_ENTRY93_CTRL1_VID_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY93_CTRL1_VID_MASK    0xFFF
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY94_CTRL0    0x0fbc
+#define    RTL8367C_SVLAN_SP2C_ENTRY94_CTRL0_DST_PORT1_OFFSET    9
+#define    RTL8367C_SVLAN_SP2C_ENTRY94_CTRL0_DST_PORT1_MASK    0x200
+#define    RTL8367C_SVLAN_SP2C_ENTRY94_CTRL0_SVIDX_OFFSET    3
+#define    RTL8367C_SVLAN_SP2C_ENTRY94_CTRL0_SVIDX_MASK    0x1F8
+#define    RTL8367C_SVLAN_SP2C_ENTRY94_CTRL0_DST_PORT_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY94_CTRL0_DST_PORT_MASK    0x7
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY94_CTRL1    0x0fbd
+#define    RTL8367C_SVLAN_SP2C_ENTRY94_CTRL1_VALID_OFFSET    12
+#define    RTL8367C_SVLAN_SP2C_ENTRY94_CTRL1_VALID_MASK    0x1000
+#define    RTL8367C_SVLAN_SP2C_ENTRY94_CTRL1_VID_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY94_CTRL1_VID_MASK    0xFFF
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY95_CTRL0    0x0fbe
+#define    RTL8367C_SVLAN_SP2C_ENTRY95_CTRL0_DST_PORT1_OFFSET    9
+#define    RTL8367C_SVLAN_SP2C_ENTRY95_CTRL0_DST_PORT1_MASK    0x200
+#define    RTL8367C_SVLAN_SP2C_ENTRY95_CTRL0_SVIDX_OFFSET    3
+#define    RTL8367C_SVLAN_SP2C_ENTRY95_CTRL0_SVIDX_MASK    0x1F8
+#define    RTL8367C_SVLAN_SP2C_ENTRY95_CTRL0_DST_PORT_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY95_CTRL0_DST_PORT_MASK    0x7
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY95_CTRL1    0x0fbf
+#define    RTL8367C_SVLAN_SP2C_ENTRY95_CTRL1_VALID_OFFSET    12
+#define    RTL8367C_SVLAN_SP2C_ENTRY95_CTRL1_VALID_MASK    0x1000
+#define    RTL8367C_SVLAN_SP2C_ENTRY95_CTRL1_VID_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY95_CTRL1_VID_MASK    0xFFF
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY96_CTRL0    0x0fc0
+#define    RTL8367C_SVLAN_SP2C_ENTRY96_CTRL0_DST_PORT1_OFFSET    9
+#define    RTL8367C_SVLAN_SP2C_ENTRY96_CTRL0_DST_PORT1_MASK    0x200
+#define    RTL8367C_SVLAN_SP2C_ENTRY96_CTRL0_SVIDX_OFFSET    3
+#define    RTL8367C_SVLAN_SP2C_ENTRY96_CTRL0_SVIDX_MASK    0x1F8
+#define    RTL8367C_SVLAN_SP2C_ENTRY96_CTRL0_DST_PORT_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY96_CTRL0_DST_PORT_MASK    0x7
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY96_CTRL1    0x0fc1
+#define    RTL8367C_SVLAN_SP2C_ENTRY96_CTRL1_VALID_OFFSET    12
+#define    RTL8367C_SVLAN_SP2C_ENTRY96_CTRL1_VALID_MASK    0x1000
+#define    RTL8367C_SVLAN_SP2C_ENTRY96_CTRL1_VID_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY96_CTRL1_VID_MASK    0xFFF
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY97_CTRL0    0x0fc2
+#define    RTL8367C_SVLAN_SP2C_ENTRY97_CTRL0_DST_PORT1_OFFSET    9
+#define    RTL8367C_SVLAN_SP2C_ENTRY97_CTRL0_DST_PORT1_MASK    0x200
+#define    RTL8367C_SVLAN_SP2C_ENTRY97_CTRL0_SVIDX_OFFSET    3
+#define    RTL8367C_SVLAN_SP2C_ENTRY97_CTRL0_SVIDX_MASK    0x1F8
+#define    RTL8367C_SVLAN_SP2C_ENTRY97_CTRL0_DST_PORT_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY97_CTRL0_DST_PORT_MASK    0x7
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY97_CTRL1    0x0fc3
+#define    RTL8367C_SVLAN_SP2C_ENTRY97_CTRL1_VALID_OFFSET    12
+#define    RTL8367C_SVLAN_SP2C_ENTRY97_CTRL1_VALID_MASK    0x1000
+#define    RTL8367C_SVLAN_SP2C_ENTRY97_CTRL1_VID_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY97_CTRL1_VID_MASK    0xFFF
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY98_CTRL0    0x0fc4
+#define    RTL8367C_SVLAN_SP2C_ENTRY98_CTRL0_DST_PORT1_OFFSET    9
+#define    RTL8367C_SVLAN_SP2C_ENTRY98_CTRL0_DST_PORT1_MASK    0x200
+#define    RTL8367C_SVLAN_SP2C_ENTRY98_CTRL0_SVIDX_OFFSET    3
+#define    RTL8367C_SVLAN_SP2C_ENTRY98_CTRL0_SVIDX_MASK    0x1F8
+#define    RTL8367C_SVLAN_SP2C_ENTRY98_CTRL0_DST_PORT_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY98_CTRL0_DST_PORT_MASK    0x7
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY98_CTRL1    0x0fc5
+#define    RTL8367C_SVLAN_SP2C_ENTRY98_CTRL1_VALID_OFFSET    12
+#define    RTL8367C_SVLAN_SP2C_ENTRY98_CTRL1_VALID_MASK    0x1000
+#define    RTL8367C_SVLAN_SP2C_ENTRY98_CTRL1_VID_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY98_CTRL1_VID_MASK    0xFFF
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY99_CTRL0    0x0fc6
+#define    RTL8367C_SVLAN_SP2C_ENTRY99_CTRL0_DST_PORT1_OFFSET    9
+#define    RTL8367C_SVLAN_SP2C_ENTRY99_CTRL0_DST_PORT1_MASK    0x200
+#define    RTL8367C_SVLAN_SP2C_ENTRY99_CTRL0_SVIDX_OFFSET    3
+#define    RTL8367C_SVLAN_SP2C_ENTRY99_CTRL0_SVIDX_MASK    0x1F8
+#define    RTL8367C_SVLAN_SP2C_ENTRY99_CTRL0_DST_PORT_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY99_CTRL0_DST_PORT_MASK    0x7
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY99_CTRL1    0x0fc7
+#define    RTL8367C_SVLAN_SP2C_ENTRY99_CTRL1_VALID_OFFSET    12
+#define    RTL8367C_SVLAN_SP2C_ENTRY99_CTRL1_VALID_MASK    0x1000
+#define    RTL8367C_SVLAN_SP2C_ENTRY99_CTRL1_VID_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY99_CTRL1_VID_MASK    0xFFF
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY100_CTRL0    0x0fc8
+#define    RTL8367C_SVLAN_SP2C_ENTRY100_CTRL0_DST_PORT1_OFFSET    9
+#define    RTL8367C_SVLAN_SP2C_ENTRY100_CTRL0_DST_PORT1_MASK    0x200
+#define    RTL8367C_SVLAN_SP2C_ENTRY100_CTRL0_SVIDX_OFFSET    3
+#define    RTL8367C_SVLAN_SP2C_ENTRY100_CTRL0_SVIDX_MASK    0x1F8
+#define    RTL8367C_SVLAN_SP2C_ENTRY100_CTRL0_DST_PORT_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY100_CTRL0_DST_PORT_MASK    0x7
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY100_CTRL1    0x0fc9
+#define    RTL8367C_SVLAN_SP2C_ENTRY100_CTRL1_VALID_OFFSET    12
+#define    RTL8367C_SVLAN_SP2C_ENTRY100_CTRL1_VALID_MASK    0x1000
+#define    RTL8367C_SVLAN_SP2C_ENTRY100_CTRL1_VID_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY100_CTRL1_VID_MASK    0xFFF
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY101_CTRL0    0x0fca
+#define    RTL8367C_SVLAN_SP2C_ENTRY101_CTRL0_DST_PORT1_OFFSET    9
+#define    RTL8367C_SVLAN_SP2C_ENTRY101_CTRL0_DST_PORT1_MASK    0x200
+#define    RTL8367C_SVLAN_SP2C_ENTRY101_CTRL0_SVIDX_OFFSET    3
+#define    RTL8367C_SVLAN_SP2C_ENTRY101_CTRL0_SVIDX_MASK    0x1F8
+#define    RTL8367C_SVLAN_SP2C_ENTRY101_CTRL0_DST_PORT_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY101_CTRL0_DST_PORT_MASK    0x7
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY101_CTRL1    0x0fcb
+#define    RTL8367C_SVLAN_SP2C_ENTRY101_CTRL1_VALID_OFFSET    12
+#define    RTL8367C_SVLAN_SP2C_ENTRY101_CTRL1_VALID_MASK    0x1000
+#define    RTL8367C_SVLAN_SP2C_ENTRY101_CTRL1_VID_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY101_CTRL1_VID_MASK    0xFFF
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY102_CTRL0    0x0fcc
+#define    RTL8367C_SVLAN_SP2C_ENTRY102_CTRL0_DST_PORT1_OFFSET    9
+#define    RTL8367C_SVLAN_SP2C_ENTRY102_CTRL0_DST_PORT1_MASK    0x200
+#define    RTL8367C_SVLAN_SP2C_ENTRY102_CTRL0_SVIDX_OFFSET    3
+#define    RTL8367C_SVLAN_SP2C_ENTRY102_CTRL0_SVIDX_MASK    0x1F8
+#define    RTL8367C_SVLAN_SP2C_ENTRY102_CTRL0_DST_PORT_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY102_CTRL0_DST_PORT_MASK    0x7
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY102_CTRL1    0x0fcd
+#define    RTL8367C_SVLAN_SP2C_ENTRY102_CTRL1_VALID_OFFSET    12
+#define    RTL8367C_SVLAN_SP2C_ENTRY102_CTRL1_VALID_MASK    0x1000
+#define    RTL8367C_SVLAN_SP2C_ENTRY102_CTRL1_VID_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY102_CTRL1_VID_MASK    0xFFF
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY103_CTRL0    0x0fce
+#define    RTL8367C_SVLAN_SP2C_ENTRY103_CTRL0_DST_PORT1_OFFSET    9
+#define    RTL8367C_SVLAN_SP2C_ENTRY103_CTRL0_DST_PORT1_MASK    0x200
+#define    RTL8367C_SVLAN_SP2C_ENTRY103_CTRL0_SVIDX_OFFSET    3
+#define    RTL8367C_SVLAN_SP2C_ENTRY103_CTRL0_SVIDX_MASK    0x1F8
+#define    RTL8367C_SVLAN_SP2C_ENTRY103_CTRL0_DST_PORT_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY103_CTRL0_DST_PORT_MASK    0x7
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY103_CTRL1    0x0fcf
+#define    RTL8367C_SVLAN_SP2C_ENTRY103_CTRL1_VALID_OFFSET    12
+#define    RTL8367C_SVLAN_SP2C_ENTRY103_CTRL1_VALID_MASK    0x1000
+#define    RTL8367C_SVLAN_SP2C_ENTRY103_CTRL1_VID_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY103_CTRL1_VID_MASK    0xFFF
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY104_CTRL0    0x0fd0
+#define    RTL8367C_SVLAN_SP2C_ENTRY104_CTRL0_DST_PORT1_OFFSET    9
+#define    RTL8367C_SVLAN_SP2C_ENTRY104_CTRL0_DST_PORT1_MASK    0x200
+#define    RTL8367C_SVLAN_SP2C_ENTRY104_CTRL0_SVIDX_OFFSET    3
+#define    RTL8367C_SVLAN_SP2C_ENTRY104_CTRL0_SVIDX_MASK    0x1F8
+#define    RTL8367C_SVLAN_SP2C_ENTRY104_CTRL0_DST_PORT_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY104_CTRL0_DST_PORT_MASK    0x7
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY104_CTRL1    0x0fd1
+#define    RTL8367C_SVLAN_SP2C_ENTRY104_CTRL1_VALID_OFFSET    12
+#define    RTL8367C_SVLAN_SP2C_ENTRY104_CTRL1_VALID_MASK    0x1000
+#define    RTL8367C_SVLAN_SP2C_ENTRY104_CTRL1_VID_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY104_CTRL1_VID_MASK    0xFFF
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY105_CTRL0    0x0fd2
+#define    RTL8367C_SVLAN_SP2C_ENTRY105_CTRL0_DST_PORT1_OFFSET    9
+#define    RTL8367C_SVLAN_SP2C_ENTRY105_CTRL0_DST_PORT1_MASK    0x200
+#define    RTL8367C_SVLAN_SP2C_ENTRY105_CTRL0_SVIDX_OFFSET    3
+#define    RTL8367C_SVLAN_SP2C_ENTRY105_CTRL0_SVIDX_MASK    0x1F8
+#define    RTL8367C_SVLAN_SP2C_ENTRY105_CTRL0_DST_PORT_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY105_CTRL0_DST_PORT_MASK    0x7
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY105_CTRL1    0x0fd3
+#define    RTL8367C_SVLAN_SP2C_ENTRY105_CTRL1_VALID_OFFSET    12
+#define    RTL8367C_SVLAN_SP2C_ENTRY105_CTRL1_VALID_MASK    0x1000
+#define    RTL8367C_SVLAN_SP2C_ENTRY105_CTRL1_VID_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY105_CTRL1_VID_MASK    0xFFF
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY106_CTRL0    0x0fd4
+#define    RTL8367C_SVLAN_SP2C_ENTRY106_CTRL0_DST_PORT1_OFFSET    9
+#define    RTL8367C_SVLAN_SP2C_ENTRY106_CTRL0_DST_PORT1_MASK    0x200
+#define    RTL8367C_SVLAN_SP2C_ENTRY106_CTRL0_SVIDX_OFFSET    3
+#define    RTL8367C_SVLAN_SP2C_ENTRY106_CTRL0_SVIDX_MASK    0x1F8
+#define    RTL8367C_SVLAN_SP2C_ENTRY106_CTRL0_DST_PORT_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY106_CTRL0_DST_PORT_MASK    0x7
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY106_CTRL1    0x0fd5
+#define    RTL8367C_SVLAN_SP2C_ENTRY106_CTRL1_VALID_OFFSET    12
+#define    RTL8367C_SVLAN_SP2C_ENTRY106_CTRL1_VALID_MASK    0x1000
+#define    RTL8367C_SVLAN_SP2C_ENTRY106_CTRL1_VID_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY106_CTRL1_VID_MASK    0xFFF
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY107_CTRL0    0x0fd6
+#define    RTL8367C_SVLAN_SP2C_ENTRY107_CTRL0_DST_PORT1_OFFSET    9
+#define    RTL8367C_SVLAN_SP2C_ENTRY107_CTRL0_DST_PORT1_MASK    0x200
+#define    RTL8367C_SVLAN_SP2C_ENTRY107_CTRL0_SVIDX_OFFSET    3
+#define    RTL8367C_SVLAN_SP2C_ENTRY107_CTRL0_SVIDX_MASK    0x1F8
+#define    RTL8367C_SVLAN_SP2C_ENTRY107_CTRL0_DST_PORT_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY107_CTRL0_DST_PORT_MASK    0x7
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY107_CTRL1    0x0fd7
+#define    RTL8367C_SVLAN_SP2C_ENTRY107_CTRL1_VALID_OFFSET    12
+#define    RTL8367C_SVLAN_SP2C_ENTRY107_CTRL1_VALID_MASK    0x1000
+#define    RTL8367C_SVLAN_SP2C_ENTRY107_CTRL1_VID_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY107_CTRL1_VID_MASK    0xFFF
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY108_CTRL0    0x0fd8
+#define    RTL8367C_SVLAN_SP2C_ENTRY108_CTRL0_DST_PORT1_OFFSET    9
+#define    RTL8367C_SVLAN_SP2C_ENTRY108_CTRL0_DST_PORT1_MASK    0x200
+#define    RTL8367C_SVLAN_SP2C_ENTRY108_CTRL0_SVIDX_OFFSET    3
+#define    RTL8367C_SVLAN_SP2C_ENTRY108_CTRL0_SVIDX_MASK    0x1F8
+#define    RTL8367C_SVLAN_SP2C_ENTRY108_CTRL0_DST_PORT_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY108_CTRL0_DST_PORT_MASK    0x7
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY108_CTRL1    0x0fd9
+#define    RTL8367C_SVLAN_SP2C_ENTRY108_CTRL1_VALID_OFFSET    12
+#define    RTL8367C_SVLAN_SP2C_ENTRY108_CTRL1_VALID_MASK    0x1000
+#define    RTL8367C_SVLAN_SP2C_ENTRY108_CTRL1_VID_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY108_CTRL1_VID_MASK    0xFFF
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY109_CTRL0    0x0fda
+#define    RTL8367C_SVLAN_SP2C_ENTRY109_CTRL0_DST_PORT1_OFFSET    9
+#define    RTL8367C_SVLAN_SP2C_ENTRY109_CTRL0_DST_PORT1_MASK    0x200
+#define    RTL8367C_SVLAN_SP2C_ENTRY109_CTRL0_SVIDX_OFFSET    3
+#define    RTL8367C_SVLAN_SP2C_ENTRY109_CTRL0_SVIDX_MASK    0x1F8
+#define    RTL8367C_SVLAN_SP2C_ENTRY109_CTRL0_DST_PORT_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY109_CTRL0_DST_PORT_MASK    0x7
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY109_CTRL1    0x0fdb
+#define    RTL8367C_SVLAN_SP2C_ENTRY109_CTRL1_VALID_OFFSET    12
+#define    RTL8367C_SVLAN_SP2C_ENTRY109_CTRL1_VALID_MASK    0x1000
+#define    RTL8367C_SVLAN_SP2C_ENTRY109_CTRL1_VID_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY109_CTRL1_VID_MASK    0xFFF
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY110_CTRL0    0x0fdc
+#define    RTL8367C_SVLAN_SP2C_ENTRY110_CTRL0_DST_PORT1_OFFSET    9
+#define    RTL8367C_SVLAN_SP2C_ENTRY110_CTRL0_DST_PORT1_MASK    0x200
+#define    RTL8367C_SVLAN_SP2C_ENTRY110_CTRL0_SVIDX_OFFSET    3
+#define    RTL8367C_SVLAN_SP2C_ENTRY110_CTRL0_SVIDX_MASK    0x1F8
+#define    RTL8367C_SVLAN_SP2C_ENTRY110_CTRL0_DST_PORT_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY110_CTRL0_DST_PORT_MASK    0x7
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY110_CTRL1    0x0fdd
+#define    RTL8367C_SVLAN_SP2C_ENTRY110_CTRL1_VALID_OFFSET    12
+#define    RTL8367C_SVLAN_SP2C_ENTRY110_CTRL1_VALID_MASK    0x1000
+#define    RTL8367C_SVLAN_SP2C_ENTRY110_CTRL1_VID_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY110_CTRL1_VID_MASK    0xFFF
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY111_CTRL0    0x0fde
+#define    RTL8367C_SVLAN_SP2C_ENTRY111_CTRL0_DST_PORT1_OFFSET    9
+#define    RTL8367C_SVLAN_SP2C_ENTRY111_CTRL0_DST_PORT1_MASK    0x200
+#define    RTL8367C_SVLAN_SP2C_ENTRY111_CTRL0_SVIDX_OFFSET    3
+#define    RTL8367C_SVLAN_SP2C_ENTRY111_CTRL0_SVIDX_MASK    0x1F8
+#define    RTL8367C_SVLAN_SP2C_ENTRY111_CTRL0_DST_PORT_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY111_CTRL0_DST_PORT_MASK    0x7
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY111_CTRL1    0x0fdf
+#define    RTL8367C_SVLAN_SP2C_ENTRY111_CTRL1_VALID_OFFSET    12
+#define    RTL8367C_SVLAN_SP2C_ENTRY111_CTRL1_VALID_MASK    0x1000
+#define    RTL8367C_SVLAN_SP2C_ENTRY111_CTRL1_VID_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY111_CTRL1_VID_MASK    0xFFF
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY112_CTRL0    0x0fe0
+#define    RTL8367C_SVLAN_SP2C_ENTRY112_CTRL0_DST_PORT1_OFFSET    9
+#define    RTL8367C_SVLAN_SP2C_ENTRY112_CTRL0_DST_PORT1_MASK    0x200
+#define    RTL8367C_SVLAN_SP2C_ENTRY112_CTRL0_SVIDX_OFFSET    3
+#define    RTL8367C_SVLAN_SP2C_ENTRY112_CTRL0_SVIDX_MASK    0x1F8
+#define    RTL8367C_SVLAN_SP2C_ENTRY112_CTRL0_DST_PORT_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY112_CTRL0_DST_PORT_MASK    0x7
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY112_CTRL1    0x0fe1
+#define    RTL8367C_SVLAN_SP2C_ENTRY112_CTRL1_VALID_OFFSET    12
+#define    RTL8367C_SVLAN_SP2C_ENTRY112_CTRL1_VALID_MASK    0x1000
+#define    RTL8367C_SVLAN_SP2C_ENTRY112_CTRL1_VID_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY112_CTRL1_VID_MASK    0xFFF
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY113_CTRL0    0x0fe2
+#define    RTL8367C_SVLAN_SP2C_ENTRY113_CTRL0_DST_PORT1_OFFSET    9
+#define    RTL8367C_SVLAN_SP2C_ENTRY113_CTRL0_DST_PORT1_MASK    0x200
+#define    RTL8367C_SVLAN_SP2C_ENTRY113_CTRL0_SVIDX_OFFSET    3
+#define    RTL8367C_SVLAN_SP2C_ENTRY113_CTRL0_SVIDX_MASK    0x1F8
+#define    RTL8367C_SVLAN_SP2C_ENTRY113_CTRL0_DST_PORT_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY113_CTRL0_DST_PORT_MASK    0x7
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY113_CTRL1    0x0fe3
+#define    RTL8367C_SVLAN_SP2C_ENTRY113_CTRL1_VALID_OFFSET    12
+#define    RTL8367C_SVLAN_SP2C_ENTRY113_CTRL1_VALID_MASK    0x1000
+#define    RTL8367C_SVLAN_SP2C_ENTRY113_CTRL1_VID_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY113_CTRL1_VID_MASK    0xFFF
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY114_CTRL0    0x0fe4
+#define    RTL8367C_SVLAN_SP2C_ENTRY114_CTRL0_DST_PORT1_OFFSET    9
+#define    RTL8367C_SVLAN_SP2C_ENTRY114_CTRL0_DST_PORT1_MASK    0x200
+#define    RTL8367C_SVLAN_SP2C_ENTRY114_CTRL0_SVIDX_OFFSET    3
+#define    RTL8367C_SVLAN_SP2C_ENTRY114_CTRL0_SVIDX_MASK    0x1F8
+#define    RTL8367C_SVLAN_SP2C_ENTRY114_CTRL0_DST_PORT_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY114_CTRL0_DST_PORT_MASK    0x7
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY114_CTRL1    0x0fe5
+#define    RTL8367C_SVLAN_SP2C_ENTRY114_CTRL1_VALID_OFFSET    12
+#define    RTL8367C_SVLAN_SP2C_ENTRY114_CTRL1_VALID_MASK    0x1000
+#define    RTL8367C_SVLAN_SP2C_ENTRY114_CTRL1_VID_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY114_CTRL1_VID_MASK    0xFFF
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY115_CTRL0    0x0fe6
+#define    RTL8367C_SVLAN_SP2C_ENTRY115_CTRL0_DST_PORT1_OFFSET    9
+#define    RTL8367C_SVLAN_SP2C_ENTRY115_CTRL0_DST_PORT1_MASK    0x200
+#define    RTL8367C_SVLAN_SP2C_ENTRY115_CTRL0_SVIDX_OFFSET    3
+#define    RTL8367C_SVLAN_SP2C_ENTRY115_CTRL0_SVIDX_MASK    0x1F8
+#define    RTL8367C_SVLAN_SP2C_ENTRY115_CTRL0_DST_PORT_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY115_CTRL0_DST_PORT_MASK    0x7
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY115_CTRL1    0x0fe7
+#define    RTL8367C_SVLAN_SP2C_ENTRY115_CTRL1_VALID_OFFSET    12
+#define    RTL8367C_SVLAN_SP2C_ENTRY115_CTRL1_VALID_MASK    0x1000
+#define    RTL8367C_SVLAN_SP2C_ENTRY115_CTRL1_VID_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY115_CTRL1_VID_MASK    0xFFF
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY116_CTRL0    0x0fe8
+#define    RTL8367C_SVLAN_SP2C_ENTRY116_CTRL0_DST_PORT1_OFFSET    9
+#define    RTL8367C_SVLAN_SP2C_ENTRY116_CTRL0_DST_PORT1_MASK    0x200
+#define    RTL8367C_SVLAN_SP2C_ENTRY116_CTRL0_SVIDX_OFFSET    3
+#define    RTL8367C_SVLAN_SP2C_ENTRY116_CTRL0_SVIDX_MASK    0x1F8
+#define    RTL8367C_SVLAN_SP2C_ENTRY116_CTRL0_DST_PORT_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY116_CTRL0_DST_PORT_MASK    0x7
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY116_CTRL1    0x0fe9
+#define    RTL8367C_SVLAN_SP2C_ENTRY116_CTRL1_VALID_OFFSET    12
+#define    RTL8367C_SVLAN_SP2C_ENTRY116_CTRL1_VALID_MASK    0x1000
+#define    RTL8367C_SVLAN_SP2C_ENTRY116_CTRL1_VID_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY116_CTRL1_VID_MASK    0xFFF
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY117_CTRL0    0x0fea
+#define    RTL8367C_SVLAN_SP2C_ENTRY117_CTRL0_DST_PORT1_OFFSET    9
+#define    RTL8367C_SVLAN_SP2C_ENTRY117_CTRL0_DST_PORT1_MASK    0x200
+#define    RTL8367C_SVLAN_SP2C_ENTRY117_CTRL0_SVIDX_OFFSET    3
+#define    RTL8367C_SVLAN_SP2C_ENTRY117_CTRL0_SVIDX_MASK    0x1F8
+#define    RTL8367C_SVLAN_SP2C_ENTRY117_CTRL0_DST_PORT_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY117_CTRL0_DST_PORT_MASK    0x7
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY117_CTRL1    0x0feb
+#define    RTL8367C_SVLAN_SP2C_ENTRY117_CTRL1_VALID_OFFSET    12
+#define    RTL8367C_SVLAN_SP2C_ENTRY117_CTRL1_VALID_MASK    0x1000
+#define    RTL8367C_SVLAN_SP2C_ENTRY117_CTRL1_VID_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY117_CTRL1_VID_MASK    0xFFF
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY118_CTRL0    0x0fec
+#define    RTL8367C_SVLAN_SP2C_ENTRY118_CTRL0_DST_PORT1_OFFSET    9
+#define    RTL8367C_SVLAN_SP2C_ENTRY118_CTRL0_DST_PORT1_MASK    0x200
+#define    RTL8367C_SVLAN_SP2C_ENTRY118_CTRL0_SVIDX_OFFSET    3
+#define    RTL8367C_SVLAN_SP2C_ENTRY118_CTRL0_SVIDX_MASK    0x1F8
+#define    RTL8367C_SVLAN_SP2C_ENTRY118_CTRL0_DST_PORT_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY118_CTRL0_DST_PORT_MASK    0x7
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY118_CTRL1    0x0fed
+#define    RTL8367C_SVLAN_SP2C_ENTRY118_CTRL1_VALID_OFFSET    12
+#define    RTL8367C_SVLAN_SP2C_ENTRY118_CTRL1_VALID_MASK    0x1000
+#define    RTL8367C_SVLAN_SP2C_ENTRY118_CTRL1_VID_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY118_CTRL1_VID_MASK    0xFFF
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY119_CTRL0    0x0fee
+#define    RTL8367C_SVLAN_SP2C_ENTRY119_CTRL0_DST_PORT1_OFFSET    9
+#define    RTL8367C_SVLAN_SP2C_ENTRY119_CTRL0_DST_PORT1_MASK    0x200
+#define    RTL8367C_SVLAN_SP2C_ENTRY119_CTRL0_SVIDX_OFFSET    3
+#define    RTL8367C_SVLAN_SP2C_ENTRY119_CTRL0_SVIDX_MASK    0x1F8
+#define    RTL8367C_SVLAN_SP2C_ENTRY119_CTRL0_DST_PORT_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY119_CTRL0_DST_PORT_MASK    0x7
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY119_CTRL1    0x0fef
+#define    RTL8367C_SVLAN_SP2C_ENTRY119_CTRL1_VALID_OFFSET    12
+#define    RTL8367C_SVLAN_SP2C_ENTRY119_CTRL1_VALID_MASK    0x1000
+#define    RTL8367C_SVLAN_SP2C_ENTRY119_CTRL1_VID_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY119_CTRL1_VID_MASK    0xFFF
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY120_CTRL0    0x0ff0
+#define    RTL8367C_SVLAN_SP2C_ENTRY120_CTRL0_DST_PORT1_OFFSET    9
+#define    RTL8367C_SVLAN_SP2C_ENTRY120_CTRL0_DST_PORT1_MASK    0x200
+#define    RTL8367C_SVLAN_SP2C_ENTRY120_CTRL0_SVIDX_OFFSET    3
+#define    RTL8367C_SVLAN_SP2C_ENTRY120_CTRL0_SVIDX_MASK    0x1F8
+#define    RTL8367C_SVLAN_SP2C_ENTRY120_CTRL0_DST_PORT_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY120_CTRL0_DST_PORT_MASK    0x7
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY120_CTRL1    0x0ff1
+#define    RTL8367C_SVLAN_SP2C_ENTRY120_CTRL1_VALID_OFFSET    12
+#define    RTL8367C_SVLAN_SP2C_ENTRY120_CTRL1_VALID_MASK    0x1000
+#define    RTL8367C_SVLAN_SP2C_ENTRY120_CTRL1_VID_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY120_CTRL1_VID_MASK    0xFFF
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY121_CTRL0    0x0ff2
+#define    RTL8367C_SVLAN_SP2C_ENTRY121_CTRL0_DST_PORT1_OFFSET    9
+#define    RTL8367C_SVLAN_SP2C_ENTRY121_CTRL0_DST_PORT1_MASK    0x200
+#define    RTL8367C_SVLAN_SP2C_ENTRY121_CTRL0_SVIDX_OFFSET    3
+#define    RTL8367C_SVLAN_SP2C_ENTRY121_CTRL0_SVIDX_MASK    0x1F8
+#define    RTL8367C_SVLAN_SP2C_ENTRY121_CTRL0_DST_PORT_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY121_CTRL0_DST_PORT_MASK    0x7
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY121_CTRL1    0x0ff3
+#define    RTL8367C_SVLAN_SP2C_ENTRY121_CTRL1_VALID_OFFSET    12
+#define    RTL8367C_SVLAN_SP2C_ENTRY121_CTRL1_VALID_MASK    0x1000
+#define    RTL8367C_SVLAN_SP2C_ENTRY121_CTRL1_VID_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY121_CTRL1_VID_MASK    0xFFF
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY122_CTRL0    0x0ff4
+#define    RTL8367C_SVLAN_SP2C_ENTRY122_CTRL0_DST_PORT1_OFFSET    9
+#define    RTL8367C_SVLAN_SP2C_ENTRY122_CTRL0_DST_PORT1_MASK    0x200
+#define    RTL8367C_SVLAN_SP2C_ENTRY122_CTRL0_SVIDX_OFFSET    3
+#define    RTL8367C_SVLAN_SP2C_ENTRY122_CTRL0_SVIDX_MASK    0x1F8
+#define    RTL8367C_SVLAN_SP2C_ENTRY122_CTRL0_DST_PORT_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY122_CTRL0_DST_PORT_MASK    0x7
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY122_CTRL1    0x0ff5
+#define    RTL8367C_SVLAN_SP2C_ENTRY122_CTRL1_VALID_OFFSET    12
+#define    RTL8367C_SVLAN_SP2C_ENTRY122_CTRL1_VALID_MASK    0x1000
+#define    RTL8367C_SVLAN_SP2C_ENTRY122_CTRL1_VID_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY122_CTRL1_VID_MASK    0xFFF
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY123_CTRL0    0x0ff6
+#define    RTL8367C_SVLAN_SP2C_ENTRY123_CTRL0_DST_PORT1_OFFSET    9
+#define    RTL8367C_SVLAN_SP2C_ENTRY123_CTRL0_DST_PORT1_MASK    0x200
+#define    RTL8367C_SVLAN_SP2C_ENTRY123_CTRL0_SVIDX_OFFSET    3
+#define    RTL8367C_SVLAN_SP2C_ENTRY123_CTRL0_SVIDX_MASK    0x1F8
+#define    RTL8367C_SVLAN_SP2C_ENTRY123_CTRL0_DST_PORT_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY123_CTRL0_DST_PORT_MASK    0x7
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY123_CTRL1    0x0ff7
+#define    RTL8367C_SVLAN_SP2C_ENTRY123_CTRL1_VALID_OFFSET    12
+#define    RTL8367C_SVLAN_SP2C_ENTRY123_CTRL1_VALID_MASK    0x1000
+#define    RTL8367C_SVLAN_SP2C_ENTRY123_CTRL1_VID_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY123_CTRL1_VID_MASK    0xFFF
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY124_CTRL0    0x0ff8
+#define    RTL8367C_SVLAN_SP2C_ENTRY124_CTRL0_DST_PORT1_OFFSET    9
+#define    RTL8367C_SVLAN_SP2C_ENTRY124_CTRL0_DST_PORT1_MASK    0x200
+#define    RTL8367C_SVLAN_SP2C_ENTRY124_CTRL0_SVIDX_OFFSET    3
+#define    RTL8367C_SVLAN_SP2C_ENTRY124_CTRL0_SVIDX_MASK    0x1F8
+#define    RTL8367C_SVLAN_SP2C_ENTRY124_CTRL0_DST_PORT_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY124_CTRL0_DST_PORT_MASK    0x7
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY124_CTRL1    0x0ff9
+#define    RTL8367C_SVLAN_SP2C_ENTRY124_CTRL1_VALID_OFFSET    12
+#define    RTL8367C_SVLAN_SP2C_ENTRY124_CTRL1_VALID_MASK    0x1000
+#define    RTL8367C_SVLAN_SP2C_ENTRY124_CTRL1_VID_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY124_CTRL1_VID_MASK    0xFFF
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY125_CTRL0    0x0ffa
+#define    RTL8367C_SVLAN_SP2C_ENTRY125_CTRL0_DST_PORT1_OFFSET    9
+#define    RTL8367C_SVLAN_SP2C_ENTRY125_CTRL0_DST_PORT1_MASK    0x200
+#define    RTL8367C_SVLAN_SP2C_ENTRY125_CTRL0_SVIDX_OFFSET    3
+#define    RTL8367C_SVLAN_SP2C_ENTRY125_CTRL0_SVIDX_MASK    0x1F8
+#define    RTL8367C_SVLAN_SP2C_ENTRY125_CTRL0_DST_PORT_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY125_CTRL0_DST_PORT_MASK    0x7
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY125_CTRL1    0x0ffb
+#define    RTL8367C_SVLAN_SP2C_ENTRY125_CTRL1_VALID_OFFSET    12
+#define    RTL8367C_SVLAN_SP2C_ENTRY125_CTRL1_VALID_MASK    0x1000
+#define    RTL8367C_SVLAN_SP2C_ENTRY125_CTRL1_VID_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY125_CTRL1_VID_MASK    0xFFF
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY126_CTRL0    0x0ffc
+#define    RTL8367C_SVLAN_SP2C_ENTRY126_CTRL0_DST_PORT1_OFFSET    9
+#define    RTL8367C_SVLAN_SP2C_ENTRY126_CTRL0_DST_PORT1_MASK    0x200
+#define    RTL8367C_SVLAN_SP2C_ENTRY126_CTRL0_SVIDX_OFFSET    3
+#define    RTL8367C_SVLAN_SP2C_ENTRY126_CTRL0_SVIDX_MASK    0x1F8
+#define    RTL8367C_SVLAN_SP2C_ENTRY126_CTRL0_DST_PORT_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY126_CTRL0_DST_PORT_MASK    0x7
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY126_CTRL1    0x0ffd
+#define    RTL8367C_SVLAN_SP2C_ENTRY126_CTRL1_VALID_OFFSET    12
+#define    RTL8367C_SVLAN_SP2C_ENTRY126_CTRL1_VALID_MASK    0x1000
+#define    RTL8367C_SVLAN_SP2C_ENTRY126_CTRL1_VID_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY126_CTRL1_VID_MASK    0xFFF
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY127_CTRL0    0x0ffe
+#define    RTL8367C_SVLAN_SP2C_ENTRY127_CTRL0_DST_PORT1_OFFSET    9
+#define    RTL8367C_SVLAN_SP2C_ENTRY127_CTRL0_DST_PORT1_MASK    0x200
+#define    RTL8367C_SVLAN_SP2C_ENTRY127_CTRL0_SVIDX_OFFSET    3
+#define    RTL8367C_SVLAN_SP2C_ENTRY127_CTRL0_SVIDX_MASK    0x1F8
+#define    RTL8367C_SVLAN_SP2C_ENTRY127_CTRL0_DST_PORT_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY127_CTRL0_DST_PORT_MASK    0x7
+
+#define    RTL8367C_REG_SVLAN_SP2C_ENTRY127_CTRL1    0x0fff
+#define    RTL8367C_SVLAN_SP2C_ENTRY127_CTRL1_VALID_OFFSET    12
+#define    RTL8367C_SVLAN_SP2C_ENTRY127_CTRL1_VALID_MASK    0x1000
+#define    RTL8367C_SVLAN_SP2C_ENTRY127_CTRL1_VID_OFFSET    0
+#define    RTL8367C_SVLAN_SP2C_ENTRY127_CTRL1_VID_MASK    0xFFF
+
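+/*
+ * Illustrative sketch, not part of the vendor header: the SP2C
+ * (service-VLAN to customer-VLAN) table above is a regular array of
+ * CTRL0/CTRL1 register pairs.  From the entries visible here (ENTRY99
+ * at 0x0fc6 up to ENTRY127 at 0x0ffe), entry N lives at 0x0f00 + 2*N;
+ * entries below 99 are assumed to follow the same stride.  All entries
+ * share one field layout, so the ENTRY99 macros stand in for any N.
+ * Treating DST_PORT1 (bit 9) as the high bit of a 4-bit port id, and
+ * smi_write16() as the platform's 16-bit register accessor, are both
+ * assumptions.
+ */
+#if 0 /* example only, never compiled */
+extern void smi_write16(unsigned int reg, unsigned int val);
+
+static void rtl8367c_sp2c_write(unsigned int n,        /* entry 0..127 */
+				unsigned int svidx,     /* 6-bit SVLAN index */
+				unsigned int dst_port,  /* 4-bit port id */
+				unsigned int vid)       /* 12-bit C-VID */
+{
+	unsigned int ctrl0 = 0x0f00 + 2 * n;
+	unsigned int v0;
+
+	v0  = (dst_port << RTL8367C_SVLAN_SP2C_ENTRY99_CTRL0_DST_PORT_OFFSET)
+		& RTL8367C_SVLAN_SP2C_ENTRY99_CTRL0_DST_PORT_MASK;
+	v0 |= ((dst_port >> 3) << RTL8367C_SVLAN_SP2C_ENTRY99_CTRL0_DST_PORT1_OFFSET)
+		& RTL8367C_SVLAN_SP2C_ENTRY99_CTRL0_DST_PORT1_MASK;
+	v0 |= (svidx << RTL8367C_SVLAN_SP2C_ENTRY99_CTRL0_SVIDX_OFFSET)
+		& RTL8367C_SVLAN_SP2C_ENTRY99_CTRL0_SVIDX_MASK;
+	smi_write16(ctrl0, v0);
+
+	/* CTRL1 carries the valid bit and the translated VID */
+	smi_write16(ctrl0 + 1,
+		    RTL8367C_SVLAN_SP2C_ENTRY99_CTRL1_VALID_MASK |
+		    (vid & RTL8367C_SVLAN_SP2C_ENTRY99_CTRL1_VID_MASK));
+}
+#endif
+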
+/* (16'h1000)mib_reg */
+
+#define    RTL8367C_REG_MIB_COUNTER0    0x1000
+
+#define    RTL8367C_REG_MIB_COUNTER1    0x1001
+
+#define    RTL8367C_REG_MIB_COUNTER2    0x1002
+
+#define    RTL8367C_REG_MIB_COUNTER3    0x1003
+
+#define    RTL8367C_REG_MIB_ADDRESS    0x1004
+#define    RTL8367C_MIB_ADDRESS_OFFSET    0
+#define    RTL8367C_MIB_ADDRESS_MASK    0x1FF
+
+#define    RTL8367C_REG_MIB_CTRL0    0x1005
+#define    RTL8367C_PORT10_RESET_OFFSET    15
+#define    RTL8367C_PORT10_RESET_MASK    0x8000
+#define    RTL8367C_PORT9_RESET_OFFSET    14
+#define    RTL8367C_PORT9_RESET_MASK    0x4000
+#define    RTL8367C_PORT8_RESET_OFFSET    13
+#define    RTL8367C_PORT8_RESET_MASK    0x2000
+#define    RTL8367C_RESET_VALUE_OFFSET    12
+#define    RTL8367C_RESET_VALUE_MASK    0x1000
+#define    RTL8367C_GLOBAL_RESET_OFFSET    11
+#define    RTL8367C_GLOBAL_RESET_MASK    0x800
+#define    RTL8367C_QM_RESET_OFFSET    10
+#define    RTL8367C_QM_RESET_MASK    0x400
+#define    RTL8367C_PORT7_RESET_OFFSET    9
+#define    RTL8367C_PORT7_RESET_MASK    0x200
+#define    RTL8367C_PORT6_RESET_OFFSET    8
+#define    RTL8367C_PORT6_RESET_MASK    0x100
+#define    RTL8367C_PORT5_RESET_OFFSET    7
+#define    RTL8367C_PORT5_RESET_MASK    0x80
+#define    RTL8367C_PORT4_RESET_OFFSET    6
+#define    RTL8367C_PORT4_RESET_MASK    0x40
+#define    RTL8367C_PORT3_RESET_OFFSET    5
+#define    RTL8367C_PORT3_RESET_MASK    0x20
+#define    RTL8367C_PORT2_RESET_OFFSET    4
+#define    RTL8367C_PORT2_RESET_MASK    0x10
+#define    RTL8367C_PORT1_RESET_OFFSET    3
+#define    RTL8367C_PORT1_RESET_MASK    0x8
+#define    RTL8367C_PORT0_RESET_OFFSET    2
+#define    RTL8367C_PORT0_RESET_MASK    0x4
+#define    RTL8367C_RESET_FLAG_OFFSET    1
+#define    RTL8367C_RESET_FLAG_MASK    0x2
+#define    RTL8367C_MIB_CTRL0_BUSY_FLAG_OFFSET    0
+#define    RTL8367C_MIB_CTRL0_BUSY_FLAG_MASK    0x1
+
+#define    RTL8367C_REG_MIB_CTRL1    0x1007
+#define    RTL8367C_COUNTER15_RESET_OFFSET    15
+#define    RTL8367C_COUNTER15_RESET_MASK    0x8000
+#define    RTL8367C_COUNTER14_RESET_OFFSET    14
+#define    RTL8367C_COUNTER14_RESET_MASK    0x4000
+#define    RTL8367C_COUNTER13_RESET_OFFSET    13
+#define    RTL8367C_COUNTER13_RESET_MASK    0x2000
+#define    RTL8367C_COUNTER12_RESET_OFFSET    12
+#define    RTL8367C_COUNTER12_RESET_MASK    0x1000
+#define    RTL8367C_COUNTER11_RESET_OFFSET    11
+#define    RTL8367C_COUNTER11_RESET_MASK    0x800
+#define    RTL8367C_COUNTER10_RESET_OFFSET    10
+#define    RTL8367C_COUNTER10_RESET_MASK    0x400
+#define    RTL8367C_COUNTER9_RESET_OFFSET    9
+#define    RTL8367C_COUNTER9_RESET_MASK    0x200
+#define    RTL8367C_COUNTER8_RESET_OFFSET    8
+#define    RTL8367C_COUNTER8_RESET_MASK    0x100
+#define    RTL8367C_COUNTER7_RESET_OFFSET    7
+#define    RTL8367C_COUNTER7_RESET_MASK    0x80
+#define    RTL8367C_COUNTER6_RESET_OFFSET    6
+#define    RTL8367C_COUNTER6_RESET_MASK    0x40
+#define    RTL8367C_COUNTER5_RESET_OFFSET    5
+#define    RTL8367C_COUNTER5_RESET_MASK    0x20
+#define    RTL8367C_COUNTER4_RESET_OFFSET    4
+#define    RTL8367C_COUNTER4_RESET_MASK    0x10
+#define    RTL8367C_COUNTER3_RESET_OFFSET    3
+#define    RTL8367C_COUNTER3_RESET_MASK    0x8
+#define    RTL8367C_COUNTER2_RESET_OFFSET    2
+#define    RTL8367C_COUNTER2_RESET_MASK    0x4
+#define    RTL8367C_COUNTER1_RESET_OFFSET    1
+#define    RTL8367C_COUNTER1_RESET_MASK    0x2
+#define    RTL8367C_COUNTER0_RESET_OFFSET    0
+#define    RTL8367C_COUNTER0_RESET_MASK    0x1
+
+#define    RTL8367C_REG_MIB_CTRL2    0x1008
+#define    RTL8367C_COUNTER31_RESET_OFFSET    15
+#define    RTL8367C_COUNTER31_RESET_MASK    0x8000
+#define    RTL8367C_COUNTER30_RESET_OFFSET    14
+#define    RTL8367C_COUNTER30_RESET_MASK    0x4000
+#define    RTL8367C_COUNTER29_RESET_OFFSET    13
+#define    RTL8367C_COUNTER29_RESET_MASK    0x2000
+#define    RTL8367C_COUNTER28_RESET_OFFSET    12
+#define    RTL8367C_COUNTER28_RESET_MASK    0x1000
+#define    RTL8367C_COUNTER27_RESET_OFFSET    11
+#define    RTL8367C_COUNTER27_RESET_MASK    0x800
+#define    RTL8367C_COUNTER26_RESET_OFFSET    10
+#define    RTL8367C_COUNTER26_RESET_MASK    0x400
+#define    RTL8367C_COUNTER25_RESET_OFFSET    9
+#define    RTL8367C_COUNTER25_RESET_MASK    0x200
+#define    RTL8367C_COUNTER24_RESET_OFFSET    8
+#define    RTL8367C_COUNTER24_RESET_MASK    0x100
+#define    RTL8367C_COUNTER23_RESET_OFFSET    7
+#define    RTL8367C_COUNTER23_RESET_MASK    0x80
+#define    RTL8367C_COUNTER22_RESET_OFFSET    6
+#define    RTL8367C_COUNTER22_RESET_MASK    0x40
+#define    RTL8367C_COUNTER21_RESET_OFFSET    5
+#define    RTL8367C_COUNTER21_RESET_MASK    0x20
+#define    RTL8367C_COUNTER20_RESET_OFFSET    4
+#define    RTL8367C_COUNTER20_RESET_MASK    0x10
+#define    RTL8367C_COUNTER19_RESET_OFFSET    3
+#define    RTL8367C_COUNTER19_RESET_MASK    0x8
+#define    RTL8367C_COUNTER18_RESET_OFFSET    2
+#define    RTL8367C_COUNTER18_RESET_MASK    0x4
+#define    RTL8367C_COUNTER17_RESET_OFFSET    1
+#define    RTL8367C_COUNTER17_RESET_MASK    0x2
+#define    RTL8367C_COUNTER16_RESET_OFFSET    0
+#define    RTL8367C_COUNTER16_RESET_MASK    0x1
+
+#define    RTL8367C_REG_MIB_CTRL3    0x1009
+#define    RTL8367C_COUNTER15_MODE_OFFSET    15
+#define    RTL8367C_COUNTER15_MODE_MASK    0x8000
+#define    RTL8367C_COUNTER14_MODE_OFFSET    14
+#define    RTL8367C_COUNTER14_MODE_MASK    0x4000
+#define    RTL8367C_COUNTER13_MODE_OFFSET    13
+#define    RTL8367C_COUNTER13_MODE_MASK    0x2000
+#define    RTL8367C_COUNTER12_MODE_OFFSET    12
+#define    RTL8367C_COUNTER12_MODE_MASK    0x1000
+#define    RTL8367C_COUNTER11_MODE_OFFSET    11
+#define    RTL8367C_COUNTER11_MODE_MASK    0x800
+#define    RTL8367C_COUNTER10_MODE_OFFSET    10
+#define    RTL8367C_COUNTER10_MODE_MASK    0x400
+#define    RTL8367C_COUNTER9_MODE_OFFSET    9
+#define    RTL8367C_COUNTER9_MODE_MASK    0x200
+#define    RTL8367C_COUNTER8_MODE_OFFSET    8
+#define    RTL8367C_COUNTER8_MODE_MASK    0x100
+#define    RTL8367C_COUNTER7_MODE_OFFSET    7
+#define    RTL8367C_COUNTER7_MODE_MASK    0x80
+#define    RTL8367C_COUNTER6_MODE_OFFSET    6
+#define    RTL8367C_COUNTER6_MODE_MASK    0x40
+#define    RTL8367C_COUNTER5_MODE_OFFSET    5
+#define    RTL8367C_COUNTER5_MODE_MASK    0x20
+#define    RTL8367C_COUNTER4_MODE_OFFSET    4
+#define    RTL8367C_COUNTER4_MODE_MASK    0x10
+#define    RTL8367C_COUNTER3_MODE_OFFSET    3
+#define    RTL8367C_COUNTER3_MODE_MASK    0x8
+#define    RTL8367C_COUNTER2_MODE_OFFSET    2
+#define    RTL8367C_COUNTER2_MODE_MASK    0x4
+#define    RTL8367C_COUNTER1_MODE_OFFSET    1
+#define    RTL8367C_COUNTER1_MODE_MASK    0x2
+#define    RTL8367C_COUNTER0_MODE_OFFSET    0
+#define    RTL8367C_COUNTER0_MODE_MASK    0x1
+
+#define    RTL8367C_REG_MIB_CTRL4    0x100a
+#define    RTL8367C_MIB_USAGE_MODE_OFFSET    8
+#define    RTL8367C_MIB_USAGE_MODE_MASK    0x100
+#define    RTL8367C_MIB_TIMER_OFFSET    0
+#define    RTL8367C_MIB_TIMER_MASK    0xFF
+
+#define    RTL8367C_REG_MIB_CTRL5    0x100b
+#define    RTL8367C_MIB_CTRL5_COUNTER15_TYPE_OFFSET    15
+#define    RTL8367C_MIB_CTRL5_COUNTER15_TYPE_MASK    0x8000
+#define    RTL8367C_MIB_CTRL5_COUNTER14_TYPE_OFFSET    14
+#define    RTL8367C_MIB_CTRL5_COUNTER14_TYPE_MASK    0x4000
+#define    RTL8367C_MIB_CTRL5_COUNTER13_TYPE_OFFSET    13
+#define    RTL8367C_MIB_CTRL5_COUNTER13_TYPE_MASK    0x2000
+#define    RTL8367C_MIB_CTRL5_COUNTER12_TYPE_OFFSET    12
+#define    RTL8367C_MIB_CTRL5_COUNTER12_TYPE_MASK    0x1000
+#define    RTL8367C_MIB_CTRL5_COUNTER11_TYPE_OFFSET    11
+#define    RTL8367C_MIB_CTRL5_COUNTER11_TYPE_MASK    0x800
+#define    RTL8367C_MIB_CTRL5_COUNTER10_TYPE_OFFSET    10
+#define    RTL8367C_MIB_CTRL5_COUNTER10_TYPE_MASK    0x400
+#define    RTL8367C_MIB_CTRL5_COUNTER9_TYPE_OFFSET    9
+#define    RTL8367C_MIB_CTRL5_COUNTER9_TYPE_MASK    0x200
+#define    RTL8367C_MIB_CTRL5_COUNTER8_TYPE_OFFSET    8
+#define    RTL8367C_MIB_CTRL5_COUNTER8_TYPE_MASK    0x100
+#define    RTL8367C_MIB_CTRL5_COUNTER7_TYPE_OFFSET    7
+#define    RTL8367C_MIB_CTRL5_COUNTER7_TYPE_MASK    0x80
+#define    RTL8367C_MIB_CTRL5_COUNTER6_TYPE_OFFSET    6
+#define    RTL8367C_MIB_CTRL5_COUNTER6_TYPE_MASK    0x40
+#define    RTL8367C_MIB_CTRL5_COUNTER5_TYPE_OFFSET    5
+#define    RTL8367C_MIB_CTRL5_COUNTER5_TYPE_MASK    0x20
+#define    RTL8367C_MIB_CTRL5_COUNTER4_TYPE_OFFSET    4
+#define    RTL8367C_MIB_CTRL5_COUNTER4_TYPE_MASK    0x10
+#define    RTL8367C_MIB_CTRL5_COUNTER3_TYPE_OFFSET    3
+#define    RTL8367C_MIB_CTRL5_COUNTER3_TYPE_MASK    0x8
+#define    RTL8367C_MIB_CTRL5_COUNTER2_TYPE_OFFSET    2
+#define    RTL8367C_MIB_CTRL5_COUNTER2_TYPE_MASK    0x4
+#define    RTL8367C_MIB_CTRL5_COUNTER1_TYPE_OFFSET    1
+#define    RTL8367C_MIB_CTRL5_COUNTER1_TYPE_MASK    0x2
+#define    RTL8367C_MIB_CTRL5_COUNTER0_TYPE_OFFSET    0
+#define    RTL8367C_MIB_CTRL5_COUNTER0_TYPE_MASK    0x1
+
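+/*
+ * Illustrative sketch, not part of the vendor header: reading one MIB
+ * counter through the indirect-access block above.  The 9-bit value
+ * written to MIB_ADDRESS selects a counter, but its exact encoding
+ * (per-port base plus counter offset) is not visible in this hunk, so
+ * the caller is assumed to pass it pre-encoded.  Polling the busy flag
+ * before sampling, and taking MIB_COUNTER0 as the least-significant
+ * 16-bit word, are assumptions; smi_read16()/smi_write16() are
+ * hypothetical accessors.
+ */
+#if 0 /* example only, never compiled */
+extern unsigned int smi_read16(unsigned int reg);
+extern void smi_write16(unsigned int reg, unsigned int val);
+
+static unsigned long long rtl8367c_mib_read(unsigned int mib_addr)
+{
+	unsigned long long cnt = 0;
+	int i;
+
+	smi_write16(RTL8367C_REG_MIB_ADDRESS,
+		    mib_addr & RTL8367C_MIB_ADDRESS_MASK);
+
+	/* the hardware latches the counter; wait until it is ready */
+	while (smi_read16(RTL8367C_REG_MIB_CTRL0) &
+	       RTL8367C_MIB_CTRL0_BUSY_FLAG_MASK)
+		;
+
+	/* MIB_COUNTER0..3 are contiguous (0x1000..0x1003) */
+	for (i = 3; i >= 0; i--)
+		cnt = (cnt << 16) | smi_read16(RTL8367C_REG_MIB_COUNTER0 + i);
+
+	return cnt;
+}
+#endif
+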
+/* (16'h1100)intrpt_reg */
+
+#define    RTL8367C_REG_INTR_CTRL    0x1100
+#define    RTL8367C_INTR_CTRL_OFFSET    0
+#define    RTL8367C_INTR_CTRL_MASK    0x1
+
+#define    RTL8367C_REG_INTR_IMR    0x1101
+#define    RTL8367C_INTR_IMR_SLIENT_START_2_OFFSET    12
+#define    RTL8367C_INTR_IMR_SLIENT_START_2_MASK    0x1000
+#define    RTL8367C_INTR_IMR_SLIENT_START_OFFSET    11
+#define    RTL8367C_INTR_IMR_SLIENT_START_MASK    0x800
+#define    RTL8367C_INTR_IMR_ACL_ACTION_OFFSET    9
+#define    RTL8367C_INTR_IMR_ACL_ACTION_MASK    0x200
+#define    RTL8367C_INTR_IMR_CABLE_DIAG_FIN_OFFSET    8
+#define    RTL8367C_INTR_IMR_CABLE_DIAG_FIN_MASK    0x100
+#define    RTL8367C_INTR_IMR_INTERRUPT_8051_OFFSET    7
+#define    RTL8367C_INTR_IMR_INTERRUPT_8051_MASK    0x80
+#define    RTL8367C_INTR_IMR_LOOP_DETECTION_OFFSET    6
+#define    RTL8367C_INTR_IMR_LOOP_DETECTION_MASK    0x40
+#define    RTL8367C_INTR_IMR_GREEN_TIMER_OFFSET    5
+#define    RTL8367C_INTR_IMR_GREEN_TIMER_MASK    0x20
+#define    RTL8367C_INTR_IMR_SPECIAL_CONGEST_OFFSET    4
+#define    RTL8367C_INTR_IMR_SPECIAL_CONGEST_MASK    0x10
+#define    RTL8367C_INTR_IMR_SPEED_CHANGE_OFFSET    3
+#define    RTL8367C_INTR_IMR_SPEED_CHANGE_MASK    0x8
+#define    RTL8367C_INTR_IMR_LEARN_OVER_OFFSET    2
+#define    RTL8367C_INTR_IMR_LEARN_OVER_MASK    0x4
+#define    RTL8367C_INTR_IMR_METER_EXCEEDED_OFFSET    1
+#define    RTL8367C_INTR_IMR_METER_EXCEEDED_MASK    0x2
+#define    RTL8367C_INTR_IMR_LINK_CHANGE_OFFSET    0
+#define    RTL8367C_INTR_IMR_LINK_CHANGE_MASK    0x1
+
+#define    RTL8367C_REG_INTR_IMS    0x1102
+#define    RTL8367C_INTR_IMS_SLIENT_START_2_OFFSET    12
+#define    RTL8367C_INTR_IMS_SLIENT_START_2_MASK    0x1000
+#define    RTL8367C_INTR_IMS_SLIENT_START_OFFSET    11
+#define    RTL8367C_INTR_IMS_SLIENT_START_MASK    0x800
+#define    RTL8367C_INTR_IMS_ACL_ACTION_OFFSET    9
+#define    RTL8367C_INTR_IMS_ACL_ACTION_MASK    0x200
+#define    RTL8367C_INTR_IMS_CABLE_DIAG_FIN_OFFSET    8
+#define    RTL8367C_INTR_IMS_CABLE_DIAG_FIN_MASK    0x100
+#define    RTL8367C_INTR_IMS_INTERRUPT_8051_OFFSET    7
+#define    RTL8367C_INTR_IMS_INTERRUPT_8051_MASK    0x80
+#define    RTL8367C_INTR_IMS_LOOP_DETECTION_OFFSET    6
+#define    RTL8367C_INTR_IMS_LOOP_DETECTION_MASK    0x40
+#define    RTL8367C_INTR_IMS_GREEN_TIMER_OFFSET    5
+#define    RTL8367C_INTR_IMS_GREEN_TIMER_MASK    0x20
+#define    RTL8367C_INTR_IMS_SPECIAL_CONGEST_OFFSET    4
+#define    RTL8367C_INTR_IMS_SPECIAL_CONGEST_MASK    0x10
+#define    RTL8367C_INTR_IMS_SPEED_CHANGE_OFFSET    3
+#define    RTL8367C_INTR_IMS_SPEED_CHANGE_MASK    0x8
+#define    RTL8367C_INTR_IMS_LEARN_OVER_OFFSET    2
+#define    RTL8367C_INTR_IMS_LEARN_OVER_MASK    0x4
+#define    RTL8367C_INTR_IMS_METER_EXCEEDED_OFFSET    1
+#define    RTL8367C_INTR_IMS_METER_EXCEEDED_MASK    0x2
+#define    RTL8367C_INTR_IMS_LINK_CHANGE_OFFSET    0
+#define    RTL8367C_INTR_IMS_LINK_CHANGE_MASK    0x1
+
+#define    RTL8367C_REG_LEARN_OVER_INDICATOR    0x1103
+#define    RTL8367C_LEARN_OVER_INDICATOR_OFFSET    0
+#define    RTL8367C_LEARN_OVER_INDICATOR_MASK    0x7FF
+
+#define    RTL8367C_REG_SPEED_CHANGE_INDICATOR    0x1104
+#define    RTL8367C_SPEED_CHANGE_INDICATOR_OFFSET    0
+#define    RTL8367C_SPEED_CHANGE_INDICATOR_MASK    0x7FF
+
+#define    RTL8367C_REG_SPECIAL_CONGEST_INDICATOR    0x1105
+#define    RTL8367C_SPECIAL_CONGEST_INDICATOR_OFFSET    0
+#define    RTL8367C_SPECIAL_CONGEST_INDICATOR_MASK    0x7FF
+
+#define    RTL8367C_REG_PORT_LINKDOWN_INDICATOR    0x1106
+#define    RTL8367C_PORT_LINKDOWN_INDICATOR_OFFSET    0
+#define    RTL8367C_PORT_LINKDOWN_INDICATOR_MASK    0x7FF
+
+#define    RTL8367C_REG_PORT_LINKUP_INDICATOR    0x1107
+#define    RTL8367C_PORT_LINKUP_INDICATOR_OFFSET    0
+#define    RTL8367C_PORT_LINKUP_INDICATOR_MASK    0x7FF
+
+#define    RTL8367C_REG_SYSTEM_LEARN_OVER_INDICATOR    0x1108
+#define    RTL8367C_SYSTEM_LEARN_OVER_INDICATOR_OFFSET    0
+#define    RTL8367C_SYSTEM_LEARN_OVER_INDICATOR_MASK    0x1
+
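+/*
+ * Illustrative sketch, not part of the vendor header: the usual way to
+ * drive the IMR/IMS pair above is to unmask the wanted sources in
+ * INTR_IMR, then have the handler read the latched status from
+ * INTR_IMS, consult the per-source indicator registers, and write the
+ * handled bits back.  Write-1-to-clear semantics for INTR_IMS and the
+ * meaning of the single INTR_CTRL bit (taken here as INT pin polarity)
+ * are assumptions; smi_read16()/smi_write16() are hypothetical
+ * accessors.
+ */
+#if 0 /* example only, never compiled */
+extern unsigned int smi_read16(unsigned int reg);
+extern void smi_write16(unsigned int reg, unsigned int val);
+
+static void rtl8367c_intr_init(void)
+{
+	/* unmask link-change events only */
+	smi_write16(RTL8367C_REG_INTR_IMR,
+		    RTL8367C_INTR_IMR_LINK_CHANGE_MASK);
+	/* assumed: bit 0 selects an active-high INT pin */
+	smi_write16(RTL8367C_REG_INTR_CTRL, RTL8367C_INTR_CTRL_MASK);
+}
+
+static void rtl8367c_intr_handle(void)
+{
+	unsigned int ims = smi_read16(RTL8367C_REG_INTR_IMS);
+
+	if (ims & RTL8367C_INTR_IMS_LINK_CHANGE_MASK) {
+		/* 11-bit bitmaps, one bit per switch port */
+		unsigned int up   = smi_read16(RTL8367C_REG_PORT_LINKUP_INDICATOR);
+		unsigned int down = smi_read16(RTL8367C_REG_PORT_LINKDOWN_INDICATOR);
+		/* ... per-port link handling ... */
+		(void)up; (void)down;
+	}
+	smi_write16(RTL8367C_REG_INTR_IMS, ims);	/* acknowledge */
+}
+#endif
+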
+#define    RTL8367C_REG_INTR_IMR_8051    0x1118
+#define    RTL8367C_INTR_IMR_8051_SLIENT_START_2_OFFSET    13
+#define    RTL8367C_INTR_IMR_8051_SLIENT_START_2_MASK    0x2000
+#define    RTL8367C_INTR_IMR_8051_SLIENT_START_OFFSET    12
+#define    RTL8367C_INTR_IMR_8051_SLIENT_START_MASK    0x1000
+#define    RTL8367C_INTR_IMR_8051_ACL_ACTION_OFFSET    10
+#define    RTL8367C_INTR_IMR_8051_ACL_ACTION_MASK    0x400
+#define    RTL8367C_INTR_IMR_8051_SAMOVING_8051_OFFSET    9
+#define    RTL8367C_INTR_IMR_8051_SAMOVING_8051_MASK    0x200
+#define    RTL8367C_INTR_IMR_8051_CABLE_DIAG_FIN_8051_OFFSET    8
+#define    RTL8367C_INTR_IMR_8051_CABLE_DIAG_FIN_8051_MASK    0x100
+#define    RTL8367C_INTR_IMR_8051_EEELLDP_8051_OFFSET    7
+#define    RTL8367C_INTR_IMR_8051_EEELLDP_8051_MASK    0x80
+#define    RTL8367C_INTR_IMR_8051_LOOP_DETECTION_8051_OFFSET    6
+#define    RTL8367C_INTR_IMR_8051_LOOP_DETECTION_8051_MASK    0x40
+#define    RTL8367C_INTR_IMR_8051_GREEN_TIMER_8051_OFFSET    5
+#define    RTL8367C_INTR_IMR_8051_GREEN_TIMER_8051_MASK    0x20
+#define    RTL8367C_INTR_IMR_8051_SPECIAL_CONGEST_8051_OFFSET    4
+#define    RTL8367C_INTR_IMR_8051_SPECIAL_CONGEST_8051_MASK    0x10
+#define    RTL8367C_INTR_IMR_8051_SPEED_CHANGE_8051_OFFSET    3
+#define    RTL8367C_INTR_IMR_8051_SPEED_CHANGE_8051_MASK    0x8
+#define    RTL8367C_INTR_IMR_8051_LEARN_OVER_8051_OFFSET    2
+#define    RTL8367C_INTR_IMR_8051_LEARN_OVER_8051_MASK    0x4
+#define    RTL8367C_INTR_IMR_8051_METER_EXCEEDED_8051_OFFSET    1
+#define    RTL8367C_INTR_IMR_8051_METER_EXCEEDED_8051_MASK    0x2
+#define    RTL8367C_INTR_IMR_8051_LINK_CHANGE_8051_OFFSET    0
+#define    RTL8367C_INTR_IMR_8051_LINK_CHANGE_8051_MASK    0x1
+
+#define    RTL8367C_REG_INTR_IMS_8051    0x1119
+#define    RTL8367C_INTR_IMS_8051_SLIENT_START_2_OFFSET    13
+#define    RTL8367C_INTR_IMS_8051_SLIENT_START_2_MASK    0x2000
+#define    RTL8367C_INTR_IMS_8051_SLIENT_START_OFFSET    12
+#define    RTL8367C_INTR_IMS_8051_SLIENT_START_MASK    0x1000
+#define    RTL8367C_INTR_IMS_8051_ACL_ACTION_OFFSET    10
+#define    RTL8367C_INTR_IMS_8051_ACL_ACTION_MASK    0x400
+#define    RTL8367C_INTR_IMS_8051_SAMOVING_8051_OFFSET    9
+#define    RTL8367C_INTR_IMS_8051_SAMOVING_8051_MASK    0x200
+#define    RTL8367C_INTR_IMS_8051_CABLE_DIAG_FIN_8051_OFFSET    8
+#define    RTL8367C_INTR_IMS_8051_CABLE_DIAG_FIN_8051_MASK    0x100
+#define    RTL8367C_INTR_IMS_8051_EEELLDP_8051_OFFSET    7
+#define    RTL8367C_INTR_IMS_8051_EEELLDP_8051_MASK    0x80
+#define    RTL8367C_INTR_IMS_8051_LOOP_DETECTION_8051_OFFSET    6
+#define    RTL8367C_INTR_IMS_8051_LOOP_DETECTION_8051_MASK    0x40
+#define    RTL8367C_INTR_IMS_8051_GREEN_TIMER_8051_OFFSET    5
+#define    RTL8367C_INTR_IMS_8051_GREEN_TIMER_8051_MASK    0x20
+#define    RTL8367C_INTR_IMS_8051_SPECIAL_CONGEST_8051_OFFSET    4
+#define    RTL8367C_INTR_IMS_8051_SPECIAL_CONGEST_8051_MASK    0x10
+#define    RTL8367C_INTR_IMS_8051_SPEED_CHANGE_8051_OFFSET    3
+#define    RTL8367C_INTR_IMS_8051_SPEED_CHANGE_8051_MASK    0x8
+#define    RTL8367C_INTR_IMS_8051_LEARN_OVER_8051_OFFSET    2
+#define    RTL8367C_INTR_IMS_8051_LEARN_OVER_8051_MASK    0x4
+#define    RTL8367C_INTR_IMS_8051_METER_EXCEEDED_8051_OFFSET    1
+#define    RTL8367C_INTR_IMS_8051_METER_EXCEEDED_8051_MASK    0x2
+#define    RTL8367C_INTR_IMS_8051_LINK_CHANGE_8051_OFFSET    0
+#define    RTL8367C_INTR_IMS_8051_LINK_CHANGE_8051_MASK    0x1
+
+#define    RTL8367C_REG_DW8051_INT_CPU    0x111a
+#define    RTL8367C_DW8051_INT_CPU_OFFSET    0
+#define    RTL8367C_DW8051_INT_CPU_MASK    0x1
+
+#define    RTL8367C_REG_LEARN_OVER_INDICATOR_8051    0x1120
+#define    RTL8367C_LEARN_OVER_INDICATOR_8051_OFFSET    0
+#define    RTL8367C_LEARN_OVER_INDICATOR_8051_MASK    0x7FF
+
+#define    RTL8367C_REG_SPEED_CHANGE_INDICATOR_8051    0x1121
+#define    RTL8367C_SPEED_CHANGE_INDICATOR_8051_OFFSET    0
+#define    RTL8367C_SPEED_CHANGE_INDICATOR_8051_MASK    0x7FF
+
+#define    RTL8367C_REG_SPECIAL_CONGEST_INDICATOR_8051    0x1122
+#define    RTL8367C_SPECIAL_CONGEST_INDICATOR_8051_OFFSET    0
+#define    RTL8367C_SPECIAL_CONGEST_INDICATOR_8051_MASK    0x7FF
+
+#define    RTL8367C_REG_PORT_LINKDOWN_INDICATOR_8051    0x1123
+#define    RTL8367C_PORT_LINKDOWN_INDICATOR_8051_OFFSET    0
+#define    RTL8367C_PORT_LINKDOWN_INDICATOR_8051_MASK    0x7FF
+
+#define    RTL8367C_REG_PORT_LINKUP_INDICATOR_8051    0x1124
+#define    RTL8367C_PORT_LINKUP_INDICATOR_8051_OFFSET    0
+#define    RTL8367C_PORT_LINKUP_INDICATOR_8051_MASK    0x7FF
+
+#define    RTL8367C_REG_DUMMY_1125    0x1125
+
+#define    RTL8367C_REG_DUMMY_1126    0x1126
+
+#define    RTL8367C_REG_DUMMY_1127    0x1127
+
+#define    RTL8367C_REG_DUMMY_1128    0x1128
+
+#define    RTL8367C_REG_DUMMY_1129    0x1129
+
+#define    RTL8367C_REG_INTR_IMS_BUFFER_RESET    0x112a
+#define    RTL8367C_INTR_IMS_BUFFER_RESET_IMR_BUFF_RESET_OFFSET    1
+#define    RTL8367C_INTR_IMS_BUFFER_RESET_IMR_BUFF_RESET_MASK    0x2
+#define    RTL8367C_INTR_IMS_BUFFER_RESET_BUFFER_RESET_OFFSET    0
+#define    RTL8367C_INTR_IMS_BUFFER_RESET_BUFFER_RESET_MASK    0x1
+
+#define    RTL8367C_REG_INTR_IMS_8051_BUFFER_RESET    0x112b
+#define    RTL8367C_INTR_IMS_8051_BUFFER_RESET_IMR_BUFF_RESET_OFFSET    1
+#define    RTL8367C_INTR_IMS_8051_BUFFER_RESET_IMR_BUFF_RESET_MASK    0x2
+#define    RTL8367C_INTR_IMS_8051_BUFFER_RESET_BUFFER_RESET_OFFSET    0
+#define    RTL8367C_INTR_IMS_8051_BUFFER_RESET_BUFFER_RESET_MASK    0x1
+
+#define    RTL8367C_REG_GPHY_INTRPT_8051    0x112c
+#define    RTL8367C_IMS_GPHY_8051_H_OFFSET    13
+#define    RTL8367C_IMS_GPHY_8051_H_MASK    0xE000
+#define    RTL8367C_IMR_GPHY_8051_H_OFFSET    10
+#define    RTL8367C_IMR_GPHY_8051_H_MASK    0x1C00
+#define    RTL8367C_IMS_GPHY_8051_OFFSET    5
+#define    RTL8367C_IMS_GPHY_8051_MASK    0x3E0
+#define    RTL8367C_IMR_GPHY_8051_OFFSET    0
+#define    RTL8367C_IMR_GPHY_8051_MASK    0x1F
+
+#define    RTL8367C_REG_GPHY_INTRPT    0x112d
+#define    RTL8367C_IMS_GPHY_H_OFFSET    13
+#define    RTL8367C_IMS_GPHY_H_MASK    0xE000
+#define    RTL8367C_IMR_GPHY_H_OFFSET    10
+#define    RTL8367C_IMR_GPHY_H_MASK    0x1C00
+#define    RTL8367C_IMS_GPHY_OFFSET    5
+#define    RTL8367C_IMS_GPHY_MASK    0x3E0
+#define    RTL8367C_IMR_GPHY_OFFSET    0
+#define    RTL8367C_IMR_GPHY_MASK    0x1F
+
+#define    RTL8367C_REG_THERMAL_INTRPT    0x112e
+#define    RTL8367C_IMS_TM_HIGH_OFFSET    3
+#define    RTL8367C_IMS_TM_HIGH_MASK    0x8
+#define    RTL8367C_IMR_TM_HIGH_OFFSET    2
+#define    RTL8367C_IMR_TM_HIGH_MASK    0x4
+#define    RTL8367C_IMS_TM_LOW_OFFSET    1
+#define    RTL8367C_IMS_TM_LOW_MASK    0x2
+#define    RTL8367C_IMR_TM_LOW_OFFSET    0
+#define    RTL8367C_IMR_TM_LOW_MASK    0x1
+
+#define    RTL8367C_REG_THERMAL_INTRPT_8051    0x112f
+#define    RTL8367C_IMS_TM_HIGH_8051_OFFSET    3
+#define    RTL8367C_IMS_TM_HIGH_8051_MASK    0x8
+#define    RTL8367C_IMR_TM_HIGH_8051_OFFSET    2
+#define    RTL8367C_IMR_TM_HIGH_8051_MASK    0x4
+#define    RTL8367C_IMS_TM_LOW_8051_OFFSET    1
+#define    RTL8367C_IMS_TM_LOW_8051_MASK    0x2
+#define    RTL8367C_IMR_TM_LOW_8051_OFFSET    0
+#define    RTL8367C_IMR_TM_LOW_8051_MASK    0x1
+
+#define    RTL8367C_REG_SDS_LINK_CHG_INT    0x1130
+#define    RTL8367C_IMS_SDS_LINK_STS_C7_OFFSET    15
+#define    RTL8367C_IMS_SDS_LINK_STS_C7_MASK    0x8000
+#define    RTL8367C_IMS_SDS_LINK_STS_C6_OFFSET    14
+#define    RTL8367C_IMS_SDS_LINK_STS_C6_MASK    0x4000
+#define    RTL8367C_IMS_SDS_LINK_STS_C5_OFFSET    13
+#define    RTL8367C_IMS_SDS_LINK_STS_C5_MASK    0x2000
+#define    RTL8367C_IMS_SDS_LINK_STS_C4_OFFSET    12
+#define    RTL8367C_IMS_SDS_LINK_STS_C4_MASK    0x1000
+#define    RTL8367C_IMS_SDS_LINK_STS_C3_OFFSET    11
+#define    RTL8367C_IMS_SDS_LINK_STS_C3_MASK    0x800
+#define    RTL8367C_IMS_SDS_LINK_STS_C2_OFFSET    10
+#define    RTL8367C_IMS_SDS_LINK_STS_C2_MASK    0x400
+#define    RTL8367C_IMS_SDS_LINK_STS_C1_OFFSET    9
+#define    RTL8367C_IMS_SDS_LINK_STS_C1_MASK    0x200
+#define    RTL8367C_IMS_SDS_LINK_STS_C0_OFFSET    8
+#define    RTL8367C_IMS_SDS_LINK_STS_C0_MASK    0x100
+#define    RTL8367C_IMR_SDS_LINK_STS_C7_OFFSET    7
+#define    RTL8367C_IMR_SDS_LINK_STS_C7_MASK    0x80
+#define    RTL8367C_IMR_SDS_LINK_STS_C6_OFFSET    6
+#define    RTL8367C_IMR_SDS_LINK_STS_C6_MASK    0x40
+#define    RTL8367C_IMR_SDS_LINK_STS_C5_OFFSET    5
+#define    RTL8367C_IMR_SDS_LINK_STS_C5_MASK    0x20
+#define    RTL8367C_IMR_SDS_LINK_STS_C4_OFFSET    4
+#define    RTL8367C_IMR_SDS_LINK_STS_C4_MASK    0x10
+#define    RTL8367C_IMR_SDS_LINK_STS_C3_OFFSET    3
+#define    RTL8367C_IMR_SDS_LINK_STS_C3_MASK    0x8
+#define    RTL8367C_IMR_SDS_LINK_STS_C2_OFFSET    2
+#define    RTL8367C_IMR_SDS_LINK_STS_C2_MASK    0x4
+#define    RTL8367C_IMR_SDS_LINK_STS_C1_OFFSET    1
+#define    RTL8367C_IMR_SDS_LINK_STS_C1_MASK    0x2
+#define    RTL8367C_IMR_SDS_LINK_STS_C0_OFFSET    0
+#define    RTL8367C_IMR_SDS_LINK_STS_C0_MASK    0x1
+
+#define    RTL8367C_REG_SDS_LINK_CHG_INT_8051    0x1131
+#define    RTL8367C_IMS_SDS_LINK_STS_C7_8051_OFFSET    15
+#define    RTL8367C_IMS_SDS_LINK_STS_C7_8051_MASK    0x8000
+#define    RTL8367C_IMS_SDS_LINK_STS_C6_8051_OFFSET    14
+#define    RTL8367C_IMS_SDS_LINK_STS_C6_8051_MASK    0x4000
+#define    RTL8367C_IMS_SDS_LINK_STS_C5_8051_OFFSET    13
+#define    RTL8367C_IMS_SDS_LINK_STS_C5_8051_MASK    0x2000
+#define    RTL8367C_IMS_SDS_LINK_STS_C4_8051_OFFSET    12
+#define    RTL8367C_IMS_SDS_LINK_STS_C4_8051_MASK    0x1000
+#define    RTL8367C_IMS_SDS_LINK_STS_C3_8051_OFFSET    11
+#define    RTL8367C_IMS_SDS_LINK_STS_C3_8051_MASK    0x800
+#define    RTL8367C_IMS_SDS_LINK_STS_C2_8051_OFFSET    10
+#define    RTL8367C_IMS_SDS_LINK_STS_C2_8051_MASK    0x400
+#define    RTL8367C_IMS_SDS_LINK_STS_C1_8051_OFFSET    9
+#define    RTL8367C_IMS_SDS_LINK_STS_C1_8051_MASK    0x200
+#define    RTL8367C_IMS_SDS_LINK_STS_C0_8051_OFFSET    8
+#define    RTL8367C_IMS_SDS_LINK_STS_C0_8051_MASK    0x100
+#define    RTL8367C_IMR_SDS_LINK_STS_C7_8051_OFFSET    7
+#define    RTL8367C_IMR_SDS_LINK_STS_C7_8051_MASK    0x80
+#define    RTL8367C_IMR_SDS_LINK_STS_C6_8051_OFFSET    6
+#define    RTL8367C_IMR_SDS_LINK_STS_C6_8051_MASK    0x40
+#define    RTL8367C_IMR_SDS_LINK_STS_C5_8051_OFFSET    5
+#define    RTL8367C_IMR_SDS_LINK_STS_C5_8051_MASK    0x20
+#define    RTL8367C_IMR_SDS_LINK_STS_C4_8051_OFFSET    4
+#define    RTL8367C_IMR_SDS_LINK_STS_C4_8051_MASK    0x10
+#define    RTL8367C_IMR_SDS_LINK_STS_C3_8051_OFFSET    3
+#define    RTL8367C_IMR_SDS_LINK_STS_C3_8051_MASK    0x8
+#define    RTL8367C_IMR_SDS_LINK_STS_C2_8051_OFFSET    2
+#define    RTL8367C_IMR_SDS_LINK_STS_C2_8051_MASK    0x4
+#define    RTL8367C_IMR_SDS_LINK_STS_C1_8051_OFFSET    1
+#define    RTL8367C_IMR_SDS_LINK_STS_C1_8051_MASK    0x2
+#define    RTL8367C_IMR_SDS_LINK_STS_C0_8051_OFFSET    0
+#define    RTL8367C_IMR_SDS_LINK_STS_C0_8051_MASK    0x1
+
+/* (16'h1200)swcore_reg */
+
+#define    RTL8367C_REG_MAX_LENGTH_LIMINT_IPG    0x1200
+#define    RTL8367C_MAX_LENTH_CTRL_OFFSET    13
+#define    RTL8367C_MAX_LENTH_CTRL_MASK    0x6000
+#define    RTL8367C_PAGES_BEFORE_FCDROP_OFFSET    6
+#define    RTL8367C_PAGES_BEFORE_FCDROP_MASK    0x1FC0
+#define    RTL8367C_CHECK_MIN_IPG_RXDV_OFFSET    5
+#define    RTL8367C_CHECK_MIN_IPG_RXDV_MASK    0x20
+#define    RTL8367C_LIMIT_IPG_CFG_OFFSET    0
+#define    RTL8367C_LIMIT_IPG_CFG_MASK    0x1F
+
+#define    RTL8367C_REG_IOL_RXDROP_CFG    0x1201
+#define    RTL8367C_RX_IOL_MAX_LENGTH_CFG_OFFSET    13
+#define    RTL8367C_RX_IOL_MAX_LENGTH_CFG_MASK    0x2000
+#define    RTL8367C_RX_IOL_ERROR_LENGTH_CFG_OFFSET    12
+#define    RTL8367C_RX_IOL_ERROR_LENGTH_CFG_MASK    0x1000
+#define    RTL8367C_RX_NODROP_PAUSE_CFG_OFFSET    8
+#define    RTL8367C_RX_NODROP_PAUSE_CFG_MASK    0x100
+#define    RTL8367C_RX_DV_CNT_CFG_OFFSET    0
+#define    RTL8367C_RX_DV_CNT_CFG_MASK    0x3F
+
+#define    RTL8367C_REG_VS_TPID    0x1202
+
+#define    RTL8367C_REG_INBW_BOUND    0x1203
+#define    RTL8367C_LBOUND_OFFSET    4
+#define    RTL8367C_LBOUND_MASK    0xF0
+#define    RTL8367C_HBOUND_OFFSET    0
+#define    RTL8367C_HBOUND_MASK    0xF
+
+#define    RTL8367C_REG_CFG_TX_ITFSP_OP    0x1204
+#define    RTL8367C_MASK_OFFSET    1
+#define    RTL8367C_MASK_MASK    0x2
+#define    RTL8367C_OP_OFFSET    0
+#define    RTL8367C_OP_MASK    0x1
+
+#define    RTL8367C_REG_INBW_BOUND2    0x1205
+#define    RTL8367C_LBOUND2_H_OFFSET    9
+#define    RTL8367C_LBOUND2_H_MASK    0x200
+#define    RTL8367C_HBOUND2_H_OFFSET    8
+#define    RTL8367C_HBOUND2_H_MASK    0x100
+#define    RTL8367C_LBOUND2_OFFSET    4
+#define    RTL8367C_LBOUND2_MASK    0xF0
+#define    RTL8367C_HBOUND2_OFFSET    0
+#define    RTL8367C_HBOUND2_MASK    0xF
+
+#define    RTL8367C_REG_CFG_48PASS1_DROP    0x1206
+#define    RTL8367C_CFG_48PASS1_DROP_OFFSET    0
+#define    RTL8367C_CFG_48PASS1_DROP_MASK    0x1
+
+#define    RTL8367C_REG_CFG_BACKPRESSURE    0x1207
+#define    RTL8367C_LONGTXE_OFFSET    12
+#define    RTL8367C_LONGTXE_MASK    0x1000
+#define    RTL8367C_EN_BYPASS_ERROR_OFFSET    8
+#define    RTL8367C_EN_BYPASS_ERROR_MASK    0x100
+#define    RTL8367C_EN_BACKPRESSURE_OFFSET    4
+#define    RTL8367C_EN_BACKPRESSURE_MASK    0x10
+#define    RTL8367C_EN_48_PASS_1_OFFSET    0
+#define    RTL8367C_EN_48_PASS_1_MASK    0x1
+
+#define    RTL8367C_REG_CFG_UNHIOL    0x1208
+#define    RTL8367C_IOL_BACKOFF_OFFSET    12
+#define    RTL8367C_IOL_BACKOFF_MASK    0x1000
+#define    RTL8367C_BACKOFF_RANDOM_TIME_OFFSET    8
+#define    RTL8367C_BACKOFF_RANDOM_TIME_MASK    0x100
+#define    RTL8367C_DISABLE_BACK_OFF_OFFSET    4
+#define    RTL8367C_DISABLE_BACK_OFF_MASK    0x10
+#define    RTL8367C_IPG_COMPENSATION_OFFSET    0
+#define    RTL8367C_IPG_COMPENSATION_MASK    0x1
+
+#define    RTL8367C_REG_SWITCH_MAC0    0x1209
+
+#define    RTL8367C_REG_SWITCH_MAC1    0x120a
+
+#define    RTL8367C_REG_SWITCH_MAC2    0x120b
+
+#define    RTL8367C_REG_SWITCH_CTRL0    0x120c
+#define    RTL8367C_REMARKING_DSCP_ENABLE_OFFSET    8
+#define    RTL8367C_REMARKING_DSCP_ENABLE_MASK    0x100
+#define    RTL8367C_SHORT_IPG_OFFSET    4
+#define    RTL8367C_SHORT_IPG_MASK    0x10
+#define    RTL8367C_PAUSE_MAX128_OFFSET    0
+#define    RTL8367C_PAUSE_MAX128_MASK    0x1
+
+#define    RTL8367C_REG_QOS_DSCP_REMARK_CTRL0    0x120d
+#define    RTL8367C_INTPRI1_DSCP_OFFSET    8
+#define    RTL8367C_INTPRI1_DSCP_MASK    0x3F00
+#define    RTL8367C_INTPRI0_DSCP_OFFSET    0
+#define    RTL8367C_INTPRI0_DSCP_MASK    0x3F
+
+#define    RTL8367C_REG_QOS_DSCP_REMARK_CTRL1    0x120e
+#define    RTL8367C_INTPRI3_DSCP_OFFSET    8
+#define    RTL8367C_INTPRI3_DSCP_MASK    0x3F00
+#define    RTL8367C_INTPRI2_DSCP_OFFSET    0
+#define    RTL8367C_INTPRI2_DSCP_MASK    0x3F
+
+#define    RTL8367C_REG_QOS_DSCP_REMARK_CTRL2    0x120f
+#define    RTL8367C_INTPRI5_DSCP_OFFSET    8
+#define    RTL8367C_INTPRI5_DSCP_MASK    0x3F00
+#define    RTL8367C_INTPRI4_DSCP_OFFSET    0
+#define    RTL8367C_INTPRI4_DSCP_MASK    0x3F
+
+#define    RTL8367C_REG_QOS_DSCP_REMARK_CTRL3    0x1210
+#define    RTL8367C_INTPRI7_DSCP_OFFSET    8
+#define    RTL8367C_INTPRI7_DSCP_MASK    0x3F00
+#define    RTL8367C_INTPRI6_DSCP_OFFSET    0
+#define    RTL8367C_INTPRI6_DSCP_MASK    0x3F
+
+#define    RTL8367C_REG_QOS_1Q_REMARK_CTRL0    0x1211
+#define    RTL8367C_INTPRI3_PRI_OFFSET    12
+#define    RTL8367C_INTPRI3_PRI_MASK    0x7000
+#define    RTL8367C_INTPRI2_PRI_OFFSET    8
+#define    RTL8367C_INTPRI2_PRI_MASK    0x700
+#define    RTL8367C_INTPRI1_PRI_OFFSET    4
+#define    RTL8367C_INTPRI1_PRI_MASK    0x70
+#define    RTL8367C_INTPRI0_PRI_OFFSET    0
+#define    RTL8367C_INTPRI0_PRI_MASK    0x7
+
+#define    RTL8367C_REG_QOS_1Q_REMARK_CTRL1    0x1212
+#define    RTL8367C_INTPRI7_PRI_OFFSET    12
+#define    RTL8367C_INTPRI7_PRI_MASK    0x7000
+#define    RTL8367C_INTPRI6_PRI_OFFSET    8
+#define    RTL8367C_INTPRI6_PRI_MASK    0x700
+#define    RTL8367C_INTPRI5_PRI_OFFSET    4
+#define    RTL8367C_INTPRI5_PRI_MASK    0x70
+#define    RTL8367C_INTPRI4_PRI_OFFSET    0
+#define    RTL8367C_INTPRI4_PRI_MASK    0x7
+
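+/*
+ * Illustrative sketch, not part of the vendor header: every field in
+ * this file follows the same _OFFSET/_MASK convention, so one generic
+ * read-modify-write helper covers all of them.  It is shown on the
+ * 802.1p remark table above, which packs four 3-bit priorities per
+ * register.  smi_read16()/smi_write16() are hypothetical accessors.
+ */
+#if 0 /* example only, never compiled */
+extern unsigned int smi_read16(unsigned int reg);
+extern void smi_write16(unsigned int reg, unsigned int val);
+
+static void rtl8367c_field_write(unsigned int reg, unsigned int mask,
+				 unsigned int offset, unsigned int field)
+{
+	unsigned int v = smi_read16(reg);
+
+	v = (v & ~mask) | ((field << offset) & mask);
+	smi_write16(reg, v);
+}
+
+/* e.g. remark internal priority 5 as 802.1p priority 3 */
+static void example_remark_intpri5(void)
+{
+	rtl8367c_field_write(RTL8367C_REG_QOS_1Q_REMARK_CTRL1,
+			     RTL8367C_INTPRI5_PRI_MASK,
+			     RTL8367C_INTPRI5_PRI_OFFSET, 3);
+}
+#endif
+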
+#define    RTL8367C_REG_PKTGEN_COMMAND    0x1213
+#define    RTL8367C_PKTGEN_STOP_OFFSET    8
+#define    RTL8367C_PKTGEN_STOP_MASK    0x100
+#define    RTL8367C_PKTGEN_START_OFFSET    4
+#define    RTL8367C_PKTGEN_START_MASK    0x10
+#define    RTL8367C_PKTGEN_BYPASS_FLOWCONTROL_OFFSET    0
+#define    RTL8367C_PKTGEN_BYPASS_FLOWCONTROL_MASK    0x1
+
+#define    RTL8367C_REG_SW_DUMMY0    0x1214
+#define    RTL8367C_SW_DUMMY0_DUMMY_OFFSET    4
+#define    RTL8367C_SW_DUMMY0_DUMMY_MASK    0xFFF0
+#define    RTL8367C_EEE_DEFER_TXLPI_OFFSET    3
+#define    RTL8367C_EEE_DEFER_TXLPI_MASK    0x8
+#define    RTL8367C_INGRESSBW_BYPASS_EN_OFFSET    2
+#define    RTL8367C_INGRESSBW_BYPASS_EN_MASK    0x4
+#define    RTL8367C_CFG_RX_MIN_OFFSET    0
+#define    RTL8367C_CFG_RX_MIN_MASK    0x3
+
+#define    RTL8367C_REG_SW_DUMMY1    0x1215
+
+#define    RTL8367C_REG_PKTGEN_PAUSE_TIME    0x1216
+
+#define    RTL8367C_REG_SVLAN_UPLINK_PORTMASK    0x1218
+#define    RTL8367C_SVLAN_UPLINK_PORTMASK_OFFSET    0
+#define    RTL8367C_SVLAN_UPLINK_PORTMASK_MASK    0x7FF
+
+#define    RTL8367C_REG_CPU_PORT_MASK    0x1219
+#define    RTL8367C_CPU_PORT_MASK_OFFSET    0
+#define    RTL8367C_CPU_PORT_MASK_MASK    0x7FF
+
+#define    RTL8367C_REG_CPU_CTRL    0x121a
+#define    RTL8367C_CPU_TRAP_PORT_EXT_OFFSET    10
+#define    RTL8367C_CPU_TRAP_PORT_EXT_MASK    0x400
+#define    RTL8367C_CPU_TAG_FORMAT_OFFSET    9
+#define    RTL8367C_CPU_TAG_FORMAT_MASK    0x200
+#define    RTL8367C_IOL_16DROP_OFFSET    8
+#define    RTL8367C_IOL_16DROP_MASK    0x100
+#define    RTL8367C_CPU_TAG_RXBYTECOUNT_OFFSET    7
+#define    RTL8367C_CPU_TAG_RXBYTECOUNT_MASK    0x80
+#define    RTL8367C_CPU_TAG_POSITION_OFFSET    6
+#define    RTL8367C_CPU_TAG_POSITION_MASK    0x40
+#define    RTL8367C_CPU_TRAP_PORT_OFFSET    3
+#define    RTL8367C_CPU_TRAP_PORT_MASK    0x38
+#define    RTL8367C_CPU_INSERTMODE_OFFSET    1
+#define    RTL8367C_CPU_INSERTMODE_MASK    0x6
+#define    RTL8367C_CPU_EN_OFFSET    0
+#define    RTL8367C_CPU_EN_MASK    0x1
+
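+/*
+ * Illustrative sketch, not part of the vendor header: enabling the CPU
+ * tag on a trap port touches the two registers above.  Treating
+ * CPU_TRAP_PORT_EXT (bit 10) as the high bit of a 4-bit trap-port id
+ * is an assumption, as is leaving INSERTMODE at its reset value (its
+ * 2-bit encoding is not visible in this hunk).  smi_read16()/
+ * smi_write16() are hypothetical accessors.
+ */
+#if 0 /* example only, never compiled */
+extern unsigned int smi_read16(unsigned int reg);
+extern void smi_write16(unsigned int reg, unsigned int val);
+
+static void rtl8367c_cpu_tag_enable(unsigned int port)	/* 0..10 */
+{
+	unsigned int v;
+
+	/* mark this port (only) in the 11-bit CPU port mask */
+	smi_write16(RTL8367C_REG_CPU_PORT_MASK, 1u << port);
+
+	v = smi_read16(RTL8367C_REG_CPU_CTRL);
+	v &= ~(RTL8367C_CPU_TRAP_PORT_MASK | RTL8367C_CPU_TRAP_PORT_EXT_MASK);
+	v |= (port << RTL8367C_CPU_TRAP_PORT_OFFSET) &
+	     RTL8367C_CPU_TRAP_PORT_MASK;
+	if (port & 8)
+		v |= RTL8367C_CPU_TRAP_PORT_EXT_MASK;
+	v |= RTL8367C_CPU_EN_MASK;
+	smi_write16(RTL8367C_REG_CPU_CTRL, v);
+}
+#endif
+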
+#define    RTL8367C_REG_MIRROR_CTRL    0x121c
+#define    RTL8367C_MIRROR_CTRL_DUMMY_OFFSET    12
+#define    RTL8367C_MIRROR_CTRL_DUMMY_MASK    0xF000
+#define    RTL8367C_MIRROR_ISO_OFFSET    11
+#define    RTL8367C_MIRROR_ISO_MASK    0x800
+#define    RTL8367C_MIRROR_TX_OFFSET    10
+#define    RTL8367C_MIRROR_TX_MASK    0x400
+#define    RTL8367C_MIRROR_RX_OFFSET    9
+#define    RTL8367C_MIRROR_RX_MASK    0x200
+#define    RTL8367C_MIRROR_MONITOR_PORT_OFFSET    4
+#define    RTL8367C_MIRROR_MONITOR_PORT_MASK    0xF0
+#define    RTL8367C_MIRROR_SOURCE_PORT_OFFSET    0
+#define    RTL8367C_MIRROR_SOURCE_PORT_MASK    0xF
+
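+/*
+ * Illustrative sketch, not part of the vendor header: port mirroring
+ * is a single-register configuration; both port fields are 4 bits
+ * wide.  smi_write16() is a hypothetical accessor.
+ */
+#if 0 /* example only, never compiled */
+extern void smi_write16(unsigned int reg, unsigned int val);
+
+static void rtl8367c_mirror_set(unsigned int src, unsigned int mon)
+{
+	unsigned int v = 0;
+
+	v |= (src << RTL8367C_MIRROR_SOURCE_PORT_OFFSET) &
+	     RTL8367C_MIRROR_SOURCE_PORT_MASK;
+	v |= (mon << RTL8367C_MIRROR_MONITOR_PORT_OFFSET) &
+	     RTL8367C_MIRROR_MONITOR_PORT_MASK;
+	/* mirror both directions of the source port */
+	v |= RTL8367C_MIRROR_RX_MASK | RTL8367C_MIRROR_TX_MASK;
+	smi_write16(RTL8367C_REG_MIRROR_CTRL, v);
+}
+#endif
+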
+#define    RTL8367C_REG_FLOWCTRL_CTRL0    0x121d
+#define    RTL8367C_FLOWCTRL_TYPE_OFFSET    15
+#define    RTL8367C_FLOWCTRL_TYPE_MASK    0x8000
+#define    RTL8367C_DROP_ALL_THRESHOLD_OFFSET    5
+#define    RTL8367C_DROP_ALL_THRESHOLD_MASK    0x7FE0
+#define    RTL8367C_DROP_ALL_THRESHOLD_MSB_OFFSET    4
+#define    RTL8367C_DROP_ALL_THRESHOLD_MSB_MASK    0x10
+#define    RTL8367C_ITFSP_REG_OFFSET    0
+#define    RTL8367C_ITFSP_REG_MASK    0x7
+
+#define    RTL8367C_REG_FLOWCTRL_ALL_ON    0x121e
+#define    RTL8367C_CFG_RLDPACT_OFFSET    12
+#define    RTL8367C_CFG_RLDPACT_MASK    0x1000
+#define    RTL8367C_FLOWCTRL_ALL_ON_THRESHOLD_OFFSET    0
+#define    RTL8367C_FLOWCTRL_ALL_ON_THRESHOLD_MASK    0x7FF
+
+#define    RTL8367C_REG_FLOWCTRL_SYS_ON    0x121f
+#define    RTL8367C_FLOWCTRL_SYS_ON_OFFSET    0
+#define    RTL8367C_FLOWCTRL_SYS_ON_MASK    0x7FF
+
+#define    RTL8367C_REG_FLOWCTRL_SYS_OFF    0x1220
+#define    RTL8367C_FLOWCTRL_SYS_OFF_OFFSET    0
+#define    RTL8367C_FLOWCTRL_SYS_OFF_MASK    0x7FF
+
+#define    RTL8367C_REG_FLOWCTRL_SHARE_ON    0x1221
+#define    RTL8367C_FLOWCTRL_SHARE_ON_OFFSET    0
+#define    RTL8367C_FLOWCTRL_SHARE_ON_MASK    0x7FF
+
+#define    RTL8367C_REG_FLOWCTRL_SHARE_OFF    0x1222
+#define    RTL8367C_FLOWCTRL_SHARE_OFF_OFFSET    0
+#define    RTL8367C_FLOWCTRL_SHARE_OFF_MASK    0x7FF
+
+#define    RTL8367C_REG_FLOWCTRL_FCOFF_SYS_ON    0x1223
+#define    RTL8367C_FLOWCTRL_FCOFF_SYS_ON_OFFSET    0
+#define    RTL8367C_FLOWCTRL_FCOFF_SYS_ON_MASK    0x7FF
+
+#define    RTL8367C_REG_FLOWCTRL_FCOFF_SYS_OFF    0x1224
+#define    RTL8367C_FLOWCTRL_FCOFF_SYS_OFF_OFFSET    0
+#define    RTL8367C_FLOWCTRL_FCOFF_SYS_OFF_MASK    0x7FF
+
+#define    RTL8367C_REG_FLOWCTRL_FCOFF_SHARE_ON    0x1225
+#define    RTL8367C_FLOWCTRL_FCOFF_SHARE_ON_OFFSET    0
+#define    RTL8367C_FLOWCTRL_FCOFF_SHARE_ON_MASK    0x7FF
+
+#define    RTL8367C_REG_FLOWCTRL_FCOFF_SHARE_OFF    0x1226
+#define    RTL8367C_FLOWCTRL_FCOFF_SHARE_OFF_OFFSET    0
+#define    RTL8367C_FLOWCTRL_FCOFF_SHARE_OFF_MASK    0x7FF
+
+#define    RTL8367C_REG_FLOWCTRL_PORT_ON    0x1227
+#define    RTL8367C_FLOWCTRL_PORT_ON_OFFSET    0
+#define    RTL8367C_FLOWCTRL_PORT_ON_MASK    0x7FF
+
+#define    RTL8367C_REG_FLOWCTRL_PORT_OFF    0x1228
+#define    RTL8367C_FLOWCTRL_PORT_OFF_OFFSET    0
+#define    RTL8367C_FLOWCTRL_PORT_OFF_MASK    0x7FF
+
+#define    RTL8367C_REG_FLOWCTRL_PORT_PRIVATE_ON    0x1229
+#define    RTL8367C_FLOWCTRL_PORT_PRIVATE_ON_OFFSET    0
+#define    RTL8367C_FLOWCTRL_PORT_PRIVATE_ON_MASK    0x7FF
+
+#define    RTL8367C_REG_FLOWCTRL_PORT_PRIVATE_OFF    0x122a
+#define    RTL8367C_FLOWCTRL_PORT_PRIVATE_OFF_OFFSET    0
+#define    RTL8367C_FLOWCTRL_PORT_PRIVATE_OFF_MASK    0x7FF
+
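+/*
+ * Illustrative sketch, not part of the vendor header: the flow-control
+ * thresholds above are 11-bit page counts arranged as ON/OFF pairs for
+ * hysteresis.  That pause frames start at the ON threshold and stop at
+ * the (lower) OFF threshold is an assumption from the naming; page
+ * size and recommended values are chip-specific.  smi_write16() is a
+ * hypothetical accessor.
+ */
+#if 0 /* example only, never compiled */
+extern void smi_write16(unsigned int reg, unsigned int val);
+
+static void rtl8367c_fc_sys_threshold(unsigned int on, unsigned int off)
+{
+	smi_write16(RTL8367C_REG_FLOWCTRL_SYS_ON,
+		    on & RTL8367C_FLOWCTRL_SYS_ON_MASK);
+	smi_write16(RTL8367C_REG_FLOWCTRL_SYS_OFF,
+		    off & RTL8367C_FLOWCTRL_SYS_OFF_MASK);
+}
+#endif
+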
+#define    RTL8367C_REG_RRCP_CTRL0    0x122b
+#define    RTL8367C_COL_SEL_OFFSET    14
+#define    RTL8367C_COL_SEL_MASK    0x4000
+#define    RTL8367C_CRS_SEL_OFFSET    13
+#define    RTL8367C_CRS_SEL_MASK    0x2000
+#define    RTL8367C_RRCP_PBVLAN_EN_OFFSET    11
+#define    RTL8367C_RRCP_PBVLAN_EN_MASK    0x800
+#define    RTL8367C_RRCPV3_SECURITY_CRC_OFFSET    10
+#define    RTL8367C_RRCPV3_SECURITY_CRC_MASK    0x400
+#define    RTL8367C_RRCPV3_HANDLE_OFFSET    8
+#define    RTL8367C_RRCPV3_HANDLE_MASK    0x300
+#define    RTL8367C_RRCPV1_MALFORMED_ACT_OFFSET    5
+#define    RTL8367C_RRCPV1_MALFORMED_ACT_MASK    0x60
+#define    RTL8367C_RRCP_VLANLEAKY_OFFSET    4
+#define    RTL8367C_RRCP_VLANLEAKY_MASK    0x10
+#define    RTL8367C_RRCPV1_SECURITY_CRC_GET_OFFSET    3
+#define    RTL8367C_RRCPV1_SECURITY_CRC_GET_MASK    0x8
+#define    RTL8367C_RRCPV1_SECURITY_CRC_SET_OFFSET    2
+#define    RTL8367C_RRCPV1_SECURITY_CRC_SET_MASK    0x4
+#define    RTL8367C_RRCPV1_HANDLE_OFFSET    1
+#define    RTL8367C_RRCPV1_HANDLE_MASK    0x2
+#define    RTL8367C_RRCP_ENABLE_OFFSET    0
+#define    RTL8367C_RRCP_ENABLE_MASK    0x1
+
+#define    RTL8367C_REG_RRCP_CTRL1    0x122c
+#define    RTL8367C_RRCP_ADMIN_PMSK_OFFSET    8
+#define    RTL8367C_RRCP_ADMIN_PMSK_MASK    0xFF00
+#define    RTL8367C_RRCP_AUTH_PMSK_OFFSET    0
+#define    RTL8367C_RRCP_AUTH_PMSK_MASK    0xFF
+
+#define    RTL8367C_REG_RRCP_CTRL2    0x122d
+#define    RTL8367C_RRCPV1_HELLOFWD_TAG_OFFSET    9
+#define    RTL8367C_RRCPV1_HELLOFWD_TAG_MASK    0x600
+#define    RTL8367C_RRCP_FWD_TAG_OFFSET    7
+#define    RTL8367C_RRCP_FWD_TAG_MASK    0x180
+#define    RTL8367C_RRCPV1_REPLY_TAG_OFFSET    6
+#define    RTL8367C_RRCPV1_REPLY_TAG_MASK    0x40
+#define    RTL8367C_RRCPV1_HELLO_COUNT_OFFSET    3
+#define    RTL8367C_RRCPV1_HELLO_COUNT_MASK    0x38
+#define    RTL8367C_RRCPV1_HELLO_PEDIOD_OFFSET    0
+#define    RTL8367C_RRCPV1_HELLO_PEDIOD_MASK    0x3
+
+#define    RTL8367C_REG_RRCP_CTRL3    0x122e
+#define    RTL8367C_RRCP_TAG_PRIORITY_OFFSET    13
+#define    RTL8367C_RRCP_TAG_PRIORITY_MASK    0xE000
+#define    RTL8367C_RRCP_TAG_VID_OFFSET    0
+#define    RTL8367C_RRCP_TAG_VID_MASK    0xFFF
+
+#define    RTL8367C_REG_FLOWCTRL_FCOFF_PORT_ON    0x122f
+#define    RTL8367C_FLOWCTRL_FCOFF_PORT_ON_OFFSET    0
+#define    RTL8367C_FLOWCTRL_FCOFF_PORT_ON_MASK    0x7FF
+
+#define    RTL8367C_REG_FLOWCTRL_FCOFF_PORT_OFF    0x1230
+#define    RTL8367C_FLOWCTRL_FCOFF_PORT_OFF_OFFSET    0
+#define    RTL8367C_FLOWCTRL_FCOFF_PORT_OFF_MASK    0x7FF
+
+#define    RTL8367C_REG_FLOWCTRL_FCOFF_PORT_PRIVATE_ON    0x1231
+#define    RTL8367C_FLOWCTRL_FCOFF_PORT_PRIVATE_ON_OFFSET    0
+#define    RTL8367C_FLOWCTRL_FCOFF_PORT_PRIVATE_ON_MASK    0x7FF
+
+#define    RTL8367C_REG_FLOWCTRL_FCOFF_PORT_PRIVATE_OFF    0x1232
+#define    RTL8367C_FLOWCTRL_FCOFF_PORT_PRIVATE_OFF_OFFSET    0
+#define    RTL8367C_FLOWCTRL_FCOFF_PORT_PRIVATE_OFF_MASK    0x7FF
+
+#define    RTL8367C_REG_FLOWCTRL_JUMBO_SYS_ON    0x1233
+#define    RTL8367C_FLOWCTRL_JUMBO_SYS_ON_OFFSET    0
+#define    RTL8367C_FLOWCTRL_JUMBO_SYS_ON_MASK    0x7FF
+
+#define    RTL8367C_REG_FLOWCTRL_JUMBO_SYS_OFF    0x1234
+#define    RTL8367C_FLOWCTRL_JUMBO_SYS_OFF_OFFSET    0
+#define    RTL8367C_FLOWCTRL_JUMBO_SYS_OFF_MASK    0x7FF
+
+#define    RTL8367C_REG_FLOWCTRL_JUMBO_SHARE_ON    0x1235
+#define    RTL8367C_FLOWCTRL_JUMBO_SHARE_ON_OFFSET    0
+#define    RTL8367C_FLOWCTRL_JUMBO_SHARE_ON_MASK    0x7FF
+
+#define    RTL8367C_REG_FLOWCTRL_JUMBO_SHARE_OFF    0x1236
+#define    RTL8367C_FLOWCTRL_JUMBO_SHARE_OFF_OFFSET    0
+#define    RTL8367C_FLOWCTRL_JUMBO_SHARE_OFF_MASK    0x7FF
+
+#define    RTL8367C_REG_FLOWCTRL_JUMBO_PORT_ON    0x1237
+#define    RTL8367C_FLOWCTRL_JUMBO_PORT_ON_OFFSET    0
+#define    RTL8367C_FLOWCTRL_JUMBO_PORT_ON_MASK    0x7FF
+
+#define    RTL8367C_REG_FLOWCTRL_JUMBO_PORT_OFF    0x1238
+#define    RTL8367C_FLOWCTRL_JUMBO_PORT_OFF_OFFSET    0
+#define    RTL8367C_FLOWCTRL_JUMBO_PORT_OFF_MASK    0x7FF
+
+#define    RTL8367C_REG_FLOWCTRL_JUMBO_PORT_PRIVATE_ON    0x1239
+#define    RTL8367C_FLOWCTRL_JUMBO_PORT_PRIVATE_ON_OFFSET    0
+#define    RTL8367C_FLOWCTRL_JUMBO_PORT_PRIVATE_ON_MASK    0x7FF
+
+#define    RTL8367C_REG_FLOWCTRL_JUMBO_PORT_PRIVATE_OFF    0x123a
+#define    RTL8367C_FLOWCTRL_JUMBO_PORT_PRIVATE_OFF_OFFSET    0
+#define    RTL8367C_FLOWCTRL_JUMBO_PORT_PRIVATE_OFF_MASK    0x7FF
+
+#define    RTL8367C_REG_FLOWCTRL_JUMBO_SIZE    0x123b
+#define    RTL8367C_JUMBO_MODE_OFFSET    2
+#define    RTL8367C_JUMBO_MODE_MASK    0x4
+#define    RTL8367C_JUMBO_SIZE_OFFSET    0
+#define    RTL8367C_JUMBO_SIZE_MASK    0x3
+
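+/*
+ * Editor's sketch (not in the original header): splitting the raw
+ * FLOWCTRL_JUMBO_SIZE value into its two fields.  The meaning of the
+ * 2-bit size code is not spelled out in this excerpt, so it is returned
+ * as-is; the struct and function names are hypothetical.
+ */
+#if 0 /* illustrative only */
+struct rtl8367c_jumbo_cfg {
+	unsigned int mode;	/* RTL8367C_JUMBO_MODE field (1 bit)  */
+	unsigned int size_code;	/* RTL8367C_JUMBO_SIZE field (2 bits) */
+};
+
+static inline void
+rtl8367c_decode_jumbo(unsigned int regval, struct rtl8367c_jumbo_cfg *cfg)
+{
+	cfg->mode      = (regval & RTL8367C_JUMBO_MODE_MASK) >>
+			 RTL8367C_JUMBO_MODE_OFFSET;
+	cfg->size_code = (regval & RTL8367C_JUMBO_SIZE_MASK) >>
+			 RTL8367C_JUMBO_SIZE_OFFSET;
+}
+#endif
+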
+#define    RTL8367C_REG_FLOWCTRL_TOTAL_PAGE_COUNTER    0x124c
+#define    RTL8367C_FLOWCTRL_TOTAL_PAGE_COUNTER_OFFSET    0
+#define    RTL8367C_FLOWCTRL_TOTAL_PAGE_COUNTER_MASK    0x7FF
+
+#define    RTL8367C_REG_FLOWCTRL_PUBLIC_PAGE_COUNTER    0x124d
+#define    RTL8367C_FLOWCTRL_PUBLIC_PAGE_COUNTER_OFFSET    0
+#define    RTL8367C_FLOWCTRL_PUBLIC_PAGE_COUNTER_MASK    0x7FF
+
+#define    RTL8367C_REG_FLOWCTRL_TOTAL_PAGE_MAX    0x124e
+#define    RTL8367C_FLOWCTRL_TOTAL_PAGE_MAX_OFFSET    0
+#define    RTL8367C_FLOWCTRL_TOTAL_PAGE_MAX_MASK    0x7FF
+
+#define    RTL8367C_REG_FLOWCTRL_PUBLIC_PAGE_MAX    0x124f
+#define    RTL8367C_FLOWCTRL_PUBLIC_PAGE_MAX_OFFSET    0
+#define    RTL8367C_FLOWCTRL_PUBLIC_PAGE_MAX_MASK    0x7FF
+
+#define    RTL8367C_REG_FLOWCTRL_PORT0_PAGE_COUNTER    0x1250
+#define    RTL8367C_FLOWCTRL_PORT0_PAGE_COUNTER_OFFSET    0
+#define    RTL8367C_FLOWCTRL_PORT0_PAGE_COUNTER_MASK    0x7FF
+
+#define    RTL8367C_REG_FLOWCTRL_PORT1_PAGE_COUNTER    0x1251
+#define    RTL8367C_FLOWCTRL_PORT1_PAGE_COUNTER_OFFSET    0
+#define    RTL8367C_FLOWCTRL_PORT1_PAGE_COUNTER_MASK    0x7FF
+
+#define    RTL8367C_REG_FLOWCTRL_PORT2_PAGE_COUNTER    0x1252
+#define    RTL8367C_FLOWCTRL_PORT2_PAGE_COUNTER_OFFSET    0
+#define    RTL8367C_FLOWCTRL_PORT2_PAGE_COUNTER_MASK    0x7FF
+
+#define    RTL8367C_REG_FLOWCTRL_PORT3_PAGE_COUNTER    0x1253
+#define    RTL8367C_FLOWCTRL_PORT3_PAGE_COUNTER_OFFSET    0
+#define    RTL8367C_FLOWCTRL_PORT3_PAGE_COUNTER_MASK    0x7FF
+
+#define    RTL8367C_REG_FLOWCTRL_PORT4_PAGE_COUNTER    0x1254
+#define    RTL8367C_FLOWCTRL_PORT4_PAGE_COUNTER_OFFSET    0
+#define    RTL8367C_FLOWCTRL_PORT4_PAGE_COUNTER_MASK    0x7FF
+
+#define    RTL8367C_REG_FLOWCTRL_PORT5_PAGE_COUNTER    0x1255
+#define    RTL8367C_FLOWCTRL_PORT5_PAGE_COUNTER_OFFSET    0
+#define    RTL8367C_FLOWCTRL_PORT5_PAGE_COUNTER_MASK    0x7FF
+
+#define    RTL8367C_REG_FLOWCTRL_PORT6_PAGE_COUNTER    0x1256
+#define    RTL8367C_FLOWCTRL_PORT6_PAGE_COUNTER_OFFSET    0
+#define    RTL8367C_FLOWCTRL_PORT6_PAGE_COUNTER_MASK    0x7FF
+
+#define    RTL8367C_REG_FLOWCTRL_PORT7_PAGE_COUNTER    0x1257
+#define    RTL8367C_FLOWCTRL_PORT7_PAGE_COUNTER_OFFSET    0
+#define    RTL8367C_FLOWCTRL_PORT7_PAGE_COUNTER_MASK    0x7FF
+
+#define    RTL8367C_REG_FLOWCTRL_PUBLIC_FCOFF_PAGE_COUNTER    0x1258
+#define    RTL8367C_FLOWCTRL_PUBLIC_FCOFF_PAGE_COUNTER_OFFSET    0
+#define    RTL8367C_FLOWCTRL_PUBLIC_FCOFF_PAGE_COUNTER_MASK    0x7FF
+
+#define    RTL8367C_REG_FLOWCTRL_PUBLIC_JUMBO_PAGE_COUNTER    0x1259
+#define    RTL8367C_FLOWCTRL_PUBLIC_JUMBO_PAGE_COUNTER_OFFSET    0
+#define    RTL8367C_FLOWCTRL_PUBLIC_JUMBO_PAGE_COUNTER_MASK    0x7FF
+
+#define    RTL8367C_REG_FLOWCTRL_MAX_PUBLIC_FCOFF_PAGE_COUNTER    0x125a
+#define    RTL8367C_FLOWCTRL_MAX_PUBLIC_FCOFF_PAGE_COUNTER_OFFSET    0
+#define    RTL8367C_FLOWCTRL_MAX_PUBLIC_FCOFF_PAGE_COUNTER_MASK    0x7FF
+
+#define    RTL8367C_REG_FLOWCTRL_MAX_PUBLIC_JUMBO_PAGE_COUNTER    0x125b
+#define    RTL8367C_FLOWCTRL_MAX_PUBLIC_JUMBO_PAGE_COUNTER_OFFSET    0
+#define    RTL8367C_FLOWCTRL_MAX_PUBLIC_JUMBO_PAGE_COUNTER_MASK    0x7FF
+
+#define    RTL8367C_REG_FLOWCTRL_PORT0_PAGE_MAX    0x1260
+#define    RTL8367C_FLOWCTRL_PORT0_PAGE_MAX_OFFSET    0
+#define    RTL8367C_FLOWCTRL_PORT0_PAGE_MAX_MASK    0x7FF
+
+#define    RTL8367C_REG_FLOWCTRL_PORT1_PAGE_MAX    0x1261
+#define    RTL8367C_FLOWCTRL_PORT1_PAGE_MAX_OFFSET    0
+#define    RTL8367C_FLOWCTRL_PORT1_PAGE_MAX_MASK    0x7FF
+
+#define    RTL8367C_REG_FLOWCTRL_PORT2_PAGE_MAX    0x1262
+#define    RTL8367C_FLOWCTRL_PORT2_PAGE_MAX_OFFSET    0
+#define    RTL8367C_FLOWCTRL_PORT2_PAGE_MAX_MASK    0x7FF
+
+#define    RTL8367C_REG_FLOWCTRL_PORT3_PAGE_MAX    0x1263
+#define    RTL8367C_FLOWCTRL_PORT3_PAGE_MAX_OFFSET    0
+#define    RTL8367C_FLOWCTRL_PORT3_PAGE_MAX_MASK    0x7FF
+
+#define    RTL8367C_REG_FLOWCTRL_PORT4_PAGE_MAX    0x1264
+#define    RTL8367C_FLOWCTRL_PORT4_PAGE_MAX_OFFSET    0
+#define    RTL8367C_FLOWCTRL_PORT4_PAGE_MAX_MASK    0x7FF
+
+#define    RTL8367C_REG_FLOWCTRL_PORT5_PAGE_MAX    0x1265
+#define    RTL8367C_FLOWCTRL_PORT5_PAGE_MAX_OFFSET    0
+#define    RTL8367C_FLOWCTRL_PORT5_PAGE_MAX_MASK    0x7FF
+
+#define    RTL8367C_REG_FLOWCTRL_PORT6_PAGE_MAX    0x1266
+#define    RTL8367C_FLOWCTRL_PORT6_PAGE_MAX_OFFSET    0
+#define    RTL8367C_FLOWCTRL_PORT6_PAGE_MAX_MASK    0x7FF
+
+#define    RTL8367C_REG_FLOWCTRL_PORT7_PAGE_MAX    0x1267
+#define    RTL8367C_FLOWCTRL_PORT7_PAGE_MAX_OFFSET    0
+#define    RTL8367C_FLOWCTRL_PORT7_PAGE_MAX_MASK    0x7FF
+
+#define    RTL8367C_REG_FLOWCTRL_PAGE_COUNT_CLEAR    0x1268
+#define    RTL8367C_DIS_SKIP_FP_OFFSET    1
+#define    RTL8367C_DIS_SKIP_FP_MASK    0x2
+#define    RTL8367C_PAGE_COUNT_CLEAR_OFFSET    0
+#define    RTL8367C_PAGE_COUNT_CLEAR_MASK    0x1
+
+#define    RTL8367C_REG_FLOWCTRL_PORT8_PAGE_MAX    0x1269
+#define    RTL8367C_FLOWCTRL_PORT8_PAGE_MAX_OFFSET    0
+#define    RTL8367C_FLOWCTRL_PORT8_PAGE_MAX_MASK    0x7FF
+
+#define    RTL8367C_REG_FLOWCTRL_PORT9_PAGE_MAX    0x126a
+#define    RTL8367C_FLOWCTRL_PORT9_PAGE_MAX_OFFSET    0
+#define    RTL8367C_FLOWCTRL_PORT9_PAGE_MAX_MASK    0x7FF
+
+#define    RTL8367C_REG_FLOWCTRL_PORT10_PAGE_MAX    0x126b
+#define    RTL8367C_FLOWCTRL_PORT10_PAGE_MAX_OFFSET    0
+#define    RTL8367C_FLOWCTRL_PORT10_PAGE_MAX_MASK    0x7FF
+
+#define    RTL8367C_REG_FLOWCTRL_PORT8_PAGE_COUNTER    0x126c
+#define    RTL8367C_FLOWCTRL_PORT8_PAGE_COUNTER_OFFSET    0
+#define    RTL8367C_FLOWCTRL_PORT8_PAGE_COUNTER_MASK    0x7FF
+
+#define    RTL8367C_REG_FLOWCTRL_PORT9_PAGE_COUNTER    0x126d
+#define    RTL8367C_FLOWCTRL_PORT9_PAGE_COUNTER_OFFSET    0
+#define    RTL8367C_FLOWCTRL_PORT9_PAGE_COUNTER_MASK    0x7FF
+
+#define    RTL8367C_REG_FLOWCTRL_PORT10_PAGE_COUNTER    0x126e
+#define    RTL8367C_FLOWCTRL_PORT10_PAGE_COUNTER_OFFSET    0
+#define    RTL8367C_FLOWCTRL_PORT10_PAGE_COUNTER_MASK    0x7FF
+
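+/*
+ * Editor's sketch (not in the original header): the PAGE_COUNTER registers
+ * report current buffer-page usage and the PAGE_MAX registers the matching
+ * limit/high-water value, all as 11-bit fields.  A plausible occupancy
+ * check, assuming only the pairing suggested by the register names:
+ */
+#if 0 /* illustrative only */
+static inline int
+rtl8367c_port0_pages_exceed(unsigned int counter_reg, unsigned int max_reg)
+{
+	unsigned int used = counter_reg & RTL8367C_FLOWCTRL_PORT0_PAGE_COUNTER_MASK;
+	unsigned int max  = max_reg & RTL8367C_FLOWCTRL_PORT0_PAGE_MAX_MASK;
+
+	return used >= max;
+}
+#endif
+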
+#define    RTL8367C_REG_RRCP_CTRL1_H    0x126f
+#define    RTL8367C_RRCP_ADMIN_PMSK_P10_8_OFFSET    3
+#define    RTL8367C_RRCP_ADMIN_PMSK_P10_8_MASK    0x38
+#define    RTL8367C_RRCP_AUTH_PMSK_P10_8_OFFSET    0
+#define    RTL8367C_RRCP_AUTH_PMSK_P10_8_MASK    0x7
+
+#define    RTL8367C_REG_EMA_CTRL0    0x1270
+#define    RTL8367C_CFG_DVSE_VIAROM_OFFSET    13
+#define    RTL8367C_CFG_DVSE_VIAROM_MASK    0x2000
+#define    RTL8367C_CFG_DVSE_MIBRAM_OFFSET    12
+#define    RTL8367C_CFG_DVSE_MIBRAM_MASK    0x1000
+#define    RTL8367C_CFG_DVSE_IROM_OFFSET    11
+#define    RTL8367C_CFG_DVSE_IROM_MASK    0x800
+#define    RTL8367C_CFG_DVSE_ERAM_OFFSET    10
+#define    RTL8367C_CFG_DVSE_ERAM_MASK    0x400
+#define    RTL8367C_CFG_DVSE_IRAM_OFFSET    9
+#define    RTL8367C_CFG_DVSE_IRAM_MASK    0x200
+#define    RTL8367C_CFG_DVSE_NICRAM_OFFSET    8
+#define    RTL8367C_CFG_DVSE_NICRAM_MASK    0x100
+#define    RTL8367C_CFG_DVSE_CVLANRAM_OFFSET    7
+#define    RTL8367C_CFG_DVSE_CVLANRAM_MASK    0x80
+#define    RTL8367C_CFG_DVSE_ACTRAM_OFFSET    6
+#define    RTL8367C_CFG_DVSE_ACTRAM_MASK    0x40
+#define    RTL8367C_CFG_DVSE_INQRAM_OFFSET    5
+#define    RTL8367C_CFG_DVSE_INQRAM_MASK    0x20
+#define    RTL8367C_CFG_DVSE_HSARAM_OFFSET    4
+#define    RTL8367C_CFG_DVSE_HSARAM_MASK    0x10
+#define    RTL8367C_CFG_DVSE_OUTQRAM_OFFSET    3
+#define    RTL8367C_CFG_DVSE_OUTQRAM_MASK    0x8
+#define    RTL8367C_CFG_DVSE_HTRAM_OFFSET    2
+#define    RTL8367C_CFG_DVSE_HTRAM_MASK    0x4
+#define    RTL8367C_CFG_DVSE_PBRAM_OFFSET    1
+#define    RTL8367C_CFG_DVSE_PBRAM_MASK    0x2
+#define    RTL8367C_CFG_DVSE_L2RAM_OFFSET    0
+#define    RTL8367C_CFG_DVSE_L2RAM_MASK    0x1
+
+#define    RTL8367C_REG_EMA_CTRL1    0x1271
+#define    RTL8367C_CFG_DVS_OUTQRAM_OFFSET    12
+#define    RTL8367C_CFG_DVS_OUTQRAM_MASK    0xF000
+#define    RTL8367C_CFG_DVS_HTRAM_OFFSET    8
+#define    RTL8367C_CFG_DVS_HTRAM_MASK    0x700
+#define    RTL8367C_CFG_DVS_PBRAM_OFFSET    4
+#define    RTL8367C_CFG_DVS_PBRAM_MASK    0xF0
+#define    RTL8367C_CFG_DVS_L2RAM_OFFSET    0
+#define    RTL8367C_CFG_DVS_L2RAM_MASK    0xF
+
+#define    RTL8367C_REG_EMA_CTRL2    0x1272
+#define    RTL8367C_CFG_DVS_CVLANRAM_OFFSET    12
+#define    RTL8367C_CFG_DVS_CVLANRAM_MASK    0xF000
+#define    RTL8367C_CFG_DVS_ACTRAM_OFFSET    8
+#define    RTL8367C_CFG_DVS_ACTRAM_MASK    0xF00
+#define    RTL8367C_CFG_DVS_INQRAM_OFFSET    4
+#define    RTL8367C_CFG_DVS_INQRAM_MASK    0xF0
+#define    RTL8367C_CFG_DVS_HSARAM_OFFSET    0
+#define    RTL8367C_CFG_DVS_HSARAM_MASK    0xF
+
+#define    RTL8367C_REG_EMA_CTRL3    0x1273
+#define    RTL8367C_CFG_DVS_IROM_OFFSET    12
+#define    RTL8367C_CFG_DVS_IROM_MASK    0xF000
+#define    RTL8367C_CFG_DVS_ERAM_OFFSET    8
+#define    RTL8367C_CFG_DVS_ERAM_MASK    0xF00
+#define    RTL8367C_CFG_DVS_IRAM_OFFSET    4
+#define    RTL8367C_CFG_DVS_IRAM_MASK    0xF0
+#define    RTL8367C_CFG_DVS_NICRAM_OFFSET    0
+#define    RTL8367C_CFG_DVS_NICRAM_MASK    0xF
+
+#define    RTL8367C_REG_EMA_CTRL4    0x1274
+#define    RTL8367C_CFG_DVS_VIAROM_OFFSET    4
+#define    RTL8367C_CFG_DVS_VIAROM_MASK    0xF0
+#define    RTL8367C_CFG_DVS_MIBRAM_OFFSET    0
+#define    RTL8367C_CFG_DVS_MIBRAM_MASK    0xF
+
+#define    RTL8367C_REG_DIAG_MODE    0x1275
+#define    RTL8367C_DIAG_MODE_OFFSET    0
+#define    RTL8367C_DIAG_MODE_MASK    0x1F
+
+#define    RTL8367C_REG_BIST_MODE    0x1276
+
+#define    RTL8367C_REG_STS_BIST_DONE    0x1277
+
+#define    RTL8367C_REG_STS_BIST_RLT0    0x1278
+#define    RTL8367C_STS_BIST_RLT0_OFFSET    0
+#define    RTL8367C_STS_BIST_RLT0_MASK    0x1
+
+#define    RTL8367C_REG_STS_BIST_RLT1    0x1279
+
+#define    RTL8367C_REG_STS_BIST_RLT2    0x127a
+
+#define    RTL8367C_REG_STS_BIST_RLT3    0x127b
+#define    RTL8367C_STS_BIST_RLT3_OFFSET    0
+#define    RTL8367C_STS_BIST_RLT3_MASK    0x3FF
+
+#define    RTL8367C_REG_STS_BIST_RLT4    0x127c
+#define    RTL8367C_STS_BIST_RLT4_OFFSET    0
+#define    RTL8367C_STS_BIST_RLT4_MASK    0x7
+
+#define    RTL8367C_REG_VIAROM_MISR    0x127d
+
+#define    RTL8367C_REG_DRF_BIST_MODE    0x1280
+#define    RTL8367C_DRF_TCAMDEL_OFFSET    15
+#define    RTL8367C_DRF_TCAMDEL_MASK    0x8000
+#define    RTL8367C_CFG_DRF_BIST_MODE_OFFSET    0
+#define    RTL8367C_CFG_DRF_BIST_MODE_MASK    0x7FFF
+
+#define    RTL8367C_REG_STS_DRF_BIST    0x1281
+#define    RTL8367C_STS_DRF_BIST_OFFSET    0
+#define    RTL8367C_STS_DRF_BIST_MASK    0x7FFF
+
+#define    RTL8367C_REG_STS_DRF_BIST_RLT0    0x1282
+#define    RTL8367C_STS_DRF_BIST_RLT0_OFFSET    0
+#define    RTL8367C_STS_DRF_BIST_RLT0_MASK    0x1
+
+#define    RTL8367C_REG_STS_DRF_BIST_RLT1    0x1283
+
+#define    RTL8367C_REG_STS_DRF_BIST_RLT2    0x1284
+
+#define    RTL8367C_REG_STS_DRF_BIST_RLT3    0x1285
+#define    RTL8367C_STS_DRF_BIST_RLT3_OFFSET    0
+#define    RTL8367C_STS_DRF_BIST_RLT3_MASK    0x3FF
+
+#define    RTL8367C_REG_STS_DRF_BIST_RLT4    0x1286
+#define    RTL8367C_STS_DRF_BIST_RLT4_OFFSET    0
+#define    RTL8367C_STS_DRF_BIST_RLT4_MASK    0x7FFF
+
+#define    RTL8367C_REG_RAM_DRF_CTRL    0x1289
+#define    RTL8367C_RAM_DRF_CTRL_OFFSET    0
+#define    RTL8367C_RAM_DRF_CTRL_MASK    0x1
+
+#define    RTL8367C_REG_MIB_RMON_LEN_CTRL    0x128a
+#define    RTL8367C_RX_LENGTH_CTRL_OFFSET    1
+#define    RTL8367C_RX_LENGTH_CTRL_MASK    0x2
+#define    RTL8367C_TX_LENGTH_CTRL_OFFSET    0
+#define    RTL8367C_TX_LENGTH_CTRL_MASK    0x1
+
+#define    RTL8367C_REG_COND0_BISR_OUT0    0x1290
+
+#define    RTL8367C_REG_COND0_BISR_OUT1    0x1291
+
+#define    RTL8367C_REG_COND0_BISR_OUT2    0x1292
+
+#define    RTL8367C_REG_COND0_BISR_OUT3    0x1293
+
+#define    RTL8367C_REG_COND0_BISR_OUT4    0x1294
+#define    RTL8367C_COND0_BISR_OUT4_OFFSET    0
+#define    RTL8367C_COND0_BISR_OUT4_MASK    0x3F
+
+#define    RTL8367C_REG_COND0_BISR_OUT5    0x1295
+#define    RTL8367C_COND0_BISR_OUT5_OFFSET    0
+#define    RTL8367C_COND0_BISR_OUT5_MASK    0x7
+
+#define    RTL8367C_REG_CHG_DUPLEX_CFG    0x1296
+#define    RTL8367C_CHG_COL_CNT_PORT_OFFSET    13
+#define    RTL8367C_CHG_COL_CNT_PORT_MASK    0xE000
+#define    RTL8367C_CHG_COL_CNT_OFFSET    8
+#define    RTL8367C_CHG_COL_CNT_MASK    0x1F00
+#define    RTL8367C_CFG_CHG_DUP_EN_OFFSET    7
+#define    RTL8367C_CFG_CHG_DUP_EN_MASK    0x80
+#define    RTL8367C_CFG_CHG_DUP_THR_OFFSET    2
+#define    RTL8367C_CFG_CHG_DUP_THR_MASK    0x7C
+#define    RTL8367C_CFG_CHG_DUP_CONGEST_OFFSET    1
+#define    RTL8367C_CFG_CHG_DUP_CONGEST_MASK    0x2
+#define    RTL8367C_CFG_CHG_DUP_REF_OFFSET    0
+#define    RTL8367C_CFG_CHG_DUP_REF_MASK    0x1
+
+#define    RTL8367C_REG_COND0_BIST_PASS    0x1297
+#define    RTL8367C_COND0_DRF_BIST_NOFAIL_OFFSET    1
+#define    RTL8367C_COND0_DRF_BIST_NOFAIL_MASK    0x2
+#define    RTL8367C_COND0_BIST_NOFAIL_OFFSET    0
+#define    RTL8367C_COND0_BIST_NOFAIL_MASK    0x1
+
+#define    RTL8367C_REG_COND1_BISR_OUT0    0x1298
+
+#define    RTL8367C_REG_COND1_BISR_OUT1    0x1299
+
+#define    RTL8367C_REG_COND1_BISR_OUT2    0x129a
+
+#define    RTL8367C_REG_COND1_BISR_OUT3    0x129b
+
+#define    RTL8367C_REG_COND1_BISR_OUT4    0x129c
+#define    RTL8367C_COND1_BISR_OUT4_OFFSET    0
+#define    RTL8367C_COND1_BISR_OUT4_MASK    0x3F
+
+#define    RTL8367C_REG_COND1_BISR_OUT5    0x129d
+#define    RTL8367C_COND1_BISR_OUT5_OFFSET    0
+#define    RTL8367C_COND1_BISR_OUT5_MASK    0x7
+
+#define    RTL8367C_REG_COND1_BIST_PASS    0x129f
+#define    RTL8367C_COND1_DRF_BIST_NOFAIL_OFFSET    1
+#define    RTL8367C_COND1_DRF_BIST_NOFAIL_MASK    0x2
+#define    RTL8367C_COND1_BIST_NOFAIL_OFFSET    0
+#define    RTL8367C_COND1_BIST_NOFAIL_MASK    0x1
+
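+/*
+ * Editor's sketch (not in the original header): a memory-BIST outcome check
+ * built only from the COND0/COND1 pass registers above.  How the two
+ * conditions map onto physical RAM groups is not documented in this
+ * excerpt, so both are simply required to report "no fail".
+ */
+#if 0 /* illustrative only */
+static inline int
+rtl8367c_bist_passed(unsigned int cond0_reg, unsigned int cond1_reg)
+{
+	unsigned int ok0 = cond0_reg & RTL8367C_COND0_BIST_NOFAIL_MASK;
+	unsigned int ok1 = cond1_reg & RTL8367C_COND1_BIST_NOFAIL_MASK;
+
+	return ok0 && ok1;
+}
+#endif
+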
+#define    RTL8367C_REG_EEE_TX_THR_Giga_500M    0x12a0
+
+#define    RTL8367C_REG_EEE_TX_THR_FE    0x12a1
+
+#define    RTL8367C_REG_EEE_MISC    0x12a3
+#define    RTL8367C_EEE_REQ_SET1_OFFSET    13
+#define    RTL8367C_EEE_REQ_SET1_MASK    0x2000
+#define    RTL8367C_EEE_REQ_SET0_OFFSET    12
+#define    RTL8367C_EEE_REQ_SET0_MASK    0x1000
+#define    RTL8367C_EEE_WAKE_SET1_OFFSET    9
+#define    RTL8367C_EEE_WAKE_SET1_MASK    0x200
+#define    RTL8367C_EEE_Wake_SET0_OFFSET    8
+#define    RTL8367C_EEE_Wake_SET0_MASK    0x100
+#define    RTL8367C_EEE_TU_GIGA_500M_OFFSET    4
+#define    RTL8367C_EEE_TU_GIGA_500M_MASK    0x30
+#define    RTL8367C_EEE_TU_100M_OFFSET    2
+#define    RTL8367C_EEE_TU_100M_MASK    0xC
+
+#define    RTL8367C_REG_EEE_GIGA_CTRL0    0x12a4
+#define    RTL8367C_EEE_TW_GIGA_OFFSET    8
+#define    RTL8367C_EEE_TW_GIGA_MASK    0xFF00
+#define    RTL8367C_EEE_TR_GIGA_500M_OFFSET    0
+#define    RTL8367C_EEE_TR_GIGA_500M_MASK    0xFF
+
+#define    RTL8367C_REG_EEE_GIGA_CTRL1    0x12a5
+#define    RTL8367C_EEE_TD_GIGA_500M_OFFSET    8
+#define    RTL8367C_EEE_TD_GIGA_500M_MASK    0xFF00
+#define    RTL8367C_EEE_TP_GIGA_OFFSET    0
+#define    RTL8367C_EEE_TP_GIGA_MASK    0xFF
+
+#define    RTL8367C_REG_EEE_100M_CTRL0    0x12a6
+#define    RTL8367C_EEE_TW_100M_OFFSET    8
+#define    RTL8367C_EEE_TW_100M_MASK    0xFF00
+#define    RTL8367C_EEE_TR_100M_OFFSET    0
+#define    RTL8367C_EEE_TR_100M_MASK    0xFF
+
+#define    RTL8367C_REG_EEE_100M_CTRL1    0x12a7
+#define    RTL8367C_EEE_TD_100M_OFFSET    8
+#define    RTL8367C_EEE_TD_100M_MASK    0xFF00
+#define    RTL8367C_EEE_TP_100M_OFFSET    0
+#define    RTL8367C_EEE_TP_100M_MASK    0xFF
+
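+/*
+ * Editor's sketch (not in the original header): the EEE_*_CTRL registers
+ * pack two 8-bit timing parameters per 16-bit register (e.g. Tw in the
+ * high byte and Tr in the low byte of EEE_GIGA_CTRL0).  Composing such a
+ * value, with the timer units and semantics left to the datasheet:
+ */
+#if 0 /* illustrative only */
+static inline unsigned int
+rtl8367c_eee_giga_ctrl0(unsigned int tw, unsigned int tr)
+{
+	return ((tw << RTL8367C_EEE_TW_GIGA_OFFSET) &
+		RTL8367C_EEE_TW_GIGA_MASK) |
+	       ((tr << RTL8367C_EEE_TR_GIGA_500M_OFFSET) &
+		RTL8367C_EEE_TR_GIGA_500M_MASK);
+}
+#endif
+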
+#define    RTL8367C_REG_RX_FC_REG    0x12aa
+#define    RTL8367C_EN_EEE_HALF_DUP_OFFSET    8
+#define    RTL8367C_EN_EEE_HALF_DUP_MASK    0x100
+#define    RTL8367C_RX_PGCNT_OFFSET    0
+#define    RTL8367C_RX_PGCNT_MASK    0xFF
+
+#define    RTL8367C_REG_MAX_FIFO_SIZE    0x12af
+#define    RTL8367C_MAX_FIFO_SIZE_OFFSET    0
+#define    RTL8367C_MAX_FIFO_SIZE_MASK    0xF
+
+#define    RTL8367C_REG_EEEP_RX_RATE_GIGA    0x12b0
+
+#define    RTL8367C_REG_EEEP_RX_RATE_100M    0x12b1
+
+#define    RTL8367C_REG_DUMMY_REG_12_2    0x12b2
+
+#define    RTL8367C_REG_EEEP_TX_RATE_GIGA    0x12b3
+
+#define    RTL8367C_REG_EEEP_TX_RATE_100M    0x12b4
+
+#define    RTL8367C_REG_DUMMY_REG_12_3    0x12b5
+
+#define    RTL8367C_REG_EEEP_GIGA_CTRL0    0x12b6
+#define    RTL8367C_EEEP_TR_GIGA_OFFSET    8
+#define    RTL8367C_EEEP_TR_GIGA_MASK    0xFF00
+#define    RTL8367C_EEEP_RW_GIGA_MST_OFFSET    0
+#define    RTL8367C_EEEP_RW_GIGA_MST_MASK    0xFF
+
+#define    RTL8367C_REG_EEEP_GIGA_CTRL1    0x12b7
+#define    RTL8367C_EEEP_TW_GIGA_OFFSET    8
+#define    RTL8367C_EEEP_TW_GIGA_MASK    0xFF00
+#define    RTL8367C_EEEP_TP_GIGA_OFFSET    0
+#define    RTL8367C_EEEP_TP_GIGA_MASK    0xFF
+
+#define    RTL8367C_REG_EEEP_GIGA_CTRL2    0x12b8
+#define    RTL8367C_EEEP_TXEN_GIGA_OFFSET    12
+#define    RTL8367C_EEEP_TXEN_GIGA_MASK    0x1000
+#define    RTL8367C_EEEP_TU_GIGA_OFFSET    8
+#define    RTL8367C_EEEP_TU_GIGA_MASK    0x300
+#define    RTL8367C_EEEP_TS_GIGA_OFFSET    0
+#define    RTL8367C_EEEP_TS_GIGA_MASK    0xFF
+
+#define    RTL8367C_REG_EEEP_100M_CTRL0    0x12b9
+#define    RTL8367C_EEEP_TR_100M_OFFSET    8
+#define    RTL8367C_EEEP_TR_100M_MASK    0xFF00
+#define    RTL8367C_EEEP_RW_100M_OFFSET    0
+#define    RTL8367C_EEEP_RW_100M_MASK    0xFF
+
+#define    RTL8367C_REG_EEEP_100M_CTRL1    0x12ba
+#define    RTL8367C_EEEP_TW_100M_OFFSET    8
+#define    RTL8367C_EEEP_TW_100M_MASK    0xFF00
+#define    RTL8367C_EEEP_TP_100M_OFFSET    0
+#define    RTL8367C_EEEP_TP_100M_MASK    0xFF
+
+#define    RTL8367C_REG_EEEP_100M_CTRL2    0x12bb
+#define    RTL8367C_EEEP_TXEN_100M_OFFSET    12
+#define    RTL8367C_EEEP_TXEN_100M_MASK    0x1000
+#define    RTL8367C_EEEP_TU_100M_OFFSET    8
+#define    RTL8367C_EEEP_TU_100M_MASK    0x300
+#define    RTL8367C_EEEP_TS_100M_OFFSET    0
+#define    RTL8367C_EEEP_TS_100M_MASK    0xFF
+
+#define    RTL8367C_REG_EEEP_CTRL0    0x12bc
+#define    RTL8367C_EEEP_CTRL0_DUMMY_OFFSET    8
+#define    RTL8367C_EEEP_CTRL0_DUMMY_MASK    0xFF00
+#define    RTL8367C_EEEP_SLEEP_STEP_OFFSET    0
+#define    RTL8367C_EEEP_SLEEP_STEP_MASK    0xFF
+
+#define    RTL8367C_REG_EEEP_CTRL1    0x12bd
+#define    RTL8367C_EEEP_TXR_GIGA_OFFSET    8
+#define    RTL8367C_EEEP_TXR_GIGA_MASK    0xFF00
+#define    RTL8367C_EEEP_TXR_100M_OFFSET    0
+#define    RTL8367C_EEEP_TXR_100M_MASK    0xFF
+
+#define    RTL8367C_REG_BACK_PRESSURE_IPG    0x12be
+#define    RTL8367C_BACK_PRESSURE_IPG_OFFSET    0
+#define    RTL8367C_BACK_PRESSURE_IPG_MASK    0x3
+
+#define    RTL8367C_REG_TX_ESD_LEVEL    0x12bf
+#define    RTL8367C_TX_ESD_LEVEL_MODE_OFFSET    8
+#define    RTL8367C_TX_ESD_LEVEL_MODE_MASK    0x100
+#define    RTL8367C_LEVEL_OFFSET    0
+#define    RTL8367C_LEVEL_MASK    0xFF
+
+#define    RTL8367C_REG_RRCP_CTRL4    0x12e0
+
+#define    RTL8367C_REG_RRCP_CTRL5    0x12e1
+
+#define    RTL8367C_REG_RRCP_CTRL6    0x12e2
+
+#define    RTL8367C_REG_RRCP_CTRL7    0x12e3
+
+#define    RTL8367C_REG_RRCP_CTRL8    0x12e4
+
+#define    RTL8367C_REG_RRCP_CTRL9    0x12e5
+
+#define    RTL8367C_REG_RRCP_CTRL10    0x12e6
+
+#define    RTL8367C_REG_FIELD_SELECTOR0    0x12e7
+#define    RTL8367C_FIELD_SELECTOR0_FORMAT_OFFSET    8
+#define    RTL8367C_FIELD_SELECTOR0_FORMAT_MASK    0x700
+#define    RTL8367C_FIELD_SELECTOR0_OFFSET_OFFSET    0
+#define    RTL8367C_FIELD_SELECTOR0_OFFSET_MASK    0xFF
+
+#define    RTL8367C_REG_FIELD_SELECTOR1    0x12e8
+#define    RTL8367C_FIELD_SELECTOR1_FORMAT_OFFSET    8
+#define    RTL8367C_FIELD_SELECTOR1_FORMAT_MASK    0x700
+#define    RTL8367C_FIELD_SELECTOR1_OFFSET_OFFSET    0
+#define    RTL8367C_FIELD_SELECTOR1_OFFSET_MASK    0xFF
+
+#define    RTL8367C_REG_FIELD_SELECTOR2    0x12e9
+#define    RTL8367C_FIELD_SELECTOR2_FORMAT_OFFSET    8
+#define    RTL8367C_FIELD_SELECTOR2_FORMAT_MASK    0x700
+#define    RTL8367C_FIELD_SELECTOR2_OFFSET_OFFSET    0
+#define    RTL8367C_FIELD_SELECTOR2_OFFSET_MASK    0xFF
+
+#define    RTL8367C_REG_FIELD_SELECTOR3    0x12ea
+#define    RTL8367C_FIELD_SELECTOR3_FORMAT_OFFSET    8
+#define    RTL8367C_FIELD_SELECTOR3_FORMAT_MASK    0x700
+#define    RTL8367C_FIELD_SELECTOR3_OFFSET_OFFSET    0
+#define    RTL8367C_FIELD_SELECTOR3_OFFSET_MASK    0xFF
+
+#define    RTL8367C_REG_FIELD_SELECTOR4    0x12eb
+#define    RTL8367C_FIELD_SELECTOR4_FORMAT_OFFSET    8
+#define    RTL8367C_FIELD_SELECTOR4_FORMAT_MASK    0x700
+#define    RTL8367C_FIELD_SELECTOR4_OFFSET_OFFSET    0
+#define    RTL8367C_FIELD_SELECTOR4_OFFSET_MASK    0xFF
+
+#define    RTL8367C_REG_FIELD_SELECTOR5    0x12ec
+#define    RTL8367C_FIELD_SELECTOR5_FORMAT_OFFSET    8
+#define    RTL8367C_FIELD_SELECTOR5_FORMAT_MASK    0x700
+#define    RTL8367C_FIELD_SELECTOR5_OFFSET_OFFSET    0
+#define    RTL8367C_FIELD_SELECTOR5_OFFSET_MASK    0xFF
+
+#define    RTL8367C_REG_FIELD_SELECTOR6    0x12ed
+#define    RTL8367C_FIELD_SELECTOR6_FORMAT_OFFSET    8
+#define    RTL8367C_FIELD_SELECTOR6_FORMAT_MASK    0x700
+#define    RTL8367C_FIELD_SELECTOR6_OFFSET_OFFSET    0
+#define    RTL8367C_FIELD_SELECTOR6_OFFSET_MASK    0xFF
+
+#define    RTL8367C_REG_FIELD_SELECTOR7    0x12ee
+#define    RTL8367C_FIELD_SELECTOR7_FORMAT_OFFSET    8
+#define    RTL8367C_FIELD_SELECTOR7_FORMAT_MASK    0x700
+#define    RTL8367C_FIELD_SELECTOR7_OFFSET_OFFSET    0
+#define    RTL8367C_FIELD_SELECTOR7_OFFSET_MASK    0xFF
+
+#define    RTL8367C_REG_FIELD_SELECTOR8    0x12ef
+#define    RTL8367C_FIELD_SELECTOR8_FORMAT_OFFSET    8
+#define    RTL8367C_FIELD_SELECTOR8_FORMAT_MASK    0x700
+#define    RTL8367C_FIELD_SELECTOR8_OFFSET_OFFSET    0
+#define    RTL8367C_FIELD_SELECTOR8_OFFSET_MASK    0xFF
+
+#define    RTL8367C_REG_FIELD_SELECTOR9    0x12f0
+#define    RTL8367C_FIELD_SELECTOR9_FORMAT_OFFSET    8
+#define    RTL8367C_FIELD_SELECTOR9_FORMAT_MASK    0x700
+#define    RTL8367C_FIELD_SELECTOR9_OFFSET_OFFSET    0
+#define    RTL8367C_FIELD_SELECTOR9_OFFSET_MASK    0xFF
+
+#define    RTL8367C_REG_FIELD_SELECTOR10    0x12f1
+#define    RTL8367C_FIELD_SELECTOR10_FORMAT_OFFSET    8
+#define    RTL8367C_FIELD_SELECTOR10_FORMAT_MASK    0x700
+#define    RTL8367C_FIELD_SELECTOR10_OFFSET_OFFSET    0
+#define    RTL8367C_FIELD_SELECTOR10_OFFSET_MASK    0xFF
+
+#define    RTL8367C_REG_FIELD_SELECTOR11    0x12f2
+#define    RTL8367C_FIELD_SELECTOR11_FORMAT_OFFSET    8
+#define    RTL8367C_FIELD_SELECTOR11_FORMAT_MASK    0x700
+#define    RTL8367C_FIELD_SELECTOR11_OFFSET_OFFSET    0
+#define    RTL8367C_FIELD_SELECTOR11_OFFSET_MASK    0xFF
+
+#define    RTL8367C_REG_FIELD_SELECTOR12    0x12f3
+#define    RTL8367C_FIELD_SELECTOR12_FORMAT_OFFSET    8
+#define    RTL8367C_FIELD_SELECTOR12_FORMAT_MASK    0x700
+#define    RTL8367C_FIELD_SELECTOR12_OFFSET_OFFSET    0
+#define    RTL8367C_FIELD_SELECTOR12_OFFSET_MASK    0xFF
+
+#define    RTL8367C_REG_FIELD_SELECTOR13    0x12f4
+#define    RTL8367C_FIELD_SELECTOR13_FORMAT_OFFSET    8
+#define    RTL8367C_FIELD_SELECTOR13_FORMAT_MASK    0x700
+#define    RTL8367C_FIELD_SELECTOR13_OFFSET_OFFSET    0
+#define    RTL8367C_FIELD_SELECTOR13_OFFSET_MASK    0xFF
+
+#define    RTL8367C_REG_FIELD_SELECTOR14    0x12f5
+#define    RTL8367C_FIELD_SELECTOR14_FORMAT_OFFSET    8
+#define    RTL8367C_FIELD_SELECTOR14_FORMAT_MASK    0x700
+#define    RTL8367C_FIELD_SELECTOR14_OFFSET_OFFSET    0
+#define    RTL8367C_FIELD_SELECTOR14_OFFSET_MASK    0xFF
+
+#define    RTL8367C_REG_FIELD_SELECTOR15    0x12f6
+#define    RTL8367C_FIELD_SELECTOR15_FORMAT_OFFSET    8
+#define    RTL8367C_FIELD_SELECTOR15_FORMAT_MASK    0x700
+#define    RTL8367C_FIELD_SELECTOR15_OFFSET_OFFSET    0
+#define    RTL8367C_FIELD_SELECTOR15_OFFSET_MASK    0xFF
+
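+/*
+ * Editor's sketch (not in the original header): all 16 FIELD_SELECTOR
+ * registers share one layout (3-bit format code in bits 10:8, 8-bit offset
+ * in bits 7:0) and sit at consecutive addresses, so a selector can be
+ * programmed generically.  What each format code means is not defined in
+ * this excerpt; the helper names are hypothetical.
+ */
+#if 0 /* illustrative only */
+static inline unsigned int
+rtl8367c_field_selector_addr(unsigned int n)	/* n = 0..15 */
+{
+	return RTL8367C_REG_FIELD_SELECTOR0 + n;	/* 0x12e7 .. 0x12f6 */
+}
+
+static inline unsigned int
+rtl8367c_field_selector_val(unsigned int format, unsigned int offset)
+{
+	return ((format << RTL8367C_FIELD_SELECTOR0_FORMAT_OFFSET) &
+		RTL8367C_FIELD_SELECTOR0_FORMAT_MASK) |
+	       (offset & RTL8367C_FIELD_SELECTOR0_OFFSET_MASK);
+}
+#endif
+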
+#define    RTL8367C_REG_HWPKT_GEN_MISC_H    0x12f7
+#define    RTL8367C_PKT_GEN_SUSPEND_P10_8_OFFSET    3
+#define    RTL8367C_PKT_GEN_SUSPEND_P10_8_MASK    0x38
+#define    RTL8367C_PKT_GEN_STATUS_P10_8_OFFSET    0
+#define    RTL8367C_PKT_GEN_STATUS_P10_8_MASK    0x7
+
+#define    RTL8367C_REG_MIRROR_SRC_PMSK    0x12fb
+#define    RTL8367C_MIRROR_SRC_PMSK_OFFSET    0
+#define    RTL8367C_MIRROR_SRC_PMSK_MASK    0x7FF
+
+#define    RTL8367C_REG_EEE_BURSTSIZE    0x12fc
+
+#define    RTL8367C_REG_EEE_IFG_CFG    0x12fd
+#define    RTL8367C_EEE_IFG_CFG_OFFSET    0
+#define    RTL8367C_EEE_IFG_CFG_MASK    0x1
+
+#define    RTL8367C_REG_FPGA_VER_MAC    0x12fe
+
+#define    RTL8367C_REG_HWPKT_GEN_MISC    0x12ff
+#define    RTL8367C_PKT_GEN_SUSPEND_OFFSET    8
+#define    RTL8367C_PKT_GEN_SUSPEND_MASK    0xFF00
+#define    RTL8367C_PKT_GEN_STATUS_OFFSET    0
+#define    RTL8367C_PKT_GEN_STATUS_MASK    0xFF
+
+/* (16'h1300)chip_reg */
+
+#define    RTL8367C_REG_CHIP_NUMBER    0x1300
+
+#define    RTL8367C_REG_CHIP_VER    0x1301
+#define    RTL8367C_VERID_OFFSET    12
+#define    RTL8367C_VERID_MASK    0xF000
+#define    RTL8367C_MCID_OFFSET    8
+#define    RTL8367C_MCID_MASK    0xF00
+#define    RTL8367C_MODEL_ID_OFFSET    4
+#define    RTL8367C_MODEL_ID_MASK    0xF0
+#define    RTL8367C_AFE_VERSION_OFFSET    0
+#define    RTL8367C_AFE_VERSION_MASK    0x1
+
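+/*
+ * Editor's sketch (not in the original header): breaking the CHIP_VER
+ * register into its version / metal-change / model / AFE fields, following
+ * the field definitions above; interpreting the codes is chip-specific.
+ */
+#if 0 /* illustrative only */
+static inline void
+rtl8367c_decode_chip_ver(unsigned int ver, unsigned int *verid,
+			 unsigned int *mcid, unsigned int *model,
+			 unsigned int *afe)
+{
+	*verid = (ver & RTL8367C_VERID_MASK) >> RTL8367C_VERID_OFFSET;
+	*mcid  = (ver & RTL8367C_MCID_MASK) >> RTL8367C_MCID_OFFSET;
+	*model = (ver & RTL8367C_MODEL_ID_MASK) >> RTL8367C_MODEL_ID_OFFSET;
+	*afe   = (ver & RTL8367C_AFE_VERSION_MASK) >> RTL8367C_AFE_VERSION_OFFSET;
+}
+#endif
+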
+#define    RTL8367C_REG_CHIP_DEBUG0    0x1303
+#define    RTL8367C_SEL33_EXT2_OFFSET    10
+#define    RTL8367C_SEL33_EXT2_MASK    0x400
+#define    RTL8367C_SEL33_EXT1_OFFSET    9
+#define    RTL8367C_SEL33_EXT1_MASK    0x200
+#define    RTL8367C_SEL33_EXT0_OFFSET    8
+#define    RTL8367C_SEL33_EXT0_MASK    0x100
+#define    RTL8367C_DRI_OTHER_OFFSET    7
+#define    RTL8367C_DRI_OTHER_MASK    0x80
+#define    RTL8367C_DRI_EXT1_RG_OFFSET    6
+#define    RTL8367C_DRI_EXT1_RG_MASK    0x40
+#define    RTL8367C_DRI_EXT0_RG_OFFSET    5
+#define    RTL8367C_DRI_EXT0_RG_MASK    0x20
+#define    RTL8367C_DRI_EXT1_OFFSET    4
+#define    RTL8367C_DRI_EXT1_MASK    0x10
+#define    RTL8367C_DRI_EXT0_OFFSET    3
+#define    RTL8367C_DRI_EXT0_MASK    0x8
+#define    RTL8367C_SLR_OTHER_OFFSET    2
+#define    RTL8367C_SLR_OTHER_MASK    0x4
+#define    RTL8367C_SLR_EXT1_OFFSET    1
+#define    RTL8367C_SLR_EXT1_MASK    0x2
+#define    RTL8367C_SLR_EXT0_OFFSET    0
+#define    RTL8367C_SLR_EXT0_MASK    0x1
+
+#define    RTL8367C_REG_CHIP_DEBUG1    0x1304
+#define    RTL8367C_RG1_DN_OFFSET    12
+#define    RTL8367C_RG1_DN_MASK    0x7000
+#define    RTL8367C_RG1_DP_OFFSET    8
+#define    RTL8367C_RG1_DP_MASK    0x700
+#define    RTL8367C_RG0_DN_OFFSET    4
+#define    RTL8367C_RG0_DN_MASK    0x70
+#define    RTL8367C_RG0_DP_OFFSET    0
+#define    RTL8367C_RG0_DP_MASK    0x7
+
+#define    RTL8367C_REG_DIGITAL_INTERFACE_SELECT    0x1305
+#define    RTL8367C_ORG_COL_OFFSET    15
+#define    RTL8367C_ORG_COL_MASK    0x8000
+#define    RTL8367C_ORG_CRS_OFFSET    14
+#define    RTL8367C_ORG_CRS_MASK    0x4000
+#define    RTL8367C_SKIP_MII_1_RXER_OFFSET    13
+#define    RTL8367C_SKIP_MII_1_RXER_MASK    0x2000
+#define    RTL8367C_SKIP_MII_0_RXER_OFFSET    12
+#define    RTL8367C_SKIP_MII_0_RXER_MASK    0x1000
+#define    RTL8367C_SELECT_GMII_1_OFFSET    4
+#define    RTL8367C_SELECT_GMII_1_MASK    0xF0
+#define    RTL8367C_SELECT_GMII_0_OFFSET    0
+#define    RTL8367C_SELECT_GMII_0_MASK    0xF
+
+#define    RTL8367C_REG_EXT0_RGMXF    0x1306
+#define    RTL8367C_EXT0_RGTX_INV_OFFSET    6
+#define    RTL8367C_EXT0_RGTX_INV_MASK    0x40
+#define    RTL8367C_EXT0_RGRX_INV_OFFSET    5
+#define    RTL8367C_EXT0_RGRX_INV_MASK    0x20
+#define    RTL8367C_EXT0_RGMXF_OFFSET    0
+#define    RTL8367C_EXT0_RGMXF_MASK    0x1F
+
+#define    RTL8367C_REG_EXT1_RGMXF    0x1307
+#define    RTL8367C_EXT1_RGTX_INV_OFFSET    6
+#define    RTL8367C_EXT1_RGTX_INV_MASK    0x40
+#define    RTL8367C_EXT1_RGRX_INV_OFFSET    5
+#define    RTL8367C_EXT1_RGRX_INV_MASK    0x20
+#define    RTL8367C_EXT1_RGMXF_OFFSET    0
+#define    RTL8367C_EXT1_RGMXF_MASK    0x1F
+
+#define    RTL8367C_REG_BISR_CTRL    0x1308
+#define    RTL8367C_BISR_CTRL_OFFSET    0
+#define    RTL8367C_BISR_CTRL_MASK    0x7
+
+#define    RTL8367C_REG_SLF_IF    0x1309
+#define    RTL8367C_LINK_DOWN_CLR_FIFO_OFFSET    7
+#define    RTL8367C_LINK_DOWN_CLR_FIFO_MASK    0x80
+#define    RTL8367C_LOOPBACK_OFFSET    6
+#define    RTL8367C_LOOPBACK_MASK    0x40
+#define    RTL8367C_WATER_LEVEL_OFFSET    4
+#define    RTL8367C_WATER_LEVEL_MASK    0x30
+#define    RTL8367C_SLF_IF_OFFSET    0
+#define    RTL8367C_SLF_IF_MASK    0x3
+
+#define    RTL8367C_REG_I2C_CLOCK_DIV    0x130a
+#define    RTL8367C_I2C_CLOCK_DIV_OFFSET    0
+#define    RTL8367C_I2C_CLOCK_DIV_MASK    0x3FF
+
+#define    RTL8367C_REG_MDX_MDC_DIV    0x130b
+#define    RTL8367C_MDX_MDC_DIV_OFFSET    0
+#define    RTL8367C_MDX_MDC_DIV_MASK    0x3FF
+
+#define    RTL8367C_REG_MISCELLANEOUS_CONFIGURE0    0x130c
+#define    RTL8367C_ADCCKI_FROM_PAD_OFFSET    14
+#define    RTL8367C_ADCCKI_FROM_PAD_MASK    0x4000
+#define    RTL8367C_ADCCKI_EN_OFFSET    13
+#define    RTL8367C_ADCCKI_EN_MASK    0x2000
+#define    RTL8367C_FLASH_ENABLE_OFFSET    12
+#define    RTL8367C_FLASH_ENABLE_MASK    0x1000
+#define    RTL8367C_EEE_ENABLE_OFFSET    11
+#define    RTL8367C_EEE_ENABLE_MASK    0x800
+#define    RTL8367C_NIC_ENABLE_OFFSET    10
+#define    RTL8367C_NIC_ENABLE_MASK    0x400
+#define    RTL8367C_FT_ENABLE_OFFSET    9
+#define    RTL8367C_FT_ENABLE_MASK    0x200
+#define    RTL8367C_OLT_ENABLE_OFFSET    8
+#define    RTL8367C_OLT_ENABLE_MASK    0x100
+#define    RTL8367C_RTCT_EN_OFFSET    7
+#define    RTL8367C_RTCT_EN_MASK    0x80
+#define    RTL8367C_PON_LIGHT_EN_OFFSET    6
+#define    RTL8367C_PON_LIGHT_EN_MASK    0x40
+#define    RTL8367C_DW8051_EN_OFFSET    5
+#define    RTL8367C_DW8051_EN_MASK    0x20
+#define    RTL8367C_AUTOLOAD_EN_OFFSET    4
+#define    RTL8367C_AUTOLOAD_EN_MASK    0x10
+#define    RTL8367C_NRESTORE_EN_OFFSET    3
+#define    RTL8367C_NRESTORE_EN_MASK    0x8
+#define    RTL8367C_DIS_PON_TABLE_INIT_OFFSET    2
+#define    RTL8367C_DIS_PON_TABLE_INIT_MASK    0x4
+#define    RTL8367C_DIS_PON_BIST_OFFSET    1
+#define    RTL8367C_DIS_PON_BIST_MASK    0x2
+#define    RTL8367C_EFUSE_EN_OFFSET    0
+#define    RTL8367C_EFUSE_EN_MASK    0x1
+
+#define    RTL8367C_REG_MISCELLANEOUS_CONFIGURE1    0x130d
+#define    RTL8367C_EEPROM_DEV_ADR_OFFSET    8
+#define    RTL8367C_EEPROM_DEV_ADR_MASK    0x7F00
+#define    RTL8367C_EEPROM_MSB_OFFSET    7
+#define    RTL8367C_EEPROM_MSB_MASK    0x80
+#define    RTL8367C_EEPROM_ADDRESS_16B_OFFSET    6
+#define    RTL8367C_EEPROM_ADDRESS_16B_MASK    0x40
+#define    RTL8367C_EEPROM_DWONLOAD_COMPLETE_OFFSET    3
+#define    RTL8367C_EEPROM_DWONLOAD_COMPLETE_MASK    0x8
+#define    RTL8367C_SPI_SLAVE_EN_OFFSET    2
+#define    RTL8367C_SPI_SLAVE_EN_MASK    0x4
+#define    RTL8367C_SMI_SEL_OFFSET    0
+#define    RTL8367C_SMI_SEL_MASK    0x3
+
+#define    RTL8367C_REG_PHY_AD    0x130f
+#define    RTL8367C_EN_PHY_MAX_POWER_OFFSET    14
+#define    RTL8367C_EN_PHY_MAX_POWER_MASK    0x4000
+#define    RTL8367C_EN_PHY_SEL_DEG_OFFSET    13
+#define    RTL8367C_EN_PHY_SEL_DEG_MASK    0x2000
+#define    RTL8367C_EXTPHY_AD_OFFSET    8
+#define    RTL8367C_EXTPHY_AD_MASK    0x1F00
+#define    RTL8367C_EN_PHY_LOW_POWER_MODE_OFFSET    7
+#define    RTL8367C_EN_PHY_LOW_POWER_MODE_MASK    0x80
+#define    RTL8367C_EN_PHY_GREEN_OFFSET    6
+#define    RTL8367C_EN_PHY_GREEN_MASK    0x40
+#define    RTL8367C_PDNPHY_OFFSET    5
+#define    RTL8367C_PDNPHY_MASK    0x20
+#define    RTL8367C_INTPHY_AD_OFFSET    0
+#define    RTL8367C_INTPHY_AD_MASK    0x1F
+
+#define    RTL8367C_REG_DIGITAL_INTERFACE0_FORCE    0x1310
+#define    RTL8367C_GMII_0_FORCE_OFFSET    12
+#define    RTL8367C_GMII_0_FORCE_MASK    0x1000
+#define    RTL8367C_RGMII_0_FORCE_OFFSET    0
+#define    RTL8367C_RGMII_0_FORCE_MASK    0xFFF
+
+#define    RTL8367C_REG_DIGITAL_INTERFACE1_FORCE    0x1311
+#define    RTL8367C_GMII_1_FORCE_OFFSET    12
+#define    RTL8367C_GMII_1_FORCE_MASK    0x1000
+#define    RTL8367C_RGMII_1_FORCE_OFFSET    0
+#define    RTL8367C_RGMII_1_FORCE_MASK    0xFFF
+
+#define    RTL8367C_REG_MAC0_FORCE_SELECT    0x1312
+#define    RTL8367C_EN_MAC0_FORCE_OFFSET    12
+#define    RTL8367C_EN_MAC0_FORCE_MASK    0x1000
+#define    RTL8367C_MAC0_FORCE_ABLTY_OFFSET    0
+#define    RTL8367C_MAC0_FORCE_ABLTY_MASK    0xFFF
+
+#define    RTL8367C_REG_MAC1_FORCE_SELECT    0x1313
+#define    RTL8367C_EN_MAC1_FORCE_OFFSET    12
+#define    RTL8367C_EN_MAC1_FORCE_MASK    0x1000
+#define    RTL8367C_MAC1_FORCE_ABLTY_OFFSET    0
+#define    RTL8367C_MAC1_FORCE_ABLTY_MASK    0xFFF
+
+#define    RTL8367C_REG_MAC2_FORCE_SELECT    0x1314
+#define    RTL8367C_EN_MAC2_FORCE_OFFSET    12
+#define    RTL8367C_EN_MAC2_FORCE_MASK    0x1000
+#define    RTL8367C_MAC2_FORCE_ABLTY_OFFSET    0
+#define    RTL8367C_MAC2_FORCE_ABLTY_MASK    0xFFF
+
+#define    RTL8367C_REG_MAC3_FORCE_SELECT    0x1315
+#define    RTL8367C_EN_MAC3_FORCE_OFFSET    12
+#define    RTL8367C_EN_MAC3_FORCE_MASK    0x1000
+#define    RTL8367C_MAC3_FORCE_ABLTY_OFFSET    0
+#define    RTL8367C_MAC3_FORCE_ABLTY_MASK    0xFFF
+
+#define    RTL8367C_REG_MAC4_FORCE_SELECT    0x1316
+#define    RTL8367C_EN_MAC4_FORCE_OFFSET    12
+#define    RTL8367C_EN_MAC4_FORCE_MASK    0x1000
+#define    RTL8367C_MAC4_FORCE_ABLTY_OFFSET    0
+#define    RTL8367C_MAC4_FORCE_ABLTY_MASK    0xFFF
+
+#define    RTL8367C_REG_MAC5_FORCE_SELECT    0x1317
+#define    RTL8367C_EN_MAC5_FORCE_OFFSET    12
+#define    RTL8367C_EN_MAC5_FORCE_MASK    0x1000
+#define    RTL8367C_MAC5_FORCE_ABLTY_OFFSET    0
+#define    RTL8367C_MAC5_FORCE_ABLTY_MASK    0xFFF
+
+#define    RTL8367C_REG_MAC6_FORCE_SELECT    0x1318
+#define    RTL8367C_EN_MAC6_FORCE_OFFSET    12
+#define    RTL8367C_EN_MAC6_FORCE_MASK    0x1000
+#define    RTL8367C_MAC6_FORCE_ABLTY_OFFSET    0
+#define    RTL8367C_MAC6_FORCE_ABLTY_MASK    0xFFF
+
+#define    RTL8367C_REG_MAC7_FORCE_SELECT    0x1319
+#define    RTL8367C_EN_MAC7_FORCE_OFFSET    12
+#define    RTL8367C_EN_MAC7_FORCE_MASK    0x1000
+#define    RTL8367C_MAC7_FORCE_ABLTY_OFFSET    0
+#define    RTL8367C_MAC7_FORCE_ABLTY_MASK    0xFFF
+
+#define    RTL8367C_REG_M10_FORCE_SELECT    0x131c
+#define    RTL8367C_EN_M10_FORCE_OFFSET    12
+#define    RTL8367C_EN_M10_FORCE_MASK    0x1000
+#define    RTL8367C_M10_FORCE_ABLTY_OFFSET    0
+#define    RTL8367C_M10_FORCE_ABLTY_MASK    0xFFF
+
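+/*
+ * Editor's sketch (not in the original header): each MACn_FORCE_SELECT
+ * register pairs a force-enable bit (bit 12) with a 12-bit forced-ability
+ * word whose internal layout is not defined in this excerpt, so the ability
+ * value is treated as an opaque caller-supplied code:
+ */
+#if 0 /* illustrative only */
+static inline unsigned int
+rtl8367c_mac_force_val(unsigned int ablty)
+{
+	return RTL8367C_EN_MAC0_FORCE_MASK |
+	       (ablty & RTL8367C_MAC0_FORCE_ABLTY_MASK);
+}
+#endif
+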
+#define    RTL8367C_REG_CHIP_RESET    0x1322
+#define    RTL8367C_GPHY_RESET_OFFSET    6
+#define    RTL8367C_GPHY_RESET_MASK    0x40
+#define    RTL8367C_NIC_RST_OFFSET    5
+#define    RTL8367C_NIC_RST_MASK    0x20
+#define    RTL8367C_DW8051_RST_OFFSET    4
+#define    RTL8367C_DW8051_RST_MASK    0x10
+#define    RTL8367C_SDS_RST_OFFSET    3
+#define    RTL8367C_SDS_RST_MASK    0x8
+#define    RTL8367C_CONFIG_RST_OFFSET    2
+#define    RTL8367C_CONFIG_RST_MASK    0x4
+#define    RTL8367C_SW_RST_OFFSET    1
+#define    RTL8367C_SW_RST_MASK    0x2
+#define    RTL8367C_CHIP_RST_OFFSET    0
+#define    RTL8367C_CHIP_RST_MASK    0x1
+
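+/*
+ * Editor's sketch (not in the original header): CHIP_RESET exposes one
+ * reset-request bit per block.  Whether the bits self-clear is not stated
+ * in this excerpt, so the sketch only composes the value a caller would
+ * write to RTL8367C_REG_CHIP_RESET to request a software reset.
+ */
+#if 0 /* illustrative only */
+static inline unsigned int
+rtl8367c_sw_reset_val(void)
+{
+	return 1u << RTL8367C_SW_RST_OFFSET;	/* == RTL8367C_SW_RST_MASK */
+}
+#endif
+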
+#define    RTL8367C_REG_DIGITAL_DEBUG_0    0x1323
+
+#define    RTL8367C_REG_DIGITAL_DEBUG_1    0x1324
+
+#define    RTL8367C_REG_INTERNAL_PHY_MDC_DRIVER    0x1325
+#define    RTL8367C_INTERNAL_PHY_MDC_DRIVER_OFFSET    0
+#define    RTL8367C_INTERNAL_PHY_MDC_DRIVER_MASK    0x3FF
+
+#define    RTL8367C_REG_LINKDOWN_TIME_CTRL    0x1326
+#define    RTL8367C_LINKDOWN_TIME_CFG_OFFSET    9
+#define    RTL8367C_LINKDOWN_TIME_CFG_MASK    0x7E00
+#define    RTL8367C_LINKDOWN_TIME_ENABLE_OFFSET    8
+#define    RTL8367C_LINKDOWN_TIME_ENABLE_MASK    0x100
+#define    RTL8367C_LINKDOWN_TIME_OFFSET    0
+#define    RTL8367C_LINKDOWN_TIME_MASK    0xFF
+
+#define    RTL8367C_REG_PHYACK_TIMEOUT    0x1331
+
+#define    RTL8367C_REG_MDXACK_TIMEOUT    0x1333
+
+#define    RTL8367C_REG_DW8051_RDY    0x1336
+#define    RTL8367C_VIAROM_WRITE_EN_OFFSET    9
+#define    RTL8367C_VIAROM_WRITE_EN_MASK    0x200
+#define    RTL8367C_SPIF_CK2_OFFSET    8
+#define    RTL8367C_SPIF_CK2_MASK    0x100
+#define    RTL8367C_RRCP_MDOE_OFFSET    7
+#define    RTL8367C_RRCP_MDOE_MASK    0x80
+#define    RTL8367C_DW8051_RATE_OFFSET    4
+#define    RTL8367C_DW8051_RATE_MASK    0x70
+#define    RTL8367C_IROM_MSB_OFFSET    2
+#define    RTL8367C_IROM_MSB_MASK    0xC
+#define    RTL8367C_ACS_IROM_ENABLE_OFFSET    1
+#define    RTL8367C_ACS_IROM_ENABLE_MASK    0x2
+#define    RTL8367C_DW8051_READY_OFFSET    0
+#define    RTL8367C_DW8051_READY_MASK    0x1
+
+#define    RTL8367C_REG_BIST_CTRL    0x133c
+#define    RTL8367C_DRF_BIST_DONE_ALL_OFFSET    5
+#define    RTL8367C_DRF_BIST_DONE_ALL_MASK    0x20
+#define    RTL8367C_DRF_BIST_PAUSE_ALL_OFFSET    4
+#define    RTL8367C_DRF_BIST_PAUSE_ALL_MASK    0x10
+#define    RTL8367C_BIST_DOAN_ALL_OFFSET    3
+#define    RTL8367C_BIST_DOAN_ALL_MASK    0x8
+#define    RTL8367C_BIST_PASS_OFFSET    0
+#define    RTL8367C_BIST_PASS_MASK    0x7
+
+#define    RTL8367C_REG_DIAG_MODE2    0x133d
+#define    RTL8367C_DIAG_MODE2_ACTRAM_OFFSET    1
+#define    RTL8367C_DIAG_MODE2_ACTRAM_MASK    0x2
+#define    RTL8367C_DIAG_MODE2_BCAM_ACTION_OFFSET    0
+#define    RTL8367C_DIAG_MODE2_BCAM_ACTION_MASK    0x1
+
+#define    RTL8367C_REG_MDX_PHY_REG0    0x133e
+#define    RTL8367C_PHY_BRD_MASK_OFFSET    4
+#define    RTL8367C_PHY_BRD_MASK_MASK    0x1F0
+#define    RTL8367C_MDX_INDACC_PAGE_OFFSET    0
+#define    RTL8367C_MDX_INDACC_PAGE_MASK    0xF
+
+#define    RTL8367C_REG_MDX_PHY_REG1    0x133f
+#define    RTL8367C_PHY_BRD_MODE_OFFSET    5
+#define    RTL8367C_PHY_BRD_MODE_MASK    0x20
+#define    RTL8367C_BRD_PHYAD_OFFSET    0
+#define    RTL8367C_BRD_PHYAD_MASK    0x1F
+
+#define    RTL8367C_REG_DEBUG_SIGNAL_SELECT_SW    0x1340
+
+#define    RTL8367C_REG_DEBUG_SIGNAL_SELECT_B    0x1341
+#define    RTL8367C_DEBUG_MX_OFFSET    9
+#define    RTL8367C_DEBUG_MX_MASK    0xE00
+#define    RTL8367C_DEBUG_SHIFT_MISC_OFFSET    6
+#define    RTL8367C_DEBUG_SHIFT_MISC_MASK    0x1C0
+#define    RTL8367C_DEBUG_SHIFT_SW_OFFSET    3
+#define    RTL8367C_DEBUG_SHIFT_SW_MASK    0x38
+#define    RTL8367C_DEBUG_SHIFT_GPHY_OFFSET    0
+#define    RTL8367C_DEBUG_SHIFT_GPHY_MASK    0x7
+
+#define    RTL8367C_REG_DEBUG_SIGNAL_I    0x1343
+
+#define    RTL8367C_REG_DEBUG_SIGNAL_H    0x1344
+
+#define    RTL8367C_REG_DBGO_SEL_GPHY    0x1345
+
+#define    RTL8367C_REG_DBGO_SEL_MISC    0x1346
+
+#define    RTL8367C_REG_BYPASS_ABLTY_LOCK    0x1349
+#define    RTL8367C_BYPASS_ABLTY_LOCK_OFFSET    0
+#define    RTL8367C_BYPASS_ABLTY_LOCK_MASK    0xFF
+
+#define    RTL8367C_REG_BYPASS_ABLTY_LOCK_EXT    0x134a
+#define    RTL8367C_BYPASS_P10_ABILIITY_LOCK_OFFSET    3
+#define    RTL8367C_BYPASS_P10_ABILIITY_LOCK_MASK    0x8
+#define    RTL8367C_BYPASS_EXT_ABILITY_LOCK_OFFSET    0
+#define    RTL8367C_BYPASS_EXT_ABILITY_LOCK_MASK    0x7
+
+#define    RTL8367C_REG_ACL_GPIO    0x134f
+#define    RTL8367C_ACL_GPIO_13_OFFSET    13
+#define    RTL8367C_ACL_GPIO_13_MASK    0x2000
+#define    RTL8367C_ACL_GPIO_12_OFFSET    12
+#define    RTL8367C_ACL_GPIO_12_MASK    0x1000
+#define    RTL8367C_ACL_GPIO_11_OFFSET    11
+#define    RTL8367C_ACL_GPIO_11_MASK    0x800
+#define    RTL8367C_ACL_GPIO_10_OFFSET    10
+#define    RTL8367C_ACL_GPIO_10_MASK    0x400
+#define    RTL8367C_ACL_GPIO_9_OFFSET    9
+#define    RTL8367C_ACL_GPIO_9_MASK    0x200
+#define    RTL8367C_ACL_GPIO_8_OFFSET    8
+#define    RTL8367C_ACL_GPIO_8_MASK    0x100
+#define    RTL8367C_ACL_GPIO_7_OFFSET    7
+#define    RTL8367C_ACL_GPIO_7_MASK    0x80
+#define    RTL8367C_ACL_GPIO_6_OFFSET    6
+#define    RTL8367C_ACL_GPIO_6_MASK    0x40
+#define    RTL8367C_ACL_GPIO_5_OFFSET    5
+#define    RTL8367C_ACL_GPIO_5_MASK    0x20
+#define    RTL8367C_ACL_GPIO_4_OFFSET    4
+#define    RTL8367C_ACL_GPIO_4_MASK    0x10
+#define    RTL8367C_ACL_GPIO_3_OFFSET    3
+#define    RTL8367C_ACL_GPIO_3_MASK    0x8
+#define    RTL8367C_ACL_GPIO_2_OFFSET    2
+#define    RTL8367C_ACL_GPIO_2_MASK    0x4
+#define    RTL8367C_ACL_GPIO_1_OFFSET    1
+#define    RTL8367C_ACL_GPIO_1_MASK    0x2
+#define    RTL8367C_ACL_GPIO_0_OFFSET    0
+#define    RTL8367C_ACL_GPIO_0_MASK    0x1
+
+#define    RTL8367C_REG_EN_GPIO    0x1350
+#define    RTL8367C_EN_GPIO_13_OFFSET    13
+#define    RTL8367C_EN_GPIO_13_MASK    0x2000
+#define    RTL8367C_EN_GPIO_12_OFFSET    12
+#define    RTL8367C_EN_GPIO_12_MASK    0x1000
+#define    RTL8367C_EN_GPIO_11_OFFSET    11
+#define    RTL8367C_EN_GPIO_11_MASK    0x800
+#define    RTL8367C_EN_GPIO_10_OFFSET    10
+#define    RTL8367C_EN_GPIO_10_MASK    0x400
+#define    RTL8367C_EN_GPIO_9_OFFSET    9
+#define    RTL8367C_EN_GPIO_9_MASK    0x200
+#define    RTL8367C_EN_GPIO_8_OFFSET    8
+#define    RTL8367C_EN_GPIO_8_MASK    0x100
+#define    RTL8367C_EN_GPIO_7_OFFSET    7
+#define    RTL8367C_EN_GPIO_7_MASK    0x80
+#define    RTL8367C_EN_GPIO_6_OFFSET    6
+#define    RTL8367C_EN_GPIO_6_MASK    0x40
+#define    RTL8367C_EN_GPIO_5_OFFSET    5
+#define    RTL8367C_EN_GPIO_5_MASK    0x20
+#define    RTL8367C_EN_GPIO_4_OFFSET    4
+#define    RTL8367C_EN_GPIO_4_MASK    0x10
+#define    RTL8367C_EN_GPIO_3_OFFSET    3
+#define    RTL8367C_EN_GPIO_3_MASK    0x8
+#define    RTL8367C_EN_GPIO_2_OFFSET    2
+#define    RTL8367C_EN_GPIO_2_MASK    0x4
+#define    RTL8367C_EN_GPIO_1_OFFSET    1
+#define    RTL8367C_EN_GPIO_1_MASK    0x2
+#define    RTL8367C_EN_GPIO_0_OFFSET    0
+#define    RTL8367C_EN_GPIO_0_MASK    0x1
+
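+/*
+ * Editor's sketch (not in the original header): ACL_GPIO and EN_GPIO hold
+ * one bit per pin (bit n <-> GPIO n for n = 0..13), so per-pin masks can
+ * be computed instead of spelled out:
+ */
+#if 0 /* illustrative only */
+static inline unsigned int
+rtl8367c_gpio_bit(unsigned int pin)	/* pin = 0..13 */
+{
+	return 1u << pin;	/* equals the RTL8367C_EN_GPIO_n_MASK above */
+}
+#endif
+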
+#define    RTL8367C_REG_CFG_MULTI_PIN    0x1351
+#define    RTL8367C_CFG_MULTI_PIN_OFFSET    0
+#define    RTL8367C_CFG_MULTI_PIN_MASK    0x3
+
+#define    RTL8367C_REG_PORT0_STATUS    0x1352
+#define    RTL8367C_PORT0_STATUS_EN_1000_LPI_OFFSET    11
+#define    RTL8367C_PORT0_STATUS_EN_1000_LPI_MASK    0x800
+#define    RTL8367C_PORT0_STATUS_EN_100_LPI_OFFSET    10
+#define    RTL8367C_PORT0_STATUS_EN_100_LPI_MASK    0x400
+#define    RTL8367C_PORT0_STATUS_NWAY_FAULT_OFFSET    9
+#define    RTL8367C_PORT0_STATUS_NWAY_FAULT_MASK    0x200
+#define    RTL8367C_PORT0_STATUS_LINK_ON_MASTER_OFFSET    8
+#define    RTL8367C_PORT0_STATUS_LINK_ON_MASTER_MASK    0x100
+#define    RTL8367C_PORT0_STATUS_NWAY_CAP_OFFSET    7
+#define    RTL8367C_PORT0_STATUS_NWAY_CAP_MASK    0x80
+#define    RTL8367C_PORT0_STATUS_TX_FLOWCTRL_CAP_OFFSET    6
+#define    RTL8367C_PORT0_STATUS_TX_FLOWCTRL_CAP_MASK    0x40
+#define    RTL8367C_PORT0_STATUS_RX_FLOWCTRL_CAP_OFFSET    5
+#define    RTL8367C_PORT0_STATUS_RX_FLOWCTRL_CAP_MASK    0x20
+#define    RTL8367C_PORT0_STATUS_LINK_STATE_OFFSET    4
+#define    RTL8367C_PORT0_STATUS_LINK_STATE_MASK    0x10
+#define    RTL8367C_PORT0_STATUS_FULL_DUPLUX_CAP_OFFSET    2
+#define    RTL8367C_PORT0_STATUS_FULL_DUPLUX_CAP_MASK    0x4
+#define    RTL8367C_PORT0_STATUS_LINK_SPEED_OFFSET    0
+#define    RTL8367C_PORT0_STATUS_LINK_SPEED_MASK    0x3
+
+#define    RTL8367C_REG_PORT1_STATUS    0x1353
+#define    RTL8367C_PORT1_STATUS_EN_1000_LPI_OFFSET    11
+#define    RTL8367C_PORT1_STATUS_EN_1000_LPI_MASK    0x800
+#define    RTL8367C_PORT1_STATUS_EN_100_LPI_OFFSET    10
+#define    RTL8367C_PORT1_STATUS_EN_100_LPI_MASK    0x400
+#define    RTL8367C_PORT1_STATUS_NWAY_FAULT_OFFSET    9
+#define    RTL8367C_PORT1_STATUS_NWAY_FAULT_MASK    0x200
+#define    RTL8367C_PORT1_STATUS_LINK_ON_MASTER_OFFSET    8
+#define    RTL8367C_PORT1_STATUS_LINK_ON_MASTER_MASK    0x100
+#define    RTL8367C_PORT1_STATUS_NWAY_CAP_OFFSET    7
+#define    RTL8367C_PORT1_STATUS_NWAY_CAP_MASK    0x80
+#define    RTL8367C_PORT1_STATUS_TX_FLOWCTRL_CAP_OFFSET    6
+#define    RTL8367C_PORT1_STATUS_TX_FLOWCTRL_CAP_MASK    0x40
+#define    RTL8367C_PORT1_STATUS_RX_FLOWCTRL_CAP_OFFSET    5
+#define    RTL8367C_PORT1_STATUS_RX_FLOWCTRL_CAP_MASK    0x20
+#define    RTL8367C_PORT1_STATUS_LINK_STATE_OFFSET    4
+#define    RTL8367C_PORT1_STATUS_LINK_STATE_MASK    0x10
+#define    RTL8367C_PORT1_STATUS_FULL_DUPLUX_CAP_OFFSET    2
+#define    RTL8367C_PORT1_STATUS_FULL_DUPLUX_CAP_MASK    0x4
+#define    RTL8367C_PORT1_STATUS_LINK_SPEED_OFFSET    0
+#define    RTL8367C_PORT1_STATUS_LINK_SPEED_MASK    0x3
+
+#define    RTL8367C_REG_PORT2_STATUS    0x1354
+#define    RTL8367C_PORT2_STATUS_EN_1000_LPI_OFFSET    11
+#define    RTL8367C_PORT2_STATUS_EN_1000_LPI_MASK    0x800
+#define    RTL8367C_PORT2_STATUS_EN_100_LPI_OFFSET    10
+#define    RTL8367C_PORT2_STATUS_EN_100_LPI_MASK    0x400
+#define    RTL8367C_PORT2_STATUS_NWAY_FAULT_OFFSET    9
+#define    RTL8367C_PORT2_STATUS_NWAY_FAULT_MASK    0x200
+#define    RTL8367C_PORT2_STATUS_LINK_ON_MASTER_OFFSET    8
+#define    RTL8367C_PORT2_STATUS_LINK_ON_MASTER_MASK    0x100
+#define    RTL8367C_PORT2_STATUS_NWAY_CAP_OFFSET    7
+#define    RTL8367C_PORT2_STATUS_NWAY_CAP_MASK    0x80
+#define    RTL8367C_PORT2_STATUS_TX_FLOWCTRL_CAP_OFFSET    6
+#define    RTL8367C_PORT2_STATUS_TX_FLOWCTRL_CAP_MASK    0x40
+#define    RTL8367C_PORT2_STATUS_RX_FLOWCTRL_CAP_OFFSET    5
+#define    RTL8367C_PORT2_STATUS_RX_FLOWCTRL_CAP_MASK    0x20
+#define    RTL8367C_PORT2_STATUS_LINK_STATE_OFFSET    4
+#define    RTL8367C_PORT2_STATUS_LINK_STATE_MASK    0x10
+#define    RTL8367C_PORT2_STATUS_FULL_DUPLUX_CAP_OFFSET    2
+#define    RTL8367C_PORT2_STATUS_FULL_DUPLUX_CAP_MASK    0x4
+#define    RTL8367C_PORT2_STATUS_LINK_SPEED_OFFSET    0
+#define    RTL8367C_PORT2_STATUS_LINK_SPEED_MASK    0x3
+
+#define    RTL8367C_REG_PORT3_STATUS    0x1355
+#define    RTL8367C_PORT3_STATUS_EN_1000_LPI_OFFSET    11
+#define    RTL8367C_PORT3_STATUS_EN_1000_LPI_MASK    0x800
+#define    RTL8367C_PORT3_STATUS_EN_100_LPI_OFFSET    10
+#define    RTL8367C_PORT3_STATUS_EN_100_LPI_MASK    0x400
+#define    RTL8367C_PORT3_STATUS_NWAY_FAULT_OFFSET    9
+#define    RTL8367C_PORT3_STATUS_NWAY_FAULT_MASK    0x200
+#define    RTL8367C_PORT3_STATUS_LINK_ON_MASTER_OFFSET    8
+#define    RTL8367C_PORT3_STATUS_LINK_ON_MASTER_MASK    0x100
+#define    RTL8367C_PORT3_STATUS_NWAY_CAP_OFFSET    7
+#define    RTL8367C_PORT3_STATUS_NWAY_CAP_MASK    0x80
+#define    RTL8367C_PORT3_STATUS_TX_FLOWCTRL_CAP_OFFSET    6
+#define    RTL8367C_PORT3_STATUS_TX_FLOWCTRL_CAP_MASK    0x40
+#define    RTL8367C_PORT3_STATUS_RX_FLOWCTRL_CAP_OFFSET    5
+#define    RTL8367C_PORT3_STATUS_RX_FLOWCTRL_CAP_MASK    0x20
+#define    RTL8367C_PORT3_STATUS_LINK_STATE_OFFSET    4
+#define    RTL8367C_PORT3_STATUS_LINK_STATE_MASK    0x10
+#define    RTL8367C_PORT3_STATUS_FULL_DUPLUX_CAP_OFFSET    2
+#define    RTL8367C_PORT3_STATUS_FULL_DUPLUX_CAP_MASK    0x4
+#define    RTL8367C_PORT3_STATUS_LINK_SPEED_OFFSET    0
+#define    RTL8367C_PORT3_STATUS_LINK_SPEED_MASK    0x3
+
+#define    RTL8367C_REG_PORT4_STATUS    0x1356
+#define    RTL8367C_PORT4_STATUS_EN_1000_LPI_OFFSET    11
+#define    RTL8367C_PORT4_STATUS_EN_1000_LPI_MASK    0x800
+#define    RTL8367C_PORT4_STATUS_EN_100_LPI_OFFSET    10
+#define    RTL8367C_PORT4_STATUS_EN_100_LPI_MASK    0x400
+#define    RTL8367C_PORT4_STATUS_NWAY_FAULT_OFFSET    9
+#define    RTL8367C_PORT4_STATUS_NWAY_FAULT_MASK    0x200
+#define    RTL8367C_PORT4_STATUS_LINK_ON_MASTER_OFFSET    8
+#define    RTL8367C_PORT4_STATUS_LINK_ON_MASTER_MASK    0x100
+#define    RTL8367C_PORT4_STATUS_NWAY_CAP_OFFSET    7
+#define    RTL8367C_PORT4_STATUS_NWAY_CAP_MASK    0x80
+#define    RTL8367C_PORT4_STATUS_TX_FLOWCTRL_CAP_OFFSET    6
+#define    RTL8367C_PORT4_STATUS_TX_FLOWCTRL_CAP_MASK    0x40
+#define    RTL8367C_PORT4_STATUS_RX_FLOWCTRL_CAP_OFFSET    5
+#define    RTL8367C_PORT4_STATUS_RX_FLOWCTRL_CAP_MASK    0x20
+#define    RTL8367C_PORT4_STATUS_LINK_STATE_OFFSET    4
+#define    RTL8367C_PORT4_STATUS_LINK_STATE_MASK    0x10
+#define    RTL8367C_PORT4_STATUS_FULL_DUPLUX_CAP_OFFSET    2
+#define    RTL8367C_PORT4_STATUS_FULL_DUPLUX_CAP_MASK    0x4
+#define    RTL8367C_PORT4_STATUS_LINK_SPEED_OFFSET    0
+#define    RTL8367C_PORT4_STATUS_LINK_SPEED_MASK    0x3
+
+#define    RTL8367C_REG_PORT5_STATUS    0x1357
+#define    RTL8367C_PORT5_STATUS_EN_1000_LPI_OFFSET    11
+#define    RTL8367C_PORT5_STATUS_EN_1000_LPI_MASK    0x800
+#define    RTL8367C_PORT5_STATUS_EN_100_LPI_OFFSET    10
+#define    RTL8367C_PORT5_STATUS_EN_100_LPI_MASK    0x400
+#define    RTL8367C_PORT5_STATUS_NWAY_FAULT_OFFSET    9
+#define    RTL8367C_PORT5_STATUS_NWAY_FAULT_MASK    0x200
+#define    RTL8367C_PORT5_STATUS_LINK_ON_MASTER_OFFSET    8
+#define    RTL8367C_PORT5_STATUS_LINK_ON_MASTER_MASK    0x100
+#define    RTL8367C_PORT5_STATUS_NWAY_CAP_OFFSET    7
+#define    RTL8367C_PORT5_STATUS_NWAY_CAP_MASK    0x80
+#define    RTL8367C_PORT5_STATUS_TX_FLOWCTRL_CAP_OFFSET    6
+#define    RTL8367C_PORT5_STATUS_TX_FLOWCTRL_CAP_MASK    0x40
+#define    RTL8367C_PORT5_STATUS_RX_FLOWCTRL_CAP_OFFSET    5
+#define    RTL8367C_PORT5_STATUS_RX_FLOWCTRL_CAP_MASK    0x20
+#define    RTL8367C_PORT5_STATUS_LINK_STATE_OFFSET    4
+#define    RTL8367C_PORT5_STATUS_LINK_STATE_MASK    0x10
+#define    RTL8367C_PORT5_STATUS_FULL_DUPLUX_CAP_OFFSET    2
+#define    RTL8367C_PORT5_STATUS_FULL_DUPLUX_CAP_MASK    0x4
+#define    RTL8367C_PORT5_STATUS_LINK_SPEED_OFFSET    0
+#define    RTL8367C_PORT5_STATUS_LINK_SPEED_MASK    0x3
+
+#define    RTL8367C_REG_PORT6_STATUS    0x1358
+#define    RTL8367C_PORT6_STATUS_EN_1000_LPI_OFFSET    11
+#define    RTL8367C_PORT6_STATUS_EN_1000_LPI_MASK    0x800
+#define    RTL8367C_PORT6_STATUS_EN_100_LPI_OFFSET    10
+#define    RTL8367C_PORT6_STATUS_EN_100_LPI_MASK    0x400
+#define    RTL8367C_PORT6_STATUS_NWAY_FAULT_OFFSET    9
+#define    RTL8367C_PORT6_STATUS_NWAY_FAULT_MASK    0x200
+#define    RTL8367C_PORT6_STATUS_LINK_ON_MASTER_OFFSET    8
+#define    RTL8367C_PORT6_STATUS_LINK_ON_MASTER_MASK    0x100
+#define    RTL8367C_PORT6_STATUS_NWAY_CAP_OFFSET    7
+#define    RTL8367C_PORT6_STATUS_NWAY_CAP_MASK    0x80
+#define    RTL8367C_PORT6_STATUS_TX_FLOWCTRL_CAP_OFFSET    6
+#define    RTL8367C_PORT6_STATUS_TX_FLOWCTRL_CAP_MASK    0x40
+#define    RTL8367C_PORT6_STATUS_RX_FLOWCTRL_CAP_OFFSET    5
+#define    RTL8367C_PORT6_STATUS_RX_FLOWCTRL_CAP_MASK    0x20
+#define    RTL8367C_PORT6_STATUS_LINK_STATE_OFFSET    4
+#define    RTL8367C_PORT6_STATUS_LINK_STATE_MASK    0x10
+#define    RTL8367C_PORT6_STATUS_FULL_DUPLUX_CAP_OFFSET    2
+#define    RTL8367C_PORT6_STATUS_FULL_DUPLUX_CAP_MASK    0x4
+#define    RTL8367C_PORT6_STATUS_LINK_SPEED_OFFSET    0
+#define    RTL8367C_PORT6_STATUS_LINK_SPEED_MASK    0x3
+
+#define    RTL8367C_REG_PORT7_STATUS    0x1359
+#define    RTL8367C_PORT7_STATUS_EN_1000_LPI_OFFSET    11
+#define    RTL8367C_PORT7_STATUS_EN_1000_LPI_MASK    0x800
+#define    RTL8367C_PORT7_STATUS_EN_100_LPI_OFFSET    10
+#define    RTL8367C_PORT7_STATUS_EN_100_LPI_MASK    0x400
+#define    RTL8367C_PORT7_STATUS_NWAY_FAULT_OFFSET    9
+#define    RTL8367C_PORT7_STATUS_NWAY_FAULT_MASK    0x200
+#define    RTL8367C_PORT7_STATUS_LINK_ON_MASTER_OFFSET    8
+#define    RTL8367C_PORT7_STATUS_LINK_ON_MASTER_MASK    0x100
+#define    RTL8367C_PORT7_STATUS_NWAY_CAP_OFFSET    7
+#define    RTL8367C_PORT7_STATUS_NWAY_CAP_MASK    0x80
+#define    RTL8367C_PORT7_STATUS_TX_FLOWCTRL_CAP_OFFSET    6
+#define    RTL8367C_PORT7_STATUS_TX_FLOWCTRL_CAP_MASK    0x40
+#define    RTL8367C_PORT7_STATUS_RX_FLOWCTRL_CAP_OFFSET    5
+#define    RTL8367C_PORT7_STATUS_RX_FLOWCTRL_CAP_MASK    0x20
+#define    RTL8367C_PORT7_STATUS_LINK_STATE_OFFSET    4
+#define    RTL8367C_PORT7_STATUS_LINK_STATE_MASK    0x10
+#define    RTL8367C_PORT7_STATUS_FULL_DUPLUX_CAP_OFFSET    2
+#define    RTL8367C_PORT7_STATUS_FULL_DUPLUX_CAP_MASK    0x4
+#define    RTL8367C_PORT7_STATUS_LINK_SPEED_OFFSET    0
+#define    RTL8367C_PORT7_STATUS_LINK_SPEED_MASK    0x3
+
+#define    RTL8367C_REG_PORT8_STATUS    0x135a
+#define    RTL8367C_PORT8_STATUS_EN_1000_LPI_OFFSET    11
+#define    RTL8367C_PORT8_STATUS_EN_1000_LPI_MASK    0x800
+#define    RTL8367C_PORT8_STATUS_EN_100_LPI_OFFSET    10
+#define    RTL8367C_PORT8_STATUS_EN_100_LPI_MASK    0x400
+#define    RTL8367C_PORT8_STATUS_NWAY_FAULT_OFFSET    9
+#define    RTL8367C_PORT8_STATUS_NWAY_FAULT_MASK    0x200
+#define    RTL8367C_PORT8_STATUS_LINK_ON_MASTER_OFFSET    8
+#define    RTL8367C_PORT8_STATUS_LINK_ON_MASTER_MASK    0x100
+#define    RTL8367C_PORT8_STATUS_NWAY_CAP_OFFSET    7
+#define    RTL8367C_PORT8_STATUS_NWAY_CAP_MASK    0x80
+#define    RTL8367C_PORT8_STATUS_TX_FLOWCTRL_CAP_OFFSET    6
+#define    RTL8367C_PORT8_STATUS_TX_FLOWCTRL_CAP_MASK    0x40
+#define    RTL8367C_PORT8_STATUS_RX_FLOWCTRL_CAP_OFFSET    5
+#define    RTL8367C_PORT8_STATUS_RX_FLOWCTRL_CAP_MASK    0x20
+#define    RTL8367C_PORT8_STATUS_LINK_STATE_OFFSET    4
+#define    RTL8367C_PORT8_STATUS_LINK_STATE_MASK    0x10
+#define    RTL8367C_PORT8_STATUS_FULL_DUPLUX_CAP_OFFSET    2
+#define    RTL8367C_PORT8_STATUS_FULL_DUPLUX_CAP_MASK    0x4
+#define    RTL8367C_PORT8_STATUS_LINK_SPEED_OFFSET    0
+#define    RTL8367C_PORT8_STATUS_LINK_SPEED_MASK    0x3
+
+#define    RTL8367C_REG_PORT9_STATUS    0x135b
+#define    RTL8367C_PORT9_STATUS_EN_1000_LPI_OFFSET    11
+#define    RTL8367C_PORT9_STATUS_EN_1000_LPI_MASK    0x800
+#define    RTL8367C_PORT9_STATUS_EN_100_LPI_OFFSET    10
+#define    RTL8367C_PORT9_STATUS_EN_100_LPI_MASK    0x400
+#define    RTL8367C_PORT9_STATUS_NWAY_FAULT_OFFSET    9
+#define    RTL8367C_PORT9_STATUS_NWAY_FAULT_MASK    0x200
+#define    RTL8367C_PORT9_STATUS_LINK_ON_MASTER_OFFSET    8
+#define    RTL8367C_PORT9_STATUS_LINK_ON_MASTER_MASK    0x100
+#define    RTL8367C_PORT9_STATUS_NWAY_CAP_OFFSET    7
+#define    RTL8367C_PORT9_STATUS_NWAY_CAP_MASK    0x80
+#define    RTL8367C_PORT9_STATUS_TX_FLOWCTRL_CAP_OFFSET    6
+#define    RTL8367C_PORT9_STATUS_TX_FLOWCTRL_CAP_MASK    0x40
+#define    RTL8367C_PORT9_STATUS_RX_FLOWCTRL_CAP_OFFSET    5
+#define    RTL8367C_PORT9_STATUS_RX_FLOWCTRL_CAP_MASK    0x20
+#define    RTL8367C_PORT9_STATUS_LINK_STATE_OFFSET    4
+#define    RTL8367C_PORT9_STATUS_LINK_STATE_MASK    0x10
+#define    RTL8367C_PORT9_STATUS_FULL_DUPLUX_CAP_OFFSET    2
+#define    RTL8367C_PORT9_STATUS_FULL_DUPLUX_CAP_MASK    0x4
+#define    RTL8367C_PORT9_STATUS_LINK_SPEED_OFFSET    0
+#define    RTL8367C_PORT9_STATUS_LINK_SPEED_MASK    0x3
+
+#define    RTL8367C_REG_PORT10_STATUS    0x135c
+#define    RTL8367C_PORT10_STATUS_EN_1000_LPI_OFFSET    11
+#define    RTL8367C_PORT10_STATUS_EN_1000_LPI_MASK    0x800
+#define    RTL8367C_PORT10_STATUS_EN_100_LPI_OFFSET    10
+#define    RTL8367C_PORT10_STATUS_EN_100_LPI_MASK    0x400
+#define    RTL8367C_PORT10_STATUS_NWAY_FAULT_OFFSET    9
+#define    RTL8367C_PORT10_STATUS_NWAY_FAULT_MASK    0x200
+#define    RTL8367C_PORT10_STATUS_LINK_ON_MASTER_OFFSET    8
+#define    RTL8367C_PORT10_STATUS_LINK_ON_MASTER_MASK    0x100
+#define    RTL8367C_PORT10_STATUS_NWAY_CAP_OFFSET    7
+#define    RTL8367C_PORT10_STATUS_NWAY_CAP_MASK    0x80
+#define    RTL8367C_PORT10_STATUS_TX_FLOWCTRL_CAP_OFFSET    6
+#define    RTL8367C_PORT10_STATUS_TX_FLOWCTRL_CAP_MASK    0x40
+#define    RTL8367C_PORT10_STATUS_RX_FLOWCTRL_CAP_OFFSET    5
+#define    RTL8367C_PORT10_STATUS_RX_FLOWCTRL_CAP_MASK    0x20
+#define    RTL8367C_PORT10_STATUS_LINK_STATE_OFFSET    4
+#define    RTL8367C_PORT10_STATUS_LINK_STATE_MASK    0x10
+#define    RTL8367C_PORT10_STATUS_FULL_DUPLUX_CAP_OFFSET    2
+#define    RTL8367C_PORT10_STATUS_FULL_DUPLUX_CAP_MASK    0x4
+#define    RTL8367C_PORT10_STATUS_LINK_SPEED_OFFSET    0
+#define    RTL8367C_PORT10_STATUS_LINK_SPEED_MASK    0x3
+
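+/*
+ * Editor's sketch (not in the original header): the eleven PORTn_STATUS
+ * registers are laid out identically at consecutive addresses
+ * (0x1352 + port), so one decoder serves all ports.  The mapping of the
+ * 2-bit speed code to 10/100/1000 Mb/s is not given in this excerpt and is
+ * returned raw; struct and function names are hypothetical.
+ */
+#if 0 /* illustrative only */
+struct rtl8367c_port_status {
+	unsigned int link;	/* LINK_STATE bit        */
+	unsigned int duplex;	/* FULL_DUPLUX_CAP bit   */
+	unsigned int speed;	/* 2-bit LINK_SPEED code */
+};
+
+static inline unsigned int
+rtl8367c_port_status_addr(unsigned int port)	/* port = 0..10 */
+{
+	return RTL8367C_REG_PORT0_STATUS + port;
+}
+
+static inline void
+rtl8367c_decode_port_status(unsigned int v, struct rtl8367c_port_status *st)
+{
+	st->link   = (v & RTL8367C_PORT0_STATUS_LINK_STATE_MASK) >>
+		     RTL8367C_PORT0_STATUS_LINK_STATE_OFFSET;
+	st->duplex = (v & RTL8367C_PORT0_STATUS_FULL_DUPLUX_CAP_MASK) >>
+		     RTL8367C_PORT0_STATUS_FULL_DUPLUX_CAP_OFFSET;
+	st->speed  = (v & RTL8367C_PORT0_STATUS_LINK_SPEED_MASK) >>
+		     RTL8367C_PORT0_STATUS_LINK_SPEED_OFFSET;
+}
+#endif
+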
+#define    RTL8367C_REG_UPS_CTRL0    0x1362
+#define    RTL8367C_P3_REF_SD_BIT0_OFFSET    8
+#define    RTL8367C_P3_REF_SD_BIT0_MASK    0xFF00
+#define    RTL8367C_P2_REF_SD_OFFSET    0
+#define    RTL8367C_P2_REF_SD_MASK    0xFF
+
+#define    RTL8367C_REG_UPS_CTRL1    0x1363
+#define    RTL8367C_UPS_OUT_OFFSET    8
+#define    RTL8367C_UPS_OUT_MASK    0xFF00
+#define    RTL8367C_UPS_WRITE_PULSE_OFFSET    1
+#define    RTL8367C_UPS_WRITE_PULSE_MASK    0x2
+#define    RTL8367C_UPS_EN_OFFSET    0
+#define    RTL8367C_UPS_EN_MASK    0x1
+
+#define    RTL8367C_REG_UPS_CTRL2    0x1364
+#define    RTL8367C_IGNOE_MAC8_LINK_OFFSET    15
+#define    RTL8367C_IGNOE_MAC8_LINK_MASK    0x8000
+#define    RTL8367C_AGREE_SLEEP_OFFSET    14
+#define    RTL8367C_AGREE_SLEEP_MASK    0x4000
+#define    RTL8367C_WAIT_FOR_AGREEMENT_OFFSET    13
+#define    RTL8367C_WAIT_FOR_AGREEMENT_MASK    0x2000
+#define    RTL8367C_WAKE_UP_BY_LINK_OFFSET    12
+#define    RTL8367C_WAKE_UP_BY_LINK_MASK    0x1000
+#define    RTL8367C_WAKE_UP_BY_PHY_OFFSET    11
+#define    RTL8367C_WAKE_UP_BY_PHY_MASK    0x800
+#define    RTL8367C_SLOW_CLK_TGL_RATE_OFFSET    7
+#define    RTL8367C_SLOW_CLK_TGL_RATE_MASK    0x780
+#define    RTL8367C_PLL_G1_CTRL_EN_OFFSET    6
+#define    RTL8367C_PLL_G1_CTRL_EN_MASK    0x40
+#define    RTL8367C_PLL_G0_CTRL_EN_OFFSET    5
+#define    RTL8367C_PLL_G0_CTRL_EN_MASK    0x20
+#define    RTL8367C_SLOW_DOWN_PLL_EN_OFFSET    4
+#define    RTL8367C_SLOW_DOWN_PLL_EN_MASK    0x10
+#define    RTL8367C_SLOW_DOWN_CLK_EN_OFFSET    3
+#define    RTL8367C_SLOW_DOWN_CLK_EN_MASK    0x8
+#define    RTL8367C_GATING_CLK_SDS_EN_OFFSET    2
+#define    RTL8367C_GATING_CLK_SDS_EN_MASK    0x4
+#define    RTL8367C_GATING_CLK_CHIP_EN_OFFSET    1
+#define    RTL8367C_GATING_CLK_CHIP_EN_MASK    0x2
+#define    RTL8367C_GATING_SW_EN_OFFSET    0
+#define    RTL8367C_GATING_SW_EN_MASK    0x1
+
+#define    RTL8367C_REG_GATING_CLK_1    0x1365
+#define    RTL8367C_ALDPS_MODE_4_OFFSET    15
+#define    RTL8367C_ALDPS_MODE_4_MASK    0x8000
+#define    RTL8367C_ALDPS_MODE_3_OFFSET    14
+#define    RTL8367C_ALDPS_MODE_3_MASK    0x4000
+#define    RTL8367C_ALDPS_MODE_2_OFFSET    13
+#define    RTL8367C_ALDPS_MODE_2_MASK    0x2000
+#define    RTL8367C_ALDPS_MODE_1_OFFSET    12
+#define    RTL8367C_ALDPS_MODE_1_MASK    0x1000
+#define    RTL8367C_ALDPS_MODE_0_OFFSET    11
+#define    RTL8367C_ALDPS_MODE_0_MASK    0x800
+#define    RTL8367C_UPS_DBGO_OFFSET    10
+#define    RTL8367C_UPS_DBGO_MASK    0x400
+#define    RTL8367C_IFMX_AFF_NOT_FF_OUT_OFFSET    9
+#define    RTL8367C_IFMX_AFF_NOT_FF_OUT_MASK    0x200
+#define    RTL8367C_WATER_LEVEL_FD_OFFSET    6
+#define    RTL8367C_WATER_LEVEL_FD_MASK    0x1C0
+#define    RTL8367C_WATER_LEVEL_Y2X_OFFSET    3
+#define    RTL8367C_WATER_LEVEL_Y2X_MASK    0x38
+#define    RTL8367C_WATER_LEVEL_X2Y_2_OFFSET    2
+#define    RTL8367C_WATER_LEVEL_X2Y_2_MASK    0x4
+#define    RTL8367C_IGNOE_MAC10_LINK_OFFSET    1
+#define    RTL8367C_IGNOE_MAC10_LINK_MASK    0x2
+#define    RTL8367C_IGNOE_MAC9_LINK_OFFSET    0
+#define    RTL8367C_IGNOE_MAC9_LINK_MASK    0x1
+
+#define    RTL8367C_REG_UPS_CTRL4    0x1366
+#define    RTL8367C_PROB_EN_OFFSET    6
+#define    RTL8367C_PROB_EN_MASK    0x40
+#define    RTL8367C_PLL_DOWN_OFFSET    1
+#define    RTL8367C_PLL_DOWN_MASK    0x2
+#define    RTL8367C_XTAL_DOWN_OFFSET    0
+#define    RTL8367C_XTAL_DOWN_MASK    0x1
+
+#define    RTL8367C_REG_UPS_CTRL5    0x1367
+#define    RTL8367C_FRC_CPU_ACPT_OFFSET    3
+#define    RTL8367C_FRC_CPU_ACPT_MASK    0x8
+#define    RTL8367C_UPS_CPU_ACPT_OFFSET    2
+#define    RTL8367C_UPS_CPU_ACPT_MASK    0x4
+#define    RTL8367C_UPS_DBG_4_OFFSET    0
+#define    RTL8367C_UPS_DBG_4_MASK    0x3
+
+#define    RTL8367C_REG_UPS_CTRL6    0x1368
+#define    RTL8367C_UPS_CTRL6_OFFSET    0
+#define    RTL8367C_UPS_CTRL6_MASK    0xF
+
+#define    RTL8367C_REG_EFUSE_CMD_70B    0x1369
+
+#define    RTL8367C_REG_EFUSE_CMD    0x1370
+#define    RTL8367C_EFUSE_TIME_OUT_FLAG_OFFSET    3
+#define    RTL8367C_EFUSE_TIME_OUT_FLAG_MASK    0x8
+#define    RTL8367C_EFUSE_ACCESS_BUSY_OFFSET    2
+#define    RTL8367C_EFUSE_ACCESS_BUSY_MASK    0x4
+#define    RTL8367C_EFUSE_COMMAND_EN_OFFSET    1
+#define    RTL8367C_EFUSE_COMMAND_EN_MASK    0x2
+#define    RTL8367C_EFUSE_WR_OFFSET    0
+#define    RTL8367C_EFUSE_WR_MASK    0x1
+
+#define    RTL8367C_REG_EFUSE_ADR    0x1371
+#define    RTL8367C_DUMMY_15_10_OFFSET    8
+#define    RTL8367C_DUMMY_15_10_MASK    0xFF00
+#define    RTL8367C_EFUSE_ADDRESS_OFFSET    0
+#define    RTL8367C_EFUSE_ADDRESS_MASK    0xFF
+
+#define    RTL8367C_REG_EFUSE_WDAT    0x1372
+
+#define    RTL8367C_REG_EFUSE_RDAT    0x1373
+
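+/*
+ * Editorial sketch of an efuse read, inferred purely from the bit names
+ * above (command enable, read/write select, busy and time-out flags); the
+ * real access sequence must be taken from the datasheet.  The accessors
+ * rtl8367c_reg_read()/rtl8367c_reg_write() are hypothetical placeholders
+ * for whatever register backend (MDIO, I2C, ...) the driver uses.
+ */
+extern unsigned int rtl8367c_reg_read(unsigned int addr);
+extern void rtl8367c_reg_write(unsigned int addr, unsigned int val);
+
+static inline int rtl8367c_efuse_read_sketch(unsigned int efuse_addr,
+					     unsigned int *data)
+{
+	unsigned int cmd;
+
+	/* Select the efuse word to access. */
+	rtl8367c_reg_write(RTL8367C_REG_EFUSE_ADR,
+			   efuse_addr & RTL8367C_EFUSE_ADDRESS_MASK);
+	/* Issue a read: leave the WR bit clear, set command-enable. */
+	rtl8367c_reg_write(RTL8367C_REG_EFUSE_CMD,
+			   RTL8367C_EFUSE_COMMAND_EN_MASK);
+	/* Poll until the controller deasserts its busy flag. */
+	do {
+		cmd = rtl8367c_reg_read(RTL8367C_REG_EFUSE_CMD);
+		if (cmd & RTL8367C_EFUSE_TIME_OUT_FLAG_MASK)
+			return -1;	/* hardware reported a time-out */
+	} while (cmd & RTL8367C_EFUSE_ACCESS_BUSY_MASK);
+
+	*data = rtl8367c_reg_read(RTL8367C_REG_EFUSE_RDAT);
+	return 0;
+}
+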
+#define    RTL8367C_REG_I2C_CTRL    0x1374
+#define    RTL8367C_MDX_MST_FAIL_LAT_OFFSET    1
+#define    RTL8367C_MDX_MST_FAIL_LAT_MASK    0x2
+#define    RTL8367C_MDX_MST_FAIL_CLRPS_OFFSET    0
+#define    RTL8367C_MDX_MST_FAIL_CLRPS_MASK    0x1
+
+#define    RTL8367C_REG_EEE_CFG    0x1375
+#define    RTL8367C_CFG_BYPASS_GATELPTD_OFFSET    11
+#define    RTL8367C_CFG_BYPASS_GATELPTD_MASK    0x800
+#define    RTL8367C_EEE_ABT_ADDR2_OFFSET    6
+#define    RTL8367C_EEE_ABT_ADDR2_MASK    0x7C0
+#define    RTL8367C_EEE_ABT_ADDR1_OFFSET    1
+#define    RTL8367C_EEE_ABT_ADDR1_MASK    0x3E
+#define    RTL8367C_EEE_POLL_EN_OFFSET    0
+#define    RTL8367C_EEE_POLL_EN_MASK    0x1
+
+#define    RTL8367C_REG_EEE_PAGE    0x1376
+
+#define    RTL8367C_REG_EEE_EXT_PAGE    0x1377
+
+#define    RTL8367C_REG_EEE_EN_SPD1000    0x1378
+
+#define    RTL8367C_REG_EEE_EN_SPD100    0x1379
+
+#define    RTL8367C_REG_EEE_LP_SPD1000    0x137a
+
+#define    RTL8367C_REG_EEE_LP_SPD100    0x137b
+
+#define    RTL8367C_REG_DW8051_PRO_REG0    0x13a0
+
+#define    RTL8367C_REG_DW8051_PRO_REG1    0x13a1
+
+#define    RTL8367C_REG_DW8051_PRO_REG2    0x13a2
+
+#define    RTL8367C_REG_DW8051_PRO_REG3    0x13a3
+
+#define    RTL8367C_REG_DW8051_PRO_REG4    0x13a4
+
+#define    RTL8367C_REG_DW8051_PRO_REG5    0x13a5
+
+#define    RTL8367C_REG_DW8051_PRO_REG6    0x13a6
+
+#define    RTL8367C_REG_DW8051_PRO_REG7    0x13a7
+
+#define    RTL8367C_REG_PROTECT_ID    0x13c0
+
+#define    RTL8367C_REG_CHIP_VER_INTL    0x13c1
+#define    RTL8367C_CHIP_VER_INTL_OFFSET    0
+#define    RTL8367C_CHIP_VER_INTL_MASK    0xF
+
+#define    RTL8367C_REG_MAGIC_ID    0x13c2
+
+#define    RTL8367C_REG_DIGITAL_INTERFACE_SELECT_1    0x13c3
+#define    RTL8367C_SKIP_MII_2_RXER_OFFSET    4
+#define    RTL8367C_SKIP_MII_2_RXER_MASK    0x10
+#define    RTL8367C_SELECT_GMII_2_OFFSET    0
+#define    RTL8367C_SELECT_GMII_2_MASK    0xF
+
+#define    RTL8367C_REG_DIGITAL_INTERFACE2_FORCE    0x13c4
+#define    RTL8367C_GMII_2_FORCE_OFFSET    12
+#define    RTL8367C_GMII_2_FORCE_MASK    0x1000
+#define    RTL8367C_RGMII_2_FORCE_OFFSET    0
+#define    RTL8367C_RGMII_2_FORCE_MASK    0xFFF
+
+#define    RTL8367C_REG_EXT2_RGMXF    0x13c5
+#define    RTL8367C_EXT2_RGTX_INV_OFFSET    6
+#define    RTL8367C_EXT2_RGTX_INV_MASK    0x40
+#define    RTL8367C_EXT2_RGRX_INV_OFFSET    5
+#define    RTL8367C_EXT2_RGRX_INV_MASK    0x20
+#define    RTL8367C_EXT2_RGMXF_OFFSET    0
+#define    RTL8367C_EXT2_RGMXF_MASK    0x1F
+
+#define    RTL8367C_REG_ROUTER_UPS_CFG    0x13c6
+#define    RTL8367C_UPS_Status_OFFSET    1
+#define    RTL8367C_UPS_Status_MASK    0x2
+#define    RTL8367C_SoftStart_OFFSET    0
+#define    RTL8367C_SoftStart_MASK    0x1
+
+#define    RTL8367C_REG_CTRL_GPIO    0x13c7
+#define    RTL8367C_CTRL_GPIO_13_OFFSET    13
+#define    RTL8367C_CTRL_GPIO_13_MASK    0x2000
+#define    RTL8367C_CTRL_GPIO_12_OFFSET    12
+#define    RTL8367C_CTRL_GPIO_12_MASK    0x1000
+#define    RTL8367C_CTRL_GPIO_11_OFFSET    11
+#define    RTL8367C_CTRL_GPIO_11_MASK    0x800
+#define    RTL8367C_CTRL_GPIO_10_OFFSET    10
+#define    RTL8367C_CTRL_GPIO_10_MASK    0x400
+#define    RTL8367C_CTRL_GPIO_9_OFFSET    9
+#define    RTL8367C_CTRL_GPIO_9_MASK    0x200
+#define    RTL8367C_CTRL_GPIO_8_OFFSET    8
+#define    RTL8367C_CTRL_GPIO_8_MASK    0x100
+#define    RTL8367C_CTRL_GPIO_7_OFFSET    7
+#define    RTL8367C_CTRL_GPIO_7_MASK    0x80
+#define    RTL8367C_CTRL_GPIO_6_OFFSET    6
+#define    RTL8367C_CTRL_GPIO_6_MASK    0x40
+#define    RTL8367C_CTRL_GPIO_5_OFFSET    5
+#define    RTL8367C_CTRL_GPIO_5_MASK    0x20
+#define    RTL8367C_CTRL_GPIO_4_OFFSET    4
+#define    RTL8367C_CTRL_GPIO_4_MASK    0x10
+#define    RTL8367C_CTRL_GPIO_3_OFFSET    3
+#define    RTL8367C_CTRL_GPIO_3_MASK    0x8
+#define    RTL8367C_CTRL_GPIO_2_OFFSET    2
+#define    RTL8367C_CTRL_GPIO_2_MASK    0x4
+#define    RTL8367C_CTRL_GPIO_1_OFFSET    1
+#define    RTL8367C_CTRL_GPIO_1_MASK    0x2
+#define    RTL8367C_CTRL_GPIO_0_OFFSET    0
+#define    RTL8367C_CTRL_GPIO_0_MASK    0x1
+
+#define    RTL8367C_REG_SEL_GPIO    0x13c8
+#define    RTL8367C_SEL_GPIO_13_OFFSET    13
+#define    RTL8367C_SEL_GPIO_13_MASK    0x2000
+#define    RTL8367C_SEL_GPIO_12_OFFSET    12
+#define    RTL8367C_SEL_GPIO_12_MASK    0x1000
+#define    RTL8367C_SEL_GPIO_11_OFFSET    11
+#define    RTL8367C_SEL_GPIO_11_MASK    0x800
+#define    RTL8367C_SEL_GPIO_10_OFFSET    10
+#define    RTL8367C_SEL_GPIO_10_MASK    0x400
+#define    RTL8367C_SEL_GPIO_9_OFFSET    9
+#define    RTL8367C_SEL_GPIO_9_MASK    0x200
+#define    RTL8367C_SEL_GPIO_8_OFFSET    8
+#define    RTL8367C_SEL_GPIO_8_MASK    0x100
+#define    RTL8367C_SEL_GPIO_7_OFFSET    7
+#define    RTL8367C_SEL_GPIO_7_MASK    0x80
+#define    RTL8367C_SEL_GPIO_6_OFFSET    6
+#define    RTL8367C_SEL_GPIO_6_MASK    0x40
+#define    RTL8367C_SEL_GPIO_5_OFFSET    5
+#define    RTL8367C_SEL_GPIO_5_MASK    0x20
+#define    RTL8367C_SEL_GPIO_4_OFFSET    4
+#define    RTL8367C_SEL_GPIO_4_MASK    0x10
+#define    RTL8367C_SEL_GPIO_3_OFFSET    3
+#define    RTL8367C_SEL_GPIO_3_MASK    0x8
+#define    RTL8367C_SEL_GPIO_2_OFFSET    2
+#define    RTL8367C_SEL_GPIO_2_MASK    0x4
+#define    RTL8367C_SEL_GPIO_1_OFFSET    1
+#define    RTL8367C_SEL_GPIO_1_MASK    0x2
+#define    RTL8367C_SEL_GPIO_0_OFFSET    0
+#define    RTL8367C_SEL_GPIO_0_MASK    0x1
+
+#define    RTL8367C_REG_STATUS_GPIO    0x13c9
+#define    RTL8367C_STATUS_GPIO_OFFSET    0
+#define    RTL8367C_STATUS_GPIO_MASK    0x3FFF
+
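+/*
+ * Editorial note: RTL8367C_REG_CTRL_GPIO and RTL8367C_REG_SEL_GPIO carry one
+ * bit per GPIO line (0..13), and RTL8367C_REG_STATUS_GPIO reads all fourteen
+ * lines at once.  The per-pin masks are simply 1 << pin, so the per-pin
+ * macros above can be replaced by a computed mask; what "ctrl" and "sel"
+ * each select (direction, level, pin-mux) is not stated here and must come
+ * from the datasheet.  Hypothetical helper:
+ */
+static inline unsigned int rtl8367c_gpio_bit(unsigned int pin)
+{
+	/* pin = 0..13, matching the *_GPIO_<n>_OFFSET macros above. */
+	return 1u << pin;
+}
+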
+#define    RTL8367C_REG_SYNC_ETH_CFG    0x13e0
+#define    RTL8367C_DUMMY2_OFFSET    9
+#define    RTL8367C_DUMMY2_MASK    0xFE00
+#define    RTL8367C_RFC2819_TYPE_OFFSET    8
+#define    RTL8367C_RFC2819_TYPE_MASK    0x100
+#define    RTL8367C_DUMMY1_OFFSET    7
+#define    RTL8367C_DUMMY1_MASK    0x80
+#define    RTL8367C_FIBER_SYNCE125_L_SEL_OFFSET    6
+#define    RTL8367C_FIBER_SYNCE125_L_SEL_MASK    0x40
+#define    RTL8367C_SYNC_ETH_EN_RTT2_OFFSET    5
+#define    RTL8367C_SYNC_ETH_EN_RTT2_MASK    0x20
+#define    RTL8367C_SYNC_ETH_EN_RTT1_OFFSET    4
+#define    RTL8367C_SYNC_ETH_EN_RTT1_MASK    0x10
+#define    RTL8367C_SYNC_ETH_SEL_DPLL_OFFSET    3
+#define    RTL8367C_SYNC_ETH_SEL_DPLL_MASK    0x8
+#define    RTL8367C_SYNC_ETH_SEL_PHYREF_OFFSET    2
+#define    RTL8367C_SYNC_ETH_SEL_PHYREF_MASK    0x4
+#define    RTL8367C_SYNC_ETH_SEL_XTAL_OFFSET    1
+#define    RTL8367C_SYNC_ETH_SEL_XTAL_MASK    0x2
+#define    RTL8367C_DUMMY0_OFFSET    0
+#define    RTL8367C_DUMMY0_MASK    0x1
+
+#define    RTL8367C_REG_LED_DRI_CFG    0x13e1
+#define    RTL8367C_LED_DRI_CFG_DUMMY_OFFSET    1
+#define    RTL8367C_LED_DRI_CFG_DUMMY_MASK    0xFFFE
+#define    RTL8367C_LED_DRIVING_OFFSET    0
+#define    RTL8367C_LED_DRIVING_MASK    0x1
+
+#define    RTL8367C_REG_CHIP_DEBUG2    0x13e2
+#define    RTL8367C_RG2_DN_OFFSET    6
+#define    RTL8367C_RG2_DN_MASK    0x1C0
+#define    RTL8367C_RG2_DP_OFFSET    3
+#define    RTL8367C_RG2_DP_MASK    0x38
+#define    RTL8367C_DRI_EXT2_RG_OFFSET    2
+#define    RTL8367C_DRI_EXT2_RG_MASK    0x4
+#define    RTL8367C_DRI_EXT2_OFFSET    1
+#define    RTL8367C_DRI_EXT2_MASK    0x2
+#define    RTL8367C_SLR_EXT2_OFFSET    0
+#define    RTL8367C_SLR_EXT2_MASK    0x1
+
+#define    RTL8367C_REG_DIGITAL_DEBUG_2    0x13e3
+
+#define    RTL8367C_REG_FIBER_RTL_OUI_CFG0    0x13e4
+#define    RTL8367C_FIBER_RTL_OUI_CFG0_OFFSET    0
+#define    RTL8367C_FIBER_RTL_OUI_CFG0_MASK    0xFF
+
+#define    RTL8367C_REG_FIBER_RTL_OUI_CFG1    0x13e5
+
+#define    RTL8367C_REG_FIBER_CFG_0    0x13e6
+#define    RTL8367C_REV_NUM_OFFSET    8
+#define    RTL8367C_REV_NUM_MASK    0xF00
+#define    RTL8367C_MODEL_NUM_OFFSET    0
+#define    RTL8367C_MODEL_NUM_MASK    0x3F
+
+#define    RTL8367C_REG_FIBER_CFG_1    0x13e7
+#define    RTL8367C_SDS_FRC_REG4_OFFSET    12
+#define    RTL8367C_SDS_FRC_REG4_MASK    0x1000
+#define    RTL8367C_SDS_FRC_REG4_FIB100_OFFSET    11
+#define    RTL8367C_SDS_FRC_REG4_FIB100_MASK    0x800
+#define    RTL8367C_SEL_MASK_ONL_OFFSET    5
+#define    RTL8367C_SEL_MASK_ONL_MASK    0x20
+#define    RTL8367C_DIS_QUALITY_IN_MASK_OFFSET    4
+#define    RTL8367C_DIS_QUALITY_IN_MASK_MASK    0x10
+#define    RTL8367C_SDS_FRC_MODE_OFFSET    3
+#define    RTL8367C_SDS_FRC_MODE_MASK    0x8
+#define    RTL8367C_SDS_MODE_OFFSET    0
+#define    RTL8367C_SDS_MODE_MASK    0x7
+
+#define    RTL8367C_REG_FIBER_CFG_2    0x13e8
+#define    RTL8367C_SEL_SDET_PS_OFFSET    12
+#define    RTL8367C_SEL_SDET_PS_MASK    0xF000
+#define    RTL8367C_UTP_DIS_RX_OFFSET    10
+#define    RTL8367C_UTP_DIS_RX_MASK    0xC00
+#define    RTL8367C_UTP_FRC_LD_OFFSET    8
+#define    RTL8367C_UTP_FRC_LD_MASK    0x300
+#define    RTL8367C_SDS_RX_DISABLE_OFFSET    6
+#define    RTL8367C_SDS_RX_DISABLE_MASK    0xC0
+#define    RTL8367C_SDS_TX_DISABLE_OFFSET    4
+#define    RTL8367C_SDS_TX_DISABLE_MASK    0x30
+#define    RTL8367C_FIBER_CFG_2_SDS_PWR_ISO_OFFSET    2
+#define    RTL8367C_FIBER_CFG_2_SDS_PWR_ISO_MASK    0xC
+#define    RTL8367C_SDS_FRC_LD_OFFSET    0
+#define    RTL8367C_SDS_FRC_LD_MASK    0x3
+
+#define    RTL8367C_REG_FIBER_CFG_3    0x13e9
+#define    RTL8367C_FIBER_CFG_3_OFFSET    0
+#define    RTL8367C_FIBER_CFG_3_MASK    0xFFF
+
+#define    RTL8367C_REG_FIBER_CFG_4    0x13ea
+
+#define    RTL8367C_REG_UTP_FIB_DET    0x13eb
+#define    RTL8367C_FORCE_SEL_FIBER_OFFSET    14
+#define    RTL8367C_FORCE_SEL_FIBER_MASK    0xC000
+#define    RTL8367C_FIB_FINAL_TIMER_OFFSET    12
+#define    RTL8367C_FIB_FINAL_TIMER_MASK    0x3000
+#define    RTL8367C_FIB_LINK_TIMER_OFFSET    10
+#define    RTL8367C_FIB_LINK_TIMER_MASK    0xC00
+#define    RTL8367C_FIB_SDET_TIMER_OFFSET    8
+#define    RTL8367C_FIB_SDET_TIMER_MASK    0x300
+#define    RTL8367C_UTP_LINK_TIMER_OFFSET    6
+#define    RTL8367C_UTP_LINK_TIMER_MASK    0xC0
+#define    RTL8367C_UTP_SDET_TIMER_OFFSET    4
+#define    RTL8367C_UTP_SDET_TIMER_MASK    0x30
+#define    RTL8367C_FORCE_AUTODET_OFFSET    3
+#define    RTL8367C_FORCE_AUTODET_MASK    0x8
+#define    RTL8367C_AUTODET_FSM_CLR_OFFSET    2
+#define    RTL8367C_AUTODET_FSM_CLR_MASK    0x4
+#define    RTL8367C_UTP_FIRST_OFFSET    1
+#define    RTL8367C_UTP_FIRST_MASK    0x2
+#define    RTL8367C_UTP_FIB_DISAUTODET_OFFSET    0
+#define    RTL8367C_UTP_FIB_DISAUTODET_MASK    0x1
+
+#define    RTL8367C_REG_NRESTORE_MAGIC_NUM    0x13ec
+#define    RTL8367C_NRESTORE_MAGIC_NUM_MASK    0xFFFF
+#define    RTL8367C_EEPROM_PROGRAM_CYCLE_OFFSET    0
+#define    RTL8367C_EEPROM_PROGRAM_CYCLE_MASK    0x3
+
+#define    RTL8367C_REG_MAC_ACTIVE    0x13ee
+#define    RTL8367C_MAC_ACTIVE_H_OFFSET    9
+#define    RTL8367C_MAC_ACTIVE_H_MASK    0xE00
+#define    RTL8367C_FORCE_MAC_ACTIVE_OFFSET    8
+#define    RTL8367C_FORCE_MAC_ACTIVE_MASK    0x100
+#define    RTL8367C_MAC_ACTIVE_OFFSET    0
+#define    RTL8367C_MAC_ACTIVE_MASK    0xFF
+
+#define    RTL8367C_REG_SERDES_RESULT    0x13ef
+#define    RTL8367C_FIB100_DET_1_OFFSET    12
+#define    RTL8367C_FIB100_DET_1_MASK    0x1000
+#define    RTL8367C_FIB_ISO_1_OFFSET    11
+#define    RTL8367C_FIB_ISO_1_MASK    0x800
+#define    RTL8367C_SDS_ANFAULT_1_OFFSET    10
+#define    RTL8367C_SDS_ANFAULT_1_MASK    0x400
+#define    RTL8367C_SDS_INTB_1_OFFSET    9
+#define    RTL8367C_SDS_INTB_1_MASK    0x200
+#define    RTL8367C_SDS_LINK_OK_1_OFFSET    8
+#define    RTL8367C_SDS_LINK_OK_1_MASK    0x100
+#define    RTL8367C_FIB100_DET_OFFSET    4
+#define    RTL8367C_FIB100_DET_MASK    0x10
+#define    RTL8367C_FIB_ISO_OFFSET    3
+#define    RTL8367C_FIB_ISO_MASK    0x8
+#define    RTL8367C_SDS_ANFAULT_OFFSET    2
+#define    RTL8367C_SDS_ANFAULT_MASK    0x4
+#define    RTL8367C_SDS_INTB_OFFSET    1
+#define    RTL8367C_SDS_INTB_MASK    0x2
+#define    RTL8367C_SDS_LINK_OK_OFFSET    0
+#define    RTL8367C_SDS_LINK_OK_MASK    0x1
+
+#define    RTL8367C_REG_CHIP_ECO    0x13f0
+#define    RTL8367C_CFG_CHIP_ECO_OFFSET    1
+#define    RTL8367C_CFG_CHIP_ECO_MASK    0xFFFE
+#define    RTL8367C_CFG_CKOUTEN_OFFSET    0
+#define    RTL8367C_CFG_CKOUTEN_MASK    0x1
+
+#define    RTL8367C_REG_WAKELPI_SLOT_PRD    0x13f1
+#define    RTL8367C_WAKELPI_SLOT_PRD_OFFSET    0
+#define    RTL8367C_WAKELPI_SLOT_PRD_MASK    0x1F
+
+#define    RTL8367C_REG_WAKELPI_SLOT_PG0    0x13f2
+#define    RTL8367C_WAKELPI_SLOT_P1_OFFSET    8
+#define    RTL8367C_WAKELPI_SLOT_P1_MASK    0x1F00
+#define    RTL8367C_WAKELPI_SLOT_P0_OFFSET    0
+#define    RTL8367C_WAKELPI_SLOT_P0_MASK    0x1F
+
+#define    RTL8367C_REG_WAKELPI_SLOT_PG1    0x13f3
+#define    RTL8367C_WAKELPI_SLOT_P3_OFFSET    8
+#define    RTL8367C_WAKELPI_SLOT_P3_MASK    0x1F00
+#define    RTL8367C_WAKELPI_SLOT_P2_OFFSET    0
+#define    RTL8367C_WAKELPI_SLOT_P2_MASK    0x1F
+
+#define    RTL8367C_REG_WAKELPI_SLOT_PG2    0x13f4
+#define    RTL8367C_WAKELPI_SLOT_P5_OFFSET    8
+#define    RTL8367C_WAKELPI_SLOT_P5_MASK    0x1F00
+#define    RTL8367C_WAKELPI_SLOT_P4_OFFSET    0
+#define    RTL8367C_WAKELPI_SLOT_P4_MASK    0x1F
+
+#define    RTL8367C_REG_WAKELPI_SLOT_PG3    0x13f5
+#define    RTL8367C_WAKELPI_SLOT_P7_OFFSET    8
+#define    RTL8367C_WAKELPI_SLOT_P7_MASK    0x1F00
+#define    RTL8367C_WAKELPI_SLOT_P6_OFFSET    0
+#define    RTL8367C_WAKELPI_SLOT_P6_MASK    0x1F
+
+#define    RTL8367C_REG_SYNC_FIFO_0    0x13f6
+#define    RTL8367C_SYNC_FIFO_TX_OFFSET    8
+#define    RTL8367C_SYNC_FIFO_TX_MASK    0x700
+#define    RTL8367C_SYNC_FIFO_RX_OFFSET    0
+#define    RTL8367C_SYNC_FIFO_RX_MASK    0xFF
+
+#define    RTL8367C_REG_SYNC_FIFO_1    0x13f7
+#define    RTL8367C_SYNC_FIFO_RX_ERR_P10_8_OFFSET    11
+#define    RTL8367C_SYNC_FIFO_RX_ERR_P10_8_MASK    0x3800
+#define    RTL8367C_SYNC_FIFO_TX_ERR_OFFSET    8
+#define    RTL8367C_SYNC_FIFO_TX_ERR_MASK    0x700
+#define    RTL8367C_SYNC_FIFO_RX_ERR_OFFSET    0
+#define    RTL8367C_SYNC_FIFO_RX_ERR_MASK    0xFF
+
+#define    RTL8367C_REG_RGM_EEE    0x13f8
+#define    RTL8367C_EXT2_PAD_STOP_EN_OFFSET    14
+#define    RTL8367C_EXT2_PAD_STOP_EN_MASK    0x4000
+#define    RTL8367C_EXT1_PAD_STOP_EN_OFFSET    13
+#define    RTL8367C_EXT1_PAD_STOP_EN_MASK    0x2000
+#define    RTL8367C_EXT0_PAD_STOP_EN_OFFSET    12
+#define    RTL8367C_EXT0_PAD_STOP_EN_MASK    0x1000
+#define    RTL8367C_EXT2_CYCLE_PAD_OFFSET    8
+#define    RTL8367C_EXT2_CYCLE_PAD_MASK    0xF00
+#define    RTL8367C_EXT1_CYCLE_PAD_OFFSET    4
+#define    RTL8367C_EXT1_CYCLE_PAD_MASK    0xF0
+#define    RTL8367C_EXT0_CYCLE_PAD_OFFSET    0
+#define    RTL8367C_EXT0_CYCLE_PAD_MASK    0xF
+
+#define    RTL8367C_REG_EXT_TXC_DLY    0x13f9
+#define    RTL8367C_EXT1_GMII_TX_DELAY_OFFSET    12
+#define    RTL8367C_EXT1_GMII_TX_DELAY_MASK    0x7000
+#define    RTL8367C_EXT0_GMII_TX_DELAY_OFFSET    9
+#define    RTL8367C_EXT0_GMII_TX_DELAY_MASK    0xE00
+#define    RTL8367C_EXT2_RGMII_TX_DELAY_OFFSET    6
+#define    RTL8367C_EXT2_RGMII_TX_DELAY_MASK    0x1C0
+#define    RTL8367C_EXT1_RGMII_TX_DELAY_OFFSET    3
+#define    RTL8367C_EXT1_RGMII_TX_DELAY_MASK    0x38
+#define    RTL8367C_EXT0_RGMII_TX_DELAY_OFFSET    0
+#define    RTL8367C_EXT0_RGMII_TX_DELAY_MASK    0x7
+
+#define    RTL8367C_REG_IO_MISC_CTRL    0x13fa
+#define    RTL8367C_IO_BUZZER_EN_OFFSET    3
+#define    RTL8367C_IO_BUZZER_EN_MASK    0x8
+#define    RTL8367C_IO_INTRPT_EN_OFFSET    2
+#define    RTL8367C_IO_INTRPT_EN_MASK    0x4
+#define    RTL8367C_IO_NRESTORE_EN_OFFSET    1
+#define    RTL8367C_IO_NRESTORE_EN_MASK    0x2
+#define    RTL8367C_IO_UART_EN_OFFSET    0
+#define    RTL8367C_IO_UART_EN_MASK    0x1
+
+#define    RTL8367C_REG_CHIP_DUMMY_NO    0x13fb
+#define    RTL8367C_CHIP_DUMMY_NO_OFFSET    0
+#define    RTL8367C_CHIP_DUMMY_NO_MASK    0xF
+
+#define    RTL8367C_REG_RC_CALIB_CFG    0x13fc
+#define    RTL8367C_TRIG_BURN_EFUSE_OFFSET    9
+#define    RTL8367C_TRIG_BURN_EFUSE_MASK    0x200
+#define    RTL8367C_AMP_CALIB_FAIL_OFFSET    8
+#define    RTL8367C_AMP_CALIB_FAIL_MASK    0x100
+#define    RTL8367C_R_CALIB_FAIL_OFFSET    7
+#define    RTL8367C_R_CALIB_FAIL_MASK    0x80
+#define    RTL8367C_CFG_CALIB_MODE_OFFSET    6
+#define    RTL8367C_CFG_CALIB_MODE_MASK    0x40
+#define    RTL8367C_CENTER_PORT_SEL_OFFSET    3
+#define    RTL8367C_CENTER_PORT_SEL_MASK    0x38
+#define    RTL8367C_CALIB_FINISH_OFFSET    2
+#define    RTL8367C_CALIB_FINISH_MASK    0x4
+#define    RTL8367C_CFG_CALIB_OPTION_OFFSET    1
+#define    RTL8367C_CFG_CALIB_OPTION_MASK    0x2
+#define    RTL8367C_CFG_CALIB_EN_OFFSET    0
+#define    RTL8367C_CFG_CALIB_EN_MASK    0x1
+
+#define    RTL8367C_REG_WAKELPI_SLOT_PG4    0x13fd
+#define    RTL8367C_WAKELPI_SLOT_P9_OFFSET    8
+#define    RTL8367C_WAKELPI_SLOT_P9_MASK    0x1F00
+#define    RTL8367C_WAKELPI_SLOT_P8_OFFSET    0
+#define    RTL8367C_WAKELPI_SLOT_P8_MASK    0x1F
+
+#define    RTL8367C_REG_WAKELPI_SLOT_PG5    0x13fe
+#define    RTL8367C_WAKELPI_SLOT_PG5_OFFSET    0
+#define    RTL8367C_WAKELPI_SLOT_PG5_MASK    0x1F
+
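+/*
+ * Editorial sketch: the per-port wake-LPI slot fields are packed two 5-bit
+ * entries per register, with the page for ports 8..10 (0x13fd/0x13fe) split
+ * off from the page for ports 0..7 (0x13f2..0x13f5).  Hypothetical lookup,
+ * derived only from the addresses above:
+ */
+static inline unsigned int rtl8367c_wakelpi_slot_reg(unsigned int port)
+{
+	if (port < 8)
+		return RTL8367C_REG_WAKELPI_SLOT_PG0 + (port >> 1);
+	return RTL8367C_REG_WAKELPI_SLOT_PG4 + ((port - 8) >> 1);
+}
+
+static inline unsigned int rtl8367c_wakelpi_slot_shift(unsigned int port)
+{
+	/* Even ports sit in bits 4:0, odd ports in bits 12:8. */
+	return (port & 1) ? 8 : 0;
+}
+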
+/* (16'h1400) mtrpool_reg: traffic-meter pool registers */
+
+#define    RTL8367C_REG_METER0_RATE_CTRL0    0x1400
+
+#define    RTL8367C_REG_METER0_RATE_CTRL1    0x1401
+#define    RTL8367C_METER0_RATE_CTRL1_OFFSET    0
+#define    RTL8367C_METER0_RATE_CTRL1_MASK    0x7
+
+#define    RTL8367C_REG_METER1_RATE_CTRL0    0x1402
+
+#define    RTL8367C_REG_METER1_RATE_CTRL1    0x1403
+#define    RTL8367C_METER1_RATE_CTRL1_OFFSET    0
+#define    RTL8367C_METER1_RATE_CTRL1_MASK    0x7
+
+#define    RTL8367C_REG_METER2_RATE_CTRL0    0x1404
+
+#define    RTL8367C_REG_METER2_RATE_CTRL1    0x1405
+#define    RTL8367C_METER2_RATE_CTRL1_OFFSET    0
+#define    RTL8367C_METER2_RATE_CTRL1_MASK    0x7
+
+#define    RTL8367C_REG_METER3_RATE_CTRL0    0x1406
+
+#define    RTL8367C_REG_METER3_RATE_CTRL1    0x1407
+#define    RTL8367C_METER3_RATE_CTRL1_OFFSET    0
+#define    RTL8367C_METER3_RATE_CTRL1_MASK    0x7
+
+#define    RTL8367C_REG_METER4_RATE_CTRL0    0x1408
+
+#define    RTL8367C_REG_METER4_RATE_CTRL1    0x1409
+#define    RTL8367C_METER4_RATE_CTRL1_OFFSET    0
+#define    RTL8367C_METER4_RATE_CTRL1_MASK    0x7
+
+#define    RTL8367C_REG_METER5_RATE_CTRL0    0x140a
+
+#define    RTL8367C_REG_METER5_RATE_CTRL1    0x140b
+#define    RTL8367C_METER5_RATE_CTRL1_OFFSET    0
+#define    RTL8367C_METER5_RATE_CTRL1_MASK    0x7
+
+#define    RTL8367C_REG_METER6_RATE_CTRL0    0x140c
+
+#define    RTL8367C_REG_METER6_RATE_CTRL1    0x140d
+#define    RTL8367C_METER6_RATE_CTRL1_OFFSET    0
+#define    RTL8367C_METER6_RATE_CTRL1_MASK    0x7
+
+#define    RTL8367C_REG_METER7_RATE_CTRL0    0x140e
+
+#define    RTL8367C_REG_METER7_RATE_CTRL1    0x140f
+#define    RTL8367C_METER7_RATE_CTRL1_OFFSET    0
+#define    RTL8367C_METER7_RATE_CTRL1_MASK    0x7
+
+#define    RTL8367C_REG_METER8_RATE_CTRL0    0x1410
+
+#define    RTL8367C_REG_METER8_RATE_CTRL1    0x1411
+#define    RTL8367C_METER8_RATE_CTRL1_OFFSET    0
+#define    RTL8367C_METER8_RATE_CTRL1_MASK    0x7
+
+#define    RTL8367C_REG_METER9_RATE_CTRL0    0x1412
+
+#define    RTL8367C_REG_METER9_RATE_CTRL1    0x1413
+#define    RTL8367C_METER9_RATE_CTRL1_OFFSET    0
+#define    RTL8367C_METER9_RATE_CTRL1_MASK    0x7
+
+#define    RTL8367C_REG_METER10_RATE_CTRL0    0x1414
+
+#define    RTL8367C_REG_METER10_RATE_CTRL1    0x1415
+#define    RTL8367C_METER10_RATE_CTRL1_OFFSET    0
+#define    RTL8367C_METER10_RATE_CTRL1_MASK    0x7
+
+#define    RTL8367C_REG_METER11_RATE_CTRL0    0x1416
+
+#define    RTL8367C_REG_METER11_RATE_CTRL1    0x1417
+#define    RTL8367C_METER11_RATE_CTRL1_OFFSET    0
+#define    RTL8367C_METER11_RATE_CTRL1_MASK    0x7
+
+#define    RTL8367C_REG_METER12_RATE_CTRL0    0x1418
+
+#define    RTL8367C_REG_METER12_RATE_CTRL1    0x1419
+#define    RTL8367C_METER12_RATE_CTRL1_OFFSET    0
+#define    RTL8367C_METER12_RATE_CTRL1_MASK    0x7
+
+#define    RTL8367C_REG_METER13_RATE_CTRL0    0x141a
+
+#define    RTL8367C_REG_METER13_RATE_CTRL1    0x141b
+#define    RTL8367C_METER13_RATE_CTRL1_OFFSET    0
+#define    RTL8367C_METER13_RATE_CTRL1_MASK    0x7
+
+#define    RTL8367C_REG_METER14_RATE_CTRL0    0x141c
+
+#define    RTL8367C_REG_METER14_RATE_CTRL1    0x141d
+#define    RTL8367C_METER14_RATE_CTRL1_OFFSET    0
+#define    RTL8367C_METER14_RATE_CTRL1_MASK    0x7
+
+#define    RTL8367C_REG_METER15_RATE_CTRL0    0x141e
+
+#define    RTL8367C_REG_METER15_RATE_CTRL1    0x141f
+#define    RTL8367C_METER15_RATE_CTRL1_OFFSET    0
+#define    RTL8367C_METER15_RATE_CTRL1_MASK    0x7
+
+#define    RTL8367C_REG_METER16_RATE_CTRL0    0x1420
+
+#define    RTL8367C_REG_METER16_RATE_CTRL1    0x1421
+#define    RTL8367C_METER16_RATE_CTRL1_OFFSET    0
+#define    RTL8367C_METER16_RATE_CTRL1_MASK    0x7
+
+#define    RTL8367C_REG_METER17_RATE_CTRL0    0x1422
+
+#define    RTL8367C_REG_METER17_RATE_CTRL1    0x1423
+#define    RTL8367C_METER17_RATE_CTRL1_OFFSET    0
+#define    RTL8367C_METER17_RATE_CTRL1_MASK    0x7
+
+#define    RTL8367C_REG_METER18_RATE_CTRL0    0x1424
+
+#define    RTL8367C_REG_METER18_RATE_CTRL1    0x1425
+#define    RTL8367C_METER18_RATE_CTRL1_OFFSET    0
+#define    RTL8367C_METER18_RATE_CTRL1_MASK    0x7
+
+#define    RTL8367C_REG_METER19_RATE_CTRL0    0x1426
+
+#define    RTL8367C_REG_METER19_RATE_CTRL1    0x1427
+#define    RTL8367C_METER19_RATE_CTRL1_OFFSET    0
+#define    RTL8367C_METER19_RATE_CTRL1_MASK    0x7
+
+#define    RTL8367C_REG_METER20_RATE_CTRL0    0x1428
+
+#define    RTL8367C_REG_METER20_RATE_CTRL1    0x1429
+#define    RTL8367C_METER20_RATE_CTRL1_OFFSET    0
+#define    RTL8367C_METER20_RATE_CTRL1_MASK    0x7
+
+#define    RTL8367C_REG_METER21_RATE_CTRL0    0x142a
+
+#define    RTL8367C_REG_METER21_RATE_CTRL1    0x142b
+#define    RTL8367C_METER21_RATE_CTRL1_OFFSET    0
+#define    RTL8367C_METER21_RATE_CTRL1_MASK    0x7
+
+#define    RTL8367C_REG_METER22_RATE_CTRL0    0x142c
+
+#define    RTL8367C_REG_METER22_RATE_CTRL1    0x142d
+#define    RTL8367C_METER22_RATE_CTRL1_OFFSET    0
+#define    RTL8367C_METER22_RATE_CTRL1_MASK    0x7
+
+#define    RTL8367C_REG_METER23_RATE_CTRL0    0x142e
+
+#define    RTL8367C_REG_METER23_RATE_CTRL1    0x142f
+#define    RTL8367C_METER23_RATE_CTRL1_OFFSET    0
+#define    RTL8367C_METER23_RATE_CTRL1_MASK    0x7
+
+#define    RTL8367C_REG_METER24_RATE_CTRL0    0x1430
+
+#define    RTL8367C_REG_METER24_RATE_CTRL1    0x1431
+#define    RTL8367C_METER24_RATE_CTRL1_OFFSET    0
+#define    RTL8367C_METER24_RATE_CTRL1_MASK    0x7
+
+#define    RTL8367C_REG_METER25_RATE_CTRL0    0x1432
+
+#define    RTL8367C_REG_METER25_RATE_CTRL1    0x1433
+#define    RTL8367C_METER25_RATE_CTRL1_OFFSET    0
+#define    RTL8367C_METER25_RATE_CTRL1_MASK    0x7
+
+#define    RTL8367C_REG_METER26_RATE_CTRL0    0x1434
+
+#define    RTL8367C_REG_METER26_RATE_CTRL1    0x1435
+#define    RTL8367C_METER26_RATE_CTRL1_OFFSET    0
+#define    RTL8367C_METER26_RATE_CTRL1_MASK    0x7
+
+#define    RTL8367C_REG_METER27_RATE_CTRL0    0x1436
+
+#define    RTL8367C_REG_METER27_RATE_CTRL1    0x1437
+#define    RTL8367C_METER27_RATE_CTRL1_OFFSET    0
+#define    RTL8367C_METER27_RATE_CTRL1_MASK    0x7
+
+#define    RTL8367C_REG_METER28_RATE_CTRL0    0x1438
+
+#define    RTL8367C_REG_METER28_RATE_CTRL1    0x1439
+#define    RTL8367C_METER28_RATE_CTRL1_OFFSET    0
+#define    RTL8367C_METER28_RATE_CTRL1_MASK    0x7
+
+#define    RTL8367C_REG_METER29_RATE_CTRL0    0x143a
+
+#define    RTL8367C_REG_METER29_RATE_CTRL1    0x143b
+#define    RTL8367C_METER29_RATE_CTRL1_OFFSET    0
+#define    RTL8367C_METER29_RATE_CTRL1_MASK    0x7
+
+#define    RTL8367C_REG_METER30_RATE_CTRL0    0x143c
+
+#define    RTL8367C_REG_METER30_RATE_CTRL1    0x143d
+#define    RTL8367C_METER30_RATE_CTRL1_OFFSET    0
+#define    RTL8367C_METER30_RATE_CTRL1_MASK    0x7
+
+#define    RTL8367C_REG_METER31_RATE_CTRL0    0x143e
+
+#define    RTL8367C_REG_METER31_RATE_CTRL1    0x143f
+#define    RTL8367C_METER31_RATE_CTRL1_OFFSET    0
+#define    RTL8367C_METER31_RATE_CTRL1_MASK    0x7
+
+#define    RTL8367C_REG_METER_MODE_SETTING0    0x1440
+
+#define    RTL8367C_REG_METER_MODE_SETTING1    0x1441
+
+#define    RTL8367C_REG_METER_MODE_TOKEN_CFG    0x1442
+#define    RTL8367C_METER_MODE_TOKEN_CFG_OFFSET    0
+#define    RTL8367C_METER_MODE_TOKEN_CFG_MASK    0x7FF
+
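+/*
+ * Editorial note: the meter rate registers above follow a strict stride of
+ * two, so for meters 0..31 the pair can be addressed arithmetically.  CTRL0
+ * is a full 16-bit word and CTRL1 adds 3 more bits (mask 0x7), which
+ * suggests a 19-bit rate value split across the pair.  The macro names
+ * below are ours, not Realtek's:
+ */
+#define RTL8367C_METER_RATE_CTRL0_ADDR(n)	(RTL8367C_REG_METER0_RATE_CTRL0 + 2 * (n))	/* n = 0..31 */
+#define RTL8367C_METER_RATE_CTRL1_ADDR(n)	(RTL8367C_REG_METER0_RATE_CTRL1 + 2 * (n))	/* n = 0..31 */
+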
+#define    RTL8367C_REG_METER0_BUCKET_SIZE    0x1600
+
+#define    RTL8367C_REG_METER1_BUCKET_SIZE    0x1601
+
+#define    RTL8367C_REG_METER2_BUCKET_SIZE    0x1602
+
+#define    RTL8367C_REG_METER3_BUCKET_SIZE    0x1603
+
+#define    RTL8367C_REG_METER4_BUCKET_SIZE    0x1604
+
+#define    RTL8367C_REG_METER5_BUCKET_SIZE    0x1605
+
+#define    RTL8367C_REG_METER6_BUCKET_SIZE    0x1606
+
+#define    RTL8367C_REG_METER7_BUCKET_SIZE    0x1607
+
+#define    RTL8367C_REG_METER8_BUCKET_SIZE    0x1608
+
+#define    RTL8367C_REG_METER9_BUCKET_SIZE    0x1609
+
+#define    RTL8367C_REG_METER10_BUCKET_SIZE    0x160a
+
+#define    RTL8367C_REG_METER11_BUCKET_SIZE    0x160b
+
+#define    RTL8367C_REG_METER12_BUCKET_SIZE    0x160c
+
+#define    RTL8367C_REG_METER13_BUCKET_SIZE    0x160d
+
+#define    RTL8367C_REG_METER14_BUCKET_SIZE    0x160e
+
+#define    RTL8367C_REG_METER15_BUCKET_SIZE    0x160f
+
+#define    RTL8367C_REG_METER16_BUCKET_SIZE    0x1610
+
+#define    RTL8367C_REG_METER17_BUCKET_SIZE    0x1611
+
+#define    RTL8367C_REG_METER18_BUCKET_SIZE    0x1612
+
+#define    RTL8367C_REG_METER19_BUCKET_SIZE    0x1613
+
+#define    RTL8367C_REG_METER20_BUCKET_SIZE    0x1614
+
+#define    RTL8367C_REG_METER21_BUCKET_SIZE    0x1615
+
+#define    RTL8367C_REG_METER22_BUCKET_SIZE    0x1616
+
+#define    RTL8367C_REG_METER23_BUCKET_SIZE    0x1617
+
+#define    RTL8367C_REG_METER24_BUCKET_SIZE    0x1618
+
+#define    RTL8367C_REG_METER25_BUCKET_SIZE    0x1619
+
+#define    RTL8367C_REG_METER26_BUCKET_SIZE    0x161a
+
+#define    RTL8367C_REG_METER27_BUCKET_SIZE    0x161b
+
+#define    RTL8367C_REG_METER28_BUCKET_SIZE    0x161c
+
+#define    RTL8367C_REG_METER29_BUCKET_SIZE    0x161d
+
+#define    RTL8367C_REG_METER30_BUCKET_SIZE    0x161e
+
+#define    RTL8367C_REG_METER31_BUCKET_SIZE    0x161f
+
+#define    RTL8367C_REG_METER_CTRL0    0x1700
+#define    RTL8367C_METER_OP_OFFSET    8
+#define    RTL8367C_METER_OP_MASK    0x100
+#define    RTL8367C_METER_TICK_OFFSET    0
+#define    RTL8367C_METER_TICK_MASK    0xFF
+
+#define    RTL8367C_REG_METER_CTRL1    0x1701
+#define    RTL8367C_METER_CTRL1_OFFSET    0
+#define    RTL8367C_METER_CTRL1_MASK    0xFF
+
+#define    RTL8367C_REG_METER_OVERRATE_INDICATOR0    0x1702
+
+#define    RTL8367C_REG_METER_OVERRATE_INDICATOR1    0x1703
+
+#define    RTL8367C_REG_METER_OVERRATE_INDICATOR0_8051    0x1704
+
+#define    RTL8367C_REG_METER_OVERRATE_INDICATOR1_8051    0x1705
+
+#define    RTL8367C_REG_METER_IFG_CTRL0    0x1712
+#define    RTL8367C_METER15_IFG_OFFSET    15
+#define    RTL8367C_METER15_IFG_MASK    0x8000
+#define    RTL8367C_METER14_IFG_OFFSET    14
+#define    RTL8367C_METER14_IFG_MASK    0x4000
+#define    RTL8367C_METER13_IFG_OFFSET    13
+#define    RTL8367C_METER13_IFG_MASK    0x2000
+#define    RTL8367C_METER12_IFG_OFFSET    12
+#define    RTL8367C_METER12_IFG_MASK    0x1000
+#define    RTL8367C_METER11_IFG_OFFSET    11
+#define    RTL8367C_METER11_IFG_MASK    0x800
+#define    RTL8367C_METER10_IFG_OFFSET    10
+#define    RTL8367C_METER10_IFG_MASK    0x400
+#define    RTL8367C_METER9_IFG_OFFSET    9
+#define    RTL8367C_METER9_IFG_MASK    0x200
+#define    RTL8367C_METER8_IFG_OFFSET    8
+#define    RTL8367C_METER8_IFG_MASK    0x100
+#define    RTL8367C_METER7_IFG_OFFSET    7
+#define    RTL8367C_METER7_IFG_MASK    0x80
+#define    RTL8367C_METER6_IFG_OFFSET    6
+#define    RTL8367C_METER6_IFG_MASK    0x40
+#define    RTL8367C_METER5_IFG_OFFSET    5
+#define    RTL8367C_METER5_IFG_MASK    0x20
+#define    RTL8367C_METER4_IFG_OFFSET    4
+#define    RTL8367C_METER4_IFG_MASK    0x10
+#define    RTL8367C_METER3_IFG_OFFSET    3
+#define    RTL8367C_METER3_IFG_MASK    0x8
+#define    RTL8367C_METER2_IFG_OFFSET    2
+#define    RTL8367C_METER2_IFG_MASK    0x4
+#define    RTL8367C_METER1_IFG_OFFSET    1
+#define    RTL8367C_METER1_IFG_MASK    0x2
+#define    RTL8367C_METER0_IFG_OFFSET    0
+#define    RTL8367C_METER0_IFG_MASK    0x1
+
+#define    RTL8367C_REG_METER_IFG_CTRL1    0x1713
+#define    RTL8367C_METER31_IFG_OFFSET    15
+#define    RTL8367C_METER31_IFG_MASK    0x8000
+#define    RTL8367C_METER30_IFG_OFFSET    14
+#define    RTL8367C_METER30_IFG_MASK    0x4000
+#define    RTL8367C_METER29_IFG_OFFSET    13
+#define    RTL8367C_METER29_IFG_MASK    0x2000
+#define    RTL8367C_METER28_IFG_OFFSET    12
+#define    RTL8367C_METER28_IFG_MASK    0x1000
+#define    RTL8367C_METER27_IFG_OFFSET    11
+#define    RTL8367C_METER27_IFG_MASK    0x800
+#define    RTL8367C_METER26_IFG_OFFSET    10
+#define    RTL8367C_METER26_IFG_MASK    0x400
+#define    RTL8367C_METER25_IFG_OFFSET    9
+#define    RTL8367C_METER25_IFG_MASK    0x200
+#define    RTL8367C_METER24_IFG_OFFSET    8
+#define    RTL8367C_METER24_IFG_MASK    0x100
+#define    RTL8367C_METER23_IFG_OFFSET    7
+#define    RTL8367C_METER23_IFG_MASK    0x80
+#define    RTL8367C_METER22_IFG_OFFSET    6
+#define    RTL8367C_METER22_IFG_MASK    0x40
+#define    RTL8367C_METER21_IFG_OFFSET    5
+#define    RTL8367C_METER21_IFG_MASK    0x20
+#define    RTL8367C_METER20_IFG_OFFSET    4
+#define    RTL8367C_METER20_IFG_MASK    0x10
+#define    RTL8367C_METER19_IFG_OFFSET    3
+#define    RTL8367C_METER19_IFG_MASK    0x8
+#define    RTL8367C_METER18_IFG_OFFSET    2
+#define    RTL8367C_METER18_IFG_MASK    0x4
+#define    RTL8367C_METER17_IFG_OFFSET    1
+#define    RTL8367C_METER17_IFG_MASK    0x2
+#define    RTL8367C_METER16_IFG_OFFSET    0
+#define    RTL8367C_METER16_IFG_MASK    0x1
+
+#define    RTL8367C_REG_METER_CTRL2    0x1722
+#define    RTL8367C_cfg_mtr_tick_8g_OFFSET    8
+#define    RTL8367C_cfg_mtr_tick_8g_MASK    0xFF00
+#define    RTL8367C_cfg_mtr_dec_cnt_8g_OFFSET    0
+#define    RTL8367C_cfg_mtr_dec_cnt_8g_MASK    0xFF
+
+#define    RTL8367C_REG_DUMMY_1723    0x1723
+
+#define    RTL8367C_REG_DUMMY_1724    0x1724
+
+#define    RTL8367C_REG_DUMMY_1725    0x1725
+
+#define    RTL8367C_REG_DUMMY_1726    0x1726
+
+#define    RTL8367C_REG_DUMMY_1727    0x1727
+
+#define    RTL8367C_REG_DUMMY_1728    0x1728
+
+#define    RTL8367C_REG_DUMMY_1729    0x1729
+
+#define    RTL8367C_REG_DUMMY_172A    0x172a
+
+#define    RTL8367C_REG_DUMMY_172B    0x172b
+
+#define    RTL8367C_REG_DUMMY_172C    0x172c
+
+#define    RTL8367C_REG_DUMMY_172D    0x172d
+
+#define    RTL8367C_REG_DUMMY_172E    0x172e
+
+#define    RTL8367C_REG_DUMMY_172F    0x172f
+
+#define    RTL8367C_REG_DUMMY_1730    0x1730
+
+#define    RTL8367C_REG_DUMMY_1731    0x1731
+
+#define    RTL8367C_REG_METER32_RATE_CTRL0    0x1740
+
+#define    RTL8367C_REG_METER32_RATE_CTRL1    0x1741
+#define    RTL8367C_METER32_RATE_CTRL1_OFFSET    0
+#define    RTL8367C_METER32_RATE_CTRL1_MASK    0x7
+
+#define    RTL8367C_REG_METER33_RATE_CTRL0    0x1742
+
+#define    RTL8367C_REG_METER33_RATE_CTRL1    0x1743
+#define    RTL8367C_METER33_RATE_CTRL1_OFFSET    0
+#define    RTL8367C_METER33_RATE_CTRL1_MASK    0x7
+
+#define    RTL8367C_REG_METER34_RATE_CTRL0    0x1744
+
+#define    RTL8367C_REG_METER34_RATE_CTRL1    0x1745
+#define    RTL8367C_METER34_RATE_CTRL1_OFFSET    0
+#define    RTL8367C_METER34_RATE_CTRL1_MASK    0x7
+
+#define    RTL8367C_REG_METER35_RATE_CTRL0    0x1746
+
+#define    RTL8367C_REG_METER35_RATE_CTRL1    0x1747
+#define    RTL8367C_METER35_RATE_CTRL1_OFFSET    0
+#define    RTL8367C_METER35_RATE_CTRL1_MASK    0x7
+
+#define    RTL8367C_REG_METER36_RATE_CTRL0    0x1748
+
+#define    RTL8367C_REG_METER36_RATE_CTRL1    0x1749
+#define    RTL8367C_METER36_RATE_CTRL1_OFFSET    0
+#define    RTL8367C_METER36_RATE_CTRL1_MASK    0x7
+
+#define    RTL8367C_REG_METER37_RATE_CTRL0    0x174a
+
+#define    RTL8367C_REG_METER37_RATE_CTRL1    0x174b
+#define    RTL8367C_METER37_RATE_CTRL1_OFFSET    0
+#define    RTL8367C_METER37_RATE_CTRL1_MASK    0x7
+
+#define    RTL8367C_REG_METER38_RATE_CTRL0    0x174c
+
+#define    RTL8367C_REG_METER38_RATE_CTRL1    0x174d
+#define    RTL8367C_METER38_RATE_CTRL1_OFFSET    0
+#define    RTL8367C_METER38_RATE_CTRL1_MASK    0x7
+
+#define    RTL8367C_REG_METER39_RATE_CTRL0    0x174e
+
+#define    RTL8367C_REG_METER39_RATE_CTRL1    0x174f
+#define    RTL8367C_METER39_RATE_CTRL1_OFFSET    0
+#define    RTL8367C_METER39_RATE_CTRL1_MASK    0x7
+
+#define    RTL8367C_REG_METER40_RATE_CTRL0    0x1750
+
+#define    RTL8367C_REG_METER40_RATE_CTRL1    0x1751
+#define    RTL8367C_METER40_RATE_CTRL1_OFFSET    0
+#define    RTL8367C_METER40_RATE_CTRL1_MASK    0x7
+
+#define    RTL8367C_REG_METER41_RATE_CTRL0    0x1752
+
+#define    RTL8367C_REG_METER41_RATE_CTRL1    0x1753
+#define    RTL8367C_METER41_RATE_CTRL1_OFFSET    0
+#define    RTL8367C_METER41_RATE_CTRL1_MASK    0x7
+
+#define    RTL8367C_REG_METER42_RATE_CTRL0    0x1754
+
+#define    RTL8367C_REG_METER42_RATE_CTRL1    0x1755
+#define    RTL8367C_METER42_RATE_CTRL1_OFFSET    0
+#define    RTL8367C_METER42_RATE_CTRL1_MASK    0x7
+
+#define    RTL8367C_REG_METER43_RATE_CTRL0    0x1756
+
+#define    RTL8367C_REG_METER43_RATE_CTRL1    0x1757
+#define    RTL8367C_METER43_RATE_CTRL1_OFFSET    0
+#define    RTL8367C_METER43_RATE_CTRL1_MASK    0x7
+
+#define    RTL8367C_REG_METER44_RATE_CTRL0    0x1758
+
+#define    RTL8367C_REG_METER44_RATE_CTRL1    0x1759
+#define    RTL8367C_METER44_RATE_CTRL1_OFFSET    0
+#define    RTL8367C_METER44_RATE_CTRL1_MASK    0x7
+
+#define    RTL8367C_REG_METER45_RATE_CTRL0    0x175a
+
+#define    RTL8367C_REG_METER45_RATE_CTRL1    0x175b
+#define    RTL8367C_METER45_RATE_CTRL1_OFFSET    0
+#define    RTL8367C_METER45_RATE_CTRL1_MASK    0x7
+
+#define    RTL8367C_REG_METER46_RATE_CTRL0    0x175c
+
+#define    RTL8367C_REG_METER46_RATE_CTRL1    0x175d
+#define    RTL8367C_METER46_RATE_CTRL1_OFFSET    0
+#define    RTL8367C_METER46_RATE_CTRL1_MASK    0x7
+
+#define    RTL8367C_REG_METER47_RATE_CTRL0    0x175e
+
+#define    RTL8367C_REG_METER47_RATE_CTRL1    0x175f
+#define    RTL8367C_METER47_RATE_CTRL1_OFFSET    0
+#define    RTL8367C_METER47_RATE_CTRL1_MASK    0x7
+
+#define    RTL8367C_REG_METER48_RATE_CTRL0    0x1760
+
+#define    RTL8367C_REG_METER48_RATE_CTRL1    0x1761
+#define    RTL8367C_METER48_RATE_CTRL1_OFFSET    0
+#define    RTL8367C_METER48_RATE_CTRL1_MASK    0x7
+
+#define    RTL8367C_REG_METER49_RATE_CTRL0    0x1762
+
+#define    RTL8367C_REG_METER49_RATE_CTRL1    0x1763
+#define    RTL8367C_METER49_RATE_CTRL1_OFFSET    0
+#define    RTL8367C_METER49_RATE_CTRL1_MASK    0x7
+
+#define    RTL8367C_REG_METER50_RATE_CTRL0    0x1764
+
+#define    RTL8367C_REG_METER50_RATE_CTRL1    0x1765
+#define    RTL8367C_METER50_RATE_CTRL1_OFFSET    0
+#define    RTL8367C_METER50_RATE_CTRL1_MASK    0x7
+
+#define    RTL8367C_REG_METER51_RATE_CTRL0    0x1766
+
+#define    RTL8367C_REG_METER51_RATE_CTRL1    0x1767
+#define    RTL8367C_METER51_RATE_CTRL1_OFFSET    0
+#define    RTL8367C_METER51_RATE_CTRL1_MASK    0x7
+
+#define    RTL8367C_REG_METER52_RATE_CTRL0    0x1768
+
+#define    RTL8367C_REG_METER52_RATE_CTRL1    0x1769
+#define    RTL8367C_METER52_RATE_CTRL1_OFFSET    0
+#define    RTL8367C_METER52_RATE_CTRL1_MASK    0x7
+
+#define    RTL8367C_REG_METER53_RATE_CTRL0    0x176a
+
+#define    RTL8367C_REG_METER53_RATE_CTRL1    0x176b
+#define    RTL8367C_METER53_RATE_CTRL1_OFFSET    0
+#define    RTL8367C_METER53_RATE_CTRL1_MASK    0x7
+
+#define    RTL8367C_REG_METER54_RATE_CTRL0    0x176c
+
+#define    RTL8367C_REG_METER54_RATE_CTRL1    0x176d
+#define    RTL8367C_METER54_RATE_CTRL1_OFFSET    0
+#define    RTL8367C_METER54_RATE_CTRL1_MASK    0x7
+
+#define    RTL8367C_REG_METER55_RATE_CTRL0    0x176e
+
+#define    RTL8367C_REG_METER55_RATE_CTRL1    0x176f
+#define    RTL8367C_METER55_RATE_CTRL1_OFFSET    0
+#define    RTL8367C_METER55_RATE_CTRL1_MASK    0x7
+
+#define    RTL8367C_REG_METER56_RATE_CTRL0    0x1770
+
+#define    RTL8367C_REG_METER56_RATE_CTRL1    0x1771
+#define    RTL8367C_METER56_RATE_CTRL1_OFFSET    0
+#define    RTL8367C_METER56_RATE_CTRL1_MASK    0x7
+
+#define    RTL8367C_REG_METER57_RATE_CTRL0    0x1772
+
+#define    RTL8367C_REG_METER57_RATE_CTRL1    0x1773
+#define    RTL8367C_METER57_RATE_CTRL1_OFFSET    0
+#define    RTL8367C_METER57_RATE_CTRL1_MASK    0x7
+
+#define    RTL8367C_REG_METER58_RATE_CTRL0    0x1774
+
+#define    RTL8367C_REG_METER58_RATE_CTRL1    0x1775
+#define    RTL8367C_METER58_RATE_CTRL1_OFFSET    0
+#define    RTL8367C_METER58_RATE_CTRL1_MASK    0x7
+
+#define    RTL8367C_REG_METER59_RATE_CTRL0    0x1776
+
+#define    RTL8367C_REG_METER59_RATE_CTRL1    0x1777
+#define    RTL8367C_METER59_RATE_CTRL1_OFFSET    0
+#define    RTL8367C_METER59_RATE_CTRL1_MASK    0x7
+
+#define    RTL8367C_REG_METER60_RATE_CTRL0    0x1778
+
+#define    RTL8367C_REG_METER60_RATE_CTRL1    0x1779
+#define    RTL8367C_METER60_RATE_CTRL1_OFFSET    0
+#define    RTL8367C_METER60_RATE_CTRL1_MASK    0x7
+
+#define    RTL8367C_REG_METER61_RATE_CTRL0    0x177a
+
+#define    RTL8367C_REG_METER61_RATE_CTRL1    0x177b
+#define    RTL8367C_METER61_RATE_CTRL1_OFFSET    0
+#define    RTL8367C_METER61_RATE_CTRL1_MASK    0x7
+
+#define    RTL8367C_REG_METER62_RATE_CTRL0    0x177c
+
+#define    RTL8367C_REG_METER62_RATE_CTRL1    0x177d
+#define    RTL8367C_METER62_RATE_CTRL1_OFFSET    0
+#define    RTL8367C_METER62_RATE_CTRL1_MASK    0x7
+
+#define    RTL8367C_REG_METER63_RATE_CTRL0    0x177e
+
+#define    RTL8367C_REG_METER63_RATE_CTRL1    0x177f
+#define    RTL8367C_METER63_RATE_CTRL1_OFFSET    0
+#define    RTL8367C_METER63_RATE_CTRL1_MASK    0x7
+
+#define    RTL8367C_REG_METER_MODE_SETTING2    0x1780
+
+#define    RTL8367C_REG_METER_MODE_SETTING3    0x1781
+
+#define    RTL8367C_REG_METER32_BUCKET_SIZE    0x1790
+
+#define    RTL8367C_REG_METER33_BUCKET_SIZE    0x1791
+
+#define    RTL8367C_REG_METER34_BUCKET_SIZE    0x1792
+
+#define    RTL8367C_REG_METER35_BUCKET_SIZE    0x1793
+
+#define    RTL8367C_REG_METER36_BUCKET_SIZE    0x1794
+
+#define    RTL8367C_REG_METER37_BUCKET_SIZE    0x1795
+
+#define    RTL8367C_REG_METER38_BUCKET_SIZE    0x1796
+
+#define    RTL8367C_REG_METER39_BUCKET_SIZE    0x1797
+
+#define    RTL8367C_REG_METER40_BUCKET_SIZE    0x1798
+
+#define    RTL8367C_REG_METER41_BUCKET_SIZE    0x1799
+
+#define    RTL8367C_REG_METER42_BUCKET_SIZE    0x179a
+
+#define    RTL8367C_REG_METER43_BUCKET_SIZE    0x179b
+
+#define    RTL8367C_REG_METER44_BUCKET_SIZE    0x179c
+
+#define    RTL8367C_REG_METER45_BUCKET_SIZE    0x179d
+
+#define    RTL8367C_REG_METER46_BUCKET_SIZE    0x179e
+
+#define    RTL8367C_REG_METER47_BUCKET_SIZE    0x179f
+
+#define    RTL8367C_REG_METER48_BUCKET_SIZE    0x17a0
+
+#define    RTL8367C_REG_METER49_BUCKET_SIZE    0x17a1
+
+#define    RTL8367C_REG_METER50_BUCKET_SIZE    0x17a2
+
+#define    RTL8367C_REG_METER51_BUCKET_SIZE    0x17a3
+
+#define    RTL8367C_REG_METER52_BUCKET_SIZE    0x17a4
+
+#define    RTL8367C_REG_METER53_BUCKET_SIZE    0x17a5
+
+#define    RTL8367C_REG_METER54_BUCKET_SIZE    0x17a6
+
+#define    RTL8367C_REG_METER55_BUCKET_SIZE    0x17a7
+
+#define    RTL8367C_REG_METER56_BUCKET_SIZE    0x17a8
+
+#define    RTL8367C_REG_METER57_BUCKET_SIZE    0x17a9
+
+#define    RTL8367C_REG_METER58_BUCKET_SIZE    0x17aa
+
+#define    RTL8367C_REG_METER59_BUCKET_SIZE    0x17ab
+
+#define    RTL8367C_REG_METER60_BUCKET_SIZE    0x17ac
+
+#define    RTL8367C_REG_METER61_BUCKET_SIZE    0x17ad
+
+#define    RTL8367C_REG_METER62_BUCKET_SIZE    0x17ae
+
+#define    RTL8367C_REG_METER63_BUCKET_SIZE    0x17af
+
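+/*
+ * Editorial sketch: meters 32..63 live in a second, non-contiguous bank
+ * (rate pairs from 0x1740, bucket sizes from 0x1790), so a generic lookup
+ * has to branch on the meter index.  Hypothetical helpers generalizing the
+ * 0..31 address macros sketched earlier:
+ */
+static inline unsigned int rtl8367c_meter_rate_ctrl0_addr(unsigned int n)
+{
+	/* Meters 0..31 at 0x1400 + 2n, meters 32..63 at 0x1740 + 2(n - 32). */
+	if (n < 32)
+		return RTL8367C_REG_METER0_RATE_CTRL0 + 2 * n;
+	return RTL8367C_REG_METER32_RATE_CTRL0 + 2 * (n - 32);
+}
+
+static inline unsigned int rtl8367c_meter_bucket_addr(unsigned int n)
+{
+	/* Meters 0..31 at 0x1600 + n, meters 32..63 at 0x1790 + (n - 32). */
+	if (n < 32)
+		return RTL8367C_REG_METER0_BUCKET_SIZE + n;
+	return RTL8367C_REG_METER32_BUCKET_SIZE + (n - 32);
+}
+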
+#define    RTL8367C_REG_METER_OVERRATE_INDICATOR2    0x17b0
+
+#define    RTL8367C_REG_METER_OVERRATE_INDICATOR3    0x17b1
+
+#define    RTL8367C_REG_METER_OVERRATE_INDICATOR2_8051    0x17b2
+
+#define    RTL8367C_REG_METER_OVERRATE_INDICATOR3_8051    0x17b3
+
+#define    RTL8367C_REG_METER_IFG_CTRL2    0x17b4
+#define    RTL8367C_METER47_IFG_OFFSET    15
+#define    RTL8367C_METER47_IFG_MASK    0x8000
+#define    RTL8367C_METER46_IFG_OFFSET    14
+#define    RTL8367C_METER46_IFG_MASK    0x4000
+#define    RTL8367C_METER45_IFG_OFFSET    13
+#define    RTL8367C_METER45_IFG_MASK    0x2000
+#define    RTL8367C_METER44_IFG_OFFSET    12
+#define    RTL8367C_METER44_IFG_MASK    0x1000
+#define    RTL8367C_METER43_IFG_OFFSET    11
+#define    RTL8367C_METER43_IFG_MASK    0x800
+#define    RTL8367C_METER42_IFG_OFFSET    10
+#define    RTL8367C_METER42_IFG_MASK    0x400
+#define    RTL8367C_METER41_IFG_OFFSET    9
+#define    RTL8367C_METER41_IFG_MASK    0x200
+#define    RTL8367C_METER40_IFG_OFFSET    8
+#define    RTL8367C_METER40_IFG_MASK    0x100
+#define    RTL8367C_METER39_IFG_OFFSET    7
+#define    RTL8367C_METER39_IFG_MASK    0x80
+#define    RTL8367C_METER38_IFG_OFFSET    6
+#define    RTL8367C_METER38_IFG_MASK    0x40
+#define    RTL8367C_METER37_IFG_OFFSET    5
+#define    RTL8367C_METER37_IFG_MASK    0x20
+#define    RTL8367C_METER36_IFG_OFFSET    4
+#define    RTL8367C_METER36_IFG_MASK    0x10
+#define    RTL8367C_METER35_IFG_OFFSET    3
+#define    RTL8367C_METER35_IFG_MASK    0x8
+#define    RTL8367C_METER34_IFG_OFFSET    2
+#define    RTL8367C_METER34_IFG_MASK    0x4
+#define    RTL8367C_METER33_IFG_OFFSET    1
+#define    RTL8367C_METER33_IFG_MASK    0x2
+#define    RTL8367C_METER32_IFG_OFFSET    0
+#define    RTL8367C_METER32_IFG_MASK    0x1
+
+#define    RTL8367C_REG_METER_IFG_CTRL3    0x17b5
+#define    RTL8367C_METER63_IFG_OFFSET    15
+#define    RTL8367C_METER63_IFG_MASK    0x8000
+#define    RTL8367C_METER62_IFG_OFFSET    14
+#define    RTL8367C_METER62_IFG_MASK    0x4000
+#define    RTL8367C_METER61_IFG_OFFSET    13
+#define    RTL8367C_METER61_IFG_MASK    0x2000
+#define    RTL8367C_METER60_IFG_OFFSET    12
+#define    RTL8367C_METER60_IFG_MASK    0x1000
+#define    RTL8367C_METER59_IFG_OFFSET    11
+#define    RTL8367C_METER59_IFG_MASK    0x800
+#define    RTL8367C_METER58_IFG_OFFSET    10
+#define    RTL8367C_METER58_IFG_MASK    0x400
+#define    RTL8367C_METER57_IFG_OFFSET    9
+#define    RTL8367C_METER57_IFG_MASK    0x200
+#define    RTL8367C_METER56_IFG_OFFSET    8
+#define    RTL8367C_METER56_IFG_MASK    0x100
+#define    RTL8367C_METER55_IFG_OFFSET    7
+#define    RTL8367C_METER55_IFG_MASK    0x80
+#define    RTL8367C_METER54_IFG_OFFSET    6
+#define    RTL8367C_METER54_IFG_MASK    0x40
+#define    RTL8367C_METER53_IFG_OFFSET    5
+#define    RTL8367C_METER53_IFG_MASK    0x20
+#define    RTL8367C_METER52_IFG_OFFSET    4
+#define    RTL8367C_METER52_IFG_MASK    0x10
+#define    RTL8367C_METER51_IFG_OFFSET    3
+#define    RTL8367C_METER51_IFG_MASK    0x8
+#define    RTL8367C_METER50_IFG_OFFSET    2
+#define    RTL8367C_METER50_IFG_MASK    0x4
+#define    RTL8367C_METER49_IFG_OFFSET    1
+#define    RTL8367C_METER49_IFG_MASK    0x2
+#define    RTL8367C_METER48_IFG_OFFSET    0
+#define    RTL8367C_METER48_IFG_MASK    0x1
+
+#define    RTL8367C_REG_METER_MISC    0x17b6
+#define    RTL8367C_METER_MISC_OFFSET    0
+#define    RTL8367C_METER_MISC_MASK    0x1
+
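+/*
+ * Editorial sketch: the per-meter IFG enables are packed sixteen to a
+ * register, in two non-adjacent pairs (0x1712/0x1713 for meters 0..31,
+ * 0x17b4/0x17b5 for meters 32..63).  Hypothetical lookup:
+ */
+static inline unsigned int rtl8367c_meter_ifg_reg(unsigned int n)
+{
+	if (n < 32)
+		return RTL8367C_REG_METER_IFG_CTRL0 + (n >> 4);
+	return RTL8367C_REG_METER_IFG_CTRL2 + ((n - 32) >> 4);
+}
+
+static inline unsigned int rtl8367c_meter_ifg_bit(unsigned int n)
+{
+	/* Bit position of meter n inside its 16-bit IFG register. */
+	return 1u << (n & 0xf);
+}
+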
+/* (16'h1800) 8051_RLDP_EEE_reg: embedded-8051, loop detection (RLDP) and EEE-LLDP registers */
+
+#define    RTL8367C_REG_EEELLDP_CTRL0    0x1820
+#define    RTL8367C_EEELLDP_SUBTYPE_OFFSET    6
+#define    RTL8367C_EEELLDP_SUBTYPE_MASK    0x3FC0
+#define    RTL8367C_EEELLDP_TRAP_8051_OFFSET    2
+#define    RTL8367C_EEELLDP_TRAP_8051_MASK    0x4
+#define    RTL8367C_EEELLDP_TRAP_CPU_OFFSET    1
+#define    RTL8367C_EEELLDP_TRAP_CPU_MASK    0x2
+#define    RTL8367C_EEELLDP_ENABLE_OFFSET    0
+#define    RTL8367C_EEELLDP_ENABLE_MASK    0x1
+
+#define    RTL8367C_REG_EEELLDP_PMSK    0x1822
+#define    RTL8367C_EEELLDP_PMSK_OFFSET    0
+#define    RTL8367C_EEELLDP_PMSK_MASK    0x7FF
+
+#define    RTL8367C_REG_EEELLDP_RX_VALUE_P00_08    0x1843
+
+#define    RTL8367C_REG_EEELLDP_RX_VALUE_P00_07    0x1844
+
+#define    RTL8367C_REG_EEELLDP_RX_VALUE_P00_06    0x1845
+
+#define    RTL8367C_REG_EEELLDP_RX_VALUE_P00_05    0x1846
+
+#define    RTL8367C_REG_EEELLDP_RX_VALUE_P00_04    0x1847
+
+#define    RTL8367C_REG_EEELLDP_RX_VALUE_P00_03    0x1848
+
+#define    RTL8367C_REG_EEELLDP_RX_VALUE_P00_02    0x1849
+
+#define    RTL8367C_REG_EEELLDP_RX_VALUE_P00_01    0x184a
+
+#define    RTL8367C_REG_EEELLDP_RX_VALUE_P00_00    0x184b
+
+#define    RTL8367C_REG_EEELLDP_RX_VALUE_P01_08    0x184c
+
+#define    RTL8367C_REG_EEELLDP_RX_VALUE_P01_07    0x184d
+
+#define    RTL8367C_REG_EEELLDP_RX_VALUE_P01_06    0x184e
+
+#define    RTL8367C_REG_EEELLDP_RX_VALUE_P01_05    0x184f
+
+#define    RTL8367C_REG_EEELLDP_RX_VALUE_P01_04    0x1850
+
+#define    RTL8367C_REG_EEELLDP_RX_VALUE_P01_03    0x1851
+
+#define    RTL8367C_REG_EEELLDP_RX_VALUE_P01_02    0x1852
+
+#define    RTL8367C_REG_EEELLDP_RX_VALUE_P01_01    0x1853
+
+#define    RTL8367C_REG_EEELLDP_RX_VALUE_P01_00    0x1854
+
+#define    RTL8367C_REG_EEELLDP_RX_VALUE_P02_08    0x1855
+
+#define    RTL8367C_REG_EEELLDP_RX_VALUE_P02_07    0x1856
+
+#define    RTL8367C_REG_EEELLDP_RX_VALUE_P02_06    0x1857
+
+#define    RTL8367C_REG_EEELLDP_RX_VALUE_P02_05    0x1858
+
+#define    RTL8367C_REG_EEELLDP_RX_VALUE_P02_04    0x1859
+
+#define    RTL8367C_REG_EEELLDP_RX_VALUE_P02_03    0x185a
+
+#define    RTL8367C_REG_EEELLDP_RX_VALUE_P02_02    0x185b
+
+#define    RTL8367C_REG_EEELLDP_RX_VALUE_P02_01    0x185c
+
+#define    RTL8367C_REG_EEELLDP_RX_VALUE_P02_00    0x185d
+
+#define    RTL8367C_REG_EEELLDP_RX_VALUE_P03_08    0x185e
+
+#define    RTL8367C_REG_EEELLDP_RX_VALUE_P03_07    0x185f
+
+#define    RTL8367C_REG_EEELLDP_RX_VALUE_P03_06    0x1860
+
+#define    RTL8367C_REG_EEELLDP_RX_VALUE_P03_05    0x1861
+
+#define    RTL8367C_REG_EEELLDP_RX_VALUE_P03_04    0x1862
+
+#define    RTL8367C_REG_EEELLDP_RX_VALUE_P03_03    0x1863
+
+#define    RTL8367C_REG_EEELLDP_RX_VALUE_P03_02    0x1864
+
+#define    RTL8367C_REG_EEELLDP_RX_VALUE_P03_01    0x1865
+
+#define    RTL8367C_REG_EEELLDP_RX_VALUE_P03_00    0x1866
+
+#define    RTL8367C_REG_EEELLDP_RX_VALUE_P04_08    0x1867
+
+#define    RTL8367C_REG_EEELLDP_RX_VALUE_P04_07    0x1868
+
+#define    RTL8367C_REG_EEELLDP_RX_VALUE_P04_06    0x1869
+
+#define    RTL8367C_REG_EEELLDP_RX_VALUE_P04_05    0x186a
+
+#define    RTL8367C_REG_EEELLDP_RX_VALUE_P04_04    0x186b
+
+#define    RTL8367C_REG_EEELLDP_RX_VALUE_P04_03    0x186c
+
+#define    RTL8367C_REG_EEELLDP_RX_VALUE_P04_02    0x186d
+
+#define    RTL8367C_REG_EEELLDP_RX_VALUE_P04_01    0x186e
+
+#define    RTL8367C_REG_EEELLDP_RX_VALUE_P04_00    0x186f
+
+#define    RTL8367C_REG_EEELLDP_RX_VALUE_P05_08    0x1870
+
+#define    RTL8367C_REG_EEELLDP_RX_VALUE_P05_07    0x1871
+
+#define    RTL8367C_REG_EEELLDP_RX_VALUE_P05_06    0x1872
+
+#define    RTL8367C_REG_EEELLDP_RX_VALUE_P05_05    0x1873
+
+#define    RTL8367C_REG_EEELLDP_RX_VALUE_P05_04    0x1874
+
+#define    RTL8367C_REG_EEELLDP_RX_VALUE_P05_03    0x1875
+
+#define    RTL8367C_REG_EEELLDP_RX_VALUE_P05_02    0x1876
+
+#define    RTL8367C_REG_EEELLDP_RX_VALUE_P05_01    0x1877
+
+#define    RTL8367C_REG_EEELLDP_RX_VALUE_P05_00    0x1878
+
+#define    RTL8367C_REG_EEELLDP_RX_VALUE_P06_08    0x1879
+
+#define    RTL8367C_REG_EEELLDP_RX_VALUE_P06_07    0x187a
+
+#define    RTL8367C_REG_EEELLDP_RX_VALUE_P06_06    0x187b
+
+#define    RTL8367C_REG_EEELLDP_RX_VALUE_P06_05    0x187c
+
+#define    RTL8367C_REG_EEELLDP_RX_VALUE_P06_04    0x187d
+
+#define    RTL8367C_REG_EEELLDP_RX_VALUE_P06_03    0x187e
+
+#define    RTL8367C_REG_EEELLDP_RX_VALUE_P06_02    0x187f
+
+#define    RTL8367C_REG_EEELLDP_RX_VALUE_P06_01    0x1880
+
+#define    RTL8367C_REG_EEELLDP_RX_VALUE_P06_00    0x1881
+
+#define    RTL8367C_REG_EEELLDP_RX_VALUE_P07_08    0x1882
+
+#define    RTL8367C_REG_EEELLDP_RX_VALUE_P07_07    0x1883
+
+#define    RTL8367C_REG_EEELLDP_RX_VALUE_P07_06    0x1884
+
+#define    RTL8367C_REG_EEELLDP_RX_VALUE_P07_05    0x1885
+
+#define    RTL8367C_REG_EEELLDP_RX_VALUE_P07_04    0x1886
+
+#define    RTL8367C_REG_EEELLDP_RX_VALUE_P07_03    0x1887
+
+#define    RTL8367C_REG_EEELLDP_RX_VALUE_P07_02    0x1888
+
+#define    RTL8367C_REG_EEELLDP_RX_VALUE_P07_01    0x1889
+
+#define    RTL8367C_REG_EEELLDP_RX_VALUE_P07_00    0x188a
+
+#define    RTL8367C_REG_EEELLDP_RX_VALUE_P08_08    0x188b
+
+#define    RTL8367C_REG_EEELLDP_RX_VALUE_P08_07    0x188c
+
+#define    RTL8367C_REG_EEELLDP_RX_VALUE_P08_06    0x188d
+
+#define    RTL8367C_REG_EEELLDP_RX_VALUE_P08_05    0x188e
+
+#define    RTL8367C_REG_EEELLDP_RX_VALUE_P08_04    0x188f
+
+#define    RTL8367C_REG_EEELLDP_RX_VALUE_P08_03    0x1890
+
+#define    RTL8367C_REG_EEELLDP_RX_VALUE_P08_02    0x1891
+
+#define    RTL8367C_REG_EEELLDP_RX_VALUE_P08_01    0x1892
+
+#define    RTL8367C_REG_EEELLDP_RX_VALUE_P08_00    0x1893
+
+#define    RTL8367C_REG_EEELLDP_RX_VALUE_P09_08    0x1894
+
+#define    RTL8367C_REG_EEELLDP_RX_VALUE_P09_07    0x1895
+
+#define    RTL8367C_REG_EEELLDP_RX_VALUE_P09_06    0x1896
+
+#define    RTL8367C_REG_EEELLDP_RX_VALUE_P09_05    0x1897
+
+#define    RTL8367C_REG_EEELLDP_RX_VALUE_P09_04    0x1898
+
+#define    RTL8367C_REG_EEELLDP_RX_VALUE_P09_03    0x1899
+
+#define    RTL8367C_REG_EEELLDP_RX_VALUE_P09_02    0x189a
+
+#define    RTL8367C_REG_EEELLDP_RX_VALUE_P09_01    0x189b
+
+#define    RTL8367C_REG_EEELLDP_RX_VALUE_P09_00    0x189c
+
+#define    RTL8367C_REG_EEELLDP_RX_VALUE_P10_08    0x189d
+
+#define    RTL8367C_REG_EEELLDP_RX_VALUE_P10_07    0x189e
+
+#define    RTL8367C_REG_EEELLDP_RX_VALUE_P10_06    0x189f
+
+#define    RTL8367C_REG_EEELLDP_RX_VALUE_P10_05    0x18a0
+
+#define    RTL8367C_REG_EEELLDP_RX_VALUE_P10_04    0x18a1
+
+#define    RTL8367C_REG_EEELLDP_RX_VALUE_P10_03    0x18a2
+
+#define    RTL8367C_REG_EEELLDP_RX_VALUE_P10_02    0x18a3
+
+#define    RTL8367C_REG_EEELLDP_RX_VALUE_P10_01    0x18a4
+
+#define    RTL8367C_REG_EEELLDP_RX_VALUE_P10_00    0x18a5
+
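+/*
+ * Editorial sketch: the received EEE-LLDP value is stored as nine 16-bit
+ * words per port, word index 8 down to 0 at ascending addresses, for eleven
+ * ports in a row.  Hypothetical address computation matching the table
+ * above (e.g. port 10, word 0 -> 0x18a5):
+ */
+static inline unsigned int rtl8367c_eeelldp_rx_addr(unsigned int port,
+						    unsigned int word)
+{
+	/* port = 0..10, word = 0..8; word 8 sits at the lowest address. */
+	return RTL8367C_REG_EEELLDP_RX_VALUE_P00_08 + port * 9 + (8 - word);
+}
+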
+#define    RTL8367C_REG_RLDP_CTRL0    0x18e0
+#define    RTL8367C_RLDP_TRIGGER_MODE_OFFSET    14
+#define    RTL8367C_RLDP_TRIGGER_MODE_MASK    0x4000
+#define    RTL8367C_RLDP_8051_LOOP_PORTMSK_OFFSET    6
+#define    RTL8367C_RLDP_8051_LOOP_PORTMSK_MASK    0x3FC0
+#define    RTL8367C_RLPP_8051_TRAP_OFFSET    5
+#define    RTL8367C_RLPP_8051_TRAP_MASK    0x20
+#define    RTL8367C_RLDP_INDICATOR_SOURCE_OFFSET    4
+#define    RTL8367C_RLDP_INDICATOR_SOURCE_MASK    0x10
+#define    RTL8367C_RLDP_GEN_RANDOM_OFFSET    3
+#define    RTL8367C_RLDP_GEN_RANDOM_MASK    0x8
+#define    RTL8367C_RLDP_COMP_ID_OFFSET    2
+#define    RTL8367C_RLDP_COMP_ID_MASK    0x4
+#define    RTL8367C_RLDP_8051_ENABLE_OFFSET    1
+#define    RTL8367C_RLDP_8051_ENABLE_MASK    0x2
+#define    RTL8367C_RLDP_ENABLE_OFFSET    0
+#define    RTL8367C_RLDP_ENABLE_MASK    0x1
+
+#define    RTL8367C_REG_RLDP_CTRL1    0x18e1
+#define    RTL8367C_RLDP_RETRY_COUNT_LOOPSTATE_OFFSET    8
+#define    RTL8367C_RLDP_RETRY_COUNT_LOOPSTATE_MASK    0xFF00
+#define    RTL8367C_RLDP_RETRY_COUNT_CHKSTATE_OFFSET    0
+#define    RTL8367C_RLDP_RETRY_COUNT_CHKSTATE_MASK    0xFF
+
+#define    RTL8367C_REG_RLDP_CTRL2    0x18e2
+
+#define    RTL8367C_REG_RLDP_CTRL3    0x18e3
+
+#define    RTL8367C_REG_RLDP_CTRL4    0x18e4
+#define    RTL8367C_RLDP_CTRL4_OFFSET    0
+#define    RTL8367C_RLDP_CTRL4_MASK    0x7FF
+
+#define    RTL8367C_REG_RLDP_RAND_NUM0    0x18e5
+
+#define    RTL8367C_REG_RLDP_RAND_NUM1    0x18e6
+
+#define    RTL8367C_REG_RLDP_RAND_NUM2    0x18e7
+
+#define    RTL8367C_REG_RLDP_MAGIC_NUM0    0x18e8
+
+#define    RTL8367C_REG_RLDP_MAGIC_NUM1    0x18e9
+
+#define    RTL8367C_REG_RLDP_MAGIC_NUM2    0x18ea
+
+#define    RTL8367C_REG_RLDP_LOOPED_INDICATOR    0x18eb
+#define    RTL8367C_RLDP_LOOPED_INDICATOR_OFFSET    0
+#define    RTL8367C_RLDP_LOOPED_INDICATOR_MASK    0x7FF
+
+#define    RTL8367C_REG_RLDP_LOOP_PORT_REG0    0x18ec
+#define    RTL8367C_RLDP_LOOP_PORT_01_OFFSET    8
+#define    RTL8367C_RLDP_LOOP_PORT_01_MASK    0xF00
+#define    RTL8367C_RLDP_LOOP_PORT_00_OFFSET    0
+#define    RTL8367C_RLDP_LOOP_PORT_00_MASK    0xF
+
+#define    RTL8367C_REG_RLDP_LOOP_PORT_REG1    0x18ed
+#define    RTL8367C_RLDP_LOOP_PORT_03_OFFSET    8
+#define    RTL8367C_RLDP_LOOP_PORT_03_MASK    0xF00
+#define    RTL8367C_RLDP_LOOP_PORT_02_OFFSET    0
+#define    RTL8367C_RLDP_LOOP_PORT_02_MASK    0xF
+
+#define    RTL8367C_REG_RLDP_LOOP_PORT_REG2    0x18ee
+#define    RTL8367C_RLDP_LOOP_PORT_05_OFFSET    8
+#define    RTL8367C_RLDP_LOOP_PORT_05_MASK    0xF00
+#define    RTL8367C_RLDP_LOOP_PORT_04_OFFSET    0
+#define    RTL8367C_RLDP_LOOP_PORT_04_MASK    0xF
+
+#define    RTL8367C_REG_RLDP_LOOP_PORT_REG3    0x18ef
+#define    RTL8367C_RLDP_LOOP_PORT_07_OFFSET    8
+#define    RTL8367C_RLDP_LOOP_PORT_07_MASK    0xF00
+#define    RTL8367C_RLDP_LOOP_PORT_06_OFFSET    0
+#define    RTL8367C_RLDP_LOOP_PORT_06_MASK    0xF
+
+#define    RTL8367C_REG_RLDP_RELEASED_INDICATOR    0x18f0
+#define    RTL8367C_RLDP_RELEASED_INDICATOR_OFFSET    0
+#define    RTL8367C_RLDP_RELEASED_INDICATOR_MASK    0x7FF
+
+#define    RTL8367C_REG_RLDP_LOOPSTATUS_INDICATOR    0x18f1
+#define    RTL8367C_RLDP_LOOPSTATUS_INDICATOR_OFFSET    0
+#define    RTL8367C_RLDP_LOOPSTATUS_INDICATOR_MASK    0x7FF
+
+#define    RTL8367C_REG_RLDP_LOOP_PORT_REG4    0x18f2
+#define    RTL8367C_RLDP_LOOP_PORT_9_OFFSET    8
+#define    RTL8367C_RLDP_LOOP_PORT_9_MASK    0xF00
+#define    RTL8367C_RLDP_LOOP_PORT_8_OFFSET    0
+#define    RTL8367C_RLDP_LOOP_PORT_8_MASK    0xF
+
+#define    RTL8367C_REG_RLDP_LOOP_PORT_REG5    0x18f3
+#define    RTL8367C_RLDP_LOOP_PORT_REG5_OFFSET    0
+#define    RTL8367C_RLDP_LOOP_PORT_REG5_MASK    0xF
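+
+/*
+ * Editor's sketch: the per-port loop-status nibbles are not contiguous.
+ * Ports 0-7 occupy LOOP_PORT_REG0..REG3 (two 4-bit fields per register),
+ * while ports 8-10 resume in REG4/REG5 beyond the RELEASED/LOOPSTATUS
+ * indicator registers at 0x18f0/0x18f1, hence the +2 gap below.
+ */
+static inline void rtl8367c_rldp_loop_port_locate(unsigned int port,
+                                                  unsigned int *reg,
+                                                  unsigned int *offset)
+{
+    *reg = RTL8367C_REG_RLDP_LOOP_PORT_REG0 + (port >> 1);
+    if (port >= 8)
+        *reg += 2;              /* skip 0x18f0/0x18f1 */
+    *offset = (port & 1) * 8;   /* low or high nibble field */
+}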
+
+#define    RTL8367C_REG_RLDP_CTRL5    0x18f4
+#define    RTL8367C_RLDP_CTRL5_OFFSET    0
+#define    RTL8367C_RLDP_CTRL5_MASK    0x7
+
+/* (16'h1900)EEE_EEEP_reg */
+
+#define    RTL8367C_REG_EEE_500M_CTRL0    0x1900
+#define    RTL8367C_EEE_500M_CTRL0_OFFSET    0
+#define    RTL8367C_EEE_500M_CTRL0_MASK    0xFF
+
+#define    RTL8367C_REG_EEE_RXIDLE_GIGA_CTRL    0x1901
+#define    RTL8367C_EEE_RXIDLE_GIGA_EN_OFFSET    8
+#define    RTL8367C_EEE_RXIDLE_GIGA_EN_MASK    0x100
+#define    RTL8367C_EEE_RXIDLE_GIGA_OFFSET    0
+#define    RTL8367C_EEE_RXIDLE_GIGA_MASK    0xFF
+
+#define    RTL8367C_REG_EEE_RXIDLE_500M_CTRL    0x1902
+#define    RTL8367C_EEE_RXIDLE_500M_EN_OFFSET    8
+#define    RTL8367C_EEE_RXIDLE_500M_EN_MASK    0x100
+#define    RTL8367C_EEE_RXIDLE_500M_OFFSET    0
+#define    RTL8367C_EEE_RXIDLE_500M_MASK    0xFF
+
+#define    RTL8367C_REG_EEE_DECISION_GIGA_500M    0x1903
+#define    RTL8367C_EEE_DECISION_GIGA_OFFSET    8
+#define    RTL8367C_EEE_DECISION_GIGA_MASK    0xFF00
+#define    RTL8367C_EEE_DECISION_500M_OFFSET    0
+#define    RTL8367C_EEE_DECISION_500M_MASK    0xFF
+
+#define    RTL8367C_REG_EEE_DECISION_100M    0x1904
+#define    RTL8367C_EEE_DECISION_100M_OFFSET    0
+#define    RTL8367C_EEE_DECISION_100M_MASK    0xFF
+
+#define    RTL8367C_REG_EEEP_DEFER_TXLPI    0x1905
+#define    RTL8367C_EEEP_DEFER_TXLPI_OFFSET    0
+#define    RTL8367C_EEEP_DEFER_TXLPI_MASK    0x1
+
+#define    RTL8367C_REG_EEEP_EN    0x1906
+#define    RTL8367C_EEEP_SLAVE_EN_OFFSET    3
+#define    RTL8367C_EEEP_SLAVE_EN_MASK    0x8
+#define    RTL8367C_EEEP_100M_OFFSET    2
+#define    RTL8367C_EEEP_100M_MASK    0x4
+#define    RTL8367C_EEEP_500M_OFFSET    1
+#define    RTL8367C_EEEP_500M_MASK    0x2
+#define    RTL8367C_EEEP_GIGA_OFFSET    0
+#define    RTL8367C_EEEP_GIGA_MASK    0x1
+
+#define    RTL8367C_REG_EEEP_TI_GIGA_500M    0x1907
+#define    RTL8367C_EEEP_TI_GIGA_OFFSET    8
+#define    RTL8367C_EEEP_TI_GIGA_MASK    0xFF00
+#define    RTL8367C_EEEP_TI_500M_OFFSET    0
+#define    RTL8367C_EEEP_TI_500M_MASK    0xFF
+
+#define    RTL8367C_REG_EEEP_TI_100M    0x1908
+#define    RTL8367C_EEEP_TI_100M_OFFSET    0
+#define    RTL8367C_EEEP_TI_100M_MASK    0xFF
+
+#define    RTL8367C_REG_EEEP_CTRL2    0x1909
+#define    RTL8367C_EEEP_CTRL2_OFFSET    0
+#define    RTL8367C_EEEP_CTRL2_MASK    0xFF
+
+#define    RTL8367C_REG_EEEP_RX_RATE_500M    0x190b
+
+#define    RTL8367C_REG_EEEP_RW_GIGA_SLV    0x190c
+#define    RTL8367C_EEEP_RW_GIGA_SLV_OFFSET    0
+#define    RTL8367C_EEEP_RW_GIGA_SLV_MASK    0xFF
+
+#define    RTL8367C_REG_EEEP_TMR_GIGA    0x190d
+#define    RTL8367C_RX_IDLE_EEEP_GIGA_OFFSET    8
+#define    RTL8367C_RX_IDLE_EEEP_GIGA_MASK    0xFF00
+#define    RTL8367C_RX_MIN_SLP_TMR_GIGA_OFFSET    0
+#define    RTL8367C_RX_MIN_SLP_TMR_GIGA_MASK    0xFF
+
+#define    RTL8367C_REG_EEEP_TMR_500M    0x190e
+#define    RTL8367C_RX_IDLE_EEEP_500M_OFFSET    8
+#define    RTL8367C_RX_IDLE_EEEP_500M_MASK    0xFF00
+#define    RTL8367C_RX_MIN_SLP_TMR_500M_OFFSET    0
+#define    RTL8367C_RX_MIN_SLP_TMR_500M_MASK    0xFF
+
+#define    RTL8367C_REG_EEEP_TMR_100M    0x190f
+#define    RTL8367C_RX_IDLE_EEEP_100M_OFFSET    8
+#define    RTL8367C_RX_IDLE_EEEP_100M_MASK    0xFF00
+#define    RTL8367C_RX_MIN_SLP_TMR_100M_OFFSET    0
+#define    RTL8367C_RX_MIN_SLP_TMR_100M_MASK    0xFF
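+
+/*
+ * Editor's sketch: the three EEEP timer registers above share one field
+ * layout and sit at consecutive addresses (giga 0x190d, 500M 0x190e,
+ * 100M 0x190f), so a speed index of 0/1/2 selects the register while the
+ * GIGA field macros describe all of them.  Reuses the accessor typedefs
+ * and rtl8367c_field_rmw() sketched earlier in this file.
+ */
+static inline int rtl8367c_eeep_min_slp_tmr_set(rtl8367c_rd_t rd,
+                                                rtl8367c_wr_t wr,
+                                                unsigned int speed,
+                                                unsigned int tmr)
+{
+    return rtl8367c_field_rmw(rd, wr,
+                              RTL8367C_REG_EEEP_TMR_GIGA + speed,
+                              RTL8367C_RX_MIN_SLP_TMR_GIGA_MASK,
+                              RTL8367C_RX_MIN_SLP_TMR_GIGA_OFFSET, tmr);
+}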
+
+#define    RTL8367C_REG_EEEP_RW_500M_MST_SLV    0x1910
+#define    RTL8367C_EEEP_RW_500M_MST_OFFSET    8
+#define    RTL8367C_EEEP_RW_500M_MST_MASK    0xFF00
+#define    RTL8367C_EEEP_RW_500M_SLV_OFFSET    0
+#define    RTL8367C_EEEP_RW_500M_SLV_MASK    0xFF
+
+#define    RTL8367C_REG_EEEP_500M_CTRL0    0x1911
+#define    RTL8367C_EEEP_500M_CTRL0_OFFSET    0
+#define    RTL8367C_EEEP_500M_CTRL0_MASK    0xFF
+
+#define    RTL8367C_REG_EEEP_500M_CTRL1    0x1912
+#define    RTL8367C_EEEP_TW_500M_OFFSET    8
+#define    RTL8367C_EEEP_TW_500M_MASK    0xFF00
+#define    RTL8367C_EEEP_TP_500M_OFFSET    0
+#define    RTL8367C_EEEP_TP_500M_MASK    0xFF
+
+#define    RTL8367C_REG_EEEP_500M_CTRL2    0x1913
+#define    RTL8367C_EEEP_TXEN_500M_OFFSET    12
+#define    RTL8367C_EEEP_TXEN_500M_MASK    0x1000
+#define    RTL8367C_EEEP_TU_500M_OFFSET    8
+#define    RTL8367C_EEEP_TU_500M_MASK    0x300
+#define    RTL8367C_EEEP_TS_500M_OFFSET    0
+#define    RTL8367C_EEEP_TS_500M_MASK    0xFF
+
+#define    RTL8367C_REG_EEE_NEW_CTRL0    0x1914
+#define    RTL8367C_LINK_UP_DELAY_OFFSET    3
+#define    RTL8367C_LINK_UP_DELAY_MASK    0x18
+#define    RTL8367C_EEE_TXLPI_ORI_OFFSET    2
+#define    RTL8367C_EEE_TXLPI_ORI_MASK    0x4
+#define    RTL8367C_REALTX_SEL_OFFSET    1
+#define    RTL8367C_REALTX_SEL_MASK    0x2
+#define    RTL8367C_EN_FC_EFCT_OFFSET    0
+#define    RTL8367C_EN_FC_EFCT_MASK    0x1
+
+#define    RTL8367C_REG_EEE_LONGIDLE_100M    0x1915
+#define    RTL8367C_EEE_LONGIDLE_100M_OFFSET    0
+#define    RTL8367C_EEE_LONGIDLE_100M_MASK    0x3FF
+
+#define    RTL8367C_REG_EEE_LONGIDLE_500M    0x1916
+#define    RTL8367C_EEE_LONGIDLE_500M_OFFSET    0
+#define    RTL8367C_EEE_LONGIDLE_500M_MASK    0x3FF
+
+#define    RTL8367C_REG_EEE_LONGIDLE_GIGA    0x1917
+#define    RTL8367C_EEE_LONGIDLE_GIGA_OFFSET    0
+#define    RTL8367C_EEE_LONGIDLE_GIGA_MASK    0x3FF
+
+#define    RTL8367C_REG_EEE_MINIPG_100M    0x1918
+
+#define    RTL8367C_REG_EEE_MINIPG_500M    0x1919
+
+#define    RTL8367C_REG_EEE_MINIPG_GIGA    0x191A
+
+#define    RTL8367C_REG_EEE_LONGIDLE_CTRL0    0x191B
+#define    RTL8367C_TX_IDLEN_REQ_100M_OFFSET    10
+#define    RTL8367C_TX_IDLEN_REQ_100M_MASK    0x400
+#define    RTL8367C_TX_IDLEN_REQ_500M_OFFSET    9
+#define    RTL8367C_TX_IDLEN_REQ_500M_MASK    0x200
+#define    RTL8367C_TX_IDLEN_REQ_GIGA_OFFSET    8
+#define    RTL8367C_TX_IDLEN_REQ_GIGA_MASK    0x100
+#define    RTL8367C_EEE_LONGIDLE_CTRL0_TX_LPI_MINIPG_100M_OFFSET    0
+#define    RTL8367C_EEE_LONGIDLE_CTRL0_TX_LPI_MINIPG_100M_MASK    0xFF
+
+#define    RTL8367C_REG_EEE_LONGIDLE_CTRL1    0x191C
+#define    RTL8367C_EEE_LONGIDLE_CTRL1_TX_LPI_MINIPG_GELITE_OFFSET    8
+#define    RTL8367C_EEE_LONGIDLE_CTRL1_TX_LPI_MINIPG_GELITE_MASK    0xFF00
+#define    RTL8367C_EEE_LONGIDLE_CTRL1_TX_LPI_MINIPG_GIGA_OFFSET    0
+#define    RTL8367C_EEE_LONGIDLE_CTRL1_TX_LPI_MINIPG_GIGA_MASK    0xFF
+
+#define    RTL8367C_REG_EEE_TD_CTRL_H    0x191d
+#define    RTL8367C_REF_RXLPI_OFFSET    8
+#define    RTL8367C_REF_RXLPI_MASK    0x100
+#define    RTL8367C_LOW_Q_TX_DELAY_GE_500M_H_OFFSET    4
+#define    RTL8367C_LOW_Q_TX_DELAY_GE_500M_H_MASK    0xF0
+#define    RTL8367C_LOW_Q_TX_DELAY_FE_H_OFFSET    0
+#define    RTL8367C_LOW_Q_TX_DELAY_FE_H_MASK    0xF
+
+/* (16'h1a00)nic_reg */
+
+#define    RTL8367C_REG_NIC_RXRDRL    0x1a04
+#define    RTL8367C_NIC_RXRDRL_OFFSET    0
+#define    RTL8367C_NIC_RXRDRL_MASK    0xFF
+
+#define    RTL8367C_REG_NIC_RXRDRH    0x1a05
+#define    RTL8367C_NIC_RXRDRH_OFFSET    0
+#define    RTL8367C_NIC_RXRDRH_MASK    0xFF
+
+#define    RTL8367C_REG_NIC_TXASRL    0x1a08
+#define    RTL8367C_NIC_TXASRL_OFFSET    0
+#define    RTL8367C_NIC_TXASRL_MASK    0xFF
+
+#define    RTL8367C_REG_NIC_TXASRH    0x1a09
+#define    RTL8367C_NIC_TXASRH_OFFSET    0
+#define    RTL8367C_NIC_TXASRH_MASK    0xFF
+
+#define    RTL8367C_REG_NIC_RXCMDR    0x1a0c
+#define    RTL8367C_NIC_RXCMDR_OFFSET    0
+#define    RTL8367C_NIC_RXCMDR_MASK    0x1
+
+#define    RTL8367C_REG_NIC_TXCMDR    0x1a0d
+#define    RTL8367C_NIC_TXCMDR_OFFSET    0
+#define    RTL8367C_NIC_TXCMDR_MASK    0x1
+
+#define    RTL8367C_REG_NIC_IMS    0x1a0e
+#define    RTL8367C_NIC_RXIS_OFFSET    7
+#define    RTL8367C_NIC_RXIS_MASK    0x80
+#define    RTL8367C_NIC_TXIS_OFFSET    6
+#define    RTL8367C_NIC_TXIS_MASK    0x40
+#define    RTL8367C_NIC_TXES_OFFSET    5
+#define    RTL8367C_NIC_TXES_MASK    0x20
+#define    RTL8367C_NIC_IMS_DMY_OFFSET    4
+#define    RTL8367C_NIC_IMS_DMY_MASK    0x10
+#define    RTL8367C_NIC_RXBUS_OFFSET    3
+#define    RTL8367C_NIC_RXBUS_MASK    0x8
+#define    RTL8367C_NIC_TXBOS_OFFSET    2
+#define    RTL8367C_NIC_TXBOS_MASK    0x4
+#define    RTL8367C_NIC_RXMIS_OFFSET    1
+#define    RTL8367C_NIC_RXMIS_MASK    0x2
+#define    RTL8367C_NIC_TXNLS_OFFSET    0
+#define    RTL8367C_NIC_TXNLS_MASK    0x1
+
+#define    RTL8367C_REG_NIC_IMR    0x1a0f
+#define    RTL8367C_NIC_RXIE_OFFSET    7
+#define    RTL8367C_NIC_RXIE_MASK    0x80
+#define    RTL8367C_NIC_TXIE_OFFSET    6
+#define    RTL8367C_NIC_TXIE_MASK    0x40
+#define    RTL8367C_NIC_TXEE_OFFSET    5
+#define    RTL8367C_NIC_TXEE_MASK    0x20
+#define    RTL8367C_NIC_IMR_DMY_OFFSET    4
+#define    RTL8367C_NIC_IMR_DMY_MASK    0x10
+#define    RTL8367C_NIC_RXBUE_OFFSET    3
+#define    RTL8367C_NIC_RXBUE_MASK    0x8
+#define    RTL8367C_NIC_TXBOE_OFFSET    2
+#define    RTL8367C_NIC_TXBOE_MASK    0x4
+#define    RTL8367C_NIC_RXMIE_OFFSET    1
+#define    RTL8367C_NIC_RXMIE_MASK    0x2
+#define    RTL8367C_NIC_TXNLE_OFFSET    0
+#define    RTL8367C_NIC_TXNLE_MASK    0x1
+
+#define    RTL8367C_REG_NIC_RXCR0    0x1a14
+#define    RTL8367C_NIC_HFPPE_OFFSET    7
+#define    RTL8367C_NIC_HFPPE_MASK    0x80
+#define    RTL8367C_NIC_HFMPE_OFFSET    6
+#define    RTL8367C_NIC_HFMPE_MASK    0x40
+#define    RTL8367C_NIC_RXBPE_OFFSET    5
+#define    RTL8367C_NIC_RXBPE_MASK    0x20
+#define    RTL8367C_NIC_RXMPE_OFFSET    4
+#define    RTL8367C_NIC_RXMPE_MASK    0x10
+#define    RTL8367C_NIC_RXPPS_OFFSET    2
+#define    RTL8367C_NIC_RXPPS_MASK    0xC
+#define    RTL8367C_NIC_RXAPE_OFFSET    1
+#define    RTL8367C_NIC_RXAPE_MASK    0x2
+#define    RTL8367C_NIC_ARPPE_OFFSET    0
+#define    RTL8367C_NIC_ARPPE_MASK    0x1
+
+#define    RTL8367C_REG_NIC_RXCR1    0x1a15
+#define    RTL8367C_NIC_RL4CEPE_OFFSET    4
+#define    RTL8367C_NIC_RL4CEPE_MASK    0x10
+#define    RTL8367C_NIC_RL3CEPE_OFFSET    3
+#define    RTL8367C_NIC_RL3CEPE_MASK    0x8
+#define    RTL8367C_NIC_RCRCEPE_OFFSET    2
+#define    RTL8367C_NIC_RCRCEPE_MASK    0x4
+#define    RTL8367C_NIC_RMCRC_OFFSET    1
+#define    RTL8367C_NIC_RMCRC_MASK    0x2
+#define    RTL8367C_NIC_RXENABLE_OFFSET    0
+#define    RTL8367C_NIC_RXENABLE_MASK    0x1
+
+#define    RTL8367C_REG_NIC_TXCR    0x1a16
+#define    RTL8367C_NIC_LBE_OFFSET    2
+#define    RTL8367C_NIC_LBE_MASK    0x4
+#define    RTL8367C_NIC_TXMFM_OFFSET    1
+#define    RTL8367C_NIC_TXMFM_MASK    0x2
+#define    RTL8367C_NIC_TXENABLE_OFFSET    0
+#define    RTL8367C_NIC_TXENABLE_MASK    0x1
+
+#define    RTL8367C_REG_NIC_GCR    0x1a17
+#define    RTL8367C_DUMMY_7_6_OFFSET    6
+#define    RTL8367C_DUMMY_7_6_MASK    0xC0
+#define    RTL8367C_NIC_RXMTU_OFFSET    4
+#define    RTL8367C_NIC_RXMTU_MASK    0x30
+#define    RTL8367C_NIC_GCR_DUMMY_0_OFFSET    0
+#define    RTL8367C_NIC_GCR_DUMMY_0_MASK    0x1
+
+#define    RTL8367C_REG_NIC_MHR0    0x1a24
+#define    RTL8367C_NIC_MHR0_OFFSET    0
+#define    RTL8367C_NIC_MHR0_MASK    0xFF
+
+#define    RTL8367C_REG_NIC_MHR1    0x1a25
+#define    RTL8367C_NIC_MHR1_OFFSET    0
+#define    RTL8367C_NIC_MHR1_MASK    0xFF
+
+#define    RTL8367C_REG_NIC_MHR2    0x1a26
+#define    RTL8367C_NIC_MHR2_OFFSET    0
+#define    RTL8367C_NIC_MHR2_MASK    0xFF
+
+#define    RTL8367C_REG_NIC_MHR3    0x1a27
+#define    RTL8367C_NIC_MHR3_OFFSET    0
+#define    RTL8367C_NIC_MHR3_MASK    0xFF
+
+#define    RTL8367C_REG_NIC_MHR4    0x1a28
+#define    RTL8367C_NIC_MHR4_OFFSET    0
+#define    RTL8367C_NIC_MHR4_MASK    0xFF
+
+#define    RTL8367C_REG_NIC_MHR5    0x1a29
+#define    RTL8367C_NIC_MHR5_OFFSET    0
+#define    RTL8367C_NIC_MHR5_MASK    0xFF
+
+#define    RTL8367C_REG_NIC_MHR6    0x1a2a
+#define    RTL8367C_NIC_MHR6_OFFSET    0
+#define    RTL8367C_NIC_MHR6_MASK    0xFF
+
+#define    RTL8367C_REG_NIC_MHR7    0x1a2b
+#define    RTL8367C_NIC_MHR7_OFFSET    0
+#define    RTL8367C_NIC_MHR7_MASK    0xFF
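+
+/*
+ * Editor's sketch: MHR0..MHR7 (and the PAHR0..PAHR7 bank that follows) are
+ * eight consecutive registers holding one byte each, as the 0xFF masks
+ * suggest.  The exact semantics (the names hint at a multicast hash and a
+ * physical address) are an assumption; only the one-byte-per-register
+ * layout is taken from the definitions above.
+ */
+static inline int rtl8367c_nic_mhr_write(rtl8367c_wr_t wr,
+                                         const unsigned char bytes[8])
+{
+    unsigned int i;
+    int ret;
+
+    for (i = 0; i < 8; i++) {
+        ret = wr(RTL8367C_REG_NIC_MHR0 + i, bytes[i]);
+        if (ret)
+            return ret;
+    }
+    return 0;
+}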
+
+#define    RTL8367C_REG_NIC_PAHR0    0x1a2c
+#define    RTL8367C_NIC_PAHR0_OFFSET    0
+#define    RTL8367C_NIC_PAHR0_MASK    0xFF
+
+#define    RTL8367C_REG_NIC_PAHR1    0x1a2d
+#define    RTL8367C_NIC_PAHR1_OFFSET    0
+#define    RTL8367C_NIC_PAHR1_MASK    0xFF
+
+#define    RTL8367C_REG_NIC_PAHR2    0x1a2e
+#define    RTL8367C_NIC_PAHR2_OFFSET    0
+#define    RTL8367C_NIC_PAHR2_MASK    0xFF
+
+#define    RTL8367C_REG_NIC_PAHR3    0x1a2f
+#define    RTL8367C_NIC_PAHR3_OFFSET    0
+#define    RTL8367C_NIC_PAHR3_MASK    0xFF
+
+#define    RTL8367C_REG_NIC_PAHR4    0x1a30
+#define    RTL8367C_NIC_PAHR4_OFFSET    0
+#define    RTL8367C_NIC_PAHR4_MASK    0xFF
+
+#define    RTL8367C_REG_NIC_PAHR5    0x1a31
+#define    RTL8367C_NIC_PAHR5_OFFSET    0
+#define    RTL8367C_NIC_PAHR5_MASK    0xFF
+
+#define    RTL8367C_REG_NIC_PAHR6    0x1a32
+#define    RTL8367C_NIC_PAHR6_OFFSET    0
+#define    RTL8367C_NIC_PAHR6_MASK    0xFF
+
+#define    RTL8367C_REG_NIC_PAHR7    0x1a33
+#define    RTL8367C_NIC_PAHR7_OFFSET    0
+#define    RTL8367C_NIC_PAHR7_MASK    0xFF
+
+#define    RTL8367C_REG_NIC_TXSTOPRL    0x1a44
+#define    RTL8367C_NIC_TXSTOPRL_OFFSET    0
+#define    RTL8367C_NIC_TXSTOPRL_MASK    0xFF
+
+#define    RTL8367C_REG_NIC_TXSTOPRH    0x1a45
+#define    RTL8367C_NIC_TXSTOPRH_OFFSET    0
+#define    RTL8367C_NIC_TXSTOPRH_MASK    0x3
+
+#define    RTL8367C_REG_NIC_RXSTOPRL    0x1a46
+#define    RTL8367C_NIC_RXSTOPRL_OFFSET    0
+#define    RTL8367C_NIC_RXSTOPRL_MASK    0xFF
+
+#define    RTL8367C_REG_NIC_RXSTOPRH    0x1a47
+#define    RTL8367C_NIC_RXSTOPRH_OFFSET    0
+#define    RTL8367C_NIC_RXSTOPRH_MASK    0x3
+
+#define    RTL8367C_REG_NIC_RXFSTR    0x1a48
+#define    RTL8367C_NIC_RXFSTR_OFFSET    0
+#define    RTL8367C_NIC_RXFSTR_MASK    0xFF
+
+#define    RTL8367C_REG_NIC_RXMBTRL    0x1a4c
+#define    RTL8367C_NIC_RXMBTRL_OFFSET    0
+#define    RTL8367C_NIC_RXMBTRL_MASK    0xFF
+
+#define    RTL8367C_REG_NIC_RXMBTRH    0x1a4d
+#define    RTL8367C_NIC_RXMBTRH_OFFSET    0
+#define    RTL8367C_NIC_RXMBTRH_MASK    0x7F
+
+#define    RTL8367C_REG_NIC_RXMPTR    0x1a4e
+#define    RTL8367C_NIC_RXMPTR_OFFSET    0
+#define    RTL8367C_NIC_RXMPTR_MASK    0xFF
+
+#define    RTL8367C_REG_NIC_T0TR    0x1a4f
+#define    RTL8367C_NIC_T0TR_OFFSET    0
+#define    RTL8367C_NIC_T0TR_MASK    0xFF
+
+#define    RTL8367C_REG_NIC_CRXCPRL    0x1a50
+#define    RTL8367C_NIC_CRXCPRL_OFFSET    0
+#define    RTL8367C_NIC_CRXCPRL_MASK    0xFF
+
+#define    RTL8367C_REG_NIC_CRXCPRH    0x1a51
+#define    RTL8367C_NIC_CRXCPRH_OFFSET    0
+#define    RTL8367C_NIC_CRXCPRH_MASK    0xFF
+
+#define    RTL8367C_REG_NIC_CTXCPRL    0x1a52
+#define    RTL8367C_NIC_CTXCPRL_OFFSET    0
+#define    RTL8367C_NIC_CTXCPRL_MASK    0xFF
+
+#define    RTL8367C_REG_NIC_CTXPCRH    0x1a53
+#define    RTL8367C_NIC_CTXPCRH_OFFSET    0
+#define    RTL8367C_NIC_CTXPCRH_MASK    0xFF
+
+#define    RTL8367C_REG_NIC_SRXCURPKTRL    0x1a54
+#define    RTL8367C_NIC_SRXCURPKTRL_OFFSET    0
+#define    RTL8367C_NIC_SRXCURPKTRL_MASK    0xFF
+
+#define    RTL8367C_REG_NIC_SRXCURPKTRH    0x1a55
+#define    RTL8367C_NIC_SRXCURPKTRH_OFFSET    0
+#define    RTL8367C_NIC_SRXCURPKTRH_MASK    0xFF
+
+#define    RTL8367C_REG_NIC_STXCURPKTRL    0x1a56
+#define    RTL8367C_NIC_STXCURPKTRL_OFFSET    0
+#define    RTL8367C_NIC_STXCURPKTRL_MASK    0xFF
+
+#define    RTL8367C_REG_NIC_STXCURPKTRH    0x1a57
+#define    RTL8367C_NIC_STXCURPKTRH_OFFSET    0
+#define    RTL8367C_NIC_STXCURPKTRH_MASK    0xFF
+
+#define    RTL8367C_REG_NIC_STXPKTLENRL    0x1a58
+#define    RTL8367C_NIC_STXPKTLENRL_OFFSET    0
+#define    RTL8367C_NIC_STXPKTLENRL_MASK    0xFF
+
+#define    RTL8367C_REG_NIC_STXPKTLENRH    0x1a59
+#define    RTL8367C_NIC_STXPKTLENRH_OFFSET    0
+#define    RTL8367C_NIC_STXPKTLENRH_MASK    0xFF
+
+#define    RTL8367C_REG_NIC_STXCURUNITRL    0x1a5a
+#define    RTL8367C_NIC_STXCURUNITRL_OFFSET    0
+#define    RTL8367C_NIC_STXCURUNITRL_MASK    0xFF
+
+#define    RTL8367C_REG_NIC_STXCURUNITRH    0x1a5b
+#define    RTL8367C_NIC_STXCURUNITRH_OFFSET    0
+#define    RTL8367C_NIC_STXCURUNITRH_MASK    0xFF
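+
+/*
+ * Editor's sketch: the NIC counters above come as _L/_H pairs at adjacent
+ * addresses, each half carrying (at most) eight bits.  Assuming the pair
+ * forms one 16-bit value, low byte first, a generic reader looks like
+ * this; pass e.g. RTL8367C_REG_NIC_CRXCPRL as reg_l.
+ */
+static inline int rtl8367c_nic_read_u16(rtl8367c_rd_t rd, unsigned int reg_l,
+                                        unsigned int *val)
+{
+    unsigned int lo, hi;
+    int ret;
+
+    ret = rd(reg_l, &lo);
+    if (ret)
+        return ret;
+    ret = rd(reg_l + 1, &hi);   /* the _H register follows the _L one */
+    if (ret)
+        return ret;
+    *val = ((hi & 0xFF) << 8) | (lo & 0xFF);
+    return 0;
+}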
+
+#define    RTL8367C_REG_NIC_DROP_MODE    0x1a5c
+#define    RTL8367C_NIC_RXDV_MODE_OFFSET    1
+#define    RTL8367C_NIC_RXDV_MODE_MASK    0x2
+#define    RTL8367C_NIC_DROP_MODE_OFFSET    0
+#define    RTL8367C_NIC_DROP_MODE_MASK    0x1
+
+/* (16'h1b00)LED */
+
+#define    RTL8367C_REG_LED_SYS_CONFIG    0x1b00
+#define    RTL8367C_LED_SYS_CONFIG_DUMMY_15_OFFSET    15
+#define    RTL8367C_LED_SYS_CONFIG_DUMMY_15_MASK    0x8000
+#define    RTL8367C_LED_SERIAL_OUT_MODE_OFFSET    14
+#define    RTL8367C_LED_SERIAL_OUT_MODE_MASK    0x4000
+#define    RTL8367C_LED_EEE_LPI_MODE_OFFSET    13
+#define    RTL8367C_LED_EEE_LPI_MODE_MASK    0x2000
+#define    RTL8367C_LED_EEE_LPI_EN_OFFSET    12
+#define    RTL8367C_LED_EEE_LPI_EN_MASK    0x1000
+#define    RTL8367C_LED_EEE_LPI_10_OFFSET    11
+#define    RTL8367C_LED_EEE_LPI_10_MASK    0x800
+#define    RTL8367C_LED_EEE_CAP_10_OFFSET    10
+#define    RTL8367C_LED_EEE_CAP_10_MASK    0x400
+#define    RTL8367C_LED_LPI_SEL_OFFSET    8
+#define    RTL8367C_LED_LPI_SEL_MASK    0x300
+#define    RTL8367C_SERI_LED_ACT_LOW_OFFSET    7
+#define    RTL8367C_SERI_LED_ACT_LOW_MASK    0x80
+#define    RTL8367C_LED_POWERON_2_OFFSET    6
+#define    RTL8367C_LED_POWERON_2_MASK    0x40
+#define    RTL8367C_LED_POWERON_1_OFFSET    5
+#define    RTL8367C_LED_POWERON_1_MASK    0x20
+#define    RTL8367C_LED_POWERON_0_OFFSET    4
+#define    RTL8367C_LED_POWERON_0_MASK    0x10
+#define    RTL8367C_LED_IO_DISABLE_OFFSET    3
+#define    RTL8367C_LED_IO_DISABLE_MASK    0x8
+#define    RTL8367C_DUMMY_2_2_OFFSET    2
+#define    RTL8367C_DUMMY_2_2_MASK    0x4
+#define    RTL8367C_LED_SELECT_OFFSET    0
+#define    RTL8367C_LED_SELECT_MASK    0x3
+
+#define    RTL8367C_REG_LED_SYS_CONFIG2    0x1b01
+#define    RTL8367C_LED_SYS_CONFIG2_DUMMY_OFFSET    2
+#define    RTL8367C_LED_SYS_CONFIG2_DUMMY_MASK    0xFFFC
+#define    RTL8367C_GATE_LPTD_BYPASS_OFFSET    1
+#define    RTL8367C_GATE_LPTD_BYPASS_MASK    0x2
+#define    RTL8367C_LED_SPD_MODE_OFFSET    0
+#define    RTL8367C_LED_SPD_MODE_MASK    0x1
+
+#define    RTL8367C_REG_LED_MODE    0x1b02
+#define    RTL8367C_DLINK_TIME_OFFSET    15
+#define    RTL8367C_DLINK_TIME_MASK    0x8000
+#define    RTL8367C_LED_BUZZ_DUTY_OFFSET    14
+#define    RTL8367C_LED_BUZZ_DUTY_MASK    0x4000
+#define    RTL8367C_BUZZER_RATE_OFFSET    12
+#define    RTL8367C_BUZZER_RATE_MASK    0x3000
+#define    RTL8367C_LOOP_DETECT_MODE_OFFSET    11
+#define    RTL8367C_LOOP_DETECT_MODE_MASK    0x800
+#define    RTL8367C_SEL_PWRON_TIME_OFFSET    9
+#define    RTL8367C_SEL_PWRON_TIME_MASK    0x600
+#define    RTL8367C_EN_DLINK_LED_OFFSET    8
+#define    RTL8367C_EN_DLINK_LED_MASK    0x100
+#define    RTL8367C_LOOP_DETECT_RATE_OFFSET    6
+#define    RTL8367C_LOOP_DETECT_RATE_MASK    0xC0
+#define    RTL8367C_FORCE_RATE_OFFSET    4
+#define    RTL8367C_FORCE_RATE_MASK    0x30
+#define    RTL8367C_SEL_LEDRATE_OFFSET    1
+#define    RTL8367C_SEL_LEDRATE_MASK    0xE
+#define    RTL8367C_SPEED_UP_OFFSET    0
+#define    RTL8367C_SPEED_UP_MASK    0x1
+
+#define    RTL8367C_REG_LED_CONFIGURATION    0x1b03
+#define    RTL8367C_LED_CONFIGURATION_DUMMY_OFFSET    15
+#define    RTL8367C_LED_CONFIGURATION_DUMMY_MASK    0x8000
+#define    RTL8367C_LED_CONFIG_SEL_OFFSET    14
+#define    RTL8367C_LED_CONFIG_SEL_MASK    0x4000
+#define    RTL8367C_DATA_LED_OFFSET    12
+#define    RTL8367C_DATA_LED_MASK    0x3000
+#define    RTL8367C_LED2_CFG_OFFSET    8
+#define    RTL8367C_LED2_CFG_MASK    0xF00
+#define    RTL8367C_LED1_CFG_OFFSET    4
+#define    RTL8367C_LED1_CFG_MASK    0xF0
+#define    RTL8367C_LED0_CFG_OFFSET    0
+#define    RTL8367C_LED0_CFG_MASK    0xF
+
+#define    RTL8367C_REG_RTCT_RESULTS_CFG    0x1b04
+#define    RTL8367C_RTCT_2PAIR_FTT_OFFSET    15
+#define    RTL8367C_RTCT_2PAIR_FTT_MASK    0x8000
+#define    RTL8367C_RTCT_2PAIR_MODE_OFFSET    14
+#define    RTL8367C_RTCT_2PAIR_MODE_MASK    0x4000
+#define    RTL8367C_BLINK_EN_OFFSET    13
+#define    RTL8367C_BLINK_EN_MASK    0x2000
+#define    RTL8367C_TIMEOUT_OFFSET    12
+#define    RTL8367C_TIMEOUT_MASK    0x1000
+#define    RTL8367C_EN_CD_SAME_SHORT_OFFSET    11
+#define    RTL8367C_EN_CD_SAME_SHORT_MASK    0x800
+#define    RTL8367C_EN_CD_SAME_OPEN_OFFSET    10
+#define    RTL8367C_EN_CD_SAME_OPEN_MASK    0x400
+#define    RTL8367C_EN_CD_SAME_LINEDRIVER_OFFSET    9
+#define    RTL8367C_EN_CD_SAME_LINEDRIVER_MASK    0x200
+#define    RTL8367C_EN_CD_SAME_MISMATCH_OFFSET    8
+#define    RTL8367C_EN_CD_SAME_MISMATCH_MASK    0x100
+#define    RTL8367C_EN_CD_SHORT_OFFSET    7
+#define    RTL8367C_EN_CD_SHORT_MASK    0x80
+#define    RTL8367C_EN_AB_SHORT_OFFSET    6
+#define    RTL8367C_EN_AB_SHORT_MASK    0x40
+#define    RTL8367C_EN_CD_OPEN_OFFSET    5
+#define    RTL8367C_EN_CD_OPEN_MASK    0x20
+#define    RTL8367C_EN_AB_OPEN_OFFSET    4
+#define    RTL8367C_EN_AB_OPEN_MASK    0x10
+#define    RTL8367C_EN_CD_MISMATCH_OFFSET    3
+#define    RTL8367C_EN_CD_MISMATCH_MASK    0x8
+#define    RTL8367C_EN_AB_MISMATCH_OFFSET    2
+#define    RTL8367C_EN_AB_MISMATCH_MASK    0x4
+#define    RTL8367C_EN_CD_LINEDRIVER_OFFSET    1
+#define    RTL8367C_EN_CD_LINEDRIVER_MASK    0x2
+#define    RTL8367C_EN_AB_LINEDRIVER_OFFSET    0
+#define    RTL8367C_EN_AB_LINEDRIVER_MASK    0x1
+
+#define    RTL8367C_REG_RTCT_LED    0x1b05
+#define    RTL8367C_DUMMY_1b05a_OFFSET    12
+#define    RTL8367C_DUMMY_1b05a_MASK    0xF000
+#define    RTL8367C_RTCT_LED2_OFFSET    8
+#define    RTL8367C_RTCT_LED2_MASK    0xF00
+#define    RTL8367C_RTCT_LED1_OFFSET    4
+#define    RTL8367C_RTCT_LED1_MASK    0xF0
+#define    RTL8367C_RTCT_LED0_OFFSET    0
+#define    RTL8367C_RTCT_LED0_MASK    0xF
+
+#define    RTL8367C_REG_CPU_FORCE_LED_CFG    0x1b07
+#define    RTL8367C_DUMMY_1b07a_OFFSET    8
+#define    RTL8367C_DUMMY_1b07a_MASK    0xFF00
+#define    RTL8367C_LED_FORCE_MODE_OFFSET    2
+#define    RTL8367C_LED_FORCE_MODE_MASK    0xFC
+#define    RTL8367C_FORCE_MODE_OFFSET    0
+#define    RTL8367C_FORCE_MODE_MASK    0x3
+
+#define    RTL8367C_REG_CPU_FORCE_LED0_CFG0    0x1b08
+#define    RTL8367C_PORT7_LED0_MODE_OFFSET    14
+#define    RTL8367C_PORT7_LED0_MODE_MASK    0xC000
+#define    RTL8367C_PORT6_LED0_MODE_OFFSET    12
+#define    RTL8367C_PORT6_LED0_MODE_MASK    0x3000
+#define    RTL8367C_PORT5_LED0_MODE_OFFSET    10
+#define    RTL8367C_PORT5_LED0_MODE_MASK    0xC00
+#define    RTL8367C_PORT4_LED0_MODE_OFFSET    8
+#define    RTL8367C_PORT4_LED0_MODE_MASK    0x300
+#define    RTL8367C_PORT3_LED0_MODE_OFFSET    6
+#define    RTL8367C_PORT3_LED0_MODE_MASK    0xC0
+#define    RTL8367C_PORT2_LED0_MODE_OFFSET    4
+#define    RTL8367C_PORT2_LED0_MODE_MASK    0x30
+#define    RTL8367C_PORT1_LED0_MODE_OFFSET    2
+#define    RTL8367C_PORT1_LED0_MODE_MASK    0xC
+#define    RTL8367C_PORT0_LED0_MODE_OFFSET    0
+#define    RTL8367C_PORT0_LED0_MODE_MASK    0x3
+
+#define    RTL8367C_REG_CPU_FORCE_LED0_CFG1    0x1b09
+#define    RTL8367C_DUMMY_1b09a_OFFSET    4
+#define    RTL8367C_DUMMY_1b09a_MASK    0xFFF0
+#define    RTL8367C_PORT9_LED0_MODE_OFFSET    2
+#define    RTL8367C_PORT9_LED0_MODE_MASK    0xC
+#define    RTL8367C_PORT8_LED0_MODE_OFFSET    0
+#define    RTL8367C_PORT8_LED0_MODE_MASK    0x3
+
+#define    RTL8367C_REG_CPU_FORCE_LED1_CFG0    0x1b0a
+#define    RTL8367C_PORT7_LED1_MODE_OFFSET    14
+#define    RTL8367C_PORT7_LED1_MODE_MASK    0xC000
+#define    RTL8367C_PORT6_LED1_MODE_OFFSET    12
+#define    RTL8367C_PORT6_LED1_MODE_MASK    0x3000
+#define    RTL8367C_PORT5_LED1_MODE_OFFSET    10
+#define    RTL8367C_PORT5_LED1_MODE_MASK    0xC00
+#define    RTL8367C_PORT4_LED1_MODE_OFFSET    8
+#define    RTL8367C_PORT4_LED1_MODE_MASK    0x300
+#define    RTL8367C_PORT3_LED1_MODE_OFFSET    6
+#define    RTL8367C_PORT3_LED1_MODE_MASK    0xC0
+#define    RTL8367C_PORT2_LED1_MODE_OFFSET    4
+#define    RTL8367C_PORT2_LED1_MODE_MASK    0x30
+#define    RTL8367C_PORT1_LED1_MODE_OFFSET    2
+#define    RTL8367C_PORT1_LED1_MODE_MASK    0xC
+#define    RTL8367C_PORT0_LED1_MODE_OFFSET    0
+#define    RTL8367C_PORT0_LED1_MODE_MASK    0x3
+
+#define    RTL8367C_REG_CPU_FORCE_LED1_CFG1    0x1b0b
+#define    RTL8367C_DUMMY_1b0ba_OFFSET    4
+#define    RTL8367C_DUMMY_1b0ba_MASK    0xFFF0
+#define    RTL8367C_PORT9_LED1_MODE_OFFSET    2
+#define    RTL8367C_PORT9_LED1_MODE_MASK    0xC
+#define    RTL8367C_PORT8_LED1_MODE_OFFSET    0
+#define    RTL8367C_PORT8_LED1_MODE_MASK    0x3
+
+#define    RTL8367C_REG_CPU_FORCE_LED2_CFG0    0x1b0c
+#define    RTL8367C_PORT7_LED2_MODE_OFFSET    14
+#define    RTL8367C_PORT7_LED2_MODE_MASK    0xC000
+#define    RTL8367C_PORT6_LED2_MODE_OFFSET    12
+#define    RTL8367C_PORT6_LED2_MODE_MASK    0x3000
+#define    RTL8367C_PORT5_LED2_MODE_OFFSET    10
+#define    RTL8367C_PORT5_LED2_MODE_MASK    0xC00
+#define    RTL8367C_PORT4_LED2_MODE_OFFSET    8
+#define    RTL8367C_PORT4_LED2_MODE_MASK    0x300
+#define    RTL8367C_PORT3_LED2_MODE_OFFSET    6
+#define    RTL8367C_PORT3_LED2_MODE_MASK    0xC0
+#define    RTL8367C_PORT2_LED2_MODE_OFFSET    4
+#define    RTL8367C_PORT2_LED2_MODE_MASK    0x30
+#define    RTL8367C_PORT1_LED2_MODE_OFFSET    2
+#define    RTL8367C_PORT1_LED2_MODE_MASK    0xC
+#define    RTL8367C_PORT0_LED2_MODE_OFFSET    0
+#define    RTL8367C_PORT0_LED2_MODE_MASK    0x3
+
+#define    RTL8367C_REG_CPU_FORCE_LED2_CFG1    0x1b0d
+#define    RTL8367C_DUMMY_1b0da_OFFSET    4
+#define    RTL8367C_DUMMY_1b0da_MASK    0xFFF0
+#define    RTL8367C_PORT9_LED2_MODE_OFFSET    2
+#define    RTL8367C_PORT9_LED2_MODE_MASK    0xC
+#define    RTL8367C_PORT8_LED2_MODE_OFFSET    0
+#define    RTL8367C_PORT8_LED2_MODE_MASK    0x3
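+
+/*
+ * Editor's sketch: each of the three forced-LED groups owns a CFG0/CFG1
+ * register pair (LED0 at 0x1b08/0x1b09, LED1 at 0x1b0a/0x1b0b, LED2 at
+ * 0x1b0c/0x1b0d), with ports 0-7 as 2-bit fields in CFG0 and ports 8-9 in
+ * CFG1.  That collapses the per-port macros above into one address
+ * computation.  Reuses rtl8367c_field_rmw() from earlier in this file.
+ */
+static inline int rtl8367c_cpu_force_led_mode(rtl8367c_rd_t rd,
+                                              rtl8367c_wr_t wr,
+                                              unsigned int led,
+                                              unsigned int port,
+                                              unsigned int mode)
+{
+    unsigned int reg = RTL8367C_REG_CPU_FORCE_LED0_CFG0 + 2 * led + (port >> 3);
+    unsigned int offset = (port & 7) * 2;
+
+    return rtl8367c_field_rmw(rd, wr, reg, 0x3 << offset, offset, mode);
+}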
+
+#define    RTL8367C_REG_LED_ACTIVE_LOW_CFG0    0x1b0e
+#define    RTL8367C_LED_ACTIVE_LOW_CFG0_DUMMY_15_OFFSET    15
+#define    RTL8367C_LED_ACTIVE_LOW_CFG0_DUMMY_15_MASK    0x8000
+#define    RTL8367C_PORT3_LED_ACTIVE_LOW_OFFSET    12
+#define    RTL8367C_PORT3_LED_ACTIVE_LOW_MASK    0x7000
+#define    RTL8367C_LED_ACTIVE_LOW_CFG0_DUMMY_11_OFFSET    11
+#define    RTL8367C_LED_ACTIVE_LOW_CFG0_DUMMY_11_MASK    0x800
+#define    RTL8367C_PORT2_LED_ACTIVE_LOW_OFFSET    8
+#define    RTL8367C_PORT2_LED_ACTIVE_LOW_MASK    0x700
+#define    RTL8367C_DUMMY_7_OFFSET    7
+#define    RTL8367C_DUMMY_7_MASK    0x80
+#define    RTL8367C_PORT1_LED_ACTIVE_LOW_OFFSET    4
+#define    RTL8367C_PORT1_LED_ACTIVE_LOW_MASK    0x70
+#define    RTL8367C_DUMMY_3_OFFSET    3
+#define    RTL8367C_DUMMY_3_MASK    0x8
+#define    RTL8367C_PORT0_LED_ACTIVE_LOW_OFFSET    0
+#define    RTL8367C_PORT0_LED_ACTIVE_LOW_MASK    0x7
+
+#define    RTL8367C_REG_LED_ACTIVE_LOW_CFG1    0x1b0f
+#define    RTL8367C_LED_ACTIVE_LOW_CFG1_DUMMY_15_OFFSET    15
+#define    RTL8367C_LED_ACTIVE_LOW_CFG1_DUMMY_15_MASK    0x8000
+#define    RTL8367C_PORT7_LED_ACTIVE_LOW_OFFSET    12
+#define    RTL8367C_PORT7_LED_ACTIVE_LOW_MASK    0x7000
+#define    RTL8367C_LED_ACTIVE_LOW_CFG1_DUMMY_11_OFFSET    11
+#define    RTL8367C_LED_ACTIVE_LOW_CFG1_DUMMY_11_MASK    0x800
+#define    RTL8367C_PORT6_LED_ACTIVE_LOW_OFFSET    8
+#define    RTL8367C_PORT6_LED_ACTIVE_LOW_MASK    0x700
+#define    RTL8367C_DUMMY_1b0f_b_OFFSET    7
+#define    RTL8367C_DUMMY_1b0f_b_MASK    0x80
+#define    RTL8367C_PORT5_LED_ACTIVE_LOW_OFFSET    4
+#define    RTL8367C_PORT5_LED_ACTIVE_LOW_MASK    0x70
+#define    RTL8367C_DUMMY_1b0f_a_OFFSET    3
+#define    RTL8367C_DUMMY_1b0f_a_MASK    0x8
+#define    RTL8367C_PORT4_LED_ACTIVE_LOW_OFFSET    0
+#define    RTL8367C_PORT4_LED_ACTIVE_LOW_MASK    0x7
+
+#define    RTL8367C_REG_LED_ACTIVE_LOW_CFG2    0x1b10
+#define    RTL8367C_DUMMY_1b10_b_OFFSET    7
+#define    RTL8367C_DUMMY_1b10_b_MASK    0xFF80
+#define    RTL8367C_PORT9_LED_ACTIVE_LOW_OFFSET    4
+#define    RTL8367C_PORT9_LED_ACTIVE_LOW_MASK    0x70
+#define    RTL8367C_DUMMY_1b10_a_OFFSET    3
+#define    RTL8367C_DUMMY_1b10_a_MASK    0x8
+#define    RTL8367C_PORT8_LED_ACTIVE_LOW_OFFSET    0
+#define    RTL8367C_PORT8_LED_ACTIVE_LOW_MASK    0x7
+
+#define    RTL8367C_REG_SEL_RTCT_PARA    0x1b21
+#define    RTL8367C_DO_RTCT_COMMAND_OFFSET    15
+#define    RTL8367C_DO_RTCT_COMMAND_MASK    0x8000
+#define    RTL8367C_SEL_RTCT_PARA_DUMMY_OFFSET    12
+#define    RTL8367C_SEL_RTCT_PARA_DUMMY_MASK    0x7000
+#define    RTL8367C_SEL_RTCT_RLSTLED_TIME_OFFSET    10
+#define    RTL8367C_SEL_RTCT_RLSTLED_TIME_MASK    0xC00
+#define    RTL8367C_SEL_RTCT_TEST_LED_TIME_OFFSET    8
+#define    RTL8367C_SEL_RTCT_TEST_LED_TIME_MASK    0x300
+#define    RTL8367C_EN_SCAN_RTCT_OFFSET    7
+#define    RTL8367C_EN_SCAN_RTCT_MASK    0x80
+#define    RTL8367C_EN_RTCT_TIMOUT_OFFSET    6
+#define    RTL8367C_EN_RTCT_TIMOUT_MASK    0x40
+#define    RTL8367C_EN_ALL_RTCT_OFFSET    5
+#define    RTL8367C_EN_ALL_RTCT_MASK    0x20
+#define    RTL8367C_SEL_RTCT_PLE_WID_OFFSET    0
+#define    RTL8367C_SEL_RTCT_PLE_WID_MASK    0x1F
+
+#define    RTL8367C_REG_RTCT_ENABLE    0x1b22
+#define    RTL8367C_RTCT_ENABLE_DUMMY_OFFSET    8
+#define    RTL8367C_RTCT_ENABLE_DUMMY_MASK    0xFF00
+#define    RTL8367C_RTCT_ENABLE_PORT_MASK_OFFSET    0
+#define    RTL8367C_RTCT_ENABLE_PORT_MASK_MASK    0xFF
+
+#define    RTL8367C_REG_RTCT_TIMEOUT    0x1b23
+
+#define    RTL8367C_REG_PARA_LED_IO_EN1    0x1b24
+#define    RTL8367C_LED1_PARA_P07_00_OFFSET    8
+#define    RTL8367C_LED1_PARA_P07_00_MASK    0xFF00
+#define    RTL8367C_LED0_PARA_P07_00_OFFSET    0
+#define    RTL8367C_LED0_PARA_P07_00_MASK    0xFF
+
+#define    RTL8367C_REG_PARA_LED_IO_EN2    0x1b25
+#define    RTL8367C_DUMMY_15_8_OFFSET    8
+#define    RTL8367C_DUMMY_15_8_MASK    0xFF00
+#define    RTL8367C_LED2_PARA_P07_00_OFFSET    0
+#define    RTL8367C_LED2_PARA_P07_00_MASK    0xFF
+
+#define    RTL8367C_REG_SCAN0_LED_IO_EN1    0x1b26
+#define    RTL8367C_SCAN0_LED_IO_EN1_DUMMY_OFFSET    3
+#define    RTL8367C_SCAN0_LED_IO_EN1_DUMMY_MASK    0xFFF8
+#define    RTL8367C_LED_LOOP_DET_BUZZER_EN_OFFSET    2
+#define    RTL8367C_LED_LOOP_DET_BUZZER_EN_MASK    0x4
+#define    RTL8367C_LED_SERI_DATA_EN_OFFSET    1
+#define    RTL8367C_LED_SERI_DATA_EN_MASK    0x2
+#define    RTL8367C_LED_SERI_CLK_EN_OFFSET    0
+#define    RTL8367C_LED_SERI_CLK_EN_MASK    0x1
+
+#define    RTL8367C_REG_SCAN1_LED_IO_EN2    0x1b27
+#define    RTL8367C_LED_SCAN1_BI_PORT_EN_OFFSET    8
+#define    RTL8367C_LED_SCAN1_BI_PORT_EN_MASK    0xFF00
+#define    RTL8367C_LED_SCAN1_BI_STA_EN_OFFSET    7
+#define    RTL8367C_LED_SCAN1_BI_STA_EN_MASK    0x80
+#define    RTL8367C_SCAN1_LED_IO_EN2_DUMMY_0_OFFSET    6
+#define    RTL8367C_SCAN1_LED_IO_EN2_DUMMY_0_MASK    0x40
+#define    RTL8367C_LED_SCAN1_SI_PORT_EN_OFFSET    2
+#define    RTL8367C_LED_SCAN1_SI_PORT_EN_MASK    0x3C
+#define    RTL8367C_LED_SCAN1_SI_STA_EN_OFFSET    0
+#define    RTL8367C_LED_SCAN1_SI_STA_EN_MASK    0x3
+
+#define    RTL8367C_REG_LPI_LED_OPT1    0x1b28
+#define    RTL8367C_LPI_TAG4_OFFSET    12
+#define    RTL8367C_LPI_TAG4_MASK    0xF000
+#define    RTL8367C_LPI_TAG3_OFFSET    8
+#define    RTL8367C_LPI_TAG3_MASK    0xF00
+#define    RTL8367C_LPI_TAG2_OFFSET    4
+#define    RTL8367C_LPI_TAG2_MASK    0xF0
+#define    RTL8367C_LPI_TAG1_OFFSET    0
+#define    RTL8367C_LPI_TAG1_MASK    0xF
+
+#define    RTL8367C_REG_LPI_LED_OPT2    0x1b29
+#define    RTL8367C_LPI_LED_OPT2_DUMMY_OFFSET    15
+#define    RTL8367C_LPI_LED_OPT2_DUMMY_MASK    0x8000
+#define    RTL8367C_LPI_LED2_WEAK_OFFSET    14
+#define    RTL8367C_LPI_LED2_WEAK_MASK    0x4000
+#define    RTL8367C_LPI_LED1_WEAK_OFFSET    13
+#define    RTL8367C_LPI_LED1_WEAK_MASK    0x2000
+#define    RTL8367C_LPI_LED0_WEAK_OFFSET    12
+#define    RTL8367C_LPI_LED0_WEAK_MASK    0x1000
+#define    RTL8367C_LPI_LED2_OFFSET    11
+#define    RTL8367C_LPI_LED2_MASK    0x800
+#define    RTL8367C_LPI_LED1_OFFSET    10
+#define    RTL8367C_LPI_LED1_MASK    0x400
+#define    RTL8367C_LPI_LED0_OFFSET    9
+#define    RTL8367C_LPI_LED0_MASK    0x200
+#define    RTL8367C_LPI_TAG8_OFFSET    8
+#define    RTL8367C_LPI_TAG8_MASK    0x100
+#define    RTL8367C_LPI_TAG7_OFFSET    6
+#define    RTL8367C_LPI_TAG7_MASK    0xC0
+#define    RTL8367C_LPI_TAG6_OFFSET    4
+#define    RTL8367C_LPI_TAG6_MASK    0x30
+#define    RTL8367C_LPI_TAG5_OFFSET    0
+#define    RTL8367C_LPI_TAG5_MASK    0xF
+
+#define    RTL8367C_REG_LPI_LED_OPT3    0x1b2a
+#define    RTL8367C_LPI_LED_OPT3_DUMMY_OFFSET    3
+#define    RTL8367C_LPI_LED_OPT3_DUMMY_MASK    0xFFF8
+#define    RTL8367C_RESTORE_LED_RATE_SEL_OFFSET    1
+#define    RTL8367C_RESTORE_LED_RATE_SEL_MASK    0x6
+#define    RTL8367C_RESTORE_LED_SEL_OFFSET    0
+#define    RTL8367C_RESTORE_LED_SEL_MASK    0x1
+
+#define    RTL8367C_REG_P0_LED_MUX    0x1b2b
+#define    RTL8367C_CFG_P0_LED2_MUX_OFFSET    10
+#define    RTL8367C_CFG_P0_LED2_MUX_MASK    0x7C00
+#define    RTL8367C_CFG_P0_LED1_MUX_OFFSET    5
+#define    RTL8367C_CFG_P0_LED1_MUX_MASK    0x3E0
+#define    RTL8367C_CFG_P0_LED0_MUX_OFFSET    0
+#define    RTL8367C_CFG_P0_LED0_MUX_MASK    0x1F
+
+#define    RTL8367C_REG_P1_LED_MUX    0x1b2c
+#define    RTL8367C_CFG_P1_LED2_MUX_OFFSET    10
+#define    RTL8367C_CFG_P1_LED2_MUX_MASK    0x7C00
+#define    RTL8367C_CFG_P1_LED1_MUX_OFFSET    5
+#define    RTL8367C_CFG_P1_LED1_MUX_MASK    0x3E0
+#define    RTL8367C_CFG_P1_LED0_MUX_OFFSET    0
+#define    RTL8367C_CFG_P1_LED0_MUX_MASK    0x1F
+
+#define    RTL8367C_REG_P2_LED_MUX    0x1b2d
+#define    RTL8367C_CFG_P2_LED2_MUX_OFFSET    10
+#define    RTL8367C_CFG_P2_LED2_MUX_MASK    0x7C00
+#define    RTL8367C_CFG_P2_LED1_MUX_OFFSET    5
+#define    RTL8367C_CFG_P2_LED1_MUX_MASK    0x3E0
+#define    RTL8367C_CFG_P2_LED0_MUX_OFFSET    0
+#define    RTL8367C_CFG_P2_LED0_MUX_MASK    0x1F
+
+#define    RTL8367C_REG_P3_LED_MUX    0x1b2e
+#define    RTL8367C_CFG_P3_LED2_MUX_OFFSET    10
+#define    RTL8367C_CFG_P3_LED2_MUX_MASK    0x7C00
+#define    RTL8367C_CFG_P3_LED1_MUX_OFFSET    5
+#define    RTL8367C_CFG_P3_LED1_MUX_MASK    0x3E0
+#define    RTL8367C_CFG_P3_LED0_MUX_OFFSET    0
+#define    RTL8367C_CFG_P3_LED0_MUX_MASK    0x1F
+
+#define    RTL8367C_REG_P4_LED_MUX    0x1b2f
+#define    RTL8367C_CFG_P4_LED2_MUX_OFFSET    10
+#define    RTL8367C_CFG_P4_LED2_MUX_MASK    0x7C00
+#define    RTL8367C_CFG_P4_LED1_MUX_OFFSET    5
+#define    RTL8367C_CFG_P4_LED1_MUX_MASK    0x3E0
+#define    RTL8367C_CFG_P4_LED0_MUX_OFFSET    0
+#define    RTL8367C_CFG_P4_LED0_MUX_MASK    0x1F
+
+#define    RTL8367C_REG_LED0_DATA_CTRL    0x1b30
+#define    RTL8367C_CFG_DATA_LED0_SEL_OFFSET    6
+#define    RTL8367C_CFG_DATA_LED0_SEL_MASK    0x40
+#define    RTL8367C_CFG_DATA_LED0_ACT_OFFSET    4
+#define    RTL8367C_CFG_DATA_LED0_ACT_MASK    0x30
+#define    RTL8367C_CFG_DATA_LED0_SPD_OFFSET    0
+#define    RTL8367C_CFG_DATA_LED0_SPD_MASK    0xF
+
+#define    RTL8367C_REG_LED1_DATA_CTRL    0x1b31
+#define    RTL8367C_CFG_DATA_LED1_SEL_OFFSET    6
+#define    RTL8367C_CFG_DATA_LED1_SEL_MASK    0x40
+#define    RTL8367C_CFG_DATA_LED1_ACT_OFFSET    4
+#define    RTL8367C_CFG_DATA_LED1_ACT_MASK    0x30
+#define    RTL8367C_CFG_DATA_LED1_SPD_OFFSET    0
+#define    RTL8367C_CFG_DATA_LED1_SPD_MASK    0xF
+
+#define    RTL8367C_REG_LED2_DATA_CTRL    0x1b32
+#define    RTL8367C_CFG_DATA_LED2_SEL_OFFSET    6
+#define    RTL8367C_CFG_DATA_LED2_SEL_MASK    0x40
+#define    RTL8367C_CFG_DATA_LED2_ACT_OFFSET    4
+#define    RTL8367C_CFG_DATA_LED2_ACT_MASK    0x30
+#define    RTL8367C_CFG_DATA_LED2_SPD_OFFSET    0
+#define    RTL8367C_CFG_DATA_LED2_SPD_MASK    0xF
+
+#define    RTL8367C_REG_PARA_LED_IO_EN3    0x1b33
+#define    RTL8367C_dummy_1b33a_OFFSET    6
+#define    RTL8367C_dummy_1b33a_MASK    0xFFC0
+#define    RTL8367C_LED2_PARA_P09_08_OFFSET    4
+#define    RTL8367C_LED2_PARA_P09_08_MASK    0x30
+#define    RTL8367C_LED1_PARA_P09_08_OFFSET    2
+#define    RTL8367C_LED1_PARA_P09_08_MASK    0xC
+#define    RTL8367C_LED0_PARA_P09_08_OFFSET    0
+#define    RTL8367C_LED0_PARA_P09_08_MASK    0x3
+
+#define    RTL8367C_REG_SCAN1_LED_IO_EN3    0x1b34
+#define    RTL8367C_dummy_1b34a_OFFSET    3
+#define    RTL8367C_dummy_1b34a_MASK    0xFFF8
+#define    RTL8367C_LED_SCAN1_BI_PORT9_8_EN_OFFSET    1
+#define    RTL8367C_LED_SCAN1_BI_PORT9_8_EN_MASK    0x6
+#define    RTL8367C_LED_SCAN1_SI_PORT9_8_EN_OFFSET    0
+#define    RTL8367C_LED_SCAN1_SI_PORT9_8_EN_MASK    0x1
+
+#define    RTL8367C_REG_P5_LED_MUX    0x1b35
+#define    RTL8367C_CFG_P5_LED2_MUX_OFFSET    10
+#define    RTL8367C_CFG_P5_LED2_MUX_MASK    0x7C00
+#define    RTL8367C_CFG_P5_LED1_MUX_OFFSET    5
+#define    RTL8367C_CFG_P5_LED1_MUX_MASK    0x3E0
+#define    RTL8367C_CFG_P5_LED0_MUX_OFFSET    0
+#define    RTL8367C_CFG_P5_LED0_MUX_MASK    0x1F
+
+#define    RTL8367C_REG_P6_LED_MUX    0x1b36
+#define    RTL8367C_CFG_P6_LED2_MUX_OFFSET    10
+#define    RTL8367C_CFG_P6_LED2_MUX_MASK    0x7C00
+#define    RTL8367C_CFG_P6_LED1_MUX_OFFSET    5
+#define    RTL8367C_CFG_P6_LED1_MUX_MASK    0x3E0
+#define    RTL8367C_CFG_P6_LED0_MUX_OFFSET    0
+#define    RTL8367C_CFG_P6_LED0_MUX_MASK    0x1F
+
+#define    RTL8367C_REG_P7_LED_MUX    0x1b37
+#define    RTL8367C_CFG_P7_LED2_MUX_OFFSET    10
+#define    RTL8367C_CFG_P7_LED2_MUX_MASK    0x7C00
+#define    RTL8367C_CFG_P7_LED1_MUX_OFFSET    5
+#define    RTL8367C_CFG_P7_LED1_MUX_MASK    0x3E0
+#define    RTL8367C_CFG_P7_LED0_MUX_OFFSET    0
+#define    RTL8367C_CFG_P7_LED0_MUX_MASK    0x1F
+
+#define    RTL8367C_REG_P8_LED_MUX    0x1b38
+#define    RTL8367C_CFG_P8_LED2_MUX_OFFSET    10
+#define    RTL8367C_CFG_P8_LED2_MUX_MASK    0x7C00
+#define    RTL8367C_CFG_P8_LED1_MUX_OFFSET    5
+#define    RTL8367C_CFG_P8_LED1_MUX_MASK    0x3E0
+#define    RTL8367C_CFG_P8_LED0_MUX_OFFSET    0
+#define    RTL8367C_CFG_P8_LED0_MUX_MASK    0x1F
+
+#define    RTL8367C_REG_P9_LED_MUX    0x1b39
+#define    RTL8367C_CFG_P9_LED2_MUX_OFFSET    10
+#define    RTL8367C_CFG_P9_LED2_MUX_MASK    0x7C00
+#define    RTL8367C_CFG_P9_LED1_MUX_OFFSET    5
+#define    RTL8367C_CFG_P9_LED1_MUX_MASK    0x3E0
+#define    RTL8367C_CFG_P9_LED0_MUX_OFFSET    0
+#define    RTL8367C_CFG_P9_LED0_MUX_MASK    0x1F
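+
+/*
+ * Editor's sketch: the per-port LED mux registers are split into two runs,
+ * P0-P4 at 0x1b2b-0x1b2f and P5-P9 at 0x1b35-0x1b39, with the LED data
+ * control and IO-enable registers in between.  A small lookup hides the
+ * discontinuity; all ten registers share the P0 field layout.
+ */
+static inline unsigned int rtl8367c_led_mux_reg(unsigned int port)
+{
+    if (port < 5)
+        return RTL8367C_REG_P0_LED_MUX + port;
+    return RTL8367C_REG_P5_LED_MUX + (port - 5);
+}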
+
+#define    RTL8367C_REG_SERIAL_LED_CTRL    0x1b3a
+#define    RTL8367C_SERIAL_LED_SHIFT_SEQUENCE_OFFSET    13
+#define    RTL8367C_SERIAL_LED_SHIFT_SEQUENCE_MASK    0x6000
+#define    RTL8367C_SERIAL_LED_SHIFT_SEQUENCE_EN_OFFSET    12
+#define    RTL8367C_SERIAL_LED_SHIFT_SEQUENCE_EN_MASK    0x1000
+#define    RTL8367C_SERIAL_LED_GROUP_NUM_OFFSET    10
+#define    RTL8367C_SERIAL_LED_GROUP_NUM_MASK    0xC00
+#define    RTL8367C_SERIAL_LED_PORT_EN_OFFSET    0
+#define    RTL8367C_SERIAL_LED_PORT_EN_MASK    0x3FF
+
+/* (16'h1c00)IGMP_EAV */
+
+#define    RTL8367C_REG_IGMP_MLD_CFG0    0x1c00
+#define    RTL8367C_IGMP_MLD_PORTISO_LEAKY_OFFSET    15
+#define    RTL8367C_IGMP_MLD_PORTISO_LEAKY_MASK    0x8000
+#define    RTL8367C_IGMP_MLD_VLAN_LEAKY_OFFSET    14
+#define    RTL8367C_IGMP_MLD_VLAN_LEAKY_MASK    0x4000
+#define    RTL8367C_IGMP_MLD_DISCARD_STORM_FILTER_OFFSET    13
+#define    RTL8367C_IGMP_MLD_DISCARD_STORM_FILTER_MASK    0x2000
+#define    RTL8367C_REPORT_FORWARD_OFFSET    12
+#define    RTL8367C_REPORT_FORWARD_MASK    0x1000
+#define    RTL8367C_ROBURSTNESS_VAR_OFFSET    9
+#define    RTL8367C_ROBURSTNESS_VAR_MASK    0xE00
+#define    RTL8367C_LEAVE_SUPPRESSION_OFFSET    8
+#define    RTL8367C_LEAVE_SUPPRESSION_MASK    0x100
+#define    RTL8367C_REPORT_SUPPRESSION_OFFSET    7
+#define    RTL8367C_REPORT_SUPPRESSION_MASK    0x80
+#define    RTL8367C_LEAVE_TIMER_OFFSET    4
+#define    RTL8367C_LEAVE_TIMER_MASK    0x70
+#define    RTL8367C_FAST_LEAVE_EN_OFFSET    3
+#define    RTL8367C_FAST_LEAVE_EN_MASK    0x8
+#define    RTL8367C_CKS_ERR_OP_OFFSET    1
+#define    RTL8367C_CKS_ERR_OP_MASK    0x6
+#define    RTL8367C_IGMP_MLD_EN_OFFSET    0
+#define    RTL8367C_IGMP_MLD_EN_MASK    0x1
+
+#define    RTL8367C_REG_IGMP_MLD_CFG1    0x1c01
+#define    RTL8367C_DROP_LEAVE_ZERO_OFFSET    2
+#define    RTL8367C_DROP_LEAVE_ZERO_MASK    0x4
+#define    RTL8367C_TABLE_FULL_OP_OFFSET    0
+#define    RTL8367C_TABLE_FULL_OP_MASK    0x3
+
+#define    RTL8367C_REG_IGMP_MLD_CFG2    0x1c02
+
+#define    RTL8367C_REG_IGMP_DYNAMIC_ROUTER_PORT    0x1c03
+#define    RTL8367C_D_ROUTER_PORT_2_OFFSET    11
+#define    RTL8367C_D_ROUTER_PORT_2_MASK    0x7800
+#define    RTL8367C_D_ROUTER_PORT_TMR_2_OFFSET    8
+#define    RTL8367C_D_ROUTER_PORT_TMR_2_MASK    0x700
+#define    RTL8367C_D_ROUTER_PORT_1_OFFSET    3
+#define    RTL8367C_D_ROUTER_PORT_1_MASK    0x78
+#define    RTL8367C_D_ROUTER_PORT_TMR_1_OFFSET    0
+#define    RTL8367C_D_ROUTER_PORT_TMR_1_MASK    0x7
+
+#define    RTL8367C_REG_IGMP_STATIC_ROUTER_PORT    0x1c04
+#define    RTL8367C_IGMP_STATIC_ROUTER_PORT_OFFSET    0
+#define    RTL8367C_IGMP_STATIC_ROUTER_PORT_MASK    0x7FF
+
+#define    RTL8367C_REG_IGMP_PORT0_CONTROL    0x1c05
+#define    RTL8367C_IGMP_PORT0_CONTROL_ALLOW_QUERY_OFFSET    14
+#define    RTL8367C_IGMP_PORT0_CONTROL_ALLOW_QUERY_MASK    0x4000
+#define    RTL8367C_IGMP_PORT0_CONTROL_ALLOW_REPORT_OFFSET    13
+#define    RTL8367C_IGMP_PORT0_CONTROL_ALLOW_REPORT_MASK    0x2000
+#define    RTL8367C_IGMP_PORT0_CONTROL_ALLOW_LEAVE_OFFSET    12
+#define    RTL8367C_IGMP_PORT0_CONTROL_ALLOW_LEAVE_MASK    0x1000
+#define    RTL8367C_IGMP_PORT0_CONTROL_ALLOW_MRP_OFFSET    11
+#define    RTL8367C_IGMP_PORT0_CONTROL_ALLOW_MRP_MASK    0x800
+#define    RTL8367C_IGMP_PORT0_CONTROL_ALLOW_MC_DATA_OFFSET    10
+#define    RTL8367C_IGMP_PORT0_CONTROL_ALLOW_MC_DATA_MASK    0x400
+#define    RTL8367C_IGMP_PORT0_CONTROL_MLDv2_OP_OFFSET    8
+#define    RTL8367C_IGMP_PORT0_CONTROL_MLDv2_OP_MASK    0x300
+#define    RTL8367C_IGMP_PORT0_CONTROL_MLDv1_OP_OFFSET    6
+#define    RTL8367C_IGMP_PORT0_CONTROL_MLDv1_OP_MASK    0xC0
+#define    RTL8367C_IGMP_PORT0_CONTROL_IGMPV3_OP_OFFSET    4
+#define    RTL8367C_IGMP_PORT0_CONTROL_IGMPV3_OP_MASK    0x30
+#define    RTL8367C_IGMP_PORT0_CONTROL_IGMPV2_OP_OFFSET    2
+#define    RTL8367C_IGMP_PORT0_CONTROL_IGMPV2_OP_MASK    0xC
+#define    RTL8367C_IGMP_PORT0_CONTROL_IGMPV1_OP_OFFSET    0
+#define    RTL8367C_IGMP_PORT0_CONTROL_IGMPV1_OP_MASK    0x3
+
+#define    RTL8367C_REG_IGMP_PORT1_CONTROL    0x1c06
+#define    RTL8367C_IGMP_PORT1_CONTROL_ALLOW_QUERY_OFFSET    14
+#define    RTL8367C_IGMP_PORT1_CONTROL_ALLOW_QUERY_MASK    0x4000
+#define    RTL8367C_IGMP_PORT1_CONTROL_ALLOW_REPORT_OFFSET    13
+#define    RTL8367C_IGMP_PORT1_CONTROL_ALLOW_REPORT_MASK    0x2000
+#define    RTL8367C_IGMP_PORT1_CONTROL_ALLOW_LEAVE_OFFSET    12
+#define    RTL8367C_IGMP_PORT1_CONTROL_ALLOW_LEAVE_MASK    0x1000
+#define    RTL8367C_IGMP_PORT1_CONTROL_ALLOW_MRP_OFFSET    11
+#define    RTL8367C_IGMP_PORT1_CONTROL_ALLOW_MRP_MASK    0x800
+#define    RTL8367C_IGMP_PORT1_CONTROL_ALLOW_MC_DATA_OFFSET    10
+#define    RTL8367C_IGMP_PORT1_CONTROL_ALLOW_MC_DATA_MASK    0x400
+#define    RTL8367C_IGMP_PORT1_CONTROL_MLDv2_OP_OFFSET    8
+#define    RTL8367C_IGMP_PORT1_CONTROL_MLDv2_OP_MASK    0x300
+#define    RTL8367C_IGMP_PORT1_CONTROL_MLDv1_OP_OFFSET    6
+#define    RTL8367C_IGMP_PORT1_CONTROL_MLDv1_OP_MASK    0xC0
+#define    RTL8367C_IGMP_PORT1_CONTROL_IGMPV3_OP_OFFSET    4
+#define    RTL8367C_IGMP_PORT1_CONTROL_IGMPV3_OP_MASK    0x30
+#define    RTL8367C_IGMP_PORT1_CONTROL_IGMPV2_OP_OFFSET    2
+#define    RTL8367C_IGMP_PORT1_CONTROL_IGMPV2_OP_MASK    0xC
+#define    RTL8367C_IGMP_PORT1_CONTROL_IGMPV1_OP_OFFSET    0
+#define    RTL8367C_IGMP_PORT1_CONTROL_IGMPV1_OP_MASK    0x3
+
+#define    RTL8367C_REG_IGMP_PORT2_CONTROL    0x1c07
+#define    RTL8367C_IGMP_PORT2_CONTROL_ALLOW_QUERY_OFFSET    14
+#define    RTL8367C_IGMP_PORT2_CONTROL_ALLOW_QUERY_MASK    0x4000
+#define    RTL8367C_IGMP_PORT2_CONTROL_ALLOW_REPORT_OFFSET    13
+#define    RTL8367C_IGMP_PORT2_CONTROL_ALLOW_REPORT_MASK    0x2000
+#define    RTL8367C_IGMP_PORT2_CONTROL_ALLOW_LEAVE_OFFSET    12
+#define    RTL8367C_IGMP_PORT2_CONTROL_ALLOW_LEAVE_MASK    0x1000
+#define    RTL8367C_IGMP_PORT2_CONTROL_ALLOW_MRP_OFFSET    11
+#define    RTL8367C_IGMP_PORT2_CONTROL_ALLOW_MRP_MASK    0x800
+#define    RTL8367C_IGMP_PORT2_CONTROL_ALLOW_MC_DATA_OFFSET    10
+#define    RTL8367C_IGMP_PORT2_CONTROL_ALLOW_MC_DATA_MASK    0x400
+#define    RTL8367C_IGMP_PORT2_CONTROL_MLDv2_OP_OFFSET    8
+#define    RTL8367C_IGMP_PORT2_CONTROL_MLDv2_OP_MASK    0x300
+#define    RTL8367C_IGMP_PORT2_CONTROL_MLDv1_OP_OFFSET    6
+#define    RTL8367C_IGMP_PORT2_CONTROL_MLDv1_OP_MASK    0xC0
+#define    RTL8367C_IGMP_PORT2_CONTROL_IGMPV3_OP_OFFSET    4
+#define    RTL8367C_IGMP_PORT2_CONTROL_IGMPV3_OP_MASK    0x30
+#define    RTL8367C_IGMP_PORT2_CONTROL_IGMPV2_OP_OFFSET    2
+#define    RTL8367C_IGMP_PORT2_CONTROL_IGMPV2_OP_MASK    0xC
+#define    RTL8367C_IGMP_PORT2_CONTROL_IGMPV1_OP_OFFSET    0
+#define    RTL8367C_IGMP_PORT2_CONTROL_IGMPV1_OP_MASK    0x3
+
+#define    RTL8367C_REG_IGMP_PORT3_CONTROL    0x1c08
+#define    RTL8367C_IGMP_PORT3_CONTROL_ALLOW_QUERY_OFFSET    14
+#define    RTL8367C_IGMP_PORT3_CONTROL_ALLOW_QUERY_MASK    0x4000
+#define    RTL8367C_IGMP_PORT3_CONTROL_ALLOW_REPORT_OFFSET    13
+#define    RTL8367C_IGMP_PORT3_CONTROL_ALLOW_REPORT_MASK    0x2000
+#define    RTL8367C_IGMP_PORT3_CONTROL_ALLOW_LEAVE_OFFSET    12
+#define    RTL8367C_IGMP_PORT3_CONTROL_ALLOW_LEAVE_MASK    0x1000
+#define    RTL8367C_IGMP_PORT3_CONTROL_ALLOW_MRP_OFFSET    11
+#define    RTL8367C_IGMP_PORT3_CONTROL_ALLOW_MRP_MASK    0x800
+#define    RTL8367C_IGMP_PORT3_CONTROL_ALLOW_MC_DATA_OFFSET    10
+#define    RTL8367C_IGMP_PORT3_CONTROL_ALLOW_MC_DATA_MASK    0x400
+#define    RTL8367C_IGMP_PORT3_CONTROL_MLDv2_OP_OFFSET    8
+#define    RTL8367C_IGMP_PORT3_CONTROL_MLDv2_OP_MASK    0x300
+#define    RTL8367C_IGMP_PORT3_CONTROL_MLDv1_OP_OFFSET    6
+#define    RTL8367C_IGMP_PORT3_CONTROL_MLDv1_OP_MASK    0xC0
+#define    RTL8367C_IGMP_PORT3_CONTROL_IGMPV3_OP_OFFSET    4
+#define    RTL8367C_IGMP_PORT3_CONTROL_IGMPV3_OP_MASK    0x30
+#define    RTL8367C_IGMP_PORT3_CONTROL_IGMPV2_OP_OFFSET    2
+#define    RTL8367C_IGMP_PORT3_CONTROL_IGMPV2_OP_MASK    0xC
+#define    RTL8367C_IGMP_PORT3_CONTROL_IGMPV1_OP_OFFSET    0
+#define    RTL8367C_IGMP_PORT3_CONTROL_IGMPV1_OP_MASK    0x3
+
+#define    RTL8367C_REG_IGMP_PORT4_CONTROL    0x1c09
+#define    RTL8367C_IGMP_PORT4_CONTROL_ALLOW_QUERY_OFFSET    14
+#define    RTL8367C_IGMP_PORT4_CONTROL_ALLOW_QUERY_MASK    0x4000
+#define    RTL8367C_IGMP_PORT4_CONTROL_ALLOW_REPORT_OFFSET    13
+#define    RTL8367C_IGMP_PORT4_CONTROL_ALLOW_REPORT_MASK    0x2000
+#define    RTL8367C_IGMP_PORT4_CONTROL_ALLOW_LEAVE_OFFSET    12
+#define    RTL8367C_IGMP_PORT4_CONTROL_ALLOW_LEAVE_MASK    0x1000
+#define    RTL8367C_IGMP_PORT4_CONTROL_ALLOW_MRP_OFFSET    11
+#define    RTL8367C_IGMP_PORT4_CONTROL_ALLOW_MRP_MASK    0x800
+#define    RTL8367C_IGMP_PORT4_CONTROL_ALLOW_MC_DATA_OFFSET    10
+#define    RTL8367C_IGMP_PORT4_CONTROL_ALLOW_MC_DATA_MASK    0x400
+#define    RTL8367C_IGMP_PORT4_CONTROL_MLDv2_OP_OFFSET    8
+#define    RTL8367C_IGMP_PORT4_CONTROL_MLDv2_OP_MASK    0x300
+#define    RTL8367C_IGMP_PORT4_CONTROL_MLDv1_OP_OFFSET    6
+#define    RTL8367C_IGMP_PORT4_CONTROL_MLDv1_OP_MASK    0xC0
+#define    RTL8367C_IGMP_PORT4_CONTROL_IGMPV3_OP_OFFSET    4
+#define    RTL8367C_IGMP_PORT4_CONTROL_IGMPV3_OP_MASK    0x30
+#define    RTL8367C_IGMP_PORT4_CONTROL_IGMPV2_OP_OFFSET    2
+#define    RTL8367C_IGMP_PORT4_CONTROL_IGMPV2_OP_MASK    0xC
+#define    RTL8367C_IGMP_PORT4_CONTROL_IGMPV1_OP_OFFSET    0
+#define    RTL8367C_IGMP_PORT4_CONTROL_IGMPV1_OP_MASK    0x3
+
+#define    RTL8367C_REG_IGMP_PORT5_CONTROL    0x1c0a
+#define    RTL8367C_IGMP_PORT5_CONTROL_ALLOW_QUERY_OFFSET    14
+#define    RTL8367C_IGMP_PORT5_CONTROL_ALLOW_QUERY_MASK    0x4000
+#define    RTL8367C_IGMP_PORT5_CONTROL_ALLOW_REPORT_OFFSET    13
+#define    RTL8367C_IGMP_PORT5_CONTROL_ALLOW_REPORT_MASK    0x2000
+#define    RTL8367C_IGMP_PORT5_CONTROL_ALLOW_LEAVE_OFFSET    12
+#define    RTL8367C_IGMP_PORT5_CONTROL_ALLOW_LEAVE_MASK    0x1000
+#define    RTL8367C_IGMP_PORT5_CONTROL_ALLOW_MRP_OFFSET    11
+#define    RTL8367C_IGMP_PORT5_CONTROL_ALLOW_MRP_MASK    0x800
+#define    RTL8367C_IGMP_PORT5_CONTROL_ALLOW_MC_DATA_OFFSET    10
+#define    RTL8367C_IGMP_PORT5_CONTROL_ALLOW_MC_DATA_MASK    0x400
+#define    RTL8367C_IGMP_PORT5_CONTROL_MLDv2_OP_OFFSET    8
+#define    RTL8367C_IGMP_PORT5_CONTROL_MLDv2_OP_MASK    0x300
+#define    RTL8367C_IGMP_PORT5_CONTROL_MLDv1_OP_OFFSET    6
+#define    RTL8367C_IGMP_PORT5_CONTROL_MLDv1_OP_MASK    0xC0
+#define    RTL8367C_IGMP_PORT5_CONTROL_IGMPV3_OP_OFFSET    4
+#define    RTL8367C_IGMP_PORT5_CONTROL_IGMPV3_OP_MASK    0x30
+#define    RTL8367C_IGMP_PORT5_CONTROL_IGMPV2_OP_OFFSET    2
+#define    RTL8367C_IGMP_PORT5_CONTROL_IGMPV2_OP_MASK    0xC
+#define    RTL8367C_IGMP_PORT5_CONTROL_IGMPV1_OP_OFFSET    0
+#define    RTL8367C_IGMP_PORT5_CONTROL_IGMPV1_OP_MASK    0x3
+
+#define    RTL8367C_REG_IGMP_PORT6_CONTROL    0x1c0b
+#define    RTL8367C_IGMP_PORT6_CONTROL_ALLOW_QUERY_OFFSET    14
+#define    RTL8367C_IGMP_PORT6_CONTROL_ALLOW_QUERY_MASK    0x4000
+#define    RTL8367C_IGMP_PORT6_CONTROL_ALLOW_REPORT_OFFSET    13
+#define    RTL8367C_IGMP_PORT6_CONTROL_ALLOW_REPORT_MASK    0x2000
+#define    RTL8367C_IGMP_PORT6_CONTROL_ALLOW_LEAVE_OFFSET    12
+#define    RTL8367C_IGMP_PORT6_CONTROL_ALLOW_LEAVE_MASK    0x1000
+#define    RTL8367C_IGMP_PORT6_CONTROL_ALLOW_MRP_OFFSET    11
+#define    RTL8367C_IGMP_PORT6_CONTROL_ALLOW_MRP_MASK    0x800
+#define    RTL8367C_IGMP_PORT6_CONTROL_ALLOW_MC_DATA_OFFSET    10
+#define    RTL8367C_IGMP_PORT6_CONTROL_ALLOW_MC_DATA_MASK    0x400
+#define    RTL8367C_IGMP_PORT6_CONTROL_MLDv2_OP_OFFSET    8
+#define    RTL8367C_IGMP_PORT6_CONTROL_MLDv2_OP_MASK    0x300
+#define    RTL8367C_IGMP_PORT6_CONTROL_MLDv1_OP_OFFSET    6
+#define    RTL8367C_IGMP_PORT6_CONTROL_MLDv1_OP_MASK    0xC0
+#define    RTL8367C_IGMP_PORT6_CONTROL_IGMPV3_OP_OFFSET    4
+#define    RTL8367C_IGMP_PORT6_CONTROL_IGMPV3_OP_MASK    0x30
+#define    RTL8367C_IGMP_PORT6_CONTROL_IGMPV2_OP_OFFSET    2
+#define    RTL8367C_IGMP_PORT6_CONTROL_IGMPV2_OP_MASK    0xC
+#define    RTL8367C_IGMP_PORT6_CONTROL_IGMPV1_OP_OFFSET    0
+#define    RTL8367C_IGMP_PORT6_CONTROL_IGMPV1_OP_MASK    0x3
+
+#define    RTL8367C_REG_IGMP_PORT7_CONTROL    0x1c0c
+#define    RTL8367C_IGMP_PORT7_CONTROL_ALLOW_QUERY_OFFSET    14
+#define    RTL8367C_IGMP_PORT7_CONTROL_ALLOW_QUERY_MASK    0x4000
+#define    RTL8367C_IGMP_PORT7_CONTROL_ALLOW_REPORT_OFFSET    13
+#define    RTL8367C_IGMP_PORT7_CONTROL_ALLOW_REPORT_MASK    0x2000
+#define    RTL8367C_IGMP_PORT7_CONTROL_ALLOW_LEAVE_OFFSET    12
+#define    RTL8367C_IGMP_PORT7_CONTROL_ALLOW_LEAVE_MASK    0x1000
+#define    RTL8367C_IGMP_PORT7_CONTROL_ALLOW_MRP_OFFSET    11
+#define    RTL8367C_IGMP_PORT7_CONTROL_ALLOW_MRP_MASK    0x800
+#define    RTL8367C_IGMP_PORT7_CONTROL_ALLOW_MC_DATA_OFFSET    10
+#define    RTL8367C_IGMP_PORT7_CONTROL_ALLOW_MC_DATA_MASK    0x400
+#define    RTL8367C_IGMP_PORT7_CONTROL_MLDv2_OP_OFFSET    8
+#define    RTL8367C_IGMP_PORT7_CONTROL_MLDv2_OP_MASK    0x300
+#define    RTL8367C_IGMP_PORT7_CONTROL_MLDv1_OP_OFFSET    6
+#define    RTL8367C_IGMP_PORT7_CONTROL_MLDv1_OP_MASK    0xC0
+#define    RTL8367C_IGMP_PORT7_CONTROL_IGMPV3_OP_OFFSET    4
+#define    RTL8367C_IGMP_PORT7_CONTROL_IGMPV3_OP_MASK    0x30
+#define    RTL8367C_IGMP_PORT7_CONTROL_IGMPV2_OP_OFFSET    2
+#define    RTL8367C_IGMP_PORT7_CONTROL_IGMPV2_OP_MASK    0xC
+#define    RTL8367C_IGMP_PORT7_CONTROL_IGMPV1_OP_OFFSET    0
+#define    RTL8367C_IGMP_PORT7_CONTROL_IGMPV1_OP_MASK    0x3
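+
+/*
+ * Editor's sketch: the eight IGMP port-control registers above are
+ * contiguous (0x1c05-0x1c0c) and bit-for-bit identical, so the PORT0 field
+ * macros apply to every port once the register address is offset by the
+ * port number.  Reuses rtl8367c_field_rmw() from earlier in this file;
+ * e.g. allowing queries on port 3:
+ *
+ *    rtl8367c_igmp_port_field_set(rd, wr, 3,
+ *            RTL8367C_IGMP_PORT0_CONTROL_ALLOW_QUERY_MASK,
+ *            RTL8367C_IGMP_PORT0_CONTROL_ALLOW_QUERY_OFFSET, 1);
+ */
+static inline int rtl8367c_igmp_port_field_set(rtl8367c_rd_t rd,
+                                               rtl8367c_wr_t wr,
+                                               unsigned int port,
+                                               unsigned int mask,
+                                               unsigned int offset,
+                                               unsigned int val)
+{
+    return rtl8367c_field_rmw(rd, wr,
+                              RTL8367C_REG_IGMP_PORT0_CONTROL + port,
+                              mask, offset, val);
+}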
+
+#define    RTL8367C_REG_IGMP_PORT01_MAX_GROUP    0x1c0d
+#define    RTL8367C_PORT1_MAX_GROUP_OFFSET    8
+#define    RTL8367C_PORT1_MAX_GROUP_MASK    0xFF00
+#define    RTL8367C_PORT0_MAX_GROUP_OFFSET    0
+#define    RTL8367C_PORT0_MAX_GROUP_MASK    0xFF
+
+#define    RTL8367C_REG_IGMP_PORT23_MAX_GROUP    0x1c0e
+#define    RTL8367C_PORT3_MAX_GROUP_OFFSET    8
+#define    RTL8367C_PORT3_MAX_GROUP_MASK    0xFF00
+#define    RTL8367C_PORT2_MAX_GROUP_OFFSET    0
+#define    RTL8367C_PORT2_MAX_GROUP_MASK    0xFF
+
+#define    RTL8367C_REG_IGMP_PORT45_MAX_GROUP    0x1c0f
+#define    RTL8367C_PORT5_MAX_GROUP_OFFSET    8
+#define    RTL8367C_PORT5_MAX_GROUP_MASK    0xFF00
+#define    RTL8367C_PORT4_MAX_GROUP_OFFSET    0
+#define    RTL8367C_PORT4_MAX_GROUP_MASK    0xFF
+
+#define    RTL8367C_REG_IGMP_PORT67_MAX_GROUP    0x1c10
+#define    RTL8367C_PORT7_MAX_GROUP_OFFSET    8
+#define    RTL8367C_PORT7_MAX_GROUP_MASK    0xFF00
+#define    RTL8367C_PORT6_MAX_GROUP_OFFSET    0
+#define    RTL8367C_PORT6_MAX_GROUP_MASK    0xFF
+
+#define    RTL8367C_REG_IGMP_PORT01_CURRENT_GROUP    0x1c11
+#define    RTL8367C_PORT1_CURRENT_GROUP_OFFSET    8
+#define    RTL8367C_PORT1_CURRENT_GROUP_MASK    0xFF00
+#define    RTL8367C_PORT0_CURRENT_GROUP_OFFSET    0
+#define    RTL8367C_PORT0_CURRENT_GROUP_MASK    0xFF
+
+#define    RTL8367C_REG_IGMP_PORT23_CURRENT_GROUP    0x1c12
+#define    RTL8367C_PORT3_CURRENT_GROUP_OFFSET    8
+#define    RTL8367C_PORT3_CURRENT_GROUP_MASK    0xFF00
+#define    RTL8367C_PORT2_CURRENT_GROUP_OFFSET    0
+#define    RTL8367C_PORT2_CURRENT_GROUP_MASK    0xFF
+
+#define    RTL8367C_REG_IGMP_PORT45_CURRENT_GROUP    0x1c13
+#define    RTL8367C_PORT5_CURRENT_GROUP_OFFSET    8
+#define    RTL8367C_PORT5_CURRENT_GROUP_MASK    0xFF00
+#define    RTL8367C_PORT4_CURRENT_GROUP_OFFSET    0
+#define    RTL8367C_PORT4_CURRENT_GROUP_MASK    0xFF
+
+#define    RTL8367C_REG_IGMP_PORT67_CURRENT_GROUP    0x1c14
+#define    RTL8367C_PORT7_CURRENT_GROUP_OFFSET    8
+#define    RTL8367C_PORT7_CURRENT_GROUP_MASK    0xFF00
+#define    RTL8367C_PORT6_CURRENT_GROUP_OFFSET    0
+#define    RTL8367C_PORT6_CURRENT_GROUP_MASK    0xFF
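+
+/*
+ * Editor's sketch: both the MAX_GROUP and CURRENT_GROUP banks above pack
+ * two ports per register, even ports in bits 7:0 and odd ports in bits
+ * 15:8.  Pass RTL8367C_REG_IGMP_PORT01_MAX_GROUP or
+ * RTL8367C_REG_IGMP_PORT01_CURRENT_GROUP as base.
+ */
+static inline int rtl8367c_igmp_group_cnt_read(rtl8367c_rd_t rd,
+                                               unsigned int base,
+                                               unsigned int port,
+                                               unsigned int *cnt)
+{
+    unsigned int v;
+    int ret = rd(base + (port >> 1), &v);
+
+    if (ret)
+        return ret;
+    *cnt = (v >> ((port & 1) * 8)) & 0xFF;
+    return 0;
+}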
+
+#define    RTL8367C_REG_IGMP_MLD_CFG3    0x1c15
+#define    RTL8367C_IGMP_MLD_IP6_BYPASS_OFFSET    5
+#define    RTL8367C_IGMP_MLD_IP6_BYPASS_MASK    0x20
+#define    RTL8367C_IGMP_MLD_IP4_BYPASS_239_255_255_OFFSET    4
+#define    RTL8367C_IGMP_MLD_IP4_BYPASS_239_255_255_MASK    0x10
+#define    RTL8367C_IGMP_MLD_IP4_BYPASS_224_0_1_OFFSET    3
+#define    RTL8367C_IGMP_MLD_IP4_BYPASS_224_0_1_MASK    0x8
+#define    RTL8367C_IGMP_MLD_IP4_BYPASS_224_0_0_OFFSET    2
+#define    RTL8367C_IGMP_MLD_IP4_BYPASS_224_0_0_MASK    0x4
+#define    RTL8367C_REPORT_LEAVE_FORWARD_OFFSET    0
+#define    RTL8367C_REPORT_LEAVE_FORWARD_MASK    0x3
+
+#define    RTL8367C_REG_IGMP_MLD_CFG4    0x1c16
+#define    RTL8367C_IGMP_MLD_CFG4_OFFSET    0
+#define    RTL8367C_IGMP_MLD_CFG4_MASK    0x7FF
+
+#define    RTL8367C_REG_IGMP_GROUP_USAGE_LIST0    0x1c20
+
+#define    RTL8367C_REG_IGMP_GROUP_USAGE_LIST1    0x1c21
+
+#define    RTL8367C_REG_IGMP_GROUP_USAGE_LIST2    0x1c22
+
+#define    RTL8367C_REG_IGMP_GROUP_USAGE_LIST3    0x1c23
+
+#define    RTL8367C_REG_IGMP_GROUP_USAGE_LIST4    0x1c24
+
+#define    RTL8367C_REG_IGMP_GROUP_USAGE_LIST5    0x1c25
+
+#define    RTL8367C_REG_IGMP_GROUP_USAGE_LIST6    0x1c26
+
+#define    RTL8367C_REG_IGMP_GROUP_USAGE_LIST7    0x1c27
+
+#define    RTL8367C_REG_IGMP_GROUP_USAGE_LIST8    0x1c28
+
+#define    RTL8367C_REG_IGMP_GROUP_USAGE_LIST9    0x1c29
+
+#define    RTL8367C_REG_IGMP_GROUP_USAGE_LIST10    0x1c2a
+
+#define    RTL8367C_REG_IGMP_GROUP_USAGE_LIST11    0x1c2b
+
+#define    RTL8367C_REG_IGMP_GROUP_USAGE_LIST12    0x1c2c
+
+#define    RTL8367C_REG_IGMP_GROUP_USAGE_LIST13    0x1c2d
+
+#define    RTL8367C_REG_IGMP_GROUP_USAGE_LIST14    0x1c2e
+
+#define    RTL8367C_REG_IGMP_GROUP_USAGE_LIST15    0x1c2f
+
+#define    RTL8367C_REG_EAV_CTRL0    0x1c30
+#define    RTL8367C_EAV_CTRL0_OFFSET    0
+#define    RTL8367C_EAV_CTRL0_MASK    0xFF
+
+#define    RTL8367C_REG_EAV_CTRL1    0x1c31
+#define    RTL8367C_REMAP_EAV_PRI3_REGEN_OFFSET    9
+#define    RTL8367C_REMAP_EAV_PRI3_REGEN_MASK    0xE00
+#define    RTL8367C_REMAP_EAV_PRI2_REGEN_OFFSET    6
+#define    RTL8367C_REMAP_EAV_PRI2_REGEN_MASK    0x1C0
+#define    RTL8367C_REMAP_EAV_PRI1_REGEN_OFFSET    3
+#define    RTL8367C_REMAP_EAV_PRI1_REGEN_MASK    0x38
+#define    RTL8367C_REMAP_EAV_PRI0_REGEN_OFFSET    0
+#define    RTL8367C_REMAP_EAV_PRI0_REGEN_MASK    0x7
+
+#define    RTL8367C_REG_EAV_CTRL2    0x1c32
+#define    RTL8367C_REMAP_EAV_PRI7_REGEN_OFFSET    9
+#define    RTL8367C_REMAP_EAV_PRI7_REGEN_MASK    0xE00
+#define    RTL8367C_REMAP_EAV_PRI6_REGEN_OFFSET    6
+#define    RTL8367C_REMAP_EAV_PRI6_REGEN_MASK    0x1C0
+#define    RTL8367C_REMAP_EAV_PRI5_REGEN_OFFSET    3
+#define    RTL8367C_REMAP_EAV_PRI5_REGEN_MASK    0x38
+#define    RTL8367C_REMAP_EAV_PRI4_REGEN_OFFSET    0
+#define    RTL8367C_REMAP_EAV_PRI4_REGEN_MASK    0x7
+
+#define    RTL8367C_REG_SYS_TIME_FREQ    0x1c43
+
+#define    RTL8367C_REG_SYS_TIME_OFFSET_L    0x1c44
+
+#define    RTL8367C_REG_SYS_TIME_OFFSET_H    0x1c45
+
+#define    RTL8367C_REG_SYS_TIME_OFFSET_512NS_L    0x1c46
+
+#define    RTL8367C_REG_SYS_TIME_OFFSET_512NS_H    0x1c47
+#define    RTL8367C_SYS_TIME_OFFSET_TUNE_OFFSET    5
+#define    RTL8367C_SYS_TIME_OFFSET_TUNE_MASK    0x20
+#define    RTL8367C_SYS_TIME_OFFSET_512NS_H_SYS_TIME_OFFSET_512NS_OFFSET    0
+#define    RTL8367C_SYS_TIME_OFFSET_512NS_H_SYS_TIME_OFFSET_512NS_MASK    0x1F
+
+#define    RTL8367C_REG_SYS_TIME_SEC_TRANSIT    0x1c48
+#define    RTL8367C_SYS_TIME_SEC_TRANSIT_OFFSET    0
+#define    RTL8367C_SYS_TIME_SEC_TRANSIT_MASK    0x1
+
+#define    RTL8367C_REG_SYS_TIME_SEC_HIGH_L    0x1c49
+
+#define    RTL8367C_REG_SYS_TIME_SEC_HIGH_H    0x1c4a
+
+#define    RTL8367C_REG_SYS_TIME_512NS_L    0x1c4b
+
+#define    RTL8367C_REG_SYS_TIME_512NS_H    0x1c4c
+#define    RTL8367C_SYS_TIME_512NS_H_OFFSET    0
+#define    RTL8367C_SYS_TIME_512NS_H_MASK    0x1F
+
+#define    RTL8367C_REG_FALLBACK_CTRL    0x1c70
+#define    RTL8367C_FALLBACK_PL_DEC_EN_OFFSET    15
+#define    RTL8367C_FALLBACK_PL_DEC_EN_MASK    0x8000
+#define    RTL8367C_FALLBACK_MONITOR_TIMEOUT_IGNORE_OFFSET    14
+#define    RTL8367C_FALLBACK_MONITOR_TIMEOUT_IGNORE_MASK    0x4000
+#define    RTL8367C_FALLBACK_ERROR_RATIO_THRESHOLD_OFFSET    11
+#define    RTL8367C_FALLBACK_ERROR_RATIO_THRESHOLD_MASK    0x3800
+#define    RTL8367C_FALLBACK_MONITORMAX_OFFSET    8
+#define    RTL8367C_FALLBACK_MONITORMAX_MASK    0x700
+#define    RTL8367C_FALLBACK_MONITOR_TIMEOUT_OFFSET    0
+#define    RTL8367C_FALLBACK_MONITOR_TIMEOUT_MASK    0xFF
+
+#define    RTL8367C_REG_FALLBACK_PORT0_CFG0    0x1c71
+#define    RTL8367C_FALLBACK_PORT0_CFG0_RESET_POWER_LEVEL_OFFSET    15
+#define    RTL8367C_FALLBACK_PORT0_CFG0_RESET_POWER_LEVEL_MASK    0x8000
+#define    RTL8367C_FALLBACK_PORT0_CFG0_ENABLE_OFFSET    14
+#define    RTL8367C_FALLBACK_PORT0_CFG0_ENABLE_MASK    0x4000
+
+#define    RTL8367C_REG_FALLBACK_PORT0_CFG1    0x1c72
+
+#define    RTL8367C_REG_FALLBACK_PORT0_CFG2    0x1c73
+#define    RTL8367C_FALLBACK_PORT0_CFG2_OFFSET    0
+#define    RTL8367C_FALLBACK_PORT0_CFG2_MASK    0xFFF
+
+#define    RTL8367C_REG_FALLBACK_PORT0_CFG3    0x1c74
+#define    RTL8367C_FALLBACK_PORT0_CFG3_OFFSET    0
+#define    RTL8367C_FALLBACK_PORT0_CFG3_MASK    0xFF
+
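+/*
+ * The FALLBACK_PORT1..PORT4 blocks below (and PORT5..PORT7 at 0x1ca0)
+ * repeat the per-port CFG0..CFG3 layout defined for PORT0 above.
+ */
+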
+#define    RTL8367C_REG_FALLBACK_PORT1_CFG0    0x1c75
+#define    RTL8367C_FALLBACK_PORT1_CFG0_RESET_POWER_LEVEL_OFFSET    15
+#define    RTL8367C_FALLBACK_PORT1_CFG0_RESET_POWER_LEVEL_MASK    0x8000
+#define    RTL8367C_FALLBACK_PORT1_CFG0_ENABLE_OFFSET    14
+#define    RTL8367C_FALLBACK_PORT1_CFG0_ENABLE_MASK    0x4000
+
+#define    RTL8367C_REG_FALLBACK_PORT1_CFG1    0x1c76
+
+#define    RTL8367C_REG_FALLBACK_PORT1_CFG2    0x1c77
+#define    RTL8367C_FALLBACK_PORT1_CFG2_OFFSET    0
+#define    RTL8367C_FALLBACK_PORT1_CFG2_MASK    0xFFF
+
+#define    RTL8367C_REG_FALLBACK_PORT1_CFG3    0x1c78
+#define    RTL8367C_FALLBACK_PORT1_CFG3_OFFSET    0
+#define    RTL8367C_FALLBACK_PORT1_CFG3_MASK    0xFF
+
+#define    RTL8367C_REG_FALLBACK_PORT2_CFG0    0x1c79
+#define    RTL8367C_FALLBACK_PORT2_CFG0_RESET_POWER_LEVEL_OFFSET    15
+#define    RTL8367C_FALLBACK_PORT2_CFG0_RESET_POWER_LEVEL_MASK    0x8000
+#define    RTL8367C_FALLBACK_PORT2_CFG0_ENABLE_OFFSET    14
+#define    RTL8367C_FALLBACK_PORT2_CFG0_ENABLE_MASK    0x4000
+
+#define    RTL8367C_REG_FALLBACK_PORT2_CFG1    0x1c7a
+
+#define    RTL8367C_REG_FALLBACK_PORT2_CFG2    0x1c7b
+#define    RTL8367C_FALLBACK_PORT2_CFG2_OFFSET    0
+#define    RTL8367C_FALLBACK_PORT2_CFG2_MASK    0xFFF
+
+#define    RTL8367C_REG_FALLBACK_PORT2_CFG3    0x1c7c
+#define    RTL8367C_FALLBACK_PORT2_CFG3_OFFSET    0
+#define    RTL8367C_FALLBACK_PORT2_CFG3_MASK    0xFF
+
+#define    RTL8367C_REG_FALLBACK_PORT3_CFG0    0x1c7d
+#define    RTL8367C_FALLBACK_PORT3_CFG0_RESET_POWER_LEVEL_OFFSET    15
+#define    RTL8367C_FALLBACK_PORT3_CFG0_RESET_POWER_LEVEL_MASK    0x8000
+#define    RTL8367C_FALLBACK_PORT3_CFG0_ENABLE_OFFSET    14
+#define    RTL8367C_FALLBACK_PORT3_CFG0_ENABLE_MASK    0x4000
+
+#define    RTL8367C_REG_FALLBACK_PORT3_CFG1    0x1c7e
+
+#define    RTL8367C_REG_FALLBACK_PORT3_CFG2    0x1c7f
+#define    RTL8367C_FALLBACK_PORT3_CFG2_OFFSET    0
+#define    RTL8367C_FALLBACK_PORT3_CFG2_MASK    0xFFF
+
+#define    RTL8367C_REG_FALLBACK_PORT3_CFG3    0x1c80
+#define    RTL8367C_FALLBACK_PORT3_CFG3_OFFSET    0
+#define    RTL8367C_FALLBACK_PORT3_CFG3_MASK    0xFF
+
+#define    RTL8367C_REG_FALLBACK_PORT4_CFG0    0x1c81
+#define    RTL8367C_FALLBACK_PORT4_CFG0_RESET_POWER_LEVEL_OFFSET    15
+#define    RTL8367C_FALLBACK_PORT4_CFG0_RESET_POWER_LEVEL_MASK    0x8000
+#define    RTL8367C_FALLBACK_PORT4_CFG0_ENABLE_OFFSET    14
+#define    RTL8367C_FALLBACK_PORT4_CFG0_ENABLE_MASK    0x4000
+
+#define    RTL8367C_REG_FALLBACK_PORT4_CFG1    0x1c82
+
+#define    RTL8367C_REG_FALLBACK_PORT4_CFG2    0x1c83
+#define    RTL8367C_FALLBACK_PORT4_CFG2_OFFSET    0
+#define    RTL8367C_FALLBACK_PORT4_CFG2_MASK    0xFFF
+
+#define    RTL8367C_REG_FALLBACK_PORT4_CFG3    0x1c84
+#define    RTL8367C_FALLBACK_PORT4_CFG3_OFFSET    0
+#define    RTL8367C_FALLBACK_PORT4_CFG3_MASK    0xFF
+
+#define    RTL8367C_REG_FALLBACK_CTRL1    0x1c85
+#define    RTL8367C_FALLBACK_VALIDFLOW_OFFSET    8
+#define    RTL8367C_FALLBACK_VALIDFLOW_MASK    0xFF00
+#define    RTL8367C_FALLBACK_STOP_TMR_OFFSET    0
+#define    RTL8367C_FALLBACK_STOP_TMR_MASK    0x1
+
+#define    RTL8367C_REG_FALLBACK_CPL    0x1c86
+#define    RTL8367C_PORT4_CPL_OFFSET    4
+#define    RTL8367C_PORT4_CPL_MASK    0x10
+#define    RTL8367C_PORT3_CPL_OFFSET    3
+#define    RTL8367C_PORT3_CPL_MASK    0x8
+#define    RTL8367C_PORT2_CPL_OFFSET    2
+#define    RTL8367C_PORT2_CPL_MASK    0x4
+#define    RTL8367C_PORT1_CPL_OFFSET    1
+#define    RTL8367C_PORT1_CPL_MASK    0x2
+#define    RTL8367C_PORT0_CPL_OFFSET    0
+#define    RTL8367C_PORT0_CPL_MASK    0x1
+
+#define    RTL8367C_REG_FALLBACK_PHY_PAGE    0x1c87
+#define    RTL8367C_FALLBACK_PHY_PAGE_OFFSET    0
+#define    RTL8367C_FALLBACK_PHY_PAGE_MASK    0xFFF
+
+#define    RTL8367C_REG_FALLBACK_PHY_REG    0x1c88
+#define    RTL8367C_FALLBACK_PHY_REG_OFFSET    0
+#define    RTL8367C_FALLBACK_PHY_REG_MASK    0x1F
+
+#define    RTL8367C_REG_AFBK_INFO_X0    0x1c89
+
+#define    RTL8367C_REG_AFBK_INFO_X1    0x1c8a
+
+#define    RTL8367C_REG_AFBK_INFO_X2    0x1c8b
+
+#define    RTL8367C_REG_AFBK_INFO_X3    0x1c8c
+
+#define    RTL8367C_REG_AFBK_INFO_X4    0x1c8d
+
+#define    RTL8367C_REG_AFBK_INFO_X5    0x1c8e
+
+#define    RTL8367C_REG_AFBK_INFO_X6    0x1c8f
+
+#define    RTL8367C_REG_AFBK_INFO_X7    0x1c90
+
+#define    RTL8367C_REG_AFBK_INFO_X8    0x1c91
+
+#define    RTL8367C_REG_AFBK_INFO_X9    0x1c92
+
+#define    RTL8367C_REG_AFBK_INFO_X10    0x1c93
+
+#define    RTL8367C_REG_AFBK_INFO_X11    0x1c94
+
+#define    RTL8367C_REG_FALLBACK_PORT5_CFG0    0x1ca0
+#define    RTL8367C_FALLBACK_PORT5_CFG0_RESET_POWER_LEVEL_OFFSET    15
+#define    RTL8367C_FALLBACK_PORT5_CFG0_RESET_POWER_LEVEL_MASK    0x8000
+#define    RTL8367C_FALLBACK_PORT5_CFG0_ENABLE_OFFSET    14
+#define    RTL8367C_FALLBACK_PORT5_CFG0_ENABLE_MASK    0x4000
+
+#define    RTL8367C_REG_FALLBACK_PORT5_CFG1    0x1ca1
+
+#define    RTL8367C_REG_FALLBACK_PORT5_CFG2    0x1ca2
+#define    RTL8367C_FALLBACK_PORT5_CFG2_OFFSET    0
+#define    RTL8367C_FALLBACK_PORT5_CFG2_MASK    0xFFF
+
+#define    RTL8367C_REG_FALLBACK_PORT5_CFG3    0x1ca3
+#define    RTL8367C_FALLBACK_PORT5_CFG3_OFFSET    0
+#define    RTL8367C_FALLBACK_PORT5_CFG3_MASK    0xFF
+
+#define    RTL8367C_REG_FALLBACK_PORT6_CFG0    0x1ca4
+#define    RTL8367C_FALLBACK_PORT6_CFG0_RESET_POWER_LEVEL_OFFSET    15
+#define    RTL8367C_FALLBACK_PORT6_CFG0_RESET_POWER_LEVEL_MASK    0x8000
+#define    RTL8367C_FALLBACK_PORT6_CFG0_ENABLE_OFFSET    14
+#define    RTL8367C_FALLBACK_PORT6_CFG0_ENABLE_MASK    0x4000
+
+#define    RTL8367C_REG_FALLBACK_PORT6_CFG1    0x1ca5
+
+#define    RTL8367C_REG_FALLBACK_PORT6_CFG2    0x1ca6
+#define    RTL8367C_FALLBACK_PORT6_CFG2_OFFSET    0
+#define    RTL8367C_FALLBACK_PORT6_CFG2_MASK    0xFFF
+
+#define    RTL8367C_REG_FALLBACK_PORT6_CFG3    0x1ca7
+#define    RTL8367C_FALLBACK_PORT6_CFG3_OFFSET    0
+#define    RTL8367C_FALLBACK_PORT6_CFG3_MASK    0xFF
+
+#define    RTL8367C_REG_FALLBACK_PORT7_CFG0    0x1ca8
+#define    RTL8367C_FALLBACK_PORT7_CFG0_RESET_POWER_LEVEL_OFFSET    15
+#define    RTL8367C_FALLBACK_PORT7_CFG0_RESET_POWER_LEVEL_MASK    0x8000
+#define    RTL8367C_FALLBACK_PORT7_CFG0_ENABLE_OFFSET    14
+#define    RTL8367C_FALLBACK_PORT7_CFG0_ENABLE_MASK    0x4000
+
+#define    RTL8367C_REG_FALLBACK_PORT7_CFG1    0x1ca9
+
+#define    RTL8367C_REG_FALLBACK_PORT7_CFG2    0x1caa
+#define    RTL8367C_FALLBACK_PORT7_CFG2_OFFSET    0
+#define    RTL8367C_FALLBACK_PORT7_CFG2_MASK    0xFFF
+
+#define    RTL8367C_REG_FALLBACK_PORT7_CFG3    0x1cab
+#define    RTL8367C_FALLBACK_PORT7_CFG3_OFFSET    0
+#define    RTL8367C_FALLBACK_PORT7_CFG3_MASK    0xFF
+
+#define    RTL8367C_REG_IGMP_PORT8_CONTROL    0x1cb0
+#define    RTL8367C_IGMP_PORT8_CONTROL_ALLOW_QUERY_OFFSET    14
+#define    RTL8367C_IGMP_PORT8_CONTROL_ALLOW_QUERY_MASK    0x4000
+#define    RTL8367C_IGMP_PORT8_CONTROL_ALLOW_REPORT_OFFSET    13
+#define    RTL8367C_IGMP_PORT8_CONTROL_ALLOW_REPORT_MASK    0x2000
+#define    RTL8367C_IGMP_PORT8_CONTROL_ALLOW_LEAVE_OFFSET    12
+#define    RTL8367C_IGMP_PORT8_CONTROL_ALLOW_LEAVE_MASK    0x1000
+#define    RTL8367C_IGMP_PORT8_CONTROL_ALLOW_MRP_OFFSET    11
+#define    RTL8367C_IGMP_PORT8_CONTROL_ALLOW_MRP_MASK    0x800
+#define    RTL8367C_IGMP_PORT8_CONTROL_ALLOW_MC_DATA_OFFSET    10
+#define    RTL8367C_IGMP_PORT8_CONTROL_ALLOW_MC_DATA_MASK    0x400
+#define    RTL8367C_IGMP_PORT8_CONTROL_MLDv2_OP_OFFSET    8
+#define    RTL8367C_IGMP_PORT8_CONTROL_MLDv2_OP_MASK    0x300
+#define    RTL8367C_IGMP_PORT8_CONTROL_MLDv1_OP_OFFSET    6
+#define    RTL8367C_IGMP_PORT8_CONTROL_MLDv1_OP_MASK    0xC0
+#define    RTL8367C_IGMP_PORT8_CONTROL_IGMPV3_OP_OFFSET    4
+#define    RTL8367C_IGMP_PORT8_CONTROL_IGMPV3_OP_MASK    0x30
+#define    RTL8367C_IGMP_PORT8_CONTROL_IGMPV2_OP_OFFSET    2
+#define    RTL8367C_IGMP_PORT8_CONTROL_IGMPV2_OP_MASK    0xC
+#define    RTL8367C_IGMP_PORT8_CONTROL_IGMPV1_OP_OFFSET    0
+#define    RTL8367C_IGMP_PORT8_CONTROL_IGMPV1_OP_MASK    0x3
+
+#define    RTL8367C_REG_IGMP_PORT9_CONTROL    0x1cb1
+#define    RTL8367C_IGMP_PORT9_CONTROL_ALLOW_QUERY_OFFSET    14
+#define    RTL8367C_IGMP_PORT9_CONTROL_ALLOW_QUERY_MASK    0x4000
+#define    RTL8367C_IGMP_PORT9_CONTROL_ALLOW_REPORT_OFFSET    13
+#define    RTL8367C_IGMP_PORT9_CONTROL_ALLOW_REPORT_MASK    0x2000
+#define    RTL8367C_IGMP_PORT9_CONTROL_ALLOW_LEAVE_OFFSET    12
+#define    RTL8367C_IGMP_PORT9_CONTROL_ALLOW_LEAVE_MASK    0x1000
+#define    RTL8367C_IGMP_PORT9_CONTROL_ALLOW_MRP_OFFSET    11
+#define    RTL8367C_IGMP_PORT9_CONTROL_ALLOW_MRP_MASK    0x800
+#define    RTL8367C_IGMP_PORT9_CONTROL_ALLOW_MC_DATA_OFFSET    10
+#define    RTL8367C_IGMP_PORT9_CONTROL_ALLOW_MC_DATA_MASK    0x400
+#define    RTL8367C_IGMP_PORT9_CONTROL_MLDv2_OP_OFFSET    8
+#define    RTL8367C_IGMP_PORT9_CONTROL_MLDv2_OP_MASK    0x300
+#define    RTL8367C_IGMP_PORT9_CONTROL_MLDv1_OP_OFFSET    6
+#define    RTL8367C_IGMP_PORT9_CONTROL_MLDv1_OP_MASK    0xC0
+#define    RTL8367C_IGMP_PORT9_CONTROL_IGMPV3_OP_OFFSET    4
+#define    RTL8367C_IGMP_PORT9_CONTROL_IGMPV3_OP_MASK    0x30
+#define    RTL8367C_IGMP_PORT9_CONTROL_IGMPV2_OP_OFFSET    2
+#define    RTL8367C_IGMP_PORT9_CONTROL_IGMPV2_OP_MASK    0xC
+#define    RTL8367C_IGMP_PORT9_CONTROL_IGMPV1_OP_OFFSET    0
+#define    RTL8367C_IGMP_PORT9_CONTROL_IGMPV1_OP_MASK    0x3
+
+#define    RTL8367C_REG_IGMP_PORT10_CONTROL    0x1cb2
+#define    RTL8367C_IGMP_PORT10_CONTROL_ALLOW_QUERY_OFFSET    14
+#define    RTL8367C_IGMP_PORT10_CONTROL_ALLOW_QUERY_MASK    0x4000
+#define    RTL8367C_IGMP_PORT10_CONTROL_ALLOW_REPORT_OFFSET    13
+#define    RTL8367C_IGMP_PORT10_CONTROL_ALLOW_REPORT_MASK    0x2000
+#define    RTL8367C_IGMP_PORT10_CONTROL_ALLOW_LEAVE_OFFSET    12
+#define    RTL8367C_IGMP_PORT10_CONTROL_ALLOW_LEAVE_MASK    0x1000
+#define    RTL8367C_IGMP_PORT10_CONTROL_ALLOW_MRP_OFFSET    11
+#define    RTL8367C_IGMP_PORT10_CONTROL_ALLOW_MRP_MASK    0x800
+#define    RTL8367C_IGMP_PORT10_CONTROL_ALLOW_MC_DATA_OFFSET    10
+#define    RTL8367C_IGMP_PORT10_CONTROL_ALLOW_MC_DATA_MASK    0x400
+#define    RTL8367C_IGMP_PORT10_CONTROL_MLDv2_OP_OFFSET    8
+#define    RTL8367C_IGMP_PORT10_CONTROL_MLDv2_OP_MASK    0x300
+#define    RTL8367C_IGMP_PORT10_CONTROL_MLDv1_OP_OFFSET    6
+#define    RTL8367C_IGMP_PORT10_CONTROL_MLDv1_OP_MASK    0xC0
+#define    RTL8367C_IGMP_PORT10_CONTROL_IGMPV3_OP_OFFSET    4
+#define    RTL8367C_IGMP_PORT10_CONTROL_IGMPV3_OP_MASK    0x30
+#define    RTL8367C_IGMP_PORT10_CONTROL_IGMPV2_OP_OFFSET    2
+#define    RTL8367C_IGMP_PORT10_CONTROL_IGMPV2_OP_MASK    0xC
+#define    RTL8367C_IGMP_PORT10_CONTROL_IGMPV1_OP_OFFSET    0
+#define    RTL8367C_IGMP_PORT10_CONTROL_IGMPV1_OP_MASK    0x3
+
+#define    RTL8367C_REG_IGMP_PORT89_MAX_GROUP    0x1cb3
+#define    RTL8367C_PORT9_MAX_GROUP_OFFSET    8
+#define    RTL8367C_PORT9_MAX_GROUP_MASK    0xFF00
+#define    RTL8367C_PORT8_MAX_GROUP_OFFSET    0
+#define    RTL8367C_PORT8_MAX_GROUP_MASK    0xFF
+
+#define    RTL8367C_REG_IGMP_PORT10_MAX_GROUP    0x1cb4
+#define    RTL8367C_IGMP_PORT10_MAX_GROUP_OFFSET    0
+#define    RTL8367C_IGMP_PORT10_MAX_GROUP_MASK    0xFF
+
+#define    RTL8367C_REG_IGMP_PORT89_CURRENT_GROUP    0x1cb5
+#define    RTL8367C_PORT9_CURRENT_GROUP_OFFSET    8
+#define    RTL8367C_PORT9_CURRENT_GROUP_MASK    0xFF00
+#define    RTL8367C_PORT8_CURRENT_GROUP_OFFSET    0
+#define    RTL8367C_PORT8_CURRENT_GROUP_MASK    0xFF
+
+#define    RTL8367C_REG_IGMP_PORT10_CURRENT_GROUP    0x1cb6
+#define    RTL8367C_IGMP_PORT10_CURRENT_GROUP_OFFSET    0
+#define    RTL8367C_IGMP_PORT10_CURRENT_GROUP_MASK    0xFF
+
+#define    RTL8367C_REG_IGMP_L3_CHECKSUM_CHECK    0x1cb7
+#define    RTL8367C_IGMP_L3_CHECKSUM_CHECK_OFFSET    0
+#define    RTL8367C_IGMP_L3_CHECKSUM_CHECK_MASK    0x1
+
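+/*
+ * Illustrative sketch, not part of the vendor sources: the *_OFFSET/*_MASK
+ * pairs in this file are consumed by read-modify-write helpers along the
+ * lines of the one below. smi_read()/smi_write() stand in for the driver's
+ * real SMI register accessors and are hypothetical names.
+ */
+extern int smi_read(unsigned int reg, unsigned int *val);
+extern int smi_write(unsigned int reg, unsigned int val);
+
+/* Clear the masked bits, then or-in the new value shifted into place. */
+static inline int rtl8367c_set_field(unsigned int reg, unsigned int mask,
+				     unsigned int offset, unsigned int val)
+{
+	unsigned int tmp;
+	int ret = smi_read(reg, &tmp);
+
+	if (ret)
+		return ret;
+	tmp = (tmp & ~mask) | ((val << offset) & mask);
+	return smi_write(reg, tmp);
+}
+
+/*
+ * For example, enabling the IGMP layer-3 checksum check defined above:
+ *   rtl8367c_set_field(RTL8367C_REG_IGMP_L3_CHECKSUM_CHECK,
+ *                      RTL8367C_IGMP_L3_CHECKSUM_CHECK_MASK,
+ *                      RTL8367C_IGMP_L3_CHECKSUM_CHECK_OFFSET, 1);
+ */
+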
+/* (16'h1d00) chip_70b_reg */
+
+#define    RTL8367C_REG_PCSXF_CFG    0x1d00
+#define    RTL8367C_PCSXF_CFG_Reserved_OFFSET    15
+#define    RTL8367C_PCSXF_CFG_Reserved_MASK    0x8000
+#define    RTL8367C_CFG_RST_RXFIFO_P7_5_OFFSET    12
+#define    RTL8367C_CFG_RST_RXFIFO_P7_5_MASK    0x7000
+#define    RTL8367C_CFG_PCSXF_OFFSET    8
+#define    RTL8367C_CFG_PCSXF_MASK    0xF00
+#define    RTL8367C_CFG_RST_RXFIFO_OFFSET    3
+#define    RTL8367C_CFG_RST_RXFIFO_MASK    0xF8
+#define    RTL8367C_CFG_COL2RXDV_OFFSET    2
+#define    RTL8367C_CFG_COL2RXDV_MASK    0x4
+#define    RTL8367C_CFG_PHY_SDET_OFFSET    0
+#define    RTL8367C_CFG_PHY_SDET_MASK    0x3
+
+#define    RTL8367C_REG_PHYID_CFG0    0x1d01
+#define    RTL8367C_CFG_PHY_BRD_MODE_P7_5_OFFSET    11
+#define    RTL8367C_CFG_PHY_BRD_MODE_P7_5_MASK    0x3800
+#define    RTL8367C_CFG_PHYAD_14C_OFFSET    10
+#define    RTL8367C_CFG_PHYAD_14C_MASK    0x400
+#define    RTL8367C_CFG_PHY_BRD_MODE_OFFSET    5
+#define    RTL8367C_CFG_PHY_BRD_MODE_MASK    0x3E0
+#define    RTL8367C_CFG_BRD_PHYAD_OFFSET    0
+#define    RTL8367C_CFG_BRD_PHYAD_MASK    0x1F
+
+#define    RTL8367C_REG_PHYID_CFG1    0x1d02
+#define    RTL8367C_CFG_MSK_MDI_OFFSET    5
+#define    RTL8367C_CFG_MSK_MDI_MASK    0x1FE0
+#define    RTL8367C_CFG_BASE_PHYAD_OFFSET    0
+#define    RTL8367C_CFG_BASE_PHYAD_MASK    0x1F
+
+#define    RTL8367C_REG_PHY_POLL_CFG0    0x1d03
+#define    RTL8367C_CFG_HOTCMD_PRD_EN_OFFSET    15
+#define    RTL8367C_CFG_HOTCMD_PRD_EN_MASK    0x8000
+#define    RTL8367C_CFG_HOTCMD_EN_OFFSET    12
+#define    RTL8367C_CFG_HOTCMD_EN_MASK    0x7000
+#define    RTL8367C_CFG_POLL_PERIOD_OFFSET    8
+#define    RTL8367C_CFG_POLL_PERIOD_MASK    0xF00
+#define    RTL8367C_CFG_PERI_CMDS_RD_OFFSET    4
+#define    RTL8367C_CFG_PERI_CMDS_RD_MASK    0xF0
+#define    RTL8367C_CFG_PERI_CMDS_WR_OFFSET    0
+#define    RTL8367C_CFG_PERI_CMDS_WR_MASK    0xF
+
+#define    RTL8367C_REG_PHY_POLL_CFG1    0x1d04
+
+#define    RTL8367C_REG_PHY_POLL_CFG2    0x1d05
+
+#define    RTL8367C_REG_PHY_POLL_CFG3    0x1d06
+
+#define    RTL8367C_REG_PHY_POLL_CFG4    0x1d07
+
+#define    RTL8367C_REG_PHY_POLL_CFG5    0x1d08
+
+#define    RTL8367C_REG_PHY_POLL_CFG6    0x1d09
+
+#define    RTL8367C_REG_PHY_POLL_CFG7    0x1d0a
+
+#define    RTL8367C_REG_PHY_POLL_CFG8    0x1d0b
+
+#define    RTL8367C_REG_PHY_POLL_CFG9    0x1d0c
+
+#define    RTL8367C_REG_PHY_POLL_CFG10    0x1d0d
+
+#define    RTL8367C_REG_PHY_POLL_CFG11    0x1d0e
+
+#define    RTL8367C_REG_PHY_POLL_CFG12    0x1d0f
+
+#define    RTL8367C_REG_EFUSE_MISC    0x1d10
+#define    RTL8367C_CFG_SA_SEL_OFFSET    5
+#define    RTL8367C_CFG_SA_SEL_MASK    0x20
+#define    RTL8367C_CFG_PHYAD00_OFFSET    0
+#define    RTL8367C_CFG_PHYAD00_MASK    0x1F
+
+#define    RTL8367C_REG_SDS_MISC    0x1d11
+#define    RTL8367C_CFG_SGMII_RXFC_OFFSET    14
+#define    RTL8367C_CFG_SGMII_RXFC_MASK    0x4000
+#define    RTL8367C_CFG_SGMII_TXFC_OFFSET    13
+#define    RTL8367C_CFG_SGMII_TXFC_MASK    0x2000
+#define    RTL8367C_INB_ARB_OFFSET    12
+#define    RTL8367C_INB_ARB_MASK    0x1000
+#define    RTL8367C_CFG_MAC8_SEL_HSGMII_OFFSET    11
+#define    RTL8367C_CFG_MAC8_SEL_HSGMII_MASK    0x800
+#define    RTL8367C_CFG_SGMII_FDUP_OFFSET    10
+#define    RTL8367C_CFG_SGMII_FDUP_MASK    0x400
+#define    RTL8367C_CFG_SGMII_LINK_OFFSET    9
+#define    RTL8367C_CFG_SGMII_LINK_MASK    0x200
+#define    RTL8367C_CFG_SGMII_SPD_OFFSET    7
+#define    RTL8367C_CFG_SGMII_SPD_MASK    0x180
+#define    RTL8367C_CFG_MAC8_SEL_SGMII_OFFSET    6
+#define    RTL8367C_CFG_MAC8_SEL_SGMII_MASK    0x40
+#define    RTL8367C_CFG_INB_SEL_OFFSET    3
+#define    RTL8367C_CFG_INB_SEL_MASK    0x38
+#define    RTL8367C_CFG_SDS_MODE_18C_OFFSET    0
+#define    RTL8367C_CFG_SDS_MODE_18C_MASK    0x7
+
+#define    RTL8367C_REG_FIFO_CTRL    0x1d12
+#define    RTL8367C_CFG_LINK_DOWN_CLR_FIFO_OFFSET    11
+#define    RTL8367C_CFG_LINK_DOWN_CLR_FIFO_MASK    0x800
+#define    RTL8367C_CFG_LPBK_OFFSET    10
+#define    RTL8367C_CFG_LPBK_MASK    0x400
+#define    RTL8367C_CFG_NOT_FF_OUT_OFFSET    9
+#define    RTL8367C_CFG_NOT_FF_OUT_MASK    0x200
+#define    RTL8367C_CFG_WATER_LEVEL_FD_OFFSET    6
+#define    RTL8367C_CFG_WATER_LEVEL_FD_MASK    0x1C0
+#define    RTL8367C_CFG_WATER_LEVEL_Y2X_OFFSET    3
+#define    RTL8367C_CFG_WATER_LEVEL_Y2X_MASK    0x38
+#define    RTL8367C_CFG_WATER_LEVEL_X2Y_OFFSET    0
+#define    RTL8367C_CFG_WATER_LEVEL_X2Y_MASK    0x7
+
+#define    RTL8367C_REG_BCAM_SETTING    0x1d13
+#define    RTL8367C_CFG_BCAM_MDS_OFFSET    3
+#define    RTL8367C_CFG_BCAM_MDS_MASK    0x18
+#define    RTL8367C_CFG_BCAM_RDS_OFFSET    0
+#define    RTL8367C_CFG_BCAM_RDS_MASK    0x7
+
+#define    RTL8367C_REG_GPHY_ACS_MISC    0x1d14
+#define    RTL8367C_CFG_SEL_GPHY_SMI_OFFSET    3
+#define    RTL8367C_CFG_SEL_GPHY_SMI_MASK    0x8
+#define    RTL8367C_CFG_BRD_PHYIDX_OFFSET    0
+#define    RTL8367C_CFG_BRD_PHYIDX_MASK    0x7
+
+#define    RTL8367C_REG_GPHY_OCP_MSB_0    0x1d15
+#define    RTL8367C_CFG_CPU_OCPADR_MSB_OFFSET    6
+#define    RTL8367C_CFG_CPU_OCPADR_MSB_MASK    0xFC0
+#define    RTL8367C_CFG_DW8051_OCPADR_MSB_OFFSET    0
+#define    RTL8367C_CFG_DW8051_OCPADR_MSB_MASK    0x3F
+
+#define    RTL8367C_REG_GPHY_OCP_MSB_1    0x1d16
+#define    RTL8367C_CFG_PATCH_OCPADR_MSB_OFFSET    6
+#define    RTL8367C_CFG_PATCH_OCPADR_MSB_MASK    0xFC0
+#define    RTL8367C_CFG_PHYSTS_OCPADR_MSB_OFFSET    0
+#define    RTL8367C_CFG_PHYSTS_OCPADR_MSB_MASK    0x3F
+
+#define    RTL8367C_REG_GPHY_OCP_MSB_2    0x1d17
+#define    RTL8367C_CFG_RRCP_OCPADR_MSB_OFFSET    6
+#define    RTL8367C_CFG_RRCP_OCPADR_MSB_MASK    0xFC0
+#define    RTL8367C_CFG_RTCT_OCPADR_MSB_OFFSET    0
+#define    RTL8367C_CFG_RTCT_OCPADR_MSB_MASK    0x3F
+
+#define    RTL8367C_REG_GPHY_OCP_MSB_3    0x1d18
+#define    RTL8367C_GPHY_OCP_MSB_3_OFFSET    0
+#define    RTL8367C_GPHY_OCP_MSB_3_MASK    0x3F
+
+#define    RTL8367C_REG_GPIO_67C_I_X0    0x1d19
+
+#define    RTL8367C_REG_GPIO_67C_I_X1    0x1d1a
+
+#define    RTL8367C_REG_GPIO_67C_I_X2    0x1d1b
+
+#define    RTL8367C_REG_GPIO_67C_I_X3    0x1d1c
+#define    RTL8367C_GPIO_67C_I_X3_OFFSET    0
+#define    RTL8367C_GPIO_67C_I_X3_MASK    0x3FFF
+
+#define    RTL8367C_REG_GPIO_67C_O_X0    0x1d1d
+
+#define    RTL8367C_REG_GPIO_67C_O_X1    0x1d1e
+
+#define    RTL8367C_REG_GPIO_67C_O_X2    0x1d1f
+
+#define    RTL8367C_REG_GPIO_67C_O_X3    0x1d20
+#define    RTL8367C_GPIO_67C_O_X3_OFFSET    0
+#define    RTL8367C_GPIO_67C_O_X3_MASK    0x3FFF
+
+#define    RTL8367C_REG_GPIO_67C_OE_X0    0x1d21
+
+#define    RTL8367C_REG_GPIO_67C_OE_X1    0x1d22
+
+#define    RTL8367C_REG_GPIO_67C_OE_X2    0x1d23
+
+#define    RTL8367C_REG_GPIO_67C_OE_X3    0x1d24
+#define    RTL8367C_GPIO_67C_OE_X3_OFFSET    0
+#define    RTL8367C_GPIO_67C_OE_X3_MASK    0x3FFF
+
+#define    RTL8367C_REG_GPIO_MODE_67C_X0    0x1d25
+
+#define    RTL8367C_REG_GPIO_MODE_67C_X1    0x1d26
+
+#define    RTL8367C_REG_GPIO_MODE_67C_X2    0x1d27
+
+#define    RTL8367C_REG_GPIO_MODE_67C_X3    0x1d28
+#define    RTL8367C_GPIO_MODE_67C_X3_OFFSET    0
+#define    RTL8367C_GPIO_MODE_67C_X3_MASK    0x3FFF
+
+#define    RTL8367C_REG_WGPHY_MISC_0    0x1d29
+#define    RTL8367C_CFG_INIPHY_DISGIGA_P7_5_OFFSET    13
+#define    RTL8367C_CFG_INIPHY_DISGIGA_P7_5_MASK    0xE000
+#define    RTL8367C_CFG_INIPHY_PWRUP_OFFSET    5
+#define    RTL8367C_CFG_INIPHY_PWRUP_MASK    0x1FE0
+#define    RTL8367C_CFG_INIPHY_DISGIGA_OFFSET    0
+#define    RTL8367C_CFG_INIPHY_DISGIGA_MASK    0x1F
+
+#define    RTL8367C_REG_WGPHY_MISC_1    0x1d2a
+#define    RTL8367C_WGPHY_MISC_1_OFFSET    0
+#define    RTL8367C_WGPHY_MISC_1_MASK    0xFF
+
+#define    RTL8367C_REG_WGPHY_MISC_2    0x1d2b
+#define    RTL8367C_WGPHY_MISC_2_OFFSET    0
+#define    RTL8367C_WGPHY_MISC_2_MASK    0x3FF
+
+#define    RTL8367C_REG_CFG_AFBK_GPHY_0    0x1d2c
+#define    RTL8367C_CFG_AFBK_GPHY_0_OFFSET    0
+#define    RTL8367C_CFG_AFBK_GPHY_0_MASK    0x1F
+
+#define    RTL8367C_REG_CFG_AFBK_GPHY_1    0x1d2d
+#define    RTL8367C_CFG_AFBK_GPHY_1_OFFSET    0
+#define    RTL8367C_CFG_AFBK_GPHY_1_MASK    0xFFF
+
+#define    RTL8367C_REG_EF_SLV_CTRL_0    0x1d2e
+#define    RTL8367C_EF_SLV_BUSY_OFFSET    11
+#define    RTL8367C_EF_SLV_BUSY_MASK    0x800
+#define    RTL8367C_EF_SLV_ACK_OFFSET    10
+#define    RTL8367C_EF_SLV_ACK_MASK    0x400
+#define    RTL8367C_EF_SLV_A_OFFSET    2
+#define    RTL8367C_EF_SLV_A_MASK    0x3FC
+#define    RTL8367C_EF_SLV_WE_OFFSET    1
+#define    RTL8367C_EF_SLV_WE_MASK    0x2
+#define    RTL8367C_EF_SLV_CE_OFFSET    0
+#define    RTL8367C_EF_SLV_CE_MASK    0x1
+
+#define    RTL8367C_REG_EF_SLV_CTRL_1    0x1d2f
+
+#define    RTL8367C_REG_EF_SLV_CTRL_2    0x1d30
+
+#define    RTL8367C_REG_EFUSE_MISC_1    0x1d31
+#define    RTL8367C_EF_EN_EFUSE_OFFSET    10
+#define    RTL8367C_EF_EN_EFUSE_MASK    0x400
+#define    RTL8367C_EF_MODEL_ID_OFFSET    6
+#define    RTL8367C_EF_MODEL_ID_MASK    0x3C0
+#define    RTL8367C_EF_RSVD_OFFSET    2
+#define    RTL8367C_EF_RSVD_MASK    0x3C
+#define    RTL8367C_EF_SYS_CLK_OFFSET    0
+#define    RTL8367C_EF_SYS_CLK_MASK    0x3
+
+#define    RTL8367C_REG_IO_MISC_FUNC    0x1d32
+#define    RTL8367C_TST_MODE_OFFSET    3
+#define    RTL8367C_TST_MODE_MASK    0x8
+#define    RTL8367C_UART_EN_OFFSET    2
+#define    RTL8367C_UART_EN_MASK    0x4
+#define    RTL8367C_INT_EN_OFFSET    1
+#define    RTL8367C_INT_EN_MASK    0x2
+#define    RTL8367C_BUZ_EN_OFFSET    0
+#define    RTL8367C_BUZ_EN_MASK    0x1
+
+#define    RTL8367C_REG_HTRAM_DVS    0x1d33
+#define    RTL8367C_HTRAM_DVS_OFFSET    0
+#define    RTL8367C_HTRAM_DVS_MASK    0x1
+
+#define    RTL8367C_REG_EF_SLV_CTRL_3    0x1d34
+#define    RTL8367C_EF_SLV_CTRL_3_OFFSET    0
+#define    RTL8367C_EF_SLV_CTRL_3_MASK    0x1
+
+#define    RTL8367C_REG_INBAND_EN14C    0x1d35
+#define    RTL8367C_INBAND_EN14C_OFFSET    0
+#define    RTL8367C_INBAND_EN14C_MASK    0x1
+
+#define    RTL8367C_REG_CFG_SWR_L    0x1d36
+#define    RTL8367C_ANARG_RDY_SWR_L_OFFSET    14
+#define    RTL8367C_ANARG_RDY_SWR_L_MASK    0x4000
+#define    RTL8367C_ANARG_VALID_SWR_L_OFFSET    13
+#define    RTL8367C_ANARG_VALID_SWR_L_MASK    0x2000
+#define    RTL8367C_SAW_SWR_L_OFFSET    9
+#define    RTL8367C_SAW_SWR_L_MASK    0x1E00
+#define    RTL8367C_SAW_VALID_SWR_L_OFFSET    8
+#define    RTL8367C_SAW_VALID_SWR_L_MASK    0x100
+#define    RTL8367C_UPS_DBGO_L_OFFSET    0
+#define    RTL8367C_UPS_DBGO_L_MASK    0xFF
+
+#define    RTL8367C_REG_BTCAM_CTRL    0x1d37
+#define    RTL8367C_TCAM_RDS_OFFSET    2
+#define    RTL8367C_TCAM_RDS_MASK    0x1C
+#define    RTL8367C_TCAM_MDS_OFFSET    0
+#define    RTL8367C_TCAM_MDS_MASK    0x3
+
+#define    RTL8367C_REG_PBRAM_BISR_CTRL    0x1d38
+#define    RTL8367C_HAS_HLDRMP_MD_OFFSET    9
+#define    RTL8367C_HAS_HLDRMP_MD_MASK    0x200
+#define    RTL8367C_PB_HLDRMP_MD_OFFSET    8
+#define    RTL8367C_PB_HLDRMP_MD_MASK    0x100
+#define    RTL8367C_HAS_BISR_BIRSTN_OFFSET    7
+#define    RTL8367C_HAS_BISR_BIRSTN_MASK    0x80
+#define    RTL8367C_SEC_RUN_HSA_OFFSET    6
+#define    RTL8367C_SEC_RUN_HSA_MASK    0x40
+#define    RTL8367C_HAS_HLDRMP_VAL_OFFSET    5
+#define    RTL8367C_HAS_HLDRMP_VAL_MASK    0x20
+#define    RTL8367C_HAS_BISR_PWRSTN_OFFSET    4
+#define    RTL8367C_HAS_BISR_PWRSTN_MASK    0x10
+#define    RTL8367C_SEC_RUN_PB_OFFSET    3
+#define    RTL8367C_SEC_RUN_PB_MASK    0x8
+#define    RTL8367C_PB_HLDRMP_VAL_OFFSET    2
+#define    RTL8367C_PB_HLDRMP_VAL_MASK    0x4
+#define    RTL8367C_PB_BISR_BIRSTN_OFFSET    1
+#define    RTL8367C_PB_BISR_BIRSTN_MASK    0x2
+#define    RTL8367C_PB_BISR_PWRSTN_OFFSET    0
+#define    RTL8367C_PB_BISR_PWRSTN_MASK    0x1
+
+#define    RTL8367C_REG_CVLANRAM_BISR_CTRL    0x1d39
+#define    RTL8367C_SEC_RUN_CVLAN_OFFSET    4
+#define    RTL8367C_SEC_RUN_CVLAN_MASK    0x10
+#define    RTL8367C_CVALN_HLDRMP_MD_OFFSET    3
+#define    RTL8367C_CVALN_HLDRMP_MD_MASK    0x8
+#define    RTL8367C_CVALN_HLDRMP_VAL_OFFSET    2
+#define    RTL8367C_CVALN_HLDRMP_VAL_MASK    0x4
+#define    RTL8367C_CVLAN_BISR_BIRSTN_OFFSET    1
+#define    RTL8367C_CVLAN_BISR_BIRSTN_MASK    0x2
+#define    RTL8367C_CVLAN_BISR_PWRSTN_OFFSET    0
+#define    RTL8367C_CVLAN_BISR_PWRSTN_MASK    0x1
+
+#define    RTL8367C_REG_CFG_1588_TIMER_EN_GPI    0x1d3a
+#define    RTL8367C_CFG_1588_TIMER_EN_GPI_OFFSET    0
+#define    RTL8367C_CFG_1588_TIMER_EN_GPI_MASK    0x1
+
+#define    RTL8367C_REG_MDIO_PRMB_SUPP    0x1d3b
+#define    RTL8367C_FIB_HIPRI_OFFSET    14
+#define    RTL8367C_FIB_HIPRI_MASK    0x4000
+#define    RTL8367C_SMT_EN_OFFSET    13
+#define    RTL8367C_SMT_EN_MASK    0x2000
+#define    RTL8367C_P4_FB_CPL_OFFSET    12
+#define    RTL8367C_P4_FB_CPL_MASK    0x1000
+#define    RTL8367C_P3_FB_CPL_OFFSET    11
+#define    RTL8367C_P3_FB_CPL_MASK    0x800
+#define    RTL8367C_P2_FB_CPL_OFFSET    10
+#define    RTL8367C_P2_FB_CPL_MASK    0x400
+#define    RTL8367C_P1_FB_CPL_OFFSET    9
+#define    RTL8367C_P1_FB_CPL_MASK    0x200
+#define    RTL8367C_P0_FB_CPL_OFFSET    8
+#define    RTL8367C_P0_FB_CPL_MASK    0x100
+#define    RTL8367C_DBG_PKG_8367N_OFFSET    7
+#define    RTL8367C_DBG_PKG_8367N_MASK    0x80
+#define    RTL8367C_DBG_PKG_8367VB_OFFSET    6
+#define    RTL8367C_DBG_PKG_8367VB_MASK    0x40
+#define    RTL8367C_CFG_DEBUG_EN_OFFSET    5
+#define    RTL8367C_CFG_DEBUG_EN_MASK    0x20
+#define    RTL8367C_CFG_TMR_ACK_OFFSET    1
+#define    RTL8367C_CFG_TMR_ACK_MASK    0x1E
+#define    RTL8367C_CFG_PRMB_SUPP_OFFSET    0
+#define    RTL8367C_CFG_PRMB_SUPP_MASK    0x1
+
+#define    RTL8367C_REG_BOND4READ    0x1d3c
+#define    RTL8367C_BOND_BOID0_OFFSET    8
+#define    RTL8367C_BOND_BOID0_MASK    0x100
+#define    RTL8367C_BOND_SYSCLK_OFFSET    7
+#define    RTL8367C_BOND_SYSCLK_MASK    0x80
+#define    RTL8367C_BOND_PHYMODE_OFFSET    6
+#define    RTL8367C_BOND_PHYMODE_MASK    0x40
+#define    RTL8367C_BOND_DIS_PON_BIST_OFFSET    5
+#define    RTL8367C_BOND_DIS_PON_BIST_MASK    0x20
+#define    RTL8367C_BOND_DIS_TABLE_INIT_OFFSET    4
+#define    RTL8367C_BOND_DIS_TABLE_INIT_MASK    0x10
+#define    RTL8367C_BOND_BYP_AFE_PLL_OFFSET    3
+#define    RTL8367C_BOND_BYP_AFE_PLL_MASK    0x8
+#define    RTL8367C_BOND_BYP_AFE_POR_OFFSET    2
+#define    RTL8367C_BOND_BYP_AFE_POR_MASK    0x4
+#define    RTL8367C_BOND_BISR_COND_OFFSET    1
+#define    RTL8367C_BOND_BISR_COND_MASK    0x2
+#define    RTL8367C_BOND_EF_EN_OFFSET    0
+#define    RTL8367C_BOND_EF_EN_MASK    0x1
+
+#define    RTL8367C_REG_REG_TO_ECO0    0x1d3d
+
+#define    RTL8367C_REG_REG_TO_ECO1    0x1d3e
+
+#define    RTL8367C_REG_REG_TO_ECO2    0x1d3f
+
+#define    RTL8367C_REG_REG_TO_ECO3    0x1d40
+
+#define    RTL8367C_REG_REG_TO_ECO4    0x1d41
+
+#define    RTL8367C_REG_PHYSTS_CTRL0    0x1d42
+#define    RTL8367C_MACRX_DUPDET_EN_OFFSET    5
+#define    RTL8367C_MACRX_DUPDET_EN_MASK    0x20
+#define    RTL8367C_LNKUP_DLY_EN_OFFSET    4
+#define    RTL8367C_LNKUP_DLY_EN_MASK    0x10
+#define    RTL8367C_GE_100M_LNKUP_DLY_OFFSET    2
+#define    RTL8367C_GE_100M_LNKUP_DLY_MASK    0xC
+#define    RTL8367C_PHYSTS_10M_LNKUP_DLY_OFFSET    0
+#define    RTL8367C_PHYSTS_10M_LNKUP_DLY_MASK    0x3
+
+#define    RTL8367C_REG_SSC_CTRL0_0    0x1d44
+#define    RTL8367C_SSC_CTRL0_0_SSC_TYPE_OFFSET    13
+#define    RTL8367C_SSC_CTRL0_0_SSC_TYPE_MASK    0x2000
+#define    RTL8367C_SSC_CTRL0_0_PHASE_LIM_SEL_OFFSET    5
+#define    RTL8367C_SSC_CTRL0_0_PHASE_LIM_SEL_MASK    0x1FE0
+#define    RTL8367C_SSC_CTRL0_0_PHASE_LIM_EN_OFFSET    4
+#define    RTL8367C_SSC_CTRL0_0_PHASE_LIM_EN_MASK    0x10
+#define    RTL8367C_SSC_CTRL0_0_DLL_MODE_OFFSET    2
+#define    RTL8367C_SSC_CTRL0_0_DLL_MODE_MASK    0xC
+#define    RTL8367C_SSC_CTRL0_0_SSC_EN_OFFSET    1
+#define    RTL8367C_SSC_CTRL0_0_SSC_EN_MASK    0x2
+#define    RTL8367C_SSC_CTRL0_0_SSC_MODE_OFFSET    0
+#define    RTL8367C_SSC_CTRL0_0_SSC_MODE_MASK    0x1
+
+#define    RTL8367C_REG_SSC_RDM_SEED    0x1d45
+
+#define    RTL8367C_REG_SSC_PN_POLY_SEL    0x1d46
+
+#define    RTL8367C_REG_SSC_CTRL0_3    0x1d47
+#define    RTL8367C_SSC_CTRL0_3_PHSFT_CNT_OFFSET    8
+#define    RTL8367C_SSC_CTRL0_3_PHSFT_CNT_MASK    0x7F00
+#define    RTL8367C_SSC_CTRL0_3_PHSFT_A_OFFSET    7
+#define    RTL8367C_SSC_CTRL0_3_PHSFT_A_MASK    0x80
+#define    RTL8367C_SSC_CTRL0_3_PHSFT_B_OFFSET    6
+#define    RTL8367C_SSC_CTRL0_3_PHSFT_B_MASK    0x40
+#define    RTL8367C_SSC_CTRL0_3_PHSFT_UPDN_OFFSET    5
+#define    RTL8367C_SSC_CTRL0_3_PHSFT_UPDN_MASK    0x20
+#define    RTL8367C_SSC_CTRL0_3_PHSFT_PRD_OFFSET    4
+#define    RTL8367C_SSC_CTRL0_3_PHSFT_PRD_MASK    0x10
+#define    RTL8367C_SSC_CTRL0_3_PN_POLY_DEG_OFFSET    0
+#define    RTL8367C_SSC_CTRL0_3_PN_POLY_DEG_MASK    0xF
+
+#define    RTL8367C_REG_SSC_CTRL0_4    0x1d48
+#define    RTL8367C_SSC_CTRL0_4_SSC_UP1DN0_OFFSET    15
+#define    RTL8367C_SSC_CTRL0_4_SSC_UP1DN0_MASK    0x8000
+#define    RTL8367C_SSC_CTRL0_4_SSC_PERIOD_OFFSET    8
+#define    RTL8367C_SSC_CTRL0_4_SSC_PERIOD_MASK    0x7F00
+#define    RTL8367C_SSC_CTRL0_4_SSC_OFFSET_OFFSET    0
+#define    RTL8367C_SSC_CTRL0_4_SSC_OFFSET_MASK    0xFF
+
+#define    RTL8367C_REG_SSC_CTRL0_5    0x1d49
+#define    RTL8367C_SSC_CTRL0_5_PH_OFS_TOG_OFFSET    15
+#define    RTL8367C_SSC_CTRL0_5_PH_OFS_TOG_MASK    0x8000
+#define    RTL8367C_SSC_CTRL0_5_PH_OFS_OFFSET    10
+#define    RTL8367C_SSC_CTRL0_5_PH_OFS_MASK    0x7C00
+#define    RTL8367C_SSC_CTRL0_5_SSC_STEP_OFFSET    4
+#define    RTL8367C_SSC_CTRL0_5_SSC_STEP_MASK    0x3F0
+#define    RTL8367C_SSC_CTRL0_5_SSC_TEST_MODE_OFFSET    2
+#define    RTL8367C_SSC_CTRL0_5_SSC_TEST_MODE_MASK    0xC
+#define    RTL8367C_SSC_CTRL0_5_SSC_PH_CFG_OFFSET    0
+#define    RTL8367C_SSC_CTRL0_5_SSC_PH_CFG_MASK    0x3
+
+#define    RTL8367C_REG_SSC_STS0    0x1d4a
+#define    RTL8367C_SSC_STS0_OFS_BUSY_OFFSET    13
+#define    RTL8367C_SSC_STS0_OFS_BUSY_MASK    0x2000
+#define    RTL8367C_SSC_STS0_OFS_TOTAL_R_OFFSET    8
+#define    RTL8367C_SSC_STS0_OFS_TOTAL_R_MASK    0x1F00
+#define    RTL8367C_SSC_STS0_CNT_GRY0_OFFSET    4
+#define    RTL8367C_SSC_STS0_CNT_GRY0_MASK    0xF0
+#define    RTL8367C_SSC_STS0_OFS_GRY0_OFFSET    0
+#define    RTL8367C_SSC_STS0_OFS_GRY0_MASK    0xF
+
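+/*
+ * The SSC_CTRL1/2/3 and SSC_STS1/2/3 blocks below repeat the CTRL0/STS0
+ * register layout above, one instance per spread-spectrum clock channel.
+ */
+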
+#define    RTL8367C_REG_SSC_CTRL1_0    0x1d4b
+#define    RTL8367C_SSC_CTRL1_0_SSC_TYPE_OFFSET    13
+#define    RTL8367C_SSC_CTRL1_0_SSC_TYPE_MASK    0x2000
+#define    RTL8367C_SSC_CTRL1_0_PHASE_LIM_SEL_OFFSET    5
+#define    RTL8367C_SSC_CTRL1_0_PHASE_LIM_SEL_MASK    0x1FE0
+#define    RTL8367C_SSC_CTRL1_0_PHASE_LIM_EN_OFFSET    4
+#define    RTL8367C_SSC_CTRL1_0_PHASE_LIM_EN_MASK    0x10
+#define    RTL8367C_SSC_CTRL1_0_DLL_MODE_OFFSET    2
+#define    RTL8367C_SSC_CTRL1_0_DLL_MODE_MASK    0xC
+#define    RTL8367C_SSC_CTRL1_0_SSC_EN_OFFSET    1
+#define    RTL8367C_SSC_CTRL1_0_SSC_EN_MASK    0x2
+#define    RTL8367C_SSC_CTRL1_0_SSC_MODE_OFFSET    0
+#define    RTL8367C_SSC_CTRL1_0_SSC_MODE_MASK    0x1
+
+#define    RTL8367C_REG_SSC_RDM_SEED1    0x1d4c
+
+#define    RTL8367C_REG_SSC_PN_POLY_SEL1    0x1d4d
+
+#define    RTL8367C_REG_SSC_CTRL1_3    0x1d4e
+#define    RTL8367C_SSC_CTRL1_3_PHSFT_CNT_OFFSET    8
+#define    RTL8367C_SSC_CTRL1_3_PHSFT_CNT_MASK    0x7F00
+#define    RTL8367C_SSC_CTRL1_3_PHSFT_A_OFFSET    7
+#define    RTL8367C_SSC_CTRL1_3_PHSFT_A_MASK    0x80
+#define    RTL8367C_SSC_CTRL1_3_PHSFT_B_OFFSET    6
+#define    RTL8367C_SSC_CTRL1_3_PHSFT_B_MASK    0x40
+#define    RTL8367C_SSC_CTRL1_3_PHSFT_UPDN_OFFSET    5
+#define    RTL8367C_SSC_CTRL1_3_PHSFT_UPDN_MASK    0x20
+#define    RTL8367C_SSC_CTRL1_3_PHSFT_PRD_OFFSET    4
+#define    RTL8367C_SSC_CTRL1_3_PHSFT_PRD_MASK    0x10
+#define    RTL8367C_SSC_CTRL1_3_PN_POLY_DEG_OFFSET    0
+#define    RTL8367C_SSC_CTRL1_3_PN_POLY_DEG_MASK    0xF
+
+#define    RTL8367C_REG_SSC_CTRL1_4    0x1d4f
+#define    RTL8367C_SSC_CTRL1_4_SSC_UP1DN0_OFFSET    15
+#define    RTL8367C_SSC_CTRL1_4_SSC_UP1DN0_MASK    0x8000
+#define    RTL8367C_SSC_CTRL1_4_SSC_PERIOD_OFFSET    8
+#define    RTL8367C_SSC_CTRL1_4_SSC_PERIOD_MASK    0x7F00
+#define    RTL8367C_SSC_CTRL1_4_SSC_OFFSET_OFFSET    0
+#define    RTL8367C_SSC_CTRL1_4_SSC_OFFSET_MASK    0xFF
+
+#define    RTL8367C_REG_SSC_CTRL1_5    0x1d50
+#define    RTL8367C_SSC_CTRL1_5_PH_OFS_TOG_OFFSET    15
+#define    RTL8367C_SSC_CTRL1_5_PH_OFS_TOG_MASK    0x8000
+#define    RTL8367C_SSC_CTRL1_5_PH_OFS_OFFSET    10
+#define    RTL8367C_SSC_CTRL1_5_PH_OFS_MASK    0x7C00
+#define    RTL8367C_SSC_CTRL1_5_SSC_STEP_OFFSET    4
+#define    RTL8367C_SSC_CTRL1_5_SSC_STEP_MASK    0x3F0
+#define    RTL8367C_SSC_CTRL1_5_SSC_TEST_MODE_OFFSET    2
+#define    RTL8367C_SSC_CTRL1_5_SSC_TEST_MODE_MASK    0xC
+#define    RTL8367C_SSC_CTRL1_5_SSC_PH_CFG_OFFSET    0
+#define    RTL8367C_SSC_CTRL1_5_SSC_PH_CFG_MASK    0x3
+
+#define    RTL8367C_REG_SSC_STS1    0x1d51
+#define    RTL8367C_SSC_STS1_OFS_BUSY_OFFSET    13
+#define    RTL8367C_SSC_STS1_OFS_BUSY_MASK    0x2000
+#define    RTL8367C_SSC_STS1_OFS_TOTAL_R_OFFSET    8
+#define    RTL8367C_SSC_STS1_OFS_TOTAL_R_MASK    0x1F00
+#define    RTL8367C_SSC_STS1_CNT_GRY0_OFFSET    4
+#define    RTL8367C_SSC_STS1_CNT_GRY0_MASK    0xF0
+#define    RTL8367C_SSC_STS1_OFS_GRY0_OFFSET    0
+#define    RTL8367C_SSC_STS1_OFS_GRY0_MASK    0xF
+
+#define    RTL8367C_REG_SSC_CTRL2_0    0x1d52
+#define    RTL8367C_SSC_CTRL2_0_SSC_TYPE_OFFSET    13
+#define    RTL8367C_SSC_CTRL2_0_SSC_TYPE_MASK    0x2000
+#define    RTL8367C_SSC_CTRL2_0_PHASE_LIM_SEL_OFFSET    5
+#define    RTL8367C_SSC_CTRL2_0_PHASE_LIM_SEL_MASK    0x1FE0
+#define    RTL8367C_SSC_CTRL2_0_PHASE_LIM_EN_OFFSET    4
+#define    RTL8367C_SSC_CTRL2_0_PHASE_LIM_EN_MASK    0x10
+#define    RTL8367C_SSC_CTRL2_0_DLL_MODE_OFFSET    2
+#define    RTL8367C_SSC_CTRL2_0_DLL_MODE_MASK    0xC
+#define    RTL8367C_SSC_CTRL2_0_SSC_EN_OFFSET    1
+#define    RTL8367C_SSC_CTRL2_0_SSC_EN_MASK    0x2
+#define    RTL8367C_SSC_CTRL2_0_SSC_MODE_OFFSET    0
+#define    RTL8367C_SSC_CTRL2_0_SSC_MODE_MASK    0x1
+
+#define    RTL8367C_REG_SSC_RDM_SEED2    0x1d53
+
+#define    RTL8367C_REG_SSC_PN_POLY_SEL2    0x1d54
+
+#define    RTL8367C_REG_SSC_CTRL2_3    0x1d55
+#define    RTL8367C_SSC_CTRL2_3_PHSFT_CNT_OFFSET    8
+#define    RTL8367C_SSC_CTRL2_3_PHSFT_CNT_MASK    0x7F00
+#define    RTL8367C_SSC_CTRL2_3_PHSFT_A_OFFSET    7
+#define    RTL8367C_SSC_CTRL2_3_PHSFT_A_MASK    0x80
+#define    RTL8367C_SSC_CTRL2_3_PHSFT_B_OFFSET    6
+#define    RTL8367C_SSC_CTRL2_3_PHSFT_B_MASK    0x40
+#define    RTL8367C_SSC_CTRL2_3_PHSFT_UPDN_OFFSET    5
+#define    RTL8367C_SSC_CTRL2_3_PHSFT_UPDN_MASK    0x20
+#define    RTL8367C_SSC_CTRL2_3_PHSFT_PRD_OFFSET    4
+#define    RTL8367C_SSC_CTRL2_3_PHSFT_PRD_MASK    0x10
+#define    RTL8367C_SSC_CTRL2_3_PN_POLY_DEG_OFFSET    0
+#define    RTL8367C_SSC_CTRL2_3_PN_POLY_DEG_MASK    0xF
+
+#define    RTL8367C_REG_SSC_CTRL2_4    0x1d56
+#define    RTL8367C_SSC_CTRL2_4_SSC_UP1DN0_OFFSET    15
+#define    RTL8367C_SSC_CTRL2_4_SSC_UP1DN0_MASK    0x8000
+#define    RTL8367C_SSC_CTRL2_4_SSC_PERIOD_OFFSET    8
+#define    RTL8367C_SSC_CTRL2_4_SSC_PERIOD_MASK    0x7F00
+#define    RTL8367C_SSC_CTRL2_4_SSC_OFFSET_OFFSET    0
+#define    RTL8367C_SSC_CTRL2_4_SSC_OFFSET_MASK    0xFF
+
+#define    RTL8367C_REG_SSC_CTRL2_5    0x1d57
+#define    RTL8367C_SSC_CTRL2_5_PH_OFS_TOG_OFFSET    15
+#define    RTL8367C_SSC_CTRL2_5_PH_OFS_TOG_MASK    0x8000
+#define    RTL8367C_SSC_CTRL2_5_PH_OFS_OFFSET    10
+#define    RTL8367C_SSC_CTRL2_5_PH_OFS_MASK    0x7C00
+#define    RTL8367C_SSC_CTRL2_5_SSC_STEP_OFFSET    4
+#define    RTL8367C_SSC_CTRL2_5_SSC_STEP_MASK    0x3F0
+#define    RTL8367C_SSC_CTRL2_5_SSC_TEST_MODE_OFFSET    2
+#define    RTL8367C_SSC_CTRL2_5_SSC_TEST_MODE_MASK    0xC
+#define    RTL8367C_SSC_CTRL2_5_SSC_PH_CFG_OFFSET    0
+#define    RTL8367C_SSC_CTRL2_5_SSC_PH_CFG_MASK    0x3
+
+#define    RTL8367C_REG_SSC_STS2    0x1d58
+#define    RTL8367C_SSC_STS2_OFS_BUSY_OFFSET    13
+#define    RTL8367C_SSC_STS2_OFS_BUSY_MASK    0x2000
+#define    RTL8367C_SSC_STS2_OFS_TOTAL_R_OFFSET    8
+#define    RTL8367C_SSC_STS2_OFS_TOTAL_R_MASK    0x1F00
+#define    RTL8367C_SSC_STS2_CNT_GRY0_OFFSET    4
+#define    RTL8367C_SSC_STS2_CNT_GRY0_MASK    0xF0
+#define    RTL8367C_SSC_STS2_OFS_GRY0_OFFSET    0
+#define    RTL8367C_SSC_STS2_OFS_GRY0_MASK    0xF
+
+#define    RTL8367C_REG_SSC_CTRL3_0    0x1d59
+#define    RTL8367C_SSC_CTRL3_0_SSC_TYPE_OFFSET    13
+#define    RTL8367C_SSC_CTRL3_0_SSC_TYPE_MASK    0x2000
+#define    RTL8367C_SSC_CTRL3_0_PHASE_LIM_SEL_OFFSET    5
+#define    RTL8367C_SSC_CTRL3_0_PHASE_LIM_SEL_MASK    0x1FE0
+#define    RTL8367C_SSC_CTRL3_0_PHASE_LIM_EN_OFFSET    4
+#define    RTL8367C_SSC_CTRL3_0_PHASE_LIM_EN_MASK    0x10
+#define    RTL8367C_SSC_CTRL3_0_DLL_MODE_OFFSET    2
+#define    RTL8367C_SSC_CTRL3_0_DLL_MODE_MASK    0xC
+#define    RTL8367C_SSC_CTRL3_0_SSC_EN_OFFSET    1
+#define    RTL8367C_SSC_CTRL3_0_SSC_EN_MASK    0x2
+#define    RTL8367C_SSC_CTRL3_0_SSC_MODE_OFFSET    0
+#define    RTL8367C_SSC_CTRL3_0_SSC_MODE_MASK    0x1
+
+#define    RTL8367C_REG_SSC_RDM_SEED3    0x1d5a
+
+#define    RTL8367C_REG_SSC_PN_POLY_SEL3    0x1d5b
+
+#define    RTL8367C_REG_SSC_CTRL3_3    0x1d5c
+#define    RTL8367C_SSC_CTRL3_3_PHSFT_CNT_OFFSET    8
+#define    RTL8367C_SSC_CTRL3_3_PHSFT_CNT_MASK    0x7F00
+#define    RTL8367C_SSC_CTRL3_3_PHSFT_A_OFFSET    7
+#define    RTL8367C_SSC_CTRL3_3_PHSFT_A_MASK    0x80
+#define    RTL8367C_SSC_CTRL3_3_PHSFT_B_OFFSET    6
+#define    RTL8367C_SSC_CTRL3_3_PHSFT_B_MASK    0x40
+#define    RTL8367C_SSC_CTRL3_3_PHSFT_UPDN_OFFSET    5
+#define    RTL8367C_SSC_CTRL3_3_PHSFT_UPDN_MASK    0x20
+#define    RTL8367C_SSC_CTRL3_3_PHSFT_PRD_OFFSET    4
+#define    RTL8367C_SSC_CTRL3_3_PHSFT_PRD_MASK    0x10
+#define    RTL8367C_SSC_CTRL3_3_PN_POLY_DEG_OFFSET    0
+#define    RTL8367C_SSC_CTRL3_3_PN_POLY_DEG_MASK    0xF
+
+#define    RTL8367C_REG_SSC_CTRL3_4    0x1d5d
+#define    RTL8367C_SSC_CTRL3_4_SSC_UP1DN0_OFFSET    15
+#define    RTL8367C_SSC_CTRL3_4_SSC_UP1DN0_MASK    0x8000
+#define    RTL8367C_SSC_CTRL3_4_SSC_PERIOD_OFFSET    8
+#define    RTL8367C_SSC_CTRL3_4_SSC_PERIOD_MASK    0x7F00
+#define    RTL8367C_SSC_CTRL3_4_SSC_OFFSET_OFFSET    0
+#define    RTL8367C_SSC_CTRL3_4_SSC_OFFSET_MASK    0xFF
+
+#define    RTL8367C_REG_SSC_CTRL3_5    0x1d5e
+#define    RTL8367C_SSC_CTRL3_5_PH_OFS_TOG_OFFSET    15
+#define    RTL8367C_SSC_CTRL3_5_PH_OFS_TOG_MASK    0x8000
+#define    RTL8367C_SSC_CTRL3_5_PH_OFS_OFFSET    10
+#define    RTL8367C_SSC_CTRL3_5_PH_OFS_MASK    0x7C00
+#define    RTL8367C_SSC_CTRL3_5_SSC_STEP_OFFSET    4
+#define    RTL8367C_SSC_CTRL3_5_SSC_STEP_MASK    0x3F0
+#define    RTL8367C_SSC_CTRL3_5_SSC_TEST_MODE_OFFSET    2
+#define    RTL8367C_SSC_CTRL3_5_SSC_TEST_MODE_MASK    0xC
+#define    RTL8367C_SSC_CTRL3_5_SSC_PH_CFG_OFFSET    0
+#define    RTL8367C_SSC_CTRL3_5_SSC_PH_CFG_MASK    0x3
+
+#define    RTL8367C_REG_SSC_STS3    0x1d5f
+#define    RTL8367C_SSC_STS3_OFS_BUSY_OFFSET    13
+#define    RTL8367C_SSC_STS3_OFS_BUSY_MASK    0x2000
+#define    RTL8367C_SSC_STS3_OFS_TOTAL_R_OFFSET    8
+#define    RTL8367C_SSC_STS3_OFS_TOTAL_R_MASK    0x1F00
+#define    RTL8367C_SSC_STS3_CNT_GRY0_OFFSET    4
+#define    RTL8367C_SSC_STS3_CNT_GRY0_MASK    0xF0
+#define    RTL8367C_SSC_STS3_OFS_GRY0_OFFSET    0
+#define    RTL8367C_SSC_STS3_OFS_GRY0_MASK    0xF
+
+#define    RTL8367C_REG_PHY_POLL_CFG13    0x1d60
+
+#define    RTL8367C_REG_PHY_POLL_CFG14    0x1d61
+
+#define    RTL8367C_REG_FRC_SYS_CLK    0x1d62
+#define    RTL8367C_SYSCLK_FRC_MD_OFFSET    1
+#define    RTL8367C_SYSCLK_FRC_MD_MASK    0x2
+#define    RTL8367C_SYSCLK_FRC_VAL_OFFSET    0
+#define    RTL8367C_SYSCLK_FRC_VAL_MASK    0x1
+
+#define    RTL8367C_REG_AFE_SSC_CTRL    0x1d63
+#define    RTL8367C_PH_RSTB_TXD1_OFFSET    9
+#define    RTL8367C_PH_RSTB_TXD1_MASK    0x200
+#define    RTL8367C_PH_RSTB_TXC1_OFFSET    8
+#define    RTL8367C_PH_RSTB_TXC1_MASK    0x100
+#define    RTL8367C_PH_RSTB_TXD0_OFFSET    7
+#define    RTL8367C_PH_RSTB_TXD0_MASK    0x80
+#define    RTL8367C_PH_RSTB_TXC0_OFFSET    6
+#define    RTL8367C_PH_RSTB_TXC0_MASK    0x40
+#define    RTL8367C_PH_RSTBSYS_OFFSET    5
+#define    RTL8367C_PH_RSTBSYS_MASK    0x20
+#define    RTL8367C_PH_RSTB8051_OFFSET    4
+#define    RTL8367C_PH_RSTB8051_MASK    0x10
+#define    RTL8367C_OREG_SSC_OFFSET    0
+#define    RTL8367C_OREG_SSC_MASK    0xF
+
+#define    RTL8367C_REG_BUFF_RST_CTRL0    0x1d64
+#define    RTL8367C_BUFFRST_TXESD_EN_OFFSET    13
+#define    RTL8367C_BUFFRST_TXESD_EN_MASK    0x2000
+#define    RTL8367C_BUFF_RST_TIME_LONG_OFFSET    8
+#define    RTL8367C_BUFF_RST_TIME_LONG_MASK    0x1F00
+#define    RTL8367C_BUFF_RST_TIME_SHORT_OFFSET    3
+#define    RTL8367C_BUFF_RST_TIME_SHORT_MASK    0xF8
+#define    RTL8367C_SW_BUFF_RST_OFFSET    2
+#define    RTL8367C_SW_BUFF_RST_MASK    0x4
+#define    RTL8367C_IMS_BUFF_RST_OFFSET    1
+#define    RTL8367C_IMS_BUFF_RST_MASK    0x2
+#define    RTL8367C_IMR_BUFF_RST_OFFSET    0
+#define    RTL8367C_IMR_BUFF_RST_MASK    0x1
+
+#define    RTL8367C_REG_BUFF_RST_CTRL1    0x1d65
+#define    RTL8367C_BUFFRST_SYSOVER_EN_OFFSET    10
+#define    RTL8367C_BUFFRST_SYSOVER_EN_MASK    0x400
+#define    RTL8367C_BUFFRST_SYSOVER_THR_OFFSET    0
+#define    RTL8367C_BUFFRST_SYSOVER_THR_MASK    0x3FF
+
+#define    RTL8367C_REG_BUFF_RST_CTRL2    0x1d66
+#define    RTL8367C_BUFFRST_QOVER_EN_OFFSET    10
+#define    RTL8367C_BUFFRST_QOVER_EN_MASK    0x400
+#define    RTL8367C_BUFFRST_QOVER_THR_OFFSET    0
+#define    RTL8367C_BUFFRST_QOVER_THR_MASK    0x3FF
+
+#define    RTL8367C_REG_BUFF_RST_CTRL3    0x1d67
+#define    RTL8367C_DSC_TIMER_OFFSET    11
+#define    RTL8367C_DSC_TIMER_MASK    0x7800
+#define    RTL8367C_BUFFRST_DSCOVER_THR_OFFSET    1
+#define    RTL8367C_BUFFRST_DSCOVER_THR_MASK    0x7FE
+#define    RTL8367C_BUFFRST_DSCOVER_EN_OFFSET    0
+#define    RTL8367C_BUFFRST_DSCOVER_EN_MASK    0x1
+
+#define    RTL8367C_REG_BUFF_RST_CTRL4    0x1d68
+#define    RTL8367C_INDSC_TIMER_OFFSET    11
+#define    RTL8367C_INDSC_TIMER_MASK    0x7800
+#define    RTL8367C_BUFFRST_INDSCOVER_THR_OFFSET    1
+#define    RTL8367C_BUFFRST_INDSCOVER_THR_MASK    0x7FE
+#define    RTL8367C_BUFFRST_INDSCOVER_EN_OFFSET    0
+#define    RTL8367C_BUFFRST_INDSCOVER_EN_MASK    0x1
+
+#define    RTL8367C_REG_BUFF_RST_CTRL5    0x1d69
+#define    RTL8367C_TX_ESD_MODE_OFFSET    8
+#define    RTL8367C_TX_ESD_MODE_MASK    0x100
+#define    RTL8367C_TX_ESD_LVL_OFFSET    0
+#define    RTL8367C_TX_ESD_LVL_MASK    0xFF
+
+#define    RTL8367C_REG_TOP_CON0    0x1d70
+#define    RTL8367C_TOP_CON0_SDS_PWR_ISO_1_OFFSET    15
+#define    RTL8367C_TOP_CON0_SDS_PWR_ISO_1_MASK    0x8000
+#define    RTL8367C_OCP_TIMEOUT_P7_5_OFFSET    12
+#define    RTL8367C_OCP_TIMEOUT_P7_5_MASK    0x7000
+#define    RTL8367C_FIB_EEE_AB_OFFSET    11
+#define    RTL8367C_FIB_EEE_AB_MASK    0x800
+#define    RTL8367C_ADCCKIEN_OFFSET    10
+#define    RTL8367C_ADCCKIEN_MASK    0x400
+#define    RTL8367C_OCP_TIMEOUT_OFFSET    5
+#define    RTL8367C_OCP_TIMEOUT_MASK    0x3E0
+#define    RTL8367C_TOP_CON0_SDS_PWR_ISO_OFFSET    4
+#define    RTL8367C_TOP_CON0_SDS_PWR_ISO_MASK    0x10
+#define    RTL8367C_RG2_TXC_SEL_OFFSET    3
+#define    RTL8367C_RG2_TXC_SEL_MASK    0x8
+#define    RTL8367C_RG1TXC_SEL_OFFSET    2
+#define    RTL8367C_RG1TXC_SEL_MASK    0x4
+#define    RTL8367C_SYNC_1588_EN_OFFSET    1
+#define    RTL8367C_SYNC_1588_EN_MASK    0x2
+#define    RTL8367C_LS_MODE_OFFSET    0
+#define    RTL8367C_LS_MODE_MASK    0x1
+
+#define    RTL8367C_REG_TOP_CON1    0x1d71
+#define    RTL8367C_TA_CHK_EN_OFFSET    2
+#define    RTL8367C_TA_CHK_EN_MASK    0x4
+#define    RTL8367C_SLV_EG_SEL_OFFSET    1
+#define    RTL8367C_SLV_EG_SEL_MASK    0x2
+#define    RTL8367C_IIC_OP_DRAIN_OFFSET    0
+#define    RTL8367C_IIC_OP_DRAIN_MASK    0x1
+
+#define    RTL8367C_REG_SWR_FPWM    0x1d72
+#define    RTL8367C_SWR_FPWM_OFFSET    0
+#define    RTL8367C_SWR_FPWM_MASK    0x1
+
+#define    RTL8367C_REG_EEEP_CTRL_500M    0x1d73
+
+#define    RTL8367C_REG_SHORT_PRMB    0x1d74
+#define    RTL8367C_SHORT_PRMB_OFFSET    0
+#define    RTL8367C_SHORT_PRMB_MASK    0x1
+
+#define    RTL8367C_REG_INDSC_THR_CTRL    0x1d75
+#define    RTL8367C_INDSC_THR_CTRL_OFFSET    0
+#define    RTL8367C_INDSC_THR_CTRL_MASK    0x7FF
+
+#define    RTL8367C_REG_SET_PAD_CTRL_NEW    0x1d80
+#define    RTL8367C_SET_PAD_CTRL_NEW_OFFSET    0
+#define    RTL8367C_SET_PAD_CTRL_NEW_MASK    0x1
+
+#define    RTL8367C_REG_SET_PAD_DRI_0    0x1d81
+
+#define    RTL8367C_REG_SET_PAD_DRI_1    0x1d82
+
+#define    RTL8367C_REG_SET_PAD_DRI_2    0x1d83
+
+#define    RTL8367C_REG_SET_PAD_SLEW_0    0x1d84
+
+#define    RTL8367C_REG_SET_PAD_SLEW_1    0x1d85
+
+#define    RTL8367C_REG_SET_PAD_SLEW_2    0x1d86
+
+#define    RTL8367C_REG_SET_PAD_SMT_0    0x1d87
+
+#define    RTL8367C_REG_SET_PAD_SMT_1    0x1d88
+
+#define    RTL8367C_REG_SET_PAD_SMT_2    0x1d89
+
+#define    RTL8367C_REG_M_I2C_CTL_STA_REG    0x1d8a
+#define    RTL8367C_TX_RX_DATA_OFFSET    8
+#define    RTL8367C_TX_RX_DATA_MASK    0xFF00
+#define    RTL8367C_DUMB_RW_ERR_OFFSET    7
+#define    RTL8367C_DUMB_RW_ERR_MASK    0x80
+#define    RTL8367C_SLV_ACK_FLAG_OFFSET    6
+#define    RTL8367C_SLV_ACK_FLAG_MASK    0x40
+#define    RTL8367C_M_I2C_BUS_IDLE_OFFSET    5
+#define    RTL8367C_M_I2C_BUS_IDLE_MASK    0x20
+#define    RTL8367C_I2C_CMD_TYPE_OFFSET    1
+#define    RTL8367C_I2C_CMD_TYPE_MASK    0x1E
+#define    RTL8367C_I2C_CMD_EXEC_OFFSET    0
+#define    RTL8367C_I2C_CMD_EXEC_MASK    0x1
+
+#define    RTL8367C_REG_M_I2C_DUMB_RW_ADDR_0    0x1d8b
+
+#define    RTL8367C_REG_M_I2C_DUMB_RW_ADDR_1    0x1d8c
+
+#define    RTL8367C_REG_M_I2C_DUMB_RW_DATA_0    0x1d8d
+
+#define    RTL8367C_REG_M_I2C_DUMB_RW_DATA_1    0x1d8e
+
+#define    RTL8367C_REG_M_I2C_DUMB_RW_CTL    0x1d8f
+#define    RTL8367C_DUMB_I2C_CTL_CODE_OFFSET    8
+#define    RTL8367C_DUMB_I2C_CTL_CODE_MASK    0x7F00
+#define    RTL8367C_DUMB_RW_I2C_FORMAT_OFFSET    4
+#define    RTL8367C_DUMB_RW_I2C_FORMAT_MASK    0x10
+#define    RTL8367C_DUMB_RW_DATA_MODE_OFFSET    2
+#define    RTL8367C_DUMB_RW_DATA_MODE_MASK    0xC
+#define    RTL8367C_DUMB_RW_ADDR_MODE_OFFSET    0
+#define    RTL8367C_DUMB_RW_ADDR_MODE_MASK    0x3
+
+#define    RTL8367C_REG_M_I2C_SYS_CTL    0x1d90
+#define    RTL8367C_M_I2C_SCL_IO_MUX_OFFSET    12
+#define    RTL8367C_M_I2C_SCL_IO_MUX_MASK    0x3000
+#define    RTL8367C_M_I2C_SDA_IO_MUX_OFFSET    10
+#define    RTL8367C_M_I2C_SDA_IO_MUX_MASK    0xC00
+#define    RTL8367C_M_I2C_SDA_OD_EN_OFFSET    9
+#define    RTL8367C_M_I2C_SDA_OD_EN_MASK    0x200
+#define    RTL8367C_M_I2C_SCL_OD_EN_OFFSET    8
+#define    RTL8367C_M_I2C_SCL_OD_EN_MASK    0x100
+#define    RTL8367C_M_I2C_SCL_F_DIV_OFFSET    0
+#define    RTL8367C_M_I2C_SCL_F_DIV_MASK    0xFF
+
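+/*
+ * Illustrative sketch, not vendor code: a master I2C transfer is presumably
+ * kicked off by programming I2C_CMD_TYPE and setting I2C_CMD_EXEC, then
+ * polling until the controller reports the bus idle again. The command-type
+ * encoding is not documented here, so cmd_type is an opaque placeholder.
+ * Reuses the hypothetical smi_read()/smi_write() accessors sketched earlier.
+ */
+static inline int rtl8367c_m_i2c_exec(unsigned int cmd_type)
+{
+	unsigned int sta;
+	int ret;
+
+	ret = smi_write(RTL8367C_REG_M_I2C_CTL_STA_REG,
+			((cmd_type << RTL8367C_I2C_CMD_TYPE_OFFSET) &
+			 RTL8367C_I2C_CMD_TYPE_MASK) |
+			RTL8367C_I2C_CMD_EXEC_MASK);
+	if (ret)
+		return ret;
+	do {
+		ret = smi_read(RTL8367C_REG_M_I2C_CTL_STA_REG, &sta);
+		if (ret)
+			return ret;
+	} while (!(sta & RTL8367C_M_I2C_BUS_IDLE_MASK));
+	return 0;
+}
+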
+#define    RTL8367C_REG_HT_PB_SRAM_CTRL    0x1da0
+#define    RTL8367C_HTPB_RW_OFFSET    2
+#define    RTL8367C_HTPB_RW_MASK    0x4
+#define    RTL8367C_HTPB_SEL_OFFSET    1
+#define    RTL8367C_HTPB_SEL_MASK    0x2
+#define    RTL8367C_HTPB_CE_OFFSET    0
+#define    RTL8367C_HTPB_CE_MASK    0x1
+
+#define    RTL8367C_REG_HT_PB_SRAM_ADDR    0x1da1
+
+#define    RTL8367C_REG_HT_PB_SRAM_DIN0    0x1da2
+
+#define    RTL8367C_REG_HT_PB_SRAM_DIN1    0x1da3
+
+#define    RTL8367C_REG_HT_PB_SRAM_DOUT0    0x1da4
+
+#define    RTL8367C_REG_HT_PB_SRAM_DOUT1    0x1da5
+
+#define    RTL8367C_REG_PHY_STAT_0    0x1db0
+
+#define    RTL8367C_REG_PHY_STAT_1    0x1db1
+
+#define    RTL8367C_REG_PHY_STAT_2    0x1db2
+
+#define    RTL8367C_REG_PHY_STAT_3    0x1db3
+
+#define    RTL8367C_REG_PHY_STAT_4    0x1db4
+
+#define    RTL8367C_REG_PHY_STAT_5    0x1db5
+
+#define    RTL8367C_REG_PHY_STAT_6    0x1db6
+
+#define    RTL8367C_REG_PHY_STAT_7    0x1db7
+
+#define    RTL8367C_REG_SDS_STAT_0    0x1db8
+
+#define    RTL8367C_REG_SDS_STAT_1    0x1db9
+
+#define    RTL8367C_REG_MAC_LINK_STAT_0    0x1dba
+#define    RTL8367C_MAC_LINK_STAT_CUR_0_OFFSET    8
+#define    RTL8367C_MAC_LINK_STAT_CUR_0_MASK    0xFF00
+#define    RTL8367C_MAC_LINK_STAT_LATCH_0_OFFSET    0
+#define    RTL8367C_MAC_LINK_STAT_LATCH_0_MASK    0xFF
+
+#define    RTL8367C_REG_MAC_LINK_STAT_1    0x1dbb
+#define    RTL8367C_MAC_LINK_STAT_1_Reserved_OFFSET    6
+#define    RTL8367C_MAC_LINK_STAT_1_Reserved_MASK    0xFFC0
+#define    RTL8367C_MAC_LINK_STAT_CUR_1_OFFSET    3
+#define    RTL8367C_MAC_LINK_STAT_CUR_1_MASK    0x38
+#define    RTL8367C_MAC_LINK_STAT_LATCH_1_OFFSET    0
+#define    RTL8367C_MAC_LINK_STAT_LATCH_1_MASK    0x7
+
+#define    RTL8367C_REG_MISC_CONTROL_1    0x1dc0
+#define    RTL8367C_P7_FB_CPL_OFFSET    2
+#define    RTL8367C_P7_FB_CPL_MASK    0x4
+#define    RTL8367C_P6_FB_CPL_OFFSET    1
+#define    RTL8367C_P6_FB_CPL_MASK    0x2
+#define    RTL8367C_P5_FB_CPL_OFFSET    0
+#define    RTL8367C_P5_FB_CPL_MASK    0x1
+
+#define    RTL8367C_REG_SDS_MISC_1    0x1dc1
+#define    RTL8367C_CFG_SGMII_RXFC_1_OFFSET    14
+#define    RTL8367C_CFG_SGMII_RXFC_1_MASK    0x4000
+#define    RTL8367C_CFG_SGMII_TXFC_1_OFFSET    13
+#define    RTL8367C_CFG_SGMII_TXFC_1_MASK    0x2000
+#define    RTL8367C_CFG_MAC9_SEL_HSGMII_OFFSET    11
+#define    RTL8367C_CFG_MAC9_SEL_HSGMII_MASK    0x800
+#define    RTL8367C_CFG_SGMII_FDUP_1_OFFSET    10
+#define    RTL8367C_CFG_SGMII_FDUP_1_MASK    0x400
+#define    RTL8367C_CFG_SGMII_LINK_1_OFFSET    9
+#define    RTL8367C_CFG_SGMII_LINK_1_MASK    0x200
+#define    RTL8367C_CFG_SGMII_SPD_1_OFFSET    7
+#define    RTL8367C_CFG_SGMII_SPD_1_MASK    0x180
+#define    RTL8367C_CFG_MAC9_SEL_SGMII_OFFSET    6
+#define    RTL8367C_CFG_MAC9_SEL_SGMII_MASK    0x40
+#define    RTL8367C_CFG_SDS_MODE_14C_1_OFFSET    0
+#define    RTL8367C_CFG_SDS_MODE_14C_1_MASK    0x7
+
+#define    RTL8367C_REG_FIBER_CFG_2_1    0x1dc2
+#define    RTL8367C_SDS_RX_DISABLE_1_OFFSET    6
+#define    RTL8367C_SDS_RX_DISABLE_1_MASK    0xC0
+#define    RTL8367C_SDS_TX_DISABLE_1_OFFSET    4
+#define    RTL8367C_SDS_TX_DISABLE_1_MASK    0x30
+#define    RTL8367C_FIBER_CFG_2_1_SDS_PWR_ISO_1_OFFSET    2
+#define    RTL8367C_FIBER_CFG_2_1_SDS_PWR_ISO_1_MASK    0xC
+#define    RTL8367C_SDS_FRC_LD_1_OFFSET    0
+#define    RTL8367C_SDS_FRC_LD_1_MASK    0x3
+
+#define    RTL8367C_REG_FIBER_CFG_1_1    0x1dc3
+#define    RTL8367C_SDS_FRC_REG4_1_OFFSET    12
+#define    RTL8367C_SDS_FRC_REG4_1_MASK    0x1000
+#define    RTL8367C_SDS_FRC_REG4_FIB100_1_OFFSET    11
+#define    RTL8367C_SDS_FRC_REG4_FIB100_1_MASK    0x800
+#define    RTL8367C_SDS_FRC_MODE_1_OFFSET    3
+#define    RTL8367C_SDS_FRC_MODE_1_MASK    0x8
+#define    RTL8367C_SDS_MODE_1_OFFSET    0
+#define    RTL8367C_SDS_MODE_1_MASK    0x7
+
+#define    RTL8367C_REG_PHYSTS_CTRL0_1    0x1dc4
+#define    RTL8367C_LNKUP_DLY_EN_EXT2_OFFSET    9
+#define    RTL8367C_LNKUP_DLY_EN_EXT2_MASK    0x200
+#define    RTL8367C_GE_100M_LNKUP_DLY_EXT2_OFFSET    7
+#define    RTL8367C_GE_100M_LNKUP_DLY_EXT2_MASK    0x180
+#define    RTL8367C_PHYSTS_10M_LNKUP_DLY_EXT2_OFFSET    5
+#define    RTL8367C_PHYSTS_10M_LNKUP_DLY_EXT2_MASK    0x60
+#define    RTL8367C_LNKUP_DLY_EN_EXT1_OFFSET    4
+#define    RTL8367C_LNKUP_DLY_EN_EXT1_MASK    0x10
+#define    RTL8367C_GE_100M_LNKUP_DLY_EXT1_OFFSET    2
+#define    RTL8367C_GE_100M_LNKUP_DLY_EXT1_MASK    0xC
+#define    RTL8367C_PHYSTS_10M_LNKUP_DLY_EXT1_OFFSET    0
+#define    RTL8367C_PHYSTS_10M_LNKUP_DLY_EXT1_MASK    0x3
+
+#define    RTL8367C_REG_FIBER_CFG_3_1    0x1dc5
+#define    RTL8367C_FIBER_CFG_3_1_OFFSET    0
+#define    RTL8367C_FIBER_CFG_3_1_MASK    0xFFF
+
+#define    RTL8367C_REG_FIBER_CFG_4_1    0x1dc6
+
+#define    RTL8367C_REG_BUFF_RST_CTRL2_2    0x1dc7
+#define    RTL8367C_Cfg_buffrst_sysover_thr_1_OFFSET    3
+#define    RTL8367C_Cfg_buffrst_sysover_thr_1_MASK    0x8
+#define    RTL8367C_Cfg_buffrst_qover_thr_OFFSET    2
+#define    RTL8367C_Cfg_buffrst_qover_thr_MASK    0x4
+#define    RTL8367C_Cfg_buffrst_indscover_thr_1_OFFSET    1
+#define    RTL8367C_Cfg_buffrst_indscover_thr_1_MASK    0x2
+#define    RTL8367C_Cfg_buffrst_dscover_thr_1_OFFSET    0
+#define    RTL8367C_Cfg_buffrst_dscover_thr_1_MASK    0x1
+
+#define    RTL8367C_REG_PHY_DEBUG_CNT_CTRL    0x1dc8
+#define    RTL8367C_PHY_MIB_RST_7_OFFSET    15
+#define    RTL8367C_PHY_MIB_RST_7_MASK    0x8000
+#define    RTL8367C_PHY_MIB_RST_6_OFFSET    14
+#define    RTL8367C_PHY_MIB_RST_6_MASK    0x4000
+#define    RTL8367C_PHY_MIB_RST_5_OFFSET    13
+#define    RTL8367C_PHY_MIB_RST_5_MASK    0x2000
+#define    RTL8367C_PHY_MIB_RST_4_OFFSET    12
+#define    RTL8367C_PHY_MIB_RST_4_MASK    0x1000
+#define    RTL8367C_PHY_MIB_RST_3_OFFSET    11
+#define    RTL8367C_PHY_MIB_RST_3_MASK    0x800
+#define    RTL8367C_PHY_MIB_RST_2_OFFSET    10
+#define    RTL8367C_PHY_MIB_RST_2_MASK    0x400
+#define    RTL8367C_PHY_MIB_RST_1_OFFSET    9
+#define    RTL8367C_PHY_MIB_RST_1_MASK    0x200
+#define    RTL8367C_PHY_MIB_RST_0_OFFSET    8
+#define    RTL8367C_PHY_MIB_RST_0_MASK    0x100
+#define    RTL8367C_PHY_MIB_EN_7_OFFSET    7
+#define    RTL8367C_PHY_MIB_EN_7_MASK    0x80
+#define    RTL8367C_PHY_MIB_EN_6_OFFSET    6
+#define    RTL8367C_PHY_MIB_EN_6_MASK    0x40
+#define    RTL8367C_PHY_MIB_EN_5_OFFSET    5
+#define    RTL8367C_PHY_MIB_EN_5_MASK    0x20
+#define    RTL8367C_PHY_MIB_EN_4_OFFSET    4
+#define    RTL8367C_PHY_MIB_EN_4_MASK    0x10
+#define    RTL8367C_PHY_MIB_EN_3_OFFSET    3
+#define    RTL8367C_PHY_MIB_EN_3_MASK    0x8
+#define    RTL8367C_PHY_MIB_EN_2_OFFSET    2
+#define    RTL8367C_PHY_MIB_EN_2_MASK    0x4
+#define    RTL8367C_PHY_MIB_EN_1_OFFSET    1
+#define    RTL8367C_PHY_MIB_EN_1_MASK    0x2
+#define    RTL8367C_PHY_MIB_EN_0_OFFSET    0
+#define    RTL8367C_PHY_MIB_EN_0_MASK    0x1
+
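+/*
+ * Per-PHY debug counters for PHYs 0..7: TX/RX packet counts split into
+ * low/high 16-bit halves, plus TX_CRC/RX_CRC counters.
+ */
+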
+#define    RTL8367C_REG_TXPKT_CNT_L_0    0x1dc9
+
+#define    RTL8367C_REG_TXPKT_CNT_H_0    0x1dca
+
+#define    RTL8367C_REG_RXPKT_CNT_L_0    0x1dcb
+
+#define    RTL8367C_REG_RXPKT_CNT_H_0    0x1dcc
+
+#define    RTL8367C_REG_TX_CRC_0    0x1dcd
+
+#define    RTL8367C_REG_RX_CRC_0    0x1dce
+
+#define    RTL8367C_REG_TXPKT_CNT_L_1    0x1dcf
+
+#define    RTL8367C_REG_TXPKT_CNT_H_1    0x1dd0
+
+#define    RTL8367C_REG_RXPKT_CNT_L_1    0x1dd1
+
+#define    RTL8367C_REG_RXPKT_CNT_H_1    0x1dd2
+
+#define    RTL8367C_REG_TX_CRC_1    0x1dd3
+
+#define    RTL8367C_REG_RX_CRC_1    0x1dd4
+
+#define    RTL8367C_REG_TXPKT_CNT_L_2    0x1dd5
+
+#define    RTL8367C_REG_TXPKT_CNT_H_2    0x1dd6
+
+#define    RTL8367C_REG_RXPKT_CNT_L_2    0x1dd7
+
+#define    RTL8367C_REG_RXPKT_CNT_H_2    0x1dd8
+
+#define    RTL8367C_REG_TX_CRC_2    0x1dd9
+
+#define    RTL8367C_REG_RX_CRC_2    0x1dda
+
+#define    RTL8367C_REG_TXPKT_CNT_L_3    0x1ddb
+
+#define    RTL8367C_REG_TXPKT_CNT_H_3    0x1ddc
+
+#define    RTL8367C_REG_RXPKT_CNT_L_3    0x1ddd
+
+#define    RTL8367C_REG_RXPKT_CNT_H_3    0x1dde
+
+#define    RTL8367C_REG_TX_CRC_3    0x1ddf
+
+#define    RTL8367C_REG_RX_CRC_3    0x1de0
+
+#define    RTL8367C_REG_TXPKT_CNT_L_4    0x1de1
+
+#define    RTL8367C_REG_TXPKT_CNT_H_4    0x1de2
+
+#define    RTL8367C_REG_RXPKT_CNT_L_4    0x1de3
+
+#define    RTL8367C_REG_RXPKT_CNT_H_4    0x1de4
+
+#define    RTL8367C_REG_TX_CRC_4    0x1de5
+
+#define    RTL8367C_REG_RX_CRC_4    0x1de6
+
+#define    RTL8367C_REG_TXPKT_CNT_L_5    0x1de7
+
+#define    RTL8367C_REG_TXPKT_CNT_H_5    0x1de8
+
+#define    RTL8367C_REG_RXPKT_CNT_L_5    0x1de9
+
+#define    RTL8367C_REG_RXPKT_CNT_H_5    0x1dea
+
+#define    RTL8367C_REG_TX_CRC_5    0x1deb
+
+#define    RTL8367C_REG_RX_CRC_5    0x1dec
+
+#define    RTL8367C_REG_TXPKT_CNT_L_6    0x1ded
+
+#define    RTL8367C_REG_TXPKT_CNT_H_6    0x1dee
+
+#define    RTL8367C_REG_RXPKT_CNT_L_6    0x1def
+
+#define    RTL8367C_REG_RXPKT_CNT_H_6    0x1df0
+
+#define    RTL8367C_REG_TX_CRC_6    0x1df1
+
+#define    RTL8367C_REG_RX_CRC_6    0x1df2
+
+#define    RTL8367C_REG_TXPKT_CNT_L_7    0x1df3
+
+#define    RTL8367C_REG_TXPKT_CNT_H_7    0x1df4
+
+#define    RTL8367C_REG_RXPKT_CNT_L_7    0x1df5
+
+#define    RTL8367C_REG_RXPKT_CNT_H_7    0x1df6
+
+#define    RTL8367C_REG_TX_CRC_7    0x1df7
+
+#define    RTL8367C_REG_RX_CRC_7    0x1df8
+
+#define    RTL8367C_REG_BOND_DBG_0    0x1df9
+
+#define    RTL8367C_REG_BOND_DBG_1    0x1dfa
+
+#define    RTL8367C_REG_STRP_DBG_0    0x1dfb
+
+#define    RTL8367C_REG_STRP_DBG_1    0x1dfc
+
+#define    RTL8367C_REG_STRP_DBG_2    0x1dfd
+
+/* (16'h1f00)patch_reg */
+
+#define    RTL8367C_REG_INDRECT_ACCESS_CTRL    0x1f00
+#define    RTL8367C_RW_OFFSET    1
+#define    RTL8367C_RW_MASK    0x2
+#define    RTL8367C_CMD_OFFSET    0
+#define    RTL8367C_CMD_MASK    0x1
+
+#define    RTL8367C_REG_INDRECT_ACCESS_STATUS    0x1f01
+#define    RTL8367C_INDRECT_ACCESS_STATUS_OFFSET    2
+#define    RTL8367C_INDRECT_ACCESS_STATUS_MASK    0x7
+
+#define    RTL8367C_REG_INDRECT_ACCESS_ADDRESS    0x1f02
+
+#define    RTL8367C_REG_INDRECT_ACCESS_WRITE_DATA    0x1f03
+
+#define    RTL8367C_REG_INDRECT_ACCESS_READ_DATA    0x1f04
+
+/* (16'h6200)fib_page */
+
+#define    RTL8367C_REG_FIB0_CFG00    0x6200
+#define    RTL8367C_FIB0_CFG00_CFG_FIB_RST_OFFSET    15
+#define    RTL8367C_FIB0_CFG00_CFG_FIB_RST_MASK    0x8000
+#define    RTL8367C_FIB0_CFG00_CFG_FIB_LPK_OFFSET    14
+#define    RTL8367C_FIB0_CFG00_CFG_FIB_LPK_MASK    0x4000
+#define    RTL8367C_FIB0_CFG00_CFG_FIB_SPD_RD_0_OFFSET    13
+#define    RTL8367C_FIB0_CFG00_CFG_FIB_SPD_RD_0_MASK    0x2000
+#define    RTL8367C_FIB0_CFG00_CFG_FIB_ANEN_OFFSET    12
+#define    RTL8367C_FIB0_CFG00_CFG_FIB_ANEN_MASK    0x1000
+#define    RTL8367C_FIB0_CFG00_CFG_FIB_PDOWN_OFFSET    11
+#define    RTL8367C_FIB0_CFG00_CFG_FIB_PDOWN_MASK    0x800
+#define    RTL8367C_FIB0_CFG00_CFG_FIB_ISO_OFFSET    10
+#define    RTL8367C_FIB0_CFG00_CFG_FIB_ISO_MASK    0x400
+#define    RTL8367C_FIB0_CFG00_CFG_FIB_RESTART_OFFSET    9
+#define    RTL8367C_FIB0_CFG00_CFG_FIB_RESTART_MASK    0x200
+#define    RTL8367C_FIB0_CFG00_CFG_FIB_FULLDUP_OFFSET    8
+#define    RTL8367C_FIB0_CFG00_CFG_FIB_FULLDUP_MASK    0x100
+#define    RTL8367C_FIB0_CFG00_CFG_FIB_SPD_RD_1_OFFSET    6
+#define    RTL8367C_FIB0_CFG00_CFG_FIB_SPD_RD_1_MASK    0x40
+#define    RTL8367C_FIB0_CFG00_CFG_FIB_FRCTX_OFFSET    5
+#define    RTL8367C_FIB0_CFG00_CFG_FIB_FRCTX_MASK    0x20
+
+#define    RTL8367C_REG_FIB0_CFG01    0x6201
+#define    RTL8367C_FIB0_CFG01_CAPBILITY_OFFSET    6
+#define    RTL8367C_FIB0_CFG01_CAPBILITY_MASK    0xFFC0
+#define    RTL8367C_FIB0_CFG01_AN_COMPLETE_OFFSET    5
+#define    RTL8367C_FIB0_CFG01_AN_COMPLETE_MASK    0x20
+#define    RTL8367C_FIB0_CFG01_R_FAULT_OFFSET    4
+#define    RTL8367C_FIB0_CFG01_R_FAULT_MASK    0x10
+#define    RTL8367C_FIB0_CFG01_NWAY_ABILITY_OFFSET    3
+#define    RTL8367C_FIB0_CFG01_NWAY_ABILITY_MASK    0x8
+#define    RTL8367C_FIB0_CFG01_LINK_STATUS_OFFSET    2
+#define    RTL8367C_FIB0_CFG01_LINK_STATUS_MASK    0x4
+#define    RTL8367C_FIB0_CFG01_JABBER_DETECT_OFFSET    1
+#define    RTL8367C_FIB0_CFG01_JABBER_DETECT_MASK    0x2
+#define    RTL8367C_FIB0_CFG01_EXTENDED_CAPBILITY_OFFSET    0
+#define    RTL8367C_FIB0_CFG01_EXTENDED_CAPBILITY_MASK    0x1
+
+#define    RTL8367C_REG_FIB0_CFG02    0x6202
+
+#define    RTL8367C_REG_FIB0_CFG03    0x6203
+#define    RTL8367C_FIB0_CFG03_REALTEK_OUI5_0_OFFSET    10
+#define    RTL8367C_FIB0_CFG03_REALTEK_OUI5_0_MASK    0xFC00
+#define    RTL8367C_FIB0_CFG03_MODEL_NO_OFFSET    4
+#define    RTL8367C_FIB0_CFG03_MODEL_NO_MASK    0x3F0
+#define    RTL8367C_FIB0_CFG03_REVISION_NO_OFFSET    0
+#define    RTL8367C_FIB0_CFG03_REVISION_NO_MASK    0xF
+
+#define    RTL8367C_REG_FIB0_CFG04    0x6204
+
+#define    RTL8367C_REG_FIB0_CFG05    0x6205
+
+#define    RTL8367C_REG_FIB0_CFG06    0x6206
+#define    RTL8367C_FIB0_CFG06_FIB_NP_EN_OFFSET    2
+#define    RTL8367C_FIB0_CFG06_FIB_NP_EN_MASK    0x4
+#define    RTL8367C_FIB0_CFG06_RXPAGE_OFFSET    1
+#define    RTL8367C_FIB0_CFG06_RXPAGE_MASK    0x2
+
+#define    RTL8367C_REG_FIB0_CFG07    0x6207
+
+#define    RTL8367C_REG_FIB0_CFG08    0x6208
+
+#define    RTL8367C_REG_FIB0_CFG09    0x6209
+
+#define    RTL8367C_REG_FIB0_CFG10    0x620a
+
+#define    RTL8367C_REG_FIB0_CFG11    0x620b
+
+#define    RTL8367C_REG_FIB0_CFG12    0x620c
+
+#define    RTL8367C_REG_FIB0_CFG13    0x620d
+#define    RTL8367C_FIB0_CFG13_INDR_FUNC_OFFSET    14
+#define    RTL8367C_FIB0_CFG13_INDR_FUNC_MASK    0xC000
+#define    RTL8367C_FIB0_CFG13_DUMMY_OFFSET    5
+#define    RTL8367C_FIB0_CFG13_DUMMY_MASK    0x3FE0
+#define    RTL8367C_FIB0_CFG13_INDR_DEVAD_OFFSET    0
+#define    RTL8367C_FIB0_CFG13_INDR_DEVAD_MASK    0x1F
+
+#define    RTL8367C_REG_FIB0_CFG14    0x620e
+
+#define    RTL8367C_REG_FIB0_CFG15    0x620f
+
+#define    RTL8367C_REG_FIB1_CFG00    0x6210
+#define    RTL8367C_FIB1_CFG00_CFG_FIB_RST_OFFSET    15
+#define    RTL8367C_FIB1_CFG00_CFG_FIB_RST_MASK    0x8000
+#define    RTL8367C_FIB1_CFG00_CFG_FIB_LPK_OFFSET    14
+#define    RTL8367C_FIB1_CFG00_CFG_FIB_LPK_MASK    0x4000
+#define    RTL8367C_FIB1_CFG00_CFG_FIB_SPD_RD_0_OFFSET    13
+#define    RTL8367C_FIB1_CFG00_CFG_FIB_SPD_RD_0_MASK    0x2000
+#define    RTL8367C_FIB1_CFG00_CFG_FIB_ANEN_OFFSET    12
+#define    RTL8367C_FIB1_CFG00_CFG_FIB_ANEN_MASK    0x1000
+#define    RTL8367C_FIB1_CFG00_CFG_FIB_PDOWN_OFFSET    11
+#define    RTL8367C_FIB1_CFG00_CFG_FIB_PDOWN_MASK    0x800
+#define    RTL8367C_FIB1_CFG00_CFG_FIB_ISO_OFFSET    10
+#define    RTL8367C_FIB1_CFG00_CFG_FIB_ISO_MASK    0x400
+#define    RTL8367C_FIB1_CFG00_CFG_FIB_RESTART_OFFSET    9
+#define    RTL8367C_FIB1_CFG00_CFG_FIB_RESTART_MASK    0x200
+#define    RTL8367C_FIB1_CFG00_CFG_FIB_FULLDUP_OFFSET    8
+#define    RTL8367C_FIB1_CFG00_CFG_FIB_FULLDUP_MASK    0x100
+#define    RTL8367C_FIB1_CFG00_CFG_FIB_SPD_RD_1_OFFSET    6
+#define    RTL8367C_FIB1_CFG00_CFG_FIB_SPD_RD_1_MASK    0x40
+#define    RTL8367C_FIB1_CFG00_CFG_FIB_FRCTX_OFFSET    5
+#define    RTL8367C_FIB1_CFG00_CFG_FIB_FRCTX_MASK    0x20
+
+#define    RTL8367C_REG_FIB1_CFG01    0x6211
+#define    RTL8367C_FIB1_CFG01_CAPBILITY_OFFSET    6
+#define    RTL8367C_FIB1_CFG01_CAPBILITY_MASK    0xFFC0
+#define    RTL8367C_FIB1_CFG01_AN_COMPLETE_OFFSET    5
+#define    RTL8367C_FIB1_CFG01_AN_COMPLETE_MASK    0x20
+#define    RTL8367C_FIB1_CFG01_R_FAULT_OFFSET    4
+#define    RTL8367C_FIB1_CFG01_R_FAULT_MASK    0x10
+#define    RTL8367C_FIB1_CFG01_NWAY_ABILITY_OFFSET    3
+#define    RTL8367C_FIB1_CFG01_NWAY_ABILITY_MASK    0x8
+#define    RTL8367C_FIB1_CFG01_LINK_STATUS_OFFSET    2
+#define    RTL8367C_FIB1_CFG01_LINK_STATUS_MASK    0x4
+#define    RTL8367C_FIB1_CFG01_JABBER_DETECT_OFFSET    1
+#define    RTL8367C_FIB1_CFG01_JABBER_DETECT_MASK    0x2
+#define    RTL8367C_FIB1_CFG01_EXTENDED_CAPBILITY_OFFSET    0
+#define    RTL8367C_FIB1_CFG01_EXTENDED_CAPBILITY_MASK    0x1
+
+#define    RTL8367C_REG_FIB1_CFG02    0x6212
+
+#define    RTL8367C_REG_FIB1_CFG03    0x6213
+#define    RTL8367C_FIB1_CFG03_REALTEK_OUI5_0_OFFSET    10
+#define    RTL8367C_FIB1_CFG03_REALTEK_OUI5_0_MASK    0xFC00
+#define    RTL8367C_FIB1_CFG03_MODEL_NO_OFFSET    4
+#define    RTL8367C_FIB1_CFG03_MODEL_NO_MASK    0x3F0
+#define    RTL8367C_FIB1_CFG03_REVISION_NO_OFFSET    0
+#define    RTL8367C_FIB1_CFG03_REVISION_NO_MASK    0xF
+
+#define    RTL8367C_REG_FIB1_CFG04    0x6214
+
+#define    RTL8367C_REG_FIB1_CFG05    0x6215
+
+#define    RTL8367C_REG_FIB1_CFG06    0x6216
+#define    RTL8367C_FIB1_CFG06_FIB_NP_EN_OFFSET    2
+#define    RTL8367C_FIB1_CFG06_FIB_NP_EN_MASK    0x4
+#define    RTL8367C_FIB1_CFG06_RXPAGE_OFFSET    1
+#define    RTL8367C_FIB1_CFG06_RXPAGE_MASK    0x2
+
+#define    RTL8367C_REG_FIB1_CFG07    0x6217
+
+#define    RTL8367C_REG_FIB1_CFG08    0x6218
+
+#define    RTL8367C_REG_FIB1_CFG09    0x6219
+
+#define    RTL8367C_REG_FIB1_CFG10    0x621a
+
+#define    RTL8367C_REG_FIB1_CFG11    0x621b
+
+#define    RTL8367C_REG_FIB1_CFG12    0x621c
+
+#define    RTL8367C_REG_FIB1_CFG13    0x621d
+#define    RTL8367C_FIB1_CFG13_INDR_FUNC_OFFSET    14
+#define    RTL8367C_FIB1_CFG13_INDR_FUNC_MASK    0xC000
+#define    RTL8367C_FIB1_CFG13_DUMMY_OFFSET    5
+#define    RTL8367C_FIB1_CFG13_DUMMY_MASK    0x3FE0
+#define    RTL8367C_FIB1_CFG13_INDR_DEVAD_OFFSET    0
+#define    RTL8367C_FIB1_CFG13_INDR_DEVAD_MASK    0x1F
+
+#define    RTL8367C_REG_FIB1_CFG14    0x621e
+
+#define    RTL8367C_REG_FIB1_CFG15    0x621f
+
+/* (16'h6400)timer_1588 */
+
+#define    RTL8367C_REG_PTP_TIME_NSEC_L_NSEC    0x6400
+
+#define    RTL8367C_REG_PTP_TIME_NSEC_H_NSEC    0x6401
+#define    RTL8367C_PTP_TIME_NSEC_H_EXEC_OFFSET    15
+#define    RTL8367C_PTP_TIME_NSEC_H_EXEC_MASK    0x8000
+#define    RTL8367C_PTP_TIME_NSEC_H_CMD_OFFSET    12
+#define    RTL8367C_PTP_TIME_NSEC_H_CMD_MASK    0x3000
+#define    RTL8367C_PTP_TIME_NSEC_H_NSEC_OFFSET    0
+#define    RTL8367C_PTP_TIME_NSEC_H_NSEC_MASK    0x7FF
+
+#define    RTL8367C_REG_PTP_TIME_SEC_L_SEC    0x6402
+
+#define    RTL8367C_REG_PTP_TIME_SEC_H_SEC    0x6403
+
+#define    RTL8367C_REG_PTP_TIME_CFG    0x6404
+#define    RTL8367C_CFG_TIMER_EN_FRC_OFFSET    2
+#define    RTL8367C_CFG_TIMER_EN_FRC_MASK    0x4
+#define    RTL8367C_CFG_TIMER_1588_EN_OFFSET    1
+#define    RTL8367C_CFG_TIMER_1588_EN_MASK    0x2
+#define    RTL8367C_CFG_CLK_SRC_OFFSET    0
+#define    RTL8367C_CFG_CLK_SRC_MASK    0x1
+
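+/*
+ * Inferred from the field names alone (unverified): the 1588 timer is
+ * programmed by staging the nanosecond/second words in 0x6400-0x6403,
+ * selecting an operation in the CMD field of PTP_TIME_NSEC_H, and
+ * setting EXEC to trigger it; EXEC presumably self-clears when done.
+ * Check the datasheet before relying on this interpretation.
+ */
+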
+#define    RTL8367C_REG_OTAG_TPID    0x6405
+
+#define    RTL8367C_REG_ITAG_TPID    0x6406
+
+#define    RTL8367C_REG_MAC_ADDR_L    0x6407
+
+#define    RTL8367C_REG_MAC_ADDR_M    0x6408
+
+#define    RTL8367C_REG_MAC_ADDR_H    0x6409
+
+#define    RTL8367C_REG_PTP_TIME_NSEC_L_NSEC_RD    0x640a
+
+#define    RTL8367C_REG_PTP_TIME_NSEC_H_NSEC_RD    0x640b
+#define    RTL8367C_PTP_TIME_NSEC_H_NSEC_RD_OFFSET    0
+#define    RTL8367C_PTP_TIME_NSEC_H_NSEC_RD_MASK    0x7FF
+
+#define    RTL8367C_REG_PTP_TIME_SEC_L_SEC_RD    0x640c
+
+#define    RTL8367C_REG_PTP_TIME_SEC_H_SEC_RD    0x640d
+
+#define    RTL8367C_REG_PTP_TIME_CFG2    0x640e
+#define    RTL8367C_CFG_EN_OFFLOAD_OFFSET    9
+#define    RTL8367C_CFG_EN_OFFLOAD_MASK    0x200
+#define    RTL8367C_CFG_SAVE_OFF_TS_OFFSET    8
+#define    RTL8367C_CFG_SAVE_OFF_TS_MASK    0x100
+#define    RTL8367C_CFG_IMR_OFFSET    0
+#define    RTL8367C_CFG_IMR_MASK    0xFF
+
+#define    RTL8367C_REG_PTP_INTERRUPT_CFG    0x640f
+#define    RTL8367C_P9_INTERRUPT_OFFSET    9
+#define    RTL8367C_P9_INTERRUPT_MASK    0x200
+#define    RTL8367C_P8_INTERRUPT_OFFSET    8
+#define    RTL8367C_P8_INTERRUPT_MASK    0x100
+#define    RTL8367C_P7_INTERRUPT_OFFSET    7
+#define    RTL8367C_P7_INTERRUPT_MASK    0x80
+#define    RTL8367C_P6_INTERRUPT_OFFSET    6
+#define    RTL8367C_P6_INTERRUPT_MASK    0x40
+#define    RTL8367C_P5_INTERRUPT_OFFSET    5
+#define    RTL8367C_P5_INTERRUPT_MASK    0x20
+#define    RTL8367C_P4_INTERRUPT_OFFSET    4
+#define    RTL8367C_P4_INTERRUPT_MASK    0x10
+#define    RTL8367C_P3_INTERRUPT_OFFSET    3
+#define    RTL8367C_P3_INTERRUPT_MASK    0x8
+#define    RTL8367C_P2_INTERRUPT_OFFSET    2
+#define    RTL8367C_P2_INTERRUPT_MASK    0x4
+#define    RTL8367C_P1_INTERRUPT_OFFSET    1
+#define    RTL8367C_P1_INTERRUPT_MASK    0x2
+#define    RTL8367C_P0_INTERRUPT_OFFSET    0
+#define    RTL8367C_P0_INTERRUPT_MASK    0x1
+
+#define    RTL8367C_REG_P0_TX_SYNC_SEQ_ID    0x6410
+
+#define    RTL8367C_REG_P0_TX_DELAY_REQ_SEQ_ID    0x6411
+
+#define    RTL8367C_REG_P0_TX_PDELAY_REQ_SEQ_ID    0x6412
+
+#define    RTL8367C_REG_P0_TX_PDELAY_RESP_SEQ_ID    0x6413
+
+#define    RTL8367C_REG_P0_RX_SYNC_SEQ_ID    0x6414
+
+#define    RTL8367C_REG_P0_RX_DELAY_REQ_SEQ_ID    0x6415
+
+#define    RTL8367C_REG_P0_RX_PDELAY_REQ_SEQ_ID    0x6416
+
+#define    RTL8367C_REG_P0_RX_PDELAY_RESP_SEQ_ID    0x6417
+
+#define    RTL8367C_REG_P0_PORT_NSEC_15_0    0x6418
+
+#define    RTL8367C_REG_P0_PORT_NSEC_26_16    0x6419
+#define    RTL8367C_P0_PORT_NSEC_26_16_OFFSET    0
+#define    RTL8367C_P0_PORT_NSEC_26_16_MASK    0x7FF
+
+#define    RTL8367C_REG_P0_PORT_SEC_15_0    0x641a
+
+#define    RTL8367C_REG_P0_PORT_SEC_31_16    0x641b
+
+#define    RTL8367C_REG_P0_EAV_CFG    0x641c
+#define    RTL8367C_P0_EAV_CFG_PTP_PHY_EN_EN_OFFSET    8
+#define    RTL8367C_P0_EAV_CFG_PTP_PHY_EN_EN_MASK    0x100
+#define    RTL8367C_P0_EAV_CFG_RX_PDELAY_RESP_OFFSET    7
+#define    RTL8367C_P0_EAV_CFG_RX_PDELAY_RESP_MASK    0x80
+#define    RTL8367C_P0_EAV_CFG_RX_PDELAY_REQ_OFFSET    6
+#define    RTL8367C_P0_EAV_CFG_RX_PDELAY_REQ_MASK    0x40
+#define    RTL8367C_P0_EAV_CFG_RX_DELAY_REQ_OFFSET    5
+#define    RTL8367C_P0_EAV_CFG_RX_DELAY_REQ_MASK    0x20
+#define    RTL8367C_P0_EAV_CFG_RX_SYNC_OFFSET    4
+#define    RTL8367C_P0_EAV_CFG_RX_SYNC_MASK    0x10
+#define    RTL8367C_P0_EAV_CFG_TX_PDELAY_RESP_OFFSET    3
+#define    RTL8367C_P0_EAV_CFG_TX_PDELAY_RESP_MASK    0x8
+#define    RTL8367C_P0_EAV_CFG_TX_PDELAY_REQ_OFFSET    2
+#define    RTL8367C_P0_EAV_CFG_TX_PDELAY_REQ_MASK    0x4
+#define    RTL8367C_P0_EAV_CFG_TX_DELAY_REQ_OFFSET    1
+#define    RTL8367C_P0_EAV_CFG_TX_DELAY_REQ_MASK    0x2
+#define    RTL8367C_P0_EAV_CFG_TX_SYNC_OFFSET    0
+#define    RTL8367C_P0_EAV_CFG_TX_SYNC_MASK    0x1
+
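+/*
+ * Each P<n>_EAV_CFG register carries one enable bit per PTP message
+ * type, plus a PHY-level enable.  A hedged read-modify-write sketch
+ * (using the generic SMI accessors from smi.h) that would enable TX
+ * Sync timestamping on port 0:
+ *
+ *     rtk_uint32 val;
+ *     smi_read(RTL8367C_REG_P0_EAV_CFG, &val);
+ *     val |= RTL8367C_P0_EAV_CFG_TX_SYNC_MASK;
+ *     smi_write(RTL8367C_REG_P0_EAV_CFG, val);
+ *
+ * Ports 1-9 follow the same pattern with their own macros below.
+ */
+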
+#define    RTL8367C_REG_P1_TX_SYNC_SEQ_ID    0x6420
+
+#define    RTL8367C_REG_P1_TX_DELAY_REQ_SEQ_ID    0x6421
+
+#define    RTL8367C_REG_P1_TX_PDELAY_REQ_SEQ_ID    0x6422
+
+#define    RTL8367C_REG_P1_TX_PDELAY_RESP_SEQ_ID    0x6423
+
+#define    RTL8367C_REG_P1_RX_SYNC_SEQ_ID    0x6424
+
+#define    RTL8367C_REG_P1_RX_DELAY_REQ_SEQ_ID    0x6425
+
+#define    RTL8367C_REG_P1_RX_PDELAY_REQ_SEQ_ID    0x6426
+
+#define    RTL8367C_REG_P1_RX_PDELAY_RESP_SEQ_ID    0x6427
+
+#define    RTL8367C_REG_P1_PORT_NSEC_15_0    0x6428
+
+#define    RTL8367C_REG_P1_PORT_NSEC_26_16    0x6429
+#define    RTL8367C_P1_PORT_NSEC_26_16_OFFSET    0
+#define    RTL8367C_P1_PORT_NSEC_26_16_MASK    0x7FF
+
+#define    RTL8367C_REG_P1_PORT_SEC_15_0    0x642a
+
+#define    RTL8367C_REG_P1_PORT_SEC_31_16    0x642b
+
+#define    RTL8367C_REG_P1_EAV_CFG    0x642c
+#define    RTL8367C_P1_EAV_CFG_PTP_PHY_EN_EN_OFFSET    8
+#define    RTL8367C_P1_EAV_CFG_PTP_PHY_EN_EN_MASK    0x100
+#define    RTL8367C_P1_EAV_CFG_RX_PDELAY_RESP_OFFSET    7
+#define    RTL8367C_P1_EAV_CFG_RX_PDELAY_RESP_MASK    0x80
+#define    RTL8367C_P1_EAV_CFG_RX_PDELAY_REQ_OFFSET    6
+#define    RTL8367C_P1_EAV_CFG_RX_PDELAY_REQ_MASK    0x40
+#define    RTL8367C_P1_EAV_CFG_RX_DELAY_REQ_OFFSET    5
+#define    RTL8367C_P1_EAV_CFG_RX_DELAY_REQ_MASK    0x20
+#define    RTL8367C_P1_EAV_CFG_RX_SYNC_OFFSET    4
+#define    RTL8367C_P1_EAV_CFG_RX_SYNC_MASK    0x10
+#define    RTL8367C_P1_EAV_CFG_TX_PDELAY_RESP_OFFSET    3
+#define    RTL8367C_P1_EAV_CFG_TX_PDELAY_RESP_MASK    0x8
+#define    RTL8367C_P1_EAV_CFG_TX_PDELAY_REQ_OFFSET    2
+#define    RTL8367C_P1_EAV_CFG_TX_PDELAY_REQ_MASK    0x4
+#define    RTL8367C_P1_EAV_CFG_TX_DELAY_REQ_OFFSET    1
+#define    RTL8367C_P1_EAV_CFG_TX_DELAY_REQ_MASK    0x2
+#define    RTL8367C_P1_EAV_CFG_TX_SYNC_OFFSET    0
+#define    RTL8367C_P1_EAV_CFG_TX_SYNC_MASK    0x1
+
+#define    RTL8367C_REG_P2_TX_SYNC_SEQ_ID    0x6430
+
+#define    RTL8367C_REG_P2_TX_DELAY_REQ_SEQ_ID    0x6431
+
+#define    RTL8367C_REG_P2_TX_PDELAY_REQ_SEQ_ID    0x6432
+
+#define    RTL8367C_REG_P2_TX_PDELAY_RESP_SEQ_ID    0x6433
+
+#define    RTL8367C_REG_P2_RX_SYNC_SEQ_ID    0x6434
+
+#define    RTL8367C_REG_P2_RX_DELAY_REQ_SEQ_ID    0x6435
+
+#define    RTL8367C_REG_P2_RX_PDELAY_REQ_SEQ_ID    0x6436
+
+#define    RTL8367C_REG_P2_RX_PDELAY_RESP_SEQ_ID    0x6437
+
+#define    RTL8367C_REG_P2_PORT_NSEC_15_0    0x6438
+
+#define    RTL8367C_REG_P2_PORT_NSEC_26_16    0x6439
+#define    RTL8367C_P2_PORT_NSEC_26_16_OFFSET    0
+#define    RTL8367C_P2_PORT_NSEC_26_16_MASK    0x7FF
+
+#define    RTL8367C_REG_P2_PORT_SEC_15_0    0x643a
+
+#define    RTL8367C_REG_P2_PORT_SEC_31_16    0x643b
+
+#define    RTL8367C_REG_P2_EAV_CFG    0x643c
+#define    RTL8367C_P2_EAV_CFG_PTP_PHY_EN_EN_OFFSET    8
+#define    RTL8367C_P2_EAV_CFG_PTP_PHY_EN_EN_MASK    0x100
+#define    RTL8367C_P2_EAV_CFG_RX_PDELAY_RESP_OFFSET    7
+#define    RTL8367C_P2_EAV_CFG_RX_PDELAY_RESP_MASK    0x80
+#define    RTL8367C_P2_EAV_CFG_RX_PDELAY_REQ_OFFSET    6
+#define    RTL8367C_P2_EAV_CFG_RX_PDELAY_REQ_MASK    0x40
+#define    RTL8367C_P2_EAV_CFG_RX_DELAY_REQ_OFFSET    5
+#define    RTL8367C_P2_EAV_CFG_RX_DELAY_REQ_MASK    0x20
+#define    RTL8367C_P2_EAV_CFG_RX_SYNC_OFFSET    4
+#define    RTL8367C_P2_EAV_CFG_RX_SYNC_MASK    0x10
+#define    RTL8367C_P2_EAV_CFG_TX_PDELAY_RESP_OFFSET    3
+#define    RTL8367C_P2_EAV_CFG_TX_PDELAY_RESP_MASK    0x8
+#define    RTL8367C_P2_EAV_CFG_TX_PDELAY_REQ_OFFSET    2
+#define    RTL8367C_P2_EAV_CFG_TX_PDELAY_REQ_MASK    0x4
+#define    RTL8367C_P2_EAV_CFG_TX_DELAY_REQ_OFFSET    1
+#define    RTL8367C_P2_EAV_CFG_TX_DELAY_REQ_MASK    0x2
+#define    RTL8367C_P2_EAV_CFG_TX_SYNC_OFFSET    0
+#define    RTL8367C_P2_EAV_CFG_TX_SYNC_MASK    0x1
+
+#define    RTL8367C_REG_P3_TX_SYNC_SEQ_ID    0x6440
+
+#define    RTL8367C_REG_P3_TX_DELAY_REQ_SEQ_ID    0x6441
+
+#define    RTL8367C_REG_P3_TX_PDELAY_REQ_SEQ_ID    0x6442
+
+#define    RTL8367C_REG_P3_TX_PDELAY_RESP_SEQ_ID    0x6443
+
+#define    RTL8367C_REG_P3_RX_SYNC_SEQ_ID    0x6444
+
+#define    RTL8367C_REG_P3_RX_DELAY_REQ_SEQ_ID    0x6445
+
+#define    RTL8367C_REG_P3_RX_PDELAY_REQ_SEQ_ID    0x6446
+
+#define    RTL8367C_REG_P3_RX_PDELAY_RESP_SEQ_ID    0x6447
+
+#define    RTL8367C_REG_P3_PORT_NSEC_15_0    0x6448
+
+#define    RTL8367C_REG_P3_PORT_NSEC_26_16    0x6449
+#define    RTL8367C_P3_PORT_NSEC_26_16_OFFSET    0
+#define    RTL8367C_P3_PORT_NSEC_26_16_MASK    0x7FF
+
+#define    RTL8367C_REG_P3_PORT_SEC_15_0    0x644a
+
+#define    RTL8367C_REG_P3_PORT_SEC_31_16    0x644b
+
+#define    RTL8367C_REG_P3_EAV_CFG    0x644c
+#define    RTL8367C_P3_EAV_CFG_PTP_PHY_EN_EN_OFFSET    8
+#define    RTL8367C_P3_EAV_CFG_PTP_PHY_EN_EN_MASK    0x100
+#define    RTL8367C_P3_EAV_CFG_RX_PDELAY_RESP_OFFSET    7
+#define    RTL8367C_P3_EAV_CFG_RX_PDELAY_RESP_MASK    0x80
+#define    RTL8367C_P3_EAV_CFG_RX_PDELAY_REQ_OFFSET    6
+#define    RTL8367C_P3_EAV_CFG_RX_PDELAY_REQ_MASK    0x40
+#define    RTL8367C_P3_EAV_CFG_RX_DELAY_REQ_OFFSET    5
+#define    RTL8367C_P3_EAV_CFG_RX_DELAY_REQ_MASK    0x20
+#define    RTL8367C_P3_EAV_CFG_RX_SYNC_OFFSET    4
+#define    RTL8367C_P3_EAV_CFG_RX_SYNC_MASK    0x10
+#define    RTL8367C_P3_EAV_CFG_TX_PDELAY_RESP_OFFSET    3
+#define    RTL8367C_P3_EAV_CFG_TX_PDELAY_RESP_MASK    0x8
+#define    RTL8367C_P3_EAV_CFG_TX_PDELAY_REQ_OFFSET    2
+#define    RTL8367C_P3_EAV_CFG_TX_PDELAY_REQ_MASK    0x4
+#define    RTL8367C_P3_EAV_CFG_TX_DELAY_REQ_OFFSET    1
+#define    RTL8367C_P3_EAV_CFG_TX_DELAY_REQ_MASK    0x2
+#define    RTL8367C_P3_EAV_CFG_TX_SYNC_OFFSET    0
+#define    RTL8367C_P3_EAV_CFG_TX_SYNC_MASK    0x1
+
+#define    RTL8367C_REG_P4_TX_SYNC_SEQ_ID    0x6450
+
+#define    RTL8367C_REG_P4_TX_DELAY_REQ_SEQ_ID    0x6451
+
+#define    RTL8367C_REG_P4_TX_PDELAY_REQ_SEQ_ID    0x6452
+
+#define    RTL8367C_REG_P4_TX_PDELAY_RESP_SEQ_ID    0x6453
+
+#define    RTL8367C_REG_P4_RX_SYNC_SEQ_ID    0x6454
+
+#define    RTL8367C_REG_P4_RX_DELAY_REQ_SEQ_ID    0x6455
+
+#define    RTL8367C_REG_P4_RX_PDELAY_REQ_SEQ_ID    0x6456
+
+#define    RTL8367C_REG_P4_RX_PDELAY_RESP_SEQ_ID    0x6457
+
+#define    RTL8367C_REG_P4_PORT_NSEC_15_0    0x6458
+
+#define    RTL8367C_REG_P4_PORT_NSEC_26_16    0x6459
+#define    RTL8367C_P4_PORT_NSEC_26_16_OFFSET    0
+#define    RTL8367C_P4_PORT_NSEC_26_16_MASK    0x7FF
+
+#define    RTL8367C_REG_P4_PORT_SEC_15_0    0x645a
+
+#define    RTL8367C_REG_P4_PORT_SEC_31_16    0x645b
+
+#define    RTL8367C_REG_P4_EAV_CFG    0x645c
+#define    RTL8367C_P4_EAV_CFG_PTP_PHY_EN_EN_OFFSET    8
+#define    RTL8367C_P4_EAV_CFG_PTP_PHY_EN_EN_MASK    0x100
+#define    RTL8367C_P4_EAV_CFG_RX_PDELAY_RESP_OFFSET    7
+#define    RTL8367C_P4_EAV_CFG_RX_PDELAY_RESP_MASK    0x80
+#define    RTL8367C_P4_EAV_CFG_RX_PDELAY_REQ_OFFSET    6
+#define    RTL8367C_P4_EAV_CFG_RX_PDELAY_REQ_MASK    0x40
+#define    RTL8367C_P4_EAV_CFG_RX_DELAY_REQ_OFFSET    5
+#define    RTL8367C_P4_EAV_CFG_RX_DELAY_REQ_MASK    0x20
+#define    RTL8367C_P4_EAV_CFG_RX_SYNC_OFFSET    4
+#define    RTL8367C_P4_EAV_CFG_RX_SYNC_MASK    0x10
+#define    RTL8367C_P4_EAV_CFG_TX_PDELAY_RESP_OFFSET    3
+#define    RTL8367C_P4_EAV_CFG_TX_PDELAY_RESP_MASK    0x8
+#define    RTL8367C_P4_EAV_CFG_TX_PDELAY_REQ_OFFSET    2
+#define    RTL8367C_P4_EAV_CFG_TX_PDELAY_REQ_MASK    0x4
+#define    RTL8367C_P4_EAV_CFG_TX_DELAY_REQ_OFFSET    1
+#define    RTL8367C_P4_EAV_CFG_TX_DELAY_REQ_MASK    0x2
+#define    RTL8367C_P4_EAV_CFG_TX_SYNC_OFFSET    0
+#define    RTL8367C_P4_EAV_CFG_TX_SYNC_MASK    0x1
+
+#define    RTL8367C_REG_P6_TX_SYNC_SEQ_ID    0x6460
+
+#define    RTL8367C_REG_P6_TX_DELAY_REQ_SEQ_ID    0x6461
+
+#define    RTL8367C_REG_P6_TX_PDELAY_REQ_SEQ_ID    0x6462
+
+#define    RTL8367C_REG_P6_TX_PDELAY_RESP_SEQ_ID    0x6463
+
+#define    RTL8367C_REG_P6_RX_SYNC_SEQ_ID    0x6464
+
+#define    RTL8367C_REG_P6_RX_DELAY_REQ_SEQ_ID    0x6465
+
+#define    RTL8367C_REG_P6_RX_PDELAY_REQ_SEQ_ID    0x6466
+
+#define    RTL8367C_REG_P6_RX_PDELAY_RESP_SEQ_ID    0x6467
+
+#define    RTL8367C_REG_P6_PORT_NSEC_15_0    0x6468
+
+#define    RTL8367C_REG_P6_PORT_NSEC_26_16    0x6469
+#define    RTL8367C_P6_PORT_NSEC_26_16_OFFSET    0
+#define    RTL8367C_P6_PORT_NSEC_26_16_MASK    0x7FF
+
+#define    RTL8367C_REG_P6_PORT_SEC_15_0    0x646a
+
+#define    RTL8367C_REG_P6_PORT_SEC_31_16    0x646b
+
+#define    RTL8367C_REG_P6_EAV_CFG    0x646c
+#define    RTL8367C_P6_EAV_CFG_PTP_PHY_EN_EN_OFFSET    8
+#define    RTL8367C_P6_EAV_CFG_PTP_PHY_EN_EN_MASK    0x100
+#define    RTL8367C_P6_EAV_CFG_RX_PDELAY_RESP_OFFSET    7
+#define    RTL8367C_P6_EAV_CFG_RX_PDELAY_RESP_MASK    0x80
+#define    RTL8367C_P6_EAV_CFG_RX_PDELAY_REQ_OFFSET    6
+#define    RTL8367C_P6_EAV_CFG_RX_PDELAY_REQ_MASK    0x40
+#define    RTL8367C_P6_EAV_CFG_RX_DELAY_REQ_OFFSET    5
+#define    RTL8367C_P6_EAV_CFG_RX_DELAY_REQ_MASK    0x20
+#define    RTL8367C_P6_EAV_CFG_RX_SYNC_OFFSET    4
+#define    RTL8367C_P6_EAV_CFG_RX_SYNC_MASK    0x10
+#define    RTL8367C_P6_EAV_CFG_TX_PDELAY_RESP_OFFSET    3
+#define    RTL8367C_P6_EAV_CFG_TX_PDELAY_RESP_MASK    0x8
+#define    RTL8367C_P6_EAV_CFG_TX_PDELAY_REQ_OFFSET    2
+#define    RTL8367C_P6_EAV_CFG_TX_PDELAY_REQ_MASK    0x4
+#define    RTL8367C_P6_EAV_CFG_TX_DELAY_REQ_OFFSET    1
+#define    RTL8367C_P6_EAV_CFG_TX_DELAY_REQ_MASK    0x2
+#define    RTL8367C_P6_EAV_CFG_TX_SYNC_OFFSET    0
+#define    RTL8367C_P6_EAV_CFG_TX_SYNC_MASK    0x1
+
+#define    RTL8367C_REG_P7_TX_SYNC_SEQ_ID    0x6470
+
+#define    RTL8367C_REG_P7_TX_DELAY_REQ_SEQ_ID    0x6471
+
+#define    RTL8367C_REG_P7_TX_PDELAY_REQ_SEQ_ID    0x6472
+
+#define    RTL8367C_REG_P7_TX_PDELAY_RESP_SEQ_ID    0x6473
+
+#define    RTL8367C_REG_P7_RX_SYNC_SEQ_ID    0x6474
+
+#define    RTL8367C_REG_P7_RX_DELAY_REQ_SEQ_ID    0x6475
+
+#define    RTL8367C_REG_P7_RX_PDELAY_REQ_SEQ_ID    0x6476
+
+#define    RTL8367C_REG_P7_RX_PDELAY_RESP_SEQ_ID    0x6477
+
+#define    RTL8367C_REG_P7_PORT_NSEC_15_0    0x6478
+
+#define    RTL8367C_REG_P7_PORT_NSEC_26_16    0x6479
+#define    RTL8367C_P7_PORT_NSEC_26_16_OFFSET    0
+#define    RTL8367C_P7_PORT_NSEC_26_16_MASK    0x7FF
+
+#define    RTL8367C_REG_P7_PORT_SEC_15_0    0x647a
+
+#define    RTL8367C_REG_P7_PORT_SEC_31_16    0x647b
+
+#define    RTL8367C_REG_P7_EAV_CFG    0x647c
+#define    RTL8367C_P7_EAV_CFG_PTP_PHY_EN_EN_OFFSET    8
+#define    RTL8367C_P7_EAV_CFG_PTP_PHY_EN_EN_MASK    0x100
+#define    RTL8367C_P7_EAV_CFG_RX_PDELAY_RESP_OFFSET    7
+#define    RTL8367C_P7_EAV_CFG_RX_PDELAY_RESP_MASK    0x80
+#define    RTL8367C_P7_EAV_CFG_RX_PDELAY_REQ_OFFSET    6
+#define    RTL8367C_P7_EAV_CFG_RX_PDELAY_REQ_MASK    0x40
+#define    RTL8367C_P7_EAV_CFG_RX_DELAY_REQ_OFFSET    5
+#define    RTL8367C_P7_EAV_CFG_RX_DELAY_REQ_MASK    0x20
+#define    RTL8367C_P7_EAV_CFG_RX_SYNC_OFFSET    4
+#define    RTL8367C_P7_EAV_CFG_RX_SYNC_MASK    0x10
+#define    RTL8367C_P7_EAV_CFG_TX_PDELAY_RESP_OFFSET    3
+#define    RTL8367C_P7_EAV_CFG_TX_PDELAY_RESP_MASK    0x8
+#define    RTL8367C_P7_EAV_CFG_TX_PDELAY_REQ_OFFSET    2
+#define    RTL8367C_P7_EAV_CFG_TX_PDELAY_REQ_MASK    0x4
+#define    RTL8367C_P7_EAV_CFG_TX_DELAY_REQ_OFFSET    1
+#define    RTL8367C_P7_EAV_CFG_TX_DELAY_REQ_MASK    0x2
+#define    RTL8367C_P7_EAV_CFG_TX_SYNC_OFFSET    0
+#define    RTL8367C_P7_EAV_CFG_TX_SYNC_MASK    0x1
+
+#define    RTL8367C_REG_P5_TX_SYNC_SEQ_ID    0x6480
+
+#define    RTL8367C_REG_P5_TX_DELAY_REQ_SEQ_ID    0x6481
+
+#define    RTL8367C_REG_P5_TX_PDELAY_REQ_SEQ_ID    0x6482
+
+#define    RTL8367C_REG_P5_TX_PDELAY_RESP_SEQ_ID    0x6483
+
+#define    RTL8367C_REG_P5_RX_SYNC_SEQ_ID    0x6484
+
+#define    RTL8367C_REG_P5_RX_DELAY_REQ_SEQ_ID    0x6485
+
+#define    RTL8367C_REG_P5_RX_PDELAY_REQ_SEQ_ID    0x6486
+
+#define    RTL8367C_REG_P5_RX_PDELAY_RESP_SEQ_ID    0x6487
+
+#define    RTL8367C_REG_P5_PORT_NSEC_15_0    0x6488
+
+#define    RTL8367C_REG_P5_PORT_NSEC_26_16    0x6489
+#define    RTL8367C_P5_PORT_NSEC_26_16_OFFSET    0
+#define    RTL8367C_P5_PORT_NSEC_26_16_MASK    0x7FF
+
+#define    RTL8367C_REG_P5_PORT_SEC_15_0    0x648a
+
+#define    RTL8367C_REG_P5_PORT_SEC_31_16    0x648b
+
+#define    RTL8367C_REG_P5_EAV_CFG    0x648c
+#define    RTL8367C_P5_EAV_CFG_PTP_PHY_EN_EN_OFFSET    8
+#define    RTL8367C_P5_EAV_CFG_PTP_PHY_EN_EN_MASK    0x100
+#define    RTL8367C_P5_EAV_CFG_RX_PDELAY_RESP_OFFSET    7
+#define    RTL8367C_P5_EAV_CFG_RX_PDELAY_RESP_MASK    0x80
+#define    RTL8367C_P5_EAV_CFG_RX_PDELAY_REQ_OFFSET    6
+#define    RTL8367C_P5_EAV_CFG_RX_PDELAY_REQ_MASK    0x40
+#define    RTL8367C_P5_EAV_CFG_RX_DELAY_REQ_OFFSET    5
+#define    RTL8367C_P5_EAV_CFG_RX_DELAY_REQ_MASK    0x20
+#define    RTL8367C_P5_EAV_CFG_RX_SYNC_OFFSET    4
+#define    RTL8367C_P5_EAV_CFG_RX_SYNC_MASK    0x10
+#define    RTL8367C_P5_EAV_CFG_TX_PDELAY_RESP_OFFSET    3
+#define    RTL8367C_P5_EAV_CFG_TX_PDELAY_RESP_MASK    0x8
+#define    RTL8367C_P5_EAV_CFG_TX_PDELAY_REQ_OFFSET    2
+#define    RTL8367C_P5_EAV_CFG_TX_PDELAY_REQ_MASK    0x4
+#define    RTL8367C_P5_EAV_CFG_TX_DELAY_REQ_OFFSET    1
+#define    RTL8367C_P5_EAV_CFG_TX_DELAY_REQ_MASK    0x2
+#define    RTL8367C_P5_EAV_CFG_TX_SYNC_OFFSET    0
+#define    RTL8367C_P5_EAV_CFG_TX_SYNC_MASK    0x1
+
+#define    RTL8367C_REG_P8_TX_SYNC_SEQ_ID    0x6490
+
+#define    RTL8367C_REG_P8_TX_DELAY_REQ_SEQ_ID    0x6491
+
+#define    RTL8367C_REG_P8_TX_PDELAY_REQ_SEQ_ID    0x6492
+
+#define    RTL8367C_REG_P8_TX_PDELAY_RESP_SEQ_ID    0x6493
+
+#define    RTL8367C_REG_P8_RX_SYNC_SEQ_ID    0x6494
+
+#define    RTL8367C_REG_P8_RX_DELAY_REQ_SEQ_ID    0x6495
+
+#define    RTL8367C_REG_P8_RX_PDELAY_REQ_SEQ_ID    0x6496
+
+#define    RTL8367C_REG_P8_RX_PDELAY_RESP_SEQ_ID    0x6497
+
+#define    RTL8367C_REG_P8_PORT_NSEC_15_0    0x6498
+
+#define    RTL8367C_REG_P8_PORT_NSEC_26_16    0x6499
+#define    RTL8367C_P8_PORT_NSEC_26_16_OFFSET    0
+#define    RTL8367C_P8_PORT_NSEC_26_16_MASK    0x7FF
+
+#define    RTL8367C_REG_P8_PORT_SEC_15_0    0x649a
+
+#define    RTL8367C_REG_P8_PORT_SEC_31_16    0x649b
+
+#define    RTL8367C_REG_P8_EAV_CFG    0x649c
+#define    RTL8367C_P8_EAV_CFG_PTP_PHY_EN_EN_OFFSET    8
+#define    RTL8367C_P8_EAV_CFG_PTP_PHY_EN_EN_MASK    0x100
+#define    RTL8367C_P8_EAV_CFG_RX_PDELAY_RESP_OFFSET    7
+#define    RTL8367C_P8_EAV_CFG_RX_PDELAY_RESP_MASK    0x80
+#define    RTL8367C_P8_EAV_CFG_RX_PDELAY_REQ_OFFSET    6
+#define    RTL8367C_P8_EAV_CFG_RX_PDELAY_REQ_MASK    0x40
+#define    RTL8367C_P8_EAV_CFG_RX_DELAY_REQ_OFFSET    5
+#define    RTL8367C_P8_EAV_CFG_RX_DELAY_REQ_MASK    0x20
+#define    RTL8367C_P8_EAV_CFG_RX_SYNC_OFFSET    4
+#define    RTL8367C_P8_EAV_CFG_RX_SYNC_MASK    0x10
+#define    RTL8367C_P8_EAV_CFG_TX_PDELAY_RESP_OFFSET    3
+#define    RTL8367C_P8_EAV_CFG_TX_PDELAY_RESP_MASK    0x8
+#define    RTL8367C_P8_EAV_CFG_TX_PDELAY_REQ_OFFSET    2
+#define    RTL8367C_P8_EAV_CFG_TX_PDELAY_REQ_MASK    0x4
+#define    RTL8367C_P8_EAV_CFG_TX_DELAY_REQ_OFFSET    1
+#define    RTL8367C_P8_EAV_CFG_TX_DELAY_REQ_MASK    0x2
+#define    RTL8367C_P8_EAV_CFG_TX_SYNC_OFFSET    0
+#define    RTL8367C_P8_EAV_CFG_TX_SYNC_MASK    0x1
+
+#define    RTL8367C_REG_P9_TX_SYNC_SEQ_ID    0x64a0
+
+#define    RTL8367C_REG_P9_TX_DELAY_REQ_SEQ_ID    0x64a1
+
+#define    RTL8367C_REG_P9_TX_PDELAY_REQ_SEQ_ID    0x64a2
+
+#define    RTL8367C_REG_P9_TX_PDELAY_RESP_SEQ_ID    0x64a3
+
+#define    RTL8367C_REG_P9_RX_SYNC_SEQ_ID    0x64a4
+
+#define    RTL8367C_REG_P9_RX_DELAY_REQ_SEQ_ID    0x64a5
+
+#define    RTL8367C_REG_P9_RX_PDELAY_REQ_SEQ_ID    0x64a6
+
+#define    RTL8367C_REG_P9_RX_PDELAY_RESP_SEQ_ID    0x64a7
+
+#define    RTL8367C_REG_P9_PORT_NSEC_15_0    0x64a8
+
+#define    RTL8367C_REG_P9_PORT_NSEC_26_16    0x64a9
+#define    RTL8367C_P9_PORT_NSEC_26_16_OFFSET    0
+#define    RTL8367C_P9_PORT_NSEC_26_16_MASK    0x7FF
+
+#define    RTL8367C_REG_P9_PORT_SEC_15_0    0x64aa
+
+#define    RTL8367C_REG_P9_PORT_SEC_31_16    0x64ab
+
+#define    RTL8367C_REG_P9_EAV_CFG    0x64ac
+#define    RTL8367C_P9_EAV_CFG_PTP_PHY_EN_EN_OFFSET    8
+#define    RTL8367C_P9_EAV_CFG_PTP_PHY_EN_EN_MASK    0x100
+#define    RTL8367C_P9_EAV_CFG_RX_PDELAY_RESP_OFFSET    7
+#define    RTL8367C_P9_EAV_CFG_RX_PDELAY_RESP_MASK    0x80
+#define    RTL8367C_P9_EAV_CFG_RX_PDELAY_REQ_OFFSET    6
+#define    RTL8367C_P9_EAV_CFG_RX_PDELAY_REQ_MASK    0x40
+#define    RTL8367C_P9_EAV_CFG_RX_DELAY_REQ_OFFSET    5
+#define    RTL8367C_P9_EAV_CFG_RX_DELAY_REQ_MASK    0x20
+#define    RTL8367C_P9_EAV_CFG_RX_SYNC_OFFSET    4
+#define    RTL8367C_P9_EAV_CFG_RX_SYNC_MASK    0x10
+#define    RTL8367C_P9_EAV_CFG_TX_PDELAY_RESP_OFFSET    3
+#define    RTL8367C_P9_EAV_CFG_TX_PDELAY_RESP_MASK    0x8
+#define    RTL8367C_P9_EAV_CFG_TX_PDELAY_REQ_OFFSET    2
+#define    RTL8367C_P9_EAV_CFG_TX_PDELAY_REQ_MASK    0x4
+#define    RTL8367C_P9_EAV_CFG_TX_DELAY_REQ_OFFSET    1
+#define    RTL8367C_P9_EAV_CFG_TX_DELAY_REQ_MASK    0x2
+#define    RTL8367C_P9_EAV_CFG_TX_SYNC_OFFSET    0
+#define    RTL8367C_P9_EAV_CFG_TX_SYNC_MASK    0x1
+
+/* (16'h6600)sds_indacs_reg */
+
+#define    RTL8367C_REG_SDS_INDACS_CMD    0x6600
+#define    RTL8367C_SDS_CMD_BUSY_OFFSET    8
+#define    RTL8367C_SDS_CMD_BUSY_MASK    0x100
+#define    RTL8367C_SDS_CMD_OFFSET    7
+#define    RTL8367C_SDS_CMD_MASK    0x80
+#define    RTL8367C_SDS_RWOP_OFFSET    6
+#define    RTL8367C_SDS_RWOP_MASK    0x40
+#define    RTL8367C_SDS_INDEX_OFFSET    0
+#define    RTL8367C_SDS_INDEX_MASK    0x3F
+
+#define    RTL8367C_REG_SDS_INDACS_ADR    0x6601
+#define    RTL8367C_SDS_PAGE_OFFSET    5
+#define    RTL8367C_SDS_PAGE_MASK    0x7E0
+#define    RTL8367C_SDS_REGAD_OFFSET    0
+#define    RTL8367C_SDS_REGAD_MASK    0x1F
+
+#define    RTL8367C_REG_SDS_INDACS_DATA    0x6602
+
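+/*
+ * SerDes registers are reached through this indirect window.  A
+ * hedged read sequence, inferred from the field names only:
+ *
+ *     smi_write(RTL8367C_REG_SDS_INDACS_ADR,
+ *               (page << RTL8367C_SDS_PAGE_OFFSET) | regaddr);
+ *     smi_write(RTL8367C_REG_SDS_INDACS_CMD,
+ *               RTL8367C_SDS_CMD_MASK | sds_index);   (RWOP = 0 assumed to mean read)
+ *     poll RTL8367C_REG_SDS_INDACS_CMD until SDS_CMD_BUSY clears
+ *     smi_read(RTL8367C_REG_SDS_INDACS_DATA, &data);
+ */
+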
+
+#endif /*#ifndef _RTL8367C_REG_H_*/
+
diff -Nruw linux-4.4.115/drivers/misc/freebox/rtlapi/smi.c linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/smi.c
--- linux-4.4.115/drivers/misc/freebox/rtlapi/smi.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/smi.c	2019-01-22 16:16:24.723257454 +0100
@@ -0,0 +1,449 @@
+/*
+* Copyright (c) Realtek Semiconductor Corporation, 2006
+* All rights reserved.
+*
+* Program : Control the SMI-connected RTL8366
+* Abstract :
+* Author : Yu-Mei Pan (ympan@realtek.com.cn)
+*  $Id: smi.c,v 1.2 2008-04-10 03:04:19 shiehyy Exp $
+*/
+#include <rtk_types.h>
+#include <smi.h>
+#include "rtk_error.h"
+
+
+#if defined(MDC_MDIO_OPERATION)
+/*******************************************************************************/
+/*  MDC/MDIO porting                                                           */
+/*******************************************************************************/
+/* define the PHY ID currently used */
+#define MDC_MDIO_PHY_ID     0  /* PHY ID 0 or 29 */
+
+/* MDC/MDIO, redefine/implement the following Macro */
+#define MDC_MDIO_WRITE(preambleLength, phyID, regID, data)
+#define MDC_MDIO_READ(preambleLength, phyID, regID, pData)
+
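+/*
+ * These stubs must be implemented per platform.  A minimal sketch for
+ * a Linux port, assuming a hypothetical global struct mii_bus *smi_bus
+ * (not part of this driver):
+ *
+ *     #define MDC_MDIO_WRITE(preambleLength, phyID, regID, data) \
+ *         mdiobus_write(smi_bus, phyID, regID, data)
+ *     #define MDC_MDIO_READ(preambleLength, phyID, regID, pData) \
+ *         (*(pData) = mdiobus_read(smi_bus, phyID, regID))
+ *
+ * The preamble length is ignored here; the Linux MDIO core generates
+ * its own preamble.
+ */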
+
+
+
+
+#elif defined(SPI_OPERATION)
+/*******************************************************************************/
+/*  SPI porting                                                                */
+/*******************************************************************************/
+/* SPI, redefine/implement the following Macro */
+#define SPI_WRITE(data, length)
+#define SPI_READ(pData, length)
+
+
+
+#elif defined(SPI_FBX_OPERATION)
+/*******************************************************************************/
+/*  SPI porting alternative                                                    */
+/*******************************************************************************/
+extern void rtk_api_read_reg_wrapper(rtk_uint32 *, rtk_uint16);
+extern void rtk_api_write_reg_wrapper(rtk_uint32, rtk_uint16);
+
+
+
+#else
+/*******************************************************************************/
+/*  I2C porting                                                                */
+/*******************************************************************************/
+/* Define the GPIO ID for SCK & SDA */
+rtk_uint32  smi_SCK = 1;    /* GPIO used for SMI Clock Generation */
+rtk_uint32  smi_SDA = 2;    /* GPIO used for SMI Data signal */
+
+/* I2C, redefine/implement the following Macro */
+#define GPIO_DIRECTION_SET(gpioID, direction)
+#define GPIO_DATA_SET(gpioID, data)
+#define GPIO_DATA_GET(gpioID, pData)
+
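+/*
+ * A hedged gpiolib-based implementation sketch for these stubs
+ * (Linux only; assumes smi_SCK/smi_SDA hold valid global GPIO numbers):
+ *
+ *     #define GPIO_DIRECTION_SET(gpioID, direction) \
+ *         ((direction) == GPIO_DIR_OUT ? gpio_direction_output(gpioID, 1) \
+ *                                      : gpio_direction_input(gpioID))
+ *     #define GPIO_DATA_SET(gpioID, data)  gpio_set_value(gpioID, data)
+ *     #define GPIO_DATA_GET(gpioID, pData) (*(pData) = gpio_get_value(gpioID))
+ */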
+
+
+
+
+#endif
+
+static void rtlglue_drvMutexLock(void)
+{
+    /* Currently empty. Implement this function if a lock/unlock mechanism is needed */
+    return;
+}
+
+static void rtlglue_drvMutexUnlock(void)
+{
+    /* Currently empty. Implement this function if a lock/unlock mechanism is needed */
+    return;
+}
+
+
+
+#if defined(MDC_MDIO_OPERATION) || defined(SPI_OPERATION) || defined(SPI_FBX_OPERATION)
+    /* No local functions are needed in MDC/MDIO and SPI modes */
+#else
+static void _smi_start(void)
+{
+
+    /* change GPIO pin to Output only */
+    GPIO_DIRECTION_SET(smi_SCK, GPIO_DIR_OUT);
+    GPIO_DIRECTION_SET(smi_SDA, GPIO_DIR_OUT);
+
+    /* Initial state: SCK: 0, SDA: 1 */
+    GPIO_DATA_SET(smi_SCK, 0);
+    GPIO_DATA_SET(smi_SDA, 1);
+    CLK_DURATION(DELAY);
+
+    /* CLK 1: 0 -> 1, 1 -> 0 */
+    GPIO_DATA_SET(smi_SCK, 1);
+    CLK_DURATION(DELAY);
+    GPIO_DATA_SET(smi_SCK, 0);
+    CLK_DURATION(DELAY);
+
+    /* CLK 2: */
+    GPIO_DATA_SET(smi_SCK, 1);
+    CLK_DURATION(DELAY);
+    GPIO_DATA_SET(smi_SDA, 0);
+    CLK_DURATION(DELAY);
+    GPIO_DATA_SET(smi_SCK, 0);
+    CLK_DURATION(DELAY);
+    GPIO_DATA_SET(smi_SDA, 1);
+
+}
+
+
+
+static void _smi_writeBit(rtk_uint16 signal, rtk_uint32 bitLen)
+{
+    for( ; bitLen > 0; bitLen--)
+    {
+        CLK_DURATION(DELAY);
+
+        /* prepare data */
+        if ( signal & (1<<(bitLen-1)) )
+        {
+            GPIO_DATA_SET(smi_SDA, 1);
+        }
+        else
+        {
+            GPIO_DATA_SET(smi_SDA, 0);
+        }
+        CLK_DURATION(DELAY);
+
+        /* clocking */
+        GPIO_DATA_SET(smi_SCK, 1);
+        CLK_DURATION(DELAY);
+        GPIO_DATA_SET(smi_SCK, 0);
+    }
+}
+
+
+
+static void _smi_readBit(rtk_uint32 bitLen, rtk_uint32 *rData)
+{
+    rtk_uint32 u = 0;
+
+    /* change GPIO pin to Input only */
+    GPIO_DIRECTION_SET(smi_SDA, GPIO_DIR_IN);
+
+    for (*rData = 0; bitLen > 0; bitLen--)
+    {
+        CLK_DURATION(DELAY);
+
+        /* clocking */
+        GPIO_DATA_SET(smi_SCK, 1);
+        CLK_DURATION(DELAY);
+        GPIO_DATA_GET(smi_SDA, &u);
+        GPIO_DATA_SET(smi_SCK, 0);
+
+        *rData |= (u << (bitLen - 1));
+    }
+
+    /* change GPIO pin to Output only */
+    GPIO_DIRECTION_SET(smi_SDA, GPIO_DIR_OUT);
+}
+
+
+
+static void _smi_stop(void)
+{
+
+    CLK_DURATION(DELAY);
+    GPIO_DATA_SET(smi_SDA, 0);
+    GPIO_DATA_SET(smi_SCK, 1);
+    CLK_DURATION(DELAY);
+    GPIO_DATA_SET(smi_SDA, 1);
+    CLK_DURATION(DELAY);
+    GPIO_DATA_SET(smi_SCK, 1);
+    CLK_DURATION(DELAY);
+    GPIO_DATA_SET(smi_SCK, 0);
+    CLK_DURATION(DELAY);
+    GPIO_DATA_SET(smi_SCK, 1);
+
+    /* add one extra clock pulse */
+    CLK_DURATION(DELAY);
+    GPIO_DATA_SET(smi_SCK, 0);
+    CLK_DURATION(DELAY);
+    GPIO_DATA_SET(smi_SCK, 1);
+
+
+    /* change GPIO pin to Input only */
+    GPIO_DIRECTION_SET(smi_SDA, GPIO_DIR_IN);
+    GPIO_DIRECTION_SET(smi_SCK, GPIO_DIR_IN);
+}
+
+#endif /* End of #if defined(MDC_MDIO_OPERATION) || defined(SPI_OPERATION) || defined(SPI_FBX_OPERATION) */
+
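+/*
+ * Bit-banged SMI frame layout, as implemented by the helpers above:
+ * start pattern, 4-bit control code 4'b1011, 3-bit code 3'b100, one
+ * R/W bit (1 = read, 0 = write), then the 16-bit register address low
+ * byte first.  The chip ACKs after each byte; on reads the CPU drives
+ * the ACK bits between the two data bytes.
+ */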
+rtk_int32 smi_read(rtk_uint32 mAddrs, rtk_uint32 *rData)
+{
+#if (!defined(MDC_MDIO_OPERATION) && !defined(SPI_OPERATION) && !defined(SPI_FBX_OPERATION))
+    rtk_uint32 rawData=0, ACK;
+    rtk_uint8  con;
+    rtk_uint32 ret = RT_ERR_OK;
+#endif
+
+    if(mAddrs > 0xFFFF)
+        return RT_ERR_INPUT;
+
+    if(rData == NULL)
+        return RT_ERR_NULL_POINTER;
+
+#if defined(MDC_MDIO_OPERATION)
+
+    /* Lock */
+    rtlglue_drvMutexLock();
+
+    /* Write address control code to register 31 */
+    MDC_MDIO_WRITE(MDC_MDIO_PREAMBLE_LEN, MDC_MDIO_PHY_ID, MDC_MDIO_CTRL0_REG, MDC_MDIO_ADDR_OP);
+
+    /* Write address to register 23 */
+    MDC_MDIO_WRITE(MDC_MDIO_PREAMBLE_LEN, MDC_MDIO_PHY_ID, MDC_MDIO_ADDRESS_REG, mAddrs);
+
+    /* Write read control code to register 21 */
+    MDC_MDIO_WRITE(MDC_MDIO_PREAMBLE_LEN, MDC_MDIO_PHY_ID, MDC_MDIO_CTRL1_REG, MDC_MDIO_READ_OP);
+
+    /* Read data from register 25 */
+    MDC_MDIO_READ(MDC_MDIO_PREAMBLE_LEN, MDC_MDIO_PHY_ID, MDC_MDIO_DATA_READ_REG, rData);
+
+    /* Unlock */
+    rtlglue_drvMutexUnlock();
+
+    return RT_ERR_OK;
+
+#elif defined(SPI_OPERATION)
+
+    /* Lock */
+    rtlglue_drvMutexLock();
+
+    /* Write 8 bits READ OP_CODE */
+    SPI_WRITE(SPI_READ_OP, SPI_READ_OP_LEN);
+
+    /* Write 16 bits register address */
+    SPI_WRITE(mAddrs, SPI_REG_LEN);
+
+    /* Read 16 bits data */
+    SPI_READ(rData, SPI_DATA_LEN);
+
+    /* Unlock */
+    rtlglue_drvMutexUnlock();
+
+    return RT_ERR_OK;
+
+#elif defined(SPI_FBX_OPERATION)
+    /* Lock */
+    rtlglue_drvMutexLock();
+
+    rtk_api_read_reg_wrapper(rData, mAddrs);
+
+    /* Unlock */
+    rtlglue_drvMutexUnlock();
+    return RT_ERR_OK;
+#else
+
+    /* Disable CPU interrupts to ensure that the SMI operation is atomic.
+       This implementation is based on the RTL865X; rewrite it when porting to another platform. */
+    rtlglue_drvMutexLock();
+
+    _smi_start();                                /* Start SMI */
+
+    _smi_writeBit(0x0b, 4);                     /* CTRL code: 4'b1011 for RTL8370 */
+
+    _smi_writeBit(0x4, 3);                        /* CTRL code: 3'b100 */
+
+    _smi_writeBit(0x1, 1);                        /* 1: issue READ command */
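+/*
+ * The 0x1f00 window implements a simple indirect access engine.  Based
+ * only on the field names above, the expected (unverified) read flow is:
+ *
+ *     smi_write(RTL8367C_REG_INDRECT_ACCESS_ADDRESS, target_addr);
+ *     smi_write(RTL8367C_REG_INDRECT_ACCESS_CTRL,
+ *               RTL8367C_RW_MASK | RTL8367C_CMD_MASK);   (RW = 1 assumed read, CMD triggers)
+ *     poll RTL8367C_REG_INDRECT_ACCESS_STATUS until idle
+ *     smi_read(RTL8367C_REG_INDRECT_ACCESS_READ_DATA, &data);
+ *
+ * Confirm the RW polarity against the datasheet before use.
+ */
+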
+
+    con = 0;
+    do {
+        con++;
+        _smi_readBit(1, &ACK);                    /* ACK for issuing READ command*/
+    } while ((ACK != 0) && (con < ack_timer));
+
+    if (ACK != 0) ret = RT_ERR_FAILED;
+
+    _smi_writeBit((mAddrs&0xff), 8);             /* Set reg_addr[7:0] */
+
+    con = 0;
+    do {
+        con++;
+        _smi_readBit(1, &ACK);                    /* ACK for setting reg_addr[7:0] */
+    } while ((ACK != 0) && (con < ack_timer));
+
+    if (ACK != 0) ret = RT_ERR_FAILED;
+
+    _smi_writeBit((mAddrs>>8), 8);                 /* Set reg_addr[15:8] */
+
+    con = 0;
+    do {
+        con++;
+        _smi_readBit(1, &ACK);                    /* ACK by RTL8369 */
+    } while ((ACK != 0) && (con < ack_timer));
+    if (ACK != 0) ret = RT_ERR_FAILED;
+
+    _smi_readBit(8, &rawData);                    /* Read DATA [7:0] */
+    *rData = rawData&0xff;
+
+    _smi_writeBit(0x00, 1);                        /* ACK by CPU */
+
+    _smi_readBit(8, &rawData);                    /* Read DATA [15: 8] */
+
+    _smi_writeBit(0x01, 1);                        /* ACK by CPU */
+    *rData |= (rawData<<8);
+
+    _smi_stop();
+
+    rtlglue_drvMutexUnlock();/*enable CPU interrupt*/
+
+    return ret;
+#endif /* end of #if defined(MDC_MDIO_OPERATION) */
+}
+
+
+
+rtk_int32 smi_write(rtk_uint32 mAddrs, rtk_uint32 rData)
+{
+#if (!defined(MDC_MDIO_OPERATION) && !defined(SPI_OPERATION) && !defined(SPI_FBX_OPERATION))
+    rtk_int8 con;
+    rtk_uint32 ACK;
+    rtk_uint32 ret = RT_ERR_OK;
+#endif
+
+    if(mAddrs > 0xFFFF)
+        return RT_ERR_INPUT;
+
+    if(rData > 0xFFFF)
+        return RT_ERR_INPUT;
+
+#if defined(MDC_MDIO_OPERATION)
+
+    /* Lock */
+    rtlglue_drvMutexLock();
+
+    /* Write address control code to register 31 */
+    MDC_MDIO_WRITE(MDC_MDIO_PREAMBLE_LEN, MDC_MDIO_PHY_ID, MDC_MDIO_CTRL0_REG, MDC_MDIO_ADDR_OP);
+
+    /* Write address to register 23 */
+    MDC_MDIO_WRITE(MDC_MDIO_PREAMBLE_LEN, MDC_MDIO_PHY_ID, MDC_MDIO_ADDRESS_REG, mAddrs);
+
+    /* Write data to register 24 */
+    MDC_MDIO_WRITE(MDC_MDIO_PREAMBLE_LEN, MDC_MDIO_PHY_ID, MDC_MDIO_DATA_WRITE_REG, rData);
+
+    /* Write data control code to register 21 */
+    MDC_MDIO_WRITE(MDC_MDIO_PREAMBLE_LEN, MDC_MDIO_PHY_ID, MDC_MDIO_CTRL1_REG, MDC_MDIO_WRITE_OP);
+
+    /* Unlock */
+    rtlglue_drvMutexUnlock();
+
+    return RT_ERR_OK;
+
+#elif defined(SPI_OPERATION)
+
+    /* Lock */
+    rtlglue_drvMutexLock();
+
+    /* Write 8 bits WRITE OP_CODE */
+    SPI_WRITE(SPI_WRITE_OP, SPI_WRITE_OP_LEN);
+
+    /* Write 16 bits register address */
+    SPI_WRITE(mAddrs, SPI_REG_LEN);
+
+    /* Write 16 bits data */
+    SPI_WRITE(rData, SPI_DATA_LEN);
+
+    /* Unlock */
+    rtlglue_drvMutexUnlock();
+
+    return RT_ERR_OK;
+#elif defined(SPI_FBX_OPERATION)
+    /* Lock */
+    rtlglue_drvMutexLock();
+
+    rtk_api_write_reg_wrapper(rData, mAddrs);
+
+    /* Unlock */
+    rtlglue_drvMutexUnlock();
+    return RT_ERR_OK;
+#else
+
+    /* Disable CPU interrupts to ensure that the SMI operation is atomic.
+       This implementation is based on the RTL865X; rewrite it when porting to another platform. */
+    rtlglue_drvMutexLock();
+
+    _smi_start();                                /* Start SMI */
+
+    _smi_writeBit(0x0b, 4);                     /* CTRL code: 4'b1011 for RTL8370*/
+
+    _smi_writeBit(0x4, 3);                        /* CTRL code: 3'b100 */
+
+    _smi_writeBit(0x0, 1);                        /* 0: issue WRITE command */
+
+    con = 0;
+    do {
+        con++;
+        _smi_readBit(1, &ACK);                    /* ACK for issuing WRITE command*/
+    } while ((ACK != 0) && (con < ack_timer));
+    if (ACK != 0) ret = RT_ERR_FAILED;
+
+    _smi_writeBit((mAddrs&0xff), 8);             /* Set reg_addr[7:0] */
+
+    con = 0;
+    do {
+        con++;
+        _smi_readBit(1, &ACK);                    /* ACK for setting reg_addr[7:0] */
+    } while ((ACK != 0) && (con < ack_timer));
+    if (ACK != 0) ret = RT_ERR_FAILED;
+
+    _smi_writeBit((mAddrs>>8), 8);                 /* Set reg_addr[15:8] */
+
+    con = 0;
+    do {
+        con++;
+        _smi_readBit(1, &ACK);                    /* ACK for setting reg_addr[15:8] */
+    } while ((ACK != 0) && (con < ack_timer));
+    if (ACK != 0) ret = RT_ERR_FAILED;
+
+    _smi_writeBit(rData&0xff, 8);                /* Write Data [7:0] out */
+
+    con = 0;
+    do {
+        con++;
+        _smi_readBit(1, &ACK);                    /* ACK for writing data [7:0] */
+    } while ((ACK != 0) && (con < ack_timer));
+    if (ACK != 0) ret = RT_ERR_FAILED;
+
+    _smi_writeBit(rData>>8, 8);                    /* Write Data [15:8] out */
+
+    con = 0;
+    do {
+        con++;
+        _smi_readBit(1, &ACK);                        /* ACK for writing data [15:8] */
+    } while ((ACK != 0) && (con < ack_timer));
+    if (ACK != 0) ret = RT_ERR_FAILED;
+
+    _smi_stop();
+
+    rtlglue_drvMutexUnlock();/*enable CPU interrupt*/
+
+    return ret;
+#endif /* end of #if defined(MDC_MDIO_OPERATION) */
+}
+
diff -Nruw linux-4.4.115/drivers/misc/freebox/rtlapi/smi.h linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/smi.h
--- linux-4.4.115/drivers/misc/freebox/rtlapi/smi.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/smi.h	2019-01-22 16:16:24.723257454 +0100
@@ -0,0 +1,41 @@
+
+#ifndef __SMI_H__
+#define __SMI_H__
+
+#include <rtk_types.h>
+#include "rtk_error.h"
+
+#define MDC_MDIO_CTRL0_REG          31
+#define MDC_MDIO_START_REG          29
+#define MDC_MDIO_CTRL1_REG          21
+#define MDC_MDIO_ADDRESS_REG        23
+#define MDC_MDIO_DATA_WRITE_REG     24
+#define MDC_MDIO_DATA_READ_REG      25
+#define MDC_MDIO_PREAMBLE_LEN       32
+
+#define MDC_MDIO_START_OP          0xFFFF
+#define MDC_MDIO_ADDR_OP           0x000E
+#define MDC_MDIO_READ_OP           0x0001
+#define MDC_MDIO_WRITE_OP          0x0003
+
+#define SPI_READ_OP                 0x3
+#define SPI_WRITE_OP                0x2
+#define SPI_READ_OP_LEN             0x8
+#define SPI_WRITE_OP_LEN            0x8
+#define SPI_REG_LEN                 16
+#define SPI_DATA_LEN                16
+
+#define GPIO_DIR_IN                 1
+#define GPIO_DIR_OUT                0
+
+#define ack_timer                   5
+
+#define DELAY                        10000
+#define CLK_DURATION(clk)            { volatile int i; for(i=0; i<clk; i++); } /* volatile keeps the busy-wait from being optimized away */
+
+rtk_int32 smi_read(rtk_uint32 mAddrs, rtk_uint32 *rData);
+rtk_int32 smi_write(rtk_uint32 mAddrs, rtk_uint32 rData);
+
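+/*
+ * Illustrative use (reg_addr is any 16-bit switch register address):
+ *
+ *     rtk_uint32 val;
+ *     if (smi_read(reg_addr, &val) == RT_ERR_OK)
+ *         smi_write(reg_addr, val | some_bit_mask);
+ */
+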
+#endif /* __SMI_H__ */
+
+
diff -Nruw linux-4.4.115/drivers/misc/freebox/rtlapi/stat.c linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/stat.c
--- linux-4.4.115/drivers/misc/freebox/rtlapi/stat.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/stat.c	2019-01-22 16:16:24.723257454 +0100
@@ -0,0 +1,773 @@
+/*
+ * Copyright (C) 2013 Realtek Semiconductor Corp.
+ * All Rights Reserved.
+ *
+ * This program is the proprietary software of Realtek Semiconductor
+ * Corporation and/or its licensors, and may only be used, duplicated,
+ * modified or distributed under the authorized license from Realtek.
+ *
+ * ANY USE OF THE SOFTWARE OTHER THAN AS AUTHORIZED UNDER
+ * THIS LICENSE OR COPYRIGHT LAW IS PROHIBITED.
+ *
+ * $Revision: 80021 $
+ * $Date: 2017-06-26 10:07:17 +0800 (Mon, 26 Jun 2017) $
+ *
+ * Purpose : RTK switch high-level API for RTL8367/RTL8367C
+ * Feature : Here is a list of all functions and variables in MIB module.
+ *
+ */
+
+#include <rtk_switch.h>
+#include <rtk_error.h>
+#include <stat.h>
+#include <string.h>
+
+#include <rtl8367c_asicdrv.h>
+#include <rtl8367c_asicdrv_mib.h>
+
+#define MIB_NOT_SUPPORT     (0xFFFF)
+static rtk_api_ret_t _get_asic_mib_idx(rtk_stat_port_type_t cnt_idx, RTL8367C_MIBCOUNTER *pMib_idx)
+{
+    RTL8367C_MIBCOUNTER mib_asic_idx[STAT_PORT_CNTR_END]=
+    {
+        ifInOctets,                     /* STAT_IfInOctets */
+        dot3StatsFCSErrors,             /* STAT_Dot3StatsFCSErrors */
+        dot3StatsSymbolErrors,          /* STAT_Dot3StatsSymbolErrors */
+        dot3InPauseFrames,              /* STAT_Dot3InPauseFrames */
+        dot3ControlInUnknownOpcodes,    /* STAT_Dot3ControlInUnknownOpcodes */
+        etherStatsFragments,            /* STAT_EtherStatsFragments */
+        etherStatsJabbers,              /* STAT_EtherStatsJabbers */
+        ifInUcastPkts,                  /* STAT_IfInUcastPkts */
+        etherStatsDropEvents,           /* STAT_EtherStatsDropEvents */
+        etherStatsOctets,               /* STAT_EtherStatsOctets */
+        etherStatsUnderSizePkts,        /* STAT_EtherStatsUnderSizePkts */
+        etherOversizeStats,             /* STAT_EtherOversizeStats */
+        etherStatsPkts64Octets,         /* STAT_EtherStatsPkts64Octets */
+        etherStatsPkts65to127Octets,    /* STAT_EtherStatsPkts65to127Octets */
+        etherStatsPkts128to255Octets,   /* STAT_EtherStatsPkts128to255Octets */
+        etherStatsPkts256to511Octets,   /* STAT_EtherStatsPkts256to511Octets */
+        etherStatsPkts512to1023Octets,  /* STAT_EtherStatsPkts512to1023Octets */
+        etherStatsPkts1024to1518Octets, /* STAT_EtherStatsPkts1024to1518Octets */
+        ifInMulticastPkts,              /* STAT_EtherStatsMulticastPkts */
+        ifInBroadcastPkts,              /* STAT_EtherStatsBroadcastPkts */
+        ifOutOctets,                    /* STAT_IfOutOctets */
+        dot3StatsSingleCollisionFrames, /* STAT_Dot3StatsSingleCollisionFrames */
+        dot3StatMultipleCollisionFrames,/* STAT_Dot3StatsMultipleCollisionFrames */
+        dot3sDeferredTransmissions,     /* STAT_Dot3StatsDeferredTransmissions */
+        dot3StatsLateCollisions,        /* STAT_Dot3StatsLateCollisions */
+        etherStatsCollisions,           /* STAT_EtherStatsCollisions */
+        dot3StatsExcessiveCollisions,   /* STAT_Dot3StatsExcessiveCollisions */
+        dot3OutPauseFrames,             /* STAT_Dot3OutPauseFrames */
+        MIB_NOT_SUPPORT,                /* STAT_Dot1dBasePortDelayExceededDiscards */
+        dot1dTpPortInDiscards,          /* STAT_Dot1dTpPortInDiscards */
+        ifOutUcastPkts,                 /* STAT_IfOutUcastPkts */
+        ifOutMulticastPkts,             /* STAT_IfOutMulticastPkts */
+        ifOutBroadcastPkts,             /* STAT_IfOutBroadcastPkts */
+        outOampduPkts,                  /* STAT_OutOampduPkts */
+        inOampduPkts,                   /* STAT_InOampduPkts */
+        MIB_NOT_SUPPORT,                /* STAT_PktgenPkts */
+        inMldChecksumError,             /* STAT_InMldChecksumError */
+        inIgmpChecksumError,            /* STAT_InIgmpChecksumError */
+        inMldSpecificQuery,             /* STAT_InMldSpecificQuery */
+        inMldGeneralQuery,              /* STAT_InMldGeneralQuery */
+        inIgmpSpecificQuery,            /* STAT_InIgmpSpecificQuery */
+        inIgmpGeneralQuery,             /* STAT_InIgmpGeneralQuery */
+        inMldLeaves,                    /* STAT_InMldLeaves */
+        inIgmpLeaves,                   /* STAT_InIgmpInterfaceLeaves */
+        inIgmpJoinsSuccess,             /* STAT_InIgmpJoinsSuccess */
+        inIgmpJoinsFail,                /* STAT_InIgmpJoinsFail */
+        inMldJoinsSuccess,              /* STAT_InMldJoinsSuccess */
+        inMldJoinsFail,                 /* STAT_InMldJoinsFail */
+        inReportSuppressionDrop,        /* STAT_InReportSuppressionDrop */
+        inLeaveSuppressionDrop,         /* STAT_InLeaveSuppressionDrop */
+        outIgmpReports,                 /* STAT_OutIgmpReports */
+        outIgmpLeaves,                  /* STAT_OutIgmpLeaves */
+        outIgmpGeneralQuery,            /* STAT_OutIgmpGeneralQuery */
+        outIgmpSpecificQuery,           /* STAT_OutIgmpSpecificQuery */
+        outMldReports,                  /* STAT_OutMldReports */
+        outMldLeaves,                   /* STAT_OutMldLeaves */
+        outMldGeneralQuery,             /* STAT_OutMldGeneralQuery */
+        outMldSpecificQuery,            /* STAT_OutMldSpecificQuery */
+        inKnownMulticastPkts,           /* STAT_InKnownMulticastPkts */
+        ifInMulticastPkts,              /* STAT_IfInMulticastPkts */
+        ifInBroadcastPkts,              /* STAT_IfInBroadcastPkts */
+        ifOutDiscards                   /* STAT_IfOutDiscards */
+    };
+
+    if(cnt_idx >= STAT_PORT_CNTR_END)
+        return RT_ERR_STAT_INVALID_PORT_CNTR;
+
+    if(mib_asic_idx[cnt_idx] == MIB_NOT_SUPPORT)
+        return RT_ERR_CHIP_NOT_SUPPORTED;
+
+    *pMib_idx = mib_asic_idx[cnt_idx];
+    return RT_ERR_OK;
+}
+
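+/*
+ * Illustrative use of the translation above, mirroring the calls made
+ * later in this file:
+ *
+ *     RTL8367C_MIBCOUNTER idx;
+ *     rtk_stat_counter_t  cnt;
+ *     if (_get_asic_mib_idx(STAT_IfInOctets, &idx) == RT_ERR_OK)
+ *         rtl8367c_getAsicMIBsCounter(rtk_switch_port_L2P_get(port), idx, &cnt);
+ */
+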
+static rtk_api_ret_t _rtk_stat_global_reset(void)
+{
+    rtk_api_ret_t retVal;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if ((retVal = rtl8367c_setAsicMIBsCounterReset(TRUE,FALSE, 0)) != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_stat_port_reset(rtk_port_t port)
+{
+    rtk_api_ret_t retVal;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    /* Check port valid */
+    RTK_CHK_PORT_VALID(port);
+
+    if ((retVal = rtl8367c_setAsicMIBsCounterReset(FALSE,FALSE,1 << rtk_switch_port_L2P_get(port))) != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_stat_queueManage_reset(void)
+{
+    rtk_api_ret_t retVal;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if ((retVal = rtl8367c_setAsicMIBsCounterReset(FALSE,TRUE,0)) != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_stat_global_get(rtk_stat_global_type_t cntr_idx, rtk_stat_counter_t *pCntr)
+{
+    rtk_api_ret_t retVal;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if(NULL == pCntr)
+        return RT_ERR_NULL_POINTER;
+
+    if (cntr_idx!=DOT1D_TP_LEARNED_ENTRY_DISCARDS_INDEX)
+        return RT_ERR_STAT_INVALID_GLOBAL_CNTR;
+
+    if ((retVal = rtl8367c_getAsicMIBsCounter(0, dot1dTpLearnedEntryDiscards, pCntr)) != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_stat_global_getAll(rtk_stat_global_cntr_t *pGlobal_cntrs)
+{
+    rtk_api_ret_t retVal;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if(NULL == pGlobal_cntrs)
+        return RT_ERR_NULL_POINTER;
+
+    if ((retVal = rtl8367c_getAsicMIBsCounter(0,DOT1D_TP_LEARNED_ENTRY_DISCARDS_INDEX, &pGlobal_cntrs->dot1dTpLearnedEntryDiscards)) != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_stat_port_get(rtk_port_t port, rtk_stat_port_type_t cntr_idx, rtk_stat_counter_t *pCntr)
+{
+    rtk_api_ret_t       retVal;
+    RTL8367C_MIBCOUNTER mib_idx;
+    rtk_stat_counter_t  second_cnt;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if(NULL == pCntr)
+        return RT_ERR_NULL_POINTER;
+
+    /* Check port valid */
+    RTK_CHK_PORT_VALID(port);
+
+    if (cntr_idx>=STAT_PORT_CNTR_END)
+        return RT_ERR_STAT_INVALID_PORT_CNTR;
+
+    if((retVal = _get_asic_mib_idx(cntr_idx, &mib_idx)) != RT_ERR_OK)
+        return retVal;
+
+    if(mib_idx == MIB_NOT_SUPPORT)
+        return RT_ERR_CHIP_NOT_SUPPORTED;
+
+    if ((retVal = rtl8367c_getAsicMIBsCounter(rtk_switch_port_L2P_get(port), mib_idx, pCntr)) != RT_ERR_OK)
+        return retVal;
+
+    if(cntr_idx == STAT_EtherStatsMulticastPkts)
+    {
+        if((retVal = _get_asic_mib_idx(STAT_IfOutMulticastPkts, &mib_idx)) != RT_ERR_OK)
+            return retVal;
+
+        if((retVal = rtl8367c_getAsicMIBsCounter(rtk_switch_port_L2P_get(port), mib_idx, &second_cnt)) != RT_ERR_OK)
+            return retVal;
+
+        *pCntr += second_cnt;
+    }
+
+    if(cntr_idx == STAT_EtherStatsBroadcastPkts)
+    {
+        if((retVal = _get_asic_mib_idx(STAT_IfOutBroadcastPkts, &mib_idx)) != RT_ERR_OK)
+            return retVal;
+
+        if((retVal = rtl8367c_getAsicMIBsCounter(rtk_switch_port_L2P_get(port), mib_idx, &second_cnt)) != RT_ERR_OK)
+            return retVal;
+
+        *pCntr += second_cnt;
+    }
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_stat_port_getAll(rtk_port_t port, rtk_stat_port_cntr_t *pPort_cntrs)
+{
+    rtk_api_ret_t retVal;
+    rtk_uint32 mibIndex;
+    rtk_uint64 mibCounter;
+    rtk_uint32 *accessPtr;
+    /* length, in 32-bit words, of each MIB counter (used to advance the output pointer) */
+    CONST_T rtk_uint16 mibLength[STAT_PORT_CNTR_END]= {
+        2,1,1,1,1,1,1,1,1,
+        2,1,1,1,1,1,1,1,1,1,1,
+        2,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
+        1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1};
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if(NULL == pPort_cntrs)
+        return RT_ERR_NULL_POINTER;
+
+    /* Check port valid */
+    RTK_CHK_PORT_VALID(port);
+
+    accessPtr = (rtk_uint32*)pPort_cntrs;
+    for (mibIndex=0;mibIndex<STAT_PORT_CNTR_END;mibIndex++)
+    {
+        if ((retVal = _rtk_stat_port_get(port, mibIndex, &mibCounter)) != RT_ERR_OK)
+        {
+            if (retVal == RT_ERR_CHIP_NOT_SUPPORTED)
+                mibCounter = 0;
+            else
+                return retVal;
+        }
+
+        if (2 == mibLength[mibIndex])
+            *(rtk_uint64*)accessPtr = mibCounter;
+        else if (1 == mibLength[mibIndex])
+            *accessPtr = mibCounter;
+        else
+            return RT_ERR_FAILED;
+
+        accessPtr+=mibLength[mibIndex];
+    }
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_stat_logging_counterCfg_set(rtk_uint32 idx, rtk_logging_counter_mode_t mode, rtk_logging_counter_type_t type)
+{
+    rtk_api_ret_t retVal;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if(idx > RTL8367C_MIB_MAX_LOG_CNT_IDX)
+        return RT_ERR_OUT_OF_RANGE;
+
+    if((idx % 2) == 1)
+        return RT_ERR_INPUT;
+
+    if(mode >= LOGGING_MODE_END)
+        return RT_ERR_OUT_OF_RANGE;
+
+    if(type >= LOGGING_TYPE_END)
+        return RT_ERR_OUT_OF_RANGE;
+
+    if((retVal = rtl8367c_setAsicMIBsLoggingType((idx / 2), (rtk_uint32)type)) != RT_ERR_OK)
+        return retVal;
+
+    if((retVal = rtl8367c_setAsicMIBsLoggingMode((idx / 2), (rtk_uint32)mode)) != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_stat_logging_counterCfg_get(rtk_uint32 idx, rtk_logging_counter_mode_t *pMode, rtk_logging_counter_type_t *pType)
+{
+    rtk_api_ret_t   retVal;
+    rtk_uint32      type, mode;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if(idx > RTL8367C_MIB_MAX_LOG_CNT_IDX)
+        return RT_ERR_OUT_OF_RANGE;
+
+    if((idx % 2) == 1)
+        return RT_ERR_INPUT;
+
+    if(pMode == NULL)
+        return RT_ERR_NULL_POINTER;
+
+    if(pType == NULL)
+        return RT_ERR_NULL_POINTER;
+
+    if((retVal = rtl8367c_getAsicMIBsLoggingType((idx / 2), &type)) != RT_ERR_OK)
+        return retVal;
+
+    if((retVal = rtl8367c_getAsicMIBsLoggingMode((idx / 2), &mode)) != RT_ERR_OK)
+        return retVal;
+
+    *pMode = (rtk_logging_counter_mode_t)mode;
+    *pType = (rtk_logging_counter_type_t)type;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_stat_logging_counter_reset(rtk_uint32 idx)
+{
+    rtk_api_ret_t   retVal;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if(idx > RTL8367C_MIB_MAX_LOG_CNT_IDX)
+        return RT_ERR_OUT_OF_RANGE;
+
+    if((retVal = rtl8367c_setAsicMIBsResetLoggingCounter(idx)) != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_stat_logging_counter_get(rtk_uint32 idx, rtk_uint32 *pCnt)
+{
+    rtk_api_ret_t   retVal;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if(NULL == pCnt)
+        return RT_ERR_NULL_POINTER;
+
+    if(idx > RTL8367C_MIB_MAX_LOG_CNT_IDX)
+        return RT_ERR_OUT_OF_RANGE;
+
+    if((retVal = rtl8367c_getAsicMIBsLogCounter(idx, pCnt)) != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_stat_lengthMode_set(rtk_stat_lengthMode_t txMode, rtk_stat_lengthMode_t rxMode)
+{
+    rtk_api_ret_t   retVal;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if(txMode >= LENGTH_MODE_END)
+        return RT_ERR_INPUT;
+
+    if(rxMode >= LENGTH_MODE_END)
+        return RT_ERR_INPUT;
+
+    if((retVal = rtl8367c_setAsicMIBsLength((rtk_uint32)txMode, (rtk_uint32)rxMode)) != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_stat_lengthMode_get(rtk_stat_lengthMode_t *pTxMode, rtk_stat_lengthMode_t *pRxMode)
+{
+    rtk_api_ret_t   retVal;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if(NULL == pTxMode)
+        return RT_ERR_NULL_POINTER;
+
+    if(NULL == pRxMode)
+        return RT_ERR_NULL_POINTER;
+
+    if((retVal = rtl8367c_getAsicMIBsLength((rtk_uint32 *)pTxMode, (rtk_uint32 *)pRxMode)) != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+
+/* Function Name:
+ *      rtk_stat_global_reset
+ * Description:
+ *      Reset global MIB counter.
+ * Input:
+ *      None
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ * Note:
+ *      Reset the MIB counters of all ports. The API performs a global reset when the port mask covers all ports.
+ */
+rtk_api_ret_t rtk_stat_global_reset(void)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_stat_global_reset();
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_stat_port_reset
+ * Description:
+ *      Reset per port MIB counter by port.
+ * Input:
+ *      port - port id.
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ * Note:
+ *
+ */
+rtk_api_ret_t rtk_stat_port_reset(rtk_port_t port)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_stat_port_reset(port);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_stat_queueManage_reset
+ * Description:
+ *      Reset queue manage MIB counter.
+ * Input:
+ *      None
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ * Note:
+ *
+ */
+rtk_api_ret_t rtk_stat_queueManage_reset(void)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_stat_queueManage_reset();
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+
+/* Function Name:
+ *      rtk_stat_global_get
+ * Description:
+ *      Get global MIB counter
+ * Input:
+ *      cntr_idx - global counter index.
+ * Output:
+ *      pCntr - global counter value.
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_INPUT        - Invalid input parameters.
+ * Note:
+ *      Get global MIB counter by index definition.
+ */
+rtk_api_ret_t rtk_stat_global_get(rtk_stat_global_type_t cntr_idx, rtk_stat_counter_t *pCntr)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_stat_global_get(cntr_idx, pCntr);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_stat_global_getAll
+ * Description:
+ *      Get all global MIB counters
+ * Input:
+ *      None
+ * Output:
+ *      pGlobal_cntrs - global counter structure.
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_INPUT        - Invalid input parameters.
+ * Note:
+ *      Get all global MIB counters by index definition.
+ */
+rtk_api_ret_t rtk_stat_global_getAll(rtk_stat_global_cntr_t *pGlobal_cntrs)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_stat_global_getAll(pGlobal_cntrs);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_stat_port_get
+ * Description:
+ *      Get per port MIB counter by index
+ * Input:
+ *      port        - port id.
+ *      cntr_idx    - port counter index.
+ * Output:
+ *      pCntr - retrieved MIB counter.
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ * Note:
+ *      Get per port MIB counter by index definition.
+ */
+rtk_api_ret_t rtk_stat_port_get(rtk_port_t port, rtk_stat_port_type_t cntr_idx, rtk_stat_counter_t *pCntr)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_stat_port_get(port, cntr_idx, pCntr);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_stat_port_getAll
+ * Description:
+ *      Get all counters of one specified port in the specified device.
+ * Input:
+ *      port - port id.
+ * Output:
+ *      pPort_cntrs - buffer pointer of counter value.
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_INPUT        - Invalid input parameters.
+ * Note:
+ *      Get all MIB counters of one port.
+ */
+rtk_api_ret_t rtk_stat_port_getAll(rtk_port_t port, rtk_stat_port_cntr_t *pPort_cntrs)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_stat_port_getAll(port, pPort_cntrs);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
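+
+/*
+ * Usage sketch (illustrative only, not part of the SDK): dump the octet
+ * counters of logical port 0. Assumes the switch has already been
+ * initialized, and that printk() from the kernel headers is available in
+ * this build.
+ */
+#if 0
+static void example_dump_port0_counters(void)
+{
+    rtk_stat_port_cntr_t cntrs;
+
+    /* fetch every per-port MIB counter in one call */
+    if (rtk_stat_port_getAll(0, &cntrs) != RT_ERR_OK)
+        return;
+
+    printk("port0: ifInOctets=%llu ifOutOctets=%llu\n",
+           (unsigned long long)cntrs.ifInOctets,
+           (unsigned long long)cntrs.ifOutOctets);
+}
+#endif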
+
+/* Function Name:
+ *      rtk_stat_logging_counterCfg_set
+ * Description:
+ *      Set the type and mode of Logging Counter
+ * Input:
+ *      idx     - The index of Logging Counter. Must be an even number (0, 2, 4, ..., 30).
+ *      mode    - 32 bits or 64 bits mode
+ *      type    - Packet counter or byte counter
+ * Output:
+ *      None.
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_OUT_OF_RANGE - Out of range.
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_INPUT        - Invalid input parameters.
+ * Note:
+ *      Set the type and mode of Logging Counter.
+ */
+rtk_api_ret_t rtk_stat_logging_counterCfg_set(rtk_uint32 idx, rtk_logging_counter_mode_t mode, rtk_logging_counter_type_t type)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_stat_logging_counterCfg_set(idx, mode, type);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_stat_logging_counterCfg_get
+ * Description:
+ *      Get the type and mode of Logging Counter
+ * Input:
+ *      idx     - The index of Logging Counter. Must be an even number (0, 2, 4, ..., 30).
+ * Output:
+ *      pMode   - 32 bits or 64 bits mode
+ *      pType   - Packet counter or byte counter
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_OUT_OF_RANGE - Out of range.
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_NULL_POINTER - NULL Pointer
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_INPUT        - Invalid input parameters.
+ * Note:
+ *      Get the type and mode of Logging Counter.
+ */
+rtk_api_ret_t rtk_stat_logging_counterCfg_get(rtk_uint32 idx, rtk_logging_counter_mode_t *pMode, rtk_logging_counter_type_t *pType)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_stat_logging_counterCfg_get(idx, pMode, pType);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
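+
+/*
+ * Usage sketch (illustrative only, not part of the SDK): configure logging
+ * counter pair 0 as a 64-bit byte counter, then read the setting back.
+ */
+#if 0
+static rtk_api_ret_t example_logging_counter_cfg(void)
+{
+    rtk_logging_counter_mode_t mode;
+    rtk_logging_counter_type_t type;
+    rtk_api_ret_t retVal;
+
+    /* idx must be the even base of a pair; 0 configures pair 0 */
+    retVal = rtk_stat_logging_counterCfg_set(0, LOGGING_MODE_64BIT,
+                                             LOGGING_TYPE_BYTE);
+    if (retVal != RT_ERR_OK)
+        return retVal;
+
+    return rtk_stat_logging_counterCfg_get(0, &mode, &type);
+}
+#endif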
+
+
+/* Function Name:
+ *      rtk_stat_logging_counter_reset
+ * Description:
+ *      Reset Logging Counter
+ * Input:
+ *      idx     - The index of Logging Counter. (0~31)
+ * Output:
+ *      None.
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_OUT_OF_RANGE - Out of range.
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ * Note:
+ *      Reset Logging Counter.
+ */
+rtk_api_ret_t rtk_stat_logging_counter_reset(rtk_uint32 idx)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_stat_logging_counter_reset(idx);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_stat_logging_counter_get
+ * Description:
+ *      Get Logging Counter
+ * Input:
+ *      idx     - The index of Logging Counter. (0~31)
+ * Output:
+ *      pCnt    - Logging counter value
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_OUT_OF_RANGE - Out of range.
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ * Note:
+ *      Get Logging Counter.
+ */
+rtk_api_ret_t rtk_stat_logging_counter_get(rtk_uint32 idx, rtk_uint32 *pCnt)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_stat_logging_counter_get(idx, pCnt);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_stat_lengthMode_set
+ * Description:
+ *      Set length counting mode.
+ * Input:
+ *      txMode     - The TX length counting mode
+ *      rxMode     - The RX length counting mode
+ * Output:
+ *      None.
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_INPUT        - Out of range.
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ * Note:
+ *
+ */
+rtk_api_ret_t rtk_stat_lengthMode_set(rtk_stat_lengthMode_t txMode, rtk_stat_lengthMode_t rxMode)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_stat_lengthMode_set(txMode, rxMode);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_stat_lengthMode_get
+ * Description:
+ *      Get length counting mode.
+ * Input:
+ *      None.
+ * Output:
+ *      pTxMode       - The TX length counting mode
+ *      pRxMode       - The RX length counting mode
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_INPUT        - Out of range.
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ * Note:
+ */
+rtk_api_ret_t rtk_stat_lengthMode_get(rtk_stat_lengthMode_t *pTxMode, rtk_stat_lengthMode_t *pRxMode)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_stat_lengthMode_get(pTxMode, pRxMode);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
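+
+/*
+ * Usage sketch (illustrative only, not part of the SDK): make both the TX
+ * and RX MIB length counting include VLAN tag bytes. The INC/EXC_TAG enum
+ * names are read here as "include/exclude tag bytes in the counted
+ * length"; treat that reading as an assumption.
+ */
+#if 0
+static rtk_api_ret_t example_length_mode(void)
+{
+    return rtk_stat_lengthMode_set(LENGTH_MODE_INC_TAG, LENGTH_MODE_INC_TAG);
+}
+#endif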
+
+
diff -Nruw linux-4.4.115-fbx/drivers/misc/freebox./rtlapi/stat.h linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/stat.h
--- linux-4.4.115-fbx/drivers/misc/freebox./rtlapi/stat.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/stat.h	2019-01-22 16:16:24.723257454 +0100
@@ -0,0 +1,435 @@
+/*
+ * Copyright (C) 2013 Realtek Semiconductor Corp.
+ * All Rights Reserved.
+ *
+ * This program is the proprietary software of Realtek Semiconductor
+ * Corporation and/or its licensors, and only be used, duplicated,
+ * modified or distributed under the authorized license from Realtek.
+ *
+ * ANY USE OF THE SOFTWARE OTHER THAN AS AUTHORIZED UNDER
+ * THIS LICENSE OR COPYRIGHT LAW IS PROHIBITED.
+ *
+ * Purpose : RTL8367/RTL8367C switch high-level API
+ *
+ * Feature : The file includes MIB module high-layer API definition
+ *
+ */
+
+#ifndef __RTK_API_STAT_H__
+#define __RTK_API_STAT_H__
+
+/*
+ * Data Type Declaration
+ */
+typedef rtk_u_long_t rtk_stat_counter_t;
+
+/* global statistic counter structure */
+typedef struct rtk_stat_global_cntr_s
+{
+    rtk_uint64 dot1dTpLearnedEntryDiscards;
+}rtk_stat_global_cntr_t;
+
+typedef enum rtk_stat_global_type_e
+{
+    DOT1D_TP_LEARNED_ENTRY_DISCARDS_INDEX = 58,
+    MIB_GLOBAL_CNTR_END
+}rtk_stat_global_type_t;
+
+/* port statistic counter structure */
+typedef struct rtk_stat_port_cntr_s
+{
+    rtk_uint64 ifInOctets;
+    rtk_uint32 dot3StatsFCSErrors;
+    rtk_uint32 dot3StatsSymbolErrors;
+    rtk_uint32 dot3InPauseFrames;
+    rtk_uint32 dot3ControlInUnknownOpcodes;
+    rtk_uint32 etherStatsFragments;
+    rtk_uint32 etherStatsJabbers;
+    rtk_uint32 ifInUcastPkts;
+    rtk_uint32 etherStatsDropEvents;
+    rtk_uint64 etherStatsOctets;
+    rtk_uint32 etherStatsUndersizePkts;
+    rtk_uint32 etherStatsOversizePkts;
+    rtk_uint32 etherStatsPkts64Octets;
+    rtk_uint32 etherStatsPkts65to127Octets;
+    rtk_uint32 etherStatsPkts128to255Octets;
+    rtk_uint32 etherStatsPkts256to511Octets;
+    rtk_uint32 etherStatsPkts512to1023Octets;
+    rtk_uint32 etherStatsPkts1024toMaxOctets;
+    rtk_uint32 etherStatsMcastPkts;
+    rtk_uint32 etherStatsBcastPkts;
+    rtk_uint64 ifOutOctets;
+    rtk_uint32 dot3StatsSingleCollisionFrames;
+    rtk_uint32 dot3StatsMultipleCollisionFrames;
+    rtk_uint32 dot3StatsDeferredTransmissions;
+    rtk_uint32 dot3StatsLateCollisions;
+    rtk_uint32 etherStatsCollisions;
+    rtk_uint32 dot3StatsExcessiveCollisions;
+    rtk_uint32 dot3OutPauseFrames;
+    rtk_uint32 dot1dBasePortDelayExceededDiscards;
+    rtk_uint32 dot1dTpPortInDiscards;
+    rtk_uint32 ifOutUcastPkts;
+    rtk_uint32 ifOutMulticastPkts;
+    rtk_uint32 ifOutBrocastPkts;
+    rtk_uint32 outOampduPkts;
+    rtk_uint32 inOampduPkts;
+    rtk_uint32 pktgenPkts;
+    rtk_uint32 inMldChecksumError;
+    rtk_uint32 inIgmpChecksumError;
+    rtk_uint32 inMldSpecificQuery;
+    rtk_uint32 inMldGeneralQuery;
+    rtk_uint32 inIgmpSpecificQuery;
+    rtk_uint32 inIgmpGeneralQuery;
+    rtk_uint32 inMldLeaves;
+    rtk_uint32 inIgmpLeaves;
+    rtk_uint32 inIgmpJoinsSuccess;
+    rtk_uint32 inIgmpJoinsFail;
+    rtk_uint32 inMldJoinsSuccess;
+    rtk_uint32 inMldJoinsFail;
+    rtk_uint32 inReportSuppressionDrop;
+    rtk_uint32 inLeaveSuppressionDrop;
+    rtk_uint32 outIgmpReports;
+    rtk_uint32 outIgmpLeaves;
+    rtk_uint32 outIgmpGeneralQuery;
+    rtk_uint32 outIgmpSpecificQuery;
+    rtk_uint32 outMldReports;
+    rtk_uint32 outMldLeaves;
+    rtk_uint32 outMldGeneralQuery;
+    rtk_uint32 outMldSpecificQuery;
+    rtk_uint32 inKnownMulticastPkts;
+    rtk_uint32 ifInMulticastPkts;
+    rtk_uint32 ifInBroadcastPkts;
+    rtk_uint32 ifOutDiscards;
+}rtk_stat_port_cntr_t;
+
+/* port statistic counter index */
+typedef enum rtk_stat_port_type_e
+{
+    STAT_IfInOctets = 0,
+    STAT_Dot3StatsFCSErrors,
+    STAT_Dot3StatsSymbolErrors,
+    STAT_Dot3InPauseFrames,
+    STAT_Dot3ControlInUnknownOpcodes,
+    STAT_EtherStatsFragments,
+    STAT_EtherStatsJabbers,
+    STAT_IfInUcastPkts,
+    STAT_EtherStatsDropEvents,
+    STAT_EtherStatsOctets,
+    STAT_EtherStatsUnderSizePkts,
+    STAT_EtherOversizeStats,
+    STAT_EtherStatsPkts64Octets,
+    STAT_EtherStatsPkts65to127Octets,
+    STAT_EtherStatsPkts128to255Octets,
+    STAT_EtherStatsPkts256to511Octets,
+    STAT_EtherStatsPkts512to1023Octets,
+    STAT_EtherStatsPkts1024to1518Octets,
+    STAT_EtherStatsMulticastPkts,
+    STAT_EtherStatsBroadcastPkts,
+    STAT_IfOutOctets,
+    STAT_Dot3StatsSingleCollisionFrames,
+    STAT_Dot3StatsMultipleCollisionFrames,
+    STAT_Dot3StatsDeferredTransmissions,
+    STAT_Dot3StatsLateCollisions,
+    STAT_EtherStatsCollisions,
+    STAT_Dot3StatsExcessiveCollisions,
+    STAT_Dot3OutPauseFrames,
+    STAT_Dot1dBasePortDelayExceededDiscards,
+    STAT_Dot1dTpPortInDiscards,
+    STAT_IfOutUcastPkts,
+    STAT_IfOutMulticastPkts,
+    STAT_IfOutBroadcastPkts,
+    STAT_OutOampduPkts,
+    STAT_InOampduPkts,
+    STAT_PktgenPkts,
+    STAT_InMldChecksumError,
+    STAT_InIgmpChecksumError,
+    STAT_InMldSpecificQuery,
+    STAT_InMldGeneralQuery,
+    STAT_InIgmpSpecificQuery,
+    STAT_InIgmpGeneralQuery,
+    STAT_InMldLeaves,
+    STAT_InIgmpInterfaceLeaves,
+    STAT_InIgmpJoinsSuccess,
+    STAT_InIgmpJoinsFail,
+    STAT_InMldJoinsSuccess,
+    STAT_InMldJoinsFail,
+    STAT_InReportSuppressionDrop,
+    STAT_InLeaveSuppressionDrop,
+    STAT_OutIgmpReports,
+    STAT_OutIgmpLeaves,
+    STAT_OutIgmpGeneralQuery,
+    STAT_OutIgmpSpecificQuery,
+    STAT_OutMldReports,
+    STAT_OutMldLeaves,
+    STAT_OutMldGeneralQuery,
+    STAT_OutMldSpecificQuery,
+    STAT_InKnownMulticastPkts,
+    STAT_IfInMulticastPkts,
+    STAT_IfInBroadcastPkts,
+    STAT_IfOutDiscards,
+    STAT_PORT_CNTR_END
+}rtk_stat_port_type_t;
+
+typedef enum rtk_logging_counter_mode_e
+{
+    LOGGING_MODE_32BIT = 0,
+    LOGGING_MODE_64BIT,
+    LOGGING_MODE_END
+}rtk_logging_counter_mode_t;
+
+typedef enum rtk_logging_counter_type_e
+{
+    LOGGING_TYPE_PACKET = 0,
+    LOGGING_TYPE_BYTE,
+    LOGGING_TYPE_END
+}rtk_logging_counter_type_t;
+
+typedef enum rtk_stat_lengthMode_e
+{
+    LENGTH_MODE_EXC_TAG = 0,
+    LENGTH_MODE_INC_TAG,
+    LENGTH_MODE_END
+}rtk_stat_lengthMode_t;
+
+
+
+/* Function Name:
+ *      rtk_stat_global_reset
+ * Description:
+ *      Reset global MIB counter.
+ * Input:
+ *      None
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ * Note:
+ *      Reset the MIB counters of all ports. The API performs a global reset when the port mask covers all ports.
+ */
+extern rtk_api_ret_t rtk_stat_global_reset(void);
+
+/* Function Name:
+ *      rtk_stat_port_reset
+ * Description:
+ *      Reset per port MIB counter by port.
+ * Input:
+ *      port - port id.
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ * Note:
+ *
+ */
+extern rtk_api_ret_t rtk_stat_port_reset(rtk_port_t port);
+
+/* Function Name:
+ *      rtk_stat_queueManage_reset
+ * Description:
+ *      Reset queue manage MIB counter.
+ * Input:
+ *      None
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ * Note:
+ *
+ */
+extern rtk_api_ret_t rtk_stat_queueManage_reset(void);
+
+/* Function Name:
+ *      rtk_stat_global_get
+ * Description:
+ *      Get global MIB counter
+ * Input:
+ *      cntr_idx - global counter index.
+ * Output:
+ *      pCntr - global counter value.
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_INPUT        - Invalid input parameters.
+ * Note:
+ *      Get global MIB counter by index definition.
+ */
+extern rtk_api_ret_t rtk_stat_global_get(rtk_stat_global_type_t cntr_idx, rtk_stat_counter_t *pCntr);
+
+/* Function Name:
+ *      rtk_stat_global_getAll
+ * Description:
+ *      Get all global MIB counters
+ * Input:
+ *      None
+ * Output:
+ *      pGlobal_cntrs - global counter structure.
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_INPUT        - Invalid input parameters.
+ * Note:
+ *      Get all global MIB counters by index definition.
+ */
+extern rtk_api_ret_t rtk_stat_global_getAll(rtk_stat_global_cntr_t *pGlobal_cntrs);
+
+/* Function Name:
+ *      rtk_stat_port_get
+ * Description:
+ *      Get per port MIB counter by index
+ * Input:
+ *      port        - port id.
+ *      cntr_idx    - port counter index.
+ * Output:
+ *      pCntr - retrieved MIB counter.
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ * Note:
+ *      Get per port MIB counter by index definition.
+ */
+extern rtk_api_ret_t rtk_stat_port_get(rtk_port_t port, rtk_stat_port_type_t cntr_idx, rtk_stat_counter_t *pCntr);
+
+/* Function Name:
+ *      rtk_stat_port_getAll
+ * Description:
+ *      Get all counters of one specified port in the specified device.
+ * Input:
+ *      port - port id.
+ * Output:
+ *      pPort_cntrs - buffer pointer of counter value.
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_INPUT        - Invalid input parameters.
+ * Note:
+ *      Get all MIB counters of one port.
+ */
+extern rtk_api_ret_t rtk_stat_port_getAll(rtk_port_t port, rtk_stat_port_cntr_t *pPort_cntrs);
+
+/* Function Name:
+ *      rtk_stat_logging_counterCfg_set
+ * Description:
+ *      Set the type and mode of Logging Counter
+ * Input:
+ *      idx     - The index of Logging Counter. Must be an even number (0, 2, 4, ..., 30).
+ *      mode    - 32 bits or 64 bits mode
+ *      type    - Packet counter or byte counter
+ * Output:
+ *      None.
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_OUT_OF_RANGE - Out of range.
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_INPUT        - Invalid input parameters.
+ * Note:
+ *      Set the type and mode of Logging Counter.
+ */
+extern rtk_api_ret_t rtk_stat_logging_counterCfg_set(rtk_uint32 idx, rtk_logging_counter_mode_t mode, rtk_logging_counter_type_t type);
+
+/* Function Name:
+ *      rtk_stat_logging_counterCfg_get
+ * Description:
+ *      Get the type and mode of Logging Counter
+ * Input:
+ *      idx     - The index of Logging Counter. Must be an even number (0, 2, 4, ..., 30).
+ * Output:
+ *      pMode   - 32 bits or 64 bits mode
+ *      pType   - Packet counter or byte counter
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_OUT_OF_RANGE - Out of range.
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_NULL_POINTER - NULL Pointer
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_INPUT        - Invalid input parameters.
+ * Note:
+ *      Get the type and mode of Logging Counter.
+ */
+extern rtk_api_ret_t rtk_stat_logging_counterCfg_get(rtk_uint32 idx, rtk_logging_counter_mode_t *pMode, rtk_logging_counter_type_t *pType);
+
+/* Function Name:
+ *      rtk_stat_logging_counter_reset
+ * Description:
+ *      Reset Logging Counter
+ * Input:
+ *      idx     - The index of Logging Counter. (0~31)
+ * Output:
+ *      None.
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_OUT_OF_RANGE - Out of range.
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ * Note:
+ *      Reset Logging Counter.
+ */
+extern rtk_api_ret_t rtk_stat_logging_counter_reset(rtk_uint32 idx);
+
+/* Function Name:
+ *      rtk_stat_logging_counter_get
+ * Description:
+ *      Get Logging Counter
+ * Input:
+ *      idx     - The index of Logging Counter. (0~31)
+ * Output:
+ *      pCnt    - Logging counter value
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_OUT_OF_RANGE - Out of range.
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ * Note:
+ *      Get Logging Counter.
+ */
+extern rtk_api_ret_t rtk_stat_logging_counter_get(rtk_uint32 idx, rtk_uint32 *pCnt);
+
+/* Function Name:
+ *      rtk_stat_lengthMode_set
+ * Description:
+ *      Set length counting mode.
+ * Input:
+ *      txMode     - The TX length counting mode
+ *      rxMode     - The RX length counting mode
+ * Output:
+ *      None.
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_INPUT        - Out of range.
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ * Note:
+ *
+ */
+extern rtk_api_ret_t rtk_stat_lengthMode_set(rtk_stat_lengthMode_t txMode, rtk_stat_lengthMode_t rxMode);
+
+/* Function Name:
+ *      rtk_stat_lengthMode_get
+ * Description:
+ *      Get length counting mode.
+ * Input:
+ *      None.
+ * Output:
+ *      pTxMode       - The TX length counting mode
+ *      pRxMode       - The RX length counting mode
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_INPUT        - Out of range.
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ * Note:
+ */
+extern rtk_api_ret_t rtk_stat_lengthMode_get(rtk_stat_lengthMode_t *pTxMode, rtk_stat_lengthMode_t *pRxMode);
+
+#endif /* __RTK_API_STAT_H__ */
+
diff -Nruw linux-4.4.115-fbx/drivers/misc/freebox./rtlapi/storm.c linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/storm.c
--- linux-4.4.115-fbx/drivers/misc/freebox./rtlapi/storm.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/storm.c	2019-01-22 16:16:24.723257454 +0100
@@ -0,0 +1,950 @@
+/*
+ * Copyright (C) 2013 Realtek Semiconductor Corp.
+ * All Rights Reserved.
+ *
+ * This program is the proprietary software of Realtek Semiconductor
+ * Corporation and/or its licensors, and only be used, duplicated,
+ * modified or distributed under the authorized license from Realtek.
+ *
+ * ANY USE OF THE SOFTWARE OTHER THAN AS AUTHORIZED UNDER
+ * THIS LICENSE OR COPYRIGHT LAW IS PROHIBITED.
+ *
+ * $Revision: 79496 $
+ * $Date: 2017-06-08 17:31:25 +0800 (Thu, 08 Jun 2017) $
+ *
+ * Purpose : RTK switch high-level API for RTL8367/RTL8367C
+ * Feature : Here is a list of all functions and variables in Storm module.
+ *
+ */
+
+#include <rtk_switch.h>
+#include <rtk_error.h>
+#include <storm.h>
+#include <rate.h>
+#include <string.h>
+#include <rtl8367c_asicdrv.h>
+#include <rtl8367c_asicdrv_storm.h>
+#include <rtl8367c_asicdrv_meter.h>
+#include <rtl8367c_asicdrv_rma.h>
+#include <rtl8367c_asicdrv_igmp.h>
+
+static rtk_api_ret_t _rtk_rate_stormControlMeterIdx_set(rtk_port_t port, rtk_rate_storm_group_t stormType, rtk_uint32 index)
+{
+    rtk_api_ret_t retVal;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    /* Check Port Valid */
+    RTK_CHK_PORT_VALID(port);
+
+    if (stormType >= STORM_GROUP_END)
+        return RT_ERR_SFC_UNKNOWN_GROUP;
+
+    if (index > RTK_MAX_METER_ID)
+        return RT_ERR_FILTER_METER_ID;
+
+    switch (stormType)
+    {
+        case STORM_GROUP_UNKNOWN_UNICAST:
+            if ((retVal = rtl8367c_setAsicStormFilterUnknownUnicastMeter(rtk_switch_port_L2P_get(port), index))!=RT_ERR_OK)
+                return retVal;
+            break;
+        case STORM_GROUP_UNKNOWN_MULTICAST:
+            if ((retVal = rtl8367c_setAsicStormFilterUnknownMulticastMeter(rtk_switch_port_L2P_get(port), index))!=RT_ERR_OK)
+                return retVal;
+            break;
+        case STORM_GROUP_MULTICAST:
+            if ((retVal = rtl8367c_setAsicStormFilterMulticastMeter(rtk_switch_port_L2P_get(port), index))!=RT_ERR_OK)
+                return retVal;
+            break;
+        case STORM_GROUP_BROADCAST:
+            if ((retVal = rtl8367c_setAsicStormFilterBroadcastMeter(rtk_switch_port_L2P_get(port), index))!=RT_ERR_OK)
+                return retVal;
+            break;
+        default:
+            break;
+    }
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_rate_stormControlMeterIdx_get(rtk_port_t port, rtk_rate_storm_group_t stormType, rtk_uint32 *pIndex)
+{
+    rtk_api_ret_t retVal;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    /* Check Port Valid */
+    RTK_CHK_PORT_VALID(port);
+
+    if (stormType >= STORM_GROUP_END)
+        return RT_ERR_SFC_UNKNOWN_GROUP;
+
+    if (NULL == pIndex )
+        return RT_ERR_NULL_POINTER;
+
+    switch (stormType)
+    {
+        case STORM_GROUP_UNKNOWN_UNICAST:
+            if ((retVal = rtl8367c_getAsicStormFilterUnknownUnicastMeter(rtk_switch_port_L2P_get(port), pIndex))!=RT_ERR_OK)
+                return retVal;
+            break;
+        case STORM_GROUP_UNKNOWN_MULTICAST:
+            if ((retVal = rtl8367c_getAsicStormFilterUnknownMulticastMeter(rtk_switch_port_L2P_get(port), pIndex))!=RT_ERR_OK)
+                return retVal;
+            break;
+        case STORM_GROUP_MULTICAST:
+            if ((retVal = rtl8367c_getAsicStormFilterMulticastMeter(rtk_switch_port_L2P_get(port), pIndex))!=RT_ERR_OK)
+                return retVal;
+            break;
+        case STORM_GROUP_BROADCAST:
+            if ((retVal = rtl8367c_getAsicStormFilterBroadcastMeter(rtk_switch_port_L2P_get(port), pIndex))!=RT_ERR_OK)
+                return retVal;
+            break;
+        default:
+            break;
+    }
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_rate_stormControlPortEnable_set(rtk_port_t port, rtk_rate_storm_group_t stormType, rtk_enable_t enable)
+{
+    rtk_api_ret_t retVal;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    /* Check Port Valid */
+    RTK_CHK_PORT_VALID(port);
+
+    if (stormType >= STORM_GROUP_END)
+        return RT_ERR_SFC_UNKNOWN_GROUP;
+
+    if (enable >= RTK_ENABLE_END)
+        return RT_ERR_ENABLE;
+
+    switch (stormType)
+    {
+        case STORM_GROUP_UNKNOWN_UNICAST:
+            if ((retVal = rtl8367c_setAsicStormFilterUnknownUnicastEnable(rtk_switch_port_L2P_get(port), enable)) != RT_ERR_OK)
+                return retVal;
+            break;
+        case STORM_GROUP_UNKNOWN_MULTICAST:
+            if ((retVal = rtl8367c_setAsicStormFilterUnknownMulticastEnable(rtk_switch_port_L2P_get(port), enable)) != RT_ERR_OK)
+                return retVal;
+            break;
+        case STORM_GROUP_MULTICAST:
+            if ((retVal = rtl8367c_setAsicStormFilterMulticastEnable(rtk_switch_port_L2P_get(port), enable)) != RT_ERR_OK)
+                return retVal;
+            break;
+        case STORM_GROUP_BROADCAST:
+            if ((retVal = rtl8367c_setAsicStormFilterBroadcastEnable(rtk_switch_port_L2P_get(port), enable)) != RT_ERR_OK)
+                return retVal;
+            break;
+        default:
+            break;
+    }
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_rate_stormControlPortEnable_get(rtk_port_t port, rtk_rate_storm_group_t stormType, rtk_enable_t *pEnable)
+{
+    rtk_api_ret_t retVal;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    /* Check Port Valid */
+    RTK_CHK_PORT_VALID(port);
+
+    if (stormType >= STORM_GROUP_END)
+        return RT_ERR_SFC_UNKNOWN_GROUP;
+
+    if (NULL == pEnable)
+        return RT_ERR_NULL_POINTER;
+
+    switch (stormType)
+    {
+        case STORM_GROUP_UNKNOWN_UNICAST:
+            if ((retVal = rtl8367c_getAsicStormFilterUnknownUnicastEnable(rtk_switch_port_L2P_get(port), pEnable)) != RT_ERR_OK)
+                return retVal;
+            break;
+        case STORM_GROUP_UNKNOWN_MULTICAST:
+            if ((retVal = rtl8367c_getAsicStormFilterUnknownMulticastEnable(rtk_switch_port_L2P_get(port), pEnable)) != RT_ERR_OK)
+                return retVal;
+            break;
+        case STORM_GROUP_MULTICAST:
+            if ((retVal = rtl8367c_getAsicStormFilterMulticastEnable(rtk_switch_port_L2P_get(port), pEnable)) != RT_ERR_OK)
+                return retVal;
+            break;
+        case STORM_GROUP_BROADCAST:
+            if ((retVal = rtl8367c_getAsicStormFilterBroadcastEnable(rtk_switch_port_L2P_get(port), pEnable)) != RT_ERR_OK)
+                return retVal;
+            break;
+        default:
+            break;
+    }
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_storm_bypass_set(rtk_storm_bypass_t type, rtk_enable_t enable)
+{
+    rtk_api_ret_t retVal;
+    rtl8367c_rma_t rmacfg;
+    rtk_uint32 tmp;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if (type >= BYPASS_END)
+        return RT_ERR_INPUT;
+
+    if (enable >= RTK_ENABLE_END)
+        return RT_ERR_INPUT;
+
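+    /* BYPASS_BRG_GROUP .. BYPASS_UNDEF_GARP_2F map directly to generic RMA
+     * entries; IGMP, CDP, CSSTP and LLDP use dedicated registers below. */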
+    if (type >= 0 && type <= BYPASS_UNDEF_GARP_2F)
+    {
+        if ((retVal = rtl8367c_getAsicRma(type, &rmacfg)) != RT_ERR_OK)
+            return retVal;
+
+        rmacfg.discard_storm_filter = enable;
+
+        if ((retVal = rtl8367c_setAsicRma(type, &rmacfg)) != RT_ERR_OK)
+            return retVal;
+    }
+    else if(type == BYPASS_IGMP)
+    {
+        if ((retVal = rtl8367c_setAsicIGMPBypassStormCTRL(enable)) != RT_ERR_OK)
+            return retVal;
+    }
+    else if (type == BYPASS_CDP)
+    {
+        if ((retVal = rtl8367c_getAsicRmaCdp(&rmacfg)) != RT_ERR_OK)
+            return retVal;
+
+        rmacfg.discard_storm_filter = enable;
+
+        if ((retVal = rtl8367c_setAsicRmaCdp(&rmacfg)) != RT_ERR_OK)
+            return retVal;
+    }
+    else if (type  == BYPASS_CSSTP)
+    {
+        if ((retVal = rtl8367c_getAsicRmaCsstp(&rmacfg)) != RT_ERR_OK)
+            return retVal;
+
+        rmacfg.discard_storm_filter = enable;
+
+        if ((retVal = rtl8367c_setAsicRmaCsstp(&rmacfg)) != RT_ERR_OK)
+            return retVal;
+    }
+    else if (type  == BYPASS_LLDP)
+    {
+        if ((retVal = rtl8367c_getAsicRmaLldp(&tmp, &rmacfg)) != RT_ERR_OK)
+            return retVal;
+
+        rmacfg.discard_storm_filter = enable;
+
+        if ((retVal = rtl8367c_setAsicRmaLldp(tmp, &rmacfg)) != RT_ERR_OK)
+            return retVal;
+    }
+    else
+        return RT_ERR_INPUT;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_storm_bypass_get(rtk_storm_bypass_t type, rtk_enable_t *pEnable)
+{
+    rtk_api_ret_t retVal;
+    rtl8367c_rma_t rmacfg;
+    rtk_uint32 tmp;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if (type >= BYPASS_END)
+        return RT_ERR_INPUT;
+
+    if(NULL == pEnable)
+        return RT_ERR_NULL_POINTER;
+
+    if (type >= 0 && type <= BYPASS_UNDEF_GARP_2F)
+    {
+        if ((retVal = rtl8367c_getAsicRma(type, &rmacfg)) != RT_ERR_OK)
+            return retVal;
+
+        *pEnable = rmacfg.discard_storm_filter;
+    }
+    else if(type == BYPASS_IGMP)
+    {
+        if ((retVal = rtl8367c_getAsicIGMPBypassStormCTRL(pEnable)) != RT_ERR_OK)
+            return retVal;
+    }
+    else if (type == BYPASS_CDP)
+    {
+        if ((retVal = rtl8367c_getAsicRmaCdp(&rmacfg)) != RT_ERR_OK)
+            return retVal;
+
+        *pEnable = rmacfg.discard_storm_filter;
+    }
+    else if (type == BYPASS_CSSTP)
+    {
+        if ((retVal = rtl8367c_getAsicRmaCsstp(&rmacfg)) != RT_ERR_OK)
+            return retVal;
+
+        *pEnable = rmacfg.discard_storm_filter;
+    }
+    else if (type == BYPASS_LLDP)
+    {
+        if ((retVal = rtl8367c_getAsicRmaLldp(&tmp,&rmacfg)) != RT_ERR_OK)
+            return retVal;
+
+        *pEnable = rmacfg.discard_storm_filter;
+    }
+    else
+        return RT_ERR_INPUT;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_rate_stormControlExtPortmask_set(rtk_portmask_t *pPortmask)
+{
+    rtk_api_ret_t retVal;
+    rtk_uint32 pmask;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if(NULL == pPortmask)
+        return RT_ERR_NULL_POINTER;
+
+    if ((retVal = rtk_switch_portmask_L2P_get(pPortmask, &pmask)) != RT_ERR_OK)
+        return retVal;
+
+    if ((retVal = rtl8367c_setAsicStormFilterExtEnablePortMask(pmask)) != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_rate_stormControlExtPortmask_get(rtk_portmask_t *pPortmask)
+{
+    rtk_api_ret_t retVal;
+    rtk_uint32 pmask;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if(NULL == pPortmask)
+        return RT_ERR_NULL_POINTER;
+
+    if ((retVal = rtl8367c_getAsicStormFilterExtEnablePortMask(&pmask)) != RT_ERR_OK)
+        return retVal;
+
+    if ((retVal = rtk_switch_portmask_P2L_get(pmask, pPortmask)) != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_rate_stormControlExtEnable_set(rtk_rate_storm_group_t stormType, rtk_enable_t enable)
+{
+    rtk_api_ret_t retVal;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if (stormType >= STORM_GROUP_END)
+        return RT_ERR_SFC_UNKNOWN_GROUP;
+
+    if (enable >= RTK_ENABLE_END)
+        return RT_ERR_ENABLE;
+
+    switch (stormType)
+    {
+        case STORM_GROUP_UNKNOWN_UNICAST:
+            if ((retVal = rtl8367c_setAsicStormFilterExtUnknownUnicastEnable(enable)) != RT_ERR_OK)
+                return retVal;
+            break;
+        case STORM_GROUP_UNKNOWN_MULTICAST:
+            if ((retVal = rtl8367c_setAsicStormFilterExtUnknownMulticastEnable(enable)) != RT_ERR_OK)
+                return retVal;
+            break;
+        case STORM_GROUP_MULTICAST:
+            if ((retVal = rtl8367c_setAsicStormFilterExtMulticastEnable(enable)) != RT_ERR_OK)
+                return retVal;
+            break;
+        case STORM_GROUP_BROADCAST:
+            if ((retVal = rtl8367c_setAsicStormFilterExtBroadcastEnable(enable)) != RT_ERR_OK)
+                return retVal;
+            break;
+        default:
+            break;
+    }
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_rate_stormControlExtEnable_get(rtk_rate_storm_group_t stormType, rtk_enable_t *pEnable)
+{
+    rtk_api_ret_t retVal;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if (stormType >= STORM_GROUP_END)
+        return RT_ERR_SFC_UNKNOWN_GROUP;
+
+    if (NULL == pEnable)
+        return RT_ERR_NULL_POINTER;
+
+    switch (stormType)
+    {
+        case STORM_GROUP_UNKNOWN_UNICAST:
+            if ((retVal = rtl8367c_getAsicStormFilterExtUnknownUnicastEnable((rtk_uint32 *)pEnable)) != RT_ERR_OK)
+                return retVal;
+            break;
+        case STORM_GROUP_UNKNOWN_MULTICAST:
+            if ((retVal = rtl8367c_getAsicStormFilterExtUnknownMulticastEnable((rtk_uint32 *)pEnable)) != RT_ERR_OK)
+                return retVal;
+            break;
+        case STORM_GROUP_MULTICAST:
+            if ((retVal = rtl8367c_getAsicStormFilterExtMulticastEnable((rtk_uint32 *)pEnable)) != RT_ERR_OK)
+                return retVal;
+            break;
+        case STORM_GROUP_BROADCAST:
+            if ((retVal = rtl8367c_getAsicStormFilterExtBroadcastEnable((rtk_uint32 *)pEnable)) != RT_ERR_OK)
+                return retVal;
+            break;
+        default:
+            break;
+    }
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_rate_stormControlExtMeterIdx_set(rtk_rate_storm_group_t stormType, rtk_uint32 index)
+{
+    rtk_api_ret_t retVal;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if (stormType >= STORM_GROUP_END)
+        return RT_ERR_SFC_UNKNOWN_GROUP;
+
+    if (index > RTK_MAX_METER_ID)
+        return RT_ERR_FILTER_METER_ID;
+
+    switch (stormType)
+    {
+        case STORM_GROUP_UNKNOWN_UNICAST:
+            if ((retVal = rtl8367c_setAsicStormFilterExtUnknownUnicastMeter(index))!=RT_ERR_OK)
+                return retVal;
+            break;
+        case STORM_GROUP_UNKNOWN_MULTICAST:
+            if ((retVal = rtl8367c_setAsicStormFilterExtUnknownMulticastMeter(index))!=RT_ERR_OK)
+                return retVal;
+            break;
+        case STORM_GROUP_MULTICAST:
+            if ((retVal = rtl8367c_setAsicStormFilterExtMulticastMeter(index))!=RT_ERR_OK)
+                return retVal;
+            break;
+        case STORM_GROUP_BROADCAST:
+            if ((retVal = rtl8367c_setAsicStormFilterExtBroadcastMeter(index))!=RT_ERR_OK)
+                return retVal;
+            break;
+        default:
+            break;
+    }
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_rate_stormControlExtMeterIdx_get(rtk_rate_storm_group_t stormType, rtk_uint32 *pIndex)
+{
+    rtk_api_ret_t retVal;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if (stormType >= STORM_GROUP_END)
+        return RT_ERR_SFC_UNKNOWN_GROUP;
+
+    if(NULL == pIndex)
+        return RT_ERR_NULL_POINTER;
+
+    switch (stormType)
+    {
+        case STORM_GROUP_UNKNOWN_UNICAST:
+            if ((retVal = rtl8367c_getAsicStormFilterExtUnknownUnicastMeter(pIndex))!=RT_ERR_OK)
+                return retVal;
+            break;
+        case STORM_GROUP_UNKNOWN_MULTICAST:
+            if ((retVal = rtl8367c_getAsicStormFilterExtUnknownMulticastMeter(pIndex))!=RT_ERR_OK)
+                return retVal;
+            break;
+        case STORM_GROUP_MULTICAST:
+            if ((retVal = rtl8367c_getAsicStormFilterExtMulticastMeter(pIndex))!=RT_ERR_OK)
+                return retVal;
+            break;
+        case STORM_GROUP_BROADCAST:
+            if ((retVal = rtl8367c_getAsicStormFilterExtBroadcastMeter(pIndex))!=RT_ERR_OK)
+                return retVal;
+            break;
+        default:
+            break;
+    }
+
+    return RT_ERR_OK;
+}
+
+/* Function Name:
+ *      rtk_rate_stormControlMeterIdx_set
+ * Description:
+ *      Set the storm control meter index.
+ * Input:
+ *      port       - port id
+ *      storm_type - storm group type
+ *      index       - storm control meter index.
+ * Output:
+ *      None.
+ * Return:
+ *      RT_ERR_OK
+ *      RT_ERR_FAILED
+ *      RT_ERR_PORT_ID - Invalid port id
+ *      RT_ERR_FILTER_METER_ID  - Invalid meter
+ * Note:
+ *
+ */
+rtk_api_ret_t rtk_rate_stormControlMeterIdx_set(rtk_port_t port, rtk_rate_storm_group_t stormType, rtk_uint32 index)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_rate_stormControlMeterIdx_set(port, stormType, index);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_rate_stormControlMeterIdx_get
+ * Description:
+ *      Get the storm control meter index.
+ * Input:
+ *      port       - port id
+ *      storm_type - storm group type
+ * Output:
+ *      pIndex     - storm control meter index.
+ * Return:
+ *      RT_ERR_OK
+ *      RT_ERR_FAILED
+ *      RT_ERR_PORT_ID - Invalid port id
+ *      RT_ERR_FILTER_METER_ID  - Invalid meter
+ * Note:
+ *
+ */
+rtk_api_ret_t rtk_rate_stormControlMeterIdx_get(rtk_port_t port, rtk_rate_storm_group_t stormType, rtk_uint32 *pIndex)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_rate_stormControlMeterIdx_get(port, stormType, pIndex);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
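+
+/*
+ * Usage sketch (illustrative only, not part of the SDK): point broadcast
+ * storm filtering on logical port 0 at meter 5, then enable it. The meter
+ * rate itself must be programmed through the rate/meter API (see rate.h),
+ * which is outside this module; ENABLED is the rtk_enable_t value assumed
+ * from rtk_switch.h.
+ */
+#if 0
+static rtk_api_ret_t example_bcast_storm_control(void)
+{
+    rtk_api_ret_t retVal;
+
+    /* bind the broadcast storm group of port 0 to meter index 5 */
+    retVal = rtk_rate_stormControlMeterIdx_set(0, STORM_GROUP_BROADCAST, 5);
+    if (retVal != RT_ERR_OK)
+        return retVal;
+
+    return rtk_rate_stormControlPortEnable_set(0, STORM_GROUP_BROADCAST,
+                                               ENABLED);
+}
+#endif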
+
+/* Function Name:
+ *      rtk_rate_stormControlPortEnable_set
+ * Description:
+ *      Set enable status of storm control on specified port.
+ * Input:
+ *      port       - port id
+ *      stormType  - storm group type
+ *      enable     - enable status of storm control
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK
+ *      RT_ERR_FAILED
+ *      RT_ERR_NOT_INIT          - The module is not initialized
+ *      RT_ERR_PORT_ID           - invalid port id
+ *      RT_ERR_INPUT             - invalid input parameter
+ * Note:
+ *
+ */
+rtk_api_ret_t rtk_rate_stormControlPortEnable_set(rtk_port_t port, rtk_rate_storm_group_t stormType, rtk_enable_t enable)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_rate_stormControlPortEnable_set(port, stormType, enable);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_rate_stormControlPortEnable_get
+ * Description:
+ *      Get enable status of storm control on specified port.
+ * Input:
+ *      port       - port id
+ *      stormType  - storm group type
+ * Output:
+ *      pEnable     - enable status of storm control
+ * Return:
+ *      RT_ERR_OK
+ *      RT_ERR_FAILED
+ *      RT_ERR_NOT_INIT          - The module is not initialized
+ *      RT_ERR_PORT_ID           - invalid port id
+ *      RT_ERR_INPUT             - invalid input parameter
+ * Note:
+ *
+ */
+rtk_api_ret_t rtk_rate_stormControlPortEnable_get(rtk_port_t port, rtk_rate_storm_group_t stormType, rtk_enable_t *pEnable)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_rate_stormControlPortEnable_get(port, stormType, pEnable);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_storm_bypass_set
+ * Description:
+ *      Set bypass storm filter control configuration.
+ * Input:
+ *      type    - Bypass storm filter control type.
+ *      enable  - Bypass status.
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_INPUT        - Invalid input parameters.
+ *      RT_ERR_ENABLE       - Invalid enable parameter
+ * Note:
+ *
+ *      This API can set the bypass storm filter control frame types, including RMA and IGMP frames.
+ *      The bypass frame types are as follows:
+ *      - BYPASS_BRG_GROUP,
+ *      - BYPASS_FD_PAUSE,
+ *      - BYPASS_SP_MCAST,
+ *      - BYPASS_1X_PAE,
+ *      - BYPASS_UNDEF_BRG_04,
+ *      - BYPASS_UNDEF_BRG_05,
+ *      - BYPASS_UNDEF_BRG_06,
+ *      - BYPASS_UNDEF_BRG_07,
+ *      - BYPASS_PROVIDER_BRIDGE_GROUP_ADDRESS,
+ *      - BYPASS_UNDEF_BRG_09,
+ *      - BYPASS_UNDEF_BRG_0A,
+ *      - BYPASS_UNDEF_BRG_0B,
+ *      - BYPASS_UNDEF_BRG_0C,
+ *      - BYPASS_PROVIDER_BRIDGE_GVRP_ADDRESS,
+ *      - BYPASS_8021AB,
+ *      - BYPASS_UNDEF_BRG_0F,
+ *      - BYPASS_BRG_MNGEMENT,
+ *      - BYPASS_UNDEFINED_11,
+ *      - BYPASS_UNDEFINED_12,
+ *      - BYPASS_UNDEFINED_13,
+ *      - BYPASS_UNDEFINED_14,
+ *      - BYPASS_UNDEFINED_15,
+ *      - BYPASS_UNDEFINED_16,
+ *      - BYPASS_UNDEFINED_17,
+ *      - BYPASS_UNDEFINED_18,
+ *      - BYPASS_UNDEFINED_19,
+ *      - BYPASS_UNDEFINED_1A,
+ *      - BYPASS_UNDEFINED_1B,
+ *      - BYPASS_UNDEFINED_1C,
+ *      - BYPASS_UNDEFINED_1D,
+ *      - BYPASS_UNDEFINED_1E,
+ *      - BYPASS_UNDEFINED_1F,
+ *      - BYPASS_GMRP,
+ *      - BYPASS_GVRP,
+ *      - BYPASS_UNDEF_GARP_22,
+ *      - BYPASS_UNDEF_GARP_23,
+ *      - BYPASS_UNDEF_GARP_24,
+ *      - BYPASS_UNDEF_GARP_25,
+ *      - BYPASS_UNDEF_GARP_26,
+ *      - BYPASS_UNDEF_GARP_27,
+ *      - BYPASS_UNDEF_GARP_28,
+ *      - BYPASS_UNDEF_GARP_29,
+ *      - BYPASS_UNDEF_GARP_2A,
+ *      - BYPASS_UNDEF_GARP_2B,
+ *      - BYPASS_UNDEF_GARP_2C,
+ *      - BYPASS_UNDEF_GARP_2D,
+ *      - BYPASS_UNDEF_GARP_2E,
+ *      - BYPASS_UNDEF_GARP_2F,
+ *      - BYPASS_IGMP.
+ *      - BYPASS_CDP.
+ *      - BYPASS_CSSTP.
+ *      - BYPASS_LLDP.
+ */
+rtk_api_ret_t rtk_storm_bypass_set(rtk_storm_bypass_t type, rtk_enable_t enable)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_storm_bypass_set(type, enable);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_storm_bypass_get
+ * Description:
+ *      Get bypass storm filter control configuration.
+ * Input:
+ *      type - Bypass storm filter control type.
+ * Output:
+ *      pEnable - Bypass status.
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_INPUT        - Invalid input parameters.
+ * Note:
+ *      This API can get the bypass storm filter control frame types, including RMA and IGMP frames.
+ *      The bypass frame types are as follows:
+ *      - BYPASS_BRG_GROUP,
+ *      - BYPASS_FD_PAUSE,
+ *      - BYPASS_SP_MCAST,
+ *      - BYPASS_1X_PAE,
+ *      - BYPASS_UNDEF_BRG_04,
+ *      - BYPASS_UNDEF_BRG_05,
+ *      - BYPASS_UNDEF_BRG_06,
+ *      - BYPASS_UNDEF_BRG_07,
+ *      - BYPASS_PROVIDER_BRIDGE_GROUP_ADDRESS,
+ *      - BYPASS_UNDEF_BRG_09,
+ *      - BYPASS_UNDEF_BRG_0A,
+ *      - BYPASS_UNDEF_BRG_0B,
+ *      - BYPASS_UNDEF_BRG_0C,
+ *      - BYPASS_PROVIDER_BRIDGE_GVRP_ADDRESS,
+ *      - BYPASS_8021AB,
+ *      - BYPASS_UNDEF_BRG_0F,
+ *      - BYPASS_BRG_MNGEMENT,
+ *      - BYPASS_UNDEFINED_11,
+ *      - BYPASS_UNDEFINED_12,
+ *      - BYPASS_UNDEFINED_13,
+ *      - BYPASS_UNDEFINED_14,
+ *      - BYPASS_UNDEFINED_15,
+ *      - BYPASS_UNDEFINED_16,
+ *      - BYPASS_UNDEFINED_17,
+ *      - BYPASS_UNDEFINED_18,
+ *      - BYPASS_UNDEFINED_19,
+ *      - BYPASS_UNDEFINED_1A,
+ *      - BYPASS_UNDEFINED_1B,
+ *      - BYPASS_UNDEFINED_1C,
+ *      - BYPASS_UNDEFINED_1D,
+ *      - BYPASS_UNDEFINED_1E,
+ *      - BYPASS_UNDEFINED_1F,
+ *      - BYPASS_GMRP,
+ *      - BYPASS_GVRP,
+ *      - BYPASS_UNDEF_GARP_22,
+ *      - BYPASS_UNDEF_GARP_23,
+ *      - BYPASS_UNDEF_GARP_24,
+ *      - BYPASS_UNDEF_GARP_25,
+ *      - BYPASS_UNDEF_GARP_26,
+ *      - BYPASS_UNDEF_GARP_27,
+ *      - BYPASS_UNDEF_GARP_28,
+ *      - BYPASS_UNDEF_GARP_29,
+ *      - BYPASS_UNDEF_GARP_2A,
+ *      - BYPASS_UNDEF_GARP_2B,
+ *      - BYPASS_UNDEF_GARP_2C,
+ *      - BYPASS_UNDEF_GARP_2D,
+ *      - BYPASS_UNDEF_GARP_2E,
+ *      - BYPASS_UNDEF_GARP_2F,
+ *      - BYPASS_IGMP.
+ *      - BYPASS_CDP.
+ *      - BYPASS_CSSTP.
+ *      - BYPASS_LLDP.
+ */
+rtk_api_ret_t rtk_storm_bypass_get(rtk_storm_bypass_t type, rtk_enable_t *pEnable)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_storm_bypass_get(type, pEnable);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
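+
+/*
+ * Usage sketch (illustrative only, not part of the SDK): let LLDP frames
+ * bypass the storm filter so neighbor discovery keeps working while storm
+ * control is active (ENABLED assumed from rtk_enable_t in rtk_switch.h).
+ */
+#if 0
+static rtk_api_ret_t example_bypass_lldp(void)
+{
+    return rtk_storm_bypass_set(BYPASS_LLDP, ENABLED);
+}
+#endif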
+
+/* Function Name:
+ *      rtk_rate_stormControlExtPortmask_set
+ * Description:
+ *      Set extension storm control port mask
+ * Input:
+ *      pPortmask  - port mask
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK
+ *      RT_ERR_FAILED
+ *      RT_ERR_NOT_INIT          - The module is not initialized
+ *      RT_ERR_INPUT             - invalid input parameter
+ * Note:
+ *
+ */
+rtk_api_ret_t rtk_rate_stormControlExtPortmask_set(rtk_portmask_t *pPortmask)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_rate_stormControlExtPortmask_set(pPortmask);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_rate_stormControlExtPortmask_get
+ * Description:
+ *      Get extension storm control port mask
+ * Input:
+ *      None
+ * Output:
+ *      pPortmask  - port mask
+ * Return:
+ *      RT_ERR_OK
+ *      RT_ERR_FAILED
+ *      RT_ERR_NOT_INIT          - The module is not initialized
+ *      RT_ERR_INPUT             - invalid input parameter
+ * Note:
+ *
+ */
+rtk_api_ret_t rtk_rate_stormControlExtPortmask_get(rtk_portmask_t *pPortmask)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_rate_stormControlExtPortmask_get(pPortmask);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_rate_stormControlExtEnable_set
+ * Description:
+ *      Set extension storm control state
+ * Input:
+ *      stormType   - storm group type
+ *      enable      - extension storm control state
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK
+ *      RT_ERR_FAILED
+ *      RT_ERR_NOT_INIT          - The module is not initialized
+ *      RT_ERR_INPUT             - invalid input parameter
+ * Note:
+ *
+ */
+rtk_api_ret_t rtk_rate_stormControlExtEnable_set(rtk_rate_storm_group_t stormType, rtk_enable_t enable)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_rate_stormControlExtEnable_set(stormType, enable);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_rate_stormControlExtEnable_get
+ * Description:
+ *      Get extension storm control state
+ * Input:
+ *      stormType   - storm group type
+ * Output:
+ *      pEnable     - extension storm control state
+ * Return:
+ *      RT_ERR_OK
+ *      RT_ERR_FAILED
+ *      RT_ERR_NOT_INIT          - The module is not initialized
+ *      RT_ERR_INPUT             - invalid input parameter
+ * Note:
+ *
+ */
+rtk_api_ret_t rtk_rate_stormControlExtEnable_get(rtk_rate_storm_group_t stormType, rtk_enable_t *pEnable)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_rate_stormControlExtEnable_get(stormType, pEnable);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_rate_stormControlExtMeterIdx_set
+ * Description:
+ *      Set extension storm control meter index
+ * Input:
+ *      stormType   - storm group type
+ *      index       - extension storm control meter index
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK
+ *      RT_ERR_FAILED
+ *      RT_ERR_NOT_INIT          - The module is not initialized
+ *      RT_ERR_INPUT             - invalid input parameter
+ * Note:
+ *
+ */
+rtk_api_ret_t rtk_rate_stormControlExtMeterIdx_set(rtk_rate_storm_group_t stormType, rtk_uint32 index)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_rate_stormControlExtMeterIdx_set(stormType, index);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_rate_stormControlExtMeterIdx_get
+ * Description:
+ *      Get extension storm control meter index
+ * Input:
+ *      stormType   - storm group type
+ * Output:
+ *      pIndex      - extension storm control meter index
+ * Return:
+ *      RT_ERR_OK
+ *      RT_ERR_FAILED
+ *      RT_ERR_NOT_INIT          - The module is not initialized
+ *      RT_ERR_INPUT             - invalid input parameter
+ * Note:
+ *
+ */
+rtk_api_ret_t rtk_rate_stormControlExtMeterIdx_get(rtk_rate_storm_group_t stormType, rtk_uint32 *pIndex)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_rate_stormControlExtMeterIdx_get(stormType, pIndex);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
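+
+/*
+ * Usage sketch (illustrative only, not part of the SDK): apply the
+ * extension storm filter for unknown unicast on the ports in pmask with
+ * meter 6. As above, the meter rate itself is configured through the
+ * rate/meter API, and ENABLED is assumed from rtk_switch.h.
+ */
+#if 0
+static rtk_api_ret_t example_ext_storm_control(rtk_portmask_t *pmask)
+{
+    rtk_api_ret_t retVal;
+
+    /* select the ports covered by the extension storm filter */
+    if ((retVal = rtk_rate_stormControlExtPortmask_set(pmask)) != RT_ERR_OK)
+        return retVal;
+
+    /* bind the unknown-unicast group to meter index 6 */
+    if ((retVal = rtk_rate_stormControlExtMeterIdx_set(STORM_GROUP_UNKNOWN_UNICAST, 6)) != RT_ERR_OK)
+        return retVal;
+
+    return rtk_rate_stormControlExtEnable_set(STORM_GROUP_UNKNOWN_UNICAST,
+                                              ENABLED);
+}
+#endif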
+
diff -Nruw linux-4.4.115-fbx/drivers/misc/freebox./rtlapi/storm.h linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/storm.h
--- linux-4.4.115-fbx/drivers/misc/freebox./rtlapi/storm.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/storm.h	2019-01-22 16:16:24.723257454 +0100
@@ -0,0 +1,424 @@
+/*
+ * Copyright (C) 2013 Realtek Semiconductor Corp.
+ * All Rights Reserved.
+ *
+ * This program is the proprietary software of Realtek Semiconductor
+ * Corporation and/or its licensors, and only be used, duplicated,
+ * modified or distributed under the authorized license from Realtek.
+ *
+ * ANY USE OF THE SOFTWARE OTHER THAN AS AUTHORIZED UNDER
+ * THIS LICENSE OR COPYRIGHT LAW IS PROHIBITED.
+ *
+ * Purpose : RTL8367/RTL8367C switch high-level API
+ *
+ * Feature : The file includes Storm module high-layer API definition
+ *
+ */
+
+#ifndef __RTK_API_STORM_H__
+#define __RTK_API_STORM_H__
+
+#define STORM_UNUC_INDEX                            28
+#define STORM_UNMC_INDEX                            29
+#define STORM_MC_INDEX                              30
+#define STORM_BC_INDEX                              31
+
+typedef enum rtk_rate_storm_group_e
+{
+    STORM_GROUP_UNKNOWN_UNICAST = 0,
+    STORM_GROUP_UNKNOWN_MULTICAST,
+    STORM_GROUP_MULTICAST,
+    STORM_GROUP_BROADCAST,
+    STORM_GROUP_END
+} rtk_rate_storm_group_t;
+
+typedef enum rtk_storm_bypass_e
+{
+    BYPASS_BRG_GROUP = 0,
+    BYPASS_FD_PAUSE,
+    BYPASS_SP_MCAST,
+    BYPASS_1X_PAE,
+    BYPASS_UNDEF_BRG_04,
+    BYPASS_UNDEF_BRG_05,
+    BYPASS_UNDEF_BRG_06,
+    BYPASS_UNDEF_BRG_07,
+    BYPASS_PROVIDER_BRIDGE_GROUP_ADDRESS,
+    BYPASS_UNDEF_BRG_09,
+    BYPASS_UNDEF_BRG_0A,
+    BYPASS_UNDEF_BRG_0B,
+    BYPASS_UNDEF_BRG_0C,
+    BYPASS_PROVIDER_BRIDGE_GVRP_ADDRESS,
+    BYPASS_8021AB,
+    BYPASS_UNDEF_BRG_0F,
+    BYPASS_BRG_MNGEMENT,
+    BYPASS_UNDEFINED_11,
+    BYPASS_UNDEFINED_12,
+    BYPASS_UNDEFINED_13,
+    BYPASS_UNDEFINED_14,
+    BYPASS_UNDEFINED_15,
+    BYPASS_UNDEFINED_16,
+    BYPASS_UNDEFINED_17,
+    BYPASS_UNDEFINED_18,
+    BYPASS_UNDEFINED_19,
+    BYPASS_UNDEFINED_1A,
+    BYPASS_UNDEFINED_1B,
+    BYPASS_UNDEFINED_1C,
+    BYPASS_UNDEFINED_1D,
+    BYPASS_UNDEFINED_1E,
+    BYPASS_UNDEFINED_1F,
+    BYPASS_GMRP,
+    BYPASS_GVRP,
+    BYPASS_UNDEF_GARP_22,
+    BYPASS_UNDEF_GARP_23,
+    BYPASS_UNDEF_GARP_24,
+    BYPASS_UNDEF_GARP_25,
+    BYPASS_UNDEF_GARP_26,
+    BYPASS_UNDEF_GARP_27,
+    BYPASS_UNDEF_GARP_28,
+    BYPASS_UNDEF_GARP_29,
+    BYPASS_UNDEF_GARP_2A,
+    BYPASS_UNDEF_GARP_2B,
+    BYPASS_UNDEF_GARP_2C,
+    BYPASS_UNDEF_GARP_2D,
+    BYPASS_UNDEF_GARP_2E,
+    BYPASS_UNDEF_GARP_2F,
+    BYPASS_IGMP,
+    BYPASS_CDP,
+    BYPASS_CSSTP,
+    BYPASS_LLDP,
+    BYPASS_END,
+}rtk_storm_bypass_t;
+
+/* Function Name:
+ *      rtk_rate_stormControlMeterIdx_set
+ * Description:
+ *      Set the storm control meter index.
+ * Input:
+ *      port       - port id
+ *      storm_type - storm group type
+ *      index       - storm control meter index.
+ * Output:
+ *      None.
+ * Return:
+ *      RT_ERR_OK
+ *      RT_ERR_FAILED
+ *      RT_ERR_PORT_ID - Invalid port id
+ *      RT_ERR_FILTER_METER_ID  - Invalid meter
+ * Note:
+ *
+ */
+extern rtk_api_ret_t rtk_rate_stormControlMeterIdx_set(rtk_port_t port, rtk_rate_storm_group_t stormType, rtk_uint32 index);
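+
+/*
+ * Editor's sketch, not part of the vendor SDK: a minimal caller binding an
+ * example meter to the broadcast storm group of one port; the meter index
+ * (5) is an illustrative assumption.
+ */
+static inline rtk_api_ret_t example_storm_bc_meter_bind(rtk_port_t port)
+{
+    return rtk_rate_stormControlMeterIdx_set(port, STORM_GROUP_BROADCAST, 5);
+}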
+
+/* Function Name:
+ *      rtk_rate_stormControlMeterIdx_get
+ * Description:
+ *      Get the storm control meter index.
+ * Input:
+ *      port       - port id
+ *      stormType  - storm group type
+ * Output:
+ *      pIndex     - storm control meter index.
+ * Return:
+ *      RT_ERR_OK
+ *      RT_ERR_FAILED
+ *      RT_ERR_PORT_ID - Invalid port id
+ *      RT_ERR_FILTER_METER_ID  - Invalid meter
+ * Note:
+ *
+ */
+extern rtk_api_ret_t rtk_rate_stormControlMeterIdx_get(rtk_port_t port, rtk_rate_storm_group_t stormType, rtk_uint32 *pIndex);
+
+/* Function Name:
+ *      rtk_rate_stormControlPortEnable_set
+ * Description:
+ *      Set enable status of storm control on specified port.
+ * Input:
+ *      port       - port id
+ *      stormType  - storm group type
+ *      enable     - enable status of storm control
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK
+ *      RT_ERR_FAILED
+ *      RT_ERR_NOT_INIT          - The module is not initialized
+ *      RT_ERR_PORT_ID           - invalid port id
+ *      RT_ERR_INPUT             - invalid input parameter
+ * Note:
+ *
+ */
+extern rtk_api_ret_t rtk_rate_stormControlPortEnable_set(rtk_port_t port, rtk_rate_storm_group_t stormType, rtk_enable_t enable);
+
+/* Function Name:
+ *      rtk_rate_stormControlPortEnable_get
+ * Description:
+ *      Get enable status of storm control on specified port.
+ * Input:
+ *      port       - port id
+ *      stormType  - storm group type
+ * Output:
+ *      pEnable     - enable status of storm control
+ * Return:
+ *      RT_ERR_OK
+ *      RT_ERR_FAILED
+ *      RT_ERR_NOT_INIT          - The module is not initialized
+ *      RT_ERR_PORT_ID           - invalid port id
+ *      RT_ERR_INPUT             - invalid input parameter
+ * Note:
+ *
+ */
+extern rtk_api_ret_t rtk_rate_stormControlPortEnable_get(rtk_port_t port, rtk_rate_storm_group_t stormType, rtk_enable_t *pEnable);
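+
+/*
+ * Editor's sketch, not part of the vendor SDK: enable broadcast storm
+ * control on a port and read the state back, assuming the usual
+ * ENABLED/DISABLED values of rtk_enable_t used elsewhere in this API.
+ */
+static inline rtk_api_ret_t example_storm_bc_enable(rtk_port_t port)
+{
+    rtk_api_ret_t ret;
+    rtk_enable_t state;
+
+    if ((ret = rtk_rate_stormControlPortEnable_set(port, STORM_GROUP_BROADCAST, ENABLED)) != RT_ERR_OK)
+        return ret;
+
+    /* Read back to confirm the write took effect. */
+    if ((ret = rtk_rate_stormControlPortEnable_get(port, STORM_GROUP_BROADCAST, &state)) != RT_ERR_OK)
+        return ret;
+
+    return (state == ENABLED) ? RT_ERR_OK : RT_ERR_FAILED;
+}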
+
+/* Function Name:
+ *      rtk_storm_bypass_set
+ * Description:
+ *      Set bypass storm filter control configuration.
+ * Input:
+ *      type    - Bypass storm filter control type.
+ *      enable  - Bypass status.
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_INPUT        - Invalid input parameters.
+ *      RT_ERR_ENABLE       - Invalid enable parameter
+ * Note:
+ *
+ *      This API can set the bypass storm filter control frame types, including RMA and IGMP.
+ *      The bypass frame types are as follows:
+ *      - BYPASS_BRG_GROUP,
+ *      - BYPASS_FD_PAUSE,
+ *      - BYPASS_SP_MCAST,
+ *      - BYPASS_1X_PAE,
+ *      - BYPASS_UNDEF_BRG_04,
+ *      - BYPASS_UNDEF_BRG_05,
+ *      - BYPASS_UNDEF_BRG_06,
+ *      - BYPASS_UNDEF_BRG_07,
+ *      - BYPASS_PROVIDER_BRIDGE_GROUP_ADDRESS,
+ *      - BYPASS_UNDEF_BRG_09,
+ *      - BYPASS_UNDEF_BRG_0A,
+ *      - BYPASS_UNDEF_BRG_0B,
+ *      - BYPASS_UNDEF_BRG_0C,
+ *      - BYPASS_PROVIDER_BRIDGE_GVRP_ADDRESS,
+ *      - BYPASS_8021AB,
+ *      - BYPASS_UNDEF_BRG_0F,
+ *      - BYPASS_BRG_MNGEMENT,
+ *      - BYPASS_UNDEFINED_11,
+ *      - BYPASS_UNDEFINED_12,
+ *      - BYPASS_UNDEFINED_13,
+ *      - BYPASS_UNDEFINED_14,
+ *      - BYPASS_UNDEFINED_15,
+ *      - BYPASS_UNDEFINED_16,
+ *      - BYPASS_UNDEFINED_17,
+ *      - BYPASS_UNDEFINED_18,
+ *      - BYPASS_UNDEFINED_19,
+ *      - BYPASS_UNDEFINED_1A,
+ *      - BYPASS_UNDEFINED_1B,
+ *      - BYPASS_UNDEFINED_1C,
+ *      - BYPASS_UNDEFINED_1D,
+ *      - BYPASS_UNDEFINED_1E,
+ *      - BYPASS_UNDEFINED_1F,
+ *      - BYPASS_GMRP,
+ *      - BYPASS_GVRP,
+ *      - BYPASS_UNDEF_GARP_22,
+ *      - BYPASS_UNDEF_GARP_23,
+ *      - BYPASS_UNDEF_GARP_24,
+ *      - BYPASS_UNDEF_GARP_25,
+ *      - BYPASS_UNDEF_GARP_26,
+ *      - BYPASS_UNDEF_GARP_27,
+ *      - BYPASS_UNDEF_GARP_28,
+ *      - BYPASS_UNDEF_GARP_29,
+ *      - BYPASS_UNDEF_GARP_2A,
+ *      - BYPASS_UNDEF_GARP_2B,
+ *      - BYPASS_UNDEF_GARP_2C,
+ *      - BYPASS_UNDEF_GARP_2D,
+ *      - BYPASS_UNDEF_GARP_2E,
+ *      - BYPASS_UNDEF_GARP_2F,
+ *      - BYPASS_IGMP.
+ */
+extern rtk_api_ret_t rtk_storm_bypass_set(rtk_storm_bypass_t type, rtk_enable_t enable);
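+
+/*
+ * Editor's sketch, not part of the vendor SDK: exempt IGMP frames from the
+ * storm filter so multicast membership reports are never rate-limited.
+ */
+static inline rtk_api_ret_t example_storm_bypass_igmp(void)
+{
+    return rtk_storm_bypass_set(BYPASS_IGMP, ENABLED);
+}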
+
+/* Function Name:
+ *      rtk_storm_bypass_get
+ * Description:
+ *      Get bypass storm filter control configuration.
+ * Input:
+ *      type - Bypass storm filter control type.
+ * Output:
+ *      pEnable - Bypass status.
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_INPUT        - Invalid input parameters.
+ * Note:
+ *      This API can get the bypass storm filter control frame types, including RMA and IGMP.
+ *      The bypass frame types are as follows:
+ *      - BYPASS_BRG_GROUP,
+ *      - BYPASS_FD_PAUSE,
+ *      - BYPASS_SP_MCAST,
+ *      - BYPASS_1X_PAE,
+ *      - BYPASS_UNDEF_BRG_04,
+ *      - BYPASS_UNDEF_BRG_05,
+ *      - BYPASS_UNDEF_BRG_06,
+ *      - BYPASS_UNDEF_BRG_07,
+ *      - BYPASS_PROVIDER_BRIDGE_GROUP_ADDRESS,
+ *      - BYPASS_UNDEF_BRG_09,
+ *      - BYPASS_UNDEF_BRG_0A,
+ *      - BYPASS_UNDEF_BRG_0B,
+ *      - BYPASS_UNDEF_BRG_0C,
+ *      - BYPASS_PROVIDER_BRIDGE_GVRP_ADDRESS,
+ *      - BYPASS_8021AB,
+ *      - BYPASS_UNDEF_BRG_0F,
+ *      - BYPASS_BRG_MNGEMENT,
+ *      - BYPASS_UNDEFINED_11,
+ *      - BYPASS_UNDEFINED_12,
+ *      - BYPASS_UNDEFINED_13,
+ *      - BYPASS_UNDEFINED_14,
+ *      - BYPASS_UNDEFINED_15,
+ *      - BYPASS_UNDEFINED_16,
+ *      - BYPASS_UNDEFINED_17,
+ *      - BYPASS_UNDEFINED_18,
+ *      - BYPASS_UNDEFINED_19,
+ *      - BYPASS_UNDEFINED_1A,
+ *      - BYPASS_UNDEFINED_1B,
+ *      - BYPASS_UNDEFINED_1C,
+ *      - BYPASS_UNDEFINED_1D,
+ *      - BYPASS_UNDEFINED_1E,
+ *      - BYPASS_UNDEFINED_1F,
+ *      - BYPASS_GMRP,
+ *      - BYPASS_GVRP,
+ *      - BYPASS_UNDEF_GARP_22,
+ *      - BYPASS_UNDEF_GARP_23,
+ *      - BYPASS_UNDEF_GARP_24,
+ *      - BYPASS_UNDEF_GARP_25,
+ *      - BYPASS_UNDEF_GARP_26,
+ *      - BYPASS_UNDEF_GARP_27,
+ *      - BYPASS_UNDEF_GARP_28,
+ *      - BYPASS_UNDEF_GARP_29,
+ *      - BYPASS_UNDEF_GARP_2A,
+ *      - BYPASS_UNDEF_GARP_2B,
+ *      - BYPASS_UNDEF_GARP_2C,
+ *      - BYPASS_UNDEF_GARP_2D,
+ *      - BYPASS_UNDEF_GARP_2E,
+ *      - BYPASS_UNDEF_GARP_2F,
+ *      - BYPASS_IGMP.
+ */
+extern rtk_api_ret_t rtk_storm_bypass_get(rtk_storm_bypass_t type, rtk_enable_t *pEnable);
+
+/* Function Name:
+ *      rtk_rate_stormControlExtPortmask_set
+ * Description:
+ *      Set extension storm control port mask
+ * Input:
+ *      pPortmask  - port mask
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK
+ *      RT_ERR_FAILED
+ *      RT_ERR_NOT_INIT          - The module is not initialized
+ *      RT_ERR_INPUT             - invalid input parameter
+ * Note:
+ *
+ */
+extern rtk_api_ret_t rtk_rate_stormControlExtPortmask_set(rtk_portmask_t *pPortmask);
+
+/* Function Name:
+ *      rtk_rate_stormControlExtPortmask_get
+ * Description:
+ *      Get extension storm control port mask
+ * Input:
+ *      None
+ * Output:
+ *      pPortmask  - port mask
+ * Return:
+ *      RT_ERR_OK
+ *      RT_ERR_FAILED
+ *      RT_ERR_NOT_INIT          - The module is not initialized
+ *      RT_ERR_INPUT             - invalid input parameter
+ * Note:
+ *
+ */
+extern rtk_api_ret_t rtk_rate_stormControlExtPortmask_get(rtk_portmask_t *pPortmask);
+
+/* Function Name:
+ *      rtk_rate_stormControlExtEnable_set
+ * Description:
+ *      Set extension storm control state
+ * Input:
+ *      stormType   - storm group type
+ *      enable      - extension storm control state
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK
+ *      RT_ERR_FAILED
+ *      RT_ERR_NOT_INIT          - The module is not initialized
+ *      RT_ERR_INPUT             - invalid input parameter
+ * Note:
+ *
+ */
+extern rtk_api_ret_t rtk_rate_stormControlExtEnable_set(rtk_rate_storm_group_t stormType, rtk_enable_t enable);
+
+/* Function Name:
+ *      rtk_rate_stormControlExtEnable_get
+ * Description:
+ *      Get extension storm control state
+ * Input:
+ *      stormType   - storm group type
+ * Output:
+ *      pEnable     - extension storm control state
+ * Return:
+ *      RT_ERR_OK
+ *      RT_ERR_FAILED
+ *      RT_ERR_NOT_INIT          - The module is not initialized
+ *      RT_ERR_INPUT             - invalid input parameter
+ * Note:
+ *
+ */
+extern rtk_api_ret_t rtk_rate_stormControlExtEnable_get(rtk_rate_storm_group_t stormType, rtk_enable_t *pEnable);
+
+/* Function Name:
+ *      rtk_rate_stormControlExtMeterIdx_set
+ * Description:
+ *      Set extension storm control meter index
+ * Input:
+ *      stormType   - storm group type
+ *      index       - extension storm control meter index
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK
+ *      RT_ERR_FAILED
+ *      RT_ERR_NOT_INIT          - The module is not initialized
+ *      RT_ERR_INPUT             - invalid input parameter
+ * Note:
+ *
+ */
+extern rtk_api_ret_t rtk_rate_stormControlExtMeterIdx_set(rtk_rate_storm_group_t stormType, rtk_uint32 index);
+
+/* Function Name:
+ *      rtk_rate_stormControlExtMeterIdx_get
+ * Description:
+ *      Get extension storm control meter index
+ * Input:
+ *      stormType   - storm group type
+ * Output:
+ *      pIndex      - extension storm control meter index
+ * Return:
+ *      RT_ERR_OK
+ *      RT_ERR_FAILED
+ *      RT_ERR_NOT_INIT          - The module is not initialized
+ *      RT_ERR_INPUT             - invalid input parameter
+ * Note:
+ *
+ */
+extern rtk_api_ret_t rtk_rate_stormControlExtMeterIdx_get(rtk_rate_storm_group_t stormType, rtk_uint32 *pIndex);
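+
+/*
+ * Editor's sketch, not part of the vendor SDK: the extension storm control
+ * is configured in three steps - select the ports, bind the meter, then
+ * enable the group. A bring-up helper for the broadcast group, with the
+ * port mask built by the caller:
+ */
+static inline rtk_api_ret_t example_ext_storm_setup(rtk_portmask_t *pPortmask, rtk_uint32 meter)
+{
+    rtk_api_ret_t ret;
+
+    if ((ret = rtk_rate_stormControlExtPortmask_set(pPortmask)) != RT_ERR_OK)
+        return ret;
+
+    if ((ret = rtk_rate_stormControlExtMeterIdx_set(STORM_GROUP_BROADCAST, meter)) != RT_ERR_OK)
+        return ret;
+
+    return rtk_rate_stormControlExtEnable_set(STORM_GROUP_BROADCAST, ENABLED);
+}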
+
+
+
+#endif /* __RTK_API_STORM_H__ */
diff -Nruw linux-4.4.115-fbx/drivers/misc/freebox./rtlapi/string.h linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/string.h
--- linux-4.4.115-fbx/drivers/misc/freebox./rtlapi/string.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/string.h	2019-01-22 16:16:24.723257454 +0100
@@ -0,0 +1,6 @@
+#ifndef STRINGS_H_
+#define STRINGS_H_
+
+#include <linux/string.h>
+
+#endif
diff -Nruw linux-4.4.115-fbx/drivers/misc/freebox./rtlapi/svlan.c linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/svlan.c
--- linux-4.4.115-fbx/drivers/misc/freebox./rtlapi/svlan.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/svlan.c	2019-01-22 16:16:24.723257454 +0100
@@ -0,0 +1,2818 @@
+/*
+ * Copyright (C) 2013 Realtek Semiconductor Corp.
+ * All Rights Reserved.
+ *
+ * This program is the proprietary software of Realtek Semiconductor
+ * Corporation and/or its licensors, and only be used, duplicated,
+ * modified or distributed under the authorized license from Realtek.
+ *
+ * ANY USE OF THE SOFTWARE OTHER THAN AS AUTHORIZED UNDER
+ * THIS LICENSE OR COPYRIGHT LAW IS PROHIBITED.
+ *
+ * $Revision: 79489 $
+ * $Date: 2017-06-08 14:33:54 +0800 (Thu, 08 Jun 2017) $
+ *
+ * Purpose : RTK switch high-level API for RTL8367/RTL8367C
+ * Feature : Here is a list of all functions and variables in the SVLAN module.
+ *
+ */
+
+#include <rtk_switch.h>
+#include <rtk_error.h>
+#include <svlan.h>
+#include <vlan.h>
+#include <string.h>
+
+#include <rtl8367c_asicdrv.h>
+#include <rtl8367c_asicdrv_svlan.h>
+
+rtk_uint8               svlan_mbrCfgUsage[RTL8367C_SVIDXNO];
+rtk_uint16              svlan_mbrCfgVid[RTL8367C_SVIDXNO];
+rtk_svlan_lookupType_t  svlan_lookupType;
+
+static rtk_api_ret_t _rtk_svlan_lookupType_set(rtk_svlan_lookupType_t type);
+
+static rtk_api_ret_t _rtk_svlan_init(void)
+{
+    rtk_uint32 i;
+    rtk_api_ret_t retVal;
+    rtl8367c_svlan_memconf_t svlanMemConf;
+    rtl8367c_svlan_s2c_t svlanSP2CConf;
+    rtl8367c_svlan_mc2s_t svlanMC2SConf;
+    rtk_uint32 svidx;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    /*default use C-priority*/
+    if ((retVal = rtl8367c_setAsicSvlanPrioritySel(SPRISEL_CTAGPRI)) != RT_ERR_OK)
+        return retVal;
+
+    /*Drop SVLAN untag frame*/
+    if ((retVal = rtl8367c_setAsicSvlanIngressUntag(UNTAG_DROP)) != RT_ERR_OK)
+        return retVal;
+
+    /*Drop SVLAN unmatch frame*/
+    if ((retVal = rtl8367c_setAsicSvlanIngressUnmatch(UNMATCH_DROP)) != RT_ERR_OK)
+        return retVal;
+
+    /*Set TPID to 0x88a8*/
+    if ((retVal = rtl8367c_setAsicSvlanTpid(0x88a8)) != RT_ERR_OK)
+        return retVal;
+
+    /*Clean Uplink Port Mask to none*/
+    if ((retVal = rtl8367c_setAsicSvlanUplinkPortMask(0)) != RT_ERR_OK)
+        return retVal;
+
+    /*Clean SVLAN Member Configuration*/
+    for (i=0; i<= RTL8367C_SVIDXMAX; i++)
+    {
+        memset(&svlanMemConf, 0, sizeof(rtl8367c_svlan_memconf_t));
+        if ((retVal = rtl8367c_setAsicSvlanMemberConfiguration(i, &svlanMemConf)) != RT_ERR_OK)
+            return retVal;
+    }
+
+    /*Clean C2S Configuration*/
+    for (i=0; i<= RTL8367C_C2SIDXMAX; i++)
+    {
+        if ((retVal = rtl8367c_setAsicSvlanC2SConf(i, 0,0,0)) != RT_ERR_OK)
+            return retVal;
+    }
+
+    /*Clean SP2C Configuration*/
+    for (i=0; i <= RTL8367C_SP2CMAX ; i++)
+    {
+        memset(&svlanSP2CConf, 0, sizeof(rtl8367c_svlan_s2c_t));
+        if ((retVal = rtl8367c_setAsicSvlanSP2CConf(i, &svlanSP2CConf)) != RT_ERR_OK)
+            return retVal;
+    }
+
+    /*Clean MC2S Configuration*/
+    for (i=0 ; i<= RTL8367C_MC2SIDXMAX; i++)
+    {
+        memset(&svlanMC2SConf, 0, sizeof(rtl8367c_svlan_mc2s_t));
+        if ((retVal = rtl8367c_setAsicSvlanMC2SConf(i, &svlanMC2SConf)) != RT_ERR_OK)
+            return retVal;
+    }
+
+
+    if ((retVal = _rtk_svlan_lookupType_set(SVLAN_LOOKUP_S64MBRCGF)) != RT_ERR_OK)
+        return retVal;
+
+
+    for (svidx = 0; svidx <= RTL8367C_SVIDXMAX; svidx++)
+    {
+        svlan_mbrCfgUsage[svidx] = FALSE;
+    }
+
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_svlan_servicePort_add(rtk_port_t port)
+{
+    rtk_api_ret_t retVal;
+    rtk_uint32 pmsk;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    /* check port valid */
+    RTK_CHK_PORT_VALID(port);
+
+    if ((retVal = rtl8367c_getAsicSvlanUplinkPortMask(&pmsk)) != RT_ERR_OK)
+        return retVal;
+
+    pmsk = pmsk | (1<<rtk_switch_port_L2P_get(port));
+
+    if ((retVal = rtl8367c_setAsicSvlanUplinkPortMask(pmsk)) != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_svlan_servicePort_get(rtk_portmask_t *pSvlan_portmask)
+{
+    rtk_api_ret_t retVal;
+    rtk_uint32 phyMbrPmask;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if(NULL == pSvlan_portmask)
+        return RT_ERR_NULL_POINTER;
+
+    if ((retVal = rtl8367c_getAsicSvlanUplinkPortMask(&phyMbrPmask)) != RT_ERR_OK)
+        return retVal;
+
+    if(rtk_switch_portmask_P2L_get(phyMbrPmask, pSvlan_portmask) != RT_ERR_OK)
+        return RT_ERR_FAILED;
+
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_svlan_servicePort_del(rtk_port_t port)
+{
+    rtk_api_ret_t retVal;
+    rtk_uint32 pmsk;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    /* check port valid */
+    RTK_CHK_PORT_VALID(port);
+
+    if ((retVal = rtl8367c_getAsicSvlanUplinkPortMask(&pmsk)) != RT_ERR_OK)
+        return retVal;
+
+    pmsk = pmsk & ~(1<<rtk_switch_port_L2P_get(port));
+
+    if ((retVal = rtl8367c_setAsicSvlanUplinkPortMask(pmsk)) != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_svlan_tpidEntry_set(rtk_svlan_tpid_t svlan_tag_id)
+{
+    rtk_api_ret_t retVal;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if (svlan_tag_id>RTK_MAX_NUM_OF_PROTO_TYPE)
+        return RT_ERR_INPUT;
+
+    if ((retVal = rtl8367c_setAsicSvlanTpid(svlan_tag_id)) != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_svlan_tpidEntry_get(rtk_svlan_tpid_t *pSvlan_tag_id)
+{
+    rtk_api_ret_t retVal;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if(NULL == pSvlan_tag_id)
+        return RT_ERR_NULL_POINTER;
+
+    if ((retVal = rtl8367c_getAsicSvlanTpid(pSvlan_tag_id)) != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_svlan_priorityRef_set(rtk_svlan_pri_ref_t ref)
+{
+    rtk_api_ret_t retVal;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if (ref >= REF_PRI_END)
+        return RT_ERR_INPUT;
+
+    if ((retVal = rtl8367c_setAsicSvlanPrioritySel(ref)) != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_svlan_priorityRef_get(rtk_svlan_pri_ref_t *pRef)
+{
+    rtk_api_ret_t retVal;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if(NULL == pRef)
+        return RT_ERR_NULL_POINTER;
+
+    if ((retVal = rtl8367c_getAsicSvlanPrioritySel(pRef)) != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_svlan_memberPortEntry_set(rtk_vlan_t svid, rtk_svlan_memberCfg_t *pSvlan_cfg)
+{
+    rtk_api_ret_t retVal;
+    rtk_int32 i;
+    rtk_uint32 empty_idx;
+    rtl8367c_svlan_memconf_t svlanMemConf;
+    rtk_uint32 phyMbrPmask;
+    rtk_vlan_cfg_t vlanCfg;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if(NULL == pSvlan_cfg)
+        return RT_ERR_NULL_POINTER;
+
+    if(svid > RTL8367C_VIDMAX)
+        return RT_ERR_SVLAN_VID;
+
+    RTK_CHK_PORTMASK_VALID(&(pSvlan_cfg->memberport));
+
+    RTK_CHK_PORTMASK_VALID(&(pSvlan_cfg->untagport));
+
+    if (pSvlan_cfg->fiden > ENABLED)
+        return RT_ERR_ENABLE;
+
+    if (pSvlan_cfg->fid > RTL8367C_FIDMAX)
+        return RT_ERR_L2_FID;
+
+    if (pSvlan_cfg->priority > RTL8367C_PRIMAX)
+        return RT_ERR_VLAN_PRIORITY;
+
+    if (pSvlan_cfg->efiden > ENABLED)
+        return RT_ERR_ENABLE;
+
+    if (pSvlan_cfg->efid > RTL8367C_EFIDMAX)
+        return RT_ERR_L2_FID;
+
+    if(SVLAN_LOOKUP_C4KVLAN == svlan_lookupType)
+    {
+        if ((retVal = rtk_vlan_get(svid, &vlanCfg)) != RT_ERR_OK)
+            return retVal;
+
+        vlanCfg.mbr = pSvlan_cfg->memberport;
+        vlanCfg.untag = pSvlan_cfg->untagport;
+
+        if ((retVal = rtk_vlan_set(svid, &vlanCfg)) != RT_ERR_OK)
+            return retVal;
+
+        empty_idx = 0xFF;
+
+        for (i = 0; i<= RTL8367C_SVIDXMAX; i++)
+        {
+            if (svid == svlan_mbrCfgVid[i] && TRUE == svlan_mbrCfgUsage[i])
+            {
+                memset(&svlanMemConf, 0, sizeof(rtl8367c_svlan_memconf_t));
+                svlanMemConf.vs_svid        = svid;
+                svlanMemConf.vs_efiden      = pSvlan_cfg->efiden;
+                svlanMemConf.vs_efid        = pSvlan_cfg->efid;
+                svlanMemConf.vs_priority    = pSvlan_cfg->priority;
+
+                /*for create check*/
+                if(0 == svlanMemConf.vs_efiden && 0 == svlanMemConf.vs_efid)
+                    svlanMemConf.vs_efid = 1;
+
+                if ((retVal = rtl8367c_setAsicSvlanMemberConfiguration(i, &svlanMemConf)) != RT_ERR_OK)
+                    return retVal;
+
+                return RT_ERR_OK;
+            }
+            else if (FALSE == svlan_mbrCfgUsage[i] && 0xFF == empty_idx)
+            {
+                empty_idx = i;
+            }
+        }
+
+        if (empty_idx != 0xFF)
+        {
+            svlan_mbrCfgUsage[empty_idx] = TRUE;
+            svlan_mbrCfgVid[empty_idx] = svid;
+
+            memset(&svlanMemConf, 0, sizeof(rtl8367c_svlan_memconf_t));
+            svlanMemConf.vs_svid        = svid;
+            svlanMemConf.vs_efiden      = pSvlan_cfg->efiden;
+            svlanMemConf.vs_efid        = pSvlan_cfg->efid;
+            svlanMemConf.vs_priority    = pSvlan_cfg->priority;
+
+            /*for create check*/
+            if(0 == svlanMemConf.vs_efiden && 0 == svlanMemConf.vs_efid)
+                svlanMemConf.vs_efid = 1;
+
+            if ((retVal = rtl8367c_setAsicSvlanMemberConfiguration(empty_idx, &svlanMemConf)) != RT_ERR_OK)
+                return retVal;
+
+        }
+
+        return RT_ERR_OK;
+    }
+
+
+    empty_idx = 0xFF;
+
+    for (i = 0; i<= RTL8367C_SVIDXMAX; i++)
+    {
+        /*
+        if ((retVal = rtl8367c_getAsicSvlanMemberConfiguration(i, &svlanMemConf)) != RT_ERR_OK)
+            return retVal;
+        */
+        if (svid == svlan_mbrCfgVid[i] && TRUE == svlan_mbrCfgUsage[i])
+        {
+            svlanMemConf.vs_svid = svid;
+
+            if(rtk_switch_portmask_L2P_get(&(pSvlan_cfg->memberport), &phyMbrPmask) != RT_ERR_OK)
+                return RT_ERR_FAILED;
+
+            svlanMemConf.vs_member = phyMbrPmask;
+
+            if(rtk_switch_portmask_L2P_get(&(pSvlan_cfg->untagport), &phyMbrPmask) != RT_ERR_OK)
+                return RT_ERR_FAILED;
+
+            svlanMemConf.vs_untag = phyMbrPmask;
+
+            svlanMemConf.vs_force_fid   = pSvlan_cfg->fiden;
+            svlanMemConf.vs_fid_msti    = pSvlan_cfg->fid;
+            svlanMemConf.vs_priority    = pSvlan_cfg->priority;
+            svlanMemConf.vs_efiden      = pSvlan_cfg->efiden;
+            svlanMemConf.vs_efid        = pSvlan_cfg->efid;
+
+            /* all items reset to zero means the entry is being deleted */
+            if( 0 == svlanMemConf.vs_member &&
+                0 == svlanMemConf.vs_untag &&
+                0 == svlanMemConf.vs_force_fid &&
+                0 == svlanMemConf.vs_fid_msti &&
+                0 == svlanMemConf.vs_priority &&
+                0 == svlanMemConf.vs_efiden &&
+                0 == svlanMemConf.vs_efid)
+            {
+                svlan_mbrCfgUsage[i] = FALSE;
+                svlan_mbrCfgVid[i] = 0;
+
+                /* Clear SVID also */
+                svlanMemConf.vs_svid = 0;
+            }
+            else
+            {
+                svlan_mbrCfgUsage[i] = TRUE;
+                svlan_mbrCfgVid[i] = svlanMemConf.vs_svid;
+
+                if(0 == svlanMemConf.vs_svid)
+                {
+                    /*for create check*/
+                    if(0 == svlanMemConf.vs_efiden && 0 == svlanMemConf.vs_efid)
+                    {
+                        svlanMemConf.vs_efid = 1;
+                    }
+                }
+            }
+
+            if ((retVal = rtl8367c_setAsicSvlanMemberConfiguration(i, &svlanMemConf)) != RT_ERR_OK)
+                return retVal;
+
+            return RT_ERR_OK;
+        }
+        else if (FALSE == svlan_mbrCfgUsage[i] && 0xFF == empty_idx)
+        {
+            empty_idx = i;
+        }
+    }
+
+    if (empty_idx != 0xFF)
+    {
+        memset(&svlanMemConf, 0, sizeof(rtl8367c_svlan_memconf_t));
+        svlanMemConf.vs_svid = svid;
+
+        if(rtk_switch_portmask_L2P_get(&(pSvlan_cfg->memberport), &phyMbrPmask) != RT_ERR_OK)
+            return RT_ERR_FAILED;
+
+        svlanMemConf.vs_member = phyMbrPmask;
+
+        if(rtk_switch_portmask_L2P_get(&(pSvlan_cfg->untagport), &phyMbrPmask) != RT_ERR_OK)
+            return RT_ERR_FAILED;
+
+        svlanMemConf.vs_untag = phyMbrPmask;
+
+        svlanMemConf.vs_force_fid   = pSvlan_cfg->fiden;
+        svlanMemConf.vs_fid_msti    = pSvlan_cfg->fid;
+        svlanMemConf.vs_priority    = pSvlan_cfg->priority;
+
+        svlanMemConf.vs_efiden      = pSvlan_cfg->efiden;
+        svlanMemConf.vs_efid        = pSvlan_cfg->efid;
+
+        /*change efid for empty svid 0*/
+        if(0 == svlanMemConf.vs_svid)
+        {   /*for create check*/
+            if(0 == svlanMemConf.vs_efiden && 0 == svlanMemConf.vs_efid)
+            {
+                svlanMemConf.vs_efid = 1;
+            }
+        }
+
+        svlan_mbrCfgUsage[empty_idx] = TRUE;
+        svlan_mbrCfgVid[empty_idx] = svlanMemConf.vs_svid;
+
+        if ((retVal = rtl8367c_setAsicSvlanMemberConfiguration(empty_idx, &svlanMemConf)) != RT_ERR_OK)
+        {
+            return retVal;
+        }
+
+        return RT_ERR_OK;
+    }
+
+    return RT_ERR_SVLAN_TABLE_FULL;
+}
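+
+/*
+ * Editor's sketch, not part of the vendor SDK: a caller of the function
+ * above fills rtk_svlan_memberCfg_t and leaves the unused fid/efid fields
+ * zeroed; the member/untag masks are built by the caller because
+ * rtk_portmask_t construction is board-specific.
+ */
+static rtk_api_ret_t example_svlan_member_set(rtk_vlan_t svid, rtk_portmask_t *pMember, rtk_portmask_t *pUntag)
+{
+    rtk_svlan_memberCfg_t cfg;
+
+    memset(&cfg, 0, sizeof(cfg));   /* fiden/fid/efiden/efid stay disabled */
+    cfg.svid       = svid;
+    cfg.memberport = *pMember;
+    cfg.untagport  = *pUntag;
+    cfg.priority   = 0;             /* S-tag priority 0 */
+
+    return _rtk_svlan_memberPortEntry_set(svid, &cfg);
+}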
+
+static rtk_api_ret_t _rtk_svlan_memberPortEntry_get(rtk_vlan_t svid, rtk_svlan_memberCfg_t *pSvlan_cfg)
+{
+    rtk_api_ret_t retVal;
+    rtk_uint32 i;
+    rtl8367c_svlan_memconf_t svlanMemConf;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if(NULL == pSvlan_cfg)
+        return RT_ERR_NULL_POINTER;
+
+    if (svid > RTL8367C_VIDMAX)
+        return RT_ERR_SVLAN_VID;
+
+
+    for (i = 0; i<= RTL8367C_SVIDXMAX; i++)
+    {
+        if ((retVal = rtl8367c_getAsicSvlanMemberConfiguration(i, &svlanMemConf)) != RT_ERR_OK)
+            return retVal;
+
+        if (svid == svlanMemConf.vs_svid)
+        {
+            pSvlan_cfg->svid        = svlanMemConf.vs_svid;
+
+            if(rtk_switch_portmask_P2L_get(svlanMemConf.vs_member,&(pSvlan_cfg->memberport)) != RT_ERR_OK)
+                return RT_ERR_FAILED;
+
+            if(rtk_switch_portmask_P2L_get(svlanMemConf.vs_untag,&(pSvlan_cfg->untagport)) != RT_ERR_OK)
+                return RT_ERR_FAILED;
+
+            pSvlan_cfg->fiden       = svlanMemConf.vs_force_fid;
+            pSvlan_cfg->fid         = svlanMemConf.vs_fid_msti;
+            pSvlan_cfg->priority    = svlanMemConf.vs_priority;
+            pSvlan_cfg->efiden      = svlanMemConf.vs_efiden;
+            pSvlan_cfg->efid        = svlanMemConf.vs_efid;
+
+            return RT_ERR_OK;
+        }
+    }
+
+    return RT_ERR_SVLAN_ENTRY_NOT_FOUND;
+
+}
+
+static rtk_api_ret_t _rtk_svlan_memberPortEntry_adv_set(rtk_uint32 idx, rtk_svlan_memberCfg_t *pSvlan_cfg)
+{
+    rtk_api_ret_t retVal;
+    rtl8367c_svlan_memconf_t svlanMemConf;
+    rtk_uint32 phyMbrPmask;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if(NULL == pSvlan_cfg)
+        return RT_ERR_NULL_POINTER;
+
+    if (idx > RTL8367C_SVIDXMAX)
+        return RT_ERR_SVLAN_ENTRY_INDEX;
+
+    if (pSvlan_cfg->svid>RTL8367C_VIDMAX)
+        return RT_ERR_SVLAN_VID;
+
+    RTK_CHK_PORTMASK_VALID(&(pSvlan_cfg->memberport));
+
+    RTK_CHK_PORTMASK_VALID(&(pSvlan_cfg->untagport));
+
+    if (pSvlan_cfg->fiden > ENABLED)
+        return RT_ERR_ENABLE;
+
+    if (pSvlan_cfg->fid > RTL8367C_FIDMAX)
+        return RT_ERR_L2_FID;
+
+    if (pSvlan_cfg->priority > RTL8367C_PRIMAX)
+        return RT_ERR_VLAN_PRIORITY;
+
+    if (pSvlan_cfg->efiden > ENABLED)
+        return RT_ERR_ENABLE;
+
+    if (pSvlan_cfg->efid > RTL8367C_EFIDMAX)
+        return RT_ERR_L2_FID;
+
+    memset(&svlanMemConf, 0, sizeof(rtl8367c_svlan_memconf_t));
+    svlanMemConf.vs_svid        = pSvlan_cfg->svid;
+    if(rtk_switch_portmask_L2P_get(&(pSvlan_cfg->memberport), &phyMbrPmask) != RT_ERR_OK)
+        return RT_ERR_FAILED;
+
+    svlanMemConf.vs_member = phyMbrPmask;
+
+    if(rtk_switch_portmask_L2P_get(&(pSvlan_cfg->untagport), &phyMbrPmask) != RT_ERR_OK)
+        return RT_ERR_FAILED;
+
+    svlanMemConf.vs_untag = phyMbrPmask;
+
+
+    svlanMemConf.vs_force_fid   = pSvlan_cfg->fiden;
+    svlanMemConf.vs_fid_msti    = pSvlan_cfg->fid;
+    svlanMemConf.vs_priority    = pSvlan_cfg->priority;
+    svlanMemConf.vs_efiden      = pSvlan_cfg->efiden;
+    svlanMemConf.vs_efid        = pSvlan_cfg->efid;
+
+    if(0 == svlanMemConf.vs_svid &&
+        0 == svlanMemConf.vs_member &&
+        0 == svlanMemConf.vs_untag &&
+        0 == svlanMemConf.vs_force_fid &&
+        0 == svlanMemConf.vs_fid_msti &&
+        0 == svlanMemConf.vs_priority &&
+        0 == svlanMemConf.vs_efiden &&
+        0 == svlanMemConf.vs_efid)
+    {
+        svlan_mbrCfgUsage[idx] = FALSE;
+        svlan_mbrCfgVid[idx] = 0;
+    }
+    else
+    {
+        svlan_mbrCfgUsage[idx] = TRUE;
+        svlan_mbrCfgVid[idx] = svlanMemConf.vs_svid;
+    }
+
+    if ((retVal = rtl8367c_setAsicSvlanMemberConfiguration(idx, &svlanMemConf)) != RT_ERR_OK)
+        return retVal;
+
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_svlan_memberPortEntry_adv_get(rtk_uint32 idx, rtk_svlan_memberCfg_t *pSvlan_cfg)
+{
+    rtk_api_ret_t retVal;
+    rtl8367c_svlan_memconf_t svlanMemConf;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if(NULL == pSvlan_cfg)
+        return RT_ERR_NULL_POINTER;
+
+    if (idx > RTL8367C_SVIDXMAX)
+        return RT_ERR_SVLAN_ENTRY_INDEX;
+
+    if ((retVal = rtl8367c_getAsicSvlanMemberConfiguration(idx, &svlanMemConf)) != RT_ERR_OK)
+        return retVal;
+
+    pSvlan_cfg->svid        = svlanMemConf.vs_svid;
+    if(rtk_switch_portmask_P2L_get(svlanMemConf.vs_member,&(pSvlan_cfg->memberport)) != RT_ERR_OK)
+        return RT_ERR_FAILED;
+
+    if(rtk_switch_portmask_P2L_get(svlanMemConf.vs_untag,&(pSvlan_cfg->untagport)) != RT_ERR_OK)
+        return RT_ERR_FAILED;
+
+    pSvlan_cfg->fiden       = svlanMemConf.vs_force_fid;
+    pSvlan_cfg->fid         = svlanMemConf.vs_fid_msti;
+    pSvlan_cfg->priority    = svlanMemConf.vs_priority;
+    pSvlan_cfg->efiden      = svlanMemConf.vs_efiden;
+    pSvlan_cfg->efid        = svlanMemConf.vs_efid;
+
+    return RT_ERR_OK;
+
+}
+
+static rtk_api_ret_t _rtk_svlan_defaultSvlan_set(rtk_port_t port, rtk_vlan_t svid)
+{
+    rtk_api_ret_t retVal;
+    rtk_uint32 i;
+    rtl8367c_svlan_memconf_t svlanMemConf;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    /* Check port Valid */
+    RTK_CHK_PORT_VALID(port);
+
+    /* svid must be 0~4095 */
+    if (svid > RTL8367C_VIDMAX)
+        return RT_ERR_SVLAN_VID;
+
+    for (i = 0; i < RTL8367C_SVIDXNO; i++)
+    {
+        if ((retVal = rtl8367c_getAsicSvlanMemberConfiguration(i, &svlanMemConf)) != RT_ERR_OK)
+            return retVal;
+
+        if (svid == svlanMemConf.vs_svid)
+        {
+            if ((retVal = rtl8367c_setAsicSvlanDefaultVlan(rtk_switch_port_L2P_get(port), i)) != RT_ERR_OK)
+                return retVal;
+
+            return RT_ERR_OK;
+        }
+    }
+
+    return RT_ERR_SVLAN_ENTRY_NOT_FOUND;
+}
+
+static rtk_api_ret_t _rtk_svlan_defaultSvlan_get(rtk_port_t port, rtk_vlan_t *pSvid)
+{
+    rtk_api_ret_t retVal;
+    rtk_uint32 idx;
+    rtl8367c_svlan_memconf_t svlanMemConf;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if(NULL == pSvid)
+        return RT_ERR_NULL_POINTER;
+
+    /* Check port Valid */
+    RTK_CHK_PORT_VALID(port);
+
+    if ((retVal = rtl8367c_getAsicSvlanDefaultVlan(rtk_switch_port_L2P_get(port), &idx)) != RT_ERR_OK)
+        return retVal;
+
+    if ((retVal = rtl8367c_getAsicSvlanMemberConfiguration(idx, &svlanMemConf)) != RT_ERR_OK)
+        return retVal;
+
+    *pSvid = svlanMemConf.vs_svid;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_svlan_c2s_add(rtk_vlan_t vid, rtk_port_t src_port, rtk_vlan_t svid)
+{
+    rtk_api_ret_t retVal, i;
+    rtk_uint32 empty_idx;
+    rtk_uint32 evid, pmsk, svidx, c2s_svidx;
+    rtl8367c_svlan_memconf_t svlanMemConf;
+    rtk_port_t phyPort;
+    rtk_uint16 doneFlag;
+
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+
+    if (vid > RTL8367C_VIDMAX)
+        return RT_ERR_VLAN_VID;
+
+    if (svid > RTL8367C_VIDMAX)
+        return RT_ERR_SVLAN_VID;
+
+    /* Check port Valid */
+    RTK_CHK_PORT_VALID(src_port);
+
+    phyPort = rtk_switch_port_L2P_get(src_port);
+
+    empty_idx = 0xFFFF;
+    svidx = 0xFFFF;
+    doneFlag = FALSE;
+
+    for (i = 0; i<= RTL8367C_SVIDXMAX; i++)
+    {
+        if ((retVal = rtl8367c_getAsicSvlanMemberConfiguration(i, &svlanMemConf)) != RT_ERR_OK)
+            return retVal;
+
+        if (svid == svlanMemConf.vs_svid)
+        {
+            svidx = i;
+            break;
+        }
+    }
+
+    if (0xFFFF == svidx)
+        return RT_ERR_SVLAN_VID;
+
+    for (i=RTL8367C_C2SIDXMAX; i>=0; i--)
+    {
+        if ((retVal = rtl8367c_getAsicSvlanC2SConf(i, &evid, &pmsk, &c2s_svidx)) != RT_ERR_OK)
+                return retVal;
+
+        if (evid == vid)
+        {
+            /* Check Src_port */
+            if(pmsk & (1 << phyPort))
+            {
+                /* Check SVIDX */
+                if(c2s_svidx == svidx)
+                {
+                    /* All the same, do nothing */
+                }
+                else
+                {
+                    /* New svidx, remove src_port and find a new slot to add a new entry */
+                    pmsk = pmsk & ~(1 << phyPort);
+                    if(pmsk == 0)
+                        c2s_svidx = 0;
+
+                    if ((retVal = rtl8367c_setAsicSvlanC2SConf(i, vid, pmsk, c2s_svidx)) != RT_ERR_OK)
+                        return retVal;
+                }
+            }
+            else
+            {
+                if(c2s_svidx == svidx && doneFlag == FALSE)
+                {
+                    pmsk = pmsk | (1 << phyPort);
+                    if ((retVal = rtl8367c_setAsicSvlanC2SConf(i, vid, pmsk, svidx)) != RT_ERR_OK)
+                        return retVal;
+
+                    doneFlag = TRUE;
+                }
+            }
+        }
+        else if (evid==0&&pmsk==0)
+        {
+            empty_idx = i;
+        }
+    }
+
+    if (0xFFFF != empty_idx && doneFlag ==FALSE)
+    {
+       if ((retVal = rtl8367c_setAsicSvlanC2SConf(empty_idx, vid, (1<<phyPort), svidx)) != RT_ERR_OK)
+           return retVal;
+
+       return RT_ERR_OK;
+    }
+    else if(doneFlag == TRUE)
+    {
+        return RT_ERR_OK;
+    }
+
+    return RT_ERR_OUT_OF_RANGE;
+}
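+
+/*
+ * Editor's sketch, not part of the vendor SDK: once a member entry exists
+ * for svid, an ingress C-VLAN on an access port can be bound to it. C-VID
+ * 100 is an illustrative value.
+ */
+static rtk_api_ret_t example_svlan_c2s_map(rtk_port_t access_port, rtk_vlan_t svid)
+{
+    /* S-tag frames arriving on access_port with C-VID 100 using svid. */
+    return _rtk_svlan_c2s_add(100, access_port, svid);
+}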
+
+static rtk_api_ret_t _rtk_svlan_c2s_del(rtk_vlan_t vid, rtk_port_t src_port)
+{
+    rtk_api_ret_t retVal;
+    rtk_uint32 i;
+    rtk_uint32 evid, pmsk, svidx;
+    rtk_port_t phyPort;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if (vid > RTL8367C_EVIDMAX)
+        return RT_ERR_VLAN_VID;
+
+    /* Check port Valid */
+    RTK_CHK_PORT_VALID(src_port);
+    phyPort = rtk_switch_port_L2P_get(src_port);
+
+    for (i = 0; i <= RTL8367C_C2SIDXMAX; i++)
+    {
+        if ((retVal = rtl8367c_getAsicSvlanC2SConf(i, &evid, &pmsk, &svidx)) != RT_ERR_OK)
+            return retVal;
+
+        if (evid == vid)
+        {
+            if(pmsk & (1 << phyPort))
+            {
+                pmsk = pmsk & ~(1 << phyPort);
+                if(pmsk == 0)
+                {
+                    vid = 0;
+                    svidx = 0;
+                }
+
+                if ((retVal = rtl8367c_setAsicSvlanC2SConf(i, vid, pmsk, svidx)) != RT_ERR_OK)
+                    return retVal;
+
+                return RT_ERR_OK;
+            }
+        }
+    }
+
+    return RT_ERR_OUT_OF_RANGE;
+}
+
+static rtk_api_ret_t _rtk_svlan_c2s_get(rtk_vlan_t vid, rtk_port_t src_port, rtk_vlan_t *pSvid)
+{
+    rtk_api_ret_t retVal;
+    rtk_uint32 i;
+    rtk_uint32 evid, pmsk, svidx;
+    rtl8367c_svlan_memconf_t svlanMemConf;
+    rtk_port_t phyPort;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if(NULL == pSvid)
+        return RT_ERR_NULL_POINTER;
+
+    if (vid > RTL8367C_VIDMAX)
+        return RT_ERR_VLAN_VID;
+
+    /* Check port Valid */
+    RTK_CHK_PORT_VALID(src_port);
+    phyPort = rtk_switch_port_L2P_get(src_port);
+
+    for (i = 0; i <= RTL8367C_C2SIDXMAX; i++)
+    {
+        if ((retVal = rtl8367c_getAsicSvlanC2SConf(i, &evid, &pmsk, &svidx)) != RT_ERR_OK)
+            return retVal;
+
+        if (evid == vid)
+        {
+            if(pmsk & (1 << phyPort))
+            {
+                if ((retVal = rtl8367c_getAsicSvlanMemberConfiguration(svidx, &svlanMemConf)) != RT_ERR_OK)
+                    return retVal;
+
+                *pSvid = svlanMemConf.vs_svid;
+                return RT_ERR_OK;
+            }
+        }
+    }
+
+    return RT_ERR_OUT_OF_RANGE;
+}
+
+static rtk_api_ret_t _rtk_svlan_untag_action_set(rtk_svlan_untag_action_t action, rtk_vlan_t svid)
+{
+    rtk_api_ret_t   retVal;
+    rtk_uint32      i;
+    rtl8367c_svlan_memconf_t svlanMemConf;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if (action >= UNTAG_END)
+        return RT_ERR_OUT_OF_RANGE;
+
+    if(action == UNTAG_ASSIGN)
+    {
+        if (svid > RTL8367C_VIDMAX)
+            return RT_ERR_SVLAN_VID;
+    }
+
+    if ((retVal = rtl8367c_setAsicSvlanIngressUntag((rtk_uint32)action)) != RT_ERR_OK)
+        return retVal;
+
+    if(action == UNTAG_ASSIGN)
+    {
+        for (i = 0; i < RTL8367C_SVIDXNO; i++)
+        {
+            if ((retVal = rtl8367c_getAsicSvlanMemberConfiguration(i, &svlanMemConf)) != RT_ERR_OK)
+                return retVal;
+
+            if (svid == svlanMemConf.vs_svid)
+            {
+                if ((retVal = rtl8367c_setAsicSvlanUntagVlan(i)) != RT_ERR_OK)
+                    return retVal;
+
+                return RT_ERR_OK;
+            }
+        }
+
+        return RT_ERR_SVLAN_ENTRY_NOT_FOUND;
+    }
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_svlan_untag_action_get(rtk_svlan_untag_action_t *pAction, rtk_vlan_t *pSvid)
+{
+    rtk_api_ret_t   retVal;
+    rtk_uint32      svidx;
+    rtl8367c_svlan_memconf_t svlanMemConf;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if(NULL == pAction || NULL == pSvid)
+        return RT_ERR_NULL_POINTER;
+
+    if ((retVal = rtl8367c_getAsicSvlanIngressUntag(pAction)) != RT_ERR_OK)
+        return retVal;
+
+    if(*pAction == UNTAG_ASSIGN)
+    {
+        if ((retVal = rtl8367c_getAsicSvlanUntagVlan(&svidx)) != RT_ERR_OK)
+            return retVal;
+
+        if ((retVal = rtl8367c_getAsicSvlanMemberConfiguration(svidx, &svlanMemConf)) != RT_ERR_OK)
+            return retVal;
+
+        *pSvid = svlanMemConf.vs_svid;
+    }
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_svlan_unmatch_action_set(rtk_svlan_unmatch_action_t action, rtk_vlan_t svid)
+{
+    rtk_api_ret_t   retVal;
+    rtk_uint32      i;
+    rtl8367c_svlan_memconf_t svlanMemConf;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if (action >= UNMATCH_END)
+        return RT_ERR_OUT_OF_RANGE;
+
+    if (action == UNMATCH_ASSIGN)
+    {
+        if (svid > RTL8367C_VIDMAX)
+            return RT_ERR_SVLAN_VID;
+    }
+
+    if ((retVal = rtl8367c_setAsicSvlanIngressUnmatch((rtk_uint32)action)) != RT_ERR_OK)
+        return retVal;
+
+    if(action == UNMATCH_ASSIGN)
+    {
+        for (i = 0; i < RTL8367C_SVIDXNO; i++)
+        {
+            if ((retVal = rtl8367c_getAsicSvlanMemberConfiguration(i, &svlanMemConf)) != RT_ERR_OK)
+                return retVal;
+
+            if (svid == svlanMemConf.vs_svid)
+            {
+                if ((retVal = rtl8367c_setAsicSvlanUnmatchVlan(i)) != RT_ERR_OK)
+                    return retVal;
+
+                return RT_ERR_OK;
+            }
+        }
+
+        return RT_ERR_SVLAN_ENTRY_NOT_FOUND;
+    }
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_svlan_unmatch_action_get(rtk_svlan_unmatch_action_t *pAction, rtk_vlan_t *pSvid)
+{
+    rtk_api_ret_t   retVal;
+    rtk_uint32      svidx;
+    rtl8367c_svlan_memconf_t svlanMemConf;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if(NULL == pAction || NULL == pSvid)
+        return RT_ERR_NULL_POINTER;
+
+    if ((retVal = rtl8367c_getAsicSvlanIngressUnmatch(pAction)) != RT_ERR_OK)
+        return retVal;
+
+    if(*pAction == UNMATCH_ASSIGN)
+    {
+        if ((retVal = rtl8367c_getAsicSvlanUnmatchVlan(&svidx)) != RT_ERR_OK)
+            return retVal;
+
+        if ((retVal = rtl8367c_getAsicSvlanMemberConfiguration(svidx, &svlanMemConf)) != RT_ERR_OK)
+            return retVal;
+
+        *pSvid = svlanMemConf.vs_svid;
+    }
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_svlan_unassign_action_set(rtk_svlan_unassign_action_t action)
+{
+    rtk_api_ret_t   retVal;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if (action >= UNASSIGN_END)
+        return RT_ERR_OUT_OF_RANGE;
+
+    retVal = rtl8367c_setAsicSvlanEgressUnassign((rtk_uint32)action);
+
+    return retVal;
+}
+
+static rtk_api_ret_t _rtk_svlan_unassign_action_get(rtk_svlan_unassign_action_t *pAction)
+{
+    rtk_api_ret_t   retVal;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if(NULL == pAction)
+        return RT_ERR_NULL_POINTER;
+
+    retVal = rtl8367c_getAsicSvlanEgressUnassign(pAction);
+
+    return retVal;
+}
+
+static rtk_api_ret_t _rtk_svlan_dmac_vidsel_set(rtk_port_t port, rtk_enable_t enable)
+{
+    rtk_api_ret_t   retVal;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    /* Check port Valid */
+    RTK_CHK_PORT_VALID(port);
+
+    if (enable >= RTK_ENABLE_END)
+        return RT_ERR_ENABLE;
+
+    if ((retVal = rtl8367c_setAsicSvlanDmacCvidSel(rtk_switch_port_L2P_get(port), enable)) != RT_ERR_OK)
+            return retVal;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_svlan_dmac_vidsel_get(rtk_port_t port, rtk_enable_t *pEnable)
+{
+    rtk_api_ret_t   retVal;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if(NULL == pEnable)
+        return RT_ERR_NULL_POINTER;
+
+    /* Check port Valid */
+    RTK_CHK_PORT_VALID(port);
+
+    if ((retVal = rtl8367c_getAsicSvlanDmacCvidSel(rtk_switch_port_L2P_get(port), pEnable)) != RT_ERR_OK)
+            return retVal;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_svlan_ipmc2s_add(ipaddr_t ipmc, ipaddr_t ipmcMsk,rtk_vlan_t svid)
+{
+    rtk_api_ret_t retVal, i;
+    rtk_uint32 empty_idx;
+    rtk_uint32 svidx;
+    rtl8367c_svlan_memconf_t svlanMemConf;
+    rtl8367c_svlan_mc2s_t svlanMC2SConf;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if (svid > RTL8367C_VIDMAX)
+        return RT_ERR_SVLAN_VID;
+
+    if ((ipmc&0xF0000000)!=0xE0000000)
+        return RT_ERR_INPUT;
+
+    svidx = 0xFFFF;
+
+    for (i = 0; i < RTL8367C_SVIDXNO; i++)
+    {
+        if ((retVal = rtl8367c_getAsicSvlanMemberConfiguration(i, &svlanMemConf)) != RT_ERR_OK)
+            return retVal;
+
+        if (svid == svlanMemConf.vs_svid)
+        {
+            svidx = i;
+            break;
+        }
+    }
+
+    if (0xFFFF == svidx)
+            return RT_ERR_SVLAN_ENTRY_NOT_FOUND;
+
+
+    empty_idx = 0xFFFF;
+
+    for (i = RTL8367C_MC2SIDXMAX; i >= 0; i--)
+    {
+        if ((retVal = rtl8367c_getAsicSvlanMC2SConf(i, &svlanMC2SConf)) != RT_ERR_OK)
+            return retVal;
+
+        if (TRUE == svlanMC2SConf.valid)
+        {
+            if (svlanMC2SConf.format == SVLAN_MC2S_MODE_IP &&
+                svlanMC2SConf.sdata==ipmc&&
+                svlanMC2SConf.smask==ipmcMsk)
+            {
+                svlanMC2SConf.svidx = svidx;
+                if ((retVal = rtl8367c_setAsicSvlanMC2SConf(i, &svlanMC2SConf)) != RT_ERR_OK)
+                    return retVal;
+            }
+        }
+        else
+        {
+            empty_idx = i;
+        }
+    }
+
+    if (empty_idx!=0xFFFF)
+    {
+        svlanMC2SConf.valid = TRUE;
+        svlanMC2SConf.svidx = svidx;
+        svlanMC2SConf.format = SVLAN_MC2S_MODE_IP;
+        svlanMC2SConf.sdata = ipmc;
+        svlanMC2SConf.smask = ipmcMsk;
+        if ((retVal = rtl8367c_setAsicSvlanMC2SConf(empty_idx, &svlanMC2SConf)) != RT_ERR_OK)
+            return retVal;
+        return RT_ERR_OK;
+    }
+
+    return RT_ERR_OUT_OF_RANGE;
+
+}
+
+static rtk_api_ret_t _rtk_svlan_ipmc2s_del(ipaddr_t ipmc, ipaddr_t ipmcMsk)
+{
+    rtk_api_ret_t retVal;
+    rtk_uint32 i;
+    rtl8367c_svlan_mc2s_t svlanMC2SConf;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if ((ipmc&0xF0000000)!=0xE0000000)
+        return RT_ERR_INPUT;
+
+    for (i = 0; i <= RTL8367C_MC2SIDXMAX; i++)
+    {
+        if ((retVal = rtl8367c_getAsicSvlanMC2SConf(i, &svlanMC2SConf)) != RT_ERR_OK)
+            return retVal;
+
+        if (TRUE == svlanMC2SConf.valid)
+        {
+            if (svlanMC2SConf.format == SVLAN_MC2S_MODE_IP &&
+                svlanMC2SConf.sdata==ipmc&&
+                svlanMC2SConf.smask==ipmcMsk)
+            {
+                memset(&svlanMC2SConf, 0, sizeof(rtl8367c_svlan_mc2s_t));
+                if ((retVal = rtl8367c_setAsicSvlanMC2SConf(i, &svlanMC2SConf)) != RT_ERR_OK)
+                    return retVal;
+                return RT_ERR_OK;
+            }
+        }
+    }
+
+    return RT_ERR_OUT_OF_RANGE;
+}
+
+static rtk_api_ret_t _rtk_svlan_ipmc2s_get(ipaddr_t ipmc, ipaddr_t ipmcMsk, rtk_vlan_t *pSvid)
+{
+    rtk_api_ret_t retVal;
+    rtk_uint32 i;
+    rtl8367c_svlan_memconf_t svlanMemConf;
+    rtl8367c_svlan_mc2s_t svlanMC2SConf;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if(NULL == pSvid)
+        return RT_ERR_NULL_POINTER;
+
+    if ((ipmc&0xF0000000)!=0xE0000000)
+        return RT_ERR_INPUT;
+
+    for (i = 0; i <= RTL8367C_MC2SIDXMAX; i++)
+    {
+        if ((retVal = rtl8367c_getAsicSvlanMC2SConf(i, &svlanMC2SConf)) != RT_ERR_OK)
+            return retVal;
+
+        if (TRUE == svlanMC2SConf.valid &&
+            svlanMC2SConf.format == SVLAN_MC2S_MODE_IP &&
+            svlanMC2SConf.sdata == ipmc &&
+            svlanMC2SConf.smask == ipmcMsk)
+        {
+            if ((retVal = rtl8367c_getAsicSvlanMemberConfiguration(svlanMC2SConf.svidx, &svlanMemConf)) != RT_ERR_OK)
+                return retVal;
+            *pSvid = svlanMemConf.vs_svid;
+            return RT_ERR_OK;
+        }
+    }
+
+    return RT_ERR_OUT_OF_RANGE;
+}
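+
+/*
+ * Editor's sketch, not part of the vendor SDK: the (ipmc & 0xF0000000) ==
+ * 0xE0000000 guard above restricts entries to IPv4 class-D addresses
+ * (224.0.0.0/4). Mapping the administratively scoped 239.0.0.0/8 range to
+ * one S-VLAN; the values are illustrative.
+ */
+static rtk_api_ret_t example_svlan_ipmc_map(rtk_vlan_t svid)
+{
+    ipaddr_t group = 0xEF000000;    /* 239.0.0.0 */
+    ipaddr_t mask  = 0xFF000000;    /* /8 */
+
+    return _rtk_svlan_ipmc2s_add(group, mask, svid);
+}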
+
+static rtk_api_ret_t _rtk_svlan_l2mc2s_add(rtk_mac_t mac, rtk_mac_t macMsk, rtk_vlan_t svid)
+{
+    rtk_api_ret_t retVal, i;
+    rtk_uint32 empty_idx;
+    rtk_uint32 svidx, l2add, l2Mask;
+    rtl8367c_svlan_memconf_t svlanMemConf;
+    rtl8367c_svlan_mc2s_t svlanMC2SConf;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if (svid > RTL8367C_VIDMAX)
+        return RT_ERR_SVLAN_VID;
+
+    if ((mac.octet[0] != 0x01) || (mac.octet[1] != 0x00))
+        return RT_ERR_INPUT;
+
+    l2add = (mac.octet[2] << 24) | (mac.octet[3] << 16) | (mac.octet[4] << 8) | mac.octet[5];
+    l2Mask = (macMsk.octet[2] << 24) | (macMsk.octet[3] << 16) | (macMsk.octet[4] << 8) | macMsk.octet[5];
+
+    svidx = 0xFFFF;
+
+    for (i = 0; i < RTL8367C_SVIDXNO; i++)
+    {
+        if ((retVal = rtl8367c_getAsicSvlanMemberConfiguration(i, &svlanMemConf)) != RT_ERR_OK)
+            return retVal;
+
+        if (svid == svlanMemConf.vs_svid)
+        {
+            svidx = i;
+            break;
+        }
+    }
+
+    if (0xFFFF == svidx)
+        return RT_ERR_SVLAN_ENTRY_NOT_FOUND;
+
+    empty_idx = 0xFFFF;
+
+    for (i = RTL8367C_MC2SIDXMAX; i >=0; i--)
+    {
+        if ((retVal = rtl8367c_getAsicSvlanMC2SConf(i, &svlanMC2SConf)) != RT_ERR_OK)
+            return retVal;
+
+        if (TRUE == svlanMC2SConf.valid)
+        {
+            if (svlanMC2SConf.format == SVLAN_MC2S_MODE_MAC &&
+                svlanMC2SConf.sdata==l2add&&
+                svlanMC2SConf.smask==l2Mask)
+            {
+                svlanMC2SConf.svidx = svidx;
+                if ((retVal = rtl8367c_setAsicSvlanMC2SConf(i, &svlanMC2SConf)) != RT_ERR_OK)
+                    return retVal;
+            }
+        }
+        else
+        {
+            empty_idx = i;
+        }
+    }
+
+    if (empty_idx!=0xFFFF)
+    {
+        svlanMC2SConf.valid = TRUE;
+        svlanMC2SConf.svidx = svidx;
+        svlanMC2SConf.format = SVLAN_MC2S_MODE_MAC;
+        svlanMC2SConf.sdata = l2add;
+        svlanMC2SConf.smask = l2Mask;
+
+        if ((retVal = rtl8367c_setAsicSvlanMC2SConf(empty_idx, &svlanMC2SConf)) != RT_ERR_OK)
+            return retVal;
+        return RT_ERR_OK;
+    }
+
+    return RT_ERR_OUT_OF_RANGE;
+}
+
+static rtk_api_ret_t _rtk_svlan_l2mc2s_del(rtk_mac_t mac, rtk_mac_t macMsk)
+{
+    rtk_api_ret_t retVal;
+    rtk_uint32 i;
+    rtk_uint32 l2add, l2Mask;
+    rtl8367c_svlan_mc2s_t svlanMC2SConf;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if ((mac.octet[0] != 0x01) || (mac.octet[1] != 0x00))
+        return RT_ERR_INPUT;
+
+    l2add = (mac.octet[2] << 24) | (mac.octet[3] << 16) | (mac.octet[4] << 8) | mac.octet[5];
+    l2Mask = (macMsk.octet[2] << 24) | (macMsk.octet[3] << 16) | (macMsk.octet[4] << 8) | macMsk.octet[5];
+
+    for (i = 0; i <= RTL8367C_MC2SIDXMAX; i++)
+    {
+        if ((retVal = rtl8367c_getAsicSvlanMC2SConf(i, &svlanMC2SConf)) != RT_ERR_OK)
+            return retVal;
+
+        if (TRUE == svlanMC2SConf.valid)
+        {
+            if (svlanMC2SConf.format == SVLAN_MC2S_MODE_MAC &&
+                svlanMC2SConf.sdata==l2add&&
+                svlanMC2SConf.smask==l2Mask)
+            {
+                memset(&svlanMC2SConf, 0, sizeof(rtl8367c_svlan_mc2s_t));
+                if ((retVal = rtl8367c_setAsicSvlanMC2SConf(i, &svlanMC2SConf)) != RT_ERR_OK)
+                    return retVal;
+                return RT_ERR_OK;
+            }
+        }
+    }
+
+    return RT_ERR_OUT_OF_RANGE;
+}
+
+static rtk_api_ret_t _rtk_svlan_l2mc2s_get(rtk_mac_t mac, rtk_mac_t macMsk, rtk_vlan_t *pSvid)
+{
+    rtk_api_ret_t retVal;
+    rtk_uint32 i;
+    rtk_uint32 l2add,l2Mask;
+    rtl8367c_svlan_memconf_t svlanMemConf;
+    rtl8367c_svlan_mc2s_t svlanMC2SConf;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if(NULL == pSvid)
+        return RT_ERR_NULL_POINTER;
+
+    if ((mac.octet[0] != 0x01) || (mac.octet[1] != 0x00))
+        return RT_ERR_INPUT;
+
+    l2add = (mac.octet[2] << 24) | (mac.octet[3] << 16) | (mac.octet[4] << 8) | mac.octet[5];
+    l2Mask = (macMsk.octet[2] << 24) | (macMsk.octet[3] << 16) | (macMsk.octet[4] << 8) | macMsk.octet[5];
+
+    for (i = 0; i <= RTL8367C_MC2SIDXMAX; i++)
+    {
+        if ((retVal = rtl8367c_getAsicSvlanMC2SConf(i, &svlanMC2SConf)) != RT_ERR_OK)
+            return retVal;
+
+        if (TRUE == svlanMC2SConf.valid)
+        {
+            if (svlanMC2SConf.format == SVLAN_MC2S_MODE_MAC &&
+                svlanMC2SConf.sdata==l2add&&
+                svlanMC2SConf.smask==l2Mask)
+            {
+                if ((retVal = rtl8367c_getAsicSvlanMemberConfiguration(svlanMC2SConf.svidx, &svlanMemConf)) != RT_ERR_OK)
+                    return retVal;
+                *pSvid = svlanMemConf.vs_svid;
+
+                return RT_ERR_OK;
+            }
+        }
+    }
+
+    return RT_ERR_OUT_OF_RANGE;
+}
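+
+/*
+ * Editor's sketch, not part of the vendor SDK: the MC2S table stores only
+ * the low four MAC octets in its 32-bit sdata field, which is why the
+ * functions above insist on a 01:00:xx:xx:xx:xx address - the two fixed
+ * octets carry no information. A helper mirroring that packing:
+ */
+static rtk_uint32 example_mc2s_pack_mac(const rtk_mac_t *pMac)
+{
+    return ((rtk_uint32)pMac->octet[2] << 24) |
+           ((rtk_uint32)pMac->octet[3] << 16) |
+           ((rtk_uint32)pMac->octet[4] << 8)  |
+            (rtk_uint32)pMac->octet[5];
+}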
+
+static rtk_api_ret_t _rtk_svlan_sp2c_add(rtk_vlan_t svid, rtk_port_t dst_port, rtk_vlan_t cvid)
+{
+    rtk_api_ret_t retVal, i;
+    rtk_uint32 empty_idx, svidx;
+    rtl8367c_svlan_memconf_t svlanMemConf;
+    rtl8367c_svlan_s2c_t svlanSP2CConf;
+    rtk_port_t port;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if (svid > RTL8367C_VIDMAX)
+        return RT_ERR_SVLAN_VID;
+
+    if (cvid > RTL8367C_VIDMAX)
+        return RT_ERR_VLAN_VID;
+
+    /* Check port Valid */
+    RTK_CHK_PORT_VALID(dst_port);
+    port = rtk_switch_port_L2P_get(dst_port);
+
+    svidx = 0xFFFF;
+
+    for (i = 0; i < RTL8367C_SVIDXNO; i++)
+    {
+        if ((retVal = rtl8367c_getAsicSvlanMemberConfiguration(i, &svlanMemConf)) != RT_ERR_OK)
+            return retVal;
+
+        if (svid == svlanMemConf.vs_svid)
+        {
+            svidx = i;
+            break;
+        }
+    }
+
+    if (0xFFFF == svidx)
+        return RT_ERR_SVLAN_ENTRY_NOT_FOUND;
+
+    empty_idx = 0xFFFF;
+
+    for (i=RTL8367C_SP2CMAX; i >=0 ; i--)
+    {
+        if ((retVal = rtl8367c_getAsicSvlanSP2CConf(i, &svlanSP2CConf)) != RT_ERR_OK)
+            return retVal;
+
+        if ( (svlanSP2CConf.svidx == svidx) && (svlanSP2CConf.dstport == port) && (svlanSP2CConf.valid == 1))
+        {
+            empty_idx = i;
+            break;
+        }
+        else if (svlanSP2CConf.valid == 0)
+        {
+            empty_idx = i;
+        }
+    }
+
+    if (empty_idx!=0xFFFF)
+    {
+        svlanSP2CConf.valid     = 1;
+        svlanSP2CConf.vid       = cvid;
+        svlanSP2CConf.svidx     = svidx;
+        svlanSP2CConf.dstport   = port;
+
+        if ((retVal = rtl8367c_setAsicSvlanSP2CConf(empty_idx, &svlanSP2CConf)) != RT_ERR_OK)
+            return retVal;
+        return RT_ERR_OK;
+    }
+
+    return RT_ERR_OUT_OF_RANGE;
+
+}
+
+static rtk_api_ret_t _rtk_svlan_sp2c_get(rtk_vlan_t svid, rtk_port_t dst_port, rtk_vlan_t *pCvid)
+{
+    rtk_api_ret_t retVal;
+    rtk_uint32 i, svidx;
+    rtl8367c_svlan_memconf_t svlanMemConf;
+    rtl8367c_svlan_s2c_t svlanSP2CConf;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if(NULL == pCvid)
+        return RT_ERR_NULL_POINTER;
+
+    if (svid > RTL8367C_VIDMAX)
+        return RT_ERR_SVLAN_VID;
+
+    /* Check port Valid */
+    RTK_CHK_PORT_VALID(dst_port);
+    dst_port = rtk_switch_port_L2P_get(dst_port);
+
+    svidx = 0xFFFF;
+
+    for (i = 0; i < RTL8367C_SVIDXNO; i++)
+    {
+        if ((retVal = rtl8367c_getAsicSvlanMemberConfiguration(i, &svlanMemConf)) != RT_ERR_OK)
+            return retVal;
+
+        if (svid == svlanMemConf.vs_svid)
+        {
+            svidx = i;
+            break;
+        }
+    }
+
+    if (0xFFFF == svidx)
+        return RT_ERR_SVLAN_ENTRY_NOT_FOUND;
+
+    for (i = 0; i <= RTL8367C_SP2CMAX; i++)
+    {
+        if ((retVal = rtl8367c_getAsicSvlanSP2CConf(i, &svlanSP2CConf)) != RT_ERR_OK)
+            return retVal;
+
+        if ( (svlanSP2CConf.svidx == svidx) && (svlanSP2CConf.dstport == dst_port) && (svlanSP2CConf.valid == 1) )
+        {
+            *pCvid = svlanSP2CConf.vid;
+            return RT_ERR_OK;
+        }
+    }
+
+    return RT_ERR_OUT_OF_RANGE;
+}
+
+static rtk_api_ret_t _rtk_svlan_sp2c_del(rtk_vlan_t svid, rtk_port_t dst_port)
+{
+    rtk_api_ret_t retVal;
+    rtk_uint32 i, svidx;
+    rtl8367c_svlan_memconf_t svlanMemConf;
+    rtl8367c_svlan_s2c_t svlanSP2CConf;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if (svid > RTL8367C_VIDMAX)
+        return RT_ERR_SVLAN_VID;
+
+    /* Check port Valid */
+    RTK_CHK_PORT_VALID(dst_port);
+    dst_port = rtk_switch_port_L2P_get(dst_port);
+
+    svidx = 0xFFFF;
+
+    for (i = 0; i < RTL8367C_SVIDXNO; i++)
+    {
+        if ((retVal = rtl8367c_getAsicSvlanMemberConfiguration(i, &svlanMemConf)) != RT_ERR_OK)
+            return retVal;
+
+        if (svid == svlanMemConf.vs_svid)
+        {
+            svidx = i;
+            break;
+        }
+    }
+
+    if (0xFFFF == svidx)
+        return RT_ERR_SVLAN_ENTRY_NOT_FOUND;
+
+    for (i = 0; i <= RTL8367C_SP2CMAX; i++)
+    {
+        if ((retVal = rtl8367c_getAsicSvlanSP2CConf(i, &svlanSP2CConf)) != RT_ERR_OK)
+            return retVal;
+
+        if ( (svlanSP2CConf.svidx == svidx) && (svlanSP2CConf.dstport == dst_port) && (svlanSP2CConf.valid == 1) )
+        {
+            svlanSP2CConf.valid     = 0;
+            svlanSP2CConf.vid       = 0;
+            svlanSP2CConf.svidx     = 0;
+            svlanSP2CConf.dstport   = 0;
+
+            if ((retVal = rtl8367c_setAsicSvlanSP2CConf(i, &svlanSP2CConf)) != RT_ERR_OK)
+                return retVal;
+            return RT_ERR_OK;
+        }
+
+    }
+
+    return RT_ERR_OUT_OF_RANGE;
+}
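+
+/*
+ * Editor's sketch, not part of the vendor SDK: SP2C entries rewrite the
+ * C-VID on egress per (S-VID, destination port) pair. Translating svid
+ * back to C-VID 100 on one downstream port; the C-VID is illustrative.
+ */
+static rtk_api_ret_t example_svlan_sp2c_map(rtk_vlan_t svid, rtk_port_t dst_port)
+{
+    return _rtk_svlan_sp2c_add(svid, dst_port, 100);
+}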
+
+static rtk_api_ret_t _rtk_svlan_lookupType_set(rtk_svlan_lookupType_t type)
+{
+    rtk_api_ret_t   retVal;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if (type >= SVLAN_LOOKUP_END)
+        return RT_ERR_CHIP_NOT_SUPPORTED;
+
+
+    svlan_lookupType = type;
+
+    retVal = rtl8367c_setAsicSvlanLookupType((rtk_uint32)type);
+
+    return retVal;
+}
+
+static rtk_api_ret_t _rtk_svlan_lookupType_get(rtk_svlan_lookupType_t *pType)
+{
+    rtk_api_ret_t   retVal;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if(NULL == pType)
+        return RT_ERR_NULL_POINTER;
+
+    retVal = rtl8367c_getAsicSvlanLookupType(pType);
+
+    svlan_lookupType = *pType;
+
+    return retVal;
+}
+
+static rtk_api_ret_t _rtk_svlan_trapPri_set(rtk_pri_t priority)
+{
+    rtk_api_ret_t   retVal;
+
+    RTK_CHK_INIT_STATE();
+
+    if(priority > RTL8367C_PRIMAX)
+        return RT_ERR_OUT_OF_RANGE;
+
+    retVal = rtl8367c_setAsicSvlanTrapPriority(priority);
+
+    return retVal;
+}
+
+static rtk_api_ret_t _rtk_svlan_trapPri_get(rtk_pri_t *pPriority)
+{
+    rtk_api_ret_t   retVal;
+
+    RTK_CHK_INIT_STATE();
+
+    if(NULL == pPriority)
+        return RT_ERR_NULL_POINTER;
+
+    retVal = rtl8367c_getAsicSvlanTrapPriority(pPriority);
+
+    return retVal;
+}   /* end of _rtk_svlan_trapPri_get */
+
+
+/* Function Name:
+ *      rtk_svlan_init
+ * Description:
+ *      Initialize SVLAN Configuration
+ * Input:
+ *      None
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ * Note:
+ *      The ether type of the S-tag in 802.1ad is 0x88a8; the ether types 0x9100 and 0x9200 also exist in Q-in-Q S-VLAN designs.
+ *      The user can set the matched ether type to the protocol supported by the service provider.
+ */
+rtk_api_ret_t rtk_svlan_init(void)
+{
+    rtk_api_ret_t retVal;
+    
+    RTK_API_LOCK();
+    retVal = _rtk_svlan_init(); 
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
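+
+/*
+ * Illustrative usage (not part of the driver): a minimal SVLAN bring-up
+ * sketch. The 802.1ad TPID value and the choice of port 4 as the uplink
+ * are example assumptions, not values mandated by this API.
+ */
+#if 0
+static rtk_api_ret_t example_svlan_bringup(void)
+{
+    rtk_api_ret_t retVal;
+
+    if ((retVal = rtk_svlan_init()) != RT_ERR_OK)
+        return retVal;
+
+    /* Parse 0x88a8 (802.1ad) as the S-tag ether type on uplink ports */
+    if ((retVal = rtk_svlan_tpidEntry_set(0x88a8)) != RT_ERR_OK)
+        return retVal;
+
+    /* Declare the assumed uplink port as the service port */
+    return rtk_svlan_servicePort_add(4);
+}
+#endif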
+
+/* Function Name:
+ *      rtk_svlan_servicePort_add
+ * Description:
+ *      Add one service port in the specified device
+ * Input:
+ *      port - Port id.
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_PORT_ID      - Invalid port number.
+ *      RT_ERR_INPUT        - Invalid input parameters.
+ * Note:
+ *      This API sets which port is connected to the provider switch. All frames received from this port must
+ *      contain an accepted SVID in the S-tag field.
+ */
+rtk_api_ret_t rtk_svlan_servicePort_add(rtk_port_t port)
+{
+    rtk_api_ret_t retVal;
+    
+    RTK_API_LOCK();
+    retVal = _rtk_svlan_servicePort_add(port); 
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+/* Function Name:
+ *      rtk_svlan_servicePort_get
+ * Description:
+ *      Get service ports in the specified device.
+ * Input:
+ *      None
+ * Output:
+ *      pSvlan_portmask - pointer buffer of svlan ports.
+ * Return:
+ *      RT_ERR_OK          - OK
+ *      RT_ERR_FAILED      - Failed
+ *      RT_ERR_SMI         - SMI access error
+ * Note:
+ *      This API gets which ports are connected to the provider switch. All frames received from these ports must
+ *      contain an accepted SVID in the S-tag field.
+ */
+rtk_api_ret_t rtk_svlan_servicePort_get(rtk_portmask_t *pSvlan_portmask)
+{
+    rtk_api_ret_t retVal;
+    
+    RTK_API_LOCK();
+    retVal = _rtk_svlan_servicePort_get(pSvlan_portmask); 
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+/* Function Name:
+ *      rtk_svlan_servicePort_del
+ * Description:
+ *      Delete one service port in the specified device
+ * Input:
+ *      port - Port id.
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_PORT_ID      - Invalid port number.
+ * Note:
+ *      This API is removing SVLAN service port in the specified device.
+ */
+rtk_api_ret_t rtk_svlan_servicePort_del(rtk_port_t port)
+{
+    rtk_api_ret_t retVal;
+    
+    RTK_API_LOCK();
+    retVal = _rtk_svlan_servicePort_del(port); 
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_svlan_tpidEntry_set
+ * Description:
+ *      Configure accepted S-VLAN ether type.
+ * Input:
+ *      svlan_tag_id - Ether type of S-tag frame parsing in uplink ports.
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_INPUT        - Invalid input parameter.
+ * Note:
+ *      The ether type of the S-tag in 802.1ad is 0x88a8; ether types 0x9100 and 0x9200 also exist for Q-in-Q SVLAN designs.
+ *      The user can set the matched ether type to the protocol supported by the service provider.
+ */
+rtk_api_ret_t rtk_svlan_tpidEntry_set(rtk_svlan_tpid_t svlan_tag_id)
+{
+    rtk_api_ret_t retVal;
+    
+    RTK_API_LOCK();
+    retVal = _rtk_svlan_tpidEntry_set(svlan_tag_id); 
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+/* Function Name:
+ *      rtk_svlan_tpidEntry_get
+ * Description:
+ *      Get accepted S-VLAN ether type setting.
+ * Input:
+ *      None
+ * Output:
+ *      pSvlan_tag_id -  Ether type of S-tag frame parsing in uplink ports.
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ * Note:
+ *      This API gets the ether type used for S-tag frame parsing on uplink ports.
+ */
+rtk_api_ret_t rtk_svlan_tpidEntry_get(rtk_svlan_tpid_t *pSvlan_tag_id)
+{
+    rtk_api_ret_t retVal;
+    
+    RTK_API_LOCK();
+    retVal = _rtk_svlan_tpidEntry_get(pSvlan_tag_id); 
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+/* Function Name:
+ *      rtk_svlan_priorityRef_set
+ * Description:
+ *      Set S-VLAN upstream priority reference setting.
+ * Input:
+ *      ref - reference selection parameter.
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_INPUT        - Invalid input parameter.
+ * Note:
+ *      The API can set the upstream SVLAN tag priority reference source. The related priority
+ *      sources are as following:
+ *      - REF_INTERNAL_PRI,
+ *      - REF_CTAG_PRI,
+ *      - REF_SVLAN_PRI,
+ *      - REF_PB_PRI.
+ */
+rtk_api_ret_t rtk_svlan_priorityRef_set(rtk_svlan_pri_ref_t ref)
+{
+    rtk_api_ret_t retVal;
+    
+    RTK_API_LOCK();
+    retVal = _rtk_svlan_priorityRef_set(ref); 
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+/* Function Name:
+ *      rtk_svlan_priorityRef_get
+ * Description:
+ *      Get S-VLAN upstream priority reference setting.
+ * Input:
+ *      None
+ * Output:
+ *      pRef - reference selection parameter.
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ * Note:
+ *      The API can get the upstream SVLAN tag priority reference source. The related priority
+ *      sources are as following:
+ *      - REF_INTERNAL_PRI,
+ *      - REF_CTAG_PRI,
+ *      - REF_SVLAN_PRI,
+ *      - REF_PB_PRI
+ */
+rtk_api_ret_t rtk_svlan_priorityRef_get(rtk_svlan_pri_ref_t *pRef)
+{
+    rtk_api_ret_t retVal;
+    
+    RTK_API_LOCK();
+    retVal = _rtk_svlan_priorityRef_get(pRef); 
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+/* Function Name:
+ *      rtk_svlan_memberPortEntry_set
+ * Description:
+ *      Configure system SVLAN member content
+ * Input:
+ *      svid - SVLAN id
+ *      psvlan_cfg - SVLAN member configuration
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK               - OK
+ *      RT_ERR_FAILED           - Failed
+ *      RT_ERR_SMI              - SMI access error
+ *      RT_ERR_INPUT            - Invalid input parameter.
+ *      RT_ERR_SVLAN_VID        - Invalid SVLAN VID parameter.
+ *      RT_ERR_PORT_MASK        - Invalid portmask.
+ *      RT_ERR_SVLAN_TABLE_FULL - SVLAN configuration is full.
+ * Note:
+ *      The API can set the system's 64 accepted S-tag frame formats. Only S-tag frames carrying one of these 64 SVIDs
+ *      will be accepted from uplink ports. S-tag frames with other SVIDs, and S-untagged frames, are dropped by the default setup.
+ *      - rtk_svlan_memberCfg_t->svid is SVID of SVLAN member configuration.
+ *      - rtk_svlan_memberCfg_t->memberport is member port mask of SVLAN member configuration.
+ *      - rtk_svlan_memberCfg_t->fid is filtering database of SVLAN member configuration.
+ *      - rtk_svlan_memberCfg_t->priority is priority of SVLAN member configuration.
+ */
+rtk_api_ret_t rtk_svlan_memberPortEntry_set(rtk_vlan_t svid, rtk_svlan_memberCfg_t *pSvlan_cfg)
+{
+    rtk_api_ret_t retVal;
+    
+    RTK_API_LOCK();
+    retVal = _rtk_svlan_memberPortEntry_set(svid, pSvlan_cfg); 
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+/* Function Name:
+ *      rtk_svlan_memberPortEntry_get
+ * Description:
+ *      Get SVLAN member Configure.
+ * Input:
+ *      svid - SVLAN id
+ * Output:
+ *      pSvlan_cfg - SVLAN member configuration
+ * Return:
+ *      RT_ERR_OK                       - OK
+ *      RT_ERR_FAILED                   - Failed
+ *      RT_ERR_SMI                      - SMI access error
+ *      RT_ERR_SVLAN_ENTRY_NOT_FOUND    - specified svlan entry not found.
+ *      RT_ERR_INPUT                    - Invalid input parameters.
+ * Note:
+ *      The API can get the system's 64 accepted S-tag frame formats. Only S-tag frames carrying one of these 64 SVIDs
+ *      will be accepted from uplink ports. S-tag frames with other SVIDs, and S-untagged frames, are dropped.
+ */
+rtk_api_ret_t rtk_svlan_memberPortEntry_get(rtk_vlan_t svid, rtk_svlan_memberCfg_t *pSvlan_cfg)
+{
+    rtk_api_ret_t retVal;
+    
+    RTK_API_LOCK();
+    retVal = _rtk_svlan_memberPortEntry_get(svid, pSvlan_cfg); 
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+/* Function Name:
+ *      rtk_svlan_memberPortEntry_adv_set
+ * Description:
+ *      Configure system SVLAN member by index
+ * Input:
+ *      idx         - Index (0 ~ 63)
+ *      psvlan_cfg  - SVLAN member configuration
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK               - OK
+ *      RT_ERR_FAILED           - Failed
+ *      RT_ERR_SMI              - SMI access error
+ *      RT_ERR_INPUT            - Invalid input parameter.
+ *      RT_ERR_SVLAN_VID        - Invalid SVLAN VID parameter.
+ *      RT_ERR_PORT_MASK        - Invalid portmask.
+ *      RT_ERR_SVLAN_TABLE_FULL - SVLAN configuration is full.
+ * Note:
+ *      The API can set any of the system's 64 accepted S-tag frame formats by index.
+ *      - rtk_svlan_memberCfg_t->svid is SVID of SVLAN member configuration.
+ *      - rtk_svlan_memberCfg_t->memberport is member port mask of SVLAN member configuration.
+ *      - rtk_svlan_memberCfg_t->fid is filtering database of SVLAN member configuration.
+ *      - rtk_svlan_memberCfg_t->priority is priority of SVLAN member configuration.
+ */
+rtk_api_ret_t rtk_svlan_memberPortEntry_adv_set(rtk_uint32 idx, rtk_svlan_memberCfg_t *pSvlan_cfg)
+{
+    rtk_api_ret_t retVal;
+    
+    RTK_API_LOCK();
+    retVal = _rtk_svlan_memberPortEntry_adv_set(idx, pSvlan_cfg); 
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
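+
+/*
+ * Illustrative usage (not part of the driver): program member entry 0 for
+ * SVID 100, a minimal sketch. The index and SVID are example assumptions,
+ * and the member/untag port masks are left for the caller to fill in.
+ */
+#if 0
+static rtk_api_ret_t example_svlan_member_entry(void)
+{
+    rtk_svlan_memberCfg_t cfg;
+
+    memset(&cfg, 0, sizeof(cfg));
+    cfg.svid     = 100;     /* SVID carried in the S-tag */
+    cfg.priority = 0;       /* S-tag priority of this entry */
+    /* cfg.memberport / cfg.untagport: member and untag port masks */
+
+    return rtk_svlan_memberPortEntry_adv_set(0, &cfg);
+}
+#endif
+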
+/* Function Name:
+ *      rtk_svlan_memberPortEntry_adv_get
+ * Description:
+ *      Get SVLAN member Configure by index.
+ * Input:
+ *      idx         - Index (0 ~ 63)
+ * Output:
+ *      pSvlan_cfg  - SVLAN member configuration
+ * Return:
+ *      RT_ERR_OK                       - OK
+ *      RT_ERR_FAILED                   - Failed
+ *      RT_ERR_SMI                      - SMI access error
+ *      RT_ERR_SVLAN_ENTRY_NOT_FOUND    - specified svlan entry not found.
+ *      RT_ERR_INPUT                    - Invalid input parameters.
+ * Note:
+ *      The API can get the system's 64 accepted S-tag frame formats. Only S-tag frames carrying one of these 64 SVIDs
+ *      will be accepted from uplink ports. S-tag frames with other SVIDs, and S-untagged frames, are dropped.
+ */
+rtk_api_ret_t rtk_svlan_memberPortEntry_adv_get(rtk_uint32 idx, rtk_svlan_memberCfg_t *pSvlan_cfg)
+{
+    rtk_api_ret_t retVal;
+    
+    RTK_API_LOCK();
+    retVal = _rtk_svlan_memberPortEntry_adv_get(idx, pSvlan_cfg); 
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+/* Function Name:
+ *      rtk_svlan_defaultSvlan_set
+ * Description:
+ *      Configure default egress SVLAN.
+ * Input:
+ *      port - Source port
+ *      svid - SVLAN id
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK                       - OK
+ *      RT_ERR_FAILED                   - Failed
+ *      RT_ERR_SMI                      - SMI access error
+ *      RT_ERR_INPUT                    - Invalid input parameter.
+ *      RT_ERR_SVLAN_VID                - Invalid SVLAN VID parameter.
+ *      RT_ERR_SVLAN_ENTRY_NOT_FOUND    - specified svlan entry not found.
+ * Note:
+ *      The API can set the S-tag format index of port n, used when a frame received from port n
+ *      is transmitted through an uplink port with an S-tag field.
+ */
+rtk_api_ret_t rtk_svlan_defaultSvlan_set(rtk_port_t port, rtk_vlan_t svid)
+{
+
+    rtk_api_ret_t retVal;
+    
+    RTK_API_LOCK();
+    retVal = _rtk_svlan_defaultSvlan_set(port, svid); 
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+/* Function Name:
+ *      rtk_svlan_defaultSvlan_get
+ * Description:
+ *      Get the configure default egress SVLAN.
+ * Input:
+ *      port - Source port
+ * Output:
+ *      pSvid - SVLAN VID
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_INPUT        - Invalid input parameters.
+ * Note:
+ *      The API can get the S-tag format index of port n, used when a frame received from port n
+ *      is transmitted through an uplink port with an S-tag field.
+ */
+rtk_api_ret_t rtk_svlan_defaultSvlan_get(rtk_port_t port, rtk_vlan_t *pSvid)
+{
+
+    rtk_api_ret_t retVal;
+    
+    RTK_API_LOCK();
+    retVal = _rtk_svlan_defaultSvlan_get(port, pSvid); 
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+/* Function Name:
+ *      rtk_svlan_c2s_add
+ * Description:
+ *      Configure SVLAN C2S table
+ * Input:
+ *      vid - VLAN ID
+ *      src_port - Ingress Port
+ *      svid - SVLAN VID
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_PORT_ID      - Invalid port ID.
+ *      RT_ERR_SVLAN_VID    - Invalid SVLAN VID parameter.
+ *      RT_ERR_VLAN_VID     - Invalid VID parameter.
+ *      RT_ERR_OUT_OF_RANGE - input out of range.
+ *      RT_ERR_INPUT        - Invalid input parameters.
+ * Note:
+ *      The API can set the system C2S configuration. The ASIC checks the VID of upstream packets and assigns the
+ *      related SVID to each matched packet. There are 128 SVLAN C2S configurations.
+ */
+rtk_api_ret_t rtk_svlan_c2s_add(rtk_vlan_t vid, rtk_port_t src_port, rtk_vlan_t svid)
+{
+
+    rtk_api_ret_t retVal;
+    
+    RTK_API_LOCK();
+    retVal = _rtk_svlan_c2s_add(vid, src_port, svid); 
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
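+
+/*
+ * Illustrative usage (not part of the driver): assign SVID 200 to upstream
+ * packets tagged with C-VID 100 that ingress on port 1. All three values
+ * are example assumptions.
+ */
+#if 0
+static rtk_api_ret_t example_svlan_c2s(void)
+{
+    /* arguments: C-VID, ingress port, SVID */
+    return rtk_svlan_c2s_add(100, 1, 200);
+}
+#endif
+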
+/* Function Name:
+ *      rtk_svlan_c2s_del
+ * Description:
+ *      Delete one C2S entry
+ * Input:
+ *      vid - VLAN ID
+ *      src_port - Ingress Port
+ *      svid - SVLAN VID
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK               - OK
+ *      RT_ERR_FAILED           - Failed
+ *      RT_ERR_SMI              - SMI access error
+ *      RT_ERR_VLAN_VID         - Invalid VID parameter.
+ *      RT_ERR_PORT_ID          - Invalid port ID.
+ *      RT_ERR_OUT_OF_RANGE     - input out of range.
+ * Note:
+ *      The API can delete system C2S configuration. There are 128 SVLAN C2S configurations.
+ */
+rtk_api_ret_t rtk_svlan_c2s_del(rtk_vlan_t vid, rtk_port_t src_port)
+{
+    rtk_api_ret_t retVal;
+    
+    RTK_API_LOCK();
+    retVal = _rtk_svlan_c2s_del(vid, src_port); 
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+/* Function Name:
+ *      rtk_svlan_c2s_get
+ * Description:
+ *      Get configure SVLAN C2S table
+ * Input:
+ *      vid - VLAN ID
+ *      src_port - Ingress Port
+ * Output:
+ *      pSvid - SVLAN ID
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_INPUT        - Invalid input parameters.
+ *      RT_ERR_PORT_ID      - Invalid port ID.
+ *      RT_ERR_OUT_OF_RANGE - input out of range.
+ * Note:
+ *     The API can get system C2S configuration. There are 128 SVLAN C2S configurations.
+ */
+rtk_api_ret_t rtk_svlan_c2s_get(rtk_vlan_t vid, rtk_port_t src_port, rtk_vlan_t *pSvid)
+{
+    rtk_api_ret_t retVal;
+    
+    RTK_API_LOCK();
+    retVal = _rtk_svlan_c2s_get(vid, src_port, pSvid); 
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+/* Function Name:
+ *      rtk_svlan_untag_action_set
+ * Description:
+ *      Configure Action of downstream UnStag packet
+ * Input:
+ *      action  - Action for UnStag
+ *      svid    - The SVID assigned to UnStag packet
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK                       - OK
+ *      RT_ERR_FAILED                   - Failed
+ *      RT_ERR_SMI                      - SMI access error
+ *      RT_ERR_SVLAN_VID                - Invalid SVLAN VID parameter.
+ *      RT_ERR_SVLAN_ENTRY_NOT_FOUND    - specified svlan entry not found.
+ *      RT_ERR_OUT_OF_RANGE             - input out of range.
+ *      RT_ERR_INPUT                    - Invalid input parameters.
+ * Note:
+ *      The API can configure the action applied to downstream Un-Stag packets. Assigning an SVID
+ *      to Un-Stag packets is also supported by this API. The parameter svid is
+ *      only referenced when the action is set to UNTAG_ASSIGN.
+ */
+rtk_api_ret_t rtk_svlan_untag_action_set(rtk_svlan_untag_action_t action, rtk_vlan_t svid)
+{
+    rtk_api_ret_t retVal;
+    
+    RTK_API_LOCK();
+    retVal = _rtk_svlan_untag_action_set(action, svid); 
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
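+
+/*
+ * Illustrative usage (not part of the driver): assign SVID 200 to
+ * downstream packets that carry no S-tag. The SVID is an example
+ * assumption and must already exist as a member configuration, otherwise
+ * RT_ERR_SVLAN_ENTRY_NOT_FOUND is returned.
+ */
+#if 0
+static rtk_api_ret_t example_svlan_untag(void)
+{
+    /* svid is only referenced because the action is UNTAG_ASSIGN */
+    return rtk_svlan_untag_action_set(UNTAG_ASSIGN, 200);
+}
+#endif
+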
+/* Function Name:
+ *      rtk_svlan_untag_action_get
+ * Description:
+ *      Get Action of downstream UnStag packet
+ * Input:
+ *      None
+ * Output:
+ *      pAction  - Action for UnStag
+ *      pSvid    - The SVID assigned to UnStag packet
+ * Return:
+ *      RT_ERR_OK                       - OK
+ *      RT_ERR_FAILED                   - Failed
+ *      RT_ERR_SMI                      - SMI access error
+ *      RT_ERR_SVLAN_VID                - Invalid SVLAN VID parameter.
+ *      RT_ERR_SVLAN_ENTRY_NOT_FOUND    - specified svlan entry not found.
+ *      RT_ERR_OUT_OF_RANGE             - input out of range.
+ *      RT_ERR_INPUT                    - Invalid input parameters.
+ * Note:
+ *      The API can get the action applied to downstream Un-Stag packets. The SVID assigned
+ *      to Un-Stag packets is also retrieved by this API. The parameter pSvid is
+ *      only referenced when the action is UNTAG_ASSIGN.
+ */
+rtk_api_ret_t rtk_svlan_untag_action_get(rtk_svlan_untag_action_t *pAction, rtk_vlan_t *pSvid)
+{
+    rtk_api_ret_t retVal;
+    
+    RTK_API_LOCK();
+    retVal = _rtk_svlan_untag_action_get(pAction, pSvid); 
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+/* Function Name:
+ *      rtk_svlan_unmatch_action_set
+ * Description:
+ *      Configure Action of downstream Unmatch packet
+ * Input:
+ *      action  - Action for Unmatch
+ *      svid    - The SVID assigned to Unmatch packet
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK                       - OK
+ *      RT_ERR_FAILED                   - Failed
+ *      RT_ERR_SMI                      - SMI access error
+ *      RT_ERR_SVLAN_VID                - Invalid SVLAN VID parameter.
+ *      RT_ERR_SVLAN_ENTRY_NOT_FOUND    - specified svlan entry not found.
+ *      RT_ERR_OUT_OF_RANGE             - input out of range.
+ *      RT_ERR_INPUT                    - Invalid input parameters.
+ * Note:
+ *      The API can configure the action applied to downstream Un-match packets. Assigning an SVID
+ *      to Un-match packets is also supported by this API. The parameter svid is
+ *      only referenced when the action is set to UNMATCH_ASSIGN.
+ */
+rtk_api_ret_t rtk_svlan_unmatch_action_set(rtk_svlan_unmatch_action_t action, rtk_vlan_t svid)
+{
+    rtk_api_ret_t retVal;
+    
+    RTK_API_LOCK();
+    retVal = _rtk_svlan_unmatch_action_set(action, svid); 
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+/* Function Name:
+ *      rtk_svlan_unmatch_action_get
+ * Description:
+ *      Get Action of downstream Unmatch packet
+ * Input:
+ *      None
+ * Output:
+ *      pAction  - Action for Unmatch
+ *      pSvid    - The SVID assigned to Unmatch packet
+ * Return:
+ *      RT_ERR_OK                       - OK
+ *      RT_ERR_FAILED                   - Failed
+ *      RT_ERR_SMI                      - SMI access error
+ *      RT_ERR_SVLAN_VID                - Invalid SVLAN VID parameter.
+ *      RT_ERR_SVLAN_ENTRY_NOT_FOUND    - specified svlan entry not found.
+ *      RT_ERR_OUT_OF_RANGE             - input out of range.
+ *      RT_ERR_INPUT                    - Invalid input parameters.
+ * Note:
+ *      The API can get the action applied to downstream Un-match packets. The SVID assigned
+ *      to Un-match packets is also retrieved by this API. The parameter pSvid is
+ *      only referenced when the action is UNMATCH_ASSIGN.
+ */
+rtk_api_ret_t rtk_svlan_unmatch_action_get(rtk_svlan_unmatch_action_t *pAction, rtk_vlan_t *pSvid)
+{
+    rtk_api_ret_t retVal;
+    
+    RTK_API_LOCK();
+    retVal = _rtk_svlan_unmatch_action_get(pAction, pSvid); 
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+/* Function Name:
+ *      rtk_svlan_unassign_action_set
+ * Description:
+ *      Configure the action for upstream packets without an assigned SVID
+ * Input:
+ *      action  - Action for Un-assign
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK                       - OK
+ *      RT_ERR_FAILED                   - Failed
+ *      RT_ERR_OUT_OF_RANGE             - input out of range.
+ *      RT_ERR_INPUT                    - Invalid input parameters.
+ * Note:
+ *      The API can configure the action applied to upstream packets with no assigned SVID. If the action is not
+ *      trap-to-CPU, the port-based SVID is assigned as the system requires.
+ */
+rtk_api_ret_t rtk_svlan_unassign_action_set(rtk_svlan_unassign_action_t action)
+{
+    rtk_api_ret_t retVal;
+    
+    RTK_API_LOCK();
+    retVal = _rtk_svlan_unassign_action_set(action); 
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+/* Function Name:
+ *      rtk_svlan_unassign_action_get
+ * Description:
+ *      Get the action for upstream packets without an assigned SVID
+ * Input:
+ *      None
+ * Output:
+ *      pAction  - Action for Un-assign
+ * Return:
+ *      RT_ERR_OK                       - OK
+ *      RT_ERR_FAILED                   - Failed
+ * Note:
+ *      None
+ */
+rtk_api_ret_t rtk_svlan_unassign_action_get(rtk_svlan_unassign_action_t *pAction)
+{
+    rtk_api_ret_t retVal;
+    
+    RTK_API_LOCK();
+    retVal = _rtk_svlan_unassign_action_get(pAction); 
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+/* Function Name:
+ *      rtk_svlan_dmac_vidsel_set
+ * Description:
+ *      Set DMAC CVID selection
+ * Input:
+ *      port    - Port
+ *      enable  - state of DMAC CVID Selection
+ * Output:
+ *      None.
+ * Return:
+ *      RT_ERR_OK                       - OK
+ *      RT_ERR_FAILED                   - Failed
+ *      RT_ERR_SMI                      - SMI access error
+ *      RT_ERR_SVLAN_VID                - Invalid SVLAN VID parameter.
+ *      RT_ERR_SVLAN_ENTRY_NOT_FOUND    - specified svlan entry not found.
+ *      RT_ERR_OUT_OF_RANGE             - input out of range.
+ *      RT_ERR_INPUT                    - Invalid input parameters.
+ * Note:
+ *      This API can set DMAC CVID Selection state
+ */
+rtk_api_ret_t rtk_svlan_dmac_vidsel_set(rtk_port_t port, rtk_enable_t enable)
+{
+    rtk_api_ret_t retVal;
+    
+    RTK_API_LOCK();
+    retVal = _rtk_svlan_dmac_vidsel_set(port, enable); 
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+/* Function Name:
+ *      rtk_svlan_dmac_vidsel_get
+ * Description:
+ *      Get DMAC CVID selection
+ * Input:
+ *      port    - Port
+ * Output:
+ *      pEnable - state of DMAC CVID Selection
+ * Return:
+ *      RT_ERR_OK                       - OK
+ *      RT_ERR_FAILED                   - Failed
+ *      RT_ERR_SMI                      - SMI access error
+ *      RT_ERR_SVLAN_VID                - Invalid SVLAN VID parameter.
+ *      RT_ERR_SVLAN_ENTRY_NOT_FOUND    - specified svlan entry not found.
+ *      RT_ERR_OUT_OF_RANGE             - input out of range.
+ *      RT_ERR_INPUT                    - Invalid input parameters.
+ * Note:
+ *      This API can get DMAC CVID Selection state
+ */
+rtk_api_ret_t rtk_svlan_dmac_vidsel_get(rtk_port_t port, rtk_enable_t *pEnable)
+{
+    rtk_api_ret_t retVal;
+    
+    RTK_API_LOCK();
+    retVal = _rtk_svlan_dmac_vidsel_get(port, pEnable); 
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+/* Function Name:
+ *      rtk_svlan_ipmc2s_add
+ * Description:
+ *      Add IP multicast address to SVLAN
+ * Input:
+ *      svid    - SVLAN VID
+ *      ipmc    - ip multicast address
+ *      ipmcMsk - ip multicast mask
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK                       - OK
+ *      RT_ERR_FAILED                   - Failed
+ *      RT_ERR_SMI                      - SMI access error
+ *      RT_ERR_SVLAN_VID                - Invalid SVLAN VID parameter.
+ *      RT_ERR_SVLAN_ENTRY_NOT_FOUND    - specified svlan entry not found.
+ *      RT_ERR_OUT_OF_RANGE             - input out of range.
+ *      RT_ERR_INPUT                    - Invalid input parameters.
+ * Note:
+ *      The API can set an IP multicast to SVID configuration. If an upstream packet is an IPv4 multicast
+ *      packet and its DIP matches an MC2S configuration, the ASIC assigns the egress SVID to the packet.
+ *      There are 32 SVLAN multicast configurations for IP and L2 multicast.
+ */
+rtk_api_ret_t rtk_svlan_ipmc2s_add(ipaddr_t ipmc, ipaddr_t ipmcMsk,rtk_vlan_t svid)
+{
+    rtk_api_ret_t retVal;
+    
+    RTK_API_LOCK();
+    retVal = _rtk_svlan_ipmc2s_add(ipmc, ipmcMsk, svid); 
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+/* Function Name:
+ *      rtk_svlan_ipmc2s_del
+ * Description:
+ *      Delete IP multicast address to SVLAN
+ * Input:
+ *      ipmc    - ip multicast address
+ *      ipmcMsk - ip multicast mask
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK               - OK
+ *      RT_ERR_FAILED           - Failed
+ *      RT_ERR_SMI              - SMI access error
+ *      RT_ERR_SVLAN_VID        - Invalid SVLAN VID parameter.
+ *      RT_ERR_OUT_OF_RANGE     - input out of range.
+ * Note:
+ *      The API can delete an IP multicast to SVID configuration. There are 32 SVLAN multicast configurations for IP and L2 multicast.
+ */
+rtk_api_ret_t rtk_svlan_ipmc2s_del(ipaddr_t ipmc, ipaddr_t ipmcMsk)
+{
+    rtk_api_ret_t retVal;
+    
+    RTK_API_LOCK();
+    retVal = _rtk_svlan_ipmc2s_del(ipmc, ipmcMsk); 
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+/* Function Name:
+ *      rtk_svlan_ipmc2s_get
+ * Description:
+ *      Get IP multicast address to SVLAN
+ * Input:
+ *      ipmc    - ip multicast address
+ *      ipmcMsk - ip multicast mask
+ * Output:
+ *      pSvid - SVLAN VID
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_INPUT        - Invalid input parameters.
+ *      RT_ERR_OUT_OF_RANGE - input out of range.
+ * Note:
+ *      The API can get an IP multicast to SVID configuration. There are 32 SVLAN multicast configurations for IP and L2 multicast.
+ */
+rtk_api_ret_t rtk_svlan_ipmc2s_get(ipaddr_t ipmc, ipaddr_t ipmcMsk, rtk_vlan_t *pSvid)
+{
+    rtk_api_ret_t retVal;
+    
+    RTK_API_LOCK();
+    retVal = _rtk_svlan_ipmc2s_get(ipmc, ipmcMsk, pSvid); 
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+/* Function Name:
+ *      rtk_svlan_l2mc2s_add
+ * Description:
+ *      Add L2 multicast address to SVLAN
+ * Input:
+ *      mac     - L2 multicast address
+ *      macMsk  - L2 multicast address mask
+ *      svid    - SVLAN VID
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK                       - OK
+ *      RT_ERR_FAILED                   - Failed
+ *      RT_ERR_SMI                      - SMI access error
+ *      RT_ERR_SVLAN_VID                - Invalid SVLAN VID parameter.
+ *      RT_ERR_SVLAN_ENTRY_NOT_FOUND    - specified svlan entry not found.
+ *      RT_ERR_OUT_OF_RANGE             - input out of range.
+ *      RT_ERR_INPUT                    - Invalid input parameters.
+ * Note:
+ *      The API can set an L2 multicast to SVID configuration. If an upstream packet is an L2 multicast
+ *      packet and its DMAC matches, the ASIC assigns the egress SVID to the packet. There are 32
+ *      SVLAN multicast configurations for IP and L2 multicast.
+ */
+rtk_api_ret_t rtk_svlan_l2mc2s_add(rtk_mac_t mac, rtk_mac_t macMsk, rtk_vlan_t svid)
+{
+    rtk_api_ret_t retVal;
+    
+    RTK_API_LOCK();
+    retVal = _rtk_svlan_l2mc2s_add(mac, macMsk, svid); 
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
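+
+/*
+ * Illustrative usage (not part of the driver): map the L2 multicast group
+ * 01:00:5e:00:00:01 to SVID 100 with an exact-match mask. The values are
+ * example assumptions, and the octet[] layout of rtk_mac_t is assumed to
+ * follow the public RTK API.
+ */
+#if 0
+static rtk_api_ret_t example_svlan_l2mc2s(void)
+{
+    rtk_mac_t mac = { .octet = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 } };
+    rtk_mac_t msk = { .octet = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff } };
+
+    return rtk_svlan_l2mc2s_add(mac, msk, 100);
+}
+#endif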
+
+/* Function Name:
+ *      rtk_svlan_l2mc2s_del
+ * Description:
+ *      Delete L2 multicast address to SVLAN
+ * Input:
+ *      mac     - L2 multicast address
+ *      macMsk  - L2 multicast address mask
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK               - OK
+ *      RT_ERR_FAILED           - Failed
+ *      RT_ERR_SMI              - SMI access error
+ *      RT_ERR_SVLAN_VID        - Invalid SVLAN VID parameter.
+ *      RT_ERR_OUT_OF_RANGE     - input out of range.
+ * Note:
+ *      The API can delete an L2 multicast to SVID configuration. There are 32 SVLAN multicast configurations for IP and L2 multicast.
+ */
+rtk_api_ret_t rtk_svlan_l2mc2s_del(rtk_mac_t mac, rtk_mac_t macMsk)
+{
+    rtk_api_ret_t retVal;
+    
+    RTK_API_LOCK();
+    retVal = _rtk_svlan_l2mc2s_del(mac, macMsk); 
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_svlan_l2mc2s_get
+ * Description:
+ *      Get L2 multicast address to SVLAN
+ * Input:
+ *      mac     - L2 multicast address
+ *      macMsk  - L2 multicast address mask
+ * Output:
+ *      pSvid - SVLAN VID
+ * Return:
+ *      RT_ERR_OK               - OK
+ *      RT_ERR_FAILED           - Failed
+ *      RT_ERR_SMI              - SMI access error
+ *      RT_ERR_INPUT            - Invalid input parameters.
+ *      RT_ERR_OUT_OF_RANGE     - input out of range.
+ * Note:
+ *      The API can get an L2 multicast to SVID configuration. There are 32 SVLAN multicast configurations for IP and L2 multicast.
+ */
+rtk_api_ret_t rtk_svlan_l2mc2s_get(rtk_mac_t mac, rtk_mac_t macMsk, rtk_vlan_t *pSvid)
+{
+    rtk_api_ret_t retVal;
+    
+    RTK_API_LOCK();
+    retVal = _rtk_svlan_l2mc2s_get(mac, macMsk, pSvid); 
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_svlan_sp2c_add
+ * Description:
+ *      Add system SP2C configuration
+ * Input:
+ *      cvid        - VLAN ID
+ *      dst_port    - Destination port of SVLAN to CVLAN configuration
+ *      svid        - SVLAN VID
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_PORT_ID      - Invalid port number.
+ *      RT_ERR_SVLAN_VID    - Invalid SVLAN VID parameter.
+ *      RT_ERR_VLAN_VID     - Invalid VID parameter.
+ *      RT_ERR_OUT_OF_RANGE - input out of range.
+ *      RT_ERR_INPUT        - Invalid input parameters.
+ * Note:
+ *      The API can add an SVID & Destination Port to CVLAN configuration. Downstream frames with the assigned
+ *      SVID will have a C-tag with the assigned CVID added when the output port is the assigned destination port.
+ *      There are 128 SP2C configurations.
+ */
+rtk_api_ret_t rtk_svlan_sp2c_add(rtk_vlan_t svid, rtk_port_t dst_port, rtk_vlan_t cvid)
+{
+    rtk_api_ret_t retVal;
+    
+    RTK_API_LOCK();
+    retVal = _rtk_svlan_sp2c_add(svid, dst_port, cvid); 
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
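+
+/*
+ * Illustrative usage (not part of the driver): on egress port 1, translate
+ * downstream SVID 200 back to C-VID 100. The values are example
+ * assumptions and mirror the C2S sketch above.
+ */
+#if 0
+static rtk_api_ret_t example_svlan_sp2c(void)
+{
+    /* arguments: SVID, destination port, C-VID */
+    return rtk_svlan_sp2c_add(200, 1, 100);
+}
+#endif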
+
+/* Function Name:
+ *      rtk_svlan_sp2c_get
+ * Description:
+ *      Get configure system SP2C content
+ * Input:
+ *      svid        - SVLAN VID
+ *      dst_port    - Destination port of SVLAN to CVLAN configuration
+ * Output:
+ *      pCvid - VLAN ID
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_INPUT        - Invalid input parameters.
+ *      RT_ERR_OUT_OF_RANGE - input out of range.
+ *      RT_ERR_PORT_ID      - Invalid port number.
+ *      RT_ERR_SVLAN_VID    - Invalid SVLAN VID parameter.
+ * Note:
+ *     The API can get SVID & Destination Port to CVLAN configuration. There are 128 SP2C configurations.
+ */
+rtk_api_ret_t rtk_svlan_sp2c_get(rtk_vlan_t svid, rtk_port_t dst_port, rtk_vlan_t *pCvid)
+{
+    rtk_api_ret_t retVal;
+    
+    RTK_API_LOCK();
+    retVal = _rtk_svlan_sp2c_get(svid, dst_port, pCvid); 
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_svlan_sp2c_del
+ * Description:
+ *      Delete system SP2C configuration
+ * Input:
+ *      svid        - SVLAN VID
+ *      dst_port    - Destination port of SVLAN to CVLAN configuration
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_PORT_ID      - Invalid port number.
+ *      RT_ERR_SVLAN_VID    - Invalid SVLAN VID parameter.
+ *      RT_ERR_OUT_OF_RANGE - input out of range.
+ * Note:
+ *      The API can delete SVID & Destination Port to CVLAN configuration. There are 128 SP2C configurations.
+ */
+rtk_api_ret_t rtk_svlan_sp2c_del(rtk_vlan_t svid, rtk_port_t dst_port)
+{
+    rtk_api_ret_t retVal;
+    
+    RTK_API_LOCK();
+    retVal = _rtk_svlan_sp2c_del(svid, dst_port); 
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_svlan_lookupType_set
+ * Description:
+ *      Set lookup type of SVLAN
+ * Input:
+ *      type        - lookup type
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK
+ *      RT_ERR_FAILED
+ * Note:
+ *      none
+ */
+rtk_api_ret_t rtk_svlan_lookupType_set(rtk_svlan_lookupType_t type)
+{
+    rtk_api_ret_t retVal;
+    
+    RTK_API_LOCK();
+    retVal = _rtk_svlan_lookupType_set(type); 
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_svlan_lookupType_get
+ * Description:
+ *      Get lookup type of SVLAN
+ * Input:
+ *      pType       - lookup type
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK
+ *      RT_ERR_FAILED
+ * Note:
+ *      none
+ */
+rtk_api_ret_t rtk_svlan_lookupType_get(rtk_svlan_lookupType_t *pType)
+{
+    rtk_api_ret_t retVal;
+    
+    RTK_API_LOCK();
+    retVal = _rtk_svlan_lookupType_get(pType); 
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_svlan_trapPri_set
+ * Description:
+ *      Set svlan trap priority
+ * Input:
+ *      priority - priority for trap packets
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK
+ *      RT_ERR_FAILED
+ *      RT_ERR_QOS_INT_PRIORITY
+ * Note:
+ *      None
+ */
+rtk_api_ret_t rtk_svlan_trapPri_set(rtk_pri_t priority)
+{
+    rtk_api_ret_t retVal;
+    
+    RTK_API_LOCK();
+    retVal = _rtk_svlan_trapPri_set(priority); 
+    RTK_API_UNLOCK();
+
+    return retVal;
+}   /* end of rtk_svlan_trapPri_set */
+
+/* Function Name:
+ *      rtk_svlan_trapPri_get
+ * Description:
+ *      Get svlan trap priority
+ * Input:
+ *      None
+ * Output:
+ *      pPriority - priority for trap packets
+ * Return:
+ *      RT_ERR_OK
+ *      RT_ERR_FAILED
+ *      RT_ERR_NULL_POINTER - input parameter may be null pointer
+ * Note:
+ *      None
+ */
+rtk_api_ret_t rtk_svlan_trapPri_get(rtk_pri_t *pPriority)
+{
+    rtk_api_ret_t retVal;
+    
+    RTK_API_LOCK();
+    retVal = _rtk_svlan_trapPri_get(pPriority); 
+    RTK_API_UNLOCK();
+
+    return retVal;
+}   /* end of rtk_svlan_trapPri_get */
+
+
+/* Don't lock the mutex in the following API */
+
+/* Function Name:
+ *      rtk_svlan_checkAndCreateMbr
+ * Description:
+ *      Check and create Member configuration and return index
+ * Input:
+ *      vid  - VLAN id.
+ * Output:
+ *      pIndex  - Member configuration index
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_VLAN_VID     - Invalid VLAN ID.
+ *      RT_ERR_TBL_FULL     - Member Configuration table full
+ * Note:
+ *
+ */
+rtk_api_ret_t rtk_svlan_checkAndCreateMbr(rtk_vlan_t vid, rtk_uint32 *pIndex)
+{
+    rtk_api_ret_t retVal;
+    rtk_uint32 svidx;
+    rtk_uint32 empty_idx = 0xFFFF;
+    rtl8367c_svlan_memconf_t svlan_cfg;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    /* vid must be 0~4095 */
+    if (vid > RTL8367C_VIDMAX)
+        return RT_ERR_VLAN_VID;
+
+    /* Null pointer check */
+    if(NULL == pIndex)
+        return RT_ERR_NULL_POINTER;
+
+    /* Search exist entry */
+    for (svidx = 0; svidx <= RTL8367C_SVIDXMAX; svidx++)
+    {
+        if(svlan_mbrCfgUsage[svidx] == TRUE)
+        {
+            if(svlan_mbrCfgVid[svidx] == vid)
+            {
+                /* Found! return index */
+                *pIndex = svidx;
+                return RT_ERR_OK;
+            }
+        }
+        else if(empty_idx == 0xFFFF)
+        {
+            empty_idx = svidx;
+        }
+
+    }
+
+    if(empty_idx == 0xFFFF)
+    {
+        /* No empty index */
+        return RT_ERR_TBL_FULL;
+    }
+
+    svlan_mbrCfgUsage[empty_idx] = TRUE;
+    svlan_mbrCfgVid[empty_idx] = vid;
+
+    memset(&svlan_cfg, 0, sizeof(rtl8367c_svlan_memconf_t));
+
+    svlan_cfg.vs_svid = vid;
+    /*for create check*/
+    if(vid == 0)
+    {
+        svlan_cfg.vs_efid = 1;
+    }
+
+    if((retVal = rtl8367c_setAsicSvlanMemberConfiguration(empty_idx, &svlan_cfg)) != RT_ERR_OK)
+        return retVal;
+
+    *pIndex = empty_idx;
+    return RT_ERR_OK;
+}
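+
+/*
+ * Illustrative usage (not part of the driver): resolve the member
+ * configuration index for SVID 200, creating the entry when it does not
+ * exist yet. The SVID is an example assumption.
+ */
+#if 0
+static rtk_api_ret_t example_svlan_mbr_index(void)
+{
+    rtk_uint32 index;
+    rtk_api_ret_t retVal;
+
+    if ((retVal = rtk_svlan_checkAndCreateMbr(200, &index)) != RT_ERR_OK)
+        return retVal;
+
+    /* index now identifies the (possibly freshly created) entry */
+    return RT_ERR_OK;
+}
+#endif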
+
diff -Nruw linux-4.4.115-fbx/drivers/misc/freebox./rtlapi/svlan.h linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/svlan.h
--- linux-4.4.115-fbx/drivers/misc/freebox./rtlapi/svlan.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/svlan.h	2019-01-22 16:16:24.723257454 +0100
@@ -0,0 +1,898 @@
+/*
+ * Copyright (C) 2013 Realtek Semiconductor Corp.
+ * All Rights Reserved.
+ *
+ * This program is the proprietary software of Realtek Semiconductor
+ * Corporation and/or its licensors, and only be used, duplicated,
+ * modified or distributed under the authorized license from Realtek.
+ *
+ * ANY USE OF THE SOFTWARE OTHER THAN AS AUTHORIZED UNDER
+ * THIS LICENSE OR COPYRIGHT LAW IS PROHIBITED.
+ *
+ * Purpose : RTL8367/RTL8367C switch high-level API
+ *
+ * Feature : The file includes SVLAN module high-layer API definitions
+ *
+ */
+
+#ifndef __RTK_API_SVLAN_H__
+#define __RTK_API_SVLAN_H__
+
+typedef rtk_uint32 rtk_svlan_index_t;
+
+typedef struct rtk_svlan_memberCfg_s{
+    rtk_uint32 svid;
+    rtk_portmask_t memberport;
+    rtk_portmask_t untagport;
+    rtk_uint32 fiden;
+    rtk_uint32 fid;
+    rtk_uint32 priority;
+    rtk_uint32 efiden;
+    rtk_uint32 efid;
+}rtk_svlan_memberCfg_t;
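+
+/*
+ * Illustrative initialization (not part of the driver): one way to fill
+ * the structure, assuming GNU C designated initializers. Field roles are
+ * inferred from their names and from the Notes of the APIs below; the
+ * values are example assumptions.
+ */
+#if 0
+static const rtk_svlan_memberCfg_t example_cfg = {
+    .svid     = 100,    /* SVID carried in the S-tag */
+    .fiden    = 0,      /* FID-based lookup enable flag */
+    .fid      = 0,      /* filtering database ID */
+    .priority = 0,      /* S-tag priority */
+    .efiden   = 0,      /* enhanced FID enable flag */
+    .efid     = 0,      /* enhanced FID value */
+    /* .memberport / .untagport: member and untag port masks */
+};
+#endif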
+
+typedef enum rtk_svlan_pri_ref_e
+{
+    REF_INTERNAL_PRI = 0,
+    REF_CTAG_PRI,
+    REF_SVLAN_PRI,
+    REF_PB_PRI,
+    REF_PRI_END
+} rtk_svlan_pri_ref_t;
+
+
+typedef rtk_uint32 rtk_svlan_tpid_t;
+
+typedef enum rtk_svlan_untag_action_e
+{
+    UNTAG_DROP = 0,
+    UNTAG_TRAP,
+    UNTAG_ASSIGN,
+    UNTAG_END
+} rtk_svlan_untag_action_t;
+
+typedef enum rtk_svlan_unmatch_action_e
+{
+    UNMATCH_DROP = 0,
+    UNMATCH_TRAP,
+    UNMATCH_ASSIGN,
+    UNMATCH_END
+} rtk_svlan_unmatch_action_t;
+
+typedef enum rtk_svlan_unassign_action_e
+{
+    UNASSIGN_PBSVID = 0,
+    UNASSIGN_TRAP,
+    UNASSIGN_END
+} rtk_svlan_unassign_action_t;
+
+
+typedef enum rtk_svlan_lookupType_e
+{
+    SVLAN_LOOKUP_S64MBRCGF  = 0,
+    SVLAN_LOOKUP_C4KVLAN,
+    SVLAN_LOOKUP_END,
+
+} rtk_svlan_lookupType_t;
+
+/* Function Name:
+ *      rtk_svlan_init
+ * Description:
+ *      Initialize SVLAN Configuration
+ * Input:
+ *      None
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ * Note:
+ *      The ether type of the S-tag in 802.1ad is 0x88a8; ether types 0x9100 and 0x9200 also exist for Q-in-Q SVLAN designs.
+ *      The user can set the matched ether type to the protocol supported by the service provider.
+ */
+extern rtk_api_ret_t rtk_svlan_init(void);
+
+/* Function Name:
+ *      rtk_svlan_servicePort_add
+ * Description:
+ *      Add one service port in the specified device
+ * Input:
+ *      port - Port id.
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_PORT_ID      - Invalid port number.
+ *      RT_ERR_INPUT        - Invalid input parameters.
+ * Note:
+ *      This API sets which port is connected to the provider switch. All frames received from this port must
+ *      contain an accepted SVID in the S-tag field.
+ */
+extern rtk_api_ret_t rtk_svlan_servicePort_add(rtk_port_t port);
+
+/* Function Name:
+ *      rtk_svlan_servicePort_get
+ * Description:
+ *      Get service ports in the specified device.
+ * Input:
+ *      None
+ * Output:
+ *      pSvlan_portmask - pointer buffer of svlan ports.
+ * Return:
+ *      RT_ERR_OK          - OK
+ *      RT_ERR_FAILED      - Failed
+ *      RT_ERR_SMI         - SMI access error
+ * Note:
+ *      This API gets which ports are connected to the provider switch. All frames received from these ports must
+ *      contain an accepted SVID in the S-tag field.
+ */
+extern rtk_api_ret_t rtk_svlan_servicePort_get(rtk_portmask_t *pSvlan_portmask);
+
+/* Function Name:
+ *      rtk_svlan_servicePort_del
+ * Description:
+ *      Delete one service port in the specified device
+ * Input:
+ *      port - Port id.
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_PORT_ID      - Invalid port number.
+ * Note:
+ *      This API is removing SVLAN service port in the specified device.
+ */
+extern rtk_api_ret_t rtk_svlan_servicePort_del(rtk_port_t port);
+
+/* Function Name:
+ *      rtk_svlan_tpidEntry_set
+ * Description:
+ *      Configure accepted S-VLAN ether type.
+ * Input:
+ *      svlan_tag_id - Ether type of S-tag frame parsing in uplink ports.
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_INPUT        - Invalid input parameter.
+ * Note:
+ *      The ether type of the S-tag in 802.1ad is 0x88a8; ether types 0x9100 and 0x9200 also exist for Q-in-Q SVLAN designs.
+ *      The user can set the matched ether type to the protocol supported by the service provider.
+ */
+extern rtk_api_ret_t rtk_svlan_tpidEntry_set(rtk_uint32 svlan_tag_id);
+
+/* Function Name:
+ *      rtk_svlan_tpidEntry_get
+ * Description:
+ *      Get accepted S-VLAN ether type setting.
+ * Input:
+ *      None
+ * Output:
+ *      pSvlan_tag_id -  Ether type of S-tag frame parsing in uplink ports.
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ * Note:
+ *      This API gets the ether type used for S-tag frame parsing on uplink ports.
+ */
+extern rtk_api_ret_t rtk_svlan_tpidEntry_get(rtk_uint32 *pSvlan_tag_id);
+
+/* Function Name:
+ *      rtk_svlan_priorityRef_set
+ * Description:
+ *      Set S-VLAN upstream priority reference setting.
+ * Input:
+ *      ref - reference selection parameter.
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_INPUT        - Invalid input parameter.
+ * Note:
+ *      The API can set the upstream SVLAN tag priority reference source. The related priority
+ *      sources are as following:
+ *      - REF_INTERNAL_PRI,
+ *      - REF_CTAG_PRI,
+ *      - REF_SVLAN_PRI,
+ *      - REF_PB_PRI.
+ */
+extern rtk_api_ret_t rtk_svlan_priorityRef_set(rtk_svlan_pri_ref_t ref);
+
+/* Function Name:
+ *      rtk_svlan_priorityRef_get
+ * Description:
+ *      Get S-VLAN upstream priority reference setting.
+ * Input:
+ *      None
+ * Output:
+ *      pRef - reference selection parameter.
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ * Note:
+ *      The API can get the upstream SVLAN tag priority reference source. The related priority
+ *      sources are as following:
+ *      - REF_INTERNAL_PRI,
+ *      - REF_CTAG_PRI,
+ *      - REF_SVLAN_PRI,
+ *      - REF_PB_PRI
+ */
+extern rtk_api_ret_t rtk_svlan_priorityRef_get(rtk_svlan_pri_ref_t *pRef);
+
+/* Function Name:
+ *      rtk_svlan_memberPortEntry_set
+ * Description:
+ *      Configure system SVLAN member content
+ * Input:
+ *      svid - SVLAN id
+ *      psvlan_cfg - SVLAN member configuration
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK               - OK
+ *      RT_ERR_FAILED           - Failed
+ *      RT_ERR_SMI              - SMI access error
+ *      RT_ERR_INPUT            - Invalid input parameter.
+ *      RT_ERR_SVLAN_VID        - Invalid SVLAN VID parameter.
+ *      RT_ERR_PORT_MASK        - Invalid portmask.
+ *      RT_ERR_SVLAN_TABLE_FULL - SVLAN configuration is full.
+ * Note:
+ *      The API can set the system's 64 accepted S-tag frame formats. Only S-tag frames carrying one of these 64 SVIDs
+ *      will be accepted from uplink ports. S-tag frames with other SVIDs, and S-untagged frames, are dropped by the default setup.
+ *      - rtk_svlan_memberCfg_t->svid is SVID of SVLAN member configuration.
+ *      - rtk_svlan_memberCfg_t->memberport is member port mask of SVLAN member configuration.
+ *      - rtk_svlan_memberCfg_t->fid is filtering database of SVLAN member configuration.
+ *      - rtk_svlan_memberCfg_t->priority is priority of SVLAN member configuration.
+ */
+extern rtk_api_ret_t rtk_svlan_memberPortEntry_set(rtk_uint32 svid_idx, rtk_svlan_memberCfg_t *psvlan_cfg);
+
+/* Function Name:
+ *      rtk_svlan_memberPortEntry_get
+ * Description:
+ *      Get SVLAN member Configure.
+ * Input:
+ *      svid - SVLAN id
+ * Output:
+ *      pSvlan_cfg - SVLAN member configuration
+ * Return:
+ *      RT_ERR_OK                       - OK
+ *      RT_ERR_FAILED                   - Failed
+ *      RT_ERR_SMI                      - SMI access error
+ *      RT_ERR_SVLAN_ENTRY_NOT_FOUND    - specified svlan entry not found.
+ *      RT_ERR_INPUT                    - Invalid input parameters.
+ * Note:
+ *      The API can get the system's 64 accepted S-tag frame formats. Only S-tag frames carrying one of these 64 SVIDs
+ *      will be accepted from uplink ports. S-tag frames with other SVIDs, and S-untagged frames, are dropped.
+ */
+extern rtk_api_ret_t rtk_svlan_memberPortEntry_get(rtk_uint32 svid_idx, rtk_svlan_memberCfg_t *pSvlan_cfg);
+
+/* Function Name:
+ *      rtk_svlan_memberPortEntry_adv_set
+ * Description:
+ *      Configure system SVLAN member by index
+ * Input:
+ *      idx         - Index (0 ~ 63)
+ *      psvlan_cfg  - SVLAN member configuration
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK               - OK
+ *      RT_ERR_FAILED           - Failed
+ *      RT_ERR_SMI              - SMI access error
+ *      RT_ERR_INPUT            - Invalid input parameter.
+ *      RT_ERR_SVLAN_VID        - Invalid SVLAN VID parameter.
+ *      RT_ERR_PORT_MASK        - Invalid portmask.
+ *      RT_ERR_SVLAN_TABLE_FULL - SVLAN configuration is full.
+ * Note:
+ *      The API can set any of the system's 64 accepted S-tag frame formats by index.
+ *      - rtk_svlan_memberCfg_t->svid is SVID of SVLAN member configuration.
+ *      - rtk_svlan_memberCfg_t->memberport is member port mask of SVLAN member configuration.
+ *      - rtk_svlan_memberCfg_t->fid is filtering database of SVLAN member configuration.
+ *      - rtk_svlan_memberCfg_t->priority is priority of SVLAN member configuration.
+ */
+extern rtk_api_ret_t rtk_svlan_memberPortEntry_adv_set(rtk_uint32 idx, rtk_svlan_memberCfg_t *pSvlan_cfg);
+
+/* Function Name:
+ *      rtk_svlan_memberPortEntry_adv_get
+ * Description:
+ *      Get SVLAN member Configure by index.
+ * Input:
+ *      idx         - Index (0 ~ 63)
+ * Output:
+ *      pSvlan_cfg  - SVLAN member configuration
+ * Return:
+ *      RT_ERR_OK                       - OK
+ *      RT_ERR_FAILED                   - Failed
+ *      RT_ERR_SMI                      - SMI access error
+ *      RT_ERR_SVLAN_ENTRY_NOT_FOUND    - specified svlan entry not found.
+ *      RT_ERR_INPUT                    - Invalid input parameters.
+ * Note:
+ *      The API can get the system's 64 accepted S-tag frame formats. Only S-tag frames carrying one of these 64 SVIDs
+ *      will be accepted from uplink ports. S-tag frames with other SVIDs, and S-untagged frames, are dropped.
+ */
+extern rtk_api_ret_t rtk_svlan_memberPortEntry_adv_get(rtk_uint32 idx, rtk_svlan_memberCfg_t *pSvlan_cfg);
+
+/* Function Name:
+ *      rtk_svlan_defaultSvlan_set
+ * Description:
+ *      Configure default egress SVLAN.
+ * Input:
+ *      port - Source port
+ *      svid - SVLAN id
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK                       - OK
+ *      RT_ERR_FAILED                   - Failed
+ *      RT_ERR_SMI                      - SMI access error
+ *      RT_ERR_INPUT                    - Invalid input parameter.
+ *      RT_ERR_SVLAN_VID                - Invalid SVLAN VID parameter.
+ *      RT_ERR_SVLAN_ENTRY_NOT_FOUND    - specified svlan entry not found.
+ * Note:
+ *      The API can set the S-tag format index of port n, used when a frame received from port n
+ *      is transmitted through an uplink port with an S-tag field.
+ */
+extern rtk_api_ret_t rtk_svlan_defaultSvlan_set(rtk_port_t port, rtk_vlan_t svid);
+
+/* Function Name:
+ *      rtk_svlan_defaultSvlan_get
+ * Description:
+ *      Get the configure default egress SVLAN.
+ * Input:
+ *      port - Source port
+ * Output:
+ *      pSvid - SVLAN VID
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_INPUT        - Invalid input parameters.
+ * Note:
+ *      The API can get the S-tag format index of port n, used when a frame received from port n
+ *      is transmitted through an uplink port with an S-tag field.
+ */
+extern rtk_api_ret_t rtk_svlan_defaultSvlan_get(rtk_port_t port, rtk_vlan_t *pSvid);
+
+/* Function Name:
+ *      rtk_svlan_c2s_add
+ * Description:
+ *      Configure SVLAN C2S table
+ * Input:
+ *      vid - VLAN ID
+ *      src_port - Ingress Port
+ *      svid - SVLAN VID
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_PORT_ID      - Invalid port ID.
+ *      RT_ERR_SVLAN_VID    - Invalid SVLAN VID parameter.
+ *      RT_ERR_VLAN_VID     - Invalid VID parameter.
+ *      RT_ERR_OUT_OF_RANGE - input out of range.
+ *      RT_ERR_INPUT        - Invalid input parameters.
+ * Note:
+ *      The API can set the system C2S configuration. The ASIC checks the VID of upstream packets and assigns the
+ *      related SVID to each matched packet. There are 128 SVLAN C2S configurations.
+ */
+extern rtk_api_ret_t rtk_svlan_c2s_add(rtk_vlan_t vid, rtk_port_t src_port, rtk_vlan_t svid);
+
+/* Function Name:
+ *      rtk_svlan_c2s_del
+ * Description:
+ *      Delete one C2S entry
+ * Input:
+ *      vid - VLAN ID
+ *      src_port - Ingress Port
+ *      svid - SVLAN VID
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK               - OK
+ *      RT_ERR_FAILED           - Failed
+ *      RT_ERR_SMI              - SMI access error
+ *      RT_ERR_VLAN_VID         - Invalid VID parameter.
+ *      RT_ERR_PORT_ID          - Invalid port ID.
+ *      RT_ERR_OUT_OF_RANGE     - input out of range.
+ * Note:
+ *      The API can delete system C2S configuration. There are 128 SVLAN C2S configurations.
+ */
+extern rtk_api_ret_t rtk_svlan_c2s_del(rtk_vlan_t vid, rtk_port_t src_port);
+
+/* Function Name:
+ *      rtk_svlan_c2s_get
+ * Description:
+ *      Get configure SVLAN C2S table
+ * Input:
+ *      vid - VLAN ID
+ *      src_port - Ingress Port
+ * Output:
+ *      pSvid - SVLAN ID
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_INPUT        - Invalid input parameters.
+ *      RT_ERR_PORT_ID      - Invalid port ID.
+ *      RT_ERR_OUT_OF_RANGE - input out of range.
+ * Note:
+ *     The API can get system C2S configuration. There are 128 SVLAN C2S configurations.
+ */
+extern rtk_api_ret_t rtk_svlan_c2s_get(rtk_vlan_t vid, rtk_port_t src_port, rtk_vlan_t *pSvid);
+
+/* Function Name:
+ *      rtk_svlan_untag_action_set
+ * Description:
+ *      Configure Action of downstream Un-Stag packet
+ * Input:
+ *      action  - Action for UnStag
+ *      svid    - The SVID assigned to UnStag packet
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK                       - OK
+ *      RT_ERR_FAILED                   - Failed
+ *      RT_ERR_SMI                      - SMI access error
+ *      RT_ERR_SVLAN_VID                - Invalid SVLAN VID parameter.
+ *      RT_ERR_SVLAN_ENTRY_NOT_FOUND    - specified svlan entry not found.
+ *      RT_ERR_OUT_OF_RANGE             - input out of range.
+ *      RT_ERR_INPUT                    - Invalid input parameters.
+ * Note:
+ *      The API can configure the action applied to downstream Un-Stag packets. Assigning an SVID
+ *      to Un-Stag packets is also supported by this API. The parameter svid is
+ *      only referenced when the action is set to UNTAG_ASSIGN.
+ */
+extern rtk_api_ret_t rtk_svlan_untag_action_set(rtk_svlan_untag_action_t action, rtk_vlan_t svid);
+
+/* Function Name:
+ *      rtk_svlan_untag_action_get
+ * Description:
+ *      Get Action of downstream Un-Stag packet
+ * Input:
+ *      None
+ * Output:
+ *      pAction  - Action for UnStag
+ *      pSvid    - The SVID assigned to UnStag packet
+ * Return:
+ *      RT_ERR_OK                       - OK
+ *      RT_ERR_FAILED                   - Failed
+ *      RT_ERR_SMI                      - SMI access error
+ *      RT_ERR_SVLAN_VID                - Invalid SVLAN VID parameter.
+ *      RT_ERR_SVLAN_ENTRY_NOT_FOUND    - specified svlan entry not found.
+ *      RT_ERR_OUT_OF_RANGE             - input out of range.
+ *      RT_ERR_INPUT                    - Invalid input parameters.
+ * Note:
+ *      The API can get the action applied to downstream Un-Stag packets. The SVID assigned
+ *      to Un-Stag packets is also retrieved by this API. The parameter pSvid is
+ *      only referenced when the action is UNTAG_ASSIGN.
+ */
+extern rtk_api_ret_t rtk_svlan_untag_action_get(rtk_svlan_untag_action_t *pAction, rtk_vlan_t *pSvid);
+
+/* Function Name:
+ *      rtk_svlan_unmatch_action_set
+ * Description:
+ *      Configure Action of downstream Unmatch packet
+ * Input:
+ *      action  - Action for Unmatch
+ *      svid    - The SVID assigned to Unmatch packet
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK                       - OK
+ *      RT_ERR_FAILED                   - Failed
+ *      RT_ERR_SMI                      - SMI access error
+ *      RT_ERR_SVLAN_VID                - Invalid SVLAN VID parameter.
+ *      RT_ERR_SVLAN_ENTRY_NOT_FOUND    - specified svlan entry not found.
+ *      RT_ERR_OUT_OF_RANGE             - input out of range.
+ *      RT_ERR_INPUT                    - Invalid input parameters.
+ * Note:
+ *      The API can configure the action applied to downstream Un-match packets. Assigning an SVID
+ *      to Un-match packets is also supported by this API. The parameter svid is
+ *      only referenced when the action is set to UNMATCH_ASSIGN.
+ */
+extern rtk_api_ret_t rtk_svlan_unmatch_action_set(rtk_svlan_unmatch_action_t action, rtk_vlan_t svid);
+
+/* Function Name:
+ *      rtk_svlan_unmatch_action_get
+ * Description:
+ *      Get Action of downstream Unmatch packet
+ * Input:
+ *      None
+ * Output:
+ *      pAction  - Action for Unmatch
+ *      pSvid    - The SVID assigned to Unmatch packet
+ * Return:
+ *      RT_ERR_OK                       - OK
+ *      RT_ERR_FAILED                   - Failed
+ *      RT_ERR_SMI                      - SMI access error
+ *      RT_ERR_SVLAN_VID                - Invalid SVLAN VID parameter.
+ *      RT_ERR_SVLAN_ENTRY_NOT_FOUND    - specified svlan entry not found.
+ *      RT_ERR_OUT_OF_RANGE             - input out of range.
+ *      RT_ERR_INPUT                    - Invalid input parameters.
+ * Note:
+ *      The API can get the action applied to downstream Un-match packets. The SVID assigned
+ *      to Un-match packets is also retrieved by this API. The parameter pSvid is
+ *      only referenced when the action is UNMATCH_ASSIGN.
+ */
+extern rtk_api_ret_t rtk_svlan_unmatch_action_get(rtk_svlan_unmatch_action_t *pAction, rtk_vlan_t *pSvid);
+
+/* Function Name:
+ *      rtk_svlan_dmac_vidsel_set
+ * Description:
+ *      Set DMAC CVID selection
+ * Input:
+ *      port    - Port
+ *      enable  - state of DMAC CVID Selection
+ * Output:
+ *      None.
+ * Return:
+ *      RT_ERR_OK                       - OK
+ *      RT_ERR_FAILED                   - Failed
+ *      RT_ERR_SMI                      - SMI access error
+ *      RT_ERR_SVLAN_VID                - Invalid SVLAN VID parameter.
+ *      RT_ERR_SVLAN_ENTRY_NOT_FOUND    - specified svlan entry not found.
+ *      RT_ERR_OUT_OF_RANGE             - input out of range.
+ *      RT_ERR_INPUT                    - Invalid input parameters.
+ * Note:
+ *      This API can set DMAC CVID Selection state
+ */
+extern rtk_api_ret_t rtk_svlan_dmac_vidsel_set(rtk_port_t port, rtk_enable_t enable);
+
+/* Function Name:
+ *      rtk_svlan_dmac_vidsel_get
+ * Description:
+ *      Get DMAC CVID selection
+ * Input:
+ *      port    - Port
+ * Output:
+ *      pEnable - state of DMAC CVID Selection
+ * Return:
+ *      RT_ERR_OK                       - OK
+ *      RT_ERR_FAILED                   - Failed
+ *      RT_ERR_SMI                      - SMI access error
+ *      RT_ERR_SVLAN_VID                - Invalid SVLAN VID parameter.
+ *      RT_ERR_SVLAN_ENTRY_NOT_FOUND    - specified svlan entry not found.
+ *      RT_ERR_OUT_OF_RANGE             - input out of range.
+ *      RT_ERR_INPUT                    - Invalid input parameters.
+ * Note:
+ *      This API can get DMAC CVID Selection state
+ */
+extern rtk_api_ret_t rtk_svlan_dmac_vidsel_get(rtk_port_t port, rtk_enable_t *pEnable);
+
+/* Function Name:
+ *      rtk_svlan_ipmc2s_add
+ * Description:
+ *      Add IP multicast address to SVLAN
+ * Input:
+ *      svid    - SVLAN VID
+ *      ipmc    - ip multicast address
+ *      ipmcMsk - ip multicast mask
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK                       - OK
+ *      RT_ERR_FAILED                   - Failed
+ *      RT_ERR_SMI                      - SMI access error
+ *      RT_ERR_SVLAN_VID                - Invalid SVLAN VID parameter.
+ *      RT_ERR_SVLAN_ENTRY_NOT_FOUND    - specified svlan entry not found.
+ *      RT_ERR_OUT_OF_RANGE             - input out of range.
+ *      RT_ERR_INPUT                    - Invalid input parameters.
+ * Note:
+ *      The API sets the IP multicast to SVID configuration. If an upstream
+ *      packet is an IPv4 multicast packet and its DIP matches an MC2S entry,
+ *      the ASIC assigns the egress SVID to the packet. There are 32 SVLAN
+ *      multicast configurations shared by IP and L2 multicast.
+ */
+extern rtk_api_ret_t rtk_svlan_ipmc2s_add(ipaddr_t ipmc, ipaddr_t ipmcMsk, rtk_vlan_t svid);
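+
+/*
+ * Usage sketch (illustrative only): map the IPv4 multicast range 224.0.0.0/4
+ * to SVID 200, assuming ipaddr_t carries an IPv4 address in host byte order
+ * (an assumption; the type is defined elsewhere).
+ *
+ *      rtk_api_ret_t ret;
+ *
+ *      ret = rtk_svlan_ipmc2s_add(0xE0000000, 0xF0000000, 200);
+ *      if (ret != RT_ERR_OK)
+ *          handle_error(ret);                hypothetical error handler
+ */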
+
+/* Function Name:
+ *      rtk_svlan_ipmc2s_del
+ * Description:
+ *      Delete IP multicast address to SVLAN
+ * Input:
+ *      ipmc    - ip multicast address
+ *      ipmcMsk - ip multicast mask
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK               - OK
+ *      RT_ERR_FAILED           - Failed
+ *      RT_ERR_SMI              - SMI access error
+ *      RT_ERR_SVLAN_VID        - Invalid SVLAN VID parameter.
+ *      RT_ERR_OUT_OF_RANGE     - input out of range.
+ * Note:
+ *      The API deletes an IP multicast to SVID configuration. There are 32 SVLAN multicast configurations shared by IP and L2 multicast.
+ */
+extern rtk_api_ret_t rtk_svlan_ipmc2s_del(ipaddr_t ipmc, ipaddr_t ipmcMsk);
+
+/* Function Name:
+ *      rtk_svlan_ipmc2s_get
+ * Description:
+ *      Get IP multicast address to SVLAN
+ * Input:
+ *      ipmc    - ip multicast address
+ *      ipmcMsk - ip multicast mask
+ * Output:
+ *      pSvid - SVLAN VID
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_INPUT        - Invalid input parameters.
+ *      RT_ERR_OUT_OF_RANGE - input out of range.
+ * Note:
+ *      The API gets the IP multicast to SVID configuration. There are 32 SVLAN multicast configurations shared by IP and L2 multicast.
+ */
+extern rtk_api_ret_t rtk_svlan_ipmc2s_get(ipaddr_t ipmc, ipaddr_t ipmcMsk, rtk_vlan_t *pSvid);
+
+/* Function Name:
+ *      rtk_svlan_l2mc2s_add
+ * Description:
+ *      Add L2 multicast address to SVLAN
+ * Input:
+ *      mac     - L2 multicast address
+ *      macMsk  - L2 multicast address mask
+ *      svid    - SVLAN VID
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK                       - OK
+ *      RT_ERR_FAILED                   - Failed
+ *      RT_ERR_SMI                      - SMI access error
+ *      RT_ERR_SVLAN_VID                - Invalid SVLAN VID parameter.
+ *      RT_ERR_SVLAN_ENTRY_NOT_FOUND    - specified svlan entry not found.
+ *      RT_ERR_OUT_OF_RANGE             - input out of range.
+ *      RT_ERR_INPUT                    - Invalid input parameters.
+ * Note:
+ *      The API sets the L2 multicast to SVID configuration. If an upstream
+ *      packet is an L2 multicast packet and its DMAC matches, the ASIC assigns
+ *      the egress SVID to the packet. There are 32 SVLAN multicast
+ *      configurations shared by IP and L2 multicast.
+ */
+extern rtk_api_ret_t rtk_svlan_l2mc2s_add(rtk_mac_t mac, rtk_mac_t macMsk, rtk_vlan_t svid);
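+
+/*
+ * Usage sketch (illustrative only): map an L2 multicast DMAC to SVID 200.
+ * The layout of rtk_mac_t is not shown in this header, so filling the
+ * address and mask is left abstract here.
+ *
+ *      rtk_mac_t mac, macMsk;
+ *
+ *      fill_mc_dmac(&mac, &macMsk);          hypothetical helper
+ *      if (rtk_svlan_l2mc2s_add(mac, macMsk, 200) != RT_ERR_OK)
+ *          handle_error();                   hypothetical error handler
+ */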
+
+/* Function Name:
+ *      rtk_svlan_l2mc2s_del
+ * Description:
+ *      Delete L2 multicast address to SVLAN
+ * Input:
+ *      mac     - L2 multicast address
+ *      macMsk  - L2 multicast address mask
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK               - OK
+ *      RT_ERR_FAILED           - Failed
+ *      RT_ERR_SMI              - SMI access error
+ *      RT_ERR_SVLAN_VID        - Invalid SVLAN VID parameter.
+ *      RT_ERR_OUT_OF_RANGE     - input out of range.
+ * Note:
+ *      The API deletes an L2 multicast to SVID configuration. There are 32 SVLAN multicast configurations shared by IP and L2 multicast.
+ */
+extern rtk_api_ret_t rtk_svlan_l2mc2s_del(rtk_mac_t mac, rtk_mac_t macMsk);
+
+/* Function Name:
+ *      rtk_svlan_l2mc2s_get
+ * Description:
+ *      Get L2 multicast address to SVLAN
+ * Input:
+ *      mac     - L2 multicast address
+ *      macMsk  - L2 multicast address mask
+ * Output:
+ *      pSvid   - SVLAN VID
+ * Return:
+ *      RT_ERR_OK               - OK
+ *      RT_ERR_FAILED           - Failed
+ *      RT_ERR_SMI              - SMI access error
+ *      RT_ERR_INPUT            - Invalid input parameters.
+ *      RT_ERR_OUT_OF_RANGE     - input out of range.
+ * Note:
+ *      The API gets the L2 multicast to SVID configuration. There are 32 SVLAN multicast configurations shared by IP and L2 multicast.
+ */
+extern rtk_api_ret_t rtk_svlan_l2mc2s_get(rtk_mac_t mac, rtk_mac_t macMsk, rtk_vlan_t *pSvid);
+
+/* Function Name:
+ *      rtk_svlan_sp2c_add
+ * Description:
+ *      Add system SP2C configuration
+ * Input:
+ *      cvid        - VLAN ID
+ *      dst_port    - Destination port of SVLAN to CVLAN configuration
+ *      svid        - SVLAN VID
+ *
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_PORT_ID      - Invalid port number.
+ *      RT_ERR_SVLAN_VID    - Invalid SVLAN VID parameter.
+ *      RT_ERR_VLAN_VID     - Invalid VID parameter.
+ *      RT_ERR_OUT_OF_RANGE - input out of range.
+ *      RT_ERR_INPUT        - Invalid input parameters.
+ * Note:
+ *      The API adds an SVID and destination port to CVLAN configuration.
+ *      Downstream frames carrying the assigned SVID are tagged with a C-tag
+ *      carrying the assigned CVID when the output port is the assigned
+ *      destination port. There are 128 SP2C configurations.
+ */
+extern rtk_api_ret_t rtk_svlan_sp2c_add(rtk_vlan_t svid, rtk_port_t dst_port, rtk_vlan_t cvid);
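+
+/*
+ * Usage sketch (illustrative only): retag downstream frames carrying SVID
+ * 100 with CVID 10 on egress port lan_port. Both VID values are arbitrary
+ * examples and lan_port must be a valid rtk_port_t for the board.
+ *
+ *      if (rtk_svlan_sp2c_add(100, lan_port, 10) != RT_ERR_OK)
+ *          handle_error();                   hypothetical error handler
+ */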
+
+/* Function Name:
+ *      rtk_svlan_sp2c_get
+ * Description:
+ *      Get system SP2C configuration
+ * Input:
+ *      svid        - SVLAN VID
+ *      dst_port    - Destination port of SVLAN to CVLAN configuration
+ * Output:
+ *      pCvid - VLAN ID
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_INPUT        - Invalid input parameters.
+ *      RT_ERR_OUT_OF_RANGE - input out of range.
+ *      RT_ERR_PORT_ID      - Invalid port number.
+ *      RT_ERR_SVLAN_VID    - Invalid SVLAN VID parameter.
+ * Note:
+ *     The API can get SVID & Destination Port to CVLAN configuration. There are 128 SP2C configurations.
+ */
+extern rtk_api_ret_t rtk_svlan_sp2c_get(rtk_vlan_t svid, rtk_port_t dst_port, rtk_vlan_t *pCvid);
+
+/* Function Name:
+ *      rtk_svlan_sp2c_del
+ * Description:
+ *      Delete system SP2C configuration
+ * Input:
+ *      svid        - SVLAN VID
+ *      dst_port    - Destination port of SVLAN to CVLAN configuration
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_PORT_ID      - Invalid port number.
+ *      RT_ERR_SVLAN_VID    - Invalid SVLAN VID parameter.
+ *      RT_ERR_OUT_OF_RANGE - input out of range.
+ * Note:
+ *      The API can delete SVID & Destination Port to CVLAN configuration. There are 128 SP2C configurations.
+ */
+extern rtk_api_ret_t rtk_svlan_sp2c_del(rtk_vlan_t svid, rtk_port_t dst_port);
+
+
+/* Function Name:
+ *      rtk_svlan_lookupType_set
+ * Description:
+ *      Set lookup type of SVLAN
+ * Input:
+ *      type        - lookup type
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK
+ *      RT_ERR_FAILED
+ * Note:
+ *      None
+ */
+extern rtk_api_ret_t rtk_svlan_lookupType_set(rtk_svlan_lookupType_t type);
+
+/* Function Name:
+ *      rtk_svlan_lookupType_get
+ * Description:
+ *      Get lookup type of SVLAN
+ * Input:
+ *      None
+ * Output:
+ *      pType       - lookup type
+ * Return:
+ *      RT_ERR_OK
+ *      RT_ERR_FAILED
+ * Note:
+ *      None
+ */
+extern rtk_api_ret_t rtk_svlan_lookupType_get(rtk_svlan_lookupType_t *pType);
+
+/* Function Name:
+ *      rtk_svlan_trapPri_set
+ * Description:
+ *      Set svlan trap priority
+ * Input:
+ *      priority - priority for trap packets
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK
+ *      RT_ERR_FAILED
+ *      RT_ERR_QOS_INT_PRIORITY
+ * Note:
+ *      None
+ */
+extern rtk_api_ret_t rtk_svlan_trapPri_set(rtk_pri_t priority);
+
+/* Function Name:
+ *      rtk_svlan_trapPri_get
+ * Description:
+ *      Get svlan trap priority
+ * Input:
+ *      None
+ * Output:
+ *      pPriority - priority for trap packets
+ * Return:
+ *      RT_ERR_OK
+ *      RT_ERR_FAILED
+ *      RT_ERR_NULL_POINTER - input parameter may be null pointer
+ * Note:
+ *      None
+ */
+extern rtk_api_ret_t rtk_svlan_trapPri_get(rtk_pri_t *pPriority);
+
+/* Function Name:
+ *      rtk_svlan_unassign_action_set
+ * Description:
+ *      Configure action for upstream packets without SVID assignment
+ * Input:
+ *      action  - Action for Un-assign
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK                       - OK
+ *      RT_ERR_FAILED                   - Failed
+ *      RT_ERR_OUT_OF_RANGE             - input out of range.
+ *      RT_ERR_INPUT                    - Invalid input parameters.
+ * Note:
+ *      The API configures the action applied to upstream packets without an
+ *      assigned SVID. If the action is not trap-to-CPU, the port-based SVID is
+ *      assigned as the system requires.
+ */
+extern rtk_api_ret_t rtk_svlan_unassign_action_set(rtk_svlan_unassign_action_t action);
+
+/* Function Name:
+ *      rtk_svlan_unassign_action_get
+ * Description:
+ *      Get action for upstream packets without SVID assignment
+ * Input:
+ *      None
+ * Output:
+ *      pAction  - Action for Un-assign
+ * Return:
+ *      RT_ERR_OK                       - OK
+ *      RT_ERR_FAILED                   - Failed
+ * Note:
+ *      None
+ */
+extern rtk_api_ret_t rtk_svlan_unassign_action_get(rtk_svlan_unassign_action_t *pAction);
+
+
+/* Function Name:
+ *      rtk_svlan_checkAndCreateMbr
+ * Description:
+ *      Check and create Member configuration and return index
+ * Input:
+ *      vid  - VLAN id.
+ * Output:
+ *      pIndex  - Member configuration index
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_VLAN_VID     - Invalid VLAN ID.
+ *      RT_ERR_TBL_FULL     - Member Configuration table full
+ * Note:
+ *
+ */
+extern rtk_api_ret_t rtk_svlan_checkAndCreateMbr(rtk_vlan_t vid, rtk_uint32 *pIndex);
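+
+/*
+ * Usage sketch (illustrative only): fetch the member configuration index for
+ * SVID 100, creating the entry if it does not exist yet. RT_ERR_TBL_FULL
+ * means every member configuration slot is already in use.
+ *
+ *      rtk_uint32 idx;
+ *      rtk_api_ret_t ret;
+ *
+ *      ret = rtk_svlan_checkAndCreateMbr(100, &idx);
+ *      if (ret == RT_ERR_OK)
+ *          use_mbr_index(idx);               hypothetical consumer
+ */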
+
+
+#endif /* __RTK_API_SVLAN_H__ */
diff -Nruw linux-4.4.115-fbx/drivers/misc/freebox./rtlapi/trap.c linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/trap.c
--- linux-4.4.115-fbx/drivers/misc/freebox./rtlapi/trap.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/trap.c	2019-01-22 16:16:24.723257454 +0100
@@ -0,0 +1,1432 @@
+/*
+ * Copyright (C) 2013 Realtek Semiconductor Corp.
+ * All Rights Reserved.
+ *
+ * This program is the proprietary software of Realtek Semiconductor
+ * Corporation and/or its licensors, and only be used, duplicated,
+ * modified or distributed under the authorized license from Realtek.
+ *
+ * ANY USE OF THE SOFTWARE OTHER THAN AS AUTHORIZED UNDER
+ * THIS LICENSE OR COPYRIGHT LAW IS PROHIBITED.
+ *
+ * $Revision: 79496 $
+ * $Date: 2017-06-08 17:31:25 +0800 (Thu, 08 Jun 2017) $
+ *
+ * Purpose : RTK switch high-level API for RTL8367/RTL8367C
+ * Feature : Here is a list of all functions and variables in Trap module.
+ *
+ */
+
+#include <rtk_switch.h>
+#include <rtk_error.h>
+#include <trap.h>
+#include <string.h>
+
+#include <rtl8367c_asicdrv.h>
+#include <rtl8367c_asicdrv_port.h>
+#include <rtl8367c_asicdrv_igmp.h>
+#include <rtl8367c_asicdrv_rma.h>
+#include <rtl8367c_asicdrv_eav.h>
+#include <rtl8367c_asicdrv_oam.h>
+#include <rtl8367c_asicdrv_svlan.h>
+#include <rtl8367c_asicdrv_unknownMulticast.h>
+#include <rtl8367c_asicdrv_dot1x.h>
+
+static rtk_api_ret_t _rtk_trap_unknownUnicastPktAction_set(rtk_port_t port, rtk_trap_ucast_action_t ucast_action)
+{
+    rtk_api_ret_t retVal;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    /* check port valid */
+    RTK_CHK_PORT_VALID(port);
+
+    if (ucast_action >= UCAST_ACTION_END)
+        return RT_ERR_INPUT;
+
+    if ((retVal = rtl8367c_setAsicPortUnknownDaBehavior(rtk_switch_port_L2P_get(port), ucast_action)) != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_trap_unknownUnicastPktAction_get(rtk_port_t port, rtk_trap_ucast_action_t *pUcast_action)
+{
+    rtk_api_ret_t retVal;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    /* check port valid */
+    RTK_CHK_PORT_VALID(port);
+
+    if (NULL == pUcast_action)
+        return RT_ERR_NULL_POINTER;
+
+    if ((retVal = rtl8367c_getAsicPortUnknownDaBehavior(rtk_switch_port_L2P_get(port), pUcast_action)) != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_trap_unknownMacPktAction_set(rtk_trap_ucast_action_t ucast_action)
+{
+    rtk_api_ret_t retVal;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if (ucast_action >= UCAST_ACTION_FLOODING)
+        return RT_ERR_INPUT;
+
+    if ((retVal = rtl8367c_setAsicPortUnknownSaBehavior(ucast_action)) != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_trap_unknownMacPktAction_get(rtk_trap_ucast_action_t *pUcast_action)
+{
+    rtk_api_ret_t retVal;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if(NULL == pUcast_action)
+        return RT_ERR_NULL_POINTER;
+
+    if ((retVal = rtl8367c_getAsicPortUnknownSaBehavior(pUcast_action)) != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_trap_unmatchMacPktAction_set(rtk_trap_ucast_action_t ucast_action)
+{
+    rtk_api_ret_t retVal;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if (ucast_action >= UCAST_ACTION_FLOODING)
+        return RT_ERR_INPUT;
+
+    if ((retVal = rtl8367c_setAsicPortUnmatchedSaBehavior(ucast_action)) != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_trap_unmatchMacPktAction_get(rtk_trap_ucast_action_t *pUcast_action)
+{
+    rtk_api_ret_t retVal;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if(NULL == pUcast_action)
+        return RT_ERR_NULL_POINTER;
+
+    if ((retVal = rtl8367c_getAsicPortUnmatchedSaBehavior(pUcast_action)) != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_trap_unmatchMacMoving_set(rtk_port_t port, rtk_enable_t enable)
+{
+    rtk_api_ret_t retVal;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    /* check port valid */
+    RTK_CHK_PORT_VALID(port);
+
+    if(enable >= RTK_ENABLE_END)
+        return RT_ERR_INPUT;
+
+    if ((retVal = rtl8367c_setAsicPortUnmatchedSaMoving(rtk_switch_port_L2P_get(port), enable)) != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_trap_unmatchMacMoving_get(rtk_port_t port, rtk_enable_t *pEnable)
+{
+    rtk_api_ret_t retVal;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    /* check port valid */
+    RTK_CHK_PORT_VALID(port);
+
+    if(NULL == pEnable)
+        return RT_ERR_NULL_POINTER;
+
+    if ((retVal = rtl8367c_getAsicPortUnmatchedSaMoving(rtk_switch_port_L2P_get(port), pEnable)) != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_trap_unknownMcastPktAction_set(rtk_port_t port, rtk_mcast_type_t type, rtk_trap_mcast_action_t mcast_action)
+{
+    rtk_api_ret_t retVal;
+    rtk_uint32 rawAction;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    /* Check Port Valid */
+    RTK_CHK_PORT_VALID(port);
+
+    if (type >= MCAST_END)
+        return RT_ERR_INPUT;
+
+    if (mcast_action >= MCAST_ACTION_END)
+        return RT_ERR_INPUT;
+
+
+    switch (type)
+    {
+        case MCAST_L2:
+            if (MCAST_ACTION_ROUTER_PORT == mcast_action)
+                return RT_ERR_INPUT;
+            else if(MCAST_ACTION_DROP_EX_RMA == mcast_action)
+                rawAction = L2_UNKOWN_MULTICAST_DROP_EXCLUDE_RMA;
+            else
+                rawAction = (rtk_uint32)mcast_action;
+
+            if ((retVal = rtl8367c_setAsicUnknownL2MulticastBehavior(rtk_switch_port_L2P_get(port), rawAction)) != RT_ERR_OK)
+                return retVal;
+
+            break;
+        case MCAST_IPV4:
+            if (MCAST_ACTION_DROP_EX_RMA == mcast_action)
+                return RT_ERR_INPUT;
+            else
+                rawAction = (rtk_uint32)mcast_action;
+
+            if ((retVal = rtl8367c_setAsicUnknownIPv4MulticastBehavior(rtk_switch_port_L2P_get(port), rawAction)) != RT_ERR_OK)
+                return retVal;
+
+            break;
+        case MCAST_IPV6:
+            if (MCAST_ACTION_DROP_EX_RMA == mcast_action)
+                return RT_ERR_INPUT;
+            else
+                rawAction = (rtk_uint32)mcast_action;
+
+            if ((retVal = rtl8367c_setAsicUnknownIPv6MulticastBehavior(rtk_switch_port_L2P_get(port), rawAction)) != RT_ERR_OK)
+                return retVal;
+
+            break;
+        default:
+            break;
+    }
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_trap_unknownMcastPktAction_get(rtk_port_t port, rtk_mcast_type_t type, rtk_trap_mcast_action_t *pMcast_action)
+{
+    rtk_api_ret_t retVal;
+    rtk_uint32 rawAction;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    /* Check Port Valid */
+    RTK_CHK_PORT_VALID(port);
+
+    if (type >= MCAST_END)
+        return RT_ERR_INPUT;
+
+    if(NULL == pMcast_action)
+        return RT_ERR_NULL_POINTER;
+
+    switch (type)
+    {
+        case MCAST_L2:
+            if ((retVal = rtl8367c_getAsicUnknownL2MulticastBehavior(rtk_switch_port_L2P_get(port), &rawAction)) != RT_ERR_OK)
+                return retVal;
+
+            if(L2_UNKOWN_MULTICAST_DROP_EXCLUDE_RMA == rawAction)
+                *pMcast_action = MCAST_ACTION_DROP_EX_RMA;
+            else
+                *pMcast_action = (rtk_trap_mcast_action_t)rawAction;
+
+            break;
+        case MCAST_IPV4:
+            if ((retVal = rtl8367c_getAsicUnknownIPv4MulticastBehavior(rtk_switch_port_L2P_get(port), &rawAction)) != RT_ERR_OK)
+                return retVal;
+
+            *pMcast_action = (rtk_trap_mcast_action_t)rawAction;
+            break;
+        case MCAST_IPV6:
+            if ((retVal = rtl8367c_getAsicUnknownIPv6MulticastBehavior(rtk_switch_port_L2P_get(port), &rawAction)) != RT_ERR_OK)
+                return retVal;
+
+            *pMcast_action = (rtk_trap_mcast_action_t)rawAction;
+            break;
+        default:
+            break;
+    }
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_trap_lldpEnable_set(rtk_enable_t enabled)
+{
+    rtk_api_ret_t retVal;
+    rtl8367c_rma_t rmacfg;
+    rtk_enable_t tmp;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if (enabled >= RTK_ENABLE_END)
+        return RT_ERR_ENABLE;
+
+    if ((retVal = rtl8367c_getAsicRmaLldp(&tmp, &rmacfg)) != RT_ERR_OK)
+        return retVal;
+
+    if ((retVal = rtl8367c_setAsicRmaLldp(enabled, &rmacfg)) != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_trap_lldpEnable_get(rtk_enable_t *pEnabled)
+{
+    rtk_api_ret_t retVal;
+    rtl8367c_rma_t rmacfg;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if(NULL == pEnabled)
+        return RT_ERR_NULL_POINTER;
+
+    if ((retVal = rtl8367c_getAsicRmaLldp(pEnabled, &rmacfg)) != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_trap_reasonTrapToCpuPriority_set(rtk_trap_reason_type_t type, rtk_pri_t priority)
+{
+    rtk_api_ret_t retVal;
+    rtl8367c_rma_t rmacfg;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if (type >= TRAP_REASON_END)
+        return RT_ERR_INPUT;
+
+    if (priority > RTL8367C_PRIMAX)
+        return  RT_ERR_QOS_INT_PRIORITY;
+
+    switch (type)
+    {
+        case TRAP_REASON_RMA:
+            if ((retVal = rtl8367c_getAsicRma(0, &rmacfg)) != RT_ERR_OK)
+                return retVal;
+            rmacfg.trap_priority= priority;
+            if ((retVal = rtl8367c_setAsicRma(0, &rmacfg)) != RT_ERR_OK)
+                return retVal;
+
+            break;
+        case TRAP_REASON_OAM:
+            if ((retVal = rtl8367c_setAsicOamCpuPri(priority)) != RT_ERR_OK)
+                return retVal;
+
+            break;
+        case TRAP_REASON_1XUNAUTH:
+            if ((retVal = rtl8367c_setAsic1xTrapPriority(priority)) != RT_ERR_OK)
+                return retVal;
+
+            break;
+        case TRAP_REASON_VLANSTACK:
+            if ((retVal = rtl8367c_setAsicSvlanTrapPriority(priority)) != RT_ERR_OK)
+                return retVal;
+
+            break;
+        case TRAP_REASON_UNKNOWNMC:
+            if ((retVal = rtl8367c_setAsicUnknownMulticastTrapPriority(priority)) != RT_ERR_OK)
+                return retVal;
+
+            break;
+        default:
+            return RT_ERR_CHIP_NOT_SUPPORTED;
+    }
+
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_trap_reasonTrapToCpuPriority_get(rtk_trap_reason_type_t type, rtk_pri_t *pPriority)
+{
+    rtk_api_ret_t retVal;
+    rtl8367c_rma_t rmacfg;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if (type >= TRAP_REASON_END)
+        return RT_ERR_INPUT;
+
+    if(NULL == pPriority)
+        return RT_ERR_NULL_POINTER;
+
+    switch (type)
+    {
+        case TRAP_REASON_RMA:
+            if ((retVal = rtl8367c_getAsicRma(0, &rmacfg)) != RT_ERR_OK)
+                return retVal;
+            *pPriority = rmacfg.trap_priority;
+
+            break;
+        case TRAP_REASON_OAM:
+            if ((retVal = rtl8367c_getAsicOamCpuPri(pPriority)) != RT_ERR_OK)
+                return retVal;
+
+            break;
+        case TRAP_REASON_1XUNAUTH:
+            if ((retVal = rtl8367c_getAsic1xTrapPriority(pPriority)) != RT_ERR_OK)
+                return retVal;
+
+            break;
+        case TRAP_REASON_VLANSTACK:
+            if ((retVal = rtl8367c_getAsicSvlanTrapPriority(pPriority)) != RT_ERR_OK)
+                return retVal;
+
+            break;
+        case TRAP_REASON_UNKNOWNMC:
+            if ((retVal = rtl8367c_getAsicUnknownMulticastTrapPriority(pPriority)) != RT_ERR_OK)
+                return retVal;
+
+            break;
+        default:
+            return RT_ERR_CHIP_NOT_SUPPORTED;
+
+    }
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_trap_rmaAction_set(rtk_trap_type_t type, rtk_trap_rma_action_t rma_action)
+{
+    rtk_api_ret_t retVal;
+    rtl8367c_rma_t rmacfg;
+    rtk_uint32 tmp;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if (type >= TRAP_END)
+        return RT_ERR_INPUT;
+
+    if (rma_action >= RMA_ACTION_END)
+        return RT_ERR_RMA_ACTION;
+
+    if (type >= 0 && type <= TRAP_UNDEF_GARP_2F)
+    {
+        if ((retVal = rtl8367c_getAsicRma(type, &rmacfg)) != RT_ERR_OK)
+            return retVal;
+
+        rmacfg.operation = rma_action;
+
+        if ((retVal = rtl8367c_setAsicRma(type, &rmacfg)) != RT_ERR_OK)
+            return retVal;
+    }
+    else if (type == TRAP_CDP)
+    {
+        if ((retVal = rtl8367c_getAsicRmaCdp(&rmacfg)) != RT_ERR_OK)
+            return retVal;
+
+        rmacfg.operation = rma_action;
+
+        if ((retVal = rtl8367c_setAsicRmaCdp(&rmacfg)) != RT_ERR_OK)
+            return retVal;
+    }
+    else if (type  == TRAP_CSSTP)
+    {
+        if ((retVal = rtl8367c_getAsicRmaCsstp(&rmacfg)) != RT_ERR_OK)
+            return retVal;
+
+        rmacfg.operation = rma_action;
+
+        if ((retVal = rtl8367c_setAsicRmaCsstp(&rmacfg)) != RT_ERR_OK)
+            return retVal;
+    }
+    else if (type  == TRAP_LLDP)
+    {
+        if ((retVal = rtl8367c_getAsicRmaLldp(&tmp, &rmacfg)) != RT_ERR_OK)
+            return retVal;
+
+        rmacfg.operation = rma_action;
+
+        if ((retVal = rtl8367c_setAsicRmaLldp(tmp, &rmacfg)) != RT_ERR_OK)
+            return retVal;
+    }
+    else
+        return RT_ERR_INPUT;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_trap_rmaAction_get(rtk_trap_type_t type, rtk_trap_rma_action_t *pRma_action)
+{
+    rtk_api_ret_t retVal;
+    rtl8367c_rma_t rmacfg;
+    rtk_uint32 tmp;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if (type >= TRAP_END)
+        return RT_ERR_INPUT;
+
+    if(NULL == pRma_action)
+        return RT_ERR_NULL_POINTER;
+
+    if (type >= 0 && type <= TRAP_UNDEF_GARP_2F)
+    {
+        if ((retVal = rtl8367c_getAsicRma(type, &rmacfg)) != RT_ERR_OK)
+            return retVal;
+
+        *pRma_action = rmacfg.operation;
+    }
+    else if (type == TRAP_CDP)
+    {
+        if ((retVal = rtl8367c_getAsicRmaCdp(&rmacfg)) != RT_ERR_OK)
+            return retVal;
+
+        *pRma_action = rmacfg.operation;
+    }
+    else if (type == TRAP_CSSTP)
+    {
+        if ((retVal = rtl8367c_getAsicRmaCsstp(&rmacfg)) != RT_ERR_OK)
+            return retVal;
+
+        *pRma_action = rmacfg.operation;
+    }
+    else if (type == TRAP_LLDP)
+    {
+        if ((retVal = rtl8367c_getAsicRmaLldp(&tmp,&rmacfg)) != RT_ERR_OK)
+            return retVal;
+
+        *pRma_action = rmacfg.operation;
+    }
+    else
+        return RT_ERR_INPUT;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_trap_rmaKeepFormat_set(rtk_trap_type_t type, rtk_enable_t enable)
+{
+    rtk_api_ret_t retVal;
+    rtl8367c_rma_t rmacfg;
+    rtk_uint32 tmp;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if (type >= TRAP_END)
+        return RT_ERR_INPUT;
+
+    if (enable >= RTK_ENABLE_END)
+        return RT_ERR_INPUT;
+
+    if (type >= 0 && type <= TRAP_UNDEF_GARP_2F)
+    {
+        if ((retVal = rtl8367c_getAsicRma(type, &rmacfg)) != RT_ERR_OK)
+            return retVal;
+
+        rmacfg.keep_format = enable;
+
+        if ((retVal = rtl8367c_setAsicRma(type, &rmacfg)) != RT_ERR_OK)
+            return retVal;
+    }
+    else if (type == TRAP_CDP)
+    {
+        if ((retVal = rtl8367c_getAsicRmaCdp(&rmacfg)) != RT_ERR_OK)
+            return retVal;
+
+        rmacfg.keep_format = enable;
+
+        if ((retVal = rtl8367c_setAsicRmaCdp(&rmacfg)) != RT_ERR_OK)
+            return retVal;
+    }
+    else if (type  == TRAP_CSSTP)
+    {
+        if ((retVal = rtl8367c_getAsicRmaCsstp(&rmacfg)) != RT_ERR_OK)
+            return retVal;
+
+        rmacfg.keep_format = enable;
+
+        if ((retVal = rtl8367c_setAsicRmaCsstp(&rmacfg)) != RT_ERR_OK)
+            return retVal;
+    }
+    else if (type  == TRAP_LLDP)
+    {
+        if ((retVal = rtl8367c_getAsicRmaLldp(&tmp, &rmacfg)) != RT_ERR_OK)
+            return retVal;
+
+        rmacfg.keep_format = enable;
+
+        if ((retVal = rtl8367c_setAsicRmaLldp(tmp, &rmacfg)) != RT_ERR_OK)
+            return retVal;
+    }
+    else
+        return RT_ERR_INPUT;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_trap_rmaKeepFormat_get(rtk_trap_type_t type, rtk_enable_t *pEnable)
+{
+    rtk_api_ret_t retVal;
+    rtl8367c_rma_t rmacfg;
+    rtk_uint32 tmp;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if (type >= TRAP_END)
+        return RT_ERR_INPUT;
+
+    if(NULL == pEnable)
+        return RT_ERR_NULL_POINTER;
+
+    if (type >= 0 && type <= TRAP_UNDEF_GARP_2F)
+    {
+        if ((retVal = rtl8367c_getAsicRma(type, &rmacfg)) != RT_ERR_OK)
+            return retVal;
+
+        *pEnable = rmacfg.keep_format;
+    }
+    else if (type == TRAP_CDP)
+    {
+        if ((retVal = rtl8367c_getAsicRmaCdp(&rmacfg)) != RT_ERR_OK)
+            return retVal;
+
+        *pEnable = rmacfg.keep_format;
+    }
+    else if (type == TRAP_CSSTP)
+    {
+        if ((retVal = rtl8367c_getAsicRmaCsstp(&rmacfg)) != RT_ERR_OK)
+            return retVal;
+
+        *pEnable = rmacfg.keep_format;
+    }
+    else if (type == TRAP_LLDP)
+    {
+        if ((retVal = rtl8367c_getAsicRmaLldp(&tmp,&rmacfg)) != RT_ERR_OK)
+            return retVal;
+
+        *pEnable = rmacfg.keep_format;
+    }
+    else
+        return RT_ERR_INPUT;
+
+    return RT_ERR_OK;
+}
+
+
+/* Function Name:
+ *      rtk_trap_unknownUnicastPktAction_set
+ * Description:
+ *      Set unknown unicast packet action configuration.
+ * Input:
+ *      port            - ingress port ID for unknown unicast packet
+ *      ucast_action    - Unknown unicast action.
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK                  - OK
+ *      RT_ERR_FAILED              - Failed
+ *      RT_ERR_SMI                 - SMI access error
+ *      RT_ERR_NOT_ALLOWED         - Invalid action.
+ *      RT_ERR_INPUT               - Invalid input parameters.
+ * Note:
+ *      This API can set unknown unicast packet action configuration.
+ *      The unknown unicast action is one of the following:
+ *          - UCAST_ACTION_FORWARD_PMASK
+ *          - UCAST_ACTION_DROP
+ *          - UCAST_ACTION_TRAP2CPU
+ *          - UCAST_ACTION_FLOODING
+ */
+rtk_api_ret_t rtk_trap_unknownUnicastPktAction_set(rtk_port_t port, rtk_trap_ucast_action_t ucast_action)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_trap_unknownUnicastPktAction_set(port, ucast_action);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
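+
+/*
+ * Usage sketch (illustrative only): drop unknown-DA unicast packets that
+ * ingress on a given port. Note that the wrapper takes RTK_API_LOCK()
+ * itself, so the caller does not lock around it.
+ *
+ *      if (rtk_trap_unknownUnicastPktAction_set(port, UCAST_ACTION_DROP) != RT_ERR_OK)
+ *          handle_error();                   hypothetical error handler
+ */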
+
+/* Function Name:
+ *      rtk_trap_unknownUnicastPktAction_get
+ * Description:
+ *      Get unknown unicast packet action configuration.
+ * Input:
+ *      port            - ingress port ID for unknown unicast packet
+ * Output:
+ *      pUcast_action   - Unknown unicast action.
+ * Return:
+ *      RT_ERR_OK                  - OK
+ *      RT_ERR_FAILED              - Failed
+ *      RT_ERR_SMI                 - SMI access error
+ *      RT_ERR_NOT_ALLOWED         - Invalid action.
+ *      RT_ERR_INPUT               - Invalid input parameters.
+ *      RT_ERR_NULL_POINTER        - Null pointer
+ * Note:
+ *      This API can get unknown unicast packet action configuration.
+ *      The unknown unicast action is one of the following:
+ *          - UCAST_ACTION_FORWARD_PMASK
+ *          - UCAST_ACTION_DROP
+ *          - UCAST_ACTION_TRAP2CPU
+ *          - UCAST_ACTION_FLOODING
+ */
+rtk_api_ret_t rtk_trap_unknownUnicastPktAction_get(rtk_port_t port, rtk_trap_ucast_action_t *pUcast_action)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_trap_unknownUnicastPktAction_get(port, pUcast_action);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_trap_unknownMacPktAction_set
+ * Description:
+ *      Set unknown source MAC packet action configuration.
+ * Input:
+ *      ucast_action    - Unknown source MAC action.
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK                  - OK
+ *      RT_ERR_FAILED              - Failed
+ *      RT_ERR_SMI                 - SMI access error
+ *      RT_ERR_NOT_ALLOWED         - Invalid action.
+ *      RT_ERR_INPUT               - Invalid input parameters.
+ * Note:
+ *      This API can set the unknown source MAC packet action configuration.
+ *      The action is one of the following:
+ *          - UCAST_ACTION_FORWARD_PMASK
+ *          - UCAST_ACTION_DROP
+ *          - UCAST_ACTION_TRAP2CPU
+ */
+rtk_api_ret_t rtk_trap_unknownMacPktAction_set(rtk_trap_ucast_action_t ucast_action)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_trap_unknownMacPktAction_set(ucast_action);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_trap_unknownMacPktAction_get
+ * Description:
+ *      Get unknown source MAC packet action configuration.
+ * Input:
+ *      None.
+ * Output:
+ *      pUcast_action   - Unknown source MAC action.
+ * Return:
+ *      RT_ERR_OK                  - OK
+ *      RT_ERR_FAILED              - Failed
+ *      RT_ERR_SMI                 - SMI access error
+ *      RT_ERR_NULL_POINTER        - Null Pointer.
+ *      RT_ERR_INPUT               - Invalid input parameters.
+ * Note:
+ *
+ */
+rtk_api_ret_t rtk_trap_unknownMacPktAction_get(rtk_trap_ucast_action_t *pUcast_action)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_trap_unknownMacPktAction_get(pUcast_action);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_trap_unmatchMacPktAction_set
+ * Description:
+ *      Set unmatch source MAC packet action configuration.
+ * Input:
+ *      ucast_action    - Unmatched source MAC action.
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK                  - OK
+ *      RT_ERR_FAILED              - Failed
+ *      RT_ERR_SMI                 - SMI access error
+ *      RT_ERR_NOT_ALLOWED         - Invalid action.
+ *      RT_ERR_INPUT               - Invalid input parameters.
+ * Note:
+ *      This API can set the unmatched source MAC packet action configuration.
+ *      The action is one of the following:
+ *          - UCAST_ACTION_FORWARD_PMASK
+ *          - UCAST_ACTION_DROP
+ *          - UCAST_ACTION_TRAP2CPU
+ */
+rtk_api_ret_t rtk_trap_unmatchMacPktAction_set(rtk_trap_ucast_action_t ucast_action)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_trap_unmatchMacPktAction_set(ucast_action);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_trap_unmatchMacPktAction_get
+ * Description:
+ *      Get unmatch source MAC packet action configuration.
+ * Input:
+ *      None.
+ * Output:
+ *      pUcast_action   - Unmatched source MAC action.
+ * Return:
+ *      RT_ERR_OK                  - OK
+ *      RT_ERR_FAILED              - Failed
+ *      RT_ERR_SMI                 - SMI access error
+ *      RT_ERR_NOT_ALLOWED         - Invalid action.
+ *      RT_ERR_INPUT               - Invalid input parameters.
+ * Note:
+ *      This API can get the unmatched source MAC packet action configuration.
+ *      The action is one of the following:
+ *          - UCAST_ACTION_FORWARD_PMASK
+ *          - UCAST_ACTION_DROP
+ *          - UCAST_ACTION_TRAP2CPU
+ */
+rtk_api_ret_t rtk_trap_unmatchMacPktAction_get(rtk_trap_ucast_action_t *pUcast_action)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_trap_unmatchMacPktAction_get(pUcast_action);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_trap_unmatchMacMoving_set
+ * Description:
+ *      Set unmatch source MAC packet moving state.
+ * Input:
+ *      port        - Port ID.
+ *      enable      - ENABLED: allow SA moving, DISABLED: do not allow SA moving.
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK                  - OK
+ *      RT_ERR_FAILED              - Failed
+ *      RT_ERR_SMI                 - SMI access error
+ *      RT_ERR_NOT_ALLOWED         - Invalid action.
+ *      RT_ERR_INPUT               - Invalid input parameters.
+ * Note:
+ */
+rtk_api_ret_t rtk_trap_unmatchMacMoving_set(rtk_port_t port, rtk_enable_t enable)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_trap_unmatchMacMoving_set(port, enable);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_trap_unmatchMacMoving_get
+ * Description:
+ *      Get unmatch source MAC packet moving state.
+ * Input:
+ *      port        - Port ID.
+ * Output:
+ *      pEnable     - ENABLED: allow SA moving, DISABLED: do not allow SA moving.
+ * Return:
+ *      RT_ERR_OK                  - OK
+ *      RT_ERR_FAILED              - Failed
+ *      RT_ERR_SMI                 - SMI access error
+ *      RT_ERR_NOT_ALLOWED         - Invalid action.
+ *      RT_ERR_INPUT               - Invalid input parameters.
+ * Note:
+ */
+rtk_api_ret_t rtk_trap_unmatchMacMoving_get(rtk_port_t port, rtk_enable_t *pEnable)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_trap_unmatchMacMoving_get(port, pEnable);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_trap_unknownMcastPktAction_set
+ * Description:
+ *      Set behavior of unknown multicast
+ * Input:
+ *      port            - Port id.
+ *      type            - unknown multicast packet type.
+ *      mcast_action    - unknown multicast action.
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_PORT_ID      - Invalid port number.
+ *      RT_ERR_NOT_ALLOWED  - Invalid action.
+ *      RT_ERR_INPUT        - Invalid input parameters.
+ * Note:
+ *      When the switch receives an unknown multicast packet, it may trap,
+ *      drop, or flood the packet.
+ *      (1) The unknown multicast packet type is as following:
+ *          - MCAST_L2
+ *          - MCAST_IPV4
+ *          - MCAST_IPV6
+ *      (2) The unknown multicast action is as following:
+ *          - MCAST_ACTION_FORWARD
+ *          - MCAST_ACTION_DROP
+ *          - MCAST_ACTION_TRAP2CPU
+ */
+rtk_api_ret_t rtk_trap_unknownMcastPktAction_set(rtk_port_t port, rtk_mcast_type_t type, rtk_trap_mcast_action_t mcast_action)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_trap_unknownMcastPktAction_set(port, type, mcast_action);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
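+
+/*
+ * Usage sketch (illustrative only): trap unknown IPv4 multicast received on
+ * a given port to the CPU. Per the checks in the static implementation
+ * above, MCAST_ACTION_DROP_EX_RMA is only accepted for MCAST_L2.
+ *
+ *      if (rtk_trap_unknownMcastPktAction_set(port, MCAST_IPV4,
+ *                                             MCAST_ACTION_TRAP2CPU) != RT_ERR_OK)
+ *          handle_error();                   hypothetical error handler
+ */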
+
+/* Function Name:
+ *      rtk_trap_unknownMcastPktAction_get
+ * Description:
+ *      Get behavior of unknown multicast
+ * Input:
+ *      type - unknown multicast packet type.
+ * Output:
+ *      pMcast_action - unknown multicast action.
+ * Return:
+ *      RT_ERR_OK                  - OK
+ *      RT_ERR_FAILED              - Failed
+ *      RT_ERR_SMI                 - SMI access error
+ *      RT_ERR_PORT_ID             - Invalid port number.
+ *      RT_ERR_NOT_ALLOWED         - Invalid operation.
+ *      RT_ERR_INPUT               - Invalid input parameters.
+ * Note:
+ *      When the switch receives an unknown multicast packet, it may trap,
+ *      drop, or flood the packet.
+ *      (1) The unknown multicast packet type is as following:
+ *          - MCAST_L2
+ *          - MCAST_IPV4
+ *          - MCAST_IPV6
+ *      (2) The unknown multicast action is as following:
+ *          - MCAST_ACTION_FORWARD
+ *          - MCAST_ACTION_DROP
+ *          - MCAST_ACTION_TRAP2CPU
+ */
+rtk_api_ret_t rtk_trap_unknownMcastPktAction_get(rtk_port_t port, rtk_mcast_type_t type, rtk_trap_mcast_action_t *pMcast_action)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_trap_unknownMcastPktAction_get(port, type, pMcast_action);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_trap_lldpEnable_set
+ * Description:
+ *      Set LLDP enable.
+ * Input:
+ *      enabled - LLDP enable, 0: follow RMA, 1: use LLDP action.
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK                  - OK
+ *      RT_ERR_FAILED              - Failed
+ *      RT_ERR_SMI                 - SMI access error
+ *      RT_ERR_NOT_ALLOWED         - Invalid action.
+ *      RT_ERR_INPUT               - Invalid input parameters.
+ * Note:
+ *      - DMAC                   Ethertype   Assignment
+ *      - 01:80:c2:00:00:0e      0x88CC      LLDP
+ *      - 01:80:c2:00:00:03      0x88CC      LLDP
+ *      - 01:80:c2:00:00:00      0x88CC      LLDP
+ */
+rtk_api_ret_t rtk_trap_lldpEnable_set(rtk_enable_t enabled)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_trap_lldpEnable_set(enabled);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_trap_lldpEnable_get
+ * Description:
+ *      Get LLDP status.
+ * Input:
+ *      None
+ * Output:
+ *      pEnabled - LLDP enable, 0: follow RMA, 1: use LLDP action.
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_INPUT        - Invalid input parameters.
+ * Note:
+ *      LLDP is as following definition.
+ *      - DMAC                   Ethertype   Assignment
+ *      - 01:80:c2:00:00:0e      0x88CC      LLDP
+ *      - 01:80:c2:00:00:03      0x88CC      LLDP
+ *      - 01:80:c2:00:00:00      0x88CC      LLDP
+ */
+rtk_api_ret_t rtk_trap_lldpEnable_get(rtk_enable_t *pEnabled)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_trap_lldpEnable_get(pEnabled);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_trap_reasonTrapToCpuPriority_set
+ * Description:
+ *      Set priority value of a packet that trapped to CPU port according to specific reason.
+ * Input:
+ *      type     - reason that trap to CPU port.
+ *      priority - internal priority that is going to be set for specific trap reason.
+ * Output:
+ *      None.
+ * Return:
+ *      RT_ERR_OK
+ *      RT_ERR_FAILED
+ *      RT_ERR_NOT_INIT - The module is not initialized
+ *      RT_ERR_INPUT    - Invalid input parameter
+ * Note:
+ *      The currently supported trap reasons are listed as follows:
+ *      - TRAP_REASON_RMA
+ *      - TRAP_REASON_OAM
+ *      - TRAP_REASON_1XUNAUTH
+ *      - TRAP_REASON_VLANSTACK
+ *      - TRAP_REASON_UNKNOWNMC
+ */
+rtk_api_ret_t rtk_trap_reasonTrapToCpuPriority_set(rtk_trap_reason_type_t type, rtk_pri_t priority)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_trap_reasonTrapToCpuPriority_set(type, priority);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
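+
+/*
+ * Usage sketch (illustrative only): give RMA frames trapped to the CPU a
+ * high internal priority. The value must not exceed RTL8367C_PRIMAX, as
+ * enforced by the static implementation above; 7 is an example value.
+ *
+ *      if (rtk_trap_reasonTrapToCpuPriority_set(TRAP_REASON_RMA, 7) != RT_ERR_OK)
+ *          handle_error();                   hypothetical error handler
+ */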
+
+
+/* Function Name:
+ *      rtk_trap_reasonTrapToCpuPriority_get
+ * Description:
+ *      Get priority value of a packet that trapped to CPU port according to specific reason.
+ * Input:
+ *      type      - reason that trap to CPU port.
+ * Output:
+ *      pPriority - configured internal priority for such reason.
+ * Return:
+ *      RT_ERR_OK
+ *      RT_ERR_FAILED
+ *      RT_ERR_NOT_INIT     - The module is not initialized
+ *      RT_ERR_INPUT        - Invalid input parameter
+ *      RT_ERR_NULL_POINTER - NULL pointer
+ * Note:
+ *      The currently supported trap reasons are listed as follows:
+ *      - TRAP_REASON_RMA
+ *      - TRAP_REASON_OAM
+ *      - TRAP_REASON_1XUNAUTH
+ *      - TRAP_REASON_VLANSTACK
+ *      - TRAP_REASON_UNKNOWNMC
+ */
+rtk_api_ret_t rtk_trap_reasonTrapToCpuPriority_get(rtk_trap_reason_type_t type, rtk_pri_t *pPriority)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_trap_reasonTrapToCpuPriority_get(type, pPriority);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+
+
+/* Function Name:
+ *      rtk_trap_rmaAction_set
+ * Description:
+ *      Set Reserved multicast address action configuration.
+ * Input:
+ *      type    - rma type.
+ *      rma_action - RMA action.
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_INPUT        - Invalid input parameters.
+ * Note:
+ *
+ *      There are 48 types of Reserved Multicast Address frames for application usage.
+ *      (1) They are defined as follows.
+ *      - TRAP_BRG_GROUP,
+ *      - TRAP_FD_PAUSE,
+ *      - TRAP_SP_MCAST,
+ *      - TRAP_1X_PAE,
+ *      - TRAP_UNDEF_BRG_04,
+ *      - TRAP_UNDEF_BRG_05,
+ *      - TRAP_UNDEF_BRG_06,
+ *      - TRAP_UNDEF_BRG_07,
+ *      - TRAP_PROVIDER_BRIDGE_GROUP_ADDRESS,
+ *      - TRAP_UNDEF_BRG_09,
+ *      - TRAP_UNDEF_BRG_0A,
+ *      - TRAP_UNDEF_BRG_0B,
+ *      - TRAP_UNDEF_BRG_0C,
+ *      - TRAP_PROVIDER_BRIDGE_GVRP_ADDRESS,
+ *      - TRAP_8021AB,
+ *      - TRAP_UNDEF_BRG_0F,
+ *      - TRAP_BRG_MNGEMENT,
+ *      - TRAP_UNDEFINED_11,
+ *      - TRAP_UNDEFINED_12,
+ *      - TRAP_UNDEFINED_13,
+ *      - TRAP_UNDEFINED_14,
+ *      - TRAP_UNDEFINED_15,
+ *      - TRAP_UNDEFINED_16,
+ *      - TRAP_UNDEFINED_17,
+ *      - TRAP_UNDEFINED_18,
+ *      - TRAP_UNDEFINED_19,
+ *      - TRAP_UNDEFINED_1A,
+ *      - TRAP_UNDEFINED_1B,
+ *      - TRAP_UNDEFINED_1C,
+ *      - TRAP_UNDEFINED_1D,
+ *      - TRAP_UNDEFINED_1E,
+ *      - TRAP_UNDEFINED_1F,
+ *      - TRAP_GMRP,
+ *      - TRAP_GVRP,
+ *      - TRAP_UNDEF_GARP_22,
+ *      - TRAP_UNDEF_GARP_23,
+ *      - TRAP_UNDEF_GARP_24,
+ *      - TRAP_UNDEF_GARP_25,
+ *      - TRAP_UNDEF_GARP_26,
+ *      - TRAP_UNDEF_GARP_27,
+ *      - TRAP_UNDEF_GARP_28,
+ *      - TRAP_UNDEF_GARP_29,
+ *      - TRAP_UNDEF_GARP_2A,
+ *      - TRAP_UNDEF_GARP_2B,
+ *      - TRAP_UNDEF_GARP_2C,
+ *      - TRAP_UNDEF_GARP_2D,
+ *      - TRAP_UNDEF_GARP_2E,
+ *      - TRAP_UNDEF_GARP_2F,
+ *      - TRAP_CDP.
+ *      - TRAP_CSSTP.
+ *      - TRAP_LLDP.
+ *      (2) The RMA action is one of the following:
+ *      - RMA_ACTION_FORWARD
+ *      - RMA_ACTION_TRAP2CPU
+ *      - RMA_ACTION_DROP
+ *      - RMA_ACTION_FORWARD_EXCLUDE_CPU
+ */
+rtk_api_ret_t rtk_trap_rmaAction_set(rtk_trap_type_t type, rtk_trap_rma_action_t rma_action)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_trap_rmaAction_set(type, rma_action);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
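+
+/*
+ * Usage sketch (illustrative only): trap LLDP frames to the CPU. The
+ * implementation above preserves the stored LLDP enable state by reading it
+ * back before writing the new action.
+ *
+ *      if (rtk_trap_rmaAction_set(TRAP_LLDP, RMA_ACTION_TRAP2CPU) != RT_ERR_OK)
+ *          handle_error();                   hypothetical error handler
+ */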
+
+/* Function Name:
+ *      rtk_trap_rmaAction_get
+ * Description:
+ *      Get Reserved multicast address action configuration.
+ * Input:
+ *      type - rma type.
+ * Output:
+ *      pRma_action - RMA action.
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_INPUT        - Invalid input parameters.
+ * Note:
+ *      There are 48 types of Reserved Multicast Address frames for application usage.
+ *      (1) They are defined as follows.
+ *      - TRAP_BRG_GROUP,
+ *      - TRAP_FD_PAUSE,
+ *      - TRAP_SP_MCAST,
+ *      - TRAP_1X_PAE,
+ *      - TRAP_UNDEF_BRG_04,
+ *      - TRAP_UNDEF_BRG_05,
+ *      - TRAP_UNDEF_BRG_06,
+ *      - TRAP_UNDEF_BRG_07,
+ *      - TRAP_PROVIDER_BRIDGE_GROUP_ADDRESS,
+ *      - TRAP_UNDEF_BRG_09,
+ *      - TRAP_UNDEF_BRG_0A,
+ *      - TRAP_UNDEF_BRG_0B,
+ *      - TRAP_UNDEF_BRG_0C,
+ *      - TRAP_PROVIDER_BRIDGE_GVRP_ADDRESS,
+ *      - TRAP_8021AB,
+ *      - TRAP_UNDEF_BRG_0F,
+ *      - TRAP_BRG_MNGEMENT,
+ *      - TRAP_UNDEFINED_11,
+ *      - TRAP_UNDEFINED_12,
+ *      - TRAP_UNDEFINED_13,
+ *      - TRAP_UNDEFINED_14,
+ *      - TRAP_UNDEFINED_15,
+ *      - TRAP_UNDEFINED_16,
+ *      - TRAP_UNDEFINED_17,
+ *      - TRAP_UNDEFINED_18,
+ *      - TRAP_UNDEFINED_19,
+ *      - TRAP_UNDEFINED_1A,
+ *      - TRAP_UNDEFINED_1B,
+ *      - TRAP_UNDEFINED_1C,
+ *      - TRAP_UNDEFINED_1D,
+ *      - TRAP_UNDEFINED_1E,
+ *      - TRAP_UNDEFINED_1F,
+ *      - TRAP_GMRP,
+ *      - TRAP_GVRP,
+ *      - TRAP_UNDEF_GARP_22,
+ *      - TRAP_UNDEF_GARP_23,
+ *      - TRAP_UNDEF_GARP_24,
+ *      - TRAP_UNDEF_GARP_25,
+ *      - TRAP_UNDEF_GARP_26,
+ *      - TRAP_UNDEF_GARP_27,
+ *      - TRAP_UNDEF_GARP_28,
+ *      - TRAP_UNDEF_GARP_29,
+ *      - TRAP_UNDEF_GARP_2A,
+ *      - TRAP_UNDEF_GARP_2B,
+ *      - TRAP_UNDEF_GARP_2C,
+ *      - TRAP_UNDEF_GARP_2D,
+ *      - TRAP_UNDEF_GARP_2E,
+ *      - TRAP_UNDEF_GARP_2F,
+ *      - TRAP_CDP.
+ *      - TRAP_CSSTP.
+ *      - TRAP_LLDP.
+ *      (2) The RMA action is one of the following:
+ *      - RMA_ACTION_FORWARD
+ *      - RMA_ACTION_TRAP2CPU
+ *      - RMA_ACTION_DROP
+ *      - RMA_ACTION_FORWARD_EXCLUDE_CPU
+ */
+rtk_api_ret_t rtk_trap_rmaAction_get(rtk_trap_type_t type, rtk_trap_rma_action_t *pRma_action)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_trap_rmaAction_get(type, pRma_action);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_trap_rmaKeepFormat_set
+ * Description:
+ *      Set Reserved multicast address keep format configuration.
+ * Input:
+ *      type    - rma type.
+ *      enable - enable keep format.
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_INPUT        - Invalid input parameters.
+ *      RT_ERR_ENABLE       - Invalid enable parameter
+ * Note:
+ *
+ *      There are 48 types of Reserved Multicast Address frames for application usage.
+ *      They are defined as follows.
+ *      - TRAP_BRG_GROUP,
+ *      - TRAP_FD_PAUSE,
+ *      - TRAP_SP_MCAST,
+ *      - TRAP_1X_PAE,
+ *      - TRAP_UNDEF_BRG_04,
+ *      - TRAP_UNDEF_BRG_05,
+ *      - TRAP_UNDEF_BRG_06,
+ *      - TRAP_UNDEF_BRG_07,
+ *      - TRAP_PROVIDER_BRIDGE_GROUP_ADDRESS,
+ *      - TRAP_UNDEF_BRG_09,
+ *      - TRAP_UNDEF_BRG_0A,
+ *      - TRAP_UNDEF_BRG_0B,
+ *      - TRAP_UNDEF_BRG_0C,
+ *      - TRAP_PROVIDER_BRIDGE_GVRP_ADDRESS,
+ *      - TRAP_8021AB,
+ *      - TRAP_UNDEF_BRG_0F,
+ *      - TRAP_BRG_MNGEMENT,
+ *      - TRAP_UNDEFINED_11,
+ *      - TRAP_UNDEFINED_12,
+ *      - TRAP_UNDEFINED_13,
+ *      - TRAP_UNDEFINED_14,
+ *      - TRAP_UNDEFINED_15,
+ *      - TRAP_UNDEFINED_16,
+ *      - TRAP_UNDEFINED_17,
+ *      - TRAP_UNDEFINED_18,
+ *      - TRAP_UNDEFINED_19,
+ *      - TRAP_UNDEFINED_1A,
+ *      - TRAP_UNDEFINED_1B,
+ *      - TRAP_UNDEFINED_1C,
+ *      - TRAP_UNDEFINED_1D,
+ *      - TRAP_UNDEFINED_1E,
+ *      - TRAP_UNDEFINED_1F,
+ *      - TRAP_GMRP,
+ *      - TRAP_GVRP,
+ *      - TRAP_UNDEF_GARP_22,
+ *      - TRAP_UNDEF_GARP_23,
+ *      - TRAP_UNDEF_GARP_24,
+ *      - TRAP_UNDEF_GARP_25,
+ *      - TRAP_UNDEF_GARP_26,
+ *      - TRAP_UNDEF_GARP_27,
+ *      - TRAP_UNDEF_GARP_28,
+ *      - TRAP_UNDEF_GARP_29,
+ *      - TRAP_UNDEF_GARP_2A,
+ *      - TRAP_UNDEF_GARP_2B,
+ *      - TRAP_UNDEF_GARP_2C,
+ *      - TRAP_UNDEF_GARP_2D,
+ *      - TRAP_UNDEF_GARP_2E,
+ *      - TRAP_UNDEF_GARP_2F,
+ *      - TRAP_CDP.
+ *      - TRAP_CSSTP.
+ *      - TRAP_LLDP.
+ */
+rtk_api_ret_t rtk_trap_rmaKeepFormat_set(rtk_trap_type_t type, rtk_enable_t enable)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_trap_rmaKeepFormat_set(type, enable);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_trap_rmaKeepFormat_get
+ * Description:
+ *      Get Reserved multicast address keep format configuration.
+ * Input:
+ *      type - rma type.
+ * Output:
+ *      pEnable - keep format status.
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_INPUT        - Invalid input parameters.
+ * Note:
+ *      There are 48 types of Reserved Multicast Address frames for application usage.
+ *      They are defined as follows.
+ *      - TRAP_BRG_GROUP,
+ *      - TRAP_FD_PAUSE,
+ *      - TRAP_SP_MCAST,
+ *      - TRAP_1X_PAE,
+ *      - TRAP_UNDEF_BRG_04,
+ *      - TRAP_UNDEF_BRG_05,
+ *      - TRAP_UNDEF_BRG_06,
+ *      - TRAP_UNDEF_BRG_07,
+ *      - TRAP_PROVIDER_BRIDGE_GROUP_ADDRESS,
+ *      - TRAP_UNDEF_BRG_09,
+ *      - TRAP_UNDEF_BRG_0A,
+ *      - TRAP_UNDEF_BRG_0B,
+ *      - TRAP_UNDEF_BRG_0C,
+ *      - TRAP_PROVIDER_BRIDGE_GVRP_ADDRESS,
+ *      - TRAP_8021AB,
+ *      - TRAP_UNDEF_BRG_0F,
+ *      - TRAP_BRG_MNGEMENT,
+ *      - TRAP_UNDEFINED_11,
+ *      - TRAP_UNDEFINED_12,
+ *      - TRAP_UNDEFINED_13,
+ *      - TRAP_UNDEFINED_14,
+ *      - TRAP_UNDEFINED_15,
+ *      - TRAP_UNDEFINED_16,
+ *      - TRAP_UNDEFINED_17,
+ *      - TRAP_UNDEFINED_18,
+ *      - TRAP_UNDEFINED_19,
+ *      - TRAP_UNDEFINED_1A,
+ *      - TRAP_UNDEFINED_1B,
+ *      - TRAP_UNDEFINED_1C,
+ *      - TRAP_UNDEFINED_1D,
+ *      - TRAP_UNDEFINED_1E,
+ *      - TRAP_UNDEFINED_1F,
+ *      - TRAP_GMRP,
+ *      - TRAP_GVRP,
+ *      - TRAP_UNDEF_GARP_22,
+ *      - TRAP_UNDEF_GARP_23,
+ *      - TRAP_UNDEF_GARP_24,
+ *      - TRAP_UNDEF_GARP_25,
+ *      - TRAP_UNDEF_GARP_26,
+ *      - TRAP_UNDEF_GARP_27,
+ *      - TRAP_UNDEF_GARP_28,
+ *      - TRAP_UNDEF_GARP_29,
+ *      - TRAP_UNDEF_GARP_2A,
+ *      - TRAP_UNDEF_GARP_2B,
+ *      - TRAP_UNDEF_GARP_2C,
+ *      - TRAP_UNDEF_GARP_2D,
+ *      - TRAP_UNDEF_GARP_2E,
+ *      - TRAP_UNDEF_GARP_2F,
+ *      - TRAP_CDP.
+ *      - TRAP_CSSTP.
+ *      - TRAP_LLDP.
+ */
+rtk_api_ret_t rtk_trap_rmaKeepFormat_get(rtk_trap_type_t type, rtk_enable_t *pEnable)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_trap_rmaKeepFormat_get(type, pEnable);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+
+
+
+
diff -Nruw linux-4.4.115-fbx/drivers/misc/freebox./rtlapi/trap.h linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/trap.h
--- linux-4.4.115-fbx/drivers/misc/freebox./rtlapi/trap.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/trap.h	2019-01-22 16:16:24.723257454 +0100
@@ -0,0 +1,759 @@
+/*
+ * Copyright (C) 2013 Realtek Semiconductor Corp.
+ * All Rights Reserved.
+ *
+ * This program is the proprietary software of Realtek Semiconductor
+ * Corporation and/or its licensors, and only be used, duplicated,
+ * modified or distributed under the authorized license from Realtek.
+ *
+ * ANY USE OF THE SOFTWARE OTHER THAN AS AUTHORIZED UNDER
+ * THIS LICENSE OR COPYRIGHT LAW IS PROHIBITED.
+ *
+ * Purpose : RTL8367/RTL8367C switch high-level API
+ *
+ * Feature : The file includes the Trap module high-layer API definition
+ *
+ */
+
+#ifndef __RTK_API_TRAP_H__
+#define __RTK_API_TRAP_H__
+
+
+typedef enum rtk_trap_type_e
+{
+    TRAP_BRG_GROUP = 0,
+    TRAP_FD_PAUSE,
+    TRAP_SP_MCAST,
+    TRAP_1X_PAE,
+    TRAP_UNDEF_BRG_04,
+    TRAP_UNDEF_BRG_05,
+    TRAP_UNDEF_BRG_06,
+    TRAP_UNDEF_BRG_07,
+    TRAP_PROVIDER_BRIDGE_GROUP_ADDRESS,
+    TRAP_UNDEF_BRG_09,
+    TRAP_UNDEF_BRG_0A,
+    TRAP_UNDEF_BRG_0B,
+    TRAP_UNDEF_BRG_0C,
+    TRAP_PROVIDER_BRIDGE_GVRP_ADDRESS,
+    TRAP_8021AB,
+    TRAP_UNDEF_BRG_0F,
+    TRAP_BRG_MNGEMENT,
+    TRAP_UNDEFINED_11,
+    TRAP_UNDEFINED_12,
+    TRAP_UNDEFINED_13,
+    TRAP_UNDEFINED_14,
+    TRAP_UNDEFINED_15,
+    TRAP_UNDEFINED_16,
+    TRAP_UNDEFINED_17,
+    TRAP_UNDEFINED_18,
+    TRAP_UNDEFINED_19,
+    TRAP_UNDEFINED_1A,
+    TRAP_UNDEFINED_1B,
+    TRAP_UNDEFINED_1C,
+    TRAP_UNDEFINED_1D,
+    TRAP_UNDEFINED_1E,
+    TRAP_UNDEFINED_1F,
+    TRAP_GMRP,
+    TRAP_GVRP,
+    TRAP_UNDEF_GARP_22,
+    TRAP_UNDEF_GARP_23,
+    TRAP_UNDEF_GARP_24,
+    TRAP_UNDEF_GARP_25,
+    TRAP_UNDEF_GARP_26,
+    TRAP_UNDEF_GARP_27,
+    TRAP_UNDEF_GARP_28,
+    TRAP_UNDEF_GARP_29,
+    TRAP_UNDEF_GARP_2A,
+    TRAP_UNDEF_GARP_2B,
+    TRAP_UNDEF_GARP_2C,
+    TRAP_UNDEF_GARP_2D,
+    TRAP_UNDEF_GARP_2E,
+    TRAP_UNDEF_GARP_2F,
+    TRAP_CDP,
+    TRAP_CSSTP,
+    TRAP_LLDP,
+    TRAP_END,
+} rtk_trap_type_t;
+
+
+typedef enum rtk_mcast_type_e
+{
+    MCAST_L2 = 0,
+    MCAST_IPV4,
+    MCAST_IPV6,
+    MCAST_END
+} rtk_mcast_type_t;
+
+typedef enum rtk_trap_mcast_action_e
+{
+    MCAST_ACTION_FORWARD = 0,
+    MCAST_ACTION_DROP,
+    MCAST_ACTION_TRAP2CPU,
+    MCAST_ACTION_ROUTER_PORT,
+    MCAST_ACTION_DROP_EX_RMA,
+    MCAST_ACTION_END
+} rtk_trap_mcast_action_t;
+
+typedef enum rtk_trap_rma_action_e
+{
+    RMA_ACTION_FORWARD = 0,
+    RMA_ACTION_TRAP2CPU,
+    RMA_ACTION_DROP,
+    RMA_ACTION_FORWARD_EXCLUDE_CPU,
+    RMA_ACTION_END
+} rtk_trap_rma_action_t;
+
+typedef enum rtk_trap_ucast_action_e
+{
+    UCAST_ACTION_FORWARD_PMASK = 0,
+    UCAST_ACTION_DROP,
+    UCAST_ACTION_TRAP2CPU,
+    UCAST_ACTION_FLOODING,
+    UCAST_ACTION_END
+} rtk_trap_ucast_action_t;
+
+typedef enum rtk_trap_ucast_type_e
+{
+    UCAST_UNKNOWNDA = 0,
+    UCAST_UNKNOWNSA,
+    UCAST_UNMATCHSA,
+    UCAST_END
+} rtk_trap_ucast_type_t;
+
+typedef enum rtk_trap_reason_type_e
+{
+    TRAP_REASON_RMA = 0,
+    TRAP_REASON_OAM,
+    TRAP_REASON_1XUNAUTH,
+    TRAP_REASON_VLANSTACK,
+    TRAP_REASON_UNKNOWNMC,
+    TRAP_REASON_END,
+} rtk_trap_reason_type_t;
+
+
+/* Function Name:
+ *      rtk_trap_unknownUnicastPktAction_set
+ * Description:
+ *      Set unknown unicast packet action configuration.
+ * Input:
+ *      port            - ingress port ID for unknown unicast packet
+ *      ucast_action    - Unknown unicast action.
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK                  - OK
+ *      RT_ERR_FAILED              - Failed
+ *      RT_ERR_SMI                 - SMI access error
+ *      RT_ERR_NOT_ALLOWED         - Invalid action.
+ *      RT_ERR_INPUT               - Invalid input parameters.
+ * Note:
+ *      This API sets the unknown unicast packet action configuration.
+ *      The unknown unicast actions are as follows:
+ *          - UCAST_ACTION_FORWARD_PMASK
+ *          - UCAST_ACTION_DROP
+ *          - UCAST_ACTION_TRAP2CPU
+ *          - UCAST_ACTION_FLOODING
+ */
+rtk_api_ret_t rtk_trap_unknownUnicastPktAction_set(rtk_port_t port, rtk_trap_ucast_action_t ucast_action);
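+
+/*
+ * Usage sketch (illustrative, not part of the original SDK; the port id
+ * below is hypothetical):
+ *      rtk_trap_unknownUnicastPktAction_set(3, UCAST_ACTION_DROP);
+ * This drops every unicast packet that misses the L2 lookup when it
+ * ingresses port 3.
+ */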
+
+/* Function Name:
+ *      rtk_trap_unknownUnicastPktAction_get
+ * Description:
+ *      Get unknown unicast packet action configuration.
+ * Input:
+ *      port            - ingress port ID for unknown unicast packet
+ * Output:
+ *      pUcast_action   - Unknown unicast action.
+ * Return:
+ *      RT_ERR_OK                  - OK
+ *      RT_ERR_FAILED              - Failed
+ *      RT_ERR_SMI                 - SMI access error
+ *      RT_ERR_NOT_ALLOWED         - Invalid action.
+ *      RT_ERR_INPUT               - Invalid input parameters.
+ *      RT_ERR_NULL_POINTER        - Null pointer
+ * Note:
+ *      This API gets the unknown unicast packet action configuration.
+ *      The unknown unicast actions are as follows:
+ *          - UCAST_ACTION_FORWARD_PMASK
+ *          - UCAST_ACTION_DROP
+ *          - UCAST_ACTION_TRAP2CPU
+ *          - UCAST_ACTION_FLOODING
+ */
+rtk_api_ret_t rtk_trap_unknownUnicastPktAction_get(rtk_port_t port, rtk_trap_ucast_action_t *pUcast_action);
+
+/* Function Name:
+ *      rtk_trap_unknownMacPktAction_set
+ * Description:
+ *      Set unknown source MAC packet action configuration.
+ * Input:
+ *      ucast_action    - Unknown source MAC action.
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK                  - OK
+ *      RT_ERR_FAILED              - Failed
+ *      RT_ERR_SMI                 - SMI access error
+ *      RT_ERR_NOT_ALLOWED         - Invalid action.
+ *      RT_ERR_INPUT               - Invalid input parameters.
+ * Note:
+ *      This API sets the unknown source MAC packet action configuration.
+ *      The supported actions are as follows:
+ *          - UCAST_ACTION_FORWARD_PMASK
+ *          - UCAST_ACTION_DROP
+ *          - UCAST_ACTION_TRAP2CPU
+ */
+extern rtk_api_ret_t rtk_trap_unknownMacPktAction_set(rtk_trap_ucast_action_t ucast_action);
+
+/* Function Name:
+ *      rtk_trap_unknownMacPktAction_get
+ * Description:
+ *      Get unknown source MAC packet action configuration.
+ * Input:
+ *      None.
+ * Output:
+ *      pUcast_action   - Unknown source MAC action.
+ * Return:
+ *      RT_ERR_OK                  - OK
+ *      RT_ERR_FAILED              - Failed
+ *      RT_ERR_SMI                 - SMI access error
+ *      RT_ERR_NULL_POINTER        - Null Pointer.
+ *      RT_ERR_INPUT               - Invalid input parameters.
+ * Note:
+ *
+ */
+extern rtk_api_ret_t rtk_trap_unknownMacPktAction_get(rtk_trap_ucast_action_t *pUcast_action);
+
+/* Function Name:
+ *      rtk_trap_unmatchMacPktAction_set
+ * Description:
+ *      Set unmatch source MAC packet action configuration.
+ * Input:
+ *      ucast_action    - Unmatch source MAC action.
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK                  - OK
+ *      RT_ERR_FAILED              - Failed
+ *      RT_ERR_SMI                 - SMI access error
+ *      RT_ERR_NOT_ALLOWED         - Invalid action.
+ *      RT_ERR_INPUT               - Invalid input parameters.
+ * Note:
+ *      This API sets the unmatch source MAC packet action configuration.
+ *      The supported actions are as follows:
+ *          - UCAST_ACTION_FORWARD_PMASK
+ *          - UCAST_ACTION_DROP
+ *          - UCAST_ACTION_TRAP2CPU
+ */
+extern rtk_api_ret_t rtk_trap_unmatchMacPktAction_set(rtk_trap_ucast_action_t ucast_action);
+
+/* Function Name:
+ *      rtk_trap_unmatchMacPktAction_get
+ * Description:
+ *      Get unmatch source MAC packet action configuration.
+ * Input:
+ *      None.
+ * Output:
+ *      pUcast_action   - Unmatch source MAC action.
+ * Return:
+ *      RT_ERR_OK                  - OK
+ *      RT_ERR_FAILED              - Failed
+ *      RT_ERR_SMI                 - SMI access error
+ *      RT_ERR_NOT_ALLOWED         - Invalid action.
+ *      RT_ERR_INPUT               - Invalid input parameters.
+ * Note:
+ *      This API gets the unmatch source MAC packet action configuration.
+ *      The supported actions are as follows:
+ *          - UCAST_ACTION_FORWARD_PMASK
+ *          - UCAST_ACTION_DROP
+ *          - UCAST_ACTION_TRAP2CPU
+ */
+extern rtk_api_ret_t rtk_trap_unmatchMacPktAction_get(rtk_trap_ucast_action_t *pUcast_action);
+
+/* Function Name:
+ *      rtk_trap_unmatchMacMoving_set
+ * Description:
+ *      Set unmatch source MAC packet moving state.
+ * Input:
+ *      port        - Port ID.
+ *      enable      - ENABLED: allow SA moving, DISABLED: don't allow SA moving.
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK                  - OK
+ *      RT_ERR_FAILED              - Failed
+ *      RT_ERR_SMI                 - SMI access error
+ *      RT_ERR_NOT_ALLOWED         - Invalid action.
+ *      RT_ERR_INPUT               - Invalid input parameters.
+ * Note:
+ */
+extern rtk_api_ret_t rtk_trap_unmatchMacMoving_set(rtk_port_t port, rtk_enable_t enable);
+
+/* Function Name:
+ *      rtk_trap_unmatchMacMoving_get
+ * Description:
+ *      Get unmatch source MAC packet moving state.
+ * Input:
+ *      port        - Port ID.
+ * Output:
+ *      pEnable     - ENABLED: allow SA moving, DISABLED: don't allow SA moving.
+ * Return:
+ *      RT_ERR_OK                  - OK
+ *      RT_ERR_FAILED              - Failed
+ *      RT_ERR_SMI                 - SMI access error
+ *      RT_ERR_NOT_ALLOWED         - Invalid action.
+ *      RT_ERR_INPUT               - Invalid input parameters.
+ * Note:
+ */
+extern rtk_api_ret_t rtk_trap_unmatchMacMoving_get(rtk_port_t port, rtk_enable_t *pEnable);
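+
+/*
+ * Usage sketch (illustrative, not part of the original SDK): a basic
+ * port-security setup traps packets with unknown source MACs to the CPU,
+ * drops packets whose source MAC presumably conflicts with an entry bound
+ * to another port, and disables SA moving on port 3 (a hypothetical port
+ * id):
+ *      rtk_trap_unknownMacPktAction_set(UCAST_ACTION_TRAP2CPU);
+ *      rtk_trap_unmatchMacPktAction_set(UCAST_ACTION_DROP);
+ *      rtk_trap_unmatchMacMoving_set(3, DISABLED);
+ */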
+
+/* Function Name:
+ *      rtk_trap_unknownMcastPktAction_set
+ * Description:
+ *      Set behavior of unknown multicast
+ * Input:
+ *      port            - Port id.
+ *      type            - unknown multicast packet type.
+ *      mcast_action    - unknown multicast action.
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_PORT_ID      - Invalid port number.
+ *      RT_ERR_NOT_ALLOWED  - Invalid action.
+ *      RT_ERR_INPUT        - Invalid input parameters.
+ * Note:
+ *      When the switch receives an unknown multicast packet, it may trap, drop or flood the packet.
+ *      (1) The unknown multicast packet types are as follows:
+ *          - MCAST_L2
+ *          - MCAST_IPV4
+ *          - MCAST_IPV6
+ *      (2) The unknown multicast actions are as follows:
+ *          - MCAST_ACTION_FORWARD
+ *          - MCAST_ACTION_DROP
+ *          - MCAST_ACTION_TRAP2CPU
+ */
+extern rtk_api_ret_t rtk_trap_unknownMcastPktAction_set(rtk_port_t port, rtk_mcast_type_t type, rtk_trap_mcast_action_t mcast_action);
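+
+/*
+ * Usage sketch (illustrative; the port id below is hypothetical): drop
+ * unknown IPv4 multicast on port 3 while leaving L2 and IPv6 multicast
+ * behavior untouched:
+ *      rtk_trap_unknownMcastPktAction_set(3, MCAST_IPV4, MCAST_ACTION_DROP);
+ */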
+
+/* Function Name:
+ *      rtk_trap_unknownMcastPktAction_get
+ * Description:
+ *      Get behavior of unknown multicast
+ * Input:
+ *      port - Port id.
+ *      type - unknown multicast packet type.
+ * Output:
+ *      pMcast_action - unknown multicast action.
+ * Return:
+ *      RT_ERR_OK               - OK
+ *      RT_ERR_FAILED           - Failed
+ *      RT_ERR_SMI              - SMI access error
+ *      RT_ERR_PORT_ID          - Invalid port number.
+ *      RT_ERR_NOT_ALLOWED      - Invalid operation.
+ *      RT_ERR_INPUT            - Invalid input parameters.
+ * Note:
+ *      When the switch receives an unknown multicast packet, it may trap, drop or flood the packet.
+ *      (1) The unknown multicast packet types are as follows:
+ *          - MCAST_L2
+ *          - MCAST_IPV4
+ *          - MCAST_IPV6
+ *      (2) The unknown multicast actions are as follows:
+ *          - MCAST_ACTION_FORWARD
+ *          - MCAST_ACTION_DROP
+ *          - MCAST_ACTION_TRAP2CPU
+ */
+extern rtk_api_ret_t rtk_trap_unknownMcastPktAction_get(rtk_port_t port, rtk_mcast_type_t type, rtk_trap_mcast_action_t *pMcast_action);
+
+/* Function Name:
+ *      rtk_trap_lldpEnable_set
+ * Description:
+ *      Set LLDP enable.
+ * Input:
+ *      enabled - LLDP enable, 0: follow RMA, 1: use LLDP action.
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK               - OK
+ *      RT_ERR_FAILED           - Failed
+ *      RT_ERR_SMI              - SMI access error
+ *      RT_ERR_NOT_ALLOWED      - Invalid action.
+ *      RT_ERR_INPUT            - Invalid input parameters.
+ * Note:
+ *      LLDP is defined as follows:
+ *      - DMAC 01:80:c2:00:00:0e, ethertype = 0x88CC (LLDP)
+ *      - DMAC 01:80:c2:00:00:03, ethertype = 0x88CC
+ *      - DMAC 01:80:c2:00:00:00, ethertype = 0x88CC
+ */
+extern rtk_api_ret_t rtk_trap_lldpEnable_set(rtk_enable_t enabled);
+
+/* Function Name:
+ *      rtk_trap_lldpEnable_get
+ * Description:
+ *      Get LLDP status.
+ * Input:
+ *      None
+ * Output:
+ *      pEnabled - LLDP enable, 0: follow RMA, 1: use LLDP action.
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_INPUT        - Invalid input parameters.
+ * Note:
+ *      LLDP is defined as follows:
+ *      - DMAC 01:80:c2:00:00:0e, ethertype = 0x88CC (LLDP)
+ *      - DMAC 01:80:c2:00:00:03, ethertype = 0x88CC
+ *      - DMAC 01:80:c2:00:00:00, ethertype = 0x88CC
+ */
+extern rtk_api_ret_t rtk_trap_lldpEnable_get(rtk_enable_t *pEnabled);
+
+/* Function Name:
+ *      rtk_trap_reasonTrapToCpuPriority_set
+ * Description:
+ *      Set priority value of a packet that trapped to CPU port according to specific reason.
+ * Input:
+ *      type     - reason that trap to CPU port.
+ *      priority - internal priority that is going to be set for specific trap reason.
+ * Output:
+ *      None.
+ * Return:
+ *      RT_ERR_OK
+ *      RT_ERR_FAILED
+ *      RT_ERR_NOT_INIT - The module is not initialized
+ *      RT_ERR_INPUT    - Invalid input parameter
+ * Note:
+ *      The currently supported trap reasons are as follows:
+ *      - TRAP_REASON_RMA
+ *      - TRAP_REASON_OAM
+ *      - TRAP_REASON_1XUNAUTH
+ *      - TRAP_REASON_VLANSTACK
+ *      - TRAP_REASON_UNKNOWNMC
+ */
+extern rtk_api_ret_t rtk_trap_reasonTrapToCpuPriority_set(rtk_trap_reason_type_t type, rtk_pri_t priority);
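+
+/*
+ * Usage sketch (illustrative, not part of the original SDK): give RMA
+ * frames trapped to the CPU the highest internal priority (assuming a
+ * 0-7 priority range, which this header does not state):
+ *      rtk_trap_reasonTrapToCpuPriority_set(TRAP_REASON_RMA, 7);
+ */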
+
+/* Function Name:
+ *      rtk_trap_reasonTrapToCpuPriority_get
+ * Description:
+ *      Get priority value of a packet that trapped to CPU port according to specific reason.
+ * Input:
+ *      type      - reason that trap to CPU port.
+ * Output:
+ *      pPriority - configured internal priority for such reason.
+ * Return:
+ *      RT_ERR_OK
+ *      RT_ERR_FAILED
+ *      RT_ERR_NOT_INIT     - The module is not initialized
+ *      RT_ERR_INPUT        - Invalid input parameter
+ *      RT_ERR_NULL_POINTER - NULL pointer
+ * Note:
+ *      The currently supported trap reasons are as follows:
+ *      - TRAP_REASON_RMA
+ *      - TRAP_REASON_OAM
+ *      - TRAP_REASON_1XUNAUTH
+ *      - TRAP_REASON_VLANSTACK
+ *      - TRAP_REASON_UNKNOWNMC
+ */
+extern rtk_api_ret_t rtk_trap_reasonTrapToCpuPriority_get(rtk_trap_reason_type_t type, rtk_pri_t *pPriority);
+
+/* Function Name:
+ *      rtk_trap_rmaAction_set
+ * Description:
+ *      Set Reserved multicast address action configuration.
+ * Input:
+ *      type    - rma type.
+ *      rma_action - RMA action.
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_INPUT        - Invalid input parameters.
+ *      RT_ERR_ENABLE       - Invalid enable parameter
+ * Note:
+ *
+ *      There are 48 types of Reserved Multicast Address frame for application usage.
+ *      (1) They are defined as follows:
+ *      - TRAP_BRG_GROUP,
+ *      - TRAP_FD_PAUSE,
+ *      - TRAP_SP_MCAST,
+ *      - TRAP_1X_PAE,
+ *      - TRAP_UNDEF_BRG_04,
+ *      - TRAP_UNDEF_BRG_05,
+ *      - TRAP_UNDEF_BRG_06,
+ *      - TRAP_UNDEF_BRG_07,
+ *      - TRAP_PROVIDER_BRIDGE_GROUP_ADDRESS,
+ *      - TRAP_UNDEF_BRG_09,
+ *      - TRAP_UNDEF_BRG_0A,
+ *      - TRAP_UNDEF_BRG_0B,
+ *      - TRAP_UNDEF_BRG_0C,
+ *      - TRAP_PROVIDER_BRIDGE_GVRP_ADDRESS,
+ *      - TRAP_8021AB,
+ *      - TRAP_UNDEF_BRG_0F,
+ *      - TRAP_BRG_MNGEMENT,
+ *      - TRAP_UNDEFINED_11,
+ *      - TRAP_UNDEFINED_12,
+ *      - TRAP_UNDEFINED_13,
+ *      - TRAP_UNDEFINED_14,
+ *      - TRAP_UNDEFINED_15,
+ *      - TRAP_UNDEFINED_16,
+ *      - TRAP_UNDEFINED_17,
+ *      - TRAP_UNDEFINED_18,
+ *      - TRAP_UNDEFINED_19,
+ *      - TRAP_UNDEFINED_1A,
+ *      - TRAP_UNDEFINED_1B,
+ *      - TRAP_UNDEFINED_1C,
+ *      - TRAP_UNDEFINED_1D,
+ *      - TRAP_UNDEFINED_1E,
+ *      - TRAP_UNDEFINED_1F,
+ *      - TRAP_GMRP,
+ *      - TRAP_GVRP,
+ *      - TRAP_UNDEF_GARP_22,
+ *      - TRAP_UNDEF_GARP_23,
+ *      - TRAP_UNDEF_GARP_24,
+ *      - TRAP_UNDEF_GARP_25,
+ *      - TRAP_UNDEF_GARP_26,
+ *      - TRAP_UNDEF_GARP_27,
+ *      - TRAP_UNDEF_GARP_28,
+ *      - TRAP_UNDEF_GARP_29,
+ *      - TRAP_UNDEF_GARP_2A,
+ *      - TRAP_UNDEF_GARP_2B,
+ *      - TRAP_UNDEF_GARP_2C,
+ *      - TRAP_UNDEF_GARP_2D,
+ *      - TRAP_UNDEF_GARP_2E,
+ *      - TRAP_UNDEF_GARP_2F,
+ *      - TRAP_CDP.
+ *      - TRAP_CSSTP.
+ *      - TRAP_LLDP.
+ *      (2) The RMA actions are as follows:
+ *      - RMA_ACTION_FORWARD
+ *      - RMA_ACTION_TRAP2CPU
+ *      - RMA_ACTION_DROP
+ *      - RMA_ACTION_FORWARD_EXCLUDE_CPU
+ */
+extern rtk_api_ret_t rtk_trap_rmaAction_set(rtk_trap_type_t type, rtk_trap_rma_action_t rma_action);
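+
+/*
+ * Usage sketch (illustrative, not part of the original SDK): trap LLDP
+ * frames to the CPU.  Per the rtk_trap_lldpEnable notes above, LLDP must
+ * first be switched from generic RMA handling to its own action, which is
+ * then presumably configured through the TRAP_LLDP entry:
+ *      rtk_trap_lldpEnable_set(ENABLED);
+ *      rtk_trap_rmaAction_set(TRAP_LLDP, RMA_ACTION_TRAP2CPU);
+ */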
+
+/* Function Name:
+ *      rtk_trap_rmaAction_get
+ * Description:
+ *      Get Reserved multicast address action configuration.
+ * Input:
+ *      type - rma type.
+ * Output:
+ *      pRma_action - RMA action.
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_INPUT        - Invalid input parameters.
+ * Note:
+ *      There are 48 types of Reserved Multicast Address frame for application usage.
+ *      (1) They are defined as follows:
+ *      - TRAP_BRG_GROUP,
+ *      - TRAP_FD_PAUSE,
+ *      - TRAP_SP_MCAST,
+ *      - TRAP_1X_PAE,
+ *      - TRAP_UNDEF_BRG_04,
+ *      - TRAP_UNDEF_BRG_05,
+ *      - TRAP_UNDEF_BRG_06,
+ *      - TRAP_UNDEF_BRG_07,
+ *      - TRAP_PROVIDER_BRIDGE_GROUP_ADDRESS,
+ *      - TRAP_UNDEF_BRG_09,
+ *      - TRAP_UNDEF_BRG_0A,
+ *      - TRAP_UNDEF_BRG_0B,
+ *      - TRAP_UNDEF_BRG_0C,
+ *      - TRAP_PROVIDER_BRIDGE_GVRP_ADDRESS,
+ *      - TRAP_8021AB,
+ *      - TRAP_UNDEF_BRG_0F,
+ *      - TRAP_BRG_MNGEMENT,
+ *      - TRAP_UNDEFINED_11,
+ *      - TRAP_UNDEFINED_12,
+ *      - TRAP_UNDEFINED_13,
+ *      - TRAP_UNDEFINED_14,
+ *      - TRAP_UNDEFINED_15,
+ *      - TRAP_UNDEFINED_16,
+ *      - TRAP_UNDEFINED_17,
+ *      - TRAP_UNDEFINED_18,
+ *      - TRAP_UNDEFINED_19,
+ *      - TRAP_UNDEFINED_1A,
+ *      - TRAP_UNDEFINED_1B,
+ *      - TRAP_UNDEFINED_1C,
+ *      - TRAP_UNDEFINED_1D,
+ *      - TRAP_UNDEFINED_1E,
+ *      - TRAP_UNDEFINED_1F,
+ *      - TRAP_GMRP,
+ *      - TRAP_GVRP,
+ *      - TRAP_UNDEF_GARP_22,
+ *      - TRAP_UNDEF_GARP_23,
+ *      - TRAP_UNDEF_GARP_24,
+ *      - TRAP_UNDEF_GARP_25,
+ *      - TRAP_UNDEF_GARP_26,
+ *      - TRAP_UNDEF_GARP_27,
+ *      - TRAP_UNDEF_GARP_28,
+ *      - TRAP_UNDEF_GARP_29,
+ *      - TRAP_UNDEF_GARP_2A,
+ *      - TRAP_UNDEF_GARP_2B,
+ *      - TRAP_UNDEF_GARP_2C,
+ *      - TRAP_UNDEF_GARP_2D,
+ *      - TRAP_UNDEF_GARP_2E,
+ *      - TRAP_UNDEF_GARP_2F,
+ *      - TRAP_CDP.
+ *      - TRAP_CSSTP.
+ *      - TRAP_LLDP.
+ *      (2) The RMA actions are as follows:
+ *      - RMA_ACTION_FORWARD
+ *      - RMA_ACTION_TRAP2CPU
+ *      - RMA_ACTION_DROP
+ *      - RMA_ACTION_FORWARD_EXCLUDE_CPU
+ */
+extern rtk_api_ret_t rtk_trap_rmaAction_get(rtk_trap_type_t type, rtk_trap_rma_action_t *pRma_action);
+
+/* Function Name:
+ *      rtk_trap_rmaKeepFormat_set
+ * Description:
+ *      Set Reserved multicast address keep format configuration.
+ * Input:
+ *      type    - rma type.
+ *      enable - enable keep format.
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_INPUT        - Invalid input parameters.
+ *      RT_ERR_ENABLE       - Invalid enable parameter
+ * Note:
+ *
+ *      There are 48 types of Reserved Multicast Address frame for application usage.
+ *      They are defined as follows:
+ *      - TRAP_BRG_GROUP,
+ *      - TRAP_FD_PAUSE,
+ *      - TRAP_SP_MCAST,
+ *      - TRAP_1X_PAE,
+ *      - TRAP_UNDEF_BRG_04,
+ *      - TRAP_UNDEF_BRG_05,
+ *      - TRAP_UNDEF_BRG_06,
+ *      - TRAP_UNDEF_BRG_07,
+ *      - TRAP_PROVIDER_BRIDGE_GROUP_ADDRESS,
+ *      - TRAP_UNDEF_BRG_09,
+ *      - TRAP_UNDEF_BRG_0A,
+ *      - TRAP_UNDEF_BRG_0B,
+ *      - TRAP_UNDEF_BRG_0C,
+ *      - TRAP_PROVIDER_BRIDGE_GVRP_ADDRESS,
+ *      - TRAP_8021AB,
+ *      - TRAP_UNDEF_BRG_0F,
+ *      - TRAP_BRG_MNGEMENT,
+ *      - TRAP_UNDEFINED_11,
+ *      - TRAP_UNDEFINED_12,
+ *      - TRAP_UNDEFINED_13,
+ *      - TRAP_UNDEFINED_14,
+ *      - TRAP_UNDEFINED_15,
+ *      - TRAP_UNDEFINED_16,
+ *      - TRAP_UNDEFINED_17,
+ *      - TRAP_UNDEFINED_18,
+ *      - TRAP_UNDEFINED_19,
+ *      - TRAP_UNDEFINED_1A,
+ *      - TRAP_UNDEFINED_1B,
+ *      - TRAP_UNDEFINED_1C,
+ *      - TRAP_UNDEFINED_1D,
+ *      - TRAP_UNDEFINED_1E,
+ *      - TRAP_UNDEFINED_1F,
+ *      - TRAP_GMRP,
+ *      - TRAP_GVRP,
+ *      - TRAP_UNDEF_GARP_22,
+ *      - TRAP_UNDEF_GARP_23,
+ *      - TRAP_UNDEF_GARP_24,
+ *      - TRAP_UNDEF_GARP_25,
+ *      - TRAP_UNDEF_GARP_26,
+ *      - TRAP_UNDEF_GARP_27,
+ *      - TRAP_UNDEF_GARP_28,
+ *      - TRAP_UNDEF_GARP_29,
+ *      - TRAP_UNDEF_GARP_2A,
+ *      - TRAP_UNDEF_GARP_2B,
+ *      - TRAP_UNDEF_GARP_2C,
+ *      - TRAP_UNDEF_GARP_2D,
+ *      - TRAP_UNDEF_GARP_2E,
+ *      - TRAP_UNDEF_GARP_2F,
+ *      - TRAP_CDP.
+ *      - TRAP_CSSTP.
+ *      - TRAP_LLDP.
+ */
+extern rtk_api_ret_t rtk_trap_rmaKeepFormat_set(rtk_trap_type_t type, rtk_enable_t enable);
+
+/* Function Name:
+ *      rtk_trap_rmaKeepFormat_get
+ * Description:
+ *      Get Reserved multicast address keep format configuration.
+ * Input:
+ *      type - rma type.
+ * Output:
+ *      pEnable - keep format status.
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_INPUT        - Invalid input parameters.
+ * Note:
+ *      There are 48 types of Reserved Multicast Address frame for application usage.
+ *      They are defined as follows:
+ *      - TRAP_BRG_GROUP,
+ *      - TRAP_FD_PAUSE,
+ *      - TRAP_SP_MCAST,
+ *      - TRAP_1X_PAE,
+ *      - TRAP_UNDEF_BRG_04,
+ *      - TRAP_UNDEF_BRG_05,
+ *      - TRAP_UNDEF_BRG_06,
+ *      - TRAP_UNDEF_BRG_07,
+ *      - TRAP_PROVIDER_BRIDGE_GROUP_ADDRESS,
+ *      - TRAP_UNDEF_BRG_09,
+ *      - TRAP_UNDEF_BRG_0A,
+ *      - TRAP_UNDEF_BRG_0B,
+ *      - TRAP_UNDEF_BRG_0C,
+ *      - TRAP_PROVIDER_BRIDGE_GVRP_ADDRESS,
+ *      - TRAP_8021AB,
+ *      - TRAP_UNDEF_BRG_0F,
+ *      - TRAP_BRG_MNGEMENT,
+ *      - TRAP_UNDEFINED_11,
+ *      - TRAP_UNDEFINED_12,
+ *      - TRAP_UNDEFINED_13,
+ *      - TRAP_UNDEFINED_14,
+ *      - TRAP_UNDEFINED_15,
+ *      - TRAP_UNDEFINED_16,
+ *      - TRAP_UNDEFINED_17,
+ *      - TRAP_UNDEFINED_18,
+ *      - TRAP_UNDEFINED_19,
+ *      - TRAP_UNDEFINED_1A,
+ *      - TRAP_UNDEFINED_1B,
+ *      - TRAP_UNDEFINED_1C,
+ *      - TRAP_UNDEFINED_1D,
+ *      - TRAP_UNDEFINED_1E,
+ *      - TRAP_UNDEFINED_1F,
+ *      - TRAP_GMRP,
+ *      - TRAP_GVRP,
+ *      - TRAP_UNDEF_GARP_22,
+ *      - TRAP_UNDEF_GARP_23,
+ *      - TRAP_UNDEF_GARP_24,
+ *      - TRAP_UNDEF_GARP_25,
+ *      - TRAP_UNDEF_GARP_26,
+ *      - TRAP_UNDEF_GARP_27,
+ *      - TRAP_UNDEF_GARP_28,
+ *      - TRAP_UNDEF_GARP_29,
+ *      - TRAP_UNDEF_GARP_2A,
+ *      - TRAP_UNDEF_GARP_2B,
+ *      - TRAP_UNDEF_GARP_2C,
+ *      - TRAP_UNDEF_GARP_2D,
+ *      - TRAP_UNDEF_GARP_2E,
+ *      - TRAP_UNDEF_GARP_2F,
+ *      - TRAP_CDP.
+ *      - TRAP_CSSTP.
+ *      - TRAP_LLDP.
+ */
+extern rtk_api_ret_t rtk_trap_rmaKeepFormat_get(rtk_trap_type_t type, rtk_enable_t *pEnable);
+
+
+#endif /* __RTK_API_TRAP_H__ */
+
+
diff -Nruw linux-4.4.115-fbx/drivers/misc/freebox./rtlapi/trunk.c linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/trunk.c
--- linux-4.4.115-fbx/drivers/misc/freebox./rtlapi/trunk.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/trunk.c	2019-01-22 16:16:24.723257454 +0100
@@ -0,0 +1,751 @@
+/*
+ * Copyright (C) 2013 Realtek Semiconductor Corp.
+ * All Rights Reserved.
+ *
+ * This program is the proprietary software of Realtek Semiconductor
+ * Corporation and/or its licensors, and may only be used, duplicated,
+ * modified or distributed under the authorized license from Realtek.
+ *
+ * ANY USE OF THE SOFTWARE OTHER THAN AS AUTHORIZED UNDER
+ * THIS LICENSE OR COPYRIGHT LAW IS PROHIBITED.
+ *
+ * $Revision: 79496 $
+ * $Date: 2017-06-08 17:31:25 +0800 (Thu, 08 Jun 2017) $
+ *
+ * Purpose : RTK switch high-level API for RTL8367/RTL8367C
+ * Feature : Here is a list of all functions and variables in Trunk module.
+ *
+ */
+
+#include <rtk_switch.h>
+#include <rtk_error.h>
+#include <trunk.h>
+#include <string.h>
+
+#include <rtl8367c_asicdrv.h>
+#include <rtl8367c_asicdrv_trunking.h>
+
+static rtk_api_ret_t _rtk_trunk_port_set(rtk_trunk_group_t trk_gid, rtk_portmask_t *pTrunk_member_portmask)
+{
+    rtk_api_ret_t retVal;
+    rtk_uint32 pmsk;
+    rtk_uint32 regValue, type, tmp;
+
+
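+    /*
+     * Identify the switch revision: the magic write to register 0x13C2
+     * presumably unlocks the chip-ID register 0x1300, whose value selects
+     * one of three trunk port-mask layouts handled below.
+     */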
+    if((retVal = rtl8367c_setAsicReg(0x13C2, 0x0249)) != RT_ERR_OK)
+        return retVal;
+
+    if((retVal = rtl8367c_getAsicReg(0x1300, &regValue)) != RT_ERR_OK)
+        return retVal;
+
+    if((retVal = rtl8367c_setAsicReg(0x13C2, 0x0000)) != RT_ERR_OK)
+        return retVal;
+
+    switch (regValue)
+    {
+        case 0x0276:
+        case 0x0597:
+        case 0x6367:
+            type = 0;
+            break;
+        case 0x0652:
+        case 0x6368:
+            type = 1;
+            break;
+        case 0x0801:
+        case 0x6511:
+            type = 2;
+            break;
+        default:
+            return RT_ERR_FAILED;
+    }
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    /* Check Trunk Group Valid */
+    RTK_CHK_TRUNK_GROUP_VALID(trk_gid);
+
+    if(NULL == pTrunk_member_portmask)
+        return RT_ERR_NULL_POINTER;
+
+    RTK_CHK_PORTMASK_VALID(pTrunk_member_portmask);
+
+    if((retVal = rtk_switch_portmask_L2P_get(pTrunk_member_portmask, &pmsk)) != RT_ERR_OK)
+        return retVal;
+
+    if((type == 0) || (type == 1))
+    {
+        if ((pmsk | RTL8367C_PORT_TRUNK_GROUP_MASK_MASK(trk_gid)) != (rtk_uint32)RTL8367C_PORT_TRUNK_GROUP_MASK_MASK(trk_gid))
+            return RT_ERR_PORT_MASK;
+
+        pmsk = (pmsk & RTL8367C_PORT_TRUNK_GROUP_MASK_MASK(trk_gid)) >> RTL8367C_PORT_TRUNK_GROUP_MASK_OFFSET(trk_gid);
+    }
+    else if(type == 2)
+    {
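+        /*
+         * Layout 2 parts (chip IDs 0x0801/0x6511) appear to support
+         * trunking only on physical ports 1, 3 and 7; remap those
+         * port-mask bits (0x2, 0x8, 0x80) onto bits 0, 1 and 3 of the
+         * compact mask the ASIC expects.
+         */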
+        tmp = 0;
+
+        if(pmsk & 0x2)
+            tmp |= 1;
+        if(pmsk & 0x8)
+            tmp |= 2;
+        if(pmsk & 0x80)
+            tmp |= 8;
+
+        pmsk = tmp;
+    }
+
+    if ((retVal = rtl8367c_setAsicTrunkingGroup(trk_gid, pmsk)) != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_trunk_port_get(rtk_trunk_group_t trk_gid, rtk_portmask_t *pTrunk_member_portmask)
+{
+    rtk_api_ret_t retVal;
+    rtk_uint32 pmsk;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    /* Check Trunk Group Valid */
+    RTK_CHK_TRUNK_GROUP_VALID(trk_gid);
+
+    if ((retVal = rtl8367c_getAsicTrunkingGroup(trk_gid, &pmsk)) != RT_ERR_OK)
+        return retVal;
+
+    pmsk = pmsk << RTL8367C_PORT_TRUNK_GROUP_MASK_OFFSET(trk_gid);
+
+    if((retVal = rtk_switch_portmask_P2L_get(pmsk, pTrunk_member_portmask)) != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_trunk_distributionAlgorithm_set(rtk_trunk_group_t trk_gid, rtk_uint32 algo_bitmask)
+{
+    rtk_api_ret_t retVal;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if (trk_gid != RTK_WHOLE_SYSTEM)
+        return RT_ERR_LA_TRUNK_ID;
+
+    if (algo_bitmask >= 128)
+        return RT_ERR_LA_HASHMASK;
+
+    if ((retVal = rtl8367c_setAsicTrunkingHashSelect(algo_bitmask)) != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_trunk_distributionAlgorithm_get(rtk_trunk_group_t trk_gid, rtk_uint32 *pAlgo_bitmask)
+{
+    rtk_api_ret_t retVal;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if (trk_gid != RTK_WHOLE_SYSTEM)
+        return RT_ERR_LA_TRUNK_ID;
+
+    if(NULL == pAlgo_bitmask)
+        return RT_ERR_NULL_POINTER;
+
+    if ((retVal = rtl8367c_getAsicTrunkingHashSelect((rtk_uint32 *)pAlgo_bitmask)) != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_trunk_trafficSeparate_set(rtk_trunk_group_t trk_gid, rtk_trunk_separateType_t separateType)
+{
+    rtk_api_ret_t retVal;
+    rtk_uint32 enabled;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if (trk_gid != RTK_WHOLE_SYSTEM)
+        return RT_ERR_LA_TRUNK_ID;
+
+    if(separateType >= SEPARATE_END)
+        return RT_ERR_INPUT;
+
+    enabled = (separateType == SEPARATE_FLOOD) ? ENABLED : DISABLED;
+    if ((retVal = rtl8367c_setAsicTrunkingFlood(enabled)) != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_trunk_trafficSeparate_get(rtk_trunk_group_t trk_gid, rtk_trunk_separateType_t *pSeparateType)
+{
+    rtk_api_ret_t retVal;
+    rtk_uint32 enabled;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if (trk_gid != RTK_WHOLE_SYSTEM)
+        return RT_ERR_LA_TRUNK_ID;
+
+    if(NULL == pSeparateType)
+        return RT_ERR_NULL_POINTER;
+
+    if ((retVal = rtl8367c_getAsicTrunkingFlood(&enabled)) != RT_ERR_OK)
+        return retVal;
+
+    *pSeparateType = (enabled == ENABLED) ? SEPARATE_FLOOD : SEPARATE_NONE;
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_trunk_mode_set(rtk_trunk_mode_t mode)
+{
+    rtk_api_ret_t retVal;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if(mode >= TRUNK_MODE_END)
+        return RT_ERR_INPUT;
+
+    if ((retVal = rtl8367c_setAsicTrunkingMode((rtk_uint32)mode)) != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_trunk_mode_get(rtk_trunk_mode_t *pMode)
+{
+    rtk_api_ret_t retVal;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if(NULL == pMode)
+        return RT_ERR_NULL_POINTER;
+
+    if ((retVal = rtl8367c_getAsicTrunkingMode((rtk_uint32 *)pMode)) != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_trunk_trafficPause_set(rtk_trunk_group_t trk_gid, rtk_enable_t enable)
+{
+    rtk_api_ret_t retVal;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    /* Check Trunk Group Valid */
+    RTK_CHK_TRUNK_GROUP_VALID(trk_gid);
+
+    if(enable >= RTK_ENABLE_END)
+        return RT_ERR_INPUT;
+
+    if ((retVal = rtl8367c_setAsicTrunkingFc((rtk_uint32)trk_gid, (rtk_uint32)enable)) != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_trunk_trafficPause_get(rtk_trunk_group_t trk_gid, rtk_enable_t *pEnable)
+{
+    rtk_api_ret_t retVal;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    /* Check Trunk Group Valid */
+    RTK_CHK_TRUNK_GROUP_VALID(trk_gid);
+
+    if(NULL == pEnable)
+        return RT_ERR_NULL_POINTER;
+
+    if ((retVal = rtl8367c_getAsicTrunkingFc((rtk_uint32)trk_gid, (rtk_uint32 *)pEnable)) != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_trunk_hashMappingTable_set(rtk_trunk_group_t trk_gid, rtk_trunk_hashVal2Port_t *pHash2Port_array)
+{
+    rtk_api_ret_t retVal;
+    rtk_uint32 hashValue;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    /* Check Trunk Group Valid */
+    RTK_CHK_TRUNK_GROUP_VALID(trk_gid);
+
+    if(NULL == pHash2Port_array)
+        return RT_ERR_NULL_POINTER;
+
+    if(trk_gid <= TRUNK_GROUP1)
+    {
+        for(hashValue = 0; hashValue < RTK_MAX_NUM_OF_TRUNK_HASH_VAL; hashValue++)
+        {
+            if ((retVal = rtl8367c_setAsicTrunkingHashTable(hashValue, pHash2Port_array->value[hashValue])) != RT_ERR_OK)
+                return retVal;
+        }
+    }
+    else
+    {
+        for(hashValue = 0; hashValue < RTK_MAX_NUM_OF_TRUNK_HASH_VAL; hashValue++)
+        {
+            if ((retVal = rtl8367c_setAsicTrunkingHashTable1(hashValue, pHash2Port_array->value[hashValue])) != RT_ERR_OK)
+                return retVal;
+        }
+    }
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_trunk_hashMappingTable_get(rtk_trunk_group_t trk_gid, rtk_trunk_hashVal2Port_t *pHash2Port_array)
+{
+    rtk_api_ret_t retVal;
+    rtk_uint32 hashValue;
+    rtk_uint32 hashPort;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    /* Check Trunk Group Valid */
+    RTK_CHK_TRUNK_GROUP_VALID(trk_gid);
+
+    if(NULL == pHash2Port_array)
+        return RT_ERR_NULL_POINTER;
+
+    if(trk_gid <= TRUNK_GROUP1)
+    {
+        for(hashValue = 0; hashValue < RTK_MAX_NUM_OF_TRUNK_HASH_VAL; hashValue++)
+        {
+            if ((retVal = rtl8367c_getAsicTrunkingHashTable(hashValue, &hashPort)) != RT_ERR_OK)
+                return retVal;
+
+            pHash2Port_array->value[hashValue] = hashPort;
+        }
+    }
+    else
+    {
+        for(hashValue = 0; hashValue < RTK_MAX_NUM_OF_TRUNK_HASH_VAL; hashValue++)
+        {
+            if ((retVal = rtl8367c_getAsicTrunkingHashTable1(hashValue, &hashPort)) != RT_ERR_OK)
+                return retVal;
+
+            pHash2Port_array->value[hashValue] = hashPort;
+        }
+    }
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_trunk_portQueueEmpty_get(rtk_portmask_t *pEmpty_portmask)
+{
+    rtk_api_ret_t retVal;
+    rtk_uint32 pmask;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if(NULL == pEmpty_portmask)
+        return RT_ERR_NULL_POINTER;
+
+    if ((retVal = rtl8367c_getAsicQeueuEmptyStatus(&pmask)) != RT_ERR_OK)
+        return retVal;
+
+    if ((retVal = rtk_switch_portmask_P2L_get(pmask, pEmpty_portmask)) != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+/* Function Name:
+ *      rtk_trunk_port_set
+ * Description:
+ *      Set trunking group available port mask
+ * Input:
+ *      trk_gid                 - trunk group id
+ *      pTrunk_member_portmask  - Logic trunking member port mask
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_LA_TRUNK_ID  - Invalid trunking group
+ *      RT_ERR_PORT_MASK    - Invalid portmask.
+ * Note:
+ *      The API sets the port trunking group port mask. Each port trunking group supports up to 4 ports.
+ *      If the port mask enables fewer than 2 ports, the trunking group is disabled.
+ */
+rtk_api_ret_t rtk_trunk_port_set(rtk_trunk_group_t trk_gid, rtk_portmask_t *pTrunk_member_portmask)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_trunk_port_set(trk_gid, pTrunk_member_portmask);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_trunk_port_get
+ * Description:
+ *      Get trunking group available port mask
+ * Input:
+ *      trk_gid - trunk group id
+ * Output:
+ *      pTrunk_member_portmask - Logic trunking member port mask
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_LA_TRUNK_ID  - Invalid trunking group
+ * Note:
+ *      The API gets the member port mask of a port trunking group.
+ */
+rtk_api_ret_t rtk_trunk_port_get(rtk_trunk_group_t trk_gid, rtk_portmask_t *pTrunk_member_portmask)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_trunk_port_get(trk_gid, pTrunk_member_portmask);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_trunk_distributionAlgorithm_set
+ * Description:
+ *      Set port trunking hash select sources
+ * Input:
+ *      trk_gid         - trunk group id
+ *      algo_bitmask   - Bitmask of the distribution algorithm
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_LA_TRUNK_ID  - Invalid trunking group
+ *      RT_ERR_LA_HASHMASK  - Hash algorithm selection error.
+ *      RT_ERR_PORT_MASK    - Invalid portmask.
+ * Note:
+ *      The API can set port trunking hash algorithm sources.
+ *      A 7-bit mask selects the link aggregation hash parameters:
+ *      - 0b0000001: SPA
+ *      - 0b0000010: SMAC
+ *      - 0b0000100: DMAC
+ *      - 0b0001000: SIP
+ *      - 0b0010000: DIP
+ *      - 0b0100000: TCP/UDP Source Port
+ *      - 0b1000000: TCP/UDP Destination Port
+ *      Example:
+ *      - 0b0000011: SMAC & SPA
+ *      - Any combination of the above bits may be used
+ */
+rtk_api_ret_t rtk_trunk_distributionAlgorithm_set(rtk_trunk_group_t trk_gid, rtk_uint32 algo_bitmask)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_trunk_distributionAlgorithm_set(trk_gid, algo_bitmask);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
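+
+#if 0 /* illustrative sketch, not part of the original SDK */
+/*
+ * Select source MAC + source port as the trunk hash inputs.  The
+ * RTK_TRUNK_*_HASH_MASK bits are defined in trunk.h; the hash selection
+ * is system-wide, hence RTK_WHOLE_SYSTEM as the group id.
+ */
+static rtk_api_ret_t example_trunk_hash_select(void)
+{
+    return rtk_trunk_distributionAlgorithm_set(RTK_WHOLE_SYSTEM,
+            RTK_TRUNK_SMAC_HASH_MASK | RTK_TRUNK_SPA_HASH_MASK);
+}
+#endif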
+
+/* Function Name:
+ *      rtk_trunk_distributionAlgorithm_get
+ * Description:
+ *      Get port trunking hash select sources
+ * Input:
+ *      trk_gid - trunk group id
+ * Output:
+ *      pAlgo_bitmask -  Bitmask of the distribution algorithm
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_LA_TRUNK_ID  - Invalid trunking group
+ * Note:
+ *      The API can get port trunking hash algorithm sources.
+ */
+rtk_api_ret_t rtk_trunk_distributionAlgorithm_get(rtk_trunk_group_t trk_gid, rtk_uint32 *pAlgo_bitmask)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_trunk_distributionAlgorithm_get(trk_gid, pAlgo_bitmask);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_trunk_trafficSeparate_set
+ * Description:
+ *      Set the traffic separation setting of a trunk group on the specified device.
+ * Input:
+ *      trk_gid      - trunk group id
+ *      separateType     - traffic separation setting
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK
+ *      RT_ERR_FAILED
+ *      RT_ERR_UNIT_ID     - invalid unit id
+ *      RT_ERR_LA_TRUNK_ID - invalid trunk ID
+ *      RT_ERR_LA_HASHMASK - invalid hash mask
+ * Note:
+ *      SEPARATE_NONE: disable traffic separation
+ *      SEPARATE_FLOOD: trunk MSB link up port is dedicated to TX flooding (L2 lookup miss) traffic
+ */
+rtk_api_ret_t rtk_trunk_trafficSeparate_set(rtk_trunk_group_t trk_gid, rtk_trunk_separateType_t separateType)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_trunk_trafficSeparate_set(trk_gid, separateType);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_trunk_trafficSeparate_get
+ * Description:
+ *      Get the traffic separation setting of a trunk group from the specified device.
+ * Input:
+ *      trk_gid        - trunk group id
+ * Output:
+ *      pSeparateType   - pointer to the separated traffic type
+ * Return:
+ *      RT_ERR_OK
+ *      RT_ERR_FAILED
+ *      RT_ERR_UNIT_ID      - invalid unit id
+ *      RT_ERR_LA_TRUNK_ID  - invalid trunk ID
+ *      RT_ERR_NULL_POINTER - input parameter may be null pointer
+ * Note:
+ *      SEPARATE_NONE: disable traffic separation
+ *      SEPARATE_FLOOD: trunk MSB link up port is dedicated to TX flooding (L2 lookup miss) traffic
+ */
+rtk_api_ret_t rtk_trunk_trafficSeparate_get(rtk_trunk_group_t trk_gid, rtk_trunk_separateType_t *pSeparateType)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_trunk_trafficSeparate_get(trk_gid, pSeparateType);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_trunk_mode_set
+ * Description:
+ *      Set the trunk mode to the specified device.
+ * Input:
+ *      mode - trunk mode
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK
+ *      RT_ERR_FAILED
+ *      RT_ERR_INPUT   - invalid input parameter
+ * Note:
+ *      The trunk modes are as follows:
+ *      - TRUNK_MODE_NORMAL
+ *      - TRUNK_MODE_DUMB
+ */
+rtk_api_ret_t rtk_trunk_mode_set(rtk_trunk_mode_t mode)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_trunk_mode_set(mode);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_trunk_mode_get
+ * Description:
+ *      Get the trunk mode from the specified device.
+ * Input:
+ *      None
+ * Output:
+ *      pMode - pointer to a buffer for the trunk mode
+ * Return:
+ *      RT_ERR_OK
+ *      RT_ERR_FAILED
+ *      RT_ERR_NULL_POINTER - input parameter may be null pointer
+ * Note:
+ *      The trunk modes are as follows:
+ *      - TRUNK_MODE_NORMAL
+ *      - TRUNK_MODE_DUMB
+ */
+rtk_api_ret_t rtk_trunk_mode_get(rtk_trunk_mode_t *pMode)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_trunk_mode_get(pMode);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_trunk_trafficPause_set
+ * Description:
+ *      Set the traffic pause setting of a trunk group.
+ * Input:
+ *      trk_gid      - trunk group id
+ *      enable       - traffic pause state
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK
+ *      RT_ERR_FAILED
+ *      RT_ERR_LA_TRUNK_ID - invalid trunk ID
+ * Note:
+ *      None.
+ */
+rtk_api_ret_t rtk_trunk_trafficPause_set(rtk_trunk_group_t trk_gid, rtk_enable_t enable)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_trunk_trafficPause_set(trk_gid, enable);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_trunk_trafficPause_get
+ * Description:
+ *      Get the traffic pause setting of a trunk group.
+ * Input:
+ *      trk_gid        - trunk group id
+ * Output:
+ *      pEnable        - pointer of traffic pause state.
+ * Return:
+ *      RT_ERR_OK
+ *      RT_ERR_FAILED
+ *      RT_ERR_LA_TRUNK_ID  - invalid trunk ID
+ *      RT_ERR_NULL_POINTER - input parameter may be null pointer
+ * Note:
+ *      None.
+ */
+rtk_api_ret_t rtk_trunk_trafficPause_get(rtk_trunk_group_t trk_gid, rtk_enable_t *pEnable)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_trunk_trafficPause_get(trk_gid, pEnable);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_trunk_hashMappingTable_set
+ * Description:
+ *      Set the hash-value-to-port array of the given trunk group on the specified device.
+ * Input:
+ *      trk_gid          - trunk group id
+ *      pHash2Port_array - ports associated with the hash value
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK
+ *      RT_ERR_FAILED
+ *      RT_ERR_UNIT_ID            - invalid unit id
+ *      RT_ERR_LA_TRUNK_ID        - invalid trunk ID
+ *      RT_ERR_NULL_POINTER       - input parameter may be null pointer
+ *      RT_ERR_LA_TRUNK_NOT_EXIST - the trunk doesn't exist
+ *      RT_ERR_LA_NOT_MEMBER_PORT - the port is not a member port of the trunk
+ *      RT_ERR_LA_CPUPORT         - CPU port can not be aggregated port
+ * Note:
+ *      Trunk groups 0 & 1 share the same hash mapping table.
+ *      Trunk group 2 uses an independent table.
+ */
+rtk_api_ret_t rtk_trunk_hashMappingTable_set(rtk_trunk_group_t trk_gid, rtk_trunk_hashVal2Port_t *pHash2Port_array)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_trunk_hashMappingTable_set(trk_gid, pHash2Port_array);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
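+
+#if 0 /* illustrative sketch, not part of the original SDK */
+/*
+ * Spread the 16 hash values alternately over two member ports of trunk
+ * group 0.  The port indexes 0 and 1 are hypothetical; they must match
+ * the members configured through rtk_trunk_port_set().
+ */
+static rtk_api_ret_t example_trunk_hash_mapping(void)
+{
+    rtk_trunk_hashVal2Port_t map;
+    rtk_uint32 hash;
+
+    for (hash = 0; hash < RTK_MAX_NUM_OF_TRUNK_HASH_VAL; hash++)
+        map.value[hash] = (hash & 1) ? 1 : 0;
+
+    return rtk_trunk_hashMappingTable_set(TRUNK_GROUP0, &map);
+}
+#endif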
+
+/* Function Name:
+ *      rtk_trunk_hashMappingTable_get
+ * Description:
+ *      Get hash value to port array in the trunk group id from the specified device.
+ * Input:
+ *      trk_gid          - trunk group id
+ * Output:
+ *      pHash2Port_array - pointer to a buffer of ports associated with the hash value
+ * Return:
+ *      RT_ERR_OK
+ *      RT_ERR_FAILED
+ *      RT_ERR_UNIT_ID      - invalid unit id
+ *      RT_ERR_LA_TRUNK_ID  - invalid trunk ID
+ *      RT_ERR_NULL_POINTER - input parameter may be null pointer
+ * Note:
+ *      Trunk groups 0 & 1 share the same hash mapping table.
+ *      Trunk group 2 uses an independent table.
+ */
+rtk_api_ret_t rtk_trunk_hashMappingTable_get(rtk_trunk_group_t trk_gid, rtk_trunk_hashVal2Port_t *pHash2Port_array)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_trunk_hashMappingTable_get(trk_gid, pHash2Port_array);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_trunk_portQueueEmpty_get
+ * Description:
+ *      Get the mask of ports whose queues are all empty.
+ * Input:
+ *      None.
+ * Output:
+ *      pEmpty_portmask   - pointer to the empty-port mask
+ * Return:
+ *      RT_ERR_OK
+ *      RT_ERR_FAILED
+ *      RT_ERR_NULL_POINTER - input parameter may be null pointer
+ * Note:
+ *      None.
+ */
+rtk_api_ret_t rtk_trunk_portQueueEmpty_get(rtk_portmask_t *pEmpty_portmask)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_trunk_portQueueEmpty_get(pEmpty_portmask);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+
diff -Nruw linux-4.4.115-fbx/drivers/misc/freebox./rtlapi/trunk.h linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/trunk.h
--- linux-4.4.115-fbx/drivers/misc/freebox./rtlapi/trunk.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/trunk.h	2019-01-22 16:16:24.723257454 +0100
@@ -0,0 +1,330 @@
+/*
+ * Copyright (C) 2013 Realtek Semiconductor Corp.
+ * All Rights Reserved.
+ *
+ * This program is the proprietary software of Realtek Semiconductor
+ * Corporation and/or its licensors, and may only be used, duplicated,
+ * modified or distributed under the authorized license from Realtek.
+ *
+ * ANY USE OF THE SOFTWARE OTHER THAN AS AUTHORIZED UNDER
+ * THIS LICENSE OR COPYRIGHT LAW IS PROHIBITED.
+ *
+ * Purpose : RTL8367/RTL8367C switch high-level API
+ *
+ * Feature : The file includes Trunk module high-layer TRUNK definition
+ *
+ */
+
+#ifndef __RTK_API_TRUNK_H__
+#define __RTK_API_TRUNK_H__
+
+/*
+ * Data Type Declaration
+ */
+#define    RTK_TRUNK_DPORT_HASH_MASK     0x40
+#define    RTK_TRUNK_SPORT_HASH_MASK     0x20
+#define    RTK_TRUNK_DIP_HASH_MASK       0x10
+#define    RTK_TRUNK_SIP_HASH_MASK       0x8
+#define    RTK_TRUNK_DMAC_HASH_MASK      0x4
+#define    RTK_TRUNK_SMAC_HASH_MASK      0x2
+#define    RTK_TRUNK_SPA_HASH_MASK       0x1
+
+
+#define RTK_MAX_NUM_OF_TRUNK_HASH_VAL               16
+
+typedef struct  rtk_trunk_hashVal2Port_s
+{
+    rtk_uint8 value[RTK_MAX_NUM_OF_TRUNK_HASH_VAL];
+} rtk_trunk_hashVal2Port_t;
+
+typedef enum rtk_trunk_group_e
+{
+    TRUNK_GROUP0 = 0,
+    TRUNK_GROUP1,
+    TRUNK_GROUP2,
+    TRUNK_GROUP3,
+    TRUNK_GROUP_END
+} rtk_trunk_group_t;
+
+typedef enum rtk_trunk_separateType_e
+{
+    SEPARATE_NONE = 0,
+    SEPARATE_FLOOD,
+    SEPARATE_END
+} rtk_trunk_separateType_t;
+
+typedef enum rtk_trunk_mode_e
+{
+    TRUNK_MODE_NORMAL = 0,
+    TRUNK_MODE_DUMB,
+    TRUNK_MODE_END
+} rtk_trunk_mode_t;
+
+/* Function Name:
+ *      rtk_trunk_port_set
+ * Description:
+ *      Set trunking group available port mask
+ * Input:
+ *      trk_gid                 - trunk group id
+ *      pTrunk_member_portmask  - Logic trunking member port mask
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_LA_TRUNK_ID  - Invalid trunking group
+ *      RT_ERR_PORT_MASK    - Invalid portmask.
+ * Note:
+ *      The API sets the port trunking group port mask. Each port trunking group supports up to 4 ports.
+ *      If the port mask enables fewer than 2 ports, the trunking group is disabled.
+ */
+extern rtk_api_ret_t rtk_trunk_port_set(rtk_trunk_group_t trk_gid, rtk_portmask_t *pTrunk_member_portmask);
+
+/* Function Name:
+ *      rtk_trunk_port_get
+ * Description:
+ *      Get trunking group available port mask
+ * Input:
+ *      trk_gid - trunk group id
+ * Output:
+ *      pTrunk_member_portmask - Logic trunking member port mask
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_LA_TRUNK_ID  - Invalid trunking group
+ * Note:
+ *      The API gets the member port mask of a port trunking group.
+ */
+extern rtk_api_ret_t rtk_trunk_port_get(rtk_trunk_group_t trk_gid, rtk_portmask_t *pTrunk_member_portmask);
+
+/* Function Name:
+ *      rtk_trunk_distributionAlgorithm_set
+ * Description:
+ *      Set port trunking hash select sources
+ * Input:
+ *      trk_gid         - trunk group id
+ *      algo_bitmask    - Bitmask of the distribution algorithm
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_LA_TRUNK_ID  - Invalid trunking group
+ *      RT_ERR_LA_HASHMASK  - Hash algorithm selection error.
+ *      RT_ERR_PORT_MASK    - Invalid portmask.
+ * Note:
+ *      The API can set port trunking hash algorithm sources.
+ *      A 7-bit mask selects the link aggregation hash parameters:
+ *      - 0b0000001: SPA
+ *      - 0b0000010: SMAC
+ *      - 0b0000100: DMAC
+ *      - 0b0001000: SIP
+ *      - 0b0010000: DIP
+ *      - 0b0100000: TCP/UDP Source Port
+ *      - 0b1000000: TCP/UDP Destination Port
+ *      Example:
+ *      - 0b0000011: SMAC & SPA
+ *      - Any combination of the above bits may be used
+ */
+extern rtk_api_ret_t rtk_trunk_distributionAlgorithm_set(rtk_trunk_group_t trk_gid, rtk_uint32 algo_bitmask);
+
+/* Function Name:
+ *      rtk_trunk_distributionAlgorithm_get
+ * Description:
+ *      Get port trunking hash select sources
+ * Input:
+ *      trk_gid - trunk group id
+ * Output:
+ *      pAlgo_bitmask -  Bitmask of the distribution algorithm
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_LA_TRUNK_ID  - Invalid trunking group
+ * Note:
+ *      The API can get port trunking hash algorithm sources.
+ */
+extern rtk_api_ret_t rtk_trunk_distributionAlgorithm_get(rtk_trunk_group_t trk_gid, rtk_uint32 *pAlgo_bitmask);
+
+/* Function Name:
+ *      rtk_trunk_trafficSeparate_set
+ * Description:
+ *      Set the traffic separation setting of a trunk group on the specified device.
+ * Input:
+ *      trk_gid      - trunk group id
+ *      separateType     - traffic separation setting
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK
+ *      RT_ERR_FAILED
+ *      RT_ERR_UNIT_ID     - invalid unit id
+ *      RT_ERR_LA_TRUNK_ID - invalid trunk ID
+ *      RT_ERR_LA_HASHMASK - invalid hash mask
+ * Note:
+ *      SEPARATE_NONE: disable traffic separation
+ *      SEPARATE_FLOOD: trunk MSB link up port is dedicated to TX flooding (L2 lookup miss) traffic
+ */
+extern rtk_api_ret_t rtk_trunk_trafficSeparate_set(rtk_trunk_group_t trk_gid, rtk_trunk_separateType_t separateType);
+
+/* Function Name:
+ *      rtk_trunk_trafficSeparate_get
+ * Description:
+ *      Get the traffic separation setting of a trunk group from the specified device.
+ * Input:
+ *      trk_gid        - trunk group id
+ * Output:
+ *      pSeparateType   - pointer to the separated traffic type
+ * Return:
+ *      RT_ERR_OK
+ *      RT_ERR_FAILED
+ *      RT_ERR_UNIT_ID      - invalid unit id
+ *      RT_ERR_LA_TRUNK_ID  - invalid trunk ID
+ *      RT_ERR_NULL_POINTER - input parameter may be null pointer
+ * Note:
+ *      SEPARATE_NONE: disable traffic separation
+ *      SEPARATE_FLOOD: trunk MSB link up port is dedicated to TX flooding (L2 lookup miss) traffic
+ */
+extern rtk_api_ret_t rtk_trunk_trafficSeparate_get(rtk_trunk_group_t trk_gid, rtk_trunk_separateType_t *pSeparateType);
+
+
+/* Function Name:
+ *      rtk_trunk_mode_set
+ * Description:
+ *      Set the trunk mode to the specified device.
+ * Input:
+ *      mode - trunk mode
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK
+ *      RT_ERR_FAILED
+ *      RT_ERR_INPUT   - invalid input parameter
+ * Note:
+ *      The trunk modes are as follows:
+ *      - TRUNK_MODE_NORMAL
+ *      - TRUNK_MODE_DUMB
+ */
+extern rtk_api_ret_t rtk_trunk_mode_set(rtk_trunk_mode_t mode);
+
+/* Function Name:
+ *      rtk_trunk_mode_get
+ * Description:
+ *      Get the trunk mode from the specified device.
+ * Input:
+ *      None
+ * Output:
+ *      pMode - pointer to a buffer for the trunk mode
+ * Return:
+ *      RT_ERR_OK
+ *      RT_ERR_FAILED
+ *      RT_ERR_NULL_POINTER - input parameter may be null pointer
+ * Note:
+ *      The trunk modes are as follows:
+ *      - TRUNK_MODE_NORMAL
+ *      - TRUNK_MODE_DUMB
+ */
+extern rtk_api_ret_t rtk_trunk_mode_get(rtk_trunk_mode_t *pMode);
+
+/* Function Name:
+ *      rtk_trunk_trafficPause_set
+ * Description:
+ *      Set the traffic pause setting of a trunk group.
+ * Input:
+ *      trk_gid      - trunk group id
+ *      enable       - traffic pause state
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK
+ *      RT_ERR_FAILED
+ *      RT_ERR_LA_TRUNK_ID - invalid trunk ID
+ * Note:
+ *      None.
+ */
+extern rtk_api_ret_t rtk_trunk_trafficPause_set(rtk_trunk_group_t trk_gid, rtk_enable_t enable);
+
+/* Function Name:
+ *      rtk_trunk_trafficPause_get
+ * Description:
+ *      Get the traffic pause setting of a trunk group.
+ * Input:
+ *      trk_gid        - trunk group id
+ * Output:
+ *      pEnable        - pointer of traffic pause state.
+ * Return:
+ *      RT_ERR_OK
+ *      RT_ERR_FAILED
+ *      RT_ERR_LA_TRUNK_ID  - invalid trunk ID
+ *      RT_ERR_NULL_POINTER - input parameter may be null pointer
+ * Note:
+ *      None.
+ */
+extern rtk_api_ret_t rtk_trunk_trafficPause_get(rtk_trunk_group_t trk_gid, rtk_enable_t *pEnable);
+
+/* Function Name:
+ *      rtk_trunk_hashMappingTable_set
+ * Description:
+ *      Set the hash-value-to-port array of the given trunk group on the specified device.
+ * Input:
+ *      trk_gid          - trunk group id
+ *      pHash2Port_array - ports associated with the hash value
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK
+ *      RT_ERR_FAILED
+ *      RT_ERR_UNIT_ID            - invalid unit id
+ *      RT_ERR_LA_TRUNK_ID        - invalid trunk ID
+ *      RT_ERR_NULL_POINTER       - input parameter may be null pointer
+ *      RT_ERR_LA_TRUNK_NOT_EXIST - the trunk doesn't exist
+ *      RT_ERR_LA_NOT_MEMBER_PORT - the port is not a member port of the trunk
+ *      RT_ERR_LA_CPUPORT         - CPU port can not be aggregated port
+ * Note:
+ *      Trunk groups 0 & 1 share the same hash mapping table.
+ *      Trunk group 2 uses an independent table.
+ */
+extern rtk_api_ret_t rtk_trunk_hashMappingTable_set(rtk_trunk_group_t trk_gid, rtk_trunk_hashVal2Port_t *pHash2Port_array);
+
+/* Function Name:
+ *      rtk_trunk_hashMappingTable_get
+ * Description:
+ *      Get hash value to port array in the trunk group id from the specified device.
+ * Input:
+ *      trk_gid          - trunk group id
+ * Output:
+ *      pHash2Port_array - pointer to a buffer of ports associated with the hash value
+ * Return:
+ *      RT_ERR_OK
+ *      RT_ERR_FAILED
+ *      RT_ERR_UNIT_ID      - invalid unit id
+ *      RT_ERR_LA_TRUNK_ID  - invalid trunk ID
+ *      RT_ERR_NULL_POINTER - input parameter may be null pointer
+ * Note:
+ *      Trunk groups 0 & 1 share the same hash mapping table.
+ *      Trunk group 2 uses an independent table.
+ */
+extern rtk_api_ret_t rtk_trunk_hashMappingTable_get(rtk_trunk_group_t trk_gid, rtk_trunk_hashVal2Port_t *pHash2Port_array);
+
+/* Function Name:
+ *      rtk_trunk_portQueueEmpty_get
+ * Description:
+ *      Get the mask of ports whose queues are all empty.
+ * Input:
+ *      None.
+ * Output:
+ *      pEmpty_portmask   - pointer to the empty-port mask
+ * Return:
+ *      RT_ERR_OK
+ *      RT_ERR_FAILED
+ *      RT_ERR_NULL_POINTER - input parameter may be null pointer
+ * Note:
+ *      None.
+ */
+extern rtk_api_ret_t rtk_trunk_portQueueEmpty_get(rtk_portmask_t *pEmpty_portmask);
+
+#endif /* __RTK_API_TRUNK_H__ */
diff -Nruw linux-4.4.115-fbx/drivers/misc/freebox./rtlapi/vlan.c linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/vlan.c
--- linux-4.4.115-fbx/drivers/misc/freebox./rtlapi/vlan.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/vlan.c	2019-01-22 16:16:24.723257454 +0100
@@ -0,0 +1,2540 @@
+/*
+ * Copyright (C) 2013 Realtek Semiconductor Corp.
+ * All Rights Reserved.
+ *
+ * This program is the proprietary software of Realtek Semiconductor
+ * Corporation and/or its licensors, and may only be used, duplicated,
+ * modified or distributed under the authorized license from Realtek.
+ *
+ * ANY USE OF THE SOFTWARE OTHER THAN AS AUTHORIZED UNDER
+ * THIS LICENSE OR COPYRIGHT LAW IS PROHIBITED.
+ *
+ * $Revision: 79926 $
+ * $Date: 2017-06-21 17:01:03 +0800 (Wed, 21 Jun 2017) $
+ *
+ * Purpose : RTK switch high-level API for RTL8367/RTL8367C
+ * Feature : Here is a list of all functions and variables in VLAN module.
+ *
+ */
+
+#include <rtk_switch.h>
+#include <rtk_error.h>
+#include <vlan.h>
+#include <rate.h>
+#include <string.h>
+
+#include <rtl8367c_asicdrv.h>
+#include <rtl8367c_asicdrv_vlan.h>
+#include <rtl8367c_asicdrv_dot1x.h>
+
+typedef enum vlan_mbrCfgType_e
+{
+    MBRCFG_UNUSED = 0,
+    MBRCFG_USED_BY_VLAN,
+    MBRCFG_END
+} vlan_mbrCfgType_t;
+
+static rtk_vlan_t           vlan_mbrCfgVid[RTL8367C_CVIDXNO];
+static vlan_mbrCfgType_t    vlan_mbrCfgUsage[RTL8367C_CVIDXNO];
+
+static rtk_api_ret_t _rtk_vlan_init(void)
+{
+    rtk_api_ret_t retVal;
+    rtk_uint32 i;
+    rtl8367c_user_vlan4kentry vlan4K;
+    rtl8367c_vlanconfiguser vlanMC;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    /* Clean Database */
+    memset(vlan_mbrCfgVid, 0x00, sizeof(rtk_vlan_t) * RTL8367C_CVIDXNO);
+    memset(vlan_mbrCfgUsage, 0x00, sizeof(vlan_mbrCfgType_t) * RTL8367C_CVIDXNO);
+
+    /* clean 32 VLAN member configuration */
+    for (i = 0; i <= RTL8367C_CVIDXMAX; i++)
+    {
+        vlanMC.evid = 0;
+        vlanMC.mbr = 0;
+        vlanMC.fid_msti = 0;
+        vlanMC.envlanpol = 0;
+        vlanMC.meteridx = 0;
+        vlanMC.vbpen = 0;
+        vlanMC.vbpri = 0;
+        if ((retVal = rtl8367c_setAsicVlanMemberConfig(i, &vlanMC)) != RT_ERR_OK)
+            return retVal;
+    }
+
+    /* Set a default VLAN with vid 1 to 4K table for all ports */
+    memset(&vlan4K, 0, sizeof(rtl8367c_user_vlan4kentry));
+    vlan4K.vid = 1;
+    vlan4K.mbr = RTK_PHY_PORTMASK_ALL;
+    vlan4K.untag = RTK_PHY_PORTMASK_ALL;
+    vlan4K.fid_msti = 0;
+    if ((retVal = rtl8367c_setAsicVlan4kEntry(&vlan4K)) != RT_ERR_OK)
+        return retVal;
+
+    /* Also set the default VLAN to 32 member configuration index 0 */
+    memset(&vlanMC, 0, sizeof(rtl8367c_vlanconfiguser));
+    vlanMC.evid = 1;
+    vlanMC.mbr = RTK_PHY_PORTMASK_ALL;
+    vlanMC.fid_msti = 0;
+    if ((retVal = rtl8367c_setAsicVlanMemberConfig(0, &vlanMC)) != RT_ERR_OK)
+            return retVal;
+
+    /* Set all ports PVID to default VLAN and tag-mode to original */
+    RTK_SCAN_ALL_PHY_PORTMASK(i)
+    {
+        if ((retVal = rtl8367c_setAsicVlanPortBasedVID(i, 0, 0)) != RT_ERR_OK)
+            return retVal;
+        if ((retVal = rtl8367c_setAsicVlanEgressTagMode(i, EG_TAG_MODE_ORI)) != RT_ERR_OK)
+            return retVal;
+    }
+
+    /* Update database */
+    vlan_mbrCfgUsage[0] = MBRCFG_USED_BY_VLAN;
+    vlan_mbrCfgVid[0] = 1;
+
+    /* Enable Ingress filter */
+    RTK_SCAN_ALL_PHY_PORTMASK(i)
+    {
+        if ((retVal = rtl8367c_setAsicVlanIngressFilter(i, ENABLED)) != RT_ERR_OK)
+            return retVal;
+    }
+
+    /* enable VLAN */
+    if ((retVal = rtl8367c_setAsicVlanFilter(ENABLED)) != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+
+static rtk_api_ret_t _rtk_vlan_set(rtk_vlan_t vid, rtk_vlan_cfg_t *pVlanCfg)
+{
+    rtk_api_ret_t retVal;
+    rtk_uint32 phyMbrPmask;
+    rtk_uint32 phyUntagPmask;
+    rtl8367c_user_vlan4kentry vlan4K;
+    rtl8367c_vlanconfiguser vlanMC;
+    rtk_uint32 idx;
+    rtk_uint32 empty_index = 0xffff;
+    rtk_uint32 update_evid = 0;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    /* vid must be 0~8191 */
+    if (vid > RTL8367C_EVIDMAX)
+        return RT_ERR_VLAN_VID;
+
+    /* Null pointer check */
+    if(NULL == pVlanCfg)
+        return RT_ERR_NULL_POINTER;
+
+    /* Check port mask valid */
+    RTK_CHK_PORTMASK_VALID(&(pVlanCfg->mbr));
+
+    if (vid <= RTL8367C_VIDMAX)
+    {
+        /* Check untag port mask valid */
+        RTK_CHK_PORTMASK_VALID(&(pVlanCfg->untag));
+    }
+
+    /* IVL_EN */
+    if(pVlanCfg->ivl_en >= RTK_ENABLE_END)
+        return RT_ERR_ENABLE;
+
+    /* fid must be 0~15 */
+    if(pVlanCfg->fid_msti > RTL8367C_FIDMAX)
+        return RT_ERR_L2_FID;
+
+    /* Policing */
+    if(pVlanCfg->envlanpol >= RTK_ENABLE_END)
+        return RT_ERR_ENABLE;
+
+    /* Meter ID */
+    if(pVlanCfg->meteridx > RTK_MAX_METER_ID)
+        return RT_ERR_INPUT;
+
+    /* VLAN based priority */
+    if(pVlanCfg->vbpen >= RTK_ENABLE_END)
+        return RT_ERR_ENABLE;
+
+    /* Priority */
+    if(pVlanCfg->vbpri > RTL8367C_PRIMAX)
+        return RT_ERR_INPUT;
+
+    /* Get physical port mask */
+    if(rtk_switch_portmask_L2P_get(&(pVlanCfg->mbr), &phyMbrPmask) != RT_ERR_OK)
+        return RT_ERR_FAILED;
+
+    if(rtk_switch_portmask_L2P_get(&(pVlanCfg->untag), &phyUntagPmask) != RT_ERR_OK)
+        return RT_ERR_FAILED;
+
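+    /*
+     * VIDs 0..4095 are stored in the 4K VLAN table; extended VIDs 4096..8191
+     * can only be carried by one of the 32 member-configuration entries,
+     * which the else branch below manages.
+     */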
+    if (vid <= RTL8367C_VIDMAX)
+    {
+        /* update 4K table */
+        memset(&vlan4K, 0, sizeof(rtl8367c_user_vlan4kentry));
+        vlan4K.vid = vid;
+
+        vlan4K.mbr    = (phyMbrPmask & 0xFFFF);
+        vlan4K.untag  = (phyUntagPmask & 0xFFFF);
+
+        vlan4K.ivl_svl      = pVlanCfg->ivl_en;
+        vlan4K.fid_msti     = pVlanCfg->fid_msti;
+        vlan4K.envlanpol    = pVlanCfg->envlanpol;
+        vlan4K.meteridx     = pVlanCfg->meteridx;
+        vlan4K.vbpen        = pVlanCfg->vbpen;
+        vlan4K.vbpri        = pVlanCfg->vbpri;
+
+        if ((retVal = rtl8367c_setAsicVlan4kEntry(&vlan4K)) != RT_ERR_OK)
+            return retVal;
+
+        /* Update Member configuration if exist */
+        for (idx = 0; idx <= RTL8367C_CVIDXMAX; idx++)
+        {
+            if(vlan_mbrCfgUsage[idx] == MBRCFG_USED_BY_VLAN)
+            {
+                if(vlan_mbrCfgVid[idx] == vid)
+                {
+                    /* Found! Update */
+                    if(phyMbrPmask == 0x00)
+                    {
+                        /* Member port = 0x00, delete this VLAN from Member Configuration */
+                        memset(&vlanMC, 0x00, sizeof(rtl8367c_vlanconfiguser));
+                        if ((retVal = rtl8367c_setAsicVlanMemberConfig(idx, &vlanMC)) != RT_ERR_OK)
+                            return retVal;
+
+                        /* Clear Database */
+                        vlan_mbrCfgUsage[idx] = MBRCFG_UNUSED;
+                        vlan_mbrCfgVid[idx]   = 0;
+                    }
+                    else
+                    {
+                        /* Normal VLAN config, update to member configuration */
+                        vlanMC.evid = vid;
+                        vlanMC.mbr = vlan4K.mbr;
+                        vlanMC.fid_msti = vlan4K.fid_msti;
+                        vlanMC.meteridx = vlan4K.meteridx;
+                        vlanMC.envlanpol= vlan4K.envlanpol;
+                        vlanMC.vbpen = vlan4K.vbpen;
+                        vlanMC.vbpri = vlan4K.vbpri;
+                        if ((retVal = rtl8367c_setAsicVlanMemberConfig(idx, &vlanMC)) != RT_ERR_OK)
+                            return retVal;
+                    }
+
+                    break;
+                }
+            }
+        }
+    }
+    else
+    {
+        /* vid > 4095 */
+        for (idx = 0; idx <= RTL8367C_CVIDXMAX; idx++)
+        {
+            if(vlan_mbrCfgUsage[idx] == MBRCFG_USED_BY_VLAN)
+            {
+                if(vlan_mbrCfgVid[idx] == vid)
+                {
+                    /* Found! Update */
+                    if(phyMbrPmask == 0x00)
+                    {
+                        /* Member port = 0x00, delete this VLAN from Member Configuration */
+                        memset(&vlanMC, 0x00, sizeof(rtl8367c_vlanconfiguser));
+                        if ((retVal = rtl8367c_setAsicVlanMemberConfig(idx, &vlanMC)) != RT_ERR_OK)
+                            return retVal;
+
+                        /* Clear Database */
+                        vlan_mbrCfgUsage[idx] = MBRCFG_UNUSED;
+                        vlan_mbrCfgVid[idx]   = 0;
+                    }
+                    else
+                    {
+                        /* Normal VLAN config, update to member configuration */
+                        vlanMC.evid = vid;
+                        vlanMC.mbr = phyMbrPmask;
+                        vlanMC.fid_msti = pVlanCfg->fid_msti;
+                        vlanMC.meteridx = pVlanCfg->meteridx;
+                        vlanMC.envlanpol= pVlanCfg->envlanpol;
+                        vlanMC.vbpen = pVlanCfg->vbpen;
+                        vlanMC.vbpri = pVlanCfg->vbpri;
+                        if ((retVal = rtl8367c_setAsicVlanMemberConfig(idx, &vlanMC)) != RT_ERR_OK)
+                            return retVal;
+                    }
+
+                    /* Mark the EVID as handled before leaving the loop, so the
+                     * allocation code below does not create a duplicate entry. */
+                    update_evid = 1;
+                    break;
+                }
+            }
+
+            if(vlan_mbrCfgUsage[idx] == MBRCFG_UNUSED)
+            {
+                if(0xffff == empty_index)
+                    empty_index = idx;
+            }
+        }
+
+        /* No matching EVID entry was found; allocate an empty member configuration entry if one exists */
+        if( (phyMbrPmask != 0x00) && (update_evid == 0) && (empty_index != 0xFFFF) )
+        {
+            vlanMC.evid = vid;
+            vlanMC.mbr = phyMbrPmask;
+            vlanMC.fid_msti = pVlanCfg->fid_msti;
+            vlanMC.meteridx = pVlanCfg->meteridx;
+            vlanMC.envlanpol= pVlanCfg->envlanpol;
+            vlanMC.vbpen = pVlanCfg->vbpen;
+            vlanMC.vbpri = pVlanCfg->vbpri;
+            if ((retVal = rtl8367c_setAsicVlanMemberConfig(empty_index, &vlanMC)) != RT_ERR_OK)
+                return retVal;
+
+            vlan_mbrCfgUsage[empty_index] = MBRCFG_USED_BY_VLAN;
+            vlan_mbrCfgVid[empty_index] = vid;
+
+        }
+    }
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_vlan_get(rtk_vlan_t vid, rtk_vlan_cfg_t *pVlanCfg)
+{
+    rtk_api_ret_t retVal;
+    rtk_uint32 phyMbrPmask;
+    rtk_uint32 phyUntagPmask;
+    rtl8367c_user_vlan4kentry vlan4K;
+    rtl8367c_vlanconfiguser vlanMC;
+    rtk_uint32 idx;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    /* vid must be 0~8191 */
+    if (vid > RTL8367C_EVIDMAX)
+        return RT_ERR_VLAN_VID;
+
+    /* Null pointer check */
+    if(NULL == pVlanCfg)
+        return RT_ERR_NULL_POINTER;
+
+    if (vid <= RTL8367C_VIDMAX)
+    {
+        vlan4K.vid = vid;
+
+        if ((retVal = rtl8367c_getAsicVlan4kEntry(&vlan4K)) != RT_ERR_OK)
+            return retVal;
+
+        phyMbrPmask   = vlan4K.mbr;
+        phyUntagPmask = vlan4K.untag;
+        if(rtk_switch_portmask_P2L_get(phyMbrPmask, &(pVlanCfg->mbr)) != RT_ERR_OK)
+            return RT_ERR_FAILED;
+
+        if(rtk_switch_portmask_P2L_get(phyUntagPmask, &(pVlanCfg->untag)) != RT_ERR_OK)
+            return RT_ERR_FAILED;
+
+        pVlanCfg->ivl_en    = vlan4K.ivl_svl;
+        pVlanCfg->fid_msti  = vlan4K.fid_msti;
+        pVlanCfg->envlanpol = vlan4K.envlanpol;
+        pVlanCfg->meteridx  = vlan4K.meteridx;
+        pVlanCfg->vbpen     = vlan4K.vbpen;
+        pVlanCfg->vbpri     = vlan4K.vbpri;
+    }
+    else
+    {
+        for (idx = 0; idx <= RTL8367C_CVIDXMAX; idx++)
+        {
+            if(vlan_mbrCfgUsage[idx] == MBRCFG_USED_BY_VLAN)
+            {
+                if(vlan_mbrCfgVid[idx] == vid)
+                {
+                    if ((retVal = rtl8367c_getAsicVlanMemberConfig(idx, &vlanMC)) != RT_ERR_OK)
+                        return retVal;
+
+                    phyMbrPmask   = vlanMC.mbr;
+                    if(rtk_switch_portmask_P2L_get(phyMbrPmask, &(pVlanCfg->mbr)) != RT_ERR_OK)
+                        return RT_ERR_FAILED;
+
+                    pVlanCfg->untag.bits[0] = 0;
+                    pVlanCfg->ivl_en    = 0;
+                    pVlanCfg->fid_msti  = vlanMC.fid_msti;
+                    pVlanCfg->envlanpol = vlanMC.envlanpol;
+                    pVlanCfg->meteridx  = vlanMC.meteridx;
+                    pVlanCfg->vbpen     = vlanMC.vbpen;
+                    pVlanCfg->vbpri     = vlanMC.vbpri;
+                }
+            }
+        }
+    }
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_vlan_egrFilterEnable_set(rtk_enable_t egrFilter)
+{
+    rtk_api_ret_t retVal;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if(egrFilter >= RTK_ENABLE_END)
+        return RT_ERR_ENABLE;
+
+    /* enable VLAN */
+    if ((retVal = rtl8367c_setAsicVlanFilter((rtk_uint32)egrFilter)) != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_vlan_egrFilterEnable_get(rtk_enable_t *pEgrFilter)
+{
+    rtk_api_ret_t retVal;
+    rtk_uint32 state;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if(NULL == pEgrFilter)
+        return RT_ERR_NULL_POINTER;
+
+    /* enable VLAN */
+    if ((retVal = rtl8367c_getAsicVlanFilter(&state)) != RT_ERR_OK)
+        return retVal;
+
+    *pEgrFilter = (rtk_enable_t)state;
+    return RT_ERR_OK;
+}
+
+
+static rtk_api_ret_t _rtk_vlan_mbrCfg_set(rtk_uint32 idx, rtk_vlan_mbrcfg_t *pMbrcfg)
+{
+    rtk_api_ret_t           retVal;
+    rtk_uint32              phyMbrPmask;
+    rtl8367c_vlanconfiguser mbrCfg;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    /* Error check */
+    if(pMbrcfg == NULL)
+        return RT_ERR_NULL_POINTER;
+
+    if(idx > RTL8367C_CVIDXMAX)
+        return RT_ERR_INPUT;
+
+    if(pMbrcfg->evid > RTL8367C_EVIDMAX)
+        return RT_ERR_INPUT;
+
+    if(pMbrcfg->fid_msti > RTL8367C_FIDMAX)
+        return RT_ERR_L2_FID;
+
+    if(pMbrcfg->envlanpol >= RTK_ENABLE_END)
+        return RT_ERR_ENABLE;
+
+    if(pMbrcfg->meteridx > RTK_MAX_METER_ID)
+        return RT_ERR_FILTER_METER_ID;
+
+    if(pMbrcfg->vbpen >= RTK_ENABLE_END)
+        return RT_ERR_ENABLE;
+
+    if(pMbrcfg->vbpri > RTL8367C_PRIMAX)
+        return RT_ERR_QOS_INT_PRIORITY;
+
+    /* Check port mask valid */
+    RTK_CHK_PORTMASK_VALID(&(pMbrcfg->mbr));
+
+    mbrCfg.evid         = pMbrcfg->evid;
+    mbrCfg.fid_msti     = pMbrcfg->fid_msti;
+    mbrCfg.envlanpol    = pMbrcfg->envlanpol;
+    mbrCfg.meteridx     = pMbrcfg->meteridx;
+    mbrCfg.vbpen        = pMbrcfg->vbpen;
+    mbrCfg.vbpri        = pMbrcfg->vbpri;
+
+    if(rtk_switch_portmask_L2P_get(&(pMbrcfg->mbr), &phyMbrPmask) != RT_ERR_OK)
+        return RT_ERR_FAILED;
+
+    mbrCfg.mbr = phyMbrPmask;
+
+    if ((retVal = rtl8367c_setAsicVlanMemberConfig(idx, &mbrCfg)) != RT_ERR_OK)
+        return retVal;
+
+    /* Update Database */
+    if( (mbrCfg.evid == 0) && (mbrCfg.mbr == 0) )
+    {
+        vlan_mbrCfgUsage[idx] = MBRCFG_UNUSED;
+        vlan_mbrCfgVid[idx] = 0;
+    }
+    else
+    {
+        vlan_mbrCfgUsage[idx] = MBRCFG_USED_BY_VLAN;
+        vlan_mbrCfgVid[idx] = mbrCfg.evid;
+    }
+
+    return RT_ERR_OK;
+
+}
+
+static rtk_api_ret_t _rtk_vlan_mbrCfg_get(rtk_uint32 idx, rtk_vlan_mbrcfg_t *pMbrcfg)
+{
+    rtk_api_ret_t           retVal;
+    rtk_uint32              phyMbrPmask;
+    rtl8367c_vlanconfiguser mbrCfg;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    /* Error check */
+    if(pMbrcfg == NULL)
+        return RT_ERR_NULL_POINTER;
+
+    if(idx > RTL8367C_CVIDXMAX)
+        return RT_ERR_INPUT;
+
+    memset(&mbrCfg, 0x00, sizeof(rtl8367c_vlanconfiguser));
+    if ((retVal = rtl8367c_getAsicVlanMemberConfig(idx, &mbrCfg)) != RT_ERR_OK)
+        return retVal;
+
+    pMbrcfg->evid       = mbrCfg.evid;
+    pMbrcfg->fid_msti   = mbrCfg.fid_msti;
+    pMbrcfg->envlanpol  = mbrCfg.envlanpol;
+    pMbrcfg->meteridx   = mbrCfg.meteridx;
+    pMbrcfg->vbpen      = mbrCfg.vbpen;
+    pMbrcfg->vbpri      = mbrCfg.vbpri;
+
+    phyMbrPmask = mbrCfg.mbr;
+    if(rtk_switch_portmask_P2L_get(phyMbrPmask, &(pMbrcfg->mbr)) != RT_ERR_OK)
+        return RT_ERR_FAILED;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_vlan_portPvid_set(rtk_port_t port, rtk_vlan_t pvid, rtk_pri_t priority)
+{
+    rtk_api_ret_t retVal;
+    rtk_uint32 index;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    /* Check Port Valid */
+    RTK_CHK_PORT_VALID(port);
+
+    /* vid must be 0~8191 */
+    if (pvid > RTL8367C_EVIDMAX)
+        return RT_ERR_VLAN_VID;
+
+    /* priority must be 0~7 */
+    if (priority > RTL8367C_PRIMAX)
+        return RT_ERR_VLAN_PRIORITY;
+
+    if((retVal = rtk_vlan_checkAndCreateMbr(pvid, &index)) != RT_ERR_OK)
+        return retVal;
+
+    if ((retVal = rtl8367c_setAsicVlanPortBasedVID(rtk_switch_port_L2P_get(port), index, priority)) != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+
+static rtk_api_ret_t _rtk_vlan_portPvid_get(rtk_port_t port, rtk_vlan_t *pPvid, rtk_pri_t *pPriority)
+{
+    rtk_api_ret_t retVal;
+    rtk_uint32 index, pri;
+    rtl8367c_vlanconfiguser mbrCfg;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    /* Check Port Valid */
+    RTK_CHK_PORT_VALID(port);
+
+    if(NULL == pPvid)
+        return RT_ERR_NULL_POINTER;
+
+    if(NULL == pPriority)
+        return RT_ERR_NULL_POINTER;
+
+    if ((retVal = rtl8367c_getAsicVlanPortBasedVID(rtk_switch_port_L2P_get(port), &index, &pri)) != RT_ERR_OK)
+        return retVal;
+
+    memset(&mbrCfg, 0x00, sizeof(rtl8367c_vlanconfiguser));
+    if ((retVal = rtl8367c_getAsicVlanMemberConfig(index, &mbrCfg)) != RT_ERR_OK)
+        return retVal;
+
+    *pPvid = mbrCfg.evid;
+    *pPriority = pri;
+
+    return RT_ERR_OK;
+}
+
+
+static rtk_api_ret_t _rtk_vlan_portIgrFilterEnable_set(rtk_port_t port, rtk_enable_t igr_filter)
+{
+    rtk_api_ret_t retVal;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    /* Check Port Valid */
+    RTK_CHK_PORT_VALID(port);
+
+    if (igr_filter >= RTK_ENABLE_END)
+        return RT_ERR_ENABLE;
+
+    if ((retVal = rtl8367c_setAsicVlanIngressFilter(rtk_switch_port_L2P_get(port), igr_filter)) != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+
+static rtk_api_ret_t _rtk_vlan_portIgrFilterEnable_get(rtk_port_t port, rtk_enable_t *pIgr_filter)
+{
+    rtk_api_ret_t retVal;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    /* Check Port Valid */
+    RTK_CHK_PORT_VALID(port);
+
+    if(NULL == pIgr_filter)
+        return RT_ERR_NULL_POINTER;
+
+    if ((retVal = rtl8367c_getAsicVlanIngressFilter(rtk_switch_port_L2P_get(port), pIgr_filter)) != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_vlan_portAcceptFrameType_set(rtk_port_t port, rtk_vlan_acceptFrameType_t accept_frame_type)
+{
+    rtk_api_ret_t retVal;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    /* Check Port Valid */
+    RTK_CHK_PORT_VALID(port);
+
+    if (accept_frame_type >= ACCEPT_FRAME_TYPE_END)
+        return RT_ERR_VLAN_ACCEPT_FRAME_TYPE;
+
+    if ((retVal = rtl8367c_setAsicVlanAccpetFrameType(rtk_switch_port_L2P_get(port), (rtl8367c_accframetype)accept_frame_type)) != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_vlan_portAcceptFrameType_get(rtk_port_t port, rtk_vlan_acceptFrameType_t *pAccept_frame_type)
+{
+    rtk_api_ret_t retVal;
+    rtl8367c_accframetype   acc_frm_type;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    /* Check Port Valid */
+    RTK_CHK_PORT_VALID(port);
+
+    if(NULL == pAccept_frame_type)
+        return RT_ERR_NULL_POINTER;
+
+    if ((retVal = rtl8367c_getAsicVlanAccpetFrameType(rtk_switch_port_L2P_get(port), &acc_frm_type)) != RT_ERR_OK)
+        return retVal;
+
+    *pAccept_frame_type = (rtk_vlan_acceptFrameType_t)acc_frm_type;
+
+    return RT_ERR_OK;
+}
+
+
+static rtk_api_ret_t _rtk_vlan_protoAndPortBasedVlan_add(rtk_port_t port, rtk_vlan_protoAndPortInfo_t *pInfo)
+{
+    rtk_api_ret_t retVal, i;    /* i must be signed: the scan below counts down past 0 */
+    rtk_uint32 exist, empty, used, index;
+    rtl8367c_protocolgdatacfg ppb_data_cfg;
+    rtl8367c_protocolvlancfg ppb_vlan_cfg;
+    rtl8367c_provlan_frametype tmp;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    /* Check Port Valid */
+    RTK_CHK_PORT_VALID(port);
+
+    if(NULL == pInfo)
+        return RT_ERR_NULL_POINTER;
+
+    if (pInfo->proto_type > RTK_MAX_NUM_OF_PROTO_TYPE)
+        return RT_ERR_OUT_OF_RANGE;
+
+    if (pInfo->frame_type >= FRAME_TYPE_END)
+        return RT_ERR_OUT_OF_RANGE;
+
+    if (pInfo->cvid > RTL8367C_VIDMAX)
+        return RT_ERR_VLAN_VID;
+
+    if (pInfo->cpri > RTL8367C_PRIMAX)
+        return RT_ERR_VLAN_PRIORITY;
+
+    exist = 0xFF;
+    empty = 0xFF;
+    for (i = RTL8367C_PROTOVLAN_GIDX_MAX; i >= 0; i--)
+    {
+        if ((retVal = rtl8367c_getAsicVlanProtocolBasedGroupData(i, &ppb_data_cfg)) != RT_ERR_OK)
+            return retVal;
+        tmp = pInfo->frame_type;
+        if (ppb_data_cfg.etherType == pInfo->proto_type && ppb_data_cfg.frameType == tmp)
+        {
+            /*Already exist*/
+            exist = i;
+            break;
+        }
+        else if (ppb_data_cfg.etherType == 0 && ppb_data_cfg.frameType == 0)
+        {
+            /*find empty index*/
+            empty = i;
+        }
+    }
+
+    used = 0xFF;
+    /*No empty and exist index*/
+    if (0xFF == exist && 0xFF == empty)
+        return RT_ERR_TBL_FULL;
+    else if (exist < RTL8367C_PROTOVLAN_GROUPNO)
+    {
+        /* existing index */
+        used = exist;
+    }
+    else if (empty < RTL8367C_PROTOVLAN_GROUPNO)
+    {
+        /*No exist index, but have empty index*/
+        ppb_data_cfg.frameType = pInfo->frame_type;
+        ppb_data_cfg.etherType = pInfo->proto_type;
+        if ((retVal = rtl8367c_setAsicVlanProtocolBasedGroupData(empty, &ppb_data_cfg)) != RT_ERR_OK)
+            return retVal;
+        used = empty;
+    }
+    else
+        return RT_ERR_FAILED;
+
+    if((retVal = rtk_vlan_checkAndCreateMbr(pInfo->cvid, &index)) != RT_ERR_OK)
+        return retVal;
+
+    ppb_vlan_cfg.vlan_idx = index;
+    ppb_vlan_cfg.valid = TRUE;
+    ppb_vlan_cfg.priority = pInfo->cpri;
+    if ((retVal = rtl8367c_setAsicVlanPortAndProtocolBased(rtk_switch_port_L2P_get(port), used, &ppb_vlan_cfg)) != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+
+static rtk_api_ret_t _rtk_vlan_protoAndPortBasedVlan_get(rtk_port_t port, rtk_vlan_proto_type_t proto_type, rtk_vlan_protoVlan_frameType_t frame_type, rtk_vlan_protoAndPortInfo_t *pInfo)
+{
+    rtk_api_ret_t retVal;
+    rtk_uint32 i;
+    rtk_uint32 ppb_idx;
+    rtl8367c_protocolgdatacfg ppb_data_cfg;
+    rtl8367c_protocolvlancfg ppb_vlan_cfg;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    /* Check Port Valid */
+    RTK_CHK_PORT_VALID(port);
+
+    if (proto_type > RTK_MAX_NUM_OF_PROTO_TYPE)
+        return RT_ERR_OUT_OF_RANGE;
+
+    if (frame_type >= FRAME_TYPE_END)
+        return RT_ERR_OUT_OF_RANGE;
+
+    ppb_idx = 0;
+
+    for (i = 0; i <= RTL8367C_PROTOVLAN_GIDX_MAX; i++)
+    {
+        if ((retVal = rtl8367c_getAsicVlanProtocolBasedGroupData(i, &ppb_data_cfg)) != RT_ERR_OK)
+            return retVal;
+
+        if ( (ppb_data_cfg.frameType == (rtl8367c_provlan_frametype)frame_type) && (ppb_data_cfg.etherType == proto_type) )
+        {
+            ppb_idx = i;
+            break;
+        }
+        else if (RTL8367C_PROTOVLAN_GIDX_MAX == i)
+            return RT_ERR_TBL_FULL;
+    }
+
+    if ((retVal = rtl8367c_getAsicVlanPortAndProtocolBased(rtk_switch_port_L2P_get(port), ppb_idx, &ppb_vlan_cfg)) != RT_ERR_OK)
+        return retVal;
+
+    if (FALSE == ppb_vlan_cfg.valid)
+        return RT_ERR_FAILED;
+
+    pInfo->frame_type = frame_type;
+    pInfo->proto_type = proto_type;
+    pInfo->cvid = vlan_mbrCfgVid[ppb_vlan_cfg.vlan_idx];
+    pInfo->cpri = ppb_vlan_cfg.priority;
+
+    return RT_ERR_OK;
+}
+
+
+static rtk_api_ret_t _rtk_vlan_protoAndPortBasedVlan_del(rtk_port_t port, rtk_vlan_proto_type_t proto_type, rtk_vlan_protoVlan_frameType_t frame_type)
+{
+    rtk_api_ret_t retVal;
+    rtk_uint32 i, bUsed;
+    rtk_uint32 ppb_idx;
+    rtl8367c_protocolgdatacfg ppb_data_cfg;
+    rtl8367c_protocolvlancfg ppb_vlan_cfg;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    /* Check Port Valid */
+    RTK_CHK_PORT_VALID(port);
+
+    if (proto_type > RTK_MAX_NUM_OF_PROTO_TYPE)
+        return RT_ERR_OUT_OF_RANGE;
+
+    if (frame_type >= FRAME_TYPE_END)
+        return RT_ERR_OUT_OF_RANGE;
+
+    ppb_idx = 0;
+
+    for (i = 0; i <= RTL8367C_PROTOVLAN_GIDX_MAX; i++)
+    {
+        if ((retVal = rtl8367c_getAsicVlanProtocolBasedGroupData(i, &ppb_data_cfg)) != RT_ERR_OK)
+            return retVal;
+
+        if ( (ppb_data_cfg.frameType == (rtl8367c_provlan_frametype)frame_type) && (ppb_data_cfg.etherType == proto_type) )
+        {
+            ppb_idx = i;
+            ppb_vlan_cfg.valid = FALSE;
+            ppb_vlan_cfg.vlan_idx = 0;
+            ppb_vlan_cfg.priority = 0;
+            if ((retVal = rtl8367c_setAsicVlanPortAndProtocolBased(rtk_switch_port_L2P_get(port), ppb_idx, &ppb_vlan_cfg)) != RT_ERR_OK)
+                return retVal;
+        }
+    }
+
+    bUsed = FALSE;
+    RTK_SCAN_ALL_PHY_PORTMASK(i)
+    {
+        if ((retVal = rtl8367c_getAsicVlanPortAndProtocolBased(i, ppb_idx, &ppb_vlan_cfg)) != RT_ERR_OK)
+            return retVal;
+
+        if (TRUE == ppb_vlan_cfg.valid)
+        {
+            bUsed = TRUE;
+            break;
+        }
+    }
+
+    if (FALSE == bUsed) /* No port uses this PPB index, delete it */
+    {
+        ppb_data_cfg.etherType = 0;
+        ppb_data_cfg.frameType = 0;
+        if ((retVal = rtl8367c_setAsicVlanProtocolBasedGroupData(ppb_idx, &ppb_data_cfg)) != RT_ERR_OK)
+            return retVal;
+    }
+
+    return RT_ERR_OK;
+}
+
+
+static rtk_api_ret_t _rtk_vlan_protoAndPortBasedVlan_delAll(rtk_port_t port)
+{
+    rtk_api_ret_t retVal;
+    rtk_uint32 i, j, bUsed[4];
+    rtl8367c_protocolgdatacfg ppb_data_cfg;
+    rtl8367c_protocolvlancfg ppb_vlan_cfg;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    /* Check Port Valid */
+    RTK_CHK_PORT_VALID(port);
+
+    for (i = 0; i <= RTL8367C_PROTOVLAN_GIDX_MAX; i++)
+    {
+        ppb_vlan_cfg.valid = FALSE;
+        ppb_vlan_cfg.vlan_idx = 0;
+        ppb_vlan_cfg.priority = 0;
+        if ((retVal = rtl8367c_setAsicVlanPortAndProtocolBased(rtk_switch_port_L2P_get(port), i, &ppb_vlan_cfg)) != RT_ERR_OK)
+            return retVal;
+    }
+
+    bUsed[0] = FALSE;
+    bUsed[1] = FALSE;
+    bUsed[2] = FALSE;
+    bUsed[3] = FALSE;
+    RTK_SCAN_ALL_PHY_PORTMASK(i)
+    {
+        for (j = 0; j <= RTL8367C_PROTOVLAN_GIDX_MAX; j++)
+        {
+            if ((retVal = rtl8367c_getAsicVlanPortAndProtocolBased(i,j, &ppb_vlan_cfg)) != RT_ERR_OK)
+                return retVal;
+
+            if (TRUE == ppb_vlan_cfg.valid)
+            {
+                bUsed[j] = TRUE;
+            }
+        }
+    }
+
+    for (i = 0; i <= RTL8367C_PROTOVLAN_GIDX_MAX; i++)
+    {
+        if (FALSE == bUsed[i]) /* No port uses this PPB index, delete it */
+        {
+            ppb_data_cfg.etherType = 0;
+            ppb_data_cfg.frameType = 0;
+            if ((retVal = rtl8367c_setAsicVlanProtocolBasedGroupData(i, &ppb_data_cfg)) != RT_ERR_OK)
+                return retVal;
+        }
+    }
+
+    return RT_ERR_OK;
+}
+
+
+static rtk_api_ret_t _rtk_vlan_tagMode_set(rtk_port_t port, rtk_vlan_tagMode_t tag_mode)
+{
+    rtk_api_ret_t retVal;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    /* Check Port Valid */
+    RTK_CHK_PORT_VALID(port);
+
+    if (tag_mode >= VLAN_TAG_MODE_END)
+        return RT_ERR_INPUT;
+
+    if ((retVal = rtl8367c_setAsicVlanEgressTagMode(rtk_switch_port_L2P_get(port), tag_mode)) != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+
+static rtk_api_ret_t _rtk_vlan_tagMode_get(rtk_port_t port, rtk_vlan_tagMode_t *pTag_mode)
+{
+    rtk_api_ret_t retVal;
+    rtl8367c_egtagmode  mode;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    /* Check Port Valid */
+    RTK_CHK_PORT_VALID(port);
+
+    if(NULL == pTag_mode)
+        return RT_ERR_NULL_POINTER;
+
+    if ((retVal = rtl8367c_getAsicVlanEgressTagMode(rtk_switch_port_L2P_get(port), &mode)) != RT_ERR_OK)
+        return retVal;
+
+    *pTag_mode = (rtk_vlan_tagMode_t)mode;
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_vlan_transparent_set(rtk_port_t egr_port, rtk_portmask_t *pIgr_pmask)
+{
+     rtk_api_ret_t retVal;
+     rtk_uint32    pmask;
+
+     /* Check initialization state */
+     RTK_CHK_INIT_STATE();
+
+     /* Check Port Valid */
+     RTK_CHK_PORT_VALID(egr_port);
+
+     if(NULL == pIgr_pmask)
+        return RT_ERR_NULL_POINTER;
+
+     RTK_CHK_PORTMASK_VALID(pIgr_pmask);
+
+     if(rtk_switch_portmask_L2P_get(pIgr_pmask, &pmask) != RT_ERR_OK)
+        return RT_ERR_FAILED;
+
+     if ((retVal = rtl8367c_setAsicVlanTransparent(rtk_switch_port_L2P_get(egr_port), pmask)) != RT_ERR_OK)
+         return retVal;
+
+     return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_vlan_transparent_get(rtk_port_t egr_port, rtk_portmask_t *pIgr_pmask)
+{
+     rtk_api_ret_t retVal;
+     rtk_uint32    pmask;
+
+     /* Check initialization state */
+     RTK_CHK_INIT_STATE();
+
+     /* Check Port Valid */
+     RTK_CHK_PORT_VALID(egr_port);
+
+     if(NULL == pIgr_pmask)
+        return RT_ERR_NULL_POINTER;
+
+     if ((retVal = rtl8367c_getAsicVlanTransparent(rtk_switch_port_L2P_get(egr_port), &pmask)) != RT_ERR_OK)
+         return retVal;
+
+     if(rtk_switch_portmask_P2L_get(pmask, pIgr_pmask) != RT_ERR_OK)
+        return RT_ERR_FAILED;
+
+     return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_vlan_keep_set(rtk_port_t egr_port, rtk_portmask_t *pIgr_pmask)
+{
+     rtk_api_ret_t retVal;
+     rtk_uint32    pmask;
+
+     /* Check initialization state */
+     RTK_CHK_INIT_STATE();
+
+     /* Check Port Valid */
+     RTK_CHK_PORT_VALID(egr_port);
+
+     if(NULL == pIgr_pmask)
+        return RT_ERR_NULL_POINTER;
+
+     RTK_CHK_PORTMASK_VALID(pIgr_pmask);
+
+     if(rtk_switch_portmask_L2P_get(pIgr_pmask, &pmask) != RT_ERR_OK)
+        return RT_ERR_FAILED;
+
+     if ((retVal = rtl8367c_setAsicVlanEgressKeep(rtk_switch_port_L2P_get(egr_port), pmask)) != RT_ERR_OK)
+         return retVal;
+
+     return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_vlan_keep_get(rtk_port_t egr_port, rtk_portmask_t *pIgr_pmask)
+{
+     rtk_api_ret_t retVal;
+     rtk_uint32    pmask;
+
+     /* Check initialization state */
+     RTK_CHK_INIT_STATE();
+
+     /* Check Port Valid */
+     RTK_CHK_PORT_VALID(egr_port);
+
+     if(NULL == pIgr_pmask)
+        return RT_ERR_NULL_POINTER;
+
+     if ((retVal = rtl8367c_getAsicVlanEgressKeep(rtk_switch_port_L2P_get(egr_port), &pmask)) != RT_ERR_OK)
+         return retVal;
+
+     if(rtk_switch_portmask_P2L_get(pmask, pIgr_pmask) != RT_ERR_OK)
+        return RT_ERR_FAILED;
+
+     return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_vlan_stg_set(rtk_vlan_t vid, rtk_stp_msti_id_t stg)
+{
+    rtk_api_ret_t retVal;
+    rtl8367c_user_vlan4kentry vlan4K;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    /* vid must be 0~4095 */
+    if (vid > RTL8367C_VIDMAX)
+        return RT_ERR_VLAN_VID;
+
+    /* msti must be 0~15 */
+    if (stg > RTL8367C_MSTIMAX)
+        return RT_ERR_MSTI;
+
+    /* update 4K table */
+    vlan4K.vid = vid;
+    if ((retVal = rtl8367c_getAsicVlan4kEntry(&vlan4K)) != RT_ERR_OK)
+        return retVal;
+
+    vlan4K.fid_msti = stg;
+    if ((retVal = rtl8367c_setAsicVlan4kEntry(&vlan4K)) != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+
+static rtk_api_ret_t _rtk_vlan_stg_get(rtk_vlan_t vid, rtk_stp_msti_id_t *pStg)
+{
+    rtk_api_ret_t retVal;
+    rtl8367c_user_vlan4kentry vlan4K;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    /* vid must be 0~4095 */
+    if (vid > RTL8367C_VIDMAX)
+        return RT_ERR_VLAN_VID;
+
+    if(NULL == pStg)
+        return RT_ERR_NULL_POINTER;
+
+    /* update 4K table */
+    vlan4K.vid = vid;
+    if ((retVal = rtl8367c_getAsicVlan4kEntry(&vlan4K)) != RT_ERR_OK)
+        return retVal;
+
+    *pStg = vlan4K.fid_msti;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_vlan_portFid_set(rtk_port_t port, rtk_enable_t enable, rtk_fid_t fid)
+{
+    rtk_api_ret_t retVal;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    /* Check Port Valid */
+    RTK_CHK_PORT_VALID(port);
+
+    if (enable>=RTK_ENABLE_END)
+        return RT_ERR_ENABLE;
+
+    /* fid must be 0~4095 */
+    if (fid > RTK_FID_MAX)
+        return RT_ERR_L2_FID;
+
+    if ((retVal = rtl8367c_setAsicPortBasedFidEn(rtk_switch_port_L2P_get(port), enable))!=RT_ERR_OK)
+        return retVal;
+
+    if ((retVal = rtl8367c_setAsicPortBasedFid(rtk_switch_port_L2P_get(port), fid))!=RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_vlan_portFid_get(rtk_port_t port, rtk_enable_t *pEnable, rtk_fid_t *pFid)
+{
+    rtk_api_ret_t retVal;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    /* Check Port Valid */
+    RTK_CHK_PORT_VALID(port);
+
+    if(NULL == pEnable)
+        return RT_ERR_NULL_POINTER;
+
+    if(NULL == pFid)
+        return RT_ERR_NULL_POINTER;
+
+    if ((retVal = rtl8367c_getAsicPortBasedFidEn(rtk_switch_port_L2P_get(port), pEnable))!=RT_ERR_OK)
+        return retVal;
+
+    if ((retVal = rtl8367c_getAsicPortBasedFid(rtk_switch_port_L2P_get(port), pFid))!=RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_vlan_UntagDscpPriorityEnable_set(rtk_enable_t enable)
+{
+    rtk_api_ret_t retVal;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if(enable >= RTK_ENABLE_END)
+        return RT_ERR_ENABLE;
+
+    if ((retVal = rtl8367c_setAsicVlanUntagDscpPriorityEn((rtk_uint32)enable)) != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_vlan_UntagDscpPriorityEnable_get(rtk_enable_t *pEnable)
+{
+    rtk_api_ret_t retVal;
+    rtk_uint32  value;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if(NULL == pEnable)
+        return RT_ERR_NULL_POINTER;
+
+    if ((retVal = rtl8367c_getAsicVlanUntagDscpPriorityEn(&value)) != RT_ERR_OK)
+        return retVal;
+
+    *pEnable = (rtk_enable_t)value;
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_stp_mstpState_set(rtk_stp_msti_id_t msti, rtk_port_t port, rtk_stp_state_t stp_state)
+{
+    rtk_api_ret_t retVal;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    /* Check Port Valid */
+    RTK_CHK_PORT_VALID(port);
+
+    if (msti > RTK_MAX_NUM_OF_MSTI)
+        return RT_ERR_MSTI;
+
+    if (stp_state >= STP_STATE_END)
+        return RT_ERR_MSTP_STATE;
+
+    if ((retVal = rtl8367c_setAsicSpanningTreeStatus(rtk_switch_port_L2P_get(port), msti, stp_state)) != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_stp_mstpState_get(rtk_stp_msti_id_t msti, rtk_port_t port, rtk_stp_state_t *pStp_state)
+{
+    rtk_api_ret_t retVal;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    /* Check Port Valid */
+    RTK_CHK_PORT_VALID(port);
+
+    if (msti > RTK_MAX_NUM_OF_MSTI)
+        return RT_ERR_MSTI;
+
+    if(NULL == pStp_state)
+        return RT_ERR_NULL_POINTER;
+
+    if ((retVal = rtl8367c_getAsicSpanningTreeStatus(rtk_switch_port_L2P_get(port), msti, pStp_state)) != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_vlan_reservedVidAction_set(rtk_vlan_resVidAction_t action_vid0, rtk_vlan_resVidAction_t action_vid4095)
+{
+    rtk_api_ret_t retVal;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if(action_vid0 >= RESVID_ACTION_END)
+        return RT_ERR_INPUT;
+
+    if(action_vid4095 >= RESVID_ACTION_END)
+        return RT_ERR_INPUT;
+
+    if ((retVal = rtl8367c_setReservedVidAction((rtk_uint32)action_vid0, (rtk_uint32)action_vid4095)) != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_vlan_reservedVidAction_get(rtk_vlan_resVidAction_t *pAction_vid0, rtk_vlan_resVidAction_t *pAction_vid4095)
+{
+    rtk_api_ret_t retVal;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if(pAction_vid0 == NULL)
+        return RT_ERR_NULL_POINTER;
+
+    if(pAction_vid4095 == NULL)
+        return RT_ERR_NULL_POINTER;
+
+    if ((retVal = rtl8367c_getReservedVidAction((rtk_uint32 *)pAction_vid0, (rtk_uint32 *)pAction_vid4095)) != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_vlan_realKeepRemarkEnable_set(rtk_enable_t enabled)
+{
+    rtk_api_ret_t retVal;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if(enabled >= RTK_ENABLE_END)
+        return RT_ERR_INPUT;
+
+    if ((retVal = rtl8367c_setRealKeepRemarkEn((rtk_uint32)enabled)) != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_vlan_realKeepRemarkEnable_get(rtk_enable_t *pEnabled)
+{
+    rtk_api_ret_t retVal;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if(NULL == pEnabled)
+        return RT_ERR_NULL_POINTER;
+
+    if ((retVal = rtl8367c_getRealKeepRemarkEn((rtk_uint32 *)pEnabled)) != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+static rtk_api_ret_t _rtk_vlan_reset(void)
+{
+    rtk_api_ret_t retVal;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    if ((retVal = rtl8367c_resetVlan()) != RT_ERR_OK)
+        return retVal;
+
+    return RT_ERR_OK;
+}
+
+
+
+/* Function Name:
+ *      rtk_vlan_init
+ * Description:
+ *      Initialize VLAN.
+ * Input:
+ *      None
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ * Note:
+ *      VLAN is disabled by default. The user has to call this API to enable VLAN
+ *      before using it. It also creates a default VLAN (vid 1) containing all ports
+ *      and sets the PVID of every port to this default VLAN.
+ */
+rtk_api_ret_t rtk_vlan_init(void)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_vlan_init();
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
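+
+/*
+ * Usage sketch (illustrative, not part of the Realtek API): a typical
+ * bring-up calls rtk_switch_init() first, which is what RTK_CHK_INIT_STATE()
+ * above enforces, then:
+ *
+ *     rtk_api_ret_t ret;
+ *
+ *     ret = rtk_vlan_init();
+ *     if (ret != RT_ERR_OK)
+ *         return ret;
+ */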
+
+/* Function Name:
+ *      rtk_vlan_set
+ * Description:
+ *      Set a VLAN entry.
+ * Input:
+ *      vid - VLAN ID to configure.
+ *      pVlanCfg - VLAN Configuration
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK                   - OK
+ *      RT_ERR_FAILED               - Failed
+ *      RT_ERR_SMI                  - SMI access error
+ *      RT_ERR_INPUT                - Invalid input parameters.
+ *      RT_ERR_L2_FID               - Invalid FID.
+ *      RT_ERR_VLAN_PORT_MBR_EXIST  - Invalid member port mask.
+ *      RT_ERR_VLAN_VID             - Invalid VID parameter.
+ * Note:
+ *
+ */
+rtk_api_ret_t rtk_vlan_set(rtk_vlan_t vid, rtk_vlan_cfg_t *pVlanCfg)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_vlan_set(vid, pVlanCfg);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
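+
+/*
+ * Usage sketch (illustrative): create VLAN 100 with logical ports 0-2 as
+ * members and port 0 untagged, then read it back. The RTK_PORTMASK_* helpers
+ * and UTP_PORTn identifiers are assumed to come from rtk_switch.h.
+ *
+ *     rtk_vlan_cfg_t cfg;
+ *
+ *     memset(&cfg, 0x00, sizeof(rtk_vlan_cfg_t));
+ *     RTK_PORTMASK_PORT_SET(cfg.mbr, UTP_PORT0);
+ *     RTK_PORTMASK_PORT_SET(cfg.mbr, UTP_PORT1);
+ *     RTK_PORTMASK_PORT_SET(cfg.mbr, UTP_PORT2);
+ *     RTK_PORTMASK_PORT_SET(cfg.untag, UTP_PORT0);
+ *     if (rtk_vlan_set(100, &cfg) != RT_ERR_OK)
+ *         return RT_ERR_FAILED;
+ *     if (rtk_vlan_get(100, &cfg) != RT_ERR_OK)
+ *         return RT_ERR_FAILED;
+ */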
+
+/* Function Name:
+ *      rtk_vlan_get
+ * Description:
+ *      Get a VLAN entry.
+ * Input:
+ *      vid - VLAN ID to configure.
+ * Output:
+ *      pVlanCfg - VLAN Configuration
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_INPUT        - Invalid input parameters.
+ *      RT_ERR_VLAN_VID     - Invalid VID parameter.
+ * Note:
+ *
+ */
+rtk_api_ret_t rtk_vlan_get(rtk_vlan_t vid, rtk_vlan_cfg_t *pVlanCfg)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_vlan_get(vid, pVlanCfg);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_vlan_egrFilterEnable_set
+ * Description:
+ *      Set VLAN egress filter.
+ * Input:
+ *      egrFilter - Egress filtering
+ * Output:
+ *      None.
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_ENABLE       - Invalid input parameters.
+ * Note:
+ *
+ */
+rtk_api_ret_t rtk_vlan_egrFilterEnable_set(rtk_enable_t egrFilter)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_vlan_egrFilterEnable_set(egrFilter);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_vlan_egrFilterEnable_get
+ * Description:
+ *      Get VLAN egress filter.
+ * Input:
+ *      None.
+ * Output:
+ *      pEgrFilter - Egress filtering state
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_NULL_POINTER - NULL Pointer.
+ * Note:
+ *
+ */
+rtk_api_ret_t rtk_vlan_egrFilterEnable_get(rtk_enable_t *pEgrFilter)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_vlan_egrFilterEnable_get(pEgrFilter);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_vlan_mbrCfg_set
+ * Description:
+ *      Set a VLAN Member Configuration entry by index.
+ * Input:
+ *      idx     - Index of VLAN Member Configuration.
+ *      pMbrcfg - VLAN member Configuration.
+ * Output:
+ *      None.
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_INPUT        - Invalid input parameters.
+ *      RT_ERR_VLAN_VID     - Invalid VID parameter.
+ * Note:
+ *     Set a VLAN Member Configuration entry by index.
+ */
+rtk_api_ret_t rtk_vlan_mbrCfg_set(rtk_uint32 idx, rtk_vlan_mbrcfg_t *pMbrcfg)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_vlan_mbrCfg_set(idx, pMbrcfg);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_vlan_mbrCfg_get
+ * Description:
+ *      Get a VLAN Member Configuration entry by index.
+ * Input:
+ *      idx - Index of VLAN Member Configuration.
+ * Output:
+ *      pMbrcfg - VLAN member Configuration.
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_INPUT        - Invalid input parameters.
+ *      RT_ERR_VLAN_VID     - Invalid VID parameter.
+ * Note:
+ *     Get a VLAN Member Configuration entry by index.
+ */
+rtk_api_ret_t rtk_vlan_mbrCfg_get(rtk_uint32 idx, rtk_vlan_mbrcfg_t *pMbrcfg)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_vlan_mbrCfg_get(idx, pMbrcfg);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *     rtk_vlan_portPvid_set
+ * Description:
+ *      Set the specified VLAN ID (PVID) on a port.
+ * Input:
+ *      port - Port id.
+ *      pvid - Specified VLAN ID.
+ *      priority - 802.1p priority for the PVID.
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK                   - OK
+ *      RT_ERR_FAILED               - Failed
+ *      RT_ERR_SMI                  - SMI access error
+ *      RT_ERR_PORT_ID              - Invalid port number.
+ *      RT_ERR_VLAN_PRIORITY        - Invalid priority.
+ *      RT_ERR_VLAN_ENTRY_NOT_FOUND - VLAN entry not found.
+ *      RT_ERR_VLAN_VID             - Invalid VID parameter.
+ * Note:
+ *      The API is used for port-based VLAN. Untagged frames received on the port
+ *      are classified into the specified VLAN and assigned the specified priority.
+ */
+rtk_api_ret_t rtk_vlan_portPvid_set(rtk_port_t port, rtk_vlan_t pvid, rtk_pri_t priority)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_vlan_portPvid_set(port, pvid, priority);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
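+
+/*
+ * Usage sketch (illustrative): classify untagged traffic received on logical
+ * port 1 into VLAN 100 with 802.1p priority 0. UTP_PORT1 is assumed from
+ * rtk_switch.h.
+ *
+ *     if (rtk_vlan_portPvid_set(UTP_PORT1, 100, 0) != RT_ERR_OK)
+ *         return RT_ERR_FAILED;
+ */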
+
+/* Function Name:
+ *      rtk_vlan_portPvid_get
+ * Description:
+ *      Get VLAN ID(PVID) on specified port.
+ * Input:
+ *      port - Port id.
+ * Output:
+ *      pPvid - Specified VLAN ID.
+ *      pPriority - 802.1p priority for the PVID.
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_INPUT        - Invalid input parameters.
+ *      RT_ERR_PORT_ID      - Invalid port number.
+ * Note:
+ *     The API can get the PVID and 802.1p priority for the PVID of Port-based VLAN.
+ */
+rtk_api_ret_t rtk_vlan_portPvid_get(rtk_port_t port, rtk_vlan_t *pPvid, rtk_pri_t *pPriority)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_vlan_portPvid_get(port, pPvid, pPriority);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_vlan_portIgrFilterEnable_set
+ * Description:
+ *      Set VLAN ingress filtering for each port.
+ * Input:
+ *      port - Port id.
+ *      igr_filter - VLAN ingress function enable status.
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_PORT_ID      - Invalid port number
+ *      RT_ERR_ENABLE       - Invalid enable input
+ * Note:
+ *      The VLAN ingress filter state is one of the following:
+ *      - DISABLED
+ *      - ENABLED
+ *      While the VLAN function is enabled, the ASIC decides the VLAN ID of each received
+ *      frame and looks up its member ports in the VLAN table. If ingress filtering is
+ *      enabled and the receiving port is not one of those member ports, the frame is dropped.
+ */
+rtk_api_ret_t rtk_vlan_portIgrFilterEnable_set(rtk_port_t port, rtk_enable_t igr_filter)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_vlan_portIgrFilterEnable_set(port, igr_filter);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_vlan_portIgrFilterEnable_get
+ * Description:
+ *      Get VLAN Ingress Filter
+ * Input:
+ *      port        - Port id.
+ * Output:
+ *      pIgr_filter - VLAN ingress function enable status.
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_INPUT        - Invalid input parameters.
+ *      RT_ERR_PORT_ID      - Invalid port number.
+ * Note:
+ *     The API gets the VLAN ingress filter state, which is one of the following:
+ *     - DISABLED
+ *     - ENABLED
+ */
+rtk_api_ret_t rtk_vlan_portIgrFilterEnable_get(rtk_port_t port, rtk_enable_t *pIgr_filter)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_vlan_portIgrFilterEnable_get(port, pIgr_filter);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_vlan_portAcceptFrameType_set
+ * Description:
+ *      Set VLAN accept_frame_type
+ * Input:
+ *      port                - Port id.
+ *      accept_frame_type   - accept frame type
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK                       - OK
+ *      RT_ERR_FAILED                   - Failed
+ *      RT_ERR_SMI                      - SMI access error
+ *      RT_ERR_PORT_ID                  - Invalid port number.
+ *      RT_ERR_VLAN_ACCEPT_FRAME_TYPE   - Invalid frame type.
+ * Note:
+ *      The API controls which 802.1Q frames a port accepts.
+ *      The accept frame type is one of the following:
+ *      - ACCEPT_FRAME_TYPE_ALL
+ *      - ACCEPT_FRAME_TYPE_TAG_ONLY
+ *      - ACCEPT_FRAME_TYPE_UNTAG_ONLY
+ */
+rtk_api_ret_t rtk_vlan_portAcceptFrameType_set(rtk_port_t port, rtk_vlan_acceptFrameType_t accept_frame_type)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_vlan_portAcceptFrameType_set(port, accept_frame_type);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
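+
+/*
+ * Usage sketch (illustrative): accept only 802.1Q tagged frames on logical
+ * port 1 (UTP_PORT1 assumed from rtk_switch.h).
+ *
+ *     if (rtk_vlan_portAcceptFrameType_set(UTP_PORT1,
+ *             ACCEPT_FRAME_TYPE_TAG_ONLY) != RT_ERR_OK)
+ *         return RT_ERR_FAILED;
+ */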
+
+/* Function Name:
+ *      rtk_vlan_portAcceptFrameType_get
+ * Description:
+ *      Get VLAN accept_frame_type
+ * Input:
+ *      port - Port id.
+ * Output:
+ *      pAccept_frame_type - accept frame type
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_INPUT        - Invalid input parameters.
+ *      RT_ERR_PORT_ID      - Invalid port number.
+ * Note:
+ *     The API gets the accept frame type, which is one of the following:
+ *     - ACCEPT_FRAME_TYPE_ALL
+ *     - ACCEPT_FRAME_TYPE_TAG_ONLY
+ *     - ACCEPT_FRAME_TYPE_UNTAG_ONLY
+ */
+rtk_api_ret_t rtk_vlan_portAcceptFrameType_get(rtk_port_t port, rtk_vlan_acceptFrameType_t *pAccept_frame_type)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_vlan_portAcceptFrameType_get(port, pAccept_frame_type);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_vlan_protoAndPortBasedVlan_add
+ * Description:
+ *      Add the protocol-and-port-based vlan to the specified port of device.
+ * Input:
+ *      port  - Port id.
+ *      pInfo - Protocol and port based VLAN configuration information.
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK               - OK
+ *      RT_ERR_FAILED           - Failed
+ *      RT_ERR_SMI              - SMI access error
+ *      RT_ERR_PORT_ID          - Invalid port number.
+ *      RT_ERR_VLAN_VID         - Invalid VID parameter.
+ *      RT_ERR_VLAN_PRIORITY    - Invalid priority.
+ *      RT_ERR_TBL_FULL         - Table is full.
+ *      RT_ERR_OUT_OF_RANGE     - input out of range.
+ * Note:
+ *      Incoming packets that match the protocol-and-port-based VLAN use the configured VID in the ingress pipeline.
+ *      The frame type is one of the following:
+ *      - FRAME_TYPE_ETHERNET
+ *      - FRAME_TYPE_RFC1042
+ *      - FRAME_TYPE_LLCOTHER
+ */
+rtk_api_ret_t rtk_vlan_protoAndPortBasedVlan_add(rtk_port_t port, rtk_vlan_protoAndPortInfo_t *pInfo)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_vlan_protoAndPortBasedVlan_add(port, pInfo);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
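+
+/*
+ * Usage sketch (illustrative): classify ARP frames (EtherType 0x0806)
+ * received on logical port 2 into VLAN 200. Field names follow
+ * rtk_vlan_protoAndPortInfo_t as used by the implementation above;
+ * UTP_PORT2 is assumed from rtk_switch.h.
+ *
+ *     rtk_vlan_protoAndPortInfo_t info;
+ *
+ *     info.proto_type = 0x0806;
+ *     info.frame_type = FRAME_TYPE_ETHERNET;
+ *     info.cvid = 200;
+ *     info.cpri = 0;
+ *     if (rtk_vlan_protoAndPortBasedVlan_add(UTP_PORT2, &info) != RT_ERR_OK)
+ *         return RT_ERR_FAILED;
+ */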
+
+/* Function Name:
+ *      rtk_vlan_protoAndPortBasedVlan_get
+ * Description:
+ *      Get the protocol-and-port-based vlan to the specified port of device.
+ * Input:
+ *      port - Port id.
+ *      proto_type - protocol-and-port-based vlan protocol type.
+ *      frame_type - protocol-and-port-based vlan frame type.
+ * Output:
+ *      pInfo - Protocol and port based VLAN configuration information.
+ * Return:
+ *      RT_ERR_OK               - OK
+ *      RT_ERR_FAILED           - Failed
+ *      RT_ERR_SMI              - SMI access error
+ *      RT_ERR_PORT_ID          - Invalid port number.
+ *      RT_ERR_OUT_OF_RANGE     - input out of range.
+ *      RT_ERR_TBL_FULL         - Table is full.
+ * Note:
+ *     Incoming packets that match the protocol-and-port-based VLAN use the configured VID in the ingress pipeline.
+ *     The frame type is one of the following:
+ *      - FRAME_TYPE_ETHERNET
+ *      - FRAME_TYPE_RFC1042
+ *      - FRAME_TYPE_LLCOTHER
+ */
+rtk_api_ret_t rtk_vlan_protoAndPortBasedVlan_get(rtk_port_t port, rtk_vlan_proto_type_t proto_type, rtk_vlan_protoVlan_frameType_t frame_type, rtk_vlan_protoAndPortInfo_t *pInfo)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_vlan_protoAndPortBasedVlan_get(port, proto_type, frame_type, pInfo);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_vlan_protoAndPortBasedVlan_del
+ * Description:
+ *      Delete the protocol-and-port-based vlan from the specified port of device.
+ * Input:
+ *      port        - Port id.
+ *      proto_type  - protocol-and-port-based vlan protocol type.
+ *      frame_type  - protocol-and-port-based vlan frame type.
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK               - OK
+ *      RT_ERR_FAILED           - Failed
+ *      RT_ERR_SMI              - SMI access error
+ *      RT_ERR_PORT_ID          - Invalid port number.
+ *      RT_ERR_OUT_OF_RANGE     - input out of range.
+ *      RT_ERR_TBL_FULL         - Table is full.
+ * Note:
+ *     Incoming packets that match the protocol-and-port-based VLAN use the configured VID in the ingress pipeline.
+ *     The frame type is one of the following:
+ *      - FRAME_TYPE_ETHERNET
+ *      - FRAME_TYPE_RFC1042
+ *      - FRAME_TYPE_LLCOTHER
+ */
+rtk_api_ret_t rtk_vlan_protoAndPortBasedVlan_del(rtk_port_t port, rtk_vlan_proto_type_t proto_type, rtk_vlan_protoVlan_frameType_t frame_type)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_vlan_protoAndPortBasedVlan_del(port, proto_type, frame_type);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_vlan_protoAndPortBasedVlan_delAll
+ * Description:
+ *     Delete all protocol-and-port-based vlans from the specified port of device.
+ * Input:
+ *      port - Port id.
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK               - OK
+ *      RT_ERR_FAILED           - Failed
+ *      RT_ERR_SMI              - SMI access error
+ *      RT_ERR_PORT_ID          - Invalid port number.
+ *      RT_ERR_OUT_OF_RANGE     - input out of range.
+ * Note:
+ *     Incoming packets that match a protocol-and-port-based VLAN use the configured VID in the ingress pipeline.
+ *     This API deletes all protocol-and-port-based VLAN entries of the given port.
+ */
+rtk_api_ret_t rtk_vlan_protoAndPortBasedVlan_delAll(rtk_port_t port)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_vlan_protoAndPortBasedVlan_delAll(port);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_vlan_tagMode_set
+ * Description:
+ *      Set CVLAN egress tag mode
+ * Input:
+ *      port        - Port id.
+ *      tag_mode    - The egress tag mode.
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_PORT_ID      - Invalid port number.
+ *      RT_ERR_INPUT        - Invalid input parameter.
+ *      RT_ERR_ENABLE       - Invalid enable input.
+ * Note:
+ *      The API can set the egress tag mode. There are 4 modes for egress tagging:
+ *      - VLAN_TAG_MODE_ORIGINAL
+ *      - VLAN_TAG_MODE_KEEP_FORMAT
+ *      - VLAN_TAG_MODE_PRI
+ *      - VLAN_TAG_MODE_REAL_KEEP_FORMAT
+ */
+rtk_api_ret_t rtk_vlan_tagMode_set(rtk_port_t port, rtk_vlan_tagMode_t tag_mode)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_vlan_tagMode_set(port, tag_mode);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
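+
+/*
+ * Usage sketch (illustrative): select the default egress tagging behaviour
+ * on logical port 0, matching what _rtk_vlan_init() programs (UTP_PORT0
+ * assumed from rtk_switch.h).
+ *
+ *     if (rtk_vlan_tagMode_set(UTP_PORT0, VLAN_TAG_MODE_ORIGINAL) != RT_ERR_OK)
+ *         return RT_ERR_FAILED;
+ */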
+
+/* Function Name:
+ *      rtk_vlan_tagMode_get
+ * Description:
+ *      Get CVLAN egress tag mode
+ * Input:
+ *      port - Port id.
+ * Output:
+ *      pTag_mode - The egress tag mode.
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_INPUT        - Invalid input parameters.
+ *      RT_ERR_PORT_ID      - Invalid port number.
+ * Note:
+ *      The API can get the egress tag mode. There are 4 modes for egress tagging:
+ *      - VLAN_TAG_MODE_ORIGINAL
+ *      - VLAN_TAG_MODE_KEEP_FORMAT
+ *      - VLAN_TAG_MODE_PRI
+ *      - VLAN_TAG_MODE_REAL_KEEP_FORMAT
+ */
+rtk_api_ret_t rtk_vlan_tagMode_get(rtk_port_t port, rtk_vlan_tagMode_t *pTag_mode)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_vlan_tagMode_get(port, pTag_mode);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_vlan_transparent_set
+ * Description:
+ *      Set VLAN transparent mode
+ * Input:
+ *      egr_port        - Egress Port id.
+ *      pIgr_pmask      - Ingress Port Mask.
+ * Output:
+ *      None.
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_INPUT        - Invalid input parameters.
+ *      RT_ERR_PORT_ID      - Invalid port number.
+ * Note:
+ *      None.
+ */
+rtk_api_ret_t rtk_vlan_transparent_set(rtk_port_t egr_port, rtk_portmask_t *pIgr_pmask)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_vlan_transparent_set(egr_port, pIgr_pmask);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_vlan_transparent_get
+ * Description:
+ *      Get VLAN transparent mode
+ * Input:
+ *      egr_port        - Egress Port id.
+ * Output:
+ *      pIgr_pmask      - Ingress Port Mask
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_INPUT        - Invalid input parameters.
+ *      RT_ERR_PORT_ID      - Invalid port number.
+ * Note:
+ *      None.
+ */
+rtk_api_ret_t rtk_vlan_transparent_get(rtk_port_t egr_port, rtk_portmask_t *pIgr_pmask)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_vlan_transparent_get(egr_port, pIgr_pmask);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_vlan_keep_set
+ * Description:
+ *      Set VLAN egress keep mode
+ * Input:
+ *      egr_port        - Egress Port id.
+ *      pIgr_pmask      - Ingress Port Mask.
+ * Output:
+ *      None.
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_INPUT        - Invalid input parameters.
+ *      RT_ERR_PORT_ID      - Invalid port number.
+ * Note:
+ *      None.
+ */
+rtk_api_ret_t rtk_vlan_keep_set(rtk_port_t egr_port, rtk_portmask_t *pIgr_pmask)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_vlan_keep_set(egr_port, pIgr_pmask);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_vlan_keep_get
+ * Description:
+ *      Get VLAN egress keep mode
+ * Input:
+ *      egr_port        - Egress Port id.
+ * Output:
+ *      pIgr_pmask      - Ingress Port Mask
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_INPUT        - Invalid input parameters.
+ *      RT_ERR_PORT_ID      - Invalid port number.
+ * Note:
+ *      None.
+ */
+rtk_api_ret_t rtk_vlan_keep_get(rtk_port_t egr_port, rtk_portmask_t *pIgr_pmask)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_vlan_keep_get(egr_port, pIgr_pmask);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
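+
+/*
+ * Note (a summary suggested by the ASIC call names above, not vendor
+ * documentation): "transparent" makes egr_port forward frames received from
+ * the ports in pIgr_pmask without VLAN processing, while "keep" preserves
+ * the original tag format of those frames on egress.
+ */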
+
+/* Function Name:
+ *      rtk_vlan_stg_set
+ * Description:
+ *      Set the spanning tree group instance (MSTI) of the specified VLAN
+ * Input:
+ *      vid - Specified VLAN ID.
+ *      stg - spanning tree group instance.
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_MSTI         - Invalid msti parameter
+ *      RT_ERR_INPUT        - Invalid input parameter.
+ *      RT_ERR_VLAN_VID     - Invalid VID parameter.
+ * Note:
+ *      The API sets the spanning tree group instance (MSTI) of the specified VLAN.
+ */
+rtk_api_ret_t rtk_vlan_stg_set(rtk_vlan_t vid, rtk_stp_msti_id_t stg)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_vlan_stg_set(vid, stg);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
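+
+/*
+ * Usage sketch (illustrative): map VLAN 100 to MSTI 2, then set that MSTI to
+ * forwarding on logical port 0. STP_STATE_FORWARDING and the public
+ * rtk_stp_mstpState_set() wrapper are assumed to be available (the static
+ * implementation appears earlier in this file).
+ *
+ *     if (rtk_vlan_stg_set(100, 2) != RT_ERR_OK)
+ *         return RT_ERR_FAILED;
+ *     if (rtk_stp_mstpState_set(2, UTP_PORT0, STP_STATE_FORWARDING) != RT_ERR_OK)
+ *         return RT_ERR_FAILED;
+ */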
+
+/* Function Name:
+ *      rtk_vlan_stg_get
+ * Description:
+ *      Get the spanning tree group instance (MSTI) of the specified VLAN
+ * Input:
+ *      vid - Specified VLAN ID.
+ * Output:
+ *      pStg - spanning tree group instance.
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_INPUT        - Invalid input parameters.
+ *      RT_ERR_VLAN_VID     - Invalid VID parameter.
+ * Note:
+ *      The API can get spanning tree group instance of the vlan to the specified device.
+ */
+rtk_api_ret_t rtk_vlan_stg_get(rtk_vlan_t vid, rtk_stp_msti_id_t *pStg)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_vlan_stg_get(vid, pStg);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_vlan_portFid_set
+ * Description:
+ *      Set port-based filtering database
+ * Input:
+ *      port - Port id.
+ *      enable - enable port-based FID
+ *      fid - Specified filtering database.
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK              - OK
+ *      RT_ERR_FAILED          - Failed
+ *      RT_ERR_SMI             - SMI access error
+ *      RT_ERR_L2_FID - Invalid fid.
+ *      RT_ERR_INPUT - Invalid input parameter.
+ *      RT_ERR_PORT_ID - Invalid port ID.
+ * Note:
+ *      The API can set the port-based filtering database. If the function is enabled, all input
+ *      packets will be assigned to the port-based FID regardless of the VLAN tag.
+ */
+rtk_api_ret_t rtk_vlan_portFid_set(rtk_port_t port, rtk_enable_t enable, rtk_fid_t fid)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_vlan_portFid_set(port, enable, fid);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_vlan_portFid_get
+ * Description:
+ *      Get port-based filtering database
+ * Input:
+ *      port - Port id.
+ * Output:
+ *      pEnable - port-based FID enable state
+ *      pFid - Specified filtering database.
+ * Return:
+ *      RT_ERR_OK              - OK
+ *      RT_ERR_FAILED          - Failed
+ *      RT_ERR_SMI             - SMI access error
+ *      RT_ERR_INPUT - Invalid input parameters.
+ *      RT_ERR_PORT_ID - Invalid port ID.
+ * Note:
+ *      The API can get the port-based filtering database status. If the function is enabled, all input
+ *      packets will be assigned to the port-based FID regardless of the VLAN tag.
+ */
+rtk_api_ret_t rtk_vlan_portFid_get(rtk_port_t port, rtk_enable_t *pEnable, rtk_fid_t *pFid)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_vlan_portFid_get(port, pEnable, pFid);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_vlan_UntagDscpPriorityEnable_set
+ * Description:
+ *      Set Untag DSCP priority assign
+ * Input:
+ *      enable - state of Untag DSCP priority assign
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK              - OK
+ *      RT_ERR_FAILED          - Failed
+ *      RT_ERR_SMI             - SMI access error
+ *      RT_ERR_ENABLE          - Invalid input parameters.
+ * Note:
+ *
+ */
+rtk_api_ret_t rtk_vlan_UntagDscpPriorityEnable_set(rtk_enable_t enable)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_vlan_UntagDscpPriorityEnable_set(enable);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_vlan_UntagDscpPriorityEnable_get
+ * Description:
+ *      Get Untag DSCP priority assign
+ * Input:
+ *      None
+ * Output:
+ *      pEnable - state of Untag DSCP priority assign
+ * Return:
+ *      RT_ERR_OK              - OK
+ *      RT_ERR_FAILED          - Failed
+ *      RT_ERR_SMI             - SMI access error
+ *      RT_ERR_NULL_POINTER    - Null pointer
+ * Note:
+ *
+ */
+rtk_api_ret_t rtk_vlan_UntagDscpPriorityEnable_get(rtk_enable_t *pEnable)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_vlan_UntagDscpPriorityEnable_get(pEnable);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_stp_mstpState_set
+ * Description:
+ *      Configure spanning tree state per each port.
+ * Input:
+ *      port - Port id
+ *      msti - Multiple spanning tree instance.
+ *      stp_state - Spanning tree state for msti
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_PORT_ID      - Invalid port number.
+ *      RT_ERR_MSTI         - Invalid msti parameter.
+ *      RT_ERR_MSTP_STATE   - Invalid STP state.
+ * Note:
+ *      System supports per-port multiple spanning tree state for each msti.
+ *      There are four states supported by ASIC.
+ *      - STP_STATE_DISABLED
+ *      - STP_STATE_BLOCKING
+ *      - STP_STATE_LEARNING
+ *      - STP_STATE_FORWARDING
+ */
+rtk_api_ret_t rtk_stp_mstpState_set(rtk_stp_msti_id_t msti, rtk_port_t port, rtk_stp_state_t stp_state)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_stp_mstpState_set(msti, port, stp_state);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_stp_mstpState_get
+ * Description:
+ *      Get spanning tree state per each port.
+ * Input:
+ *      port - Port id.
+ *      msti - Multiple spanning tree instance.
+ * Output:
+ *      pStp_state - Spanning tree state for msti
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_PORT_ID      - Invalid port number.
+ *      RT_ERR_MSTI         - Invalid msti parameter.
+ * Note:
+ *      System supports per-port multiple spanning tree state for each msti.
+ *      There are four states supported by ASIC.
+ *      - STP_STATE_DISABLED
+ *      - STP_STATE_BLOCKING
+ *      - STP_STATE_LEARNING
+ *      - STP_STATE_FORWARDING
+ */
+rtk_api_ret_t rtk_stp_mstpState_get(rtk_stp_msti_id_t msti, rtk_port_t port, rtk_stp_state_t *pStp_state)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_stp_mstpState_get(msti, port, pStp_state);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_vlan_reservedVidAction_set
+ * Description:
+ *      Set Action of VLAN ID = 0 & 4095 tagged packet
+ * Input:
+ *      action_vid0     - Action for VID 0.
+ *      action_vid4095  - Action for VID 4095.
+ * Output:
+ *      None.
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_INPUT        - Error Input
+ * Note:
+ *
+ */
+rtk_api_ret_t rtk_vlan_reservedVidAction_set(rtk_vlan_resVidAction_t action_vid0, rtk_vlan_resVidAction_t action_vid4095)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_vlan_reservedVidAction_set(action_vid0, action_vid4095);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_vlan_reservedVidAction_get
+ * Description:
+ *      Get Action of VLAN ID = 0 & 4095 tagged packet
+ * Input:
+ *      None.
+ * Output:
+ *      pAction_vid0     - Action for VID 0.
+ *      pAction_vid4095  - Action for VID 4095.
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_NULL_POINTER - NULL Pointer
+ * Note:
+ *
+ */
+rtk_api_ret_t rtk_vlan_reservedVidAction_get(rtk_vlan_resVidAction_t *pAction_vid0, rtk_vlan_resVidAction_t *pAction_vid4095)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_vlan_reservedVidAction_get(pAction_vid0, pAction_vid4095);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_vlan_realKeepRemarkEnable_set
+ * Description:
+ *      Set Real keep 1p remarking feature
+ * Input:
+ *      enabled     - State of 1p remarking at real keep packet
+ * Output:
+ *      None.
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_INPUT        - Error Input
+ * Note:
+ *
+ */
+rtk_api_ret_t rtk_vlan_realKeepRemarkEnable_set(rtk_enable_t enabled)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_vlan_realKeepRemarkEnable_set(enabled);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_vlan_realKeepRemarkEnable_get
+ * Description:
+ *      Get Real keep 1p remarking feature
+ * Input:
+ *      None.
+ * Output:
+ *      pEnabled     - State of 1p remarking at real keep packet
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_INPUT        - Error Input
+ * Note:
+ *
+ */
+rtk_api_ret_t rtk_vlan_realKeepRemarkEnable_get(rtk_enable_t *pEnabled)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_vlan_realKeepRemarkEnable_get(pEnabled);
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Function Name:
+ *      rtk_vlan_reset
+ * Description:
+ *      Reset VLAN
+ * Input:
+ *      None.
+ * Output:
+ *      None.
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_INPUT        - Error Input
+ * Note:
+ *
+ */
+rtk_api_ret_t rtk_vlan_reset(void)
+{
+    rtk_api_ret_t retVal;
+
+    RTK_API_LOCK();
+    retVal = _rtk_vlan_reset();
+    RTK_API_UNLOCK();
+
+    return retVal;
+}
+
+/* Do not take the API mutex in the following API */
+
+/* Function Name:
+ *      rtk_vlan_checkAndCreateMbr
+ * Description:
+ *      Check and create Member configuration and return index
+ * Input:
+ *      vid  - VLAN id.
+ * Output:
+ *      pIndex  - Member configuration index
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_VLAN_VID     - Invalid VLAN ID.
+ *      RT_ERR_VLAN_ENTRY_NOT_FOUND - VLAN not found
+ *      RT_ERR_TBL_FULL     - Member Configuration table full
+ * Note:
+ *
+ */
+rtk_api_ret_t rtk_vlan_checkAndCreateMbr(rtk_vlan_t vid, rtk_uint32 *pIndex)
+{
+    rtk_api_ret_t retVal;
+    rtl8367c_user_vlan4kentry vlan4K;
+    rtl8367c_vlanconfiguser vlanMC;
+    rtk_uint32 idx;
+    rtk_uint32 empty_idx = 0xFFFF;
+
+    /* Check initialization state */
+    RTK_CHK_INIT_STATE();
+
+    /* vid must be 0~8191 */
+    if (vid > RTL8367C_EVIDMAX)
+        return RT_ERR_VLAN_VID;
+
+    /* Null pointer check */
+    if(NULL == pIndex)
+        return RT_ERR_NULL_POINTER;
+
+    /* Get 4K VLAN */
+    if (vid <= RTL8367C_VIDMAX)
+    {
+        memset(&vlan4K, 0x00, sizeof(rtl8367c_user_vlan4kentry));
+        vlan4K.vid = vid;
+        if ((retVal = rtl8367c_getAsicVlan4kEntry(&vlan4K)) != RT_ERR_OK)
+            return retVal;
+    }
+
+    /* Search for an existing entry */
+    for (idx = 0; idx <= RTL8367C_CVIDXMAX; idx++)
+    {
+        if(vlan_mbrCfgUsage[idx] == MBRCFG_USED_BY_VLAN)
+        {
+            if(vlan_mbrCfgVid[idx] == vid)
+            {
+                /* Found! return index */
+                *pIndex = idx;
+                return RT_ERR_OK;
+            }
+        }
+    }
+
+    /* Not found; read the H/W member configuration table to update the database */
+    for (idx = 0; idx <= RTL8367C_CVIDXMAX; idx++)
+    {
+        if ((retVal = rtl8367c_getAsicVlanMemberConfig(idx, &vlanMC)) != RT_ERR_OK)
+            return retVal;
+
+        if( (vlanMC.evid == 0) && (vlanMC.mbr == 0x00))
+        {
+            vlan_mbrCfgUsage[idx]   = MBRCFG_UNUSED;
+            vlan_mbrCfgVid[idx]     = 0;
+        }
+        else
+        {
+            vlan_mbrCfgUsage[idx]   = MBRCFG_USED_BY_VLAN;
+            vlan_mbrCfgVid[idx]     = vlanMC.evid;
+        }
+    }
+
+    /* Search for an existing entry again */
+    for (idx = 0; idx <= RTL8367C_CVIDXMAX; idx++)
+    {
+        if(vlan_mbrCfgUsage[idx] == MBRCFG_USED_BY_VLAN)
+        {
+            if(vlan_mbrCfgVid[idx] == vid)
+            {
+                /* Found! return index */
+                *pIndex = idx;
+                return RT_ERR_OK;
+            }
+        }
+    }
+
+    /* try to look up an empty index */
+    for (idx = 0; idx <= RTL8367C_CVIDXMAX; idx++)
+    {
+        if(vlan_mbrCfgUsage[idx] == MBRCFG_UNUSED)
+        {
+            empty_idx = idx;
+            break;
+        }
+    }
+
+    if(empty_idx == 0xFFFF)
+    {
+        /* No empty index */
+        return RT_ERR_TBL_FULL;
+    }
+
+    if (vid > RTL8367C_VIDMAX)
+    {
+        /* VID > 4K: no 4K entry exists, so create the member configuration directly */
+        memset(&vlanMC, 0x00, sizeof(rtl8367c_vlanconfiguser));
+        vlanMC.evid = vid;
+        if ((retVal = rtl8367c_setAsicVlanMemberConfig(empty_idx, &vlanMC)) != RT_ERR_OK)
+            return retVal;
+    }
+    else
+    {
+        /* Copy from 4K table */
+        vlanMC.evid = vid;
+        vlanMC.mbr = vlan4K.mbr;
+        vlanMC.fid_msti = vlan4K.fid_msti;
+        vlanMC.meteridx= vlan4K.meteridx;
+        vlanMC.envlanpol= vlan4K.envlanpol;
+        vlanMC.vbpen = vlan4K.vbpen;
+        vlanMC.vbpri = vlan4K.vbpri;
+        if ((retVal = rtl8367c_setAsicVlanMemberConfig(empty_idx, &vlanMC)) != RT_ERR_OK)
+            return retVal;
+    }
+
+    /* Update Database */
+    vlan_mbrCfgUsage[empty_idx] = MBRCFG_USED_BY_VLAN;
+    vlan_mbrCfgVid[empty_idx] = vid;
+
+    *pIndex = empty_idx;
+    return RT_ERR_OK;
+}
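+
+/*
+ * Usage sketch (editorial illustration, not part of the original sources):
+ * resolve the member configuration index backing VID 100, then read the
+ * entry back through the public accessor. Assumes rtk_vlan_init() has
+ * already completed successfully.
+ *
+ *    rtk_uint32 idx;
+ *    rtk_vlan_mbrcfg_t mbrcfg;
+ *
+ *    if (rtk_vlan_checkAndCreateMbr(100, &idx) == RT_ERR_OK)
+ *        rtk_vlan_mbrCfg_get(idx, &mbrcfg);
+ */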
+
+
+
diff -Nruw linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/vlan.h linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/vlan.h
--- linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/vlan.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/misc/freebox/rtlapi/vlan.h	2019-01-22 16:16:24.723257454 +0100
@@ -0,0 +1,894 @@
+/*
+ * Copyright (C) 2013 Realtek Semiconductor Corp.
+ * All Rights Reserved.
+ *
+ * This program is the proprietary software of Realtek Semiconductor
+ * Corporation and/or its licensors, and only be used, duplicated,
+ * modified or distributed under the authorized license from Realtek.
+ *
+ * ANY USE OF THE SOFTWARE OTHER THAN AS AUTHORIZED UNDER
+ * THIS LICENSE OR COPYRIGHT LAW IS PROHIBITED.
+ *
+ * Purpose : RTL8367/RTL8367C switch high-level API
+ *
+ * Feature : The file includes the high-layer VLAN definitions
+ *
+ */
+
+#ifndef __RTK_API_VLAN_H__
+#define __RTK_API_VLAN_H__
+
+
+/*
+ * Data Type Declaration
+ */
+#define RTK_MAX_NUM_OF_PROTO_TYPE                   0xFFFF
+#define RTK_MAX_NUM_OF_MSTI                         0xF
+#define RTK_FID_MAX                                 0xF
+
+typedef struct  rtk_vlan_cfg_s
+{
+    rtk_portmask_t  mbr;
+    rtk_portmask_t  untag;
+    rtk_uint16      ivl_en;
+    rtk_uint16      fid_msti;
+    rtk_uint16      envlanpol;
+    rtk_uint16      meteridx;
+    rtk_uint16      vbpen;
+    rtk_uint16      vbpri;
+}rtk_vlan_cfg_t;
+
+typedef struct  rtk_vlan_mbrcfg_s
+{
+    rtk_uint16      evid;
+    rtk_portmask_t  mbr;
+    rtk_uint16      fid_msti;
+    rtk_uint16      envlanpol;
+    rtk_uint16      meteridx;
+    rtk_uint16      vbpen;
+    rtk_uint16      vbpri;
+}rtk_vlan_mbrcfg_t;
+
+typedef rtk_uint32  rtk_stp_msti_id_t;     /* MSTI ID  */
+
+typedef enum rtk_stp_state_e
+{
+    STP_STATE_DISABLED = 0,
+    STP_STATE_BLOCKING,
+    STP_STATE_LEARNING,
+    STP_STATE_FORWARDING,
+    STP_STATE_END
+} rtk_stp_state_t;
+
+typedef rtk_uint32  rtk_vlan_proto_type_t;     /* protocol and port based VLAN protocol type  */
+
+
+typedef enum rtk_vlan_acceptFrameType_e
+{
+    ACCEPT_FRAME_TYPE_ALL = 0,             /* untagged, priority-tagged and tagged */
+    ACCEPT_FRAME_TYPE_TAG_ONLY,         /* tagged */
+    ACCEPT_FRAME_TYPE_UNTAG_ONLY,     /* untagged and priority-tagged */
+    ACCEPT_FRAME_TYPE_END
+} rtk_vlan_acceptFrameType_t;
+
+
+/* frame type of protocol vlan - reference 802.1v standard */
+typedef enum rtk_vlan_protoVlan_frameType_e
+{
+    FRAME_TYPE_ETHERNET = 0,
+    FRAME_TYPE_LLCOTHER,
+    FRAME_TYPE_RFC1042,
+    FRAME_TYPE_END
+} rtk_vlan_protoVlan_frameType_t;
+
+/* Protocol-and-port-based Vlan structure */
+typedef struct rtk_vlan_protoAndPortInfo_s
+{
+    rtk_uint32                         proto_type;
+    rtk_vlan_protoVlan_frameType_t frame_type;
+    rtk_vlan_t                     cvid;
+    rtk_pri_t                     cpri;
+}rtk_vlan_protoAndPortInfo_t;
+
+/* tagged mode of VLAN - reference realtek private specification */
+typedef enum rtk_vlan_tagMode_e
+{
+    VLAN_TAG_MODE_ORIGINAL = 0,
+    VLAN_TAG_MODE_KEEP_FORMAT,
+    VLAN_TAG_MODE_PRI,
+    VLAN_TAG_MODE_REAL_KEEP_FORMAT,
+    VLAN_TAG_MODE_END
+} rtk_vlan_tagMode_t;
+
+typedef enum rtk_vlan_resVidAction_e
+{
+    RESVID_ACTION_UNTAG = 0,
+    RESVID_ACTION_TAG,
+    RESVID_ACTION_END
+}
+rtk_vlan_resVidAction_t;
+
+/* Function Name:
+ *      rtk_vlan_init
+ * Description:
+ *      Initialize VLAN.
+ * Input:
+ *      None
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ * Note:
+ *      VLAN is disabled by default. The user has to call this API to enable VLAN before
+ *      using it. It will also set up a default VLAN (vid 1) including all ports and set
+ *      all ports' PVID to the default VLAN.
+ */
+extern rtk_api_ret_t rtk_vlan_init(void);
+
+/* Function Name:
+ *      rtk_vlan_set
+ * Description:
+ *      Set a VLAN entry.
+ * Input:
+ *      vid - VLAN ID to configure.
+ *      pVlanCfg - VLAN Configuration
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK                   - OK
+ *      RT_ERR_FAILED               - Failed
+ *      RT_ERR_SMI                  - SMI access error
+ *      RT_ERR_INPUT                - Invalid input parameters.
+ *      RT_ERR_L2_FID               - Invalid FID.
+ *      RT_ERR_VLAN_PORT_MBR_EXIST  - Invalid member port mask.
+ *      RT_ERR_VLAN_VID             - Invalid VID parameter.
+ * Note:
+ *
+ */
+extern rtk_api_ret_t rtk_vlan_set(rtk_vlan_t vid, rtk_vlan_cfg_t *pVlanCfg);
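+
+/*
+ * Usage sketch (editorial illustration): create VLAN 100 with ports 0-2 as
+ * members and port 0 untagged, using shared VLAN learning. This header does
+ * not show the internals of rtk_portmask_t, so the bits[0] accesses below are
+ * an assumed layout borrowed from other RTK API releases.
+ *
+ *    rtk_vlan_cfg_t cfg;
+ *
+ *    memset(&cfg, 0x00, sizeof(rtk_vlan_cfg_t));
+ *    cfg.mbr.bits[0]   = 0x07;   // assumption: ports 0-2 as members
+ *    cfg.untag.bits[0] = 0x01;   // assumption: port 0 egresses untagged
+ *    cfg.ivl_en = 0;             // shared (SVL) filtering database
+ *    rtk_vlan_set(100, &cfg);
+ */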
+
+/* Function Name:
+ *      rtk_vlan_get
+ * Description:
+ *      Get a VLAN entry.
+ * Input:
+ *      vid - VLAN ID to configure.
+ * Output:
+ *      pVlanCfg - VLAN Configuration
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_INPUT        - Invalid input parameters.
+ *      RT_ERR_VLAN_VID     - Invalid VID parameter.
+ * Note:
+ *
+ */
+extern rtk_api_ret_t rtk_vlan_get(rtk_vlan_t vid, rtk_vlan_cfg_t *pVlanCfg);
+
+/* Function Name:
+ *      rtk_vlan_egrFilterEnable_set
+ * Description:
+ *      Set VLAN egress filter.
+ * Input:
+ *      egrFilter - Egress filtering
+ * Output:
+ *      None.
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_ENABLE       - Invalid input parameters.
+ * Note:
+ *
+ */
+extern rtk_api_ret_t rtk_vlan_egrFilterEnable_set(rtk_enable_t egrFilter);
+
+/* Function Name:
+ *      rtk_vlan_egrFilterEnable_get
+ * Description:
+ *      Get VLAN egress filter.
+ * Input:
+ *      None.
+ * Output:
+ *      pEgrFilter - Egress filtering state
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_NULL_POINTER - NULL Pointer.
+ * Note:
+ *
+ */
+extern rtk_api_ret_t rtk_vlan_egrFilterEnable_get(rtk_enable_t *pEgrFilter);
+
+/* Function Name:
+ *      rtk_vlan_mbrCfg_set
+ * Description:
+ *      Set a VLAN Member Configuration entry by index.
+ * Input:
+ *      idx     - Index of VLAN Member Configuration.
+ *      pMbrcfg - VLAN member Configuration.
+ * Output:
+ *      None.
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_INPUT        - Invalid input parameters.
+ *      RT_ERR_VLAN_VID     - Invalid VID parameter.
+ * Note:
+ *     Set a VLAN Member Configuration entry by index.
+ */
+extern rtk_api_ret_t rtk_vlan_mbrCfg_set(rtk_uint32 idx, rtk_vlan_mbrcfg_t *pMbrcfg);
+
+/* Function Name:
+ *      rtk_vlan_mbrCfg_get
+ * Description:
+ *      Get a VLAN Member Configuration entry by index.
+ * Input:
+ *      idx - Index of VLAN Member Configuration.
+ * Output:
+ *      pMbrcfg - VLAN member Configuration.
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_INPUT        - Invalid input parameters.
+ *      RT_ERR_VLAN_VID     - Invalid VID parameter.
+ * Note:
+ *     Get a VLAN Member Configuration entry by index.
+ */
+extern rtk_api_ret_t rtk_vlan_mbrCfg_get(rtk_uint32 idx, rtk_vlan_mbrcfg_t *pMbrcfg);
+
+/* Function Name:
+ *     rtk_vlan_portPvid_set
+ * Description:
+ *      Set port to specified VLAN ID(PVID).
+ * Input:
+ *      port - Port id.
+ *      pvid - Specified VLAN ID.
+ *      priority - 802.1p priority for the PVID.
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK                   - OK
+ *      RT_ERR_FAILED               - Failed
+ *      RT_ERR_SMI                  - SMI access error
+ *      RT_ERR_PORT_ID              - Invalid port number.
+ *      RT_ERR_VLAN_PRIORITY        - Invalid priority.
+ *      RT_ERR_VLAN_ENTRY_NOT_FOUND - VLAN entry not found.
+ *      RT_ERR_VLAN_VID             - Invalid VID parameter.
+ * Note:
+ *       The API is used for Port-based VLAN. The untagged frame received from the
+ *       port will be classified to the specified VLAN and assigned to the specified priority.
+ */
+extern rtk_api_ret_t rtk_vlan_portPvid_set(rtk_port_t port, rtk_vlan_t pvid, rtk_pri_t priority);
+
+/* Function Name:
+ *      rtk_vlan_portPvid_get
+ * Description:
+ *      Get VLAN ID(PVID) on specified port.
+ * Input:
+ *      port - Port id.
+ * Output:
+ *      pPvid - Specified VLAN ID.
+ *      pPriority - 802.1p priority for the PVID.
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_INPUT        - Invalid input parameters.
+ *      RT_ERR_PORT_ID      - Invalid port number.
+ * Note:
+ *     The API can get the PVID and 802.1p priority for the PVID of Port-based VLAN.
+ */
+extern rtk_api_ret_t rtk_vlan_portPvid_get(rtk_port_t port, rtk_vlan_t *pPvid, rtk_pri_t *pPriority);
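+
+/*
+ * Usage sketch (editorial illustration): classify untagged ingress on port 1
+ * into VLAN 100 at 802.1p priority 0, then read the setting back. The literal
+ * port id is illustrative; real callers pass a valid rtk_port_t.
+ *
+ *    rtk_vlan_t pvid;
+ *    rtk_pri_t pri;
+ *
+ *    rtk_vlan_portPvid_set(1, 100, 0);
+ *    rtk_vlan_portPvid_get(1, &pvid, &pri);
+ */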
+
+/* Function Name:
+ *      rtk_vlan_portIgrFilterEnable_set
+ * Description:
+ *      Set VLAN ingress for each port.
+ * Input:
+ *      port - Port id.
+ *      igr_filter - VLAN ingress function enable status.
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_PORT_ID      - Invalid port number
+ *      RT_ERR_ENABLE       - Invalid enable input
+ * Note:
+ *      The status of the VLAN ingress filter is one of the following:
+ *      - DISABLED
+ *      - ENABLED
+ *      While the VLAN function is enabled, the ASIC decides a VLAN ID for each received frame and looks up
+ *      its member ports in the VLAN table. If the receiving port is not one of the VLAN's member ports, the
+ *      ASIC drops the frame when the VLAN ingress function is enabled.
+ */
+extern rtk_api_ret_t rtk_vlan_portIgrFilterEnable_set(rtk_port_t port, rtk_enable_t igr_filter);
+
+/* Function Name:
+ *      rtk_vlan_portIgrFilterEnable_get
+ * Description:
+ *      Get VLAN Ingress Filter
+ * Input:
+ *      port        - Port id.
+ * Output:
+ *      pIgr_filter - VLAN ingress function enable status.
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_INPUT        - Invalid input parameters.
+ *      RT_ERR_PORT_ID      - Invalid port number.
+ * Note:
+ *     The API can get the VLAN ingress filter status.
+ *     The status of the VLAN ingress filter is one of the following:
+ *     - DISABLED
+ *     - ENABLED
+ */
+extern rtk_api_ret_t rtk_vlan_portIgrFilterEnable_get(rtk_port_t port, rtk_enable_t *pIgr_filter);
+
+/* Function Name:
+ *      rtk_vlan_portAcceptFrameType_set
+ * Description:
+ *      Set VLAN accept_frame_type
+ * Input:
+ *      port                - Port id.
+ *      accept_frame_type   - accept frame type
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK                       - OK
+ *      RT_ERR_FAILED                   - Failed
+ *      RT_ERR_SMI                      - SMI access error
+ *      RT_ERR_PORT_ID                  - Invalid port number.
+ *      RT_ERR_VLAN_ACCEPT_FRAME_TYPE   - Invalid frame type.
+ * Note:
+ *      The API is used for checking 802.1Q tagged frames.
+ *      The accept frame type is one of the following:
+ *      - ACCEPT_FRAME_TYPE_ALL
+ *      - ACCEPT_FRAME_TYPE_TAG_ONLY
+ *      - ACCEPT_FRAME_TYPE_UNTAG_ONLY
+ */
+extern rtk_api_ret_t rtk_vlan_portAcceptFrameType_set(rtk_port_t port, rtk_vlan_acceptFrameType_t accept_frame_type);
+
+/* Function Name:
+ *      rtk_vlan_portAcceptFrameType_get
+ * Description:
+ *      Get VLAN accept_frame_type
+ * Input:
+ *      port - Port id.
+ * Output:
+ *      pAccept_frame_type - accept frame type
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_INPUT        - Invalid input parameters.
+ *      RT_ERR_PORT_ID      - Invalid port number.
+ * Note:
+ *     The API can get the accept frame type of the port.
+ *     The accept frame type is one of the following:
+ *     - ACCEPT_FRAME_TYPE_ALL
+ *     - ACCEPT_FRAME_TYPE_TAG_ONLY
+ *     - ACCEPT_FRAME_TYPE_UNTAG_ONLY
+ */
+extern rtk_api_ret_t rtk_vlan_portAcceptFrameType_get(rtk_port_t port, rtk_vlan_acceptFrameType_t *pAccept_frame_type);
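+
+/*
+ * Usage sketch (editorial illustration): enforce tag-based membership on a
+ * trunk-style port by enabling ingress filtering and accepting tagged frames
+ * only. ENABLED is assumed to be the rtk_enable_t value referenced in the
+ * notes above.
+ *
+ *    rtk_vlan_portIgrFilterEnable_set(1, ENABLED);
+ *    rtk_vlan_portAcceptFrameType_set(1, ACCEPT_FRAME_TYPE_TAG_ONLY);
+ */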
+
+/* Function Name:
+ *      rtk_vlan_tagMode_set
+ * Description:
+ *      Set CVLAN egress tag mode
+ * Input:
+ *      port        - Port id.
+ *      tag_mode    - The egress tag mode.
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_PORT_ID      - Invalid port number.
+ *      RT_ERR_INPUT        - Invalid input parameter.
+ *      RT_ERR_ENABLE       - Invalid enable input.
+ * Note:
+ *      The API can set the egress tag mode. There are 4 modes for the egress tag:
+ *      - VLAN_TAG_MODE_ORIGINAL
+ *      - VLAN_TAG_MODE_KEEP_FORMAT
+ *      - VLAN_TAG_MODE_PRI
+ *      - VLAN_TAG_MODE_REAL_KEEP_FORMAT
+ */
+extern rtk_api_ret_t rtk_vlan_tagMode_set(rtk_port_t port, rtk_vlan_tagMode_t tag_mode);
+
+/* Function Name:
+ *      rtk_vlan_tagMode_get
+ * Description:
+ *      Get CVLAN egress tag mode
+ * Input:
+ *      port - Port id.
+ * Output:
+ *      pTag_mode - The egress tag mode.
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_INPUT        - Invalid input parameters.
+ *      RT_ERR_PORT_ID      - Invalid port number.
+ * Note:
+ *      The API can get the egress tag mode. There are 4 modes for the egress tag:
+ *      - VLAN_TAG_MODE_ORIGINAL
+ *      - VLAN_TAG_MODE_KEEP_FORMAT
+ *      - VLAN_TAG_MODE_PRI
+ *      - VLAN_TAG_MODE_REAL_KEEP_FORMAT
+ */
+extern rtk_api_ret_t rtk_vlan_tagMode_get(rtk_port_t port, rtk_vlan_tagMode_t *pTag_mode);
+
+/* Function Name:
+ *      rtk_vlan_transparent_set
+ * Description:
+ *      Set VLAN transparent mode
+ * Input:
+ *      egr_port        - Egress Port id.
+ *      pIgr_pmask      - Ingress Port Mask.
+ * Output:
+ *      None.
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_INPUT        - Invalid input parameters.
+ *      RT_ERR_PORT_ID      - Invalid port number.
+ * Note:
+ *      None.
+ */
+extern rtk_api_ret_t rtk_vlan_transparent_set(rtk_port_t egr_port, rtk_portmask_t *pIgr_pmask);
+
+/* Function Name:
+ *      rtk_vlan_transparent_get
+ * Description:
+ *      Get VLAN transparent mode
+ * Input:
+ *      egr_port        - Egress Port id.
+ * Output:
+ *      pIgr_pmask      - Ingress Port Mask
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_INPUT        - Invalid input parameters.
+ *      RT_ERR_PORT_ID      - Invalid port number.
+ * Note:
+ *      None.
+ */
+extern rtk_api_ret_t rtk_vlan_transparent_get(rtk_port_t egr_port, rtk_portmask_t *pIgr_pmask);
+
+/* Function Name:
+ *      rtk_vlan_keep_set
+ * Description:
+ *      Set VLAN egress keep mode
+ * Input:
+ *      egr_port        - Egress Port id.
+ *      pIgr_pmask      - Ingress Port Mask.
+ * Output:
+ *      None.
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_INPUT        - Invalid input parameters.
+ *      RT_ERR_PORT_ID      - Invalid port number.
+ * Note:
+ *      None.
+ */
+extern rtk_api_ret_t rtk_vlan_keep_set(rtk_port_t egr_port, rtk_portmask_t *pIgr_pmask);
+
+/* Function Name:
+ *      rtk_vlan_keep_get
+ * Description:
+ *      Get VLAN egress keep mode
+ * Input:
+ *      egr_port        - Egress Port id.
+ * Output:
+ *      pIgr_pmask      - Ingress Port Mask
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_INPUT        - Invalid input parameters.
+ *      RT_ERR_PORT_ID      - Invalid port number.
+ * Note:
+ *      None.
+ */
+extern rtk_api_ret_t rtk_vlan_keep_get(rtk_port_t egr_port, rtk_portmask_t *pIgr_pmask);
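+
+/*
+ * Usage sketch (editorial illustration): let traffic entering on ports 0-3
+ * leave egress port 5 with its VLAN tag untouched. As above, the bits[0]
+ * portmask layout is an assumption.
+ *
+ *    rtk_portmask_t igr;
+ *
+ *    memset(&igr, 0x00, sizeof(rtk_portmask_t));
+ *    igr.bits[0] = 0x0F;   // assumption: ingress ports 0-3
+ *    rtk_vlan_transparent_set(5, &igr);
+ */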
+
+/* Function Name:
+ *      rtk_vlan_stg_set
+ * Description:
+ *      Set spanning tree group instance of the vlan to the specified device
+ * Input:
+ *      vid - Specified VLAN ID.
+ *      stg - spanning tree group instance.
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_MSTI         - Invalid msti parameter
+ *      RT_ERR_INPUT        - Invalid input parameter.
+ *      RT_ERR_VLAN_VID     - Invalid VID parameter.
+ * Note:
+ *      The API can set spanning tree group instance of the vlan to the specified device.
+ */
+extern rtk_api_ret_t rtk_vlan_stg_set(rtk_vlan_t vid, rtk_stp_msti_id_t stg);
+
+/* Function Name:
+ *      rtk_vlan_stg_get
+ * Description:
+ *      Get spanning tree group instance of the vlan to the specified device
+ * Input:
+ *      vid - Specified VLAN ID.
+ * Output:
+ *      pStg - spanning tree group instance.
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_INPUT        - Invalid input parameters.
+ *      RT_ERR_VLAN_VID     - Invalid VID parameter.
+ * Note:
+ *      The API can get spanning tree group instance of the vlan to the specified device.
+ */
+extern rtk_api_ret_t rtk_vlan_stg_get(rtk_vlan_t vid, rtk_stp_msti_id_t *pStg);
+
+/* Function Name:
+ *      rtk_vlan_protoAndPortBasedVlan_add
+ * Description:
+ *      Add the protocol-and-port-based vlan to the specified port of device.
+ * Input:
+ *      port  - Port id.
+ *      pInfo - Protocol and port based VLAN configuration information.
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK               - OK
+ *      RT_ERR_FAILED           - Failed
+ *      RT_ERR_SMI              - SMI access error
+ *      RT_ERR_PORT_ID          - Invalid port number.
+ *      RT_ERR_VLAN_VID         - Invalid VID parameter.
+ *      RT_ERR_VLAN_PRIORITY    - Invalid priority.
+ *      RT_ERR_TBL_FULL         - Table is full.
+ *      RT_ERR_OUT_OF_RANGE     - input out of range.
+ * Note:
+ *      Incoming packets that match the protocol-and-port-based VLAN will use the configured VID in the ingress pipeline.
+ *      The frame type is shown in the following:
+ *      - FRAME_TYPE_ETHERNET
+ *      - FRAME_TYPE_RFC1042
+ *      - FRAME_TYPE_LLCOTHER
+ */
+extern rtk_api_ret_t rtk_vlan_protoAndPortBasedVlan_add(rtk_port_t port, rtk_vlan_protoAndPortInfo_t *pInfo);
+
+/* Function Name:
+ *      rtk_vlan_protoAndPortBasedVlan_get
+ * Description:
+ *      Get the protocol-and-port-based vlan to the specified port of device.
+ * Input:
+ *      port - Port id.
+ *      proto_type - protocol-and-port-based vlan protocol type.
+ *      frame_type - protocol-and-port-based vlan frame type.
+ * Output:
+ *      pInfo - Protocol and port based VLAN configuration information.
+ * Return:
+ *      RT_ERR_OK               - OK
+ *      RT_ERR_FAILED           - Failed
+ *      RT_ERR_SMI              - SMI access error
+ *      RT_ERR_PORT_ID          - Invalid port number.
+ *      RT_ERR_OUT_OF_RANGE     - input out of range.
+ *      RT_ERR_TBL_FULL         - Table is full.
+ * Note:
+ *     Incoming packets that match the protocol-and-port-based VLAN will use the configured VID in the ingress pipeline.
+ *     The frame type is shown in the following:
+ *      - FRAME_TYPE_ETHERNET
+ *      - FRAME_TYPE_RFC1042
+ *      - FRAME_TYPE_LLCOTHER
+ */
+extern rtk_api_ret_t rtk_vlan_protoAndPortBasedVlan_get(rtk_port_t port, rtk_vlan_proto_type_t proto_type, rtk_vlan_protoVlan_frameType_t frame_type, rtk_vlan_protoAndPortInfo_t *pInfo);
+
+/* Function Name:
+ *      rtk_vlan_protoAndPortBasedVlan_del
+ * Description:
+ *      Delete the protocol-and-port-based vlan from the specified port of device.
+ * Input:
+ *      port        - Port id.
+ *      proto_type  - protocol-and-port-based vlan protocol type.
+ *      frame_type  - protocol-and-port-based vlan frame type.
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK               - OK
+ *      RT_ERR_FAILED           - Failed
+ *      RT_ERR_SMI              - SMI access error
+ *      RT_ERR_PORT_ID          - Invalid port number.
+ *      RT_ERR_OUT_OF_RANGE     - input out of range.
+ *      RT_ERR_TBL_FULL         - Table is full.
+ * Note:
+ *     Incoming packets that match the protocol-and-port-based VLAN will use the configured VID in the ingress pipeline.
+ *     The frame type is shown in the following:
+ *      - FRAME_TYPE_ETHERNET
+ *      - FRAME_TYPE_RFC1042
+ *      - FRAME_TYPE_LLCOTHER
+ */
+extern rtk_api_ret_t rtk_vlan_protoAndPortBasedVlan_del(rtk_port_t port, rtk_vlan_proto_type_t proto_type, rtk_vlan_protoVlan_frameType_t frame_type);
+
+/* Function Name:
+ *      rtk_vlan_protoAndPortBasedVlan_delAll
+ * Description:
+ *     Delete all protocol-and-port-based vlans from the specified port of device.
+ * Input:
+ *      port - Port id.
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK               - OK
+ *      RT_ERR_FAILED           - Failed
+ *      RT_ERR_SMI              - SMI access error
+ *      RT_ERR_PORT_ID          - Invalid port number.
+ *      RT_ERR_OUT_OF_RANGE     - input out of range.
+ * Note:
+ *     Incoming packets that match the protocol-and-port-based VLAN will use the configured VID in the ingress pipeline.
+ *     This deletes all protocol-and-port-based VLAN entries from the flow table.
+ */
+extern rtk_api_ret_t rtk_vlan_protoAndPortBasedVlan_delAll(rtk_port_t port);
+
+/* Function Name:
+ *      rtk_vlan_portFid_set
+ * Description:
+ *      Set port-based filtering database
+ * Input:
+ *      port - Port id.
+ *      enable - enable port-based FID
+ *      fid - Specified filtering database.
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK              - OK
+ *      RT_ERR_FAILED          - Failed
+ *      RT_ERR_SMI             - SMI access error
+ *      RT_ERR_L2_FID - Invalid fid.
+ *      RT_ERR_INPUT - Invalid input parameter.
+ *      RT_ERR_PORT_ID - Invalid port ID.
+ * Note:
+ *      The API can set the port-based filtering database. If the function is enabled, all input
+ *      packets will be assigned to the port-based FID regardless of the VLAN tag.
+ */
+extern rtk_api_ret_t rtk_vlan_portFid_set(rtk_port_t port, rtk_enable_t enable, rtk_fid_t fid);
+
+/* Function Name:
+ *      rtk_vlan_portFid_get
+ * Description:
+ *      Get port-based filtering database
+ * Input:
+ *      port - Port id.
+ * Output:
+ *      pEnable - port-based FID enable state
+ *      pFid - Specified filtering database.
+ * Return:
+ *      RT_ERR_OK              - OK
+ *      RT_ERR_FAILED          - Failed
+ *      RT_ERR_SMI             - SMI access error
+ *      RT_ERR_INPUT - Invalid input parameters.
+ *      RT_ERR_PORT_ID - Invalid port ID.
+ * Note:
+ *      The API can get the port-based filtering database status. If the function is enabled, all input
+ *      packets will be assigned to the port-based FID regardless of the VLAN tag.
+ */
+extern rtk_api_ret_t rtk_vlan_portFid_get(rtk_port_t port, rtk_enable_t *pEnable, rtk_fid_t *pFid);
+
+/* Function Name:
+ *      rtk_vlan_UntagDscpPriorityEnable_set
+ * Description:
+ *      Set Untag DSCP priority assign
+ * Input:
+ *      enable - state of Untag DSCP priority assign
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK              - OK
+ *      RT_ERR_FAILED          - Failed
+ *      RT_ERR_SMI             - SMI access error
+ *      RT_ERR_ENABLE          - Invalid input parameters.
+ * Note:
+ *
+ */
+extern rtk_api_ret_t rtk_vlan_UntagDscpPriorityEnable_set(rtk_enable_t enable);
+
+/* Function Name:
+ *      rtk_vlan_UntagDscpPriorityEnable_get
+ * Description:
+ *      Get Untag DSCP priority assign
+ * Input:
+ *      None
+ * Output:
+ *      pEnable - state of Untag DSCP priority assign
+ * Return:
+ *      RT_ERR_OK              - OK
+ *      RT_ERR_FAILED          - Failed
+ *      RT_ERR_SMI             - SMI access error
+ *      RT_ERR_NULL_POINTER    - Null pointer
+ * Note:
+ *
+ */
+extern rtk_api_ret_t rtk_vlan_UntagDscpPriorityEnable_get(rtk_enable_t *pEnable);
+
+
+/*Spanning Tree*/
+/* Function Name:
+ *      rtk_stp_mstpState_set
+ * Description:
+ *      Configure spanning tree state per each port.
+ * Input:
+ *      port - Port id
+ *      msti - Multiple spanning tree instance.
+ *      stp_state - Spanning tree state for msti
+ * Output:
+ *      None
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_PORT_ID      - Invalid port number.
+ *      RT_ERR_MSTI         - Invalid msti parameter.
+ *      RT_ERR_MSTP_STATE   - Invalid STP state.
+ * Note:
+ *      System supports per-port multiple spanning tree state for each msti.
+ *      There are four states supported by ASIC.
+ *      - STP_STATE_DISABLED
+ *      - STP_STATE_BLOCKING
+ *      - STP_STATE_LEARNING
+ *      - STP_STATE_FORWARDING
+ */
+extern rtk_api_ret_t rtk_stp_mstpState_set(rtk_stp_msti_id_t msti, rtk_port_t port, rtk_stp_state_t stp_state);
+
+/* Function Name:
+ *      rtk_stp_mstpState_get
+ * Description:
+ *      Get spanning tree state per each port.
+ * Input:
+ *      port - Port id.
+ *      msti - Multiple spanning tree instance.
+ * Output:
+ *      pStp_state - Spanning tree state for msti
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_PORT_ID      - Invalid port number.
+ *      RT_ERR_MSTI         - Invalid msti parameter.
+ * Note:
+ *      System supports per-port multiple spanning tree state for each msti.
+ *      There are four states supported by ASIC.
+ *      - STP_STATE_DISABLED
+ *      - STP_STATE_BLOCKING
+ *      - STP_STATE_LEARNING
+ *      - STP_STATE_FORWARDING
+ */
+extern rtk_api_ret_t rtk_stp_mstpState_get(rtk_stp_msti_id_t msti, rtk_port_t port, rtk_stp_state_t *pStp_state);
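+
+/*
+ * Usage sketch (editorial illustration): bind VLAN 100 to MSTI 1, then put
+ * port 1 of that instance into forwarding. The msti and port values are
+ * illustrative only.
+ *
+ *    rtk_vlan_stg_set(100, 1);
+ *    rtk_stp_mstpState_set(1, 1, STP_STATE_FORWARDING);
+ */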
+
+/* Function Name:
+ *      rtk_vlan_checkAndCreateMbr
+ * Description:
+ *      Check and create Member configuration and return index
+ * Input:
+ *      vid  - VLAN id.
+ * Output:
+ *      pIndex  - Member configuration index
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_VLAN_VID     - Invalid VLAN ID.
+ *      RT_ERR_VLAN_ENTRY_NOT_FOUND - VLAN not found
+ *      RT_ERR_TBL_FULL     - Member Configuration table full
+ * Note:
+ *
+ */
+extern rtk_api_ret_t rtk_vlan_checkAndCreateMbr(rtk_vlan_t vid, rtk_uint32 *pIndex);
+
+/* Function Name:
+ *      rtk_vlan_reservedVidAction_set
+ * Description:
+ *      Set Action of VLAN ID = 0 & 4095 tagged packet
+ * Input:
+ *      action_vid0     - Action for VID 0.
+ *      action_vid4095  - Action for VID 4095.
+ * Output:
+ *      None.
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_INPUT        - Error Input
+ * Note:
+ *
+ */
+extern rtk_api_ret_t rtk_vlan_reservedVidAction_set(rtk_vlan_resVidAction_t action_vid0, rtk_vlan_resVidAction_t action_vid4095);
+
+/* Function Name:
+ *      rtk_vlan_reservedVidAction_get
+ * Description:
+ *      Get Action of VLAN ID = 0 & 4095 tagged packet
+ * Input:
+ *      None.
+ * Output:
+ *      pAction_vid0     - Action for VID 0.
+ *      pAction_vid4095  - Action for VID 4095.
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_NULL_POINTER - NULL Pointer
+ * Note:
+ *
+ */
+extern rtk_api_ret_t rtk_vlan_reservedVidAction_get(rtk_vlan_resVidAction_t *pAction_vid0, rtk_vlan_resVidAction_t *pAction_vid4095);
+
+/* Function Name:
+ *      rtk_vlan_realKeepRemarkEnable_set
+ * Description:
+ *      Set Real keep 1p remarking feature
+ * Input:
+ *      enabled     - State of 1p remarking at real keep packet
+ * Output:
+ *      None.
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_INPUT        - Error Input
+ * Note:
+ *
+ */
+extern rtk_api_ret_t rtk_vlan_realKeepRemarkEnable_set(rtk_enable_t enabled);
+
+/* Function Name:
+ *      rtk_vlan_realKeepRemarkEnable_get
+ * Description:
+ *      Get Real keep 1p remarking feature
+ * Input:
+ *      None.
+ * Output:
+ *      pEnabled     - State of 1p remarking at real keep packet
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_INPUT        - Error Input
+ * Note:
+ *
+ */
+extern rtk_api_ret_t rtk_vlan_realKeepRemarkEnable_get(rtk_enable_t *pEnabled);
+
+/* Function Name:
+ *      rtk_vlan_reset
+ * Description:
+ *      Reset VLAN
+ * Input:
+ *      None.
+ * Output:
+ *      None.
+ * Return:
+ *      RT_ERR_OK           - OK
+ *      RT_ERR_FAILED       - Failed
+ *      RT_ERR_SMI          - SMI access error
+ *      RT_ERR_INPUT        - Error Input
+ * Note:
+ *
+ */
+rtk_api_ret_t rtk_vlan_reset(void);
+
+#endif /* __RTK_API_VLAN_H__ */
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/misc/hdcp.c	2019-10-29 09:26:24.041207073 +0100
@@ -0,0 +1,3063 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt)	"%s: " fmt, __func__
+
+#include <linux/platform_device.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/file.h>
+#include <linux/uaccess.h>
+#include <linux/cdev.h>
+#include <linux/sched.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/io.h>
+#include <linux/ion.h>
+#include <linux/types.h>
+#include <linux/device.h>
+#include <linux/sched.h>
+#include <linux/delay.h>
+#include <linux/completion.h>
+#include <linux/errno.h>
+#include <linux/hdcp_qseecom.h>
+#include <linux/kthread.h>
+#include <linux/of.h>
+#include <video/msm_hdmi_hdcp_mgr.h>
+
+#include "qseecom_kernel.h"
+
+#define SRMAPP_NAME            "hdcpsrm"
+#define TZAPP_NAME            "hdcp2p2"
+#define HDCP1_APP_NAME        "hdcp1"
+#define QSEECOM_SBUFF_SIZE    0x1000
+
+#define MAX_TX_MESSAGE_SIZE	129
+#define MAX_RX_MESSAGE_SIZE	534
+#define MAX_TOPOLOGY_ELEMS	32
+#define HDCP1_AKSV_SIZE         8
+
+/* parameters related to LC_Init message */
+#define MESSAGE_ID_SIZE            1
+#define LC_INIT_MESSAGE_SIZE       (MESSAGE_ID_SIZE+BITS_64_IN_BYTES)
+
+/* parameters related to SKE_Send_EKS message */
+#define SKE_SEND_EKS_MESSAGE_SIZE \
+	(MESSAGE_ID_SIZE+BITS_128_IN_BYTES+BITS_64_IN_BYTES)
+
+#define HDCP2_0_REPEATER_DOWNSTREAM BIT(1)
+#define HDCP1_DEVICE_DOWNSTREAM BIT(0)
+
+/* all message IDs */
+#define INVALID_MESSAGE_ID               0
+#define AKE_INIT_MESSAGE_ID              2
+#define AKE_SEND_CERT_MESSAGE_ID         3
+#define AKE_NO_STORED_KM_MESSAGE_ID      4
+#define AKE_STORED_KM_MESSAGE_ID         5
+#define AKE_SEND_H_PRIME_MESSAGE_ID      7
+#define AKE_SEND_PAIRING_INFO_MESSAGE_ID 8
+#define LC_INIT_MESSAGE_ID               9
+#define LC_SEND_L_PRIME_MESSAGE_ID      10
+#define SKE_SEND_EKS_MESSAGE_ID         11
+#define REP_SEND_RECV_ID_LIST_ID 12
+#define REP_SEND_ACK_ID      15
+#define REP_STREAM_MANAGE_ID     16
+#define REP_STREAM_READY_ID  17
+#define SKE_SEND_TYPE_ID                       18
+#define HDCP2P2_MAX_MESSAGES                   19
+
+#define HDCP1_SET_KEY_MESSAGE_ID       202
+#define HDCP1_SET_ENC_MESSAGE_ID       205
+
+#define BITS_8_IN_BYTES       1
+#define BITS_16_IN_BYTES      2
+#define BITS_24_IN_BYTES      3
+#define BITS_32_IN_BYTES      4
+#define BITS_40_IN_BYTES      5
+#define BITS_64_IN_BYTES      8
+#define BITS_128_IN_BYTES    16
+#define BITS_160_IN_BYTES    20
+#define BITS_256_IN_BYTES    32
+#define BITS_1024_IN_BYTES  128
+#define BITS_3072_IN_BYTES  384
+#define TXCAPS_SIZE           3
+#define RXCAPS_SIZE           3
+#define RXINFO_SIZE           2
+#define SEQ_NUM_V_SIZE        3
+
+#define HDCP_SRM_CMD_CHECK_DEVICE_ID 2
+
+#define RCVR_ID_SIZE BITS_40_IN_BYTES
+#define MAX_RCVR_IDS_ALLOWED_IN_LIST 31
+#define MAX_RCVR_ID_LIST_SIZE \
+		(RCVR_ID_SIZE*MAX_RCVR_IDS_ALLOWED_IN_LIST)
+/*
+ * minimum wait as per standard is 200 ms. keep it at 220 ms
+ * to be on the safe side.
+ */
+#define SLEEP_SET_HW_KEY_MS 220
+
+/* hdcp command status */
+#define HDCP_SUCCESS      0
+
+/* flags set by tz in response message */
+#define HDCP_TXMTR_SUBSTATE_INIT                              0
+#define HDCP_TXMTR_SUBSTATE_WAITING_FOR_RECIEVERID_LIST       1
+#define HDCP_TXMTR_SUBSTATE_PROCESSED_RECIEVERID_LIST         2
+#define HDCP_TXMTR_SUBSTATE_WAITING_FOR_STREAM_READY_MESSAGE  3
+#define HDCP_TXMTR_SUBSTATE_REPEATER_AUTH_COMPLETE            4
+
+#define HDCP_DEVICE_ID                         0x0008000
+#define HDCP_CREATE_DEVICE_ID(x)               (HDCP_DEVICE_ID | (x))
+
+#define HDCP_TXMTR_HDMI                        HDCP_CREATE_DEVICE_ID(1)
+#define HDCP_TXMTR_DP                          HDCP_CREATE_DEVICE_ID(2)
+#define HDCP_TXMTR_SERVICE_ID                 0x0001000
+#define SERVICE_CREATE_CMD(x)                 (HDCP_TXMTR_SERVICE_ID | x)
+
+#define HDCP_TXMTR_INIT                       SERVICE_CREATE_CMD(1)
+#define HDCP_TXMTR_DEINIT                     SERVICE_CREATE_CMD(2)
+#define HDCP_TXMTR_PROCESS_RECEIVED_MESSAGE   SERVICE_CREATE_CMD(3)
+#define HDCP_TXMTR_SEND_MESSAGE_TIMEOUT       SERVICE_CREATE_CMD(4)
+#define HDCP_TXMTR_SET_HW_KEY                 SERVICE_CREATE_CMD(5)
+#define HDCP_TXMTR_QUERY_STREAM_TYPE          SERVICE_CREATE_CMD(6)
+#define HDCP_TXMTR_GET_KSXORLC128_AND_RIV     SERVICE_CREATE_CMD(7)
+#define HDCP_TXMTR_PROVISION_KEY              SERVICE_CREATE_CMD(8)
+#define HDCP_TXMTR_GET_TOPOLOGY_INFO          SERVICE_CREATE_CMD(9)
+#define HDCP_TXMTR_UPDATE_SRM                 SERVICE_CREATE_CMD(10)
+#define HDCP_LIB_INIT                         SERVICE_CREATE_CMD(11)
+#define HDCP_LIB_DEINIT                       SERVICE_CREATE_CMD(12)
+#define HDCP_TXMTR_DELETE_PAIRING_INFO        SERVICE_CREATE_CMD(13)
+#define HDCP_TXMTR_GET_VERSION                SERVICE_CREATE_CMD(14)
+#define HDCP_TXMTR_VERIFY_KEY                 SERVICE_CREATE_CMD(15)
+#define HDCP_SESSION_INIT                     SERVICE_CREATE_CMD(16)
+#define HDCP_SESSION_DEINIT                   SERVICE_CREATE_CMD(17)
+#define HDCP_TXMTR_START_AUTHENTICATE         SERVICE_CREATE_CMD(18)
+#define HDCP_TXMTR_VALIDATE_RECEIVER_ID_LIST  SERVICE_CREATE_CMD(19)
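+
+/*
+ * Editorial note: SERVICE_CREATE_CMD() simply ORs the command number into the
+ * service id, e.g. HDCP_TXMTR_INIT == (0x0001000 | 1) == 0x00001001 and
+ * HDCP_TXMTR_UPDATE_SRM == (0x0001000 | 10) == 0x0000100a.
+ */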
+
+#define HCDP_TXMTR_GET_MAJOR_VERSION(v) (((v) >> 16) & 0xFF)
+#define HCDP_TXMTR_GET_MINOR_VERSION(v) (((v) >> 8) & 0xFF)
+#define HCDP_TXMTR_GET_PATCH_VERSION(v) ((v) & 0xFF)
+
+#define HDCP_CLIENT_MAJOR_VERSION 2
+#define HDCP_CLIENT_MINOR_VERSION 1
+#define HDCP_CLIENT_PATCH_VERSION 0
+#define HDCP_CLIENT_MAKE_VERSION(maj, min, patch) \
+	((((maj) & 0xFF) << 16) | (((min) & 0xFF) << 8) | ((patch) & 0xFF))
+
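+/*
+ * Editorial note: the client version is packed one byte per field, so
+ * HDCP_CLIENT_MAKE_VERSION(2, 1, 0) == 0x020100 and
+ * HCDP_TXMTR_GET_MAJOR_VERSION(0x020100) == 2 recovers the major number.
+ */
+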
+#define REAUTH_REQ BIT(3)
+#define LINK_INTEGRITY_FAILURE BIT(4)
+
+#define HDCP_LIB_EXECUTE(x) {\
+	if (handle->tethered)\
+		hdcp_lib_##x(handle);\
+	else\
+		queue_kthread_work(&handle->worker, &handle->wk_##x);\
+}
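+
+/*
+ * Editorial note: in tethered mode HDCP_LIB_EXECUTE(init) expands to a direct
+ * hdcp_lib_init(handle) call in the caller's context; otherwise it queues the
+ * matching work item (handle->wk_init) on the handle's kthread worker.
+ */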
+
+static const struct hdcp_msg_data hdcp_msg_lookup[HDCP2P2_MAX_MESSAGES] = {
+	[AKE_INIT_MESSAGE_ID] = { 2,
+		{ {"rtx", 0x69000, 8}, {"TxCaps", 0x69008, 3} },
+		0 },
+	[AKE_SEND_CERT_MESSAGE_ID] = { 3,
+		{ {"cert-rx", 0x6900B, 522}, {"rrx", 0x69215, 8},
+			{"RxCaps", 0x6921D, 3} },
+		0 },
+	[AKE_NO_STORED_KM_MESSAGE_ID] = { 1,
+		{ {"Ekpub_km", 0x69220, 128} },
+		0 },
+	[AKE_STORED_KM_MESSAGE_ID] = { 2,
+		{ {"Ekh_km", 0x692A0, 16}, {"m", 0x692B0, 16} },
+		0 },
+	[AKE_SEND_H_PRIME_MESSAGE_ID] = { 1,
+		{ {"H'", 0x692C0, 32} },
+		(1 << 1) },
+	[AKE_SEND_PAIRING_INFO_MESSAGE_ID] =  { 1,
+		{ {"Ekh_km", 0x692E0, 16} },
+		(1 << 2) },
+	[LC_INIT_MESSAGE_ID] = { 1,
+		{ {"rn", 0x692F0, 8} },
+		0 },
+	[LC_SEND_L_PRIME_MESSAGE_ID] = { 1,
+		{ {"L'", 0x692F8, 32} },
+		0 },
+	[SKE_SEND_EKS_MESSAGE_ID] = { 2,
+		{ {"Edkey_ks", 0x69318, 16}, {"riv", 0x69328, 8} },
+		0 },
+	[SKE_SEND_TYPE_ID] = { 1,
+		{ {"type", 0x69494, 1} },
+		0 },
+	[REP_SEND_RECV_ID_LIST_ID] = { 4,
+		{ {"RxInfo", 0x69330, 2}, {"seq_num_V", 0x69332, 3},
+			{"V'", 0x69335, 16}, {"ridlist", 0x69345, 155} },
+		(1 << 0) },
+	[REP_SEND_ACK_ID] = { 1,
+		{ {"V", 0x693E0, 16} },
+		0 },
+	[REP_STREAM_MANAGE_ID] = { 3,
+		{ {"seq_num_M", 0x693F0, 3}, {"k", 0x693F3, 2},
+			{"streamID_Type", 0x693F5, 126} },
+		0 },
+	[REP_STREAM_READY_ID] = { 1,
+		{ {"M'", 0x69473, 32} },
+		0 }
+};
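+
+/*
+ * Editorial note: each hdcp_msg_lookup entry appears to hold a parameter
+ * count, {name, sink address, length} triples and condition flags; the
+ * addresses sit in the 0x69000-0x694FF window where the HDCP 2.2 receiver
+ * registers live in the DisplayPort DPCD address space. The sizing also
+ * cross-checks with the defines above: RCVR_ID_SIZE is 5 bytes, so
+ * MAX_RCVR_ID_LIST_SIZE == 5 * 31 == 155, matching the "ridlist" length.
+ * Treat the field interpretation as an inference: struct hdcp_msg_data is
+ * defined elsewhere.
+ */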
+
+enum hdcp_state {
+	HDCP_STATE_INIT = 0x00,
+	HDCP_STATE_APP_LOADED = 0x01,
+	HDCP_STATE_SESSION_INIT = 0x02,
+	HDCP_STATE_TXMTR_INIT = 0x04,
+	HDCP_STATE_AUTHENTICATED = 0x08,
+	HDCP_STATE_ERROR = 0x10
+};
+
+enum hdcp_element {
+	HDCP_TYPE_UNKNOWN,
+	HDCP_TYPE_RECEIVER,
+	HDCP_TYPE_REPEATER,
+};
+
+enum hdcp_version {
+	HDCP_VERSION_UNKNOWN,
+	HDCP_VERSION_2_2,
+	HDCP_VERSION_1_4
+};
+
+struct receiver_info {
+	unsigned char rcvrInfo[RCVR_ID_SIZE];
+	enum hdcp_element elem_type;
+	enum hdcp_version hdcp_version;
+};
+
+struct topology_info {
+	unsigned int nNumRcvrs;
+	struct receiver_info rcvinfo[MAX_TOPOLOGY_ELEMS];
+};
+
+struct __attribute__ ((__packed__)) hdcp1_key_set_req {
+	uint32_t commandid;
+};
+
+struct __attribute__ ((__packed__)) hdcp1_key_set_rsp {
+	uint32_t commandid;
+	uint32_t ret;
+	uint8_t ksv[HDCP1_AKSV_SIZE];
+};
+
+struct __attribute__ ((__packed__)) hdcp_version_req {
+	uint32_t commandid;
+};
+
+struct __attribute__ ((__packed__)) hdcp_version_rsp {
+	uint32_t commandid;
+	uint32_t commandId;
+	uint32_t appversion;
+};
+
+struct __attribute__ ((__packed__)) hdcp_verify_key_req {
+	uint32_t commandid;
+};
+
+struct __attribute__ ((__packed__)) hdcp_verify_key_rsp {
+	uint32_t status;
+	uint32_t commandId;
+};
+
+struct __attribute__ ((__packed__)) hdcp_lib_init_req_v1 {
+	uint32_t commandid;
+};
+
+struct __attribute__ ((__packed__)) hdcp_lib_init_rsp_v1 {
+	uint32_t status;
+	uint32_t commandid;
+	uint32_t ctxhandle;
+	uint32_t timeout;
+	uint32_t msglen;
+	uint8_t message[MAX_TX_MESSAGE_SIZE];
+};
+
+struct __attribute__ ((__packed__)) hdcp_lib_init_req {
+	uint32_t commandid;
+	uint32_t clientversion;
+};
+
+struct __attribute__ ((__packed__)) hdcp_lib_init_rsp {
+	uint32_t status;
+	uint32_t commandid;
+	uint32_t appversion;
+};
+
+struct __attribute__ ((__packed__)) hdcp_lib_deinit_req {
+	uint32_t commandid;
+};
+
+struct __attribute__ ((__packed__)) hdcp_lib_deinit_rsp {
+	uint32_t status;
+	uint32_t commandid;
+};
+
+struct __attribute__ ((__packed__)) hdcp_lib_session_init_req {
+	uint32_t commandid;
+	uint32_t deviceid;
+};
+
+struct __attribute__ ((__packed__)) hdcp_lib_session_init_rsp {
+	uint32_t status;
+	uint32_t commandid;
+	uint32_t sessionid;
+};
+
+struct __attribute__ ((__packed__)) hdcp_lib_session_deinit_req {
+	uint32_t commandid;
+	uint32_t sessionid;
+};
+
+struct __attribute__ ((__packed__)) hdcp_lib_session_deinit_rsp {
+	uint32_t status;
+	uint32_t commandid;
+};
+
+struct __attribute__ ((__packed__)) hdcp_tx_init_req_v1 {
+	uint32_t commandid;
+};
+
+struct __attribute__ ((__packed__)) hdcp_tx_init_rsp_v1 {
+	uint32_t status;
+	uint32_t commandid;
+	uint32_t ctxhandle;
+	uint32_t timeout;
+	uint32_t msglen;
+	uint8_t message[MAX_TX_MESSAGE_SIZE];
+};
+
+struct __attribute__ ((__packed__)) hdcp_tx_init_req {
+	uint32_t commandid;
+	uint32_t sessionid;
+};
+
+struct __attribute__ ((__packed__)) hdcp_tx_init_rsp {
+	uint32_t status;
+	uint32_t commandid;
+	uint32_t ctxhandle;
+};
+
+struct __attribute__ ((__packed__)) hdcp_deinit_req {
+	uint32_t commandid;
+	uint32_t ctxhandle;
+};
+
+struct __attribute__ ((__packed__)) hdcp_deinit_rsp {
+	uint32_t status;
+	uint32_t commandid;
+};
+
+struct __attribute__ ((__packed__)) hdcp_rcvd_msg_req {
+	uint32_t commandid;
+	uint32_t ctxhandle;
+	uint32_t msglen;
+	uint8_t msg[MAX_RX_MESSAGE_SIZE];
+};
+
+struct __attribute__ ((__packed__)) hdcp_rcvd_msg_rsp {
+	uint32_t status;
+	uint32_t commandid;
+	uint32_t state;
+	uint32_t timeout;
+	uint32_t flag;
+	uint32_t msglen;
+	uint8_t msg[MAX_TX_MESSAGE_SIZE];
+};
+
+struct __attribute__ ((__packed__)) hdcp_set_hw_key_req {
+	uint32_t commandid;
+	uint32_t ctxhandle;
+};
+
+struct __attribute__ ((__packed__)) hdcp_set_hw_key_rsp {
+	uint32_t status;
+	uint32_t commandid;
+};
+
+struct __attribute__ ((__packed__)) hdcp_send_timeout_req {
+	uint32_t commandid;
+	uint32_t ctxhandle;
+};
+
+struct __attribute__ ((__packed__)) hdcp_send_timeout_rsp {
+	uint32_t status;
+	uint32_t commandid;
+	uint32_t timeout;
+	uint32_t msglen;
+	uint8_t message[MAX_TX_MESSAGE_SIZE];
+};
+
+struct __attribute__ ((__packed__)) hdcp_query_stream_type_req {
+	uint32_t commandid;
+	uint32_t ctxhandle;
+};
+
+struct __attribute__ ((__packed__)) hdcp_query_stream_type_rsp {
+	uint32_t status;
+	uint32_t commandid;
+	uint32_t timeout;
+	uint32_t msglen;
+	uint8_t msg[MAX_TX_MESSAGE_SIZE];
+};
+
+struct __attribute__ ((__packed__)) hdcp_set_stream_type_req {
+	uint32_t commandid;
+	uint32_t ctxhandle;
+	uint8_t streamtype;
+};
+
+struct __attribute__ ((__packed__)) hdcp_set_stream_type_rsp {
+	uint32_t status;
+	uint32_t commandid;
+	uint32_t timeout;
+	uint32_t msglen;
+	uint8_t message[MAX_TX_MESSAGE_SIZE];
+};
+
+struct __attribute__ ((__packed__)) hdcp_update_srm_req {
+	uint32_t commandid;
+	uint32_t ctxhandle;
+	uint32_t srmoffset;
+	uint32_t srmlength;
+};
+
+struct __attribute__ ((__packed__)) hdcp_update_srm_rsp {
+	uint32_t status;
+	uint32_t commandid;
+};
+
+struct __attribute__ ((__packed__)) hdcp_srm_check_device_ids_req {
+	uint32_t commandid;
+	uint32_t num_device_ids;
+	uint8_t device_ids[1];
+};
+
+struct __attribute__ ((__packed__)) hdcp_srm_check_device_ids_rsp {
+	uint32_t commandid;
+	int32_t retval;
+};
+
+struct __attribute__ ((__packed__)) hdcp_get_topology_req {
+	uint32_t commandid;
+	uint32_t ctxhandle;
+};
+
+struct __attribute__ ((__packed__)) hdcp_get_topology_rsp {
+	uint32_t status;
+	uint32_t commandid;
+	struct topology_info topologyinfo;
+};
+
+struct __attribute__ ((__packed__)) rxvr_info_struct {
+	uint8_t rcvrCert[522];
+	uint8_t rrx[BITS_64_IN_BYTES];
+	uint8_t rxcaps[RXCAPS_SIZE];
+	bool repeater;
+};
+
+struct __attribute__ ((__packed__)) repeater_info_struct {
+	uint8_t RxInfo[RXINFO_SIZE];
+	uint8_t seq_num_V[SEQ_NUM_V_SIZE];
+	bool seq_num_V_Rollover_flag;
+	uint8_t ReceiverIDList[MAX_RCVR_ID_LIST_SIZE];
+	uint32_t ReceiverIDListLen;
+};
+
+struct __attribute__ ((__packed__)) hdcp1_set_enc_req {
+	uint32_t commandid;
+	uint32_t enable;
+};
+
+struct __attribute__ ((__packed__)) hdcp1_set_enc_rsp {
+	uint32_t commandid;
+	uint32_t ret;
+};
+
+struct __attribute__ ((__packed__)) hdcp_start_auth_req {
+	uint32_t commandid;
+	uint32_t ctxHandle;
+};
+
+struct __attribute__ ((__packed__)) hdcp_start_auth_rsp {
+	uint32_t status;
+	uint32_t commandid;
+	uint32_t ctxhandle;
+	uint32_t timeout;
+	uint32_t msglen;
+	uint8_t message[MAX_TX_MESSAGE_SIZE];
+};
+
+struct __attribute__ ((__packed__)) hdcp_rcv_id_list_req {
+	uint32_t commandid;
+	uint32_t ctxHandle;
+};
+struct __attribute__ ((__packed__)) hdcp_rcv_id_list_rsp {
+	uint32_t status;
+	uint32_t commandid;
+};
+
+/*
+ * struct hdcp1_lib_handle - handle for the hdcp1 client
+ * @qsee_handle - for sending commands to the hdcp1 TA
+ * @srm_handle - for sending commands to the SRM TA
+ * @client_ops - APIs exposed by the hdcp1 client
+ * @client_ctx - client context maintained by hdmi
+ */
+struct hdcp1_lib_handle {
+	struct qseecom_handle *qsee_handle;
+	struct qseecom_handle *srm_handle;
+	struct hdcp_client_ops *client_ops;
+	void *client_ctx;
+};
+
+/*
+ * struct hdcp_lib_handle - handle for the hdcp client
+ * @qseecom_handle - for sending commands to qseecom
+ * @listener_buf - buffer containing the message shared with the client
+ * @msglen - size of the message in the buffer
+ * @tz_ctxhandle - context handle shared with tz
+ * @hdcp_timeout - timeout in msecs for hdcp messages
+ * @client_ctx - client context maintained by hdmi
+ * @client_ops - APIs exposed by the hdcp client
+ * @msg_lock - protects the message buffer
+ * @wakeup_mutex - serializes wakeup commands from the client
+ */
+struct hdcp_lib_handle {
+	unsigned char *listener_buf;
+	uint32_t msglen;
+	uint32_t tz_ctxhandle;
+	uint32_t hdcp_timeout;
+	uint32_t timeout_left;
+	uint32_t wait_timeout;
+	bool no_stored_km_flag;
+	bool feature_supported;
+	bool authenticated;
+	void *client_ctx;
+	struct hdcp_client_ops *client_ops;
+	struct mutex msg_lock;
+	struct mutex wakeup_mutex;
+	enum hdcp_state hdcp_state;
+	enum hdcp_lib_wakeup_cmd wakeup_cmd;
+	bool repeater_flag;
+	bool non_2p2_present;
+	bool update_stream;
+	bool tethered;
+	struct qseecom_handle *qseecom_handle;
+	int last_msg_sent;
+	int last_msg;
+	char *last_msg_recvd_buf;
+	uint32_t last_msg_recvd_len;
+	atomic_t hdcp_off;
+	uint32_t session_id;
+	bool legacy_app;
+	enum hdcp_device_type device_type;
+
+	struct task_struct *thread;
+	struct completion poll_wait;
+
+	struct kthread_worker worker;
+	struct kthread_work wk_init;
+	struct kthread_work wk_msg_sent;
+	struct kthread_work wk_msg_recvd;
+	struct kthread_work wk_timeout;
+	struct kthread_work wk_clean;
+	struct kthread_work wk_wait;
+	struct kthread_work wk_stream;
+
+	int (*hdcp_app_init)(struct hdcp_lib_handle *handle);
+	int (*hdcp_txmtr_init)(struct hdcp_lib_handle *handle);
+};
+
+struct hdcp_lib_message_map {
+	int msg_id;
+	const char *msg_name;
+};
+
+struct msm_hdcp_mgr {
+	struct platform_device *pdev;
+	dev_t dev_num;
+	struct cdev cdev;
+	struct class *class;
+	struct device *device;
+	struct HDCP_V2V1_MSG_TOPOLOGY cached_tp;
+	u32 tp_msgid;
+	void *client_ctx;
+	struct hdcp_lib_handle *handle;
+};
+
+#define CLASS_NAME "hdcp"
+#define DRIVER_NAME "msm_hdcp"
+
+static struct msm_hdcp_mgr *hdcp_drv_mgr;
+static struct hdcp_lib_handle *drv_client_handle;
+
+static void hdcp_lib_clean(struct hdcp_lib_handle *handle);
+static void hdcp_lib_init(struct hdcp_lib_handle *handle);
+static void hdcp_lib_msg_sent(struct hdcp_lib_handle *handle);
+static void hdcp_lib_msg_recvd(struct hdcp_lib_handle *handle);
+static void hdcp_lib_timeout(struct hdcp_lib_handle *handle);
+static void hdcp_lib_stream(struct hdcp_lib_handle *handle);
+static int hdcp_lib_txmtr_init(struct hdcp_lib_handle *handle);
+static int hdcp_lib_txmtr_init_legacy(struct hdcp_lib_handle *handle);
+
+static struct qseecom_handle *hdcpsrm_handle;
+
+static struct hdcp1_lib_handle *hdcp1_handle;
+
+static bool hdcp1_supported = true;
+static bool hdcp1_enc_enabled;
+static DEFINE_MUTEX(hdcp1_ta_cmd_lock);
+
+static const char *hdcp_lib_message_name(int msg_id)
+{
+	/*
+	 * Message ID map. The first number indicates the message number
+	 * assigned to the message by the HDCP 2.2 spec. This is also the first
+	 * byte of every HDCP 2.2 authentication protocol message.
+	 */
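+	/* e.g. every AKE_Send_Cert message starts with the byte 0x03 */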
+	static struct hdcp_lib_message_map hdcp_lib_msg_map[] = {
+		{2, "AKE_INIT"},
+		{3, "AKE_SEND_CERT"},
+		{4, "AKE_NO_STORED_KM"},
+		{5, "AKE_STORED_KM"},
+		{7, "AKE_SEND_H_PRIME"},
+		{8, "AKE_SEND_PAIRING_INFO"},
+		{9, "LC_INIT"},
+		{10, "LC_SEND_L_PRIME"},
+		{11, "SKE_SEND_EKS"},
+		{12, "REPEATER_AUTH_SEND_RECEIVERID_LIST"},
+		{15, "REPEATER_AUTH_SEND_ACK"},
+		{16, "REPEATER_AUTH_STREAM_MANAGE"},
+		{17, "REPEATER_AUTH_STREAM_READY"},
+		{18, "SKE_SEND_TYPE_ID"},
+	};
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(hdcp_lib_msg_map); i++) {
+		if (msg_id == hdcp_lib_msg_map[i].msg_id)
+			return hdcp_lib_msg_map[i].msg_name;
+	}
+	return "UNKNOWN";
+}
+
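+/*
+ * Walk the HDCP 2.2 authentication sequence encoded in the switch below:
+ * AKE (cert exchange, km, H'), pairing info when no km was stored, the
+ * locality check (LC_Init/L'), SKE_Send_Eks, then SKE_Send_Type for DP
+ * non-repeaters or the receiver-ID-list/stream-manage exchange for
+ * repeaters.
+ */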
+static int hdcp_lib_get_next_message(struct hdcp_lib_handle *handle,
+				     struct hdmi_hdcp_wakeup_data *data)
+{
+	switch (handle->last_msg) {
+	case INVALID_MESSAGE_ID:
+		return AKE_INIT_MESSAGE_ID;
+	case AKE_INIT_MESSAGE_ID:
+		return AKE_SEND_CERT_MESSAGE_ID;
+	case AKE_SEND_CERT_MESSAGE_ID:
+		if (handle->no_stored_km_flag)
+			return AKE_NO_STORED_KM_MESSAGE_ID;
+		else
+			return AKE_STORED_KM_MESSAGE_ID;
+	case AKE_STORED_KM_MESSAGE_ID:
+	case AKE_NO_STORED_KM_MESSAGE_ID:
+		return AKE_SEND_H_PRIME_MESSAGE_ID;
+	case AKE_SEND_H_PRIME_MESSAGE_ID:
+		if (handle->no_stored_km_flag)
+			return AKE_SEND_PAIRING_INFO_MESSAGE_ID;
+		else
+			return LC_INIT_MESSAGE_ID;
+	case AKE_SEND_PAIRING_INFO_MESSAGE_ID:
+		return LC_INIT_MESSAGE_ID;
+	case LC_INIT_MESSAGE_ID:
+		return LC_SEND_L_PRIME_MESSAGE_ID;
+	case LC_SEND_L_PRIME_MESSAGE_ID:
+		return SKE_SEND_EKS_MESSAGE_ID;
+	case SKE_SEND_EKS_MESSAGE_ID:
+		if (!handle->repeater_flag &&
+			handle->device_type == HDCP_TXMTR_DP)
+			return SKE_SEND_TYPE_ID;
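+		/* fall through: HDMI and repeaters skip SKE_Send_Type */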
+	case SKE_SEND_TYPE_ID:
+	case REP_STREAM_READY_ID:
+	case REP_SEND_ACK_ID:
+		if (!handle->repeater_flag)
+			return INVALID_MESSAGE_ID;
+
+		if (data->cmd == HDMI_HDCP_WKUP_CMD_SEND_MESSAGE)
+			return REP_STREAM_MANAGE_ID;
+		else
+			return REP_SEND_RECV_ID_LIST_ID;
+	case REP_SEND_RECV_ID_LIST_ID:
+		return REP_SEND_ACK_ID;
+	case REP_STREAM_MANAGE_ID:
+		return REP_STREAM_READY_ID;
+	default:
+		pr_err("unknown message ID (%d)\n", handle->last_msg);
+		return -EINVAL;
+	}
+}
+
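+/*
+ * Arm a worst-case wait for the sink's next message; the values below
+ * (1s for H' with no stored km, 250ms otherwise, 3s for the first
+ * receiver ID list) approximate the HDCP 2.2 message timeouts.
+ */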
+static void hdcp_lib_wait_for_response(struct hdcp_lib_handle *handle,
+				       struct hdmi_hdcp_wakeup_data *data)
+{
+	switch (handle->last_msg) {
+	case AKE_SEND_H_PRIME_MESSAGE_ID:
+		if (handle->no_stored_km_flag)
+			handle->wait_timeout = HZ;
+		else
+			handle->wait_timeout = HZ / 4;
+		break;
+	case AKE_SEND_PAIRING_INFO_MESSAGE_ID:
+		handle->wait_timeout = HZ / 4;
+		break;
+	case REP_SEND_RECV_ID_LIST_ID:
+		if (!handle->authenticated)
+			handle->wait_timeout = HZ * 3;
+		else
+			handle->wait_timeout = 0;
+		break;
+	default:
+		handle->wait_timeout = 0;
+	}
+
+	if (handle->wait_timeout)
+		queue_kthread_work(&handle->worker, &handle->wk_wait);
+}
+
+static void hdcp_lib_wakeup_client(struct hdcp_lib_handle *handle,
+				  struct hdmi_hdcp_wakeup_data *data)
+{
+	int rc = 0, i;
+
+	if (!handle || !handle->client_ops || !handle->client_ops->wakeup ||
+	    !data || (data->cmd == HDMI_HDCP_WKUP_CMD_INVALID))
+		return;
+
+	data->abort_mask = REAUTH_REQ | LINK_INTEGRITY_FAILURE;
+
+	if (data->cmd == HDMI_HDCP_WKUP_CMD_RECV_MESSAGE ||
+	    data->cmd == HDMI_HDCP_WKUP_CMD_LINK_POLL)
+		handle->last_msg = hdcp_lib_get_next_message(handle, data);
+
+	if (handle->last_msg != INVALID_MESSAGE_ID &&
+	    data->cmd != HDMI_HDCP_WKUP_CMD_STATUS_SUCCESS &&
+	    data->cmd != HDMI_HDCP_WKUP_CMD_STATUS_FAILED) {
+		u32 msg_num, rx_status;
+		const struct hdcp_msg_part *msg;
+
+		pr_debug("lib->client: %s (%s)\n",
+			hdmi_hdcp_cmd_to_str(data->cmd),
+			hdcp_lib_message_name(handle->last_msg));
+
+		data->message_data = &hdcp_msg_lookup[handle->last_msg];
+
+		msg_num = data->message_data->num_messages;
+		msg = data->message_data->messages;
+		rx_status = data->message_data->rx_status;
+
+		pr_debug("%10s | %6s | %4s\n", "name", "offset", "len");
+
+		for (i = 0; i < msg_num; i++)
+			pr_debug("%10s | %6x | %4d\n",
+				msg[i].name, msg[i].offset,
+				msg[i].length);
+	} else {
+		pr_debug("lib->client: %s\n", hdmi_hdcp_cmd_to_str(data->cmd));
+	}
+
+	rc = handle->client_ops->wakeup(data);
+	if (rc)
+		pr_err("error sending %s to client\n",
+		       hdmi_hdcp_cmd_to_str(data->cmd));
+
+	hdcp_lib_wait_for_response(handle, data);
+}
+
+static inline void hdcp_lib_send_message(struct hdcp_lib_handle *handle)
+{
+	char msg_name[50];
+	struct hdmi_hdcp_wakeup_data cdata = {
+		HDMI_HDCP_WKUP_CMD_SEND_MESSAGE
+	};
+
+	cdata.context = handle->client_ctx;
+	cdata.send_msg_buf = handle->listener_buf;
+	cdata.send_msg_len = handle->msglen;
+	cdata.timeout = handle->hdcp_timeout;
+
+	snprintf(msg_name, sizeof(msg_name), "%s: ",
+		hdcp_lib_message_name((int)cdata.send_msg_buf[0]));
+
+	print_hex_dump_debug(msg_name,
+		DUMP_PREFIX_NONE, 16, 1, cdata.send_msg_buf,
+		cdata.send_msg_len, false);
+
+	hdcp_lib_wakeup_client(handle, &cdata);
+}
+
+static int hdcp_lib_enable_encryption(struct hdcp_lib_handle *handle)
+{
+	int rc = 0;
+	struct hdcp_set_hw_key_req *req_buf;
+	struct hdcp_set_hw_key_rsp *rsp_buf;
+
+	if (!handle || !handle->qseecom_handle ||
+	    !handle->qseecom_handle->sbuf) {
+		pr_err("invalid handle\n");
+		rc = -EINVAL;
+		goto error;
+	}
+
+	/*
+	 * wait at least 200ms before enabling encryption
+	 * as per the hdcp2p2 specification
+	 */
+	msleep(SLEEP_SET_HW_KEY_MS);
+
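+	/*
+	 * request and response share the qseecom buffer: the request is
+	 * written at the start of sbuf and the response is parsed from
+	 * the next QSEECOM_ALIGN'ed offset
+	 */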
+	req_buf = (struct hdcp_set_hw_key_req *)(handle->qseecom_handle->sbuf);
+	req_buf->commandid = HDCP_TXMTR_SET_HW_KEY;
+	req_buf->ctxhandle = handle->tz_ctxhandle;
+
+	rsp_buf = (struct hdcp_set_hw_key_rsp *)
+	    (handle->qseecom_handle->sbuf +
+	     QSEECOM_ALIGN(sizeof(struct hdcp_set_hw_key_req)));
+
+	rc = qseecom_send_command(handle->qseecom_handle, req_buf,
+				  QSEECOM_ALIGN(sizeof
+						(struct hdcp_set_hw_key_req)),
+				  rsp_buf,
+				  QSEECOM_ALIGN(sizeof
+						(struct hdcp_set_hw_key_rsp)));
+
+	if ((rc < 0) || (rsp_buf->status != HDCP_SUCCESS)) {
+		pr_err("qseecom cmd failed with err = %d status = %d\n",
+		       rc, rsp_buf->status);
+		rc = -EINVAL;
+		goto error;
+	}
+
+	/* reached an authenticated state */
+	handle->hdcp_state |= HDCP_STATE_AUTHENTICATED;
+
+	pr_debug("success\n");
+	return 0;
+error:
+	if (handle && !atomic_read(&handle->hdcp_off))
+		HDCP_LIB_EXECUTE(clean);
+
+	return rc;
+}
+
+static int hdcp_lib_get_version(struct hdcp_lib_handle *handle)
+{
+	int rc = 0;
+	struct hdcp_version_req *req_buf;
+	struct hdcp_version_rsp *rsp_buf;
+	uint32_t app_major_version = 0;
+
+	if (!handle) {
+		pr_err("invalid input\n");
+		rc = -EINVAL;
+		goto exit;
+	}
+
+	if (!(handle->hdcp_state & HDCP_STATE_APP_LOADED)) {
+		pr_err("library not loaded\n");
+		return rc;
+	}
+
+	/* get the TZ hdcp2p2 app version */
+	req_buf = (struct hdcp_version_req *)handle->qseecom_handle->sbuf;
+	req_buf->commandid = HDCP_TXMTR_GET_VERSION;
+
+	rsp_buf = (struct hdcp_version_rsp *)
+	    (handle->qseecom_handle->sbuf +
+	     QSEECOM_ALIGN(sizeof(struct hdcp_version_req)));
+
+	rc = qseecom_send_command(handle->qseecom_handle,
+				  req_buf,
+				  QSEECOM_ALIGN(sizeof
+						(struct hdcp_version_req)),
+				  rsp_buf,
+				  QSEECOM_ALIGN(sizeof
+						(struct hdcp_version_rsp)));
+
+	if (rc < 0) {
+		pr_err("qseecom cmd failed err = %d\n", rc);
+		goto exit;
+	}
+
+	app_major_version = HCDP_TXMTR_GET_MAJOR_VERSION(rsp_buf->appversion);
+
+	pr_debug("hdcp2p2 app major version %d, app version %d\n",
+		 app_major_version, rsp_buf->appversion);
+
+	if (app_major_version == 1)
+		handle->legacy_app = true;
+
+exit:
+	return rc;
+}
+
+static int hdcp_lib_verify_keys(struct hdcp_lib_handle *handle)
+{
+	int rc = -EINVAL;
+	struct hdcp_verify_key_req *req_buf;
+	struct hdcp_verify_key_rsp *rsp_buf;
+
+	if (!handle) {
+		pr_err("invalid input\n");
+		goto exit;
+	}
+
+	if (!(handle->hdcp_state & HDCP_STATE_APP_LOADED)) {
+		pr_err("app not loaded\n");
+		goto exit;
+	}
+
+	req_buf = (struct hdcp_verify_key_req *)handle->qseecom_handle->sbuf;
+	req_buf->commandid = HDCP_TXMTR_VERIFY_KEY;
+
+	rsp_buf = (struct hdcp_verify_key_rsp *)
+	    (handle->qseecom_handle->sbuf +
+	     QSEECOM_ALIGN(sizeof(struct hdcp_verify_key_req)));
+
+	rc = qseecom_send_command(handle->qseecom_handle,
+				  req_buf,
+				  QSEECOM_ALIGN(sizeof
+						(struct hdcp_verify_key_req)),
+				  rsp_buf,
+				  QSEECOM_ALIGN(sizeof
+						(struct hdcp_verify_key_rsp)));
+
+	if (rc < 0) {
+		pr_err("qseecom cmd failed err = %d\n", rc);
+		goto exit;
+	}
+
+	return rsp_buf->status;
+exit:
+	return rc;
+}
+
+static int hdcp_app_init_legacy(struct hdcp_lib_handle *handle)
+{
+	int rc = 0;
+	struct hdcp_lib_init_req_v1 *req_buf;
+	struct hdcp_lib_init_rsp_v1 *rsp_buf;
+
+	if (!handle) {
+		pr_err("invalid input\n");
+		rc = -EINVAL;
+		goto exit;
+	}
+
+	if (!handle->legacy_app) {
+		pr_err("wrong init function\n");
+		rc = -EINVAL;
+		goto exit;
+	}
+
+	if (!(handle->hdcp_state & HDCP_STATE_APP_LOADED)) {
+		pr_err("library not loaded\n");
+		goto exit;
+	}
+
+	/* initialize the library by sending HDCP_LIB_INIT */
+	req_buf = (struct hdcp_lib_init_req_v1 *)handle->qseecom_handle->sbuf;
+	req_buf->commandid = HDCP_LIB_INIT;
+	rsp_buf = (struct hdcp_lib_init_rsp_v1 *)
+	    (handle->qseecom_handle->sbuf +
+	     QSEECOM_ALIGN(sizeof(struct hdcp_lib_init_req_v1)));
+
+	rc = qseecom_send_command(handle->qseecom_handle,
+				  req_buf,
+				  QSEECOM_ALIGN(sizeof
+						(struct hdcp_lib_init_req_v1)),
+				  rsp_buf,
+				  QSEECOM_ALIGN(sizeof
+						(struct hdcp_lib_init_rsp_v1)));
+
+	if (rc < 0) {
+		pr_err("qseecom cmd failed err = %d\n", rc);
+		goto exit;
+	}
+
+	pr_debug("success\n");
+
+exit:
+	return rc;
+}
+
+static int hdcp_app_init(struct hdcp_lib_handle *handle)
+{
+	int rc = 0;
+	struct hdcp_lib_init_req *req_buf;
+	struct hdcp_lib_init_rsp *rsp_buf;
+	uint32_t app_minor_version = 0;
+
+	if (!handle) {
+		pr_err("invalid input\n");
+		rc = -EINVAL;
+		goto exit;
+	}
+
+	if (handle->legacy_app) {
+		pr_err("wrong init function\n");
+		rc = -EINVAL;
+		goto exit;
+	}
+
+	if (!(handle->hdcp_state & HDCP_STATE_APP_LOADED)) {
+		pr_err("library not loaded\n");
+		goto exit;
+	}
+
+	/* initialize the library by sending HDCP_LIB_INIT */
+	req_buf = (struct hdcp_lib_init_req *)handle->qseecom_handle->sbuf;
+	req_buf->commandid = HDCP_LIB_INIT;
+	req_buf->clientversion =
+	    HDCP_CLIENT_MAKE_VERSION(HDCP_CLIENT_MAJOR_VERSION,
+				     HDCP_CLIENT_MINOR_VERSION,
+				     HDCP_CLIENT_PATCH_VERSION);
+	rsp_buf = (struct hdcp_lib_init_rsp *)
+	    (handle->qseecom_handle->sbuf +
+	     QSEECOM_ALIGN(sizeof(struct hdcp_lib_init_req)));
+
+	rc = qseecom_send_command(handle->qseecom_handle,
+				  req_buf,
+				  QSEECOM_ALIGN(sizeof
+						(struct hdcp_lib_init_req)),
+				  rsp_buf,
+				  QSEECOM_ALIGN(sizeof
+						(struct hdcp_lib_init_rsp)));
+
+	if (rc < 0) {
+		pr_err("qseecom cmd failed err = %d\n", rc);
+		goto exit;
+	}
+
+	app_minor_version = HCDP_TXMTR_GET_MINOR_VERSION(rsp_buf->appversion);
+	if (app_minor_version != HDCP_CLIENT_MINOR_VERSION) {
+		pr_err
+		    ("client-app minor version mismatch app(%d), client(%d)\n",
+		     app_minor_version, HDCP_CLIENT_MINOR_VERSION);
+		rc = -1;
+		goto exit;
+	}
+	pr_debug("success\n");
+	pr_debug("client version major(%d), minor(%d), patch(%d)\n",
+		 HDCP_CLIENT_MAJOR_VERSION, HDCP_CLIENT_MINOR_VERSION,
+		 HDCP_CLIENT_PATCH_VERSION);
+	pr_debug("app version major(%d), minor(%d), patch(%d)\n",
+		 HCDP_TXMTR_GET_MAJOR_VERSION(rsp_buf->appversion),
+		 HCDP_TXMTR_GET_MINOR_VERSION(rsp_buf->appversion),
+		 HCDP_TXMTR_GET_PATCH_VERSION(rsp_buf->appversion));
+
+exit:
+	return rc;
+}
+
+static int hdcp_lib_library_load(struct hdcp_lib_handle *handle)
+{
+	int rc = 0;
+
+	if (!handle) {
+		pr_err("invalid input\n");
+		rc = -EINVAL;
+		goto exit;
+	}
+
+	if (handle->hdcp_state & HDCP_STATE_APP_LOADED) {
+		pr_err("library already loaded\n");
+		goto exit;
+	}
+
+	/*
+	 * allocate resources for the qseecom handle;
+	 * the library itself is initialized below via HDCP_LIB_INIT
+	 */
+	rc = qseecom_start_app(&(handle->qseecom_handle),
+			       TZAPP_NAME, QSEECOM_SBUFF_SIZE);
+	if (rc) {
+		pr_err("qseecom_start_app failed %d\n", rc);
+		goto exit;
+	}
+
+	if (!hdcpsrm_handle) {
+		rc = qseecom_start_app(&hdcpsrm_handle,
+					SRMAPP_NAME, QSEECOM_SBUFF_SIZE);
+		if (rc) {
+			pr_err("qseecom_start_app failed for SRM TA %d\n", rc);
+			goto exit;
+		}
+	}
+
+	handle->hdcp_state |= HDCP_STATE_APP_LOADED;
+	pr_debug("qseecom_start_app success\n");
+
+	rc = hdcp_lib_get_version(handle);
+	if (rc) {
+		pr_err("library get version failed\n");
+		goto exit;
+	}
+
+	if (handle->legacy_app) {
+		handle->hdcp_app_init = hdcp_app_init_legacy;
+		handle->hdcp_txmtr_init = hdcp_lib_txmtr_init_legacy;
+	} else {
+		handle->hdcp_app_init = hdcp_app_init;
+		handle->hdcp_txmtr_init = hdcp_lib_txmtr_init;
+	}
+
+	if (handle->hdcp_app_init == NULL) {
+		pr_err("invalid app init function pointer\n");
+		rc = -EINVAL;
+		goto exit;
+	}
+
+	rc = handle->hdcp_app_init(handle);
+	if (rc) {
+		pr_err("app init failed\n");
+		goto exit;
+	}
+exit:
+	return rc;
+}
+
+static int hdcp_lib_library_unload(struct hdcp_lib_handle *handle)
+{
+	int rc = 0;
+	struct hdcp_lib_deinit_req *req_buf;
+	struct hdcp_lib_deinit_rsp *rsp_buf;
+
+	if (!handle || !handle->qseecom_handle ||
+	    !handle->qseecom_handle->sbuf) {
+		pr_err("invalid handle\n");
+		rc = -EINVAL;
+		goto exit;
+	}
+
+	if (!(handle->hdcp_state & HDCP_STATE_APP_LOADED)) {
+		pr_err("library not loaded\n");
+		return rc;
+	}
+
+	/* unloading app by sending hdcp_lib_deinit cmd */
+	req_buf = (struct hdcp_lib_deinit_req *)handle->qseecom_handle->sbuf;
+	req_buf->commandid = HDCP_LIB_DEINIT;
+	rsp_buf = (struct hdcp_lib_deinit_rsp *)
+	    (handle->qseecom_handle->sbuf +
+	     QSEECOM_ALIGN(sizeof(struct hdcp_lib_deinit_req)));
+
+	rc = qseecom_send_command(handle->qseecom_handle,
+				  req_buf,
+				  QSEECOM_ALIGN(sizeof
+						(struct hdcp_lib_deinit_req)),
+				  rsp_buf,
+				  QSEECOM_ALIGN(sizeof
+						(struct hdcp_lib_deinit_rsp)));
+
+	if (rc < 0) {
+		pr_err("qseecom cmd failed err = %d\n", rc);
+		goto exit;
+	}
+
+	/* deallocate the resources for qseecom hdcp2p2 handle */
+	rc = qseecom_shutdown_app(&handle->qseecom_handle);
+	if (rc) {
+		pr_err("hdcp2p2 qseecom_shutdown_app failed err: %d\n", rc);
+		goto exit;
+	}
+
+	/* deallocate the resources for qseecom hdcpsrm handle */
+	rc = qseecom_shutdown_app(&hdcpsrm_handle);
+	if (rc) {
+		pr_err("hdcpsrm qseecom_shutdown_app failed err: %d\n", rc);
+		goto exit;
+	}
+
+	handle->hdcp_state &= ~HDCP_STATE_APP_LOADED;
+	pr_debug("success\n");
+exit:
+	return rc;
+}
+
+static int hdcp_lib_session_init(struct hdcp_lib_handle *handle)
+{
+	int rc = 0;
+	struct hdcp_lib_session_init_req *req_buf;
+	struct hdcp_lib_session_init_rsp *rsp_buf;
+
+	if (!handle || !handle->qseecom_handle ||
+	    !handle->qseecom_handle->sbuf) {
+		pr_err("invalid handle\n");
+		rc = -EINVAL;
+		goto exit;
+	}
+
+	if (!(handle->hdcp_state & HDCP_STATE_APP_LOADED)) {
+		pr_err("app not loaded\n");
+		goto exit;
+	}
+
+	if (handle->hdcp_state & HDCP_STATE_SESSION_INIT) {
+		pr_err("session already initialized\n");
+		goto exit;
+	}
+
+	/* send HDCP_Session_Init command to TZ */
+	req_buf =
+	    (struct hdcp_lib_session_init_req *)handle->qseecom_handle->sbuf;
+	req_buf->commandid = HDCP_SESSION_INIT;
+	req_buf->deviceid = handle->device_type;
+	rsp_buf = (struct hdcp_lib_session_init_rsp *)
+	    (handle->qseecom_handle->sbuf +
+	     QSEECOM_ALIGN(sizeof(struct hdcp_lib_session_init_req)));
+
+	rc = qseecom_send_command(handle->qseecom_handle, req_buf,
+				  QSEECOM_ALIGN(sizeof
+						(struct
+						 hdcp_lib_session_init_req)),
+				  rsp_buf,
+				  QSEECOM_ALIGN(sizeof
+						(struct
+						 hdcp_lib_session_init_rsp)));
+
+	if ((rc < 0) || (rsp_buf->status != HDCP_SUCCESS) ||
+	    (rsp_buf->commandid != HDCP_SESSION_INIT)) {
+		pr_err("qseecom cmd failed with err = %d, status = %d\n",
+		       rc, rsp_buf->status);
+		rc = -EINVAL;
+		goto exit;
+	}
+
+	pr_debug("session id %d\n", rsp_buf->sessionid);
+
+	handle->session_id = rsp_buf->sessionid;
+	handle->hdcp_state |= HDCP_STATE_SESSION_INIT;
+
+	pr_debug("success\n");
+exit:
+	return rc;
+}
+
+static int hdcp_lib_session_deinit(struct hdcp_lib_handle *handle)
+{
+	int rc = 0;
+	struct hdcp_lib_session_deinit_req *req_buf;
+	struct hdcp_lib_session_deinit_rsp *rsp_buf;
+
+	if (!handle || !handle->qseecom_handle ||
+	    !handle->qseecom_handle->sbuf) {
+		pr_err("invalid handle\n");
+		rc = -EINVAL;
+		goto exit;
+	}
+
+	if (!(handle->hdcp_state & HDCP_STATE_APP_LOADED)) {
+		pr_err("app not loaded\n");
+		goto exit;
+	}
+
+	if (!(handle->hdcp_state & HDCP_STATE_SESSION_INIT)) {
+		/* unload library here */
+		pr_err("session not initialized\n");
+		goto exit;
+	}
+
+	/* send command to TZ */
+	req_buf =
+	    (struct hdcp_lib_session_deinit_req *)handle->qseecom_handle->sbuf;
+	req_buf->commandid = HDCP_SESSION_DEINIT;
+	req_buf->sessionid = handle->session_id;
+	rsp_buf = (struct hdcp_lib_session_deinit_rsp *)
+	    (handle->qseecom_handle->sbuf +
+	     QSEECOM_ALIGN(sizeof(struct hdcp_lib_session_deinit_req)));
+
+	rc = qseecom_send_command(handle->qseecom_handle, req_buf,
+				  QSEECOM_ALIGN(sizeof
+						(struct
+						 hdcp_lib_session_deinit_req)),
+				  rsp_buf,
+				  QSEECOM_ALIGN(sizeof
+						(struct
+						 hdcp_lib_session_deinit_rsp)));
+
+	if ((rc < 0) || (rsp_buf->status != HDCP_SUCCESS) ||
+	    (rsp_buf->commandid != HDCP_SESSION_DEINIT)) {
+		pr_err("qseecom cmd failed with err = %d status = %d\n",
+		       rc, rsp_buf->status);
+		rc = -EINVAL;
+		goto exit;
+	}
+
+	handle->hdcp_state &= ~HDCP_STATE_SESSION_INIT;
+	pr_debug("success\n");
+exit:
+	return rc;
+}
+
+static int hdcp_lib_txmtr_init(struct hdcp_lib_handle *handle)
+{
+	int rc = 0;
+	struct hdcp_tx_init_req *req_buf;
+	struct hdcp_tx_init_rsp *rsp_buf;
+
+	if (!handle || !handle->qseecom_handle ||
+	    !handle->qseecom_handle->sbuf) {
+		pr_err("invalid handle\n");
+		rc = -EINVAL;
+		goto exit;
+	}
+
+	if (!(handle->hdcp_state & HDCP_STATE_SESSION_INIT)) {
+		pr_err("session not initialized\n");
+		goto exit;
+	}
+
+	if (!(handle->hdcp_state & HDCP_STATE_APP_LOADED)) {
+		pr_err("library not loaded\n");
+		goto exit;
+	}
+
+	/* send HDCP_Txmtr_Init command to TZ */
+	req_buf = (struct hdcp_tx_init_req *)handle->qseecom_handle->sbuf;
+	req_buf->commandid = HDCP_TXMTR_INIT;
+	req_buf->sessionid = handle->session_id;
+	rsp_buf = (struct hdcp_tx_init_rsp *)
+	    (handle->qseecom_handle->sbuf +
+	     QSEECOM_ALIGN(sizeof(struct hdcp_tx_init_req)));
+
+	rc = qseecom_send_command(handle->qseecom_handle, req_buf,
+				  QSEECOM_ALIGN(sizeof
+						(struct hdcp_tx_init_req)),
+				  rsp_buf,
+				  QSEECOM_ALIGN(sizeof
+						(struct hdcp_tx_init_rsp)));
+
+	if ((rc < 0) || (rsp_buf->status != HDCP_SUCCESS) ||
+	    (rsp_buf->commandid != HDCP_TXMTR_INIT)) {
+		pr_err("qseecom cmd failed with err = %d, status = %d\n",
+		       rc, rsp_buf->status);
+		rc = -EINVAL;
+		goto exit;
+	}
+
+	handle->tz_ctxhandle = rsp_buf->ctxhandle;
+	handle->hdcp_state |= HDCP_STATE_TXMTR_INIT;
+
+	pr_debug("success\n");
+exit:
+	return rc;
+}
+
+static int hdcp_lib_txmtr_init_legacy(struct hdcp_lib_handle *handle)
+{
+	int rc = 0;
+	struct hdcp_tx_init_req_v1 *req_buf;
+	struct hdcp_tx_init_rsp_v1 *rsp_buf;
+
+	if (!handle || !handle->qseecom_handle ||
+	    !handle->qseecom_handle->sbuf) {
+		pr_err("invalid handle\n");
+		rc = -EINVAL;
+		goto exit;
+	}
+
+	if (!(handle->hdcp_state & HDCP_STATE_APP_LOADED)) {
+		pr_err("app not loaded\n");
+		goto exit;
+	}
+
+	if (handle->hdcp_state & HDCP_STATE_TXMTR_INIT) {
+		pr_err("txmtr already initialized\n");
+		goto exit;
+	}
+
+	/* send HDCP_Txmtr_Init command to TZ */
+	req_buf = (struct hdcp_tx_init_req_v1 *)handle->qseecom_handle->sbuf;
+	req_buf->commandid = HDCP_TXMTR_INIT;
+	rsp_buf = (struct hdcp_tx_init_rsp_v1 *)
+	    (handle->qseecom_handle->sbuf +
+	     QSEECOM_ALIGN(sizeof(struct hdcp_tx_init_req_v1)));
+
+	rc = qseecom_send_command(handle->qseecom_handle, req_buf,
+				  QSEECOM_ALIGN(sizeof
+						(struct hdcp_tx_init_req_v1)),
+				  rsp_buf,
+				  QSEECOM_ALIGN(sizeof
+						(struct hdcp_tx_init_rsp_v1)));
+
+	if ((rc < 0) || (rsp_buf->status != HDCP_SUCCESS) ||
+	    (rsp_buf->commandid != HDCP_TXMTR_INIT) ||
+	    (rsp_buf->msglen == 0)) {
+		pr_err("qseecom cmd failed with err = %d, status = %d\n",
+		       rc, rsp_buf->status);
+		rc = -EINVAL;
+		goto exit;
+	}
+
+	pr_debug("recvd %s from TZ at %dms\n",
+		 hdcp_lib_message_name((int)rsp_buf->message[0]),
+		 jiffies_to_msecs(jiffies));
+
+	handle->last_msg = (int)rsp_buf->message[0];
+
+	/* send the response to HDMI driver */
+	memset(handle->listener_buf, 0, MAX_TX_MESSAGE_SIZE);
+	memcpy(handle->listener_buf, (unsigned char *)rsp_buf->message,
+	       rsp_buf->msglen);
+	handle->msglen = rsp_buf->msglen;
+	handle->hdcp_timeout = rsp_buf->timeout;
+
+	handle->tz_ctxhandle = rsp_buf->ctxhandle;
+	handle->hdcp_state |= HDCP_STATE_TXMTR_INIT;
+
+	pr_debug("success\n");
+exit:
+	return rc;
+}
+
+static int hdcp_lib_txmtr_deinit(struct hdcp_lib_handle *handle)
+{
+	int rc = 0;
+	struct hdcp_deinit_req *req_buf;
+	struct hdcp_deinit_rsp *rsp_buf;
+
+	if (!handle || !handle->qseecom_handle ||
+	    !handle->qseecom_handle->sbuf) {
+		pr_err("invalid handle\n");
+		rc = -EINVAL;
+		goto exit;
+	}
+
+	if (!(handle->hdcp_state & HDCP_STATE_APP_LOADED)) {
+		pr_err("app not loaded\n");
+		goto exit;
+	}
+
+	if (!(handle->hdcp_state & HDCP_STATE_TXMTR_INIT)) {
+		/* unload library here */
+		pr_err("txmtr not initialized\n");
+		goto exit;
+	}
+
+	/* send command to TZ */
+	req_buf = (struct hdcp_deinit_req *)handle->qseecom_handle->sbuf;
+	req_buf->commandid = HDCP_TXMTR_DEINIT;
+	req_buf->ctxhandle = handle->tz_ctxhandle;
+	rsp_buf = (struct hdcp_deinit_rsp *)
+	    (handle->qseecom_handle->sbuf +
+	     QSEECOM_ALIGN(sizeof(struct hdcp_deinit_req)));
+
+	rc = qseecom_send_command(handle->qseecom_handle, req_buf,
+				  QSEECOM_ALIGN(sizeof(struct hdcp_deinit_req)),
+				  rsp_buf,
+				  QSEECOM_ALIGN(sizeof
+						(struct hdcp_deinit_rsp)));
+
+	if ((rc < 0) || (rsp_buf->status != HDCP_SUCCESS) ||
+	    (rsp_buf->commandid != HDCP_TXMTR_DEINIT)) {
+		pr_err("qseecom cmd failed with err = %d status = %d\n",
+		       rc, rsp_buf->status);
+		rc = -EINVAL;
+		goto exit;
+	}
+
+	handle->hdcp_state &= ~HDCP_STATE_TXMTR_INIT;
+	pr_debug("success\n");
+exit:
+	return rc;
+}
+
+static int hdcp_lib_start_auth(struct hdcp_lib_handle *handle)
+{
+	int rc = 0;
+	struct hdcp_start_auth_req *req_buf;
+	struct hdcp_start_auth_rsp *rsp_buf;
+
+	if (!handle || !handle->qseecom_handle ||
+	    !handle->qseecom_handle->sbuf) {
+		pr_err("invalid handle\n");
+		rc = -EINVAL;
+		goto exit;
+	}
+
+	if (!(handle->hdcp_state & HDCP_STATE_SESSION_INIT)) {
+		pr_err("session not initialized\n");
+		goto exit;
+	}
+
+	if (!(handle->hdcp_state & HDCP_STATE_TXMTR_INIT)) {
+		pr_err("txmtr not initialized\n");
+		goto exit;
+	}
+
+	/* send HDCP_Txmtr_Start_Auth command to TZ */
+	req_buf = (struct hdcp_start_auth_req *)handle->qseecom_handle->sbuf;
+	req_buf->commandid = HDCP_TXMTR_START_AUTHENTICATE;
+	req_buf->ctxHandle = handle->tz_ctxhandle;
+	rsp_buf = (struct hdcp_start_auth_rsp *)
+	    (handle->qseecom_handle->sbuf +
+	     QSEECOM_ALIGN(sizeof(struct hdcp_start_auth_req)));
+
+	rc = qseecom_send_command(handle->qseecom_handle, req_buf,
+				  QSEECOM_ALIGN(sizeof
+						(struct hdcp_start_auth_req)),
+				  rsp_buf,
+				  QSEECOM_ALIGN(sizeof
+						(struct hdcp_start_auth_rsp)));
+
+	if ((rc < 0) || (rsp_buf->status != HDCP_SUCCESS) ||
+	    (rsp_buf->commandid != HDCP_TXMTR_START_AUTHENTICATE) ||
+	    (rsp_buf->msglen == 0)) {
+		pr_err("qseecom cmd failed with err = %d, status = %d\n",
+		       rc, rsp_buf->status);
+		rc = -EINVAL;
+		goto exit;
+	}
+
+	pr_debug("recvd %s from TZ at %dms\n",
+		 hdcp_lib_message_name((int)rsp_buf->message[0]),
+		 jiffies_to_msecs(jiffies));
+
+	handle->last_msg = (int)rsp_buf->message[0];
+
+	/* send the response to HDMI driver */
+	memset(handle->listener_buf, 0, MAX_TX_MESSAGE_SIZE);
+	memcpy(handle->listener_buf, (unsigned char *)rsp_buf->message,
+	       rsp_buf->msglen);
+	handle->msglen = rsp_buf->msglen;
+	handle->hdcp_timeout = rsp_buf->timeout;
+
+	handle->tz_ctxhandle = rsp_buf->ctxhandle;
+
+	pr_debug("success\n");
+exit:
+	return rc;
+}
+
+static void hdcp_lib_stream(struct hdcp_lib_handle *handle)
+{
+	int rc = 0;
+	struct hdcp_query_stream_type_req *req_buf;
+	struct hdcp_query_stream_type_rsp *rsp_buf;
+
+	if (!handle || !handle->qseecom_handle ||
+	    !handle->qseecom_handle->sbuf) {
+		pr_err("invalid handle\n");
+		return;
+	}
+
+	if (atomic_read(&handle->hdcp_off)) {
+		pr_debug("invalid state, hdcp off\n");
+		return;
+	}
+
+	if (!handle->repeater_flag) {
+		pr_debug("invalid state, not a repeater\n");
+		return;
+	}
+
+	/* send command to TZ */
+	req_buf =
+	    (struct hdcp_query_stream_type_req *)handle->qseecom_handle->sbuf;
+	req_buf->commandid = HDCP_TXMTR_QUERY_STREAM_TYPE;
+	req_buf->ctxhandle = handle->tz_ctxhandle;
+	rsp_buf = (struct hdcp_query_stream_type_rsp *)
+	    (handle->qseecom_handle->sbuf +
+	     QSEECOM_ALIGN(sizeof(struct hdcp_query_stream_type_req)));
+
+	rc = qseecom_send_command(handle->qseecom_handle, req_buf,
+				  QSEECOM_ALIGN(sizeof
+						(struct
+						 hdcp_query_stream_type_req)),
+				  rsp_buf,
+				  QSEECOM_ALIGN(sizeof
+						(struct
+						 hdcp_query_stream_type_rsp)));
+
+	if ((rc < 0) || (rsp_buf->status != HDCP_SUCCESS) ||
+	    (rsp_buf->msglen == 0) ||
+	    (rsp_buf->commandid != HDCP_TXMTR_QUERY_STREAM_TYPE)) {
+		pr_err("qseecom cmd failed with err=%d status=%d\n",
+		       rc, rsp_buf->status);
+		rc = -EINVAL;
+		goto exit;
+	}
+
+	pr_debug("message received from TZ: %s\n",
+		 hdcp_lib_message_name((int)rsp_buf->msg[0]));
+
+	handle->last_msg = (int)rsp_buf->msg[0];
+
+	memset(handle->listener_buf, 0, MAX_TX_MESSAGE_SIZE);
+	memcpy(handle->listener_buf, (unsigned char *)rsp_buf->msg,
+	       rsp_buf->msglen);
+	handle->hdcp_timeout = rsp_buf->timeout;
+	handle->msglen = rsp_buf->msglen;
+
+exit:
+	if (!rc && !atomic_read(&handle->hdcp_off))
+		hdcp_lib_send_message(handle);
+}
+
+static void hdcp_lib_query_stream_work(struct kthread_work *work)
+{
+	struct hdcp_lib_handle *handle = container_of(work,
+						      struct hdcp_lib_handle,
+						      wk_stream);
+
+	hdcp_lib_stream(handle);
+}
+
+static bool hdcp_lib_client_feature_supported(void *phdcpcontext)
+{
+	int rc = 0;
+	bool supported = false;
+	struct hdcp_lib_handle *handle = phdcpcontext;
+
+	if (!handle) {
+		pr_err("invalid input\n");
+		goto exit;
+	}
+
+	if (handle->feature_supported) {
+		supported = true;
+		goto exit;
+	}
+
+	rc = hdcp_lib_library_load(handle);
+	if (!rc) {
+		if (!hdcp_lib_verify_keys(handle)) {
+			pr_debug("HDCP2p2 supported\n");
+			handle->feature_supported = true;
+			supported = true;
+		}
+		hdcp_lib_library_unload(handle);
+	}
+exit:
+	return supported;
+}
+
+static void hdcp_lib_check_worker_status(struct hdcp_lib_handle *handle)
+{
+	if (!list_empty(&handle->wk_init.node))
+		pr_debug("init work queued\n");
+
+	if (handle->worker.current_work == &handle->wk_init)
+		pr_debug("init work executing\n");
+
+	if (!list_empty(&handle->wk_msg_sent.node))
+		pr_debug("msg_sent work queued\n");
+
+	if (handle->worker.current_work == &handle->wk_msg_sent)
+		pr_debug("msg_sent work executing\n");
+
+	if (!list_empty(&handle->wk_msg_recvd.node))
+		pr_debug("msg_recvd work queued\n");
+
+	if (handle->worker.current_work == &handle->wk_msg_recvd)
+		pr_debug("msg_recvd work executing\n");
+
+	if (!list_empty(&handle->wk_timeout.node))
+		pr_debug("timeout work queued\n");
+
+	if (handle->worker.current_work == &handle->wk_timeout)
+		pr_debug("timeout work executing\n");
+
+	if (!list_empty(&handle->wk_clean.node))
+		pr_debug("clean work queued\n");
+
+	if (handle->worker.current_work == &handle->wk_clean)
+		pr_debug("clean work executing\n");
+
+	if (!list_empty(&handle->wk_wait.node))
+		pr_debug("wait work queued\n");
+
+	if (handle->worker.current_work == &handle->wk_wait)
+		pr_debug("wait work executing\n");
+
+	if (!list_empty(&handle->wk_stream.node))
+		pr_debug("stream work queued\n");
+
+	if (handle->worker.current_work == &handle->wk_stream)
+		pr_debug("stream work executing\n");
+}
+
+static int hdcp_lib_check_valid_state(struct hdcp_lib_handle *handle)
+{
+	int rc = 0;
+
+	if (!list_empty(&handle->worker.work_list))
+		hdcp_lib_check_worker_status(handle);
+
+	if (handle->wakeup_cmd == HDCP_LIB_WKUP_CMD_START) {
+		if (!list_empty(&handle->worker.work_list)) {
+			pr_debug("error: queue not empty\n");
+			rc = -EBUSY;
+			goto exit;
+		}
+
+		if (handle->hdcp_state & HDCP_STATE_APP_LOADED) {
+			pr_debug("library already loaded\n");
+			rc = -EBUSY;
+			goto exit;
+		}
+	} else {
+		if (atomic_read(&handle->hdcp_off)) {
+			pr_debug("hdcp2.2 session tearing down\n");
+			goto exit;
+		}
+
+		if (!(handle->hdcp_state & HDCP_STATE_APP_LOADED)) {
+			pr_debug("hdcp 2.2 app not loaded\n");
+			goto exit;
+		}
+	}
+exit:
+	return rc;
+}
+
+static void hdcp_lib_update_exec_type(void *ctx, bool tethered)
+{
+	struct hdcp_lib_handle *handle = ctx;
+
+	if (!handle)
+		return;
+
+	mutex_lock(&handle->wakeup_mutex);
+
+	if (handle->tethered == tethered) {
+		pr_debug("exec mode same as %s\n",
+			 tethered ? "tethered" : "threaded");
+	} else {
+		handle->tethered = tethered;
+
+		pr_debug("exec mode changed to %s\n",
+			 tethered ? "tethered" : "threaded");
+	}
+
+	mutex_unlock(&handle->wakeup_mutex);
+}
+
+static int hdcp_lib_wakeup_thread(struct hdcp_lib_wakeup_data *data)
+{
+	struct hdcp_lib_handle *handle;
+	int rc = 0;
+
+	if (!data)
+		return -EINVAL;
+
+	handle = data->context;
+	if (!handle)
+		return -EINVAL;
+
+	mutex_lock(&handle->wakeup_mutex);
+
+	handle->wakeup_cmd = data->cmd;
+	handle->timeout_left = data->timeout;
+
+	pr_debug("client->lib: %s (%s)\n",
+		hdcp_lib_cmd_to_str(data->cmd),
+		hdcp_lib_message_name(handle->last_msg));
+
+	rc = hdcp_lib_check_valid_state(handle);
+	if (rc)
+		goto exit;
+
+	mutex_lock(&handle->msg_lock);
+	if (data->recvd_msg_len) {
+		kzfree(handle->last_msg_recvd_buf);
+
+		handle->last_msg_recvd_len = data->recvd_msg_len;
+		handle->last_msg_recvd_buf = kzalloc(data->recvd_msg_len,
+						     GFP_KERNEL);
+		if (!handle->last_msg_recvd_buf) {
+			rc = -ENOMEM;
+			mutex_unlock(&handle->msg_lock);
+			goto exit;
+		}
+
+		memcpy(handle->last_msg_recvd_buf, data->recvd_msg_buf,
+		       data->recvd_msg_len);
+	}
+	mutex_unlock(&handle->msg_lock);
+
+	if (!completion_done(&handle->poll_wait))
+		complete_all(&handle->poll_wait);
+
+	switch (handle->wakeup_cmd) {
+	case HDCP_LIB_WKUP_CMD_START:
+		handle->no_stored_km_flag = 0;
+		handle->repeater_flag = false;
+		handle->non_2p2_present = false;
+		handle->update_stream = false;
+		handle->last_msg_sent = 0;
+		handle->last_msg = INVALID_MESSAGE_ID;
+		handle->hdcp_timeout = 0;
+		handle->timeout_left = 0;
+		handle->legacy_app = false;
+		atomic_set(&handle->hdcp_off, 0);
+		handle->hdcp_state = HDCP_STATE_INIT;
+
+		HDCP_LIB_EXECUTE(init);
+		break;
+	case HDCP_LIB_WKUP_CMD_STOP:
+		atomic_set(&handle->hdcp_off, 1);
+
+		HDCP_LIB_EXECUTE(clean);
+		break;
+	case HDCP_LIB_WKUP_CMD_MSG_SEND_SUCCESS:
+		handle->last_msg_sent = handle->listener_buf[0];
+
+		HDCP_LIB_EXECUTE(msg_sent);
+		break;
+	case HDCP_LIB_WKUP_CMD_MSG_SEND_FAILED:
+	case HDCP_LIB_WKUP_CMD_MSG_RECV_FAILED:
+	case HDCP_LIB_WKUP_CMD_LINK_FAILED:
+		handle->hdcp_state |= HDCP_STATE_ERROR;
+		HDCP_LIB_EXECUTE(clean);
+		break;
+	case HDCP_LIB_WKUP_CMD_MSG_RECV_SUCCESS:
+		HDCP_LIB_EXECUTE(msg_recvd);
+		break;
+	case HDCP_LIB_WKUP_CMD_MSG_RECV_TIMEOUT:
+		HDCP_LIB_EXECUTE(timeout);
+		break;
+	case HDCP_LIB_WKUP_CMD_QUERY_STREAM_TYPE:
+		HDCP_LIB_EXECUTE(stream);
+		break;
+	default:
+		pr_err("invalid wakeup command %d\n", handle->wakeup_cmd);
+	}
+exit:
+	mutex_unlock(&handle->wakeup_mutex);
+
+	return rc;
+}
+
+static void hdcp_lib_prep_type_id(struct hdcp_lib_handle *handle,
+	struct hdmi_hdcp_wakeup_data *cdata)
+{
+	memset(handle->listener_buf, 0, MAX_TX_MESSAGE_SIZE);
+	handle->listener_buf[0] = SKE_SEND_TYPE_ID;
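+	/* byte 1, left zeroed by the memset above, carries the stream type */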
+	handle->msglen = 2;
+	cdata->cmd = HDMI_HDCP_WKUP_CMD_SEND_MESSAGE;
+	cdata->send_msg_buf = handle->listener_buf;
+	cdata->send_msg_len = handle->msglen;
+	handle->last_msg = hdcp_lib_get_next_message(handle, cdata);
+}
+
+static void hdcp_lib_msg_sent(struct hdcp_lib_handle *handle)
+{
+	struct hdmi_hdcp_wakeup_data cdata = { HDMI_HDCP_WKUP_CMD_INVALID };
+
+	if (!handle) {
+		pr_err("invalid handle\n");
+		return;
+	}
+
+	cdata.context = handle->client_ctx;
+
+	switch (handle->last_msg_sent) {
+	case SKE_SEND_TYPE_ID:
+		if (!hdcp_lib_enable_encryption(handle)) {
+			handle->authenticated = true;
+
+			cdata.cmd = HDMI_HDCP_WKUP_CMD_STATUS_SUCCESS;
+			hdcp_lib_wakeup_client(handle, &cdata);
+		}
+
+		/* poll for link check */
+		cdata.cmd = HDMI_HDCP_WKUP_CMD_LINK_POLL;
+		break;
+	case SKE_SEND_EKS_MESSAGE_ID:
+		/*
+		 * a) if it's a repeater, irrespective of device type,
+		 *    start CMD_LINK_POLL to trigger repeater auth
+		 * b) if it's not a repeater and the device is DP,
+		 *    first send SKE_SEND_TYPE_ID and enable encryption
+		 *    upon success
+		 * c) if it's not a repeater and the device is HDMI,
+		 *    don't send SKE_SEND_TYPE_ID; enable encryption
+		 *    and start part III of authentication
+		 */
+		if (handle->repeater_flag) {
+			/* poll for link check */
+			cdata.cmd = HDMI_HDCP_WKUP_CMD_LINK_POLL;
+		} else if (handle->device_type == HDCP_TXMTR_DP) {
+			hdcp_lib_prep_type_id(handle, &cdata);
+		} else if (handle->device_type == HDCP_TXMTR_HDMI) {
+			if (!hdcp_lib_enable_encryption(handle)) {
+				handle->authenticated = true;
+				cdata.cmd = HDMI_HDCP_WKUP_CMD_STATUS_SUCCESS;
+				hdcp_lib_wakeup_client(handle, &cdata);
+			}
+			/* poll for link check */
+			cdata.cmd = HDMI_HDCP_WKUP_CMD_LINK_POLL;
+		}
+		break;
+	case REP_SEND_ACK_ID:
+		pr_debug("Repeater authentication successful\n");
+
+		if (handle->update_stream) {
+			HDCP_LIB_EXECUTE(stream);
+			handle->update_stream = false;
+		} else {
+			cdata.cmd = HDMI_HDCP_WKUP_CMD_LINK_POLL;
+		}
+		break;
+	default:
+		cdata.cmd = HDMI_HDCP_WKUP_CMD_RECV_MESSAGE;
+		cdata.timeout = handle->timeout_left;
+	}
+
+	hdcp_lib_wakeup_client(handle, &cdata);
+}
+
+static void hdcp_lib_msg_sent_work(struct kthread_work *work)
+{
+	struct hdcp_lib_handle *handle = container_of(work,
+						      struct hdcp_lib_handle,
+						      wk_msg_sent);
+
+	if (handle->wakeup_cmd != HDCP_LIB_WKUP_CMD_MSG_SEND_SUCCESS) {
+		pr_err("invalid wakeup command %d\n", handle->wakeup_cmd);
+		return;
+	}
+
+	hdcp_lib_msg_sent(handle);
+}
+
+static void hdcp_lib_init(struct hdcp_lib_handle *handle)
+{
+	int rc = 0;
+
+	if (!handle) {
+		pr_err("invalid handle\n");
+		return;
+	}
+
+	if (handle->wakeup_cmd != HDCP_LIB_WKUP_CMD_START) {
+		pr_err("invalid wakeup command %d\n", handle->wakeup_cmd);
+		return;
+	}
+
+	rc = hdcp_lib_library_load(handle);
+	if (rc)
+		goto exit;
+
+	if (!handle->legacy_app) {
+		rc = hdcp_lib_session_init(handle);
+		if (rc)
+			goto exit;
+	}
+
+	if (handle->hdcp_txmtr_init == NULL) {
+		pr_err("invalid txmtr init function pointer\n");
+		return;
+	}
+
+	rc = handle->hdcp_txmtr_init(handle);
+	if (rc)
+		goto exit;
+
+	if (!handle->legacy_app) {
+		rc = hdcp_lib_start_auth(handle);
+		if (rc)
+			goto exit;
+	}
+
+	hdcp_lib_send_message(handle);
+
+	return;
+exit:
+	HDCP_LIB_EXECUTE(clean);
+}
+
+static void hdcp_lib_init_work(struct kthread_work *work)
+{
+	struct hdcp_lib_handle *handle = container_of(work,
+						      struct hdcp_lib_handle,
+						      wk_init);
+
+	hdcp_lib_init(handle);
+}
+
+static void hdcp_lib_timeout(struct hdcp_lib_handle *handle)
+{
+	int rc = 0;
+	struct hdcp_send_timeout_req *req_buf;
+	struct hdcp_send_timeout_rsp *rsp_buf;
+
+	if (!handle || !handle->qseecom_handle ||
+	    !handle->qseecom_handle->sbuf) {
+		pr_debug("invalid handle\n");
+		return;
+	}
+
+	if (atomic_read(&handle->hdcp_off)) {
+		pr_debug("invalid state, hdcp off\n");
+		return;
+	}
+
+	req_buf = (struct hdcp_send_timeout_req *)
+	    (handle->qseecom_handle->sbuf);
+	req_buf->commandid = HDCP_TXMTR_SEND_MESSAGE_TIMEOUT;
+	req_buf->ctxhandle = handle->tz_ctxhandle;
+
+	rsp_buf = (struct hdcp_send_timeout_rsp *)
+	    (handle->qseecom_handle->sbuf +
+	    QSEECOM_ALIGN(sizeof(struct hdcp_send_timeout_req)));
+
+	rc = qseecom_send_command(handle->qseecom_handle, req_buf,
+				  QSEECOM_ALIGN(sizeof
+						(struct hdcp_send_timeout_req)),
+				  rsp_buf,
+				  QSEECOM_ALIGN(sizeof
+						(struct
+						 hdcp_send_timeout_rsp)));
+
+	if ((rc < 0) || (rsp_buf->status != HDCP_SUCCESS)) {
+		pr_err("qseecom cmd failed with err = %d status = %d\n",
+		       rc, rsp_buf->status);
+		rc = -EINVAL;
+		goto error;
+	}
+
+	if (rsp_buf->commandid == HDCP_TXMTR_SEND_MESSAGE_TIMEOUT) {
+		pr_err("HDCP_TXMTR_SEND_MESSAGE_TIMEOUT\n");
+		rc = -EINVAL;
+		goto error;
+	}
+
+	/*
+	 * if the response contains an LC_Init or RepeaterAuth_Stream_Manage
+	 * message, send it to the sink again: TZ wants to retry
+	 */
+	if ((rsp_buf->commandid == HDCP_TXMTR_PROCESS_RECEIVED_MESSAGE) &&
+	    ((int)rsp_buf->message[0] == LC_INIT_MESSAGE_ID ||
+		 (int)rsp_buf->message[0] == REP_STREAM_MANAGE_ID)) {
+		if (!atomic_read(&handle->hdcp_off)) {
+			pr_err("resending LC_Init/RepeaterAuth_Stream_Manage\n");
+			/* keep local copy of TZ response */
+			memset(handle->listener_buf, 0, MAX_TX_MESSAGE_SIZE);
+			memcpy(handle->listener_buf,
+			       (unsigned char *)rsp_buf->message,
+			       rsp_buf->msglen);
+			handle->hdcp_timeout = rsp_buf->timeout;
+			handle->msglen = rsp_buf->msglen;
+
+			hdcp_lib_send_message(handle);
+		}
+	}
+
+	return;
+error:
+	if (!atomic_read(&handle->hdcp_off))
+		HDCP_LIB_EXECUTE(clean);
+}
+
+static void hdcp_lib_manage_timeout_work(struct kthread_work *work)
+{
+	struct hdcp_lib_handle *handle = container_of(work,
+						      struct hdcp_lib_handle,
+						      wk_timeout);
+
+	hdcp_lib_timeout(handle);
+}
+
+static void hdcp_lib_clean(struct hdcp_lib_handle *handle)
+{
+	struct hdmi_hdcp_wakeup_data cdata = { HDMI_HDCP_WKUP_CMD_INVALID };
+
+	if (!handle) {
+		pr_err("invalid input\n");
+		return;
+	}
+
+	handle->authenticated = false;
+
+	/* AV mute the sink first to avoid artifacts */
+	handle->client_ops->mute_sink(handle->client_ctx);
+
+	hdcp_lib_txmtr_deinit(handle);
+	if (!handle->legacy_app)
+		hdcp_lib_session_deinit(handle);
+	hdcp_lib_library_unload(handle);
+
+	cdata.context = handle->client_ctx;
+	cdata.cmd = HDMI_HDCP_WKUP_CMD_STATUS_FAILED;
+
+	if (!atomic_read(&handle->hdcp_off))
+		hdcp_lib_wakeup_client(handle, &cdata);
+
+	atomic_set(&handle->hdcp_off, 1);
+}
+
+static void hdcp_lib_cleanup_work(struct kthread_work *work)
+{
+	struct hdcp_lib_handle *handle = container_of(work,
+						      struct hdcp_lib_handle,
+						      wk_clean);
+
+	hdcp_lib_clean(handle);
+}
+
+static void hdcp_lib_msg_recvd(struct hdcp_lib_handle *handle)
+{
+	int rc = 0;
+	struct hdmi_hdcp_wakeup_data cdata = { HDMI_HDCP_WKUP_CMD_INVALID };
+	struct hdcp_rcvd_msg_req *req_buf;
+	struct hdcp_rcvd_msg_rsp *rsp_buf;
+	uint32_t msglen;
+	char *msg = NULL;
+	char msg_name[50];
+	uint32_t message_id_bytes = 0;
+
+	if (!handle || !handle->qseecom_handle ||
+	    !handle->qseecom_handle->sbuf) {
+		pr_err("invalid handle\n");
+		return;
+	}
+
+	if (atomic_read(&handle->hdcp_off)) {
+		pr_debug("invalid state, hdcp off\n");
+		return;
+	}
+
+	cdata.context = handle->client_ctx;
+
+	mutex_lock(&handle->msg_lock);
+	msglen = handle->last_msg_recvd_len;
+
+	if (!msglen) {
+		pr_err("invalid msg len\n");
+		mutex_unlock(&handle->msg_lock);
+		rc = -EINVAL;
+		goto exit;
+	}
+
+	/* if the client is DP, allocate an extra byte for the message ID */
+	if (handle->device_type == HDCP_TXMTR_DP)
+		message_id_bytes = 1;
+
+	msglen += message_id_bytes;
+
+	msg = kzalloc(msglen, GFP_KERNEL);
+	if (!msg) {
+		mutex_unlock(&handle->msg_lock);
+		rc = -ENOMEM;
+		goto exit;
+	}
+
+	/* copy the message id if needed */
+	if (message_id_bytes)
+		memcpy(msg, &handle->last_msg, message_id_bytes);
+
+	memcpy(msg + message_id_bytes,
+		handle->last_msg_recvd_buf,
+		handle->last_msg_recvd_len);
+
+	mutex_unlock(&handle->msg_lock);
+
+	snprintf(msg_name, sizeof(msg_name), "%s: ",
+		hdcp_lib_message_name((int)msg[0]));
+
+	print_hex_dump_debug(msg_name,
+		DUMP_PREFIX_NONE, 16, 1, msg, msglen, false);
+
+	/* send the message to QSEECOM */
+	req_buf = (struct hdcp_rcvd_msg_req *)(handle->qseecom_handle->sbuf);
+	req_buf->commandid = HDCP_TXMTR_PROCESS_RECEIVED_MESSAGE;
+	memcpy(req_buf->msg, msg, msglen);
+	req_buf->msglen = msglen;
+	req_buf->ctxhandle = handle->tz_ctxhandle;
+
+	rsp_buf =
+	    (struct hdcp_rcvd_msg_rsp *)(handle->qseecom_handle->sbuf +
+					 QSEECOM_ALIGN(sizeof
+						       (struct
+							hdcp_rcvd_msg_req)));
+
+	pr_debug("writing %s to TZ at %dms\n",
+		 hdcp_lib_message_name((int)msg[0]), jiffies_to_msecs(jiffies));
+
+	rc = qseecom_send_command(handle->qseecom_handle, req_buf,
+				  QSEECOM_ALIGN(sizeof
+						(struct hdcp_rcvd_msg_req)),
+				  rsp_buf,
+				  QSEECOM_ALIGN(sizeof
+						(struct hdcp_rcvd_msg_rsp)));
+
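+	/*
+	 * for RepeaterAuth_Send_ReceiverID_List, byte 2 holds the low
+	 * RxInfo byte carrying the downstream-topology flags
+	 */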
+	if (msg[0] == REP_SEND_RECV_ID_LIST_ID)
+		handle->non_2p2_present = !!(msg[2] &
+				(HDCP2_0_REPEATER_DOWNSTREAM |
+				 HDCP1_DEVICE_DOWNSTREAM));
+
+	/* get next message from sink if we receive H' with no stored km */
+	if ((msg[0] == AKE_SEND_H_PRIME_MESSAGE_ID) &&
+	    handle->no_stored_km_flag) {
+		handle->hdcp_timeout = rsp_buf->timeout;
+
+		cdata.cmd = HDMI_HDCP_WKUP_CMD_RECV_MESSAGE;
+		cdata.timeout = handle->hdcp_timeout;
+
+		goto exit;
+	}
+
+	if ((msg[0] == REP_STREAM_READY_ID) &&
+	    (rc == 0) && (rsp_buf->status == 0)) {
+		pr_debug("Got Auth_Stream_Ready, nothing sent to rx\n");
+
+		if (!handle->authenticated &&
+		    !hdcp_lib_enable_encryption(handle)) {
+			handle->authenticated = true;
+
+			cdata.cmd = HDMI_HDCP_WKUP_CMD_STATUS_SUCCESS;
+			hdcp_lib_wakeup_client(handle, &cdata);
+		}
+
+		cdata.cmd = HDMI_HDCP_WKUP_CMD_LINK_POLL;
+		goto exit;
+	}
+
+	if ((rc < 0) || (rsp_buf->status != 0) || (rsp_buf->msglen <= 0) ||
+	    (rsp_buf->commandid != HDCP_TXMTR_PROCESS_RECEIVED_MESSAGE) ||
+	    (rsp_buf->msg == NULL)) {
+		pr_err("qseecom cmd failed with err=%d status=%d\n",
+		       rc, rsp_buf->status);
+		rc = -EINVAL;
+		goto exit;
+	}
+
+	pr_debug("recvd %s from TZ at %dms\n",
+		 hdcp_lib_message_name((int)rsp_buf->msg[0]),
+		 jiffies_to_msecs(jiffies));
+
+	handle->last_msg = (int)rsp_buf->msg[0];
+
+	/* set the flag if response is AKE_No_Stored_km */
+	if ((int)rsp_buf->msg[0] == AKE_NO_STORED_KM_MESSAGE_ID) {
+		pr_debug("Setting no_stored_km_flag\n");
+		handle->no_stored_km_flag = 1;
+	} else {
+		handle->no_stored_km_flag = 0;
+	}
+
+	/* check if it's a repeater */
+	if ((rsp_buf->msg[0] == SKE_SEND_EKS_MESSAGE_ID) &&
+	    (rsp_buf->msglen == SKE_SEND_EKS_MESSAGE_SIZE)) {
+		if ((rsp_buf->flag ==
+		     HDCP_TXMTR_SUBSTATE_WAITING_FOR_RECIEVERID_LIST) &&
+		    (rsp_buf->timeout > 0))
+			handle->repeater_flag = true;
+		handle->update_stream = true;
+	}
+
+	memset(handle->listener_buf, 0, MAX_TX_MESSAGE_SIZE);
+	memcpy(handle->listener_buf, (unsigned char *)rsp_buf->msg,
+	       rsp_buf->msglen);
+	handle->hdcp_timeout = rsp_buf->timeout;
+	handle->msglen = rsp_buf->msglen;
+
+	if (!atomic_read(&handle->hdcp_off))
+		hdcp_lib_send_message(handle);
+exit:
+	kzfree(msg);
+
+	hdcp_lib_wakeup_client(handle, &cdata);
+
+	if (rc && !atomic_read(&handle->hdcp_off))
+		HDCP_LIB_EXECUTE(clean);
+}
+
+static void hdcp_lib_msg_recvd_work(struct kthread_work *work)
+{
+	struct hdcp_lib_handle *handle = container_of(work,
+						      struct hdcp_lib_handle,
+						      wk_msg_recvd);
+
+	hdcp_lib_msg_recvd(handle);
+}
+
+static void hdcp_lib_wait_work(struct kthread_work *work)
+{
+	u32 timeout;
+	struct hdcp_lib_handle *handle = container_of(work,
+				struct hdcp_lib_handle, wk_wait);
+
+	if (!handle) {
+		pr_err("invalid input\n");
+		return;
+	}
+
+	if (atomic_read(&handle->hdcp_off)) {
+		pr_debug("invalid state: hdcp off\n");
+		return;
+	}
+
+	if (handle->hdcp_state & HDCP_STATE_ERROR) {
+		pr_debug("invalid state: hdcp error\n");
+		return;
+	}
+
+	reinit_completion(&handle->poll_wait);
+	timeout = wait_for_completion_timeout(&handle->poll_wait,
+			handle->wait_timeout);
+	if (!timeout) {
+		pr_err("wait timeout\n");
+
+		if (!atomic_read(&handle->hdcp_off))
+			HDCP_LIB_EXECUTE(clean);
+	}
+
+	handle->wait_timeout = 0;
+}
+
+bool hdcp1_check_if_supported_load_app(void)
+{
+	int rc = 0;
+	bool hdcp1_srm_supported = true;
+
+	/* start hdcp1 app */
+	if (hdcp1_supported && hdcp1_handle && !hdcp1_handle->qsee_handle) {
+		rc = qseecom_start_app(&hdcp1_handle->qsee_handle,
+				HDCP1_APP_NAME,
+				QSEECOM_SBUFF_SIZE);
+		if (rc) {
+			pr_err("hdcp1 qseecom_start_app failed %d\n", rc);
+			hdcp1_supported = false;
+			kfree(hdcp1_handle);
+			hdcp1_handle = NULL;
+		}
+	}
+
+	/* if hdcp1 app succeeds load SRM TA as well */
+	if (hdcp1_supported && hdcp1_handle && !hdcp1_handle->srm_handle) {
+		rc = qseecom_start_app(&hdcp1_handle->srm_handle,
+				SRMAPP_NAME,
+				QSEECOM_SBUFF_SIZE);
+		if (rc) {
+			hdcp1_srm_supported = false;
+			pr_err("hdcp1_srm qseecom_start_app failed %d\n", rc);
+		}
+	}
+
+	pr_debug("hdcp1 app %s loaded\n",
+		 hdcp1_supported ? "successfully" : "not");
+	pr_debug("hdcp1 srm app %s loaded\n",
+		 hdcp1_srm_supported ? "successfully" : "not");
+
+	return hdcp1_supported;
+}
+
+void hdcp1_client_register(void *client_ctx, struct hdcp_client_ops *ops)
+{
+	/* initialize the hdcp1 handle */
+	hdcp1_handle = kzalloc(sizeof(*hdcp1_handle), GFP_KERNEL);
+
+	if (hdcp1_handle) {
+		hdcp1_handle->client_ops = ops;
+		hdcp1_handle->client_ctx = client_ctx;
+	}
+}
+
+void hdcp1_client_unregister(void)
+{
+	if (hdcp1_handle && hdcp1_handle->qsee_handle)
+		qseecom_shutdown_app(&hdcp1_handle->qsee_handle);
+
+	if (hdcp1_handle && hdcp1_handle->srm_handle)
+		qseecom_shutdown_app(&hdcp1_handle->srm_handle);
+
+	kfree(hdcp1_handle);
+	hdcp1_handle = NULL;
+}
+
+/* APIs exposed to all clients */
+int hdcp1_set_keys(uint32_t *aksv_msb, uint32_t *aksv_lsb)
+{
+	int rc = 0;
+	struct hdcp1_key_set_req *key_set_req;
+	struct hdcp1_key_set_rsp *key_set_rsp;
+	struct qseecom_handle *hdcp1_qsee_handle;
+
+	if (aksv_msb == NULL || aksv_lsb == NULL)
+		return -EINVAL;
+
+	if (!hdcp1_supported || !hdcp1_handle)
+		return -EINVAL;
+
+	hdcp1_qsee_handle = hdcp1_handle->qsee_handle;
+
+	if (!hdcp1_qsee_handle)
+		return -EINVAL;
+
+	/* set keys and request aksv */
+	key_set_req = (struct hdcp1_key_set_req *)hdcp1_qsee_handle->sbuf;
+	key_set_req->commandid = HDCP1_SET_KEY_MESSAGE_ID;
+	key_set_rsp = (struct hdcp1_key_set_rsp *)(hdcp1_qsee_handle->sbuf +
+			   QSEECOM_ALIGN(sizeof(struct hdcp1_key_set_req)));
+	rc = qseecom_send_command(hdcp1_qsee_handle, key_set_req,
+				  QSEECOM_ALIGN(sizeof
+						(struct hdcp1_key_set_req)),
+				  key_set_rsp,
+				  QSEECOM_ALIGN(sizeof
+						(struct hdcp1_key_set_rsp)));
+
+	if (rc < 0) {
+		pr_err("qseecom cmd failed err=%d\n", rc);
+		return -ENOKEY;
+	}
+
+	rc = key_set_rsp->ret;
+	if (rc) {
+		pr_err("set key cmd failed, rsp=%d\n", key_set_rsp->ret);
+		return -ENOKEY;
+	}
+
+	/* copy bytes into msb and lsb */
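+	/*
+	 * e.g. ksv[] = {0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08}
+	 * yields aksv_msb = 0x01020304 and aksv_lsb = 0x05060708
+	 */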
+	*aksv_msb = key_set_rsp->ksv[0] << 24;
+	*aksv_msb |= key_set_rsp->ksv[1] << 16;
+	*aksv_msb |= key_set_rsp->ksv[2] << 8;
+	*aksv_msb |= key_set_rsp->ksv[3];
+	*aksv_lsb = key_set_rsp->ksv[4] << 24;
+	*aksv_lsb |= key_set_rsp->ksv[5] << 16;
+	*aksv_lsb |= key_set_rsp->ksv[6] << 8;
+	*aksv_lsb |= key_set_rsp->ksv[7];
+
+	return 0;
+}
+
+int hdcp1_validate_receiver_ids(struct hdcp_srm_device_id_t *device_ids,
+	uint32_t device_id_cnt)
+{
+	int rc = 0;
+	struct hdcp_srm_check_device_ids_req *recv_id_req;
+	struct hdcp_srm_check_device_ids_rsp *recv_id_rsp;
+	uint32_t sbuf_len;
+	uint32_t rbuf_len;
+	int i = 0;
+	struct qseecom_handle *hdcp1_srmhandle;
+
+	/* If client has not been registered return */
+	if (!hdcp1_supported || !hdcp1_handle)
+		return -EINVAL;
+
+	/* Start the hdcp srm app if not already started */
+	if (hdcp1_handle && !hdcp1_handle->srm_handle) {
+		rc = qseecom_start_app(&hdcp1_handle->srm_handle,
+					SRMAPP_NAME, QSEECOM_SBUFF_SIZE);
+		if (rc) {
+			pr_err("qseecom_start_app failed for SRM TA %d\n", rc);
+			goto end;
+		}
+	}
+
+	pr_debug("device_id_cnt = %d\n", device_id_cnt);
+
+	hdcp1_srmhandle = hdcp1_handle->srm_handle;
+
+	sbuf_len = sizeof(struct hdcp_srm_check_device_ids_req)
+		+ sizeof(struct hdcp_srm_device_id_t) * device_id_cnt
+		- 1;
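+	/*
+	 * the -1 accounts for the one-byte device_ids[] placeholder
+	 * already included in the request struct size
+	 */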
+
+	rbuf_len = sizeof(struct hdcp_srm_check_device_ids_rsp);
+
+	/* Create a SRM validate receiver ID request */
+	recv_id_req = (struct hdcp_srm_check_device_ids_req *)
+					hdcp1_srmhandle->sbuf;
+	recv_id_req->commandid = HDCP_SRM_CMD_CHECK_DEVICE_ID;
+	recv_id_req->num_device_ids = device_id_cnt;
+	memcpy(recv_id_req->device_ids, device_ids,
+		   device_id_cnt * sizeof(struct hdcp_srm_device_id_t));
+
+	for (i = 0; i < device_id_cnt * sizeof(struct hdcp_srm_device_id_t);
+		i++) {
+		pr_debug("recv_id_req->device_ids[%d] = 0x%x\n", i,
+			   recv_id_req->device_ids[i]);
+	}
+
+	recv_id_rsp = (struct hdcp_srm_check_device_ids_rsp *)
+			(hdcp1_srmhandle->sbuf +
+			 QSEECOM_ALIGN(sbuf_len));
+
+	rc = qseecom_send_command(hdcp1_srmhandle,
+			recv_id_req,
+			QSEECOM_ALIGN(sbuf_len),
+			recv_id_rsp,
+			QSEECOM_ALIGN(rbuf_len));
+
+	if (rc < 0) {
+		pr_err("qseecom cmd failed err=%d\n", rc);
+		goto end;
+	}
+
+	rc = recv_id_rsp->retval;
+	if (rc) {
+		pr_err("enc cmd failed, rsp=%d\n", recv_id_rsp->retval);
+		rc = -EINVAL;
+		goto end;
+	}
+
+	pr_debug("rsp=%d\n", recv_id_rsp->retval);
+	pr_debug("commandid=%d\n", recv_id_rsp->commandid);
+
+end:
+	return rc;
+}
+
+static int hdcp_validate_recv_id(struct hdcp_lib_handle *handle)
+{
+	int rc = 0;
+	struct hdcp_rcv_id_list_req *recv_id_req;
+	struct hdcp_rcv_id_list_rsp *recv_id_rsp;
+
+	if (!handle || !handle->qseecom_handle ||
+		!handle->qseecom_handle->sbuf) {
+		pr_err("invalid handle\n");
+		return -EINVAL;
+	}
+
+	/* validate the receiver ID list against the new SRM blob */
+	recv_id_req = (struct hdcp_rcv_id_list_req *)
+					handle->qseecom_handle->sbuf;
+	recv_id_req->commandid = HDCP_TXMTR_VALIDATE_RECEIVER_ID_LIST;
+	recv_id_req->ctxHandle = handle->tz_ctxhandle;
+
+	recv_id_rsp = (struct hdcp_rcv_id_list_rsp *)
+		(handle->qseecom_handle->sbuf +
+		QSEECOM_ALIGN(sizeof(struct hdcp_rcv_id_list_req)));
+
+	rc = qseecom_send_command(handle->qseecom_handle,
+			recv_id_req,
+			QSEECOM_ALIGN(sizeof(struct hdcp_rcv_id_list_req)),
+			recv_id_rsp,
+			QSEECOM_ALIGN(sizeof(struct hdcp_rcv_id_list_rsp)));
+
+	if ((rc < 0) || (recv_id_rsp->status != HDCP_SUCCESS) ||
+		(recv_id_rsp->commandid !=
+			HDCP_TXMTR_VALIDATE_RECEIVER_ID_LIST)) {
+		pr_err("qseecom cmd failed with err = %d status = %d\n",
+			   rc, recv_id_rsp->status);
+		rc = -EINVAL;
+		goto exit;
+	}
+
+exit:
+	return rc;
+}
+
+int hdcp1_set_enc(bool enable)
+{
+	int rc = 0;
+	struct hdcp1_set_enc_req *set_enc_req;
+	struct hdcp1_set_enc_rsp *set_enc_rsp;
+	struct qseecom_handle *hdcp1_qsee_handle;
+
+	mutex_lock(&hdcp1_ta_cmd_lock);
+
+	if (!hdcp1_supported || !hdcp1_handle) {
+		rc = -EINVAL;
+		goto end;
+	}
+
+	hdcp1_qsee_handle = hdcp1_handle->qsee_handle;
+
+	if (!hdcp1_qsee_handle) {
+		rc = -EINVAL;
+		goto end;
+	}
+
+	if (hdcp1_enc_enabled == enable) {
+		pr_info("already %s\n", enable ? "enabled" : "disabled");
+		goto end;
+	}
+
+	/* send the encryption enable/disable command to the hdcp1 TA */
+	set_enc_req = (struct hdcp1_set_enc_req *)hdcp1_qsee_handle->sbuf;
+	set_enc_req->commandid = HDCP1_SET_ENC_MESSAGE_ID;
+	set_enc_req->enable = enable;
+	set_enc_rsp = (struct hdcp1_set_enc_rsp *)(hdcp1_qsee_handle->sbuf +
+			QSEECOM_ALIGN(sizeof(struct hdcp1_set_enc_req)));
+	rc = qseecom_send_command(hdcp1_qsee_handle, set_enc_req,
+				  QSEECOM_ALIGN(sizeof
+						(struct hdcp1_set_enc_req)),
+				  set_enc_rsp,
+				  QSEECOM_ALIGN(sizeof
+						(struct hdcp1_set_enc_rsp)));
+
+	if (rc < 0) {
+		pr_err("qseecom cmd failed err=%d\n", rc);
+		goto end;
+	}
+
+	rc = set_enc_rsp->ret;
+	if (rc) {
+		pr_err("enc cmd failed, rsp=%d\n", set_enc_rsp->ret);
+		rc = -EINVAL;
+		goto end;
+	}
+
+	hdcp1_enc_enabled = enable;
+	pr_info("%s success\n", enable ? "enable" : "disable");
+end:
+	mutex_unlock(&hdcp1_ta_cmd_lock);
+	return rc;
+}
+
+int hdcp_library_register(struct hdcp_register_data *data)
+{
+	int rc = 0;
+	struct hdcp_lib_handle *handle = NULL;
+
+	if (!data) {
+		pr_err("invalid input\n");
+		return -EINVAL;
+	}
+
+	if (!data->txmtr_ops) {
+		pr_err("invalid input: txmtr context\n");
+		return -EINVAL;
+	}
+
+	if (!data->client_ops) {
+		pr_err("invalid input: client_ops\n");
+		return -EINVAL;
+	}
+
+	if (!data->hdcp_ctx) {
+		pr_err("invalid input: hdcp_ctx\n");
+		return -EINVAL;
+	}
+
+	/* populate ops to be called by client */
+	data->txmtr_ops->feature_supported = hdcp_lib_client_feature_supported;
+	data->txmtr_ops->wakeup = hdcp_lib_wakeup_thread;
+	data->txmtr_ops->update_exec_type = hdcp_lib_update_exec_type;
+
+	handle = kzalloc(sizeof(*handle), GFP_KERNEL);
+	if (!handle) {
+		rc = -ENOMEM;
+		goto exit;
+	}
+
+	handle->client_ctx = data->client_ctx;
+	handle->client_ops = data->client_ops;
+	handle->tethered = data->tethered;
+	handle->hdcp_app_init = NULL;
+	handle->hdcp_txmtr_init = NULL;
+	handle->device_type = data->device_type;
+
+	pr_debug("tethered %d\n", handle->tethered);
+
+	atomic_set(&handle->hdcp_off, 0);
+
+	mutex_init(&handle->msg_lock);
+	mutex_init(&handle->wakeup_mutex);
+
+	init_kthread_worker(&handle->worker);
+
+	init_kthread_work(&handle->wk_init, hdcp_lib_init_work);
+	init_kthread_work(&handle->wk_msg_sent, hdcp_lib_msg_sent_work);
+	init_kthread_work(&handle->wk_msg_recvd, hdcp_lib_msg_recvd_work);
+	init_kthread_work(&handle->wk_timeout, hdcp_lib_manage_timeout_work);
+	init_kthread_work(&handle->wk_clean, hdcp_lib_cleanup_work);
+	init_kthread_work(&handle->wk_wait, hdcp_lib_wait_work);
+	init_kthread_work(&handle->wk_stream, hdcp_lib_query_stream_work);
+
+	init_completion(&handle->poll_wait);
+
+	handle->listener_buf = kzalloc(MAX_TX_MESSAGE_SIZE, GFP_KERNEL);
+	if (!(handle->listener_buf)) {
+		rc = -ENOMEM;
+		goto error;
+	}
+
+	*data->hdcp_ctx = handle;
+	/*
+	 * Cache the client context for later use: the HDCP driver
+	 * probes earlier than the SDE driver, so keep a reference
+	 * here until it is needed.
+	 */
+
+	drv_client_handle = handle;
+	handle->thread = kthread_run(kthread_worker_fn,
+				     &handle->worker, "hdcp_tz_lib");
+
+	if (IS_ERR(handle->thread)) {
+		pr_err("unable to start lib thread\n");
+		rc = PTR_ERR(handle->thread);
+		handle->thread = NULL;
+		goto error;
+	}
+
+	return 0;
+error:
+	kzfree(handle->listener_buf);
+	handle->listener_buf = NULL;
+	kzfree(handle);
+	handle = NULL;
+exit:
+	return rc;
+}
+EXPORT_SYMBOL(hdcp_library_register);
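+
+/*
+ * Illustrative only: a client driver would register with the library
+ * roughly as below. The foo_* names are hypothetical; the fields match
+ * the checks performed above.
+ *
+ *	struct hdcp_register_data data = {
+ *		.client_ops  = &foo_hdcp_client_ops,
+ *		.txmtr_ops   = &foo_txmtr_ops,
+ *		.client_ctx  = foo_ctx,
+ *		.hdcp_ctx    = &foo_ctx->hdcp_handle,
+ *		.device_type = foo_device_type,
+ *	};
+ *
+ *	rc = hdcp_library_register(&data);
+ */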
+
+void hdcp_library_deregister(void *phdcpcontext)
+{
+	struct hdcp_lib_handle *handle = phdcpcontext;
+
+	if (!handle)
+		return;
+
+	kthread_stop(handle->thread);
+
+	kzfree(handle->qseecom_handle);
+	kzfree(handle->last_msg_recvd_buf);
+
+	mutex_destroy(&handle->wakeup_mutex);
+
+	kzfree(handle->listener_buf);
+	kzfree(handle);
+}
+EXPORT_SYMBOL(hdcp_library_deregister);
+
+void hdcp1_notify_topology(void)
+{
+	char *envp[4];
+	char *a;
+	char *b;
+
+	a = kzalloc(SZ_16, GFP_KERNEL);
+
+	if (!a)
+		return;
+
+	b = kzalloc(SZ_16, GFP_KERNEL);
+
+	if (!b) {
+		kfree(a);
+		return;
+	}
+
+	envp[0] = "HDCP_MGR_EVENT=MSG_READY";
+	envp[1] = a;
+	envp[2] = b;
+	envp[3] = NULL;
+
+	snprintf(envp[1], 16, "%d", (int)DOWN_CHECK_TOPOLOGY);
+	snprintf(envp[2], 16, "%d", (int)HDCP_V1_TX);
+
+	kobject_uevent_env(&hdcp_drv_mgr->device->kobj, KOBJ_CHANGE, envp);
+	kfree(a);
+	kfree(b);
+}
+
+static ssize_t msm_hdcp_1x_sysfs_rda_tp(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	ssize_t ret = 0;
+
+	if (!hdcp_drv_mgr) {
+		pr_err("invalid input\n");
+		return -EINVAL;
+	}
+
+	switch (hdcp_drv_mgr->tp_msgid) {
+	case DOWN_CHECK_TOPOLOGY:
+	case DOWN_REQUEST_TOPOLOGY:
+		buf[MSG_ID_IDX]   = hdcp_drv_mgr->tp_msgid;
+		buf[RET_CODE_IDX] = HDCP_AUTHED;
+		ret = HEADER_LEN;
+
+		memcpy(buf + HEADER_LEN, &hdcp_drv_mgr->cached_tp,
+			   sizeof(struct HDCP_V2V1_MSG_TOPOLOGY));
+
+		ret += sizeof(struct HDCP_V2V1_MSG_TOPOLOGY);
+
+		/* clear the flag once data is read back to user space */
+		hdcp_drv_mgr->tp_msgid = -1;
+		break;
+	default:
+		ret = -EINVAL;
+	}
+
+	return ret;
+} /* msm_hdcp_1x_sysfs_rda_tp */
+
+static ssize_t msm_hdcp_1x_sysfs_wta_tp(struct device *dev,
+	struct device_attribute *attr, const char *buf, size_t count)
+{
+	int msgid = 0;
+	ssize_t ret = count;
+
+	if (!hdcp_drv_mgr || !buf) {
+		pr_err("invalid input\n");
+		return -EINVAL;
+	}
+
+	msgid = buf[0];
+
+	switch (msgid) {
+	case DOWN_CHECK_TOPOLOGY:
+	case DOWN_REQUEST_TOPOLOGY:
+		hdcp_drv_mgr->tp_msgid = msgid;
+		break;
+		/* more cases added here */
+	default:
+		ret = -EINVAL;
+	}
+
+	return ret;
+} /* msm_hdcp_1x_sysfs_wta_tp */
+
+static ssize_t hdmi_hdcp2p2_sysfs_wta_min_level_change(struct device *dev,
+	struct device_attribute *attr, const char *buf, size_t count)
+{
+	int rc;
+	int min_enc_lvl;
+	struct hdcp_lib_handle *handle;
+	ssize_t ret = count;
+
+	handle = hdcp_drv_mgr->handle;
+
+	/*
+	 * If the stream type from TZ is already type 1, ignore
+	 * subsequent writes to min_enc_level: the stream type is
+	 * already at the highest level, and for an HDCP 2.2 capable
+	 * sink there is no need to reduce it, so skipping the write
+	 * avoids state transitions that could cause visual artifacts.
+	 */
+	if (handle &&
+		!handle->non_2p2_present) {
+		pr_debug("stream type is 1 returning\n");
+		return ret;
+	}
+
+	rc = kstrtoint(buf, 10, &min_enc_lvl);
+	if (rc) {
+		pr_err("%s: kstrtoint failed. rc=%d\n", __func__, rc);
+		return -EINVAL;
+	}
+
+	if (handle && handle->client_ops->notify_lvl_change) {
+		handle->client_ops->notify_lvl_change(handle->client_ctx,
+		min_enc_lvl);
+	}
+
+	return ret;
+}
+
+static ssize_t hdmi_hdcp_srm_updated(struct device *dev,
+	struct device_attribute *attr, const char *buf, size_t count)
+{
+	int rc;
+	int srm_updated;
+	struct hdcp_lib_handle *handle;
+	ssize_t ret = count;
+	struct hdcp_client_ops *client_ops;
+	void *hdcp_client_ctx;
+
+	handle = hdcp_drv_mgr->handle;
+
+	rc = kstrtoint(buf, 10, &srm_updated);
+	if (rc) {
+		pr_err("%s: kstrtoint failed. rc=%d\n", __func__, rc);
+		return -EINVAL;
+	}
+
+	if (srm_updated) {
+		if (handle && handle->qseecom_handle) {
+			client_ops = handle->client_ops;
+			hdcp_client_ctx = handle->client_ctx;
+			if (hdcp_validate_recv_id(handle)) {
+				pr_debug("HDCP 2.2 SRM check FAILED\n");
+				if (client_ops->srm_cb)
+					client_ops->srm_cb(hdcp_client_ctx);
+			} else
+				pr_debug("HDCP 2.2 SRM check PASSED\n");
+		} else if (hdcp1_handle && hdcp1_handle->qsee_handle) {
+			pr_debug("HDCP 1.4 SRM check\n");
+			hdcp_client_ctx = hdcp1_handle->client_ctx;
+			client_ops = hdcp1_handle->client_ops;
+			if (client_ops->srm_cb)
+				client_ops->srm_cb(hdcp_client_ctx);
+		}
+	}
+
+	return ret;
+}
+
+static DEVICE_ATTR(tp, S_IRUGO | S_IWUSR, msm_hdcp_1x_sysfs_rda_tp,
+		   msm_hdcp_1x_sysfs_wta_tp);
+
+static DEVICE_ATTR(min_level_change, S_IWUSR, NULL,
+		   hdmi_hdcp2p2_sysfs_wta_min_level_change);
+
+static DEVICE_ATTR(srm_updated, S_IWUSR, NULL,
+		   hdmi_hdcp_srm_updated);
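+
+/*
+ * Illustrative user-space usage (the actual sysfs path depends on the
+ * CLASS_NAME/DRIVER_NAME the device is created with):
+ *
+ *	echo 1 > /sys/class/<class>/<device>/srm_updated
+ *	echo 2 > /sys/class/<class>/<device>/min_level_change
+ */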
+
+void hdcp1_cache_repeater_topology(void *hdcp1_cached_tp)
+{
+	memcpy((void *)&hdcp_drv_mgr->cached_tp,
+		   hdcp1_cached_tp,
+		   sizeof(struct HDCP_V2V1_MSG_TOPOLOGY));
+}
+
+static struct attribute *msm_hdcp_fs_attrs[] = {
+	&dev_attr_tp.attr,
+	&dev_attr_min_level_change.attr,
+	&dev_attr_srm_updated.attr,
+	NULL
+};
+
+static struct attribute_group msm_hdcp_fs_attr_group = {
+	.attrs = msm_hdcp_fs_attrs
+};
+
+static int msm_hdcp_open(struct inode *inode, struct file *file)
+{
+	return 0;
+}
+
+static int msm_hdcp_close(struct inode *inode, struct file *file)
+{
+	return 0;
+}
+
+static const struct file_operations msm_hdcp_fops = {
+	.owner = THIS_MODULE,
+	.open = msm_hdcp_open,
+	.release = msm_hdcp_close,
+};
+
+static const struct of_device_id msm_hdcp_dt_match[] = {
+	{ .compatible = "qcom,msm-hdcp",},
+	{}
+};
+
+MODULE_DEVICE_TABLE(of, msm_hdcp_dt_match);
+
+static int msm_hdcp_probe(struct platform_device *pdev)
+{
+	int ret;
+
+	hdcp_drv_mgr = devm_kzalloc(&pdev->dev, sizeof(struct msm_hdcp_mgr),
+						   GFP_KERNEL);
+	if (!hdcp_drv_mgr)
+		return -ENOMEM;
+
+	hdcp_drv_mgr->pdev = pdev;
+
+	platform_set_drvdata(pdev, hdcp_drv_mgr);
+
+	ret = alloc_chrdev_region(&hdcp_drv_mgr->dev_num, 0, 1, DRIVER_NAME);
+	if (ret < 0) {
+		pr_err("alloc_chrdev_region failed ret = %d\n", ret);
+		goto error_get_dev_num;
+	}
+
+	hdcp_drv_mgr->class = class_create(THIS_MODULE, CLASS_NAME);
+	if (IS_ERR(hdcp_drv_mgr->class)) {
+		ret = PTR_ERR(hdcp_drv_mgr->class);
+		pr_err("couldn't create class rc = %d\n", ret);
+		goto error_class_create;
+	}
+
+	hdcp_drv_mgr->device = device_create(hdcp_drv_mgr->class, NULL,
+		hdcp_drv_mgr->dev_num, NULL, DRIVER_NAME);
+	if (IS_ERR(hdcp_drv_mgr->device)) {
+		ret = PTR_ERR(hdcp_drv_mgr->device);
+		pr_err("device_create failed %d\n", ret);
+		goto error_class_device_create;
+	}
+
+	cdev_init(&hdcp_drv_mgr->cdev, &msm_hdcp_fops);
+	ret = cdev_add(&hdcp_drv_mgr->cdev,
+			MKDEV(MAJOR(hdcp_drv_mgr->dev_num), 0), 1);
+	if (ret < 0) {
+		pr_err("cdev_add failed %d\n", ret);
+		goto error_cdev_add;
+	}
+
+	ret = sysfs_create_group(&hdcp_drv_mgr->device->kobj,
+			&msm_hdcp_fs_attr_group);
+	if (ret)
+		pr_err("unable to register hdcp sysfs nodes\n");
+
+	/*
+	 * Store the handle in the hdcp drv mgr so the sysfs
+	 * notifications can reach the client.
+	 */
+	hdcp_drv_mgr->handle = drv_client_handle;
+
+	return 0;
+error_cdev_add:
+	device_destroy(hdcp_drv_mgr->class, hdcp_drv_mgr->dev_num);
+error_class_device_create:
+	class_destroy(hdcp_drv_mgr->class);
+error_class_create:
+	unregister_chrdev_region(hdcp_drv_mgr->dev_num, 1);
+error_get_dev_num:
+	devm_kfree(&pdev->dev, hdcp_drv_mgr);
+	hdcp_drv_mgr = NULL;
+	return ret;
+}
+
+static int msm_hdcp_remove(struct platform_device *pdev)
+{
+	struct msm_hdcp_mgr *mgr;
+
+	mgr = (struct msm_hdcp_mgr *)platform_get_drvdata(pdev);
+	if (!mgr)
+		return -ENODEV;
+
+	sysfs_remove_group(&hdcp_drv_mgr->device->kobj,
+			   &msm_hdcp_fs_attr_group);
+	cdev_del(&hdcp_drv_mgr->cdev);
+	device_destroy(hdcp_drv_mgr->class, hdcp_drv_mgr->dev_num);
+	class_destroy(hdcp_drv_mgr->class);
+	unregister_chrdev_region(hdcp_drv_mgr->dev_num, 1);
+
+	devm_kfree(&pdev->dev, hdcp_drv_mgr);
+	hdcp_drv_mgr = NULL;
+	return 0;
+}
+
+static struct platform_driver msm_hdcp_driver = {
+	.probe = msm_hdcp_probe,
+	.remove = msm_hdcp_remove,
+	.driver = {
+		.name = "msm_hdcp",
+		.of_match_table = msm_hdcp_dt_match,
+		.pm = NULL,
+	}
+};
+
+static int __init msm_hdcp_init(void)
+{
+	return platform_driver_register(&msm_hdcp_driver);
+}
+
+static void __exit msm_hdcp_exit(void)
+{
+	platform_driver_unregister(&msm_hdcp_driver);
+}
+
+module_init(msm_hdcp_init);
+module_exit(msm_hdcp_exit);
+
+MODULE_DESCRIPTION("MSM HDCP driver");
+MODULE_LICENSE("GPL v2");
diff -Nruw linux-4.4.115-fbx/drivers/misc/hdmi-cec./core.c linux-4.4.115-fbx/drivers/misc/hdmi-cec/core.c
--- linux-4.4.115-fbx/drivers/misc/hdmi-cec./core.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/misc/hdmi-cec/core.c	2019-06-20 12:29:44.969525999 +0200
@@ -0,0 +1,616 @@
+/*
+ * HDMI Consumer Electronics Control, core module
+ *
+ * Copyright (C) 2011, Florian Fainelli <ffainelli@freebox.fr>
+ *
+ * This file is subject to the GPLv2 licensing terms.
+ *
+ */
+#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include <linux/workqueue.h>
+#include <linux/completion.h>
+#include <linux/mutex.h>
+
+#include <linux/hdmi-cec/hdmi-cec.h>
+#include <linux/hdmi-cec/dev.h>
+
+#include "hdmi-cec-priv.h"
+
+static unsigned cec_adapter_count;
+
+#define CEC_RX_QUEUE_MAX_LEN	(20)
+
+/*
+ * 400 ms is the time it takes for one 16 byte message to be
+ * transferred and 5 is the maximum number of retries. Add
+ * another 100 ms as a margin. So if the transmit doesn't
+ * finish before that time something is really wrong and we
+ * have to time out.
+ */
+#define CEC_TX_TIMEOUT_MS (5 * 400 + 100)
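+
+/*
+ * Informative, assuming nominal CEC bus timing: one 10-bit block
+ * (8 data bits + EOM + ACK) takes roughly 24 ms on the wire, so a
+ * full 16-block message needs about 16 * 24 = 384 ms, rounded up to
+ * 400 ms above. With the margin: 5 * 400 + 100 = 2100 ms.
+ */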
+
+/**
+ * cec_set_logical_address() - sets the cec logical address
+ * @adapter:	adapter pointer
+ * @addr:	logical address
+ *
+ * calls the adapter specific set_logical_address callback
+ */
+int cec_set_logical_address(struct cec_adapter *adapter, const u8 addr)
+{
+	int ret;
+
+	if (addr > CEC_ADDR_MAX)
+		return -EINVAL;
+
+	mutex_lock(&adapter->lock);
+	if (adapter->dead)
+		ret = -ENODEV;
+	else
+		ret = adapter->ops->set_logical_address(adapter, addr);
+	mutex_unlock(&adapter->lock);
+
+	return ret;
+}
+
+/**
+ * __cec_rx_queue_len() - returns the length of a cec driver rx queue
+ * @adapter:	adapter pointer
+ */
+unsigned __cec_rx_queue_len(struct cec_adapter *adapter)
+{
+	unsigned qlen;
+
+	spin_lock(&adapter->rx_msg_list_lock);
+	qlen = adapter->rx_msg_len;
+	spin_unlock(&adapter->rx_msg_list_lock);
+
+	return qlen;
+}
+
+/**
+ * adapter_rx_done() - called by an adapter when message is received
+ * @adapter:	adapter pointer
+ * @data:	message blob
+ * @len:	message length
+ */
+int adapter_rx_done(struct cec_adapter *adapter,
+		    const u8 *data, const u8 len,
+		    bool valid, u8 rx_flags)
+{
+	struct cec_rx_kmsg *kmsg;
+	struct cec_rx_msg *msg;
+	int ret = 0;
+
+	if (!len || len > CEC_MAX_MSG_LEN)
+		return -EINVAL;
+
+	if (!adapter->attached) {
+		pr_debug("%s: no client attached, dropping\n", adapter->name);
+		goto out;
+	}
+
+	spin_lock(&adapter->rx_msg_list_lock);
+	if (adapter->rx_msg_len >= CEC_RX_QUEUE_MAX_LEN) {
+		pr_debug("%s: queue full!\n", adapter->name);
+		ret = -ENOSPC;
+		goto out_unlock;
+	}
+
+	kmsg = kzalloc(sizeof(*kmsg), GFP_ATOMIC);
+	if (!kmsg) {
+		ret = -ENOMEM;
+		goto out_unlock;
+	}
+
+	msg = &kmsg->msg;
+	memcpy(&msg->data, data, len);
+	msg->len = len;
+	msg->valid = valid;
+	msg->flags = rx_flags;
+	list_add_tail(&kmsg->next, &adapter->rx_msg_list);
+	adapter->rx_msg_len++;
+
+out_unlock:
+	spin_unlock(&adapter->rx_msg_list_lock);
+
+	/* wake up clients, they can dequeue a buffer now */
+	wake_up_interruptible(&adapter->wait);
+
+out:
+	return ret;
+}
+EXPORT_SYMBOL(adapter_rx_done);
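+
+/*
+ * Illustrative only: an adapter driver would typically feed received
+ * frames to the core from its RX interrupt handler. The foo_cec_*
+ * names below are hypothetical.
+ *
+ *	static irqreturn_t foo_cec_isr(int irq, void *dev_id)
+ *	{
+ *		struct foo_cec *priv = dev_id;
+ *		u8 buf[CEC_MAX_MSG_LEN];
+ *		u8 len = foo_cec_read_fifo(priv, buf);
+ *
+ *		adapter_rx_done(priv->adapter, buf, len, true, 0);
+ *		return IRQ_HANDLED;
+ *	}
+ */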
+
+/**
+ * cec_read_message() - reads a cec message from the adapter's rx queue
+ * @adapter:	adapter pointer
+ * @msg:	cec user-space exposed message pointer
+ *
+ * Reads a CEC message from the adapter's RX queue in blocking mode with
+ * either a finite or inifinite timeout
+ */
+int cec_read_message(struct cec_adapter *adapter,
+		     struct cec_rx_msg *msg,
+		     bool non_block)
+{
+	struct cec_rx_kmsg *kmsg;
+	int ret = 0;
+
+	if (!non_block) {
+		ret = wait_event_interruptible(adapter->wait,
+					       __cec_rx_queue_len(adapter) != 0 ||
+					       adapter->dead);
+		if (ret)
+			return ret;
+	}
+
+	if (adapter->dead)
+		return -ENODEV;
+
+	spin_lock(&adapter->rx_msg_list_lock);
+	if (list_empty(&adapter->rx_msg_list)) {
+		ret = -EAGAIN;
+		goto out;
+	}
+
+	kmsg = list_first_entry(&adapter->rx_msg_list,
+				struct cec_rx_kmsg, next);
+	memcpy(msg, &kmsg->msg, sizeof (*msg));
+	list_del(&kmsg->next);
+	kfree(kmsg);
+	adapter->rx_msg_len--;
+
+out:
+	spin_unlock(&adapter->rx_msg_list_lock);
+	return ret;
+}
+
+/**
+ * cec_send_message() - sends a user-fed cec message
+ * @adapter:	adapter pointer
+ * @msg:	user-exposed cec message pointer
+ *
+ * Send a message using the specific adapter
+ */
+int cec_send_message(struct cec_adapter *adapter, struct cec_tx_msg *msg)
+{
+	unsigned long flags;
+	int ret;
+
+	if (!msg->len || msg->len > CEC_MAX_MSG_LEN)
+		return -EINVAL;
+
+	mutex_lock(&adapter->lock);
+
+	if (adapter->dead) {
+		ret = -ENODEV;
+		goto out;
+	}
+
+	/* prevent queuing more than one message */
+	if (test_bit(0, &adapter->tx_pending)) {
+		ret = -EBUSY;
+		goto out;
+	}
+
+	/* default to 1 retransmit for polling messages, otherwise 3 */
+	if (!msg->tries)
+		msg->tries = msg->len == 1 ? 2 : 4;
+
+	/* try to send it */
+	set_bit(0, &adapter->tx_pending);
+	ret = adapter->ops->send(adapter, msg->expire_ms, msg->tries,
+				 msg->data, msg->len);
+
+	if (ret)
+		clear_bit(0, &adapter->tx_pending);
+	else {
+		spin_lock_irqsave(&adapter->tx_done_lock, flags);
+		if (test_bit(0, &adapter->tx_pending)) {
+			adapter->tx_timeout_timer.expires = jiffies +
+				msecs_to_jiffies(CEC_TX_TIMEOUT_MS);
+			add_timer(&adapter->tx_timeout_timer);
+		}
+		spin_unlock_irqrestore(&adapter->tx_done_lock, flags);
+	}
+
+out:
+	mutex_unlock(&adapter->lock);
+	return ret;
+}
+
+/**
+ * __adapter_tx_done() - records tx completion and wakes up waiters
+ * @adapter:	adapter pointer
+ *
+ * Caller must hold tx_done_lock.
+ */
+static void __adapter_tx_done(struct cec_adapter *adapter, bool success,
+			      u8 flags, u8 tries)
+{
+	if (!test_bit(0, &adapter->tx_pending)) {
+		WARN(1, "__adapter_tx_done called with no tx pending");
+		return;
+	}
+	adapter->last_tx_success = success;
+	adapter->last_tx_flags = flags;
+	adapter->last_tx_tries = tries;
+	clear_bit(0, &adapter->tx_pending);
+	wake_up_interruptible(&adapter->wait);
+}
+
+void adapter_tx_done(struct cec_adapter *adapter, bool success,
+		     u8 flags, u8 tries)
+{
+	/*
+	 * Stop the timer before taking tx_done_lock: the timeout
+	 * handler takes the same lock, so del_timer_sync() must not
+	 * be called with it held.
+	 */
+	del_timer_sync(&adapter->tx_timeout_timer);
+
+	spin_lock(&adapter->tx_done_lock);
+	__adapter_tx_done(adapter, success, flags, tries);
+	spin_unlock(&adapter->tx_done_lock);
+}
+EXPORT_SYMBOL(adapter_tx_done);
+
+/*
+ * tx timeout timer callback: flag the pending transmit as timed out
+ */
+static void adapter_tx_timeout(unsigned long data)
+{
+	struct cec_adapter *adapter = (struct cec_adapter *)data;
+	unsigned long flags;
+
+	dev_err(&adapter->dev, "tx timeout\n");
+
+	spin_lock_irqsave(&adapter->tx_done_lock, flags);
+	__adapter_tx_done(adapter, false, CEC_TX_F_TIMEOUT, 0);
+	spin_unlock_irqrestore(&adapter->tx_done_lock, flags);
+}
+
+/**
+ * cec_reset_device() - resets a cec adapter
+ * @adapter:	adapter pointer
+ *
+ * Resets a CEC device to a sane state
+ */
+int cec_reset_device(struct cec_adapter *adapter)
+{
+	int ret;
+
+	mutex_lock(&adapter->lock);
+	if (adapter->dead)
+		ret = -ENODEV;
+	else
+		ret = adapter->ops->reset(adapter);
+	mutex_unlock(&adapter->lock);
+	return ret;
+}
+
+/**
+ * cec_get_counters() - gets counters from a cec adapter
+ * @adapter:	adapter pointer
+ * @cnt:	struct cec_counters pointer
+ *
+ * Get counters from the CEC adapter if supported, adapter should advertise
+ * CEC_HW_HAS_COUNTERS flag
+ */
+int cec_get_counters(struct cec_adapter *adapter, struct cec_counters *cnt)
+{
+	int ret;
+
+	mutex_lock(&adapter->lock);
+	if (!(adapter->flags & CEC_HW_HAS_COUNTERS))
+		ret = -ENOTSUPP;
+	else if (adapter->dead)
+		ret = -ENODEV;
+	else
+		ret = adapter->ops->get_counters(adapter, cnt);
+	mutex_unlock(&adapter->lock);
+
+	return ret;
+}
+
+/**
+ * cec_set_detached_config() - send detached config to adapter
+ * @adapter:	adapter pointer
+ * @config:	config
+ *
+ */
+int cec_set_detached_config(struct cec_adapter *adapter,
+			    const struct cec_detached_config *config)
+{
+	int ret;
+
+	mutex_lock(&adapter->lock);
+	if (adapter->dead)
+		ret = -ENODEV;
+	else
+		ret = adapter->ops->set_detached_config(adapter, config);
+	mutex_unlock(&adapter->lock);
+
+	return ret;
+}
+
+/**
+ * cec_set_rx_mode() - sets the adapter receive mode
+ * @adapter:	adapter pointer
+ * @mode:	receive mode (accept all, unicast only)
+ *
+ * Set the receive mode filter of the adapter
+ */
+int cec_set_rx_mode(struct cec_adapter *adapter, enum cec_rx_mode mode)
+{
+	int ret;
+
+	if (!(adapter->flags & CEC_HW_HAS_RX_FILTER))
+		return -ENOTSUPP;
+
+	if (mode >= CEC_RX_MODE_MAX)
+		return -EINVAL;
+
+	mutex_lock(&adapter->lock);
+	if (adapter->dead)
+		ret = -ENODEV;
+	else
+		ret = adapter->ops->set_rx_mode(adapter, mode);
+	mutex_unlock(&adapter->lock);
+
+	return ret;
+}
+
+/**
+ * cec_attach_host - attaches a host to the adapter
+ * @adapter:	adapter pointer
+ *
+ * Attaches the host to the adapter. In case the hardware is able
+ * to process CEC messages itself, it should now send them to the
+ * host for processing
+ */
+int cec_attach_host(struct cec_adapter *adapter)
+{
+	int ret = 0;
+
+	if (adapter->attached)
+		return -EBUSY;
+
+	mutex_lock(&adapter->lock);
+	if (adapter->dead)
+		ret = -ENODEV;
+	else {
+		if (adapter->ops->attach)
+			ret = adapter->ops->attach(adapter);
+		if (!ret)
+			adapter->attached = true;
+	}
+	mutex_unlock(&adapter->lock);
+	return ret;
+}
+
+/**
+ * cec_detach_host - detaches a host from the adapter
+ * @adapter:	adapter pointer
+ *
+ * Detaches the host from the adapter. In case the hardware is able
+ * to process CEC messages itself, it should now keep the messages for
+ * itself and no longer send them to the host
+ */
+int cec_detach_host(struct cec_adapter *adapter)
+{
+	int ret = 0;
+
+	mutex_lock(&adapter->lock);
+	if (adapter->dead)
+		ret = -ENODEV;
+	else {
+		if (adapter->ops->detach)
+			adapter->ops->detach(adapter);
+		adapter->attached = false;
+	}
+	mutex_unlock(&adapter->lock);
+
+	return ret;
+}
+
+/**
+ * alloc_cec_adapter() - allocate a new cec adapter
+ * @priv_size:	size of the adapter private data
+ */
+struct cec_adapter *alloc_cec_adapter(size_t priv_size)
+{
+	size_t size;
+
+	size = sizeof (struct cec_adapter) + priv_size + CECDEV_PRIV_ALIGN;
+	return kzalloc(size, GFP_KERNEL);
+}
+EXPORT_SYMBOL(alloc_cec_adapter);
+
+/**
+ * cec_flush_queues() - flushes a cec adapter queues
+ * @adapter:	adapter pointer
+ */
+void cec_flush_queues(struct cec_adapter *adapter)
+{
+	struct cec_rx_kmsg *cur, *next;
+
+	spin_lock(&adapter->rx_msg_list_lock);
+
+	list_for_each_entry_safe(cur, next, &adapter->rx_msg_list, next)
+		kfree(cur);
+	INIT_LIST_HEAD(&adapter->rx_msg_list);
+	adapter->rx_msg_len = 0;
+
+	spin_unlock(&adapter->rx_msg_list_lock);
+}
+
+/*
+ * device refcounting
+ */
+int cec_get_adapter(struct cec_adapter *adapter)
+{
+	int ret;
+
+	mutex_lock(&adapter->lock);
+	ret = adapter->dead;
+	if (!ret)
+		atomic_inc(&adapter->users);
+	mutex_unlock(&adapter->lock);
+	return ret;
+}
+
+void cec_put_adapter(struct cec_adapter *adapter)
+{
+	if (atomic_dec_and_test(&adapter->users))
+		kfree(adapter);
+}
+
+/**
+ * free_cec_adapter() - free cec adapter
+ */
+void free_cec_adapter(struct cec_adapter *adapter)
+{
+	cec_put_adapter(adapter);
+}
+EXPORT_SYMBOL(free_cec_adapter);
+
+/*
+ * called by sysfs when all device references have been dropped
+ */
+static void cec_adapter_sysfs_release(struct device *dev)
+{
+	struct cec_adapter *adapter = to_cec_adapter(dev);
+	free_cec_adapter(adapter);
+}
+
+/*
+ * cec device sysfs bus
+ */
+static struct bus_type cec_bus_type = {
+	.name = "cec",
+};
+
+/**
+ * register_cec_adapter() - registers a new cec adapter
+ * @adapter:	cec_adapter pointer
+ * @parent:	parent device
+ */
+int register_cec_adapter(struct cec_adapter *adapter, struct device *parent)
+{
+	struct device *dev = &adapter->dev;
+	int ret;
+
+	if (!parent)
+		return -EINVAL;
+
+	memset(dev, 0, sizeof (*dev));
+
+	adapter->attached = false;
+	mutex_init(&adapter->lock);
+
+	adapter->tx_pending = 0;
+	spin_lock_init(&adapter->tx_done_lock);
+	init_waitqueue_head(&adapter->wait);
+
+	spin_lock_init(&adapter->rx_msg_list_lock);
+	INIT_LIST_HEAD(&adapter->rx_msg_list);
+	adapter->rx_msg_len = 0;
+
+	init_timer(&adapter->tx_timeout_timer);
+	adapter->tx_timeout_timer.function = adapter_tx_timeout;
+	adapter->tx_timeout_timer.data = (unsigned long)adapter;
+
+	snprintf(adapter->name, sizeof (adapter->name),
+		 "%s%d", adapter->driver_name,
+		 cec_adapter_count++);
+
+	/* register to sysfs */
+	dev_set_name(dev, "%s", adapter->name);
+	dev->bus = &cec_bus_type;
+	dev->parent = parent;
+	dev->release = cec_adapter_sysfs_release;
+	device_initialize(dev);
+
+	/* create char device */
+	ret = cec_create_adapter_node(adapter);
+	if (ret < 0)
+		return ret;
+
+	ret = device_add(dev);
+	if (ret < 0) {
+		cec_remove_adapter_node(adapter);
+		return ret;
+	}
+
+	/* 2 users, driver itself + sysfs */
+	atomic_set(&adapter->users, 2);
+	dev_info(dev, "registered cec adapter\n");
+	return 0;
+}
+EXPORT_SYMBOL(register_cec_adapter);
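+
+/*
+ * Illustrative registration sequence for an adapter driver (sketch;
+ * the foo_* names are hypothetical):
+ *
+ *	adapter = alloc_cec_adapter(sizeof(struct foo_cec));
+ *	if (!adapter)
+ *		return -ENOMEM;
+ *	adapter->ops = &foo_cec_ops;
+ *	snprintf(adapter->driver_name, sizeof(adapter->driver_name),
+ *		 "foo-cec");
+ *	ret = register_cec_adapter(adapter, &pdev->dev);
+ */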
+
+/**
+ * unregister_cec_adapter() - unregisters a cec adapter
+ * @adapter:	cec_adapter pointer
+ */
+void unregister_cec_adapter(struct cec_adapter *adapter)
+{
+	/* mark as dead */
+	mutex_lock(&adapter->lock);
+	adapter->dead = true;
+	mutex_unlock(&adapter->lock);
+
+	/* from this point, no adapter ops can be called again */
+
+	/* unregister char dev openers */
+	cec_remove_adapter_node(adapter);
+
+	del_timer_sync(&adapter->tx_timeout_timer);
+
+	/* wake up any sleeper */
+	adapter->last_tx_success = false;
+	adapter->last_tx_flags = 0;
+	adapter->tx_pending = 0;
+	wake_up_all(&adapter->wait);
+
+	cec_detach_host(adapter);
+	cec_flush_queues(adapter);
+	/* let sysfs release the device */
+	dev_info(&adapter->dev, "unregistering cec adapter\n");
+	device_unregister(&adapter->dev);
+}
+EXPORT_SYMBOL(unregister_cec_adapter);
+
+static int __init cec_init(void)
+{
+	int ret;
+
+	ret = bus_register(&cec_bus_type);
+	if (ret) {
+		pr_err("failed to register cec bus\n");
+		return ret;
+	}
+
+	ret = cec_cdev_init();
+	if (ret) {
+		pr_err("failed to create devices\n");
+		bus_unregister(&cec_bus_type);
+		return ret;
+	}
+
+	return 0;
+}
+
+static void __exit cec_exit(void)
+{
+	cec_cdev_exit();
+	bus_unregister(&cec_bus_type);
+}
+
+subsys_initcall(cec_init);
+module_exit(cec_exit);
+
+MODULE_AUTHOR("Florian Fainelli <ffainelli@freebox.fr>");
+MODULE_DESCRIPTION("HDMI CEC core driver");
+MODULE_LICENSE("GPL");
diff -Nruw linux-4.4.115-fbx/drivers/misc/hdmi-cec./dev.c linux-4.4.115-fbx/drivers/misc/hdmi-cec/dev.c
--- linux-4.4.115-fbx/drivers/misc/hdmi-cec./dev.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/misc/hdmi-cec/dev.c	2019-05-31 20:58:54.922934968 +0200
@@ -0,0 +1,294 @@
+/*
+ * HDMI CEC character device code
+ *
+ * Copyright (C), 2011 Florian Fainelli <ffainelli@freebox.fr>
+ *
+ * This file is subject to the GPLv2 licensing terms
+ */
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/cdev.h>
+#include <linux/uaccess.h>
+#include <linux/spinlock.h>
+#include <linux/ioctl.h>
+#include <linux/poll.h>
+#include <linux/sched.h>
+
+#include <linux/hdmi-cec/hdmi-cec.h>
+#include <linux/hdmi-cec/dev.h>
+
+#include "hdmi-cec-priv.h"
+
+static int cec_major;
+
+static DEFINE_MUTEX(cec_minors_lock);
+static bool cec_minors[256];
+
+static int cec_dev_open(struct inode *i, struct file *f)
+{
+	struct cdev *cdev = i->i_cdev;
+	struct cec_adapter *adapter =
+			container_of(cdev, struct cec_adapter, cdev);
+
+	if (f->private_data)
+		return -EBUSY;
+
+	if (cec_get_adapter(adapter))
+		return -ENODEV;
+
+	f->private_data = adapter;
+	return cec_attach_host(adapter);
+}
+
+static int cec_dev_close(struct inode *i, struct file *f)
+{
+	struct cec_adapter *adapter = f->private_data;
+
+	cec_detach_host(adapter);
+	cec_flush_queues(adapter);
+	f->private_data = NULL;
+	cec_put_adapter(adapter);
+	return 0;
+}
+
+static int wait_tx_done(struct cec_adapter *adapter)
+{
+	int ret;
+
+	ret = wait_event_interruptible(adapter->wait,
+				       !test_bit(0, &adapter->tx_pending) ||
+				       adapter->dead);
+	if (ret)
+		return ret;
+
+	if (adapter->dead)
+		return -ENODEV;
+
+	return 0;
+}
+
+static long cec_dev_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
+{
+	struct cec_adapter *adapter;
+	void __user *argp = (void __user *)arg;
+	int __user *p = argp;
+	int val, ret;
+	struct cec_counters cnt;
+	struct cec_tx_status tx_status;
+	struct cec_detached_config config;
+
+	if (!f->private_data)
+		return -EINVAL;
+
+	adapter = f->private_data;
+
+	ret = -ENOTTY;
+	switch (cmd) {
+	case CEC_SET_LOGICAL_ADDRESS:
+		if (get_user(val, p))
+			return -EFAULT;
+
+		ret = cec_set_logical_address(adapter, (u8)val);
+		break;
+
+	case CEC_RESET_DEVICE:
+		ret = cec_reset_device(adapter);
+		break;
+
+	case CEC_GET_COUNTERS:
+		memset(&cnt, 0, sizeof(cnt));
+
+		ret = cec_get_counters(adapter, &cnt);
+		if (ret)
+			return ret;
+
+		if (copy_to_user(argp, &cnt, sizeof(cnt)))
+			return -EFAULT;
+		break;
+
+	case CEC_GET_TX_STATUS:
+		tx_status.sent = !test_bit(0, &adapter->tx_pending);
+		tx_status.success = adapter->last_tx_success;
+		tx_status.flags = adapter->last_tx_flags;
+		tx_status.tries = adapter->last_tx_tries;
+
+		if (copy_to_user(argp, &tx_status, sizeof(tx_status)))
+			return -EFAULT;
+
+		ret = 0;
+		break;
+
+	case CEC_SET_RX_MODE:
+		if (get_user(val, p))
+			return -EFAULT;
+
+		ret = cec_set_rx_mode(adapter, (enum cec_rx_mode)val);
+		break;
+
+	case CEC_SET_DETACHED_CONFIG:
+		if (copy_from_user(&config, argp, sizeof (config)))
+			return -EFAULT;
+
+		ret = cec_set_detached_config(adapter, &config);
+		break;
+
+	default:
+		dev_err(&adapter->dev, "unsupported ioctl: %08x\n", cmd);
+		break;
+	}
+
+	return ret;
+}
+
+static ssize_t cec_dev_write(struct file *f, const char __user *buf,
+			     size_t count, loff_t *pos)
+{
+	struct cec_adapter *adapter = f->private_data;
+	struct cec_tx_msg msg;
+	int ret;
+
+	if (count != sizeof (struct cec_tx_msg))
+		return -EINVAL;
+
+	if (copy_from_user(&msg, buf, sizeof (msg)))
+		return -EFAULT;
+
+	ret = cec_send_message(adapter, &msg);
+	if (ret)
+		return ret;
+
+	if (!(f->f_flags & O_NONBLOCK)) {
+		ret = wait_tx_done(adapter);
+		if (ret)
+			return ret;
+
+		/* update status */
+		msg.success = adapter->last_tx_success;
+		msg.flags = adapter->last_tx_flags;
+		msg.tries = adapter->last_tx_tries;
+	}
+
+	if (copy_to_user((char __user *)buf, &msg, sizeof (msg)))
+		return -EFAULT;
+
+	return count;
+}
+
+static ssize_t cec_dev_read(struct file *f, char __user *buf,
+			    size_t count, loff_t *pos)
+{
+	struct cec_adapter *adapter = f->private_data;
+	int ret;
+	struct cec_rx_msg msg;
+
+	if (count != sizeof (struct cec_rx_msg))
+		return -EINVAL;
+
+	ret = cec_read_message(adapter, &msg, f->f_flags & O_NONBLOCK);
+	if (ret)
+		return ret;
+
+	if (copy_to_user(buf, &msg, sizeof (msg)))
+		return -EFAULT;
+
+	return sizeof (msg);
+}
+
+static unsigned int cec_dev_poll(struct file *f, poll_table *wait)
+{
+	struct cec_adapter *adapter = f->private_data;
+	unsigned int flags;
+
+	if (adapter->dead)
+		return POLLERR | POLLHUP;
+
+	poll_wait(f, &adapter->wait, wait);
+
+	flags = 0;
+	if (__cec_rx_queue_len(adapter))
+		flags |= POLLIN;
+
+	if (!test_bit(0, &adapter->tx_pending))
+		flags |= POLLOUT;
+
+	return flags;
+}
+
+static const struct file_operations cec_adapter_fops = {
+	.owner		= THIS_MODULE,
+	.llseek		= no_llseek,
+	.open		= cec_dev_open,
+	.release	= cec_dev_close,
+	.unlocked_ioctl	= cec_dev_ioctl,
+	.read		= cec_dev_read,
+	.write		= cec_dev_write,
+	.poll		= cec_dev_poll,
+};
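+
+/*
+ * Illustrative user-space usage of the resulting char device (sketch;
+ * the node name depends on how udev names the adapter). A blocking
+ * write() queues one message and returns once transmission completed;
+ * read() returns one received message:
+ *
+ *	struct cec_tx_msg tx = { .len = 2, .data = { 0x40, 0x36 } };
+ *	struct cec_rx_msg rx;
+ *	int fd = open("/dev/cec0", O_RDWR);
+ *
+ *	write(fd, &tx, sizeof(tx));
+ *	read(fd, &rx, sizeof(rx));
+ */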
+
+int cec_create_adapter_node(struct cec_adapter *adapter)
+{
+	size_t i;
+	dev_t devno;
+	int ret;
+
+	cdev_init(&adapter->cdev, &cec_adapter_fops);
+	adapter->cdev.kobj.parent = &adapter->dev.kobj;
+	adapter->cdev.owner = adapter->module;
+
+	/* allocate minor */
+	mutex_lock(&cec_minors_lock);
+	for (i = 0; i < ARRAY_SIZE(cec_minors); i++) {
+		if (!cec_minors[i]) {
+			/* mark the minor as used before dropping the lock */
+			cec_minors[i] = true;
+			break;
+		}
+	}
+	mutex_unlock(&cec_minors_lock);
+
+	if (i == ARRAY_SIZE(cec_minors)) {
+		dev_err(&adapter->dev, "no minor available\n");
+		return -ENOSPC;
+	}
+
+	devno = MKDEV(cec_major, i);
+	ret = cdev_add(&adapter->cdev, devno, 1);
+	if (ret) {
+		dev_err(&adapter->dev, "failed to add char device\n");
+		mutex_lock(&cec_minors_lock);
+		cec_minors[i] = false;
+		mutex_unlock(&cec_minors_lock);
+		return ret;
+	}
+
+	adapter->dev.devt = devno;
+	return 0;
+}
+
+void cec_remove_adapter_node(struct cec_adapter *adapter)
+{
+	mutex_lock(&cec_minors_lock);
+	cec_minors[MINOR(adapter->cdev.dev)] = false;
+	mutex_unlock(&cec_minors_lock);
+	cdev_del(&adapter->cdev);
+}
+
+int __init cec_cdev_init(void)
+{
+	dev_t dev = 0;
+	int ret;
+
+	ret = alloc_chrdev_region(&dev, 0, CEC_MAX_DEVS, "cec");
+	if (ret < 0) {
+		printk(KERN_ERR "alloc_chrdev_region() failed for cec\n");
+		return ret;
+	}
+
+	cec_major = MAJOR(dev);
+	return 0;
+}
+
+void __exit cec_cdev_exit(void)
+{
+	unregister_chrdev_region(MKDEV(cec_major, 0), CEC_MAX_DEVS);
+}
+
diff -Nruw linux-4.4.115-fbx/drivers/misc/hdmi-cec./hdmi-cec-priv.h linux-4.4.115-fbx/drivers/misc/hdmi-cec/hdmi-cec-priv.h
--- linux-4.4.115-fbx/drivers/misc/hdmi-cec./hdmi-cec-priv.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/misc/hdmi-cec/hdmi-cec-priv.h	2019-05-31 20:58:54.922934968 +0200
@@ -0,0 +1,34 @@
+#ifndef __HDMI_CEC_PRIV_H
+#define __HDMI_CEC_PRIV_H
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+
+/**
+ * struct cec_rx_kmsg - kernel-side cec message cookie
+ * @msg:	user-side cec message cookie
+ * @next:	list pointer to next message
+ */
+struct cec_rx_kmsg {
+	struct cec_rx_msg	msg;
+	struct list_head	next;
+};
+
+int cec_get_adapter(struct cec_adapter *);
+void cec_put_adapter(struct cec_adapter *);
+int cec_read_message(struct cec_adapter *, struct cec_rx_msg *msg,
+		     bool non_block);
+int cec_send_message(struct cec_adapter *, struct cec_tx_msg *msg);
+int cec_reset_device(struct cec_adapter *);
+int cec_get_counters(struct cec_adapter *, struct cec_counters *cnt);
+int cec_set_logical_address(struct cec_adapter *, const u8 addr);
+int cec_set_rx_mode(struct cec_adapter *, enum cec_rx_mode mode);
+void cec_flush_queues(struct cec_adapter *);
+unsigned __cec_rx_queue_len(struct cec_adapter *);
+int cec_attach_host(struct cec_adapter *);
+int cec_detach_host(struct cec_adapter *);
+int cec_set_detached_config(struct cec_adapter *,
+			    const struct cec_detached_config *);
+
+#endif /* __HDMI_CEC_PRIV_H */
+
diff -Nruw linux-4.4.115-fbx/drivers/misc/hdmi-cec./Kconfig linux-4.4.115-fbx/drivers/misc/hdmi-cec/Kconfig
--- linux-4.4.115-fbx/drivers/misc/hdmi-cec./Kconfig	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/misc/hdmi-cec/Kconfig	2019-05-31 20:58:54.922934968 +0200
@@ -0,0 +1,8 @@
+menu "HDMI CEC support"
+
+config HDMI_CEC
+	tristate "HDMI CEC (Consumer Electronics Control) support"
+	---help---
+	   HDMI Consumer Electronics Control support.
+
+endmenu
diff -Nruw linux-4.4.115-fbx/drivers/misc/hdmi-cec./Makefile linux-4.4.115-fbx/drivers/misc/hdmi-cec/Makefile
--- linux-4.4.115-fbx/drivers/misc/hdmi-cec./Makefile	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/misc/hdmi-cec/Makefile	2019-05-31 20:58:54.922934968 +0200
@@ -0,0 +1,2 @@
+obj-$(CONFIG_HDMI_CEC)		+= hdmi-cec.o
+hdmi-cec-objs			+= core.o dev.o
diff -Nruw linux-4.4.115-fbx/drivers/misc/qcom./Kconfig linux-4.4.115-fbx/drivers/misc/qcom/Kconfig
--- linux-4.4.115-fbx/drivers/misc/qcom./Kconfig	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/misc/qcom/Kconfig	2019-01-22 16:16:24.743257635 +0100
@@ -0,0 +1,20 @@
+config MSM_QDSP6V2_CODECS
+	bool "Audio QDSP6V2 APR support"
+	depends on MSM_SMD
+	select SND_SOC_QDSP6V2
+	help
+	  Enable Audio codecs with APR IPC protocol support between
+	  application processor and QDSP6 for B-family. APR is
+	  used by audio driver to configure QDSP6's
+	  ASM, ADM and AFE.
+
+config MSM_ULTRASOUND
+	bool "QDSP6V2 HW Ultrasound support"
+	select SND_SOC_QDSP6V2
+	help
+	  Enable HW Ultrasound support in QDSP6V2.
+	  QDSP6V2 can support HW encoder & decoder and
+	  ultrasound processing. It will enable
+	  ultrasound data paths between
+	  HW and services, calculating input events
+	  upon the ultrasound data.
diff -Nruw linux-4.4.115-fbx/drivers/misc/qcom./Makefile linux-4.4.115-fbx/drivers/misc/qcom/Makefile
--- linux-4.4.115-fbx/drivers/misc/qcom./Makefile	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/misc/qcom/Makefile	2019-01-22 16:16:24.743257635 +0100
@@ -0,0 +1 @@
+obj-y		+= qdsp6v2/
diff -Nruw linux-4.4.115-fbx/drivers/misc/qcom./qdsp6v2/aac_in.c linux-4.4.115-fbx/drivers/misc/qcom/qdsp6v2/aac_in.c
--- linux-4.4.115-fbx/drivers/misc/qcom./qdsp6v2/aac_in.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/misc/qcom/qdsp6v2/aac_in.c	2019-01-22 16:16:24.743257635 +0100
@@ -0,0 +1,709 @@
+/*
+ * Copyright (c) 2010-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/miscdevice.h>
+#include <linux/uaccess.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+#include <linux/dma-mapping.h>
+#include <linux/slab.h>
+#include <linux/msm_audio_aac.h>
+#include <linux/compat.h>
+#include <asm/atomic.h>
+#include <asm/ioctls.h>
+#include "audio_utils.h"
+
+
+/* Buffer with meta */
+#define PCM_BUF_SIZE		(4096 + sizeof(struct meta_in))
+
+/* Maximum 5 frames in buffer with meta */
+#define FRAME_SIZE		(1 + ((1536+sizeof(struct meta_out_dsp)) * 5))
+
+#define AAC_FORMAT_ADTS 65535
+
+#define MAX_SAMPLE_RATE_384K 384000
+
+static long aac_in_ioctl_shared(struct file *file, unsigned int cmd, void *arg)
+{
+	struct q6audio_in  *audio = file->private_data;
+	int rc = 0;
+	int cnt = 0;
+
+	switch (cmd) {
+	case AUDIO_START: {
+		struct msm_audio_aac_enc_config *enc_cfg;
+		struct msm_audio_aac_config *aac_config;
+		uint32_t aac_mode = AAC_ENC_MODE_AAC_LC;
+
+		enc_cfg = audio->enc_cfg;
+		aac_config = audio->codec_cfg;
+		/* configure the encoder */
+		pr_debug("%s:session id %d: default buf alloc[%d]\n", __func__,
+				audio->ac->session, audio->buf_alloc);
+		if (audio->enabled == 1) {
+			pr_info("%s:AUDIO_START already over\n", __func__);
+			rc = 0;
+			break;
+		}
+
+		if (audio->opened) {
+			rc = audio_in_buf_alloc(audio);
+			if (rc < 0) {
+				pr_err("%s:session id %d: buffer allocation failed\n",
+					 __func__, audio->ac->session);
+				break;
+			}
+		} else {
+			if (audio->feedback == NON_TUNNEL_MODE) {
+				pr_debug("%s: starting in non_tunnel mode",
+					__func__);
+				rc = q6asm_open_read_write(audio->ac,
+					FORMAT_MPEG4_AAC, FORMAT_LINEAR_PCM);
+				if (rc < 0) {
+					pr_err("%s:open read write failed\n",
+						__func__);
+					break;
+				}
+			}
+			if (audio->feedback == TUNNEL_MODE) {
+				pr_debug("%s: starting in tunnel mode",
+					__func__);
+				rc = q6asm_open_read(audio->ac,
+							FORMAT_MPEG4_AAC);
+
+				if (rc < 0) {
+					pr_err("%s:open read failed\n",
+							__func__);
+					break;
+				}
+			}
+			audio->stopped = 0;
+		}
+
+		pr_debug("%s:sbr_ps_flag = %d, sbr_flag = %d\n", __func__,
+			aac_config->sbr_ps_on_flag, aac_config->sbr_on_flag);
+		if (aac_config->sbr_ps_on_flag)
+			aac_mode = AAC_ENC_MODE_EAAC_P;
+		else if (aac_config->sbr_on_flag)
+			aac_mode = AAC_ENC_MODE_AAC_P;
+		else
+			aac_mode = AAC_ENC_MODE_AAC_LC;
+
+		rc = q6asm_enc_cfg_blk_aac(audio->ac,
+					audio->buf_cfg.frames_per_buf,
+					enc_cfg->sample_rate,
+					enc_cfg->channels,
+					enc_cfg->bit_rate,
+					aac_mode,
+					enc_cfg->stream_format);
+		if (rc < 0) {
+			pr_err("%s:session id %d: cmd media format block failed\n",
+				__func__, audio->ac->session);
+			break;
+		}
+		if (audio->feedback == NON_TUNNEL_MODE) {
+			rc = q6asm_media_format_block_pcm(audio->ac,
+						audio->pcm_cfg.sample_rate,
+						audio->pcm_cfg.channel_count);
+			if (rc < 0) {
+				pr_err("%s:session id %d: media format block failed\n",
+					__func__, audio->ac->session);
+				break;
+			}
+		}
+		rc = audio_in_enable(audio);
+		if (!rc) {
+			audio->enabled = 1;
+		} else {
+			audio->enabled = 0;
+			pr_err("%s:session id %d: Audio Start procedure failed rc=%d\n",
+				__func__, audio->ac->session, rc);
+			break;
+		}
+		while (cnt++ < audio->str_cfg.buffer_count)
+			q6asm_read(audio->ac);
+		pr_debug("%s:session id %d: AUDIO_START success enable[%d]\n",
+				__func__, audio->ac->session, audio->enabled);
+		break;
+	}
+	case AUDIO_STOP: {
+		pr_debug("%s:session id %d: Rxed AUDIO_STOP\n", __func__,
+				audio->ac->session);
+		rc = audio_in_disable(audio);
+		if (rc  < 0) {
+			pr_err("%s:session id %d: Audio Stop procedure failed rc=%d\n",
+				__func__, audio->ac->session, rc);
+			break;
+		}
+		break;
+	}
+	case AUDIO_GET_AAC_ENC_CONFIG: {
+		struct msm_audio_aac_enc_config *cfg;
+		struct msm_audio_aac_enc_config *enc_cfg;
+
+		cfg = (struct msm_audio_aac_enc_config *)arg;
+		if (cfg == NULL) {
+			pr_err("%s: NULL config pointer for %s\n",
+				__func__, "AUDIO_GET_AAC_ENC_CONFIG");
+			rc = -EINVAL;
+			break;
+		}
+		memset(cfg, 0, sizeof(*cfg));
+		enc_cfg = audio->enc_cfg;
+		if (enc_cfg->channels == CH_MODE_MONO)
+			cfg->channels = 1;
+		else
+			cfg->channels = 2;
+
+		cfg->sample_rate = enc_cfg->sample_rate;
+		cfg->bit_rate = enc_cfg->bit_rate;
+		switch (enc_cfg->stream_format) {
+		case 0x00:
+			cfg->stream_format = AUDIO_AAC_FORMAT_ADTS;
+			break;
+		case 0x01:
+			cfg->stream_format = AUDIO_AAC_FORMAT_LOAS;
+			break;
+		case 0x02:
+			cfg->stream_format = AUDIO_AAC_FORMAT_ADIF;
+			break;
+		default:
+		case 0x03:
+			cfg->stream_format = AUDIO_AAC_FORMAT_RAW;
+		}
+		pr_debug("%s:session id %d: Get-aac-cfg: format=%d sr=%d bitrate=%d\n",
+			__func__, audio->ac->session,
+			cfg->stream_format, cfg->sample_rate, cfg->bit_rate);
+		break;
+	}
+	case AUDIO_SET_AAC_ENC_CONFIG: {
+		struct msm_audio_aac_enc_config *cfg;
+		struct msm_audio_aac_enc_config *enc_cfg;
+		uint32_t min_bitrate, max_bitrate;
+
+		cfg = (struct msm_audio_aac_enc_config *)arg;
+		if (cfg == NULL) {
+			pr_err("%s: NULL config pointer for %s\n",
+				__func__, "AUDIO_SET_AAC_ENC_CONFIG");
+			rc = -EINVAL;
+			break;
+		}
+		enc_cfg = audio->enc_cfg;
+		pr_debug("%s:session id %d: Set-aac-cfg: stream=%d\n", __func__,
+			audio->ac->session, cfg->stream_format);
+
+		switch (cfg->stream_format) {
+		case AUDIO_AAC_FORMAT_ADTS:
+			enc_cfg->stream_format = 0x00;
+			break;
+		case AUDIO_AAC_FORMAT_LOAS:
+			enc_cfg->stream_format = 0x01;
+			break;
+		case AUDIO_AAC_FORMAT_ADIF:
+			enc_cfg->stream_format = 0x02;
+			break;
+		case AUDIO_AAC_FORMAT_RAW:
+			enc_cfg->stream_format = 0x03;
+			break;
+		default:
+			pr_err("%s:session id %d: unsupported AAC format %d\n",
+				__func__, audio->ac->session,
+				cfg->stream_format);
+			rc = -EINVAL;
+			break;
+		}
+
+		/* do not apply the rest of the config after a bad format */
+		if (rc)
+			break;
+
+		if (cfg->channels == 1) {
+			cfg->channels = CH_MODE_MONO;
+		} else if (cfg->channels == 2) {
+			cfg->channels = CH_MODE_STEREO;
+		} else {
+			rc = -EINVAL;
+			break;
+		}
+
+		if (cfg->sample_rate > MAX_SAMPLE_RATE_384K) {
+			pr_err("%s: ERROR: invalid sample rate = %u\n",
+				__func__, cfg->sample_rate);
+			rc = -EINVAL;
+			break;
+		}
+
+		min_bitrate = ((cfg->sample_rate)*(cfg->channels))/2;
+		/* This calculation should be based on AAC mode. But we cannot
+		 * get AAC mode in this setconfig. min_bitrate's logical max
+		 * value is 24000. So if min_bitrate is higher than 24000,
+		 * choose 24000.
+		 */
+		if (min_bitrate > 24000)
+			min_bitrate = 24000;
+		max_bitrate = 6*(cfg->sample_rate)*(cfg->channels);
+		if (max_bitrate > 192000)
+			max_bitrate = 192000;
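+		/*
+		 * Worked example (informative): for 48 kHz stereo input,
+		 * min_bitrate = (48000 * 2) / 2 = 48000, capped to 24000;
+		 * max_bitrate = 6 * 48000 * 2 = 576000, capped to 192000.
+		 */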
+		if ((cfg->bit_rate < min_bitrate) ||
+			(cfg->bit_rate > max_bitrate)) {
+			pr_err("%s: bitrate permissible: max=%d, min=%d\n",
+				__func__, max_bitrate, min_bitrate);
+			pr_err("%s: ERROR in setting bitrate = %d\n",
+				__func__, cfg->bit_rate);
+			rc = -EINVAL;
+			break;
+		}
+		enc_cfg->sample_rate = cfg->sample_rate;
+		enc_cfg->channels = cfg->channels;
+		enc_cfg->bit_rate = cfg->bit_rate;
+		pr_debug("%s:session id %d: Set-aac-cfg: SR=0x%x ch=0x%x bitrate=0x%x, format(adts/raw)=%d\n",
+			__func__, audio->ac->session, enc_cfg->sample_rate,
+			enc_cfg->channels, enc_cfg->bit_rate,
+			enc_cfg->stream_format);
+		break;
+	}
+	case AUDIO_SET_AAC_CONFIG: {
+		struct msm_audio_aac_config *aac_cfg;
+		struct msm_audio_aac_config *audio_aac_cfg;
+		struct msm_audio_aac_enc_config *enc_cfg;
+		enc_cfg = audio->enc_cfg;
+		audio_aac_cfg = audio->codec_cfg;
+		aac_cfg = (struct msm_audio_aac_config *)arg;
+
+		if (aac_cfg == NULL) {
+			pr_err("%s: NULL config pointer %s\n",
+				__func__, "AUDIO_SET_AAC_CONFIG");
+			rc = -EINVAL;
+			break;
+		}
+		pr_debug("%s:session id %d: AUDIO_SET_AAC_CONFIG: sbr_flag = %d sbr_ps_flag = %d\n",
+			 __func__, audio->ac->session, aac_cfg->sbr_on_flag,
+			 aac_cfg->sbr_ps_on_flag);
+		audio_aac_cfg->sbr_on_flag = aac_cfg->sbr_on_flag;
+		audio_aac_cfg->sbr_ps_on_flag = aac_cfg->sbr_ps_on_flag;
+		if ((audio_aac_cfg->sbr_on_flag == 1) ||
+			 (audio_aac_cfg->sbr_ps_on_flag == 1)) {
+			if (enc_cfg->sample_rate < 24000) {
+				pr_err("%s: ERROR in setting sample rate = %d\n",
+					__func__, enc_cfg->sample_rate);
+				rc = -EINVAL;
+				break;
+			}
+		}
+		break;
+	}
+	default:
+		pr_err("%s: Unknown ioctl cmd = %d\n", __func__, cmd);
+		rc = -EINVAL;
+	}
+	return rc;
+}
+
+static long aac_in_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+	struct q6audio_in  *audio = file->private_data;
+	int rc = 0;
+
+	switch (cmd) {
+	case AUDIO_START:
+	case AUDIO_STOP: {
+		rc = aac_in_ioctl_shared(file, cmd, NULL);
+		break;
+	}
+	case AUDIO_GET_AAC_ENC_CONFIG: {
+		struct msm_audio_aac_enc_config cfg;
+		rc = aac_in_ioctl_shared(file, cmd, &cfg);
+		if (rc) {
+			pr_err("%s:AUDIO_GET_AAC_ENC_CONFIG failed. rc=%d\n",
+				__func__, rc);
+			break;
+		}
+		if (copy_to_user((void *)arg, &cfg, sizeof(cfg))) {
+			pr_err("%s: copy_to_user for AUDIO_GET_AAC_ENC_CONFIG failed\n",
+				__func__);
+			rc = -EFAULT;
+		}
+		break;
+	}
+	case AUDIO_SET_AAC_ENC_CONFIG: {
+		struct msm_audio_aac_enc_config cfg;
+		if (copy_from_user(&cfg, (void *)arg, sizeof(cfg))) {
+			pr_err("%s: copy_from_user for AUDIO_SET_AAC_ENC_CONFIG failed\n",
+				__func__);
+			rc = -EFAULT;
+			break;
+		}
+		rc = aac_in_ioctl_shared(file, cmd, &cfg);
+		if (rc)
+			pr_err("%s:AUDIO_SET_AAC_ENC_CONFIG failed. rc=%d\n",
+				__func__, rc);
+		break;
+	}
+	case AUDIO_GET_AAC_CONFIG: {
+		if (copy_to_user((void *)arg, &audio->codec_cfg,
+				 sizeof(struct msm_audio_aac_config))) {
+			pr_err("%s: copy_to_user for AUDIO_GET_AAC_CONFIG failed\n",
+				__func__);
+			rc = -EFAULT;
+			break;
+		}
+		break;
+	}
+	case AUDIO_SET_AAC_CONFIG: {
+		struct msm_audio_aac_config aac_cfg;
+		if (copy_from_user(&aac_cfg, (void *)arg,
+				 sizeof(struct msm_audio_aac_config))) {
+			pr_err("%s: copy_from_user for AUDIO_SET_AAC_CONFIG failed\n",
+				__func__);
+			rc = -EFAULT;
+			break;
+		}
+		rc = aac_in_ioctl_shared(file, cmd, &aac_cfg);
+		if (rc)
+			pr_err("%s:AUDIO_SET_AAC_CONFIG failed. rc=%d\n",
+				__func__, rc);
+		break;
+	}
+	default:
+		pr_err("%s: Unknown ioctl cmd=%d\n", __func__, cmd);
+		rc = -EINVAL;
+	}
+	return rc;
+}
+
+#ifdef CONFIG_COMPAT
+struct msm_audio_aac_enc_config32 {
+	u32 channels;
+	u32 sample_rate;
+	u32 bit_rate;
+	u32 stream_format;
+};
+
+struct msm_audio_aac_config32 {
+	s16 format;
+	u16 audio_object;
+	u16 ep_config;       /* 0 ~ 3 useful only obj = ERLC */
+	u16 aac_section_data_resilience_flag;
+	u16 aac_scalefactor_data_resilience_flag;
+	u16 aac_spectral_data_resilience_flag;
+	u16 sbr_on_flag;
+	u16 sbr_ps_on_flag;
+	u16 dual_mono_mode;
+	u16 channel_configuration;
+	u16 sample_rate;
+};
+
+enum {
+	AUDIO_SET_AAC_CONFIG_32 = _IOW(AUDIO_IOCTL_MAGIC,
+	  (AUDIO_MAX_COMMON_IOCTL_NUM+0), struct msm_audio_aac_config32),
+	AUDIO_GET_AAC_CONFIG_32 = _IOR(AUDIO_IOCTL_MAGIC,
+	  (AUDIO_MAX_COMMON_IOCTL_NUM+1), struct msm_audio_aac_config32),
+	AUDIO_SET_AAC_ENC_CONFIG_32 = _IOW(AUDIO_IOCTL_MAGIC,
+	  (AUDIO_MAX_COMMON_IOCTL_NUM+3), struct msm_audio_aac_enc_config32),
+	AUDIO_GET_AAC_ENC_CONFIG_32 = _IOR(AUDIO_IOCTL_MAGIC,
+	  (AUDIO_MAX_COMMON_IOCTL_NUM+4), struct msm_audio_aac_enc_config32)
+};
+
+static long aac_in_compat_ioctl(struct file *file, unsigned int cmd,
+				unsigned long arg)
+{
+	struct q6audio_in  *audio = file->private_data;
+	int rc = 0;
+
+	switch (cmd) {
+	case AUDIO_START:
+	case AUDIO_STOP: {
+		rc = aac_in_ioctl_shared(file, cmd, NULL);
+		break;
+	}
+	case AUDIO_GET_AAC_ENC_CONFIG_32: {
+		struct msm_audio_aac_enc_config cfg;
+		struct msm_audio_aac_enc_config32 cfg_32;
+
+		memset(&cfg_32, 0, sizeof(cfg_32));
+
+		cmd = AUDIO_GET_AAC_ENC_CONFIG;
+		rc = aac_in_ioctl_shared(file, cmd, &cfg);
+		if (rc) {
+			pr_err("%s:AUDIO_GET_AAC_ENC_CONFIG_32 failed. Rc= %d\n",
+				__func__, rc);
+			break;
+		}
+		cfg_32.channels = cfg.channels;
+		cfg_32.sample_rate = cfg.sample_rate;
+		cfg_32.bit_rate = cfg.bit_rate;
+		cfg_32.stream_format = cfg.stream_format;
+		if (copy_to_user((void *)arg, &cfg_32, sizeof(cfg_32))) {
+			pr_err("%s: copy_to_user for AUDIO_GET_AAC_ENC_CONFIG_32 failed\n",
+				__func__);
+			rc = -EFAULT;
+		}
+		break;
+	}
+	case AUDIO_SET_AAC_ENC_CONFIG_32: {
+		struct msm_audio_aac_enc_config cfg;
+		struct msm_audio_aac_enc_config32 cfg_32;
+		if (copy_from_user(&cfg_32, (void *)arg, sizeof(cfg_32))) {
+			pr_err("%s: copy_from_user for AUDIO_SET_AAC_ENC_CONFIG_32 failed\n",
+				__func__);
+			rc = -EFAULT;
+			break;
+		}
+		cfg.channels = cfg_32.channels;
+		cfg.sample_rate = cfg_32.sample_rate;
+		cfg.bit_rate = cfg_32.bit_rate;
+		cfg.stream_format = cfg_32.stream_format;
+		/*
+		 * The command should be converted from 32 bit to normal
+		 * before the shared ioctl is called, as the shared ioctl
+		 * can process only normal commands.
+		 */
+		cmd = AUDIO_SET_AAC_ENC_CONFIG;
+		rc = aac_in_ioctl_shared(file, cmd, &cfg);
+		if (rc)
+			pr_err("%s:AUDIO_SET_AAC_ENC_CONFIG_32 failed. rc=%d\n",
+				__func__, rc);
+		break;
+	}
+	case AUDIO_GET_AAC_CONFIG_32: {
+		struct msm_audio_aac_config *aac_config;
+		struct msm_audio_aac_config32 aac_config_32;
+
+		aac_config = (struct msm_audio_aac_config *)audio->codec_cfg;
+		aac_config_32.format = aac_config->format;
+		aac_config_32.audio_object = aac_config->audio_object;
+		aac_config_32.ep_config = aac_config->ep_config;
+		aac_config_32.aac_section_data_resilience_flag =
+			aac_config->aac_section_data_resilience_flag;
+		aac_config_32.aac_scalefactor_data_resilience_flag =
+			aac_config->aac_scalefactor_data_resilience_flag;
+		aac_config_32.aac_spectral_data_resilience_flag =
+			aac_config->aac_spectral_data_resilience_flag;
+		aac_config_32.sbr_on_flag = aac_config->sbr_on_flag;
+		aac_config_32.sbr_ps_on_flag = aac_config->sbr_ps_on_flag;
+		aac_config_32.dual_mono_mode = aac_config->dual_mono_mode;
+		aac_config_32.channel_configuration =
+				aac_config->channel_configuration;
+		aac_config_32.sample_rate = aac_config->sample_rate;
+
+		if (copy_to_user((void *)arg, &aac_config_32,
+				 sizeof(aac_config_32))) {
+			pr_err("%s: copy_to_user for AUDIO_GET_AAC_CONFIG_32 failed\n",
+				__func__);
+			rc = -EFAULT;
+			break;
+		}
+		break;
+	}
+	case AUDIO_SET_AAC_CONFIG_32: {
+		struct msm_audio_aac_config aac_cfg;
+		struct msm_audio_aac_config32 aac_cfg_32;
+		if (copy_from_user(&aac_cfg_32, (void *)arg,
+					sizeof(aac_cfg_32))) {
+			pr_err("%s: copy_from_user for AUDIO_SET_AAC_CONFIG_32 failed\n",
+				__func__);
+			rc = -EFAULT;
+			break;
+		}
+		aac_cfg.format = aac_cfg_32.format;
+		aac_cfg.audio_object = aac_cfg_32.audio_object;
+		aac_cfg.ep_config = aac_cfg_32.ep_config;
+		aac_cfg.aac_section_data_resilience_flag =
+			aac_cfg_32.aac_section_data_resilience_flag;
+		aac_cfg.aac_scalefactor_data_resilience_flag =
+			aac_cfg_32.aac_scalefactor_data_resilience_flag;
+		aac_cfg.aac_spectral_data_resilience_flag =
+			aac_cfg_32.aac_spectral_data_resilience_flag;
+		aac_cfg.sbr_on_flag = aac_cfg_32.sbr_on_flag;
+		aac_cfg.sbr_ps_on_flag = aac_cfg_32.sbr_ps_on_flag;
+		aac_cfg.dual_mono_mode = aac_cfg_32.dual_mono_mode;
+		aac_cfg.channel_configuration =
+				aac_cfg_32.channel_configuration;
+		aac_cfg.sample_rate = aac_cfg_32.sample_rate;
+
+		cmd = AUDIO_SET_AAC_CONFIG;
+		rc = aac_in_ioctl_shared(file, cmd, &aac_cfg);
+		if (rc)
+			pr_err("%s:AUDIO_SET_AAC_CONFIG failed. Rc= %d\n",
+				__func__, rc);
+		break;
+	}
+	default:
+		pr_err("%s: Unknown ioctl cmd = %d\n", __func__, cmd);
+		rc = -EINVAL;
+	}
+	return rc;
+}
+#else
+#define aac_in_compat_ioctl NULL
+#endif
+
+static int aac_in_open(struct inode *inode, struct file *file)
+{
+	struct q6audio_in *audio = NULL;
+	struct msm_audio_aac_enc_config *enc_cfg;
+	struct msm_audio_aac_config *aac_config;
+	int rc = 0;
+
+	audio = kzalloc(sizeof(struct q6audio_in), GFP_KERNEL);
+
+	if (audio == NULL) {
+		pr_err("%s: Could not allocate memory for aac driver\n",
+				__func__);
+		return -ENOMEM;
+	}
+	/* Allocate memory for encoder config param */
+	audio->enc_cfg = kzalloc(sizeof(struct msm_audio_aac_enc_config),
+				GFP_KERNEL);
+	if (audio->enc_cfg == NULL) {
+		/* audio->ac is not allocated yet, so no session id here */
+		pr_err("%s: Could not allocate memory for aac config param\n",
+				__func__);
+		kfree(audio);
+		return -ENOMEM;
+	}
+	enc_cfg = audio->enc_cfg;
+
+	audio->codec_cfg = kzalloc(sizeof(struct msm_audio_aac_config),
+				GFP_KERNEL);
+	if (audio->codec_cfg == NULL) {
+		pr_err("%s: Could not allocate memory for aac config\n",
+				__func__);
+		kfree(audio->enc_cfg);
+		kfree(audio);
+		return -ENOMEM;
+	}
+	aac_config = audio->codec_cfg;
+
+	mutex_init(&audio->lock);
+	mutex_init(&audio->read_lock);
+	mutex_init(&audio->write_lock);
+	spin_lock_init(&audio->dsp_lock);
+	init_waitqueue_head(&audio->read_wait);
+	init_waitqueue_head(&audio->write_wait);
+
+	/*
+	 * Settings will be reconfigured at AUDIO_SET_CONFIG,
+	 * but we need at least a sane initial config.
+	 */
+	audio->str_cfg.buffer_size = FRAME_SIZE;
+	audio->str_cfg.buffer_count = FRAME_NUM;
+	audio->min_frame_size = 1536;
+	audio->max_frames_per_buf = 5;
+	enc_cfg->sample_rate = 8000;
+	enc_cfg->channels = 1;
+	enc_cfg->bit_rate = 16000;
+	enc_cfg->stream_format = 0x00;/* 0:ADTS, 3:RAW */
+	audio->buf_cfg.meta_info_enable = 0x01;
+	audio->buf_cfg.frames_per_buf   = 0x01;
+	audio->pcm_cfg.buffer_count = PCM_BUF_COUNT;
+	audio->pcm_cfg.buffer_size  = PCM_BUF_SIZE;
+	aac_config->format = AUDIO_AAC_FORMAT_ADTS;
+	aac_config->audio_object = AUDIO_AAC_OBJECT_LC;
+	aac_config->sbr_on_flag = 0;
+	aac_config->sbr_ps_on_flag = 0;
+	aac_config->channel_configuration = 1;
+
+	audio->ac = q6asm_audio_client_alloc((app_cb)q6asm_in_cb,
+							(void *)audio);
+
+	if (!audio->ac) {
+		pr_err("%s: Could not allocate memory for audio client\n",
+				__func__);
+		kfree(audio->enc_cfg);
+		kfree(audio->codec_cfg);
+		kfree(audio);
+		return -ENOMEM;
+	}
+	/* open aac encoder in tunnel mode */
+	audio->buf_cfg.frames_per_buf = 0x01;
+
+	if ((file->f_mode & FMODE_WRITE) &&
+		(file->f_mode & FMODE_READ)) {
+		audio->feedback = NON_TUNNEL_MODE;
+		rc = q6asm_open_read_write(audio->ac, FORMAT_MPEG4_AAC,
+						FORMAT_LINEAR_PCM);
+
+		if (rc < 0) {
+			pr_err("%s:session id %d: NT Open failed rc=%d\n",
+				__func__, audio->ac->session, rc);
+			rc = -ENODEV;
+			goto fail;
+		}
+		audio->buf_cfg.meta_info_enable = 0x01;
+		pr_info("%s:session id %d: NT mode encoder success\n", __func__,
+				audio->ac->session);
+	} else if (!(file->f_mode & FMODE_WRITE) &&
+				(file->f_mode & FMODE_READ)) {
+		audio->feedback = TUNNEL_MODE;
+		rc = q6asm_open_read(audio->ac, FORMAT_MPEG4_AAC);
+
+		if (rc < 0) {
+			pr_err("%s:session id %d: Tunnel Open failed rc=%d\n",
+				__func__, audio->ac->session, rc);
+			rc = -ENODEV;
+			goto fail;
+		}
+		/* register for tx overflow (valid for tunnel mode only) */
+		rc = q6asm_reg_tx_overflow(audio->ac, 0x01);
+		if (rc < 0) {
+			pr_err("%s:session id %d: TX Overflow registration failed rc=%d\n",
+				__func__, audio->ac->session, rc);
+			rc = -ENODEV;
+			goto fail;
+		}
+		audio->buf_cfg.meta_info_enable = 0x00;
+		pr_info("%s:session id %d: T mode encoder success\n", __func__,
+			audio->ac->session);
+	} else {
+		pr_err("%s:session id %d: Unexpected mode\n", __func__,
+				audio->ac->session);
+		rc = -EACCES;
+		goto fail;
+	}
+	audio->opened = 1;
+	audio->reset_event = false;
+	atomic_set(&audio->in_count, PCM_BUF_COUNT);
+	atomic_set(&audio->out_count, 0x00);
+	audio->enc_compat_ioctl = aac_in_compat_ioctl;
+	audio->enc_ioctl = aac_in_ioctl;
+	file->private_data = audio;
+
+	pr_info("%s:session id %d: success\n", __func__, audio->ac->session);
+	return 0;
+fail:
+	q6asm_audio_client_free(audio->ac);
+	kfree(audio->enc_cfg);
+	kfree(audio->codec_cfg);
+	kfree(audio);
+	return rc;
+}
+
+static const struct file_operations audio_in_fops = {
+	.owner		= THIS_MODULE,
+	.open		= aac_in_open,
+	.release	= audio_in_release,
+	.read		= audio_in_read,
+	.write		= audio_in_write,
+	.unlocked_ioctl	= audio_in_ioctl,
+	.compat_ioctl	= audio_in_compat_ioctl
+};
+
+struct miscdevice audio_aac_in_misc = {
+	.minor	= MISC_DYNAMIC_MINOR,
+	.name	= "msm_aac_in",
+	.fops	= &audio_in_fops,
+};
+
+static int __init aac_in_init(void)
+{
+	return misc_register(&audio_aac_in_misc);
+}
+device_initcall(aac_in_init);
diff -Nruw linux-4.4.115-fbx/drivers/misc/qcom./qdsp6v2/amrnb_in.c linux-4.4.115-fbx/drivers/misc/qcom/qdsp6v2/amrnb_in.c
--- linux-4.4.115-fbx/drivers/misc/qcom./qdsp6v2/amrnb_in.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/misc/qcom/qdsp6v2/amrnb_in.c	2019-01-22 16:16:24.743257635 +0100
@@ -0,0 +1,404 @@
+/* Copyright (c) 2010-2012, 2014, 2016 The Linux Foundation.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+*/
+
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/miscdevice.h>
+#include <linux/uaccess.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/wait.h>
+#include <linux/dma-mapping.h>
+#include <linux/msm_audio_amrnb.h>
+#include <linux/compat.h>
+#include <asm/atomic.h>
+#include <asm/ioctls.h>
+#include "audio_utils.h"
+
+/* Buffer with meta */
+#define PCM_BUF_SIZE		(4096 + sizeof(struct meta_in))
+
+/* Maximum 10 frames in buffer with meta */
+#define FRAME_SIZE		(1 + ((32+sizeof(struct meta_out_dsp)) * 10))
+
+static long amrnb_in_ioctl_shared(struct file *file,
+				unsigned int cmd, void *arg)
+{
+	struct q6audio_in  *audio = file->private_data;
+	int rc = 0;
+	int cnt = 0;
+
+	switch (cmd) {
+	case AUDIO_START: {
+		struct msm_audio_amrnb_enc_config_v2 *enc_cfg;
+		enc_cfg = audio->enc_cfg;
+		pr_debug("%s:session id %d: default buf alloc[%d]\n", __func__,
+				audio->ac->session, audio->buf_alloc);
+		if (audio->enabled == 1) {
+			pr_info("%s:AUDIO_START already over\n", __func__);
+			rc = 0;
+			break;
+		}
+		rc = audio_in_buf_alloc(audio);
+		if (rc < 0) {
+			pr_err("%s:session id %d: buffer allocation failed\n",
+				__func__, audio->ac->session);
+			break;
+		}
+
+		rc = q6asm_enc_cfg_blk_amrnb(audio->ac,
+			audio->buf_cfg.frames_per_buf,
+			enc_cfg->band_mode,
+			enc_cfg->dtx_enable);
+
+		if (rc < 0) {
+			pr_err("%s:session id %d: cmd amrnb media format block"
+				"failed\n", __func__, audio->ac->session);
+			break;
+		}
+		if (audio->feedback == NON_TUNNEL_MODE) {
+			rc = q6asm_media_format_block_pcm(audio->ac,
+				audio->pcm_cfg.sample_rate,
+				audio->pcm_cfg.channel_count);
+
+			if (rc < 0) {
+				pr_err("%s:session id %d: media format block"
+				"failed\n", __func__, audio->ac->session);
+				break;
+			}
+		}
+		pr_debug("%s:session id %d: AUDIO_START enable[%d]\n",
+				__func__, audio->ac->session,
+				audio->enabled);
+		rc = audio_in_enable(audio);
+		if (!rc) {
+			audio->enabled = 1;
+		} else {
+			audio->enabled = 0;
+			pr_err("%s:session id %d: Audio Start procedure failed"
+					"rc=%d\n", __func__,
+					audio->ac->session, rc);
+			break;
+		}
+		while (cnt++ < audio->str_cfg.buffer_count)
+			q6asm_read(audio->ac); /* Push buffer to DSP */
+		rc = 0;
+		pr_debug("%s:session id %d: AUDIO_START success enable[%d]\n",
+				__func__, audio->ac->session, audio->enabled);
+		break;
+	}
+	case AUDIO_STOP: {
+		pr_debug("%s:AUDIO_STOP\n", __func__);
+		rc = audio_in_disable(audio);
+		if (rc  < 0) {
+			pr_err("%s:session id %d: Audio Stop procedure failed"
+				"rc=%d\n", __func__,
+				audio->ac->session, rc);
+			break;
+		}
+		break;
+	}
+	case AUDIO_SET_AMRNB_ENC_CONFIG_V2: {
+		struct msm_audio_amrnb_enc_config_v2 *cfg;
+		struct msm_audio_amrnb_enc_config_v2 *enc_cfg;
+		cfg = (struct msm_audio_amrnb_enc_config_v2 *)arg;
+		if (cfg == NULL) {
+			pr_err("%s: NULL config pointer for %s\n",
+					__func__,
+					"AUDIO_SET_AMRNB_ENC_CONFIG_V2");
+			rc = -EINVAL;
+			break;
+		}
+
+		enc_cfg = audio->enc_cfg;
+		if (cfg->band_mode > 8 ||
+			 cfg->band_mode < 1) {
+			pr_err("%s:session id %d: invalid band mode\n",
+				__func__, audio->ac->session);
+			rc = -EINVAL;
+			break;
+		}
+		/* The AMR-NB encoder accepts values between 0-7,
+		 * while OpenMAX provides values between 1-8
+		 * as per spec */
+		enc_cfg->band_mode = (cfg->band_mode - 1);
+		enc_cfg->dtx_enable = (cfg->dtx_enable ? 1 : 0);
+		enc_cfg->frame_format = 0;
+		pr_debug("%s:session id %d: band_mode = 0x%x dtx_enable=0x%x\n",
+				__func__, audio->ac->session,
+				enc_cfg->band_mode, enc_cfg->dtx_enable);
+		break;
+	}
+	default:
+		pr_err("%s: Unknown ioctl cmd = %d", __func__, cmd);
+		rc = -EINVAL;
+	}
+	return rc;
+}
+
+static long amrnb_in_ioctl(struct file *file,
+				unsigned int cmd, unsigned long arg)
+{
+	struct q6audio_in  *audio = file->private_data;
+	int rc = 0;
+
+	switch (cmd) {
+	case AUDIO_START:
+	case AUDIO_STOP: {
+		rc =  amrnb_in_ioctl_shared(file, cmd, NULL);
+		break;
+	}
+	case AUDIO_GET_AMRNB_ENC_CONFIG_V2: {
+		if (copy_to_user((void *)arg, audio->enc_cfg,
+			sizeof(struct msm_audio_amrnb_enc_config_v2))) {
+			pr_err("%s: copy_to_user for AUDIO_GET_AMRNB_ENC_CONFIG_V2 failed\n",
+				__func__);
+			rc = -EFAULT;
+		}
+		break;
+	}
+	case AUDIO_SET_AMRNB_ENC_CONFIG_V2: {
+		struct msm_audio_amrnb_enc_config_v2 cfg;
+		if (copy_from_user(&cfg, (void *) arg,
+				sizeof(cfg))) {
+			pr_err("%s: copy_from_user for AUDIO_SET_AMRNB_ENC_CONFIG_V2 failed\n",
+				__func__);
+			rc = -EFAULT;
+			break;
+		}
+		rc = amrnb_in_ioctl_shared(file, cmd, &cfg);
+		if (rc)
+			pr_err("%s: AUDIO_SET_AMRNB_ENC_CONFIG_V2 failed. rc=%d\n",
+				__func__, rc);
+		break;
+	}
+	default:
+		pr_err("%s: Unknown ioctl cmd=%d", __func__, cmd);
+		rc = -EINVAL;
+	}
+	return rc;
+}
+
+#ifdef CONFIG_COMPAT
+struct msm_audio_amrnb_enc_config_v2_32 {
+	u32 band_mode;
+	u32 dtx_enable;
+	u32 frame_format;
+};
+
+enum {
+	AUDIO_GET_AMRNB_ENC_CONFIG_V2_32 = _IOW(AUDIO_IOCTL_MAGIC,
+		(AUDIO_MAX_COMMON_IOCTL_NUM+2),
+		struct msm_audio_amrnb_enc_config_v2_32),
+	AUDIO_SET_AMRNB_ENC_CONFIG_V2_32 = _IOR(AUDIO_IOCTL_MAGIC,
+		(AUDIO_MAX_COMMON_IOCTL_NUM+3),
+		struct msm_audio_amrnb_enc_config_v2_32)
+};
+
+static long amrnb_in_compat_ioctl(struct file *file,
+				unsigned int cmd, unsigned long arg)
+{
+	struct q6audio_in  *audio = file->private_data;
+	int rc = 0;
+
+	switch (cmd) {
+	case AUDIO_START:
+	case AUDIO_STOP: {
+		rc =  amrnb_in_ioctl_shared(file, cmd, NULL);
+		break;
+	}
+	case AUDIO_GET_AMRNB_ENC_CONFIG_V2_32: {
+		struct msm_audio_amrnb_enc_config_v2 *amrnb_config;
+		struct msm_audio_amrnb_enc_config_v2_32 amrnb_config_32;
+
+		memset(&amrnb_config_32, 0, sizeof(amrnb_config_32));
+
+		amrnb_config =
+		(struct msm_audio_amrnb_enc_config_v2 *)audio->enc_cfg;
+		amrnb_config_32.band_mode = amrnb_config->band_mode;
+		amrnb_config_32.dtx_enable = amrnb_config->dtx_enable;
+		amrnb_config_32.frame_format = amrnb_config->frame_format;
+
+		if (copy_to_user((void *)arg, &amrnb_config_32,
+			sizeof(amrnb_config_32))) {
+			pr_err("%s: copy_to_user for AUDIO_GET_AMRNB_ENC_CONFIG_V2_32 failed",
+				__func__);
+			rc = -EFAULT;
+		}
+		break;
+	}
+	case AUDIO_SET_AMRNB_ENC_CONFIG_V2_32: {
+		struct msm_audio_amrnb_enc_config_v2_32 cfg_32;
+		if (copy_from_user(&cfg_32, (void *) arg,
+				sizeof(cfg_32))) {
+			pr_err("%s: copy_from_user for AUDIO_SET_AMRNB_ENC_CONFIG_V2_32 failed\n",
+					__func__);
+			rc = -EFAULT;
+			break;
+		}
+		cmd = AUDIO_SET_AMRNB_ENC_CONFIG_V2;
+		rc = amrnb_in_ioctl_shared(file, cmd, &cfg_32);
+		if (rc)
+			pr_err("%s:AUDIO_SET_AMRNB_ENC_CONFIG_V2 failed rc= %d\n",
+				__func__, rc);
+		break;
+	}
+	default:
+		pr_err("%s: Unknown ioctl cmd = %d", __func__, cmd);
+		rc = -EINVAL;
+	}
+	return rc;
+}
+#else
+#define amrnb_in_compat_ioctl NULL
+#endif
+
+static int amrnb_in_open(struct inode *inode, struct file *file)
+{
+	struct q6audio_in *audio = NULL;
+	struct msm_audio_amrnb_enc_config_v2 *enc_cfg;
+	int rc = 0;
+
+	audio = kzalloc(sizeof(struct q6audio_in), GFP_KERNEL);
+
+	if (audio == NULL) {
+		pr_err("%s Could not allocate memory for amrnb"
+			"driver\n", __func__);
+		return -ENOMEM;
+	}
+	/* Allocate memory for encoder config param */
+	audio->enc_cfg = kzalloc(sizeof(struct msm_audio_amrnb_enc_config_v2),
+				GFP_KERNEL);
+	if (audio->enc_cfg == NULL) {
+		pr_err("%s:session id %d: Could not allocate memory for aac"
+				"config param\n", __func__, audio->ac->session);
+		kfree(audio);
+		return -ENOMEM;
+	}
+	enc_cfg = audio->enc_cfg;
+
+	mutex_init(&audio->lock);
+	mutex_init(&audio->read_lock);
+	mutex_init(&audio->write_lock);
+	spin_lock_init(&audio->dsp_lock);
+	init_waitqueue_head(&audio->read_wait);
+	init_waitqueue_head(&audio->write_wait);
+
+	/* Settings will be re-configured at AUDIO_SET_CONFIG,
+	 * but at least we need to have an initial config
+	 */
+	audio->str_cfg.buffer_size = FRAME_SIZE;
+	audio->str_cfg.buffer_count = FRAME_NUM;
+	audio->min_frame_size = 32;
+	audio->max_frames_per_buf = 10;
+	audio->pcm_cfg.buffer_size = PCM_BUF_SIZE;
+	audio->pcm_cfg.buffer_count = PCM_BUF_COUNT;
+	enc_cfg->band_mode = 7;
+	enc_cfg->dtx_enable = 0;
+	audio->pcm_cfg.channel_count = 1;
+	audio->pcm_cfg.sample_rate = 8000;
+	audio->buf_cfg.meta_info_enable = 0x01;
+	audio->buf_cfg.frames_per_buf = 0x01;
+
+	audio->ac = q6asm_audio_client_alloc((app_cb)q6asm_in_cb,
+				(void *)audio);
+
+	if (!audio->ac) {
+		pr_err("%s: Could not allocate memory for audio"
+				"client\n", __func__);
+		kfree(audio->enc_cfg);
+		kfree(audio);
+		return -ENOMEM;
+	}
+
+	/* open amrnb encoder in T/NT mode */
+	if ((file->f_mode & FMODE_WRITE) &&
+		(file->f_mode & FMODE_READ)) {
+		audio->feedback = NON_TUNNEL_MODE;
+		rc = q6asm_open_read_write(audio->ac, FORMAT_AMRNB,
+					FORMAT_LINEAR_PCM);
+		if (rc < 0) {
+			pr_err("%s:session id %d: NT mode Open failed rc=%d\n",
+				__func__, audio->ac->session, rc);
+			rc = -ENODEV;
+			goto fail;
+		}
+		pr_info("%s:session id %d: NT mode encoder success\n",
+				__func__, audio->ac->session);
+	} else if (!(file->f_mode & FMODE_WRITE) &&
+				(file->f_mode & FMODE_READ)) {
+		audio->feedback = TUNNEL_MODE;
+		rc = q6asm_open_read(audio->ac, FORMAT_AMRNB);
+		if (rc < 0) {
+			pr_err("%s:session id %d: T mode Open failed rc=%d\n",
+				__func__, audio->ac->session, rc);
+			rc = -ENODEV;
+			goto fail;
+		}
+		/* register for tx overflow (valid for tunnel mode only) */
+		rc = q6asm_reg_tx_overflow(audio->ac, 0x01);
+		if (rc < 0) {
+			pr_err("%s:session id %d: TX Overflow registration"
+				"failed rc=%d\n", __func__, audio->ac->session,
+				rc);
+			rc = -ENODEV;
+			goto fail;
+		}
+		pr_info("%s:session id %d: T mode encoder success\n",
+				__func__, audio->ac->session);
+	} else {
+		pr_err("%s:session id %d: Unexpected mode\n", __func__,
+				audio->ac->session);
+		rc = -EACCES;
+		goto fail;
+	}
+
+	audio->opened = 1;
+	atomic_set(&audio->in_count, PCM_BUF_COUNT);
+	atomic_set(&audio->out_count, 0x00);
+	audio->enc_compat_ioctl = amrnb_in_compat_ioctl;
+	audio->enc_ioctl = amrnb_in_ioctl;
+	file->private_data = audio;
+
+	pr_info("%s:session id %d: success\n", __func__, audio->ac->session);
+	return 0;
+fail:
+	q6asm_audio_client_free(audio->ac);
+	kfree(audio->enc_cfg);
+	kfree(audio);
+	return rc;
+}
+
+static const struct file_operations audio_in_fops = {
+	.owner		= THIS_MODULE,
+	.open		= amrnb_in_open,
+	.release	= audio_in_release,
+	.read		= audio_in_read,
+	.write		= audio_in_write,
+	.unlocked_ioctl	= audio_in_ioctl,
+	.compat_ioctl   = audio_in_compat_ioctl
+};
+
+struct miscdevice audio_amrnb_in_misc = {
+	.minor	= MISC_DYNAMIC_MINOR,
+	.name	= "msm_amrnb_in",
+	.fops	= &audio_in_fops,
+};
+
+static int __init amrnb_in_init(void)
+{
+	return misc_register(&audio_amrnb_in_misc);
+}
+
+device_initcall(amrnb_in_init);
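
Note the band_mode convention enforced in amrnb_in_ioctl_shared() above: user space passes the OpenMAX-style 1-8 range and the driver stores band_mode - 1 for the DSP. A configuration sketch under that assumption (struct and ioctl names from <linux/msm_audio_amrnb.h>; the 12.2 kbps reading of mode 8 follows the standard AMR-NB rate table):

#include <sys/ioctl.h>
#include <linux/msm_audio.h>
#include <linux/msm_audio_amrnb.h>

static int amrnb_configure_and_start(int fd)
{
	struct msm_audio_amrnb_enc_config_v2 cfg = {
		.band_mode  = 8,	/* OpenMAX mode 8 -> DSP mode 7 (12.2 kbps) */
		.dtx_enable = 0,	/* no discontinuous transmission */
	};

	if (ioctl(fd, AUDIO_SET_AMRNB_ENC_CONFIG_V2, &cfg) < 0)
		return -1;
	return ioctl(fd, AUDIO_START, 0);	/* allocate buffers and enable */
}
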
diff -Nruw linux-4.4.115-fbx/drivers/misc/qcom./qdsp6v2/amrwb_in.c linux-4.4.115-fbx/drivers/misc/qcom/qdsp6v2/amrwb_in.c
--- linux-4.4.115-fbx/drivers/misc/qcom./qdsp6v2/amrwb_in.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/misc/qcom/qdsp6v2/amrwb_in.c	2019-01-22 16:16:24.743257635 +0100
@@ -0,0 +1,399 @@
+/* Copyright (c) 2011-2012, 2014, 2016 The Linux Foundation.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/dma-mapping.h>
+#include <linux/fs.h>
+#include <linux/module.h>
+#include <linux/miscdevice.h>
+#include <linux/msm_audio_amrwb.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/wait.h>
+#include <linux/compat.h>
+#include <asm/atomic.h>
+#include <asm/ioctls.h>
+#include "audio_utils.h"
+
+/* Buffer with meta */
+#define PCM_BUF_SIZE		(4096 + sizeof(struct meta_in))
+
+/* Maximum 10 frames in buffer with meta */
+#define FRAME_SIZE		(1 + ((61+sizeof(struct meta_out_dsp)) * 10))
+
+static long amrwb_in_ioctl_shared(struct file *file,
+				  unsigned int cmd, void *arg)
+{
+	struct q6audio_in  *audio = file->private_data;
+	int rc = 0;
+	int cnt = 0;
+
+	switch (cmd) {
+	case AUDIO_START: {
+		struct msm_audio_amrwb_enc_config *enc_cfg;
+		enc_cfg = audio->enc_cfg;
+		pr_debug("%s:session id %d: default buf alloc[%d]\n", __func__,
+				audio->ac->session, audio->buf_alloc);
+		if (audio->enabled == 1) {
+			pr_info("%s:AUDIO_START already over\n", __func__);
+			rc = 0;
+			break;
+		}
+		rc = audio_in_buf_alloc(audio);
+		if (rc < 0) {
+			pr_err("%s:session id %d: buffer allocation failed\n",
+				__func__, audio->ac->session);
+			break;
+		}
+
+		rc = q6asm_enc_cfg_blk_amrwb(audio->ac,
+			audio->buf_cfg.frames_per_buf,
+			enc_cfg->band_mode,
+			enc_cfg->dtx_enable);
+
+		if (rc < 0) {
+			pr_err("%s:session id %d: cmd amrwb media format block"
+				"failed\n", __func__, audio->ac->session);
+			break;
+		}
+		if (audio->feedback == NON_TUNNEL_MODE) {
+			rc = q6asm_media_format_block_pcm(audio->ac,
+				audio->pcm_cfg.sample_rate,
+				audio->pcm_cfg.channel_count);
+
+			if (rc < 0) {
+				pr_err("%s:session id %d: media format block"
+				"failed\n", __func__, audio->ac->session);
+				break;
+			}
+		}
+		pr_debug("%s:session id %d: AUDIO_START enable[%d]\n",
+				__func__, audio->ac->session,
+				audio->enabled);
+		rc = audio_in_enable(audio);
+		if (!rc) {
+			audio->enabled = 1;
+		} else {
+			audio->enabled = 0;
+			pr_err("%s:session id %d: Audio Start procedure failed"
+				"rc=%d\n", __func__, audio->ac->session, rc);
+			break;
+		}
+		while (cnt++ < audio->str_cfg.buffer_count)
+			q6asm_read(audio->ac); /* Push buffer to DSP */
+		rc = 0;
+		pr_debug("%s:session id %d: AUDIO_START success enable[%d]\n",
+				__func__, audio->ac->session, audio->enabled);
+		break;
+	}
+	case AUDIO_STOP: {
+		pr_debug("%s:AUDIO_STOP\n", __func__);
+		rc = audio_in_disable(audio);
+		if (rc  < 0) {
+			pr_err("%s:session id %d: Audio Stop procedure failed"
+				"rc=%d\n", __func__, audio->ac->session, rc);
+			break;
+		}
+		break;
+	}
+	case AUDIO_SET_AMRWB_ENC_CONFIG: {
+		struct msm_audio_amrwb_enc_config *cfg;
+		struct msm_audio_amrwb_enc_config *enc_cfg;
+		enc_cfg = audio->enc_cfg;
+		cfg = (struct msm_audio_amrwb_enc_config *)arg;
+		if (cfg == NULL) {
+			pr_err("%s: NULL config pointer for %s\n",
+					__func__, "AUDIO_SET_AMRWB_ENC_CONFIG");
+			rc = -EINVAL;
+			break;
+		}
+
+		if (cfg->band_mode > 8) {
+			pr_err("%s:session id %d: invalid band mode\n",
+				__func__, audio->ac->session);
+			rc = -EINVAL;
+			break;
+		}
+		/* TODO: the AMR-WB encoder accepts values between 0-8,
+		 * while OpenMAX provides values between 9-17
+		 * as per spec */
+		enc_cfg->band_mode = cfg->band_mode;
+		enc_cfg->dtx_enable = (cfg->dtx_enable ? 1 : 0);
+		/* Currently DSP does not support different frameformat */
+		enc_cfg->frame_format = 0;
+		pr_debug("%s:session id %d: band_mode = 0x%x dtx_enable=0x%x\n",
+				__func__, audio->ac->session,
+				enc_cfg->band_mode, enc_cfg->dtx_enable);
+		break;
+	}
+	default:
+		pr_err("%s: Unknown ioctl cmd = %d", __func__, cmd);
+		rc = -EINVAL;
+	}
+	return rc;
+}
+
+static long amrwb_in_ioctl(struct file *file,
+				  unsigned int cmd, unsigned long arg)
+{
+	struct q6audio_in  *audio = file->private_data;
+	int rc = 0;
+
+	switch (cmd) {
+	case AUDIO_START:
+	case AUDIO_STOP: {
+		rc = amrwb_in_ioctl_shared(file, cmd, NULL);
+		break;
+	}
+	case AUDIO_GET_AMRWB_ENC_CONFIG: {
+		if (copy_to_user((void *)arg, audio->enc_cfg,
+				sizeof(struct msm_audio_amrwb_enc_config))) {
+			pr_err("%s: copy_to_user for AUDIO_GET_AMRWB_ENC_CONFIG failed\n",
+				__func__);
+			rc = -EFAULT;
+		}
+		break;
+	}
+	case AUDIO_SET_AMRWB_ENC_CONFIG: {
+		struct msm_audio_amrwb_enc_config cfg;
+		if (copy_from_user(&cfg, (void *) arg,
+				sizeof(cfg))) {
+			pr_err("%s: copy_from_user for AUDIO_SET_AMRWB_ENC_CONFIG failed\n",
+				__func__);
+			rc = -EFAULT;
+			break;
+		}
+		rc = amrwb_in_ioctl_shared(file, cmd, &cfg);
+		if (rc)
+			pr_err("%s:AUDIO_SET_AAC_ENC_CONFIG failed. rc=%d\n",
+				__func__, rc);
+		break;
+	}
+	default:
+		pr_err("%s: Unknown ioctl cmd = %d", __func__, cmd);
+		rc = -EINVAL;
+	}
+	return rc;
+}
+
+#ifdef CONFIG_COMPAT
+struct msm_audio_amrwb_enc_config_32 {
+	u32 band_mode;
+	u32 dtx_enable;
+	u32 frame_format;
+};
+
+enum {
+	AUDIO_GET_AMRWB_ENC_CONFIG_32 = _IOW(AUDIO_IOCTL_MAGIC,
+		(AUDIO_MAX_COMMON_IOCTL_NUM+0),
+		struct msm_audio_amrwb_enc_config_32),
+	AUDIO_SET_AMRWB_ENC_CONFIG_32 = _IOR(AUDIO_IOCTL_MAGIC,
+		(AUDIO_MAX_COMMON_IOCTL_NUM+1),
+		struct msm_audio_amrwb_enc_config_32)
+};
+
+static long amrwb_in_compat_ioctl(struct file *file,
+				  unsigned int cmd, unsigned long arg)
+{
+	struct q6audio_in  *audio = file->private_data;
+	int rc = 0;
+
+	switch (cmd) {
+	case AUDIO_START:
+	case AUDIO_STOP: {
+		rc = amrwb_in_ioctl_shared(file, cmd, NULL);
+		break;
+	}
+	case AUDIO_GET_AMRWB_ENC_CONFIG_32: {
+		struct msm_audio_amrwb_enc_config *amrwb_config;
+		struct msm_audio_amrwb_enc_config_32 amrwb_config_32;
+
+		memset(&amrwb_config_32, 0, sizeof(amrwb_config_32));
+
+		amrwb_config =
+		(struct msm_audio_amrwb_enc_config *)audio->enc_cfg;
+		amrwb_config_32.band_mode = amrwb_config->band_mode;
+		amrwb_config_32.dtx_enable = amrwb_config->dtx_enable;
+		amrwb_config_32.frame_format = amrwb_config->frame_format;
+
+		if (copy_to_user((void *)arg, &amrwb_config_32,
+			sizeof(struct msm_audio_amrwb_enc_config_32))) {
+			pr_err("%s: copy_to_user for AUDIO_GET_AMRWB_ENC_CONFIG_32 failed\n",
+				__func__);
+			rc = -EFAULT;
+		}
+		break;
+	}
+	case AUDIO_SET_AMRWB_ENC_CONFIG_32: {
+		struct msm_audio_amrwb_enc_config cfg_32;
+		if (copy_from_user(&cfg_32, (void *) arg,
+				sizeof(cfg_32))) {
+			pr_err("%s: copy_from_user for AUDIO_SET_AMRWB_ENC_CONFIG_32 failed\n",
+				__func__);
+			rc = -EFAULT;
+			break;
+		}
+		cmd = AUDIO_SET_AMRWB_ENC_CONFIG;
+		rc = amrwb_in_ioctl_shared(file, cmd, &cfg_32);
+		if (rc)
+			pr_err("%s:AUDIO_SET_AAC_ENC_CONFIG failed. rc=%d\n",
+				__func__, rc);
+		break;
+	}
+	default:
+		pr_err("%s: Unknown ioctl cmd = %d", __func__, cmd);
+		rc = -EINVAL;
+	}
+	return rc;
+}
+#else
+#define amrwb_in_compat_ioctl NULL
+#endif
+
+static int amrwb_in_open(struct inode *inode, struct file *file)
+{
+	struct q6audio_in *audio = NULL;
+	struct msm_audio_amrwb_enc_config *enc_cfg;
+	int rc = 0;
+
+	audio = kzalloc(sizeof(struct q6audio_in), GFP_KERNEL);
+
+	if (audio == NULL) {
+		pr_err("%s: Could not allocate memory for amrwb driver\n",
+								__func__);
+		return -ENOMEM;
+	}
+	/* Allocate memory for encoder config param */
+	audio->enc_cfg = kzalloc(sizeof(struct msm_audio_amrwb_enc_config),
+				GFP_KERNEL);
+	if (audio->enc_cfg == NULL) {
+		pr_err("%s:session id %d: Could not allocate memory for amrwb"
+			"config param\n", __func__, audio->ac->session);
+		kfree(audio);
+		return -ENOMEM;
+	}
+	enc_cfg = audio->enc_cfg;
+
+	mutex_init(&audio->lock);
+	mutex_init(&audio->read_lock);
+	mutex_init(&audio->write_lock);
+	spin_lock_init(&audio->dsp_lock);
+	init_waitqueue_head(&audio->read_wait);
+	init_waitqueue_head(&audio->write_wait);
+
+	/* Settings will be re-configured at AUDIO_SET_CONFIG,
+	 * but at least we need to have an initial config
+	 */
+	audio->str_cfg.buffer_size = FRAME_SIZE;
+	audio->str_cfg.buffer_count = FRAME_NUM;
+	audio->min_frame_size = 32;
+	audio->max_frames_per_buf = 10;
+	audio->pcm_cfg.buffer_size = PCM_BUF_SIZE;
+	audio->pcm_cfg.buffer_count = PCM_BUF_COUNT;
+	enc_cfg->band_mode = 8;
+	enc_cfg->dtx_enable = 0;
+	audio->pcm_cfg.channel_count = 1;
+	audio->pcm_cfg.sample_rate = 16000;
+	audio->buf_cfg.meta_info_enable = 0x01;
+	audio->buf_cfg.frames_per_buf = 0x01;
+
+	audio->ac = q6asm_audio_client_alloc((app_cb)q6asm_in_cb,
+				(void *)audio);
+
+	if (!audio->ac) {
+		pr_err("%s:audio[%pK]: Could not allocate memory for audio"
+			"client\n", __func__, audio);
+		kfree(audio->enc_cfg);
+		kfree(audio);
+		return -ENOMEM;
+	}
+
+	/* open amrwb encoder in T/NT mode */
+	if ((file->f_mode & FMODE_WRITE) &&
+		(file->f_mode & FMODE_READ)) {
+		audio->feedback = NON_TUNNEL_MODE;
+		rc = q6asm_open_read_write(audio->ac, FORMAT_AMRWB,
+					FORMAT_LINEAR_PCM);
+		if (rc < 0) {
+			pr_err("%s:session id %d: NT mode Open failed rc=%d\n",
+				__func__, audio->ac->session, rc);
+			rc = -ENODEV;
+			goto fail;
+		}
+		pr_info("%s:session id %d: NT mode encoder success\n",
+				__func__, audio->ac->session);
+	} else if (!(file->f_mode & FMODE_WRITE) &&
+				(file->f_mode & FMODE_READ)) {
+		audio->feedback = TUNNEL_MODE;
+		rc = q6asm_open_read(audio->ac, FORMAT_AMRWB);
+		if (rc < 0) {
+			pr_err("%s:session id %d: T mode Open failed rc=%d\n",
+				__func__, audio->ac->session, rc);
+			rc = -ENODEV;
+			goto fail;
+		}
+		/* register for tx overflow (valid for tunnel mode only) */
+		rc = q6asm_reg_tx_overflow(audio->ac, 0x01);
+		if (rc < 0) {
+			pr_err("%s:session id %d: TX Overflow registration"
+				"failed rc=%d\n", __func__, audio->ac->session,
+				rc);
+			rc = -ENODEV;
+			goto fail;
+		}
+		pr_info("%s:session id %d: T mode encoder success\n",
+				__func__, audio->ac->session);
+	} else {
+		pr_err("%s:session id %d: Unexpected mode\n", __func__,
+				audio->ac->session);
+		rc = -EACCES;
+		goto fail;
+	}
+
+	audio->opened = 1;
+	atomic_set(&audio->in_count, PCM_BUF_COUNT);
+	atomic_set(&audio->out_count, 0x00);
+	audio->enc_compat_ioctl = amrwb_in_compat_ioctl;
+	audio->enc_ioctl = amrwb_in_ioctl;
+	file->private_data = audio;
+
+	pr_info("%s:session id %d: success\n", __func__, audio->ac->session);
+	return 0;
+fail:
+	q6asm_audio_client_free(audio->ac);
+	kfree(audio->enc_cfg);
+	kfree(audio);
+	return rc;
+}
+
+static const struct file_operations audio_in_fops = {
+	.owner		= THIS_MODULE,
+	.open		= amrwb_in_open,
+	.release	= audio_in_release,
+	.read		= audio_in_read,
+	.write		= audio_in_write,
+	.unlocked_ioctl	= audio_in_ioctl,
+	.compat_ioctl   = audio_in_compat_ioctl
+};
+
+struct miscdevice audio_amrwb_in_misc = {
+	.minor	= MISC_DYNAMIC_MINOR,
+	.name	= "msm_amrwb_in",
+	.fops	= &audio_in_fops,
+};
+
+static int __init amrwb_in_init(void)
+{
+	return misc_register(&audio_amrwb_in_misc);
+}
+
+device_initcall(amrwb_in_init);
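
The compat handler above only rewrites the ioctl number before calling amrwb_in_ioctl_shared(), which is safe because struct msm_audio_amrwb_enc_config_32 mirrors the native struct field for field (three u32 members, so no 32/64-bit padding differences). A compile-time guard documenting that assumption could look like this sketch, as if placed in amrwb_in.c where both definitions are visible (illustrative only, not part of the patch):

#include <linux/bug.h>
#include <linux/stddef.h>

static inline void amrwb_compat_layout_check(void)
{
	/* both structs are { u32 band_mode; u32 dtx_enable; u32 frame_format; } */
	BUILD_BUG_ON(sizeof(struct msm_audio_amrwb_enc_config_32) !=
		     sizeof(struct msm_audio_amrwb_enc_config));
	BUILD_BUG_ON(offsetof(struct msm_audio_amrwb_enc_config_32, frame_format) !=
		     offsetof(struct msm_audio_amrwb_enc_config, frame_format));
}
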
diff -Nruw linux-4.4.115-fbx/drivers/misc/qcom./qdsp6v2/audio_aac.c linux-4.4.115-fbx/drivers/misc/qcom/qdsp6v2/audio_aac.c
--- linux-4.4.115-fbx/drivers/misc/qcom./qdsp6v2/audio_aac.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/misc/qcom/qdsp6v2/audio_aac.c	2019-01-22 16:16:24.743257635 +0100
@@ -0,0 +1,472 @@
+/* aac audio output device
+ *
+ * Copyright (C) 2008 Google, Inc.
+ * Copyright (C) 2008 HTC Corporation
+ * Copyright (c) 2010-2017, The Linux Foundation. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/msm_audio_aac.h>
+#include <linux/compat.h>
+#include "audio_utils_aio.h"
+
+#define AUDIO_AAC_DUAL_MONO_INVALID -1
+#define PCM_BUFSZ_MIN_AAC	((8*1024) + sizeof(struct dec_meta_out))
+
+static struct miscdevice audio_aac_misc;
+static struct ws_mgr audio_aac_ws_mgr;
+
+#ifdef CONFIG_DEBUG_FS
+static const struct file_operations audio_aac_debug_fops = {
+	.read = audio_aio_debug_read,
+	.open = audio_aio_debug_open,
+};
+#endif
+
+static long audio_ioctl_shared(struct file *file, unsigned int cmd,
+				void *arg)
+{
+	struct q6audio_aio *audio = file->private_data;
+	int rc = 0;
+	switch (cmd) {
+	case AUDIO_START: {
+		struct asm_aac_cfg aac_cfg;
+		struct msm_audio_aac_config *aac_config;
+		uint32_t sbr_ps = 0x00;
+		pr_debug("%s: AUDIO_START session_id[%d]\n", __func__,
+							audio->ac->session);
+		if (audio->feedback == NON_TUNNEL_MODE) {
+			/* Configure PCM output block */
+			rc = q6asm_enc_cfg_blk_pcm(audio->ac,
+					audio->pcm_cfg.sample_rate,
+					audio->pcm_cfg.channel_count);
+			if (rc < 0) {
+				pr_err("pcm output block config failed\n");
+				break;
+			}
+		}
+		/* turn on both sbr and ps */
+		rc = q6asm_enable_sbrps(audio->ac, sbr_ps);
+		if (rc < 0)
+			pr_err("sbr-ps enable failed\n");
+		aac_config = (struct msm_audio_aac_config *)audio->codec_cfg;
+		if (aac_config->sbr_ps_on_flag)
+			aac_cfg.aot = AAC_ENC_MODE_EAAC_P;
+		else if (aac_config->sbr_on_flag)
+			aac_cfg.aot = AAC_ENC_MODE_AAC_P;
+		else
+			aac_cfg.aot = AAC_ENC_MODE_AAC_LC;
+
+		switch (aac_config->format) {
+		case AUDIO_AAC_FORMAT_ADTS:
+			aac_cfg.format = 0x00;
+			break;
+		case AUDIO_AAC_FORMAT_LOAS:
+			aac_cfg.format = 0x01;
+			break;
+		case AUDIO_AAC_FORMAT_ADIF:
+			aac_cfg.format = 0x02;
+			break;
+		default:
+		case AUDIO_AAC_FORMAT_RAW:
+			aac_cfg.format = 0x03;
+		}
+		aac_cfg.ep_config = aac_config->ep_config;
+		aac_cfg.section_data_resilience =
+			aac_config->aac_section_data_resilience_flag;
+		aac_cfg.scalefactor_data_resilience =
+			aac_config->aac_scalefactor_data_resilience_flag;
+		aac_cfg.spectral_data_resilience =
+			aac_config->aac_spectral_data_resilience_flag;
+		if (audio->feedback == TUNNEL_MODE) {
+			aac_cfg.sample_rate = aac_config->sample_rate;
+			aac_cfg.ch_cfg = aac_config->channel_configuration;
+		} else {
+			aac_cfg.sample_rate = audio->pcm_cfg.sample_rate;
+			aac_cfg.ch_cfg = audio->pcm_cfg.channel_count;
+		}
+
+		pr_debug("%s:format=%x aot=%d  ch=%d sr=%d\n",
+			__func__, aac_cfg.format,
+			aac_cfg.aot, aac_cfg.ch_cfg,
+			aac_cfg.sample_rate);
+
+		/* Configure Media format block */
+		rc = q6asm_media_format_block_aac(audio->ac, &aac_cfg);
+		if (rc < 0) {
+			pr_err("cmd media format block failed\n");
+			break;
+		}
+		rc = audio_aio_enable(audio);
+		audio->eos_rsp = 0;
+		audio->eos_flag = 0;
+		if (!rc) {
+			rc = enable_volume_ramp(audio);
+			if (rc < 0) {
+				pr_err("%s: Failed to enable volume ramp\n",
+					__func__);
+			}
+			audio->enabled = 1;
+		} else {
+			audio->enabled = 0;
+			pr_err("Audio Start procedure failed rc=%d\n", rc);
+			break;
+		}
+		pr_info("%s: AUDIO_START sessionid[%d]enable[%d]\n", __func__,
+						audio->ac->session,
+						audio->enabled);
+		if (audio->stopped == 1)
+			audio->stopped = 0;
+		break;
+	}
+	case AUDIO_SET_AAC_CONFIG: {
+		struct msm_audio_aac_config *aac_config;
+		uint16_t sce_left = 1, sce_right = 2;
+
+		pr_debug("%s: AUDIO_SET_AAC_CONFIG\n", __func__);
+		aac_config = (struct msm_audio_aac_config *)arg;
+		if (aac_config == NULL) {
+			pr_err("%s: Invalid config pointer\n", __func__);
+			rc = -EINVAL;
+			break;
+		}
+		memcpy(audio->codec_cfg, aac_config,
+				sizeof(struct msm_audio_aac_config));
+		/* PL_PR is 0, so only need to check against PL_SR */
+		if (aac_config->dual_mono_mode >
+		    AUDIO_AAC_DUAL_MONO_PL_SR) {
+			pr_err("%s:Invalid dual_mono mode =%d\n", __func__,
+			aac_config->dual_mono_mode);
+		} else {
+			/* convert the data from user into sce_left
+			 * and sce_right based on the definitions
+			 */
+			pr_debug("%s: modify dual_mono mode =%d\n", __func__,
+				 aac_config->dual_mono_mode);
+			switch (aac_config->dual_mono_mode) {
+			case AUDIO_AAC_DUAL_MONO_PL_PR:
+				sce_left = 1;
+				sce_right = 1;
+				break;
+			case AUDIO_AAC_DUAL_MONO_SL_SR:
+				sce_left = 2;
+				sce_right = 2;
+				break;
+			case AUDIO_AAC_DUAL_MONO_SL_PR:
+				sce_left = 2;
+				sce_right = 1;
+				break;
+			case AUDIO_AAC_DUAL_MONO_PL_SR:
+			default:
+				sce_left = 1;
+				sce_right = 2;
+				break;
+			}
+			rc = q6asm_cfg_dual_mono_aac(audio->ac,
+						sce_left, sce_right);
+			if (rc < 0)
+				pr_err("%s:asm cmd dualmono failed rc=%d\n",
+					 __func__, rc);
+		}
+		break;
+	}
+	default:
+		pr_err("%s: Unknown ioctl cmd = %d", __func__, cmd);
+		break;
+	}
+	return rc;
+}
+
+static long audio_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+	struct q6audio_aio *audio = file->private_data;
+	int rc = 0;
+	switch (cmd) {
+	case AUDIO_START: {
+		rc = audio_ioctl_shared(file, cmd, (void *)arg);
+		break;
+	}
+	case AUDIO_GET_AAC_CONFIG: {
+		if (copy_to_user((void *)arg, audio->codec_cfg,
+			sizeof(struct msm_audio_aac_config))) {
+			pr_err("%s: copy_to_user for AUDIO_GET_AAC_CONFIG failed\n",
+				__func__);
+			rc = -EFAULT;
+			break;
+		}
+		break;
+	}
+	case AUDIO_SET_AAC_CONFIG: {
+		struct msm_audio_aac_config aac_config;
+		pr_debug("%s: AUDIO_SET_AAC_CONFIG\n", __func__);
+		if (copy_from_user(&aac_config, (void *)arg,
+			sizeof(aac_config))) {
+			pr_err("%s: copy_from_user for AUDIO_SET_AAC_CONFIG failed\n",
+				__func__);
+			rc = -EFAULT;
+			break;
+		}
+		rc = audio_ioctl_shared(file, cmd, &aac_config);
+		if (rc)
+			pr_err("%s:AUDIO_SET_AAC_CONFIG failed. Rc= %d\n",
+						__func__, rc);
+		break;
+	}
+	default: {
+		pr_debug("%s[%pK]: Calling utils ioctl\n", __func__, audio);
+		rc = audio->codec_ioctl(file, cmd, arg);
+		if (rc)
+			pr_err("%s[%pK]:Failed in utils_ioctl: %d\n",
+				__func__, audio, rc);
+	}
+	}
+	return rc;
+}
+
+#ifdef CONFIG_COMPAT
+struct msm_audio_aac_config32 {
+	s16 format;
+	u16 audio_object;
+	u16 ep_config;	/* 0 ~ 3, useful only when obj = ERLC */
+	u16 aac_section_data_resilience_flag;
+	u16 aac_scalefactor_data_resilience_flag;
+	u16 aac_spectral_data_resilience_flag;
+	u16 sbr_on_flag;
+	u16 sbr_ps_on_flag;
+	u16 dual_mono_mode;
+	u16 channel_configuration;
+	u16 sample_rate;
+};
+
+enum {
+	AUDIO_SET_AAC_CONFIG_32 = _IOW(AUDIO_IOCTL_MAGIC,
+	  (AUDIO_MAX_COMMON_IOCTL_NUM+0), struct msm_audio_aac_config32),
+	AUDIO_GET_AAC_CONFIG_32 = _IOR(AUDIO_IOCTL_MAGIC,
+	  (AUDIO_MAX_COMMON_IOCTL_NUM+1), struct msm_audio_aac_config32)
+};
+
+static long audio_compat_ioctl(struct file *file, unsigned int cmd,
+				unsigned long arg)
+{
+	struct q6audio_aio *audio = file->private_data;
+	int rc = 0;
+	switch (cmd) {
+	case AUDIO_START: {
+		rc = audio_ioctl_shared(file, cmd, (void *)arg);
+		break;
+	}
+	case AUDIO_GET_AAC_CONFIG_32: {
+		struct msm_audio_aac_config *aac_config;
+		struct msm_audio_aac_config32 aac_config_32;
+
+		aac_config = (struct msm_audio_aac_config *)audio->codec_cfg;
+		aac_config_32.format = aac_config->format;
+		aac_config_32.audio_object = aac_config->audio_object;
+		aac_config_32.ep_config = aac_config->ep_config;
+		aac_config_32.aac_section_data_resilience_flag =
+			aac_config->aac_section_data_resilience_flag;
+		aac_config_32.aac_scalefactor_data_resilience_flag =
+			 aac_config->aac_scalefactor_data_resilience_flag;
+		aac_config_32.aac_spectral_data_resilience_flag =
+			aac_config->aac_spectral_data_resilience_flag;
+		aac_config_32.sbr_on_flag = aac_config->sbr_on_flag;
+		aac_config_32.sbr_ps_on_flag = aac_config->sbr_ps_on_flag;
+		aac_config_32.dual_mono_mode = aac_config->dual_mono_mode;
+		aac_config_32.channel_configuration =
+					aac_config->channel_configuration;
+		aac_config_32.sample_rate = aac_config->sample_rate;
+
+		if (copy_to_user((void *)arg, &aac_config_32,
+			sizeof(aac_config_32))) {
+			pr_err("%s: copy_to_user for AUDIO_GET_AAC_CONFIG_32 failed\n",
+				__func__);
+			rc = -EFAULT;
+			break;
+		}
+		break;
+	}
+	case AUDIO_SET_AAC_CONFIG_32: {
+		struct msm_audio_aac_config aac_config;
+		struct msm_audio_aac_config32 aac_config_32;
+		pr_debug("%s: AUDIO_SET_AAC_CONFIG\n", __func__);
+		if (copy_from_user(&aac_config_32, (void *)arg,
+			sizeof(aac_config_32))) {
+			pr_err("%s: copy_from_user for AUDIO_SET_AAC_CONFIG_32 failed\n",
+				__func__);
+			rc = -EFAULT;
+			break;
+		}
+		aac_config.format = aac_config_32.format;
+		aac_config.audio_object = aac_config_32.audio_object;
+		aac_config.ep_config = aac_config_32.ep_config;
+		aac_config.aac_section_data_resilience_flag =
+			aac_config_32.aac_section_data_resilience_flag;
+		aac_config.aac_scalefactor_data_resilience_flag =
+			aac_config_32.aac_scalefactor_data_resilience_flag;
+		aac_config.aac_spectral_data_resilience_flag =
+			aac_config_32.aac_spectral_data_resilience_flag;
+		aac_config.sbr_on_flag = aac_config_32.sbr_on_flag;
+		aac_config.sbr_ps_on_flag = aac_config_32.sbr_ps_on_flag;
+		aac_config.dual_mono_mode = aac_config_32.dual_mono_mode;
+		aac_config.channel_configuration =
+				aac_config_32.channel_configuration;
+		aac_config.sample_rate = aac_config_32.sample_rate;
+
+		cmd = AUDIO_SET_AAC_CONFIG;
+		rc = audio_ioctl_shared(file, cmd, &aac_config);
+		if (rc)
+			pr_err("%s:AUDIO_SET_AAC_CONFIG failed. Rc= %d\n",
+				__func__, rc);
+		break;
+	}
+	default: {
+		pr_debug("%s[%pK]: Calling utils ioctl\n", __func__, audio);
+		rc = audio->codec_compat_ioctl(file, cmd, arg);
+		if (rc)
+			pr_err("%s[%pK]:Failed in utils_ioctl: %d\n",
+				__func__, audio, rc);
+	}
+	}
+	return rc;
+}
+#else
+#define audio_compat_ioctl NULL
+#endif
+
+static int audio_open(struct inode *inode, struct file *file)
+{
+	struct q6audio_aio *audio = NULL;
+	int rc = 0;
+	struct msm_audio_aac_config *aac_config = NULL;
+
+#ifdef CONFIG_DEBUG_FS
+	/* 4 bytes represent the decoder number, 1 byte for the terminating NUL */
+	char name[sizeof "msm_aac_" + 5];
+#endif
+	audio = kzalloc(sizeof(struct q6audio_aio), GFP_KERNEL);
+
+	if (audio == NULL) {
+		pr_err("Could not allocate memory for aac decode driver\n");
+		return -ENOMEM;
+	}
+	audio->codec_cfg = kzalloc(sizeof(struct msm_audio_aac_config),
+					GFP_KERNEL);
+	if (audio->codec_cfg == NULL) {
+		pr_err("%s:Could not allocate memory for aac"
+			"config\n", __func__);
+		kfree(audio);
+		return -ENOMEM;
+	}
+	aac_config = audio->codec_cfg;
+
+	/* Settings will be re-configured at AUDIO_SET_CONFIG,
+	 * but at least we need to have an initial config
+	 */
+	audio->pcm_cfg.buffer_size = PCM_BUFSZ_MIN_AAC;
+	audio->miscdevice = &audio_aac_misc;
+	audio->wakelock_voted = false;
+	audio->audio_ws_mgr = &audio_aac_ws_mgr;
+	aac_config->dual_mono_mode = AUDIO_AAC_DUAL_MONO_INVALID;
+
+	audio->ac = q6asm_audio_client_alloc((app_cb) q6_audio_cb,
+					     (void *)audio);
+
+	if (!audio->ac) {
+		pr_err("Could not allocate memory for audio client\n");
+		kfree(audio->codec_cfg);
+		kfree(audio);
+		return -ENOMEM;
+	}
+	rc = audio_aio_open(audio, file);
+	if (rc < 0) {
+		pr_err("%s: audio_aio_open rc=%d\n",
+			__func__, rc);
+		goto fail;
+	}
+	/* open in T/NT mode */
+	if ((file->f_mode & FMODE_WRITE) && (file->f_mode & FMODE_READ)) {
+		rc = q6asm_open_read_write(audio->ac, FORMAT_LINEAR_PCM,
+					   FORMAT_MPEG4_AAC);
+		if (rc < 0) {
+			pr_err("NT mode Open failed rc=%d\n", rc);
+			rc = -ENODEV;
+			goto fail;
+		}
+		audio->feedback = NON_TUNNEL_MODE;
+		/* open AAC decoder; expected frames is always 1:
+		 * audio->buf_cfg.frames_per_buf = 0x01; */
+		audio->buf_cfg.meta_info_enable = 0x01;
+	} else if ((file->f_mode & FMODE_WRITE) &&
+			!(file->f_mode & FMODE_READ)) {
+		rc = q6asm_open_write(audio->ac, FORMAT_MPEG4_AAC);
+		if (rc < 0) {
+			pr_err("T mode Open failed rc=%d\n", rc);
+			rc = -ENODEV;
+			goto fail;
+		}
+		audio->feedback = TUNNEL_MODE;
+		audio->buf_cfg.meta_info_enable = 0x00;
+	} else {
+		pr_err("Not supported mode\n");
+		rc = -EACCES;
+		goto fail;
+	}
+
+#ifdef CONFIG_DEBUG_FS
+	snprintf(name, sizeof name, "msm_aac_%04x", audio->ac->session);
+	audio->dentry = debugfs_create_file(name, S_IFREG | S_IRUGO,
+					    NULL, (void *)audio,
+					    &audio_aac_debug_fops);
+
+	if (IS_ERR(audio->dentry))
+		pr_debug("debugfs_create_file failed\n");
+#endif
+	pr_info("%s:aacdec success mode[%d]session[%d]\n", __func__,
+						audio->feedback,
+						audio->ac->session);
+	return rc;
+fail:
+	q6asm_audio_client_free(audio->ac);
+	kfree(audio->codec_cfg);
+	kfree(audio);
+	return rc;
+}
+
+static const struct file_operations audio_aac_fops = {
+	.owner = THIS_MODULE,
+	.open = audio_open,
+	.release = audio_aio_release,
+	.unlocked_ioctl = audio_ioctl,
+	.fsync = audio_aio_fsync,
+	.compat_ioctl = audio_compat_ioctl
+};
+
+static struct miscdevice audio_aac_misc = {
+	.minor = MISC_DYNAMIC_MINOR,
+	.name = "msm_aac",
+	.fops = &audio_aac_fops,
+};
+
+static int __init audio_aac_init(void)
+{
+	int ret = misc_register(&audio_aac_misc);
+
+	if (ret == 0)
+		device_init_wakeup(audio_aac_misc.this_device, true);
+	audio_aac_ws_mgr.ref_cnt = 0;
+	mutex_init(&audio_aac_ws_mgr.ws_lock);
+
+	return ret;
+}
+
+device_initcall(audio_aac_init);
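
On the decode side, tunnel-mode playback through /dev/msm_aac is a write() stream of encoded frames once the codec config is set. A user-space sketch for ADTS content (ioctl names and struct fields as used by the driver above; the stereo/44.1 kHz values are assumptions about the content):

#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/msm_audio.h>
#include <linux/msm_audio_aac.h>

static int play_adts(const void *frames, size_t len)
{
	struct msm_audio_aac_config cfg;
	int fd = open("/dev/msm_aac", O_WRONLY);	/* write-only: tunnel mode */
	int rc = -1;

	if (fd < 0)
		return -1;
	if (ioctl(fd, AUDIO_GET_AAC_CONFIG, &cfg) == 0) {	/* start from driver defaults */
		cfg.format = AUDIO_AAC_FORMAT_ADTS;
		cfg.channel_configuration = 2;	/* stereo content (assumed) */
		cfg.sample_rate = 44100;	/* 44.1 kHz content (assumed) */
		if (ioctl(fd, AUDIO_SET_AAC_CONFIG, &cfg) == 0 &&
		    ioctl(fd, AUDIO_START, 0) == 0 &&
		    write(fd, frames, len) == (ssize_t)len) {
			fsync(fd);	/* audio_aio_fsync drains the stream */
			rc = 0;
		}
	}
	close(fd);
	return rc;
}
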
diff -Nruw linux-4.4.115-fbx/drivers/misc/qcom./qdsp6v2/audio_alac.c linux-4.4.115-fbx/drivers/misc/qcom/qdsp6v2/audio_alac.c
--- linux-4.4.115-fbx/drivers/misc/qcom./qdsp6v2/audio_alac.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/misc/qcom/qdsp6v2/audio_alac.c	2019-01-22 16:16:24.743257635 +0100
@@ -0,0 +1,438 @@
+/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+*
+* This program is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License version 2 and
+* only version 2 as published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+* GNU General Public License for more details.
+*
+*/
+
+#include <linux/types.h>
+#include <linux/msm_audio_alac.h>
+#include <linux/compat.h>
+#include "audio_utils_aio.h"
+
+static struct miscdevice audio_alac_misc;
+static struct ws_mgr audio_alac_ws_mgr;
+
+static const struct file_operations audio_alac_debug_fops = {
+	.read = audio_aio_debug_read,
+	.open = audio_aio_debug_open,
+};
+
+static struct dentry *config_debugfs_create_file(const char *name, void *data)
+{
+	return debugfs_create_file(name, S_IFREG | S_IRUGO,
+				NULL, (void *)data, &audio_alac_debug_fops);
+}
+
+static int alac_channel_map(u8 *channel_mapping, uint32_t channels);
+
+static long audio_ioctl_shared(struct file *file, unsigned int cmd,
+						void *arg)
+{
+	struct q6audio_aio *audio = file->private_data;
+	int rc = 0;
+
+	switch (cmd) {
+	case AUDIO_START: {
+		struct asm_alac_cfg alac_cfg;
+		struct msm_audio_alac_config *alac_config;
+		u8 channel_mapping[PCM_FORMAT_MAX_NUM_CHANNEL];
+
+		memset(channel_mapping, 0, PCM_FORMAT_MAX_NUM_CHANNEL);
+
+		if (alac_channel_map(channel_mapping,
+			audio->pcm_cfg.channel_count)) {
+			pr_err("%s: setting channel map failed %d\n",
+					__func__, audio->pcm_cfg.channel_count);
+		}
+
+		pr_debug("%s[%pK]: AUDIO_START session_id[%d]\n", __func__,
+						audio, audio->ac->session);
+		if (audio->feedback == NON_TUNNEL_MODE) {
+			/* Configure PCM output block */
+			rc = q6asm_enc_cfg_blk_pcm_v2(audio->ac,
+					audio->pcm_cfg.sample_rate,
+					audio->pcm_cfg.channel_count,
+					16, /*bits per sample*/
+					false, false, channel_mapping);
+			if (rc < 0) {
+				pr_err("pcm output block config failed\n");
+				break;
+			}
+		}
+		alac_config = (struct msm_audio_alac_config *)audio->codec_cfg;
+		alac_cfg.frame_length = alac_config->frameLength;
+		alac_cfg.compatible_version = alac_config->compatVersion;
+		alac_cfg.bit_depth = alac_config->bitDepth;
+		alac_cfg.pb = alac_config->pb;
+		alac_cfg.mb = alac_config->mb;
+		alac_cfg.kb = alac_config->kb;
+		alac_cfg.num_channels = alac_config->channelCount;
+		alac_cfg.max_run = alac_config->maxRun;
+		alac_cfg.max_frame_bytes = alac_config->maxSize;
+		alac_cfg.avg_bit_rate = alac_config->averageBitRate;
+		alac_cfg.sample_rate = alac_config->sampleRate;
+		alac_cfg.channel_layout_tag = alac_config->channelLayout;
+		pr_debug("%s: frame_length %d compatible_version %d bit_depth %d pb %d mb %d kb %d num_channels %d max_run %d max_frame_bytes %d avg_bit_rate %d sample_rate %d channel_layout_tag %d\n",
+				__func__, alac_config->frameLength,
+				alac_config->compatVersion,
+				alac_config->bitDepth, alac_config->pb,
+				alac_config->mb, alac_config->kb,
+				alac_config->channelCount, alac_config->maxRun,
+				alac_config->maxSize,
+				alac_config->averageBitRate,
+				alac_config->sampleRate,
+				alac_config->channelLayout);
+		/* Configure Media format block */
+		rc = q6asm_media_format_block_alac(audio->ac, &alac_cfg,
+							audio->ac->stream_id);
+		if (rc < 0) {
+			pr_err("cmd media format block failed\n");
+			break;
+		}
+		rc = audio_aio_enable(audio);
+		audio->eos_rsp = 0;
+		audio->eos_flag = 0;
+		if (!rc) {
+			audio->enabled = 1;
+		} else {
+			audio->enabled = 0;
+			pr_err("Audio Start procedure failed rc=%d\n", rc);
+			break;
+		}
+		pr_debug("AUDIO_START success enable[%d]\n", audio->enabled);
+		if (audio->stopped == 1)
+			audio->stopped = 0;
+		break;
+	}
+	default:
+		pr_err("%s: Unknown ioctl cmd = %d", __func__, cmd);
+		break;
+	}
+	return rc;
+}
+
+static long audio_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+	struct q6audio_aio *audio = file->private_data;
+	int rc = 0;
+
+	switch (cmd) {
+	case AUDIO_START: {
+		rc = audio_ioctl_shared(file, cmd, (void *)arg);
+		break;
+	}
+	case AUDIO_GET_ALAC_CONFIG: {
+		if (copy_to_user((void *)arg, audio->codec_cfg,
+			sizeof(struct msm_audio_alac_config))) {
+			pr_err("%s:copy_to_user for AUDIO_GET_ALAC_CONFIG failed\n",
+				__func__);
+			rc = -EFAULT;
+			break;
+		}
+		break;
+	}
+	case AUDIO_SET_ALAC_CONFIG: {
+		if (copy_from_user(audio->codec_cfg, (void *)arg,
+			sizeof(struct msm_audio_alac_config))) {
+			pr_err("%s:copy_from_user for AUDIO_SET_ALAC_CONFIG failed\n",
+				__func__);
+			rc = -EFAULT;
+			break;
+		}
+		break;
+	}
+	default: {
+		rc = audio->codec_ioctl(file, cmd, arg);
+		if (rc)
+			pr_err("Failed in utils_ioctl: %d\n", rc);
+		break;
+	}
+	}
+	return rc;
+}
+
+#ifdef CONFIG_COMPAT
+struct msm_audio_alac_config_32 {
+	u32 frameLength;
+	u8 compatVersion;
+	u8 bitDepth;
+	u8 pb;
+	u8 mb;
+	u8 kb;
+	u8 channelCount;
+	u16 maxRun;
+	u32 maxSize;
+	u32 averageBitRate;
+	u32 sampleRate;
+	u32 channelLayout;
+};
+
+enum {
+	AUDIO_GET_ALAC_CONFIG_32 =  _IOR(AUDIO_IOCTL_MAGIC,
+	(AUDIO_MAX_COMMON_IOCTL_NUM+0), struct msm_audio_alac_config_32),
+	AUDIO_SET_ALAC_CONFIG_32 =  _IOW(AUDIO_IOCTL_MAGIC,
+	(AUDIO_MAX_COMMON_IOCTL_NUM+1), struct msm_audio_alac_config_32)
+};
+
+static long audio_compat_ioctl(struct file *file, unsigned int cmd,
+						unsigned long arg)
+{
+	struct q6audio_aio *audio = file->private_data;
+	int rc = 0;
+
+	switch (cmd) {
+	case AUDIO_START: {
+		rc = audio_ioctl_shared(file, cmd, (void *)arg);
+		break;
+	}
+	case AUDIO_GET_ALAC_CONFIG_32: {
+		struct msm_audio_alac_config *alac_config;
+		struct msm_audio_alac_config_32 alac_config_32;
+
+		memset(&alac_config_32, 0, sizeof(alac_config_32));
+
+		alac_config = (struct msm_audio_alac_config *)audio->codec_cfg;
+		alac_config_32.frameLength = alac_config->frameLength;
+		alac_config_32.compatVersion =
+				alac_config->compatVersion;
+		alac_config_32.bitDepth = alac_config->bitDepth;
+		alac_config_32.pb = alac_config->pb;
+		alac_config_32.mb = alac_config->mb;
+		alac_config_32.kb = alac_config->kb;
+		alac_config_32.channelCount = alac_config->channelCount;
+		alac_config_32.maxRun = alac_config->maxRun;
+		alac_config_32.maxSize = alac_config->maxSize;
+		alac_config_32.averageBitRate = alac_config->averageBitRate;
+		alac_config_32.sampleRate = alac_config->sampleRate;
+		alac_config_32.channelLayout = alac_config->channelLayout;
+
+		if (copy_to_user((void *)arg, &alac_config_32,
+			sizeof(alac_config_32))) {
+			pr_err("%s: copy_to_user for GET_ALAC_CONFIG_32 failed\n",
+				 __func__);
+			rc = -EFAULT;
+			break;
+		}
+		break;
+	}
+	case AUDIO_SET_ALAC_CONFIG_32: {
+		struct msm_audio_alac_config *alac_config;
+		struct msm_audio_alac_config_32 alac_config_32;
+
+		if (copy_from_user(&alac_config_32, (void *)arg,
+			sizeof(alac_config_32))) {
+			pr_err("%s: copy_from_user for SET_ALAC_CONFIG_32 failed\n"
+				, __func__);
+			rc = -EFAULT;
+			break;
+		}
+		alac_config = (struct msm_audio_alac_config *)audio->codec_cfg;
+		alac_config->frameLength = alac_config_32.frameLength;
+		alac_config->compatVersion =
+				alac_config_32.compatVersion;
+		alac_config->bitDepth = alac_config_32.bitDepth;
+		alac_config->pb = alac_config_32.pb;
+		alac_config->mb = alac_config_32.mb;
+		alac_config->kb = alac_config_32.kb;
+		alac_config->channelCount = alac_config_32.channelCount;
+		alac_config->maxRun = alac_config_32.maxRun;
+		alac_config->maxSize = alac_config_32.maxSize;
+		alac_config->averageBitRate = alac_config_32.averageBitRate;
+		alac_config->sampleRate = alac_config_32.sampleRate;
+		alac_config->channelLayout = alac_config_32.channelLayout;
+
+		break;
+	}
+	default: {
+		rc = audio->codec_compat_ioctl(file, cmd, arg);
+		if (rc)
+			pr_err("Failed in utils_ioctl: %d\n", rc);
+		break;
+	}
+	}
+	return rc;
+}
+#else
+#define audio_compat_ioctl NULL
+#endif
+
+static int audio_open(struct inode *inode, struct file *file)
+{
+	struct q6audio_aio *audio = NULL;
+	int rc = 0;
+
+	/* 4 bytes represent the decoder number, 1 byte for the terminating NUL */
+	char name[sizeof "msm_alac_" + 5];
+	audio = kzalloc(sizeof(struct q6audio_aio), GFP_KERNEL);
+
+	if (!audio) {
+		pr_err("Could not allocate memory for alac decode driver\n");
+		return -ENOMEM;
+	}
+	audio->codec_cfg = kzalloc(sizeof(struct msm_audio_alac_config),
+					GFP_KERNEL);
+	if (!audio->codec_cfg) {
+		pr_err("%s:Could not allocate memory for alac config\n",
+			__func__);
+		kfree(audio);
+		return -ENOMEM;
+	}
+
+	audio->pcm_cfg.buffer_size = PCM_BUFSZ_MIN;
+	audio->miscdevice = &audio_alac_misc;
+	audio->wakelock_voted = false;
+	audio->audio_ws_mgr = &audio_alac_ws_mgr;
+
+	audio->ac = q6asm_audio_client_alloc((app_cb) q6_audio_cb,
+					     (void *)audio);
+
+	if (!audio->ac) {
+		pr_err("Could not allocate memory for audio client\n");
+		kfree(audio->codec_cfg);
+		kfree(audio);
+		return -ENOMEM;
+	}
+	rc = audio_aio_open(audio, file);
+	if (rc < 0) {
+		pr_err("%s: audio_aio_open rc=%d\n",
+			__func__, rc);
+		goto fail;
+	}
+	/* open in T/NT mode */
+	if ((file->f_mode & FMODE_WRITE) && (file->f_mode & FMODE_READ)) {
+		rc = q6asm_open_read_write(audio->ac, FORMAT_LINEAR_PCM,
+					   FORMAT_ALAC);
+		if (rc < 0) {
+			pr_err("NT mode Open failed rc=%d\n", rc);
+			rc = -ENODEV;
+			goto fail;
+		}
+		audio->feedback = NON_TUNNEL_MODE;
+		/* open ALAC decoder, expected frames is always 1*/
+		audio->buf_cfg.frames_per_buf = 0x01;
+		audio->buf_cfg.meta_info_enable = 0x01;
+	} else if ((file->f_mode & FMODE_WRITE) &&
+			!(file->f_mode & FMODE_READ)) {
+		rc = q6asm_open_write(audio->ac, FORMAT_ALAC);
+		if (rc < 0) {
+			pr_err("T mode Open failed rc=%d\n", rc);
+			rc = -ENODEV;
+			goto fail;
+		}
+		audio->feedback = TUNNEL_MODE;
+		audio->buf_cfg.meta_info_enable = 0x00;
+	} else {
+		pr_err("Not supported mode\n");
+		rc = -EACCES;
+		goto fail;
+	}
+
+	snprintf(name, sizeof(name), "msm_alac_%04x", audio->ac->session);
+	audio->dentry = config_debugfs_create_file(name, (void *)audio);
+
+	if (IS_ERR_OR_NULL(audio->dentry))
+		pr_debug("debugfs_create_file failed\n");
+	pr_debug("%s:alacdec success mode[%d]session[%d]\n", __func__,
+						audio->feedback,
+						audio->ac->session);
+	return rc;
+fail:
+	q6asm_audio_client_free(audio->ac);
+	kfree(audio->codec_cfg);
+	kfree(audio);
+	return rc;
+}
+
+static int alac_channel_map(u8 *channel_mapping, uint32_t channels)
+{
+	u8 *lchannel_mapping;
+
+	lchannel_mapping = channel_mapping;
+	pr_debug("%s:  channels passed: %d\n", __func__, channels);
+	if (channels == 1)  {
+		lchannel_mapping[0] = PCM_CHANNEL_FC;
+	} else if (channels == 2) {
+		lchannel_mapping[0] = PCM_CHANNEL_FL;
+		lchannel_mapping[1] = PCM_CHANNEL_FR;
+	} else if (channels == 3) {
+		lchannel_mapping[0] = PCM_CHANNEL_FC;
+		lchannel_mapping[1] = PCM_CHANNEL_FL;
+		lchannel_mapping[2] = PCM_CHANNEL_FR;
+	} else if (channels == 4) {
+		lchannel_mapping[0] = PCM_CHANNEL_FC;
+		lchannel_mapping[1] = PCM_CHANNEL_FL;
+		lchannel_mapping[2] = PCM_CHANNEL_FR;
+		lchannel_mapping[3] = PCM_CHANNEL_CS;
+	} else if (channels == 5) {
+		lchannel_mapping[0] = PCM_CHANNEL_FC;
+		lchannel_mapping[1] = PCM_CHANNEL_FL;
+		lchannel_mapping[2] = PCM_CHANNEL_FR;
+		lchannel_mapping[3] = PCM_CHANNEL_LS;
+		lchannel_mapping[4] = PCM_CHANNEL_RS;
+	} else if (channels == 6) {
+		lchannel_mapping[0] = PCM_CHANNEL_FC;
+		lchannel_mapping[1] = PCM_CHANNEL_FL;
+		lchannel_mapping[2] = PCM_CHANNEL_FR;
+		lchannel_mapping[3] = PCM_CHANNEL_LS;
+		lchannel_mapping[4] = PCM_CHANNEL_RS;
+		lchannel_mapping[5] = PCM_CHANNEL_LFE;
+	} else if (channels == 7) {
+		lchannel_mapping[0] = PCM_CHANNEL_FC;
+		lchannel_mapping[1] = PCM_CHANNEL_FL;
+		lchannel_mapping[2] = PCM_CHANNEL_FR;
+		lchannel_mapping[3] = PCM_CHANNEL_LS;
+		lchannel_mapping[4] = PCM_CHANNEL_RS;
+		lchannel_mapping[5] = PCM_CHANNEL_CS;
+		lchannel_mapping[6] = PCM_CHANNEL_LFE;
+	} else if (channels == 8) {
+		lchannel_mapping[0] = PCM_CHANNEL_FC;
+		lchannel_mapping[1] = PCM_CHANNEL_FLC;
+		lchannel_mapping[2] = PCM_CHANNEL_FRC;
+		lchannel_mapping[3] = PCM_CHANNEL_FL;
+		lchannel_mapping[4] = PCM_CHANNEL_FR;
+		lchannel_mapping[5] = PCM_CHANNEL_LS;
+		lchannel_mapping[6] = PCM_CHANNEL_RS;
+		lchannel_mapping[7] = PCM_CHANNEL_LFE;
+	} else {
+		pr_err("%s: ERROR.unsupported num_ch = %u\n",
+				__func__, channels);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static const struct file_operations audio_alac_fops = {
+	.owner = THIS_MODULE,
+	.open = audio_open,
+	.release = audio_aio_release,
+	.unlocked_ioctl = audio_ioctl,
+	.fsync = audio_aio_fsync,
+	.compat_ioctl = audio_compat_ioctl
+};
+
+static struct miscdevice audio_alac_misc = {
+	.minor = MISC_DYNAMIC_MINOR,
+	.name = "msm_alac",
+	.fops = &audio_alac_fops,
+};
+
+static int __init audio_alac_init(void)
+{
+	int ret = misc_register(&audio_alac_misc);
+
+	if (ret == 0)
+		device_init_wakeup(audio_alac_misc.this_device, true);
+	audio_alac_ws_mgr.ref_cnt = 0;
+	mutex_init(&audio_alac_ws_mgr.ws_lock);
+
+	return ret;
+}
+
+device_initcall(audio_alac_init);
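
The msm_audio_alac_config fields correspond one to one to the ALACSpecificConfig "magic cookie" that containers carry for ALAC streams. A parsing sketch (the 24-byte big-endian cookie layout is taken from the published ALAC spec and is an assumption here; channelLayout comes from the optional channel-layout atom and is left untouched):

#include <stdint.h>
#include <linux/msm_audio_alac.h>

static uint32_t be32(const uint8_t *p)
{
	return (uint32_t)p[0] << 24 | (uint32_t)p[1] << 16 |
	       (uint32_t)p[2] << 8 | p[3];
}

static void alac_cookie_to_config(const uint8_t *c,
				  struct msm_audio_alac_config *cfg)
{
	cfg->frameLength    = be32(c);			/* samples per frame */
	cfg->compatVersion  = c[4];
	cfg->bitDepth       = c[5];			/* 16/20/24/32 */
	cfg->pb             = c[6];			/* rice history mult */
	cfg->mb             = c[7];			/* rice initial history */
	cfg->kb             = c[8];			/* rice limit */
	cfg->channelCount   = c[9];
	cfg->maxRun         = (uint16_t)(c[10] << 8 | c[11]);
	cfg->maxSize        = be32(c + 12);		/* max bytes per frame */
	cfg->averageBitRate = be32(c + 16);
	cfg->sampleRate     = be32(c + 20);
}

The filled struct is then handed to the driver with AUDIO_SET_ALAC_CONFIG before AUDIO_START.
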
diff -Nruw linux-4.4.115-fbx/drivers/misc/qcom./qdsp6v2/audio_amrnb.c linux-4.4.115-fbx/drivers/misc/qcom/qdsp6v2/audio_amrnb.c
--- linux-4.4.115-fbx/drivers/misc/qcom./qdsp6v2/audio_amrnb.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/misc/qcom/qdsp6v2/audio_amrnb.c	2019-01-22 16:16:24.743257635 +0100
@@ -0,0 +1,228 @@
+/* amrnb audio output device
+ *
+ * Copyright (C) 2008 Google, Inc.
+ * Copyright (C) 2008 HTC Corporation
+ * Copyright (c) 2011-2016, The Linux Foundation. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/types.h>
+#include <linux/compat.h>
+#include "audio_utils_aio.h"
+
+static struct miscdevice audio_amrnb_misc;
+static struct ws_mgr audio_amrnb_ws_mgr;
+
+#ifdef CONFIG_DEBUG_FS
+static const struct file_operations audio_amrnb_debug_fops = {
+	.read = audio_aio_debug_read,
+	.open = audio_aio_debug_open,
+};
+#endif
+
+static long audio_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+	struct q6audio_aio *audio = file->private_data;
+	int rc = 0;
+
+	switch (cmd) {
+	case AUDIO_START: {
+		pr_debug("%s[%pK]: AUDIO_START session_id[%d]\n", __func__,
+						audio, audio->ac->session);
+		if (audio->feedback == NON_TUNNEL_MODE) {
+			/* Configure PCM output block */
+			rc = q6asm_enc_cfg_blk_pcm(audio->ac,
+					audio->pcm_cfg.sample_rate,
+					audio->pcm_cfg.channel_count);
+			if (rc < 0) {
+				pr_err("pcm output block config failed\n");
+				break;
+			}
+		}
+
+		rc = audio_aio_enable(audio);
+		audio->eos_rsp = 0;
+		audio->eos_flag = 0;
+		if (!rc) {
+			audio->enabled = 1;
+		} else {
+			audio->enabled = 0;
+			pr_err("Audio Start procedure failed rc=%d\n", rc);
+			break;
+		}
+		pr_debug("AUDIO_START success enable[%d]\n", audio->enabled);
+		if (audio->stopped == 1)
+			audio->stopped = 0;
+		break;
+	}
+	default:
+		pr_debug("%s[%pK]: Calling utils ioctl\n", __func__, audio);
+		rc = audio->codec_ioctl(file, cmd, arg);
+	}
+	return rc;
+}
+
+static long audio_compat_ioctl(struct file *file, unsigned int cmd,
+			       unsigned long arg)
+{
+	struct q6audio_aio *audio = file->private_data;
+	int rc = 0;
+
+	switch (cmd) {
+	case AUDIO_START: {
+		pr_debug("%s[%pK]: AUDIO_START session_id[%d]\n", __func__,
+				audio, audio->ac->session);
+		if (audio->feedback == NON_TUNNEL_MODE) {
+			/* Configure PCM output block */
+			rc = q6asm_enc_cfg_blk_pcm(audio->ac,
+					audio->pcm_cfg.sample_rate,
+					audio->pcm_cfg.channel_count);
+			if (rc < 0) {
+				pr_err("%s: pcm output block config failed rc=%d\n",
+					__func__, rc);
+				break;
+			}
+		}
+
+		rc = audio_aio_enable(audio);
+		audio->eos_rsp = 0;
+		audio->eos_flag = 0;
+		if (!rc) {
+			audio->enabled = 1;
+		} else {
+			audio->enabled = 0;
+			pr_err("%s: Audio Start procedure failed rc=%d\n",
+				__func__, rc);
+			break;
+		}
+		pr_debug("AUDIO_START success enable[%d]\n", audio->enabled);
+		if (audio->stopped == 1)
+			audio->stopped = 0;
+		break;
+	}
+	default:
+		pr_debug("%s[%pK]: Calling compat ioctl\n", __func__, audio);
+		rc = audio->codec_compat_ioctl(file, cmd, arg);
+	}
+	return rc;
+}
+
+
+static int audio_open(struct inode *inode, struct file *file)
+{
+	struct q6audio_aio *audio = NULL;
+	int rc = 0;
+
+#ifdef CONFIG_DEBUG_FS
+	/* 4 bytes represent the decoder number, 1 byte for the terminating NUL */
+	char name[sizeof "msm_amrnb_" + 5];
+#endif
+	audio = kzalloc(sizeof(struct q6audio_aio), GFP_KERNEL);
+
+	if (audio == NULL) {
+		pr_err("Could not allocate memory for wma decode driver\n");
+		return -ENOMEM;
+	}
+
+	audio->pcm_cfg.buffer_size = PCM_BUFSZ_MIN;
+	audio->miscdevice = &audio_amrnb_misc;
+	audio->wakelock_voted = false;
+	audio->audio_ws_mgr = &audio_amrnb_ws_mgr;
+
+	audio->ac = q6asm_audio_client_alloc((app_cb) q6_audio_cb,
+					     (void *)audio);
+
+	if (!audio->ac) {
+		pr_err("Could not allocate memory for audio client\n");
+		kfree(audio);
+		return -ENOMEM;
+	}
+	rc = audio_aio_open(audio, file);
+	if (rc < 0) {
+		pr_err("%s: audio_aio_open rc=%d\n",
+			__func__, rc);
+		goto fail;
+	}
+	/* open in T/NT mode */
+	if ((file->f_mode & FMODE_WRITE) && (file->f_mode & FMODE_READ)) {
+		rc = q6asm_open_read_write(audio->ac, FORMAT_LINEAR_PCM,
+					   FORMAT_AMRNB);
+		if (rc < 0) {
+			pr_err("NT mode Open failed rc=%d\n", rc);
+			rc = -ENODEV;
+			goto fail;
+		}
+		audio->feedback = NON_TUNNEL_MODE;
+		audio->buf_cfg.frames_per_buf = 0x01;
+		audio->buf_cfg.meta_info_enable = 0x01;
+	} else if ((file->f_mode & FMODE_WRITE) &&
+			!(file->f_mode & FMODE_READ)) {
+		rc = q6asm_open_write(audio->ac, FORMAT_AMRNB);
+		if (rc < 0) {
+			pr_err("T mode Open failed rc=%d\n", rc);
+			rc = -ENODEV;
+			goto fail;
+		}
+		audio->feedback = TUNNEL_MODE;
+		audio->buf_cfg.meta_info_enable = 0x00;
+	} else {
+		pr_err("Not supported mode\n");
+		rc = -EACCES;
+		goto fail;
+	}
+
+#ifdef CONFIG_DEBUG_FS
+	snprintf(name, sizeof name, "msm_amrnb_%04x", audio->ac->session);
+	audio->dentry = debugfs_create_file(name, S_IFREG | S_IRUGO,
+					    NULL, (void *)audio,
+					    &audio_amrnb_debug_fops);
+
+	if (IS_ERR(audio->dentry))
+		pr_debug("debugfs_create_file failed\n");
+#endif
+	pr_info("%s:amrnb decoder open success, session_id = %d\n", __func__,
+				audio->ac->session);
+	return rc;
+fail:
+	q6asm_audio_client_free(audio->ac);
+	kfree(audio);
+	return rc;
+}
+
+static const struct file_operations audio_amrnb_fops = {
+	.owner = THIS_MODULE,
+	.open = audio_open,
+	.release = audio_aio_release,
+	.unlocked_ioctl = audio_ioctl,
+	.fsync = audio_aio_fsync,
+	.compat_ioctl = audio_compat_ioctl,
+};
+
+static struct miscdevice audio_amrnb_misc = {
+	.minor = MISC_DYNAMIC_MINOR,
+	.name = "msm_amrnb",
+	.fops = &audio_amrnb_fops,
+};
+
+static int __init audio_amrnb_init(void)
+{
+	int ret = misc_register(&audio_amrnb_misc);
+
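+	/*
+	 * Marking the misc device wakeup-capable lets the shared aio layer
+	 * vote a wakeup source while playback is active (inferred from the
+	 * wakelock_voted/audio_ws_mgr fields initialised in audio_open()).
+	 */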
+	if (ret == 0)
+		device_init_wakeup(audio_amrnb_misc.this_device, true);
+	audio_amrnb_ws_mgr.ref_cnt = 0;
+	mutex_init(&audio_amrnb_ws_mgr.ws_lock);
+
+	return ret;
+}
+
+device_initcall(audio_amrnb_init);
diff -Nruw linux-4.4.115-fbx/drivers/misc/qcom./qdsp6v2/audio_amrwb.c linux-4.4.115-fbx/drivers/misc/qcom/qdsp6v2/audio_amrwb.c
--- linux-4.4.115-fbx/drivers/misc/qcom./qdsp6v2/audio_amrwb.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/misc/qcom/qdsp6v2/audio_amrwb.c	2019-01-22 16:16:24.743257635 +0100
@@ -0,0 +1,232 @@
+/* amrwb audio output device
+ *
+ * Copyright (C) 2008 Google, Inc.
+ * Copyright (C) 2008 HTC Corporation
+ * Copyright (c) 2011-2016, The Linux Foundation. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/compat.h>
+#include <linux/types.h>
+#include "audio_utils_aio.h"
+
+static struct miscdevice audio_amrwb_misc;
+static struct ws_mgr audio_amrwb_ws_mgr;
+
+#ifdef CONFIG_DEBUG_FS
+static const struct file_operations audio_amrwb_debug_fops = {
+	.read = audio_aio_debug_read,
+	.open = audio_aio_debug_open,
+};
+#endif
+
+static long audio_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+	struct q6audio_aio *audio = file->private_data;
+	int rc = 0;
+
+	switch (cmd) {
+	case AUDIO_START: {
+		pr_debug("%s[%pK]: AUDIO_START session_id[%d]\n", __func__,
+						audio, audio->ac->session);
+		if (audio->feedback == NON_TUNNEL_MODE) {
+			/* Configure PCM output block */
+			rc = q6asm_enc_cfg_blk_pcm(audio->ac,
+					audio->pcm_cfg.sample_rate,
+					audio->pcm_cfg.channel_count);
+			if (rc < 0) {
+				pr_err("pcm output block config failed\n");
+				break;
+			}
+		}
+
+		rc = audio_aio_enable(audio);
+		audio->eos_rsp = 0;
+		audio->eos_flag = 0;
+		if (!rc) {
+			audio->enabled = 1;
+		} else {
+			audio->enabled = 0;
+			pr_err("Audio Start procedure failed rc=%d\n", rc);
+			break;
+		}
+		pr_debug("%s: AUDIO_START sessionid[%d]enable[%d]\n", __func__,
+						audio->ac->session,
+						audio->enabled);
+		if (audio->stopped == 1)
+			audio->stopped = 0;
+		break;
+	}
+	default:
+		pr_debug("%s[%pK]: Calling utils ioctl\n", __func__, audio);
+		rc = audio->codec_ioctl(file, cmd, arg);
+	}
+	return rc;
+}
+
+static long audio_compat_ioctl(struct file *file, unsigned int cmd,
+			       unsigned long arg)
+{
+	struct q6audio_aio *audio = file->private_data;
+	int rc = 0;
+
+	switch (cmd) {
+	case AUDIO_START: {
+		pr_debug("%s[%pK]: AUDIO_START session_id[%d]\n", __func__,
+				audio, audio->ac->session);
+		if (audio->feedback == NON_TUNNEL_MODE) {
+			/* Configure PCM output block */
+			rc = q6asm_enc_cfg_blk_pcm(audio->ac,
+					audio->pcm_cfg.sample_rate,
+					audio->pcm_cfg.channel_count);
+			if (rc < 0) {
+				pr_err("%s: pcm output block config failed rc=%d\n",
+					__func__, rc);
+				break;
+			}
+		}
+
+		rc = audio_aio_enable(audio);
+		audio->eos_rsp = 0;
+		audio->eos_flag = 0;
+		if (!rc) {
+			audio->enabled = 1;
+		} else {
+			audio->enabled = 0;
+			pr_err("%s: Audio Start procedure failed rc=%d\n",
+				__func__, rc);
+			break;
+		}
+		pr_debug("%s: AUDIO_START sessionid[%d]enable[%d]\n", __func__,
+				audio->ac->session,
+				audio->enabled);
+		if (audio->stopped == 1)
+			audio->stopped = 0;
+		break;
+	}
+	default:
+		pr_debug("%s[%pK]: Calling compat ioctl\n", __func__, audio);
+		rc = audio->codec_compat_ioctl(file, cmd, arg);
+	}
+	return rc;
+}
+
+static int audio_open(struct inode *inode, struct file *file)
+{
+	struct q6audio_aio *audio = NULL;
+	int rc = 0;
+
+#ifdef CONFIG_DEBUG_FS
+	/* 4 bytes for the decoder number, 1 byte for the string terminator */
+	char name[sizeof "msm_amrwb_" + 5];
+#endif
+	audio = kzalloc(sizeof(struct q6audio_aio), GFP_KERNEL);
+
+	if (audio == NULL) {
+		pr_err("Could not allocate memory for aac decode driver\n");
+		return -ENOMEM;
+	}
+	audio->pcm_cfg.buffer_size = PCM_BUFSZ_MIN;
+	audio->miscdevice = &audio_amrwb_misc;
+	audio->wakelock_voted = false;
+	audio->audio_ws_mgr = &audio_amrwb_ws_mgr;
+
+	audio->ac = q6asm_audio_client_alloc((app_cb) q6_audio_cb,
+					     (void *)audio);
+
+	if (!audio->ac) {
+		pr_err("Could not allocate memory for audio client\n");
+		kfree(audio);
+		return -ENOMEM;
+	}
+	rc = audio_aio_open(audio, file);
+	if (rc < 0) {
+		pr_err("%s: audio_aio_open rc=%d\n",
+			__func__, rc);
+		goto fail;
+	}
+
+	/* open in T/NT mode */
+	if ((file->f_mode & FMODE_WRITE) && (file->f_mode & FMODE_READ)) {
+		rc = q6asm_open_read_write(audio->ac, FORMAT_LINEAR_PCM,
+					   FORMAT_AMRWB);
+		if (rc < 0) {
+			pr_err("NT mode Open failed rc=%d\n", rc);
+			rc = -ENODEV;
+			goto fail;
+		}
+		audio->feedback = NON_TUNNEL_MODE;
+		audio->buf_cfg.frames_per_buf = 0x01;
+		audio->buf_cfg.meta_info_enable = 0x01;
+	} else if ((file->f_mode & FMODE_WRITE) &&
+			!(file->f_mode & FMODE_READ)) {
+		rc = q6asm_open_write(audio->ac, FORMAT_AMRWB);
+		if (rc < 0) {
+			pr_err("T mode Open failed rc=%d\n", rc);
+			rc = -ENODEV;
+			goto fail;
+		}
+		audio->feedback = TUNNEL_MODE;
+		audio->buf_cfg.meta_info_enable = 0x00;
+	} else {
+		pr_err("Not supported mode\n");
+		rc = -EACCES;
+		goto fail;
+	}
+
+#ifdef CONFIG_DEBUG_FS
+	snprintf(name, sizeof name, "msm_amrwb_%04x", audio->ac->session);
+	audio->dentry = debugfs_create_file(name, S_IFREG | S_IRUGO,
+					    NULL, (void *)audio,
+					    &audio_amrwb_debug_fops);
+
+	if (IS_ERR(audio->dentry))
+		pr_debug("debugfs_create_file failed\n");
+#endif
+	pr_info("%s: AMRWB dec success mode[%d]session[%d]\n", __func__,
+						audio->feedback,
+						audio->ac->session);
+	return 0;
+fail:
+	q6asm_audio_client_free(audio->ac);
+	kfree(audio);
+	return rc;
+}
+
+static const struct file_operations audio_amrwb_fops = {
+	.owner = THIS_MODULE,
+	.open = audio_open,
+	.release = audio_aio_release,
+	.unlocked_ioctl = audio_ioctl,
+	.fsync = audio_aio_fsync,
+	.compat_ioctl = audio_compat_ioctl,
+};
+
+static struct miscdevice audio_amrwb_misc = {
+	.minor = MISC_DYNAMIC_MINOR,
+	.name = "msm_amrwb",
+	.fops = &audio_amrwb_fops,
+};
+
+static int __init audio_amrwb_init(void)
+{
+	int ret = misc_register(&audio_amrwb_misc);
+
+	if (ret == 0)
+		device_init_wakeup(audio_amrwb_misc.this_device, true);
+	audio_amrwb_ws_mgr.ref_cnt = 0;
+	mutex_init(&audio_amrwb_ws_mgr.ws_lock);
+
+	return ret;
+}
+
+device_initcall(audio_amrwb_init);
diff -Nruw linux-4.4.115-fbx/drivers/misc/qcom./qdsp6v2/audio_amrwbplus.c linux-4.4.115-fbx/drivers/misc/qcom/qdsp6v2/audio_amrwbplus.c
--- linux-4.4.115-fbx/drivers/misc/qcom./qdsp6v2/audio_amrwbplus.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/misc/qcom/qdsp6v2/audio_amrwbplus.c	2019-01-22 16:16:24.743257635 +0100
@@ -0,0 +1,399 @@
+/* amr-wbplus audio output device
+ *
+ * Copyright (C) 2008 Google, Inc.
+ * Copyright (C) 2008 HTC Corporation
+ * Copyright (c) 2010-2016, The Linux Foundation. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/msm_audio_amrwbplus.h>
+#include <linux/compat.h>
+#include "audio_utils_aio.h"
+
+static struct miscdevice audio_amrwbplus_misc;
+static struct ws_mgr audio_amrwbplus_ws_mgr;
+
+#ifdef CONFIG_DEBUG_FS
+static const struct file_operations audio_amrwbplus_debug_fops = {
+	.read = audio_aio_debug_read,
+	.open = audio_aio_debug_open,
+};
+static void config_debug_fs(struct q6audio_aio *audio)
+{
+	if (audio != NULL) {
+		char name[sizeof("msm_amrwbplus_") + 5];
+		snprintf(name, sizeof(name), "msm_amrwbplus_%04x",
+			audio->ac->session);
+		audio->dentry = debugfs_create_file(name, S_IFREG | S_IRUGO,
+						NULL, (void *)audio,
+						&audio_amrwbplus_debug_fops);
+		if (IS_ERR(audio->dentry))
+			pr_debug("debugfs_create_file failed\n");
+	}
+}
+#else
+static void config_debug_fs(struct q6audio_aio *audio)
+{
+}
+#endif
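+
+/*
+ * The empty stub keeps the config_debug_fs() call site in audio_open()
+ * free of #ifdefs when CONFIG_DEBUG_FS is disabled.
+ */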
+
+static long audio_ioctl_shared(struct file *file, unsigned int cmd,
+					void *arg)
+{
+	struct asm_amrwbplus_cfg q6_amrwbplus_cfg;
+	struct msm_audio_amrwbplus_config_v2 *amrwbplus_drv_config;
+	struct q6audio_aio *audio = file->private_data;
+	int rc = 0;
+
+	switch (cmd) {
+	case AUDIO_START: {
+		pr_err("%s[%pK]: AUDIO_START session_id[%d]\n", __func__,
+			audio, audio->ac->session);
+		if (audio->feedback == NON_TUNNEL_MODE) {
+			/* Configure PCM output block */
+			rc = q6asm_enc_cfg_blk_pcm(audio->ac,
+			audio->pcm_cfg.sample_rate,
+			audio->pcm_cfg.channel_count);
+			if (rc < 0) {
+				pr_err("pcm output block config failed\n");
+				break;
+			}
+		}
+		amrwbplus_drv_config =
+		(struct msm_audio_amrwbplus_config_v2 *)audio->codec_cfg;
+
+		q6_amrwbplus_cfg.size_bytes     =
+			amrwbplus_drv_config->size_bytes;
+		q6_amrwbplus_cfg.version        =
+			amrwbplus_drv_config->version;
+		q6_amrwbplus_cfg.num_channels   =
+			amrwbplus_drv_config->num_channels;
+		q6_amrwbplus_cfg.amr_band_mode  =
+			amrwbplus_drv_config->amr_band_mode;
+		q6_amrwbplus_cfg.amr_dtx_mode   =
+			amrwbplus_drv_config->amr_dtx_mode;
+		q6_amrwbplus_cfg.amr_frame_fmt  =
+			amrwbplus_drv_config->amr_frame_fmt;
+		q6_amrwbplus_cfg.amr_lsf_idx    =
+			amrwbplus_drv_config->amr_lsf_idx;
+
+		rc = q6asm_media_format_block_amrwbplus(audio->ac,
+							&q6_amrwbplus_cfg);
+		if (rc < 0) {
+			pr_err("q6asm_media_format_block_amrwb+ failed...\n");
+			break;
+		}
+		rc = audio_aio_enable(audio);
+		audio->eos_rsp = 0;
+		audio->eos_flag = 0;
+		if (!rc) {
+			audio->enabled = 1;
+		} else {
+			audio->enabled = 0;
+			pr_err("Audio Start procedure failed rc=%d\n", rc);
+			break;
+		}
+		pr_debug("%s:AUDIO_START sessionid[%d]enable[%d]\n", __func__,
+			audio->ac->session,
+			audio->enabled);
+		if (audio->stopped == 1)
+			audio->stopped = 0;
+		break;
+		}
+	default:
+		pr_err("%s: Unknown ioctl cmd = %d", __func__, cmd);
+		rc = -EINVAL;
+		break;
+	}
+	return rc;
+}
+
+static long audio_ioctl(struct file *file, unsigned int cmd,
+						unsigned long arg)
+{
+	struct q6audio_aio *audio = file->private_data;
+	int rc = 0;
+
+	switch (cmd) {
+	case AUDIO_START: {
+		rc = audio_ioctl_shared(file, cmd, (void *)arg);
+		break;
+	}
+	case AUDIO_GET_AMRWBPLUS_CONFIG_V2: {
+		if ((audio) && (arg) && (audio->codec_cfg)) {
+			if (copy_to_user((void *)arg, audio->codec_cfg,
+				sizeof(struct msm_audio_amrwbplus_config_v2))) {
+				rc = -EFAULT;
+				pr_err("%s: copy_to_user for AUDIO_GET_AMRWBPLUS_CONFIG_V2 failed\n",
+					__func__);
+				break;
+			}
+		} else {
+			pr_err("%s: wb+ config v2 invalid parameters\n",
+				__func__);
+			rc = -EFAULT;
+			break;
+		}
+		break;
+	}
+	case AUDIO_SET_AMRWBPLUS_CONFIG_V2: {
+		if ((audio) && (arg) && (audio->codec_cfg)) {
+			if (copy_from_user(audio->codec_cfg, (void *)arg,
+				sizeof(struct msm_audio_amrwbplus_config_v2))) {
+				rc = -EFAULT;
+				pr_err("%s: copy_from_user for AUDIO_SET_AMRWBPLUS_CONFIG_V2 failed\n",
+					__func__);
+				break;
+			}
+		} else {
+			pr_err("%s: wb+ config invalid parameters\n",
+				__func__);
+			rc = -EFAULT;
+			break;
+		}
+		break;
+	}
+	default: {
+		pr_debug("%s[%pK]: Calling utils ioctl\n", __func__, audio);
+		rc = audio->codec_ioctl(file, cmd, arg);
+		break;
+	}
+	}
+	return rc;
+}
+#ifdef CONFIG_COMPAT
+struct msm_audio_amrwbplus_config_v2_32 {
+	u32 size_bytes;
+	u32 version;
+	u32 num_channels;
+	u32 amr_band_mode;
+	u32 amr_dtx_mode;
+	u32 amr_frame_fmt;
+	u32 amr_lsf_idx;
+};
+
+enum {
+	AUDIO_GET_AMRWBPLUS_CONFIG_V2_32 = _IOR(AUDIO_IOCTL_MAGIC,
+		(AUDIO_MAX_COMMON_IOCTL_NUM+2),
+		struct msm_audio_amrwbplus_config_v2_32),
+	AUDIO_SET_AMRWBPLUS_CONFIG_V2_32 = _IOW(AUDIO_IOCTL_MAGIC,
+		(AUDIO_MAX_COMMON_IOCTL_NUM+3),
+		struct msm_audio_amrwbplus_config_v2_32)
+};
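+
+/*
+ * _IOR()/_IOW() fold sizeof(<struct>) into the ioctl command number, so
+ * the _32 variants above match the command values a 32-bit userspace
+ * derives from its own struct layout; the handler below then copies
+ * field by field so the native and compat layouts stay independent
+ * (rationale assumed from the usual compat-ioctl pattern).
+ */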
+
+static long audio_compat_ioctl(struct file *file, unsigned int cmd,
+					unsigned long arg)
+{
+	struct q6audio_aio *audio = file->private_data;
+	int rc = 0;
+
+	switch (cmd) {
+	case AUDIO_START: {
+		rc = audio_ioctl_shared(file, cmd, (void *)arg);
+		break;
+	}
+	case AUDIO_GET_AMRWBPLUS_CONFIG_V2_32: {
+		if (audio && arg && (audio->codec_cfg)) {
+			struct msm_audio_amrwbplus_config_v2 *amrwbplus_config;
+			struct msm_audio_amrwbplus_config_v2_32
+						amrwbplus_config_32;
+
+			memset(&amrwbplus_config_32, 0,
+					sizeof(amrwbplus_config_32));
+
+			amrwbplus_config =
+				(struct msm_audio_amrwbplus_config_v2 *)
+				audio->codec_cfg;
+			amrwbplus_config_32.size_bytes =
+					amrwbplus_config->size_bytes;
+			amrwbplus_config_32.version =
+					amrwbplus_config->version;
+			amrwbplus_config_32.num_channels =
+					amrwbplus_config->num_channels;
+			amrwbplus_config_32.amr_band_mode =
+					amrwbplus_config->amr_band_mode;
+			amrwbplus_config_32.amr_dtx_mode =
+					amrwbplus_config->amr_dtx_mode;
+			amrwbplus_config_32.amr_frame_fmt =
+					amrwbplus_config->amr_frame_fmt;
+			amrwbplus_config_32.amr_lsf_idx =
+					amrwbplus_config->amr_lsf_idx;
+
+			if (copy_to_user((void *)arg, &amrwbplus_config_32,
+				sizeof(amrwbplus_config_32))) {
+				rc = -EFAULT;
+				pr_err("%s: copy_to_user for AUDIO_GET_AMRWBPLUS_CONFIG_V2_32 failed\n"
+					, __func__);
+			}
+		} else {
+			pr_err("%s: wb+ Get config v2 invalid parameters\n"
+				, __func__);
+			rc = -EFAULT;
+		}
+		break;
+	}
+	case AUDIO_SET_AMRWBPLUS_CONFIG_V2_32: {
+		if ((audio) && (arg) && (audio->codec_cfg)) {
+			struct msm_audio_amrwbplus_config_v2 *amrwbplus_config;
+			struct msm_audio_amrwbplus_config_v2_32
+							amrwbplus_config_32;
+
+			if (copy_from_user(&amrwbplus_config_32, (void *)arg,
+			sizeof(struct msm_audio_amrwbplus_config_v2_32))) {
+				rc = -EFAULT;
+				pr_err("%s: copy_from_user for AUDIO_SET_AMRWBPLUS_CONFIG_V2_32 failed\n"
+					, __func__);
+				break;
+			}
+			amrwbplus_config =
+			 (struct msm_audio_amrwbplus_config_v2 *)
+						audio->codec_cfg;
+			amrwbplus_config->size_bytes =
+					amrwbplus_config_32.size_bytes;
+			amrwbplus_config->version =
+					amrwbplus_config_32.version;
+			amrwbplus_config->num_channels =
+					amrwbplus_config_32.num_channels;
+			amrwbplus_config->amr_band_mode =
+					amrwbplus_config_32.amr_band_mode;
+			amrwbplus_config->amr_dtx_mode =
+					amrwbplus_config_32.amr_dtx_mode;
+			amrwbplus_config->amr_frame_fmt =
+					amrwbplus_config_32.amr_frame_fmt;
+			amrwbplus_config->amr_lsf_idx =
+					amrwbplus_config_32.amr_lsf_idx;
+		} else {
+			pr_err("%s: wb+ config invalid parameters\n",
+				__func__);
+			rc = -EFAULT;
+		}
+		break;
+	}
+	default: {
+		pr_debug("%s[%pK]: Calling utils ioctl\n", __func__, audio);
+		rc = audio->codec_compat_ioctl(file, cmd, arg);
+		break;
+	}
+	}
+	return rc;
+}
+#else
+#define audio_compat_ioctl NULL
+#endif
+
+static int audio_open(struct inode *inode, struct file *file)
+{
+	struct q6audio_aio *audio = NULL;
+	int rc = 0;
+
+	audio = kzalloc(sizeof(struct q6audio_aio), GFP_KERNEL);
+
+	if (audio == NULL) {
+		pr_err("kzalloc failed for amrwb+ decode driver\n");
+		return -ENOMEM;
+	}
+	audio->codec_cfg =
+	kzalloc(sizeof(struct msm_audio_amrwbplus_config_v2), GFP_KERNEL);
+	if (audio->codec_cfg == NULL) {
+		pr_err("%s:failed kzalloc for amrwb+ config structure",
+			__func__);
+		kfree(audio);
+		return -ENOMEM;
+	}
+	audio->pcm_cfg.buffer_size = PCM_BUFSZ_MIN;
+	audio->miscdevice = &audio_amrwbplus_misc;
+	audio->wakelock_voted = false;
+	audio->audio_ws_mgr = &audio_amrwbplus_ws_mgr;
+
+	audio->ac =
+	q6asm_audio_client_alloc((app_cb) q6_audio_cb, (void *)audio);
+
+	if (!audio->ac) {
+		pr_err("Could not allocate memory for audio client\n");
+		kfree(audio->codec_cfg);
+		kfree(audio);
+		return -ENOMEM;
+	}
+	rc = audio_aio_open(audio, file);
+	if (rc < 0) {
+		pr_err("%s: audio_aio_open rc=%d\n",
+			__func__, rc);
+		goto fail;
+	}
+
+	/* open in T/NT mode */
+	if ((file->f_mode & FMODE_WRITE) && (file->f_mode & FMODE_READ)) {
+		rc = q6asm_open_read_write(audio->ac, FORMAT_LINEAR_PCM,
+					FORMAT_AMR_WB_PLUS);
+		if (rc < 0) {
+			pr_err("amrwbplus NT mode Open failed rc=%d\n", rc);
+			rc = -ENODEV;
+			goto fail;
+		}
+		audio->feedback = NON_TUNNEL_MODE;
+		audio->buf_cfg.frames_per_buf = 0x01;
+		audio->buf_cfg.meta_info_enable = 0x01;
+	} else if ((file->f_mode & FMODE_WRITE) &&
+			!(file->f_mode & FMODE_READ)) {
+		rc = q6asm_open_write(audio->ac, FORMAT_AMR_WB_PLUS);
+		if (rc < 0) {
+			pr_err("wb+ T mode Open failed rc=%d\n", rc);
+			rc = -ENODEV;
+			goto fail;
+		}
+		audio->feedback = TUNNEL_MODE;
+		audio->buf_cfg.meta_info_enable = 0x00;
+	} else {
+		pr_err("audio_amrwbplus Not supported mode\n");
+		rc = -EACCES;
+		goto fail;
+	}
+
+	config_debug_fs(audio);
+	pr_debug("%s: AMRWBPLUS dec success mode[%d]session[%d]\n", __func__,
+		audio->feedback,
+		audio->ac->session);
+	return 0;
+fail:
+	q6asm_audio_client_free(audio->ac);
+	kfree(audio->codec_cfg);
+	kfree(audio);
+	return rc;
+}
+
+static const struct file_operations audio_amrwbplus_fops = {
+	.owner = THIS_MODULE,
+	.open = audio_open,
+	.release = audio_aio_release,
+	.unlocked_ioctl = audio_ioctl,
+	.fsync = audio_aio_fsync,
+	.compat_ioctl = audio_compat_ioctl
+};
+
+static struct miscdevice audio_amrwbplus_misc = {
+	.minor = MISC_DYNAMIC_MINOR,
+	.name = "msm_amrwbplus",
+	.fops = &audio_amrwbplus_fops,
+};
+
+static int __init audio_amrwbplus_init(void)
+{
+	int ret = misc_register(&audio_amrwbplus_misc);
+
+	if (ret == 0)
+		device_init_wakeup(audio_amrwbplus_misc.this_device, true);
+	audio_amrwbplus_ws_mgr.ref_cnt = 0;
+	mutex_init(&audio_amrwbplus_ws_mgr.ws_lock);
+
+	return ret;
+}
+
+device_initcall(audio_amrwbplus_init);
diff -Nruw linux-4.4.115-fbx/drivers/misc/qcom./qdsp6v2/audio_ape.c linux-4.4.115-fbx/drivers/misc/qcom/qdsp6v2/audio_ape.c
--- linux-4.4.115-fbx/drivers/misc/qcom./qdsp6v2/audio_ape.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/misc/qcom/qdsp6v2/audio_ape.c	2019-01-22 16:16:24.743257635 +0100
@@ -0,0 +1,361 @@
+/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+*
+* This program is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License version 2 and
+* only version 2 as published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+* GNU General Public License for more details.
+*
+*/
+
+#include <linux/types.h>
+#include <linux/msm_audio_ape.h>
+#include <linux/compat.h>
+#include "audio_utils_aio.h"
+
+static struct miscdevice audio_ape_misc;
+static struct ws_mgr audio_ape_ws_mgr;
+
+static const struct file_operations audio_ape_debug_fops = {
+	.read = audio_aio_debug_read,
+	.open = audio_aio_debug_open,
+};
+static struct dentry *config_debugfs_create_file(const char *name, void *data)
+{
+	return debugfs_create_file(name, S_IFREG | S_IRUGO,
+			NULL, (void *)data, &audio_ape_debug_fops);
+}
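+
+/*
+ * No CONFIG_DEBUG_FS guard is needed around this helper: when debugfs
+ * is compiled out, debugfs_create_file() is a stub that returns
+ * ERR_PTR(-ENODEV), which the IS_ERR_OR_NULL() check in audio_open()
+ * handles gracefully.
+ */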
+
+static long audio_ioctl_shared(struct file *file, unsigned int cmd,
+						void *arg)
+{
+	struct q6audio_aio *audio = file->private_data;
+	int rc = 0;
+
+	switch (cmd) {
+	case AUDIO_START: {
+		struct asm_ape_cfg ape_cfg;
+		struct msm_audio_ape_config *ape_config;
+		pr_debug("%s[%pK]: AUDIO_START session_id[%d]\n", __func__,
+						audio, audio->ac->session);
+		if (audio->feedback == NON_TUNNEL_MODE) {
+			/* Configure PCM output block */
+			rc = q6asm_enc_cfg_blk_pcm(audio->ac,
+					audio->pcm_cfg.sample_rate,
+					audio->pcm_cfg.channel_count);
+			if (rc < 0) {
+				pr_err("pcm output block config failed\n");
+				break;
+			}
+		}
+		ape_config = (struct msm_audio_ape_config *)audio->codec_cfg;
+		ape_cfg.compatible_version = ape_config->compatibleVersion;
+		ape_cfg.compression_level = ape_config->compressionLevel;
+		ape_cfg.format_flags = ape_config->formatFlags;
+		ape_cfg.blocks_per_frame = ape_config->blocksPerFrame;
+		ape_cfg.final_frame_blocks = ape_config->finalFrameBlocks;
+		ape_cfg.total_frames = ape_config->totalFrames;
+		ape_cfg.bits_per_sample = ape_config->bitsPerSample;
+		ape_cfg.num_channels = ape_config->numChannels;
+		ape_cfg.sample_rate = ape_config->sampleRate;
+		ape_cfg.seek_table_present = ape_config->seekTablePresent;
+		pr_debug("%s: compatibleVersion %d compressionLevel %d formatFlags %d blocksPerFrame %d finalFrameBlocks %d totalFrames %d bitsPerSample %d numChannels %d sampleRate %d seekTablePresent %d\n",
+				__func__, ape_config->compatibleVersion,
+				ape_config->compressionLevel,
+				ape_config->formatFlags,
+				ape_config->blocksPerFrame,
+				ape_config->finalFrameBlocks,
+				ape_config->totalFrames,
+				ape_config->bitsPerSample,
+				ape_config->numChannels,
+				ape_config->sampleRate,
+				ape_config->seekTablePresent);
+		/* Configure Media format block */
+		rc = q6asm_media_format_block_ape(audio->ac, &ape_cfg,
+							audio->ac->stream_id);
+		if (rc < 0) {
+			pr_err("cmd media format block failed\n");
+			break;
+		}
+		rc = audio_aio_enable(audio);
+		audio->eos_rsp = 0;
+		audio->eos_flag = 0;
+		if (!rc) {
+			audio->enabled = 1;
+		} else {
+			audio->enabled = 0;
+			pr_err("Audio Start procedure failed rc=%d\n", rc);
+			break;
+		}
+		pr_debug("AUDIO_START success enable[%d]\n", audio->enabled);
+		if (audio->stopped == 1)
+			audio->stopped = 0;
+		break;
+	}
+	default:
+		pr_err("%s: Unknown ioctl cmd = %d", __func__, cmd);
+		break;
+	}
+	return rc;
+}
+
+static long audio_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+	struct q6audio_aio *audio = file->private_data;
+	int rc = 0;
+
+	switch (cmd) {
+	case AUDIO_START: {
+		rc = audio_ioctl_shared(file, cmd, (void *)arg);
+		break;
+	}
+	case AUDIO_GET_APE_CONFIG: {
+		if (copy_to_user((void *)arg, audio->codec_cfg,
+			sizeof(struct msm_audio_ape_config))) {
+			pr_err("%s:copy_to_user for AUDIO_GET_APE_CONFIG failed\n",
+				__func__);
+			rc = -EFAULT;
+			break;
+		}
+		break;
+	}
+	case AUDIO_SET_APE_CONFIG: {
+		if (copy_from_user(audio->codec_cfg, (void *)arg,
+			sizeof(struct msm_audio_ape_config))) {
+			pr_err("%s:copy_from_user for AUDIO_SET_APE_CONFIG failed\n",
+				__func__);
+			rc = -EFAULT;
+			break;
+		}
+		break;
+	}
+	default: {
+		pr_debug("%s[%pK]: Calling utils ioctl\n", __func__, audio);
+		rc = audio->codec_ioctl(file, cmd, arg);
+		if (rc)
+			pr_err("Failed in utils_ioctl: %d\n", rc);
+		break;
+	}
+	}
+	return rc;
+}
+
+#ifdef CONFIG_COMPAT
+struct msm_audio_ape_config_32 {
+	u16 compatibleVersion;
+	u16 compressionLevel;
+	u32 formatFlags;
+	u32 blocksPerFrame;
+	u32 finalFrameBlocks;
+	u32 totalFrames;
+	u16 bitsPerSample;
+	u16 numChannels;
+	u32 sampleRate;
+	u32 seekTablePresent;
+};
+
+enum {
+	AUDIO_GET_APE_CONFIG_32 =  _IOR(AUDIO_IOCTL_MAGIC,
+	(AUDIO_MAX_COMMON_IOCTL_NUM+0), struct msm_audio_ape_config_32),
+	AUDIO_SET_APE_CONFIG_32 =  _IOW(AUDIO_IOCTL_MAGIC,
+	(AUDIO_MAX_COMMON_IOCTL_NUM+1), struct msm_audio_ape_config_32)
+};
+
+static long audio_compat_ioctl(struct file *file, unsigned int cmd,
+						unsigned long arg)
+{
+	struct q6audio_aio *audio = file->private_data;
+	int rc = 0;
+
+	switch (cmd) {
+	case AUDIO_START: {
+		rc = audio_ioctl_shared(file, cmd, (void *)arg);
+		break;
+	}
+	case AUDIO_GET_APE_CONFIG_32: {
+		struct msm_audio_ape_config *ape_config;
+		struct msm_audio_ape_config_32 ape_config_32;
+
+		memset(&ape_config_32, 0, sizeof(ape_config_32));
+
+		ape_config = (struct msm_audio_ape_config *)audio->codec_cfg;
+		ape_config_32.compatibleVersion = ape_config->compatibleVersion;
+		ape_config_32.compressionLevel =
+				ape_config->compressionLevel;
+		ape_config_32.formatFlags = ape_config->formatFlags;
+		ape_config_32.blocksPerFrame = ape_config->blocksPerFrame;
+		ape_config_32.finalFrameBlocks = ape_config->finalFrameBlocks;
+		ape_config_32.totalFrames = ape_config->totalFrames;
+		ape_config_32.bitsPerSample = ape_config->bitsPerSample;
+		ape_config_32.numChannels = ape_config->numChannels;
+		ape_config_32.sampleRate = ape_config->sampleRate;
+		ape_config_32.seekTablePresent = ape_config->seekTablePresent;
+
+		if (copy_to_user((void *)arg, &ape_config_32,
+			sizeof(ape_config_32))) {
+			pr_err("%s: copy_to_user for GET_APE_CONFIG_32 failed\n",
+				 __func__);
+			rc = -EFAULT;
+			break;
+		}
+		break;
+	}
+	case AUDIO_SET_APE_CONFIG_32: {
+		struct msm_audio_ape_config *ape_config;
+		struct msm_audio_ape_config_32 ape_config_32;
+
+		if (copy_from_user(&ape_config_32, (void *)arg,
+			sizeof(ape_config_32))) {
+			pr_err("%s: copy_from_user for SET_APE_CONFIG_32 failed\n"
+				, __func__);
+			rc = -EFAULT;
+			break;
+		}
+		ape_config = (struct msm_audio_ape_config *)audio->codec_cfg;
+		ape_config->compatibleVersion = ape_config_32.compatibleVersion;
+		ape_config->compressionLevel =
+				ape_config_32.compressionLevel;
+		ape_config->formatFlags = ape_config_32.formatFlags;
+		ape_config->blocksPerFrame = ape_config_32.blocksPerFrame;
+		ape_config->finalFrameBlocks = ape_config_32.finalFrameBlocks;
+		ape_config->totalFrames = ape_config_32.totalFrames;
+		ape_config->bitsPerSample = ape_config_32.bitsPerSample;
+		ape_config->numChannels = ape_config_32.numChannels;
+		ape_config->sampleRate = ape_config_32.sampleRate;
+		ape_config->seekTablePresent = ape_config_32.seekTablePresent;
+
+		break;
+	}
+	default: {
+		pr_debug("%s[%pK]: Calling utils ioctl\n", __func__, audio);
+		rc = audio->codec_compat_ioctl(file, cmd, arg);
+		if (rc)
+			pr_err("Failed in utils_ioctl: %d\n", rc);
+		break;
+	}
+	}
+	return rc;
+}
+#else
+#define audio_compat_ioctl NULL
+#endif
+
+static int audio_open(struct inode *inode, struct file *file)
+{
+	struct q6audio_aio *audio = NULL;
+	int rc = 0;
+
+	/* 4 bytes for the decoder number, 1 byte for the string terminator */
+	char name[sizeof "msm_ape_" + 5];
+	audio = kzalloc(sizeof(struct q6audio_aio), GFP_KERNEL);
+
+	if (!audio) {
+		pr_err("Could not allocate memory for ape decode driver\n");
+		return -ENOMEM;
+	}
+	audio->codec_cfg = kzalloc(sizeof(struct msm_audio_ape_config),
+					GFP_KERNEL);
+	if (!audio->codec_cfg) {
+		pr_err("%s:Could not allocate memory for ape config\n",
+			__func__);
+		kfree(audio);
+		return -ENOMEM;
+	}
+
+	audio->pcm_cfg.buffer_size = PCM_BUFSZ_MIN;
+	audio->miscdevice = &audio_ape_misc;
+	audio->wakelock_voted = false;
+	audio->audio_ws_mgr = &audio_ape_ws_mgr;
+
+	audio->ac = q6asm_audio_client_alloc((app_cb) q6_audio_cb,
+					     (void *)audio);
+
+	if (!audio->ac) {
+		pr_err("Could not allocate memory for audio client\n");
+		kfree(audio->codec_cfg);
+		kfree(audio);
+		return -ENOMEM;
+	}
+	rc = audio_aio_open(audio, file);
+	if (rc < 0) {
+		pr_err("%s: audio_aio_open rc=%d\n",
+			__func__, rc);
+		goto fail;
+	}
+	/* open in T/NT mode */
+	if ((file->f_mode & FMODE_WRITE) && (file->f_mode & FMODE_READ)) {
+		rc = q6asm_open_read_write(audio->ac, FORMAT_LINEAR_PCM,
+					   FORMAT_APE);
+		if (rc < 0) {
+			pr_err("NT mode Open failed rc=%d\n", rc);
+			rc = -ENODEV;
+			goto fail;
+		}
+		audio->feedback = NON_TUNNEL_MODE;
+		/* open APE decoder; expected frames per buffer is always 1 */
+		audio->buf_cfg.frames_per_buf = 0x01;
+		audio->buf_cfg.meta_info_enable = 0x01;
+	} else if ((file->f_mode & FMODE_WRITE) &&
+			!(file->f_mode & FMODE_READ)) {
+		rc = q6asm_open_write(audio->ac, FORMAT_APE);
+		if (rc < 0) {
+			pr_err("T mode Open failed rc=%d\n", rc);
+			rc = -ENODEV;
+			goto fail;
+		}
+		audio->feedback = TUNNEL_MODE;
+		audio->buf_cfg.meta_info_enable = 0x00;
+	} else {
+		pr_err("Not supported mode\n");
+		rc = -EACCES;
+		goto fail;
+	}
+
+	snprintf(name, sizeof(name), "msm_ape_%04x", audio->ac->session);
+	audio->dentry = config_debugfs_create_file(name, (void *)audio);
+
+	if (IS_ERR_OR_NULL(audio->dentry))
+		pr_debug("debugfs_create_file failed\n");
+	pr_debug("%s:apedec success mode[%d]session[%d]\n", __func__,
+						audio->feedback,
+						audio->ac->session);
+	return rc;
+fail:
+	q6asm_audio_client_free(audio->ac);
+	kfree(audio->codec_cfg);
+	kfree(audio);
+	return rc;
+}
+
+static const struct file_operations audio_ape_fops = {
+	.owner = THIS_MODULE,
+	.open = audio_open,
+	.release = audio_aio_release,
+	.unlocked_ioctl = audio_ioctl,
+	.fsync = audio_aio_fsync,
+	.compat_ioctl = audio_compat_ioctl
+};
+
+static struct miscdevice audio_ape_misc = {
+	.minor = MISC_DYNAMIC_MINOR,
+	.name = "msm_ape",
+	.fops = &audio_ape_fops,
+};
+
+static int __init audio_ape_init(void)
+{
+	int ret = misc_register(&audio_ape_misc);
+
+	if (ret == 0)
+		device_init_wakeup(audio_ape_misc.this_device, true);
+	audio_ape_ws_mgr.ref_cnt = 0;
+	mutex_init(&audio_ape_ws_mgr.ws_lock);
+
+	return ret;
+}
+
+device_initcall(audio_ape_init);
diff -Nruw linux-4.4.115-fbx/drivers/misc/qcom./qdsp6v2/audio_evrc.c linux-4.4.115-fbx/drivers/misc/qcom/qdsp6v2/audio_evrc.c
--- linux-4.4.115-fbx/drivers/misc/qcom./qdsp6v2/audio_evrc.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/misc/qcom/qdsp6v2/audio_evrc.c	2019-01-22 16:16:24.743257635 +0100
@@ -0,0 +1,186 @@
+/* evrc audio output device
+ *
+ * Copyright (C) 2008 Google, Inc.
+ * Copyright (C) 2008 HTC Corporation
+ * Copyright (c) 2011-2016, The Linux Foundation. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include "audio_utils_aio.h"
+
+static struct miscdevice audio_evrc_misc;
+static struct ws_mgr audio_evrc_ws_mgr;
+
+#ifdef CONFIG_DEBUG_FS
+static const struct file_operations audio_evrc_debug_fops = {
+	.read = audio_aio_debug_read,
+	.open = audio_aio_debug_open,
+};
+#endif
+
+static long audio_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+	struct q6audio_aio *audio = file->private_data;
+	int rc = 0;
+
+	switch (cmd) {
+	case AUDIO_START: {
+		pr_debug("%s[%pK]: AUDIO_START session_id[%d]\n", __func__,
+						audio, audio->ac->session);
+		if (audio->feedback == NON_TUNNEL_MODE) {
+			/* Configure PCM output block */
+			rc = q6asm_enc_cfg_blk_pcm(audio->ac,
+					audio->pcm_cfg.sample_rate,
+					audio->pcm_cfg.channel_count);
+			if (rc < 0) {
+				pr_err("pcm output block config failed\n");
+				break;
+			}
+		}
+
+		rc = audio_aio_enable(audio);
+		audio->eos_rsp = 0;
+		audio->eos_flag = 0;
+		if (!rc) {
+			audio->enabled = 1;
+		} else {
+			audio->enabled = 0;
+			pr_err("Audio Start procedure failed rc=%d\n", rc);
+			break;
+		}
+		pr_debug("%s: AUDIO_START sessionid[%d]enable[%d]\n", __func__,
+						audio->ac->session,
+						audio->enabled);
+		if (audio->stopped == 1)
+			audio->stopped = 0;
+		break;
+	}
+	default:
+		pr_debug("%s[%pK]: Calling utils ioctl\n", __func__, audio);
+		rc = audio->codec_ioctl(file, cmd, arg);
+	}
+	return rc;
+}
+
+static int audio_open(struct inode *inode, struct file *file)
+{
+	struct q6audio_aio *audio = NULL;
+	int rc = 0;
+
+#ifdef CONFIG_DEBUG_FS
+	/* 4 bytes for the decoder number, 1 byte for the string terminator */
+	char name[sizeof "msm_evrc_" + 5];
+#endif
+	audio = kzalloc(sizeof(struct q6audio_aio), GFP_KERNEL);
+
+	if (audio == NULL) {
+		pr_err("Could not allocate memory for aac decode driver\n");
+		return -ENOMEM;
+	}
+
+	/* Settings will be re-config at AUDIO_SET_CONFIG,
+	 * but at least we need to have initial config
+	 */
+	audio->pcm_cfg.buffer_size = PCM_BUFSZ_MIN;
+	audio->miscdevice = &audio_evrc_misc;
+	audio->wakelock_voted = false;
+	audio->audio_ws_mgr = &audio_evrc_ws_mgr;
+
+	audio->ac = q6asm_audio_client_alloc((app_cb) q6_audio_cb,
+					     (void *)audio);
+
+	if (!audio->ac) {
+		pr_err("Could not allocate memory for audio client\n");
+		kfree(audio);
+		return -ENOMEM;
+	}
+	rc = audio_aio_open(audio, file);
+	if (rc < 0) {
+		pr_err("%s: audio_aio_open rc=%d\n",
+			__func__, rc);
+		goto fail;
+	}
+
+	/* open in T/NT mode */
+	if ((file->f_mode & FMODE_WRITE) && (file->f_mode & FMODE_READ)) {
+		rc = q6asm_open_read_write(audio->ac, FORMAT_LINEAR_PCM,
+					   FORMAT_EVRC);
+		if (rc < 0) {
+			pr_err("NT mode Open failed rc=%d\n", rc);
+			rc = -ENODEV;
+			goto fail;
+		}
+		audio->feedback = NON_TUNNEL_MODE;
+		audio->buf_cfg.frames_per_buf = 0x01;
+		audio->buf_cfg.meta_info_enable = 0x01;
+	} else if ((file->f_mode & FMODE_WRITE) &&
+			!(file->f_mode & FMODE_READ)) {
+		rc = q6asm_open_write(audio->ac, FORMAT_EVRC);
+		if (rc < 0) {
+			pr_err("T mode Open failed rc=%d\n", rc);
+			rc = -ENODEV;
+			goto fail;
+		}
+		audio->feedback = TUNNEL_MODE;
+		audio->buf_cfg.meta_info_enable = 0x00;
+	} else {
+		pr_err("Not supported mode\n");
+		rc = -EACCES;
+		goto fail;
+	}
+
+#ifdef CONFIG_DEBUG_FS
+	snprintf(name, sizeof name, "msm_evrc_%04x", audio->ac->session);
+	audio->dentry = debugfs_create_file(name, S_IFREG | S_IRUGO,
+					    NULL, (void *)audio,
+					    &audio_evrc_debug_fops);
+
+	if (IS_ERR(audio->dentry))
+		pr_debug("debugfs_create_file failed\n");
+#endif
+	pr_info("%s:dec success mode[%d]session[%d]\n", __func__,
+						audio->feedback,
+						audio->ac->session);
+	return rc;
+fail:
+	q6asm_audio_client_free(audio->ac);
+	kfree(audio);
+	return rc;
+}
+
+static const struct file_operations audio_evrc_fops = {
+	.owner = THIS_MODULE,
+	.open = audio_open,
+	.release = audio_aio_release,
+	.unlocked_ioctl = audio_ioctl,
+	.fsync = audio_aio_fsync,
+};
+
+static struct miscdevice audio_evrc_misc = {
+	.minor = MISC_DYNAMIC_MINOR,
+	.name = "msm_evrc",
+	.fops = &audio_evrc_fops,
+};
+
+static int __init audio_evrc_init(void)
+{
+	int ret = misc_register(&audio_evrc_misc);
+
+	if (ret == 0)
+		device_init_wakeup(audio_evrc_misc.this_device, true);
+	audio_evrc_ws_mgr.ref_cnt = 0;
+	mutex_init(&audio_evrc_ws_mgr.ws_lock);
+
+	return ret;
+}
+
+device_initcall(audio_evrc_init);
diff -Nruw linux-4.4.115-fbx/drivers/misc/qcom./qdsp6v2/audio_g711alaw.c linux-4.4.115-fbx/drivers/misc/qcom/qdsp6v2/audio_g711alaw.c
--- linux-4.4.115-fbx/drivers/misc/qcom./qdsp6v2/audio_g711alaw.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/misc/qcom/qdsp6v2/audio_g711alaw.c	2019-01-22 16:16:24.747257672 +0100
@@ -0,0 +1,396 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+*
+* This program is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License version 2 and
+* only version 2 as published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+* GNU General Public License for more details.
+*
+*/
+
+#include <linux/types.h>
+#include <linux/msm_audio_g711_dec.h>
+#include <linux/compat.h>
+#include "audio_utils_aio.h"
+
+static struct miscdevice audio_g711alaw_misc;
+static struct ws_mgr audio_g711_ws_mgr;
+
+static const struct file_operations audio_g711_debug_fops = {
+	.read = audio_aio_debug_read,
+	.open = audio_aio_debug_open,
+};
+
+static struct dentry *config_debugfs_create_file(const char *name, void *data)
+{
+	return debugfs_create_file(name, S_IFREG | S_IRUGO,
+				NULL, (void *)data, &audio_g711_debug_fops);
+}
+
+static int g711_channel_map(u8 *channel_mapping, uint32_t channels);
+
+static long audio_ioctl_shared(struct file *file, unsigned int cmd,
+						void *arg)
+{
+	struct q6audio_aio *audio = file->private_data;
+	int rc = 0;
+
+	switch (cmd) {
+	case AUDIO_START: {
+		struct asm_g711_dec_cfg g711_dec_cfg;
+		struct msm_audio_g711_dec_config *g711_dec_config;
+		u8 channel_mapping[PCM_FORMAT_MAX_NUM_CHANNEL];
+
+		memset(channel_mapping, 0, PCM_FORMAT_MAX_NUM_CHANNEL);
+		memset(&g711_dec_cfg, 0, sizeof(g711_dec_cfg));
+
+		if (g711_channel_map(channel_mapping,
+			audio->pcm_cfg.channel_count)) {
+			pr_err("%s: setting channel map failed %d\n",
+					__func__, audio->pcm_cfg.channel_count);
+		}
+
+		pr_debug("%s[%pK]: AUDIO_START session_id[%d]\n", __func__,
+						audio, audio->ac->session);
+		if (audio->feedback == NON_TUNNEL_MODE) {
+			/* Configure PCM output block */
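+			/*
+			 * The two booleans below select the default channel
+			 * map and the "back flavor" channel ordering in the
+			 * v2 encoder-config API; both stay false because the
+			 * explicit map built by g711_channel_map() is passed
+			 * instead (parameter meaning assumed from the q6asm
+			 * v2 signature).
+			 */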
+			rc = q6asm_enc_cfg_blk_pcm_v2(audio->ac,
+					audio->pcm_cfg.sample_rate,
+					audio->pcm_cfg.channel_count,
+					16, /*bits per sample*/
+					false, false, channel_mapping);
+			if (rc < 0) {
+				pr_err("%s: pcm output block config failed rc=%d\n",
+						 __func__, rc);
+				break;
+			}
+		}
+		g711_dec_config =
+			(struct msm_audio_g711_dec_config *)audio->codec_cfg;
+		g711_dec_cfg.sample_rate = g711_dec_config->sample_rate;
+		/* Configure Media format block */
+		rc = q6asm_media_format_block_g711(audio->ac, &g711_dec_cfg,
+							audio->ac->stream_id);
+		if (rc < 0) {
+			pr_err("%s: cmd media format block failed rc=%d\n",
+				__func__, rc);
+			break;
+		}
+		rc = audio_aio_enable(audio);
+		audio->eos_rsp = 0;
+		audio->eos_flag = 0;
+		if (!rc) {
+			audio->enabled = 1;
+		} else {
+			audio->enabled = 0;
+			pr_err("%s: Audio Start procedure failed rc=%d\n",
+						__func__, rc);
+			break;
+		}
+		pr_debug("%s: AUDIO_START success enable[%d]\n",
+					 __func__, audio->enabled);
+		if (audio->stopped == 1)
+			audio->stopped = 0;
+		break;
+	}
+	default:
+		pr_debug("%s: Unknown ioctl cmd = %d", __func__, cmd);
+		break;
+	}
+	return rc;
+}
+
+static long audio_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+	struct q6audio_aio *audio = file->private_data;
+	int rc = 0;
+
+	switch (cmd) {
+	case AUDIO_START: {
+		rc = audio_ioctl_shared(file, cmd, (void *)arg);
+		break;
+	}
+	case AUDIO_GET_G711_DEC_CONFIG: {
+		if (copy_to_user((void *)arg, audio->codec_cfg,
+			sizeof(struct msm_audio_g711_dec_config))) {
+			pr_err("%s: copy_to_user for AUDIO_GET_G711_DEC_CONFIG failed\n",
+				__func__);
+			rc = -EFAULT;
+		}
+		break;
+	}
+	case AUDIO_SET_G711_DEC_CONFIG: {
+		if (copy_from_user(audio->codec_cfg, (void *)arg,
+			sizeof(struct msm_audio_g711_dec_config))) {
+			pr_err("%s: copy_from_user for AUDIO_SET_G711_DEC_CONFIG failed\n",
+				__func__);
+			rc = -EFAULT;
+		}
+		break;
+	}
+	default: {
+		rc = audio->codec_ioctl(file, cmd, arg);
+		if (rc)
+			pr_err("%s: Failed in audio_aio_ioctl: %d cmd=%d\n",
+				__func__, rc, cmd);
+		break;
+	}
+	}
+	return  rc;
+}
+
+#ifdef CONFIG_COMPAT
+struct msm_audio_g711_dec_config_32 {
+	u32 sample_rate;
+};
+
+enum {
+	AUDIO_SET_G711_DEC_CONFIG_32 =  _IOW(AUDIO_IOCTL_MAGIC,
+	(AUDIO_MAX_COMMON_IOCTL_NUM+0), struct msm_audio_g711_dec_config_32),
+	AUDIO_GET_G711_DEC_CONFIG_32 =  _IOR(AUDIO_IOCTL_MAGIC,
+	(AUDIO_MAX_COMMON_IOCTL_NUM+1), struct msm_audio_g711_dec_config_32)
+};
+
+static long audio_compat_ioctl(struct file *file, unsigned int cmd,
+						unsigned long arg)
+{
+	struct q6audio_aio *audio = file->private_data;
+	int rc = 0;
+
+	switch (cmd) {
+	case AUDIO_START: {
+		rc = audio_ioctl_shared(file, cmd, (void *)arg);
+		break;
+	}
+	case AUDIO_GET_G711_DEC_CONFIG_32: {
+		struct msm_audio_g711_dec_config *g711_dec_config;
+		struct msm_audio_g711_dec_config_32 g711_dec_config_32;
+
+		memset(&g711_dec_config_32, 0, sizeof(g711_dec_config_32));
+
+		g711_dec_config =
+			(struct msm_audio_g711_dec_config *)audio->codec_cfg;
+		g711_dec_config_32.sample_rate = g711_dec_config->sample_rate;
+
+		if (copy_to_user((void *)arg, &g711_dec_config_32,
+			sizeof(g711_dec_config_32))) {
+			pr_err("%s: copy_to_user for AUDIO_GET_G711_DEC_CONFIG_32 failed\n",
+				 __func__);
+			rc = -EFAULT;
+		}
+		break;
+	}
+	case AUDIO_SET_G711_DEC_CONFIG_32: {
+		struct msm_audio_g711_dec_config *g711_dec_config;
+		struct msm_audio_g711_dec_config_32 g711_dec_config_32;
+
+		memset(&g711_dec_config_32, 0, sizeof(g711_dec_config_32));
+
+		if (copy_from_user(&g711_dec_config_32, (void *)arg,
+			sizeof(g711_dec_config_32))) {
+			pr_err("%s: copy_from_user for AUDIO_SET_G711_DEC_CONFIG_32 failed\n",
+				__func__);
+			rc = -EFAULT;
+			break;
+		}
+
+		g711_dec_config =
+			(struct msm_audio_g711_dec_config *)audio->codec_cfg;
+		g711_dec_config->sample_rate = g711_dec_config_32.sample_rate;
+
+		break;
+	}
+	default: {
+		rc = audio->codec_compat_ioctl(file, cmd, arg);
+		if (rc)
+			pr_err("%s: Failed in audio_aio_compat_ioctl: %d cmd=%d\n",
+				__func__, rc, cmd);
+		break;
+	}
+	}
+	return rc;
+}
+#else
+#define audio_compat_ioctl NULL
+#endif
+
+static int audio_open(struct inode *inode, struct file *file)
+{
+	struct q6audio_aio *audio = NULL;
+	int rc = 0;
+	/* 4 bytes for the decoder number, 1 byte for the string terminator */
+	char name[sizeof "msm_g711_" + 5];
+
+	audio = kzalloc(sizeof(struct q6audio_aio), GFP_KERNEL);
+
+	if (!audio)
+		return -ENOMEM;
+	audio->codec_cfg = kzalloc(sizeof(struct msm_audio_g711_dec_config),
+					GFP_KERNEL);
+	if (!audio->codec_cfg) {
+		kfree(audio);
+		return -ENOMEM;
+	}
+
+	audio->pcm_cfg.buffer_size = PCM_BUFSZ_MIN;
+	audio->miscdevice = &audio_g711alaw_misc;
+	audio->wakelock_voted = false;
+	audio->audio_ws_mgr = &audio_g711_ws_mgr;
+
+	init_waitqueue_head(&audio->event_wait);
+
+	audio->ac = q6asm_audio_client_alloc((app_cb) q6_audio_cb,
+					     (void *)audio);
+
+	if (!audio->ac) {
+		pr_err("%s: Could not allocate memory for audio client\n",
+					 __func__);
+		kfree(audio->codec_cfg);
+		kfree(audio);
+		return -ENOMEM;
+	}
+	rc = audio_aio_open(audio, file);
+	if (rc < 0) {
+		pr_err("%s: audio_aio_open rc=%d\n",
+			__func__, rc);
+		goto fail;
+	}
+	/* open in T/NT mode; format: G711_ALAW */
+	if ((file->f_mode & FMODE_WRITE) && (file->f_mode & FMODE_READ)) {
+		rc = q6asm_open_read_write(audio->ac, FORMAT_LINEAR_PCM,
+					   FORMAT_G711_ALAW_FS);
+		if (rc < 0) {
+			pr_err("%s: NT mode Open failed rc=%d\n", __func__, rc);
+			goto fail;
+		}
+		audio->feedback = NON_TUNNEL_MODE;
+		/* open G711 decoder; expected frames per buffer is always 1 */
+		audio->buf_cfg.frames_per_buf = 0x01;
+		audio->buf_cfg.meta_info_enable = 0x01;
+	} else if ((file->f_mode & FMODE_WRITE) &&
+			!(file->f_mode & FMODE_READ)) {
+		rc = q6asm_open_write(audio->ac, FORMAT_G711_ALAW_FS);
+		if (rc < 0) {
+			pr_err("%s: T mode Open failed rc=%d\n", __func__, rc);
+			goto fail;
+		}
+		audio->feedback = TUNNEL_MODE;
+		audio->buf_cfg.meta_info_enable = 0x00;
+	} else {
+		pr_err("%s: %d mode is not supported mode\n",
+				__func__, file->f_mode);
+		rc = -EACCES;
+		goto fail;
+	}
+
+	snprintf(name, sizeof(name), "msm_g711_%04x", audio->ac->session);
+	audio->dentry = config_debugfs_create_file(name, (void *)audio);
+
+	if (IS_ERR_OR_NULL(audio->dentry))
+		pr_debug("%s: debugfs_create_file failed\n", __func__);
+	pr_debug("%s: g711dec success mode[%d]session[%d]\n", __func__,
+						audio->feedback,
+						audio->ac->session);
+	return rc;
+fail:
+	q6asm_audio_client_free(audio->ac);
+	kfree(audio->codec_cfg);
+	kfree(audio);
+	return rc;
+}
+
+static int g711_channel_map(u8 *channel_mapping, uint32_t channels)
+{
+	u8 *lchannel_mapping;
+
+	lchannel_mapping = channel_mapping;
+	pr_debug("%s: channels passed: %d\n", __func__, channels);
+	if (channels == 1)  {
+		lchannel_mapping[0] = PCM_CHANNEL_FC;
+	} else if (channels == 2) {
+		lchannel_mapping[0] = PCM_CHANNEL_FL;
+		lchannel_mapping[1] = PCM_CHANNEL_FR;
+	} else if (channels == 3) {
+		lchannel_mapping[0] = PCM_CHANNEL_FC;
+		lchannel_mapping[1] = PCM_CHANNEL_FL;
+		lchannel_mapping[2] = PCM_CHANNEL_FR;
+	} else if (channels == 4) {
+		lchannel_mapping[0] = PCM_CHANNEL_FC;
+		lchannel_mapping[1] = PCM_CHANNEL_FL;
+		lchannel_mapping[2] = PCM_CHANNEL_FR;
+		lchannel_mapping[3] = PCM_CHANNEL_CS;
+	} else if (channels == 5) {
+		lchannel_mapping[0] = PCM_CHANNEL_FC;
+		lchannel_mapping[1] = PCM_CHANNEL_FL;
+		lchannel_mapping[2] = PCM_CHANNEL_FR;
+		lchannel_mapping[3] = PCM_CHANNEL_LS;
+		lchannel_mapping[4] = PCM_CHANNEL_RS;
+	} else if (channels == 6) {
+		lchannel_mapping[0] = PCM_CHANNEL_FC;
+		lchannel_mapping[1] = PCM_CHANNEL_FL;
+		lchannel_mapping[2] = PCM_CHANNEL_FR;
+		lchannel_mapping[3] = PCM_CHANNEL_LS;
+		lchannel_mapping[4] = PCM_CHANNEL_RS;
+		lchannel_mapping[5] = PCM_CHANNEL_LFE;
+	} else if (channels == 7) {
+		lchannel_mapping[0] = PCM_CHANNEL_FC;
+		lchannel_mapping[1] = PCM_CHANNEL_FL;
+		lchannel_mapping[2] = PCM_CHANNEL_FR;
+		lchannel_mapping[3] = PCM_CHANNEL_LS;
+		lchannel_mapping[4] = PCM_CHANNEL_RS;
+		lchannel_mapping[5] = PCM_CHANNEL_CS;
+		lchannel_mapping[6] = PCM_CHANNEL_LFE;
+	} else if (channels == 8) {
+		lchannel_mapping[0] = PCM_CHANNEL_FC;
+		lchannel_mapping[1] = PCM_CHANNEL_FLC;
+		lchannel_mapping[2] = PCM_CHANNEL_FRC;
+		lchannel_mapping[3] = PCM_CHANNEL_FL;
+		lchannel_mapping[4] = PCM_CHANNEL_FR;
+		lchannel_mapping[5] = PCM_CHANNEL_LS;
+		lchannel_mapping[6] = PCM_CHANNEL_RS;
+		lchannel_mapping[7] = PCM_CHANNEL_LFE;
+	} else {
+		pr_err("%s: ERROR.unsupported num_ch = %u\n",
+				__func__, channels);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static const struct file_operations audio_g711_fops = {
+	.owner = THIS_MODULE,
+	.open = audio_open,
+	.release = audio_aio_release,
+	.unlocked_ioctl = audio_ioctl,
+	.compat_ioctl = audio_compat_ioctl,
+	.fsync = audio_aio_fsync,
+};
+
+static struct miscdevice audio_g711alaw_misc = {
+	.minor = MISC_DYNAMIC_MINOR,
+	.name = "msm_g711alaw",
+	.fops = &audio_g711_fops,
+};
+
+static int __init audio_g711alaw_init(void)
+{
+	int ret = misc_register(&audio_g711alaw_misc);
+
+	if (ret == 0)
+		device_init_wakeup(audio_g711alaw_misc.this_device, true);
+	audio_g711_ws_mgr.ref_cnt = 0;
+	mutex_init(&audio_g711_ws_mgr.ws_lock);
+
+	return ret;
+}
+
+static void __exit audio_g711alaw_exit(void)
+{
+	misc_deregister(&audio_g711alaw_misc);
+	mutex_destroy(&audio_g711_ws_mgr.ws_lock);
+}
+
+device_initcall(audio_g711alaw_init);
+__exitcall(audio_g711alaw_exit);
diff -Nruw linux-4.4.115-fbx/drivers/misc/qcom./qdsp6v2/audio_g711mlaw.c linux-4.4.115-fbx/drivers/misc/qcom/qdsp6v2/audio_g711mlaw.c
--- linux-4.4.115-fbx/drivers/misc/qcom./qdsp6v2/audio_g711mlaw.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/misc/qcom/qdsp6v2/audio_g711mlaw.c	2019-01-22 16:16:24.747257672 +0100
@@ -0,0 +1,396 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+*
+* This program is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License version 2 and
+* only version 2 as published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+* GNU General Public License for more details.
+*
+*/
+
+#include <linux/types.h>
+#include <linux/msm_audio_g711_dec.h>
+#include <linux/compat.h>
+#include "audio_utils_aio.h"
+
+static struct miscdevice audio_g711mlaw_misc;
+static struct ws_mgr audio_g711_ws_mgr;
+
+static const struct file_operations audio_g711_debug_fops = {
+	.read = audio_aio_debug_read,
+	.open = audio_aio_debug_open,
+};
+
+static struct dentry *config_debugfs_create_file(const char *name, void *data)
+{
+	return debugfs_create_file(name, S_IFREG | S_IRUGO,
+				NULL, (void *)data, &audio_g711_debug_fops);
+}
+
+static int g711_channel_map(u8 *channel_mapping, uint32_t channels);
+
+static long audio_ioctl_shared(struct file *file, unsigned int cmd,
+						void *arg)
+{
+	struct q6audio_aio *audio = file->private_data;
+	int rc = 0;
+
+	switch (cmd) {
+	case AUDIO_START: {
+		struct asm_g711_dec_cfg g711_dec_cfg;
+		struct msm_audio_g711_dec_config *g711_dec_config;
+		u8 channel_mapping[PCM_FORMAT_MAX_NUM_CHANNEL];
+
+		memset(channel_mapping, 0, PCM_FORMAT_MAX_NUM_CHANNEL);
+		memset(&g711_dec_cfg, 0, sizeof(g711_dec_cfg));
+
+		if (g711_channel_map(channel_mapping,
+			audio->pcm_cfg.channel_count)) {
+			pr_err("%s: setting channel map failed %d\n",
+					__func__, audio->pcm_cfg.channel_count);
+		}
+
+		pr_debug("%s[%pK]: AUDIO_START session_id[%d]\n", __func__,
+						audio, audio->ac->session);
+		if (audio->feedback == NON_TUNNEL_MODE) {
+			/* Configure PCM output block */
+			rc = q6asm_enc_cfg_blk_pcm_v2(audio->ac,
+					audio->pcm_cfg.sample_rate,
+					audio->pcm_cfg.channel_count,
+					16, /*bits per sample*/
+					false, false, channel_mapping);
+			if (rc < 0) {
+				pr_err("%s: pcm output block config failed rc=%d\n",
+						 __func__, rc);
+				break;
+			}
+		}
+		g711_dec_config =
+			(struct msm_audio_g711_dec_config *)audio->codec_cfg;
+		g711_dec_cfg.sample_rate = g711_dec_config->sample_rate;
+		/* Configure Media format block */
+		rc = q6asm_media_format_block_g711(audio->ac, &g711_dec_cfg,
+							audio->ac->stream_id);
+		if (rc < 0) {
+			pr_err("%s: cmd media format block failed rc=%d\n",
+				__func__, rc);
+			break;
+		}
+		rc = audio_aio_enable(audio);
+		audio->eos_rsp = 0;
+		audio->eos_flag = 0;
+		if (!rc) {
+			audio->enabled = 1;
+		} else {
+			audio->enabled = 0;
+			pr_err("%s: Audio Start procedure failed rc=%d\n",
+						__func__, rc);
+			break;
+		}
+		pr_debug("%s: AUDIO_START success enable[%d]\n",
+						__func__, audio->enabled);
+		if (audio->stopped == 1)
+			audio->stopped = 0;
+		break;
+	}
+	default:
+		pr_debug("%s: Unknown ioctl cmd = %d", __func__, cmd);
+		break;
+	}
+	return rc;
+}
+
+static long audio_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+	struct q6audio_aio *audio = file->private_data;
+	int rc = 0;
+
+	switch (cmd) {
+	case AUDIO_START: {
+		rc = audio_ioctl_shared(file, cmd, (void *)arg);
+		break;
+	}
+	case AUDIO_GET_G711_DEC_CONFIG: {
+		if (copy_to_user((void *)arg, audio->codec_cfg,
+			sizeof(struct msm_audio_g711_dec_config))) {
+			pr_err("%s: AUDIO_GET_G711_DEC_CONFIG failed\n",
+				__func__);
+			rc = -EFAULT;
+		}
+		break;
+	}
+	case AUDIO_SET_G711_DEC_CONFIG: {
+		if (copy_from_user(audio->codec_cfg, (void *)arg,
+			sizeof(struct msm_audio_g711_dec_config))) {
+			pr_err("%s: AUDIO_SET_G711_DEC_CONFIG failed\n",
+				__func__);
+			rc = -EFAULT;
+		}
+		break;
+	}
+	default: {
+		rc = audio->codec_ioctl(file, cmd, arg);
+		if (rc)
+			pr_err("%s: Failed in audio_aio_ioctl: %d cmd=%d\n",
+				__func__, rc, cmd);
+		break;
+	}
+	}
+	return  rc;
+}
+
+#ifdef CONFIG_COMPAT
+struct msm_audio_g711_dec_config_32 {
+	u32 sample_rate;
+};
+
+enum {
+	AUDIO_SET_G711_DEC_CONFIG_32 =  _IOW(AUDIO_IOCTL_MAGIC,
+	(AUDIO_MAX_COMMON_IOCTL_NUM+0), struct msm_audio_g711_dec_config_32),
+	AUDIO_GET_G711_DEC_CONFIG_32 =  _IOR(AUDIO_IOCTL_MAGIC,
+	(AUDIO_MAX_COMMON_IOCTL_NUM+1), struct msm_audio_g711_dec_config_32)
+};
+
+static long audio_compat_ioctl(struct file *file, unsigned int cmd,
+						unsigned long arg)
+{
+	struct q6audio_aio *audio = file->private_data;
+	int rc = 0;
+
+	switch (cmd) {
+	case AUDIO_START: {
+		rc = audio_ioctl_shared(file, cmd, (void *)arg);
+		break;
+	}
+	case AUDIO_GET_G711_DEC_CONFIG_32: {
+		struct msm_audio_g711_dec_config *g711_dec_config;
+		struct msm_audio_g711_dec_config_32 g711_dec_config_32;
+
+		memset(&g711_dec_config_32, 0, sizeof(g711_dec_config_32));
+
+		g711_dec_config =
+			(struct msm_audio_g711_dec_config *)audio->codec_cfg;
+		g711_dec_config_32.sample_rate = g711_dec_config->sample_rate;
+
+		if (copy_to_user((void *)arg, &g711_dec_config_32,
+			sizeof(g711_dec_config_32))) {
+			pr_err("%s: copy_to_user for AUDIO_GET_G711_DEC_CONFIG failed\n",
+				 __func__);
+			rc = -EFAULT;
+		}
+		break;
+	}
+	case AUDIO_SET_G711_DEC_CONFIG_32: {
+		struct msm_audio_g711_dec_config *g711_dec_config;
+		struct msm_audio_g711_dec_config_32 g711_dec_config_32;
+
+		memset(&g711_dec_config_32, 0, sizeof(g711_dec_config_32));
+
+		if (copy_from_user(&g711_dec_config_32, (void *)arg,
+			sizeof(g711_dec_config_32))) {
+			pr_err("%s: copy_from_user for AUDIO_SET_G711_DEC_CONFIG failed\n",
+				__func__);
+			rc = -EFAULT;
+			break;
+		}
+		g711_dec_config =
+			(struct msm_audio_g711_dec_config *)audio->codec_cfg;
+		g711_dec_config->sample_rate = g711_dec_config_32.sample_rate;
+
+		break;
+	}
+	default: {
+		rc = audio->codec_compat_ioctl(file, cmd, arg);
+		if (rc)
+			pr_err("%s: Failed in audio_aio_compat_ioctl: %d cmd=%d\n",
+				__func__, rc, cmd);
+		break;
+	}
+	}
+	return rc;
+}
+#else
+#define audio_compat_ioctl NULL
+#endif
+
+static int audio_open(struct inode *inode, struct file *file)
+{
+	struct q6audio_aio *audio = NULL;
+	int rc = 0;
+	/* 4 bytes for the decoder number, 1 byte for the string terminator */
+	char name[sizeof "msm_g711_" + 5];
+
+	audio = kzalloc(sizeof(struct q6audio_aio), GFP_KERNEL);
+
+	if (!audio)
+		return -ENOMEM;
+	audio->codec_cfg = kzalloc(sizeof(struct msm_audio_g711_dec_config),
+					GFP_KERNEL);
+	if (!audio->codec_cfg) {
+		kfree(audio);
+		return -ENOMEM;
+	}
+
+	audio->pcm_cfg.buffer_size = PCM_BUFSZ_MIN;
+	audio->miscdevice = &audio_g711mlaw_misc;
+	audio->wakelock_voted = false;
+	audio->audio_ws_mgr = &audio_g711_ws_mgr;
+
+	init_waitqueue_head(&audio->event_wait);
+
+	audio->ac = q6asm_audio_client_alloc((app_cb) q6_audio_cb,
+					     (void *)audio);
+
+	if (!audio->ac) {
+		pr_err("%s: Could not allocate memory for audio client\n",
+					__func__);
+		kfree(audio->codec_cfg);
+		kfree(audio);
+		return -ENOMEM;
+	}
+	rc = audio_aio_open(audio, file);
+	if (rc < 0) {
+		pr_err("%s: audio_aio_open rc=%d\n",
+			__func__, rc);
+		goto fail;
+	}
+	/* open in T/NT mode; format: G711_MLAW */
+	if ((file->f_mode & FMODE_WRITE) && (file->f_mode & FMODE_READ)) {
+		rc = q6asm_open_read_write(audio->ac, FORMAT_LINEAR_PCM,
+					   FORMAT_G711_MLAW_FS);
+		if (rc < 0) {
+			pr_err("%s: NT mode Open failed rc=%d\n", __func__, rc);
+			goto fail;
+		}
+		audio->feedback = NON_TUNNEL_MODE;
+		/* open G711 decoder; expected frames per buffer is always 1 */
+		audio->buf_cfg.frames_per_buf = 0x01;
+		audio->buf_cfg.meta_info_enable = 0x01;
+	} else if ((file->f_mode & FMODE_WRITE) &&
+			!(file->f_mode & FMODE_READ)) {
+		rc = q6asm_open_write(audio->ac, FORMAT_G711_MLAW_FS);
+		if (rc < 0) {
+			pr_err("%s: T mode Open failed rc=%d\n", __func__, rc);
+			goto fail;
+		}
+		audio->feedback = TUNNEL_MODE;
+		audio->buf_cfg.meta_info_enable = 0x00;
+	} else {
+		pr_err("%s: %d mode is not supported\n", __func__,
+					file->f_mode);
+		rc = -EACCES;
+		goto fail;
+	}
+
+	snprintf(name, sizeof(name), "msm_g711_%04x", audio->ac->session);
+	audio->dentry = config_debugfs_create_file(name, (void *)audio);
+
+	if (IS_ERR_OR_NULL(audio->dentry))
+		pr_debug("%s: debugfs_create_file failed\n", __func__);
+	pr_debug("%s: g711dec success mode[%d]session[%d]\n", __func__,
+						audio->feedback,
+						audio->ac->session);
+	return rc;
+fail:
+	q6asm_audio_client_free(audio->ac);
+	kfree(audio->codec_cfg);
+	kfree(audio);
+	return rc;
+}
+
+static int g711_channel_map(u8 *channel_mapping, uint32_t channels)
+{
+	u8 *lchannel_mapping;
+
+	lchannel_mapping = channel_mapping;
+	pr_debug("%s: channels passed: %d\n", __func__, channels);
+	if (channels == 1)  {
+		lchannel_mapping[0] = PCM_CHANNEL_FC;
+	} else if (channels == 2) {
+		lchannel_mapping[0] = PCM_CHANNEL_FL;
+		lchannel_mapping[1] = PCM_CHANNEL_FR;
+	} else if (channels == 3) {
+		lchannel_mapping[0] = PCM_CHANNEL_FC;
+		lchannel_mapping[1] = PCM_CHANNEL_FL;
+		lchannel_mapping[2] = PCM_CHANNEL_FR;
+	} else if (channels == 4) {
+		lchannel_mapping[0] = PCM_CHANNEL_FC;
+		lchannel_mapping[1] = PCM_CHANNEL_FL;
+		lchannel_mapping[2] = PCM_CHANNEL_FR;
+		lchannel_mapping[3] = PCM_CHANNEL_CS;
+	} else if (channels == 5) {
+		lchannel_mapping[0] = PCM_CHANNEL_FC;
+		lchannel_mapping[1] = PCM_CHANNEL_FL;
+		lchannel_mapping[2] = PCM_CHANNEL_FR;
+		lchannel_mapping[3] = PCM_CHANNEL_LS;
+		lchannel_mapping[4] = PCM_CHANNEL_RS;
+	} else if (channels == 6) {
+		lchannel_mapping[0] = PCM_CHANNEL_FC;
+		lchannel_mapping[1] = PCM_CHANNEL_FL;
+		lchannel_mapping[2] = PCM_CHANNEL_FR;
+		lchannel_mapping[3] = PCM_CHANNEL_LS;
+		lchannel_mapping[4] = PCM_CHANNEL_RS;
+		lchannel_mapping[5] = PCM_CHANNEL_LFE;
+	} else if (channels == 7) {
+		lchannel_mapping[0] = PCM_CHANNEL_FC;
+		lchannel_mapping[1] = PCM_CHANNEL_FL;
+		lchannel_mapping[2] = PCM_CHANNEL_FR;
+		lchannel_mapping[3] = PCM_CHANNEL_LS;
+		lchannel_mapping[4] = PCM_CHANNEL_RS;
+		lchannel_mapping[5] = PCM_CHANNEL_CS;
+		lchannel_mapping[6] = PCM_CHANNEL_LFE;
+	} else if (channels == 8) {
+		lchannel_mapping[0] = PCM_CHANNEL_FC;
+		lchannel_mapping[1] = PCM_CHANNEL_FLC;
+		lchannel_mapping[2] = PCM_CHANNEL_FRC;
+		lchannel_mapping[3] = PCM_CHANNEL_FL;
+		lchannel_mapping[4] = PCM_CHANNEL_FR;
+		lchannel_mapping[5] = PCM_CHANNEL_LS;
+		lchannel_mapping[6] = PCM_CHANNEL_RS;
+		lchannel_mapping[7] = PCM_CHANNEL_LFE;
+	} else {
+		pr_err("%s: ERROR.unsupported num_ch = %u\n",
+				__func__, channels);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static const struct file_operations audio_g711_fops = {
+	.owner = THIS_MODULE,
+	.open = audio_open,
+	.release = audio_aio_release,
+	.unlocked_ioctl = audio_ioctl,
+	.compat_ioctl = audio_compat_ioctl,
+	.fsync = audio_aio_fsync,
+};
+
+static struct miscdevice audio_g711mlaw_misc = {
+	.minor = MISC_DYNAMIC_MINOR,
+	.name = "msm_g711mlaw",
+	.fops = &audio_g711_fops,
+};
+
+static int __init audio_g711mlaw_init(void)
+{
+	int ret = misc_register(&audio_g711mlaw_misc);
+
+	if (ret == 0)
+		device_init_wakeup(audio_g711mlaw_misc.this_device, true);
+	audio_g711_ws_mgr.ref_cnt = 0;
+	mutex_init(&audio_g711_ws_mgr.ws_lock);
+
+	return ret;
+}
+
+static void __exit audio_g711mlaw_exit(void)
+{
+	misc_deregister(&audio_g711mlaw_misc);
+	mutex_destroy(&audio_g711_ws_mgr.ws_lock);
+}
+
+device_initcall(audio_g711mlaw_init);
+__exitcall(audio_g711mlaw_exit);
diff -Nruw linux-4.4.115-fbx/drivers/misc/qcom./qdsp6v2/audio_hwacc_effects.c linux-4.4.115-fbx/drivers/misc/qcom/qdsp6v2/audio_hwacc_effects.c
--- linux-4.4.115-fbx/drivers/misc/qcom./qdsp6v2/audio_hwacc_effects.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/misc/qcom/qdsp6v2/audio_hwacc_effects.c	2019-01-22 16:16:24.747257672 +0100
@@ -0,0 +1,776 @@
+/*
+ * Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/msm_audio.h>
+#include <linux/compat.h>
+#include "q6audio_common.h"
+#include "audio_utils_aio.h"
+#include <sound/msm-audio-effects-q6-v2.h>
+
+#define MAX_CHANNELS_SUPPORTED		8
+#define WAIT_TIMEDOUT_DURATION_SECS	1
+
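+/*
+ * Per-open state for the hardware accelerated effects device.
+ * in_count/out_count track DSP read/write completions and are
+ * signalled from the ASM event handler below.
+ */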
+struct q6audio_effects {
+	wait_queue_head_t		read_wait;
+	wait_queue_head_t		write_wait;
+
+	struct audio_client             *ac;
+	struct msm_hwacc_effects_config  config;
+
+	struct mutex			lock;
+
+	atomic_t			in_count;
+	atomic_t			out_count;
+
+	int				opened;
+	int				started;
+	int				buf_alloc;
+	struct msm_nt_eff_all_config audio_effects;
+};
+
+static void audio_effects_init_pp(struct audio_client *ac)
+{
+	int ret = 0;
+	struct asm_softvolume_params softvol = {
+		.period = SOFT_VOLUME_PERIOD,
+		.step = SOFT_VOLUME_STEP,
+		.rampingcurve = SOFT_VOLUME_CURVE_LINEAR,
+	};
+
+	if (!ac) {
+		pr_err("%s: audio client null to init pp\n", __func__);
+		return;
+	}
+	ret = q6asm_set_softvolume_v2(ac, &softvol,
+				      SOFT_VOLUME_INSTANCE_1);
+	if (ret < 0)
+		pr_err("%s: Send SoftVolume Param failed ret=%d\n",
+			__func__, ret);
+}
+
+static void audio_effects_deinit_pp(struct audio_client *ac)
+{
+	if (!ac) {
+		pr_err("%s: audio client null to deinit pp\n", __func__);
+		return;
+	}
+}
+
+static void audio_effects_event_handler(uint32_t opcode, uint32_t token,
+				 uint32_t *payload,  void *priv)
+{
+	struct q6audio_effects *effects;
+
+	if (!payload || !priv) {
+		pr_err("%s: invalid data to handle events, payload: %pK, priv: %pK\n",
+			__func__, payload, priv);
+		return;
+	}
+
+	effects = (struct q6audio_effects *)priv;
+	switch (opcode) {
+	case ASM_DATA_EVENT_WRITE_DONE_V2: {
+		atomic_inc(&effects->out_count);
+		wake_up(&effects->write_wait);
+		break;
+	}
+	case ASM_DATA_EVENT_READ_DONE_V2: {
+		atomic_inc(&effects->in_count);
+		wake_up(&effects->read_wait);
+		break;
+	}
+	case APR_BASIC_RSP_RESULT: {
+		pr_debug("%s: APR_BASIC_RSP_RESULT Cmd[0x%x] Status[0x%x]\n",
+			 __func__, payload[0], payload[1]);
+		switch (payload[0]) {
+		case ASM_SESSION_CMD_RUN_V2:
+			pr_debug("ASM_SESSION_CMD_RUN_V2\n");
+			break;
+		default:
+			pr_debug("%s: Payload = [0x%x] stat[0x%x]\n",
+				 __func__, payload[0], payload[1]);
+			break;
+		}
+		break;
+	}
+	default:
+		pr_debug("%s: Unhandled Event 0x%x token = 0x%x\n",
+			 __func__, opcode, token);
+		break;
+	}
+}
+
+static int audio_effects_shared_ioctl(struct file *file, unsigned cmd,
+				      unsigned long arg)
+{
+	struct q6audio_effects *effects = file->private_data;
+	int rc = 0;
+	switch (cmd) {
+	case AUDIO_START: {
+		pr_debug("%s: AUDIO_START\n", __func__);
+
+		mutex_lock(&effects->lock);
+
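+		/*
+		 * Open a non-tunnel (read/write) ASM session with the HPX
+		 * master post-processing topology forced in, so the effects
+		 * chain runs on this stream in the DSP.
+		 */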
+		rc = q6asm_open_read_write_v2(effects->ac,
+					FORMAT_LINEAR_PCM,
+					FORMAT_MULTI_CHANNEL_LINEAR_PCM,
+					effects->config.meta_mode_enabled,
+					effects->config.output.bits_per_sample,
+					true /*overwrite topology*/,
+					ASM_STREAM_POSTPROC_TOPO_ID_HPX_MASTER);
+		if (rc < 0) {
+			pr_err("%s: Open failed for hw accelerated effects:rc=%d\n",
+				__func__, rc);
+			rc = -EINVAL;
+			mutex_unlock(&effects->lock);
+			goto ioctl_fail;
+		}
+		effects->opened = 1;
+
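+		/*
+		 * Allocate contiguous DSP buffers for both directions. Note
+		 * the naming: IN buffers (written to the DSP) use the
+		 * "output" config, while OUT buffers (read back) use the
+		 * "input" config.
+		 */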
+		pr_debug("%s: dec buf size: %d, num_buf: %d, enc buf size: %d, num_buf: %d\n",
+			 __func__, effects->config.output.buf_size,
+			 effects->config.output.num_buf,
+			 effects->config.input.buf_size,
+			 effects->config.input.num_buf);
+		rc = q6asm_audio_client_buf_alloc_contiguous(IN, effects->ac,
+					effects->config.output.buf_size,
+					effects->config.output.num_buf);
+		if (rc < 0) {
+			pr_err("%s: Write buffer Allocation failed rc = %d\n",
+				__func__, rc);
+			rc = -ENOMEM;
+			mutex_unlock(&effects->lock);
+			goto ioctl_fail;
+		}
+		atomic_set(&effects->in_count, effects->config.input.num_buf);
+		rc = q6asm_audio_client_buf_alloc_contiguous(OUT, effects->ac,
+					effects->config.input.buf_size,
+					effects->config.input.num_buf);
+		if (rc < 0) {
+			pr_err("%s: Read buffer Allocation failed rc = %d\n",
+				__func__, rc);
+			rc = -ENOMEM;
+			goto readbuf_fail;
+		}
+		atomic_set(&effects->out_count, effects->config.output.num_buf);
+		effects->buf_alloc = 1;
+
+		pr_debug("%s: enc: sample_rate: %d, num_channels: %d\n",
+			 __func__, effects->config.input.sample_rate,
+			effects->config.input.num_channels);
+		rc = q6asm_enc_cfg_blk_pcm(effects->ac,
+					   effects->config.input.sample_rate,
+					   effects->config.input.num_channels);
+		if (rc < 0) {
+			pr_err("%s: pcm read block config failed\n", __func__);
+			rc = -EINVAL;
+			goto cfg_fail;
+		}
+		pr_debug("%s: dec: sample_rate: %d, num_channels: %d, bit_width: %d\n",
+			 __func__, effects->config.output.sample_rate,
+			effects->config.output.num_channels,
+			effects->config.output.bits_per_sample);
+		rc = q6asm_media_format_block_pcm_format_support(
+				effects->ac, effects->config.output.sample_rate,
+				effects->config.output.num_channels,
+				effects->config.output.bits_per_sample);
+		if (rc < 0) {
+			pr_err("%s: pcm write format block config failed\n",
+				__func__);
+			rc = -EINVAL;
+			goto cfg_fail;
+		}
+
+		audio_effects_init_pp(effects->ac);
+
+		rc = q6asm_run(effects->ac, 0x00, 0x00, 0x00);
+		if (!rc)
+			effects->started = 1;
+		else {
+			effects->started = 0;
+			pr_err("%s: ASM run state failed\n", __func__);
+		}
+		mutex_unlock(&effects->lock);
+		break;
+	}
+	case AUDIO_EFFECTS_WRITE: {
+		char *bufptr = NULL;
+		uint32_t idx = 0;
+		uint32_t size = 0;
+
+		mutex_lock(&effects->lock);
+
+		if (!effects->started) {
+			rc = -EFAULT;
+			mutex_unlock(&effects->lock);
+			goto ioctl_fail;
+		}
+
+		rc = wait_event_timeout(effects->write_wait,
+					atomic_read(&effects->out_count),
+					WAIT_TIMEDOUT_DURATION_SECS * HZ);
+		if (!rc) {
+			pr_err("%s: write wait_event_timeout\n", __func__);
+			rc = -EFAULT;
+			mutex_unlock(&effects->lock);
+			goto ioctl_fail;
+		}
+		if (!atomic_read(&effects->out_count)) {
+			pr_err("%s: pcm stopped out_count 0\n", __func__);
+			rc = -EFAULT;
+			mutex_unlock(&effects->lock);
+			goto ioctl_fail;
+		}
+
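+		/*
+		 * Grab a free DSP input buffer, copy the user payload into
+		 * it and queue it to the ASM session; drop the buffer if
+		 * none is available.
+		 */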
+		bufptr = q6asm_is_cpu_buf_avail(IN, effects->ac, &size, &idx);
+		if (bufptr) {
+			if ((effects->config.buf_cfg.output_len > size) ||
+				copy_from_user(bufptr, (void *)arg,
+					effects->config.buf_cfg.output_len)) {
+				rc = -EFAULT;
+				mutex_unlock(&effects->lock);
+				goto ioctl_fail;
+			}
+			rc = q6asm_write(effects->ac,
+					 effects->config.buf_cfg.output_len,
+					 0, 0, NO_TIMESTAMP);
+			if (rc < 0) {
+				rc = -EFAULT;
+				mutex_unlock(&effects->lock);
+				goto ioctl_fail;
+			}
+			atomic_dec(&effects->out_count);
+		} else {
+			pr_err("%s: AUDIO_EFFECTS_WRITE: Buffer dropped\n",
+				__func__);
+		}
+		mutex_unlock(&effects->lock);
+		break;
+	}
+	case AUDIO_EFFECTS_READ: {
+		char *bufptr = NULL;
+		uint32_t idx = 0;
+		uint32_t size = 0;
+
+		mutex_lock(&effects->lock);
+
+		if (!effects->started) {
+			rc = -EFAULT;
+			mutex_unlock(&effects->lock);
+			goto ioctl_fail;
+		}
+
+		atomic_set(&effects->in_count, 0);
+
+		rc = q6asm_read_v2(effects->ac,
+				   effects->config.buf_cfg.input_len);
+		/* Read might fail initially, don't error out */
+		if (rc < 0)
+			pr_err("%s: read failed\n", __func__);
+
+		rc = wait_event_timeout(effects->read_wait,
+					atomic_read(&effects->in_count),
+					WAIT_TIMEDOUT_DURATION_SECS * HZ);
+		if (!rc) {
+			pr_err("%s: read wait_event_timeout\n", __func__);
+			rc = -EFAULT;
+			mutex_unlock(&effects->lock);
+			goto ioctl_fail;
+		}
+		if (!atomic_read(&effects->in_count)) {
+			pr_err("%s: pcm stopped in_count 0\n", __func__);
+			rc = -EFAULT;
+			mutex_unlock(&effects->lock);
+			goto ioctl_fail;
+		}
+
+		bufptr = q6asm_is_cpu_buf_avail(OUT, effects->ac, &size, &idx);
+		if (bufptr) {
+			if (!((void *)arg)) {
+				rc = -EFAULT;
+				mutex_unlock(&effects->lock);
+				goto ioctl_fail;
+			}
+			if ((effects->config.buf_cfg.input_len > size) ||
+				copy_to_user((void *)arg, bufptr,
+					  effects->config.buf_cfg.input_len)) {
+				rc = -EFAULT;
+				mutex_unlock(&effects->lock);
+				goto ioctl_fail;
+			}
+		}
+		mutex_unlock(&effects->lock);
+		break;
+	}
+	default:
+		pr_err("%s: Invalid effects config module\n", __func__);
+		rc = -EINVAL;
+		break;
+	}
+ioctl_fail:
+	return rc;
+readbuf_fail:
+	q6asm_audio_client_buf_free_contiguous(IN,
+					effects->ac);
+	mutex_unlock(&effects->lock);
+	return rc;
+cfg_fail:
+	q6asm_audio_client_buf_free_contiguous(IN,
+					effects->ac);
+	q6asm_audio_client_buf_free_contiguous(OUT,
+					effects->ac);
+	effects->buf_alloc = 0;
+	mutex_unlock(&effects->lock);
+	return rc;
+}
+
+static long audio_effects_set_pp_param(struct q6audio_effects *effects,
+				long *values)
+{
+	int rc = 0;
+	int effects_module = values[0];
+	switch (effects_module) {
+	case VIRTUALIZER_MODULE:
+		pr_debug("%s: VIRTUALIZER_MODULE\n", __func__);
+		if (msm_audio_effects_is_effmodule_supp_in_top(
+			effects_module, effects->ac->topology))
+			msm_audio_effects_virtualizer_handler(
+				effects->ac,
+				&(effects->audio_effects.virtualizer),
+				(long *)&values[1]);
+		break;
+	case REVERB_MODULE:
+		pr_debug("%s: REVERB_MODULE\n", __func__);
+		if (msm_audio_effects_is_effmodule_supp_in_top(
+			effects_module, effects->ac->topology))
+			msm_audio_effects_reverb_handler(effects->ac,
+				 &(effects->audio_effects.reverb),
+				 (long *)&values[1]);
+		break;
+	case BASS_BOOST_MODULE:
+		pr_debug("%s: BASS_BOOST_MODULE\n", __func__);
+		if (msm_audio_effects_is_effmodule_supp_in_top(
+			effects_module, effects->ac->topology))
+			msm_audio_effects_bass_boost_handler(
+				effects->ac,
+				&(effects->audio_effects.bass_boost),
+				(long *)&values[1]);
+		break;
+	case PBE_MODULE:
+		pr_debug("%s: PBE_MODULE\n", __func__);
+		if (msm_audio_effects_is_effmodule_supp_in_top(
+			effects_module, effects->ac->topology))
+			msm_audio_effects_pbe_handler(
+				effects->ac,
+				&(effects->audio_effects.pbe),
+				(long *)&values[1]);
+		break;
+	case EQ_MODULE:
+		pr_debug("%s: EQ_MODULE\n", __func__);
+		if (msm_audio_effects_is_effmodule_supp_in_top(
+			effects_module, effects->ac->topology))
+			msm_audio_effects_popless_eq_handler(
+				effects->ac,
+				&(effects->audio_effects.equalizer),
+				(long *)&values[1]);
+		break;
+	case SOFT_VOLUME_MODULE:
+		pr_debug("%s: SA PLUS VOLUME_MODULE\n", __func__);
+		msm_audio_effects_volume_handler_v2(effects->ac,
+				&(effects->audio_effects.saplus_vol),
+				(long *)&values[1], SOFT_VOLUME_INSTANCE_1);
+		break;
+	case SOFT_VOLUME2_MODULE:
+		pr_debug("%s: TOPOLOGY SWITCH VOLUME MODULE\n",
+			 __func__);
+		if (msm_audio_effects_is_effmodule_supp_in_top(
+			effects_module, effects->ac->topology))
+			msm_audio_effects_volume_handler_v2(effects->ac,
+			      &(effects->audio_effects.topo_switch_vol),
+			      (long *)&values[1], SOFT_VOLUME_INSTANCE_2);
+		break;
+	default:
+		pr_err("%s: Invalid effects config module\n", __func__);
+		rc = -EINVAL;
+	}
+	return rc;
+}
+
+static long audio_effects_ioctl(struct file *file, unsigned int cmd,
+				unsigned long arg)
+{
+	struct q6audio_effects *effects = file->private_data;
+	int rc = 0;
+	long argvalues[MAX_PP_PARAMS_SZ] = {0};
+
+	switch (cmd) {
+	case AUDIO_SET_EFFECTS_CONFIG: {
+		pr_debug("%s: AUDIO_SET_EFFECTS_CONFIG\n", __func__);
+		mutex_lock(&effects->lock);
+		memset(&effects->config, 0, sizeof(effects->config));
+		if (copy_from_user(&effects->config, (void *)arg,
+				   sizeof(effects->config))) {
+			pr_err("%s: copy from user for AUDIO_SET_EFFECTS_CONFIG failed\n",
+				__func__);
+			rc = -EFAULT;
+		}
+		pr_debug("%s: write buf_size: %d, num_buf: %d, sample_rate: %d, channel: %d\n",
+			 __func__, effects->config.output.buf_size,
+			 effects->config.output.num_buf,
+			 effects->config.output.sample_rate,
+			 effects->config.output.num_channels);
+		pr_debug("%s: read buf_size: %d, num_buf: %d, sample_rate: %d, channel: %d\n",
+			 __func__, effects->config.input.buf_size,
+			 effects->config.input.num_buf,
+			 effects->config.input.sample_rate,
+			 effects->config.input.num_channels);
+		mutex_unlock(&effects->lock);
+		break;
+	}
+	case AUDIO_EFFECTS_SET_BUF_LEN: {
+		mutex_lock(&effects->lock);
+		if (copy_from_user(&effects->config.buf_cfg, (void *)arg,
+				   sizeof(effects->config.buf_cfg))) {
+			pr_err("%s: copy from user for AUDIO_EFFECTS_SET_BUF_LEN failed\n",
+				__func__);
+			rc = -EFAULT;
+		}
+		pr_debug("%s: write buf len: %d, read buf len: %d\n",
+			 __func__, effects->config.buf_cfg.output_len,
+			 effects->config.buf_cfg.input_len);
+		mutex_unlock(&effects->lock);
+		break;
+	}
+	case AUDIO_EFFECTS_GET_BUF_AVAIL: {
+		struct msm_hwacc_buf_avail buf_avail;
+
+		mutex_lock(&effects->lock);
+		buf_avail.input_num_avail = atomic_read(&effects->in_count);
+		buf_avail.output_num_avail = atomic_read(&effects->out_count);
+		pr_debug("%s: write buf avail: %d, read buf avail: %d\n",
+			 __func__, buf_avail.output_num_avail,
+			 buf_avail.input_num_avail);
+		if (copy_to_user((void *)arg, &buf_avail,
+				   sizeof(buf_avail))) {
+			pr_err("%s: copy to user for AUDIO_EFFECTS_GET_BUF_AVAIL failed\n",
+				__func__);
+			rc = -EFAULT;
+		}
+		mutex_unlock(&effects->lock);
+		break;
+	}
+	case AUDIO_EFFECTS_SET_PP_PARAMS: {
+		mutex_lock(&effects->lock);
+		if (copy_from_user(argvalues, (void *)arg,
+				   MAX_PP_PARAMS_SZ*sizeof(long))) {
+			pr_err("%s: copy from user for pp params failed\n",
+				__func__);
+			mutex_unlock(&effects->lock);
+			return -EFAULT;
+		}
+		rc = audio_effects_set_pp_param(effects, argvalues);
+		mutex_unlock(&effects->lock);
+		break;
+	}
+	default:
+		pr_debug("%s: Calling shared ioctl\n", __func__);
+		rc = audio_effects_shared_ioctl(file, cmd, arg);
+		break;
+	}
+	if (rc)
+		pr_err("%s: cmd 0x%x failed\n", __func__, cmd);
+	return rc;
+}
+
+#ifdef CONFIG_COMPAT
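+/* 32-bit userspace layouts of the hw accelerated effects ioctl payloads */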
+struct msm_hwacc_data_config32 {
+	__u32 buf_size;
+	__u32 num_buf;
+	__u32 num_channels;
+	__u8 channel_map[MAX_CHANNELS_SUPPORTED];
+	__u32 sample_rate;
+	__u32 bits_per_sample;
+};
+
+struct msm_hwacc_buf_cfg32 {
+	__u32 input_len;
+	__u32 output_len;
+};
+
+struct msm_hwacc_buf_avail32 {
+	__u32 input_num_avail;
+	__u32 output_num_avail;
+};
+
+struct msm_hwacc_effects_config32 {
+	struct msm_hwacc_data_config32 input;
+	struct msm_hwacc_data_config32 output;
+	struct msm_hwacc_buf_cfg32 buf_cfg;
+	__u32 meta_mode_enabled;
+	__u32 overwrite_topology;
+	__s32 topology;
+};
+
+enum {
+	AUDIO_SET_EFFECTS_CONFIG32 = _IOW(AUDIO_IOCTL_MAGIC, 99,
+					  struct msm_hwacc_effects_config32),
+	AUDIO_EFFECTS_SET_BUF_LEN32 = _IOW(AUDIO_IOCTL_MAGIC, 100,
+					   struct msm_hwacc_buf_cfg32),
+	AUDIO_EFFECTS_GET_BUF_AVAIL32 = _IOW(AUDIO_IOCTL_MAGIC, 101,
+					     struct msm_hwacc_buf_avail32),
+	AUDIO_EFFECTS_WRITE32 = _IOW(AUDIO_IOCTL_MAGIC, 102, compat_uptr_t),
+	AUDIO_EFFECTS_READ32 = _IOWR(AUDIO_IOCTL_MAGIC, 103, compat_uptr_t),
+	AUDIO_EFFECTS_SET_PP_PARAMS32 = _IOW(AUDIO_IOCTL_MAGIC, 104,
+					   compat_uptr_t),
+	AUDIO_START32 = _IOW(AUDIO_IOCTL_MAGIC, 0, unsigned),
+};
+
+static long audio_effects_compat_ioctl(struct file *file, unsigned int cmd,
+					unsigned long arg)
+{
+	struct q6audio_effects *effects = file->private_data;
+	int rc = 0, i;
+
+	switch (cmd) {
+	case AUDIO_SET_EFFECTS_CONFIG32: {
+		struct msm_hwacc_effects_config32 config32;
+		struct msm_hwacc_effects_config *config = &effects->config;
+		mutex_lock(&effects->lock);
+		memset(&effects->config, 0, sizeof(effects->config));
+		if (copy_from_user(&config32, (void *)arg,
+				   sizeof(config32))) {
+			pr_err("%s: copy to user for AUDIO_SET_EFFECTS_CONFIG failed\n",
+				__func__);
+			rc = -EFAULT;
+			mutex_unlock(&effects->lock);
+			break;
+		}
+		config->input.buf_size = config32.input.buf_size;
+		config->input.num_buf = config32.input.num_buf;
+		config->input.num_channels = config32.input.num_channels;
+		config->input.sample_rate = config32.input.sample_rate;
+		config->input.bits_per_sample = config32.input.bits_per_sample;
+		config->input.buf_size = config32.input.buf_size;
+		for (i = 0; i < MAX_CHANNELS_SUPPORTED; i++)
+			config->input.channel_map[i] =
+						config32.input.channel_map[i];
+		config->output.buf_size = config32.output.buf_size;
+		config->output.num_buf = config32.output.num_buf;
+		config->output.num_channels = config32.output.num_channels;
+		config->output.sample_rate = config32.output.sample_rate;
+		config->output.bits_per_sample =
+					 config32.output.bits_per_sample;
+		config->output.buf_size = config32.output.buf_size;
+		for (i = 0; i < MAX_CHANNELS_SUPPORTED; i++)
+			config->output.channel_map[i] =
+						config32.output.channel_map[i];
+		config->buf_cfg.input_len = config32.buf_cfg.input_len;
+		config->buf_cfg.output_len = config32.buf_cfg.output_len;
+		config->meta_mode_enabled = config32.meta_mode_enabled;
+		config->overwrite_topology = config32.overwrite_topology;
+		config->topology = config32.topology;
+		pr_debug("%s: write buf_size: %d, num_buf: %d, sample_rate: %d, channels: %d\n",
+			 __func__, effects->config.output.buf_size,
+			 effects->config.output.num_buf,
+			 effects->config.output.sample_rate,
+			 effects->config.output.num_channels);
+		pr_debug("%s: read buf_size: %d, num_buf: %d, sample_rate: %d, channels: %d\n",
+			 __func__, effects->config.input.buf_size,
+			 effects->config.input.num_buf,
+			 effects->config.input.sample_rate,
+			 effects->config.input.num_channels);
+		mutex_unlock(&effects->lock);
+		break;
+	}
+	case AUDIO_EFFECTS_SET_BUF_LEN32: {
+		struct msm_hwacc_buf_cfg32 buf_cfg32;
+		struct msm_hwacc_effects_config *config = &effects->config;
+		mutex_lock(&effects->lock);
+		if (copy_from_user(&buf_cfg32, (void *)arg,
+				   sizeof(buf_cfg32))) {
+			pr_err("%s: copy from user for AUDIO_EFFECTS_SET_BUF_LEN failed\n",
+				__func__);
+			rc = -EFAULT;
+			mutex_unlock(&effects->lock);
+			break;
+		}
+		config->buf_cfg.input_len = buf_cfg32.input_len;
+		config->buf_cfg.output_len = buf_cfg32.output_len;
+		pr_debug("%s: write buf len: %d, read buf len: %d\n",
+			 __func__, effects->config.buf_cfg.output_len,
+			 effects->config.buf_cfg.input_len);
+		mutex_unlock(&effects->lock);
+		break;
+	}
+	case AUDIO_EFFECTS_GET_BUF_AVAIL32: {
+		struct msm_hwacc_buf_avail32 buf_avail;
+
+		memset(&buf_avail, 0, sizeof(buf_avail));
+
+		mutex_lock(&effects->lock);
+		buf_avail.input_num_avail = atomic_read(&effects->in_count);
+		buf_avail.output_num_avail = atomic_read(&effects->out_count);
+		pr_debug("%s: write buf avail: %d, read buf avail: %d\n",
+			 __func__, buf_avail.output_num_avail,
+			 buf_avail.input_num_avail);
+		if (copy_to_user((void *)arg, &buf_avail,
+				   sizeof(buf_avail))) {
+			pr_err("%s: copy to user for AUDIO_EFFECTS_GET_BUF_AVAIL failed\n",
+				__func__);
+			rc = -EFAULT;
+		}
+		mutex_unlock(&effects->lock);
+		break;
+	}
+	case AUDIO_EFFECTS_SET_PP_PARAMS32: {
+		long argvalues[MAX_PP_PARAMS_SZ] = {0};
+		int argvalues32[MAX_PP_PARAMS_SZ] = {0};
+
+		mutex_lock(&effects->lock);
+		if (copy_from_user(argvalues32, (void *)arg,
+				   MAX_PP_PARAMS_SZ*sizeof(int))) {
+			pr_err("%s: copy from user failed for pp params\n",
+				__func__);
+			mutex_unlock(&effects->lock);
+			return -EFAULT;
+		}
+		for (i = 0; i < MAX_PP_PARAMS_SZ; i++)
+			argvalues[i] = argvalues32[i];
+
+		rc = audio_effects_set_pp_param(effects, argvalues);
+		mutex_unlock(&effects->lock);
+		break;
+	}
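+	/*
+	 * The remaining 32-bit ioctls carry either no payload or a plain
+	 * buffer pointer, so they can be forwarded to the native handler
+	 * unchanged.
+	 */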
+	case AUDIO_START32: {
+		rc = audio_effects_shared_ioctl(file, AUDIO_START, arg);
+		break;
+	}
+	case AUDIO_EFFECTS_WRITE32: {
+		rc = audio_effects_shared_ioctl(file, AUDIO_EFFECTS_WRITE, arg);
+		break;
+	}
+	case AUDIO_EFFECTS_READ32: {
+		rc = audio_effects_shared_ioctl(file, AUDIO_EFFECTS_READ, arg);
+		break;
+	}
+	default:
+		pr_debug("%s: unhandled ioctl\n", __func__);
+		rc = -EINVAL;
+		break;
+	}
+	return rc;
+}
+#endif
+
+static int audio_effects_release(struct inode *inode, struct file *file)
+{
+	struct q6audio_effects *effects = file->private_data;
+	int rc = 0;
+	if (!effects) {
+		pr_err("%s: effect is NULL\n", __func__);
+		return -EINVAL;
+	}
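+	/*
+	 * Wait for in-flight DSP buffers to complete before closing the
+	 * session, so the buffers below are not freed while still queued.
+	 */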
+	if (effects->opened) {
+		rc = wait_event_timeout(effects->write_wait,
+					atomic_read(&effects->out_count),
+					WAIT_TIMEDOUT_DURATION_SECS * HZ);
+		if (!rc)
+			pr_err("%s: write wait_event_timeout failed\n",
+				__func__);
+		rc = wait_event_timeout(effects->read_wait,
+					atomic_read(&effects->in_count),
+					WAIT_TIMEDOUT_DURATION_SECS * HZ);
+		if (!rc)
+			pr_err("%s: read wait_event_timeout failed\n",
+				__func__);
+		rc = q6asm_cmd(effects->ac, CMD_CLOSE);
+		if (rc < 0)
+			pr_err("%s[%pK]:Failed to close the session rc=%d\n",
+				__func__, effects, rc);
+		effects->opened = 0;
+		effects->started = 0;
+
+		audio_effects_deinit_pp(effects->ac);
+	}
+
+	if (effects->buf_alloc) {
+		q6asm_audio_client_buf_free_contiguous(IN, effects->ac);
+		q6asm_audio_client_buf_free_contiguous(OUT, effects->ac);
+	}
+	q6asm_audio_client_free(effects->ac);
+
+	mutex_destroy(&effects->lock);
+	kfree(effects);
+
+	pr_debug("%s: close session success\n", __func__);
+	return rc;
+}
+
+static int audio_effects_open(struct inode *inode, struct file *file)
+{
+	struct q6audio_effects *effects;
+	int rc = 0;
+
+	effects = kzalloc(sizeof(struct q6audio_effects), GFP_KERNEL);
+	if (!effects) {
+		pr_err("%s: Could not allocate memory for hw acc effects driver\n",
+			__func__);
+		return -ENOMEM;
+	}
+
+	effects->ac = q6asm_audio_client_alloc(
+					(app_cb)audio_effects_event_handler,
+					(void *)effects);
+	if (!effects->ac) {
+		pr_err("%s: Could not allocate memory for audio client\n",
+			__func__);
+		kfree(effects);
+		return -ENOMEM;
+	}
+
+	init_waitqueue_head(&effects->read_wait);
+	init_waitqueue_head(&effects->write_wait);
+	mutex_init(&effects->lock);
+
+	effects->opened = 0;
+	effects->started = 0;
+	effects->buf_alloc = 0;
+	file->private_data = effects;
+	pr_debug("%s: open session success\n", __func__);
+	return rc;
+}
+
+static const struct file_operations audio_effects_fops = {
+	.owner = THIS_MODULE,
+	.open = audio_effects_open,
+	.release = audio_effects_release,
+	.unlocked_ioctl = audio_effects_ioctl,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl = audio_effects_compat_ioctl,
+#endif
+};
+
+struct miscdevice audio_effects_misc = {
+	.minor = MISC_DYNAMIC_MINOR,
+	.name = "msm_hweffects",
+	.fops = &audio_effects_fops,
+};
+
+static int __init audio_effects_init(void)
+{
+	return misc_register(&audio_effects_misc);
+}
+
+device_initcall(audio_effects_init);
+MODULE_DESCRIPTION("Audio hardware accelerated effects driver");
+MODULE_LICENSE("GPL v2");
diff -Nruw linux-4.4.115-fbx/drivers/misc/qcom./qdsp6v2/audio_mp3.c linux-4.4.115-fbx/drivers/misc/qcom/qdsp6v2/audio_mp3.c
--- linux-4.4.115-fbx/drivers/misc/qcom./qdsp6v2/audio_mp3.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/misc/qcom/qdsp6v2/audio_mp3.c	2019-01-22 16:16:24.747257672 +0100
@@ -0,0 +1,188 @@
+/* mp3 audio output device
+ *
+ * Copyright (C) 2008 Google, Inc.
+ * Copyright (C) 2008 HTC Corporation
+ * Copyright (c) 2011-2016, The Linux Foundation. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include "audio_utils_aio.h"
+
+static struct miscdevice audio_mp3_misc;
+static struct ws_mgr audio_mp3_ws_mgr;
+
+#ifdef CONFIG_DEBUG_FS
+static const struct file_operations audio_mp3_debug_fops = {
+	.read = audio_aio_debug_read,
+	.open = audio_aio_debug_open,
+};
+#endif
+
+static long audio_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+	struct q6audio_aio *audio = file->private_data;
+	int rc = 0;
+	switch (cmd) {
+	case AUDIO_START: {
+		pr_debug("%s[%pK]: AUDIO_START session_id[%d]\n", __func__,
+						audio, audio->ac->session);
+		if (audio->feedback == NON_TUNNEL_MODE) {
+			/* Configure PCM output block */
+			rc = q6asm_enc_cfg_blk_pcm(audio->ac,
+					audio->pcm_cfg.sample_rate,
+					audio->pcm_cfg.channel_count);
+			if (rc < 0) {
+				pr_err("pcm output block config failed\n");
+				break;
+			}
+		}
+
+		rc = audio_aio_enable(audio);
+		audio->eos_rsp = 0;
+		audio->eos_flag = 0;
+		if (!rc) {
+			rc = enable_volume_ramp(audio);
+			if (rc < 0) {
+				pr_err("%s: Failed to enable volume ramp\n",
+					__func__);
+			}
+			audio->enabled = 1;
+		} else {
+			audio->enabled = 0;
+			pr_err("Audio Start procedure failed rc=%d\n", rc);
+			break;
+		}
+		pr_info("%s: AUDIO_START sessionid[%d]enable[%d]\n", __func__,
+						audio->ac->session,
+						audio->enabled);
+		if (audio->stopped == 1)
+			audio->stopped = 0;
+		break;
+	}
+	default:
+		pr_debug("%s[%pK]: Calling utils ioctl\n", __func__, audio);
+		rc = audio->codec_ioctl(file, cmd, arg);
+	}
+	return rc;
+}
+
+static int audio_open(struct inode *inode, struct file *file)
+{
+	struct q6audio_aio *audio = NULL;
+	int rc = 0;
+
+#ifdef CONFIG_DEBUG_FS
+	/* 4 bytes for the decoder number, 1 byte for the terminating NUL */
+	char name[sizeof "msm_mp3_" + 5];
+#endif
+	audio = kzalloc(sizeof(struct q6audio_aio), GFP_KERNEL);
+
+	if (audio == NULL) {
+		pr_err("Could not allocate memory for mp3 decode driver\n");
+		return -ENOMEM;
+	}
+
+	audio->pcm_cfg.buffer_size = PCM_BUFSZ_MIN;
+	audio->miscdevice = &audio_mp3_misc;
+	audio->wakelock_voted = false;
+	audio->audio_ws_mgr = &audio_mp3_ws_mgr;
+
+	audio->ac = q6asm_audio_client_alloc((app_cb) q6_audio_cb,
+					     (void *)audio);
+
+	if (!audio->ac) {
+		pr_err("Could not allocate memory for audio client\n");
+		kfree(audio);
+		return -ENOMEM;
+	}
+	rc = audio_aio_open(audio, file);
+	if (rc < 0) {
+		pr_err("%s: audio_aio_open rc=%d\n",
+			__func__, rc);
+		goto fail;
+	}
+
+	/* open in T/NT mode */
+	if ((file->f_mode & FMODE_WRITE) && (file->f_mode & FMODE_READ)) {
+		rc = q6asm_open_read_write(audio->ac, FORMAT_LINEAR_PCM,
+					   FORMAT_MP3);
+		if (rc < 0) {
+			pr_err("NT mode Open failed rc=%d\n", rc);
+			rc = -ENODEV;
+			goto fail;
+		}
+		audio->feedback = NON_TUNNEL_MODE;
+		/* open MP3 decoder; expected frames is always 1 */
+		/* audio->buf_cfg.frames_per_buf = 0x01; */
+		audio->buf_cfg.meta_info_enable = 0x01;
+	} else if ((file->f_mode & FMODE_WRITE) &&
+			!(file->f_mode & FMODE_READ)) {
+		rc = q6asm_open_write(audio->ac, FORMAT_MP3);
+		if (rc < 0) {
+			pr_err("T mode Open failed rc=%d\n", rc);
+			rc = -ENODEV;
+			goto fail;
+		}
+		audio->feedback = TUNNEL_MODE;
+		audio->buf_cfg.meta_info_enable = 0x00;
+	} else {
+		pr_err("Not supported mode\n");
+		rc = -EACCES;
+		goto fail;
+	}
+
+#ifdef CONFIG_DEBUG_FS
+	snprintf(name, sizeof name, "msm_mp3_%04x", audio->ac->session);
+	audio->dentry = debugfs_create_file(name, S_IFREG | S_IRUGO,
+					    NULL, (void *)audio,
+					    &audio_mp3_debug_fops);
+
+	if (IS_ERR(audio->dentry))
+		pr_debug("debugfs_create_file failed\n");
+#endif
+	pr_info("%s:mp3dec success mode[%d]session[%d]\n", __func__,
+						audio->feedback,
+						audio->ac->session);
+	return rc;
+fail:
+	q6asm_audio_client_free(audio->ac);
+	kfree(audio);
+	return rc;
+}
+
+static const struct file_operations audio_mp3_fops = {
+	.owner = THIS_MODULE,
+	.open = audio_open,
+	.release = audio_aio_release,
+	.unlocked_ioctl = audio_ioctl,
+	.fsync = audio_aio_fsync,
+};
+
+static struct miscdevice audio_mp3_misc = {
+	.minor = MISC_DYNAMIC_MINOR,
+	.name = "msm_mp3",
+	.fops = &audio_mp3_fops,
+};
+
+static int __init audio_mp3_init(void)
+{
+	int ret = misc_register(&audio_mp3_misc);
+
+	if (ret == 0)
+		device_init_wakeup(audio_mp3_misc.this_device, true);
+	audio_mp3_ws_mgr.ref_cnt = 0;
+	mutex_init(&audio_mp3_ws_mgr.ws_lock);
+
+	return ret;
+}
+
+device_initcall(audio_mp3_init);
diff -Nruw linux-4.4.115-fbx/drivers/misc/qcom./qdsp6v2/audio_multi_aac.c linux-4.4.115-fbx/drivers/misc/qcom/qdsp6v2/audio_multi_aac.c
--- linux-4.4.115-fbx/drivers/misc/qcom./qdsp6v2/audio_multi_aac.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/misc/qcom/qdsp6v2/audio_multi_aac.c	2019-10-29 09:26:24.045207112 +0100
@@ -0,0 +1,521 @@
+/* aac audio output device
+ *
+ * Copyright (C) 2008 Google, Inc.
+ * Copyright (C) 2008 HTC Corporation
+ * Copyright (c) 2011-2017, The Linux Foundation. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/msm_audio_aac.h>
+#include <linux/compat.h>
+#include <soc/qcom/socinfo.h>
+#include "audio_utils_aio.h"
+
+#define AUDIO_AAC_DUAL_MONO_INVALID -1
+
+
+/* Minimum PCM output buffer size, including decoder meta info */
+#define PCM_BUFSZ_MIN_AACM	((8*1024) + sizeof(struct dec_meta_out))
+static struct miscdevice audio_multiaac_misc;
+static struct ws_mgr audio_multiaac_ws_mgr;
+
+#ifdef CONFIG_DEBUG_FS
+static const struct file_operations audio_aac_debug_fops = {
+	.read = audio_aio_debug_read,
+	.open = audio_aio_debug_open,
+};
+#endif
+
+static long audio_ioctl_shared(struct file *file, unsigned int cmd,
+						void *arg)
+{
+	struct q6audio_aio *audio = file->private_data;
+	int rc = 0;
+
+	switch (cmd) {
+	case AUDIO_START: {
+		struct asm_aac_cfg aac_cfg;
+		struct msm_audio_aac_config *aac_config;
+		uint32_t sbr_ps = 0x00;
+		aac_config = (struct msm_audio_aac_config *)audio->codec_cfg;
+		if (audio->feedback == TUNNEL_MODE) {
+			aac_cfg.sample_rate = aac_config->sample_rate;
+			aac_cfg.ch_cfg = aac_config->channel_configuration;
+		} else {
+			aac_cfg.sample_rate =  audio->pcm_cfg.sample_rate;
+			aac_cfg.ch_cfg = audio->pcm_cfg.channel_count;
+		}
+		pr_debug("%s: AUDIO_START session_id[%d]\n", __func__,
+						audio->ac->session);
+		if (audio->feedback == NON_TUNNEL_MODE) {
+			/* Configure PCM output block */
+			rc = q6asm_enc_cfg_blk_pcm(audio->ac,
+					audio->pcm_cfg.sample_rate,
+					audio->pcm_cfg.channel_count);
+			if (rc < 0) {
+				pr_err("pcm output block config failed\n");
+				break;
+			}
+		}
+		/* turn on both sbr and ps */
+		rc = q6asm_enable_sbrps(audio->ac, sbr_ps);
+		if (rc < 0)
+			pr_err("sbr-ps enable failed\n");
+		if (aac_config->sbr_ps_on_flag)
+			aac_cfg.aot = AAC_ENC_MODE_EAAC_P;
+		else if (aac_config->sbr_on_flag)
+			aac_cfg.aot = AAC_ENC_MODE_AAC_P;
+		else
+			aac_cfg.aot = AAC_ENC_MODE_AAC_LC;
+
+		switch (aac_config->format) {
+		case AUDIO_AAC_FORMAT_ADTS:
+			aac_cfg.format = 0x00;
+			break;
+		case AUDIO_AAC_FORMAT_LOAS:
+			aac_cfg.format = 0x01;
+			break;
+		case AUDIO_AAC_FORMAT_ADIF:
+			aac_cfg.format = 0x02;
+			break;
+		default:
+		case AUDIO_AAC_FORMAT_RAW:
+			aac_cfg.format = 0x03;
+		}
+		aac_cfg.ep_config = aac_config->ep_config;
+		aac_cfg.section_data_resilience =
+			aac_config->aac_section_data_resilience_flag;
+		aac_cfg.scalefactor_data_resilience =
+			aac_config->aac_scalefactor_data_resilience_flag;
+		aac_cfg.spectral_data_resilience =
+			aac_config->aac_spectral_data_resilience_flag;
+
+		pr_debug("%s:format=%x aot=%d  ch=%d sr=%d\n",
+			__func__, aac_cfg.format,
+			aac_cfg.aot, aac_cfg.ch_cfg,
+			aac_cfg.sample_rate);
+
+		/* Configure Media format block */
+		rc = q6asm_media_format_block_multi_aac(audio->ac, &aac_cfg);
+		if (rc < 0) {
+			pr_err("cmd media format block failed\n");
+			break;
+		}
+		rc = q6asm_set_encdec_chan_map(audio->ac, 2);
+		if (rc < 0) {
+			pr_err("%s: cmd set encdec_chan_map failed\n",
+				__func__);
+			break;
+		}
+		rc = audio_aio_enable(audio);
+		audio->eos_rsp = 0;
+		audio->eos_flag = 0;
+		if (!rc) {
+			audio->enabled = 1;
+		} else {
+			audio->enabled = 0;
+			pr_err("Audio Start procedure failed rc=%d\n", rc);
+			break;
+		}
+		pr_info("%s: AUDIO_START sessionid[%d]enable[%d]\n", __func__,
+						audio->ac->session,
+						audio->enabled);
+		if (audio->stopped == 1)
+			audio->stopped = 0;
+		break;
+	}
+	case AUDIO_SET_AAC_CONFIG: {
+		struct msm_audio_aac_config *aac_config;
+		uint16_t sce_left = 1, sce_right = 2;
+
+		if (arg == NULL) {
+			pr_err("%s: NULL config pointer\n", __func__);
+			rc = -EINVAL;
+			break;
+		}
+		memcpy(audio->codec_cfg, arg,
+				sizeof(struct msm_audio_aac_config));
+		aac_config = audio->codec_cfg;
+		if (aac_config->dual_mono_mode >
+		    AUDIO_AAC_DUAL_MONO_PL_SR) {
+			pr_err("%s:AUDIO_SET_AAC_CONFIG: Invalid dual_mono mode =%d\n",
+				 __func__, aac_config->dual_mono_mode);
+		} else {
+			/* convert the data from user into sce_left
+			 * and sce_right based on the definitions
+			 */
+			pr_debug("%s: AUDIO_SET_AAC_CONFIG: modify dual_mono mode =%d\n",
+				 __func__, aac_config->dual_mono_mode);
+			switch (aac_config->dual_mono_mode) {
+			case AUDIO_AAC_DUAL_MONO_PL_PR:
+				sce_left = 1;
+				sce_right = 1;
+				break;
+			case AUDIO_AAC_DUAL_MONO_SL_SR:
+				sce_left = 2;
+				sce_right = 2;
+				break;
+			case AUDIO_AAC_DUAL_MONO_SL_PR:
+				sce_left = 2;
+				sce_right = 1;
+				break;
+			case AUDIO_AAC_DUAL_MONO_PL_SR:
+			default:
+				sce_left = 1;
+				sce_right = 2;
+				break;
+			}
+			rc = q6asm_cfg_dual_mono_aac(audio->ac,
+						sce_left, sce_right);
+			if (rc < 0)
+				pr_err("%s: asm cmd dualmono failed rc=%d\n",
+							 __func__, rc);
+		}
+		break;
+	}
+	case AUDIO_SET_AAC_MIX_CONFIG:	{
+		u32 *mix_coeff = (u32 *)arg;
+		if (!arg) {
+			pr_err("%s: Invalid param for %s\n",
+				__func__, "AUDIO_SET_AAC_MIX_CONFIG");
+			rc = -EINVAL;
+			break;
+		}
+		pr_debug("%s: AUDIO_SET_AAC_MIX_CONFIG\n", __func__);
+		pr_debug("%s: value of coeff = %d\n",
+					__func__, *mix_coeff);
+		rc = q6asm_cfg_aac_sel_mix_coef(audio->ac, *mix_coeff);
+		if (rc < 0)
+			pr_err("%s: asm aac_sel_mix_coef failed rc=%d\n",
+				 __func__, rc);
+		break;
+	}
+	default:
+		pr_err("%s: Unknown ioctl cmd = %d", __func__, cmd);
+		rc = -EINVAL;
+		break;
+	}
+	return rc;
+}
+
+static long audio_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+	struct q6audio_aio *audio = file->private_data;
+	int rc = 0;
+
+	switch (cmd) {
+	case AUDIO_START: {
+		rc = audio_ioctl_shared(file, cmd, (void *)arg);
+		break;
+	}
+	case AUDIO_GET_AAC_CONFIG: {
+		if (copy_to_user((void *)arg, audio->codec_cfg,
+			sizeof(struct msm_audio_aac_config))) {
+			pr_err("%s: copy_to_user for AUDIO_GET_AAC_CONFIG failed\n"
+				, __func__);
+			rc = -EFAULT;
+			break;
+		}
+		break;
+	}
+	case AUDIO_SET_AAC_CONFIG: {
+		struct msm_audio_aac_config aac_config;
+		if (copy_from_user(&aac_config, (void *)arg,
+			sizeof(aac_config))) {
+			pr_err("%s: copy_from_user for AUDIO_SET_AAC_CONFIG failed\n"
+				, __func__);
+			rc = -EFAULT;
+		}
+		rc = audio_ioctl_shared(file, cmd, &aac_config);
+		if (rc)
+			pr_err("%s:AUDIO_SET_AAC_CONFIG failed. Rc= %d\n",
+				__func__, rc);
+		break;
+	}
+	case AUDIO_SET_AAC_MIX_CONFIG:	{
+		u32 mix_config;
+		pr_debug("%s, AUDIO_SET_AAC_MIX_CONFIG", __func__);
+		if (copy_from_user(&mix_config, (void *)arg,
+			sizeof(u32))) {
+			pr_err("%s: copy_from_user for AUDIO_SET_AAC_MIX_CONFIG failed\n",
+				__func__);
+			rc = -EFAULT;
+			break;
+		}
+		rc = audio_ioctl_shared(file, cmd, &mix_config);
+		if (rc)
+			pr_err("%s: AUDIO_SET_AAC_MIX_CONFIG failed. rc=%d\n",
+				__func__, rc);
+		break;
+	}
+	default: {
+		pr_debug("Calling utils ioctl\n");
+		rc = audio->codec_ioctl(file, cmd, arg);
+	}
+	}
+	return rc;
+}
+
+#ifdef CONFIG_COMPAT
+struct msm_audio_aac_config32 {
+	s16 format;
+	u16 audio_object;
+	u16 ep_config;  /* 0 ~ 3 useful only obj = ERLC */
+	u16 aac_section_data_resilience_flag;
+	u16 aac_scalefactor_data_resilience_flag;
+	u16 aac_spectral_data_resilience_flag;
+	u16 sbr_on_flag;
+	u16 sbr_ps_on_flag;
+	u16 dual_mono_mode;
+	u16 channel_configuration;
+	u16 sample_rate;
+};
+
+enum {
+	AUDIO_SET_AAC_CONFIG_32 = _IOW(AUDIO_IOCTL_MAGIC,
+		(AUDIO_MAX_COMMON_IOCTL_NUM+0), struct msm_audio_aac_config32),
+	AUDIO_GET_AAC_CONFIG_32 = _IOR(AUDIO_IOCTL_MAGIC,
+		(AUDIO_MAX_COMMON_IOCTL_NUM+1), struct msm_audio_aac_config32),
+};
+
+static long audio_compat_ioctl(struct file *file, unsigned int cmd,
+							unsigned long arg)
+{
+	struct q6audio_aio *audio = file->private_data;
+	int rc = 0;
+
+	switch (cmd) {
+	case AUDIO_START: {
+		rc = audio_ioctl_shared(file, cmd, (void *)arg);
+		break;
+	}
+	case AUDIO_GET_AAC_CONFIG_32: {
+		struct msm_audio_aac_config *aac_config;
+		struct msm_audio_aac_config32 aac_config_32;
+
+		memset(&aac_config_32, 0, sizeof(aac_config_32));
+
+		aac_config = (struct msm_audio_aac_config *)audio->codec_cfg;
+		aac_config_32.format = aac_config->format;
+		aac_config_32.audio_object = aac_config->audio_object;
+		aac_config_32.ep_config = aac_config->ep_config;
+		aac_config_32.aac_section_data_resilience_flag =
+			aac_config->aac_section_data_resilience_flag;
+		aac_config_32.aac_scalefactor_data_resilience_flag =
+			aac_config->aac_scalefactor_data_resilience_flag;
+		aac_config_32.aac_spectral_data_resilience_flag =
+			aac_config->aac_spectral_data_resilience_flag;
+		aac_config_32.sbr_on_flag = aac_config->sbr_on_flag;
+		aac_config_32.sbr_ps_on_flag = aac_config->sbr_ps_on_flag;
+		aac_config_32.dual_mono_mode = aac_config->dual_mono_mode;
+		aac_config_32.channel_configuration =
+			aac_config->channel_configuration;
+		aac_config_32.sample_rate = aac_config->sample_rate;
+
+		if (copy_to_user((void *)arg, &aac_config_32,
+			sizeof(aac_config_32))) {
+			pr_err("%s: copy_to_user for AUDIO_GET_AAC_CONFIG_32 failed\n",
+				__func__);
+			rc = -EFAULT;
+			break;
+		}
+		break;
+	}
+	case AUDIO_SET_AAC_CONFIG_32: {
+		struct msm_audio_aac_config aac_config;
+		struct msm_audio_aac_config32 aac_config_32;
+		pr_debug("%s: AUDIO_SET_AAC_CONFIG\n", __func__);
+
+		if (copy_from_user(&aac_config_32, (void *)arg,
+			sizeof(aac_config_32))) {
+			pr_err(
+				"%s: copy_from_user for AUDIO_SET_AAC_CONFIG_32 failed",
+				__func__);
+			rc = -EFAULT;
+			break;
+		}
+		aac_config.format = aac_config_32.format;
+		aac_config.audio_object = aac_config_32.audio_object;
+		aac_config.ep_config = aac_config_32.ep_config;
+		aac_config.aac_section_data_resilience_flag =
+			aac_config_32.aac_section_data_resilience_flag;
+		aac_config.aac_scalefactor_data_resilience_flag =
+			aac_config_32.aac_scalefactor_data_resilience_flag;
+		aac_config.aac_spectral_data_resilience_flag =
+			aac_config_32.aac_spectral_data_resilience_flag;
+		aac_config.sbr_on_flag = aac_config_32.sbr_on_flag;
+		aac_config.sbr_ps_on_flag = aac_config_32.sbr_ps_on_flag;
+		aac_config.dual_mono_mode = aac_config_32.dual_mono_mode;
+		aac_config.channel_configuration =
+				aac_config_32.channel_configuration;
+		aac_config.sample_rate = aac_config_32.sample_rate;
+
+		cmd = AUDIO_SET_AAC_CONFIG;
+		rc = audio_ioctl_shared(file, cmd, &aac_config);
+		if (rc)
+			pr_err("%s:AUDIO_SET_AAC_CONFIG failed. rc= %d\n",
+				__func__, rc);
+		break;
+	}
+	case AUDIO_SET_AAC_MIX_CONFIG: {
+		u32 mix_config;
+		pr_debug("%s, AUDIO_SET_AAC_MIX_CONFIG\n", __func__);
+		if (copy_from_user(&mix_config, (void *)arg,
+			sizeof(u32))) {
+			pr_err("%s: copy_from_user for AUDIO_SET_AAC_MIX_CONFIG failed\n"
+				, __func__);
+			rc = -EFAULT;
+			break;
+		}
+		rc = audio_ioctl_shared(file, cmd, &mix_config);
+		if (rc)
+			pr_err("%s: AUDIO_SET_AAC_MIX_CONFIG failed. rc=%d\n",
+				__func__, rc);
+		break;
+	}
+	default: {
+		pr_debug("Calling utils ioctl\n");
+		rc = audio->codec_compat_ioctl(file, cmd, arg);
+	}
+	}
+	return rc;
+}
+#else
+#define audio_compat_ioctl NULL
+#endif
+
+static int audio_open(struct inode *inode, struct file *file)
+{
+	struct q6audio_aio *audio = NULL;
+	int rc = 0;
+	struct msm_audio_aac_config *aac_config = NULL;
+
+#ifdef CONFIG_DEBUG_FS
+	/* 4 bytes for the decoder number, 1 byte for the terminating NUL */
+	char name[sizeof "msm_multi_aac_" + 5];
+#endif
+	audio = kzalloc(sizeof(struct q6audio_aio), GFP_KERNEL);
+
+	if (audio == NULL) {
+		pr_err("Could not allocate memory for aac decode driver\n");
+		return -ENOMEM;
+	}
+
+	audio->codec_cfg = kzalloc(sizeof(struct msm_audio_aac_config),
+					GFP_KERNEL);
+	if (audio->codec_cfg == NULL) {
+		pr_err("%s: Could not allocate memory for aac config\n",
+							 __func__);
+		kfree(audio);
+		return -ENOMEM;
+	}
+
+	aac_config = audio->codec_cfg;
+
+	audio->pcm_cfg.buffer_size = PCM_BUFSZ_MIN_AACM;
+	audio->miscdevice = &audio_multiaac_misc;
+	audio->wakelock_voted = false;
+	audio->audio_ws_mgr = &audio_multiaac_ws_mgr;
+	aac_config->dual_mono_mode = AUDIO_AAC_DUAL_MONO_INVALID;
+
+	audio->ac = q6asm_audio_client_alloc((app_cb) q6_audio_cb,
+					     (void *)audio);
+
+	if (!audio->ac) {
+		pr_err("Could not allocate memory for audio client\n");
+		kfree(audio->codec_cfg);
+		kfree(audio);
+		return -ENOMEM;
+	}
+	rc = audio_aio_open(audio, file);
+	if (rc < 0) {
+		pr_err("%s: audio_aio_open rc=%d\n",
+			__func__, rc);
+		goto fail;
+	}
+
+	/* open in T/NT mode */
+	if ((file->f_mode & FMODE_WRITE) && (file->f_mode & FMODE_READ)) {
+		rc = q6asm_open_read_write(audio->ac, FORMAT_LINEAR_PCM,
+					   FORMAT_MPEG4_MULTI_AAC);
+		if (rc < 0) {
+			pr_err("NT mode Open failed rc=%d\n", rc);
+			rc = -ENODEV;
+			goto fail;
+		}
+		audio->feedback = NON_TUNNEL_MODE;
+		/* open AAC decoder; expected frames is always 1 */
+		/* audio->buf_cfg.frames_per_buf = 0x01; */
+		audio->buf_cfg.meta_info_enable = 0x01;
+	} else if ((file->f_mode & FMODE_WRITE) &&
+			!(file->f_mode & FMODE_READ)) {
+		rc = q6asm_open_write(audio->ac, FORMAT_MPEG4_MULTI_AAC);
+		if (rc < 0) {
+			pr_err("T mode Open failed rc=%d\n", rc);
+			rc = -ENODEV;
+			goto fail;
+		}
+		audio->feedback = TUNNEL_MODE;
+		audio->buf_cfg.meta_info_enable = 0x00;
+	} else {
+		pr_err("Not supported mode\n");
+		rc = -EACCES;
+		goto fail;
+	}
+
+#ifdef CONFIG_DEBUG_FS
+	snprintf(name, sizeof name, "msm_multi_aac_%04x", audio->ac->session);
+	audio->dentry = debugfs_create_file(name, S_IFREG | S_IRUGO,
+					    NULL, (void *)audio,
+					    &audio_aac_debug_fops);
+
+	if (IS_ERR(audio->dentry))
+		pr_debug("debugfs_create_file failed\n");
+#endif
+	pr_info("%s:AAC 5.1 Decoder OPEN success mode[%d]session[%d]\n",
+		__func__, audio->feedback, audio->ac->session);
+	return rc;
+fail:
+	q6asm_audio_client_free(audio->ac);
+	kfree(audio->codec_cfg);
+	kfree(audio);
+	return rc;
+}
+
+static const struct file_operations audio_aac_fops = {
+	.owner = THIS_MODULE,
+	.open = audio_open,
+	.release = audio_aio_release,
+	.unlocked_ioctl = audio_ioctl,
+	.fsync = audio_aio_fsync,
+	.compat_ioctl = audio_compat_ioctl
+};
+
+static struct miscdevice audio_multiaac_misc = {
+	.minor = MISC_DYNAMIC_MINOR,
+	.name = "msm_multi_aac",
+	.fops = &audio_aac_fops,
+};
+
+static int __init audio_aac_init(void)
+{
+	int ret = misc_register(&audio_multiaac_misc);
+
+	if (ret == 0)
+		device_init_wakeup(audio_multiaac_misc.this_device, true);
+	audio_multiaac_ws_mgr.ref_cnt = 0;
+	mutex_init(&audio_multiaac_ws_mgr.ws_lock);
+
+	return ret;
+}
+
+device_initcall(audio_aac_init);
diff -Nruw linux-4.4.115-fbx/drivers/misc/qcom./qdsp6v2/audio_qcelp.c linux-4.4.115-fbx/drivers/misc/qcom/qdsp6v2/audio_qcelp.c
--- linux-4.4.115-fbx/drivers/misc/qcom./qdsp6v2/audio_qcelp.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/misc/qcom/qdsp6v2/audio_qcelp.c	2019-01-22 16:16:24.747257672 +0100
@@ -0,0 +1,193 @@
+/* qcelp(v13k) audio output device
+ *
+ * Copyright (C) 2008 Google, Inc.
+ * Copyright (C) 2008 HTC Corporation
+ * Copyright (c) 2011-2016, The Linux Foundation. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include "audio_utils_aio.h"
+
+#define FRAME_SIZE_DEC_QCELP  ((32) + sizeof(struct dec_meta_in))
+
+static struct miscdevice audio_qcelp_misc;
+static struct ws_mgr audio_qcelp_ws_mgr;
+
+#ifdef CONFIG_DEBUG_FS
+static const struct file_operations audio_qcelp_debug_fops = {
+	.read = audio_aio_debug_read,
+	.open = audio_aio_debug_open,
+};
+#endif
+
+static long audio_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+	struct q6audio_aio *audio = file->private_data;
+	int rc = 0;
+
+	switch (cmd) {
+	case AUDIO_START: {
+		pr_debug("%s[%pK]: AUDIO_START session_id[%d]\n", __func__,
+						audio, audio->ac->session);
+		if (audio->feedback == NON_TUNNEL_MODE) {
+			/* Configure PCM output block */
+			rc = q6asm_enc_cfg_blk_pcm(audio->ac,
+					audio->pcm_cfg.sample_rate,
+					audio->pcm_cfg.channel_count);
+			if (rc < 0) {
+				pr_err("pcm output block config failed\n");
+				break;
+			}
+		}
+
+		rc = audio_aio_enable(audio);
+		audio->eos_rsp = 0;
+		audio->eos_flag = 0;
+		if (!rc) {
+			audio->enabled = 1;
+		} else {
+			audio->enabled = 0;
+			pr_err("Audio Start procedure failed rc=%d\n", rc);
+			break;
+		}
+		pr_debug("%s: AUDIO_START sessionid[%d]enable[%d]\n", __func__,
+						audio->ac->session,
+						audio->enabled);
+		if (audio->stopped == 1)
+			audio->stopped = 0;
+		break;
+	}
+	default:
+		pr_debug("%s[%pK]: Calling utils ioctl\n", __func__, audio);
+		rc = audio->codec_ioctl(file, cmd, arg);
+	}
+	return rc;
+}
+
+static int audio_open(struct inode *inode, struct file *file)
+{
+	struct q6audio_aio *audio = NULL;
+	int rc = 0;
+
+#ifdef CONFIG_DEBUG_FS
+	/* 4 bytes for the decoder number, 1 byte for the terminating NUL */
+	char name[sizeof "msm_qcelp_" + 5];
+#endif
+	audio = kzalloc(sizeof(struct q6audio_aio), GFP_KERNEL);
+
+	if (audio == NULL) {
+		pr_err("Could not allocate memory for qcelp decode driver\n");
+		return -ENOMEM;
+	}
+
+	/* Settings will be re-config at AUDIO_SET_CONFIG,
+	 * but at least we need to have initial config
+	 */
+	audio->str_cfg.buffer_size = FRAME_SIZE_DEC_QCELP;
+	audio->str_cfg.buffer_count = FRAME_NUM;
+	audio->pcm_cfg.buffer_size = PCM_BUFSZ_MIN;
+	audio->pcm_cfg.buffer_count = PCM_BUF_COUNT;
+	audio->pcm_cfg.sample_rate = 8000;
+	audio->pcm_cfg.channel_count = 1;
+	audio->miscdevice = &audio_qcelp_misc;
+	audio->wakelock_voted = false;
+	audio->audio_ws_mgr = &audio_qcelp_ws_mgr;
+
+	audio->ac = q6asm_audio_client_alloc((app_cb) q6_audio_cb,
+					     (void *)audio);
+
+	if (!audio->ac) {
+		pr_err("Could not allocate memory for audio client\n");
+		kfree(audio);
+		return -ENOMEM;
+	}
+	rc = audio_aio_open(audio, file);
+	if (rc < 0) {
+		pr_err("%s: audio_aio_open rc=%d\n",
+			__func__, rc);
+		goto fail;
+	}
+
+	/* open in T/NT mode */
+	if ((file->f_mode & FMODE_WRITE) && (file->f_mode & FMODE_READ)) {
+		rc = q6asm_open_read_write(audio->ac, FORMAT_LINEAR_PCM,
+					   FORMAT_V13K);
+		if (rc < 0) {
+			pr_err("NT mode Open failed rc=%d\n", rc);
+			rc = -ENODEV;
+			goto fail;
+		}
+		audio->feedback = NON_TUNNEL_MODE;
+		audio->buf_cfg.frames_per_buf = 0x01;
+		audio->buf_cfg.meta_info_enable = 0x01;
+	} else if ((file->f_mode & FMODE_WRITE) &&
+			!(file->f_mode & FMODE_READ)) {
+		rc = q6asm_open_write(audio->ac, FORMAT_V13K);
+		if (rc < 0) {
+			pr_err("T mode Open failed rc=%d\n", rc);
+			rc = -ENODEV;
+			goto fail;
+		}
+		audio->feedback = TUNNEL_MODE;
+		audio->buf_cfg.meta_info_enable = 0x00;
+	} else {
+		pr_err("Not supported mode\n");
+		rc = -EACCES;
+		goto fail;
+	}
+
+#ifdef CONFIG_DEBUG_FS
+	snprintf(name, sizeof name, "msm_qcelp_%04x", audio->ac->session);
+	audio->dentry = debugfs_create_file(name, S_IFREG | S_IRUGO,
+					    NULL, (void *)audio,
+					    &audio_qcelp_debug_fops);
+
+	if (IS_ERR(audio->dentry))
+		pr_debug("debugfs_create_file failed\n");
+#endif
+	pr_info("%s:dec success mode[%d]session[%d]\n", __func__,
+						audio->feedback,
+						audio->ac->session);
+	return 0;
+fail:
+	q6asm_audio_client_free(audio->ac);
+	kfree(audio);
+	return rc;
+}
+
+static const struct file_operations audio_qcelp_fops = {
+	.owner = THIS_MODULE,
+	.open = audio_open,
+	.release = audio_aio_release,
+	.unlocked_ioctl = audio_ioctl,
+	.fsync = audio_aio_fsync,
+};
+
+static struct miscdevice audio_qcelp_misc = {
+	.minor = MISC_DYNAMIC_MINOR,
+	.name = "msm_qcelp",
+	.fops = &audio_qcelp_fops,
+};
+
+static int __init audio_qcelp_init(void)
+{
+	int ret = misc_register(&audio_qcelp_misc);
+
+	if (ret == 0)
+		device_init_wakeup(audio_qcelp_misc.this_device, true);
+	audio_qcelp_ws_mgr.ref_cnt = 0;
+	mutex_init(&audio_qcelp_ws_mgr.ws_lock);
+
+	return ret;
+}
+
+device_initcall(audio_qcelp_init);
diff -Nruw linux-4.4.115-fbx/drivers/misc/qcom./qdsp6v2/audio_utils_aio.c linux-4.4.115-fbx/drivers/misc/qcom/qdsp6v2/audio_utils_aio.c
--- linux-4.4.115-fbx/drivers/misc/qcom./qdsp6v2/audio_utils_aio.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/misc/qcom/qdsp6v2/audio_utils_aio.c	2019-01-22 16:16:24.747257672 +0100
@@ -0,0 +1,2140 @@
+/* Copyright (C) 2008 Google, Inc.
+ * Copyright (C) 2008 HTC Corporation
+ * Copyright (c) 2009-2017, The Linux Foundation. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/miscdevice.h>
+#include <linux/uaccess.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+#include <linux/dma-mapping.h>
+#include <linux/slab.h>
+#include <linux/atomic.h>
+#include <asm/ioctls.h>
+#include <linux/debugfs.h>
+#include <linux/msm_audio_ion.h>
+#include <linux/compat.h>
+#include <linux/mutex.h>
+#include "audio_utils_aio.h"
+#ifdef CONFIG_USE_DEV_CTRL_VOLUME
+#include <linux/qdsp6v2/audio_dev_ctl.h>
+#endif /*CONFIG_USE_DEV_CTRL_VOLUME*/
+static DEFINE_MUTEX(lock);
+#ifdef CONFIG_DEBUG_FS
+
+int audio_aio_debug_open(struct inode *inode, struct file *file)
+{
+	file->private_data = inode->i_private;
+	return 0;
+}
+
+ssize_t audio_aio_debug_read(struct file *file, char __user *buf,
+				size_t count, loff_t *ppos)
+{
+	const int debug_bufmax = 4096;
+	static char buffer[4096];
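+	/* shared scratch buffer, serialised by the file-scope 'lock' mutex */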
+	int n = 0;
+	struct q6audio_aio *audio;
+
+	mutex_lock(&lock);
+	if (file->private_data != NULL) {
+		audio = file->private_data;
+		mutex_lock(&audio->lock);
+		n = scnprintf(buffer, debug_bufmax, "opened %d\n",
+				audio->opened);
+		n += scnprintf(buffer + n, debug_bufmax - n,
+				"enabled %d\n", audio->enabled);
+		n += scnprintf(buffer + n, debug_bufmax - n,
+				"stopped %d\n", audio->stopped);
+		n += scnprintf(buffer + n, debug_bufmax - n,
+				"feedback %d\n", audio->feedback);
+		mutex_unlock(&audio->lock);
+		/* The following variables are only useful for debugging
+		 * when playback halts unexpectedly, so no mutual exclusion
+		 * is enforced.
+		 */
+		n += scnprintf(buffer + n, debug_bufmax - n,
+				"wflush %d\n", audio->wflush);
+		n += scnprintf(buffer + n, debug_bufmax - n,
+				"rflush %d\n", audio->rflush);
+		n += scnprintf(buffer + n, debug_bufmax - n,
+				"inqueue empty %d\n",
+				list_empty(&audio->in_queue));
+		n += scnprintf(buffer + n, debug_bufmax - n,
+				"outqueue empty %d\n",
+				list_empty(&audio->out_queue));
+	}
+	mutex_unlock(&lock);
+	buffer[n] = 0;
+	return simple_read_from_buffer(buf, count, ppos, buffer, n);
+}
+#endif
+
+static long audio_aio_ioctl(struct file *file, unsigned int cmd,
+				unsigned long arg);
+#ifdef CONFIG_COMPAT
+static long audio_aio_compat_ioctl(struct file *file, unsigned int cmd,
+				unsigned long arg);
+#else
+#define audio_aio_compat_ioctl NULL
+#endif
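+
+/*
+ * Stamp a meta buffer as end-of-stream: an all-ones frame count with
+ * the EOS flag set, which userspace recognises as the final buffer.
+ */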
+int insert_eos_buf(struct q6audio_aio *audio,
+		struct audio_aio_buffer_node *buf_node)
+{
+	struct dec_meta_out *eos_buf = buf_node->kvaddr;
+	pr_debug("%s[%pK]:insert_eos_buf\n", __func__, audio);
+	eos_buf->num_of_frames = 0xFFFFFFFF;
+	eos_buf->meta_out_dsp[0].offset_to_frame = 0x0;
+	eos_buf->meta_out_dsp[0].nflags = AUDIO_DEC_EOS_SET;
+	return sizeof(struct dec_meta_out) +
+		sizeof(eos_buf->meta_out_dsp[0]);
+}
+
+/*
+ * Update the driver/DSP read buffers on a flush, since the DSP output
+ * may not have proper values set.
+ */
+static int insert_meta_data_flush(struct q6audio_aio *audio,
+	struct audio_aio_buffer_node *buf_node)
+{
+	struct dec_meta_out *meta_data = buf_node->kvaddr;
+	meta_data->num_of_frames = 0x0;
+	meta_data->meta_out_dsp[0].offset_to_frame = 0x0;
+	meta_data->meta_out_dsp[0].nflags = 0x0;
+	return sizeof(struct dec_meta_out) +
+		sizeof(meta_data->meta_out_dsp[0]);
+}
+
+static int audio_aio_ion_lookup_vaddr(struct q6audio_aio *audio, void *addr,
+					unsigned long len,
+					struct audio_aio_ion_region **region)
+{
+	struct audio_aio_ion_region *region_elt;
+
+	int match_count = 0;
+
+	*region = NULL;
+
+	/* find the registered ION region that fully contains [addr, addr + len) */
+	list_for_each_entry(region_elt, &audio->ion_region_queue, list) {
+		if (addr >= region_elt->vaddr &&
+			addr < region_elt->vaddr + region_elt->len &&
+			addr + len <= region_elt->vaddr + region_elt->len &&
+			addr + len > addr) {
+			/* the last condition guards against integer
+			 * addition overflow; an inner vaddr is allowed
+			 * since the caller may pass an address inside a
+			 * registered ION buffer
+			 */
+
+			match_count++;
+			if (!*region)
+				*region = region_elt;
+		}
+	}
+
+	if (match_count > 1) {
+		pr_err("%s[%pK]:multiple hits for vaddr %pK, len %ld\n",
+			__func__, audio, addr, len);
+		list_for_each_entry(region_elt, &audio->ion_region_queue,
+					list) {
+			if (addr >= region_elt->vaddr &&
+			addr < region_elt->vaddr + region_elt->len &&
+			addr + len <= region_elt->vaddr + region_elt->len &&
+			addr + len > addr)
+				pr_err("\t%s[%pK]:%pK, %ld --> %pK\n",
+					__func__, audio,
+					region_elt->vaddr,
+					region_elt->len,
+					&region_elt->paddr);
+		}
+	}
+
+	return *region ? 0 : -1;
+}
+
+static phys_addr_t audio_aio_ion_fixup(struct q6audio_aio *audio, void *addr,
+				unsigned long len, int ref_up, void **kvaddr)
+{
+	struct audio_aio_ion_region *region;
+	phys_addr_t paddr;
+	int ret;
+
+	ret = audio_aio_ion_lookup_vaddr(audio, addr, len, &region);
+	if (ret) {
+		pr_err("%s[%pK]:lookup (%pK, %ld) failed\n",
+				__func__, audio, addr, len);
+		return 0;
+	}
+	if (ref_up)
+		region->ref_cnt++;
+	else
+		region->ref_cnt--;
+	pr_debug("%s[%pK]:found region %pK ref_cnt %d\n",
+			__func__, audio, region, region->ref_cnt);
+	paddr = region->paddr + (addr - region->vaddr);
+	/* provide kernel virtual address for accessing meta information */
+	if (kvaddr)
+		*kvaddr = (void *) (region->kvaddr + (addr - region->vaddr));
+	return paddr;
+}
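+
+/*
+ * Worked example with hypothetical values: for a region registered with
+ * vaddr = 0x1000, paddr = 0x8000 and len = 0x2000, fixing up addr = 0x1400
+ * yields paddr = 0x8000 + (0x1400 - 0x1000) = 0x8400, and the same 0x400
+ * offset is applied to region->kvaddr when a kernel mapping is requested.
+ */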
+
+static int audio_aio_pause(struct q6audio_aio  *audio)
+{
+	int rc = -EINVAL;
+
+	pr_debug("%s[%pK], enabled = %d\n", __func__, audio,
+			audio->enabled);
+	if (audio->enabled) {
+		rc = q6asm_cmd(audio->ac, CMD_PAUSE);
+		if (rc < 0)
+			pr_err("%s[%pK]: pause cmd failed rc=%d\n",
+				__func__, audio, rc);
+
+		if (rc == 0) {
+			/* Send suspend only if pause was successful */
+			rc = q6asm_cmd(audio->ac, CMD_SUSPEND);
+			if (rc < 0)
+				pr_err("%s[%pK]: suspend cmd failed rc=%d\n",
+					__func__, audio, rc);
+		} else {
+			pr_err("%s[%pK]: not sending suspend since pause failed\n",
+				__func__, audio);
+		}
+	} else {
+		pr_err("%s[%pK]: Driver not enabled\n", __func__, audio);
+	}
+	return rc;
+}
+
+static int audio_aio_flush(struct q6audio_aio  *audio)
+{
+	int rc = 0;
+
+	if (audio->enabled) {
+		/* Implicitly issue a pause to the decoder before
+		 * flushing if it is not in the pause state
+		 */
+		if (!(audio->drv_status & ADRV_STATUS_PAUSE)) {
+			rc = audio_aio_pause(audio);
+			if (rc < 0)
+				pr_err("%s[%pK}: pause cmd failed rc=%d\n",
+					__func__, audio,
+					rc);
+			else
+				audio->drv_status |= ADRV_STATUS_PAUSE;
+		}
+		rc = q6asm_cmd(audio->ac, CMD_FLUSH);
+		if (rc < 0)
+			pr_err("%s[%pK]: flush cmd failed rc=%d\n",
+				__func__, audio, rc);
+		/* Not in stopped state, re-enable the stream */
+		if (audio->stopped == 0) {
+			rc = audio_aio_enable(audio);
+			if (rc) {
+				pr_err("%s[%pK]:audio re-enable failed\n",
+					__func__, audio);
+			} else {
+				audio->enabled = 1;
+				if (audio->drv_status & ADRV_STATUS_PAUSE)
+					audio->drv_status &= ~ADRV_STATUS_PAUSE;
+			}
+		}
+	}
+	pr_debug("%s[%pK]:in_bytes %d\n",
+			__func__, audio, atomic_read(&audio->in_bytes));
+	pr_debug("%s[%pK]:in_samples %d\n",
+			__func__, audio, atomic_read(&audio->in_samples));
+	atomic_set(&audio->in_bytes, 0);
+	atomic_set(&audio->in_samples, 0);
+	return rc;
+}
+
+static int audio_aio_outport_flush(struct q6audio_aio *audio)
+{
+	int rc;
+
+	rc = q6asm_cmd(audio->ac, CMD_OUT_FLUSH);
+	if (rc < 0)
+		pr_err("%s[%pK}: output port flush cmd failed rc=%d\n",
+			__func__, audio, rc);
+	return rc;
+}
+
+/* Write buffer to DSP / Handle Ack from DSP */
+void audio_aio_async_write_ack(struct q6audio_aio *audio, uint32_t token,
+				uint32_t *payload)
+{
+	unsigned long flags;
+	union msm_audio_event_payload event_payload;
+	struct audio_aio_buffer_node *used_buf;
+
+	/* Ignore the ack if a write flush is in progress */
+	if (audio->wflush)
+		return;
+
+	spin_lock_irqsave(&audio->dsp_lock, flags);
+	if (list_empty(&audio->out_queue)) {
+		pr_warning("%s: ignore unexpected event from dsp\n", __func__);
+		spin_unlock_irqrestore(&audio->dsp_lock, flags);
+		return;
+	}
+	used_buf = list_first_entry(&audio->out_queue,
+					struct audio_aio_buffer_node, list);
+	if (token == used_buf->token) {
+		list_del(&used_buf->list);
+		spin_unlock_irqrestore(&audio->dsp_lock, flags);
+		pr_debug("%s[%pK]:consumed buffer\n", __func__, audio);
+		event_payload.aio_buf = used_buf->buf;
+		audio_aio_post_event(audio, AUDIO_EVENT_WRITE_DONE,
+					event_payload);
+		kfree(used_buf);
+		if (list_empty(&audio->out_queue) &&
+			(audio->drv_status & ADRV_STATUS_FSYNC)) {
+			pr_debug("%s[%pK]: list is empty, reached EOS in Tunnel\n",
+				 __func__, audio);
+			wake_up(&audio->write_wait);
+		}
+	} else {
+		pr_err("%s[%pK]:expected=%x ret=%x\n",
+			__func__, audio, used_buf->token, token);
+		spin_unlock_irqrestore(&audio->dsp_lock, flags);
+	}
+}
+
+/* ------------------- device --------------------- */
+void audio_aio_async_out_flush(struct q6audio_aio *audio)
+{
+	struct audio_aio_buffer_node *buf_node;
+	struct list_head *ptr, *next;
+	union msm_audio_event_payload payload;
+	unsigned long flags;
+
+	pr_debug("%s[%pK}\n", __func__, audio);
+	/* EOS followed by flush, EOS response not guranteed, free EOS i/p
+	buffer */
+	spin_lock_irqsave(&audio->dsp_lock, flags);
+
+	if (audio->eos_flag && (audio->eos_write_payload.aio_buf.buf_addr)) {
+		pr_debug("%s[%pK]: EOS followed by flush received,acknowledge"
+			" eos i/p buffer immediately\n", __func__, audio);
+		audio_aio_post_event(audio, AUDIO_EVENT_WRITE_DONE,
+				audio->eos_write_payload);
+		memset(&audio->eos_write_payload , 0,
+			sizeof(union msm_audio_event_payload));
+	}
+	spin_unlock_irqrestore(&audio->dsp_lock, flags);
+	list_for_each_safe(ptr, next, &audio->out_queue) {
+		buf_node = list_entry(ptr, struct audio_aio_buffer_node, list);
+		list_del(&buf_node->list);
+		payload.aio_buf = buf_node->buf;
+		audio_aio_post_event(audio, AUDIO_EVENT_WRITE_DONE, payload);
+		kfree(buf_node);
+		pr_debug("%s[%pK]: Propagate WRITE_DONE during flush\n",
+				__func__, audio);
+	}
+}
+
+void audio_aio_async_in_flush(struct q6audio_aio *audio)
+{
+	struct audio_aio_buffer_node *buf_node;
+	struct list_head *ptr, *next;
+	union msm_audio_event_payload payload;
+
+	pr_debug("%s[%pK]\n", __func__, audio);
+	list_for_each_safe(ptr, next, &audio->in_queue) {
+		buf_node = list_entry(ptr, struct audio_aio_buffer_node, list);
+		list_del(&buf_node->list);
+		/* Forcefully send an o/p EOS buffer after the flush if no
+		 * EOS response was received from the DSP even after sending
+		 * the EOS command
+		 */
+		if ((audio->eos_rsp != 1) && audio->eos_flag) {
+			pr_debug("%s[%pK]: send eos on o/p buffer during flush\n",
+				 __func__, audio);
+			payload.aio_buf = buf_node->buf;
+			payload.aio_buf.data_len =
+					insert_eos_buf(audio, buf_node);
+			audio->eos_flag = 0;
+		} else {
+			payload.aio_buf = buf_node->buf;
+			payload.aio_buf.data_len =
+					insert_meta_data_flush(audio, buf_node);
+		}
+		audio_aio_post_event(audio, AUDIO_EVENT_READ_DONE, payload);
+		kfree(buf_node);
+		pr_debug("%s[%pK]: Propagate READ_DONE during flush\n",
+				__func__, audio);
+	}
+}
+
+int audio_aio_enable(struct q6audio_aio  *audio)
+{
+	/* 2nd arg: 0 -> run immediately,
+	 * 3rd arg: 0 -> msw_ts, 4th arg: 0 -> lsw_ts
+	 */
+	return q6asm_run(audio->ac, 0x00, 0x00, 0x00);
+}
+
+int audio_aio_disable(struct q6audio_aio *audio)
+{
+	int rc = 0;
+
+	if (audio->opened) {
+		audio->enabled = 0;
+		audio->opened = 0;
+		pr_debug("%s[%pK]: inbytes[%d] insamples[%d]\n", __func__,
+			audio, atomic_read(&audio->in_bytes),
+			atomic_read(&audio->in_samples));
+		/* Close the session */
+		rc = q6asm_cmd(audio->ac, CMD_CLOSE);
+		if (rc < 0)
+			pr_err("%s[%pK]:Failed to close the session rc=%d\n",
+				__func__, audio, rc);
+		audio->stopped = 1;
+		wake_up(&audio->write_wait);
+		wake_up(&audio->cmd_wait);
+	}
+	pr_debug("%s[%pK]:enabled[%d]\n", __func__, audio, audio->enabled);
+	return rc;
+}
+
+void audio_aio_reset_ion_region(struct q6audio_aio *audio)
+{
+	struct audio_aio_ion_region *region;
+	struct list_head *ptr, *next;
+
+	list_for_each_safe(ptr, next, &audio->ion_region_queue) {
+		region = list_entry(ptr, struct audio_aio_ion_region, list);
+		list_del(&region->list);
+		msm_audio_ion_free_legacy(audio->client, region->handle);
+		kfree(region);
+	}
+}
+
+void audio_aio_reset_event_queue(struct q6audio_aio *audio)
+{
+	unsigned long flags;
+	struct audio_aio_event *drv_evt;
+	struct list_head *ptr, *next;
+
+	spin_lock_irqsave(&audio->event_queue_lock, flags);
+	list_for_each_safe(ptr, next, &audio->event_queue) {
+		drv_evt = list_first_entry(&audio->event_queue,
+				   struct audio_aio_event, list);
+		list_del(&drv_evt->list);
+		kfree(drv_evt);
+	}
+	list_for_each_safe(ptr, next, &audio->free_event_queue) {
+		drv_evt = list_first_entry(&audio->free_event_queue,
+				   struct audio_aio_event, list);
+		list_del(&drv_evt->list);
+		kfree(drv_evt);
+	}
+	spin_unlock_irqrestore(&audio->event_queue_lock, flags);
+}
+
+static void audio_aio_unmap_ion_region(struct q6audio_aio *audio)
+{
+	struct audio_aio_ion_region *region;
+	struct list_head *ptr, *next;
+	int rc = -EINVAL;
+
+	pr_debug("%s[%pK]:\n", __func__, audio);
+	list_for_each_safe(ptr, next, &audio->ion_region_queue) {
+		region = list_entry(ptr, struct audio_aio_ion_region, list);
+		if (region != NULL) {
+			pr_debug("%s[%pK]: phy_address = 0x%pK\n",
+				__func__, audio, &region->paddr);
+			rc = q6asm_memory_unmap(audio->ac,
+						region->paddr, IN);
+			if (rc < 0)
+				pr_err("%s[%pK]: memory unmap failed\n",
+					__func__, audio);
+		}
+	}
+}
+
+#ifdef CONFIG_USE_DEV_CTRL_VOLUME
+
+static void audio_aio_listener(u32 evt_id, union auddev_evt_data *evt_payload,
+			void *private_data)
+{
+	struct q6audio_aio *audio = (struct q6audio_aio *) private_data;
+	int rc = 0;
+
+	switch (evt_id) {
+	case AUDDEV_EVT_STREAM_VOL_CHG:
+		audio->volume = evt_payload->session_vol;
+		pr_debug("%s[%pK]: AUDDEV_EVT_STREAM_VOL_CHG, stream vol %d, enabled = %d\n",
+			__func__, audio, audio->volume, audio->enabled);
+		if (audio->enabled == 1) {
+			if (audio->ac) {
+				rc = q6asm_set_volume(audio->ac, audio->volume);
+				if (rc < 0) {
+					pr_err("%s[%pK]: Send Volume command failed rc=%d\n",
+						__func__, audio, rc);
+				}
+			}
+		}
+		break;
+	default:
+		pr_err("%s[%pK]:ERROR:wrong event\n", __func__, audio);
+		break;
+	}
+}
+
+int register_volume_listener(struct q6audio_aio *audio)
+{
+	int rc = 0;
+
+	audio->device_events = AUDDEV_EVT_STREAM_VOL_CHG;
+	audio->drv_status &= ~ADRV_STATUS_PAUSE;
+
+	rc = auddev_register_evt_listner(audio->device_events,
+					AUDDEV_CLNT_DEC,
+					audio->ac->session,
+					audio_aio_listener,
+					(void *)audio);
+	if (rc < 0) {
+		pr_err("%s[%pK]: Event listener failed\n", __func__, audio);
+		rc = -EACCES;
+	}
+	return rc;
+}
+
+void unregister_volume_listener(struct q6audio_aio *audio)
+{
+	auddev_unregister_evt_listner(AUDDEV_CLNT_DEC, audio->ac->session);
+}
+
+int enable_volume_ramp(struct q6audio_aio *audio)
+{
+	int rc = 0;
+	struct asm_softpause_params softpause;
+	struct asm_softvolume_params softvol;
+
+	if (audio->ac == NULL)
+		return -EINVAL;
+	pr_debug("%s[%pK]\n", __func__, audio);
+	softpause.enable = SOFT_PAUSE_ENABLE;
+	softpause.period = SOFT_PAUSE_PERIOD;
+	softpause.step = SOFT_PAUSE_STEP;
+	softpause.rampingcurve = SOFT_PAUSE_CURVE_LINEAR;
+
+	softvol.period = SOFT_VOLUME_PERIOD;
+	softvol.step = SOFT_VOLUME_STEP;
+	softvol.rampingcurve = SOFT_VOLUME_CURVE_LINEAR;
+
+	if (softpause.rampingcurve == SOFT_PAUSE_CURVE_LINEAR)
+		softpause.step = SOFT_PAUSE_STEP_LINEAR;
+	if (softvol.rampingcurve == SOFT_VOLUME_CURVE_LINEAR)
+		softvol.step = SOFT_VOLUME_STEP_LINEAR;
+	rc = q6asm_set_volume(audio->ac, audio->volume);
+	if (rc < 0) {
+		pr_err("%s: Send Volume command failed rc=%d\n",
+			__func__, rc);
+		return rc;
+	}
+	rc = q6asm_set_softpause(audio->ac, &softpause);
+	if (rc < 0) {
+		pr_err("%s: Send SoftPause Param failed rc=%d\n",
+			__func__, rc);
+		return rc;
+	}
+	rc = q6asm_set_softvolume(audio->ac, &softvol);
+	if (rc < 0) {
+		pr_err("%s: Send SoftVolume Param failed rc=%d\n",
+		__func__, rc);
+		return rc;
+	}
+	/* disable mute by default */
+	rc = q6asm_set_mute(audio->ac, 0);
+	if (rc < 0) {
+		pr_err("%s: Send mute command failed rc=%d\n",
+			__func__, rc);
+		return rc;
+	}
+	return rc;
+}
+
+#else /*CONFIG_USE_DEV_CTRL_VOLUME*/
+int register_volume_listener(struct q6audio_aio *audio)
+{
+	return 0; /* do nothing */
+}
+void unregister_volume_listener(struct q6audio_aio *audio)
+{
+	/* do nothing */
+}
+int enable_volume_ramp(struct q6audio_aio *audio)
+{
+	return 0; /* do nothing */
+}
+#endif /*CONFIG_USE_DEV_CTRL_VOLUME*/
+
+int audio_aio_release(struct inode *inode, struct file *file)
+{
+	struct q6audio_aio *audio = file->private_data;
+	pr_debug("%s[%pK]\n", __func__, audio);
+	mutex_lock(&lock);
+	mutex_lock(&audio->lock);
+	mutex_lock(&audio->read_lock);
+	mutex_lock(&audio->write_lock);
+	audio->wflush = 1;
+	if (audio->wakelock_voted &&
+		(audio->audio_ws_mgr != NULL) &&
+		(audio->miscdevice != NULL)) {
+		audio->wakelock_voted = false;
+		mutex_lock(&audio->audio_ws_mgr->ws_lock);
+		if ((audio->audio_ws_mgr->ref_cnt > 0) &&
+				(--audio->audio_ws_mgr->ref_cnt == 0)) {
+			pm_relax(audio->miscdevice->this_device);
+		}
+		mutex_unlock(&audio->audio_ws_mgr->ws_lock);
+	}
+	if (audio->enabled)
+		audio_aio_flush(audio);
+	audio->wflush = 0;
+	audio->drv_ops.out_flush(audio);
+	audio->drv_ops.in_flush(audio);
+	audio_aio_disable(audio);
+	audio_aio_unmap_ion_region(audio);
+	audio_aio_reset_ion_region(audio);
+	msm_audio_ion_client_destroy(audio->client);
+	audio->event_abort = 1;
+	wake_up(&audio->event_wait);
+	audio_aio_reset_event_queue(audio);
+	q6asm_audio_client_free(audio->ac);
+	mutex_unlock(&audio->write_lock);
+	mutex_unlock(&audio->read_lock);
+	mutex_unlock(&audio->lock);
+	mutex_destroy(&audio->lock);
+	mutex_destroy(&audio->read_lock);
+	mutex_destroy(&audio->write_lock);
+	mutex_destroy(&audio->get_event_lock);
+	unregister_volume_listener(audio);
+
+#ifdef CONFIG_DEBUG_FS
+	if (audio->dentry)
+		debugfs_remove(audio->dentry);
+#endif
+	kfree(audio->codec_cfg);
+	kfree(audio);
+	file->private_data = NULL;
+	mutex_unlock(&lock);
+	return 0;
+}
+
+int audio_aio_fsync(struct file *file, loff_t start, loff_t end, int datasync)
+{
+	int rc = 0;
+	struct q6audio_aio *audio = file->private_data;
+
+	if (!audio->enabled || audio->feedback)
+		return -EINVAL;
+
+	/* Block the client from sending more data */
+	mutex_lock(&audio->lock);
+	audio->drv_status |= ADRV_STATUS_FSYNC;
+	mutex_unlock(&audio->lock);
+
+	pr_debug("%s[%pK]:\n", __func__, audio);
+
+	audio->eos_rsp = 0;
+
+	pr_debug("%s[%pK]Wait for write done from DSP\n", __func__, audio);
+	rc = wait_event_interruptible(audio->write_wait,
+					(list_empty(&audio->out_queue)) ||
+					audio->wflush || audio->stopped);
+
+	if (audio->stopped || audio->wflush) {
+		pr_debug("%s[%pK]: Audio Flushed or Stopped,this is not EOS\n"
+			, __func__, audio);
+		audio->wflush = 0;
+		rc = -EBUSY;
+	}
+
+	if (rc < 0) {
+		pr_err("%s[%pK]: wait event for list_empty failed, rc = %d\n",
+			__func__, audio, rc);
+		goto done;
+	}
+
+	rc = q6asm_cmd(audio->ac, CMD_EOS);
+	pr_debug("%s[%pK]: EOS cmd sent to DSP\n", __func__, audio);
+
+	if (rc < 0)
+		pr_err("%s[%pK]: q6asm_cmd failed, rc = %d",
+			__func__, audio, rc);
+
+	pr_debug("%s[%pK]: wait for RENDERED_EOS from DSP\n"
+		, __func__, audio);
+	rc = wait_event_interruptible(audio->write_wait,
+					(audio->eos_rsp || audio->wflush ||
+					audio->stopped));
+
+	if (rc < 0) {
+		pr_err("%s[%pK]: wait event for eos_rsp failed, rc = %d\n",
+			__func__, audio, rc);
+		goto done;
+	}
+
+	if (audio->stopped || audio->wflush) {
+		audio->wflush = 0;
+		pr_debug("%s[%pK]: Audio Flushed or Stopped,this is not EOS\n"
+			, __func__, audio);
+		rc = -EBUSY;
+	}
+
+	if (audio->eos_rsp == 1)
+		pr_debug("%s[%pK]: EOS\n", __func__, audio);
+
+done:
+	mutex_lock(&audio->lock);
+	audio->drv_status &= ~ADRV_STATUS_FSYNC;
+	mutex_unlock(&audio->lock);
+
+	return rc;
+}
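+
+/*
+ * Minimal userspace sketch (the device node name is an assumption): after
+ * the last AUDIO_ASYNC_WRITE, fsync() blocks until the DSP renders EOS:
+ *
+ *	int fd = open("/dev/msm_audio_dec", O_RDWR | O_NONBLOCK);
+ *	... queue buffers via ioctl(fd, AUDIO_ASYNC_WRITE, &aio_buf) ...
+ *	if (fsync(fd) < 0)
+ *		perror("fsync");	// -EBUSY means flushed/stopped, not EOS
+ */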
+
+static int audio_aio_events_pending(struct q6audio_aio *audio)
+{
+	unsigned long flags;
+	int pending;
+
+	spin_lock_irqsave(&audio->event_queue_lock, flags);
+	pending = !list_empty(&audio->event_queue);
+	spin_unlock_irqrestore(&audio->event_queue_lock, flags);
+	return pending || audio->event_abort || audio->reset_event;
+}
+
+static long audio_aio_process_event_req_common(struct q6audio_aio *audio,
+					struct msm_audio_event *usr_evt)
+{
+	long rc;
+	struct audio_aio_event *drv_evt = NULL;
+	int timeout;
+	unsigned long flags;
+
+	timeout = usr_evt->timeout_ms;
+
+	if (timeout > 0) {
+		rc = wait_event_interruptible_timeout(audio->event_wait,
+				audio_aio_events_pending(audio),
+				msecs_to_jiffies(timeout));
+		if (rc == 0)
+			return -ETIMEDOUT;
+	} else {
+		rc = wait_event_interruptible(audio->event_wait,
+				audio_aio_events_pending(audio));
+	}
+	if (rc < 0)
+		return rc;
+
+	if (audio->reset_event) {
+		audio->reset_event = false;
+		pr_err("In SSR, post ENETRESET err\n");
+		return -ENETRESET;
+	}
+
+	if (audio->event_abort) {
+		audio->event_abort = 0;
+		return -ENODEV;
+	}
+
+	rc = 0;
+
+	spin_lock_irqsave(&audio->event_queue_lock, flags);
+	if (!list_empty(&audio->event_queue)) {
+		drv_evt = list_first_entry(&audio->event_queue,
+		   struct audio_aio_event, list);
+		list_del(&drv_evt->list);
+	}
+	if (drv_evt) {
+		usr_evt->event_type = drv_evt->event_type;
+		usr_evt->event_payload = drv_evt->payload;
+		list_add_tail(&drv_evt->list, &audio->free_event_queue);
+	} else {
+		pr_err("%s[%pK]:Unexpected path\n", __func__, audio);
+		spin_unlock_irqrestore(&audio->event_queue_lock, flags);
+		return -EPERM;
+	}
+	spin_unlock_irqrestore(&audio->event_queue_lock, flags);
+
+	/* Use the snapshot in usr_evt from here on: drv_evt is back on
+	 * the free list and may already be reused for another event
+	 */
+	if (usr_evt->event_type == AUDIO_EVENT_WRITE_DONE) {
+		pr_debug("%s[%pK]:posted AUDIO_EVENT_WRITE_DONE to user\n",
+			__func__, audio);
+		mutex_lock(&audio->write_lock);
+		audio_aio_ion_fixup(audio,
+			usr_evt->event_payload.aio_buf.buf_addr,
+			usr_evt->event_payload.aio_buf.buf_len, 0, 0);
+		mutex_unlock(&audio->write_lock);
+	} else if (usr_evt->event_type == AUDIO_EVENT_READ_DONE) {
+		pr_debug("%s[%pK]:posted AUDIO_EVENT_READ_DONE to user\n",
+			__func__, audio);
+		mutex_lock(&audio->read_lock);
+		audio_aio_ion_fixup(audio,
+			usr_evt->event_payload.aio_buf.buf_addr,
+			usr_evt->event_payload.aio_buf.buf_len, 0, 0);
+		mutex_unlock(&audio->read_lock);
+	}
+
+	/* Some read buffers might be held up in the DSP; release them
+	 * all once EOS is indicated
+	 */
+	if (audio->eos_rsp && !list_empty(&audio->in_queue)) {
+		pr_debug("%s[%pK]:Send flush command to release read buffers held up in DSP\n",
+			__func__, audio);
+		mutex_lock(&audio->lock);
+		audio_aio_flush(audio);
+		mutex_unlock(&audio->lock);
+	}
+
+	return rc;
+}
+
+static long audio_aio_process_event_req(struct q6audio_aio *audio,
+					void __user *arg)
+{
+	long rc;
+	struct msm_audio_event usr_evt;
+
+	if (copy_from_user(&usr_evt, arg, sizeof(struct msm_audio_event))) {
+		pr_err("%s: copy_from_user failed\n", __func__);
+		return -EFAULT;
+	}
+
+	rc = audio_aio_process_event_req_common(audio, &usr_evt);
+
+	if (copy_to_user(arg, &usr_evt, sizeof(usr_evt))) {
+		pr_err("%s: copy_to_user failed\n", __func__);
+		rc = -EFAULT;
+	}
+	return rc;
+}
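+
+/*
+ * Matching userspace event loop, as a hedged sketch (refill() is a
+ * placeholder for the application's buffer handling):
+ *
+ *	struct msm_audio_event evt = { .timeout_ms = 0 };  // <= 0 waits forever
+ *	while (ioctl(fd, AUDIO_GET_EVENT, &evt) == 0) {
+ *		if (evt.event_type == AUDIO_EVENT_WRITE_DONE)
+ *			refill(evt.event_payload.aio_buf.buf_addr);
+ *	}
+ */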
+
+#ifdef CONFIG_COMPAT
+
+struct msm_audio_aio_buf32 {
+	compat_uptr_t buf_addr;
+	u32 buf_len;
+	u32 data_len;
+	compat_uptr_t private_data;
+	u16 mfield_sz; /* only useful when data has a meta field */
+};
+
+struct msm_audio_bitstream_info32 {
+	u32 codec_type;
+	u32 chan_info;
+	u32 sample_rate;
+	u32 bit_stream_info;
+	u32 bit_rate;
+	u32 unused[3];
+};
+
+struct msm_audio_bitstream_error_info32 {
+	u32 dec_id;
+	u32 err_msg_indicator;
+	u32 err_type;
+};
+
+union msm_audio_event_payload32 {
+	struct msm_audio_aio_buf32 aio_buf;
+	struct msm_audio_bitstream_info32 stream_info;
+	struct msm_audio_bitstream_error_info32 error_info;
+	s32 reserved;
+};
+
+struct msm_audio_event32 {
+	s32 event_type;
+	s32 timeout_ms;
+	union msm_audio_event_payload32 event_payload;
+};
+
+static long audio_aio_process_event_req_compat(struct q6audio_aio *audio,
+					void __user *arg)
+{
+	long rc;
+	struct msm_audio_event32 usr_evt_32;
+	struct msm_audio_event usr_evt;
+	memset(&usr_evt, 0, sizeof(struct msm_audio_event));
+
+	if (copy_from_user(&usr_evt_32, arg,
+				sizeof(struct msm_audio_event32))) {
+		pr_err("%s: copy_from_user failed\n", __func__);
+		return -EFAULT;
+	}
+	usr_evt.timeout_ms = usr_evt_32.timeout_ms;
+
+	rc = audio_aio_process_event_req_common(audio, &usr_evt);
+	if (rc < 0) {
+		pr_err("%s: audio process event failed, rc = %ld",
+			__func__, rc);
+		return rc;
+	}
+
+	usr_evt_32.event_type = usr_evt.event_type;
+	switch (usr_evt_32.event_type) {
+	case AUDIO_EVENT_SUSPEND:
+	case AUDIO_EVENT_RESUME:
+	case AUDIO_EVENT_WRITE_DONE:
+	case AUDIO_EVENT_READ_DONE:
+		usr_evt_32.event_payload.aio_buf.buf_addr =
+			ptr_to_compat(usr_evt.event_payload.aio_buf.buf_addr);
+		usr_evt_32.event_payload.aio_buf.buf_len =
+			usr_evt.event_payload.aio_buf.buf_len;
+		usr_evt_32.event_payload.aio_buf.data_len =
+			usr_evt.event_payload.aio_buf.data_len;
+		usr_evt_32.event_payload.aio_buf.private_data =
+		ptr_to_compat(usr_evt.event_payload.aio_buf.private_data);
+		usr_evt_32.event_payload.aio_buf.mfield_sz =
+			usr_evt.event_payload.aio_buf.mfield_sz;
+		break;
+	case AUDIO_EVENT_STREAM_INFO:
+		usr_evt_32.event_payload.stream_info.codec_type =
+			usr_evt.event_payload.stream_info.codec_type;
+		usr_evt_32.event_payload.stream_info.chan_info =
+			usr_evt.event_payload.stream_info.chan_info;
+		usr_evt_32.event_payload.stream_info.sample_rate =
+			usr_evt.event_payload.stream_info.sample_rate;
+		usr_evt_32.event_payload.stream_info.bit_stream_info =
+			usr_evt.event_payload.stream_info.bit_stream_info;
+		usr_evt_32.event_payload.stream_info.bit_rate =
+			usr_evt.event_payload.stream_info.bit_rate;
+		break;
+	case AUDIO_EVENT_BITSTREAM_ERROR_INFO:
+		usr_evt_32.event_payload.error_info.dec_id =
+			usr_evt.event_payload.error_info.dec_id;
+		usr_evt_32.event_payload.error_info.err_msg_indicator =
+			usr_evt.event_payload.error_info.err_msg_indicator;
+		usr_evt_32.event_payload.error_info.err_type =
+			usr_evt.event_payload.error_info.err_type;
+		break;
+	default:
+		pr_debug("%s: unknown audio event type = %d rc = %ld",
+			 __func__, usr_evt_32.event_type, rc);
+		return rc;
+	}
+	if (copy_to_user(arg, &usr_evt_32, sizeof(usr_evt_32))) {
+		pr_err("%s: copy_to_user failed\n", __func__);
+		rc = -EFAULT;
+	}
+	return rc;
+}
+#endif
+
+static int audio_aio_ion_check(struct q6audio_aio *audio,
+				void *vaddr, unsigned long len)
+{
+	struct audio_aio_ion_region *region_elt;
+	struct audio_aio_ion_region t = {.vaddr = vaddr, .len = len };
+
+	list_for_each_entry(region_elt, &audio->ion_region_queue, list) {
+		if (CONTAINS(region_elt, &t) || CONTAINS(&t, region_elt) ||
+			OVERLAPS(region_elt, &t)) {
+			pr_err("%s[%pK]:region (vaddr %pK len %ld) clashes with registered region (vaddr %pK paddr %pK len %ld)\n",
+				__func__, audio, vaddr, len,
+				region_elt->vaddr,
+				&region_elt->paddr, region_elt->len);
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
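+
+/*
+ * Example of the rejection rules above, with hypothetical ranges: given an
+ * existing region [0x1000, 0x2000), registering [0x1800, 0x2800) fails via
+ * OVERLAPS(), [0x1100, 0x1200) via CONTAINS(region, new) and
+ * [0x800, 0x2800) via CONTAINS(new, region); only disjoint ranges pass.
+ */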
+
+static int audio_aio_ion_add(struct q6audio_aio *audio,
+				struct msm_audio_ion_info *info)
+{
+	ion_phys_addr_t paddr = 0;
+	size_t len = 0;
+	struct audio_aio_ion_region *region;
+	int rc = -EINVAL;
+	struct ion_handle *handle = NULL;
+	unsigned long ionflag;
+	void *kvaddr = NULL;
+
+	pr_debug("%s[%pK]:\n", __func__, audio);
+	region = kmalloc(sizeof(*region), GFP_KERNEL);
+
+	if (!region) {
+		rc = -ENOMEM;
+		goto end;
+	}
+
+	rc = msm_audio_ion_import_legacy("Audio_Dec_Client", audio->client,
+				&handle, info->fd, &ionflag,
+				0, &paddr, &len, &kvaddr);
+	if (rc) {
+		pr_err("%s: msm audio ion alloc failed\n", __func__);
+		goto import_error;
+	}
+
+	rc = audio_aio_ion_check(audio, info->vaddr, len);
+	if (rc < 0) {
+		pr_err("%s: audio_aio_ion_check failed\n", __func__);
+		goto ion_error;
+	}
+
+	region->handle = handle;
+	region->vaddr = info->vaddr;
+	region->fd = info->fd;
+	region->paddr = paddr;
+	region->kvaddr = kvaddr;
+	region->len = len;
+	region->ref_cnt = 0;
+	pr_debug("%s[%pK]:add region paddr %pK vaddr %pK, len %lu kvaddr %pK\n",
+		__func__, audio,
+		&region->paddr, region->vaddr, region->len,
+		region->kvaddr);
+	list_add_tail(&region->list, &audio->ion_region_queue);
+	rc = q6asm_memory_map(audio->ac,  paddr, IN, len, 1);
+	if (rc < 0) {
+		pr_err("%s[%pK]: memory map failed\n", __func__, audio);
+		goto mmap_error;
+	} else {
+		goto end;
+	}
+mmap_error:
+	list_del(&region->list);
+ion_error:
+	msm_audio_ion_free_legacy(audio->client, handle);
+import_error:
+	kfree(region);
+end:
+	return rc;
+}
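+
+/*
+ * Illustrative userspace flow for the ION registration path (the mmap()
+ * parameters are assumptions):
+ *
+ *	struct msm_audio_ion_info info;
+ *	info.fd = ion_fd;	// shared-memory fd from the ION allocator
+ *	info.vaddr = mmap(NULL, size, PROT_READ | PROT_WRITE,
+ *			  MAP_SHARED, ion_fd, 0);
+ *	ioctl(fd, AUDIO_REGISTER_ION, &info);	// maps the buffer to the DSP
+ *	...
+ *	ioctl(fd, AUDIO_DEREGISTER_ION, &info);	// rejected while ref_cnt > 0
+ */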
+
+static int audio_aio_ion_remove(struct q6audio_aio *audio,
+				struct msm_audio_ion_info *info)
+{
+	struct audio_aio_ion_region *region;
+	struct list_head *ptr, *next;
+	int rc = -EINVAL;
+
+	pr_debug("%s[%pK]:info fd %d vaddr %pK\n",
+		__func__, audio, info->fd, info->vaddr);
+
+	list_for_each_safe(ptr, next, &audio->ion_region_queue) {
+		region = list_entry(ptr, struct audio_aio_ion_region, list);
+
+		if ((region->fd == info->fd) &&
+			(region->vaddr == info->vaddr)) {
+			if (region->ref_cnt) {
+				pr_debug("%s[%pK]:region %pK in use ref_cnt %d\n",
+					__func__, audio, region,
+					region->ref_cnt);
+				break;
+			}
+			pr_debug("%s[%pK]:remove region fd %d vaddr %pK\n",
+				__func__, audio, info->fd, info->vaddr);
+			rc = q6asm_memory_unmap(audio->ac,
+						region->paddr, IN);
+			if (rc < 0)
+				pr_err("%s[%pK]: memory unmap failed\n",
+					__func__, audio);
+
+			list_del(&region->list);
+			msm_audio_ion_free_legacy(audio->client,
+						 region->handle);
+			kfree(region);
+			rc = 0;
+			break;
+		}
+	}
+
+	return rc;
+}
+
+static int audio_aio_async_write(struct q6audio_aio *audio,
+				struct audio_aio_buffer_node *buf_node)
+{
+	int rc;
+	struct audio_client *ac;
+	struct audio_aio_write_param param;
+
+	memset(&param, 0, sizeof(param));
+
+	if (!audio || !buf_node) {
+		pr_err("%s NULL pointer audio=[0x%pK], buf_node=[0x%pK]\n",
+			__func__, audio, buf_node);
+		return -EINVAL;
+	}
+	pr_debug("%s[%pK]: Send write buff %pK phy %pK len %d meta_enable = %d\n",
+		__func__, audio, buf_node, &buf_node->paddr,
+		buf_node->buf.data_len,
+		audio->buf_cfg.meta_info_enable);
+	pr_debug("%s[%pK]: flags = 0x%x\n", __func__, audio,
+		buf_node->meta_info.meta_in.nflags);
+
+	ac = audio->ac;
+	/* Offset with the appropriate meta info */
+	if (audio->feedback) {
+		/* Non Tunnel mode */
+		param.paddr = buf_node->paddr + sizeof(struct dec_meta_in);
+		param.len = buf_node->buf.data_len - sizeof(struct dec_meta_in);
+	} else {
+		/* Tunnel mode */
+		param.paddr = buf_node->paddr;
+		param.len = buf_node->buf.data_len;
+	}
+	param.msw_ts = buf_node->meta_info.meta_in.ntimestamp.highpart;
+	param.lsw_ts = buf_node->meta_info.meta_in.ntimestamp.lowpart;
+	param.flags  = buf_node->meta_info.meta_in.nflags;
+	/* If meta_info is not enabled, indicate that no timestamp is valid */
+	if (!audio->buf_cfg.meta_info_enable)
+		param.flags = 0xFF00;
+
+	if (buf_node->meta_info.meta_in.nflags & AUDIO_DEC_EOF_SET)
+		param.flags |= AUDIO_DEC_EOF_SET;
+
+	param.uid = ac->session;
+	/* Write command will populate session id as token */
+	buf_node->token = ac->session;
+	rc = q6asm_async_write(ac, &param);
+	if (rc < 0)
+		pr_err("%s[%pK]:failed\n", __func__, audio);
+	return rc;
+}
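+
+/*
+ * Layout note: in non-tunnel mode the first sizeof(struct dec_meta_in)
+ * bytes of every write buffer hold the timestamp/flags consumed above, so
+ * the DSP only sees data_len - sizeof(struct dec_meta_in) payload bytes;
+ * e.g. a 4096-byte data_len buffer yields a correspondingly shorter write.
+ */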
+
+void audio_aio_post_event(struct q6audio_aio *audio, int type,
+			union msm_audio_event_payload payload)
+{
+	struct audio_aio_event *e_node = NULL;
+	unsigned long flags;
+
+	spin_lock_irqsave(&audio->event_queue_lock, flags);
+
+	if (!list_empty(&audio->free_event_queue)) {
+		e_node = list_first_entry(&audio->free_event_queue,
+					struct audio_aio_event, list);
+		list_del(&e_node->list);
+	} else {
+		e_node = kmalloc(sizeof(struct audio_aio_event), GFP_ATOMIC);
+		if (!e_node) {
+			spin_unlock_irqrestore(&audio->event_queue_lock, flags);
+			return;
+		}
+	}
+
+	e_node->event_type = type;
+	e_node->payload = payload;
+
+	list_add_tail(&e_node->list, &audio->event_queue);
+	spin_unlock_irqrestore(&audio->event_queue_lock, flags);
+	wake_up(&audio->event_wait);
+}
+
+static int audio_aio_async_read(struct q6audio_aio *audio,
+				struct audio_aio_buffer_node *buf_node)
+{
+	struct audio_client *ac;
+	struct audio_aio_read_param param;
+	int rc;
+
+	pr_debug("%s[%pK]: Send read buff %pK phy %pK len %d\n",
+		__func__, audio, buf_node,
+		&buf_node->paddr, buf_node->buf.buf_len);
+	ac = audio->ac;
+	/* Provide address so driver can append nr frames information */
+	param.paddr = buf_node->paddr +
+		sizeof(struct dec_meta_out);
+	param.len = buf_node->buf.buf_len -
+		sizeof(struct dec_meta_out);
+	param.uid = ac->session;
+	/* Read command will populate session_id as token */
+	buf_node->token = ac->session;
+	rc = q6asm_async_read(ac, &param);
+	if (rc < 0)
+		pr_err("%s[%pK]:failed\n", __func__, audio);
+	return rc;
+}
+
+static int audio_aio_buf_add_shared(struct q6audio_aio *audio, u32 dir,
+				struct audio_aio_buffer_node *buf_node)
+{
+	unsigned long flags;
+	int ret = 0;
+	pr_debug("%s[%pK]:node %pK dir %x buf_addr %pK buf_len %d data_len %d\n",
+		 __func__, audio, buf_node, dir, buf_node->buf.buf_addr,
+		buf_node->buf.buf_len, buf_node->buf.data_len);
+	buf_node->paddr = audio_aio_ion_fixup(audio, buf_node->buf.buf_addr,
+						buf_node->buf.buf_len, 1,
+						&buf_node->kvaddr);
+	if (dir) {
+		/* write */
+		if (!buf_node->paddr ||
+			(buf_node->paddr & 0x1) ||
+			(!audio->feedback && !buf_node->buf.data_len)) {
+			kfree(buf_node);
+			return -EINVAL;
+		}
+		extract_meta_out_info(audio, buf_node, 1);
+		/* Not an EOS buffer */
+		if (!(buf_node->meta_info.meta_in.nflags & AUDIO_DEC_EOS_SET)) {
+			spin_lock_irqsave(&audio->dsp_lock, flags);
+			ret = audio_aio_async_write(audio, buf_node);
+			/* EOS buffer handled in driver */
+			list_add_tail(&buf_node->list, &audio->out_queue);
+			spin_unlock_irqrestore(&audio->dsp_lock, flags);
+		} else if (buf_node->meta_info.meta_in.nflags
+				   & AUDIO_DEC_EOS_SET) {
+			if (!audio->wflush) {
+				pr_debug("%s[%pK]:Send EOS cmd at i/p\n",
+					__func__, audio);
+				/* Driver will forcefully post a write-done
+				 * event once the EOS ack is received from
+				 * the DSP
+				 */
+				audio->eos_write_payload.aio_buf =
+						buf_node->buf;
+				audio->eos_flag = 1;
+				audio->eos_rsp = 0;
+				q6asm_cmd(audio->ac, CMD_EOS);
+				kfree(buf_node);
+			} else {
+				/* Flush in progress, send back the i/p
+				 * EOS buffer as is
+				 */
+				union msm_audio_event_payload event_payload;
+				event_payload.aio_buf = buf_node->buf;
+				audio_aio_post_event(audio,
+						AUDIO_EVENT_WRITE_DONE,
+						event_payload);
+				kfree(buf_node);
+			}
+		}
+	} else {
+		/* read */
+		if (!buf_node->paddr ||
+			(buf_node->paddr & 0x1) ||
+			(buf_node->buf.buf_len < PCM_BUFSZ_MIN)) {
+			kfree(buf_node);
+			return -EINVAL;
+		}
+		/* No EOS reached */
+		if (!audio->eos_rsp) {
+			spin_lock_irqsave(&audio->dsp_lock, flags);
+			ret = audio_aio_async_read(audio, buf_node);
+			/* EOS buffer handled in driver */
+			list_add_tail(&buf_node->list, &audio->in_queue);
+			spin_unlock_irqrestore(&audio->dsp_lock, flags);
+		}
+		} else {
+			/* EOS reached at the input side; fake all upcoming
+			 * read buffers to indicate the same
+			 */
+			union msm_audio_event_payload event_payload;
+			event_payload.aio_buf = buf_node->buf;
+			event_payload.aio_buf.data_len =
+				insert_eos_buf(audio, buf_node);
+			pr_debug("%s[%pK]: propagate READ_DONE as EOS done\n",
+				__func__, audio);
+			audio_aio_post_event(audio, AUDIO_EVENT_READ_DONE,
+					event_payload);
+			kfree(buf_node);
+		}
+	}
+	return ret;
+}
+#ifdef CONFIG_COMPAT
+static int audio_aio_buf_add_compat(struct q6audio_aio *audio, u32 dir,
+				void __user *arg)
+{
+	struct audio_aio_buffer_node *buf_node;
+	struct msm_audio_aio_buf32 aio_buf_32;
+
+	buf_node = kzalloc(sizeof(*buf_node), GFP_KERNEL);
+
+	if (!buf_node) {
+		pr_err("%s: Buffer node alloc failed\n", __func__);
+		return -ENOMEM;
+	}
+
+	if (copy_from_user(&aio_buf_32, arg, sizeof(aio_buf_32))) {
+		kfree(buf_node);
+		pr_err("%s: copy_from_user failed\n", __func__);
+		return -EFAULT;
+	}
+
+	buf_node->buf.buf_addr = compat_ptr(aio_buf_32.buf_addr);
+	buf_node->buf.buf_len = aio_buf_32.buf_len;
+	buf_node->buf.data_len = aio_buf_32.data_len;
+	buf_node->buf.private_data = compat_ptr(aio_buf_32.private_data);
+	buf_node->buf.mfield_sz = aio_buf_32.mfield_sz;
+
+	return audio_aio_buf_add_shared(audio, dir, buf_node);
+}
+#endif
+
+static int audio_aio_buf_add(struct q6audio_aio *audio, u32 dir,
+				void __user *arg)
+{
+	struct audio_aio_buffer_node *buf_node;
+
+	buf_node = kzalloc(sizeof(*buf_node), GFP_KERNEL);
+
+	if (!buf_node) {
+		pr_err("%s: Buffer node alloc failed\n", __func__);
+		return -ENOMEM;
+	}
+
+	if (copy_from_user(&buf_node->buf, arg, sizeof(buf_node->buf))) {
+		kfree(buf_node);
+		pr_err("%s: copy_from_user failed\n", __func__);
+		return -EFAULT;
+	}
+
+	return audio_aio_buf_add_shared(audio, dir, buf_node);
+}
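+
+/*
+ * Sketch of queueing one write buffer from userspace; the buffer must lie
+ * inside a region registered via AUDIO_REGISTER_ION (off and cookie are
+ * placeholders):
+ *
+ *	struct msm_audio_aio_buf buf = {
+ *		.buf_addr = ion_vaddr + off,
+ *		.buf_len = chunk,
+ *		.data_len = chunk,
+ *		.private_data = cookie,
+ *	};
+ *	ioctl(fd, AUDIO_ASYNC_WRITE, &buf);  // completion via AUDIO_GET_EVENT
+ */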
+
+void audio_aio_ioport_reset(struct q6audio_aio *audio)
+{
+	if (audio->drv_status & ADRV_STATUS_AIO_INTF) {
+		/* If fsync is in progress, make sure
+		 * return value of fsync indicates
+		 * abort due to flush
+		 */
+		if (audio->drv_status & ADRV_STATUS_FSYNC) {
+			pr_debug("%s[%pK]:fsync in progress\n",
+				 __func__, audio);
+			audio->drv_ops.out_flush(audio);
+		} else
+			audio->drv_ops.out_flush(audio);
+		if (audio->feedback == NON_TUNNEL_MODE)
+			audio->drv_ops.in_flush(audio);
+	}
+}
+
+int audio_aio_open(struct q6audio_aio *audio, struct file *file)
+{
+	int rc = 0;
+	int i;
+	struct audio_aio_event *e_node = NULL;
+	struct list_head *ptr, *next;
+
+	/* Settings will be reconfigured at AUDIO_SET_CONFIG,
+	 * but at least an initial config is needed
+	 */
+	audio->str_cfg.buffer_size = FRAME_SIZE;
+	audio->str_cfg.buffer_count = FRAME_NUM;
+	audio->pcm_cfg.buffer_count = PCM_BUF_COUNT;
+	audio->pcm_cfg.sample_rate = 48000;
+	audio->pcm_cfg.channel_count = 2;
+
+	/* Only AIO interface */
+	if (file->f_flags & O_NONBLOCK) {
+		pr_debug("%s[%pK]:set to aio interface\n", __func__, audio);
+		audio->drv_status |= ADRV_STATUS_AIO_INTF;
+		audio->drv_ops.out_flush = audio_aio_async_out_flush;
+		audio->drv_ops.in_flush = audio_aio_async_in_flush;
+		q6asm_set_io_mode(audio->ac, ASYNC_IO_MODE);
+	} else {
+		pr_err("%s[%pK]:SIO interface not supported\n",
+			__func__, audio);
+		rc = -EACCES;
+		goto fail;
+	}
+
+	/* Initialize all locks of audio instance */
+	mutex_init(&audio->lock);
+	mutex_init(&audio->read_lock);
+	mutex_init(&audio->write_lock);
+	mutex_init(&audio->get_event_lock);
+	spin_lock_init(&audio->dsp_lock);
+	spin_lock_init(&audio->event_queue_lock);
+	init_waitqueue_head(&audio->cmd_wait);
+	init_waitqueue_head(&audio->write_wait);
+	init_waitqueue_head(&audio->event_wait);
+	INIT_LIST_HEAD(&audio->out_queue);
+	INIT_LIST_HEAD(&audio->in_queue);
+	INIT_LIST_HEAD(&audio->ion_region_queue);
+	INIT_LIST_HEAD(&audio->free_event_queue);
+	INIT_LIST_HEAD(&audio->event_queue);
+
+	audio->drv_ops.out_flush(audio);
+	audio->opened = 1;
+	audio->reset_event = false;
+	file->private_data = audio;
+	audio->codec_ioctl = audio_aio_ioctl;
+	audio->codec_compat_ioctl = audio_aio_compat_ioctl;
+	for (i = 0; i < AUDIO_EVENT_NUM; i++) {
+		e_node = kmalloc(sizeof(struct audio_aio_event), GFP_KERNEL);
+		if (e_node) {
+			list_add_tail(&e_node->list, &audio->free_event_queue);
+		} else {
+			pr_err("%s[%pK]:event pkt alloc failed\n",
+				__func__, audio);
+			rc = -ENOMEM;
+			goto cleanup;
+		}
+	}
+	audio->client = msm_audio_ion_client_create("Audio_Dec_Client");
+	if (IS_ERR_OR_NULL(audio->client)) {
+		pr_err("Unable to create ION client\n");
+		rc = -ENOMEM;
+		goto cleanup;
+	}
+	pr_debug("Ion client create in audio_aio_open %pK", audio->client);
+
+	rc = register_volume_listener(audio);
+	if (rc < 0)
+		goto ion_cleanup;
+
+	return 0;
+ion_cleanup:
+	msm_audio_ion_client_destroy(audio->client);
+	audio->client = NULL;
+cleanup:
+	list_for_each_safe(ptr, next, &audio->free_event_queue) {
+		e_node = list_first_entry(&audio->free_event_queue,
+				   struct audio_aio_event, list);
+		list_del(&e_node->list);
+		kfree(e_node);
+	}
+fail:
+	return rc;
+}
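+
+/*
+ * Usage note: since only the AIO interface is supported, userspace must
+ * open the device non-blocking, e.g. (node name is an assumption):
+ *
+ *	int fd = open("/dev/msm_audio_dec", O_RDWR | O_NONBLOCK);
+ */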
+
+static long audio_aio_shared_ioctl(struct file *file, unsigned int cmd,
+			unsigned long arg)
+{
+	struct q6audio_aio *audio = file->private_data;
+	int rc = 0;
+
+	switch (cmd) {
+	case AUDIO_ABORT_GET_EVENT: {
+		audio->event_abort = 1;
+		wake_up(&audio->event_wait);
+		break;
+	}
+	case AUDIO_OUTPORT_FLUSH: {
+		pr_debug("%s[%pK]:AUDIO_OUTPORT_FLUSH\n", __func__, audio);
+		mutex_lock(&audio->read_lock);
+		rc = audio_aio_outport_flush(audio);
+		if (rc < 0) {
+			pr_err("%s[%pK]: AUDIO_OUTPORT_FLUSH failed\n",
+				__func__, audio);
+			rc = -EINTR;
+		}
+		mutex_unlock(&audio->read_lock);
+		break;
+	}
+	case AUDIO_STOP: {
+		pr_debug("%s[%pK]: AUDIO_STOP session_id[%d]\n", __func__,
+				audio, audio->ac->session);
+		mutex_lock(&audio->lock);
+		audio->stopped = 1;
+		rc = audio_aio_flush(audio);
+		if (rc < 0) {
+			pr_err("%s[%pK]:Audio Stop procedure failed rc=%d\n",
+				__func__, audio, rc);
+			mutex_unlock(&audio->lock);
+			break;
+		}
+		audio->enabled = 0;
+		audio->drv_status &= ~ADRV_STATUS_PAUSE;
+		if (audio->drv_status & ADRV_STATUS_FSYNC) {
+			pr_debug("%s[%pK] Waking up the audio_aio_fsync\n",
+					__func__, audio);
+			wake_up(&audio->write_wait);
+		}
+		mutex_unlock(&audio->lock);
+		break;
+	}
+	case AUDIO_PAUSE: {
+		pr_debug("%s[%pK]:AUDIO_PAUSE %ld\n", __func__, audio, arg);
+		mutex_lock(&audio->lock);
+		if (arg == 1) {
+			rc = audio_aio_pause(audio);
+			if (rc < 0) {
+				pr_err("%s[%pK]: pause FAILED rc=%d\n",
+					__func__, audio, rc);
+				mutex_unlock(&audio->lock);
+				break;
+			}
+			audio->drv_status |= ADRV_STATUS_PAUSE;
+		} else if (arg == 0) {
+			if (audio->drv_status & ADRV_STATUS_PAUSE) {
+				rc = audio_aio_enable(audio);
+				if (rc) {
+					pr_err("%s[%pK]: audio enable failed\n",
+						__func__, audio);
+				} else {
+					audio->drv_status &= ~ADRV_STATUS_PAUSE;
+					audio->enabled = 1;
+				}
+			}
+		}
+		mutex_unlock(&audio->lock);
+		break;
+	}
+	case AUDIO_FLUSH: {
+		pr_debug("%s[%pK]: AUDIO_FLUSH sessionid[%d]\n", __func__,
+			audio, audio->ac->session);
+		mutex_lock(&audio->lock);
+		audio->rflush = 1;
+		audio->wflush = 1;
+		if (audio->drv_status & ADRV_STATUS_FSYNC) {
+			pr_debug("%s[%pK] Waking up the audio_aio_fsync\n",
+				__func__, audio);
+			wake_up(&audio->write_wait);
+		}
+		/* Flush DSP */
+		rc = audio_aio_flush(audio);
+		/* Flush input / Output buffer in software*/
+		audio_aio_ioport_reset(audio);
+		if (rc < 0) {
+			pr_err("%s[%pK]:AUDIO_FLUSH interrupted\n",
+				__func__, audio);
+			rc = -EINTR;
+		} else {
+			audio->rflush = 0;
+			if (audio->drv_status & ADRV_STATUS_FSYNC)
+				wake_up(&audio->write_wait);
+			else
+				audio->wflush = 0;
+
+		}
+		audio->eos_flag = 0;
+		audio->eos_rsp = 0;
+		mutex_unlock(&audio->lock);
+		break;
+	}
+	case AUDIO_GET_SESSION_ID: {
+		mutex_lock(&audio->lock);
+		if (copy_to_user((void *)arg, &audio->ac->session,
+			sizeof(u16))) {
+			pr_err("%s: copy_to_user for AUDIO_GET_SESSION_ID failed\n",
+				__func__);
+			rc = -EFAULT;
+		}
+		mutex_unlock(&audio->lock);
+		break;
+	}
+	case AUDIO_PM_AWAKE: {
+		if ((audio->audio_ws_mgr ==  NULL) ||
+				(audio->miscdevice == NULL)) {
+			pr_err("%s[%pK]: invalid ws_mgr or miscdevice",
+					__func__, audio);
+			rc = -EACCES;
+			break;
+		}
+		pr_debug("%s[%pK]:AUDIO_PM_AWAKE\n", __func__, audio);
+		mutex_lock(&audio->lock);
+		if (!audio->wakelock_voted) {
+			audio->wakelock_voted = true;
+			mutex_lock(&audio->audio_ws_mgr->ws_lock);
+			if (audio->audio_ws_mgr->ref_cnt++ == 0)
+				pm_stay_awake(audio->miscdevice->this_device);
+			mutex_unlock(&audio->audio_ws_mgr->ws_lock);
+		}
+		mutex_unlock(&audio->lock);
+		break;
+	}
+	case AUDIO_PM_RELAX: {
+		if ((audio->audio_ws_mgr ==  NULL) ||
+				(audio->miscdevice == NULL)) {
+			pr_err("%s[%pK]: invalid ws_mgr or miscdevice",
+					__func__, audio);
+			rc = -EACCES;
+			break;
+		}
+		pr_debug("%s[%pK]:AUDIO_PM_RELAX\n", __func__, audio);
+		mutex_lock(&audio->lock);
+		if (audio->wakelock_voted) {
+			audio->wakelock_voted = false;
+			mutex_lock(&audio->audio_ws_mgr->ws_lock);
+			if ((audio->audio_ws_mgr->ref_cnt > 0) &&
+					(--audio->audio_ws_mgr->ref_cnt == 0)) {
+				pm_relax(audio->miscdevice->this_device);
+			}
+			mutex_unlock(&audio->audio_ws_mgr->ws_lock);
+		}
+		mutex_unlock(&audio->lock);
+		break;
+	}
+	default:
+		pr_err("%s: Unknown ioctl cmd = %d", __func__, cmd);
+		rc =  -EINVAL;
+	}
+	return rc;
+}
+
+static long audio_aio_ioctl(struct file *file, unsigned int cmd,
+				unsigned long arg)
+{
+	struct q6audio_aio *audio = file->private_data;
+	int rc = 0;
+
+	switch (cmd) {
+	case AUDIO_ABORT_GET_EVENT:
+	case AUDIO_OUTPORT_FLUSH:
+	case AUDIO_STOP:
+	case AUDIO_PAUSE:
+	case AUDIO_FLUSH:
+	case AUDIO_GET_SESSION_ID:
+	case AUDIO_PM_AWAKE:
+	case AUDIO_PM_RELAX:
+		rc = audio_aio_shared_ioctl(file, cmd, arg);
+		break;
+	case AUDIO_GET_STATS: {
+		struct msm_audio_stats stats;
+		uint64_t timestamp;
+		memset(&stats, 0, sizeof(struct msm_audio_stats));
+		stats.byte_count = atomic_read(&audio->in_bytes);
+		stats.sample_count = atomic_read(&audio->in_samples);
+		rc = q6asm_get_session_time(audio->ac, &timestamp);
+		if (rc >= 0)
+			memcpy(&stats.unused[0], &timestamp, sizeof(timestamp));
+		else
+			pr_debug("Error while getting timestamp\n");
+		if (copy_to_user((void *)arg, &stats, sizeof(stats))) {
+			pr_err("%s: copy_frm_user for AUDIO_GET_STATS failed\n",
+				__func__);
+			rc = -EFAULT;
+		}
+		break;
+	}
+	case AUDIO_GET_EVENT: {
+		pr_debug("%s[%pK]:AUDIO_GET_EVENT\n", __func__, audio);
+		if (mutex_trylock(&audio->get_event_lock)) {
+			rc = audio_aio_process_event_req(audio,
+						(void __user *)arg);
+			mutex_unlock(&audio->get_event_lock);
+		} else
+			rc = -EBUSY;
+		break;
+	}
+	case AUDIO_ASYNC_WRITE: {
+		mutex_lock(&audio->write_lock);
+		if (audio->drv_status & ADRV_STATUS_FSYNC)
+			rc = -EBUSY;
+		else {
+			if (audio->enabled)
+				rc = audio_aio_buf_add(audio, 1,
+						(void __user *)arg);
+			else
+				rc = -EPERM;
+		}
+		mutex_unlock(&audio->write_lock);
+		break;
+	}
+	case AUDIO_ASYNC_READ: {
+		mutex_lock(&audio->read_lock);
+		if (audio->feedback)
+			rc = audio_aio_buf_add(audio, 0,
+					(void __user *)arg);
+		else
+			rc = -EPERM;
+		mutex_unlock(&audio->read_lock);
+		break;
+	}
+
+	case AUDIO_GET_STREAM_CONFIG: {
+		struct msm_audio_stream_config cfg;
+		mutex_lock(&audio->lock);
+		memset(&cfg, 0, sizeof(cfg));
+		cfg.buffer_size = audio->str_cfg.buffer_size;
+		cfg.buffer_count = audio->str_cfg.buffer_count;
+		pr_debug("%s[%pK]:GET STREAM CFG %d %d\n",
+			__func__, audio, cfg.buffer_size, cfg.buffer_count);
+		if (copy_to_user((void *)arg, &cfg, sizeof(cfg))) {
+			pr_err(
+				"%s: copy_to_user for AUDIO_GET_STREAM_CONFIG failed\n",
+				__func__);
+			rc = -EFAULT;
+		}
+		mutex_unlock(&audio->lock);
+		break;
+	}
+	case AUDIO_SET_STREAM_CONFIG: {
+		struct msm_audio_stream_config cfg;
+		pr_debug("%s[%pK]:SET STREAM CONFIG\n", __func__, audio);
+		mutex_lock(&audio->lock);
+		if (copy_from_user(&cfg, (void *)arg, sizeof(cfg))) {
+			pr_err(
+				"%s: copy_from_user for AUDIO_SET_STREAM_CONFIG failed\n",
+				__func__);
+			rc = -EFAULT;
+			mutex_unlock(&audio->lock);
+			break;
+		}
+		audio->str_cfg.buffer_size = FRAME_SIZE;
+		audio->str_cfg.buffer_count = FRAME_NUM;
+		rc = 0;
+		mutex_unlock(&audio->lock);
+		break;
+	}
+	case AUDIO_GET_CONFIG: {
+		struct msm_audio_config cfg;
+		mutex_lock(&audio->lock);
+		if (copy_to_user((void *)arg, &audio->pcm_cfg, sizeof(cfg))) {
+			pr_err(
+				"%s: copy_to_user for AUDIO_GET_CONFIG failed\n",
+				__func__);
+			rc = -EFAULT;
+		}
+		mutex_unlock(&audio->lock);
+		break;
+	}
+	case AUDIO_SET_CONFIG: {
+		struct msm_audio_config config;
+		pr_err("%s[%pK]:AUDIO_SET_CONFIG\n", __func__, audio);
+		mutex_lock(&audio->lock);
+		if (copy_from_user(&config, (void *)arg, sizeof(config))) {
+			pr_err(
+				"%s: copy_from_user for AUDIO_SET_CONFIG failed\n",
+				__func__);
+			rc = -EFAULT;
+			mutex_unlock(&audio->lock);
+			break;
+		}
+		if (audio->feedback != NON_TUNNEL_MODE) {
+			pr_err("%s[%pK]:Not sufficient permission to change the playback mode\n",
+				 __func__, audio);
+			rc = -EACCES;
+			mutex_unlock(&audio->lock);
+			break;
+		}
+		if ((config.buffer_count > PCM_BUF_COUNT) ||
+			(config.buffer_count == 1))
+			config.buffer_count = PCM_BUF_COUNT;
+
+		if (config.buffer_size < PCM_BUFSZ_MIN)
+			config.buffer_size = PCM_BUFSZ_MIN;
+
+		audio->pcm_cfg.buffer_count = config.buffer_count;
+		audio->pcm_cfg.buffer_size = config.buffer_size;
+		audio->pcm_cfg.channel_count = config.channel_count;
+		audio->pcm_cfg.sample_rate = config.sample_rate;
+		rc = 0;
+		mutex_unlock(&audio->lock);
+		break;
+	}
+	case AUDIO_SET_BUF_CFG: {
+		struct msm_audio_buf_cfg  cfg;
+		mutex_lock(&audio->lock);
+		if (copy_from_user(&cfg, (void *)arg, sizeof(cfg))) {
+			pr_err(
+				"%s: copy_from_user for AUDIO_GET_BUF CONFIG failed\n",
+				__func__);
+			rc = -EFAULT;
+			mutex_unlock(&audio->lock);
+			break;
+		}
+		if ((audio->feedback == NON_TUNNEL_MODE) &&
+			!cfg.meta_info_enable) {
+			rc = -EFAULT;
+			mutex_unlock(&audio->lock);
+			break;
+		}
+
+		audio->buf_cfg.meta_info_enable = cfg.meta_info_enable;
+		pr_debug("%s[%pK]:session id %d: Set-buf-cfg: meta[%d]",
+				__func__, audio,
+				audio->ac->session, cfg.meta_info_enable);
+		mutex_unlock(&audio->lock);
+		break;
+	}
+	case AUDIO_GET_BUF_CFG: {
+		pr_debug("%s[%pK]:session id %d: Get-buf-cfg: meta[%d] framesperbuf[%d]\n",
+			 __func__, audio,
+			audio->ac->session, audio->buf_cfg.meta_info_enable,
+			audio->buf_cfg.frames_per_buf);
+
+		mutex_lock(&audio->lock);
+		if (copy_to_user((void *)arg, &audio->buf_cfg,
+			sizeof(struct msm_audio_buf_cfg))) {
+			pr_err(
+				"%s: copy_to_user for AUDIO_GET_BUF_CONFIG failed\n",
+				__func__);
+			rc = -EFAULT;
+		}
+		mutex_unlock(&audio->lock);
+		break;
+	}
+	case AUDIO_REGISTER_ION: {
+		struct msm_audio_ion_info info;
+		pr_debug("%s[%pK]:AUDIO_REGISTER_ION\n", __func__, audio);
+		mutex_lock(&audio->lock);
+		if (copy_from_user(&info, (void *)arg, sizeof(info))) {
+			pr_err(
+				"%s: copy_from_user for AUDIO_REGISTER_ION failed\n",
+				__func__);
+			rc = -EFAULT;
+		} else {
+			mutex_lock(&audio->read_lock);
+			mutex_lock(&audio->write_lock);
+			rc = audio_aio_ion_add(audio, &info);
+			mutex_unlock(&audio->write_lock);
+			mutex_unlock(&audio->read_lock);
+		}
+		mutex_unlock(&audio->lock);
+		break;
+	}
+	case AUDIO_DEREGISTER_ION: {
+		struct msm_audio_ion_info info;
+		mutex_lock(&audio->lock);
+		pr_debug("%s[%pK]:AUDIO_DEREGISTER_ION\n", __func__, audio);
+		if (copy_from_user(&info, (void *)arg, sizeof(info))) {
+			pr_err(
+				"%s: copy_from_user for AUDIO_DEREGISTER_ION failed\n",
+				__func__);
+			rc = -EFAULT;
+		} else {
+			mutex_lock(&audio->read_lock);
+			mutex_lock(&audio->write_lock);
+			rc = audio_aio_ion_remove(audio, &info);
+			mutex_unlock(&audio->write_lock);
+			mutex_unlock(&audio->read_lock);
+		}
+		mutex_unlock(&audio->lock);
+		break;
+	}
+	default:
+		pr_err("%s: Unknown ioctl cmd = %d", __func__, cmd);
+		rc =  -EINVAL;
+	}
+	return rc;
+}
+
+#ifdef CONFIG_COMPAT
+struct msm_audio_stream_config32 {
+	u32 buffer_size;
+	u32 buffer_count;
+};
+
+struct msm_audio_stats32 {
+	u32 byte_count;
+	u32 sample_count;
+	u32 unused[2];
+};
+
+struct msm_audio_config32 {
+	u32 buffer_size;
+	u32 buffer_count;
+	u32 channel_count;
+	u32 sample_rate;
+	u32 type;
+	u32 meta_field;
+	u32 bits;
+	u32 unused[3];
+};
+
+struct msm_audio_buf_cfg32 {
+	u32 meta_info_enable;
+	u32 frames_per_buf;
+};
+
+struct msm_audio_ion_info32 {
+	int fd;
+	compat_uptr_t vaddr;
+};
+
+enum {
+	AUDIO_GET_CONFIG_32 = _IOR(AUDIO_IOCTL_MAGIC, 3,
+			struct msm_audio_config32),
+	AUDIO_SET_CONFIG_32 = _IOW(AUDIO_IOCTL_MAGIC, 4,
+			struct msm_audio_config32),
+	AUDIO_GET_STATS_32 = _IOR(AUDIO_IOCTL_MAGIC, 5,
+			struct msm_audio_stats32),
+	AUDIO_GET_EVENT_32 = _IOR(AUDIO_IOCTL_MAGIC, 13,
+			struct msm_audio_event32),
+	AUDIO_ASYNC_WRITE_32 = _IOW(AUDIO_IOCTL_MAGIC, 17,
+			struct msm_audio_aio_buf32),
+	AUDIO_ASYNC_READ_32 = _IOW(AUDIO_IOCTL_MAGIC, 18,
+			struct msm_audio_aio_buf32),
+	AUDIO_SET_STREAM_CONFIG_32 = _IOW(AUDIO_IOCTL_MAGIC, 80,
+			struct msm_audio_stream_config32),
+	AUDIO_GET_STREAM_CONFIG_32 = _IOR(AUDIO_IOCTL_MAGIC, 81,
+			struct msm_audio_stream_config32),
+	AUDIO_GET_BUF_CFG_32 = _IOW(AUDIO_IOCTL_MAGIC, 93,
+			struct msm_audio_buf_cfg32),
+	AUDIO_SET_BUF_CFG_32 = _IOW(AUDIO_IOCTL_MAGIC, 94,
+			struct msm_audio_buf_cfg32),
+	AUDIO_REGISTER_ION_32 = _IOW(AUDIO_IOCTL_MAGIC, 97,
+			struct msm_audio_ion_info32),
+	AUDIO_DEREGISTER_ION_32 = _IOW(AUDIO_IOCTL_MAGIC, 98,
+			struct msm_audio_ion_info32),
+};
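+
+/*
+ * These _32 values differ from the native commands only in the size baked
+ * into _IOR/_IOW: e.g. AUDIO_ASYNC_WRITE_32 encodes
+ * sizeof(struct msm_audio_aio_buf32), whose pointers are compat_uptr_t, so
+ * a 32-bit process produces a distinct cmd value that is translated here.
+ */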
+
+static long audio_aio_compat_ioctl(struct file *file, unsigned int cmd,
+			unsigned long arg)
+{
+	struct q6audio_aio *audio = file->private_data;
+	int rc = 0;
+
+	switch (cmd) {
+	case AUDIO_ABORT_GET_EVENT:
+	case AUDIO_OUTPORT_FLUSH:
+	case AUDIO_STOP:
+	case AUDIO_PAUSE:
+	case AUDIO_FLUSH:
+	case AUDIO_GET_SESSION_ID:
+	case AUDIO_PM_AWAKE:
+	case AUDIO_PM_RELAX:
+		rc = audio_aio_shared_ioctl(file, cmd, arg);
+		break;
+	case AUDIO_GET_STATS_32: {
+		struct msm_audio_stats32 stats;
+		uint64_t timestamp;
+		memset(&stats, 0, sizeof(struct msm_audio_stats32));
+		stats.byte_count = atomic_read(&audio->in_bytes);
+		stats.sample_count = atomic_read(&audio->in_samples);
+		rc = q6asm_get_session_time(audio->ac, &timestamp);
+		if (rc >= 0)
+			memcpy(&stats.unused[0], &timestamp, sizeof(timestamp));
+		else
+			pr_debug("Error while getting timestamp\n");
+		if (copy_to_user((void *)arg, &stats, sizeof(stats))) {
+			pr_err(
+				"%s: copy_to_user for AUDIO_GET_STATS_32 failed\n",
+				__func__);
+			rc = -EFAULT;
+		}
+		break;
+	}
+	case AUDIO_GET_EVENT_32: {
+		pr_debug("%s[%pK]:AUDIO_GET_EVENT\n", __func__, audio);
+		if (mutex_trylock(&audio->get_event_lock)) {
+			rc = audio_aio_process_event_req_compat(audio,
+						(void __user *)arg);
+			mutex_unlock(&audio->get_event_lock);
+		} else
+			rc = -EBUSY;
+		break;
+	}
+	case AUDIO_ASYNC_WRITE_32: {
+		mutex_lock(&audio->write_lock);
+		if (audio->drv_status & ADRV_STATUS_FSYNC)
+			rc = -EBUSY;
+		else {
+			if (audio->enabled)
+				rc = audio_aio_buf_add_compat(audio, 1,
+						(void __user *)arg);
+			else
+				rc = -EPERM;
+		}
+		mutex_unlock(&audio->write_lock);
+		break;
+	}
+	case AUDIO_ASYNC_READ_32: {
+		mutex_lock(&audio->read_lock);
+		if (audio->feedback)
+			rc = audio_aio_buf_add_compat(audio, 0,
+					(void __user *)arg);
+		else
+			rc = -EPERM;
+		mutex_unlock(&audio->read_lock);
+		break;
+	}
+
+	case AUDIO_GET_STREAM_CONFIG_32: {
+		struct msm_audio_stream_config32 cfg;
+		mutex_lock(&audio->lock);
+		memset(&cfg, 0, sizeof(cfg));
+		cfg.buffer_size = audio->str_cfg.buffer_size;
+		cfg.buffer_count = audio->str_cfg.buffer_count;
+		pr_debug("%s[%pK]:GET STREAM CFG %d %d\n",
+			__func__, audio, cfg.buffer_size, cfg.buffer_count);
+		if (copy_to_user((void *)arg, &cfg, sizeof(cfg))) {
+			pr_err("%s: copy_to_user for AUDIO_GET_STREAM_CONFIG_32 failed\n",
+				__func__);
+			rc = -EFAULT;
+		}
+		mutex_unlock(&audio->lock);
+		break;
+	}
+	case AUDIO_SET_STREAM_CONFIG_32: {
+		struct msm_audio_stream_config32 cfg_32;
+		struct msm_audio_stream_config cfg;
+		pr_debug("%s[%pK]:SET STREAM CONFIG\n", __func__, audio);
+		mutex_lock(&audio->lock);
+		if (copy_from_user(&cfg_32, (void *)arg, sizeof(cfg_32))) {
+			pr_err("%s: copy_from_user for AUDIO_SET_STREAM_CONFIG_32 failed\n",
+				__func__);
+			rc = -EFAULT;
+			mutex_unlock(&audio->lock);
+			break;
+		}
+		cfg.buffer_size = cfg_32.buffer_size;
+		cfg.buffer_count = cfg_32.buffer_count;
+
+		audio->str_cfg.buffer_size = FRAME_SIZE;
+		audio->str_cfg.buffer_count = FRAME_NUM;
+		rc = 0;
+		mutex_unlock(&audio->lock);
+		break;
+	}
+	case AUDIO_GET_CONFIG_32: {
+		struct msm_audio_config32 cfg_32;
+		mutex_lock(&audio->lock);
+		memset(&cfg_32, 0, sizeof(cfg_32));
+		cfg_32.buffer_size = audio->pcm_cfg.buffer_size;
+		cfg_32.buffer_count = audio->pcm_cfg.buffer_count;
+		cfg_32.channel_count = audio->pcm_cfg.channel_count;
+		cfg_32.sample_rate = audio->pcm_cfg.sample_rate;
+		cfg_32.type = audio->pcm_cfg.type;
+		cfg_32.meta_field = audio->pcm_cfg.meta_field;
+		cfg_32.bits = audio->pcm_cfg.bits;
+
+		if (copy_to_user((void *)arg, &cfg_32, sizeof(cfg_32))) {
+			pr_err("%s: copy_to_user for AUDIO_GET_CONFIG_32 failed\n",
+				__func__);
+			rc = -EFAULT;
+		}
+		mutex_unlock(&audio->lock);
+		break;
+	}
+	case AUDIO_SET_CONFIG_32: {
+		struct msm_audio_config config;
+		struct msm_audio_config32 config_32;
+		mutex_lock(&audio->lock);
+
+		if (audio->feedback != NON_TUNNEL_MODE) {
+			pr_err("%s[%pK]:Not sufficient permission to change the playback mode\n",
+				 __func__, audio);
+			rc = -EACCES;
+			mutex_unlock(&audio->lock);
+			break;
+		}
+		pr_err("%s[%pK]:AUDIO_SET_CONFIG\n", __func__, audio);
+		if (copy_from_user(&config_32, (void *)arg,
+					sizeof(config_32))) {
+			pr_err("%s: copy_from_user for AUDIO_SET_CONFIG_32 failed\n",
+				__func__);
+			rc = -EFAULT;
+			mutex_unlock(&audio->lock);
+			break;
+		}
+		config.buffer_size = config_32.buffer_size;
+		config.buffer_count = config_32.buffer_count;
+		config.channel_count = config_32.channel_count;
+		config.sample_rate = config_32.sample_rate;
+		config.type = config_32.type;
+		config.meta_field = config_32.meta_field;
+		config.bits = config_32.bits;
+
+		if ((config.buffer_count > PCM_BUF_COUNT) ||
+			(config.buffer_count == 1))
+			config.buffer_count = PCM_BUF_COUNT;
+
+		if (config.buffer_size < PCM_BUFSZ_MIN)
+			config.buffer_size = PCM_BUFSZ_MIN;
+
+		audio->pcm_cfg.buffer_count = config.buffer_count;
+		audio->pcm_cfg.buffer_size = config.buffer_size;
+		audio->pcm_cfg.channel_count = config.channel_count;
+		audio->pcm_cfg.sample_rate = config.sample_rate;
+		rc = 0;
+		mutex_unlock(&audio->lock);
+		break;
+	}
+	case AUDIO_SET_BUF_CFG_32: {
+		struct msm_audio_buf_cfg cfg;
+		struct msm_audio_buf_cfg32 cfg_32;
+		mutex_lock(&audio->lock);
+		if (copy_from_user(&cfg_32, (void *)arg, sizeof(cfg_32))) {
+			pr_err("%s: copy_from_user for AUDIO_SET_CONFIG_32 failed\n",
+				__func__);
+			rc = -EFAULT;
+			mutex_unlock(&audio->lock);
+			break;
+		}
+		cfg.meta_info_enable = cfg_32.meta_info_enable;
+		cfg.frames_per_buf = cfg_32.frames_per_buf;
+
+		if ((audio->feedback == NON_TUNNEL_MODE) &&
+			!cfg.meta_info_enable) {
+			rc = -EFAULT;
+			mutex_unlock(&audio->lock);
+			break;
+		}
+
+		audio->buf_cfg.meta_info_enable = cfg.meta_info_enable;
+		pr_debug("%s[%pK]:session id %d: Set-buf-cfg: meta[%d]",
+				__func__, audio,
+				audio->ac->session, cfg.meta_info_enable);
+		mutex_unlock(&audio->lock);
+		break;
+	}
+	case AUDIO_GET_BUF_CFG_32: {
+		struct msm_audio_buf_cfg32 cfg_32;
+		pr_debug("%s[%pK]:session id %d: Get-buf-cfg: meta[%d] framesperbuf[%d]\n",
+			 __func__, audio,
+			audio->ac->session, audio->buf_cfg.meta_info_enable,
+			audio->buf_cfg.frames_per_buf);
+
+		mutex_lock(&audio->lock);
+		memset(&cfg_32, 0, sizeof(cfg_32));
+		cfg_32.meta_info_enable = audio->buf_cfg.meta_info_enable;
+		cfg_32.frames_per_buf = audio->buf_cfg.frames_per_buf;
+		if (copy_to_user((void *)arg, &cfg_32,
+			sizeof(struct msm_audio_buf_cfg32))) {
+			pr_err("%s: copy_to_user for AUDIO_GET_BUF_CFG_32 failed\n",
+				__func__);
+			rc = -EFAULT;
+		}
+		mutex_unlock(&audio->lock);
+		break;
+	}
+	case AUDIO_REGISTER_ION_32: {
+		struct msm_audio_ion_info32 info_32;
+		struct msm_audio_ion_info info;
+		pr_debug("%s[%pK]:AUDIO_REGISTER_ION\n", __func__, audio);
+		mutex_lock(&audio->lock);
+		if (copy_from_user(&info_32, (void *)arg, sizeof(info_32))) {
+			pr_err("%s: copy_from_user for AUDIO_REGISTER_ION_32 failed\n",
+				__func__);
+			rc = -EFAULT;
+		} else {
+			info.fd = info_32.fd;
+			info.vaddr = compat_ptr(info_32.vaddr);
+			mutex_lock(&audio->read_lock);
+			mutex_lock(&audio->write_lock);
+			rc = audio_aio_ion_add(audio, &info);
+			mutex_unlock(&audio->write_lock);
+			mutex_unlock(&audio->read_lock);
+		}
+		mutex_unlock(&audio->lock);
+		break;
+	}
+	case AUDIO_DEREGISTER_ION_32: {
+		struct msm_audio_ion_info32 info_32;
+		struct msm_audio_ion_info info;
+		mutex_lock(&audio->lock);
+		pr_debug("%s[%pK]:AUDIO_DEREGISTER_ION\n", __func__, audio);
+		if (copy_from_user(&info_32, (void *)arg, sizeof(info_32))) {
+			pr_err("%s: copy_from_user for AUDIO_DEREGISTER_ION_32 failed\n",
+				__func__);
+			rc = -EFAULT;
+		} else {
+			info.fd = info_32.fd;
+			info.vaddr = compat_ptr(info_32.vaddr);
+			mutex_lock(&audio->read_lock);
+			mutex_lock(&audio->write_lock);
+			rc = audio_aio_ion_remove(audio, &info);
+			mutex_unlock(&audio->write_lock);
+			mutex_unlock(&audio->read_lock);
+		}
+		mutex_unlock(&audio->lock);
+		break;
+	}
+	default:
+		pr_err("%s: Unknown ioctl cmd = %d\n", __func__, cmd);
+		rc = -EINVAL;
+	}
+	return rc;
+}
+#endif
diff -Nruw linux-4.4.115-fbx/drivers/misc/qcom./qdsp6v2/audio_utils_aio.h linux-4.4.115-fbx/drivers/misc/qcom/qdsp6v2/audio_utils_aio.h
--- linux-4.4.115-fbx/drivers/misc/qcom./qdsp6v2/audio_utils_aio.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/misc/qcom/qdsp6v2/audio_utils_aio.h	2019-01-22 16:16:24.747257672 +0100
@@ -0,0 +1,233 @@
+/* Copyright (C) 2008 Google, Inc.
+ * Copyright (C) 2008 HTC Corporation
+ * Copyright (c) 2009-2016, The Linux Foundation. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/fs.h>
+#include <linux/module.h>
+#include <linux/miscdevice.h>
+#include <linux/mutex.h>
+#include <linux/sched.h>
+#include <linux/uaccess.h>
+#include <linux/wait.h>
+#include <linux/wakelock.h>
+#include <linux/msm_audio.h>
+#include <linux/debugfs.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/msm_ion.h>
+#include <asm/ioctls.h>
+#include <linux/atomic.h>
+#include "q6audio_common.h"
+
+#define TUNNEL_MODE     0x0000
+#define NON_TUNNEL_MODE 0x0001
+
+#define ADRV_STATUS_AIO_INTF 0x00000001 /* AIO interface */
+#define ADRV_STATUS_FSYNC 0x00000008
+#define ADRV_STATUS_PAUSE 0x00000010
+#define AUDIO_DEC_EOS_SET  0x00000001
+#define AUDIO_DEC_EOF_SET  0x00000010
+#define AUDIO_EVENT_NUM		10
+
+#define __CONTAINS(r, v, l) ({                                  \
+	typeof(r) __r = r;                                      \
+	typeof(v) __v = v;                                      \
+	typeof(v) __e = __v + l;                                \
+	int res = ((__v >= __r->vaddr) &&                       \
+		(__e <= __r->vaddr + __r->len));                \
+	res;                                                    \
+})
+
+#define CONTAINS(r1, r2) ({                                     \
+	typeof(r2) __r2 = r2;                                   \
+	__CONTAINS(r1, __r2->vaddr, __r2->len);                 \
+})
+
+#define IN_RANGE(r, v) ({                                       \
+	typeof(r) __r = r;                                      \
+	typeof(v) __vv = v;                                     \
+	int res = ((__vv >= __r->vaddr) &&                      \
+		(__vv < (__r->vaddr + __r->len)));              \
+	res;                                                    \
+})
+
+#define OVERLAPS(r1, r2) ({                                     \
+	typeof(r1) __r1 = r1;                                   \
+	typeof(r2) __r2 = r2;                                   \
+	typeof(__r2->vaddr) __v = __r2->vaddr;                  \
+	typeof(__v) __e = __v + __r2->len - 1;                  \
+	int res = (IN_RANGE(__r1, __v) || IN_RANGE(__r1, __e)); \
+	res;                                                    \
+})
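+
+/*
+ * Illustrative example (not part of the driver): for a region
+ * r = { .vaddr = (void *)0x1000, .len = 0x1000 }:
+ *   __CONTAINS(&r, (void *)0x1800, 0x100) -> 1 (fully inside)
+ *   IN_RANGE(&r, (void *)0x2000)          -> 0 (one past the end)
+ *   OVERLAPS(&r, &other), other at 0x1f00 with len 0x200 -> 1,
+ *   because other's start address still falls inside r.
+ */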
+
+struct timestamp {
+	u32 lowpart;
+	u32 highpart;
+} __packed;
+
+struct meta_out_dsp {
+	u32 offset_to_frame;
+	u32 frame_size;
+	u32 encoded_pcm_samples;
+	u32 msw_ts;
+	u32 lsw_ts;
+	u32 nflags;
+} __packed;
+
+struct dec_meta_in {
+	unsigned char reserved[18];
+	unsigned short offset;
+	struct timestamp ntimestamp;
+	unsigned int nflags;
+} __packed;
+
+struct dec_meta_out {
+	unsigned int reserved[7];
+	unsigned int num_of_frames;
+	struct meta_out_dsp meta_out_dsp[];
+} __packed;
+
+/* General meta field to store meta info locally */
+union  meta_data {
+	struct dec_meta_out meta_out;
+	struct dec_meta_in meta_in;
+} __packed;
+
+/* per device wakeup source manager */
+struct ws_mgr {
+	struct mutex       ws_lock;
+	uint32_t           ref_cnt;
+};
+
+#define PCM_BUF_COUNT           (2)
+/* Buffer with meta */
+#define PCM_BUFSZ_MIN           ((4*1024) + sizeof(struct dec_meta_out))
+
+/* FRAME_NUM must be a power of two */
+#define FRAME_NUM               (2)
+#define FRAME_SIZE	((4*1536) + sizeof(struct dec_meta_in))
+
+struct audio_aio_ion_region {
+	struct list_head list;
+	struct ion_handle *handle;
+	int fd;
+	void *vaddr;
+	phys_addr_t paddr;
+	void *kvaddr;
+	unsigned long len;
+	unsigned ref_cnt;
+};
+
+struct audio_aio_event {
+	struct list_head list;
+	int event_type;
+	union msm_audio_event_payload payload;
+};
+
+struct audio_aio_buffer_node {
+	struct list_head list;
+	struct msm_audio_aio_buf buf;
+	unsigned long paddr;
+	uint32_t token;
+	void            *kvaddr;
+	union meta_data meta_info;
+};
+
+struct q6audio_aio;
+struct audio_aio_drv_operations {
+	void (*out_flush) (struct q6audio_aio *);
+	void (*in_flush) (struct q6audio_aio *);
+};
+
+struct q6audio_aio {
+	atomic_t in_bytes;
+	atomic_t in_samples;
+
+	struct msm_audio_stream_config str_cfg;
+	struct msm_audio_buf_cfg        buf_cfg;
+	struct msm_audio_config pcm_cfg;
+	void *codec_cfg;
+
+	struct audio_client *ac;
+
+	struct mutex lock;
+	struct mutex read_lock;
+	struct mutex write_lock;
+	struct mutex get_event_lock;
+	wait_queue_head_t cmd_wait;
+	wait_queue_head_t write_wait;
+	wait_queue_head_t event_wait;
+	spinlock_t dsp_lock;
+	spinlock_t event_queue_lock;
+
+	struct miscdevice *miscdevice;
+	uint32_t wakelock_voted;
+	struct ws_mgr *audio_ws_mgr;
+
+#ifdef CONFIG_DEBUG_FS
+	struct dentry *dentry;
+#endif
+	struct list_head out_queue;     /* queue to retain output buffers */
+	struct list_head in_queue;      /* queue to retain input buffers */
+	struct list_head free_event_queue;
+	struct list_head event_queue;
+	struct list_head ion_region_queue;     /* protected by lock */
+	struct ion_client *client;
+	struct audio_aio_drv_operations drv_ops;
+	union msm_audio_event_payload eos_write_payload;
+	uint32_t device_events;
+	uint16_t volume;
+	uint32_t drv_status;
+	int event_abort;
+	int eos_rsp;
+	int eos_flag;
+	int opened;
+	int enabled;
+	int stopped;
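+	/* TUNNEL_MODE or NON_TUNNEL_MODE (defines above) */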
+	int feedback;
+	int rflush;             /* Read  flush */
+	int wflush;             /* Write flush */
+	bool reset_event;
+	long (*codec_ioctl)(struct file *, unsigned int, unsigned long);
+	long (*codec_compat_ioctl)(struct file *, unsigned int, unsigned long);
+};
+
+void audio_aio_async_write_ack(struct q6audio_aio *audio, uint32_t token,
+				uint32_t *payload);
+
+void audio_aio_async_read_ack(struct q6audio_aio *audio, uint32_t token,
+			uint32_t *payload);
+
+int insert_eos_buf(struct q6audio_aio *audio,
+		struct audio_aio_buffer_node *buf_node);
+
+void extract_meta_out_info(struct q6audio_aio *audio,
+		struct audio_aio_buffer_node *buf_node, int dir);
+
+int audio_aio_open(struct q6audio_aio *audio, struct file *file);
+int audio_aio_enable(struct q6audio_aio  *audio);
+void audio_aio_post_event(struct q6audio_aio *audio, int type,
+		union msm_audio_event_payload payload);
+int audio_aio_release(struct inode *inode, struct file *file);
+int audio_aio_fsync(struct file *file, loff_t start, loff_t end, int datasync);
+void audio_aio_async_out_flush(struct q6audio_aio *audio);
+void audio_aio_async_in_flush(struct q6audio_aio *audio);
+void audio_aio_ioport_reset(struct q6audio_aio *audio);
+int enable_volume_ramp(struct q6audio_aio *audio);
+#ifdef CONFIG_DEBUG_FS
+int audio_aio_debug_open(struct inode *inode, struct file *file);
+ssize_t audio_aio_debug_read(struct file *file, char __user *buf,
+			size_t count, loff_t *ppos);
+#endif
diff -Nruw linux-4.4.115-fbx/drivers/misc/qcom./qdsp6v2/audio_utils.c linux-4.4.115-fbx/drivers/misc/qcom/qdsp6v2/audio_utils.c
--- linux-4.4.115-fbx/drivers/misc/qcom./qdsp6v2/audio_utils.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/misc/qcom/qdsp6v2/audio_utils.c	2019-10-29 09:26:24.045207112 +0100
@@ -0,0 +1,929 @@
+/* Copyright (c) 2010-2016, 2018-2019, The Linux Foundation.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+*/
+
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/miscdevice.h>
+#include <linux/uaccess.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+#include <linux/dma-mapping.h>
+#include <linux/slab.h>
+#include <linux/atomic.h>
+#include <linux/compat.h>
+#include <asm/ioctls.h>
+#include "audio_utils.h"
+
+/*
+ * Define maximum buffer size. Below values are chosen considering the higher
+ * values used among all native drivers.
+ */
+#define MAX_FRAME_SIZE	1536
+#define MAX_FRAMES	5
+#define META_SIZE	(sizeof(struct meta_out_dsp))
+#define MAX_BUFFER_SIZE	(1 + ((MAX_FRAME_SIZE + META_SIZE) * MAX_FRAMES))
+
+static int audio_in_pause(struct q6audio_in  *audio)
+{
+	int rc;
+
+	rc = q6asm_cmd(audio->ac, CMD_PAUSE);
+	if (rc < 0)
+		pr_err("%s:session id %d: pause cmd failed rc=%d\n", __func__,
+				audio->ac->session, rc);
+
+	return rc;
+}
+
+static int audio_in_flush(struct q6audio_in  *audio)
+{
+	int rc;
+
+	pr_debug("%s:session id %d: flush\n", __func__, audio->ac->session);
+	/* Flush if session running */
+	if (audio->enabled) {
+		/* Implicitly issue a pause to the encoder before flushing */
+		rc = audio_in_pause(audio);
+		if (rc < 0) {
+			pr_err("%s:session id %d: pause cmd failed rc=%d\n",
+				 __func__, audio->ac->session, rc);
+			return rc;
+		}
+
+		rc = q6asm_cmd(audio->ac, CMD_FLUSH);
+		if (rc < 0) {
+			pr_err("%s:session id %d: flush cmd failed rc=%d\n",
+				__func__, audio->ac->session, rc);
+			return rc;
+		}
+		/* 2nd arg: 0 -> run immediately
+		 * 3rd arg: 0 -> msw_ts, 4th arg: 0 -> lsw_ts */
+		q6asm_run(audio->ac, 0x00, 0x00, 0x00);
+		pr_debug("Rerun the session\n");
+	}
+	audio->rflush = 1;
+	audio->wflush = 1;
+	memset(audio->out_frame_info, 0, sizeof(audio->out_frame_info));
+	wake_up(&audio->read_wait);
+	/* get read_lock to ensure no more waiting read thread */
+	mutex_lock(&audio->read_lock);
+	audio->rflush = 0;
+	mutex_unlock(&audio->read_lock);
+	wake_up(&audio->write_wait);
+	/* get write_lock to ensure no more waiting write thread */
+	mutex_lock(&audio->write_lock);
+	audio->wflush = 0;
+	mutex_unlock(&audio->write_lock);
+	pr_debug("%s:session id %d: in_bytes %d\n", __func__,
+			audio->ac->session, atomic_read(&audio->in_bytes));
+	pr_debug("%s:session id %d: in_samples %d\n", __func__,
+			audio->ac->session, atomic_read(&audio->in_samples));
+	atomic_set(&audio->in_bytes, 0);
+	atomic_set(&audio->in_samples, 0);
+	atomic_set(&audio->out_count, 0);
+	return 0;
+}
+
+/* must be called with audio->lock held */
+int audio_in_enable(struct q6audio_in  *audio)
+{
+	if (audio->enabled)
+		return 0;
+
+	/* 2nd arg: 0 -> run immediately
+	 * 3rd arg: 0 -> msw_ts, 4th arg: 0 -> lsw_ts */
+	return q6asm_run(audio->ac, 0x00, 0x00, 0x00);
+}
+
+/* must be called with audio->lock held */
+int audio_in_disable(struct q6audio_in  *audio)
+{
+	int rc = 0;
+	if (!audio->stopped) {
+		audio->enabled = 0;
+		audio->opened = 0;
+		pr_debug("%s:session id %d: inbytes[%d] insamples[%d]\n",
+				__func__, audio->ac->session,
+				atomic_read(&audio->in_bytes),
+				atomic_read(&audio->in_samples));
+
+		rc = q6asm_cmd(audio->ac, CMD_CLOSE);
+		if (rc < 0)
+			pr_err("%s:session id %d: Failed to close the session rc=%d\n",
+				__func__, audio->ac->session,
+				rc);
+		audio->stopped = 1;
+		memset(audio->out_frame_info, 0,
+				sizeof(audio->out_frame_info));
+		wake_up(&audio->read_wait);
+		wake_up(&audio->write_wait);
+	}
+	pr_debug("%s:session id %d: enabled[%d]\n", __func__,
+			audio->ac->session, audio->enabled);
+	return rc;
+}
+
+int audio_in_buf_alloc(struct q6audio_in *audio)
+{
+	int rc = 0;
+
+	switch (audio->buf_alloc) {
+	case NO_BUF_ALLOC:
+		if (audio->feedback == NON_TUNNEL_MODE) {
+			rc = q6asm_audio_client_buf_alloc(IN,
+				audio->ac,
+				ALIGN_BUF_SIZE(audio->pcm_cfg.buffer_size),
+				audio->pcm_cfg.buffer_count);
+			if (rc < 0) {
+				pr_err("%s:session id %d: Buffer Alloc failed\n",
+						__func__,
+						audio->ac->session);
+				rc = -ENOMEM;
+				break;
+			}
+			audio->buf_alloc |= BUF_ALLOC_IN;
+		}
+		rc = q6asm_audio_client_buf_alloc(OUT, audio->ac,
+				ALIGN_BUF_SIZE(audio->str_cfg.buffer_size),
+				audio->str_cfg.buffer_count);
+		if (rc < 0) {
+			pr_err("%s:session id %d: Buffer Alloc failed rc=%d\n",
+					__func__, audio->ac->session, rc);
+			rc = -ENOMEM;
+			break;
+		}
+		audio->buf_alloc |= BUF_ALLOC_OUT;
+		break;
+	case BUF_ALLOC_IN:
+		rc = q6asm_audio_client_buf_alloc(OUT, audio->ac,
+				ALIGN_BUF_SIZE(audio->str_cfg.buffer_size),
+				audio->str_cfg.buffer_count);
+		if (rc < 0) {
+			pr_err("%s:session id %d: Buffer Alloc failed rc=%d\n",
+					__func__, audio->ac->session, rc);
+			rc = -ENOMEM;
+			break;
+		}
+		audio->buf_alloc |= BUF_ALLOC_OUT;
+		break;
+	case BUF_ALLOC_OUT:
+		if (audio->feedback == NON_TUNNEL_MODE) {
+			rc = q6asm_audio_client_buf_alloc(IN, audio->ac,
+				ALIGN_BUF_SIZE(audio->pcm_cfg.buffer_size),
+				audio->pcm_cfg.buffer_count);
+			if (rc < 0) {
+				pr_err("%s:session id %d: Buffer Alloc failed\n",
+					__func__,
+					audio->ac->session);
+				rc = -ENOMEM;
+				break;
+			}
+			audio->buf_alloc |= BUF_ALLOC_IN;
+		}
+		break;
+	default:
+		pr_debug("%s:session id %d: buf[%d]\n", __func__,
+					audio->ac->session, audio->buf_alloc);
+	}
+
+	return rc;
+}
+
+int audio_in_set_config(struct file *file,
+		struct msm_audio_config *cfg)
+{
+	int rc = 0;
+	struct q6audio_in  *audio = file->private_data;
+
+	if (audio->feedback != NON_TUNNEL_MODE) {
+		pr_err("%s:session id %d: Not sufficient permission to change the record mode\n",
+			__func__, audio->ac->session);
+		rc = -EACCES;
+		goto ret;
+	}
+	if ((cfg->buffer_count > PCM_BUF_COUNT) ||
+		(cfg->buffer_count == 1))
+		cfg->buffer_count = PCM_BUF_COUNT;
+
+	audio->pcm_cfg.buffer_count = cfg->buffer_count;
+	audio->pcm_cfg.buffer_size  = cfg->buffer_size;
+	audio->pcm_cfg.channel_count = cfg->channel_count;
+	audio->pcm_cfg.sample_rate = cfg->sample_rate;
+	if (audio->opened && audio->feedback == NON_TUNNEL_MODE) {
+		rc = q6asm_audio_client_buf_alloc(IN, audio->ac,
+			ALIGN_BUF_SIZE(audio->pcm_cfg.buffer_size),
+			audio->pcm_cfg.buffer_count);
+		if (rc < 0) {
+			pr_err("%s:session id %d: Buffer Alloc failed\n",
+				__func__, audio->ac->session);
+			rc = -ENOMEM;
+			goto ret;
+		}
+	}
+	audio->buf_alloc |= BUF_ALLOC_IN;
+	rc = 0;
+	pr_debug("%s:session id %d: AUDIO_SET_CONFIG %d %d\n", __func__,
+			audio->ac->session, audio->pcm_cfg.buffer_count,
+			audio->pcm_cfg.buffer_size);
+ret:
+	return rc;
+}
+/* ------------------- device --------------------- */
+static long audio_in_ioctl_shared(struct file *file,
+				unsigned int cmd, unsigned long arg)
+{
+	struct q6audio_in  *audio = file->private_data;
+	int rc = 0;
+
+	switch (cmd) {
+	case AUDIO_FLUSH: {
+		/* Make sure we're stopped and wake any threads that
+		 * might be blocked holding the read_lock. While
+		 * audio->stopped is set, read threads exit immediately.
+		 */
+		rc = audio_in_flush(audio);
+		if (rc < 0)
+			pr_err("%s:session id %d: Flush Fail rc=%d\n",
+				__func__, audio->ac->session, rc);
+		else { /* Register back the flushed read buffer with DSP */
+			int cnt = 0;
+			while (cnt++ < audio->str_cfg.buffer_count)
+				q6asm_read(audio->ac); /* Push buffer to DSP */
+			pr_debug("register the read buffer\n");
+		}
+		break;
+	}
+	case AUDIO_PAUSE: {
+		pr_debug("%s:session id %d: AUDIO_PAUSE\n", __func__,
+					audio->ac->session);
+		if (audio->enabled)
+			audio_in_pause(audio);
+		break;
+	}
+	case AUDIO_GET_SESSION_ID: {
+		if (copy_to_user((void *) arg, &audio->ac->session,
+			sizeof(u16))) {
+			pr_err("%s: copy_to_user for AUDIO_GET_SESSION_ID failed\n",
+				__func__);
+			rc = -EFAULT;
+		}
+		break;
+	}
+	default:
+		pr_err("%s: Unknown ioctl cmd = %d\n", __func__, cmd);
+		rc = -EINVAL;
+	}
+	return rc;
+}
+
+long audio_in_ioctl(struct file *file,
+				unsigned int cmd, unsigned long arg)
+{
+	struct q6audio_in  *audio = file->private_data;
+	int rc = 0;
+
+	if (cmd == AUDIO_GET_STATS) {
+		struct msm_audio_stats stats;
+		memset(&stats, 0, sizeof(stats));
+		stats.byte_count = atomic_read(&audio->in_bytes);
+		stats.sample_count = atomic_read(&audio->in_samples);
+		if (copy_to_user((void *) arg, &stats, sizeof(stats)))
+			return -EFAULT;
+		return rc;
+	}
+
+	mutex_lock(&audio->lock);
+	switch (cmd) {
+	case AUDIO_FLUSH:
+	case AUDIO_PAUSE:
+	case AUDIO_GET_SESSION_ID:
+		rc = audio_in_ioctl_shared(file, cmd, arg);
+		break;
+	case AUDIO_GET_STREAM_CONFIG: {
+		struct msm_audio_stream_config cfg;
+		memset(&cfg, 0, sizeof(cfg));
+		cfg.buffer_size = audio->str_cfg.buffer_size;
+		cfg.buffer_count = audio->str_cfg.buffer_count;
+		if (copy_to_user((void *)arg, &cfg, sizeof(cfg)))
+			rc = -EFAULT;
+		pr_debug("%s:session id %d: AUDIO_GET_STREAM_CONFIG %d %d\n",
+				__func__, audio->ac->session, cfg.buffer_size,
+				cfg.buffer_count);
+		break;
+	}
+	case AUDIO_SET_STREAM_CONFIG: {
+		struct msm_audio_stream_config cfg;
+		if (copy_from_user(&cfg, (void *)arg, sizeof(cfg))) {
+			pr_err("%s: copy_from_user for AUDIO_SET_STREAM_CONFIG failed\n",
+				__func__);
+			rc = -EFAULT;
+			break;
+		}
+		/* Buffer must hold at least one frame plus its meta
+		 * header, and there must be at least FRAME_NUM buffers */
+		if ((cfg.buffer_size < (audio->min_frame_size +
+			sizeof(struct meta_out_dsp))) ||
+			(cfg.buffer_count < FRAME_NUM)) {
+			rc = -EINVAL;
+			break;
+		}
+		if (cfg.buffer_size > MAX_BUFFER_SIZE) {
+			rc = -EINVAL;
+			break;
+		}
+		audio->str_cfg.buffer_size = cfg.buffer_size;
+		audio->str_cfg.buffer_count = cfg.buffer_count;
+		if (audio->opened) {
+			rc = q6asm_audio_client_buf_alloc(OUT, audio->ac,
+				ALIGN_BUF_SIZE(audio->str_cfg.buffer_size),
+				audio->str_cfg.buffer_count);
+			if (rc < 0) {
+				pr_err("%s: session id %d: Buffer Alloc failed rc=%d\n",
+					__func__, audio->ac->session, rc);
+				rc = -ENOMEM;
+				break;
+			}
+		}
+		audio->buf_alloc |= BUF_ALLOC_OUT;
+		rc = 0;
+		pr_debug("%s:session id %d: AUDIO_SET_STREAM_CONFIG %d %d\n",
+				__func__, audio->ac->session,
+				audio->str_cfg.buffer_size,
+				audio->str_cfg.buffer_count);
+		break;
+	}
+	case AUDIO_SET_BUF_CFG: {
+		struct msm_audio_buf_cfg  cfg;
+		if (copy_from_user(&cfg, (void *)arg, sizeof(cfg))) {
+			rc = -EFAULT;
+			break;
+		}
+		if ((audio->feedback == NON_TUNNEL_MODE) &&
+			!cfg.meta_info_enable) {
+			rc = -EFAULT;
+			break;
+		}
+
+		/* Restrict frames per buffer so the default buffer
+		 * size is never exceeded */
+		if (cfg.frames_per_buf > audio->max_frames_per_buf) {
+			rc = -EFAULT;
+			break;
+		}
+		audio->buf_cfg.meta_info_enable = cfg.meta_info_enable;
+		audio->buf_cfg.frames_per_buf = cfg.frames_per_buf;
+		pr_debug("%s:session id %d: Set-buf-cfg: meta[%d] framesperbuf[%d]\n",
+				__func__,
+				audio->ac->session, cfg.meta_info_enable,
+				cfg.frames_per_buf);
+		break;
+	}
+	case AUDIO_GET_BUF_CFG: {
+		pr_debug("%s:session id %d: Get-buf-cfg: meta[%d] framesperbuf[%d]\n",
+			__func__,
+			audio->ac->session, audio->buf_cfg.meta_info_enable,
+			audio->buf_cfg.frames_per_buf);
+
+		if (copy_to_user((void *)arg, &audio->buf_cfg,
+					sizeof(struct msm_audio_buf_cfg)))
+			rc = -EFAULT;
+		break;
+	}
+	case AUDIO_GET_CONFIG: {
+		if (copy_to_user((void *)arg, &audio->pcm_cfg,
+					sizeof(struct msm_audio_config)))
+			rc = -EFAULT;
+		break;
+	}
+	case AUDIO_SET_CONFIG: {
+		struct msm_audio_config cfg;
+		if (copy_from_user(&cfg, (void *)arg, sizeof(cfg))) {
+			pr_err("%s: copy_from_user for AUDIO_SET_CONFIG failed\n",
+				__func__);
+			rc = -EFAULT;
+			break;
+		}
+		rc = audio_in_set_config(file, &cfg);
+		break;
+	}
+	default:
+		/* call codec specific ioctl */
+		rc = audio->enc_ioctl(file, cmd, arg);
+	}
+	mutex_unlock(&audio->lock);
+	return rc;
+}
+
+#ifdef CONFIG_COMPAT
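+/*
+ * 32-bit counterparts of the native ioctl structures. The ioctl
+ * command value encodes the argument size, so the _32 commands below
+ * are regenerated with these layouts for 32-bit userspace on a 64-bit
+ * kernel. AUDIO_GET_BUF_CFG_32 is defined with _IOW rather than _IOR,
+ * presumably to keep its number identical to the native command.
+ */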
+struct msm_audio_stats32 {
+	u32 byte_count;
+	u32 sample_count;
+	u32 unused[2];
+};
+
+struct msm_audio_stream_config32 {
+	u32 buffer_size;
+	u32 buffer_count;
+};
+
+struct msm_audio_config32 {
+	u32 buffer_size;
+	u32 buffer_count;
+	u32 channel_count;
+	u32 sample_rate;
+	u32 type;
+	u32 meta_field;
+	u32 bits;
+	u32 unused[3];
+};
+
+struct msm_audio_buf_cfg32 {
+	u32 meta_info_enable;
+	u32 frames_per_buf;
+};
+
+enum {
+	AUDIO_GET_CONFIG_32 = _IOR(AUDIO_IOCTL_MAGIC, 3,
+			struct msm_audio_config32),
+	AUDIO_SET_CONFIG_32 = _IOW(AUDIO_IOCTL_MAGIC, 4,
+			struct msm_audio_config32),
+	AUDIO_GET_STATS_32 = _IOR(AUDIO_IOCTL_MAGIC, 5,
+			struct msm_audio_stats32),
+	AUDIO_SET_STREAM_CONFIG_32 = _IOW(AUDIO_IOCTL_MAGIC, 80,
+			struct msm_audio_stream_config32),
+	AUDIO_GET_STREAM_CONFIG_32 = _IOR(AUDIO_IOCTL_MAGIC, 81,
+			struct msm_audio_stream_config32),
+	AUDIO_SET_BUF_CFG_32 = _IOW(AUDIO_IOCTL_MAGIC, 94,
+			struct msm_audio_buf_cfg32),
+	AUDIO_GET_BUF_CFG_32 = _IOW(AUDIO_IOCTL_MAGIC, 93,
+			struct msm_audio_buf_cfg32),
+};
+
+long audio_in_compat_ioctl(struct file *file,
+				unsigned int cmd, unsigned long arg)
+{
+	struct q6audio_in  *audio = file->private_data;
+	int rc = 0;
+
+	if (cmd == AUDIO_GET_STATS_32) {
+		struct msm_audio_stats32 stats_32;
+		memset(&stats_32, 0, sizeof(stats_32));
+		stats_32.byte_count = atomic_read(&audio->in_bytes);
+		stats_32.sample_count = atomic_read(&audio->in_samples);
+		if (copy_to_user((void *) arg, &stats_32, sizeof(stats_32))) {
+			pr_err("%s: copy_to_user failed for AUDIO_GET_STATS_32\n",
+				__func__);
+			return -EFAULT;
+		}
+		return rc;
+	}
+
+	mutex_lock(&audio->lock);
+	switch (cmd) {
+	case AUDIO_FLUSH:
+	case AUDIO_PAUSE:
+	case AUDIO_GET_SESSION_ID:
+		rc = audio_in_ioctl_shared(file, cmd, arg);
+		break;
+	case AUDIO_GET_STREAM_CONFIG_32: {
+		struct msm_audio_stream_config32 cfg_32;
+		memset(&cfg_32, 0, sizeof(cfg_32));
+		cfg_32.buffer_size = audio->str_cfg.buffer_size;
+		cfg_32.buffer_count = audio->str_cfg.buffer_count;
+		if (copy_to_user((void *)arg, &cfg_32, sizeof(cfg_32))) {
+			pr_err("%s: Copy to user failed\n", __func__);
+			rc = -EFAULT;
+		}
+		pr_debug("%s:session id %d: AUDIO_GET_STREAM_CONFIG %d %d\n",
+				__func__, audio->ac->session,
+				cfg_32.buffer_size,
+				cfg_32.buffer_count);
+		break;
+	}
+	case AUDIO_SET_STREAM_CONFIG_32: {
+		struct msm_audio_stream_config32 cfg_32;
+		struct msm_audio_stream_config cfg;
+		if (copy_from_user(&cfg_32, (void *)arg, sizeof(cfg_32))) {
+			pr_err("%s: copy_from_user for AUDIO_SET_STREAM_CONFIG_32 failed\n",
+				__func__);
+			rc = -EFAULT;
+			break;
+		}
+		cfg.buffer_size = cfg_32.buffer_size;
+		cfg.buffer_count = cfg_32.buffer_count;
+		/* Buffer must hold at least one frame plus its meta
+		 * header, and there must be at least FRAME_NUM buffers */
+		if ((cfg.buffer_size < (audio->min_frame_size +
+			sizeof(struct meta_out_dsp))) ||
+			(cfg.buffer_count < FRAME_NUM)) {
+			rc = -EINVAL;
+			break;
+		}
+		audio->str_cfg.buffer_size = cfg.buffer_size;
+		audio->str_cfg.buffer_count = cfg.buffer_count;
+		if (audio->opened) {
+			rc = q6asm_audio_client_buf_alloc(OUT, audio->ac,
+				ALIGN_BUF_SIZE(audio->str_cfg.buffer_size),
+				audio->str_cfg.buffer_count);
+			if (rc < 0) {
+				pr_err("%s: session id %d: Buffer Alloc failed rc=%d\n",
+					__func__, audio->ac->session, rc);
+				rc = -ENOMEM;
+				break;
+			}
+		}
+		audio->buf_alloc |= BUF_ALLOC_OUT;
+		pr_debug("%s:session id %d: AUDIO_SET_STREAM_CONFIG %d %d\n",
+				__func__, audio->ac->session,
+				audio->str_cfg.buffer_size,
+				audio->str_cfg.buffer_count);
+		break;
+	}
+	case AUDIO_SET_BUF_CFG_32: {
+		struct msm_audio_buf_cfg32 cfg_32;
+		struct msm_audio_buf_cfg cfg;
+		if (copy_from_user(&cfg_32, (void *)arg, sizeof(cfg_32))) {
+			pr_err("%s: copy_from_user for AUDIO_SET_BUF_CFG_32 failed\n",
+				__func__);
+			rc = -EFAULT;
+			break;
+		}
+		cfg.meta_info_enable = cfg_32.meta_info_enable;
+		cfg.frames_per_buf = cfg_32.frames_per_buf;
+
+		if ((audio->feedback == NON_TUNNEL_MODE) &&
+			!cfg.meta_info_enable) {
+			rc = -EFAULT;
+			break;
+		}
+
+		/* Restrict frames per buffer so the default buffer
+		 * size is never exceeded */
+		if (cfg.frames_per_buf > audio->max_frames_per_buf) {
+			rc = -EFAULT;
+			break;
+		}
+		audio->buf_cfg.meta_info_enable = cfg.meta_info_enable;
+		audio->buf_cfg.frames_per_buf = cfg.frames_per_buf;
+		pr_debug("%s:session id %d: Set-buf-cfg: meta[%d] framesperbuf[%d]\n",
+			__func__, audio->ac->session, cfg.meta_info_enable,
+			cfg.frames_per_buf);
+		break;
+	}
+	case AUDIO_GET_BUF_CFG_32: {
+		struct msm_audio_buf_cfg32 cfg_32;
+		pr_debug("%s:session id %d: Get-buf-cfg: meta[%d] framesperbuf[%d]\n",
+			__func__,
+			audio->ac->session, audio->buf_cfg.meta_info_enable,
+			audio->buf_cfg.frames_per_buf);
+		cfg_32.meta_info_enable = audio->buf_cfg.meta_info_enable;
+		cfg_32.frames_per_buf = audio->buf_cfg.frames_per_buf;
+
+		if (copy_to_user((void *)arg, &cfg_32,
+			sizeof(struct msm_audio_buf_cfg32))) {
+			pr_err("%s: Copy to user failed\n", __func__);
+			rc = -EFAULT;
+		}
+		break;
+	}
+	case AUDIO_GET_CONFIG_32: {
+		struct msm_audio_config32 cfg_32;
+		memset(&cfg_32, 0, sizeof(cfg_32));
+		cfg_32.buffer_size = audio->pcm_cfg.buffer_size;
+		cfg_32.buffer_count = audio->pcm_cfg.buffer_count;
+		cfg_32.channel_count = audio->pcm_cfg.channel_count;
+		cfg_32.sample_rate = audio->pcm_cfg.sample_rate;
+		cfg_32.type = audio->pcm_cfg.type;
+		cfg_32.meta_field = audio->pcm_cfg.meta_field;
+		cfg_32.bits = audio->pcm_cfg.bits;
+
+		if (copy_to_user((void *)arg, &cfg_32,
+					sizeof(struct msm_audio_config32))) {
+			pr_err("%s: Copy to user failed\n", __func__);
+			rc = -EFAULT;
+		}
+		break;
+	}
+	case AUDIO_SET_CONFIG_32: {
+		struct msm_audio_config32 cfg_32;
+		struct msm_audio_config cfg;
+		if (copy_from_user(&cfg_32, (void *)arg, sizeof(cfg_32))) {
+			pr_err("%s: copy_from_user for AUDIO_SET_CONFIG_32 failed\n",
+				__func__);
+			rc = -EFAULT;
+			break;
+		}
+		cfg.buffer_size = cfg_32.buffer_size;
+		cfg.buffer_count = cfg_32.buffer_count;
+		cfg.channel_count = cfg_32.channel_count;
+		cfg.sample_rate = cfg_32.sample_rate;
+		cfg.type = cfg_32.type;
+		cfg.meta_field = cfg_32.meta_field;
+		cfg.bits = cfg_32.bits;
+		rc = audio_in_set_config(file, &cfg);
+		break;
+	}
+	default:
+		/* call codec specific ioctl */
+		rc = audio->enc_compat_ioctl(file, cmd, arg);
+	}
+	mutex_unlock(&audio->lock);
+	return rc;
+}
+#endif
+
+ssize_t audio_in_read(struct file *file,
+				char __user *buf,
+				size_t count, loff_t *pos)
+{
+	struct q6audio_in  *audio = file->private_data;
+	const char __user *start = buf;
+	unsigned char *data;
+	uint32_t offset = 0;
+	uint32_t size = 0;
+	int rc = 0;
+	uint32_t idx;
+	struct meta_out_dsp meta;
+	uint32_t bytes_to_copy = 0;
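+	/* When meta info is enabled every read buffer is prefixed by a
+	 * one-byte frame count followed by frames_per_buf meta_out_dsp
+	 * records; mfield_size accounts for that prefix. */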
+	uint32_t mfield_size = (audio->buf_cfg.meta_info_enable == 0) ? 0 :
+		(sizeof(unsigned char) +
+		(sizeof(struct meta_out_dsp)*(audio->buf_cfg.frames_per_buf)));
+
+	memset(&meta, 0, sizeof(meta));
+	pr_debug("%s:session id %d: read - %zd\n", __func__, audio->ac->session,
+			count);
+	if (audio->reset_event)
+		return -ENETRESET;
+
+	if (!audio->enabled)
+		return -EFAULT;
+	mutex_lock(&audio->read_lock);
+	while (count > 0) {
+		rc = wait_event_interruptible(
+			audio->read_wait,
+			((atomic_read(&audio->out_count) > 0) ||
+			(audio->stopped) ||
+			 audio->rflush || audio->eos_rsp ||
+			audio->event_abort));
+
+		if (audio->event_abort) {
+			rc = -EIO;
+			break;
+		}
+
+		if (rc < 0)
+			break;
+
+		if ((audio->stopped && !(atomic_read(&audio->out_count))) ||
+			audio->rflush) {
+			pr_debug("%s:session id %d: driver in stop state or flush, no more buf to read\n",
+				__func__,
+				audio->ac->session);
+			rc = 0;/* End of File */
+			break;
+		}
+		if (!(atomic_read(&audio->out_count)) &&
+			(audio->eos_rsp == 1) &&
+			(count >= (sizeof(unsigned char) +
+				sizeof(struct meta_out_dsp)))) {
+			unsigned char num_of_frames;
+			pr_info("%s:session id %d: eos %d at output\n",
+				__func__, audio->ac->session, audio->eos_rsp);
+			if (buf != start)
+				break;
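+			/* A frame count of 0xFF with all-ones meta fields
+			 * marks a meta-only EOS buffer for userspace */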
+			num_of_frames = 0xFF;
+			if (copy_to_user(buf, &num_of_frames,
+					sizeof(unsigned char))) {
+				rc = -EFAULT;
+				break;
+			}
+			buf += sizeof(unsigned char);
+			meta.frame_size = 0xFFFF;
+			meta.encoded_pcm_samples = 0xFFFF;
+			meta.msw_ts = 0x00;
+			meta.lsw_ts = 0x00;
+			meta.nflags = AUD_EOS_SET;
+			audio->eos_rsp = 0;
+			if (copy_to_user(buf, &meta, sizeof(meta))) {
+				rc = -EFAULT;
+				break;
+			}
+			buf += sizeof(meta);
+			break;
+		}
+		data = (unsigned char *)q6asm_is_cpu_buf_avail(OUT, audio->ac,
+						&size, &idx);
+		if ((count >= (size + mfield_size)) && data) {
+			if (audio->buf_cfg.meta_info_enable) {
+				if (copy_to_user(buf,
+					&audio->out_frame_info[idx][0],
+					sizeof(unsigned char))) {
+					rc = -EFAULT;
+					break;
+				}
+				bytes_to_copy =
+					(size + audio->out_frame_info[idx][1]);
+				/* Number of frames information copied */
+				buf += sizeof(unsigned char);
+				count -= sizeof(unsigned char);
+			} else {
+				offset = audio->out_frame_info[idx][1];
+				bytes_to_copy = size;
+			}
+
+			pr_debug("%s:session id %d: offset=%d nr of frames= %d\n",
+					__func__, audio->ac->session,
+					audio->out_frame_info[idx][1],
+					audio->out_frame_info[idx][0]);
+
+			if (copy_to_user(buf, &data[offset], bytes_to_copy)) {
+				rc = -EFAULT;
+				break;
+			}
+			count -= bytes_to_copy;
+			buf += bytes_to_copy;
+		} else {
+			pr_err("%s:session id %d: short read data[%pK] bytesavail[%d]bytesrequest[%zd]\n",
+				__func__,
+				audio->ac->session,
+				data, size, count);
+		}
+		atomic_dec(&audio->out_count);
+		q6asm_read(audio->ac);
+		break;
+	}
+	mutex_unlock(&audio->read_lock);
+
+	pr_debug("%s:session id %d: read: %zd bytes\n", __func__,
+			audio->ac->session, (buf-start));
+	if (buf > start)
+		return buf - start;
+	return rc;
+}
+
+static int extract_meta_info(char *buf, unsigned long *msw_ts,
+		unsigned long *lsw_ts, unsigned int *flags)
+{
+	struct meta_in *meta = (struct meta_in *)buf;
+	*msw_ts = meta->ntimestamp.highpart;
+	*lsw_ts = meta->ntimestamp.lowpart;
+	*flags = meta->nflags;
+	return 0;
+}
+
+ssize_t audio_in_write(struct file *file,
+		const char __user *buf,
+		size_t count, loff_t *pos)
+{
+	struct q6audio_in *audio = file->private_data;
+	const char __user *start = buf;
+	size_t xfer = 0;
+	char *cpy_ptr;
+	int rc = 0;
+	unsigned char *data;
+	uint32_t size = 0;
+	uint32_t idx = 0;
+	uint32_t nflags = 0;
+	unsigned long msw_ts = 0;
+	unsigned long lsw_ts = 0;
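+	/* Writes may carry a struct meta_in prefix (timestamp + flags)
+	 * when meta info is enabled; account for it here */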
+	uint32_t mfield_size = (audio->buf_cfg.meta_info_enable == 0) ? 0 :
+			sizeof(struct meta_in);
+
+	pr_debug("%s:session id %d: to write[%zd]\n", __func__,
+			audio->ac->session, count);
+	if (audio->reset_event)
+		return -ENETRESET;
+
+	if (!audio->enabled)
+		return -EFAULT;
+	mutex_lock(&audio->write_lock);
+
+	while (count > 0) {
+		rc = wait_event_interruptible(audio->write_wait,
+				     ((atomic_read(&audio->in_count) > 0) ||
+				      (audio->stopped) ||
+				      (audio->wflush) || (audio->event_abort)));
+
+		if (audio->event_abort) {
+			rc = -EIO;
+			break;
+		}
+
+		if (rc < 0)
+			break;
+		if (audio->stopped || audio->wflush) {
+			pr_debug("%s: session id %d: stop or flush\n", __func__,
+					audio->ac->session);
+			rc = -EBUSY;
+			break;
+		}
+		/* If there is no PCM data the buffer may carry only an
+		 * EOS meta field; in that case do not claim a CPU buffer */
+		if ((buf == start) && (count == mfield_size)) {
+			char eos_buf[sizeof(struct meta_in)];
+			/* Processing beginning of user buffer */
+			if (copy_from_user(eos_buf, buf, mfield_size)) {
+				rc = -EFAULT;
+				break;
+			}
+			/* Check if the EOS flag is set and the buffer
+			 * contains just the meta field
+			 */
+			extract_meta_info(eos_buf, &msw_ts, &lsw_ts,
+						&nflags);
+			buf += mfield_size;
+			/* send the EOS and return */
+			pr_debug("%s:session id %d: send EOS 0x%08x\n",
+				__func__,
+				audio->ac->session, nflags);
+			break;
+		}
+		data = (unsigned char *)q6asm_is_cpu_buf_avail(IN, audio->ac,
+						&size, &idx);
+		if (!data) {
+			pr_debug("%s:session id %d: No buf available\n",
+				__func__, audio->ac->session);
+			continue;
+		}
+		cpy_ptr = data;
+		if (audio->buf_cfg.meta_info_enable) {
+			if (buf == start) {
+				/* Processing beginning of user buffer */
+				if (copy_from_user(cpy_ptr, buf, mfield_size)) {
+					rc = -EFAULT;
+					break;
+				}
+				/* Check if the EOS flag is set and the
+				 * buffer contains just the meta field
+				 */
+				extract_meta_info(cpy_ptr, &msw_ts, &lsw_ts,
+						&nflags);
+				buf += mfield_size;
+				count -= mfield_size;
+			} else {
+				pr_debug("%s:session id %d: continuous buffer\n",
+						__func__, audio->ac->session);
+			}
+		}
+		xfer = (count > size) ? size : count;
+
+		if (copy_from_user(cpy_ptr, buf, xfer)) {
+			rc = -EFAULT;
+			break;
+		}
+		rc = q6asm_write(audio->ac, xfer, msw_ts, lsw_ts, 0x00);
+		if (rc < 0) {
+			rc = -EFAULT;
+			break;
+		}
+		atomic_dec(&audio->in_count);
+		count -= xfer;
+		buf += xfer;
+	}
+	mutex_unlock(&audio->write_lock);
+	pr_debug("%s:session id %d: eos_condition 0x%x buf[0x%pK] start[0x%pK]\n",
+				__func__, audio->ac->session,
+				nflags, buf, start);
+	if (nflags & AUD_EOS_SET) {
+		rc = q6asm_cmd(audio->ac, CMD_EOS);
+		pr_info("%s:session id %d: eos %d at input\n", __func__,
+				audio->ac->session, audio->eos_rsp);
+	}
+	pr_debug("%s:session id %d: Written %zd Avail Buf[%d]\n", __func__,
+			audio->ac->session, (buf - start - mfield_size),
+			atomic_read(&audio->in_count));
+	if (!rc) {
+		if (buf > start)
+			return buf - start;
+	}
+	return rc;
+}
+
+int audio_in_release(struct inode *inode, struct file *file)
+{
+	struct q6audio_in  *audio = file->private_data;
+	pr_info("%s: session id %d\n", __func__, audio->ac->session);
+	mutex_lock(&audio->lock);
+	audio_in_disable(audio);
+	q6asm_audio_client_free(audio->ac);
+	mutex_unlock(&audio->lock);
+	kfree(audio->enc_cfg);
+	kfree(audio->codec_cfg);
+	kfree(audio);
+	return 0;
+}
diff -Nruw linux-4.4.115-fbx/drivers/misc/qcom./qdsp6v2/audio_utils.h linux-4.4.115-fbx/drivers/misc/qcom/qdsp6v2/audio_utils.h
--- linux-4.4.115-fbx/drivers/misc/qcom./qdsp6v2/audio_utils.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/misc/qcom/qdsp6v2/audio_utils.h	2019-01-22 16:16:24.747257672 +0100
@@ -0,0 +1,113 @@
+/* Copyright (c) 2010-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+*/
+#include <linux/msm_audio.h>
+#include <linux/compat.h>
+#include "q6audio_common.h"
+
+#define FRAME_NUM	(8)
+
+#define PCM_BUF_COUNT		(2)
+
+#define AUD_EOS_SET  0x01
+#define TUNNEL_MODE     0x0000
+#define NON_TUNNEL_MODE 0x0001
+
+#define NO_BUF_ALLOC	0x00
+#define BUF_ALLOC_IN    0x01
+#define BUF_ALLOC_OUT   0x02
+#define BUF_ALLOC_INOUT 0x03
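+/* Round buffer sizes up to a 4 KiB boundary */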
+#define ALIGN_BUF_SIZE(size) ((size + 4095) & (~4095))
+
+struct timestamp {
+	u32 lowpart;
+	u32 highpart;
+} __packed;
+
+struct meta_in {
+	unsigned short offset;
+	struct timestamp ntimestamp;
+	unsigned int nflags;
+} __packed;
+
+struct meta_out_dsp {
+	u32 offset_to_frame;
+	u32 frame_size;
+	u32 encoded_pcm_samples;
+	u32 msw_ts;
+	u32 lsw_ts;
+	u32 nflags;
+} __packed;
+
+struct meta_out {
+	unsigned char num_of_frames;
+	struct meta_out_dsp meta_out_dsp[];
+} __packed;
+
+struct q6audio_in {
+	spinlock_t			dsp_lock;
+	atomic_t			in_bytes;
+	atomic_t			in_samples;
+
+	struct mutex			lock;
+	struct mutex			read_lock;
+	struct mutex			write_lock;
+	wait_queue_head_t		read_wait;
+	wait_queue_head_t		write_wait;
+
+	struct audio_client             *ac;
+	struct msm_audio_stream_config  str_cfg;
+	void				*enc_cfg;
+	struct msm_audio_buf_cfg        buf_cfg;
+	struct msm_audio_config		pcm_cfg;
+	void				*codec_cfg;
+
+	/* number of buffers available to read/write */
+	atomic_t			in_count;
+	atomic_t			out_count;
+
+	/* first idx: num of frames per buf, second idx: offset to frame */
+	uint32_t			out_frame_info[FRAME_NUM][2];
+	int				eos_rsp;
+	int				opened;
+	int				enabled;
+	int				stopped;
+	int				event_abort;
+	int				feedback; /* Flag indicates whether used
+							in Non Tunnel mode */
+	int				rflush;
+	int				wflush;
+	int				buf_alloc;
+	uint16_t			min_frame_size;
+	uint16_t			max_frames_per_buf;
+	bool				reset_event;
+	long (*enc_ioctl)(struct file *, unsigned int, unsigned long);
+	long (*enc_compat_ioctl)(struct file *, unsigned int, unsigned long);
+};
+
+int audio_in_enable(struct q6audio_in  *audio);
+int audio_in_disable(struct q6audio_in  *audio);
+int audio_in_buf_alloc(struct q6audio_in *audio);
+long audio_in_ioctl(struct file *file,
+		unsigned int cmd, unsigned long arg);
+#ifdef CONFIG_COMPAT
+long audio_in_compat_ioctl(struct file *file,
+		unsigned int cmd, unsigned long arg);
+#else
+#define audio_in_compat_ioctl NULL
+#endif
+ssize_t audio_in_read(struct file *file, char __user *buf,
+		size_t count, loff_t *pos);
+ssize_t audio_in_write(struct file *file, const char __user *buf,
+		size_t count, loff_t *pos);
+int audio_in_release(struct inode *inode, struct file *file);
+int audio_in_set_config(struct file *file, struct msm_audio_config *cfg);
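+
+/*
+ * Assumed call order for users of these helpers (illustrative):
+ * configure with AUDIO_SET_STREAM_CONFIG / AUDIO_SET_CONFIG while
+ * stopped, start via the codec-specific AUDIO_START, then exchange
+ * data with audio_in_read()/audio_in_write(), using AUDIO_PAUSE and
+ * AUDIO_FLUSH as needed.
+ */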
diff -Nruw linux-4.4.115-fbx/drivers/misc/qcom./qdsp6v2/audio_wma.c linux-4.4.115-fbx/drivers/misc/qcom/qdsp6v2/audio_wma.c
--- linux-4.4.115-fbx/drivers/misc/qcom./qdsp6v2/audio_wma.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/misc/qcom/qdsp6v2/audio_wma.c	2019-01-22 16:16:24.747257672 +0100
@@ -0,0 +1,347 @@
+/* wma audio output device
+ *
+ * Copyright (C) 2008 Google, Inc.
+ * Copyright (C) 2008 HTC Corporation
+ * Copyright (c) 2009-2016, The Linux Foundation. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/types.h>
+#include <linux/msm_audio_wma.h>
+#include <linux/compat.h>
+#include "audio_utils_aio.h"
+
+static struct miscdevice audio_wma_misc;
+static struct ws_mgr audio_wma_ws_mgr;
+
+#ifdef CONFIG_DEBUG_FS
+static const struct file_operations audio_wma_debug_fops = {
+	.read = audio_aio_debug_read,
+	.open = audio_aio_debug_open,
+};
+#endif
+
+static long audio_ioctl_shared(struct file *file, unsigned int cmd,
+						void *arg)
+{
+	struct q6audio_aio *audio = file->private_data;
+	int rc = 0;
+
+	switch (cmd) {
+	case AUDIO_START: {
+		struct asm_wma_cfg wma_cfg;
+		struct msm_audio_wma_config_v2 *wma_config;
+		pr_debug("%s[%pK]: AUDIO_START session_id[%d]\n", __func__,
+						audio, audio->ac->session);
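+		/* Only NT (read/write) sessions decode back to the host;
+		 * tunnel sessions are assumed to render inside the DSP,
+		 * so the PCM output block is configured for NT mode only. */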
+		if (audio->feedback == NON_TUNNEL_MODE) {
+			/* Configure PCM output block */
+			rc = q6asm_enc_cfg_blk_pcm(audio->ac,
+					audio->pcm_cfg.sample_rate,
+					audio->pcm_cfg.channel_count);
+			if (rc < 0) {
+				pr_err("pcm output block config failed\n");
+				break;
+			}
+		}
+		wma_config = (struct msm_audio_wma_config_v2 *)audio->codec_cfg;
+		wma_cfg.format_tag = wma_config->format_tag;
+		wma_cfg.ch_cfg = wma_config->numchannels;
+		wma_cfg.sample_rate =  wma_config->samplingrate;
+		wma_cfg.avg_bytes_per_sec = wma_config->avgbytespersecond;
+		wma_cfg.block_align = wma_config->block_align;
+		wma_cfg.valid_bits_per_sample =
+				wma_config->validbitspersample;
+		wma_cfg.ch_mask =  wma_config->channelmask;
+		wma_cfg.encode_opt = wma_config->encodeopt;
+		/* Configure Media format block */
+		rc = q6asm_media_format_block_wma(audio->ac, &wma_cfg,
+				audio->ac->stream_id);
+		if (rc < 0) {
+			pr_err("cmd media format block failed\n");
+			break;
+		}
+		rc = audio_aio_enable(audio);
+		audio->eos_rsp = 0;
+		audio->eos_flag = 0;
+		if (!rc) {
+			audio->enabled = 1;
+		} else {
+			audio->enabled = 0;
+			pr_err("Audio Start procedure failed rc=%d\n", rc);
+			break;
+		}
+		pr_debug("AUDIO_START success enable[%d]\n", audio->enabled);
+		if (audio->stopped == 1)
+			audio->stopped = 0;
+		break;
+	}
+	default:
+		pr_err("%s: Unknown ioctl cmd = %d\n", __func__, cmd);
+		rc = -EINVAL;
+		break;
+	}
+	return rc;
+}
+
+static long audio_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+	struct q6audio_aio *audio = file->private_data;
+	int rc = 0;
+
+	switch (cmd) {
+	case AUDIO_START: {
+		rc = audio_ioctl_shared(file, cmd, (void *)arg);
+		break;
+	}
+	case AUDIO_GET_WMA_CONFIG_V2: {
+		if (copy_to_user((void *)arg, audio->codec_cfg,
+			sizeof(struct msm_audio_wma_config_v2))) {
+			pr_err("%s:copy_to_user for AUDIO_GET_WMA_CONFIG_V2 failed\n",
+				__func__);
+			rc = -EFAULT;
+			break;
+		}
+		break;
+	}
+	case AUDIO_SET_WMA_CONFIG_V2: {
+		if (copy_from_user(audio->codec_cfg, (void *)arg,
+			sizeof(struct msm_audio_wma_config_v2))) {
+			pr_err("%s:copy_from_user for AUDIO_SET_WMA_CONFIG_V2 failed\n",
+				__func__);
+			rc = -EFAULT;
+			break;
+		}
+		break;
+	}
+	default: {
+		pr_debug("%s[%pK]: Calling utils ioctl\n", __func__, audio);
+		rc = audio->codec_ioctl(file, cmd, arg);
+		if (rc)
+			pr_err("Failed in utils_ioctl: %d\n", rc);
+		break;
+	}
+	}
+	return rc;
+}
+
+#ifdef CONFIG_COMPAT
+struct msm_audio_wma_config_v2_32 {
+	u16 format_tag;
+	u16 numchannels;
+	u32 samplingrate;
+	u32 avgbytespersecond;
+	u16 block_align;
+	u16 validbitspersample;
+	u32 channelmask;
+	u16 encodeopt;
+};
+
+enum {
+	AUDIO_GET_WMA_CONFIG_V2_32 =  _IOR(AUDIO_IOCTL_MAGIC,
+	(AUDIO_MAX_COMMON_IOCTL_NUM+2), struct msm_audio_wma_config_v2_32),
+	AUDIO_SET_WMA_CONFIG_V2_32 =  _IOW(AUDIO_IOCTL_MAGIC,
+	(AUDIO_MAX_COMMON_IOCTL_NUM+3), struct msm_audio_wma_config_v2_32)
+};
+
+static long audio_compat_ioctl(struct file *file, unsigned int cmd,
+						unsigned long arg)
+{
+	struct q6audio_aio *audio = file->private_data;
+	int rc = 0;
+
+	switch (cmd) {
+	case AUDIO_START: {
+		rc = audio_ioctl_shared(file, cmd, (void *)arg);
+		break;
+	}
+	case AUDIO_GET_WMA_CONFIG_V2_32: {
+		struct msm_audio_wma_config_v2 *wma_config;
+		struct msm_audio_wma_config_v2_32 wma_config_32;
+
+		memset(&wma_config_32, 0, sizeof(wma_config_32));
+
+		wma_config = (struct msm_audio_wma_config_v2 *)audio->codec_cfg;
+		wma_config_32.format_tag = wma_config->format_tag;
+		wma_config_32.numchannels = wma_config->numchannels;
+		wma_config_32.samplingrate = wma_config->samplingrate;
+		wma_config_32.avgbytespersecond = wma_config->avgbytespersecond;
+		wma_config_32.block_align = wma_config->block_align;
+		wma_config_32.validbitspersample =
+					wma_config->validbitspersample;
+		wma_config_32.channelmask = wma_config->channelmask;
+		wma_config_32.encodeopt = wma_config->encodeopt;
+		if (copy_to_user((void *)arg, &wma_config_32,
+			sizeof(wma_config_32))) {
+			pr_err("%s: copy_to_user for AUDIO_GET_WMA_CONFIG_V2_32 failed\n",
+				 __func__);
+			rc = -EFAULT;
+			break;
+		}
+		break;
+	}
+	case AUDIO_SET_WMA_CONFIG_V2_32: {
+		struct msm_audio_wma_config_v2 *wma_config;
+		struct msm_audio_wma_config_v2_32 wma_config_32;
+
+		if (copy_from_user(&wma_config_32, (void *)arg,
+			sizeof(wma_config_32))) {
+			pr_err("%s: copy_from_user for AUDIO_SET_WMA_CONFIG_V2_32 failed\n",
+				__func__);
+			rc = -EFAULT;
+			break;
+		}
+		wma_config = (struct msm_audio_wma_config_v2 *)audio->codec_cfg;
+		wma_config->format_tag = wma_config_32.format_tag;
+		wma_config->numchannels = wma_config_32.numchannels;
+		wma_config->samplingrate = wma_config_32.samplingrate;
+		wma_config->avgbytespersecond = wma_config_32.avgbytespersecond;
+		wma_config->block_align = wma_config_32.block_align;
+		wma_config->validbitspersample =
+				wma_config_32.validbitspersample;
+		wma_config->channelmask = wma_config_32.channelmask;
+		wma_config->encodeopt = wma_config_32.encodeopt;
+		break;
+	}
+	default: {
+		pr_debug("%s[%pK]: Calling utils ioctl\n", __func__, audio);
+		rc = audio->codec_compat_ioctl(file, cmd, arg);
+		if (rc)
+			pr_err("Failed in utils_ioctl: %d\n", rc);
+		break;
+	}
+	}
+	return rc;
+}
+#else
+#define audio_compat_ioctl NULL
+#endif
+
+static int audio_open(struct inode *inode, struct file *file)
+{
+	struct q6audio_aio *audio = NULL;
+	int rc = 0;
+
+#ifdef CONFIG_DEBUG_FS
+	/* 4 bytes for the session number, 1 for the terminating NUL */
+	char name[sizeof "msm_wma_" + 5];
+#endif
+	audio = kzalloc(sizeof(struct q6audio_aio), GFP_KERNEL);
+
+	if (audio == NULL) {
+		pr_err("Could not allocate memory for wma decode driver\n");
+		return -ENOMEM;
+	}
+	audio->codec_cfg = kzalloc(sizeof(struct msm_audio_wma_config_v2),
+					GFP_KERNEL);
+	if (audio->codec_cfg == NULL) {
+		pr_err("%s: Could not allocate memory for wma config\n",
+			__func__);
+		kfree(audio);
+		return -ENOMEM;
+	}
+
+	audio->pcm_cfg.buffer_size = PCM_BUFSZ_MIN;
+	audio->miscdevice = &audio_wma_misc;
+	audio->wakelock_voted = false;
+	audio->audio_ws_mgr = &audio_wma_ws_mgr;
+
+	audio->ac = q6asm_audio_client_alloc((app_cb) q6_audio_cb,
+					     (void *)audio);
+
+	if (!audio->ac) {
+		pr_err("Could not allocate memory for audio client\n");
+		kfree(audio->codec_cfg);
+		kfree(audio);
+		return -ENOMEM;
+	}
+	rc = audio_aio_open(audio, file);
+	if (rc < 0) {
+		pr_err("%s: audio_aio_open rc=%d\n",
+			__func__, rc);
+		goto fail;
+	}
+	/* open in T/NT mode */
+	if ((file->f_mode & FMODE_WRITE) && (file->f_mode & FMODE_READ)) {
+		rc = q6asm_open_read_write(audio->ac, FORMAT_LINEAR_PCM,
+					   FORMAT_WMA_V9);
+		if (rc < 0) {
+			pr_err("NT mode Open failed rc=%d\n", rc);
+			rc = -ENODEV;
+			goto fail;
+		}
+		audio->feedback = NON_TUNNEL_MODE;
+		/* open WMA decoder; expected frames per buffer is always 1 */
+		audio->buf_cfg.frames_per_buf = 0x01;
+		audio->buf_cfg.meta_info_enable = 0x01;
+	} else if ((file->f_mode & FMODE_WRITE) &&
+			!(file->f_mode & FMODE_READ)) {
+		rc = q6asm_open_write(audio->ac, FORMAT_WMA_V9);
+		if (rc < 0) {
+			pr_err("T mode Open failed rc=%d\n", rc);
+			rc = -ENODEV;
+			goto fail;
+		}
+		audio->feedback = TUNNEL_MODE;
+		audio->buf_cfg.meta_info_enable = 0x00;
+	} else {
+		pr_err("Unsupported open mode\n");
+		rc = -EACCES;
+		goto fail;
+	}
+
+#ifdef CONFIG_DEBUG_FS
+	snprintf(name, sizeof name, "msm_wma_%04x", audio->ac->session);
+	audio->dentry = debugfs_create_file(name, S_IFREG | S_IRUGO,
+					    NULL, (void *)audio,
+					    &audio_wma_debug_fops);
+
+	if (IS_ERR(audio->dentry))
+		pr_debug("debugfs_create_file failed\n");
+#endif
+	pr_info("%s:wmadec success mode[%d]session[%d]\n", __func__,
+						audio->feedback,
+						audio->ac->session);
+	return rc;
+fail:
+	q6asm_audio_client_free(audio->ac);
+	kfree(audio->codec_cfg);
+	kfree(audio);
+	return rc;
+}
+
+static const struct file_operations audio_wma_fops = {
+	.owner = THIS_MODULE,
+	.open = audio_open,
+	.release = audio_aio_release,
+	.unlocked_ioctl = audio_ioctl,
+	.fsync = audio_aio_fsync,
+	.compat_ioctl = audio_compat_ioctl
+};
+
+static struct miscdevice audio_wma_misc = {
+	.minor = MISC_DYNAMIC_MINOR,
+	.name = "msm_wma",
+	.fops = &audio_wma_fops,
+};
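+
+/*
+ * Typical userspace sequence (illustrative only; the device node name
+ * comes from the miscdevice above):
+ *
+ *   int fd = open("/dev/msm_wma", O_WRONLY);        // tunnel mode
+ *   struct msm_audio_wma_config_v2 cfg = { ... };
+ *   ioctl(fd, AUDIO_SET_WMA_CONFIG_V2, &cfg);
+ *   ioctl(fd, AUDIO_START, 0);
+ *   write(fd, bitstream, len);                      // WMA frames
+ */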
+
+static int __init audio_wma_init(void)
+{
+	int ret = misc_register(&audio_wma_misc);
+
+	if (ret == 0)
+		device_init_wakeup(audio_wma_misc.this_device, true);
+	audio_wma_ws_mgr.ref_cnt = 0;
+	mutex_init(&audio_wma_ws_mgr.ws_lock);
+
+	return ret;
+}
+
+device_initcall(audio_wma_init);
diff -Nruw linux-4.4.115-fbx/drivers/misc/qcom./qdsp6v2/audio_wmapro.c linux-4.4.115-fbx/drivers/misc/qcom/qdsp6v2/audio_wmapro.c
--- linux-4.4.115-fbx/drivers/misc/qcom./qdsp6v2/audio_wmapro.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/misc/qcom/qdsp6v2/audio_wmapro.c	2019-01-22 16:16:24.747257672 +0100
@@ -0,0 +1,420 @@
+/* wmapro audio output device
+ *
+ * Copyright (C) 2008 Google, Inc.
+ * Copyright (C) 2008 HTC Corporation
+ * Copyright (c) 2009-2016, The Linux Foundation. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/types.h>
+#include <linux/msm_audio_wmapro.h>
+#include <linux/compat.h>
+#include "audio_utils_aio.h"
+
+static struct miscdevice audio_wmapro_misc;
+static struct ws_mgr audio_wmapro_ws_mgr;
+
+#ifdef CONFIG_DEBUG_FS
+static const struct file_operations audio_wmapro_debug_fops = {
+	.read = audio_aio_debug_read,
+	.open = audio_aio_debug_open,
+};
+#endif
+
+static long audio_ioctl_shared(struct file *file, unsigned int cmd,
+						void *arg)
+{
+	struct q6audio_aio *audio = file->private_data;
+	int rc = 0;
+
+	switch (cmd) {
+	case AUDIO_START: {
+		struct asm_wmapro_cfg wmapro_cfg;
+		struct msm_audio_wmapro_config *wmapro_config;
+		pr_debug("%s: AUDIO_START session_id[%d]\n", __func__,
+						audio->ac->session);
+		if (audio->feedback == NON_TUNNEL_MODE) {
+			/* Configure PCM output block */
+			rc = q6asm_enc_cfg_blk_pcm_v2(audio->ac,
+					audio->pcm_cfg.sample_rate,
+					audio->pcm_cfg.channel_count,
+					16, /* bits per sample */
+					true, /* use default channel map */
+					true, /* use back channel map flavor */
+					NULL);
+			if (rc < 0) {
+				pr_err("pcm output block config failed\n");
+				break;
+			}
+		}
+		wmapro_config = (struct msm_audio_wmapro_config *)
+				audio->codec_cfg;
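+		/* Accepted ASF format tags; 0x162/0x163 are commonly
+		 * WMA9 Pro / WMA9 Lossless, 0x166/0x167 further Pro
+		 * variants (mapping assumed, not confirmed here) */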
+		if ((wmapro_config->formattag == 0x162) ||
+		    (wmapro_config->formattag == 0x163) ||
+		    (wmapro_config->formattag == 0x166) ||
+		    (wmapro_config->formattag == 0x167)) {
+			wmapro_cfg.format_tag = wmapro_config->formattag;
+		} else {
+			pr_err("%s:AUDIO_START failed: formattag = %d\n",
+				__func__, wmapro_config->formattag);
+			rc = -EINVAL;
+			break;
+		}
+		if (wmapro_config->numchannels > 0) {
+			wmapro_cfg.ch_cfg = wmapro_config->numchannels;
+		} else {
+			pr_err("%s:AUDIO_START failed: channels = %d\n",
+				__func__, wmapro_config->numchannels);
+			rc = -EINVAL;
+			break;
+		}
+		if (wmapro_config->samplingrate > 0) {
+			wmapro_cfg.sample_rate = wmapro_config->samplingrate;
+		} else {
+			pr_err("%s:AUDIO_START failed: sample_rate = %d\n",
+				__func__, wmapro_config->samplingrate);
+			rc = -EINVAL;
+			break;
+		}
+		wmapro_cfg.avg_bytes_per_sec =
+				wmapro_config->avgbytespersecond;
+		if ((wmapro_config->asfpacketlength > 0) &&
+		    (wmapro_config->asfpacketlength <= 13376)) {
+			wmapro_cfg.block_align =
+				wmapro_config->asfpacketlength;
+		} else {
+			pr_err("%s:AUDIO_START failed: block_align = %d\n",
+				__func__, wmapro_config->asfpacketlength);
+			rc = -EINVAL;
+			break;
+		}
+		if ((wmapro_config->validbitspersample == 16) ||
+			(wmapro_config->validbitspersample == 24)) {
+			wmapro_cfg.valid_bits_per_sample =
+				wmapro_config->validbitspersample;
+		} else {
+			pr_err("%s:AUDIO_START failed: bitspersample = %d\n",
+				__func__, wmapro_config->validbitspersample);
+			rc = -EINVAL;
+			break;
+		}
+		wmapro_cfg.ch_mask = wmapro_config->channelmask;
+		wmapro_cfg.encode_opt = wmapro_config->encodeopt;
+		wmapro_cfg.adv_encode_opt =
+				wmapro_config->advancedencodeopt;
+		wmapro_cfg.adv_encode_opt2 =
+				wmapro_config->advancedencodeopt2;
+		/* Configure Media format block */
+		rc = q6asm_media_format_block_wmapro(audio->ac, &wmapro_cfg,
+				audio->ac->stream_id);
+		if (rc < 0) {
+			pr_err("cmd media format block failed\n");
+			break;
+		}
+		rc = audio_aio_enable(audio);
+		audio->eos_rsp = 0;
+		audio->eos_flag = 0;
+		if (!rc) {
+			audio->enabled = 1;
+		} else {
+			audio->enabled = 0;
+			pr_err("Audio Start procedure failed rc=%d\n", rc);
+			break;
+		}
+		pr_debug("AUDIO_START success enable[%d]\n", audio->enabled);
+		if (audio->stopped == 1)
+			audio->stopped = 0;
+		break;
+	}
+	default:
+		pr_err("%s: Unknown ioctl cmd %d\n", __func__, cmd);
+		rc = -EINVAL;
+		break;
+	}
+	return rc;
+}
+
+static long audio_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+	struct q6audio_aio *audio = file->private_data;
+	int rc = 0;
+
+	switch (cmd) {
+	case AUDIO_GET_WMAPRO_CONFIG: {
+		if (copy_to_user((void *)arg, audio->codec_cfg,
+			 sizeof(struct msm_audio_wmapro_config))) {
+			pr_err("%s: copy_to_user for AUDIO_GET_WMAPRO_CONFIG failed\n",
+				__func__);
+			rc = -EFAULT;
+		}
+		break;
+	}
+	case AUDIO_SET_WMAPRO_CONFIG: {
+		if (copy_from_user(audio->codec_cfg, (void *)arg,
+			sizeof(struct msm_audio_wmapro_config))) {
+			pr_err("%s: copy_from_user for AUDIO_SET_WMAPRO_CONFIG failed\n",
+				__func__);
+			rc = -EFAULT;
+			break;
+		}
+		break;
+	}
+	case AUDIO_START: {
+		rc = audio_ioctl_shared(file, cmd, (void *)arg);
+		break;
+	}
+	default: {
+		pr_debug("%s[%pK]: Calling utils ioctl\n", __func__, audio);
+		rc = audio->codec_ioctl(file, cmd, arg);
+		if (rc)
+			pr_err("Failed in utils_ioctl: %d\n", rc);
+		break;
+	}
+	}
+	return rc;
+}
+
+#ifdef CONFIG_COMPAT
+
+struct msm_audio_wmapro_config32 {
+	u16 armdatareqthr;
+	u8  validbitspersample;
+	u8  numchannels;
+	u16 formattag;
+	u32 samplingrate;
+	u32 avgbytespersecond;
+	u16 asfpacketlength;
+	u32 channelmask;
+	u16 encodeopt;
+	u16 advancedencodeopt;
+	u32 advancedencodeopt2;
+};
+
+enum {
+	AUDIO_GET_WMAPRO_CONFIG_32 = _IOR(AUDIO_IOCTL_MAGIC,
+	  (AUDIO_MAX_COMMON_IOCTL_NUM+0), struct msm_audio_wmapro_config32),
+	AUDIO_SET_WMAPRO_CONFIG_32 = _IOW(AUDIO_IOCTL_MAGIC,
+	  (AUDIO_MAX_COMMON_IOCTL_NUM+1), struct msm_audio_wmapro_config32)
+};
+
+static long audio_compat_ioctl(struct file *file, unsigned int cmd,
+					unsigned long arg)
+{
+	struct q6audio_aio *audio = file->private_data;
+	int rc = 0;
+
+	switch (cmd) {
+	case AUDIO_GET_WMAPRO_CONFIG_32: {
+		struct msm_audio_wmapro_config *wmapro_config;
+		struct msm_audio_wmapro_config32 wmapro_config_32;
+
+		memset(&wmapro_config_32, 0, sizeof(wmapro_config_32));
+
+		wmapro_config =
+			(struct msm_audio_wmapro_config *)audio->codec_cfg;
+		wmapro_config_32.armdatareqthr = wmapro_config->armdatareqthr;
+		wmapro_config_32.validbitspersample =
+					wmapro_config->validbitspersample;
+		wmapro_config_32.numchannels = wmapro_config->numchannels;
+		wmapro_config_32.formattag = wmapro_config->formattag;
+		wmapro_config_32.samplingrate = wmapro_config->samplingrate;
+		wmapro_config_32.avgbytespersecond =
+					wmapro_config->avgbytespersecond;
+		wmapro_config_32.asfpacketlength =
+					wmapro_config->asfpacketlength;
+		wmapro_config_32.channelmask = wmapro_config->channelmask;
+		wmapro_config_32.encodeopt = wmapro_config->encodeopt;
+		wmapro_config_32.advancedencodeopt =
+					wmapro_config->advancedencodeopt;
+		wmapro_config_32.advancedencodeopt2 =
+					wmapro_config->advancedencodeopt2;
+
+		if (copy_to_user((void *)arg, &wmapro_config_32,
+			 sizeof(struct msm_audio_wmapro_config32))) {
+			pr_err("%s: copy_to_user for AUDIO_GET_WMAPRO_CONFIG_32 failed\n",
+				__func__);
+			rc = -EFAULT;
+		}
+		break;
+	}
+	case AUDIO_SET_WMAPRO_CONFIG_32: {
+		struct msm_audio_wmapro_config *wmapro_config;
+		struct msm_audio_wmapro_config32 wmapro_config_32;
+
+		if (copy_from_user(&wmapro_config_32, (void *)arg,
+			sizeof(struct msm_audio_wmapro_config32))) {
+			pr_err(
+				"%s: copy_from_user for AUDIO_SET_WMAPRO_CONFG_V2_32 failed\n",
+				__func__);
+			rc = -EFAULT;
+			break;
+		}
+		wmapro_config =
+			(struct msm_audio_wmapro_config *)audio->codec_cfg;
+		wmapro_config->armdatareqthr = wmapro_config_32.armdatareqthr;
+		wmapro_config->validbitspersample =
+					wmapro_config_32.validbitspersample;
+		wmapro_config->numchannels = wmapro_config_32.numchannels;
+		wmapro_config->formattag = wmapro_config_32.formattag;
+		wmapro_config->samplingrate = wmapro_config_32.samplingrate;
+		wmapro_config->avgbytespersecond =
+					wmapro_config_32.avgbytespersecond;
+		wmapro_config->asfpacketlength =
+					wmapro_config_32.asfpacketlength;
+		wmapro_config->channelmask = wmapro_config_32.channelmask;
+		wmapro_config->encodeopt = wmapro_config_32.encodeopt;
+		wmapro_config->advancedencodeopt =
+					wmapro_config_32.advancedencodeopt;
+		wmapro_config->advancedencodeopt2 =
+					wmapro_config_32.advancedencodeopt2;
+		break;
+	}
+	case AUDIO_START: {
+		rc = audio_ioctl_shared(file, cmd, (void *)arg);
+		break;
+	}
+	default: {
+		pr_debug("%s[%pK]: Calling utils ioctl\n", __func__, audio);
+		rc = audio->codec_compat_ioctl(file, cmd, arg);
+		if (rc)
+			pr_err("Failed in utils_ioctl: %d\n", rc);
+		break;
+	}
+	}
+	return rc;
+}
+#else
+#define audio_compat_ioctl NULL
+#endif
+
+static int audio_open(struct inode *inode, struct file *file)
+{
+	struct q6audio_aio *audio = NULL;
+	int rc = 0;
+
+#ifdef CONFIG_DEBUG_FS
+	/* 4 bytes represent the decoder number, 1 byte the terminating NUL */
+	char name[sizeof "msm_wmapro_" + 5];
+#endif
+	audio = kzalloc(sizeof(struct q6audio_aio), GFP_KERNEL);
+
+	if (audio == NULL) {
+		pr_err("Could not allocate memory for wma decode driver\n");
+		return -ENOMEM;
+	}
+	audio->codec_cfg = kzalloc(sizeof(struct msm_audio_wmapro_config),
+					GFP_KERNEL);
+	if (audio->codec_cfg == NULL) {
+		pr_err("%s: Could not allocate memory for wmapro"
+			"config\n", __func__);
+		kfree(audio);
+		return -ENOMEM;
+	}
+
+
+	audio->pcm_cfg.buffer_size = PCM_BUFSZ_MIN;
+	audio->miscdevice = &audio_wmapro_misc;
+	audio->wakelock_voted = false;
+	audio->audio_ws_mgr = &audio_wmapro_ws_mgr;
+
+	audio->ac = q6asm_audio_client_alloc((app_cb) q6_audio_cb,
+					     (void *)audio);
+
+	if (!audio->ac) {
+		pr_err("Could not allocate memory for audio client\n");
+		kfree(audio->codec_cfg);
+		kfree(audio);
+		return -ENOMEM;
+	}
+
+	rc = audio_aio_open(audio, file);
+	if (rc < 0) {
+		pr_err("%s: audio_aio_open rc=%d\n",
+			__func__, rc);
+		goto fail;
+	}
+	/* open in T/NT mode */
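+	/*
+	 * Non-tunnel (read/write): userspace writes the WMA-Pro bitstream and
+	 * reads decoded PCM back.  Tunnel (write-only): the DSP renders the
+	 * decoded stream directly, so no read path is set up.
+	 */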
+	if ((file->f_mode & FMODE_WRITE) && (file->f_mode & FMODE_READ)) {
+		rc = q6asm_open_read_write(audio->ac, FORMAT_LINEAR_PCM,
+					   FORMAT_WMA_V10PRO);
+		if (rc < 0) {
+			pr_err("NT mode Open failed rc=%d\n", rc);
+			rc = -ENODEV;
+			goto fail;
+		}
+		audio->feedback = NON_TUNNEL_MODE;
+		/* open WMAPRO decoder; expected frames per buffer is always 1 */
+		audio->buf_cfg.frames_per_buf = 0x01;
+		audio->buf_cfg.meta_info_enable = 0x01;
+	} else if ((file->f_mode & FMODE_WRITE) &&
+			!(file->f_mode & FMODE_READ)) {
+		rc = q6asm_open_write(audio->ac, FORMAT_WMA_V10PRO);
+		if (rc < 0) {
+			pr_err("T mode Open failed rc=%d\n", rc);
+			rc = -ENODEV;
+			goto fail;
+		}
+		audio->feedback = TUNNEL_MODE;
+		audio->buf_cfg.meta_info_enable = 0x00;
+	} else {
+		pr_err("Not supported mode\n");
+		rc = -EACCES;
+		goto fail;
+	}
+
+#ifdef CONFIG_DEBUG_FS
+	snprintf(name, sizeof name, "msm_wmapro_%04x", audio->ac->session);
+	audio->dentry = debugfs_create_file(name, S_IFREG | S_IRUGO,
+					    NULL, (void *)audio,
+					    &audio_wmapro_debug_fops);
+
+	if (IS_ERR(audio->dentry))
+		pr_debug("debugfs_create_file failed\n");
+#endif
+	pr_info("%s:wmapro decoder open success, session_id = %d\n", __func__,
+				audio->ac->session);
+	return rc;
+fail:
+	q6asm_audio_client_free(audio->ac);
+	kfree(audio->codec_cfg);
+	kfree(audio);
+	return rc;
+}
+
+static const struct file_operations audio_wmapro_fops = {
+	.owner = THIS_MODULE,
+	.open = audio_open,
+	.release = audio_aio_release,
+	.unlocked_ioctl = audio_ioctl,
+	.fsync = audio_aio_fsync,
+	.compat_ioctl = audio_compat_ioctl
+};
+
+static struct miscdevice audio_wmapro_misc = {
+	.minor = MISC_DYNAMIC_MINOR,
+	.name = "msm_wmapro",
+	.fops = &audio_wmapro_fops,
+};
+
+static int __init audio_wmapro_init(void)
+{
+	int ret = misc_register(&audio_wmapro_misc);
+
+	if (ret == 0)
+		device_init_wakeup(audio_wmapro_misc.this_device, true);
+	audio_wmapro_ws_mgr.ref_cnt = 0;
+	mutex_init(&audio_wmapro_ws_mgr.ws_lock);
+
+	return ret;
+}
+
+device_initcall(audio_wmapro_init);
diff -Nruw linux-4.4.115-fbx/drivers/misc/qcom./qdsp6v2/evrc_in.c linux-4.4.115-fbx/drivers/misc/qcom/qdsp6v2/evrc_in.c
--- linux-4.4.115-fbx/drivers/misc/qcom./qdsp6v2/evrc_in.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/misc/qcom/qdsp6v2/evrc_in.c	2019-01-22 16:16:24.747257672 +0100
@@ -0,0 +1,410 @@
+/* Copyright (c) 2010-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+*/
+
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/miscdevice.h>
+#include <linux/uaccess.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+#include <linux/dma-mapping.h>
+#include <linux/slab.h>
+#include <linux/msm_audio_qcp.h>
+#include <linux/atomic.h>
+#include <linux/compat.h>
+#include <asm/ioctls.h>
+#include "audio_utils.h"
+
+/* Buffer with meta */
+#define PCM_BUF_SIZE		(4096 + sizeof(struct meta_in))
+
+/* Maximum 10 frames in buffer with meta */
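+/*
+ * i.e. 1 header byte plus ten frames of up to 23 bytes each, every frame
+ * carrying a struct meta_out_dsp descriptor.
+ */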
+#define FRAME_SIZE		(1 + ((23+sizeof(struct meta_out_dsp)) * 10))
+
+static long evrc_in_ioctl_shared(struct file *file,
+				unsigned int cmd, unsigned long arg)
+{
+	struct q6audio_in  *audio = file->private_data;
+	int rc = 0;
+	int cnt = 0;
+
+	switch (cmd) {
+	case AUDIO_START: {
+		struct msm_audio_evrc_enc_config *enc_cfg;
+		enc_cfg = audio->enc_cfg;
+		pr_debug("%s:session id %d: default buf alloc[%d]\n", __func__,
+				audio->ac->session, audio->buf_alloc);
+		if (audio->enabled == 1) {
+			pr_info("%s:AUDIO_START already over\n", __func__);
+			rc = 0;
+			break;
+		}
+		rc = audio_in_buf_alloc(audio);
+		if (rc < 0) {
+			pr_err("%s:session id %d: buffer allocation failed\n",
+				__func__, audio->ac->session);
+			break;
+		}
+
+		/*
+		 * rate_modulation_cmd set to zero,
+		 * currently not configurable from user space
+		 */
+		rc = q6asm_enc_cfg_blk_evrc(audio->ac,
+			audio->buf_cfg.frames_per_buf,
+			enc_cfg->min_bit_rate,
+			enc_cfg->max_bit_rate, 0);
+
+		if (rc < 0) {
+			pr_err("%s:session id %d: cmd evrc media format block failed\n",
+					__func__, audio->ac->session);
+			break;
+		}
+		if (audio->feedback == NON_TUNNEL_MODE) {
+			rc = q6asm_media_format_block_pcm(audio->ac,
+				audio->pcm_cfg.sample_rate,
+				audio->pcm_cfg.channel_count);
+
+			if (rc < 0) {
+				pr_err("%s:session id %d: media format block failed\n",
+					__func__, audio->ac->session);
+				break;
+			}
+		}
+		pr_debug("%s:session id %d: AUDIO_START enable[%d]\n",
+				__func__, audio->ac->session, audio->enabled);
+		rc = audio_in_enable(audio);
+		if (!rc) {
+			audio->enabled = 1;
+		} else {
+			audio->enabled = 0;
+			pr_err("%s:session id %d: Audio Start procedure failed rc=%d\n",
+					__func__, audio->ac->session, rc);
+			break;
+		}
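+		/*
+		 * Prime the DSP with every stream buffer up front so encoded
+		 * data can be collected as soon as the session runs.
+		 */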
+		while (cnt++ < audio->str_cfg.buffer_count)
+			q6asm_read(audio->ac); /* Push buffer to DSP */
+		rc = 0;
+		pr_debug("%s:session id %d: AUDIO_START success enable[%d]\n",
+				__func__, audio->ac->session, audio->enabled);
+		break;
+	}
+	case AUDIO_STOP: {
+		pr_debug("%s:session id %d: AUDIO_STOP\n", __func__,
+				audio->ac->session);
+		rc = audio_in_disable(audio);
+		if (rc  < 0) {
+			pr_err("%s:session id %d: Audio Stop procedure failed rc=%d\n",
+				__func__, audio->ac->session, rc);
+			break;
+		}
+		break;
+	}
+	case AUDIO_SET_EVRC_ENC_CONFIG: {
+		struct msm_audio_evrc_enc_config *cfg;
+		struct msm_audio_evrc_enc_config *enc_cfg;
+		enc_cfg = audio->enc_cfg;
+
+		cfg = (struct msm_audio_evrc_enc_config *)arg;
+		if (cfg == NULL) {
+			pr_err("%s: NULL config pointer for %s\n",
+					__func__, "AUDIO_SET_EVRC_ENC_CONFIG");
+			rc = -EINVAL;
+			break;
+		}
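+		/*
+		 * EVRC rate indices: 1 = eighth rate, 3 = half rate,
+		 * 4 = full rate.  Quarter rate (2) is not used by EVRC,
+		 * hence the explicit exclusion below.
+		 */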
+		if (cfg->min_bit_rate > 4 ||
+			 cfg->min_bit_rate < 1 ||
+			 (cfg->min_bit_rate == 2)) {
+			pr_err("%s:session id %d: invalid min bitrate\n",
+					__func__, audio->ac->session);
+			rc = -EINVAL;
+			break;
+		}
+		if (cfg->max_bit_rate > 4 ||
+			 cfg->max_bit_rate < 1 ||
+			 (cfg->max_bit_rate == 2)) {
+			pr_err("%s:session id %d: invalid max bitrate\n",
+				__func__, audio->ac->session);
+			rc = -EINVAL;
+			break;
+		}
+		enc_cfg->min_bit_rate = cfg->min_bit_rate;
+		enc_cfg->max_bit_rate = cfg->max_bit_rate;
+		pr_debug("%s:session id %d: min_bit_rate= 0x%x max_bit_rate=0x%x\n",
+			__func__,
+			audio->ac->session, enc_cfg->min_bit_rate,
+			enc_cfg->max_bit_rate);
+		break;
+	}
+	default:
+		pr_err("%s: Unknown ioctl cmd = %d", __func__, cmd);
+		rc = -EINVAL;
+	}
+	return rc;
+}
+
+static long evrc_in_ioctl(struct file *file,
+				unsigned int cmd, unsigned long arg)
+{
+	struct q6audio_in  *audio = file->private_data;
+	int rc = 0;
+
+	switch (cmd) {
+	case AUDIO_START:
+	case AUDIO_STOP: {
+		rc = evrc_in_ioctl_shared(file, cmd, arg);
+		break;
+	}
+	case AUDIO_GET_EVRC_ENC_CONFIG: {
+		if (copy_to_user((void *)arg, audio->enc_cfg,
+			sizeof(struct msm_audio_evrc_enc_config))) {
+			pr_err("%s: copy_to_user for AUDIO_GET_EVRC_ENC_CONFIG failed\n",
+				__func__);
+			rc = -EFAULT;
+		}
+		break;
+	}
+	case AUDIO_SET_EVRC_ENC_CONFIG: {
+		struct msm_audio_evrc_enc_config cfg;
+		if (copy_from_user(&cfg, (void *) arg,
+				sizeof(struct msm_audio_evrc_enc_config))) {
+			pr_err("%s: copy_from_user for AUDIO_SET_EVRC_ENC_CONFIG failed\n",
+				__func__);
+			rc = -EFAULT;
+			break;
+		}
+		rc = evrc_in_ioctl_shared(file, cmd, (unsigned long)&cfg);
+		if (rc)
+			pr_err("%s:AUDIO_SET_EVRC_ENC_CONFIG failed. rc= %d\n",
+				__func__, rc);
+		break;
+	}
+	default:
+		pr_err("%s: Unknown ioctl cmd = %d", __func__, cmd);
+		rc = -EINVAL;
+	}
+	return rc;
+}
+
+#ifdef CONFIG_COMPAT
+struct msm_audio_evrc_enc_config32 {
+	u32 cdma_rate;
+	u32 min_bit_rate;
+	u32 max_bit_rate;
+};
+
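+/*
+ * These shadow the native AUDIO_{SET,GET}_EVRC_ENC_CONFIG ioctls,
+ * re-encoded with the size of the 32-bit config struct above.
+ */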
+enum {
+	AUDIO_SET_EVRC_ENC_CONFIG_32 =  _IOW(AUDIO_IOCTL_MAGIC,
+		2, struct msm_audio_evrc_enc_config32),
+	AUDIO_GET_EVRC_ENC_CONFIG_32 =  _IOR(AUDIO_IOCTL_MAGIC,
+		3, struct msm_audio_evrc_enc_config32)
+};
+
+static long evrc_in_compat_ioctl(struct file *file,
+				unsigned int cmd, unsigned long arg)
+{
+	struct q6audio_in  *audio = file->private_data;
+	int rc = 0;
+
+	switch (cmd) {
+	case AUDIO_START:
+	case AUDIO_STOP: {
+		rc = evrc_in_ioctl_shared(file, cmd, arg);
+		break;
+	}
+	case AUDIO_GET_EVRC_ENC_CONFIG_32: {
+		struct msm_audio_evrc_enc_config32 cfg_32;
+		struct msm_audio_evrc_enc_config *enc_cfg;
+
+		memset(&cfg_32, 0, sizeof(cfg_32));
+
+		enc_cfg = audio->enc_cfg;
+		cfg_32.cdma_rate = enc_cfg->cdma_rate;
+		cfg_32.min_bit_rate = enc_cfg->min_bit_rate;
+		cfg_32.max_bit_rate = enc_cfg->max_bit_rate;
+
+		if (copy_to_user((void *)arg, &cfg_32,
+			sizeof(cfg_32))) {
+			pr_err("%s: copy_to_user for AUDIO_GET_EVRC_ENC_CONFIG_32 failed\n",
+				__func__);
+			rc = -EFAULT;
+		}
+		break;
+	}
+	case AUDIO_SET_EVRC_ENC_CONFIG_32: {
+		struct msm_audio_evrc_enc_config cfg;
+		struct msm_audio_evrc_enc_config32 cfg_32;
+		if (copy_from_user(&cfg_32, (void *) arg,
+				sizeof(cfg_32))) {
+			pr_err("%s: copy_from_user for AUDIO_SET_EVRC_ENC_CONFIG_32 failed\n",
+				__func__);
+			rc = -EFAULT;
+			break;
+		}
+		cfg.cdma_rate = cfg_32.cdma_rate;
+		cfg.min_bit_rate = cfg_32.min_bit_rate;
+		cfg.max_bit_rate = cfg_32.max_bit_rate;
+		cmd = AUDIO_SET_EVRC_ENC_CONFIG;
+		rc = evrc_in_ioctl_shared(file, cmd, (unsigned long)&cfg);
+		if (rc)
+			pr_err("%s:AUDIO_SET_EVRC_ENC_CONFIG failed. rc= %d\n",
+				__func__, rc);
+		break;
+	}
+	default:
+		pr_err("%s: Unknown ioctl cmd = %d", __func__, cmd);
+		rc = -EINVAL;
+	}
+	return rc;
+}
+#else
+#define evrc_in_compat_ioctl NULL
+#endif
+
+static int evrc_in_open(struct inode *inode, struct file *file)
+{
+	struct q6audio_in *audio = NULL;
+	struct msm_audio_evrc_enc_config *enc_cfg;
+	int rc = 0;
+
+	audio = kzalloc(sizeof(struct q6audio_in), GFP_KERNEL);
+
+	if (audio == NULL) {
+		pr_err("%s: Could not allocate memory for evrc driver\n",
+				__func__);
+		return -ENOMEM;
+	}
+	/* Allocate memory for encoder config param */
+	audio->enc_cfg = kzalloc(sizeof(struct msm_audio_evrc_enc_config),
+				GFP_KERNEL);
+	if (audio->enc_cfg == NULL) {
+		pr_err("%s:session id %d: Could not allocate memory for aac config param\n",
+				__func__, audio->ac->session);
+		kfree(audio);
+		return -ENOMEM;
+	}
+	enc_cfg = audio->enc_cfg;
+	mutex_init(&audio->lock);
+	mutex_init(&audio->read_lock);
+	mutex_init(&audio->write_lock);
+	spin_lock_init(&audio->dsp_lock);
+	init_waitqueue_head(&audio->read_wait);
+	init_waitqueue_head(&audio->write_wait);
+
+	/*
+	 * Settings will be reconfigured at AUDIO_SET_CONFIG,
+	 * but at least we need to have an initial config
+	 */
+	audio->str_cfg.buffer_size = FRAME_SIZE;
+	audio->str_cfg.buffer_count = FRAME_NUM;
+	audio->min_frame_size = 23;
+	audio->max_frames_per_buf = 10;
+	audio->pcm_cfg.buffer_size = PCM_BUF_SIZE;
+	audio->pcm_cfg.buffer_count = PCM_BUF_COUNT;
+	enc_cfg->min_bit_rate = 4;
+	enc_cfg->max_bit_rate = 4;
+	audio->pcm_cfg.channel_count = 1;
+	audio->pcm_cfg.sample_rate = 8000;
+	audio->buf_cfg.meta_info_enable = 0x01;
+	audio->buf_cfg.frames_per_buf = 0x01;
+	audio->event_abort = 0;
+
+	audio->ac = q6asm_audio_client_alloc((app_cb)q6asm_in_cb,
+				(void *)audio);
+
+	if (!audio->ac) {
+		pr_err("%s: Could not allocate memory for audio client\n",
+				__func__);
+		kfree(audio->enc_cfg);
+		kfree(audio);
+		return -ENOMEM;
+	}
+
+	/* open evrc encoder in T/NT mode */
+	if ((file->f_mode & FMODE_WRITE) &&
+		(file->f_mode & FMODE_READ)) {
+		audio->feedback = NON_TUNNEL_MODE;
+		rc = q6asm_open_read_write(audio->ac, FORMAT_EVRC,
+					FORMAT_LINEAR_PCM);
+		if (rc < 0) {
+			pr_err("%s:session id %d: NT mode Open failed rc=%d\n",
+					__func__, audio->ac->session, rc);
+			rc = -ENODEV;
+			goto fail;
+		}
+		pr_info("%s:session id %d: NT mode encoder success\n",
+				__func__, audio->ac->session);
+	} else if (!(file->f_mode & FMODE_WRITE) &&
+				(file->f_mode & FMODE_READ)) {
+		audio->feedback = TUNNEL_MODE;
+		rc = q6asm_open_read(audio->ac, FORMAT_EVRC);
+		if (rc < 0) {
+			pr_err("%s:session id %d: T mode Open failed rc=%d\n",
+					__func__, audio->ac->session, rc);
+			rc = -ENODEV;
+			goto fail;
+		}
+		/* register for tx overflow (valid for tunnel mode only) */
+		rc = q6asm_reg_tx_overflow(audio->ac, 0x01);
+		if (rc < 0) {
+			pr_err("%s:session id %d: TX Overflow registration failed rc=%d\n",
+				__func__,
+				audio->ac->session, rc);
+			rc = -ENODEV;
+			goto fail;
+		}
+		pr_info("%s:session id %d: T mode encoder success\n", __func__,
+				audio->ac->session);
+	} else {
+		pr_err("%s:session id %d: Unexpected mode\n", __func__,
+				audio->ac->session);
+		rc = -EACCES;
+		goto fail;
+	}
+
+	audio->opened = 1;
+	audio->reset_event = false;
+	atomic_set(&audio->in_count, PCM_BUF_COUNT);
+	atomic_set(&audio->out_count, 0x00);
+	audio->enc_compat_ioctl = evrc_in_compat_ioctl;
+	audio->enc_ioctl = evrc_in_ioctl;
+	file->private_data = audio;
+
+	pr_info("%s:session id %d: success\n", __func__, audio->ac->session);
+	return 0;
+fail:
+	q6asm_audio_client_free(audio->ac);
+	kfree(audio->enc_cfg);
+	kfree(audio);
+	return rc;
+}
+
+static const struct file_operations audio_in_fops = {
+	.owner		= THIS_MODULE,
+	.open		= evrc_in_open,
+	.release	= audio_in_release,
+	.read		= audio_in_read,
+	.write		= audio_in_write,
+	.unlocked_ioctl	= audio_in_ioctl,
+	.compat_ioctl   = audio_in_compat_ioctl
+};
+
+struct miscdevice audio_evrc_in_misc = {
+	.minor	= MISC_DYNAMIC_MINOR,
+	.name	= "msm_evrc_in",
+	.fops	= &audio_in_fops,
+};
+
+static int __init evrc_in_init(void)
+{
+	return misc_register(&audio_evrc_in_misc);
+}
+
+device_initcall(evrc_in_init);
diff -Nruw linux-4.4.115-fbx/drivers/misc/qcom./qdsp6v2/g711alaw_in.c linux-4.4.115-fbx/drivers/misc/qcom/qdsp6v2/g711alaw_in.c
--- linux-4.4.115-fbx/drivers/misc/qcom./qdsp6v2/g711alaw_in.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/misc/qcom/qdsp6v2/g711alaw_in.c	2019-01-22 16:16:24.747257672 +0100
@@ -0,0 +1,382 @@
+/* Copyright (c) 2010-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+*/
+
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/miscdevice.h>
+#include <linux/uaccess.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+#include <linux/dma-mapping.h>
+#include <linux/slab.h>
+#include <linux/msm_audio_g711.h>
+#include <linux/atomic.h>
+#include <linux/compat.h>
+#include <asm/ioctls.h>
+#include "audio_utils.h"
+
+/* Buffer with meta */
+#define PCM_BUF_SIZE		(4096 + sizeof(struct meta_in))
+
+/* Maximum 10 frames in buffer with meta */
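+/*
+ * i.e. 1 header byte plus ten frames of up to 320 bytes each, every frame
+ * carrying a struct meta_out_dsp descriptor.
+ */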
+#define FRAME_SIZE		(1 + ((320+sizeof(struct meta_out_dsp)) * 10))
+static long g711_in_ioctl_shared(struct file *file,
+				unsigned int cmd, unsigned long arg)
+{
+	struct q6audio_in  *audio = file->private_data;
+	int rc = 0;
+	int cnt = 0;
+
+	switch (cmd) {
+	case AUDIO_START: {
+		struct msm_audio_g711_enc_config *enc_cfg;
+
+		enc_cfg = (struct msm_audio_g711_enc_config *)audio->enc_cfg;
+		pr_debug("%s:session id %d: default buf alloc[%d]\n", __func__,
+				audio->ac->session, audio->buf_alloc);
+		if (audio->enabled == 1) {
+			rc = 0;
+			break;
+		}
+		rc = audio_in_buf_alloc(audio);
+		if (rc < 0) {
+			pr_err("%s:session id %d: buffer allocation failed rc=%d\n",
+				__func__, audio->ac->session, rc);
+			break;
+		}
+		pr_debug("%s: sample rate %d", __func__, enc_cfg->sample_rate);
+		rc = q6asm_enc_cfg_blk_g711(audio->ac,
+			audio->buf_cfg.frames_per_buf,
+			enc_cfg->sample_rate);
+
+		if (rc < 0) {
+			pr_err("%s:session id %d: cmd g711 media format block failed rc=%d\n",
+					__func__, audio->ac->session, rc);
+			break;
+		}
+		if (audio->feedback == NON_TUNNEL_MODE) {
+			rc = q6asm_media_format_block_pcm(audio->ac,
+				audio->pcm_cfg.sample_rate,
+				audio->pcm_cfg.channel_count);
+
+			if (rc < 0) {
+				pr_err("%s:session id %d: media format block failed rc=%d\n",
+					__func__, audio->ac->session, rc);
+				break;
+			}
+		}
+		pr_debug("%s:session id %d: AUDIO_START enable[%d]\n", __func__,
+				audio->ac->session, audio->enabled);
+		rc = audio_in_enable(audio);
+		if (!rc) {
+			audio->enabled = 1;
+		} else {
+			audio->enabled = 0;
+			pr_err("%s:session id %d: Audio Start procedure failed rc=%d\n",
+					__func__, audio->ac->session, rc);
+			break;
+		}
+		while (cnt++ < audio->str_cfg.buffer_count)
+			q6asm_read(audio->ac); /* Push buffer to DSP */
+		rc = 0;
+		pr_debug("%s:session id %d: AUDIO_START success enable[%d]\n",
+				__func__, audio->ac->session, audio->enabled);
+		break;
+	}
+	case AUDIO_STOP: {
+		pr_debug("%s:session id %d: AUDIO_STOP\n", __func__,
+				audio->ac->session);
+		rc = audio_in_disable(audio);
+		if (rc  < 0) {
+			pr_err("%s:session id %d: Audio Stop procedure failed rc=%d\n",
+				__func__, audio->ac->session,
+					rc);
+			break;
+		}
+		break;
+	}
+	case AUDIO_SET_G711_ENC_CONFIG: {
+		struct msm_audio_g711_enc_config *cfg;
+		struct msm_audio_g711_enc_config *enc_cfg;
+
+		enc_cfg = (struct msm_audio_g711_enc_config *)audio->enc_cfg;
+
+		cfg = (struct msm_audio_g711_enc_config *)arg;
+		if (cfg == NULL) {
+			pr_err("%s: NULL config pointer\n", __func__);
+			rc = -EINVAL;
+			break;
+		}
+		if (cfg->sample_rate != 8000 &&
+			 cfg->sample_rate != 16000) {
+			pr_err("%s:session id %d: invalid sample rate\n",
+					__func__, audio->ac->session);
+			rc = -EINVAL;
+			break;
+		}
+		enc_cfg->sample_rate = cfg->sample_rate;
+		pr_debug("%s:session id %d: sample_rate= 0x%x",
+			__func__,
+			audio->ac->session, enc_cfg->sample_rate);
+		break;
+	}
+	default:
+		pr_err("%s: Unknown ioctl cmd = %d", __func__, cmd);
+		rc = -ENOIOCTLCMD;
+	}
+	return rc;
+}
+
+static long g711_in_ioctl(struct file *file,
+				unsigned int cmd, unsigned long arg)
+{
+	struct q6audio_in  *audio = file->private_data;
+	int rc = 0;
+
+	switch (cmd) {
+	case AUDIO_START:
+	case AUDIO_STOP: {
+		rc = g711_in_ioctl_shared(file, cmd, arg);
+		break;
+	}
+	case AUDIO_GET_G711_ENC_CONFIG: {
+		if (copy_to_user((void *)arg, audio->enc_cfg,
+			sizeof(struct msm_audio_g711_enc_config))) {
+			pr_err(
+				"%s: copy_to_user for AUDIO_GET_g711_ENC_CONFIG failed",
+				__func__);
+			rc = -EFAULT;
+		}
+		break;
+	}
+	case AUDIO_SET_G711_ENC_CONFIG: {
+		struct msm_audio_g711_enc_config cfg;
+
+		if (copy_from_user(&cfg, (void *) arg,
+				sizeof(cfg))) {
+			pr_err(
+				"%s: copy_from_user for AUDIO_GET_G711_ENC_CONFIG failed",
+				__func__);
+			rc = -EFAULT;
+			break;
+		}
+		rc = g711_in_ioctl_shared(file, cmd, (unsigned long)&cfg);
+		if (rc)
+			pr_err("%s:AUDIO_GET_G711_ENC_CONFIG failed. Rc= %d\n",
+				__func__, rc);
+		break;
+	}
+	default:
+		pr_err("%s: Unknown ioctl cmd = %d", __func__, cmd);
+		rc = -ENOIOCTLCMD;
+	}
+	return rc;
+}
+
+#ifdef CONFIG_COMPAT
+struct msm_audio_g711_enc_config32 {
+	uint32_t sample_rate;
+};
+
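+/*
+ * The single-u32 layout is identical on 32- and 64-bit ABIs; the _32
+ * commands exist to keep the compat path symmetric with the other encoder
+ * drivers.
+ */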
+enum {
+	AUDIO_SET_G711_ENC_CONFIG_32 = _IOW(AUDIO_IOCTL_MAGIC,
+	  (AUDIO_MAX_COMMON_IOCTL_NUM+0), struct msm_audio_g711_enc_config32),
+	AUDIO_GET_G711_ENC_CONFIG_32 = _IOR(AUDIO_IOCTL_MAGIC,
+	  (AUDIO_MAX_COMMON_IOCTL_NUM+1), struct msm_audio_g711_enc_config32)
+};
+
+static long g711_in_compat_ioctl(struct file *file,
+				unsigned int cmd, unsigned long arg)
+{
+	struct q6audio_in  *audio = file->private_data;
+	int rc = 0;
+
+	switch (cmd) {
+	case AUDIO_START:
+	case AUDIO_STOP: {
+		rc = g711_in_ioctl_shared(file, cmd, arg);
+		break;
+	}
+	case AUDIO_GET_G711_ENC_CONFIG_32: {
+		struct msm_audio_g711_enc_config32 cfg_32;
+		struct msm_audio_g711_enc_config *enc_cfg;
+
+		enc_cfg = (struct msm_audio_g711_enc_config *)audio->enc_cfg;
+		cfg_32.sample_rate = enc_cfg->sample_rate;
+		if (copy_to_user((void *)arg, &cfg_32,
+			sizeof(cfg_32))) {
+			pr_err("%s: copy_to_user for AUDIO_GET_G711_ENC_CONFIG_32 failed\n",
+				__func__);
+			rc = -EFAULT;
+		}
+		break;
+	}
+	case AUDIO_SET_G711_ENC_CONFIG_32: {
+		struct msm_audio_g711_enc_config32 cfg_32;
+		struct msm_audio_g711_enc_config cfg;
+
+		if (copy_from_user(&cfg_32, (void *) arg,
+				sizeof(cfg_32))) {
+			pr_err("%s: copy_from_user for AUDIO_SET_G711_ENC_CONFIG_32 failed\n",
+				__func__);
+			rc = -EFAULT;
+			break;
+		}
+		cfg.sample_rate = cfg_32.sample_rate;
+		cmd = AUDIO_SET_G711_ENC_CONFIG;
+		rc = g711_in_ioctl_shared(file, cmd, (unsigned long)&cfg);
+		if (rc)
+			pr_err("%s:AUDIO_SET_G711_ENC_CONFIG failed. rc= %d\n",
+				__func__, rc);
+		break;
+	}
+	default:
+		pr_err("%s: Unknown ioctl cmd = %d", __func__, cmd);
+		rc = -ENOIOCTLCMD;
+	}
+	return rc;
+}
+#else
+#define g711_in_compat_ioctl NULL
+#endif
+
+static int g711_in_open(struct inode *inode, struct file *file)
+{
+	struct q6audio_in *audio = NULL;
+	struct msm_audio_g711_enc_config *enc_cfg;
+	int rc = 0;
+
+	audio = kzalloc(sizeof(struct q6audio_in), GFP_KERNEL);
+
+	if (audio == NULL)
+		return -ENOMEM;
+	/* Allocate memory for encoder config param */
+	audio->enc_cfg = kzalloc(sizeof(struct msm_audio_g711_enc_config),
+				GFP_KERNEL);
+	if (audio->enc_cfg == NULL) {
+		kfree(audio);
+		return -ENOMEM;
+	}
+	enc_cfg = audio->enc_cfg;
+
+	mutex_init(&audio->lock);
+	mutex_init(&audio->read_lock);
+	mutex_init(&audio->write_lock);
+	spin_lock_init(&audio->dsp_lock);
+	init_waitqueue_head(&audio->read_wait);
+	init_waitqueue_head(&audio->write_wait);
+
+	/*
+	 * Settings will be re-config at AUDIO_SET_CONFIG,
+	 * but at least we need to have initial config
+	 */
+	audio->str_cfg.buffer_size = FRAME_SIZE;
+	audio->str_cfg.buffer_count = FRAME_NUM;
+	audio->min_frame_size = 320;
+	audio->max_frames_per_buf = 10;
+	audio->pcm_cfg.buffer_size = PCM_BUF_SIZE;
+	audio->pcm_cfg.buffer_count = PCM_BUF_COUNT;
+	enc_cfg->sample_rate = 8000;
+	audio->pcm_cfg.channel_count = 1;
+	audio->pcm_cfg.sample_rate = 8000;
+	audio->buf_cfg.meta_info_enable = 0x01;
+	audio->buf_cfg.frames_per_buf = 0x01;
+	audio->event_abort = 0;
+
+	audio->ac = q6asm_audio_client_alloc((app_cb)q6asm_in_cb,
+				(void *)audio);
+
+	if (!audio->ac) {
+		kfree(audio->enc_cfg);
+		kfree(audio);
+		return -ENOMEM;
+	}
+
+	/* open g711 encoder in T/NT mode */
+	if ((file->f_mode & FMODE_WRITE) &&
+		(file->f_mode & FMODE_READ)) {
+		audio->feedback = NON_TUNNEL_MODE;
+		rc = q6asm_open_read_write(audio->ac, FORMAT_G711_ALAW_FS,
+					FORMAT_LINEAR_PCM);
+		if (rc < 0) {
+			pr_err("%s:session id %d: NT mode Open failed rc=%d\n",
+				__func__, audio->ac->session, rc);
+			rc = -ENODEV;
+			goto fail;
+		}
+	} else if (!(file->f_mode & FMODE_WRITE) &&
+				(file->f_mode & FMODE_READ)) {
+		audio->feedback = TUNNEL_MODE;
+		rc = q6asm_open_read(audio->ac, FORMAT_G711_ALAW_FS);
+		if (rc < 0) {
+			pr_err("%s:session id %d: T mode Open failed rc=%d\n",
+				__func__, audio->ac->session, rc);
+			rc = -ENODEV;
+			goto fail;
+		}
+		/* register for tx overflow (valid for tunnel mode only) */
+		rc = q6asm_reg_tx_overflow(audio->ac, 0x01);
+		if (rc < 0) {
+			pr_err("%s:session id %d: TX Overflow registration failed rc=%d\n",
+				__func__, audio->ac->session, rc);
+			rc = -ENODEV;
+			goto fail;
+		}
+	} else {
+		pr_err("%s:session id %d: Unexpected mode\n", __func__,
+				audio->ac->session);
+		rc = -EACCES;
+		goto fail;
+	}
+
+	audio->opened = 1;
+	audio->reset_event = false;
+	atomic_set(&audio->in_count, PCM_BUF_COUNT);
+	atomic_set(&audio->out_count, 0x00);
+	audio->enc_compat_ioctl = g711_in_compat_ioctl;
+	audio->enc_ioctl = g711_in_ioctl;
+	file->private_data = audio;
+
+	pr_info("%s:session id %d: success\n", __func__, audio->ac->session);
+	return 0;
+fail:
+	q6asm_audio_client_free(audio->ac);
+	kfree(audio->enc_cfg);
+	kfree(audio);
+	return rc;
+}
+
+static const struct file_operations audio_in_fops = {
+	.owner		= THIS_MODULE,
+	.open		= g711_in_open,
+	.release	= audio_in_release,
+	.read		= audio_in_read,
+	.write		= audio_in_write,
+	.unlocked_ioctl	= audio_in_ioctl,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl   = audio_in_compat_ioctl,
+#endif
+};
+
+struct miscdevice audio_g711alaw_in_misc = {
+	.minor	= MISC_DYNAMIC_MINOR,
+	.name	= "msm_g711alaw_in",
+	.fops	= &audio_in_fops,
+};
+
+static int __init g711alaw_in_init(void)
+{
+	return misc_register(&audio_g711alaw_in_misc);
+}
+
+device_initcall(g711alaw_in_init);
diff -Nruw linux-4.4.115-fbx/drivers/misc/qcom./qdsp6v2/g711mlaw_in.c linux-4.4.115-fbx/drivers/misc/qcom/qdsp6v2/g711mlaw_in.c
--- linux-4.4.115-fbx/drivers/misc/qcom./qdsp6v2/g711mlaw_in.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/misc/qcom/qdsp6v2/g711mlaw_in.c	2019-01-22 16:16:24.747257672 +0100
@@ -0,0 +1,385 @@
+/* Copyright (c) 2010-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+*/
+
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/miscdevice.h>
+#include <linux/uaccess.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+#include <linux/dma-mapping.h>
+#include <linux/slab.h>
+#include <linux/msm_audio_g711.h>
+#include <linux/atomic.h>
+#include <linux/compat.h>
+#include <asm/ioctls.h>
+#include "audio_utils.h"
+
+#ifdef CONFIG_COMPAT
+#undef PROC_ADD
+#endif
+/* Buffer with meta */
+#define PCM_BUF_SIZE		(4096 + sizeof(struct meta_in))
+
+/* Maximum 10 frames in buffer with meta */
+#define FRAME_SIZE		(1 + ((320+sizeof(struct meta_out_dsp)) * 10))
+static long g711_in_ioctl_shared(struct file *file,
+				unsigned int cmd, unsigned long arg)
+{
+	struct q6audio_in  *audio = file->private_data;
+	int rc = 0;
+	int cnt = 0;
+
+	switch (cmd) {
+	case AUDIO_START: {
+		struct msm_audio_g711_enc_config *enc_cfg;
+
+		enc_cfg = (struct msm_audio_g711_enc_config *)audio->enc_cfg;
+		pr_debug("%s:session id %d: default buf alloc[%d]\n", __func__,
+				audio->ac->session, audio->buf_alloc);
+		if (audio->enabled == 1) {
+			rc = 0;
+			break;
+		}
+		rc = audio_in_buf_alloc(audio);
+		if (rc < 0) {
+			pr_err("%s:session id %d: buffer allocation failed rc=%d\n",
+				__func__, audio->ac->session, rc);
+			break;
+		}
+		pr_debug("%s: sample rate %d", __func__, enc_cfg->sample_rate);
+		rc = q6asm_enc_cfg_blk_g711(audio->ac,
+			audio->buf_cfg.frames_per_buf,
+			enc_cfg->sample_rate);
+
+		if (rc < 0) {
+			pr_err("%s:session id %d: cmd g711 media format block failed rc=%d\n",
+					__func__, audio->ac->session, rc);
+			break;
+		}
+		if (audio->feedback == NON_TUNNEL_MODE) {
+			rc = q6asm_media_format_block_pcm(audio->ac,
+				audio->pcm_cfg.sample_rate,
+				audio->pcm_cfg.channel_count);
+
+			if (rc < 0) {
+				pr_err("%s:session id %d: media format block failed rc=%d\n",
+					__func__, audio->ac->session, rc);
+				break;
+			}
+		}
+		pr_debug("%s:session id %d: AUDIO_START enable[%d]\n", __func__,
+				audio->ac->session, audio->enabled);
+		rc = audio_in_enable(audio);
+		if (!rc) {
+			audio->enabled = 1;
+		} else {
+			audio->enabled = 0;
+			pr_err("%s:session id %d: Audio Start procedure failed rc=%d\n",
+					__func__, audio->ac->session, rc);
+			break;
+		}
+		while (cnt++ < audio->str_cfg.buffer_count)
+			q6asm_read(audio->ac); /* Push buffer to DSP */
+		rc = 0;
+		pr_debug("%s:session id %d: AUDIO_START success enable[%d]\n",
+				__func__, audio->ac->session, audio->enabled);
+		break;
+	}
+	case AUDIO_STOP: {
+		pr_debug("%s:session id %d: AUDIO_STOP\n", __func__,
+				audio->ac->session);
+		rc = audio_in_disable(audio);
+		if (rc  < 0) {
+			pr_err("%s:session id %d: Audio Stop procedure failed rc=%d\n",
+				__func__, audio->ac->session,
+					rc);
+			break;
+		}
+		break;
+	}
+	case AUDIO_SET_G711_ENC_CONFIG: {
+		struct msm_audio_g711_enc_config *cfg;
+		struct msm_audio_g711_enc_config *enc_cfg;
+
+		enc_cfg = (struct msm_audio_g711_enc_config *)audio->enc_cfg;
+
+		cfg = (struct msm_audio_g711_enc_config *)arg;
+		if (cfg == NULL) {
+			pr_err("%s: NULL config pointer\n", __func__);
+			rc = -EINVAL;
+			break;
+		}
+		if (cfg->sample_rate != 8000 &&
+			 cfg->sample_rate != 16000) {
+			pr_err("%s:session id %d: invalid sample rate\n",
+					__func__, audio->ac->session);
+			rc = -EINVAL;
+			break;
+		}
+		enc_cfg->sample_rate = cfg->sample_rate;
+		pr_debug("%s:session id %d: sample_rate= 0x%x",
+			__func__,
+			audio->ac->session, enc_cfg->sample_rate);
+		break;
+	}
+	default:
+		pr_err("%s: Unknown ioctl cmd = %d", __func__, cmd);
+		rc = -ENOIOCTLCMD;
+	}
+	return rc;
+}
+
+static long g711_in_ioctl(struct file *file,
+				unsigned int cmd, unsigned long arg)
+{
+	struct q6audio_in  *audio = file->private_data;
+	int rc = 0;
+
+	switch (cmd) {
+	case AUDIO_START:
+	case AUDIO_STOP: {
+		rc = g711_in_ioctl_shared(file, cmd, arg);
+		break;
+	}
+	case AUDIO_GET_G711_ENC_CONFIG: {
+		if (copy_to_user((void *)arg, audio->enc_cfg,
+			sizeof(struct msm_audio_g711_enc_config))) {
+			pr_err(
+				"%s: copy_to_user for AUDIO_GET_g711_ENC_CONFIG failed",
+				__func__);
+			rc = -EFAULT;
+		}
+		break;
+	}
+	case AUDIO_SET_G711_ENC_CONFIG: {
+		struct msm_audio_g711_enc_config cfg;
+
+		if (copy_from_user(&cfg, (void *) arg,
+				sizeof(cfg))) {
+			pr_err(
+				"%s: copy_from_user for AUDIO_GET_G711_ENC_CONFIG failed",
+				__func__);
+			rc = -EFAULT;
+			break;
+		}
+		rc = g711_in_ioctl_shared(file, cmd, (unsigned long)&cfg);
+		if (rc)
+			pr_err("%s:AUDIO_GET_G711_ENC_CONFIG failed. Rc= %d\n",
+				__func__, rc);
+		break;
+	}
+	default:
+		pr_err("%s: Unknown ioctl cmd = %d", __func__, cmd);
+		rc = -ENOIOCTLCMD;
+	}
+	return rc;
+}
+
+#ifdef CONFIG_COMPAT
+struct msm_audio_g711_enc_config32 {
+	uint32_t sample_rate;
+};
+
+enum {
+	AUDIO_SET_G711_ENC_CONFIG_32 = _IOW(AUDIO_IOCTL_MAGIC,
+	  (AUDIO_MAX_COMMON_IOCTL_NUM+0), struct msm_audio_g711_enc_config32),
+	AUDIO_GET_G711_ENC_CONFIG_32 = _IOR(AUDIO_IOCTL_MAGIC,
+	  (AUDIO_MAX_COMMON_IOCTL_NUM+1), struct msm_audio_g711_enc_config32)
+};
+
+static long g711_in_compat_ioctl(struct file *file,
+				unsigned int cmd, unsigned long arg)
+{
+	struct q6audio_in  *audio = file->private_data;
+	int rc = 0;
+
+	switch (cmd) {
+	case AUDIO_START:
+	case AUDIO_STOP: {
+		rc = g711_in_ioctl_shared(file, cmd, arg);
+		break;
+	}
+	case AUDIO_GET_G711_ENC_CONFIG_32: {
+		struct msm_audio_g711_enc_config32 cfg_32;
+		struct msm_audio_g711_enc_config *enc_cfg;
+
+		enc_cfg = (struct msm_audio_g711_enc_config *)audio->enc_cfg;
+		cfg_32.sample_rate = enc_cfg->sample_rate;
+		if (copy_to_user((void *)arg, &cfg_32,
+			sizeof(cfg_32))) {
+			pr_err("%s: copy_to_user for AUDIO_GET_G711_ENC_CONFIG_32 failed\n",
+				__func__);
+			rc = -EFAULT;
+		}
+		break;
+	}
+	case AUDIO_SET_G711_ENC_CONFIG_32: {
+		struct msm_audio_g711_enc_config32 cfg_32;
+		struct msm_audio_g711_enc_config cfg;
+
+		if (copy_from_user(&cfg_32, (void *) arg,
+				sizeof(cfg_32))) {
+			pr_err("%s: copy_from_user for AUDIO_SET_G711_ENC_CONFIG_32 failed\n",
+				__func__);
+			rc = -EFAULT;
+			break;
+		}
+		cfg.sample_rate = cfg_32.sample_rate;
+		cmd = AUDIO_SET_G711_ENC_CONFIG;
+		rc = g711_in_ioctl_shared(file, cmd, (unsigned long)&cfg);
+		if (rc)
+			pr_err("%s:AUDIO_SET_G711_ENC_CONFIG failed. rc= %d\n",
+				__func__, rc);
+		break;
+	}
+	default:
+		pr_err("%s: Unknown ioctl cmd = %d", __func__, cmd);
+		rc = -ENOIOCTLCMD;
+	}
+	return rc;
+}
+#else
+#define g711_in_compat_ioctl NULL
+#endif
+
+static int g711_in_open(struct inode *inode, struct file *file)
+{
+	struct q6audio_in *audio = NULL;
+	struct msm_audio_g711_enc_config *enc_cfg;
+	int rc = 0;
+
+	audio = kzalloc(sizeof(struct q6audio_in), GFP_KERNEL);
+
+	if (audio == NULL)
+		return -ENOMEM;
+	/* Allocate memory for encoder config param */
+	audio->enc_cfg = kzalloc(sizeof(struct msm_audio_g711_enc_config),
+				GFP_KERNEL);
+	if (audio->enc_cfg == NULL) {
+		kfree(audio);
+		return -ENOMEM;
+	}
+	enc_cfg = audio->enc_cfg;
+
+	mutex_init(&audio->lock);
+	mutex_init(&audio->read_lock);
+	mutex_init(&audio->write_lock);
+	spin_lock_init(&audio->dsp_lock);
+	init_waitqueue_head(&audio->read_wait);
+	init_waitqueue_head(&audio->write_wait);
+
+	/*
+	 * Settings will be re-config at AUDIO_SET_CONFIG,
+	 * but at least we need to have initial config
+	 */
+	audio->str_cfg.buffer_size = FRAME_SIZE;
+	audio->str_cfg.buffer_count = FRAME_NUM;
+	audio->min_frame_size = 320;
+	audio->max_frames_per_buf = 10;
+	audio->pcm_cfg.buffer_size = PCM_BUF_SIZE;
+	audio->pcm_cfg.buffer_count = PCM_BUF_COUNT;
+	enc_cfg->sample_rate = 8000;
+	audio->pcm_cfg.channel_count = 1;
+	audio->pcm_cfg.sample_rate = 8000;
+	audio->buf_cfg.meta_info_enable = 0x01;
+	audio->buf_cfg.frames_per_buf = 0x01;
+	audio->event_abort = 0;
+
+	audio->ac = q6asm_audio_client_alloc((app_cb)q6asm_in_cb,
+				(void *)audio);
+
+	if (!audio->ac) {
+		kfree(audio->enc_cfg);
+		kfree(audio);
+		return -ENOMEM;
+	}
+
+	/* open g711 encoder in T/NT mode */
+	if ((file->f_mode & FMODE_WRITE) &&
+		(file->f_mode & FMODE_READ)) {
+		audio->feedback = NON_TUNNEL_MODE;
+		rc = q6asm_open_read_write(audio->ac, FORMAT_G711_MLAW_FS,
+					FORMAT_LINEAR_PCM);
+		if (rc < 0) {
+			pr_err("%s:session id %d: NT mode Open failed rc=%d\n",
+				__func__, audio->ac->session, rc);
+			rc = -ENODEV;
+			goto fail;
+		}
+	} else if (!(file->f_mode & FMODE_WRITE) &&
+				(file->f_mode & FMODE_READ)) {
+		audio->feedback = TUNNEL_MODE;
+		rc = q6asm_open_read(audio->ac, FORMAT_G711_MLAW_FS);
+		if (rc < 0) {
+			pr_err("%s:session id %d: T mode Open failed rc=%d\n",
+				__func__, audio->ac->session, rc);
+			rc = -ENODEV;
+			goto fail;
+		}
+		/* register for tx overflow (valid for tunnel mode only) */
+		rc = q6asm_reg_tx_overflow(audio->ac, 0x01);
+		if (rc < 0) {
+			pr_err("%s:session id %d: TX Overflow registration failed rc=%d\n",
+				__func__, audio->ac->session, rc);
+			rc = -ENODEV;
+			goto fail;
+		}
+	} else {
+		pr_err("%s:session id %d: Unexpected mode\n", __func__,
+				audio->ac->session);
+		rc = -EACCES;
+		goto fail;
+	}
+
+	audio->opened = 1;
+	audio->reset_event = false;
+	atomic_set(&audio->in_count, PCM_BUF_COUNT);
+	atomic_set(&audio->out_count, 0x00);
+	audio->enc_compat_ioctl = g711_in_compat_ioctl;
+	audio->enc_ioctl = g711_in_ioctl;
+	file->private_data = audio;
+
+	pr_info("%s:session id %d: success\n", __func__, audio->ac->session);
+	return 0;
+fail:
+	q6asm_audio_client_free(audio->ac);
+	kfree(audio->enc_cfg);
+	kfree(audio);
+	return rc;
+}
+
+static const struct file_operations audio_in_fops = {
+	.owner		= THIS_MODULE,
+	.open		= g711_in_open,
+	.release	= audio_in_release,
+	.read		= audio_in_read,
+	.write		= audio_in_write,
+	.unlocked_ioctl = audio_in_ioctl,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl   = audio_in_compat_ioctl,
+#endif
+};
+
+struct miscdevice audio_g711mlaw_in_misc = {
+	.minor	= MISC_DYNAMIC_MINOR,
+	.name	= "msm_g711mlaw_in",
+	.fops	= &audio_in_fops,
+};
+
+static int __init g711mlaw_in_init(void)
+{
+	return misc_register(&audio_g711mlaw_in_misc);
+}
+
+device_initcall(g711mlaw_in_init);
diff -Nruw linux-4.4.115-fbx/drivers/misc/qcom./qdsp6v2/Makefile linux-4.4.115-fbx/drivers/misc/qcom/qdsp6v2/Makefile
--- linux-4.4.115-fbx/drivers/misc/qcom./qdsp6v2/Makefile	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/misc/qcom/qdsp6v2/Makefile	2019-01-22 16:16:24.743257635 +0100
@@ -0,0 +1,6 @@
+obj-$(CONFIG_MSM_QDSP6V2_CODECS) += aac_in.o qcelp_in.o evrc_in.o amrnb_in.o g711mlaw_in.o g711alaw_in.o audio_utils.o
+obj-$(CONFIG_MSM_QDSP6V2_CODECS) += audio_wma.o audio_wmapro.o audio_aac.o audio_multi_aac.o audio_alac.o audio_ape.o audio_utils_aio.o
+obj-$(CONFIG_MSM_QDSP6V2_CODECS) += q6audio_v2.o q6audio_v2_aio.o
+obj-$(CONFIG_MSM_QDSP6V2_CODECS) += audio_g711mlaw.o audio_g711alaw.o
+obj-$(CONFIG_MSM_QDSP6V2_CODECS) += audio_mp3.o audio_amrnb.o audio_amrwb.o audio_amrwbplus.o audio_evrc.o audio_qcelp.o amrwb_in.o audio_hwacc_effects.o
+obj-$(CONFIG_MSM_ULTRASOUND) += ultrasound/
diff -Nruw linux-4.4.115-fbx/drivers/misc/qcom./qdsp6v2/q6audio_common.h linux-4.4.115-fbx/drivers/misc/qcom/qdsp6v2/q6audio_common.h
--- linux-4.4.115-fbx/drivers/misc/qcom./qdsp6v2/q6audio_common.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/misc/qcom/qdsp6v2/q6audio_common.h	2019-01-22 16:16:24.747257672 +0100
@@ -0,0 +1,37 @@
+/* Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+*/
+
+
+/* For Decoders */
+#ifndef __Q6_AUDIO_COMMON_H__
+#define __Q6_AUDIO_COMMON_H__
+
+#include <sound/apr_audio-v2.h>
+#include <sound/q6asm-v2.h>
+
+
+void q6_audio_cb(uint32_t opcode, uint32_t token,
+		uint32_t *payload, void *priv);
+
+void audio_aio_cb(uint32_t opcode, uint32_t token,
+			uint32_t *payload, void *audio);
+
+
+/* For Encoders */
+void q6asm_in_cb(uint32_t opcode, uint32_t token,
+		uint32_t *payload, void *priv);
+
+void audio_in_get_dsp_frames(void *audio,
+		uint32_t token,	uint32_t *payload);
+
+#endif /*__Q6_AUDIO_COMMON_H__*/
diff -Nruw linux-4.4.115-fbx/drivers/misc/qcom./qdsp6v2/q6audio_v2_aio.c linux-4.4.115-fbx/drivers/misc/qcom/qdsp6v2/q6audio_v2_aio.c
--- linux-4.4.115-fbx/drivers/misc/qcom./qdsp6v2/q6audio_v2_aio.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/misc/qcom/qdsp6v2/q6audio_v2_aio.c	2019-01-22 16:16:24.747257672 +0100
@@ -0,0 +1,220 @@
+/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+*/
+
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/miscdevice.h>
+#include <linux/uaccess.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+#include <linux/dma-mapping.h>
+#include <linux/slab.h>
+#include <linux/atomic.h>
+#include <asm/ioctls.h>
+#include "audio_utils_aio.h"
+
+void q6_audio_cb(uint32_t opcode, uint32_t token,
+		uint32_t *payload, void *priv)
+{
+	struct q6audio_aio *audio = (struct q6audio_aio *)priv;
+
+	pr_debug("%s:opcode = %x token = 0x%x\n", __func__, opcode, token);
+	switch (opcode) {
+	case ASM_DATA_EVENT_WRITE_DONE_V2:
+	case ASM_DATA_EVENT_READ_DONE_V2:
+	case ASM_DATA_EVENT_RENDERED_EOS:
+	case ASM_DATA_CMD_MEDIA_FMT_UPDATE_V2:
+	case ASM_STREAM_CMD_SET_ENCDEC_PARAM:
+	case ASM_DATA_EVENT_SR_CM_CHANGE_NOTIFY:
+	case ASM_DATA_EVENT_ENC_SR_CM_CHANGE_NOTIFY:
+	case RESET_EVENTS:
+		audio_aio_cb(opcode, token, payload, audio);
+		break;
+	default:
+		pr_debug("%s:Unhandled event = 0x%8x\n", __func__, opcode);
+		break;
+	}
+}
+
+void audio_aio_cb(uint32_t opcode, uint32_t token,
+		uint32_t *payload, void *priv)
+{
+	struct q6audio_aio *audio = (struct q6audio_aio *)priv;
+	union msm_audio_event_payload e_payload;
+
+	switch (opcode) {
+	case ASM_DATA_EVENT_WRITE_DONE_V2:
+		pr_debug("%s[%pK]:ASM_DATA_EVENT_WRITE_DONE token = 0x%x\n",
+			__func__, audio, token);
+		audio_aio_async_write_ack(audio, token, payload);
+		break;
+	case ASM_DATA_EVENT_READ_DONE_V2:
+		pr_debug("%s[%pK]:ASM_DATA_EVENT_READ_DONE token = 0x%x\n",
+			__func__, audio, token);
+		audio_aio_async_read_ack(audio, token, payload);
+		break;
+	case ASM_DATA_EVENT_RENDERED_EOS:
+		/* EOS Handle */
+		pr_debug("%s[%pK]:ASM_DATA_CMDRSP_EOS\n", __func__, audio);
+		if (audio->feedback) { /* Non-Tunnel mode */
+			audio->eos_rsp = 1;
+			/*
+			 * propagate the input EOS i/p buffer only
+			 * after receiving the DSP acknowledgement
+			 */
+			if (audio->eos_flag &&
+				(audio->eos_write_payload.aio_buf.buf_addr)) {
+				audio_aio_post_event(audio,
+						AUDIO_EVENT_WRITE_DONE,
+						audio->eos_write_payload);
+				memset(&audio->eos_write_payload , 0,
+					sizeof(union msm_audio_event_payload));
+				audio->eos_flag = 0;
+			}
+		} else { /* Tunnel mode */
+			audio->eos_rsp = 1;
+			wake_up(&audio->write_wait);
+			wake_up(&audio->cmd_wait);
+		}
+		break;
+	case ASM_DATA_CMD_MEDIA_FMT_UPDATE_V2:
+	case ASM_STREAM_CMD_SET_ENCDEC_PARAM:
+		pr_debug("%s[%pK]:payload0[%x] payloa1d[%x]opcode= 0x%x\n",
+			__func__, audio, payload[0], payload[1], opcode);
+		break;
+	case ASM_DATA_EVENT_SR_CM_CHANGE_NOTIFY:
+	case ASM_DATA_EVENT_ENC_SR_CM_CHANGE_NOTIFY:
+		pr_debug("%s[%pK]: ASM_DATA_EVENT_SR_CM_CHANGE_NOTIFY, payload[0]-sr = %d, payload[1]-chl = %d, payload[2] = %d, payload[3] = %d\n",
+					 __func__, audio, payload[0],
+					 payload[1], payload[2], payload[3]);
+
+		pr_debug("%s[%pK]: ASM_DATA_EVENT_SR_CM_CHANGE_NOTIFY, sr(prev) = %d, chl(prev) = %d,",
+		__func__, audio, audio->pcm_cfg.sample_rate,
+		audio->pcm_cfg.channel_count);
+
+		audio->pcm_cfg.sample_rate = payload[0];
+		audio->pcm_cfg.channel_count = payload[1] & 0xFFFF;
+		e_payload.stream_info.chan_info = audio->pcm_cfg.channel_count;
+		e_payload.stream_info.sample_rate = audio->pcm_cfg.sample_rate;
+		audio_aio_post_event(audio, AUDIO_EVENT_STREAM_INFO, e_payload);
+		break;
+	case RESET_EVENTS:
+		pr_err("%s: Received opcode:0x%x\n", __func__, opcode);
+		audio->stopped = 1;
+		audio->reset_event = true;
+		wake_up(&audio->event_wait);
+		break;
+	default:
+		break;
+	}
+}
+
+void extract_meta_out_info(struct q6audio_aio *audio,
+		struct audio_aio_buffer_node *buf_node, int dir)
+{
+	struct dec_meta_out *meta_data = buf_node->kvaddr;
+	uint32_t temp;
+
+	if (dir) { /* input buffer - Write */
+		if (audio->buf_cfg.meta_info_enable)
+			memcpy(&buf_node->meta_info.meta_in,
+			(char *)buf_node->kvaddr, sizeof(struct dec_meta_in));
+		else
+			memset(&buf_node->meta_info.meta_in,
+			0, sizeof(struct dec_meta_in));
+		pr_debug("%s[%pK]:i/p: msw_ts %d lsw_ts %d nflags 0x%8x\n",
+			__func__, audio,
+			buf_node->meta_info.meta_in.ntimestamp.highpart,
+			buf_node->meta_info.meta_in.ntimestamp.lowpart,
+			buf_node->meta_info.meta_in.nflags);
+	} else { /* output buffer - Read */
+		memcpy((char *)buf_node->kvaddr,
+			&buf_node->meta_info.meta_out,
+			sizeof(struct dec_meta_out));
+		meta_data->meta_out_dsp[0].nflags = 0x00000000;
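+		/*
+		 * The DSP reports the timestamp words in the opposite order
+		 * from the dec_meta_out layout, so swap msw/lsw here.
+		 */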
+		temp = meta_data->meta_out_dsp[0].msw_ts;
+		meta_data->meta_out_dsp[0].msw_ts =
+				meta_data->meta_out_dsp[0].lsw_ts;
+		meta_data->meta_out_dsp[0].lsw_ts = temp;
+
+		pr_debug("%s[%pK]:o/p: msw_ts %d lsw_ts %d nflags 0x%8x, num_frames = %d\n",
+		__func__, audio,
+		((struct dec_meta_out *)buf_node->kvaddr)->\
+			meta_out_dsp[0].msw_ts,
+		((struct dec_meta_out *)buf_node->kvaddr)->\
+			meta_out_dsp[0].lsw_ts,
+		((struct dec_meta_out *)buf_node->kvaddr)->\
+			meta_out_dsp[0].nflags,
+		((struct dec_meta_out *)buf_node->kvaddr)->num_of_frames);
+	}
+}
+
+/* Read buffer from DSP / Handle Ack from DSP */
+void audio_aio_async_read_ack(struct q6audio_aio *audio, uint32_t token,
+			uint32_t *payload)
+{
+	unsigned long flags;
+	union msm_audio_event_payload event_payload;
+	struct audio_aio_buffer_node *filled_buf;
+
+	pr_debug("%s\n", __func__);
+
+	/* No active flush in progress */
+	if (audio->rflush)
+		return;
+
+	/* Statistics of read */
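+	/* payload[4] = bytes returned, payload[9] = frame count (READ_DONE_V2) */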
+	atomic_add(payload[4], &audio->in_bytes);
+	atomic_add(payload[9], &audio->in_samples);
+
+	spin_lock_irqsave(&audio->dsp_lock, flags);
+	if (list_empty(&audio->in_queue)) {
+		spin_unlock_irqrestore(&audio->dsp_lock, flags);
+		pr_warning("%s unexpected ack from dsp\n", __func__);
+		return;
+	}
+	filled_buf = list_first_entry(&audio->in_queue,
+					struct audio_aio_buffer_node, list);
+
+	pr_debug("%s token: 0x[%x], filled_buf->token: 0x[%x]",
+				 __func__, token, filled_buf->token);
+	if (token == (filled_buf->token)) {
+		list_del(&filled_buf->list);
+		spin_unlock_irqrestore(&audio->dsp_lock, flags);
+		event_payload.aio_buf = filled_buf->buf;
+		/*
+		 * Read done buffer due to flush/normal condition after EOS
+		 * event, so append the EOS buffer
+		 */
+		if (audio->eos_rsp == 0x1) {
+			event_payload.aio_buf.data_len =
+			insert_eos_buf(audio, filled_buf);
+			/* Reset flag back to indicate eos intimated */
+			audio->eos_rsp = 0;
+		} else {
+			filled_buf->meta_info.meta_out.num_of_frames =
+							payload[9];
+			event_payload.aio_buf.data_len = payload[4] +
+				payload[5] + sizeof(struct dec_meta_out);
+			pr_debug("%s[%pK]:nr of frames 0x%8x len=%d\n",
+				__func__, audio,
+				filled_buf->meta_info.meta_out.num_of_frames,
+				event_payload.aio_buf.data_len);
+			extract_meta_out_info(audio, filled_buf, 0);
+			audio->eos_rsp = 0;
+		}
+		pr_debug("%s, posting read done to the app here\n", __func__);
+		audio_aio_post_event(audio, AUDIO_EVENT_READ_DONE,
+					event_payload);
+		kfree(filled_buf);
+	} else {
+		pr_err("%s[%pK]:expected=%x ret=%x\n",
+			__func__, audio, filled_buf->token, token);
+		spin_unlock_irqrestore(&audio->dsp_lock, flags);
+	}
+}
diff -Nruw linux-4.4.115-fbx/drivers/misc/qcom./qdsp6v2/q6audio_v2.c linux-4.4.115-fbx/drivers/misc/qcom/qdsp6v2/q6audio_v2.c
--- linux-4.4.115-fbx/drivers/misc/qcom./qdsp6v2/q6audio_v2.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/misc/qcom/qdsp6v2/q6audio_v2.c	2019-01-22 16:16:24.747257672 +0100
@@ -0,0 +1,106 @@
+/* Copyright (c) 2012-2013, 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+*/
+
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/miscdevice.h>
+#include <linux/uaccess.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+#include <linux/dma-mapping.h>
+#include <linux/slab.h>
+#include <linux/atomic.h>
+#include <asm/ioctls.h>
+#include "audio_utils.h"
+
+void q6asm_in_cb(uint32_t opcode, uint32_t token,
+		uint32_t *payload, void *priv)
+{
+	struct q6audio_in *audio = (struct q6audio_in *)priv;
+	unsigned long flags;
+
+	pr_debug("%s:session id %d: opcode[0x%x]\n", __func__,
+			audio->ac->session, opcode);
+
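+	/* dsp_lock serialises this APR callback against the read/write paths */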
+	spin_lock_irqsave(&audio->dsp_lock, flags);
+	switch (opcode) {
+	case ASM_DATA_EVENT_READ_DONE_V2:
+		audio_in_get_dsp_frames(audio, token, payload);
+		break;
+	case ASM_DATA_EVENT_WRITE_DONE_V2:
+		atomic_inc(&audio->in_count);
+		wake_up(&audio->write_wait);
+		break;
+	case ASM_DATA_EVENT_RENDERED_EOS:
+		audio->eos_rsp = 1;
+		wake_up(&audio->read_wait);
+		break;
+	case ASM_STREAM_CMDRSP_GET_PP_PARAMS_V2:
+		break;
+	case ASM_SESSION_EVENTX_OVERFLOW:
+		pr_err("%s:session id %d: ASM_SESSION_EVENT_TX_OVERFLOW\n",
+			__func__, audio->ac->session);
+		break;
+	case RESET_EVENTS:
+		pr_debug("%s:received RESET EVENTS\n", __func__);
+		audio->enabled = 0;
+		audio->stopped = 1;
+		audio->event_abort = 1;
+		audio->reset_event = true;
+		wake_up(&audio->read_wait);
+		wake_up(&audio->write_wait);
+		break;
+	default:
+		pr_debug("%s:session id %d: Ignore opcode[0x%x]\n", __func__,
+			audio->ac->session, opcode);
+		break;
+	}
+	spin_unlock_irqrestore(&audio->dsp_lock, flags);
+}
+
+void audio_in_get_dsp_frames(void *priv,
+	uint32_t token,	uint32_t *payload)
+{
+	struct q6audio_in *audio = (struct q6audio_in *)priv;
+	uint32_t index;
+
+	index = q6asm_get_buf_index_from_token(token);
+	pr_debug("%s:session id %d: index=%d nr frames=%d offset[%d]\n",
+			__func__, audio->ac->session, index, payload[9],
+			payload[5]);
+	pr_debug("%s:session id %d: timemsw=%d lsw=%d\n", __func__,
+			audio->ac->session, payload[7], payload[6]);
+	pr_debug("%s:session id %d: uflags=0x%8x uid=0x%8x\n", __func__,
+			audio->ac->session, payload[8], payload[10]);
+	pr_debug("%s:session id %d: enc_framesotal_size=0x%8x\n", __func__,
+			audio->ac->session, payload[4]);
+
+	/* Ensure the index is within max array size: FRAME_NUM */
+	if (index >= FRAME_NUM) {
+		pr_err("%s: Invalid index %d\n",
+			__func__, index);
+		return;
+	}
+
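+	/* Cache frame count (payload[9]) and offset (payload[5]) for the read path */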
+	audio->out_frame_info[index][0] = payload[9];
+	audio->out_frame_info[index][1] = payload[5];
+
+	/* statistics of read */
+	atomic_add(payload[4], &audio->in_bytes);
+	atomic_add(payload[9], &audio->in_samples);
+
+	if (atomic_read(&audio->out_count) <= audio->str_cfg.buffer_count) {
+		atomic_inc(&audio->out_count);
+		wake_up(&audio->read_wait);
+	}
+}
diff -Nruw linux-4.4.115-fbx/drivers/misc/qcom./qdsp6v2/qcelp_in.c linux-4.4.115-fbx/drivers/misc/qcom/qdsp6v2/qcelp_in.c
--- linux-4.4.115-fbx/drivers/misc/qcom./qdsp6v2/qcelp_in.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/misc/qcom/qdsp6v2/qcelp_in.c	2019-01-22 16:16:24.747257672 +0100
@@ -0,0 +1,410 @@
+/* Copyright (c) 2010-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+*/
+
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/miscdevice.h>
+#include <linux/uaccess.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+#include <linux/dma-mapping.h>
+#include <linux/slab.h>
+#include <linux/msm_audio_qcp.h>
+#include <linux/atomic.h>
+#include <linux/compat.h>
+#include <asm/ioctls.h>
+#include "audio_utils.h"
+
+/* Buffer with meta */
+#define PCM_BUF_SIZE		(4096 + sizeof(struct meta_in))
+
+/* Maximum 10 frames in buffer with meta */
+#define FRAME_SIZE		(1 + ((35+sizeof(struct meta_out_dsp)) * 10))
+
+static long qcelp_in_ioctl_shared(struct file *file,
+				unsigned int cmd, unsigned long arg)
+{
+	struct q6audio_in  *audio = file->private_data;
+	int rc = 0;
+	int cnt = 0;
+
+	switch (cmd) {
+	case AUDIO_START: {
+		struct msm_audio_qcelp_enc_config *enc_cfg;
+		enc_cfg = audio->enc_cfg;
+		pr_debug("%s:session id %d: default buf alloc[%d]\n", __func__,
+				audio->ac->session, audio->buf_alloc);
+		if (audio->enabled == 1) {
+			pr_info("%s:AUDIO_START already over\n", __func__);
+			rc = 0;
+			break;
+		}
+		rc = audio_in_buf_alloc(audio);
+		if (rc < 0) {
+			pr_err("%s:session id %d: buffer allocation failed\n",
+				__func__, audio->ac->session);
+			break;
+		}
+
+		/* reduced_rate_level, rate_modulation_cmd set to zero
+			 currently not configurable from user space */
+		rc = q6asm_enc_cfg_blk_qcelp(audio->ac,
+			audio->buf_cfg.frames_per_buf,
+			enc_cfg->min_bit_rate,
+			enc_cfg->max_bit_rate, 0, 0);
+
+		if (rc < 0) {
+			pr_err("%s:session id %d: cmd qcelp media format block failed\n",
+					__func__, audio->ac->session);
+			break;
+		}
+		if (audio->feedback == NON_TUNNEL_MODE) {
+			rc = q6asm_media_format_block_pcm(audio->ac,
+				audio->pcm_cfg.sample_rate,
+				audio->pcm_cfg.channel_count);
+
+			if (rc < 0) {
+				pr_err("%s:session id %d: media format block failed\n",
+					__func__, audio->ac->session);
+				break;
+			}
+		}
+		pr_debug("%s:session id %d: AUDIO_START enable[%d]\n", __func__,
+				audio->ac->session, audio->enabled);
+		rc = audio_in_enable(audio);
+		if (!rc) {
+			audio->enabled = 1;
+		} else {
+			audio->enabled = 0;
+			pr_err("%s:session id %d: Audio Start procedure failed rc=%d\n",
+					__func__, audio->ac->session, rc);
+			break;
+		}
+		while (cnt++ < audio->str_cfg.buffer_count)
+			q6asm_read(audio->ac); /* Push buffer to DSP */
+		rc = 0;
+		pr_debug("%s:session id %d: AUDIO_START success enable[%d]\n",
+				__func__, audio->ac->session, audio->enabled);
+		break;
+	}
+	case AUDIO_STOP: {
+		pr_debug("%s:session id %d: AUDIO_STOP\n", __func__,
+				audio->ac->session);
+		rc = audio_in_disable(audio);
+		if (rc  < 0) {
+			pr_err("%s:session id %d: Audio Stop procedure failed rc=%d\n",
+				__func__, audio->ac->session,
+					rc);
+			break;
+		}
+		break;
+	}
+	case AUDIO_SET_QCELP_ENC_CONFIG: {
+		struct msm_audio_qcelp_enc_config *cfg;
+		struct msm_audio_qcelp_enc_config *enc_cfg;
+		enc_cfg = audio->enc_cfg;
+
+		cfg = (struct msm_audio_qcelp_enc_config *)arg;
+		if (cfg == NULL) {
+			pr_err("%s: NULL config pointer\n", __func__);
+			rc = -EINVAL;
+			break;
+		}
+		if (cfg->min_bit_rate > 4 ||
+			 cfg->min_bit_rate < 1) {
+			pr_err("%s:session id %d: invalid min bitrate\n",
+					__func__, audio->ac->session);
+			rc = -EINVAL;
+			break;
+		}
+		if (cfg->max_bit_rate > 4 ||
+			 cfg->max_bit_rate < 1) {
+			pr_err("%s:session id %d: invalid max bitrate\n",
+					__func__, audio->ac->session);
+			rc = -EINVAL;
+			break;
+		}
+		enc_cfg->cdma_rate = cfg->cdma_rate;
+		enc_cfg->min_bit_rate = cfg->min_bit_rate;
+		enc_cfg->max_bit_rate = cfg->max_bit_rate;
+		pr_debug("%s:session id %d: min_bit_rate= 0x%x max_bit_rate=0x%x\n",
+			__func__,
+			audio->ac->session, enc_cfg->min_bit_rate,
+			enc_cfg->max_bit_rate);
+		break;
+	}
+	default:
+		pr_err("%s: Unknown ioctl cmd = %d", __func__, cmd);
+		rc = -EINVAL;
+	}
+	return rc;
+}
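+
+/*
+ * Illustrative user-space sequence for this device (a sketch, not part
+ * of the driver; the node name comes from audio_qcelp_in_misc below and
+ * the accepted bit-rate range 1..4 from the checks above):
+ *
+ *	int fd = open("/dev/msm_qcelp_in", O_RDONLY);	// tunnel mode
+ *	struct msm_audio_qcelp_enc_config cfg;
+ *
+ *	ioctl(fd, AUDIO_GET_QCELP_ENC_CONFIG, &cfg);
+ *	cfg.min_bit_rate = 1;
+ *	cfg.max_bit_rate = 4;
+ *	ioctl(fd, AUDIO_SET_QCELP_ENC_CONFIG, &cfg);
+ *	ioctl(fd, AUDIO_START, 0);
+ *	...	read() then returns encoded frames (with meta info)
+ *	ioctl(fd, AUDIO_STOP, 0);
+ *	close(fd);
+ */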
+
+static long qcelp_in_ioctl(struct file *file,
+				unsigned int cmd, unsigned long arg)
+{
+	struct q6audio_in  *audio = file->private_data;
+	int rc = 0;
+
+	switch (cmd) {
+	case AUDIO_START:
+	case AUDIO_STOP: {
+		rc = qcelp_in_ioctl_shared(file, cmd, arg);
+		break;
+	}
+	case AUDIO_GET_QCELP_ENC_CONFIG: {
+		if (copy_to_user((void *)arg, audio->enc_cfg,
+			sizeof(struct msm_audio_qcelp_enc_config))) {
+			pr_err(
+				"%s: copy_to_user for AUDIO_GET_QCELP_ENC_CONFIG failed",
+				__func__);
+			rc = -EFAULT;
+		}
+		break;
+	}
+	case AUDIO_SET_QCELP_ENC_CONFIG: {
+		struct msm_audio_qcelp_enc_config cfg;
+		if (copy_from_user(&cfg, (void *) arg,
+				sizeof(cfg))) {
+			pr_err(
+				"%s: copy_from_user for AUDIO_SET_QCELP_ENC_CONFIG failed",
+				__func__);
+			rc = -EFAULT;
+			break;
+		}
+		rc = qcelp_in_ioctl_shared(file, cmd, (unsigned long)&cfg);
+		if (rc)
+			pr_err("%s:AUDIO_SET_QCELP_ENC_CONFIG failed. Rc= %d\n",
+				__func__, rc);
+		break;
+	}
+	default:
+		pr_err("%s: Unknown ioctl cmd = %d", __func__, cmd);
+		rc = -EINVAL;
+	}
+	return rc;
+}
+
+#ifdef CONFIG_COMPAT
+struct msm_audio_qcelp_enc_config32 {
+	u32 cdma_rate;
+	u32 min_bit_rate;
+	u32 max_bit_rate;
+};
+
+enum {
+	AUDIO_SET_QCELP_ENC_CONFIG_32 = _IOW(AUDIO_IOCTL_MAGIC,
+		0, struct msm_audio_qcelp_enc_config32),
+	AUDIO_GET_QCELP_ENC_CONFIG_32 = _IOR(AUDIO_IOCTL_MAGIC,
+		1, struct msm_audio_qcelp_enc_config32)
+};
+
+static long qcelp_in_compat_ioctl(struct file *file,
+				unsigned int cmd, unsigned long arg)
+{
+	struct q6audio_in  *audio = file->private_data;
+	int rc = 0;
+
+	switch (cmd) {
+	case AUDIO_START:
+	case AUDIO_STOP: {
+		rc = qcelp_in_ioctl_shared(file, cmd, arg);
+		break;
+	}
+	case AUDIO_GET_QCELP_ENC_CONFIG_32: {
+		struct msm_audio_qcelp_enc_config32 cfg_32;
+		struct msm_audio_qcelp_enc_config *enc_cfg;
+
+		memset(&cfg_32, 0, sizeof(cfg_32));
+
+		enc_cfg = (struct msm_audio_qcelp_enc_config *)audio->enc_cfg;
+		cfg_32.cdma_rate = enc_cfg->cdma_rate;
+		cfg_32.min_bit_rate = enc_cfg->min_bit_rate;
+		cfg_32.max_bit_rate = enc_cfg->max_bit_rate;
+		if (copy_to_user((void *)arg, &cfg_32,
+			sizeof(cfg_32))) {
+			pr_err("%s: copy_to_user for AUDIO_GET_QCELP_ENC_CONFIG_32 failed\n",
+				__func__);
+			rc = -EFAULT;
+		}
+		break;
+	}
+	case AUDIO_SET_QCELP_ENC_CONFIG_32: {
+		struct msm_audio_qcelp_enc_config32 cfg_32;
+		struct msm_audio_qcelp_enc_config cfg;
+		if (copy_from_user(&cfg_32, (void *) arg,
+				sizeof(cfg_32))) {
+			pr_err("%s: copy_from_user for AUDIO_SET_QCELP_ENC_CONFIG_32 failed\n",
+				__func__);
+			rc = -EFAULT;
+			break;
+		}
+		cfg.cdma_rate = cfg_32.cdma_rate;
+		cfg.min_bit_rate = cfg_32.min_bit_rate;
+		cfg.max_bit_rate = cfg_32.max_bit_rate;
+		cmd = AUDIO_SET_QCELP_ENC_CONFIG;
+		rc = qcelp_in_ioctl_shared(file, cmd, (unsigned long)&cfg);
+		if (rc)
+			pr_err("%s:AUDIO_SET_QCELP_ENC_CONFIG failed. rc= %d\n",
+				__func__, rc);
+		break;
+	}
+	default:
+		pr_err("%s: Unknown ioctl cmd = %d", __func__, cmd);
+		rc = -EINVAL;
+	}
+	return rc;
+}
+#else
+#define qcelp_in_compat_ioctl NULL
+#endif
+
+static int qcelp_in_open(struct inode *inode, struct file *file)
+{
+	struct q6audio_in *audio = NULL;
+	struct msm_audio_qcelp_enc_config *enc_cfg;
+	int rc = 0;
+
+	audio = kzalloc(sizeof(struct q6audio_in), GFP_KERNEL);
+
+	if (audio == NULL) {
+		pr_err("%s: Could not allocate memory for qcelp driver\n",
+				__func__);
+		return -ENOMEM;
+	}
+	/* Allocate memory for encoder config param */
+	audio->enc_cfg = kzalloc(sizeof(struct msm_audio_qcelp_enc_config),
+				GFP_KERNEL);
+	if (audio->enc_cfg == NULL) {
+		pr_err("%s:session id %d: Could not allocate memory for aac config param\n",
+				__func__, audio->ac->session);
+		kfree(audio);
+		return -ENOMEM;
+	}
+	enc_cfg = audio->enc_cfg;
+
+	mutex_init(&audio->lock);
+	mutex_init(&audio->read_lock);
+	mutex_init(&audio->write_lock);
+	spin_lock_init(&audio->dsp_lock);
+	init_waitqueue_head(&audio->read_wait);
+	init_waitqueue_head(&audio->write_wait);
+
+	/*
+	 * Settings will be reconfigured at AUDIO_SET_CONFIG,
+	 * but at least an initial configuration is needed.
+	 */
+	audio->str_cfg.buffer_size = FRAME_SIZE;
+	audio->str_cfg.buffer_count = FRAME_NUM;
+	audio->min_frame_size = 35;
+	audio->max_frames_per_buf = 10;
+	audio->pcm_cfg.buffer_size = PCM_BUF_SIZE;
+	audio->pcm_cfg.buffer_count = PCM_BUF_COUNT;
+	enc_cfg->min_bit_rate = 4;
+	enc_cfg->max_bit_rate = 4;
+	audio->pcm_cfg.channel_count = 1;
+	audio->pcm_cfg.sample_rate = 8000;
+	audio->buf_cfg.meta_info_enable = 0x01;
+	audio->buf_cfg.frames_per_buf = 0x01;
+	audio->event_abort = 0;
+
+	audio->ac = q6asm_audio_client_alloc((app_cb)q6asm_in_cb,
+				(void *)audio);
+
+	if (!audio->ac) {
+		pr_err("%s: Could not allocate memory for audio client\n",
+				__func__);
+		kfree(audio->enc_cfg);
+		kfree(audio);
+		return -ENOMEM;
+	}
+
+	/* open qcelp encoder in T/NT mode */
+	if ((file->f_mode & FMODE_WRITE) &&
+		(file->f_mode & FMODE_READ)) {
+		audio->feedback = NON_TUNNEL_MODE;
+		rc = q6asm_open_read_write(audio->ac, FORMAT_V13K,
+					FORMAT_LINEAR_PCM);
+		if (rc < 0) {
+			pr_err("%s:session id %d: NT mode Open failed rc=%d\n",
+				__func__, audio->ac->session, rc);
+			rc = -ENODEV;
+			goto fail;
+		}
+		pr_info("%s:session id %d: NT mode encoder success\n", __func__,
+				audio->ac->session);
+	} else if (!(file->f_mode & FMODE_WRITE) &&
+				(file->f_mode & FMODE_READ)) {
+		audio->feedback = TUNNEL_MODE;
+		rc = q6asm_open_read(audio->ac, FORMAT_V13K);
+		if (rc < 0) {
+			pr_err("%s:session id %d: T mode Open failed rc=%d\n",
+				__func__, audio->ac->session, rc);
+			rc = -ENODEV;
+			goto fail;
+		}
+		/* register for tx overflow (valid for tunnel mode only) */
+		rc = q6asm_reg_tx_overflow(audio->ac, 0x01);
+		if (rc < 0) {
+			pr_err("%s:session id %d: TX Overflow registration failed rc=%d\n",
+				__func__, audio->ac->session, rc);
+			rc = -ENODEV;
+			goto fail;
+		}
+		pr_info("%s:session id %d: T mode encoder success\n", __func__,
+				audio->ac->session);
+	} else {
+		pr_err("%s:session id %d: Unexpected mode\n", __func__,
+				audio->ac->session);
+		rc = -EACCES;
+		goto fail;
+	}
+
+	audio->opened = 1;
+	audio->reset_event = false;
+	atomic_set(&audio->in_count, PCM_BUF_COUNT);
+	atomic_set(&audio->out_count, 0x00);
+	audio->enc_compat_ioctl = qcelp_in_compat_ioctl;
+	audio->enc_ioctl = qcelp_in_ioctl;
+	file->private_data = audio;
+
+	pr_info("%s:session id %d: success\n", __func__, audio->ac->session);
+	return 0;
+fail:
+	q6asm_audio_client_free(audio->ac);
+	kfree(audio->enc_cfg);
+	kfree(audio);
+	return rc;
+}
+
+static const struct file_operations audio_in_fops = {
+	.owner		= THIS_MODULE,
+	.open		= qcelp_in_open,
+	.release	= audio_in_release,
+	.read		= audio_in_read,
+	.write		= audio_in_write,
+	.unlocked_ioctl	= audio_in_ioctl,
+	.compat_ioctl   = audio_in_compat_ioctl
+};
+
+struct miscdevice audio_qcelp_in_misc = {
+	.minor	= MISC_DYNAMIC_MINOR,
+	.name	= "msm_qcelp_in",
+	.fops	= &audio_in_fops,
+};
+
+static int __init qcelp_in_init(void)
+{
+	return misc_register(&audio_qcelp_in_misc);
+}
+
+device_initcall(qcelp_in_init);
diff -Nruw linux-4.4.115-fbx/drivers/misc/qcom./qdsp6v2/ultrasound/Makefile linux-4.4.115-fbx/drivers/misc/qcom/qdsp6v2/ultrasound/Makefile
--- linux-4.4.115-fbx/drivers/misc/qcom./qdsp6v2/ultrasound/Makefile	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/misc/qcom/qdsp6v2/ultrasound/Makefile	2019-01-22 16:16:24.747257672 +0100
@@ -0,0 +1,2 @@
+ccflags-y := -I$(src)/..
+obj-$(CONFIG_MSM_ULTRASOUND) += usf.o usfcdev.o q6usm.o
diff -Nruw linux-4.4.115-fbx/drivers/misc/qcom./qdsp6v2/ultrasound/q6usm.c linux-4.4.115-fbx/drivers/misc/qcom/qdsp6v2/ultrasound/q6usm.c
--- linux-4.4.115-fbx/drivers/misc/qcom./qdsp6v2/ultrasound/q6usm.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/misc/qcom/qdsp6v2/ultrasound/q6usm.c	2019-10-29 09:26:24.049207151 +0100
@@ -0,0 +1,1468 @@
+/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/mutex.h>
+#include <linux/wait.h>
+#include <linux/msm_audio_ion.h>
+#include <linux/sched.h>
+#include <linux/spinlock.h>
+#include <linux/slab.h>
+#include <linux/msm_audio.h>
+#include <sound/apr_audio-v2.h>
+#include <linux/qdsp6v2/apr_us.h>
+#include "q6usm.h"
+
+#define ADSP_MEMORY_MAP_SHMEM8_4K_POOL 3
+
+#define MEM_4K_OFFSET 4095
+#define MEM_4K_MASK 0xfffff000
+
+#define USM_SESSION_MAX 0x02 /* aDSP:USM limit */
+
+#define READDONE_IDX_STATUS     0
+
+#define WRITEDONE_IDX_STATUS    0
+
+/* Standard timeout in the asynchronous ops */
+#define Q6USM_TIMEOUT_JIFFIES	(1*HZ) /* 1 sec */
+
+static DEFINE_MUTEX(session_lock);
+
+static struct us_client *session[USM_SESSION_MAX];
+static int32_t q6usm_mmapcallback(struct apr_client_data *data, void *priv);
+static int32_t q6usm_callback(struct apr_client_data *data, void *priv);
+static void q6usm_add_hdr(struct us_client *usc, struct apr_hdr *hdr,
+			  uint32_t pkt_size, bool cmd_flg);
+
+struct usm_mmap {
+	atomic_t ref_cnt;
+	atomic_t cmd_state;
+	wait_queue_head_t cmd_wait;
+	void *apr;
+	int mem_handle;
+};
+
+static struct usm_mmap this_mmap;
+
+static void q6usm_add_mmaphdr(struct apr_hdr *hdr,
+			      uint32_t pkt_size, bool cmd_flg, u32 token)
+{
+	hdr->hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+				       APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
+	hdr->src_port = 0;
+	hdr->dest_port = 0;
+	if (cmd_flg) {
+		hdr->token = token;
+		atomic_set(&this_mmap.cmd_state, 1);
+	}
+	hdr->pkt_size  = pkt_size;
+	return;
+}
+
+static int q6usm_memory_map(phys_addr_t buf_add, int dir, uint32_t bufsz,
+		uint32_t bufcnt, uint32_t session, uint32_t *mem_handle)
+{
+	struct usm_cmd_memory_map_region mem_region_map;
+	int rc = 0;
+
+	if (this_mmap.apr == NULL) {
+		pr_err("%s: APR handle NULL\n", __func__);
+		return -EINVAL;
+	}
+
+	q6usm_add_mmaphdr(&mem_region_map.hdr,
+			  sizeof(struct usm_cmd_memory_map_region), true,
+			  ((session << 8) | dir));
+
+	mem_region_map.hdr.opcode = USM_CMD_SHARED_MEM_MAP_REGION;
+	mem_region_map.mempool_id = ADSP_MEMORY_MAP_SHMEM8_4K_POOL;
+
+	mem_region_map.num_regions = 1;
+	mem_region_map.flags = 0;
+
+	mem_region_map.shm_addr_lsw = lower_32_bits(buf_add);
+	mem_region_map.shm_addr_msw =
+			msm_audio_populate_upper_32_bits(buf_add);
+	mem_region_map.mem_size_bytes = bufsz * bufcnt;
+
+	rc = apr_send_pkt(this_mmap.apr, (uint32_t *) &mem_region_map);
+	if (rc < 0) {
+		pr_err("%s: mem_map op[0x%x]rc[%d]\n",
+		       __func__, mem_region_map.hdr.opcode, rc);
+		rc = -EINVAL;
+		goto fail_cmd;
+	}
+
+	rc = wait_event_timeout(this_mmap.cmd_wait,
+				(atomic_read(&this_mmap.cmd_state) == 0),
+				Q6USM_TIMEOUT_JIFFIES);
+	if (!rc) {
+		rc = -ETIME;
+		pr_err("%s: timeout. waited for memory_map\n", __func__);
+	} else {
+		*mem_handle = this_mmap.mem_handle;
+		rc = 0;
+	}
+fail_cmd:
+	return rc;
+}
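+
+/*
+ * Note on the handshake above: apr_send_pkt() only queues the map
+ * command; q6usm_mmapcallback() stores the aDSP-assigned handle in
+ * this_mmap.mem_handle and clears cmd_state only when
+ * USM_CMDRSP_SHARED_MEM_MAP_REGION arrives; for the map case the
+ * basic APR result alone does not wake the waiter.
+ */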
+
+int q6usm_memory_unmap(phys_addr_t buf_add, int dir, uint32_t session,
+			uint32_t mem_handle)
+{
+	struct usm_cmd_memory_unmap_region mem_unmap;
+	int rc = 0;
+
+	if (this_mmap.apr == NULL) {
+		pr_err("%s: APR handle NULL\n", __func__);
+		return -EINVAL;
+	}
+
+	q6usm_add_mmaphdr(&mem_unmap.hdr,
+			  sizeof(struct usm_cmd_memory_unmap_region), true,
+			  ((session << 8) | dir));
+	mem_unmap.hdr.opcode = USM_CMD_SHARED_MEM_UNMAP_REGION;
+	mem_unmap.mem_map_handle = mem_handle;
+
+	rc = apr_send_pkt(this_mmap.apr, (uint32_t *) &mem_unmap);
+	if (rc < 0) {
+		pr_err("%s: mem_unmap op[0x%x] rc[%d]\n",
+		       __func__, mem_unmap.hdr.opcode, rc);
+		goto fail_cmd;
+	}
+
+	rc = wait_event_timeout(this_mmap.cmd_wait,
+				(atomic_read(&this_mmap.cmd_state) == 0),
+				Q6USM_TIMEOUT_JIFFIES);
+	if (!rc) {
+		rc = -ETIME;
+		pr_err("%s: timeout. waited for memory_unmap\n", __func__);
+	} else
+		rc = 0;
+fail_cmd:
+	return rc;
+}
+
+static int q6usm_session_alloc(struct us_client *usc)
+{
+	int ind = 0;
+
+	mutex_lock(&session_lock);
+	for (ind = 0; ind < USM_SESSION_MAX; ++ind) {
+		if (!session[ind]) {
+			session[ind] = usc;
+			mutex_unlock(&session_lock);
+			++ind; /* session id: 0 reserved */
+			pr_debug("%s: session[%d] was allocated\n",
+				  __func__, ind);
+			return ind;
+		}
+	}
+	mutex_unlock(&session_lock);
+	return -ENOMEM;
+}
+
+static void q6usm_session_free(struct us_client *usc)
+{
+	/* Session index was incremented during allocation */
+	uint16_t ind = (uint16_t)usc->session - 1;
+
+	pr_debug("%s: to free session[%d]\n", __func__, ind);
+	if (ind < USM_SESSION_MAX) {
+		mutex_lock(&session_lock);
+		session[ind] = NULL;
+		mutex_unlock(&session_lock);
+	}
+}
+
+static int q6usm_us_client_buf_free(unsigned int dir,
+			     struct us_client *usc)
+{
+	struct us_port_data *port;
+	int rc = 0;
+
+	if ((usc == NULL) ||
+	    ((dir != IN) && (dir != OUT)))
+		return -EINVAL;
+
+	mutex_lock(&usc->cmd_lock);
+	port = &usc->port[dir];
+	if (port == NULL) {
+		mutex_unlock(&usc->cmd_lock);
+		return -EINVAL;
+	}
+
+	if (port->data == NULL) {
+		mutex_unlock(&usc->cmd_lock);
+		return 0;
+	}
+
+	rc = q6usm_memory_unmap(port->phys, dir, usc->session,
+				*((uint32_t *)port->ext));
+	pr_debug("%s: data[%pK]phys[%llx][%pK]\n", __func__,
+		 (void *)port->data, (u64)port->phys, (void *)&port->phys);
+
+	msm_audio_ion_free(port->client, port->handle);
+
+	port->data = NULL;
+	port->phys = 0;
+	port->buf_size = 0;
+	port->buf_cnt = 0;
+	port->client = NULL;
+	port->handle = NULL;
+
+	mutex_unlock(&usc->cmd_lock);
+	return rc;
+}
+
+int q6usm_us_param_buf_free(unsigned int dir,
+			struct us_client *usc)
+{
+	struct us_port_data *port;
+	int rc = 0;
+
+	if ((usc == NULL) ||
+		((dir != IN) && (dir != OUT)))
+		return -EINVAL;
+
+	mutex_lock(&usc->cmd_lock);
+	port = &usc->port[dir];
+	if (port == NULL) {
+		mutex_unlock(&usc->cmd_lock);
+		return -EINVAL;
+	}
+
+	if (port->param_buf == NULL) {
+		mutex_unlock(&usc->cmd_lock);
+		return 0;
+	}
+
+	rc = q6usm_memory_unmap(port->param_phys, dir, usc->session,
+				*((uint32_t *)port->param_buf_mem_handle));
+	pr_debug("%s: data[%pK]phys[%llx][%pK]\n", __func__,
+		 (void *)port->param_buf, (u64)port->param_phys,
+		 (void *)&port->param_phys);
+
+	msm_audio_ion_free(port->param_client, port->param_handle);
+
+	port->param_buf = NULL;
+	port->param_phys = 0;
+	port->param_buf_size = 0;
+	port->param_client = NULL;
+	port->param_handle = NULL;
+
+	mutex_unlock(&usc->cmd_lock);
+	return rc;
+}
+
+void q6usm_us_client_free(struct us_client *usc)
+{
+	int loopcnt = 0;
+	struct us_port_data *port;
+	uint32_t *p_mem_handle = NULL;
+
+	if ((usc == NULL) ||
+	    !(usc->session))
+		return;
+
+	for (loopcnt = 0; loopcnt <= OUT; ++loopcnt) {
+		port = &usc->port[loopcnt];
+		if (port->data == NULL)
+			continue;
+		pr_debug("%s: loopcnt = %d\n", __func__, loopcnt);
+		q6usm_us_client_buf_free(loopcnt, usc);
+		q6usm_us_param_buf_free(loopcnt, usc);
+	}
+	q6usm_session_free(usc);
+	apr_deregister(usc->apr);
+
+	pr_debug("%s: APR De-Register\n", __func__);
+
+	if (atomic_read(&this_mmap.ref_cnt) <= 0) {
+		pr_err("%s: APR Common Port Already Closed\n", __func__);
+		goto done;
+	}
+
+	atomic_dec(&this_mmap.ref_cnt);
+	if (atomic_read(&this_mmap.ref_cnt) == 0) {
+		apr_deregister(this_mmap.apr);
+		pr_debug("%s: APR De-Register common port\n", __func__);
+	}
+
+done:
+	p_mem_handle = (uint32_t *)usc->port[IN].ext;
+	kfree(p_mem_handle);
+	kfree(usc);
+	pr_debug("%s:\n", __func__);
+	return;
+}
+
+struct us_client *q6usm_us_client_alloc(
+	void (*cb)(uint32_t, uint32_t, uint32_t *, void *),
+	void *priv)
+{
+	struct us_client *usc;
+	uint32_t *p_mem_handle = NULL;
+	int n;
+	int lcnt = 0;
+
+	usc = kzalloc(sizeof(struct us_client), GFP_KERNEL);
+	if (usc == NULL) {
+		pr_err("%s: us_client allocation failed\n", __func__);
+		return NULL;
+	}
+	p_mem_handle = kzalloc(sizeof(uint32_t) * 4, GFP_KERNEL);
+	if (p_mem_handle == NULL) {
+		pr_err("%s: p_mem_handle allocation failed\n", __func__);
+		kfree(usc);
+		return NULL;
+	}
+
+	n = q6usm_session_alloc(usc);
+	if (n <= 0)
+		goto fail_session;
+	usc->session = n;
+	usc->cb = cb;
+	usc->priv = priv;
+	usc->apr = apr_register("ADSP", "USM",
+				(apr_fn)q6usm_callback,
+				((usc->session) << 8 | 0x0001),
+				usc);
+
+	if (usc->apr == NULL) {
+		pr_err("%s: Registration with APR failed\n", __func__);
+		goto fail;
+	}
+	pr_debug("%s: Registering the common port with APR\n", __func__);
+	if (atomic_read(&this_mmap.ref_cnt) == 0) {
+		this_mmap.apr = apr_register("ADSP", "USM",
+					     (apr_fn)q6usm_mmapcallback,
+					     0x0FFFFFFFF, &this_mmap);
+		if (this_mmap.apr == NULL) {
+			pr_err("%s: USM port registration failed\n",
+			       __func__);
+			goto fail;
+		}
+	}
+
+	atomic_inc(&this_mmap.ref_cnt);
+	init_waitqueue_head(&usc->cmd_wait);
+	mutex_init(&usc->cmd_lock);
+	for (lcnt = 0; lcnt <= OUT; ++lcnt) {
+		mutex_init(&usc->port[lcnt].lock);
+		spin_lock_init(&usc->port[lcnt].dsp_lock);
+		usc->port[lcnt].ext = (void *)p_mem_handle++;
+		usc->port[lcnt].param_buf_mem_handle = (void *)p_mem_handle++;
+		pr_err("%s: usc->port[%d].ext=%pK;\n",
+		       __func__, lcnt, usc->port[lcnt].ext);
+	}
+	atomic_set(&usc->cmd_state, 0);
+
+	return usc;
+fail:
+	kfree(p_mem_handle);
+	q6usm_us_client_free(usc);
+	return NULL;
+fail_session:
+	kfree(p_mem_handle);
+	kfree(usc);
+	return NULL;
+}
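+
+/*
+ * Note: the four u32s allocated as p_mem_handle above are handed out
+ * in order as port[IN].ext, port[IN].param_buf_mem_handle,
+ * port[OUT].ext and port[OUT].param_buf_mem_handle;
+ * q6usm_us_client_free() releases the whole array through
+ * port[IN].ext, its first element.
+ */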
+
+int q6usm_us_client_buf_alloc(unsigned int dir,
+			      struct us_client *usc,
+			      unsigned int bufsz,
+			      unsigned int bufcnt)
+{
+	int rc = 0;
+	struct us_port_data *port = NULL;
+	unsigned int size = bufsz*bufcnt;
+	size_t len;
+
+	if ((usc == NULL) ||
+	    ((dir != IN) && (dir != OUT)) || (size == 0) ||
+	    (usc->session <= 0 || usc->session > USM_SESSION_MAX)) {
+		pr_err("%s: wrong parameters: size=%d; bufcnt=%d\n",
+		       __func__, size, bufcnt);
+		return -EINVAL;
+	}
+
+	mutex_lock(&usc->cmd_lock);
+
+	port = &usc->port[dir];
+
+	/* The size to allocate should be multiple of 4K bytes */
+	size = PAGE_ALIGN(size);
+
+	rc = msm_audio_ion_alloc("ultrasound_client",
+		&port->client, &port->handle,
+		size, &port->phys,
+		&len, &port->data);
+
+	if (rc) {
+		pr_err("%s: US ION allocation failed, rc = %d\n",
+			__func__, rc);
+		mutex_unlock(&usc->cmd_lock);
+		return -ENOMEM;
+	}
+
+	port->buf_cnt = bufcnt;
+	port->buf_size = bufsz;
+	pr_debug("%s: data[%pK]; phys[%llx]; [%pK]\n", __func__,
+		 (void *)port->data,
+		 (u64)port->phys,
+		 (void *)&port->phys);
+
+	rc = q6usm_memory_map(port->phys, dir, size, 1, usc->session,
+				(uint32_t *)port->ext);
+	if (rc < 0) {
+		pr_err("%s: CMD Memory_map failed\n", __func__);
+		mutex_unlock(&usc->cmd_lock);
+		q6usm_us_client_buf_free(dir, usc);
+		q6usm_us_param_buf_free(dir, usc);
+	} else {
+		mutex_unlock(&usc->cmd_lock);
+		rc = 0;
+	}
+
+	return rc;
+}
+
+int q6usm_us_param_buf_alloc(unsigned int dir,
+			struct us_client *usc,
+			unsigned int bufsz)
+{
+	int rc = 0;
+	struct us_port_data *port = NULL;
+	unsigned int size = bufsz;
+	size_t len;
+
+	if ((usc == NULL) ||
+		((dir != IN) && (dir != OUT)) ||
+		(usc->session <= 0 || usc->session > USM_SESSION_MAX)) {
+		pr_err("%s: wrong parameters: direction=%d, bufsz=%d\n",
+			__func__, dir, bufsz);
+		return -EINVAL;
+	}
+
+	mutex_lock(&usc->cmd_lock);
+
+	port = &usc->port[dir];
+
+	if (bufsz == 0) {
+		pr_debug("%s: bufsz=0, get/set param commands are forbidden\n",
+			__func__);
+		port->param_buf = NULL;
+		mutex_unlock(&usc->cmd_lock);
+		return rc;
+	}
+
+	/* The size to allocate should be multiple of 4K bytes */
+	size = PAGE_ALIGN(size);
+
+	rc = msm_audio_ion_alloc("ultrasound_client",
+		&port->param_client, &port->param_handle,
+		size, &port->param_phys,
+		&len, &port->param_buf);
+
+	if (rc) {
+		pr_err("%s: US ION allocation failed, rc = %d\n",
+			__func__, rc);
+		mutex_unlock(&usc->cmd_lock);
+		return -ENOMEM;
+	}
+
+	port->param_buf_size = bufsz;
+	pr_debug("%s: param_buf[%pK]; param_phys[%llx]; [%pK]\n", __func__,
+		 (void *)port->param_buf,
+		 (u64)port->param_phys,
+		 (void *)&port->param_phys);
+
+	rc = q6usm_memory_map(port->param_phys, (IN | OUT), size, 1,
+			usc->session, (uint32_t *)port->param_buf_mem_handle);
+	if (rc < 0) {
+		pr_err("%s: CMD Memory_map failed\n", __func__);
+		mutex_unlock(&usc->cmd_lock);
+		q6usm_us_client_buf_free(dir, usc);
+		q6usm_us_param_buf_free(dir, usc);
+	} else {
+		mutex_unlock(&usc->cmd_lock);
+		rc = 0;
+	}
+
+	return rc;
+}
+
+static int32_t q6usm_mmapcallback(struct apr_client_data *data, void *priv)
+{
+	uint32_t token;
+	uint32_t *payload = data->payload;
+
+	pr_debug("%s: ptr0[0x%x]; ptr1[0x%x]; opcode[0x%x]\n",
+		 __func__, payload[0], payload[1], data->opcode);
+	pr_debug("%s: token[0x%x]; payload_size[%d]; src[%d]; dest[%d];\n",
+		 __func__, data->token, data->payload_size,
+		 data->src_port, data->dest_port);
+
+	if (data->opcode == APR_BASIC_RSP_RESULT) {
+		/* status field check */
+		if (payload[1]) {
+			pr_err("%s: wrong response[%d] on cmd [%d]\n",
+			       __func__, payload[1], payload[0]);
+		} else {
+			token = data->token;
+			switch (payload[0]) {
+			case USM_CMD_SHARED_MEM_UNMAP_REGION:
+				if (atomic_read(&this_mmap.cmd_state)) {
+					atomic_set(&this_mmap.cmd_state, 0);
+					wake_up(&this_mmap.cmd_wait);
+				}
+			case USM_CMD_SHARED_MEM_MAP_REGION:
+				/* For MEM_MAP an additional response is */
+				/* awaited; therefore, no wake-up here */
+				pr_debug("%s: cmd[0x%x]; result[0x%x]\n",
+					 __func__, payload[0], payload[1]);
+				break;
+			default:
+				pr_debug("%s: wrong command[0x%x]\n",
+					 __func__, payload[0]);
+				break;
+			}
+		}
+	} else {
+		if (data->opcode == USM_CMDRSP_SHARED_MEM_MAP_REGION) {
+			this_mmap.mem_handle = payload[0];
+			pr_debug("%s: memory map handle = 0x%x",
+				__func__, payload[0]);
+			if (atomic_read(&this_mmap.cmd_state)) {
+				atomic_set(&this_mmap.cmd_state, 0);
+				wake_up(&this_mmap.cmd_wait);
+			}
+		}
+	}
+	return 0;
+}
+
+static int32_t q6usm_callback(struct apr_client_data *data, void *priv)
+{
+	struct us_client *usc = (struct us_client *)priv;
+	unsigned long dsp_flags;
+	uint32_t *payload = data->payload;
+	uint32_t token = data->token;
+	uint32_t opcode = Q6USM_EVENT_UNDEF;
+
+	if (usc == NULL) {
+		pr_err("%s: client info is NULL\n", __func__);
+		return -EINVAL;
+	}
+
+	if (data->opcode == APR_BASIC_RSP_RESULT) {
+		/* status field check */
+		if (payload[1]) {
+			pr_err("%s: wrong response[%d] on cmd [%d]\n",
+			       __func__, payload[1], payload[0]);
+			if (usc->cb)
+				usc->cb(data->opcode, token,
+					(uint32_t *)data->payload, usc->priv);
+		} else {
+			switch (payload[0]) {
+			case USM_SESSION_CMD_RUN:
+			case USM_STREAM_CMD_CLOSE:
+				if (token != usc->session) {
+					pr_err("%s: wrong token[%d]",
+					       __func__, token);
+					break;
+				}
+			case USM_STREAM_CMD_OPEN_READ:
+			case USM_STREAM_CMD_OPEN_WRITE:
+			case USM_STREAM_CMD_SET_ENC_PARAM:
+			case USM_DATA_CMD_MEDIA_FORMAT_UPDATE:
+			case USM_SESSION_CMD_SIGNAL_DETECT_MODE:
+			case USM_STREAM_CMD_SET_PARAM:
+			case USM_STREAM_CMD_GET_PARAM:
+				if (atomic_read(&usc->cmd_state)) {
+					atomic_set(&usc->cmd_state, 0);
+					wake_up(&usc->cmd_wait);
+				}
+				if (usc->cb)
+					usc->cb(data->opcode, token,
+						(uint32_t *)data->payload,
+						usc->priv);
+				break;
+			default:
+				break;
+			}
+		}
+		return 0;
+	}
+
+	switch (data->opcode) {
+	case RESET_EVENTS: {
+		pr_err("%s: Reset event is received: %d %d\n",
+				__func__,
+				data->reset_event,
+				data->reset_proc);
+
+		opcode = RESET_EVENTS;
+
+		apr_reset(this_mmap.apr);
+		this_mmap.apr = NULL;
+
+		apr_reset(usc->apr);
+		usc->apr = NULL;
+
+		break;
+	}
+
+	case USM_DATA_EVENT_READ_DONE: {
+		struct us_port_data *port = &usc->port[OUT];
+
+		opcode = Q6USM_EVENT_READ_DONE;
+		spin_lock_irqsave(&port->dsp_lock, dsp_flags);
+		if (payload[READDONE_IDX_STATUS]) {
+			pr_err("%s: wrong READDONE[%d]; token[%d]\n",
+			       __func__,
+			       payload[READDONE_IDX_STATUS],
+			       token);
+			token = USM_WRONG_TOKEN;
+			spin_unlock_irqrestore(&port->dsp_lock,
+					       dsp_flags);
+			break;
+		}
+
+		if (port->expected_token != token) {
+			u32 cpu_buf = port->cpu_buf;
+			pr_err("%s: expected[%d] != token[%d]\n",
+				__func__, port->expected_token, token);
+			pr_debug("%s: dsp_buf=%d; cpu_buf=%d;\n",
+				__func__,   port->dsp_buf, cpu_buf);
+
+			token = USM_WRONG_TOKEN;
+			/* To prevent further data handling */
+			port->expected_token = USM_WRONG_TOKEN;
+			spin_unlock_irqrestore(&port->dsp_lock,
+					       dsp_flags);
+			break;
+		} /* port->expected_token != data->token */
+
+		port->expected_token = token + 1;
+		if (port->expected_token == port->buf_cnt)
+			port->expected_token = 0;
+
+		/* gap support */
+		if (port->expected_token != port->cpu_buf) {
+			port->dsp_buf = port->expected_token;
+			token = port->dsp_buf; /* for callback */
+		} else
+			port->dsp_buf = token;
+
+		spin_unlock_irqrestore(&port->dsp_lock, dsp_flags);
+		break;
+	} /* case USM_DATA_EVENT_READ_DONE */
+
+	case USM_DATA_EVENT_WRITE_DONE: {
+		struct us_port_data *port = &usc->port[IN];
+
+		opcode = Q6USM_EVENT_WRITE_DONE;
+		if (payload[WRITEDONE_IDX_STATUS]) {
+			pr_err("%s: wrong WRITEDONE_IDX_STATUS[%d]\n",
+			       __func__,
+			       payload[WRITEDONE_IDX_STATUS]);
+			break;
+		}
+
+		spin_lock_irqsave(&port->dsp_lock, dsp_flags);
+		port->dsp_buf = token + 1;
+		if (port->dsp_buf == port->buf_cnt)
+			port->dsp_buf = 0;
+		spin_unlock_irqrestore(&port->dsp_lock, dsp_flags);
+
+		break;
+	} /* case USM_DATA_EVENT_WRITE_DONE */
+
+	case USM_SESSION_EVENT_SIGNAL_DETECT_RESULT: {
+		pr_debug("%s: US detect result: result=%d",
+			 __func__,
+			 payload[0]);
+		opcode = Q6USM_EVENT_SIGNAL_DETECT_RESULT;
+
+		break;
+	} /* case USM_SESSION_EVENT_SIGNAL_DETECT_RESULT */
+
+	default:
+		return 0;
+
+	} /* switch */
+
+	if (usc->cb)
+		usc->cb(opcode, token,
+			data->payload, usc->priv);
+
+	return 0;
+}
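+
+/*
+ * Note on the READ_DONE path above: tokens are expected to arrive in
+ * cyclic order; on a mismatch the event is reported to the client with
+ * USM_WRONG_TOKEN and expected_token is poisoned with the same value,
+ * so the following READ_DONE events are flagged as well.
+ */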
+
+uint32_t q6usm_get_virtual_address(int dir,
+				   struct us_client *usc,
+				   struct vm_area_struct *vms)
+{
+	uint32_t ret = 0xffffffff;
+
+	if (vms && (usc != NULL) && ((dir == IN) || (dir == OUT))) {
+		struct us_port_data *port = &usc->port[dir];
+		int size = PAGE_ALIGN(port->buf_size * port->buf_cnt);
+		struct audio_buffer ab;
+
+		ab.phys = port->phys;
+		ab.data = port->data;
+		ab.used = 1;
+		ab.size = size;
+		ab.actual_size = size;
+		ab.handle = port->handle;
+		ab.client = port->client;
+
+		ret = msm_audio_ion_mmap(&ab, vms);
+
+	}
+	return ret;
+}
+
+static void q6usm_add_hdr(struct us_client *usc, struct apr_hdr *hdr,
+			  uint32_t pkt_size, bool cmd_flg)
+{
+	mutex_lock(&usc->cmd_lock);
+	hdr->hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+				       APR_HDR_LEN(sizeof(struct apr_hdr)),
+				       APR_PKT_VER);
+	hdr->src_svc = ((struct apr_svc *)usc->apr)->id;
+	hdr->src_domain = APR_DOMAIN_APPS;
+	hdr->dest_svc = APR_SVC_USM;
+	hdr->dest_domain = APR_DOMAIN_ADSP;
+	hdr->src_port = (usc->session << 8) | 0x0001;
+	hdr->dest_port = (usc->session << 8) | 0x0001;
+	if (cmd_flg) {
+		hdr->token = usc->session;
+		atomic_set(&usc->cmd_state, 1);
+	}
+	hdr->pkt_size  = pkt_size;
+	mutex_unlock(&usc->cmd_lock);
+	return;
+}
+
+static uint32_t q6usm_ext2int_format(uint32_t ext_format)
+{
+	uint32_t int_format = INVALID_FORMAT;
+	switch (ext_format) {
+	case FORMAT_USPS_EPOS:
+		int_format = US_POINT_EPOS_FORMAT_V2;
+		break;
+	case FORMAT_USRAW:
+		int_format = US_RAW_FORMAT_V2;
+		break;
+	case FORMAT_USPROX:
+		int_format = US_PROX_FORMAT_V4;
+		break;
+	case FORMAT_USGES_SYNC:
+		int_format = US_GES_SYNC_FORMAT;
+		break;
+	case FORMAT_USRAW_SYNC:
+		int_format = US_RAW_SYNC_FORMAT;
+		break;
+	default:
+		pr_err("%s: Invalid format[%d]\n", __func__, ext_format);
+		break;
+	}
+
+	return int_format;
+}
+
+int q6usm_open_read(struct us_client *usc,
+		    uint32_t format)
+{
+	uint32_t int_format = INVALID_FORMAT;
+	int rc = 0x00;
+	struct usm_stream_cmd_open_read open;
+
+	if ((usc == NULL) || (usc->apr == NULL)) {
+		pr_err("%s: client or its apr is NULL\n", __func__);
+		return -EINVAL;
+	}
+
+	pr_debug("%s: session[%d]", __func__, usc->session);
+
+	q6usm_add_hdr(usc, &open.hdr, sizeof(open), true);
+	open.hdr.opcode = USM_STREAM_CMD_OPEN_READ;
+	open.src_endpoint = 0; /* AFE */
+	open.pre_proc_top = 0; /* No preprocessing required */
+
+	int_format = q6usm_ext2int_format(format);
+	if (int_format == INVALID_FORMAT)
+		return -EINVAL;
+
+	open.uMode = STREAM_PRIORITY_NORMAL;
+	open.format = int_format;
+
+	rc = apr_send_pkt(usc->apr, (uint32_t *) &open);
+	if (rc < 0) {
+		pr_err("%s: open failed op[0x%x]rc[%d]\n",
+		       __func__, open.hdr.opcode, rc);
+		goto fail_cmd;
+	}
+	rc = wait_event_timeout(usc->cmd_wait,
+				(atomic_read(&usc->cmd_state) == 0),
+				Q6USM_TIMEOUT_JIFFIES);
+	if (!rc) {
+		rc = -ETIME;
+		pr_err("%s: timeout, waited for OPEN_READ rc[%d]\n",
+		       __func__, rc);
+		goto fail_cmd;
+	} else
+		rc = 0;
+fail_cmd:
+	return rc;
+}
+
+int q6usm_enc_cfg_blk(struct us_client *usc, struct us_encdec_cfg *us_cfg)
+{
+	uint32_t int_format = INVALID_FORMAT;
+	struct usm_stream_cmd_encdec_cfg_blk  enc_cfg_obj;
+	struct usm_stream_cmd_encdec_cfg_blk  *enc_cfg = &enc_cfg_obj;
+	int rc = 0;
+	uint32_t total_cfg_size =
+		sizeof(struct usm_stream_cmd_encdec_cfg_blk);
+	uint32_t round_params_size = 0;
+	uint8_t  is_allocated = 0;
+
+	if ((usc == NULL) || (us_cfg == NULL)) {
+		pr_err("%s: wrong input", __func__);
+		return -EINVAL;
+	}
+
+	int_format = q6usm_ext2int_format(us_cfg->format_id);
+	if (int_format == INVALID_FORMAT) {
+		pr_err("%s: wrong input format[%d]",
+		       __func__, us_cfg->format_id);
+		return -EINVAL;
+	}
+
+	/* Transparent configuration data is after enc_cfg */
+	/* Integer number of u32s is required */
+	round_params_size = ((us_cfg->params_size + 3)/4) * 4;
+	if (round_params_size > USM_MAX_CFG_DATA_SIZE) {
+		/* Dynamic allocated encdec_cfg_blk is required */
+		/* static part use */
+		round_params_size -= USM_MAX_CFG_DATA_SIZE;
+		total_cfg_size += round_params_size;
+		enc_cfg = kzalloc(total_cfg_size, GFP_KERNEL);
+		if (enc_cfg == NULL) {
+			pr_err("%s: enc_cfg[%d] allocation failed\n",
+			       __func__, total_cfg_size);
+			return -ENOMEM;
+		}
+		is_allocated = 1;
+	} else
+		round_params_size = 0;
+
+	q6usm_add_hdr(usc, &enc_cfg->hdr, total_cfg_size, true);
+
+	enc_cfg->hdr.opcode = USM_STREAM_CMD_SET_ENC_PARAM;
+	enc_cfg->param_id = USM_PARAM_ID_ENCDEC_ENC_CFG_BLK;
+	enc_cfg->param_size = sizeof(struct usm_encode_cfg_blk)+
+				round_params_size;
+	enc_cfg->enc_blk.frames_per_buf = 1;
+	enc_cfg->enc_blk.format_id = int_format;
+	enc_cfg->enc_blk.cfg_size = sizeof(struct usm_cfg_common)+
+				    USM_MAX_CFG_DATA_SIZE +
+				    round_params_size;
+	memcpy(&(enc_cfg->enc_blk.cfg_common), &(us_cfg->cfg_common),
+	       sizeof(struct usm_cfg_common));
+
+	/* Transparent data copy */
+	memcpy(enc_cfg->enc_blk.transp_data, us_cfg->params,
+	       us_cfg->params_size);
+	pr_debug("%s: cfg_size[%d], params_size[%d]\n",
+		__func__,
+		enc_cfg->enc_blk.cfg_size,
+		us_cfg->params_size);
+	pr_debug("%s: params[%d,%d,%d,%d, %d,%d,%d,%d]\n",
+		__func__,
+		enc_cfg->enc_blk.transp_data[0],
+		enc_cfg->enc_blk.transp_data[1],
+		enc_cfg->enc_blk.transp_data[2],
+		enc_cfg->enc_blk.transp_data[3],
+		enc_cfg->enc_blk.transp_data[4],
+		enc_cfg->enc_blk.transp_data[5],
+		enc_cfg->enc_blk.transp_data[6],
+		enc_cfg->enc_blk.transp_data[7]
+	       );
+	pr_debug("%s: srate:%d, ch=%d, bps= %d;\n",
+		__func__, enc_cfg->enc_blk.cfg_common.sample_rate,
+		enc_cfg->enc_blk.cfg_common.ch_cfg,
+		enc_cfg->enc_blk.cfg_common.bits_per_sample);
+	pr_debug("dmap:[0x%x,0x%x,0x%x,0x%x,0x%x,0x%x,0x%x,0x%x]; dev_id=0x%x\n",
+		enc_cfg->enc_blk.cfg_common.data_map[0],
+		enc_cfg->enc_blk.cfg_common.data_map[1],
+		enc_cfg->enc_blk.cfg_common.data_map[2],
+		enc_cfg->enc_blk.cfg_common.data_map[3],
+		enc_cfg->enc_blk.cfg_common.data_map[4],
+		enc_cfg->enc_blk.cfg_common.data_map[5],
+		enc_cfg->enc_blk.cfg_common.data_map[6],
+		enc_cfg->enc_blk.cfg_common.data_map[7],
+		enc_cfg->enc_blk.cfg_common.dev_id);
+
+	rc = apr_send_pkt(usc->apr, (uint32_t *) enc_cfg);
+	if (rc < 0) {
+		pr_err("%s:Comamnd open failed\n", __func__);
+		rc = -EINVAL;
+		goto fail_cmd;
+	}
+	rc = wait_event_timeout(usc->cmd_wait,
+				(atomic_read(&usc->cmd_state) == 0),
+				Q6USM_TIMEOUT_JIFFIES);
+	if (!rc) {
+		rc = -ETIME;
+		pr_err("%s: timeout opcode[0x%x]\n",
+		       __func__, enc_cfg->hdr.opcode);
+	} else
+		rc = 0;
+
+fail_cmd:
+	if (is_allocated == 1)
+		kfree(enc_cfg);
+
+	return rc;
+}
+
+int q6usm_dec_cfg_blk(struct us_client *usc, struct us_encdec_cfg *us_cfg)
+{
+	uint32_t int_format = INVALID_FORMAT;
+	struct usm_stream_media_format_update dec_cfg_obj;
+	struct usm_stream_media_format_update *dec_cfg = &dec_cfg_obj;
+
+	int rc = 0;
+	uint32_t total_cfg_size = sizeof(struct usm_stream_media_format_update);
+	uint32_t round_params_size = 0;
+	uint8_t  is_allocated = 0;
+
+	if ((usc == NULL) || (us_cfg == NULL)) {
+		pr_err("%s: wrong input", __func__);
+		return -EINVAL;
+	}
+
+	int_format = q6usm_ext2int_format(us_cfg->format_id);
+	if (int_format == INVALID_FORMAT) {
+		pr_err("%s: wrong input format[%d]",
+		       __func__, us_cfg->format_id);
+		return -EINVAL;
+	}
+
+	/* Transparent configuration data is after dec_cfg */
+	/* Integer number of u32s is required */
+	round_params_size = ((us_cfg->params_size + 3)/4) * 4;
+	if (round_params_size > USM_MAX_CFG_DATA_SIZE) {
+		/* Dynamic allocated encdec_cfg_blk is required */
+		/* static part use */
+		round_params_size -= USM_MAX_CFG_DATA_SIZE;
+		total_cfg_size += round_params_size;
+		dec_cfg = kzalloc(total_cfg_size, GFP_KERNEL);
+		if (dec_cfg == NULL) {
+			pr_err("%s:dec_cfg[%d] allocation failed\n",
+			       __func__, total_cfg_size);
+			return -ENOMEM;
+		}
+		is_allocated = 1;
+	} else { /* static transp_data is enough */
+		round_params_size = 0;
+	}
+
+	q6usm_add_hdr(usc, &dec_cfg->hdr, total_cfg_size, true);
+
+	dec_cfg->hdr.opcode = USM_DATA_CMD_MEDIA_FORMAT_UPDATE;
+	dec_cfg->format_id = int_format;
+	dec_cfg->cfg_size = sizeof(struct usm_cfg_common) +
+			    USM_MAX_CFG_DATA_SIZE +
+			    round_params_size;
+	memcpy(&(dec_cfg->cfg_common), &(us_cfg->cfg_common),
+	       sizeof(struct usm_cfg_common));
+	/* Transparent data copy */
+	memcpy(dec_cfg->transp_data, us_cfg->params, us_cfg->params_size);
+	pr_debug("%s: cfg_size[%d], params_size[%d]; parambytes[%d,%d,%d,%d]\n",
+		__func__,
+		dec_cfg->cfg_size,
+		us_cfg->params_size,
+		dec_cfg->transp_data[0],
+		dec_cfg->transp_data[1],
+		dec_cfg->transp_data[2],
+		dec_cfg->transp_data[3]
+	       );
+
+	rc = apr_send_pkt(usc->apr, (uint32_t *) dec_cfg);
+	if (rc < 0) {
+		pr_err("%s:Comamnd open failed\n", __func__);
+		rc = -EINVAL;
+		goto fail_cmd;
+	}
+	rc = wait_event_timeout(usc->cmd_wait,
+				(atomic_read(&usc->cmd_state) == 0),
+				Q6USM_TIMEOUT_JIFFIES);
+	if (!rc) {
+		rc = -ETIME;
+		pr_err("%s: timeout opcode[0x%x]\n",
+		       __func__, dec_cfg->hdr.opcode);
+	} else
+		rc = 0;
+
+fail_cmd:
+	if (is_allocated == 1)
+		kfree(dec_cfg);
+
+	return rc;
+}
+
+int q6usm_open_write(struct us_client *usc,
+		     uint32_t format)
+{
+	int rc = 0;
+	uint32_t int_format = INVALID_FORMAT;
+	struct usm_stream_cmd_open_write open;
+
+	if ((usc == NULL) || (usc->apr == NULL)) {
+		pr_err("%s: APR handle NULL\n", __func__);
+		return -EINVAL;
+	}
+
+	pr_debug("%s: session[%d]", __func__, usc->session);
+
+	q6usm_add_hdr(usc, &open.hdr, sizeof(open), true);
+	open.hdr.opcode = USM_STREAM_CMD_OPEN_WRITE;
+
+	int_format = q6usm_ext2int_format(format);
+	if (int_format == INVALID_FORMAT) {
+		pr_err("%s: wrong format[%d]", __func__, format);
+		return -EINVAL;
+	}
+
+	open.format = int_format;
+
+	rc = apr_send_pkt(usc->apr, (uint32_t *) &open);
+	if (rc < 0) {
+		pr_err("%s:open failed op[0x%x]rc[%d]\n", \
+		       __func__, open.hdr.opcode, rc);
+		goto fail_cmd;
+	}
+	rc = wait_event_timeout(usc->cmd_wait,
+				(atomic_read(&usc->cmd_state) == 0),
+				Q6USM_TIMEOUT_JIFFIES);
+	if (!rc) {
+		rc = -ETIME;
+		pr_err("%s:timeout. waited for OPEN_WRITR rc[%d]\n",
+		       __func__, rc);
+		goto fail_cmd;
+	} else
+		rc = 0;
+
+fail_cmd:
+	return rc;
+}
+
+int q6usm_run(struct us_client *usc, uint32_t flags,
+	      uint32_t msw_ts, uint32_t lsw_ts)
+{
+	struct usm_stream_cmd_run run;
+	int rc = 0;
+
+	if ((usc == NULL) || (usc->apr == NULL)) {
+		pr_err("%s: APR handle NULL\n", __func__);
+		return -EINVAL;
+	}
+	q6usm_add_hdr(usc, &run.hdr, sizeof(run), true);
+
+	run.hdr.opcode = USM_SESSION_CMD_RUN;
+	run.flags    = flags;
+	run.msw_ts   = msw_ts;
+	run.lsw_ts   = lsw_ts;
+
+	rc = apr_send_pkt(usc->apr, (uint32_t *) &run);
+	if (rc < 0) {
+		pr_err("%s: Commmand run failed[%d]\n", __func__, rc);
+		goto fail_cmd;
+	}
+
+	rc = wait_event_timeout(usc->cmd_wait,
+				(atomic_read(&usc->cmd_state) == 0),
+				Q6USM_TIMEOUT_JIFFIES);
+	if (!rc) {
+		rc = -ETIME;
+		pr_err("%s: timeout. waited for run success rc[%d]\n",
+		       __func__, rc);
+	} else
+		rc = 0;
+
+fail_cmd:
+	return rc;
+}
+
+int q6usm_read(struct us_client *usc, uint32_t read_ind)
+{
+	struct usm_stream_cmd_read read;
+	struct us_port_data *port = NULL;
+	int rc = 0;
+	u32 read_counter = 0;
+	u32 loop_ind = 0;
+	u64 buf_addr = 0;
+
+	if ((usc == NULL) || (usc->apr == NULL)) {
+		pr_err("%s: APR handle NULL\n", __func__);
+		return -EINVAL;
+	}
+	port = &usc->port[OUT];
+
+	if (read_ind > port->buf_cnt) {
+		pr_err("%s: wrong read_ind[%d]\n",
+		       __func__, read_ind);
+		return -EINVAL;
+	}
+	if (read_ind == port->cpu_buf) {
+		pr_err("%s: no free region\n", __func__);
+		return 0;
+	}
+
+	if (read_ind > port->cpu_buf) { /* 1 range */
+		read_counter = read_ind - port->cpu_buf;
+	} else { /* 2 ranges */
+		read_counter = (port->buf_cnt - port->cpu_buf) + read_ind;
+	}
+
+	q6usm_add_hdr(usc, &read.hdr, sizeof(read), false);
+
+	read.hdr.opcode = USM_DATA_CMD_READ;
+	read.buf_size = port->buf_size;
+	buf_addr = (u64)(port->phys) + port->buf_size * (port->cpu_buf);
+	read.buf_addr_lsw = lower_32_bits(buf_addr);
+	read.buf_addr_msw = msm_audio_populate_upper_32_bits(buf_addr);
+	read.mem_map_handle = *((uint32_t *)(port->ext));
+
+	for (loop_ind = 0; loop_ind < read_counter; ++loop_ind) {
+		u32 temp_cpu_buf = port->cpu_buf;
+
+		buf_addr = (u64)(port->phys) +
+				port->buf_size * (port->cpu_buf);
+		read.buf_addr_lsw = lower_32_bits(buf_addr);
+		read.buf_addr_msw = msm_audio_populate_upper_32_bits(buf_addr);
+		read.seq_id = port->cpu_buf;
+		read.hdr.token = port->cpu_buf;
+		read.counter = 1;
+
+		++(port->cpu_buf);
+		if (port->cpu_buf == port->buf_cnt)
+			port->cpu_buf = 0;
+
+		rc = apr_send_pkt(usc->apr, (uint32_t *) &read);
+
+		if (rc < 0) {
+			port->cpu_buf = temp_cpu_buf;
+
+			pr_err("%s:read op[0x%x]rc[%d]\n",
+			       __func__, read.hdr.opcode, rc);
+			break;
+		} else
+			rc = 0;
+	} /* bufs loop */
+
+	return rc;
+}
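+
+/*
+ * A worked example of the range accounting in q6usm_read() above: with
+ * buf_cnt = 8, cpu_buf = 6 and read_ind = 2 the free region wraps, so
+ * read_counter = (8 - 6) + 2 = 4 and buffers 6, 7, 0, 1 are queued to
+ * the DSP, one USM_DATA_CMD_READ packet each.
+ */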
+
+int q6usm_write(struct us_client *usc, uint32_t write_ind)
+{
+	int rc = 0;
+	struct usm_stream_cmd_write cmd_write;
+	struct us_port_data *port = NULL;
+	u32 current_dsp_buf = 0;
+	u64 buf_addr = 0;
+
+	if ((usc == NULL) || (usc->apr == NULL)) {
+		pr_err("%s: APR handle NULL\n", __func__);
+		return -EINVAL;
+	}
+	port = &usc->port[IN];
+
+	current_dsp_buf = port->dsp_buf;
+	/* free region, caused by new dsp_buf report from DSP, */
+	/* can be only extended */
+	if (port->cpu_buf >= current_dsp_buf) {
+		/* 2 -part free region, including empty buffer */
+		if ((write_ind <= port->cpu_buf)  &&
+		    (write_ind > current_dsp_buf)) {
+			pr_err("%s: wrong w_ind[%d]; d_buf=%d; c_buf=%d\n",
+			       __func__, write_ind,
+			       current_dsp_buf, port->cpu_buf);
+			return -EINVAL;
+		}
+	} else {
+		/* 1 -part free region */
+		if ((write_ind <= port->cpu_buf)  ||
+		    (write_ind > current_dsp_buf)) {
+			pr_err("%s: wrong w_ind[%d]; d_buf=%d; c_buf=%d\n",
+			       __func__, write_ind,
+			       current_dsp_buf, port->cpu_buf);
+			return -EINVAL;
+		}
+	}
+
+	q6usm_add_hdr(usc, &cmd_write.hdr, sizeof(cmd_write), false);
+
+	cmd_write.hdr.opcode = USM_DATA_CMD_WRITE;
+	cmd_write.buf_size = port->buf_size;
+	buf_addr = (u64)(port->phys) + port->buf_size * (port->cpu_buf);
+	cmd_write.buf_addr_lsw = lower_32_bits(buf_addr);
+	cmd_write.buf_addr_msw = msm_audio_populate_upper_32_bits(buf_addr);
+	cmd_write.mem_map_handle = *((uint32_t *)(port->ext));
+	cmd_write.res0 = 0;
+	cmd_write.res1 = 0;
+	cmd_write.res2 = 0;
+
+	while (port->cpu_buf != write_ind) {
+		u32 temp_cpu_buf = port->cpu_buf;
+
+		buf_addr = (u64)(port->phys) +
+				port->buf_size * (port->cpu_buf);
+		cmd_write.buf_addr_lsw = lower_32_bits(buf_addr);
+		cmd_write.buf_addr_msw =
+				msm_audio_populate_upper_32_bits(buf_addr);
+		cmd_write.seq_id = port->cpu_buf;
+		cmd_write.hdr.token = port->cpu_buf;
+
+		++(port->cpu_buf);
+		if (port->cpu_buf == port->buf_cnt)
+			port->cpu_buf = 0;
+
+		rc = apr_send_pkt(usc->apr, (uint32_t *) &cmd_write);
+
+		if (rc < 0) {
+			port->cpu_buf = temp_cpu_buf;
+			pr_err("%s:write op[0x%x];rc[%d];cpu_buf[%d]\n",
+			       __func__, cmd_write.hdr.opcode,
+			       rc, port->cpu_buf);
+			break;
+		}
+
+		rc = 0;
+	}
+
+	return rc;
+}
+
+bool q6usm_is_write_buf_full(struct us_client *usc, uint32_t *free_region)
+{
+	struct us_port_data *port = NULL;
+	u32 cpu_buf = 0;
+
+	if ((usc == NULL) || !free_region) {
+		pr_err("%s: input data wrong\n", __func__);
+		return false;
+	}
+	port = &usc->port[IN];
+	cpu_buf = port->cpu_buf + 1;
+	if (cpu_buf == port->buf_cnt)
+		cpu_buf = 0;
+
+	*free_region = port->dsp_buf;
+
+	return cpu_buf == *free_region;
+}
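+
+/*
+ * Note: the ports form a cyclic buffer with one gap (see
+ * USM_MIN_BUF_CNT in q6usm.h); q6usm_is_write_buf_full() reports full
+ * as soon as advancing cpu_buf by one (mod buf_cnt) would land on
+ * dsp_buf, so one slot always stays free to keep the read and write
+ * indexes distinguishable.
+ */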
+
+int q6usm_cmd(struct us_client *usc, int cmd)
+{
+	struct apr_hdr hdr;
+	int rc = 0;
+	atomic_t *state;
+
+	if ((usc == NULL) || (usc->apr == NULL)) {
+		pr_err("%s: APR handle NULL\n", __func__);
+		return -EINVAL;
+	}
+	q6usm_add_hdr(usc, &hdr, sizeof(hdr), true);
+	switch (cmd) {
+	case CMD_CLOSE:
+		hdr.opcode = USM_STREAM_CMD_CLOSE;
+		state = &usc->cmd_state;
+		break;
+
+	default:
+		pr_err("%s:Invalid format[%d]\n", __func__, cmd);
+		goto fail_cmd;
+	}
+
+	rc = apr_send_pkt(usc->apr, (uint32_t *) &hdr);
+	if (rc < 0) {
+		pr_err("%s: Command 0x%x failed\n", __func__, hdr.opcode);
+		goto fail_cmd;
+	}
+	rc = wait_event_timeout(usc->cmd_wait, (atomic_read(state) == 0),
+				Q6USM_TIMEOUT_JIFFIES);
+	if (!rc) {
+		rc = -ETIME;
+		pr_err("%s:timeout. waited for response opcode[0x%x]\n",
+		       __func__, hdr.opcode);
+	} else
+		rc = 0;
+fail_cmd:
+	return rc;
+}
+
+int q6usm_set_us_detection(struct us_client *usc,
+			   struct usm_session_cmd_detect_info *detect_info,
+			   uint16_t detect_info_size)
+{
+	int rc = 0;
+
+	if ((usc == NULL) ||
+	    (detect_info_size == 0) ||
+	    (detect_info == NULL)) {
+		pr_err("%s: wrong input: usc=0x%pK, inf_size=%d; info=0x%pK",
+		       __func__,
+		       usc,
+		       detect_info_size,
+		       detect_info);
+		return -EINVAL;
+	}
+
+	q6usm_add_hdr(usc, &detect_info->hdr, detect_info_size, true);
+
+	detect_info->hdr.opcode = USM_SESSION_CMD_SIGNAL_DETECT_MODE;
+
+	rc = apr_send_pkt(usc->apr, (uint32_t *)detect_info);
+	if (rc < 0) {
+		pr_err("%s:Comamnd signal detect failed\n", __func__);
+		return -EINVAL;
+	}
+	rc = wait_event_timeout(usc->cmd_wait,
+				(atomic_read(&usc->cmd_state) == 0),
+				Q6USM_TIMEOUT_JIFFIES);
+	if (!rc) {
+		rc = -ETIME;
+		pr_err("%s: CMD_SIGNAL_DETECT_MODE: timeout=%d\n",
+		       __func__, Q6USM_TIMEOUT_JIFFIES);
+	} else
+		rc = 0;
+
+	return rc;
+}
+
+int q6usm_set_us_stream_param(int dir, struct us_client *usc,
+		uint32_t module_id, uint32_t param_id, uint32_t buf_size)
+{
+	int rc = 0;
+	struct usm_stream_cmd_set_param cmd_set_param;
+	struct us_port_data *port = NULL;
+
+	if ((usc == NULL) || (usc->apr == NULL)) {
+		pr_err("%s: APR handle NULL\n", __func__);
+		return -EINVAL;
+	}
+	port = &usc->port[dir];
+
+	q6usm_add_hdr(usc, &cmd_set_param.hdr, sizeof(cmd_set_param), true);
+
+	cmd_set_param.hdr.opcode = USM_STREAM_CMD_SET_PARAM;
+	cmd_set_param.buf_size = buf_size;
+	cmd_set_param.buf_addr_msw =
+			msm_audio_populate_upper_32_bits(port->param_phys);
+	cmd_set_param.buf_addr_lsw = lower_32_bits(port->param_phys);
+	cmd_set_param.mem_map_handle =
+			*((uint32_t *)(port->param_buf_mem_handle));
+	cmd_set_param.module_id = module_id;
+	cmd_set_param.param_id = param_id;
+	cmd_set_param.hdr.token = 0;
+
+	rc = apr_send_pkt(usc->apr, (uint32_t *) &cmd_set_param);
+
+	if (rc < 0) {
+		pr_err("%s:set_param op[0x%x] failed; rc[%d]\n",
+			__func__, cmd_set_param.hdr.opcode, rc);
+		return rc;
+	}
+
+	rc = wait_event_timeout(usc->cmd_wait,
+				(atomic_read(&usc->cmd_state) == 0),
+				Q6USM_TIMEOUT_JIFFIES);
+	if (!rc) {
+		rc = -ETIME;
+		pr_err("%s: CMD_SET_PARAM: timeout=%d\n",
+			__func__, Q6USM_TIMEOUT_JIFFIES);
+	} else
+		rc = 0;
+
+	return rc;
+}
+
+int q6usm_get_us_stream_param(int dir, struct us_client *usc,
+		uint32_t module_id, uint32_t param_id, uint32_t buf_size)
+{
+	int rc = 0;
+	struct usm_stream_cmd_get_param cmd_get_param;
+	struct us_port_data *port = NULL;
+
+	if ((usc == NULL) || (usc->apr == NULL)) {
+		pr_err("%s: APR handle NULL\n", __func__);
+		return -EINVAL;
+	}
+	port = &usc->port[dir];
+
+	q6usm_add_hdr(usc, &cmd_get_param.hdr, sizeof(cmd_get_param), true);
+
+	cmd_get_param.hdr.opcode = USM_STREAM_CMD_GET_PARAM;
+	cmd_get_param.buf_size = buf_size;
+	cmd_get_param.buf_addr_msw =
+			msm_audio_populate_upper_32_bits(port->param_phys);
+	cmd_get_param.buf_addr_lsw = lower_32_bits(port->param_phys);
+	cmd_get_param.mem_map_handle =
+			*((uint32_t *)(port->param_buf_mem_handle));
+	cmd_get_param.module_id = module_id;
+	cmd_get_param.param_id = param_id;
+	cmd_get_param.hdr.token = 0;
+
+	rc = apr_send_pkt(usc->apr, (uint32_t *) &cmd_get_param);
+
+	if (rc < 0) {
+		pr_err("%s:get_param op[0x%x] failed; rc[%d]\n",
+			__func__, cmd_get_param.hdr.opcode, rc);
+		return rc;
+	}
+
+	rc = wait_event_timeout(usc->cmd_wait,
+				(atomic_read(&usc->cmd_state) == 0),
+				Q6USM_TIMEOUT_JIFFIES);
+	if (!rc) {
+		rc = -ETIME;
+		pr_err("%s: CMD_GET_PARAM: timeout=%d\n",
+			__func__, Q6USM_TIMEOUT_JIFFIES);
+	} else
+		rc = 0;
+
+	return rc;
+}
+
+static int __init q6usm_init(void)
+{
+	pr_debug("%s\n", __func__);
+	init_waitqueue_head(&this_mmap.cmd_wait);
+	memset(session, 0, sizeof(session));
+	return 0;
+}
+
+device_initcall(q6usm_init);
diff -Nruw linux-4.4.115-fbx/drivers/misc/qcom./qdsp6v2/ultrasound/q6usm.h linux-4.4.115-fbx/drivers/misc/qcom/qdsp6v2/ultrasound/q6usm.h
--- linux-4.4.115-fbx/drivers/misc/qcom./qdsp6v2/ultrasound/q6usm.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/misc/qcom/qdsp6v2/ultrasound/q6usm.h	2019-01-22 16:16:24.747257672 +0100
@@ -0,0 +1,130 @@
+/* Copyright (c) 2011-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef __Q6_USM_H__
+#define __Q6_USM_H__
+
+#include <linux/qdsp6v2/apr_us.h>
+
+#define Q6USM_EVENT_UNDEF                0
+#define Q6USM_EVENT_READ_DONE            1
+#define Q6USM_EVENT_WRITE_DONE           2
+#define Q6USM_EVENT_SIGNAL_DETECT_RESULT 3
+
+/* cyclic buffer with 1 gap support */
+#define USM_MIN_BUF_CNT 3
+
+#define FORMAT_USPS_EPOS	0x00000000
+#define FORMAT_USRAW		0x00000001
+#define FORMAT_USPROX		0x00000002
+#define FORMAT_USGES_SYNC	0x00000003
+#define FORMAT_USRAW_SYNC	0x00000004
+#define INVALID_FORMAT		0xffffffff
+
+#define IN			0x000
+#define OUT			0x001
+
+#define USM_WRONG_TOKEN		0xffffffff
+#define USM_UNDEF_TOKEN		0xfffffffe
+
+#define CMD_CLOSE		0x0004
+
+/* bit 0:1 represents priority of stream */
+#define STREAM_PRIORITY_NORMAL	0x0000
+#define STREAM_PRIORITY_LOW	0x0001
+#define STREAM_PRIORITY_HIGH	0x0002
+
+/* bit 4 represents META enable of encoded data buffer */
+#define BUFFER_META_ENABLE	0x0010
+
+struct us_port_data {
+	dma_addr_t	phys;
+	/* cyclic region of buffers with 1 gap */
+	void		*data;
+	/* number of buffers in the region */
+	uint32_t	buf_cnt;
+	/* size of buffer */
+	size_t		buf_size;
+	/* write index */
+	uint32_t	dsp_buf;
+	/* read index */
+	uint32_t	cpu_buf;
+	/* expected token from dsp */
+	uint32_t	expected_token;
+	/* read or write locks */
+	struct mutex	lock;
+	spinlock_t	dsp_lock;
+	/* ION memory handle */
+	struct      ion_handle *handle;
+	/* ION memory client */
+	struct      ion_client *client;
+	/* extended parameters, related to q6 variants */
+	void		*ext;
+	/* physical address of parameter buffer */
+	dma_addr_t	param_phys;
+	/* buffer which stores the parameter data */
+	void		*param_buf;
+	/* size of parameter buffer */
+	uint32_t	param_buf_size;
+	/* parameter buffer memory handle */
+	void		*param_buf_mem_handle;
+	/* ION memory handle for parameter buffer */
+	struct      ion_handle *param_handle;
+	/* ION memory client for parameter buffer */
+	struct      ion_client *param_client;
+};
+
+struct us_client {
+	int			session;
+	/* idx: 1 - out port, 0 - in port */
+	struct us_port_data	port[2];
+
+	struct apr_svc		*apr;
+	struct mutex		cmd_lock;
+
+	atomic_t		cmd_state;
+	atomic_t		eos_state;
+	wait_queue_head_t	cmd_wait;
+
+	void (*cb)(uint32_t, uint32_t, uint32_t *, void *);
+	void			*priv;
+};
+
+int q6usm_run(struct us_client *usc, uint32_t flags,
+	      uint32_t msw_ts, uint32_t lsw_ts);
+int q6usm_cmd(struct us_client *usc, int cmd);
+int q6usm_us_client_buf_alloc(unsigned int dir, struct us_client *usc,
+			      unsigned int bufsz, unsigned int bufcnt);
+int q6usm_us_param_buf_alloc(unsigned int dir, struct us_client *usc,
+			      unsigned int bufsz);
+int q6usm_enc_cfg_blk(struct us_client *usc, struct us_encdec_cfg *us_cfg);
+int q6usm_dec_cfg_blk(struct us_client *usc, struct us_encdec_cfg *us_cfg);
+int q6usm_read(struct us_client *usc, uint32_t read_ind);
+struct us_client *q6usm_us_client_alloc(
+	void (*cb)(uint32_t, uint32_t, uint32_t *, void *),
+	void *priv);
+int q6usm_open_read(struct us_client *usc, uint32_t format);
+void q6usm_us_client_free(struct us_client *usc);
+uint32_t q6usm_get_virtual_address(int dir, struct us_client *usc,
+				   struct vm_area_struct *vms);
+int q6usm_open_write(struct us_client *usc,  uint32_t format);
+int q6usm_write(struct us_client *usc, uint32_t write_ind);
+bool q6usm_is_write_buf_full(struct us_client *usc, uint32_t *free_region);
+int q6usm_set_us_detection(struct us_client *usc,
+			   struct usm_session_cmd_detect_info *detect_info,
+			   uint16_t detect_info_size);
+int q6usm_set_us_stream_param(int dir, struct us_client *usc,
+		uint32_t module_id, uint32_t param_id, uint32_t buf_size);
+int q6usm_get_us_stream_param(int dir, struct us_client *usc,
+		uint32_t module_id, uint32_t param_id, uint32_t buf_size);
+
+#endif /* __Q6_USM_H__ */
diff -Nruw linux-4.4.115-fbx/drivers/misc/qcom./qdsp6v2/ultrasound/usf.c linux-4.4.115-fbx/drivers/misc/qcom/qdsp6v2/ultrasound/usf.c
--- linux-4.4.115-fbx/drivers/misc/qcom./qdsp6v2/ultrasound/usf.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/misc/qcom/qdsp6v2/ultrasound/usf.c	2019-01-22 16:16:24.747257672 +0100
@@ -0,0 +1,2456 @@
+/* Copyright (c) 2011-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/compat.h>
+#include <linux/fs.h>
+#include <linux/module.h>
+#include <linux/miscdevice.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/wait.h>
+#include <linux/input.h>
+#include <linux/uaccess.h>
+#include <linux/time.h>
+#include <linux/kmemleak.h>
+#include <linux/wakelock.h>
+#include <linux/mutex.h>
+#include <sound/apr_audio.h>
+#include <linux/qdsp6v2/usf.h>
+#include "q6usm.h"
+#include "usfcdev.h"
+
+/* The driver version */
+#define DRV_VERSION "1.7.1"
+#define USF_VERSION_ID 0x0171
+
+/* Standard timeout in the asynchronous ops */
+#define USF_TIMEOUT_JIFFIES (1*HZ) /* 1 sec */
+
+/* Undefined USF device */
+#define USF_UNDEF_DEV_ID 0xffff
+
+/* TX memory mapping flag */
+#define USF_VM_READ 1
+/* RX memory mapping flag */
+#define USF_VM_WRITE 2
+
+/* Number of events, copied from the user space to kernel one */
+#define USF_EVENTS_PORTION_SIZE 20
+
+/* Indexes in range definitions */
+#define MIN_IND 0
+#define MAX_IND 1
+
+/* The coordinates indexes */
+#define X_IND 0
+#define Y_IND 1
+#define Z_IND 2
+
+/* Shared memory limits */
+/* max_buf_size = port_size(65535*2) * port_num(8) * group_size(3) = 3145680 */
+#define USF_MAX_BUF_SIZE 3145680
+#define USF_MAX_BUF_NUM  32
+
+/* max size for buffer set from user space */
+#define USF_MAX_USER_BUF_SIZE 100000
+
+/* Place for operation result, received from QDSP6 */
+#define APR_RESULT_IND 1
+
+/* Place for US detection result, received from QDSP6 */
+#define APR_US_DETECT_RESULT_IND 0
+
+#define BITS_IN_BYTE 8
+
+/* Time to stay awake after tx read event (e.g., proximity) */
+#define STAY_AWAKE_AFTER_READ_MSECS 3000
+
+/* The driver states */
+enum usf_state_type {
+	USF_IDLE_STATE,
+	USF_OPENED_STATE,
+	USF_CONFIGURED_STATE,
+	USF_WORK_STATE,
+	USF_ADSP_RESTART_STATE,
+	USF_ERROR_STATE
+};
+
+/* The US detection status upon FW/HW based US detection results */
+enum usf_us_detect_type {
+	USF_US_DETECT_UNDEF,
+	USF_US_DETECT_YES,
+	USF_US_DETECT_NO
+};
+
+struct usf_xx_type {
+	/* Name of the client - event calculator */
+	char client_name[USF_MAX_CLIENT_NAME_SIZE];
+	/* The driver state in TX or RX direction */
+	enum usf_state_type usf_state;
+	/* wait for q6 events mechanism */
+	wait_queue_head_t wait;
+	/* IF with q6usm info */
+	struct us_client *usc;
+	/* Q6:USM' Encoder/decoder configuration */
+	struct us_encdec_cfg encdec_cfg;
+	/* Shared buffer (with Q6:USM) size */
+	uint32_t buffer_size;
+	/* Number of the shared buffers (with Q6:USM) */
+	uint32_t buffer_count;
+	/* Shared memory (Cyclic buffer with 1 gap) control */
+	uint32_t new_region;
+	uint32_t prev_region;
+	/* Q6:USM's events handler */
+	void (*cb)(uint32_t, uint32_t, uint32_t *, void *);
+	/* US detection result */
+	enum usf_us_detect_type us_detect_type;
+	/* User's update info isn't acceptable */
+	u8 user_upd_info_na;
+};
+
+struct usf_type {
+	/* TX device component configuration & control */
+	struct usf_xx_type usf_tx;
+	/* RX device component configuration & control */
+	struct usf_xx_type usf_rx;
+	/* Index into the opened device container */
+	/* To prevent mutual usage of the same device */
+	uint16_t dev_ind;
+	/* Event types, supported by device */
+	uint16_t event_types;
+	/*  The input devices are "input" module registered clients */
+	struct input_dev *input_ifs[USF_MAX_EVENT_IND];
+	/* Bitmap of types of events, conflicting to USF's ones */
+	uint16_t conflicting_event_types;
+	/* Bitmap of types of events from devs, conflicting with USF */
+	uint16_t conflicting_event_filters;
+	/* The requested buttons bitmap */
+	uint16_t req_buttons_bitmap;
+	/* Mutex for exclusive operations (all public APIs) */
+	struct mutex mutex;
+};
+
+struct usf_input_dev_type {
+	/* Input event type, supported by the input device */
+	uint16_t event_type;
+	/* Input device name */
+	const char *input_dev_name;
+	/* Input device registration function */
+	int (*prepare_dev)(uint16_t, struct usf_type *,
+			   struct us_input_info_type *,
+			   const char *);
+	/* Input event notification function */
+	void (*notify_event)(struct usf_type *,
+			     uint16_t,
+			     struct usf_event_type *);
+};
+
+/* The MAX number of the supported devices */
+#define MAX_DEVS_NUMBER	1
+
+/*
+ * Code for a special button that is used to show/hide a
+ * hovering cursor in the input framework. Must be in
+ * sync with the button code definition in the framework
+ * (EventHub.h)
+ */
+#define BTN_USF_HOVERING_CURSOR         0x230
+
+/* Supported buttons container */
+static const int s_button_map[] = {
+	BTN_STYLUS,
+	BTN_STYLUS2,
+	BTN_TOOL_PEN,
+	BTN_TOOL_RUBBER,
+	BTN_TOOL_FINGER,
+	BTN_USF_HOVERING_CURSOR
+};
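+
+/*
+ * Bit i of the requested buttons bitmap corresponds to s_button_map[i],
+ * both in prepare_tsc_input_device() and in notify_tsc_event().
+ */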
+
+/* The opened devices container */
+static atomic_t s_opened_devs[MAX_DEVS_NUMBER];
+
+static struct wakeup_source usf_wakeup_source;
+
+#define USF_NAME_PREFIX "usf_"
+#define USF_NAME_PREFIX_SIZE 4
+
+static struct input_dev *allocate_dev(uint16_t ind, const char *name)
+{
+	struct input_dev *in_dev = input_allocate_device();
+
+	if (in_dev == NULL) {
+		pr_err("%s: input_allocate_device() failed\n", __func__);
+	} else {
+		/* Common part configuration */
+		in_dev->name = name;
+		in_dev->phys = NULL;
+		in_dev->id.bustype = BUS_HOST;
+		in_dev->id.vendor  = 0x0001;
+		in_dev->id.product = 0x0001;
+		in_dev->id.version = USF_VERSION_ID;
+	}
+	return in_dev;
+}
+
+static int prepare_tsc_input_device(uint16_t ind,
+				struct usf_type *usf_info,
+				struct us_input_info_type *input_info,
+				const char *name)
+{
+	int i = 0;
+
+	int num_buttons = min(ARRAY_SIZE(s_button_map),
+		sizeof(input_info->req_buttons_bitmap) *
+		BITS_IN_BYTE);
+	uint16_t max_buttons_bitmap = ((1 << ARRAY_SIZE(s_button_map)) - 1);
+
+	struct input_dev *in_dev = allocate_dev(ind, name);
+
+	if (in_dev == NULL)
+		return -ENOMEM;
+
+	if (input_info->req_buttons_bitmap > max_buttons_bitmap) {
+		pr_err("%s: Requested buttons[%d] exceeds max buttons available[%d]\n",
+		       __func__,
+		       input_info->req_buttons_bitmap,
+		       max_buttons_bitmap);
+		input_free_device(in_dev);
+		return -EINVAL;
+	}
+
+	usf_info->input_ifs[ind] = in_dev;
+	usf_info->req_buttons_bitmap =
+		input_info->req_buttons_bitmap;
+	in_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS);
+	in_dev->keybit[BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH);
+
+	for (i = 0; i < num_buttons; i++)
+		if (input_info->req_buttons_bitmap & (1 << i))
+			in_dev->keybit[BIT_WORD(s_button_map[i])] |=
+				BIT_MASK(s_button_map[i]);
+
+	input_set_abs_params(in_dev, ABS_X,
+			     input_info->tsc_x_dim[MIN_IND],
+			     input_info->tsc_x_dim[MAX_IND],
+			     0, 0);
+	input_set_abs_params(in_dev, ABS_Y,
+			     input_info->tsc_y_dim[MIN_IND],
+			     input_info->tsc_y_dim[MAX_IND],
+			     0, 0);
+	input_set_abs_params(in_dev, ABS_DISTANCE,
+			     input_info->tsc_z_dim[MIN_IND],
+			     input_info->tsc_z_dim[MAX_IND],
+			     0, 0);
+
+	input_set_abs_params(in_dev, ABS_PRESSURE,
+			     input_info->tsc_pressure[MIN_IND],
+			     input_info->tsc_pressure[MAX_IND],
+			     0, 0);
+
+	input_set_abs_params(in_dev, ABS_TILT_X,
+			     input_info->tsc_x_tilt[MIN_IND],
+			     input_info->tsc_x_tilt[MAX_IND],
+			     0, 0);
+	input_set_abs_params(in_dev, ABS_TILT_Y,
+			     input_info->tsc_y_tilt[MIN_IND],
+			     input_info->tsc_y_tilt[MAX_IND],
+			     0, 0);
+
+	return 0;
+}
+
+static int prepare_mouse_input_device(uint16_t ind, struct usf_type *usf_info,
+			struct us_input_info_type *input_info,
+			const char *name)
+{
+	struct input_dev *in_dev = allocate_dev(ind, name);
+
+	if (in_dev == NULL)
+		return -ENOMEM;
+
+	usf_info->input_ifs[ind] = in_dev;
+	in_dev->evbit[0] |= BIT_MASK(EV_KEY) | BIT_MASK(EV_REL);
+
+	in_dev->keybit[BIT_WORD(BTN_MOUSE)] = BIT_MASK(BTN_LEFT) |
+						BIT_MASK(BTN_RIGHT) |
+						BIT_MASK(BTN_MIDDLE);
+	in_dev->relbit[0] =  BIT_MASK(REL_X) |
+				BIT_MASK(REL_Y) |
+				BIT_MASK(REL_Z);
+
+	return 0;
+}
+
+static int prepare_keyboard_input_device(
+					uint16_t ind,
+					struct usf_type *usf_info,
+					struct us_input_info_type *input_info,
+					const char *name)
+{
+	struct input_dev *in_dev = allocate_dev(ind, name);
+
+	if (in_dev == NULL)
+		return -ENOMEM;
+
+	usf_info->input_ifs[ind] = in_dev;
+	in_dev->evbit[0] |= BIT_MASK(EV_KEY);
+	/* All keys are permitted */
+	memset(in_dev->keybit, 0xff, sizeof(in_dev->keybit));
+
+	return 0;
+}
+
+static void notify_tsc_event(struct usf_type *usf_info,
+			     uint16_t if_ind,
+			     struct usf_event_type *event)
+
+{
+	int i = 0;
+	int num_buttons = min(ARRAY_SIZE(s_button_map),
+		sizeof(usf_info->req_buttons_bitmap) *
+		BITS_IN_BYTE);
+
+	struct input_dev *input_if = usf_info->input_ifs[if_ind];
+	struct point_event_type *pe = &(event->event_data.point_event);
+
+	input_report_abs(input_if, ABS_X, pe->coordinates[X_IND]);
+	input_report_abs(input_if, ABS_Y, pe->coordinates[Y_IND]);
+	input_report_abs(input_if, ABS_DISTANCE, pe->coordinates[Z_IND]);
+
+	input_report_abs(input_if, ABS_TILT_X, pe->inclinations[X_IND]);
+	input_report_abs(input_if, ABS_TILT_Y, pe->inclinations[Y_IND]);
+
+	input_report_abs(input_if, ABS_PRESSURE, pe->pressure);
+	input_report_key(input_if, BTN_TOUCH, !!(pe->pressure));
+
+	for (i = 0; i < num_buttons; i++) {
+		uint16_t mask = 1 << i;
+		uint16_t btn_state = !!(pe->buttons_state_bitmap & mask);
+
+		if (usf_info->req_buttons_bitmap & mask)
+			input_report_key(input_if, s_button_map[i], btn_state);
+	}
+
+	input_sync(input_if);
+
+	pr_debug("%s: TSC event: xyz[%d;%d;%d], incl[%d;%d], pressure[%d], buttons[%d]\n",
+		 __func__,
+		 pe->coordinates[X_IND],
+		 pe->coordinates[Y_IND],
+		 pe->coordinates[Z_IND],
+		 pe->inclinations[X_IND],
+		 pe->inclinations[Y_IND],
+		 pe->pressure,
+		 pe->buttons_state_bitmap);
+}
+
+static void notify_mouse_event(struct usf_type *usf_info,
+			       uint16_t if_ind,
+			       struct usf_event_type *event)
+{
+	struct input_dev *input_if = usf_info->input_ifs[if_ind];
+	struct mouse_event_type *me = &(event->event_data.mouse_event);
+
+	input_report_rel(input_if, REL_X, me->rels[X_IND]);
+	input_report_rel(input_if, REL_Y, me->rels[Y_IND]);
+	input_report_rel(input_if, REL_Z, me->rels[Z_IND]);
+
+	input_report_key(input_if, BTN_LEFT,
+			 me->buttons_states & USF_BUTTON_LEFT_MASK);
+	input_report_key(input_if, BTN_MIDDLE,
+			 me->buttons_states & USF_BUTTON_MIDDLE_MASK);
+	input_report_key(input_if, BTN_RIGHT,
+			 me->buttons_states & USF_BUTTON_RIGHT_MASK);
+
+	input_sync(input_if);
+
+	pr_debug("%s: mouse event: dx[%d], dy[%d], buttons_states[%d]\n",
+		 __func__, me->rels[X_IND],
+		 me->rels[Y_IND], me->buttons_states);
+}
+
+static void notify_key_event(struct usf_type *usf_info,
+			     uint16_t if_ind,
+			     struct usf_event_type *event)
+{
+	struct input_dev *input_if = usf_info->input_ifs[if_ind];
+	struct key_event_type *ke = &(event->event_data.key_event);
+
+	input_report_key(input_if, ke->key, ke->key_state);
+	input_sync(input_if);
+	pr_debug("%s: key event: key[%d], state[%d]\n",
+		 __func__,
+		 ke->key,
+		 ke->key_state);
+}
+
+static struct usf_input_dev_type s_usf_input_devs[] = {
+	{USF_TSC_EVENT, "usf_tsc",
+		prepare_tsc_input_device, notify_tsc_event},
+	{USF_TSC_PTR_EVENT, "usf_tsc_ptr",
+		prepare_tsc_input_device, notify_tsc_event},
+	{USF_MOUSE_EVENT, "usf_mouse",
+		prepare_mouse_input_device, notify_mouse_event},
+	{USF_KEYBOARD_EVENT, "usf_kb",
+		prepare_keyboard_input_device, notify_key_event},
+	{USF_TSC_EXT_EVENT, "usf_tsc_ext",
+		prepare_tsc_input_device, notify_tsc_event},
+};
+
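+/*
+ * Callbacks invoked by the q6usm layer upon QDSP6 (aDSP) events.
+ * They only record the result in the per-direction state and wake up
+ * any ioctl handler sleeping on usf_xx->wait.
+ */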
+static void usf_rx_cb(uint32_t opcode, uint32_t token,
+		      uint32_t *payload, void *priv)
+{
+	struct usf_xx_type *usf_xx = (struct usf_xx_type *) priv;
+
+	if (usf_xx == NULL) {
+		pr_err("%s: the private data is NULL\n", __func__);
+		return;
+	}
+
+	switch (opcode) {
+	case Q6USM_EVENT_WRITE_DONE:
+		wake_up(&usf_xx->wait);
+		break;
+
+	case RESET_EVENTS:
+		pr_err("%s: received RESET_EVENTS\n", __func__);
+		usf_xx->usf_state = USF_ADSP_RESTART_STATE;
+		wake_up(&usf_xx->wait);
+		break;
+
+	default:
+		break;
+	}
+}
+
+static void usf_tx_cb(uint32_t opcode, uint32_t token,
+		      uint32_t *payload, void *priv)
+{
+	struct usf_xx_type *usf_xx = (struct usf_xx_type *) priv;
+
+	if (usf_xx == NULL) {
+		pr_err("%s: the private data is NULL\n", __func__);
+		return;
+	}
+
+	switch (opcode) {
+	case Q6USM_EVENT_READ_DONE:
+		pr_debug("%s: acquiring %d msec wake lock\n", __func__,
+				STAY_AWAKE_AFTER_READ_MSECS);
+		__pm_wakeup_event(&usf_wakeup_source,
+				  STAY_AWAKE_AFTER_READ_MSECS);
+		if (token == USM_WRONG_TOKEN)
+			usf_xx->usf_state = USF_ERROR_STATE;
+		usf_xx->new_region = token;
+		wake_up(&usf_xx->wait);
+		break;
+
+	case Q6USM_EVENT_SIGNAL_DETECT_RESULT:
+		usf_xx->us_detect_type = (payload[APR_US_DETECT_RESULT_IND]) ?
+					USF_US_DETECT_YES :
+					USF_US_DETECT_NO;
+
+		wake_up(&usf_xx->wait);
+		break;
+
+	case APR_BASIC_RSP_RESULT:
+		if (payload[APR_RESULT_IND]) {
+			usf_xx->usf_state = USF_ERROR_STATE;
+			usf_xx->new_region = USM_WRONG_TOKEN;
+			wake_up(&usf_xx->wait);
+		}
+		break;
+
+	case RESET_EVENTS:
+		pr_err("%s: received RESET_EVENTS\n", __func__);
+		usf_xx->usf_state = USF_ADSP_RESTART_STATE;
+		wake_up(&usf_xx->wait);
+		break;
+
+	default:
+		break;
+	}
+}
+
+static void release_xx(struct usf_xx_type *usf_xx)
+{
+	if (usf_xx != NULL) {
+		if (usf_xx->usc) {
+			q6usm_us_client_free(usf_xx->usc);
+			usf_xx->usc = NULL;
+		}
+
+		if (usf_xx->encdec_cfg.params != NULL) {
+			kfree(usf_xx->encdec_cfg.params);
+			usf_xx->encdec_cfg.params = NULL;
+		}
+	}
+}
+
+static void usf_disable(struct usf_xx_type *usf_xx)
+{
+	if (usf_xx != NULL) {
+		if ((usf_xx->usf_state != USF_IDLE_STATE) &&
+		    (usf_xx->usf_state != USF_OPENED_STATE)) {
+			(void)q6usm_cmd(usf_xx->usc, CMD_CLOSE);
+			usf_xx->usf_state = USF_OPENED_STATE;
+			wake_up(&usf_xx->wait);
+		}
+		release_xx(usf_xx);
+	}
+}
+
+static int config_xx(struct usf_xx_type *usf_xx, struct us_xx_info_type *config)
+{
+	int rc = 0;
+	uint16_t data_map_size = 0;
+	uint16_t min_map_size = 0;
+
+	if ((usf_xx == NULL) ||
+	    (config == NULL))
+		return -EINVAL;
+
+	if ((config->buf_size == 0) ||
+	    (config->buf_size > USF_MAX_BUF_SIZE) ||
+	    (config->buf_num == 0) ||
+	    (config->buf_num > USF_MAX_BUF_NUM)) {
+		pr_err("%s: wrong params: buf_size=%d; buf_num=%d\n",
+		       __func__, config->buf_size, config->buf_num);
+		return -EINVAL;
+	}
+
+	data_map_size = sizeof(usf_xx->encdec_cfg.cfg_common.data_map);
+	min_map_size = min(data_map_size, config->port_cnt);
+
+	if (config->client_name != NULL) {
+		if (strncpy_from_user(usf_xx->client_name,
+				      (char __user *)config->client_name,
+				      sizeof(usf_xx->client_name) - 1) < 0) {
+			pr_err("%s: get client name failed\n", __func__);
+			return -EINVAL;
+		}
+	}
+
+	pr_debug("%s: name=%s; buf_size:%d; dev_id:0x%x; sample_rate:%d\n",
+		__func__, usf_xx->client_name, config->buf_size,
+		config->dev_id, config->sample_rate);
+
+	pr_debug("%s: buf_num:%d; format:%d; port_cnt:%d; data_size=%d\n",
+		__func__, config->buf_num, config->stream_format,
+		config->port_cnt, config->params_data_size);
+
+	pr_debug("%s: id[0]=%d, id[1]=%d, id[2]=%d, id[3]=%d, id[4]=%d,\n",
+		__func__,
+		config->port_id[0],
+		config->port_id[1],
+		config->port_id[2],
+		config->port_id[3],
+		config->port_id[4]);
+
+	pr_debug("id[5]=%d, id[6]=%d, id[7]=%d\n",
+		config->port_id[5],
+		config->port_id[6],
+		config->port_id[7]);
+
+	/* q6usm allocation & configuration */
+	usf_xx->buffer_size = config->buf_size;
+	usf_xx->buffer_count = config->buf_num;
+	usf_xx->encdec_cfg.cfg_common.bits_per_sample =
+				config->bits_per_sample;
+	usf_xx->encdec_cfg.cfg_common.sample_rate = config->sample_rate;
+	/* AFE port e.g. AFE_PORT_ID_SLIMBUS_MULTI_CHAN_1_RX */
+	usf_xx->encdec_cfg.cfg_common.dev_id = config->dev_id;
+
+	usf_xx->encdec_cfg.cfg_common.ch_cfg = config->port_cnt;
+	memcpy((void *)&usf_xx->encdec_cfg.cfg_common.data_map,
+	       (void *)config->port_id,
+	       min_map_size);
+
+	usf_xx->encdec_cfg.format_id = config->stream_format;
+	usf_xx->encdec_cfg.params_size = config->params_data_size;
+	usf_xx->user_upd_info_na = 1; /* it's used in US_GET_TX_UPDATE */
+
+	if (config->params_data_size > 0) { /* transparent data copy */
+		usf_xx->encdec_cfg.params = kzalloc(config->params_data_size,
+						    GFP_KERNEL);
+		if (usf_xx->encdec_cfg.params == NULL) {
+			pr_err("%s: params memory alloc[%d] failure\n",
+				__func__,
+				config->params_data_size);
+			return -ENOMEM;
+		}
+		/*
+		 * False memory leak: the pointer lives in a packed struct
+		 * and is therefore missed by the kmemleak tool.
+		 */
+		kmemleak_ignore(usf_xx->encdec_cfg.params);
+		rc = copy_from_user(usf_xx->encdec_cfg.params,
+				    (uint8_t __user *)config->params_data,
+				    config->params_data_size);
+		if (rc) {
+			pr_err("%s: transparent data copy failure\n",
+			       __func__);
+			kfree(usf_xx->encdec_cfg.params);
+			usf_xx->encdec_cfg.params = NULL;
+			return -EFAULT;
+		}
+		pr_debug("%s: params_size[%d]; params[%d,%d,%d,%d, %d]\n",
+			 __func__,
+			 config->params_data_size,
+			 usf_xx->encdec_cfg.params[0],
+			 usf_xx->encdec_cfg.params[1],
+			 usf_xx->encdec_cfg.params[2],
+			 usf_xx->encdec_cfg.params[3],
+			 usf_xx->encdec_cfg.params[4]
+			);
+	}
+
+	usf_xx->usc = q6usm_us_client_alloc(usf_xx->cb, (void *)usf_xx);
+	if (!usf_xx->usc) {
+		pr_err("%s: Could not allocate q6usm client\n", __func__);
+		rc = -EFAULT;
+	}
+
+	return rc;
+}
+
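+/*
+ * Match callback passed to usfcdev_register(): an input device is
+ * considered conflicting if its event type index is valid and its name
+ * is absent or does not start with the "usf_" prefix (i.e. it is not
+ * one of the devices registered by this driver).
+ */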
+static bool usf_match(uint16_t event_type_ind, struct input_dev *dev)
+{
+	bool rc = false;
+
+	rc = (event_type_ind < MAX_EVENT_TYPE_NUM) &&
+		((dev->name == NULL) ||
+		strncmp(dev->name, USF_NAME_PREFIX, USF_NAME_PREFIX_SIZE));
+	pr_debug("%s: name=[%s]; rc=%d\n",
+		 __func__, dev->name, rc);
+
+	return rc;
+}
+
+static bool usf_register_conflicting_events(uint16_t event_types)
+{
+	bool rc = true;
+	uint16_t ind = 0;
+	uint16_t mask = 1;
+
+	for (ind = 0; ind < MAX_EVENT_TYPE_NUM; ++ind) {
+		if (event_types & mask) {
+			rc = usfcdev_register(ind, usf_match);
+			if (!rc)
+				break;
+		}
+		mask = mask << 1;
+	}
+
+	return rc;
+}
+
+static void usf_unregister_conflicting_events(uint16_t event_types)
+{
+	uint16_t ind = 0;
+	uint16_t mask = 1;
+
+	for (ind = 0; ind < MAX_EVENT_TYPE_NUM; ++ind) {
+		if (event_types & mask)
+			usfcdev_unregister(ind);
+		mask = mask << 1;
+	}
+}
+
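+/*
+ * Enable/disable delivery of conflicting events via usfcdev, but only
+ * for the event types this USF instance has actually grabbed.
+ */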
+static void usf_set_event_filters(struct usf_type *usf, uint16_t event_filters)
+{
+	uint16_t ind = 0;
+	uint16_t mask = 1;
+
+	if (usf->conflicting_event_filters != event_filters) {
+		for (ind = 0; ind < MAX_EVENT_TYPE_NUM; ++ind) {
+			if (usf->conflicting_event_types & mask)
+				usfcdev_set_filter(ind, event_filters&mask);
+			mask = mask << 1;
+		}
+		usf->conflicting_event_filters = event_filters;
+	}
+}
+
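+/*
+ * Allocate and register one input device per requested USF event type
+ * (see s_usf_input_devs[]), then grab the conflicting event types so
+ * that usfcdev can filter them while USF is active.
+ */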
+static int register_input_device(struct usf_type *usf_info,
+				 struct us_input_info_type *input_info)
+{
+	int rc = 0;
+	bool ret = true;
+	uint16_t ind = 0;
+
+	if ((usf_info == NULL) ||
+	    (input_info == NULL) ||
+	    !(input_info->event_types & USF_ALL_EVENTS)) {
+		pr_err("%s: wrong input parameter(s)\n", __func__);
+		return -EINVAL;
+	}
+
+	for (ind = 0; ind < USF_MAX_EVENT_IND; ++ind) {
+		if (usf_info->input_ifs[ind] != NULL) {
+			pr_err("%s: input_if[%d] is already allocated\n",
+				__func__, ind);
+			return -EFAULT;
+		}
+		if ((input_info->event_types &
+			s_usf_input_devs[ind].event_type) &&
+		     s_usf_input_devs[ind].prepare_dev) {
+			rc = (*s_usf_input_devs[ind].prepare_dev)(
+				ind,
+				usf_info,
+				input_info,
+				s_usf_input_devs[ind].input_dev_name);
+			if (rc)
+				return rc;
+
+			rc = input_register_device(usf_info->input_ifs[ind]);
+			if (rc) {
+				pr_err("%s: input_reg_dev() failed; rc=%d\n",
+					__func__, rc);
+				input_free_device(usf_info->input_ifs[ind]);
+				usf_info->input_ifs[ind] = NULL;
+			} else {
+				usf_info->event_types |=
+					s_usf_input_devs[ind].event_type;
+				pr_debug("%s: input device[%s] was registered\n",
+					__func__,
+					s_usf_input_devs[ind].input_dev_name);
+			}
+		} /* supported event */
+	} /* event types loop */
+
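+	/*
+	 * A failure to grab the conflicting events is not treated as
+	 * fatal: the input devices were registered successfully above.
+	 */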
+	ret = usf_register_conflicting_events(
+			input_info->conflicting_event_types);
+	if (ret)
+		usf_info->conflicting_event_types =
+			input_info->conflicting_event_types;
+
+	return 0;
+}
+
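+/*
+ * Events arrive as a user space array; they are copied into a small
+ * on-stack buffer in portions of USF_EVENTS_PORTION_SIZE entries to
+ * bound kernel stack usage, then dispatched to the matching input
+ * device.
+ */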
+static void handle_input_event(struct usf_type *usf_info,
+			       uint16_t event_counter,
+			       struct usf_event_type __user *event)
+{
+	uint16_t ind = 0;
+	uint16_t events_num = 0;
+	struct usf_event_type usf_events[USF_EVENTS_PORTION_SIZE];
+	int rc = 0;
+
+	if ((usf_info == NULL) ||
+	    (event == NULL) || (!event_counter)) {
+		return;
+	}
+
+	while (event_counter > 0) {
+		if (event_counter > USF_EVENTS_PORTION_SIZE) {
+			events_num = USF_EVENTS_PORTION_SIZE;
+			event_counter -= USF_EVENTS_PORTION_SIZE;
+		} else {
+			events_num = event_counter;
+			event_counter = 0;
+		}
+		rc = copy_from_user(usf_events,
+				(struct usf_event_type __user *)event,
+				events_num * sizeof(struct usf_event_type));
+		if (rc) {
+			pr_err("%s: copy upd_rx_info from user; rc=%d\n",
+				__func__, rc);
+			return;
+		}
+		for (ind = 0; ind < events_num; ++ind) {
+			struct usf_event_type *p_event = &usf_events[ind];
+			uint16_t if_ind = p_event->event_type_ind;
+
+			if ((if_ind >= USF_MAX_EVENT_IND) ||
+			    (usf_info->input_ifs[if_ind] == NULL))
+				continue; /* event isn't supported */
+
+			if (s_usf_input_devs[if_ind].notify_event)
+				(*s_usf_input_devs[if_ind].notify_event)(
+								usf_info,
+								if_ind,
+								p_event);
+		} /* loop in the portion */
+
+		/* Advance to the next portion of the user's array */
+		event += events_num;
+	} /* all events loop */
+}
+
+static int usf_start_tx(struct usf_xx_type *usf_xx)
+{
+	int rc = q6usm_run(usf_xx->usc, 0, 0, 0);
+
+	pr_debug("%s: tx: q6usm_run; rc=%d\n", __func__, rc);
+	if (rc)
+		return rc;
+
+	if (usf_xx->buffer_count >= USM_MIN_BUF_CNT) {
+		/* supply all buffers */
+		rc = q6usm_read(usf_xx->usc, usf_xx->buffer_count);
+		pr_debug("%s: q6usm_read[%d]\n", __func__, rc);
+		if (rc) {
+			pr_err("%s: buf read failed\n", __func__);
+			return rc;
+		}
+	}
+
+	usf_xx->usf_state = USF_WORK_STATE;
+
+	return 0;
+} /* usf_start_tx */
+
+static int usf_start_rx(struct usf_xx_type *usf_xx)
+{
+	int rc = q6usm_run(usf_xx->usc, 0, 0, 0);
+
+	pr_debug("%s: rx: q6usm_run; rc=%d\n",
+		 __func__, rc);
+	if (!rc)
+		usf_xx->usf_state = USF_WORK_STATE;
+
+	return rc;
+} /* usf_start_rx */
+
+static int __usf_set_us_detection(struct usf_type *usf,
+				  struct us_detect_info_type *detect_info)
+{
+	uint32_t timeout = 0;
+	struct usm_session_cmd_detect_info *p_allocated_memory = NULL;
+	struct usm_session_cmd_detect_info usm_detect_info;
+	struct usm_session_cmd_detect_info *p_usm_detect_info =
+						&usm_detect_info;
+	uint32_t detect_info_size = sizeof(struct usm_session_cmd_detect_info);
+	struct usf_xx_type *usf_xx =  &usf->usf_tx;
+	int rc = 0;
+
+	if (detect_info->us_detector != US_DETECT_FW) {
+		pr_err("%s: unsupported detector: %d\n",
+			__func__, detect_info->us_detector);
+		return -EINVAL;
+	}
+
+	if ((detect_info->params_data_size != 0) &&
+	    (detect_info->params_data != NULL)) {
+		uint8_t *p_data = NULL;
+
+		detect_info_size += detect_info->params_data_size;
+		p_allocated_memory = kzalloc(detect_info_size, GFP_KERNEL);
+		if (p_allocated_memory == NULL) {
+			pr_err("%s: detect_info[%d] allocation failed\n",
+			       __func__, detect_info_size);
+			return -ENOMEM;
+		}
+		p_usm_detect_info = p_allocated_memory;
+		p_data = (uint8_t *)p_usm_detect_info +
+			sizeof(struct usm_session_cmd_detect_info);
+
+		rc = copy_from_user(p_data,
+			(uint8_t __user *)(detect_info->params_data),
+			detect_info->params_data_size);
+		if (rc) {
+			pr_err("%s: copy params from user; rc=%d\n",
+				__func__, rc);
+			kfree(p_allocated_memory);
+			return -EFAULT;
+		}
+		p_usm_detect_info->algorithm_cfg_size =
+				detect_info->params_data_size;
+	} else {
+		usm_detect_info.algorithm_cfg_size = 0;
+	}
+
+	p_usm_detect_info->detect_mode = detect_info->us_detect_mode;
+	p_usm_detect_info->skip_interval = detect_info->skip_time;
+
+	usf_xx->us_detect_type = USF_US_DETECT_UNDEF;
+
+	rc = q6usm_set_us_detection(usf_xx->usc,
+				    p_usm_detect_info,
+				    detect_info_size);
+	if (rc || (detect_info->detect_timeout == USF_NO_WAIT_TIMEOUT)) {
+		kfree(p_allocated_memory);
+		return rc;
+	}
+
+	/* Get US detection result */
+	if (detect_info->detect_timeout == USF_INFINITIVE_TIMEOUT) {
+		rc = wait_event_interruptible(usf_xx->wait,
+					(usf_xx->us_detect_type !=
+					USF_US_DETECT_UNDEF) ||
+					(usf_xx->usf_state ==
+					USF_ADSP_RESTART_STATE));
+	} else {
+		if (detect_info->detect_timeout == USF_DEFAULT_TIMEOUT)
+			timeout = USF_TIMEOUT_JIFFIES;
+		else
+			timeout = detect_info->detect_timeout * HZ;
+		/*
+		 * The timed wait belongs to this branch only; otherwise
+		 * the infinite wait above would be followed by a second
+		 * wait with timeout == 0.
+		 */
+		rc = wait_event_interruptible_timeout(usf_xx->wait,
+					(usf_xx->us_detect_type !=
+					USF_US_DETECT_UNDEF) ||
+					(usf_xx->usf_state ==
+					USF_ADSP_RESTART_STATE), timeout);
+	}
+
+	/* In the case of aDSP restart, "no US" is assumed */
+	if (usf_xx->usf_state == USF_ADSP_RESTART_STATE)
+		rc = -EFAULT;
+
+	/* In the case of timeout, "no US" is assumed */
+	if (rc < 0) {
+		pr_err("%s: Getting US detection failed rc[%d]\n",
+		       __func__, rc);
+	} else {
+		usf->usf_rx.us_detect_type = usf->usf_tx.us_detect_type;
+		detect_info->is_us =
+			(usf_xx->us_detect_type == USF_US_DETECT_YES);
+	}
+
+	kfree(p_allocated_memory);
+
+	return rc;
+} /* __usf_set_us_detection */
+
+static int usf_set_us_detection(struct usf_type *usf, unsigned long arg)
+{
+	struct us_detect_info_type detect_info;
+
+	int rc = copy_from_user(&detect_info,
+				(struct us_detect_info_type __user *) arg,
+				sizeof(detect_info));
+
+	if (rc) {
+		pr_err("%s: copy detect_info from user; rc=%d\n",
+			__func__, rc);
+		return -EFAULT;
+	}
+
+	if (detect_info.params_data_size > USF_MAX_USER_BUF_SIZE) {
+		pr_err("%s: user buffer size exceeds maximum\n",
+			__func__);
+		return -EFAULT;
+	}
+
+	rc = __usf_set_us_detection(usf, &detect_info);
+	if (rc < 0) {
+		pr_err("%s: set us detection failed; rc=%d\n",
+			__func__, rc);
+		return rc;
+	}
+
+	rc = copy_to_user((void __user *)arg,
+			  &detect_info,
+			  sizeof(detect_info));
+	if (rc) {
+		pr_err("%s: copy detect_info to user; rc=%d\n",
+			__func__, rc);
+		rc = -EFAULT;
+	}
+
+	return rc;
+} /* usf_set_us_detection */
+
+static int __usf_set_tx_info(struct usf_type *usf,
+			     struct us_tx_info_type *config_tx)
+{
+	struct usf_xx_type *usf_xx =  &usf->usf_tx;
+	int rc = 0;
+
+	usf_xx->new_region = USM_UNDEF_TOKEN;
+	usf_xx->prev_region = USM_UNDEF_TOKEN;
+	usf_xx->cb = usf_tx_cb;
+
+	init_waitqueue_head(&usf_xx->wait);
+
+	if (config_tx->us_xx_info.client_name != NULL) {
+		int res = strncpy_from_user(
+			usf_xx->client_name,
+			(char __user *)(config_tx->us_xx_info.client_name),
+			sizeof(usf_xx->client_name)-1);
+		if (res < 0) {
+			pr_err("%s: get client name failed\n",
+			       __func__);
+			return -EINVAL;
+		}
+	}
+
+	rc = config_xx(usf_xx, &(config_tx->us_xx_info));
+	if (rc)
+		return rc;
+
+	rc = q6usm_open_read(usf_xx->usc,
+			     usf_xx->encdec_cfg.format_id);
+	if (rc)
+		return rc;
+
+	rc = q6usm_us_client_buf_alloc(OUT, usf_xx->usc,
+				       usf_xx->buffer_size,
+				       usf_xx->buffer_count);
+	if (rc) {
+		(void)q6usm_cmd(usf_xx->usc, CMD_CLOSE);
+		return rc;
+	}
+
+	rc = q6usm_us_param_buf_alloc(OUT, usf_xx->usc,
+			config_tx->us_xx_info.max_get_set_param_buf_size);
+	if (rc) {
+		(void)q6usm_cmd(usf_xx->usc, CMD_CLOSE);
+		return rc;
+	}
+
+	rc = q6usm_enc_cfg_blk(usf_xx->usc,
+			       &usf_xx->encdec_cfg);
+	if (!rc &&
+	     (config_tx->input_info.event_types != USF_NO_EVENT)) {
+		rc = register_input_device(usf,
+					   &(config_tx->input_info));
+	}
+
+	if (rc)
+		(void)q6usm_cmd(usf_xx->usc, CMD_CLOSE);
+	else
+		usf_xx->usf_state = USF_CONFIGURED_STATE;
+
+	return rc;
+} /* __usf_set_tx_info */
+
+static int usf_set_tx_info(struct usf_type *usf, unsigned long arg)
+{
+	struct us_tx_info_type config_tx;
+
+	int rc = copy_from_user(&config_tx,
+			    (struct us_tx_info_type __user *) arg,
+			    sizeof(config_tx));
+
+	if (rc) {
+		pr_err("%s: copy config_tx from user; rc=%d\n",
+			__func__, rc);
+		return -EFAULT;
+	}
+
+	if (config_tx.us_xx_info.params_data_size > USF_MAX_USER_BUF_SIZE) {
+		pr_err("%s: user buffer size exceeds maximum\n",
+			__func__);
+		return -EFAULT;
+	}
+
+	return __usf_set_tx_info(usf, &config_tx);
+} /* usf_set_tx_info */
+
+static int __usf_set_rx_info(struct usf_type *usf,
+			     struct us_rx_info_type *config_rx)
+{
+	struct usf_xx_type *usf_xx =  &usf->usf_rx;
+	int rc = 0;
+
+	usf_xx->new_region = USM_UNDEF_TOKEN;
+	usf_xx->prev_region = USM_UNDEF_TOKEN;
+
+	usf_xx->cb = usf_rx_cb;
+
+	rc = config_xx(usf_xx, &(config_rx->us_xx_info));
+	if (rc)
+		return rc;
+
+	rc = q6usm_open_write(usf_xx->usc,
+			      usf_xx->encdec_cfg.format_id);
+	if (rc)
+		return rc;
+
+	rc = q6usm_us_client_buf_alloc(
+				IN,
+				usf_xx->usc,
+				usf_xx->buffer_size,
+				usf_xx->buffer_count);
+	if (rc) {
+		(void)q6usm_cmd(usf_xx->usc, CMD_CLOSE);
+		return rc;
+	}
+
+	rc = q6usm_us_param_buf_alloc(IN, usf_xx->usc,
+			config_rx->us_xx_info.max_get_set_param_buf_size);
+	if (rc) {
+		(void)q6usm_cmd(usf_xx->usc, CMD_CLOSE);
+		return rc;
+	}
+
+	rc = q6usm_dec_cfg_blk(usf_xx->usc,
+			       &usf_xx->encdec_cfg);
+	if (rc)
+		(void)q6usm_cmd(usf_xx->usc, CMD_CLOSE);
+	else {
+		init_waitqueue_head(&usf_xx->wait);
+		usf_xx->usf_state = USF_CONFIGURED_STATE;
+	}
+
+	return rc;
+} /* __usf_set_rx_info */
+
+static int usf_set_rx_info(struct usf_type *usf, unsigned long arg)
+{
+	struct us_rx_info_type config_rx;
+
+	int rc = copy_from_user(&config_rx,
+				(struct us_rx_info_type __user *) arg,
+				sizeof(config_rx));
+
+	if (rc) {
+		pr_err("%s: copy config_rx from user; rc=%d\n",
+			__func__, rc);
+		return -EFAULT;
+	}
+
+	if (config_rx.us_xx_info.params_data_size > USF_MAX_USER_BUF_SIZE) {
+		pr_err("%s: user buffer size exceeds maximum\n",
+			__func__);
+		return -EFAULT;
+	}
+
+	return __usf_set_rx_info(usf, &config_rx);
+} /* usf_set_rx_info */
+
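+/*
+ * TX update: the shared TX memory is a cyclic buffer with one gap.
+ * Ready regions are tracked via tokens from Q6USM read-done events
+ * (usf_xx->new_region) against the last region handed to the user
+ * (usf_xx->prev_region); the user returns consumed regions through
+ * upd_tx_info->free_region.
+ */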
+static int __usf_get_tx_update(struct usf_type *usf,
+			struct us_tx_update_info_type *upd_tx_info)
+{
+	unsigned long prev_jiffies = 0;
+	uint32_t timeout = 0;
+	struct usf_xx_type *usf_xx =  &usf->usf_tx;
+	int rc = 0;
+
+	if (!usf_xx->user_upd_info_na) {
+		usf_set_event_filters(usf, upd_tx_info->event_filters);
+		handle_input_event(usf,
+				   upd_tx_info->event_counter,
+				   upd_tx_info->event);
+
+		/* Release available regions */
+		rc = q6usm_read(usf_xx->usc,
+				upd_tx_info->free_region);
+		if (rc)
+			return rc;
+	} else {
+		usf_xx->user_upd_info_na = 0;
+	}
+
+	/* Get data ready regions */
+	if (upd_tx_info->timeout == USF_INFINITIVE_TIMEOUT) {
+		rc = wait_event_interruptible(usf_xx->wait,
+			   (usf_xx->prev_region !=
+			    usf_xx->new_region) ||
+			   (usf_xx->usf_state !=
+			    USF_WORK_STATE));
+	} else {
+		if (upd_tx_info->timeout == USF_NO_WAIT_TIMEOUT)
+			rc = (usf_xx->prev_region != usf_xx->new_region);
+		else {
+			prev_jiffies = jiffies;
+			if (upd_tx_info->timeout == USF_DEFAULT_TIMEOUT) {
+				timeout = USF_TIMEOUT_JIFFIES;
+				rc = wait_event_timeout(
+						usf_xx->wait,
+						(usf_xx->prev_region !=
+						 usf_xx->new_region) ||
+						(usf_xx->usf_state !=
+						 USF_WORK_STATE),
+						timeout);
+			} else {
+				timeout = upd_tx_info->timeout * HZ;
+				rc = wait_event_interruptible_timeout(
+						usf_xx->wait,
+						(usf_xx->prev_region !=
+						 usf_xx->new_region) ||
+						(usf_xx->usf_state !=
+						 USF_WORK_STATE),
+						timeout);
+			}
+		}
+		if (!rc) {
+			pr_debug("%s: timeout. prev_j=%lu; j=%lu\n",
+				__func__, prev_jiffies, jiffies);
+			pr_debug("%s: timeout. prev=%d; new=%d\n",
+				__func__, usf_xx->prev_region,
+				usf_xx->new_region);
+			pr_debug("%s: timeout. free_region=%d;\n",
+				__func__, upd_tx_info->free_region);
+			if (usf_xx->prev_region ==
+			    usf_xx->new_region) {
+				pr_err("%s:read data: timeout\n",
+				       __func__);
+				return -ETIME;
+			}
+		}
+	}
+
+	if ((usf_xx->usf_state != USF_WORK_STATE) ||
+	    (rc == -ERESTARTSYS)) {
+		pr_err("%s: Get ready region failure; state[%d]; rc[%d]\n",
+		       __func__, usf_xx->usf_state, rc);
+		return -EINTR;
+	}
+
+	upd_tx_info->ready_region = usf_xx->new_region;
+	usf_xx->prev_region = upd_tx_info->ready_region;
+
+	if (upd_tx_info->ready_region == USM_WRONG_TOKEN) {
+		pr_err("%s: TX path corrupted; prev=%d\n",
+		       __func__, usf_xx->prev_region);
+		return -EIO;
+	}
+
+	return rc;
+} /* __usf_get_tx_update */
+
+static int usf_get_tx_update(struct usf_type *usf, unsigned long arg)
+{
+	struct us_tx_update_info_type upd_tx_info;
+
+	int rc = copy_from_user(&upd_tx_info,
+				(struct us_tx_update_info_type __user *) arg,
+				sizeof(upd_tx_info));
+
+	if (rc) {
+		pr_err("%s: copy upd_tx_info from user; rc=%d\n",
+			__func__, rc);
+		return -EFAULT;
+	}
+
+	rc = __usf_get_tx_update(usf, &upd_tx_info);
+	if (rc < 0) {
+		pr_err("%s: get tx update failed; rc=%d\n",
+			__func__, rc);
+		return rc;
+	}
+
+	rc = copy_to_user((void __user *)arg,
+			  &upd_tx_info,
+			  sizeof(upd_tx_info));
+	if (rc) {
+		pr_err("%s: copy upd_tx_info to user; rc=%d\n",
+			__func__, rc);
+		rc = -EFAULT;
+	}
+
+	return rc;
+} /* usf_get_tx_update */
+
+static int __usf_set_rx_update(struct usf_xx_type *usf_xx,
+			       struct us_rx_update_info_type *upd_rx_info)
+{
+	int rc = 0;
+
+	/* Send available data regions */
+	if (upd_rx_info->ready_region !=
+	    usf_xx->buffer_count) {
+		rc = q6usm_write(
+			usf_xx->usc,
+			upd_rx_info->ready_region);
+		if (rc)
+			return rc;
+	}
+
+	/* Get free regions */
+	rc = wait_event_timeout(
+		usf_xx->wait,
+		!q6usm_is_write_buf_full(
+			usf_xx->usc,
+			&(upd_rx_info->free_region)) ||
+		(usf_xx->usf_state == USF_IDLE_STATE),
+		USF_TIMEOUT_JIFFIES);
+
+	if (!rc) {
+		rc = -ETIME;
+		pr_err("%s: timeout while waiting for a non-full write buf\n",
+		       __func__);
+	} else if (usf_xx->usf_state != USF_WORK_STATE) {
+		pr_err("%s: RX: state[%d]\n",
+		       __func__,
+		       usf_xx->usf_state);
+		rc = -EINTR;
+	}
+
+	return rc;
+} /* __usf_set_rx_update */
+
+static int usf_set_rx_update(struct usf_xx_type *usf_xx, unsigned long arg)
+{
+	struct us_rx_update_info_type upd_rx_info;
+
+	int rc = copy_from_user(&upd_rx_info,
+				(struct us_rx_update_info_type __user *) arg,
+				sizeof(upd_rx_info));
+
+	if (rc) {
+		pr_err("%s: copy upd_rx_info from user; rc=%d\n",
+			__func__, rc);
+		return -EFAULT;
+	}
+
+	rc = __usf_set_rx_update(usf_xx, &upd_rx_info);
+	if (rc < 0) {
+		pr_err("%s: set rx update failed; rc=%d\n",
+			__func__, rc);
+		return rc;
+	}
+
+	rc = copy_to_user((void __user *)arg,
+			&upd_rx_info,
+			sizeof(upd_rx_info));
+	if (rc) {
+		pr_err("%s: copy rx_info to user; rc=%d\n",
+			__func__, rc);
+		rc = -EFAULT;
+	}
+
+	return rc;
+} /* usf_set_rx_update */
+
+static void usf_release_input(struct usf_type *usf)
+{
+	uint16_t ind = 0;
+
+	usf_unregister_conflicting_events(
+					usf->conflicting_event_types);
+	usf->conflicting_event_types = 0;
+	for (ind = 0; ind < USF_MAX_EVENT_IND; ++ind) {
+		if (usf->input_ifs[ind] == NULL)
+			continue;
+		input_unregister_device(usf->input_ifs[ind]);
+		usf->input_ifs[ind] = NULL;
+		pr_debug("%s input_unregister_device[%s]\n",
+			 __func__,
+			 s_usf_input_devs[ind].input_dev_name);
+	}
+} /* usf_release_input */
+
+static int usf_stop_tx(struct usf_type *usf)
+{
+	struct usf_xx_type *usf_xx =  &usf->usf_tx;
+
+	usf_release_input(usf);
+	usf_disable(usf_xx);
+
+	return 0;
+} /* usf_stop_tx */
+
+static int __usf_get_version(struct us_version_info_type *version_info)
+{
+	int rc = 0;
+
+	if (version_info->buf_size < sizeof(DRV_VERSION)) {
+		pr_err("%s: buf_size (%d) < version string size (%zu)\n",
+			__func__, version_info->buf_size, sizeof(DRV_VERSION));
+		return -EINVAL;
+	}
+
+	rc = copy_to_user((void __user *)(version_info->pbuf),
+			  DRV_VERSION,
+			  sizeof(DRV_VERSION));
+	if (rc) {
+		pr_err("%s: copy to version_info.pbuf; rc=%d\n",
+			__func__, rc);
+		rc = -EFAULT;
+	}
+
+	return rc;
+} /* __usf_get_version */
+
+static int usf_get_version(unsigned long arg)
+{
+	struct us_version_info_type version_info;
+
+	int rc = copy_from_user(&version_info,
+				(struct us_version_info_type __user *) arg,
+				sizeof(version_info));
+
+	if (rc) {
+		pr_err("%s: copy version_info from user; rc=%d\n",
+			__func__, rc);
+		return -EFAULT;
+	}
+
+	rc = __usf_get_version(&version_info);
+	if (rc < 0) {
+		pr_err("%s: get version failed; rc=%d\n",
+			__func__, rc);
+		return rc;
+	}
+
+	rc = copy_to_user((void __user *)arg,
+			  &version_info,
+			  sizeof(version_info));
+	if (rc) {
+		pr_err("%s: copy version_info to user; rc=%d\n",
+			__func__, rc);
+		rc = -EFAULT;
+	}
+
+	return rc;
+} /* usf_get_version */
+
+static int __usf_set_stream_param(struct usf_xx_type *usf_xx,
+				struct us_stream_param_type *set_stream_param,
+				int dir)
+{
+	struct us_client *usc = usf_xx->usc;
+	struct us_port_data *port;
+	int rc = 0;
+
+	if (usc == NULL) {
+		pr_err("%s: usc is null\n",
+			__func__);
+		return -EFAULT;
+	}
+
+	port = &usc->port[dir];
+	if (port == NULL) {
+		pr_err("%s: port is null\n",
+			__func__);
+		return -EFAULT;
+	}
+
+	if (port->param_buf == NULL) {
+		pr_err("%s: parameter buffer is null\n",
+			__func__);
+		return -EFAULT;
+	}
+
+	if (set_stream_param->buf_size > port->param_buf_size) {
+		pr_err("%s: buf_size (%d) > maximum buf size (%d)\n",
+			__func__, set_stream_param->buf_size,
+			port->param_buf_size);
+		return -EINVAL;
+	}
+
+	if (set_stream_param->buf_size == 0) {
+		pr_err("%s: buf_size is 0\n", __func__);
+		return -EINVAL;
+	}
+
+	rc = copy_from_user(port->param_buf,
+			(uint8_t __user *) set_stream_param->pbuf,
+			set_stream_param->buf_size);
+	if (rc) {
+		pr_err("%s: copy param buf from user; rc=%d\n",
+			__func__, rc);
+		return -EFAULT;
+	}
+
+	rc = q6usm_set_us_stream_param(dir, usc, set_stream_param->module_id,
+					set_stream_param->param_id,
+					set_stream_param->buf_size);
+	if (rc) {
+		pr_err("%s: q6usm_set_us_stream_param failed; rc=%d\n",
+			__func__, rc);
+		return -EFAULT;
+	}
+
+	return rc;
+}
+
+static int usf_set_stream_param(struct usf_xx_type *usf_xx,
+				unsigned long arg, int dir)
+{
+	struct us_stream_param_type set_stream_param;
+	int rc = 0;
+
+	rc = copy_from_user(&set_stream_param,
+			(struct us_stream_param_type __user *) arg,
+			sizeof(set_stream_param));
+
+	if (rc) {
+		pr_err("%s: copy set_stream_param from user; rc=%d\n",
+			__func__, rc);
+		return -EFAULT;
+	}
+
+	return __usf_set_stream_param(usf_xx, &set_stream_param, dir);
+} /* usf_set_stream_param */
+
+static int __usf_get_stream_param(struct usf_xx_type *usf_xx,
+				struct us_stream_param_type *get_stream_param,
+				int dir)
+{
+	struct us_client *usc = usf_xx->usc;
+	struct us_port_data *port;
+	int rc = 0;
+
+	if (usc == NULL) {
+		pr_err("%s: us_client is null\n",
+			__func__);
+		return -EFAULT;
+	}
+
+	port = &usc->port[dir];
+
+	if (port->param_buf == NULL) {
+		pr_err("%s: parameter buffer is null\n",
+			__func__);
+		return -EFAULT;
+	}
+
+	if (get_stream_param->buf_size > port->param_buf_size) {
+		pr_err("%s: buf_size (%d) > maximum buf size (%d)\n",
+			__func__, get_stream_param->buf_size,
+			port->param_buf_size);
+		return -EINVAL;
+	}
+
+	if (get_stream_param->buf_size == 0) {
+		pr_err("%s: buf_size is 0\n", __func__);
+		return -EINVAL;
+	}
+
+	rc = q6usm_get_us_stream_param(dir, usc, get_stream_param->module_id,
+					get_stream_param->param_id,
+					get_stream_param->buf_size);
+	if (rc) {
+		pr_err("%s: q6usm_get_us_stream_param failed; rc=%d\n",
+			__func__, rc);
+		return -EFAULT;
+	}
+
+	rc = copy_to_user((uint8_t __user *) get_stream_param->pbuf,
+			port->param_buf,
+			get_stream_param->buf_size);
+	if (rc) {
+		pr_err("%s: copy param buf to user; rc=%d\n",
+			__func__, rc);
+		return -EFAULT;
+	}
+
+	return rc;
+}
+
+static int usf_get_stream_param(struct usf_xx_type *usf_xx,
+				unsigned long arg, int dir)
+{
+	struct us_stream_param_type get_stream_param;
+	int rc = 0;
+
+	rc = copy_from_user(&get_stream_param,
+			(struct us_stream_param_type __user *) arg,
+			sizeof(get_stream_param));
+
+	if (rc) {
+		pr_err("%s: copy get_stream_param from user; rc=%d\n",
+			__func__, rc);
+		return -EFAULT;
+	}
+
+	return __usf_get_stream_param(usf_xx, &get_stream_param, dir);
+} /* usf_get_stream_param */
+
+static long __usf_ioctl(struct usf_type *usf,
+		unsigned int cmd,
+		unsigned long arg)
+{
+	int rc = 0;
+	struct usf_xx_type *usf_xx = NULL;
+
+	switch (cmd) {
+	case US_START_TX: {
+		usf_xx = &usf->usf_tx;
+		if (usf_xx->usf_state == USF_CONFIGURED_STATE)
+			rc = usf_start_tx(usf_xx);
+		else {
+			pr_err("%s: start_tx: wrong state[%d]\n",
+			       __func__,
+			       usf_xx->usf_state);
+			return -EBADFD;
+		}
+		break;
+	}
+
+	case US_START_RX: {
+		usf_xx = &usf->usf_rx;
+		if (usf_xx->usf_state == USF_CONFIGURED_STATE)
+			rc = usf_start_rx(usf_xx);
+		else {
+			pr_err("%s: start_rx: wrong state[%d]\n",
+				__func__,
+				usf_xx->usf_state);
+			return -EBADFD;
+		}
+		break;
+	}
+
+	case US_SET_TX_INFO: {
+		usf_xx = &usf->usf_tx;
+		if (usf_xx->usf_state == USF_OPENED_STATE)
+			rc = usf_set_tx_info(usf, arg);
+		else {
+			pr_err("%s: set_tx_info: wrong state[%d]\n",
+			       __func__,
+			       usf_xx->usf_state);
+			return -EBADFD;
+		}
+
+		break;
+	} /* US_SET_TX_INFO */
+
+	case US_SET_RX_INFO: {
+		usf_xx = &usf->usf_rx;
+		if (usf_xx->usf_state == USF_OPENED_STATE)
+			rc = usf_set_rx_info(usf, arg);
+		else {
+			pr_err("%s: set_rx_info: wrong state[%d]\n",
+				__func__,
+				usf_xx->usf_state);
+			return -EBADFD;
+		}
+
+		break;
+	} /* US_SET_RX_INFO */
+
+	case US_GET_TX_UPDATE: {
+		struct usf_xx_type *usf_xx = &usf->usf_tx;
+
+		if (usf_xx->usf_state == USF_WORK_STATE)
+			rc = usf_get_tx_update(usf, arg);
+		else {
+			pr_err("%s: get_tx_update: wrong state[%d]\n", __func__,
+			       usf_xx->usf_state);
+			rc = -EBADFD;
+		}
+		break;
+	} /* US_GET_TX_UPDATE */
+
+	case US_SET_RX_UPDATE: {
+		struct usf_xx_type *usf_xx = &usf->usf_rx;
+
+		if (usf_xx->usf_state == USF_WORK_STATE)
+			rc = usf_set_rx_update(usf_xx, arg);
+		else {
+			pr_err("%s: set_rx_update: wrong state[%d]\n",
+			       __func__,
+			       usf_xx->usf_state);
+			rc = -EBADFD;
+		}
+		break;
+	} /* US_SET_RX_UPDATE */
+
+	case US_STOP_TX: {
+		usf_xx = &usf->usf_tx;
+		if ((usf_xx->usf_state == USF_WORK_STATE)
+			|| (usf_xx->usf_state == USF_ADSP_RESTART_STATE))
+			rc = usf_stop_tx(usf);
+		else {
+			pr_err("%s: stop_tx: wrong state[%d]\n",
+			       __func__,
+			       usf_xx->usf_state);
+			return -EBADFD;
+		}
+		break;
+	} /* US_STOP_TX */
+
+	case US_STOP_RX: {
+		usf_xx = &usf->usf_rx;
+		if ((usf_xx->usf_state == USF_WORK_STATE)
+			|| (usf_xx->usf_state == USF_ADSP_RESTART_STATE))
+			usf_disable(usf_xx);
+		else {
+			pr_err("%s: stop_rx: wrong state[%d]\n",
+			       __func__,
+			       usf_xx->usf_state);
+			return -EBADFD;
+		}
+		break;
+	} /* US_STOP_RX */
+
+	case US_SET_DETECTION: {
+		struct usf_xx_type *usf_xx = &usf->usf_tx;
+
+		if (usf_xx->usf_state == USF_WORK_STATE)
+			rc = usf_set_us_detection(usf, arg);
+		else {
+			pr_err("%s: set us detection: wrong state[%d]\n",
+			       __func__,
+			       usf_xx->usf_state);
+			rc = -EBADFD;
+		}
+		break;
+	} /* US_SET_DETECTION */
+
+	case US_GET_VERSION: {
+		rc = usf_get_version(arg);
+		break;
+	} /* US_GET_VERSION */
+
+	case US_SET_TX_STREAM_PARAM: {
+		rc = usf_set_stream_param(&usf->usf_tx, arg, OUT);
+		break;
+	} /* US_SET_TX_STREAM_PARAM */
+
+	case US_GET_TX_STREAM_PARAM: {
+		rc = usf_get_stream_param(&usf->usf_tx, arg, OUT);
+		break;
+	} /* US_GET_TX_STREAM_PARAM */
+
+	case US_SET_RX_STREAM_PARAM: {
+		rc = usf_set_stream_param(&usf->usf_rx, arg, IN);
+		break;
+	} /* US_SET_RX_STREAM_PARAM */
+
+	case US_GET_RX_STREAM_PARAM: {
+		rc = usf_get_stream_param(&usf->usf_rx, arg, IN);
+		break;
+	} /* US_GET_RX_STREAM_PARAM */
+
+	default:
+		pr_err("%s: unsupported IOCTL command [%d]\n",
+		       __func__,
+		       cmd);
+		rc = -ENOTTY;
+		break;
+	}
+
+	if (rc &&
+	    ((cmd == US_SET_TX_INFO) ||
+	     (cmd == US_SET_RX_INFO)))
+		release_xx(usf_xx);
+
+	return rc;
+} /* __usf_ioctl */
+
+static long usf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+	struct usf_type *usf = file->private_data;
+	int rc = 0;
+
+	mutex_lock(&usf->mutex);
+	rc = __usf_ioctl(usf, cmd, arg);
+	mutex_unlock(&usf->mutex);
+
+	return rc;
+} /* usf_ioctl */
+
+#ifdef CONFIG_COMPAT
+
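+/*
+ * 32-bit variants of the USF ioctls. The corresponding structures
+ * differ from the native ones only where user space pointers are
+ * involved: these are carried as compat_uptr_t and converted with
+ * compat_ptr() before the common __usf_* helpers are reused.
+ */
+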
+#define US_SET_TX_INFO32   _IOW(USF_IOCTL_MAGIC, 0, \
+				struct us_tx_info_type32)
+#define US_GET_TX_UPDATE32 _IOWR(USF_IOCTL_MAGIC, 2, \
+				struct us_tx_update_info_type32)
+#define US_SET_RX_INFO32   _IOW(USF_IOCTL_MAGIC, 3, \
+				struct us_rx_info_type32)
+#define US_SET_RX_UPDATE32 _IOWR(USF_IOCTL_MAGIC, 4, \
+				struct us_rx_update_info_type32)
+#define US_SET_DETECTION32 _IOWR(USF_IOCTL_MAGIC, 8, \
+				struct us_detect_info_type32)
+#define US_GET_VERSION32  _IOWR(USF_IOCTL_MAGIC, 9, \
+				struct us_version_info_type32)
+#define US_SET_TX_STREAM_PARAM32   _IOW(USF_IOCTL_MAGIC, 10, \
+				struct us_stream_param_type32)
+#define US_GET_TX_STREAM_PARAM32  _IOWR(USF_IOCTL_MAGIC, 11, \
+				struct us_stream_param_type32)
+#define US_SET_RX_STREAM_PARAM32   _IOW(USF_IOCTL_MAGIC, 12, \
+				struct us_stream_param_type32)
+#define US_GET_RX_STREAM_PARAM32  _IOWR(USF_IOCTL_MAGIC, 13, \
+				struct us_stream_param_type32)
+
+/* Info structure common for TX and RX */
+struct us_xx_info_type32 {
+/* Input:  general info */
+/* Name of the client - event calculator, ptr to char */
+	const compat_uptr_t client_name;
+/* Selected device identification, accepted in the kernel's CAD */
+	uint32_t dev_id;
+/* 0 - point_epos type; (e.g. 1 - gr_mmrd) */
+	uint32_t stream_format;
+/* Required sample rate in Hz */
+	uint32_t sample_rate;
+/* Size of a buffer (bytes) for US data transfer between the module and USF */
+	uint32_t buf_size;
+/* Number of the buffers for the US data transfer */
+	uint16_t buf_num;
+/* Number of the microphones (TX) or speakers(RX) */
+	uint16_t port_cnt;
+/* Microphones(TX) or speakers(RX) indexes in their enumeration */
+	uint8_t  port_id[USF_MAX_PORT_NUM];
+/* Bits per sample 16 or 32 */
+	uint16_t bits_per_sample;
+/* Input:  Transparent info for encoder in the LPASS */
+/* Parameters data size in bytes */
+	uint16_t params_data_size;
+/* Pointer to the parameters, ptr to uint8_t */
+	compat_uptr_t params_data;
+/* Max size of buffer for get and set parameter */
+	uint32_t max_get_set_param_buf_size;
+};
+
+struct us_tx_info_type32 {
+/* Common info. This struct includes a ptr and therefore has a 32 version */
+	struct us_xx_info_type32 us_xx_info;
+/*
+ * Info specific for TX. This struct includes neither long nor ptr,
+ * so no 32 version is needed.
+ */
+	struct us_input_info_type input_info;
+};
+
+struct us_tx_update_info_type32 {
+/* Input  general: */
+/* Number of calculated events */
+	uint16_t event_counter;
+/* Calculated events or NULL, ptr to struct usf_event_type */
+	compat_uptr_t event;
+/* Pointer (read index) to the end of available region */
+/* in the shared US data memory */
+	uint32_t free_region;
+/* Time (sec) to wait for data or special values: */
+/* USF_NO_WAIT_TIMEOUT, USF_INFINITIVE_TIMEOUT, USF_DEFAULT_TIMEOUT */
+	uint32_t timeout;
+/* Events (from conflicting devs) to be disabled/enabled */
+	uint16_t event_filters;
+
+/* Input  transparent data: */
+/* Parameters size */
+	uint16_t params_data_size;
+/* Pointer to the parameters, ptr to uint8_t */
+	compat_uptr_t params_data;
+/* Output parameters: */
+/* Pointer (write index) to the end of ready US data region */
+/* in the shared memory */
+	uint32_t ready_region;
+};
+
+struct us_rx_info_type32 {
+	/* Common info */
+	struct us_xx_info_type32 us_xx_info;
+	/* Info specific for RX */
+};
+
+struct us_rx_update_info_type32 {
+/* Input  general: */
+/* Pointer (write index) to the end of ready US data region */
+/* in the shared memory */
+	uint32_t ready_region;
+/* Input  transparent data: */
+/* Parameters size */
+	uint16_t params_data_size;
+/* Pointer to the parameters, ptr to uint8_t */
+	compat_uptr_t params_data;
+/* Output parameters: */
+/* Pointer (read index) to the end of available region */
+/* in the shared US data memory */
+	uint32_t free_region;
+};
+
+struct us_detect_info_type32 {
+/* US detection place (HW|FW) */
+/* NA in the Active and OFF states */
+	enum us_detect_place_enum us_detector;
+/* US detection mode */
+	enum us_detect_mode_enum  us_detect_mode;
+/* US data dropped during this time (msec) */
+	uint32_t skip_time;
+/* Transparent data size */
+	uint16_t params_data_size;
+/* Pointer to the transparent data, ptr to uint8_t */
+	compat_uptr_t params_data;
+/* Time (sec) to wait for US presence event */
+	uint32_t detect_timeout;
+/* Out parameter: US presence */
+	bool is_us;
+};
+
+struct us_version_info_type32 {
+/* Size of memory for the version string */
+	uint16_t buf_size;
+/* Pointer to the memory for the version string, ptr to char */
+	compat_uptr_t pbuf;
+};
+
+struct us_stream_param_type32 {
+/* Id of module */
+	uint32_t module_id;
+/* Id of parameter */
+	uint32_t param_id;
+/* Size of memory of the parameter buffer */
+	uint32_t buf_size;
+/* Pointer to the memory of the parameter buffer */
+	compat_uptr_t pbuf;
+};
+
+static void usf_compat_xx_info_type(struct us_xx_info_type32 *us_xx_info32,
+				   struct us_xx_info_type *us_xx_info)
+{
+	int i = 0;
+
+	us_xx_info->client_name = compat_ptr(us_xx_info32->client_name);
+	us_xx_info->dev_id = us_xx_info32->dev_id;
+	us_xx_info->stream_format = us_xx_info32->stream_format;
+	us_xx_info->sample_rate = us_xx_info32->sample_rate;
+	us_xx_info->buf_size = us_xx_info32->buf_size;
+	us_xx_info->buf_num = us_xx_info32->buf_num;
+	us_xx_info->port_cnt = us_xx_info32->port_cnt;
+	for (i = 0; i < USF_MAX_PORT_NUM; i++)
+		us_xx_info->port_id[i] = us_xx_info32->port_id[i];
+	us_xx_info->bits_per_sample = us_xx_info32->bits_per_sample;
+	us_xx_info->params_data_size = us_xx_info32->params_data_size;
+	us_xx_info->params_data = compat_ptr(us_xx_info32->params_data);
+	us_xx_info->max_get_set_param_buf_size =
+			    us_xx_info32->max_get_set_param_buf_size;
+}
+
+static int usf_set_tx_info32(struct usf_type *usf, unsigned long arg)
+{
+	struct us_tx_info_type32 config_tx32;
+	struct us_tx_info_type config_tx;
+
+	int rc = copy_from_user(&config_tx32,
+			    (struct us_tx_info_type32 __user *) arg,
+			    sizeof(config_tx32));
+
+	if (rc) {
+		pr_err("%s: copy config_tx from user; rc=%d\n",
+			__func__, rc);
+		return -EFAULT;
+	}
+	memset(&config_tx, 0, sizeof(config_tx));
+	usf_compat_xx_info_type(&(config_tx32.us_xx_info),
+				&(config_tx.us_xx_info));
+	config_tx.input_info = config_tx32.input_info;
+
+	return __usf_set_tx_info(usf, &config_tx);
+} /* usf_set_tx_info32 */
+
+static int usf_set_rx_info32(struct usf_type *usf, unsigned long arg)
+{
+	struct us_rx_info_type32 config_rx32;
+	struct us_rx_info_type config_rx;
+
+	int rc = copy_from_user(&config_rx32,
+				(struct us_rx_info_type32 __user *) arg,
+				sizeof(config_rx32));
+
+	if (rc) {
+		pr_err("%s: copy config_rx from user; rc=%d\n",
+			__func__, rc);
+		return -EFAULT;
+	}
+	memset(&config_rx, 0, sizeof(config_rx));
+	usf_compat_xx_info_type(&(config_rx32.us_xx_info),
+				&(config_rx.us_xx_info));
+
+	return __usf_set_rx_info(usf, &config_rx);
+} /* usf_set_rx_info32 */
+
+static int usf_get_tx_update32(struct usf_type *usf, unsigned long arg)
+{
+	struct us_tx_update_info_type32 upd_tx_info32;
+	struct us_tx_update_info_type upd_tx_info;
+
+	int rc = copy_from_user(&upd_tx_info32,
+				(struct us_tx_update_info_type32 __user *) arg,
+				sizeof(upd_tx_info32));
+
+	if (rc) {
+		pr_err("%s: copy upd_tx_info32 from user; rc=%d\n",
+			__func__, rc);
+		return -EFAULT;
+	}
+
+	memset(&upd_tx_info, 0, sizeof(upd_tx_info));
+	upd_tx_info.event_counter = upd_tx_info32.event_counter;
+	upd_tx_info.event = compat_ptr(upd_tx_info32.event);
+	upd_tx_info.free_region = upd_tx_info32.free_region;
+	upd_tx_info.timeout = upd_tx_info32.timeout;
+	upd_tx_info.event_filters = upd_tx_info32.event_filters;
+	upd_tx_info.params_data_size = upd_tx_info32.params_data_size;
+	upd_tx_info.params_data = compat_ptr(upd_tx_info32.params_data);
+	upd_tx_info.ready_region = upd_tx_info32.ready_region;
+
+	rc = __usf_get_tx_update(usf, &upd_tx_info);
+	if (rc < 0) {
+		pr_err("%s: get tx update failed; rc=%d\n",
+			__func__, rc);
+		return rc;
+	}
+
+	/* Update only the fields that were changed */
+	upd_tx_info32.ready_region = upd_tx_info.ready_region;
+
+	rc = copy_to_user((void __user *)arg, &upd_tx_info32,
+			  sizeof(upd_tx_info32));
+	if (rc) {
+		pr_err("%s: copy upd_tx_info32 to user; rc=%d\n",
+			__func__, rc);
+		rc = -EFAULT;
+	}
+
+	return rc;
+} /* usf_get_tx_update32 */
+
+static int usf_set_rx_update32(struct usf_xx_type *usf_xx, unsigned long arg)
+{
+	struct us_rx_update_info_type32 upd_rx_info32;
+	struct us_rx_update_info_type upd_rx_info;
+
+	int rc = copy_from_user(&upd_rx_info32,
+				(struct us_rx_update_info_type32 __user *) arg,
+				sizeof(upd_rx_info32));
+
+	if (rc) {
+		pr_err("%s: copy upd_rx_info32 from user; rc=%d\n",
+			__func__, rc);
+		return -EFAULT;
+	}
+
+	memset(&upd_rx_info, 0, sizeof(upd_rx_info));
+	upd_rx_info.ready_region = upd_rx_info32.ready_region;
+	upd_rx_info.params_data_size = upd_rx_info32.params_data_size;
+	upd_rx_info.params_data = compat_ptr(upd_rx_info32.params_data);
+	upd_rx_info.free_region = upd_rx_info32.free_region;
+
+	rc = __usf_set_rx_update(usf_xx, &upd_rx_info);
+	if (rc < 0) {
+		pr_err("%s: set rx update failed; rc=%d\n",
+			__func__, rc);
+		return rc;
+	}
+
+	/* Update only the fields that were changed */
+	upd_rx_info32.free_region = upd_rx_info.free_region;
+
+	rc = copy_to_user((void __user *)arg,
+			&upd_rx_info32,
+			sizeof(upd_rx_info32));
+	if (rc) {
+		pr_err("%s: copy rx_info32 to user; rc=%d\n",
+			__func__, rc);
+		rc = -EFAULT;
+	}
+
+	return rc;
+} /* usf_set_rx_update32 */
+
+static int usf_set_us_detection32(struct usf_type *usf, unsigned long arg)
+{
+	struct us_detect_info_type32 detect_info32;
+	struct us_detect_info_type detect_info;
+
+	int rc = copy_from_user(&detect_info32,
+				(struct us_detect_info_type32 __user *) arg,
+				sizeof(detect_info32));
+
+	if (rc) {
+		pr_err("%s: copy detect_info32 from user; rc=%d\n",
+			__func__, rc);
+		return -EFAULT;
+	}
+
+	if (detect_info32.params_data_size > USF_MAX_USER_BUF_SIZE) {
+		pr_err("%s: user buffer size exceeds maximum\n",
+			__func__);
+		return -EFAULT;
+	}
+
+	memset(&detect_info, 0, sizeof(detect_info));
+	detect_info.us_detector = detect_info32.us_detector;
+	detect_info.us_detect_mode = detect_info32.us_detect_mode;
+	detect_info.skip_time = detect_info32.skip_time;
+	detect_info.params_data_size = detect_info32.params_data_size;
+	detect_info.params_data = compat_ptr(detect_info32.params_data);
+	detect_info.detect_timeout = detect_info32.detect_timeout;
+	detect_info.is_us = detect_info32.is_us;
+
+	rc = __usf_set_us_detection(usf, &detect_info);
+	if (rc < 0) {
+		pr_err("%s: set us detection failed; rc=%d\n",
+			__func__, rc);
+		return rc;
+	}
+
+	/* Update only the fields that were changed */
+	detect_info32.is_us = detect_info.is_us;
+
+	rc = copy_to_user((void __user *)arg,
+			  &detect_info32,
+			  sizeof(detect_info32));
+	if (rc) {
+		pr_err("%s: copy detect_info32 to user; rc=%d\n",
+			__func__, rc);
+		rc = -EFAULT;
+	}
+
+	return rc;
+} /* usf_set_us_detection32 */
+
+static int usf_get_version32(unsigned long arg)
+{
+	struct us_version_info_type32 version_info32;
+	struct us_version_info_type version_info;
+
+	int rc = copy_from_user(&version_info32,
+				(struct us_version_info_type32 __user *) arg,
+				sizeof(version_info32));
+
+	if (rc) {
+		pr_err("%s: copy version_info32 from user; rc=%d\n",
+			__func__, rc);
+		return -EFAULT;
+	}
+
+	memset(&version_info, 0, sizeof(version_info));
+	version_info.buf_size = version_info32.buf_size;
+	version_info.pbuf = compat_ptr(version_info32.pbuf);
+
+	rc = __usf_get_version(&version_info);
+	if (rc < 0) {
+		pr_err("%s: get version failed; rc=%d\n",
+			__func__, rc);
+		return rc;
+	}
+
+	/* None of the fields were changed */
+
+	rc = copy_to_user((void __user *)arg,
+			  &version_info32,
+			  sizeof(version_info32));
+	if (rc) {
+		pr_err("%s: copy version_info32 to user; rc=%d\n",
+			__func__, rc);
+		rc = -EFAULT;
+	}
+
+	return rc;
+} /* usf_get_version32 */
+
+static int usf_set_stream_param32(struct usf_xx_type *usf_xx,
+				unsigned long arg, int dir)
+{
+	struct us_stream_param_type32 set_stream_param32;
+	struct us_stream_param_type set_stream_param;
+	int rc = 0;
+
+	rc = copy_from_user(&set_stream_param32,
+			(struct us_stream_param_type32 __user *) arg,
+			sizeof(set_stream_param32));
+
+	if (rc) {
+		pr_err("%s: copy set_stream_param from user; rc=%d\n",
+			__func__, rc);
+		return -EFAULT;
+	}
+
+	memset(&set_stream_param, 0, sizeof(set_stream_param));
+	set_stream_param.module_id = set_stream_param32.module_id;
+	set_stream_param.param_id = set_stream_param32.param_id;
+	set_stream_param.buf_size = set_stream_param32.buf_size;
+	set_stream_param.pbuf = compat_ptr(set_stream_param32.pbuf);
+
+	return __usf_set_stream_param(usf_xx, &set_stream_param, dir);
+} /* usf_set_stream_param32 */
+
+static int usf_get_stream_param32(struct usf_xx_type *usf_xx,
+				unsigned long arg, int dir)
+{
+	struct us_stream_param_type32 get_stream_param32;
+	struct us_stream_param_type get_stream_param;
+	int rc = 0;
+
+	rc = copy_from_user(&get_stream_param32,
+			(struct us_stream_param_type32 __user *) arg,
+			sizeof(get_stream_param32));
+
+	if (rc) {
+		pr_err("%s: copy get_stream_param from user; rc=%d\n",
+			__func__, rc);
+		return -EFAULT;
+	}
+
+	memset(&get_stream_param, 0, sizeof(get_stream_param));
+	get_stream_param.module_id = get_stream_param32.module_id;
+	get_stream_param.param_id = get_stream_param32.param_id;
+	get_stream_param.buf_size = get_stream_param32.buf_size;
+	get_stream_param.pbuf = compat_ptr(get_stream_param32.pbuf);
+
+	return __usf_get_stream_param(usf_xx, &get_stream_param, dir);
+} /* usf_get_stream_param32 */
+
+static long __usf_compat_ioctl(struct usf_type *usf,
+			     unsigned int cmd,
+			     unsigned long arg)
+{
+	int rc = 0;
+	struct usf_xx_type *usf_xx = NULL;
+
+	switch (cmd) {
+	case US_START_TX:
+	case US_START_RX:
+	case US_STOP_TX:
+	case US_STOP_RX: {
+		return __usf_ioctl(usf, cmd, arg);
+	}
+
+	case US_SET_TX_INFO32: {
+		usf_xx = &usf->usf_tx;
+		if (usf_xx->usf_state == USF_OPENED_STATE)
+			rc = usf_set_tx_info32(usf, arg);
+		else {
+			pr_err("%s: set_tx_info32: wrong state[%d]\n",
+			       __func__,
+			       usf_xx->usf_state);
+			return -EBADFD;
+		}
+
+		break;
+	} /* US_SET_TX_INFO32 */
+
+	case US_SET_RX_INFO32: {
+		usf_xx = &usf->usf_rx;
+		if (usf_xx->usf_state == USF_OPENED_STATE)
+			rc = usf_set_rx_info32(usf, arg);
+		else {
+			pr_err("%s: set_rx_info32: wrong state[%d]\n",
+				__func__,
+				usf_xx->usf_state);
+			return -EBADFD;
+		}
+
+		break;
+	} /* US_SET_RX_INFO32 */
+
+	case US_GET_TX_UPDATE32: {
+		struct usf_xx_type *usf_xx = &usf->usf_tx;
+
+		if (usf_xx->usf_state == USF_WORK_STATE)
+			rc = usf_get_tx_update32(usf, arg);
+		else {
+			pr_err("%s: get_tx_update32: wrong state[%d]\n",
+			       __func__,
+			       usf_xx->usf_state);
+			rc = -EBADFD;
+		}
+		break;
+	} /* US_GET_TX_UPDATE32 */
+
+	case US_SET_RX_UPDATE32: {
+		struct usf_xx_type *usf_xx = &usf->usf_rx;
+
+		if (usf_xx->usf_state == USF_WORK_STATE)
+			rc = usf_set_rx_update32(usf_xx, arg);
+		else {
+			pr_err("%s: set_rx_update: wrong state[%d]\n",
+			       __func__,
+			       usf_xx->usf_state);
+			rc = -EBADFD;
+		}
+		break;
+	} /* US_SET_RX_UPDATE32 */
+
+	case US_SET_DETECTION32: {
+		struct usf_xx_type *usf_xx = &usf->usf_tx;
+
+		if (usf_xx->usf_state == USF_WORK_STATE)
+			rc = usf_set_us_detection32(usf, arg);
+		else {
+			pr_err("%s: set us detection: wrong state[%d]\n",
+			       __func__,
+			       usf_xx->usf_state);
+			rc = -EBADFD;
+		}
+		break;
+	} /* US_SET_DETECTION32 */
+
+	case US_GET_VERSION32: {
+		rc = usf_get_version32(arg);
+		break;
+	} /* US_GET_VERSION32 */
+
+	case US_SET_TX_STREAM_PARAM32: {
+		rc = usf_set_stream_param32(&usf->usf_tx, arg, OUT);
+		break;
+	} /* US_SET_TX_STREAM_PARAM32 */
+
+	case US_GET_TX_STREAM_PARAM32: {
+		rc = usf_get_stream_param32(&usf->usf_tx, arg, OUT);
+		break;
+	} /* US_GET_TX_STREAM_PARAM32 */
+
+	case US_SET_RX_STREAM_PARAM32: {
+		rc = usf_set_stream_param32(&usf->usf_rx, arg, IN);
+		break;
+	} /* US_SET_RX_STREAM_PARAM32 */
+
+	case US_GET_RX_STREAM_PARAM32: {
+		rc = usf_get_stream_param32(&usf->usf_rx, arg, IN);
+		break;
+	} /* US_GET_RX_STREAM_PARAM32 */
+
+	default:
+		pr_err("%s: unsupported IOCTL command [%d]\n",
+		       __func__,
+		       cmd);
+		rc = -ENOTTY;
+		break;
+	}
+
+	if (rc &&
+	    ((cmd == US_SET_TX_INFO32) ||
+	     (cmd == US_SET_RX_INFO32)))
+		release_xx(usf_xx);
+
+	return rc;
+} /* __usf_compat_ioctl */
+
+static long usf_compat_ioctl(struct file *file,
+			     unsigned int cmd,
+			     unsigned long arg)
+{
+	struct usf_type *usf = file->private_data;
+	int rc = 0;
+
+	mutex_lock(&usf->mutex);
+	rc = __usf_compat_ioctl(usf, cmd, arg);
+	mutex_unlock(&usf->mutex);
+
+	return rc;
+} /* usf_compat_ioctl */
+#endif /* CONFIG_COMPAT */
+
+static int usf_mmap(struct file *file, struct vm_area_struct *vms)
+{
+	struct usf_type *usf = file->private_data;
+	int dir = OUT;
+	struct usf_xx_type *usf_xx = &usf->usf_tx;
+	int rc = 0;
+
+	mutex_lock(&usf->mutex);
+	if (vms->vm_flags & USF_VM_WRITE) { /* RX buf mapping */
+		dir = IN;
+		usf_xx = &usf->usf_rx;
+	}
+	rc = q6usm_get_virtual_address(dir, usf_xx->usc, vms);
+	mutex_unlock(&usf->mutex);
+
+	return rc;
+}
+
+static uint16_t add_opened_dev(int minor)
+{
+	uint16_t ind = 0;
+
+	for (ind = 0; ind < MAX_DEVS_NUMBER; ++ind) {
+		if (minor == atomic_cmpxchg(&s_opened_devs[ind], 0, minor)) {
+			pr_err("%s: device %d is already opened\n",
+			       __func__, minor);
+			return USF_UNDEF_DEV_ID;
+		} else {
+			pr_debug("%s: device %d is added; ind=%d\n",
+				__func__, minor, ind);
+			return ind;
+		}
+	}
+
+	pr_err("%s: there is no place for device %d\n",
+	       __func__, minor);
+	return USF_UNDEF_DEV_ID;
+}
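/*
 * Editorial sketch: add_opened_dev() claims a per-minor slot without a lock
 * via the atomic_cmpxchg() idiom. atomic_cmpxchg(&v, 0, minor) stores minor
 * only if v still reads 0 and always returns the previous value, so reading
 * minor back means this minor already holds the slot. A minimal standalone
 * form of the same idiom (names are illustrative):
 */
static atomic_t example_slot = ATOMIC_INIT(0);

static bool example_claim(int id)
{
	/* old == 0: we installed id; old == id: id already owns the slot */
	return atomic_cmpxchg(&example_slot, 0, id) == 0;
}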
+
+static int usf_open(struct inode *inode, struct file *file)
+{
+	struct usf_type *usf =  NULL;
+	uint16_t dev_ind = 0;
+	int minor = MINOR(inode->i_rdev);
+
+	dev_ind = add_opened_dev(minor);
+	if (dev_ind == USF_UNDEF_DEV_ID)
+		return -EBUSY;
+
+	usf = kzalloc(sizeof(struct usf_type), GFP_KERNEL);
+	if (usf == NULL) {
+		pr_err("%s:usf allocation failed\n", __func__);
+		return -ENOMEM;
+	}
+	wakeup_source_init(&usf_wakeup_source, "usf");
+
+	file->private_data = usf;
+	usf->dev_ind = dev_ind;
+
+	usf->usf_tx.usf_state = USF_OPENED_STATE;
+	usf->usf_rx.usf_state = USF_OPENED_STATE;
+
+	usf->usf_tx.us_detect_type = USF_US_DETECT_UNDEF;
+	usf->usf_rx.us_detect_type = USF_US_DETECT_UNDEF;
+
+	mutex_init(&usf->mutex);
+
+	pr_debug("%s:usf in open\n", __func__);
+	return 0;
+}
+
+static int usf_release(struct inode *inode, struct file *file)
+{
+	struct usf_type *usf = file->private_data;
+
+	pr_debug("%s: release entry\n", __func__);
+
+	mutex_lock(&usf->mutex);
+	usf_release_input(usf);
+
+	usf_disable(&usf->usf_tx);
+	usf_disable(&usf->usf_rx);
+
+	atomic_set(&s_opened_devs[usf->dev_ind], 0);
+
+	wakeup_source_trash(&usf_wakeup_source);
+	mutex_unlock(&usf->mutex);
+	mutex_destroy(&usf->mutex);
+	kfree(usf);
+	pr_debug("%s: release exit\n", __func__);
+	return 0;
+}
+
+static const struct file_operations usf_fops = {
+	.owner                  = THIS_MODULE,
+	.open                   = usf_open,
+	.release                = usf_release,
+	.unlocked_ioctl = usf_ioctl,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl = usf_compat_ioctl,
+#endif /* CONFIG_COMPAT */
+	.mmap                   = usf_mmap,
+};
+
+static struct miscdevice usf_misc[MAX_DEVS_NUMBER] = {
+	{
+		.minor  = MISC_DYNAMIC_MINOR,
+		.name   = "usf1",
+		.fops   = &usf_fops,
+	},
+};
+
+static int __init usf_init(void)
+{
+	int rc = 0;
+	uint16_t ind = 0;
+
+	pr_debug("%s: USF SW version %s.\n", __func__, DRV_VERSION);
+	pr_debug("%s: Max %d devs registration\n", __func__, MAX_DEVS_NUMBER);
+
+	for (ind = 0; ind < MAX_DEVS_NUMBER; ++ind) {
+		rc = misc_register(&usf_misc[ind]);
+		if (rc) {
+			pr_err("%s: misc_register() failed ind=%d; rc = %d\n",
+			       __func__, ind, rc);
+			break;
+		}
+	}
+
+	return rc;
+}
+
+device_initcall(usf_init);
+
+MODULE_DESCRIPTION("Ultrasound framework driver");
diff -Nruw linux-4.4.115/drivers/misc/qcom/qdsp6v2/ultrasound/usfcdev.c linux-4.4.115-fbx/drivers/misc/qcom/qdsp6v2/ultrasound/usfcdev.c
--- linux-4.4.115/drivers/misc/qcom/qdsp6v2/ultrasound/usfcdev.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/misc/qcom/qdsp6v2/ultrasound/usfcdev.c	2019-01-22 16:16:24.747257672 +0100
@@ -0,0 +1,424 @@
+/* Copyright (c) 2012-2013, 2016 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/miscdevice.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/input/mt.h>
+#include <linux/syscalls.h>
+#include "usfcdev.h"
+
+#define UNDEF_ID    0xffffffff
+#define SLOT_CMD_ID 0
+#define MAX_RETRIES 10
+
+enum usdev_event_status {
+	USFCDEV_EVENT_ENABLED,
+	USFCDEV_EVENT_DISABLING,
+	USFCDEV_EVENT_DISABLED,
+};
+
+struct usfcdev_event {
+	bool (*match_cb)(uint16_t, struct input_dev *dev);
+	bool registered_event;
+	bool interleaved;
+	enum usdev_event_status event_status;
+};
+static struct usfcdev_event s_usfcdev_events[MAX_EVENT_TYPE_NUM];
+
+struct usfcdev_input_command {
+	unsigned int type;
+	unsigned int code;
+	unsigned int value;
+};
+
+static long  s_usf_pid;
+
+static bool usfcdev_filter(struct input_handle *handle,
+			 unsigned int type, unsigned int code, int value);
+static bool usfcdev_match(struct input_handler *handler,
+				struct input_dev *dev);
+static int usfcdev_connect(struct input_handler *handler,
+				struct input_dev *dev,
+				const struct input_device_id *id);
+static void usfcdev_disconnect(struct input_handle *handle);
+
+static const struct input_device_id usfc_tsc_ids[] = {
+	{
+		.flags = INPUT_DEVICE_ID_MATCH_EVBIT |
+			INPUT_DEVICE_ID_MATCH_KEYBIT |
+			INPUT_DEVICE_ID_MATCH_ABSBIT,
+		.evbit = { BIT_MASK(EV_ABS) | BIT_MASK(EV_KEY) },
+		.keybit = { [BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH) },
+		/* assumption: ABS_X & ABS_Y are in the same long */
+		.absbit = { [BIT_WORD(ABS_X)] = BIT_MASK(ABS_X) |
+						BIT_MASK(ABS_Y) },
+	},
+	{
+		.flags = INPUT_DEVICE_ID_MATCH_EVBIT |
+			INPUT_DEVICE_ID_MATCH_KEYBIT |
+			INPUT_DEVICE_ID_MATCH_ABSBIT,
+		.evbit = { BIT_MASK(EV_ABS) | BIT_MASK(EV_KEY) },
+		.keybit = { [BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH) },
+		/* assumption: MT_.._X & MT_.._Y are in the same long */
+		.absbit = { [BIT_WORD(ABS_MT_POSITION_X)] =
+			BIT_MASK(ABS_MT_POSITION_X) |
+			BIT_MASK(ABS_MT_POSITION_Y) },
+	},
+	{ } /* Terminating entry */
+};
+
+MODULE_DEVICE_TABLE(input, usfc_tsc_ids);
+
+static struct input_handler s_usfc_handlers[MAX_EVENT_TYPE_NUM] = {
+	{ /* TSC handler */
+		.filter         = usfcdev_filter,
+		.match          = usfcdev_match,
+		.connect        = usfcdev_connect,
+		.disconnect     = usfcdev_disconnect,
+		/*
+		 * .minor can be used as an index in the container,
+		 * because .fops isn't supported
+		 */
+		.minor          = TSC_EVENT_TYPE_IND,
+		.name           = "usfc_tsc_handler",
+		.id_table       = usfc_tsc_ids,
+	},
+};
+
+/*
+ * For each event type, there may be a number of conflicting devices
+ * (handles). The first registered device (the primary) is the real TSC
+ * device; it is mandatory. Optionally, later registered devices are
+ * simulated ones; they are managed dynamically.
+ * The primary devices' handles are stored in the static array below.
+ */
+static struct input_handle s_usfc_primary_handles[MAX_EVENT_TYPE_NUM] = {
+	{ /* TSC handle */
+		.handler	= &s_usfc_handlers[TSC_EVENT_TYPE_IND],
+		.name		= "usfc_tsc_handle",
+	},
+};
+
+static struct usfcdev_input_command initial_clear_cmds[] = {
+	{EV_ABS, ABS_PRESSURE,               0},
+	{EV_KEY, BTN_TOUCH,                  0},
+};
+
+static struct usfcdev_input_command slot_clear_cmds[] = {
+	{EV_ABS, ABS_MT_SLOT,               0},
+	{EV_ABS, ABS_MT_TRACKING_ID, UNDEF_ID},
+};
+
+static struct usfcdev_input_command no_filter_cmds[] = {
+	{EV_ABS, ABS_MT_SLOT,               0},
+	{EV_ABS, ABS_MT_TRACKING_ID, UNDEF_ID},
+	{EV_SYN, SYN_REPORT,                0},
+};
+
+static bool usfcdev_match(struct input_handler *handler, struct input_dev *dev)
+{
+	bool rc = false;
+	int ind = handler->minor;
+
+	pr_debug("%s: name=[%s]; ind=%d\n", __func__, dev->name, ind);
+
+	if (s_usfcdev_events[ind].registered_event &&
+		s_usfcdev_events[ind].match_cb) {
+		rc = (*s_usfcdev_events[ind].match_cb)((uint16_t)ind, dev);
+		pr_debug("%s: [%s]; rc=%d\n", __func__, dev->name, rc);
+	}
+	return rc;
+}
+
+static int usfcdev_connect(struct input_handler *handler, struct input_dev *dev,
+				const struct input_device_id *id)
+{
+	int ret = 0;
+	uint16_t ind = handler->minor;
+	struct input_handle *usfc_handle = NULL;
+
+	if (s_usfc_primary_handles[ind].dev == NULL) {
+		pr_debug("%s: primary device; ind=%d\n",
+			__func__,
+			ind);
+		usfc_handle = &s_usfc_primary_handles[ind];
+	} else {
+		pr_debug("%s: secondary device; ind=%d\n",
+			__func__,
+			ind);
+		usfc_handle = kzalloc(sizeof(struct input_handle),
+					GFP_KERNEL);
+		if (!usfc_handle) {
+			pr_err("%s: memory allocation failed; ind=%d\n",
+				__func__,
+				ind);
+			return -ENOMEM;
+		}
+		usfc_handle->handler = &s_usfc_handlers[ind];
+		usfc_handle->name = s_usfc_primary_handles[ind].name;
+	}
+	usfc_handle->dev = dev;
+	ret = input_register_handle(usfc_handle);
+	pr_debug("%s: name=[%s]; ind=%d; dev=0x%pK\n",
+		 __func__,
+		dev->name,
+		ind,
+		usfc_handle->dev);
+	if (ret)
+		pr_err("%s: input_register_handle[%d] failed: ret=%d\n",
+			__func__,
+			ind,
+			ret);
+	else {
+		ret = input_open_device(usfc_handle);
+		if (ret) {
+			pr_err("%s: input_open_device[%d] failed: ret=%d\n",
+				__func__,
+				ind,
+				ret);
+			input_unregister_handle(usfc_handle);
+		} else
+			pr_debug("%s: device[%d] is opened\n",
+				__func__,
+				ind);
+	}
+
+	return ret;
+}
+
+static void usfcdev_disconnect(struct input_handle *handle)
+{
+	int ind = handle->handler->minor;
+
+	input_close_device(handle);
+	input_unregister_handle(handle);
+	pr_debug("%s: handle[%d], name=[%s] is disconnected\n",
+		__func__,
+		ind,
+		handle->dev->name);
+	if (s_usfc_primary_handles[ind].dev == handle->dev)
+		s_usfc_primary_handles[ind].dev = NULL;
+	else
+		kfree(handle);
+}
+
+static bool usfcdev_filter(struct input_handle *handle,
+			unsigned int type, unsigned int code, int value)
+{
+	uint16_t i = 0;
+	uint16_t ind = (uint16_t)handle->handler->minor;
+	bool rc = (s_usfcdev_events[ind].event_status != USFCDEV_EVENT_ENABLED);
+
+	if (s_usf_pid == sys_getpid()) {
+		/* Pass events from usfcdev driver */
+		rc = false;
+		pr_debug("%s: event_type=%d; type=%d; code=%d; val=%d",
+			__func__,
+			ind,
+			type,
+			code,
+			value);
+	} else if (s_usfcdev_events[ind].event_status ==
+						USFCDEV_EVENT_DISABLING) {
+		uint32_t u_value = value;
+		s_usfcdev_events[ind].interleaved = true;
+		/* Pass events for freeing slots from TSC driver */
+		for (i = 0; i < ARRAY_SIZE(no_filter_cmds); ++i) {
+			if ((no_filter_cmds[i].type == type) &&
+			    (no_filter_cmds[i].code == code) &&
+			    (no_filter_cmds[i].value <= u_value)) {
+				rc = false;
+				pr_debug("%s: no_filter_cmds[%d]; %d",
+					__func__,
+					i,
+					no_filter_cmds[i].value);
+				break;
+			}
+		}
+	}
+
+	return rc;
+}
+
+bool usfcdev_register(
+	uint16_t event_type_ind,
+	bool (*match_cb)(uint16_t, struct input_dev *dev))
+{
+	int ret = 0;
+	bool rc = false;
+
+	if ((event_type_ind >= MAX_EVENT_TYPE_NUM) || !match_cb) {
+		pr_err("%s: wrong input: event_type_ind=%d; match_cb=0x%pK\n",
+			__func__,
+			event_type_ind,
+			match_cb);
+		return false;
+	}
+
+	if (s_usfcdev_events[event_type_ind].registered_event) {
+		pr_info("%s: handler[%d] was already registered\n",
+			__func__,
+			event_type_ind);
+		return true;
+	}
+
+	s_usfcdev_events[event_type_ind].registered_event = true;
+	s_usfcdev_events[event_type_ind].match_cb = match_cb;
+	s_usfcdev_events[event_type_ind].event_status = USFCDEV_EVENT_ENABLED;
+	ret = input_register_handler(&s_usfc_handlers[event_type_ind]);
+	if (!ret) {
+		rc = true;
+		pr_debug("%s: handler[%d] was registered\n",
+			__func__,
+			event_type_ind);
+	} else {
+		s_usfcdev_events[event_type_ind].registered_event = false;
+		s_usfcdev_events[event_type_ind].match_cb = NULL;
+		pr_err("%s: handler[%d] registration failed: ret=%d\n",
+			__func__,
+			event_type_ind,
+			ret);
+	}
+
+	return rc;
+}
+
+void usfcdev_unregister(uint16_t event_type_ind)
+{
+	if (event_type_ind >= MAX_EVENT_TYPE_NUM) {
+		pr_err("%s: wrong input: event_type_ind=%d\n",
+			__func__,
+			event_type_ind);
+		return;
+	}
+	if (s_usfcdev_events[event_type_ind].registered_event) {
+		input_unregister_handler(&s_usfc_handlers[event_type_ind]);
+		pr_debug("%s: handler[%d] was unregistered\n",
+			__func__,
+			event_type_ind);
+		s_usfcdev_events[event_type_ind].registered_event = false;
+		s_usfcdev_events[event_type_ind].match_cb = NULL;
+		s_usfcdev_events[event_type_ind].event_status =
+							USFCDEV_EVENT_ENABLED;
+
+	}
+}
+
+static inline void usfcdev_send_cmd(
+	struct input_dev *dev,
+	struct usfcdev_input_command cmd)
+{
+	input_event(dev, cmd.type, cmd.code, cmd.value);
+}
+
+static void usfcdev_clean_dev(uint16_t event_type_ind)
+{
+	struct input_dev *dev = NULL;
+	int i;
+	int j;
+	int retries = 0;
+
+	if (event_type_ind >= MAX_EVENT_TYPE_NUM) {
+		pr_err("%s: wrong input: event_type_ind=%d\n",
+			__func__,
+			event_type_ind);
+		return;
+	}
+	/* Only primary device must exist */
+	dev = s_usfc_primary_handles[event_type_ind].dev;
+	if (dev == NULL) {
+		pr_err("%s: NULL primary device\n",
+		__func__);
+		return;
+	}
+
+	for (i = 0; i < ARRAY_SIZE(initial_clear_cmds); i++)
+		usfcdev_send_cmd(dev, initial_clear_cmds[i]);
+	input_sync(dev);
+
+	/* Send commands to free all slots */
+	for (i = 0; i < dev->mt->num_slots; i++) {
+		s_usfcdev_events[event_type_ind].interleaved = false;
+		if (input_mt_get_value(&dev->mt->slots[i],
+					ABS_MT_TRACKING_ID) < 0) {
+			pr_debug("%s: skipping slot %d",
+				__func__, i);
+			continue;
+		}
+		slot_clear_cmds[SLOT_CMD_ID].value = i;
+		for (j = 0; j < ARRAY_SIZE(slot_clear_cmds); j++)
+			usfcdev_send_cmd(dev, slot_clear_cmds[j]);
+
+		if (s_usfcdev_events[event_type_ind].interleaved) {
+			pr_debug("%s: interleaved(%d): slot(%d)",
+				__func__, i, dev->mt->slot);
+			if (retries++ < MAX_RETRIES) {
+				--i;
+				continue;
+			}
+			pr_warn("%s: index(%d) reached max retires",
+				__func__, i);
+		}
+
+		retries = 0;
+		input_sync(dev);
+	}
+}
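/*
 * Editorial sketch: usfcdev_clean_dev() frees each active multitouch contact
 * by selecting the slot (ABS_MT_SLOT) and then invalidating its tracking id
 * (ABS_MT_TRACKING_ID = UNDEF_ID, i.e. -1), retrying a slot when real TSC
 * traffic interleaves with the injected commands. The same release sequence,
 * written directly against the input core for a hypothetical device:
 */
static void example_release_all_contacts(struct input_dev *dev)
{
	int i;

	for (i = 0; i < dev->mt->num_slots; i++) {
		if (input_mt_get_value(&dev->mt->slots[i],
				       ABS_MT_TRACKING_ID) < 0)
			continue;	/* slot already free */
		input_event(dev, EV_ABS, ABS_MT_SLOT, i);
		input_event(dev, EV_ABS, ABS_MT_TRACKING_ID, -1);
		input_sync(dev);
	}
}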
+
+bool usfcdev_set_filter(uint16_t event_type_ind, bool filter)
+{
+	bool rc = true;
+
+	if (event_type_ind >= MAX_EVENT_TYPE_NUM) {
+		pr_err("%s: wrong input: event_type_ind=%d\n",
+			__func__,
+			event_type_ind);
+		return false;
+	}
+
+	if (s_usfcdev_events[event_type_ind].registered_event) {
+
+		pr_debug("%s: event_type[%d]; filter=%d\n",
+			__func__,
+			event_type_ind,
+			filter
+			);
+		if (filter) {
+			s_usfcdev_events[event_type_ind].event_status =
+						USFCDEV_EVENT_DISABLING;
+			s_usf_pid = sys_getpid();
+			usfcdev_clean_dev(event_type_ind);
+			s_usfcdev_events[event_type_ind].event_status =
+						USFCDEV_EVENT_DISABLED;
+		} else
+			s_usfcdev_events[event_type_ind].event_status =
+						USFCDEV_EVENT_ENABLED;
+	} else {
+		pr_err("%s: event_type[%d] isn't registered\n",
+			__func__,
+			event_type_ind);
+		rc = false;
+	}
+
+	return rc;
+}
+
+static int __init usfcdev_init(void)
+{
+	return 0;
+}
+
+device_initcall(usfcdev_init);
+
+MODULE_DESCRIPTION("Handle of events from devices, conflicting with USF");
diff -Nruw linux-4.4.115/drivers/misc/qcom/qdsp6v2/ultrasound/usfcdev.h linux-4.4.115-fbx/drivers/misc/qcom/qdsp6v2/ultrasound/usfcdev.h
--- linux-4.4.115/drivers/misc/qcom/qdsp6v2/ultrasound/usfcdev.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/misc/qcom/qdsp6v2/ultrasound/usfcdev.h	2019-01-22 16:16:24.747257672 +0100
@@ -0,0 +1,28 @@
+/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef __USFCDEV_H__
+#define __USFCDEV_H__
+
+#include <linux/input.h>
+
+/* TSC event type index in the containers of the handlers & handles */
+#define TSC_EVENT_TYPE_IND 0
+/* Number of supported event types to be filtered */
+#define MAX_EVENT_TYPE_NUM 1
+
+bool usfcdev_register(
+	uint16_t event_type_ind,
+	bool (*match_cb)(uint16_t, struct input_dev *dev));
+void usfcdev_unregister(uint16_t event_type_ind);
+bool usfcdev_set_filter(uint16_t event_type_ind, bool filter);
+#endif /* __USFCDEV_H__ */
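/*
 * Editorial sketch: the header above exposes a three-call contract --
 * register a match callback, toggle the filter around an ultrasound session,
 * unregister when done. A hypothetical client (the callback policy is
 * invented for illustration):
 */
static bool example_tsc_match(uint16_t ind, struct input_dev *dev)
{
	return dev->name != NULL;	/* accept anything the id table matched */
}

static void example_session(void)
{
	if (!usfcdev_register(TSC_EVENT_TYPE_IND, example_tsc_match))
		return;
	usfcdev_set_filter(TSC_EVENT_TYPE_IND, true);	/* mute the TSC */
	/* ... ultrasound session runs here ... */
	usfcdev_set_filter(TSC_EVENT_TYPE_IND, false);	/* restore events */
	usfcdev_unregister(TSC_EVENT_TYPE_IND);
}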
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/misc/qpnp-misc.c	2019-01-22 16:16:24.747257672 +0100
@@ -0,0 +1,350 @@
+/* Copyright (c) 2013-2014,2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#define pr_fmt(fmt)	"%s: " fmt, __func__
+
+#include <linux/module.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/regmap.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/qpnp-misc.h>
+
+#define QPNP_MISC_DEV_NAME "qcom,qpnp-misc"
+
+#define REG_DIG_MAJOR_REV	0x01
+#define REG_SUBTYPE		0x05
+#define REG_PWM_SEL		0x49
+#define REG_GP_DRIVER_EN	0x4C
+
+#define PWM_SEL_MAX		0x03
+#define GP_DRIVER_EN_BIT	BIT(0)
+
+static DEFINE_MUTEX(qpnp_misc_dev_list_mutex);
+static LIST_HEAD(qpnp_misc_dev_list);
+
+struct qpnp_misc_version {
+	u8	subtype;
+	u8	dig_major_rev;
+};
+
+/**
+ * struct qpnp_misc_dev - holds controller device specific information
+ * @list:			Doubly-linked list parameter linking to other
+ *				qpnp_misc devices.
+ * @mutex:			Mutex lock that is used to ensure mutual
+ *				exclusion between probing and accessing misc
+ *				driver information
+ * @dev:			Device pointer to the misc device
+ * @regmap:			Regmap pointer to the misc device
+ * @version:			struct that holds the subtype and dig_major_rev
+ *				of the chip.
+ * @base:			Base address of the peripheral's register space
+ * @pwm_sel:			Value to program into the PWM_SEL register
+ * @enable_gp_driver:		True if the GP driver output should be enabled
+ */
+struct qpnp_misc_dev {
+	struct list_head		list;
+	struct mutex			mutex;
+	struct device			*dev;
+	struct regmap			*regmap;
+	struct qpnp_misc_version	version;
+
+	u32				base;
+	u8				pwm_sel;
+	bool				enable_gp_driver;
+};
+
+static struct of_device_id qpnp_misc_match_table[] = {
+	{ .compatible = QPNP_MISC_DEV_NAME },
+	{}
+};
+
+enum qpnp_misc_version_name {
+	INVALID,
+	PM8941,
+	PM8226,
+	PMA8084,
+	PMDCALIFORNIUM,
+};
+
+static struct qpnp_misc_version irq_support_version[] = {
+	{0x00, 0x00}, /* INVALID */
+	{0x01, 0x02}, /* PM8941 */
+	{0x07, 0x00}, /* PM8226 */
+	{0x09, 0x00}, /* PMA8084 */
+	{0x16, 0x00}, /* PMDCALIFORNIUM */
+};
+
+static int qpnp_write_byte(struct qpnp_misc_dev *mdev, u16 addr, u8 val)
+{
+	int rc;
+
+	rc = regmap_write(mdev->regmap, mdev->base + addr, val);
+	if (rc)
+		pr_err("regmap write failed rc=%d\n", rc);
+
+	return rc;
+}
+
+static int qpnp_read_byte(struct qpnp_misc_dev *mdev, u16 addr, u8 *val)
+{
+	unsigned int temp;
+	int rc;
+
+	rc = regmap_read(mdev->regmap, mdev->base + addr, &temp);
+	if (rc) {
+		pr_err("regmap read failed rc=%d\n", rc);
+		return rc;
+	}
+
+	*val = (u8)temp;
+	return rc;
+}
+
+static int get_qpnp_misc_version_name(struct qpnp_misc_dev *dev)
+{
+	int i;
+
+	for (i = 1; i < ARRAY_SIZE(irq_support_version); i++)
+		if (dev->version.subtype == irq_support_version[i].subtype &&
+		    dev->version.dig_major_rev >=
+					irq_support_version[i].dig_major_rev)
+			return i;
+
+	return INVALID;
+}
+
+static bool __misc_irqs_available(struct qpnp_misc_dev *dev)
+{
+	int version_name = get_qpnp_misc_version_name(dev);
+
+	return version_name != INVALID;
+}
+
+int qpnp_misc_read_reg(struct device_node *node, u16 addr, u8 *val)
+{
+	struct qpnp_misc_dev *mdev = NULL;
+	struct qpnp_misc_dev *mdev_found = NULL;
+	int rc;
+	u8 temp;
+
+	if (IS_ERR_OR_NULL(node)) {
+		pr_err("Invalid device node pointer\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&qpnp_misc_dev_list_mutex);
+	list_for_each_entry(mdev, &qpnp_misc_dev_list, list) {
+		if (mdev->dev->of_node == node) {
+			mdev_found = mdev;
+			break;
+		}
+	}
+	mutex_unlock(&qpnp_misc_dev_list_mutex);
+
+	if (!mdev_found) {
+		/*
+		 * No MISC device was found. This API should only
+		 * be called by drivers which have specified the
+		 * misc phandle in their device tree node.
+		 */
+		pr_err("no probed misc device found\n");
+		return -EPROBE_DEFER;
+	}
+
+	rc = qpnp_read_byte(mdev, addr, &temp);
+	if (rc < 0) {
+		dev_err(mdev->dev, "Failed to read addr %x, rc=%d\n", addr, rc);
+		return rc;
+	}
+
+	*val = temp;
+	return 0;
+}
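/*
 * Editorial sketch: qpnp_misc_read_reg() returns -EPROBE_DEFER when no MISC
 * device has probed yet, so a consumer is expected to propagate that from
 * its own probe and let the driver core retry. A hypothetical consumer
 * (register offset and phandle lookup are illustrative only):
 */
static int example_consumer_probe(struct platform_device *pdev)
{
	struct device_node *misc_node;
	u8 subtype;
	int rc;

	misc_node = of_parse_phandle(pdev->dev.of_node, "qcom,misc-ref", 0);
	if (!misc_node)
		return -ENODEV;

	rc = qpnp_misc_read_reg(misc_node, 0x05 /* REG_SUBTYPE */, &subtype);
	if (rc)		/* possibly -EPROBE_DEFER: bubble it up */
		return rc;

	dev_info(&pdev->dev, "misc subtype: 0x%02x\n", subtype);
	return 0;
}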
+
+int qpnp_misc_irqs_available(struct device *consumer_dev)
+{
+	struct device_node *misc_node = NULL;
+	struct qpnp_misc_dev *mdev = NULL;
+	struct qpnp_misc_dev *mdev_found = NULL;
+
+	if (IS_ERR_OR_NULL(consumer_dev)) {
+		pr_err("Invalid consumer device pointer\n");
+		return -EINVAL;
+	}
+
+	misc_node = of_parse_phandle(consumer_dev->of_node, "qcom,misc-ref", 0);
+	if (!misc_node) {
+		pr_debug("Could not find qcom,misc-ref property in %s\n",
+			consumer_dev->of_node->full_name);
+		return 0;
+	}
+
+	mutex_lock(&qpnp_misc_dev_list_mutex);
+	list_for_each_entry(mdev, &qpnp_misc_dev_list, list) {
+		if (mdev->dev->of_node == misc_node) {
+			mdev_found = mdev;
+			break;
+		}
+	}
+	mutex_unlock(&qpnp_misc_dev_list_mutex);
+
+	if (!mdev_found) {
+		/*
+		 * No MISC device was found. This API should only
+		 * be called by drivers which have specified the
+		 * misc phandle in their device tree node.
+		 */
+		pr_err("no probed misc device found\n");
+		return -EPROBE_DEFER;
+	}
+
+	return __misc_irqs_available(mdev_found);
+}
+
+static int qpnp_misc_dt_init(struct qpnp_misc_dev *mdev)
+{
+	struct device_node *node = mdev->dev->of_node;
+	u32 val;
+	int rc;
+
+	rc = of_property_read_u32(node, "reg", &mdev->base);
+	if (rc < 0 || !mdev->base) {
+		dev_err(mdev->dev, "Base address not defined or invalid\n");
+		return -EINVAL;
+	}
+
+	if (!of_property_read_u32(node, "qcom,pwm-sel", &val)) {
+		if (val > PWM_SEL_MAX) {
+			dev_err(mdev->dev, "Invalid value for pwm-sel\n");
+			return -EINVAL;
+		}
+		mdev->pwm_sel = (u8)val;
+	}
+	mdev->enable_gp_driver = of_property_read_bool(node,
+						"qcom,enable-gp-driver");
+
+	WARN((mdev->pwm_sel > 0 && !mdev->enable_gp_driver),
+			"Setting PWM source without enabling gp driver\n");
+	WARN((mdev->pwm_sel == 0 && mdev->enable_gp_driver),
+			"Enabling gp driver without setting PWM source\n");
+
+	return 0;
+}
+
+static int qpnp_misc_config(struct qpnp_misc_dev *mdev)
+{
+	int rc, version_name;
+
+	version_name = get_qpnp_misc_version_name(mdev);
+
+	switch (version_name) {
+	case PMDCALIFORNIUM:
+		if (mdev->pwm_sel > 0 && mdev->enable_gp_driver) {
+			rc = qpnp_write_byte(mdev, REG_PWM_SEL, mdev->pwm_sel);
+			if (rc < 0) {
+				dev_err(mdev->dev,
+					"Failed to write PWM_SEL reg\n");
+				return rc;
+			}
+
+			rc = qpnp_write_byte(mdev, REG_GP_DRIVER_EN,
+					GP_DRIVER_EN_BIT);
+			if (rc < 0) {
+				dev_err(mdev->dev,
+					"Failed to write GP_DRIVER_EN reg\n");
+				return rc;
+			}
+		}
+		break;
+	default:
+		break;
+	}
+
+	return 0;
+}
+
+static int qpnp_misc_probe(struct platform_device *pdev)
+{
+	struct qpnp_misc_dev *mdev = ERR_PTR(-EINVAL);
+	int rc;
+
+	mdev = devm_kzalloc(&pdev->dev, sizeof(*mdev), GFP_KERNEL);
+	if (!mdev)
+		return -ENOMEM;
+
+	mdev->dev = &pdev->dev;
+	mdev->regmap = dev_get_regmap(mdev->dev->parent, NULL);
+	if (!mdev->regmap) {
+		dev_err(mdev->dev, "Parent regmap is unavailable\n");
+		return -ENXIO;
+	}
+
+	rc = qpnp_misc_dt_init(mdev);
+	if (rc < 0) {
+		dev_err(mdev->dev,
+			"Error reading device tree properties, rc=%d\n", rc);
+		return rc;
+	}
+
+	rc = qpnp_read_byte(mdev, REG_SUBTYPE, &mdev->version.subtype);
+	if (rc < 0) {
+		dev_err(mdev->dev, "Failed to read subtype, rc=%d\n", rc);
+		return rc;
+	}
+
+	rc = qpnp_read_byte(mdev, REG_DIG_MAJOR_REV,
+			&mdev->version.dig_major_rev);
+	if (rc < 0) {
+		dev_err(mdev->dev, "Failed to read dig_major_rev, rc=%d\n", rc);
+		return rc;
+	}
+
+	mutex_lock(&qpnp_misc_dev_list_mutex);
+	list_add_tail(&mdev->list, &qpnp_misc_dev_list);
+	mutex_unlock(&qpnp_misc_dev_list_mutex);
+
+	rc = qpnp_misc_config(mdev);
+	if (rc < 0) {
+		dev_err(mdev->dev,
+			"Error configuring module registers, rc=%d\n", rc);
+		return rc;
+	}
+
+	dev_info(mdev->dev, "probe successful\n");
+	return 0;
+}
+
+static struct platform_driver qpnp_misc_driver = {
+	.probe	= qpnp_misc_probe,
+	.driver	= {
+		.name		= QPNP_MISC_DEV_NAME,
+		.owner		= THIS_MODULE,
+		.of_match_table	= qpnp_misc_match_table,
+	},
+};
+
+static int __init qpnp_misc_init(void)
+{
+	return platform_driver_register(&qpnp_misc_driver);
+}
+
+static void __exit qpnp_misc_exit(void)
+{
+	return platform_driver_unregister(&qpnp_misc_driver);
+}
+
+subsys_initcall(qpnp_misc_init);
+module_exit(qpnp_misc_exit);
+
+MODULE_DESCRIPTION(QPNP_MISC_DEV_NAME);
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:" QPNP_MISC_DEV_NAME);
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/misc/qseecom.c	2019-10-29 09:26:24.049207151 +0100
@@ -0,0 +1,9032 @@
+/* Qualcomm Secure Execution Environment Communicator (QSEECOM) driver
+ *
+ * Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "QSEECOM: %s: " fmt, __func__
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/platform_device.h>
+#include <linux/debugfs.h>
+#include <linux/cdev.h>
+#include <linux/uaccess.h>
+#include <linux/sched.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/io.h>
+#include <linux/msm_ion.h>
+#include <linux/types.h>
+#include <linux/clk.h>
+#include <linux/qseecom.h>
+#include <linux/elf.h>
+#include <linux/firmware.h>
+#include <linux/freezer.h>
+#include <linux/scatterlist.h>
+#include <linux/regulator/consumer.h>
+#include <linux/dma-mapping.h>
+#include <soc/qcom/subsystem_restart.h>
+#include <soc/qcom/scm.h>
+#include <soc/qcom/socinfo.h>
+#include <linux/msm-bus.h>
+#include <linux/msm-bus-board.h>
+#include <soc/qcom/qseecomi.h>
+#include <asm/cacheflush.h>
+#include "qseecom_legacy.h"
+#include "qseecom_kernel.h"
+#include <crypto/ice.h>
+#include <linux/delay.h>
+
+#include <linux/compat.h>
+#include "compat_qseecom.h"
+
+#define QSEECOM_DEV			"qseecom"
+#define QSEOS_VERSION_14		0x14
+#define QSEE_VERSION_00			0x400000
+#define QSEE_VERSION_01			0x401000
+#define QSEE_VERSION_02			0x402000
+#define QSEE_VERSION_03			0x403000
+#define QSEE_VERSION_04			0x404000
+#define QSEE_VERSION_05			0x405000
+#define QSEE_VERSION_20			0x800000
+#define QSEE_VERSION_40			0x1000000  /* TZ.BF.4.0 */
+
+#define QSEE_CE_CLK_100MHZ		100000000
+#define CE_CLK_DIV			1000000
+
+#define QSEECOM_MAX_SG_ENTRY			512
+#define QSEECOM_SG_ENTRY_MSG_BUF_SZ_64BIT	\
+			(QSEECOM_MAX_SG_ENTRY * SG_ENTRY_SZ_64BIT)
+
+#define QSEECOM_INVALID_KEY_ID  0xff
+
+/* Save partition image hash for authentication check */
+#define	SCM_SAVE_PARTITION_HASH_ID	0x01
+
+/* Check if enterprise security is activated */
+#define	SCM_IS_ACTIVATED_ID		0x02
+
+/* Encrypt/Decrypt Data Integrity Partition (DIP) for MDTP */
+#define SCM_MDTP_CIPHER_DIP		0x01
+
+/* Maximum Allowed Size (128K) of Data Integrity Partition (DIP) for MDTP */
+#define MAX_DIP			0x20000
+
+#define RPMB_SERVICE			0x2000
+#define SSD_SERVICE			0x3000
+
+#define QSEECOM_SEND_CMD_CRYPTO_TIMEOUT	2000
+#define QSEECOM_LOAD_APP_CRYPTO_TIMEOUT	2000
+#define TWO 2
+#define QSEECOM_UFS_ICE_CE_NUM 10
+#define QSEECOM_SDCC_ICE_CE_NUM 20
+#define QSEECOM_ICE_FDE_KEY_INDEX 0
+
+#define PHY_ADDR_4G	(1ULL<<32)
+
+#define QSEECOM_STATE_NOT_READY         0
+#define QSEECOM_STATE_SUSPEND           1
+#define QSEECOM_STATE_READY             2
+#define QSEECOM_ICE_FDE_KEY_SIZE_MASK   2
+
+/*
+ * Default the CE info unit to 0 for services which support only a single
+ * instance. Most services are in this category.
+ */
+#define DEFAULT_CE_INFO_UNIT 0
+#define DEFAULT_NUM_CE_INFO_UNIT 1
+
+enum qseecom_clk_definitions {
+	CLK_DFAB = 0,
+	CLK_SFPB,
+};
+
+enum qseecom_ice_key_size_type {
+	QSEECOM_ICE_FDE_KEY_SIZE_16_BYTE =
+		(0 << QSEECOM_ICE_FDE_KEY_SIZE_MASK),
+	QSEECOM_ICE_FDE_KEY_SIZE_32_BYTE =
+		(1 << QSEECOM_ICE_FDE_KEY_SIZE_MASK),
+	QSEE_ICE_FDE_KEY_SIZE_UNDEFINED =
+		(0xF << QSEECOM_ICE_FDE_KEY_SIZE_MASK),
+};
+
+enum qseecom_client_handle_type {
+	QSEECOM_CLIENT_APP = 1,
+	QSEECOM_LISTENER_SERVICE,
+	QSEECOM_SECURE_SERVICE,
+	QSEECOM_GENERIC,
+	QSEECOM_UNAVAILABLE_CLIENT_APP,
+};
+
+enum qseecom_ce_hw_instance {
+	CLK_QSEE = 0,
+	CLK_CE_DRV,
+	CLK_INVALID,
+};
+
+static struct class *driver_class;
+static dev_t qseecom_device_no;
+
+static DEFINE_MUTEX(qsee_bw_mutex);
+static DEFINE_MUTEX(app_access_lock);
+static DEFINE_MUTEX(clk_access_lock);
+
+struct sglist_info {
+	uint32_t indexAndFlags;
+	uint32_t sizeOrCount;
+};
+
+/*
+ * The 31st bit indicates whether the request buffer holds a single
+ * physical address or several. If it is set, the index locates a single
+ * physical addr inside the request buffer, and `sizeOrCount` is the size
+ * of the memory being shared at that physical address.
+ * Otherwise, the index locates an array of {start, len} pairs (a
+ * "scatter/gather list"), and `sizeOrCount` gives the number of entries in
+ * that array.
+ *
+ * The 30th bit selects between 64- and 32-bit addresses; when it is set,
+ * physical addresses and scatter/gather entry sizes are 64-bit values,
+ * otherwise 32-bit values.
+ *
+ * The bits [0:29] of `indexAndFlags` hold an offset into the request buffer.
+ */
+#define SGLISTINFO_SET_INDEX_FLAG(c, s, i)	\
+	((uint32_t)(((c & 1) << 31) | ((s & 1) << 30) | (i & 0x3fffffff)))
+
+#define SGLISTINFO_TABLE_SIZE	(sizeof(struct sglist_info) * MAX_ION_FD)
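/*
 * Editorial worked example: SGLISTINFO_SET_INDEX_FLAG() packs the layout
 * described above into one word. Recording a single (c = 1), 64-bit (s = 1)
 * entry at request-buffer offset 0x40 (values invented) gives:
 *
 *	SGLISTINFO_SET_INDEX_FLAG(1, 1, 0x40)
 *	  == (1 << 31) | (1 << 30) | 0x40
 *	  == 0xC0000040
 */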
+
+#define FEATURE_ID_WHITELIST	15	/* whitelist feature id */
+
+#define MAKE_WHITELIST_VERSION(major, minor, patch) \
+	(((major & 0x3FF) << 22) | ((minor & 0x3FF) << 12) | (patch & 0xFFF))
+
+struct qseecom_registered_listener_list {
+	struct list_head                 list;
+	struct qseecom_register_listener_req svc;
+	void  *user_virt_sb_base;
+	u8 *sb_virt;
+	phys_addr_t sb_phys;
+	size_t sb_length;
+	struct ion_handle *ihandle; /* Retrieve phy addr */
+	wait_queue_head_t          rcv_req_wq;
+	int                        rcv_req_flag;
+	int                        send_resp_flag;
+	bool                       listener_in_use;
+	/* wq for thread blocked on this listener*/
+	wait_queue_head_t          listener_block_app_wq;
+	struct sglist_info sglistinfo_ptr[MAX_ION_FD];
+	uint32_t sglist_cnt;
+};
+
+struct qseecom_registered_app_list {
+	struct list_head                 list;
+	u32  app_id;
+	u32  ref_cnt;
+	char app_name[MAX_APP_NAME_SIZE];
+	u32  app_arch;
+	bool app_blocked;
+	u32  blocked_on_listener_id;
+};
+
+struct qseecom_registered_kclient_list {
+	struct list_head list;
+	struct qseecom_handle *handle;
+};
+
+struct qseecom_ce_info_use {
+	unsigned char handle[MAX_CE_INFO_HANDLE_SIZE];
+	unsigned int unit_num;
+	unsigned int num_ce_pipe_entries;
+	struct qseecom_ce_pipe_entry *ce_pipe_entry;
+	bool alloc;
+	uint32_t type;
+};
+
+struct ce_hw_usage_info {
+	uint32_t qsee_ce_hw_instance;
+	uint32_t num_fde;
+	struct qseecom_ce_info_use *fde;
+	uint32_t num_pfe;
+	struct qseecom_ce_info_use *pfe;
+};
+
+struct qseecom_clk {
+	enum qseecom_ce_hw_instance instance;
+	struct clk *ce_core_clk;
+	struct clk *ce_clk;
+	struct clk *ce_core_src_clk;
+	struct clk *ce_bus_clk;
+	uint32_t clk_access_cnt;
+};
+
+struct qseecom_control {
+	struct ion_client *ion_clnt;		/* Ion client */
+	struct list_head  registered_listener_list_head;
+	spinlock_t        registered_listener_list_lock;
+
+	struct list_head  registered_app_list_head;
+	spinlock_t        registered_app_list_lock;
+
+	struct list_head   registered_kclient_list_head;
+	spinlock_t        registered_kclient_list_lock;
+
+	wait_queue_head_t send_resp_wq;
+	int               send_resp_flag;
+
+	uint32_t          qseos_version;
+	uint32_t          qsee_version;
+	struct device *pdev;
+	bool  whitelist_support;
+	bool  commonlib_loaded;
+	bool  commonlib64_loaded;
+	struct ce_hw_usage_info ce_info;
+
+	int qsee_bw_count;
+	int qsee_sfpb_bw_count;
+
+	uint32_t qsee_perf_client;
+	struct qseecom_clk qsee;
+	struct qseecom_clk ce_drv;
+
+	bool support_bus_scaling;
+	bool support_fde;
+	bool support_pfe;
+	bool fde_key_size;
+	uint32_t  cumulative_mode;
+	enum qseecom_bandwidth_request_mode  current_mode;
+	struct timer_list bw_scale_down_timer;
+	struct work_struct bw_inactive_req_ws;
+	struct cdev cdev;
+	bool timer_running;
+	bool no_clock_support;
+	unsigned int ce_opp_freq_hz;
+	bool appsbl_qseecom_support;
+	uint32_t qsee_reentrancy_support;
+
+	uint32_t app_block_ref_cnt;
+	wait_queue_head_t app_block_wq;
+	atomic_t qseecom_state;
+	int is_apps_region_protected;
+	bool smcinvoke_support;
+};
+
+struct qseecom_sec_buf_fd_info {
+	bool is_sec_buf_fd;
+	size_t size;
+	void *vbase;
+	dma_addr_t pbase;
+};
+
+struct qseecom_param_memref {
+	uint32_t buffer;
+	uint32_t size;
+};
+
+struct qseecom_client_handle {
+	u32  app_id;
+	u8 *sb_virt;
+	phys_addr_t sb_phys;
+	unsigned long user_virt_sb_base;
+	size_t sb_length;
+	struct ion_handle *ihandle;		/* Retrieve phy addr */
+	char app_name[MAX_APP_NAME_SIZE];
+	u32  app_arch;
+	struct qseecom_sec_buf_fd_info sec_buf_fd[MAX_ION_FD];
+};
+
+struct qseecom_listener_handle {
+	u32               id;
+};
+
+static struct qseecom_control qseecom;
+
+struct qseecom_dev_handle {
+	enum qseecom_client_handle_type type;
+	union {
+		struct qseecom_client_handle client;
+		struct qseecom_listener_handle listener;
+	};
+	bool released;
+	int               abort;
+	wait_queue_head_t abort_wq;
+	atomic_t          ioctl_count;
+	bool  perf_enabled;
+	bool  fast_load_enabled;
+	enum qseecom_bandwidth_request_mode mode;
+	struct sglist_info sglistinfo_ptr[MAX_ION_FD];
+	uint32_t sglist_cnt;
+	bool use_legacy_cmd;
+};
+
+struct qseecom_key_id_usage_desc {
+	uint8_t desc[QSEECOM_KEY_ID_SIZE];
+};
+
+struct qseecom_crypto_info {
+	unsigned int unit_num;
+	unsigned int ce;
+	unsigned int pipe_pair;
+};
+
+static struct qseecom_key_id_usage_desc key_id_array[] = {
+	{
+		.desc = "Undefined Usage Index",
+	},
+
+	{
+		.desc = "Full Disk Encryption",
+	},
+
+	{
+		.desc = "Per File Encryption",
+	},
+
+	{
+		.desc = "UFS ICE Full Disk Encryption",
+	},
+
+	{
+		.desc = "SDCC ICE Full Disk Encryption",
+	},
+};
+
+/* Function prototypes */
+static int qsee_vote_for_clock(struct qseecom_dev_handle *, int32_t);
+static void qsee_disable_clock_vote(struct qseecom_dev_handle *, int32_t);
+static int __qseecom_enable_clk(enum qseecom_ce_hw_instance ce);
+static void __qseecom_disable_clk(enum qseecom_ce_hw_instance ce);
+static int __qseecom_init_clk(enum qseecom_ce_hw_instance ce);
+static int qseecom_load_commonlib_image(struct qseecom_dev_handle *data,
+					char *cmnlib_name);
+static int qseecom_enable_ice_setup(int usage);
+static int qseecom_disable_ice_setup(int usage);
+static void __qseecom_reentrancy_check_if_no_app_blocked(uint32_t smc_id);
+static int qseecom_get_ce_info(struct qseecom_dev_handle *data,
+						void __user *argp);
+static int qseecom_free_ce_info(struct qseecom_dev_handle *data,
+						void __user *argp);
+static int qseecom_query_ce_info(struct qseecom_dev_handle *data,
+						void __user *argp);
+
+static int get_qseecom_keymaster_status(char *str)
+{
+	get_option(&str, &qseecom.is_apps_region_protected);
+	return 1;
+}
+__setup("androidboot.keymaster=", get_qseecom_keymaster_status);
+
+static int qseecom_scm_call2(uint32_t svc_id, uint32_t tz_cmd_id,
+			const void *req_buf, void *resp_buf)
+{
+	int      ret = 0;
+	uint32_t smc_id = 0;
+	uint32_t qseos_cmd_id = 0;
+	struct scm_desc desc = {0};
+	struct qseecom_command_scm_resp *scm_resp = NULL;
+
+	if (!req_buf || !resp_buf) {
+		pr_err("Invalid buffer pointer\n");
+		return -EINVAL;
+	}
+	qseos_cmd_id = *(uint32_t *)req_buf;
+	scm_resp = (struct qseecom_command_scm_resp *)resp_buf;
+
+	switch (svc_id) {
+	case 6: {
+		if (tz_cmd_id == 3) {
+			smc_id = TZ_INFO_GET_FEATURE_VERSION_ID;
+			desc.arginfo = TZ_INFO_GET_FEATURE_VERSION_ID_PARAM_ID;
+			desc.args[0] = *(uint32_t *)req_buf;
+		} else {
+			pr_err("Unsupported svc_id %d, tz_cmd_id %d\n",
+				svc_id, tz_cmd_id);
+			return -EINVAL;
+		}
+		ret = scm_call2(smc_id, &desc);
+		break;
+	}
+	case SCM_SVC_ES: {
+		switch (tz_cmd_id) {
+		case SCM_SAVE_PARTITION_HASH_ID: {
+			u32 tzbuflen = PAGE_ALIGN(SHA256_DIGEST_LENGTH);
+			struct qseecom_save_partition_hash_req *p_hash_req =
+				(struct qseecom_save_partition_hash_req *)
+				req_buf;
+			char *tzbuf = kzalloc(tzbuflen, GFP_KERNEL);
+			if (!tzbuf) {
+				pr_err("error allocating data\n");
+				return -ENOMEM;
+			}
+			memset(tzbuf, 0, tzbuflen);
+			memcpy(tzbuf, p_hash_req->digest,
+				SHA256_DIGEST_LENGTH);
+			dmac_flush_range(tzbuf, tzbuf + tzbuflen);
+			smc_id = TZ_ES_SAVE_PARTITION_HASH_ID;
+			desc.arginfo = TZ_ES_SAVE_PARTITION_HASH_ID_PARAM_ID;
+			desc.args[0] = p_hash_req->partition_id;
+			desc.args[1] = virt_to_phys(tzbuf);
+			desc.args[2] = SHA256_DIGEST_LENGTH;
+			ret = scm_call2(smc_id, &desc);
+			kzfree(tzbuf);
+			break;
+		}
+		default: {
+			pr_err("tz_cmd_id %d is not supported by scm_call2\n",
+						tz_cmd_id);
+			ret = -EINVAL;
+			break;
+		}
+		} /* end of switch (tz_cmd_id) */
+		break;
+	} /* end of case SCM_SVC_ES */
+	case SCM_SVC_TZSCHEDULER: {
+		switch (qseos_cmd_id) {
+		case QSEOS_APP_START_COMMAND: {
+			struct qseecom_load_app_ireq *req;
+			struct qseecom_load_app_64bit_ireq *req_64bit;
+			smc_id = TZ_OS_APP_START_ID;
+			desc.arginfo = TZ_OS_APP_START_ID_PARAM_ID;
+			if (qseecom.qsee_version < QSEE_VERSION_40) {
+				req = (struct qseecom_load_app_ireq *)req_buf;
+				desc.args[0] = req->mdt_len;
+				desc.args[1] = req->img_len;
+				desc.args[2] = req->phy_addr;
+			} else {
+				req_64bit =
+					(struct qseecom_load_app_64bit_ireq *)
+					req_buf;
+				desc.args[0] = req_64bit->mdt_len;
+				desc.args[1] = req_64bit->img_len;
+				desc.args[2] = req_64bit->phy_addr;
+			}
+			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
+			ret = scm_call2(smc_id, &desc);
+			break;
+		}
+		case QSEOS_APP_SHUTDOWN_COMMAND: {
+			struct qseecom_unload_app_ireq *req;
+			req = (struct qseecom_unload_app_ireq *)req_buf;
+			smc_id = TZ_OS_APP_SHUTDOWN_ID;
+			desc.arginfo = TZ_OS_APP_SHUTDOWN_ID_PARAM_ID;
+			desc.args[0] = req->app_id;
+			ret = scm_call2(smc_id, &desc);
+			break;
+		}
+		case QSEOS_APP_LOOKUP_COMMAND: {
+			struct qseecom_check_app_ireq *req;
+			u32 tzbuflen = PAGE_ALIGN(sizeof(req->app_name));
+			char *tzbuf = kzalloc(tzbuflen, GFP_KERNEL);
+			if (!tzbuf) {
+				pr_err("Allocate %d bytes buffer failed\n",
+					tzbuflen);
+				return -ENOMEM;
+			}
+			req = (struct qseecom_check_app_ireq *)req_buf;
+			pr_debug("Lookup app_name = %s\n", req->app_name);
+			strlcpy(tzbuf, req->app_name, sizeof(req->app_name));
+			dmac_flush_range(tzbuf, tzbuf + tzbuflen);
+			smc_id = TZ_OS_APP_LOOKUP_ID;
+			desc.arginfo = TZ_OS_APP_LOOKUP_ID_PARAM_ID;
+			desc.args[0] = virt_to_phys(tzbuf);
+			desc.args[1] = strlen(req->app_name);
+			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
+			ret = scm_call2(smc_id, &desc);
+			kzfree(tzbuf);
+			break;
+		}
+		case QSEOS_APP_REGION_NOTIFICATION: {
+			struct qsee_apps_region_info_ireq *req;
+			struct qsee_apps_region_info_64bit_ireq *req_64bit;
+			smc_id = TZ_OS_APP_REGION_NOTIFICATION_ID;
+			desc.arginfo =
+				TZ_OS_APP_REGION_NOTIFICATION_ID_PARAM_ID;
+			if (qseecom.qsee_version < QSEE_VERSION_40) {
+				req = (struct qsee_apps_region_info_ireq *)
+					req_buf;
+				desc.args[0] = req->addr;
+				desc.args[1] = req->size;
+			} else {
+				req_64bit =
+				(struct qsee_apps_region_info_64bit_ireq *)
+					req_buf;
+				desc.args[0] = req_64bit->addr;
+				desc.args[1] = req_64bit->size;
+			}
+			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
+			ret = scm_call2(smc_id, &desc);
+			break;
+		}
+		case QSEOS_LOAD_SERV_IMAGE_COMMAND: {
+			struct qseecom_load_lib_image_ireq *req;
+			struct qseecom_load_lib_image_64bit_ireq *req_64bit;
+			smc_id = TZ_OS_LOAD_SERVICES_IMAGE_ID;
+			desc.arginfo = TZ_OS_LOAD_SERVICES_IMAGE_ID_PARAM_ID;
+			if (qseecom.qsee_version < QSEE_VERSION_40) {
+				req = (struct qseecom_load_lib_image_ireq *)
+					req_buf;
+				desc.args[0] = req->mdt_len;
+				desc.args[1] = req->img_len;
+				desc.args[2] = req->phy_addr;
+			} else {
+				req_64bit =
+				(struct qseecom_load_lib_image_64bit_ireq *)
+					req_buf;
+				desc.args[0] = req_64bit->mdt_len;
+				desc.args[1] = req_64bit->img_len;
+				desc.args[2] = req_64bit->phy_addr;
+			}
+			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
+			ret = scm_call2(smc_id, &desc);
+			break;
+		}
+		case QSEOS_UNLOAD_SERV_IMAGE_COMMAND: {
+			smc_id = TZ_OS_UNLOAD_SERVICES_IMAGE_ID;
+			desc.arginfo = TZ_OS_UNLOAD_SERVICES_IMAGE_ID_PARAM_ID;
+			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
+			ret = scm_call2(smc_id, &desc);
+			break;
+		}
+		case QSEOS_REGISTER_LISTENER: {
+			struct qseecom_register_listener_ireq *req;
+			struct qseecom_register_listener_64bit_ireq *req_64bit;
+			desc.arginfo =
+				TZ_OS_REGISTER_LISTENER_ID_PARAM_ID;
+			if (qseecom.qsee_version < QSEE_VERSION_40) {
+				req = (struct qseecom_register_listener_ireq *)
+					req_buf;
+				desc.args[0] = req->listener_id;
+				desc.args[1] = req->sb_ptr;
+				desc.args[2] = req->sb_len;
+			} else {
+				req_64bit =
+				(struct qseecom_register_listener_64bit_ireq *)
+					req_buf;
+				desc.args[0] = req_64bit->listener_id;
+				desc.args[1] = req_64bit->sb_ptr;
+				desc.args[2] = req_64bit->sb_len;
+			}
+			qseecom.smcinvoke_support = true;
+			smc_id = TZ_OS_REGISTER_LISTENER_SMCINVOKE_ID;
+			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
+			ret = scm_call2(smc_id, &desc);
+			if (ret) {
+				qseecom.smcinvoke_support = false;
+				smc_id = TZ_OS_REGISTER_LISTENER_ID;
+				__qseecom_reentrancy_check_if_no_app_blocked(
+					smc_id);
+				ret = scm_call2(smc_id, &desc);
+			}
+			break;
+		}
+		case QSEOS_DEREGISTER_LISTENER: {
+			struct qseecom_unregister_listener_ireq *req;
+			req = (struct qseecom_unregister_listener_ireq *)
+				req_buf;
+			smc_id = TZ_OS_DEREGISTER_LISTENER_ID;
+			desc.arginfo = TZ_OS_DEREGISTER_LISTENER_ID_PARAM_ID;
+			desc.args[0] = req->listener_id;
+			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
+			ret = scm_call2(smc_id, &desc);
+			break;
+		}
+		case QSEOS_LISTENER_DATA_RSP_COMMAND: {
+			struct qseecom_client_listener_data_irsp *req;
+			req = (struct qseecom_client_listener_data_irsp *)
+				req_buf;
+			smc_id = TZ_OS_LISTENER_RESPONSE_HANDLER_ID;
+			desc.arginfo =
+				TZ_OS_LISTENER_RESPONSE_HANDLER_ID_PARAM_ID;
+			desc.args[0] = req->listener_id;
+			desc.args[1] = req->status;
+			ret = scm_call2(smc_id, &desc);
+			break;
+		}
+		case QSEOS_LISTENER_DATA_RSP_COMMAND_WHITELIST: {
+			struct qseecom_client_listener_data_irsp *req;
+			struct qseecom_client_listener_data_64bit_irsp *req_64;
+
+			smc_id =
+			TZ_OS_LISTENER_RESPONSE_HANDLER_WITH_WHITELIST_ID;
+			desc.arginfo =
+			TZ_OS_LISTENER_RESPONSE_HANDLER_WITH_WHITELIST_PARAM_ID;
+			if (qseecom.qsee_version < QSEE_VERSION_40) {
+				req =
+				(struct qseecom_client_listener_data_irsp *)
+				req_buf;
+				desc.args[0] = req->listener_id;
+				desc.args[1] = req->status;
+				desc.args[2] = req->sglistinfo_ptr;
+				desc.args[3] = req->sglistinfo_len;
+			} else {
+				req_64 =
+			(struct qseecom_client_listener_data_64bit_irsp *)
+				req_buf;
+				desc.args[0] = req_64->listener_id;
+				desc.args[1] = req_64->status;
+				desc.args[2] = req_64->sglistinfo_ptr;
+				desc.args[3] = req_64->sglistinfo_len;
+			}
+			ret = scm_call2(smc_id, &desc);
+			break;
+		}
+		case QSEOS_LOAD_EXTERNAL_ELF_COMMAND: {
+			struct qseecom_load_app_ireq *req;
+			struct qseecom_load_app_64bit_ireq *req_64bit;
+			smc_id = TZ_OS_LOAD_EXTERNAL_IMAGE_ID;
+			desc.arginfo = TZ_OS_LOAD_SERVICES_IMAGE_ID_PARAM_ID;
+			if (qseecom.qsee_version < QSEE_VERSION_40) {
+				req = (struct qseecom_load_app_ireq *)req_buf;
+				desc.args[0] = req->mdt_len;
+				desc.args[1] = req->img_len;
+				desc.args[2] = req->phy_addr;
+			} else {
+				req_64bit =
+				(struct qseecom_load_app_64bit_ireq *)req_buf;
+				desc.args[0] = req_64bit->mdt_len;
+				desc.args[1] = req_64bit->img_len;
+				desc.args[2] = req_64bit->phy_addr;
+			}
+			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
+			ret = scm_call2(smc_id, &desc);
+			break;
+		}
+		case QSEOS_UNLOAD_EXTERNAL_ELF_COMMAND: {
+			smc_id = TZ_OS_UNLOAD_EXTERNAL_IMAGE_ID;
+			desc.arginfo = TZ_OS_UNLOAD_SERVICES_IMAGE_ID_PARAM_ID;
+			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
+			ret = scm_call2(smc_id, &desc);
+			break;
+		}
+
+		case QSEOS_CLIENT_SEND_DATA_COMMAND: {
+			struct qseecom_client_send_data_ireq *req;
+			struct qseecom_client_send_data_64bit_ireq *req_64bit;
+			smc_id = TZ_APP_QSAPP_SEND_DATA_ID;
+			desc.arginfo = TZ_APP_QSAPP_SEND_DATA_ID_PARAM_ID;
+			if (qseecom.qsee_version < QSEE_VERSION_40) {
+				req = (struct qseecom_client_send_data_ireq *)
+					req_buf;
+				desc.args[0] = req->app_id;
+				desc.args[1] = req->req_ptr;
+				desc.args[2] = req->req_len;
+				desc.args[3] = req->rsp_ptr;
+				desc.args[4] = req->rsp_len;
+			} else {
+				req_64bit =
+				(struct qseecom_client_send_data_64bit_ireq *)
+					req_buf;
+				desc.args[0] = req_64bit->app_id;
+				desc.args[1] = req_64bit->req_ptr;
+				desc.args[2] = req_64bit->req_len;
+				desc.args[3] = req_64bit->rsp_ptr;
+				desc.args[4] = req_64bit->rsp_len;
+			}
+			ret = scm_call2(smc_id, &desc);
+			break;
+		}
+		case QSEOS_CLIENT_SEND_DATA_COMMAND_WHITELIST: {
+			struct qseecom_client_send_data_ireq *req;
+			struct qseecom_client_send_data_64bit_ireq *req_64bit;
+
+			smc_id = TZ_APP_QSAPP_SEND_DATA_WITH_WHITELIST_ID;
+			desc.arginfo =
+			TZ_APP_QSAPP_SEND_DATA_WITH_WHITELIST_ID_PARAM_ID;
+			if (qseecom.qsee_version < QSEE_VERSION_40) {
+				req = (struct qseecom_client_send_data_ireq *)
+					req_buf;
+				desc.args[0] = req->app_id;
+				desc.args[1] = req->req_ptr;
+				desc.args[2] = req->req_len;
+				desc.args[3] = req->rsp_ptr;
+				desc.args[4] = req->rsp_len;
+				desc.args[5] = req->sglistinfo_ptr;
+				desc.args[6] = req->sglistinfo_len;
+			} else {
+				req_64bit =
+				(struct qseecom_client_send_data_64bit_ireq *)
+					req_buf;
+				desc.args[0] = req_64bit->app_id;
+				desc.args[1] = req_64bit->req_ptr;
+				desc.args[2] = req_64bit->req_len;
+				desc.args[3] = req_64bit->rsp_ptr;
+				desc.args[4] = req_64bit->rsp_len;
+				desc.args[5] = req_64bit->sglistinfo_ptr;
+				desc.args[6] = req_64bit->sglistinfo_len;
+			}
+			ret = scm_call2(smc_id, &desc);
+			break;
+		}
+		case QSEOS_RPMB_PROVISION_KEY_COMMAND: {
+			struct qseecom_client_send_service_ireq *req;
+			req = (struct qseecom_client_send_service_ireq *)
+				req_buf;
+			smc_id = TZ_OS_RPMB_PROVISION_KEY_ID;
+			desc.arginfo = TZ_OS_RPMB_PROVISION_KEY_ID_PARAM_ID;
+			desc.args[0] = req->key_type;
+			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
+			ret = scm_call2(smc_id, &desc);
+			break;
+		}
+		case QSEOS_RPMB_ERASE_COMMAND: {
+			smc_id = TZ_OS_RPMB_ERASE_ID;
+			desc.arginfo = TZ_OS_RPMB_ERASE_ID_PARAM_ID;
+			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
+			ret = scm_call2(smc_id, &desc);
+			break;
+		}
+		case QSEOS_RPMB_CHECK_PROV_STATUS_COMMAND: {
+			smc_id = TZ_OS_RPMB_CHECK_PROV_STATUS_ID;
+			desc.arginfo = TZ_OS_RPMB_CHECK_PROV_STATUS_ID_PARAM_ID;
+			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
+			ret = scm_call2(smc_id, &desc);
+			break;
+		}
+		case QSEOS_GENERATE_KEY: {
+			u32 tzbuflen = PAGE_ALIGN(sizeof
+				(struct qseecom_key_generate_ireq) -
+				sizeof(uint32_t));
+			char *tzbuf = kzalloc(tzbuflen, GFP_KERNEL);
+			if (!tzbuf)
+				return -ENOMEM;
+			memset(tzbuf, 0, tzbuflen);
+			memcpy(tzbuf, req_buf + sizeof(uint32_t),
+				(sizeof(struct qseecom_key_generate_ireq) -
+				sizeof(uint32_t)));
+			dmac_flush_range(tzbuf, tzbuf + tzbuflen);
+			smc_id = TZ_OS_KS_GEN_KEY_ID;
+			desc.arginfo = TZ_OS_KS_GEN_KEY_ID_PARAM_ID;
+			desc.args[0] = virt_to_phys(tzbuf);
+			desc.args[1] = tzbuflen;
+			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
+			ret = scm_call2(smc_id, &desc);
+			kzfree(tzbuf);
+			break;
+		}
+		case QSEOS_DELETE_KEY: {
+			u32 tzbuflen = PAGE_ALIGN(sizeof
+				(struct qseecom_key_delete_ireq) -
+				sizeof(uint32_t));
+			char *tzbuf = kzalloc(tzbuflen, GFP_KERNEL);
+			if (!tzbuf) {
+				pr_err("Allocate %d bytes buffer failed\n",
+					tzbuflen);
+				return -ENOMEM;
+			}
+			memset(tzbuf, 0, tzbuflen);
+			memcpy(tzbuf, req_buf + sizeof(uint32_t),
+				(sizeof(struct qseecom_key_delete_ireq) -
+				sizeof(uint32_t)));
+			dmac_flush_range(tzbuf, tzbuf + tzbuflen);
+			smc_id = TZ_OS_KS_DEL_KEY_ID;
+			desc.arginfo = TZ_OS_KS_DEL_KEY_ID_PARAM_ID;
+			desc.args[0] = virt_to_phys(tzbuf);
+			desc.args[1] = tzbuflen;
+			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
+			ret = scm_call2(smc_id, &desc);
+			kzfree(tzbuf);
+			break;
+		}
+		case QSEOS_SET_KEY: {
+			u32 tzbuflen = PAGE_ALIGN(sizeof
+				(struct qseecom_key_select_ireq) -
+				sizeof(uint32_t));
+			char *tzbuf = kzalloc(tzbuflen, GFP_KERNEL);
+			if (!tzbuf) {
+				pr_err("Allocate %d bytes buffer failed\n",
+					tzbuflen);
+				return -ENOMEM;
+			}
+			memset(tzbuf, 0, tzbuflen);
+			memcpy(tzbuf, req_buf + sizeof(uint32_t),
+				(sizeof(struct qseecom_key_select_ireq) -
+				sizeof(uint32_t)));
+			dmac_flush_range(tzbuf, tzbuf + tzbuflen);
+			smc_id = TZ_OS_KS_SET_PIPE_KEY_ID;
+			desc.arginfo = TZ_OS_KS_SET_PIPE_KEY_ID_PARAM_ID;
+			desc.args[0] = virt_to_phys(tzbuf);
+			desc.args[1] = tzbuflen;
+			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
+			ret = scm_call2(smc_id, &desc);
+			kzfree(tzbuf);
+			break;
+		}
+		case QSEOS_UPDATE_KEY_USERINFO: {
+			u32 tzbuflen = PAGE_ALIGN(sizeof
+				(struct qseecom_key_userinfo_update_ireq) -
+				sizeof(uint32_t));
+			char *tzbuf = kzalloc(tzbuflen, GFP_KERNEL);
+			if (!tzbuf) {
+				pr_err("Allocate %d bytes buffer failed\n",
+					tzbuflen);
+				return -ENOMEM;
+			}
+			memset(tzbuf, 0, tzbuflen);
+			memcpy(tzbuf, req_buf + sizeof(uint32_t), (sizeof
+				(struct qseecom_key_userinfo_update_ireq) -
+				sizeof(uint32_t)));
+			dmac_flush_range(tzbuf, tzbuf + tzbuflen);
+			smc_id = TZ_OS_KS_UPDATE_KEY_ID;
+			desc.arginfo = TZ_OS_KS_UPDATE_KEY_ID_PARAM_ID;
+			desc.args[0] = virt_to_phys(tzbuf);
+			desc.args[1] = tzbuflen;
+			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
+			ret = scm_call2(smc_id, &desc);
+			kzfree(tzbuf);
+			break;
+		}
+		case QSEOS_TEE_OPEN_SESSION: {
+			struct qseecom_qteec_ireq *req;
+			struct qseecom_qteec_64bit_ireq *req_64bit;
+			smc_id = TZ_APP_GPAPP_OPEN_SESSION_ID;
+			desc.arginfo = TZ_APP_GPAPP_OPEN_SESSION_ID_PARAM_ID;
+			if (qseecom.qsee_version < QSEE_VERSION_40) {
+				req = (struct qseecom_qteec_ireq *)req_buf;
+				desc.args[0] = req->app_id;
+				desc.args[1] = req->req_ptr;
+				desc.args[2] = req->req_len;
+				desc.args[3] = req->resp_ptr;
+				desc.args[4] = req->resp_len;
+			} else {
+				req_64bit = (struct qseecom_qteec_64bit_ireq *)
+						req_buf;
+				desc.args[0] = req_64bit->app_id;
+				desc.args[1] = req_64bit->req_ptr;
+				desc.args[2] = req_64bit->req_len;
+				desc.args[3] = req_64bit->resp_ptr;
+				desc.args[4] = req_64bit->resp_len;
+			}
+			ret = scm_call2(smc_id, &desc);
+			break;
+		}
+		case QSEOS_TEE_OPEN_SESSION_WHITELIST: {
+			struct qseecom_qteec_ireq *req;
+			struct qseecom_qteec_64bit_ireq *req_64bit;
+
+			smc_id = TZ_APP_GPAPP_OPEN_SESSION_WITH_WHITELIST_ID;
+			desc.arginfo =
+			TZ_APP_GPAPP_OPEN_SESSION_WITH_WHITELIST_ID_PARAM_ID;
+			if (qseecom.qsee_version < QSEE_VERSION_40) {
+				req = (struct qseecom_qteec_ireq *)req_buf;
+				desc.args[0] = req->app_id;
+				desc.args[1] = req->req_ptr;
+				desc.args[2] = req->req_len;
+				desc.args[3] = req->resp_ptr;
+				desc.args[4] = req->resp_len;
+				desc.args[5] = req->sglistinfo_ptr;
+				desc.args[6] = req->sglistinfo_len;
+			} else {
+				req_64bit = (struct qseecom_qteec_64bit_ireq *)
+						req_buf;
+				desc.args[0] = req_64bit->app_id;
+				desc.args[1] = req_64bit->req_ptr;
+				desc.args[2] = req_64bit->req_len;
+				desc.args[3] = req_64bit->resp_ptr;
+				desc.args[4] = req_64bit->resp_len;
+				desc.args[5] = req_64bit->sglistinfo_ptr;
+				desc.args[6] = req_64bit->sglistinfo_len;
+			}
+			ret = scm_call2(smc_id, &desc);
+			break;
+		}
+		case QSEOS_TEE_INVOKE_COMMAND: {
+			struct qseecom_qteec_ireq *req;
+			struct qseecom_qteec_64bit_ireq *req_64bit;
+			smc_id = TZ_APP_GPAPP_INVOKE_COMMAND_ID;
+			desc.arginfo = TZ_APP_GPAPP_INVOKE_COMMAND_ID_PARAM_ID;
+			if (qseecom.qsee_version < QSEE_VERSION_40) {
+				req = (struct qseecom_qteec_ireq *)req_buf;
+				desc.args[0] = req->app_id;
+				desc.args[1] = req->req_ptr;
+				desc.args[2] = req->req_len;
+				desc.args[3] = req->resp_ptr;
+				desc.args[4] = req->resp_len;
+			} else {
+				req_64bit = (struct qseecom_qteec_64bit_ireq *)
+						req_buf;
+				desc.args[0] = req_64bit->app_id;
+				desc.args[1] = req_64bit->req_ptr;
+				desc.args[2] = req_64bit->req_len;
+				desc.args[3] = req_64bit->resp_ptr;
+				desc.args[4] = req_64bit->resp_len;
+			}
+			ret = scm_call2(smc_id, &desc);
+			break;
+		}
+		case QSEOS_TEE_INVOKE_COMMAND_WHITELIST: {
+			struct qseecom_qteec_ireq *req;
+			struct qseecom_qteec_64bit_ireq *req_64bit;
+
+			smc_id = TZ_APP_GPAPP_INVOKE_COMMAND_WITH_WHITELIST_ID;
+			desc.arginfo =
+			TZ_APP_GPAPP_INVOKE_COMMAND_WITH_WHITELIST_ID_PARAM_ID;
+			if (qseecom.qsee_version < QSEE_VERSION_40) {
+				req = (struct qseecom_qteec_ireq *)req_buf;
+				desc.args[0] = req->app_id;
+				desc.args[1] = req->req_ptr;
+				desc.args[2] = req->req_len;
+				desc.args[3] = req->resp_ptr;
+				desc.args[4] = req->resp_len;
+				desc.args[5] = req->sglistinfo_ptr;
+				desc.args[6] = req->sglistinfo_len;
+			} else {
+				req_64bit = (struct qseecom_qteec_64bit_ireq *)
+						req_buf;
+				desc.args[0] = req_64bit->app_id;
+				desc.args[1] = req_64bit->req_ptr;
+				desc.args[2] = req_64bit->req_len;
+				desc.args[3] = req_64bit->resp_ptr;
+				desc.args[4] = req_64bit->resp_len;
+				desc.args[5] = req_64bit->sglistinfo_ptr;
+				desc.args[6] = req_64bit->sglistinfo_len;
+			}
+			ret = scm_call2(smc_id, &desc);
+			break;
+		}
+		case QSEOS_TEE_CLOSE_SESSION: {
+			struct qseecom_qteec_ireq *req;
+			struct qseecom_qteec_64bit_ireq *req_64bit;
+			smc_id = TZ_APP_GPAPP_CLOSE_SESSION_ID;
+			desc.arginfo = TZ_APP_GPAPP_CLOSE_SESSION_ID_PARAM_ID;
+			if (qseecom.qsee_version < QSEE_VERSION_40) {
+				req = (struct qseecom_qteec_ireq *)req_buf;
+				desc.args[0] = req->app_id;
+				desc.args[1] = req->req_ptr;
+				desc.args[2] = req->req_len;
+				desc.args[3] = req->resp_ptr;
+				desc.args[4] = req->resp_len;
+			} else {
+				req_64bit = (struct qseecom_qteec_64bit_ireq *)
+						req_buf;
+				desc.args[0] = req_64bit->app_id;
+				desc.args[1] = req_64bit->req_ptr;
+				desc.args[2] = req_64bit->req_len;
+				desc.args[3] = req_64bit->resp_ptr;
+				desc.args[4] = req_64bit->resp_len;
+			}
+			ret = scm_call2(smc_id, &desc);
+			break;
+		}
+		case QSEOS_TEE_REQUEST_CANCELLATION: {
+			struct qseecom_qteec_ireq *req;
+			struct qseecom_qteec_64bit_ireq *req_64bit;
+			smc_id = TZ_APP_GPAPP_REQUEST_CANCELLATION_ID;
+			desc.arginfo =
+				TZ_APP_GPAPP_REQUEST_CANCELLATION_ID_PARAM_ID;
+			if (qseecom.qsee_version < QSEE_VERSION_40) {
+				req = (struct qseecom_qteec_ireq *)req_buf;
+				desc.args[0] = req->app_id;
+				desc.args[1] = req->req_ptr;
+				desc.args[2] = req->req_len;
+				desc.args[3] = req->resp_ptr;
+				desc.args[4] = req->resp_len;
+			} else {
+				req_64bit = (struct qseecom_qteec_64bit_ireq *)
+						req_buf;
+				desc.args[0] = req_64bit->app_id;
+				desc.args[1] = req_64bit->req_ptr;
+				desc.args[2] = req_64bit->req_len;
+				desc.args[3] = req_64bit->resp_ptr;
+				desc.args[4] = req_64bit->resp_len;
+			}
+			ret = scm_call2(smc_id, &desc);
+			break;
+		}
+		case QSEOS_CONTINUE_BLOCKED_REQ_COMMAND: {
+			struct qseecom_continue_blocked_request_ireq *req =
+				(struct qseecom_continue_blocked_request_ireq *)
+				req_buf;
+			if (qseecom.smcinvoke_support)
+				smc_id =
+				TZ_OS_CONTINUE_BLOCKED_REQUEST_SMCINVOKE_ID;
+			else
+				smc_id = TZ_OS_CONTINUE_BLOCKED_REQUEST_ID;
+			desc.arginfo =
+				TZ_OS_CONTINUE_BLOCKED_REQUEST_ID_PARAM_ID;
+			desc.args[0] = req->app_or_session_id;
+			ret = scm_call2(smc_id, &desc);
+			break;
+		}
+		default: {
+			pr_err("qseos_cmd_id 0x%x is not supported by armv8 scm_call2.\n",
+						qseos_cmd_id);
+			ret = -EINVAL;
+			break;
+		}
+		} /* end of switch (qseos_cmd_id) */
+	break;
+	} /* end of case SCM_SVC_TZSCHEDULER */
+	default: {
+		pr_err("svc_id 0x%x is not supported by armv8 scm_call2.\n",
+					svc_id);
+		ret = -EINVAL;
+		break;
+	}
+	} /* end of switch (svc_id) */
+	scm_resp->result = desc.ret[0];
+	scm_resp->resp_type = desc.ret[1];
+	scm_resp->data = desc.ret[2];
+	pr_debug("svc_id = 0x%x, tz_cmd_id = 0x%x, qseos_cmd_id = 0x%x, smc_id = 0x%x, param_id = 0x%x\n",
+		svc_id, tz_cmd_id, qseos_cmd_id, smc_id, desc.arginfo);
+	pr_debug("scm_resp->result = 0x%x, scm_resp->resp_type = 0x%x, scm_resp->data = 0x%x\n",
+		scm_resp->result, scm_resp->resp_type, scm_resp->data);
+	return ret;
+}
+
+
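+/*
+ * Route an scm call to TZ: use the legacy scm_call() interface when the
+ * firmware is pre-ARMv8; otherwise repackage the request for scm_call2(),
+ * which maps each QSEOS command to its dedicated SMC ID and parameter
+ * descriptor (see qseecom_scm_call2() above).
+ */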
+static int qseecom_scm_call(u32 svc_id, u32 tz_cmd_id, const void *cmd_buf,
+		size_t cmd_len, void *resp_buf, size_t resp_len)
+{
+	if (!is_scm_armv8())
+		return scm_call(svc_id, tz_cmd_id, cmd_buf, cmd_len,
+				resp_buf, resp_len);
+	else
+		return qseecom_scm_call2(svc_id, tz_cmd_id, cmd_buf, resp_buf);
+}
+
+static int __qseecom_is_svc_unique(struct qseecom_dev_handle *data,
+		struct qseecom_register_listener_req *svc)
+{
+	struct qseecom_registered_listener_list *ptr;
+	int unique = 1;
+	unsigned long flags;
+
+	spin_lock_irqsave(&qseecom.registered_listener_list_lock, flags);
+	list_for_each_entry(ptr, &qseecom.registered_listener_list_head, list) {
+		if (ptr->svc.listener_id == svc->listener_id) {
+			pr_err("Service id: %u is already registered\n",
+					ptr->svc.listener_id);
+			unique = 0;
+			break;
+		}
+	}
+	spin_unlock_irqrestore(&qseecom.registered_listener_list_lock, flags);
+	return unique;
+}
+
+static struct qseecom_registered_listener_list *__qseecom_find_svc(
+						int32_t listener_id)
+{
+	struct qseecom_registered_listener_list *entry = NULL;
+	bool found = false;
+	unsigned long flags;
+
+	spin_lock_irqsave(&qseecom.registered_listener_list_lock, flags);
+	list_for_each_entry(entry,
+			&qseecom.registered_listener_list_head, list) {
+		if (entry->svc.listener_id == listener_id) {
+			found = true;
+			break;
+		}
+	}
+	spin_unlock_irqrestore(&qseecom.registered_listener_list_lock, flags);
+
+	/*
+	 * If the loop completes without a match, "entry" points at the list
+	 * head container, not a valid element, so it must not be dereferenced.
+	 */
+	if (!found) {
+		pr_err("Service id: %u is not found\n", listener_id);
+		return NULL;
+	}
+
+	return entry;
+}
+
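+/*
+ * Import the listener's shared buffer from the client-supplied ION fd,
+ * resolve its physical and kernel virtual addresses, and register it with
+ * QSEE, using the 32-bit or 64-bit request layout depending on the QSEE
+ * version.
+ */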
+static int __qseecom_set_sb_memory(struct qseecom_registered_listener_list *svc,
+				struct qseecom_dev_handle *handle,
+				struct qseecom_register_listener_req *listener)
+{
+	int ret = 0;
+	struct qseecom_register_listener_ireq req;
+	struct qseecom_register_listener_64bit_ireq req_64bit;
+	struct qseecom_command_scm_resp resp;
+	ion_phys_addr_t pa;
+	void *cmd_buf = NULL;
+	size_t cmd_len;
+
+	/* Get the handle of the shared fd */
+	svc->ihandle = ion_import_dma_buf(qseecom.ion_clnt,
+					listener->ifd_data_fd);
+	if (IS_ERR_OR_NULL(svc->ihandle)) {
+		pr_err("Ion client could not retrieve the handle\n");
+		return -ENOMEM;
+	}
+
+	/* Get the physical address of the ION BUF */
+	ret = ion_phys(qseecom.ion_clnt, svc->ihandle, &pa, &svc->sb_length);
+	if (ret) {
+		pr_err("Cannot get phys_addr for the Ion Client, ret = %d\n",
+			ret);
+		return ret;
+	}
+	/* Populate the structure for sending scm call to load image */
+	svc->sb_virt = (char *) ion_map_kernel(qseecom.ion_clnt, svc->ihandle);
+	if (IS_ERR_OR_NULL(svc->sb_virt)) {
+		pr_err("ION memory mapping for listener shared buffer failed\n");
+		return -ENOMEM;
+	}
+	svc->sb_phys = (phys_addr_t)pa;
+
+	if (qseecom.qsee_version < QSEE_VERSION_40) {
+		req.qsee_cmd_id = QSEOS_REGISTER_LISTENER;
+		req.listener_id = svc->svc.listener_id;
+		req.sb_len = svc->sb_length;
+		req.sb_ptr = (uint32_t)svc->sb_phys;
+		cmd_buf = (void *)&req;
+		cmd_len = sizeof(struct qseecom_register_listener_ireq);
+	} else {
+		req_64bit.qsee_cmd_id = QSEOS_REGISTER_LISTENER;
+		req_64bit.listener_id = svc->svc.listener_id;
+		req_64bit.sb_len = svc->sb_length;
+		req_64bit.sb_ptr = (uint64_t)svc->sb_phys;
+		cmd_buf = (void *)&req_64bit;
+		cmd_len = sizeof(struct qseecom_register_listener_64bit_ireq);
+	}
+
+	resp.result = QSEOS_RESULT_INCOMPLETE;
+
+	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, cmd_buf, cmd_len,
+					 &resp, sizeof(resp));
+	if (ret) {
+		pr_err("qseecom_scm_call failed with err: %d\n", ret);
+		return -EINVAL;
+	}
+
+	if (resp.result != QSEOS_RESULT_SUCCESS) {
+		pr_err("Error SB registration req: resp.result = %d\n",
+			resp.result);
+		return -EPERM;
+	}
+	return 0;
+}
+
+static int qseecom_register_listener(struct qseecom_dev_handle *data,
+					void __user *argp)
+{
+	int ret = 0;
+	unsigned long flags;
+	struct qseecom_register_listener_req rcvd_lstnr;
+	struct qseecom_registered_listener_list *new_entry;
+
+	ret = copy_from_user(&rcvd_lstnr, argp, sizeof(rcvd_lstnr));
+	if (ret) {
+		pr_err("copy_from_user failed\n");
+		return -EFAULT;
+	}
+	if (!access_ok(VERIFY_WRITE, (void __user *)rcvd_lstnr.virt_sb_base,
+			rcvd_lstnr.sb_size))
+		return -EFAULT;
+
+	data->listener.id = 0;
+	if (!__qseecom_is_svc_unique(data, &rcvd_lstnr)) {
+		pr_err("Service is not unique and is already registered\n");
+		data->released = true;
+		return -EBUSY;
+	}
+
+	new_entry = kzalloc(sizeof(*new_entry), GFP_KERNEL);
+	if (!new_entry) {
+		pr_err("kzalloc failed\n");
+		return -ENOMEM;
+	}
+	memcpy(&new_entry->svc, &rcvd_lstnr, sizeof(rcvd_lstnr));
+	new_entry->rcv_req_flag = 0;
+
+	new_entry->svc.listener_id = rcvd_lstnr.listener_id;
+	new_entry->sb_length = rcvd_lstnr.sb_size;
+	new_entry->user_virt_sb_base = rcvd_lstnr.virt_sb_base;
+	if (__qseecom_set_sb_memory(new_entry, data, &rcvd_lstnr)) {
+		pr_err("qseecom_set_sb_memory failed\n");
+		kzfree(new_entry);
+		return -ENOMEM;
+	}
+
+	data->listener.id = rcvd_lstnr.listener_id;
+	init_waitqueue_head(&new_entry->rcv_req_wq);
+	init_waitqueue_head(&new_entry->listener_block_app_wq);
+	new_entry->send_resp_flag = 0;
+	new_entry->listener_in_use = false;
+	spin_lock_irqsave(&qseecom.registered_listener_list_lock, flags);
+	list_add_tail(&new_entry->list, &qseecom.registered_listener_list_head);
+	spin_unlock_irqrestore(&qseecom.registered_listener_list_lock, flags);
+
+	return ret;
+}
+
+static int qseecom_unregister_listener(struct qseecom_dev_handle *data)
+{
+	int ret = 0;
+	unsigned long flags;
+	uint32_t unmap_mem = 0;
+	struct qseecom_register_listener_ireq req;
+	struct qseecom_registered_listener_list *ptr_svc = NULL;
+	struct qseecom_command_scm_resp resp;
+	struct ion_handle *ihandle = NULL;		/* Retrieve phy addr */
+
+	req.qsee_cmd_id = QSEOS_DEREGISTER_LISTENER;
+	req.listener_id = data->listener.id;
+	resp.result = QSEOS_RESULT_INCOMPLETE;
+
+	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, &req,
+					sizeof(req), &resp, sizeof(resp));
+	if (ret) {
+		pr_err("scm_call() failed with err: %d (lstnr id=%d)\n",
+				ret, data->listener.id);
+		return ret;
+	}
+
+	if (resp.result != QSEOS_RESULT_SUCCESS) {
+		pr_err("Failed resp.result=%d,(lstnr id=%d)\n",
+				resp.result, data->listener.id);
+		return -EPERM;
+	}
+
+	data->abort = 1;
+	spin_lock_irqsave(&qseecom.registered_listener_list_lock, flags);
+	list_for_each_entry(ptr_svc, &qseecom.registered_listener_list_head,
+			list) {
+		if (ptr_svc->svc.listener_id == data->listener.id) {
+			wake_up_all(&ptr_svc->rcv_req_wq);
+			break;
+		}
+	}
+	spin_unlock_irqrestore(&qseecom.registered_listener_list_lock, flags);
+
+	while (atomic_read(&data->ioctl_count) > 1) {
+		if (wait_event_freezable(data->abort_wq,
+				atomic_read(&data->ioctl_count) <= 1)) {
+			pr_err("Interrupted from abort\n");
+			ret = -ERESTARTSYS;
+			return ret;
+		}
+	}
+
+	spin_lock_irqsave(&qseecom.registered_listener_list_lock, flags);
+	list_for_each_entry(ptr_svc,
+			&qseecom.registered_listener_list_head,
+			list) {
+		if (ptr_svc->svc.listener_id == data->listener.id) {
+			if (ptr_svc->sb_virt) {
+				unmap_mem = 1;
+				ihandle = ptr_svc->ihandle;
+			}
+			list_del(&ptr_svc->list);
+			kzfree(ptr_svc);
+			break;
+		}
+	}
+	spin_unlock_irqrestore(&qseecom.registered_listener_list_lock, flags);
+
+	/* Unmap the memory */
+	if (unmap_mem) {
+		if (!IS_ERR_OR_NULL(ihandle)) {
+			ion_unmap_kernel(qseecom.ion_clnt, ihandle);
+			ion_free(qseecom.ion_clnt, ihandle);
+		}
+	}
+	data->released = true;
+	return ret;
+}
+
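+/*
+ * Apply a bus-bandwidth vote: keep the QSEE core clock enabled whenever a
+ * non-INACTIVE mode is requested, and roll the clock state back if the
+ * msm_bus scaling request fails.
+ */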
+static int __qseecom_set_msm_bus_request(uint32_t mode)
+{
+	int ret = 0;
+	struct qseecom_clk *qclk;
+
+	qclk = &qseecom.qsee;
+	if (qclk->ce_core_src_clk != NULL) {
+		if (mode == INACTIVE) {
+			__qseecom_disable_clk(CLK_QSEE);
+		} else {
+			ret = __qseecom_enable_clk(CLK_QSEE);
+			if (ret)
+				pr_err("CLK enabling failed (%d) MODE (%d)\n",
+							ret, mode);
+		}
+	}
+
+	if ((!ret) && (qseecom.current_mode != mode)) {
+		ret = msm_bus_scale_client_update_request(
+					qseecom.qsee_perf_client, mode);
+		if (ret) {
+			pr_err("Bandwidth req failed(%d) MODE (%d)\n",
+							ret, mode);
+			if (qclk->ce_core_src_clk != NULL) {
+				if (mode == INACTIVE) {
+					ret = __qseecom_enable_clk(CLK_QSEE);
+					if (ret)
+						pr_err("CLK enable failed\n");
+				} else
+					__qseecom_disable_clk(CLK_QSEE);
+			}
+		}
+		qseecom.current_mode = mode;
+	}
+	return ret;
+}
+
+static void qseecom_bw_inactive_req_work(struct work_struct *work)
+{
+	mutex_lock(&app_access_lock);
+	mutex_lock(&qsee_bw_mutex);
+	if (qseecom.timer_running)
+		__qseecom_set_msm_bus_request(INACTIVE);
+	pr_debug("current_mode = %d, cumulative_mode = %d\n",
+				qseecom.current_mode, qseecom.cumulative_mode);
+	qseecom.timer_running = false;
+	mutex_unlock(&qsee_bw_mutex);
+	mutex_unlock(&app_access_lock);
+}
+
+static void qseecom_scale_bus_bandwidth_timer_callback(unsigned long data)
+{
+	schedule_work(&qseecom.bw_inactive_req_ws);
+}
+
+static int __qseecom_decrease_clk_ref_count(enum qseecom_ce_hw_instance ce)
+{
+	struct qseecom_clk *qclk;
+	int ret = 0;
+	mutex_lock(&clk_access_lock);
+	if (ce == CLK_QSEE)
+		qclk = &qseecom.qsee;
+	else
+		qclk = &qseecom.ce_drv;
+
+	if (qclk->clk_access_cnt > 2) {
+		pr_err("Invalid clock ref count %d\n", qclk->clk_access_cnt);
+		ret = -EINVAL;
+		goto err_dec_ref_cnt;
+	}
+	if (qclk->clk_access_cnt == 2)
+		qclk->clk_access_cnt--;
+
+err_dec_ref_cnt:
+	mutex_unlock(&clk_access_lock);
+	return ret;
+}
+
+
+static int qseecom_scale_bus_bandwidth_timer(uint32_t mode)
+{
+	int32_t ret = 0;
+	int32_t request_mode = INACTIVE;
+
+	mutex_lock(&qsee_bw_mutex);
+	if (mode == 0) {
+		if (qseecom.cumulative_mode > MEDIUM)
+			request_mode = HIGH;
+		else
+			request_mode = qseecom.cumulative_mode;
+	} else {
+		request_mode = mode;
+	}
+
+	ret = __qseecom_set_msm_bus_request(request_mode);
+	if (ret) {
+		pr_err("set msm bus request failed (%d),request_mode (%d)\n",
+			ret, request_mode);
+		goto err_scale_timer;
+	}
+
+	if (qseecom.timer_running) {
+		ret = __qseecom_decrease_clk_ref_count(CLK_QSEE);
+		if (ret) {
+			pr_err("Failed to decrease clk ref count.\n");
+			goto err_scale_timer;
+		}
+		del_timer_sync(&(qseecom.bw_scale_down_timer));
+		qseecom.timer_running = false;
+	}
+err_scale_timer:
+	mutex_unlock(&qsee_bw_mutex);
+	return ret;
+}
+
+
+static int qseecom_unregister_bus_bandwidth_needs(
+					struct qseecom_dev_handle *data)
+{
+	int32_t ret = 0;
+
+	qseecom.cumulative_mode -= data->mode;
+	data->mode = INACTIVE;
+
+	return ret;
+}
+
+static int __qseecom_register_bus_bandwidth_needs(
+			struct qseecom_dev_handle *data, uint32_t request_mode)
+{
+	int32_t ret = 0;
+
+	if (data->mode == INACTIVE) {
+		qseecom.cumulative_mode += request_mode;
+		data->mode = request_mode;
+	} else {
+		if (data->mode != request_mode) {
+			qseecom.cumulative_mode -= data->mode;
+			qseecom.cumulative_mode += request_mode;
+			data->mode = request_mode;
+		}
+	}
+	return ret;
+}
+
+static int qseecom_perf_enable(struct qseecom_dev_handle *data)
+{
+	int ret = 0;
+	ret = qsee_vote_for_clock(data, CLK_DFAB);
+	if (ret) {
+		pr_err("Failed to vote for DFAB clock with err %d\n", ret);
+		goto perf_enable_exit;
+	}
+	ret = qsee_vote_for_clock(data, CLK_SFPB);
+	if (ret) {
+		qsee_disable_clock_vote(data, CLK_DFAB);
+		pr_err("Failed to vote for SFPB clock with err %d\n", ret);
+		goto perf_enable_exit;
+	}
+
+perf_enable_exit:
+	return ret;
+}
+
+static int qseecom_scale_bus_bandwidth(struct qseecom_dev_handle *data,
+						void __user *argp)
+{
+	int32_t ret = 0;
+	int32_t req_mode;
+
+	if (qseecom.no_clock_support)
+		return 0;
+
+	ret = copy_from_user(&req_mode, argp, sizeof(req_mode));
+	if (ret) {
+		pr_err("copy_from_user failed\n");
+		return -EFAULT;
+	}
+	if (req_mode > HIGH) {
+		pr_err("Invalid bandwidth mode (%d)\n", req_mode);
+		return -EINVAL;
+	}
+
+	/*
+	 * Register bus bandwidth needs if bus scaling feature is enabled;
+	 * otherwise, qseecom enables/disables clocks for the client directly.
+	 */
+	if (qseecom.support_bus_scaling) {
+		mutex_lock(&qsee_bw_mutex);
+		ret = __qseecom_register_bus_bandwidth_needs(data, req_mode);
+		mutex_unlock(&qsee_bw_mutex);
+	} else {
+		pr_debug("Bus scaling feature is NOT enabled\n");
+		pr_debug("request bandwidth mode %d for the client\n",
+				req_mode);
+		if (req_mode != INACTIVE) {
+			ret = qseecom_perf_enable(data);
+			if (ret)
+				pr_err("Failed to vote for clock with err %d\n",
+						ret);
+		} else {
+			qsee_disable_clock_vote(data, CLK_DFAB);
+			qsee_disable_clock_vote(data, CLK_SFPB);
+		}
+	}
+	return ret;
+}
+
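+/*
+ * (Re)arm the inactivity timer that drops the bus-bandwidth vote once
+ * "duration" milliseconds pass without further crypto activity.
+ */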
+static void __qseecom_add_bw_scale_down_timer(uint32_t duration)
+{
+	if (qseecom.no_clock_support)
+		return;
+
+	mutex_lock(&qsee_bw_mutex);
+	qseecom.bw_scale_down_timer.expires = jiffies +
+		msecs_to_jiffies(duration);
+	mod_timer(&(qseecom.bw_scale_down_timer),
+		qseecom.bw_scale_down_timer.expires);
+	qseecom.timer_running = true;
+	mutex_unlock(&qsee_bw_mutex);
+}
+
+static void __qseecom_disable_clk_scale_down(struct qseecom_dev_handle *data)
+{
+	if (!qseecom.support_bus_scaling)
+		qsee_disable_clock_vote(data, CLK_SFPB);
+	else
+		__qseecom_add_bw_scale_down_timer(
+			QSEECOM_LOAD_APP_CRYPTO_TIMEOUT);
+}
+
+static int __qseecom_enable_clk_scale_up(struct qseecom_dev_handle *data)
+{
+	int ret = 0;
+	if (qseecom.support_bus_scaling) {
+		ret = qseecom_scale_bus_bandwidth_timer(MEDIUM);
+		if (ret)
+			pr_err("Failed to set bw MEDIUM.\n");
+	} else {
+		ret = qsee_vote_for_clock(data, CLK_SFPB);
+		if (ret)
+			pr_err("Fail vote for clk SFPB ret %d\n", ret);
+	}
+	return ret;
+}
+
+static int qseecom_set_client_mem_param(struct qseecom_dev_handle *data,
+						void __user *argp)
+{
+	ion_phys_addr_t pa;
+	int32_t ret;
+	struct qseecom_set_sb_mem_param_req req;
+	size_t len;
+
+	/* Copy the relevant information needed for loading the image */
+	if (copy_from_user(&req, (void __user *)argp, sizeof(req)))
+		return -EFAULT;
+
+	if ((req.ifd_data_fd <= 0) || (req.virt_sb_base == NULL) ||
+					(req.sb_len == 0)) {
+		pr_err("Invalid input(s): ion_fd(%d), sb_len(%d), vaddr(0x%pK)\n",
+			req.ifd_data_fd, req.sb_len, req.virt_sb_base);
+		return -EFAULT;
+	}
+	if (!access_ok(VERIFY_WRITE, (void __user *)req.virt_sb_base,
+			req.sb_len))
+		return -EFAULT;
+
+	/* Get the handle of the shared fd */
+	data->client.ihandle = ion_import_dma_buf(qseecom.ion_clnt,
+						req.ifd_data_fd);
+	if (IS_ERR_OR_NULL(data->client.ihandle)) {
+		pr_err("Ion client could not retrieve the handle\n");
+		return -ENOMEM;
+	}
+	/* Get the physical address of the ION BUF */
+	ret = ion_phys(qseecom.ion_clnt, data->client.ihandle, &pa, &len);
+	if (ret) {
+		pr_err("Cannot get phys_addr for the Ion Client, ret = %d\n",
+			ret);
+		return ret;
+	}
+
+	if (len < req.sb_len) {
+		pr_err("Requested length (0x%x) is > allocated (0x%zx)\n",
+			req.sb_len, len);
+		return -EINVAL;
+	}
+	/* Populate the structure for sending scm call to load image */
+	data->client.sb_virt = (char *) ion_map_kernel(qseecom.ion_clnt,
+							data->client.ihandle);
+	if (IS_ERR_OR_NULL(data->client.sb_virt)) {
+		pr_err("ION memory mapping for client shared buf failed\n");
+		return -ENOMEM;
+	}
+	data->client.sb_phys = (phys_addr_t)pa;
+	data->client.sb_length = req.sb_len;
+	data->client.user_virt_sb_base = (uintptr_t)req.virt_sb_base;
+	return 0;
+}
+
+static int __qseecom_listener_has_sent_rsp(struct qseecom_dev_handle *data)
+{
+	int ret;
+	ret = (qseecom.send_resp_flag != 0);
+	return ret || data->abort;
+}
+
+static int __qseecom_reentrancy_listener_has_sent_rsp(
+			struct qseecom_dev_handle *data,
+			struct qseecom_registered_listener_list *ptr_svc)
+{
+	int ret;
+
+	ret = (ptr_svc->send_resp_flag != 0);
+	return ret || data->abort;
+}
+
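+/*
+ * Return a QSEOS_RESULT_FAILURE listener response to TZ when an incomplete
+ * command names a listener that is missing or not yet initialized, so TZ
+ * is not left waiting for a response that will never come.
+ */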
+static int __qseecom_qseos_fail_return_resp_tz(struct qseecom_dev_handle *data,
+					struct qseecom_command_scm_resp *resp,
+			struct qseecom_client_listener_data_irsp *send_data_rsp,
+			struct qseecom_registered_listener_list *ptr_svc,
+							uint32_t lstnr)
+{
+	int ret = 0;
+
+	send_data_rsp->status = QSEOS_RESULT_FAILURE;
+	qseecom.send_resp_flag = 0;
+	send_data_rsp->qsee_cmd_id = QSEOS_LISTENER_DATA_RSP_COMMAND;
+	send_data_rsp->listener_id = lstnr;
+	if (ptr_svc)
+		pr_warn("listener_id:%x, lstnr: %x\n",
+					ptr_svc->svc.listener_id, lstnr);
+	if (ptr_svc && ptr_svc->ihandle) {
+		ret = msm_ion_do_cache_op(qseecom.ion_clnt, ptr_svc->ihandle,
+					ptr_svc->sb_virt, ptr_svc->sb_length,
+					ION_IOC_CLEAN_INV_CACHES);
+		if (ret) {
+			pr_err("cache operation failed %d\n", ret);
+			return ret;
+		}
+	}
+
+	if (lstnr == RPMB_SERVICE) {
+		ret = __qseecom_enable_clk(CLK_QSEE);
+		if (ret)
+			return ret;
+	}
+	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, send_data_rsp,
+				sizeof(*send_data_rsp), resp, sizeof(*resp));
+	if (ret) {
+		pr_err("scm_call() failed with err: %d (app_id = %d)\n",
+						ret, data->client.app_id);
+		if (lstnr == RPMB_SERVICE)
+			__qseecom_disable_clk(CLK_QSEE);
+		return ret;
+	}
+	if ((resp->result != QSEOS_RESULT_SUCCESS) &&
+			(resp->result != QSEOS_RESULT_INCOMPLETE)) {
+		pr_err("fail:resp res= %d,app_id = %d,lstr = %d\n",
+				resp->result, data->client.app_id, lstnr);
+		ret = -EINVAL;
+	}
+	if (lstnr == RPMB_SERVICE)
+		__qseecom_disable_clk(CLK_QSEE);
+	return ret;
+}
+
+static void __qseecom_clean_listener_sglistinfo(
+			struct qseecom_registered_listener_list *ptr_svc)
+{
+	if (ptr_svc->sglist_cnt) {
+		memset(ptr_svc->sglistinfo_ptr, 0,
+			SGLISTINFO_TABLE_SIZE);
+		ptr_svc->sglist_cnt = 0;
+	}
+}
+
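+/*
+ * Service QSEOS_RESULT_INCOMPLETE responses: each round wakes the listener
+ * named in resp->data, blocks all signals while waiting for its response,
+ * then returns that response (with its sglist table) to TZ, repeating for
+ * as long as TZ keeps returning INCOMPLETE.
+ */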
+static int __qseecom_process_incomplete_cmd(struct qseecom_dev_handle *data,
+					struct qseecom_command_scm_resp *resp)
+{
+	int ret = 0;
+	int rc = 0;
+	uint32_t lstnr;
+	unsigned long flags;
+	struct qseecom_client_listener_data_irsp send_data_rsp;
+	struct qseecom_client_listener_data_64bit_irsp send_data_rsp_64bit;
+	struct qseecom_registered_listener_list *ptr_svc = NULL;
+	sigset_t new_sigset;
+	sigset_t old_sigset;
+	uint32_t status;
+	void *cmd_buf = NULL;
+	size_t cmd_len;
+	struct sglist_info *table = NULL;
+
+	while (resp->result == QSEOS_RESULT_INCOMPLETE) {
+		lstnr = resp->data;
+		/*
+		 * Wake up the blocking listener service with the lstnr id
+		 */
+		spin_lock_irqsave(&qseecom.registered_listener_list_lock,
+					flags);
+		list_for_each_entry(ptr_svc,
+				&qseecom.registered_listener_list_head, list) {
+			if (ptr_svc->svc.listener_id == lstnr) {
+				ptr_svc->listener_in_use = true;
+				ptr_svc->rcv_req_flag = 1;
+				wake_up_interruptible(&ptr_svc->rcv_req_wq);
+				break;
+			}
+		}
+		spin_unlock_irqrestore(&qseecom.registered_listener_list_lock,
+				flags);
+
+		if (ptr_svc == NULL) {
+			pr_err("Listener Svc %d does not exist\n", lstnr);
+			__qseecom_qseos_fail_return_resp_tz(data, resp,
+					&send_data_rsp, ptr_svc, lstnr);
+			return -EINVAL;
+		}
+
+		if (!ptr_svc->ihandle) {
+			pr_err("Client handle is not initialized\n");
+			__qseecom_qseos_fail_return_resp_tz(data, resp,
+					&send_data_rsp, ptr_svc, lstnr);
+			return -EINVAL;
+		}
+
+		if (ptr_svc->svc.listener_id != lstnr) {
+			pr_warn("Service requested does not exist\n");
+			__qseecom_qseos_fail_return_resp_tz(data, resp,
+					&send_data_rsp, NULL, lstnr);
+			return -ERESTARTSYS;
+		}
+		pr_debug("waking up rcv_req_wq and waiting for send_resp_wq\n");
+
+		/* initialize the new signal mask with all signals*/
+		sigfillset(&new_sigset);
+		/* block all signals */
+		sigprocmask(SIG_SETMASK, &new_sigset, &old_sigset);
+
+		do {
+			/*
+			 * When reentrancy is not supported, check global
+			 * send_resp_flag; otherwise, check this listener's
+			 * send_resp_flag.
+			 */
+			if (!qseecom.qsee_reentrancy_support &&
+				!wait_event_freezable(qseecom.send_resp_wq,
+				__qseecom_listener_has_sent_rsp(data))) {
+					break;
+			}
+
+			if (qseecom.qsee_reentrancy_support &&
+				!wait_event_freezable(qseecom.send_resp_wq,
+				__qseecom_reentrancy_listener_has_sent_rsp(
+						data, ptr_svc))) {
+					break;
+			}
+		} while (1);
+
+		/* restore signal mask */
+		sigprocmask(SIG_SETMASK, &old_sigset, NULL);
+		if (data->abort) {
+			pr_err("Abort clnt %d waiting on lstnr svc %d, ret %d\n",
+				data->client.app_id, lstnr, ret);
+			rc = -ENODEV;
+			status = QSEOS_RESULT_FAILURE;
+		} else {
+			status = QSEOS_RESULT_SUCCESS;
+		}
+
+		qseecom.send_resp_flag = 0;
+		ptr_svc->send_resp_flag = 0;
+		table = ptr_svc->sglistinfo_ptr;
+		if (qseecom.qsee_version < QSEE_VERSION_40) {
+			send_data_rsp.listener_id  = lstnr;
+			send_data_rsp.status = status;
+			send_data_rsp.sglistinfo_ptr =
+				(uint32_t)virt_to_phys(table);
+			send_data_rsp.sglistinfo_len =
+				SGLISTINFO_TABLE_SIZE;
+			dmac_flush_range((void *)table,
+				(void *)table + SGLISTINFO_TABLE_SIZE);
+			cmd_buf = (void *)&send_data_rsp;
+			cmd_len = sizeof(send_data_rsp);
+		} else {
+			send_data_rsp_64bit.listener_id  = lstnr;
+			send_data_rsp_64bit.status = status;
+			send_data_rsp_64bit.sglistinfo_ptr =
+				virt_to_phys(table);
+			send_data_rsp_64bit.sglistinfo_len =
+				SGLISTINFO_TABLE_SIZE;
+			dmac_flush_range((void *)table,
+				(void *)table + SGLISTINFO_TABLE_SIZE);
+			cmd_buf = (void *)&send_data_rsp_64bit;
+			cmd_len = sizeof(send_data_rsp_64bit);
+		}
+		if (!qseecom.whitelist_support)
+			*(uint32_t *)cmd_buf = QSEOS_LISTENER_DATA_RSP_COMMAND;
+		else
+			*(uint32_t *)cmd_buf =
+				QSEOS_LISTENER_DATA_RSP_COMMAND_WHITELIST;
+		if (ptr_svc) {
+			ret = msm_ion_do_cache_op(qseecom.ion_clnt,
+					ptr_svc->ihandle,
+					ptr_svc->sb_virt, ptr_svc->sb_length,
+					ION_IOC_CLEAN_INV_CACHES);
+			if (ret) {
+				pr_err("cache operation failed %d\n", ret);
+				return ret;
+			}
+		}
+
+		if ((lstnr == RPMB_SERVICE) || (lstnr == SSD_SERVICE)) {
+			ret = __qseecom_enable_clk(CLK_QSEE);
+			if (ret)
+				return ret;
+		}
+
+		ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
+					cmd_buf, cmd_len, resp, sizeof(*resp));
+		ptr_svc->listener_in_use = false;
+		__qseecom_clean_listener_sglistinfo(ptr_svc);
+		if (ret) {
+			pr_err("scm_call() failed with err: %d (app_id = %d)\n",
+				ret, data->client.app_id);
+			if ((lstnr == RPMB_SERVICE) || (lstnr == SSD_SERVICE))
+				__qseecom_disable_clk(CLK_QSEE);
+			return ret;
+		}
+		if ((resp->result != QSEOS_RESULT_SUCCESS) &&
+			(resp->result != QSEOS_RESULT_INCOMPLETE)) {
+			pr_err("fail:resp res= %d,app_id = %d,lstr = %d\n",
+				resp->result, data->client.app_id, lstnr);
+			ret = -EINVAL;
+		}
+		if ((lstnr == RPMB_SERVICE) || (lstnr == SSD_SERVICE))
+			__qseecom_disable_clk(CLK_QSEE);
+
+	}
+	if (rc)
+		return rc;
+
+	return ret;
+}
+
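+/*
+ * Legacy (pre-smcinvoke) handling of QSEOS_RESULT_BLOCKED_ON_LISTENER:
+ * sleep until the listener the app is blocked on becomes free, then ask TZ
+ * to resume the app with QSEOS_CONTINUE_BLOCKED_REQ_COMMAND.
+ */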
+static int __qseecom_process_blocked_on_listener_legacy(
+				struct qseecom_command_scm_resp *resp,
+				struct qseecom_registered_app_list *ptr_app,
+				struct qseecom_dev_handle *data)
+{
+	struct qseecom_registered_listener_list *list_ptr;
+	int ret = 0;
+	struct qseecom_continue_blocked_request_ireq ireq;
+	struct qseecom_command_scm_resp continue_resp;
+	bool found_app = false;
+	unsigned long flags;
+
+	if (!resp || !data) {
+		pr_err("invalid resp or data pointer\n");
+		ret = -EINVAL;
+		goto exit;
+	}
+
+	/* find app_id & img_name from list */
+	if (!ptr_app) {
+		spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
+		list_for_each_entry(ptr_app, &qseecom.registered_app_list_head,
+							list) {
+			if ((ptr_app->app_id == data->client.app_id) &&
+				(!strcmp(ptr_app->app_name,
+						data->client.app_name))) {
+				found_app = true;
+				break;
+			}
+		}
+		spin_unlock_irqrestore(&qseecom.registered_app_list_lock,
+					flags);
+		if (!found_app) {
+			pr_err("app_id %d (%s) is not found\n",
+				data->client.app_id,
+				(char *)data->client.app_name);
+			ret = -ENOENT;
+			goto exit;
+		}
+	}
+
+	list_ptr = __qseecom_find_svc(resp->data);
+	if (!list_ptr) {
+		pr_err("Invalid listener ID\n");
+		ret = -ENODATA;
+		goto exit;
+	}
+	pr_debug("lsntr %d in_use = %d\n",
+			resp->data, list_ptr->listener_in_use);
+	ptr_app->blocked_on_listener_id = resp->data;
+
+	/* sleep until listener is available */
+	do {
+		qseecom.app_block_ref_cnt++;
+		ptr_app->app_blocked = true;
+		mutex_unlock(&app_access_lock);
+		if (wait_event_freezable(
+			list_ptr->listener_block_app_wq,
+			!list_ptr->listener_in_use)) {
+			pr_err("Interrupted: listener_id %d, app_id %d\n",
+				resp->data, ptr_app->app_id);
+			ret = -ERESTARTSYS;
+			goto exit;
+		}
+		mutex_lock(&app_access_lock);
+		ptr_app->app_blocked = false;
+		qseecom.app_block_ref_cnt--;
+	} while (list_ptr->listener_in_use);
+
+	ptr_app->blocked_on_listener_id = 0;
+	/* notify the blocked app that listener is available */
+	pr_warn("Lsntr %d is available, unblock app(%d) %s in TZ\n",
+		resp->data, data->client.app_id,
+		data->client.app_name);
+	ireq.qsee_cmd_id = QSEOS_CONTINUE_BLOCKED_REQ_COMMAND;
+	ireq.app_or_session_id = data->client.app_id;
+	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
+			&ireq, sizeof(ireq),
+			&continue_resp, sizeof(continue_resp));
+	if (ret) {
+		pr_err("scm_call for continue blocked req for app(%d) %s failed, ret %d\n",
+			data->client.app_id,
+			data->client.app_name, ret);
+		goto exit;
+	}
+	/*
+	 * After the TZ app is unblocked, continue to the next case
+	 * for incomplete request processing.
+	 */
+	resp->result = QSEOS_RESULT_INCOMPLETE;
+exit:
+	return ret;
+}
+
+static int __qseecom_process_blocked_on_listener_smcinvoke(
+			struct qseecom_command_scm_resp *resp, uint32_t app_id)
+{
+	struct qseecom_registered_listener_list *list_ptr;
+	int ret = 0;
+	struct qseecom_continue_blocked_request_ireq ireq;
+	struct qseecom_command_scm_resp continue_resp;
+	unsigned int session_id;
+
+	if (!resp) {
+		pr_err("invalid resp pointer\n");
+		ret = -EINVAL;
+		goto exit;
+	}
+	session_id = resp->resp_type;
+	list_ptr = __qseecom_find_svc(resp->data);
+	if (!list_ptr) {
+		pr_err("Invalid listener ID\n");
+		ret = -ENODATA;
+		goto exit;
+	}
+	pr_debug("lsntr %d in_use = %d\n",
+			resp->data, list_ptr->listener_in_use);
+	/* sleep until listener is available */
+	do {
+		qseecom.app_block_ref_cnt++;
+		mutex_unlock(&app_access_lock);
+		if (wait_event_freezable(
+			list_ptr->listener_block_app_wq,
+			!list_ptr->listener_in_use)) {
+			pr_err("Interrupted: listener_id %d, session_id %d\n",
+				resp->data, session_id);
+			ret = -ERESTARTSYS;
+			goto exit;
+		}
+		mutex_lock(&app_access_lock);
+		qseecom.app_block_ref_cnt--;
+	} while (list_ptr->listener_in_use);
+
+	/* notify TZ that listener is available */
+	pr_warn("Lsntr %d is available, unblock session(%d) in TZ\n",
+			resp->data, session_id);
+	ireq.qsee_cmd_id = QSEOS_CONTINUE_BLOCKED_REQ_COMMAND;
+	ireq.app_or_session_id = session_id;
+	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
+			&ireq, sizeof(ireq),
+			&continue_resp, sizeof(continue_resp));
+	if (ret) {
+		/* retry with legacy cmd */
+		qseecom.smcinvoke_support = false;
+		ireq.app_or_session_id = app_id;
+		ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
+			&ireq, sizeof(ireq),
+			&continue_resp, sizeof(continue_resp));
+		qseecom.smcinvoke_support = true;
+		if (ret) {
+			pr_err("cont block req for app %d or session %d fail\n",
+				app_id, session_id);
+			goto exit;
+		}
+	}
+	resp->result = QSEOS_RESULT_INCOMPLETE;
+exit:
+	return ret;
+}
+
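+/*
+ * Unblock an app blocked on a listener, using either the app-id based
+ * legacy command or the session-id based smcinvoke command, depending on
+ * what the firmware supports.
+ */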
+static int __qseecom_process_reentrancy_blocked_on_listener(
+				struct qseecom_command_scm_resp *resp,
+				struct qseecom_registered_app_list *ptr_app,
+				struct qseecom_dev_handle *data)
+{
+	if (!qseecom.smcinvoke_support)
+		return __qseecom_process_blocked_on_listener_legacy(
+			resp, ptr_app, data);
+	else
+		return __qseecom_process_blocked_on_listener_smcinvoke(
+			resp, data->client.app_id);
+}
+
+static int __qseecom_reentrancy_process_incomplete_cmd(
+					struct qseecom_dev_handle *data,
+					struct qseecom_command_scm_resp *resp)
+{
+	int ret = 0;
+	int rc = 0;
+	uint32_t lstnr;
+	unsigned long flags;
+	struct qseecom_client_listener_data_irsp send_data_rsp;
+	struct qseecom_client_listener_data_64bit_irsp send_data_rsp_64bit;
+	struct qseecom_registered_listener_list *ptr_svc = NULL;
+	sigset_t new_sigset;
+	sigset_t old_sigset;
+	uint32_t status;
+	void *cmd_buf = NULL;
+	size_t cmd_len;
+	struct sglist_info *table = NULL;
+
+	while (ret == 0 && rc == 0 && resp->result == QSEOS_RESULT_INCOMPLETE) {
+		lstnr = resp->data;
+		/*
+		 * Wake up the blocking listener service with the lstnr id
+		 */
+		spin_lock_irqsave(&qseecom.registered_listener_list_lock,
+					flags);
+		list_for_each_entry(ptr_svc,
+				&qseecom.registered_listener_list_head, list) {
+			if (ptr_svc->svc.listener_id == lstnr) {
+				ptr_svc->listener_in_use = true;
+				ptr_svc->rcv_req_flag = 1;
+				wake_up_interruptible(&ptr_svc->rcv_req_wq);
+				break;
+			}
+		}
+		spin_unlock_irqrestore(&qseecom.registered_listener_list_lock,
+				flags);
+
+		if (ptr_svc == NULL) {
+			pr_err("Listener Svc %d does not exist\n", lstnr);
+			return -EINVAL;
+		}
+
+		if (!ptr_svc->ihandle) {
+			pr_err("Client handle is not initialized\n");
+			return -EINVAL;
+		}
+
+		if (ptr_svc->svc.listener_id != lstnr) {
+			pr_warn("Service requested does not exist\n");
+			return -ERESTARTSYS;
+		}
+		pr_debug("waking up rcv_req_wq and waiting for send_resp_wq\n");
+
+		/* initialize the new signal mask with all signals*/
+		sigfillset(&new_sigset);
+
+		/* block all signals */
+		sigprocmask(SIG_SETMASK, &new_sigset, &old_sigset);
+
+		/* unlock mutex btw waking listener and sleep-wait */
+		mutex_unlock(&app_access_lock);
+		do {
+			if (!wait_event_freezable(qseecom.send_resp_wq,
+				__qseecom_reentrancy_listener_has_sent_rsp(
+						data, ptr_svc))) {
+					break;
+			}
+		} while (1);
+		/* lock mutex again after resp sent */
+		mutex_lock(&app_access_lock);
+		ptr_svc->send_resp_flag = 0;
+		qseecom.send_resp_flag = 0;
+
+		/* restore signal mask */
+		sigprocmask(SIG_SETMASK, &old_sigset, NULL);
+		if (data->abort) {
+			pr_err("Abort clnt %d waiting on lstnr svc %d, ret %d\n",
+				data->client.app_id, lstnr, ret);
+			rc = -ENODEV;
+			status  = QSEOS_RESULT_FAILURE;
+		} else {
+			status  = QSEOS_RESULT_SUCCESS;
+		}
+		table = ptr_svc->sglistinfo_ptr;
+		if (qseecom.qsee_version < QSEE_VERSION_40) {
+			send_data_rsp.listener_id  = lstnr;
+			send_data_rsp.status = status;
+			send_data_rsp.sglistinfo_ptr =
+				(uint32_t)virt_to_phys(table);
+			send_data_rsp.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
+			dmac_flush_range((void *)table,
+				(void *)table + SGLISTINFO_TABLE_SIZE);
+			cmd_buf = (void *)&send_data_rsp;
+			cmd_len = sizeof(send_data_rsp);
+		} else {
+			send_data_rsp_64bit.listener_id  = lstnr;
+			send_data_rsp_64bit.status = status;
+			send_data_rsp_64bit.sglistinfo_ptr =
+				virt_to_phys(table);
+			send_data_rsp_64bit.sglistinfo_len =
+				SGLISTINFO_TABLE_SIZE;
+			dmac_flush_range((void *)table,
+				(void *)table + SGLISTINFO_TABLE_SIZE);
+			cmd_buf = (void *)&send_data_rsp_64bit;
+			cmd_len = sizeof(send_data_rsp_64bit);
+		}
+		if (!qseecom.whitelist_support)
+			*(uint32_t *)cmd_buf = QSEOS_LISTENER_DATA_RSP_COMMAND;
+		else
+			*(uint32_t *)cmd_buf =
+				QSEOS_LISTENER_DATA_RSP_COMMAND_WHITELIST;
+		if (ptr_svc) {
+			ret = msm_ion_do_cache_op(qseecom.ion_clnt,
+					ptr_svc->ihandle,
+					ptr_svc->sb_virt, ptr_svc->sb_length,
+					ION_IOC_CLEAN_INV_CACHES);
+			if (ret) {
+				pr_err("cache operation failed %d\n", ret);
+				return ret;
+			}
+		}
+		if (lstnr == RPMB_SERVICE) {
+			ret = __qseecom_enable_clk(CLK_QSEE);
+			if (ret)
+				return ret;
+		}
+
+		ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
+					cmd_buf, cmd_len, resp, sizeof(*resp));
+		ptr_svc->listener_in_use = false;
+		__qseecom_clean_listener_sglistinfo(ptr_svc);
+		wake_up_interruptible(&ptr_svc->listener_block_app_wq);
+
+		if (ret) {
+			pr_err("scm_call() failed with err: %d (app_id = %d)\n",
+				ret, data->client.app_id);
+			goto exit;
+		}
+
+		switch (resp->result) {
+		case QSEOS_RESULT_BLOCKED_ON_LISTENER:
+			pr_warn("send lsr %d rsp, but app %d block on lsr %d\n",
+					lstnr, data->client.app_id, resp->data);
+			if (lstnr == resp->data) {
+				pr_err("lstnr %d should not be blocked!\n",
+					lstnr);
+				ret = -EINVAL;
+				goto exit;
+			}
+			ret = __qseecom_process_reentrancy_blocked_on_listener(
+					resp, NULL, data);
+			if (ret) {
+				pr_err("failed to process App(%d) %s blocked on listener %d\n",
+					data->client.app_id,
+					data->client.app_name, resp->data);
+				goto exit;
+			}
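+			/* fall through */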
+		case QSEOS_RESULT_SUCCESS:
+		case QSEOS_RESULT_INCOMPLETE:
+			break;
+		default:
+			pr_err("fail:resp res= %d,app_id = %d,lstr = %d\n",
+				resp->result, data->client.app_id, lstnr);
+			ret = -EINVAL;
+			goto exit;
+		}
+exit:
+		if (lstnr == RPMB_SERVICE)
+			__qseecom_disable_clk(CLK_QSEE);
+
+	}
+	if (rc)
+		return rc;
+
+	return ret;
+}
+
+/*
+ * QSEE doesn't support OS level cmds reentrancy until RE phase-3,
+ * and QSEE OS level scm_call cmds will fail if there is any blocked TZ app.
+ * So, before sending an OS level scm call, first check whether any app is
+ * blocked, and if so wait until all apps are unblocked.
+ */
+static void __qseecom_reentrancy_check_if_no_app_blocked(uint32_t smc_id)
+{
+	sigset_t new_sigset, old_sigset;
+
+	if (qseecom.qsee_reentrancy_support > QSEE_REENTRANCY_PHASE_0 &&
+		qseecom.qsee_reentrancy_support < QSEE_REENTRANCY_PHASE_3 &&
+		IS_OWNER_TRUSTED_OS(TZ_SYSCALL_OWNER_ID(smc_id))) {
+		/* sleep until all blocked apps are unblocked */
+		while (qseecom.app_block_ref_cnt > 0) {
+			sigfillset(&new_sigset);
+			sigprocmask(SIG_SETMASK, &new_sigset, &old_sigset);
+			mutex_unlock(&app_access_lock);
+			do {
+				if (!wait_event_freezable(qseecom.app_block_wq,
+					(qseecom.app_block_ref_cnt == 0)))
+					break;
+			} while (1);
+			mutex_lock(&app_access_lock);
+			sigprocmask(SIG_SETMASK, &old_sigset, NULL);
+		}
+	}
+}
+
+/*
+ * An scm_call to send data will fail if this TA is blocked or if more than
+ * one TA is requesting listener services; so, first check whether this app
+ * needs to wait.
+ */
+static void __qseecom_reentrancy_check_if_this_app_blocked(
+			struct qseecom_registered_app_list *ptr_app)
+{
+	sigset_t new_sigset, old_sigset;
+	if (qseecom.qsee_reentrancy_support) {
+		while (ptr_app->app_blocked || qseecom.app_block_ref_cnt > 1) {
+			/* thread sleep until this app unblocked */
+			sigfillset(&new_sigset);
+			sigprocmask(SIG_SETMASK, &new_sigset, &old_sigset);
+			mutex_unlock(&app_access_lock);
+			do {
+				if (!wait_event_freezable(qseecom.app_block_wq,
+					(!ptr_app->app_blocked &&
+					qseecom.app_block_ref_cnt <= 1)))
+					break;
+			} while (1);
+			mutex_lock(&app_access_lock);
+			sigprocmask(SIG_SETMASK, &old_sigset, NULL);
+		}
+	}
+}
+
+static int __qseecom_check_app_exists(struct qseecom_check_app_ireq req,
+					uint32_t *app_id)
+{
+	int32_t ret;
+	struct qseecom_command_scm_resp resp;
+	bool found_app = false;
+	struct qseecom_registered_app_list *entry = NULL;
+	unsigned long flags = 0;
+
+	if (!app_id) {
+		pr_err("Null pointer to app_id\n");
+		return -EINVAL;
+	}
+	*app_id = 0;
+
+	/* check if app exists and has been registered locally */
+	spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
+	list_for_each_entry(entry,
+			&qseecom.registered_app_list_head, list) {
+		if (!strcmp(entry->app_name, req.app_name)) {
+			found_app = true;
+			break;
+		}
+	}
+	spin_unlock_irqrestore(&qseecom.registered_app_list_lock, flags);
+	if (found_app) {
+		pr_debug("Found app with id %d\n", entry->app_id);
+		*app_id = entry->app_id;
+		return 0;
+	}
+
+	memset((void *)&resp, 0, sizeof(resp));
+
+	/*  SCM_CALL  to check if app_id for the mentioned app exists */
+	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, &req,
+				sizeof(struct qseecom_check_app_ireq),
+				&resp, sizeof(resp));
+	if (ret) {
+		pr_err("scm_call to check if app is already loaded failed\n");
+		return -EINVAL;
+	}
+
+	if (resp.result == QSEOS_RESULT_FAILURE)
+		return 0;
+
+	switch (resp.resp_type) {
+	/*qsee returned listener type response */
+	case QSEOS_LISTENER_ID:
+		pr_err("resp type is of listener type instead of app\n");
+		return -EINVAL;
+	case QSEOS_APP_ID:
+		*app_id = resp.data;
+		return 0;
+	default:
+		pr_err("invalid resp type (%d) from qsee\n",
+				resp.resp_type);
+		return -ENODEV;
+	}
+}
+
+static int qseecom_load_app(struct qseecom_dev_handle *data, void __user *argp)
+{
+	struct qseecom_registered_app_list *entry = NULL;
+	unsigned long flags = 0;
+	u32 app_id = 0;
+	struct ion_handle *ihandle;	/* Ion handle */
+	struct qseecom_load_img_req load_img_req;
+	int32_t ret = 0;
+	ion_phys_addr_t pa = 0;
+	size_t len;
+	struct qseecom_command_scm_resp resp;
+	struct qseecom_check_app_ireq req;
+	struct qseecom_load_app_ireq load_req;
+	struct qseecom_load_app_64bit_ireq load_req_64bit;
+	void *cmd_buf = NULL;
+	size_t cmd_len;
+	bool first_time = false;
+
+	/* Copy the relevant information needed for loading the image */
+	if (copy_from_user(&load_img_req,
+				(void __user *)argp,
+				sizeof(struct qseecom_load_img_req))) {
+		pr_err("copy_from_user failed\n");
+		return -EFAULT;
+	}
+
+	/* Check and load cmnlib */
+	if (qseecom.qsee_version > QSEEE_VERSION_00) {
+		if (!qseecom.commonlib_loaded &&
+				load_img_req.app_arch == ELFCLASS32) {
+			ret = qseecom_load_commonlib_image(data, "cmnlib");
+			if (ret) {
+				pr_err("failed to load cmnlib\n");
+				return -EIO;
+			}
+			qseecom.commonlib_loaded = true;
+			pr_debug("cmnlib is loaded\n");
+		}
+
+		if (!qseecom.commonlib64_loaded &&
+				load_img_req.app_arch == ELFCLASS64) {
+			ret = qseecom_load_commonlib_image(data, "cmnlib64");
+			if (ret) {
+				pr_err("failed to load cmnlib64\n");
+				return -EIO;
+			}
+			qseecom.commonlib64_loaded = true;
+			pr_debug("cmnlib64 is loaded\n");
+		}
+	}
+
+	if (qseecom.support_bus_scaling) {
+		mutex_lock(&qsee_bw_mutex);
+		ret = __qseecom_register_bus_bandwidth_needs(data, MEDIUM);
+		mutex_unlock(&qsee_bw_mutex);
+		if (ret)
+			return ret;
+	}
+
+	/* Vote for the SFPB clock */
+	ret = __qseecom_enable_clk_scale_up(data);
+	if (ret)
+		goto enable_clk_err;
+
+	req.qsee_cmd_id = QSEOS_APP_LOOKUP_COMMAND;
+	load_img_req.img_name[MAX_APP_NAME_SIZE-1] = '\0';
+	strlcpy(req.app_name, load_img_req.img_name, MAX_APP_NAME_SIZE);
+
+	ret = __qseecom_check_app_exists(req, &app_id);
+	if (ret < 0)
+		goto loadapp_err;
+
+	if (app_id) {
+		pr_debug("App id %d (%s) already exists\n", app_id,
+			(char *)(req.app_name));
+		spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
+		list_for_each_entry(entry,
+				&qseecom.registered_app_list_head, list) {
+			if (entry->app_id == app_id) {
+				entry->ref_cnt++;
+				break;
+			}
+		}
+		spin_unlock_irqrestore(
+				&qseecom.registered_app_list_lock, flags);
+		ret = 0;
+	} else {
+		first_time = true;
+		pr_warn("App (%s) doesn't exist, loading app for the first time\n",
+			(char *)(load_img_req.img_name));
+		/* Get the handle of the shared fd */
+		ihandle = ion_import_dma_buf(qseecom.ion_clnt,
+					load_img_req.ifd_data_fd);
+		if (IS_ERR_OR_NULL(ihandle)) {
+			pr_err("Ion client could not retrieve the handle\n");
+			ret = -ENOMEM;
+			goto loadapp_err;
+		}
+
+		/* Get the physical address of the ION BUF */
+		ret = ion_phys(qseecom.ion_clnt, ihandle, &pa, &len);
+		if (ret) {
+			pr_err("Cannot get phys_addr for the Ion Client, ret = %d\n",
+				ret);
+			goto loadapp_err;
+		}
+		if (load_img_req.mdt_len > len || load_img_req.img_len > len) {
+			pr_err("ion len %zu is smaller than mdt_len %u or img_len %u\n",
+					len, load_img_req.mdt_len,
+					load_img_req.img_len);
+			ret = -EINVAL;
+			goto loadapp_err;
+		}
+		/* Populate the structure for sending scm call to load image */
+		if (qseecom.qsee_version < QSEE_VERSION_40) {
+			load_req.qsee_cmd_id = QSEOS_APP_START_COMMAND;
+			load_req.mdt_len = load_img_req.mdt_len;
+			load_req.img_len = load_img_req.img_len;
+			strlcpy(load_req.app_name, load_img_req.img_name,
+						MAX_APP_NAME_SIZE);
+			load_req.phy_addr = (uint32_t)pa;
+			cmd_buf = (void *)&load_req;
+			cmd_len = sizeof(struct qseecom_load_app_ireq);
+		} else {
+			load_req_64bit.qsee_cmd_id = QSEOS_APP_START_COMMAND;
+			load_req_64bit.mdt_len = load_img_req.mdt_len;
+			load_req_64bit.img_len = load_img_req.img_len;
+			strlcpy(load_req_64bit.app_name, load_img_req.img_name,
+						MAX_APP_NAME_SIZE);
+			load_req_64bit.phy_addr = (uint64_t)pa;
+			cmd_buf = (void *)&load_req_64bit;
+			cmd_len = sizeof(struct qseecom_load_app_64bit_ireq);
+		}
+
+		ret = msm_ion_do_cache_op(qseecom.ion_clnt, ihandle, NULL, len,
+					ION_IOC_CLEAN_INV_CACHES);
+		if (ret) {
+			pr_err("cache operation failed %d\n", ret);
+			goto loadapp_err;
+		}
+
+		/*  SCM_CALL  to load the app and get the app_id back */
+		ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, cmd_buf,
+			cmd_len, &resp, sizeof(resp));
+		if (ret) {
+			pr_err("scm_call to load app failed\n");
+			if (!IS_ERR_OR_NULL(ihandle))
+				ion_free(qseecom.ion_clnt, ihandle);
+			ret = -EINVAL;
+			goto loadapp_err;
+		}
+
+		if (resp.result == QSEOS_RESULT_FAILURE) {
+			pr_err("scm_call rsp.result is QSEOS_RESULT_FAILURE\n");
+			if (!IS_ERR_OR_NULL(ihandle))
+				ion_free(qseecom.ion_clnt, ihandle);
+			ret = -EFAULT;
+			goto loadapp_err;
+		}
+
+		if (resp.result == QSEOS_RESULT_INCOMPLETE) {
+			ret = __qseecom_process_incomplete_cmd(data, &resp);
+			if (ret) {
+				pr_err("process_incomplete_cmd failed err: %d\n",
+					ret);
+				if (!IS_ERR_OR_NULL(ihandle))
+					ion_free(qseecom.ion_clnt, ihandle);
+				ret = -EFAULT;
+				goto loadapp_err;
+			}
+		}
+
+		if (resp.result != QSEOS_RESULT_SUCCESS) {
+			pr_err("scm_call failed resp.result unknown, %d\n",
+				resp.result);
+			if (!IS_ERR_OR_NULL(ihandle))
+				ion_free(qseecom.ion_clnt, ihandle);
+			ret = -EFAULT;
+			goto loadapp_err;
+		}
+
+		app_id = resp.data;
+
+		entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+		if (!entry) {
+			pr_err("kmalloc failed\n");
+			ret = -ENOMEM;
+			goto loadapp_err;
+		}
+		entry->app_id = app_id;
+		entry->ref_cnt = 1;
+		entry->app_arch = load_img_req.app_arch;
+		/*
+		 * keymaster app may be first loaded as "keymaste" by qseecomd,
+		 * and then used as "keymaster" on some targets. To avoid app
+		 * name checking errors, register "keymaster" into app_list and
+		 * thread private data.
+		 */
+		if (!strcmp(load_img_req.img_name, "keymaste"))
+			strlcpy(entry->app_name, "keymaster",
+					MAX_APP_NAME_SIZE);
+		else
+			strlcpy(entry->app_name, load_img_req.img_name,
+					MAX_APP_NAME_SIZE);
+		entry->app_blocked = false;
+		entry->blocked_on_listener_id = 0;
+
+		/* Deallocate the handle */
+		if (!IS_ERR_OR_NULL(ihandle))
+			ion_free(qseecom.ion_clnt, ihandle);
+
+		spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
+		list_add_tail(&entry->list, &qseecom.registered_app_list_head);
+		spin_unlock_irqrestore(&qseecom.registered_app_list_lock,
+									flags);
+
+		pr_warn("App with id %d (%s) now loaded\n", app_id,
+		(char *)(load_img_req.img_name));
+	}
+	data->client.app_id = app_id;
+	data->client.app_arch = load_img_req.app_arch;
+	if (!strcmp(load_img_req.img_name, "keymaste"))
+		strlcpy(data->client.app_name, "keymaster", MAX_APP_NAME_SIZE);
+	else
+		strlcpy(data->client.app_name, load_img_req.img_name,
+					MAX_APP_NAME_SIZE);
+	load_img_req.app_id = app_id;
+	if (copy_to_user(argp, &load_img_req, sizeof(load_img_req))) {
+		pr_err("copy_to_user failed\n");
+		ret = -EFAULT;
+		if (first_time) {
+			spin_lock_irqsave(
+				&qseecom.registered_app_list_lock, flags);
+			list_del(&entry->list);
+			spin_unlock_irqrestore(
+				&qseecom.registered_app_list_lock, flags);
+			kzfree(entry);
+		}
+	}
+
+loadapp_err:
+	__qseecom_disable_clk_scale_down(data);
+enable_clk_err:
+	if (qseecom.support_bus_scaling) {
+		mutex_lock(&qsee_bw_mutex);
+		qseecom_unregister_bus_bandwidth_needs(data);
+		mutex_unlock(&qsee_bw_mutex);
+	}
+	return ret;
+}
+
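+/*
+ * Drain an app's pending work before unload: wake all response waiters and
+ * wait until this handle's in-flight ioctls are down to the caller itself.
+ */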
+static int __qseecom_cleanup_app(struct qseecom_dev_handle *data)
+{
+	int ret = 1;	/* Set unload app */
+	wake_up_all(&qseecom.send_resp_wq);
+	if (qseecom.qsee_reentrancy_support)
+		mutex_unlock(&app_access_lock);
+	while (atomic_read(&data->ioctl_count) > 1) {
+		if (wait_event_freezable(data->abort_wq,
+					atomic_read(&data->ioctl_count) <= 1)) {
+			pr_err("Interrupted from abort\n");
+			ret = -ERESTARTSYS;
+			break;
+		}
+	}
+	if (qseecom.qsee_reentrancy_support)
+		mutex_lock(&app_access_lock);
+	return ret;
+}
+
+static int qseecom_unmap_ion_allocated_memory(struct qseecom_dev_handle *data)
+{
+	int ret = 0;
+	if (!IS_ERR_OR_NULL(data->client.ihandle)) {
+		ion_unmap_kernel(qseecom.ion_clnt, data->client.ihandle);
+		ion_free(qseecom.ion_clnt, data->client.ihandle);
+		data->client.ihandle = NULL;
+	}
+	return ret;
+}
+
+static int qseecom_unload_app(struct qseecom_dev_handle *data,
+				bool app_crash)
+{
+	unsigned long flags;
+	unsigned long flags1;
+	int ret = 0;
+	struct qseecom_command_scm_resp resp;
+	struct qseecom_registered_app_list *ptr_app = NULL;
+	bool unload = false;
+	bool found_app = false;
+	bool found_dead_app = false;
+
+	if (!data) {
+		pr_err("Invalid/uninitialized device handle\n");
+		return -EINVAL;
+	}
+
+	if (!memcmp(data->client.app_name, "keymaste", strlen("keymaste"))) {
+		pr_debug("Do not unload keymaster app from tz\n");
+		goto unload_exit;
+	}
+
+	__qseecom_cleanup_app(data);
+	__qseecom_reentrancy_check_if_no_app_blocked(TZ_OS_APP_SHUTDOWN_ID);
+
+	if (data->client.app_id > 0) {
+		spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
+		list_for_each_entry(ptr_app, &qseecom.registered_app_list_head,
+									list) {
+			if (ptr_app->app_id == data->client.app_id) {
+				if (!strcmp((void *)ptr_app->app_name,
+					(void *)data->client.app_name)) {
+					found_app = true;
+					if (ptr_app->app_blocked)
+						app_crash = false;
+					if (app_crash || ptr_app->ref_cnt == 1)
+						unload = true;
+					break;
+				} else {
+					found_dead_app = true;
+					break;
+				}
+			}
+		}
+		spin_unlock_irqrestore(&qseecom.registered_app_list_lock,
+								flags);
+		if (!found_app && !found_dead_app) {
+			pr_err("Cannot find app with id = %d (%s)\n",
+				data->client.app_id,
+				(char *)data->client.app_name);
+			ret = -EINVAL;
+			goto unload_exit;
+		}
+	}
+
+	if (found_dead_app)
+		pr_warn("cleanup app_id %d(%s)\n", data->client.app_id,
+			(char *)data->client.app_name);
+
+	if (unload) {
+		struct qseecom_unload_app_ireq req;
+		/* Populate the structure for sending scm call to load image */
+		req.qsee_cmd_id = QSEOS_APP_SHUTDOWN_COMMAND;
+		req.app_id = data->client.app_id;
+
+		/* SCM_CALL to unload the app */
+		ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, &req,
+				sizeof(struct qseecom_unload_app_ireq),
+				&resp, sizeof(resp));
+		if (ret) {
+			pr_err("scm_call to unload app (id = %d) failed\n",
+								req.app_id);
+			ret = -EFAULT;
+			goto unload_exit;
+		} else {
+			pr_warn("App id %d now unloaded\n", req.app_id);
+		}
+		if (resp.result == QSEOS_RESULT_FAILURE) {
+			pr_err("app (%d) unload failed!\n",
+					data->client.app_id);
+			ret = -EFAULT;
+			goto unload_exit;
+		}
+		if (resp.result == QSEOS_RESULT_SUCCESS)
+			pr_debug("App (%d) is unloaded\n",
+					data->client.app_id);
+		if (resp.result == QSEOS_RESULT_INCOMPLETE) {
+			ret = __qseecom_process_incomplete_cmd(data, &resp);
+			if (ret) {
+				pr_err("process_incomplete_cmd fail err: %d\n",
+									ret);
+				goto unload_exit;
+			}
+		}
+	}
+
+	if (found_app) {
+		spin_lock_irqsave(&qseecom.registered_app_list_lock, flags1);
+		if (app_crash) {
+			ptr_app->ref_cnt = 0;
+			pr_debug("app_crash: ref_count = 0\n");
+		} else {
+			if (ptr_app->ref_cnt == 1) {
+				ptr_app->ref_cnt = 0;
+				pr_debug("ref_count set to 0\n");
+			} else {
+				ptr_app->ref_cnt--;
+				pr_debug("Can't unload app(%d) inuse\n",
+					ptr_app->app_id);
+			}
+		}
+		if (unload) {
+			list_del(&ptr_app->list);
+			kzfree(ptr_app);
+		}
+		spin_unlock_irqrestore(&qseecom.registered_app_list_lock,
+								flags1);
+	}
+unload_exit:
+	qseecom_unmap_ion_allocated_memory(data);
+	data->released = true;
+	return ret;
+}
+
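+/*
+ * Translate a client's userspace virtual address within the registered
+ * shared buffer into the matching physical or kernel virtual address,
+ * using its offset from the buffer base.
+ */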
+static phys_addr_t __qseecom_uvirt_to_kphys(struct qseecom_dev_handle *data,
+						unsigned long virt)
+{
+	return data->client.sb_phys + (virt - data->client.user_virt_sb_base);
+}
+
+static uintptr_t __qseecom_uvirt_to_kvirt(struct qseecom_dev_handle *data,
+						unsigned long virt)
+{
+	return (uintptr_t)data->client.sb_virt +
+				(virt - data->client.user_virt_sb_base);
+}
+
+int __qseecom_process_rpmb_svc_cmd(struct qseecom_dev_handle *data_ptr,
+		struct qseecom_send_svc_cmd_req *req_ptr,
+		struct qseecom_client_send_service_ireq *send_svc_ireq_ptr)
+{
+	int ret = 0;
+	void *req_buf = NULL;
+
+	if ((req_ptr == NULL) || (send_svc_ireq_ptr == NULL)) {
+		pr_err("Error with pointer: req_ptr = %pK, send_svc_ptr = %pK\n",
+			req_ptr, send_svc_ireq_ptr);
+		return -EINVAL;
+	}
+
+	/* Clients need to ensure req_buf is at base offset of shared buffer */
+	if ((uintptr_t)req_ptr->cmd_req_buf !=
+			data_ptr->client.user_virt_sb_base) {
+		pr_err("cmd buf not pointing to base offset of shared buffer\n");
+		return -EINVAL;
+	}
+
+	if (data_ptr->client.sb_length <
+			sizeof(struct qseecom_rpmb_provision_key)) {
+		pr_err("shared buffer is too small to hold key type\n");
+		return -EINVAL;
+	}
+	req_buf = data_ptr->client.sb_virt;
+
+	send_svc_ireq_ptr->qsee_cmd_id = req_ptr->cmd_id;
+	send_svc_ireq_ptr->key_type =
+		((struct qseecom_rpmb_provision_key *)req_buf)->key_type;
+	send_svc_ireq_ptr->req_len = req_ptr->cmd_req_len;
+	send_svc_ireq_ptr->rsp_ptr = (uint32_t)(__qseecom_uvirt_to_kphys(
+			data_ptr, (uintptr_t)req_ptr->resp_buf));
+	send_svc_ireq_ptr->rsp_len = req_ptr->resp_len;
+
+	return ret;
+}
+
+int __qseecom_process_fsm_key_svc_cmd(struct qseecom_dev_handle *data_ptr,
+		struct qseecom_send_svc_cmd_req *req_ptr,
+		struct qseecom_client_send_fsm_key_req *send_svc_ireq_ptr)
+{
+	int ret = 0;
+	uint32_t reqd_len_sb_in = 0;
+
+	if ((req_ptr == NULL) || (send_svc_ireq_ptr == NULL)) {
+		pr_err("Error with pointer: req_ptr = %pK, send_svc_ptr = %pK\n",
+			req_ptr, send_svc_ireq_ptr);
+		return -EINVAL;
+	}
+
+	reqd_len_sb_in = req_ptr->cmd_req_len + req_ptr->resp_len;
+	if (reqd_len_sb_in > data_ptr->client.sb_length) {
+		pr_err("Not enough memory to fit cmd_buf and resp_buf. Required: %u, Available: %zu\n",
+				reqd_len_sb_in, data_ptr->client.sb_length);
+		return -ENOMEM;
+	}
+
+	send_svc_ireq_ptr->qsee_cmd_id = req_ptr->cmd_id;
+	send_svc_ireq_ptr->req_len = req_ptr->cmd_req_len;
+	send_svc_ireq_ptr->rsp_ptr = (uint32_t)(__qseecom_uvirt_to_kphys(
+			data_ptr, (uintptr_t)req_ptr->resp_buf));
+	send_svc_ireq_ptr->rsp_len = req_ptr->resp_len;
+
+	send_svc_ireq_ptr->req_ptr = (uint32_t)(__qseecom_uvirt_to_kphys(
+			data_ptr, (uintptr_t)req_ptr->cmd_req_buf));
+
+
+	return ret;
+}
+
+static int __validate_send_service_cmd_inputs(struct qseecom_dev_handle *data,
+				struct qseecom_send_svc_cmd_req *req)
+{
+	if (!req || !req->resp_buf || !req->cmd_req_buf) {
+		pr_err("req or cmd buffer or response buffer is null\n");
+		return -EINVAL;
+	}
+
+	if (!data || !data->client.ihandle) {
+		pr_err("Client or client handle is not initialized\n");
+		return -EINVAL;
+	}
+
+	if (data->client.sb_virt == NULL) {
+		pr_err("sb_virt null\n");
+		return -EINVAL;
+	}
+
+	if (data->client.user_virt_sb_base == 0) {
+		pr_err("user_virt_sb_base is null\n");
+		return -EINVAL;
+	}
+
+	if (data->client.sb_length == 0) {
+		pr_err("sb_length is 0\n");
+		return -EINVAL;
+	}
+
+	if (((uintptr_t)req->cmd_req_buf <
+				data->client.user_virt_sb_base) ||
+		((uintptr_t)req->cmd_req_buf >=
+		(data->client.user_virt_sb_base + data->client.sb_length))) {
+		pr_err("cmd buffer address not within shared buffer\n");
+		return -EINVAL;
+	}
+	if (((uintptr_t)req->resp_buf <
+				data->client.user_virt_sb_base)  ||
+		((uintptr_t)req->resp_buf >=
+		(data->client.user_virt_sb_base + data->client.sb_length))) {
+		pr_err("response buffer address not within shared buffer\n");
+		return -EINVAL;
+	}
+	if ((req->cmd_req_len == 0) || (req->resp_len == 0) ||
+		(req->cmd_req_len > data->client.sb_length) ||
+		(req->resp_len > data->client.sb_length)) {
+		pr_err("cmd buf length or response buf length not valid\n");
+		return -EINVAL;
+	}
+	if (req->cmd_req_len > UINT_MAX - req->resp_len) {
+		pr_err("Integer overflow detected in req_len & rsp_len\n");
+		return -EINVAL;
+	}
+
+	if ((req->cmd_req_len + req->resp_len) > data->client.sb_length) {
+		pr_debug("Not enough memory to fit cmd_buf and resp_buf. Required: %u, Available: %zu\n",
+				(req->cmd_req_len + req->resp_len),
+				data->client.sb_length);
+		return -ENOMEM;
+	}
+	if ((uintptr_t)req->cmd_req_buf > (ULONG_MAX - req->cmd_req_len)) {
+		pr_err("Integer overflow in req_len & cmd_req_buf\n");
+		return -EINVAL;
+	}
+	if ((uintptr_t)req->resp_buf > (ULONG_MAX - req->resp_len)) {
+		pr_err("Integer overflow in resp_len & resp_buf\n");
+		return -EINVAL;
+	}
+	if (data->client.user_virt_sb_base >
+					(ULONG_MAX - data->client.sb_length)) {
+		pr_err("Integer overflow in user_virt_sb_base & sb_length\n");
+		return -EINVAL;
+	}
+	if ((((uintptr_t)req->cmd_req_buf + req->cmd_req_len) >
+		((uintptr_t)data->client.user_virt_sb_base +
+					data->client.sb_length)) ||
+		(((uintptr_t)req->resp_buf + req->resp_len) >
+		((uintptr_t)data->client.user_virt_sb_base +
+					data->client.sb_length))) {
+		pr_err("cmd buf or resp buf is out of shared buffer region\n");
+		return -EINVAL;
+	}
+	return 0;
+}
+
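+/*
+ * Handle a client service command (RPMB or FSM key): validate that the
+ * request and response buffers sit inside the shared buffer, build the
+ * matching internal request, vote for bandwidth/clocks, and issue the scm
+ * call with cache maintenance around it.
+ */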
+static int qseecom_send_service_cmd(struct qseecom_dev_handle *data,
+				void __user *argp)
+{
+	int ret = 0;
+	struct qseecom_client_send_service_ireq send_svc_ireq;
+	struct qseecom_client_send_fsm_key_req send_fsm_key_svc_ireq;
+	struct qseecom_command_scm_resp resp;
+	struct qseecom_send_svc_cmd_req req;
+	void   *send_req_ptr;
+	size_t req_buf_size;
+
+	if (copy_from_user(&req,
+				(void __user *)argp,
+				sizeof(req))) {
+		pr_err("copy_from_user failed\n");
+		return -EFAULT;
+	}
+
+	if (__validate_send_service_cmd_inputs(data, &req))
+		return -EINVAL;
+
+	data->type = QSEECOM_SECURE_SERVICE;
+
+	switch (req.cmd_id) {
+	case QSEOS_RPMB_PROVISION_KEY_COMMAND:
+	case QSEOS_RPMB_ERASE_COMMAND:
+	case QSEOS_RPMB_CHECK_PROV_STATUS_COMMAND:
+		send_req_ptr = &send_svc_ireq;
+		req_buf_size = sizeof(send_svc_ireq);
+		if (__qseecom_process_rpmb_svc_cmd(data, &req,
+				send_req_ptr))
+			return -EINVAL;
+		break;
+	case QSEOS_FSM_LTEOTA_REQ_CMD:
+	case QSEOS_FSM_LTEOTA_REQ_RSP_CMD:
+	case QSEOS_FSM_IKE_REQ_CMD:
+	case QSEOS_FSM_IKE_REQ_RSP_CMD:
+	case QSEOS_FSM_OEM_FUSE_WRITE_ROW:
+	case QSEOS_FSM_OEM_FUSE_READ_ROW:
+	case QSEOS_FSM_ENCFS_REQ_CMD:
+	case QSEOS_FSM_ENCFS_REQ_RSP_CMD:
+		send_req_ptr = &send_fsm_key_svc_ireq;
+		req_buf_size = sizeof(send_fsm_key_svc_ireq);
+		if (__qseecom_process_fsm_key_svc_cmd(data, &req,
+				send_req_ptr))
+			return -EINVAL;
+		break;
+	default:
+		pr_err("Unsupported cmd_id %d\n", req.cmd_id);
+		return -EINVAL;
+	}
+
+	if (qseecom.support_bus_scaling) {
+		ret = qseecom_scale_bus_bandwidth_timer(HIGH);
+		if (ret) {
+			pr_err("Fail to set bw HIGH\n");
+			return ret;
+		}
+	} else {
+		ret = qseecom_perf_enable(data);
+		if (ret) {
+			pr_err("Failed to vote for clocks with err %d\n", ret);
+			goto exit;
+		}
+	}
+
+	ret = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
+				data->client.sb_virt, data->client.sb_length,
+				ION_IOC_CLEAN_INV_CACHES);
+	if (ret) {
+		pr_err("cache operation failed %d\n", ret);
+		goto exit;
+	}
+	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
+				(const void *)send_req_ptr,
+				req_buf_size, &resp, sizeof(resp));
+	if (ret) {
+		pr_err("qseecom_scm_call failed with err: %d\n", ret);
+		if (!qseecom.support_bus_scaling) {
+			qsee_disable_clock_vote(data, CLK_DFAB);
+			qsee_disable_clock_vote(data, CLK_SFPB);
+		} else {
+			__qseecom_add_bw_scale_down_timer(
+				QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);
+		}
+		goto exit;
+	}
+	ret = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
+				data->client.sb_virt, data->client.sb_length,
+				ION_IOC_INV_CACHES);
+	if (ret) {
+		pr_err("cache operation failed %d\n", ret);
+		goto exit;
+	}
+	switch (resp.result) {
+	case QSEOS_RESULT_SUCCESS:
+		break;
+	case QSEOS_RESULT_INCOMPLETE:
+		pr_debug("qseos_result_incomplete\n");
+		ret = __qseecom_process_incomplete_cmd(data, &resp);
+		if (ret) {
+			pr_err("process_incomplete_cmd fail with result: %d\n",
+				resp.result);
+		}
+		if (req.cmd_id == QSEOS_RPMB_CHECK_PROV_STATUS_COMMAND) {
+			pr_warn("RPMB key status is 0x%x\n", resp.result);
+			if (put_user(resp.result,
+				(uint32_t __user *)req.resp_buf)) {
+				ret = -EINVAL;
+				goto exit;
+			}
+			ret = 0;
+		}
+		break;
+	case QSEOS_RESULT_FAILURE:
+		pr_err("scm call failed with resp.result: %d\n", resp.result);
+		ret = -EINVAL;
+		break;
+	default:
+		pr_err("Response result %d not supported\n",
+				resp.result);
+		ret = -EINVAL;
+		break;
+	}
+	if (!qseecom.support_bus_scaling) {
+		qsee_disable_clock_vote(data, CLK_DFAB);
+		qsee_disable_clock_vote(data, CLK_SFPB);
+	} else {
+		__qseecom_add_bw_scale_down_timer(
+			QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);
+	}
+
+exit:
+	return ret;
+}
+
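+/*
+ * Validate a client send-cmd request: the cmd and resp buffers, and
+ * the sum of their lengths, must fit inside the client's shared
+ * buffer, with every pointer/length addition checked for overflow
+ * before it is compared against the buffer bounds.
+ */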
+static int __validate_send_cmd_inputs(struct qseecom_dev_handle *data,
+				struct qseecom_send_cmd_req *req)
+
+{
+	if (!data || !data->client.ihandle) {
+		pr_err("Client or client handle is not initialized\n");
+		return -EINVAL;
+	}
+	if (((req->resp_buf == NULL) && (req->resp_len != 0)) ||
+						(req->cmd_req_buf == NULL)) {
+		pr_err("cmd buffer or response buffer is null\n");
+		return -EINVAL;
+	}
+	if (((uintptr_t)req->cmd_req_buf <
+				data->client.user_virt_sb_base) ||
+		((uintptr_t)req->cmd_req_buf >=
+		(data->client.user_virt_sb_base + data->client.sb_length))) {
+		pr_err("cmd buffer address not within shared buffer\n");
+		return -EINVAL;
+	}
+	if (((uintptr_t)req->resp_buf <
+				data->client.user_virt_sb_base) ||
+		((uintptr_t)req->resp_buf >=
+		(data->client.user_virt_sb_base + data->client.sb_length))) {
+		pr_err("response buffer address not within shared buffer\n");
+		return -EINVAL;
+	}
+	if ((req->cmd_req_len == 0) ||
+		(req->cmd_req_len > data->client.sb_length) ||
+		(req->resp_len > data->client.sb_length)) {
+		pr_err("cmd buf length or response buf length not valid\n");
+		return -EINVAL;
+	}
+	if (req->cmd_req_len > UINT_MAX - req->resp_len) {
+		pr_err("Integer overflow detected in req_len & rsp_len\n");
+		return -EINVAL;
+	}
+
+	if ((req->cmd_req_len + req->resp_len) > data->client.sb_length) {
+		pr_debug("Not enough memory to fit cmd_buf and resp_buf. Required: %u, Available: %zu\n",
+				(req->cmd_req_len + req->resp_len),
+				data->client.sb_length);
+		return -ENOMEM;
+	}
+	if ((uintptr_t)req->cmd_req_buf > (ULONG_MAX - req->cmd_req_len)) {
+		pr_err("Integer overflow in req_len & cmd_req_buf\n");
+		return -EINVAL;
+	}
+	if ((uintptr_t)req->resp_buf > (ULONG_MAX - req->resp_len)) {
+		pr_err("Integer overflow in resp_len & resp_buf\n");
+		return -EINVAL;
+	}
+	if (data->client.user_virt_sb_base >
+					(ULONG_MAX - data->client.sb_length)) {
+		pr_err("Integer overflow in user_virt_sb_base & sb_length\n");
+		return -EINVAL;
+	}
+	if ((((uintptr_t)req->cmd_req_buf + req->cmd_req_len) >
+		((uintptr_t)data->client.user_virt_sb_base +
+						data->client.sb_length)) ||
+		(((uintptr_t)req->resp_buf + req->resp_len) >
+		((uintptr_t)data->client.user_virt_sb_base +
+						data->client.sb_length))) {
+		pr_err("cmd buf or resp buf is out of shared buffer region\n");
+		return -EINVAL;
+	}
+	return 0;
+}
+
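+/*
+ * Decode a TZ response when reentrancy is supported.  A blocked-on-
+ * listener result first unblocks the app and then falls through to
+ * the INCOMPLETE path, which holds app_block_ref_cnt around the
+ * resumed command so other clients can tell the app is busy.
+ */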
+int __qseecom_process_reentrancy(struct qseecom_command_scm_resp *resp,
+				struct qseecom_registered_app_list *ptr_app,
+				struct qseecom_dev_handle *data)
+{
+	int ret = 0;
+
+	switch (resp->result) {
+	case QSEOS_RESULT_BLOCKED_ON_LISTENER:
+		pr_warn("App(%d) %s is blocked on listener %d\n",
+			data->client.app_id, data->client.app_name,
+			resp->data);
+		ret = __qseecom_process_reentrancy_blocked_on_listener(
+					resp, ptr_app, data);
+		if (ret) {
+			pr_err("failed to process blocked app(%d) %s on listener %d\n",
+				data->client.app_id, data->client.app_name,
+				resp->data);
+			return ret;
+		}
+		/* fall through to process incomplete request */
+	case QSEOS_RESULT_INCOMPLETE:
+		qseecom.app_block_ref_cnt++;
+		ptr_app->app_blocked = true;
+		ret = __qseecom_reentrancy_process_incomplete_cmd(data, resp);
+		ptr_app->app_blocked = false;
+		qseecom.app_block_ref_cnt--;
+		wake_up_interruptible(&qseecom.app_block_wq);
+		if (ret)
+			pr_err("process_incomplete_cmd failed err: %d\n",
+					ret);
+		return ret;
+	case QSEOS_RESULT_SUCCESS:
+		return ret;
+	default:
+		pr_err("Response result %d not supported\n",
+						resp->result);
+		return -EINVAL;
+	}
+}
+
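+/*
+ * Core send-cmd path: look the app up on the registered list, convert
+ * the user-space buffers to physical addresses inside the shared
+ * buffer, pick the 32-bit or 64-bit ireq layout based on the QSEE
+ * version, and clean/invalidate caches around the SCM call.
+ */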
+static int __qseecom_send_cmd(struct qseecom_dev_handle *data,
+				struct qseecom_send_cmd_req *req)
+{
+	int ret = 0;
+	int ret2 = 0;
+	u32 reqd_len_sb_in = 0;
+	struct qseecom_client_send_data_ireq send_data_req = {0};
+	struct qseecom_client_send_data_64bit_ireq send_data_req_64bit = {0};
+	struct qseecom_command_scm_resp resp;
+	unsigned long flags;
+	struct qseecom_registered_app_list *ptr_app;
+	bool found_app = false;
+	void *cmd_buf = NULL;
+	size_t cmd_len;
+	struct sglist_info *table = data->sglistinfo_ptr;
+
+	reqd_len_sb_in = req->cmd_req_len + req->resp_len;
+	/* find app_id & img_name from list */
+	spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
+	list_for_each_entry(ptr_app, &qseecom.registered_app_list_head,
+							list) {
+		if ((ptr_app->app_id == data->client.app_id) &&
+			 (!strcmp(ptr_app->app_name, data->client.app_name))) {
+			found_app = true;
+			break;
+		}
+	}
+	spin_unlock_irqrestore(&qseecom.registered_app_list_lock, flags);
+
+	if (!found_app) {
+		pr_err("app_id %d (%s) is not found\n", data->client.app_id,
+			(char *)data->client.app_name);
+		return -ENOENT;
+	}
+
+	if (qseecom.qsee_version < QSEE_VERSION_40) {
+		send_data_req.app_id = data->client.app_id;
+		send_data_req.req_ptr = (uint32_t)(__qseecom_uvirt_to_kphys(
+					data, (uintptr_t)req->cmd_req_buf));
+		send_data_req.req_len = req->cmd_req_len;
+		send_data_req.rsp_ptr = (uint32_t)(__qseecom_uvirt_to_kphys(
+					data, (uintptr_t)req->resp_buf));
+		send_data_req.rsp_len = req->resp_len;
+		send_data_req.sglistinfo_ptr =
+				(uint32_t)virt_to_phys(table);
+		send_data_req.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
+		dmac_flush_range((void *)table,
+				(void *)table + SGLISTINFO_TABLE_SIZE);
+		cmd_buf = (void *)&send_data_req;
+		cmd_len = sizeof(struct qseecom_client_send_data_ireq);
+	} else {
+		send_data_req_64bit.app_id = data->client.app_id;
+		send_data_req_64bit.req_ptr = __qseecom_uvirt_to_kphys(data,
+					(uintptr_t)req->cmd_req_buf);
+		send_data_req_64bit.req_len = req->cmd_req_len;
+		send_data_req_64bit.rsp_ptr = __qseecom_uvirt_to_kphys(data,
+					(uintptr_t)req->resp_buf);
+		send_data_req_64bit.rsp_len = req->resp_len;
+		/* check if 32bit app's phys_addr region is under 4GB.*/
+		if ((data->client.app_arch == ELFCLASS32) &&
+			((send_data_req_64bit.req_ptr >=
+				PHY_ADDR_4G - send_data_req_64bit.req_len) ||
+			(send_data_req_64bit.rsp_ptr >=
+				PHY_ADDR_4G - send_data_req_64bit.rsp_len))){
+			pr_err("32bit app %s PA exceeds 4G: req_ptr=%llx, req_len=%x, rsp_ptr=%llx, rsp_len=%x\n",
+				data->client.app_name,
+				send_data_req_64bit.req_ptr,
+				send_data_req_64bit.req_len,
+				send_data_req_64bit.rsp_ptr,
+				send_data_req_64bit.rsp_len);
+			return -EFAULT;
+		}
+		send_data_req_64bit.sglistinfo_ptr =
+				(uint64_t)virt_to_phys(table);
+		send_data_req_64bit.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
+		dmac_flush_range((void *)table,
+				(void *)table + SGLISTINFO_TABLE_SIZE);
+		cmd_buf = (void *)&send_data_req_64bit;
+		cmd_len = sizeof(struct qseecom_client_send_data_64bit_ireq);
+	}
+
+	if (!qseecom.whitelist_support || data->use_legacy_cmd)
+		*(uint32_t *)cmd_buf = QSEOS_CLIENT_SEND_DATA_COMMAND;
+	else
+		*(uint32_t *)cmd_buf = QSEOS_CLIENT_SEND_DATA_COMMAND_WHITELIST;
+
+	ret = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
+					data->client.sb_virt,
+					reqd_len_sb_in,
+					ION_IOC_CLEAN_INV_CACHES);
+	if (ret) {
+		pr_err("cache operation failed %d\n", ret);
+		return ret;
+	}
+
+	__qseecom_reentrancy_check_if_this_app_blocked(ptr_app);
+
+	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
+				cmd_buf, cmd_len,
+				&resp, sizeof(resp));
+	if (ret) {
+		pr_err("scm_call() failed with err: %d (app_id = %d)\n",
+					ret, data->client.app_id);
+		goto exit;
+	}
+
+	if (qseecom.qsee_reentrancy_support) {
+		ret = __qseecom_process_reentrancy(&resp, ptr_app, data);
+		if (ret)
+			goto exit;
+	} else {
+		if (resp.result == QSEOS_RESULT_INCOMPLETE) {
+			ret = __qseecom_process_incomplete_cmd(data, &resp);
+			if (ret) {
+				pr_err("process_incomplete_cmd failed err: %d\n",
+						ret);
+				goto exit;
+			}
+		} else {
+			if (resp.result != QSEOS_RESULT_SUCCESS) {
+				pr_err("Response result %d not supported\n",
+								resp.result);
+				ret = -EINVAL;
+				goto exit;
+			}
+		}
+	}
+exit:
+	ret2 = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
+				data->client.sb_virt, data->client.sb_length,
+				ION_IOC_INV_CACHES);
+	if (ret2) {
+		pr_err("cache operation failed %d\n", ret2);
+		return ret2;
+	}
+	return ret;
+}
+
+static int qseecom_send_cmd(struct qseecom_dev_handle *data, void __user *argp)
+{
+	int ret = 0;
+	struct qseecom_send_cmd_req req;
+
+	ret = copy_from_user(&req, argp, sizeof(req));
+	if (ret) {
+		pr_err("copy_from_user failed\n");
+		return -EFAULT;
+	}
+
+	if (__validate_send_cmd_inputs(data, &req))
+		return -EINVAL;
+
+	return __qseecom_send_cmd(data, &req);
+}
+
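+/*
+ * cmd_buf_offset marks where a 32-bit physical address will be patched
+ * into the request (or listener response) buffer, so the offset must
+ * leave at least sizeof(uint32_t) of room before the end of that
+ * buffer.
+ */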
+int __boundary_checks_offset(struct qseecom_send_modfd_cmd_req *req,
+			struct qseecom_send_modfd_listener_resp *lstnr_resp,
+			struct qseecom_dev_handle *data, int i)
+{
+	if ((data->type != QSEECOM_LISTENER_SERVICE) &&
+					(req->ifd_data[i].fd > 0)) {
+		if ((req->cmd_req_len < sizeof(uint32_t)) ||
+			(req->ifd_data[i].cmd_buf_offset >
+			req->cmd_req_len - sizeof(uint32_t))) {
+			pr_err("Invalid offset (req len) 0x%x\n",
+				req->ifd_data[i].cmd_buf_offset);
+			return -EINVAL;
+		}
+	} else if ((data->type == QSEECOM_LISTENER_SERVICE) &&
+				(lstnr_resp->ifd_data[i].fd > 0)) {
+		if ((lstnr_resp->resp_len < sizeof(uint32_t)) ||
+			(lstnr_resp->ifd_data[i].cmd_buf_offset >
+			lstnr_resp->resp_len - sizeof(uint32_t))) {
+			pr_err("Invalid offset (lstnr resp len) 0x%x\n",
+				lstnr_resp->ifd_data[i].cmd_buf_offset);
+			return -EINVAL;
+		}
+	}
+	return 0;
+}
+
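+/*
+ * Walk the ion fds attached to a modfd request (or listener response)
+ * and patch each referenced offset with the buffer's physical address:
+ * a single 32-bit address for a physically contiguous buffer, or an
+ * array of qseecom_sg_entry {addr, len} pairs otherwise.  When called
+ * with cleanup set, the same offsets are zeroed again.
+ */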
+static int __qseecom_update_cmd_buf(void *msg, bool cleanup,
+			struct qseecom_dev_handle *data)
+{
+	struct ion_handle *ihandle;
+	char *field;
+	int ret = 0;
+	int i = 0;
+	uint32_t len = 0;
+	struct scatterlist *sg;
+	struct qseecom_send_modfd_cmd_req *req = NULL;
+	struct qseecom_send_modfd_listener_resp *lstnr_resp = NULL;
+	struct qseecom_registered_listener_list *this_lstnr = NULL;
+	uint32_t offset;
+	struct sg_table *sg_ptr;
+
+	if ((data->type != QSEECOM_LISTENER_SERVICE) &&
+			(data->type != QSEECOM_CLIENT_APP))
+		return -EFAULT;
+
+	if (msg == NULL) {
+		pr_err("Invalid address\n");
+		return -EINVAL;
+	}
+	if (data->type == QSEECOM_LISTENER_SERVICE) {
+		lstnr_resp = (struct qseecom_send_modfd_listener_resp *)msg;
+		this_lstnr = __qseecom_find_svc(data->listener.id);
+		if (IS_ERR_OR_NULL(this_lstnr)) {
+			pr_err("Invalid listener ID\n");
+			return -ENOMEM;
+		}
+	} else {
+		req = (struct qseecom_send_modfd_cmd_req *)msg;
+	}
+
+	for (i = 0; i < MAX_ION_FD; i++) {
+		if ((data->type != QSEECOM_LISTENER_SERVICE) &&
+						(req->ifd_data[i].fd > 0)) {
+			ihandle = ion_import_dma_buf(qseecom.ion_clnt,
+					req->ifd_data[i].fd);
+			if (IS_ERR_OR_NULL(ihandle)) {
+				pr_err("Ion client can't retrieve the handle\n");
+				return -ENOMEM;
+			}
+			field = (char *) req->cmd_req_buf +
+				req->ifd_data[i].cmd_buf_offset;
+		} else if ((data->type == QSEECOM_LISTENER_SERVICE) &&
+				(lstnr_resp->ifd_data[i].fd > 0)) {
+			ihandle = ion_import_dma_buf(qseecom.ion_clnt,
+						lstnr_resp->ifd_data[i].fd);
+			if (IS_ERR_OR_NULL(ihandle)) {
+				pr_err("Ion client can't retrieve the handle\n");
+				return -ENOMEM;
+			}
+			field = lstnr_resp->resp_buf_ptr +
+				lstnr_resp->ifd_data[i].cmd_buf_offset;
+		} else {
+			continue;
+		}
+		/* Populate the cmd data structure with the phys_addr */
+		sg_ptr = ion_sg_table(qseecom.ion_clnt, ihandle);
+		if (IS_ERR_OR_NULL(sg_ptr)) {
+			pr_err("Ion client could not retrieve sg table\n");
+			goto err;
+		}
+		if (sg_ptr->nents == 0) {
+			pr_err("Num of scattered entries is 0\n");
+			goto err;
+		}
+		if (sg_ptr->nents > QSEECOM_MAX_SG_ENTRY) {
+			pr_err("Num of scattered entries (%d) is greater than max supported %d\n",
+				sg_ptr->nents, QSEECOM_MAX_SG_ENTRY);
+			goto err;
+		}
+		sg = sg_ptr->sgl;
+		if (sg_ptr->nents == 1) {
+			uint32_t *update;
+
+			if (__boundary_checks_offset(req, lstnr_resp, data, i))
+				goto err;
+			if ((data->type == QSEECOM_CLIENT_APP &&
+				(data->client.app_arch == ELFCLASS32 ||
+				data->client.app_arch == ELFCLASS64)) ||
+				(data->type == QSEECOM_LISTENER_SERVICE)) {
+				/*
+				 * Check if sg list phys addr region is under 4GB
+				 */
+				if ((qseecom.qsee_version >= QSEE_VERSION_40) &&
+					(!cleanup) &&
+					((uint64_t)sg_dma_address(sg_ptr->sgl)
+					>= PHY_ADDR_4G - sg->length)) {
+					pr_err("App %s sgl PA exceeds 4G: phy_addr=%pKad, len=%x\n",
+						data->client.app_name,
+						&(sg_dma_address(sg_ptr->sgl)),
+						sg->length);
+					goto err;
+				}
+				update = (uint32_t *) field;
+				*update = cleanup ? 0 :
+					(uint32_t)sg_dma_address(sg_ptr->sgl);
+			} else {
+				pr_err("QSEE app arch %u is not supported\n",
+							data->client.app_arch);
+				goto err;
+			}
+			len += (uint32_t)sg->length;
+		} else {
+			struct qseecom_sg_entry *update;
+			int j = 0;
+
+			if ((data->type != QSEECOM_LISTENER_SERVICE) &&
+					(req->ifd_data[i].fd > 0)) {
+
+				if ((req->cmd_req_len <
+					 SG_ENTRY_SZ * sg_ptr->nents) ||
+					(req->ifd_data[i].cmd_buf_offset >
+						(req->cmd_req_len -
+						SG_ENTRY_SZ * sg_ptr->nents))) {
+					pr_err("Invalid offset = 0x%x\n",
+					req->ifd_data[i].cmd_buf_offset);
+					goto err;
+				}
+
+			} else if ((data->type == QSEECOM_LISTENER_SERVICE) &&
+					(lstnr_resp->ifd_data[i].fd > 0)) {
+
+				if ((lstnr_resp->resp_len <
+						SG_ENTRY_SZ * sg_ptr->nents) ||
+				(lstnr_resp->ifd_data[i].cmd_buf_offset >
+						(lstnr_resp->resp_len -
+						SG_ENTRY_SZ * sg_ptr->nents))) {
+					goto err;
+				}
+			}
+			if ((data->type == QSEECOM_CLIENT_APP &&
+				(data->client.app_arch == ELFCLASS32 ||
+				data->client.app_arch == ELFCLASS64)) ||
+				(data->type == QSEECOM_LISTENER_SERVICE)) {
+				update = (struct qseecom_sg_entry *)field;
+				for (j = 0; j < sg_ptr->nents; j++) {
+					/*
+					 * Check if sg list PA is under 4GB
+					 */
+					if ((qseecom.qsee_version >=
+						QSEE_VERSION_40) &&
+						(!cleanup) &&
+						((uint64_t)(sg_dma_address(sg))
+						>= PHY_ADDR_4G - sg->length)) {
+						pr_err("App %s sgl PA exceeds 4G: phy_addr=%pKad, len=%x\n",
+							data->client.app_name,
+							&(sg_dma_address(sg)),
+							sg->length);
+						goto err;
+					}
+					update->phys_addr = cleanup ? 0 :
+						(uint32_t)sg_dma_address(sg);
+					update->len = cleanup ? 0 : sg->length;
+					update++;
+					len += sg->length;
+					sg = sg_next(sg);
+				}
+			} else {
+				pr_err("QSEE app arch %u is not supported\n",
+							data->client.app_arch);
+				goto err;
+			}
+		}
+
+		if (cleanup) {
+			ret = msm_ion_do_cache_op(qseecom.ion_clnt,
+					ihandle, NULL, len,
+					ION_IOC_INV_CACHES);
+			if (ret) {
+				pr_err("cache operation failed %d\n", ret);
+				goto err;
+			}
+		} else {
+			ret = msm_ion_do_cache_op(qseecom.ion_clnt,
+					ihandle, NULL, len,
+					ION_IOC_CLEAN_INV_CACHES);
+			if (ret) {
+				pr_err("cache operation failed %d\n", ret);
+				goto err;
+			}
+			if (data->type == QSEECOM_CLIENT_APP) {
+				offset = req->ifd_data[i].cmd_buf_offset;
+				data->sglistinfo_ptr[i].indexAndFlags =
+					SGLISTINFO_SET_INDEX_FLAG(
+					(sg_ptr->nents == 1), 0, offset);
+				data->sglistinfo_ptr[i].sizeOrCount =
+					(sg_ptr->nents == 1) ?
+					sg->length : sg_ptr->nents;
+				data->sglist_cnt = i + 1;
+			} else {
+				offset = (lstnr_resp->ifd_data[i].cmd_buf_offset
+					+ (uintptr_t)lstnr_resp->resp_buf_ptr -
+					(uintptr_t)this_lstnr->sb_virt);
+				this_lstnr->sglistinfo_ptr[i].indexAndFlags =
+					SGLISTINFO_SET_INDEX_FLAG(
+					(sg_ptr->nents == 1), 0, offset);
+				this_lstnr->sglistinfo_ptr[i].sizeOrCount =
+					(sg_ptr->nents == 1) ?
+					sg->length : sg_ptr->nents;
+				this_lstnr->sglist_cnt = i + 1;
+			}
+		}
+		/* Deallocate the handle */
+		if (!IS_ERR_OR_NULL(ihandle))
+			ion_free(qseecom.ion_clnt, ihandle);
+	}
+	return ret;
+err:
+	if (!IS_ERR_OR_NULL(ihandle))
+		ion_free(qseecom.ion_clnt, ihandle);
+	return -ENOMEM;
+}
+
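+/*
+ * When an sg list carries more entries than the request buffer can
+ * hold, write a V2 header at the patch offset instead and spill the
+ * full entry array into a freshly allocated DMA-coherent buffer that
+ * the header points at by physical address.
+ */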
+static int __qseecom_allocate_sg_list_buffer(struct qseecom_dev_handle *data,
+		char *field, uint32_t fd_idx, struct sg_table *sg_ptr)
+{
+	struct scatterlist *sg = sg_ptr->sgl;
+	struct qseecom_sg_entry_64bit *sg_entry;
+	struct qseecom_sg_list_buf_hdr_64bit *buf_hdr;
+	void *buf;
+	uint i;
+	size_t size;
+	dma_addr_t coh_pmem;
+
+	if (fd_idx >= MAX_ION_FD) {
+		pr_err("fd_idx [%d] is invalid\n", fd_idx);
+		return -ENOMEM;
+	}
+	buf_hdr = (struct qseecom_sg_list_buf_hdr_64bit *)field;
+	memset((void *)buf_hdr, 0, QSEECOM_SG_LIST_BUF_HDR_SZ_64BIT);
+	/* Allocate a contiguous kernel buffer */
+	size = sg_ptr->nents * SG_ENTRY_SZ_64BIT;
+	size = PAGE_ALIGN(size);	/* round the spill buffer up to whole pages */
+	buf = dma_alloc_coherent(qseecom.pdev,
+			size, &coh_pmem, GFP_KERNEL);
+	if (buf == NULL) {
+		pr_err("failed to alloc memory for sg buf\n");
+		return -ENOMEM;
+	}
+	/* update qseecom_sg_list_buf_hdr_64bit */
+	buf_hdr->version = QSEECOM_SG_LIST_BUF_FORMAT_VERSION_2;
+	buf_hdr->new_buf_phys_addr = coh_pmem;
+	buf_hdr->nents_total = sg_ptr->nents;
+	/* save the remaining sg entries into the newly allocated buf */
+	sg_entry = (struct qseecom_sg_entry_64bit *)buf;
+	for (i = 0; i < sg_ptr->nents; i++) {
+		sg_entry->phys_addr = (uint64_t)sg_dma_address(sg);
+		sg_entry->len = sg->length;
+		sg_entry++;
+		sg = sg_next(sg);
+	}
+
+	data->client.sec_buf_fd[fd_idx].is_sec_buf_fd = true;
+	data->client.sec_buf_fd[fd_idx].vbase = buf;
+	data->client.sec_buf_fd[fd_idx].pbase = coh_pmem;
+	data->client.sec_buf_fd[fd_idx].size = size;
+
+	return 0;
+}
+
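+/*
+ * 64-bit variant of __qseecom_update_cmd_buf(): offsets are patched
+ * with 64-bit physical addresses or qseecom_sg_entry_64bit records,
+ * and oversized sg lists are redirected through the V2 spill buffer
+ * allocated above.
+ */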
+static int __qseecom_update_cmd_buf_64(void *msg, bool cleanup,
+			struct qseecom_dev_handle *data)
+{
+	struct ion_handle *ihandle;
+	char *field;
+	int ret = 0;
+	int i = 0;
+	uint32_t len = 0;
+	struct scatterlist *sg;
+	struct qseecom_send_modfd_cmd_req *req = NULL;
+	struct qseecom_send_modfd_listener_resp *lstnr_resp = NULL;
+	struct qseecom_registered_listener_list *this_lstnr = NULL;
+	uint32_t offset;
+	struct sg_table *sg_ptr;
+
+	if ((data->type != QSEECOM_LISTENER_SERVICE) &&
+			(data->type != QSEECOM_CLIENT_APP))
+		return -EFAULT;
+
+	if (msg == NULL) {
+		pr_err("Invalid address\n");
+		return -EINVAL;
+	}
+	if (data->type == QSEECOM_LISTENER_SERVICE) {
+		lstnr_resp = (struct qseecom_send_modfd_listener_resp *)msg;
+		this_lstnr = __qseecom_find_svc(data->listener.id);
+		if (IS_ERR_OR_NULL(this_lstnr)) {
+			pr_err("Invalid listener ID\n");
+			return -ENOMEM;
+		}
+	} else {
+		req = (struct qseecom_send_modfd_cmd_req *)msg;
+	}
+
+	for (i = 0; i < MAX_ION_FD; i++) {
+		if ((data->type != QSEECOM_LISTENER_SERVICE) &&
+						(req->ifd_data[i].fd > 0)) {
+			ihandle = ion_import_dma_buf(qseecom.ion_clnt,
+					req->ifd_data[i].fd);
+			if (IS_ERR_OR_NULL(ihandle)) {
+				pr_err("Ion client can't retrieve the handle\n");
+				return -ENOMEM;
+			}
+			field = (char *) req->cmd_req_buf +
+				req->ifd_data[i].cmd_buf_offset;
+		} else if ((data->type == QSEECOM_LISTENER_SERVICE) &&
+				(lstnr_resp->ifd_data[i].fd > 0)) {
+			ihandle = ion_import_dma_buf(qseecom.ion_clnt,
+						lstnr_resp->ifd_data[i].fd);
+			if (IS_ERR_OR_NULL(ihandle)) {
+				pr_err("Ion client can't retrieve the handle\n");
+				return -ENOMEM;
+			}
+			field = lstnr_resp->resp_buf_ptr +
+				lstnr_resp->ifd_data[i].cmd_buf_offset;
+		} else {
+			continue;
+		}
+		/* Populate the cmd data structure with the phys_addr */
+		sg_ptr = ion_sg_table(qseecom.ion_clnt, ihandle);
+		if (IS_ERR_OR_NULL(sg_ptr)) {
+			pr_err("Ion client could not retrieve sg table\n");
+			goto err;
+		}
+		if (sg_ptr->nents == 0) {
+			pr_err("Num of scattered entries is 0\n");
+			goto err;
+		}
+		if (sg_ptr->nents > QSEECOM_MAX_SG_ENTRY) {
+			pr_warn("Num of scattered entries (%d) is greater than %d\n",
+				sg_ptr->nents, QSEECOM_MAX_SG_ENTRY);
+			if (cleanup) {
+				if (data->client.sec_buf_fd[i].is_sec_buf_fd &&
+					data->client.sec_buf_fd[i].vbase)
+					dma_free_coherent(qseecom.pdev,
+					data->client.sec_buf_fd[i].size,
+					data->client.sec_buf_fd[i].vbase,
+					data->client.sec_buf_fd[i].pbase);
+			} else {
+				ret = __qseecom_allocate_sg_list_buffer(data,
+						field, i, sg_ptr);
+				if (ret) {
+					pr_err("Failed to allocate sg list buffer\n");
+					goto err;
+				}
+			}
+			len = QSEECOM_SG_LIST_BUF_HDR_SZ_64BIT;
+			sg = sg_ptr->sgl;
+			goto cleanup;
+		}
+		sg = sg_ptr->sgl;
+		if (sg_ptr->nents == 1) {
+			uint64_t *update_64bit;
+
+			if (__boundary_checks_offset(req, lstnr_resp, data, i))
+				goto err;
+			/* 64bit app uses 64bit address */
+			update_64bit = (uint64_t *) field;
+			*update_64bit = cleanup ? 0 :
+					(uint64_t)sg_dma_address(sg_ptr->sgl);
+			len += (uint32_t)sg->length;
+		} else {
+			struct qseecom_sg_entry_64bit *update_64bit;
+			int j = 0;
+
+			if ((data->type != QSEECOM_LISTENER_SERVICE) &&
+					(req->ifd_data[i].fd > 0)) {
+
+				if ((req->cmd_req_len <
+					 SG_ENTRY_SZ_64BIT * sg_ptr->nents) ||
+					(req->ifd_data[i].cmd_buf_offset >
+					(req->cmd_req_len -
+					SG_ENTRY_SZ_64BIT * sg_ptr->nents))) {
+					pr_err("Invalid offset = 0x%x\n",
+					req->ifd_data[i].cmd_buf_offset);
+					goto err;
+				}
+
+			} else if ((data->type == QSEECOM_LISTENER_SERVICE) &&
+					(lstnr_resp->ifd_data[i].fd > 0)) {
+
+				if ((lstnr_resp->resp_len <
+					SG_ENTRY_SZ_64BIT * sg_ptr->nents) ||
+				(lstnr_resp->ifd_data[i].cmd_buf_offset >
+						(lstnr_resp->resp_len -
+					SG_ENTRY_SZ_64BIT * sg_ptr->nents))) {
+					goto err;
+				}
+			}
+			/* 64bit app uses 64bit address */
+			update_64bit = (struct qseecom_sg_entry_64bit *)field;
+			for (j = 0; j < sg_ptr->nents; j++) {
+				update_64bit->phys_addr = cleanup ? 0 :
+					(uint64_t)sg_dma_address(sg);
+				update_64bit->len = cleanup ? 0 :
+						(uint32_t)sg->length;
+				update_64bit++;
+				len += sg->length;
+				sg = sg_next(sg);
+			}
+		}
+cleanup:
+		if (cleanup) {
+			ret = msm_ion_do_cache_op(qseecom.ion_clnt,
+					ihandle, NULL, len,
+					ION_IOC_INV_CACHES);
+			if (ret) {
+				pr_err("cache operation failed %d\n", ret);
+				goto err;
+			}
+		} else {
+			ret = msm_ion_do_cache_op(qseecom.ion_clnt,
+					ihandle, NULL, len,
+					ION_IOC_CLEAN_INV_CACHES);
+			if (ret) {
+				pr_err("cache operation failed %d\n", ret);
+				goto err;
+			}
+			if (data->type == QSEECOM_CLIENT_APP) {
+				offset = req->ifd_data[i].cmd_buf_offset;
+				data->sglistinfo_ptr[i].indexAndFlags =
+					SGLISTINFO_SET_INDEX_FLAG(
+					(sg_ptr->nents == 1), 1, offset);
+				data->sglistinfo_ptr[i].sizeOrCount =
+					(sg_ptr->nents == 1) ?
+					sg->length : sg_ptr->nents;
+				data->sglist_cnt = i + 1;
+			} else {
+				offset = (lstnr_resp->ifd_data[i].cmd_buf_offset
+					+ (uintptr_t)lstnr_resp->resp_buf_ptr -
+					(uintptr_t)this_lstnr->sb_virt);
+				this_lstnr->sglistinfo_ptr[i].indexAndFlags =
+					SGLISTINFO_SET_INDEX_FLAG(
+					(sg_ptr->nents == 1), 1, offset);
+				this_lstnr->sglistinfo_ptr[i].sizeOrCount =
+					(sg_ptr->nents == 1) ?
+					sg->length : sg_ptr->nents;
+				this_lstnr->sglist_cnt = i + 1;
+			}
+		}
+		/* Deallocate the handle */
+		if (!IS_ERR_OR_NULL(ihandle))
+			ion_free(qseecom.ion_clnt, ihandle);
+	}
+	return ret;
+err:
+	for (i = 0; i < MAX_ION_FD; i++)
+		if (data->client.sec_buf_fd[i].is_sec_buf_fd &&
+			data->client.sec_buf_fd[i].vbase)
+			dma_free_coherent(qseecom.pdev,
+				data->client.sec_buf_fd[i].size,
+				data->client.sec_buf_fd[i].vbase,
+				data->client.sec_buf_fd[i].pbase);
+	if (!IS_ERR_OR_NULL(ihandle))
+		ion_free(qseecom.ion_clnt, ihandle);
+	return -ENOMEM;
+}
+
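+/*
+ * Send a command with attached ion fds: validate the plain send-cmd
+ * fields and every per-fd offset, translate the user buffers to
+ * kernel virtual addresses, patch in the physical addresses, send,
+ * and then run the cleanup pass to zero the patched offsets again.
+ */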
+static int __qseecom_send_modfd_cmd(struct qseecom_dev_handle *data,
+					void __user *argp,
+					bool is_64bit_addr)
+{
+	int ret = 0;
+	int i;
+	struct qseecom_send_modfd_cmd_req req;
+	struct qseecom_send_cmd_req send_cmd_req;
+
+	ret = copy_from_user(&req, argp, sizeof(req));
+	if (ret) {
+		pr_err("copy_from_user failed\n");
+		return -EFAULT;
+	}
+
+	send_cmd_req.cmd_req_buf = req.cmd_req_buf;
+	send_cmd_req.cmd_req_len = req.cmd_req_len;
+	send_cmd_req.resp_buf = req.resp_buf;
+	send_cmd_req.resp_len = req.resp_len;
+
+	if (__validate_send_cmd_inputs(data, &send_cmd_req))
+		return -EINVAL;
+
+	/* validate offsets */
+	for (i = 0; i < MAX_ION_FD; i++) {
+		if (req.ifd_data[i].cmd_buf_offset >= req.cmd_req_len) {
+			pr_err("Invalid offset %d = 0x%x\n",
+				i, req.ifd_data[i].cmd_buf_offset);
+			return -EINVAL;
+		}
+	}
+	req.cmd_req_buf = (void *)__qseecom_uvirt_to_kvirt(data,
+						(uintptr_t)req.cmd_req_buf);
+	req.resp_buf = (void *)__qseecom_uvirt_to_kvirt(data,
+						(uintptr_t)req.resp_buf);
+
+	if (!is_64bit_addr) {
+		ret = __qseecom_update_cmd_buf(&req, false, data);
+		if (ret)
+			return ret;
+		ret = __qseecom_send_cmd(data, &send_cmd_req);
+		if (ret)
+			return ret;
+		ret = __qseecom_update_cmd_buf(&req, true, data);
+		if (ret)
+			return ret;
+	} else {
+		ret = __qseecom_update_cmd_buf_64(&req, false, data);
+		if (ret)
+			return ret;
+		ret = __qseecom_send_cmd(data, &send_cmd_req);
+		if (ret)
+			return ret;
+		ret = __qseecom_update_cmd_buf_64(&req, true, data);
+		if (ret)
+			return ret;
+	}
+
+	return ret;
+}
+
+static int qseecom_send_modfd_cmd(struct qseecom_dev_handle *data,
+					void __user *argp)
+{
+	return __qseecom_send_modfd_cmd(data, argp, false);
+}
+
+static int qseecom_send_modfd_cmd_64(struct qseecom_dev_handle *data,
+					void __user *argp)
+{
+	return __qseecom_send_modfd_cmd(data, argp, true);
+}
+
+static int __qseecom_listener_has_rcvd_req(struct qseecom_dev_handle *data,
+		struct qseecom_registered_listener_list *svc)
+{
+	return (svc->rcv_req_flag != 0) || data->abort;
+}
+
+static int qseecom_receive_req(struct qseecom_dev_handle *data)
+{
+	int ret = 0;
+	struct qseecom_registered_listener_list *this_lstnr;
+
+	this_lstnr = __qseecom_find_svc(data->listener.id);
+	if (!this_lstnr) {
+		pr_err("Invalid listener ID\n");
+		return -ENODATA;
+	}
+
+	while (1) {
+		if (wait_event_freezable(this_lstnr->rcv_req_wq,
+				__qseecom_listener_has_rcvd_req(data,
+				this_lstnr))) {
+			pr_debug("Interrupted: exiting Listener Service = %d\n",
+						(uint32_t)data->listener.id);
+			/* woken up for different reason */
+			return -ERESTARTSYS;
+		}
+
+		if (data->abort) {
+			pr_err("Aborting Listener Service = %d\n",
+						(uint32_t)data->listener.id);
+			return -ENODEV;
+		}
+		this_lstnr->rcv_req_flag = 0;
+		break;
+	}
+	return ret;
+}
+
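+/*
+ * Sanity-check a firmware .mdt blob before trusting its headers: the
+ * ELF class byte selects the 32- vs 64-bit layout, and the ELF header
+ * plus all program headers must fit inside the blob actually loaded.
+ */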
+static bool __qseecom_is_fw_image_valid(const struct firmware *fw_entry)
+{
+	unsigned char app_arch = 0;
+	struct elf32_hdr *ehdr;
+	struct elf64_hdr *ehdr64;
+
+	if (fw_entry->size <= EI_CLASS) {
+		pr_err("%s: fw image too small to carry an ELF ident\n",
+				 qseecom.pdev->init_name);
+		return false;
+	}
+	app_arch = *(unsigned char *)(fw_entry->data + EI_CLASS);
+
+	switch (app_arch) {
+	case ELFCLASS32: {
+		ehdr = (struct elf32_hdr *)fw_entry->data;
+		if (fw_entry->size < sizeof(*ehdr)) {
+			pr_err("%s: Not big enough to be an elf32 header\n",
+					 qseecom.pdev->init_name);
+			return false;
+		}
+		if (memcmp(ehdr->e_ident, ELFMAG, SELFMAG)) {
+			pr_err("%s: Not an elf32 header\n",
+					 qseecom.pdev->init_name);
+			return false;
+		}
+		if (ehdr->e_phnum == 0) {
+			pr_err("%s: No loadable segments\n",
+					 qseecom.pdev->init_name);
+			return false;
+		}
+		if (sizeof(struct elf32_phdr) * ehdr->e_phnum +
+		    sizeof(struct elf32_hdr) > fw_entry->size) {
+			pr_err("%s: Program headers not within mdt\n",
+					 qseecom.pdev->init_name);
+			return false;
+		}
+		break;
+	}
+	case ELFCLASS64: {
+		ehdr64 = (struct elf64_hdr *)fw_entry->data;
+		if (fw_entry->size < sizeof(*ehdr64)) {
+			pr_err("%s: Not big enough to be an elf64 header\n",
+					 qseecom.pdev->init_name);
+			return false;
+		}
+		if (memcmp(ehdr64->e_ident, ELFMAG, SELFMAG)) {
+			pr_err("%s: Not an elf64 header\n",
+					 qseecom.pdev->init_name);
+			return false;
+		}
+		if (ehdr64->e_phnum == 0) {
+			pr_err("%s: No loadable segments\n",
+					 qseecom.pdev->init_name);
+			return false;
+		}
+		if (sizeof(struct elf64_phdr) * ehdr64->e_phnum +
+		    sizeof(struct elf64_hdr) > fw_entry->size) {
+			pr_err("%s: Program headers not within mdt\n",
+					 qseecom.pdev->init_name);
+			return false;
+		}
+		break;
+	}
+	default: {
+		pr_err("QSEE app arch %u is not supported\n", app_arch);
+		return false;
+	}
+	}
+	return true;
+}
+
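+/*
+ * A QSEE app image is split across <name>.mdt plus <name>.b00..bNN
+ * segment files, one per program header; total their sizes (with
+ * overflow checks) so one ion buffer can hold the whole image.
+ */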
+static int __qseecom_get_fw_size(const char *appname, uint32_t *fw_size,
+					uint32_t *app_arch)
+{
+	int ret = -1;
+	int i = 0, rc = 0;
+	const struct firmware *fw_entry = NULL;
+	char fw_name[MAX_APP_NAME_SIZE];
+	struct elf32_hdr *ehdr;
+	struct elf64_hdr *ehdr64;
+	int num_images = 0;
+
+	snprintf(fw_name, sizeof(fw_name), "%s.mdt", appname);
+	rc = request_firmware(&fw_entry, fw_name,  qseecom.pdev);
+	if (rc) {
+		pr_err("error with request_firmware\n");
+		ret = -EIO;
+		goto err;
+	}
+	if (!__qseecom_is_fw_image_valid(fw_entry)) {
+		ret = -EIO;
+		goto err;
+	}
+	*app_arch = *(unsigned char *)(fw_entry->data + EI_CLASS);
+	*fw_size = fw_entry->size;
+	if (*app_arch == ELFCLASS32) {
+		ehdr = (struct elf32_hdr *)fw_entry->data;
+		num_images = ehdr->e_phnum;
+	} else if (*app_arch == ELFCLASS64) {
+		ehdr64 = (struct elf64_hdr *)fw_entry->data;
+		num_images = ehdr64->e_phnum;
+	} else {
+		pr_err("QSEE %s app, arch %u is not supported\n",
+						appname, *app_arch);
+		ret = -EIO;
+		goto err;
+	}
+	pr_debug("QSEE %s app, arch %u\n", appname, *app_arch);
+	release_firmware(fw_entry);
+	fw_entry = NULL;
+	for (i = 0; i < num_images; i++) {
+		memset(fw_name, 0, sizeof(fw_name));
+		snprintf(fw_name, ARRAY_SIZE(fw_name), "%s.b%02d", appname, i);
+		ret = request_firmware(&fw_entry, fw_name, qseecom.pdev);
+		if (ret)
+			goto err;
+		if (*fw_size > U32_MAX - fw_entry->size) {
+			pr_err("QSEE %s app file size overflow\n", appname);
+			ret = -EINVAL;
+			goto err;
+		}
+		*fw_size += fw_entry->size;
+		release_firmware(fw_entry);
+		fw_entry = NULL;
+	}
+
+	return ret;
+err:
+	if (fw_entry)
+		release_firmware(fw_entry);
+	*fw_size = 0;
+	return ret;
+}
+
+static int __qseecom_get_fw_data(const char *appname, u8 *img_data,
+				uint32_t fw_size,
+				struct qseecom_load_app_ireq *load_req)
+{
+	int ret = -1;
+	int i = 0, rc = 0;
+	const struct firmware *fw_entry = NULL;
+	char fw_name[MAX_APP_NAME_SIZE];
+	u8 *img_data_ptr = img_data;
+	struct elf32_hdr *ehdr;
+	struct elf64_hdr *ehdr64;
+	int num_images = 0;
+	unsigned char app_arch = 0;
+
+	snprintf(fw_name, sizeof(fw_name), "%s.mdt", appname);
+	rc = request_firmware(&fw_entry, fw_name,  qseecom.pdev);
+	if (rc) {
+		ret = -EIO;
+		goto err;
+	}
+
+	load_req->img_len = fw_entry->size;
+	if (load_req->img_len > fw_size) {
+		pr_err("app %s size %zu is larger than buf size %u\n",
+			appname, fw_entry->size, fw_size);
+		ret = -EINVAL;
+		goto err;
+	}
+	memcpy(img_data_ptr, fw_entry->data, fw_entry->size);
+	img_data_ptr = img_data_ptr + fw_entry->size;
+	load_req->mdt_len = fw_entry->size; /* Get MDT LEN */
+
+	app_arch = *(unsigned char *)(fw_entry->data + EI_CLASS);
+	if (app_arch == ELFCLASS32) {
+		ehdr = (struct elf32_hdr *)fw_entry->data;
+		num_images = ehdr->e_phnum;
+	} else if (app_arch == ELFCLASS64) {
+		ehdr64 = (struct elf64_hdr *)fw_entry->data;
+		num_images = ehdr64->e_phnum;
+	} else {
+		pr_err("QSEE %s app, arch %u is not supported\n",
+						appname, app_arch);
+		ret = -EIO;
+		goto err;
+	}
+	release_firmware(fw_entry);
+	fw_entry = NULL;
+	for (i = 0; i < num_images; i++) {
+		snprintf(fw_name, ARRAY_SIZE(fw_name), "%s.b%02d", appname, i);
+		ret = request_firmware(&fw_entry, fw_name,  qseecom.pdev);
+		if (ret) {
+			pr_err("Failed to locate blob %s\n", fw_name);
+			goto err;
+		}
+		if ((fw_entry->size > U32_MAX - load_req->img_len) ||
+			(fw_entry->size + load_req->img_len > fw_size)) {
+			pr_err("Invalid file size for %s\n", fw_name);
+			ret = -EINVAL;
+			goto err;
+		}
+		memcpy(img_data_ptr, fw_entry->data, fw_entry->size);
+		img_data_ptr = img_data_ptr + fw_entry->size;
+		load_req->img_len += fw_entry->size;
+		release_firmware(fw_entry);
+		fw_entry = NULL;
+	}
+	return ret;
+err:
+	release_firmware(fw_entry);
+	return ret;
+}
+
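+/*
+ * Allocate an ion buffer from the QSECOM heap for a firmware image
+ * and return the handle together with its kernel mapping and physical
+ * address, unwinding the mapping and allocation on failure.
+ */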
+static int __qseecom_allocate_img_data(struct ion_handle **pihandle,
+			u8 **data, uint32_t fw_size, ion_phys_addr_t *paddr)
+{
+	size_t len = 0;
+	int ret = 0;
+	ion_phys_addr_t pa;
+	struct ion_handle *ihandle = NULL;
+	u8 *img_data = NULL;
+
+	ihandle = ion_alloc(qseecom.ion_clnt, fw_size,
+			SZ_4K, ION_HEAP(ION_QSECOM_HEAP_ID), 0);
+
+	if (IS_ERR_OR_NULL(ihandle)) {
+		pr_err("ION alloc failed\n");
+		return -ENOMEM;
+	}
+	img_data = (u8 *)ion_map_kernel(qseecom.ion_clnt,
+					ihandle);
+
+	if (IS_ERR_OR_NULL(img_data)) {
+		pr_err("ION memory mapping for image loading failed\n");
+		ret = -ENOMEM;
+		goto exit_ion_free;
+	}
+	/* Get the physical address of the ION BUF */
+	ret = ion_phys(qseecom.ion_clnt, ihandle, &pa, &len);
+	if (ret) {
+		pr_err("physical memory retrieval failure\n");
+		ret = -EIO;
+		goto exit_ion_unmap_kernel;
+	}
+
+	*pihandle = ihandle;
+	*data = img_data;
+	*paddr = pa;
+	return ret;
+
+exit_ion_unmap_kernel:
+	ion_unmap_kernel(qseecom.ion_clnt, ihandle);
+exit_ion_free:
+	ion_free(qseecom.ion_clnt, ihandle);
+	ihandle = NULL;
+	return ret;
+}
+
+static void __qseecom_free_img_data(struct ion_handle **ihandle)
+{
+	ion_unmap_kernel(qseecom.ion_clnt, *ihandle);
+	ion_free(qseecom.ion_clnt, *ihandle);
+	*ihandle = NULL;
+}
+
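+/*
+ * Load a QSEE app from the firmware filesystem: make sure the matching
+ * cmnlib/cmnlib64 common library is resident, stage the concatenated
+ * image in an ion buffer, and issue QSEOS_APP_START_COMMAND (32- or
+ * 64-bit layout depending on the QSEE version) to obtain the app_id.
+ */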
+static int __qseecom_load_fw(struct qseecom_dev_handle *data, char *appname,
+				uint32_t *app_id)
+{
+	int ret = -1;
+	uint32_t fw_size = 0;
+	struct qseecom_load_app_ireq load_req = {0, 0, 0, 0};
+	struct qseecom_load_app_64bit_ireq load_req_64bit = {0, 0, 0, 0};
+	struct qseecom_command_scm_resp resp;
+	u8 *img_data = NULL;
+	ion_phys_addr_t pa = 0;
+	struct ion_handle *ihandle = NULL;
+	void *cmd_buf = NULL;
+	size_t cmd_len;
+	uint32_t app_arch = 0;
+
+	if (!data || !appname || !app_id) {
+		pr_err("Null pointer to data or appname or appid\n");
+		return -EINVAL;
+	}
+	*app_id = 0;
+	if (__qseecom_get_fw_size(appname, &fw_size, &app_arch))
+		return -EIO;
+	data->client.app_arch = app_arch;
+
+	/* Check and load cmnlib */
+	if (qseecom.qsee_version > QSEEE_VERSION_00) {
+		if (!qseecom.commonlib_loaded && app_arch == ELFCLASS32) {
+			ret = qseecom_load_commonlib_image(data, "cmnlib");
+			if (ret) {
+				pr_err("failed to load cmnlib\n");
+				return -EIO;
+			}
+			qseecom.commonlib_loaded = true;
+			pr_debug("cmnlib is loaded\n");
+		}
+
+		if (!qseecom.commonlib64_loaded && app_arch == ELFCLASS64) {
+			ret = qseecom_load_commonlib_image(data, "cmnlib64");
+			if (ret) {
+				pr_err("failed to load cmnlib64\n");
+				return -EIO;
+			}
+			qseecom.commonlib64_loaded = true;
+			pr_debug("cmnlib64 is loaded\n");
+		}
+	}
+
+	ret = __qseecom_allocate_img_data(&ihandle, &img_data, fw_size, &pa);
+	if (ret)
+		return ret;
+
+	ret = __qseecom_get_fw_data(appname, img_data, fw_size, &load_req);
+	if (ret) {
+		ret = -EIO;
+		goto exit_free_img_data;
+	}
+
+	/* Populate the load_req parameters */
+	if (qseecom.qsee_version < QSEE_VERSION_40) {
+		load_req.qsee_cmd_id = QSEOS_APP_START_COMMAND;
+		strlcpy(load_req.app_name, appname, MAX_APP_NAME_SIZE);
+		load_req.phy_addr = (uint32_t)pa;
+		cmd_buf = (void *)&load_req;
+		cmd_len = sizeof(struct qseecom_load_app_ireq);
+	} else {
+		load_req_64bit.qsee_cmd_id = QSEOS_APP_START_COMMAND;
+		load_req_64bit.mdt_len = load_req.mdt_len;
+		load_req_64bit.img_len = load_req.img_len;
+		strlcpy(load_req_64bit.app_name, appname, MAX_APP_NAME_SIZE);
+		load_req_64bit.phy_addr = (uint64_t)pa;
+		cmd_buf = (void *)&load_req_64bit;
+		cmd_len = sizeof(struct qseecom_load_app_64bit_ireq);
+	}
+
+	if (qseecom.support_bus_scaling) {
+		mutex_lock(&qsee_bw_mutex);
+		ret = __qseecom_register_bus_bandwidth_needs(data, MEDIUM);
+		mutex_unlock(&qsee_bw_mutex);
+		if (ret) {
+			ret = -EIO;
+			goto exit_free_img_data;
+		}
+	}
+
+	ret = __qseecom_enable_clk_scale_up(data);
+	if (ret) {
+		ret = -EIO;
+		goto exit_unregister_bus_bw_need;
+	}
+
+	ret = msm_ion_do_cache_op(qseecom.ion_clnt, ihandle,
+				img_data, fw_size,
+				ION_IOC_CLEAN_INV_CACHES);
+	if (ret) {
+		pr_err("cache operation failed %d\n", ret);
+		goto exit_disable_clk_vote;
+	}
+
+	/* SCM_CALL to load the image */
+	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, cmd_buf, cmd_len,
+			&resp, sizeof(resp));
+	if (ret) {
+		pr_err("scm_call to load failed : ret %d\n", ret);
+		ret = -EIO;
+		goto exit_disable_clk_vote;
+	}
+
+	switch (resp.result) {
+	case QSEOS_RESULT_SUCCESS:
+		*app_id = resp.data;
+		break;
+	case QSEOS_RESULT_INCOMPLETE:
+		ret = __qseecom_process_incomplete_cmd(data, &resp);
+		if (ret)
+			pr_err("process_incomplete_cmd FAILED\n");
+		else
+			*app_id = resp.data;
+		break;
+	case QSEOS_RESULT_FAILURE:
+		pr_err("scm call failed with response QSEOS_RESULT_FAILURE\n");
+		break;
+	default:
+		pr_err("scm call return unknown response %d\n", resp.result);
+		ret = -EINVAL;
+		break;
+	}
+
+exit_disable_clk_vote:
+	__qseecom_disable_clk_scale_down(data);
+
+exit_unregister_bus_bw_need:
+	if (qseecom.support_bus_scaling) {
+		mutex_lock(&qsee_bw_mutex);
+		qseecom_unregister_bus_bandwidth_needs(data);
+		mutex_unlock(&qsee_bw_mutex);
+	}
+
+exit_free_img_data:
+	__qseecom_free_img_data(&ihandle);
+	return ret;
+}
+
+static int qseecom_load_commonlib_image(struct qseecom_dev_handle *data,
+					char *cmnlib_name)
+{
+	int ret = 0;
+	uint32_t fw_size = 0;
+	struct qseecom_load_app_ireq load_req = {0, 0, 0, 0};
+	struct qseecom_load_app_64bit_ireq load_req_64bit = {0, 0, 0, 0};
+	struct qseecom_command_scm_resp resp;
+	u8 *img_data = NULL;
+	ion_phys_addr_t pa = 0;
+	void *cmd_buf = NULL;
+	size_t cmd_len;
+	uint32_t app_arch = 0;
+	struct ion_handle *cmnlib_ion_handle = NULL;
+
+	if (!cmnlib_name) {
+		pr_err("cmnlib_name is NULL\n");
+		return -EINVAL;
+	}
+	if (strlen(cmnlib_name) >= MAX_APP_NAME_SIZE) {
+		pr_err("The cmnlib_name (%s) with length %zu is not valid\n",
+			cmnlib_name, strlen(cmnlib_name));
+		return -EINVAL;
+	}
+
+	if (__qseecom_get_fw_size(cmnlib_name, &fw_size, &app_arch))
+		return -EIO;
+
+	ret = __qseecom_allocate_img_data(&cmnlib_ion_handle,
+						&img_data, fw_size, &pa);
+	if (ret)
+		return -EIO;
+
+	ret = __qseecom_get_fw_data(cmnlib_name, img_data, fw_size, &load_req);
+	if (ret) {
+		ret = -EIO;
+		goto exit_free_img_data;
+	}
+	if (qseecom.qsee_version < QSEE_VERSION_40) {
+		load_req.phy_addr = (uint32_t)pa;
+		load_req.qsee_cmd_id = QSEOS_LOAD_SERV_IMAGE_COMMAND;
+		cmd_buf = (void *)&load_req;
+		cmd_len = sizeof(struct qseecom_load_lib_image_ireq);
+	} else {
+		load_req_64bit.phy_addr = (uint64_t)pa;
+		load_req_64bit.qsee_cmd_id = QSEOS_LOAD_SERV_IMAGE_COMMAND;
+		load_req_64bit.img_len = load_req.img_len;
+		load_req_64bit.mdt_len = load_req.mdt_len;
+		cmd_buf = (void *)&load_req_64bit;
+		cmd_len = sizeof(struct qseecom_load_lib_image_64bit_ireq);
+	}
+
+	if (qseecom.support_bus_scaling) {
+		mutex_lock(&qsee_bw_mutex);
+		ret = __qseecom_register_bus_bandwidth_needs(data, MEDIUM);
+		mutex_unlock(&qsee_bw_mutex);
+		if (ret) {
+			ret = -EIO;
+			goto exit_free_img_data;
+		}
+	}
+
+	/* Vote for the SFPB clock */
+	ret = __qseecom_enable_clk_scale_up(data);
+	if (ret) {
+		ret = -EIO;
+		goto exit_unregister_bus_bw_need;
+	}
+
+	ret = msm_ion_do_cache_op(qseecom.ion_clnt, cmnlib_ion_handle,
+				img_data, fw_size,
+				ION_IOC_CLEAN_INV_CACHES);
+	if (ret) {
+		pr_err("cache operation failed %d\n", ret);
+		goto exit_disable_clk_vote;
+	}
+
+	/* SCM_CALL to load the image */
+	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, cmd_buf, cmd_len,
+							&resp, sizeof(resp));
+	if (ret) {
+		pr_err("scm_call to load failed : ret %d\n", ret);
+		ret = -EIO;
+		goto exit_disable_clk_vote;
+	}
+
+	switch (resp.result) {
+	case QSEOS_RESULT_SUCCESS:
+		break;
+	case QSEOS_RESULT_FAILURE:
+		pr_err("scm call failed with response result %d\n", resp.result);
+		ret = -EINVAL;
+		goto exit_disable_clk_vote;
+	case  QSEOS_RESULT_INCOMPLETE:
+		ret = __qseecom_process_incomplete_cmd(data, &resp);
+		if (ret) {
+			pr_err("process_incomplete_cmd failed err: %d\n", ret);
+			goto exit_disable_clk_vote;
+		}
+		break;
+	default:
+		pr_err("scm call return unknown response %d\n", resp.result);
+		ret = -EINVAL;
+		goto exit_disable_clk_vote;
+	}
+
+exit_disable_clk_vote:
+	__qseecom_disable_clk_scale_down(data);
+
+exit_unregister_bus_bw_need:
+	if (qseecom.support_bus_scaling) {
+		mutex_lock(&qsee_bw_mutex);
+		qseecom_unregister_bus_bandwidth_needs(data);
+		mutex_unlock(&qsee_bw_mutex);
+	}
+
+exit_free_img_data:
+	__qseecom_free_img_data(&cmnlib_ion_handle);
+	return ret;
+}
+
+static int qseecom_unload_commonlib_image(void)
+{
+	int ret = -EINVAL;
+	struct qseecom_unload_lib_image_ireq unload_req = {0};
+	struct qseecom_command_scm_resp resp;
+
+	/* Populate the remaining parameters */
+	unload_req.qsee_cmd_id = QSEOS_UNLOAD_SERV_IMAGE_COMMAND;
+
+	/* SCM_CALL to load the image */
+	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, &unload_req,
+			sizeof(struct qseecom_unload_lib_image_ireq),
+						&resp, sizeof(resp));
+	if (ret) {
+		pr_err("scm_call to unload lib failed : ret %d\n", ret);
+		ret = -EIO;
+	} else {
+		switch (resp.result) {
+		case QSEOS_RESULT_SUCCESS:
+			break;
+		case QSEOS_RESULT_FAILURE:
+			pr_err("scm call failed with resp.result QSEOS_RESULT_FAILURE\n");
+			break;
+		default:
+			pr_err("scm call return unknown response %d\n",
+					resp.result);
+			ret = -EINVAL;
+			break;
+		}
+	}
+
+	return ret;
+}
+
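+/*
+ * Kernel-client entry point: allocate a handle and a shared buffer of
+ * the requested size, reuse the app (bumping its ref count) if TZ
+ * already has it loaded, otherwise load it from firmware, and finally
+ * register the handle on the kclient list so shutdown can find it.
+ */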
+int qseecom_start_app(struct qseecom_handle **handle,
+						char *app_name, uint32_t size)
+{
+	int32_t ret = 0;
+	unsigned long flags = 0;
+	struct qseecom_dev_handle *data = NULL;
+	struct qseecom_check_app_ireq app_ireq;
+	struct qseecom_registered_app_list *entry = NULL;
+	struct qseecom_registered_kclient_list *kclient_entry = NULL;
+	bool found_app = false;
+	size_t len;
+	ion_phys_addr_t pa;
+	uint32_t fw_size, app_arch;
+	uint32_t app_id = 0;
+
+	if (atomic_read(&qseecom.qseecom_state) != QSEECOM_STATE_READY) {
+		pr_err("Not allowed to be called in %d state\n",
+				atomic_read(&qseecom.qseecom_state));
+		return -EPERM;
+	}
+	if (!app_name) {
+		pr_err("failed to get the app name\n");
+		return -EINVAL;
+	}
+
+	if (strnlen(app_name, MAX_APP_NAME_SIZE) == MAX_APP_NAME_SIZE) {
+		pr_err("The app_name (%s) with length %zu is not valid\n",
+			app_name, strnlen(app_name, MAX_APP_NAME_SIZE));
+		return -EINVAL;
+	}
+
+	*handle = kzalloc(sizeof(struct qseecom_handle), GFP_KERNEL);
+	if (!(*handle)) {
+		pr_err("failed to allocate memory for kernel client handle\n");
+		return -ENOMEM;
+	}
+
+	data = kzalloc(sizeof(*data), GFP_KERNEL);
+	if (!data) {
+		pr_err("kzalloc failed\n");
+		kfree(*handle);
+		*handle = NULL;
+		return -ENOMEM;
+	}
+	data->abort = 0;
+	data->type = QSEECOM_CLIENT_APP;
+	data->released = false;
+	data->client.sb_length = size;
+	data->client.user_virt_sb_base = 0;
+	data->client.ihandle = NULL;
+
+	init_waitqueue_head(&data->abort_wq);
+
+	data->client.ihandle = ion_alloc(qseecom.ion_clnt, size, 4096,
+				ION_HEAP(ION_QSECOM_HEAP_ID), 0);
+	if (IS_ERR_OR_NULL(data->client.ihandle)) {
+		pr_err("Ion client could not retrieve the handle\n");
+		kfree(data);
+		kfree(*handle);
+		*handle = NULL;
+		return -EINVAL;
+	}
+	mutex_lock(&app_access_lock);
+
+	app_ireq.qsee_cmd_id = QSEOS_APP_LOOKUP_COMMAND;
+	strlcpy(app_ireq.app_name, app_name, MAX_APP_NAME_SIZE);
+	ret = __qseecom_check_app_exists(app_ireq, &app_id);
+	if (ret)
+		goto err;
+
+	strlcpy(data->client.app_name, app_name, MAX_APP_NAME_SIZE);
+	if (app_id) {
+		pr_warn("App id %d for [%s] app exists\n", app_id,
+			(char *)app_ireq.app_name);
+		spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
+		list_for_each_entry(entry,
+				&qseecom.registered_app_list_head, list){
+			if (entry->app_id == app_id) {
+				entry->ref_cnt++;
+				found_app = true;
+				break;
+			}
+		}
+		spin_unlock_irqrestore(
+				&qseecom.registered_app_list_lock, flags);
+		if (!found_app)
+			pr_warn("App_id %d [%s] was loaded but not registered\n",
+					app_id, (char *)app_ireq.app_name);
+	} else {
+		/* load the app and get the app_id  */
+		pr_debug("%s: Loading app for the first time'\n",
+				qseecom.pdev->init_name);
+		ret = __qseecom_load_fw(data, app_name, &app_id);
+		if (ret < 0)
+			goto err;
+	}
+	data->client.app_id = app_id;
+	if (!found_app) {
+		entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+		if (!entry) {
+			pr_err("kmalloc for app entry failed\n");
+			ret =  -ENOMEM;
+			goto err;
+		}
+		entry->app_id = app_id;
+		entry->ref_cnt = 1;
+		strlcpy(entry->app_name, app_name, MAX_APP_NAME_SIZE);
+		if (__qseecom_get_fw_size(app_name, &fw_size, &app_arch)) {
+			ret = -EIO;
+			kfree(entry);
+			goto err;
+		}
+		entry->app_arch = app_arch;
+		entry->app_blocked = false;
+		entry->blocked_on_listener_id = 0;
+		spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
+		list_add_tail(&entry->list, &qseecom.registered_app_list_head);
+		spin_unlock_irqrestore(&qseecom.registered_app_list_lock,
+									flags);
+	}
+
+	/* Get the physical address of the ION BUF */
+	ret = ion_phys(qseecom.ion_clnt, data->client.ihandle, &pa, &len);
+	if (ret) {
+		pr_err("Cannot get phys_addr for the Ion Client, ret = %d\n",
+			ret);
+		goto err;
+	}
+
+	/* Populate the structure for sending scm call to load image */
+	data->client.sb_virt = (char *) ion_map_kernel(qseecom.ion_clnt,
+							data->client.ihandle);
+	if (IS_ERR_OR_NULL(data->client.sb_virt)) {
+		pr_err("ION memory mapping for client shared buf failed\n");
+		ret = -ENOMEM;
+		goto err;
+	}
+	data->client.user_virt_sb_base = (uintptr_t)data->client.sb_virt;
+	data->client.sb_phys = (phys_addr_t)pa;
+	(*handle)->dev = (void *)data;
+	(*handle)->sbuf = (unsigned char *)data->client.sb_virt;
+	(*handle)->sbuf_len = data->client.sb_length;
+
+	kclient_entry = kzalloc(sizeof(*kclient_entry), GFP_KERNEL);
+	if (!kclient_entry) {
+		pr_err("kzalloc failed\n");
+		ret = -ENOMEM;
+		goto err;
+	}
+	kclient_entry->handle = *handle;
+
+	spin_lock_irqsave(&qseecom.registered_kclient_list_lock, flags);
+	list_add_tail(&kclient_entry->list,
+			&qseecom.registered_kclient_list_head);
+	spin_unlock_irqrestore(&qseecom.registered_kclient_list_lock, flags);
+
+	mutex_unlock(&app_access_lock);
+	return 0;
+
+err:
+	kfree(data);
+	kfree(*handle);
+	*handle = NULL;
+	mutex_unlock(&app_access_lock);
+	return ret;
+}
+EXPORT_SYMBOL(qseecom_start_app);
+
+int qseecom_shutdown_app(struct qseecom_handle **handle)
+{
+	int ret = -EINVAL;
+	struct qseecom_dev_handle *data;
+
+	struct qseecom_registered_kclient_list *kclient = NULL;
+	unsigned long flags = 0;
+	bool found_handle = false;
+
+	if (atomic_read(&qseecom.qseecom_state) != QSEECOM_STATE_READY) {
+		pr_err("Not allowed to be called in %d state\n",
+				atomic_read(&qseecom.qseecom_state));
+		return -EPERM;
+	}
+
+	if ((handle == NULL) || (*handle == NULL)) {
+		pr_err("Handle is not initialized\n");
+		return -EINVAL;
+	}
+	data =	(struct qseecom_dev_handle *) ((*handle)->dev);
+	mutex_lock(&app_access_lock);
+
+	spin_lock_irqsave(&qseecom.registered_kclient_list_lock, flags);
+	list_for_each_entry(kclient, &qseecom.registered_kclient_list_head,
+				list) {
+		if (kclient->handle == (*handle)) {
+			list_del(&kclient->list);
+			found_handle = true;
+			break;
+		}
+	}
+	spin_unlock_irqrestore(&qseecom.registered_kclient_list_lock, flags);
+	if (!found_handle)
+		pr_err("Unable to find the handle, exiting\n");
+	else
+		ret = qseecom_unload_app(data, false);
+
+	mutex_unlock(&app_access_lock);
+	if (ret == 0) {
+		kzfree(data);
+		kzfree(*handle);
+		kzfree(kclient);
+		*handle = NULL;
+	}
+
+	return ret;
+}
+EXPORT_SYMBOL(qseecom_shutdown_app);
+
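+/*
+ * Kernel-client command send: validate against the handle's shared
+ * buffer, vote for bus bandwidth and, on HLOS-clocked targets where
+ * nothing else has enabled the crypto clock, turn it on around the
+ * call, then dispatch through __qseecom_send_cmd() under
+ * app_access_lock.
+ */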
+int qseecom_send_command(struct qseecom_handle *handle, void *send_buf,
+			uint32_t sbuf_len, void *resp_buf, uint32_t rbuf_len)
+{
+	int ret = 0;
+	struct qseecom_send_cmd_req req = {0, 0, 0, 0};
+	struct qseecom_dev_handle *data;
+	bool perf_enabled = false;
+
+	if (atomic_read(&qseecom.qseecom_state) != QSEECOM_STATE_READY) {
+		pr_err("Not allowed to be called in %d state\n",
+				atomic_read(&qseecom.qseecom_state));
+		return -EPERM;
+	}
+
+	if (handle == NULL) {
+		pr_err("Handle is not initialized\n");
+		return -EINVAL;
+	}
+	data = handle->dev;
+
+	req.cmd_req_len = sbuf_len;
+	req.resp_len = rbuf_len;
+	req.cmd_req_buf = send_buf;
+	req.resp_buf = resp_buf;
+
+	if (__validate_send_cmd_inputs(data, &req))
+		return -EINVAL;
+
+	mutex_lock(&app_access_lock);
+	if (qseecom.support_bus_scaling) {
+		ret = qseecom_scale_bus_bandwidth_timer(INACTIVE);
+		if (ret) {
+			pr_err("Failed to set bw.\n");
+			mutex_unlock(&app_access_lock);
+			return ret;
+		}
+	}
+	/*
+	 * On targets where the crypto clock is handled by HLOS, if
+	 * clk_access_cnt is zero and perf_enabled is false, then the
+	 * crypto clock was not enabled before sending the cmd to tz;
+	 * qseecom enables the clock here to avoid a service failure.
+	 */
+	if (!qseecom.no_clock_support &&
+		!qseecom.qsee.clk_access_cnt && !data->perf_enabled) {
+		pr_debug("ce clock is not enabled!\n");
+		ret = qseecom_perf_enable(data);
+		if (ret) {
+			pr_err("Failed to vote for clock with err %d\n",
+						ret);
+			mutex_unlock(&app_access_lock);
+			return -EINVAL;
+		}
+		perf_enabled = true;
+	}
+	if (!strcmp(data->client.app_name, "securemm"))
+		data->use_legacy_cmd = true;
+
+	ret = __qseecom_send_cmd(data, &req);
+	data->use_legacy_cmd = false;
+	if (qseecom.support_bus_scaling)
+		__qseecom_add_bw_scale_down_timer(
+			QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);
+
+	if (perf_enabled) {
+		qsee_disable_clock_vote(data, CLK_DFAB);
+		qsee_disable_clock_vote(data, CLK_SFPB);
+	}
+
+	mutex_unlock(&app_access_lock);
+
+	if (ret)
+		return ret;
+
+	pr_debug("sending cmd_req->rsp size: %u, ptr: 0x%pK\n",
+			req.resp_len, req.resp_buf);
+	return ret;
+}
+EXPORT_SYMBOL(qseecom_send_command);
+
+int qseecom_set_bandwidth(struct qseecom_handle *handle, bool high)
+{
+	int ret = 0;
+
+	if ((handle == NULL) || (handle->dev == NULL)) {
+		pr_err("No valid kernel client\n");
+		return -EINVAL;
+	}
+	if (high) {
+		if (qseecom.support_bus_scaling) {
+			mutex_lock(&qsee_bw_mutex);
+			__qseecom_register_bus_bandwidth_needs(handle->dev,
+									HIGH);
+			mutex_unlock(&qsee_bw_mutex);
+		} else {
+			ret = qseecom_perf_enable(handle->dev);
+			if (ret)
+				pr_err("Failed to vote for clock with err %d\n",
+						ret);
+		}
+	} else {
+		if (!qseecom.support_bus_scaling) {
+			qsee_disable_clock_vote(handle->dev, CLK_DFAB);
+			qsee_disable_clock_vote(handle->dev, CLK_SFPB);
+		} else {
+			mutex_lock(&qsee_bw_mutex);
+			qseecom_unregister_bus_bandwidth_needs(handle->dev);
+			mutex_unlock(&qsee_bw_mutex);
+		}
+	}
+	return ret;
+}
+EXPORT_SYMBOL(qseecom_set_bandwidth);
+
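+/*
+ * Bridge for the smcinvoke driver: rebuild a qseecom_command_scm_resp
+ * from the raw SCM return words, run the usual listener/incomplete
+ * processing against dummy app and client records, and write the
+ * final result back into the descriptor.
+ */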
+int qseecom_process_listener_from_smcinvoke(struct scm_desc *desc)
+{
+	struct qseecom_registered_app_list dummy_app_entry = { {0} };
+	struct qseecom_dev_handle dummy_private_data = {0};
+	struct qseecom_command_scm_resp resp;
+	int ret = 0;
+
+	if (!desc) {
+		pr_err("desc is NULL\n");
+		return -EINVAL;
+	}
+
+	resp.result = desc->ret[0];	/* req_cmd */
+	resp.resp_type = desc->ret[1];	/* incomplete: unused; blocked: session_id */
+	resp.data = desc->ret[2];	/* listener_id */
+
+	dummy_private_data.client.app_id = desc->ret[1];
+	dummy_app_entry.app_id = desc->ret[1];
+
+	mutex_lock(&app_access_lock);
+	if (qseecom.qsee_reentrancy_support)
+		ret = __qseecom_process_reentrancy(&resp, &dummy_app_entry,
+					&dummy_private_data);
+	else
+		ret = __qseecom_process_incomplete_cmd(&dummy_private_data,
+					&resp);
+	mutex_unlock(&app_access_lock);
+	if (ret)
+		pr_err("Failed on cmd %d for lsnr %d session %d, ret = %d\n",
+			(int)desc->ret[0], (int)desc->ret[2],
+			(int)desc->ret[1], ret);
+	desc->ret[0] = resp.result;
+	desc->ret[1] = resp.resp_type;
+	desc->ret[2] = resp.data;
+	return ret;
+}
+EXPORT_SYMBOL(qseecom_process_listener_from_smcinvoke);
+
+static int qseecom_send_resp(void)
+{
+	qseecom.send_resp_flag = 1;
+	wake_up_interruptible(&qseecom.send_resp_wq);
+	return 0;
+}
+
+static int qseecom_reentrancy_send_resp(struct qseecom_dev_handle *data)
+{
+	struct qseecom_registered_listener_list *this_lstnr = NULL;
+
+	pr_debug("lstnr %d send resp, wakeup\n", data->listener.id);
+	this_lstnr = __qseecom_find_svc(data->listener.id);
+	if (this_lstnr == NULL)
+		return -EINVAL;
+	qseecom.send_resp_flag = 1;
+	this_lstnr->send_resp_flag = 1;
+	wake_up_interruptible(&qseecom.send_resp_wq);
+	return 0;
+}
+
+static int __validate_send_modfd_resp_inputs(struct qseecom_dev_handle *data,
+			struct qseecom_send_modfd_listener_resp *resp,
+			struct qseecom_registered_listener_list *this_lstnr)
+{
+	int i;
+
+	if (!data || !resp || !this_lstnr) {
+		pr_err("listener handle or resp msg is null\n");
+		return -EINVAL;
+	}
+
+	if (resp->resp_buf_ptr == NULL) {
+		pr_err("resp buffer is null\n");
+		return -EINVAL;
+	}
+	/* validate resp buf length */
+	if ((resp->resp_len == 0) ||
+			(resp->resp_len > this_lstnr->sb_length)) {
+		pr_err("resp buf length %d not valid\n", resp->resp_len);
+		return -EINVAL;
+	}
+
+	if ((uintptr_t)resp->resp_buf_ptr > (ULONG_MAX - resp->resp_len)) {
+		pr_err("Integer overflow in resp_len & resp_buf\n");
+		return -EINVAL;
+	}
+	if ((uintptr_t)this_lstnr->user_virt_sb_base >
+					(ULONG_MAX - this_lstnr->sb_length)) {
+		pr_err("Integer overflow in user_virt_sb_base & sb_length\n");
+		return -EINVAL;
+	}
+	/* validate resp buf */
+	if (((uintptr_t)resp->resp_buf_ptr <
+		(uintptr_t)this_lstnr->user_virt_sb_base) ||
+		((uintptr_t)resp->resp_buf_ptr >=
+		((uintptr_t)this_lstnr->user_virt_sb_base +
+				this_lstnr->sb_length)) ||
+		(((uintptr_t)resp->resp_buf_ptr + resp->resp_len) >
+		((uintptr_t)this_lstnr->user_virt_sb_base +
+						this_lstnr->sb_length))) {
+		pr_err("resp buf is out of shared buffer region\n");
+		return -EINVAL;
+	}
+
+	/* validate offsets */
+	for (i = 0; i < MAX_ION_FD; i++) {
+		if (resp->ifd_data[i].cmd_buf_offset >= resp->resp_len) {
+			pr_err("Invalid offset %d = 0x%x\n",
+				i, resp->ifd_data[i].cmd_buf_offset);
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
+static int __qseecom_send_modfd_resp(struct qseecom_dev_handle *data,
+				void __user *argp, bool is_64bit_addr)
+{
+	struct qseecom_send_modfd_listener_resp resp;
+	struct qseecom_registered_listener_list *this_lstnr = NULL;
+
+	if (copy_from_user(&resp, argp, sizeof(resp))) {
+		pr_err("copy_from_user failed");
+		return -EINVAL;
+	}
+
+	this_lstnr = __qseecom_find_svc(data->listener.id);
+	if (this_lstnr == NULL)
+		return -EINVAL;
+
+	if (__validate_send_modfd_resp_inputs(data, &resp, this_lstnr))
+		return -EINVAL;
+
+	resp.resp_buf_ptr = this_lstnr->sb_virt +
+		(uintptr_t)(resp.resp_buf_ptr - this_lstnr->user_virt_sb_base);
+
+	if (!is_64bit_addr)
+		__qseecom_update_cmd_buf(&resp, false, data);
+	else
+		__qseecom_update_cmd_buf_64(&resp, false, data);
+	qseecom.send_resp_flag = 1;
+	this_lstnr->send_resp_flag = 1;
+	wake_up_interruptible(&qseecom.send_resp_wq);
+	return 0;
+}
+
+static int qseecom_send_modfd_resp(struct qseecom_dev_handle *data,
+						void __user *argp)
+{
+	return __qseecom_send_modfd_resp(data, argp, false);
+}
+
+static int qseecom_send_modfd_resp_64(struct qseecom_dev_handle *data,
+						void __user *argp)
+{
+	return __qseecom_send_modfd_resp(data, argp, true);
+}
+
+static int qseecom_get_qseos_version(struct qseecom_dev_handle *data,
+						void __user *argp)
+{
+	struct qseecom_qseos_version_req req;
+
+	if (copy_from_user(&req, argp, sizeof(req))) {
+		pr_err("copy_from_user failed");
+		return -EINVAL;
+	}
+	req.qseos_version = qseecom.qseos_version;
+	if (copy_to_user(argp, &req, sizeof(req))) {
+		pr_err("copy_to_user failed");
+		return -EINVAL;
+	}
+	return 0;
+}
+
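+/*
+ * Reference-counted CE clock enable: the first caller prepares and
+ * enables the core, interface and bus clocks, unwinding in reverse
+ * order on failure; subsequent callers only bump clk_access_cnt.
+ */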
+static int __qseecom_enable_clk(enum qseecom_ce_hw_instance ce)
+{
+	int rc = 0;
+	struct qseecom_clk *qclk = NULL;
+
+	if (qseecom.no_clock_support)
+		return 0;
+
+	if (ce == CLK_QSEE)
+		qclk = &qseecom.qsee;
+	if (ce == CLK_CE_DRV)
+		qclk = &qseecom.ce_drv;
+
+	if (qclk == NULL) {
+		pr_err("CLK type not supported\n");
+		return -EINVAL;
+	}
+	mutex_lock(&clk_access_lock);
+
+	if (qclk->clk_access_cnt == ULONG_MAX) {
+		pr_err("clk_access_cnt beyond limitation\n");
+		goto err;
+	}
+	if (qclk->clk_access_cnt > 0) {
+		qclk->clk_access_cnt++;
+		mutex_unlock(&clk_access_lock);
+		return rc;
+	}
+
+	/* Enable CE core clk */
+	if (qclk->ce_core_clk != NULL) {
+		rc = clk_prepare_enable(qclk->ce_core_clk);
+		if (rc) {
+			pr_err("Unable to enable/prepare CE core clk\n");
+			goto err;
+		}
+	}
+	/* Enable CE clk */
+	if (qclk->ce_clk != NULL) {
+		rc = clk_prepare_enable(qclk->ce_clk);
+		if (rc) {
+			pr_err("Unable to enable/prepare CE iface clk\n");
+			goto ce_clk_err;
+		}
+	}
+	/* Enable AXI clk */
+	if (qclk->ce_bus_clk != NULL) {
+		rc = clk_prepare_enable(qclk->ce_bus_clk);
+		if (rc) {
+			pr_err("Unable to enable/prepare CE bus clk\n");
+			goto ce_bus_clk_err;
+		}
+	}
+	qclk->clk_access_cnt++;
+	mutex_unlock(&clk_access_lock);
+	return 0;
+
+ce_bus_clk_err:
+	if (qclk->ce_clk != NULL)
+		clk_disable_unprepare(qclk->ce_clk);
+ce_clk_err:
+	if (qclk->ce_core_clk != NULL)
+		clk_disable_unprepare(qclk->ce_core_clk);
+err:
+	mutex_unlock(&clk_access_lock);
+	return -EIO;
+}
+
+static void __qseecom_disable_clk(enum qseecom_ce_hw_instance ce)
+{
+	struct qseecom_clk *qclk;
+
+	if (qseecom.no_clock_support)
+		return;
+
+	if (ce == CLK_QSEE)
+		qclk = &qseecom.qsee;
+	else
+		qclk = &qseecom.ce_drv;
+
+	mutex_lock(&clk_access_lock);
+
+	if (qclk->clk_access_cnt == 0) {
+		mutex_unlock(&clk_access_lock);
+		return;
+	}
+
+	if (qclk->clk_access_cnt == 1) {
+		if (qclk->ce_clk != NULL)
+			clk_disable_unprepare(qclk->ce_clk);
+		if (qclk->ce_core_clk != NULL)
+			clk_disable_unprepare(qclk->ce_core_clk);
+		if (qclk->ce_bus_clk != NULL)
+			clk_disable_unprepare(qclk->ce_bus_clk);
+	}
+	qclk->clk_access_cnt--;
+	mutex_unlock(&clk_access_lock);
+}
+
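+/*
+ * Vote for QSEE bus bandwidth on behalf of a client. Judging from the
+ * requests below, the bus-scale vote levels appear to map to 0 = none,
+ * 1 = DFAB only, 2 = SFPB only and 3 = both, so each transition picks
+ * the level that preserves whatever the other clock type currently
+ * holds.
+ */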
+static int qsee_vote_for_clock(struct qseecom_dev_handle *data,
+						int32_t clk_type)
+{
+	int ret = 0;
+	struct qseecom_clk *qclk;
+
+	if (qseecom.no_clock_support)
+		return 0;
+
+	qclk = &qseecom.qsee;
+	if (!qseecom.qsee_perf_client)
+		return ret;
+
+	switch (clk_type) {
+	case CLK_DFAB:
+		mutex_lock(&qsee_bw_mutex);
+		if (!qseecom.qsee_bw_count) {
+			if (qseecom.qsee_sfpb_bw_count > 0)
+				ret = msm_bus_scale_client_update_request(
+					qseecom.qsee_perf_client, 3);
+			else {
+				if (qclk->ce_core_src_clk != NULL)
+					ret = __qseecom_enable_clk(CLK_QSEE);
+				if (!ret) {
+					ret =
+					msm_bus_scale_client_update_request(
+						qseecom.qsee_perf_client, 1);
+					if ((ret) &&
+						(qclk->ce_core_src_clk != NULL))
+						__qseecom_disable_clk(CLK_QSEE);
+				}
+			}
+			if (ret)
+				pr_err("DFAB Bandwidth req failed (%d)\n",
+								ret);
+			else {
+				qseecom.qsee_bw_count++;
+				data->perf_enabled = true;
+			}
+		} else {
+			qseecom.qsee_bw_count++;
+			data->perf_enabled = true;
+		}
+		mutex_unlock(&qsee_bw_mutex);
+		break;
+	case CLK_SFPB:
+		mutex_lock(&qsee_bw_mutex);
+		if (!qseecom.qsee_sfpb_bw_count) {
+			if (qseecom.qsee_bw_count > 0)
+				ret = msm_bus_scale_client_update_request(
+					qseecom.qsee_perf_client, 3);
+			else {
+				if (qclk->ce_core_src_clk != NULL)
+					ret = __qseecom_enable_clk(CLK_QSEE);
+				if (!ret) {
+					ret =
+					msm_bus_scale_client_update_request(
+						qseecom.qsee_perf_client, 2);
+					if ((ret) &&
+						(qclk->ce_core_src_clk != NULL))
+						__qseecom_disable_clk(CLK_QSEE);
+				}
+			}
+
+			if (ret)
+				pr_err("SFPB Bandwidth req failed (%d)\n",
+								ret);
+			else {
+				qseecom.qsee_sfpb_bw_count++;
+				data->fast_load_enabled = true;
+			}
+		} else {
+			qseecom.qsee_sfpb_bw_count++;
+			data->fast_load_enabled = true;
+		}
+		mutex_unlock(&qsee_bw_mutex);
+		break;
+	default:
+		pr_err("Clock type not defined\n");
+		break;
+	}
+	return ret;
+}
+
+static void qsee_disable_clock_vote(struct qseecom_dev_handle *data,
+						int32_t clk_type)
+{
+	int32_t ret = 0;
+	struct qseecom_clk *qclk;
+
+	qclk = &qseecom.qsee;
+
+	if (qseecom.no_clock_support)
+		return;
+	if (!qseecom.qsee_perf_client)
+		return;
+
+	switch (clk_type) {
+	case CLK_DFAB:
+		mutex_lock(&qsee_bw_mutex);
+		if (qseecom.qsee_bw_count == 0) {
+			pr_err("Client error.Extra call to disable DFAB clk\n");
+			mutex_unlock(&qsee_bw_mutex);
+			return;
+		}
+
+		if (qseecom.qsee_bw_count == 1) {
+			if (qseecom.qsee_sfpb_bw_count > 0)
+				ret = msm_bus_scale_client_update_request(
+					qseecom.qsee_perf_client, 2);
+			else {
+				ret = msm_bus_scale_client_update_request(
+						qseecom.qsee_perf_client, 0);
+				if ((!ret) && (qclk->ce_core_src_clk != NULL))
+					__qseecom_disable_clk(CLK_QSEE);
+			}
+			if (ret)
+				pr_err("SFPB Bandwidth req fail (%d)\n",
+								ret);
+			else {
+				qseecom.qsee_bw_count--;
+				data->perf_enabled = false;
+			}
+		} else {
+			qseecom.qsee_bw_count--;
+			data->perf_enabled = false;
+		}
+		mutex_unlock(&qsee_bw_mutex);
+		break;
+	case CLK_SFPB:
+		mutex_lock(&qsee_bw_mutex);
+		if (qseecom.qsee_sfpb_bw_count == 0) {
+			pr_err("Client error.Extra call to disable SFPB clk\n");
+			mutex_unlock(&qsee_bw_mutex);
+			return;
+		}
+		if (qseecom.qsee_sfpb_bw_count == 1) {
+			if (qseecom.qsee_bw_count > 0)
+				ret = msm_bus_scale_client_update_request(
+						qseecom.qsee_perf_client, 1);
+			else {
+				ret = msm_bus_scale_client_update_request(
+						qseecom.qsee_perf_client, 0);
+				if ((!ret) && (qclk->ce_core_src_clk != NULL))
+					__qseecom_disable_clk(CLK_QSEE);
+			}
+			if (ret)
+				pr_err("SFPB Bandwidth req fail (%d)\n",
+								ret);
+			else {
+				qseecom.qsee_sfpb_bw_count--;
+				data->fast_load_enabled = false;
+			}
+		} else {
+			qseecom.qsee_sfpb_bw_count--;
+			data->fast_load_enabled = false;
+		}
+		mutex_unlock(&qsee_bw_mutex);
+		break;
+	default:
+		pr_err("Clock type not defined\n");
+		break;
+	}
+}
+
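+/*
+ * Load an external (non-QSEE-app) ELF image: import the caller's ion
+ * buffer, validate the mdt/img lengths against it, build a 32- or
+ * 64-bit load request depending on the QSEE version, vote for
+ * bandwidth and clocks, clean the cache, then issue the SCM call and
+ * unwind the votes whatever the outcome.
+ */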
+static int qseecom_load_external_elf(struct qseecom_dev_handle *data,
+				void __user *argp)
+{
+	struct ion_handle *ihandle;	/* Ion handle */
+	struct qseecom_load_img_req load_img_req;
+	int uret = 0;
+	int ret;
+	ion_phys_addr_t pa = 0;
+	size_t len;
+	struct qseecom_load_app_ireq load_req;
+	struct qseecom_load_app_64bit_ireq load_req_64bit;
+	struct qseecom_command_scm_resp resp;
+	void *cmd_buf = NULL;
+	size_t cmd_len;
+	/* Copy the relevant information needed for loading the image */
+	if (copy_from_user(&load_img_req,
+				(void __user *)argp,
+				sizeof(struct qseecom_load_img_req))) {
+		pr_err("copy_from_user failed\n");
+		return -EFAULT;
+	}
+
+	/* Get the handle of the shared fd */
+	ihandle = ion_import_dma_buf(qseecom.ion_clnt,
+				load_img_req.ifd_data_fd);
+	if (IS_ERR_OR_NULL(ihandle)) {
+		pr_err("Ion client could not retrieve the handle\n");
+		return -ENOMEM;
+	}
+
+	/* Get the physical address of the ION BUF */
+	ret = ion_phys(qseecom.ion_clnt, ihandle, &pa, &len);
+	if (ret) {
+		pr_err("Cannot get phys_addr for the Ion Client, ret = %d\n",
+			ret);
+		goto exit_cpu_restore;
+	}
+	if (load_img_req.mdt_len > len || load_img_req.img_len > len) {
+		pr_err("ion len %zu is smaller than mdt_len %u or img_len %u\n",
+				len, load_img_req.mdt_len,
+				load_img_req.img_len);
+		ret = -EINVAL;
+		goto exit_cpu_restore;
+	}
+	/* Populate the structure for sending scm call to load image */
+	if (qseecom.qsee_version < QSEE_VERSION_40) {
+		load_req.qsee_cmd_id = QSEOS_LOAD_EXTERNAL_ELF_COMMAND;
+		load_req.mdt_len = load_img_req.mdt_len;
+		load_req.img_len = load_img_req.img_len;
+		load_req.phy_addr = (uint32_t)pa;
+		cmd_buf = (void *)&load_req;
+		cmd_len = sizeof(struct qseecom_load_app_ireq);
+	} else {
+		load_req_64bit.qsee_cmd_id = QSEOS_LOAD_EXTERNAL_ELF_COMMAND;
+		load_req_64bit.mdt_len = load_img_req.mdt_len;
+		load_req_64bit.img_len = load_img_req.img_len;
+		load_req_64bit.phy_addr = (uint64_t)pa;
+		cmd_buf = (void *)&load_req_64bit;
+		cmd_len = sizeof(struct qseecom_load_app_64bit_ireq);
+	}
+
+	if (qseecom.support_bus_scaling) {
+		mutex_lock(&qsee_bw_mutex);
+		ret = __qseecom_register_bus_bandwidth_needs(data, MEDIUM);
+		mutex_unlock(&qsee_bw_mutex);
+		if (ret) {
+			ret = -EIO;
+			goto exit_cpu_restore;
+		}
+	}
+
+	/* Vote for the SFPB clock */
+	ret = __qseecom_enable_clk_scale_up(data);
+	if (ret) {
+		ret = -EIO;
+		goto exit_register_bus_bandwidth_needs;
+	}
+	ret = msm_ion_do_cache_op(qseecom.ion_clnt, ihandle, NULL, len,
+				ION_IOC_CLEAN_INV_CACHES);
+	if (ret) {
+		pr_err("cache operation failed %d\n", ret);
+		goto exit_disable_clock;
+	}
+	/*  SCM_CALL to load the external elf */
+	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, cmd_buf, cmd_len,
+			&resp, sizeof(resp));
+	if (ret) {
+		pr_err("scm_call to load failed : ret %d\n",
+				ret);
+		ret = -EFAULT;
+		goto exit_disable_clock;
+	}
+
+	switch (resp.result) {
+	case QSEOS_RESULT_SUCCESS:
+		break;
+	case QSEOS_RESULT_INCOMPLETE:
+		pr_err("%s: qseos result incomplete\n", __func__);
+		ret = __qseecom_process_incomplete_cmd(data, &resp);
+		if (ret)
+			pr_err("process_incomplete_cmd failed: err: %d\n", ret);
+		break;
+	case QSEOS_RESULT_FAILURE:
+		pr_err("scm_call rsp.result is QSEOS_RESULT_FAILURE\n");
+		ret = -EFAULT;
+		break;
+	default:
+		pr_err("scm_call response result %d not supported\n",
+							resp.result);
+		ret = -EFAULT;
+		break;
+	}
+
+exit_disable_clock:
+	__qseecom_disable_clk_scale_down(data);
+
+exit_register_bus_bandwidth_needs:
+	if (qseecom.support_bus_scaling) {
+		mutex_lock(&qsee_bw_mutex);
+		uret = qseecom_unregister_bus_bandwidth_needs(data);
+		mutex_unlock(&qsee_bw_mutex);
+		if (uret)
+			pr_err("Failed to unregister bus bw needs %d, scm_call ret %d\n",
+								uret, ret);
+	}
+
+exit_cpu_restore:
+	/* Deallocate the handle */
+	if (!IS_ERR_OR_NULL(ihandle))
+		ion_free(qseecom.ion_clnt, ihandle);
+	return ret;
+}
+
+static int qseecom_unload_external_elf(struct qseecom_dev_handle *data)
+{
+	int ret = 0;
+	struct qseecom_command_scm_resp resp;
+	struct qseecom_unload_app_ireq req;
+
+	/* unavailable client app */
+	data->type = QSEECOM_UNAVAILABLE_CLIENT_APP;
+
+	/* Populate the structure for sending scm call to unload image */
+	req.qsee_cmd_id = QSEOS_UNLOAD_EXTERNAL_ELF_COMMAND;
+
+	/* SCM_CALL to unload the external elf */
+	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, &req,
+			sizeof(struct qseecom_unload_app_ireq),
+			&resp, sizeof(resp));
+	if (ret) {
+		pr_err("scm_call to unload failed : ret %d\n",
+				ret);
+		ret = -EFAULT;
+		goto qseecom_unload_external_elf_scm_err;
+	}
+	if (resp.result == QSEOS_RESULT_INCOMPLETE) {
+		ret = __qseecom_process_incomplete_cmd(data, &resp);
+		if (ret)
+			pr_err("process_incomplete_cmd fail err: %d\n",
+					ret);
+	} else if (resp.result != QSEOS_RESULT_SUCCESS) {
+		pr_err("scm_call to unload image failed resp.result = %d\n",
+					resp.result);
+		ret = -EFAULT;
+	}
+
+qseecom_unload_external_elf_scm_err:
+
+	return ret;
+}
+
+static int qseecom_query_app_loaded(struct qseecom_dev_handle *data,
+					void __user *argp)
+{
+
+	int32_t ret;
+	struct qseecom_qseos_app_load_query query_req;
+	struct qseecom_check_app_ireq req;
+	struct qseecom_registered_app_list *entry = NULL;
+	unsigned long flags = 0;
+	uint32_t app_arch = 0, app_id = 0;
+	bool found_app = false;
+
+	/* Copy the relevant information needed for loading the image */
+	if (copy_from_user(&query_req,
+				(void __user *)argp,
+				sizeof(struct qseecom_qseos_app_load_query))) {
+		pr_err("copy_from_user failed\n");
+		return -EFAULT;
+	}
+
+	req.qsee_cmd_id = QSEOS_APP_LOOKUP_COMMAND;
+	query_req.app_name[MAX_APP_NAME_SIZE-1] = '\0';
+	strlcpy(req.app_name, query_req.app_name, MAX_APP_NAME_SIZE);
+
+	ret = __qseecom_check_app_exists(req, &app_id);
+	if (ret) {
+		pr_err(" scm call to check if app is loaded failed");
+		return ret;	/* scm call failed */
+	}
+	if (app_id) {
+		pr_debug("App id %d (%s) already exists\n", app_id,
+			(char *)(req.app_name));
+		spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
+		list_for_each_entry(entry,
+				&qseecom.registered_app_list_head, list){
+			if (entry->app_id == app_id) {
+				app_arch = entry->app_arch;
+				entry->ref_cnt++;
+				found_app = true;
+				break;
+			}
+		}
+		spin_unlock_irqrestore(
+				&qseecom.registered_app_list_lock, flags);
+		data->client.app_id = app_id;
+		query_req.app_id = app_id;
+		if (app_arch) {
+			data->client.app_arch = app_arch;
+			query_req.app_arch = app_arch;
+		} else {
+			data->client.app_arch = 0;
+			query_req.app_arch = 0;
+		}
+		strlcpy(data->client.app_name, query_req.app_name,
+				MAX_APP_NAME_SIZE);
+		/*
+		 * If the app was loaded by appsbl before and was not
+		 * registered, register it now.
+		 */
+		if (!found_app) {
+			pr_debug("Register app %d [%s] which was loaded before\n",
+					app_id, (char *)query_req.app_name);
+			entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+			if (!entry) {
+				pr_err("kmalloc for app entry failed\n");
+				return -ENOMEM;
+			}
+			}
+			entry->app_id = app_id;
+			entry->ref_cnt = 1;
+			entry->app_arch = data->client.app_arch;
+			strlcpy(entry->app_name, data->client.app_name,
+				MAX_APP_NAME_SIZE);
+			entry->app_blocked = false;
+			entry->blocked_on_listener_id = 0;
+			spin_lock_irqsave(&qseecom.registered_app_list_lock,
+				flags);
+			list_add_tail(&entry->list,
+				&qseecom.registered_app_list_head);
+			spin_unlock_irqrestore(
+				&qseecom.registered_app_list_lock, flags);
+		}
+		if (copy_to_user(argp, &query_req, sizeof(query_req))) {
+			pr_err("copy_to_user failed\n");
+			return -EFAULT;
+		}
+		return -EEXIST;	/* app already loaded */
+	} else {
+		return 0;	/* app not loaded */
+	}
+}
+
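+/*
+ * Look up the crypto-engine numbers and pipe pair for a key-management
+ * usage: FDE usages index the fde table and PFE the pfe table; the
+ * entry whose unit_num matches @unit supplies one ce_num per pipe
+ * entry plus the pipe pair.
+ */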
+static int __qseecom_get_ce_pipe_info(
+			enum qseecom_key_management_usage_type usage,
+			uint32_t *pipe, uint32_t **ce_hw, uint32_t unit)
+{
+	int ret = -EINVAL;
+	int i, j;
+	struct qseecom_ce_info_use *p = NULL;
+	int total = 0;
+	struct qseecom_ce_pipe_entry *pcepipe;
+
+	switch (usage) {
+	case QSEOS_KM_USAGE_DISK_ENCRYPTION:
+	case QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION:
+	case QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION:
+		if (qseecom.support_fde) {
+			p = qseecom.ce_info.fde;
+			total = qseecom.ce_info.num_fde;
+		} else {
+			pr_err("system does not support fde\n");
+			return -EINVAL;
+		}
+		break;
+	case QSEOS_KM_USAGE_FILE_ENCRYPTION:
+		if (qseecom.support_pfe) {
+			p = qseecom.ce_info.pfe;
+			total = qseecom.ce_info.num_pfe;
+		} else {
+			pr_err("system does not support pfe\n");
+			return -EINVAL;
+		}
+		break;
+	default:
+		pr_err("unsupported usage %d\n", usage);
+		return -EINVAL;
+	}
+
+	for (j = 0; j < total; j++) {
+		if (p->unit_num == unit) {
+			pcepipe =  p->ce_pipe_entry;
+			for (i = 0; i < p->num_ce_pipe_entries; i++) {
+				(*ce_hw)[i] = pcepipe->ce_num;
+				*pipe = pcepipe->ce_pipe_pair;
+				pcepipe++;
+			}
+			ret = 0;
+			break;
+		}
+		p++;
+	}
+	return ret;
+}
+
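+/*
+ * Ask TZ to generate and persist a key for the given usage; a
+ * QSEOS_RESULT_FAIL_KEY_ID_EXISTS response is treated as success
+ * because the requested key is already present.
+ */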
+static int __qseecom_generate_and_save_key(struct qseecom_dev_handle *data,
+			enum qseecom_key_management_usage_type usage,
+			struct qseecom_key_generate_ireq *ireq)
+{
+	struct qseecom_command_scm_resp resp;
+	int ret;
+
+	if (usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
+		usage >= QSEOS_KM_USAGE_MAX) {
+		pr_err("Error:: unsupported usage %d\n", usage);
+		return -EFAULT;
+	}
+	ret = __qseecom_enable_clk(CLK_QSEE);
+	if (ret)
+		return ret;
+
+	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
+				ireq, sizeof(struct qseecom_key_generate_ireq),
+				&resp, sizeof(resp));
+	if (ret) {
+		if (ret == -EINVAL &&
+			resp.result == QSEOS_RESULT_FAIL_KEY_ID_EXISTS) {
+			pr_debug("Key ID exists.\n");
+			ret = 0;
+		} else {
+			pr_err("scm call to generate key failed : %d\n", ret);
+			ret = -EFAULT;
+		}
+		goto generate_key_exit;
+	}
+
+	switch (resp.result) {
+	case QSEOS_RESULT_SUCCESS:
+		break;
+	case QSEOS_RESULT_FAIL_KEY_ID_EXISTS:
+		pr_debug("Key ID exists.\n");
+		break;
+	case QSEOS_RESULT_INCOMPLETE:
+		ret = __qseecom_process_incomplete_cmd(data, &resp);
+		if (ret) {
+			if (resp.result == QSEOS_RESULT_FAIL_KEY_ID_EXISTS) {
+				pr_debug("Key ID exists.\n");
+				ret = 0;
+			} else {
+				pr_err("process_incomplete_cmd FAILED, resp.result %d\n",
+					resp.result);
+			}
+		}
+		break;
+	case QSEOS_RESULT_FAILURE:
+	default:
+		pr_err("gen key scm call failed resp.result %d\n", resp.result);
+		ret = -EINVAL;
+		break;
+	}
+generate_key_exit:
+	__qseecom_disable_clk(CLK_QSEE);
+	return ret;
+}
+
+static int __qseecom_delete_saved_key(struct qseecom_dev_handle *data,
+			enum qseecom_key_management_usage_type usage,
+			struct qseecom_key_delete_ireq *ireq)
+{
+	struct qseecom_command_scm_resp resp;
+	int ret;
+
+	if (usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
+		usage >= QSEOS_KM_USAGE_MAX) {
+		pr_err("Error:: unsupported usage %d\n", usage);
+		return -EFAULT;
+	}
+	ret = __qseecom_enable_clk(CLK_QSEE);
+	if (ret)
+		return ret;
+
+	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
+				ireq, sizeof(struct qseecom_key_delete_ireq),
+				&resp, sizeof(struct qseecom_command_scm_resp));
+	if (ret) {
+		if (ret == -EINVAL &&
+			resp.result == QSEOS_RESULT_FAIL_MAX_ATTEMPT) {
+			pr_debug("Max attempts to input password reached.\n");
+			ret = -ERANGE;
+		} else {
+			pr_err("scm call to delete key failed : %d\n", ret);
+			ret = -EFAULT;
+		}
+		goto del_key_exit;
+	}
+
+	switch (resp.result) {
+	case QSEOS_RESULT_SUCCESS:
+		break;
+	case QSEOS_RESULT_INCOMPLETE:
+		ret = __qseecom_process_incomplete_cmd(data, &resp);
+		if (ret) {
+			pr_err("process_incomplete_cmd FAILED, resp.result %d\n",
+					resp.result);
+			if (resp.result == QSEOS_RESULT_FAIL_MAX_ATTEMPT) {
+				pr_debug("Max attempts to input password reached.\n");
+				ret = -ERANGE;
+			}
+		}
+		break;
+	case QSEOS_RESULT_FAIL_MAX_ATTEMPT:
+		pr_debug("Max attempts to input password reached.\n");
+		ret = -ERANGE;
+		break;
+	case QSEOS_RESULT_FAILURE:
+	default:
+		pr_err("Delete key scm call failed resp.result %d\n",
+							resp.result);
+		ret = -EINVAL;
+		break;
+	}
+del_key_exit:
+	__qseecom_disable_clk(CLK_QSEE);
+	return ret;
+}
+
+static int __qseecom_set_clear_ce_key(struct qseecom_dev_handle *data,
+			enum qseecom_key_management_usage_type usage,
+			struct qseecom_key_select_ireq *ireq)
+{
+	struct qseecom_command_scm_resp resp;
+	int ret;
+
+	if (usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
+		usage >= QSEOS_KM_USAGE_MAX) {
+		pr_err("Error:: unsupported usage %d\n", usage);
+		return -EFAULT;
+	}
+	ret = __qseecom_enable_clk(CLK_QSEE);
+	if (ret)
+		return ret;
+
+	if (qseecom.qsee.instance != qseecom.ce_drv.instance) {
+		ret = __qseecom_enable_clk(CLK_CE_DRV);
+		if (ret) {
+			__qseecom_disable_clk(CLK_QSEE);
+			return ret;
+		}
+	}
+
+	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
+				ireq, sizeof(struct qseecom_key_select_ireq),
+				&resp, sizeof(struct qseecom_command_scm_resp));
+	if (ret) {
+		if (ret == -EINVAL &&
+			resp.result == QSEOS_RESULT_FAIL_MAX_ATTEMPT) {
+			pr_debug("Max attempts to input password reached.\n");
+			ret = -ERANGE;
+		} else if (ret == -EINVAL &&
+			resp.result == QSEOS_RESULT_FAIL_PENDING_OPERATION) {
+			pr_debug("Set Key operation under processing...\n");
+			ret = QSEOS_RESULT_FAIL_PENDING_OPERATION;
+		} else {
+			pr_err("scm call to set QSEOS_PIPE_ENC key failed : %d\n",
+				ret);
+			ret = -EFAULT;
+		}
+		goto set_key_exit;
+	}
+
+	switch (resp.result) {
+	case QSEOS_RESULT_SUCCESS:
+		break;
+	case QSEOS_RESULT_INCOMPLETE:
+		ret = __qseecom_process_incomplete_cmd(data, &resp);
+		if (ret) {
+			pr_err("process_incomplete_cmd FAILED, resp.result %d\n",
+					resp.result);
+			if (resp.result ==
+				QSEOS_RESULT_FAIL_PENDING_OPERATION) {
+				pr_debug("Set Key operation under processing...\n");
+				ret = QSEOS_RESULT_FAIL_PENDING_OPERATION;
+			}
+			if (resp.result == QSEOS_RESULT_FAIL_MAX_ATTEMPT) {
+				pr_debug("Max attempts to input password reached.\n");
+				ret = -ERANGE;
+			}
+		}
+		break;
+	case QSEOS_RESULT_FAIL_MAX_ATTEMPT:
+		pr_debug("Max attempts to input password reached.\n");
+		ret = -ERANGE;
+		break;
+	case QSEOS_RESULT_FAIL_PENDING_OPERATION:
+		pr_debug("Set Key operation under processing...\n");
+		ret = QSEOS_RESULT_FAIL_PENDING_OPERATION;
+		break;
+	case QSEOS_RESULT_FAILURE:
+	default:
+		pr_err("Set key scm call failed resp.result %d\n", resp.result);
+		ret = -EINVAL;
+		break;
+	}
+set_key_exit:
+	__qseecom_disable_clk(CLK_QSEE);
+	if (qseecom.qsee.instance != qseecom.ce_drv.instance)
+		__qseecom_disable_clk(CLK_CE_DRV);
+	return ret;
+}
+
+static int __qseecom_update_current_key_user_info(
+			struct qseecom_dev_handle *data,
+			enum qseecom_key_management_usage_type usage,
+			struct qseecom_key_userinfo_update_ireq *ireq)
+{
+	struct qseecom_command_scm_resp resp;
+	int ret;
+
+	if (usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
+		usage >= QSEOS_KM_USAGE_MAX) {
+		pr_err("Error: unsupported usage %d\n", usage);
+		return -EFAULT;
+	}
+	ret = __qseecom_enable_clk(CLK_QSEE);
+	if (ret)
+		return ret;
+
+	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
+		ireq, sizeof(struct qseecom_key_userinfo_update_ireq),
+		&resp, sizeof(struct qseecom_command_scm_resp));
+	if (ret) {
+		if (ret == -EINVAL &&
+			resp.result == QSEOS_RESULT_FAIL_PENDING_OPERATION) {
+			pr_debug("Set Key operation under processing...\n");
+			ret = QSEOS_RESULT_FAIL_PENDING_OPERATION;
+		} else {
+			pr_err("scm call to update key userinfo failed: %d\n",
+									ret);
+			__qseecom_disable_clk(CLK_QSEE);
+			return -EFAULT;
+		}
+	}
+
+	switch (resp.result) {
+	case QSEOS_RESULT_SUCCESS:
+		break;
+	case QSEOS_RESULT_INCOMPLETE:
+		ret = __qseecom_process_incomplete_cmd(data, &resp);
+		if (resp.result ==
+			QSEOS_RESULT_FAIL_PENDING_OPERATION) {
+			pr_debug("Set Key operation under processing...\n");
+			ret = QSEOS_RESULT_FAIL_PENDING_OPERATION;
+		}
+		if (ret)
+			pr_err("process_incomplete_cmd FAILED, resp.result %d\n",
+					resp.result);
+		break;
+	case QSEOS_RESULT_FAIL_PENDING_OPERATION:
+		pr_debug("Update Key operation under processing...\n");
+		ret = QSEOS_RESULT_FAIL_PENDING_OPERATION;
+		break;
+	case QSEOS_RESULT_FAILURE:
+	default:
+		pr_err("Set key scm call failed resp.result %d\n", resp.result);
+		ret = -EINVAL;
+		break;
+	}
+
+	__qseecom_disable_clk(CLK_QSEE);
+	return ret;
+}
+
+static int qseecom_enable_ice_setup(int usage)
+{
+	int ret = 0;
+
+	if (usage == QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION)
+		ret = qcom_ice_setup_ice_hw("ufs", true);
+	else if (usage == QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION)
+		ret = qcom_ice_setup_ice_hw("sdcc", true);
+
+	return ret;
+}
+
+static int qseecom_disable_ice_setup(int usage)
+{
+	int ret = 0;
+
+	if (usage == QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION)
+		ret = qcom_ice_setup_ice_hw("ufs", false);
+	else if (usage == QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION)
+		ret = qcom_ice_setup_ice_hw("sdcc", false);
+
+	return ret;
+}
+
+static int qseecom_get_ce_hw_instance(uint32_t unit, uint32_t usage)
+{
+	struct qseecom_ce_info_use *pce_info_use, *p;
+	int total = 0;
+	int i;
+
+	switch (usage) {
+	case QSEOS_KM_USAGE_DISK_ENCRYPTION:
+	case QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION:
+	case QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION:
+		p = qseecom.ce_info.fde;
+		total = qseecom.ce_info.num_fde;
+		break;
+	case QSEOS_KM_USAGE_FILE_ENCRYPTION:
+		p = qseecom.ce_info.pfe;
+		total = qseecom.ce_info.num_pfe;
+		break;
+	default:
+		pr_err("unsupported usage %d\n", usage);
+		return -EINVAL;
+	}
+
+	pce_info_use = NULL;
+
+	for (i = 0; i < total; i++) {
+		if (p->unit_num == unit) {
+			pce_info_use = p;
+			break;
+		}
+		p++;
+	}
+	if (!pce_info_use) {
+		pr_err("can not find %d\n", unit);
+		return -EINVAL;
+	}
+	return pce_info_use->num_ce_pipe_entries;
+}
+
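+/*
+ * Create-key handler: generate (or reuse) the stored key for the
+ * requested usage, then program it into every CE pipe of the default
+ * unit, retrying while TZ reports the set-key operation as pending.
+ * UFS/SDCC ICE usages use a fixed CE number and key index instead of
+ * the per-entry values, and only need a single pass.
+ */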
+static int qseecom_create_key(struct qseecom_dev_handle *data,
+			void __user *argp)
+{
+	int i;
+	uint32_t *ce_hw = NULL;
+	uint32_t pipe = 0;
+	int ret = 0;
+	uint32_t flags = 0;
+	struct qseecom_create_key_req create_key_req;
+	struct qseecom_key_generate_ireq generate_key_ireq;
+	struct qseecom_key_select_ireq set_key_ireq;
+	int entries = 0;
+
+	ret = copy_from_user(&create_key_req, argp, sizeof(create_key_req));
+	if (ret) {
+		pr_err("copy_from_user failed\n");
+		return ret;
+	}
+
+	if (create_key_req.usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
+		create_key_req.usage >= QSEOS_KM_USAGE_MAX) {
+		pr_err("unsupported usage %d\n", create_key_req.usage);
+		return -EFAULT;
+	}
+	entries = qseecom_get_ce_hw_instance(DEFAULT_CE_INFO_UNIT,
+					create_key_req.usage);
+	if (entries <= 0) {
+		pr_err("no ce instance for usage %d unit %d\n",
+			create_key_req.usage, DEFAULT_CE_INFO_UNIT);
+		return -EINVAL;
+	}
+
+	ce_hw = kcalloc(entries, sizeof(*ce_hw), GFP_KERNEL);
+	if (!ce_hw)
+		return -ENOMEM;
+	ret = __qseecom_get_ce_pipe_info(create_key_req.usage, &pipe, &ce_hw,
+			DEFAULT_CE_INFO_UNIT);
+	if (ret) {
+		pr_err("Failed to retrieve pipe/ce_hw info: %d\n", ret);
+		ret = -EINVAL;
+		goto free_buf;
+	}
+
+	if (qseecom.fde_key_size)
+		flags |= QSEECOM_ICE_FDE_KEY_SIZE_32_BYTE;
+	else
+		flags |= QSEECOM_ICE_FDE_KEY_SIZE_16_BYTE;
+
+	generate_key_ireq.flags = flags;
+	generate_key_ireq.qsee_command_id = QSEOS_GENERATE_KEY;
+	memset((void *)generate_key_ireq.key_id,
+			0, QSEECOM_KEY_ID_SIZE);
+	memset((void *)generate_key_ireq.hash32,
+			0, QSEECOM_HASH_SIZE);
+	memcpy((void *)generate_key_ireq.key_id,
+			(void *)key_id_array[create_key_req.usage].desc,
+			QSEECOM_KEY_ID_SIZE);
+	memcpy((void *)generate_key_ireq.hash32,
+			(void *)create_key_req.hash32,
+			QSEECOM_HASH_SIZE);
+
+	ret = __qseecom_generate_and_save_key(data,
+			create_key_req.usage, &generate_key_ireq);
+	if (ret) {
+		pr_err("Failed to generate key on storage: %d\n", ret);
+		goto free_buf;
+	}
+
+	for (i = 0; i < entries; i++) {
+		set_key_ireq.qsee_command_id = QSEOS_SET_KEY;
+		if (create_key_req.usage ==
+				QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION) {
+			set_key_ireq.ce = QSEECOM_UFS_ICE_CE_NUM;
+			set_key_ireq.pipe = QSEECOM_ICE_FDE_KEY_INDEX;
+
+		} else if (create_key_req.usage ==
+				QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION) {
+			set_key_ireq.ce = QSEECOM_SDCC_ICE_CE_NUM;
+			set_key_ireq.pipe = QSEECOM_ICE_FDE_KEY_INDEX;
+
+		} else {
+			set_key_ireq.ce = ce_hw[i];
+			set_key_ireq.pipe = pipe;
+		}
+		set_key_ireq.flags = flags;
+
+		/* set both PIPE_ENC and PIPE_ENC_XTS*/
+		set_key_ireq.pipe_type = QSEOS_PIPE_ENC|QSEOS_PIPE_ENC_XTS;
+		memset((void *)set_key_ireq.key_id, 0, QSEECOM_KEY_ID_SIZE);
+		memset((void *)set_key_ireq.hash32, 0, QSEECOM_HASH_SIZE);
+		memcpy((void *)set_key_ireq.key_id,
+			(void *)key_id_array[create_key_req.usage].desc,
+			QSEECOM_KEY_ID_SIZE);
+		memcpy((void *)set_key_ireq.hash32,
+				(void *)create_key_req.hash32,
+				QSEECOM_HASH_SIZE);
+
+		/*
+		 * qseecom_enable_ice_setup() returns 0 if this is a GPCE
+		 * based crypto instance or if ICE is set up properly.
+		 */
+		ret = qseecom_enable_ice_setup(create_key_req.usage);
+		if (ret)
+			goto free_buf;
+
+		do {
+			ret = __qseecom_set_clear_ce_key(data,
+					create_key_req.usage,
+					&set_key_ireq);
+			/*
+			 * wait a little before calling scm again to let other
+			 * processes run
+			 */
+			if (ret == QSEOS_RESULT_FAIL_PENDING_OPERATION)
+				msleep(50);
+
+		} while (ret == QSEOS_RESULT_FAIL_PENDING_OPERATION);
+
+		qseecom_disable_ice_setup(create_key_req.usage);
+
+		if (ret) {
+			pr_err("Failed to create key: pipe %d, ce %d: %d\n",
+				pipe, ce_hw[i], ret);
+			goto free_buf;
+		} else {
+			pr_err("Set the key successfully\n");
+			if ((create_key_req.usage ==
+				QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION) ||
+			     (create_key_req.usage ==
+				QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION))
+				goto free_buf;
+		}
+	}
+
+free_buf:
+	kzfree(ce_hw);
+	return ret;
+}
+
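+/*
+ * Wipe-key handler: optionally delete the key from secure storage,
+ * then clear it from every CE pipe by programming the invalid key id
+ * in place of the real one.
+ */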
+static int qseecom_wipe_key(struct qseecom_dev_handle *data,
+				void __user *argp)
+{
+	uint32_t *ce_hw = NULL;
+	uint32_t pipe = 0;
+	int ret = 0;
+	uint32_t flags = 0;
+	int i, j;
+	struct qseecom_wipe_key_req wipe_key_req;
+	struct qseecom_key_delete_ireq delete_key_ireq;
+	struct qseecom_key_select_ireq clear_key_ireq;
+	int entries = 0;
+
+	ret = copy_from_user(&wipe_key_req, argp, sizeof(wipe_key_req));
+	if (ret) {
+		pr_err("copy_from_user failed\n");
+		return ret;
+	}
+
+	if (wipe_key_req.usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
+		wipe_key_req.usage >= QSEOS_KM_USAGE_MAX) {
+		pr_err("unsupported usage %d\n", wipe_key_req.usage);
+		return -EFAULT;
+	}
+
+	entries = qseecom_get_ce_hw_instance(DEFAULT_CE_INFO_UNIT,
+					wipe_key_req.usage);
+	if (entries <= 0) {
+		pr_err("no ce instance for usage %d unit %d\n",
+			wipe_key_req.usage, DEFAULT_CE_INFO_UNIT);
+		return -EINVAL;
+	}
+
+	ce_hw = kcalloc(entries, sizeof(*ce_hw), GFP_KERNEL);
+	if (!ce_hw)
+		return -ENOMEM;
+
+	ret = __qseecom_get_ce_pipe_info(wipe_key_req.usage, &pipe, &ce_hw,
+				DEFAULT_CE_INFO_UNIT);
+	if (ret) {
+		pr_err("Failed to retrieve pipe/ce_hw info: %d\n", ret);
+		ret = -EINVAL;
+		goto free_buf;
+	}
+
+	if (wipe_key_req.wipe_key_flag) {
+		delete_key_ireq.flags = flags;
+		delete_key_ireq.qsee_command_id = QSEOS_DELETE_KEY;
+		memset((void *)delete_key_ireq.key_id, 0, QSEECOM_KEY_ID_SIZE);
+		memcpy((void *)delete_key_ireq.key_id,
+			(void *)key_id_array[wipe_key_req.usage].desc,
+			QSEECOM_KEY_ID_SIZE);
+		memset((void *)delete_key_ireq.hash32, 0, QSEECOM_HASH_SIZE);
+
+		ret = __qseecom_delete_saved_key(data, wipe_key_req.usage,
+					&delete_key_ireq);
+		if (ret) {
+			pr_err("Failed to delete key from ssd storage: %d\n",
+				ret);
+			ret = -EFAULT;
+			goto free_buf;
+		}
+	}
+
+	for (j = 0; j < entries; j++) {
+		clear_key_ireq.qsee_command_id = QSEOS_SET_KEY;
+		if (wipe_key_req.usage ==
+				QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION) {
+			clear_key_ireq.ce = QSEECOM_UFS_ICE_CE_NUM;
+			clear_key_ireq.pipe = QSEECOM_ICE_FDE_KEY_INDEX;
+		} else if (wipe_key_req.usage ==
+			QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION) {
+			clear_key_ireq.ce = QSEECOM_SDCC_ICE_CE_NUM;
+			clear_key_ireq.pipe = QSEECOM_ICE_FDE_KEY_INDEX;
+		} else {
+			clear_key_ireq.ce = ce_hw[j];
+			clear_key_ireq.pipe = pipe;
+		}
+		clear_key_ireq.flags = flags;
+		clear_key_ireq.pipe_type = QSEOS_PIPE_ENC|QSEOS_PIPE_ENC_XTS;
+		for (i = 0; i < QSEECOM_KEY_ID_SIZE; i++)
+			clear_key_ireq.key_id[i] = QSEECOM_INVALID_KEY_ID;
+		memset((void *)clear_key_ireq.hash32, 0, QSEECOM_HASH_SIZE);
+
+		/*
+		 * qseecom_enable_ice_setup() returns 0 if this is a GPCE
+		 * based crypto instance or if ICE is set up properly.
+		 */
+		ret = qseecom_enable_ice_setup(wipe_key_req.usage);
+		if (ret)
+			goto free_buf;
+
+		ret = __qseecom_set_clear_ce_key(data, wipe_key_req.usage,
+					&clear_key_ireq);
+
+		qseecom_disable_ice_setup(wipe_key_req.usage);
+
+		if (ret) {
+			pr_err("Failed to wipe key: pipe %d, ce %d: %d\n",
+				pipe, ce_hw[j], ret);
+			ret = -EFAULT;
+			goto free_buf;
+		}
+	}
+
+free_buf:
+	kzfree(ce_hw);
+	return ret;
+}
+
+static int qseecom_update_key_user_info(struct qseecom_dev_handle *data,
+			void __user *argp)
+{
+	int ret = 0;
+	uint32_t flags = 0;
+	struct qseecom_update_key_userinfo_req update_key_req;
+	struct qseecom_key_userinfo_update_ireq ireq;
+
+	ret = copy_from_user(&update_key_req, argp, sizeof(update_key_req));
+	if (ret) {
+		pr_err("copy_from_user failed\n");
+		return ret;
+	}
+
+	if (update_key_req.usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
+		update_key_req.usage >= QSEOS_KM_USAGE_MAX) {
+		pr_err("Error:: unsupported usage %d\n", update_key_req.usage);
+		return -EFAULT;
+	}
+
+	ireq.qsee_command_id = QSEOS_UPDATE_KEY_USERINFO;
+
+	if (qseecom.fde_key_size)
+		flags |= QSEECOM_ICE_FDE_KEY_SIZE_32_BYTE;
+	else
+		flags |= QSEECOM_ICE_FDE_KEY_SIZE_16_BYTE;
+
+	ireq.flags = flags;
+	memset(ireq.key_id, 0, QSEECOM_KEY_ID_SIZE);
+	memset((void *)ireq.current_hash32, 0, QSEECOM_HASH_SIZE);
+	memset((void *)ireq.new_hash32, 0, QSEECOM_HASH_SIZE);
+	memcpy((void *)ireq.key_id,
+		(void *)key_id_array[update_key_req.usage].desc,
+		QSEECOM_KEY_ID_SIZE);
+	memcpy((void *)ireq.current_hash32,
+		(void *)update_key_req.current_hash32, QSEECOM_HASH_SIZE);
+	memcpy((void *)ireq.new_hash32,
+		(void *)update_key_req.new_hash32, QSEECOM_HASH_SIZE);
+
+	do {
+		ret = __qseecom_update_current_key_user_info(data,
+						update_key_req.usage,
+						&ireq);
+		/*
+		 * wait a little before calling scm again to let other
+		 * processes run
+		 */
+		if (ret == QSEOS_RESULT_FAIL_PENDING_OPERATION)
+			msleep(50);
+
+	} while (ret == QSEOS_RESULT_FAIL_PENDING_OPERATION);
+	if (ret)
+		pr_err("Failed to update key info: %d\n", ret);
+	return ret;
+}
+
+static int qseecom_is_es_activated(void __user *argp)
+{
+	struct qseecom_is_es_activated_req req;
+	struct qseecom_command_scm_resp resp;
+	int ret;
+
+	if (qseecom.qsee_version < QSEE_VERSION_04) {
+		pr_err("invalid qsee version\n");
+		return -ENODEV;
+	}
+
+	if (argp == NULL) {
+		pr_err("arg is null\n");
+		return -EINVAL;
+	}
+
+	ret = qseecom_scm_call(SCM_SVC_ES, SCM_IS_ACTIVATED_ID,
+		&req, sizeof(req), &resp, sizeof(resp));
+	if (ret) {
+		pr_err("scm_call failed\n");
+		return ret;
+	}
+
+	req.is_activated = resp.result;
+	ret = copy_to_user(argp, &req, sizeof(req));
+	if (ret) {
+		pr_err("copy_to_user failed\n");
+		return ret;
+	}
+
+	return 0;
+}
+
+static int qseecom_save_partition_hash(void __user *argp)
+{
+	struct qseecom_save_partition_hash_req req;
+	struct qseecom_command_scm_resp resp;
+	int ret;
+
+	memset(&resp, 0x00, sizeof(resp));
+
+	if (qseecom.qsee_version < QSEE_VERSION_04) {
+		pr_err("invalid qsee version\n");
+		return -ENODEV;
+	}
+
+	if (argp == NULL) {
+		pr_err("arg is null\n");
+		return -EINVAL;
+	}
+
+	ret = copy_from_user(&req, argp, sizeof(req));
+	if (ret) {
+		pr_err("copy_from_user failed\n");
+		return ret;
+	}
+
+	ret = qseecom_scm_call(SCM_SVC_ES, SCM_SAVE_PARTITION_HASH_ID,
+		       (void *)&req, sizeof(req), (void *)&resp, sizeof(resp));
+	if (ret) {
+		pr_err("qseecom_scm_call failed\n");
+		return ret;
+	}
+
+	return 0;
+}
+
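+/*
+ * Cipher an MDTP DIP buffer through TZ: both user buffers are bounced
+ * through page-aligned kernel allocations, which are cache-flushed
+ * around the scm_call2() because TZ accesses them by physical address.
+ */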
+static int qseecom_mdtp_cipher_dip(void __user *argp)
+{
+	struct qseecom_mdtp_cipher_dip_req req;
+	u32 tzbuflenin, tzbuflenout;
+	char *tzbufin = NULL, *tzbufout = NULL;
+	struct scm_desc desc = {0};
+	int ret;
+
+	do {
+		/* Copy the parameters from userspace */
+		if (argp == NULL) {
+			pr_err("arg is null\n");
+			ret = -EINVAL;
+			break;
+		}
+
+		ret = copy_from_user(&req, argp, sizeof(req));
+		if (ret) {
+			pr_err("copy_from_user failed, ret= %d\n", ret);
+			break;
+		}
+
+		if (req.in_buf == NULL || req.out_buf == NULL ||
+			req.in_buf_size == 0 || req.in_buf_size > MAX_DIP ||
+			req.out_buf_size == 0 || req.out_buf_size > MAX_DIP ||
+				req.direction > 1) {
+				pr_err("invalid parameters\n");
+				ret = -EINVAL;
+				break;
+		}
+
+		/* Copy the input buffer from userspace to kernel space */
+		tzbuflenin = PAGE_ALIGN(req.in_buf_size);
+		tzbufin = kzalloc(tzbuflenin, GFP_KERNEL);
+		if (!tzbufin) {
+			pr_err("error allocating in buffer\n");
+			ret = -ENOMEM;
+			break;
+		}
+
+		ret = copy_from_user(tzbufin, req.in_buf, req.in_buf_size);
+		if (ret) {
+			pr_err("copy_from_user failed, ret=%d\n", ret);
+			break;
+		}
+
+		dmac_flush_range(tzbufin, tzbufin + tzbuflenin);
+
+		/* Prepare the output buffer in kernel space */
+		tzbuflenout = PAGE_ALIGN(req.out_buf_size);
+		tzbufout = kzalloc(tzbuflenout, GFP_KERNEL);
+		if (!tzbufout) {
+			pr_err("error allocating out buffer\n");
+			ret = -ENOMEM;
+			break;
+		}
+
+		dmac_flush_range(tzbufout, tzbufout + tzbuflenout);
+
+		/* Send the command to TZ */
+		desc.arginfo = TZ_MDTP_CIPHER_DIP_ID_PARAM_ID;
+		desc.args[0] = virt_to_phys(tzbufin);
+		desc.args[1] = req.in_buf_size;
+		desc.args[2] = virt_to_phys(tzbufout);
+		desc.args[3] = req.out_buf_size;
+		desc.args[4] = req.direction;
+
+		ret = __qseecom_enable_clk(CLK_QSEE);
+		if (ret)
+			break;
+
+		ret = scm_call2(TZ_MDTP_CIPHER_DIP_ID, &desc);
+
+		__qseecom_disable_clk(CLK_QSEE);
+
+		if (ret) {
+			pr_err("scm_call2 failed for SCM_SVC_MDTP, ret=%d\n",
+				ret);
+			break;
+		}
+
+		/* Copy the output buffer from kernel space to userspace */
+		dmac_flush_range(tzbufout, tzbufout + tzbuflenout);
+		ret = copy_to_user(req.out_buf, tzbufout, req.out_buf_size);
+		if (ret) {
+			pr_err("copy_to_user failed, ret=%d\n", ret);
+			break;
+		}
+	} while (0);
+
+	kzfree(tzbufin);
+	kzfree(tzbufout);
+
+	return ret;
+}
+
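+/*
+ * Validate a QTEEC request against the client's shared buffer: both
+ * request and response buffers must be non-NULL, non-empty, free of
+ * pointer-arithmetic overflow, and fully contained within the shared
+ * buffer region.
+ */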
+static int __qseecom_qteec_validate_msg(struct qseecom_dev_handle *data,
+				struct qseecom_qteec_req *req)
+{
+	if (!data || !data->client.ihandle) {
+		pr_err("Client or client handle is not initialized\n");
+		return -EINVAL;
+	}
+
+	if (data->type != QSEECOM_CLIENT_APP)
+		return -EFAULT;
+
+	if (req->req_len > UINT_MAX - req->resp_len) {
+		pr_err("Integer overflow detected in req_len & rsp_len\n");
+		return -EINVAL;
+	}
+
+	if (req->req_len + req->resp_len > data->client.sb_length) {
+		pr_debug("Not enough memory to fit cmd_buf.\n");
+		pr_debug("resp_buf. Required: %u, Available: %zu\n",
+		(req->req_len + req->resp_len), data->client.sb_length);
+		return -ENOMEM;
+	}
+
+	if (req->req_ptr == NULL || req->resp_ptr == NULL) {
+		pr_err("cmd buffer or response buffer is null\n");
+		return -EINVAL;
+	}
+	if (((uintptr_t)req->req_ptr <
+			data->client.user_virt_sb_base) ||
+		((uintptr_t)req->req_ptr >=
+		(data->client.user_virt_sb_base + data->client.sb_length))) {
+		pr_err("cmd buffer address not within shared bufffer\n");
+		return -EINVAL;
+	}
+
+	if (((uintptr_t)req->resp_ptr <
+			data->client.user_virt_sb_base)  ||
+		((uintptr_t)req->resp_ptr >=
+		(data->client.user_virt_sb_base + data->client.sb_length))) {
+		pr_err("response buffer address not within shared bufffer\n");
+		return -EINVAL;
+	}
+
+	if ((req->req_len == 0) || (req->resp_len == 0)) {
+		pr_err("cmd buf lengtgh/response buf length not valid\n");
+		return -EINVAL;
+	}
+
+	if ((uintptr_t)req->req_ptr > (ULONG_MAX - req->req_len)) {
+		pr_err("Integer overflow in req_len & req_ptr\n");
+		return -EINVAL;
+	}
+
+	if ((uintptr_t)req->resp_ptr > (ULONG_MAX - req->resp_len)) {
+		pr_err("Integer overflow in resp_len & resp_ptr\n");
+		return -EINVAL;
+	}
+
+	if (data->client.user_virt_sb_base >
+					(ULONG_MAX - data->client.sb_length)) {
+		pr_err("Integer overflow in user_virt_sb_base & sb_length\n");
+		return -EINVAL;
+	}
+	if ((((uintptr_t)req->req_ptr + req->req_len) >
+		((uintptr_t)data->client.user_virt_sb_base +
+						data->client.sb_length)) ||
+		(((uintptr_t)req->resp_ptr + req->resp_len) >
+		((uintptr_t)data->client.user_virt_sb_base +
+						data->client.sb_length))) {
+		pr_err("cmd buf or resp buf is out of shared buffer region\n");
+		return -EINVAL;
+	}
+	return 0;
+}
+
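+/*
+ * For a pre-allocated (secure-heap) fd, build a DMA-coherent table of
+ * the form { nents, sg_entry[nents] } and record it in the client's
+ * sec_buf_fd slot so the request can point TZ at its physical address.
+ */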
+static int __qseecom_qteec_handle_pre_alc_fd(struct qseecom_dev_handle *data,
+				uint32_t fd_idx, struct sg_table *sg_ptr)
+{
+	struct scatterlist *sg = sg_ptr->sgl;
+	struct qseecom_sg_entry *sg_entry;
+	void *buf;
+	uint i;
+	size_t size;
+	dma_addr_t coh_pmem;
+
+	if (fd_idx >= MAX_ION_FD) {
+		pr_err("fd_idx [%d] is invalid\n", fd_idx);
+		return -ENOMEM;
+	}
+	/*
+	 * Allocate a buffer, populate it with the number of entries plus
+	 * each sg entry's phys addr and length; then return the phys_addr
+	 * of the buffer.
+	 */
+	size = sizeof(uint32_t) +
+		sizeof(struct qseecom_sg_entry) * sg_ptr->nents;
+	size = (size + PAGE_SIZE) & PAGE_MASK;
+	buf = dma_alloc_coherent(qseecom.pdev,
+			size, &coh_pmem, GFP_KERNEL);
+	if (buf == NULL) {
+		pr_err("failed to alloc memory for sg buf\n");
+		return -ENOMEM;
+	}
+	*(uint32_t *)buf = sg_ptr->nents;
+	sg_entry = (struct qseecom_sg_entry *) (buf + sizeof(uint32_t));
+	for (i = 0; i < sg_ptr->nents; i++) {
+		sg_entry->phys_addr = (uint32_t)sg_dma_address(sg);
+		sg_entry->len = sg->length;
+		sg_entry++;
+		sg = sg_next(sg);
+	}
+	data->client.sec_buf_fd[fd_idx].is_sec_buf_fd = true;
+	data->client.sec_buf_fd[fd_idx].vbase = buf;
+	data->client.sec_buf_fd[fd_idx].pbase = coh_pmem;
+	data->client.sec_buf_fd[fd_idx].size = size;
+	return 0;
+}
+
+static int __qseecom_update_qteec_req_buf(struct qseecom_qteec_modfd_req *req,
+			struct qseecom_dev_handle *data, bool cleanup)
+{
+	struct ion_handle *ihandle;
+	int ret = 0;
+	int i = 0;
+	uint32_t *update;
+	struct sg_table *sg_ptr = NULL;
+	struct scatterlist *sg;
+	struct qseecom_param_memref *memref;
+
+	if (req == NULL) {
+		pr_err("Invalid address\n");
+		return -EINVAL;
+	}
+	for (i = 0; i < MAX_ION_FD; i++) {
+		if (req->ifd_data[i].fd > 0) {
+			ihandle = ion_import_dma_buf(qseecom.ion_clnt,
+					req->ifd_data[i].fd);
+			if (IS_ERR_OR_NULL(ihandle)) {
+				pr_err("Ion client can't retrieve the handle\n");
+				return -ENOMEM;
+			}
+			if ((req->req_len < sizeof(uint32_t)) ||
+				(req->ifd_data[i].cmd_buf_offset >
+				req->req_len - sizeof(uint32_t))) {
+				pr_err("Invalid offset/req len 0x%x/0x%x\n",
+					req->req_len,
+					req->ifd_data[i].cmd_buf_offset);
+				return -EINVAL;
+			}
+			update = (uint32_t *)((char *) req->req_ptr +
+				req->ifd_data[i].cmd_buf_offset);
+			if (!update) {
+				pr_err("update pointer is NULL\n");
+				return -EINVAL;
+			}
+		} else {
+			continue;
+		}
+		/* Populate the cmd data structure with the phys_addr */
+		sg_ptr = ion_sg_table(qseecom.ion_clnt, ihandle);
+		if (IS_ERR_OR_NULL(sg_ptr)) {
+			pr_err("IOn client could not retrieve sg table\n");
+			goto err;
+		}
+		sg = sg_ptr->sgl;
+		if (sg == NULL) {
+			pr_err("sg is NULL\n");
+			goto err;
+		}
+		if ((sg_ptr->nents == 0) || (sg->length == 0)) {
+			pr_err("Num of scat entr (%d)or length(%d) invalid\n",
+					sg_ptr->nents, sg->length);
+			goto err;
+		}
+		/* clean up buf for pre-allocated fd */
+		if (cleanup && data->client.sec_buf_fd[i].is_sec_buf_fd &&
+			(*update)) {
+			if (data->client.sec_buf_fd[i].vbase)
+				dma_free_coherent(qseecom.pdev,
+					data->client.sec_buf_fd[i].size,
+					data->client.sec_buf_fd[i].vbase,
+					data->client.sec_buf_fd[i].pbase);
+			memset((void *)update, 0,
+				sizeof(struct qseecom_param_memref));
+			memset(&(data->client.sec_buf_fd[i]), 0,
+				sizeof(struct qseecom_sec_buf_fd_info));
+			goto clean;
+		}
+
+		if (*update == 0) {
+			/* update buf for pre-allocated fd from secure heap */
+			ret = __qseecom_qteec_handle_pre_alc_fd(data, i,
+				sg_ptr);
+			if (ret) {
+				pr_err("Failed to handle buf for fd[%d]\n", i);
+				goto err;
+			}
+			memref = (struct qseecom_param_memref *)update;
+			memref->buffer =
+				(uint32_t)(data->client.sec_buf_fd[i].pbase);
+			memref->size =
+				(uint32_t)(data->client.sec_buf_fd[i].size);
+		} else {
+			/* update buf for fd from non-secure qseecom heap */
+			if (sg_ptr->nents != 1) {
+				pr_err("Num of scat entr (%d) invalid\n",
+					sg_ptr->nents);
+				goto err;
+			}
+			if (cleanup)
+				*update = 0;
+			else
+				*update = (uint32_t)sg_dma_address(sg_ptr->sgl);
+		}
+clean:
+		if (cleanup) {
+			ret = msm_ion_do_cache_op(qseecom.ion_clnt,
+				ihandle, NULL, sg->length,
+				ION_IOC_INV_CACHES);
+			if (ret) {
+				pr_err("cache operation failed %d\n", ret);
+				goto err;
+			}
+		} else {
+			ret = msm_ion_do_cache_op(qseecom.ion_clnt,
+				ihandle, NULL, sg->length,
+				ION_IOC_CLEAN_INV_CACHES);
+			if (ret) {
+				pr_err("cache operation failed %d\n", ret);
+				goto err;
+			}
+			data->sglistinfo_ptr[i].indexAndFlags =
+				SGLISTINFO_SET_INDEX_FLAG(
+				(sg_ptr->nents == 1), 0,
+				req->ifd_data[i].cmd_buf_offset);
+			data->sglistinfo_ptr[i].sizeOrCount =
+				(sg_ptr->nents == 1) ?
+				sg->length : sg_ptr->nents;
+			data->sglist_cnt = i + 1;
+		}
+		/* Deallocate the handle */
+		if (!IS_ERR_OR_NULL(ihandle))
+			ion_free(qseecom.ion_clnt, ihandle);
+	}
+	return ret;
+err:
+	if (!IS_ERR_OR_NULL(ihandle))
+		ion_free(qseecom.ion_clnt, ihandle);
+	return -ENOMEM;
+}
+
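+/*
+ * Common GP TEE (QTEEC) command path for open-session, close-session
+ * and request-cancellation: validate the request, translate user
+ * addresses to kernel/physical ones, build a 32- or 64-bit internal
+ * request (using the whitelist variant of open-session when
+ * supported), do cache maintenance around the SCM call, and run the
+ * reentrancy/incomplete-command handling on the response.
+ */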
+static int __qseecom_qteec_issue_cmd(struct qseecom_dev_handle *data,
+				struct qseecom_qteec_req *req, uint32_t cmd_id)
+{
+	struct qseecom_command_scm_resp resp;
+	struct qseecom_qteec_ireq ireq;
+	struct qseecom_qteec_64bit_ireq ireq_64bit;
+	struct qseecom_registered_app_list *ptr_app;
+	bool found_app = false;
+	unsigned long flags;
+	int ret = 0;
+	int ret2 = 0;
+	uint32_t reqd_len_sb_in = 0;
+	void *cmd_buf = NULL;
+	size_t cmd_len;
+	struct sglist_info *table = data->sglistinfo_ptr;
+	void *req_ptr = NULL;
+	void *resp_ptr = NULL;
+
+	ret  = __qseecom_qteec_validate_msg(data, req);
+	if (ret)
+		return ret;
+
+	req_ptr = req->req_ptr;
+	resp_ptr = req->resp_ptr;
+
+	/* find app_id & img_name from list */
+	spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
+	list_for_each_entry(ptr_app, &qseecom.registered_app_list_head,
+							list) {
+		if ((ptr_app->app_id == data->client.app_id) &&
+			 (!strcmp(ptr_app->app_name, data->client.app_name))) {
+			found_app = true;
+			break;
+		}
+	}
+	spin_unlock_irqrestore(&qseecom.registered_app_list_lock, flags);
+	if (!found_app) {
+		pr_err("app_id %d (%s) is not found\n", data->client.app_id,
+			(char *)data->client.app_name);
+		return -ENOENT;
+	}
+
+	req->req_ptr = (void *)__qseecom_uvirt_to_kvirt(data,
+						(uintptr_t)req->req_ptr);
+	req->resp_ptr = (void *)__qseecom_uvirt_to_kvirt(data,
+						(uintptr_t)req->resp_ptr);
+
+	if ((cmd_id == QSEOS_TEE_OPEN_SESSION) ||
+			(cmd_id == QSEOS_TEE_REQUEST_CANCELLATION)) {
+		ret = __qseecom_update_qteec_req_buf(
+			(struct qseecom_qteec_modfd_req *)req, data, false);
+		if (ret)
+			return ret;
+	}
+
+	if (qseecom.qsee_version < QSEE_VERSION_40) {
+		ireq.app_id = data->client.app_id;
+		ireq.req_ptr = (uint32_t)__qseecom_uvirt_to_kphys(data,
+						(uintptr_t)req_ptr);
+		ireq.req_len = req->req_len;
+		ireq.resp_ptr = (uint32_t)__qseecom_uvirt_to_kphys(data,
+						(uintptr_t)resp_ptr);
+		ireq.resp_len = req->resp_len;
+		ireq.sglistinfo_ptr = (uint32_t)virt_to_phys(table);
+		ireq.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
+		dmac_flush_range((void *)table,
+				(void *)table + SGLISTINFO_TABLE_SIZE);
+		cmd_buf = (void *)&ireq;
+		cmd_len = sizeof(struct qseecom_qteec_ireq);
+	} else {
+		ireq_64bit.app_id = data->client.app_id;
+		ireq_64bit.req_ptr = (uint64_t)__qseecom_uvirt_to_kphys(data,
+						(uintptr_t)req_ptr);
+		ireq_64bit.req_len = req->req_len;
+		ireq_64bit.resp_ptr = (uint64_t)__qseecom_uvirt_to_kphys(data,
+						(uintptr_t)resp_ptr);
+		ireq_64bit.resp_len = req->resp_len;
+		if ((data->client.app_arch == ELFCLASS32) &&
+			((ireq_64bit.req_ptr >=
+				PHY_ADDR_4G - ireq_64bit.req_len) ||
+			(ireq_64bit.resp_ptr >=
+				PHY_ADDR_4G - ireq_64bit.resp_len))){
+			pr_err("32bit app %s (id: %d): phy_addr exceeds 4G\n",
+				data->client.app_name, data->client.app_id);
+			pr_err("req_ptr:%llx,req_len:%x,rsp_ptr:%llx,rsp_len:%x\n",
+				ireq_64bit.req_ptr, ireq_64bit.req_len,
+				ireq_64bit.resp_ptr, ireq_64bit.resp_len);
+			return -EFAULT;
+		}
+		ireq_64bit.sglistinfo_ptr = (uint64_t)virt_to_phys(table);
+		ireq_64bit.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
+		dmac_flush_range((void *)table,
+				(void *)table + SGLISTINFO_TABLE_SIZE);
+		cmd_buf = (void *)&ireq_64bit;
+		cmd_len = sizeof(struct qseecom_qteec_64bit_ireq);
+	}
+	if (qseecom.whitelist_support &&
+		cmd_id == QSEOS_TEE_OPEN_SESSION)
+		*(uint32_t *)cmd_buf = QSEOS_TEE_OPEN_SESSION_WHITELIST;
+	else
+		*(uint32_t *)cmd_buf = cmd_id;
+
+	reqd_len_sb_in = req->req_len + req->resp_len;
+	ret = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
+					data->client.sb_virt,
+					reqd_len_sb_in,
+					ION_IOC_CLEAN_INV_CACHES);
+	if (ret) {
+		pr_err("cache operation failed %d\n", ret);
+		return ret;
+	}
+
+	__qseecom_reentrancy_check_if_this_app_blocked(ptr_app);
+
+	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
+				cmd_buf, cmd_len,
+				&resp, sizeof(resp));
+	if (ret) {
+		pr_err("scm_call() failed with err: %d (app_id = %d)\n",
+					ret, data->client.app_id);
+		goto exit;
+	}
+
+	if (qseecom.qsee_reentrancy_support) {
+		ret = __qseecom_process_reentrancy(&resp, ptr_app, data);
+		if (ret)
+			goto exit;
+	} else {
+		if (resp.result == QSEOS_RESULT_INCOMPLETE) {
+			ret = __qseecom_process_incomplete_cmd(data, &resp);
+			if (ret) {
+				pr_err("process_incomplete_cmd failed err: %d\n",
+						ret);
+				goto exit;
+			}
+		} else {
+			if (resp.result != QSEOS_RESULT_SUCCESS) {
+				pr_err("Response result %d not supported\n",
+								resp.result);
+				ret = -EINVAL;
+				goto exit;
+			}
+		}
+	}
+exit:
+	ret2 = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
+				data->client.sb_virt, data->client.sb_length,
+				ION_IOC_INV_CACHES);
+	if (ret2) {
+		pr_err("cache operation failed %d\n", ret);
+		return ret2;
+	}
+
+	if ((cmd_id == QSEOS_TEE_OPEN_SESSION) ||
+			(cmd_id == QSEOS_TEE_REQUEST_CANCELLATION)) {
+		ret2 = __qseecom_update_qteec_req_buf(
+			(struct qseecom_qteec_modfd_req *)req, data, true);
+		if (ret2)
+			return ret2;
+	}
+	return ret;
+}
+
+static int qseecom_qteec_open_session(struct qseecom_dev_handle *data,
+				void __user *argp)
+{
+	struct qseecom_qteec_modfd_req req;
+	int ret = 0;
+
+	ret = copy_from_user(&req, argp,
+				sizeof(struct qseecom_qteec_modfd_req));
+	if (ret) {
+		pr_err("copy_from_user failed\n");
+		return ret;
+	}
+	ret = __qseecom_qteec_issue_cmd(data, (struct qseecom_qteec_req *)&req,
+							QSEOS_TEE_OPEN_SESSION);
+
+	return ret;
+}
+
+static int qseecom_qteec_close_session(struct qseecom_dev_handle *data,
+				void __user *argp)
+{
+	struct qseecom_qteec_req req;
+	int ret = 0;
+
+	ret = copy_from_user(&req, argp, sizeof(struct qseecom_qteec_req));
+	if (ret) {
+		pr_err("copy_from_user failed\n");
+		return ret;
+	}
+	ret = __qseecom_qteec_issue_cmd(data, &req, QSEOS_TEE_CLOSE_SESSION);
+	return ret;
+}
+
+static int qseecom_qteec_invoke_modfd_cmd(struct qseecom_dev_handle *data,
+				void __user *argp)
+{
+	struct qseecom_qteec_modfd_req req;
+	struct qseecom_command_scm_resp resp;
+	struct qseecom_qteec_ireq ireq;
+	struct qseecom_qteec_64bit_ireq ireq_64bit;
+	struct qseecom_registered_app_list *ptr_app;
+	bool found_app = false;
+	unsigned long flags;
+	int ret = 0;
+	int i = 0;
+	uint32_t reqd_len_sb_in = 0;
+	void *cmd_buf = NULL;
+	size_t cmd_len;
+	struct sglist_info *table = data->sglistinfo_ptr;
+	void *req_ptr = NULL;
+	void *resp_ptr = NULL;
+
+	ret = copy_from_user(&req, argp,
+			sizeof(struct qseecom_qteec_modfd_req));
+	if (ret) {
+		pr_err("copy_from_user failed\n");
+		return ret;
+	}
+	ret = __qseecom_qteec_validate_msg(data,
+					(struct qseecom_qteec_req *)(&req));
+	if (ret)
+		return ret;
+	req_ptr = req.req_ptr;
+	resp_ptr = req.resp_ptr;
+
+	/* find app_id & img_name from list */
+	spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
+	list_for_each_entry(ptr_app, &qseecom.registered_app_list_head,
+							list) {
+		if ((ptr_app->app_id == data->client.app_id) &&
+			 (!strcmp(ptr_app->app_name, data->client.app_name))) {
+			found_app = true;
+			break;
+		}
+	}
+	spin_unlock_irqrestore(&qseecom.registered_app_list_lock, flags);
+	if (!found_app) {
+		pr_err("app_id %d (%s) is not found\n", data->client.app_id,
+			(char *)data->client.app_name);
+		return -ENOENT;
+	}
+
+	/* validate offsets */
+	for (i = 0; i < MAX_ION_FD; i++) {
+		if (req.ifd_data[i].fd) {
+			if (req.ifd_data[i].cmd_buf_offset >= req.req_len)
+				return -EINVAL;
+		}
+	}
+	req.req_ptr = (void *)__qseecom_uvirt_to_kvirt(data,
+						(uintptr_t)req.req_ptr);
+	req.resp_ptr = (void *)__qseecom_uvirt_to_kvirt(data,
+						(uintptr_t)req.resp_ptr);
+	ret = __qseecom_update_qteec_req_buf(&req, data, false);
+	if (ret)
+		return ret;
+
+	if (qseecom.qsee_version < QSEE_VERSION_40) {
+		ireq.app_id = data->client.app_id;
+		ireq.req_ptr = (uint32_t)__qseecom_uvirt_to_kphys(data,
+						(uintptr_t)req_ptr);
+		ireq.req_len = req.req_len;
+		ireq.resp_ptr = (uint32_t)__qseecom_uvirt_to_kphys(data,
+						(uintptr_t)resp_ptr);
+		ireq.resp_len = req.resp_len;
+		cmd_buf = (void *)&ireq;
+		cmd_len = sizeof(struct qseecom_qteec_ireq);
+		ireq.sglistinfo_ptr = (uint32_t)virt_to_phys(table);
+		ireq.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
+		dmac_flush_range((void *)table,
+				(void *)table + SGLISTINFO_TABLE_SIZE);
+	} else {
+		ireq_64bit.app_id = data->client.app_id;
+		ireq_64bit.req_ptr = (uint64_t)__qseecom_uvirt_to_kphys(data,
+						(uintptr_t)req_ptr);
+		ireq_64bit.req_len = req.req_len;
+		ireq_64bit.resp_ptr = (uint64_t)__qseecom_uvirt_to_kphys(data,
+						(uintptr_t)resp_ptr);
+		ireq_64bit.resp_len = req.resp_len;
+		cmd_buf = (void *)&ireq_64bit;
+		cmd_len = sizeof(struct qseecom_qteec_64bit_ireq);
+		ireq_64bit.sglistinfo_ptr = (uint64_t)virt_to_phys(table);
+		ireq_64bit.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
+		dmac_flush_range((void *)table,
+				(void *)table + SGLISTINFO_TABLE_SIZE);
+	}
+	reqd_len_sb_in = req.req_len + req.resp_len;
+	if (qseecom.whitelist_support)
+		*(uint32_t *)cmd_buf = QSEOS_TEE_INVOKE_COMMAND_WHITELIST;
+	else
+		*(uint32_t *)cmd_buf = QSEOS_TEE_INVOKE_COMMAND;
+
+	ret = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
+					data->client.sb_virt,
+					reqd_len_sb_in,
+					ION_IOC_CLEAN_INV_CACHES);
+	if (ret) {
+		pr_err("cache operation failed %d\n", ret);
+		return ret;
+	}
+
+	__qseecom_reentrancy_check_if_this_app_blocked(ptr_app);
+
+	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
+				cmd_buf, cmd_len,
+				&resp, sizeof(resp));
+	if (ret) {
+		pr_err("scm_call() failed with err: %d (app_id = %d)\n",
+					ret, data->client.app_id);
+		return ret;
+	}
+
+	if (qseecom.qsee_reentrancy_support) {
+		ret = __qseecom_process_reentrancy(&resp, ptr_app, data);
+	} else {
+		if (resp.result == QSEOS_RESULT_INCOMPLETE) {
+			ret = __qseecom_process_incomplete_cmd(data, &resp);
+			if (ret) {
+				pr_err("process_incomplete_cmd failed err: %d\n",
+						ret);
+				return ret;
+			}
+		} else {
+			if (resp.result != QSEOS_RESULT_SUCCESS) {
+				pr_err("Response result %d not supported\n",
+								resp.result);
+				ret = -EINVAL;
+			}
+		}
+	}
+	ret = __qseecom_update_qteec_req_buf(&req, data, true);
+	if (ret)
+		return ret;
+
+	ret = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
+				data->client.sb_virt, data->client.sb_length,
+				ION_IOC_INV_CACHES);
+	if (ret) {
+		pr_err("cache operation failed %d\n", ret);
+		return ret;
+	}
+	return 0;
+}
+
+static int qseecom_qteec_request_cancellation(struct qseecom_dev_handle *data,
+				void __user *argp)
+{
+	struct qseecom_qteec_modfd_req req;
+	int ret = 0;
+
+	ret = copy_from_user(&req, argp,
+				sizeof(struct qseecom_qteec_modfd_req));
+	if (ret) {
+		pr_err("copy_from_user failed\n");
+		return ret;
+	}
+	ret = __qseecom_qteec_issue_cmd(data, (struct qseecom_qteec_req *)&req,
+						QSEOS_TEE_REQUEST_CANCELLATION);
+
+	return ret;
+}
+
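+/* Clear the per-client scatter/gather list table once a command is done. */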
+static void __qseecom_clean_data_sglistinfo(struct qseecom_dev_handle *data)
+{
+	if (data->sglist_cnt) {
+		memset(data->sglistinfo_ptr, 0,
+			SGLISTINFO_TABLE_SIZE);
+		data->sglist_cnt = 0;
+	}
+}
+
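+/*
+ * Take the bandwidth/clock votes needed before sending a command to TZ.
+ * *perf_enabled records whether a clock vote was taken here, so the
+ * caller can drop exactly that vote afterwards.
+ */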
+static int __qseecom_bus_scaling_enable(struct qseecom_dev_handle *data,
+					bool *perf_enabled)
+{
+	int ret = 0;
+
+	if (qseecom.support_bus_scaling) {
+		if (!data->mode) {
+			mutex_lock(&qsee_bw_mutex);
+			__qseecom_register_bus_bandwidth_needs(
+							data, HIGH);
+			mutex_unlock(&qsee_bw_mutex);
+		}
+		ret = qseecom_scale_bus_bandwidth_timer(INACTIVE);
+		if (ret) {
+			pr_err("Failed to set bw\n");
+			ret = -EINVAL;
+			goto exit;
+		}
+	}
+	/*
+	 * On targets where the crypto clock is handled by HLOS: if
+	 * clk_access_cnt is zero and perf_enabled is false, the crypto
+	 * clock was not enabled before sending the command to TZ, so
+	 * qseecom enables it here to avoid a service failure.
+	 */
+	if (!qseecom.no_clock_support &&
+		!qseecom.qsee.clk_access_cnt && !data->perf_enabled) {
+		pr_debug("ce clock is not enabled\n");
+		ret = qseecom_perf_enable(data);
+		if (ret) {
+			pr_err("Failed to vote for clock with err %d\n",
+					ret);
+			ret = -EINVAL;
+			goto exit;
+		}
+		*perf_enabled = true;
+	}
+exit:
+	return ret;
+}
+
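+/* Undo the votes taken by __qseecom_bus_scaling_enable(). */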
+static void __qseecom_bus_scaling_disable(struct qseecom_dev_handle *data,
+					bool perf_enabled)
+{
+	if (qseecom.support_bus_scaling)
+		__qseecom_add_bw_scale_down_timer(
+			QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);
+	if (perf_enabled) {
+		qsee_disable_clock_vote(data, CLK_DFAB);
+		qsee_disable_clock_vote(data, CLK_SFPB);
+	}
+}
+
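+/*
+ * Main ioctl dispatcher.  Most requests serialize on app_access_lock;
+ * ioctl_count and abort_wq let the abort path wait for in-flight
+ * requests to drain.
+ */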
+long qseecom_ioctl(struct file *file, unsigned cmd, unsigned long arg)
+{
+	int ret = 0;
+	struct qseecom_dev_handle *data = file->private_data;
+	void __user *argp = (void __user *) arg;
+	bool perf_enabled = false;
+
+	if (!data) {
+		pr_err("Invalid/uninitialized device handle\n");
+		return -EINVAL;
+	}
+
+	if (data->abort) {
+		pr_err("Aborting qseecom driver\n");
+		return -ENODEV;
+	}
+
+	switch (cmd) {
+	case QSEECOM_IOCTL_REGISTER_LISTENER_REQ: {
+		if (data->type != QSEECOM_GENERIC) {
+			pr_err("reg lstnr req: invalid handle (%d)\n",
+								data->type);
+			ret = -EINVAL;
+			break;
+		}
+		pr_debug("ioctl register_listener_req()\n");
+		mutex_lock(&app_access_lock);
+		atomic_inc(&data->ioctl_count);
+		data->type = QSEECOM_LISTENER_SERVICE;
+		ret = qseecom_register_listener(data, argp);
+		atomic_dec(&data->ioctl_count);
+		wake_up_all(&data->abort_wq);
+		mutex_unlock(&app_access_lock);
+		if (ret)
+			pr_err("failed qseecom_register_listener: %d\n", ret);
+		break;
+	}
+	case QSEECOM_IOCTL_UNREGISTER_LISTENER_REQ: {
+		if ((data->listener.id == 0) ||
+			(data->type != QSEECOM_LISTENER_SERVICE)) {
+			pr_err("unreg lstnr req: invalid handle (%d) lid(%d)\n",
+						data->type, data->listener.id);
+			ret = -EINVAL;
+			break;
+		}
+		pr_debug("ioctl unregister_listener_req()\n");
+		mutex_lock(&app_access_lock);
+		atomic_inc(&data->ioctl_count);
+		ret = qseecom_unregister_listener(data);
+		atomic_dec(&data->ioctl_count);
+		wake_up_all(&data->abort_wq);
+		mutex_unlock(&app_access_lock);
+		if (ret)
+			pr_err("failed qseecom_unregister_listener: %d\n", ret);
+		break;
+	}
+	case QSEECOM_IOCTL_SEND_CMD_REQ: {
+		if ((data->client.app_id == 0) ||
+			(data->type != QSEECOM_CLIENT_APP)) {
+			pr_err("send cmd req: invalid handle (%d) app_id(%d)\n",
+					data->type, data->client.app_id);
+			ret = -EINVAL;
+			break;
+		}
+		/* Only one client allowed here at a time */
+		mutex_lock(&app_access_lock);
+		ret = __qseecom_bus_scaling_enable(data, &perf_enabled);
+		if (ret) {
+			mutex_unlock(&app_access_lock);
+			break;
+		}
+		atomic_inc(&data->ioctl_count);
+		ret = qseecom_send_cmd(data, argp);
+		__qseecom_bus_scaling_disable(data, perf_enabled);
+		atomic_dec(&data->ioctl_count);
+		wake_up_all(&data->abort_wq);
+		mutex_unlock(&app_access_lock);
+		if (ret)
+			pr_err("failed qseecom_send_cmd: %d\n", ret);
+		break;
+	}
+	case QSEECOM_IOCTL_SEND_MODFD_CMD_REQ:
+	case QSEECOM_IOCTL_SEND_MODFD_CMD_64_REQ: {
+		if ((data->client.app_id == 0) ||
+			(data->type != QSEECOM_CLIENT_APP)) {
+			pr_err("send mdfd cmd: invalid handle (%d) appid(%d)\n",
+					data->type, data->client.app_id);
+			ret = -EINVAL;
+			break;
+		}
+		/* Only one client allowed here at a time */
+		mutex_lock(&app_access_lock);
+		ret = __qseecom_bus_scaling_enable(data, &perf_enabled);
+		if (ret) {
+			mutex_unlock(&app_access_lock);
+			break;
+		}
+		atomic_inc(&data->ioctl_count);
+		if (cmd == QSEECOM_IOCTL_SEND_MODFD_CMD_REQ)
+			ret = qseecom_send_modfd_cmd(data, argp);
+		else
+			ret = qseecom_send_modfd_cmd_64(data, argp);
+		__qseecom_bus_scaling_disable(data, perf_enabled);
+		atomic_dec(&data->ioctl_count);
+		wake_up_all(&data->abort_wq);
+		mutex_unlock(&app_access_lock);
+		if (ret)
+			pr_err("failed qseecom_send_modfd_cmd: %d\n", ret);
+		__qseecom_clean_data_sglistinfo(data);
+		break;
+	}
+	case QSEECOM_IOCTL_RECEIVE_REQ: {
+		if ((data->listener.id == 0) ||
+			(data->type != QSEECOM_LISTENER_SERVICE)) {
+			pr_err("receive req: invalid handle (%d), lid(%d)\n",
+						data->type, data->listener.id);
+			ret = -EINVAL;
+			break;
+		}
+		atomic_inc(&data->ioctl_count);
+		ret = qseecom_receive_req(data);
+		atomic_dec(&data->ioctl_count);
+		wake_up_all(&data->abort_wq);
+		if (ret && (ret != -ERESTARTSYS))
+			pr_err("failed qseecom_receive_req: %d\n", ret);
+		break;
+	}
+	case QSEECOM_IOCTL_SEND_RESP_REQ: {
+		if ((data->listener.id == 0) ||
+			(data->type != QSEECOM_LISTENER_SERVICE)) {
+			pr_err("send resp req: invalid handle (%d), lid(%d)\n",
+						data->type, data->listener.id);
+			ret = -EINVAL;
+			break;
+		}
+		atomic_inc(&data->ioctl_count);
+		if (!qseecom.qsee_reentrancy_support)
+			ret = qseecom_send_resp();
+		else
+			ret = qseecom_reentrancy_send_resp(data);
+		atomic_dec(&data->ioctl_count);
+		wake_up_all(&data->abort_wq);
+		if (ret)
+			pr_err("failed qseecom_send_resp: %d\n", ret);
+		break;
+	}
+	case QSEECOM_IOCTL_SET_MEM_PARAM_REQ: {
+		if ((data->type != QSEECOM_CLIENT_APP) &&
+			(data->type != QSEECOM_GENERIC) &&
+			(data->type != QSEECOM_SECURE_SERVICE)) {
+			pr_err("set mem param req: invalid handle (%d)\n",
+								data->type);
+			ret = -EINVAL;
+			break;
+		}
+		pr_debug("SET_MEM_PARAM: qseecom addr = 0x%pK\n", data);
+		mutex_lock(&app_access_lock);
+		atomic_inc(&data->ioctl_count);
+		ret = qseecom_set_client_mem_param(data, argp);
+		atomic_dec(&data->ioctl_count);
+		mutex_unlock(&app_access_lock);
+		if (ret)
+			pr_err("failed qseecom_set_mem_param request: %d\n",
+								ret);
+		break;
+	}
+	case QSEECOM_IOCTL_LOAD_APP_REQ: {
+		if ((data->type != QSEECOM_GENERIC) &&
+			(data->type != QSEECOM_CLIENT_APP)) {
+			pr_err("load app req: invalid handle (%d)\n",
+								data->type);
+			ret = -EINVAL;
+			break;
+		}
+		data->type = QSEECOM_CLIENT_APP;
+		pr_debug("LOAD_APP_REQ: qseecom_addr = 0x%pK\n", data);
+		mutex_lock(&app_access_lock);
+		atomic_inc(&data->ioctl_count);
+		ret = qseecom_load_app(data, argp);
+		atomic_dec(&data->ioctl_count);
+		mutex_unlock(&app_access_lock);
+		if (ret)
+			pr_err("failed load_app request: %d\n", ret);
+		break;
+	}
+	case QSEECOM_IOCTL_UNLOAD_APP_REQ: {
+		if ((data->client.app_id == 0) ||
+			(data->type != QSEECOM_CLIENT_APP)) {
+			pr_err("unload app req:invalid handle(%d) app_id(%d)\n",
+					data->type, data->client.app_id);
+			ret = -EINVAL;
+			break;
+		}
+		pr_debug("UNLOAD_APP: qseecom_addr = 0x%pK\n", data);
+		mutex_lock(&app_access_lock);
+		atomic_inc(&data->ioctl_count);
+		ret = qseecom_unload_app(data, false);
+		atomic_dec(&data->ioctl_count);
+		mutex_unlock(&app_access_lock);
+		if (ret)
+			pr_err("failed unload_app request: %d\n", ret);
+		break;
+	}
+	case QSEECOM_IOCTL_GET_QSEOS_VERSION_REQ: {
+		atomic_inc(&data->ioctl_count);
+		ret = qseecom_get_qseos_version(data, argp);
+		if (ret)
+			pr_err("qseecom_get_qseos_version: %d\n", ret);
+		atomic_dec(&data->ioctl_count);
+		break;
+	}
+	case QSEECOM_IOCTL_PERF_ENABLE_REQ:{
+		if ((data->type != QSEECOM_GENERIC) &&
+			(data->type != QSEECOM_CLIENT_APP)) {
+			pr_err("perf enable req: invalid handle (%d)\n",
+								data->type);
+			ret = -EINVAL;
+			break;
+		}
+		if ((data->type == QSEECOM_CLIENT_APP) &&
+			(data->client.app_id == 0)) {
+			pr_err("perf enable req:invalid handle(%d) appid(%d)\n",
+					data->type, data->client.app_id);
+			ret = -EINVAL;
+			break;
+		}
+		atomic_inc(&data->ioctl_count);
+		if (qseecom.support_bus_scaling) {
+			mutex_lock(&qsee_bw_mutex);
+			__qseecom_register_bus_bandwidth_needs(data, HIGH);
+			mutex_unlock(&qsee_bw_mutex);
+		} else {
+			ret = qseecom_perf_enable(data);
+			if (ret)
+				pr_err("Fail to vote for clocks %d\n", ret);
+		}
+		atomic_dec(&data->ioctl_count);
+		break;
+	}
+	case QSEECOM_IOCTL_PERF_DISABLE_REQ:{
+		if ((data->type != QSEECOM_SECURE_SERVICE) &&
+			(data->type != QSEECOM_CLIENT_APP)) {
+			pr_err("perf disable req: invalid handle (%d)\n",
+								data->type);
+			ret = -EINVAL;
+			break;
+		}
+		if ((data->type == QSEECOM_CLIENT_APP) &&
+			(data->client.app_id == 0)) {
+			pr_err("perf disable: invalid handle (%d)app_id(%d)\n",
+					data->type, data->client.app_id);
+			ret = -EINVAL;
+			break;
+		}
+		atomic_inc(&data->ioctl_count);
+		if (!qseecom.support_bus_scaling) {
+			qsee_disable_clock_vote(data, CLK_DFAB);
+			qsee_disable_clock_vote(data, CLK_SFPB);
+		} else {
+			mutex_lock(&qsee_bw_mutex);
+			qseecom_unregister_bus_bandwidth_needs(data);
+			mutex_unlock(&qsee_bw_mutex);
+		}
+		atomic_dec(&data->ioctl_count);
+		break;
+	}
+
+	case QSEECOM_IOCTL_SET_BUS_SCALING_REQ: {
+		/* If crypto clock is not handled by HLOS, return directly. */
+		if (qseecom.no_clock_support) {
+			pr_debug("crypto clock is not handled by HLOS\n");
+			break;
+		}
+		if ((data->client.app_id == 0) ||
+			(data->type != QSEECOM_CLIENT_APP)) {
+			pr_err("set bus scale: invalid handle (%d) appid(%d)\n",
+					data->type, data->client.app_id);
+			ret = -EINVAL;
+			break;
+		}
+		atomic_inc(&data->ioctl_count);
+		ret = qseecom_scale_bus_bandwidth(data, argp);
+		atomic_dec(&data->ioctl_count);
+		break;
+	}
+	case QSEECOM_IOCTL_LOAD_EXTERNAL_ELF_REQ: {
+		if (data->type != QSEECOM_GENERIC) {
+			pr_err("load ext elf req: invalid client handle (%d)\n",
+								data->type);
+			ret = -EINVAL;
+			break;
+		}
+		data->type = QSEECOM_UNAVAILABLE_CLIENT_APP;
+		data->released = true;
+		mutex_lock(&app_access_lock);
+		atomic_inc(&data->ioctl_count);
+		ret = qseecom_load_external_elf(data, argp);
+		atomic_dec(&data->ioctl_count);
+		mutex_unlock(&app_access_lock);
+		if (ret)
+			pr_err("failed load_external_elf request: %d\n", ret);
+		break;
+	}
+	case QSEECOM_IOCTL_UNLOAD_EXTERNAL_ELF_REQ: {
+		if (data->type != QSEECOM_UNAVAILABLE_CLIENT_APP) {
+			pr_err("unload ext elf req: invalid handle (%d)\n",
+								data->type);
+			ret = -EINVAL;
+			break;
+		}
+		data->released = true;
+		mutex_lock(&app_access_lock);
+		atomic_inc(&data->ioctl_count);
+		ret = qseecom_unload_external_elf(data);
+		atomic_dec(&data->ioctl_count);
+		mutex_unlock(&app_access_lock);
+		if (ret)
+			pr_err("failed unload_external_elf request: %d\n",
+								ret);
+		break;
+	}
+	case QSEECOM_IOCTL_APP_LOADED_QUERY_REQ: {
+		data->type = QSEECOM_CLIENT_APP;
+		mutex_lock(&app_access_lock);
+		atomic_inc(&data->ioctl_count);
+		pr_debug("APP_LOAD_QUERY: qseecom_addr = 0x%pK\n", data);
+		ret = qseecom_query_app_loaded(data, argp);
+		atomic_dec(&data->ioctl_count);
+		mutex_unlock(&app_access_lock);
+		break;
+	}
+	case QSEECOM_IOCTL_SEND_CMD_SERVICE_REQ: {
+		if (data->type != QSEECOM_GENERIC) {
+			pr_err("send cmd svc req: invalid handle (%d)\n",
+								data->type);
+			ret = -EINVAL;
+			break;
+		}
+		data->type = QSEECOM_SECURE_SERVICE;
+		if (qseecom.qsee_version < QSEE_VERSION_03) {
+			pr_err("SEND_CMD_SERVICE_REQ: Invalid qsee ver %u\n",
+				qseecom.qsee_version);
+			return -EINVAL;
+		}
+		mutex_lock(&app_access_lock);
+		atomic_inc(&data->ioctl_count);
+		ret = qseecom_send_service_cmd(data, argp);
+		atomic_dec(&data->ioctl_count);
+		mutex_unlock(&app_access_lock);
+		break;
+	}
+	case QSEECOM_IOCTL_CREATE_KEY_REQ: {
+		if (!(qseecom.support_pfe || qseecom.support_fde))
+			pr_err("Features requiring key init not supported\n");
+		if (data->type != QSEECOM_GENERIC) {
+			pr_err("create key req: invalid handle (%d)\n",
+								data->type);
+			ret = -EINVAL;
+			break;
+		}
+		if (qseecom.qsee_version < QSEE_VERSION_05) {
+			pr_err("Create Key feature unsupported: qsee ver %u\n",
+				qseecom.qsee_version);
+			return -EINVAL;
+		}
+		data->released = true;
+		mutex_lock(&app_access_lock);
+		atomic_inc(&data->ioctl_count);
+		ret = qseecom_create_key(data, argp);
+		if (ret)
+			pr_err("failed to create encryption key: %d\n", ret);
+
+		atomic_dec(&data->ioctl_count);
+		mutex_unlock(&app_access_lock);
+		break;
+	}
+	case QSEECOM_IOCTL_WIPE_KEY_REQ: {
+		if (!(qseecom.support_pfe || qseecom.support_fde))
+			pr_err("Features requiring key init not supported\n");
+		if (data->type != QSEECOM_GENERIC) {
+			pr_err("wipe key req: invalid handle (%d)\n",
+								data->type);
+			ret = -EINVAL;
+			break;
+		}
+		if (qseecom.qsee_version < QSEE_VERSION_05) {
+			pr_err("Wipe Key feature unsupported in qsee ver %u\n",
+				qseecom.qsee_version);
+			return -EINVAL;
+		}
+		data->released = true;
+		mutex_lock(&app_access_lock);
+		atomic_inc(&data->ioctl_count);
+		ret = qseecom_wipe_key(data, argp);
+		if (ret)
+			pr_err("failed to wipe encryption key: %d\n", ret);
+		atomic_dec(&data->ioctl_count);
+		mutex_unlock(&app_access_lock);
+		break;
+	}
+	case QSEECOM_IOCTL_UPDATE_KEY_USER_INFO_REQ: {
+		if (!(qseecom.support_pfe || qseecom.support_fde))
+			pr_err("Features requiring key init not supported\n");
+		if (data->type != QSEECOM_GENERIC) {
+			pr_err("update key req: invalid handle (%d)\n",
+								data->type);
+			ret = -EINVAL;
+			break;
+		}
+		if (qseecom.qsee_version < QSEE_VERSION_05) {
+			pr_err("Update Key feature unsupported in qsee ver %u\n",
+				qseecom.qsee_version);
+			return -EINVAL;
+		}
+		data->released = true;
+		mutex_lock(&app_access_lock);
+		atomic_inc(&data->ioctl_count);
+		ret = qseecom_update_key_user_info(data, argp);
+		if (ret)
+			pr_err("failed to update key user info: %d\n", ret);
+		atomic_dec(&data->ioctl_count);
+		mutex_unlock(&app_access_lock);
+		break;
+	}
+	case QSEECOM_IOCTL_SAVE_PARTITION_HASH_REQ: {
+		if (data->type != QSEECOM_GENERIC) {
+			pr_err("save part hash req: invalid handle (%d)\n",
+								data->type);
+			ret = -EINVAL;
+			break;
+		}
+		data->released = true;
+		mutex_lock(&app_access_lock);
+		atomic_inc(&data->ioctl_count);
+		ret = qseecom_save_partition_hash(argp);
+		atomic_dec(&data->ioctl_count);
+		mutex_unlock(&app_access_lock);
+		break;
+	}
+	case QSEECOM_IOCTL_IS_ES_ACTIVATED_REQ: {
+		if (data->type != QSEECOM_GENERIC) {
+			pr_err("ES activated req: invalid handle (%d)\n",
+								data->type);
+			ret = -EINVAL;
+			break;
+		}
+		data->released = true;
+		mutex_lock(&app_access_lock);
+		atomic_inc(&data->ioctl_count);
+		ret = qseecom_is_es_activated(argp);
+		atomic_dec(&data->ioctl_count);
+		mutex_unlock(&app_access_lock);
+		break;
+	}
+	case QSEECOM_IOCTL_MDTP_CIPHER_DIP_REQ: {
+		if (data->type != QSEECOM_GENERIC) {
+			pr_err("MDTP cipher DIP req: invalid handle (%d)\n",
+								data->type);
+			ret = -EINVAL;
+			break;
+		}
+		data->released = true;
+		mutex_lock(&app_access_lock);
+		atomic_inc(&data->ioctl_count);
+		ret = qseecom_mdtp_cipher_dip(argp);
+		atomic_dec(&data->ioctl_count);
+		mutex_unlock(&app_access_lock);
+		break;
+	}
+	case QSEECOM_IOCTL_SEND_MODFD_RESP:
+	case QSEECOM_IOCTL_SEND_MODFD_RESP_64: {
+		if ((data->listener.id == 0) ||
+			(data->type != QSEECOM_LISTENER_SERVICE)) {
+			pr_err("send modfd resp: invalid handle (%d), lid(%d)\n",
+						data->type, data->listener.id);
+			ret = -EINVAL;
+			break;
+		}
+		atomic_inc(&data->ioctl_count);
+		if (cmd == QSEECOM_IOCTL_SEND_MODFD_RESP)
+			ret = qseecom_send_modfd_resp(data, argp);
+		else
+			ret = qseecom_send_modfd_resp_64(data, argp);
+		atomic_dec(&data->ioctl_count);
+		wake_up_all(&data->abort_wq);
+		if (ret)
+			pr_err("failed qseecom_send_mod_resp: %d\n", ret);
+		__qseecom_clean_data_sglistinfo(data);
+		break;
+	}
+	case QSEECOM_QTEEC_IOCTL_OPEN_SESSION_REQ: {
+		if ((data->client.app_id == 0) ||
+			(data->type != QSEECOM_CLIENT_APP)) {
+			pr_err("Open session: invalid handle (%d) appid(%d)\n",
+					data->type, data->client.app_id);
+			ret = -EINVAL;
+			break;
+		}
+		if (qseecom.qsee_version < QSEE_VERSION_40) {
+			pr_err("GP feature unsupported: qsee ver %u\n",
+				qseecom.qsee_version);
+			return -EINVAL;
+		}
+		/* Only one client allowed here at a time */
+		mutex_lock(&app_access_lock);
+		ret = __qseecom_bus_scaling_enable(data, &perf_enabled);
+		if (ret) {
+			mutex_unlock(&app_access_lock);
+			break;
+		}
+		atomic_inc(&data->ioctl_count);
+		ret = qseecom_qteec_open_session(data, argp);
+		__qseecom_bus_scaling_disable(data, perf_enabled);
+		atomic_dec(&data->ioctl_count);
+		wake_up_all(&data->abort_wq);
+		mutex_unlock(&app_access_lock);
+		if (ret)
+			pr_err("failed open_session_cmd: %d\n", ret);
+		__qseecom_clean_data_sglistinfo(data);
+		break;
+	}
+	case QSEECOM_QTEEC_IOCTL_CLOSE_SESSION_REQ: {
+		if ((data->client.app_id == 0) ||
+			(data->type != QSEECOM_CLIENT_APP)) {
+			pr_err("Close session: invalid handle (%d) appid(%d)\n",
+					data->type, data->client.app_id);
+			ret = -EINVAL;
+			break;
+		}
+		if (qseecom.qsee_version < QSEE_VERSION_40) {
+			pr_err("GP feature unsupported: qsee ver %u\n",
+				qseecom.qsee_version);
+			return -EINVAL;
+		}
+		/* Only one client allowed here at a time */
+		mutex_lock(&app_access_lock);
+		atomic_inc(&data->ioctl_count);
+		ret = qseecom_qteec_close_session(data, argp);
+		atomic_dec(&data->ioctl_count);
+		wake_up_all(&data->abort_wq);
+		mutex_unlock(&app_access_lock);
+		if (ret)
+			pr_err("failed close_session_cmd: %d\n", ret);
+		break;
+	}
+	case QSEECOM_QTEEC_IOCTL_INVOKE_MODFD_CMD_REQ: {
+		if ((data->client.app_id == 0) ||
+			(data->type != QSEECOM_CLIENT_APP)) {
+			pr_err("Invoke cmd: invalid handle (%d) appid(%d)\n",
+					data->type, data->client.app_id);
+			ret = -EINVAL;
+			break;
+		}
+		if (qseecom.qsee_version < QSEE_VERSION_40) {
+			pr_err("GP feature unsupported: qsee ver %u\n",
+				qseecom.qsee_version);
+			return -EINVAL;
+		}
+		/* Only one client allowed here at a time */
+		mutex_lock(&app_access_lock);
+		ret = __qseecom_bus_scaling_enable(data, &perf_enabled);
+		if (ret) {
+			mutex_unlock(&app_access_lock);
+			break;
+		}
+		atomic_inc(&data->ioctl_count);
+		ret = qseecom_qteec_invoke_modfd_cmd(data, argp);
+		__qseecom_bus_scaling_disable(data, perf_enabled);
+		atomic_dec(&data->ioctl_count);
+		wake_up_all(&data->abort_wq);
+		mutex_unlock(&app_access_lock);
+		if (ret)
+			pr_err("failed Invoke cmd: %d\n", ret);
+		__qseecom_clean_data_sglistinfo(data);
+		break;
+	}
+	case QSEECOM_QTEEC_IOCTL_REQUEST_CANCELLATION_REQ: {
+		if ((data->client.app_id == 0) ||
+			(data->type != QSEECOM_CLIENT_APP)) {
+			pr_err("Cancel req: invalid handle (%d) appid(%d)\n",
+					data->type, data->client.app_id);
+			ret = -EINVAL;
+			break;
+		}
+		if (qseecom.qsee_version < QSEE_VERSION_40) {
+			pr_err("GP feature unsupported: qsee ver %u\n",
+				qseecom.qsee_version);
+			return -EINVAL;
+		}
+		/* Only one client allowed here at a time */
+		mutex_lock(&app_access_lock);
+		atomic_inc(&data->ioctl_count);
+		ret = qseecom_qteec_request_cancellation(data, argp);
+		atomic_dec(&data->ioctl_count);
+		wake_up_all(&data->abort_wq);
+		mutex_unlock(&app_access_lock);
+		if (ret)
+			pr_err("failed request_cancellation: %d\n", ret);
+		break;
+	}
+	case QSEECOM_IOCTL_GET_CE_PIPE_INFO: {
+		atomic_inc(&data->ioctl_count);
+		ret = qseecom_get_ce_info(data, argp);
+		if (ret)
+			pr_err("failed get ce pipe info: %d\n", ret);
+		atomic_dec(&data->ioctl_count);
+		break;
+	}
+	case QSEECOM_IOCTL_FREE_CE_PIPE_INFO: {
+		atomic_inc(&data->ioctl_count);
+		ret = qseecom_free_ce_info(data, argp);
+		if (ret)
+			pr_err("failed free ce pipe info: %d\n", ret);
+		atomic_dec(&data->ioctl_count);
+		break;
+	}
+	case QSEECOM_IOCTL_QUERY_CE_PIPE_INFO: {
+		atomic_inc(&data->ioctl_count);
+		ret = qseecom_query_ce_info(data, argp);
+		if (ret)
+			pr_err("failed query ce pipe info: %d\n", ret);
+		atomic_dec(&data->ioctl_count);
+		break;
+	}
+	default:
+		pr_err("Invalid IOCTL: 0x%x\n", cmd);
+		return -EINVAL;
+	}
+	return ret;
+}
+
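+/*
+ * Allocate and initialize a per-fd client handle.  The handle starts as
+ * QSEECOM_GENERIC and is specialized by the first ioctl issued on it.
+ */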
+static int qseecom_open(struct inode *inode, struct file *file)
+{
+	int ret = 0;
+	struct qseecom_dev_handle *data;
+
+	data = kzalloc(sizeof(*data), GFP_KERNEL);
+	if (!data) {
+		pr_err("kzalloc failed\n");
+		return -ENOMEM;
+	}
+	file->private_data = data;
+	data->abort = 0;
+	data->type = QSEECOM_GENERIC;
+	data->released = false;
+	memset((void *)data->client.app_name, 0, MAX_APP_NAME_SIZE);
+	data->mode = INACTIVE;
+	init_waitqueue_head(&data->abort_wq);
+	atomic_set(&data->ioctl_count, 0);
+	return ret;
+}
+
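+/*
+ * Tear down whatever the fd represented (listener, loaded app, or mapped
+ * ion memory) unless it was released already, then drop any remaining
+ * bandwidth and clock votes.
+ */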
+static int qseecom_release(struct inode *inode, struct file *file)
+{
+	struct qseecom_dev_handle *data = file->private_data;
+	int ret = 0;
+
+	if (!data->released) {
+		pr_debug("data: released=false, type=%d, mode=%d, data=0x%pK\n",
+			data->type, data->mode, data);
+		switch (data->type) {
+		case QSEECOM_LISTENER_SERVICE:
+			mutex_lock(&app_access_lock);
+			ret = qseecom_unregister_listener(data);
+			mutex_unlock(&app_access_lock);
+			break;
+		case QSEECOM_CLIENT_APP:
+			mutex_lock(&app_access_lock);
+			/*
+			 * Do not assume the userland app crashed: there is
+			 * no way to know from here, and other legitimate
+			 * userland clients may still depend on the TZ app,
+			 * so unload it without the crash indication.
+			 */
+			ret = qseecom_unload_app(data, false);
+			mutex_unlock(&app_access_lock);
+			break;
+		case QSEECOM_SECURE_SERVICE:
+		case QSEECOM_GENERIC:
+			ret = qseecom_unmap_ion_allocated_memory(data);
+			if (ret)
+				pr_err("Ion Unmap failed\n");
+			break;
+		case QSEECOM_UNAVAILABLE_CLIENT_APP:
+			break;
+		default:
+			pr_err("Unsupported clnt_handle_type %d\n",
+				data->type);
+			break;
+		}
+	}
+
+	if (qseecom.support_bus_scaling) {
+		mutex_lock(&qsee_bw_mutex);
+		if (data->mode != INACTIVE) {
+			qseecom_unregister_bus_bandwidth_needs(data);
+			if (qseecom.cumulative_mode == INACTIVE) {
+				ret = __qseecom_set_msm_bus_request(INACTIVE);
+				if (ret)
+					pr_err("Fail to scale down bus\n");
+			}
+		}
+		mutex_unlock(&qsee_bw_mutex);
+	} else {
+		if (data->fast_load_enabled)
+			qsee_disable_clock_vote(data, CLK_SFPB);
+		if (data->perf_enabled)
+			qsee_disable_clock_vote(data, CLK_DFAB);
+	}
+	kfree(data);
+
+	return ret;
+}
+
+static const struct file_operations qseecom_fops = {
+		.owner = THIS_MODULE,
+		.unlocked_ioctl = qseecom_ioctl,
+#ifdef CONFIG_COMPAT
+		.compat_ioctl = compat_qseecom_ioctl,
+#endif
+		.open = qseecom_open,
+		.release = qseecom_release
+};
+
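+/*
+ * Look up the crypto engine clocks for the given CE instance and set the
+ * core source clock to the configured frequency.  With
+ * qcom,no-clock-support all clock handles stay NULL.
+ */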
+static int __qseecom_init_clk(enum qseecom_ce_hw_instance ce)
+{
+	int rc = 0;
+	struct device *pdev;
+	struct qseecom_clk *qclk;
+	char *core_clk_src = NULL;
+	char *core_clk = NULL;
+	char *iface_clk = NULL;
+	char *bus_clk = NULL;
+
+	switch (ce) {
+	case CLK_QSEE: {
+		core_clk_src = "core_clk_src";
+		core_clk = "core_clk";
+		iface_clk = "iface_clk";
+		bus_clk = "bus_clk";
+		qclk = &qseecom.qsee;
+		qclk->instance = CLK_QSEE;
+		break;
+	}
+	case CLK_CE_DRV: {
+		core_clk_src = "ce_drv_core_clk_src";
+		core_clk = "ce_drv_core_clk";
+		iface_clk = "ce_drv_iface_clk";
+		bus_clk = "ce_drv_bus_clk";
+		qclk = &qseecom.ce_drv;
+		qclk->instance = CLK_CE_DRV;
+		break;
+	}
+	default:
+		pr_err("Invalid ce hw instance: %d!\n", ce);
+		return -EIO;
+	}
+
+	if (qseecom.no_clock_support) {
+		qclk->ce_core_clk = NULL;
+		qclk->ce_clk = NULL;
+		qclk->ce_bus_clk = NULL;
+		qclk->ce_core_src_clk = NULL;
+		return 0;
+	}
+
+	pdev = qseecom.pdev;
+
+	/* Get CE3 src core clk. */
+	qclk->ce_core_src_clk = clk_get(pdev, core_clk_src);
+	if (!IS_ERR(qclk->ce_core_src_clk)) {
+		rc = clk_set_rate(qclk->ce_core_src_clk,
+					qseecom.ce_opp_freq_hz);
+		if (rc) {
+			clk_put(qclk->ce_core_src_clk);
+			qclk->ce_core_src_clk = NULL;
+			pr_err("Unable to set the core src clk @%uMhz.\n",
+				qseecom.ce_opp_freq_hz/CE_CLK_DIV);
+			return -EIO;
+		}
+	} else {
+		pr_warn("Unable to get CE core src clk, set to NULL\n");
+		qclk->ce_core_src_clk = NULL;
+	}
+
+	/* Get CE core clk */
+	qclk->ce_core_clk = clk_get(pdev, core_clk);
+	if (IS_ERR(qclk->ce_core_clk)) {
+		rc = PTR_ERR(qclk->ce_core_clk);
+		pr_err("Unable to get CE core clk\n");
+		if (qclk->ce_core_src_clk != NULL)
+			clk_put(qclk->ce_core_src_clk);
+		return -EIO;
+	}
+
+	/* Get CE Interface clk */
+	qclk->ce_clk = clk_get(pdev, iface_clk);
+	if (IS_ERR(qclk->ce_clk)) {
+		rc = PTR_ERR(qclk->ce_clk);
+		pr_err("Unable to get CE interface clk\n");
+		if (qclk->ce_core_src_clk != NULL)
+			clk_put(qclk->ce_core_src_clk);
+		clk_put(qclk->ce_core_clk);
+		return -EIO;
+	}
+
+	/* Get CE AXI clk */
+	qclk->ce_bus_clk = clk_get(pdev, bus_clk);
+	if (IS_ERR(qclk->ce_bus_clk)) {
+		rc = PTR_ERR(qclk->ce_bus_clk);
+		pr_err("Unable to get CE BUS interface clk\n");
+		if (qclk->ce_core_src_clk != NULL)
+			clk_put(qclk->ce_core_src_clk);
+		clk_put(qclk->ce_core_clk);
+		clk_put(qclk->ce_clk);
+		return -EIO;
+	}
+
+	return rc;
+}
+
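+/* Drop the clock references taken by __qseecom_init_clk(). */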
+static void __qseecom_deinit_clk(enum qseecom_ce_hw_instance ce)
+{
+	struct qseecom_clk *qclk;
+
+	if (ce == CLK_QSEE)
+		qclk = &qseecom.qsee;
+	else
+		qclk = &qseecom.ce_drv;
+
+	if (qclk->ce_clk != NULL) {
+		clk_put(qclk->ce_clk);
+		qclk->ce_clk = NULL;
+	}
+	if (qclk->ce_core_clk != NULL) {
+		clk_put(qclk->ce_core_clk);
+		qclk->ce_core_clk = NULL;
+	}
+	if (qclk->ce_bus_clk != NULL) {
+		clk_put(qclk->ce_bus_clk);
+		qclk->ce_bus_clk = NULL;
+	}
+	if (qclk->ce_core_src_clk != NULL) {
+		clk_put(qclk->ce_core_src_clk);
+		qclk->ce_core_src_clk = NULL;
+	}
+	qclk->instance = CLK_INVALID;
+}
+
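+/*
+ * Parse the crypto engine configuration from the device tree.  Newer
+ * device trees describe FDE/PFE units via the full-disk-encrypt-info and
+ * per-file-encrypt-info tables; if neither is present, fall back to the
+ * legacy single-unit properties (old_db).
+ */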
+static int qseecom_retrieve_ce_data(struct platform_device *pdev)
+{
+	int rc = 0;
+	uint32_t hlos_num_ce_hw_instances;
+	uint32_t disk_encrypt_pipe;
+	uint32_t file_encrypt_pipe;
+	uint32_t hlos_ce_hw_instance[MAX_CE_PIPE_PAIR_PER_UNIT] = {0};
+	int i;
+	const int *tbl;
+	int size;
+	int entry;
+	struct qseecom_crypto_info *pfde_tbl = NULL;
+	struct qseecom_crypto_info *p;
+	int tbl_size;
+	int j;
+	bool old_db = true;
+	struct qseecom_ce_info_use *pce_info_use;
+	uint32_t *unit_tbl = NULL;
+	int total_units = 0;
+	struct qseecom_ce_pipe_entry *pce_entry;
+
+	qseecom.ce_info.fde = qseecom.ce_info.pfe = NULL;
+	qseecom.ce_info.num_fde = qseecom.ce_info.num_pfe = 0;
+
+	if (of_property_read_u32((&pdev->dev)->of_node,
+				"qcom,qsee-ce-hw-instance",
+				&qseecom.ce_info.qsee_ce_hw_instance)) {
+		pr_err("Fail to get qsee ce hw instance information.\n");
+		rc = -EINVAL;
+		goto out;
+	} else {
+		pr_debug("qsee-ce-hw-instance=0x%x\n",
+			qseecom.ce_info.qsee_ce_hw_instance);
+	}
+
+	qseecom.support_fde = of_property_read_bool((&pdev->dev)->of_node,
+						"qcom,support-fde");
+	qseecom.support_pfe = of_property_read_bool((&pdev->dev)->of_node,
+						"qcom,support-pfe");
+
+	if (!qseecom.support_pfe && !qseecom.support_fde) {
+		pr_warn("Device does not support PFE/FDE\n");
+		goto out;
+	}
+
+	if (qseecom.support_fde)
+		tbl = of_get_property((&pdev->dev)->of_node,
+			"qcom,full-disk-encrypt-info", &size);
+	else
+		tbl = NULL;
+	if (tbl) {
+		old_db = false;
+		if (size % sizeof(struct qseecom_crypto_info)) {
+			pr_err("full-disk-encrypt-info tbl size(%d)\n",
+				size);
+			rc = -EINVAL;
+			goto out;
+		}
+		tbl_size = size / sizeof(struct qseecom_crypto_info);
+
+		pfde_tbl = kzalloc(size, GFP_KERNEL);
+		unit_tbl = kcalloc(tbl_size, sizeof(int), GFP_KERNEL);
+		total_units = 0;
+
+		if (!pfde_tbl || !unit_tbl) {
+			pr_err("failed to alloc memory\n");
+			rc = -ENOMEM;
+			goto out;
+		}
+		if (of_property_read_u32_array((&pdev->dev)->of_node,
+			"qcom,full-disk-encrypt-info",
+			(u32 *)pfde_tbl, size/sizeof(u32))) {
+			pr_err("failed to read full-disk-encrypt-info tbl\n");
+			rc = -EINVAL;
+			goto out;
+		}
+
+		for (i = 0, p = pfde_tbl;  i < tbl_size; i++, p++) {
+			for (j = 0; j < total_units; j++) {
+				if (p->unit_num == *(unit_tbl + j))
+					break;
+			}
+			if (j == total_units) {
+				*(unit_tbl + total_units) = p->unit_num;
+				total_units++;
+			}
+		}
+
+		qseecom.ce_info.num_fde = total_units;
+		pce_info_use = qseecom.ce_info.fde = kcalloc(
+			total_units, sizeof(struct qseecom_ce_info_use),
+				GFP_KERNEL);
+		if (!pce_info_use) {
+			pr_err("failed to alloc memory\n");
+			rc = -ENOMEM;
+			goto out;
+		}
+
+		for (j = 0; j < total_units; j++, pce_info_use++) {
+			pce_info_use->unit_num = *(unit_tbl + j);
+			pce_info_use->alloc = false;
+			pce_info_use->type = CE_PIPE_PAIR_USE_TYPE_FDE;
+			pce_info_use->num_ce_pipe_entries = 0;
+			pce_info_use->ce_pipe_entry = NULL;
+			for (i = 0, p = pfde_tbl;  i < tbl_size; i++, p++) {
+				if (p->unit_num == pce_info_use->unit_num)
+					pce_info_use->num_ce_pipe_entries++;
+			}
+
+			entry = pce_info_use->num_ce_pipe_entries;
+			pce_entry = pce_info_use->ce_pipe_entry =
+				kcalloc(entry,
+					sizeof(struct qseecom_ce_pipe_entry),
+					GFP_KERNEL);
+			if (pce_entry == NULL) {
+				pr_err("failed to alloc memory\n");
+				rc = -ENOMEM;
+				goto out;
+			}
+
+			for (i = 0, p = pfde_tbl; i < tbl_size; i++, p++) {
+				if (p->unit_num == pce_info_use->unit_num) {
+					pce_entry->ce_num = p->ce;
+					pce_entry->ce_pipe_pair =
+							p->pipe_pair;
+					pce_entry->valid = true;
+					pce_entry++;
+				}
+			}
+		}
+		kfree(unit_tbl);
+		unit_tbl = NULL;
+		kfree(pfde_tbl);
+		pfde_tbl = NULL;
+	}
+
+	if (qseecom.support_pfe)
+		tbl = of_get_property((&pdev->dev)->of_node,
+			"qcom,per-file-encrypt-info", &size);
+	else
+		tbl = NULL;
+	if (tbl) {
+		old_db = false;
+		if (size % sizeof(struct qseecom_crypto_info)) {
+			pr_err("per-file-encrypt-info tbl size(%d)\n",
+				size);
+			rc = -EINVAL;
+			goto out;
+		}
+		tbl_size = size / sizeof(struct qseecom_crypto_info);
+
+		pfde_tbl = kzalloc(size, GFP_KERNEL);
+		unit_tbl = kcalloc(tbl_size, sizeof(int), GFP_KERNEL);
+		total_units = 0;
+		if (!pfde_tbl || !unit_tbl) {
+			pr_err("failed to alloc memory\n");
+			rc = -ENOMEM;
+			goto out;
+		}
+		if (of_property_read_u32_array((&pdev->dev)->of_node,
+			"qcom,per-file-encrypt-info",
+			(u32 *)pfde_tbl, size/sizeof(u32))) {
+			pr_err("failed to read per-file-encrypt-info tbl\n");
+			rc = -EINVAL;
+			goto out;
+		}
+
+		for (i = 0, p = pfde_tbl;  i < tbl_size; i++, p++) {
+			for (j = 0; j < total_units; j++) {
+				if (p->unit_num == *(unit_tbl + j))
+					break;
+			}
+			if (j == total_units) {
+				*(unit_tbl + total_units) = p->unit_num;
+				total_units++;
+			}
+		}
+
+		qseecom.ce_info.num_pfe = total_units;
+		pce_info_use = qseecom.ce_info.pfe = kcalloc(
+			total_units, sizeof(struct qseecom_ce_info_use),
+				GFP_KERNEL);
+		if (!pce_info_use) {
+			pr_err("failed to alloc memory\n");
+			rc = -ENOMEM;
+			goto out;
+		}
+
+		for (j = 0; j < total_units; j++, pce_info_use++) {
+			pce_info_use->unit_num = *(unit_tbl + j);
+			pce_info_use->alloc = false;
+			pce_info_use->type = CE_PIPE_PAIR_USE_TYPE_PFE;
+			pce_info_use->num_ce_pipe_entries = 0;
+			pce_info_use->ce_pipe_entry = NULL;
+			for (i = 0, p = pfde_tbl; i < tbl_size; i++, p++) {
+				if (p->unit_num == pce_info_use->unit_num)
+					pce_info_use->num_ce_pipe_entries++;
+			}
+
+			entry = pce_info_use->num_ce_pipe_entries;
+			pce_entry = pce_info_use->ce_pipe_entry =
+				kcalloc(entry,
+					sizeof(struct qseecom_ce_pipe_entry),
+					GFP_KERNEL);
+			if (pce_entry == NULL) {
+				pr_err("failed to alloc memory\n");
+				rc = -ENOMEM;
+				goto out;
+			}
+
+			for (i = 0, p = pfde_tbl; i < tbl_size; i++, p++) {
+				if (p->unit_num == pce_info_use->unit_num) {
+					pce_entry->ce_num = p->ce;
+					pce_entry->ce_pipe_pair =
+							p->pipe_pair;
+					pce_entry->valid = true;
+					pce_entry++;
+				}
+			}
+		}
+		kfree(unit_tbl);
+		unit_tbl = NULL;
+		kfree(pfde_tbl);
+		pfde_tbl = NULL;
+	}
+
+	if (!old_db)
+		goto out1;
+
+	if (of_property_read_bool((&pdev->dev)->of_node,
+			"qcom,support-multiple-ce-hw-instance")) {
+		if (of_property_read_u32((&pdev->dev)->of_node,
+			"qcom,hlos-num-ce-hw-instances",
+				&hlos_num_ce_hw_instances)) {
+			pr_err("Fail: get hlos number of ce hw instance\n");
+			rc = -EINVAL;
+			goto out;
+		}
+	} else {
+		hlos_num_ce_hw_instances = 1;
+	}
+
+	if (hlos_num_ce_hw_instances > MAX_CE_PIPE_PAIR_PER_UNIT) {
+		pr_err("Fail: hlos number of ce hw instance exceeds %d\n",
+			MAX_CE_PIPE_PAIR_PER_UNIT);
+		rc = -EINVAL;
+		goto out;
+	}
+
+	if (of_property_read_u32_array((&pdev->dev)->of_node,
+			"qcom,hlos-ce-hw-instance", hlos_ce_hw_instance,
+			hlos_num_ce_hw_instances)) {
+		pr_err("Fail: get hlos ce hw instance info\n");
+		rc = -EINVAL;
+		goto out;
+	}
+
+	if (qseecom.support_fde) {
+		pce_info_use = qseecom.ce_info.fde =
+			kzalloc(sizeof(struct qseecom_ce_info_use), GFP_KERNEL);
+		if (!pce_info_use) {
+			pr_err("failed to alloc memory\n");
+			rc = -ENOMEM;
+			goto out;
+		}
+		/* by default for old db */
+		qseecom.ce_info.num_fde = DEFAULT_NUM_CE_INFO_UNIT;
+		pce_info_use->unit_num = DEFAULT_CE_INFO_UNIT;
+		pce_info_use->alloc = false;
+		pce_info_use->type = CE_PIPE_PAIR_USE_TYPE_FDE;
+		pce_info_use->ce_pipe_entry = NULL;
+		if (of_property_read_u32((&pdev->dev)->of_node,
+				"qcom,disk-encrypt-pipe-pair",
+				&disk_encrypt_pipe)) {
+			pr_err("Fail to get FDE pipe information.\n");
+			rc = -EINVAL;
+			goto out;
+		} else {
+			pr_debug("disk-encrypt-pipe-pair=0x%x\n",
+				disk_encrypt_pipe);
+		}
+		entry = pce_info_use->num_ce_pipe_entries =
+				hlos_num_ce_hw_instances;
+		pce_entry = pce_info_use->ce_pipe_entry =
+			kcalloc(entry,
+				sizeof(struct qseecom_ce_pipe_entry),
+				GFP_KERNEL);
+		if (pce_entry == NULL) {
+			pr_err("failed to alloc memory\n");
+			rc = -ENOMEM;
+			goto out;
+		}
+		for (i = 0; i < entry; i++) {
+			pce_entry->ce_num = hlos_ce_hw_instance[i];
+			pce_entry->ce_pipe_pair = disk_encrypt_pipe;
+			pce_entry->valid = 1;
+			pce_entry++;
+		}
+	} else {
+		pr_warn("Device does not support FDE\n");
+		disk_encrypt_pipe = 0xff;
+	}
+	if (qseecom.support_pfe) {
+		pce_info_use = qseecom.ce_info.pfe =
+			kzalloc(sizeof(struct qseecom_ce_info_use), GFP_KERNEL);
+		if (!pce_info_use) {
+			pr_err("failed to alloc memory\n");
+			rc = -ENOMEM;
+			goto out;
+		}
+		/* by default for old db */
+		qseecom.ce_info.num_pfe = DEFAULT_NUM_CE_INFO_UNIT;
+		pce_info_use->unit_num = DEFAULT_CE_INFO_UNIT;
+		pce_info_use->alloc = false;
+		pce_info_use->type = CE_PIPE_PAIR_USE_TYPE_PFE;
+		pce_info_use->ce_pipe_entry = NULL;
+
+		if (of_property_read_u32((&pdev->dev)->of_node,
+				"qcom,file-encrypt-pipe-pair",
+				&file_encrypt_pipe)) {
+			pr_err("Fail to get PFE pipe information.\n");
+			rc = -EINVAL;
+			goto out;
+		} else {
+			pr_debug("file-encrypt-pipe-pair=0x%x\n",
+				file_encrypt_pipe);
+		}
+		entry = pce_info_use->num_ce_pipe_entries =
+						hlos_num_ce_hw_instances;
+		pce_entry = pce_info_use->ce_pipe_entry =
+			kcalloc(entry,
+				sizeof(struct qseecom_ce_pipe_entry),
+				GFP_KERNEL);
+		if (pce_entry == NULL) {
+			pr_err("failed to alloc memory\n");
+			rc = -ENOMEM;
+			goto out;
+		}
+		for (i = 0; i < entry; i++) {
+			pce_entry->ce_num = hlos_ce_hw_instance[i];
+			pce_entry->ce_pipe_pair = file_encrypt_pipe;
+			pce_entry->valid = 1;
+			pce_entry++;
+		}
+	} else {
+		pr_warn("Device does not support PFE\n");
+		file_encrypt_pipe = 0xff;
+	}
+
+out1:
+	qseecom.qsee.instance = qseecom.ce_info.qsee_ce_hw_instance;
+	qseecom.ce_drv.instance = hlos_ce_hw_instance[0];
+out:
+	if (rc) {
+		if (qseecom.ce_info.fde) {
+			pce_info_use = qseecom.ce_info.fde;
+			for (i = 0; i < qseecom.ce_info.num_fde; i++) {
+				pce_entry = pce_info_use->ce_pipe_entry;
+				kfree(pce_entry);
+				pce_info_use++;
+			}
+		}
+		kfree(qseecom.ce_info.fde);
+		qseecom.ce_info.fde = NULL;
+		if (qseecom.ce_info.pfe) {
+			pce_info_use = qseecom.ce_info.pfe;
+			for (i = 0; i < qseecom.ce_info.num_pfe; i++) {
+				pce_entry = pce_info_use->ce_pipe_entry;
+				kfree(pce_entry);
+				pce_info_use++;
+			}
+		}
+		kfree(qseecom.ce_info.pfe);
+		qseecom.ce_info.pfe = NULL;
+	}
+	kfree(unit_tbl);
+	kfree(pfde_tbl);
+	return rc;
+}
+
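+/*
+ * Bind a CE info unit to the caller for the requested usage (FDE or PFE)
+ * and return its pipe-pair entries.  A unit already bound to the same
+ * handle is returned again; otherwise the first free unit is claimed.
+ */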
+static int qseecom_get_ce_info(struct qseecom_dev_handle *data,
+				void __user *argp)
+{
+	struct qseecom_ce_info_req req;
+	struct qseecom_ce_info_req *pinfo = &req;
+	int ret = 0;
+	int i;
+	unsigned int entries;
+	struct qseecom_ce_info_use *pce_info_use, *p;
+	int total = 0;
+	bool found = false;
+	struct qseecom_ce_pipe_entry *pce_entry;
+
+	ret = copy_from_user(pinfo, argp,
+				sizeof(struct qseecom_ce_info_req));
+	if (ret) {
+		pr_err("copy_from_user failed\n");
+		/* copy_from_user() returns bytes not copied, not an errno */
+		return -EFAULT;
+	}
+
+	switch (pinfo->usage) {
+	case QSEOS_KM_USAGE_DISK_ENCRYPTION:
+	case QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION:
+	case QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION:
+		if (qseecom.support_fde) {
+			p = qseecom.ce_info.fde;
+			total = qseecom.ce_info.num_fde;
+		} else {
+			pr_err("system does not support fde\n");
+			return -EINVAL;
+		}
+		break;
+	case QSEOS_KM_USAGE_FILE_ENCRYPTION:
+		if (qseecom.support_pfe) {
+			p = qseecom.ce_info.pfe;
+			total = qseecom.ce_info.num_pfe;
+		} else {
+			pr_err("system does not support pfe\n");
+			return -EINVAL;
+		}
+		break;
+	default:
+		pr_err("unsupported usage %d\n", pinfo->usage);
+		return -EINVAL;
+	}
+
+	pce_info_use = NULL;
+	for (i = 0; i < total; i++) {
+		if (!p->alloc)
+			pce_info_use = p;
+		else if (!memcmp(p->handle, pinfo->handle,
+						MAX_CE_INFO_HANDLE_SIZE)) {
+			pce_info_use = p;
+			found = true;
+			break;
+		}
+		p++;
+	}
+
+	if (pce_info_use == NULL)
+		return -EBUSY;
+
+	pinfo->unit_num = pce_info_use->unit_num;
+	if (!pce_info_use->alloc) {
+		pce_info_use->alloc = true;
+		memcpy(pce_info_use->handle,
+			pinfo->handle, MAX_CE_INFO_HANDLE_SIZE);
+	}
+	if (pce_info_use->num_ce_pipe_entries >
+					MAX_CE_PIPE_PAIR_PER_UNIT)
+		entries = MAX_CE_PIPE_PAIR_PER_UNIT;
+	else
+		entries = pce_info_use->num_ce_pipe_entries;
+	pinfo->num_ce_pipe_entries = entries;
+	pce_entry = pce_info_use->ce_pipe_entry;
+	for (i = 0; i < entries; i++, pce_entry++)
+		pinfo->ce_pipe_entry[i] = *pce_entry;
+	for (; i < MAX_CE_PIPE_PAIR_PER_UNIT; i++)
+		pinfo->ce_pipe_entry[i].valid = 0;
+
+	if (copy_to_user(argp, pinfo, sizeof(struct qseecom_ce_info_req))) {
+		pr_err("copy_to_user failed\n");
+		ret = -EFAULT;
+	}
+	return ret;
+}
+
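+/* Release the CE info unit previously bound to the caller's handle. */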
+static int qseecom_free_ce_info(struct qseecom_dev_handle *data,
+				void __user *argp)
+{
+	struct qseecom_ce_info_req req;
+	struct qseecom_ce_info_req *pinfo = &req;
+	int ret = 0;
+	struct qseecom_ce_info_use *p;
+	int total = 0;
+	int i;
+	bool found = false;
+
+	ret = copy_from_user(pinfo, argp,
+				sizeof(struct qseecom_ce_info_req));
+	if (ret)
+		return -EFAULT;
+
+	switch (pinfo->usage) {
+	case QSEOS_KM_USAGE_DISK_ENCRYPTION:
+	case QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION:
+	case QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION:
+		if (qseecom.support_fde) {
+			p = qseecom.ce_info.fde;
+			total = qseecom.ce_info.num_fde;
+		} else {
+			pr_err("system does not support fde\n");
+			return -EINVAL;
+		}
+		break;
+	case QSEOS_KM_USAGE_FILE_ENCRYPTION:
+		if (qseecom.support_pfe) {
+			p = qseecom.ce_info.pfe;
+			total = qseecom.ce_info.num_pfe;
+		} else {
+			pr_err("system does not support pfe\n");
+			return -EINVAL;
+		}
+		break;
+	default:
+		pr_err("unsupported usage %d\n", pinfo->usage);
+		return -EINVAL;
+	}
+
+	for (i = 0; i < total; i++) {
+		if (p->alloc &&
+			!memcmp(p->handle, pinfo->handle,
+					MAX_CE_INFO_HANDLE_SIZE)) {
+			memset(p->handle, 0, MAX_CE_INFO_HANDLE_SIZE);
+			p->alloc = false;
+			found = true;
+			break;
+		}
+		p++;
+	}
+	return ret;
+}
+
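+/*
+ * Read-only variant of qseecom_get_ce_info(): report the unit bound to
+ * the caller's handle without claiming a free one.
+ */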
+static int qseecom_query_ce_info(struct qseecom_dev_handle *data,
+				void __user *argp)
+{
+	struct qseecom_ce_info_req req;
+	struct qseecom_ce_info_req *pinfo = &req;
+	int ret = 0;
+	int i;
+	unsigned int entries;
+	struct qseecom_ce_info_use *pce_info_use, *p;
+	int total = 0;
+	bool found = false;
+	struct qseecom_ce_pipe_entry *pce_entry;
+
+	ret = copy_from_user(pinfo, argp,
+				sizeof(struct qseecom_ce_info_req));
+	if (ret)
+		return -EFAULT;
+
+	switch (pinfo->usage) {
+	case QSEOS_KM_USAGE_DISK_ENCRYPTION:
+	case QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION:
+	case QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION:
+		if (qseecom.support_fde) {
+			p = qseecom.ce_info.fde;
+			total = qseecom.ce_info.num_fde;
+		} else {
+			pr_err("system does not support fde\n");
+			return -EINVAL;
+		}
+		break;
+	case QSEOS_KM_USAGE_FILE_ENCRYPTION:
+		if (qseecom.support_pfe) {
+			p = qseecom.ce_info.pfe;
+			total = qseecom.ce_info.num_pfe;
+		} else {
+			pr_err("system does not support pfe\n");
+			return -EINVAL;
+		}
+		break;
+	default:
+		pr_err("unsupported usage %d\n", pinfo->usage);
+		return -EINVAL;
+	}
+
+	pce_info_use = NULL;
+	pinfo->unit_num = INVALID_CE_INFO_UNIT_NUM;
+	pinfo->num_ce_pipe_entries  = 0;
+	for (i = 0; i < MAX_CE_PIPE_PAIR_PER_UNIT; i++)
+		pinfo->ce_pipe_entry[i].valid = 0;
+
+	for (i = 0; i < total; i++) {
+
+		if (p->alloc && !memcmp(p->handle,
+				pinfo->handle, MAX_CE_INFO_HANDLE_SIZE)) {
+			pce_info_use = p;
+			found = true;
+			break;
+		}
+		p++;
+	}
+	if (!pce_info_use)
+		goto out;
+	pinfo->unit_num = pce_info_use->unit_num;
+	if (pce_info_use->num_ce_pipe_entries >
+					MAX_CE_PIPE_PAIR_PER_UNIT)
+		entries = MAX_CE_PIPE_PAIR_PER_UNIT;
+	else
+		entries = pce_info_use->num_ce_pipe_entries;
+	pinfo->num_ce_pipe_entries = entries;
+	pce_entry = pce_info_use->ce_pipe_entry;
+	for (i = 0; i < entries; i++, pce_entry++)
+		pinfo->ce_pipe_entry[i] = *pce_entry;
+	for (; i < MAX_CE_PIPE_PAIR_PER_UNIT; i++)
+		pinfo->ce_pipe_entry[i].valid = 0;
+out:
+	if (copy_to_user(argp, pinfo, sizeof(struct qseecom_ce_info_req))) {
+		pr_err("copy_to_user failed\n");
+		ret = -EFAULT;
+	}
+	return ret;
+}
+
+/*
+ * Check whitelist feature, and if TZ feature version is < 1.0.0,
+ * then whitelist feature is not supported.
+ */
+static int qseecom_check_whitelist_feature(void)
+{
+	u64 version = 0;
+	int ret = scm_get_feat_version(FEATURE_ID_WHITELIST, &version);
+
+	return (ret == 0) && (version >= MAKE_WHITELIST_VERSION(1, 0, 0));
+}
+
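+/*
+ * Probe: create the qseecom char device, query the QSEE version, set up
+ * ion, clocks and bus scaling from the device tree, and notify TZ of the
+ * secure app region before marking the driver ready.
+ */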
+static int qseecom_probe(struct platform_device *pdev)
+{
+	int rc;
+	int i;
+	uint32_t feature = 10;
+	struct device *class_dev;
+	struct msm_bus_scale_pdata *qseecom_platform_support = NULL;
+	struct qseecom_command_scm_resp resp;
+	struct qseecom_ce_info_use *pce_info_use = NULL;
+
+	qseecom.qsee_bw_count = 0;
+	qseecom.qsee_perf_client = 0;
+	qseecom.qsee_sfpb_bw_count = 0;
+
+	qseecom.qsee.ce_core_clk = NULL;
+	qseecom.qsee.ce_clk = NULL;
+	qseecom.qsee.ce_core_src_clk = NULL;
+	qseecom.qsee.ce_bus_clk = NULL;
+
+	qseecom.cumulative_mode = 0;
+	qseecom.current_mode = INACTIVE;
+	qseecom.support_bus_scaling = false;
+	qseecom.support_fde = false;
+	qseecom.support_pfe = false;
+
+	qseecom.ce_drv.ce_core_clk = NULL;
+	qseecom.ce_drv.ce_clk = NULL;
+	qseecom.ce_drv.ce_core_src_clk = NULL;
+	qseecom.ce_drv.ce_bus_clk = NULL;
+	atomic_set(&qseecom.qseecom_state, QSEECOM_STATE_NOT_READY);
+
+	qseecom.app_block_ref_cnt = 0;
+	init_waitqueue_head(&qseecom.app_block_wq);
+	qseecom.whitelist_support = true;
+
+	rc = alloc_chrdev_region(&qseecom_device_no, 0, 1, QSEECOM_DEV);
+	if (rc < 0) {
+		pr_err("alloc_chrdev_region failed %d\n", rc);
+		return rc;
+	}
+
+	driver_class = class_create(THIS_MODULE, QSEECOM_DEV);
+	if (IS_ERR(driver_class)) {
+		rc = -ENOMEM;
+		pr_err("class_create failed %d\n", rc);
+		goto exit_unreg_chrdev_region;
+	}
+
+	class_dev = device_create(driver_class, NULL, qseecom_device_no, NULL,
+			QSEECOM_DEV);
+	if (IS_ERR(class_dev)) {
+		rc = -ENOMEM;
+		pr_err("class_device_create failed %d\n", rc);
+		goto exit_destroy_class;
+	}
+
+	cdev_init(&qseecom.cdev, &qseecom_fops);
+	qseecom.cdev.owner = THIS_MODULE;
+
+	rc = cdev_add(&qseecom.cdev, MKDEV(MAJOR(qseecom_device_no), 0), 1);
+	if (rc < 0) {
+		pr_err("cdev_add failed %d\n", rc);
+		goto exit_destroy_device;
+	}
+
+	INIT_LIST_HEAD(&qseecom.registered_listener_list_head);
+	spin_lock_init(&qseecom.registered_listener_list_lock);
+	INIT_LIST_HEAD(&qseecom.registered_app_list_head);
+	spin_lock_init(&qseecom.registered_app_list_lock);
+	INIT_LIST_HEAD(&qseecom.registered_kclient_list_head);
+	spin_lock_init(&qseecom.registered_kclient_list_lock);
+	init_waitqueue_head(&qseecom.send_resp_wq);
+	qseecom.send_resp_flag = 0;
+
+	qseecom.qsee_version = QSEEE_VERSION_00;
+	rc = qseecom_scm_call(6, 3, &feature, sizeof(feature),
+		&resp, sizeof(resp));
+	if (rc) {
+		pr_err("Failed to get QSEE version info %d\n", rc);
+		goto exit_del_cdev;
+	}
+	pr_info("qseecom.qsee_version = 0x%x\n", resp.result);
+	qseecom.qsee_version = resp.result;
+	qseecom.qseos_version = QSEOS_VERSION_14;
+	qseecom.commonlib_loaded = false;
+	qseecom.commonlib64_loaded = false;
+	qseecom.pdev = class_dev;
+	/* Create ION msm client */
+	qseecom.ion_clnt = msm_ion_client_create("qseecom-kernel");
+	if (IS_ERR_OR_NULL(qseecom.ion_clnt)) {
+		pr_err("Ion client cannot be created\n");
+
+		if (qseecom.ion_clnt != ERR_PTR(-EPROBE_DEFER))
+			rc = -ENOMEM;
+		else
+			rc = -EPROBE_DEFER;
+		goto exit_del_cdev;
+	}
+
+	/* register client for bus scaling */
+	if (pdev->dev.of_node) {
+		qseecom.pdev->of_node = pdev->dev.of_node;
+		qseecom.support_bus_scaling =
+				of_property_read_bool((&pdev->dev)->of_node,
+						"qcom,support-bus-scaling");
+		rc = qseecom_retrieve_ce_data(pdev);
+		if (rc)
+			goto exit_destroy_ion_client;
+		qseecom.appsbl_qseecom_support =
+				of_property_read_bool((&pdev->dev)->of_node,
+						"qcom,appsbl-qseecom-support");
+		pr_debug("qseecom.appsbl_qseecom_support = 0x%x\n",
+				qseecom.appsbl_qseecom_support);
+
+		qseecom.commonlib64_loaded =
+				of_property_read_bool((&pdev->dev)->of_node,
+						"qcom,commonlib64-loaded-by-uefi");
+		pr_debug("qseecom.commonlib64-loaded-by-uefi = 0x%x\n",
+				qseecom.commonlib64_loaded);
+		qseecom.fde_key_size =
+			of_property_read_bool((&pdev->dev)->of_node,
+						"qcom,fde-key-size");
+		qseecom.no_clock_support =
+				of_property_read_bool((&pdev->dev)->of_node,
+						"qcom,no-clock-support");
+		if (qseecom.no_clock_support)
+			pr_info("qseecom clocks handled by other subsystem\n");
+		else
+			pr_info("qseecom clocks handled by HLOS\n");
+
+		if (of_property_read_u32((&pdev->dev)->of_node,
+					"qcom,qsee-reentrancy-support",
+					&qseecom.qsee_reentrancy_support)) {
+			pr_warn("qsee reentrancy support phase is not defined, setting to default 0\n");
+			qseecom.qsee_reentrancy_support = 0;
+		} else {
+			pr_warn("qseecom.qsee_reentrancy_support = %d\n",
+				qseecom.qsee_reentrancy_support);
+		}
+
+		/*
+		 * The qseecom bus scaling flag can not be enabled when
+		 * crypto clock is not handled by HLOS.
+		 */
+		if (qseecom.no_clock_support && qseecom.support_bus_scaling) {
+			pr_err("support_bus_scaling flag can not be enabled.\n");
+			rc = -EINVAL;
+			goto exit_destroy_ion_client;
+		}
+
+		if (of_property_read_u32((&pdev->dev)->of_node,
+				"qcom,ce-opp-freq",
+				&qseecom.ce_opp_freq_hz)) {
+			pr_debug("CE operating frequency is not defined, setting to default 100MHZ\n");
+			qseecom.ce_opp_freq_hz = QSEE_CE_CLK_100MHZ;
+		}
+		rc = __qseecom_init_clk(CLK_QSEE);
+		if (rc)
+			goto exit_destroy_ion_client;
+
+		if ((qseecom.qsee.instance != qseecom.ce_drv.instance) &&
+				(qseecom.support_pfe || qseecom.support_fde)) {
+			rc = __qseecom_init_clk(CLK_CE_DRV);
+			if (rc) {
+				__qseecom_deinit_clk(CLK_QSEE);
+				goto exit_destroy_ion_client;
+			}
+		} else {
+			struct qseecom_clk *qclk;
+
+			qclk = &qseecom.qsee;
+			qseecom.ce_drv.ce_core_clk = qclk->ce_core_clk;
+			qseecom.ce_drv.ce_clk = qclk->ce_clk;
+			qseecom.ce_drv.ce_core_src_clk = qclk->ce_core_src_clk;
+			qseecom.ce_drv.ce_bus_clk = qclk->ce_bus_clk;
+		}
+
+		qseecom_platform_support = (struct msm_bus_scale_pdata *)
+						msm_bus_cl_get_pdata(pdev);
+		if (qseecom.qsee_version >= (QSEE_VERSION_02) &&
+			(!qseecom.is_apps_region_protected &&
+			!qseecom.appsbl_qseecom_support)) {
+			struct resource *resource = NULL;
+			struct qsee_apps_region_info_ireq req;
+			struct qsee_apps_region_info_64bit_ireq req_64bit;
+			struct qseecom_command_scm_resp resp;
+			void *cmd_buf = NULL;
+			size_t cmd_len;
+
+			resource = platform_get_resource_byname(pdev,
+					IORESOURCE_MEM, "secapp-region");
+			if (resource) {
+				if (qseecom.qsee_version < QSEE_VERSION_40) {
+					req.qsee_cmd_id =
+						QSEOS_APP_REGION_NOTIFICATION;
+					req.addr = (uint32_t)resource->start;
+					req.size = resource_size(resource);
+					cmd_buf = (void *)&req;
+					cmd_len = sizeof(struct
+						qsee_apps_region_info_ireq);
+					pr_warn("secure app region addr=0x%x size=0x%x\n",
+							req.addr, req.size);
+				} else {
+					req_64bit.qsee_cmd_id =
+						QSEOS_APP_REGION_NOTIFICATION;
+					req_64bit.addr = resource->start;
+					req_64bit.size = resource_size(
+							resource);
+					cmd_buf = (void *)&req_64bit;
+					cmd_len = sizeof(struct
+					qsee_apps_region_info_64bit_ireq);
+					pr_warn("secure app region addr=0x%llx size=0x%x\n",
+						req_64bit.addr, req_64bit.size);
+				}
+			} else {
+				pr_err("Fail to get secure app region info\n");
+				rc = -EINVAL;
+				goto exit_deinit_clock;
+			}
+			rc = __qseecom_enable_clk(CLK_QSEE);
+			if (rc) {
+				pr_err("CLK_QSEE enabling failed (%d)\n", rc);
+				rc = -EIO;
+				goto exit_deinit_clock;
+			}
+			rc = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
+					cmd_buf, cmd_len,
+					&resp, sizeof(resp));
+			__qseecom_disable_clk(CLK_QSEE);
+			if (rc || (resp.result != QSEOS_RESULT_SUCCESS)) {
+				pr_err("send secapp reg fail %d resp.res %d\n",
+							rc, resp.result);
+				rc = -EINVAL;
+				goto exit_deinit_clock;
+			}
+		}
+		/*
+		 * By default, appsbl only loads cmnlib.  If an OEM changes
+		 * appsbl to also load cmnlib64 while the cmnlib64 image is
+		 * not present in non_hlos.bin, set
+		 * "qseecom.commonlib64_loaded = true" here as well.
+		 */
+		if (qseecom.is_apps_region_protected ||
+					qseecom.appsbl_qseecom_support)
+			qseecom.commonlib_loaded = true;
+	} else {
+		qseecom_platform_support = (struct msm_bus_scale_pdata *)
+						pdev->dev.platform_data;
+	}
+	if (qseecom.support_bus_scaling) {
+		init_timer(&(qseecom.bw_scale_down_timer));
+		INIT_WORK(&qseecom.bw_inactive_req_ws,
+					qseecom_bw_inactive_req_work);
+		qseecom.bw_scale_down_timer.function =
+				qseecom_scale_bus_bandwidth_timer_callback;
+	}
+	qseecom.timer_running = false;
+	qseecom.qsee_perf_client = msm_bus_scale_register_client(
+					qseecom_platform_support);
+
+	qseecom.whitelist_support = qseecom_check_whitelist_feature();
+	pr_warn("qseecom.whitelist_support = %d\n",
+				qseecom.whitelist_support);
+
+	if (!qseecom.qsee_perf_client)
+		pr_err("Unable to register bus client\n");
+
+	atomic_set(&qseecom.qseecom_state, QSEECOM_STATE_READY);
+	return 0;
+
+exit_deinit_clock:
+	__qseecom_deinit_clk(CLK_QSEE);
+	if ((qseecom.qsee.instance != qseecom.ce_drv.instance) &&
+		(qseecom.support_pfe || qseecom.support_fde))
+		__qseecom_deinit_clk(CLK_CE_DRV);
+exit_destroy_ion_client:
+	if (qseecom.ce_info.fde) {
+		pce_info_use = qseecom.ce_info.fde;
+		for (i = 0; i < qseecom.ce_info.num_fde; i++) {
+			kzfree(pce_info_use->ce_pipe_entry);
+			pce_info_use++;
+		}
+		kfree(qseecom.ce_info.fde);
+	}
+	if (qseecom.ce_info.pfe) {
+		pce_info_use = qseecom.ce_info.pfe;
+		for (i = 0; i < qseecom.ce_info.num_pfe; i++) {
+			kzfree(pce_info_use->ce_pipe_entry);
+			pce_info_use++;
+		}
+		kfree(qseecom.ce_info.pfe);
+	}
+	ion_client_destroy(qseecom.ion_clnt);
+exit_del_cdev:
+	cdev_del(&qseecom.cdev);
+exit_destroy_device:
+	device_destroy(driver_class, qseecom_device_no);
+exit_destroy_class:
+	class_destroy(driver_class);
+exit_unreg_chrdev_region:
+	unregister_chrdev_region(qseecom_device_no, 1);
+	return rc;
+}
+
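+/*
+ * Remove: unload remaining kernel clients and the common library, stop
+ * the bandwidth timer/work, free the CE info tables and undo everything
+ * probe set up.
+ */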
+static int qseecom_remove(struct platform_device *pdev)
+{
+	struct qseecom_registered_kclient_list *kclient = NULL;
+	struct qseecom_registered_kclient_list *kclient_tmp = NULL;
+	unsigned long flags = 0;
+	int ret = 0;
+	int i;
+	struct qseecom_ce_pipe_entry *pce_entry;
+	struct qseecom_ce_info_use *pce_info_use;
+
+	atomic_set(&qseecom.qseecom_state, QSEECOM_STATE_NOT_READY);
+	spin_lock_irqsave(&qseecom.registered_kclient_list_lock, flags);
+
+	list_for_each_entry_safe(kclient, kclient_tmp,
+		&qseecom.registered_kclient_list_head, list) {
+
+		/* Break the loop if client handle is NULL */
+		if (!kclient->handle) {
+			list_del(&kclient->list);
+			kzfree(kclient);
+			break;
+		}
+
+		list_del(&kclient->list);
+		mutex_lock(&app_access_lock);
+		ret = qseecom_unload_app(kclient->handle->dev, false);
+		mutex_unlock(&app_access_lock);
+		if (!ret) {
+			kzfree(kclient->handle->dev);
+			kzfree(kclient->handle);
+			kzfree(kclient);
+		}
+	}
+
+	spin_unlock_irqrestore(&qseecom.registered_kclient_list_lock, flags);
+
+	if (qseecom.qseos_version > QSEEE_VERSION_00)
+		qseecom_unload_commonlib_image();
+
+	if (qseecom.qsee_perf_client)
+		msm_bus_scale_client_update_request(qseecom.qsee_perf_client,
+									0);
+	if (pdev->dev.platform_data != NULL)
+		msm_bus_scale_unregister_client(qseecom.qsee_perf_client);
+
+	if (qseecom.support_bus_scaling) {
+		cancel_work_sync(&qseecom.bw_inactive_req_ws);
+		del_timer_sync(&qseecom.bw_scale_down_timer);
+	}
+
+	if (qseecom.ce_info.fde) {
+		pce_info_use = qseecom.ce_info.fde;
+		for (i = 0; i < qseecom.ce_info.num_fde; i++) {
+			pce_entry = pce_info_use->ce_pipe_entry;
+			kfree(pce_entry);
+			pce_info_use++;
+		}
+	}
+	kfree(qseecom.ce_info.fde);
+	if (qseecom.ce_info.pfe) {
+		pce_info_use = qseecom.ce_info.pfe;
+		for (i = 0; i < qseecom.ce_info.num_pfe; i++) {
+			pce_entry = pce_info_use->ce_pipe_entry;
+			kfree(pce_entry);
+			pce_info_use++;
+		}
+	}
+	kfree(qseecom.ce_info.pfe);
+
+	/* register client for bus scaling */
+	if (pdev->dev.of_node) {
+		__qseecom_deinit_clk(CLK_QSEE);
+		if ((qseecom.qsee.instance != qseecom.ce_drv.instance) &&
+				(qseecom.support_pfe || qseecom.support_fde))
+			__qseecom_deinit_clk(CLK_CE_DRV);
+	}
+
+	ion_client_destroy(qseecom.ion_clnt);
+
+	cdev_del(&qseecom.cdev);
+
+	device_destroy(driver_class, qseecom_device_no);
+
+	class_destroy(driver_class);
+
+	unregister_chrdev_region(qseecom_device_no, 1);
+
+	return ret;
+}
+
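+/*
+ * Suspend: move the driver state to QSEECOM_STATE_SUSPEND, drop the bus
+ * vote and gate the CE clocks if a client still holds them.
+ */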
+static int qseecom_suspend(struct platform_device *pdev, pm_message_t state)
+{
+	int ret = 0;
+	struct qseecom_clk *qclk = &qseecom.qsee;
+
+	atomic_set(&qseecom.qseecom_state, QSEECOM_STATE_SUSPEND);
+	if (qseecom.no_clock_support)
+		return 0;
+
+	mutex_lock(&qsee_bw_mutex);
+	mutex_lock(&clk_access_lock);
+
+	if (qseecom.current_mode != INACTIVE) {
+		ret = msm_bus_scale_client_update_request(
+			qseecom.qsee_perf_client, INACTIVE);
+		if (ret)
+			pr_err("Fail to scale down bus\n");
+		else
+			qseecom.current_mode = INACTIVE;
+	}
+
+	if (qclk->clk_access_cnt) {
+		if (qclk->ce_clk != NULL)
+			clk_disable_unprepare(qclk->ce_clk);
+		if (qclk->ce_core_clk != NULL)
+			clk_disable_unprepare(qclk->ce_core_clk);
+		if (qclk->ce_bus_clk != NULL)
+			clk_disable_unprepare(qclk->ce_bus_clk);
+	}
+
+	del_timer_sync(&(qseecom.bw_scale_down_timer));
+	qseecom.timer_running = false;
+
+	mutex_unlock(&clk_access_lock);
+	mutex_unlock(&qsee_bw_mutex);
+	cancel_work_sync(&qseecom.bw_inactive_req_ws);
+
+	return 0;
+}
+
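+/*
+ * Resume: restore the pre-suspend bus vote, re-enable the CE clocks if
+ * they were held across suspend, rearm the scale-down timer and mark the
+ * driver ready again.
+ */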
+static int qseecom_resume(struct platform_device *pdev)
+{
+	int mode = 0;
+	int ret = 0;
+	struct qseecom_clk *qclk = &qseecom.qsee;
+
+	if (qseecom.no_clock_support)
+		goto exit;
+
+	mutex_lock(&qsee_bw_mutex);
+	mutex_lock(&clk_access_lock);
+	if (qseecom.cumulative_mode >= HIGH)
+		mode = HIGH;
+	else
+		mode = qseecom.cumulative_mode;
+
+	if (qseecom.cumulative_mode != INACTIVE) {
+		ret = msm_bus_scale_client_update_request(
+			qseecom.qsee_perf_client, mode);
+		if (ret)
+			pr_err("Fail to scale up bus to %d\n", mode);
+		else
+			qseecom.current_mode = mode;
+	}
+
+	if (qclk->clk_access_cnt) {
+		if (qclk->ce_core_clk != NULL) {
+			ret = clk_prepare_enable(qclk->ce_core_clk);
+			if (ret) {
+				pr_err("Unable to enable/prep CE core clk\n");
+				qclk->clk_access_cnt = 0;
+				goto err;
+			}
+		}
+		if (qclk->ce_clk != NULL) {
+			ret = clk_prepare_enable(qclk->ce_clk);
+			if (ret) {
+				pr_err("Unable to enable/prep CE iface clk\n");
+				qclk->clk_access_cnt = 0;
+				goto ce_clk_err;
+			}
+		}
+		if (qclk->ce_bus_clk != NULL) {
+			ret = clk_prepare_enable(qclk->ce_bus_clk);
+			if (ret) {
+				pr_err("Unable to enable/prep CE bus clk\n");
+				qclk->clk_access_cnt = 0;
+				goto ce_bus_clk_err;
+			}
+		}
+	}
+
+	if (qclk->clk_access_cnt || qseecom.cumulative_mode) {
+		qseecom.bw_scale_down_timer.expires = jiffies +
+			msecs_to_jiffies(QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);
+		mod_timer(&(qseecom.bw_scale_down_timer),
+				qseecom.bw_scale_down_timer.expires);
+		qseecom.timer_running = true;
+	}
+
+	mutex_unlock(&clk_access_lock);
+	mutex_unlock(&qsee_bw_mutex);
+	goto exit;
+
+ce_bus_clk_err:
+	if (qclk->ce_clk)
+		clk_disable_unprepare(qclk->ce_clk);
+ce_clk_err:
+	if (qclk->ce_core_clk)
+		clk_disable_unprepare(qclk->ce_core_clk);
+err:
+	mutex_unlock(&clk_access_lock);
+	mutex_unlock(&qsee_bw_mutex);
+	ret = -EIO;
+exit:
+	atomic_set(&qseecom.qseecom_state, QSEECOM_STATE_READY);
+	return ret;
+}
+
+static const struct of_device_id qseecom_match[] = {
+	{
+		.compatible = "qcom,qseecom",
+	},
+	{}
+};
+
+static struct platform_driver qseecom_plat_driver = {
+	.probe = qseecom_probe,
+	.remove = qseecom_remove,
+	.suspend = qseecom_suspend,
+	.resume = qseecom_resume,
+	.driver = {
+		.name = "qseecom",
+		.owner = THIS_MODULE,
+		.of_match_table = qseecom_match,
+	},
+};
+
+static int qseecom_init(void)
+{
+	return platform_driver_register(&qseecom_plat_driver);
+}
+
+static void qseecom_exit(void)
+{
+	platform_driver_unregister(&qseecom_plat_driver);
+}
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Qualcomm Secure Execution Environment Communicator");
+
+module_init(qseecom_init);
+module_exit(qseecom_exit);
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/misc/qseecom_kernel.h	2019-01-22 16:16:24.751257708 +0100
@@ -0,0 +1,44 @@
+/* Copyright (c) 2012-2013, 2016-2017 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __QSEECOM_KERNEL_H_
+#define __QSEECOM_KERNEL_H_
+
+#include <linux/types.h>
+#include <soc/qcom/scm.h>
+
+#define QSEECOM_ALIGN_SIZE	0x40
+#define QSEECOM_ALIGN_MASK	(QSEECOM_ALIGN_SIZE - 1)
+#define QSEECOM_ALIGN(x)	\
+	(((x) + QSEECOM_ALIGN_MASK) & (~QSEECOM_ALIGN_MASK))
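+/*
+ * For illustration: with QSEECOM_ALIGN_SIZE = 0x40,
+ * QSEECOM_ALIGN(0x41) = (0x41 + 0x3f) & ~0x3f = 0x80, i.e. sizes are
+ * rounded up to the next 64-byte boundary.
+ */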
+
+/*
+ * struct qseecom_handle -
+ *      Handle to the qseecom device for kernel clients
+ * @dev - qseecom device handle
+ * @sbuf - shared buffer pointer
+ * @sbuf_len - shared buffer size
+ */
+struct qseecom_handle {
+	void *dev; /* in/out */
+	unsigned char *sbuf; /* in/out */
+	uint32_t sbuf_len; /* in/out */
+};
+
+int qseecom_start_app(struct qseecom_handle **handle,
+						char *app_name, uint32_t size);
+int qseecom_shutdown_app(struct qseecom_handle **handle);
+int qseecom_send_command(struct qseecom_handle *handle, void *send_buf,
+			uint32_t sbuf_len, void *resp_buf, uint32_t rbuf_len);
+int qseecom_set_bandwidth(struct qseecom_handle *handle, bool high);
+int qseecom_process_listener_from_smcinvoke(struct scm_desc *desc);
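+
+/*
+ * Usage sketch (illustrative only: the app name, buffer sizes and the
+ * request/response layout below are made-up values, and error handling
+ * is elided):
+ *
+ *	struct qseecom_handle *handle = NULL;
+ *
+ *	if (!qseecom_start_app(&handle, "sampleapp", 1024)) {
+ *		void *req = handle->sbuf;
+ *		void *rsp = handle->sbuf + QSEECOM_ALIGN(256);
+ *
+ *		qseecom_send_command(handle, req, 256, rsp, 256);
+ *		qseecom_shutdown_app(&handle);
+ *	}
+ */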
+
+#endif /* __QSEECOM_KERNEL_H_ */
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/misc/qseecom_legacy.h	2019-01-22 16:16:24.751257708 +0100
@@ -0,0 +1,79 @@
+/* Qualcomm Secure Execution Environment Communicator (QSEECOM) driver
+ *
+ * Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __QSEECOM_LEGACY_H_
+#define __QSEECOM_LEGACY_H_
+
+#include <linux/types.h>
+
+#define TZ_SCHED_CMD_ID_REGISTER_LISTENER    0x04
+
+enum tz_sched_cmd_type {
+	TZ_SCHED_CMD_INVALID = 0,
+	TZ_SCHED_CMD_NEW,      /* New TZ Scheduler Command */
+	TZ_SCHED_CMD_PENDING,  /* Pending cmd...sched will restore stack */
+	TZ_SCHED_CMD_COMPLETE, /* TZ sched command is complete */
+	TZ_SCHED_CMD_MAX     = 0x7FFFFFFF
+};
+
+enum tz_sched_cmd_status {
+	TZ_SCHED_STATUS_INCOMPLETE = 0,
+	TZ_SCHED_STATUS_COMPLETE,
+	TZ_SCHED_STATUS_MAX  = 0x7FFFFFFF
+};
+/* Command structure for initializing shared buffers */
+__packed struct qse_pr_init_sb_req_s {
+	/* First 4 bytes should always be command id */
+	uint32_t                  pr_cmd;
+	/* Pointer to the physical location of sb buffer */
+	uint32_t                  sb_ptr;
+	/* length of shared buffer */
+	uint32_t                  sb_len;
+	uint32_t                  listener_id;
+};
+
+__packed struct qse_pr_init_sb_rsp_s {
+	/* First 4 bytes should always be command id */
+	uint32_t                  pr_cmd;
+	/* Return code: 0 for success, appropriate error code otherwise */
+	int32_t                   ret;
+};
+
+/*
+ * struct qseecom_command - QSEECom command buffer
+ * @cmd_type: value from enum tz_sched_cmd_type
+ * @sb_in_cmd_addr: points to physical location of command
+ *                buffer
+ * @sb_in_cmd_len: length of command buffer
+ */
+__packed struct qseecom_command {
+	uint32_t               cmd_type;
+	uint8_t                *sb_in_cmd_addr;
+	uint32_t               sb_in_cmd_len;
+};
+
+/*
+ * struct qseecom_response - QSEECom response buffer
+ * @cmd_status: value from enum tz_sched_cmd_status
+ * @sb_in_rsp_addr: points to physical location of response
+ *                buffer
+ * @sb_in_rsp_len: length of response buffer
+ */
+__packed struct qseecom_response {
+	uint32_t                 cmd_status;
+	uint8_t                  *sb_in_rsp_addr;
+	uint32_t                 sb_in_rsp_len;
+};
+
+#endif /* __QSEECOM_LEGACY_H_ */
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/mmc/host/cmdq_hci.h	2019-01-22 16:16:24.779257962 +0100
@@ -0,0 +1,251 @@
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#ifndef LINUX_MMC_CQ_HCI_H
+#define LINUX_MMC_CQ_HCI_H
+#include <linux/mmc/core.h>
+
+/* registers */
+/* version */
+#define CQVER		0x00
+/* capabilities */
+#define CQCAP		0x04
+#define CQCAP_CS	(1 << 28)
+/* configuration */
+#define CQCFG		0x08
+#define CQ_DCMD		0x00001000
+#define CQ_TASK_DESC_SZ 0x00000100
+#define CQ_ENABLE	0x00000001
+#define CQ_ICE_ENABLE	0x00000002
+
+/* control */
+#define CQCTL		0x0C
+#define CLEAR_ALL_TASKS 0x00000100
+#define HALT		0x00000001
+
+/* interrupt status */
+#define CQIS		0x10
+#define CQIS_HAC	(1 << 0)
+#define CQIS_TCC	(1 << 1)
+#define CQIS_RED	(1 << 2)
+#define CQIS_TCL	(1 << 3)
+#define CQIS_GCE	(1 << 4)
+#define CQIS_ICCE	(1 << 5)
+
+/* interrupt status enable */
+#define CQISTE		0x14
+
+/* interrupt signal enable */
+#define CQISGE		0x18
+
+/* interrupt coalescing */
+#define CQIC		0x1C
+#define CQIC_ENABLE	(1 << 31)
+#define CQIC_RESET	(1 << 16)
+#define CQIC_ICCTHWEN	(1 << 15)
+#define CQIC_ICCTH(x)	((x & 0x1F) << 8)
+#define CQIC_ICTOVALWEN (1 << 7)
+#define CQIC_ICTOVAL(x) (x & 0x7F)
+
+/* task list base address */
+#define CQTDLBA		0x20
+
+/* task list base address upper */
+#define CQTDLBAU	0x24
+
+/* door-bell */
+#define CQTDBR		0x28
+
+/* task completion notification */
+#define CQTCN		0x2C
+
+/* device queue status */
+#define CQDQS		0x30
+
+/* device pending tasks */
+#define CQDPT		0x34
+
+/* task clear */
+#define CQTCLR		0x38
+
+/* send status config 1 */
+#define CQSSC1		0x40
+/*
+ * Value n means CQE would send CMD13 during the transfer of data block
+ * BLOCK_CNT-n
+ */
+#define SEND_QSR_INTERVAL 0x70001
+
+/* send status config 2 */
+#define CQSSC2		0x44
+
+/* response for dcmd */
+#define CQCRDCT		0x48
+
+/* response mode error mask */
+#define CQRMEM		0x50
+#define CQ_EXCEPTION	(1 << 6)
+
+/* task error info */
+#define CQTERRI		0x54
+
+/* CQTERRI bit fields */
+#define CQ_RMECI	0x1F
+#define CQ_RMETI	(0x1F << 8)
+#define CQ_RMEFV	(1 << 15)
+#define CQ_DTECI	(0x3F << 16)
+#define CQ_DTETI	(0x1F << 24)
+#define CQ_DTEFV	(1 << 31)
+
+#define GET_CMD_ERR_TAG(__r__) ((__r__ & CQ_RMETI) >> 8)
+#define GET_DAT_ERR_TAG(__r__) ((__r__ & CQ_DTETI) >> 24)
+
+/* command response index */
+#define CQCRI		0x58
+
+/* command response argument */
+#define CQCRA		0x5C
+
+#define CQ_INT_ALL	0x3F
+#define CQIC_DEFAULT_ICCTH 31
+#define CQIC_DEFAULT_ICTOVAL 1
+
+/* attribute fields */
+#define VALID(x)	((x & 1) << 0)
+#define END(x)		((x & 1) << 1)
+#define INT(x)		((x & 1) << 2)
+#define ACT(x)		((x & 0x7) << 3)
+
+/* data command task descriptor fields */
+#define FORCED_PROG(x)	((x & 1) << 6)
+#define CONTEXT(x)	((x & 0xF) << 7)
+#define DATA_TAG(x)	((x & 1) << 11)
+#define DATA_DIR(x)	((x & 1) << 12)
+#define PRIORITY(x)	((x & 1) << 13)
+#define QBAR(x)		((x & 1) << 14)
+#define REL_WRITE(x)	((x & 1) << 15)
+#define BLK_COUNT(x)	((x & 0xFFFF) << 16)
+#define BLK_ADDR(x)	(((u64)(x) & 0xFFFFFFFF) << 32)
+
+/* direct command task descriptor fields */
+#define CMD_INDEX(x)	((x & 0x3F) << 16)
+#define CMD_TIMING(x)	((x & 1) << 22)
+#define RESP_TYPE(x)	((x & 0x3) << 23)
+
+/* transfer descriptor fields */
+#define DAT_LENGTH(x)	((x & 0xFFFF) << 16)
+#define DAT_ADDR_LO(x)	(((u64)(x) & 0xFFFFFFFF) << 32)
+#define DAT_ADDR_HI(x)	((x & 0xFFFFFFFF) << 0)
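+
+/*
+ * For illustration only (the variables below are hypothetical), a data
+ * task's parameter word could be composed from these fields as:
+ *
+ *	u64 desc = VALID(1) | END(1) | INT(1) | ACT(0x5) |
+ *		   DATA_DIR(is_read) | BLK_COUNT(nr_blocks) |
+ *		   BLK_ADDR((u64)lba);
+ */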
+
+/*
+ * Updated CQ vendor-specific register address for SDHC v5.0 onwards;
+ * older controllers use CQ_VENDOR_CFG.
+ */
+#define CQ_V5_VENDOR_CFG	0x900
+#define CQ_VENDOR_CFG	0x100
+#define CMDQ_SEND_STATUS_TRIGGER (1 << 31)
+
+#define CQ_TASK_DESC_TASK_PARAMS_SIZE	8
+#define CQ_TASK_DESC_ICE_PARAMS_SIZE	8
+
+struct task_history {
+	u64 task;
+	bool is_dcmd;
+};
+
+struct cmdq_host {
+	const struct cmdq_host_ops *ops;
+	void __iomem *mmio;
+	struct mmc_host *mmc;
+
+	/* 64 bit DMA */
+	bool dma64;
+	int num_slots;
+
+	u32 dcmd_slot;
+	u32 caps;
+#define CMDQ_TASK_DESC_SZ_128 0x1
+#define CMDQ_CAP_CRYPTO_SUPPORT 0x2
+
+	u32 quirks;
+#define CMDQ_QUIRK_SHORT_TXFR_DESC_SZ 0x1
+#define CMDQ_QUIRK_NO_DCMD	0x2
+
+	bool enabled;
+	bool halted;
+	bool init_done;
+	bool offset_changed;
+
+	u8 *desc_base;
+
+	/* total descriptor size */
+	u8 slot_sz;
+
+	/* 64/128 bit depends on CQCFG */
+	u8 task_desc_len;
+
+	/* 64 bit on 32-bit arch, 128 bit on 64-bit */
+	u8 link_desc_len;
+
+	u8 *trans_desc_base;
+	/* same length as transfer descriptor */
+	u8 trans_desc_len;
+
+	dma_addr_t desc_dma_base;
+	dma_addr_t trans_desc_dma_base;
+
+	struct task_history *thist;
+	u8 thist_idx;
+
+	struct completion halt_comp;
+	struct mmc_request **mrq_slot;
+	void *private;
+};
+
+struct cmdq_host_ops {
+	void (*set_transfer_params)(struct mmc_host *mmc);
+	void (*set_data_timeout)(struct mmc_host *mmc, u32 val);
+	void (*clear_set_irqs)(struct mmc_host *mmc, bool clear);
+	void (*set_block_size)(struct mmc_host *mmc);
+	void (*dump_vendor_regs)(struct mmc_host *mmc);
+	void (*write_l)(struct cmdq_host *host, u32 val, int reg);
+	u32 (*read_l)(struct cmdq_host *host, int reg);
+	void (*clear_set_dumpregs)(struct mmc_host *mmc, bool set);
+	void (*enhanced_strobe_mask)(struct mmc_host *mmc, bool set);
+	int (*reset)(struct mmc_host *mmc);
+	int (*crypto_cfg)(struct mmc_host *mmc, struct mmc_request *mrq,
+				u32 slot, u64 *ice_ctx);
+	int (*crypto_cfg_end)(struct mmc_host *mmc, struct mmc_request *mrq);
+	void (*crypto_cfg_reset)(struct mmc_host *mmc, unsigned int slot);
+	void (*post_cqe_halt)(struct mmc_host *mmc);
+};
+
+static inline void cmdq_writel(struct cmdq_host *host, u32 val, int reg)
+{
+	if (unlikely(host->ops && host->ops->write_l))
+		host->ops->write_l(host, val, reg);
+	else
+		writel_relaxed(val, host->mmio + reg);
+}
+
+static inline u32 cmdq_readl(struct cmdq_host *host, int reg)
+{
+	if (unlikely(host->ops && host->ops->read_l))
+		return host->ops->read_l(host, reg);
+	else
+		return readl_relaxed(host->mmio + reg);
+}
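+
+/*
+ * A host driver may hook these accessors through cmdq_host_ops, e.g.
+ * to apply a vendor register offset (sketch only; the names below are
+ * hypothetical):
+ *
+ *	static u32 my_cmdq_read_l(struct cmdq_host *host, int reg)
+ *	{
+ *		return readl_relaxed(host->mmio + reg + MY_CQE_OFFSET);
+ *	}
+ *
+ *	static const struct cmdq_host_ops my_cmdq_ops = {
+ *		.read_l = my_cmdq_read_l,
+ *	};
+ */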
+
+extern irqreturn_t cmdq_irq(struct mmc_host *mmc, int err);
+extern int cmdq_init(struct cmdq_host *cq_host, struct mmc_host *mmc,
+		     bool dma64);
+extern struct cmdq_host *cmdq_pltfm_init(struct platform_device *pdev);
+#endif
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/mmc/host/sdhci-msm.h	2019-10-29 09:26:24.073207386 +0100
@@ -0,0 +1,245 @@
+/*
+ * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __SDHCI_MSM_H__
+#define __SDHCI_MSM_H__
+
+#include <linux/mmc/mmc.h>
+#include <linux/pm_qos.h>
+#include "sdhci-pltfm.h"
+
+/* This structure keeps information per regulator */
+struct sdhci_msm_reg_data {
+	/* voltage regulator handle */
+	struct regulator *reg;
+	/* regulator name */
+	const char *name;
+	/* voltage level to be set */
+	u32 low_vol_level;
+	u32 high_vol_level;
+	/* Load values for low power and high power mode */
+	u32 lpm_uA;
+	u32 hpm_uA;
+
+	/* is this regulator enabled? */
+	bool is_enabled;
+	/* does this regulator need to be always on? */
+	bool is_always_on;
+	/* is low power mode setting required for this regulator? */
+	bool lpm_sup;
+	bool set_voltage_sup;
+};
+
+/*
+ * This structure keeps information for all the
+ * regulators required for a SDCC slot.
+ */
+struct sdhci_msm_slot_reg_data {
+	/* keeps VDD/VCC regulator info */
+	struct sdhci_msm_reg_data *vdd_data;
+	/* keeps VDD IO regulator info */
+	struct sdhci_msm_reg_data *vdd_io_data;
+};
+
+struct sdhci_msm_gpio {
+	u32 no;
+	const char *name;
+	bool is_enabled;
+};
+
+struct sdhci_msm_gpio_data {
+	struct sdhci_msm_gpio *gpio;
+	u8 size;
+};
+
+struct sdhci_msm_pin_data {
+	/*
+	 * = 1 if controller pins are using gpios
+	 * = 0 if controller has dedicated MSM pads
+	 */
+	u8 is_gpio;
+	struct sdhci_msm_gpio_data *gpio_data;
+};
+
+struct sdhci_pinctrl_data {
+	struct pinctrl          *pctrl;
+	struct pinctrl_state    *pins_active;
+	struct pinctrl_state    *pins_sleep;
+};
+
+struct sdhci_msm_bus_voting_data {
+	struct msm_bus_scale_pdata *bus_pdata;
+	unsigned int *bw_vecs;
+	unsigned int bw_vecs_size;
+};
+
+struct sdhci_msm_cpu_group_map {
+	int nr_groups;
+	cpumask_t *mask;
+};
+
+struct sdhci_msm_pm_qos_latency {
+	s32 latency[SDHCI_POWER_POLICY_NUM];
+};
+
+struct sdhci_msm_pm_qos_data {
+	struct sdhci_msm_cpu_group_map cpu_group_map;
+	enum pm_qos_req_type irq_req_type;
+	int irq_cpu;
+	struct sdhci_msm_pm_qos_latency irq_latency;
+	struct sdhci_msm_pm_qos_latency *cmdq_latency;
+	struct sdhci_msm_pm_qos_latency *latency;
+	bool irq_valid;
+	bool cmdq_valid;
+	bool legacy_valid;
+};
+
+/*
+ * PM QoS for group voting management - each cpu group defined is associated
+ * with 1 instance of this structure.
+ */
+struct sdhci_msm_pm_qos_group {
+	struct pm_qos_request req;
+	struct delayed_work unvote_work;
+	atomic_t counter;
+	s32 latency;
+};
+
+/* PM QoS HW IRQ voting */
+struct sdhci_msm_pm_qos_irq {
+	struct pm_qos_request req;
+	struct delayed_work unvote_work;
+	struct device_attribute enable_attr;
+	struct device_attribute status_attr;
+	atomic_t counter;
+	s32 latency;
+	bool enabled;
+};
+
+struct sdhci_msm_pltfm_data {
+	/* Supported UHS-I Modes */
+	u32 caps;
+
+	/* More capabilities */
+	u32 caps2;
+
+	unsigned long mmc_bus_width;
+	struct sdhci_msm_slot_reg_data *vreg_data;
+	bool nonremovable;
+	bool nonhotplug;
+	bool largeaddressbus;
+	bool pin_cfg_sts;
+	struct sdhci_msm_pin_data *pin_data;
+	struct sdhci_pinctrl_data *pctrl_data;
+	int status_gpio; /* card detection GPIO that is configured as IRQ */
+	struct sdhci_msm_bus_voting_data *voting_data;
+	u32 *sup_clk_table;
+	unsigned char sup_clk_cnt;
+	int sdiowakeup_irq;
+	u32 *sup_ice_clk_table;
+	unsigned char sup_ice_clk_cnt;
+	u32 ice_clk_max;
+	u32 ice_clk_min;
+	struct sdhci_msm_pm_qos_data pm_qos_data;
+	bool sdr104_wa;
+};
+
+struct sdhci_msm_bus_vote {
+	uint32_t client_handle;
+	uint32_t curr_vote;
+	int min_bw_vote;
+	int max_bw_vote;
+	bool is_max_bw_needed;
+	struct delayed_work vote_work;
+	struct device_attribute max_bus_bw;
+};
+
+struct sdhci_msm_ice_data {
+	struct qcom_ice_variant_ops *vops;
+	struct platform_device *pdev;
+	int state;
+};
+
+struct sdhci_msm_debug_data {
+	struct mmc_host copy_mmc;
+	struct mmc_card copy_card;
+	struct sdhci_host copy_host;
+};
+
+struct sdhci_msm_host {
+	struct platform_device	*pdev;
+	void __iomem *core_mem;    /* MSM SDCC mapped address */
+	void __iomem *cryptoio;    /* ICE HCI mapped address */
+	bool ice_hci_support;
+	int	pwr_irq;	/* power irq */
+	struct clk	 *clk;     /* main SD/MMC bus clock */
+	struct clk	 *pclk;    /* SDHC peripheral bus clock */
+	struct clk	 *bus_clk; /* SDHC bus voter clock */
+	struct clk	 *ff_clk; /* CDC calibration fixed feedback clock */
+	struct clk	 *sleep_clk; /* CDC calibration sleep clock */
+	struct clk	 *ice_clk; /* SDHC peripheral ICE clock */
+	atomic_t clks_on; /* Set if clocks are enabled */
+	struct sdhci_msm_pltfm_data *pdata;
+	struct mmc_host  *mmc;
+	struct sdhci_msm_debug_data cached_data;
+	struct sdhci_pltfm_data sdhci_msm_pdata;
+	u32 curr_pwr_state;
+	u32 curr_io_level;
+	struct completion pwr_irq_completion;
+	struct sdhci_msm_bus_vote msm_bus_vote;
+	struct device_attribute	polling;
+	u32 clk_rate; /* Keeps track of current clock rate that is set */
+	bool tuning_done;
+	bool calibration_done;
+	u8 saved_tuning_phase;
+	bool en_auto_cmd21;
+	struct device_attribute auto_cmd21_attr;
+	bool is_sdiowakeup_enabled;
+	bool sdio_pending_processing;
+	atomic_t controller_clock;
+	bool use_cdclp533;
+	bool use_updated_dll_reset;
+	bool use_14lpp_dll;
+	bool enhanced_strobe;
+	bool rclk_delay_fix;
+	u32 caps_0;
+	struct sdhci_msm_ice_data ice;
+	u32 ice_clk_rate;
+	struct sdhci_msm_pm_qos_group *pm_qos;
+	int pm_qos_prev_cpu;
+	struct device_attribute pm_qos_group_enable_attr;
+	struct device_attribute pm_qos_group_status_attr;
+	bool pm_qos_group_enable;
+	struct sdhci_msm_pm_qos_irq pm_qos_irq;
+	bool tuning_in_progress;
+	bool mci_removed;
+	const struct sdhci_msm_offset *offset;
+	bool core_3_0v_support;
+	bool pltfm_init_done;
+};
+
+extern char *saved_command_line;
+
+void sdhci_msm_pm_qos_irq_init(struct sdhci_host *host);
+void sdhci_msm_pm_qos_irq_vote(struct sdhci_host *host);
+void sdhci_msm_pm_qos_irq_unvote(struct sdhci_host *host, bool async);
+
+void sdhci_msm_pm_qos_cpu_init(struct sdhci_host *host,
+		struct sdhci_msm_pm_qos_latency *latency);
+void sdhci_msm_pm_qos_cpu_vote(struct sdhci_host *host,
+		struct sdhci_msm_pm_qos_latency *latency, int cpu);
+bool sdhci_msm_pm_qos_cpu_unvote(struct sdhci_host *host, int cpu, bool async);
+
+
+#endif /* __SDHCI_MSM_H__ */
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/mmc/host/sdhci-msm-ice.h	2019-01-22 16:16:24.787258034 +0100
@@ -0,0 +1,174 @@
+/*
+ * Copyright (c) 2015, 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __SDHCI_MSM_ICE_H__
+#define __SDHCI_MSM_ICE_H__
+
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/blkdev.h>
+#include <crypto/ice.h>
+
+#include "sdhci-msm.h"
+
+#define SDHC_MSM_CRYPTO_LABEL "sdhc-msm-crypto"
+/* Timeout waiting for ICE initialization, which requires TZ access */
+#define SDHCI_MSM_ICE_COMPLETION_TIMEOUT_MS	500
+
+/*
+ * SDHCI host controller ICE registers. There are n [0..31]
+ * of each of these registers
+ */
+#define NUM_SDHCI_MSM_ICE_CTRL_INFO_n_REGS	32
+
+#define CORE_VENDOR_SPEC_ICE_CTRL		0x300
+#define CORE_VENDOR_SPEC_ICE_CTRL_INFO_1_n	0x304
+#define CORE_VENDOR_SPEC_ICE_CTRL_INFO_2_n	0x308
+#define CORE_VENDOR_SPEC_ICE_CTRL_INFO_3_n	0x30C
+
+/* ICE 3.0 registers added to the CQ (cmdq) register space */
+#define ICE_CQ_CAPABILITIES	0x04
+#define ICE_HCI_SUPPORT		(1 << 28)
+#define ICE_CQ_CONFIG		0x08
+#define CRYPTO_GENERAL_ENABLE	(1 << 1)
+#define ICE_NONCQ_CRYPTO_PARAMS	0x70
+#define ICE_NONCQ_CRYPTO_DUN	0x74
+
+/* ICE 3.0 registers added to the HC register space */
+#define HC_VENDOR_SPECIFIC_FUNC4	0x260
+#define DISABLE_CRYPTO			(1 << 15)
+#define HC_VENDOR_SPECIFIC_ICE_CTRL	0x800
+#define ICE_SW_RST_EN			(1 << 0)
+
+/* SDHCI MSM ICE CTRL Info register offset */
+enum {
+	OFFSET_SDHCI_MSM_ICE_CTRL_INFO_BYPASS     = 0,
+	OFFSET_SDHCI_MSM_ICE_CTRL_INFO_KEY_INDEX  = 1,
+	OFFSET_SDHCI_MSM_ICE_CTRL_INFO_CDU        = 6,
+	OFFSET_SDHCI_MSM_ICE_HCI_PARAM_CCI	  = 0,
+	OFFSET_SDHCI_MSM_ICE_HCI_PARAM_CE	  = 8,
+};
+
+/* SDHCI MSM ICE CTRL Info register masks */
+enum {
+	MASK_SDHCI_MSM_ICE_CTRL_INFO_BYPASS     = 0x1,
+	MASK_SDHCI_MSM_ICE_CTRL_INFO_KEY_INDEX  = 0x1F,
+	MASK_SDHCI_MSM_ICE_CTRL_INFO_CDU        = 0x7,
+	MASK_SDHCI_MSM_ICE_HCI_PARAM_CE		= 0x1,
+	MASK_SDHCI_MSM_ICE_HCI_PARAM_CCI	= 0xff
+};
+
+/* SDHCI MSM ICE encryption/decryption bypass state */
+enum {
+	SDHCI_MSM_ICE_DISABLE_BYPASS  = 0,
+	SDHCI_MSM_ICE_ENABLE_BYPASS = 1,
+};
+
+/* SDHCI MSM ICE Crypto Data Unit of target DUN of Transfer Request */
+enum {
+	SDHCI_MSM_ICE_TR_DATA_UNIT_512_B          = 0,
+	SDHCI_MSM_ICE_TR_DATA_UNIT_1_KB           = 1,
+	SDHCI_MSM_ICE_TR_DATA_UNIT_2_KB           = 2,
+	SDHCI_MSM_ICE_TR_DATA_UNIT_4_KB           = 3,
+	SDHCI_MSM_ICE_TR_DATA_UNIT_8_KB           = 4,
+	SDHCI_MSM_ICE_TR_DATA_UNIT_16_KB          = 5,
+	SDHCI_MSM_ICE_TR_DATA_UNIT_32_KB          = 6,
+	SDHCI_MSM_ICE_TR_DATA_UNIT_64_KB          = 7,
+};
+
+/* SDHCI MSM ICE internal state */
+enum {
+	SDHCI_MSM_ICE_STATE_DISABLED   = 0,
+	SDHCI_MSM_ICE_STATE_ACTIVE     = 1,
+	SDHCI_MSM_ICE_STATE_SUSPENDED  = 2,
+};
+
+/* crypto context fields in cmdq data command task descriptor */
+#define DATA_UNIT_NUM(x)	(((u64)(x) & 0xFFFFFFFF) << 0)
+#define CRYPTO_CONFIG_INDEX(x)	(((u64)(x) & 0xFF) << 32)
+#define CRYPTO_ENABLE(x)	(((u64)(x) & 0x1) << 47)
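+
+/*
+ * Illustrative composition of a 64-bit crypto context (the variables
+ * below are hypothetical):
+ *
+ *	u64 ice_ctx = DATA_UNIT_NUM(lba) |
+ *		      CRYPTO_CONFIG_INDEX(cfg_slot) |
+ *		      CRYPTO_ENABLE(true);
+ */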
+
+#ifdef CONFIG_MMC_SDHCI_MSM_ICE
+int sdhci_msm_ice_get_dev(struct sdhci_host *host);
+int sdhci_msm_ice_init(struct sdhci_host *host);
+void sdhci_msm_ice_cfg_reset(struct sdhci_host *host, u32 slot);
+int sdhci_msm_ice_cfg(struct sdhci_host *host, struct mmc_request *mrq,
+			u32 slot);
+int sdhci_msm_ice_cmdq_cfg(struct sdhci_host *host,
+			struct mmc_request *mrq, u32 slot, u64 *ice_ctx);
+int sdhci_msm_ice_cfg_end(struct sdhci_host *host, struct mmc_request *mrq);
+int sdhci_msm_ice_reset(struct sdhci_host *host);
+int sdhci_msm_ice_resume(struct sdhci_host *host);
+int sdhci_msm_ice_suspend(struct sdhci_host *host);
+int sdhci_msm_ice_get_status(struct sdhci_host *host, int *ice_status);
+void sdhci_msm_ice_print_regs(struct sdhci_host *host);
+#else
+static inline int sdhci_msm_ice_get_dev(struct sdhci_host *host)
+{
+	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+	struct sdhci_msm_host *msm_host = pltfm_host->priv;
+
+	if (msm_host) {
+		msm_host->ice.pdev = NULL;
+		msm_host->ice.vops = NULL;
+	}
+	return -ENODEV;
+}
+static inline int sdhci_msm_ice_init(struct sdhci_host *host)
+{
+	return 0;
+}
+
+static inline void sdhci_msm_ice_cfg_reset(struct sdhci_host *host, u32 slot)
+{
+}
+
+static inline int sdhci_msm_ice_cfg(struct sdhci_host *host,
+		struct mmc_request *mrq, u32 slot)
+{
+	return 0;
+}
+static inline int sdhci_msm_ice_cmdq_cfg(struct sdhci_host *host,
+		struct mmc_request *mrq, u32 slot, u64 *ice_ctx)
+{
+	return 0;
+}
+static inline int sdhci_msm_ice_cfg_end(struct sdhci_host *host,
+			struct mmc_request *mrq)
+{
+	return 0;
+}
+static inline int sdhci_msm_ice_reset(struct sdhci_host *host)
+{
+	return 0;
+}
+static inline int sdhci_msm_ice_resume(struct sdhci_host *host)
+{
+	return 0;
+}
+static inline int sdhci_msm_ice_suspend(struct sdhci_host *host)
+{
+	return 0;
+}
+static inline int sdhci_msm_ice_get_status(struct sdhci_host *host,
+				   int *ice_status)
+{
+	return 0;
+}
+static inline void sdhci_msm_ice_print_regs(struct sdhci_host *host)
+{
+}
+#endif /* CONFIG_MMC_SDHCI_MSM_ICE */
+#endif /* __SDHCI_MSM_ICE_H__ */
diff -Nruw linux-4.4.115-fbx/drivers/net/ethernet/atheros/alx_prop./alc_cb.c linux-4.4.115-fbx/drivers/net/ethernet/atheros/alx_prop/alc_cb.c
--- linux-4.4.115-fbx/drivers/net/ethernet/atheros/alx_prop./alc_cb.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/net/ethernet/atheros/alx_prop/alc_cb.c	2019-01-22 16:16:24.927259302 +0100
@@ -0,0 +1,919 @@
+/*
+ * Copyright (c) 2012 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ *
+ */
+
+#include <linux/pci_regs.h>
+#include <linux/mii.h>
+
+#include "alc_hw.h"
+
+
+/* NIC */
+static int alc_identify_nic(struct alx_hw *hw)
+{
+	return 0;
+}
+
+
+/* PHY */
+static int alc_read_phy_reg(struct alx_hw *hw, u16 reg_addr, u16 *phy_data)
+{
+	unsigned long  flags;
+	int  retval = 0;
+
+	spin_lock_irqsave(&hw->mdio_lock, flags);
+
+	if (l1c_read_phy(hw, false, ALX_MDIO_DEV_TYPE_NORM, false, reg_addr,
+			 phy_data)) {
+		alx_hw_err(hw, "error when read phy reg\n");
+		retval = -EINVAL;
+	}
+
+	spin_unlock_irqrestore(&hw->mdio_lock, flags);
+	return retval;
+}
+
+
+static int alc_write_phy_reg(struct alx_hw *hw, u16 reg_addr, u16 phy_data)
+{
+	unsigned long  flags;
+	int  retval = 0;
+
+	spin_lock_irqsave(&hw->mdio_lock, flags);
+
+	if (l1c_write_phy(hw, false, ALX_MDIO_DEV_TYPE_NORM, false, reg_addr,
+			  phy_data)) {
+		alx_hw_err(hw, "error when write phy reg\n");
+		retval = -EINVAL;
+	}
+
+	spin_unlock_irqrestore(&hw->mdio_lock, flags);
+	return retval;
+}
+
+
+static int alc_init_phy(struct alx_hw *hw)
+{
+	u16 phy_id[2];
+	int retval;
+
+	spin_lock_init(&hw->mdio_lock);
+
+	retval = alc_read_phy_reg(hw, MII_PHYSID1, &phy_id[0]);
+	if (retval)
+		return retval;
+	retval = alc_read_phy_reg(hw, MII_PHYSID2, &phy_id[1]);
+	if (retval)
+		return retval;
+
+	memcpy(&hw->phy_id, phy_id, sizeof(hw->phy_id));
+
+	hw->autoneg_advertised = ALX_LINK_SPEED_1GB_FULL |
+				 ALX_LINK_SPEED_10_HALF  |
+				 ALX_LINK_SPEED_10_FULL  |
+				 ALX_LINK_SPEED_100_HALF |
+				 ALX_LINK_SPEED_100_FULL;
+	return retval;
+}
+
+
+static int alc_reset_phy(struct alx_hw *hw)
+{
+	bool pws_en, az_en, ptp_en;
+	int retval = 0;
+
+	pws_en = az_en = ptp_en = false;
+	CLI_HW_FLAG(PWSAVE_EN);
+	CLI_HW_FLAG(AZ_EN);
+	CLI_HW_FLAG(PTP_EN);
+
+	if (CHK_HW_FLAG(PWSAVE_CAP)) {
+		pws_en = true;
+		SET_HW_FLAG(PWSAVE_EN);
+	}
+
+	if (CHK_HW_FLAG(AZ_CAP)) {
+		az_en = true;
+		SET_HW_FLAG(AZ_EN);
+	}
+
+	if (CHK_HW_FLAG(PTP_CAP)) {
+		ptp_en = true;
+		SET_HW_FLAG(PTP_EN);
+	}
+
+	alx_hw_info(hw, "reset PHY, pws = %d, az = %d, ptp = %d\n",
+		    pws_en, az_en, ptp_en);
+
+	if (l1c_reset_phy(hw, pws_en, az_en, ptp_en)) {
+		alx_hw_err(hw, "error when reset phy\n");
+		retval = -EINVAL;
+	}
+
+	return retval;
+}
+
+
+/* LINK */
+static int alc_setup_phy_link(struct alx_hw *hw, u32 speed, bool autoneg,
+			      bool fc)
+{
+	u8 link_cap = 0;
+	int retval = 0;
+
+	alx_hw_info(hw, "speed = 0x%x, autoneg = %d\n", speed, autoneg);
+	if (speed & ALX_LINK_SPEED_1GB_FULL)
+		link_cap |= LX_LC_1000F;
+
+	if (speed & ALX_LINK_SPEED_100_FULL)
+		link_cap |= LX_LC_100F;
+
+	if (speed & ALX_LINK_SPEED_100_HALF)
+		link_cap |= LX_LC_100H;
+
+	if (speed & ALX_LINK_SPEED_10_FULL)
+		link_cap |= LX_LC_10F;
+
+	if (speed & ALX_LINK_SPEED_10_HALF)
+		link_cap |= LX_LC_10H;
+
+	if (l1c_init_phy_spdfc(hw, autoneg, link_cap, fc)) {
+		alx_hw_err(hw, "error when init phy speed and fc\n");
+		retval = -EINVAL;
+	}
+
+	return retval;
+}
+
+
+static int alc_setup_phy_link_speed(struct alx_hw *hw, u32 speed,
+				    bool autoneg, bool fc)
+{
+	/*
+	 * Clear autoneg_advertised and set new values based on input link
+	 * speed.
+	 */
+	hw->autoneg_advertised = 0;
+
+	if (speed & ALX_LINK_SPEED_1GB_FULL)
+		hw->autoneg_advertised |= ALX_LINK_SPEED_1GB_FULL;
+
+	if (speed & ALX_LINK_SPEED_100_FULL)
+		hw->autoneg_advertised |= ALX_LINK_SPEED_100_FULL;
+
+	if (speed & ALX_LINK_SPEED_100_HALF)
+		hw->autoneg_advertised |= ALX_LINK_SPEED_100_HALF;
+
+	if (speed & ALX_LINK_SPEED_10_FULL)
+		hw->autoneg_advertised |= ALX_LINK_SPEED_10_FULL;
+
+	if (speed & ALX_LINK_SPEED_10_HALF)
+		hw->autoneg_advertised |= ALX_LINK_SPEED_10_HALF;
+
+	return alc_setup_phy_link(hw, hw->autoneg_advertised,
+				  autoneg, fc);
+}
+
+
+static int alc_check_phy_link(struct alx_hw *hw, u32 *speed, bool *link_up)
+{
+	u16 bmsr, giga;
+	int retval;
+
+	/* BMSR link status is latched; read twice to get the current state */
+	alc_read_phy_reg(hw, MII_BMSR, &bmsr);
+	retval = alc_read_phy_reg(hw, MII_BMSR, &bmsr);
+	if (retval)
+		return retval;
+
+	if (!(bmsr & BMSR_LSTATUS)) {
+		*link_up = false;
+		*speed = ALX_LINK_SPEED_UNKNOWN;
+		return 0;
+	}
+	*link_up = true;
+
+	/* Read PHY Specific Status Register (17) */
+	retval = alc_read_phy_reg(hw, L1C_MII_GIGA_PSSR, &giga);
+	if (retval)
+		return retval;
+
+	if (!(giga & L1C_GIGA_PSSR_SPD_DPLX_RESOLVED)) {
+		alx_hw_err(hw, "error for speed duplex resolved\n");
+		return -EINVAL;
+	}
+
+	switch (giga & L1C_GIGA_PSSR_SPEED) {
+	case L1C_GIGA_PSSR_1000MBS:
+		if (giga & L1C_GIGA_PSSR_DPLX)
+			*speed = ALX_LINK_SPEED_1GB_FULL;
+		else
+			alx_hw_err(hw, "1000M half is invalid\n");
+		break;
+	case L1C_GIGA_PSSR_100MBS:
+		if (giga & L1C_GIGA_PSSR_DPLX)
+			*speed = ALX_LINK_SPEED_100_FULL;
+		else
+			*speed = ALX_LINK_SPEED_100_HALF;
+		break;
+	case L1C_GIGA_PSSR_10MBS:
+		if (giga & L1C_GIGA_PSSR_DPLX)
+			*speed = ALX_LINK_SPEED_10_FULL;
+		else
+			*speed = ALX_LINK_SPEED_10_HALF;
+		break;
+	default:
+		*speed = ALX_LINK_SPEED_UNKNOWN;
+		retval = -EINVAL;
+		break;
+	}
+
+	return retval;
+}
+
+
+/*
+ * 1. stop_mac
+ * 2. reset mac & dma by reg1400(MASTER)
+ * 3. control speed/duplex, hash-alg
+ * 4. clock switch setting
+ */
+static int alc_reset_mac(struct alx_hw *hw)
+{
+	int retval = 0;
+
+	if (l1c_reset_mac(hw)) {
+		alx_hw_err(hw, "error when reset mac\n");
+		retval = -EINVAL;
+	}
+	return retval;
+}
+
+
+static int alc_start_mac(struct alx_hw *hw)
+{
+	u16 en_ctrl = 0;
+	int retval = 0;
+
+	/* set link speed param */
+	switch (hw->link_speed) {
+	case ALX_LINK_SPEED_1GB_FULL:
+		en_ctrl |= LX_MACSPEED_1000;
+		/* fall through */
+	case ALX_LINK_SPEED_100_FULL:
+	case ALX_LINK_SPEED_10_FULL:
+		en_ctrl |= LX_MACDUPLEX_FULL;
+		break;
+	}
+
+	/* set fc param*/
+	switch (hw->cur_fc_mode) {
+	case alx_fc_full:
+		en_ctrl |= LX_FC_RXEN; /* Flow Control RX Enable */
+		en_ctrl |= LX_FC_TXEN; /* Flow Control TX Enable */
+		break;
+	case alx_fc_rx_pause:
+		en_ctrl |= LX_FC_RXEN; /* Flow Control RX Enable */
+		break;
+	case alx_fc_tx_pause:
+		en_ctrl |= LX_FC_TXEN; /* Flow Control TX Enable */
+		break;
+	default:
+		break;
+	}
+
+	if (hw->fc_single_pause)
+		en_ctrl |= LX_SINGLE_PAUSE;
+
+	en_ctrl |= LX_FLT_DIRECT; /* RX Enable; and TX Always Enable */
+	en_ctrl |= LX_FLT_BROADCAST; /* RX Broadcast Enable */
+	en_ctrl |= LX_ADD_FCS;
+
+	if (CHK_HW_FLAG(VLANSTRIP_EN))
+		en_ctrl |= LX_VLAN_STRIP;
+
+	if (CHK_HW_FLAG(PROMISC_EN))
+		en_ctrl |=  LX_FLT_PROMISC;
+
+	if (CHK_HW_FLAG(MULTIALL_EN))
+		en_ctrl |= LX_FLT_MULTI_ALL;
+
+	if (CHK_HW_FLAG(LOOPBACK_EN))
+		en_ctrl |= LX_LOOPBACK;
+
+	if (l1c_enable_mac(hw, true, en_ctrl)) {
+		alx_hw_err(hw, "error when start mac\n");
+		retval = -EINVAL;
+	}
+	return retval;
+}
+
+
+/*
+ * 1. stop RXQ (reg15A0) and TXQ (reg1590)
+ * 2. stop MAC (reg1480)
+ */
+static int alc_stop_mac(struct alx_hw *hw)
+{
+	int retval = 0;
+
+	if (l1c_enable_mac(hw, false, 0)) {
+		alx_hw_err(hw, "error when stop mac\n");
+		retval = -EINVAL;
+	}
+	return retval;
+}
+
+
+static int alc_config_mac(struct alx_hw *hw, u16 rxbuf_sz, u16 rx_qnum,
+			  u16 rxring_sz, u16 tx_qnum,  u16 txring_sz)
+{
+	u8 *addr;
+
+	u32 txmem_hi, txmem_lo[4];
+
+	u32 rxmem_hi, rfdmem_lo, rrdmem_lo;
+
+	u16 smb_timer, mtu_with_eth, int_mod;
+	bool hash_legacy;
+
+	int i;
+	int retval = 0;
+
+	addr = hw->mac_addr;
+
+	txmem_hi = ALX_DMA_ADDR_HI(hw->tpdma[0]);
+	for (i = 0; i < tx_qnum; i++)
+		txmem_lo[i] = ALX_DMA_ADDR_LO(hw->tpdma[i]);
+
+	rxmem_hi = ALX_DMA_ADDR_HI(hw->rfdma[0]);
+	rfdmem_lo = ALX_DMA_ADDR_LO(hw->rfdma[0]);
+	rrdmem_lo = ALX_DMA_ADDR_LO(hw->rrdma[0]);
+
+	smb_timer = (u16)hw->smb_timer;
+	mtu_with_eth = hw->mtu + ALX_ETH_LENGTH_OF_HEADER;
+	int_mod = hw->imt;
+
+	hash_legacy = true;
+
+	if (l1c_init_mac(hw, addr, txmem_hi, txmem_lo, tx_qnum, txring_sz,
+			 rxmem_hi, rfdmem_lo, rrdmem_lo, rxring_sz, rxbuf_sz,
+			 smb_timer, mtu_with_eth, int_mod, hash_legacy)) {
+		alx_hw_err(hw, "error when config mac\n");
+		retval = -EINVAL;
+	}
+
+	return retval;
+}
+
+
+/**
+ *  alc_get_mac_addr
+ *  @hw: pointer to hardware structure
+ **/
+static int alc_get_mac_addr(struct alx_hw *hw, u8 *addr)
+{
+	int retval = 0;
+
+	if (l1c_get_perm_macaddr(hw, addr)) {
+		alx_hw_err(hw, "error when get permanent mac address\n");
+		retval = -EINVAL;
+	}
+	return retval;
+}
+
+
+static int alc_reset_pcie(struct alx_hw *hw, bool l0s_en, bool l1_en)
+{
+	int retval = 0;
+
+	if (!CHK_HW_FLAG(L0S_CAP))
+		l0s_en = false;
+
+	if (l0s_en)
+		SET_HW_FLAG(L0S_EN);
+	else
+		CLI_HW_FLAG(L0S_EN);
+
+	if (!CHK_HW_FLAG(L1_CAP))
+		l1_en = false;
+
+	if (l1_en)
+		SET_HW_FLAG(L1_EN);
+	else
+		CLI_HW_FLAG(L1_EN);
+
+	if (l1c_reset_pcie(hw, l0s_en, l1_en)) {
+		alx_hw_err(hw, "error when reset pcie\n");
+		retval = -EINVAL;
+	}
+	return retval;
+}
+
+
+static int alc_config_aspm(struct alx_hw *hw, bool l0s_en, bool l1_en)
+{
+	u8  link_stat;
+	int retval = 0;
+
+	if (!CHK_HW_FLAG(L0S_CAP))
+		l0s_en = false;
+
+	if (l0s_en)
+		SET_HW_FLAG(L0S_EN);
+	else
+		CLI_HW_FLAG(L0S_EN);
+
+	if (!CHK_HW_FLAG(L1_CAP))
+		l1_en = false;
+
+	if (l1_en)
+		SET_HW_FLAG(L1_EN);
+	else
+		CLI_HW_FLAG(L1_EN);
+
+	link_stat = hw->link_up ? LX_LC_ALL : 0;
+	if (l1c_enable_aspm(hw, l0s_en, l1_en, link_stat)) {
+		alx_hw_err(hw, "error when enable aspm\n");
+		retval = -EINVAL;
+	}
+	return retval;
+}
+
+
+static int alc_config_wol(struct alx_hw *hw, u32 wufc)
+{
+	u32 wol = 0;
+
+	/* turn on magic packet event */
+	if (wufc & ALX_WOL_MAGIC) {
+		wol |= L1C_WOL0_MAGIC_EN | L1C_WOL0_PME_MAGIC_EN;
+		if (hw->mac_type == alx_mac_l2cb_v1 &&
+		    hw->pci_revid == ALX_REV_ID_AR8152_V1_1) {
+			wol |= L1C_WOL0_PATTERN_EN | L1C_WOL0_PME_PATTERN_EN;
+		}
+		/* the magic packet may be a broadcast, multicast or unicast
+		 * frame; this is handled in l1c_powersaving()
+		 */
+	}
+
+	/* turn on link up event */
+	if (wufc & ALX_WOL_PHY) {
+		wol |=  L1C_WOL0_LINK_EN | L1C_WOL0_PME_LINK;
+		/* only link up can wake up */
+		alc_write_phy_reg(hw, L1C_MII_IER, L1C_IER_LINK_UP);
+	}
+
+	alx_mem_w32(hw, L1C_WOL0, wol);
+	return 0;
+}
+
+
+static int alc_config_mac_ctrl(struct alx_hw *hw)
+{
+	u32 mac;
+
+	alx_mem_r32(hw, L1C_MAC_CTRL, &mac);
+
+	/* enable/disable VLAN tag insert,strip */
+	if (CHK_HW_FLAG(VLANSTRIP_EN))
+		mac |= L1C_MAC_CTRL_VLANSTRIP;
+	else
+		mac &= ~L1C_MAC_CTRL_VLANSTRIP;
+
+	if (CHK_HW_FLAG(PROMISC_EN))
+		mac |= L1C_MAC_CTRL_PROMISC_EN;
+	else
+		mac &= ~L1C_MAC_CTRL_PROMISC_EN;
+
+	if (CHK_HW_FLAG(MULTIALL_EN))
+		mac |= L1C_MAC_CTRL_MULTIALL_EN;
+	else
+		mac &= ~L1C_MAC_CTRL_MULTIALL_EN;
+
+	if (CHK_HW_FLAG(LOOPBACK_EN))
+		mac |= L1C_MAC_CTRL_LPBACK_EN;
+	else
+		mac &= ~L1C_MAC_CTRL_LPBACK_EN;
+
+	alx_mem_w32(hw, L1C_MAC_CTRL, mac);
+	return 0;
+}
+
+
+static int alc_config_pow_save(struct alx_hw *hw, u32 speed, bool wol_en,
+			       bool tx_en, bool rx_en, bool pws_en)
+{
+	u8 wire_spd = LX_LC_10H;
+	int retval = 0;
+
+	switch (speed) {
+	case ALX_LINK_SPEED_UNKNOWN:
+	case ALX_LINK_SPEED_10_HALF:
+		wire_spd = LX_LC_10H;
+		break;
+	case ALX_LINK_SPEED_10_FULL:
+		wire_spd = LX_LC_10F;
+		break;
+	case ALX_LINK_SPEED_100_HALF:
+		wire_spd = LX_LC_100H;
+		break;
+	case ALX_LINK_SPEED_100_FULL:
+		wire_spd = LX_LC_100F;
+		break;
+	case ALX_LINK_SPEED_1GB_FULL:
+		wire_spd = LX_LC_1000F;
+		break;
+	}
+
+	if (l1c_powersaving(hw, wire_spd, wol_en, tx_en, rx_en, pws_en)) {
+		alx_hw_err(hw, "error when set power saving\n");
+		retval = -EINVAL;
+	}
+	return retval;
+}
+
+
+/* RAR, Multicast, VLAN */
+static int alc_set_mac_addr(struct alx_hw *hw, u8 *addr)
+{
+	u32 sta;
+
+	/*
+	 * for example, with MAC address 00-0B-6A-F6-00-DC:
+	 * STAD0 = 6AF600DC, STAD1 = 000B.
+	 */
+
+	/* low dword */
+	sta = (((u32)addr[2]) << 24) | (((u32)addr[3]) << 16) |
+	      (((u32)addr[4]) << 8)  | ((u32)addr[5]);
+	alx_mem_w32(hw, L1C_STAD0, sta);
+
+	/* high dword */
+	sta = (((u32)addr[0]) << 8) | ((u32)addr[1]);
+	alx_mem_w32(hw, L1C_STAD1, sta);
+	return 0;
+}
+
+
+static int alc_set_mc_addr(struct alx_hw *hw, u8 *addr)
+{
+	u32 crc32, bit, reg, mta;
+
+	/*
+	 * compute the hash bit for a multicast address:
+	 * 1. compute the 32-bit CRC of the address
+	 * 2. bit-reverse the CRC (MSB to LSB)
+	 */
+	crc32 = ALX_ETH_CRC(addr, ALX_ETH_LENGTH_OF_ADDRESS);
+
+	/*
+	 * The hash table is an array of 2 32-bit registers, treated as
+	 * one 64-bit array.  We want to set bit BitArray[hash_value]:
+	 * figure out which register the bit is in, read it, OR in the
+	 * new bit, then write back the new value.  The top bit of the
+	 * reversed CRC selects the register; the next 5 bits select the
+	 * bit within that register.
+	 */
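+	/*
+	 * e.g. a (hypothetical) crc32 value of 0x8A000000 gives
+	 * reg = 1, bit = 2
+	 */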
+	reg = (crc32 >> 31) & 0x1;
+	bit = (crc32 >> 26) & 0x1F;
+
+	alx_mem_r32(hw, L1C_HASH_TBL0 + (reg<<2), &mta);
+	mta |= (0x1 << bit);
+	alx_mem_w32(hw, L1C_HASH_TBL0 + (reg<<2), mta);
+	return 0;
+}
+
+
+static int alc_clear_mc_addr(struct alx_hw *hw)
+{
+	alx_mem_w32(hw, L1C_HASH_TBL0, 0);
+	alx_mem_w32(hw, L1C_HASH_TBL1, 0);
+	return 0;
+}
+
+
+/* RTX */
+static int alc_config_tx(struct alx_hw *hw)
+{
+	return 0;
+}
+
+
+/* INTR */
+static int alc_ack_phy_intr(struct alx_hw *hw)
+{
+	u16 isr;
+	return alc_read_phy_reg(hw, L1C_MII_ISR, &isr);
+}
+
+
+static int alc_enable_legacy_intr(struct alx_hw *hw)
+{
+	alx_mem_w32(hw, L1C_ISR, ~((u32) L1C_ISR_DIS));
+	alx_mem_w32(hw, L1C_IMR, hw->intr_mask);
+	return 0;
+}
+
+
+static int alc_disable_legacy_intr(struct alx_hw *hw)
+{
+	alx_mem_w32(hw, L1C_ISR, L1C_ISR_DIS);
+	alx_mem_w32(hw, L1C_IMR, 0);
+	alx_mem_flush(hw);
+	return 0;
+}
+
+
+/*
+ * NV Ram
+ */
+static int alc_check_nvram(struct alx_hw *hw, bool *exist)
+{
+	*exist = false;
+	return 0;
+}
+
+
+static int alc_read_nvram(struct alx_hw *hw, u16 offset, u32 *data)
+{
+	int i;
+	u32 ectrl1, ectrl2, edata;
+	int retval = 0;
+
+	if (offset & 0x3)
+		return -EINVAL; /* address is not aligned */
+
+	alx_mem_r32(hw, L1C_EFUSE_CTRL2, &ectrl2);
+	if (!(ectrl2 & L1C_EFUSE_CTRL2_CLK_EN))
+		alx_mem_w32(hw, L1C_EFUSE_CTRL2, ectrl2|L1C_EFUSE_CTRL2_CLK_EN);
+
+	alx_mem_w32(hw, L1C_EFUSE_DATA, 0);
+	ectrl1 = FIELDL(L1C_EFUSE_CTRL_ADDR, offset);
+	alx_mem_w32(hw, L1C_EFUSE_CTRL, ectrl1);
+
+	for (i = 0; i < 10; i++) {
+		udelay(100);
+		alx_mem_r32(hw, L1C_EFUSE_CTRL, &ectrl1);
+		if (ectrl1 & L1C_EFUSE_CTRL_FLAG)
+			break;
+	}
+	if (ectrl1 & L1C_EFUSE_CTRL_FLAG) {
+		alx_mem_r32(hw, L1C_EFUSE_CTRL, &ectrl1);
+		alx_mem_r32(hw, L1C_EFUSE_DATA, &edata);
+		*data = LX_SWAP_DW((ectrl1 << 16) | (edata >> 16));
+		return retval;
+	}
+
+	if (!(ectrl2 & L1C_EFUSE_CTRL2_CLK_EN))
+		alx_mem_w32(hw, L1C_EFUSE_CTRL2, ectrl2);
+
+	return retval;
+}
+
+
+static int alc_write_nvram(struct alx_hw *hw, u16 offset, u32 data)
+{
+	return 0;
+}
+
+
+/* fc */
+static int alc_get_fc_mode(struct alx_hw *hw, enum alx_fc_mode *mode)
+{
+	u16 bmsr, giga;
+	int i;
+	int retval = 0;
+
+	for (i = 0; i < ALX_MAX_SETUP_LNK_CYCLE; i++) {
+		/* BMSR is latched; read twice to get the current status */
+		alc_read_phy_reg(hw, MII_BMSR, &bmsr);
+		alc_read_phy_reg(hw, MII_BMSR, &bmsr);
+		if (bmsr & BMSR_LSTATUS) {
+			/* Read phy Specific Status Register (17) */
+			retval = alc_read_phy_reg(hw, L1C_MII_GIGA_PSSR, &giga);
+			if (retval)
+				return retval;
+
+			if (!(giga & L1C_GIGA_PSSR_SPD_DPLX_RESOLVED)) {
+				alx_hw_err(hw,
+					"error for speed duplex resolved\n");
+				return -EINVAL;
+			}
+
+			if ((giga & L1C_GIGA_PSSR_FC_TXEN) &&
+			    (giga & L1C_GIGA_PSSR_FC_RXEN)) {
+				*mode = alx_fc_full;
+			} else if (giga & L1C_GIGA_PSSR_FC_TXEN) {
+				*mode = alx_fc_tx_pause;
+			} else if (giga & L1C_GIGA_PSSR_FC_RXEN) {
+				*mode = alx_fc_rx_pause;
+			} else {
+				*mode = alx_fc_none;
+			}
+			break;
+		}
+		mdelay(100);
+	}
+
+	if (i == ALX_MAX_SETUP_LNK_CYCLE) {
+		alx_hw_err(hw, "error when get flow control mode\n");
+		retval = -EINVAL;
+	}
+	return retval;
+}
+
+
+static int alc_config_fc(struct alx_hw *hw)
+{
+	u32 mac;
+	int retval = 0;
+
+	if (hw->disable_fc_autoneg) {
+		hw->fc_was_autonegged = false;
+		hw->cur_fc_mode = hw->req_fc_mode;
+	} else {
+		hw->fc_was_autonegged = true;
+		retval = alc_get_fc_mode(hw, &hw->cur_fc_mode);
+		if (retval)
+			return retval;
+	}
+
+	alx_mem_r32(hw, L1C_MAC_CTRL, &mac);
+
+	switch (hw->cur_fc_mode) {
+	case alx_fc_none: /* 0 */
+		mac &= ~(L1C_MAC_CTRL_RXFC_EN | L1C_MAC_CTRL_TXFC_EN);
+		break;
+	case alx_fc_rx_pause: /* 1 */
+		mac &= ~L1C_MAC_CTRL_TXFC_EN;
+		mac |= L1C_MAC_CTRL_RXFC_EN;
+		break;
+	case alx_fc_tx_pause: /* 2 */
+		mac |= L1C_MAC_CTRL_TXFC_EN;
+		mac &= ~L1C_MAC_CTRL_RXFC_EN;
+		break;
+	case alx_fc_full: /* 3 */
+	case alx_fc_default: /* 4 */
+		mac |= (L1C_MAC_CTRL_TXFC_EN | L1C_MAC_CTRL_RXFC_EN);
+		break;
+	default:
+		alx_hw_err(hw, "flow control param set incorrectly\n");
+		return -EINVAL;
+	}
+
+	alx_mem_w32(hw, L1C_MAC_CTRL, mac);
+	return retval;
+}
+
+
+/* ethtool */
+static int alc_get_ethtool_regs(struct alx_hw *hw, void *buff)
+{
+	int i;
+	u32 *val = buff;
+	static const int reg[] = {
+		/* 0 */
+		L1C_LNK_CAP, L1C_PMCTRL, L1C_HALFD, L1C_SLD, L1C_MASTER,
+		L1C_MANU_TIMER, L1C_IRQ_MODU_TIMER, L1C_PHY_CTRL, L1C_LNK_CTRL,
+		L1C_MAC_STS,
+
+		/* 10 */
+		L1C_MDIO, L1C_SERDES, L1C_MAC_CTRL, L1C_GAP, L1C_STAD0,
+		L1C_STAD1, L1C_HASH_TBL0, L1C_HASH_TBL1, L1C_RXQ0, L1C_RXQ1,
+
+		/* 20 */
+		L1C_RXQ2, L1C_RXQ3, L1C_TXQ0, L1C_TXQ1, L1C_TXQ2, L1C_MTU,
+		L1C_WOL0, L1C_WOL1, L1C_WOL2,
+	};
+
+	for (i = 0; i < ARRAY_SIZE(reg); i++) {
+		alx_mem_r32(hw, reg[i], &val[i]);
+		pr_info("Register offset 0x%04x = 0x%08x\n", reg[i], val[i]);
+	}
+	return 0;
+}
+
+static int alc_apply_phy_hib_patch(struct alx_hw *hw)
+{
+	return l1c_apply_phy_hib_patch(hw);
+}
+
+/******************************************************************************/
+static int alc_get_hw_capabilities(struct alx_hw *hw)
+{
+	/*
+	 * because of a hardware erratum on some platforms, keep this
+	 * feature disabled while the link is connected.
+	 */
+	CLI_HW_FLAG(L0S_CAP);
+	CLI_HW_FLAG(L1_CAP);
+
+	if ((hw->mac_type == alx_mac_l1c) ||
+	    (hw->mac_type == alx_mac_l1d_v1) ||
+	    (hw->mac_type == alx_mac_l1d_v2))
+		SET_HW_FLAG(GIGA_CAP);
+
+	SET_HW_FLAG(PWSAVE_CAP);
+	return 0;
+}
+
+
+/* alc_set_hw_info */
+static int alc_set_hw_infos(struct alx_hw *hw)
+{
+	hw->rxstat_reg = 0x1700;
+	hw->rxstat_sz  = 0x60;
+	hw->txstat_reg = 0x1760;
+	hw->txstat_sz  = 0x68;
+
+	hw->rx_prod_reg[0] = L1C_RFD_PIDX;
+	hw->rx_cons_reg[0] = L1C_RFD_CIDX;
+
+	hw->tx_prod_reg[0] = L1C_TPD_PRI0_PIDX;
+	hw->tx_cons_reg[0] = L1C_TPD_PRI0_CIDX;
+	hw->tx_prod_reg[1] = L1C_TPD_PRI1_PIDX;
+	hw->tx_cons_reg[1] = L1C_TPD_PRI1_CIDX;
+
+	hw->hwreg_sz = 0x80;
+	hw->eeprom_sz = 0;
+
+	return 0;
+}
+
+
+/**
+ *  alc_init_hw_callbacks - Inits func ptrs and MAC type
+ *  @hw: pointer to hardware structure
+ **/
+int alc_init_hw_callbacks(struct alx_hw *hw)
+{
+	/* NIC */
+	hw->cbs.identify_nic   = &alc_identify_nic;
+	/* MAC */
+	hw->cbs.reset_mac      = &alc_reset_mac;
+	hw->cbs.start_mac      = &alc_start_mac;
+	hw->cbs.stop_mac       = &alc_stop_mac;
+	hw->cbs.config_mac     = &alc_config_mac;
+	hw->cbs.get_mac_addr   = &alc_get_mac_addr;
+	hw->cbs.set_mac_addr   = &alc_set_mac_addr;
+	hw->cbs.set_mc_addr    = &alc_set_mc_addr;
+	hw->cbs.clear_mc_addr  = &alc_clear_mc_addr;
+
+	/* PHY */
+	hw->cbs.init_phy          = &alc_init_phy;
+	hw->cbs.reset_phy         = &alc_reset_phy;
+	hw->cbs.read_phy_reg      = &alc_read_phy_reg;
+	hw->cbs.write_phy_reg     = &alc_write_phy_reg;
+	hw->cbs.check_phy_link    = &alc_check_phy_link;
+	hw->cbs.setup_phy_link    = &alc_setup_phy_link;
+	hw->cbs.setup_phy_link_speed = &alc_setup_phy_link_speed;
+	hw->cbs.apply_phy_hib_patch = &alc_apply_phy_hib_patch;
+
+	/* Interrupt */
+	hw->cbs.ack_phy_intr	= &alc_ack_phy_intr;
+	hw->cbs.enable_legacy_intr  = &alc_enable_legacy_intr;
+	hw->cbs.disable_legacy_intr = &alc_disable_legacy_intr;
+
+	/* Configure */
+	hw->cbs.config_tx	= &alc_config_tx;
+	hw->cbs.config_fc	= &alc_config_fc;
+	hw->cbs.config_aspm	= &alc_config_aspm;
+	hw->cbs.config_wol	= &alc_config_wol;
+	hw->cbs.config_mac_ctrl	= &alc_config_mac_ctrl;
+	hw->cbs.config_pow_save	= &alc_config_pow_save;
+	hw->cbs.reset_pcie	= &alc_reset_pcie;
+
+	/* NVRam */
+	hw->cbs.check_nvram	= &alc_check_nvram;
+	hw->cbs.read_nvram	= &alc_read_nvram;
+	hw->cbs.write_nvram	= &alc_write_nvram;
+
+	/* Others */
+	hw->cbs.get_ethtool_regs = alc_get_ethtool_regs;
+
+	/* get hw capabilities into hw->flags */
+	alc_get_hw_capabilities(hw);
+	alc_set_hw_infos(hw);
+
+	alx_hw_info(hw, "HW Flags = 0x%x\n", hw->flags);
+	return 0;
+}
+
diff -Nruw linux-4.4.115-fbx/drivers/net/ethernet/atheros/alx_prop./alc_hw.c linux-4.4.115-fbx/drivers/net/ethernet/atheros/alx_prop/alc_hw.c
--- linux-4.4.115-fbx/drivers/net/ethernet/atheros/alx_prop./alc_hw.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/net/ethernet/atheros/alx_prop/alc_hw.c	2019-01-22 16:16:24.927259302 +0100
@@ -0,0 +1,1167 @@
+/*
+ * Copyright (c) 2012 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/pci_regs.h>
+#include <linux/mii.h>
+#include "alx.h"
+#include "alc_hw.h"
+
+extern void *ipc_alx_log_ctxt;
+
+/*
+ * get permanent mac address
+ *    0: success
+ *    non-0:fail
+ */
+u16 l1c_get_perm_macaddr(struct alx_hw *hw, u8 *addr)
+{
+	u32 val, otp_ctrl, otp_flag, mac0, mac1;
+	u16 i;
+	u16 phy_val;
+
+	/* get it from register first */
+	alx_mem_r32(hw, L1C_STAD0, &mac0);
+	alx_mem_r32(hw, L1C_STAD1, &mac1);
+
+	*(u32 *)(addr + 2) = LX_SWAP_DW(mac0);
+	*(u16 *)addr = (u16)LX_SWAP_W((u16)mac1);
+
+	if (macaddr_valid(addr))
+		return 0;
+
+	alx_mem_r32(hw, L1C_TWSI_DBG, &val);
+	alx_mem_r32(hw, L1C_EFUSE_CTRL2, &otp_ctrl);
+	alx_mem_r32(hw, L1C_MASTER, &otp_flag);
+
+	if ((val & L1C_TWSI_DBG_DEV_EXIST) != 0 ||
+	    (otp_flag & L1C_MASTER_OTP_FLG) != 0) {
+		/* non-volatile memory exists, do software autoload */
+		/* enable OTP_CLK for L1C */
+		if (hw->pci_devid == L1C_DEV_ID ||
+		    hw->pci_devid == L2C_DEV_ID) {
+			if ((otp_ctrl & L1C_EFUSE_CTRL2_CLK_EN) != 0) {
+				alx_mem_w32(hw, L1C_EFUSE_CTRL2,
+					    otp_ctrl | L1C_EFUSE_CTRL2_CLK_EN);
+				udelay(5);
+			}
+		}
+		/* raise voltage temporarily for L2CB/L1D */
+		if (hw->pci_devid == L2CB_DEV_ID ||
+		    hw->pci_devid == L2CB2_DEV_ID) {
+			/* clear bit[7] of debugport 00 */
+			l1c_read_phydbg(hw, true, L1C_MIIDBG_ANACTRL,
+					&phy_val);
+			l1c_write_phydbg(hw, true, L1C_MIIDBG_ANACTRL,
+					 phy_val & ~L1C_ANACTRL_HB_EN);
+			/* set bit[3] of debugport 3B */
+			l1c_read_phydbg(hw, true, L1C_MIIDBG_VOLT_CTRL,
+					&phy_val);
+			l1c_write_phydbg(hw, true, L1C_MIIDBG_VOLT_CTRL,
+					 phy_val | L1C_VOLT_CTRL_SWLOWEST);
+			udelay(20);
+		}
+		/* do load */
+		alx_mem_r32(hw, L1C_SLD, &val);
+		alx_mem_w32(hw, L1C_SLD, val | L1C_SLD_START);
+		for (i = 0; i < L1C_SLD_MAX_TO; i++) {
+			mdelay(1);
+			alx_mem_r32(hw, L1C_SLD, &val);
+			if ((val & L1C_SLD_START) == 0)
+				break;
+		}
+		/* disable OTP_CLK for L1C */
+		if (hw->pci_devid == L1C_DEV_ID ||
+		    hw->pci_devid == L2C_DEV_ID) {
+			alx_mem_w32(hw, L1C_EFUSE_CTRL2,
+				    otp_ctrl & ~L1C_EFUSE_CTRL2_CLK_EN);
+			udelay(5);
+		}
+		/* low voltage */
+		if (hw->pci_devid == L2CB_DEV_ID ||
+		    hw->pci_devid == L2CB2_DEV_ID) {
+			/* set bit[7] of debugport 00 */
+			l1c_read_phydbg(hw, true, L1C_MIIDBG_ANACTRL,
+					&phy_val);
+			l1c_write_phydbg(hw, true, L1C_MIIDBG_ANACTRL,
+					 phy_val | L1C_ANACTRL_HB_EN);
+			/* clear bit[3] of debugport 3B */
+			l1c_read_phydbg(hw, true, L1C_MIIDBG_VOLT_CTRL,
+					&phy_val);
+			l1c_write_phydbg(hw, true, L1C_MIIDBG_VOLT_CTRL,
+					 phy_val & ~L1C_VOLT_CTRL_SWLOWEST);
+			udelay(20);
+		}
+		if (i == L1C_SLD_MAX_TO)
+			goto out;
+	} else {
+		if (hw->pci_devid == L1C_DEV_ID ||
+		    hw->pci_devid == L2C_DEV_ID) {
+			alx_mem_w32(hw, L1C_EFUSE_CTRL2,
+				    otp_ctrl & ~L1C_EFUSE_CTRL2_CLK_EN);
+			udelay(5);
+		}
+	}
+
+	alx_mem_r32(hw, L1C_STAD0, &mac0);
+	alx_mem_r32(hw, L1C_STAD1, &mac1);
+
+	*(u32 *)(addr + 2) = LX_SWAP_DW(mac0);
+	*(u16 *)addr = (u16)LX_SWAP_W((u16)mac1);
+
+	if (macaddr_valid(addr))
+		return 0;
+
+out:
+	return LX_ERR_ALOAD;
+}
+
+/*
+ * reset mac & dma
+ * return
+ *     0: success
+ *     non-0:fail
+ */
+u16 l1c_reset_mac(struct alx_hw *hw)
+{
+	u32 val, mrst_val;
+	u16 ret;
+	u16 i;
+
+	/* disable all interrupts, RXQ/TXQ */
+	alx_mem_w32(hw, L1C_IMR, 0);
+	alx_mem_w32(hw, L1C_ISR, L1C_ISR_DIS);
+
+	ret = l1c_enable_mac(hw, false, 0);
+	if (ret != 0)
+		return ret;
+
+	/* reset whole mac safely. OOB is meaningful for L1D only  */
+	alx_mem_r32(hw, L1C_MASTER, &mrst_val);
+	mrst_val |= L1C_MASTER_OOB_DIS;
+	alx_mem_w32(hw, L1C_MASTER, mrst_val | L1C_MASTER_DMA_MAC_RST);
+
+	/* make sure it's idle */
+	for (i = 0; i < L1C_DMA_MAC_RST_TO; i++) {
+		alx_mem_r32(hw, L1C_MASTER, &val);
+		if ((val & L1C_MASTER_DMA_MAC_RST) == 0)
+			break;
+#ifdef ALX_LINK_DOWN_CONFIG
+		mdelay(20);
+#else
+		udelay(20);
+#endif
+	}
+	if (i == L1C_DMA_MAC_RST_TO)
+		return LX_ERR_RSTMAC;
+	/* keep the old value */
+	alx_mem_w32(hw, L1C_MASTER, mrst_val & ~L1C_MASTER_DMA_MAC_RST);
+
+	/* driver control speed/duplex, hash-alg */
+	alx_mem_r32(hw, L1C_MAC_CTRL, &val);
+	alx_mem_w32(hw, L1C_MAC_CTRL, val | L1C_MAC_CTRL_WOLSPED_SWEN);
+
+	/* clk switch setting */
+	alx_mem_r32(hw, L1C_SERDES, &val);
+	switch (hw->pci_devid) {
+	case L2CB_DEV_ID:
+		alx_mem_w32(hw, L1C_SERDES, val & ~L1C_SERDES_PHYCLK_SLWDWN);
+		break;
+	case L2CB2_DEV_ID:
+	case L1D2_DEV_ID:
+		alx_mem_w32(hw, L1C_SERDES,
+			    val | L1C_SERDES_PHYCLK_SLWDWN |
+			    L1C_SERDES_MACCLK_SLWDWN);
+		break;
+	default:
+		/* the default value for other products is OFF */;
+	}
+
+	return 0;
+}
+
+/* reset phy
+ * return
+ *    0: success
+ *    non-0:fail
+ */
+u16 l1c_reset_phy(struct alx_hw *hw, bool pws_en, bool az_en, bool ptp_en)
+{
+	u32 val;
+	u16 i, phy_val;
+
+	ptp_en = ptp_en; /* currently unused */
+
+	/* reset PHY core */
+	alx_mem_r32(hw, L1C_PHY_CTRL, &val);
+	val &= ~(L1C_PHY_CTRL_DSPRST_OUT | L1C_PHY_CTRL_IDDQ |
+		 L1C_PHY_CTRL_GATE_25M | L1C_PHY_CTRL_POWER_DOWN |
+		 L1C_PHY_CTRL_CLS);
+	val |= L1C_PHY_CTRL_RST_ANALOG;
+
+	if (pws_en)
+		val |= (L1C_PHY_CTRL_HIB_PULSE | L1C_PHY_CTRL_HIB_EN);
+	else
+		val &= ~(L1C_PHY_CTRL_HIB_PULSE | L1C_PHY_CTRL_HIB_EN);
+
+	alx_mem_w32(hw, L1C_PHY_CTRL, val);
+	udelay(10); /* 5us is enough */
+	alx_mem_w32(hw, L1C_PHY_CTRL, val | L1C_PHY_CTRL_DSPRST_OUT);
+
+	/* delay 800us */
+	for (i = 0; i < L1C_PHY_CTRL_DSPRST_TO; i++)
+		udelay(10);
+
+	/* switch clock */
+	if (hw->pci_devid == L2CB_DEV_ID) {
+		l1c_read_phydbg(hw, true, L1C_MIIDBG_CFGLPSPD, &phy_val);
+		/* clear bit13 */
+		l1c_write_phydbg(hw, true, L1C_MIIDBG_CFGLPSPD,
+				 phy_val & ~L1C_CFGLPSPD_RSTCNT_CLK125SW);
+	}
+
+	/* fix tx-half-amp issue */
+	if (hw->pci_devid == L2CB_DEV_ID || hw->pci_devid == L2CB2_DEV_ID) {
+		l1c_read_phydbg(hw, true, L1C_MIIDBG_CABLE1TH_DET, &phy_val);
+		l1c_write_phydbg(hw, true, L1C_MIIDBG_CABLE1TH_DET,
+				 phy_val | L1C_CABLE1TH_DET_EN); /* set bit15 */
+	}
+
+	if (pws_en) {
+		/* clear bit[3] of debugport 3B to 0,
+		 * lower voltage to save power */
+		if (hw->pci_devid == L2CB_DEV_ID ||
+		    hw->pci_devid == L2CB2_DEV_ID) {
+			l1c_read_phydbg(hw, true, L1C_MIIDBG_VOLT_CTRL,
+					&phy_val);
+			l1c_write_phydbg(hw, true, L1C_MIIDBG_VOLT_CTRL,
+					 phy_val & ~L1C_VOLT_CTRL_SWLOWEST);
+		}
+		/* power saving config */
+		l1c_write_phydbg(hw, true, L1C_MIIDBG_LEGCYPS,
+				 (hw->pci_devid == L1D_DEV_ID ||
+				  hw->pci_devid == L1D2_DEV_ID) ?
+				 L1D_LEGCYPS_DEF : L1C_LEGCYPS_DEF_MPQ);
+		/* hib */
+		l1c_write_phydbg(hw, true, L1C_MIIDBG_SYSMODCTRL,
+				 L1C_SYSMODCTRL_IECHOADJ_DEF);
+	} else {
+		/* disable power saving */
+		l1c_read_phydbg(hw, true, L1C_MIIDBG_LEGCYPS, &phy_val);
+		l1c_write_phydbg(hw, true, L1C_MIIDBG_LEGCYPS,
+				 phy_val & ~L1C_LEGCYPS_EN);
+		/* disable hibernate */
+		l1c_read_phydbg(hw, true, L1C_MIIDBG_HIBNEG, &phy_val);
+		l1c_write_phydbg(hw, true, L1C_MIIDBG_HIBNEG,
+				 phy_val & ~L1C_HIBNEG_PSHIB_EN);
+	}
+
+	/* az is only for l2cb v2 / l1d v1 / l1d v2 */
+	if (hw->pci_devid == L1D_DEV_ID ||
+	    hw->pci_devid == L1D2_DEV_ID ||
+	    hw->pci_devid == L2CB2_DEV_ID) {
+		if (az_en) {
+			switch (hw->pci_devid) {
+			case L2CB2_DEV_ID:
+				alx_mem_w32(hw, L1C_LPI_DECISN_TIMER,
+					    L1C_LPI_DESISN_TIMER_L2CB);
+				/* az enable 100M */
+				l1c_write_phy(hw, true, L1C_MIIEXT_ANEG, true,
+					      L1C_MIIEXT_LOCAL_EEEADV,
+					      L1C_LOCAL_EEEADV_100BT);
+				/* az long wake threshold */
+				l1c_write_phy(hw, true, L1C_MIIEXT_PCS, true,
+					      L1C_MIIEXT_AZCTRL5,
+					      L1C_AZCTRL5_WAKE_LTH_L2CB);
+				/* az short wake threshold */
+				l1c_write_phy(hw, true, L1C_MIIEXT_PCS, true,
+					      L1C_MIIEXT_AZCTRL4,
+					      L1C_AZCTRL4_WAKE_STH_L2CB);
+
+				l1c_write_phy(hw, true, L1C_MIIEXT_PCS, true,
+					      L1C_MIIEXT_CLDCTRL3,
+					      L1C_CLDCTRL3_L2CB);
+
+				/* bit7 set to 0, otherwise ping fails */
+				l1c_write_phy(hw, true, L1C_MIIEXT_PCS, true,
+					      L1C_MIIEXT_CLDCTRL7,
+					      L1C_CLDCTRL7_L2CB);
+
+				l1c_write_phy(hw, true, L1C_MIIEXT_PCS, true,
+					      L1C_MIIEXT_AZCTRL2,
+					      L1C_AZCTRL2_L2CB);
+				break;
+
+			case L1D_DEV_ID:
+				l1c_write_phydbg(hw, true,
+				    L1C_MIIDBG_AZ_ANADECT, L1C_AZ_ANADECT_DEF);
+				phy_val = hw->long_cable ? L1C_CLDCTRL3_L1D :
+					  (L1C_CLDCTRL3_L1D &
+					   ~(L1C_CLDCTRL3_BP_CABLE1TH_DET_GT |
+					     L1C_CLDCTRL3_AZ_DISAMP));
+				l1c_write_phy(hw, true, L1C_MIIEXT_PCS, true,
+					      L1C_MIIEXT_CLDCTRL3, phy_val);
+				l1c_write_phy(hw, true, L1C_MIIEXT_PCS, true,
+					      L1C_MIIEXT_AZCTRL,
+					      L1C_AZCTRL_L1D);
+				l1c_write_phy(hw, true, L1C_MIIEXT_PCS, true,
+					      L1C_MIIEXT_AZCTRL2,
+					      L1C_AZCTRL2_L2CB);
+				break;
+
+			case L1D2_DEV_ID:
+				l1c_write_phydbg(hw, true,
+						 L1C_MIIDBG_AZ_ANADECT,
+						 L1C_AZ_ANADECT_DEF);
+				phy_val = hw->long_cable ? L1C_CLDCTRL3_L1D :
+					  (L1C_CLDCTRL3_L1D &
+					   ~L1C_CLDCTRL3_BP_CABLE1TH_DET_GT);
+				l1c_write_phy(hw, true, L1C_MIIEXT_PCS, true,
+					      L1C_MIIEXT_CLDCTRL3, phy_val);
+				l1c_write_phy(hw, true, L1C_MIIEXT_PCS, true,
+					      L1C_MIIEXT_AZCTRL,
+					      L1C_AZCTRL_L1D);
+				l1c_write_phy(hw, true, L1C_MIIEXT_PCS, true,
+					      L1C_MIIEXT_AZCTRL2,
+					      L1C_AZCTRL2_L1D2);
+				l1c_write_phy(hw, true, L1C_MIIEXT_PCS, true,
+					      L1C_MIIEXT_AZCTRL6,
+					      L1C_AZCTRL6_L1D2);
+				break;
+			}
+		} else {
+			alx_mem_r32(hw, L1C_LPI_CTRL, &val);
+			alx_mem_w32(hw, L1C_LPI_CTRL, val & ~L1C_LPI_CTRL_EN);
+			l1c_write_phy(hw, true, L1C_MIIEXT_ANEG, true,
+				      L1C_MIIEXT_LOCAL_EEEADV, 0);
+			l1c_write_phy(hw, true, L1C_MIIEXT_PCS, true,
+				      L1C_MIIEXT_CLDCTRL3, L1C_CLDCTRL3_L2CB);
+		}
+	}
+
+	/* other debug ports that need setting */
+	l1c_write_phydbg(hw, true, L1C_MIIDBG_ANACTRL, L1C_ANACTRL_DEF);
+	l1c_write_phydbg(hw, true, L1C_MIIDBG_SRDSYSMOD, L1C_SRDSYSMOD_DEF);
+	l1c_write_phydbg(hw, true, L1C_MIIDBG_TST10BTCFG, L1C_TST10BTCFG_DEF);
+	/* L1c/L2c/L1d/L2cb link-fail-inhibit timer issue:
+	 * L1c fails the UNH-IOL test unless bit7 is set */
+	l1c_write_phydbg(hw, true, L1C_MIIDBG_TST100BTCFG,
+			 L1C_TST100BTCFG_DEF | L1C_TST100BTCFG_LITCH_EN);
+
+	/* set phy interrupt mask */
+	l1c_write_phy(hw, false, 0, true,
+		      L1C_MII_IER, L1C_IER_LINK_UP | L1C_IER_LINK_DOWN);
+
+	return 0;
+}
+
+
+/* reset pcie
+ * only resets the pcie-related registers (pci command, clk, aspm...)
+ * return
+ *    0: success
+ *    non-0: fail
+ */
+u16 l1c_reset_pcie(struct alx_hw *hw, bool l0s_en, bool l1_en)
+{
+	u32 val;
+	u16 val16;
+	u16 ret;
+
+	/* Workaround for PCI problem when BIOS sets MMRBC incorrectly. */
+	alx_cfg_r16(hw, PCI_COMMAND, &val16);
+	if (((val16 & PCI_COMMAND_IO) &&
+	     (val16 & PCI_COMMAND_MEMORY) &&
+	     (val16 & PCI_COMMAND_MASTER)) == 0 ||
+	    (val16 & PCI_COMMAND_INTX_DISABLE) != 0) {
+		val16 = (u16)((val16 | (PCI_COMMAND_IO |
+					PCI_COMMAND_MEMORY |
+					PCI_COMMAND_MASTER))
+			      & ~PCI_COMMAND_INTX_DISABLE);
+		alx_cfg_w16(hw, PCI_COMMAND, val16);
+	}
+
+	/* Clear any PowerSaving Settings */
+	alx_cfg_w16(hw, L1C_PM_CSR, 0);
+
+	/* disable write access to some registers */
+	alx_mem_r32(hw, L1C_LTSSM_CTRL, &val);
+	alx_mem_w32(hw, L1C_LTSSM_CTRL, val & ~L1C_LTSSM_WRO_EN);
+
+	/* mask some pcie error bits */
+	alx_mem_r32(hw, L1C_UE_SVRT, &val);
+	val &= ~(L1C_UE_SVRT_DLPROTERR | L1C_UE_SVRT_FCPROTERR);
+	alx_mem_w32(hw, L1C_UE_SVRT, val);
+
+	/* pclk */
+	alx_mem_r32(hw, L1C_MASTER, &val);
+	val &= ~L1C_MASTER_PCLKSEL_SRDS;
+	alx_mem_w32(hw, L1C_MASTER, val);
+
+	/* set bit 2 of reg 0x1000 (PPHY_MISC1), only used by L1c/L2c, for WOL */
+	if (hw->pci_devid == L1C_DEV_ID || hw->pci_devid == L2C_DEV_ID) {
+		alx_mem_r32(hw, L1C_PPHY_MISC1, &val);
+		alx_mem_w32(hw, L1C_PPHY_MISC1, val | L1C_PPHY_MISC1_RCVDET);
+	} else { /* other device should set bit 5 of reg1400 for WOL */
+		if ((val & L1C_MASTER_WAKEN_25M) == 0)
+			alx_mem_w32(hw, L1C_MASTER, val | L1C_MASTER_WAKEN_25M);
+	}
+	/* l2cb 1.0 */
+	if (hw->pci_devid == L2CB_DEV_ID && hw->pci_revid == L2CB_V10) {
+		alx_mem_r32(hw, L1C_PPHY_MISC2, &val);
+		FIELD_SETL(val, L1C_PPHY_MISC2_L0S_TH,
+			   L1C_PPHY_MISC2_L0S_TH_L2CB1);
+		FIELD_SETL(val, L1C_PPHY_MISC2_CDR_BW,
+			   L1C_PPHY_MISC2_CDR_BW_L2CB1);
+		alx_mem_w32(hw, L1C_PPHY_MISC2, val);
+		/* extend L1 sync timer, this will use more power,
+		 * only for L2cb v1.0 */
+		if (!hw->aps_en) {
+			alx_mem_r32(hw, L1C_LNK_CTRL, &val);
+			alx_mem_w32(hw, L1C_LNK_CTRL,
+				    val | L1C_LNK_CTRL_EXTSYNC);
+		}
+	}
+
+	/* l2cbv1.x & l1dv1.x */
+	if (hw->pci_devid == L2CB_DEV_ID || hw->pci_devid == L1D_DEV_ID) {
+		alx_mem_r32(hw, L1C_PMCTRL, &val);
+		alx_mem_w32(hw, L1C_PMCTRL, val | L1C_PMCTRL_L0S_BUFSRX_EN);
+		/* clear vendor message for L1d & L2cb */
+		alx_mem_r32(hw, L1C_DMA_DBG, &val);
+		alx_mem_w32(hw, L1C_DMA_DBG, val & ~L1C_DMA_DBG_VENDOR_MSG);
+	}
+
+	/* hi-tx-perf */
+	if (hw->hi_txperf) {
+		alx_mem_r32(hw, L1C_PPHY_MISC1, &val);
+		FIELD_SETL(val, L1C_PPHY_MISC1_NFTS,
+			   L1C_PPHY_MISC1_NFTS_HIPERF);
+		alx_mem_w32(hw, L1C_PPHY_MISC1, val);
+	}
+	/* l0s, l1 setting */
+	ret = l1c_enable_aspm(hw, l0s_en, l1_en, 0);
+
+	udelay(10);
+
+	return ret;
+}
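+
+/* Illustrative call (a sketch, not from the original driver): let the
+ * system's L1 setting through while keeping L0s off:
+ *
+ *	u16 err = l1c_reset_pcie(hw, false, true);
+ *
+ * The l0s_en/l1_en flags are simply forwarded to l1c_enable_aspm().
+ */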
+
+
+/* disable/enable MAC/RXQ/TXQ
+ * en
+ *    true: enable
+ *    false: disable
+ * return
+ *    0: success
+ *    non-0: fail
+ */
+u16 l1c_enable_mac(struct alx_hw *hw, bool en, u16 en_ctrl)
+{
+	u32 rxq, txq, mac, val;
+	u16 i;
+
+	alx_mem_r32(hw, L1C_RXQ0, &rxq);
+	alx_mem_r32(hw, L1C_TXQ0, &txq);
+	alx_mem_r32(hw, L1C_MAC_CTRL, &mac);
+
+	if (en) { /* enable */
+		alx_mem_w32(hw, L1C_RXQ0, rxq | L1C_RXQ0_EN);
+		alx_mem_w32(hw, L1C_TXQ0, txq | L1C_TXQ0_EN);
+		if ((en_ctrl & LX_MACSPEED_1000) != 0) {
+			FIELD_SETL(mac, L1C_MAC_CTRL_SPEED,
+				   L1C_MAC_CTRL_SPEED_1000);
+		} else {
+			FIELD_SETL(mac, L1C_MAC_CTRL_SPEED,
+				   L1C_MAC_CTRL_SPEED_10_100);
+		}
+
+		test_set_or_clear(mac, en_ctrl, LX_MACDUPLEX_FULL,
+				  L1C_MAC_CTRL_FULLD);
+
+		/* rx filter */
+		test_set_or_clear(mac, en_ctrl, LX_FLT_PROMISC,
+				  L1C_MAC_CTRL_PROMISC_EN);
+		test_set_or_clear(mac, en_ctrl, LX_FLT_MULTI_ALL,
+				  L1C_MAC_CTRL_MULTIALL_EN);
+		test_set_or_clear(mac, en_ctrl, LX_FLT_BROADCAST,
+				  L1C_MAC_CTRL_BRD_EN);
+		test_set_or_clear(mac, en_ctrl, LX_FLT_DIRECT,
+				  L1C_MAC_CTRL_RX_EN);
+		test_set_or_clear(mac, en_ctrl, LX_FC_TXEN,
+				  L1C_MAC_CTRL_TXFC_EN);
+		test_set_or_clear(mac, en_ctrl, LX_FC_RXEN,
+				  L1C_MAC_CTRL_RXFC_EN);
+		test_set_or_clear(mac, en_ctrl, LX_VLAN_STRIP,
+				  L1C_MAC_CTRL_VLANSTRIP);
+		test_set_or_clear(mac, en_ctrl, LX_LOOPBACK,
+				  L1C_MAC_CTRL_LPBACK_EN);
+		test_set_or_clear(mac, en_ctrl, LX_SINGLE_PAUSE,
+				  L1C_MAC_CTRL_SPAUSE_EN);
+		test_set_or_clear(mac, en_ctrl, LX_ADD_FCS,
+				  (L1C_MAC_CTRL_PCRCE | L1C_MAC_CTRL_CRCE));
+
+		alx_mem_w32(hw, L1C_MAC_CTRL, mac | L1C_MAC_CTRL_TX_EN);
+	} else { /* disable mac */
+		alx_mem_w32(hw, L1C_RXQ0, rxq & ~L1C_RXQ0_EN);
+		alx_mem_w32(hw, L1C_TXQ0, txq & ~L1C_TXQ0_EN);
+
+		/* wait for rxq/txq to become idle */
+		for (i = 0; i < L1C_DMA_MAC_RST_TO; i++) {/* wait at most 1ms */
+			alx_mem_r32(hw, L1C_MAC_STS, &val);
+			if ((val & (L1C_MAC_STS_TXQ_BUSY |
+				    L1C_MAC_STS_RXQ_BUSY)) == 0) {
+				break;
+			}
+#ifdef ALX_LINK_DOWN_CONFIG
+			mdelay(20);
+#else
+			udelay(20);
+#endif
+		}
+		if (L1C_DMA_MAC_RST_TO == i)
+			return LX_ERR_RSTMAC;
+		/* stop mac tx/rx */
+		alx_mem_w32(hw, L1C_MAC_CTRL,
+			    mac & ~(L1C_MAC_CTRL_RX_EN | L1C_MAC_CTRL_TX_EN));
+
+		for (i = 0; i < L1C_DMA_MAC_RST_TO; i++) {
+			alx_mem_r32(hw, L1C_MAC_STS, &val);
+			if ((val & L1C_MAC_STS_IDLE) == 0)
+				break;
+#ifdef ALX_LINK_DOWN_CONFIG
+			mdelay(20);
+#else
+			udelay(10);
+#endif
+		}
+		if (L1C_DMA_MAC_RST_TO == i)
+			return LX_ERR_RSTMAC;
+	}
+
+	return 0;
+}
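+
+/* Illustrative usage (a sketch, not from the original driver): bring the
+ * MAC up at 1Gbps full duplex, receiving unicast and broadcast frames:
+ *
+ *	u16 err = l1c_enable_mac(hw, true,
+ *				 LX_MACSPEED_1000 | LX_MACDUPLEX_FULL |
+ *				 LX_FLT_DIRECT | LX_FLT_BROADCAST);
+ *
+ * All of the LX_* bits tested via test_set_or_clear() above can be OR-ed
+ * into en_ctrl the same way.
+ */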
+
+
+/* enable/disable aspm support
+ * that will change settings for phy/mac/pcie
+ */
+u16 l1c_enable_aspm(struct alx_hw *hw, bool l0s_en, bool l1_en, u8 lnk_stat)
+{
+	u32 pmctrl;
+	bool linkon;
+
+	linkon = (lnk_stat == LX_LC_10H || lnk_stat == LX_LC_10F ||
+		  lnk_stat == LX_LC_100H || lnk_stat == LX_LC_100F ||
+		  lnk_stat == LX_LC_1000F) ? true : false;
+
+	alx_mem_r32(hw, L1C_PMCTRL, &pmctrl);
+	pmctrl &= ~(L1C_PMCTRL_L0S_EN |
+		    L1C_PMCTRL_L1_EN |
+		    L1C_PMCTRL_ASPM_FCEN);
+	FIELD_SETL(pmctrl, L1C_PMCTRL_LCKDET_TIMER,
+		   L1C_PMCTRL_LCKDET_TIMER_DEF);
+
+	/* l1 timer */
+	if (hw->pci_devid == L2CB2_DEV_ID || hw->pci_devid == L1D2_DEV_ID) {
+		pmctrl &= ~L1D_PMCTRL_TXL1_AFTER_L0S;
+		FIELD_SETL(pmctrl, L1D_PMCTRL_L1_TIMER,
+			   (lnk_stat == LX_LC_100H ||
+			    lnk_stat == LX_LC_100F ||
+			    lnk_stat == LX_LC_1000F) ?
+			   L1D_PMCTRL_L1_TIMER_16US : 1);
+	} else {
+		FIELD_SETL(pmctrl, L1C_PMCTRL_L1_TIMER,
+			   (lnk_stat == LX_LC_100H ||
+			    lnk_stat == LX_LC_100F ||
+			    lnk_stat == LX_LC_1000F) ?
+			   ((hw->pci_devid == L2CB_DEV_ID) ?
+			    L1C_PMCTRL_L1_TIMER_L2CB1 : L1C_PMCTRL_L1_TIMER_DEF
+			   ) : 1);
+	}
+	if (l0s_en) { /* on/off l0s only if bios/system enable l0s */
+		pmctrl |= (L1C_PMCTRL_L0S_EN | L1C_PMCTRL_ASPM_FCEN);
+	}
+	if (l1_en) { /* on/off l1 only if bios/system enable l1 */
+		pmctrl |= (L1C_PMCTRL_L1_EN | L1C_PMCTRL_ASPM_FCEN);
+	}
+
+	if (hw->pci_devid == L2CB_DEV_ID || hw->pci_devid == L1D_DEV_ID ||
+	    hw->pci_devid == L2CB2_DEV_ID || hw->pci_devid == L1D2_DEV_ID) {
+		/* If the pm_request_l1 time exceeds the value of this timer,
+		 * it will enter L0s instead of L1 for this ASPM request. */
+		FIELD_SETL(pmctrl, L1C_PMCTRL_L1REQ_TO,
+			   L1C_PMCTRL_L1REG_TO_DEF);
+
+		pmctrl |= L1C_PMCTRL_RCVR_WT_1US    |   /* wait 1us not 2ms */
+			  L1C_PMCTRL_L1_SRDSRX_PWD  |   /* pwd serdes */
+			  L1C_PMCTRL_L1_CLKSW_EN;
+		pmctrl &= ~(L1C_PMCTRL_L1_SRDS_EN   |
+			    L1C_PMCTRL_L1_SRDSPLL_EN|
+			    L1C_PMCTRL_L1_BUFSRX_EN |
+			    L1C_PMCTRL_SADLY_EN     |
+			    L1C_PMCTRL_HOTRST_WTEN);
+		/* disable l0s if linkdown or l2cbv1.x */
+		if (!linkon ||
+		    (!hw->aps_en && hw->pci_devid == L2CB_DEV_ID)) {
+			pmctrl &= ~L1C_PMCTRL_L0S_EN;
+		}
+	} else { /* l1c */
+		FIELD_SETL(pmctrl, L1C_PMCTRL_L1_TIMER, 0);
+		if (linkon) {
+			pmctrl |= L1C_PMCTRL_L1_SRDS_EN     |
+				  L1C_PMCTRL_L1_SRDSPLL_EN  |
+				  L1C_PMCTRL_L1_BUFSRX_EN;
+			pmctrl &= ~(L1C_PMCTRL_L1_SRDSRX_PWD|
+				    L1C_PMCTRL_L1_CLKSW_EN  |
+				    L1C_PMCTRL_L0S_EN       |
+				    L1C_PMCTRL_L1_EN);
+		} else {
+			pmctrl |= L1C_PMCTRL_L1_CLKSW_EN;
+			pmctrl &= ~(L1C_PMCTRL_L1_SRDS_EN   |
+				    L1C_PMCTRL_L1_SRDSPLL_EN|
+				    L1C_PMCTRL_L1_BUFSRX_EN |
+				    L1C_PMCTRL_L0S_EN);
+		}
+	}
+
+	alx_mem_w32(hw, L1C_PMCTRL, pmctrl);
+
+	return 0;
+}
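+
+/* Illustrative call (values chosen for the example only): with the link
+ * up at 1Gbps and only L1 permitted by the system,
+ *
+ *	l1c_enable_aspm(hw, false, true, LX_LC_1000F);
+ *
+ * selects the long L1-entry timer; passing lnk_stat == 0 (link down)
+ * picks the short timer and forces L0s off.
+ */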
+
+
+/* initialize phy for speed / flow control
+ * lnk_cap
+ *    if autoNeg, is link capability to tell the peer
+ *    if force mode, is forced speed/duplex
+ */
+u16 l1c_init_phy_spdfc(struct alx_hw *hw, bool auto_neg,
+		       u8 lnk_cap, bool fc_en)
+{
+	u16 adv, giga, cr;
+	u32 val;
+	u16 ret;
+
+	/* clear flag */
+	l1c_write_phy(hw, false, 0, false, L1C_MII_DBG_ADDR, 0);
+	alx_mem_r32(hw, L1C_DRV, &val);
+	FIELD_SETL(val, LX_DRV_PHY, 0);
+
+	if (auto_neg) {
+		adv = L1C_ADVERTISE_DEFAULT_CAP & ~L1C_ADVERTISE_SPEED_MASK;
+		giga = L1C_GIGA_CR_1000T_DEFAULT_CAP &
+		       ~L1C_GIGA_CR_1000T_SPEED_MASK;
+		val |= LX_DRV_PHY_AUTO;
+		if (!fc_en)
+			adv &= ~(ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
+		else
+			val |= LX_DRV_PHY_FC;
+		if ((LX_LC_10H & lnk_cap) != 0) {
+			adv |= ADVERTISE_10HALF;
+			val |= LX_DRV_PHY_10;
+		}
+		if ((LX_LC_10F & lnk_cap) != 0) {
+			adv |= ADVERTISE_10HALF |
+			       ADVERTISE_10FULL;
+			val |= LX_DRV_PHY_10 | LX_DRV_PHY_DUPLEX;
+		}
+		if ((LX_LC_100H & lnk_cap) != 0) {
+			adv |= ADVERTISE_100HALF;
+			val |= LX_DRV_PHY_100;
+		}
+		if ((LX_LC_100F & lnk_cap) != 0) {
+			adv |= ADVERTISE_100HALF |
+			       ADVERTISE_100FULL;
+			val |= LX_DRV_PHY_100 | LX_DRV_PHY_DUPLEX;
+		}
+		if ((LX_LC_1000F & lnk_cap) != 0) {
+			giga |= L1C_GIGA_CR_1000T_FD_CAPS;
+			val |= LX_DRV_PHY_1000 | LX_DRV_PHY_DUPLEX;
+		}
+
+		ret = l1c_write_phy(hw, false, 0, false, MII_ADVERTISE, adv);
+		ret = l1c_write_phy(hw, false, 0, false, MII_CTRL1000, giga);
+
+		cr = BMCR_RESET | BMCR_ANENABLE | BMCR_ANRESTART;
+		ret = l1c_write_phy(hw, false, 0, false, MII_BMCR, cr);
+	} else { /* force mode */
+		cr = BMCR_RESET;
+		switch (lnk_cap) {
+		case LX_LC_10H:
+			val |= LX_DRV_PHY_10;
+			break;
+		case LX_LC_10F:
+			cr |= BMCR_FULLDPLX;
+			val |= LX_DRV_PHY_10 | LX_DRV_PHY_DUPLEX;
+			break;
+		case LX_LC_100H:
+			cr |= BMCR_SPEED100;
+			val |= LX_DRV_PHY_100;
+			break;
+		case LX_LC_100F:
+			cr |= BMCR_SPEED100 | BMCR_FULLDPLX;
+			val |= LX_DRV_PHY_100 | LX_DRV_PHY_DUPLEX;
+			break;
+		default:
+			return LX_ERR_PARM;
+		}
+		ret = l1c_write_phy(hw, false, 0, false, MII_BMCR, cr);
+	}
+
+	if (!ret) {
+		l1c_write_phy(hw, false, 0, false, L1C_MII_DBG_ADDR,
+			      LX_PHY_INITED);
+	}
+	alx_mem_w32(hw, L1C_DRV, val);
+
+	return ret;
+}
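+
+/* Illustrative call (a sketch): restart autoneg advertising every speed
+ * plus flow control:
+ *
+ *	u16 err = l1c_init_phy_spdfc(hw, true,
+ *				     LX_LC_10H | LX_LC_10F | LX_LC_100H |
+ *				     LX_LC_100F | LX_LC_1000F, true);
+ *
+ * Note that only the final l1c_write_phy() status is returned; the
+ * MII_ADVERTISE and MII_CTRL1000 writes overwrite ret unchecked.
+ */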
+
+
+/* apply power saving settings before entering suspend mode
+ * NOTE:
+ *    1. phy link must be established before calling this function
+ *    2. wol options (pattern, magic, link, etc.) must be configured
+ *       before calling it.
+ */
+u16 l1c_powersaving(struct alx_hw *hw, u8 wire_spd, bool wol_en,
+		    bool mac_txen, bool mac_rxen, bool pws_en)
+{
+	u32 master_ctrl, mac_ctrl, phy_ctrl;
+	u16 pm_ctrl, ret = 0;
+
+	master_ctrl = 0;
+	mac_ctrl = 0;
+	phy_ctrl = 0;
+
+	pws_en = pws_en; /* silence unused-parameter warning */
+
+	alx_mem_r32(hw, L1C_MASTER, &master_ctrl);
+	master_ctrl &= ~L1C_MASTER_PCLKSEL_SRDS;
+
+	alx_mem_r32(hw, L1C_MAC_CTRL, &mac_ctrl);
+	/* 10/100 half */
+	FIELD_SETL(mac_ctrl, L1C_MAC_CTRL_SPEED,  L1C_MAC_CTRL_SPEED_10_100);
+	mac_ctrl &= ~(L1C_MAC_CTRL_FULLD |
+		      L1C_MAC_CTRL_RX_EN |
+		      L1C_MAC_CTRL_TX_EN);
+
+	alx_mem_r32(hw, L1C_PHY_CTRL, &phy_ctrl);
+	phy_ctrl &= ~(L1C_PHY_CTRL_DSPRST_OUT | L1C_PHY_CTRL_CLS);
+	/* if (pws_en) */
+	phy_ctrl |= (L1C_PHY_CTRL_RST_ANALOG | L1C_PHY_CTRL_HIB_PULSE |
+		     L1C_PHY_CTRL_HIB_EN);
+
+	if (wol_en) { /* enable rx packet or tx packet */
+		if (mac_rxen)
+			mac_ctrl |= (L1C_MAC_CTRL_RX_EN | L1C_MAC_CTRL_BRD_EN);
+		if (mac_txen)
+			mac_ctrl |= L1C_MAC_CTRL_TX_EN;
+		if (LX_LC_1000F == wire_spd) {
+			FIELD_SETL(mac_ctrl, L1C_MAC_CTRL_SPEED,
+				   L1C_MAC_CTRL_SPEED_1000);
+		}
+		if (LX_LC_10F == wire_spd || LX_LC_100F == wire_spd ||
+		    LX_LC_1000F == wire_spd) {
+			mac_ctrl |= L1C_MAC_CTRL_FULLD;
+		}
+		phy_ctrl |= L1C_PHY_CTRL_DSPRST_OUT;
+		ret = l1c_write_phy(hw, false, 0, false,
+				    L1C_MII_IER, L1C_IER_LINK_UP);
+	} else {
+		master_ctrl |= L1C_MASTER_PCLKSEL_SRDS;
+		ret = l1c_write_phy(hw, false, 0, false, L1C_MII_IER, 0);
+		phy_ctrl |= (L1C_PHY_CTRL_IDDQ | L1C_PHY_CTRL_POWER_DOWN);
+	}
+	alx_mem_w32(hw, L1C_MASTER, master_ctrl);
+	alx_mem_w32(hw, L1C_MAC_CTRL, mac_ctrl);
+	alx_mem_w32(hw, L1C_PHY_CTRL, phy_ctrl);
+
+	/* set PME_EN so a wake event can assert PME */
+	if (wol_en) {
+		alx_cfg_r16(hw, L1C_PM_CSR, &pm_ctrl);
+		pm_ctrl |= L1C_PM_CSR_PME_EN;
+		alx_cfg_w16(hw, L1C_PM_CSR, pm_ctrl);
+	}
+
+	return ret;
+}
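+
+/* Illustrative call (a sketch): suspend with WoL armed on a 100M
+ * full-duplex link, RX kept running for magic/pattern match, TX stopped:
+ *
+ *	u16 err = l1c_powersaving(hw, LX_LC_100F, true, false, true, true);
+ *
+ * With wol_en == false the PHY is put into IDDQ/power-down instead and
+ * PME_EN is left untouched.
+ */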
+
+
+/* read phy register */
+u16 l1c_read_phy(struct alx_hw *hw, bool ext, u8 dev, bool fast,
+		 u16 reg, u16 *data)
+{
+	u32 val;
+	u16 clk_sel, i, ret = 0;
+
+	*data = 0;
+	clk_sel = fast ?
+		  (u16)L1C_MDIO_CLK_SEL_25MD4 : (u16)L1C_MDIO_CLK_SEL_25MD128;
+
+	if (ext) {
+		val = FIELDL(L1C_MDIO_EXTN_DEVAD, dev) |
+		      FIELDL(L1C_MDIO_EXTN_REG, reg);
+		alx_mem_w32(hw, L1C_MDIO_EXTN, val);
+
+		val = L1C_MDIO_SPRES_PRMBL |
+		      FIELDL(L1C_MDIO_CLK_SEL, clk_sel) |
+		      L1C_MDIO_START |
+		      L1C_MDIO_MODE_EXT |
+		      L1C_MDIO_OP_READ;
+	} else {
+		val = L1C_MDIO_SPRES_PRMBL |
+		      FIELDL(L1C_MDIO_CLK_SEL, clk_sel) |
+		      FIELDL(L1C_MDIO_REG, reg) |
+		      L1C_MDIO_START |
+		      L1C_MDIO_OP_READ;
+	}
+
+	alx_mem_w32(hw, L1C_MDIO, val);
+
+	for (i = 0; i < L1C_MDIO_MAX_AC_TO; i++) {
+		alx_mem_r32(hw, L1C_MDIO, &val);
+		if ((val & L1C_MDIO_BUSY) == 0) {
+			*data = (u16)FIELD_GETX(val, L1C_MDIO_DATA);
+			break;
+		}
+		udelay(10);
+	}
+	if (L1C_MDIO_MAX_AC_TO == i)
+		ret = LX_ERR_MIIBUSY;
+
+	return ret;
+}
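+
+/* Busy-wait bound above: L1C_MDIO_MAX_AC_TO (120) polls spaced 10us
+ * apart, so a stuck MDIO gives up after roughly 1.2ms and returns
+ * LX_ERR_MIIBUSY; l1c_write_phy() below uses the same bound.
+ */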
+
+/* write phy register */
+u16 l1c_write_phy(struct alx_hw *hw, bool ext, u8 dev, bool fast,
+		  u16 reg, u16 data)
+{
+	u32 val;
+	u16 clk_sel, i, ret = 0;
+
+	clk_sel = fast ?
+		  (u16)L1C_MDIO_CLK_SEL_25MD4 : (u16)L1C_MDIO_CLK_SEL_25MD128;
+
+	if (ext) {
+		val = FIELDL(L1C_MDIO_EXTN_DEVAD, dev) |
+		      FIELDL(L1C_MDIO_EXTN_REG, reg);
+		alx_mem_w32(hw, L1C_MDIO_EXTN, val);
+
+		val = L1C_MDIO_SPRES_PRMBL |
+		      FIELDL(L1C_MDIO_CLK_SEL, clk_sel) |
+		      FIELDL(L1C_MDIO_DATA, data) |
+		      L1C_MDIO_START |
+		      L1C_MDIO_MODE_EXT;
+	} else {
+		val = L1C_MDIO_SPRES_PRMBL |
+		      FIELDL(L1C_MDIO_CLK_SEL, clk_sel) |
+		      FIELDL(L1C_MDIO_REG, reg) |
+		      FIELDL(L1C_MDIO_DATA, data) |
+		      L1C_MDIO_START;
+	}
+
+	alx_mem_w32(hw, L1C_MDIO, val);
+
+	for (i = 0; i < L1C_MDIO_MAX_AC_TO; i++) {
+		alx_mem_r32(hw, L1C_MDIO, &val);
+		if ((val & L1C_MDIO_BUSY) == 0)
+			break;
+		udelay(10);
+	}
+
+	if (L1C_MDIO_MAX_AC_TO == i)
+		ret = LX_ERR_MIIBUSY;
+
+	return ret;
+}
+
+u16 l1c_read_phydbg(struct alx_hw *hw, bool fast, u16 reg, u16 *data)
+{
+	u16 ret;
+
+	ret = l1c_write_phy(hw, false, 0, fast, L1C_MII_DBG_ADDR, reg);
+	ret = l1c_read_phy(hw, false, 0, fast, L1C_MII_DBG_DATA, data);
+
+	return ret;
+}
+
+u16 l1c_write_phydbg(struct alx_hw *hw, bool fast, u16 reg, u16 data)
+{
+	u16 ret;
+
+	ret = l1c_write_phy(hw, false, 0, fast, L1C_MII_DBG_ADDR, reg);
+	ret = l1c_write_phy(hw, false, 0, fast, L1C_MII_DBG_DATA, data);
+
+	return ret;
+}
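+
+/* The debug-port helpers above use the usual indirect scheme: write the
+ * register index to L1C_MII_DBG_ADDR, then transfer the payload through
+ * L1C_MII_DBG_DATA. A read-modify-write is therefore (illustrative
+ * sketch, mirroring calls made earlier in this file):
+ *
+ *	u16 v;
+ *
+ *	l1c_read_phydbg(hw, true, L1C_MIIDBG_LEGCYPS, &v);
+ *	l1c_write_phydbg(hw, true, L1C_MIIDBG_LEGCYPS, v | L1C_LEGCYPS_EN);
+ *
+ * Note each helper returns only the status of its second access; strict
+ * callers must check the L1C_MII_DBG_ADDR write separately.
+ */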
+
+
+/*
+ * basic mac initialization
+ *  most advanced features are left uninitialized
+ *  MAC/PHY should be reset before calling this function
+ *  smb_timer : milliseconds
+ *  int_mod   : microseconds
+ *  RSS is disabled by default
+ */
+u16 l1c_init_mac(struct alx_hw *hw, u8 *addr, u32 txmem_hi,
+		 u32 *tx_mem_lo, u8 tx_qnum, u16 txring_sz,
+		 u32 rxmem_hi, u32 rfdmem_lo, u32 rrdmem_lo,
+		 u16 rxring_sz, u16 rxbuf_sz, u16 smb_timer,
+		 u16 mtu, u16 int_mod, bool hash_legacy)
+{
+	u32 val;
+	u16 val16;
+	u8 dmar_len;
+
+	/* set mac-address */
+	val = *(u32 *)(addr + 2);
+	alx_mem_w32(hw, L1C_STAD0, LX_SWAP_DW(val));
+	val = *(u16 *)addr;
+	alx_mem_w32(hw, L1C_STAD1, LX_SWAP_W((u16)val));
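+	/* Assuming LX_SWAP_DW/LX_SWAP_W are plain byte-order swaps (they
+	 * come from alx_hwcom.h), a MAC of 00:11:22:33:44:55 is stored as
+	 * STAD0 = 0x22334455 and STAD1 = 0x0011 (illustrative only). */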
+
+	/* clear multicast hash table, select hash algorithm */
+	alx_mem_w32(hw, L1C_HASH_TBL0, 0);
+	alx_mem_w32(hw, L1C_HASH_TBL1, 0);
+	alx_mem_r32(hw, L1C_MAC_CTRL, &val);
+	if (hash_legacy)
+		val |= L1C_MAC_CTRL_MHASH_ALG_HI5B;
+	else
+		val &= ~L1C_MAC_CTRL_MHASH_ALG_HI5B;
+	alx_mem_w32(hw, L1C_MAC_CTRL, val);
+
+	/* clear any wol setting/status */
+	alx_mem_r32(hw, L1C_WOL0, &val);
+	alx_mem_w32(hw, L1C_WOL0, 0);
+
+	/* clk gating */
+	alx_mem_w32(hw, L1C_CLK_GATE, (hw->pci_devid == L1D_DEV_ID) ? 0 :
+		       (L1C_CLK_GATE_DMAR | L1C_CLK_GATE_DMAW |
+			L1C_CLK_GATE_TXQ  | L1C_CLK_GATE_RXQ  |
+			L1C_CLK_GATE_TXMAC));
+
+	/* descriptor ring base memory */
+	alx_mem_w32(hw, L1C_TX_BASE_ADDR_HI, txmem_hi);
+	alx_mem_w32(hw, L1C_TPD_RING_SZ, txring_sz);
+	switch (tx_qnum) {
+	case 2:
+		alx_mem_w32(hw, L1C_TPD_PRI1_ADDR_LO, tx_mem_lo[1]);
+		/* fall through */
+	case 1:
+		alx_mem_w32(hw, L1C_TPD_PRI0_ADDR_LO, tx_mem_lo[0]);
+		break;
+	default:
+		return LX_ERR_PARM;
+	}
+	alx_mem_w32(hw, L1C_RX_BASE_ADDR_HI, rxmem_hi);
+	alx_mem_w32(hw, L1C_RFD_ADDR_LO, rfdmem_lo);
+	alx_mem_w32(hw, L1C_RRD_ADDR_LO, rrdmem_lo);
+	alx_mem_w32(hw, L1C_RFD_BUF_SZ, rxbuf_sz);
+	alx_mem_w32(hw, L1C_RRD_RING_SZ, rxring_sz);
+	alx_mem_w32(hw, L1C_RFD_RING_SZ, rxring_sz);
+	alx_mem_w32(hw, L1C_SMB_TIMER, smb_timer * 500UL);
+
+	if (hw->pci_devid == L2CB_DEV_ID) {
+		/* revise SRAM configuration */
+		alx_mem_w32(hw, L1C_SRAM5, L1C_SRAM_RXF_LEN_L2CB1);
+		alx_mem_w32(hw, L1C_SRAM7, L1C_SRAM_TXF_LEN_L2CB1);
+		alx_mem_w32(hw, L1C_SRAM4, L1C_SRAM_RXF_HT_L2CB1);
+		alx_mem_w32(hw, L1C_SRAM0, L1C_SRAM_RFD_HT_L2CB1);
+		alx_mem_w32(hw, L1C_SRAM6, L1C_SRAM_TXF_HT_L2CB1);
+		alx_mem_w32(hw, L1C_SRAM2, L1C_SRAM_TRD_HT_L2CB1);
+		alx_mem_w32(hw, L1C_TXQ2, 0); /* TX watermark for entering L1 */
+		alx_mem_w32(hw, L1C_RXQ3, 0); /* RXD threshold. */
+	}
+	alx_mem_w32(hw, L1C_SRAM9, L1C_SRAM_LOAD_PTR);
+
+	/* interrupt moderation */
+	alx_mem_r32(hw, L1C_MASTER, &val);
+	val |= L1C_MASTER_IRQMOD2_EN | L1C_MASTER_IRQMOD1_EN |
+	    L1C_MASTER_SYSALVTIMER_EN;  /* sysalive */
+	alx_mem_w32(hw, L1C_MASTER, val);
+	/* set Interrupt Moderation Timer (caps interrupts per second);
+	 * we use separate timers for rx/tx */
+	alx_mem_w32(hw, L1C_IRQ_MODU_TIMER,
+		    FIELDL(L1C_IRQ_MODU_TIMER1, int_mod) |
+		    FIELDL(L1C_IRQ_MODU_TIMER2, int_mod >> 1));
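+	/* With the IRQMOD bits set above, TIMER1 paces tx/rx and TIMER2 is
+	 * rx-only; rx gets half the int_mod interval, e.g. int_mod == 200
+	 * gives timer1 = 200 and timer2 = 100 (tick width is hardware
+	 * defined; illustrative numbers only). */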
+
+	/* tpd threshold to trig int */
+	alx_mem_w32(hw, L1C_TINT_TPD_THRSHLD, (u32)txring_sz / 3);
+	alx_mem_w32(hw, L1C_TINT_TIMER, int_mod * 2);
+	/* re-send int */
+	alx_mem_w32(hw, L1C_INT_RETRIG, L1C_INT_RETRIG_TO);
+
+	/* mtu */
+	alx_mem_w32(hw, L1C_MTU, (u32)(mtu + 4 + 4)); /* crc + vlan */
+
+	/* txq */
+	if ((mtu + 8) < L1C_TXQ1_JUMBO_TSO_TH)
+		val = (u32)(mtu + 8 + 7); /* 7 for QWORD align */
+	else
+		val = L1C_TXQ1_JUMBO_TSO_TH;
+	alx_mem_w32(hw, L1C_TXQ1, val >> 3);
+
+	alx_mem_r32(hw, L1C_DEV_CTRL, &val);
+	dmar_len = (u8)FIELD_GETX(val, L1C_DEV_CTRL_MAXRRS);
+	/* if the BIOS changed the default dma read max length,
+	 * restore it to the default value */
+	if (dmar_len < L1C_DEV_CTRL_MAXRRS_MIN) {
+		FIELD_SETL(val, L1C_DEV_CTRL_MAXRRS, L1C_DEV_CTRL_MAXRRS_MIN);
+		alx_mem_w32(hw, L1C_DEV_CTRL, val);
+		dmar_len = L1C_DEV_CTRL_MAXRRS_MIN;
+	}
+	val = FIELDL(L1C_TXQ0_TPD_BURSTPREF, L1C_TXQ0_TPD_BURSTPREF_DEF) |
+	      L1C_TXQ0_MODE_ENHANCE |
+	      L1C_TXQ0_LSO_8023_EN |
+	      L1C_TXQ0_SUPT_IPOPT |
+	      FIELDL(L1C_TXQ0_TXF_BURST_PREF,
+		     (hw->pci_devid == L2CB_DEV_ID ||
+		      hw->pci_devid == L2CB2_DEV_ID) ?
+		     L1C_TXQ0_TXF_BURST_PREF_L2CB :
+		     L1C_TXQ0_TXF_BURST_PREF_DEF);
+	alx_mem_w32(hw, L1C_TXQ0, val);
+
+	/* flow control */
+	alx_mem_r32(hw, L1C_SRAM5, &val);
+	val = FIELD_GETX(val, L1C_SRAM_RXF_LEN) << 3; /* bytes */
+	if (val > L1C_SRAM_RXF_LEN_8K) {
+		val16 = L1C_MTU_STD_ALGN;
+		val = (val - (2 * L1C_MTU_STD_ALGN + L1C_MTU_MIN));
+	} else {
+		val16 = L1C_MTU_STD_ALGN;
+		val = (val - L1C_MTU_STD_ALGN);
+	}
+	alx_mem_w32(hw, L1C_RXQ2,
+		    FIELDL(L1C_RXQ2_RXF_XOFF_THRESH, val16 >> 3) |
+		    FIELDL(L1C_RXQ2_RXF_XON_THRESH, val >> 3));
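+	/* Worked example of the arithmetic above: the SRAM field is in
+	 * 8-byte units and converted to bytes (<< 3); for an RX FIFO of
+	 * exactly 8KB the else-branch yields XOFF = 1536 and
+	 * XON = 8192 - 1536 = 6656 bytes, both written back in 8-byte
+	 * units (>> 3). */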
+	/* rxq */
+	val = FIELDL(L1C_RXQ0_NUM_RFD_PREF, L1C_RXQ0_NUM_RFD_PREF_DEF) |
+	    L1C_RXQ0_IPV6_PARSE_EN;
+
+	if ((hw->pci_devid & 1) != 0) {
+		FIELD_SETL(val, L1C_RXQ0_ASPM_THRESH,
+			   (hw->pci_devid == L1D2_DEV_ID) ?
+			   L1C_RXQ0_ASPM_THRESH_NO :
+			   L1C_RXQ0_ASPM_THRESH_100M);
+	}
+	alx_mem_w32(hw, L1C_RXQ0, val);
+
+	/* rfd producer index */
+	alx_mem_w32(hw, L1C_RFD_PIDX, (u32)rxring_sz - 1);
+
+	/* DMA */
+	val = FIELDL(L1C_DMA_RORDER_MODE, L1C_DMA_RORDER_MODE_OUT) |
+	      L1C_DMA_RREQ_PRI_DATA |
+	      FIELDL(L1C_DMA_RREQ_BLEN, dmar_len) |
+	      FIELDL(L1C_DMA_WDLY_CNT, L1C_DMA_WDLY_CNT_DEF) |
+	      FIELDL(L1C_DMA_RDLY_CNT, L1C_DMA_RDLY_CNT_DEF);
+	alx_mem_w32(hw, L1C_DMA, val);
+
+	return 0;
+}
+
+
+u16 l1c_get_phy_config(struct alx_hw *hw)
+{
+	u32 val;
+	u16 phy_val;
+
+	alx_mem_r32(hw, L1C_PHY_CTRL, &val);
+	if ((val & L1C_PHY_CTRL_DSPRST_OUT) == 0) { /* phy in rst */
+		return LX_DRV_PHY_UNKNOWN;
+	}
+
+	alx_mem_r32(hw, L1C_DRV, &val);
+	val = FIELD_GETX(val, LX_DRV_PHY);
+	if (LX_DRV_PHY_UNKNOWN == val)
+		return LX_DRV_PHY_UNKNOWN;
+
+	l1c_read_phy(hw, false, 0, false, L1C_MII_DBG_ADDR, &phy_val);
+
+	if (LX_PHY_INITED == phy_val)
+		return (u16) val;
+
+	return LX_DRV_PHY_UNKNOWN;
+}
+
+u16 l1c_apply_phy_hib_patch(struct alx_hw *hw)
+{
+	u16 control, cr;
+	u8 link_cap = 0;
+	u32 speed = 0;
+	bool link_up = false;
+	u16 i;
+
+	l1c_read_phydbg(hw, false, 0xc, &control);
+
+	/* bit 11: 0 means in hibernation, 1 means not */
+	if (control & BIT(11))
+		hw->bInHibMode = false;
+	else
+		hw->bInHibMode = true;
+
+	if ((hw->bInHibMode) && (!hw->bHibPatched)) {
+		if ((hw->mac_type == alx_mac_l2cb_v1) ||
+		    (hw->mac_type == alx_mac_l2cb_v20) ||
+		    (hw->mac_type == alx_mac_l2cb_v21)) {
+			l1c_write_phy(hw, false, 0, false, MII_BMCR,
+				      (BMCR_FULLDPLX | BMCR_SPEED100));
+		} else if ((hw->mac_type == alx_mac_l1d_v1) ||
+			   (hw->mac_type == alx_mac_l1d_v2)) {
+			l1c_write_phy(hw, false, 0, false, MII_BMCR,
+				      (BMCR_FULLDPLX | BMCR_SPEED1000));
+		}
+		hw->bHibPatched = true;
+
+	} else if (!hw->bInHibMode && hw->bHibPatched) {
+		for (i = 0; i < 10; i++) {
+			hw->cbs.check_phy_link(hw, &speed, &link_up);
+
+			if (link_up) {
+				if (speed & ALX_LINK_SPEED_1GB_FULL)
+					link_cap |= LX_LC_1000F;
+
+				if (speed & ALX_LINK_SPEED_100_FULL)
+					link_cap |= LX_LC_100F;
+
+				if (speed & ALX_LINK_SPEED_100_HALF)
+					link_cap |= LX_LC_100H;
+
+				if (speed & ALX_LINK_SPEED_10_FULL)
+					link_cap |= LX_LC_10F;
+
+				if (speed & ALX_LINK_SPEED_10_HALF)
+					link_cap |= LX_LC_10H;
+
+				l1c_init_phy_spdfc(hw, true, link_cap,
+						   !hw->disable_fc_autoneg);
+				break;
+			}
+
+			mdelay(100);
+		}
+
+		if (!link_up) {
+			cr = BMCR_RESET | BMCR_ANENABLE | BMCR_ANRESTART;
+			l1c_write_phy(hw, false, 0, false, MII_BMCR, cr);
+		}
+
+		hw->bHibPatched = false;
+	}
+
+	return 0;
+}
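+
+/* Behavior of the patch above, summarized: while the PHY reports
+ * hibernation (debug reg 0xc, bit 11 clear), BMCR is forced to full
+ * duplex at 100M (l2cb) or 1000M (l1d); on leaving hibernation, autoneg
+ * is re-initialized from the observed link speed, or simply restarted
+ * if no link appears within ~1s (10 polls x 100ms).
+ */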
diff -Nruw linux-4.4.115-fbx/drivers/net/ethernet/atheros/alx_prop./alc_hw.h linux-4.4.115-fbx/drivers/net/ethernet/atheros/alx_prop/alc_hw.h
--- linux-4.4.115-fbx/drivers/net/ethernet/atheros/alx_prop./alc_hw.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/net/ethernet/atheros/alx_prop/alc_hw.h	2019-01-22 16:16:24.927259302 +0100
@@ -0,0 +1,1327 @@
+/*
+ * Copyright (c) 2012 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef L1C_HW_H_
+#define L1C_HW_H_
+
+/*********************************************************************
+ * some requirements for l1x_sw.h
+ *
+ * 1. some basic types must be defined if they are not provided by
+ *    your compiler:
+ *    u8, u16, u32, bool
+ *
+ * 2. the PETHCONTEXT definition should be in l1x_sw.h and it must
+ *    contain pci_devid & pci_venid & pci_revid
+ *
+ *********************************************************************/
+
+#include "alx_hwcom.h"
+
+/******************************************************************************/
+
+#define L1C_DEV_ID                      0x1063
+#define L2C_DEV_ID                      0x1062
+#define L2CB_DEV_ID                     0x2060
+#define L2CB2_DEV_ID                    0x2062
+#define L1D_DEV_ID                      0x1073
+#define L1D2_DEV_ID                     0x1083
+
+#define L2CB_V10                        0xC0
+#define L2CB_V11                        0xC1
+#define L2CB_V20                        0xC0
+#define L2CB_V21                        0xC1
+
+#define L1C_PM_CSR                      0x0044  /* 16bit */
+#define L1C_PM_CSR_PME_STAT             BIT(15)
+#define L1C_PM_CSR_DSCAL_MASK           ASHFT13(3U)
+#define L1C_PM_CSR_DSCAL_SHIFT          13
+#define L1C_PM_CSR_DSEL_MASK            ASHFT9(0xFU)
+#define L1C_PM_CSR_DSEL_SHIFT           9
+#define L1C_PM_CSR_PME_EN               BIT(8)
+#define L1C_PM_CSR_PWST_MASK            ASHFT0(3U)
+#define L1C_PM_CSR_PWST_SHIFT           0
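+
+/* Note: the ASHFTn() helpers come from alx_hwcom.h and are assumed here
+ * to expand to ((x) << n), e.g. L1C_PM_CSR_DSCAL_MASK == (3U << 13);
+ * every *_MASK/*_SHIFT pair below follows this pattern and is consumed
+ * by the FIELDL/FIELD_SETL/FIELD_GETX helpers used in the code above.
+ */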
+
+#define L1C_PM_DATA                     0x0047  /* 8bit */
+
+#define L1C_DEV_CAP                     0x005C
+#define L1C_DEV_CAP_SPLSL_MASK          ASHFT26(3UL)
+#define L1C_DEV_CAP_SPLSL_SHIFT         26
+#define L1C_DEV_CAP_SPLV_MASK           ASHFT18(0xFFUL)
+#define L1C_DEV_CAP_SPLV_SHIFT          18
+#define L1C_DEV_CAP_RBER                BIT(15)
+#define L1C_DEV_CAP_PIPRS               BIT(14)
+#define L1C_DEV_CAP_AIPRS               BIT(13)
+#define L1C_DEV_CAP_ABPRS               BIT(12)
+#define L1C_DEV_CAP_L1ACLAT_MASK        ASHFT9(7UL)
+#define L1C_DEV_CAP_L1ACLAT_SHIFT       9
+#define L1C_DEV_CAP_L0SACLAT_MASK       ASHFT6(7UL)
+#define L1C_DEV_CAP_L0SACLAT_SHIFT      6
+#define L1C_DEV_CAP_EXTAG               BIT(5)
+#define L1C_DEV_CAP_PHANTOM             BIT(4)
+#define L1C_DEV_CAP_MPL_MASK            ASHFT0(7UL)
+#define L1C_DEV_CAP_MPL_SHIFT           0
+#define L1C_DEV_CAP_MPL_128             1
+#define L1C_DEV_CAP_MPL_256             2
+#define L1C_DEV_CAP_MPL_512             3
+#define L1C_DEV_CAP_MPL_1024            4
+#define L1C_DEV_CAP_MPL_2048            5
+#define L1C_DEV_CAP_MPL_4096            6
+
+#define L1C_DEV_CTRL                    0x0060    /* 16bit */
+#define L1C_DEV_CTRL_MAXRRS_MASK        ASHFT12(7U)
+#define L1C_DEV_CTRL_MAXRRS_SHIFT       12
+#define L1C_DEV_CTRL_MAXRRS_MIN         2
+#define L1C_DEV_CTRL_NOSNP_EN           BIT(11)
+#define L1C_DEV_CTRL_AUXPWR_EN          BIT(10)
+#define L1C_DEV_CTRL_PHANTOM_EN         BIT(9)
+#define L1C_DEV_CTRL_EXTAG_EN           BIT(8)
+#define L1C_DEV_CTRL_MPL_MASK           ASHFT5(7U)
+#define L1C_DEV_CTRL_MPL_SHIFT          5
+#define L1C_DEV_CTRL_RELORD_EN          BIT(4)
+#define L1C_DEV_CTRL_URR_EN             BIT(3)
+#define L1C_DEV_CTRL_FERR_EN            BIT(2)
+#define L1C_DEV_CTRL_NFERR_EN           BIT(1)
+#define L1C_DEV_CTRL_CERR_EN            BIT(0)
+
+#define L1C_DEV_STAT                    0x0062    /* 16bit */
+#define L1C_DEV_STAT_XS_PEND            BIT(5)
+#define L1C_DEV_STAT_AUXPWR             BIT(4)
+#define L1C_DEV_STAT_UR                 BIT(3)
+#define L1C_DEV_STAT_FERR               BIT(2)
+#define L1C_DEV_STAT_NFERR              BIT(1)
+#define L1C_DEV_STAT_CERR               BIT(0)
+
+#define L1C_LNK_CAP                     0x0064
+#define L1C_LNK_CAP_PRTNUM_MASK         ASHFT24(0xFFUL)
+#define L1C_LNK_CAP_PRTNUM_SHIFT        24
+#define L1C_LNK_CAP_CLK_PM              BIT(18)
+#define L1C_LNK_CAP_L1EXTLAT_MASK       ASHFT15(7UL)
+#define L1C_LNK_CAP_L1EXTLAT_SHIFT      15
+#define L1C_LNK_CAP_L0SEXTLAT_MASK      ASHFT12(7UL)
+#define L1C_LNK_CAP_L0SEXTLAT_SHIFT     12
+#define L1C_LNK_CAP_ASPM_SUP_MASK       ASHFT10(3UL)
+#define L1C_LNK_CAP_ASPM_SUP_SHIFT      10
+#define L1C_LNK_CAP_ASPM_SUP_L0S        1
+#define L1C_LNK_CAP_ASPM_SUP_L0SL1      3
+#define L1C_LNK_CAP_MAX_LWH_MASK        ASHFT4(0x3FUL)
+#define L1C_LNK_CAP_MAX_LWH_SHIFT       4
+#define L1C_LNK_CAP_MAX_LSPD_MASH       ASHFT0(0xFUL)
+#define L1C_LNK_CAP_MAX_LSPD_SHIFT      0
+
+#define L1C_LNK_CTRL                    0x0068  /* 16bit */
+#define L1C_LNK_CTRL_CLK_PM_EN          BIT(8)
+#define L1C_LNK_CTRL_EXTSYNC            BIT(7)
+#define L1C_LNK_CTRL_CMNCLK_CFG         BIT(6)
+#define L1C_LNK_CTRL_RCB_128B           BIT(3)  /* 0:64b,1:128b */
+#define L1C_LNK_CTRL_ASPM_MASK          ASHFT0(3U)
+#define L1C_LNK_CTRL_ASPM_SHIFT         0
+#define L1C_LNK_CTRL_ASPM_DIS           0
+#define L1C_LNK_CTRL_ASPM_ENL0S         1
+#define L1C_LNK_CTRL_ASPM_ENL1          2
+#define L1C_LNK_CTRL_ASPM_ENL0SL1       3
+
+#define L1C_LNK_STAT                    0x006A  /* 16bit */
+#define L1C_LNK_STAT_SCLKCFG            BIT(12)
+#define L1C_LNK_STAT_LNKTRAIN           BIT(11)
+#define L1C_LNK_STAT_TRNERR             BIT(10)
+#define L1C_LNK_STAT_LNKSPD_MASK        ASHFT0(0xFU)
+#define L1C_LNK_STAT_LNKSPD_SHIFT       0
+#define L1C_LNK_STAT_NEGLW_MASK         ASHFT4(0x3FU)
+#define L1C_LNK_STAT_NEGLW_SHIFT        4
+
+#define L1C_UE_SVRT                     0x010C
+#define L1C_UE_SVRT_UR                  BIT(20)
+#define L1C_UE_SVRT_ECRCERR             BIT(19)
+#define L1C_UE_SVRT_MTLP                BIT(18)
+#define L1C_UE_SVRT_RCVOVFL             BIT(17)
+#define L1C_UE_SVRT_UNEXPCPL            BIT(16)
+#define L1C_UE_SVRT_CPLABRT             BIT(15)
+#define L1C_UE_SVRT_CPLTO               BIT(14)
+#define L1C_UE_SVRT_FCPROTERR           BIT(13)
+#define L1C_UE_SVRT_PTLP                BIT(12)
+#define L1C_UE_SVRT_DLPROTERR           BIT(4)
+#define L1C_UE_SVRT_TRNERR              BIT(0)
+
+#define L1C_SLD                         0x0218  /* efuse load */
+#define L1C_SLD_FREQ_MASK               ASHFT24(3UL)
+#define L1C_SLD_FREQ_SHIFT              24
+#define L1C_SLD_FREQ_100K               0
+#define L1C_SLD_FREQ_200K               1
+#define L1C_SLD_FREQ_300K               2
+#define L1C_SLD_FREQ_400K               3
+#define L1C_SLD_EXIST                   BIT(23)
+#define L1C_SLD_SLVADDR_MASK            ASHFT16(0x7FUL)
+#define L1C_SLD_SLVADDR_SHIFT           16
+#define L1C_SLD_IDLE                    BIT(13)
+#define L1C_SLD_STAT                    BIT(12)  /* 0:finish,1:in progress */
+#define L1C_SLD_START                   BIT(11)
+#define L1C_SLD_STARTADDR_MASK          ASHFT0(0xFFUL)
+#define L1C_SLD_STARTADDR_SHIFT         0
+#define L1C_SLD_MAX_TO                  100
+
+#define L1C_PPHY_MISC1                  0x1000
+#define L1C_PPHY_MISC1_RCVDET           BIT(2)
+#define L1C_PPHY_MISC1_NFTS_MASK        ASHFT16(0xFFUL)
+#define L1C_PPHY_MISC1_NFTS_SHIFT       16
+#define L1C_PPHY_MISC1_NFTS_HIPERF      0xA0    /* ???? */
+
+#define L1C_PPHY_MISC2                  0x1004
+#define L1C_PPHY_MISC2_L0S_TH_MASK      ASHFT18(0x3UL)
+#define L1C_PPHY_MISC2_L0S_TH_SHIFT     18
+#define L1C_PPHY_MISC2_L0S_TH_L2CB1     3
+#define L1C_PPHY_MISC2_CDR_BW_MASK      ASHFT16(0x3UL)
+#define L1C_PPHY_MISC2_CDR_BW_SHIFT     16
+#define L1C_PPHY_MISC2_CDR_BW_L2CB1     3
+
+#define L1C_PDLL_TRNS1                  0x1104
+#define L1C_PDLL_TRNS1_D3PLLOFF_EN      BIT(11)
+#define L1C_PDLL_TRNS1_REGCLK_SEL_NORM  BIT(10)
+#define L1C_PDLL_TRNS1_REPLY_TO_MASK    ASHFT0(0x3FFUL)
+#define L1C_PDLL_TRNS1_REPLY_TO_SHIFT   0
+
+#define L1C_TWSI_DBG                    0x1108
+#define L1C_TWSI_DBG_DEV_EXIST          BIT(29)
+
+#define L1C_DMA_DBG                     0x1114
+#define L1C_DMA_DBG_VENDOR_MSG          BIT(0)
+
+#define L1C_TLEXTN_STATS                0x1204  /* diff with l1f */
+#define L1C_TLEXTN_STATS_DEVNO_MASK     ASHFT16(0x1FUL)
+#define L1C_TLEXTN_STATS_DEVNO_SHIFT    16
+#define L1C_TLEXTN_STATS_BUSNO_MASK     ASHFT8(0xFFUL)
+#define L1C_TLEXTN_STATS_BUSNO_SHIFT    8
+
+#define L1C_EFUSE_CTRL                  0x12C0
+#define L1C_EFUSE_CTRL_FLAG             BIT(31)  /* 0:read,1:write */
+#define L1C_EUFSE_CTRL_ACK              BIT(30)
+#define L1C_EFUSE_CTRL_ADDR_MASK        ASHFT16(0x3FFUL)
+#define L1C_EFUSE_CTRL_ADDR_SHIFT       16
+
+#define L1C_EFUSE_DATA                  0x12C4
+
+#define EFUSE_OP_MAX_AC_TIMER           100     /* 1ms */
+
+#define L1C_EFUSE_CTRL2                 0x12F0
+#define L1C_EFUSE_CTRL2_CLK_EN          BIT(1)
+
+#define L1C_PMCTRL                      0x12F8
+#define L1C_PMCTRL_HOTRST_WTEN          BIT(31)
+#define L1C_PMCTRL_ASPM_FCEN            BIT(30)  /* L0s/L1 dis by MAC based on
+						 * throughput (setting in 15A0) */
+#define L1C_PMCTRL_SADLY_EN             BIT(29)
+#define L1C_PMCTRL_L0S_BUFSRX_EN        BIT(28)
+#define L1C_PMCTRL_LCKDET_TIMER_MASK    ASHFT24(0xFUL)
+#define L1C_PMCTRL_LCKDET_TIMER_SHIFT   24
+#define L1C_PMCTRL_LCKDET_TIMER_DEF     0xC
+#define L1C_PMCTRL_L1REQ_TO_MASK        ASHFT20(0xFUL)
+#define L1C_PMCTRL_L1REQ_TO_SHIFT       20      /* if pm_request_l1 time > this,
+						 * enter L0s instead of L1 */
+#define L1C_PMCTRL_L1REG_TO_DEF         0xF
+#define L1D_PMCTRL_TXL1_AFTER_L0S       BIT(19)  /* l1dv2.0+ */
+#define L1D_PMCTRL_L1_TIMER_MASK        ASHFT16(7UL)
+#define L1D_PMCTRL_L1_TIMER_SHIFT       16
+#define L1D_PMCTRL_L1_TIMER_DIS         0
+#define L1D_PMCTRL_L1_TIMER_2US         1
+#define L1D_PMCTRL_L1_TIMER_4US         2
+#define L1D_PMCTRL_L1_TIMER_8US         3
+#define L1D_PMCTRL_L1_TIMER_16US        4
+#define L1D_PMCTRL_L1_TIMER_24US        5
+#define L1D_PMCTRL_L1_TIMER_32US        6
+#define L1D_PMCTRL_L1_TIMER_63US        7
+#define L1C_PMCTRL_L1_TIMER_MASK        ASHFT16(0xFUL)
+#define L1C_PMCTRL_L1_TIMER_SHIFT       16
+#define L1C_PMCTRL_L1_TIMER_L2CB1       7
+#define L1C_PMCTRL_L1_TIMER_DEF         0xF
+#define L1C_PMCTRL_RCVR_WT_1US          BIT(15)  /* 1:1us, 0:2ms */
+#define L1C_PMCTRL_PWM_VER_11           BIT(14)  /* 0:1.0a,1:1.1 */
+#define L1C_PMCTRL_L1_CLKSW_EN          BIT(13)  /* en pcie clk sw in L1 */
+#define L1C_PMCTRL_L0S_EN               BIT(12)
+#define L1D_PMCTRL_RXL1_AFTER_L0S       BIT(11)  /* l1dv2.0+ */
+#define L1D_PMCTRL_L0S_TIMER_MASK       ASHFT8(7UL)
+#define L1D_PMCTRL_L0S_TIMER_SHIFT      8
+#define L1C_PMCTRL_L0S_TIMER_MASK       ASHFT8(0xFUL)
+#define L1C_PMCTRL_L0S_TIMER_SHIFT      8
+#define L1C_PMCTRL_L1_BUFSRX_EN         BIT(7)
+#define L1C_PMCTRL_L1_SRDSRX_PWD        BIT(6)   /* power down serdes rx */
+#define L1C_PMCTRL_L1_SRDSPLL_EN        BIT(5)
+#define L1C_PMCTRL_L1_SRDS_EN           BIT(4)
+#define L1C_PMCTRL_L1_EN                BIT(3)
+#define L1C_PMCTRL_CLKREQ_EN            BIT(2)
+#define L1C_PMCTRL_RBER_EN              BIT(1)
+#define L1C_PMCTRL_SPRSDWER_EN          BIT(0)
+
+#define L1C_LTSSM_CTRL                  0x12FC
+#define L1C_LTSSM_WRO_EN                BIT(12)
+#define L1C_LTSSM_TXTLP_BYPASS          BIT(7)
+
+#define L1C_MASTER                      0x1400
+#define L1C_MASTER_OTP_FLG              BIT(31)
+#define L1C_MASTER_DEV_NUM_MASK         ASHFT24(0x7FUL)
+#define L1C_MASTER_DEV_NUM_SHIFT        24
+#define L1C_MASTER_REV_NUM_MASK         ASHFT16(0xFFUL)
+#define L1C_MASTER_REV_NUM_SHIFT        16
+#define L1C_MASTER_RDCLR_INT            BIT(14)
+#define L1C_MASTER_CLKSW_L2EV1          BIT(13)      /* 0:l2ev2.0,1:l2ev1.0 */
+#define L1C_MASTER_PCLKSEL_SRDS         BIT(12)      /* 1:always sel pclk from
+						     * serdes, don't switch to 25M */
+#define L1C_MASTER_IRQMOD2_EN           BIT(11)      /* IRQ MODERATION FOR RX */
+#define L1C_MASTER_IRQMOD1_EN           BIT(10)      /* MODERATION FOR TX/RX */
+#define L1C_MASTER_MANU_INT             BIT(9)       /* SOFT MANUAL INT */
+#define L1C_MASTER_MANUTIMER_EN         BIT(8)
+#define L1C_MASTER_SYSALVTIMER_EN       BIT(7)       /* SYS ALIVE TIMER EN */
+#define L1C_MASTER_OOB_DIS              BIT(6)       /* OUT OF BOX DIS */
+#define L1C_MASTER_WAKEN_25M            BIT(5)       /* WAKE WO. PCIE CLK */
+#define L1C_MASTER_BERT_START           BIT(4)
+#define L1C_MASTER_PCIE_TSTMOD_MASK     ASHFT2(3UL)
+#define L1C_MASTER_PCIE_TSTMOD_SHIFT    2
+#define L1C_MASTER_PCIE_RST             BIT(1)
+#define L1C_MASTER_DMA_MAC_RST          BIT(0)       /* RST MAC & DMA */
+#define L1C_DMA_MAC_RST_TO              50
+
+#define L1C_MANU_TIMER                  0x1404
+
+#define L1C_IRQ_MODU_TIMER              0x1408
+#define L1C_IRQ_MODU_TIMER2_MASK        ASHFT16(0xFFFFUL)
+#define L1C_IRQ_MODU_TIMER2_SHIFT       16          /* ONLY FOR RX */
+#define L1C_IRQ_MODU_TIMER1_MASK        ASHFT0(0xFFFFUL)
+#define L1C_IRQ_MODU_TIMER1_SHIFT       0
+
+#define L1C_PHY_CTRL                    0x140C
+#define L1C_PHY_CTRL_ADDR_MASK          ASHFT19(0x1FUL)
+#define L1C_PHY_CTRL_ADDR_SHIFT         19
+#define L1C_PHY_CTRL_BP_VLTGSW          BIT(18)
+#define L1C_PHY_CTRL_100AB_EN           BIT(17)
+#define L1C_PHY_CTRL_10AB_EN            BIT(16)
+#define L1C_PHY_CTRL_PLL_BYPASS         BIT(15)
+#define L1C_PHY_CTRL_POWER_DOWN         BIT(14)      /* affect MAC & PHY,
+						     * go to low power sts */
+#define L1C_PHY_CTRL_PLL_ON             BIT(13)      /* 1:PLL ALWAYS ON
+						     * 0:CAN SWITCH IN LPW */
+#define L1C_PHY_CTRL_RST_ANALOG         BIT(12)
+#define L1C_PHY_CTRL_HIB_PULSE          BIT(11)
+#define L1C_PHY_CTRL_HIB_EN             BIT(10)
+#define L1C_PHY_CTRL_GIGA_DIS           BIT(9)
+#define L1C_PHY_CTRL_IDDQ_DIS           BIT(8)       /* POWER ON RST */
+#define L1C_PHY_CTRL_IDDQ               BIT(7)       /* WHILE REBOOT, BIT8(1)
+						     * EFFECTS BIT7 */
+#define L1C_PHY_CTRL_LPW_EXIT           BIT(6)
+#define L1C_PHY_CTRL_GATE_25M           BIT(5)
+#define L1C_PHY_CTRL_RVRS_ANEG          BIT(4)
+#define L1C_PHY_CTRL_ANEG_NOW           BIT(3)
+#define L1C_PHY_CTRL_LED_MODE           BIT(2)
+#define L1C_PHY_CTRL_RTL_MODE           BIT(1)
+#define L1C_PHY_CTRL_DSPRST_OUT         BIT(0)       /* OUT OF DSP RST STATE */
+#define L1C_PHY_CTRL_DSPRST_TO          80
+#define L1C_PHY_CTRL_CLS                (\
+	L1C_PHY_CTRL_LED_MODE           |\
+	L1C_PHY_CTRL_100AB_EN           |\
+	L1C_PHY_CTRL_PLL_ON)
+
+
+#define L1C_MAC_STS                     0x1410
+#define L1C_MAC_STS_SFORCE_MASK         ASHFT14(0xFUL)
+#define L1C_MAC_STS_SFORCE_SHIFT        14
+#define L1C_MAC_STS_CALIB_DONE          BIT(13)
+#define L1C_MAC_STS_CALIB_RES_MASK      ASHFT8(0x1FUL)
+#define L1C_MAC_STS_CALIB_RES_SHIFT     8
+#define L1C_MAC_STS_CALIBERR_MASK       ASHFT4(0xFUL)
+#define L1C_MAC_STS_CALIBERR_SHIFT      4
+#define L1C_MAC_STS_TXQ_BUSY            BIT(3)
+#define L1C_MAC_STS_RXQ_BUSY            BIT(2)
+#define L1C_MAC_STS_TXMAC_BUSY          BIT(1)
+#define L1C_MAC_STS_RXMAC_BUSY          BIT(0)
+#define L1C_MAC_STS_IDLE                (\
+	L1C_MAC_STS_TXQ_BUSY            |\
+	L1C_MAC_STS_RXQ_BUSY            |\
+	L1C_MAC_STS_TXMAC_BUSY          |\
+	L1C_MAC_STS_RXMAC_BUSY)
+
+#define L1C_MDIO                        0x1414
+#define L1C_MDIO_MODE_EXT               BIT(30)      /* 0:normal,1:ext */
+#define L1C_MDIO_POST_READ              BIT(29)
+#define L1C_MDIO_AUTO_POLLING           BIT(28)
+#define L1C_MDIO_BUSY                   BIT(27)
+#define L1C_MDIO_CLK_SEL_MASK           ASHFT24(7UL)
+#define L1C_MDIO_CLK_SEL_SHIFT          24
+#define L1C_MDIO_CLK_SEL_25MD4          0           /* 25M DIV 4 */
+#define L1C_MDIO_CLK_SEL_25MD6          2
+#define L1C_MDIO_CLK_SEL_25MD8          3
+#define L1C_MDIO_CLK_SEL_25MD10         4
+#define L1C_MDIO_CLK_SEL_25MD32         5
+#define L1C_MDIO_CLK_SEL_25MD64         6
+#define L1C_MDIO_CLK_SEL_25MD128        7
+#define L1C_MDIO_START                  BIT(23)
+#define L1C_MDIO_SPRES_PRMBL            BIT(22)
+#define L1C_MDIO_OP_READ                BIT(21)      /* 1:read,0:write */
+#define L1C_MDIO_REG_MASK               ASHFT16(0x1FUL)
+#define L1C_MDIO_REG_SHIFT              16
+#define L1C_MDIO_DATA_MASK              ASHFT0(0xFFFFUL)
+#define L1C_MDIO_DATA_SHIFT             0
+#define L1C_MDIO_MAX_AC_TO              120
+
+#define L1C_MDIO_EXTN                   0x1448
+#define L1C_MDIO_EXTN_PORTAD_MASK       ASHFT21(0x1FUL)
+#define L1C_MDIO_EXTN_PORTAD_SHIFT      21
+#define L1C_MDIO_EXTN_DEVAD_MASK        ASHFT16(0x1FUL)
+#define L1C_MDIO_EXTN_DEVAD_SHIFT       16
+#define L1C_MDIO_EXTN_REG_MASK          ASHFT0(0xFFFFUL)
+#define L1C_MDIO_EXTN_REG_SHIFT         0
+
+#define L1C_PHY_STS                     0x1418
+#define L1C_PHY_STS_LPW                 BIT(31)
+#define L1C_PHY_STS_LPI                 BIT(30)
+#define L1C_PHY_STS_PWON_STRIP_MASK     ASHFT16(0xFFFUL)
+#define L1C_PHY_STS_PWON_STRIP_SHIFT    16
+
+#define L1C_PHY_STS_DUPLEX              BIT(3)
+#define L1C_PHY_STS_LINKUP              BIT(2)
+#define L1C_PHY_STS_SPEED_MASK          ASHFT0(3UL)
+#define L1C_PHY_STS_SPEED_SHIFT         0
+#define L1C_PHY_STS_SPEED_1000M         2
+#define L1C_PHY_STS_SPEED_100M          1
+#define L1C_PHY_STS_SPEED_10M           0
+
+#define L1C_BIST0                       0x141C
+#define L1C_BIST0_COL_MASK              ASHFT24(0x3FUL)
+#define L1C_BIST0_COL_SHIFT             24
+#define L1C_BIST0_ROW_MASK              ASHFT12(0xFFFUL)
+#define L1C_BIST0_ROW_SHIFT             12
+#define L1C_BIST0_STEP_MASK             ASHFT8(0xFUL)
+#define L1C_BIST0_STEP_SHIFT            8
+#define L1C_BIST0_PATTERN_MASK          ASHFT4(7UL)
+#define L1C_BIST0_PATTERN_SHIFT         4
+#define L1C_BIST0_CRIT                  BIT(3)
+#define L1C_BIST0_FIXED                 BIT(2)
+#define L1C_BIST0_FAIL                  BIT(1)
+#define L1C_BIST0_START                 BIT(0)
+
+#define L1C_BIST1                       0x1420
+#define L1C_BIST1_COL_MASK              ASHFT24(0x3FUL)
+#define L1C_BIST1_COL_SHIFT             24
+#define L1C_BIST1_ROW_MASK              ASHFT12(0xFFFUL)
+#define L1C_BIST1_ROW_SHIFT             12
+#define L1C_BIST1_STEP_MASK             ASHFT8(0xFUL)
+#define L1C_BIST1_STEP_SHIFT            8
+#define L1C_BIST1_PATTERN_MASK          ASHFT4(7UL)
+#define L1C_BIST1_PATTERN_SHIFT         4
+#define L1C_BIST1_CRIT                  BIT(3)
+#define L1C_BIST1_FIXED                 BIT(2)
+#define L1C_BIST1_FAIL                  BIT(1)
+#define L1C_BIST1_START                 BIT(0)
+
+#define L1C_SERDES                      0x1424
+#define L1C_SERDES_PHYCLK_SLWDWN        BIT(18)
+#define L1C_SERDES_MACCLK_SLWDWN        BIT(17)
+#define L1C_SERDES_SELFB_PLL_MASK       ASHFT14(3UL)
+#define L1C_SERDES_SELFB_PLL_SHIFT      14
+#define L1C_SERDES_PHYCLK_SEL_GTX       BIT(13)          /* 1:gtx_clk, 0:25M */
+#define L1C_SERDES_PCIECLK_SEL_SRDS     BIT(12)          /* 1:serdes,0:25M */
+#define L1C_SERDES_BUFS_RX_EN           BIT(11)
+#define L1C_SERDES_PD_RX                BIT(10)
+#define L1C_SERDES_PLL_EN               BIT(9)
+#define L1C_SERDES_EN                   BIT(8)
+#define L1C_SERDES_SELFB_PLL_SEL_CSR    BIT(6)       /* 0:state-machine,1:csr */
+#define L1C_SERDES_SELFB_PLL_CSR_MASK   ASHFT4(3UL)
+#define L1C_SERDES_SELFB_PLL_CSR_SHIFT  4
+#define L1C_SERDES_SELFB_PLL_CSR_4      3           /* 4-12% OV-CLK */
+#define L1C_SERDES_SELFB_PLL_CSR_0      2           /* 0-4% OV-CLK */
+#define L1C_SERDES_SELFB_PLL_CSR_12     1           /* 12-18% OV-CLK */
+#define L1C_SERDES_SELFB_PLL_CSR_18     0           /* 18-25% OV-CLK */
+#define L1C_SERDES_VCO_SLOW             BIT(3)
+#define L1C_SERDES_VCO_FAST             BIT(2)
+#define L1C_SERDES_LOCKDCT_EN           BIT(1)
+#define L1C_SERDES_LOCKDCTED            BIT(0)
+
+#define L1C_LED_CTRL                    0x1428
+#define L1C_LED_CTRL_PATMAP2_MASK       ASHFT8(3UL)
+#define L1C_LED_CTRL_PATMAP2_SHIFT      8
+#define L1C_LED_CTRL_PATMAP1_MASK       ASHFT6(3UL)
+#define L1C_LED_CTRL_PATMAP1_SHIFT      6
+#define L1C_LED_CTRL_PATMAP0_MASK       ASHFT4(3UL)
+#define L1C_LED_CTRL_PATMAP0_SHIFT      4
+#define L1C_LED_CTRL_D3_MODE_MASK       ASHFT2(3UL)
+#define L1C_LED_CTRL_D3_MODE_SHIFT      2
+#define L1C_LED_CTRL_D3_MODE_NORMAL     0
+#define L1C_LED_CTRL_D3_MODE_WOL_DIS    1
+#define L1C_LED_CTRL_D3_MODE_WOL_ANY    2
+#define L1C_LED_CTRL_D3_MODE_WOL_EN     3
+#define L1C_LED_CTRL_DUTY_CYCL_MASK     ASHFT0(3UL)
+#define L1C_LED_CTRL_DUTY_CYCL_SHIFT    0
+#define L1C_LED_CTRL_DUTY_CYCL_50       0           /* 50% */
+#define L1C_LED_CTRL_DUTY_CYCL_125      1           /* 12.5% */
+#define L1C_LED_CTRL_DUTY_CYCL_25       2           /* 25% */
+#define L1C_LED_CTRL_DUTY_CYCL_75       3           /* 75% */
+
+#define L1C_LED_PATN                    0x142C
+#define L1C_LED_PATN1_MASK              ASHFT16(0xFFFFUL)
+#define L1C_LED_PATN1_SHIFT             16
+#define L1C_LED_PATN0_MASK              ASHFT0(0xFFFFUL)
+#define L1C_LED_PATN0_SHIFT             0
+
+#define L1C_LED_PATN2                   0x1430
+#define L1C_LED_PATN2_MASK              ASHFT0(0xFFFFUL)
+#define L1C_LED_PATN2_SHIFT             0
+
+#define L1C_SYSALV                      0x1434
+#define L1C_SYSALV_FLAG                 BIT(0)
+
+#define L1C_PCIERR_INST                 0x1438
+#define L1C_PCIERR_INST_TX_RATE_MASK    ASHFT4(0xFUL)
+#define L1C_PCIERR_INST_TX_RATE_SHIFT   4
+#define L1C_PCIERR_INST_RX_RATE_MASK    ASHFT0(0xFUL)
+#define L1C_PCIERR_INST_RX_RATE_SHIFT   0
+
+#define L1C_LPI_DECISN_TIMER            0x143C
+#define L1C_LPI_DESISN_TIMER_L2CB       0x7D00
+
+#define L1C_LPI_CTRL                    0x1440
+#define L1C_LPI_CTRL_CHK_DA             BIT(31)
+#define L1C_LPI_CTRL_ENH_TO_MASK        ASHFT12(0x1FFFUL)
+#define L1C_LPI_CTRL_ENH_TO_SHIFT       12
+#define L1C_LPI_CTRL_ENH_TH_MASK        ASHFT6(0x1FUL)
+#define L1C_LPI_CTRL_ENH_TH_SHIFT       6
+#define L1C_LPI_CTRL_ENH_EN             BIT(5)
+#define L1C_LPI_CTRL_CHK_RX             BIT(4)
+#define L1C_LPI_CTRL_CHK_STATE          BIT(3)
+#define L1C_LPI_CTRL_GMII               BIT(2)
+#define L1C_LPI_CTRL_TO_PHY             BIT(1)
+#define L1C_LPI_CTRL_EN                 BIT(0)
+
+#define L1C_LPI_WAIT                    0x1444
+#define L1C_LPI_WAIT_TIMER_MASK         ASHFT0(0xFFFFUL)
+#define L1C_LPI_WAIT_TIMER_SHIFT        0
+
+#define L1C_MAC_CTRL                    0x1480
+#define L1C_MAC_CTRL_WOLSPED_SWEN       BIT(30)  /* 0:phy,1:sw */
+#define L1C_MAC_CTRL_MHASH_ALG_HI5B     BIT(29)  /* 1:legacy, 0:marvl(low5b)*/
+#define L1C_MAC_CTRL_SPAUSE_EN          BIT(28)
+#define L1C_MAC_CTRL_DBG_EN             BIT(27)
+#define L1C_MAC_CTRL_BRD_EN             BIT(26)
+#define L1C_MAC_CTRL_MULTIALL_EN        BIT(25)
+#define L1C_MAC_CTRL_RX_XSUM_EN         BIT(24)
+#define L1C_MAC_CTRL_THUGE              BIT(23)
+#define L1C_MAC_CTRL_MBOF               BIT(22)
+#define L1C_MAC_CTRL_SPEED_MASK         ASHFT20(3UL)
+#define L1C_MAC_CTRL_SPEED_SHIFT        20
+#define L1C_MAC_CTRL_SPEED_10_100       1
+#define L1C_MAC_CTRL_SPEED_1000         2
+#define L1C_MAC_CTRL_SIMR               BIT(19)
+#define L1C_MAC_CTRL_SSTCT              BIT(17)
+#define L1C_MAC_CTRL_TPAUSE             BIT(16)
+#define L1C_MAC_CTRL_PROMISC_EN         BIT(15)
+#define L1C_MAC_CTRL_VLANSTRIP          BIT(14)
+#define L1C_MAC_CTRL_PRMBLEN_MASK       ASHFT10(0xFUL)
+#define L1C_MAC_CTRL_PRMBLEN_SHIFT      10
+#define L1C_MAC_CTRL_RHUGE_EN           BIT(9)
+#define L1C_MAC_CTRL_FLCHK              BIT(8)
+#define L1C_MAC_CTRL_PCRCE              BIT(7)
+#define L1C_MAC_CTRL_CRCE               BIT(6)
+#define L1C_MAC_CTRL_FULLD              BIT(5)
+#define L1C_MAC_CTRL_LPBACK_EN          BIT(4)
+#define L1C_MAC_CTRL_RXFC_EN            BIT(3)
+#define L1C_MAC_CTRL_TXFC_EN            BIT(2)
+#define L1C_MAC_CTRL_RX_EN              BIT(1)
+#define L1C_MAC_CTRL_TX_EN              BIT(0)
+
+#define L1C_GAP                         0x1484
+#define L1C_GAP_IPGR2_MASK              ASHFT24(0x7FUL)
+#define L1C_GAP_IPGR2_SHIFT             24
+#define L1C_GAP_IPGR1_MASK              ASHFT16(0x7FUL)
+#define L1C_GAP_IPGR1_SHIFT             16
+#define L1C_GAP_MIN_IFG_MASK            ASHFT8(0xFFUL)
+#define L1C_GAP_MIN_IFG_SHIFT           8
+#define L1C_GAP_IPGT_MASK               ASHFT0(0x7FUL)
+#define L1C_GAP_IPGT_SHIFT              0
+
+#define L1C_STAD0                       0x1488
+#define L1C_STAD1                       0x148C
+
+#define L1C_HASH_TBL0                   0x1490
+#define L1C_HASH_TBL1                   0x1494
+
+#define L1C_HALFD                       0x1498
+#define L1C_HALFD_JAM_IPG_MASK          ASHFT24(0xFUL)
+#define L1C_HALFD_JAM_IPG_SHIFT         24
+#define L1C_HALFD_ABEBT_MASK            ASHFT20(0xFUL)
+#define L1C_HALFD_ABEBT_SHIFT           20
+#define L1C_HALFD_ABEBE                 BIT(19)
+#define L1C_HALFD_BPNB                  BIT(18)
+#define L1C_HALFD_NOBO                  BIT(17)
+#define L1C_HALFD_EDXSDFR               BIT(16)
+#define L1C_HALFD_RETRY_MASK            ASHFT12(0xFUL)
+#define L1C_HALFD_RETRY_SHIFT           12
+#define L1C_HALFD_LCOL_MASK             ASHFT0(0x3FFUL)
+#define L1C_HALFD_LCOL_SHIFT            0
+
+#define L1C_MTU                         0x149C
+#define L1C_MTU_JUMBO_TH                1514
+#define L1C_MTU_STD_ALGN                1536
+#define L1C_MTU_MIN                     64
+
+#define L1C_WOL0                        0x14A0
+#define L1C_WOL0_PT7_MATCH              BIT(31)
+#define L1C_WOL0_PT6_MATCH              BIT(30)
+#define L1C_WOL0_PT5_MATCH              BIT(29)
+#define L1C_WOL0_PT4_MATCH              BIT(28)
+#define L1C_WOL0_PT3_MATCH              BIT(27)
+#define L1C_WOL0_PT2_MATCH              BIT(26)
+#define L1C_WOL0_PT1_MATCH              BIT(25)
+#define L1C_WOL0_PT0_MATCH              BIT(24)
+#define L1C_WOL0_PT7_EN                 BIT(23)
+#define L1C_WOL0_PT6_EN                 BIT(22)
+#define L1C_WOL0_PT5_EN                 BIT(21)
+#define L1C_WOL0_PT4_EN                 BIT(20)
+#define L1C_WOL0_PT3_EN                 BIT(19)
+#define L1C_WOL0_PT2_EN                 BIT(18)
+#define L1C_WOL0_PT1_EN                 BIT(17)
+#define L1C_WOL0_PT0_EN                 BIT(16)
+#define L1C_WOL0_IPV4_SYNC_EVT          BIT(14)
+#define L1C_WOL0_IPV6_SYNC_EVT          BIT(13)
+#define L1C_WOL0_LINK_EVT               BIT(10)
+#define L1C_WOL0_MAGIC_EVT              BIT(9)
+#define L1C_WOL0_PATTERN_EVT            BIT(8)
+#define L1D_WOL0_OOB_EN                 BIT(6)
+#define L1C_WOL0_PME_LINK               BIT(5)
+#define L1C_WOL0_LINK_EN                BIT(4)
+#define L1C_WOL0_PME_MAGIC_EN           BIT(3)
+#define L1C_WOL0_MAGIC_EN               BIT(2)
+#define L1C_WOL0_PME_PATTERN_EN         BIT(1)
+#define L1C_WOL0_PATTERN_EN             BIT(0)
+
+#define L1C_WOL1                        0x14A4
+#define L1C_WOL1_PT3_LEN_MASK           ASHFT24(0xFFUL)
+#define L1C_WOL1_PT3_LEN_SHIFT          24
+#define L1C_WOL1_PT2_LEN_MASK           ASHFT16(0xFFUL)
+#define L1C_WOL1_PT2_LEN_SHIFT          16
+#define L1C_WOL1_PT1_LEN_MASK           ASHFT8(0xFFUL)
+#define L1C_WOL1_PT1_LEN_SHIFT          8
+#define L1C_WOL1_PT0_LEN_MASK           ASHFT0(0xFFUL)
+#define L1C_WOL1_PT0_LEN_SHIFT          0
+
+#define L1C_WOL2                        0x14A8
+#define L1C_WOL2_PT7_LEN_MASK           ASHFT24(0xFFUL)
+#define L1C_WOL2_PT7_LEN_SHIFT          24
+#define L1C_WOL2_PT6_LEN_MASK           ASHFT16(0xFFUL)
+#define L1C_WOL2_PT6_LEN_SHIFT          16
+#define L1C_WOL2_PT5_LEN_MASK           ASHFT8(0xFFUL)
+#define L1C_WOL2_PT5_LEN_SHIFT          8
+#define L1C_WOL2_PT4_LEN_MASK           ASHFT0(0xFFUL)
+#define L1C_WOL2_PT4_LEN_SHIFT          0
+
+#define L1C_SRAM0                       0x1500
+#define L1C_SRAM_RFD_TAIL_ADDR_MASK     ASHFT16(0xFFFUL)
+#define L1C_SRAM_RFD_TAIL_ADDR_SHIFT    16
+#define L1C_SRAM_RFD_HEAD_ADDR_MASK     ASHFT0(0xFFFUL)
+#define L1C_SRAM_RFD_HEAD_ADDR_SHIFT    0
+#define L1C_SRAM_RFD_HT_L2CB1           0x02bf02a0L
+
+#define L1C_SRAM1                       0x1510
+#define L1C_SRAM_RFD_LEN_MASK           ASHFT0(0xFFFUL) /* 8BYTES UNIT */
+#define L1C_SRAM_RFD_LEN_SHIFT          0
+
+#define L1C_SRAM2                       0x1518
+#define L1C_SRAM_TRD_TAIL_ADDR_MASK     ASHFT16(0xFFFUL)
+#define L1C_SRAM_TRD_TAIL_ADDR_SHIFT    16
+#define L1C_SRMA_TRD_HEAD_ADDR_MASK     ASHFT0(0xFFFUL)
+#define L1C_SRAM_TRD_HEAD_ADDR_SHIFT    0
+#define L1C_SRAM_TRD_HT_L2CB1           0x03df03c0L
+
+#define L1C_SRAM3                       0x151C
+#define L1C_SRAM_TRD_LEN_MASK           ASHFT0(0xFFFUL) /* 8BYTES UNIT */
+#define L1C_SRAM_TRD_LEN_SHIFT          0
+
+#define L1C_SRAM4                       0x1520
+#define L1C_SRAM_RXF_TAIL_ADDR_MASK     ASHFT16(0xFFFUL)
+#define L1C_SRAM_RXF_TAIL_ADDR_SHIFT    16
+#define L1C_SRAM_RXF_HEAD_ADDR_MASK     ASHFT0(0xFFFUL)
+#define L1C_SRAM_RXF_HEAD_ADDR_SHIFT    0
+#define L1C_SRAM_RXF_HT_L2CB1           0x029f0000L
+
+#define L1C_SRAM5                       0x1524
+#define L1C_SRAM_RXF_LEN_MASK           ASHFT0(0xFFFUL) /* 8BYTES UNIT */
+#define L1C_SRAM_RXF_LEN_SHIFT          0
+#define L1C_SRAM_RXF_LEN_8K             (8*1024)
+#define L1C_SRAM_RXF_LEN_L2CB1          0x02a0L
+
+#define L1C_SRAM6                       0x1528
+#define L1C_SRAM_TXF_TAIL_ADDR_MASK     ASHFT16(0xFFFUL)
+#define L1C_SRAM_TXF_TAIL_ADDR_SHIFT    16
+#define L1C_SRAM_TXF_HEAD_ADDR_MASK     ASHFT0(0xFFFUL)
+#define L1C_SRAM_TXF_HEAD_ADDR_SHIFT    0
+#define L1C_SRAM_TXF_HT_L2CB1           0x03bf02c0L
+
+#define L1C_SRAM7                       0x152C
+#define L1C_SRAM_TXF_LEN_MASK           ASHFT0(0xFFFUL) /* 8BYTES UNIT */
+#define L1C_SRAM_TXF_LEN_SHIFT          0
+#define L1C_SRAM_TXF_LEN_L2CB1          0x0100L
+
+#define L1C_SRAM8                       0x1530
+#define L1C_SRAM_PATTERN_ADDR_MASK      ASHFT16(0xFFFUL)
+#define L1C_SRAM_PATTERN_ADDR_SHIFT     16
+#define L1C_SRAM_TSO_ADDR_MASK          ASHFT0(0xFFFUL)
+#define L1C_SRAM_TSO_ADDR_SHIFT         0
+
+#define L1C_SRAM9                       0x1534
+#define L1C_SRAM_LOAD_PTR               BIT(0)
+
+#define L1C_RX_BASE_ADDR_HI             0x1540
+
+#define L1C_TX_BASE_ADDR_HI             0x1544
+
+#define L1C_RFD_ADDR_LO                 0x1550
+#define L1C_RFD_RING_SZ                 0x1560
+#define L1C_RFD_BUF_SZ                  0x1564
+#define L1C_RFD_BUF_SZ_MASK             ASHFT0(0xFFFFUL)
+#define L1C_RFD_BUF_SZ_SHIFT            0
+
+#define L1C_RRD_ADDR_LO                 0x1568
+#define L1C_RRD_RING_SZ                 0x1578
+#define L1C_RRD_RING_SZ_MASK            ASHFT0(0xFFFUL)
+#define L1C_RRD_RING_SZ_SHIFT           0
+
+#define L1C_TPD_PRI1_ADDR_LO            0x157C
+#define L1C_TPD_PRI0_ADDR_LO            0x1580      /* LOWEST PRIORITY */
+
+#define L1C_TPD_PRI1_PIDX               0x15F0      /* 16BIT */
+#define L1C_TPD_PRI0_PIDX               0x15F2      /* 16BIT */
+
+#define L1C_TPD_PRI1_CIDX               0x15F4      /* 16BIT */
+#define L1C_TPD_PRI0_CIDX               0x15F6      /* 16BIT */
+
+#define L1C_TPD_RING_SZ                 0x1584
+#define L1C_TPD_RING_SZ_MASK            ASHFT0(0xFFFFUL)
+#define L1C_TPD_RING_SZ_SHIFT           0
+
+#define L1C_TXQ0                        0x1590
+#define L1C_TXQ0_TXF_BURST_PREF_MASK    ASHFT16(0xFFFFUL)
+#define L1C_TXQ0_TXF_BURST_PREF_SHIFT   16
+#define L1C_TXQ0_TXF_BURST_PREF_DEF     0x200
+#define L1C_TXQ0_TXF_BURST_PREF_L2CB    0x40
+#define L1D_TXQ0_PEDING_CLR             BIT(8)
+#define L1C_TXQ0_LSO_8023_EN            BIT(7)
+#define L1C_TXQ0_MODE_ENHANCE           BIT(6)
+#define L1C_TXQ0_EN                     BIT(5)
+#define L1C_TXQ0_SUPT_IPOPT             BIT(4)
+#define L1C_TXQ0_TPD_BURSTPREF_MASK     ASHFT0(0xFUL)
+#define L1C_TXQ0_TPD_BURSTPREF_SHIFT    0
+#define L1C_TXQ0_TPD_BURSTPREF_DEF      5
+
+#define L1C_TXQ1                        0x1594
+#define L1C_TXQ1_JUMBO_TSOTHR_MASK      ASHFT0(0x7FFUL) /* 8BYTES UNIT */
+#define L1C_TXQ1_JUMBO_TSOTHR_SHIFT     0
+#define L1C_TXQ1_JUMBO_TSO_TH           (7*1024)    /* byte */
+
+#define L1C_TXQ2                        0x1598          /* ENTER L1 CONTROL */
+#define L1C_TXQ2_BURST_EN               BIT(31)
+#define L1C_TXQ2_BURST_HI_WM_MASK       ASHFT16(0xFFFUL)
+#define L1C_TXQ2_BURST_HI_WM_SHIFT      16
+#define L1C_TXQ2_BURST_LO_WM_MASK       ASHFT0(0xFFFUL)
+#define L1C_TXQ2_BURST_LO_WM_SHIFT      0
+
+#define L1C_RFD_PIDX                    0x15E0
+#define L1C_RFD_PIDX_MASK               ASHFT0(0xFFFUL)
+#define L1C_RFD_PIDX_SHIFT              0
+
+#define L1C_RFD_CIDX                    0x15F8
+#define L1C_RFD_CIDX_MASK               ASHFT0(0xFFFUL)
+#define L1C_RFD_CIDX_SHIFT              0
+
+#define L1C_RXQ0                        0x15A0
+#define L1C_RXQ0_EN                     BIT(31)
+#define L1C_RXQ0_CUT_THRU_EN            BIT(30)
+#define L1C_RXQ0_RSS_HASH_EN            BIT(29)
+#define L1C_RXQ0_NON_IP_QTBL            BIT(28)  /* 0:q0,1:table */
+#define L1C_RXQ0_RSS_MODE_MASK          ASHFT26(3UL)
+#define L1C_RXQ0_RSS_MODE_SHIFT         26
+#define L1C_RXQ0_RSS_MODE_DIS           0
+#define L1C_RXQ0_RSS_MODE_SQSI          1
+#define L1C_RXQ0_RSS_MODE_MQSI          2
+#define L1C_RXQ0_RSS_MODE_MQMI          3
+#define L1C_RXQ0_NUM_RFD_PREF_MASK      ASHFT20(0x3FUL)
+#define L1C_RXQ0_NUM_RFD_PREF_SHIFT     20
+#define L1C_RXQ0_NUM_RFD_PREF_DEF       8
+#define L1C_RXQ0_RSS_HSTYP_IPV6_TCP_EN  BIT(19)
+#define L1C_RXQ0_RSS_HSTYP_IPV6_EN      BIT(18)
+#define L1C_RXQ0_RSS_HSTYP_IPV4_TCP_EN  BIT(17)
+#define L1C_RXQ0_RSS_HSTYP_IPV4_EN      BIT(16)
+#define L1C_RXQ0_RSS_HSTYP_ALL          (\
+	L1C_RXQ0_RSS_HSTYP_IPV6_TCP_EN  |\
+	L1C_RXQ0_RSS_HSTYP_IPV4_TCP_EN  |\
+	L1C_RXQ0_RSS_HSTYP_IPV6_EN      |\
+	L1C_RXQ0_RSS_HSTYP_IPV4_EN)
+#define L1C_RXQ0_IDT_TBL_SIZE_MASK      ASHFT8(0xFFUL)
+#define L1C_RXQ0_IDT_TBL_SIZE_SHIFT     8
+#define L1C_RXQ0_IDT_TBL_SIZE_DEF       0x80
+#define L1C_RXQ0_IPV6_PARSE_EN          BIT(7)
+#define L1C_RXQ0_ASPM_THRESH_MASK       ASHFT0(3UL)
+#define L1C_RXQ0_ASPM_THRESH_SHIFT      0
+#define L1C_RXQ0_ASPM_THRESH_NO         0
+#define L1C_RXQ0_ASPM_THRESH_1M         1
+#define L1C_RXQ0_ASPM_THRESH_10M        2
+#define L1C_RXQ0_ASPM_THRESH_100M       3
+
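+/*
+ * Illustrative sketch, not part of the original sources: the ASHFTn()
+ * helpers are assumed to expand to "(value) << n", so every *_MASK above
+ * is a field mask already shifted into place and *_SHIFT is the matching
+ * bit offset.  Reading and rewriting a field then looks like:
+ *
+ *	u32 rxq0, pref;
+ *
+ *	alx_mem_r32(hw, L1C_RXQ0, &rxq0);
+ *	pref = (rxq0 & L1C_RXQ0_NUM_RFD_PREF_MASK) >>
+ *	       L1C_RXQ0_NUM_RFD_PREF_SHIFT;
+ *	rxq0 &= ~L1C_RXQ0_NUM_RFD_PREF_MASK;
+ *	rxq0 |= L1C_RXQ0_NUM_RFD_PREF_DEF << L1C_RXQ0_NUM_RFD_PREF_SHIFT;
+ *	alx_mem_w32(hw, L1C_RXQ0, rxq0);
+ *
+ * The FIELD_GETX()/FIELD_SETL() macros used later in this patch are
+ * assumed to wrap the same mask-and-shift pattern.
+ */
+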
+#define L1C_RXQ1                        0x15A4
+#define L1C_RXQ1_RFD_PREF_DOWN_MASK     ASHFT6(0x3FUL)
+#define L1C_RXQ1_RFD_PREF_DOWN_SHIFT    6
+#define L1C_RXQ1_RFD_PREF_UP_MASK       ASHFT0(0x3FUL)
+#define L1C_RXQ1_RFD_PREF_UP_SHIFT      0
+
+#define L1C_RXQ2                        0x15A8
+/* XOFF: when used SRAM is lower than this, notify the peer to send again */
+#define L1C_RXQ2_RXF_XOFF_THRESH_MASK   ASHFT16(0xFFFUL)
+#define L1C_RXQ2_RXF_XOFF_THRESH_SHIFT  16
+#define L1C_RXQ2_RXF_XON_THRESH_MASK    ASHFT0(0xFFFUL)
+#define L1C_RXQ2_RXF_XON_THRESH_SHIFT   0
+
+#define L1C_RXQ3                        0x15AC
+#define L1C_RXQ3_RXD_TIMER_MASK         ASHFT16(0xFFFFUL)
+#define L1C_RXQ3_RXD_TIMER_SHIFT        16
+#define L1C_RXQ3_RXD_THRESH_MASK        ASHFT0(0xFFFUL) /* 8BYTES UNIT */
+#define L1C_RXQ3_RXD_THRESH_SHIFT       0
+
+#define L1C_DMA                         0x15C0
+#define L1C_DMA_WPEND_CLR               BIT(30)
+#define L1C_DMA_RPEND_CLR               BIT(29)
+#define L1C_DMA_WDLY_CNT_MASK           ASHFT16(0xFUL)
+#define L1C_DMA_WDLY_CNT_SHIFT          16
+#define L1C_DMA_WDLY_CNT_DEF            4
+#define L1C_DMA_RDLY_CNT_MASK           ASHFT11(0x1FUL)
+#define L1C_DMA_RDLY_CNT_SHIFT          11
+#define L1C_DMA_RDLY_CNT_DEF            15
+#define L1C_DMA_RREQ_PRI_DATA           BIT(10)      /* 0:tpd, 1:data */
+#define L1C_DMA_WREQ_BLEN_MASK          ASHFT7(7UL)
+#define L1C_DMA_WREQ_BLEN_SHIFT         7
+#define L1C_DMA_RREQ_BLEN_MASK          ASHFT4(7UL)
+#define L1C_DMA_RREQ_BLEN_SHIFT         4
+#define L1C_DMA_RCB_LEN128              BIT(3)   /* 0:64bytes,1:128bytes */
+#define L1C_DMA_RORDER_MODE_MASK        ASHFT0(7UL)
+#define L1C_DMA_RORDER_MODE_SHIFT       0
+#define L1C_DMA_RORDER_MODE_OUT         4
+#define L1C_DMA_RORDER_MODE_ENHANCE     2
+#define L1C_DMA_RORDER_MODE_IN          1
+
+#define L1C_SMB_TIMER                   0x15C4
+
+#define L1C_TINT_TPD_THRSHLD            0x15C8
+
+#define L1C_TINT_TIMER                  0x15CC
+
+#define L1C_ISR                         0x1600
+#define L1C_ISR_DIS                     BIT(31)
+#define L1C_ISR_PCIE_LNKDOWN            BIT(26)
+#define L1C_ISR_PCIE_CERR               BIT(25)
+#define L1C_ISR_PCIE_NFERR              BIT(24)
+#define L1C_ISR_PCIE_FERR               BIT(23)
+#define L1C_ISR_PCIE_UR                 BIT(22)
+#define L1C_ISR_MAC_TX                  BIT(21)
+#define L1C_ISR_MAC_RX                  BIT(20)
+#define L1C_ISR_RX_Q0                   BIT(16)
+#define L1C_ISR_TX_Q0                   BIT(15)
+#define L1C_ISR_TXQ_TO                  BIT(14)
+#define L1C_ISR_PHY_LPW                 BIT(13)
+#define L1C_ISR_PHY                     BIT(12)
+#define L1C_ISR_TX_CREDIT               BIT(11)
+#define L1C_ISR_DMAW                    BIT(10)
+#define L1C_ISR_DMAR                    BIT(9)
+#define L1C_ISR_TXF_UR                  BIT(8)
+#define L1C_ISR_RFD_UR                  BIT(4)
+#define L1C_ISR_RXF_OV                  BIT(3)
+#define L1C_ISR_MANU                    BIT(2)
+#define L1C_ISR_TIMER                   BIT(1)
+#define L1C_ISR_SMB                     BIT(0)
+
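+/*
+ * Interrupt handshake sketch (illustrative; it mirrors how the callback
+ * code later in this patch drives the equivalent L1F registers): status
+ * bits are acknowledged by writing them back, and L1C_ISR_DIS masks
+ * further interrupts while the handler runs.
+ *
+ *	u32 isr;
+ *
+ *	alx_mem_r32(hw, L1C_ISR, &isr);
+ *	alx_mem_w32(hw, L1C_ISR, isr | L1C_ISR_DIS);
+ *	... service the bits set in isr ...
+ *	alx_mem_w32(hw, L1C_ISR, 0);
+ */
+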
+#define L1C_IMR                         0x1604
+
+#define L1C_INT_RETRIG                  0x1608  /* re-send deassert/assert
+						 * if sw does not respond */
+#define L1C_INT_RETRIG_TO               20000   /* 40 ms */
+
+/* WOL mask register only for L1Dv2.0 and later chips */
+#define L1D_PATTERN_MASK                0x1620  /* 128bytes, sleep state */
+#define L1D_PATTERN_MASK_LEN            128     /* 128bytes, 32DWORDs */
+
+
+#define L1C_BTROM_CFG                   0x1800  /* pwon rst */
+
+#define L1C_DRV                         0x1804  /* pwon rst */
+/* bit definition is in lx_hwcomm.h */
+
+#define L1C_DRV_ERR1                    0x1808  /* perst */
+#define L1C_DRV_ERR1_GEN                BIT(31)  /* general error */
+#define L1C_DRV_ERR1_NOR                BIT(30)  /* rrd.nor */
+#define L1C_DRV_ERR1_TRUNC              BIT(29)
+#define L1C_DRV_ERR1_RES                BIT(28)
+#define L1C_DRV_ERR1_INTFATAL           BIT(27)
+#define L1C_DRV_ERR1_TXQPEND            BIT(26)
+#define L1C_DRV_ERR1_DMAW               BIT(25)
+#define L1C_DRV_ERR1_DMAR               BIT(24)
+#define L1C_DRV_ERR1_PCIELNKDWN         BIT(23)
+#define L1C_DRV_ERR1_PKTSIZE            BIT(22)
+#define L1C_DRV_ERR1_FIFOFUL            BIT(21)
+#define L1C_DRV_ERR1_RFDUR              BIT(20)
+#define L1C_DRV_ERR1_RRDSI              BIT(19)
+#define L1C_DRV_ERR1_UPDATE             BIT(18)
+
+#define L1C_DRV_ERR2                    0x180C  /* perst */
+
+#define L1C_CLK_GATE                    0x1814
+#define L1C_CLK_GATE_RXMAC              BIT(5)
+#define L1C_CLK_GATE_TXMAC              BIT(4)
+#define L1C_CLK_GATE_RXQ                BIT(3)
+#define L1C_CLK_GATE_TXQ                BIT(2)
+#define L1C_CLK_GATE_DMAR               BIT(1)
+#define L1C_CLK_GATE_DMAW               BIT(0)
+#define L1C_CLK_GATE_ALL    (\
+	L1C_CLK_GATE_RXMAC  |\
+	L1C_CLK_GATE_TXMAC  |\
+	L1C_CLK_GATE_RXQ    |\
+	L1C_CLK_GATE_TXQ    |\
+	L1C_CLK_GATE_DMAR   |\
+	L1C_CLK_GATE_DMAW)
+
+#define L1C_DBG_ADDR                    0x1900  /* DWORD reg */
+#define L1C_DBG_DATA                    0x1904  /* DWORD reg */
+
+/***************************** IO mapping registers ***************************/
+#define L1C_IO_ADDR                     0x00    /* DWORD reg */
+#define L1C_IO_DATA                     0x04    /* DWORD reg */
+#define L1C_IO_MASTER                   0x08    /* DWORD same as reg0x1400 */
+#define L1C_IO_MAC_CTRL                 0x0C    /* DWORD same as reg0x1480*/
+#define L1C_IO_ISR                      0x10    /* DWORD same as reg0x1600 */
+#define L1C_IO_IMR                      0x14    /* DWORD same as reg0x1604 */
+#define L1C_IO_TPD_PRI1_PIDX            0x18    /* WORD same as reg0x15F0 */
+#define L1C_IO_TPD_PRI0_PIDX            0x1A    /* WORD same as reg0x15F2 */
+#define L1C_IO_TPD_PRI1_CIDX            0x1C    /* WORD same as reg0x15F4 */
+#define L1C_IO_TPD_PRI0_CIDX            0x1E    /* WORD same as reg0x15F6 */
+#define L1C_IO_RFD_PIDX                 0x20    /* WORD same as reg0x15E0 */
+#define L1C_IO_RFD_CIDX                 0x30    /* WORD same as reg0x15F8 */
+#define L1C_IO_MDIO                     0x38    /* WORD same as reg0x1414 */
+#define L1C_IO_PHY_CTRL                 0x3C    /* DWORD same as reg0x140C */
+
+
+
+/********************* PHY regs definition ***************************/
+
+/* Autoneg Advertisement Register (0x4) */
+#define L1C_ADVERTISE_SPEED_MASK            0x01E0
+#define L1C_ADVERTISE_DEFAULT_CAP           0x0DE0 /* differs from L1C */
+
+/* 1000BASE-T Control Register (0x9) */
+#define L1C_GIGA_CR_1000T_HD_CAPS           0x0100
+#define L1C_GIGA_CR_1000T_FD_CAPS           0x0200
+#define L1C_GIGA_CR_1000T_REPEATER_DTE      0x0400
+#define L1C_GIGA_CR_1000T_MS_VALUE          0x0800
+#define L1C_GIGA_CR_1000T_MS_ENABLE         0x1000
+#define L1C_GIGA_CR_1000T_TEST_MODE_NORMAL  0x0000
+#define L1C_GIGA_CR_1000T_TEST_MODE_1       0x2000
+#define L1C_GIGA_CR_1000T_TEST_MODE_2       0x4000
+#define L1C_GIGA_CR_1000T_TEST_MODE_3       0x6000
+#define L1C_GIGA_CR_1000T_TEST_MODE_4       0x8000
+#define L1C_GIGA_CR_1000T_SPEED_MASK        0x0300
+#define L1C_GIGA_CR_1000T_DEFAULT_CAP       0x0300
+
+/* 1000BASE-T Status Register */
+#define L1C_MII_GIGA_SR                     0x0A
+
+/* PHY Specific Status Register */
+#define L1C_MII_GIGA_PSSR                   0x11
+#define L1C_GIGA_PSSR_FC_RXEN               0x0004
+#define L1C_GIGA_PSSR_FC_TXEN               0x0008
+#define L1C_GIGA_PSSR_SPD_DPLX_RESOLVED     0x0800
+#define L1C_GIGA_PSSR_DPLX                  0x2000
+#define L1C_GIGA_PSSR_SPEED                 0xC000
+#define L1C_GIGA_PSSR_10MBS                 0x0000
+#define L1C_GIGA_PSSR_100MBS                0x4000
+#define L1C_GIGA_PSSR_1000MBS               0x8000
+
+/* PHY Interrupt Enable Register */
+#define L1C_MII_IER                         0x12
+#define L1C_IER_LINK_UP                     0x0400
+#define L1C_IER_LINK_DOWN                   0x0800
+
+/* PHY Interrupt Status Register */
+#define L1C_MII_ISR                         0x13
+#define L1C_ISR_LINK_UP                     0x0400
+#define L1C_ISR_LINK_DOWN                   0x0800
+
+/* Cable-Detect-Test Control Register */
+#define L1C_MII_CDTC                        0x16
+#define L1C_CDTC_EN                         1       /* self-clearing */
+#define L1C_CDTC_PAIR_MASK                  ASHFT8(3U)
+#define L1C_CDTC_PAIR_SHIFT                 8
+
+
+/* Cable-Detect-Test Status Register */
+#define L1C_MII_CDTS                        0x1C
+#define L1C_CDTS_STATUS_MASK                ASHFT8(3U)
+#define L1C_CDTS_STATUS_SHIFT               8
+#define L1C_CDTS_STATUS_NORMAL              0
+#define L1C_CDTS_STATUS_SHORT               1
+#define L1C_CDTS_STATUS_OPEN                2
+#define L1C_CDTS_STATUS_INVALID             3
+
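+/*
+ * Cable diagnostic sketch (illustrative, under the assumption that the
+ * "self-clearing" note on L1C_CDTC_EN means the bit clears when the test
+ * completes; the dev/fast arguments below are placeholders): start a test
+ * per pair, poll until the enable bit clears, then read the result.
+ *
+ *	u16 cdtc, cdts, status;
+ *
+ *	cdtc = L1C_CDTC_EN | (pair << L1C_CDTC_PAIR_SHIFT);
+ *	l1c_write_phy(hw, false, 0, false, L1C_MII_CDTC, cdtc);
+ *	do {
+ *		l1c_read_phy(hw, false, 0, false, L1C_MII_CDTC, &cdtc);
+ *	} while (cdtc & L1C_CDTC_EN);
+ *	l1c_read_phy(hw, false, 0, false, L1C_MII_CDTS, &cdts);
+ *	status = (cdts & L1C_CDTS_STATUS_MASK) >> L1C_CDTS_STATUS_SHIFT;
+ */
+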
+#define L1C_MII_DBG_ADDR                    0x1D
+#define L1C_MII_DBG_DATA                    0x1E
+
+/***************************** debug port *************************************/
+
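+/*
+ * The L1C_MIIDBG_* registers below are assumed to be reached indirectly:
+ * the debug register index goes through L1C_MII_DBG_ADDR (0x1D) and the
+ * value through L1C_MII_DBG_DATA (0x1E), which is presumably what the
+ * l1c_read_phydbg()/l1c_write_phydbg() helpers declared at the end of
+ * this header wrap:
+ *
+ *	l1c_write_phy(hw, false, 0, fast, L1C_MII_DBG_ADDR, reg);
+ *	l1c_read_phy(hw, false, 0, fast, L1C_MII_DBG_DATA, &val);
+ */
+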
+#define L1C_MIIDBG_ANACTRL                  0x00
+#define L1C_ANACTRL_CLK125M_DELAY_EN        BIT(15)
+#define L1C_ANACTRL_VCO_FAST                BIT(14)
+#define L1C_ANACTRL_VCO_SLOW                BIT(13)
+#define L1C_ANACTRL_AFE_MODE_EN             BIT(12)
+#define L1C_ANACTRL_LCKDET_PHY              BIT(11)
+#define L1C_ANACTRL_LCKDET_EN               BIT(10)
+#define L1C_ANACTRL_OEN_125M                BIT(9)
+#define L1C_ANACTRL_HBIAS_EN                BIT(8)
+#define L1C_ANACTRL_HB_EN                   BIT(7)
+#define L1C_ANACTRL_SEL_HSP                 BIT(6)
+#define L1C_ANACTRL_CLASSA_EN               BIT(5)
+#define L1C_ANACTRL_MANUSWON_SWR_MASK       ASHFT2(3U)
+#define L1C_ANACTRL_MANUSWON_SWR_SHIFT      2
+#define L1C_ANACTRL_MANUSWON_SWR_2V         0
+#define L1C_ANACTRL_MANUSWON_SWR_1P9V       1
+#define L1C_ANACTRL_MANUSWON_SWR_1P8V       2
+#define L1C_ANACTRL_MANUSWON_SWR_1P7V       3
+#define L1C_ANACTRL_MANUSWON_BW3_4M         BIT(1)
+#define L1C_ANACTRL_RESTART_CAL             BIT(0)
+#define L1C_ANACTRL_DEF                     0x02EF
+
+
+#define L1C_MIIDBG_SYSMODCTRL               0x04
+#define L1C_SYSMODCTRL_IECHOADJ_PFMH_PHY    BIT(15)
+#define L1C_SYSMODCTRL_IECHOADJ_BIASGEN     BIT(14)
+#define L1C_SYSMODCTRL_IECHOADJ_PFML_PHY    BIT(13)
+#define L1C_SYSMODCTRL_IECHOADJ_PS_MASK     ASHFT10(3U)
+#define L1C_SYSMODCTRL_IECHOADJ_PS_SHIFT    10
+#define L1C_SYSMODCTRL_IECHOADJ_PS_40       3
+#define L1C_SYSMODCTRL_IECHOADJ_PS_20       2
+#define L1C_SYSMODCTRL_IECHOADJ_PS_0        1
+#define L1C_SYSMODCTRL_IECHOADJ_10BT_100MV  BIT(6) /* 1:100mv, 0:200mv */
+#define L1C_SYSMODCTRL_IECHOADJ_HLFAP_MASK  ASHFT4(3U)
+#define L1C_SYSMODCTRL_IECHOADJ_HLFAP_SHIFT 4
+#define L1C_SYSMODCTRL_IECHOADJ_VDFULBW     BIT(3)
+#define L1C_SYSMODCTRL_IECHOADJ_VDBIASHLF   BIT(2)
+#define L1C_SYSMODCTRL_IECHOADJ_VDAMPHLF    BIT(1)
+#define L1C_SYSMODCTRL_IECHOADJ_VDLANSW     BIT(0)
+#define L1C_SYSMODCTRL_IECHOADJ_DEF         0x88BB /* ???? */
+
+
+
+#define L1D_MIIDBG_SYSMODCTRL               0x04    /* l1d & l2cb */
+#define L1D_SYSMODCTRL_IECHOADJ_CUR_ADD     BIT(15)
+#define L1D_SYSMODCTRL_IECHOADJ_CUR_MASK    ASHFT12(7U)
+#define L1D_SYSMODCTRL_IECHOADJ_CUR_SHIFT   12
+#define L1D_SYSMODCTRL_IECHOADJ_VOL_MASK    ASHFT8(0xFU)
+#define L1D_SYSMODCTRL_IECHOADJ_VOL_SHIFT   8
+#define L1D_SYSMODCTRL_IECHOADJ_VOL_17ALL   3
+#define L1D_SYSMODCTRL_IECHOADJ_VOL_100M15  1
+#define L1D_SYSMODCTRL_IECHOADJ_VOL_10M17   0
+#define L1D_SYSMODCTRL_IECHOADJ_BIAS1_MASK  ASHFT4(0xFU)
+#define L1D_SYSMODCTRL_IECHOADJ_BIAS1_SHIFT 4
+#define L1D_SYSMODCTRL_IECHOADJ_BIAS2_MASK  ASHFT0(0xFU)
+#define L1D_SYSMODCTRL_IECHOADJ_BIAS2_SHIFT 0
+#define L1D_SYSMODCTRL_IECHOADJ_DEF         0x4FBB
+
+
+#define L1C_MIIDBG_SRDSYSMOD                0x05
+#define L1C_SRDSYSMOD_LCKDET_EN             BIT(13)
+#define L1C_SRDSYSMOD_PLL_EN                BIT(11)
+#define L1C_SRDSYSMOD_SEL_HSP               BIT(10)
+#define L1C_SRDSYSMOD_HLFTXDR               BIT(9)
+#define L1C_SRDSYSMOD_TXCLK_DELAY_EN        BIT(8)
+#define L1C_SRDSYSMOD_TXELECIDLE            BIT(7)
+#define L1C_SRDSYSMOD_DEEMP_EN              BIT(6)
+#define L1C_SRDSYSMOD_MS_PAD                BIT(2)
+#define L1C_SRDSYSMOD_CDR_ADC_VLTG          BIT(1)
+#define L1C_SRDSYSMOD_CDR_DAC_1MA           BIT(0)
+#define L1C_SRDSYSMOD_DEF                   0x2C46
+
+#define L1C_MIIDBG_CFGLPSPD                 0x0A
+#define L1C_CFGLPSPD_RSTCNT_MASK            ASHFT14(3U)
+#define L1C_CFGLPSPD_RSTCNT_SHIFT           14
+#define L1C_CFGLPSPD_RSTCNT_CLK125SW        BIT(13)
+
+#define L1C_MIIDBG_HIBNEG                   0x0B
+#define L1C_HIBNEG_PSHIB_EN                 BIT(15)
+#define L1C_HIBNEG_WAKE_BOTH                BIT(14)
+#define L1C_HIBNEG_ONOFF_ANACHG_SUDEN       BIT(13)
+#define L1C_HIBNEG_HIB_PULSE                BIT(12)
+#define L1C_HIBNEG_GATE_25M_EN              BIT(11)
+#define L1C_HIBNEG_RST_80U                  BIT(10)
+#define L1C_HIBNEG_RST_TIMER_MASK           ASHFT8(3U)
+#define L1C_HIBNEG_RST_TIMER_SHIFT          8
+#define L1C_HIBNEG_GTX_CLK_DELAY_MASK       ASHFT5(3U)
+#define L1C_HIBNEG_GTX_CLK_DELAY_SHIFT      5
+#define L1C_HIBNEG_BYPSS_BRKTIMER           BIT(4)
+#define L1C_HIBNEG_DEF                      0xBC40
+
+#define L1C_MIIDBG_TST10BTCFG               0x12
+#define L1C_TST10BTCFG_INTV_TIMER_MASK      ASHFT14(3U)
+#define L1C_TST10BTCFG_INTV_TIMER_SHIFT     14
+#define L1C_TST10BTCFG_TRIGER_TIMER_MASK    ASHFT12(3U)
+#define L1C_TST10BTCFG_TRIGER_TIMER_SHIFT   12
+#define L1C_TST10BTCFG_DIV_MAN_MLT3_EN      BIT(11)
+#define L1C_TST10BTCFG_OFF_DAC_IDLE         BIT(10)
+#define L1C_TST10BTCFG_LPBK_DEEP            BIT(2) /* 1:deep,0:shallow */
+#define L1C_TST10BTCFG_DEF                  0x4C04
+
+#define L1C_MIIDBG_AZ_ANADECT               0x15
+#define L1C_AZ_ANADECT_10BTRX_TH            BIT(15)
+#define L1C_AZ_ANADECT_BOTH_01CHNL          BIT(14)
+#define L1C_AZ_ANADECT_INTV_MASK            ASHFT8(0x3FU)
+#define L1C_AZ_ANADECT_INTV_SHIFT           8
+#define L1C_AZ_ANADECT_THRESH_MASK          ASHFT4(0xFU)
+#define L1C_AZ_ANADECT_THRESH_SHIFT         4
+#define L1C_AZ_ANADECT_CHNL_MASK            ASHFT0(0xFU)
+#define L1C_AZ_ANADECT_CHNL_SHIFT           0
+#define L1C_AZ_ANADECT_DEF                  0x3220
+#define L1C_AZ_ANADECT_LONG                 0xb210
+
+#define L1D_MIIDBG_MSE16DB                  0x18
+#define L1D_MSE16DB_UP                      0x05EA
+#define L1D_MSE16DB_DOWN                    0x02EA
+
+
+#define L1C_MIIDBG_LEGCYPS                  0x29
+#define L1C_LEGCYPS_EN                      BIT(15)
+#define L1C_LEGCYPS_DAC_AMP1000_MASK        ASHFT12(7U)
+#define L1C_LEGCYPS_DAC_AMP1000_SHIFT       12
+#define L1C_LEGCYPS_DAC_AMP100_MASK         ASHFT9(7U)
+#define L1C_LEGCYPS_DAC_AMP100_SHIFT        9
+#define L1C_LEGCYPS_DAC_AMP10_MASK          ASHFT6(7U)
+#define L1C_LEGCYPS_DAC_AMP10_SHIFT         6
+#define L1C_LEGCYPS_UNPLUG_TIMER_MASK       ASHFT3(7U)
+#define L1C_LEGCYPS_UNPLUG_TIMER_SHIFT      3
+#define L1C_LEGCYPS_UNPLUG_DECT_EN          BIT(2)
+#define L1C_LEGCYPS_ECNC_PS_EN              BIT(0)
+#define L1D_LEGCYPS_DEF                     0x129D
+#define L1C_LEGCYPS_DEF                     0x36DD
+#define L1C_LEGCYPS_DEF_MPQ                 0x30DD
+
+#define L1C_MIIDBG_TST100BTCFG              0x36
+#define L1C_TST100BTCFG_NORMAL_BW_EN        BIT(15)
+#define L1C_TST100BTCFG_BADLNK_BYPASS       BIT(14)
+#define L1C_TST100BTCFG_SHORTCABL_TH_MASK   ASHFT8(0x3FU)
+#define L1C_TST100BTCFG_SHORTCABL_TH_SHIFT  8
+#define L1C_TST100BTCFG_LITCH_EN            BIT(7)
+#define L1C_TST100BTCFG_VLT_SW              BIT(6)
+#define L1C_TST100BTCFG_LONGCABL_TH_MASK    ASHFT0(0x3FU)
+#define L1C_TST100BTCFG_LONGCABL_TH_SHIFT   0
+#define L1C_TST100BTCFG_DEF                 0xE12C
+
+#define L1C_MIIDBG_VOLT_CTRL                0x3B
+#define L1C_VOLT_CTRL_CABLE1TH_MASK         ASHFT7(0x1FFU)
+#define L1C_VOLT_CTRL_CABLE1TH_SHIFT        7
+#define L1C_VOLT_CTRL_AMPCTRL_MASK          ASHFT5(3U)
+#define L1C_VOLT_CTRL_AMPCTRL_SHIFT         5
+#define L1C_VOLT_CTRL_SW_BYPASS             BIT(4)
+#define L1C_VOLT_CTRL_SWLOWEST              BIT(3)
+#define L1C_VOLT_CTRL_DACAMP10_MASK         ASHFT0(7U)
+#define L1C_VOLT_CTRL_DACAMP10_SHIFT        0
+
+#define L1C_MIIDBG_CABLE1TH_DET             0x3E
+#define L1C_CABLE1TH_DET_EN                 BIT(15)
+
+/***************************** extension **************************************/
+
+/******* dev 3 *********/
+#define L1C_MIIEXT_PCS                      3
+
+#define L1C_MIIEXT_CLDCTRL3                 0x8003
+#define L1C_CLDCTRL3_BP_CABLE1TH_DET_GT     BIT(15)
+#define L1C_CLDCTRL3_AZ_DISAMP              BIT(12)
+#define L1C_CLDCTRL3_L2CB                   0x4D19
+#define L1C_CLDCTRL3_L1D                    0xDD19
+
+#define L1C_MIIEXT_CLDCTRL6                 0x8006
+#define L1C_CLDCTRL6_CAB_LEN_MASK           ASHFT0(0x1FFU)
+#define L1C_CLDCTRL6_CAB_LEN_SHIFT          0
+#define L1C_CLDCTRL6_CAB_LEN_SHORT          0x50
+
+#define L1C_MIIEXT_CLDCTRL7                 0x8007
+#define L1C_CLDCTRL7_VDHLF_BIAS_TH_MASK     ASHFT9(0x7FU)
+#define L1C_CLDCTRL7_VDHLF_BIAS_TH_SHIFT    9
+#define L1C_CLDCTRL7_AFE_AZ_MASK            ASHFT4(0x1FU)
+#define L1C_CLDCTRL7_AFE_AZ_SHIFT           4
+#define L1C_CLDCTRL7_SIDE_PEAK_TH_MASK      ASHFT0(0xFU)
+#define L1C_CLDCTRL7_SIDE_PEAK_TH_SHIFT     0
+#define L1C_CLDCTRL7_DEF                    0x6BF6 /* ???? */
+#define L1C_CLDCTRL7_FPGA_DEF               0x0005
+#define L1C_CLDCTRL7_L2CB                   0x0175
+
+#define L1C_MIIEXT_AZCTRL                   0x8008
+#define L1C_AZCTRL_SHORT_TH_MASK            ASHFT8(0xFFU)
+#define L1C_AZCTRL_SHORT_TH_SHIFT           8
+#define L1C_AZCTRL_LONG_TH_MASK             ASHFT0(0xFFU)
+#define L1C_AZCTRL_LONG_TH_SHIFT            0
+#define L1C_AZCTRL_DEF                      0x1629
+#define L1C_AZCTRL_FPGA_DEF                 0x101D
+#define L1C_AZCTRL_L1D                      0x2034
+
+#define L1C_MIIEXT_AZCTRL2                  0x8009
+#define L1C_AZCTRL2_WAKETRNING_MASK         ASHFT8(0xFFU)
+#define L1C_AZCTRL2_WAKETRNING_SHIFT        8
+#define L1C_AZCTRL2_QUIET_TIMER_MASH        ASHFT6(3U)
+#define L1C_AZCTRL2_QUIET_TIMER_SHIFT       6
+#define L1C_AZCTRL2_PHAS_JMP2               BIT(4)
+#define L1C_AZCTRL2_CLKTRCV_125MD16         BIT(3)
+#define L1C_AZCTRL2_GATE1000_EN             BIT(2)
+#define L1C_AZCTRL2_AVRG_FREQ               BIT(1)
+#define L1C_AZCTRL2_PHAS_JMP4               BIT(0)
+#define L1C_AZCTRL2_DEF                     0x32C0
+#define L1C_AZCTRL2_FPGA_DEF                0x40C8
+#define L1C_AZCTRL2_L2CB                    0xE003
+#define L1C_AZCTRL2_L1D2                    0x18C0
+
+
+#define L1C_MIIEXT_AZCTRL4                  0x800B
+#define L1C_AZCTRL4_WAKE_STH_L2CB           0x0094
+
+#define L1C_MIIEXT_AZCTRL5                  0x800C
+#define L1C_AZCTRL5_WAKE_LTH_L2CB           0x00EB
+
+#define L1C_MIIEXT_AZCTRL6                  0x800D
+#define L1C_AZCTRL6_L1D2                    0x003F
+
+
+
+/********* dev 7 **********/
+#define L1C_MIIEXT_ANEG                     7
+
+#define L1C_MIIEXT_LOCAL_EEEADV             0x3C
+#define L1C_LOCAL_EEEADV_1000BT             BIT(2)
+#define L1C_LOCAL_EEEADV_100BT              BIT(1)
+
+#define L1C_MIIEXT_REMOTE_EEEADV            0x3D
+#define L1C_REMOTE_EEEADV_1000BT            BIT(2)
+#define L1C_REMOTE_EEEADV_100BT             BIT(1)
+
+#define L1C_MIIEXT_EEE_ANEG                 0x8000
+#define L1C_EEE_ANEG_1000M                  BIT(2)
+#define L1C_EEE_ANEG_100M                   BIT(1)
+
+
+
+
+/******************************************************************************/
+
+/* functions */
+
+/* get permanent mac address from hardware
+ * return
+ *    0: success
+ *    non-0: fail
+ */
+u16 l1c_get_perm_macaddr(struct alx_hw *hw, u8 *addr);
+
+
+/* reset mac & dma
+ * return
+ *     0: success
+ *     non-0:fail
+ */
+u16 l1c_reset_mac(struct alx_hw *hw);
+
+/* reset phy
+ * return
+ *    0: success
+ *    non-0:fail
+ */
+u16 l1c_reset_phy(struct alx_hw *hw, bool pws_en, bool az_en, bool ptp_en);
+
+
+/* reset pcie
+ * only resets pcie-related registers (pci command, clk, aspm...)
+ * return
+ *    0:success
+ *    non-0:fail
+ */
+u16 l1c_reset_pcie(struct alx_hw *hw, bool l0s_en, bool l1_en);
+
+
+/* disable/enable MAC/RXQ/TXQ
+ * en
+ *    true:enable
+ *    false:disable
+ * return
+ *    0:success
+ *    non-0: fail
+ */
+u16 l1c_enable_mac(struct alx_hw *hw, bool en, u16 en_ctrl);
+
+/* enable/disable aspm support
+ * that will change settings for phy/mac/pcie
+ */
+u16 l1c_enable_aspm(struct alx_hw *hw, bool l0s_en, bool l1_en, u8 lnk_stat);
+
+
+/* initialize phy for speed / flow control
+ * lnk_cap
+ *    with autoneg: the link capabilities advertised to the peer
+ *    in force mode: the forced speed/duplex
+ */
+u16 l1c_init_phy_spdfc(struct alx_hw *hw, bool auto_neg,
+		       u8 lnk_cap, bool fc_en);
+
+/* apply post-link settings on the phy when a link up/down event occurs
+ */
+u16 l1c_post_phy_link(struct alx_hw *hw, bool linkon, u8 wire_spd);
+
+
+/* do power saving settings before entering suspend mode
+ * NOTE:
+ *    1. phy link must be established before calling this function
+ *    2. wol options (pattern, magic, link, etc.) must be configured
+ *       before calling it.
+ */
+u16 l1c_powersaving(struct alx_hw *hw, u8 wire_spd, bool wol_en,
+		    bool mac_txen, bool mac_rxen, bool pws_en);
+
+
+/* read phy register */
+u16 l1c_read_phy(struct alx_hw *hw, bool ext, u8 dev, bool fast, u16 reg,
+		 u16 *data);
+
+/* write phy register */
+u16 l1c_write_phy(struct alx_hw *hw, bool ext, u8 dev,  bool fast, u16 reg,
+		  u16 data);
+
+/* phy debug port */
+u16 l1c_read_phydbg(struct alx_hw *hw, bool fast, u16 reg, u16 *data);
+u16 l1c_write_phydbg(struct alx_hw *hw, bool fast, u16 reg, u16 data);
+
+/* check the configuration of the PHY */
+u16 l1c_get_phy_config(struct alx_hw *hw);
+
+/* phy hib patch */
+u16 l1c_apply_phy_hib_patch(struct alx_hw *hw);
+/*
+ * basic mac initialization
+ *  most high-level features are left uninitialized
+ *      MAC/PHY should be reset before calling this function
+ */
+u16 l1c_init_mac(struct alx_hw *hw, u8 *addr, u32 txmem_hi,
+		 u32 *tx_mem_lo, u8 tx_qnum, u16 txring_sz,
+		 u32 rxmem_hi, u32 rfdmem_lo, u32 rrdmem_lo,
+		 u16 rxring_sz, u16 rxbuf_sz, u16 smb_timer,
+		 u16 mtu, u16 int_mod, bool hash_legacy);
+
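+/*
+ * A sketch of the bring-up order implied by the helpers above (not a
+ * statement of the vendor's canonical sequence; it mirrors what the L1F
+ * callback code later in this patch does): reset the PCIe side, then the
+ * MAC/DMA, then the PHY, program the MAC, and finally enable it.
+ *
+ *	l1c_reset_pcie(hw, l0s_en, l1_en);
+ *	l1c_reset_mac(hw);
+ *	l1c_reset_phy(hw, pws_en, az_en, ptp_en);
+ *	l1c_init_mac(hw, addr, txmem_hi, tx_mem_lo, tx_qnum, txring_sz,
+ *		     rxmem_hi, rfdmem_lo, rrdmem_lo, rxring_sz, rxbuf_sz,
+ *		     smb_timer, mtu, int_mod, hash_legacy);
+ *	l1c_enable_mac(hw, true, en_ctrl);
+ */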
+
+
+#endif /* L1C_HW_H_ */
+
diff -Nruw linux-4.4.115-fbx/drivers/net/ethernet/atheros/alx_prop./alf_cb.c linux-4.4.115-fbx/drivers/net/ethernet/atheros/alx_prop/alf_cb.c
--- linux-4.4.115-fbx/drivers/net/ethernet/atheros/alx_prop./alf_cb.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/net/ethernet/atheros/alx_prop/alf_cb.c	2019-01-22 16:16:24.927259302 +0100
@@ -0,0 +1,1187 @@
+/*
+ * Copyright (c) 2012 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+#include <linux/pci_regs.h>
+#include <linux/mii.h>
+
+#include "alf_hw.h"
+
+
+#define ALF_REV_ID_AR8161_B0            0x10
+
+/* definition for MSIX */
+#define ALF_MSIX_ENTRY_BASE		0x2000
+#define ALF_MSIX_ENTRY_SIZE		16
+#define ALF_MSIX_MSG_LOADDR_OFF		0
+#define ALF_MSIX_MSG_HIADDR_OFF		4
+#define ALF_MSIX_MSG_DATA_OFF		8
+#define ALF_MSIX_MSG_CTRL_OFF		12
+
+#define ALF_MSIX_INDEX_RXQ0		0
+#define ALF_MSIX_INDEX_RXQ1		1
+#define ALF_MSIX_INDEX_RXQ2		2
+#define ALF_MSIX_INDEX_RXQ3		3
+#define ALF_MSIX_INDEX_RXQ4		4
+#define ALF_MSIX_INDEX_RXQ5		5
+#define ALF_MSIX_INDEX_RXQ6		6
+#define ALF_MSIX_INDEX_RXQ7		7
+#define ALF_MSIX_INDEX_TXQ0		8
+#define ALF_MSIX_INDEX_TXQ1		9
+#define ALF_MSIX_INDEX_TXQ2		10
+#define ALF_MSIX_INDEX_TXQ3		11
+#define ALF_MSIX_INDEX_TIMER		12
+#define ALF_MSIX_INDEX_ALERT		13
+#define ALF_MSIX_INDEX_SMB		14
+#define ALF_MSIX_INDEX_PHY		15
+
+
+#define ALF_SRAM_BASE		L1F_SRAM0
+#define ALF_SRAM(_i, _type) \
+		(ALF_SRAM_BASE + ((_i) * sizeof(_type)))
+
+#define ALF_MIB_BASE		L1F_MIB_BASE
+#define ALF_MIB(_i, _type) \
+		(ALF_MIB_BASE + ((_i) * sizeof(_type)))
+
+/* definition for RSS */
+#define ALF_RSS_KEY_BASE	L1F_RSS_KEY0
+#define ALF_RSS_IDT_BASE	L1F_RSS_IDT_TBL0
+#define ALF_RSS_KEY(_i, _type) \
+		(ALF_RSS_KEY_BASE + ((_i) * sizeof(_type)))
+#define ALF_RSS_TBL(_i, _type) \
+		(L1F_RSS_IDT_TBL0 + ((_i) * sizeof(_type)))
+
+
+/* NIC */
+static int alf_identify_nic(struct alx_hw *hw)
+{
+	u32 drv;
+
+	if (hw->pci_revid < ALX_REV_ID_AR8161_V2_0)
+		return 0;
+
+	/* check from V2_0(b0) to ... */
+	switch (hw->pci_revid) {
+	default:
+		alx_mem_r32(hw, L1F_DRV, &drv);
+		if (drv & LX_DRV_DISABLE)
+			return -EINVAL;
+		break;
+	}
+	return 0;
+}
+
+
+/* PHY */
+static int alf_read_phy_reg(struct alx_hw *hw, u16 reg_addr, u16 *phy_data)
+{
+	unsigned long  flags;
+	int  retval = 0;
+
+	spin_lock_irqsave(&hw->mdio_lock, flags);
+
+	if (l1f_read_phy(hw, false, ALX_MDIO_DEV_TYPE_NORM, false, reg_addr,
+			 phy_data)) {
+		alx_hw_err(hw, "error when read phy reg\n");
+		retval = -EINVAL;
+	}
+
+	spin_unlock_irqrestore(&hw->mdio_lock, flags);
+	return retval;
+}
+
+
+static int alf_write_phy_reg(struct alx_hw *hw, u16 reg_addr, u16 phy_data)
+{
+	unsigned long  flags;
+	int  retval = 0;
+
+	spin_lock_irqsave(&hw->mdio_lock, flags);
+
+	if (l1f_write_phy(hw, false, ALX_MDIO_DEV_TYPE_NORM, false, reg_addr,
+			  phy_data)) {
+		alx_hw_err(hw, "error when write phy reg\n");
+		retval = -EINVAL;
+	}
+
+	spin_unlock_irqrestore(&hw->mdio_lock, flags);
+	return retval;
+}
+
+
+static int alf_init_phy(struct alx_hw *hw)
+{
+	u16 phy_id[2];
+	int retval;
+
+	spin_lock_init(&hw->mdio_lock);
+
+	retval = alf_read_phy_reg(hw, MII_PHYSID1, &phy_id[0]);
+	if (retval)
+		return retval;
+	retval = alf_read_phy_reg(hw, MII_PHYSID2, &phy_id[1]);
+	if (retval)
+		return retval;
+	memcpy(&hw->phy_id, phy_id, sizeof(hw->phy_id));
+
+	hw->autoneg_advertised = ALX_LINK_SPEED_1GB_FULL |
+				 ALX_LINK_SPEED_10_HALF  |
+				 ALX_LINK_SPEED_10_FULL  |
+				 ALX_LINK_SPEED_100_HALF |
+				 ALX_LINK_SPEED_100_FULL;
+	return retval;
+}
+
+
+static int alf_reset_phy(struct alx_hw *hw)
+{
+	int retval = 0;
+	bool pws_en, az_en, ptp_en;
+
+	pws_en = az_en = ptp_en = false;
+	CLI_HW_FLAG(PWSAVE_EN);
+	CLI_HW_FLAG(AZ_EN);
+	CLI_HW_FLAG(PTP_EN);
+
+	if (CHK_HW_FLAG(PWSAVE_CAP)) {
+		pws_en = true;
+		SET_HW_FLAG(PWSAVE_EN);
+	}
+
+	if (CHK_HW_FLAG(AZ_CAP)) {
+		az_en = true;
+		SET_HW_FLAG(AZ_EN);
+	}
+
+	if (CHK_HW_FLAG(PTP_CAP)) {
+		ptp_en = true;
+		SET_HW_FLAG(PTP_EN);
+	}
+
+	alx_hw_info(hw, "reset PHY, pws = %d, az = %d, ptp = %d\n",
+		    pws_en, az_en, ptp_en);
+	if (l1f_reset_phy(hw, pws_en, az_en, ptp_en)) {
+		alx_hw_err(hw, "error when reset phy\n");
+		retval = -EINVAL;
+	}
+	return retval;
+}
+
+
+/* LINK */
+static int alf_setup_phy_link(struct alx_hw *hw, u32 speed, bool autoneg,
+			      bool fc)
+{
+	u8 link_cap = 0;
+	int retval = 0;
+
+	alx_hw_info(hw, "speed = 0x%x, autoneg = %d\n", speed, autoneg);
+	if (speed & ALX_LINK_SPEED_1GB_FULL)
+		link_cap |= LX_LC_1000F;
+
+	if (speed & ALX_LINK_SPEED_100_FULL)
+		link_cap |= LX_LC_100F;
+
+	if (speed & ALX_LINK_SPEED_100_HALF)
+		link_cap |= LX_LC_100H;
+
+	if (speed & ALX_LINK_SPEED_10_FULL)
+		link_cap |= LX_LC_10F;
+
+	if (speed & ALX_LINK_SPEED_10_HALF)
+		link_cap |= LX_LC_10H;
+
+	if (l1f_init_phy_spdfc(hw, autoneg, link_cap, fc)) {
+		alx_hw_err(hw, "error when init phy speed and fc\n");
+		retval = -EINVAL;
+	}
+
+	return retval;
+}
+
+
+static int alf_setup_phy_link_speed(struct alx_hw *hw, u32 speed,
+				    bool autoneg, bool fc)
+{
+	/*
+	 * Clear autoneg_advertised and set new values based on input link
+	 * speed.
+	 */
+	hw->autoneg_advertised = 0;
+
+	if (speed & ALX_LINK_SPEED_1GB_FULL)
+		hw->autoneg_advertised |= ALX_LINK_SPEED_1GB_FULL;
+
+	if (speed & ALX_LINK_SPEED_100_FULL)
+		hw->autoneg_advertised |= ALX_LINK_SPEED_100_FULL;
+
+	if (speed & ALX_LINK_SPEED_100_HALF)
+		hw->autoneg_advertised |= ALX_LINK_SPEED_100_HALF;
+
+	if (speed & ALX_LINK_SPEED_10_FULL)
+		hw->autoneg_advertised |= ALX_LINK_SPEED_10_FULL;
+
+	if (speed & ALX_LINK_SPEED_10_HALF)
+		hw->autoneg_advertised |= ALX_LINK_SPEED_10_HALF;
+
+	return alf_setup_phy_link(hw, hw->autoneg_advertised,
+				  autoneg, fc);
+}
+
+
+static int alf_check_phy_link(struct alx_hw *hw, u32 *speed, bool *link_up)
+{
+	u16 bmsr, giga;
+	int retval;
+
+	/* BMSR link status is latched; read twice to get the current state */
+	alf_read_phy_reg(hw, MII_BMSR, &bmsr);
+	retval = alf_read_phy_reg(hw, MII_BMSR, &bmsr);
+	if (retval)
+		return retval;
+
+	if (!(bmsr & BMSR_LSTATUS)) {
+		*link_up = false;
+		*speed = ALX_LINK_SPEED_UNKNOWN;
+		return 0;
+	}
+	*link_up = true;
+
+	/* Read PHY Specific Status Register (17) */
+	retval = alf_read_phy_reg(hw, L1F_MII_GIGA_PSSR, &giga);
+	if (retval)
+		return retval;
+
+
+	if (!(giga & L1F_GIGA_PSSR_SPD_DPLX_RESOLVED)) {
+		alx_hw_err(hw, "error for speed duplex resolved\n");
+		return -EINVAL;
+	}
+
+	switch (giga & L1F_GIGA_PSSR_SPEED) {
+	case L1F_GIGA_PSSR_1000MBS:
+		if (giga & L1F_GIGA_PSSR_DPLX)
+			*speed = ALX_LINK_SPEED_1GB_FULL;
+		else
+			alx_hw_err(hw, "1000M half is invalid");
+		break;
+	case L1F_GIGA_PSSR_100MBS:
+		if (giga & L1F_GIGA_PSSR_DPLX)
+			*speed = ALX_LINK_SPEED_100_FULL;
+		else
+			*speed = ALX_LINK_SPEED_100_HALF;
+		break;
+	case L1F_GIGA_PSSR_10MBS:
+		if (giga & L1F_GIGA_PSSR_DPLX)
+			*speed = ALX_LINK_SPEED_10_FULL;
+		else
+			*speed = ALX_LINK_SPEED_10_HALF;
+		break;
+	default:
+		*speed = ALX_LINK_SPEED_UNKNOWN;
+		retval = -EINVAL;
+		break;
+	}
+	return retval;
+}
+
+
+/*
+ * 1. stop_mac
+ * 2. reset mac & dma by reg1400(MASTER)
+ * 3. control speed/duplex, hash-alg
+ * 4. clock switch setting
+ */
+static int alf_reset_mac(struct alx_hw *hw)
+{
+	int retval = 0;
+
+	if (l1f_reset_mac(hw)) {
+		alx_hw_err(hw, "error when reset mac\n");
+		retval = -EINVAL;
+	}
+	return retval;
+}
+
+
+static int alf_start_mac(struct alx_hw *hw)
+{
+	u16 en_ctrl = 0;
+	int retval = 0;
+
+	/* set link speed param */
+	switch (hw->link_speed) {
+	case ALX_LINK_SPEED_1GB_FULL:
+		en_ctrl |= LX_MACSPEED_1000;
+		/* fall through */
+	case ALX_LINK_SPEED_100_FULL:
+	case ALX_LINK_SPEED_10_FULL:
+		en_ctrl |= LX_MACDUPLEX_FULL;
+		break;
+	}
+
+	/* set fc param*/
+	switch (hw->cur_fc_mode) {
+	case alx_fc_full:
+		en_ctrl |= LX_FC_RXEN; /* Flow Control RX Enable */
+		en_ctrl |= LX_FC_TXEN; /* Flow Control TX Enable */
+		break;
+	case alx_fc_rx_pause:
+		en_ctrl |= LX_FC_RXEN; /* Flow Control RX Enable */
+		break;
+	case alx_fc_tx_pause:
+		en_ctrl |= LX_FC_TXEN; /* Flow Control TX Enable */
+		break;
+	default:
+		break;
+	}
+
+	if (hw->fc_single_pause)
+		en_ctrl |= LX_SINGLE_PAUSE;
+
+	en_ctrl |= LX_FLT_DIRECT;    /* RX Enable; and TX Always Enable */
+	en_ctrl |= LX_FLT_BROADCAST; /* RX Broadcast Enable */
+	en_ctrl |= LX_ADD_FCS;
+
+	if (CHK_HW_FLAG(VLANSTRIP_EN))
+		en_ctrl |= LX_VLAN_STRIP;
+
+	if (CHK_HW_FLAG(PROMISC_EN))
+		en_ctrl |=  LX_FLT_PROMISC;
+
+	if (CHK_HW_FLAG(MULTIALL_EN))
+		en_ctrl |= LX_FLT_MULTI_ALL;
+
+	if (CHK_HW_FLAG(LOOPBACK_EN))
+		en_ctrl |= LX_LOOPBACK;
+
+	if (l1f_enable_mac(hw, true, en_ctrl)) {
+		alx_hw_err(hw, "error when start mac\n");
+		retval = -EINVAL;
+	}
+	return retval;
+}
+
+
+/*
+ * 1. stop RXQ (reg15A0) and TXQ (reg1590)
+ * 2. stop MAC (reg1480)
+ */
+static int alf_stop_mac(struct alx_hw *hw)
+{
+	int retval = 0;
+
+	if (l1f_enable_mac(hw, false, 0)) {
+		alx_hw_err(hw, "error when stop mac\n");
+		retval = -EINVAL;
+	}
+	return retval;
+}
+
+
+static int alf_config_mac(struct alx_hw *hw, u16 rxbuf_sz, u16 rx_qnum,
+			  u16 rxring_sz, u16 tx_qnum,  u16 txring_sz)
+{
+	u8 *addr;
+	u32 txmem_hi, txmem_lo[4];
+	u32 rxmem_hi, rfdmem_lo, rrdmem_lo;
+	u16 smb_timer, mtu_with_eth, int_mod;
+	bool hash_legacy;
+	int i;
+	int retval = 0;
+
+	addr = hw->mac_addr;
+
+	txmem_hi = ALX_DMA_ADDR_HI(hw->tpdma[0]);
+	for (i = 0; i < tx_qnum; i++)
+		txmem_lo[i] = ALX_DMA_ADDR_LO(hw->tpdma[i]);
+
+
+	rxmem_hi  = ALX_DMA_ADDR_HI(hw->rfdma[0]);
+	rfdmem_lo = ALX_DMA_ADDR_LO(hw->rfdma[0]);
+	rrdmem_lo = ALX_DMA_ADDR_LO(hw->rrdma[0]);
+
+	smb_timer = (u16)hw->smb_timer;
+	mtu_with_eth = hw->mtu + ALX_ETH_LENGTH_OF_HEADER;
+	int_mod = hw->imt;
+
+	hash_legacy = true;
+
+	if (l1f_init_mac(hw, addr, txmem_hi, txmem_lo, tx_qnum, txring_sz,
+			 rxmem_hi, rfdmem_lo, rrdmem_lo, rxring_sz, rxbuf_sz,
+			 smb_timer, mtu_with_eth, int_mod, hash_legacy)) {
+		alx_hw_err(hw, "error when config mac\n");
+		retval = -EINVAL;
+	}
+
+	return retval;
+}
+
+
+/**
+ *  alf_get_mac_addr
+ *  @hw: pointer to hardware structure
+ **/
+static int alf_get_mac_addr(struct alx_hw *hw, u8 *addr)
+{
+	int retval = 0;
+
+	if (l1f_get_perm_macaddr(hw, addr)) {
+		alx_hw_err(hw, "error when get permanent mac address\n");
+		retval = -EINVAL;
+	}
+	return retval;
+}
+
+
+static int alf_reset_pcie(struct alx_hw *hw, bool l0s_en, bool l1_en)
+{
+	int retval = 0;
+
+	if (!CHK_HW_FLAG(L0S_CAP))
+		l0s_en = false;
+
+	if (l0s_en)
+		SET_HW_FLAG(L0S_EN);
+	else
+		CLI_HW_FLAG(L0S_EN);
+
+
+	if (!CHK_HW_FLAG(L1_CAP))
+		l1_en = false;
+
+	if (l1_en)
+		SET_HW_FLAG(L1_EN);
+	else
+		CLI_HW_FLAG(L1_EN);
+
+	if (l1f_reset_pcie(hw, l0s_en, l1_en)) {
+		alx_hw_err(hw, "error when reset pcie\n");
+		retval = -EINVAL;
+	}
+	return retval;
+}
+
+
+static int alf_config_aspm(struct alx_hw *hw, bool l0s_en, bool l1_en)
+{
+	int retval = 0;
+
+	if (!CHK_HW_FLAG(L0S_CAP))
+		l0s_en = false;
+
+	if (l0s_en)
+		SET_HW_FLAG(L0S_EN);
+	else
+		CLI_HW_FLAG(L0S_EN);
+
+	if (!CHK_HW_FLAG(L1_CAP))
+		l1_en = false;
+
+	if (l1_en)
+		SET_HW_FLAG(L1_EN);
+	else
+		CLI_HW_FLAG(L1_EN);
+
+	if (l1f_enable_aspm(hw, l0s_en, l1_en, 0)) {
+		alx_hw_err(hw, "error when enable aspm\n");
+		retval = -EINVAL;
+	}
+	return retval;
+}
+
+
+static int alf_config_wol(struct alx_hw *hw, u32 wufc)
+{
+	u32 wol;
+	int retval = 0;
+
+	wol = 0;
+	/* turn on magic packet event */
+	if (wufc & ALX_WOL_MAGIC) {
+		wol |= L1F_WOL0_MAGIC_EN | L1F_WOL0_PME_MAGIC_EN;
+		/* a magic packet may be a broadcast, multicast or unicast frame */
+		/* mac |= MAC_CTRL_BC_EN; */
+	}
+
+	/* turn on link up event */
+	if (wufc & ALX_WOL_PHY) {
+		wol |=  L1F_WOL0_LINK_EN | L1F_WOL0_PME_LINK;
+		/* only link up can wake up */
+		retval = alf_write_phy_reg(hw, L1F_MII_IER, L1F_IER_LINK_UP);
+	}
+	alx_mem_w32(hw, L1F_WOL0, wol);
+	return retval;
+}
+
+
+static int alf_config_mac_ctrl(struct alx_hw *hw)
+{
+	u32 mac;
+
+	alx_mem_r32(hw, L1F_MAC_CTRL, &mac);
+
+	/* enable/disable VLAN tag insert,strip */
+	if (CHK_HW_FLAG(VLANSTRIP_EN))
+		mac |= L1F_MAC_CTRL_VLANSTRIP;
+	else
+		mac &= ~L1F_MAC_CTRL_VLANSTRIP;
+
+	if (CHK_HW_FLAG(PROMISC_EN))
+		mac |= L1F_MAC_CTRL_PROMISC_EN;
+	else
+		mac &= ~L1F_MAC_CTRL_PROMISC_EN;
+
+	if (CHK_HW_FLAG(MULTIALL_EN))
+		mac |= L1F_MAC_CTRL_MULTIALL_EN;
+	else
+		mac &= ~L1F_MAC_CTRL_MULTIALL_EN;
+
+	if (CHK_HW_FLAG(LOOPBACK_EN))
+		mac |= L1F_MAC_CTRL_LPBACK_EN;
+	else
+		mac &= ~L1F_MAC_CTRL_LPBACK_EN;
+
+	alx_mem_w32(hw, L1F_MAC_CTRL, mac);
+	return 0;
+}
+
+
+static int alf_config_pow_save(struct alx_hw *hw, u32 speed, bool wol_en,
+			       bool tx_en, bool rx_en, bool pws_en)
+{
+	u8 wire_spd = LX_LC_10H;
+	int retval = 0;
+
+	switch (speed) {
+	case ALX_LINK_SPEED_UNKNOWN:
+	case ALX_LINK_SPEED_10_HALF:
+		wire_spd = LX_LC_10H;
+		break;
+	case ALX_LINK_SPEED_10_FULL:
+		wire_spd = LX_LC_10F;
+		break;
+	case ALX_LINK_SPEED_100_HALF:
+		wire_spd = LX_LC_100H;
+		break;
+	case ALX_LINK_SPEED_100_FULL:
+		wire_spd = LX_LC_100F;
+		break;
+	case ALX_LINK_SPEED_1GB_FULL:
+		wire_spd = LX_LC_1000F;
+		break;
+	}
+
+	if (l1f_powersaving(hw, wire_spd, wol_en, tx_en, rx_en, pws_en)) {
+		alx_hw_err(hw, "error when set power saving\n");
+		retval = -EINVAL;
+	}
+	return retval;
+}
+
+
+/* RAR, Multicast, VLAN */
+static int alf_set_mac_addr(struct alx_hw *hw, u8 *addr)
+{
+	u32 sta;
+
+	/*
+	 * for example: 00-0B-6A-F6-00-DC
+	 * STAD0 <--> 0x6AF600DC, STAD1 <--> 0x000B.
+	 */
+
+	/* low dword */
+	sta = (((u32)addr[2]) << 24) | (((u32)addr[3]) << 16) |
+	      (((u32)addr[4]) << 8)  | ((u32)addr[5]);
+	alx_mem_w32(hw, L1F_STAD0, sta);
+
+	/* high dword */
+	sta = (((u32)addr[0]) << 8) | ((u32)addr[1]);
+	alx_mem_w32(hw, L1F_STAD1, sta);
+	return 0;
+}
+
+
+static int alf_set_mc_addr(struct alx_hw *hw, u8 *addr)
+{
+	u32 crc32, bit, reg, mta;
+
+	/*
+	 * set the hash-table bit for a multicast address. hash calculation:
+	 *   1. compute the 32-bit CRC of the multicast address
+	 *   2. reverse the crc bit order (MSB to LSB)
+	 */
+	crc32 = ALX_ETH_CRC(addr, ALX_ETH_LENGTH_OF_ADDRESS);
+
+	/*
+	 * The hash table is a register array of two 32-bit registers,
+	 * treated as an array of 64 bits.  We want to set bit
+	 * BitArray[hash_value]: the register is selected by the top bit
+	 * of the reversed CRC and the bit within that register by the
+	 * next five bits.  Read the register, OR in the new bit, then
+	 * write back the new value.
+	 */
+	reg = (crc32 >> 31) & 0x1;
+	bit = (crc32 >> 26) & 0x1F;
+
+	alx_mem_r32(hw, L1F_HASH_TBL0 + (reg<<2), &mta);
+	mta |= (0x1 << bit);
+	alx_mem_w32(hw, L1F_HASH_TBL0 + (reg<<2), mta);
+	return 0;
+}
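+
+/*
+ * Worked example (illustrative): if ALX_ETH_CRC() returned
+ * crc32 = 0xB6C00000, then reg = (crc32 >> 31) & 0x1 = 1 and
+ * bit = (crc32 >> 26) & 0x1F = 13, so bit 13 of L1F_HASH_TBL1
+ * gets set.
+ */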
+
+
+static int alf_clear_mc_addr(struct alx_hw *hw)
+{
+	alx_mem_w32(hw, L1F_HASH_TBL0, 0);
+	alx_mem_w32(hw, L1F_HASH_TBL1, 0);
+	return 0;
+}
+
+
+/* RTX, IRQ */
+static int alf_config_tx(struct alx_hw *hw)
+{
+	u32 wrr;
+
+	alx_mem_r32(hw, L1F_WRR, &wrr);
+	switch (hw->wrr_mode) {
+	case alx_wrr_mode_none:
+		FIELD_SETL(wrr, L1F_WRR_PRI, L1F_WRR_PRI_RESTRICT_NONE);
+		break;
+	case alx_wrr_mode_high:
+		FIELD_SETL(wrr, L1F_WRR_PRI, L1F_WRR_PRI_RESTRICT_HI);
+		break;
+	case alx_wrr_mode_high2:
+		FIELD_SETL(wrr, L1F_WRR_PRI, L1F_WRR_PRI_RESTRICT_HI2);
+		break;
+	case alx_wrr_mode_all:
+		FIELD_SETL(wrr, L1F_WRR_PRI, L1F_WRR_PRI_RESTRICT_ALL);
+		break;
+	}
+	FIELD_SETL(wrr, L1F_WRR_PRI0, hw->wrr_prio0);
+	FIELD_SETL(wrr, L1F_WRR_PRI1, hw->wrr_prio1);
+	FIELD_SETL(wrr, L1F_WRR_PRI2, hw->wrr_prio2);
+	FIELD_SETL(wrr, L1F_WRR_PRI3, hw->wrr_prio3);
+	alx_mem_w32(hw, L1F_WRR, wrr);
+	return 0;
+}
+
+
+static int alf_config_msix(struct alx_hw *hw, u16 num_intrs,
+			   bool msix_en, bool msi_en)
+{
+	u32 map[2];
+	u32 type;
+	int msix_idx;
+
+	if (!msix_en)
+		goto configure_legacy;
+
+	memset(map, 0, sizeof(map));
+	for (msix_idx = 0; msix_idx < num_intrs; msix_idx++) {
+		switch (msix_idx) {
+		case ALF_MSIX_INDEX_RXQ0:
+			FIELD_SETL(map[0], L1F_MSI_MAP_TBL1_RXQ0,
+				   ALF_MSIX_INDEX_RXQ0);
+			break;
+		case ALF_MSIX_INDEX_RXQ1:
+			FIELD_SETL(map[0], L1F_MSI_MAP_TBL1_RXQ1,
+				   ALF_MSIX_INDEX_RXQ1);
+			break;
+		case ALF_MSIX_INDEX_RXQ2:
+			FIELD_SETL(map[0], L1F_MSI_MAP_TBL1_RXQ2,
+				   ALF_MSIX_INDEX_RXQ2);
+			break;
+		case ALF_MSIX_INDEX_RXQ3:
+			FIELD_SETL(map[0], L1F_MSI_MAP_TBL1_RXQ3,
+				   ALF_MSIX_INDEX_RXQ3);
+			break;
+		case ALF_MSIX_INDEX_RXQ4:
+			FIELD_SETL(map[1], L1F_MSI_MAP_TBL2_RXQ4,
+				   ALF_MSIX_INDEX_RXQ4);
+			break;
+		case ALF_MSIX_INDEX_RXQ5:
+			FIELD_SETL(map[1], L1F_MSI_MAP_TBL2_RXQ5,
+				   ALF_MSIX_INDEX_RXQ5);
+			break;
+		case ALF_MSIX_INDEX_RXQ6:
+			FIELD_SETL(map[1], L1F_MSI_MAP_TBL2_RXQ6,
+				   ALF_MSIX_INDEX_RXQ6);
+			break;
+		case ALF_MSIX_INDEX_RXQ7:
+			FIELD_SETL(map[1], L1F_MSI_MAP_TBL2_RXQ7,
+				   ALF_MSIX_INDEX_RXQ7);
+			break;
+		case ALF_MSIX_INDEX_TXQ0:
+			FIELD_SETL(map[0], L1F_MSI_MAP_TBL1_TXQ0,
+				   ALF_MSIX_INDEX_TXQ0);
+			break;
+		case ALF_MSIX_INDEX_TXQ1:
+			FIELD_SETL(map[0], L1F_MSI_MAP_TBL1_TXQ1,
+				   ALF_MSIX_INDEX_TXQ1);
+			break;
+		case ALF_MSIX_INDEX_TXQ2:
+			FIELD_SETL(map[1], L1F_MSI_MAP_TBL2_TXQ2,
+				   ALF_MSIX_INDEX_TXQ2);
+			break;
+		case ALF_MSIX_INDEX_TXQ3:
+			FIELD_SETL(map[1], L1F_MSI_MAP_TBL2_TXQ3,
+				   ALF_MSIX_INDEX_TXQ3);
+			break;
+		case ALF_MSIX_INDEX_TIMER:
+			FIELD_SETL(map[0], L1F_MSI_MAP_TBL1_TIMER,
+				   ALF_MSIX_INDEX_TIMER);
+			break;
+		case ALF_MSIX_INDEX_ALERT:
+			FIELD_SETL(map[0], L1F_MSI_MAP_TBL1_ALERT,
+				   ALF_MSIX_INDEX_ALERT);
+			break;
+		case ALF_MSIX_INDEX_SMB:
+			FIELD_SETL(map[1], L1F_MSI_MAP_TBL2_SMB,
+				   ALF_MSIX_INDEX_SMB);
+			break;
+		case ALF_MSIX_INDEX_PHY:
+			FIELD_SETL(map[1], L1F_MSI_MAP_TBL2_PHY,
+				   ALF_MSIX_INDEX_PHY);
+			break;
+		default:
+			break;
+		}
+	}
+
+	alx_mem_w32(hw, L1F_MSI_MAP_TBL1, map[0]);
+	alx_mem_w32(hw, L1F_MSI_MAP_TBL2, map[1]);
+
+	/* 0 to alert, 1 to timer */
+	type = (L1F_MSI_ID_MAP_DMAW |
+		L1F_MSI_ID_MAP_DMAR |
+		L1F_MSI_ID_MAP_PCIELNKDW |
+		L1F_MSI_ID_MAP_PCIECERR |
+		L1F_MSI_ID_MAP_PCIENFERR |
+		L1F_MSI_ID_MAP_PCIEFERR |
+		L1F_MSI_ID_MAP_PCIEUR);
+
+	alx_mem_w32(hw, L1F_MSI_ID_MAP, type);
+	return 0;
+
+configure_legacy:
+	alx_mem_w32(hw, L1F_MSI_MAP_TBL1, 0x0);
+	alx_mem_w32(hw, L1F_MSI_MAP_TBL2, 0x0);
+	alx_mem_w32(hw, L1F_MSI_ID_MAP, 0x0);
+	if (msi_en) {
+		u32 msi;
+		alx_mem_r32(hw, 0x1920, &msi);
+		msi |= 0x10000;
+		alx_mem_w32(hw, 0x1920, msi);
+	}
+	return 0;
+}
+
+
+/*
+ * Interrupt
+ */
+static int alf_ack_phy_intr(struct alx_hw *hw)
+{
+	u16 isr;
+	return alf_read_phy_reg(hw, L1F_MII_ISR, &isr);
+}
+
+
+static int alf_enable_legacy_intr(struct alx_hw *hw)
+{
+	u16 cmd;
+
+	alx_cfg_r16(hw, PCI_COMMAND, &cmd);
+	cmd &= ~PCI_COMMAND_INTX_DISABLE;
+	alx_cfg_w16(hw, PCI_COMMAND, cmd);
+
+	alx_mem_w32(hw, L1F_ISR, ~((u32) L1F_ISR_DIS));
+	alx_mem_w32(hw, L1F_IMR, hw->intr_mask);
+	return 0;
+}
+
+
+static int alf_disable_legacy_intr(struct alx_hw *hw)
+{
+	alx_mem_w32(hw, L1F_ISR, L1F_ISR_DIS);
+	alx_mem_w32(hw, L1F_IMR, 0);
+	alx_mem_flush(hw);
+	return 0;
+}
+
+
+static int alf_enable_msix_intr(struct alx_hw *hw, u8 entry_idx)
+{
+	u32 ctrl_reg;
+
+	ctrl_reg = ALF_MSIX_ENTRY_BASE + (entry_idx * ALF_MSIX_ENTRY_SIZE) +
+		   ALF_MSIX_MSG_CTRL_OFF;
+
+	alx_mem_w32(hw, ctrl_reg, 0x0);
+	alx_mem_flush(hw);
+	return 0;
+}
+
+
+static int alf_disable_msix_intr(struct alx_hw *hw, u8 entry_idx)
+{
+	u32 ctrl_reg;
+
+	ctrl_reg = ALF_MSIX_ENTRY_BASE + (entry_idx * ALF_MSIX_ENTRY_SIZE) +
+		   ALF_MSIX_MSG_CTRL_OFF;
+
+	alx_mem_w32(hw, ctrl_reg, 0x1);
+	alx_mem_flush(hw);
+	return 0;
+}
+
+
+/* RSS */
+static int alf_config_rss(struct alx_hw *hw, bool rss_en)
+{
+	int key_len_by_u8 = sizeof(hw->rss_key);
+	int idt_len_by_u32 = sizeof(hw->rss_idt) / sizeof(u32);
+	u32 rxq0;
+	int i;
+
+	/* Fill out hash function keys (written in byte-reversed order) */
+	for (i = 0; i < key_len_by_u8; i++) {
+		alx_mem_w8(hw, ALF_RSS_KEY(i, u8),
+			   hw->rss_key[key_len_by_u8 - i - 1]);
+	}
+
+	/* Fill out redirection table */
+	for (i = 0; i < idt_len_by_u32; i++)
+		alx_mem_w32(hw, ALF_RSS_TBL(i, u32), hw->rss_idt[i]);
+
+	alx_mem_w32(hw, L1F_RSS_BASE_CPU_NUM, hw->rss_base_cpu);
+
+	alx_mem_r32(hw, L1F_RXQ0, &rxq0);
+	if (hw->rss_hstype & ALX_RSS_HSTYP_IPV4_EN)
+		rxq0 |=  L1F_RXQ0_RSS_HSTYP_IPV4_EN;
+	else
+		rxq0 &=  ~L1F_RXQ0_RSS_HSTYP_IPV4_EN;
+
+	if (hw->rss_hstype & ALX_RSS_HSTYP_TCP4_EN)
+		rxq0 |=  L1F_RXQ0_RSS_HSTYP_IPV4_TCP_EN;
+	else
+		rxq0 &=  ~L1F_RXQ0_RSS_HSTYP_IPV4_TCP_EN;
+
+	if (hw->rss_hstype & ALX_RSS_HSTYP_IPV6_EN)
+		rxq0 |=  L1F_RXQ0_RSS_HSTYP_IPV6_EN;
+	else
+		rxq0 &=  ~L1F_RXQ0_RSS_HSTYP_IPV6_EN;
+
+	if (hw->rss_hstype & ALX_RSS_HSTYP_TCP6_EN)
+		rxq0 |=  L1F_RXQ0_RSS_HSTYP_IPV6_TCP_EN;
+	else
+		rxq0 &=  ~L1F_RXQ0_RSS_HSTYP_IPV6_TCP_EN;
+
+	FIELD_SETL(rxq0, L1F_RXQ0_RSS_MODE, hw->rss_mode);
+	FIELD_SETL(rxq0, L1F_RXQ0_IDT_TBL_SIZE, hw->rss_idt_size);
+
+	if (rss_en)
+		rxq0 |= L1F_RXQ0_RSS_HASH_EN;
+	else
+		rxq0 &= ~L1F_RXQ0_RSS_HASH_EN;
+
+	alx_mem_w32(hw, L1F_RXQ0, rxq0);
+	return 0;
+}
+
+
+/* fc */
+static int alf_get_fc_mode(struct alx_hw *hw, enum alx_fc_mode *mode)
+{
+	u16 bmsr, giga;
+	int i;
+	int retval = 0;
+
+	for (i = 0; i < ALX_MAX_SETUP_LNK_CYCLE; i++) {
+		/* BMSR is latched; read twice for the current link state */
+		alf_read_phy_reg(hw, MII_BMSR, &bmsr);
+		alf_read_phy_reg(hw, MII_BMSR, &bmsr);
+		if (bmsr & BMSR_LSTATUS) {
+			/* Read phy Specific Status Register (17) */
+			retval = alf_read_phy_reg(hw, L1F_MII_GIGA_PSSR, &giga);
+			if (retval)
+				return retval;
+
+			if (!(giga & L1F_GIGA_PSSR_SPD_DPLX_RESOLVED)) {
+				alx_hw_err(hw,
+					"error for speed duplex resolved\n");
+				return -EINVAL;
+			}
+
+			if ((giga & L1F_GIGA_PSSR_FC_TXEN) &&
+			    (giga & L1F_GIGA_PSSR_FC_RXEN)) {
+				*mode = alx_fc_full;
+			} else if (giga & L1F_GIGA_PSSR_FC_TXEN) {
+				*mode = alx_fc_tx_pause;
+			} else if (giga & L1F_GIGA_PSSR_FC_RXEN) {
+				*mode = alx_fc_rx_pause;
+			} else {
+				*mode = alx_fc_none;
+			}
+			break;
+		}
+		mdelay(100);
+	}
+
+	if (i == ALX_MAX_SETUP_LNK_CYCLE) {
+		alx_hw_err(hw, "error when get flow control mode\n");
+		retval = -EINVAL;
+	}
+	return retval;
+}
+
+
+static int alf_config_fc(struct alx_hw *hw)
+{
+	u32 mac;
+	int retval = 0;
+
+	if (hw->disable_fc_autoneg) {
+		hw->fc_was_autonegged = false;
+		hw->cur_fc_mode = hw->req_fc_mode;
+	} else {
+		hw->fc_was_autonegged = true;
+		retval = alf_get_fc_mode(hw, &hw->cur_fc_mode);
+		if (retval)
+			return retval;
+	}
+
+	alx_mem_r32(hw, L1F_MAC_CTRL, &mac);
+
+	switch (hw->cur_fc_mode) {
+	case alx_fc_none: /* 0 */
+		mac &= ~(L1F_MAC_CTRL_RXFC_EN | L1F_MAC_CTRL_TXFC_EN);
+		break;
+	case alx_fc_rx_pause: /* 1 */
+		mac &= ~L1F_MAC_CTRL_TXFC_EN;
+		mac |= L1F_MAC_CTRL_RXFC_EN;
+		break;
+	case alx_fc_tx_pause: /* 2 */
+		mac |= L1F_MAC_CTRL_TXFC_EN;
+		mac &= ~L1F_MAC_CTRL_RXFC_EN;
+		break;
+	case alx_fc_full: /* 3 */
+	case alx_fc_default: /* 4 */
+		mac |= (L1F_MAC_CTRL_TXFC_EN | L1F_MAC_CTRL_RXFC_EN);
+		break;
+	default:
+		alx_hw_err(hw, "flow control param set incorrectly\n");
+		return -EINVAL;
+	}
+
+	alx_mem_w32(hw, L1F_MAC_CTRL, mac);
+
+	return retval;
+}
+
+
+/*
+ * NVRam
+ */
+static int alf_check_nvram(struct alx_hw *hw, bool *exist)
+{
+	*exist = false;
+	return 0;
+}
+
+
+/* ethtool */
+static int alf_get_ethtool_regs(struct alx_hw *hw, void *buff)
+{
+	int i;
+	u32 *val = buff;
+	static const u32 reg[] = {
+		/* 0 */
+		L1F_DEV_CAP, L1F_DEV_CTRL, L1F_LNK_CAP, L1F_LNK_CTRL,
+		L1F_UE_SVRT, L1F_EFLD, L1F_SLD, L1F_PPHY_MISC1,
+		L1F_PPHY_MISC2, L1F_PDLL_TRNS1,
+
+		/* 10 */
+		L1F_TLEXTN_STATS, L1F_EFUSE_CTRL, L1F_EFUSE_DATA, L1F_SPI_OP1,
+		L1F_SPI_OP2, L1F_SPI_OP3, L1F_EF_CTRL, L1F_EF_ADDR,
+		L1F_EF_DATA, L1F_SPI_ID,
+
+		/* 20 */
+		L1F_SPI_CFG_START, L1F_PMCTRL, L1F_LTSSM_CTRL, L1F_MASTER,
+		L1F_MANU_TIMER, L1F_IRQ_MODU_TIMER, L1F_PHY_CTRL, L1F_MAC_STS,
+		L1F_MDIO, L1F_MDIO_EXTN,
+
+		/* 30 */
+		L1F_PHY_STS, L1F_BIST0, L1F_BIST1, L1F_SERDES,
+		L1F_LED_CTRL, L1F_LED_PATN, L1F_LED_PATN2, L1F_SYSALV,
+		L1F_PCIERR_INST, L1F_LPI_DECISN_TIMER,
+
+		/* 40 */
+		L1F_LPI_CTRL, L1F_LPI_WAIT, L1F_HRTBT_VLAN, L1F_HRTBT_CTRL,
+		L1F_RXPARSE, L1F_MAC_CTRL, L1F_GAP, L1F_STAD1,
+		L1F_LED_CTRL, L1F_HASH_TBL0,
+
+		/* 50 */
+		L1F_HASH_TBL1, L1F_HALFD, L1F_DMA, L1F_WOL0,
+		L1F_WOL1, L1F_WOL2, L1F_WRR, L1F_HQTPD,
+		L1F_CPUMAP1, L1F_CPUMAP2,
+
+		/* 60 */
+		L1F_MISC, L1F_RX_BASE_ADDR_HI, L1F_RFD_ADDR_LO, L1F_RFD_RING_SZ,
+		L1F_RFD_BUF_SZ, L1F_RRD_ADDR_LO, L1F_RRD_RING_SZ,
+		L1F_RFD_PIDX, L1F_RFD_CIDX, L1F_RXQ0,
+
+		/* 70 */
+		L1F_RXQ1, L1F_RXQ2, L1F_RXQ3, L1F_TX_BASE_ADDR_HI,
+		L1F_TPD_PRI0_ADDR_LO, L1F_TPD_PRI1_ADDR_LO,
+		L1F_TPD_PRI2_ADDR_LO, L1F_TPD_PRI3_ADDR_LO,
+		L1F_TPD_PRI0_PIDX, L1F_TPD_PRI1_PIDX,
+
+		/* 80 */
+		L1F_TPD_PRI2_PIDX, L1F_TPD_PRI3_PIDX, L1F_TPD_PRI0_CIDX,
+		L1F_TPD_PRI1_CIDX, L1F_TPD_PRI2_CIDX, L1F_TPD_PRI3_CIDX,
+		L1F_TPD_RING_SZ, L1F_TXQ0, L1F_TXQ1, L1F_TXQ2,
+
+		/* 90 */
+		L1F_MSI_MAP_TBL1, L1F_MSI_MAP_TBL2, L1F_MSI_ID_MAP,
+		L1F_MSIX_MASK, L1F_MSIX_PENDING,
+	};
+
+	for (i = 0; i < ARRAY_SIZE(reg); i++)
+		alx_mem_r32(hw, reg[i], &val[i]);
+
+	/* SRAM */
+	for (i = 0; i < 16; i++)
+		alx_mem_r32(hw, ALF_SRAM(i, u32), &val[100 + i]);
+
+	/* RSS */
+	for (i = 0; i < 10; i++)
+		alx_mem_r32(hw, ALF_RSS_KEY(i, u32), &val[120 + i]);
+	for (i = 0; i < 32; i++)
+		alx_mem_r32(hw, ALF_RSS_TBL(i, u32), &val[130 + i]);
+	alx_mem_r32(hw, L1F_RSS_HASH_VAL,     &val[162]);
+	alx_mem_r32(hw, L1F_RSS_HASH_FLAG,    &val[163]);
+	alx_mem_r32(hw, L1F_RSS_BASE_CPU_NUM, &val[164]);
+
+	/* MIB */
+	for (i = 0; i < 48; i++)
+		alx_mem_r32(hw, ALF_MIB(i, u32), &val[170 + i]);
+	return 0;
+}
+
+
+/******************************************************************************/
+static int alf_set_hw_capabilities(struct alx_hw *hw)
+{
+	SET_HW_FLAG(L0S_CAP);
+	SET_HW_FLAG(L1_CAP);
+
+	if (hw->mac_type == alx_mac_l1f)
+		SET_HW_FLAG(GIGA_CAP);
+
+	/* set flags of alx_phy_info */
+	SET_HW_FLAG(PWSAVE_CAP);
+	return 0;
+}
+
+
+/* alf_set_hw_infos */
+static int alf_set_hw_infos(struct alx_hw *hw)
+{
+	hw->rxstat_reg = L1F_MIB_RX_OK;
+	hw->rxstat_sz = 0x60;
+	hw->txstat_reg = L1F_MIB_TX_OK;
+	hw->txstat_sz = 0x68;
+
+	hw->rx_prod_reg[0] = L1F_RFD_PIDX;
+	hw->rx_cons_reg[0] = L1F_RFD_CIDX;
+
+	hw->tx_prod_reg[0] = L1F_TPD_PRI0_PIDX;
+	hw->tx_cons_reg[0] = L1F_TPD_PRI0_CIDX;
+	hw->tx_prod_reg[1] = L1F_TPD_PRI1_PIDX;
+	hw->tx_cons_reg[1] = L1F_TPD_PRI1_CIDX;
+	hw->tx_prod_reg[2] = L1F_TPD_PRI2_PIDX;
+	hw->tx_cons_reg[2] = L1F_TPD_PRI2_CIDX;
+	hw->tx_prod_reg[3] = L1F_TPD_PRI3_PIDX;
+	hw->tx_cons_reg[3] = L1F_TPD_PRI3_CIDX;
+
+	hw->hwreg_sz = 0x200;
+	hw->eeprom_sz = 0;
+
+	return 0;
+}
+
+
+/*
+ *  alf_init_hw_callbacks
+ */
+int alf_init_hw_callbacks(struct alx_hw *hw)
+{
+	/* NIC */
+	hw->cbs.identify_nic   = &alf_identify_nic;
+	/* MAC */
+	hw->cbs.reset_mac      = &alf_reset_mac;
+	hw->cbs.start_mac      = &alf_start_mac;
+	hw->cbs.stop_mac       = &alf_stop_mac;
+	hw->cbs.config_mac     = &alf_config_mac;
+	hw->cbs.get_mac_addr   = &alf_get_mac_addr;
+	hw->cbs.set_mac_addr   = &alf_set_mac_addr;
+	hw->cbs.set_mc_addr    = &alf_set_mc_addr;
+	hw->cbs.clear_mc_addr  = &alf_clear_mc_addr;
+
+	/* PHY */
+	hw->cbs.init_phy          = &alf_init_phy;
+	hw->cbs.reset_phy         = &alf_reset_phy;
+	hw->cbs.read_phy_reg      = &alf_read_phy_reg;
+	hw->cbs.write_phy_reg     = &alf_write_phy_reg;
+	hw->cbs.check_phy_link    = &alf_check_phy_link;
+	hw->cbs.setup_phy_link    = &alf_setup_phy_link;
+	hw->cbs.setup_phy_link_speed = &alf_setup_phy_link_speed;
+
+	/* Interrupt */
+	hw->cbs.ack_phy_intr		= &alf_ack_phy_intr;
+	hw->cbs.enable_legacy_intr	= &alf_enable_legacy_intr;
+	hw->cbs.disable_legacy_intr	= &alf_disable_legacy_intr;
+	hw->cbs.enable_msix_intr	= &alf_enable_msix_intr;
+	hw->cbs.disable_msix_intr	= &alf_disable_msix_intr;
+
+	/* Configure */
+	hw->cbs.config_tx	= &alf_config_tx;
+	hw->cbs.config_fc	= &alf_config_fc;
+	hw->cbs.config_rss	= &alf_config_rss;
+	hw->cbs.config_msix	= &alf_config_msix;
+	hw->cbs.config_wol	= &alf_config_wol;
+	hw->cbs.config_aspm	= &alf_config_aspm;
+	hw->cbs.config_mac_ctrl	= &alf_config_mac_ctrl;
+	hw->cbs.config_pow_save	= &alf_config_pow_save;
+	hw->cbs.reset_pcie	= &alf_reset_pcie;
+
+	/* NVRam */
+	hw->cbs.check_nvram	= &alf_check_nvram;
+
+	/* Others */
+	hw->cbs.get_ethtool_regs = alf_get_ethtool_regs;
+
+	alf_set_hw_capabilities(hw);
+	alf_set_hw_infos(hw);
+
+	alx_hw_info(hw, "HW Flags = 0x%x\n", hw->flags);
+	return 0;
+}
+
diff -Nruw linux-4.4.115-fbx/drivers/net/ethernet/atheros/alx_prop./alf_hw.c linux-4.4.115-fbx/drivers/net/ethernet/atheros/alx_prop/alf_hw.c
--- linux-4.4.115-fbx/drivers/net/ethernet/atheros/alx_prop./alf_hw.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/net/ethernet/atheros/alx_prop/alf_hw.c	2019-01-22 16:16:24.927259302 +0100
@@ -0,0 +1,917 @@
+/*
+ * Copyright (c) 2012 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/pci_regs.h>
+#include <linux/mii.h>
+
+#include "alf_hw.h"
+
+
+/* get permanent mac address from hardware
+ * return
+ *    0: success
+ *    non-0: fail
+ */
+u16 l1f_get_perm_macaddr(struct alx_hw *hw, u8 *addr)
+{
+	u32 val, mac0, mac1;
+	u16 flag, i;
+
+#define INTN_LOADED 0x1
+#define EXTN_LOADED 0x2
+
+	flag = 0;
+	val = 0;
+
+read_mcadr:
+
+	/* get it from register first */
+	alx_mem_r32(hw, L1F_STAD0, &mac0);
+	alx_mem_r32(hw, L1F_STAD1, &mac1);
+
+	*(u32 *)(addr + 2) = LX_SWAP_DW(mac0);
+	*(u16 *)addr = (u16)LX_SWAP_W((u16)mac1);
+
+	if (macaddr_valid(addr))
+		return 0;
+
+	if ((flag & INTN_LOADED) == 0) {
+		/* load from efuse ? */
+		for (i = 0; i < L1F_SLD_MAX_TO; i++) {
+			alx_mem_r32(hw, L1F_SLD, &val);
+			if ((val & (L1F_SLD_STAT | L1F_SLD_START)) == 0)
+				break;
+			mdelay(1);
+		}
+		if (i == L1F_SLD_MAX_TO)
+			goto out;
+		alx_mem_w32(hw, L1F_SLD, val | L1F_SLD_START);
+		for (i = 0; i < L1F_SLD_MAX_TO; i++) {
+			mdelay(1);
+			alx_mem_r32(hw, L1F_SLD, &val);
+			if ((val & L1F_SLD_START) == 0)
+				break;
+		}
+		if (i == L1F_SLD_MAX_TO)
+			goto out;
+		flag |= INTN_LOADED;
+		goto read_mcadr;
+	}
+
+	if ((flag & EXTN_LOADED) == 0) {
+		alx_mem_r32(hw, L1F_EFLD, &val);
+		if ((val & (L1F_EFLD_F_EXIST | L1F_EFLD_E_EXIST)) != 0) {
+			/* load from eeprom/flash ? */
+			for (i = 0; i < L1F_SLD_MAX_TO; i++) {
+				alx_mem_r32(hw, L1F_EFLD, &val);
+				if ((val & (L1F_EFLD_STAT |
+					    L1F_EFLD_START)) == 0) {
+					break;
+				}
+				mdelay(1);
+			}
+			if (i == L1F_SLD_MAX_TO)
+				goto out;
+			alx_mem_w32(hw, L1F_EFLD, val | L1F_EFLD_START);
+			for (i = 0; i < L1F_SLD_MAX_TO; i++) {
+				mdelay(1);
+				alx_mem_r32(hw, L1F_EFLD, &val);
+				if ((val & L1F_EFLD_START) == 0)
+					break;
+			}
+			if (i == L1F_SLD_MAX_TO)
+				goto out;
+			flag |= EXTN_LOADED;
+			goto read_mcadr;
+		}
+	}
+
+out:
+	return LX_ERR_ALOAD;
+}
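+
+/* Usage sketch (illustrative, not part of the vendor code): a caller would
+ * typically try the permanent address first and fall back to a random
+ * locally-administered one on LX_ERR_ALOAD:
+ *
+ *	u8 addr[6];
+ *
+ *	if (l1f_get_perm_macaddr(hw, addr) != 0) {
+ *		get_random_bytes(addr, 6);
+ *		addr[0] &= 0xFE;	// not a multicast address
+ *		addr[0] |= 0x02;	// locally administered
+ *	}
+ */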
+
+
+/* reset mac & dma
+ * return
+ *     0: success
+ *     non-0:fail
+ */
+u16 l1f_reset_mac(struct alx_hw *hw)
+{
+	u32 val, pmctrl = 0;
+	u16 ret;
+	u16 i;
+	u8 rev = (u8)(FIELD_GETX(hw->pci_revid, L1F_PCI_REVID));
+
+	/* disable all interrupts, RXQ/TXQ */
+	alx_mem_w32(hw, L1F_MSIX_MASK, BIT_ALL); /* ???? msi-x */
+	alx_mem_w32(hw, L1F_IMR, 0);
+	alx_mem_w32(hw, L1F_ISR, L1F_ISR_DIS);
+
+	ret = l1f_enable_mac(hw, false, 0);
+	if (ret != 0)
+		return ret;
+
+	/* mac reset workaround */
+	alx_mem_w32(hw, L1F_RFD_PIDX, 1);
+
+	/* dis l0s/l1 before mac reset */
+	if ((rev == L1F_REV_A0 || rev == L1F_REV_A1) &&
+	    (hw->pci_revid & L1F_PCI_REVID_WTH_CR) != 0) {
+		alx_mem_r32(hw, L1F_PMCTRL, &pmctrl);
+		if ((pmctrl & (L1F_PMCTRL_L1_EN | L1F_PMCTRL_L0S_EN)) != 0) {
+			alx_mem_w32(hw, L1F_PMCTRL,
+				    pmctrl & ~(L1F_PMCTRL_L1_EN |
+					       L1F_PMCTRL_L0S_EN));
+		}
+	}
+
+	/* reset whole mac safely */
+	alx_mem_r32(hw, L1F_MASTER, &val);
+	alx_mem_w32(hw, L1F_MASTER,
+		    val | L1F_MASTER_DMA_MAC_RST | L1F_MASTER_OOB_DIS);
+
+	/* make sure it's really idle */
+	udelay(10);
+	for (i = 0; i < L1F_DMA_MAC_RST_TO; i++) {
+		alx_mem_r32(hw, L1F_RFD_PIDX, &val);
+		if (val == 0)
+			break;
+		udelay(10);
+	}
+	for (; i < L1F_DMA_MAC_RST_TO; i++) {
+		alx_mem_r32(hw, L1F_MASTER, &val);
+		if ((val & L1F_MASTER_DMA_MAC_RST) == 0)
+			break;
+		udelay(10);
+	}
+	if (i == L1F_DMA_MAC_RST_TO)
+		return LX_ERR_RSTMAC;
+	udelay(10);
+
+	if ((rev == L1F_REV_A0 || rev == L1F_REV_A1) &&
+	    (hw->pci_revid & L1F_PCI_REVID_WTH_CR) != 0) {
+		/* set L1F_MASTER_PCLKSEL_SRDS (affected by soft-rst, PERST) */
+		alx_mem_w32(hw, L1F_MASTER, val | L1F_MASTER_PCLKSEL_SRDS);
+		/* restore l0s / l1 */
+		if ((pmctrl & (L1F_PMCTRL_L1_EN | L1F_PMCTRL_L0S_EN)) != 0)
+			alx_mem_w32(hw, L1F_PMCTRL, pmctrl);
+	}
+
+	/* clear internal OSC settings, let hw switch the OSC itself,
+	 * disable isolate mode for A0 */
+	alx_mem_r32(hw, L1F_MISC3, &val);
+	alx_mem_w32(hw, L1F_MISC3,
+		    (val & ~L1F_MISC3_25M_BY_SW) | L1F_MISC3_25M_NOTO_INTNL);
+	alx_mem_r32(hw, L1F_MISC, &val);
+	val &= ~L1F_MISC_INTNLOSC_OPEN;
+	if (rev == L1F_REV_A0 || rev == L1F_REV_A1)
+		val &= ~L1F_MISC_ISO_EN;
+	alx_mem_w32(hw, L1F_MISC, val);
+	udelay(20);
+
+	/* driver controls speed/duplex, hash algorithm */
+	alx_mem_r32(hw, L1F_MAC_CTRL, &val);
+	alx_mem_w32(hw, L1F_MAC_CTRL, val | L1F_MAC_CTRL_WOLSPED_SWEN);
+
+	/* clk sw */
+	alx_mem_r32(hw, L1F_SERDES, &val);
+	alx_mem_w32(hw, L1F_SERDES,
+		    val | L1F_SERDES_MACCLK_SLWDWN | L1F_SERDES_PHYCLK_SLWDWN);
+
+	return 0;
+}
+
+/* reset phy
+ * return
+ *    0: success
+ *    non-0:fail
+ */
+u16 l1f_reset_phy(struct alx_hw *hw, bool pws_en, bool az_en, bool ptp_en)
+{
+	u32 val;
+	u16 i, phy_val;
+
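+	/* az_en/ptp_en are not used yet; the self-assignments below just
+	 * silence unused-parameter warnings */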
+	az_en = az_en;
+	ptp_en = ptp_en;
+
+	/* reset PHY core */
+	alx_mem_r32(hw, L1F_PHY_CTRL, &val);
+	val &= ~(L1F_PHY_CTRL_DSPRST_OUT | L1F_PHY_CTRL_IDDQ |
+		 L1F_PHY_CTRL_GATE_25M | L1F_PHY_CTRL_POWER_DOWN |
+		 L1F_PHY_CTRL_CLS);
+	val |= L1F_PHY_CTRL_RST_ANALOG;
+
+	if (pws_en)
+		val |= (L1F_PHY_CTRL_HIB_PULSE | L1F_PHY_CTRL_HIB_EN);
+	else
+		val &= ~(L1F_PHY_CTRL_HIB_PULSE | L1F_PHY_CTRL_HIB_EN);
+	alx_mem_w32(hw, L1F_PHY_CTRL, val);
+	udelay(10); /* 5us would be enough; 10us gives margin */
+	alx_mem_w32(hw, L1F_PHY_CTRL, val | L1F_PHY_CTRL_DSPRST_OUT);
+
+	for (i = 0; i < L1F_PHY_CTRL_DSPRST_TO; i++) { /* delay 800us */
+		udelay(10);
+	}
+
+	/* ???? phy power saving */
+
+	l1f_write_phydbg(hw, true,
+			 L1F_MIIDBG_TST10BTCFG, L1F_TST10BTCFG_DEF);
+	l1f_write_phydbg(hw, true, L1F_MIIDBG_SRDSYSMOD, L1F_SRDSYSMOD_DEF);
+	l1f_write_phydbg(hw, true,
+			 L1F_MIIDBG_TST100BTCFG, L1F_TST100BTCFG_DEF);
+	l1f_write_phydbg(hw, true, L1F_MIIDBG_ANACTRL, L1F_ANACTRL_DEF);
+	l1f_read_phydbg(hw, true, L1F_MIIDBG_GREENCFG2, &phy_val);
+	l1f_write_phydbg(hw, true, L1F_MIIDBG_GREENCFG2,
+			 phy_val & ~L1F_GREENCFG2_GATE_DFSE_EN);
+	/* rtl8139c, 120m */
+	l1f_write_phy(hw, true, L1F_MIIEXT_ANEG, true,
+		      L1F_MIIEXT_NLP78, L1F_MIIEXT_NLP78_120M_DEF);
+
+	/* set phy interrupt mask */
+	l1f_write_phy(hw, false, 0, true,
+		      L1F_MII_IER, L1F_IER_LINK_UP | L1F_IER_LINK_DOWN);
+
+
+	/* TODO *****???? */
+	return 0;
+}
+
+
+/* reset pcie
+ * just reset pcie relative registers (pci command, clk, aspm...)
+ * return
+ *    0:success
+ *    non-0:fail
+ */
+u16 l1f_reset_pcie(struct alx_hw *hw, bool l0s_en, bool l1_en)
+{
+	u32 val;
+	u16 val16;
+	u16 ret;
+	u8 rev = (u8)(FIELD_GETX(hw->pci_revid, L1F_PCI_REVID));
+
+	/* Workaround for PCI problem when BIOS sets MMRBC incorrectly. */
+	alx_cfg_r16(hw, PCI_COMMAND, &val16);
+	if ((val16 & (PCI_COMMAND_IO |
+		      PCI_COMMAND_MEMORY |
+		      PCI_COMMAND_MASTER)) == 0 ||
+	    (val16 & PCI_COMMAND_INTX_DISABLE) != 0) {
+		val16 = (u16)((val16 | (PCI_COMMAND_IO |
+					PCI_COMMAND_MEMORY |
+					PCI_COMMAND_MASTER))
+			      & ~PCI_COMMAND_INTX_DISABLE);
+		alx_cfg_w16(hw, PCI_COMMAND, val16);
+	}
+
+	/* Clear any PowerSaving Settings */
+	alx_cfg_w16(hw, L1F_PM_CSR, 0);
+
+	/* default val of PDLL D3PLLOFF */
+	alx_mem_r32(hw, L1F_PDLL_TRNS1, &val);
+	alx_mem_w32(hw, L1F_PDLL_TRNS1, val & ~L1F_PDLL_TRNS1_D3PLLOFF_EN);
+
+	/* mask some pcie error bits */
+	alx_mem_r32(hw, L1F_UE_SVRT, &val);
+	val &= ~(L1F_UE_SVRT_DLPROTERR | L1F_UE_SVRT_FCPROTERR);
+	alx_mem_w32(hw, L1F_UE_SVRT, val);
+
+	/* wol 25M  & pclk */
+	alx_mem_r32(hw, L1F_MASTER, &val);
+	if ((rev == L1F_REV_A0 || rev == L1F_REV_A1) &&
+	    (hw->pci_revid & L1F_PCI_REVID_WTH_CR) != 0) {
+		if ((val & L1F_MASTER_WAKEN_25M) == 0 ||
+		    (val & L1F_MASTER_PCLKSEL_SRDS) == 0) {
+			alx_mem_w32(hw, L1F_MASTER,
+				    val | L1F_MASTER_PCLKSEL_SRDS |
+				    L1F_MASTER_WAKEN_25M);
+		}
+	} else {
+		if ((val & L1F_MASTER_WAKEN_25M) == 0 ||
+		    (val & L1F_MASTER_PCLKSEL_SRDS) != 0) {
+			alx_mem_w32(hw, L1F_MASTER,
+				    (val & ~L1F_MASTER_PCLKSEL_SRDS) |
+				    L1F_MASTER_WAKEN_25M);
+		}
+	}
+
+	/* l0s, l1 setting */
+	ret = l1f_enable_aspm(hw, l0s_en, l1_en, 0);
+
+	udelay(10);
+
+	return ret;
+}
+
+
+/* disable/enable MAC/RXQ/TXQ
+ * en
+ *    true:enable
+ *    false:disable
+ * return
+ *    0:success
+ *    non-0:fail
+ */
+u16 l1f_enable_mac(struct alx_hw *hw, bool en, u16 en_ctrl)
+{
+	u32 rxq, txq, mac, val;
+	u16 i;
+
+	alx_mem_r32(hw, L1F_RXQ0, &rxq);
+	alx_mem_r32(hw, L1F_TXQ0, &txq);
+	alx_mem_r32(hw, L1F_MAC_CTRL, &mac);
+
+	if (en) { /* enable */
+		alx_mem_w32(hw, L1F_RXQ0, rxq | L1F_RXQ0_EN);
+		alx_mem_w32(hw, L1F_TXQ0, txq | L1F_TXQ0_EN);
+		if ((en_ctrl & LX_MACSPEED_1000) != 0) {
+			FIELD_SETL(mac, L1F_MAC_CTRL_SPEED,
+				   L1F_MAC_CTRL_SPEED_1000);
+		} else {
+			FIELD_SETL(mac, L1F_MAC_CTRL_SPEED,
+				   L1F_MAC_CTRL_SPEED_10_100);
+		}
+
+		test_set_or_clear(mac, en_ctrl, LX_MACDUPLEX_FULL,
+				  L1F_MAC_CTRL_FULLD);
+		/* rx filter */
+		test_set_or_clear(mac, en_ctrl, LX_FLT_PROMISC,
+				  L1F_MAC_CTRL_PROMISC_EN);
+		test_set_or_clear(mac, en_ctrl, LX_FLT_MULTI_ALL,
+				  L1F_MAC_CTRL_MULTIALL_EN);
+		test_set_or_clear(mac, en_ctrl, LX_FLT_BROADCAST,
+				  L1F_MAC_CTRL_BRD_EN);
+		test_set_or_clear(mac, en_ctrl, LX_FLT_DIRECT,
+				  L1F_MAC_CTRL_RX_EN);
+		test_set_or_clear(mac, en_ctrl, LX_FC_TXEN,
+				  L1F_MAC_CTRL_TXFC_EN);
+		test_set_or_clear(mac, en_ctrl, LX_FC_RXEN,
+				  L1F_MAC_CTRL_RXFC_EN);
+		test_set_or_clear(mac, en_ctrl, LX_VLAN_STRIP,
+				  L1F_MAC_CTRL_VLANSTRIP);
+		test_set_or_clear(mac, en_ctrl, LX_LOOPBACK,
+				  L1F_MAC_CTRL_LPBACK_EN);
+		test_set_or_clear(mac, en_ctrl, LX_SINGLE_PAUSE,
+				  L1F_MAC_CTRL_SPAUSE_EN);
+		test_set_or_clear(mac, en_ctrl, LX_ADD_FCS,
+				  (L1F_MAC_CTRL_PCRCE | L1F_MAC_CTRL_CRCE));
+
+		alx_mem_w32(hw, L1F_MAC_CTRL, mac | L1F_MAC_CTRL_TX_EN);
+	} else { /* disable mac */
+		alx_mem_w32(hw, L1F_RXQ0, rxq & ~L1F_RXQ0_EN);
+		alx_mem_w32(hw, L1F_TXQ0, txq & ~L1F_TXQ0_EN);
+
+		/* wait for rxq/txq to become idle */
+		udelay(40);
+
+		/* stop mac tx/rx */
+		alx_mem_w32(hw, L1F_MAC_CTRL,
+			    mac & ~(L1F_MAC_CTRL_RX_EN | L1F_MAC_CTRL_TX_EN));
+
+		for (i = 0; i < L1F_DMA_MAC_RST_TO; i++) {
+			alx_mem_r32(hw, L1F_MAC_STS, &val);
+			if ((val & L1F_MAC_STS_IDLE) == 0)
+				break;
+			udelay(10);
+		}
+		if (L1F_DMA_MAC_RST_TO == i)
+			return LX_ERR_RSTMAC;
+	}
+
+	return 0;
+}
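+
+/* Usage sketch (illustrative): en_ctrl is a bitmask of the LX_* flags
+ * tested above. Bringing the MAC up at 1000/full with flow control and
+ * normal rx filtering could look like:
+ *
+ *	u16 ctrl = LX_MACSPEED_1000 | LX_MACDUPLEX_FULL |
+ *		   LX_FLT_DIRECT | LX_FLT_BROADCAST |
+ *		   LX_FC_TXEN | LX_FC_RXEN;
+ *	u16 err = l1f_enable_mac(hw, true, ctrl);
+ */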
+
+/* enable/disable aspm support
+ * that will change settings for phy/mac/pcie
+ */
+u16 l1f_enable_aspm(struct alx_hw *hw, bool l0s_en, bool l1_en, u8 lnk_stat)
+{
+	u32 pmctrl;
+	u8 rev = (u8)(FIELD_GETX(hw->pci_revid, L1F_PCI_REVID));
+
+	lnk_stat = lnk_stat;
+
+
+	alx_mem_r32(hw, L1F_PMCTRL, &pmctrl);
+
+	/* ????default */
+	FIELD_SETL(pmctrl, L1F_PMCTRL_LCKDET_TIMER,
+		   L1F_PMCTRL_LCKDET_TIMER_DEF);
+	pmctrl |= L1F_PMCTRL_RCVR_WT_1US |      /* wait 1us */
+		  L1F_PMCTRL_L1_CLKSW_EN |      /* pcie clk sw */
+		  L1F_PMCTRL_L1_SRDSRX_PWD;     /* pwd serdes ????default */
+	/* ????default */
+	FIELD_SETL(pmctrl, L1F_PMCTRL_L1REQ_TO, L1F_PMCTRL_L1REG_TO_DEF);
+	FIELD_SETL(pmctrl, L1F_PMCTRL_L1_TIMER, L1F_PMCTRL_L1_TIMER_16US);
+	pmctrl &= ~(L1F_PMCTRL_L1_SRDS_EN |
+		    L1F_PMCTRL_L1_SRDSPLL_EN |
+		    L1F_PMCTRL_L1_BUFSRX_EN |
+		    L1F_PMCTRL_SADLY_EN |       /* ???default */
+		    L1F_PMCTRL_HOTRST_WTEN |
+		    L1F_PMCTRL_L0S_EN |
+		    L1F_PMCTRL_L1_EN |
+		    L1F_PMCTRL_ASPM_FCEN |
+		    L1F_PMCTRL_TXL1_AFTER_L0S |
+		    L1F_PMCTRL_RXL1_AFTER_L0S);
+	if ((rev == L1F_REV_A0 || rev == L1F_REV_A1) &&
+	    (hw->pci_revid & L1F_PCI_REVID_WTH_CR) != 0) {
+		pmctrl |= L1F_PMCTRL_L1_SRDS_EN | L1F_PMCTRL_L1_SRDSPLL_EN;
+	}
+
+	/* on/off l0s only if bios/system enable l0s */
+	if (/* sysl0s_en && */ l0s_en)
+		pmctrl |= (L1F_PMCTRL_L0S_EN | L1F_PMCTRL_ASPM_FCEN);
+	/* on/off l1 only if bios/system enable l1 */
+	if (/* sysl1_en && */ l1_en)
+		pmctrl |= (L1F_PMCTRL_L1_EN | L1F_PMCTRL_ASPM_FCEN);
+
+	alx_mem_w32(hw, L1F_PMCTRL, pmctrl);
+
+	return 0;
+}
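+
+/* Usage sketch (illustrative): passing false for both flags leaves all
+ * L0s/L1 entry bits cleared, i.e. ASPM fully disabled; this is how
+ * l1f_reset_pcie() above forwards its l0s_en/l1_en arguments:
+ *
+ *	l1f_enable_aspm(hw, false, false, 0);
+ */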
+
+
+/* initialize phy for speed / flow control
+ * lnk_cap
+ *    in autoneg mode: the link capabilities to advertise to the peer
+ *    in force mode: the forced speed/duplex
+ */
+u16 l1f_init_phy_spdfc(struct alx_hw *hw, bool auto_neg,
+		       u8 lnk_cap, bool fc_en)
+{
+	u16 adv, giga, cr;
+	u32 val;
+	u16 ret;
+
+	/* clear flag */
+	l1f_write_phy(hw, false, 0, false, L1F_MII_DBG_ADDR, 0);
+	alx_mem_r32(hw, L1F_DRV, &val);
+	FIELD_SETL(val, LX_DRV_PHY, 0);
+
+	if (auto_neg) {
+		adv = L1F_ADVERTISE_DEFAULT_CAP & ~L1F_ADVERTISE_SPEED_MASK;
+		giga = L1F_GIGA_CR_1000T_DEFAULT_CAP &
+			~L1F_GIGA_CR_1000T_SPEED_MASK;
+		val |= LX_DRV_PHY_AUTO;
+		if (!fc_en)
+			adv &= ~(ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
+		else
+			val |= LX_DRV_PHY_FC;
+		if ((LX_LC_10H & lnk_cap) != 0) {
+			adv |= ADVERTISE_10HALF;
+			val |= LX_DRV_PHY_10;
+		}
+		if ((LX_LC_10F & lnk_cap) != 0) {
+			adv |= ADVERTISE_10HALF |
+			       ADVERTISE_10FULL;
+			val |= LX_DRV_PHY_10 | LX_DRV_PHY_DUPLEX;
+		}
+		if ((LX_LC_100H & lnk_cap) != 0) {
+			adv |= ADVERTISE_100HALF;
+			val |= LX_DRV_PHY_100;
+		}
+		if ((LX_LC_100F & lnk_cap) != 0) {
+			adv |= ADVERTISE_100HALF |
+			       ADVERTISE_100FULL;
+			val |= LX_DRV_PHY_100 | LX_DRV_PHY_DUPLEX;
+		}
+		if ((LX_LC_1000F & lnk_cap) != 0) {
+			giga |= L1F_GIGA_CR_1000T_FD_CAPS;
+			val |= LX_DRV_PHY_1000 | LX_DRV_PHY_DUPLEX;
+		}
+
+		ret = l1f_write_phy(hw, false, 0, false, MII_ADVERTISE, adv);
+		ret = l1f_write_phy(hw, false, 0, false, MII_CTRL1000, giga);
+
+		cr = BMCR_RESET | BMCR_ANENABLE | BMCR_ANRESTART;
+		ret = l1f_write_phy(hw, false, 0, false, MII_BMCR, cr);
+	} else { /* force mode */
+		cr = BMCR_RESET;
+		switch (lnk_cap) {
+		case LX_LC_10H:
+			val |= LX_DRV_PHY_10;
+			break;
+		case LX_LC_10F:
+			cr |= BMCR_FULLDPLX;
+			val |= LX_DRV_PHY_10 | LX_DRV_PHY_DUPLEX;
+			break;
+		case LX_LC_100H:
+			cr |= BMCR_SPEED100;
+			val |= LX_DRV_PHY_100;
+			break;
+		case LX_LC_100F:
+			cr |= BMCR_SPEED100 | BMCR_FULLDPLX;
+			val |= LX_DRV_PHY_100 | LX_DRV_PHY_DUPLEX;
+			break;
+		default:
+			return LX_ERR_PARM;
+		}
+		ret = l1f_write_phy(hw, false, 0, false, MII_BMCR, cr);
+	}
+
+	if (!ret) {
+		l1f_write_phy(hw, false, 0, false,
+			      L1F_MII_DBG_ADDR, LX_PHY_INITED);
+	}
+	alx_mem_w32(hw, L1F_DRV, val);
+
+	return ret;
+}
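+
+/* Usage sketch (illustrative): autonegotiate, advertising every speed up
+ * to 1000-full plus flow control:
+ *
+ *	u16 err = l1f_init_phy_spdfc(hw, true,
+ *				     LX_LC_10H | LX_LC_10F | LX_LC_100H |
+ *				     LX_LC_100F | LX_LC_1000F,
+ *				     true);
+ */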
+
+
+/* do power saving settings before entering suspend mode
+ * NOTE:
+ *    1. phy link must be established before calling this function
+ *    2. wol options (pattern, magic, link, etc.) must be configured
+ *       before calling it
+ */
+u16 l1f_powersaving(struct alx_hw *hw,
+		    u8 wire_spd,
+		    bool wol_en,
+		    bool mactx_en,
+		    bool macrx_en,
+		    bool pws_en)
+{
+	u32 master_ctrl, mac_ctrl, phy_ctrl, val;
+	u16 pm_ctrl, ret = 0;
+
+	master_ctrl = 0;
+	mac_ctrl = 0;
+	phy_ctrl = 0;
+
+	pws_en = pws_en;
+
+	alx_mem_r32(hw, L1F_MASTER, &master_ctrl);
+	master_ctrl &= ~L1F_MASTER_PCLKSEL_SRDS;
+
+	alx_mem_r32(hw, L1F_MAC_CTRL, &mac_ctrl);
+	/* 10/100 half */
+	FIELD_SETL(mac_ctrl, L1F_MAC_CTRL_SPEED,  L1F_MAC_CTRL_SPEED_10_100);
+	mac_ctrl &= ~(L1F_MAC_CTRL_FULLD |
+		      L1F_MAC_CTRL_RX_EN |
+		      L1F_MAC_CTRL_TX_EN);
+
+	alx_mem_r32(hw, L1F_PHY_CTRL, &phy_ctrl);
+	phy_ctrl &= ~(L1F_PHY_CTRL_DSPRST_OUT | L1F_PHY_CTRL_CLS);
+	/* if (pws_en) { */
+	phy_ctrl |= (L1F_PHY_CTRL_RST_ANALOG | L1F_PHY_CTRL_HIB_PULSE |
+		     L1F_PHY_CTRL_HIB_EN);
+
+	if (wol_en) { /* enable rx packet or tx packet */
+		if (macrx_en)
+			mac_ctrl |= (L1F_MAC_CTRL_RX_EN | L1F_MAC_CTRL_BRD_EN);
+		if (mactx_en)
+			mac_ctrl |= L1F_MAC_CTRL_TX_EN;
+		if (LX_LC_1000F == wire_spd) {
+			FIELD_SETL(mac_ctrl, L1F_MAC_CTRL_SPEED,
+				   L1F_MAC_CTRL_SPEED_1000);
+		}
+		if (LX_LC_10F == wire_spd ||
+		    LX_LC_100F == wire_spd ||
+		    LX_LC_1000F == wire_spd) {
+			mac_ctrl |= L1F_MAC_CTRL_FULLD;
+		}
+		phy_ctrl |= L1F_PHY_CTRL_DSPRST_OUT;
+		ret = l1f_write_phy(hw, false, 0, false, L1F_MII_IER,
+				    L1F_IER_LINK_UP);
+	} else {
+		ret = l1f_write_phy(hw, false, 0, false, L1F_MII_IER, 0);
+		phy_ctrl |= (L1F_PHY_CTRL_IDDQ | L1F_PHY_CTRL_POWER_DOWN);
+	}
+	alx_mem_w32(hw, L1F_MASTER, master_ctrl);
+	alx_mem_w32(hw, L1F_MAC_CTRL, mac_ctrl);
+	alx_mem_w32(hw, L1F_PHY_CTRL, phy_ctrl);
+
+	/* set val of PDLL D3PLLOFF */
+	alx_mem_r32(hw, L1F_PDLL_TRNS1, &val);
+	alx_mem_w32(hw, L1F_PDLL_TRNS1, val | L1F_PDLL_TRNS1_D3PLLOFF_EN);
+
+	/* set PME_EN */
+	if (wol_en) {
+		alx_cfg_r16(hw, L1F_PM_CSR, &pm_ctrl);
+		pm_ctrl |= L1F_PM_CSR_PME_EN;
+		alx_cfg_w16(hw, L1F_PM_CSR, pm_ctrl);
+	}
+
+	return ret;
+}
+
+
+/* read phy register */
+u16 l1f_read_phy(struct alx_hw *hw, bool ext, u8 dev, bool fast,
+		 u16 reg, u16 *data)
+{
+	u32 val;
+	u16 clk_sel, i, ret = 0;
+
+	*data = 0;
+	clk_sel = fast ?
+	    (u16)L1F_MDIO_CLK_SEL_25MD4 : (u16)L1F_MDIO_CLK_SEL_25MD128;
+
+	if (ext) {
+		val = FIELDL(L1F_MDIO_EXTN_DEVAD, dev) |
+		      FIELDL(L1F_MDIO_EXTN_REG, reg);
+		alx_mem_w32(hw, L1F_MDIO_EXTN, val);
+
+		val = L1F_MDIO_SPRES_PRMBL |
+		      FIELDL(L1F_MDIO_CLK_SEL, clk_sel) |
+		      L1F_MDIO_START |
+		      L1F_MDIO_MODE_EXT |
+		      L1F_MDIO_OP_READ;
+	} else {
+		val = L1F_MDIO_SPRES_PRMBL |
+		      FIELDL(L1F_MDIO_CLK_SEL, clk_sel) |
+		      FIELDL(L1F_MDIO_REG, reg) |
+		      L1F_MDIO_START |
+		      L1F_MDIO_OP_READ;
+	}
+
+	alx_mem_w32(hw, L1F_MDIO, val);
+
+	for (i = 0; i < L1F_MDIO_MAX_AC_TO; i++) {
+		alx_mem_r32(hw, L1F_MDIO, &val);
+		if ((val & L1F_MDIO_BUSY) == 0) {
+			*data = (u16)FIELD_GETX(val, L1F_MDIO_DATA);
+			break;
+		}
+		udelay(10);
+	}
+
+	if (L1F_MDIO_MAX_AC_TO == i)
+		ret = LX_ERR_MIIBUSY;
+
+	return ret;
+}
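+
+/* Usage sketch (illustrative): read the standard BMSR from the internal
+ * PHY (ext=false, dev=0) with the slow MDC clock and test link status;
+ * MII_BMSR/BMSR_LSTATUS come from <linux/mii.h> included above:
+ *
+ *	u16 bmsr;
+ *
+ *	if (l1f_read_phy(hw, false, 0, false, MII_BMSR, &bmsr) == 0 &&
+ *	    (bmsr & BMSR_LSTATUS) != 0) {
+ *		// link is up
+ *	}
+ */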
+
+/* write phy register */
+u16 l1f_write_phy(struct alx_hw *hw, bool ext, u8 dev, bool fast,
+		  u16 reg, u16 data)
+{
+	u32 val;
+	u16 clk_sel, i, ret = 0;
+
+	clk_sel = fast ?
+	    (u16)L1F_MDIO_CLK_SEL_25MD4 : (u16)L1F_MDIO_CLK_SEL_25MD128;
+
+	if (ext) {
+		val = FIELDL(L1F_MDIO_EXTN_DEVAD, dev) |
+		      FIELDL(L1F_MDIO_EXTN_REG, reg);
+		alx_mem_w32(hw, L1F_MDIO_EXTN, val);
+
+		val = L1F_MDIO_SPRES_PRMBL |
+		      FIELDL(L1F_MDIO_CLK_SEL, clk_sel) |
+		      FIELDL(L1F_MDIO_DATA, data) |
+		      L1F_MDIO_START |
+		      L1F_MDIO_MODE_EXT;
+	} else {
+		val = L1F_MDIO_SPRES_PRMBL |
+		      FIELDL(L1F_MDIO_CLK_SEL, clk_sel) |
+		      FIELDL(L1F_MDIO_REG, reg) |
+		      FIELDL(L1F_MDIO_DATA, data) |
+		      L1F_MDIO_START;
+	}
+
+	alx_mem_w32(hw, L1F_MDIO, val);
+
+	for (i = 0; i < L1F_MDIO_MAX_AC_TO; i++) {
+		alx_mem_r32(hw, L1F_MDIO, &val);
+		if ((val & L1F_MDIO_BUSY) == 0)
+			break;
+		udelay(10);
+	}
+
+	if (L1F_MDIO_MAX_AC_TO == i)
+		ret = LX_ERR_MIIBUSY;
+
+	return ret;
+}
+
+u16 l1f_read_phydbg(struct alx_hw *hw, bool fast, u16 reg, u16 *data)
+{
+	u16 ret;
+
+	ret = l1f_write_phy(hw, false, 0, fast, L1F_MII_DBG_ADDR, reg);
+	ret = l1f_read_phy(hw, false, 0, fast, L1F_MII_DBG_DATA, data);
+
+	return ret;
+}
+
+u16 l1f_write_phydbg(struct alx_hw *hw, bool fast, u16 reg, u16 data)
+{
+	u16 ret;
+
+	ret = l1f_write_phy(hw, false, 0, fast, L1F_MII_DBG_ADDR, reg);
+	ret = l1f_write_phy(hw, false, 0, fast, L1F_MII_DBG_DATA, data);
+
+	return ret;
+}
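+
+/* The debug registers above are indirect: L1F_MII_DBG_ADDR selects the
+ * register and L1F_MII_DBG_DATA carries the value, so a read-modify-write
+ * takes three MDIO transactions (sketch mirroring l1f_reset_phy()):
+ *
+ *	u16 v;
+ *
+ *	l1f_read_phydbg(hw, true, L1F_MIIDBG_GREENCFG2, &v);
+ *	l1f_write_phydbg(hw, true, L1F_MIIDBG_GREENCFG2,
+ *			 v & ~L1F_GREENCFG2_GATE_DFSE_EN);
+ */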
+
+/*
+ * basic mac initialization
+ *  most high-level features are left uninitialized;
+ *      MAC/PHY should be reset before calling this function
+ *  smb_timer : milliseconds (the register ticks in 2us units, hence * 500)
+ *  int_mod   : microseconds
+ *  RSS is disabled by default
+ */
+u16 l1f_init_mac(struct alx_hw *hw, u8 *addr, u32 txmem_hi,
+		 u32 *tx_mem_lo, u8 tx_qnum, u16 txring_sz,
+		 u32 rxmem_hi, u32 rfdmem_lo, u32 rrdmem_lo,
+		 u16 rxring_sz, u16 rxbuf_sz, u16 smb_timer,
+		 u16 mtu, u16 int_mod, bool hash_legacy)
+{
+	u32 val;
+	u16 val16, devid;
+	u8 dmar_len;
+
+	alx_cfg_r16(hw, PCI_DEVICE_ID, &devid);
+
+	/* set mac-address */
+	val = *(u32 *)(addr + 2);
+	alx_mem_w32(hw, L1F_STAD0, LX_SWAP_DW(val));
+	val = *(u16 *)addr;
+	alx_mem_w32(hw, L1F_STAD1, LX_SWAP_W((u16)val));
+
+	/* clear multicast hash table, select hash algorithm */
+	alx_mem_w32(hw, L1F_HASH_TBL0, 0);
+	alx_mem_w32(hw, L1F_HASH_TBL1, 0);
+	alx_mem_r32(hw, L1F_MAC_CTRL, &val);
+	if (hash_legacy)
+		val |= L1F_MAC_CTRL_MHASH_ALG_HI5B;
+	else
+		val &= ~L1F_MAC_CTRL_MHASH_ALG_HI5B;
+	alx_mem_w32(hw, L1F_MAC_CTRL, val);
+
+	/* clear any wol setting/status */
+	alx_mem_r32(hw, L1F_WOL0, &val);
+	alx_mem_w32(hw, L1F_WOL0, 0);
+
+	/* clk gating */
+	alx_mem_w32(hw, L1F_CLK_GATE,
+		    (FIELD_GETX(hw->pci_revid, L1F_PCI_REVID) == L1F_REV_B0) ?
+		     L1F_CLK_GATE_ALL_B0 : L1F_CLK_GATE_ALL_A0);
+
+	/* idle timeout to switch clk_125M */
+	if (FIELD_GETX(hw->pci_revid, L1F_PCI_REVID) == L1F_REV_B0) {
+		alx_mem_w32(hw, L1F_IDLE_DECISN_TIMER,
+			    L1F_IDLE_DECISN_TIMER_DEF);
+	}
+
+	/* descriptor ring base memory */
+	alx_mem_w32(hw, L1F_TX_BASE_ADDR_HI, txmem_hi);
+	alx_mem_w32(hw, L1F_TPD_RING_SZ, txring_sz);
+	switch (tx_qnum) {
+	case 4:
+		alx_mem_w32(hw, L1F_TPD_PRI3_ADDR_LO, tx_mem_lo[3]);
+		/* fall through */
+	case 3:
+		alx_mem_w32(hw, L1F_TPD_PRI2_ADDR_LO, tx_mem_lo[2]);
+		/* fall through */
+	case 2:
+		alx_mem_w32(hw, L1F_TPD_PRI1_ADDR_LO, tx_mem_lo[1]);
+		/* fall through */
+	case 1:
+		alx_mem_w32(hw, L1F_TPD_PRI0_ADDR_LO, tx_mem_lo[0]);
+		break;
+	default:
+		return LX_ERR_PARM;
+	}
+	alx_mem_w32(hw, L1F_RX_BASE_ADDR_HI, rxmem_hi);
+	alx_mem_w32(hw, L1F_RFD_ADDR_LO, rfdmem_lo);
+	alx_mem_w32(hw, L1F_RRD_ADDR_LO, rrdmem_lo);
+	alx_mem_w32(hw, L1F_RFD_BUF_SZ, rxbuf_sz);
+	alx_mem_w32(hw, L1F_RRD_RING_SZ, rxring_sz);
+	alx_mem_w32(hw, L1F_RFD_RING_SZ, rxring_sz);
+	alx_mem_w32(hw, L1F_SMB_TIMER, smb_timer * 500UL);
+	alx_mem_w32(hw, L1F_SRAM9, L1F_SRAM_LOAD_PTR);
+
+	/* interrupt moderation */
+	alx_mem_r32(hw, L1F_MASTER, &val);
+/*    val = (val & ~L1F_MASTER_IRQMOD2_EN) | */
+	val = val | L1F_MASTER_IRQMOD2_EN |
+		    L1F_MASTER_IRQMOD1_EN |
+		    L1F_MASTER_SYSALVTIMER_EN;  /* sysalive */
+	alx_mem_w32(hw, L1F_MASTER, val);
+	alx_mem_w32(hw, L1F_IRQ_MODU_TIMER,
+		    FIELDL(L1F_IRQ_MODU_TIMER1, int_mod >> 1));
+
+	/* tpd threshold to trigger int */
+	alx_mem_w32(hw, L1F_TINT_TPD_THRSHLD, (u32)txring_sz / 3);
+	alx_mem_w32(hw, L1F_TINT_TIMER, int_mod);
+	/* re-send int */
+	alx_mem_w32(hw, L1F_INT_RETRIG, L1F_INT_RETRIG_TO);
+
+	/* mtu */
+	alx_mem_w32(hw, L1F_MTU, (u32)(mtu + 4 + 4)); /* crc + vlan */
+	if (mtu > L1F_MTU_JUMBO_TH) {
+		alx_mem_r32(hw, L1F_MAC_CTRL, &val);
+		alx_mem_w32(hw, L1F_MAC_CTRL, val & ~L1F_MAC_CTRL_FAST_PAUSE);
+	}
+
+	/* txq */
+	if ((mtu + 8) < L1F_TXQ1_JUMBO_TSO_TH)
+		val = (u32)(mtu + 8 + 7) >> 3; /* 7 for QWORD align */
+	else
+		val = L1F_TXQ1_JUMBO_TSO_TH >> 3;
+	alx_mem_w32(hw, L1F_TXQ1, val | L1F_TXQ1_ERRLGPKT_DROP_EN);
+	alx_mem_r32(hw, L1F_DEV_CTRL, &val);
+	dmar_len = (u8)FIELD_GETX(val, L1F_DEV_CTRL_MAXRRS);
+	/* if the BIOS set the dma read max length below the minimum,
+	 * restore it to the default value */
+	if (dmar_len < L1F_DEV_CTRL_MAXRRS_MIN) {
+		FIELD_SETL(val, L1F_DEV_CTRL_MAXRRS, L1F_DEV_CTRL_MAXRRS_MIN);
+		alx_mem_w32(hw, L1F_DEV_CTRL, val);
+	}
+	val = FIELDL(L1F_TXQ0_TPD_BURSTPREF, L1F_TXQ_TPD_BURSTPREF_DEF) |
+	      L1F_TXQ0_MODE_ENHANCE |
+	      L1F_TXQ0_LSO_8023_EN |
+	      L1F_TXQ0_SUPT_IPOPT |
+	      FIELDL(L1F_TXQ0_TXF_BURST_PREF, L1F_TXQ_TXF_BURST_PREF_DEF);
+	alx_mem_w32(hw, L1F_TXQ0, val);
+	val = FIELDL(L1F_HQTPD_Q1_NUMPREF, L1F_TXQ_TPD_BURSTPREF_DEF) |
+	      FIELDL(L1F_HQTPD_Q2_NUMPREF, L1F_TXQ_TPD_BURSTPREF_DEF) |
+	      FIELDL(L1F_HQTPD_Q3_NUMPREF, L1F_TXQ_TPD_BURSTPREF_DEF) |
+	      L1F_HQTPD_BURST_EN;
+	alx_mem_w32(hw, L1F_HQTPD, val);
+
+	/* rxq */
+	alx_mem_r32(hw, L1F_SRAM5, &val);
+	val = FIELD_GETX(val, L1F_SRAM_RXF_LEN) << 3; /* bytes */
+	if (val > L1F_SRAM_RXF_LEN_8K) {
+		val16 = L1F_MTU_STD_ALGN >> 3;
+		val = (val - (2 * L1F_MTU_STD_ALGN + L1F_MTU_MIN)) >> 3;
+	} else {
+		val16 = L1F_MTU_STD_ALGN >> 3;
+		val = (val - L1F_MTU_STD_ALGN) >> 3;
+	}
+	alx_mem_w32(hw, L1F_RXQ2,
+		    FIELDL(L1F_RXQ2_RXF_XOFF_THRESH, val16) |
+		    FIELDL(L1F_RXQ2_RXF_XON_THRESH, val));
+	val = FIELDL(L1F_RXQ0_NUM_RFD_PREF, L1F_RXQ0_NUM_RFD_PREF_DEF) |
+	      FIELDL(L1F_RXQ0_RSS_MODE, L1F_RXQ0_RSS_MODE_DIS) |
+	      FIELDL(L1F_RXQ0_IDT_TBL_SIZE, L1F_RXQ0_IDT_TBL_SIZE_DEF) |
+	      L1F_RXQ0_RSS_HSTYP_ALL |
+	      L1F_RXQ0_RSS_HASH_EN |
+	      L1F_RXQ0_IPV6_PARSE_EN;
+
+	if ((devid & 1) != 0) {
+		FIELD_SETL(val, L1F_RXQ0_ASPM_THRESH,
+			   L1F_RXQ0_ASPM_THRESH_100M);
+	}
+	alx_mem_w32(hw, L1F_RXQ0, val);
+
+	/* rfd producer index */
+	alx_mem_w32(hw, L1F_RFD_PIDX, (u32)rxring_sz - 1);
+
+	/* DMA */
+	alx_mem_r32(hw, L1F_DMA, &val);
+	val = FIELDL(L1F_DMA_RORDER_MODE, L1F_DMA_RORDER_MODE_OUT) |
+	      L1F_DMA_RREQ_PRI_DATA |
+	      FIELDL(L1F_DMA_RREQ_BLEN, dmar_len) |
+	      FIELDL(L1F_DMA_WDLY_CNT, L1F_DMA_WDLY_CNT_DEF) |
+	      FIELDL(L1F_DMA_RDLY_CNT, L1F_DMA_RDLY_CNT_DEF) |
+	      FIELDL(L1F_DMA_RCHNL_SEL, hw->dma_chnl - 1);
+	alx_mem_w32(hw, L1F_DMA, val);
+
+	return 0;
+}
+
+
+u16 l1f_get_phy_config(struct alx_hw *hw)
+{
+	u32 val;
+	u16 phy_val;
+
+	alx_mem_r32(hw, L1F_PHY_CTRL, &val);
+
+	/* phy still in reset */
+	if ((val & L1F_PHY_CTRL_DSPRST_OUT) == 0)
+		return LX_DRV_PHY_UNKNOWN;
+
+	alx_mem_r32(hw, L1F_DRV, &val);
+	val = FIELD_GETX(val, LX_DRV_PHY);
+
+	if (LX_DRV_PHY_UNKNOWN == val)
+		return LX_DRV_PHY_UNKNOWN;
+
+	l1f_read_phy(hw, false, 0, false, L1F_MII_DBG_ADDR, &phy_val);
+
+	if (LX_PHY_INITED == phy_val)
+		return (u16) val;
+
+	return LX_DRV_PHY_UNKNOWN;
+}
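+
+/* Usage sketch (illustrative; lnk_cap/fc_en are hypothetical caller
+ * variables): the cached config lets a caller skip a redundant PHY
+ * re-init when the PHY was already set up:
+ *
+ *	if (l1f_get_phy_config(hw) == LX_DRV_PHY_UNKNOWN)
+ *		l1f_init_phy_spdfc(hw, true, lnk_cap, fc_en);
+ */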
+
diff -Nruw linux-4.4.115-fbx/drivers/net/ethernet/atheros/alx_prop./alf_hw.h linux-4.4.115-fbx/drivers/net/ethernet/atheros/alx_prop/alf_hw.h
--- linux-4.4.115-fbx/drivers/net/ethernet/atheros/alx_prop./alf_hw.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/net/ethernet/atheros/alx_prop/alf_hw.h	2019-01-22 16:16:24.927259302 +0100
@@ -0,0 +1,2098 @@
+/*
+ * Copyright (c) 2012 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef L1F_HW_H_
+#define L1F_HW_H_
+
+/*********************************************************************
+ * some requirements for l1f_sw.h
+ *
+ * 1. some basic types must be defined if they are not provided by
+ *    your compiler:
+ *    u8, u16, u32, bool
+ *
+ * 2. the PETHCONTEXT definition should be in l1x_sw.h and it must
+ *    contain pci_devid & pci_venid
+ *
+ *********************************************************************/
+
+#include "alx_hwcom.h"
+
+/******************************************************************************/
+#define L1F_DEV_ID                      0x1091
+#define L2F_DEV_ID                      0x1090
+
+
+#define L1F_PCI_REVID_WTH_CR            BIT(1)
+#define L1F_PCI_REVID_WTH_XD            BIT(0)
+#define L1F_PCI_REVID_MASK              ASHFT3(0x1FU)
+#define L1F_PCI_REVID_SHIFT             3
+#define L1F_REV_A0                      0
+#define L1F_REV_A1                      1
+#define L1F_REV_B0                      2
+
+#define L1F_PM_CSR                      0x0044  /* 16bit */
+#define L1F_PM_CSR_PME_STAT             BIT(15)
+#define L1F_PM_CSR_DSCAL_MASK           ASHFT13(3U)
+#define L1F_PM_CSR_DSCAL_SHIFT          13
+#define L1F_PM_CSR_DSEL_MASK            ASHFT9(0xFU)
+#define L1F_PM_CSR_DSEL_SHIFT           9
+#define L1F_PM_CSR_PME_EN               BIT(8)
+#define L1F_PM_CSR_PWST_MASK            ASHFT0(3U)
+#define L1F_PM_CSR_PWST_SHIFT           0
+
+#define L1F_PM_DATA                     0x0047  /* 8bit */
+
+
+#define L1F_DEV_CAP                     0x005C
+#define L1F_DEV_CAP_SPLSL_MASK          ASHFT26(3UL)
+#define L1F_DEV_CAP_SPLSL_SHIFT         26
+#define L1F_DEV_CAP_SPLV_MASK           ASHFT18(0xFFUL)
+#define L1F_DEV_CAP_SPLV_SHIFT          18
+#define L1F_DEV_CAP_RBER                BIT(15)
+#define L1F_DEV_CAP_PIPRS               BIT(14)
+#define L1F_DEV_CAP_AIPRS               BIT(13)
+#define L1F_DEV_CAP_ABPRS               BIT(12)
+#define L1F_DEV_CAP_L1ACLAT_MASK        ASHFT9(7UL)
+#define L1F_DEV_CAP_L1ACLAT_SHIFT       9
+#define L1F_DEV_CAP_L0SACLAT_MASK       ASHFT6(7UL)
+#define L1F_DEV_CAP_L0SACLAT_SHIFT      6
+#define L1F_DEV_CAP_EXTAG               BIT(5)
+#define L1F_DEV_CAP_PHANTOM             BIT(4)
+#define L1F_DEV_CAP_MPL_MASK            ASHFT0(7UL)
+#define L1F_DEV_CAP_MPL_SHIFT           0
+#define L1F_DEV_CAP_MPL_128             1
+#define L1F_DEV_CAP_MPL_256             2
+#define L1F_DEV_CAP_MPL_512             3
+#define L1F_DEV_CAP_MPL_1024            4
+#define L1F_DEV_CAP_MPL_2048            5
+#define L1F_DEV_CAP_MPL_4096            6
+
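+/* Illustrative note: the FIELDL/FIELD_GETX/FIELD_SETL helpers used with
+ * the *_MASK/*_SHIFT pairs throughout this header come from alx_hwcom.h
+ * (not shown in this patch); the assumed semantics are the usual
+ * token-pasting shift-and-mask pattern, e.g.:
+ *
+ *	#define FIELD_GETX(x, nam)  (((x) & nam##_MASK) >> nam##_SHIFT)
+ *	#define FIELDL(nam, v)      (((u32)(v) << nam##_SHIFT) & nam##_MASK)
+ *
+ * so FIELD_GETX(cap, L1F_DEV_CAP_MPL) extracts the max-payload code. */
+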
+#define L1F_DEV_CTRL                    0x0060    /* 16bit */
+#define L1F_DEV_CTRL_MAXRRS_MASK        ASHFT12(7U)
+#define L1F_DEV_CTRL_MAXRRS_SHIFT       12
+#define L1F_DEV_CTRL_MAXRRS_MIN         2
+#define L1F_DEV_CTRL_NOSNP_EN           BIT(11)
+#define L1F_DEV_CTRL_AUXPWR_EN          BIT(10)
+#define L1F_DEV_CTRL_PHANTOM_EN         BIT(9)
+#define L1F_DEV_CTRL_EXTAG_EN           BIT(8)
+#define L1F_DEV_CTRL_MPL_MASK           ASHFT5(7U)
+#define L1F_DEV_CTRL_MPL_SHIFT          5
+#define L1F_DEV_CTRL_RELORD_EN          BIT(4)
+#define L1F_DEV_CTRL_URR_EN             BIT(3)
+#define L1F_DEV_CTRL_FERR_EN            BIT(2)
+#define L1F_DEV_CTRL_NFERR_EN           BIT(1)
+#define L1F_DEV_CTRL_CERR_EN            BIT(0)
+
+
+#define L1F_DEV_STAT                    0x0062    /* 16bit */
+#define L1F_DEV_STAT_XS_PEND            BIT(5)
+#define L1F_DEV_STAT_AUXPWR             BIT(4)
+#define L1F_DEV_STAT_UR                 BIT(3)
+#define L1F_DEV_STAT_FERR               BIT(2)
+#define L1F_DEV_STAT_NFERR              BIT(1)
+#define L1F_DEV_STAT_CERR               BIT(0)
+
+#define L1F_LNK_CAP                     0x0064
+#define L1F_LNK_CAP_PRTNUM_MASK         ASHFT24(0xFFUL)
+#define L1F_LNK_CAP_PRTNUM_SHIFT        24
+#define L1F_LNK_CAP_CLK_PM              BIT(18)
+#define L1F_LNK_CAP_L1EXTLAT_MASK       ASHFT15(7UL)
+#define L1F_LNK_CAP_L1EXTLAT_SHIFT      15
+#define L1F_LNK_CAP_L0SEXTLAT_MASK      ASHFT12(7UL)
+#define L1F_LNK_CAP_L0SEXTLAT_SHIFT     12
+#define L1F_LNK_CAP_ASPM_SUP_MASK       ASHFT10(3UL)
+#define L1F_LNK_CAP_ASPM_SUP_SHIFT      10
+#define L1F_LNK_CAP_ASPM_SUP_L0S        1
+#define L1F_LNK_CAP_ASPM_SUP_L0SL1      3
+#define L1F_LNK_CAP_MAX_LWH_MASK        ASHFT4(0x3FUL)
+#define L1F_LNK_CAP_MAX_LWH_SHIFT       4
+#define L1F_LNK_CAP_MAX_LSPD_MASH       ASHFT0(0xFUL)
+#define L1F_LNK_CAP_MAX_LSPD_SHIFT      0
+
+#define L1F_LNK_CTRL                    0x0068  /* 16bit */
+#define L1F_LNK_CTRL_CLK_PM_EN          BIT(8)
+#define L1F_LNK_CTRL_EXTSYNC            BIT(7)
+#define L1F_LNK_CTRL_CMNCLK_CFG         BIT(6)
+#define L1F_LNK_CTRL_RCB_128B           BIT(3)  /* 0:64b,1:128b */
+#define L1F_LNK_CTRL_ASPM_MASK          ASHFT0(3U)
+#define L1F_LNK_CTRL_ASPM_SHIFT         0
+#define L1F_LNK_CTRL_ASPM_DIS           0
+#define L1F_LNK_CTRL_ASPM_ENL0S         1
+#define L1F_LNK_CTRL_ASPM_ENL1          2
+#define L1F_LNK_CTRL_ASPM_ENL0SL1       3
+
+#define L1F_LNK_STAT                    0x006A  /* 16bit */
+#define L1F_LNK_STAT_SCLKCFG            BIT(12)
+#define L1F_LNK_STAT_LNKTRAIN           BIT(11)
+#define L1F_LNK_STAT_TRNERR             BIT(10)
+#define L1F_LNK_STAT_LNKSPD_MASK        ASHFT0(0xFU)
+#define L1F_LNK_STAT_LNKSPD_SHIFT       0
+#define L1F_LNK_STAT_NEGLW_MASK         ASHFT4(0x3FU)
+#define L1F_LNK_STAT_NEGLW_SHIFT        4
+
+#define L1F_MSIX_MASK                   0x0090
+#define L1F_MSIX_PENDING                0x0094
+
+#define L1F_UE_SVRT                     0x010C
+#define L1F_UE_SVRT_UR                  BIT(20)
+#define L1F_UE_SVRT_ECRCERR             BIT(19)
+#define L1F_UE_SVRT_MTLP                BIT(18)
+#define L1F_UE_SVRT_RCVOVFL             BIT(17)
+#define L1F_UE_SVRT_UNEXPCPL            BIT(16)
+#define L1F_UE_SVRT_CPLABRT             BIT(15)
+#define L1F_UE_SVRT_CPLTO               BIT(14)
+#define L1F_UE_SVRT_FCPROTERR           BIT(13)
+#define L1F_UE_SVRT_PTLP                BIT(12)
+#define L1F_UE_SVRT_DLPROTERR           BIT(4)
+#define L1F_UE_SVRT_TRNERR              BIT(0)
+
+#define L1F_EFLD                        0x0204  /* eeprom/flash load */
+#define L1F_EFLD_F_ENDADDR_MASK         ASHFT16(0x3FFUL)
+#define L1F_EFLD_F_ENDADDR_SHIFT        16
+#define L1F_EFLD_F_EXIST                BIT(10)
+#define L1F_EFLD_E_EXIST                BIT(9)
+#define L1F_EFLD_EXIST                  BIT(8)
+#define L1F_EFLD_STAT                   BIT(5)   /* 0:finish,1:in progress */
+#define L1F_EFLD_IDLE                   BIT(4)
+#define L1F_EFLD_START                  BIT(0)
+
+#define L1F_SLD                         0x0218  /* efuse load */
+#define L1F_SLD_FREQ_MASK               ASHFT24(3UL)
+#define L1F_SLD_FREQ_SHIFT              24
+#define L1F_SLD_FREQ_100K               0
+#define L1F_SLD_FREQ_200K               1
+#define L1F_SLD_FREQ_300K               2
+#define L1F_SLD_FREQ_400K               3
+#define L1F_SLD_EXIST                   BIT(23)
+#define L1F_SLD_SLVADDR_MASK            ASHFT16(0x7FUL)
+#define L1F_SLD_SLVADDR_SHIFT           16
+#define L1F_SLD_IDLE                    BIT(13)
+#define L1F_SLD_STAT                    BIT(12)  /* 0:finish,1:in progress */
+#define L1F_SLD_START                   BIT(11)
+#define L1F_SLD_STARTADDR_MASK          ASHFT0(0xFFUL)
+#define L1F_SLD_STARTADDR_SHIFT         0
+#define L1F_SLD_MAX_TO                  100
+
+#define L1F_PCIE_MSIC                   0x021C
+#define L1F_PCIE_MSIC_MSIX_DIS          BIT(22)
+#define L1F_PCIE_MSIC_MSI_DIS           BIT(21)
+
+#define L1F_PPHY_MISC1                  0x1000
+#define L1F_PPHY_MISC1_RCVDET           BIT(2)
+#define L1F_PPHY_MISC1_NFTS_MASK        ASHFT16(0xFFUL)
+#define L1F_PPHY_MISC1_NFTS_SHIFT       16
+#define L1F_PPHY_MISC1_NFTS_HIPERF      0xA0    /* ???? */
+
+#define L1F_PPHY_MISC2                  0x1004
+#define L1F_PPHY_MISC2_L0S_TH_MASK      ASHFT18(0x3UL)
+#define L1F_PPHY_MISC2_L0S_TH_SHIFT     18
+#define L1F_PPHY_MISC2_CDR_BW_MASK      ASHFT16(0x3UL)
+#define L1F_PPHY_MISC2_CDR_BW_SHIFT     16
+
+#define L1F_PDLL_TRNS1                  0x1104
+#define L1F_PDLL_TRNS1_D3PLLOFF_EN      BIT(11)
+#define L1F_PDLL_TRNS1_REGCLK_SEL_NORM  BIT(10)
+#define L1F_PDLL_TRNS1_REPLY_TO_MASK    ASHFT0(0x3FFUL)
+#define L1F_PDLL_TRNS1_REPLY_TO_SHIFT   0
+
+
+#define L1F_TLEXTN_STATS                0x1208
+#define L1F_TLEXTN_STATS_DEVNO_MASK     ASHFT16(0x1FUL)
+#define L1F_TLEXTN_STATS_DEVNO_SHIFT    16
+#define L1F_TLEXTN_STATS_BUSNO_MASK     ASHFT8(0xFFUL)
+#define L1F_TLEXTN_STATS_BUSNO_SHIFT    8
+
+#define L1F_EFUSE_CTRL                  0x12C0
+#define L1F_EFUSE_CTRL_FLAG             BIT(31)          /* 0:read,1:write */
+#define L1F_EUFSE_CTRL_ACK              BIT(30)
+#define L1F_EFUSE_CTRL_ADDR_MASK        ASHFT16(0x3FFUL)
+#define L1F_EFUSE_CTRL_ADDR_SHIFT       16
+
+#define L1F_EFUSE_DATA                  0x12C4
+
+#define L1F_SPI_OP1                     0x12C8
+#define L1F_SPI_OP1_RDID_MASK           ASHFT24(0xFFUL)
+#define L1F_SPI_OP1_RDID_SHIFT          24
+#define L1F_SPI_OP1_CE_MASK             ASHFT16(0xFFUL)
+#define L1F_SPI_OP1_CE_SHIFT            16
+#define L1F_SPI_OP1_SE_MASK             ASHFT8(0xFFUL)
+#define L1F_SPI_OP1_SE_SHIFT            8
+#define L1F_SPI_OP1_PRGRM_MASK          ASHFT0(0xFFUL)
+#define L1F_SPI_OP1_PRGRM_SHIFT         0
+
+#define L1F_SPI_OP2                     0x12CC
+#define L1F_SPI_OP2_READ_MASK           ASHFT24(0xFFUL)
+#define L1F_SPI_OP2_READ_SHIFT          24
+#define L1F_SPI_OP2_WRSR_MASK           ASHFT16(0xFFUL)
+#define L1F_SPI_OP2_WRSR_SHIFT          16
+#define L1F_SPI_OP2_RDSR_MASK           ASHFT8(0xFFUL)
+#define L1F_SPI_OP2_RDSR_SHIFT          8
+#define L1F_SPI_OP2_WREN_MASK           ASHFT0(0xFFUL)
+#define L1F_SPI_OP2_WREN_SHIFT          0
+
+#define L1F_SPI_OP3                     0x12E4
+#define L1F_SPI_OP3_WRDI_MASK           ASHFT8(0xFFUL)
+#define L1F_SPI_OP3_WRDI_SHIFT          8
+#define L1F_SPI_OP3_EWSR_MASK           ASHFT0(0xFFUL)
+#define L1F_SPI_OP3_EWSR_SHIFT          0
+
+#define L1F_EF_CTRL                     0x12D0
+#define L1F_EF_CTRL_FSTS_MASK           ASHFT20(0xFFUL)
+#define L1F_EF_CTRL_FSTS_SHIFT          20
+#define L1F_EF_CTRL_CLASS_MASK          ASHFT16(7UL)
+#define L1F_EF_CTRL_CLASS_SHIFT         16
+#define L1F_EF_CTRL_CLASS_F_UNKNOWN     0
+#define L1F_EF_CTRL_CLASS_F_STD         1
+#define L1F_EF_CTRL_CLASS_F_SST         2
+#define L1F_EF_CTRL_CLASS_E_UNKNOWN     0
+#define L1F_EF_CTRL_CLASS_E_1K          1
+#define L1F_EF_CTRL_CLASS_E_4K          2
+#define L1F_EF_CTRL_FRET                BIT(15)          /* 0:OK,1:fail */
+#define L1F_EF_CTRL_TYP_MASK            ASHFT12(3UL)
+#define L1F_EF_CTRL_TYP_SHIFT           12
+#define L1F_EF_CTRL_TYP_NONE            0
+#define L1F_EF_CTRL_TYP_F               1
+#define L1F_EF_CTRL_TYP_E               2
+#define L1F_EF_CTRL_TYP_UNKNOWN         3
+#define L1F_EF_CTRL_ONE_CLK             BIT(10)
+#define L1F_EF_CTRL_ECLK_MASK           ASHFT8(3UL)
+#define L1F_EF_CTRL_ECLK_SHIFT          8
+#define L1F_EF_CTRL_ECLK_125K           0
+#define L1F_EF_CTRL_ECLK_250K           1
+#define L1F_EF_CTRL_ECLK_500K           2
+#define L1F_EF_CTRL_ECLK_1M             3
+#define L1F_EF_CTRL_FBUSY               BIT(7)
+#define L1F_EF_CTRL_ACTION              BIT(6)           /* 1:start,0:stop */
+#define L1F_EF_CTRL_AUTO_OP             BIT(5)
+#define L1F_EF_CTRL_SST_MODE            BIT(4)           /* force using sst */
+#define L1F_EF_CTRL_INST_MASK           ASHFT0(0xFUL)
+#define L1F_EF_CTRL_INST_SHIFT          0
+#define L1F_EF_CTRL_INST_NONE           0
+#define L1F_EF_CTRL_INST_READ           1               /* for flash & eeprom */
+#define L1F_EF_CTRL_INST_RDID           2
+#define L1F_EF_CTRL_INST_RDSR           3
+#define L1F_EF_CTRL_INST_WREN           4
+#define L1F_EF_CTRL_INST_PRGRM          5
+#define L1F_EF_CTRL_INST_SE             6
+#define L1F_EF_CTRL_INST_CE             7
+#define L1F_EF_CTRL_INST_WRSR           10
+#define L1F_EF_CTRL_INST_EWSR           11
+#define L1F_EF_CTRL_INST_WRDI           12
+#define L1F_EF_CTRL_INST_WRITE          2               /* only for eeprom */
+
+#define L1F_EF_ADDR                     0x12D4
+#define L1F_EF_DATA                     0x12D8
+#define L1F_SPI_ID                      0x12DC
+
+#define L1F_SPI_CFG_START               0x12E0
+
+#define L1F_PMCTRL                      0x12F8
+#define L1F_PMCTRL_HOTRST_WTEN          BIT(31)
+#define L1F_PMCTRL_ASPM_FCEN            BIT(30)  /* L0s/L1 dis by MAC based on
+						 * throughput (setting in 15A0) */
+#define L1F_PMCTRL_SADLY_EN             BIT(29)
+#define L1F_PMCTRL_L0S_BUFSRX_EN        BIT(28)
+#define L1F_PMCTRL_LCKDET_TIMER_MASK    ASHFT24(0xFUL)
+#define L1F_PMCTRL_LCKDET_TIMER_SHIFT   24
+#define L1F_PMCTRL_LCKDET_TIMER_DEF     0xC
+#define L1F_PMCTRL_L1REQ_TO_MASK        ASHFT20(0xFUL)
+#define L1F_PMCTRL_L1REQ_TO_SHIFT       20      /* if pm_request_l1 time
+						 * exceeds this: L0s, not L1 */
+#define L1F_PMCTRL_L1REG_TO_DEF         0xF
+#define L1F_PMCTRL_TXL1_AFTER_L0S       BIT(19)
+#define L1F_PMCTRL_L1_TIMER_MASK        ASHFT16(7UL)
+#define L1F_PMCTRL_L1_TIMER_SHIFT       16
+#define L1F_PMCTRL_L1_TIMER_DIS         0
+#define L1F_PMCTRL_L1_TIMER_2US         1
+#define L1F_PMCTRL_L1_TIMER_4US         2
+#define L1F_PMCTRL_L1_TIMER_8US         3
+#define L1F_PMCTRL_L1_TIMER_16US        4
+#define L1F_PMCTRL_L1_TIMER_24US        5
+#define L1F_PMCTRL_L1_TIMER_32US        6
+#define L1F_PMCTRL_L1_TIMER_63US        7
+#define L1F_PMCTRL_RCVR_WT_1US          BIT(15)  /* 1:1us, 0:2ms */
+#define L1F_PMCTRL_PWM_VER_11           BIT(14)  /* 0:1.0a,1:1.1 */
+#define L1F_PMCTRL_L1_CLKSW_EN          BIT(13)  /* en pcie clk sw in L1 */
+#define L1F_PMCTRL_L0S_EN               BIT(12)
+#define L1F_PMCTRL_RXL1_AFTER_L0S       BIT(11)
+#define L1F_PMCTRL_L0S_TIMER_MASK       ASHFT8(7UL)
+#define L1F_PMCTRL_L0S_TIMER_SHIFT      8
+#define L1F_PMCTRL_L1_BUFSRX_EN         BIT(7)
+#define L1F_PMCTRL_L1_SRDSRX_PWD        BIT(6)   /* power down serdes rx */
+#define L1F_PMCTRL_L1_SRDSPLL_EN        BIT(5)
+#define L1F_PMCTRL_L1_SRDS_EN           BIT(4)
+#define L1F_PMCTRL_L1_EN                BIT(3)
+#define L1F_PMCTRL_CLKREQ_EN            BIT(2)
+#define L1F_PMCTRL_RBER_EN              BIT(1)
+#define L1F_PMCTRL_SPRSDWER_EN          BIT(0)
+
+#define L1F_LTSSM_CTRL                  0x12FC
+#define L1F_LTSSM_WRO_EN                BIT(12)
+
+
+/******************************************************************************/
+
+#define L1F_MASTER                      0x1400
+#define L1F_MASTER_OTP_FLG              BIT(31)
+#define L1F_MASTER_DEV_NUM_MASK         ASHFT24(0x7FUL)
+#define L1F_MASTER_DEV_NUM_SHIFT        24
+#define L1F_MASTER_REV_NUM_MASK         ASHFT16(0xFFUL)
+#define L1F_MASTER_REV_NUM_SHIFT        16
+#define L1F_MASTER_DEASSRT              BIT(15)      /*ISSUE DE-ASSERT MSG */
+#define L1F_MASTER_RDCLR_INT            BIT(14)
+#define L1F_MASTER_DMA_RST              BIT(13)
+#define L1F_MASTER_PCLKSEL_SRDS         BIT(12)      /* 1:always sel pclk from
+						     * serdes, no sw to 25M */
+#define L1F_MASTER_IRQMOD2_EN           BIT(11)      /* IRQ MODERATION FOR RX */
+#define L1F_MASTER_IRQMOD1_EN           BIT(10)      /* MODERATION FOR TX/RX */
+#define L1F_MASTER_MANU_INT             BIT(9)       /* SOFT MANUAL INT */
+#define L1F_MASTER_MANUTIMER_EN         BIT(8)
+#define L1F_MASTER_SYSALVTIMER_EN       BIT(7)       /* SYS ALIVE TIMER EN */
+#define L1F_MASTER_OOB_DIS              BIT(6)       /* OUT OF BOX DIS */
+#define L1F_MASTER_WAKEN_25M            BIT(5)       /* WAKE WO. PCIE CLK */
+#define L1F_MASTER_BERT_START           BIT(4)
+#define L1F_MASTER_PCIE_TSTMOD_MASK     ASHFT2(3UL)
+#define L1F_MASTER_PCIE_TSTMOD_SHIFT    2
+#define L1F_MASTER_PCIE_RST             BIT(1)
+#define L1F_MASTER_DMA_MAC_RST          BIT(0)       /* RST MAC & DMA */
+#define L1F_DMA_MAC_RST_TO              50
+
+#define L1F_MANU_TIMER                  0x1404
+
+#define L1F_IRQ_MODU_TIMER              0x1408
+#define L1F_IRQ_MODU_TIMER2_MASK        ASHFT16(0xFFFFUL)
+#define L1F_IRQ_MODU_TIMER2_SHIFT       16          /* ONLY FOR RX */
+#define L1F_IRQ_MODU_TIMER1_MASK        ASHFT0(0xFFFFUL)
+#define L1F_IRQ_MODU_TIMER1_SHIFT       0
+
+#define L1F_PHY_CTRL                    0x140C
+#define L1F_PHY_CTRL_ADDR_MASK          ASHFT19(0x1FUL)
+#define L1F_PHY_CTRL_ADDR_SHIFT         19
+#define L1F_PHY_CTRL_BP_VLTGSW          BIT(18)
+#define L1F_PHY_CTRL_100AB_EN           BIT(17)
+#define L1F_PHY_CTRL_10AB_EN            BIT(16)
+#define L1F_PHY_CTRL_PLL_BYPASS         BIT(15)
+#define L1F_PHY_CTRL_POWER_DOWN         BIT(14)      /* affect MAC & PHY,
+						     * go to low power sts */
+#define L1F_PHY_CTRL_PLL_ON             BIT(13)      /* 1:PLL ALWAYS ON
+						     * 0:CAN SWITCH IN LPW */
+#define L1F_PHY_CTRL_RST_ANALOG         BIT(12)
+#define L1F_PHY_CTRL_HIB_PULSE          BIT(11)
+#define L1F_PHY_CTRL_HIB_EN             BIT(10)
+#define L1F_PHY_CTRL_GIGA_DIS           BIT(9)
+#define L1F_PHY_CTRL_IDDQ_DIS           BIT(8)       /* POWER ON RST */
+#define L1F_PHY_CTRL_IDDQ               BIT(7)       /* WHILE REBOOT, BIT8(1)
+						     * AFFECTS BIT7 */
+#define L1F_PHY_CTRL_LPW_EXIT           BIT(6)
+#define L1F_PHY_CTRL_GATE_25M           BIT(5)
+#define L1F_PHY_CTRL_RVRS_ANEG          BIT(4)
+#define L1F_PHY_CTRL_ANEG_NOW           BIT(3)
+#define L1F_PHY_CTRL_LED_MODE           BIT(2)
+#define L1F_PHY_CTRL_RTL_MODE           BIT(1)
+#define L1F_PHY_CTRL_DSPRST_OUT         BIT(0)       /* OUT OF DSP RST STATE */
+#define L1F_PHY_CTRL_DSPRST_TO          80
+#define L1F_PHY_CTRL_CLS                (\
+	L1F_PHY_CTRL_LED_MODE           |\
+	L1F_PHY_CTRL_100AB_EN           |\
+	L1F_PHY_CTRL_PLL_ON)
+
+#define L1F_MAC_STS                     0x1410
+#define L1F_MAC_STS_SFORCE_MASK         ASHFT14(0xFUL)
+#define L1F_MAC_STS_SFORCE_SHIFT        14
+#define L1F_MAC_STS_CALIB_DONE          BIT(13)
+#define L1F_MAC_STS_CALIB_RES_MASK      ASHFT8(0x1FUL)
+#define L1F_MAC_STS_CALIB_RES_SHIFT     8
+#define L1F_MAC_STS_CALIBERR_MASK       ASHFT4(0xFUL)
+#define L1F_MAC_STS_CALIBERR_SHIFT      4
+#define L1F_MAC_STS_TXQ_BUSY            BIT(3)
+#define L1F_MAC_STS_RXQ_BUSY            BIT(2)
+#define L1F_MAC_STS_TXMAC_BUSY          BIT(1)
+#define L1F_MAC_STS_RXMAC_BUSY          BIT(0)
+#define L1F_MAC_STS_IDLE                (\
+	L1F_MAC_STS_TXQ_BUSY            |\
+	L1F_MAC_STS_RXQ_BUSY            |\
+	L1F_MAC_STS_TXMAC_BUSY          |\
+	L1F_MAC_STS_RXMAC_BUSY)
+
+#define L1F_MDIO                        0x1414
+#define L1F_MDIO_MODE_EXT               BIT(30)      /* 0:normal,1:ext */
+#define L1F_MDIO_POST_READ              BIT(29)
+#define L1F_MDIO_AUTO_POLLING           BIT(28)
+#define L1F_MDIO_BUSY                   BIT(27)
+#define L1F_MDIO_CLK_SEL_MASK           ASHFT24(7UL)
+#define L1F_MDIO_CLK_SEL_SHIFT          24
+#define L1F_MDIO_CLK_SEL_25MD4          0           /* 25M DIV 4 */
+#define L1F_MDIO_CLK_SEL_25MD6          2
+#define L1F_MDIO_CLK_SEL_25MD8          3
+#define L1F_MDIO_CLK_SEL_25MD10         4
+#define L1F_MDIO_CLK_SEL_25MD32         5
+#define L1F_MDIO_CLK_SEL_25MD64         6
+#define L1F_MDIO_CLK_SEL_25MD128        7
+#define L1F_MDIO_START                  BIT(23)
+#define L1F_MDIO_SPRES_PRMBL            BIT(22)
+#define L1F_MDIO_OP_READ                BIT(21)      /* 1:read,0:write */
+#define L1F_MDIO_REG_MASK               ASHFT16(0x1FUL)
+#define L1F_MDIO_REG_SHIFT              16
+#define L1F_MDIO_DATA_MASK              ASHFT0(0xFFFFUL)
+#define L1F_MDIO_DATA_SHIFT             0
+#define L1F_MDIO_MAX_AC_TO              120
+
+#define L1F_MDIO_EXTN                   0x1448
+#define L1F_MDIO_EXTN_PORTAD_MASK       ASHFT21(0x1FUL)
+#define L1F_MDIO_EXTN_PORTAD_SHIFT      21
+#define L1F_MDIO_EXTN_DEVAD_MASK        ASHFT16(0x1FUL)
+#define L1F_MDIO_EXTN_DEVAD_SHIFT       16
+#define L1F_MDIO_EXTN_REG_MASK          ASHFT0(0xFFFFUL)
+#define L1F_MDIO_EXTN_REG_SHIFT         0
+
+#define L1F_PHY_STS                     0x1418
+#define L1F_PHY_STS_LPW                 BIT(31)
+#define L1F_PHY_STS_LPI                 BIT(30)
+#define L1F_PHY_STS_PWON_STRIP_MASK     ASHFT16(0xFFFUL)
+#define L1F_PHY_STS_PWON_STRIP_SHIFT    16
+
+#define L1F_PHY_STS_DUPLEX              BIT(3)
+#define L1F_PHY_STS_LINKUP              BIT(2)
+#define L1F_PHY_STS_SPEED_MASK          ASHFT0(3UL)
+#define L1F_PHY_STS_SPEED_SHIFT         0
+#define L1F_PHY_STS_SPEED_1000M         2
+#define L1F_PHY_STS_SPEED_100M          1
+#define L1F_PHY_STS_SPEED_10M           0
+
+#define L1F_BIST0                       0x141C
+#define L1F_BIST0_COL_MASK              ASHFT24(0x3FUL)
+#define L1F_BIST0_COL_SHIFT             24
+#define L1F_BIST0_ROW_MASK              ASHFT12(0xFFFUL)
+#define L1F_BIST0_ROW_SHIFT             12
+#define L1F_BIST0_STEP_MASK             ASHFT8(0xFUL)
+#define L1F_BIST0_STEP_SHIFT            8
+#define L1F_BIST0_PATTERN_MASK          ASHFT4(7UL)
+#define L1F_BIST0_PATTERN_SHIFT         4
+#define L1F_BIST0_CRIT                  BIT(3)
+#define L1F_BIST0_FIXED                 BIT(2)
+#define L1F_BIST0_FAIL                  BIT(1)
+#define L1F_BIST0_START                 BIT(0)
+
+#define L1F_BIST1                       0x1420
+#define L1F_BIST1_COL_MASK              ASHFT24(0x3FUL)
+#define L1F_BIST1_COL_SHIFT             24
+#define L1F_BIST1_ROW_MASK              ASHFT12(0xFFFUL)
+#define L1F_BIST1_ROW_SHIFT             12
+#define L1F_BIST1_STEP_MASK             ASHFT8(0xFUL)
+#define L1F_BIST1_STEP_SHIFT            8
+#define L1F_BIST1_PATTERN_MASK          ASHFT4(7UL)
+#define L1F_BIST1_PATTERN_SHIFT         4
+#define L1F_BIST1_CRIT                  BIT(3)
+#define L1F_BIST1_FIXED                 BIT(2)
+#define L1F_BIST1_FAIL                  BIT(1)
+#define L1F_BIST1_START                 BIT(0)
+
+#define L1F_SERDES                      0x1424
+#define L1F_SERDES_PHYCLK_SLWDWN        BIT(18)
+#define L1F_SERDES_MACCLK_SLWDWN        BIT(17)
+#define L1F_SERDES_SELFB_PLL_MASK       ASHFT14(3UL)
+#define L1F_SERDES_SELFB_PLL_SHIFT      14
+#define L1F_SERDES_PHYCLK_SEL_GTX       BIT(13)          /* 1:gtx_clk, 0:25M */
+#define L1F_SERDES_PCIECLK_SEL_SRDS     BIT(12)          /* 1:serdes,0:25M */
+#define L1F_SERDES_BUFS_RX_EN           BIT(11)
+#define L1F_SERDES_PD_RX                BIT(10)
+#define L1F_SERDES_PLL_EN               BIT(9)
+#define L1F_SERDES_EN                   BIT(8)
+#define L1F_SERDES_SELFB_PLL_SEL_CSR    BIT(6)       /* 0:state-machine,1:csr */
+#define L1F_SERDES_SELFB_PLL_CSR_MASK   ASHFT4(3UL)
+#define L1F_SERDES_SELFB_PLL_CSR_SHIFT  4
+#define L1F_SERDES_SELFB_PLL_CSR_4      3           /* 4-12% OV-CLK */
+#define L1F_SERDES_SELFB_PLL_CSR_0      2           /* 0-4% OV-CLK */
+#define L1F_SERDES_SELFB_PLL_CSR_12     1           /* 12-18% OV-CLK */
+#define L1F_SERDES_SELFB_PLL_CSR_18     0           /* 18-25% OV-CLK */
+#define L1F_SERDES_VCO_SLOW             BIT(3)
+#define L1F_SERDES_VCO_FAST             BIT(2)
+#define L1F_SERDES_LOCKDCT_EN           BIT(1)
+#define L1F_SERDES_LOCKDCTED            BIT(0)
+
+#define L1F_LED_CTRL                    0x1428
+#define L1F_LED_CTRL_PATMAP2_MASK       ASHFT8(3UL)
+#define L1F_LED_CTRL_PATMAP2_SHIFT      8
+#define L1F_LED_CTRL_PATMAP1_MASK       ASHFT6(3UL)
+#define L1F_LED_CTRL_PATMAP1_SHIFT      6
+#define L1F_LED_CTRL_PATMAP0_MASK       ASHFT4(3UL)
+#define L1F_LED_CTRL_PATMAP0_SHIFT      4
+#define L1F_LED_CTRL_D3_MODE_MASK       ASHFT2(3UL)
+#define L1F_LED_CTRL_D3_MODE_SHIFT      2
+#define L1F_LED_CTRL_D3_MODE_NORMAL     0
+#define L1F_LED_CTRL_D3_MODE_WOL_DIS    1
+#define L1F_LED_CTRL_D3_MODE_WOL_ANY    2
+#define L1F_LED_CTRL_D3_MODE_WOL_EN     3
+#define L1F_LED_CTRL_DUTY_CYCL_MASK     ASHFT0(3UL)
+#define L1F_LED_CTRL_DUTY_CYCL_SHIFT    0
+#define L1F_LED_CTRL_DUTY_CYCL_50       0           /* 50% */
+#define L1F_LED_CTRL_DUTY_CYCL_125      1           /* 12.5% */
+#define L1F_LED_CTRL_DUTY_CYCL_25       2           /* 25% */
+#define L1F_LED_CTRL_DUTY_CYCL_75       3           /* 75% */
+
+#define L1F_LED_PATN                    0x142C
+#define L1F_LED_PATN1_MASK              ASHFT16(0xFFFFUL)
+#define L1F_LED_PATN1_SHIFT             16
+#define L1F_LED_PATN0_MASK              ASHFT0(0xFFFFUL)
+#define L1F_LED_PATN0_SHIFT             0
+
+#define L1F_LED_PATN2                   0x1430
+#define L1F_LED_PATN2_MASK              ASHFT0(0xFFFFUL)
+#define L1F_LED_PATN2_SHIFT             0
+
+#define L1F_SYSALV                      0x1434
+#define L1F_SYSALV_FLAG                 BIT(0)
+
+#define L1F_PCIERR_INST                 0x1438
+#define L1F_PCIERR_INST_TX_RATE_MASK    ASHFT4(0xFUL)
+#define L1F_PCIERR_INST_TX_RATE_SHIFT   4
+#define L1F_PCIERR_INST_RX_RATE_MASK    ASHFT0(0xFUL)
+#define L1F_PCIERR_INST_RX_RATE_SHIFT   0
+
+#define L1F_LPI_DECISN_TIMER            0x143C
+
+#define L1F_LPI_CTRL                    0x1440
+#define L1F_LPI_CTRL_CHK_DA             BIT(31)
+#define L1F_LPI_CTRL_ENH_TO_MASK        ASHFT12(0x1FFFUL)
+#define L1F_LPI_CTRL_ENH_TO_SHIFT       12
+#define L1F_LPI_CTRL_ENH_TH_MASK        ASHFT6(0x1FUL)
+#define L1F_LPI_CTRL_ENH_TH_SHIFT       6
+#define L1F_LPI_CTRL_ENH_EN             BIT(5)
+#define L1F_LPI_CTRL_CHK_RX             BIT(4)
+#define L1F_LPI_CTRL_CHK_STATE          BIT(3)
+#define L1F_LPI_CTRL_GMII               BIT(2)
+#define L1F_LPI_CTRL_TO_PHY             BIT(1)
+#define L1F_LPI_CTRL_EN                 BIT(0)
+
+#define L1F_LPI_WAIT                    0x1444
+#define L1F_LPI_WAIT_TIMER_MASK         ASHFT0(0xFFFFUL)
+#define L1F_LPI_WAIT_TIMER_SHIFT        0
+
+#define L1F_HRTBT_VLAN                  0x1450      /* HEARTBEAT, FOR CIFS */
+#define L1F_HRTBT_VLANID_MASK           ASHFT0(0xFFFFUL) /* OR CLOUD */
+#define L1F_HRRBT_VLANID_SHIFT          0
+
+#define L1F_HRTBT_CTRL                  0x1454
+#define L1F_HRTBT_CTRL_EN               BIT(31)
+#define L1F_HRTBT_CTRL_PERIOD_MASK      ASHFT25(0x3FUL)
+#define L1F_HRTBT_CTRL_PERIOD_SHIFT     25
+#define L1F_HRTBT_CTRL_HASVLAN          BIT(24)
+#define L1F_HRTBT_CTRL_HDRADDR_MASK     ASHFT12(0xFFFUL)    /* A0 */
+#define L1F_HRTBT_CTRL_HDRADDR_SHIFT    12
+#define L1F_HRTBT_CTRL_HDRADDRB0_MASK   ASHFT13(0x7FFUL)    /* B0 */
+#define L1F_HRTBT_CTRL_HDRADDRB0_SHIFT  13
+#define L1F_HRTBT_CTRL_PKT_FRAG         BIT(12)              /* B0 */
+#define L1F_HRTBT_CTRL_PKTLEN_MASK      ASHFT0(0xFFFUL)
+#define L1F_HRTBT_CTRL_PKTLEN_SHIFT     0
+
+#define L1F_HRTBT_EXT_CTRL                  0x1AD0      /* B0 */
+#define L1F_HRTBT_EXT_CTRL_NS_EN            BIT(12)
+#define L1F_HRTBT_EXT_CTRL_FRAG_LEN_MASK    ASHFT4(0xFFUL)
+#define L1F_HRTBT_EXT_CTRL_FRAG_LEN_SHIFT   4
+#define L1F_HRTBT_EXT_CTRL_IS_8023          BIT(3)
+#define L1F_HRTBT_EXT_CTRL_IS_IPV6          BIT(2)
+#define L1F_HRTBT_EXT_CTRL_WAKEUP_EN        BIT(1)
+#define L1F_HRTBT_EXT_CTRL_ARP_EN           BIT(0)
+
+#define L1F_HRTBT_REM_IPV4_ADDR             0x1AD4
+#define L1F_HRTBT_HOST_IPV4_ADDR            0x1478  /* use L1F_TRD_BUBBLE_DA_IP4 */
+#define L1F_HRTBT_REM_IPV6_ADDR3            0x1AD8
+#define L1F_HRTBT_REM_IPV6_ADDR2            0x1ADC
+#define L1F_HRTBT_REM_IPV6_ADDR1            0x1AE0
+#define L1F_HRTBT_REM_IPV6_ADDR0            0x1AE4
+/* SWOI_HOST_IPV6_ADDR reuses regs 1a60-1a6c, 1a70-1a7c, 1aa0-1aac, 1ab0-1abc. */
+#define L1F_HRTBT_WAKEUP_PORT               0x1AE8
+#define L1F_HRTBT_WAKEUP_PORT_SRC_MASK      ASHFT16(0xFFFFUL)
+#define L1F_HRTBT_WAKEUP_PORT_SRC_SHIFT     16
+#define L1F_HRTBT_WAKEUP_PORT_DEST_MASK     ASHFT0(0xFFFFUL)
+#define L1F_HRTBT_WAKEUP_PORT_DEST_SHIFT    0
+
+#define L1F_HRTBT_WAKEUP_DATA7              0x1AEC
+#define L1F_HRTBT_WAKEUP_DATA6              0x1AF0
+#define L1F_HRTBT_WAKEUP_DATA5              0x1AF4
+#define L1F_HRTBT_WAKEUP_DATA4              0x1AF8
+#define L1F_HRTBT_WAKEUP_DATA3              0x1AFC
+#define L1F_HRTBT_WAKEUP_DATA2              0x1B80
+#define L1F_HRTBT_WAKEUP_DATA1              0x1B84
+#define L1F_HRTBT_WAKEUP_DATA0              0x1B88
+
+#define L1F_RXPARSE                     0x1458
+#define L1F_RXPARSE_FLT6_L4_MASK        ASHFT30(3UL)
+#define L1F_RXPARSE_FLT6_L4_SHIFT       30
+#define L1F_RXPARSE_FLT6_L3_MASK        ASHFT28(3UL)
+#define L1F_RXPARSE_FLT6_L3_SHIFT       28
+#define L1F_RXPARSE_FLT5_L4_MASK        ASHFT26(3UL)
+#define L1F_RXPARSE_FLT5_L4_SHIFT       26
+#define L1F_RXPARSE_FLT5_L3_MASK        ASHFT24(3UL)
+#define L1F_RXPARSE_FLT5_L3_SHIFT       24
+#define L1F_RXPARSE_FLT4_L4_MASK        ASHFT22(3UL)
+#define L1F_RXPARSE_FLT4_L4_SHIFT       22
+#define L1F_RXPARSE_FLT4_L3_MASK        ASHFT20(3UL)
+#define L1F_RXPARSE_FLT4_L3_SHIFT       20
+#define L1F_RXPARSE_FLT3_L4_MASK        ASHFT18(3UL)
+#define L1F_RXPARSE_FLT3_L4_SHIFT       18
+#define L1F_RXPARSE_FLT3_L3_MASK        ASHFT16(3UL)
+#define L1F_RXPARSE_FLT3_L3_SHIFT       16
+#define L1F_RXPARSE_FLT2_L4_MASK        ASHFT14(3UL)
+#define L1F_RXPARSE_FLT2_L4_SHIFT       14
+#define L1F_RXPARSE_FLT2_L3_MASK        ASHFT12(3UL)
+#define L1F_RXPARSE_FLT2_L3_SHIFT       12
+#define L1F_RXPARSE_FLT1_L4_MASK        ASHFT10(3UL)
+#define L1F_RXPARSE_FLT1_L4_SHIFT       10
+#define L1F_RXPARSE_FLT1_L3_MASK        ASHFT8(3UL)
+#define L1F_RXPARSE_FLT1_L3_SHIFT       8
+#define L1F_RXPARSE_FLT6_EN             BIT(5)
+#define L1F_RXPARSE_FLT5_EN             BIT(4)
+#define L1F_RXPARSE_FLT4_EN             BIT(3)
+#define L1F_RXPARSE_FLT3_EN             BIT(2)
+#define L1F_RXPARSE_FLT2_EN             BIT(1)
+#define L1F_RXPARSE_FLT1_EN             BIT(0)
+#define L1F_RXPARSE_FLT_L4_UDP          0
+#define L1F_RXPARSE_FLT_L4_TCP          1
+#define L1F_RXPARSE_FLT_L4_BOTH         2
+#define L1F_RXPARSE_FLT_L4_NONE         3
+#define L1F_RXPARSE_FLT_L3_IPV6         0
+#define L1F_RXPARSE_FLT_L3_IPV4         1
+#define L1F_RXPARSE_FLT_L3_BOTH         2
+
+/* Teredo support */
+#define L1F_TRD_CTRL                    0x145C
+#define L1F_TRD_CTRL_EN                 BIT(31)
+#define L1F_TRD_CTRL_BUBBLE_WAKE_EN     BIT(30)
+#define L1F_TRD_CTRL_PREFIX_CMP_HW      BIT(28)
+#define L1F_TRD_CTRL_RSHDR_ADDR_MASK    ASHFT16(0xFFFUL)
+#define L1F_TRD_CTRL_RSHDR_ADDR_SHIFT   16
+#define L1F_TRD_CTRL_SINTV_MAX_MASK     ASHFT8(0xFFUL)
+#define L1F_TRD_CTRL_SINTV_MAX_SHIFT    8
+#define L1F_TRD_CTRL_SINTV_MIN_MASK     ASHFT0(0xFFUL)
+#define L1F_TRD_CTRL_SINTV_MIN_SHIFT    0
+
+#define L1F_TRD_RS                      0x1460
+#define L1F_TRD_RS_SZ_MASK              ASHFT20(0xFFFUL)
+#define L1F_TRD_RS_SZ_SHIFT             20
+#define L1F_TRD_RS_NONCE_OFS_MASK       ASHFT8(0xFFFUL)
+#define L1F_TRD_RS_NONCE_OFS_SHIFT      8
+#define L1F_TRD_RS_SEQ_OFS_MASK         ASHFT0(0xFFUL)
+#define L1F_TRD_RS_SEQ_OFS_SHIFT        0
+
+#define L1F_TRD_SRV_IP4                 0x1464
+
+#define L1F_TRD_CLNT_EXTNL_IP4          0x1468
+
+#define L1F_TRD_PORT                    0x146C
+#define L1F_TRD_PORT_CLNT_EXTNL_MASK    ASHFT16(0xFFFFUL)
+#define L1F_TRD_PORT_CLNT_EXTNL_SHIFT   16
+#define L1F_TRD_PORT_SRV_MASK           ASHFT0(0xFFFFUL)
+#define L1F_TRD_PORT_SRV_SHIFT          0
+
+#define L1F_TRD_PREFIX                  0x1470
+
+#define L1F_TRD_BUBBLE_DA_IP4           0x1478
+
+#define L1F_TRD_BUBBLE_DA_PORT          0x147C
+
+
+#define L1F_IDLE_DECISN_TIMER           0x1474  /* B0 */
+#define L1F_IDLE_DECISN_TIMER_DEF       0x400   /* 1ms */
+
+
+#define L1F_MAC_CTRL                    0x1480
+#define L1F_MAC_CTRL_FAST_PAUSE         BIT(31)
+#define L1F_MAC_CTRL_WOLSPED_SWEN       BIT(30)
+#define L1F_MAC_CTRL_MHASH_ALG_HI5B     BIT(29)  /* 1:legacy, 0:marvl(low5b)*/
+#define L1F_MAC_CTRL_SPAUSE_EN          BIT(28)
+#define L1F_MAC_CTRL_DBG_EN             BIT(27)
+#define L1F_MAC_CTRL_BRD_EN             BIT(26)
+#define L1F_MAC_CTRL_MULTIALL_EN        BIT(25)
+#define L1F_MAC_CTRL_RX_XSUM_EN         BIT(24)
+#define L1F_MAC_CTRL_THUGE              BIT(23)
+#define L1F_MAC_CTRL_MBOF               BIT(22)
+#define L1F_MAC_CTRL_SPEED_MASK         ASHFT20(3UL)
+#define L1F_MAC_CTRL_SPEED_SHIFT        20
+#define L1F_MAC_CTRL_SPEED_10_100       1
+#define L1F_MAC_CTRL_SPEED_1000         2
+#define L1F_MAC_CTRL_SIMR               BIT(19)
+#define L1F_MAC_CTRL_SSTCT              BIT(17)
+#define L1F_MAC_CTRL_TPAUSE             BIT(16)
+#define L1F_MAC_CTRL_PROMISC_EN         BIT(15)
+#define L1F_MAC_CTRL_VLANSTRIP          BIT(14)
+#define L1F_MAC_CTRL_PRMBLEN_MASK       ASHFT10(0xFUL)
+#define L1F_MAC_CTRL_PRMBLEN_SHIFT      10
+#define L1F_MAC_CTRL_RHUGE_EN           BIT(9)
+#define L1F_MAC_CTRL_FLCHK              BIT(8)
+#define L1F_MAC_CTRL_PCRCE              BIT(7)
+#define L1F_MAC_CTRL_CRCE               BIT(6)
+#define L1F_MAC_CTRL_FULLD              BIT(5)
+#define L1F_MAC_CTRL_LPBACK_EN          BIT(4)
+#define L1F_MAC_CTRL_RXFC_EN            BIT(3)
+#define L1F_MAC_CTRL_TXFC_EN            BIT(2)
+#define L1F_MAC_CTRL_RX_EN              BIT(1)
+#define L1F_MAC_CTRL_TX_EN              BIT(0)
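+
+/*
+ * Sketch (hypothetical helper) of composing the fields above for a
+ * gigabit full-duplex link with both directions enabled:
+ */
+static inline u32 l1f_mac_ctrl_1g_fd(void)
+{
+	return (L1F_MAC_CTRL_SPEED_1000 << L1F_MAC_CTRL_SPEED_SHIFT) |
+	       L1F_MAC_CTRL_FULLD | L1F_MAC_CTRL_RX_EN | L1F_MAC_CTRL_TX_EN;
+}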
+
+#define L1F_GAP                         0x1484
+#define L1F_GAP_IPGR2_MASK              ASHFT24(0x7FUL)
+#define L1F_GAP_IPGR2_SHIFT             24
+#define L1F_GAP_IPGR1_MASK              ASHFT16(0x7FUL)
+#define L1F_GAP_IPGR1_SHIFT             16
+#define L1F_GAP_MIN_IFG_MASK            ASHFT8(0xFFUL)
+#define L1F_GAP_MIN_IFG_SHIFT           8
+#define L1F_GAP_IPGT_MASK               ASHFT0(0x7FUL)  /* A0 diff with B0 */
+#define L1F_GAP_IPGT_SHIFT              0
+
+#define L1F_STAD0                       0x1488
+#define L1F_STAD1                       0x148C
+
+#define L1F_HASH_TBL0                   0x1490
+#define L1F_HASH_TBL1                   0x1494
+
+#define L1F_HALFD                       0x1498
+#define L1F_HALFD_JAM_IPG_MASK          ASHFT24(0xFUL)
+#define L1F_HALFD_JAM_IPG_SHIFT         24
+#define L1F_HALFD_ABEBT_MASK            ASHFT20(0xFUL)
+#define L1F_HALFD_ABEBT_SHIFT           20
+#define L1F_HALFD_ABEBE                 BIT(19)
+#define L1F_HALFD_BPNB                  BIT(18)
+#define L1F_HALFD_NOBO                  BIT(17)
+#define L1F_HALFD_EDXSDFR               BIT(16)
+#define L1F_HALFD_RETRY_MASK            ASHFT12(0xFUL)
+#define L1F_HALFD_RETRY_SHIFT           12
+#define L1F_HALFD_LCOL_MASK             ASHFT0(0x3FFUL)
+#define L1F_HALFD_LCOL_SHIFT            0
+
+#define L1F_MTU                         0x149C
+#define L1F_MTU_JUMBO_TH                1514
+#define L1F_MTU_STD_ALGN                1536
+#define L1F_MTU_MIN                     64
+
+#define L1F_SRAM0                       0x1500
+#define L1F_SRAM_RFD_TAIL_ADDR_MASK     ASHFT16(0xFFFUL)
+#define L1F_SRAM_RFD_TAIL_ADDR_SHIFT    16
+#define L1F_SRAM_RFD_HEAD_ADDR_MASK     ASHFT0(0xFFFUL)
+#define L1F_SRAM_RFD_HEAD_ADDR_SHIFT    0
+
+#define L1F_SRAM1                       0x1510
+#define L1F_SRAM_RFD_LEN_MASK           ASHFT0(0xFFFUL) /* 8BYTES UNIT */
+#define L1F_SRAM_RFD_LEN_SHIFT          0
+
+#define L1F_SRAM2                       0x1518
+#define L1F_SRAM_TRD_TAIL_ADDR_MASK     ASHFT16(0xFFFUL)
+#define L1F_SRAM_TRD_TAIL_ADDR_SHIFT    16
+#define L1F_SRAM_TRD_HEAD_ADDR_MASK     ASHFT0(0xFFFUL)
+#define L1F_SRAM_TRD_HEAD_ADDR_SHIFT    0
+
+#define L1F_SRAM3                       0x151C
+#define L1F_SRAM_TRD_LEN_MASK           ASHFT0(0xFFFUL) /* 8BYTES UNIT */
+#define L1F_SRAM_TRD_LEN_SHIFT          0
+
+#define L1F_SRAM4                       0x1520
+#define L1F_SRAM_RXF_TAIL_ADDR_MASK     ASHFT16(0xFFFUL)
+#define L1F_SRAM_RXF_TAIL_ADDR_SHIFT    16
+#define L1F_SRAM_RXF_HEAD_ADDR_MASK     ASHFT0(0xFFFUL)
+#define L1F_SRAM_RXF_HEAD_ADDR_SHIFT    0
+
+#define L1F_SRAM5                       0x1524
+#define L1F_SRAM_RXF_LEN_MASK           ASHFT0(0xFFFUL) /* 8BYTES UNIT */
+#define L1F_SRAM_RXF_LEN_SHIFT          0
+#define L1F_SRAM_RXF_LEN_8K             (8*1024)
+
+#define L1F_SRAM6                       0x1528
+#define L1F_SRAM_TXF_TAIL_ADDR_MASK     ASHFT16(0xFFFUL)
+#define L1F_SRAM_TXF_TAIL_ADDR_SHIFT    16
+#define L1F_SRAM_TXF_HEAD_ADDR_MASK     ASHFT0(0xFFFUL)
+#define L1F_SRAM_TXF_HEAD_ADDR_SHIFT    0
+
+#define L1F_SRAM7                       0x152C
+#define L1F_SRAM_TXF_LEN_MASK           ASHFT0(0xFFFUL) /* 8BYTES UNIT */
+#define L1F_SRAM_TXF_LEN_SHIFT          0
+
+#define L1F_SRAM8                       0x1530
+#define L1F_SRAM_PATTERN_ADDR_MASK      ASHFT16(0xFFFUL)
+#define L1F_SRAM_PATTERN_ADDR_SHIFT     16
+#define L1F_SRAM_TSO_ADDR_MASK          ASHFT0(0xFFFUL)
+#define L1F_SRAM_TSO_ADDR_SHIFT         0
+
+#define L1F_SRAM9                       0x1534
+#define L1F_SRAM_LOAD_PTR               BIT(0)
+
+#define L1F_RX_BASE_ADDR_HI             0x1540
+
+#define L1F_TX_BASE_ADDR_HI             0x1544
+
+#define L1F_RFD_ADDR_LO                 0x1550
+#define L1F_RFD_RING_SZ                 0x1560
+#define L1F_RFD_BUF_SZ                  0x1564
+#define L1F_RFD_BUF_SZ_MASK             ASHFT0(0xFFFFUL)
+#define L1F_RFD_BUF_SZ_SHIFT            0
+
+#define L1F_RRD_ADDR_LO                 0x1568
+#define L1F_RRD_RING_SZ                 0x1578
+#define L1F_RRD_RING_SZ_MASK            ASHFT0(0xFFFUL)
+#define L1F_RRD_RING_SZ_SHIFT           0
+
+#define L1F_TPD_PRI3_ADDR_LO            0x14E4      /* HIGHEST PRIORITY */
+#define L1F_TPD_PRI2_ADDR_LO            0x14E0
+#define L1F_TPD_PRI1_ADDR_LO            0x157C
+#define L1F_TPD_PRI0_ADDR_LO            0x1580      /* LOWEST PRIORITY */
+
+#define L1F_TPD_PRI3_PIDX               0x1618      /* 16BIT */
+#define L1F_TPD_PRI2_PIDX               0x161A      /* 16BIT */
+#define L1F_TPD_PRI1_PIDX               0x15F0      /* 16BIT */
+#define L1F_TPD_PRI0_PIDX               0x15F2      /* 16BIT */
+
+#define L1F_TPD_PRI3_CIDX               0x161C      /* 16BIT */
+#define L1F_TPD_PRI2_CIDX               0x161E      /* 16BIT */
+#define L1F_TPD_PRI1_CIDX               0x15F4      /* 16BIT */
+#define L1F_TPD_PRI0_CIDX               0x15F6      /* 16BIT */
+
+#define L1F_TPD_RING_SZ                 0x1584
+#define L1F_TPD_RING_SZ_MASK            ASHFT0(0xFFFFUL)
+#define L1F_TPD_RING_SZ_SHIFT           0
+
+#define L1F_CMB_ADDR_LO                 0x1588      /* NOT USED */
+
+#define L1F_TXQ0                        0x1590
+#define L1F_TXQ0_TXF_BURST_PREF_MASK    ASHFT16(0xFFFFUL)
+#define L1F_TXQ0_TXF_BURST_PREF_SHIFT   16
+#define L1F_TXQ_TXF_BURST_PREF_DEF      0x200
+#define L1F_TXQ0_PEDING_CLR             BIT(8)
+#define L1F_TXQ0_LSO_8023_EN            BIT(7)
+#define L1F_TXQ0_MODE_ENHANCE           BIT(6)
+#define L1F_TXQ0_EN                     BIT(5)
+#define L1F_TXQ0_SUPT_IPOPT             BIT(4)
+#define L1F_TXQ0_TPD_BURSTPREF_MASK     ASHFT0(0xFUL)
+#define L1F_TXQ0_TPD_BURSTPREF_SHIFT    0
+#define L1F_TXQ_TPD_BURSTPREF_DEF       5
+
+#define L1F_TXQ1                        0x1594
+#define L1F_TXQ1_ERRLGPKT_DROP_EN       BIT(11)          /* drop oversized
+							 * (> rfd buf) error packets */
+#define L1F_TXQ1_JUMBO_TSOTHR_MASK      ASHFT0(0x7FFUL) /* 8BYTES UNIT */
+#define L1F_TXQ1_JUMBO_TSOTHR_SHIFT     0
+#define L1F_TXQ1_JUMBO_TSO_TH           (7*1024)    /* byte */
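+
+/*
+ * The TSO threshold field is in 8-byte units while L1F_TXQ1_JUMBO_TSO_TH
+ * is in bytes, so the programmed value is presumably the byte count
+ * divided by 8 (hypothetical helper):
+ */
+static inline u32 l1f_txq1_tso_thresh(u32 bytes)
+{
+	return ((bytes >> 3) << L1F_TXQ1_JUMBO_TSOTHR_SHIFT) &
+		L1F_TXQ1_JUMBO_TSOTHR_MASK;
+}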
+
+#define L1F_TXQ2                        0x1598          /* ENTER L1 CONTROL */
+#define L1F_TXQ2_BURST_EN               BIT(31)
+#define L1F_TXQ2_BURST_HI_WM_MASK       ASHFT16(0xFFFUL)
+#define L1F_TXQ2_BURST_HI_WM_SHIFT      16
+#define L1F_TXQ2_BURST_LO_WM_MASK       ASHFT0(0xFFFUL)
+#define L1F_TXQ2_BURST_LO_WM_SHIFT      0
+
+#define L1F_RXQ0                        0x15A0
+#define L1F_RXQ0_EN                     BIT(31)
+#define L1F_RXQ0_CUT_THRU_EN            BIT(30)
+#define L1F_RXQ0_RSS_HASH_EN            BIT(29)
+#define L1F_RXQ0_NON_IP_QTBL            BIT(28)  /* 0:q0,1:table */
+#define L1F_RXQ0_RSS_MODE_MASK          ASHFT26(3UL)
+#define L1F_RXQ0_RSS_MODE_SHIFT         26
+#define L1F_RXQ0_RSS_MODE_DIS           0
+#define L1F_RXQ0_RSS_MODE_SQSI          1
+#define L1F_RXQ0_RSS_MODE_MQSI          2
+#define L1F_RXQ0_RSS_MODE_MQMI          3
+#define L1F_RXQ0_NUM_RFD_PREF_MASK      ASHFT20(0x3FUL)
+#define L1F_RXQ0_NUM_RFD_PREF_SHIFT     20
+#define L1F_RXQ0_NUM_RFD_PREF_DEF       8
+#define L1F_RXQ0_IDT_TBL_SIZE_MASK      ASHFT8(0x1FFUL)
+#define L1F_RXQ0_IDT_TBL_SIZE_SHIFT     8
+#define L1F_RXQ0_IDT_TBL_SIZE_DEF       0x100
+#define L1F_RXQ0_IPV6_PARSE_EN          BIT(7)
+#define L1F_RXQ0_RSS_HSTYP_IPV6_TCP_EN  BIT(5)
+#define L1F_RXQ0_RSS_HSTYP_IPV6_EN      BIT(4)
+#define L1F_RXQ0_RSS_HSTYP_IPV4_TCP_EN  BIT(3)
+#define L1F_RXQ0_RSS_HSTYP_IPV4_EN      BIT(2)
+#define L1F_RXQ0_RSS_HSTYP_ALL          (\
+	L1F_RXQ0_RSS_HSTYP_IPV6_TCP_EN  |\
+	L1F_RXQ0_RSS_HSTYP_IPV4_TCP_EN  |\
+	L1F_RXQ0_RSS_HSTYP_IPV6_EN      |\
+	L1F_RXQ0_RSS_HSTYP_IPV4_EN)
+#define L1F_RXQ0_ASPM_THRESH_MASK       ASHFT0(3UL)
+#define L1F_RXQ0_ASPM_THRESH_SHIFT      0
+#define L1F_RXQ0_ASPM_THRESH_NO         0
+#define L1F_RXQ0_ASPM_THRESH_1M         1
+#define L1F_RXQ0_ASPM_THRESH_10M        2
+#define L1F_RXQ0_ASPM_THRESH_100M       3
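+
+/*
+ * Illustration (hypothetical helper): enabling RSS with every supported
+ * hash type and the default prefetch/indirection-table sizes composes
+ * the fields above roughly like this:
+ */
+static inline u32 l1f_rxq0_rss_default(void)
+{
+	return L1F_RXQ0_EN | L1F_RXQ0_RSS_HASH_EN | L1F_RXQ0_RSS_HSTYP_ALL |
+	       (L1F_RXQ0_RSS_MODE_MQMI << L1F_RXQ0_RSS_MODE_SHIFT) |
+	       (L1F_RXQ0_NUM_RFD_PREF_DEF << L1F_RXQ0_NUM_RFD_PREF_SHIFT) |
+	       (L1F_RXQ0_IDT_TBL_SIZE_DEF << L1F_RXQ0_IDT_TBL_SIZE_SHIFT);
+}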
+
+#define L1F_RXQ1                        0x15A4
+#define L1F_RXQ1_JUMBO_LKAH_MASK        ASHFT12(0xFUL)      /* 32BYTES UNIT */
+#define L1F_RXQ1_JUMBO_LKAH_SHIFT       12
+#define L1F_RXQ1_RFD_PREF_DOWN_MASK     ASHFT6(0x3FUL)
+#define L1F_RXQ1_RFD_PREF_DOWN_SHIFT    6
+#define L1F_RXQ1_RFD_PREF_UP_MASK       ASHFT0(0x3FUL)
+#define L1F_RXQ1_RFD_PREF_UP_SHIFT      0
+
+#define L1F_RXQ2                        0x15A8
+/* XOFF: USED SRAM LOWER THAN IT, THEN NOTIFY THE PEER TO SEND AGAIN */
+#define L1F_RXQ2_RXF_XOFF_THRESH_MASK   ASHFT16(0xFFFUL)
+#define L1F_RXQ2_RXF_XOFF_THRESH_SHIFT  16
+#define L1F_RXQ2_RXF_XON_THRESH_MASK    ASHFT0(0xFFFUL)
+#define L1F_RXQ2_RXF_XON_THRESH_SHIFT   0
+
+#define L1F_RXQ3                        0x15AC
+#define L1F_RXQ3_RXD_TIMER_MASK         ASHFT16(0x7FFFUL)
+#define L1F_RXQ3_RXD_TIMER_SHIFT        16
+#define L1F_RXQ3_RXD_THRESH_MASK        ASHFT0(0xFFFUL) /* 8BYTES UNIT */
+#define L1F_RXQ3_RXD_THRESH_SHIFT       0
+
+#define L1F_DMA                         0x15C0
+#define L1F_DMA_SMB_NOW                 BIT(31)
+#define L1F_DMA_WPEND_CLR               BIT(30)
+#define L1F_DMA_RPEND_CLR               BIT(29)
+#define L1F_DMA_WSRAM_RDCTRL            BIT(28)
+#define L1F_DMA_RCHNL_SEL_MASK          ASHFT26(3UL)
+#define L1F_DMA_RCHNL_SEL_SHIFT         26
+#define L1F_DMA_RCHNL_SEL_1             0
+#define L1F_DMA_RCHNL_SEL_2             1
+#define L1F_DMA_RCHNL_SEL_3             2
+#define L1F_DMA_RCHNL_SEL_4             3
+#define L1F_DMA_SMB_EN                  BIT(21)      /* smb dma enable */
+#define L1F_DMA_WDLY_CNT_MASK           ASHFT16(0xFUL)
+#define L1F_DMA_WDLY_CNT_SHIFT          16
+#define L1F_DMA_WDLY_CNT_DEF            4
+#define L1F_DMA_RDLY_CNT_MASK           ASHFT11(0x1FUL)
+#define L1F_DMA_RDLY_CNT_SHIFT          11
+#define L1F_DMA_RDLY_CNT_DEF            15
+#define L1F_DMA_RREQ_PRI_DATA           BIT(10)      /* 0:tpd, 1:data */
+#define L1F_DMA_WREQ_BLEN_MASK          ASHFT7(7UL)
+#define L1F_DMA_WREQ_BLEN_SHIFT         7
+#define L1F_DMA_RREQ_BLEN_MASK          ASHFT4(7UL)
+#define L1F_DMA_RREQ_BLEN_SHIFT         4
+#define L1F_DMA_PENDING_AUTO_RST        BIT(3)
+#define L1F_DMA_RORDER_MODE_MASK        ASHFT0(7UL)
+#define L1F_DMA_RORDER_MODE_SHIFT       0
+#define L1F_DMA_RORDER_MODE_OUT         4
+#define L1F_DMA_RORDER_MODE_ENHANCE     2
+#define L1F_DMA_RORDER_MODE_IN          1
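+
+/*
+ * Hypothetical helper combining the default DMA delay counts with the
+ * out-of-order read mode from the fields above:
+ */
+static inline u32 l1f_dma_defaults(void)
+{
+	return (L1F_DMA_WDLY_CNT_DEF << L1F_DMA_WDLY_CNT_SHIFT) |
+	       (L1F_DMA_RDLY_CNT_DEF << L1F_DMA_RDLY_CNT_SHIFT) |
+	       (L1F_DMA_RORDER_MODE_OUT << L1F_DMA_RORDER_MODE_SHIFT);
+}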
+
+#define L1F_WOL0                        0x14A0
+#define L1F_WOL0_PT7_MATCH              BIT(31)
+#define L1F_WOL0_PT6_MATCH              BIT(30)
+#define L1F_WOL0_PT5_MATCH              BIT(29)
+#define L1F_WOL0_PT4_MATCH              BIT(28)
+#define L1F_WOL0_PT3_MATCH              BIT(27)
+#define L1F_WOL0_PT2_MATCH              BIT(26)
+#define L1F_WOL0_PT1_MATCH              BIT(25)
+#define L1F_WOL0_PT0_MATCH              BIT(24)
+#define L1F_WOL0_PT7_EN                 BIT(23)
+#define L1F_WOL0_PT6_EN                 BIT(22)
+#define L1F_WOL0_PT5_EN                 BIT(21)
+#define L1F_WOL0_PT4_EN                 BIT(20)
+#define L1F_WOL0_PT3_EN                 BIT(19)
+#define L1F_WOL0_PT2_EN                 BIT(18)
+#define L1F_WOL0_PT1_EN                 BIT(17)
+#define L1F_WOL0_PT0_EN                 BIT(16)
+#define L1F_WOL0_IPV4_SYNC_EVT          BIT(14)
+#define L1F_WOL0_IPV6_SYNC_EVT          BIT(13)
+#define L1F_WOL0_LINK_EVT               BIT(10)
+#define L1F_WOL0_MAGIC_EVT              BIT(9)
+#define L1F_WOL0_PATTERN_EVT            BIT(8)
+#define L1F_WOL0_OOB_EN                 BIT(6)
+#define L1F_WOL0_PME_LINK               BIT(5)
+#define L1F_WOL0_LINK_EN                BIT(4)
+#define L1F_WOL0_PME_MAGIC_EN           BIT(3)
+#define L1F_WOL0_MAGIC_EN               BIT(2)
+#define L1F_WOL0_PME_PATTERN_EN         BIT(1)
+#define L1F_WOL0_PATTERN_EN             BIT(0)
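+
+/*
+ * Example: arming magic-packet wake presumably sets both the event
+ * enable and its PME counterpart (hypothetical helper):
+ */
+static inline u32 l1f_wol_add_magic(u32 wol0)
+{
+	return wol0 | L1F_WOL0_MAGIC_EN | L1F_WOL0_PME_MAGIC_EN;
+}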
+
+#define L1F_WOL1                        0x14A4
+#define L1F_WOL1_PT3_LEN_MASK           ASHFT24(0xFFUL)
+#define L1F_WOL1_PT3_LEN_SHIFT          24
+#define L1F_WOL1_PT2_LEN_MASK           ASHFT16(0xFFUL)
+#define L1F_WOL1_PT2_LEN_SHIFT          16
+#define L1F_WOL1_PT1_LEN_MASK           ASHFT8(0xFFUL)
+#define L1F_WOL1_PT1_LEN_SHIFT          8
+#define L1F_WOL1_PT0_LEN_MASK           ASHFT0(0xFFUL)
+#define L1F_WOL1_PT0_LEN_SHIFT          0
+
+#define L1F_WOL2                        0x14A8
+#define L1F_WOL2_PT7_LEN_MASK           ASHFT24(0xFFUL)
+#define L1F_WOL2_PT7_LEN_SHIFT          24
+#define L1F_WOL2_PT6_LEN_MASK           ASHFT16(0xFFUL)
+#define L1F_WOL2_PT6_LEN_SHIFT          16
+#define L1F_WOL2_PT5_LEN_MASK           ASHFT8(0xFFUL)
+#define L1F_WOL2_PT5_LEN_SHIFT          8
+#define L1F_WOL2_PT4_LEN_MASK           ASHFT0(0xFFUL)
+#define L1F_WOL2_PT4_LEN_SHIFT          0
+
+#define L1F_RFD_PIDX                    0x15E0
+#define L1F_RFD_PIDX_MASK               ASHFT0(0xFFFUL)
+#define L1F_RFD_PIDX_SHIFT              0
+
+#define L1F_RFD_CIDX                    0x15F8
+#define L1F_RFD_CIDX_MASK               ASHFT0(0xFFFUL)
+#define L1F_RFD_CIDX_SHIFT              0
+
+/* MIB */
+#define L1F_MIB_BASE                    0x1700
+#define L1F_MIB_RX_OK                   (L1F_MIB_BASE + 0)
+#define L1F_MIB_RX_BC                   (L1F_MIB_BASE + 4)
+#define L1F_MIB_RX_MC                   (L1F_MIB_BASE + 8)
+#define L1F_MIB_RX_PAUSE                (L1F_MIB_BASE + 12)
+#define L1F_MIB_RX_CTRL                 (L1F_MIB_BASE + 16)
+#define L1F_MIB_RX_FCS                  (L1F_MIB_BASE + 20)
+#define L1F_MIB_RX_LENERR               (L1F_MIB_BASE + 24)
+#define L1F_MIB_RX_BYTCNT               (L1F_MIB_BASE + 28)
+#define L1F_MIB_RX_RUNT                 (L1F_MIB_BASE + 32)
+#define L1F_MIB_RX_FRAGMENT             (L1F_MIB_BASE + 36)
+#define L1F_MIB_RX_64B                  (L1F_MIB_BASE + 40)
+#define L1F_MIB_RX_127B                 (L1F_MIB_BASE + 44)
+#define L1F_MIB_RX_255B                 (L1F_MIB_BASE + 48)
+#define L1F_MIB_RX_511B                 (L1F_MIB_BASE + 52)
+#define L1F_MIB_RX_1023B                (L1F_MIB_BASE + 56)
+#define L1F_MIB_RX_1518B                (L1F_MIB_BASE + 60)
+#define L1F_MIB_RX_SZMAX                (L1F_MIB_BASE + 64)
+#define L1F_MIB_RX_OVSZ                 (L1F_MIB_BASE + 68)
+#define L1F_MIB_RXF_OV                  (L1F_MIB_BASE + 72)
+#define L1F_MIB_RRD_OV                  (L1F_MIB_BASE + 76)
+#define L1F_MIB_RX_ALIGN                (L1F_MIB_BASE + 80)
+#define L1F_MIB_RX_BCCNT                (L1F_MIB_BASE + 84)
+#define L1F_MIB_RX_MCCNT                (L1F_MIB_BASE + 88)
+#define L1F_MIB_RX_ERRADDR              (L1F_MIB_BASE + 92)
+#define L1F_MIB_TX_OK                   (L1F_MIB_BASE + 96)
+#define L1F_MIB_TX_BC                   (L1F_MIB_BASE + 100)
+#define L1F_MIB_TX_MC                   (L1F_MIB_BASE + 104)
+#define L1F_MIB_TX_PAUSE                (L1F_MIB_BASE + 108)
+#define L1F_MIB_TX_EXCDEFER             (L1F_MIB_BASE + 112)
+#define L1F_MIB_TX_CTRL                 (L1F_MIB_BASE + 116)
+#define L1F_MIB_TX_DEFER                (L1F_MIB_BASE + 120)
+#define L1F_MIB_TX_BYTCNT               (L1F_MIB_BASE + 124)
+#define L1F_MIB_TX_64B                  (L1F_MIB_BASE + 128)
+#define L1F_MIB_TX_127B                 (L1F_MIB_BASE + 132)
+#define L1F_MIB_TX_255B                 (L1F_MIB_BASE + 136)
+#define L1F_MIB_TX_511B                 (L1F_MIB_BASE + 140)
+#define L1F_MIB_TX_1023B                (L1F_MIB_BASE + 144)
+#define L1F_MIB_TX_1518B                (L1F_MIB_BASE + 148)
+#define L1F_MIB_TX_SZMAX                (L1F_MIB_BASE + 152)
+#define L1F_MIB_TX_1COL                 (L1F_MIB_BASE + 156)
+#define L1F_MIB_TX_2COL                 (L1F_MIB_BASE + 160)
+#define L1F_MIB_TX_LATCOL               (L1F_MIB_BASE + 164)
+#define L1F_MIB_TX_ABRTCOL              (L1F_MIB_BASE + 168)
+#define L1F_MIB_TX_UNDRUN               (L1F_MIB_BASE + 172)
+#define L1F_MIB_TX_TRDBEOP              (L1F_MIB_BASE + 176)
+#define L1F_MIB_TX_LENERR               (L1F_MIB_BASE + 180)
+#define L1F_MIB_TX_TRUNC                (L1F_MIB_BASE + 184)
+#define L1F_MIB_TX_BCCNT                (L1F_MIB_BASE + 188)
+#define L1F_MIB_TX_MCCNT                (L1F_MIB_BASE + 192)
+#define L1F_MIB_UPDATE                  (L1F_MIB_BASE + 196)
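+
+/*
+ * The MIB block is a flat array of 32-bit counters starting at
+ * L1F_MIB_BASE; a dump loop could look like this (readl() on a mapped
+ * BAR is assumed, and the helper itself is illustrative; buf must hold
+ * 50 entries):
+ */
+static inline void l1f_dump_mib(void __iomem *base, u32 *buf)
+{
+	u32 reg;
+
+	for (reg = L1F_MIB_RX_OK; reg <= L1F_MIB_UPDATE; reg += 4)
+		*buf++ = readl(base + reg);
+}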
+
+/******************************************************************************/
+
+#define L1F_ISR                         0x1600
+#define L1F_ISR_DIS                     BIT(31)
+#define L1F_ISR_RX_Q7                   BIT(30)
+#define L1F_ISR_RX_Q6                   BIT(29)
+#define L1F_ISR_RX_Q5                   BIT(28)
+#define L1F_ISR_RX_Q4                   BIT(27)
+#define L1F_ISR_PCIE_LNKDOWN            BIT(26)
+#define L1F_ISR_PCIE_CERR               BIT(25)
+#define L1F_ISR_PCIE_NFERR              BIT(24)
+#define L1F_ISR_PCIE_FERR               BIT(23)
+#define L1F_ISR_PCIE_UR                 BIT(22)
+#define L1F_ISR_MAC_TX                  BIT(21)
+#define L1F_ISR_MAC_RX                  BIT(20)
+#define L1F_ISR_RX_Q3                   BIT(19)
+#define L1F_ISR_RX_Q2                   BIT(18)
+#define L1F_ISR_RX_Q1                   BIT(17)
+#define L1F_ISR_RX_Q0                   BIT(16)
+#define L1F_ISR_TX_Q0                   BIT(15)
+#define L1F_ISR_TXQ_TO                  BIT(14)
+#define L1F_ISR_PHY_LPW                 BIT(13)
+#define L1F_ISR_PHY                     BIT(12)
+#define L1F_ISR_TX_CREDIT               BIT(11)
+#define L1F_ISR_DMAW                    BIT(10)
+#define L1F_ISR_DMAR                    BIT(9)
+#define L1F_ISR_TXF_UR                  BIT(8)
+#define L1F_ISR_TX_Q3                   BIT(7)
+#define L1F_ISR_TX_Q2                   BIT(6)
+#define L1F_ISR_TX_Q1                   BIT(5)
+#define L1F_ISR_RFD_UR                  BIT(4)
+#define L1F_ISR_RXF_OV                  BIT(3)
+#define L1F_ISR_MANU                    BIT(2)
+#define L1F_ISR_TIMER                   BIT(1)
+#define L1F_ISR_SMB                     BIT(0)
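+
+/*
+ * Sketch of a typical service pattern for the status bits above: ack
+ * the pending sources and hold off re-assertion via L1F_ISR_DIS while
+ * handling them (writel() on a mapped BAR is assumed; illustrative):
+ */
+static inline void l1f_ack_isr(void __iomem *base, u32 status)
+{
+	writel(status | L1F_ISR_DIS, base + L1F_ISR);
+}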
+
+#define L1F_IMR                         0x1604
+
+#define L1F_INT_RETRIG                  0x1608  /* re-send deassert/assert
+						 * if sw does not respond */
+#define L1F_INT_RETRIG_TIMER_MASK       ASHFT0(0xFFFFUL)
+#define L1F_INT_RETRIG_TIMER_SHIFT      0
+#define L1F_INT_RETRIG_TO               20000   /* 40ms */
+
+#define L1F_INT_DEASST_TIMER            0x1614  /* re-send deassert
+						 * if sw does not respond */
+
+#define L1F_PATTERN_MASK                0x1620  /* 128bytes, sleep state */
+#define L1F_PATTERN_MASK_LEN            128
+
+
+#define L1F_FLT1_SRC_IP0                0x1A00
+#define L1F_FLT1_SRC_IP1                0x1A04
+#define L1F_FLT1_SRC_IP2                0x1A08
+#define L1F_FLT1_SRC_IP3                0x1A0C
+#define L1F_FLT1_DST_IP0                0x1A10
+#define L1F_FLT1_DST_IP1                0x1A14
+#define L1F_FLT1_DST_IP2                0x1A18
+#define L1F_FLT1_DST_IP3                0x1A1C
+#define L1F_FLT1_PORT                   0x1A20
+#define L1F_FLT1_PORT_DST_MASK          ASHFT16(0xFFFFUL)
+#define L1F_FLT1_PORT_DST_SHIFT         16
+#define L1F_FLT1_PORT_SRC_MASK          ASHFT0(0xFFFFUL)
+#define L1F_FLT1_PORT_SRC_SHIFT         0
+
+#define L1F_FLT2_SRC_IP0                0x1A24
+#define L1F_FLT2_SRC_IP1                0x1A28
+#define L1F_FLT2_SRC_IP2                0x1A2C
+#define L1F_FLT2_SRC_IP3                0x1A30
+#define L1F_FLT2_DST_IP0                0x1A34
+#define L1F_FLT2_DST_IP1                0x1A38
+#define L1F_FLT2_DST_IP2                0x1A40
+#define L1F_FLT2_DST_IP3                0x1A44
+#define L1F_FLT2_PORT                   0x1A48
+#define L1F_FLT2_PORT_DST_MASK          ASHFT16(0xFFFFUL)
+#define L1F_FLT2_PORT_DST_SHIFT         16
+#define L1F_FLT2_PORT_SRC_MASK          ASHFT0(0xFFFFUL)
+#define L1F_FLT2_PORT_SRC_SHIFT         0
+
+#define L1F_FLT3_SRC_IP0                0x1A4C
+#define L1F_FLT3_SRC_IP1                0x1A50
+#define L1F_FLT3_SRC_IP2                0x1A54
+#define L1F_FLT3_SRC_IP3                0x1A58
+#define L1F_FLT3_DST_IP0                0x1A5C
+#define L1F_FLT3_DST_IP1                0x1A60
+#define L1F_FLT3_DST_IP2                0x1A64
+#define L1F_FLT3_DST_IP3                0x1A68
+#define L1F_FLT3_PORT                   0x1A6C
+#define L1F_FLT3_PORT_DST_MASK          ASHFT16(0xFFFFUL)
+#define L1F_FLT3_PORT_DST_SHIFT         16
+#define L1F_FLT3_PORT_SRC_MASK          ASHFT0(0xFFFFUL)
+#define L1F_FLT3_PORT_SRC_SHIFT         0
+
+#define L1F_FLT4_SRC_IP0                0x1A70
+#define L1F_FLT4_SRC_IP1                0x1A74
+#define L1F_FLT4_SRC_IP2                0x1A78
+#define L1F_FLT4_SRC_IP3                0x1A7C
+#define L1F_FLT4_DST_IP0                0x1A80
+#define L1F_FLT4_DST_IP1                0x1A84
+#define L1F_FLT4_DST_IP2                0x1A88
+#define L1F_FLT4_DST_IP3                0x1A8C
+#define L1F_FLT4_PORT                   0x1A90
+#define L1F_FLT4_PORT_DST_MASK          ASHFT16(0xFFFFUL)
+#define L1F_FLT4_PORT_DST_SHIFT         16
+#define L1F_FLT4_PORT_SRC_MASK          ASHFT0(0xFFFFUL)
+#define L1F_FLT4_PORT_SRC_SHIFT         0
+
+#define L1F_FLT5_SRC_IP0                0x1A94
+#define L1F_FLT5_SRC_IP1                0x1A98
+#define L1F_FLT5_SRC_IP2                0x1A9C
+#define L1F_FLT5_SRC_IP3                0x1AA0
+#define L1F_FLT5_DST_IP0                0x1AA4
+#define L1F_FLT5_DST_IP1                0x1AA8
+#define L1F_FLT5_DST_IP2                0x1AAC
+#define L1F_FLT5_DST_IP3                0x1AB0
+#define L1F_FLT5_PORT                   0x1AB4
+#define L1F_FLT5_PORT_DST_MASK          ASHFT16(0xFFFFUL)
+#define L1F_FLT5_PORT_DST_SHIFT         16
+#define L1F_FLT5_PORT_SRC_MASK          ASHFT0(0xFFFFUL)
+#define L1F_FLT5_PORT_SRC_SHIFT         0
+
+#define L1F_FLT6_SRC_IP0                0x1AB8
+#define L1F_FLT6_SRC_IP1                0x1ABC
+#define L1F_FLT6_SRC_IP2                0x1AC0
+#define L1F_FLT6_SRC_IP3                0x1AC8
+#define L1F_FLT6_DST_IP0                0x1620  /* only S0 state */
+#define L1F_FLT6_DST_IP1                0x1624
+#define L1F_FLT6_DST_IP2                0x1628
+#define L1F_FLT6_DST_IP3                0x162C
+#define L1F_FLT6_PORT                   0x1630
+#define L1F_FLT6_PORT_DST_MASK          ASHFT16(0xFFFFUL)
+#define L1F_FLT6_PORT_DST_SHIFT         16
+#define L1F_FLT6_PORT_SRC_MASK          ASHFT0(0xFFFFUL)
+#define L1F_FLT6_PORT_SRC_SHIFT         0
+
+#define L1F_FLTCTRL                     0x1634
+#define L1F_FLTCTRL_PSTHR_TIMER_MASK    ASHFT24(0xFFUL)
+#define L1F_FLTCTRL_PSTHR_TIMER_SHIFT   24
+#define L1F_FLTCTRL_CHK_DSTPRT6         BIT(23)
+#define L1F_FLTCTRL_CHK_SRCPRT6         BIT(22)
+#define L1F_FLTCTRL_CHK_DSTIP6          BIT(21)
+#define L1F_FLTCTRL_CHK_SRCIP6          BIT(20)
+#define L1F_FLTCTRL_CHK_DSTPRT5         BIT(19)
+#define L1F_FLTCTRL_CHK_SRCPRT5         BIT(18)
+#define L1F_FLTCTRL_CHK_DSTIP5          BIT(17)
+#define L1F_FLTCTRL_CHK_SRCIP5          BIT(16)
+#define L1F_FLTCTRL_CHK_DSTPRT4         BIT(15)
+#define L1F_FLTCTRL_CHK_SRCPRT4         BIT(14)
+#define L1F_FLTCTRL_CHK_DSTIP4          BIT(13)
+#define L1F_FLTCTRL_CHK_SRCIP4          BIT(12)
+#define L1F_FLTCTRL_CHK_DSTPRT3         BIT(11)
+#define L1F_FLTCTRL_CHK_SRCPRT3         BIT(10)
+#define L1F_FLTCTRL_CHK_DSTIP3          BIT(9)
+#define L1F_FLTCTRL_CHK_SRCIP3          BIT(8)
+#define L1F_FLTCTRL_CHK_DSTPRT2         BIT(7)
+#define L1F_FLTCTRL_CHK_SRCPRT2         BIT(6)
+#define L1F_FLTCTRL_CHK_DSTIP2          BIT(5)
+#define L1F_FLTCTRL_CHK_SRCIP2          BIT(4)
+#define L1F_FLTCTRL_CHK_DSTPRT1         BIT(3)
+#define L1F_FLTCTRL_CHK_SRCPRT1         BIT(2)
+#define L1F_FLTCTRL_CHK_DSTIP1          BIT(1)
+#define L1F_FLTCTRL_CHK_SRCIP1          BIT(0)
+
+#define L1F_DROP_ALG1                   0x1638
+#define L1F_DROP_ALG1_BWCHGVAL_MASK     ASHFT12(0xFFFFFUL)
+#define L1F_DROP_ALG1_BWCHGVAL_SHIFT    12
+#define L1F_DROP_ALG1_BWCHGSCL_6        BIT(11)      /* 0:3.125%, 1:6.25% */
+#define L1F_DROP_ALG1_ASUR_LWQ_EN       BIT(10)
+#define L1F_DROP_ALG1_BWCHGVAL_EN       BIT(9)
+#define L1F_DROP_ALG1_BWCHGSCL_EN       BIT(8)
+#define L1F_DROP_ALG1_PSTHR_AUTO        BIT(7)       /* 0:manual, 1:auto */
+#define L1F_DROP_ALG1_MIN_PSTHR_MASK    ASHFT5(3UL)
+#define L1F_DROP_ALG1_MIN_PSTHR_SHIFT   5
+#define L1F_DROP_ALG1_MIN_PSTHR_1_16    0
+#define L1F_DROP_ALG1_MIN_PSTHR_1_8     1
+#define L1F_DROP_ALG1_MIN_PSTHR_1_4     2
+#define L1F_DROP_ALG1_MIN_PSTHR_1_2     3
+#define L1F_DROP_ALG1_PSCL_MASK         ASHFT3(3UL)
+#define L1F_DROP_ALG1_PSCL_SHIFT        3
+#define L1F_DROP_ALG1_PSCL_1_4          0
+#define L1F_DROP_ALG1_PSCL_1_8          1
+#define L1F_DROP_ALG1_PSCL_1_16         2
+#define L1F_DROP_ALG1_PSCL_1_32         3
+#define L1F_DROP_ALG1_TIMESLOT_MASK     ASHFT0(7UL)
+#define L1F_DROP_ALG1_TIMESLOT_SHIFT    0
+#define L1F_DROP_ALG1_TIMESLOT_4MS      0
+#define L1F_DROP_ALG1_TIMESLOT_8MS      1
+#define L1F_DROP_ALG1_TIMESLOT_16MS     2
+#define L1F_DROP_ALG1_TIMESLOT_32MS     3
+#define L1F_DROP_ALG1_TIMESLOT_64MS     4
+#define L1F_DROP_ALG1_TIMESLOT_128MS    5
+#define L1F_DROP_ALG1_TIMESLOT_256MS    6
+#define L1F_DROP_ALG1_TIMESLOT_512MS    7
+
+#define L1F_DROP_ALG2                   0x163C
+#define L1F_DROP_ALG2_SMPLTIME_MASK     ASHFT24(0xFUL)
+#define L1F_DROP_ALG2_SMPLTIME_SHIFT    24
+#define L1F_DROP_ALG2_LWQBW_MASK        ASHFT0(0xFFFFFFUL)
+#define L1F_DROP_ALG2_LWQBW_SHIFT       0
+
+#define L1F_SMB_TIMER                   0x15C4
+
+#define L1F_TINT_TPD_THRSHLD            0x15C8
+
+#define L1F_TINT_TIMER                  0x15CC
+
+#define L1F_CLK_GATE                    0x1814
+#define L1F_CLK_GATE_125M_SW_DIS_CR     BIT(8)       /* B0 */
+#define L1F_CLK_GATE_125M_SW_AZ         BIT(7)       /* B0 */
+#define L1F_CLK_GATE_125M_SW_IDLE       BIT(6)       /* B0 */
+#define L1F_CLK_GATE_RXMAC              BIT(5)
+#define L1F_CLK_GATE_TXMAC              BIT(4)
+#define L1F_CLK_GATE_RXQ                BIT(3)
+#define L1F_CLK_GATE_TXQ                BIT(2)
+#define L1F_CLK_GATE_DMAR               BIT(1)
+#define L1F_CLK_GATE_DMAW               BIT(0)
+#define L1F_CLK_GATE_ALL_A0         (\
+	L1F_CLK_GATE_RXMAC          |\
+	L1F_CLK_GATE_TXMAC          |\
+	L1F_CLK_GATE_RXQ            |\
+	L1F_CLK_GATE_TXQ            |\
+	L1F_CLK_GATE_DMAR           |\
+	L1F_CLK_GATE_DMAW)
+#define L1F_CLK_GATE_ALL_B0         (\
+	L1F_CLK_GATE_ALL_A0         |\
+	L1F_CLK_GATE_125M_SW_AZ     |\
+	L1F_CLK_GATE_125M_SW_IDLE)
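+
+/*
+ * The A0/B0 aggregates above suggest the gating mask is picked by chip
+ * revision; a hypothetical selector:
+ */
+static inline u32 l1f_clk_gate_mask(bool is_b0)
+{
+	return is_b0 ? L1F_CLK_GATE_ALL_B0 : L1F_CLK_GATE_ALL_A0;
+}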
+
+
+
+
+
+#define L1F_BTROM_CFG                   0x1800          /* pwon rst */
+
+#define L1F_DRV                         0x1804
+/* bit definition is in lx_hwcomm.h */
+
+#define L1F_DRV_ERR1                    0x1808          /* perst */
+#define L1F_DRV_ERR1_GEN                BIT(31)          /* general err */
+#define L1F_DRV_ERR1_NOR                BIT(30)          /* rrd.nor */
+#define L1F_DRV_ERR1_TRUNC              BIT(29)
+#define L1F_DRV_ERR1_RES                BIT(28)
+#define L1F_DRV_ERR1_INTFATAL           BIT(27)
+#define L1F_DRV_ERR1_TXQPEND            BIT(26)
+#define L1F_DRV_ERR1_DMAW               BIT(25)
+#define L1F_DRV_ERR1_DMAR               BIT(24)
+#define L1F_DRV_ERR1_PCIELNKDWN         BIT(23)
+#define L1F_DRV_ERR1_PKTSIZE            BIT(22)
+#define L1F_DRV_ERR1_FIFOFUL            BIT(21)
+#define L1F_DRV_ERR1_RFDUR              BIT(20)
+#define L1F_DRV_ERR1_RRDSI              BIT(19)
+#define L1F_DRV_ERR1_UPDATE             BIT(18)
+
+#define L1F_DRV_ERR2                    0x180C
+
+#define L1F_DBG_ADDR                    0x1900  /* DWORD reg */
+#define L1F_DBG_DATA                    0x1904  /* DWORD reg */
+
+#define L1F_SYNC_IPV4_SA                0x1A00
+#define L1F_SYNC_IPV4_DA                0x1A04
+
+#define L1F_SYNC_V4PORT                 0x1A08
+#define L1F_SYNC_V4PORT_DST_MASK        ASHFT16(0xFFFFUL)
+#define L1F_SYNC_V4PORT_DST_SHIFT       16
+#define L1F_SYNC_V4PORT_SRC_MASK        ASHFT0(0xFFFFUL)
+#define L1F_SYNC_V4PORT_SRC_SHIFT       0
+
+#define L1F_SYNC_IPV6_SA0               0x1A0C
+#define L1F_SYNC_IPV6_SA1               0x1A10
+#define L1F_SYNC_IPV6_SA2               0x1A14
+#define L1F_SYNC_IPV6_SA3               0x1A18
+#define L1F_SYNC_IPV6_DA0               0x1A1C
+#define L1F_SYNC_IPV6_DA1               0x1A20
+#define L1F_SYNC_IPV6_DA2               0x1A24
+#define L1F_SYNC_IPV6_DA3               0x1A28
+
+#define L1F_SYNC_V6PORT                 0x1A2C
+#define L1F_SYNC_V6PORT_DST_MASK        ASHFT16(0xFFFFUL)
+#define L1F_SYNC_V6PORT_DST_SHIFT       16
+#define L1F_SYNC_V6PORT_SRC_MASK        ASHFT0(0xFFFFUL)
+#define L1F_SYNC_V6PORT_SRC_SHIFT       0
+
+#define L1F_ARP_REMOTE_IPV4             0x1A30
+#define L1F_ARP_HOST_IPV4               0x1A34
+#define L1F_ARP_MAC0                    0x1A38
+#define L1F_ARP_MAC1                    0x1A3C
+
+#define L1F_1ST_REMOTE_IPV6_0           0x1A40
+#define L1F_1ST_REMOTE_IPV6_1           0x1A44
+#define L1F_1ST_REMOTE_IPV6_2           0x1A48
+#define L1F_1ST_REMOTE_IPV6_3           0x1A4C
+
+#define L1F_1ST_SN_IPV6_0               0x1A50
+#define L1F_1ST_SN_IPV6_1               0x1A54
+#define L1F_1ST_SN_IPV6_2               0x1A58
+#define L1F_1ST_SN_IPV6_3               0x1A5C
+
+#define L1F_1ST_TAR_IPV6_1_0            0x1A60
+#define L1F_1ST_TAR_IPV6_1_1            0x1A64
+#define L1F_1ST_TAR_IPV6_1_2            0x1A68
+#define L1F_1ST_TAR_IPV6_1_3            0x1A6C
+#define L1F_1ST_TAR_IPV6_2_0            0x1A70
+#define L1F_1ST_TAR_IPV6_2_1            0x1A74
+#define L1F_1ST_TAR_IPV6_2_2            0x1A78
+#define L1F_1ST_TAR_IPV6_2_3            0x1A7C
+
+#define L1F_2ND_REMOTE_IPV6_0           0x1A80
+#define L1F_2ND_REMOTE_IPV6_1           0x1A84
+#define L1F_2ND_REMOTE_IPV6_2           0x1A88
+#define L1F_2ND_REMOTE_IPV6_3           0x1A8C
+
+#define L1F_2ND_SN_IPV6_0               0x1A90
+#define L1F_2ND_SN_IPV6_1               0x1A94
+#define L1F_2ND_SN_IPV6_2               0x1A98
+#define L1F_2ND_SN_IPV6_3               0x1A9C
+
+#define L1F_2ND_TAR_IPV6_1_0            0x1AA0
+#define L1F_2ND_TAR_IPV6_1_1            0x1AA4
+#define L1F_2ND_TAR_IPV6_1_2            0x1AA8
+#define L1F_2ND_TAR_IPV6_1_3            0x1AAC
+#define L1F_2ND_TAR_IPV6_2_0            0x1AB0
+#define L1F_2ND_TAR_IPV6_2_1            0x1AB4
+#define L1F_2ND_TAR_IPV6_2_2            0x1AB8
+#define L1F_2ND_TAR_IPV6_2_3            0x1ABC
+
+#define L1F_1ST_NS_MAC0                 0x1AC0
+#define L1F_1ST_NS_MAC1                 0x1AC4
+
+#define L1F_2ND_NS_MAC0                 0x1AC8
+#define L1F_2ND_NS_MAC1                 0x1ACC
+
+#define L1F_PMOFLD                      0x144C
+#define L1F_PMOFLD_ECMA_IGNR_FRG_SSSR   BIT(11)  /* B0 */
+#define L1F_PMOFLD_ARP_CNFLCT_WAKEUP    BIT(10)  /* B0 */
+#define L1F_PMOFLD_MULTI_SOLD           BIT(9)
+#define L1F_PMOFLD_ICMP_XSUM            BIT(8)
+#define L1F_PMOFLD_GARP_REPLY           BIT(7)
+#define L1F_PMOFLD_SYNCV6_ANY           BIT(6)
+#define L1F_PMOFLD_SYNCV4_ANY           BIT(5)
+#define L1F_PMOFLD_BY_HW                BIT(4)
+#define L1F_PMOFLD_NS_EN                BIT(3)
+#define L1F_PMOFLD_ARP_EN               BIT(2)
+#define L1F_PMOFLD_SYNCV6_EN            BIT(1)
+#define L1F_PMOFLD_SYNCV4_EN            BIT(0)
+
+#define L1F_RSS_KEY0                    0x14B0
+#define L1F_RSS_KEY1                    0x14B4
+#define L1F_RSS_KEY2                    0x14B8
+#define L1F_RSS_KEY3                    0x14BC
+#define L1F_RSS_KEY4                    0x14C0
+#define L1F_RSS_KEY5                    0x14C4
+#define L1F_RSS_KEY6                    0x14C8
+#define L1F_RSS_KEY7                    0x14CC
+#define L1F_RSS_KEY8                    0x14D0
+#define L1F_RSS_KEY9                    0x14D4
+
+#define L1F_RSS_IDT_TBL0                0x1B00
+#define L1F_RSS_IDT_TBL1                0x1B04
+#define L1F_RSS_IDT_TBL2                0x1B08
+#define L1F_RSS_IDT_TBL3                0x1B0C
+#define L1F_RSS_IDT_TBL4                0x1B10
+#define L1F_RSS_IDT_TBL5                0x1B14
+#define L1F_RSS_IDT_TBL6                0x1B18
+#define L1F_RSS_IDT_TBL7                0x1B1C
+#define L1F_RSS_IDT_TBL8                0x1B20
+#define L1F_RSS_IDT_TBL9                0x1B24
+#define L1F_RSS_IDT_TBL10               0x1B28
+#define L1F_RSS_IDT_TBL11               0x1B2C
+#define L1F_RSS_IDT_TBL12               0x1B30
+#define L1F_RSS_IDT_TBL13               0x1B34
+#define L1F_RSS_IDT_TBL14               0x1B38
+#define L1F_RSS_IDT_TBL15               0x1B3C
+#define L1F_RSS_IDT_TBL16               0x1B40
+#define L1F_RSS_IDT_TBL17               0x1B44
+#define L1F_RSS_IDT_TBL18               0x1B48
+#define L1F_RSS_IDT_TBL19               0x1B4C
+#define L1F_RSS_IDT_TBL20               0x1B50
+#define L1F_RSS_IDT_TBL21               0x1B54
+#define L1F_RSS_IDT_TBL22               0x1B58
+#define L1F_RSS_IDT_TBL23               0x1B5C
+#define L1F_RSS_IDT_TBL24               0x1B60
+#define L1F_RSS_IDT_TBL25               0x1B64
+#define L1F_RSS_IDT_TBL26               0x1B68
+#define L1F_RSS_IDT_TBL27               0x1B6C
+#define L1F_RSS_IDT_TBL28               0x1B70
+#define L1F_RSS_IDT_TBL29               0x1B74
+#define L1F_RSS_IDT_TBL30               0x1B78
+#define L1F_RSS_IDT_TBL31               0x1B7C
+
+#define L1F_RSS_HASH_VAL                0x15B0
+#define L1F_RSS_HASH_FLAG               0x15B4
+
+#define L1F_RSS_BASE_CPU_NUM            0x15B8
+
+#define L1F_MSI_MAP_TBL1                0x15D0
+#define L1F_MSI_MAP_TBL1_ALERT_MASK     ASHFT28(0xFUL)
+#define L1F_MSI_MAP_TBL1_ALERT_SHIFT    28
+#define L1F_MSI_MAP_TBL1_TIMER_MASK     ASHFT24(0xFUL)
+#define L1F_MSI_MAP_TBL1_TIMER_SHIFT    24
+#define L1F_MSI_MAP_TBL1_TXQ1_MASK      ASHFT20(0xFUL)
+#define L1F_MSI_MAP_TBL1_TXQ1_SHIFT     20
+#define L1F_MSI_MAP_TBL1_TXQ0_MASK      ASHFT16(0xFUL)
+#define L1F_MSI_MAP_TBL1_TXQ0_SHIFT     16
+#define L1F_MSI_MAP_TBL1_RXQ3_MASK      ASHFT12(0xFUL)
+#define L1F_MSI_MAP_TBL1_RXQ3_SHIFT     12
+#define L1F_MSI_MAP_TBL1_RXQ2_MASK      ASHFT8(0xFUL)
+#define L1F_MSI_MAP_TBL1_RXQ2_SHIFT     8
+#define L1F_MSI_MAP_TBL1_RXQ1_MASK      ASHFT4(0xFUL)
+#define L1F_MSI_MAP_TBL1_RXQ1_SHIFT     4
+#define L1F_MSI_MAP_TBL1_RXQ0_MASK      ASHFT0(0xFUL)
+#define L1F_MSI_MAP_TBL1_RXQ0_SHIFT     0
+
+#define L1F_MSI_MAP_TBL2                0x15D8
+#define L1F_MSI_MAP_TBL2_PHY_MASK       ASHFT28(0xFUL)
+#define L1F_MSI_MAP_TBL2_PHY_SHIFT      28
+#define L1F_MSI_MAP_TBL2_SMB_MASK       ASHFT24(0xFUL)
+#define L1F_MSI_MAP_TBL2_SMB_SHIFT      24
+#define L1F_MSI_MAP_TBL2_TXQ3_MASK      ASHFT20(0xFUL)
+#define L1F_MSI_MAP_TBL2_TXQ3_SHIFT     20
+#define L1F_MSI_MAP_TBL2_TXQ2_MASK      ASHFT16(0xFUL)
+#define L1F_MSI_MAP_TBL2_TXQ2_SHIFT     16
+#define L1F_MSI_MAP_TBL2_RXQ7_MASK      ASHFT12(0xFUL)
+#define L1F_MSI_MAP_TBL2_RXQ7_SHIFT     12
+#define L1F_MSI_MAP_TBL2_RXQ6_MASK      ASHFT8(0xFUL)
+#define L1F_MSI_MAP_TBL2_RXQ6_SHIFT     8
+#define L1F_MSI_MAP_TBL2_RXQ5_MASK      ASHFT4(0xFUL)
+#define L1F_MSI_MAP_TBL2_RXQ5_SHIFT     4
+#define L1F_MSI_MAP_TBL2_RXQ4_MASK      ASHFT0(0xFUL)
+#define L1F_MSI_MAP_TBL2_RXQ4_SHIFT     0
+
+#define L1F_MSI_ID_MAP                  0x15D4
+#define L1F_MSI_ID_MAP_RXQ7             BIT(30)
+#define L1F_MSI_ID_MAP_RXQ6             BIT(29)
+#define L1F_MSI_ID_MAP_RXQ5             BIT(28)
+#define L1F_MSI_ID_MAP_RXQ4             BIT(27)
+#define L1F_MSI_ID_MAP_PCIELNKDW        BIT(26)  /* 0:common,1:timer */
+#define L1F_MSI_ID_MAP_PCIECERR         BIT(25)
+#define L1F_MSI_ID_MAP_PCIENFERR        BIT(24)
+#define L1F_MSI_ID_MAP_PCIEFERR         BIT(23)
+#define L1F_MSI_ID_MAP_PCIEUR           BIT(22)
+#define L1F_MSI_ID_MAP_MACTX            BIT(21)
+#define L1F_MSI_ID_MAP_MACRX            BIT(20)
+#define L1F_MSI_ID_MAP_RXQ3             BIT(19)
+#define L1F_MSI_ID_MAP_RXQ2             BIT(18)
+#define L1F_MSI_ID_MAP_RXQ1             BIT(17)
+#define L1F_MSI_ID_MAP_RXQ0             BIT(16)
+#define L1F_MSI_ID_MAP_TXQ0             BIT(15)
+#define L1F_MSI_ID_MAP_TXQTO            BIT(14)
+#define L1F_MSI_ID_MAP_LPW              BIT(13)
+#define L1F_MSI_ID_MAP_PHY              BIT(12)
+#define L1F_MSI_ID_MAP_TXCREDIT         BIT(11)
+#define L1F_MSI_ID_MAP_DMAW             BIT(10)
+#define L1F_MSI_ID_MAP_DMAR             BIT(9)
+#define L1F_MSI_ID_MAP_TXFUR            BIT(8)
+#define L1F_MSI_ID_MAP_TXQ3             BIT(7)
+#define L1F_MSI_ID_MAP_TXQ2             BIT(6)
+#define L1F_MSI_ID_MAP_TXQ1             BIT(5)
+#define L1F_MSI_ID_MAP_RFDUR            BIT(4)
+#define L1F_MSI_ID_MAP_RXFOV            BIT(3)
+#define L1F_MSI_ID_MAP_MANU             BIT(2)
+#define L1F_MSI_ID_MAP_TIMER            BIT(1)
+#define L1F_MSI_ID_MAP_SMB              BIT(0)
+
+#define L1F_MSI_RETRANS_TIMER           0x1920
+#define L1F_MSI_MASK_SEL_LINE           BIT(16)  /* 1:line,0:standard*/
+#define L1F_MSI_RETRANS_TM_MASK         ASHFT0(0xFFFFUL)
+#define L1F_MSI_RETRANS_TM_SHIFT        0
+
+#define L1F_CR_DMA_CTRL                 0x1930
+#define L1F_CR_DMA_CTRL_PRI             BIT(22)
+#define L1F_CR_DMA_CTRL_RRDRXD_JOINT    BIT(21)
+#define L1F_CR_DMA_CTRL_BWCREDIT_MASK   ASHFT19(0x3UL)
+#define L1F_CR_DMA_CTRL_BWCREDIT_SHIFT  19
+#define L1F_CR_DMA_CTRL_BWCREDIT_2KB    0
+#define L1F_CR_DMA_CTRL_BWCREDIT_1KB    1
+#define L1F_CR_DMA_CTRL_BWCREDIT_4KB    2
+#define L1F_CR_DMA_CTRL_BWCREDIT_8KB    3
+#define L1F_CR_DMA_CTRL_BW_EN           BIT(18)
+#define L1F_CR_DMA_CTRL_BW_RATIO_MASK   ASHFT16(0x3UL)
+#define L1F_CR_DMA_CTRL_BW_RATIO_1_2    0
+#define L1F_CR_DMA_CTRL_BW_RATIO_1_4    1
+#define L1F_CR_DMA_CTRL_BW_RATIO_1_8    2
+#define L1F_CR_DMA_CTRL_BW_RATIO_2_1    3
+#define L1F_CR_DMA_CTRL_SOFT_RST        BIT(11)
+#define L1F_CR_DMA_CTRL_TXEARLY_EN      BIT(10)
+#define L1F_CR_DMA_CTRL_RXEARLY_EN      BIT(9)
+#define L1F_CR_DMA_CTRL_WEARLY_EN       BIT(8)
+#define L1F_CR_DMA_CTRL_RXTH_MASK       ASHFT4(0xFUL)
+#define L1F_CR_DMA_CTRL_WTH_MASK        ASHFT0(0xFUL)
+
+
+#define L1F_EFUSE_BIST                  0x1934
+#define L1F_EFUSE_BIST_COL_MASK         ASHFT24(0x3FUL)
+#define L1F_EFUSE_BIST_COL_SHIFT        24
+#define L1F_EFUSE_BIST_ROW_MASK         ASHFT12(0x7FUL)
+#define L1F_EFUSE_BIST_ROW_SHIFT        12
+#define L1F_EFUSE_BIST_STEP_MASK        ASHFT8(0xFUL)
+#define L1F_EFUSE_BIST_STEP_SHIFT       8
+#define L1F_EFUSE_BIST_PAT_MASK         ASHFT4(0x7UL)
+#define L1F_EFUSE_BIST_PAT_SHIFT        4
+#define L1F_EFUSE_BIST_CRITICAL         BIT(3)
+#define L1F_EFUSE_BIST_FIXED            BIT(2)
+#define L1F_EFUSE_BIST_FAIL             BIT(1)
+#define L1F_EFUSE_BIST_NOW              BIT(0)
+
+/* CR DMA ctrl */
+
+/* TX QoS */
+#define L1F_WRR                         0x1938
+#define L1F_WRR_PRI_MASK                ASHFT29(3UL)
+#define L1F_WRR_PRI_SHIFT               29
+#define L1F_WRR_PRI_RESTRICT_ALL        0
+#define L1F_WRR_PRI_RESTRICT_HI         1
+#define L1F_WRR_PRI_RESTRICT_HI2        2
+#define L1F_WRR_PRI_RESTRICT_NONE       3
+#define L1F_WRR_PRI3_MASK               ASHFT24(0x1FUL)
+#define L1F_WRR_PRI3_SHIFT              24
+#define L1F_WRR_PRI2_MASK               ASHFT16(0x1FUL)
+#define L1F_WRR_PRI2_SHIFT              16
+#define L1F_WRR_PRI1_MASK               ASHFT8(0x1FUL)
+#define L1F_WRR_PRI1_SHIFT              8
+#define L1F_WRR_PRI0_MASK               ASHFT0(0x1FUL)
+#define L1F_WRR_PRI0_SHIFT              0
+
+#define L1F_HQTPD                       0x193C
+#define L1F_HQTPD_BURST_EN              BIT(31)
+#define L1F_HQTPD_Q3_NUMPREF_MASK       ASHFT8(0xFUL)
+#define L1F_HQTPD_Q3_NUMPREF_SHIFT      8
+#define L1F_HQTPD_Q2_NUMPREF_MASK       ASHFT4(0xFUL)
+#define L1F_HQTPD_Q2_NUMPREF_SHIFT      4
+#define L1F_HQTPD_Q1_NUMPREF_MASK       ASHFT0(0xFUL)
+#define L1F_HQTPD_Q1_NUMPREF_SHIFT      0
+
+#define L1F_CPUMAP1                     0x19A0
+#define L1F_CPUMAP1_VCT7_MASK           ASHFT28(0xFUL)
+#define L1F_CPUMAP1_VCT7_SHIFT          28
+#define L1F_CPUMAP1_VCT6_MASK           ASHFT24(0xFUL)
+#define L1F_CPUMAP1_VCT6_SHIFT          24
+#define L1F_CPUMAP1_VCT5_MASK           ASHFT20(0xFUL)
+#define L1F_CPUMAP1_VCT5_SHIFT          20
+#define L1F_CPUMAP1_VCT4_MASK           ASHFT16(0xFUL)
+#define L1F_CPUMAP1_VCT4_SHIFT          16
+#define L1F_CPUMAP1_VCT3_MASK           ASHFT12(0xFUL)
+#define L1F_CPUMAP1_VCT3_SHIFT          12
+#define L1F_CPUMAP1_VCT2_MASK           ASHFT8(0xFUL)
+#define L1F_CPUMAP1_VCT2_SHIFT          8
+#define L1F_CPUMAP1_VCT1_MASK           ASHFT4(0xFUL)
+#define L1F_CPUMAP1_VCT1_SHIFT          4
+#define L1F_CPUMAP1_VCT0_MASK           ASHFT0(0xFUL)
+#define L1F_CPUMAP1_VCT0_SHIFT          0
+
+#define L1F_CPUMAP2                     0x19A4
+#define L1F_CPUMAP2_VCT15_MASK          ASHFT28(0xFUL)
+#define L1F_CPUMAP2_VCT15_SHIFT         28
+#define L1F_CPUMAP2_VCT14_MASK          ASHFT24(0xFUL)
+#define L1F_CPUMAP2_VCT14_SHIFT         24
+#define L1F_CPUMAP2_VCT13_MASK          ASHFT20(0xFUL)
+#define L1F_CPUMAP2_VCT13_SHIFT         20
+#define L1F_CPUMAP2_VCT12_MASK          ASHFT16(0xFUL)
+#define L1F_CPUMAP2_VCT12_SHIFT         16
+#define L1F_CPUMAP2_VCT11_MASK          ASHFT12(0xFUL)
+#define L1F_CPUMAP2_VCT11_SHIFT         12
+#define L1F_CPUMAP2_VCT10_MASK          ASHFT8(0xFUL)
+#define L1F_CPUMAP2_VCT10_SHIFT         8
+#define L1F_CPUMAP2_VCT9_MASK           ASHFT4(0xFUL)
+#define L1F_CPUMAP2_VCT9_SHIFT          4
+#define L1F_CPUMAP2_VCT8_MASK           ASHFT0(0xFUL)
+#define L1F_CPUMAP2_VCT8_SHIFT          0
+
+#define L1F_MISC                        0x19C0
+#define L1F_MISC_MODU                   BIT(31)  /* 0:vector,1:cpu */
+#define L1F_MISC_OVERCUR                BIT(29)
+#define L1F_MISC_PSWR_EN                BIT(28)
+#define L1F_MISC_PSW_CTRL_MASK          ASHFT24(0xFUL)
+#define L1F_MISC_PSW_CTRL_SHIFT         24
+#define L1F_MISC_PSW_OCP_MASK           ASHFT21(7UL)
+#define L1F_MISC_PSW_OCP_SHIFT          21
+#define L1F_MISC_V18_HIGH               BIT(20)
+#define L1F_MISC_LPO_CTRL_MASK          ASHFT16(0xFUL)
+#define L1F_MISC_LPO_CTRL_SHIFT         16
+#define L1F_MISC_ISO_EN                 BIT(12)
+#define L1F_MISC_XSTANA_ALWAYS_ON       BIT(11)
+#define L1F_MISC_SYS25M_SEL_ADAPTIVE    BIT(10)
+#define L1F_MISC_SPEED_SIM              BIT(9)
+#define L1F_MISC_S1_LWP_EN              BIT(8)
+#define L1F_MISC_MACLPW                 BIT(7)   /* pcie/mac do power saving
+						 * while phy is in lpw state */
+#define L1F_MISC_125M_SW                BIT(6)
+#define L1F_MISC_INTNLOSC_OFF_EN        BIT(5)
+#define L1F_MISC_EXTN25M_SEL            BIT(4)   /* 0:chipset, 1:crystal */
+#define L1F_MISC_INTNLOSC_OPEN          BIT(3)
+#define L1F_MISC_SMBUS_AT_LED           BIT(2)
+#define L1F_MISC_PPS_AT_LED_MASK        ASHFT0(3UL)
+#define L1F_MISC_PPS_AT_LED_SHIFT       0
+#define L1F_MISC_PPS_AT_LED_ACT         1
+#define L1F_MISC_PPS_AT_LED_10_100      2
+#define L1F_MISC_PPS_AT_LED_1000        3
+
+#define L1F_MISC1                       0x19C4
+#define L1F_MSC1_BLK_CRASPM_REQ         BIT(15)
+
+#define L1F_MISC3                       0x19CC
+#define L1F_MISC3_25M_BY_SW             BIT(1)   /* 1:Software control 25M */
+#define L1F_MISC3_25M_NOTO_INTNL        BIT(0)   /* 0:25M switch to intnl OSC */
+
+
+
+/***************************** IO mapping registers ***************************/
+#define L1F_IO_ADDR                     0x00    /* DWORD reg */
+#define L1F_IO_DATA                     0x04    /* DWORD reg */
+#define L1F_IO_MASTER                   0x08    /* DWORD same as reg0x1400 */
+#define L1F_IO_MAC_CTRL                 0x0C    /* DWORD same as reg0x1480*/
+#define L1F_IO_ISR                      0x10    /* DWORD same as reg0x1600 */
+#define L1F_IO_IMR                      0x14    /* DWORD same as reg0x1604 */
+#define L1F_IO_TPD_PRI1_PIDX            0x18    /* WORD same as reg0x15F0 */
+#define L1F_IO_TPD_PRI0_PIDX            0x1A    /* WORD same as reg0x15F2 */
+#define L1F_IO_TPD_PRI1_CIDX            0x1C    /* WORD same as reg0x15F4 */
+#define L1F_IO_TPD_PRI0_CIDX            0x1E    /* WORD same as reg0x15F6 */
+#define L1F_IO_RFD_PIDX                 0x20    /* WORD same as reg0x15E0 */
+#define L1F_IO_RFD_CIDX                 0x30    /* WORD same as reg0x15F8 */
+#define L1F_IO_MDIO                     0x38    /* WORD same as reg0x1414 */
+#define L1F_IO_PHY_CTRL                 0x3C    /* DWORD same as reg0x140C */
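+
+/*
+ * Registers without a dedicated mirror in the IO window above are
+ * presumably reached indirectly through the L1F_IO_ADDR/L1F_IO_DATA
+ * pair, roughly like this (iowrite32()/ioread32() on a mapped IO BAR
+ * are assumed; illustrative):
+ */
+static inline u32 l1f_io_indirect_read(void __iomem *io_base, u32 reg)
+{
+	iowrite32(reg, io_base + L1F_IO_ADDR);
+	return ioread32(io_base + L1F_IO_DATA);
+}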
+
+
+/********************* PHY regs definition ***************************/
+
+/* Autoneg Advertisement Register */
+#define L1F_ADVERTISE_SPEED_MASK            0x01E0
+#define L1F_ADVERTISE_DEFAULT_CAP           0x1DE0 /* diff with L1C */
+
+/* 1000BASE-T Control Register (0x9) */
+#define L1F_GIGA_CR_1000T_HD_CAPS           0x0100
+#define L1F_GIGA_CR_1000T_FD_CAPS           0x0200
+#define L1F_GIGA_CR_1000T_REPEATER_DTE      0x0400
+
+#define L1F_GIGA_CR_1000T_MS_VALUE          0x0800
+
+#define L1F_GIGA_CR_1000T_MS_ENABLE         0x1000
+
+#define L1F_GIGA_CR_1000T_TEST_MODE_NORMAL  0x0000
+#define L1F_GIGA_CR_1000T_TEST_MODE_1       0x2000
+#define L1F_GIGA_CR_1000T_TEST_MODE_2       0x4000
+#define L1F_GIGA_CR_1000T_TEST_MODE_3       0x6000
+#define L1F_GIGA_CR_1000T_TEST_MODE_4       0x8000
+#define L1F_GIGA_CR_1000T_SPEED_MASK        0x0300
+#define L1F_GIGA_CR_1000T_DEFAULT_CAP       0x0300
+
+/* 1000BASE-T Status Register */
+#define L1F_MII_GIGA_SR                     0x0A
+
+/* PHY Specific Status Register */
+#define L1F_MII_GIGA_PSSR                   0x11
+#define L1F_GIGA_PSSR_FC_RXEN               0x0004
+#define L1F_GIGA_PSSR_FC_TXEN               0x0008
+#define L1F_GIGA_PSSR_SPD_DPLX_RESOLVED     0x0800
+#define L1F_GIGA_PSSR_DPLX                  0x2000
+#define L1F_GIGA_PSSR_SPEED                 0xC000
+#define L1F_GIGA_PSSR_10MBS                 0x0000
+#define L1F_GIGA_PSSR_100MBS                0x4000
+#define L1F_GIGA_PSSR_1000MBS               0x8000
+
+/* PHY Interrupt Enable Register */
+#define L1F_MII_IER                         0x12
+#define L1F_IER_LINK_UP                     0x0400
+#define L1F_IER_LINK_DOWN                   0x0800
+
+/* PHY Interrupt Status Register */
+#define L1F_MII_ISR                         0x13
+#define L1F_ISR_LINK_UP                     0x0400
+#define L1F_ISR_LINK_DOWN                   0x0800
+
+/* Cable-Detect-Test Control Register */
+#define L1F_MII_CDTC                        0x16
+#define L1F_CDTC_EN                         1       /* sc (self-clearing) */
+#define L1F_CDTC_PAIR_MASK                  ASHFT8(3U)
+#define L1F_CDTC_PAIR_SHIFT                 8
+
+
+/* Cable-Detect-Test Status Register */
+#define L1F_MII_CDTS                        0x1C
+#define L1F_CDTS_STATUS_MASK                ASHFT8(3U)
+#define L1F_CDTS_STATUS_SHIFT               8
+#define L1F_CDTS_STATUS_NORMAL              0
+#define L1F_CDTS_STATUS_SHORT               1
+#define L1F_CDTS_STATUS_OPEN                2
+#define L1F_CDTS_STATUS_INVALID             3
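+
+/*
+ * Decoding a cable-diagnostic result read from the register above
+ * (hypothetical helper):
+ */
+static inline u16 l1f_cdts_status(u16 cdts)
+{
+	return (cdts & L1F_CDTS_STATUS_MASK) >> L1F_CDTS_STATUS_SHIFT;
+}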
+
+#define L1F_MII_DBG_ADDR                    0x1D
+#define L1F_MII_DBG_DATA                    0x1E
+
+/***************************** debug port *************************************/
+
+#define L1F_MIIDBG_ANACTRL                  0x00
+#define L1F_ANACTRL_CLK125M_DELAY_EN        BIT(15)
+#define L1F_ANACTRL_VCO_FAST                BIT(14)
+#define L1F_ANACTRL_VCO_SLOW                BIT(13)
+#define L1F_ANACTRL_AFE_MODE_EN             BIT(12)
+#define L1F_ANACTRL_LCKDET_PHY              BIT(11)
+#define L1F_ANACTRL_LCKDET_EN               BIT(10)
+#define L1F_ANACTRL_OEN_125M                BIT(9)
+#define L1F_ANACTRL_HBIAS_EN                BIT(8)
+#define L1F_ANACTRL_HB_EN                   BIT(7)
+#define L1F_ANACTRL_SEL_HSP                 BIT(6)
+#define L1F_ANACTRL_CLASSA_EN               BIT(5)
+#define L1F_ANACTRL_MANUSWON_SWR_MASK       ASHFT2(3U)
+#define L1F_ANACTRL_MANUSWON_SWR_SHIFT      2
+#define L1F_ANACTRL_MANUSWON_SWR_2V         0
+#define L1F_ANACTRL_MANUSWON_SWR_1P9V       1
+#define L1F_ANACTRL_MANUSWON_SWR_1P8V       2
+#define L1F_ANACTRL_MANUSWON_SWR_1P7V       3
+#define L1F_ANACTRL_MANUSWON_BW3_4M         BIT(1)
+#define L1F_ANACTRL_RESTART_CAL             BIT(0)
+#define L1F_ANACTRL_DEF                     0x02EF
+
+
+#define L1F_MIIDBG_SYSMODCTRL               0x04
+#define L1F_SYSMODCTRL_IECHOADJ_PFMH_PHY    BIT(15)
+#define L1F_SYSMODCTRL_IECHOADJ_BIASGEN     BIT(14)
+#define L1F_SYSMODCTRL_IECHOADJ_PFML_PHY    BIT(13)
+#define L1F_SYSMODCTRL_IECHOADJ_PS_MASK     ASHFT10(3U)
+#define L1F_SYSMODCTRL_IECHOADJ_PS_SHIFT    10
+#define L1F_SYSMODCTRL_IECHOADJ_PS_40       3
+#define L1F_SYSMODCTRL_IECHOADJ_PS_20       2
+#define L1F_SYSMODCTRL_IECHOADJ_PS_0        1
+#define L1F_SYSMODCTRL_IECHOADJ_10BT_100MV  BIT(6) /* 1:100mv, 0:200mv */
+#define L1F_SYSMODCTRL_IECHOADJ_HLFAP_MASK  ASHFT4(3U)
+#define L1F_SYSMODCTRL_IECHOADJ_HLFAP_SHIFT 4
+#define L1F_SYSMODCTRL_IECHOADJ_VDFULBW     BIT(3)
+#define L1F_SYSMODCTRL_IECHOADJ_VDBIASHLF   BIT(2)
+#define L1F_SYSMODCTRL_IECHOADJ_VDAMPHLF    BIT(1)
+#define L1F_SYSMODCTRL_IECHOADJ_VDLANSW     BIT(0)
+#define L1F_SYSMODCTRL_IECHOADJ_DEF         0xBB8B /* en half bias */
+
+
+#define L1F_MIIDBG_SRDSYSMOD                0x05
+#define L1F_SRDSYSMOD_LCKDET_EN             BIT(13)
+#define L1F_SRDSYSMOD_PLL_EN                BIT(11)
+#define L1F_SRDSYSMOD_SEL_HSP               BIT(10)
+#define L1F_SRDSYSMOD_HLFTXDR               BIT(9)
+#define L1F_SRDSYSMOD_TXCLK_DELAY_EN        BIT(8)
+#define L1F_SRDSYSMOD_TXELECIDLE            BIT(7)
+#define L1F_SRDSYSMOD_DEEMP_EN              BIT(6)
+#define L1F_SRDSYSMOD_MS_PAD                BIT(2)
+#define L1F_SRDSYSMOD_CDR_ADC_VLTG          BIT(1)
+#define L1F_SRDSYSMOD_CDR_DAC_1MA           BIT(0)
+#define L1F_SRDSYSMOD_DEF                   0x2C46
+
+
+#define L1F_MIIDBG_HIBNEG                   0x0B
+#define L1F_HIBNEG_PSHIB_EN                 BIT(15)
+#define L1F_HIBNEG_WAKE_BOTH                BIT(14)
+#define L1F_HIBNEG_ONOFF_ANACHG_SUDEN       BIT(13)
+#define L1F_HIBNEG_HIB_PULSE                BIT(12)
+#define L1F_HIBNEG_GATE_25M_EN              BIT(11)
+#define L1F_HIBNEG_RST_80U                  BIT(10)
+#define L1F_HIBNEG_RST_TIMER_MASK           ASHFT8(3U)
+#define L1F_HIBNEG_RST_TIMER_SHIFT          8
+#define L1F_HIBNEG_GTX_CLK_DELAY_MASK       ASHFT5(3U)
+#define L1F_HIBNEG_GTX_CLK_DELAY_SHIFT      5
+#define L1F_HIBNEG_BYPSS_BRKTIMER           BIT(4)
+#define L1F_HIBNEG_DEF                      0xBC40
+
+#define L1F_MIIDBG_TST10BTCFG               0x12
+#define L1F_TST10BTCFG_INTV_TIMER_MASK      ASHFT14(3U)
+#define L1F_TST10BTCFG_INTV_TIMER_SHIFT     14
+#define L1F_TST10BTCFG_TRIGER_TIMER_MASK    ASHFT12(3U)
+#define L1F_TST10BTCFG_TRIGER_TIMER_SHIFT   12
+#define L1F_TST10BTCFG_DIV_MAN_MLT3_EN      BIT(11)
+#define L1F_TST10BTCFG_OFF_DAC_IDLE         BIT(10)
+#define L1F_TST10BTCFG_LPBK_DEEP            BIT(2) /* 1:deep,0:shallow */
+#define L1F_TST10BTCFG_DEF                  0x4C04
+
+#define L1F_MIIDBG_AZ_ANADECT               0x15
+#define L1F_AZ_ANADECT_10BTRX_TH            BIT(15)
+#define L1F_AZ_ANADECT_BOTH_01CHNL          BIT(14)
+#define L1F_AZ_ANADECT_INTV_MASK            ASHFT8(0x3FU)
+#define L1F_AZ_ANADECT_INTV_SHIFT           8
+#define L1F_AZ_ANADECT_THRESH_MASK          ASHFT4(0xFU)
+#define L1F_AZ_ANADECT_THRESH_SHIFT         4
+#define L1F_AZ_ANADECT_CHNL_MASK            ASHFT0(0xFU)
+#define L1F_AZ_ANADECT_CHNL_SHIFT           0
+#define L1F_AZ_ANADECT_DEF                  0x3220
+#define L1F_AZ_ANADECT_LONG                 0x3210
+
+#define L1F_MIIDBG_AGC                      0x23
+#define L1F_AGC_2_VGA_MASK                  ASHFT8(0x3FU)
+#define L1F_AGC_2_VGA_SHIFT                 8
+#define L1F_AGC_LONG1G_LIMT                 40
+#define L1F_AGC_LONG100M_LIMT               44
+
+#define L1F_MIIDBG_LEGCYPS                  0x29
+#define L1F_LEGCYPS_EN                      BIT(15)
+#define L1F_LEGCYPS_DAC_AMP1000_MASK        ASHFT12(7U)
+#define L1F_LEGCYPS_DAC_AMP1000_SHIFT       12
+#define L1F_LEGCYPS_DAC_AMP100_MASK         ASHFT9(7U)
+#define L1F_LEGCYPS_DAC_AMP100_SHIFT        9
+#define L1F_LEGCYPS_DAC_AMP10_MASK          ASHFT6(7U)
+#define L1F_LEGCYPS_DAC_AMP10_SHIFT         6
+#define L1F_LEGCYPS_UNPLUG_TIMER_MASK       ASHFT3(7U)
+#define L1F_LEGCYPS_UNPLUG_TIMER_SHIFT      3
+#define L1F_LEGCYPS_UNPLUG_DECT_EN          BIT(2)
+#define L1F_LEGCYPS_ECNC_PS_EN              BIT(0)
+#define L1F_LEGCYPS_DEF                     0x129D
+
+#define L1F_MIIDBG_TST100BTCFG              0x36
+#define L1F_TST100BTCFG_NORMAL_BW_EN        BIT(15)
+#define L1F_TST100BTCFG_BADLNK_BYPASS       BIT(14)
+#define L1F_TST100BTCFG_SHORTCABL_TH_MASK   ASHFT8(0x3FU)
+#define L1F_TST100BTCFG_SHORTCABL_TH_SHIFT  8
+#define L1F_TST100BTCFG_LITCH_EN            BIT(7)
+#define L1F_TST100BTCFG_VLT_SW              BIT(6)
+#define L1F_TST100BTCFG_LONGCABL_TH_MASK    ASHFT0(0x3FU)
+#define L1F_TST100BTCFG_LONGCABL_TH_SHIFT   0
+#define L1F_TST100BTCFG_DEF                 0xE12C
+
+#define L1F_MIIDBG_GREENCFG                 0x3B
+#define L1F_GREENCFG_MSTPS_MSETH2_MASK      ASHFT8(0xFFU)
+#define L1F_GREENCFG_MSTPS_MSETH2_SHIFT     8
+#define L1F_GREENCFG_MSTPS_MSETH1_MASK      ASHFT0(0xFFU)
+#define L1F_GREENCFG_MSTPS_MSETH1_SHIFT     0
+#define L1F_GREENCFG_DEF                    0x7078
+
+#define L1F_MIIDBG_GREENCFG2                0x3D
+#define L1F_GREENCFG2_GATE_DFSE_EN          BIT(7)
+
+
+/***************************** extension **************************************/
+
+/******* dev 3 *********/
+#define L1F_MIIEXT_PCS                      3
+
+#define L1F_MIIEXT_CLDCTRL6                 0x8006
+#define L1F_CLDCTRL6_CAB_LEN_MASK           ASHFT0(0xFFU)
+#define L1F_CLDCTRL6_CAB_LEN_SHIFT          0
+#define L1F_CLDCTRL6_CAB_LEN_SHORT1G        116
+#define L1F_CLDCTRL6_CAB_LEN_SHORT100M      152
+
+#define L1F_MIIEXT_CLDCTRL7                 0x8007
+#define L1F_CLDCTRL7_VDHLF_BIAS_TH_MASK     ASHFT9(0x7FU)
+#define L1F_CLDCTRL7_VDHLF_BIAS_TH_SHIFT    9
+#define L1F_CLDCTRL7_AFE_AZ_MASK            ASHFT4(0x1FU)
+#define L1F_CLDCTRL7_AFE_AZ_SHIFT           4
+#define L1F_CLDCTRL7_SIDE_PEAK_TH_MASK      ASHFT0(0xFU)
+#define L1F_CLDCTRL7_SIDE_PEAK_TH_SHIFT     0
+#define L1F_CLDCTRL7_DEF                    0x6BF6 /* ???? */
+
+#define L1F_MIIEXT_AZCTRL                   0x8008
+#define L1F_AZCTRL_SHORT_TH_MASK            ASHFT8(0xFFU)
+#define L1F_AZCTRL_SHORT_TH_SHIFT           8
+#define L1F_AZCTRL_LONG_TH_MASK             ASHFT0(0xFFU)
+#define L1F_AZCTRL_LONG_TH_SHIFT            0
+#define L1F_AZCTRL_DEF                      0x1629
+
+#define L1F_MIIEXT_AZCTRL2                  0x8009
+#define L1F_AZCTRL2_WAKETRNING_MASK         ASHFT8(0xFFU)
+#define L1F_AZCTRL2_WAKETRNING_SHIFT        8
+#define L1F_AZCTRL2_QUIET_TIMER_MASK        ASHFT6(3U)
+#define L1F_AZCTRL2_QUIET_TIMER_SHIFT       6
+#define L1F_AZCTRL2_PHAS_JMP2               BIT(4)
+#define L1F_AZCTRL2_CLKTRCV_125MD16         BIT(3)
+#define L1F_AZCTRL2_GATE1000_EN             BIT(2)
+#define L1F_AZCTRL2_AVRG_FREQ               BIT(1)
+#define L1F_AZCTRL2_PHAS_JMP4               BIT(0)
+#define L1F_AZCTRL2_DEF                     0x32C0
+
+#define L1F_MIIEXT_AZCTRL6                  0x800D
+
+#define L1F_MIIEXT_VDRVBIAS                 0x8062
+#define L1F_VDRVBIAS_SEL_MASK               ASHFT0(0x3U)
+#define L1F_VDRVBIAS_SEL_SHIFT              0
+#define L1F_VDRVBIAS_DEF                    0x3
+
+/********* dev 7 **********/
+#define L1F_MIIEXT_ANEG                     7
+
+#define L1F_MIIEXT_LOCAL_EEEADV             0x3C
+#define L1F_LOCAL_EEEADV_1000BT             BIT(2)
+#define L1F_LOCAL_EEEADV_100BT              BIT(1)
+
+#define L1F_MIIEXT_REMOTE_EEEADV            0x3D
+#define L1F_REMOTE_EEEADV_1000BT            BIT(2)
+#define L1F_REMOTE_EEEADV_100BT             BIT(1)
+
+#define L1F_MIIEXT_EEE_ANEG                 0x8000
+#define L1F_EEE_ANEG_1000M                  BIT(2)
+#define L1F_EEE_ANEG_100M                   BIT(1)
+
+#define L1F_MIIEXT_AFE                      0x801A
+#define L1F_AFE_10BT_100M_TH                BIT(6)
+
+
+#define L1F_MIIEXT_NLP34                    0x8025
+#define L1F_MIIEXT_NLP34_DEF                0x1010  /* for 160m */
+
+#define L1F_MIIEXT_NLP56                    0x8026
+#define L1F_MIIEXT_NLP56_DEF                0x1010  /* for 160m */
+
+#define L1F_MIIEXT_NLP78                    0x8027
+#define L1F_MIIEXT_NLP78_160M_DEF           0x8D05  /* for 160m */
+#define L1F_MIIEXT_NLP78_120M_DEF           0x8A05  /* for 120m */
+
+
+
+/******************************************************************************/
+
+/* functions */
+
+
+/* get permanent mac address from hardware
+ * return
+ *    0: success
+ *    non-0: fail
+ */
+u16 l1f_get_perm_macaddr(struct alx_hw *hw, u8 *addr);
+
+
+/* reset mac & dma
+ * return
+ *     0: success
+ *     non-0: fail
+ */
+u16 l1f_reset_mac(struct alx_hw *hw);
+
+/* reset phy
+ * return
+ *    0: success
+ *    non-0: fail
+ */
+u16 l1f_reset_phy(struct alx_hw *hw, bool pws_en, bool az_en, bool ptp_en);
+
+
+/* reset pcie
+ * only resets PCIe-related registers (pci command, clk, aspm, ...)
+ * return
+ *    0: success
+ *    non-0: fail
+ */
+u16 l1f_reset_pcie(struct alx_hw *hw, bool l0s_en, bool l1_en);
+
+
+/* disable/enable MAC/RXQ/TXQ
+ * en
+ *    true: enable
+ *    false: disable
+ * return
+ *    0: success
+ *    non-0: fail
+ */
+u16 l1f_enable_mac(struct alx_hw *hw, bool en, u16 en_ctrl);
+
+
+/* enable/disable aspm support
+ * this changes settings for phy/mac/pcie
+ */
+u16 l1f_enable_aspm(struct alx_hw *hw, bool l0s_en, bool l1_en, u8 lnk_stat);
+
+
+/* initialize phy for speed / flow control
+ * lnk_cap
+ *    in autoneg mode: the link capabilities to advertise to the peer
+ *    in force mode: the forced speed/duplex
+ */
+u16 l1f_init_phy_spdfc(struct alx_hw *hw, bool auto_neg,
+		       u8 lnk_cap, bool fc_en);
+
+/* apply post-link settings on the phy when a link up/down event occurs
+ */
+u16 l1f_post_phy_link(struct alx_hw *hw, bool linkon, u8 wire_spd);
+
+
+/* apply power saving settings before entering suspend mode
+ * NOTE:
+ *    1. the phy link must be established before calling this function
+ *    2. the wol options (pattern, magic, link, etc.) must be configured
+ *       before calling it.
+ */
+u16 l1f_powersaving(struct alx_hw *hw, u8 wire_spd, bool wol_en,
+		    bool mahw_en, bool macrx_en, bool pws_en);
+
+/* read phy register */
+u16 l1f_read_phy(struct alx_hw *hw, bool ext, u8 dev, bool fast, u16 reg,
+		 u16 *data);
+
+/* write phy register */
+u16 l1f_write_phy(struct alx_hw *hw, bool ext, u8 dev,  bool fast, u16 reg,
+		  u16 data);
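+
+/*
+ * Usage sketch (illustrative, assuming 'hw' is an initialized alx_hw):
+ * extended registers are addressed through the MIIEXT device constants
+ * defined above, e.g. reading the local EEE advertisement from dev 7:
+ *
+ *	u16 val, err;
+ *	bool adv_1000bt;
+ *
+ *	err = l1f_read_phy(hw, true, L1F_MIIEXT_ANEG, false,
+ *			   L1F_MIIEXT_LOCAL_EEEADV, &val);
+ *	adv_1000bt = (err == LX_ERR_SUCCESS) &&
+ *		     (val & L1F_LOCAL_EEEADV_1000BT);
+ */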
+
+/* phy debug port */
+u16 l1f_read_phydbg(struct alx_hw *hw, bool fast, u16 reg, u16 *data);
+u16 l1f_write_phydbg(struct alx_hw *hw, bool fast, u16 reg, u16 data);
+
+
+/* check the configuration of the PHY */
+u16 l1f_get_phy_config(struct alx_hw *hw);
+
+/*
+ * basic mac initialization
+ *  most high-level features are left uninitialized
+ *  MAC/PHY should be reset before calling this function
+ */
+u16 l1f_init_mac(struct alx_hw *hw, u8 *addr, u32 txmem_hi,
+		 u32 *tx_mem_lo, u8 tx_qnum, u16 txring_sz,
+		 u32 rxmem_hi, u32 rfdmem_lo, u32 rrdmem_lo,
+		 u16 rxring_sz, u16 rxbuf_sz, u16 smb_timer,
+		 u16 mtu, u16 int_mod, bool hash_legacy);
+
+
+
+#endif /* L1F_HW_H_ */
+
diff -Nruw linux-4.4.115-fbx/drivers/net/ethernet/atheros/alx_prop./alx_ethtool.c linux-4.4.115-fbx/drivers/net/ethernet/atheros/alx_prop/alx_ethtool.c
--- linux-4.4.115-fbx/drivers/net/ethernet/atheros/alx_prop./alx_ethtool.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/net/ethernet/atheros/alx_prop/alx_ethtool.c	2019-01-22 16:16:24.927259302 +0100
@@ -0,0 +1,659 @@
+/*
+ * Copyright (c) 2012 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/netdevice.h>
+#include <linux/ethtool.h>
+#include <linux/slab.h>
+
+#include "alx.h"
+#include "alx_hwcom.h"
+
+extern void *ipc_alx_log_ctxt;
+
+#ifdef ETHTOOL_OPS_COMPAT
+#include "alx_compat_ethtool.c"
+#endif
+
+/* Ethtool Stats API Structs */
+#define ALX_STAT(m) \
+	sizeof(((struct alx_adapter *)0)->m), offsetof(struct alx_adapter, m)
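+
+/*
+ * A worked expansion (illustrative): ALX_STAT(hw_stats.rx_ok) supplies
+ * the two trailing initializers of struct alx_stats below, i.e.
+ *
+ *	sizeof(((struct alx_adapter *)0)->hw_stats.rx_ok),
+ *	offsetof(struct alx_adapter, hw_stats.rx_ok)
+ *
+ * which fill .sizeof_stat and .stat_offset for the generic readout in
+ * alx_get_ethtool_stats().
+ */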
+
+/* For Ethtool HW MAC Stats */
+struct alx_stats {
+	char stat_string[ETH_GSTRING_LEN];
+	int sizeof_stat;
+	int stat_offset;
+};
+
+static struct alx_stats alx_gstrings_stats[] = {
+	{"rx: total pkts                 ", ALX_STAT(hw_stats.rx_ok)},
+	{"rx: bcast pkts                 ", ALX_STAT(hw_stats.rx_bcast)},
+	{"rx: mcast pkts                 ", ALX_STAT(hw_stats.rx_mcast)},
+	{"rx: pause pkts                 ", ALX_STAT(hw_stats.rx_pause)},
+	{"rx: ctrl pkts                  ", ALX_STAT(hw_stats.rx_ctrl)},
+	{"rx: fcs_err pkts               ", ALX_STAT(hw_stats.rx_fcs_err)},
+	{"rx: len_err pkts               ", ALX_STAT(hw_stats.rx_len_err)},
+	{"rx: rx total bytes cnt         ", ALX_STAT(hw_stats.rx_byte_cnt)},
+	{"rx: rx runt pkts               ", ALX_STAT(hw_stats.rx_runt)},
+	{"rx: rx fragment pkts           ", ALX_STAT(hw_stats.rx_frag)},
+	{"rx: 64_bytes_pkts              ", ALX_STAT(hw_stats.rx_sz_64B)},
+	{"rx: 65_to_127_bytes_pkts       ", ALX_STAT(hw_stats.rx_sz_127B)},
+	{"rx: rx_128_to_255_bytes_pkts   ", ALX_STAT(hw_stats.rx_sz_255B)},
+	{"rx: rx_256_to_511_bytes_pkts   ", ALX_STAT(hw_stats.rx_sz_511B)},
+	{"rx: rx_512_to_1023_bytes_pkts  ", ALX_STAT(hw_stats.rx_sz_1023B)},
+	{"rx: rx_1024_to_1518_bytes_pkts ", ALX_STAT(hw_stats.rx_sz_1518B)},
+	{"rx: rx_1519_to_max_bytes_pkts  ", ALX_STAT(hw_stats.rx_sz_max)},
+	{"rx: rx_oversize_pkts           ", ALX_STAT(hw_stats.rx_ov_sz)},
+	{"rx: rx_fifo_overflow_drop_pkts ", ALX_STAT(hw_stats.rx_ov_rxf)},
+	{"rx: rx_no_rrd_drop_pkts        ", ALX_STAT(hw_stats.rx_ov_rrd)},
+	{"rx: rx_align_error pkts        ", ALX_STAT(hw_stats.rx_align_err)},
+	{"rx: rx_addr_err_filtering pkts ", ALX_STAT(hw_stats.rx_err_addr)},
+	{"tx: total pkts                 ", ALX_STAT(hw_stats.tx_ok)},
+	{"tx: bcast pkts                 ", ALX_STAT(hw_stats.tx_bcast)},
+	{"tx: mcast pkts                 ", ALX_STAT(hw_stats.tx_mcast)},
+	{"tx: pause pkts                 ", ALX_STAT(hw_stats.tx_pause)},
+	{"tx: exc_deffer pkts            ", ALX_STAT(hw_stats.tx_exc_defer)},
+	{"tx: ctrl pkts                  ", ALX_STAT(hw_stats.tx_ctrl)},
+	{"tx: deffer pkts                ", ALX_STAT(hw_stats.tx_defer)},
+	{"tx: tx total bytes cnt         ", ALX_STAT(hw_stats.tx_byte_cnt)},
+	{"tx: 64_bytes_pkts              ", ALX_STAT(hw_stats.tx_sz_64B)},
+	{"tx: 65_to_127_bytes_pkts       ", ALX_STAT(hw_stats.tx_sz_127B)},
+	{"tx: 128_to_255_bytes_pkts      ", ALX_STAT(hw_stats.tx_sz_255B)},
+	{"tx: 256_to_511_bytes_pkts      ", ALX_STAT(hw_stats.tx_sz_511B)},
+	{"tx: 512_to_1023_bytes_pkts     ", ALX_STAT(hw_stats.tx_sz_1023B)},
+	{"tx: 1024_to_1518_bytes_pkts    ", ALX_STAT(hw_stats.tx_sz_1518B)},
+	{"tx: 1519_to_max_bytes_pkts     ", ALX_STAT(hw_stats.tx_sz_max)},
+	{"tx: pkts_wo_single_coll        ", ALX_STAT(hw_stats.tx_single_col)},
+	{"tx: ptks_wo_multi_coll         ", ALX_STAT(hw_stats.tx_multi_col)},
+	{"tx: pkts_wi_late_coll          ", ALX_STAT(hw_stats.tx_late_col)},
+	{"tx: pkts_abort_for_coll        ", ALX_STAT(hw_stats.tx_abort_col)},
+	{"tx: underrun pkts              ", ALX_STAT(hw_stats.tx_underrun)},
+	{"tx: rd_beyond_eop pkts         ", ALX_STAT(hw_stats.tx_trd_eop)},
+	{"tx: length_err pkts            ", ALX_STAT(hw_stats.tx_len_err)},
+	{"tx: trunc_err pkts             ", ALX_STAT(hw_stats.tx_trunc)},
+};
+
+static int alx_get_settings(struct net_device *netdev,
+			    struct ethtool_cmd *ecmd)
+{
+	struct alx_adapter *adpt = netdev_priv(netdev);
+	struct alx_hw *hw = &adpt->hw;
+	u32 link_speed = hw->link_speed;
+	bool link_up = hw->link_up;
+
+	ecmd->supported = (SUPPORTED_10baseT_Half  |
+			   SUPPORTED_10baseT_Full  |
+			   SUPPORTED_100baseT_Half |
+			   SUPPORTED_100baseT_Full |
+			   SUPPORTED_Autoneg       |
+			   SUPPORTED_TP);
+	if (CHK_HW_FLAG(GIGA_CAP))
+		ecmd->supported |= SUPPORTED_1000baseT_Full;
+
+	ecmd->advertising = ADVERTISED_TP;
+
+	ecmd->advertising |= ADVERTISED_Autoneg;
+	ecmd->advertising |= hw->autoneg_advertised;
+
+	ecmd->port = PORT_TP;
+	ecmd->phy_address = 0;
+	ecmd->autoneg = AUTONEG_ENABLE;
+	ecmd->transceiver = XCVR_INTERNAL;
+
+	if (!in_interrupt()) {
+		hw->cbs.check_phy_link(hw, &link_speed, &link_up);
+		hw->link_speed = link_speed;
+		hw->link_up = link_up;
+	}
+
+	if (link_up) {
+		switch (link_speed) {
+		case ALX_LINK_SPEED_10_HALF:
+			ethtool_cmd_speed_set(ecmd, SPEED_10);
+			ecmd->duplex = DUPLEX_HALF;
+			break;
+		case ALX_LINK_SPEED_10_FULL:
+			ethtool_cmd_speed_set(ecmd, SPEED_10);
+			ecmd->duplex = DUPLEX_FULL;
+			break;
+		case ALX_LINK_SPEED_100_HALF:
+			ethtool_cmd_speed_set(ecmd, SPEED_100);
+			ecmd->duplex = DUPLEX_HALF;
+			break;
+		case ALX_LINK_SPEED_100_FULL:
+			ethtool_cmd_speed_set(ecmd, SPEED_100);
+			ecmd->duplex = DUPLEX_FULL;
+			break;
+		case ALX_LINK_SPEED_1GB_FULL:
+			ethtool_cmd_speed_set(ecmd, SPEED_1000);
+			ecmd->duplex = DUPLEX_FULL;
+			break;
+		default:
+			ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN);
+			ecmd->duplex = DUPLEX_UNKNOWN;
+			break;
+		}
+	} else {
+		ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN);
+		ecmd->duplex = DUPLEX_UNKNOWN;
+	}
+
+	return 0;
+}
+
+
+static int alx_set_settings(struct net_device *netdev,
+			    struct ethtool_cmd *ecmd)
+{
+	struct alx_adapter *adpt = netdev_priv(netdev);
+	struct alx_hw *hw = &adpt->hw;
+	u32 advertised, old;
+	int error = 0;
+
+	while (CHK_ADPT_FLAG(1, STATE_RESETTING))
+		msleep(20);
+	SET_ADPT_FLAG(1, STATE_RESETTING);
+
+	_IPC_INFO("ALX: ethtool cmd autoneg %d, speed %d, duplex %d\n",
+           ecmd->autoneg, ecmd->speed, ecmd->duplex);
+
+	old = hw->autoneg_advertised;
+	advertised = 0;
+	if (ecmd->autoneg == AUTONEG_ENABLE) {
+		advertised = ALX_LINK_SPEED_DEFAULT;
+	} else {
+		u32 speed = ethtool_cmd_speed(ecmd);
+		if (speed == SPEED_1000) {
+			if (ecmd->duplex != DUPLEX_FULL) {
+				dev_warn(&adpt->pdev->dev,
+					 "1000M half is invalid\n");
+				CLI_ADPT_FLAG(1, STATE_RESETTING);
+				return -EINVAL;
+			}
+			advertised = ALX_LINK_SPEED_1GB_FULL;
+		} else if (speed == SPEED_100) {
+			if (ecmd->duplex == DUPLEX_FULL)
+				advertised = ALX_LINK_SPEED_100_FULL;
+			else
+				advertised = ALX_LINK_SPEED_100_HALF;
+		} else {
+			if (ecmd->duplex == DUPLEX_FULL)
+				advertised = ALX_LINK_SPEED_10_FULL;
+			else
+				advertised = ALX_LINK_SPEED_10_HALF;
+		}
+	}
+
+	if (hw->autoneg_advertised == advertised) {
+		CLI_ADPT_FLAG(1, STATE_RESETTING);
+		return error;
+	}
+
+	error = hw->cbs.setup_phy_link_speed(hw, advertised, true,
+			!hw->disable_fc_autoneg);
+	if (error) {
+		dev_err(&adpt->pdev->dev,
+			"setup link failed with code %d\n", error);
+		hw->cbs.setup_phy_link_speed(hw, old, true,
+				!hw->disable_fc_autoneg);
+	}
+
+	alx_stop_internal(adpt, ALX_OPEN_CTRL_RESET_MAC);
+	alx_open_internal(adpt, ALX_OPEN_CTRL_RESET_MAC);
+
+	CLI_ADPT_FLAG(1, STATE_RESETTING);
+	return error;
+}
+
+
+static void alx_get_pauseparam(struct net_device *netdev,
+			       struct ethtool_pauseparam *pause)
+{
+	struct alx_adapter *adpt = netdev_priv(netdev);
+	struct alx_hw *hw = &adpt->hw;
+
+
+	if (hw->disable_fc_autoneg ||
+	    hw->cur_fc_mode == alx_fc_none)
+		pause->autoneg = 0;
+	else
+		pause->autoneg = 1;
+
+	if (hw->cur_fc_mode == alx_fc_rx_pause) {
+		pause->rx_pause = 1;
+	} else if (hw->cur_fc_mode == alx_fc_tx_pause) {
+		pause->tx_pause = 1;
+	} else if (hw->cur_fc_mode == alx_fc_full) {
+		pause->rx_pause = 1;
+		pause->tx_pause = 1;
+	}
+}
+
+
+static int alx_set_pauseparam(struct net_device *netdev,
+			      struct ethtool_pauseparam *pause)
+{
+	struct alx_adapter *adpt = netdev_priv(netdev);
+	struct alx_hw *hw = &adpt->hw;
+	enum alx_fc_mode req_fc_mode;
+	bool disable_fc_autoneg;
+	int retval;
+
+	while (CHK_ADPT_FLAG(1, STATE_RESETTING))
+		msleep(20);
+	SET_ADPT_FLAG(1, STATE_RESETTING);
+
+	req_fc_mode        = hw->req_fc_mode;
+	disable_fc_autoneg = hw->disable_fc_autoneg;
+
+
+	if (pause->autoneg != AUTONEG_ENABLE)
+		disable_fc_autoneg = true;
+	else
+		disable_fc_autoneg = false;
+
+	if ((pause->rx_pause && pause->tx_pause) || pause->autoneg)
+		req_fc_mode = alx_fc_full;
+	else if (pause->rx_pause && !pause->tx_pause)
+		req_fc_mode = alx_fc_rx_pause;
+	else if (!pause->rx_pause && pause->tx_pause)
+		req_fc_mode = alx_fc_tx_pause;
+	else if (!pause->rx_pause && !pause->tx_pause)
+		req_fc_mode = alx_fc_none;
+	else
+		return -EINVAL;
+
+	if ((hw->req_fc_mode != req_fc_mode) ||
+	    (hw->disable_fc_autoneg != disable_fc_autoneg)) {
+		hw->req_fc_mode = req_fc_mode;
+		hw->disable_fc_autoneg = disable_fc_autoneg;
+		if (!hw->disable_fc_autoneg)
+			retval = hw->cbs.setup_phy_link(hw,
+				hw->autoneg_advertised, true, true);
+
+		if (hw->cbs.config_fc)
+			hw->cbs.config_fc(hw);
+	}
+
+	CLI_ADPT_FLAG(1, STATE_RESETTING);
+	return 0;
+}
+
+
+static u32 alx_get_msglevel(struct net_device *netdev)
+{
+	struct alx_adapter *adpt = netdev_priv(netdev);
+	return adpt->msg_enable;
+}
+
+
+static void alx_set_msglevel(struct net_device *netdev, u32 data)
+{
+	struct alx_adapter *adpt = netdev_priv(netdev);
+	adpt->msg_enable = data;
+}
+
+
+static int alx_get_regs_len(struct net_device *netdev)
+{
+	struct alx_adapter *adpt = netdev_priv(netdev);
+	struct alx_hw *hw = &adpt->hw;
+	return hw->hwreg_sz * sizeof(u32);
+}
+
+
+static void alx_get_regs(struct net_device *netdev,
+			 struct ethtool_regs *regs, void *buff)
+{
+	struct alx_adapter *adpt = netdev_priv(netdev);
+	struct alx_hw *hw = &adpt->hw;
+
+	regs->version = 0;
+
+	memset(buff, 0, hw->hwreg_sz * sizeof(u32));
+	if (hw->cbs.get_ethtool_regs)
+		hw->cbs.get_ethtool_regs(hw, buff);
+}
+
+
+static int alx_get_eeprom_len(struct net_device *netdev)
+{
+	struct alx_adapter *adpt = netdev_priv(netdev);
+	struct alx_hw *hw = &adpt->hw;
+	return hw->eeprom_sz;
+}
+
+
+static int alx_get_eeprom(struct net_device *netdev,
+			  struct ethtool_eeprom *eeprom, u8 *bytes)
+{
+	struct alx_adapter *adpt = netdev_priv(netdev);
+	struct alx_hw *hw = &adpt->hw;
+	bool eeprom_exist = false;
+	u32 *eeprom_buff;
+	int first_dword, last_dword;
+	int retval = 0;
+	int i;
+
+	if (eeprom->len == 0)
+		return -EINVAL;
+
+	if (hw->cbs.check_nvram)
+		hw->cbs.check_nvram(hw, &eeprom_exist);
+	if (!eeprom_exist)
+		return -EOPNOTSUPP;
+
+	eeprom->magic = adpt->pdev->vendor |
+			(adpt->pdev->device << 16);
+
+	first_dword = eeprom->offset >> 2;
+	last_dword = (eeprom->offset + eeprom->len - 1) >> 2;
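+	/*
+	 * Worked example (illustrative): offset = 6, len = 3 touches bytes
+	 * 6..8, so first_dword = 1 and last_dword = 2; dwords 1..2 (bytes
+	 * 4..11) are read, hence the inclusive loop bound below.
+	 */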
+
+	eeprom_buff = kmalloc(sizeof(u32) *
+			(last_dword - first_dword + 1), GFP_KERNEL);
+	if (eeprom_buff == NULL)
+		return -ENOMEM;
+
+	for (i = first_dword; i <= last_dword; i++) {
+		if (hw->cbs.read_nvram) {
+			retval = hw->cbs.read_nvram(hw, i*4,
+					&(eeprom_buff[i-first_dword]));
+			if (retval) {
+				retval =  -EIO;
+				goto out;
+			}
+		}
+	}
+
+	/* Device's eeprom is always little-endian, word addressable */
+	for (i = 0; i < last_dword - first_dword + 1; i++)
+		le32_to_cpus(&eeprom_buff[i]);
+
+	memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 3), eeprom->len);
+out:
+	kfree(eeprom_buff);
+	return retval;
+}
+
+
+static int alx_set_eeprom(struct net_device *netdev,
+			  struct ethtool_eeprom *eeprom, u8 *bytes)
+{
+	struct alx_adapter *adpt = netdev_priv(netdev);
+	struct alx_hw *hw = &adpt->hw;
+	bool eeprom_exist = false;
+	u32 *eeprom_buff;
+	u32 *ptr;
+	int first_dword, last_dword;
+	int retval = 0;
+	int i;
+
+	if (eeprom->len == 0)
+		return -EINVAL;
+
+	if (hw->cbs.check_nvram)
+		hw->cbs.check_nvram(hw, &eeprom_exist);
+	if (!eeprom_exist)
+		return -EOPNOTSUPP;
+
+
+	if (eeprom->magic != (adpt->pdev->vendor |
+				(adpt->pdev->device << 16)))
+		return -EINVAL;
+
+	first_dword = eeprom->offset >> 2;
+	last_dword = (eeprom->offset + eeprom->len - 1) >> 2;
+	eeprom_buff = kmalloc(ALX_MAX_EEPROM_LEN, GFP_KERNEL);
+	if (eeprom_buff == NULL)
+		return -ENOMEM;
+
+	ptr = (u32 *)eeprom_buff;
+
+	if (eeprom->offset & 3) {
+		/* need read/modify/write of first changed EEPROM word */
+		/* only the second byte of the word is being modified */
+		if (hw->cbs.read_nvram) {
+			retval = hw->cbs.read_nvram(hw, first_dword * 4,
+						&(eeprom_buff[0]));
+			if (retval) {
+				retval = -EIO;
+				goto out;
+			}
+		}
+		ptr++;
+	}
+
+	if (((eeprom->offset + eeprom->len) & 3)) {
+		/* need read/modify/write of last changed EEPROM word */
+		/* only the first byte of the word is being modified */
+		if (hw->cbs.read_nvram) {
+			retval = hw->cbs.read_nvram(hw, last_dword * 4,
+				&(eeprom_buff[last_dword - first_dword]));
+			if (retval) {
+				retval = -EIO;
+				goto out;
+			}
+		}
+	}
+
+	/* Device's eeprom is always little-endian, word addressable */
+	memcpy(ptr, bytes, eeprom->len);
+	for (i = 0; i < last_dword - first_dword + 1; i++) {
+		if (hw->cbs.write_nvram) {
+			retval = hw->cbs.write_nvram(hw, (first_dword + i) * 4,
+						eeprom_buff[i]);
+			if (retval) {
+				retval = -EIO;
+				goto out;
+			}
+		}
+	}
+out:
+	kfree(eeprom_buff);
+	return retval;
+}
+
+
+static void alx_get_drvinfo(struct net_device *netdev,
+			    struct ethtool_drvinfo *drvinfo)
+{
+	struct alx_adapter *adpt = netdev_priv(netdev);
+
+	strlcpy(drvinfo->driver,  alx_drv_name, sizeof(drvinfo->driver));
+	strlcpy(drvinfo->fw_version, "alx", 32);
+	strlcpy(drvinfo->bus_info, pci_name(adpt->pdev),
+		sizeof(drvinfo->bus_info));
+	drvinfo->n_stats = 0;
+	drvinfo->testinfo_len = 0;
+	drvinfo->regdump_len = adpt->hw.hwreg_sz;
+	drvinfo->eedump_len = adpt->hw.eeprom_sz;
+}
+
+
+static int alx_wol_exclusion(struct alx_adapter *adpt,
+			     struct ethtool_wolinfo *wol)
+{
+	struct alx_hw *hw = &adpt->hw;
+	int retval = 1;
+
+	/* WOL not supported except for the following */
+	switch (hw->pci_devid) {
+	case ALX_DEV_ID_AR8131:
+	case ALX_DEV_ID_AR8132:
+	case ALX_DEV_ID_AR8151_V1:
+	case ALX_DEV_ID_AR8151_V2:
+	case ALX_DEV_ID_AR8152_V1:
+	case ALX_DEV_ID_AR8152_V2:
+	case ALX_DEV_ID_AR8161:
+	case ALX_DEV_ID_AR8162:
+		retval = 0;
+		break;
+	default:
+		wol->supported = 0;
+	}
+
+	return retval;
+}
+
+
+static void alx_get_wol(struct net_device *netdev,
+			struct ethtool_wolinfo *wol)
+{
+	struct alx_adapter *adpt = netdev_priv(netdev);
+
+	wol->supported = WAKE_MAGIC | WAKE_PHY;
+	wol->wolopts = 0;
+
+	if (adpt->wol & ALX_WOL_MAGIC)
+		wol->wolopts |= WAKE_MAGIC;
+	if (adpt->wol & ALX_WOL_PHY)
+		wol->wolopts |= WAKE_PHY;
+
+	netif_info(adpt, wol, adpt->netdev,
+		   "wol->wolopts = %x\n", wol->wolopts);
+}
+
+
+static int alx_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
+{
+	struct alx_adapter *adpt = netdev_priv(netdev);
+
+	if (wol->wolopts & (WAKE_ARP | WAKE_MAGICSECURE |
+			    WAKE_UCAST | WAKE_BCAST | WAKE_MCAST))
+		return -EOPNOTSUPP;
+
+	if (alx_wol_exclusion(adpt, wol))
+		return wol->wolopts ? -EOPNOTSUPP : 0;
+
+	adpt->wol = 0;
+
+	if (wol->wolopts & WAKE_MAGIC)
+		adpt->wol |= ALX_WOL_MAGIC;
+	if (wol->wolopts & WAKE_PHY)
+		adpt->wol |= ALX_WOL_PHY;
+
+	device_set_wakeup_enable(&adpt->pdev->dev, adpt->wol);
+
+	return 0;
+}
+
+
+static int alx_nway_reset(struct net_device *netdev)
+{
+	struct alx_adapter *adpt = netdev_priv(netdev);
+	if (netif_running(netdev))
+		alx_reinit_locked(adpt);
+	return 0;
+}
+
+static void alx_get_ethtool_stats(struct net_device *netdev,
+					struct ethtool_stats *stats, u64 *data)
+{
+	struct alx_adapter *adpt = netdev_priv(netdev);
+	int i;
+	char *p;
+
+	/* Update the current stats from HW */
+	alx_update_hw_stats(adpt);
+	for (i = 0; i < ARRAY_SIZE(alx_gstrings_stats); i++) {
+		p = (char *)adpt + alx_gstrings_stats[i].stat_offset;
+		data[i] = (alx_gstrings_stats[i].sizeof_stat ==
+				sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
+	}
+}
+
+static void alx_get_strings(struct net_device *netdev, u32 stringset,
+						u8 *data)
+{
+	u8 *p = data;
+	int i;
+
+	switch (stringset) {
+	case ETH_SS_STATS:
+		for (i = 0; i < ARRAY_SIZE(alx_gstrings_stats); i++) {
+			memcpy(p, alx_gstrings_stats[i].stat_string,
+				ETH_GSTRING_LEN);
+			p += ETH_GSTRING_LEN;
+		}
+		break;
+	}
+}
+
+static int alx_get_sset_count(struct net_device *netdev, int sset)
+{
+	switch (sset) {
+	case ETH_SS_STATS:
+		return ARRAY_SIZE(alx_gstrings_stats);
+	default:
+		return -EOPNOTSUPP;
+	}
+}
+
+static void alx_get_ringparam(struct net_device *netdev,
+			      struct ethtool_ringparam *ring)
+{
+	struct alx_adapter *adpt = netdev_priv(netdev);
+
+	ring->rx_pending = adpt->num_rxdescs;
+	ring->tx_pending = adpt->num_txdescs;
+}
+
+static int alx_set_ringparam(struct net_device *netdev,
+			     struct ethtool_ringparam *ring)
+{
+	struct alx_adapter *adpt = netdev_priv(netdev);
+	int retval = 0;
+
+	adpt->num_txdescs = clamp_t(u32, ring->tx_pending,
+				    MIN_TX_DESC, MAX_TX_DESC);
+	adpt->num_rxdescs = clamp_t(u32, ring->rx_pending,
+				    MIN_TX_DESC, MAX_TX_DESC);
+	if (netif_running(netdev))
+		retval = alx_resize_rings(netdev);
+	return retval;
+}
+
+
+static const struct ethtool_ops alx_ethtool_ops = {
+	.get_settings      = alx_get_settings,
+	.set_settings      = alx_set_settings,
+	.get_pauseparam    = alx_get_pauseparam,
+	.set_pauseparam    = alx_set_pauseparam,
+	.get_drvinfo       = alx_get_drvinfo,
+	.get_regs_len      = alx_get_regs_len,
+	.get_regs          = alx_get_regs,
+	.get_wol           = alx_get_wol,
+	.set_wol           = alx_set_wol,
+	.get_msglevel      = alx_get_msglevel,
+	.set_msglevel      = alx_set_msglevel,
+	.nway_reset        = alx_nway_reset,
+	.get_link          = ethtool_op_get_link,
+	.get_eeprom_len    = alx_get_eeprom_len,
+	.get_eeprom        = alx_get_eeprom,
+	.set_eeprom        = alx_set_eeprom,
+	.get_strings       = alx_get_strings,
+	.get_ringparam     = alx_get_ringparam,
+	.set_ringparam     = alx_set_ringparam,
+	.get_ethtool_stats = alx_get_ethtool_stats,
+	.get_sset_count    = alx_get_sset_count,
+};
+
+
+void alx_set_ethtool_ops(struct net_device *netdev)
+{
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0))
+	netdev_set_default_ethtool_ops(netdev, &alx_ethtool_ops);
+#else
+	SET_ETHTOOL_OPS(netdev, &alx_ethtool_ops);
+#endif
+}
diff -Nruw linux-4.4.115-fbx/drivers/net/ethernet/atheros/alx_prop./alx.h linux-4.4.115-fbx/drivers/net/ethernet/atheros/alx_prop/alx.h
--- linux-4.4.115-fbx/drivers/net/ethernet/atheros/alx_prop./alx.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/net/ethernet/atheros/alx_prop/alx.h	2019-01-22 16:16:24.927259302 +0100
@@ -0,0 +1,1046 @@
+/*
+ * Copyright (c) 2012 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _ALX_H_
+#define _ALX_H_
+
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/vmalloc.h>
+#include <linux/string.h>
+#include <linux/in.h>
+#include <linux/interrupt.h>
+#include <linux/ip.h>
+#include <linux/tcp.h>
+#include <linux/sctp.h>
+#include <linux/pkt_sched.h>
+#include <linux/ipv6.h>
+#include <linux/slab.h>
+#include <net/checksum.h>
+#include <net/ip6_checksum.h>
+#include <linux/ethtool.h>
+#include <linux/if_vlan.h>
+#include <linux/mii.h>
+#include <linux/cpumask.h>
+#include <linux/aer.h>
+#include <linux/version.h>
+#include <asm/byteorder.h>
+#include <linux/ipc_logging.h>
+
+#ifdef MDM_PLATFORM
+#include <linux/ipa_odu_bridge.h>
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0))
+#include <linux/msm_pcie.h>
+#else
+#include <mach/msm_pcie.h>
+#endif
+#endif
+
+#include "alx_sw.h"
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0))
+#define __devinit
+#define __devexit
+#define __devexit_p
+#endif
+
+#define ALX_LINK_DOWN_CONFIG 1
+#define ALX_HIB_TASK_CONFIG  1
+#define ALX_HIB_TIMER_CONFIG 1
+#define MIN_TX_DESC 128
+#define MAX_TX_DESC 1024
+#ifdef MDM_PLATFORM
+#define MAX_AR8151_BW 900
+#define ALX_IPA_SYS_PIPE_MAX_PKTS_DESC 200
+#define ALX_IPA_SYS_PIPE_MIN_PKTS_DESC 5
+#define ALX_IPA_SYS_PIPE_DNE_PKTS (ALX_IPA_SYS_PIPE_MAX_PKTS_DESC * 3)
+#define ALX_IPA_INACTIVITY_DELAY_MS 100
+/* Protocol Specific Offsets */
+#define ALX_IP_OFFSET       14
+#define ALX_IP_HEADER_SIZE  20
+#define ALX_DHCP_SRV_PORT   67
+#define ALX_DHCP_CLI_PORT   68
+#endif
+
+/*
+ * Definition to enable some features
+ */
+#undef CONFIG_ALX_MSIX
+#undef CONFIG_ALX_MSI
+#undef CONFIG_ALX_MTQ
+#undef CONFIG_ALX_MRQ
+#undef CONFIG_ALX_RSS
+/* #define CONFIG_ALX_MSIX */
+#define CONFIG_ALX_MSI
+#define CONFIG_ALX_MTQ
+#define CONFIG_ALX_MRQ
+#ifdef CONFIG_ALX_MRQ
+#define CONFIG_ALX_RSS
+#endif
+
+#define IPCLOG_STATE_PAGES 2
+#define __FILENAME__ (strrchr(__FILE__, '/') ? \
+	strrchr(__FILE__, '/') + 1 : __FILE__)
+
+#define IPC_EMERG(s, ...) \
+do { \
+	pr_emerg("%s: %s[%u]: " s, __FILENAME__, \
+	__func__, __LINE__, ##__VA_ARGS__); \
+	if (ipc_alx_log_ctxt) { \
+	ipc_log_string(ipc_alx_log_ctxt, \
+		"%s: %s[%u]: IPC EMERG:" s, __FILENAME__ , \
+		__func__, __LINE__, ##__VA_ARGS__); \
+	} \
+} while (0)
+
+#define IPC_DEBUG(s, ...) \
+do { \
+	pr_debug("%s: %s[%u]: " s, __FILENAME__, \
+	__func__, __LINE__, ##__VA_ARGS__); \
+	if (ipc_alx_log_ctxt) { \
+		ipc_log_string(ipc_alx_log_ctxt, \
+		"%s: %s[%u]: IPC DEBUG:" s, __FILENAME__ , \
+		__func__, __LINE__, ##__VA_ARGS__); \
+	} \
+} while (0)
+
+#define _IPC_INFO(s, ...) \
+do { \
+	pr_info("%s: %s[%u]: " s, __FILENAME__, \
+	__func__, __LINE__, ##__VA_ARGS__); \
+	if (ipc_alx_log_ctxt) { \
+		ipc_log_string(ipc_alx_log_ctxt, \
+		"%s: %s[%u]: IPC INFO:" s, __FILENAME__ , \
+		__func__, __LINE__, ##__VA_ARGS__);\
+	} \
+} while (0)
+
+#define IPC_ERROR(s, ...) \
+do { \
+	pr_err("%s: %s[%u]: " s, __FILENAME__, \
+	__func__, __LINE__, ##__VA_ARGS__); \
+	if (ipc_alx_log_ctxt) { \
+		ipc_log_string(ipc_alx_log_ctxt, \
+		"%s: %s[%u]: IPC ERROR:" s, __FILENAME__ , \
+		__func__, __LINE__, ##__VA_ARGS__);\
+	} \
+} while (0)
+
+#define IPC_WARN(s, ...) \
+do { \
+	pr_warn("%s: %s[%u]: " s, __FILENAME__, \
+	__func__, __LINE__, ##__VA_ARGS__); \
+	if (ipc_alx_log_ctxt) { \
+	ipc_log_string(ipc_alx_log_ctxt, \
+		"%s: %s[%u]: IPC WARN:" s, __FILENAME__ , \
+		__func__, __LINE__, ##__VA_ARGS__);\
+	} \
+} while (0)
+
+#define IPC_DEBUG_LOW(s, ...) \
+do { \
+	pr_debug("%s: %s[%u]: " s, __FILENAME__, \
+	__func__, __LINE__, ##__VA_ARGS__); \
+	if (ipc_alx_log_ctxt_low) { \
+		ipc_log_string(ipc_alx_log_ctxt_low, \
+		"%s: %s[%u]: IPC DEBUG:" s, __FILENAME__ , \
+		__func__, __LINE__, ##__VA_ARGS__); \
+	} \
+} while (0)
+
+#define IPC_INFO_LOW(s, ...) \
+	do { \
+		pr_info("%s: %s[%u]: " s, __FILENAME__, \
+		__func__, __LINE__, ##__VA_ARGS__); \
+		if (ipc_alx_log_ctxt_low) { \
+			ipc_log_string(ipc_alx_log_ctxt_low, \
+			"%s: %s[%u]: IPC INFO:" s, __FILENAME__ , \
+			__func__, __LINE__, ##__VA_ARGS__);\
+		} \
+	} while (0)
+
+#define IPC_ERROR_LOW(s, ...) \
+do { \
+	pr_err("%s: %s[%u]: " s, __FILENAME__, \
+	__func__, __LINE__, ##__VA_ARGS__); \
+	if (ipc_alx_log_ctxt_low) { \
+		ipc_log_string(ipc_alx_log_ctxt_low, \
+		"%s: %s[%u]: IPC ERROR:" s, __FILENAME__ , \
+		__func__, __LINE__, ##__VA_ARGS__);\
+	} \
+} while (0)
+
+#define IPC_WARN_LOW(s, ...) \
+	do { \
+		pr_warn("%s: %s[%u]: " s, __FILENAME__, \
+		__func__, __LINE__, ##__VA_ARGS__); \
+		if (ipc_alx_log_ctxt_low) { \
+		ipc_log_string(ipc_alx_log_ctxt_low, \
+			"%s: %s[%u]: IPC WARN:" s, __FILENAME__ , \
+			__func__, __LINE__, ##__VA_ARGS__);\
+		} \
+	} while (0)
+
+
+#define ALX_MSG_DEFAULT         0
+
+/* Logging functions and macros */
+#define alx_err(adpt, fmt, ...)\
+do { \
+	netdev_err(adpt->netdev, fmt, ##__VA_ARGS__); \
+	if (ipc_alx_log_ctxt_low) { \
+		ipc_log_string(ipc_alx_log_ctxt_low, \
+		"%s: %s[%u]: IPC ALX_ERR:" fmt, __FILENAME__ , \
+		__func__, __LINE__, ##__VA_ARGS__);\
+	} \
+} while (0)
+
+#define alx_netif_dbg(adpt, type, netdev, fmt, ...)\
+	do { \
+		netif_dbg(adpt, type, netdev, fmt, ##__VA_ARGS__); \
+		if (ipc_alx_log_ctxt_low) { \
+			ipc_log_string(ipc_alx_log_ctxt_low, \
+			"%s: %s[%u]: IPC ALX_ERR:" fmt, __FILENAME__ , \
+			__func__, __LINE__, ##__VA_ARGS__);\
+		} \
+	} while (0)
+
+
+#define ALX_VLAN_TO_TAG(_vlan, _tag) \
+	do { \
+		_tag =  ((((_vlan) >> 8) & 0xFF) | (((_vlan) & 0xFF) << 8)); \
+	} while (0)
+
+#define ALX_TAG_TO_VLAN(_tag, _vlan) \
+	do { \
+		_vlan = ((((_tag) >> 8) & 0xFF) | (((_tag) & 0xFF) << 8)); \
+	} while (0)
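+
+/*
+ * Worked example (illustrative): both helpers byte-swap a 16-bit value,
+ * e.g. ALX_VLAN_TO_TAG(0x0123, tag) yields tag == 0x2301 and
+ * ALX_TAG_TO_VLAN(0x2301, vlan) recovers vlan == 0x0123.
+ */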
+
+/* Coalescing Message Block */
+struct coals_msg_block {
+	int test;
+};
+
+
+#define BAR_0   0
+
+#define ALX_DEF_RX_BUF_SIZE	1536
+#define ALX_MAX_JUMBO_PKT_SIZE	(9*1024)
+#define ALX_MAX_TSO_PKT_SIZE	(7*1024)
+
+#define ALX_MAX_ETH_FRAME_SIZE	ALX_MAX_JUMBO_PKT_SIZE
+#define ALX_MIN_ETH_FRAME_SIZE	68
+
+
+#define ALX_MAX_RX_QUEUES	8
+#define ALX_MAX_TX_QUEUES	4
+#define ALX_MAX_HANDLED_INTRS	5
+
+#define ALX_WATCHDOG_TIME   (5 * HZ)
+
+struct alx_cmb {
+	char name[IFNAMSIZ + 9];
+	void *cmb;
+	dma_addr_t dma;
+};
+struct alx_smb {
+	char name[IFNAMSIZ + 9];
+	void *smb;
+	dma_addr_t dma;
+};
+
+
+/*
+ * RRD : definition
+ */
+
+/* general parameter format of rrd */
+struct alx_sw_rrdes_general {
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+	/* dword 0 */
+	u32  xsum:16;
+	u32  nor:4;  /* number of RFD */
+	u32  si:12;  /* start index of rfd-ring */
+	/* dword 1 */
+	u32 hash;
+	/* dword 2 */
+	u32 vlan_tag:16; /* vlan-tag */
+	u32 pid:8;       /* Header Length of Header-Data Split. WORD unit */
+	u32 reserve0:1;
+	u32 rss_cpu:3;   /* CPU number used by RSS */
+	u32 rss_flag:4;  /* rss_flag 0, TCP(IPv6) flag for RSS hash algorithm
+			  * rss_flag 1, IPv6 flag for RSS hash algorithm
+			  * rss_flag 2, TCP(IPv4) flag for RSS hash algorithm
+			  * rss_flag 3, IPv4 flag for RSS hash algorithm */
+	/* dword 3 */
+	u32 pkt_len:14;  /* length of the packet */
+	u32 l4f:1;       /* L4(TCP/UDP) checksum failed */
+	u32 ipf:1;       /* IP checksum failed */
+	u32 vlan_flag:1; /* vlan tag */
+	u32 proto:3;
+	u32 res:1;       /* received error summary */
+	u32 crc:1;       /* crc error */
+	u32 fae:1;       /* frame alignment error */
+	u32 trunc:1;     /* truncated packet, larger than MTU */
+	u32 runt:1;      /* runt packet */
+	u32 icmp:1;      /* incomplete packet due to insufficient rx-desc */
+	u32 bar:1;       /* broadcast address received */
+	u32 mar:1;       /* multicast address received */
+	u32 type:1;      /* ethernet type */
+	u32 fov:1;       /* fifo overflow */
+	u32 lene:1;      /* length error */
+	u32 update:1;    /* update */
+#elif defined(__BIG_ENDIAN_BITFIELD)
+	/* dword 0 */
+	u32  si:12;
+	u32  nor:4;
+	u32  xsum:16;
+	/* dword 1 */
+	u32 hash;
+	/* dword 2 */
+	u32 rss_flag:4;
+	u32 rss_cpu:3;
+	u32 reserve0:1;
+	u32 pid:8;
+	u32 vlan_tag:16;
+	/* dword 3 */
+	u32 update:1;
+	u32 lene:1;
+	u32 fov:1;
+	u32 type:1;
+	u32 mar:1;
+	u32 bar:1;
+	u32 icmp:1;
+	u32 runt:1;
+	u32 trunc:1;
+	u32 fae:1;
+	u32 crc:1;
+	u32 res:1;
+	u32 proto:3;
+	u32 vlan_flag:1;
+	u32 ipf:1;
+	u32 l4f:1;
+	u32 pkt_len:14;
+#else
+#error	"Please fix <asm/byteorder.h>"
+#endif
+};
+
+union alx_hw_rrdesc {
+	/* dword flat format */
+	struct {
+		__le32 dw0;
+		__le32 dw1;
+		__le32 dw2;
+		__le32 dw3;
+	} dfmt;
+
+	/* qword flat format */
+	struct {
+		__le64 qw0;
+		__le64 qw1;
+	} qfmt;
+};
+
+/*
+ * XXX: we should not use this guy, best to just
+ * do all le32_to_cpu() conversions on the spot.
+ */
+union alx_sw_rrdesc {
+	struct alx_sw_rrdes_general genr;
+
+	/* dword flat format */
+	struct {
+		u32 dw0;
+		u32 dw1;
+		u32 dw2;
+		u32 dw3;
+	} dfmt;
+
+	/* qword flat format */
+	struct {
+		u64 qw0;
+		u64 qw1;
+	} qfmt;
+};
+
+/*
+ * RFD : definition
+ */
+
+/* general parameter format of rfd */
+struct alx_sw_rfdes_general {
+	u64   addr;
+};
+
+union alx_hw_rfdesc {
+	/* dword flat format */
+	struct {
+		__le32 dw0;
+		__le32 dw1;
+	} dfmt;
+
+	/* qword flat format */
+	struct {
+		__le64 qw0;
+	} qfmt;
+};
+
+/*
+ * XXX: we should not use this guy, best to just
+ * do all le32_to_cpu() conversions on the spot.
+ */
+union alx_sw_rfdesc {
+	struct alx_sw_rfdes_general genr;
+
+	/* dword flat format */
+	struct {
+		u32 dw0;
+		u32 dw1;
+	} dfmt;
+
+	/* qword flat format */
+	struct {
+		u64 qw0;
+	} qfmt;
+};
+
+/* RRD Proto Definition as per spec */
+enum alx_rrd_proto {
+	RRD_PROTO_NON_IP = 0,
+	RRD_PROTO_IPv4,
+	RRD_PROTO_IPv6_TCP,
+	RRD_PROTO_IPv4_TCP,
+	RRD_PROTO_IPv6_UDP,
+	RRD_PROTO_IPv4_UDP,
+	RRD_PROTO_IPv6,
+	RRD_PROTO_LLDP
+};
+
+/*
+ * TPD : definition
+ */
+
+/* general parameter format of tpd */
+struct alx_sw_tpdes_general {
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+	/* dword 0 */
+	u32  buffer_len:16; /* include 4-byte CRC */
+	u32  vlan_tag:16;
+	/* dword 1 */
+	u32  l4hdr_offset:8; /* l4 header offset to the 1st byte of packet */
+	u32  c_csum:1;
+	u32  ip_csum:1;
+	u32  tcp_csum:1;
+	u32  udp_csum:1;
+	u32  lso:1;
+	u32  lso_v2:1;
+	u32  vtagged:1;   /* vlan-id tagged already */
+	u32  instag:1;    /* insert vlan tag */
+
+	u32  ipv4:1;      /* ipv4 packet */
+	u32  type:1;      /* type of packet (ethernet_ii(0) or snap(1)) */
+	u32  reserve:12;
+	u32  epad:1;      /* pad the packet to an even byte length */
+	u32  last_frag:1; /* last fragment(buffer) of the packet */
+
+	u64  addr;
+#elif defined(__BIG_ENDIAN_BITFIELD)
+	/* dword 0 */
+	u32  vlan_tag:16;
+	u32  buffer_len:16;
+	/* dword 1 */
+	u32  last_frag:1;
+	u32  epad:1;
+	u32  reserve:12;
+	u32  type:1;
+	u32  ipv4:1;
+	u32  instag:1;
+	u32  vtagged:1;
+	u32  lso_v2:1;
+	u32  lso:1;
+	u32  udp_csum:1;
+	u32  tcp_csum:1;
+	u32  ip_csum:1;
+	u32  c_csum:1;
+	u32  l4hdr_offset:8;
+
+	u64  addr;
+#else
+#error	"Please fix <asm/byteorder.h>"
+#endif
+};
+
+/* custom checksum parameter format of tpd */
+struct alx_sw_tpdes_checksum {
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+	/* dword 0 */
+	u32  buffer_len:16;
+	u32  vlan_tag:16;
+	/* dword 1 */
+	u32  payld_offset:8; /* payload offset to the 1st byte of packet */
+	u32  c_csum:1;    /* do custom checksum offload */
+	u32  ip_csum:1;   /* do ip(v4) header checksum offload */
+	u32  tcp_csum:1;  /* do tcp checksum offload, both ipv4 and ipv6 */
+	u32  udp_csum:1;  /* do udp checksum offload, both ipv4 and ipv6 */
+	u32  lso:1;
+	u32  lso_v2:1;
+	u32  vtagged:1;   /* vlan-id tagged already */
+	u32  instag:1;    /* insert vlan tag */
+	u32  ipv4:1;      /* ipv4 packet */
+	u32  type:1;      /* type of packet (ethernet_ii(0) or snap(1)) */
+	u32  cxsum_offset:8;  /* checksum offset to the 1st byte of packet */
+	u32  reserve:4;
+	u32  epad:1;      /* pad the packet to an even byte length */
+	u32  last_frag:1; /* last fragment(buffer) of the packet */
+
+	u64 addr;
+#elif defined(__BIG_ENDIAN_BITFIELD)
+	/* dword 0 */
+	u32  vlan_tag:16;
+	u32  buffer_len:16;
+	/* dword 1 */
+	u32  last_frag:1;
+	u32  epad:1;
+	u32  reserve:4;
+	u32  cxsum_offset:8;
+	u32  type:1;
+	u32  ipv4:1;
+	u32  instag:1;
+	u32  vtagged:1;
+	u32  lso_v2:1;
+	u32  lso:1;
+	u32  udp_csum:1;
+	u32  tcp_csum:1;
+	u32  ip_csum:1;
+	u32  c_csum:1;
+	u32  payld_offset:8;
+
+	u64  addr;
+#else
+#error	"Please fix <asm/byteorder.h>"
+#endif
+};
+
+
+/* tcp large send format (v1/v2) of tpd */
+struct alx_sw_tpdes_tso {
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+	/* dword 0 */
+	u32  buffer_len:16; /* include 4-byte CRC */
+	u32  vlan_tag:16;
+	/* dword 1 */
+	u32  tcphdr_offset:8; /* tcp hdr offset to the 1st byte of packet */
+	u32  c_csum:1;
+	u32  ip_csum:1;
+	u32  tcp_csum:1;
+	u32  udp_csum:1;
+	u32  lso:1;       /* do tcp large send (ipv4 only) */
+	u32  lso_v2:1;    /* must be 0 in this format */
+	u32  vtagged:1;   /* vlan-id tagged already */
+	u32  instag:1;    /* insert vlan tag */
+	u32  ipv4:1;      /* ipv4 packet */
+	u32  type:1;      /* type of packet (ethernet_ii(1) or snap(0)) */
+	u32  mss:13;      /* mss if do tcp large send */
+	u32  last_frag:1; /* last fragment(buffer) of the packet */
+
+	u32  pkt_len;     /* packet length in ext tpd */
+	u32  reserve;
+#elif defined(__BIG_ENDIAN_BITFIELD)
+	/* dword 0 */
+	u32  vlan_tag:16;
+	u32  buffer_len:16;
+	/* dword 1 */
+	u32  last_frag:1;
+	u32  mss:13;
+	u32  type:1;
+	u32  ipv4:1;
+	u32  instag:1;
+	u32  vtagged:1;
+	u32  lso_v2:1;
+	u32  lso:1;
+	u32  udp_csum:1;
+	u32  tcp_csum:1;
+	u32  ip_csum:1;
+	u32  c_csum:1;
+	u32  tcphdr_offset:8;
+
+	u32  pkt_len;
+	u32  reserve;
+#else
+#error	"Please fix <asm/byteorder.h>"
+#endif
+};
+
+union alx_hw_tpdesc {
+	/* dword flat format */
+	struct {
+		__le32 dw0;
+		__le32 dw1;
+		__le32 dw2;
+		__le32 dw3;
+	} dfmt;
+
+	/* qword flat format */
+	struct {
+		__le64 qw0;
+		__le64 qw1;
+	} qfmt;
+};
+
+/*
+ * XXX: we should not use this guy, best to just
+ * do all le32_to_cpu() conversions on the spot.
+ */
+union alx_sw_tpdesc {
+	struct alx_sw_tpdes_general   genr;
+	struct alx_sw_tpdes_checksum  csum;
+	struct alx_sw_tpdes_tso       tso;
+
+	/* dword flat format */
+	struct {
+		u32 dw0;
+		u32 dw1;
+		u32 dw2;
+		u32 dw3;
+	} dfmt;
+
+	/* qword flat format */
+	struct {
+		u64 qw0;
+		u64 qw1;
+	} qfmt;
+};
+
+#define ALX_RRD(_que, _i)	\
+		(&(((union alx_hw_rrdesc *)(_que)->rrq.rrdesc)[(_i)]))
+#define ALX_RFD(_que, _i)	\
+		(&(((union alx_hw_rfdesc *)(_que)->rfq.rfdesc)[(_i)]))
+#define ALX_TPD(_que, _i)	\
+		(&(((union alx_hw_tpdesc *)(_que)->tpq.tpdesc)[(_i)]))
+
+
+/*
+ * alx_ring_header represents a single, contiguous block of DMA space
+ * mapped for the three descriptor rings (tpd, rfd, rrd) and the two
+ * message blocks (cmb, smb) described below
+ */
+struct alx_ring_header {
+	void        *desc;      /* virtual address */
+	dma_addr_t   dma;       /* physical address*/
+	unsigned int size;      /* length in bytes */
+	unsigned int used;
+};
+
+
+/*
+ * alx_buffer is wrapper around a pointer to a socket buffer
+ * so a DMA handle can be stored along with the skb
+ */
+struct alx_buffer {
+	struct sk_buff *skb;      /* socket buffer */
+	u16             length;   /* rx buffer length */
+	dma_addr_t      dma;
+};
+
+struct alx_sw_buffer {
+	struct sk_buff *skb;   /* socket buffer */
+	u32             vlan_tag:16;
+	u32             vlan_flag:1;
+	u32             reserved:15;
+};
+
+/* receive free descriptor (rfd) queue */
+struct alx_rfd_queue {
+	struct alx_buffer   *rfbuff;
+	union alx_hw_rfdesc *rfdesc;   /* virtual address */
+	dma_addr_t         rfdma;    /* physical address */
+	u16 size;          /* length in bytes */
+	u16 count;         /* number of descriptors in the ring */
+	u16 produce_idx;   /* it's written to rxque->produce_reg */
+	u16 consume_idx;   /* unused*/
+};
+
+/* receive return descriptor (rrd) queue */
+struct alx_rrd_queue {
+	union alx_hw_rrdesc *rrdesc;    /* virtual address */
+	dma_addr_t          rrdma;     /* physical address */
+	u16 size;          /* length in bytes */
+	u16 count;         /* number of descriptors in the ring */
+	u16 produce_idx;   /* unused */
+	u16 consume_idx;   /* rxque->consume_reg */
+};
+
+/* software descriptor (swd) queue */
+struct alx_swd_queue {
+	struct alx_sw_buffer *swbuff;
+	u16 count;         /* number of descriptors in the ring */
+	u16 produce_idx;
+	u16 consume_idx;
+};
+
+/* rx queue */
+struct alx_rx_queue {
+	struct device         *dev;      /* device for dma mapping */
+	struct net_device     *netdev;   /* netdev ring belongs to */
+	struct alx_msix_param *msix;
+	struct alx_rrd_queue   rrq;
+	struct alx_rfd_queue   rfq;
+	struct alx_swd_queue   swq;
+
+	u16 que_idx;       /* index in multi rx queues */
+	u16 max_packets;   /* max work per interrupt */
+	u16 produce_reg;
+	u16 consume_reg;
+	u32 flags;
+};
+#define ALX_RX_FLAG_SW_QUE          0x00000001
+#define ALX_RX_FLAG_HW_QUE          0x00000002
+#define CHK_RX_FLAG(_flag)          CHK_FLAG(rxque, RX, _flag)
+#define SET_RX_FLAG(_flag)          SET_FLAG(rxque, RX, _flag)
+#define CLI_RX_FLAG(_flag)          CLI_FLAG(rxque, RX, _flag)
+
+#define GET_RF_BUFFER(_rque, _i)    (&((_rque)->rfq.rfbuff[(_i)]))
+#define GET_SW_BUFFER(_rque, _i)    (&((_rque)->swq.swbuff[(_i)]))
+
+
+/* transmit packet descriptor (tpd) ring */
+struct alx_tpd_queue {
+	struct alx_buffer   *tpbuff;
+	union alx_hw_tpdesc *tpdesc;   /* virtual address */
+	dma_addr_t         tpdma;    /* physical address */
+
+	u16 size;    /* length in bytes */
+	u16 count;   /* number of descriptors in the ring */
+	u16 produce_idx;
+	u16 consume_idx;
+	u16 last_produce_idx;
+};
+
+/* tx queue */
+struct alx_tx_queue {
+	struct device         *dev;	/* device for dma mapping */
+	struct net_device     *netdev;	/* netdev ring belongs to */
+	struct alx_tpd_queue   tpq;
+	struct alx_msix_param *msix;
+
+	u16 que_idx;       /* needed for multiqueue queue management */
+	u16 max_packets;   /* max packets per interrupt */
+	u16 produce_reg;
+	u16 consume_reg;
+};
+#define GET_TP_BUFFER(_tque, _i)    (&((_tque)->tpq.tpbuff[(_i)]))
+
+
+/*
+ * definition for array allocations.
+ */
+#define ALX_MAX_MSIX_INTRS              16
+#define ALX_MAX_RX_QUEUES               8
+#define ALX_MAX_TX_QUEUES               4
+
+enum alx_msix_type {
+	alx_msix_type_rx,
+	alx_msix_type_tx,
+	alx_msix_type_other,
+};
+#define ALX_MSIX_TYPE_OTH_TIMER         0
+#define ALX_MSIX_TYPE_OTH_ALERT         1
+#define ALX_MSIX_TYPE_OTH_SMB           2
+#define ALX_MSIX_TYPE_OTH_PHY           3
+
+/* ALX_MAX_MSIX_INTRS of these are allocated,
+ * but we only use one per queue-specific vector.
+ */
+struct alx_msix_param {
+	struct alx_adapter *adpt;
+	unsigned int        vec_idx; /* index in HW interrupt vector */
+	char                name[IFNAMSIZ + 9];
+
+	/* msix interrupts for queue */
+	u8 rx_map[ALX_MAX_RX_QUEUES];
+	u8 tx_map[ALX_MAX_TX_QUEUES];
+	u8 rx_count;   /* Rx ring count assigned to this vector */
+	u8 tx_count;   /* Tx ring count assigned to this vector */
+
+	struct napi_struct napi;
+	cpumask_var_t      affinity_mask;
+	u32 flags;
+};
+
+#define ALX_MSIX_FLAG_RX0               0x00000001
+#define ALX_MSIX_FLAG_RX1               0x00000002
+#define ALX_MSIX_FLAG_RX2               0x00000004
+#define ALX_MSIX_FLAG_RX3               0x00000008
+#define ALX_MSIX_FLAG_RX4               0x00000010
+#define ALX_MSIX_FLAG_RX5               0x00000020
+#define ALX_MSIX_FLAG_RX6               0x00000040
+#define ALX_MSIX_FLAG_RX7               0x00000080
+#define ALX_MSIX_FLAG_TX0               0x00000100
+#define ALX_MSIX_FLAG_TX1               0x00000200
+#define ALX_MSIX_FLAG_TX2               0x00000400
+#define ALX_MSIX_FLAG_TX3               0x00000800
+#define ALX_MSIX_FLAG_TIMER             0x00001000
+#define ALX_MSIX_FLAG_ALERT             0x00002000
+#define ALX_MSIX_FLAG_SMB               0x00004000
+#define ALX_MSIX_FLAG_PHY               0x00008000
+
+#define ALX_MSIX_FLAG_RXS (\
+		ALX_MSIX_FLAG_RX0       |\
+		ALX_MSIX_FLAG_RX1       |\
+		ALX_MSIX_FLAG_RX2       |\
+		ALX_MSIX_FLAG_RX3       |\
+		ALX_MSIX_FLAG_RX4       |\
+		ALX_MSIX_FLAG_RX5       |\
+		ALX_MSIX_FLAG_RX6       |\
+		ALX_MSIX_FLAG_RX7)
+#define ALX_MSIX_FLAG_TXS (\
+		ALX_MSIX_FLAG_TX0       |\
+		ALX_MSIX_FLAG_TX1       |\
+		ALX_MSIX_FLAG_TX2       |\
+		ALX_MSIX_FLAG_TX3)
+#define ALX_MSIX_FLAG_ALL (\
+		ALX_MSIX_FLAG_RXS       |\
+		ALX_MSIX_FLAG_TXS       |\
+		ALX_MSIX_FLAG_TIMER     |\
+		ALX_MSIX_FLAG_ALERT     |\
+		ALX_MSIX_FLAG_SMB       |\
+		ALX_MSIX_FLAG_PHY)
+
+#define CHK_MSIX_FLAG(_flag)    CHK_FLAG(msix, MSIX, _flag)
+#define SET_MSIX_FLAG(_flag)    SET_FLAG(msix, MSIX, _flag)
+#define CLI_MSIX_FLAG(_flag)    CLI_FLAG(msix, MSIX, _flag)
+
+#ifdef MDM_PLATFORM
+/**
+ * struct alx_ipa_stats - ALX - ODU_Bridge/IPA Stats
+ * @rx_ipa_excep: Exception packets sent up from ODU_Bridge/IPA; packets
+ *                that need to be delivered to the network stack
+ * @rx_ipa_write_done: MSG received from ODU_Bridge/IPA once a packet has
+ *                     been posted to the IPA HW
+ */
+struct alx_ipa_stats {
+	/* RX Side */
+	uint64_t rx_ipa_excep;
+	uint64_t rx_ipa_write_done;
+	uint64_t rx_ipa_send;
+	uint64_t rx_ipa_send_fail;
+
+	/* TX Side*/
+	uint64_t tx_ipa_send;
+	uint64_t tx_ipa_send_err;
+
+	/* Frag Stats */
+	uint64_t non_ip_frag_pkt;
+
+	/* Flow Control Stats */
+	uint64_t flow_control_pkt_drop;
+	uint64_t ipa_low_watermark_cnt;
+};
+
+enum alx_ipa_rm_state {
+	ALX_IPA_RM_RELEASED,
+	ALX_IPA_RM_REQUESTED,
+	ALX_IPA_RM_GRANT_PENDING,
+	ALX_IPA_RM_GRANTED,
+};
+
+/**
+ * struct alx_ipa_ctx - ALX IPA Context
+ *  @stats: ALX - IPA bridge stats
+ *  @debugfs_dir: Debug FS handle for alx
+ *  @ipa_prod_rm_state: IPA Producer Pipe RM state
+ *  @ipa_cons_rm_state: IPA Consumer Pipe RM state
+ *  @alx_ipa_perf_requested: Set to true when a perf profile has been requested.
+ *  @ipa_rm_state_lock: Lock to synchronize IPA Prod/Cons RM state access
+ *  @rm_ipa_lock: Lock to synchronize ipa_rx_completion access
+ *  @ipa_rx_completion: Keeps track of pending IPA WRITE DONE Evts
+ *  @rm_ipa_wait: Wait source used to keep APPS awake
+ *                when packets are submitted to IPA
+ **/
+struct alx_ipa_ctx {
+	struct alx_ipa_stats stats;
+	struct dentry *debugfs_dir;
+	enum alx_ipa_rm_state ipa_prod_rm_state;
+	enum alx_ipa_rm_state ipa_cons_rm_state;
+	bool alx_ipa_perf_requested;
+	spinlock_t ipa_rm_state_lock;
+	spinlock_t rm_ipa_lock;
+	uint64_t ipa_rx_completion;
+	uint64_t alx_tx_completion;
+	bool acquire_wake_src;
+	struct wakeup_source rm_ipa_wait;
+	bool ipa_ready;
+};
+
+struct alx_ipa_rx_desc_node {
+	struct list_head link;
+	struct sk_buff *skb_ptr;
+};
+#endif
+
+/*
+ * board specific private data structure
+ */
+struct alx_adapter {
+	struct net_device *netdev;
+	struct pci_dev    *pdev;
+#ifdef  MDM_PLATFORM
+	struct alx_ipa_ctx *palx_ipa;
+	struct msm_pcie_register_event msm_pcie_event;
+#endif
+	struct net_device_stats net_stats;
+	bool netdev_registered;
+	u16 bd_number;    /* board number */
+
+	struct alx_msix_param *msix[ALX_MAX_MSIX_INTRS];
+	struct msix_entry     *msix_entries;
+	int num_msix_rxques;
+	int num_msix_txques;
+	int num_msix_noques;    /* true count of msix_noques for device */
+	int num_msix_intrs;
+
+	int min_msix_intrs;
+	int max_msix_intrs;
+
+	/* All Descriptor memory */
+	struct alx_ring_header ring_header;
+
+	/* TX */
+	struct alx_tx_queue *tx_queue[ALX_MAX_TX_QUEUES];
+	/* RX */
+	struct alx_rx_queue *rx_queue[ALX_MAX_RX_QUEUES];
+
+	u16 num_txques;
+	u16 num_rxques; /* equals max(num_hw_rxques, num_sw_rxques) */
+	u16 num_hw_rxques;
+	u16 num_sw_rxques;
+	u16 max_rxques;
+	u16 max_txques;
+
+	u16 num_txdescs;
+	u16 num_rxdescs;
+
+	u32 rxbuf_size;
+
+	struct alx_cmb cmb;
+	struct alx_smb smb;
+
+	/* structs defined in alx_hw.h */
+	struct alx_hw       hw;
+	struct alx_hw_stats hw_stats;
+
+	u32 *config_space;
+
+	struct work_struct alx_task;
+	struct work_struct ipa_ready_task;
+	struct timer_list  alx_timer;
+
+	unsigned long link_jiffies;
+
+	u32 wol;
+	spinlock_t tx_lock;
+	spinlock_t rx_lock;
+	atomic_t irq_sem;
+
+#ifdef  MDM_PLATFORM
+	u16 ipa_high_watermark;
+	u16 ipa_low_watermark;
+	u16 pendq_cnt;
+	u16 freeq_cnt;
+	u16 ipa_free_desc_cnt;
+	spinlock_t flow_ctrl_lock;
+	struct list_head pend_queue_head;
+	struct list_head free_queue_head;
+	struct work_struct ipa_send_task;
+#endif
+
+	u16 msg_enable;
+#ifdef MDM_PLATFORM
+	unsigned long flags[3];
+#else
+	unsigned long flags[2];
+#endif
+};
+
+
+#define ALX_ADPT_FLAG_0_MSI_CAP                 0x00000001
+#define ALX_ADPT_FLAG_0_MSI_EN                  0x00000002
+#define ALX_ADPT_FLAG_0_MSIX_CAP                0x00000004
+#define ALX_ADPT_FLAG_0_MSIX_EN                 0x00000008
+#define ALX_ADPT_FLAG_0_MRQ_CAP                 0x00000010
+#define ALX_ADPT_FLAG_0_MRQ_EN                  0x00000020
+#define ALX_ADPT_FLAG_0_MTQ_CAP                 0x00000040
+#define ALX_ADPT_FLAG_0_MTQ_EN                  0x00000080
+#define ALX_ADPT_FLAG_0_SRSS_CAP                0x00000100
+#define ALX_ADPT_FLAG_0_SRSS_EN                 0x00000200
+#define ALX_ADPT_FLAG_0_FIXED_MSIX              0x00000400
+
+#define ALX_ADPT_FLAG_0_TASK_REINIT_REQ         0x00010000  /* reinit */
+#define ALX_ADPT_FLAG_0_TASK_LSC_REQ            0x00020000
+
+#define ALX_ADPT_FLAG_1_STATE_TESTING           0x00000001
+#define ALX_ADPT_FLAG_1_STATE_RESETTING         0x00000002
+#define ALX_ADPT_FLAG_1_STATE_DOWN              0x00000004
+#define ALX_ADPT_FLAG_1_STATE_WATCH_DOG         0x00000008
+#define ALX_ADPT_FLAG_1_STATE_DIAG_RUNNING      0x00000010
+#define ALX_ADPT_FLAG_1_STATE_INACTIVE          0x00000020
+
+#ifdef MDM_PLATFORM
+#define ALX_ADPT_FLAG_2_ODU_CONNECT             0x00000001
+#define ALX_ADPT_FLAG_2_IPA_RM                  0x00000002
+#define ALX_ADPT_FLAG_2_DEBUGFS_INIT            0x00000004
+#define ALX_ADPT_FLAG_2_ODU_INIT                0x00000008
+#define ALX_ADPT_FLAG_2_WQ_SCHED                0x00000010
+#endif
+
+#define CHK_ADPT_FLAG(_idx, _flag)	\
+		CHK_FLAG_ARRAY(adpt, _idx, ADPT, _flag)
+#define SET_ADPT_FLAG(_idx, _flag)	\
+		SET_FLAG_ARRAY(adpt, _idx, ADPT, _flag)
+#define CLI_ADPT_FLAG(_idx, _flag)	\
+		CLI_FLAG_ARRAY(adpt, _idx, ADPT, _flag)
+
+/* default to trying for four seconds */
+#define ALX_TRY_LINK_TIMEOUT (4 * HZ)
+
+
+#define ALX_OPEN_CTRL_IRQ_EN            0x00000001
+#define ALX_OPEN_CTRL_RESET_MAC         0x00000002
+#define ALX_OPEN_CTRL_RESET_PHY         0x00000004
+#define ALX_OPEN_CTRL_RESET_ALL (\
+		ALX_OPEN_CTRL_RESET_MAC         |\
+		ALX_OPEN_CTRL_RESET_PHY)
+
+/* needed by alx_ethtool.c */
+extern char alx_drv_name[];
+extern void alx_reinit_locked(struct alx_adapter *adpt);
+extern void alx_set_ethtool_ops(struct net_device *netdev);
+extern void alx_update_hw_stats(struct alx_adapter *adpt);
+int alx_resize_rings(struct net_device *netdev);
+int alx_open_internal(struct alx_adapter *adpt, u32 ctrl);
+void alx_stop_internal(struct alx_adapter *adpt, u32 ctrl);
+
+#ifdef ETHTOOL_OPS_COMPAT
+extern int ethtool_ioctl(struct ifreq *ifr);
+#endif
+
+#endif /* _ALX_H_ */
diff -Nruw linux-4.4.115-fbx/drivers/net/ethernet/atheros/alx_prop./alx_hwcom.h linux-4.4.115-fbx/drivers/net/ethernet/atheros/alx_prop/alx_hwcom.h
--- linux-4.4.115-fbx/drivers/net/ethernet/atheros/alx_prop./alx_hwcom.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/net/ethernet/atheros/alx_prop/alx_hwcom.h	2019-01-22 16:16:24.927259302 +0100
@@ -0,0 +1,187 @@
+/*
+ * Copyright (c) 2012 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _ALX_HWCOMMON_H_
+#define _ALX_HWCOMMON_H_
+
+#include <linux/bitops.h>
+#include "alx_sw.h"
+
+
+#define BIT_ALL	    0xffffffffUL
+
+#define ASHFT31(_x)  ((_x) << 31)
+#define ASHFT30(_x)  ((_x) << 30)
+#define ASHFT29(_x)  ((_x) << 29)
+#define ASHFT28(_x)  ((_x) << 28)
+#define ASHFT27(_x)  ((_x) << 27)
+#define ASHFT26(_x)  ((_x) << 26)
+#define ASHFT25(_x)  ((_x) << 25)
+#define ASHFT24(_x)  ((_x) << 24)
+#define ASHFT23(_x)  ((_x) << 23)
+#define ASHFT22(_x)  ((_x) << 22)
+#define ASHFT21(_x)  ((_x) << 21)
+#define ASHFT20(_x)  ((_x) << 20)
+#define ASHFT19(_x)  ((_x) << 19)
+#define ASHFT18(_x)  ((_x) << 18)
+#define ASHFT17(_x)  ((_x) << 17)
+#define ASHFT16(_x)  ((_x) << 16)
+#define ASHFT15(_x)  ((_x) << 15)
+#define ASHFT14(_x)  ((_x) << 14)
+#define ASHFT13(_x)  ((_x) << 13)
+#define ASHFT12(_x)  ((_x) << 12)
+#define ASHFT11(_x)  ((_x) << 11)
+#define ASHFT10(_x)  ((_x) << 10)
+#define ASHFT9(_x)   ((_x) << 9)
+#define ASHFT8(_x)   ((_x) << 8)
+#define ASHFT7(_x)   ((_x) << 7)
+#define ASHFT6(_x)   ((_x) << 6)
+#define ASHFT5(_x)   ((_x) << 5)
+#define ASHFT4(_x)   ((_x) << 4)
+#define ASHFT3(_x)   ((_x) << 3)
+#define ASHFT2(_x)   ((_x) << 2)
+#define ASHFT1(_x)   ((_x) << 1)
+#define ASHFT0(_x)   ((_x) << 0)
+
+
+#define FIELD_GETX(_x, _name)   (((_x) & (_name##_MASK)) >> (_name##_SHIFT))
+#define FIELD_SETS(_x, _name, _v)   (\
+(_x) =                               \
+((_x) & ~(_name##_MASK))            |\
+(((u16)(_v) << (_name##_SHIFT)) & (_name##_MASK)))
+#define FIELD_SETL(_x, _name, _v)   (\
+(_x) =                               \
+((_x) & ~(_name##_MASK))            |\
+(((u32)(_v) << (_name##_SHIFT)) & (_name##_MASK)))
+#define FIELDL(_name, _v) (((u32)(_v) << (_name##_SHIFT)) & (_name##_MASK))
+#define FIELDS(_name, _v) (((u16)(_v) << (_name##_SHIFT)) & (_name##_MASK))
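+
+/*
+ * Usage sketch (illustrative): FIELD_SETL() masks the value into place
+ * and FIELD_GETX() reads it back, e.g. with the LX_DRV_TYPE field
+ * defined below:
+ *
+ *	u32 drv = 0;
+ *
+ *	FIELD_SETL(drv, LX_DRV_TYPE, LX_DRV_TYPE_LNX);
+ *	// FIELD_GETX(drv, LX_DRV_TYPE) == LX_DRV_TYPE_LNX
+ */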
+
+
+
+#define LX_SWAP_DW(_x) (\
+	(((_x) << 24) & 0xFF000000UL) |\
+	(((_x) <<  8) & 0x00FF0000UL) |\
+	(((_x) >>  8) & 0x0000FF00UL) |\
+	(((_x) >> 24) & 0x000000FFUL))
+
+#define LX_SWAP_W(_x) (\
+	(((_x) >> 8) & 0x00FFU) |\
+	(((_x) << 8) & 0xFF00U))
+
+
+#define LX_ERR_SUCCESS          0x0000
+#define LX_ERR_ALOAD            0x0001
+#define LX_ERR_RSTMAC           0x0002
+#define LX_ERR_PARM             0x0003
+#define LX_ERR_MIIBUSY          0x0004
+
+/* link capability */
+#define LX_LC_10H               0x01
+#define LX_LC_10F               0x02
+#define LX_LC_100H              0x04
+#define LX_LC_100F              0x08
+#define LX_LC_1000F             0x10
+#define LX_LC_ALL               \
+	(LX_LC_10H|LX_LC_10F|LX_LC_100H|LX_LC_100F|LX_LC_1000F)
+
+/* options for MAC control */
+#define LX_MACSPEED_1000        BIT(0)  /* 1:1000M, 0:10/100M */
+#define LX_MACDUPLEX_FULL       BIT(1)  /* 1:full, 0:half */
+#define LX_FLT_BROADCAST        BIT(2)  /* 1:enable rx-broadcast */
+#define LX_FLT_MULTI_ALL        BIT(3)
+#define LX_FLT_DIRECT           BIT(4)
+#define LX_FLT_PROMISC          BIT(5)
+#define LX_FC_TXEN              BIT(6)
+#define LX_FC_RXEN              BIT(7)
+#define LX_VLAN_STRIP           BIT(8)
+#define LX_LOOPBACK             BIT(9)
+#define LX_ADD_FCS              BIT(10)
+#define LX_SINGLE_PAUSE         BIT(11)
+
+
+/* interop between drivers */
+#define LX_DRV_TYPE_MASK                ASHFT27(0x1FUL)
+#define LX_DRV_TYPE_SHIFT               27
+#define LX_DRV_TYPE_UNKNOWN             0
+#define LX_DRV_TYPE_BIOS                1
+#define LX_DRV_TYPE_BTROM               2
+#define LX_DRV_TYPE_PKT                 3
+#define LX_DRV_TYPE_NDS2                4
+#define LX_DRV_TYPE_UEFI                5
+#define LX_DRV_TYPE_NDS5                6
+#define LX_DRV_TYPE_NDS62               7
+#define LX_DRV_TYPE_NDS63               8
+#define LX_DRV_TYPE_LNX                 9
+#define LX_DRV_TYPE_ODI16               10
+#define LX_DRV_TYPE_ODI32               11
+#define LX_DRV_TYPE_FRBSD               12
+#define LX_DRV_TYPE_NTBSD               13
+#define LX_DRV_TYPE_WCE                 14
+#define LX_DRV_PHY_AUTO                 BIT(26)  /* 1:auto, 0:force */
+#define LX_DRV_PHY_1000                 BIT(25)
+#define LX_DRV_PHY_100                  BIT(24)
+#define LX_DRV_PHY_10                   BIT(23)
+#define LX_DRV_PHY_DUPLEX               BIT(22)  /* 1:full, 0:half */
+#define LX_DRV_PHY_FC                   BIT(21)  /* 1:en flow control */
+#define LX_DRV_PHY_MASK                 ASHFT21(0x1FUL)
+#define LX_DRV_PHY_SHIFT                21
+#define LX_DRV_PHY_UNKNOWN              0
+#define LX_DRV_DISABLE                  BIT(18)
+#define LX_DRV_WOLS5_EN                 BIT(17)
+#define LX_DRV_WOLS5_BIOS_EN            BIT(16)
+#define LX_DRV_AZ_EN                    BIT(12)
+#define LX_DRV_WOLPATTERN_EN            BIT(11)
+#define LX_DRV_WOLLINKUP_EN             BIT(10)
+#define LX_DRV_WOLMAGIC_EN              BIT(9)
+#define LX_DRV_WOLCAP_BIOS_EN           BIT(8)
+#define LX_DRV_ASPM_SPD1000LMT_MASK     ASHFT4(3UL)
+#define LX_DRV_ASPM_SPD1000LMT_SHIFT    4
+#define LX_DRV_ASPM_SPD1000LMT_100M     0
+#define LX_DRV_ASPM_SPD1000LMT_NO       1
+#define LX_DRV_ASPM_SPD1000LMT_1M       2
+#define LX_DRV_ASPM_SPD1000LMT_10M      3
+#define LX_DRV_ASPM_SPD100LMT_MASK      ASHFT2(3UL)
+#define LX_DRV_ASPM_SPD100LMT_SHIFT     2
+#define LX_DRV_ASPM_SPD100LMT_1M        0
+#define LX_DRV_ASPM_SPD100LMT_10M       1
+#define LX_DRV_ASPM_SPD100LMT_100M      2
+#define LX_DRV_ASPM_SPD100LMT_NO        3
+#define LX_DRV_ASPM_SPD10LMT_MASK       ASHFT0(3UL)
+#define LX_DRV_ASPM_SPD10LMT_SHIFT      0
+#define LX_DRV_ASPM_SPD10LMT_1M         0
+#define LX_DRV_ASPM_SPD10LMT_10M        1
+#define LX_DRV_ASPM_SPD10LMT_100M       2
+#define LX_DRV_ASPM_SPD10LMT_NO         3
+
+/* flag indicating the phy has been initialized */
+#define LX_PHY_INITED           0x003F
+
+/* check if the mac address is valid */
+#define macaddr_valid(_addr) (\
+	((*(u8 *)(_addr))&1) == 0 && \
+	!(*(u32 *)(_addr) == 0 && *((u16 *)(_addr)+2) == 0))
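+/*
+ * macaddr_valid() accepts only unicast (I/G bit of the first octet
+ * clear), non-zero addresses; note it dereferences the buffer as
+ * u32/u16, so the address must be suitably aligned. Minimal usage
+ * sketch (illustrative only):
+ *
+ *	u8 mac[ETH_ALEN] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
+ *	if (!macaddr_valid(mac))
+ *		return -EADDRNOTAVAIL;
+ */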
+
+#define test_set_or_clear(_val, _ctrl, _flag, _bit)	\
+do {							\
+	if ((_ctrl) & (_flag))				\
+		(_val) |= (_bit);			\
+	else						\
+		(_val) &= ~(_bit);			\
+} while (0)
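+/*
+ * test_set_or_clear() mirrors a control flag into a register bit: _bit
+ * is set in _val when _flag is set in _ctrl, cleared otherwise.
+ * Hypothetical sketch (MAC_CTRL_FD is an invented register bit used
+ * only for illustration):
+ *
+ *	test_set_or_clear(mac_ctrl, opts, LX_MACDUPLEX_FULL, MAC_CTRL_FD);
+ */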
+
+
+#endif /* _ALX_HWCOMMON_H_ */
+
diff -Nruw linux-4.4.115-fbx/drivers/net/ethernet/atheros/alx_prop./alx_main.c linux-4.4.115-fbx/drivers/net/ethernet/atheros/alx_prop/alx_main.c
--- linux-4.4.115-fbx/drivers/net/ethernet/atheros/alx_prop./alx_main.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/net/ethernet/atheros/alx_prop/alx_main.c	2019-10-29 09:26:24.141208052 +0100
@@ -0,0 +1,5779 @@
+/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved. */
+/*
+ * Copyright (c) 2012 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "alx.h"
+#include "alx_hwcom.h"
+
+#include <linux/moduleparam.h>
+#include <linux/proc_fs.h>
+#ifdef MDM_PLATFORM
+#include <linux/debugfs.h>
+#include <linux/pm_wakeup.h>
+#endif
+
+#ifdef CONFIG_FBXSERIAL
+# include <linux/fbxserial.h>
+#endif
+
+#ifdef APQ_PLATFORM
+#include <linux/platform_device.h>
+#include <asm/dma-iommu.h>
+#include <linux/iommu.h>
+#define ALX_SMMU_BASE       0x10000000 /* Device address range base */
+#define ALX_SMMU_SIZE       ((SZ_1G * 4ULL) - ALX_SMMU_BASE)
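+/*
+ * With the values above, the SMMU maps device addresses in the range
+ * [0x10000000, 0xFFFFFFFF]: 4 GiB minus the 256 MiB reserved below the
+ * base, i.e. ALX_SMMU_SIZE == 0xF0000000 bytes.
+ */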
+struct dma_iommu_mapping *alx_mapping;
+#endif
+
+void *ipc_alx_log_ctxt;
+void *ipc_alx_log_ctxt_low;
+
+
+char alx_drv_name[] = "alx";
+int alx_enable_ipc_low;
+#define MAX_PROC_SIZE 10
+char tmp_buff[MAX_PROC_SIZE];
+static struct proc_dir_entry *proc_file;
+static struct file_operations proc_file_ops;
+
+static const char alx_drv_description[] =
+	"Qualcomm Atheros(R) "
+	"AR813x/AR815x/AR816x PCI-E Ethernet Network Driver";
+
+static bool ipa_enable = true;
+bool module_ipa_enable = true;
+module_param(module_ipa_enable, bool, S_IRUGO);
+MODULE_PARM_DESC(module_ipa_enable, "IPA enable/disable");
+
+// ProjE change
+u32 mac_addr_hi16 = 0xFFFFFFFFUL;
+module_param(mac_addr_hi16, uint, 0);
+MODULE_PARM_DESC(mac_addr_hi16, "Specify the high 16 bits (most significant) of the MAC address");
+
+u32 mac_addr_lo32 = 0xFFFFFFFFUL;
+module_param(mac_addr_lo32, uint, 0);
+MODULE_PARM_DESC(mac_addr_lo32, "Specify the low 32 bits of the MAC address");
+// END of ProjE change
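+/*
+ * Example (assumed encoding, derived from the parameter descriptions
+ * above): for MAC address 00:11:22:33:44:55, the top 16 bits go in
+ * mac_addr_hi16 and the bottom 32 bits in mac_addr_lo32:
+ *
+ *	insmod alx.ko mac_addr_hi16=0x0011 mac_addr_lo32=0x22334455
+ */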
+
+
+/* alx_pci_tbl - PCI Device ID Table
+ *
+ * Wildcard entries (PCI_ANY_ID) should come last
+ * Last entry must be all 0s
+ *
+ * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
+ *   Class, Class Mask, private data (not used) }
+ */
+#define ALX_ETHER_DEVICE(device_id) {\
+	PCI_DEVICE(ALX_VENDOR_ID, device_id)}
+static DEFINE_PCI_DEVICE_TABLE(alx_pci_tbl) = {
+	ALX_ETHER_DEVICE(ALX_DEV_ID_AR8131),
+	ALX_ETHER_DEVICE(ALX_DEV_ID_AR8132),
+	ALX_ETHER_DEVICE(ALX_DEV_ID_AR8151_V1),
+	ALX_ETHER_DEVICE(ALX_DEV_ID_AR8151_V2),
+	ALX_ETHER_DEVICE(ALX_DEV_ID_AR8152_V1),
+	ALX_ETHER_DEVICE(ALX_DEV_ID_AR8152_V2),
+	ALX_ETHER_DEVICE(ALX_DEV_ID_AR8161),
+	ALX_ETHER_DEVICE(ALX_DEV_ID_AR8162),
+	{0,}
+};
+MODULE_DEVICE_TABLE(pci, alx_pci_tbl);
+MODULE_AUTHOR("Qualcomm Corporation, <nic-devel@qualcomm.com>");
+MODULE_DESCRIPTION("Qualcomm Atheros Gigabit Ethernet Driver");
+MODULE_LICENSE("Dual BSD/GPL");
+
+static void alx_init_ring_ptrs(struct alx_adapter *adpt);
+
+#ifdef MDM_PLATFORM
+static int alx_ipa_rm_try_release(struct alx_adapter *adpt);
+static int alx_ipa_setup_rm(struct alx_adapter *adpt);
+
+/* Global CTX PTR which can be used for debugging */
+static struct alx_adapter *galx_adapter_ptr = NULL;
+const mode_t read_write_mode = S_IRUSR | S_IRGRP | S_IROTH | S_IWUSR | S_IWGRP;
+
+static inline char *alx_ipa_rm_state_to_str(enum alx_ipa_rm_state state)
+{
+	switch (state) {
+	case ALX_IPA_RM_RELEASED: return "RELEASED";
+	case ALX_IPA_RM_REQUESTED: return "REQUESTED";
+	case ALX_IPA_RM_GRANT_PENDING: return "GRANT PENDING";
+	case ALX_IPA_RM_GRANTED: return "GRANTED";
+	}
+
+	return "UNKNOWN";
+}
+
+#ifdef ALX_IPA_DEBUG
+/**
+ * alx_dump_buff() - dumps buffer for debug purposes
+ * @base: buffer base address
+ * @phy_base: buffer physical base address
+ * @size: size of the buffer
+ */
+static void alx_dump_buff(void *base, dma_addr_t phy_base, u32 size)
+{
+	int i;
+	u32 *cur = (u32 *)base;
+	u8 *byt;
+
+	_IPC_INFO("system phys addr=%pa len=%u\n", &phy_base, size);
+	for (i = 0; i < size / 4; i++) {
+		byt = (u8 *)(cur + i);
+		_IPC_INFO("%2d %08x   %02x %02x %02x %02x\n", i, *(cur + i),
+			  byt[0], byt[1], byt[2], byt[3]);
+	}
+	_IPC_INFO("END\n");
+}
+#define ALX_DUMP_BUFF(base, phy_base, size) alx_dump_buff(base, phy_base, size)
+
+static bool alx_is_packet_dhcp(struct sk_buff *skb)
+{
+	uint16_t sport = 0;
+	uint16_t dport = 0;
+
+	sport = be16_to_cpu((uint16_t)(*(uint16_t *)(skb->data + ALX_IP_OFFSET + ALX_IP_HEADER_SIZE)));
+	dport = be16_to_cpu((uint16_t)(*(uint16_t *)(skb->data + ALX_IP_OFFSET + ALX_IP_HEADER_SIZE + sizeof(uint16_t))));
+
+	if (((sport == ALX_DHCP_SRV_PORT) && (dport == ALX_DHCP_CLI_PORT)) ||
+	    ((dport == ALX_DHCP_SRV_PORT) && (sport == ALX_DHCP_CLI_PORT)))
+	{
+		return true;
+	}
+	return false;
+}
+#endif
+
+static int alx_ipa_rm_request(struct alx_adapter *adpt);
+
+#endif
+
+int alx_cfg_r16(const struct alx_hw *hw, int reg, u16 *pval)
+{
+	if (!(hw && hw->adpt && hw->adpt->pdev))
+		return -EINVAL;
+	return pci_read_config_word(hw->adpt->pdev, reg, pval);
+}
+
+
+int alx_cfg_w16(const struct alx_hw *hw, int reg, u16 val)
+{
+	if (!(hw && hw->adpt && hw->adpt->pdev))
+		return -EINVAL;
+	return pci_write_config_word(hw->adpt->pdev, reg, val);
+}
+
+
+void alx_mem_flush(const struct alx_hw *hw)
+{
+	readl(hw->hw_addr);
+}
+
+
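+/*
+ * Note on the register accessors below (presumed hardware workarounds
+ * inherited from the vendor driver): a dummy read is issued before
+ * reads while the link is down, and before writes to registers below
+ * 0x1400 on L2CB v2.0, apparently to flush/serialize posted PCIe
+ * transactions before the real access.
+ */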
+void alx_mem_r32(const struct alx_hw *hw, int reg, u32 *val)
+{
+	if (unlikely(!hw->link_up))
+		readl(hw->hw_addr + reg);
+	*val = readl(hw->hw_addr + reg);
+}
+
+
+void alx_mem_w32(const struct alx_hw *hw, int reg, u32 val)
+{
+	if (hw->mac_type == alx_mac_l2cb_v20 && reg < 0x1400)
+		readl(hw->hw_addr + reg);
+	writel(val, hw->hw_addr + reg);
+}
+
+
+static void alx_mem_r16(const struct alx_hw *hw, int reg, u16 *val)
+{
+	if (unlikely(!hw->link_up))
+		readw(hw->hw_addr + reg);
+	*val = readw(hw->hw_addr + reg);
+}
+
+
+static void alx_mem_w16(const struct alx_hw *hw, int reg, u16 val)
+{
+	if (hw->mac_type == alx_mac_l2cb_v20 && reg < 0x1400)
+		readw(hw->hw_addr + reg);
+	writew(val, hw->hw_addr + reg);
+}
+
+
+void alx_mem_w8(const struct alx_hw *hw, int reg, u8 val)
+{
+	if (hw->mac_type == alx_mac_l2cb_v20 && reg < 0x1400)
+		readb(hw->hw_addr + reg);
+	writeb(val, hw->hw_addr + reg);
+}
+
+
+/*
+ * alx_hw_printk
+ */
+void alx_hw_printk(const char *level, const struct alx_hw *hw,
+		   const char *fmt, ...)
+{
+	struct va_format vaf;
+	va_list args;
+
+	va_start(args, fmt);
+	vaf.fmt = fmt;
+	vaf.va = &args;
+/* warning: __netdev_printk is unresolved without compat.o
+	if (hw && hw->adpt && hw->adpt->netdev)
+		__netdev_printk(level, hw->adpt->netdev, &vaf);
+	else
+*/
+		_IPC_INFO("%salx_hw: %pV", level, &vaf);
+
+	va_end(args);
+}
+
+
+/*
+ *  alx_validate_mac_addr - Validate MAC address
+ */
+static int alx_validate_mac_addr(u8 *mac_addr)
+{
+	int retval = 0;
+
+	if (mac_addr[0] & 0x01) {
+		IPC_DEBUG("MAC address is multicast\n");
+		retval = -EADDRNOTAVAIL;
+	} else if (mac_addr[0] == 0xff && mac_addr[1] == 0xff) {
+		IPC_DEBUG("MAC address is broadcast\n");
+		retval = -EADDRNOTAVAIL;
+	} else if (mac_addr[0] == 0 && mac_addr[1] == 0 &&
+		   mac_addr[2] == 0 && mac_addr[3] == 0 &&
+		   mac_addr[4] == 0 && mac_addr[5] == 0) {
+		IPC_DEBUG("MAC address is all zeros\n");
+		retval = -EADDRNOTAVAIL;
+	}
+	return retval;
+}
+
+
+/*
+ *  alx_set_mac_type - Sets MAC type
+ */
+static int alx_set_mac_type(struct alx_adapter *adpt)
+{
+	struct alx_hw *hw = &adpt->hw;
+	int retval = 0;
+
+	if (hw->pci_venid == ALX_VENDOR_ID) {
+		switch (hw->pci_devid) {
+		case ALX_DEV_ID_AR8131:
+			hw->mac_type = alx_mac_l1c;
+			break;
+		case ALX_DEV_ID_AR8132:
+			hw->mac_type = alx_mac_l2c;
+			break;
+		case ALX_DEV_ID_AR8151_V1:
+			hw->mac_type = alx_mac_l1d_v1;
+			break;
+		case ALX_DEV_ID_AR8151_V2:
+			/* just use l1d configure */
+			hw->mac_type = alx_mac_l1d_v2;
+			break;
+		case ALX_DEV_ID_AR8152_V1:
+			hw->mac_type = alx_mac_l2cb_v1;
+			break;
+		case ALX_DEV_ID_AR8152_V2:
+			if (hw->pci_revid == ALX_REV_ID_AR8152_V2_0)
+				hw->mac_type = alx_mac_l2cb_v20;
+			else
+				hw->mac_type = alx_mac_l2cb_v21;
+			break;
+		case ALX_DEV_ID_AR8161:
+			hw->mac_type = alx_mac_l1f;
+			break;
+		case ALX_DEV_ID_AR8162:
+			hw->mac_type = alx_mac_l2f;
+			break;
+		default:
+			retval = -EINVAL;
+			break;
+		}
+	} else {
+		retval = -EINVAL;
+	}
+
+	alx_netif_dbg(adpt, hw, adpt->netdev,
+		   "found mac: %d, returns: %d\n", hw->mac_type, retval);
+	return retval;
+}
+
+
+/*
+ *  alx_init_hw_callbacks
+ */
+static int alx_init_hw_callbacks(struct alx_adapter *adpt)
+{
+	struct alx_hw *hw = &adpt->hw;
+	int retval = 0;
+
+	alx_set_mac_type(adpt);
+
+	switch (hw->mac_type) {
+	case alx_mac_l1f:
+	case alx_mac_l2f:
+		retval = alf_init_hw_callbacks(hw);
+		break;
+	case alx_mac_l1c:
+	case alx_mac_l2c:
+	case alx_mac_l2cb_v1:
+	case alx_mac_l2cb_v20:
+	case alx_mac_l2cb_v21:
+	case alx_mac_l1d_v1:
+	case alx_mac_l1d_v2:
+		retval = alc_init_hw_callbacks(hw);
+		break;
+	default:
+		retval = -EINVAL;
+		break;
+	}
+	return retval;
+}
+
+
+void alx_reinit_locked(struct alx_adapter *adpt)
+{
+	WARN_ON(in_interrupt());
+
+	while (CHK_ADPT_FLAG(1, STATE_RESETTING))
+		msleep(20);
+	SET_ADPT_FLAG(1, STATE_RESETTING);
+
+	alx_stop_internal(adpt, ALX_OPEN_CTRL_RESET_MAC);
+	alx_open_internal(adpt, ALX_OPEN_CTRL_RESET_MAC);
+
+	CLI_ADPT_FLAG(1, STATE_RESETTING);
+}
+
+
+static void alx_task_schedule(struct alx_adapter *adpt)
+{
+	if (!CHK_ADPT_FLAG(1, STATE_DOWN) &&
+	    !CHK_ADPT_FLAG(1, STATE_WATCH_DOG)) {
+		SET_ADPT_FLAG(1, STATE_WATCH_DOG);
+		schedule_work(&adpt->alx_task);
+	}
+}
+
+
+static void alx_check_lsc(struct alx_adapter *adpt)
+{
+	SET_ADPT_FLAG(0, TASK_LSC_REQ);
+	adpt->link_jiffies = jiffies + ALX_TRY_LINK_TIMEOUT;
+
+	if (!CHK_ADPT_FLAG(1, STATE_DOWN))
+		alx_task_schedule(adpt);
+}
+
+
+/*
+ * alx_tx_timeout - Respond to a Tx Hang
+ */
+static void alx_tx_timeout(struct net_device *netdev)
+{
+	struct alx_adapter *adpt = netdev_priv(netdev);
+
+	/* Do the reset outside of interrupt context */
+	if (!CHK_ADPT_FLAG(1, STATE_DOWN)) {
+		SET_ADPT_FLAG(0, TASK_REINIT_REQ);
+		alx_task_schedule(adpt);
+	}
+}
+
+
+/*
+ * alx_set_multicase_list - Multicast and Promiscuous mode set
+ */
+static void alx_set_multicase_list(struct net_device *netdev)
+{
+	struct alx_adapter *adpt = netdev_priv(netdev);
+	struct alx_hw *hw = &adpt->hw;
+	struct netdev_hw_addr *ha = NULL;
+
+	/* Check for Promiscuous and All Multicast modes */
+	if (netdev->flags & IFF_PROMISC) {
+		SET_HW_FLAG(PROMISC_EN);
+	} else if (netdev->flags & IFF_ALLMULTI) {
+		SET_HW_FLAG(MULTIALL_EN);
+		CLI_HW_FLAG(PROMISC_EN);
+	} else {
+		CLI_HW_FLAG(MULTIALL_EN);
+		CLI_HW_FLAG(PROMISC_EN);
+	}
+	hw->cbs.config_mac_ctrl(hw);
+
+	/* clear the old settings from the multicast hash table */
+	hw->cbs.clear_mc_addr(hw);
+
+	/* compute each mc address's hash value and put it into the hash table */
+	netdev_for_each_mc_addr(ha, netdev)
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35))
+		hw->cbs.set_mc_addr(hw, ha->addr);
+#else
+		hw->cbs.set_mc_addr(hw, ha->dmi_addr);
+#endif
+}
+
+
+/*
+ * alx_set_mac_address - Change the Ethernet address of the NIC
+ */
+static int alx_set_mac_address(struct net_device *netdev, void *data)
+{
+	struct alx_adapter *adpt = netdev_priv(netdev);
+	struct alx_hw *hw = &adpt->hw;
+	struct sockaddr *addr = data;
+
+	if (!is_valid_ether_addr(addr->sa_data))
+		return -EADDRNOTAVAIL;
+
+	if (netif_running(netdev))
+		return -EBUSY;
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,36))
+	if (netdev->addr_assign_type & NET_ADDR_RANDOM)
+		netdev->addr_assign_type ^= NET_ADDR_RANDOM;
+#endif
+
+	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
+	memcpy(hw->mac_addr, addr->sa_data, netdev->addr_len);
+
+	if (hw->cbs.set_mac_addr)
+		hw->cbs.set_mac_addr(hw, hw->mac_addr);
+	return 0;
+}
+
+
+/*
+ * Read / Write Ptr Initialize:
+ */
+static void alx_init_ring_ptrs(struct alx_adapter *adpt)
+{
+	int i, j;
+
+	for (i = 0; i < adpt->num_txques; i++) {
+		struct alx_tx_queue *txque = adpt->tx_queue[i];
+		struct alx_buffer *tpbuf = txque->tpq.tpbuff;
+		txque->tpq.produce_idx = 0;
+		txque->tpq.consume_idx = 0;
+		for (j = 0; j < txque->tpq.count; j++)
+			tpbuf[j].dma = 0;
+	}
+
+	for (i = 0; i < adpt->num_hw_rxques; i++) {
+		struct alx_rx_queue *rxque = adpt->rx_queue[i];
+		struct alx_buffer *rfbuf = rxque->rfq.rfbuff;
+		rxque->rrq.produce_idx = 0;
+		rxque->rrq.consume_idx = 0;
+		rxque->rfq.produce_idx = 0;
+		rxque->rfq.consume_idx = 0;
+		for (j = 0; j < rxque->rfq.count; j++)
+			rfbuf[j].dma = 0;
+	}
+
+	if (CHK_ADPT_FLAG(0, SRSS_EN))
+		goto srrs_enable;
+
+	return;
+
+srrs_enable:
+	for (i = 0; i < adpt->num_sw_rxques; i++) {
+		struct alx_rx_queue *rxque = adpt->rx_queue[i];
+		rxque->swq.produce_idx = 0;
+		rxque->swq.consume_idx = 0;
+	}
+}
+
+
+static void alx_config_rss(struct alx_adapter *adpt)
+{
+	static const u8 key[40] = {
+		0xE2, 0x91, 0xD7, 0x3D, 0x18, 0x05, 0xEC, 0x6C,
+		0x2A, 0x94, 0xB3, 0x0D, 0xA5, 0x4F, 0x2B, 0xEC,
+		0xEA, 0x49, 0xAF, 0x7C, 0xE2, 0x14, 0xAD, 0x3D,
+		0xB8, 0x55, 0xAA, 0xBE, 0x6A, 0x3E, 0x67, 0xEA,
+		0x14, 0x36, 0x4D, 0x17, 0x3B, 0xED, 0x20, 0x0D};
+
+	struct alx_hw *hw = &adpt->hw;
+	u32 reta = 0;
+	int i, j;
+
+	/* initialize rss hash type and idt table size */
+	hw->rss_hstype = ALX_RSS_HSTYP_ALL_EN;
+	hw->rss_idt_size = 0x100;
+
+	/* Fill out the hash key */
+	memcpy(hw->rss_key, key, sizeof(hw->rss_key));
+
+	/* Fill out redirection table */
+	memset(hw->rss_idt, 0x0, sizeof(hw->rss_idt));
+	for (i = 0, j = 0; i < 256; i++, j++) {
+		if (j == adpt->max_rxques)
+			j = 0;
+		reta |= (j << ((i & 7) * 4));
+		if ((i & 7) == 7) {
+			hw->rss_idt[i>>3] = reta;
+			reta = 0;
+		}
+	}
+
+	if (hw->cbs.config_rss)
+		hw->cbs.config_rss(hw, CHK_ADPT_FLAG(0, SRSS_EN));
+}
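+/*
+ * The redirection table built above packs 256 4-bit queue indices into
+ * 32 u32 words: entry i occupies bits [(i & 7) * 4, (i & 7) * 4 + 3] of
+ * rss_idt[i >> 3]. Lookup sketch (illustrative only, not used by the
+ * driver):
+ *
+ *	static u8 alx_rss_idt_entry(const struct alx_hw *hw, int i)
+ *	{
+ *		return (hw->rss_idt[i >> 3] >> ((i & 7) * 4)) & 0xF;
+ *	}
+ */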
+
+#ifdef MDM_PLATFORM
+static bool alx_ipa_is_non_ip_pkt(uint8_t proto)
+{
+	if ((proto == RRD_PROTO_NON_IP) ||
+		(proto == RRD_PROTO_LLDP))
+		return true;
+	return false;
+}
+
+static void alx_ipa_remove_padding(struct sk_buff *skb, bool ipv4, bool ipv6)
+{
+	if (ipv4) {
+		struct iphdr *ip_hdr = NULL;
+		ip_hdr = (struct iphdr *)(skb_mac_header(skb) + ETH_HLEN);
+		skb_trim(skb, ntohs(ip_hdr->tot_len) + ETH_HLEN);
+	} else if (ipv6) {
+		struct ipv6hdr *ip6_hdr = NULL;
+		ip6_hdr = (struct ipv6hdr *)(skb_mac_header(skb) + ETH_HLEN);
+		skb_trim(skb, ntohs(ip6_hdr->payload_len) + sizeof(struct ipv6hdr) + ETH_HLEN);
+	}
+}
+
+static bool alx_ipa_is_ip_frag_pkt(struct sk_buff *skb)
+{
+	struct iphdr *ip_hdr = NULL;
+	ip_hdr = (struct iphdr *)(skb_mac_header(skb) + ETH_HLEN);
+
+	/* return true if the More_Frag bit is set or the
+	 * fragment offset is nonzero */
+	if (ip_hdr->frag_off & htons(IP_MF | IP_OFFSET))
+		return true;
+	return false;
+}
+
+static bool alx_ipa_is_ipv4_pkt(uint8_t proto)
+{
+	if ((proto == RRD_PROTO_IPv4) ||
+		(proto == RRD_PROTO_IPv4_TCP) ||
+		(proto == RRD_PROTO_IPv4_UDP))
+		return true;
+	else
+		return false;
+}
+
+static bool alx_ipa_is_ipv6_pkt(uint8_t proto)
+{
+	if ((proto == RRD_PROTO_IPv6) ||
+		(proto == RRD_PROTO_IPv6_TCP) ||
+		(proto == RRD_PROTO_IPv6_UDP))
+		return true;
+	else
+		return false;
+}
+#endif
+
+/*
+ * alx_receive_skb
+ */
+static void alx_receive_skb(struct alx_adapter *adpt,
+                            struct sk_buff *skb,
+                            u16 vlan_tag, bool vlan_flag)
+{
+	if (vlan_flag) {
+		u16 vlan = 0;
+		ALX_TAG_TO_VLAN(vlan_tag, vlan);
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0))
+		/* the alx MAC only recognizes ETH_P_8021Q */
+		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan);
+#else
+		__vlan_hwaccel_put_tag(skb, vlan);
+#endif
+	}
+	netif_receive_skb(skb);
+}
+
+#ifdef MDM_PLATFORM
+/*
+ * alx_receive_skb_ipa
+ */
+static void alx_receive_skb_ipa(struct alx_adapter *adpt,
+				struct sk_buff *skb,
+				u16 vlan_tag, bool vlan_flag, uint8_t proto)
+{
+	struct ipa_tx_meta ipa_meta = {0x0};
+	int ret =0;
+	int ret = 0;
+	bool schedule_ipa_work = false;
+	bool is_pkt_ipv4 = alx_ipa_is_ipv4_pkt(proto);
+	bool is_pkt_ipv6 = alx_ipa_is_ipv6_pkt(proto);
+	struct alx_ipa_ctx *alx_ipa = adpt->palx_ipa;
+	skb_reset_mac_header(skb);
+	if (vlan_flag) {
+		u16 vlan = 0;
+
+		ALX_TAG_TO_VLAN(vlan_tag, vlan);
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0))
+		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan);
+#else
+		__vlan_hwaccel_put_tag(skb, vlan);
+#endif
+	}
+
+	/*
+	 * Non-IP packets, fragmented IPv4 packets, and anything received
+	 * before the ODU bridge is connected are sent to the network
+	 * stack; everything else goes to the IPA/ODU bridge driver.
+	 */
+	if (alx_ipa_is_non_ip_pkt(proto) ||
+	   (is_pkt_ipv4 && alx_ipa_is_ip_frag_pkt(skb)) ||
+	   (!CHK_ADPT_FLAG(2, ODU_CONNECT))) {
+		/* Send packet to network stack */
+		skb->protocol = eth_type_trans(skb, adpt->netdev);
+		adpt->palx_ipa->stats.non_ip_frag_pkt++;
+		/* keep the device awake for 200 msec to give the network
+		 * stack enough time to process the packet */
+		pm_wakeup_event(&adpt->pdev->dev, 200);
+		netif_receive_skb(skb);
+		return;
+	} else {
+		/* Send Packet to ODU bridge Driver */
+		spin_lock(&adpt->flow_ctrl_lock);
+
+		/* Drop Packets if we cannot handle them */
+		if (adpt->freeq_cnt == 0) {
+			adpt->palx_ipa->stats.flow_control_pkt_drop++;
+			IPC_DEBUG_LOW("ALX-IPA Flow Control - Drop Pkt,"
+				" IPA free descriptors = %d",
+				adpt->ipa_free_desc_cnt);
+			dev_kfree_skb(skb);
+			/*
+			 * Schedule the ipa_send_task if IPA still has
+			 * descriptors left: softirqs have higher priority
+			 * than the IPA_WRITE_DONE event, so ipa_send_task
+			 * would otherwise not be scheduled in time.
+			 */
+			if ((adpt->ipa_free_desc_cnt > 0) && !CHK_ADPT_FLAG(2, WQ_SCHED))
+				schedule_ipa_work = true;
+			goto unlock_and_schedule;
+		}
+
+		/* Remove extra padding if the rcv_pkt_len == 64 */
+		if (skb->len == ETH_ZLEN)
+			alx_ipa_remove_padding(skb, is_pkt_ipv4, is_pkt_ipv6);
+
+		/* Send Packet to IPA; if there are no pending packets
+		   and ipa has available descriptors */
+		if ((adpt->pendq_cnt == 0) && (adpt->ipa_free_desc_cnt > 0) &&
+			(alx_ipa_rm_request(adpt) == 0)) {
+			ipa_meta.dma_address_valid = false;
+			/* Send Packet to ODU bridge Driver */
+			ret = odu_bridge_tx_dp(skb, &ipa_meta);
+			if (ret) {
+				IPC_ERROR_LOW("odu_bridge_tx_dp() Failed!!"
+					" ret %d--Free SKB\n", ret);
+				dev_kfree_skb(skb);
+				adpt->palx_ipa->stats.rx_ipa_send_fail++;
+			} else {
+				adpt->ipa_free_desc_cnt--;
+				adpt->palx_ipa->stats.rx_ipa_send++;
+				/* Increment the ipa_rx_completion Counter */
+				spin_lock(&alx_ipa->rm_ipa_lock);
+				if (alx_ipa->acquire_wake_src == false) {
+					__pm_stay_awake(&alx_ipa->rm_ipa_wait);
+					alx_ipa->acquire_wake_src = true;
+				}
+				alx_ipa->ipa_rx_completion++;
+				spin_unlock(&alx_ipa->rm_ipa_lock);
+			}
+		} else if (adpt->pendq_cnt <= ALX_IPA_SYS_PIPE_DNE_PKTS) {
+			/* packets are already pending (or IPA is out of
+			 * descriptors): add this packet to the tail of
+			 * the pending queue */
+			node = list_first_entry(&adpt->free_queue_head,
+				struct alx_ipa_rx_desc_node, link);
+			list_del(&node->link);
+			node->skb_ptr = skb;
+			list_add_tail(&node->link, &adpt->pend_queue_head);
+			adpt->freeq_cnt--;
+			adpt->pendq_cnt++;
+			if (!CHK_ADPT_FLAG(2, WQ_SCHED))
+				schedule_ipa_work = true;
+		}
+unlock_and_schedule:
+		spin_unlock(&adpt->flow_ctrl_lock);
+		if (schedule_ipa_work) {
+			SET_ADPT_FLAG(2, WQ_SCHED);
+			schedule_work(&adpt->ipa_send_task);
+		}
+	}
+}
+#endif
+
+
+static bool alx_get_rrdesc(struct alx_adapter *adpt, struct alx_rx_queue *rxque,
+			   union alx_sw_rrdesc *srrd)
+{
+	u32 cnt = 0;
+	union alx_hw_rrdesc *hrrd =
+			ALX_RRD(rxque, rxque->rrq.consume_idx);
+
+	srrd->dfmt.dw0 = le32_to_cpu(hrrd->dfmt.dw0);
+	srrd->dfmt.dw1 = le32_to_cpu(hrrd->dfmt.dw1);
+	srrd->dfmt.dw2 = le32_to_cpu(hrrd->dfmt.dw2);
+	srrd->dfmt.dw3 = le32_to_cpu(hrrd->dfmt.dw3);
+
+	if (!srrd->genr.update)
+		return false;
+
+	/*
+	 * Workaround for the PCIe DMA write issue: hrrd->dfmt.dw0 must be
+	 * cleared to 0 after handling (see "hrrd->dfmt.dw0 = 0;" later in
+	 * alx_get_rrdesc), so a zero dw0 here means the descriptor has not
+	 * been fully synced by the DMA engine yet.
+	 */
+	if (srrd->dfmt.dw0 == 0) {
+		volatile u32 *flag = (volatile u32 *)&hrrd->dfmt.dw0;
+
+		/* re-read dword0 until the DMA engine has updated it */
+		while (*flag == 0) {
+			/* 10 retries should be plenty; we should never get here */
+			if (++cnt >= 10) {
+				alx_err(adpt, "ERROR, RRD update timeout\n");
+				return false;
+			}
+		}
+		/* re-read the descriptor once all four dwords are synced */
+		srrd->dfmt.dw0 = le32_to_cpu(hrrd->dfmt.dw0);
+		srrd->dfmt.dw1 = le32_to_cpu(hrrd->dfmt.dw1);
+		srrd->dfmt.dw2 = le32_to_cpu(hrrd->dfmt.dw2);
+		srrd->dfmt.dw3 = le32_to_cpu(hrrd->dfmt.dw3);
+	}
+
+	if (unlikely(srrd->genr.nor != 1)) {
+		/* TODO: support multiple RFDs */
+		IPC_EMERG(KERN_EMERG "Multiple RFDs not supported yet!\n");
+		alx_err(adpt, "Please make sure the PCIe DMA write workaround has been applied\n");
+		alx_err(adpt, "SRRD 0/1/2/3:0x%08x/0x%08x/0x%08x/0x%08x\n",
+			srrd->dfmt.dw0, srrd->dfmt.dw1, srrd->dfmt.dw2, srrd->dfmt.dw3);
+		alx_err(adpt, "HRRD 0/1/2/3:0x%08x/0x%08x/0x%08x/0x%08x\n",
+			hrrd->dfmt.dw0, hrrd->dfmt.dw1, hrrd->dfmt.dw2, hrrd->dfmt.dw3);
+	}
+
+	srrd->genr.update = 0;
+	hrrd->dfmt.dw3 = cpu_to_le32(srrd->dfmt.dw3); /* clear the update bit in hardware */
+
+	/* workaround for the PCIe DMA write issue: clear dw0 so a
+	 * not-yet-updated descriptor can be detected on the next pass */
+	hrrd->dfmt.dw0 = 0;
+
+	if (++rxque->rrq.consume_idx == rxque->rrq.count)
+		rxque->rrq.consume_idx = 0;
+
+	return true;
+}
+
+
+static bool alx_set_rfdesc(struct alx_rx_queue *rxque,
+			   union alx_sw_rfdesc *srfd)
+{
+	union alx_hw_rfdesc *hrfd =
+			ALX_RFD(rxque, rxque->rfq.produce_idx);
+
+	hrfd->qfmt.qw0 = cpu_to_le64(srfd->qfmt.qw0);
+
+	if (++rxque->rfq.produce_idx == rxque->rfq.count)
+		rxque->rfq.produce_idx = 0;
+
+	return true;
+}
+
+
+static bool alx_set_tpdesc(struct alx_tx_queue *txque,
+			   union alx_sw_tpdesc *stpd)
+{
+	union alx_hw_tpdesc *htpd;
+
+	txque->tpq.last_produce_idx = txque->tpq.produce_idx;
+	htpd = ALX_TPD(txque, txque->tpq.produce_idx);
+
+	if (++txque->tpq.produce_idx == txque->tpq.count)
+		txque->tpq.produce_idx = 0;
+
+	htpd->dfmt.dw0 = cpu_to_le32(stpd->dfmt.dw0);
+	htpd->dfmt.dw1 = cpu_to_le32(stpd->dfmt.dw1);
+	htpd->qfmt.qw1 = cpu_to_le64(stpd->qfmt.qw1);
+
+	return true;
+}
+
+
+static void alx_set_tpdesc_lastfrag(struct alx_tx_queue *txque)
+{
+	union alx_hw_tpdesc *htpd;
+#define ALX_TPD_LAST_FRAGMENT  0x80000000
+	htpd = ALX_TPD(txque, txque->tpq.last_produce_idx);
+	htpd->dfmt.dw1 |= cpu_to_le32(ALX_TPD_LAST_FRAGMENT);
+}
+
+
+static int alx_refresh_rx_buffer(struct alx_rx_queue *rxque)
+{
+	struct alx_adapter *adpt = netdev_priv(rxque->netdev);
+	struct alx_hw *hw = &adpt->hw;
+	struct alx_buffer *curr_rxbuf;
+	struct alx_buffer *next_rxbuf;
+	union alx_sw_rfdesc srfd;
+	struct sk_buff *skb;
+	void *skb_data = NULL;
+	u16 count = 0;
+	u16 next_produce_idx;
+
+	next_produce_idx = rxque->rfq.produce_idx;
+	if (++next_produce_idx == rxque->rfq.count)
+		next_produce_idx = 0;
+	curr_rxbuf = GET_RF_BUFFER(rxque, rxque->rfq.produce_idx);
+	next_rxbuf = GET_RF_BUFFER(rxque, next_produce_idx);
+
+	/* the ring always keeps one blank rx buffer */
+	while (next_rxbuf->dma == 0) {
+		skb = dev_alloc_skb(adpt->rxbuf_size);
+		if (unlikely(!skb)) {
+			alx_err(adpt, "alloc rx buffer failed\n");
+			break;
+		}
+
+		/*
+		 * Make buffer alignment 2 beyond a 16 byte boundary
+		 * this will result in a 16 byte aligned IP header after
+		 * the 14 byte MAC header is removed
+		 */
+		skb_data = skb->data;
+		/*skb_reserve(skb, NET_IP_ALIGN);*/
+		curr_rxbuf->skb = skb;
+		curr_rxbuf->length = adpt->rxbuf_size;
+		curr_rxbuf->dma = dma_map_single(rxque->dev,
+						 skb_data,
+						 curr_rxbuf->length,
+						 DMA_FROM_DEVICE);
+		srfd.genr.addr = curr_rxbuf->dma;
+		alx_set_rfdesc(rxque, &srfd);
+		/*alx_err(adpt, "rx-buffer-addr=%llx\n",
+			(u64)curr_rxbuf->dma);*/
+
+		next_produce_idx = rxque->rfq.produce_idx;
+		if (++next_produce_idx == rxque->rfq.count)
+			next_produce_idx = 0;
+		curr_rxbuf = GET_RF_BUFFER(rxque, rxque->rfq.produce_idx);
+		next_rxbuf = GET_RF_BUFFER(rxque, next_produce_idx);
+		count++;
+	}
+
+	if (count) {
+		wmb();
+		alx_mem_w16(hw, rxque->produce_reg, rxque->rfq.produce_idx);
+		alx_netif_dbg(adpt, rx_err, adpt->netdev,
+			   "RX[%d]: prod_reg[%x] = 0x%x, rfq.prod_idx = 0x%x\n",
+			   rxque->que_idx, rxque->produce_reg,
+			   rxque->rfq.produce_idx, rxque->rfq.produce_idx);
+	}
+	return count;
+}
+
+
+static void alx_clean_rfdesc(struct alx_rx_queue *rxque,
+			     union alx_sw_rrdesc *srrd)
+{
+	struct alx_buffer *rfbuf = rxque->rfq.rfbuff;
+	u32 consume_idx = srrd->genr.si;
+	u32 i;
+
+	for (i = 0; i < srrd->genr.nor; i++) {
+		rfbuf[consume_idx].skb = NULL;
+		if (++consume_idx == rxque->rfq.count)
+			consume_idx = 0;
+	}
+	rxque->rfq.consume_idx = consume_idx;
+}
+
+#if 0
+#define ROLL_BK_NUM 16
+
+static void alx_dump_rrd(struct alx_adapter *adpt, struct alx_rx_queue *rxque)
+{
+	union alx_sw_rrdesc srrd;
+	union alx_hw_rrdesc *hrrd;
+
+	u16 begin, end;
+
+
+	alx_err(adpt, "PATCH v5, dumping RRD .... consumer idx=%x\n",
+			rxque->rrq.consume_idx);
+	begin = (rxque->rrq.consume_idx - ROLL_BK_NUM) % rxque->rrq.count;
+	end = (rxque->rrq.consume_idx + ROLL_BK_NUM) % rxque->rrq.count;
+
+	while (begin != end) {
+		hrrd = ALX_RRD(rxque, begin);
+		srrd.dfmt.dw0 = le32_to_cpu(hrrd->dfmt.dw0);
+		srrd.dfmt.dw1 = le32_to_cpu(hrrd->dfmt.dw1);
+		srrd.dfmt.dw2 = le32_to_cpu(hrrd->dfmt.dw2);
+		srrd.dfmt.dw3 = le32_to_cpu(hrrd->dfmt.dw3);
+		alx_err(adpt, "Index:%x\n", begin);
+		alx_err(adpt, "rrd->word0/1/2/3:0x%08x/0x%08x/0x%08x/0x%08x\n",
+			srrd.dfmt.dw0, srrd.dfmt.dw1, srrd.dfmt.dw2, srrd.dfmt.dw3);
+
+		if (++begin == rxque->rrq.count)
+			begin = 0;
+	}
+}
+
+
+static void alx_dump_rfd(struct alx_adapter *adpt, struct alx_rx_queue *rxque, u16 idx)
+{
+	struct alx_buffer *rfbuf;
+	u16 begin, end;
+
+
+	alx_err(adpt, "RFD:%x\n", idx);
+
+	//begin = (idx - 3) % rxque->rfq.count;
+	//end = (idx + 3) % rxque->rfq.count;
+	begin = 0;
+	end = rxque->rfq.count - 1;
+
+	while (begin != end) {
+		rfbuf = GET_RF_BUFFER(rxque, begin);
+		alx_err(adpt, "IDX(%x): addr=0x%llx\n", begin, (u64)rfbuf->dma);
+		if (++begin == rxque->rfq.count)
+			begin = 0;
+	}
+}
+
+static void alx_dump_register(struct alx_adapter *adpt)
+{
+	struct alx_hw *hw = &adpt->hw;
+	u16 reg, count;
+	u32 val;
+
+	for (reg = 0x40, count = 0; count < 0x20; count++, reg++) {
+		val = alx_read_dbg_reg(hw, reg);
+		alx_err(adpt, "DBG-reg(%x)=%08X\n", reg, val);
+	}
+
+}
+
+#endif
+
+u32 alx_read_dbg_reg(struct alx_hw *hw, u16 reg)
+{
+	u32 val;
+
+	alx_mem_w32(hw, 0x1900, reg);
+	alx_mem_r32(hw, 0x1904, &val);
+
+	return val;
+}
+
+static bool alx_dispatch_rx_irq(struct alx_msix_param *msix,
+				struct alx_rx_queue *rxque)
+{
+	struct alx_adapter *adpt = msix->adpt;
+	struct pci_dev *pdev = adpt->pdev;
+	struct net_device *netdev  = adpt->netdev;
+
+	union alx_sw_rrdesc srrd;
+	struct alx_buffer *rfbuf;
+	struct sk_buff *skb;
+	struct alx_rx_queue *swque;
+	struct alx_sw_buffer *curr_swbuf;
+	struct alx_sw_buffer *next_swbuf;
+
+	u16 next_produce_idx;
+	u16 count = 0;
+
+	while (1) {
+		if (!alx_get_rrdesc(adpt, rxque, &srrd))
+			break;
+
+		if (srrd.genr.res || srrd.genr.lene) {
+			alx_clean_rfdesc(rxque, &srrd);
+			netif_warn(adpt, rx_err, adpt->netdev,
+				   "wrong packet! rrd->word3 is 0x%08x\n",
+				   srrd.dfmt.dw3);
+			continue;
+		}
+
+		/* Good Receive */
+		if (likely(srrd.genr.nor == 1)) {
+			rfbuf = GET_RF_BUFFER(rxque, srrd.genr.si);
+			pci_unmap_single(pdev, rfbuf->dma,
+					 rfbuf->length, DMA_FROM_DEVICE);
+			rfbuf->dma = 0;
+			skb = rfbuf->skb;
+			alx_netif_dbg(adpt, rx_err, adpt->netdev,
+				   "skb addr = %p, rxbuf_len = %x\n",
+				   skb->data, rfbuf->length);
+		} else {
+			/* TODO */
+			alx_err(adpt, "alx_dispatch_rx_irq: Multiple RFDs not supported yet!\n");
+			break;
+		}
+		alx_clean_rfdesc(rxque, &srrd);
+
+		skb_put(skb, srrd.genr.pkt_len - ETH_FCS_LEN);
+		skb->protocol = eth_type_trans(skb, netdev);
+		skb_checksum_none_assert(skb);
+
+		/* start to dispatch */
+		swque = adpt->rx_queue[srrd.genr.rss_cpu];
+		next_produce_idx = swque->swq.produce_idx;
+		if (++next_produce_idx == swque->swq.count)
+			next_produce_idx = 0;
+
+		curr_swbuf = GET_SW_BUFFER(swque, swque->swq.produce_idx);
+		next_swbuf = GET_SW_BUFFER(swque, next_produce_idx);
+
+		/*
+		 * if the queue is full, discard the packet;
+		 * always keep at least one blank sw_buffer.
+		 */
+		if (!next_swbuf->skb) {
+			curr_swbuf->skb = skb;
+			curr_swbuf->vlan_tag = srrd.genr.vlan_tag;
+			curr_swbuf->vlan_flag = srrd.genr.vlan_flag;
+			if (++swque->swq.produce_idx == swque->swq.count)
+				swque->swq.produce_idx = 0;
+		}
+
+		count++;
+		if (count == 32)
+			break;
+	}
+	if (count)
+		alx_refresh_rx_buffer(rxque);
+	return true;
+}
+
+
+static bool alx_handle_srx_irq(struct alx_msix_param *msix,
+			       struct alx_rx_queue *rxque,
+			       int *num_pkts, int max_pkts)
+{
+	struct alx_adapter *adpt = msix->adpt;
+	struct alx_sw_buffer *swbuf;
+	bool retval = true;
+
+	while (rxque->swq.consume_idx != rxque->swq.produce_idx) {
+		swbuf = GET_SW_BUFFER(rxque, rxque->swq.consume_idx);
+
+		alx_receive_skb(adpt, swbuf->skb, (u16)swbuf->vlan_tag,
+				(bool)swbuf->vlan_flag);
+		swbuf->skb = NULL;
+
+		if (++rxque->swq.consume_idx == rxque->swq.count)
+			rxque->swq.consume_idx = 0;
+
+		(*num_pkts)++;
+		if (*num_pkts >= max_pkts) {
+			retval = false;
+			break;
+		}
+	}
+	return retval;
+}
+
+
+static bool alx_handle_rx_irq(struct alx_msix_param *msix,
+			      struct alx_rx_queue *rxque,
+			      int *num_pkts, int max_pkts)
+{
+	struct alx_adapter *adpt = msix->adpt;
+	struct alx_hw *hw = &adpt->hw;
+	struct pci_dev *pdev = adpt->pdev;
+
+	union alx_sw_rrdesc srrd;
+	struct alx_buffer *rfbuf;
+	struct sk_buff *skb;
+
+	u16 hw_consume_idx, num_consume_pkts;
+	u16 count = 0;
+
+	alx_mem_r16(hw, rxque->consume_reg, &hw_consume_idx);
+	num_consume_pkts = (hw_consume_idx > rxque->rrq.consume_idx) ?
+		(hw_consume_idx -  rxque->rrq.consume_idx) :
+		(hw_consume_idx + rxque->rrq.count - rxque->rrq.consume_idx);
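+	/*
+	 * The ternary above handles ring wrap-around; e.g. with
+	 * rrq.count = 256, hw_consume_idx = 5 and rrq.consume_idx = 250
+	 * give 5 + 256 - 250 = 11 descriptors to process.
+	 */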
+
+	while (1) {
+		if (!num_consume_pkts)
+			break;
+
+		if (!alx_get_rrdesc(adpt, rxque, &srrd))
+			break;
+
+		if (srrd.genr.res || srrd.genr.lene) {
+			alx_clean_rfdesc(rxque, &srrd);
+			netif_warn(adpt, rx_err, adpt->netdev,
+				   "wrong packet! rrd->word3 is 0x%08x\n",
+				   srrd.dfmt.dw3);
+			continue;
+		}
+
+		/* TODO: Good Receive */
+		if (likely(srrd.genr.nor == 1)) {
+			rfbuf = GET_RF_BUFFER(rxque, srrd.genr.si);
+			pci_unmap_single(pdev, rfbuf->dma, rfbuf->length,
+					 DMA_FROM_DEVICE);
+			rfbuf->dma = 0;
+			skb = rfbuf->skb;
+		} else {
+			/* TODO */
+			alx_err(adpt, "alx_handle_rx_irq: Multiple RFDs not supported yet!\n");
+			break;
+		}
+		alx_clean_rfdesc(rxque, &srrd);
+		skb_put(skb, srrd.genr.pkt_len - ETH_FCS_LEN);
+#ifdef MDM_PLATFORM
+		/* don't strip the MAC header if sending through IPA */
+		if (!ipa_enable) {
+			skb->protocol = eth_type_trans(skb, adpt->netdev);
+		}
+#else
+		skb->protocol = eth_type_trans(skb, adpt->netdev);
+#endif
+
+		skb_checksum_none_assert(skb);
+
+		/* the HW rrd->vlan_flag stays true even when VLAN stripping
+		 * is disabled; correct it here */
+		if (!CHK_HW_FLAG(VLANSTRIP_EN))
+			srrd.genr.vlan_flag = false;
+
+#ifdef MDM_PLATFORM
+		/* Use IPA path only if VLAN stripping is not done and IPA is
+		 * enabled */
+		if (ipa_enable && !srrd.genr.vlan_flag)
+			alx_receive_skb_ipa(adpt, skb, (u16)srrd.genr.vlan_tag,
+					(bool)srrd.genr.vlan_flag,
+					(uint8_t)srrd.genr.proto);
+		else
+#endif
+			alx_receive_skb(adpt, skb, (u16)srrd.genr.vlan_tag,
+					(bool)srrd.genr.vlan_flag);
+
+		num_consume_pkts--;
+		count++;
+		(*num_pkts)++;
+		if (*num_pkts >= max_pkts)
+			break;
+	}
+	if (count)
+		alx_refresh_rx_buffer(rxque);
+
+	return true;
+}
+
+
+static bool alx_handle_tx_irq(struct alx_msix_param *msix,
+			      struct alx_tx_queue *txque)
+{
+	struct alx_adapter *adpt = msix->adpt;
+	struct alx_hw *hw = &adpt->hw;
+	struct alx_buffer *tpbuf;
+#ifdef MDM_PLATFORM
+	struct alx_ipa_ctx *alx_ipa;
+	int num_tx_comp;
+#endif
+	u16 consume_data;
+
+#ifdef MDM_PLATFORM
+	if (ipa_enable) {
+		alx_ipa = adpt->palx_ipa;
+		num_tx_comp = 0;
+	}
+#endif
+
+	alx_mem_r16(hw, txque->consume_reg, &consume_data);
+	alx_netif_dbg(adpt, tx_err, adpt->netdev,
+		   "TX[%d]: consume_reg[0x%x] = 0x%x, tpq.consume_idx = 0x%x\n",
+		   txque->que_idx, txque->consume_reg, consume_data,
+		   txque->tpq.consume_idx);
+
+
+	while (txque->tpq.consume_idx != consume_data) {
+		tpbuf = GET_TP_BUFFER(txque, txque->tpq.consume_idx);
+		if (tpbuf->dma) {
+			pci_unmap_page(adpt->pdev, tpbuf->dma, tpbuf->length,
+				       DMA_TO_DEVICE);
+			tpbuf->dma = 0;
+		}
+
+		if (tpbuf->skb) {
+			/* since this is called from NAPI we are already in
+			 * bh context, so it's safe to free the skb here */
+			dev_kfree_skb(tpbuf->skb);
+			tpbuf->skb = NULL;
+		}
+
+		if (++txque->tpq.consume_idx == txque->tpq.count)
+			txque->tpq.consume_idx = 0;
+
+#ifdef MDM_PLATFORM
+		if (ipa_enable)
+			/* update the count of TX completions received */
+			num_tx_comp++;
+#endif
+	}
+
+#ifdef MDM_PLATFORM
+	if (ipa_enable) {
+		/* Release Wakelock if all TX Completion is done */
+		spin_lock_bh(&alx_ipa->rm_ipa_lock);
+		alx_ipa->alx_tx_completion -= num_tx_comp;
+		if (!alx_ipa->ipa_rx_completion && !alx_ipa->alx_tx_completion &&
+			(alx_ipa->acquire_wake_src == true)) {
+			__pm_relax(&alx_ipa->rm_ipa_wait);
+			alx_ipa->acquire_wake_src = false;
+		}
+		spin_unlock_bh(&alx_ipa->rm_ipa_lock);
+	}
+#endif
+
+	if (netif_queue_stopped(adpt->netdev) &&
+		netif_carrier_ok(adpt->netdev)) {
+		netif_wake_queue(adpt->netdev);
+	}
+	return true;
+}
+
+
+static irqreturn_t alx_msix_timer(int irq, void *data)
+{
+	struct alx_msix_param *msix = data;
+	struct alx_adapter *adpt = msix->adpt;
+	struct alx_hw *hw = &adpt->hw;
+	u32 isr;
+
+	hw->cbs.disable_msix_intr(hw, msix->vec_idx);
+
+	alx_mem_r32(hw, ALX_ISR, &isr);
+	isr = isr & (ALX_ISR_TIMER | ALX_ISR_MANU);
+
+
+	if (isr == 0) {
+		hw->cbs.enable_msix_intr(hw, msix->vec_idx);
+		return IRQ_NONE;
+	}
+
+	/* Ack ISR */
+	alx_mem_w32(hw, ALX_ISR, isr);
+
+	if (isr & ALX_ISR_MANU) {
+		adpt->net_stats.tx_carrier_errors++;
+		alx_check_lsc(adpt);
+	}
+
+	hw->cbs.enable_msix_intr(hw, msix->vec_idx);
+
+	return IRQ_HANDLED;
+}
+
+
+static irqreturn_t alx_msix_alert(int irq, void *data)
+{
+	struct alx_msix_param *msix = data;
+	struct alx_adapter *adpt = msix->adpt;
+	struct alx_hw *hw = &adpt->hw;
+	u32 isr;
+
+	hw->cbs.disable_msix_intr(hw, msix->vec_idx);
+
+	alx_mem_r32(hw, ALX_ISR, &isr);
+	isr = isr & ALX_ISR_ALERT_MASK;
+
+	if (isr == 0) {
+		hw->cbs.enable_msix_intr(hw, msix->vec_idx);
+		return IRQ_NONE;
+	}
+	alx_mem_w32(hw, ALX_ISR, isr);
+
+	hw->cbs.enable_msix_intr(hw, msix->vec_idx);
+
+	return IRQ_HANDLED;
+}
+
+
+static irqreturn_t alx_msix_smb(int irq, void *data)
+{
+	struct alx_msix_param *msix = data;
+	struct alx_adapter *adpt = msix->adpt;
+	struct alx_hw *hw = &adpt->hw;
+
+	hw->cbs.disable_msix_intr(hw, msix->vec_idx);
+
+	hw->cbs.enable_msix_intr(hw, msix->vec_idx);
+
+	return IRQ_HANDLED;
+}
+
+
+static irqreturn_t alx_msix_phy(int irq, void *data)
+{
+	struct alx_msix_param *msix = data;
+	struct alx_adapter *adpt = msix->adpt;
+	struct alx_hw *hw = &adpt->hw;
+
+	hw->cbs.disable_msix_intr(hw, msix->vec_idx);
+
+	if (hw->cbs.ack_phy_intr)
+		hw->cbs.ack_phy_intr(hw);
+
+	adpt->net_stats.tx_carrier_errors++;
+	alx_check_lsc(adpt);
+
+	hw->cbs.enable_msix_intr(hw, msix->vec_idx);
+
+	return IRQ_HANDLED;
+}
+
+
+/*
+ * alx_msix_rtx
+ */
+static irqreturn_t alx_msix_rtx(int irq, void *data)
+{
+	struct alx_msix_param *msix = data;
+	struct alx_adapter  *adpt = msix->adpt;
+	struct alx_hw *hw = &adpt->hw;
+
+	alx_netif_dbg(adpt, intr, adpt->netdev,
+		   "msix vec_idx = %d\n", msix->vec_idx);
+
+	hw->cbs.disable_msix_intr(hw, msix->vec_idx);
+	if (!msix->rx_count && !msix->tx_count) {
+		hw->cbs.enable_msix_intr(hw, msix->vec_idx);
+		return IRQ_HANDLED;
+	}
+
+	napi_schedule(&msix->napi);
+	return IRQ_HANDLED;
+}
+
+
+/*
+ * alx_napi_msix_rtx
+ */
+static int alx_napi_msix_rtx(struct napi_struct *napi, int max_pkts)
+{
+	struct alx_msix_param *msix =
+			       container_of(napi, struct alx_msix_param, napi);
+	struct alx_adapter *adpt = msix->adpt;
+	struct alx_hw *hw = &adpt->hw;
+	struct alx_rx_queue *rxque;
+	struct alx_rx_queue *swque;
+	struct alx_tx_queue *txque;
+	unsigned long flags = 0;
+	bool complete = true;
+	int num_pkts = 0;
+	int rque_idx, tque_idx;
+	int i, j;
+
+	alx_netif_dbg(adpt, intr, adpt->netdev,
+		   "NAPI: msix vec_idx = %d\n", msix->vec_idx);
+
+	_IPC_INFO("NAPI: msix vec_idx = %d\n", msix->vec_idx);
+
+	/* RX */
+	for (i = 0; i < msix->rx_count; i++) {
+		rque_idx = msix->rx_map[i];
+		num_pkts = 0;
+		if (CHK_ADPT_FLAG(0, SRSS_EN)) {
+			if (!spin_trylock_irqsave(&adpt->rx_lock, flags))
+				goto clean_sw_irq;
+
+			for (j = 0; j < adpt->num_hw_rxques; j++)
+				alx_dispatch_rx_irq(msix, adpt->rx_queue[j]);
+
+			spin_unlock_irqrestore(&adpt->rx_lock, flags);
+clean_sw_irq:
+			swque = adpt->rx_queue[rque_idx];
+			complete &= alx_handle_srx_irq(msix, swque, &num_pkts,
+						       max_pkts);
+
+		} else {
+			rxque = adpt->rx_queue[rque_idx];
+			complete &= alx_handle_rx_irq(msix, rxque, &num_pkts,
+						      max_pkts);
+		}
+	}
+
+
+	/* Handle TX */
+	for (i = 0; i < msix->tx_count; i++) {
+		tque_idx = msix->tx_map[i];
+		txque = adpt->tx_queue[tque_idx];
+		complete &= alx_handle_tx_irq(msix, txque);
+	}
+
+	if (!complete) {
+		alx_netif_dbg(adpt, intr, adpt->netdev,
+			   "Some packets in the queue are not handled!\n");
+		num_pkts = max_pkts;
+	}
+
+	alx_netif_dbg(adpt, intr, adpt->netdev,
+		   "num_pkts = %d, max_pkts = %d\n", num_pkts, max_pkts);
+	/* If all work done, exit the polling mode */
+	if (num_pkts < max_pkts) {
+		napi_complete(napi);
+		if (!CHK_ADPT_FLAG(1, STATE_DOWN))
+			hw->cbs.enable_msix_intr(hw, msix->vec_idx);
+	}
+
+	return num_pkts;
+}
+
+
+
+/*
+ * alx_napi_legacy_rtx - NAPI Rx polling callback
+ */
+static int alx_napi_legacy_rtx(struct napi_struct *napi, int max_pkts)
+{
+	struct alx_msix_param *msix =
+				container_of(napi, struct alx_msix_param, napi);
+	struct alx_adapter *adpt = msix->adpt;
+	struct alx_hw *hw = &adpt->hw;
+	int complete = true;
+	int num_pkts = 0;
+	int que_idx;
+
+	alx_netif_dbg(adpt, intr, adpt->netdev,
+		   "NAPI: msix vec_idx = %d\n", msix->vec_idx);
+
+	/* Keep link state information with original netdev */
+	if (!netif_carrier_ok(adpt->netdev))
+		goto enable_rtx_irq;
+
+	for (que_idx = 0; que_idx < adpt->num_txques; que_idx++)
+		complete &= alx_handle_tx_irq(msix, adpt->tx_queue[que_idx]);
+
+	for (que_idx = 0; que_idx < adpt->num_hw_rxques; que_idx++) {
+		num_pkts = 0;
+		complete &= alx_handle_rx_irq(msix, adpt->rx_queue[que_idx],
+					      &num_pkts, max_pkts);
+	}
+
+	if (!complete)
+		num_pkts = max_pkts;
+
+	if (num_pkts < max_pkts) {
+enable_rtx_irq:
+		napi_complete(napi);
+		hw->intr_mask |= (ALX_ISR_RXQ | ALX_ISR_TXQ);
+		alx_mem_w32(hw, ALX_IMR, hw->intr_mask);
+	}
+	return num_pkts;
+}
+
+
+static void alx_set_msix_flags(struct alx_msix_param *msix,
+			       enum alx_msix_type type, int index)
+{
+	if (type == alx_msix_type_rx) {
+		switch (index) {
+		case 0:
+			SET_MSIX_FLAG(RX0);
+			break;
+		case 1:
+			SET_MSIX_FLAG(RX1);
+			break;
+		case 2:
+			SET_MSIX_FLAG(RX2);
+			break;
+		case 3:
+			SET_MSIX_FLAG(RX3);
+			break;
+		case 4:
+			SET_MSIX_FLAG(RX4);
+			break;
+		case 5:
+			SET_MSIX_FLAG(RX5);
+			break;
+		case 6:
+			SET_MSIX_FLAG(RX6);
+			break;
+		case 7:
+			SET_MSIX_FLAG(RX7);
+			break;
+		default:
+			IPC_ERROR("alx_set_msix_flags: rx error.");
+			break;
+		}
+	} else if (type == alx_msix_type_tx) {
+		switch (index) {
+		case 0:
+			SET_MSIX_FLAG(TX0);
+			break;
+		case 1:
+			SET_MSIX_FLAG(TX1);
+			break;
+		case 2:
+			SET_MSIX_FLAG(TX2);
+			break;
+		case 3:
+			SET_MSIX_FLAG(TX3);
+			break;
+		default:
+			IPC_ERROR("alx_set_msix_flags: tx error.");
+			break;
+		}
+	} else if (type == alx_msix_type_other) {
+		switch (index) {
+		case ALX_MSIX_TYPE_OTH_TIMER:
+			SET_MSIX_FLAG(TIMER);
+			break;
+		case ALX_MSIX_TYPE_OTH_ALERT:
+			SET_MSIX_FLAG(ALERT);
+			break;
+		case ALX_MSIX_TYPE_OTH_SMB:
+			SET_MSIX_FLAG(SMB);
+			break;
+		case ALX_MSIX_TYPE_OTH_PHY:
+			SET_MSIX_FLAG(PHY);
+			break;
+		default:
+			IPC_ERROR("alx_set_msix_flags: other error.");
+			break;
+		}
+	}
+}
+
+
+/* alx_setup_msix_maps */
+static int alx_setup_msix_maps(struct alx_adapter *adpt)
+{
+	int msix_idx = 0;
+	int que_idx = 0;
+	int num_rxques = adpt->num_rxques;
+	int num_txques = adpt->num_txques;
+	int num_msix_rxques = adpt->num_msix_rxques;
+	int num_msix_txques = adpt->num_msix_txques;
+	int num_msix_noques = adpt->num_msix_noques;
+
+	if (CHK_ADPT_FLAG(0, FIXED_MSIX))
+		goto fixed_msix_map;
+
+	netif_warn(adpt, ifup, adpt->netdev,
+		   "don't support non-fixed msix map\n");
+	return -EINVAL;
+
+fixed_msix_map:
+	/*
+	 * For RX queue msix map
+	 */
+	msix_idx = 0;
+	for (que_idx = 0; que_idx < num_msix_rxques; que_idx++, msix_idx++) {
+		struct alx_msix_param *msix = adpt->msix[msix_idx];
+		if (que_idx < num_rxques) {
+			adpt->rx_queue[que_idx]->msix = msix;
+			msix->rx_map[msix->rx_count] = que_idx;
+			msix->rx_count++;
+			alx_set_msix_flags(msix, alx_msix_type_rx, que_idx);
+		}
+	}
+	if (msix_idx != num_msix_rxques)
+		netif_warn(adpt, ifup, adpt->netdev, "msix_idx is wrong\n");
+
+	/*
+	 * For TX queue msix map
+	 */
+	for (que_idx = 0; que_idx < num_msix_txques; que_idx++, msix_idx++) {
+		struct alx_msix_param *msix = adpt->msix[msix_idx];
+		if (que_idx < num_txques) {
+			adpt->tx_queue[que_idx]->msix = msix;
+			msix->tx_map[msix->tx_count] = que_idx;
+			msix->tx_count++;
+			alx_set_msix_flags(msix, alx_msix_type_tx, que_idx);
+		}
+	}
+	if (msix_idx != (num_msix_rxques + num_msix_txques))
+		netif_warn(adpt, ifup, adpt->netdev, "msix_idx is wrong\n");
+
+
+	/*
+	 * For NON queue msix map
+	 */
+	for (que_idx = 0; que_idx < num_msix_noques; que_idx++, msix_idx++) {
+		struct alx_msix_param *msix = adpt->msix[msix_idx];
+		alx_set_msix_flags(msix, alx_msix_type_other, que_idx);
+	}
+	return 0;
+}
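+/*
+ * With the fixed map above, MSI-X vectors are laid out contiguously:
+ * vectors [0, num_msix_rxques) serve RX queues, the next
+ * num_msix_txques vectors serve TX queues, and the remaining
+ * num_msix_noques vectors serve the timer/alert/SMB/PHY interrupts.
+ */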
+
+
+static inline void alx_reset_msix_maps(struct alx_adapter *adpt)
+{
+	int que_idx, msix_idx;
+
+	for (que_idx = 0; que_idx < adpt->num_rxques; que_idx++)
+		adpt->rx_queue[que_idx]->msix = NULL;
+	for (que_idx = 0; que_idx < adpt->num_txques; que_idx++)
+		adpt->tx_queue[que_idx]->msix = NULL;
+
+	for (msix_idx = 0; msix_idx < adpt->num_msix_intrs; msix_idx++) {
+		struct alx_msix_param *msix = adpt->msix[msix_idx];
+		memset(msix->rx_map, 0, sizeof(msix->rx_map));
+		memset(msix->tx_map, 0, sizeof(msix->tx_map));
+		msix->rx_count = 0;
+		msix->tx_count = 0;
+		CLI_MSIX_FLAG(ALL);
+	}
+}
+
+
+/*
+ * alx_enable_intr - Enable default interrupt generation settings
+ */
+static inline void alx_enable_intr(struct alx_adapter *adpt)
+{
+	struct alx_hw *hw = &adpt->hw;
+	int i;
+
+	if (!atomic_dec_and_test(&adpt->irq_sem))
+		return;
+
+	if (hw->cbs.enable_legacy_intr)
+		hw->cbs.enable_legacy_intr(hw);
+
+	/* enable all MSIX IRQs */
+	for (i = 0; i < adpt->num_msix_intrs; i++) {
+		if (hw->cbs.disable_msix_intr)
+			hw->cbs.disable_msix_intr(hw, i);
+		if (hw->cbs.enable_msix_intr)
+			hw->cbs.enable_msix_intr(hw, i);
+	}
+}
+
+
+/* alx_disable_intr - Mask off interrupt generation on the NIC */
+static inline void alx_disable_intr(struct alx_adapter *adpt)
+{
+	struct alx_hw *hw = &adpt->hw;
+	atomic_inc(&adpt->irq_sem);
+
+	if (hw->cbs.disable_legacy_intr)
+		hw->cbs.disable_legacy_intr(hw);
+
+	if (CHK_ADPT_FLAG(0, MSIX_EN)) {
+		int i;
+		for (i = 0; i < adpt->num_msix_intrs; i++) {
+			synchronize_irq(adpt->msix_entries[i].vector);
+			hw->cbs.disable_msix_intr(hw, i);
+		}
+	} else {
+		synchronize_irq(adpt->pdev->irq);
+	}
+}
+
+
+/*
+ * alx_interrupt - Interrupt Handler
+ */
+static irqreturn_t alx_interrupt(int irq, void *data)
+{
+	struct net_device *netdev  = data;
+	struct alx_adapter *adpt = netdev_priv(netdev);
+	struct alx_hw *hw = &adpt->hw;
+	struct alx_msix_param *msix = adpt->msix[0];
+	int max_intrs = ALX_MAX_HANDLED_INTRS;
+	u32 isr, status;
+
+	do {
+		alx_mem_r32(hw, ALX_ISR, &isr);
+		status = isr & hw->intr_mask;
+
+		if (status == 0) {
+			alx_mem_w32(hw, ALX_ISR, 0);
+			if (max_intrs != ALX_MAX_HANDLED_INTRS)
+				return IRQ_HANDLED;
+			return IRQ_NONE;
+		}
+
+		/* GPHY_INT is received when the cable is plugged in:
+		 * connect to the ODU bridge driver */
+		/* ack ISR to PHY register */
+		if (status & ALX_ISR_PHY) {
+			hw->cbs.ack_phy_intr(hw);
+		}
+		/* ack ISR to MAC register */
+		alx_mem_w32(hw, ALX_ISR, status | ALX_ISR_DIS);
+
+		if (status & ALX_ISR_ERROR) {
+			netif_warn(adpt, intr, adpt->netdev,
+				   "isr error (status = 0x%x)\n",
+				   status & ALX_ISR_ERROR);
+			if (status & ALX_ISR_PCIE_FERR) {
+				alx_mem_w16(hw, ALX_DEV_STAT,
+					    ALX_DEV_STAT_FERR |
+					    ALX_DEV_STAT_NFERR |
+					    ALX_DEV_STAT_CERR);
+			}
+			/* reset MAC */
+			SET_ADPT_FLAG(0, TASK_REINIT_REQ);
+			alx_task_schedule(adpt);
+			return IRQ_HANDLED;
+		}
+
+		if (status & (ALX_ISR_RXQ | ALX_ISR_TXQ)) {
+			if (napi_schedule_prep(&(msix->napi))) {
+				hw->intr_mask &= ~(ALX_ISR_RXQ | ALX_ISR_TXQ);
+				alx_mem_w32(hw, ALX_IMR, hw->intr_mask);
+				__napi_schedule(&(msix->napi));
+			}
+		}
+
+		if (status & ALX_ISR_OVER) {
+			netif_warn(adpt, intr, adpt->netdev,
+				   "TX/RX overflow (status = 0x%x)\n",
+				   status & ALX_ISR_OVER);
+		}
+
+		/* link event */
+		if (status & (ALX_ISR_PHY | ALX_ISR_MANU)) {
+			netdev->stats.tx_carrier_errors++;
+			alx_check_lsc(adpt);
+			break;
+		}
+
+	} while (--max_intrs > 0);
+	/* re-enable interrupts */
+	alx_mem_w32(hw, ALX_ISR, 0);
+	return IRQ_HANDLED;
+}
+
+
+/*
+ * alx_request_msix_irqs - Initialize MSI-X interrupts
+ */
+static int alx_request_msix_irq(struct alx_adapter *adpt)
+{
+	struct net_device *netdev = adpt->netdev;
+	irqreturn_t (*handler)(int, void *);
+	int msix_idx;
+	int num_msix_intrs = adpt->num_msix_intrs;
+	int rx_idx = 0, tx_idx = 0;
+	int i;
+	int retval;
+
+	retval = alx_setup_msix_maps(adpt);
+	if (retval)
+		return retval;
+
+	for (msix_idx = 0; msix_idx < num_msix_intrs; msix_idx++) {
+		struct alx_msix_param *msix = adpt->msix[msix_idx];
+
+		if (CHK_MSIX_FLAG(RXS) && CHK_MSIX_FLAG(TXS)) {
+			handler = alx_msix_rtx;
+			snprintf(msix->name, sizeof(msix->name), "%s:%s%d",
+					    netdev->name, "rtx", rx_idx);
+			rx_idx++;
+			tx_idx++;
+		} else if (CHK_MSIX_FLAG(RXS)) {
+			handler = alx_msix_rtx;
+			snprintf(msix->name, sizeof(msix->name), "%s:%s%d",
+					    netdev->name, "rx", rx_idx);
+			rx_idx++;
+		} else if (CHK_MSIX_FLAG(TXS)) {
+			handler = alx_msix_rtx;
+			snprintf(msix->name, sizeof(msix->name), "%s:%s%d",
+					    netdev->name, "tx", tx_idx);
+			tx_idx++;
+		} else if (CHK_MSIX_FLAG(TIMER)) {
+			handler = alx_msix_timer;
+			snprintf(msix->name, sizeof(msix->name), "%s:%s", netdev->name, "timer");
+		} else if (CHK_MSIX_FLAG(ALERT)) {
+			handler = alx_msix_alert;
+			snprintf(msix->name, sizeof(msix->name), "%s:%s", netdev->name, "alert");
+		} else if (CHK_MSIX_FLAG(SMB)) {
+			handler = alx_msix_smb;
+			snprintf(msix->name, sizeof(msix->name), "%s:%s", netdev->name, "smb");
+		} else if (CHK_MSIX_FLAG(PHY)) {
+			handler = alx_msix_phy;
+			snprintf(msix->name, sizeof(msix->name), "%s:%s", netdev->name, "phy");
+		} else {
+			alx_netif_dbg(adpt, ifup, adpt->netdev,
+				   "MSIX entry [%d] is blank\n",
+				   msix->vec_idx);
+			continue;
+		}
+		alx_netif_dbg(adpt, ifup, adpt->netdev,
+			   "MSIX entry [%d] is %s\n",
+			   msix->vec_idx, msix->name);
+		retval = request_irq(adpt->msix_entries[msix_idx].vector,
+				     handler, 0, msix->name, msix);
+		if (retval)
+			goto free_msix_irq;
+
+		/* assign the mask for this irq */
+		irq_set_affinity_hint(adpt->msix_entries[msix_idx].vector,
+				      msix->affinity_mask);
+	}
+	return retval;
+
+
+free_msix_irq:
+	for (i = 0; i < msix_idx; i++) {
+		irq_set_affinity_hint(adpt->msix_entries[i].vector, NULL);
+		free_irq(adpt->msix_entries[i].vector, adpt->msix[i]);
+	}
+	CLI_ADPT_FLAG(0, MSIX_EN);
+	pci_disable_msix(adpt->pdev);
+	kfree(adpt->msix_entries);
+	adpt->msix_entries = NULL;
+	return retval;
+}
+
+
+/*
+ * alx_request_irq - initialize interrupts
+ */
+static int alx_request_irq(struct alx_adapter *adpt)
+{
+	struct net_device *netdev = adpt->netdev;
+	int retval;
+
+	/* request MSIX irq */
+	if (CHK_ADPT_FLAG(0, MSIX_EN)) {
+		retval = alx_request_msix_irq(adpt);
+		if (retval) {
+			alx_err(adpt, "request msix irq failed, error = %d\n",
+				retval);
+		}
+		goto out;
+	}
+
+	/* request MSI irq */
+	if (CHK_ADPT_FLAG(0, MSI_EN)) {
+		retval = request_irq(adpt->pdev->irq, alx_interrupt, 0,
+			netdev->name, netdev);
+		if (retval) {
+			alx_err(adpt, "request msi irq failed, error = %d\n",
+				retval);
+		} else {
+			_IPC_INFO("%s -- request_irq() succeeded for irq %d\n",
+				  __func__, adpt->pdev->irq);
+		}
+		goto out;
+	}
+
+	/* request shared irq */
+	retval = request_irq(adpt->pdev->irq, alx_interrupt, IRQF_SHARED,
+			netdev->name, netdev);
+	if (retval) {
+		alx_err(adpt, "request shared irq failed, error = %d\n",
+			retval);
+	}
+out:
+	return retval;
+}
+
+
+static void alx_free_irq(struct alx_adapter *adpt)
+{
+	struct net_device *netdev = adpt->netdev;
+	int i;
+
+	if (CHK_ADPT_FLAG(0, MSIX_EN)) {
+		for (i = 0; i < adpt->num_msix_intrs; i++) {
+			struct alx_msix_param *msix = adpt->msix[i];
+			alx_netif_dbg(adpt, ifdown, adpt->netdev,
+				   "msix entry = %d\n", i);
+			if (!CHK_MSIX_FLAG(ALL))
+				continue;
+			if (CHK_MSIX_FLAG(RXS) || CHK_MSIX_FLAG(TXS)) {
+				irq_set_affinity_hint(
+					adpt->msix_entries[i].vector, NULL);
+			}
+			free_irq(adpt->msix_entries[i].vector, msix);
+		}
+		alx_reset_msix_maps(adpt);
+	} else {
+		free_irq(adpt->pdev->irq, netdev);
+	}
+}
+
+static void alx_vlan_mode(struct net_device *netdev,
+			  netdev_features_t features)
+{
+	struct alx_adapter *adpt = netdev_priv(netdev);
+	struct alx_hw *hw = &adpt->hw;
+
+	if (!CHK_ADPT_FLAG(1, STATE_DOWN))
+		alx_disable_intr(adpt);
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 0))
+	if (features & NETIF_F_HW_VLAN_CTAG_RX) {
+#else
+	if (features & NETIF_F_HW_VLAN_RX) {
+#endif
+		/* enable VLAN tag insert/strip */
+		SET_HW_FLAG(VLANSTRIP_EN);
+	} else {
+		/* disable VLAN tag insert/strip */
+		CLI_HW_FLAG(VLANSTRIP_EN);
+	}
+
+	hw->cbs.config_mac_ctrl(hw);
+
+	if (!CHK_ADPT_FLAG(1, STATE_DOWN))
+		alx_enable_intr(adpt);
+}
+
+
+static void alx_restore_vlan(struct alx_adapter *adpt)
+{
+	alx_vlan_mode(adpt->netdev, adpt->netdev->features);
+}
+
+
+static void alx_napi_enable_all(struct alx_adapter *adpt)
+{
+	struct alx_msix_param *msix;
+	int num_msix_intrs = adpt->num_msix_intrs;
+	int msix_idx;
+
+	if (!CHK_ADPT_FLAG(0, MSIX_EN))
+		num_msix_intrs = 1;
+
+	for (msix_idx = 0; msix_idx < num_msix_intrs; msix_idx++) {
+		struct napi_struct *napi;
+		msix = adpt->msix[msix_idx];
+		napi = &msix->napi;
+		napi_enable(napi);
+	}
+}
+
+
+static void alx_napi_disable_all(struct alx_adapter *adpt)
+{
+	struct alx_msix_param *msix;
+	int num_msix_intrs = adpt->num_msix_intrs;
+	int msix_idx;
+
+	if (!CHK_ADPT_FLAG(0, MSIX_EN))
+		num_msix_intrs = 1;
+
+	for (msix_idx = 0; msix_idx < num_msix_intrs; msix_idx++) {
+		msix = adpt->msix[msix_idx];
+		napi_disable(&msix->napi);
+	}
+}
+
+
+static void alx_clean_tx_queue(struct alx_tx_queue *txque)
+{
+	struct device *dev = txque->dev;
+	unsigned long size;
+	u16 i;
+
+	/* ring already cleared, nothing to do */
+	if (!txque->tpq.tpbuff)
+		return;
+
+	for (i = 0; i < txque->tpq.count; i++) {
+		struct alx_buffer *tpbuf;
+		tpbuf = GET_TP_BUFFER(txque, i);
+		if (tpbuf->dma) {
+			pci_unmap_single(to_pci_dev(dev),
+					tpbuf->dma,
+					tpbuf->length,
+					DMA_TO_DEVICE);
+			tpbuf->dma = 0;
+		}
+		if (tpbuf->skb) {
+			dev_kfree_skb_any(tpbuf->skb);
+			tpbuf->skb = NULL;
+		}
+	}
+
+	size = sizeof(struct alx_buffer) * txque->tpq.count;
+	memset(txque->tpq.tpbuff, 0, size);
+
+	/* zero out the descriptor ring */
+	memset(txque->tpq.tpdesc, 0, txque->tpq.size);
+
+	txque->tpq.consume_idx = 0;
+	txque->tpq.produce_idx = 0;
+}
+
+
+/*
+ * alx_clean_all_tx_queues
+ */
+static void alx_clean_all_tx_queues(struct alx_adapter *adpt)
+{
+	int i;
+
+	for (i = 0; i < adpt->num_txques; i++)
+		alx_clean_tx_queue(adpt->tx_queue[i]);
+}
+
+
+static void alx_clean_rx_queue(struct alx_rx_queue *rxque)
+{
+	struct device *dev = rxque->dev;
+	unsigned long size;
+	int i;
+
+	if (CHK_RX_FLAG(HW_QUE)) {
+		/* ring already cleared, nothing to do */
+		if (!rxque->rfq.rfbuff)
+			goto clean_sw_queue;
+
+		for (i = 0; i < rxque->rfq.count; i++) {
+			struct alx_buffer *rfbuf;
+			rfbuf = GET_RF_BUFFER(rxque, i);
+
+			if (rfbuf->dma) {
+				pci_unmap_single(to_pci_dev(dev),
+						rfbuf->dma,
+						rfbuf->length,
+						DMA_FROM_DEVICE);
+				rfbuf->dma = 0;
+			}
+			if (rfbuf->skb) {
+				dev_kfree_skb(rfbuf->skb);
+				rfbuf->skb = NULL;
+			}
+		}
+		size = sizeof(struct alx_buffer) * rxque->rfq.count;
+		memset(rxque->rfq.rfbuff, 0, size);
+
+		/* zero out the descriptor ring */
+		memset(rxque->rrq.rrdesc, 0, rxque->rrq.size);
+		rxque->rrq.produce_idx = 0;
+		rxque->rrq.consume_idx = 0;
+
+		memset(rxque->rfq.rfdesc, 0, rxque->rfq.size);
+		rxque->rfq.produce_idx = 0;
+		rxque->rfq.consume_idx = 0;
+	}
+clean_sw_queue:
+	if (CHK_RX_FLAG(SW_QUE)) {
+		/* ring already cleared, nothing to do */
+		if (!rxque->swq.swbuff)
+			return;
+
+		for (i = 0; i < rxque->swq.count; i++) {
+			struct alx_sw_buffer *swbuf;
+			swbuf = GET_SW_BUFFER(rxque, i);
+
+			/* swq doesn't map DMA */
+
+			if (swbuf->skb) {
+				dev_kfree_skb(swbuf->skb);
+				swbuf->skb = NULL;
+			}
+		}
+		size = sizeof(struct alx_sw_buffer) * rxque->swq.count;
+		memset(rxque->swq.swbuff, 0, size);
+
+		/* swq doesn't have any descriptor rings */
+		rxque->swq.produce_idx = 0;
+		rxque->swq.consume_idx = 0;
+	}
+}
+
+
+/*
+ * alx_clean_all_rx_queues
+ */
+static void alx_clean_all_rx_queues(struct alx_adapter *adpt)
+{
+	int i;
+	for (i = 0; i < adpt->num_rxques; i++)
+		alx_clean_rx_queue(adpt->rx_queue[i]);
+}
+
+
+/*
+ * alx_set_num_txques: set the number of Tx queues
+ */
+static inline void alx_set_num_txques(struct alx_adapter *adpt)
+{
+	struct alx_hw *hw = &adpt->hw;
+
+	if (hw->mac_type == alx_mac_l1f || hw->mac_type == alx_mac_l2f)
+		adpt->num_txques = 4;
+	else
+		adpt->num_txques = 2;
+}
+
+
+/*
+ * alx_set_num_rxques: set the number of Rx queues (RSS-aware)
+ */
+static inline void alx_set_num_rxques(struct alx_adapter *adpt)
+{
+	if (CHK_ADPT_FLAG(0, SRSS_CAP)) {
+		adpt->num_hw_rxques = 1;
+		adpt->num_sw_rxques = adpt->max_rxques;
+		adpt->num_rxques =
+			max_t(u16, adpt->num_hw_rxques, adpt->num_sw_rxques);
+	}
+}
+
+
+/*
+ * alx_set_num_queues: Allocate queues for device, feature dependent
+ */
+static void alx_set_num_queues(struct alx_adapter *adpt)
+{
+	/* Start with default case */
+	adpt->num_txques = 1;
+	adpt->num_rxques = 1;
+	adpt->num_hw_rxques = 1;
+	adpt->num_sw_rxques = 0;
+
+	alx_set_num_rxques(adpt);
+	alx_set_num_txques(adpt);
+}
+
+
+/* alx_alloc_all_rtx_queue - allocate all queues */
+static int alx_alloc_all_rtx_queue(struct alx_adapter *adpt)
+{
+	int que_idx;
+
+	for (que_idx = 0; que_idx < adpt->num_txques; que_idx++) {
+		struct alx_tx_queue *txque;
+
+		txque = kzalloc(sizeof(struct alx_tx_queue), GFP_KERNEL);
+		if (!txque)
+			goto err_alloc_tx_queue;
+		txque->tpq.count = adpt->num_txdescs;
+		txque->que_idx = que_idx;
+		txque->dev = &adpt->pdev->dev;
+		txque->netdev = adpt->netdev;
+
+		adpt->tx_queue[que_idx] = txque;
+	}
+
+	for (que_idx = 0; que_idx < adpt->num_rxques; que_idx++) {
+		struct alx_rx_queue *rxque;
+
+		rxque = kzalloc(sizeof(struct alx_rx_queue), GFP_KERNEL);
+		if (!rxque)
+			goto err_alloc_rx_queue;
+		rxque->rrq.count = adpt->num_rxdescs;
+		rxque->rfq.count = adpt->num_rxdescs;
+		rxque->swq.count = adpt->num_rxdescs;
+		rxque->que_idx = que_idx;
+		rxque->dev = &adpt->pdev->dev;
+		rxque->netdev = adpt->netdev;
+
+		if (CHK_ADPT_FLAG(0, SRSS_EN)) {
+			if (que_idx < adpt->num_hw_rxques)
+				SET_RX_FLAG(HW_QUE);
+			if (que_idx < adpt->num_sw_rxques)
+				SET_RX_FLAG(SW_QUE);
+		} else {
+			SET_RX_FLAG(HW_QUE);
+		}
+		adpt->rx_queue[que_idx] = rxque;
+	}
+	alx_netif_dbg(adpt, probe, adpt->netdev,
+		  "num_tx_descs = %d, num_rx_descs = %d\n",
+		  adpt->num_txdescs, adpt->num_rxdescs);
+	return 0;
+
+err_alloc_rx_queue:
+	alx_err(adpt, "goto err_alloc_rx_queue");
+	for (que_idx = 0; que_idx < adpt->num_rxques; que_idx++)
+		kfree(adpt->rx_queue[que_idx]);
+err_alloc_tx_queue:
+	alx_err(adpt, "goto err_alloc_tx_queue");
+	for (que_idx = 0; que_idx < adpt->num_txques; que_idx++)
+		kfree(adpt->tx_queue[que_idx]);
+	return -ENOMEM;
+}
+
+
+/* alx_free_all_rtx_queue */
+static void alx_free_all_rtx_queue(struct alx_adapter *adpt)
+{
+	int que_idx;
+
+	for (que_idx = 0; que_idx < adpt->num_txques; que_idx++) {
+		kfree(adpt->tx_queue[que_idx]);
+		adpt->tx_queue[que_idx] = NULL;
+	}
+	for (que_idx = 0; que_idx < adpt->num_rxques; que_idx++) {
+		kfree(adpt->rx_queue[que_idx]);
+		adpt->rx_queue[que_idx] = NULL;
+	}
+}
+
+
+/* alx_set_interrupt_param - set interrupt parameter */
+static int alx_set_interrupt_param(struct alx_adapter *adpt)
+{
+	struct alx_msix_param *msix;
+	int (*poll)(struct napi_struct *, int);
+	int msix_idx;
+
+	if (CHK_ADPT_FLAG(0, MSIX_EN)) {
+		poll = &alx_napi_msix_rtx;
+	} else {
+		adpt->num_msix_intrs = 1;
+		poll = &alx_napi_legacy_rtx;
+	}
+
+	for (msix_idx = 0; msix_idx < adpt->num_msix_intrs; msix_idx++) {
+		msix = kzalloc(sizeof(struct alx_msix_param),
+					   GFP_KERNEL);
+		if (!msix)
+			goto err_alloc_msix;
+		msix->adpt = adpt;
+		msix->vec_idx = msix_idx;
+		/* Allocate the affinity_hint cpumask, configure the mask */
+		if (!alloc_cpumask_var(&msix->affinity_mask, GFP_KERNEL))
+			goto err_alloc_cpumask;
+
+		cpumask_set_cpu((msix_idx % num_online_cpus()),
+				msix->affinity_mask);
+
+		netif_napi_add(adpt->netdev, &msix->napi, poll, 64);
+		adpt->msix[msix_idx] = msix;
+	}
+	return 0;
+
+err_alloc_cpumask:
+	kfree(msix);
+	adpt->msix[msix_idx] = NULL;
+err_alloc_msix:
+	for (msix_idx--; msix_idx >= 0; msix_idx--) {
+		msix = adpt->msix[msix_idx];
+		netif_napi_del(&msix->napi);
+		free_cpumask_var(msix->affinity_mask);
+		kfree(msix);
+		adpt->msix[msix_idx] = NULL;
+	}
+	alx_err(adpt, "can't allocate memory\n");
+	return -ENOMEM;
+}
+
+
+/*
+ * alx_reset_interrupt_param - Free memory allocated for interrupt vectors
+ */
+static void alx_reset_interrupt_param(struct alx_adapter *adpt)
+{
+	int msix_idx;
+
+	for (msix_idx = 0; msix_idx < adpt->num_msix_intrs; msix_idx++) {
+		struct alx_msix_param *msix = adpt->msix[msix_idx];
+		netif_napi_del(&msix->napi);
+		free_cpumask_var(msix->affinity_mask);
+		kfree(msix);
+		adpt->msix[msix_idx] = NULL;
+	}
+}
+
+
+/* set msix interrupt mode */
+static int alx_set_msix_interrupt_mode(struct alx_adapter *adpt)
+{
+	int msix_intrs, msix_idx;
+	int retval = 0;
+
+	adpt->msix_entries = kcalloc(adpt->max_msix_intrs,
+				sizeof(struct msix_entry), GFP_KERNEL);
+	if (!adpt->msix_entries) {
+		alx_netif_dbg(adpt, probe, adpt->netdev,
+			   "can't allocate msix entry\n");
+		CLI_ADPT_FLAG(0, MSIX_EN);
+		goto try_msi_mode;
+	}
+
+	for (msix_idx = 0; msix_idx < adpt->max_msix_intrs; msix_idx++)
+		adpt->msix_entries[msix_idx].entry = msix_idx;
+
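+	/*
+	 * pci_enable_msix() returns 0 on success, a negative errno on hard
+	 * failure, or a positive number of vectors that could have been
+	 * allocated; in that case retry with the smaller count until it
+	 * drops below the minimum we can work with.
+	 */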
+	msix_intrs = adpt->max_msix_intrs;
+	while (msix_intrs >= adpt->min_msix_intrs) {
+		retval = pci_enable_msix(adpt->pdev, adpt->msix_entries,
+				      msix_intrs);
+		if (!retval) /* Success in acquiring all requested vectors. */
+			break;
+		else if (retval < 0)
+			msix_intrs = 0; /* Nasty failure, quit now */
+		else /* error == number of vectors we should try again with */
+			msix_intrs = retval;
+	}
+	if (msix_intrs < adpt->min_msix_intrs) {
+		alx_netif_dbg(adpt, probe, adpt->netdev,
+			   "can't enable MSI-X interrupts\n");
+		CLI_ADPT_FLAG(0, MSIX_EN);
+		kfree(adpt->msix_entries);
+		adpt->msix_entries = NULL;
+		goto try_msi_mode;
+	}
+
+	alx_netif_dbg(adpt, probe, adpt->netdev,
+		   "enable MSI-X interrupts, num_msix_intrs = %d\n",
+		   msix_intrs);
+	SET_ADPT_FLAG(0, MSIX_EN);
+	if (CHK_ADPT_FLAG(0, SRSS_CAP))
+		SET_ADPT_FLAG(0, SRSS_EN);
+
+	adpt->num_msix_intrs = min_t(int, msix_intrs, adpt->max_msix_intrs);
+	return 0;
+
+try_msi_mode:
+	CLI_ADPT_FLAG(0, SRSS_CAP);
+	CLI_ADPT_FLAG(0, SRSS_EN);
+	alx_set_num_queues(adpt);
+	return -1;
+}
+
+
+/* set msi interrupt mode */
+static int alx_set_msi_interrupt_mode(struct alx_adapter *adpt)
+{
+	int retval;
+
+	retval = pci_enable_msi(adpt->pdev);
+	adpt->netdev->irq = adpt->pdev->irq;
+	if (retval) {
+		alx_netif_dbg(adpt, probe, adpt->netdev,
+			   "can't enable MSI interrupt, error = %d\n", retval);
+		return retval;
+	}
+	SET_ADPT_FLAG(0, MSI_EN);
+	return retval;
+}
+
+
+/* set interrupt mode */
+static int alx_set_interrupt_mode(struct alx_adapter *adpt)
+{
+	int retval = 0;
+
+	if (CHK_ADPT_FLAG(0, MSIX_CAP)) {
+		alx_netif_dbg(adpt, probe, adpt->netdev,
+			   "try to set MSIX interrupt\n");
+		retval = alx_set_msix_interrupt_mode(adpt);
+		if (!retval)
+			return retval;
+	}
+
+	if (CHK_ADPT_FLAG(0, MSI_CAP)) {
+		alx_netif_dbg(adpt, probe, adpt->netdev,
+			   "try to set MSI interrupt\n");
+		retval = alx_set_msi_interrupt_mode(adpt);
+		if (!retval)
+			return retval;
+	}
+
+	alx_netif_dbg(adpt, probe, adpt->netdev,
+		   "can't enable MSI-X or MSI, falling back to legacy shared interrupt\n");
+	return 0;
+}
+
+
+static void alx_reset_interrupt_mode(struct alx_adapter *adpt)
+{
+	if (CHK_ADPT_FLAG(0, MSIX_EN)) {
+		CLI_ADPT_FLAG(0, MSIX_EN);
+		pci_disable_msix(adpt->pdev);
+		kfree(adpt->msix_entries);
+		adpt->msix_entries = NULL;
+	} else if (CHK_ADPT_FLAG(0, MSI_EN)) {
+		CLI_ADPT_FLAG(0, MSI_EN);
+		pci_disable_msi(adpt->pdev);
+	}
+}
+
+
+static int __devinit alx_init_adapter_special(struct alx_adapter *adpt)
+{
+	switch (adpt->hw.mac_type) {
+	case alx_mac_l1f:
+	case alx_mac_l2f:
+		goto init_alf_adapter;
+	case alx_mac_l1c:
+	case alx_mac_l2c:
+		goto init_alc_adapter;
+	case alx_mac_l1d_v1:
+	case alx_mac_l1d_v2:
+	case alx_mac_l2cb_v1:
+	case alx_mac_l2cb_v20:
+	case alx_mac_l2cb_v21:
+		adpt->hw.bHibBug = true;
+		goto init_alc_adapter;
+	default:
+		break;
+	}
+	return -1;
+
+init_alc_adapter:
+	if (CHK_ADPT_FLAG(0, MSIX_CAP))
+		alx_err(adpt, "ALC doesn't support MSIX\n");
+
+	/* plain MSI: no dedicated tx/rx/non-queue MSI-X vectors */
+	adpt->num_msix_txques = 0;
+	adpt->num_msix_rxques = 0;
+	adpt->num_msix_noques = 0;
+	return 0;
+
+init_alf_adapter:
+	if (CHK_ADPT_FLAG(0, MSIX_CAP)) {
+		/* MSI-X vectors for tx, rx and non-queue events */
+		adpt->num_msix_txques = 4;
+		adpt->num_msix_rxques = 8;
+		adpt->num_msix_noques = ALF_MAX_MSIX_NOQUE_INTRS;
+
+		/* msix vector range */
+		adpt->max_msix_intrs = ALF_MAX_MSIX_INTRS;
+		adpt->min_msix_intrs = ALF_MIN_MSIX_INTRS;
+	} else {
+		/* plain MSI: no dedicated tx/rx/non-queue MSI-X vectors */
+		adpt->num_msix_txques = 0;
+		adpt->num_msix_rxques = 0;
+		adpt->num_msix_noques = 0;
+	}
+	return 0;
+}
+
+
+/*
+ * alx_init_adapter
+ */
+static int __devinit alx_init_adapter(struct alx_adapter *adpt)
+{
+	struct alx_hw *hw   = &adpt->hw;
+	struct pci_dev	*pdev = adpt->pdev;
+	u16 revision;
+	int max_frame;
+
+	/* PCI config space info */
+	hw->pci_venid = pdev->vendor;
+	hw->pci_devid = pdev->device;
+	alx_cfg_r16(hw, PCI_CLASS_REVISION, &revision);
+	hw->pci_revid = revision & 0xFF;
+	hw->pci_sub_venid = pdev->subsystem_vendor;
+	hw->pci_sub_devid = pdev->subsystem_device;
+
+	if (alx_init_hw_callbacks(adpt) != 0) {
+		alx_err(adpt, "set HW function pointers failed\n");
+		return -1;
+	}
+
+	if (hw->cbs.identify_nic(hw) != 0) {
+		alx_err(adpt, "HW is disabled\n");
+		return -1;
+	}
+
+	/* Set adapter flags */
+	switch (hw->mac_type) {
+	case alx_mac_l1f:
+	case alx_mac_l2f:
+#ifdef CONFIG_ALX_MSI
+		SET_ADPT_FLAG(0, MSI_CAP);
+#endif
+#ifdef CONFIG_ALX_MSIX
+		SET_ADPT_FLAG(0, MSIX_CAP);
+#endif
+		if (CHK_ADPT_FLAG(0, MSIX_CAP)) {
+			SET_ADPT_FLAG(0, FIXED_MSIX);
+			SET_ADPT_FLAG(0, MRQ_CAP);
+#ifdef CONFIG_ALX_RSS
+			SET_ADPT_FLAG(0, SRSS_CAP);
+#endif
+		}
+		pdev->dev_flags |= PCI_DEV_FLAGS_MSI_INTX_DISABLE_BUG;
+		break;
+	case alx_mac_l1c:
+	case alx_mac_l1d_v1:
+	case alx_mac_l1d_v2:
+	case alx_mac_l2c:
+	case alx_mac_l2cb_v1:
+	case alx_mac_l2cb_v20:
+	case alx_mac_l2cb_v21:
+#ifdef CONFIG_ALX_MSI
+		SET_ADPT_FLAG(0, MSI_CAP);
+#endif
+		break;
+	default:
+		break;
+	}
+
+	/* set default for alx_adapter */
+	adpt->max_msix_intrs = 1;
+	adpt->min_msix_intrs = 1;
+	max_frame = adpt->netdev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
+	adpt->rxbuf_size = adpt->netdev->mtu > ALX_DEF_RX_BUF_SIZE ?
+			   ALIGN(max_frame, 8) : ALX_DEF_RX_BUF_SIZE;
+	adpt->wol = 0;
+	device_set_wakeup_enable(&pdev->dev, false);
+
+	/* set default for alx_hw */
+	hw->link_up = false;
+	hw->link_speed = ALX_LINK_SPEED_UNKNOWN;
+	hw->preamble = 7;
+	hw->intr_mask = ALX_IMR_NORMAL_MASK;
+	hw->smb_timer = 400; /* 400ms */
+	hw->mtu = adpt->netdev->mtu;
+#ifdef MDM_PLATFORM
+	if (ipa_enable)
+		hw->imt = 500;       /* 500 * 2us = 1ms for MDM */
+	else
+#endif
+		hw->imt = 100;       /* 100 * 2us = 200us */
+
+	/* set default for wrr */
+	hw->wrr_prio0 = 4;
+	hw->wrr_prio1 = 4;
+	hw->wrr_prio2 = 4;
+	hw->wrr_prio3 = 4;
+	hw->wrr_mode = alx_wrr_mode_none;
+
+	/* set default flow control settings */
+	hw->req_fc_mode = alx_fc_full;
+	hw->cur_fc_mode = alx_fc_full;	/* init for ethtool output */
+	hw->disable_fc_autoneg = false;
+	hw->fc_was_autonegged = false;
+	hw->fc_single_pause = true;
+
+	/* set defaults for RSS info */
+	hw->rss_hstype = 0;
+	hw->rss_mode = alx_rss_mode_disable;
+	hw->rss_idt_size = 0;
+	hw->rss_base_cpu = 0;
+	memset(hw->rss_idt, 0x0, sizeof(hw->rss_idt));
+	memset(hw->rss_key, 0x0, sizeof(hw->rss_key));
+
+	atomic_set(&adpt->irq_sem, 1);
+	spin_lock_init(&adpt->tx_lock);
+	spin_lock_init(&adpt->rx_lock);
+
+	alx_init_adapter_special(adpt);
+
+	if (hw->cbs.init_phy) {
+		if (hw->cbs.init_phy(hw))
+			return -EINVAL;
+	}
+
+	SET_ADPT_FLAG(1, STATE_DOWN);
+	return 0;
+}
+
+
+static int alx_set_register_info_special(struct alx_adapter *adpt)
+{
+	struct alx_hw *hw = &adpt->hw;
+	int num_txques = adpt->num_txques;
+
+	switch (adpt->hw.mac_type) {
+	case alx_mac_l1f:
+	case alx_mac_l2f:
+		goto cache_alf_register;
+	case alx_mac_l1c:
+	case alx_mac_l1d_v1:
+	case alx_mac_l1d_v2:
+	case alx_mac_l2c:
+	case alx_mac_l2cb_v1:
+	case alx_mac_l2cb_v20:
+	case alx_mac_l2cb_v21:
+		goto cache_alc_register;
+	default:
+		break;
+	}
+	return -1;
+
+cache_alc_register:
+	/* setting for Produce Index and Consume Index */
+	adpt->rx_queue[0]->produce_reg = hw->rx_prod_reg[0];
+	adpt->rx_queue[0]->consume_reg = hw->rx_cons_reg[0];
+
+	switch (num_txques) {
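+	/* deliberate fall-through: higher queue counts also set up the lower rings */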
+	case 2:
+		adpt->tx_queue[1]->produce_reg = hw->tx_prod_reg[1];
+		adpt->tx_queue[1]->consume_reg = hw->tx_cons_reg[1];
+	case 1:
+		adpt->tx_queue[0]->produce_reg = hw->tx_prod_reg[0];
+		adpt->tx_queue[0]->consume_reg = hw->tx_cons_reg[0];
+		break;
+	}
+	return 0;
+
+cache_alf_register:
+	/* setting for Produce Index and Consume Index */
+	adpt->rx_queue[0]->produce_reg = hw->rx_prod_reg[0];
+	adpt->rx_queue[0]->consume_reg = hw->rx_cons_reg[0];
+
+	switch (num_txques) {
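+	/* deliberate fall-through: higher queue counts also set up the lower rings */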
+	case 4:
+		adpt->tx_queue[3]->produce_reg = hw->tx_prod_reg[3];
+		adpt->tx_queue[3]->consume_reg = hw->tx_cons_reg[3];
+	case 3:
+		adpt->tx_queue[2]->produce_reg = hw->tx_prod_reg[2];
+		adpt->tx_queue[2]->consume_reg = hw->tx_cons_reg[2];
+	case 2:
+		adpt->tx_queue[1]->produce_reg = hw->tx_prod_reg[1];
+		adpt->tx_queue[1]->consume_reg = hw->tx_cons_reg[1];
+	case 1:
+		adpt->tx_queue[0]->produce_reg = hw->tx_prod_reg[0];
+		adpt->tx_queue[0]->consume_reg = hw->tx_cons_reg[0];
+	}
+	return 0;
+}
+
+
+/* alx_alloc_tx_descriptor - allocate Tx Descriptors */
+static int alx_alloc_tx_descriptor(struct alx_adapter *adpt,
+				   struct alx_tx_queue *txque)
+{
+	struct alx_ring_header *ring_header = &adpt->ring_header;
+	int size;
+
+	alx_netif_dbg(adpt, ifup, adpt->netdev,
+		   "tpq.count = %d\n", txque->tpq.count);
+
+	size = sizeof(struct alx_buffer) * txque->tpq.count;
+	txque->tpq.tpbuff = kzalloc(size, GFP_KERNEL);
+	if (!txque->tpq.tpbuff)
+		goto err_alloc_tpq_buffer;
+
+	/* descriptor ring size in bytes */
+	txque->tpq.size = txque->tpq.count * sizeof(union alx_hw_tpdesc);
+
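+	/*
+	 * Carve this ring out of the coherent blob allocated in
+	 * alx_alloc_all_rtx_descriptor(); the CPU pointer and DMA address
+	 * advance in lockstep via ring_header->used.
+	 */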
+	txque->tpq.tpdma = ring_header->dma + ring_header->used;
+	txque->tpq.tpdesc = ring_header->desc + ring_header->used;
+	adpt->hw.tpdma[txque->que_idx] = (u64)txque->tpq.tpdma;
+	ring_header->used += ALIGN(txque->tpq.size, 8);
+
+	txque->tpq.produce_idx = 0;
+	txque->tpq.consume_idx = 0;
+	txque->max_packets = txque->tpq.count;
+	return 0;
+
+err_alloc_tpq_buffer:
+	alx_err(adpt, "Unable to allocate memory for the Tx descriptor\n");
+	return -ENOMEM;
+}
+
+
+/* alx_alloc_all_tx_descriptor - allocate all Tx Descriptors */
+static int alx_alloc_all_tx_descriptor(struct alx_adapter *adpt)
+{
+	int i, retval = 0;
+	alx_netif_dbg(adpt, ifup, adpt->netdev,
+		   "num_tques = %d\n", adpt->num_txques);
+
+	for (i = 0; i < adpt->num_txques; i++) {
+		retval = alx_alloc_tx_descriptor(adpt, adpt->tx_queue[i]);
+		if (!retval)
+			continue;
+
+		alx_err(adpt, "Allocation for Tx Queue %u failed\n", i);
+		break;
+	}
+
+	return retval;
+}
+
+
+/* alx_alloc_rx_descriptor - allocate Rx Descriptors */
+static int alx_alloc_rx_descriptor(struct alx_adapter *adpt,
+				   struct alx_rx_queue *rxque)
+{
+	struct alx_ring_header *ring_header = &adpt->ring_header;
+	int size;
+
+	alx_netif_dbg(adpt, ifup, adpt->netdev,
+		   "RRD.count = %d, RFD.count = %d, SWD.count = %d\n",
+		   rxque->rrq.count, rxque->rfq.count, rxque->swq.count);
+
+	if (CHK_RX_FLAG(HW_QUE)) {
+		/* alloc buffer info */
+		size = sizeof(struct alx_buffer) * rxque->rfq.count;
+		rxque->rfq.rfbuff = kzalloc(size, GFP_KERNEL);
+		if (!rxque->rfq.rfbuff)
+			goto err_alloc_rfq_buffer;
+		/* set up the DMA addresses of the rrq and rfq rings */
+
+		/* descriptor ring sizes in bytes */
+		rxque->rrq.size =
+			rxque->rrq.count * sizeof(union alx_hw_rrdesc);
+		rxque->rfq.size =
+			rxque->rfq.count * sizeof(union alx_hw_rfdesc);
+
+		rxque->rrq.rrdma = ring_header->dma + ring_header->used;
+		rxque->rrq.rrdesc = ring_header->desc + ring_header->used;
+		adpt->hw.rrdma[rxque->que_idx] = (u64)rxque->rrq.rrdma;
+		ring_header->used += ALIGN(rxque->rrq.size, 8);
+
+		rxque->rfq.rfdma = ring_header->dma + ring_header->used;
+		rxque->rfq.rfdesc = ring_header->desc + ring_header->used;
+		adpt->hw.rfdma[rxque->que_idx] = (u64)rxque->rfq.rfdma;
+		ring_header->used += ALIGN(rxque->rfq.size, 8);
+
+		/* clean all counts within rxque */
+		rxque->rrq.produce_idx = 0;
+		rxque->rrq.consume_idx = 0;
+
+		rxque->rfq.produce_idx = 0;
+		rxque->rfq.consume_idx = 0;
+	}
+
+	if (CHK_RX_FLAG(SW_QUE)) {
+		size = sizeof(struct alx_sw_buffer) * rxque->swq.count;
+		rxque->swq.swbuff = kzalloc(size, GFP_KERNEL);
+		if (!rxque->swq.swbuff)
+			goto err_alloc_swq_buffer;
+
+		rxque->swq.consume_idx = 0;
+		rxque->swq.produce_idx = 0;
+	}
+
+	rxque->max_packets = rxque->rrq.count / 2;
+	return 0;
+
+err_alloc_swq_buffer:
+	kfree(rxque->rfq.rfbuff);
+	rxque->rfq.rfbuff = NULL;
+err_alloc_rfq_buffer:
+	alx_err(adpt, "Unable to allocate memory for the Rx descriptor\n");
+	return -ENOMEM;
+}
+
+
+/* alx_alloc_all_rx_descriptor - allocate all Rx Descriptors */
+static int alx_alloc_all_rx_descriptor(struct alx_adapter *adpt)
+{
+	int i, error = 0;
+
+	for (i = 0; i < adpt->num_rxques; i++) {
+		error = alx_alloc_rx_descriptor(adpt, adpt->rx_queue[i]);
+		if (!error)
+			continue;
+		alx_err(adpt, "Allocation for Rx Queue %u failed\n", i);
+		break;
+	}
+
+	return error;
+}
+
+
+/* alx_free_tx_descriptor - Free Tx Descriptor */
+static void alx_free_tx_descriptor(struct alx_tx_queue *txque)
+{
+	alx_clean_tx_queue(txque);
+
+	kfree(txque->tpq.tpbuff);
+	txque->tpq.tpbuff = NULL;
+
+	/* descriptor memory lives in the shared ring header blob; just clear the pointer */
+	if (!txque->tpq.tpdesc)
+		return;
+	txque->tpq.tpdesc = NULL;
+}
+
+
+/* alx_free_all_tx_descriptor - Free all Tx Descriptor */
+static void alx_free_all_tx_descriptor(struct alx_adapter *adpt)
+{
+	int i;
+
+	for (i = 0; i < adpt->num_txques; i++)
+		alx_free_tx_descriptor(adpt->tx_queue[i]);
+}
+
+
+/* alx_free_rx_descriptor - Free Rx Descriptor */
+static void alx_free_rx_descriptor(struct alx_rx_queue *rxque)
+{
+	alx_clean_rx_queue(rxque);
+
+	if (CHK_RX_FLAG(HW_QUE)) {
+		kfree(rxque->rfq.rfbuff);
+		rxque->rfq.rfbuff = NULL;
+
+		/* descriptor memory lives in the shared ring header blob; just clear the pointers */
+		if (!rxque->rrq.rrdesc)
+			return;
+		rxque->rrq.rrdesc = NULL;
+
+		if (!rxque->rfq.rfdesc)
+			return;
+		rxque->rfq.rfdesc = NULL;
+	}
+
+	if (CHK_RX_FLAG(SW_QUE)) {
+		kfree(rxque->swq.swbuff);
+		rxque->swq.swbuff = NULL;
+	}
+}
+
+#ifdef MDM_PLATFORM
+static int alx_alloc_flow_ctrl_desc(struct alx_adapter *adpt)
+{
+	int i;
+	struct alx_ipa_rx_desc_node *node = NULL;
+
+	for (i = 0; i < ALX_IPA_SYS_PIPE_DNE_PKTS; i++) {
+		node = (struct alx_ipa_rx_desc_node *)
+				kzalloc(sizeof(struct alx_ipa_rx_desc_node),
+					GFP_KERNEL);
+		if (!node) {
+			IPC_ERROR("%s -- Only able to allocate %d nodes \n"
+						,__func__, adpt->freeq_cnt);
+			return -ENOMEM;
+		}
+		spin_lock(&adpt->flow_ctrl_lock);
+		adpt->freeq_cnt++;
+		list_add_tail(&node->link, &adpt->free_queue_head);
+		spin_unlock(&adpt->flow_ctrl_lock);
+	}
+	return 0;
+}
+
+static void alx_free_flow_ctrl_desc(struct alx_adapter *adpt)
+{
+	struct alx_ipa_rx_desc_node *node, *tmp;
+
+	spin_lock_bh(&adpt->flow_ctrl_lock);
+	list_for_each_entry_safe(node, tmp, &adpt->free_queue_head, link) {
+		list_del(&node->link);
+		kfree(node);
+		adpt->freeq_cnt--;
+	}
+	spin_unlock_bh(&adpt->flow_ctrl_lock);
+	if (adpt->freeq_cnt != 0) {
+		IPC_ERROR("%s - Memory Leak Detected \n",__func__);
+		BUG();
+	}
+}
+#endif
+
+/* alx_free_all_rx_descriptor - Free all Rx Descriptor */
+static void alx_free_all_rx_descriptor(struct alx_adapter *adpt)
+{
+	int i;
+	for (i = 0; i < adpt->num_rxques; i++)
+		alx_free_rx_descriptor(adpt->rx_queue[i]);
+}
+
+
+/*
+ * alx_alloc_all_rtx_descriptor - allocate Tx / RX descriptor queues
+ */
+static int alx_alloc_all_rtx_descriptor(struct alx_adapter *adpt)
+{
+	struct device *dev = &adpt->pdev->dev;
+	struct alx_ring_header *ring_header = &adpt->ring_header;
+	int num_tques = adpt->num_txques;
+	int num_rques = adpt->num_hw_rxques;
+	unsigned int num_tx_descs = adpt->num_txdescs;
+	unsigned int num_rx_descs = adpt->num_rxdescs;
+	int retval;
+
+	/*
+	 * real ring DMA buffer
+	 * each ring/block may need up to 8 bytes for alignment, hence the
+	 * additional bytes tacked onto the end.
+	 */
+	ring_header->size =
+		num_tques * num_tx_descs * sizeof(union alx_hw_tpdesc) +
+		num_rques * num_rx_descs * sizeof(union alx_hw_rfdesc) +
+		num_rques * num_rx_descs * sizeof(union alx_hw_rrdesc) +
+		sizeof(struct coals_msg_block) +
+		sizeof(struct alx_hw_stats) +
+		num_tques * 8 + num_rques * 2 * 8 + 8 * 2;
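+	/*
+	 * The trailing terms are alignment slack: 8 bytes per Tx ring,
+	 * 8 bytes for each of the two rings per hardware Rx queue, and
+	 * 8 bytes each for the CMB and SMB blocks.
+	 */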
+	alx_netif_dbg(adpt, ifup, adpt->netdev,
+		   "num_tques = %d, num_tx_descs = %d\n",
+		   num_tques, num_tx_descs);
+	alx_netif_dbg(adpt, ifup, adpt->netdev,
+		   "num_rques = %d, num_rx_descs = %d\n",
+		   num_rques, num_rx_descs);
+
+	ring_header->used = 0;
+	ring_header->desc = dma_alloc_coherent(dev, ring_header->size,
+				&ring_header->dma, GFP_KERNEL);
+
+	if (!ring_header->desc) {
+		alx_err(adpt, "dma_alloc_coherent failed\n");
+		retval = -ENOMEM;
+		goto err_alloc_dma;
+	}
+	memset(ring_header->desc, 0, ring_header->size);
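+	/* skip ahead as needed so the first ring starts 8-byte aligned */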
+	ring_header->used = ALIGN(ring_header->dma, 8) - ring_header->dma;
+
+	alx_netif_dbg(adpt, ifup, adpt->netdev,
+		   "ring header: size = %d, used= %d\n",
+		   ring_header->size, ring_header->used);
+
+	/* allocate transmit descriptors */
+	retval = alx_alloc_all_tx_descriptor(adpt);
+	if (retval)
+		goto err_alloc_tx;
+
+	/* allocate receive descriptors */
+	retval = alx_alloc_all_rx_descriptor(adpt);
+	if (retval)
+		goto err_alloc_rx;
+
+	/* Init CMB dma address */
+	adpt->cmb.dma = ring_header->dma + ring_header->used;
+	adpt->cmb.cmb = (u8 *) ring_header->desc + ring_header->used;
+	ring_header->used += ALIGN(sizeof(struct coals_msg_block), 8);
+
+	adpt->smb.dma = ring_header->dma + ring_header->used;
+	adpt->smb.smb = (u8 *)ring_header->desc + ring_header->used;
+	ring_header->used += ALIGN(sizeof(struct alx_hw_stats), 8);
+
+	return 0;
+
+err_alloc_rx:
+	alx_free_all_rx_descriptor(adpt);
+err_alloc_tx:
+	alx_free_all_tx_descriptor(adpt);
+err_alloc_dma:
+	return retval;
+}
+
+
+/*
+ * alx_free_all_rtx_descriptor - free Tx / Rx descriptor queues
+ */
+static void alx_free_all_rtx_descriptor(struct alx_adapter *adpt)
+{
+	struct pci_dev *pdev = adpt->pdev;
+	struct alx_ring_header *ring_header = &adpt->ring_header;
+
+	alx_free_all_tx_descriptor(adpt);
+	alx_free_all_rx_descriptor(adpt);
+
+	adpt->cmb.dma = 0;
+	adpt->cmb.cmb = NULL;
+	adpt->smb.dma = 0;
+	adpt->smb.smb = NULL;
+
+	if (ring_header->desc) {
+		pci_free_consistent(pdev, ring_header->size, ring_header->desc,
+				    ring_header->dma);
+		ring_header->desc = NULL;
+	}
+
+	ring_header->size = ring_header->used = 0;
+}
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,39))
+static netdev_features_t alx_fix_features(struct net_device *netdev,
+					  netdev_features_t features)
+{
+	struct alx_adapter *adpt = netdev_priv(netdev);
+	/*
+	 * Since there is no support for separate rx/tx vlan accel
+	 * enable/disable make sure tx flag is always in same state as rx.
+	 */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 0))
+	if (features & NETIF_F_HW_VLAN_CTAG_RX)
+		features |= NETIF_F_HW_VLAN_CTAG_TX;
+	else
+		features &= ~NETIF_F_HW_VLAN_CTAG_TX;
+#else
+	if (features & NETIF_F_HW_VLAN_RX)
+		features |= NETIF_F_HW_VLAN_TX;
+	else
+		features &= ~NETIF_F_HW_VLAN_TX;
+#endif /*(LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 0))*/
+
+	if (netdev->mtu > ALX_MAX_TSO_PKT_SIZE ||
+	    adpt->hw.mac_type == alx_mac_l1c ||
+	    adpt->hw.mac_type == alx_mac_l2c)
+		features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
+
+	return features;
+}
+
+
+static int alx_set_features(struct net_device *netdev,
+			    netdev_features_t features)
+{
+	netdev_features_t changed = netdev->features ^ features;
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 0))
+	if (changed & NETIF_F_HW_VLAN_CTAG_RX)
+#else
+	if (changed & NETIF_F_HW_VLAN_RX)
+#endif /*(LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 0))*/
+		alx_vlan_mode(netdev, features);
+
+	return 0;
+}
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,39)) */
+
+/*
+ * alx_change_mtu - Change the Maximum Transfer Unit
+ */
+static int alx_change_mtu(struct net_device *netdev, int new_mtu)
+{
+	struct alx_adapter *adpt = netdev_priv(netdev);
+	int old_mtu   = netdev->mtu;
+	int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
+
+	if ((max_frame < ALX_MIN_ETH_FRAME_SIZE) ||
+	    (max_frame > ALX_MAX_ETH_FRAME_SIZE)) {
+		alx_err(adpt, "invalid MTU setting\n");
+		return -EINVAL;
+	}
+	/* set MTU */
+	if (old_mtu != new_mtu && netif_running(netdev)) {
+		alx_netif_dbg(adpt, hw, adpt->netdev,
+			   "changing MTU from %d to %d\n",
+			   netdev->mtu, new_mtu);
+		netdev->mtu = new_mtu;
+		adpt->hw.mtu = new_mtu;
+		adpt->rxbuf_size = new_mtu > ALX_DEF_RX_BUF_SIZE ?
+				   ALIGN(max_frame, 8) : ALX_DEF_RX_BUF_SIZE;
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,39))
+		if (new_mtu > (7*1024)) {
+			netdev->features &= ~NETIF_F_TSO;
+			netdev->features &= ~NETIF_F_TSO6;
+		} else {
+			netdev->features |= NETIF_F_TSO;
+			netdev->features |= NETIF_F_TSO6;
+		}
+#else
+		netdev_update_features(netdev);
+#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,39)) */
+		alx_reinit_locked(adpt);
+	}
+
+	return 0;
+}
+
+
+int alx_open_internal(struct alx_adapter *adpt, u32 ctrl)
+{
+	struct alx_hw *hw = &adpt->hw;
+	int retval = 0;
+	int i;
+
+	alx_init_ring_ptrs(adpt);
+
+	alx_set_multicase_list(adpt->netdev);
+	alx_restore_vlan(adpt);
+
+	if (hw->cbs.config_mac)
+		retval = hw->cbs.config_mac(hw, adpt->rxbuf_size,
+				adpt->num_hw_rxques, adpt->num_rxdescs,
+				adpt->num_txques, adpt->num_txdescs);
+
+	if (hw->cbs.config_tx)
+		retval = hw->cbs.config_tx(hw);
+
+	if (hw->cbs.config_rx)
+		retval = hw->cbs.config_rx(hw);
+
+	alx_config_rss(adpt);
+
+	for (i = 0; i < adpt->num_hw_rxques; i++)
+		alx_refresh_rx_buffer(adpt->rx_queue[i]);
+
+	/* configure HW registers for MSI-X */
+	if (hw->cbs.config_msix)
+		retval = hw->cbs.config_msix(hw, adpt->num_msix_intrs,
+					CHK_ADPT_FLAG(0, MSIX_EN),
+					CHK_ADPT_FLAG(0, MSI_EN));
+
+	if (ctrl & ALX_OPEN_CTRL_IRQ_EN) {
+		retval = alx_request_irq(adpt);
+		if (retval)
+			goto err_request_irq;
+	}
+
+	/* enable NAPI, INTR and TX */
+	alx_napi_enable_all(adpt);
+
+	alx_enable_intr(adpt);
+
+	netif_tx_start_all_queues(adpt->netdev);
+
+	CLI_ADPT_FLAG(1, STATE_DOWN);
+
+	/* check link status */
+	SET_ADPT_FLAG(0, TASK_LSC_REQ);
+	adpt->link_jiffies = jiffies + ALX_TRY_LINK_TIMEOUT;
+#ifdef ALX_HIB_TIMER_CONFIG
+	mod_timer(&adpt->alx_timer, jiffies);
+#endif
+	return retval;
+
+err_request_irq:
+	alx_clean_all_rx_queues(adpt);
+	return retval;
+}
+
+
+void alx_stop_internal(struct alx_adapter *adpt, u32 ctrl)
+{
+	struct net_device *netdev = adpt->netdev;
+	struct alx_hw *hw = &adpt->hw;
+
+	SET_ADPT_FLAG(1, STATE_DOWN);
+
+	netif_tx_stop_all_queues(netdev);
+	/* call carrier off first to avoid false dev_watchdog timeouts */
+	netif_carrier_off(netdev);
+	netif_tx_disable(netdev);
+
+	alx_disable_intr(adpt);
+
+	alx_napi_disable_all(adpt);
+
+	if (ctrl & ALX_OPEN_CTRL_IRQ_EN)
+		alx_free_irq(adpt);
+
+	CLI_ADPT_FLAG(0, TASK_LSC_REQ);
+	CLI_ADPT_FLAG(0, TASK_REINIT_REQ);
+#ifdef ALX_HIB_TIMER_CONFIG
+	del_timer_sync(&adpt->alx_timer);
+#endif
+	if (ctrl & ALX_OPEN_CTRL_RESET_PHY)
+		hw->cbs.reset_phy(hw);
+
+	if (ctrl & ALX_OPEN_CTRL_RESET_MAC)
+		hw->cbs.reset_mac(hw);
+
+	adpt->hw.link_speed = ALX_LINK_SPEED_UNKNOWN;
+
+	alx_clean_all_tx_queues(adpt);
+	alx_clean_all_rx_queues(adpt);
+}
+
+
+/*
+ * alx_open - Called when a network interface is made active
+ */
+static int alx_open(struct net_device *netdev)
+{
+	struct alx_adapter *adpt = netdev_priv(netdev);
+	struct alx_hw *hw = &adpt->hw;
+	int retval;
+
+	/* disallow open during test */
+	if (CHK_ADPT_FLAG(1, STATE_TESTING) ||
+	    CHK_ADPT_FLAG(1, STATE_DIAG_RUNNING))
+		return -EBUSY;
+
+	netif_carrier_off(netdev);
+
+	/* allocate rx/tx dma buffer & descriptors */
+	retval = alx_alloc_all_rtx_descriptor(adpt);
+	if (retval) {
+		alx_err(adpt, "error in alx_alloc_all_rtx_descriptor\n");
+		goto err_alloc_rtx;
+	}
+
+#ifdef  MDM_PLATFORM
+	if (ipa_enable) {
+		/* Allocate Nodes and List for storing flow control packets*/
+		retval = alx_alloc_flow_ctrl_desc(adpt);
+		if (retval) {
+			alx_err(adpt, "Error in allocating Flow Control Buffers \n");
+			goto err_alloc_flow_ctrl;
+		}
+		_IPC_INFO("%s -- %d Flow Control Buffer Allocated \n",
+						__func__, adpt->freeq_cnt);
+	}
+#endif
+
+	retval = alx_open_internal(adpt, ALX_OPEN_CTRL_IRQ_EN);
+	if (retval)
+		goto err_open_internal;
+
+	return retval;
+
+err_open_internal:
+	alx_stop_internal(adpt, ALX_OPEN_CTRL_IRQ_EN);
+#ifdef MDM_PLATFORM
+err_alloc_flow_ctrl:
+	if (ipa_enable)
+		alx_free_flow_ctrl_desc(adpt);
+#endif
+err_alloc_rtx:
+	alx_free_all_rtx_descriptor(adpt);
+	hw->cbs.reset_mac(hw);
+	return retval;
+}
+
+
+/*
+ * alx_stop - Disables a network interface
+ */
+static int alx_stop(struct net_device *netdev)
+{
+	struct alx_adapter *adpt = netdev_priv(netdev);
+#ifdef  MDM_PLATFORM
+	struct alx_ipa_rx_desc_node *node = NULL;
+#endif
+
+	if (CHK_ADPT_FLAG(1, STATE_RESETTING))
+		netif_warn(adpt, ifdown, adpt->netdev,
+			   "flag STATE_RESETTING has already set\n");
+
+	alx_stop_internal(adpt, ALX_OPEN_CTRL_IRQ_EN |
+				ALX_OPEN_CTRL_RESET_MAC);
+	alx_free_all_rtx_descriptor(adpt);
+#ifdef  MDM_PLATFORM
+	if (ipa_enable) {
+		/* Flush any pending packets */
+		_IPC_INFO("ALX - Flush %d Pending Packets \n", adpt->pendq_cnt);
+		spin_lock_bh(&adpt->flow_ctrl_lock);
+		while (adpt->pendq_cnt) {
+			node = list_first_entry(&adpt->pend_queue_head,
+				struct alx_ipa_rx_desc_node, link);
+			list_del(&node->link);
+			list_add_tail(&node->link, &adpt->free_queue_head);
+			adpt->pendq_cnt--;
+			adpt->freeq_cnt++;
+		}
+		spin_unlock_bh(&adpt->flow_ctrl_lock);
+		if ((adpt->freeq_cnt != ALX_IPA_SYS_PIPE_DNE_PKTS) ||
+			(adpt->pendq_cnt != 0)) {
+			IPC_ERROR("%s -- Memory leak detected freeq_cnt %d, pendq_cnt %d",
+				__func__, adpt->freeq_cnt, adpt->pendq_cnt);
+			BUG();
+		}
+		alx_free_flow_ctrl_desc(adpt);
+	}
+#endif
+	return 0;
+}
+
+
+static int alx_shutdown_internal(struct pci_dev *pdev, bool *wakeup)
+{
+	struct alx_adapter *adpt = pci_get_drvdata(pdev);
+	struct net_device *netdev = adpt->netdev;
+	struct alx_hw *hw = &adpt->hw;
+	u32 wufc = adpt->wol;
+	u16 lpa;
+	u32 speed, adv_speed, misc;
+	bool link_up;
+	int i;
+	int retval = 0;
+
+	hw->cbs.config_aspm(hw, false, false);
+
+	netif_device_detach(netdev);
+	if (netif_running(netdev))
+		alx_stop_internal(adpt, 0);
+
+#ifdef CONFIG_PM_SLEEP
+	retval = pci_save_state(pdev);
+	if (retval)
+		return retval;
+#endif
+	hw->cbs.check_phy_link(hw, &speed, &link_up);
+
+	if (link_up) {
+		if (hw->mac_type == alx_mac_l1f ||
+		    hw->mac_type == alx_mac_l2f) {
+			alx_mem_r32(hw, ALX_MISC, &misc);
+			misc |= ALX_MISC_INTNLOSC_OPEN;
+			alx_mem_w32(hw, ALX_MISC, misc);
+		}
+
+		retval = hw->cbs.read_phy_reg(hw, MII_LPA, &lpa);
+		if (retval)
+			return retval;
+
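+		/*
+		 * Renegotiate the lowest speed the link partner advertises
+		 * (10M preferred over 100M), presumably to save power while
+		 * suspended with WOL armed.
+		 */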
+		adv_speed = ALX_LINK_SPEED_10_HALF;
+		if (lpa & LPA_10FULL)
+			adv_speed = ALX_LINK_SPEED_10_FULL;
+		else if (lpa & LPA_10HALF)
+			adv_speed = ALX_LINK_SPEED_10_HALF;
+		else if (lpa & LPA_100FULL)
+			adv_speed = ALX_LINK_SPEED_100_FULL;
+		else if (lpa & LPA_100HALF)
+			adv_speed = ALX_LINK_SPEED_100_HALF;
+
+		retval = hw->cbs.setup_phy_link(hw, adv_speed, true,
+				!hw->disable_fc_autoneg);
+		if (retval)
+			return retval;
+
+		for (i = 0; i < ALX_MAX_SETUP_LNK_CYCLE; i++) {
+			mdelay(100);
+			retval = hw->cbs.check_phy_link(hw, &speed, &link_up);
+			if (retval)
+				continue;
+			if (link_up)
+				break;
+		}
+	} else {
+		speed = ALX_LINK_SPEED_10_HALF;
+		link_up = false;
+		/*
+		 * When the link is down and the PHY/MAC are not programmed
+		 * at all, a suspected HW bug means we do not get the PHY UP
+		 * interrupt. As a workaround, program the MAC/PHY with a
+		 * 10Mbps half-duplex link speed even though no link is
+		 * detected.
+		 */
+		retval = hw->cbs.setup_phy_link(hw, speed, true,
+				!hw->disable_fc_autoneg);
+		if (retval)
+			return retval;
+	}
+	hw->link_speed = speed;
+	hw->link_up = link_up;
+
+	retval = hw->cbs.config_wol(hw, wufc);
+	if (retval)
+		return retval;
+
+	/* clear phy interrupt */
+	retval = hw->cbs.ack_phy_intr(hw);
+	if (retval)
+		return retval;
+
+	if (wufc) {
+		/* pcie patch */
+		device_set_wakeup_enable(&pdev->dev, 1);
+	}
+
+	retval = hw->cbs.config_pow_save(hw, adpt->hw.link_speed,
+			(wufc ? true : false), false,
+			(wufc & ALX_WOL_MAGIC ? true : false), true);
+	if (retval)
+		return retval;
+
+	*wakeup = wufc ? true : false;
+	pci_clear_master(pdev);
+	return 0;
+}
+
+
+static void alx_shutdown(struct pci_dev *pdev)
+{
+	bool wakeup;
+	alx_shutdown_internal(pdev, &wakeup);
+
+	pci_wake_from_d3(pdev, wakeup);
+	pci_set_power_state(pdev, PCI_D3hot);
+}
+
+
+#ifdef CONFIG_PM_SLEEP
+
+static int alx_suspend(struct device *dev)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct alx_adapter *adpt = pci_get_drvdata(pdev);
+	int retval;
+	bool wakeup;
+
+	if (!pdev || !adpt) {
+		IPC_ERROR("%s NULL Pointers pdev %p adpt %p",__func__,pdev,adpt);
+		return -1;
+	}
+
+	retval = alx_shutdown_internal(pdev, &wakeup);
+	if (retval)
+		return retval;
+
+#ifndef APQ_PLATFORM
+	if (ipa_enable) {
+		if (alx_ipa_rm_try_release(adpt))
+			IPC_ERROR("%s: ODU PROD release unsuccessful\n",
+				  __func__);
+	}
+#endif
+
+	if (wakeup) {
+		pci_prepare_to_sleep(pdev);
+	} else {
+		pci_wake_from_d3(pdev, false);
+		pci_set_power_state(pdev, PCI_D3hot);
+	}
+
+	return 0;
+}
+
+
+static int alx_resume(struct device *dev)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct alx_adapter *adpt = pci_get_drvdata(pdev);
+	struct net_device *netdev = adpt->netdev;
+	struct alx_hw *hw = &adpt->hw;
+	int retval;
+
+	pci_set_power_state(pdev, PCI_D0);
+	pci_restore_state(pdev);
+	/*
+	 * pci_restore_state clears dev->state_saved so call
+	 * pci_save_state to restore it.
+	 */
+	pci_save_state(pdev);
+	pci_enable_pcie_error_reporting(pdev);
+	pci_set_master(pdev);
+	pci_enable_wake(pdev, PCI_D3hot, 0);
+	pci_enable_wake(pdev, PCI_D3cold, 0);
+
+	retval = hw->cbs.reset_pcie(hw, true, true);
+	retval = hw->cbs.reset_phy(hw);
+	retval = hw->cbs.reset_mac(hw);
+	retval = hw->cbs.setup_phy_link(hw, hw->autoneg_advertised, true,
+			!hw->disable_fc_autoneg);
+
+	retval = hw->cbs.config_wol(hw, 0);
+
+	if (netif_running(netdev)) {
+		retval = alx_open_internal(adpt, 0);
+		if (retval)
+			return retval;
+	}
+
+	netif_device_attach(netdev);
+	/* hold a wakeup source for 5s so any immediate traffic can be handled */
+	pm_wakeup_event(dev, 5000);
+	return 0;
+}
+#endif
+
+
+/*
+ * alx_update_hw_stats - Update the board statistics counters.
+ */
+void alx_update_hw_stats(struct alx_adapter *adpt)
+{
+	struct net_device_stats *net_stats;
+	struct alx_hw *hw = &adpt->hw;
+	struct alx_hw_stats *hwstats = &adpt->hw_stats;
+	unsigned long *hwstat_item = NULL;
+	u32 hwstat_reg;
+	u32 hwstat_data;
+
+	if (CHK_ADPT_FLAG(1, STATE_DOWN) || CHK_ADPT_FLAG(1, STATE_RESETTING))
+		return;
+
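+	/*
+	 * The statistics registers are contiguous and struct alx_hw_stats
+	 * is assumed to mirror their layout, so both are walked in
+	 * lockstep, one 32-bit register per unsigned long counter.
+	 */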
+	/* update RX status */
+	hwstat_reg  = hw->rxstat_reg;
+	hwstat_item = &hwstats->rx_ok;
+	while (hwstat_reg < hw->rxstat_reg + hw->rxstat_sz) {
+		alx_mem_r32(hw, hwstat_reg, &hwstat_data);
+		*hwstat_item += hwstat_data;
+		hwstat_reg += 4;
+		hwstat_item++;
+	}
+
+	/* update TX status */
+	hwstat_reg  = hw->txstat_reg;
+	hwstat_item = &hwstats->tx_ok;
+	while (hwstat_reg < hw->txstat_reg + hw->txstat_sz) {
+		alx_mem_r32(hw, hwstat_reg, &hwstat_data);
+		*hwstat_item += hwstat_data;
+		hwstat_reg += 4;
+		hwstat_item++;
+	}
+
+	net_stats = &adpt->netdev->stats;
+	net_stats->rx_packets = hwstats->rx_ok;
+	net_stats->tx_packets = hwstats->tx_ok;
+	net_stats->rx_bytes   = hwstats->rx_byte_cnt;
+	net_stats->tx_bytes   = hwstats->tx_byte_cnt;
+	net_stats->multicast  = hwstats->rx_mcast;
+	net_stats->collisions = hwstats->tx_single_col +
+		hwstats->tx_multi_col * 2 +
+		hwstats->tx_late_col + hwstats->tx_abort_col;
+
+	net_stats->rx_errors  = hwstats->rx_frag + hwstats->rx_fcs_err +
+		hwstats->rx_len_err + hwstats->rx_ov_sz +
+		hwstats->rx_ov_rrd + hwstats->rx_align_err;
+
+	net_stats->rx_fifo_errors   = hwstats->rx_ov_rxf;
+	net_stats->rx_length_errors = hwstats->rx_len_err;
+	net_stats->rx_crc_errors    = hwstats->rx_fcs_err;
+	net_stats->rx_frame_errors  = hwstats->rx_align_err;
+	net_stats->rx_over_errors   = hwstats->rx_ov_rrd + hwstats->rx_ov_rxf;
+
+	net_stats->rx_missed_errors = hwstats->rx_ov_rrd + hwstats->rx_ov_rxf;
+
+	net_stats->tx_errors = hwstats->tx_late_col + hwstats->tx_abort_col +
+		hwstats->tx_underrun + hwstats->tx_trunc;
+	net_stats->tx_fifo_errors    = hwstats->tx_underrun;
+	net_stats->tx_aborted_errors = hwstats->tx_abort_col;
+	net_stats->tx_window_errors  = hwstats->tx_late_col;
+}
+
+
+/*
+ * alx_get_stats - Get System Network Statistics
+ *
+ * Returns the address of the device statistics structure.
+ * The statistics are actually updated from the timer callback.
+ */
+static struct net_device_stats *alx_get_stats(struct net_device *netdev)
+{
+	struct alx_adapter *adpt = netdev_priv(netdev);
+
+	alx_update_hw_stats(adpt);
+	return &netdev->stats;
+}
+
+/* Resize the descriptor rings */
+int alx_resize_rings(struct net_device *netdev)
+{
+	/* close and then re-open interface */
+	alx_stop(netdev);
+	return alx_open(netdev);
+}
+
+
+#ifdef ALX_LINK_DOWN_CONFIG
+static int alx_link_mac_restore(struct alx_adapter *adpt)
+{
+	struct alx_hw *hw = &adpt->hw;
+	int retval = 0;
+	int i;
+
+	_IPC_INFO("alx: into NEW alx_link_mac_restore\n");
+	alx_init_ring_ptrs(adpt);
+
+	alx_set_multicase_list(adpt->netdev);
+	alx_restore_vlan(adpt);
+
+	if (hw->cbs.config_mac)
+		retval = hw->cbs.config_mac(hw, adpt->rxbuf_size,
+				adpt->num_hw_rxques, adpt->num_rxdescs,
+				adpt->num_txques, adpt->num_txdescs);
+
+	if (hw->cbs.config_tx)
+		retval = hw->cbs.config_tx(hw);
+
+	if (hw->cbs.config_rx)
+		retval = hw->cbs.config_rx(hw);
+
+	alx_config_rss(adpt);
+
+	for (i = 0; i < adpt->num_hw_rxques; i++)
+		alx_refresh_rx_buffer(adpt->rx_queue[i]);
+
+	/* configure HW registers for MSI-X */
+	if (hw->cbs.config_msix)
+		retval = hw->cbs.config_msix(hw, adpt->num_msix_intrs,
+					CHK_ADPT_FLAG(0, MSIX_EN),
+					CHK_ADPT_FLAG(0, MSI_EN));
+
+	return retval;
+}
+#endif
+
+#ifndef APQ_PLATFORM
+static int alx_ipa_set_perf_level(void)
+{
+	struct ipa_rm_perf_profile profile;
+	struct alx_ipa_ctx *alx_ipa = galx_adapter_ptr->palx_ipa;
+	int ret = 0;
+
+	if (!alx_ipa) {
+		IPC_ERROR("%s alx_ipa cts NULL ctx:%p\n",__func__,alx_ipa);
+		return -1;
+	}
+
+	memset(&profile, 0, sizeof(profile));
+	profile.max_supported_bandwidth_mbps = MAX_AR8151_BW;
+
+	ret = ipa_rm_set_perf_profile(IPA_RM_RESOURCE_ODU_ADAPT_PROD,
+					&profile);
+	if (ret) {
+		IPC_ERROR("Err to set BW: IPA_RM_RESOURCE_ODU_ADAPT_PROD err:%d\n",
+			ret);
+		return ret;
+	}
+
+	ret = ipa_rm_set_perf_profile(IPA_RM_RESOURCE_ODU_ADAPT_CONS,
+					&profile);
+	if (ret) {
+		IPC_ERROR("Err to set BW: IPA_RM_RESOURCE_ODU_ADAPT_CONS err:%d\n",
+			ret);
+		return ret;
+	}
+
+	alx_ipa->alx_ipa_perf_requested = true;
+	return ret;
+}
+#endif
+
+static void alx_link_task_routine(struct alx_adapter *adpt)
+{
+	struct net_device *netdev = adpt->netdev;
+	struct alx_hw *hw = &adpt->hw;
+	char *link_desc;
+#ifndef APQ_PLATFORM
+	int ret = 0;
+	struct alx_ipa_ctx *alx_ipa = adpt->palx_ipa;
+#endif
+
+	if (!CHK_ADPT_FLAG(0, TASK_LSC_REQ))
+		return;
+	CLI_ADPT_FLAG(0, TASK_LSC_REQ);
+
+	if (CHK_ADPT_FLAG(1, STATE_DOWN))
+		return;
+
+	if (hw->cbs.check_phy_link) {
+		hw->cbs.check_phy_link(hw,
+			&hw->link_speed, &hw->link_up);
+	} else {
+		/* always assume link is up, if no check link function */
+		hw->link_speed = ALX_LINK_SPEED_1GB_FULL;
+		hw->link_up = true;
+	}
+	alx_netif_dbg(adpt, timer, adpt->netdev,
+		   "link_speed = %d, link_up = %d\n",
+		   hw->link_speed, hw->link_up);
+
+	if (!hw->link_up && time_after(adpt->link_jiffies, jiffies))
+		SET_ADPT_FLAG(0, TASK_LSC_REQ);
+
+	if (hw->link_up) {
+		if (netif_carrier_ok(netdev))
+			return;
+
+		link_desc = (hw->link_speed == ALX_LINK_SPEED_1GB_FULL) ?
+			"1 Gbps Duplex Full" :
+			(hw->link_speed == ALX_LINK_SPEED_100_FULL ?
+			 "100 Mbps Duplex Full" :
+			 (hw->link_speed == ALX_LINK_SPEED_100_HALF ?
+			  "100 Mbps Duplex Half" :
+			  (hw->link_speed == ALX_LINK_SPEED_10_FULL ?
+			   "10 Mbps Duplex Full" :
+			   (hw->link_speed == ALX_LINK_SPEED_10_HALF ?
+			    "10 Mbps Duplex HALF" :
+			    "unknown speed"))));
+		alx_netif_dbg(adpt, timer, adpt->netdev,
+			   "NIC Link is Up %s\n", link_desc);
+
+		hw->cbs.config_aspm(hw, true, true);
+		hw->cbs.start_mac(hw);
+		netif_carrier_on(netdev);
+		netif_tx_wake_all_queues(netdev);
+
+#ifdef MDM_PLATFORM
+		if (ipa_enable) {
+			/* Enable ODU Bridge */
+			if (alx_ipa->ipa_ready && CHK_ADPT_FLAG(2, ODU_INIT)) {
+				ret = odu_bridge_connect();
+				if (ret)
+					IPC_ERROR("Could not connect to ODU bridge %d \n",
+						ret);
+				else
+					SET_ADPT_FLAG(2, ODU_CONNECT);
+				/* Request for IPA Resources */
+				spin_lock_bh(&alx_ipa->ipa_rm_state_lock);
+				if (alx_ipa->ipa_prod_rm_state == ALX_IPA_RM_RELEASED) {
+					spin_unlock_bh(&alx_ipa->ipa_rm_state_lock);
+					alx_ipa_rm_request(adpt);
+				} else {
+					spin_unlock_bh(&alx_ipa->ipa_rm_state_lock);
+				}
+			}
+		}
+#endif
+	} else {
+		/* only continue if link was up previously */
+		if (!netif_carrier_ok(netdev))
+			return;
+
+		hw->link_speed = 0;
+		alx_netif_dbg(adpt, timer, adpt->netdev, "NIC Link is Down\n");
+		netif_carrier_off(netdev);
+		netif_tx_stop_all_queues(netdev);
+
+#ifdef ALX_LINK_DOWN_CONFIG
+		hw->cbs.reset_mac(hw);
+#else
+		hw->cbs.stop_mac(hw);
+#endif
+		hw->cbs.config_aspm(hw, false, true);
+		hw->cbs.setup_phy_link(hw, hw->autoneg_advertised, true,
+				!hw->disable_fc_autoneg);
+#ifdef ALX_LINK_DOWN_CONFIG
+		alx_link_mac_restore(adpt);
+#endif
+#ifdef MDM_PLATFORM
+		if (ipa_enable) {
+			/* Disable ODU Bridge */
+			ret = odu_bridge_disconnect();
+			if (ret) {
+				IPC_ERROR("Could not connect to ODU bridge %d \n", ret);
+			} else {
+				CLI_ADPT_FLAG(2, ODU_CONNECT);
+				adpt->palx_ipa->alx_ipa_perf_requested = false;
+				if (alx_ipa_rm_try_release(adpt))
+					IPC_ERROR("%s: ODU PROD release unsuccessful\n", __func__);
+			}
+		}
+#endif
+	}
+}
+
+
+static void alx_reinit_task_routine(struct alx_adapter *adpt)
+{
+	if (!CHK_ADPT_FLAG(0, TASK_REINIT_REQ))
+		return;
+	CLI_ADPT_FLAG(0, TASK_REINIT_REQ);
+
+	if (CHK_ADPT_FLAG(1, STATE_DOWN) || CHK_ADPT_FLAG(1, STATE_RESETTING))
+		return;
+
+	alx_reinit_locked(adpt);
+}
+
+
+/*
+ * alx_timer_routine - Timer Call-back
+ */
+static void alx_timer_routine(unsigned long data)
+{
+	struct alx_adapter *adpt = (struct alx_adapter *)data;
+	unsigned long delay;
+
+#ifdef ALX_HIB_TASK_CONFIG
+	struct alx_hw *hw = &adpt->hw;
+	if (hw->bHibBug)
+		hw->cbs.apply_phy_hib_patch(hw);
+#endif
+	/* poll faster when waiting for link */
+	if (CHK_ADPT_FLAG(0, TASK_LSC_REQ))
+		delay = HZ / 10;
+	else
+		delay = HZ * 2;
+
+	/* Reset the timer */
+	mod_timer(&adpt->alx_timer, delay + jiffies);
+
+	alx_task_schedule(adpt);
+}
+
+
+/*
+ * alx_task_routine - manages and runs subtasks
+ */
+static void alx_task_routine(struct work_struct *work)
+{
+	struct alx_adapter *adpt = container_of(work,
+				struct alx_adapter, alx_task);
+	/* test state of adapter */
+	if (!CHK_ADPT_FLAG(1, STATE_WATCH_DOG))
+		netif_warn(adpt, timer, adpt->netdev,
+			   "flag STATE_WATCH_DOG doesn't set\n");
+
+	/* reinit task */
+	alx_reinit_task_routine(adpt);
+
+	/* link task */
+	alx_link_task_routine(adpt);
+
+	/* flush memory to make sure state is correct before next watchdog */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,39))
+	smp_mb();
+#else
+	smp_mb__before_clear_bit();
+#endif
+
+	CLI_ADPT_FLAG(1, STATE_WATCH_DOG);
+}
+
+#ifndef APQ_PLATFORM
+/*
+ * alx_ipa_send_routine - Sends packets to IPA/ODU bridge Driver
+ * Scheduled on RX of IPA_WRITE_DONE Event
+ */
+static void alx_ipa_send_routine(struct work_struct *work)
+{
+	struct alx_adapter *adpt = container_of(work,
+				struct alx_adapter, ipa_send_task);
+	struct alx_ipa_rx_desc_node *node = NULL;
+	struct ipa_tx_meta ipa_meta = {0x0};
+	int ret = 0;
+	struct alx_ipa_ctx *alx_ipa = adpt->palx_ipa;
+
+	/* Set the Perf level when the Request is granted */
+	if (!alx_ipa->alx_ipa_perf_requested) {
+		spin_lock_bh(&alx_ipa->ipa_rm_state_lock);
+		if (alx_ipa->ipa_prod_rm_state == ALX_IPA_RM_GRANTED) {
+			spin_unlock_bh(&alx_ipa->ipa_rm_state_lock);
+			alx_ipa_set_perf_level();
+			alx_ipa->alx_ipa_perf_requested = true;
+		} else {
+			spin_unlock_bh(&alx_ipa->ipa_rm_state_lock);
+		}
+	}
+
+	/*
+	 * Send all pending packets to IPA. Compute the number of
+	 * descriptors left for HW and send packets accordingly.
+	 */
+	spin_lock_bh(&adpt->flow_ctrl_lock);
+	CLI_ADPT_FLAG(2, WQ_SCHED);
+	if (unlikely(!adpt->pendq_cnt)) {
+		IPC_ERROR("%s - Error no pending packets in Queue %d\n",
+						__func__, adpt->pendq_cnt);
+		spin_unlock_bh(&adpt->flow_ctrl_lock);
+		return;
+	}
+	if (adpt->ipa_free_desc_cnt < adpt->ipa_low_watermark) {
+		adpt->palx_ipa->stats.ipa_low_watermark_cnt++;
+		spin_unlock_bh(&adpt->flow_ctrl_lock);
+		return;
+	}
+
+	spin_lock_bh(&alx_ipa->ipa_rm_state_lock);
+	if (alx_ipa->ipa_prod_rm_state != ALX_IPA_RM_GRANTED) {
+		spin_unlock_bh(&alx_ipa->ipa_rm_state_lock);
+		if (adpt->pendq_cnt > 0 && alx_ipa_rm_request(adpt) != 0) {
+			if (printk_ratelimit())
+				_IPC_INFO("%s: IPA RM resource not granted, returning\n",
+					  __func__);
+			spin_unlock_bh(&adpt->flow_ctrl_lock);
+			return;
+		}
+	} else {
+		spin_unlock_bh(&alx_ipa->ipa_rm_state_lock);
+	}
+
+	while (adpt->ipa_free_desc_cnt && adpt->pendq_cnt) {
+		node = list_first_entry(&adpt->pend_queue_head,
+				struct alx_ipa_rx_desc_node, link);
+		list_del(&node->link);
+		list_add_tail(&node->link, &adpt->free_queue_head);
+		adpt->freeq_cnt++;
+		adpt->pendq_cnt--;
+		ipa_meta.dma_address_valid = false;
+		/* Send Packet to ODU bridge Driver */
+		ret = odu_bridge_tx_dp(node->skb_ptr, &ipa_meta);
+		if (ret) {
+			IPC_ERROR("odu_bridge_tx_dp() Failed in %s!!"
+				" ret %d--Free SKB\n", __func__, ret);
+			/* skbs must be freed with the skb API, not kfree() */
+			dev_kfree_skb_any(node->skb_ptr);
+			adpt->palx_ipa->stats.rx_ipa_send_fail++;
+		} else {
+			adpt->palx_ipa->stats.rx_ipa_send++;
+			adpt->ipa_free_desc_cnt--;
+			/* Increment the ipa_rx_completion Counter */
+			spin_lock(&alx_ipa->rm_ipa_lock);
+			if (alx_ipa->acquire_wake_src == false) {
+				__pm_stay_awake(&alx_ipa->rm_ipa_wait);
+				alx_ipa->acquire_wake_src = true;
+			}
+			alx_ipa->ipa_rx_completion++;
+			spin_unlock(&alx_ipa->rm_ipa_lock);
+		}
+	}
+	/* release PROD if we don't have any more data to send */
+	spin_unlock_bh(&adpt->flow_ctrl_lock);
+}
+#endif
+
+/* Check that enough transmit packet descriptors are available */
+static bool alx_check_num_tpdescs(struct alx_tx_queue *txque,
+				  const struct sk_buff *skb)
+{
+	u16 num_required = 1;
+	u16 num_available = 0;
+	u16 produce_idx = txque->tpq.produce_idx;
+	u16 consume_idx = txque->tpq.consume_idx;
+	int i = 0;
+
+	u16 proto_hdr_len = 0;
+	if (skb_is_gso(skb)) {
+		proto_hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+		if (proto_hdr_len < skb_headlen(skb))
+			num_required++;
+		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
+			num_required++;
+	}
+	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
+		num_required++;
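+	/*
+	 * Free slots in the circular ring; one slot is always kept empty so
+	 * that produce_idx == consume_idx unambiguously means "ring empty".
+	 */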
+	num_available = (consume_idx > produce_idx) ?
+		(consume_idx - produce_idx - 1) :
+		(txque->tpq.count + consume_idx - produce_idx - 1);
+
+	return num_required < num_available;
+}
+
+
+static int alx_tso_csum(struct alx_adapter *adpt,
+			struct alx_tx_queue *txque,
+			struct sk_buff *skb,
+			union alx_sw_tpdesc *stpd)
+{
+	struct pci_dev *pdev = adpt->pdev;
+	u8  hdr_len;
+	int retval;
+
+	if (skb_is_gso(skb)) {
+		if (skb_header_cloned(skb)) {
+			retval = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
+			if (unlikely(retval))
+				return retval;
+		}
+
+		if (skb->protocol == htons(ETH_P_IP)) {
+			u32 pkt_len =
+				((unsigned char *)ip_hdr(skb) - skb->data) +
+				ntohs(ip_hdr(skb)->tot_len);
+			if (skb->len > pkt_len)
+				pskb_trim(skb, pkt_len);
+		}
+
+		hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+		if (unlikely(skb->len == hdr_len)) {
+			/* we only need to do csum */
+			dev_warn(&pdev->dev,
+				 "TSO not needed for a packet with no payload, doing checksum only\n");
+			goto do_csum;
+		}
+
+		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) {
+			ip_hdr(skb)->check = 0;
+			tcp_hdr(skb)->check = ~csum_tcpudp_magic(
+						ip_hdr(skb)->saddr,
+						ip_hdr(skb)->daddr,
+						0, IPPROTO_TCP, 0);
+			stpd->genr.ipv4 = 1;
+		}
+
+		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
+			/* IPv6 TSO needs an extra TPD */
+			union alx_sw_tpdesc extra_tpd;
+
+			memset(stpd, 0, sizeof(union alx_sw_tpdesc));
+			memset(&extra_tpd, 0, sizeof(union alx_sw_tpdesc));
+
+			ipv6_hdr(skb)->payload_len = 0;
+			tcp_hdr(skb)->check = ~csum_ipv6_magic(
+						&ipv6_hdr(skb)->saddr,
+						&ipv6_hdr(skb)->daddr,
+						0, IPPROTO_TCP, 0);
+			extra_tpd.tso.pkt_len = skb->len;
+			extra_tpd.tso.lso = 0x1;
+			extra_tpd.tso.lso_v2 = 0x1;
+			alx_set_tpdesc(txque, &extra_tpd);
+			stpd->tso.lso_v2 = 0x1;
+		}
+
+		stpd->tso.lso = 0x1;
+		stpd->tso.tcphdr_offset = skb_transport_offset(skb);
+		stpd->tso.mss = skb_shinfo(skb)->gso_size;
+		return 0;
+	}
+
+do_csum:
+	if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
+		u8 css, cso;
+		cso = skb_checksum_start_offset(skb);
+
+		if (unlikely(cso & 0x1)) {
+			dev_err(&pdev->dev,
+				"payload checksum start offset must be even\n");
+			return -1;
+		}
+
+		css = cso + skb->csum_offset;
+		stpd->csum.payld_offset = cso >> 1;
+		stpd->csum.cxsum_offset = css >> 1;
+		stpd->csum.c_csum = 0x1;
+	}
+	return 0;
+}
+
+
+static void alx_tx_map(struct alx_adapter *adpt,
+		       struct alx_tx_queue *txque,
+		       struct sk_buff *skb,
+		       union alx_sw_tpdesc *stpd)
+{
+	struct alx_buffer *tpbuf = NULL;
+	unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
+	unsigned int len = skb_headlen(skb);
+	u16 map_len = 0;
+	u16 mapped_len = 0;
+	u16 hdr_len = 0;
+	u16 f;
+	u32 tso = stpd->tso.lso;
+
+	if (tso) {
+		/* TSO */
+		map_len = hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+
+		tpbuf = GET_TP_BUFFER(txque, txque->tpq.produce_idx);
+		tpbuf->length = map_len;
+		tpbuf->dma = dma_map_single(txque->dev,
+					skb->data, hdr_len, DMA_TO_DEVICE);
+		mapped_len += map_len;
+		stpd->genr.addr = tpbuf->dma;
+		stpd->genr.buffer_len = tpbuf->length;
+
+		alx_set_tpdesc(txque, stpd);
+	}
+
+	if (mapped_len < len) {
+		tpbuf = GET_TP_BUFFER(txque, txque->tpq.produce_idx);
+		tpbuf->length = len - mapped_len;
+		tpbuf->dma =
+			dma_map_single(txque->dev, skb->data + mapped_len,
+					tpbuf->length, DMA_TO_DEVICE);
+		stpd->genr.addr = tpbuf->dma;
+		stpd->genr.buffer_len  = tpbuf->length;
+		alx_set_tpdesc(txque, stpd);
+	}
+
+	for (f = 0; f < nr_frags; f++) {
+		struct skb_frag_struct *frag;
+
+		frag = &skb_shinfo(skb)->frags[f];
+
+		tpbuf = GET_TP_BUFFER(txque, txque->tpq.produce_idx);
+		tpbuf->length = skb_frag_size(frag);
+		tpbuf->dma = skb_frag_dma_map(txque->dev, frag, 0,
+					      tpbuf->length, DMA_TO_DEVICE);
+		stpd->genr.addr = tpbuf->dma;
+		stpd->genr.buffer_len  = tpbuf->length;
+		alx_set_tpdesc(txque, stpd);
+	}
+
+	/* The last tpd */
+	alx_set_tpdesc_lastfrag(txque);
+	/*
+	 * The last buffer info contains the skb pointer, so the skb is
+	 * freed after unmap.
+	 */
+	if (tpbuf)
+		tpbuf->skb = skb;
+}
+
+
+static netdev_tx_t alx_start_xmit_frame(struct alx_adapter *adpt,
+					struct alx_tx_queue *txque,
+					struct sk_buff *skb)
+{
+	struct alx_hw     *hw = &adpt->hw;
+	unsigned long     flags = 0;
+	union alx_sw_tpdesc stpd; /* normal */
+#ifdef MDM_PLATFORM
+	struct alx_ipa_ctx *alx_ipa;
+	if (ipa_enable)
+		alx_ipa = adpt->palx_ipa;
+#endif
+
+	if (CHK_ADPT_FLAG(1, STATE_DOWN) ||
+	    CHK_ADPT_FLAG(1, STATE_DIAG_RUNNING)) {
+		dev_kfree_skb_any(skb);
+		return NETDEV_TX_OK;
+	}
+
+	if (!spin_trylock_irqsave(&adpt->tx_lock, flags)) {
+		alx_err(adpt, "tx locked!\n");
+		return NETDEV_TX_LOCKED;
+	}
+
+	if (!alx_check_num_tpdescs(txque, skb)) {
+		/* not enough descriptors, just stop the queue */
+		netif_stop_queue(adpt->netdev);
+		spin_unlock_irqrestore(&adpt->tx_lock, flags);
+//		alx_err(adpt, "No TX Desc to send packet\n");
+		return NETDEV_TX_BUSY;
+	}
+
+	memset(&stpd, 0, sizeof(union alx_sw_tpdesc));
+	/* do TSO and check sum */
+	if (alx_tso_csum(adpt, txque, skb, &stpd) != 0) {
+		spin_unlock_irqrestore(&adpt->tx_lock, flags);
+		dev_kfree_skb_any(skb);
+		return NETDEV_TX_OK;
+	}
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,4,0))
+	if (unlikely(skb_vlan_tag_present(skb))) {
+		u16 vlan = skb_vlan_tag_get(skb);
+#else
+	if (unlikely(vlan_tx_tag_present(skb))) {
+		u16 vlan = vlan_tx_tag_get(skb);
+#endif
+		u16 tag = 0;
+		ALX_VLAN_TO_TAG(vlan, tag);
+		stpd.genr.vlan_tag = tag;
+		stpd.genr.instag = 0x1;
+	}
+	if (skb_network_offset(skb) != ETH_HLEN)
+		stpd.genr.type = 0x1; /* Ethernet frame */
+
+	alx_tx_map(adpt, txque, skb, &stpd);
+
+	/* update produce idx */
+	wmb();
+	alx_mem_w16(hw, txque->produce_reg, txque->tpq.produce_idx);
+	alx_netif_dbg(adpt, tx_err, adpt->netdev,
+		   "TX[%d]: tpq.consume_idx = 0x%x, tpq.produce_idx = 0x%x\n",
+		   txque->que_idx, txque->tpq.consume_idx,
+		   txque->tpq.produce_idx);
+	alx_netif_dbg(adpt, tx_err, adpt->netdev,
+		   "TX[%d]: Produce Reg[%x] = 0x%x\n",
+		   txque->que_idx, txque->produce_reg, txque->tpq.produce_idx);
+
+	spin_unlock_irqrestore(&adpt->tx_lock, flags);
+
+#ifdef MDM_PLATFORM
+	if (ipa_enable) {
+		/* Hold on to the wake lock for TX Completion Event */
+		spin_lock_bh(&alx_ipa->rm_ipa_lock);
+		if (alx_ipa->acquire_wake_src == false) {
+			 __pm_stay_awake(&alx_ipa->rm_ipa_wait);
+			alx_ipa->acquire_wake_src = true;
+		}
+		alx_ipa->alx_tx_completion++;
+		spin_unlock_bh(&alx_ipa->rm_ipa_lock);
+	}
+#endif
+
+	return NETDEV_TX_OK;
+}
+
+
+static netdev_tx_t alx_start_xmit(struct sk_buff *skb,
+				  struct net_device *netdev)
+{
+	struct alx_adapter *adpt = netdev_priv(netdev);
+	struct alx_tx_queue *txque;
+
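+	/* all transmit traffic currently goes through Tx queue 0 */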
+	txque = adpt->tx_queue[0];
+	return alx_start_xmit_frame(adpt, txque, skb);
+}
+
+
+/*
+ * alx_mii_ioctl
+ */
+static int alx_mii_ioctl(struct net_device *netdev,
+			 struct ifreq *ifr, int cmd)
+{
+	struct alx_adapter *adpt = netdev_priv(netdev);
+	struct alx_hw *hw = &adpt->hw;
+	struct mii_ioctl_data *data = if_mii(ifr);
+	int retval = 0;
+
+	if (!netif_running(netdev))
+		return -EINVAL;
+
+	switch (cmd) {
+	case SIOCGMIIPHY:
+		data->phy_id = 0;
+		break;
+
+	case SIOCGMIIREG:
+		if (data->reg_num & ~(0x1F)) {
+			retval = -EFAULT;
+			goto out;
+		}
+
+		retval = hw->cbs.read_phy_reg(hw, data->reg_num,
+					      &data->val_out);
+		alx_netif_dbg(adpt, hw, adpt->netdev, "read phy %02x %04x\n",
+			  data->reg_num, data->val_out);
+		if (retval) {
+			retval = -EIO;
+			goto out;
+		}
+		break;
+
+	case SIOCSMIIREG:
+		if (data->reg_num & ~(0x1F)) {
+			retval = -EFAULT;
+			goto out;
+		}
+
+		retval = hw->cbs.write_phy_reg(hw, data->reg_num, data->val_in);
+		alx_netif_dbg(adpt, hw, adpt->netdev, "write phy %02x %04x\n",
+			  data->reg_num, data->val_in);
+		if (retval) {
+			retval = -EIO;
+			goto out;
+		}
+		break;
+	default:
+		retval = -EOPNOTSUPP;
+		break;
+	}
+out:
+	return retval;
+
+}
+
+
+static int alx_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
+{
+	switch (cmd) {
+	case SIOCGMIIPHY:
+	case SIOCGMIIREG:
+	case SIOCSMIIREG:
+		return alx_mii_ioctl(netdev, ifr, cmd);
+	default:
+		return -EOPNOTSUPP;
+	}
+}
+
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void alx_poll_controller(struct net_device *netdev)
+{
+	struct alx_adapter *adpt = netdev_priv(netdev);
+	int num_msix_intrs = adpt->num_msix_intrs;
+	int msix_idx;
+
+	/* if interface is down do nothing */
+	if (CHK_ADPT_FLAG(1, STATE_DOWN))
+		return;
+
+	if (CHK_ADPT_FLAG(0, MSIX_EN)) {
+		for (msix_idx = 0; msix_idx < num_msix_intrs; msix_idx++) {
+			struct alx_msix_param *msix = adpt->msix[msix_idx];
+			if (CHK_MSIX_FLAG(RXS) || CHK_MSIX_FLAG(TXS))
+				alx_msix_rtx(0, msix);
+			else if (CHK_MSIX_FLAG(TIMER))
+				alx_msix_timer(0, msix);
+			else if (CHK_MSIX_FLAG(ALERT))
+				alx_msix_alert(0, msix);
+			else if (CHK_MSIX_FLAG(SMB))
+				alx_msix_smb(0, msix);
+			else if (CHK_MSIX_FLAG(PHY))
+				alx_msix_phy(0, msix);
+		}
+	} else {
+		alx_interrupt(adpt->pdev->irq, netdev);
+	}
+}
+#endif
+
+
+static const struct net_device_ops alx_netdev_ops = {
+	.ndo_open               = alx_open,
+	.ndo_stop               = alx_stop,
+	.ndo_start_xmit         = alx_start_xmit,
+	.ndo_get_stats          = alx_get_stats,
+	.ndo_set_rx_mode        = alx_set_multicase_list,
+	.ndo_validate_addr      = eth_validate_addr,
+	.ndo_set_mac_address    = alx_set_mac_address,
+	.ndo_change_mtu         = alx_change_mtu,
+	.ndo_do_ioctl           = alx_ioctl,
+	.ndo_tx_timeout         = alx_tx_timeout,
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,39))
+	.ndo_fix_features	= alx_fix_features,
+	.ndo_set_features	= alx_set_features,
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,39)) */
+#ifdef CONFIG_NET_POLL_CONTROLLER
+	.ndo_poll_controller    = alx_poll_controller,
+#endif
+};
+
+#ifdef MDM_PLATFORM
+static void alx_ipa_tx_dp_cb(void *priv, enum ipa_dp_evt_type evt,
+			     unsigned long data)
+{
+	struct alx_adapter *adpt = priv;
+	struct alx_ipa_ctx *alx_ipa = adpt->palx_ipa;
+	struct sk_buff *skb = (struct sk_buff *)data;
+	bool schedule_ipa_work = false;
+
+	if (!CHK_ADPT_FLAG(2, ODU_CONNECT)) {
+		IPC_ERROR("%s called before ODU_CONNECT was called with evt %d \n",
+				__func__, evt);
+		return;
+	}
+
+	IPC_DEBUG("%s %d EVT Rcvd %d \n", __func__, __LINE__, evt);
+	if (evt == IPA_RECEIVE) {
+		/* Deliver SKB to network adapter */
+		alx_ipa->stats.rx_ipa_excep++;
+		skb->dev = adpt->netdev;
+		skb->protocol = eth_type_trans(skb, skb->dev);
+		/* Prevent the device from suspending for 200 msec to give
+		 * the network stack enough time to process the packet */
+		pm_wakeup_event(&adpt->pdev->dev, 200);
+		netif_rx_ni(skb);
+	} else if (evt == IPA_WRITE_DONE) {
+		/* SKB send to IPA, safe to free */
+		alx_ipa->stats.rx_ipa_write_done++;
+		dev_kfree_skb(skb);
+		spin_lock_bh(&adpt->flow_ctrl_lock);
+		adpt->ipa_free_desc_cnt++;
+		/* Decrement the ipa_rx_completion Counter */
+		spin_lock_bh(&alx_ipa->rm_ipa_lock);
+		alx_ipa->ipa_rx_completion--;
+		if (!alx_ipa->ipa_rx_completion &&
+			!alx_ipa->alx_tx_completion &&
+			(alx_ipa->acquire_wake_src == true)) {
+			__pm_relax(&alx_ipa->rm_ipa_wait);
+			alx_ipa->acquire_wake_src = false;
+		}
+		spin_unlock_bh(&alx_ipa->rm_ipa_lock);
+		if ((adpt->pendq_cnt > 0) &&
+			(adpt->ipa_free_desc_cnt < adpt->ipa_low_watermark)) {
+			alx_ipa->stats.ipa_low_watermark_cnt++;
+		} else if ((adpt->pendq_cnt > 0) &&
+			(adpt->ipa_free_desc_cnt >= adpt->ipa_low_watermark) &&
+			!CHK_ADPT_FLAG(2, WQ_SCHED)) {
+			schedule_ipa_work = true;
+		}
+
+		spin_lock_bh(&alx_ipa->rm_ipa_lock);
+		if (alx_ipa->ipa_rx_completion == 0 && adpt->pendq_cnt == 0) {
+			spin_unlock_bh(&alx_ipa->rm_ipa_lock);
+			alx_ipa_rm_try_release(adpt);
+			/* Holding the wakelock for 200 msec here allows enough
+			 * time for the IPA inactivity timer to expire (100 msec)
+			 * and the IPA TAG process to complete before ALX tries
+			 * to suspend. Starting the inactivity timer also helps
+			 * UL throughput: the resource is not actually released
+			 * for 100 msec, within which the timer is reset, which
+			 * reduces the overhead of resource request/release
+			 * during continuous data transfer. */
+			pm_wakeup_event(&adpt->pdev->dev, 200);
+		} else {
+			spin_unlock_bh(&alx_ipa->rm_ipa_lock);
+		}
+
+		spin_unlock_bh(&adpt->flow_ctrl_lock);
+		if (schedule_ipa_work) {
+			SET_ADPT_FLAG(2, WQ_SCHED);
+			schedule_work(&adpt->ipa_send_task);
+		}
+	}
+}
+
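+/*
+ * Downlink transmit hook called by the ODU bridge: make sure the PROD
+ * resource is granted (requesting it if necessary), push the skb out
+ * through the regular alx xmit path, and drop the IPA RM resource again
+ * once no completions are outstanding.
+ */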
+static void alx_ipa_tx_dl(void *priv, struct sk_buff *skb)
+{
+	struct alx_adapter *adpt = priv;
+	struct alx_ipa_ctx *alx_ipa = adpt->palx_ipa;
+	netdev_tx_t ret;
+
+	if (!CHK_ADPT_FLAG(2, ODU_CONNECT)) {
+		IPC_ERROR("%s called before ODU_CONNECT was called!\n", __func__);
+		return;
+	}
+
+	spin_lock_bh(&alx_ipa->ipa_rm_state_lock);
+	if (alx_ipa->ipa_prod_rm_state != ALX_IPA_RM_GRANTED) {
+		spin_unlock_bh(&alx_ipa->ipa_rm_state_lock);
+		alx_ipa_rm_request(adpt);
+	} else {
+		spin_unlock_bh(&alx_ipa->ipa_rm_state_lock);
+	}
+
+	IPC_DEBUG("%s %d SKB Send to line \n",__func__,__LINE__);
+	if ((ret = alx_start_xmit(skb, adpt->netdev)) != NETDEV_TX_OK)
+	{
+		IPC_ERROR("%s alx_ipa_tx_dl() failed xmit returned %d \n",
+					__func__, ret);
+		alx_ipa->stats.tx_ipa_send_err++;
+		dev_kfree_skb_any(skb);
+	} else {
+		/* Deliver SKB to HW */
+		alx_ipa->stats.tx_ipa_send++;
+	}
+	spin_lock_bh(&alx_ipa->rm_ipa_lock);
+	spin_lock_bh(&alx_ipa->ipa_rm_state_lock);
+	if (alx_ipa->ipa_rx_completion == 0 && adpt->pendq_cnt == 0 &&
+	    alx_ipa->ipa_prod_rm_state != ALX_IPA_RM_RELEASED) {
+		spin_unlock_bh(&alx_ipa->ipa_rm_state_lock);
+		spin_unlock_bh(&alx_ipa->rm_ipa_lock);
+		alx_ipa_rm_try_release(adpt);
+		/* Holding the wakelock for 200 msec here allows enough time
+		 * for the IPA inactivity timer to expire (100 msec) and the
+		 * IPA TAG process to complete before ALX tries to suspend.
+		 * Starting the inactivity timer also helps UL throughput: the
+		 * resource is not actually released for 100 msec, within
+		 * which the timer is reset, which reduces the overhead of
+		 * resource request/release during continuous data transfer. */
+		pm_wakeup_event(&adpt->pdev->dev, 200);
+	} else {
+		spin_unlock_bh(&alx_ipa->ipa_rm_state_lock);
+		spin_unlock_bh(&alx_ipa->rm_ipa_lock);
+	}
+}
+
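+/*
+ * Deferred work scheduled once IPA signals readiness: set up the IPA
+ * resource-manager objects, initialize the ODU bridge and, if the PHY link
+ * is already up, connect the bridge and request the PROD resource.
+ */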
+static void alx_ipa_ready_work(struct work_struct *work)
+{
+	struct alx_adapter *adpt = container_of(work, struct alx_adapter,
+						ipa_ready_task);
+	struct alx_ipa_ctx *alx_ipa = adpt->palx_ipa;
+	struct odu_bridge_params *params_ptr, params;
+	int retval = 0;
+	struct alx_hw *hw = &adpt->hw;
+	params_ptr = &params;
+
+	_IPC_INFO("%s:%d --- IPA is ready --- \n",__func__,__LINE__);
+	alx_ipa->ipa_ready = true;
+
+	/* Init IPA Resources */
+	if (alx_ipa_setup_rm(adpt)) {
+		IPC_ERROR("ALX: IPA Setup RM Failed \n");
+		return;
+	} else {
+		SET_ADPT_FLAG(2, IPA_RM);
+	}
+
+	/* Initialize the ODU bridge driver now: odu_bridge_init()*/
+	params_ptr->netdev_name = adpt->netdev->name;
+	params_ptr->priv = adpt;
+	params.tx_dp_notify = alx_ipa_tx_dp_cb;
+	params_ptr->send_dl_skb = (void *)&alx_ipa_tx_dl;
+	memcpy(params_ptr->device_ethaddr, adpt->netdev->dev_addr, ETH_ALEN);
+	/* The maximum number of descriptors that can be provided to a BAM
+	 * at once is one less than the total number of descriptors that
+	 * the buffer can contain. */
+	params_ptr->ipa_desc_size = (adpt->ipa_high_watermark + 1) *
+					sizeof(struct sps_iovec);
+	retval = odu_bridge_init(params_ptr);
+	if (retval) {
+		IPC_ERROR("Couldnt initialize ODU_Bridge Driver \n");
+		return;
+	} else {
+		SET_ADPT_FLAG(2, ODU_INIT);
+	}
+
+	/* Check for link phy state */
+	if (hw->link_up) {
+		/* Enable ODU Bridge */
+		if (CHK_ADPT_FLAG(2, ODU_INIT)) {
+			retval = odu_bridge_connect();
+			if (retval) {
+				IPC_ERROR("Could not connect to ODU bridge %d \n",
+					retval);
+				return;
+			} else {
+				SET_ADPT_FLAG(2, ODU_CONNECT);
+			}
+			spin_lock_bh(&alx_ipa->ipa_rm_state_lock);
+			if (alx_ipa->ipa_prod_rm_state == ALX_IPA_RM_RELEASED) {
+				spin_unlock_bh(&alx_ipa->ipa_rm_state_lock);
+				alx_ipa_rm_request(adpt);
+			} else {
+				spin_unlock_bh(&alx_ipa->ipa_rm_state_lock);
+			}
+		}
+	}
+}
+
+static void alx_ipa_ready_cb(void *padpt)
+{
+	struct alx_adapter *adpt = (struct alx_adapter *)padpt;
+
+	/* Adding to work queue */
+	schedule_work(&adpt->ipa_ready_task);
+}
+
+
+static ssize_t alx_ipa_debugfs_read_ipa_stats(struct file *file,
+                char __user *user_buf, size_t count, loff_t *ppos)
+{
+	struct alx_adapter *adpt = file->private_data;
+	struct alx_ipa_ctx *alx_ipa = adpt->palx_ipa;
+	char *buf;
+	unsigned int len = 0, buf_len = 2000;
+	ssize_t ret_cnt;
+	u16 pendq_cnt, freeq_cnt, ipa_free_desc_cnt;
+	u16 max_pkts_allowed, min_pkts_allowed;
+
+	if (unlikely(!alx_ipa)) {
+		IPC_ERROR(" %s NULL Pointer \n",__func__);
+		return -EINVAL;
+	}
+
+	if (!CHK_ADPT_FLAG(2, DEBUGFS_INIT))
+		return 0;
+
+	buf = kzalloc(buf_len, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	len += scnprintf(buf + len, buf_len - len, "\n \n");
+	len += scnprintf(buf + len, buf_len - len, "%25s\n",
+			 "ALX IPA stats");
+	len += scnprintf(buf + len, buf_len - len, "%25s\n\n",
+			 "==================================================");
+
+	len += scnprintf(buf + len, buf_len - len, "%25s %10llu\n",
+			 "IPA RX Pkt Send: ", alx_ipa->stats.rx_ipa_send);
+	len += scnprintf(buf + len, buf_len - len, "%25s %10llu\n",
+			 "IPA RX IPA Send Fail: ", alx_ipa->stats.rx_ipa_send_fail);
+	len += scnprintf(buf + len, buf_len - len, "%25s %10llu\n",
+			 "IPA RX Write done: ", alx_ipa->stats.rx_ipa_write_done);
+	len += scnprintf(buf + len, buf_len - len, "%25s %10llu\n",
+			 "IPA RX Exception: ", alx_ipa->stats.rx_ipa_excep);
+	len += scnprintf(buf + len, buf_len - len, "%25s %10llu\n",
+			 "IPA TX Send: ", alx_ipa->stats.tx_ipa_send);
+	len += scnprintf(buf + len, buf_len - len, "%25s %10llu\n",
+			 "IPA TX Send Err: ", alx_ipa->stats.tx_ipa_send_err);
+	len += scnprintf(buf + len, buf_len - len, "%25s %10llu\n",
+			 "Non-IP or Frag RX Pkt: ", alx_ipa->stats.non_ip_frag_pkt);
+
+	spin_lock_bh(&adpt->flow_ctrl_lock);
+	pendq_cnt = adpt->pendq_cnt;
+	freeq_cnt = adpt->freeq_cnt;
+	ipa_free_desc_cnt = adpt->ipa_free_desc_cnt;
+	max_pkts_allowed = adpt->ipa_high_watermark;
+	min_pkts_allowed = adpt->ipa_low_watermark;
+	spin_unlock_bh(&adpt->flow_ctrl_lock);
+
+	len += scnprintf(buf + len, buf_len - len, "%25s %10u\n",
+			 "ALX Pending Queue Count: ", pendq_cnt);
+	len += scnprintf(buf + len, buf_len - len, "%25s %10u\n",
+			 "ALX Free Queue Count: ", freeq_cnt);
+	len += scnprintf(buf + len, buf_len - len, "%25s %10u\n",
+			 "IPA Free Queue Count: ", ipa_free_desc_cnt);
+	len += scnprintf(buf + len, buf_len - len, "%25s %10u\n",
+			 "IPA High Watermark: ", max_pkts_allowed);
+	len += scnprintf(buf + len, buf_len - len, "%25s %10u\n",
+			 "IPA Low Watermark: ", min_pkts_allowed);
+	len += scnprintf(buf + len, buf_len - len, "%25s %10llu\n",
+			 "IPA Low Watermark Count: ", alx_ipa->stats.ipa_low_watermark_cnt);
+	len += scnprintf(buf + len, buf_len - len, "%25s %10llu\n",
+	"IPA Flow Ctrl Pkt Drop: ", alx_ipa->stats.flow_control_pkt_drop);
+
+	len += scnprintf(buf + len, buf_len - len, "\n \n");
+	len += scnprintf(buf + len, buf_len - len, "%25s %s\n",
+			 "Data Path IPA Enabled: ",
+			 CHK_ADPT_FLAG(2, ODU_CONNECT) ? "True" : "False");
+
+	len += scnprintf(buf + len, buf_len - len,
+			 "<------------------ ALX RM STATS ------------------>\n");
+	len += scnprintf(buf + len, buf_len - len, "%25s %s\n",
+	"IPA PROD RM State: ",
+	alx_ipa_rm_state_to_str(alx_ipa->ipa_prod_rm_state));
+	len += scnprintf(buf + len, buf_len - len, "%25s %s\n",
+	"IPA CONS RM State: ",
+	alx_ipa_rm_state_to_str(alx_ipa->ipa_cons_rm_state));
+	len += scnprintf(buf + len, buf_len - len, "%25s %s\n",
+	"IPA Perf Requested:",alx_ipa->alx_ipa_perf_requested?"True":"False");
+	len += scnprintf(buf + len, buf_len - len, "%25s %10llu\n",
+	"IPA RX Completions Events Remaining: ", alx_ipa->ipa_rx_completion);
+	len += scnprintf(buf + len, buf_len - len, "%25s %10llu\n",
+	"ALX TX Completions Events Remaining: ", alx_ipa->alx_tx_completion);
+
+	if (len > buf_len)
+		len = buf_len;
+
+	ret_cnt = simple_read_from_buffer(user_buf, count, ppos, buf, len);
+	kfree(buf);
+	return ret_cnt;
+}
+
+static const struct file_operations fops_ipa_stats = {
+	.read = alx_ipa_debugfs_read_ipa_stats,
+	.open = simple_open,
+	.owner = THIS_MODULE,
+	.llseek = default_llseek,
+};
+
+static int alx_debugfs_init(struct alx_adapter *adpt)
+{
+	adpt->palx_ipa->debugfs_dir = debugfs_create_dir("alx", NULL);
+	if (!adpt->palx_ipa->debugfs_dir)
+		return -ENOMEM;
+
+	debugfs_create_file("stats", S_IRUSR, adpt->palx_ipa->debugfs_dir,
+					adpt, &fops_ipa_stats);
+
+	return 0;
+}
+
+void alx_debugfs_exit(struct alx_adapter *adpt)
+{
+	if (adpt->palx_ipa->debugfs_dir)
+		debugfs_remove_recursive(adpt->palx_ipa->debugfs_dir);
+}
+
+/*
+static void alx_ipa_process_evt(int evt, void *priv)
+{
+  *** When the RM Request is granted then send packet to IPA
+  *** But we might not use this since we send the packet to odu_bridge
+  *** and may be ODU bridge will request for IPA_RM_GRANT
+  *** Need to confirm with ady
+}
+*/
+
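+/*
+ * IPA resource-manager event callback for the PROD resource: record
+ * GRANTED/RELEASED state transitions and, on a grant, kick the flow-control
+ * send task so queued packets get drained.
+ */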
+static void alx_ipa_rm_notify(void *user_data, enum ipa_rm_event event,
+                                                        unsigned long data)
+{
+	struct alx_adapter *adpt = user_data;
+	struct alx_ipa_ctx *alx_ipa = adpt->palx_ipa;
+
+	spin_lock_bh(&alx_ipa->ipa_rm_state_lock);
+	_IPC_INFO("%s IPA RM evt: %d, alx_ipa->ipa_prod_rm_state %s\n",
+		  __func__, event,
+		  alx_ipa_rm_state_to_str(alx_ipa->ipa_prod_rm_state));
+	spin_unlock_bh(&alx_ipa->ipa_rm_state_lock);
+
+	switch (event) {
+	case IPA_RM_RESOURCE_GRANTED:
+		IPC_DEBUG("%s:%d IPA_RM_RESOURCE_GRANTED\n", __func__, __LINE__);
+		spin_lock_bh(&alx_ipa->ipa_rm_state_lock);
+		if (alx_ipa->ipa_prod_rm_state == ALX_IPA_RM_GRANTED) {
+			spin_unlock_bh(&alx_ipa->ipa_rm_state_lock);
+			alx_err(adpt, "%s ERR: RM_GRANTED received but rm_state already granted\n",
+				__func__);
+			break;
+		}
+		alx_ipa->ipa_prod_rm_state = ALX_IPA_RM_GRANTED;
+		spin_unlock_bh(&alx_ipa->ipa_rm_state_lock);
+		/* Use the send task as a deferred way to request IPA RM */
+		if (!alx_ipa->alx_ipa_perf_requested) {
+			schedule_work(&adpt->ipa_send_task);
+			break;
+		}
+		spin_lock_bh(&adpt->flow_ctrl_lock);
+		if (adpt->pendq_cnt && !CHK_ADPT_FLAG(2, WQ_SCHED)) {
+			SET_ADPT_FLAG(2, WQ_SCHED);
+			spin_unlock_bh(&adpt->flow_ctrl_lock);
+			schedule_work(&adpt->ipa_send_task);
+		} else {
+			spin_unlock_bh(&adpt->flow_ctrl_lock);
+			alx_err(adpt, "%s -- ERR: RM_GRANTED received but pendq_cnt %d, WQ_SCHED: %s\n",
+				__func__, adpt->pendq_cnt,
+				CHK_ADPT_FLAG(2, WQ_SCHED) ? "true" : "false");
+		}
+		break;
+	case IPA_RM_RESOURCE_RELEASED:
+		spin_lock_bh(&alx_ipa->ipa_rm_state_lock);
+		alx_ipa->ipa_prod_rm_state = ALX_IPA_RM_RELEASED;
+		spin_unlock_bh(&alx_ipa->ipa_rm_state_lock);
+		IPC_DEBUG("%s -- IPA RM release\n", __func__);
+		break;
+	default:
+		IPC_ERROR("Unknown RM evt: %d", event);
+		break;
+	}
+}
+
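+/*
+ * CONS resource request/release callbacks invoked by the IPA RM core when
+ * IPA wants to hand us packets. Both simply track the CONS state; the
+ * request side additionally holds off suspend briefly.
+ */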
+static int alx_ipa_rm_cons_request(void)
+{
+	struct alx_adapter *adpt = galx_adapter_ptr;
+	struct alx_ipa_ctx *alx_ipa = NULL;
+
+	_IPC_INFO("-- %s:%d -- \n",__func__,__LINE__);
+	if (!adpt) {
+		IPC_ERROR("%s --- adpt NULL pointer\n",__func__);
+		return 0;
+	}
+	alx_ipa = adpt->palx_ipa;
+
+	spin_lock_bh(&alx_ipa->ipa_rm_state_lock);
+	if (alx_ipa->ipa_cons_rm_state != ALX_IPA_RM_REQUESTED) {
+		alx_ipa->ipa_cons_rm_state = ALX_IPA_RM_REQUESTED;
+		spin_unlock_bh(&alx_ipa->ipa_rm_state_lock);
+	} else {
+		spin_unlock_bh(&alx_ipa->ipa_rm_state_lock);
+		IPC_ERROR("%s -- IPA RM CONS state = Requested %d."
+			"Missed a release request\n"
+			,__func__, alx_ipa->ipa_cons_rm_state);
+	}
+
+	/* Prevent the device from suspending for 200 msec to give IPA
+	 * enough time to send packets to us */
+	pm_wakeup_event(&adpt->pdev->dev, 200);
+	return 0;
+}
+
+static int alx_ipa_rm_cons_release(void)
+{
+	struct alx_adapter *adpt = galx_adapter_ptr;
+	struct alx_ipa_ctx *alx_ipa = NULL;
+
+	_IPC_INFO("-- %s:%d --\n", __func__, __LINE__);
+	if (!adpt) {
+		IPC_ERROR("%s --- adpt NULL pointer\n", __func__);
+		return 0;
+	}
+	alx_ipa = adpt->palx_ipa;
+
+	spin_lock_bh(&alx_ipa->ipa_rm_state_lock);
+	if (alx_ipa->ipa_cons_rm_state != ALX_IPA_RM_RELEASED) {
+		alx_ipa->ipa_cons_rm_state = ALX_IPA_RM_RELEASED;
+		spin_unlock_bh(&alx_ipa->ipa_rm_state_lock);
+	} else {
+		spin_unlock_bh(&alx_ipa->ipa_rm_state_lock);
+		IPC_ERROR("%s -- IPA RM CONS state already Released (%d); missed a request\n",
+			  __func__, alx_ipa->ipa_cons_rm_state);
+	}
+
+	return 0;
+}
+
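+/*
+ * Create the IPA RM resource graph: an ODU_ADAPT_PROD resource (with an
+ * inactivity timer so short traffic gaps do not bounce the resource) that
+ * depends on APPS_CONS, plus an ODU_ADAPT_CONS resource backed by the
+ * request/release callbacks above.
+ */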
+static int alx_ipa_setup_rm(struct alx_adapter *adpt)
+{
+	struct ipa_rm_create_params create_params = {0};
+	int ret;
+	struct alx_ipa_ctx *alx_ipa = adpt->palx_ipa;
+
+	memset(&create_params, 0, sizeof(create_params));
+	create_params.name = IPA_RM_RESOURCE_ODU_ADAPT_PROD;
+	create_params.reg_params.user_data = adpt;
+	create_params.reg_params.notify_cb = alx_ipa_rm_notify;
+	create_params.floor_voltage = IPA_VOLTAGE_SVS;
+
+	ret = ipa_rm_create_resource(&create_params);
+	if (ret) {
+		IPC_ERROR("Create ODU PROD RM resource failed: %d\n", ret);
+		goto prod_fail;
+	}
+
+	ipa_rm_add_dependency(IPA_RM_RESOURCE_ODU_ADAPT_PROD,
+			      IPA_RM_RESOURCE_APPS_CONS);
+
+	ret = ipa_rm_inactivity_timer_init(IPA_RM_RESOURCE_ODU_ADAPT_PROD,
+					ALX_IPA_INACTIVITY_DELAY_MS);
+	if (ret) {
+		IPC_ERROR("Create ODU PROD RM inactivity timer failed: %d \n", ret);
+		goto delete_prod;
+	}
+
+	memset(&create_params, 0, sizeof(create_params));
+	create_params.name = IPA_RM_RESOURCE_ODU_ADAPT_CONS;
+	create_params.request_resource = alx_ipa_rm_cons_request;
+	create_params.release_resource = alx_ipa_rm_cons_release;
+	create_params.floor_voltage = IPA_VOLTAGE_SVS;
+
+	ret = ipa_rm_create_resource(&create_params);
+	if (ret) {
+		IPC_ERROR("Create ODU CONS RM resource failed: %d\n", ret);
+		goto delete_prod;
+	}
+
+	alx_ipa_set_perf_level();
+
+	/* Initialize IPA RM State variables */
+	spin_lock_init(&alx_ipa->ipa_rm_state_lock);
+	spin_lock_init(&alx_ipa->rm_ipa_lock);
+	alx_ipa->ipa_rx_completion = 0;
+	alx_ipa->alx_tx_completion = 0;
+	alx_ipa->acquire_wake_src = 0;
+	alx_ipa->ipa_prod_rm_state = ALX_IPA_RM_RELEASED;
+	alx_ipa->ipa_cons_rm_state = ALX_IPA_RM_RELEASED;
+	wakeup_source_init(&alx_ipa->rm_ipa_wait, "alx_ipa_completion_wake_source");
+	return ret;
+
+delete_prod:
+	ipa_rm_delete_resource(IPA_RM_RESOURCE_ODU_ADAPT_PROD);
+
+prod_fail:
+	return ret;
+}
+
+static void alx_ipa_cleanup_rm(struct alx_adapter *adpt)
+{
+	int ret;
+	struct alx_ipa_ctx *alx_ipa = adpt->palx_ipa;
+
+	ret = ipa_rm_delete_resource(IPA_RM_RESOURCE_ODU_ADAPT_PROD);
+	if (ret)
+		IPC_ERROR("Resource:IPA_RM_RESOURCE_ODU_ADAPT_PROD del fail %d\n",
+			  ret);
+
+	ret = ipa_rm_inactivity_timer_destroy(IPA_RM_RESOURCE_ODU_ADAPT_PROD);
+	if (ret)
+		IPC_ERROR("Resource:IPA RM inactivity timer destroy fail %d\n",
+			  ret);
+
+	ret = ipa_rm_delete_resource(IPA_RM_RESOURCE_ODU_ADAPT_CONS);
+	if (ret)
+		IPC_ERROR("Resource:IPA_RM_RESOURCE_ODU_ADAPT_CONS del fail %d\n",
+			  ret);
+	if (!ret) {
+		/* Re-initialize the IPA RM state to Released */
+		alx_ipa->ipa_prod_rm_state = ALX_IPA_RM_RELEASED;
+		alx_ipa->ipa_cons_rm_state = ALX_IPA_RM_RELEASED;
+
+		wakeup_source_trash(&alx_ipa->rm_ipa_wait);
+		alx_ipa->ipa_rx_completion = 0;
+		alx_ipa->alx_tx_completion = 0;
+	}
+}
+
+/* Request IPA RM Resource for ODU_PROD */
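+/*
+ * State machine: GRANTED -> nothing to do; GRANT_PENDING -> the caller must
+ * wait (-EINPROGRESS); RELEASED -> issue the request, taking a wakeup source
+ * while the grant is in flight so the system cannot suspend under us.
+ */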
+static int alx_ipa_rm_request(struct alx_adapter *adpt)
+{
+	int ret = 0;
+	struct alx_ipa_ctx *alx_ipa = adpt->palx_ipa;
+
+	spin_lock_bh(&alx_ipa->ipa_rm_state_lock);
+	IPC_DEBUG("%s - IPA RM PROD state %s\n", __func__,
+		  alx_ipa_rm_state_to_str(alx_ipa->ipa_prod_rm_state));
+	switch (alx_ipa->ipa_prod_rm_state) {
+	case ALX_IPA_RM_GRANTED:
+		spin_unlock_bh(&alx_ipa->ipa_rm_state_lock);
+		return 0;
+	case ALX_IPA_RM_GRANT_PENDING:
+		spin_unlock_bh(&alx_ipa->ipa_rm_state_lock);
+		return -EINPROGRESS;
+	case ALX_IPA_RM_RELEASED:
+		alx_ipa->ipa_prod_rm_state = ALX_IPA_RM_GRANT_PENDING;
+		break;
+	default:
+		spin_unlock_bh(&alx_ipa->ipa_rm_state_lock);
+		IPC_ERROR("%s - IPA RM PROD state %s\n", __func__,
+			  alx_ipa_rm_state_to_str(alx_ipa->ipa_prod_rm_state));
+		return -EINVAL;
+	}
+
+	ret = ipa_rm_inactivity_timer_request_resource(
+					IPA_RM_RESOURCE_ODU_ADAPT_PROD);
+	if (ret == -EINPROGRESS) {
+		IPC_DEBUG("%s - IPA RM PROD state %s, wake %d\n", __func__,
+			  alx_ipa_rm_state_to_str(alx_ipa->ipa_prod_rm_state),
+			  alx_ipa->acquire_wake_src);
+		/* Acquire the IPA TX-complete wakeup source, since we only
+		 * request a grant when we have packets to send */
+		spin_lock_bh(&alx_ipa->rm_ipa_lock);
+		if (alx_ipa->acquire_wake_src == false) {
+			__pm_stay_awake(&alx_ipa->rm_ipa_wait);
+			alx_ipa->acquire_wake_src = true;
+		}
+		spin_unlock_bh(&alx_ipa->rm_ipa_lock);
+		ret = -EINPROGRESS;
+	} else if (ret == 0) {
+		alx_ipa->ipa_prod_rm_state = ALX_IPA_RM_GRANTED;
+		IPC_DEBUG("%s - IPA RM PROD state %s\n", __func__,
+			  alx_ipa_rm_state_to_str(alx_ipa->ipa_prod_rm_state));
+	} else {
+		IPC_ERROR("%s -- IPA RM request failed, ret=%d\n", __func__, ret);
+		ret = -EINVAL;
+	}
+	spin_unlock_bh(&alx_ipa->ipa_rm_state_lock);
+	return ret;
+}
+
+/* Release the IPA RM resource for ODU_PROD if it is no longer needed */
+static int alx_ipa_rm_try_release(struct alx_adapter *adpt)
+{
+	int ret;
+	struct alx_ipa_ctx *alx_ipa = adpt->palx_ipa;
+
+	IPC_DEBUG("%s:%d\n", __func__, __LINE__);
+	spin_lock_bh(&alx_ipa->ipa_rm_state_lock);
+	ret = ipa_rm_inactivity_timer_release_resource(
+					IPA_RM_RESOURCE_ODU_ADAPT_PROD);
+	if (ret == 0)
+		alx_ipa->ipa_prod_rm_state = ALX_IPA_RM_RELEASED;
+	spin_unlock_bh(&alx_ipa->ipa_rm_state_lock);
+	return ret;
+}
+#endif
+
+
+#ifdef APQ_PLATFORM
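+/*
+ * Map the NIC behind the SMMU: create an IOMMU mapping covering
+ * ALX_SMMU_BASE..ALX_SMMU_BASE+ALX_SMMU_SIZE, mark the domain atomic and
+ * stage-1 bypass, then attach the PCI device to it.
+ */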
+int alx_smmu_init(struct device *dev)
+{
+	int retval = 0;
+	int alx_atomic_ctx = 1;
+	int alx_bypass = 1;
+
+	_IPC_INFO("[APQ] alx_smmu_init\n");
+	alx_mapping = arm_iommu_create_mapping(&platform_bus_type,
+					       ALX_SMMU_BASE, ALX_SMMU_SIZE);
+
+	if (IS_ERR_OR_NULL(alx_mapping)) {
+		retval = PTR_ERR(alx_mapping) ?: -ENODEV;
+		IPC_ERROR("Failed to create ALX SMMU mapping (%d)\n", retval);
+		return retval;
+	}
+
+	retval = iommu_domain_set_attr(alx_mapping->domain,
+				   DOMAIN_ATTR_ATOMIC,
+				   &alx_atomic_ctx);
+	if (retval) {
+		IPC_ERROR("Set atomic attribute for ALX SMMU failed (%d)\n", retval);
+		goto release_alx_mapping;
+	}
+
+	retval = iommu_domain_set_attr(alx_mapping->domain,
+				   DOMAIN_ATTR_S1_BYPASS,
+				   &alx_bypass);
+	if (retval) {
+		IPC_ERROR("Set bypass attribute for ALX SMMU failed (%d)\n", retval);
+		goto release_alx_mapping;
+	}
+
+	retval = arm_iommu_attach_device(dev, alx_mapping);
+	if (retval) {
+		IPC_ERROR("arm_iommu_attach_device for ALX failed (%d)\n", retval);
+		goto release_alx_mapping;
+	}
+
+	_IPC_INFO("attached to SMMU successful\n");
+
+	return retval;
+
+release_alx_mapping:
+	arm_iommu_release_mapping(alx_mapping);
+	alx_mapping = NULL;
+
+	return retval;
+}
+
+void alx_smmu_remove(struct device *dev)
+{
+	if (alx_mapping) {
+		_IPC_INFO("[APQ] alx_smmu_remove\n");
+		arm_iommu_detach_device(dev);
+		arm_iommu_release_mapping(alx_mapping);
+		alx_mapping = NULL;
+	}
+}
+#endif
+
+/*
+ * alx_init - Device Initialization Routine
+ */
+static int __devinit alx_init(struct pci_dev *pdev,
+		       const struct pci_device_id *ent)
+{
+	struct net_device *netdev;
+	struct alx_adapter *adpt = NULL;
+	struct alx_hw *hw = NULL;
+#ifdef MDM_PLATFORM
+	struct alx_ipa_ctx *alx_ipa = NULL;
+#endif
+	static int cards_found;
+	int retval;
+
+#ifdef APQ_PLATFORM
+	retval = alx_smmu_init(&pdev->dev);
+	if (retval) {
+		IPC_ERROR("%s: ALX SMMU init failed, err = %d\n", __func__, retval);
+		goto alx_smmu_init_fail;
+	}
+#endif
+
+#ifndef APQ_PLATFORM
+	struct odu_bridge_params *params_ptr, params;
+	params_ptr = &params;
+#endif
+
+#ifdef MDM_PLATFORM
+	retval = msm_pcie_pm_control(MSM_PCIE_RESUME, pdev->bus->number,
+					pdev, NULL, 0);
+	if (retval) {
+		IPC_ERROR("Couldnt perform PCIe MSM Link resume %d\n",
+			retval);
+		return retval;
+	}
+#endif
+
+	/* enable device (incl. PCI PM wakeup and hotplug setup) */
+	retval = pci_enable_device_mem(pdev);
+	if (retval) {
+		dev_err(&pdev->dev, "cannot enable PCI device\n");
+		goto err_alloc_device;
+	}
+
+	/*
+	 * The alx chip can DMA to 64-bit addresses, but it uses a single
+	 * shared register for the high 32 bits, so only a single, aligned,
+	 * 4 GB physical address range can be used at a time.
+	 */
+	if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
+	    !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
+		dev_info(&pdev->dev, "DMA to 64-BIT addresses\n");
+	} else {
+		retval = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
+		if (retval) {
+			retval = dma_set_coherent_mask(&pdev->dev,
+						       DMA_BIT_MASK(32));
+			if (retval) {
+				dev_err(&pdev->dev,
+					"No usable DMA config, aborting\n");
+				goto err_alloc_pci_res_mem;
+			}
+		}
+	}
+
+	retval = pci_request_selected_regions(pdev, pci_select_bars(pdev,
+					IORESOURCE_MEM), alx_drv_name);
+	if (retval) {
+		dev_err(&pdev->dev,
+			"pci_request_selected_regions failed 0x%x\n", retval);
+		goto err_alloc_pci_res_mem;
+	}
+
+
+	pci_enable_pcie_error_reporting(pdev);
+	pci_set_master(pdev);
+
+	netdev = alloc_etherdev(sizeof(struct alx_adapter));
+	if (netdev == NULL) {
+		dev_err(&pdev->dev, "etherdev alloc failed\n");
+		retval = -ENOMEM;
+		goto err_alloc_netdev;
+	}
+
+	SET_NETDEV_DEV(netdev, &pdev->dev);
+	netdev->irq  = pdev->irq;
+	adpt = netdev_priv(netdev);
+
+#ifdef MDM_PLATFORM
+	if (ipa_enable) {
+		/* Initialize the work for the first time */
+		INIT_WORK(&adpt->ipa_ready_task, alx_ipa_ready_work);
+		/* Init IPA Context */
+		alx_ipa = kzalloc(sizeof(struct alx_ipa_ctx), GFP_KERNEL);
+		if (!alx_ipa) {
+			IPC_ERROR("kzalloc err.\n");
+			return -ENOMEM;
+		} else {
+			adpt->palx_ipa = alx_ipa;
+		}
+		/* Reset all the flags */
+		CLI_ADPT_FLAG(2, ODU_CONNECT);
+		CLI_ADPT_FLAG(2, ODU_INIT);
+		CLI_ADPT_FLAG(2, IPA_RM);
+		CLI_ADPT_FLAG(2, DEBUGFS_INIT);
+		CLI_ADPT_FLAG(2, WQ_SCHED);
+
+		galx_adapter_ptr = adpt;
+
+		/* Initialize all the flow control variables */
+		adpt->pendq_cnt = 0;
+		adpt->freeq_cnt = 0;
+		adpt->ipa_free_desc_cnt = ALX_IPA_SYS_PIPE_MAX_PKTS_DESC;
+		adpt->ipa_high_watermark = ALX_IPA_SYS_PIPE_MAX_PKTS_DESC;
+		adpt->ipa_low_watermark = ALX_IPA_SYS_PIPE_MIN_PKTS_DESC;
+		alx_ipa->ipa_ready = false;
+		spin_lock_init(&adpt->flow_ctrl_lock);
+		INIT_LIST_HEAD(&adpt->pend_queue_head);
+		INIT_LIST_HEAD(&adpt->free_queue_head);
+	}
+#endif
+
+	pci_set_drvdata(pdev, adpt);
+	adpt->netdev = netdev;
+	adpt->pdev = pdev;
+	hw = &adpt->hw;
+	hw->adpt = adpt;
+	adpt->msg_enable = ALX_MSG_DEFAULT;
+
+#ifdef MDM_PLATFORM
+	if (ipa_enable) {
+		retval = ipa_register_ipa_ready_cb(alx_ipa_ready_cb, (void *)adpt);
+		if (retval < 0) {
+			if (retval == -EEXIST) {
+				_IPC_INFO("%s:%d -- IPA is Ready retval %d \n",
+						__func__,__LINE__,retval);
+				alx_ipa->ipa_ready = true;
+			} else {
+				_IPC_INFO("%s:%d -- IPA is Not Ready retval %d \n",
+						__func__,__LINE__,retval);
+				alx_ipa->ipa_ready = false;
+			}
+		}
+
+		if (alx_ipa->ipa_ready == true) {
+			if (alx_ipa_setup_rm(adpt)) {
+				IPC_ERROR("ALX: IPA Setup RM Failed \n");
+				goto err_ipa_rm;
+			} else {
+				SET_ADPT_FLAG(2, IPA_RM);
+			}
+		}
+		if (alx_debugfs_init(adpt)) {
+			IPC_ERROR("ALX: Debugfs Init failed \n");
+		} else {
+			SET_ADPT_FLAG(2, DEBUGFS_INIT);
+		}
+		alx_ipa->alx_ipa_perf_requested = false;
+	}
+#endif
+
+	adpt->hw.hw_addr = ioremap(pci_resource_start(pdev, BAR_0),
+				   pci_resource_len(pdev, BAR_0));
+	if (!adpt->hw.hw_addr) {
+		alx_err(adpt, "cannot map device registers\n");
+		retval = -EIO;
+		goto err_iomap;
+	}
+	netdev->base_addr = (unsigned long)adpt->hw.hw_addr;
+
+	/* set the netdev_ops member of the netdev structure */
+#ifdef MDM_PLATFORM
+	netdev->netdev_ops = &alx_netdev_ops;
+#else
+#ifndef APQ_PLATFORM
+	netdev_attach_ops(netdev, &alx_netdev_ops);
+#else
+	netdev->netdev_ops = &alx_netdev_ops;
+#endif
+#endif
+	alx_set_ethtool_ops(netdev);
+	netdev->watchdog_timeo = ALX_WATCHDOG_TIME;
+	strlcpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
+
+	/* init the alx_adapter structure */
+	retval = alx_init_adapter(adpt);
+	if (retval) {
+		alx_err(adpt, "net device private data init failed\n");
+		goto err_init_adapter;
+	}
+
+	/* reset pcie */
+	retval = hw->cbs.reset_pcie(hw, true, true);
+	if (retval) {
+		alx_err(adpt, "PCIE Reset failed, error = %d\n", retval);
+		retval = -EIO;
+		goto err_init_adapter;
+	}
+
+	/* Init GPHY as early as possible due to a power-saving issue */
+	retval = hw->cbs.reset_phy(hw);
+	if (retval) {
+		alx_err(adpt, "PHY Reset failed, error = %d\n", retval);
+		retval = -EIO;
+		goto err_init_adapter;
+	}
+
+	/* reset mac */
+	retval = hw->cbs.reset_mac(hw);
+	if (retval) {
+		alx_err(adpt, "MAC Reset failed, error = %d\n", retval);
+		retval = -EIO;
+		goto err_init_adapter;
+	}
+
+	/* setup link to put it in a known good starting state */
+	retval = hw->cbs.setup_phy_link(hw, hw->autoneg_advertised, true,
+					!hw->disable_fc_autoneg);
+
+	/* get user settings */
+	adpt->num_txdescs = 1024;
+	adpt->num_rxdescs = 512;
+	adpt->max_rxques = min_t(int, ALX_MAX_RX_QUEUES, num_online_cpus());
+	adpt->max_txques = min_t(int, ALX_MAX_TX_QUEUES, num_online_cpus());
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,39))
+	netdev->hw_features = NETIF_F_SG	 |
+			      NETIF_F_HW_CSUM	 |
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 0))
+                              NETIF_F_HW_VLAN_CTAG_RX;
+#else
+			      NETIF_F_HW_VLAN_RX;
+#endif /*(LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 0))*/
+	if (adpt->hw.mac_type != alx_mac_l1c &&
+	    adpt->hw.mac_type != alx_mac_l2c) {
+		netdev->hw_features = netdev->hw_features |
+				      NETIF_F_TSO |
+				      NETIF_F_TSO6;
+	}
+	netdev->features = netdev->hw_features |
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 0))
+                           NETIF_F_HW_VLAN_CTAG_TX;
+#else
+                           NETIF_F_HW_VLAN_TX;
+#endif /*(LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 0))*/
+#else
+	netdev->features = NETIF_F_SG	 |
+			   NETIF_F_HW_CSUM	 |
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 0))
+                           NETIF_F_HW_VLAN_CTAG_RX;
+#else
+			   NETIF_F_HW_VLAN_RX;
+#endif /*(LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 0))*/
+	if (adpt->hw.mac_type != alx_mac_l1c &&
+	    adpt->hw.mac_type != alx_mac_l2c) {
+		netdev->features = netdev->features |
+				   NETIF_F_TSO |
+				   NETIF_F_TSO6;
+	}
+	netdev->features = netdev->features |
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 0))
+                           NETIF_F_HW_VLAN_CTAG_TX;
+#else
+			   NETIF_F_HW_VLAN_TX;
+#endif /*(LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 0))*/
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,39)) */
+
+	/* get mac addr and perm mac addr, set to register */
+	if (hw->cbs.get_mac_addr)
+		retval = hw->cbs.get_mac_addr(hw, hw->mac_perm_addr);
+	else
+		retval = -EINVAL;
+
+/* original QC code */
+#if 0
+	if (retval) {
+		eth_hw_addr_random(netdev);
+		memcpy(hw->mac_perm_addr, netdev->dev_addr, netdev->addr_len);
+	}
+#endif
+
+#ifndef CONFIG_FBXSERIAL
+	/* ProjE change */
+	/* Fill the MAC address from the input parameters.
+	 * Always use the MAC address from the input parameters.
+	 */
+	netdev->dev_addr[0] = ((mac_addr_hi16 >> 8) & 0xFF);
+	netdev->dev_addr[1] = ((mac_addr_hi16) & 0xFF);
+
+	netdev->dev_addr[2] = ((mac_addr_lo32 >> 24) & 0xFF);
+	netdev->dev_addr[3] = ((mac_addr_lo32 >> 16) & 0xFF);
+	netdev->dev_addr[4] = ((mac_addr_lo32 >> 8)  & 0xFF);
+	netdev->dev_addr[5] = ((mac_addr_lo32) & 0xFF);
+	_IPC_INFO("alx: Input mac: %X:%X:%X:%X:%X:%X\n",
+		  netdev->dev_addr[0], netdev->dev_addr[1], netdev->dev_addr[2],
+		  netdev->dev_addr[3], netdev->dev_addr[4], netdev->dev_addr[5]);
+
+	if ((mac_addr_hi16 == 0xFFFFFFFF) ||
+	    (mac_addr_lo32 == 0xFFFFFFFF)) {
+		_IPC_INFO("alx: Using a randomly generated MAC address\n");
+		eth_hw_addr_random(netdev);
+	}
+
+	_IPC_INFO("alx: Use mac addr: %X:%X:%X:%X:%X:%X\n",
+		  netdev->dev_addr[0], netdev->dev_addr[1], netdev->dev_addr[2],
+		  netdev->dev_addr[3], netdev->dev_addr[4], netdev->dev_addr[5]);
+	memcpy(hw->mac_perm_addr, netdev->dev_addr, netdev->addr_len);
+	/* END ProjE change */
+#else
+	if (fbxserialinfo_get_mac_addr(0)) {
+		memcpy(netdev->dev_addr, fbxserialinfo_get_mac_addr(0), 6);
+		memcpy(hw->mac_perm_addr, fbxserialinfo_get_mac_addr(0), 6);
+	}
+#endif
+
+	memcpy(hw->mac_addr, hw->mac_perm_addr, netdev->addr_len);
+	if (hw->cbs.set_mac_addr)
+		hw->cbs.set_mac_addr(hw, hw->mac_addr);
+
+	memcpy(netdev->dev_addr, hw->mac_perm_addr, netdev->addr_len);
+	memcpy(netdev->perm_addr, hw->mac_perm_addr, netdev->addr_len);
+	retval = alx_validate_mac_addr(netdev->perm_addr);
+	if (retval) {
+		alx_err(adpt, "invalid MAC address\n");
+		goto err_init_adapter;
+	}
+
+	setup_timer(&adpt->alx_timer, &alx_timer_routine,
+		    (unsigned long)adpt);
+	INIT_WORK(&adpt->alx_task, alx_task_routine);
+
+	/* Number of supported queues */
+	alx_set_num_queues(adpt);
+	retval = alx_set_interrupt_mode(adpt);
+	if (retval) {
+		alx_err(adpt, "can't set interrupt mode\n");
+		goto err_set_interrupt_mode;
+	}
+
+	retval = alx_set_interrupt_param(adpt);
+	if (retval) {
+		alx_err(adpt, "can't set interrupt parameter\n");
+		goto err_set_interrupt_param;
+	}
+
+	retval = alx_alloc_all_rtx_queue(adpt);
+	if (retval) {
+		alx_err(adpt, "can't allocate memory for queues\n");
+		goto err_alloc_rtx_queue;
+	}
+
+	alx_set_register_info_special(adpt);
+
+	alx_netif_dbg(adpt, probe, adpt->netdev,
+		  "num_msix_noque_intrs = %d, num_msix_rxque_intrs = %d, "
+		  "num_msix_txque_intrs = %d\n",
+		  adpt->num_msix_noques, adpt->num_msix_rxques,
+		  adpt->num_msix_txques);
+	alx_netif_dbg(adpt, probe, adpt->netdev, "num_msix_all_intrs = %d\n",
+		  adpt->num_msix_intrs);
+
+	alx_netif_dbg(adpt, probe, adpt->netdev,
+		  "RX Queue Count = %u, HRX Queue Count = %u, "
+		  "SRX Queue Count = %u, TX Queue Count = %u\n",
+		  adpt->num_rxques, adpt->num_hw_rxques, adpt->num_sw_rxques,
+		  adpt->num_txques);
+
+	/* WOL is only supported on the following devices */
+	switch (hw->pci_devid) {
+	case ALX_DEV_ID_AR8131:
+	case ALX_DEV_ID_AR8132:
+	case ALX_DEV_ID_AR8151_V1:
+	case ALX_DEV_ID_AR8151_V2:
+	case ALX_DEV_ID_AR8152_V1:
+	case ALX_DEV_ID_AR8152_V2:
+		adpt->wol = (ALX_WOL_MAGIC | ALX_WOL_PHY);
+		break;
+	case ALX_DEV_ID_AR8161:
+	case ALX_DEV_ID_AR8162:
+		adpt->wol = (ALX_WOL_MAGIC | ALX_WOL_PHY);
+		break;
+	default:
+		adpt->wol = 0;
+		break;
+	}
+	device_set_wakeup_enable(&adpt->pdev->dev, adpt->wol);
+
+	SET_ADPT_FLAG(1, STATE_DOWN);
+	strlcpy(netdev->name, "eth%d", sizeof(netdev->name) - 1);
+	retval = register_netdev(netdev);
+	if (retval) {
+		alx_err(adpt, "register netdevice failed\n");
+		goto err_register_netdev;
+	}
+	adpt->netdev_registered = true;
+
+#ifdef MDM_PLATFORM
+	if (ipa_enable) {
+		/* Initialize the ODU bridge driver now: odu_bridge_init()*/
+		if (alx_ipa->ipa_ready == true) {
+			params_ptr->netdev_name = netdev->name;
+			params_ptr->priv = adpt;
+			params.tx_dp_notify = alx_ipa_tx_dp_cb;
+			params_ptr->send_dl_skb = (void *)&alx_ipa_tx_dl;
+			memcpy(params_ptr->device_ethaddr, netdev->dev_addr, ETH_ALEN);
+			/* The maximum number of descriptors that can be provided to a
+			 * BAM at once is one less than the total number of descriptors
+			 * that the buffer can contain. */
+			params_ptr->ipa_desc_size = (adpt->ipa_high_watermark + 1) *
+						sizeof(struct sps_iovec);
+			retval = odu_bridge_init(params_ptr);
+			if (retval) {
+				IPC_ERROR("Couldnt initialize ODU_Bridge Driver \n");
+				goto err_init_odu_bridge;
+			} else {
+				SET_ADPT_FLAG(2, ODU_INIT);
+			}
+		}
+
+		/* Initialize IPA Flow Control Work Task */
+		INIT_WORK(&adpt->ipa_send_task, alx_ipa_send_routine);
+	}
+
+	/* Register with MSM PCIe PM Framework */
+	adpt->msm_pcie_event.events = MSM_PCIE_EVENT_LINKDOWN;
+	adpt->msm_pcie_event.user = pdev;
+	adpt->msm_pcie_event.mode = MSM_PCIE_TRIGGER_CALLBACK;
+	adpt->msm_pcie_event.callback = NULL;
+	adpt->msm_pcie_event.options = MSM_PCIE_CONFIG_NO_RECOVERY;
+	retval = msm_pcie_register_event(&adpt->msm_pcie_event);
+	if (retval) {
+		IPC_ERROR("%s: PCI link down detect register failed %d\n",
+				__func__, retval);
+		goto msm_pcie_register_fail;
+	}
+#endif
+
+	/* carrier off reporting is important to ethtool even BEFORE open */
+	netif_carrier_off(netdev);
+	/* keep stopping all the transmit queues for older kernels */
+	netif_tx_stop_all_queues(netdev);
+
+	/* print the MAC address */
+	alx_netif_dbg(adpt, probe, adpt->netdev, "%pM\n", netdev->dev_addr);
+
+	/* print the adapter capability */
+	if (CHK_ADPT_FLAG(0, MSI_CAP)) {
+		alx_netif_dbg(adpt, probe, adpt->netdev,
+			   "MSI Capable: %s\n",
+			   CHK_ADPT_FLAG(0, MSI_EN) ? "Enable" : "Disable");
+	}
+	if (CHK_ADPT_FLAG(0, MSIX_CAP)) {
+		alx_netif_dbg(adpt, probe, adpt->netdev,
+			   "MSIX Capable: %s\n",
+			   CHK_ADPT_FLAG(0, MSIX_EN) ? "Enable" : "Disable");
+	}
+	if (CHK_ADPT_FLAG(0, MRQ_CAP)) {
+		alx_netif_dbg(adpt, probe, adpt->netdev,
+			   "MRQ Capable: %s\n",
+			   CHK_ADPT_FLAG(0, MRQ_EN) ? "Enable" : "Disable");
+	}
+	if (CHK_ADPT_FLAG(0, MTQ_CAP)) {
+		alx_netif_dbg(adpt, probe, adpt->netdev,
+			   "MTQ Capable: %s\n",
+			   CHK_ADPT_FLAG(0, MTQ_EN) ? "Enable" : "Disable");
+	}
+	if (CHK_ADPT_FLAG(0, SRSS_CAP)) {
+		alx_netif_dbg(adpt, probe, adpt->netdev,
+			   "RSS(SW) Capable: %s\n",
+			   CHK_ADPT_FLAG(0, SRSS_EN) ? "Enable" : "Disable");
+	}
+#ifdef ALX_HIB_TIMER_CONFIG
+	mod_timer(&adpt->alx_timer, jiffies);
+#endif
+
+	_IPC_INFO("alx: Atheros Gigabit Network Connection\n");
+	cards_found++;
+	return 0;
+
+#ifdef MDM_PLATFORM
+msm_pcie_register_fail:
+err_init_odu_bridge:
+#endif
+	unregister_netdev(netdev);
+	adpt->netdev_registered = false;
+err_register_netdev:
+	alx_free_all_rtx_queue(adpt);
+err_alloc_rtx_queue:
+	alx_reset_interrupt_param(adpt);
+err_set_interrupt_param:
+	alx_reset_interrupt_mode(adpt);
+#ifdef MDM_PLATFORM
+err_ipa_rm:
+#endif
+err_set_interrupt_mode:
+err_init_adapter:
+	iounmap(adpt->hw.hw_addr);
+err_iomap:
+	free_netdev(netdev);
+err_alloc_netdev:
+	pci_release_selected_regions(pdev,
+				     pci_select_bars(pdev, IORESOURCE_MEM));
+err_alloc_pci_res_mem:
+	pci_disable_device(pdev);
+err_alloc_device:
+	dev_err(&pdev->dev,
+		"error while probing device, error = %d\n", retval);
+#ifdef APQ_PLATFORM
+alx_smmu_init_fail:
+#endif
+	return retval;
+}
+
+
+/*
+ * alx_remove - Device Removal Routine
+ */
+static void __devexit alx_remove(struct pci_dev *pdev)
+{
+	struct alx_adapter *adpt = pci_get_drvdata(pdev);
+	struct alx_hw *hw = &adpt->hw;
+	struct net_device *netdev = adpt->netdev;
+#ifdef MDM_PLATFORM
+	int retval = 0;
+#endif
+
+#ifdef ALX_HIB_TIMER_CONFIG
+	del_timer_sync(&adpt->alx_timer);
+#endif
+	SET_ADPT_FLAG(1, STATE_DOWN);
+	cancel_work_sync(&adpt->alx_task);
+
+	hw->cbs.config_pow_save(hw, ALX_LINK_SPEED_UNKNOWN,
+				false, false, false, false);
+
+	/* restore the permanent MAC address */
+	hw->cbs.set_mac_addr(hw, hw->mac_perm_addr);
+
+	if (adpt->netdev_registered) {
+		unregister_netdev(netdev);
+		adpt->netdev_registered = false;
+	}
+
+	alx_free_all_rtx_queue(adpt);
+	alx_reset_interrupt_param(adpt);
+	alx_reset_interrupt_mode(adpt);
+
+	iounmap(adpt->hw.hw_addr);
+	pci_release_selected_regions(pdev,
+				     pci_select_bars(pdev, IORESOURCE_MEM));
+
+	alx_netif_dbg(adpt, probe, adpt->netdev, "complete\n");
+
+#ifdef MDM_PLATFORM
+	if (ipa_enable) {
+		if (CHK_ADPT_FLAG(2, ODU_CONNECT)) {
+			retval = odu_bridge_disconnect();
+			if (retval)
+				IPC_ERROR("Could not Disconnect to ODU bridge"
+					"or ODU Bridge already disconnected %d \n",
+					retval);
+		}
+
+		if (CHK_ADPT_FLAG(2, ODU_INIT)) {
+			retval = odu_bridge_cleanup();
+			if (retval)
+				IPC_ERROR("Couldnt cleanup ODU_Bridge Driver %d \n",
+					retval);
+		}
+
+		/* ALX IPA Specific Cleanup */
+		if (CHK_ADPT_FLAG(2, IPA_RM))
+			alx_ipa_cleanup_rm(adpt);
+
+		if (CHK_ADPT_FLAG(2, DEBUGFS_INIT))
+			alx_debugfs_exit(adpt);
+
+		/* Reset all the flags */
+		CLI_ADPT_FLAG(2, ODU_CONNECT);
+		CLI_ADPT_FLAG(2, ODU_INIT);
+		CLI_ADPT_FLAG(2, IPA_RM);
+		CLI_ADPT_FLAG(2, DEBUGFS_INIT);
+		CLI_ADPT_FLAG(2, WQ_SCHED);
+		kfree(adpt->palx_ipa);
+
+		/* Cancel ALX IPA Flow Control Work */
+		cancel_work_sync(&adpt->ipa_send_task);
+		/* Cancel ALX IPA Ready Callback */
+		cancel_work_sync(&adpt->ipa_ready_task);
+	}
+#endif
+	/* Release the wakelock to ensure the system can go to power collapse */
+	pm_relax(&pdev->dev);
+
+	pci_disable_pcie_error_reporting(pdev);
+
+	pci_disable_device(pdev);
+
+#ifdef MDM_PLATFORM
+	/* De-register with MSM PCIe PM framework */
+	msm_pcie_deregister_event(&adpt->msm_pcie_event);
+
+	retval = msm_pcie_pm_control(MSM_PCIE_SUSPEND, pdev->bus->number,
+					  pdev, NULL, 0);
+	if (retval)
+		IPC_ERROR("Couldn't suspend PCIe MSM link %d\n",
+			  retval);
+#endif
+
+#ifdef APQ_PLATFORM
+	alx_smmu_remove(&pdev->dev);
+#endif
+
+	/* Free the netdev last: adpt is the netdev's private data and is
+	 * still used by the IPA teardown and MSM PCIe calls above. */
+	free_netdev(netdev);
+}
+
+
+/*
+ * alx_pci_error_detected
+ */
+static pci_ers_result_t alx_pci_error_detected(struct pci_dev *pdev,
+					       pci_channel_state_t state)
+{
+	struct alx_adapter *adpt = pci_get_drvdata(pdev);
+	struct net_device *netdev = adpt->netdev;
+	pci_ers_result_t retval = PCI_ERS_RESULT_NEED_RESET;
+
+	netif_device_detach(netdev);
+
+	if (state == pci_channel_io_perm_failure) {
+		retval = PCI_ERS_RESULT_DISCONNECT;
+		goto out;
+	}
+
+	if (netif_running(netdev))
+		alx_stop_internal(adpt, ALX_OPEN_CTRL_RESET_MAC);
+	pci_disable_device(pdev);
+out:
+	return retval;
+}
+
+
+/*
+ * alx_pci_error_slot_reset
+ */
+static pci_ers_result_t alx_pci_error_slot_reset(struct pci_dev *pdev)
+{
+	struct alx_adapter *adpt = pci_get_drvdata(pdev);
+	pci_ers_result_t retval = PCI_ERS_RESULT_DISCONNECT;
+
+	if (pci_enable_device(pdev)) {
+		alx_err(adpt, "cannot re-enable PCI device after reset\n");
+		goto out;
+	}
+
+	pci_set_master(pdev);
+	pci_enable_wake(pdev, PCI_D3hot, 0);
+	pci_enable_wake(pdev, PCI_D3cold, 0);
+	adpt->hw.cbs.reset_mac(&adpt->hw);
+	retval = PCI_ERS_RESULT_RECOVERED;
+out:
+	pci_cleanup_aer_uncorrect_error_status(pdev);
+	return retval;
+}
+
+
+/*
+ * alx_pci_error_resume
+ */
+static void alx_pci_error_resume(struct pci_dev *pdev)
+{
+	struct alx_adapter *adpt = pci_get_drvdata(pdev);
+	struct net_device *netdev = adpt->netdev;
+
+	if (netif_running(netdev)) {
+		if (alx_open_internal(adpt, 0))
+			return;
+	}
+
+	netif_device_attach(netdev);
+}
+
+
+static struct pci_error_handlers alx_err_handler = {
+	.error_detected = alx_pci_error_detected,
+	.slot_reset     = alx_pci_error_slot_reset,
+	.resume         = alx_pci_error_resume,
+};
+
+
+#ifdef CONFIG_PM_SLEEP
+static SIMPLE_DEV_PM_OPS(alx_pm_ops, alx_suspend, alx_resume);
+#define ALX_PM_OPS (&alx_pm_ops)
+#ifndef MDM_PLATFORM
+
+#ifndef APQ_PLATFORM
+compat_pci_suspend(alx_suspend)
+compat_pci_resume(alx_resume)
+#endif
+
+#endif
+#else
+#define ALX_PM_OPS      NULL
+#endif
+
+
+static struct pci_driver alx_driver = {
+	.name        = alx_drv_name,
+	.id_table    = alx_pci_tbl,
+	.probe       = alx_init,
+	.remove      = __devexit_p(alx_remove),
+	.shutdown    = alx_shutdown,
+	.err_handler = &alx_err_handler,
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,29))
+	.driver.pm   = ALX_PM_OPS,
+#elif defined(CONFIG_PM_SLEEP)
+	.suspend        = alx_suspend_compat,
+	.resume         = alx_resume_compat,
+#endif
+};
+
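+/*
+ * /proc/alx_enable_ipc_low: a runtime knob for verbose IPC logging. Writing
+ * a non-zero value creates the "alx_low" IPC log context; writing zero
+ * destroys it.
+ */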
+static ssize_t alx_proc_read_cb(struct file *filp, char __user *buf,
+				size_t count, loff_t *offp)
+{
+	char tmp[16];
+	int len;
+
+	if (*offp != 0)
+		return 0;
+
+	/* format into a kernel buffer and copy it out to userspace */
+	len = scnprintf(tmp, sizeof(tmp), "%d\n", alx_enable_ipc_low);
+	return simple_read_from_buffer(buf, count, offp, tmp, len);
+}
+
+static ssize_t alx_proc_write_cb(struct file *file, const char __user *buf,
+				 size_t count, loff_t *data)
+{
+	int tmp = 0;
+
+	/* leave room for the NUL terminator (tmp_buff is assumed to hold
+	 * MAX_PROC_SIZE bytes) */
+	if (count > MAX_PROC_SIZE - 1)
+		count = MAX_PROC_SIZE - 1;
+	if (copy_from_user(tmp_buff, buf, count))
+		return -EFAULT;
+	tmp_buff[count] = '\0';
+
+	if (sscanf(tmp_buff, "%d", &tmp) != 1)
+		pr_err("sscanf failed\n");
+	else {
+		if (tmp) {
+			if (!ipc_alx_log_ctxt_low) {
+				ipc_alx_log_ctxt_low = ipc_log_context_create(IPCLOG_STATE_PAGES,
+							"alx_low", 0);
+			}
+			if (!ipc_alx_log_ctxt_low) {
+				pr_err("failed to create ipc alx low context\n");
+				return -EFAULT;
+			}
+		} else {
+			if (ipc_alx_log_ctxt_low)
+				ipc_log_context_destroy(ipc_alx_log_ctxt_low);
+			ipc_alx_log_ctxt_low = NULL;
+		}
+	}
+	alx_enable_ipc_low = tmp;
+	return count;
+}
+
+
+static int __init alx_init_module(void)
+{
+	int retval;
+
+	ipc_alx_log_ctxt = ipc_log_context_create(IPCLOG_STATE_PAGES,
+							"alx", 0);
+	if (!ipc_alx_log_ctxt)
+		pr_err("error creating logging context for alx\n");
+	else
+		pr_info("IPC logging has been enabled for alx\n");
+
+	_IPC_INFO("%s\n", alx_drv_description);
+	/* printk(KERN_INFO "%s\n", "-----ALX_V1.0.0.2-----"); */
+	ipa_enable = module_ipa_enable;
+	if (ipa_enable)
+		pr_info("ALX: Software Bridge is Enabled\n");
+	else
+		pr_info("ALX: Software Bridge is Disabled\n");
+
+	retval = pci_register_driver(&alx_driver);
+	if (retval)
+		return retval;
+
+	/* define the proc file and its operations */
+	memset(&proc_file_ops, 0, sizeof(struct file_operations));
+	proc_file_ops.owner = THIS_MODULE;
+	proc_file_ops.read = alx_proc_read_cb;
+	proc_file_ops.write = alx_proc_write_cb;
+	proc_file = proc_create("alx_enable_ipc_low", 0, NULL, &proc_file_ops);
+	if (proc_file == NULL) {
+		IPC_ERROR(" error creating proc entry!\n");
+		/* don't leave the PCI driver registered on failure */
+		pci_unregister_driver(&alx_driver);
+		return -EINVAL;
+	}
+
+	return retval;
+}
+module_init(alx_init_module);
+
+
+static void __exit alx_exit_module(void)
+{
+	remove_proc_entry("alx_enable_ipc_low", NULL);
+	pci_unregister_driver(&alx_driver);
+
+	if (ipc_alx_log_ctxt != NULL)
+		ipc_log_context_destroy(ipc_alx_log_ctxt);
+
+	if (ipc_alx_log_ctxt_low != NULL)
+		ipc_log_context_destroy(ipc_alx_log_ctxt_low);
+}
+
+
+module_exit(alx_exit_module);
diff -Nruw linux-4.4.115-fbx/drivers/net/ethernet/atheros/alx_prop./alx_sw.h linux-4.4.115-fbx/drivers/net/ethernet/atheros/alx_prop/alx_sw.h
--- linux-4.4.115-fbx/drivers/net/ethernet/atheros/alx_prop./alx_sw.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/net/ethernet/atheros/alx_prop/alx_sw.h	2019-01-22 16:16:24.927259302 +0100
@@ -0,0 +1,497 @@
+/*
+ * Copyright (c) 2012 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _ALX_SW_H_
+#define _ALX_SW_H_
+
+#include <linux/netdevice.h>
+#include <linux/crc32.h>
+
+/* Vendor ID */
+#define ALX_VENDOR_ID                   0x1969
+
+/* Device IDs */
+#define ALX_DEV_ID_AR8131               0x1063   /* l1c */
+#define ALX_DEV_ID_AR8132               0x1062   /* l2c */
+#define ALX_DEV_ID_AR8151_V1            0x1073   /* l1d_v1 */
+#define ALX_DEV_ID_AR8151_V2            0x1083   /* l1d_v2 */
+#define ALX_DEV_ID_AR8152_V1            0x2060   /* l2cb_v1 */
+#define ALX_DEV_ID_AR8152_V2            0x2062   /* l2cb_v2 */
+#define ALX_DEV_ID_AR8161               0x1091   /* l1f */
+#define ALX_DEV_ID_AR8162               0x1090   /* l2f */
+
+#define ALX_REV_ID_AR8152_V1_0          0xc0
+#define ALX_REV_ID_AR8152_V1_1          0xc1
+#define ALX_REV_ID_AR8152_V2_0          0xc0
+#define ALX_REV_ID_AR8152_V2_1          0xc1
+#define ALX_REV_ID_AR8161_V2_0          0x10  /* B0 */
+
+/* Generic Registers */
+#define ALX_DEV_STAT                    0x62  /* 16 bits */
+#define ALX_DEV_STAT_CERR               0x0001
+#define ALX_DEV_STAT_NFERR              0x0002
+#define ALX_DEV_STAT_FERR               0x0004
+
+#define ALX_ISR                         0x1600
+#define ALX_IMR                         0x1604
+#define ALX_ISR_SMB                     0x00000001
+#define ALX_ISR_TIMER                   0x00000002
+#define ALX_ISR_MANU                    0x00000004
+#define ALX_ISR_RXF_OV                  0x00000008
+#define ALX_ISR_RFD_UR                  0x00000010
+#define ALX_ISR_TX_Q1                   0x00000020
+#define ALX_ISR_TX_Q2                   0x00000040
+#define ALX_ISR_TX_Q3                   0x00000080
+#define ALX_ISR_TXF_UR                  0x00000100
+#define ALX_ISR_DMAR                    0x00000200
+#define ALX_ISR_DMAW                    0x00000400
+#define ALX_ISR_TX_CREDIT               0x00000800
+#define ALX_ISR_PHY                     0x00001000
+#define ALX_ISR_PHY_LPW                 0x00002000
+#define ALX_ISR_TXQ_TO                  0x00004000
+#define ALX_ISR_TX_Q0                   0x00008000
+#define ALX_ISR_RX_Q0                   0x00010000
+#define ALX_ISR_RX_Q1                   0x00020000
+#define ALX_ISR_RX_Q2                   0x00040000
+#define ALX_ISR_RX_Q3                   0x00080000
+#define ALX_ISR_MAC_RX                  0x00100000
+#define ALX_ISR_MAC_TX                  0x00200000
+#define ALX_ISR_PCIE_UR                 0x00400000
+#define ALX_ISR_PCIE_FERR               0x00800000
+#define ALX_ISR_PCIE_NFERR              0x01000000
+#define ALX_ISR_PCIE_CERR               0x02000000
+#define ALX_ISR_PCIE_LNKDOWN            0x04000000
+#define ALX_ISR_RX_Q4                   0x08000000
+#define ALX_ISR_RX_Q5                   0x10000000
+#define ALX_ISR_RX_Q6                   0x20000000
+#define ALX_ISR_RX_Q7                   0x40000000
+#define ALX_ISR_DIS                     0x80000000
+
+
+#define ALX_IMR_NORMAL_MASK (\
+		ALX_ISR_MANU            |\
+		ALX_ISR_OVER            |\
+		ALX_ISR_TXQ             |\
+		ALX_ISR_RXQ             |\
+		ALX_ISR_PHY_LPW         |\
+		ALX_ISR_PHY             |\
+		ALX_ISR_ERROR)
+
+#define ALX_ISR_ALERT_MASK (\
+		ALX_ISR_DMAR            |\
+		ALX_ISR_DMAW            |\
+		ALX_ISR_TXQ_TO          |\
+		ALX_ISR_PCIE_FERR       |\
+		ALX_ISR_PCIE_LNKDOWN    |\
+		ALX_ISR_RFD_UR          |\
+		ALX_ISR_RXF_OV)
+
+#define ALX_ISR_TXQ (\
+		ALX_ISR_TX_Q0           |\
+		ALX_ISR_TX_Q1           |\
+		ALX_ISR_TX_Q2           |\
+		ALX_ISR_TX_Q3)
+
+#define ALX_ISR_RXQ (\
+		ALX_ISR_RX_Q0           |\
+		ALX_ISR_RX_Q1           |\
+		ALX_ISR_RX_Q2           |\
+		ALX_ISR_RX_Q3           |\
+		ALX_ISR_RX_Q4           |\
+		ALX_ISR_RX_Q5           |\
+		ALX_ISR_RX_Q6           |\
+		ALX_ISR_RX_Q7)
+
+#define ALX_ISR_OVER (\
+		ALX_ISR_RFD_UR          |\
+		ALX_ISR_RXF_OV          |\
+		ALX_ISR_TXF_UR)
+
+#define ALX_ISR_ERROR (\
+		ALX_ISR_DMAR            |\
+		ALX_ISR_TXQ_TO          |\
+		ALX_ISR_DMAW            |\
+		ALX_ISR_PCIE_ERROR)
+
+#define ALX_ISR_PCIE_ERROR (\
+		ALX_ISR_PCIE_FERR       |\
+		ALX_ISR_PCIE_LNKDOWN)
+
+/* MISC Register */
+#define ALX_MISC                        0x19C0
+#define ALX_MISC_INTNLOSC_OPEN          0x00000008
+
+#define ALX_CLK_GATE                    0x1814
+
+/* DMA address */
+#define DMA_ADDR_HI_MASK                0xffffffff00000000ULL
+#define DMA_ADDR_LO_MASK                0x00000000ffffffffULL
+
+#define ALX_DMA_ADDR_HI(_addr) \
+		((u32)(((u64)(_addr) & DMA_ADDR_HI_MASK) >> 32))
+#define ALX_DMA_ADDR_LO(_addr) \
+		((u32)((u64)(_addr) & DMA_ADDR_LO_MASK))
+
+/* mac address length */
+#define ALX_ETH_LENGTH_OF_ADDRESS       6
+#define ALX_ETH_LENGTH_OF_HEADER        ETH_HLEN
+
+#define ALX_ETH_CRC(_addr, _len)        ether_crc((_len), (_addr))
+
+/* Link speed / autonegotiation advertised speeds */
+#define ALX_LINK_SPEED_UNKNOWN          0x0
+#define ALX_LINK_SPEED_10_HALF          0x0001
+#define ALX_LINK_SPEED_10_FULL          0x0002
+#define ALX_LINK_SPEED_100_HALF         0x0004
+#define ALX_LINK_SPEED_100_FULL         0x0008
+#define ALX_LINK_SPEED_1GB_FULL         0x0020
+#define ALX_LINK_SPEED_DEFAULT (\
+		ALX_LINK_SPEED_10_HALF  |\
+		ALX_LINK_SPEED_10_FULL  |\
+		ALX_LINK_SPEED_100_HALF |\
+		ALX_LINK_SPEED_100_FULL |\
+		ALX_LINK_SPEED_1GB_FULL)
+
+#define ALX_MAX_SETUP_LNK_CYCLE         100
+
+/* Device Type definitions for new protocol MDIO commands */
+#define ALX_MDIO_DEV_TYPE_NORM          0
+
+/* Wake On Lan */
+#define ALX_WOL_PHY                     0x00000001 /* PHY Status Change */
+#define ALX_WOL_MAGIC                   0x00000002 /* Magic Packet */
+
+#define ALX_MAX_EEPROM_LEN              0x200
+#define ALX_MAX_HWREG_LEN               0x200
+
+/* RSS Settings */
+enum alx_rss_mode {
+	alx_rss_mode_disable    = 0,
+	alx_rss_sig_que         = 1,
+	alx_rss_mul_que_sig_int = 2,
+	alx_rss_mul_que_mul_int = 4,
+};
+
+/* Flow Control Settings */
+enum alx_fc_mode {
+	alx_fc_none = 0,
+	alx_fc_rx_pause,
+	alx_fc_tx_pause,
+	alx_fc_full,
+	alx_fc_default
+};
+
+/* WRR Restrict Settings */
+enum alx_wrr_mode {
+	alx_wrr_mode_none = 0,
+	alx_wrr_mode_high,
+	alx_wrr_mode_high2,
+	alx_wrr_mode_all
+};
+
+enum alx_mac_type {
+	alx_mac_unknown = 0,
+	alx_mac_l1c,
+	alx_mac_l2c,
+	alx_mac_l1d_v1,
+	alx_mac_l1d_v2,
+	alx_mac_l2cb_v1,
+	alx_mac_l2cb_v20,
+	alx_mac_l2cb_v21,
+	alx_mac_l1f,
+	alx_mac_l2f,
+};
+
+
+/* Statistics counters collected by the MAC */
+struct alx_hw_stats {
+	/* rx */
+	unsigned long rx_ok;
+	unsigned long rx_bcast;
+	unsigned long rx_mcast;
+	unsigned long rx_pause;
+	unsigned long rx_ctrl;
+	unsigned long rx_fcs_err;
+	unsigned long rx_len_err;
+	unsigned long rx_byte_cnt;
+	unsigned long rx_runt;
+	unsigned long rx_frag;
+	unsigned long rx_sz_64B;
+	unsigned long rx_sz_127B;
+	unsigned long rx_sz_255B;
+	unsigned long rx_sz_511B;
+	unsigned long rx_sz_1023B;
+	unsigned long rx_sz_1518B;
+	unsigned long rx_sz_max;
+	unsigned long rx_ov_sz;
+	unsigned long rx_ov_rxf;
+	unsigned long rx_ov_rrd;
+	unsigned long rx_align_err;
+	unsigned long rx_bc_byte_cnt;
+	unsigned long rx_mc_byte_cnt;
+	unsigned long rx_err_addr;
+
+	/* tx */
+	unsigned long tx_ok;
+	unsigned long tx_bcast;
+	unsigned long tx_mcast;
+	unsigned long tx_pause;
+	unsigned long tx_exc_defer;
+	unsigned long tx_ctrl;
+	unsigned long tx_defer;
+	unsigned long tx_byte_cnt;
+	unsigned long tx_sz_64B;
+	unsigned long tx_sz_127B;
+	unsigned long tx_sz_255B;
+	unsigned long tx_sz_511B;
+	unsigned long tx_sz_1023B;
+	unsigned long tx_sz_1518B;
+	unsigned long tx_sz_max;
+	unsigned long tx_single_col;
+	unsigned long tx_multi_col;
+	unsigned long tx_late_col;
+	unsigned long tx_abort_col;
+	unsigned long tx_underrun;
+	unsigned long tx_trd_eop;
+	unsigned long tx_len_err;
+	unsigned long tx_trunc;
+	unsigned long tx_bc_byte_cnt;
+	unsigned long tx_mc_byte_cnt;
+	unsigned long update;
+};
+
+/* HW callback function pointer table */
+struct alx_hw;
+struct alx_hw_callbacks {
+	/* NIC */
+	int (*identify_nic)(struct alx_hw *);
+	/* PHY */
+	int (*init_phy)(struct alx_hw *);
+	int (*reset_phy)(struct alx_hw *);
+	int (*read_phy_reg)(struct alx_hw *, u16, u16 *);
+	int (*write_phy_reg)(struct alx_hw *, u16, u16);
+	int (*apply_phy_hib_patch)(struct alx_hw *);
+	/* Link */
+	int (*setup_phy_link)(struct alx_hw *, u32, bool, bool);
+	int (*setup_phy_link_speed)(struct alx_hw *, u32, bool, bool);
+	int (*check_phy_link)(struct alx_hw *, u32 *, bool *);
+
+	/* MAC */
+	int (*reset_mac)(struct alx_hw *);
+	int (*start_mac)(struct alx_hw *);
+	int (*stop_mac)(struct alx_hw *);
+	int (*config_mac)(struct alx_hw *, u16, u16, u16, u16, u16);
+	int (*get_mac_addr)(struct alx_hw *, u8 *);
+	int (*set_mac_addr)(struct alx_hw *, u8 *);
+	int (*set_mc_addr)(struct alx_hw *, u8 *);
+	int (*clear_mc_addr)(struct alx_hw *);
+
+	/* intr */
+	int (*ack_phy_intr)(struct alx_hw *);
+	int (*enable_legacy_intr)(struct alx_hw *);
+	int (*disable_legacy_intr)(struct alx_hw *);
+	int (*enable_msix_intr)(struct alx_hw *, u8);
+	int (*disable_msix_intr)(struct alx_hw *, u8);
+
+	/* Configure */
+	int (*config_rx)(struct alx_hw *);
+	int (*config_tx)(struct alx_hw *);
+	int (*config_fc)(struct alx_hw *);
+	int (*config_rss)(struct alx_hw *, bool);
+	int (*config_msix)(struct alx_hw *, u16, bool, bool);
+	int (*config_wol)(struct alx_hw *, u32);
+	int (*config_aspm)(struct alx_hw *, bool, bool);
+	int (*config_mac_ctrl)(struct alx_hw *);
+	int (*config_pow_save)(struct alx_hw *, u32,
+				bool, bool, bool, bool);
+	int (*reset_pcie)(struct alx_hw *, bool, bool);
+
+	/* NVRam function */
+	int (*check_nvram)(struct alx_hw *, bool *);
+	int (*read_nvram)(struct alx_hw *, u16, u32 *);
+	int (*write_nvram)(struct alx_hw *, u16, u32);
+
+	/* Others */
+	int (*get_ethtool_regs)(struct alx_hw *, void *);
+};
+
+struct alx_hw {
+	struct alx_adapter	*adpt;
+	struct alx_hw_callbacks	 cbs;
+	u8 __iomem     *hw_addr; /* inner register address */
+	u16             pci_venid;
+	u16             pci_devid;
+	u16             pci_sub_devid;
+	u16             pci_sub_venid;
+	u8              pci_revid;
+
+	bool            long_cable;
+	bool            aps_en;
+	bool            hi_txperf;
+	bool            msi_lnkpatch;
+	u32             dma_chnl;
+	u32             hwreg_sz;
+	u32             eeprom_sz;
+
+	/* PHY parameter */
+	u32             phy_id;
+	u32             autoneg_advertised;
+	u32             link_speed;
+	bool            link_up;
+	spinlock_t      mdio_lock;
+	bool            bHibBug;
+	bool            bInHibMode;
+	bool            bHibPatched;
+
+	/* MAC parameter */
+	enum alx_mac_type mac_type;
+	u8              mac_addr[ALX_ETH_LENGTH_OF_ADDRESS];
+	u8              mac_perm_addr[ALX_ETH_LENGTH_OF_ADDRESS];
+
+	u32             mtu;
+	u16             rxstat_reg;
+	u16             rxstat_sz;
+	u16             txstat_reg;
+	u16             txstat_sz;
+
+	u16             tx_prod_reg[4];
+	u16             tx_cons_reg[4];
+	u16             rx_prod_reg[2];
+	u16             rx_cons_reg[2];
+	u64             tpdma[4];
+	u64             rfdma[2];
+	u64             rrdma[2];
+
+	/* WRR parameter */
+	enum alx_wrr_mode wrr_mode;
+	u32             wrr_prio0;
+	u32             wrr_prio1;
+	u32             wrr_prio2;
+	u32             wrr_prio3;
+
+	/* RSS parameter */
+	enum alx_rss_mode rss_mode;
+	u8              rss_hstype;
+	u8              rss_base_cpu;
+	u16             rss_idt_size;
+	u32             rss_idt[32];
+	u8              rss_key[40];
+
+	/* flow control parameter */
+	enum alx_fc_mode cur_fc_mode; /* FC mode in effect */
+	enum alx_fc_mode req_fc_mode; /* FC mode requested by caller */
+	bool            disable_fc_autoneg; /* Do not autonegotiate FC */
+	bool            fc_was_autonegged;  /* the result of autonegging */
+	bool            fc_single_pause;
+
+	/* Others */
+	u32             preamble;
+	u32             intr_mask;
+	u16             smb_timer;
+	u16             imt;    /* interrupt moderation timer, in 2us units */
+	u32             flags;
+};
+
+#define ALX_HW_FLAG_L0S_CAP             0x00000001
+#define ALX_HW_FLAG_L0S_EN              0x00000002
+#define ALX_HW_FLAG_L1_CAP              0x00000004
+#define ALX_HW_FLAG_L1_EN               0x00000008
+#define ALX_HW_FLAG_PWSAVE_CAP          0x00000010
+#define ALX_HW_FLAG_PWSAVE_EN           0x00000020
+#define ALX_HW_FLAG_AZ_CAP              0x00000040
+#define ALX_HW_FLAG_AZ_EN               0x00000080
+#define ALX_HW_FLAG_PTP_CAP             0x00000100
+#define ALX_HW_FLAG_PTP_EN              0x00000200
+#define ALX_HW_FLAG_GIGA_CAP            0x00000400
+
+#define ALX_HW_FLAG_PROMISC_EN          0x00010000   /* for mac ctrl reg */
+#define ALX_HW_FLAG_VLANSTRIP_EN        0x00020000   /* for mac ctrl reg */
+#define ALX_HW_FLAG_MULTIALL_EN         0x00040000   /* for mac ctrl reg */
+#define ALX_HW_FLAG_LOOPBACK_EN         0x00080000   /* for mac ctrl reg */
+
+#define CHK_HW_FLAG(_flag)              CHK_FLAG(hw, HW, _flag)
+#define SET_HW_FLAG(_flag)              SET_FLAG(hw, HW, _flag)
+#define CLI_HW_FLAG(_flag)              CLI_FLAG(hw, HW, _flag)
+
+
+/* RSS hstype Definitions */
+#define ALX_RSS_HSTYP_IPV4_EN           0x00000001
+#define ALX_RSS_HSTYP_TCP4_EN           0x00000002
+#define ALX_RSS_HSTYP_IPV6_EN           0x00000004
+#define ALX_RSS_HSTYP_TCP6_EN           0x00000008
+#define ALX_RSS_HSTYP_ALL_EN (\
+		ALX_RSS_HSTYP_IPV4_EN   |\
+		ALX_RSS_HSTYP_TCP4_EN   |\
+		ALX_RSS_HSTYP_IPV6_EN   |\
+		ALX_RSS_HSTYP_TCP6_EN)
+
+
+/* definitions for flags */
+
+#define CHK_FLAG_ARRAY(_st, _idx, _type, _flag)	\
+		((_st)->flags[_idx] & (ALX_##_type##_FLAG_##_idx##_##_flag))
+#define CHK_FLAG(_st, _type, _flag)	\
+		((_st)->flags & (ALX_##_type##_FLAG_##_flag))
+
+#define SET_FLAG_ARRAY(_st, _idx, _type, _flag) \
+		((_st)->flags[_idx] |= (ALX_##_type##_FLAG_##_idx##_##_flag))
+#define SET_FLAG(_st, _type, _flag) \
+		((_st)->flags |= (ALX_##_type##_FLAG_##_flag))
+
+#define CLI_FLAG_ARRAY(_st, _idx, _type, _flag) \
+		((_st)->flags[_idx] &= ~(ALX_##_type##_FLAG_##_idx##_##_flag))
+#define CLI_FLAG(_st, _type, _flag) \
+		((_st)->flags &= ~(ALX_##_type##_FLAG_##_flag))
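+
+/*
+ * Hypothetical expansion example: CHK_HW_FLAG(L0S_CAP) expands to
+ * ((hw)->flags & (ALX_HW_FLAG_L0S_CAP)), so the *_HW_FLAG() helpers
+ * assume a local variable named "hw" at the call site.
+ */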
+
+int alx_cfg_r16(const struct alx_hw *hw, int reg, u16 *pval);
+int alx_cfg_w16(const struct alx_hw *hw, int reg, u16 val);
+
+
+void alx_mem_flush(const struct alx_hw *hw);
+void alx_mem_r32(const struct alx_hw *hw, int reg, u32 *val);
+void alx_mem_w32(const struct alx_hw *hw, int reg, u32 val);
+void alx_mem_w8(const struct alx_hw *hw, int reg, u8 val);
+
+
+/* special definitions for hw */
+#define ALF_MAX_MSIX_NOQUE_INTRS        4
+#define ALF_MIN_MSIX_NOQUE_INTRS        4
+#define ALF_MAX_MSIX_QUEUE_INTRS        12
+#define ALF_MIN_MSIX_QUEUE_INTRS        12
+#define ALF_MAX_MSIX_INTRS \
+		(ALF_MAX_MSIX_QUEUE_INTRS + ALF_MAX_MSIX_NOQUE_INTRS)
+#define ALF_MIN_MSIX_INTRS \
+		(ALF_MIN_MSIX_NOQUE_INTRS + ALF_MIN_MSIX_QUEUE_INTRS)
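+/* i.e. 12 queue vectors + 4 non-queue vectors = 16 MSI-X vectors in total */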
+
+
+/* per-chip HW callback table initializers */
+extern int alc_init_hw_callbacks(struct alx_hw *hw);
+extern int alf_init_hw_callbacks(struct alx_hw *hw);
+
+/* Logging message functions */
+void __printf(3, 4) alx_hw_printk(const char *level, const struct alx_hw *hw,
+				  const char *fmt, ...);
+
+#define alx_hw_err(_hw, _format, ...) \
+	alx_hw_printk(KERN_ERR, _hw, _format, ##__VA_ARGS__)
+#define alx_hw_warn(_hw, _format, ...) \
+	alx_hw_printk(KERN_WARNING, _hw, _format, ##__VA_ARGS__)
+#define alx_hw_info(_hw, _format, ...) \
+	alx_hw_printk(KERN_INFO, _hw, _format, ##__VA_ARGS__)
+
+#endif /* _ALX_SW_H_ */
+
diff -Nruw linux-4.4.115-fbx/drivers/net/ethernet/atheros/alx_prop./Makefile linux-4.4.115-fbx/drivers/net/ethernet/atheros/alx_prop/Makefile
--- linux-4.4.115-fbx/drivers/net/ethernet/atheros/alx_prop./Makefile	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/net/ethernet/atheros/alx_prop/Makefile	2019-01-22 16:16:24.927259302 +0100
@@ -0,0 +1,5 @@
+
+obj-y += alx_prop.o
+CDEFINES += -DAPQ_PLATFORM
+alx_prop-objs := alx_main.o alx_ethtool.o alc_cb.o alc_hw.o alf_cb.o alf_hw.o
+ccflags-y += -DAPQ_PLATFORM -D__CHECK_ENDIAN__
diff -Nruw linux-4.4.115-fbx/drivers/net/ethernet/msm./Kconfig linux-4.4.115-fbx/drivers/net/ethernet/msm/Kconfig
--- linux-4.4.115-fbx/drivers/net/ethernet/msm./Kconfig	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/net/ethernet/msm/Kconfig	2019-01-22 16:16:25.195261729 +0100
@@ -0,0 +1,31 @@
+#
+# msm network device configuration
+#
+
+config MSM_RMNET_MHI
+	bool "RMNET MHI Driver"
+	depends on MSM_MHI
+	help
+	  Implements RMNET over the MHI interface.
+	  RMNET provides a virtual ethernet interface
+	  for routing IP packets within the MSM using
+	  MHI as a physical transport.
+
+config ECM_IPA
+	tristate "STD ECM LAN Driver support"
+	depends on IPA || IPA3
+	help
+	  Enables a LAN between the applications processor and a tethered
+	  host using the STD ECM protocol.
+	  This network interface allows the data path to go through the
+	  IPA core while using the STD ECM protocol.
+
+config RNDIS_IPA
+	tristate "RNDIS_IPA Network Interface Driver support"
+	depends on IPA || IPA3
+	help
+	  Enables a LAN between the applications processor and a tethered
+	  host using the RNDIS protocol.
+	  This network interface allows the data path to go through the
+	  IPA core while using the RNDIS protocol.
+
diff -Nruw linux-4.4.115-fbx/drivers/net/ethernet/msm./Makefile linux-4.4.115-fbx/drivers/net/ethernet/msm/Makefile
--- linux-4.4.115-fbx/drivers/net/ethernet/msm./Makefile	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/net/ethernet/msm/Makefile	2019-01-22 16:16:25.195261729 +0100
@@ -0,0 +1,7 @@
+#
+# Makefile for the msm networking support.
+#
+
+obj-$(CONFIG_MSM_RMNET_MHI) += msm_rmnet_mhi.o
+obj-$(CONFIG_ECM_IPA) += ecm_ipa.o
+obj-$(CONFIG_RNDIS_IPA) += rndis_ipa.o
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/net/ppp/pppolac.c	2019-01-22 16:16:25.367263286 +0100
@@ -0,0 +1,450 @@
+/* drivers/net/pppolac.c
+ *
+ * Driver for PPP on L2TP Access Concentrator / PPPoLAC Socket (RFC 2661)
+ *
+ * Copyright (C) 2009 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/* This driver handles L2TP data packets between a UDP socket and a PPP channel.
+ * The socket must stay connected, and only one session per socket is
+ * permitted. Sequencing of outgoing packets is controlled by the LNS.
+ * Incoming packets with sequences are reordered within a sliding window of
+ * one second. Currently, reordering only happens when a packet is received;
+ * this is done for simplicity since no additional locks or threads are
+ * required. This driver only works on IPv4 due to the lack of UDP
+ * encapsulation support in IPv6. */
+
+#include <linux/module.h>
+#include <linux/jiffies.h>
+#include <linux/workqueue.h>
+#include <linux/skbuff.h>
+#include <linux/file.h>
+#include <linux/netdevice.h>
+#include <linux/net.h>
+#include <linux/udp.h>
+#include <linux/ppp_defs.h>
+#include <linux/if_ppp.h>
+#include <linux/if_pppox.h>
+#include <linux/ppp_channel.h>
+#include <net/tcp_states.h>
+#include <asm/uaccess.h>
+
+#define L2TP_CONTROL_BIT	0x80
+#define L2TP_LENGTH_BIT		0x40
+#define L2TP_SEQUENCE_BIT	0x08
+#define L2TP_OFFSET_BIT		0x02
+#define L2TP_VERSION		0x02
+#define L2TP_VERSION_MASK	0x0F
+
+#define PPP_ADDR	0xFF
+#define PPP_CTRL	0x03
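+
+/*
+ * L2TP data message layout parsed by pppolac_recv_core() below (RFC 2661):
+ * flag bits and version (2 bytes), optional length (2), tunnel ID (2),
+ * session ID (2), optional Ns/Nr sequence numbers (2 + 2), then optional
+ * offset size and padding.
+ */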
+
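+/* Packed union helper for safe unaligned 32-bit loads and stores. */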
+union unaligned {
+	__u32 u32;
+} __attribute__((packed));
+
+static inline union unaligned *unaligned(void *ptr)
+{
+	return (union unaligned *)ptr;
+}
+
+struct meta {
+	__u32 sequence;
+	__u32 timestamp;
+};
+
+static inline struct meta *skb_meta(struct sk_buff *skb)
+{
+	return (struct meta *)skb->cb;
+}
+
+/******************************************************************************/
+
+static int pppolac_recv_core(struct sock *sk_udp, struct sk_buff *skb)
+{
+	struct sock *sk = (struct sock *)sk_udp->sk_user_data;
+	struct pppolac_opt *opt = &pppox_sk(sk)->proto.lac;
+	struct meta *meta = skb_meta(skb);
+	__u32 now = jiffies;
+	__u8 bits;
+	__u8 *ptr;
+
+	/* Drop the packet if L2TP header is missing. */
+	if (skb->len < sizeof(struct udphdr) + 6)
+		goto drop;
+
+	/* Put it back if it is a control packet. */
+	if (skb->data[sizeof(struct udphdr)] & L2TP_CONTROL_BIT)
+		return opt->backlog_rcv(sk_udp, skb);
+
+	/* Skip UDP header. */
+	skb_pull(skb, sizeof(struct udphdr));
+
+	/* Check the version. */
+	if ((skb->data[1] & L2TP_VERSION_MASK) != L2TP_VERSION)
+		goto drop;
+	bits = skb->data[0];
+	ptr = &skb->data[2];
+
+	/* Check the length if it is present. */
+	if (bits & L2TP_LENGTH_BIT) {
+		if ((ptr[0] << 8 | ptr[1]) != skb->len)
+			goto drop;
+		ptr += 2;
+	}
+
+	/* Skip all fields including optional ones. */
+	if (!skb_pull(skb, 6 + (bits & L2TP_SEQUENCE_BIT ? 4 : 0) +
+			(bits & L2TP_LENGTH_BIT ? 2 : 0) +
+			(bits & L2TP_OFFSET_BIT ? 2 : 0)))
+		goto drop;
+
+	/* Skip the offset padding if it is present. */
+	if (bits & L2TP_OFFSET_BIT &&
+			!skb_pull(skb, skb->data[-2] << 8 | skb->data[-1]))
+		goto drop;
+
+	/* Check the tunnel and the session. */
+	if (unaligned(ptr)->u32 != opt->local)
+		goto drop;
+
+	/* Check the sequence if it is present. */
+	if (bits & L2TP_SEQUENCE_BIT) {
+		meta->sequence = ptr[4] << 8 | ptr[5];
+		if ((__s16)(meta->sequence - opt->recv_sequence) < 0)
+			goto drop;
+	}
+
+	/* Skip PPP address and control if they are present. */
+	if (skb->len >= 2 && skb->data[0] == PPP_ADDR &&
+			skb->data[1] == PPP_CTRL)
+		skb_pull(skb, 2);
+
+	/* Fix PPP protocol if it is compressed. */
+	if (skb->len >= 1 && skb->data[0] & 1)
+		skb_push(skb, 1)[0] = 0;
+
+	/* Drop the packet if PPP protocol is missing. */
+	if (skb->len < 2)
+		goto drop;
+
+	/* Perform reordering if sequencing is enabled. */
+	atomic_set(&opt->sequencing, bits & L2TP_SEQUENCE_BIT);
+	if (bits & L2TP_SEQUENCE_BIT) {
+		struct sk_buff *skb1;
+
+		/* Insert the packet into receive queue in order. */
+		skb_set_owner_r(skb, sk);
+		skb_queue_walk(&sk->sk_receive_queue, skb1) {
+			struct meta *meta1 = skb_meta(skb1);
+			__s16 order = meta->sequence - meta1->sequence;
+			if (order == 0)
+				goto drop;
+			if (order < 0) {
+				meta->timestamp = meta1->timestamp;
+				skb_insert(skb1, skb, &sk->sk_receive_queue);
+				skb = NULL;
+				break;
+			}
+		}
+		if (skb) {
+			meta->timestamp = now;
+			skb_queue_tail(&sk->sk_receive_queue, skb);
+		}
+
+		/* Remove packets from receive queue as long as
+		 * 1. the receive buffer is full,
+		 * 2. they are queued longer than one second, or
+		 * 3. there are no missing packets before them. */
+		skb_queue_walk_safe(&sk->sk_receive_queue, skb, skb1) {
+			meta = skb_meta(skb);
+			if (atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf &&
+					now - meta->timestamp < HZ &&
+					meta->sequence != opt->recv_sequence)
+				break;
+			skb_unlink(skb, &sk->sk_receive_queue);
+			opt->recv_sequence = (__u16)(meta->sequence + 1);
+			skb_orphan(skb);
+			ppp_input(&pppox_sk(sk)->chan, skb);
+		}
+		return NET_RX_SUCCESS;
+	}
+
+	/* Flush receive queue if sequencing is disabled. */
+	skb_queue_purge(&sk->sk_receive_queue);
+	skb_orphan(skb);
+	ppp_input(&pppox_sk(sk)->chan, skb);
+	return NET_RX_SUCCESS;
+drop:
+	kfree_skb(skb);
+	return NET_RX_DROP;
+}
+
+static int pppolac_recv(struct sock *sk_udp, struct sk_buff *skb)
+{
+	sock_hold(sk_udp);
+	sk_receive_skb(sk_udp, skb, 0);
+	return 0;
+}
+
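+/*
+ * The PPP channel's start_xmit may be called in atomic context, while the
+ * socket sendmsg path can sleep, so outgoing packets are queued here and
+ * transmitted from process context by a work item.
+ */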
+static struct sk_buff_head delivery_queue;
+
+static void pppolac_xmit_core(struct work_struct *delivery_work)
+{
+	mm_segment_t old_fs = get_fs();
+	struct sk_buff *skb;
+
+	set_fs(KERNEL_DS);
+	while ((skb = skb_dequeue(&delivery_queue))) {
+		struct sock *sk_udp = skb->sk;
+		struct kvec iov = {.iov_base = skb->data, .iov_len = skb->len};
+		struct msghdr msg = {
+			.msg_flags = MSG_NOSIGNAL | MSG_DONTWAIT,
+		};
+
+		iov_iter_kvec(&msg.msg_iter, WRITE | ITER_KVEC, &iov, 1,
+			      skb->len);
+		sk_udp->sk_prot->sendmsg(sk_udp, &msg, skb->len);
+		kfree_skb(skb);
+	}
+	set_fs(old_fs);
+}
+
+static DECLARE_WORK(delivery_work, pppolac_xmit_core);
+
+static int pppolac_xmit(struct ppp_channel *chan, struct sk_buff *skb)
+{
+	struct sock *sk_udp = (struct sock *)chan->private;
+	struct pppolac_opt *opt = &pppox_sk(sk_udp->sk_user_data)->proto.lac;
+
+	/* Install PPP address and control. */
+	skb_push(skb, 2);
+	skb->data[0] = PPP_ADDR;
+	skb->data[1] = PPP_CTRL;
+
+	/* Install L2TP header. */
+	if (atomic_read(&opt->sequencing)) {
+		skb_push(skb, 10);
+		skb->data[0] = L2TP_SEQUENCE_BIT;
+		skb->data[6] = opt->xmit_sequence >> 8;
+		skb->data[7] = opt->xmit_sequence;
+		skb->data[8] = 0;
+		skb->data[9] = 0;
+		opt->xmit_sequence++;
+	} else {
+		skb_push(skb, 6);
+		skb->data[0] = 0;
+	}
+	skb->data[1] = L2TP_VERSION;
+	unaligned(&skb->data[2])->u32 = opt->remote;
+
+	/* Now send the packet via the delivery queue. */
+	skb_set_owner_w(skb, sk_udp);
+	skb_queue_tail(&delivery_queue, skb);
+	schedule_work(&delivery_work);
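+	/* Returning 1 tells the PPP core the skb has been consumed. */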
+	return 1;
+}
+
+/******************************************************************************/
+
+static struct ppp_channel_ops pppolac_channel_ops = {
+	.start_xmit = pppolac_xmit,
+};
+
+static int pppolac_connect(struct socket *sock, struct sockaddr *useraddr,
+	int addrlen, int flags)
+{
+	struct sock *sk = sock->sk;
+	struct pppox_sock *po = pppox_sk(sk);
+	struct sockaddr_pppolac *addr = (struct sockaddr_pppolac *)useraddr;
+	struct socket *sock_udp = NULL;
+	struct sock *sk_udp;
+	int error;
+
+	if (addrlen != sizeof(struct sockaddr_pppolac) ||
+			!addr->local.tunnel || !addr->local.session ||
+			!addr->remote.tunnel || !addr->remote.session) {
+		return -EINVAL;
+	}
+
+	lock_sock(sk);
+	error = -EALREADY;
+	if (sk->sk_state != PPPOX_NONE)
+		goto out;
+
+	sock_udp = sockfd_lookup(addr->udp_socket, &error);
+	if (!sock_udp)
+		goto out;
+	sk_udp = sock_udp->sk;
+	lock_sock(sk_udp);
+
+	/* Remove this check when IPv6 supports UDP encapsulation. */
+	error = -EAFNOSUPPORT;
+	if (sk_udp->sk_family != AF_INET)
+		goto out;
+	error = -EPROTONOSUPPORT;
+	if (sk_udp->sk_protocol != IPPROTO_UDP)
+		goto out;
+	error = -EDESTADDRREQ;
+	if (sk_udp->sk_state != TCP_ESTABLISHED)
+		goto out;
+	error = -EBUSY;
+	if (udp_sk(sk_udp)->encap_type || sk_udp->sk_user_data)
+		goto out;
+	if (!sk_udp->sk_bound_dev_if) {
+		struct dst_entry *dst = sk_dst_get(sk_udp);
+		error = -ENODEV;
+		if (!dst)
+			goto out;
+		sk_udp->sk_bound_dev_if = dst->dev->ifindex;
+		dst_release(dst);
+	}
+
+	po->chan.hdrlen = 12;
+	po->chan.private = sk_udp;
+	po->chan.ops = &pppolac_channel_ops;
+	po->chan.mtu = PPP_MRU - 80;
+	po->proto.lac.local = unaligned(&addr->local)->u32;
+	po->proto.lac.remote = unaligned(&addr->remote)->u32;
+	atomic_set(&po->proto.lac.sequencing, 1);
+	po->proto.lac.backlog_rcv = sk_udp->sk_backlog_rcv;
+
+	error = ppp_register_channel(&po->chan);
+	if (error)
+		goto out;
+
+	sk->sk_state = PPPOX_CONNECTED;
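+	/* Divert the UDP socket's receive path to L2TP-in-UDP decapsulation. */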
+	udp_sk(sk_udp)->encap_type = UDP_ENCAP_L2TPINUDP;
+	udp_sk(sk_udp)->encap_rcv = pppolac_recv;
+	sk_udp->sk_backlog_rcv = pppolac_recv_core;
+	sk_udp->sk_user_data = sk;
+out:
+	if (sock_udp) {
+		release_sock(sk_udp);
+		if (error)
+			sockfd_put(sock_udp);
+	}
+	release_sock(sk);
+	return error;
+}
+
+static int pppolac_release(struct socket *sock)
+{
+	struct sock *sk = sock->sk;
+
+	if (!sk)
+		return 0;
+
+	lock_sock(sk);
+	if (sock_flag(sk, SOCK_DEAD)) {
+		release_sock(sk);
+		return -EBADF;
+	}
+
+	if (sk->sk_state != PPPOX_NONE) {
+		struct sock *sk_udp = (struct sock *)pppox_sk(sk)->chan.private;
+		lock_sock(sk_udp);
+		skb_queue_purge(&sk->sk_receive_queue);
+		pppox_unbind_sock(sk);
+		udp_sk(sk_udp)->encap_type = 0;
+		udp_sk(sk_udp)->encap_rcv = NULL;
+		sk_udp->sk_backlog_rcv = pppox_sk(sk)->proto.lac.backlog_rcv;
+		sk_udp->sk_user_data = NULL;
+		release_sock(sk_udp);
+		sockfd_put(sk_udp->sk_socket);
+	}
+
+	sock_orphan(sk);
+	sock->sk = NULL;
+	release_sock(sk);
+	sock_put(sk);
+	return 0;
+}
+
+/******************************************************************************/
+
+static struct proto pppolac_proto = {
+	.name = "PPPOLAC",
+	.owner = THIS_MODULE,
+	.obj_size = sizeof(struct pppox_sock),
+};
+
+static struct proto_ops pppolac_proto_ops = {
+	.family = PF_PPPOX,
+	.owner = THIS_MODULE,
+	.release = pppolac_release,
+	.bind = sock_no_bind,
+	.connect = pppolac_connect,
+	.socketpair = sock_no_socketpair,
+	.accept = sock_no_accept,
+	.getname = sock_no_getname,
+	.poll = sock_no_poll,
+	.ioctl = pppox_ioctl,
+	.listen = sock_no_listen,
+	.shutdown = sock_no_shutdown,
+	.setsockopt = sock_no_setsockopt,
+	.getsockopt = sock_no_getsockopt,
+	.sendmsg = sock_no_sendmsg,
+	.recvmsg = sock_no_recvmsg,
+	.mmap = sock_no_mmap,
+};
+
+static int pppolac_create(struct net *net, struct socket *sock, int kern)
+{
+	struct sock *sk;
+
+	sk = sk_alloc(net, PF_PPPOX, GFP_KERNEL, &pppolac_proto, kern);
+	if (!sk)
+		return -ENOMEM;
+
+	sock_init_data(sock, sk);
+	sock->state = SS_UNCONNECTED;
+	sock->ops = &pppolac_proto_ops;
+	sk->sk_protocol = PX_PROTO_OLAC;
+	sk->sk_state = PPPOX_NONE;
+	return 0;
+}
+
+/******************************************************************************/
+
+static struct pppox_proto pppolac_pppox_proto = {
+	.create = pppolac_create,
+	.owner = THIS_MODULE,
+};
+
+static int __init pppolac_init(void)
+{
+	int error;
+
+	error = proto_register(&pppolac_proto, 0);
+	if (error)
+		return error;
+
+	error = register_pppox_proto(PX_PROTO_OLAC, &pppolac_pppox_proto);
+	if (error)
+		proto_unregister(&pppolac_proto);
+	else
+		skb_queue_head_init(&delivery_queue);
+	return error;
+}
+
+static void __exit pppolac_exit(void)
+{
+	unregister_pppox_proto(PX_PROTO_OLAC);
+	proto_unregister(&pppolac_proto);
+}
+
+module_init(pppolac_init);
+module_exit(pppolac_exit);
+
+MODULE_DESCRIPTION("PPP on L2TP Access Concentrator (PPPoLAC)");
+MODULE_AUTHOR("Chia-chi Yeh <chiachi@android.com>");
+MODULE_LICENSE("GPL");
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/net/ppp/pppopns.c	2019-01-22 16:16:25.367263286 +0100
@@ -0,0 +1,429 @@
+/* drivers/net/pppopns.c
+ *
+ * Driver for PPP on PPTP Network Server / PPPoPNS Socket (RFC 2637)
+ *
+ * Copyright (C) 2009 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/* This driver handles PPTP data packets between a RAW socket and a PPP channel.
+ * The socket is created in kernel space and connected to the same address
+ * as the control socket. Outgoing packets are always sent with sequences
+ * but without acknowledgements. Incoming packets with sequences are
+ * reordered within a sliding window of one second. Currently, reordering
+ * only happens when a packet is received; this is done for simplicity since
+ * no additional locks or threads are required. This driver should work on
+ * both IPv4 and IPv6. */
+
+#include <linux/module.h>
+#include <linux/jiffies.h>
+#include <linux/workqueue.h>
+#include <linux/skbuff.h>
+#include <linux/file.h>
+#include <linux/netdevice.h>
+#include <linux/net.h>
+#include <linux/ppp_defs.h>
+#include <linux/if.h>
+#include <linux/if_ppp.h>
+#include <linux/if_pppox.h>
+#include <linux/ppp_channel.h>
+#include <asm/uaccess.h>
+
+#define GRE_HEADER_SIZE		8
+
+#define PPTP_GRE_BITS		htons(0x2001)
+#define PPTP_GRE_BITS_MASK	htons(0xEF7F)
+#define PPTP_GRE_SEQ_BIT	htons(0x1000)
+#define PPTP_GRE_ACK_BIT	htons(0x0080)
+#define PPTP_GRE_TYPE		htons(0x880B)
+
+#define PPP_ADDR	0xFF
+#define PPP_CTRL	0x03
+
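+/*
+ * Enhanced GRE header carried by PPTP data packets (RFC 2637): "call"
+ * holds the Call ID of the session, and "sequence" is present only when
+ * PPTP_GRE_SEQ_BIT is set in "bits".
+ */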
+struct header {
+	__u16	bits;
+	__u16	type;
+	__u16	length;
+	__u16	call;
+	__u32	sequence;
+} __attribute__((packed));
+
+struct meta {
+	__u32 sequence;
+	__u32 timestamp;
+};
+
+static inline struct meta *skb_meta(struct sk_buff *skb)
+{
+	return (struct meta *)skb->cb;
+}
+
+/******************************************************************************/
+
+static int pppopns_recv_core(struct sock *sk_raw, struct sk_buff *skb)
+{
+	struct sock *sk = (struct sock *)sk_raw->sk_user_data;
+	struct pppopns_opt *opt = &pppox_sk(sk)->proto.pns;
+	struct meta *meta = skb_meta(skb);
+	__u32 now = jiffies;
+	struct header *hdr;
+
+	/* Skip transport header */
+	skb_pull(skb, skb_transport_header(skb) - skb->data);
+
+	/* Drop the packet if GRE header is missing. */
+	if (skb->len < GRE_HEADER_SIZE)
+		goto drop;
+	hdr = (struct header *)skb->data;
+
+	/* Check the header. */
+	if (hdr->type != PPTP_GRE_TYPE || hdr->call != opt->local ||
+			(hdr->bits & PPTP_GRE_BITS_MASK) != PPTP_GRE_BITS)
+		goto drop;
+
+	/* Skip all fields including optional ones. */
+	if (!skb_pull(skb, GRE_HEADER_SIZE +
+			(hdr->bits & PPTP_GRE_SEQ_BIT ? 4 : 0) +
+			(hdr->bits & PPTP_GRE_ACK_BIT ? 4 : 0)))
+		goto drop;
+
+	/* Check the length. */
+	if (skb->len != ntohs(hdr->length))
+		goto drop;
+
+	/* Check the sequence if it is present. */
+	if (hdr->bits & PPTP_GRE_SEQ_BIT) {
+		meta->sequence = ntohl(hdr->sequence);
+		if ((__s32)(meta->sequence - opt->recv_sequence) < 0)
+			goto drop;
+	}
+
+	/* Skip PPP address and control if they are present. */
+	if (skb->len >= 2 && skb->data[0] == PPP_ADDR &&
+			skb->data[1] == PPP_CTRL)
+		skb_pull(skb, 2);
+
+	/* Fix PPP protocol if it is compressed. */
+	if (skb->len >= 1 && skb->data[0] & 1)
+		skb_push(skb, 1)[0] = 0;
+
+	/* Drop the packet if PPP protocol is missing. */
+	if (skb->len < 2)
+		goto drop;
+
+	/* Perform reordering if sequencing is enabled. */
+	if (hdr->bits & PPTP_GRE_SEQ_BIT) {
+		struct sk_buff *skb1;
+
+		/* Insert the packet into receive queue in order. */
+		skb_set_owner_r(skb, sk);
+		skb_queue_walk(&sk->sk_receive_queue, skb1) {
+			struct meta *meta1 = skb_meta(skb1);
+			__s32 order = meta->sequence - meta1->sequence;
+			if (order == 0)
+				goto drop;
+			if (order < 0) {
+				meta->timestamp = meta1->timestamp;
+				skb_insert(skb1, skb, &sk->sk_receive_queue);
+				skb = NULL;
+				break;
+			}
+		}
+		if (skb) {
+			meta->timestamp = now;
+			skb_queue_tail(&sk->sk_receive_queue, skb);
+		}
+
+		/* Remove packets from receive queue as long as
+		 * 1. the receive buffer is full,
+		 * 2. they are queued longer than one second, or
+		 * 3. there are no missing packets before them. */
+		skb_queue_walk_safe(&sk->sk_receive_queue, skb, skb1) {
+			meta = skb_meta(skb);
+			if (atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf &&
+					now - meta->timestamp < HZ &&
+					meta->sequence != opt->recv_sequence)
+				break;
+			skb_unlink(skb, &sk->sk_receive_queue);
+			opt->recv_sequence = meta->sequence + 1;
+			skb_orphan(skb);
+			ppp_input(&pppox_sk(sk)->chan, skb);
+		}
+		return NET_RX_SUCCESS;
+	}
+
+	/* Flush receive queue if sequencing is disabled. */
+	skb_queue_purge(&sk->sk_receive_queue);
+	skb_orphan(skb);
+	ppp_input(&pppox_sk(sk)->chan, skb);
+	return NET_RX_SUCCESS;
+drop:
+	kfree_skb(skb);
+	return NET_RX_DROP;
+}
+
+static void pppopns_recv(struct sock *sk_raw)
+{
+	struct sk_buff *skb;
+	while ((skb = skb_dequeue(&sk_raw->sk_receive_queue))) {
+		sock_hold(sk_raw);
+		sk_receive_skb(sk_raw, skb, 0);
+	}
+}
+
+static struct sk_buff_head delivery_queue;
+
+static void pppopns_xmit_core(struct work_struct *delivery_work)
+{
+	mm_segment_t old_fs = get_fs();
+	struct sk_buff *skb;
+
+	set_fs(KERNEL_DS);
+	while ((skb = skb_dequeue(&delivery_queue))) {
+		struct sock *sk_raw = skb->sk;
+		struct kvec iov = {.iov_base = skb->data, .iov_len = skb->len};
+		struct msghdr msg = {
+			.msg_flags = MSG_NOSIGNAL | MSG_DONTWAIT,
+		};
+
+		iov_iter_kvec(&msg.msg_iter, WRITE | ITER_KVEC, &iov, 1,
+			      skb->len);
+		sk_raw->sk_prot->sendmsg(sk_raw, &msg, skb->len);
+		kfree_skb(skb);
+	}
+	set_fs(old_fs);
+}
+
+static DECLARE_WORK(delivery_work, pppopns_xmit_core);
+
+static int pppopns_xmit(struct ppp_channel *chan, struct sk_buff *skb)
+{
+	struct sock *sk_raw = (struct sock *)chan->private;
+	struct pppopns_opt *opt = &pppox_sk(sk_raw->sk_user_data)->proto.pns;
+	struct header *hdr;
+	__u16 length;
+
+	/* Install PPP address and control. */
+	skb_push(skb, 2);
+	skb->data[0] = PPP_ADDR;
+	skb->data[1] = PPP_CTRL;
+	length = skb->len;
+
+	/* Install PPTP GRE header. */
+	hdr = (struct header *)skb_push(skb, 12);
+	hdr->bits = PPTP_GRE_BITS | PPTP_GRE_SEQ_BIT;
+	hdr->type = PPTP_GRE_TYPE;
+	hdr->length = htons(length);
+	hdr->call = opt->remote;
+	hdr->sequence = htonl(opt->xmit_sequence);
+	opt->xmit_sequence++;
+
+	/* Now send the packet via the delivery queue. */
+	skb_set_owner_w(skb, sk_raw);
+	skb_queue_tail(&delivery_queue, skb);
+	schedule_work(&delivery_work);
+	return 1;
+}
+
+/******************************************************************************/
+
+static struct ppp_channel_ops pppopns_channel_ops = {
+	.start_xmit = pppopns_xmit,
+};
+
+static int pppopns_connect(struct socket *sock, struct sockaddr *useraddr,
+	int addrlen, int flags)
+{
+	struct sock *sk = sock->sk;
+	struct pppox_sock *po = pppox_sk(sk);
+	struct sockaddr_pppopns *addr = (struct sockaddr_pppopns *)useraddr;
+	struct sockaddr_storage ss;
+	struct socket *sock_tcp = NULL;
+	struct socket *sock_raw = NULL;
+	struct sock *sk_tcp;
+	struct sock *sk_raw;
+	int error;
+
+	if (addrlen != sizeof(struct sockaddr_pppopns))
+		return -EINVAL;
+
+	lock_sock(sk);
+	error = -EALREADY;
+	if (sk->sk_state != PPPOX_NONE)
+		goto out;
+
+	sock_tcp = sockfd_lookup(addr->tcp_socket, &error);
+	if (!sock_tcp)
+		goto out;
+	sk_tcp = sock_tcp->sk;
+	error = -EPROTONOSUPPORT;
+	if (sk_tcp->sk_protocol != IPPROTO_TCP)
+		goto out;
+	addrlen = sizeof(struct sockaddr_storage);
+	error = kernel_getpeername(sock_tcp, (struct sockaddr *)&ss, &addrlen);
+	if (error)
+		goto out;
+	if (!sk_tcp->sk_bound_dev_if) {
+		struct dst_entry *dst = sk_dst_get(sk_tcp);
+		error = -ENODEV;
+		if (!dst)
+			goto out;
+		sk_tcp->sk_bound_dev_if = dst->dev->ifindex;
+		dst_release(dst);
+	}
+
+	error = sock_create(ss.ss_family, SOCK_RAW, IPPROTO_GRE, &sock_raw);
+	if (error)
+		goto out;
+	sk_raw = sock_raw->sk;
+	sk_raw->sk_bound_dev_if = sk_tcp->sk_bound_dev_if;
+	error = kernel_connect(sock_raw, (struct sockaddr *)&ss, addrlen, 0);
+	if (error)
+		goto out;
+
+	po->chan.hdrlen = 14;
+	po->chan.private = sk_raw;
+	po->chan.ops = &pppopns_channel_ops;
+	po->chan.mtu = PPP_MRU - 80;
+	po->proto.pns.local = addr->local;
+	po->proto.pns.remote = addr->remote;
+	po->proto.pns.data_ready = sk_raw->sk_data_ready;
+	po->proto.pns.backlog_rcv = sk_raw->sk_backlog_rcv;
+
+	error = ppp_register_channel(&po->chan);
+	if (error)
+		goto out;
+
+	sk->sk_state = PPPOX_CONNECTED;
+	lock_sock(sk_raw);
+	sk_raw->sk_data_ready = pppopns_recv;
+	sk_raw->sk_backlog_rcv = pppopns_recv_core;
+	sk_raw->sk_user_data = sk;
+	release_sock(sk_raw);
+out:
+	if (sock_tcp)
+		sockfd_put(sock_tcp);
+	if (error && sock_raw)
+		sock_release(sock_raw);
+	release_sock(sk);
+	return error;
+}
+
+static int pppopns_release(struct socket *sock)
+{
+	struct sock *sk = sock->sk;
+
+	if (!sk)
+		return 0;
+
+	lock_sock(sk);
+	if (sock_flag(sk, SOCK_DEAD)) {
+		release_sock(sk);
+		return -EBADF;
+	}
+
+	if (sk->sk_state != PPPOX_NONE) {
+		struct sock *sk_raw = (struct sock *)pppox_sk(sk)->chan.private;
+		lock_sock(sk_raw);
+		skb_queue_purge(&sk->sk_receive_queue);
+		pppox_unbind_sock(sk);
+		sk_raw->sk_data_ready = pppox_sk(sk)->proto.pns.data_ready;
+		sk_raw->sk_backlog_rcv = pppox_sk(sk)->proto.pns.backlog_rcv;
+		sk_raw->sk_user_data = NULL;
+		release_sock(sk_raw);
+		sock_release(sk_raw->sk_socket);
+	}
+
+	sock_orphan(sk);
+	sock->sk = NULL;
+	release_sock(sk);
+	sock_put(sk);
+	return 0;
+}
+
+/******************************************************************************/
+
+static struct proto pppopns_proto = {
+	.name = "PPPOPNS",
+	.owner = THIS_MODULE,
+	.obj_size = sizeof(struct pppox_sock),
+};
+
+static struct proto_ops pppopns_proto_ops = {
+	.family = PF_PPPOX,
+	.owner = THIS_MODULE,
+	.release = pppopns_release,
+	.bind = sock_no_bind,
+	.connect = pppopns_connect,
+	.socketpair = sock_no_socketpair,
+	.accept = sock_no_accept,
+	.getname = sock_no_getname,
+	.poll = sock_no_poll,
+	.ioctl = pppox_ioctl,
+	.listen = sock_no_listen,
+	.shutdown = sock_no_shutdown,
+	.setsockopt = sock_no_setsockopt,
+	.getsockopt = sock_no_getsockopt,
+	.sendmsg = sock_no_sendmsg,
+	.recvmsg = sock_no_recvmsg,
+	.mmap = sock_no_mmap,
+};
+
+static int pppopns_create(struct net *net, struct socket *sock, int kern)
+{
+	struct sock *sk;
+
+	sk = sk_alloc(net, PF_PPPOX, GFP_KERNEL, &pppopns_proto, kern);
+	if (!sk)
+		return -ENOMEM;
+
+	sock_init_data(sock, sk);
+	sock->state = SS_UNCONNECTED;
+	sock->ops = &pppopns_proto_ops;
+	sk->sk_protocol = PX_PROTO_OPNS;
+	sk->sk_state = PPPOX_NONE;
+	return 0;
+}
+
+/******************************************************************************/
+
+static struct pppox_proto pppopns_pppox_proto = {
+	.create = pppopns_create,
+	.owner = THIS_MODULE,
+};
+
+static int __init pppopns_init(void)
+{
+	int error;
+
+	error = proto_register(&pppopns_proto, 0);
+	if (error)
+		return error;
+
+	error = register_pppox_proto(PX_PROTO_OPNS, &pppopns_pppox_proto);
+	if (error)
+		proto_unregister(&pppopns_proto);
+	else
+		skb_queue_head_init(&delivery_queue);
+	return error;
+}
+
+static void __exit pppopns_exit(void)
+{
+	unregister_pppox_proto(PX_PROTO_OPNS);
+	proto_unregister(&pppopns_proto);
+}
+
+module_init(pppopns_init);
+module_exit(pppopns_exit);
+
+MODULE_DESCRIPTION("PPP on PPTP Network Server (PPPoPNS)");
+MODULE_AUTHOR("Chia-chi Yeh <chiachi@android.com>");
+MODULE_LICENSE("GPL");
diff -Nruw linux-4.4.115-fbx/drivers/net/rmnet./Kconfig linux-4.4.115-fbx/drivers/net/rmnet/Kconfig
--- linux-4.4.115-fbx/drivers/net/rmnet./Kconfig	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/net/rmnet/Kconfig	2019-01-22 16:16:25.367263286 +0100
@@ -0,0 +1,21 @@
+#
+# RMNET MAP driver
+#
+
+menuconfig RMNET
+	depends on NETDEVICES
+	bool "RmNet MAP driver"
+	---help---
+	  If you say Y here, then the rmnet module will be statically
+	  compiled into the kernel. The rmnet module provides MAP
+	  functionality for embedded and bridged traffic.
+if RMNET
+
+config RMNET_DEBUG
+	bool "RmNet Debug Logging"
+	---help---
+	  Say Y here if you want RmNet to be able to log packets in main
+	  system log. This should not be enabled on production builds as it can
+	  impact system performance. Note that simply enabling it here will not
+	  enable the logging; it must be enabled at run-time as well.
+endif # RMNET
diff -Nruw linux-4.4.115-fbx/drivers/net/rmnet./Makefile linux-4.4.115-fbx/drivers/net/rmnet/Makefile
--- linux-4.4.115-fbx/drivers/net/rmnet./Makefile	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/net/rmnet/Makefile	2019-01-22 16:16:25.367263286 +0100
@@ -0,0 +1,14 @@
+#
+# Makefile for the RMNET module
+#
+
+rmnet-y		 := rmnet_main.o
+rmnet-y		 += rmnet_config.o
+rmnet-y		 += rmnet_vnd.o
+rmnet-y		 += rmnet_handlers.o
+rmnet-y		 += rmnet_map_data.o
+rmnet-y		 += rmnet_map_command.o
+rmnet-y		 += rmnet_stats.o
+obj-$(CONFIG_RMNET) += rmnet.o
+
+CFLAGS_rmnet_main.o := -I$(src)
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/net/wireless/ath/ath10k/ahb.h	2019-01-22 16:16:25.411263685 +0100
@@ -0,0 +1,87 @@
+/*
+ * Copyright (c) 2016 Qualcomm Atheros, Inc. All rights reserved.
+ * Copyright (c) 2015 The Linux Foundation. All rights reserved.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _AHB_H_
+#define _AHB_H_
+
+#include <linux/platform_device.h>
+
+struct ath10k_ahb {
+	struct platform_device *pdev;
+	void __iomem *mem;
+	unsigned long mem_len;
+	void __iomem *gcc_mem;
+	void __iomem *tcsr_mem;
+
+	int irq;
+
+	struct clk *cmd_clk;
+	struct clk *ref_clk;
+	struct clk *rtc_clk;
+
+	struct reset_control *core_cold_rst;
+	struct reset_control *radio_cold_rst;
+	struct reset_control *radio_warm_rst;
+	struct reset_control *radio_srif_rst;
+	struct reset_control *cpu_init_rst;
+};
+
+#ifdef CONFIG_ATH10K_AHB
+
+#define ATH10K_GCC_REG_BASE                  0x1800000
+#define ATH10K_GCC_REG_SIZE                  0x60000
+
+#define ATH10K_TCSR_REG_BASE                 0x1900000
+#define ATH10K_TCSR_REG_SIZE                 0x80000
+
+#define ATH10K_AHB_GCC_FEPLL_PLL_DIV         0x2f020
+#define ATH10K_AHB_WIFI_SCRATCH_5_REG        0x4f014
+
+#define ATH10K_AHB_WLAN_CORE_ID_REG          0x82030
+
+#define ATH10K_AHB_TCSR_WIFI0_GLB_CFG        0x49000
+#define ATH10K_AHB_TCSR_WIFI1_GLB_CFG        0x49004
+#define TCSR_WIFIX_GLB_CFG_DISABLE_CORE_CLK  BIT(25)
+
+#define ATH10K_AHB_TCSR_WCSS0_HALTREQ        0x52000
+#define ATH10K_AHB_TCSR_WCSS1_HALTREQ        0x52010
+#define ATH10K_AHB_TCSR_WCSS0_HALTACK        0x52004
+#define ATH10K_AHB_TCSR_WCSS1_HALTACK        0x52014
+
+#define ATH10K_AHB_AXI_BUS_HALT_TIMEOUT      10 /* msec */
+#define AHB_AXI_BUS_HALT_REQ                 1
+#define AHB_AXI_BUS_HALT_ACK                 1
+
+#define ATH10K_AHB_CORE_CTRL_CPU_INTR_MASK   1
+
+int ath10k_ahb_init(void);
+void ath10k_ahb_exit(void);
+
+#else /* CONFIG_ATH10K_AHB */
+
+static inline int ath10k_ahb_init(void)
+{
+	return 0;
+}
+
+static inline void ath10k_ahb_exit(void)
+{
+}
+
+#endif /* CONFIG_ATH10K_AHB */
+
+#endif /* _AHB_H_ */
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/net/wireless/ath/ath10k/qmi.c	2019-01-22 16:16:25.423263793 +0100
@@ -0,0 +1,912 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#include <soc/qcom/subsystem_notif.h>
+#include <soc/qcom/subsystem_restart.h>
+#include <soc/qcom/service-notifier.h>
+#include <soc/qcom/msm_qmi_interface.h>
+#include <soc/qcom/icnss.h>
+#include <soc/qcom/service-locator.h>
+#include "core.h"
+#include "qmi.h"
+#include "snoc.h"
+#include "wcn3990_qmi_service_v01.h"
+
+static DECLARE_WAIT_QUEUE_HEAD(ath10k_fw_ready_wait_event);
+
+static int
+ath10k_snoc_service_notifier_notify(struct notifier_block *nb,
+				    unsigned long notification, void *data)
+{
+	struct ath10k_snoc *ar_snoc = container_of(nb, struct ath10k_snoc,
+					       service_notifier_nb);
+	enum pd_subsys_state *state = data;
+	struct ath10k *ar = ar_snoc->ar;
+	struct ath10k_snoc_qmi_config *qmi_cfg = &ar_snoc->qmi_cfg;
+	int ret;
+
+	switch (notification) {
+	case SERVREG_NOTIF_SERVICE_STATE_DOWN_V01:
+		ath10k_dbg(ar, ATH10K_DBG_SNOC, "Service down, data: 0x%pK\n",
+			   data);
+
+		if (!state || *state != ROOT_PD_SHUTDOWN) {
+			atomic_set(&ar_snoc->fw_crashed, 1);
+			atomic_set(&qmi_cfg->fw_ready, 0);
+		}
+
+		ath10k_dbg(ar, ATH10K_DBG_SNOC, "PD went down %d\n",
+			   atomic_read(&ar_snoc->fw_crashed));
+		break;
+	case SERVREG_NOTIF_SERVICE_STATE_UP_V01:
+		ath10k_dbg(ar, ATH10K_DBG_SNOC, "Service up\n");
+		ret = wait_event_timeout(
+			ath10k_fw_ready_wait_event,
+			(atomic_read(&qmi_cfg->fw_ready) &&
+			 atomic_read(&qmi_cfg->server_connected)),
+			msecs_to_jiffies(ATH10K_SNOC_WLAN_FW_READY_TIMEOUT));
+		if (ret) {
+			if (ar_snoc->drv_state != ATH10K_DRIVER_STATE_PROBED)
+				queue_work(ar->workqueue, &ar->restart_work);
+		} else {
+			ath10k_err(ar, "restart failed, fw_ready timed out\n");
+			return NOTIFY_OK;
+		}
+		break;
+	default:
+		ath10k_dbg(ar, ATH10K_DBG_SNOC,
+			   "Service state Unknown, notification: 0x%lx\n",
+			    notification);
+		return NOTIFY_DONE;
+	}
+	return NOTIFY_OK;
+}
+
+static int ath10k_snoc_get_service_location_notify(struct notifier_block *nb,
+						   unsigned long opcode,
+						   void *data)
+{
+	struct ath10k_snoc *ar_snoc = container_of(nb, struct ath10k_snoc,
+						   get_service_nb);
+	struct ath10k *ar = ar_snoc->ar;
+	struct pd_qmi_client_data *pd = data;
+	int curr_state;
+	int ret;
+	int i;
+	struct ath10k_service_notifier_context *notifier;
+
+	ath10k_dbg(ar, ATH10K_DBG_SNOC, "Get service notify opcode: %lu\n",
+		   opcode);
+
+	if (opcode != LOCATOR_UP)
+		return NOTIFY_DONE;
+
+	if (!pd->total_domains) {
+		ath10k_err(ar, "Did not find any domains\n");
+		ret = -ENOENT;
+		goto out;
+	}
+
+	notifier = kcalloc(pd->total_domains,
+			   sizeof(struct ath10k_service_notifier_context),
+			   GFP_KERNEL);
+	if (!notifier) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	ar_snoc->service_notifier_nb.notifier_call =
+					ath10k_snoc_service_notifier_notify;
+
+	for (i = 0; i < pd->total_domains; i++) {
+		ath10k_dbg(ar, ATH10K_DBG_SNOC,
+			   "%d: domain_name: %s, instance_id: %d\n", i,
+				   pd->domain_list[i].name,
+				   pd->domain_list[i].instance_id);
+
+		notifier[i].handle =
+			service_notif_register_notifier(
+					pd->domain_list[i].name,
+					pd->domain_list[i].instance_id,
+					&ar_snoc->service_notifier_nb,
+					&curr_state);
+		notifier[i].instance_id = pd->domain_list[i].instance_id;
+		strlcpy(notifier[i].name, pd->domain_list[i].name,
+			QMI_SERVREG_LOC_NAME_LENGTH_V01 + 1);
+
+		if (IS_ERR(notifier[i].handle)) {
+			ath10k_err(ar, "%d: Unable to register notifier for %s(0x%x)\n",
+				   i, pd->domain_list->name,
+				   pd->domain_list->instance_id);
+			ret = PTR_ERR(notifier[i].handle);
+			goto free_handle;
+		}
+	}
+
+	ar_snoc->service_notifier = notifier;
+	ar_snoc->total_domains = pd->total_domains;
+
+	ath10k_dbg(ar, ATH10K_DBG_SNOC, "PD restart enabled\n");
+
+	return NOTIFY_OK;
+
+free_handle:
+	for (i = 0; i < pd->total_domains; i++) {
+		if (notifier[i].handle) {
+			service_notif_unregister_notifier(
+						notifier[i].handle,
+						&ar_snoc->service_notifier_nb);
+		}
+	}
+	kfree(notifier);
+
+out:
+	ath10k_err(ar, "PD restart not enabled: %d\n", ret);
+
+	return NOTIFY_OK;
+}
+
+int ath10k_snoc_pd_restart_enable(struct ath10k *ar)
+{
+	int ret;
+	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
+
+	ath10k_dbg(ar, ATH10K_DBG_SNOC, "Get service location\n");
+
+	ar_snoc->get_service_nb.notifier_call =
+		ath10k_snoc_get_service_location_notify;
+	ret = get_service_location(ATH10K_SERVICE_LOCATION_CLIENT_NAME,
+				   ATH10K_WLAN_SERVICE_NAME,
+				   &ar_snoc->get_service_nb);
+	if (ret) {
+		ath10k_err(ar, "Get service location failed: %d\n", ret);
+		goto out;
+	}
+
+	return 0;
+out:
+	ath10k_err(ar, "PD restart not enabled: %d\n", ret);
+	return ret;
+}
+
+int ath10k_snoc_pdr_unregister_notifier(struct ath10k *ar)
+{
+	int i;
+	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
+
+	for (i = 0; i < ar_snoc->total_domains; i++) {
+		if (ar_snoc->service_notifier[i].handle)
+			service_notif_unregister_notifier(
+				ar_snoc->service_notifier[i].handle,
+				&ar_snoc->service_notifier_nb);
+	}
+
+	kfree(ar_snoc->service_notifier);
+
+	ar_snoc->service_notifier = NULL;
+
+	return 0;
+}
+
+static int ath10k_snoc_modem_notifier_nb(struct notifier_block *nb,
+					 unsigned long code,
+					 void *data)
+{
+	struct notif_data *notif = data;
+	struct ath10k_snoc *ar_snoc = container_of(nb, struct ath10k_snoc,
+						   modem_ssr_nb);
+	struct ath10k *ar = ar_snoc->ar;
+	struct ath10k_snoc_qmi_config *qmi_cfg = &ar_snoc->qmi_cfg;
+
+	if (code != SUBSYS_BEFORE_SHUTDOWN)
+		return NOTIFY_OK;
+
+	if (notif->crashed) {
+		atomic_set(&ar_snoc->fw_crashed, 1);
+		atomic_set(&qmi_cfg->fw_ready, 0);
+	}
+
+	ath10k_dbg(ar, ATH10K_DBG_SNOC, "Modem went down %d\n",
+		   atomic_read(&ar_snoc->fw_crashed));
+
+	return NOTIFY_OK;
+}
+
+int ath10k_snoc_modem_ssr_register_notifier(struct ath10k *ar)
+{
+	int ret = 0;
+	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
+
+	ar_snoc->modem_ssr_nb.notifier_call = ath10k_snoc_modem_notifier_nb;
+
+	ar_snoc->modem_notify_handler =
+		subsys_notif_register_notifier("modem", &ar_snoc->modem_ssr_nb);
+
+	if (IS_ERR(ar_snoc->modem_notify_handler)) {
+		ret = PTR_ERR(ar_snoc->modem_notify_handler);
+		ath10k_err(ar, "Modem register notifier failed: %d\n", ret);
+	}
+
+	return ret;
+}
+
+int ath10k_snoc_modem_ssr_unregister_notifier(struct ath10k *ar)
+{
+	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
+
+	subsys_notif_unregister_notifier(ar_snoc->modem_notify_handler,
+					 &ar_snoc->modem_ssr_nb);
+	ar_snoc->modem_notify_handler = NULL;
+
+	return 0;
+}
+
+static char *
+ath10k_snoc_driver_event_to_str(enum ath10k_snoc_driver_event_type type)
+{
+	switch (type) {
+	case ATH10K_SNOC_DRIVER_EVENT_SERVER_ARRIVE:
+		return "SERVER_ARRIVE";
+	case ATH10K_SNOC_DRIVER_EVENT_SERVER_EXIT:
+		return "SERVER_EXIT";
+	case ATH10K_SNOC_DRIVER_EVENT_FW_READY_IND:
+		return "FW_READY";
+	case ATH10K_SNOC_DRIVER_EVENT_MAX:
+		return "EVENT_MAX";
+	}
+
+	return "UNKNOWN";
+}
+
+static int
+ath10k_snoc_driver_event_post(enum ath10k_snoc_driver_event_type type,
+			      u32 flags, void *data)
+{
+	int ret = 0;
+	int i = 0;
+	unsigned long irq_flags;
+	struct ath10k *ar = (struct ath10k *)data;
+	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
+	struct ath10k_snoc_qmi_config *qmi_cfg = &ar_snoc->qmi_cfg;
+
+	ath10k_dbg(ar, ATH10K_DBG_SNOC, "Posting event: %s type: %d\n",
+		   ath10k_snoc_driver_event_to_str(type), type);
+
+	if (type >= ATH10K_SNOC_DRIVER_EVENT_MAX) {
+		ath10k_err(ar, "Invalid Event type: %d, can't post", type);
+		return -EINVAL;
+	}
+
+	spin_lock_irqsave(&qmi_cfg->event_lock, irq_flags);
+
+	for (i = 0; i < ATH10K_SNOC_DRIVER_EVENT_MAX; i++) {
+		if (atomic_read(&qmi_cfg->qmi_ev_list[i].event_handled)) {
+			qmi_cfg->qmi_ev_list[i].type = type;
+			qmi_cfg->qmi_ev_list[i].data = data;
+			init_completion(&qmi_cfg->qmi_ev_list[i].complete);
+			qmi_cfg->qmi_ev_list[i].ret =
+					ATH10K_SNOC_EVENT_PENDING;
+			qmi_cfg->qmi_ev_list[i].sync =
+					!!(flags & ATH10K_SNOC_EVENT_SYNC);
+			atomic_set(&qmi_cfg->qmi_ev_list[i].event_handled, 0);
+			list_add_tail(&qmi_cfg->qmi_ev_list[i].list,
+				      &qmi_cfg->event_list);
+			break;
+		}
+	}
+
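+	/* No free slot found: fall back to reusing the SERVER_ARRIVE slot. */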
+	if (i >= ATH10K_SNOC_DRIVER_EVENT_MAX)
+		i = ATH10K_SNOC_DRIVER_EVENT_SERVER_ARRIVE;
+
+	spin_unlock_irqrestore(&qmi_cfg->event_lock, irq_flags);
+
+	queue_work(qmi_cfg->event_wq, &qmi_cfg->event_work);
+
+	if (!(flags & ATH10K_SNOC_EVENT_SYNC))
+		goto out;
+
+	if (flags & ATH10K_SNOC_EVENT_UNINTERRUPTIBLE)
+		wait_for_completion(&qmi_cfg->qmi_ev_list[i].complete);
+	else
+		ret = wait_for_completion_interruptible(
+			&qmi_cfg->qmi_ev_list[i].complete);
+
+	ath10k_dbg(ar, ATH10K_DBG_SNOC, "Completed event: %s(%d)\n",
+		   ath10k_snoc_driver_event_to_str(type), type);
+
+	spin_lock_irqsave(&qmi_cfg->event_lock, irq_flags);
+	if (ret == -ERESTARTSYS &&
+	    qmi_cfg->qmi_ev_list[i].ret == ATH10K_SNOC_EVENT_PENDING) {
+		qmi_cfg->qmi_ev_list[i].sync = false;
+		atomic_set(&qmi_cfg->qmi_ev_list[i].event_handled, 1);
+		spin_unlock_irqrestore(&qmi_cfg->event_lock, irq_flags);
+		ret = -EINTR;
+		goto out;
+	}
+	spin_unlock_irqrestore(&qmi_cfg->event_lock, irq_flags);
+
+out:
+	return ret;
+}
+
+static int
+ath10k_snoc_wlan_mode_send_sync_msg(struct ath10k *ar,
+				    enum wlfw_driver_mode_enum_v01 mode)
+{
+	int ret;
+	struct wlfw_wlan_mode_req_msg_v01 req;
+	struct wlfw_wlan_mode_resp_msg_v01 resp;
+	struct msg_desc req_desc, resp_desc;
+	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
+	struct ath10k_snoc_qmi_config *qmi_cfg = &ar_snoc->qmi_cfg;
+
+	if (!qmi_cfg || !qmi_cfg->wlfw_clnt)
+		return -ENODEV;
+
+	ath10k_dbg(ar, ATH10K_DBG_SNOC,
+		   "Sending Mode request, mode: %d\n", mode);
+
+	memset(&req, 0, sizeof(req));
+	memset(&resp, 0, sizeof(resp));
+
+	req.mode = mode;
+	req.hw_debug_valid = 1;
+	req.hw_debug = 0;
+
+	req_desc.max_msg_len = WLFW_WLAN_MODE_REQ_MSG_V01_MAX_MSG_LEN;
+	req_desc.msg_id = QMI_WLFW_WLAN_MODE_REQ_V01;
+	req_desc.ei_array = wlfw_wlan_mode_req_msg_v01_ei;
+
+	resp_desc.max_msg_len = WLFW_WLAN_MODE_RESP_MSG_V01_MAX_MSG_LEN;
+	resp_desc.msg_id = QMI_WLFW_WLAN_MODE_RESP_V01;
+	resp_desc.ei_array = wlfw_wlan_mode_resp_msg_v01_ei;
+
+	ret = qmi_send_req_wait(qmi_cfg->wlfw_clnt,
+				&req_desc, &req, sizeof(req),
+				&resp_desc, &resp, sizeof(resp),
+				WLFW_TIMEOUT_MS);
+	if (ret < 0) {
+		ath10k_err(ar, "Send mode req failed, mode: %d ret: %d\n",
+			   mode, ret);
+		return ret;
+	}
+
+	if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
+		ath10k_err(ar, "QMI mode request rejected:");
+		ath10k_err(ar, "mode:%d result:%d error:%d\n",
+			   mode, resp.resp.result, resp.resp.error);
+		ret = resp.resp.result;
+		return ret;
+	}
+
+	ath10k_dbg(ar, ATH10K_DBG_SNOC,
+		   "wlan Mode request send success, mode: %d\n", mode);
+	return 0;
+}
+
+static int
+ath10k_snoc_wlan_cfg_send_sync_msg(struct ath10k *ar,
+				   struct wlfw_wlan_cfg_req_msg_v01 *data)
+{
+	int ret;
+	struct wlfw_wlan_cfg_req_msg_v01 req;
+	struct wlfw_wlan_cfg_resp_msg_v01 resp;
+	struct msg_desc req_desc, resp_desc;
+	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
+	struct ath10k_snoc_qmi_config *qmi_cfg = &ar_snoc->qmi_cfg;
+
+	if (!qmi_cfg || !qmi_cfg->wlfw_clnt)
+		return -ENODEV;
+
+	ath10k_dbg(ar, ATH10K_DBG_SNOC, "Sending config request\n");
+
+	memset(&req, 0, sizeof(req));
+	memset(&resp, 0, sizeof(resp));
+	memcpy(&req, data, sizeof(req));
+
+	req_desc.max_msg_len = WLFW_WLAN_CFG_REQ_MSG_V01_MAX_MSG_LEN;
+	req_desc.msg_id = QMI_WLFW_WLAN_CFG_REQ_V01;
+	req_desc.ei_array = wlfw_wlan_cfg_req_msg_v01_ei;
+
+	resp_desc.max_msg_len = WLFW_WLAN_CFG_RESP_MSG_V01_MAX_MSG_LEN;
+	resp_desc.msg_id = QMI_WLFW_WLAN_CFG_RESP_V01;
+	resp_desc.ei_array = wlfw_wlan_cfg_resp_msg_v01_ei;
+
+	ret = qmi_send_req_wait(qmi_cfg->wlfw_clnt,
+				&req_desc, &req, sizeof(req),
+				&resp_desc, &resp, sizeof(resp),
+				WLFW_TIMEOUT_MS);
+	if (ret < 0) {
+		ath10k_err(ar, "Send config req failed %d\n", ret);
+		return ret;
+	}
+
+	if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
+		ath10k_err(ar, "QMI config request rejected:");
+		ath10k_err(ar, "result:%d error:%d\n",
+			   resp.resp.result, resp.resp.error);
+		ret = resp.resp.result;
+		return ret;
+	}
+
+	ath10k_dbg(ar, ATH10K_DBG_SNOC, "wlan config request success..\n");
+	return 0;
+}
+
+int ath10k_snoc_qmi_wlan_enable(struct ath10k *ar,
+				struct ath10k_wlan_enable_cfg *config,
+				enum ath10k_driver_mode mode,
+				const char *host_version)
+{
+	struct wlfw_wlan_cfg_req_msg_v01 req;
+	u32 i;
+	int ret;
+	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
+	struct ath10k_snoc_qmi_config *qmi_cfg = &ar_snoc->qmi_cfg;
+	unsigned long time_left;
+
+	ath10k_dbg(ar, ATH10K_DBG_SNOC,
+		   "Mode: %d, config: %p, host_version: %s\n",
+		   mode, config, host_version);
+
+	memset(&req, 0, sizeof(req));
+	if (!config || !host_version) {
+		ath10k_err(ar, "WLAN_EN Config Invalid:%p: host_version:%p\n",
+			   config, host_version);
+		ret = -EINVAL;
+		return ret;
+	}
+
+	time_left = wait_event_timeout(
+			   ath10k_fw_ready_wait_event,
+			   (atomic_read(&qmi_cfg->fw_ready) &&
+			    atomic_read(&qmi_cfg->server_connected)),
+			   msecs_to_jiffies(ATH10K_SNOC_WLAN_FW_READY_TIMEOUT));
+	if (time_left == 0) {
+		ath10k_err(ar, "Wait for FW ready and server connect timed out\n");
+		return -ETIMEDOUT;
+	}
+
+	req.host_version_valid = 1;
+	strlcpy(req.host_version, host_version,
+		QMI_WLFW_MAX_STR_LEN_V01 + 1);
+
+	req.tgt_cfg_valid = 1;
+	if (config->num_ce_tgt_cfg > QMI_WLFW_MAX_NUM_CE_V01)
+		req.tgt_cfg_len = QMI_WLFW_MAX_NUM_CE_V01;
+	else
+		req.tgt_cfg_len = config->num_ce_tgt_cfg;
+	for (i = 0; i < req.tgt_cfg_len; i++) {
+		req.tgt_cfg[i].pipe_num = config->ce_tgt_cfg[i].pipe_num;
+		req.tgt_cfg[i].pipe_dir = config->ce_tgt_cfg[i].pipe_dir;
+		req.tgt_cfg[i].nentries = config->ce_tgt_cfg[i].nentries;
+		req.tgt_cfg[i].nbytes_max = config->ce_tgt_cfg[i].nbytes_max;
+		req.tgt_cfg[i].flags = config->ce_tgt_cfg[i].flags;
+	}
+
+	req.svc_cfg_valid = 1;
+	if (config->num_ce_svc_pipe_cfg > QMI_WLFW_MAX_NUM_SVC_V01)
+		req.svc_cfg_len = QMI_WLFW_MAX_NUM_SVC_V01;
+	else
+		req.svc_cfg_len = config->num_ce_svc_pipe_cfg;
+	for (i = 0; i < req.svc_cfg_len; i++) {
+		req.svc_cfg[i].service_id = config->ce_svc_cfg[i].service_id;
+		req.svc_cfg[i].pipe_dir = config->ce_svc_cfg[i].pipe_dir;
+		req.svc_cfg[i].pipe_num = config->ce_svc_cfg[i].pipe_num;
+	}
+
+	req.shadow_reg_valid = 1;
+	if (config->num_shadow_reg_cfg >
+	    QMI_WLFW_MAX_NUM_SHADOW_REG_V01)
+		req.shadow_reg_len = QMI_WLFW_MAX_NUM_SHADOW_REG_V01;
+	else
+		req.shadow_reg_len = config->num_shadow_reg_cfg;
+
+	memcpy(req.shadow_reg, config->shadow_reg_cfg,
+	       sizeof(struct wlfw_shadow_reg_cfg_s_v01) * req.shadow_reg_len);
+
+	ret = ath10k_snoc_wlan_cfg_send_sync_msg(ar, &req);
+	if (ret) {
+		ath10k_err(ar, "WLAN config send failed\n");
+		return ret;
+	}
+
+	ret = ath10k_snoc_wlan_mode_send_sync_msg(ar, mode);
+	if (ret) {
+		ath10k_err(ar, "WLAN mode send failed\n");
+		return ret;
+	}
+
+	return 0;
+}
+
+int ath10k_snoc_qmi_wlan_disable(struct ath10k *ar)
+{
+	return ath10k_snoc_wlan_mode_send_sync_msg(ar, QMI_WLFW_OFF_V01);
+}
+
+static int ath10k_snoc_ind_register_send_sync_msg(struct ath10k *ar)
+{
+	int ret;
+	struct wlfw_ind_register_req_msg_v01 req;
+	struct wlfw_ind_register_resp_msg_v01 resp;
+	struct msg_desc req_desc, resp_desc;
+	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
+	struct ath10k_snoc_qmi_config *qmi_cfg = &ar_snoc->qmi_cfg;
+
+	ath10k_dbg(ar, ATH10K_DBG_SNOC,
+		   "Sending indication register message,\n");
+
+	memset(&req, 0, sizeof(req));
+	memset(&resp, 0, sizeof(resp));
+
+	req.client_id_valid = 1;
+	req.client_id = WLFW_CLIENT_ID;
+	req.fw_ready_enable_valid = 1;
+	req.fw_ready_enable = 1;
+	req.msa_ready_enable_valid = 1;
+	req.msa_ready_enable = 1;
+
+	req_desc.max_msg_len = WLFW_IND_REGISTER_REQ_MSG_V01_MAX_MSG_LEN;
+	req_desc.msg_id = QMI_WLFW_IND_REGISTER_REQ_V01;
+	req_desc.ei_array = wlfw_ind_register_req_msg_v01_ei;
+
+	resp_desc.max_msg_len = WLFW_IND_REGISTER_RESP_MSG_V01_MAX_MSG_LEN;
+	resp_desc.msg_id = QMI_WLFW_IND_REGISTER_RESP_V01;
+	resp_desc.ei_array = wlfw_ind_register_resp_msg_v01_ei;
+
+	ret = qmi_send_req_wait(qmi_cfg->wlfw_clnt,
+				&req_desc, &req, sizeof(req),
+				&resp_desc, &resp, sizeof(resp),
+				WLFW_TIMEOUT_MS);
+	if (ret < 0) {
+		ath10k_err(ar, "Send indication register req failed %d\n", ret);
+		return ret;
+	}
+
+	if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
+		ath10k_err(ar, "QMI indication register request rejected:");
+		ath10k_err(ar, "resut:%d error:%d\n",
+			   resp.resp.result, resp.resp.error);
+		ret = resp.resp.result;
+		return ret;
+	}
+
+	return 0;
+}
+
+static void ath10k_snoc_qmi_wlfw_clnt_notify_work(struct work_struct *work)
+{
+	int ret;
+	struct ath10k_snoc_qmi_config *qmi_cfg =
+		container_of(work, struct ath10k_snoc_qmi_config,
+			     qmi_recv_msg_work);
+	struct ath10k_snoc *ar_snoc =
+		container_of(qmi_cfg, struct ath10k_snoc, qmi_cfg);
+	struct ath10k *ar = ar_snoc->ar;
+
+	ath10k_dbg(ar, ATH10K_DBG_SNOC,
+		   "Receiving Event in work queue context\n");
+
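+	/*
+	 * Drain all queued QMI messages; qmi_recv_msg() returns -ENOMSG
+	 * once the queue is empty.
+	 */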
+	do {
+	} while ((ret = qmi_recv_msg(qmi_cfg->wlfw_clnt)) == 0);
+
+	if (ret != -ENOMSG)
+		ath10k_err(ar, "Error receiving message: %d\n", ret);
+
+	ath10k_dbg(ar, ATH10K_DBG_SNOC, "Receiving Event completed\n");
+}
+
+static void
+ath10k_snoc_qmi_wlfw_clnt_notify(struct qmi_handle *handle,
+				 enum qmi_event_type event,
+				 void *notify_priv)
+{
+	struct ath10k_snoc_qmi_config *qmi_cfg =
+		(struct ath10k_snoc_qmi_config *)notify_priv;
+	struct ath10k_snoc *ar_snoc;
+	struct ath10k *ar;
+
+	/* Validate the private data before it is dereferenced below. */
+	if (!qmi_cfg || !qmi_cfg->wlfw_clnt)
+		return;
+
+	ar_snoc = container_of(qmi_cfg, struct ath10k_snoc, qmi_cfg);
+	ar = ar_snoc->ar;
+
+	ath10k_dbg(ar, ATH10K_DBG_SNOC, "QMI client notify: %d\n", event);
+
+	switch (event) {
+	case QMI_RECV_MSG:
+		schedule_work(&qmi_cfg->qmi_recv_msg_work);
+		break;
+	default:
+		ath10k_dbg(ar, ATH10K_DBG_SNOC, "Unknown Event: %d\n", event);
+		break;
+	}
+}
+
+static void
+ath10k_snoc_qmi_wlfw_clnt_ind(struct qmi_handle *handle,
+			      unsigned int msg_id, void *msg,
+			      unsigned int msg_len, void *ind_cb_priv)
+{
+	struct ath10k_snoc_qmi_config *qmi_cfg =
+		(struct ath10k_snoc_qmi_config *)ind_cb_priv;
+	struct ath10k_snoc *ar_snoc =
+		container_of(qmi_cfg, struct ath10k_snoc, qmi_cfg);
+	struct ath10k *ar = ar_snoc->ar;
+
+	ath10k_dbg(ar, ATH10K_DBG_SNOC,
+		   "Received Ind 0x%x, msg_len: %d\n", msg_id, msg_len);
+	switch (msg_id) {
+	case QMI_WLFW_FW_READY_IND_V01:
+		ath10k_snoc_driver_event_post(
+			ATH10K_SNOC_DRIVER_EVENT_FW_READY_IND, 0, ar);
+		break;
+	case QMI_WLFW_MSA_READY_IND_V01:
+		qmi_cfg->msa_ready = true;
+		ath10k_dbg(ar, ATH10K_DBG_SNOC,
+			   "Received MSA Ready, ind = 0x%x\n", msg_id);
+		break;
+	default:
+		ath10k_err(ar, "Invalid msg_id 0x%x\n", msg_id);
+		break;
+	}
+}
+
+static int ath10k_snoc_driver_event_server_arrive(struct ath10k *ar)
+{
+	int ret = 0;
+	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
+	struct ath10k_snoc_qmi_config *qmi_cfg = &ar_snoc->qmi_cfg;
+
+	if (!qmi_cfg)
+		return -ENODEV;
+
+	qmi_cfg->wlfw_clnt = qmi_handle_create(
+			ath10k_snoc_qmi_wlfw_clnt_notify, qmi_cfg);
+	if (!qmi_cfg->wlfw_clnt) {
+		ath10k_dbg(ar, ATH10K_DBG_SNOC,
+			   "QMI client handle create failed\n");
+		return -ENOMEM;
+	}
+
+	ret = qmi_connect_to_service(qmi_cfg->wlfw_clnt,
+				     WLFW_SERVICE_ID_V01,
+				     WLFW_SERVICE_VERS_V01,
+				     WLFW_SERVICE_INS_ID_V01);
+	if (ret < 0) {
+		ath10k_err(ar, "QMI WLAN Service not found : %d\n", ret);
+		goto err_qmi_config;
+	}
+
+	ret = qmi_register_ind_cb(qmi_cfg->wlfw_clnt,
+				  ath10k_snoc_qmi_wlfw_clnt_ind, qmi_cfg);
+	if (ret < 0) {
+		ath10k_err(ar, "Failed to register indication callback: %d\n",
+			   ret);
+		goto err_qmi_config;
+	}
+
+	ret = ath10k_snoc_ind_register_send_sync_msg(ar);
+	if (ret) {
+		ath10k_err(ar, "Failed to config qmi ind register\n");
+		goto err_qmi_config;
+	}
+
+	atomic_set(&qmi_cfg->server_connected, 1);
+	wake_up_all(&ath10k_fw_ready_wait_event);
+	ath10k_dbg(ar, ATH10K_DBG_SNOC,
+		   "QMI Server Arrive Configuration Success\n");
+	return 0;
+
+err_qmi_config:
+	qmi_handle_destroy(qmi_cfg->wlfw_clnt);
+	qmi_cfg->wlfw_clnt = NULL;
+	return ret;
+}
+
+static int ath10k_snoc_driver_event_server_exit(struct ath10k *ar)
+{
+	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
+	struct ath10k_snoc_qmi_config *qmi_cfg = &ar_snoc->qmi_cfg;
+
+	ath10k_dbg(ar, ATH10K_DBG_SNOC, "QMI Server Exit event received\n");
+	atomic_set(&qmi_cfg->fw_ready, 0);
+	qmi_cfg->msa_ready = false;
+	atomic_set(&qmi_cfg->server_connected, 0);
+	qmi_handle_destroy(qmi_cfg->wlfw_clnt);
+	return 0;
+}
+
+static int ath10k_snoc_driver_event_fw_ready_ind(struct ath10k *ar)
+{
+	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
+	struct ath10k_snoc_qmi_config *qmi_cfg = &ar_snoc->qmi_cfg;
+	int ret;
+
+	ath10k_dbg(ar, ATH10K_DBG_SNOC, "FW Ready event received.\n");
+	atomic_set(&qmi_cfg->fw_ready, 1);
+	if (ar_snoc->drv_state == ATH10K_DRIVER_STATE_PROBED) {
+		ret = ath10k_core_register(ar,
+					   ar_snoc->target_info.soc_version);
+		if (ret) {
+			ath10k_err(ar,
+				   "failed to register driver core: %d\n",
+				   ret);
+			return 0;
+		}
+		ar_snoc->drv_state = ATH10K_DRIVER_STATE_STARTED;
+	}
+	wake_up_all(&ath10k_fw_ready_wait_event);
+
+	return 0;
+}
+
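+/* Single worker that serializes driver events; event_lock is dropped
+ * while the per-event handler runs so new events can still be queued.
+ */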
+static void ath10k_snoc_driver_event_work(struct work_struct *work)
+{
+	int ret;
+	unsigned long irq_flags;
+	struct ath10k_snoc_qmi_driver_event *event;
+	struct ath10k_snoc_qmi_config *qmi_cfg =
+		container_of(work, struct ath10k_snoc_qmi_config, event_work);
+	struct ath10k_snoc *ar_snoc =
+		container_of(qmi_cfg, struct ath10k_snoc, qmi_cfg);
+	struct ath10k *ar = ar_snoc->ar;
+
+	spin_lock_irqsave(&qmi_cfg->event_lock, irq_flags);
+
+	while (!list_empty(&qmi_cfg->event_list)) {
+		event = list_first_entry(&qmi_cfg->event_list,
+					 struct ath10k_snoc_qmi_driver_event,
+					 list);
+		list_del(&event->list);
+		spin_unlock_irqrestore(&qmi_cfg->event_lock, irq_flags);
+
+		ath10k_dbg(ar, ATH10K_DBG_SNOC, "Processing event: %s%s(%d)\n",
+			   ath10k_snoc_driver_event_to_str(event->type),
+			   event->sync ? "-sync" : "", event->type);
+
+		switch (event->type) {
+		case ATH10K_SNOC_DRIVER_EVENT_SERVER_ARRIVE:
+			ret = ath10k_snoc_driver_event_server_arrive(ar);
+			break;
+		case ATH10K_SNOC_DRIVER_EVENT_SERVER_EXIT:
+			ret = ath10k_snoc_driver_event_server_exit(ar);
+			break;
+		case ATH10K_SNOC_DRIVER_EVENT_FW_READY_IND:
+			ret = ath10k_snoc_driver_event_fw_ready_ind(ar);
+			break;
+		default:
+			ath10k_err(ar, "Invalid Event type: %d\n", event->type);
+			kfree(event);
+			spin_lock_irqsave(&qmi_cfg->event_lock, irq_flags);
+			continue;
+		}
+
+		atomic_set(&event->event_handled, 1);
+		ath10k_dbg(ar, ATH10K_DBG_SNOC,
+			   "Event Processed: %s%s(%d), ret: %d\n",
+			   ath10k_snoc_driver_event_to_str(event->type),
+			   event->sync ? "-sync" : "", event->type, ret);
+		spin_lock_irqsave(&qmi_cfg->event_lock, irq_flags);
+		if (event->sync) {
+			event->ret = ret;
+			complete(&event->complete);
+			continue;
+		}
+	}
+
+	spin_unlock_irqrestore(&qmi_cfg->event_lock, irq_flags);
+}
+
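+/* Service event notifier: posts server arrive/exit to the driver
+ * event queue rather than handling them in notifier context.
+ */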
+static int
+ath10k_snoc_qmi_wlfw_clnt_svc_event_notify(struct notifier_block *this,
+					   unsigned long code,
+					   void *_cmd)
+{
+	int ret = 0;
+	struct ath10k_snoc_qmi_config *qmi_cfg =
+		container_of(this, struct ath10k_snoc_qmi_config, wlfw_clnt_nb);
+	struct ath10k_snoc *ar_snoc =
+			container_of(qmi_cfg, struct ath10k_snoc, qmi_cfg);
+	struct ath10k *ar = ar_snoc->ar;
+
+	ath10k_dbg(ar, ATH10K_DBG_SNOC, "Event Notify: code: %ld", code);
+
+	switch (code) {
+	case QMI_SERVER_ARRIVE:
+		ret = ath10k_snoc_driver_event_post(
+			ATH10K_SNOC_DRIVER_EVENT_SERVER_ARRIVE, 0, ar);
+		break;
+	case QMI_SERVER_EXIT:
+		ret = ath10k_snoc_driver_event_post(
+			ATH10K_SNOC_DRIVER_EVENT_SERVER_EXIT, 0, ar);
+		break;
+	default:
+		ath10k_err(ar, "Invalid code: %ld", code);
+		break;
+	}
+
+	return ret;
+}
+
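+/* Bring up the QMI side: allocate the event workqueue, init the
+ * event list and register for WLFW service arrive/exit notifications.
+ */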
+int ath10k_snoc_start_qmi_service(struct ath10k *ar)
+{
+	int ret;
+	int i;
+	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
+	struct ath10k_snoc_qmi_config *qmi_cfg = &ar_snoc->qmi_cfg;
+
+	qmi_cfg->event_wq = alloc_workqueue("ath10k_snoc_driver_event",
+					    WQ_UNBOUND, 1);
+	if (!qmi_cfg->event_wq) {
+		ath10k_err(ar, "Workqueue creation failed\n");
+		return -ENOMEM;
+	}
+
+	spin_lock_init(&qmi_cfg->event_lock);
+	atomic_set(&qmi_cfg->fw_ready, 0);
+	atomic_set(&qmi_cfg->server_connected, 0);
+
+	INIT_WORK(&qmi_cfg->event_work, ath10k_snoc_driver_event_work);
+	INIT_WORK(&qmi_cfg->qmi_recv_msg_work,
+		  ath10k_snoc_qmi_wlfw_clnt_notify_work);
+	INIT_LIST_HEAD(&qmi_cfg->event_list);
+
+	for (i = 0; i < ATH10K_SNOC_DRIVER_EVENT_MAX; i++)
+		atomic_set(&qmi_cfg->qmi_ev_list[i].event_handled, 1);
+
+	qmi_cfg->wlfw_clnt_nb.notifier_call =
+		ath10k_snoc_qmi_wlfw_clnt_svc_event_notify;
+	ret = qmi_svc_event_notifier_register(WLFW_SERVICE_ID_V01,
+					      WLFW_SERVICE_VERS_V01,
+					      WLFW_SERVICE_INS_ID_V01,
+					      &qmi_cfg->wlfw_clnt_nb);
+	if (ret < 0) {
+		ath10k_err(ar, "Notifier register failed: %d\n", ret);
+		goto out_destroy_wq;
+	}
+
+	if (icnss_is_fw_ready())
+		atomic_set(&qmi_cfg->fw_ready, 1);
+	else
+		ath10k_dbg(ar, ATH10K_DBG_SNOC,
+			   "FW ready indication not received yet\n");
+
+	ath10k_dbg(ar, ATH10K_DBG_SNOC, "QMI service started successfully\n");
+	return 0;
+
+out_destroy_wq:
+	destroy_workqueue(qmi_cfg->event_wq);
+	return ret;
+}
+
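+/* Tear down everything set up by ath10k_snoc_start_qmi_service() */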
+void ath10k_snoc_stop_qmi_service(struct ath10k *ar)
+{
+	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
+	struct ath10k_snoc_qmi_config *qmi_cfg = &ar_snoc->qmi_cfg;
+
+	ath10k_dbg(ar, ATH10K_DBG_SNOC, "Removing QMI service..\n");
+
+	wake_up_all(&ath10k_fw_ready_wait_event);
+	cancel_work_sync(&qmi_cfg->event_work);
+	cancel_work_sync(&qmi_cfg->qmi_recv_msg_work);
+	qmi_svc_event_notifier_unregister(WLFW_SERVICE_ID_V01,
+					  WLFW_SERVICE_VERS_V01,
+					  WLFW_SERVICE_INS_ID_V01,
+					  &qmi_cfg->wlfw_clnt_nb);
+	destroy_workqueue(qmi_cfg->event_wq);
+	qmi_handle_destroy(qmi_cfg->wlfw_clnt);
+	qmi_cfg->wlfw_clnt = NULL;
+}
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/net/wireless/ath/ath10k/qmi.h	2019-01-22 16:16:25.423263793 +0100
@@ -0,0 +1,156 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#ifndef _QMI_H_
+#define _QMI_H_
+
+#define ATH10K_SNOC_EVENT_PENDING		2989
+#define ATH10K_SNOC_EVENT_SYNC			BIT(0)
+#define ATH10K_SNOC_EVENT_UNINTERRUPTIBLE	BIT(1)
+#define ATH10K_SNOC_WLAN_FW_READY_TIMEOUT	8000
+
+#define WLFW_SERVICE_INS_ID_V01		0
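+/* WLFW_CLIENT_ID spells "ATHQ" in ASCII */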
+#define WLFW_CLIENT_ID			0x41544851
+#define WLFW_TIMEOUT_MS			20000
+
+enum ath10k_snoc_driver_event_type {
+	ATH10K_SNOC_DRIVER_EVENT_SERVER_ARRIVE,
+	ATH10K_SNOC_DRIVER_EVENT_SERVER_EXIT,
+	ATH10K_SNOC_DRIVER_EVENT_FW_READY_IND,
+	ATH10K_SNOC_DRIVER_EVENT_MAX,
+};
+
+/* enum ath10k_driver_mode: ath10k driver mode
+ * @ATH10K_MISSION: mission mode
+ * @ATH10K_FTM: ftm mode
+ * @ATH10K_EPPING: epping mode
+ * @ATH10K_OFF: off mode
+ */
+enum ath10k_driver_mode {
+	ATH10K_MISSION,
+	ATH10K_FTM,
+	ATH10K_EPPING,
+	ATH10K_OFF
+};
+
+/* struct ath10k_ce_tgt_pipe_cfg: target pipe configuration
+ * @pipe_num: pipe number
+ * @pipe_dir: pipe direction
+ * @nentries: entries in pipe
+ * @nbytes_max: pipe max size
+ * @flags: pipe flags
+ * @reserved: reserved
+ */
+struct ath10k_ce_tgt_pipe_cfg {
+	u32 pipe_num;
+	u32 pipe_dir;
+	u32 nentries;
+	u32 nbytes_max;
+	u32 flags;
+	u32 reserved;
+};
+
+/* struct ath10k_ce_svc_pipe_cfg: service pipe configuration
+ * @service_id: service id
+ * @pipe_dir: pipe direction
+ * @pipe_num: pipe number
+ */
+struct ath10k_ce_svc_pipe_cfg {
+	u32 service_id;
+	u32 pipe_dir;
+	u32 pipe_num;
+};
+
+/* struct ath10k_shadow_reg_cfg: shadow register configuration
+ * @ce_id: copy engine id
+ * @reg_offset: offset to copy engine
+ */
+struct ath10k_shadow_reg_cfg {
+	u16 ce_id;
+	u16 reg_offset;
+};
+
+/* struct ath10k_wlan_enable_cfg: wlan enable configuration
+ * @num_ce_tgt_cfg: number of CE target configurations
+ * @ce_tgt_cfg: target CE configuration
+ * @num_ce_svc_pipe_cfg: number of CE service configurations
+ * @ce_svc_cfg: CE service configuration
+ * @num_shadow_reg_cfg: number of shadow registers
+ * @shadow_reg_cfg: shadow register configuration
+ */
+struct ath10k_wlan_enable_cfg {
+	u32 num_ce_tgt_cfg;
+	struct ath10k_ce_tgt_pipe_cfg *ce_tgt_cfg;
+	u32 num_ce_svc_pipe_cfg;
+	struct ath10k_ce_svc_pipe_cfg *ce_svc_cfg;
+	u32 num_shadow_reg_cfg;
+	struct ath10k_shadow_reg_cfg *shadow_reg_cfg;
+};
+
+/* struct ath10k_snoc_qmi_driver_event: qmi driver event
+ * @event_handled: event handled by event work handler
+ * @sync: event synced
+ * @ret: event received return value
+ * @list: list to queue qmi event for process
+ * @type: driver event type
+ * @complete: completion for event handle complete
+ * @data: encapsulate driver data for event handler callback
+ */
+struct ath10k_snoc_qmi_driver_event {
+	atomic_t event_handled;
+	bool sync;
+	int ret;
+	struct list_head list;
+	enum ath10k_snoc_driver_event_type type;
+	struct completion complete;
+	void *data;
+};
+
+/* struct ath10k_snoc_qmi_config: qmi service configuration
+ * @fw_ready: wlan firmware ready for wlan operation
+ * @msa_ready: wlan firmware msa memory ready for board data download
+ * @server_connected: qmi server connected
+ * @event_work: QMI event work
+ * @event_list: QMI event list
+ * @qmi_recv_msg_work: QMI message receive work
+ * @event_wq: QMI event work queue
+ * @wlfw_clnt_nb: WLAN firmware indication callback
+ * @wlfw_clnt: QMI notifier handler for wlan firmware
+ * @qmi_ev_list: QMI event list
+ * @event_lock: spinlock for qmi event work queue
+ */
+struct ath10k_snoc_qmi_config {
+	atomic_t fw_ready;
+	bool msa_ready;
+	atomic_t server_connected;
+	struct work_struct event_work;
+	struct list_head event_list;
+	struct work_struct qmi_recv_msg_work;
+	struct workqueue_struct *event_wq;
+	struct notifier_block wlfw_clnt_nb;
+	struct qmi_handle *wlfw_clnt;
+	struct ath10k_snoc_qmi_driver_event
+			qmi_ev_list[ATH10K_SNOC_DRIVER_EVENT_MAX];
+	spinlock_t event_lock; /* spinlock for qmi event work queue */
+};
+
+int ath10k_snoc_pd_restart_enable(struct ath10k *ar);
+int ath10k_snoc_modem_ssr_register_notifier(struct ath10k *ar);
+int ath10k_snoc_modem_ssr_unregister_notifier(struct ath10k *ar);
+int ath10k_snoc_pdr_unregister_notifier(struct ath10k *ar);
+int ath10k_snoc_start_qmi_service(struct ath10k *ar);
+void ath10k_snoc_stop_qmi_service(struct ath10k *ar);
+int ath10k_snoc_qmi_wlan_enable(struct ath10k *ar,
+				struct ath10k_wlan_enable_cfg *config,
+				enum ath10k_driver_mode mode,
+				const char *host_version);
+int ath10k_snoc_qmi_wlan_disable(struct ath10k *ar);
+#endif
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/net/wireless/ath/ath10k/snoc.c	2019-01-22 16:16:25.423263793 +0100
@@ -0,0 +1,1819 @@
+/* Copyright (c) 2005-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ * Copyright (c) 2017-2018 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/bitops.h>
+#include <linux/suspend.h>
+#include "core.h"
+#include "debug.h"
+#include "hif.h"
+#include "htc.h"
+#include "ce.h"
+#include "snoc.h"
+#include "qmi.h"
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+#include <linux/clk.h>
+
+#define WCN3990_MAX_IRQ	12
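+/* CE2 doubles as the wakeup interrupt source, see
+ * ath10k_snoc_hif_suspend()/_resume().
+ */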
+#define WCN3990_WAKE_IRQ_CE	2
+
+static const char * const ce_name[WCN3990_MAX_IRQ] = {
+	"WLAN_CE_0",
+	"WLAN_CE_1",
+	"WLAN_CE_2",
+	"WLAN_CE_3",
+	"WLAN_CE_4",
+	"WLAN_CE_5",
+	"WLAN_CE_6",
+	"WLAN_CE_7",
+	"WLAN_CE_8",
+	"WLAN_CE_9",
+	"WLAN_CE_10",
+	"WLAN_CE_11",
+};
+
+#define ATH10K_SNOC_TARGET_WAIT 3000
+#define ATH10K_SNOC_NUM_WARM_RESET_ATTEMPTS 3
+#define SNOC_HIF_POWER_DOWN_DELAY 30
+#define ATH10K_MAX_PROP_SIZE 32
+
+static void ath10k_snoc_buffer_cleanup(struct ath10k *ar);
+static int ath10k_snoc_request_irq(struct ath10k *ar);
+static void ath10k_snoc_free_irq(struct ath10k *ar);
+static void ath10k_snoc_htc_tx_cb(struct ath10k_ce_pipe *ce_state);
+static void ath10k_snoc_htc_rx_cb(struct ath10k_ce_pipe *ce_state);
+static void ath10k_snoc_htt_tx_cb(struct ath10k_ce_pipe *ce_state);
+static void ath10k_snoc_htt_rx_cb(struct ath10k_ce_pipe *ce_state);
+static void ath10k_snoc_htt_htc_rx_cb(struct ath10k_ce_pipe *ce_state);
+
+static struct ce_attr host_ce_config_wlan[] = {
+	/* CE0: host->target HTC control streams */
+	{
+		.flags = CE_ATTR_FLAGS,
+		.src_nentries = 16,
+		.src_sz_max = 2048,
+		.dest_nentries = 0,
+		.send_cb = ath10k_snoc_htc_tx_cb,
+	},
+
+	/* CE1: target->host HTT + HTC control */
+	{
+		.flags = CE_ATTR_FLAGS,
+		.src_nentries = 0,
+		.src_sz_max = 2048,
+		.dest_nentries = 512,
+		.recv_cb = ath10k_snoc_htt_htc_rx_cb,
+	},
+
+	/* CE2: target->host WMI */
+	{
+		.flags = CE_ATTR_FLAGS,
+		.src_nentries = 0,
+		.src_sz_max = 2048,
+		.dest_nentries = 64,
+		.recv_cb = ath10k_snoc_htc_rx_cb,
+	},
+
+	/* CE3: host->target WMI */
+	{
+		.flags = CE_ATTR_FLAGS,
+		.src_nentries = 32,
+		.src_sz_max = 2048,
+		.dest_nentries = 0,
+		.send_cb = ath10k_snoc_htc_tx_cb,
+	},
+
+	/* CE4: host->target HTT */
+	{
+		.flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
+		.src_nentries = 2048,
+		.src_sz_max = 2048,
+		.dest_nentries = 0,
+		.send_cb = ath10k_snoc_htt_tx_cb,
+	},
+
+	/* CE5: target->host HTT (ipa_uc->target ) */
+	{
+		.flags = CE_ATTR_FLAGS,
+		.src_nentries = 0,
+		.src_sz_max = 0,
+		.dest_nentries = 0,
+		.recv_cb = ath10k_snoc_htt_rx_cb,
+	},
+
+	/* CE6: target autonomous hif_memcpy */
+	{
+		.flags = CE_ATTR_FLAGS,
+		.src_nentries = 0,
+		.src_sz_max = 0,
+		.dest_nentries = 0,
+	},
+
+	/* CE7: ce_diag, the Diagnostic Window */
+	{
+		.flags = CE_ATTR_FLAGS,
+		.src_nentries = 2,
+		.src_sz_max = 2048,
+		.dest_nentries = 2,
+	},
+
+	/* CE8: Target to uMC */
+	{
+		.flags = CE_ATTR_FLAGS,
+		.src_nentries = 0,
+		.src_sz_max = 2048,
+		.dest_nentries = 128,
+	},
+
+	/* CE9 target->host HTT */
+	{
+		.flags = CE_ATTR_FLAGS,
+		.src_nentries = 0,
+		.src_sz_max = 2048,
+		.dest_nentries = 512,
+		.recv_cb = ath10k_snoc_htt_htc_rx_cb,
+	},
+
+	/* CE10: target->host HTT */
+	{
+		.flags = CE_ATTR_FLAGS,
+		.src_nentries = 0,
+		.src_sz_max = 2048,
+		.dest_nentries = 512,
+		.recv_cb = ath10k_snoc_htt_htc_rx_cb,
+	},
+
+	/* CE11: target -> host PKTLOG */
+	{
+		.flags = CE_ATTR_FLAGS,
+		.src_nentries = 0,
+		.src_sz_max = 2048,
+		.dest_nentries = 512,
+		.recv_cb = ath10k_snoc_htt_htc_rx_cb,
+	},
+};
+
+static struct ce_pipe_config target_ce_config_wlan[] = {
+	/* CE0: host->target HTC control and raw streams */
+	{
+		.pipenum = __cpu_to_le32(0),
+		.pipedir = __cpu_to_le32(PIPEDIR_OUT),
+		.nentries = __cpu_to_le32(32),
+		.nbytes_max = __cpu_to_le32(2048),
+		.flags = __cpu_to_le32(CE_ATTR_FLAGS),
+		.reserved = __cpu_to_le32(0),
+	},
+
+	/* CE1: target->host HTT + HTC control */
+	{
+		.pipenum = __cpu_to_le32(1),
+		.pipedir = __cpu_to_le32(PIPEDIR_IN),
+		.nentries = __cpu_to_le32(32),
+		.nbytes_max = __cpu_to_le32(2048),
+		.flags = __cpu_to_le32(CE_ATTR_FLAGS),
+		.reserved = __cpu_to_le32(0),
+	},
+
+	/* CE2: target->host WMI */
+	{
+		.pipenum = __cpu_to_le32(2),
+		.pipedir = __cpu_to_le32(PIPEDIR_IN),
+		.nentries = __cpu_to_le32(64),
+		.nbytes_max = __cpu_to_le32(2048),
+		.flags = __cpu_to_le32(CE_ATTR_FLAGS),
+		.reserved = __cpu_to_le32(0),
+	},
+
+	/* CE3: host->target WMI */
+	{
+		.pipenum = __cpu_to_le32(3),
+		.pipedir = __cpu_to_le32(PIPEDIR_OUT),
+		.nentries = __cpu_to_le32(32),
+		.nbytes_max = __cpu_to_le32(2048),
+		.flags = __cpu_to_le32(CE_ATTR_FLAGS),
+		.reserved = __cpu_to_le32(0),
+	},
+
+	/* CE4: host->target HTT */
+	{
+		.pipenum = __cpu_to_le32(4),
+		.pipedir = __cpu_to_le32(PIPEDIR_OUT),
+		.nentries = __cpu_to_le32(256),
+		.nbytes_max = __cpu_to_le32(256),
+		.flags = __cpu_to_le32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR),
+		.reserved = __cpu_to_le32(0),
+	},
+
+	/* CE5: target->host HTT (HIF->HTT) */
+	{
+		.pipenum = __cpu_to_le32(5),
+		.pipedir = __cpu_to_le32(PIPEDIR_OUT),
+		.nentries = __cpu_to_le32(1024),
+		.nbytes_max = __cpu_to_le32(64),
+		.flags = __cpu_to_le32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR),
+		.reserved = __cpu_to_le32(0),
+	},
+
+	/* CE6: Reserved for target autonomous hif_memcpy */
+	{
+		.pipenum = __cpu_to_le32(6),
+		.pipedir = __cpu_to_le32(PIPEDIR_INOUT),
+		.nentries = __cpu_to_le32(32),
+		.nbytes_max = __cpu_to_le32(16384),
+		.flags = __cpu_to_le32(CE_ATTR_FLAGS),
+		.reserved = __cpu_to_le32(0),
+	},
+
+	/* CE7 used only by Host */
+	{
+		.pipenum = __cpu_to_le32(7),
+		.pipedir = __cpu_to_le32(4),
+		.nentries = __cpu_to_le32(0),
+		.nbytes_max = __cpu_to_le32(0),
+		.flags = __cpu_to_le32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR),
+		.reserved = __cpu_to_le32(0),
+	},
+
+	/* CE8 Target to uMC */
+	{
+		.pipenum = __cpu_to_le32(8),
+		.pipedir = __cpu_to_le32(PIPEDIR_IN),
+		.nentries = __cpu_to_le32(32),
+		.nbytes_max = __cpu_to_le32(2048),
+		.flags = __cpu_to_le32(0),
+		.reserved = __cpu_to_le32(0),
+	},
+
+	/* CE9 target->host HTT */
+	{
+		.pipenum = __cpu_to_le32(9),
+		.pipedir = __cpu_to_le32(PIPEDIR_IN),
+		.nentries = __cpu_to_le32(32),
+		.nbytes_max = __cpu_to_le32(2048),
+		.flags = __cpu_to_le32(CE_ATTR_FLAGS),
+		.reserved = __cpu_to_le32(0),
+	},
+
+	/* CE10 target->host HTT */
+	{
+		.pipenum = __cpu_to_le32(10),
+		.pipedir = __cpu_to_le32(PIPEDIR_IN),
+		.nentries = __cpu_to_le32(32),
+		.nbytes_max = __cpu_to_le32(2048),
+		.flags = __cpu_to_le32(CE_ATTR_FLAGS),
+		.reserved = __cpu_to_le32(0),
+	},
+
+	/* CE11 target autonomous qcache memcpy */
+	{
+		.pipenum = __cpu_to_le32(11),
+		.pipedir = __cpu_to_le32(PIPEDIR_IN),
+		.nentries = __cpu_to_le32(32),
+		.nbytes_max = __cpu_to_le32(2048),
+		.flags = __cpu_to_le32(CE_ATTR_FLAGS),
+		.reserved = __cpu_to_le32(0),
+	},
+};
+
+static struct service_to_pipe target_service_to_ce_map_wlan[] = {
+	{
+		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VO),
+		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
+		__cpu_to_le32(3),
+	},
+	{
+		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VO),
+		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
+		__cpu_to_le32(2),
+	},
+	{
+		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BK),
+		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
+		__cpu_to_le32(3),
+	},
+	{
+		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BK),
+		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
+		__cpu_to_le32(2),
+	},
+	{
+		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BE),
+		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
+		__cpu_to_le32(3),
+	},
+	{
+		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BE),
+		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
+		__cpu_to_le32(2),
+	},
+	{
+		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VI),
+		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
+		__cpu_to_le32(3),
+	},
+	{
+		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VI),
+		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
+		__cpu_to_le32(2),
+	},
+	{
+		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_CONTROL),
+		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
+		__cpu_to_le32(3),
+	},
+	{
+		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_CONTROL),
+		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
+		__cpu_to_le32(2),
+	},
+	{
+		__cpu_to_le32(ATH10K_HTC_SVC_ID_RSVD_CTRL),
+		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
+		__cpu_to_le32(0),
+	},
+	{
+		__cpu_to_le32(ATH10K_HTC_SVC_ID_RSVD_CTRL),
+		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
+		__cpu_to_le32(2),
+	},
+	{ /* not used */
+		__cpu_to_le32(ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS),
+		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
+		__cpu_to_le32(0),
+	},
+	{ /* not used */
+		__cpu_to_le32(ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS),
+		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
+		__cpu_to_le32(2),
+	},
+	{
+		__cpu_to_le32(ATH10K_HTC_SVC_ID_HTT_DATA_MSG),
+		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
+		__cpu_to_le32(4),
+	},
+	{
+		__cpu_to_le32(ATH10K_HTC_SVC_ID_HTT_DATA_MSG),
+		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
+		__cpu_to_le32(1),
+	},
+	{
+		__cpu_to_le32(ATH10K_HTC_SVC_ID_HTT_IPA_MSG),
+		__cpu_to_le32(PIPEDIR_OUT),/* IPA service */
+		__cpu_to_le32(5),
+	},
+	{ /* in = DL = target -> host */
+		__cpu_to_le32(ATH10K_HTC_SVC_ID_HTT_DATA2_MSG),
+		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
+		__cpu_to_le32(9),
+	},
+	{ /* in = DL = target -> host */
+		__cpu_to_le32(ATH10K_HTC_SVC_ID_HTT_DATA3_MSG),
+		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
+		__cpu_to_le32(10),
+	},
+	{ /* in = DL = target -> host pktlog */
+		__cpu_to_le32(ATH10K_HTC_SVC_ID_HTT_LOG_MSG),
+		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
+		__cpu_to_le32(11),
+	},
+	/* (Additions here) */
+
+	{ /* must be last */
+		__cpu_to_le32(0),
+		__cpu_to_le32(0),
+		__cpu_to_le32(0),
+	},
+};
+
+#define WCN3990_SRC_WR_INDEX_OFFSET 0x3C
+#define WCN3990_DST_WR_INDEX_OFFSET 0x40
+
+static struct ath10k_shadow_reg_cfg target_shadow_reg_cfg_map[] = {
+		{ 0, WCN3990_SRC_WR_INDEX_OFFSET},
+		{ 3, WCN3990_SRC_WR_INDEX_OFFSET},
+		{ 4, WCN3990_SRC_WR_INDEX_OFFSET},
+		{ 5, WCN3990_SRC_WR_INDEX_OFFSET},
+		{ 7, WCN3990_SRC_WR_INDEX_OFFSET},
+		{ 1, WCN3990_DST_WR_INDEX_OFFSET},
+		{ 2, WCN3990_DST_WR_INDEX_OFFSET},
+		{ 7, WCN3990_DST_WR_INDEX_OFFSET},
+		{ 8, WCN3990_DST_WR_INDEX_OFFSET},
+		{ 9, WCN3990_DST_WR_INDEX_OFFSET},
+		{ 10, WCN3990_DST_WR_INDEX_OFFSET},
+		{ 11, WCN3990_DST_WR_INDEX_OFFSET},
+};
+
+static bool ath10k_snoc_has_fw_crashed(struct ath10k *ar)
+{
+	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
+
+	return atomic_read(&ar_snoc->fw_crashed);
+}
+
+static void ath10k_snoc_fw_crashed_clear(struct ath10k *ar)
+{
+	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
+
+	atomic_set(&ar_snoc->fw_crashed, 0);
+}
+
+void ath10k_snoc_write32(struct ath10k *ar, u32 offset, u32 value)
+{
+	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
+
+	if (!ar_snoc)
+		return;
+
+	iowrite32(value, ar_snoc->mem + offset);
+}
+
+u32 ath10k_snoc_read32(struct ath10k *ar, u32 offset)
+{
+	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
+	u32 val;
+
+	if (!ar_snoc)
+		return -EINVAL;
+
+	val = ioread32(ar_snoc->mem + offset);
+
+	return val;
+}
+
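+/* Post a single rx buffer: allocate an skb, DMA-map it and hand it
+ * to the copy engine ring under ce_lock.
+ */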
+static int __ath10k_snoc_rx_post_buf(struct ath10k_snoc_pipe *pipe)
+{
+	struct ath10k *ar = pipe->hif_ce_state;
+	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
+	struct ath10k_ce_pipe *ce_pipe = pipe->ce_hdl;
+	struct sk_buff *skb;
+	dma_addr_t paddr;
+	int ret;
+
+	skb = dev_alloc_skb(pipe->buf_sz);
+	if (!skb)
+		return -ENOMEM;
+
+	WARN_ONCE((unsigned long)skb->data & 3, "unaligned skb");
+
+	paddr = dma_map_single(ar->dev, skb->data,
+			       skb->len + skb_tailroom(skb),
+			       DMA_FROM_DEVICE);
+	if (unlikely(dma_mapping_error(ar->dev, paddr))) {
+		ath10k_warn(ar, "failed to dma map snoc rx buf\n");
+		dev_kfree_skb_any(skb);
+		return -EIO;
+	}
+
+	ATH10K_SKB_RXCB(skb)->paddr = paddr;
+
+	spin_lock_bh(&ar_snoc->opaque_ctx.ce_lock);
+	ret = __ath10k_ce_rx_post_buf(ce_pipe, skb, paddr);
+	spin_unlock_bh(&ar_snoc->opaque_ctx.ce_lock);
+	if (ret) {
+		dma_unmap_single(ar->dev, paddr, skb->len + skb_tailroom(skb),
+				 DMA_FROM_DEVICE);
+		dev_kfree_skb_any(skb);
+		return ret;
+	}
+
+	return 0;
+}
+
+static void ath10k_snoc_rx_post_pipe(struct ath10k_snoc_pipe *pipe)
+{
+	struct ath10k *ar = pipe->hif_ce_state;
+	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
+	struct ath10k_ce_pipe *ce_pipe = pipe->ce_hdl;
+	int ret, num;
+
+	if (pipe->buf_sz == 0)
+		return;
+
+	if (!ce_pipe->dest_ring)
+		return;
+
+	spin_lock_bh(&ar_snoc->opaque_ctx.ce_lock);
+	num = __ath10k_ce_rx_num_free_bufs(ce_pipe);
+	spin_unlock_bh(&ar_snoc->opaque_ctx.ce_lock);
+	while (num--) {
+		ret = __ath10k_snoc_rx_post_buf(pipe);
+		if (ret) {
+			if (ret == -ENOSPC)
+				break;
+			ath10k_warn(ar, "failed to post rx buf: %d\n", ret);
+			mod_timer(&ar_snoc->rx_post_retry, jiffies +
+				  ATH10K_SNOC_RX_POST_RETRY_MS);
+			break;
+		}
+	}
+}
+
+static void ath10k_snoc_rx_post(struct ath10k *ar)
+{
+	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
+	int i;
+
+	for (i = 0; i < CE_COUNT; i++)
+		ath10k_snoc_rx_post_pipe(&ar_snoc->pipe_info[i]);
+}
+
+static void ath10k_snoc_rx_replenish_retry(unsigned long ptr)
+{
+	struct ath10k *ar = (void *)ptr;
+
+	ath10k_snoc_rx_post(ar);
+}
+
+static void ath10k_snoc_htc_tx_cb(struct ath10k_ce_pipe *ce_state)
+{
+	struct ath10k *ar = ce_state->ar;
+	struct sk_buff_head list;
+	struct sk_buff *skb;
+
+	__skb_queue_head_init(&list);
+	while (ath10k_ce_completed_send_next(ce_state, (void **)&skb) == 0) {
+		if (!skb)
+			continue;
+
+		__skb_queue_tail(&list, skb);
+	}
+
+	while ((skb = __skb_dequeue(&list)))
+		ath10k_htc_tx_completion_handler(ar, skb);
+}
+
+static void ath10k_snoc_process_rx_cb(struct ath10k_ce_pipe *ce_state,
+				      void (*callback)(struct ath10k *ar,
+						       struct sk_buff *skb))
+{
+	struct ath10k *ar = ce_state->ar;
+	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
+	struct ath10k_snoc_pipe *pipe_info =  &ar_snoc->pipe_info[ce_state->id];
+	struct sk_buff *skb;
+	struct sk_buff_head list;
+	void *transfer_context;
+	unsigned int nbytes, max_nbytes;
+
+	__skb_queue_head_init(&list);
+	while (ath10k_ce_completed_recv_next(ce_state, &transfer_context,
+					     &nbytes) == 0) {
+		skb = transfer_context;
+		max_nbytes = skb->len + skb_tailroom(skb);
+		dma_unmap_single(ar->dev, ATH10K_SKB_RXCB(skb)->paddr,
+				 max_nbytes, DMA_FROM_DEVICE);
+
+		if (unlikely(max_nbytes < nbytes)) {
+			ath10k_warn(ar, "rxed more than expected (nbytes %d, max %d)",
+				    nbytes, max_nbytes);
+			dev_kfree_skb_any(skb);
+			continue;
+		}
+
+		skb_put(skb, nbytes);
+		__skb_queue_tail(&list, skb);
+	}
+
+	while ((skb = __skb_dequeue(&list))) {
+		ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc rx ce pipe %d len %d\n",
+			   ce_state->id, skb->len);
+
+		callback(ar, skb);
+	}
+
+	ath10k_snoc_rx_post_pipe(pipe_info);
+}
+
+static void ath10k_snoc_htc_rx_cb(struct ath10k_ce_pipe *ce_state)
+{
+	ath10k_snoc_process_rx_cb(ce_state, ath10k_htc_rx_completion_handler);
+}
+
+static void ath10k_snoc_htt_htc_rx_cb(struct ath10k_ce_pipe *ce_state)
+{
+	/* CE4 polling needs to be done whenever CE pipe which transports
+	 * HTT Rx (target->host) is processed.
+	 */
+	ath10k_ce_per_engine_service(ce_state->ar, CE_POLL_PIPE);
+
+	ath10k_snoc_process_rx_cb(ce_state, ath10k_htc_rx_completion_handler);
+}
+
+static void ath10k_snoc_htt_tx_cb(struct ath10k_ce_pipe *ce_state)
+{
+	struct ath10k *ar = ce_state->ar;
+	struct sk_buff *skb;
+
+	while (ath10k_ce_completed_send_next(ce_state, (void **)&skb) == 0) {
+		if (!skb)
+			continue;
+
+		dma_unmap_single(ar->dev, ATH10K_SKB_CB(skb)->paddr,
+				 skb->len, DMA_TO_DEVICE);
+		ath10k_htt_hif_tx_complete(ar, skb);
+	}
+}
+
+static void ath10k_snoc_htt_rx_deliver(struct ath10k *ar, struct sk_buff *skb)
+{
+	skb_pull(skb, sizeof(struct ath10k_htc_hdr));
+	ath10k_htt_t2h_msg_handler(ar, skb);
+}
+
+static void ath10k_snoc_htt_rx_cb(struct ath10k_ce_pipe *ce_state)
+{
+	ath10k_ce_per_engine_service(ce_state->ar, CE_POLL_PIPE);
+	ath10k_snoc_process_rx_cb(ce_state, ath10k_snoc_htt_rx_deliver);
+}
+
+static int ath10k_snoc_hif_tx_sg(struct ath10k *ar, u8 pipe_id,
+				 struct ath10k_hif_sg_item *items, int n_items)
+{
+	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
+	struct ath10k_snoc_pipe *snoc_pipe;
+	struct ath10k_ce_pipe *ce_pipe;
+	int err, i = 0;
+
+	if (!ar_snoc)
+		return -EINVAL;
+
+	if (atomic_read(&ar_snoc->fw_crashed))
+		return -ESHUTDOWN;
+
+	snoc_pipe = &ar_snoc->pipe_info[pipe_id];
+	ce_pipe = snoc_pipe->ce_hdl;
+	spin_lock_bh(&ar_snoc->opaque_ctx.ce_lock);
+
+	for (i = 0; i < n_items - 1; i++) {
+		ath10k_dbg(ar, ATH10K_DBG_SNOC,
+			   "snoc tx item %d paddr %pad len %d n_items %d\n",
+			   i, &items[i].paddr, items[i].len, n_items);
+
+		if (ath10k_snoc_has_fw_crashed(ar)) {
+			err = -ESHUTDOWN;
+			goto err;
+		}
+
+		err = ath10k_ce_send_nolock(ce_pipe,
+					    items[i].transfer_context,
+					    items[i].paddr,
+					    items[i].len,
+					    items[i].transfer_id,
+					    CE_SEND_FLAG_GATHER);
+		if (err)
+			goto err;
+	}
+
+	ath10k_dbg(ar, ATH10K_DBG_SNOC,
+		   "snoc tx item %d paddr %pad len %d n_items %d\n",
+		   i, &items[i].paddr, items[i].len, n_items);
+
+	err = ath10k_ce_send_nolock(ce_pipe,
+				    items[i].transfer_context,
+				    items[i].paddr,
+				    items[i].len,
+				    items[i].transfer_id,
+				    0);
+	if (err)
+		goto err;
+
+	spin_unlock_bh(&ar_snoc->opaque_ctx.ce_lock);
+	return 0;
+
+err:
+	for (; i > 0; i--)
+		__ath10k_ce_send_revert(ce_pipe);
+
+	spin_unlock_bh(&ar_snoc->opaque_ctx.ce_lock);
+	return err;
+}
+
+static u16 ath10k_snoc_hif_get_free_queue_number(struct ath10k *ar, u8 pipe)
+{
+	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
+
+	ath10k_dbg(ar, ATH10K_DBG_SNOC, "hif get free queue number\n");
+
+	return ath10k_ce_num_free_src_entries(ar_snoc->pipe_info[pipe].ce_hdl);
+}
+
+static void ath10k_snoc_hif_send_complete_check(struct ath10k *ar, u8 pipe,
+						int force)
+{
+	int resources;
+
+	ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc hif send complete check\n");
+
+	if (!force) {
+		resources = ath10k_snoc_hif_get_free_queue_number(ar, pipe);
+
+		if (resources > (host_ce_config_wlan[pipe].src_nentries >> 1))
+			return;
+	}
+	ath10k_ce_per_engine_service(ar, pipe);
+}
+
+static int ath10k_snoc_hif_map_service_to_pipe(struct ath10k *ar,
+					       u16 service_id,
+					       u8 *ul_pipe, u8 *dl_pipe)
+{
+	const struct service_to_pipe *entry;
+	bool ul_set = false, dl_set = false;
+	int i;
+
+	ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc hif map service\n");
+
+	for (i = 0; i < ARRAY_SIZE(target_service_to_ce_map_wlan); i++) {
+		entry = &target_service_to_ce_map_wlan[i];
+
+		if (__le32_to_cpu(entry->service_id) != service_id)
+			continue;
+
+		switch (__le32_to_cpu(entry->pipedir)) {
+		case PIPEDIR_NONE:
+			break;
+		case PIPEDIR_IN:
+			WARN_ON(dl_set);
+			*dl_pipe = __le32_to_cpu(entry->pipenum);
+			dl_set = true;
+			break;
+		case PIPEDIR_OUT:
+			WARN_ON(ul_set);
+			*ul_pipe = __le32_to_cpu(entry->pipenum);
+			ul_set = true;
+			break;
+		case PIPEDIR_INOUT:
+			WARN_ON(dl_set);
+			WARN_ON(ul_set);
+			*dl_pipe = __le32_to_cpu(entry->pipenum);
+			*ul_pipe = __le32_to_cpu(entry->pipenum);
+			dl_set = true;
+			ul_set = true;
+			break;
+		}
+	}
+
+	return 0;
+}
+
+static void ath10k_snoc_hif_get_default_pipe(struct ath10k *ar,
+					     u8 *ul_pipe, u8 *dl_pipe)
+{
+	ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc hif get default pipe\n");
+
+	(void)ath10k_snoc_hif_map_service_to_pipe(ar,
+						 ATH10K_HTC_SVC_ID_RSVD_CTRL,
+						 ul_pipe, dl_pipe);
+}
+
+static void ath10k_snoc_irq_disable(struct ath10k *ar)
+{
+	ath10k_ce_disable_interrupts(ar);
+}
+
+static void ath10k_snoc_irq_enable(struct ath10k *ar)
+{
+	ath10k_ce_enable_interrupts(ar);
+}
+
+static void ath10k_snoc_rx_pipe_cleanup(struct ath10k_snoc_pipe *snoc_pipe)
+{
+	struct ath10k *ar;
+	struct ath10k_ce_pipe *ce_pipe;
+	struct ath10k_ce_ring *ce_ring;
+	struct sk_buff *skb;
+	int i;
+
+	ar = snoc_pipe->hif_ce_state;
+	ce_pipe = snoc_pipe->ce_hdl;
+	ce_ring = ce_pipe->dest_ring;
+
+	if (!ce_ring)
+		return;
+
+	if (!snoc_pipe->buf_sz)
+		return;
+
+	for (i = 0; i < ce_ring->nentries; i++) {
+		skb = ce_ring->per_transfer_context[i];
+		if (!skb)
+			continue;
+
+		ce_ring->per_transfer_context[i] = NULL;
+
+		dma_unmap_single(ar->dev, ATH10K_SKB_RXCB(skb)->paddr,
+				 skb->len + skb_tailroom(skb),
+				 DMA_FROM_DEVICE);
+		dev_kfree_skb_any(skb);
+	}
+}
+
+static void ath10k_snoc_tx_pipe_cleanup(struct ath10k_snoc_pipe *snoc_pipe)
+{
+	struct ath10k *ar;
+	struct ath10k_snoc *ar_snoc;
+	struct ath10k_ce_pipe *ce_pipe;
+	struct ath10k_ce_ring *ce_ring;
+	struct sk_buff *skb;
+	int i;
+
+	ar = snoc_pipe->hif_ce_state;
+	ar_snoc = ath10k_snoc_priv(ar);
+	ce_pipe = snoc_pipe->ce_hdl;
+	ce_ring = ce_pipe->src_ring;
+
+	if (!ce_ring)
+		return;
+
+	if (!snoc_pipe->buf_sz)
+		return;
+
+	for (i = 0; i < ce_ring->nentries; i++) {
+		skb = ce_ring->per_transfer_context[i];
+		if (!skb)
+			continue;
+
+		ce_ring->per_transfer_context[i] = NULL;
+
+		ath10k_htc_tx_completion_handler(ar, skb);
+	}
+}
+
+static void ath10k_snoc_buffer_cleanup(struct ath10k *ar)
+{
+	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
+	int pipe_num;
+
+	del_timer_sync(&ar_snoc->rx_post_retry);
+	for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
+		struct ath10k_snoc_pipe *pipe_info;
+
+		pipe_info = &ar_snoc->pipe_info[pipe_num];
+		ath10k_snoc_rx_pipe_cleanup(pipe_info);
+		ath10k_snoc_tx_pipe_cleanup(pipe_info);
+	}
+}
+
+static void ath10k_snoc_flush(struct ath10k *ar)
+{
+	ath10k_snoc_buffer_cleanup(ar);
+}
+
+static void ath10k_snoc_hif_stop(struct ath10k *ar)
+{
+	if (!ar)
+		return;
+	if (ath10k_snoc_has_fw_crashed(ar) ||
+	    test_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags)) {
+		ath10k_snoc_free_irq(ar);
+	} else {
+		ath10k_snoc_irq_disable(ar);
+	}
+
+	ath10k_snoc_flush(ar);
+	napi_synchronize(&ar->napi);
+	napi_disable(&ar->napi);
+	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif stop\n");
+}
+
+static int ath10k_snoc_alloc_pipes(struct ath10k *ar)
+{
+	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
+	struct ath10k_snoc_pipe *pipe;
+	int i, ret;
+
+	for (i = 0; i < CE_COUNT; i++) {
+		pipe = &ar_snoc->pipe_info[i];
+		pipe->ce_hdl = &ar_snoc->opaque_ctx.ce_states[i];
+		pipe->pipe_num = i;
+		pipe->hif_ce_state = ar;
+
+		ret = ath10k_ce_alloc_pipe(ar, i, &host_ce_config_wlan[i]);
+		if (ret) {
+			ath10k_err(ar, "failed to allocate copy engine pipe %d: %d\n",
+				   i, ret);
+			return ret;
+		}
+
+		pipe->buf_sz = (size_t)(host_ce_config_wlan[i].src_sz_max);
+	}
+
+	return 0;
+}
+
+static void ath10k_snoc_free_pipes(struct ath10k *ar)
+{
+	int i;
+
+	for (i = 0; i < CE_COUNT; i++)
+		ath10k_ce_free_pipe(ar, i);
+}
+
+static void ath10k_snoc_release_resource(struct ath10k *ar)
+{
+	netif_napi_del(&ar->napi);
+	ath10k_snoc_free_pipes(ar);
+}
+
+static int ath10k_snoc_init_pipes(struct ath10k *ar)
+{
+	int i, ret;
+
+	for (i = 0; i < CE_COUNT; i++) {
+		ret = ath10k_ce_init_pipe(ar, i, &host_ce_config_wlan[i]);
+		if (ret) {
+			ath10k_err(ar, "failed to initialize copy engine pipe %d: %d\n",
+				   i, ret);
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
+static void ath10k_snoc_hif_power_down(struct ath10k *ar)
+{
+	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
+
+	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif power down\n");
+	msleep(SNOC_HIF_POWER_DOWN_DELAY);
+
+	if (!atomic_read(&ar_snoc->pm_ops_inprogress))
+		ath10k_snoc_qmi_wlan_disable(ar);
+
+	ce_remove_rri_on_ddr(ar);
+}
+
+int ath10k_snoc_get_ce_id(struct ath10k *ar, int irq)
+{
+	int i;
+	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
+
+	for (i = 0; i < CE_COUNT_MAX; i++) {
+		if (ar_snoc->ce_irqs[i].irq_line == irq)
+			return i;
+	}
+	ath10k_err(ar, "No matching CE id for irq %d\n", irq);
+
+	return -EINVAL;
+}
+
+static irqreturn_t ath10k_snoc_per_engine_handler(int irq, void *arg)
+{
+	struct ath10k *ar = arg;
+	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
+	int ce_id = ath10k_snoc_get_ce_id(ar, irq);
+
+	if (ce_id < 0 || ce_id >= ARRAY_SIZE(ar_snoc->pipe_info)) {
+		ath10k_warn(ar, "unexpected/invalid irq %d ce_id %d\n", irq,
+			    ce_id);
+		return IRQ_HANDLED;
+	}
+
+	ath10k_snoc_irq_disable(ar);
+	napi_schedule(&ar->napi);
+
+	return IRQ_HANDLED;
+}
+
+static int ath10k_snoc_request_irq(struct ath10k *ar)
+{
+	int ret, id;
+	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
+	int irqflags = IRQF_TRIGGER_RISING;
+
+	for (id = 0; id < CE_COUNT_MAX; id++) {
+		ret = request_irq(ar_snoc->ce_irqs[id].irq_line,
+				  ath10k_snoc_per_engine_handler,
+				  irqflags, ce_name[id], ar);
+		if (ret) {
+			ath10k_err(ar,
+				   "%s: cannot register CE %d irq handler, ret = %d",
+				   __func__, id, ret);
+			atomic_set(&ar_snoc->ce_irqs[id].irq_req_stat, 0);
+			return ret;
+		}
+		atomic_set(&ar_snoc->ce_irqs[id].irq_req_stat, 1);
+	}
+
+	return 0;
+}
+
+static void ath10k_snoc_free_irq(struct ath10k *ar)
+{
+	int id;
+	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
+
+	for (id = 0; id < CE_COUNT_MAX; id++) {
+		if (atomic_read(&ar_snoc->ce_irqs[id].irq_req_stat)) {
+			free_irq(ar_snoc->ce_irqs[id].irq_line, ar);
+			atomic_set(&ar_snoc->ce_irqs[id].irq_req_stat, 0);
+		}
+	}
+}
+
+static int ath10k_snoc_get_soc_info(struct ath10k *ar)
+{
+	struct resource *res;
+	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
+	struct platform_device *pdev;
+
+	pdev = ar_snoc->dev;
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "membase");
+	if (!res) {
+		ath10k_err(ar, "Memory base not found in DT\n");
+		return -EINVAL;
+	}
+
+	ar_snoc->mem_pa = res->start;
+	ar_snoc->mem = devm_ioremap(&pdev->dev, ar_snoc->mem_pa,
+				    resource_size(res));
+	if (!ar_snoc->mem) {
+		ath10k_err(ar, "Memory base ioremap failed: phy addr: %pa\n",
+			   &ar_snoc->mem_pa);
+		return -EINVAL;
+	}
+
+	ar_snoc->target_info.soc_version = ATH10K_HW_WCN3990;
+	ar_snoc->target_info.target_version = ATH10K_HW_WCN3990;
+	ar_snoc->target_info.target_revision = 0;
+
+	ath10k_dbg(ar, ATH10K_DBG_SNOC,
+		   "%s: mem = %pS mem_pa = %pad soc ver=%x tgt ver=%x\n",
+		   __func__, ar_snoc->mem, &ar_snoc->mem_pa,
+		   ar_snoc->target_info.soc_version,
+		   ar_snoc->target_info.target_version);
+
+	return 0;
+}
+
+static int ath10k_snoc_wlan_enable(struct ath10k *ar)
+{
+	struct ath10k_wlan_enable_cfg cfg;
+	enum ath10k_driver_mode mode;
+	int pipe_num;
+	struct ath10k_ce_tgt_pipe_cfg tgt_cfg[CE_COUNT_MAX];
+
+	for (pipe_num = 0; pipe_num < CE_COUNT_MAX; pipe_num++) {
+		tgt_cfg[pipe_num].pipe_num =
+				target_ce_config_wlan[pipe_num].pipenum;
+		tgt_cfg[pipe_num].pipe_dir =
+				target_ce_config_wlan[pipe_num].pipedir;
+		tgt_cfg[pipe_num].nentries =
+				target_ce_config_wlan[pipe_num].nentries;
+		tgt_cfg[pipe_num].nbytes_max =
+				target_ce_config_wlan[pipe_num].nbytes_max;
+		tgt_cfg[pipe_num].flags =
+				target_ce_config_wlan[pipe_num].flags;
+		tgt_cfg[pipe_num].reserved = 0;
+	}
+
+	cfg.num_ce_tgt_cfg = ARRAY_SIZE(target_ce_config_wlan);
+	cfg.ce_tgt_cfg = tgt_cfg;
+	cfg.num_ce_svc_pipe_cfg = ARRAY_SIZE(target_service_to_ce_map_wlan);
+	cfg.ce_svc_cfg = (struct ath10k_ce_svc_pipe_cfg *)
+		&target_service_to_ce_map_wlan;
+	cfg.num_shadow_reg_cfg = ARRAY_SIZE(target_shadow_reg_cfg_map);
+	cfg.shadow_reg_cfg = target_shadow_reg_cfg_map;
+
+	mode = ar->testmode.utf_monitor ? ATH10K_FTM : ATH10K_MISSION;
+	return ath10k_snoc_qmi_wlan_enable(ar, &cfg, mode,
+					   "5.1.0.26N");
+}
+
+static int ath10k_snoc_bus_configure(struct ath10k *ar)
+{
+	int ret;
+
+	ret = ath10k_snoc_wlan_enable(ar);
+	if (ret < 0) {
+		ath10k_err(ar, "%s: wlan enable failed: %d\n",
+			   __func__, ret);
+		return ret;
+	}
+
+	ce_config_rri_on_ddr(ar);
+
+	return 0;
+}
+
+static int ath10k_snoc_hif_start(struct ath10k *ar)
+{
+	if (ath10k_snoc_has_fw_crashed(ar)) {
+		ath10k_snoc_request_irq(ar);
+		ath10k_snoc_fw_crashed_clear(ar);
+	}
+	ath10k_snoc_irq_enable(ar);
+	ath10k_snoc_rx_post(ar);
+
+	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif start\n");
+	return 0;
+}
+
+static int ath10k_snoc_claim(struct ath10k *ar)
+{
+	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
+
+	ath10k_snoc_get_soc_info(ar);
+	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot snoc_mem 0x%p\n", ar_snoc->mem);
+
+	return 0;
+}
+
+static int ath10k_snoc_hif_power_up(struct ath10k *ar)
+{
+	int ret;
+	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
+
+	ath10k_dbg(ar, ATH10K_DBG_SNOC, "%s:WCN3990 driver state = %d\n",
+		   __func__, ar->state);
+
+	if (atomic_read(&ar_snoc->pm_ops_inprogress)) {
+		ath10k_dbg(ar, ATH10K_DBG_SNOC,
+			   "%s: WLAN OFF CMD Reset on PM Resume\n", __func__);
+		ath10k_snoc_qmi_wlan_disable(ar);
+		atomic_set(&ar_snoc->pm_ops_inprogress, 0);
+	}
+
+	ret = ath10k_snoc_bus_configure(ar);
+	if (ret) {
+		ath10k_err(ar, "failed to configure bus: %d\n", ret);
+		return ret;
+	}
+
+	ret = ath10k_snoc_init_pipes(ar);
+	if (ret) {
+		ath10k_err(ar, "failed to initialize CE: %d\n", ret);
+		goto err_sleep;
+	}
+
+	napi_enable(&ar->napi);
+	return 0;
+
+err_sleep:
+	return ret;
+}
+
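+/* NAPI poll: interrupts were disabled by the per-CE handler and are
+ * re-enabled only once the completion work fits within the budget.
+ */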
+static int ath10k_snoc_napi_poll(struct napi_struct *ctx, int budget)
+{
+	struct ath10k *ar = container_of(ctx, struct ath10k, napi);
+	int done = 0;
+
+	if (ath10k_snoc_has_fw_crashed(ar)) {
+		napi_complete(ctx);
+		return done;
+	}
+	ath10k_ce_per_engine_service_any(ar);
+
+	done = ath10k_htt_txrx_compl_task(ar, budget);
+
+	if (done < budget) {
+		napi_complete(ctx);
+		ath10k_snoc_irq_enable(ar);
+	}
+
+	return done;
+}
+
+static int ath10k_snoc_resource_init(struct ath10k *ar)
+{
+	int i, ret = 0;
+	struct resource *res;
+	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
+
+	for (i = 0; i < CE_COUNT; i++) {
+		res = platform_get_resource(ar_snoc->dev, IORESOURCE_IRQ, i);
+		if (!res) {
+			ath10k_err(ar, "Fail to get IRQ-%d\n", i);
+			ret = -ENODEV;
+			goto out;
+		} else {
+			ar_snoc->ce_irqs[i].irq_line = res->start;
+		}
+	}
+
+out:
+	return ret;
+}
+
+static
+int ath10k_snoc_pm_notifier(struct notifier_block *nb,
+			    unsigned long pm_event, void *data)
+{
+	struct ath10k_snoc *ar_snoc =
+			container_of(nb, struct ath10k_snoc, pm_notifier);
+	struct ath10k *ar = ar_snoc->ar;
+
+	ath10k_dbg(ar, ATH10K_DBG_SNOC,
+		   "%s: PM Event: %lu\n", __func__, pm_event);
+
+	switch (pm_event) {
+	case PM_HIBERNATION_PREPARE:
+	case PM_SUSPEND_PREPARE:
+	case PM_POST_HIBERNATION:
+	case PM_POST_SUSPEND:
+	case PM_RESTORE_PREPARE:
+	case PM_POST_RESTORE:
+		atomic_set(&ar_snoc->pm_ops_inprogress, 1);
+		break;
+	default:
+		break;
+	}
+
+	return NOTIFY_DONE;
+}
+
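+/* Look up a regulator by name and parse its optional
+ * "qcom,<name>-config" DT property, read as up to four cells:
+ * <min_uV max_uV load_uA settle_delay_us>.
+ */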
+static int ath10k_get_vreg_info(struct ath10k *ar, struct device *dev,
+				struct ath10k_wcn3990_vreg_info *vreg_info)
+{
+	int ret = 0;
+	char prop_name[ATH10K_MAX_PROP_SIZE];
+	struct regulator *reg;
+	const __be32 *prop;
+	int len = 0;
+	int i;
+
+	reg = devm_regulator_get_optional(dev, vreg_info->name);
+	if (PTR_ERR(reg) == -EPROBE_DEFER) {
+		ath10k_err(ar, "EPROBE_DEFER for regulator: %s\n",
+			   vreg_info->name);
+		ret = PTR_ERR(reg);
+		goto out;
+	}
+
+	if (IS_ERR(reg)) {
+		ret = PTR_ERR(reg);
+
+		if (vreg_info->required) {
+			ath10k_err(ar, "Regulator %s doesn't exist: %d\n",
+				   vreg_info->name, ret);
+			goto out;
+		} else {
+			ath10k_dbg(ar, ATH10K_DBG_SNOC,
+				   "Optional regulator %s doesn't exist: %d\n",
+				   vreg_info->name, ret);
+			goto done;
+		}
+	}
+
+	vreg_info->reg = reg;
+
+	snprintf(prop_name, ATH10K_MAX_PROP_SIZE,
+		 "qcom,%s-config", vreg_info->name);
+
+	prop = of_get_property(dev->of_node, prop_name, &len);
+
+	ath10k_dbg(ar, ATH10K_DBG_SNOC, "Got regulator cfg,prop: %s, len: %d\n",
+		   prop_name, len);
+
+	if (!prop || len < (2 * sizeof(__be32))) {
+		ath10k_dbg(ar, ATH10K_DBG_SNOC, "Property %s %s\n", prop_name,
+			   prop ? "invalid format" : "doesn't exist");
+		goto done;
+	}
+
+	for (i = 0; (i * sizeof(__be32)) < len; i++) {
+		switch (i) {
+		case 0:
+			vreg_info->min_v = be32_to_cpup(&prop[0]);
+			break;
+		case 1:
+			vreg_info->max_v = be32_to_cpup(&prop[1]);
+			break;
+		case 2:
+			vreg_info->load_ua = be32_to_cpup(&prop[2]);
+			break;
+		case 3:
+			vreg_info->settle_delay = be32_to_cpup(&prop[3]);
+			break;
+		default:
+			ath10k_dbg(ar, ATH10K_DBG_SNOC, "%s, ignoring val %d\n",
+				   prop_name, i);
+			break;
+		}
+	}
+
+done:
+	ath10k_dbg(ar, ATH10K_DBG_SNOC,
+		   "vreg: %s, min_v: %u, max_v: %u, load: %u, delay: %lu\n",
+		   vreg_info->name, vreg_info->min_v, vreg_info->max_v,
+		   vreg_info->load_ua, vreg_info->settle_delay);
+
+	return 0;
+
+out:
+	return ret;
+}
+
+static int ath10k_get_clk_info(struct ath10k *ar, struct device *dev,
+			       struct ath10k_wcn3990_clk_info *clk_info)
+{
+	struct clk *handle;
+	int ret = 0;
+
+	handle = devm_clk_get(dev, clk_info->name);
+	if (IS_ERR(handle)) {
+		ret = PTR_ERR(handle);
+		if (clk_info->required) {
+			ath10k_err(ar, "Clock %s isn't available: %d\n",
+				   clk_info->name, ret);
+			goto out;
+		} else {
+			ath10k_dbg(ar, ATH10K_DBG_SNOC, "Ignoring clk %s: %d\n",
+				   clk_info->name,
+				   ret);
+			ret = 0;
+			goto out;
+		}
+	}
+
+	ath10k_dbg(ar, ATH10K_DBG_SNOC, "Clock: %s, freq: %u\n",
+		   clk_info->name, clk_info->freq);
+
+	clk_info->handle = handle;
+out:
+	return ret;
+}
+
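+/* Enable all configured regulators in array order; on failure the
+ * regulators enabled so far are rolled back in reverse order.
+ */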
+static int ath10k_wcn3990_vreg_on(struct ath10k *ar)
+{
+	int ret = 0;
+	struct ath10k_wcn3990_vreg_info *vreg_info;
+	int i;
+	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
+
+	for (i = 0; i < ATH10K_WCN3990_VREG_INFO_SIZE; i++) {
+		vreg_info = &ar_snoc->vreg[i];
+
+		if (!vreg_info->reg)
+			continue;
+
+		ath10k_dbg(ar, ATH10K_DBG_SNOC, "Regulator %s being enabled\n",
+			   vreg_info->name);
+
+		ret = regulator_set_voltage(vreg_info->reg, vreg_info->min_v,
+					    vreg_info->max_v);
+		if (ret) {
+			ath10k_err(ar,
+				   "vreg %s, set failed:min:%u,max:%u,ret: %d\n",
+				   vreg_info->name, vreg_info->min_v,
+				   vreg_info->max_v, ret);
+			break;
+		}
+
+		if (vreg_info->load_ua) {
+			ret = regulator_set_load(vreg_info->reg,
+						 vreg_info->load_ua);
+			if (ret < 0) {
+				ath10k_err(ar,
+					   "Reg %s, can't set load:%u,ret: %d\n",
+					   vreg_info->name,
+					   vreg_info->load_ua, ret);
+				break;
+			}
+		}
+
+		ret = regulator_enable(vreg_info->reg);
+		if (ret) {
+			ath10k_err(ar, "Regulator %s, can't enable: %d\n",
+				   vreg_info->name, ret);
+			break;
+		}
+
+		if (vreg_info->settle_delay)
+			udelay(vreg_info->settle_delay);
+	}
+
+	if (!ret)
+		return 0;
+
+	while (i--) {
+		vreg_info = &ar_snoc->vreg[i];
+
+		if (!vreg_info->reg)
+			continue;
+
+		regulator_disable(vreg_info->reg);
+		regulator_set_load(vreg_info->reg, 0);
+		regulator_set_voltage(vreg_info->reg, 0, vreg_info->max_v);
+	}
+
+	return ret;
+}
+
+static int ath10k_wcn3990_vreg_off(struct ath10k *ar)
+{
+	int ret = 0;
+	struct ath10k_wcn3990_vreg_info *vreg_info;
+	int i;
+	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
+
+	for (i = ATH10K_WCN3990_VREG_INFO_SIZE - 1; i >= 0; i--) {
+		vreg_info = &ar_snoc->vreg[i];
+
+		if (!vreg_info->reg)
+			continue;
+
+		ath10k_dbg(ar, ATH10K_DBG_SNOC, "Regulator %s being disabled\n",
+			   vreg_info->name);
+
+		ret = regulator_disable(vreg_info->reg);
+		if (ret)
+			ath10k_err(ar, "Regulator %s, can't disable: %d\n",
+				   vreg_info->name, ret);
+
+		ret = regulator_set_load(vreg_info->reg, 0);
+		if (ret < 0)
+			ath10k_err(ar, "Regulator %s, can't set load: %d\n",
+				   vreg_info->name, ret);
+
+		ret = regulator_set_voltage(vreg_info->reg, 0,
+					    vreg_info->max_v);
+		if (ret)
+			ath10k_err(ar, "Regulator %s, can't set voltage: %d\n",
+				   vreg_info->name, ret);
+	}
+
+	return ret;
+}
+
+static int ath10k_wcn3990_clk_init(struct ath10k *ar)
+{
+	struct ath10k_wcn3990_clk_info *clk_info;
+	int i;
+	int ret = 0;
+	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
+
+	for (i = 0; i < ATH10K_WCN3990_CLK_INFO_SIZE; i++) {
+		clk_info = &ar_snoc->clk[i];
+
+		if (!clk_info->handle)
+			continue;
+
+		ath10k_dbg(ar, ATH10K_DBG_SNOC, "Clock %s being enabled\n",
+			   clk_info->name);
+
+		if (clk_info->freq) {
+			ret = clk_set_rate(clk_info->handle, clk_info->freq);
+
+			if (ret) {
+				ath10k_err(ar, "Clk %s,set err: %u,ret: %d\n",
+					   clk_info->name, clk_info->freq,
+					   ret);
+				break;
+			}
+		}
+
+		ret = clk_prepare_enable(clk_info->handle);
+		if (ret) {
+			ath10k_err(ar, "Clock %s, can't enable: %d\n",
+				   clk_info->name, ret);
+			break;
+		}
+	}
+
+	if (ret == 0)
+		return 0;
+
+	while (i--) {
+		clk_info = &ar_snoc->clk[i];
+
+		if (!clk_info->handle)
+			continue;
+
+		clk_disable_unprepare(clk_info->handle);
+	}
+
+	return ret;
+}
+
+static int ath10k_wcn3990_clk_deinit(struct ath10k *ar)
+{
+	struct ath10k_wcn3990_clk_info *clk_info;
+	int i;
+	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
+
+	for (i = 0; i < ATH10K_WCN3990_CLK_INFO_SIZE; i++) {
+		clk_info = &ar_snoc->clk[i];
+
+		if (!clk_info->handle)
+			continue;
+
+		ath10k_dbg(ar, ATH10K_DBG_SNOC, "Clock %s being disabled\n",
+			   clk_info->name);
+
+		clk_disable_unprepare(clk_info->handle);
+	}
+
+	return 0;
+}
+
+static int ath10k_hw_power_on(struct ath10k *ar)
+{
+	int ret = 0;
+
+	ath10k_dbg(ar, ATH10K_DBG_SNOC, "HW Power on\n");
+
+	ret = ath10k_wcn3990_vreg_on(ar);
+	if (ret)
+		goto out;
+
+	ret = ath10k_wcn3990_clk_init(ar);
+	if (ret)
+		goto vreg_off;
+
+	return ret;
+
+vreg_off:
+	ath10k_wcn3990_vreg_off(ar);
+out:
+	return ret;
+}
+
+static int ath10k_hw_power_off(struct ath10k *ar)
+{
+	int ret = 0;
+
+	ath10k_dbg(ar, ATH10K_DBG_SNOC, "HW Power off\n");
+
+	ath10k_wcn3990_clk_deinit(ar);
+
+	ret = ath10k_wcn3990_vreg_off(ar);
+
+	return ret;
+}
+
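+/* Suspend/resume only toggle IRQ wake on the designated wake CE;
+ * hardware power is not touched here.
+ */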
+static int ath10k_snoc_hif_suspend(struct ath10k *ar)
+{
+	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
+	int ret = 0;
+
+	if (!ar_snoc)
+		return -EINVAL;
+
+	if (!device_may_wakeup(ar->dev))
+		return -EINVAL;
+
+	ret = enable_irq_wake(ar_snoc->ce_irqs[WCN3990_WAKE_IRQ_CE].irq_line);
+	if (ret) {
+		ath10k_dbg(ar, ATH10K_DBG_SNOC,
+			   "HIF Suspend: Failed to enable wakeup IRQ\n");
+		return ret;
+	}
+
+	ath10k_dbg(ar, ATH10K_DBG_SNOC, "HIF Suspended\n");
+	return ret;
+}
+
+static int ath10k_snoc_hif_resume(struct ath10k *ar)
+{
+	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
+	int ret = 0;
+
+	if (!ar_snoc)
+		return -EINVAL;
+
+	if (!device_may_wakeup(ar->dev))
+		return -EINVAL;
+
+	ret = disable_irq_wake(ar_snoc->ce_irqs[WCN3990_WAKE_IRQ_CE].irq_line);
+	if (ret) {
+		ath10k_dbg(ar, ATH10K_DBG_SNOC,
+			   "HIF Resume: Failed to disable wakeup IRQ\n");
+		return ret;
+	}
+
+	ath10k_dbg(ar, ATH10K_DBG_SNOC, "HIF Resumed\n");
+	return ret;
+}
+
+static const struct ath10k_hif_ops ath10k_snoc_hif_ops = {
+	.tx_sg			= ath10k_snoc_hif_tx_sg,
+	.start			= ath10k_snoc_hif_start,
+	.stop			= ath10k_snoc_hif_stop,
+	.map_service_to_pipe	= ath10k_snoc_hif_map_service_to_pipe,
+	.get_default_pipe	= ath10k_snoc_hif_get_default_pipe,
+	.send_complete_check	= ath10k_snoc_hif_send_complete_check,
+	.get_free_queue_number	= ath10k_snoc_hif_get_free_queue_number,
+	.power_up		= ath10k_snoc_hif_power_up,
+	.power_down		= ath10k_snoc_hif_power_down,
+	.read32			= ath10k_snoc_read32,
+	.write32		= ath10k_snoc_write32,
+	.suspend		= ath10k_snoc_hif_suspend,
+	.resume			= ath10k_snoc_hif_resume,
+};
+
+static const struct ath10k_bus_ops ath10k_snoc_bus_ops = {
+	.read32		= ath10k_snoc_read32,
+	.write32	= ath10k_snoc_write32,
+};
+
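+/* Probe: start the QMI service, set up DT-described regulators,
+ * clocks and IRQs, then register with the core immediately if
+ * firmware is already ready (otherwise FW_READY_IND completes it).
+ */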
+static int ath10k_snoc_probe(struct platform_device *pdev)
+{
+	int ret;
+	struct ath10k *ar;
+	struct ath10k_snoc *ar_snoc;
+	struct ath10k_snoc_qmi_config *qmi_cfg;
+	enum ath10k_hw_rev hw_rev;
+	struct device *dev;
+	u32 i;
+
+	dev = &pdev->dev;
+	hw_rev = ATH10K_HW_WCN3990;
+	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(37));
+	if (ret) {
+		dev_err(dev, "failed to set dma mask: %d\n", ret);
+		return ret;
+	}
+
+	ar = ath10k_core_create(sizeof(*ar_snoc), dev, ATH10K_BUS_SNOC,
+				hw_rev, &ath10k_snoc_hif_ops);
+	if (!ar) {
+		dev_err(dev, "failed to allocate core\n");
+		return -ENOMEM;
+	}
+
+	ar_snoc = ath10k_snoc_priv(ar);
+	if (!ar_snoc) {
+		ret = -EINVAL;
+		goto err_core_destroy;
+	}
+
+	ar_snoc->dev = pdev;
+	platform_set_drvdata(pdev, ar);
+	ar_snoc->ar = ar;
+
+	ret = ath10k_snoc_start_qmi_service(ar);
+	if (ret) {
+		ath10k_err(ar, "failed to start QMI service: %d\n", ret);
+		goto err_core_destroy;
+	}
+
+	qmi_cfg = &ar_snoc->qmi_cfg;
+	spin_lock_init(&ar_snoc->opaque_ctx.ce_lock);
+	ar_snoc->opaque_ctx.bus_ops = &ath10k_snoc_bus_ops;
+	ath10k_snoc_resource_init(ar);
+
+	ar->target_version = ATH10K_HW_WCN3990;
+	ar->hw->wiphy->hw_version = ATH10K_HW_WCN3990;
+	setup_timer(&ar_snoc->rx_post_retry, ath10k_snoc_rx_replenish_retry,
+		    (unsigned long)ar);
+
+	memcpy(ar_snoc->vreg, vreg_cfg, sizeof(vreg_cfg));
+	for (i = 0; i < ATH10K_WCN3990_VREG_INFO_SIZE; i++) {
+		ret = ath10k_get_vreg_info(ar, dev, &ar_snoc->vreg[i]);
+		if (ret)
+			goto err_core_destroy;
+	}
+
+	memcpy(ar_snoc->clk, clk_cfg, sizeof(clk_cfg));
+	for (i = 0; i < ATH10K_WCN3990_CLK_INFO_SIZE; i++) {
+		ret = ath10k_get_clk_info(ar, dev, &ar_snoc->clk[i]);
+		if (ret)
+			goto err_core_destroy;
+	}
+
+	ret = ath10k_hw_power_on(ar);
+	if (ret) {
+		ath10k_err(ar, "failed to power on device: %d\n", ret);
+		goto err_stop_qmi_service;
+	}
+
+	ret = ath10k_snoc_claim(ar);
+	if (ret) {
+		ath10k_err(ar, "failed to claim device: %d\n", ret);
+		goto err_hw_power_off;
+	}
+
+	ret = ath10k_snoc_alloc_pipes(ar);
+	if (ret) {
+		ath10k_err(ar, "failed to allocate copy engine pipes: %d\n",
+			   ret);
+		goto err_hw_power_off;
+	}
+
+	netif_napi_add(&ar->napi_dev, &ar->napi, ath10k_snoc_napi_poll,
+		       ATH10K_NAPI_BUDGET);
+
+	ret = ath10k_snoc_request_irq(ar);
+	if (ret) {
+		ath10k_warn(ar, "failed to request irqs: %d\n", ret);
+		goto err_free_pipes;
+	}
+
+	ar_snoc->drv_state = ATH10K_DRIVER_STATE_PROBED;
+	/* chip id needs to be retrieved from platform driver */
+	if (atomic_read(&qmi_cfg->fw_ready)) {
+		ret = ath10k_core_register(ar,
+					   ar_snoc->target_info.soc_version);
+		if (ret) {
+			ath10k_err(ar,
+				   "failed to register driver core: %d\n",
+				   ret);
+			goto err_free_irq;
+		}
+		ar_snoc->drv_state = ATH10K_DRIVER_STATE_STARTED;
+	}
+
+	ath10k_snoc_modem_ssr_register_notifier(ar);
+	ath10k_snoc_pd_restart_enable(ar);
+
+	ar_snoc->pm_notifier.notifier_call = ath10k_snoc_pm_notifier;
+	register_pm_notifier(&ar_snoc->pm_notifier);
+
+	ath10k_dbg(ar, ATH10K_DBG_SNOC, "%s:WCN3990 probed\n", __func__);
+
+	return 0;
+
+err_free_irq:
+	ath10k_snoc_free_irq(ar);
+
+err_free_pipes:
+	ath10k_snoc_free_pipes(ar);
+
+err_hw_power_off:
+	ath10k_hw_power_off(ar);
+
+err_stop_qmi_service:
+	ath10k_snoc_stop_qmi_service(ar);
+
+err_core_destroy:
+	ath10k_core_destroy(ar);
+
+	return ret;
+}
+
+static int ath10k_snoc_remove(struct platform_device *pdev)
+{
+	struct ath10k *ar = platform_get_drvdata(pdev);
+	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
+
+	if (!ar)
+		return -EINVAL;
+
+	if (!ar_snoc)
+		return -EINVAL;
+
+	ath10k_dbg(ar, ATH10K_DBG_SNOC, "%s:WCN3990 removed\n", __func__);
+
+	unregister_pm_notifier(&ar_snoc->pm_notifier);
+	ath10k_core_unregister(ar);
+	ath10k_snoc_pdr_unregister_notifier(ar);
+	ath10k_snoc_modem_ssr_unregister_notifier(ar);
+	ath10k_snoc_free_irq(ar);
+	ath10k_snoc_release_resource(ar);
+	ath10k_snoc_free_pipes(ar);
+	ath10k_snoc_stop_qmi_service(ar);
+	ath10k_hw_power_off(ar);
+	ath10k_core_destroy(ar);
+
+	return 0;
+}
+
+static const struct of_device_id ath10k_snoc_dt_match[] = {
+	{.compatible = "qcom,wcn3990-wifi"},
+	{}
+};
+MODULE_DEVICE_TABLE(of, ath10k_snoc_dt_match);
+
+static struct platform_driver ath10k_snoc_driver = {
+		.probe  = ath10k_snoc_probe,
+		.remove = ath10k_snoc_remove,
+		.driver = {
+			.name   = "ath10k_snoc",
+			.owner = THIS_MODULE,
+			.of_match_table = ath10k_snoc_dt_match,
+		},
+};
+
+static int __init ath10k_snoc_init(void)
+{
+	int ret;
+
+	ret = platform_driver_register(&ath10k_snoc_driver);
+	if (ret)
+		pr_err("failed to register ath10k snoc driver: %d\n",
+		       ret);
+
+	return ret;
+}
+module_init(ath10k_snoc_init);
+
+static void __exit ath10k_snoc_exit(void)
+{
+	platform_driver_unregister(&ath10k_snoc_driver);
+}
+module_exit(ath10k_snoc_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Driver support for Atheros WCN3990 SNOC devices");
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/net/wireless/ath/ath10k/snoc.h	2019-01-22 16:16:25.423263793 +0100
@@ -0,0 +1,220 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _SNOC_H_
+#define _SNOC_H_
+
+#include "hw.h"
+#include "ce.h"
+#include "pci.h"
+#include "qmi.h"
+#include <linux/kernel.h>
+#include <soc/qcom/service-locator.h>
+#define ATH10K_SNOC_RX_POST_RETRY_MS 50
+#define CE_POLL_PIPE 4
+#define ATH10K_SERVICE_LOCATION_CLIENT_NAME			"ATH10K-WLAN"
+#define ATH10K_WLAN_SERVICE_NAME					"wlan/fw"
+
+/* struct snoc_state: SNOC target state
+ * @pipe_cfg_addr: pipe configuration address
+ * @svc_to_pipe_map: service-to-pipe map address
+ */
+struct snoc_state {
+	u32 pipe_cfg_addr;
+	u32 svc_to_pipe_map;
+};
+
+/* struct ath10k_snoc_pipe: SNOC pipe configuration
+ * @ce_hdl: copy engine pipe handle
+ * @pipe_num: pipe number
+ * @hif_ce_state: pointer to ce state
+ * @buf_sz: buffer size
+ * @pipe_lock: protects the ce info
+ * @ar_snoc: snoc private structure
+ */
+struct ath10k_snoc_pipe {
+	struct ath10k_ce_pipe *ce_hdl;
+	u8 pipe_num;
+	struct ath10k *hif_ce_state;
+	size_t buf_sz;
+	/* protect ce info */
+	spinlock_t pipe_lock;
+	struct ath10k_snoc *ar_snoc;
+};
+
+/* struct ath10k_snoc_supp_chip: supported chip set
+ * @dev_id: device id
+ * @rev_id: revision id
+ */
+struct ath10k_snoc_supp_chip {
+	u32 dev_id;
+	u32 rev_id;
+};
+
+/* struct ath10k_snoc_info: SNOC info struct
+ * @v_addr: base virtual address
+ * @p_addr: base physical address
+ * @chip_id: chip id
+ * @chip_family: chip family
+ * @board_id: board id
+ * @soc_id: soc id
+ * @fw_version: fw version
+ */
+struct ath10k_snoc_info {
+	void __iomem *v_addr;
+	phys_addr_t p_addr;
+	u32 chip_id;
+	u32 chip_family;
+	u32 board_id;
+	u32 soc_id;
+	u32 fw_version;
+};
+
+/* struct ath10k_target_info: SNOC target info
+ * @target_version: target version
+ * @target_type: target type
+ * @target_revision: target revision
+ * @soc_version: target soc version
+ */
+struct ath10k_target_info {
+	u32 target_version;
+	u32 target_type;
+	u32 target_revision;
+	u32 soc_version;
+};
+
+/* struct ath10k_service_notifier_context: service notification context
+ * @handle: notifier handle
+ * @instance_id: domain instance id
+ * @name: domain name
+ */
+struct ath10k_service_notifier_context {
+	void *handle;
+	u32 instance_id;
+	char name[QMI_SERVREG_LOC_NAME_LENGTH_V01 + 1];
+};
+
+/* struct ath10k_snoc_ce_irq: copy engine irq struct
+ * @irq_req_stat: irq request status
+ * @irq_line: irq line
+ */
+struct ath10k_snoc_ce_irq {
+	atomic_t irq_req_stat;
+	u32 irq_line;
+};
+
+struct ath10k_wcn3990_vreg_info {
+	struct regulator *reg;
+	const char *name;
+	u32 min_v;
+	u32 max_v;
+	u32 load_ua;
+	unsigned long settle_delay;
+	bool required;
+};
+
+struct ath10k_wcn3990_clk_info {
+	struct clk *handle;
+	const char *name;
+	u32 freq;
+	bool required;
+};
+
+static struct ath10k_wcn3990_vreg_info vreg_cfg[] = {
+	{NULL, "vdd-0.8-cx-mx", 800000, 800000, 0, 0, false},
+	{NULL, "vdd-1.8-xo", 1800000, 1800000, 0, 0, false},
+	{NULL, "vdd-1.3-rfa", 1304000, 1304000, 0, 0, false},
+	{NULL, "vdd-3.3-ch0", 3312000, 3312000, 0, 0, false},
+};
+
+#define ATH10K_WCN3990_VREG_INFO_SIZE		ARRAY_SIZE(vreg_cfg)
+
+static struct ath10k_wcn3990_clk_info clk_cfg[] = {
+	{NULL, "cxo_ref_clk_pin", 0, false},
+};
+
+#define ATH10K_WCN3990_CLK_INFO_SIZE		ARRAY_SIZE(clk_cfg)
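+
+/*
+ * vreg_cfg and clk_cfg are templates: ath10k_snoc_probe() memcpy()s them
+ * into the per-device ar_snoc->vreg[]/ar_snoc->clk[] arrays and then resolves
+ * each entry with ath10k_get_vreg_info()/ath10k_get_clk_info(), so these
+ * shared tables are never modified at run time.
+ */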
+
+enum ath10k_driver_state {
+	ATH10K_DRIVER_STATE_PROBED,
+	ATH10K_DRIVER_STATE_STARTED,
+};
+
+/* struct ath10k_snoc: SNOC info struct
+ * @opaque_ctx: opaque bus context
+ * @dev: platform device structure
+ * @ar: ath10k base structure
+ * @mem: mem base virtual address
+ * @mem_pa: mem base physical address
+ * @target_info: snoc target info
+ * @mem_len: memory map length
+ * @pipe_info: pipe info struct
+ * @rx_post_retry: rx buffer post processing timer
+ * @ce_irqs: copy engine irq list
+ * @vaddr_rri_on_ddr: virtual address for RRI
+ * @is_driver_probed: flag to indicate driver state
+ * @modem_ssr_nb: notifier callback for modem notification
+ * @pm_notifier: notifier callback for PM events
+ * @modem_notify_handler: modem notification handler
+ * @service_notifier: notifier context for service notification
+ * @service_notifier_nb: notifier callback for service notification
+ * @total_domains: number of service domains
+ * @get_service_nb: notifier callback for service discovery
+ * @fw_crashed: fw state flag
+ * @pm_ops_inprogress: set while a PM transition is in progress
+ * @qmi_cfg: QMI service configuration
+ * @vreg: per-device copy of the regulator table
+ * @clk: per-device copy of the clock table
+ * @drv_state: driver probe/start state
+ */
+struct ath10k_snoc {
+	struct bus_opaque opaque_ctx;
+	struct platform_device *dev;
+	struct ath10k *ar;
+	void __iomem *mem;
+	dma_addr_t mem_pa;
+	struct ath10k_target_info target_info;
+	size_t mem_len;
+	struct ath10k_snoc_pipe pipe_info[CE_COUNT_MAX];
+	struct timer_list rx_post_retry;
+	struct ath10k_snoc_ce_irq ce_irqs[CE_COUNT_MAX];
+	u32 *vaddr_rri_on_ddr;
+	bool is_driver_probed;
+	struct notifier_block modem_ssr_nb;
+	struct notifier_block pm_notifier;
+	void *modem_notify_handler;
+	struct ath10k_service_notifier_context *service_notifier;
+	struct notifier_block service_notifier_nb;
+	int total_domains;
+	struct notifier_block get_service_nb;
+	atomic_t fw_crashed;
+	atomic_t pm_ops_inprogress;
+	struct ath10k_snoc_qmi_config qmi_cfg;
+	struct ath10k_wcn3990_vreg_info vreg[ATH10K_WCN3990_VREG_INFO_SIZE];
+	struct ath10k_wcn3990_clk_info clk[ATH10K_WCN3990_CLK_INFO_SIZE];
+	enum ath10k_driver_state drv_state;
+};
+
+struct ath10k_event_pd_down_data {
+	bool crashed;
+	bool fw_rejuvenate;
+};
+
+static inline struct ath10k_snoc *ath10k_snoc_priv(struct ath10k *ar)
+{
+	return (struct ath10k_snoc *)ar->drv_priv;
+}
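+
+/*
+ * This accessor assumes ar->drv_priv points at the bus-private area allocated
+ * together with struct ath10k when the core was created (sized for
+ * struct ath10k_snoc); it is a plain pointer cast, so no locking is implied.
+ */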
+
+void ath10k_snoc_write32(struct ath10k *ar, u32 offset, u32 value);
+void ath10k_snoc_soc_write32(struct ath10k *ar, u32 addr, u32 val);
+void ath10k_snoc_reg_write32(struct ath10k *ar, u32 addr, u32 val);
+u32 ath10k_snoc_read32(struct ath10k *ar, u32 offset);
+u32 ath10k_snoc_soc_read32(struct ath10k *ar, u32 addr);
+u32 ath10k_snoc_reg_read32(struct ath10k *ar, u32 addr);
+
+#endif /* _SNOC_H_ */
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/net/wireless/ath/ath10k/wcn3990_qmi_service_v01.c	2019-01-22 16:16:25.427263830 +0100
@@ -0,0 +1,2091 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/qmi_encdec.h>
+
+#include <soc/qcom/msm_qmi_interface.h>
+
+#include "wcn3990_qmi_service_v01.h"
+
+static struct elem_info wlfw_ce_tgt_pipe_cfg_s_v01_ei[] = {
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint32_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct wlfw_ce_tgt_pipe_cfg_s_v01,
+					   pipe_num),
+	},
+	{
+		.data_type      = QMI_SIGNED_4_BYTE_ENUM,
+		.elem_len       = 1,
+		.elem_size      = sizeof(enum wlfw_pipedir_enum_v01),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct wlfw_ce_tgt_pipe_cfg_s_v01,
+					   pipe_dir),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint32_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct wlfw_ce_tgt_pipe_cfg_s_v01,
+					   nentries),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint32_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct wlfw_ce_tgt_pipe_cfg_s_v01,
+					   nbytes_max),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint32_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct wlfw_ce_tgt_pipe_cfg_s_v01,
+					   flags),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
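+
+/*
+ * How to read these tables: each struct elem_info entry drives the msm
+ * qmi_encdec marshalling code (qmi_kernel_encode()/qmi_kernel_decode()).
+ * data_type picks the wire encoding, elem_len/elem_size give element count
+ * and size, tlv_type is the Type byte of the on-wire TLV header, offset
+ * locates the field in the C struct, and a QMI_EOTI entry terminates the
+ * table.  Members of nested structs such as the one above carry tlv_type 0:
+ * only top-level message elements get their own Type (1 byte) /
+ * Length (2 bytes, little endian) / Value header.
+ */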
+
+static struct elem_info wlfw_ce_svc_pipe_cfg_s_v01_ei[] = {
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint32_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct wlfw_ce_svc_pipe_cfg_s_v01,
+					   service_id),
+	},
+	{
+		.data_type      = QMI_SIGNED_4_BYTE_ENUM,
+		.elem_len       = 1,
+		.elem_size      = sizeof(enum wlfw_pipedir_enum_v01),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct wlfw_ce_svc_pipe_cfg_s_v01,
+					   pipe_dir),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint32_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct wlfw_ce_svc_pipe_cfg_s_v01,
+					   pipe_num),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+static struct elem_info wlfw_shadow_reg_cfg_s_v01_ei[] = {
+	{
+		.data_type      = QMI_UNSIGNED_2_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint16_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct wlfw_shadow_reg_cfg_s_v01,
+					   id),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_2_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint16_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct wlfw_shadow_reg_cfg_s_v01,
+					   offset),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+static struct elem_info wlfw_shadow_reg_v2_cfg_s_v01_ei[] = {
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint32_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct wlfw_shadow_reg_v2_cfg_s_v01,
+					   addr),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+static struct elem_info wlfw_memory_region_info_s_v01_ei[] = {
+	{
+		.data_type      = QMI_UNSIGNED_8_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint64_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct wlfw_memory_region_info_s_v01,
+					   region_addr),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint32_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct wlfw_memory_region_info_s_v01,
+					   size),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct wlfw_memory_region_info_s_v01,
+					   secure_flag),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+static struct elem_info wlfw_rf_chip_info_s_v01_ei[] = {
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint32_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct wlfw_rf_chip_info_s_v01,
+					   chip_id),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint32_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct wlfw_rf_chip_info_s_v01,
+					   chip_family),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+static struct elem_info wlfw_rf_board_info_s_v01_ei[] = {
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint32_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct wlfw_rf_board_info_s_v01,
+					   board_id),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+static struct elem_info wlfw_soc_info_s_v01_ei[] = {
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint32_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct wlfw_soc_info_s_v01,
+					   soc_id),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+static struct elem_info wlfw_fw_version_info_s_v01_ei[] = {
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint32_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct wlfw_fw_version_info_s_v01,
+					   fw_version),
+	},
+	{
+		.data_type      = QMI_STRING,
+		.elem_len       = QMI_WLFW_MAX_TIMESTAMP_LEN_V01 + 1,
+		.elem_size      = sizeof(char),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct wlfw_fw_version_info_s_v01,
+					   fw_build_timestamp),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info wlfw_ind_register_req_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(struct wlfw_ind_register_req_msg_v01,
+					   fw_ready_enable_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(struct wlfw_ind_register_req_msg_v01,
+					   fw_ready_enable),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x11,
+		.offset         = offsetof(struct wlfw_ind_register_req_msg_v01,
+					   initiate_cal_download_enable_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x11,
+		.offset         = offsetof(struct wlfw_ind_register_req_msg_v01,
+					   initiate_cal_download_enable),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x12,
+		.offset         = offsetof(struct wlfw_ind_register_req_msg_v01,
+					   initiate_cal_update_enable_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x12,
+		.offset         = offsetof(struct wlfw_ind_register_req_msg_v01,
+					   initiate_cal_update_enable),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x13,
+		.offset         = offsetof(struct wlfw_ind_register_req_msg_v01,
+					   msa_ready_enable_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x13,
+		.offset         = offsetof(struct wlfw_ind_register_req_msg_v01,
+					   msa_ready_enable),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x14,
+		.offset         = offsetof(struct wlfw_ind_register_req_msg_v01,
+					   pin_connect_result_enable_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x14,
+		.offset         = offsetof(struct wlfw_ind_register_req_msg_v01,
+					   pin_connect_result_enable),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x15,
+		.offset         = offsetof(struct wlfw_ind_register_req_msg_v01,
+					   client_id_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint32_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x15,
+		.offset         = offsetof(struct wlfw_ind_register_req_msg_v01,
+					   client_id),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x16,
+		.offset         = offsetof(struct wlfw_ind_register_req_msg_v01,
+					   request_mem_enable_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x16,
+		.offset         = offsetof(struct wlfw_ind_register_req_msg_v01,
+					   request_mem_enable),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x17,
+		.offset         = offsetof(struct wlfw_ind_register_req_msg_v01,
+					   fw_mem_ready_enable_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x17,
+		.offset         = offsetof(struct wlfw_ind_register_req_msg_v01,
+					   fw_mem_ready_enable),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x18,
+		.offset         = offsetof(struct wlfw_ind_register_req_msg_v01,
+					   cold_boot_cal_done_enable_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x18,
+		.offset         = offsetof(struct wlfw_ind_register_req_msg_v01,
+					   cold_boot_cal_done_enable),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x19,
+		.offset         = offsetof(struct wlfw_ind_register_req_msg_v01,
+					   rejuvenate_enable_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint32_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x19,
+		.offset         = offsetof(struct wlfw_ind_register_req_msg_v01,
+					   rejuvenate_enable),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
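+
+/*
+ * Pattern used by all request/response tables below: an optional field is a
+ * pair of entries sharing one tlv_type, a QMI_OPT_FLAG entry bound to the
+ * foo_valid byte followed by the payload entry for foo itself.  The encoder
+ * emits the TLV only when foo_valid is set and the decoder sets foo_valid
+ * when the TLV arrives, so consumers must check the _valid flag before
+ * trusting the field.
+ */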
+
+struct elem_info wlfw_ind_register_resp_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = 1,
+		.elem_size      = sizeof(struct qmi_response_type_v01),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x02,
+		.offset         = offsetof(
+				   struct wlfw_ind_register_resp_msg_v01,
+				   resp),
+		.ei_array      = get_qmi_response_type_v01_ei(),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(
+				   struct wlfw_ind_register_resp_msg_v01,
+				   fw_status_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_8_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint64_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(
+				   struct wlfw_ind_register_resp_msg_v01,
+				   fw_status),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info wlfw_fw_ready_ind_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info wlfw_msa_ready_ind_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info wlfw_pin_connect_result_ind_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(
+				   struct wlfw_pin_connect_result_ind_msg_v01,
+				   pwr_pin_result_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint32_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(
+				   struct wlfw_pin_connect_result_ind_msg_v01,
+				   pwr_pin_result),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x11,
+		.offset         = offsetof(
+				   struct wlfw_pin_connect_result_ind_msg_v01,
+				   phy_io_pin_result_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint32_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x11,
+		.offset         = offsetof(
+				   struct wlfw_pin_connect_result_ind_msg_v01,
+				   phy_io_pin_result),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x12,
+		.offset         = offsetof(
+				   struct wlfw_pin_connect_result_ind_msg_v01,
+				   rf_pin_result_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint32_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x12,
+		.offset         = offsetof(
+				   struct wlfw_pin_connect_result_ind_msg_v01,
+				   rf_pin_result),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info wlfw_wlan_mode_req_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_SIGNED_4_BYTE_ENUM,
+		.elem_len       = 1,
+		.elem_size      = sizeof(enum wlfw_driver_mode_enum_v01),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x01,
+		.offset         = offsetof(struct wlfw_wlan_mode_req_msg_v01,
+					   mode),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(struct wlfw_wlan_mode_req_msg_v01,
+					   hw_debug_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(struct wlfw_wlan_mode_req_msg_v01,
+					   hw_debug),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info wlfw_wlan_mode_resp_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = 1,
+		.elem_size      = sizeof(struct qmi_response_type_v01),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x02,
+		.offset         = offsetof(struct wlfw_wlan_mode_resp_msg_v01,
+					   resp),
+		.ei_array      = get_qmi_response_type_v01_ei(),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info wlfw_wlan_cfg_req_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(struct wlfw_wlan_cfg_req_msg_v01,
+					   host_version_valid),
+	},
+	{
+		.data_type      = QMI_STRING,
+		.elem_len       = QMI_WLFW_MAX_STR_LEN_V01 + 1,
+		.elem_size      = sizeof(char),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(struct wlfw_wlan_cfg_req_msg_v01,
+					   host_version),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x11,
+		.offset         = offsetof(struct wlfw_wlan_cfg_req_msg_v01,
+					   tgt_cfg_valid),
+	},
+	{
+		.data_type      = QMI_DATA_LEN,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x11,
+		.offset         = offsetof(struct wlfw_wlan_cfg_req_msg_v01,
+					   tgt_cfg_len),
+	},
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = QMI_WLFW_MAX_NUM_CE_V01,
+		.elem_size      = sizeof(struct wlfw_ce_tgt_pipe_cfg_s_v01),
+		.is_array       = VAR_LEN_ARRAY,
+		.tlv_type       = 0x11,
+		.offset         = offsetof(struct wlfw_wlan_cfg_req_msg_v01,
+					   tgt_cfg),
+		.ei_array      = wlfw_ce_tgt_pipe_cfg_s_v01_ei,
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x12,
+		.offset         = offsetof(struct wlfw_wlan_cfg_req_msg_v01,
+					   svc_cfg_valid),
+	},
+	{
+		.data_type      = QMI_DATA_LEN,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x12,
+		.offset         = offsetof(struct wlfw_wlan_cfg_req_msg_v01,
+					   svc_cfg_len),
+	},
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = QMI_WLFW_MAX_NUM_SVC_V01,
+		.elem_size      = sizeof(struct wlfw_ce_svc_pipe_cfg_s_v01),
+		.is_array       = VAR_LEN_ARRAY,
+		.tlv_type       = 0x12,
+		.offset         = offsetof(struct wlfw_wlan_cfg_req_msg_v01,
+					   svc_cfg),
+		.ei_array      = wlfw_ce_svc_pipe_cfg_s_v01_ei,
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x13,
+		.offset         = offsetof(struct wlfw_wlan_cfg_req_msg_v01,
+					   shadow_reg_valid),
+	},
+	{
+		.data_type      = QMI_DATA_LEN,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x13,
+		.offset         = offsetof(struct wlfw_wlan_cfg_req_msg_v01,
+					   shadow_reg_len),
+	},
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = QMI_WLFW_MAX_NUM_SHADOW_REG_V01,
+		.elem_size      = sizeof(struct wlfw_shadow_reg_cfg_s_v01),
+		.is_array       = VAR_LEN_ARRAY,
+		.tlv_type       = 0x13,
+		.offset         = offsetof(struct wlfw_wlan_cfg_req_msg_v01,
+					   shadow_reg),
+		.ei_array      = wlfw_shadow_reg_cfg_s_v01_ei,
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x14,
+		.offset         = offsetof(struct wlfw_wlan_cfg_req_msg_v01,
+					   shadow_reg_v2_valid),
+	},
+	{
+		.data_type      = QMI_DATA_LEN,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x14,
+		.offset         = offsetof(struct wlfw_wlan_cfg_req_msg_v01,
+					   shadow_reg_v2_len),
+	},
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = QMI_WLFW_MAX_NUM_SHADOW_REG_V2_V01,
+		.elem_size      = sizeof(struct wlfw_shadow_reg_v2_cfg_s_v01),
+		.is_array       = VAR_LEN_ARRAY,
+		.tlv_type       = 0x14,
+		.offset         = offsetof(struct wlfw_wlan_cfg_req_msg_v01,
+					   shadow_reg_v2),
+		.ei_array      = wlfw_shadow_reg_v2_cfg_s_v01_ei,
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
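+
+/*
+ * Variable-length arrays such as tgt_cfg above are always described as a
+ * QMI_DATA_LEN entry followed by a VAR_LEN_ARRAY entry with the same
+ * tlv_type: the *_len field holds the actual element count while elem_len on
+ * the array entry (QMI_WLFW_MAX_NUM_CE_V01 here) is only the upper bound, so
+ * a sender fills, e.g., req->tgt_cfg_len = n and tgt_cfg[0..n-1] before
+ * encoding.
+ */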
+
+struct elem_info wlfw_wlan_cfg_resp_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = 1,
+		.elem_size      = sizeof(struct qmi_response_type_v01),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x02,
+		.offset         = offsetof(struct wlfw_wlan_cfg_resp_msg_v01,
+					   resp),
+		.ei_array      = get_qmi_response_type_v01_ei(),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info wlfw_cap_req_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info wlfw_cap_resp_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = 1,
+		.elem_size      = sizeof(struct qmi_response_type_v01),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x02,
+		.offset         = offsetof(struct wlfw_cap_resp_msg_v01,
+					   resp),
+		.ei_array      = get_qmi_response_type_v01_ei(),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(struct wlfw_cap_resp_msg_v01,
+					   chip_info_valid),
+	},
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = 1,
+		.elem_size      = sizeof(struct wlfw_rf_chip_info_s_v01),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(struct wlfw_cap_resp_msg_v01,
+					   chip_info),
+		.ei_array      = wlfw_rf_chip_info_s_v01_ei,
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x11,
+		.offset         = offsetof(struct wlfw_cap_resp_msg_v01,
+					   board_info_valid),
+	},
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = 1,
+		.elem_size      = sizeof(struct wlfw_rf_board_info_s_v01),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x11,
+		.offset         = offsetof(struct wlfw_cap_resp_msg_v01,
+					   board_info),
+		.ei_array      = wlfw_rf_board_info_s_v01_ei,
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x12,
+		.offset         = offsetof(struct wlfw_cap_resp_msg_v01,
+					   soc_info_valid),
+	},
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = 1,
+		.elem_size      = sizeof(struct wlfw_soc_info_s_v01),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x12,
+		.offset         = offsetof(struct wlfw_cap_resp_msg_v01,
+					   soc_info),
+		.ei_array      = wlfw_soc_info_s_v01_ei,
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x13,
+		.offset         = offsetof(struct wlfw_cap_resp_msg_v01,
+					   fw_version_info_valid),
+	},
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = 1,
+		.elem_size      = sizeof(struct wlfw_fw_version_info_s_v01),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x13,
+		.offset         = offsetof(struct wlfw_cap_resp_msg_v01,
+					   fw_version_info),
+		.ei_array      = wlfw_fw_version_info_s_v01_ei,
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x14,
+		.offset         = offsetof(struct wlfw_cap_resp_msg_v01,
+					   fw_build_id_valid),
+	},
+	{
+		.data_type      = QMI_STRING,
+		.elem_len       = QMI_WLFW_MAX_BUILD_ID_LEN_V01 + 1,
+		.elem_size      = sizeof(char),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x14,
+		.offset         = offsetof(struct wlfw_cap_resp_msg_v01,
+					   fw_build_id),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info wlfw_bdf_download_req_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x01,
+		.offset         = offsetof(struct wlfw_bdf_download_req_msg_v01,
+					   valid),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(struct wlfw_bdf_download_req_msg_v01,
+					   file_id_valid),
+	},
+	{
+		.data_type      = QMI_SIGNED_4_BYTE_ENUM,
+		.elem_len       = 1,
+		.elem_size      = sizeof(enum wlfw_cal_temp_id_enum_v01),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(struct wlfw_bdf_download_req_msg_v01,
+					   file_id),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x11,
+		.offset         = offsetof(struct wlfw_bdf_download_req_msg_v01,
+					   total_size_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint32_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x11,
+		.offset         = offsetof(struct wlfw_bdf_download_req_msg_v01,
+					   total_size),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x12,
+		.offset         = offsetof(struct wlfw_bdf_download_req_msg_v01,
+					   seg_id_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint32_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x12,
+		.offset         = offsetof(struct wlfw_bdf_download_req_msg_v01,
+					   seg_id),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x13,
+		.offset         = offsetof(struct wlfw_bdf_download_req_msg_v01,
+					   data_valid),
+	},
+	{
+		.data_type      = QMI_DATA_LEN,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint16_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x13,
+		.offset         = offsetof(struct wlfw_bdf_download_req_msg_v01,
+					   data_len),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = QMI_WLFW_MAX_DATA_SIZE_V01,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = VAR_LEN_ARRAY,
+		.tlv_type       = 0x13,
+		.offset         = offsetof(struct wlfw_bdf_download_req_msg_v01,
+					   data),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x14,
+		.offset         = offsetof(struct wlfw_bdf_download_req_msg_v01,
+					   end_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x14,
+		.offset         = offsetof(struct wlfw_bdf_download_req_msg_v01,
+					   end),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
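+
+/*
+ * The BDF download request (and the cal download/update messages below) moves
+ * a large blob in chunks of at most QMI_WLFW_MAX_DATA_SIZE_V01 bytes:
+ * total_size is announced up front, seg_id numbers the chunks in order and
+ * end is set to 1 on the final segment so the firmware can tell the transfer
+ * is complete.
+ */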
+
+struct elem_info wlfw_bdf_download_resp_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = 1,
+		.elem_size      = sizeof(struct qmi_response_type_v01),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x02,
+		.offset         = offsetof(
+				   struct wlfw_bdf_download_resp_msg_v01,
+				   resp),
+		.ei_array      = get_qmi_response_type_v01_ei(),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info wlfw_cal_report_req_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_DATA_LEN,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x01,
+		.offset         = offsetof(struct wlfw_cal_report_req_msg_v01,
+					   meta_data_len),
+	},
+	{
+		.data_type      = QMI_SIGNED_4_BYTE_ENUM,
+		.elem_len       = QMI_WLFW_MAX_NUM_CAL_V01,
+		.elem_size      = sizeof(enum wlfw_cal_temp_id_enum_v01),
+		.is_array       = VAR_LEN_ARRAY,
+		.tlv_type       = 0x01,
+		.offset         = offsetof(struct wlfw_cal_report_req_msg_v01,
+					   meta_data),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info wlfw_cal_report_resp_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = 1,
+		.elem_size      = sizeof(struct qmi_response_type_v01),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x02,
+		.offset         = offsetof(struct wlfw_cal_report_resp_msg_v01,
+					   resp),
+		.ei_array      = get_qmi_response_type_v01_ei(),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info wlfw_initiate_cal_download_ind_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_SIGNED_4_BYTE_ENUM,
+		.elem_len       = 1,
+		.elem_size      = sizeof(enum wlfw_cal_temp_id_enum_v01),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x01,
+		.offset         = offsetof(
+				struct wlfw_initiate_cal_download_ind_msg_v01,
+				cal_id),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info wlfw_cal_download_req_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x01,
+		.offset         = offsetof(struct wlfw_cal_download_req_msg_v01,
+					   valid),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(struct wlfw_cal_download_req_msg_v01,
+					   file_id_valid),
+	},
+	{
+		.data_type      = QMI_SIGNED_4_BYTE_ENUM,
+		.elem_len       = 1,
+		.elem_size      = sizeof(enum wlfw_cal_temp_id_enum_v01),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(struct wlfw_cal_download_req_msg_v01,
+					   file_id),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x11,
+		.offset         = offsetof(struct wlfw_cal_download_req_msg_v01,
+					   total_size_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint32_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x11,
+		.offset         = offsetof(struct wlfw_cal_download_req_msg_v01,
+					   total_size),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x12,
+		.offset         = offsetof(struct wlfw_cal_download_req_msg_v01,
+					   seg_id_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint32_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x12,
+		.offset         = offsetof(struct wlfw_cal_download_req_msg_v01,
+					   seg_id),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x13,
+		.offset         = offsetof(struct wlfw_cal_download_req_msg_v01,
+					   data_valid),
+	},
+	{
+		.data_type      = QMI_DATA_LEN,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint16_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x13,
+		.offset         = offsetof(struct wlfw_cal_download_req_msg_v01,
+					   data_len),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = QMI_WLFW_MAX_DATA_SIZE_V01,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = VAR_LEN_ARRAY,
+		.tlv_type       = 0x13,
+		.offset         = offsetof(struct wlfw_cal_download_req_msg_v01,
+					   data),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x14,
+		.offset         = offsetof(struct wlfw_cal_download_req_msg_v01,
+					   end_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x14,
+		.offset         = offsetof(struct wlfw_cal_download_req_msg_v01,
+					   end),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info wlfw_cal_download_resp_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = 1,
+		.elem_size      = sizeof(struct qmi_response_type_v01),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x02,
+		.offset         = offsetof(
+				   struct wlfw_cal_download_resp_msg_v01,
+				   resp),
+		.ei_array      = get_qmi_response_type_v01_ei(),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info wlfw_initiate_cal_update_ind_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_SIGNED_4_BYTE_ENUM,
+		.elem_len       = 1,
+		.elem_size      = sizeof(enum wlfw_cal_temp_id_enum_v01),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x01,
+		.offset         = offsetof(
+				   struct wlfw_initiate_cal_update_ind_msg_v01,
+				   cal_id),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint32_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x02,
+		.offset         = offsetof(
+				struct wlfw_initiate_cal_update_ind_msg_v01,
+				total_size),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info wlfw_cal_update_req_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_SIGNED_4_BYTE_ENUM,
+		.elem_len       = 1,
+		.elem_size      = sizeof(enum wlfw_cal_temp_id_enum_v01),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x01,
+		.offset         = offsetof(struct wlfw_cal_update_req_msg_v01,
+					   cal_id),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint32_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x02,
+		.offset         = offsetof(struct wlfw_cal_update_req_msg_v01,
+					   seg_id),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info wlfw_cal_update_resp_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = 1,
+		.elem_size      = sizeof(struct qmi_response_type_v01),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x02,
+		.offset         = offsetof(struct wlfw_cal_update_resp_msg_v01,
+					   resp),
+		.ei_array      = get_qmi_response_type_v01_ei(),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(struct wlfw_cal_update_resp_msg_v01,
+					   file_id_valid),
+	},
+	{
+		.data_type      = QMI_SIGNED_4_BYTE_ENUM,
+		.elem_len       = 1,
+		.elem_size      = sizeof(enum wlfw_cal_temp_id_enum_v01),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(struct wlfw_cal_update_resp_msg_v01,
+					   file_id),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x11,
+		.offset         = offsetof(struct wlfw_cal_update_resp_msg_v01,
+					   total_size_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint32_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x11,
+		.offset         = offsetof(struct wlfw_cal_update_resp_msg_v01,
+					   total_size),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x12,
+		.offset         = offsetof(struct wlfw_cal_update_resp_msg_v01,
+					   seg_id_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint32_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x12,
+		.offset         = offsetof(struct wlfw_cal_update_resp_msg_v01,
+					   seg_id),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x13,
+		.offset         = offsetof(struct wlfw_cal_update_resp_msg_v01,
+					   data_valid),
+	},
+	{
+		.data_type      = QMI_DATA_LEN,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint16_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x13,
+		.offset         = offsetof(struct wlfw_cal_update_resp_msg_v01,
+					   data_len),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = QMI_WLFW_MAX_DATA_SIZE_V01,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = VAR_LEN_ARRAY,
+		.tlv_type       = 0x13,
+		.offset         = offsetof(struct wlfw_cal_update_resp_msg_v01,
+					   data),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x14,
+		.offset         = offsetof(struct wlfw_cal_update_resp_msg_v01,
+					   end_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x14,
+		.offset         = offsetof(struct wlfw_cal_update_resp_msg_v01,
+					   end),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info wlfw_msa_info_req_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_UNSIGNED_8_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint64_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x01,
+		.offset         = offsetof(struct wlfw_msa_info_req_msg_v01,
+					   msa_addr),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint32_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x02,
+		.offset         = offsetof(struct wlfw_msa_info_req_msg_v01,
+					   size),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info wlfw_msa_info_resp_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = 1,
+		.elem_size      = sizeof(struct qmi_response_type_v01),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x02,
+		.offset         = offsetof(struct wlfw_msa_info_resp_msg_v01,
+					   resp),
+		.ei_array      = get_qmi_response_type_v01_ei(),
+	},
+	{
+		.data_type      = QMI_DATA_LEN,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x03,
+		.offset         = offsetof(struct wlfw_msa_info_resp_msg_v01,
+					   mem_region_info_len),
+	},
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = QMI_WLFW_MAX_NUM_MEMORY_REGIONS_V01,
+		.elem_size      = sizeof(struct wlfw_memory_region_info_s_v01),
+		.is_array       = VAR_LEN_ARRAY,
+		.tlv_type       = 0x03,
+		.offset         = offsetof(struct wlfw_msa_info_resp_msg_v01,
+					   mem_region_info),
+		.ei_array      = wlfw_memory_region_info_s_v01_ei,
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info wlfw_msa_ready_req_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info wlfw_msa_ready_resp_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = 1,
+		.elem_size      = sizeof(struct qmi_response_type_v01),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x02,
+		.offset         = offsetof(struct wlfw_msa_ready_resp_msg_v01,
+					   resp),
+		.ei_array      = get_qmi_response_type_v01_ei(),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info wlfw_ini_req_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(struct wlfw_ini_req_msg_v01,
+					   enablefwlog_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(struct wlfw_ini_req_msg_v01,
+					   enablefwlog),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info wlfw_ini_resp_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = 1,
+		.elem_size      = sizeof(struct qmi_response_type_v01),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x02,
+		.offset         = offsetof(struct wlfw_ini_resp_msg_v01,
+					   resp),
+		.ei_array      = get_qmi_response_type_v01_ei(),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info wlfw_athdiag_read_req_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint32_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x01,
+		.offset         = offsetof(struct wlfw_athdiag_read_req_msg_v01,
+					   offset),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint32_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x02,
+		.offset         = offsetof(struct wlfw_athdiag_read_req_msg_v01,
+					   mem_type),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint32_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x03,
+		.offset         = offsetof(struct wlfw_athdiag_read_req_msg_v01,
+					   data_len),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info wlfw_athdiag_read_resp_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = 1,
+		.elem_size      = sizeof(struct qmi_response_type_v01),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x02,
+		.offset         = offsetof(
+				   struct wlfw_athdiag_read_resp_msg_v01,
+				   resp),
+		.ei_array      = get_qmi_response_type_v01_ei(),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(
+				   struct wlfw_athdiag_read_resp_msg_v01,
+				   data_valid),
+	},
+	{
+		.data_type      = QMI_DATA_LEN,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint16_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(
+				   struct wlfw_athdiag_read_resp_msg_v01,
+				   data_len),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = QMI_WLFW_MAX_DATA_SIZE_V01,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = VAR_LEN_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(
+				   struct wlfw_athdiag_read_resp_msg_v01,
+				   data),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info wlfw_athdiag_write_req_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint32_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x01,
+		.offset         = offsetof(
+				   struct wlfw_athdiag_write_req_msg_v01,
+				   offset),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint32_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x02,
+		.offset         = offsetof(
+				   struct wlfw_athdiag_write_req_msg_v01,
+				   mem_type),
+	},
+	{
+		.data_type      = QMI_DATA_LEN,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint16_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x03,
+		.offset         = offsetof(
+				   struct wlfw_athdiag_write_req_msg_v01,
+				   data_len),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = QMI_WLFW_MAX_DATA_SIZE_V01,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = VAR_LEN_ARRAY,
+		.tlv_type       = 0x03,
+		.offset         = offsetof(
+				   struct wlfw_athdiag_write_req_msg_v01,
+				   data),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info wlfw_athdiag_write_resp_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = 1,
+		.elem_size      = sizeof(struct qmi_response_type_v01),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x02,
+		.offset         = offsetof(
+				   struct wlfw_athdiag_write_resp_msg_v01,
+				   resp),
+		.ei_array      = get_qmi_response_type_v01_ei(),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info wlfw_vbatt_req_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_UNSIGNED_8_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint64_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x01,
+		.offset         = offsetof(struct wlfw_vbatt_req_msg_v01,
+					   voltage_uv),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info wlfw_vbatt_resp_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = 1,
+		.elem_size      = sizeof(struct qmi_response_type_v01),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x02,
+		.offset         = offsetof(struct wlfw_vbatt_resp_msg_v01,
+					   resp),
+		.ei_array      = get_qmi_response_type_v01_ei(),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info wlfw_mac_addr_req_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(struct wlfw_mac_addr_req_msg_v01,
+					   mac_addr_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = QMI_WLFW_MAC_ADDR_SIZE_V01,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = STATIC_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(struct wlfw_mac_addr_req_msg_v01,
+					   mac_addr),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
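+
+/*
+ * mac_addr uses STATIC_ARRAY: exactly QMI_WLFW_MAC_ADDR_SIZE_V01 bytes are
+ * encoded with no separate length element, unlike the QMI_DATA_LEN /
+ * VAR_LEN_ARRAY pairs used for variable-sized blobs elsewhere in this file.
+ */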
+
+struct elem_info wlfw_mac_addr_resp_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = 1,
+		.elem_size      = sizeof(struct qmi_response_type_v01),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x02,
+		.offset         = offsetof(struct wlfw_mac_addr_resp_msg_v01,
+					   resp),
+		.ei_array      = get_qmi_response_type_v01_ei(),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info wlfw_host_cap_req_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(struct wlfw_host_cap_req_msg_v01,
+					   daemon_support_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(struct wlfw_host_cap_req_msg_v01,
+					   daemon_support),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info wlfw_host_cap_resp_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = 1,
+		.elem_size      = sizeof(struct qmi_response_type_v01),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x02,
+		.offset         = offsetof(struct wlfw_host_cap_resp_msg_v01,
+					   resp),
+		.ei_array       = get_qmi_response_type_v01_ei(),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info wlfw_request_mem_ind_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint32_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x01,
+		.offset         = offsetof(struct wlfw_request_mem_ind_msg_v01,
+					   size),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info wlfw_respond_mem_req_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_UNSIGNED_8_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint64_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x01,
+		.offset         = offsetof(struct wlfw_respond_mem_req_msg_v01,
+					   addr),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint32_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x02,
+		.offset         = offsetof(struct wlfw_respond_mem_req_msg_v01,
+					   size),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info wlfw_respond_mem_resp_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = 1,
+		.elem_size      = sizeof(struct qmi_response_type_v01),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x02,
+		.offset         = offsetof(struct wlfw_respond_mem_resp_msg_v01,
+					   resp),
+		.ei_array       = get_qmi_response_type_v01_ei(),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info wlfw_fw_mem_ready_ind_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info wlfw_cold_boot_cal_done_ind_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info wlfw_rejuvenate_ind_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(struct wlfw_rejuvenate_ind_msg_v01,
+					   cause_for_rejuvenation_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(struct wlfw_rejuvenate_ind_msg_v01,
+					   cause_for_rejuvenation),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x11,
+		.offset         = offsetof(struct wlfw_rejuvenate_ind_msg_v01,
+					   requesting_sub_system_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x11,
+		.offset         = offsetof(struct wlfw_rejuvenate_ind_msg_v01,
+					   requesting_sub_system),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x12,
+		.offset         = offsetof(struct wlfw_rejuvenate_ind_msg_v01,
+					   line_number_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_2_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint16_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x12,
+		.offset         = offsetof(struct wlfw_rejuvenate_ind_msg_v01,
+					   line_number),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x13,
+		.offset         = offsetof(struct wlfw_rejuvenate_ind_msg_v01,
+					   function_name_valid),
+	},
+	{
+		.data_type      = QMI_STRING,
+		.elem_len       = QMI_WLFW_FUNCTION_NAME_LEN_V01 + 1,
+		.elem_size      = sizeof(char),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x13,
+		.offset         = offsetof(struct wlfw_rejuvenate_ind_msg_v01,
+					   function_name),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info wlfw_rejuvenate_ack_req_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info wlfw_rejuvenate_ack_resp_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = 1,
+		.elem_size      = sizeof(struct qmi_response_type_v01),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x02,
+		.offset         = offsetof(
+				   struct wlfw_rejuvenate_ack_resp_msg_v01,
+				   resp),
+		.ei_array       = get_qmi_response_type_v01_ei(),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info wlfw_dynamic_feature_mask_req_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(
+				   struct wlfw_dynamic_feature_mask_req_msg_v01,
+				   mask_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_8_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint64_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(
+				   struct wlfw_dynamic_feature_mask_req_msg_v01,
+				   mask),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info wlfw_dynamic_feature_mask_resp_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = 1,
+		.elem_size      = sizeof(struct qmi_response_type_v01),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x02,
+		.offset         = offsetof(
+			   struct wlfw_dynamic_feature_mask_resp_msg_v01,
+			   resp),
+		.ei_array       = get_qmi_response_type_v01_ei(),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(
+			   struct wlfw_dynamic_feature_mask_resp_msg_v01,
+			   prev_mask_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_8_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint64_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(
+			   struct wlfw_dynamic_feature_mask_resp_msg_v01,
+			   prev_mask),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x11,
+		.offset         = offsetof(
+			   struct wlfw_dynamic_feature_mask_resp_msg_v01,
+			   curr_mask_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_8_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint64_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x11,
+		.offset         = offsetof(
+			   struct wlfw_dynamic_feature_mask_resp_msg_v01,
+			   curr_mask),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
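The elem_info tables above drive a table-driven QMI TLV encoder/decoder: each entry binds one C struct field (via offsetof) to a wire TLV, and an optional field is expressed as a QMI_OPT_FLAG entry followed by a value entry that shares the same tlv_type (0x10, 0x11, ...). A minimal sketch of how such a table can be walked during encode; encode_tlv() and its buffer handling are illustrative stand-ins, not the in-kernel qmi_encdec API, and variable-length arrays (VAR_LEN_ARRAY), which additionally emit a length prefix, are omitted for brevity:

/* Sketch: walk an elem_info table and emit one TLV per present field.
 * encode_tlv() is a hypothetical helper returning the bytes written.
 */
static size_t sketch_encode(const struct elem_info *ei,
			    const void *msg, u8 *buf, size_t buflen)
{
	size_t used = 0;

	for (; ei->data_type != QMI_EOTI; ei++) {
		const u8 *field = (const u8 *)msg + ei->offset;

		if (ei->data_type == QMI_OPT_FLAG) {
			/* a zero *_valid flag suppresses the optional
			 * TLV, so skip the matching value entry too
			 */
			if (!*field)
				ei++;
			continue;
		}

		used += encode_tlv(buf + used, buflen - used,
				   ei->tlv_type, field,
				   ei->elem_len * ei->elem_size);
	}

	return used;
}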
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/net/wireless/ath/ath10k/wcn3990_qmi_service_v01.h	2019-01-22 16:16:25.427263830 +0100
@@ -0,0 +1,619 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef WLAN_FIRMWARE_SERVICE_V01_H
+#define WLAN_FIRMWARE_SERVICE_V01_H
+
+#define WLFW_SERVICE_ID_V01 0x45
+#define WLFW_SERVICE_VERS_V01 0x01
+
+#define QMI_WLFW_BDF_DOWNLOAD_REQ_V01 0x0025
+#define QMI_WLFW_FW_MEM_READY_IND_V01 0x0037
+#define QMI_WLFW_INITIATE_CAL_UPDATE_IND_V01 0x002A
+#define QMI_WLFW_HOST_CAP_REQ_V01 0x0034
+#define QMI_WLFW_DYNAMIC_FEATURE_MASK_RESP_V01 0x003B
+#define QMI_WLFW_CAP_REQ_V01 0x0024
+#define QMI_WLFW_CAL_REPORT_REQ_V01 0x0026
+#define QMI_WLFW_CAL_UPDATE_RESP_V01 0x0029
+#define QMI_WLFW_CAL_DOWNLOAD_RESP_V01 0x0027
+#define QMI_WLFW_INI_RESP_V01 0x002F
+#define QMI_WLFW_CAL_REPORT_RESP_V01 0x0026
+#define QMI_WLFW_MAC_ADDR_RESP_V01 0x0033
+#define QMI_WLFW_INITIATE_CAL_DOWNLOAD_IND_V01 0x0028
+#define QMI_WLFW_HOST_CAP_RESP_V01 0x0034
+#define QMI_WLFW_MSA_READY_IND_V01 0x002B
+#define QMI_WLFW_ATHDIAG_WRITE_RESP_V01 0x0031
+#define QMI_WLFW_WLAN_MODE_REQ_V01 0x0022
+#define QMI_WLFW_IND_REGISTER_REQ_V01 0x0020
+#define QMI_WLFW_WLAN_CFG_RESP_V01 0x0023
+#define QMI_WLFW_COLD_BOOT_CAL_DONE_IND_V01 0x0038
+#define QMI_WLFW_REQUEST_MEM_IND_V01 0x0035
+#define QMI_WLFW_REJUVENATE_IND_V01 0x0039
+#define QMI_WLFW_DYNAMIC_FEATURE_MASK_REQ_V01 0x003B
+#define QMI_WLFW_ATHDIAG_WRITE_REQ_V01 0x0031
+#define QMI_WLFW_WLAN_MODE_RESP_V01 0x0022
+#define QMI_WLFW_RESPOND_MEM_REQ_V01 0x0036
+#define QMI_WLFW_PIN_CONNECT_RESULT_IND_V01 0x002C
+#define QMI_WLFW_FW_READY_IND_V01 0x0021
+#define QMI_WLFW_MSA_READY_RESP_V01 0x002E
+#define QMI_WLFW_CAL_UPDATE_REQ_V01 0x0029
+#define QMI_WLFW_INI_REQ_V01 0x002F
+#define QMI_WLFW_BDF_DOWNLOAD_RESP_V01 0x0025
+#define QMI_WLFW_REJUVENATE_ACK_RESP_V01 0x003A
+#define QMI_WLFW_MSA_INFO_RESP_V01 0x002D
+#define QMI_WLFW_MSA_READY_REQ_V01 0x002E
+#define QMI_WLFW_CAP_RESP_V01 0x0024
+#define QMI_WLFW_REJUVENATE_ACK_REQ_V01 0x003A
+#define QMI_WLFW_ATHDIAG_READ_RESP_V01 0x0030
+#define QMI_WLFW_VBATT_REQ_V01 0x0032
+#define QMI_WLFW_MAC_ADDR_REQ_V01 0x0033
+#define QMI_WLFW_RESPOND_MEM_RESP_V01 0x0036
+#define QMI_WLFW_VBATT_RESP_V01 0x0032
+#define QMI_WLFW_MSA_INFO_REQ_V01 0x002D
+#define QMI_WLFW_CAL_DOWNLOAD_REQ_V01 0x0027
+#define QMI_WLFW_ATHDIAG_READ_REQ_V01 0x0030
+#define QMI_WLFW_WLAN_CFG_REQ_V01 0x0023
+#define QMI_WLFW_IND_REGISTER_RESP_V01 0x0020
+
+#define QMI_WLFW_MAX_NUM_MEMORY_REGIONS_V01 2
+#define QMI_WLFW_MAX_NUM_CAL_V01 5
+#define QMI_WLFW_MAX_DATA_SIZE_V01 6144
+#define QMI_WLFW_FUNCTION_NAME_LEN_V01 128
+#define QMI_WLFW_MAX_NUM_CE_V01 12
+#define QMI_WLFW_MAX_TIMESTAMP_LEN_V01 32
+#define QMI_WLFW_MAX_BUILD_ID_LEN_V01 128
+#define QMI_WLFW_MAX_STR_LEN_V01 16
+#define QMI_WLFW_MAX_NUM_SHADOW_REG_V01 24
+#define QMI_WLFW_MAC_ADDR_SIZE_V01 6
+#define QMI_WLFW_MAX_NUM_SHADOW_REG_V2_V01 36
+#define QMI_WLFW_MAX_NUM_SVC_V01 24
+
+enum wlfw_driver_mode_enum_v01 {
+	WLFW_DRIVER_MODE_ENUM_MIN_VAL_V01 = INT_MIN,
+	QMI_WLFW_MISSION_V01 = 0,
+	QMI_WLFW_FTM_V01 = 1,
+	QMI_WLFW_EPPING_V01 = 2,
+	QMI_WLFW_WALTEST_V01 = 3,
+	QMI_WLFW_OFF_V01 = 4,
+	QMI_WLFW_CCPM_V01 = 5,
+	QMI_WLFW_QVIT_V01 = 6,
+	QMI_WLFW_CALIBRATION_V01 = 7,
+	WLFW_DRIVER_MODE_ENUM_MAX_VAL_V01 = INT_MAX,
+};
+
+enum wlfw_cal_temp_id_enum_v01 {
+	WLFW_CAL_TEMP_ID_ENUM_MIN_VAL_V01 = INT_MIN,
+	QMI_WLFW_CAL_TEMP_IDX_0_V01 = 0,
+	QMI_WLFW_CAL_TEMP_IDX_1_V01 = 1,
+	QMI_WLFW_CAL_TEMP_IDX_2_V01 = 2,
+	QMI_WLFW_CAL_TEMP_IDX_3_V01 = 3,
+	QMI_WLFW_CAL_TEMP_IDX_4_V01 = 4,
+	WLFW_CAL_TEMP_ID_ENUM_MAX_VAL_V01 = INT_MAX,
+};
+
+enum wlfw_pipedir_enum_v01 {
+	WLFW_PIPEDIR_ENUM_MIN_VAL_V01 = INT_MIN,
+	QMI_WLFW_PIPEDIR_NONE_V01 = 0,
+	QMI_WLFW_PIPEDIR_IN_V01 = 1,
+	QMI_WLFW_PIPEDIR_OUT_V01 = 2,
+	QMI_WLFW_PIPEDIR_INOUT_V01 = 3,
+	WLFW_PIPEDIR_ENUM_MAX_VAL_V01 = INT_MAX,
+};
+
+#define QMI_WLFW_CE_ATTR_FLAGS_V01 ((uint32_t)0x00)
+#define QMI_WLFW_CE_ATTR_NO_SNOOP_V01 ((uint32_t)0x01)
+#define QMI_WLFW_CE_ATTR_BYTE_SWAP_DATA_V01 ((uint32_t)0x02)
+#define QMI_WLFW_CE_ATTR_SWIZZLE_DESCRIPTORS_V01 ((uint32_t)0x04)
+#define QMI_WLFW_CE_ATTR_DISABLE_INTR_V01 ((uint32_t)0x08)
+#define QMI_WLFW_CE_ATTR_ENABLE_POLL_V01 ((uint32_t)0x10)
+
+#define QMI_WLFW_ALREADY_REGISTERED_V01 ((uint64_t)0x01ULL)
+#define QMI_WLFW_FW_READY_V01 ((uint64_t)0x02ULL)
+#define QMI_WLFW_MSA_READY_V01 ((uint64_t)0x04ULL)
+#define QMI_WLFW_FW_MEM_READY_V01 ((uint64_t)0x08ULL)
+
+#define QMI_WLFW_FW_REJUVENATE_V01 ((uint64_t)0x01ULL)
+
+struct wlfw_ce_tgt_pipe_cfg_s_v01 {
+	u32 pipe_num;
+	enum wlfw_pipedir_enum_v01 pipe_dir;
+	u32 nentries;
+	u32 nbytes_max;
+	u32 flags;
+};
+
+struct wlfw_ce_svc_pipe_cfg_s_v01 {
+	u32 service_id;
+	enum wlfw_pipedir_enum_v01 pipe_dir;
+	u32 pipe_num;
+};
+
+struct wlfw_shadow_reg_cfg_s_v01 {
+	u16 id;
+	u16 offset;
+};
+
+struct wlfw_shadow_reg_v2_cfg_s_v01 {
+	u32 addr;
+};
+
+struct wlfw_memory_region_info_s_v01 {
+	u64 region_addr;
+	u32 size;
+	u8 secure_flag;
+};
+
+struct wlfw_rf_chip_info_s_v01 {
+	u32 chip_id;
+	u32 chip_family;
+};
+
+struct wlfw_rf_board_info_s_v01 {
+	u32 board_id;
+};
+
+struct wlfw_soc_info_s_v01 {
+	u32 soc_id;
+};
+
+struct wlfw_fw_version_info_s_v01 {
+	u32 fw_version;
+	char fw_build_timestamp[QMI_WLFW_MAX_TIMESTAMP_LEN_V01 + 1];
+};
+
+struct wlfw_ind_register_req_msg_v01 {
+	u8 fw_ready_enable_valid;
+	u8 fw_ready_enable;
+	u8 initiate_cal_download_enable_valid;
+	u8 initiate_cal_download_enable;
+	u8 initiate_cal_update_enable_valid;
+	u8 initiate_cal_update_enable;
+	u8 msa_ready_enable_valid;
+	u8 msa_ready_enable;
+	u8 pin_connect_result_enable_valid;
+	u8 pin_connect_result_enable;
+	u8 client_id_valid;
+	u32 client_id;
+	u8 request_mem_enable_valid;
+	u8 request_mem_enable;
+	u8 fw_mem_ready_enable_valid;
+	u8 fw_mem_ready_enable;
+	u8 cold_boot_cal_done_enable_valid;
+	u8 cold_boot_cal_done_enable;
+	u8 rejuvenate_enable_valid;
+	u32 rejuvenate_enable;
+};
+
+#define WLFW_IND_REGISTER_REQ_MSG_V01_MAX_MSG_LEN 46
+extern struct elem_info wlfw_ind_register_req_msg_v01_ei[];
+
+struct wlfw_ind_register_resp_msg_v01 {
+	struct qmi_response_type_v01 resp;
+	u8 fw_status_valid;
+	u64 fw_status;
+};
+
+#define WLFW_IND_REGISTER_RESP_MSG_V01_MAX_MSG_LEN 18
+extern struct elem_info wlfw_ind_register_resp_msg_v01_ei[];
+
+struct wlfw_fw_ready_ind_msg_v01 {
+	char placeholder;
+};
+
+#define WLFW_FW_READY_IND_MSG_V01_MAX_MSG_LEN 0
+extern struct elem_info wlfw_fw_ready_ind_msg_v01_ei[];
+
+struct wlfw_msa_ready_ind_msg_v01 {
+	char placeholder;
+};
+
+#define WLFW_MSA_READY_IND_MSG_V01_MAX_MSG_LEN 0
+extern struct elem_info wlfw_msa_ready_ind_msg_v01_ei[];
+
+struct wlfw_pin_connect_result_ind_msg_v01 {
+	u8 pwr_pin_result_valid;
+	u32 pwr_pin_result;
+	u8 phy_io_pin_result_valid;
+	u32 phy_io_pin_result;
+	u8 rf_pin_result_valid;
+	u32 rf_pin_result;
+};
+
+#define WLFW_PIN_CONNECT_RESULT_IND_MSG_V01_MAX_MSG_LEN 21
+extern struct elem_info wlfw_pin_connect_result_ind_msg_v01_ei[];
+
+struct wlfw_wlan_mode_req_msg_v01 {
+	enum wlfw_driver_mode_enum_v01 mode;
+	u8 hw_debug_valid;
+	u8 hw_debug;
+};
+
+#define WLFW_WLAN_MODE_REQ_MSG_V01_MAX_MSG_LEN 11
+extern struct elem_info wlfw_wlan_mode_req_msg_v01_ei[];
+
+struct wlfw_wlan_mode_resp_msg_v01 {
+	struct qmi_response_type_v01 resp;
+};
+
+#define WLFW_WLAN_MODE_RESP_MSG_V01_MAX_MSG_LEN 7
+extern struct elem_info wlfw_wlan_mode_resp_msg_v01_ei[];
+
+struct wlfw_wlan_cfg_req_msg_v01 {
+	u8 host_version_valid;
+	char host_version[QMI_WLFW_MAX_STR_LEN_V01 + 1];
+	u8 tgt_cfg_valid;
+	u32 tgt_cfg_len;
+	struct wlfw_ce_tgt_pipe_cfg_s_v01 tgt_cfg[QMI_WLFW_MAX_NUM_CE_V01];
+	u8 svc_cfg_valid;
+	u32 svc_cfg_len;
+	struct wlfw_ce_svc_pipe_cfg_s_v01 svc_cfg[QMI_WLFW_MAX_NUM_SVC_V01];
+	u8 shadow_reg_valid;
+	u32 shadow_reg_len;
+	struct wlfw_shadow_reg_cfg_s_v01
+	shadow_reg[QMI_WLFW_MAX_NUM_SHADOW_REG_V01];
+	u8 shadow_reg_v2_valid;
+	u32 shadow_reg_v2_len;
+	struct wlfw_shadow_reg_v2_cfg_s_v01
+	shadow_reg_v2[QMI_WLFW_MAX_NUM_SHADOW_REG_V2_V01];
+};
+
+#define WLFW_WLAN_CFG_REQ_MSG_V01_MAX_MSG_LEN 803
+extern struct elem_info wlfw_wlan_cfg_req_msg_v01_ei[];
+
+struct wlfw_wlan_cfg_resp_msg_v01 {
+	struct qmi_response_type_v01 resp;
+};
+
+#define WLFW_WLAN_CFG_RESP_MSG_V01_MAX_MSG_LEN 7
+extern struct elem_info wlfw_wlan_cfg_resp_msg_v01_ei[];
+
+struct wlfw_cap_req_msg_v01 {
+	char placeholder;
+};
+
+#define WLFW_CAP_REQ_MSG_V01_MAX_MSG_LEN 0
+extern struct elem_info wlfw_cap_req_msg_v01_ei[];
+
+struct wlfw_cap_resp_msg_v01 {
+	struct qmi_response_type_v01 resp;
+	u8 chip_info_valid;
+	struct wlfw_rf_chip_info_s_v01 chip_info;
+	u8 board_info_valid;
+	struct wlfw_rf_board_info_s_v01 board_info;
+	u8 soc_info_valid;
+	struct wlfw_soc_info_s_v01 soc_info;
+	u8 fw_version_info_valid;
+	struct wlfw_fw_version_info_s_v01 fw_version_info;
+	u8 fw_build_id_valid;
+	char fw_build_id[QMI_WLFW_MAX_BUILD_ID_LEN_V01 + 1];
+};
+
+#define WLFW_CAP_RESP_MSG_V01_MAX_MSG_LEN 203
+extern struct elem_info wlfw_cap_resp_msg_v01_ei[];
+
+struct wlfw_bdf_download_req_msg_v01 {
+	u8 valid;
+	u8 file_id_valid;
+	enum wlfw_cal_temp_id_enum_v01 file_id;
+	u8 total_size_valid;
+	u32 total_size;
+	u8 seg_id_valid;
+	u32 seg_id;
+	u8 data_valid;
+	u32 data_len;
+	u8 data[QMI_WLFW_MAX_DATA_SIZE_V01];
+	u8 end_valid;
+	u8 end;
+};
+
+#define WLFW_BDF_DOWNLOAD_REQ_MSG_V01_MAX_MSG_LEN 6178
+extern struct elem_info wlfw_bdf_download_req_msg_v01_ei[];
+
+struct wlfw_bdf_download_resp_msg_v01 {
+	struct qmi_response_type_v01 resp;
+};
+
+#define WLFW_BDF_DOWNLOAD_RESP_MSG_V01_MAX_MSG_LEN 7
+extern struct elem_info wlfw_bdf_download_resp_msg_v01_ei[];
+
+struct wlfw_cal_report_req_msg_v01 {
+	u32 meta_data_len;
+	enum wlfw_cal_temp_id_enum_v01 meta_data[QMI_WLFW_MAX_NUM_CAL_V01];
+};
+
+#define WLFW_CAL_REPORT_REQ_MSG_V01_MAX_MSG_LEN 24
+extern struct elem_info wlfw_cal_report_req_msg_v01_ei[];
+
+struct wlfw_cal_report_resp_msg_v01 {
+	struct qmi_response_type_v01 resp;
+};
+
+#define WLFW_CAL_REPORT_RESP_MSG_V01_MAX_MSG_LEN 7
+extern struct elem_info wlfw_cal_report_resp_msg_v01_ei[];
+
+struct wlfw_initiate_cal_download_ind_msg_v01 {
+	enum wlfw_cal_temp_id_enum_v01 cal_id;
+};
+
+#define WLFW_INITIATE_CAL_DOWNLOAD_IND_MSG_V01_MAX_MSG_LEN 7
+extern struct elem_info wlfw_initiate_cal_download_ind_msg_v01_ei[];
+
+struct wlfw_cal_download_req_msg_v01 {
+	u8 valid;
+	u8 file_id_valid;
+	enum wlfw_cal_temp_id_enum_v01 file_id;
+	u8 total_size_valid;
+	u32 total_size;
+	u8 seg_id_valid;
+	u32 seg_id;
+	u8 data_valid;
+	u32 data_len;
+	u8 data[QMI_WLFW_MAX_DATA_SIZE_V01];
+	u8 end_valid;
+	u8 end;
+};
+
+#define WLFW_CAL_DOWNLOAD_REQ_MSG_V01_MAX_MSG_LEN 6178
+extern struct elem_info wlfw_cal_download_req_msg_v01_ei[];
+
+struct wlfw_cal_download_resp_msg_v01 {
+	struct qmi_response_type_v01 resp;
+};
+
+#define WLFW_CAL_DOWNLOAD_RESP_MSG_V01_MAX_MSG_LEN 7
+extern struct elem_info wlfw_cal_download_resp_msg_v01_ei[];
+
+struct wlfw_initiate_cal_update_ind_msg_v01 {
+	enum wlfw_cal_temp_id_enum_v01 cal_id;
+	u32 total_size;
+};
+
+#define WLFW_INITIATE_CAL_UPDATE_IND_MSG_V01_MAX_MSG_LEN 14
+extern struct elem_info wlfw_initiate_cal_update_ind_msg_v01_ei[];
+
+struct wlfw_cal_update_req_msg_v01 {
+	enum wlfw_cal_temp_id_enum_v01 cal_id;
+	u32 seg_id;
+};
+
+#define WLFW_CAL_UPDATE_REQ_MSG_V01_MAX_MSG_LEN 14
+extern struct elem_info wlfw_cal_update_req_msg_v01_ei[];
+
+struct wlfw_cal_update_resp_msg_v01 {
+	struct qmi_response_type_v01 resp;
+	u8 file_id_valid;
+	enum wlfw_cal_temp_id_enum_v01 file_id;
+	u8 total_size_valid;
+	u32 total_size;
+	u8 seg_id_valid;
+	u32 seg_id;
+	u8 data_valid;
+	u32 data_len;
+	u8 data[QMI_WLFW_MAX_DATA_SIZE_V01];
+	u8 end_valid;
+	u8 end;
+};
+
+#define WLFW_CAL_UPDATE_RESP_MSG_V01_MAX_MSG_LEN 6181
+extern struct elem_info wlfw_cal_update_resp_msg_v01_ei[];
+
+struct wlfw_msa_info_req_msg_v01 {
+	u64 msa_addr;
+	u32 size;
+};
+
+#define WLFW_MSA_INFO_REQ_MSG_V01_MAX_MSG_LEN 18
+extern struct elem_info wlfw_msa_info_req_msg_v01_ei[];
+
+struct wlfw_msa_info_resp_msg_v01 {
+	struct qmi_response_type_v01 resp;
+	u32 mem_region_info_len;
+	struct wlfw_memory_region_info_s_v01
+	mem_region_info[QMI_WLFW_MAX_NUM_MEMORY_REGIONS_V01];
+};
+
+#define WLFW_MSA_INFO_RESP_MSG_V01_MAX_MSG_LEN 37
+extern struct elem_info wlfw_msa_info_resp_msg_v01_ei[];
+
+struct wlfw_msa_ready_req_msg_v01 {
+	char placeholder;
+};
+
+#define WLFW_MSA_READY_REQ_MSG_V01_MAX_MSG_LEN 0
+extern struct elem_info wlfw_msa_ready_req_msg_v01_ei[];
+
+struct wlfw_msa_ready_resp_msg_v01 {
+	struct qmi_response_type_v01 resp;
+};
+
+#define WLFW_MSA_READY_RESP_MSG_V01_MAX_MSG_LEN 7
+extern struct elem_info wlfw_msa_ready_resp_msg_v01_ei[];
+
+struct wlfw_ini_req_msg_v01 {
+	u8 enablefwlog_valid;
+	u8 enablefwlog;
+};
+
+#define WLFW_INI_REQ_MSG_V01_MAX_MSG_LEN 4
+extern struct elem_info wlfw_ini_req_msg_v01_ei[];
+
+struct wlfw_ini_resp_msg_v01 {
+	struct qmi_response_type_v01 resp;
+};
+
+#define WLFW_INI_RESP_MSG_V01_MAX_MSG_LEN 7
+extern struct elem_info wlfw_ini_resp_msg_v01_ei[];
+
+struct wlfw_athdiag_read_req_msg_v01 {
+	u32 offset;
+	u32 mem_type;
+	u32 data_len;
+};
+
+#define WLFW_ATHDIAG_READ_REQ_MSG_V01_MAX_MSG_LEN 21
+extern struct elem_info wlfw_athdiag_read_req_msg_v01_ei[];
+
+struct wlfw_athdiag_read_resp_msg_v01 {
+	struct qmi_response_type_v01 resp;
+	u8 data_valid;
+	u32 data_len;
+	u8 data[QMI_WLFW_MAX_DATA_SIZE_V01];
+};
+
+#define WLFW_ATHDIAG_READ_RESP_MSG_V01_MAX_MSG_LEN 6156
+extern struct elem_info wlfw_athdiag_read_resp_msg_v01_ei[];
+
+struct wlfw_athdiag_write_req_msg_v01 {
+	u32 offset;
+	u32 mem_type;
+	u32 data_len;
+	u8 data[QMI_WLFW_MAX_DATA_SIZE_V01];
+};
+
+#define WLFW_ATHDIAG_WRITE_REQ_MSG_V01_MAX_MSG_LEN 6163
+extern struct elem_info wlfw_athdiag_write_req_msg_v01_ei[];
+
+struct wlfw_athdiag_write_resp_msg_v01 {
+	struct qmi_response_type_v01 resp;
+};
+
+#define WLFW_ATHDIAG_WRITE_RESP_MSG_V01_MAX_MSG_LEN 7
+extern struct elem_info wlfw_athdiag_write_resp_msg_v01_ei[];
+
+struct wlfw_vbatt_req_msg_v01 {
+	u64 voltage_uv;
+};
+
+#define WLFW_VBATT_REQ_MSG_V01_MAX_MSG_LEN 11
+extern struct elem_info wlfw_vbatt_req_msg_v01_ei[];
+
+struct wlfw_vbatt_resp_msg_v01 {
+	struct qmi_response_type_v01 resp;
+};
+
+#define WLFW_VBATT_RESP_MSG_V01_MAX_MSG_LEN 7
+extern struct elem_info wlfw_vbatt_resp_msg_v01_ei[];
+
+struct wlfw_mac_addr_req_msg_v01 {
+	u8 mac_addr_valid;
+	u8 mac_addr[QMI_WLFW_MAC_ADDR_SIZE_V01];
+};
+
+#define WLFW_MAC_ADDR_REQ_MSG_V01_MAX_MSG_LEN 9
+extern struct elem_info wlfw_mac_addr_req_msg_v01_ei[];
+
+struct wlfw_mac_addr_resp_msg_v01 {
+	struct qmi_response_type_v01 resp;
+};
+
+#define WLFW_MAC_ADDR_RESP_MSG_V01_MAX_MSG_LEN 7
+extern struct elem_info wlfw_mac_addr_resp_msg_v01_ei[];
+
+struct wlfw_host_cap_req_msg_v01 {
+	u8 daemon_support_valid;
+	u8 daemon_support;
+};
+
+#define WLFW_HOST_CAP_REQ_MSG_V01_MAX_MSG_LEN 4
+extern struct elem_info wlfw_host_cap_req_msg_v01_ei[];
+
+struct wlfw_host_cap_resp_msg_v01 {
+	struct qmi_response_type_v01 resp;
+};
+
+#define WLFW_HOST_CAP_RESP_MSG_V01_MAX_MSG_LEN 7
+extern struct elem_info wlfw_host_cap_resp_msg_v01_ei[];
+
+struct wlfw_request_mem_ind_msg_v01 {
+	u32 size;
+};
+
+#define WLFW_REQUEST_MEM_IND_MSG_V01_MAX_MSG_LEN 7
+extern struct elem_info wlfw_request_mem_ind_msg_v01_ei[];
+
+struct wlfw_respond_mem_req_msg_v01 {
+	u64 addr;
+	u32 size;
+};
+
+#define WLFW_RESPOND_MEM_REQ_MSG_V01_MAX_MSG_LEN 18
+extern struct elem_info wlfw_respond_mem_req_msg_v01_ei[];
+
+struct wlfw_respond_mem_resp_msg_v01 {
+	struct qmi_response_type_v01 resp;
+};
+
+#define WLFW_RESPOND_MEM_RESP_MSG_V01_MAX_MSG_LEN 7
+extern struct elem_info wlfw_respond_mem_resp_msg_v01_ei[];
+
+struct wlfw_fw_mem_ready_ind_msg_v01 {
+	char placeholder;
+};
+
+#define WLFW_FW_MEM_READY_IND_MSG_V01_MAX_MSG_LEN 0
+extern struct elem_info wlfw_fw_mem_ready_ind_msg_v01_ei[];
+
+struct wlfw_cold_boot_cal_done_ind_msg_v01 {
+	char placeholder;
+};
+
+#define WLFW_COLD_BOOT_CAL_DONE_IND_MSG_V01_MAX_MSG_LEN 0
+extern struct elem_info wlfw_cold_boot_cal_done_ind_msg_v01_ei[];
+
+struct wlfw_rejuvenate_ind_msg_v01 {
+	u8 cause_for_rejuvenation_valid;
+	u8 cause_for_rejuvenation;
+	u8 requesting_sub_system_valid;
+	u8 requesting_sub_system;
+	u8 line_number_valid;
+	u16 line_number;
+	u8 function_name_valid;
+	char function_name[QMI_WLFW_FUNCTION_NAME_LEN_V01 + 1];
+};
+
+#define WLFW_REJUVENATE_IND_MSG_V01_MAX_MSG_LEN 144
+extern struct elem_info wlfw_rejuvenate_ind_msg_v01_ei[];
+
+struct wlfw_rejuvenate_ack_req_msg_v01 {
+	char placeholder;
+};
+
+#define WLFW_REJUVENATE_ACK_REQ_MSG_V01_MAX_MSG_LEN 0
+extern struct elem_info wlfw_rejuvenate_ack_req_msg_v01_ei[];
+
+struct wlfw_rejuvenate_ack_resp_msg_v01 {
+	struct qmi_response_type_v01 resp;
+};
+
+#define WLFW_REJUVENATE_ACK_RESP_MSG_V01_MAX_MSG_LEN 7
+extern struct elem_info wlfw_rejuvenate_ack_resp_msg_v01_ei[];
+
+struct wlfw_dynamic_feature_mask_req_msg_v01 {
+	u8 mask_valid;
+	u64 mask;
+};
+
+#define WLFW_DYNAMIC_FEATURE_MASK_REQ_MSG_V01_MAX_MSG_LEN 11
+extern struct elem_info wlfw_dynamic_feature_mask_req_msg_v01_ei[];
+
+struct wlfw_dynamic_feature_mask_resp_msg_v01 {
+	struct qmi_response_type_v01 resp;
+	u8 prev_mask_valid;
+	u64 prev_mask;
+	u8 curr_mask_valid;
+	u64 curr_mask;
+};
+
+#define WLFW_DYNAMIC_FEATURE_MASK_RESP_MSG_V01_MAX_MSG_LEN 29
+extern struct elem_info wlfw_dynamic_feature_mask_resp_msg_v01_ei[];
+
+#endif
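Each request/response pair above shares one message ID, and the *_MAX_MSG_LEN macros bound the encoded size the QMI client core must be able to buffer. A hedged sketch of a synchronous exchange using the msm-4.4 era client API from soc/qcom/msm_qmi_interface.h (the connected 'handle' and the 5000 ms timeout are assumptions; error handling is abbreviated):

/* Sketch: report battery voltage to firmware over a connected
 * QMI handle and check the standard response TLV.
 */
static int sketch_send_vbatt(struct qmi_handle *handle, u64 uv)
{
	struct wlfw_vbatt_req_msg_v01 req = { .voltage_uv = uv };
	struct wlfw_vbatt_resp_msg_v01 resp = {};
	struct msg_desc req_desc = {
		.max_msg_len = WLFW_VBATT_REQ_MSG_V01_MAX_MSG_LEN,
		.msg_id = QMI_WLFW_VBATT_REQ_V01,
		.ei_array = wlfw_vbatt_req_msg_v01_ei,
	};
	struct msg_desc resp_desc = {
		.max_msg_len = WLFW_VBATT_RESP_MSG_V01_MAX_MSG_LEN,
		.msg_id = QMI_WLFW_VBATT_RESP_V01,
		.ei_array = wlfw_vbatt_resp_msg_v01_ei,
	};
	int ret;

	ret = qmi_send_req_wait(handle, &req_desc, &req, sizeof(req),
				&resp_desc, &resp, sizeof(resp), 5000);
	if (ret < 0)
		return ret;

	return resp.resp.result == QMI_RESULT_SUCCESS_V01 ? 0 : -EIO;
}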
diff -Nruw linux-4.4.115-fbx/drivers/net/wireless/cnss./Kconfig linux-4.4.115-fbx/drivers/net/wireless/cnss/Kconfig
--- linux-4.4.115-fbx/drivers/net/wireless/cnss./Kconfig	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/net/wireless/cnss/Kconfig	2019-01-22 16:16:25.659265930 +0100
@@ -0,0 +1,88 @@
+config CNSS
+	tristate "CNSS driver for wifi module"
+	select CNSS_UTILS
+	select CRYPTO
+	select CRYPTO_HASH
+	select CRYPTO_BLKCIPHER
+	---help---
+	  This module adds support for the CNSS connectivity subsystem used
+	  for Wi-Fi devices based on the QCA AR6320 chipset.
+	  This driver also adds support to integrate the WLAN module with
+	  the subsystem restart framework.
+
+config CNSS_ASYNC
+	bool "Enable/disable cnss pci platform driver asynchronous probe"
+	depends on CNSS || CNSS2
+	---help---
+	  If enabled, the CNSS PCI platform driver probes asynchronously.
+	  Asynchronous probe allows the CNSS PCI platform driver to probe
+	  in parallel with other device drivers, which helps reduce kernel
+	  boot time.
+
+config CNSS_MAC_BUG
+	bool "Enable/disable 0-4K memory initialization for QCA6174"
+	depends on CNSS
+	---help---
+	  If enabled, the 0-4K memory region is reserved for QCA6174 to
+	  work around a MAC hardware bug: the MAC can perform an invalid
+	  pointer fetch based on data read from that region, so the region
+	  is filled with zeros at an address for which the PCIe root
+	  complex honors the read without errors.
+
+config CLD_DEBUG
+	bool "Enable/disable CLD debug features"
+	help
+	  The WLAN CLD driver uses this config to enable certain debug
+	  features. Some of the debug features may affect performance or
+	  may compromise security.
+
+	  Say N if you are building a release kernel for production use.
+	  Only say Y if you are building a kernel with debug support.
+
+config CLD_HL_SDIO_CORE
+	tristate "Qualcomm CORE driver for QCA6174 with SDIO interface"
+	select WIRELESS_EXT
+	select WEXT_PRIV
+	select WEXT_CORE
+	select WEXT_SPY
+	select NL80211_TESTMODE
+	depends on MMC
+
+config CLD_LL_CORE
+	tristate "Qualcomm core WLAN driver for QCA6174 chipset"
+	select NL80211_TESTMODE
+	select WEXT_CORE
+	select WEXT_PRIV
+	select WEXT_SPY
+	select WIRELESS_EXT
+	select CRYPTO
+	select CRYPTO_HASH
+	select CRYPTO_BLKCIPHER
+	---help---
+	  This section contains the necessary modules needed to enable the
+	  core WLAN driver for Qualcomm QCA6174 chipset.
+	  Select Y to compile the driver in order to have WLAN functionality
+	  support.
+
+config CNSS_SECURE_FW
+	bool "Enable/Disable Memory Allocation for Secure Firmware Feature"
+	depends on CNSS
+	---help---
+	  The CLD driver can use this to hold a local copy of the firmware
+	  binaries, which is used for SHA crypto computation.
+	  The memory allocation is done only if this config parameter is
+	  enabled.
+
+config BUS_AUTO_SUSPEND
+	bool "Enable/Disable Runtime PM support for PCIe based WLAN Drivers"
+	depends on CNSS || CNSS2
+	depends on PCI
+	---help---
+	  Runtime power management is supported for PCIe based WLAN drivers.
+	  This feature enables the CLD WLAN driver to suspend the PCIe bus
+	  while APPS is awake, based on driver inactivity with the firmware.
+	  It uses the kernel's runtime power management framework to track
+	  bus access clients and to synchronize driver activity during
+	  system PM.
+	  This config flag controls the feature on a per-target basis; the
+	  feature requires CNSS driver support.
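For reference, a board defconfig fragment enabling this driver stack might look like the following (which options apply depends on the target; CNSS_UTILS is also selected automatically by CNSS):

CONFIG_CNSS=y
CONFIG_CNSS_UTILS=y
CONFIG_CNSS_MAC_BUG=y
CONFIG_BUS_AUTO_SUSPEND=y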
diff -Nruw linux-4.4.115-fbx/drivers/net/wireless/cnss./Makefile linux-4.4.115-fbx/drivers/net/wireless/cnss/Makefile
--- linux-4.4.115-fbx/drivers/net/wireless/cnss./Makefile	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/net/wireless/cnss/Makefile	2019-01-22 16:16:25.659265930 +0100
@@ -0,0 +1,5 @@
+# Makefile for CNSS platform driver
+
+obj-$(CONFIG_CNSS)	+= cnss_pci.o
+obj-$(CONFIG_CNSS)	+= cnss_sdio.o
+obj-$(CONFIG_CNSS)	+= cnss_common.o
diff -Nruw linux-4.4.115-fbx/drivers/net/wireless/cnss2./Kconfig linux-4.4.115-fbx/drivers/net/wireless/cnss2/Kconfig
--- linux-4.4.115-fbx/drivers/net/wireless/cnss2./Kconfig	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/net/wireless/cnss2/Kconfig	2019-10-29 09:26:24.489211457 +0100
@@ -0,0 +1,18 @@
+config CNSS2
+	tristate "CNSS2 Platform Driver for Wi-Fi Module"
+	depends on !CNSS && PCI_MSM
+	select CNSS_UTILS
+	---help---
+	  This module adds support for the Connectivity Subsystem (CNSS)
+	  used for PCIe based Wi-Fi devices with QCA6174/QCA6290 chipsets.
+	  This driver also adds support to integrate the WLAN module with
+	  the subsystem restart framework.
+
+config CNSS2_DEBUG
+	bool "CNSS2 Platform Driver Debug Support"
+	depends on CNSS2
+	---help---
+	  This option enables CNSS2 platform driver debug support, which
+	  primarily includes additional verbose logs for certain features,
+	  kernel panic for certain cases to aid debugging, and any other
+	  debug mechanisms.
diff -Nruw linux-4.4.115-fbx/drivers/net/wireless/cnss2./Makefile linux-4.4.115-fbx/drivers/net/wireless/cnss2/Makefile
--- linux-4.4.115-fbx/drivers/net/wireless/cnss2./Makefile	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/net/wireless/cnss2/Makefile	2019-10-29 09:26:24.489211457 +0100
@@ -0,0 +1,8 @@
+obj-$(CONFIG_CNSS2) += cnss2.o
+
+cnss2-y := main.o
+cnss2-y += debug.o
+cnss2-y += pci.o
+cnss2-y += power.o
+cnss2-y += qmi.o
+cnss2-y += wlan_firmware_service_v01.o
diff -Nruw linux-4.4.115-fbx/drivers/net/wireless/cnss_crypto./Makefile linux-4.4.115-fbx/drivers/net/wireless/cnss_crypto/Makefile
--- linux-4.4.115-fbx/drivers/net/wireless/cnss_crypto./Makefile	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/net/wireless/cnss_crypto/Makefile	2019-01-22 16:16:25.663265967 +0100
@@ -0,0 +1 @@
+obj-$(CONFIG_CNSS_CRYPTO) += cnss_secif.o
diff -Nruw linux-4.4.115-fbx/drivers/net/wireless/cnss_genl./Kconfig linux-4.4.115-fbx/drivers/net/wireless/cnss_genl/Kconfig
--- linux-4.4.115-fbx/drivers/net/wireless/cnss_genl./Kconfig	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/net/wireless/cnss_genl/Kconfig	2019-01-22 16:16:25.663265967 +0100
@@ -0,0 +1,7 @@
+config CNSS_GENL
+	tristate "CNSS Generic Netlink Socket Driver"
+	---help---
+	  This module creates the generic netlink family "CLD80211", which
+	  can be used by the CLD driver and userspace utilities to
+	  communicate over netlink sockets. The module creates several
+	  multicast groups to facilitate this communication.
diff -Nruw linux-4.4.115-fbx/drivers/net/wireless/cnss_genl./Makefile linux-4.4.115-fbx/drivers/net/wireless/cnss_genl/Makefile
--- linux-4.4.115-fbx/drivers/net/wireless/cnss_genl./Makefile	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/net/wireless/cnss_genl/Makefile	2019-01-22 16:16:25.663265967 +0100
@@ -0,0 +1 @@
+obj-$(CONFIG_CNSS_GENL) := cnss_nl.o
diff -Nruw linux-4.4.115-fbx/drivers/net/wireless/cnss_prealloc./cnss_prealloc.c linux-4.4.115-fbx/drivers/net/wireless/cnss_prealloc/cnss_prealloc.c
--- linux-4.4.115-fbx/drivers/net/wireless/cnss_prealloc./cnss_prealloc.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/net/wireless/cnss_prealloc/cnss_prealloc.c	2019-10-29 09:26:24.497211535 +0100
@@ -0,0 +1,344 @@
+/* Copyright (c) 2012, 2014-2015, 2018 The Linux Foundation. All rights
+ * reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/seq_file.h>
+#include <linux/err.h>
+#include <linux/stacktrace.h>
+#include <linux/wcnss_wlan.h>
+#include <linux/spinlock.h>
+#include <linux/debugfs.h>
+#include <net/cnss_prealloc.h>
+#ifdef CONFIG_WCNSS_SKB_PRE_ALLOC
+#include <linux/skbuff.h>
+#endif
+
+static DEFINE_SPINLOCK(alloc_lock);
+
+#ifdef CONFIG_SLUB_DEBUG
+#define WCNSS_MAX_STACK_TRACE			64
+#endif
+
+#define PRE_ALLOC_DEBUGFS_DIR		"cnss-prealloc"
+#define PRE_ALLOC_DEBUGFS_FILE_OBJ	"status"
+
+static struct dentry *debug_base;
+
+struct wcnss_prealloc {
+	int occupied;
+	size_t size;
+	void *ptr;
+#ifdef CONFIG_SLUB_DEBUG
+	unsigned long stack_trace[WCNSS_MAX_STACK_TRACE];
+	struct stack_trace trace;
+#endif
+};
+
+/* pre-alloced mem for WLAN driver */
+static struct wcnss_prealloc wcnss_allocs[] = {
+	{0, 8  * 1024, NULL},
+	{0, 8  * 1024, NULL},
+	{0, 8  * 1024, NULL},
+	{0, 8  * 1024, NULL},
+	{0, 8  * 1024, NULL},
+	{0, 8  * 1024, NULL},
+	{0, 8  * 1024, NULL},
+	{0, 8  * 1024, NULL},
+	{0, 16 * 1024, NULL},
+	{0, 16 * 1024, NULL},
+	{0, 16 * 1024, NULL},
+	{0, 16 * 1024, NULL},
+	{0, 16 * 1024, NULL},
+	{0, 16 * 1024, NULL},
+	{0, 16 * 1024, NULL},
+	{0, 16 * 1024, NULL},
+	{0, 16 * 1024, NULL},
+	{0, 16 * 1024, NULL},
+	{0, 16 * 1024, NULL},
+	{0, 16 * 1024, NULL},
+	{0, 16 * 1024, NULL},
+	{0, 16 * 1024, NULL},
+	{0, 16 * 1024, NULL},
+	{0, 16 * 1024, NULL},
+	{0, 16 * 1024, NULL},
+	{0, 16 * 1024, NULL},
+	{0, 16 * 1024, NULL},
+	{0, 16 * 1024, NULL},
+	{0, 16 * 1024, NULL},
+	{0, 16 * 1024, NULL},
+	{0, 16 * 1024, NULL},
+	{0, 16 * 1024, NULL},
+	{0, 16 * 1024, NULL},
+	{0, 16 * 1024, NULL},
+	{0, 16 * 1024, NULL},
+	{0, 16 * 1024, NULL},
+	{0, 16 * 1024, NULL},
+	{0, 16 * 1024, NULL},
+	{0, 16 * 1024, NULL},
+	{0, 16 * 1024, NULL},
+	{0, 16 * 1024, NULL},
+	{0, 16 * 1024, NULL},
+	{0, 16 * 1024, NULL},
+	{0, 16 * 1024, NULL},
+	{0, 16 * 1024, NULL},
+	{0, 16 * 1024, NULL},
+	{0, 16 * 1024, NULL},
+	{0, 16 * 1024, NULL},
+	{0, 16 * 1024, NULL},
+	{0, 16 * 1024, NULL},
+	{0, 32 * 1024, NULL},
+	{0, 32 * 1024, NULL},
+	{0, 32 * 1024, NULL},
+	{0, 32 * 1024, NULL},
+	{0, 32 * 1024, NULL},
+	{0, 32 * 1024, NULL},
+	{0, 32 * 1024, NULL},
+	{0, 32 * 1024, NULL},
+	{0, 32 * 1024, NULL},
+	{0, 32 * 1024, NULL},
+	{0, 64 * 1024, NULL},
+	{0, 64 * 1024, NULL},
+	{0, 64 * 1024, NULL},
+	{0, 64 * 1024, NULL},
+	{0, 64 * 1024, NULL},
+	{0, 128 * 1024, NULL},
+	{0, 128 * 1024, NULL},
+};
+
+int wcnss_prealloc_init(void)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(wcnss_allocs); i++) {
+		wcnss_allocs[i].occupied = 0;
+		wcnss_allocs[i].ptr = kmalloc(wcnss_allocs[i].size, GFP_KERNEL);
+		if (wcnss_allocs[i].ptr == NULL) {
+			/* roll back earlier slots so a failed init
+			 * does not leak the rest of the pool
+			 */
+			while (--i >= 0) {
+				kfree(wcnss_allocs[i].ptr);
+				wcnss_allocs[i].ptr = NULL;
+			}
+			return -ENOMEM;
+		}
+	}
+
+	return 0;
+}
+
+void wcnss_prealloc_deinit(void)
+{
+	int i = 0;
+
+	for (i = 0; i < ARRAY_SIZE(wcnss_allocs); i++) {
+		kfree(wcnss_allocs[i].ptr);
+		wcnss_allocs[i].ptr = NULL;
+	}
+}
+
+#ifdef CONFIG_SLUB_DEBUG
+static void wcnss_prealloc_save_stack_trace(struct wcnss_prealloc *entry)
+{
+	struct stack_trace *trace = &entry->trace;
+
+	memset(&entry->stack_trace, 0, sizeof(entry->stack_trace));
+	trace->nr_entries = 0;
+	trace->max_entries = WCNSS_MAX_STACK_TRACE;
+	trace->entries = entry->stack_trace;
+	trace->skip = 2;
+
+	save_stack_trace(trace);
+}
+#else
+static inline void wcnss_prealloc_save_stack_trace(struct wcnss_prealloc *entry)
+{
+}
+#endif
+
+void *wcnss_prealloc_get(size_t size)
+{
+	int i = 0;
+	unsigned long flags;
+
+	spin_lock_irqsave(&alloc_lock, flags);
+	for (i = 0; i < ARRAY_SIZE(wcnss_allocs); i++) {
+		if (wcnss_allocs[i].occupied)
+			continue;
+
+		if (wcnss_allocs[i].size >= size) {
+			/* we found the slot */
+			wcnss_allocs[i].occupied = 1;
+			spin_unlock_irqrestore(&alloc_lock, flags);
+			wcnss_prealloc_save_stack_trace(&wcnss_allocs[i]);
+			return wcnss_allocs[i].ptr;
+		}
+	}
+	spin_unlock_irqrestore(&alloc_lock, flags);
+
+	pr_err("wcnss: %s: prealloc not available for size: %zu\n",
+	       __func__, size);
+
+	return NULL;
+}
+EXPORT_SYMBOL(wcnss_prealloc_get);
+
+int wcnss_prealloc_put(void *ptr)
+{
+	int i = 0;
+	unsigned long flags;
+
+	spin_lock_irqsave(&alloc_lock, flags);
+	for (i = 0; i < ARRAY_SIZE(wcnss_allocs); i++) {
+		if (wcnss_allocs[i].ptr == ptr) {
+			wcnss_allocs[i].occupied = 0;
+			spin_unlock_irqrestore(&alloc_lock, flags);
+			return 1;
+		}
+	}
+	spin_unlock_irqrestore(&alloc_lock, flags);
+
+	return 0;
+}
+EXPORT_SYMBOL(wcnss_prealloc_put);
+
+#ifdef CONFIG_SLUB_DEBUG
+void wcnss_prealloc_check_memory_leak(void)
+{
+	int i, j = 0;
+
+	for (i = 0; i < ARRAY_SIZE(wcnss_allocs); i++) {
+		if (!wcnss_allocs[i].occupied)
+			continue;
+
+		if (j == 0) {
+			pr_err("wcnss_prealloc: Memory leak detected\n");
+			j++;
+		}
+
+		pr_err("Size: %zu, addr: %pK, backtrace:\n",
+		       wcnss_allocs[i].size, wcnss_allocs[i].ptr);
+		print_stack_trace(&wcnss_allocs[i].trace, 1);
+	}
+}
+#else
+void wcnss_prealloc_check_memory_leak(void) {}
+#endif
+EXPORT_SYMBOL(wcnss_prealloc_check_memory_leak);
+
+int wcnss_pre_alloc_reset(void)
+{
+	int i, n = 0;
+
+	for (i = 0; i < ARRAY_SIZE(wcnss_allocs); i++) {
+		if (!wcnss_allocs[i].occupied)
+			continue;
+
+		wcnss_allocs[i].occupied = 0;
+		n++;
+	}
+
+	return n;
+}
+EXPORT_SYMBOL(wcnss_pre_alloc_reset);
+
+static int prealloc_memory_stats_show(struct seq_file *fp, void *data)
+{
+	int i = 0;
+	int used_slots = 0, free_slots = 0;
+	unsigned int tsize = 0, tused = 0, size = 0;
+
+	seq_puts(fp, "\nSlot_Size(KB)\t\t[Used : Free]\n");
+	for (i = 0; i < ARRAY_SIZE(wcnss_allocs); i++) {
+		tsize += wcnss_allocs[i].size;
+		if (size != wcnss_allocs[i].size) {
+			if (size) {
+				seq_printf(
+					fp, "[%d : %d]\n",
+					used_slots, free_slots);
+			}
+
+			size = wcnss_allocs[i].size;
+			used_slots = 0;
+			free_slots = 0;
+			seq_printf(fp, "%u KB\t\t\t", size / 1024);
+		}
+
+		if (wcnss_allocs[i].occupied) {
+			tused += wcnss_allocs[i].size;
+			++used_slots;
+		} else {
+			++free_slots;
+		}
+	}
+	seq_printf(fp, "[%d : %d]\n", used_slots, free_slots);
+
+	/* Convert bytes to KB */
+	if (tsize)
+		tsize = tsize / 1024;
+	if (tused)
+		tused = tused / 1024;
+	seq_printf(fp, "\nMemory Status:\nTotal Memory: %uKB\n", tsize);
+	seq_printf(fp, "Used: %uKB\nFree: %uKB\n", tused, tsize - tused);
+
+	return 0;
+}
+
+static int prealloc_memory_stats_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, prealloc_memory_stats_show, NULL);
+}
+
+static const struct file_operations prealloc_memory_stats_fops = {
+	.owner = THIS_MODULE,
+	.open = prealloc_memory_stats_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
+
+static int __init wcnss_pre_alloc_init(void)
+{
+	int ret;
+
+	ret = wcnss_prealloc_init();
+	if (ret) {
+		pr_err("%s: Failed to init the prealloc pool\n", __func__);
+		return ret;
+	}
+
+	debug_base = debugfs_create_dir(PRE_ALLOC_DEBUGFS_DIR, NULL);
+	if (IS_ERR_OR_NULL(debug_base)) {
+		pr_err("%s: Failed to create debugfs dir\n", __func__);
+	} else if (IS_ERR_OR_NULL(debugfs_create_file(
+			PRE_ALLOC_DEBUGFS_FILE_OBJ,
+			0644, debug_base, NULL,
+			&prealloc_memory_stats_fops))) {
+		pr_err("%s: Failed to create debugfs file\n", __func__);
+		debugfs_remove_recursive(debug_base);
+	}
+
+	return ret;
+}
+
+static void __exit wcnss_pre_alloc_exit(void)
+{
+	wcnss_prealloc_deinit();
+	debugfs_remove_recursive(debug_base);
+}
+
+module_init(wcnss_pre_alloc_init);
+module_exit(wcnss_pre_alloc_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("WCNSS Prealloc Driver");
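The pool above trades a fixed memory cost at module init for allocation reliability later: because the slots are sorted by ascending size and scanned in order, wcnss_prealloc_get() hands out the smallest free slot that fits, even when system memory is fragmented. A brief usage sketch (the caller is illustrative):

/* Sketch: a typical consumer of the prealloc pool. A 10K request
 * is served from a 16K slot; wcnss_prealloc_put() returns 0 for
 * pointers that were not handed out by the pool.
 */
static int sketch_use_prealloc(void)
{
	void *buf = wcnss_prealloc_get(10 * 1024);

	if (!buf)
		return -ENOMEM;

	/* ... use buf as a driver scratch buffer ... */

	if (!wcnss_prealloc_put(buf))
		pr_warn("pointer %pK did not come from the pool\n", buf);

	return 0;
}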
diff -Nruw linux-4.4.115-fbx/drivers/net/wireless/cnss_prealloc./Makefile linux-4.4.115-fbx/drivers/net/wireless/cnss_prealloc/Makefile
--- linux-4.4.115-fbx/drivers/net/wireless/cnss_prealloc./Makefile	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/net/wireless/cnss_prealloc/Makefile	2019-01-22 16:16:25.663265967 +0100
@@ -0,0 +1 @@
+obj-$(CONFIG_WCNSS_MEM_PRE_ALLOC) += cnss_prealloc.o
diff -Nruw linux-4.4.115-fbx/drivers/net/wireless/cnss_utils./cnss_utils.c linux-4.4.115-fbx/drivers/net/wireless/cnss_utils/cnss_utils.c
--- linux-4.4.115-fbx/drivers/net/wireless/cnss_utils./cnss_utils.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/net/wireless/cnss_utils/cnss_utils.c	2019-01-22 16:16:25.663265967 +0100
@@ -0,0 +1,485 @@
+/* Copyright (c) 2017 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "cnss_utils: " fmt
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/etherdevice.h>
+#include <linux/debugfs.h>
+#include <net/cnss_utils.h>
+
+#define CNSS_MAX_CH_NUM 45
+struct cnss_unsafe_channel_list {
+	u16 unsafe_ch_count;
+	u16 unsafe_ch_list[CNSS_MAX_CH_NUM];
+};
+
+struct cnss_dfs_nol_info {
+	void *dfs_nol_info;
+	u16 dfs_nol_info_len;
+};
+
+#define MAX_NO_OF_MAC_ADDR 4
+#define MAC_PREFIX_LEN 2
+struct cnss_wlan_mac_addr {
+	u8 mac_addr[MAX_NO_OF_MAC_ADDR][ETH_ALEN];
+	u32 no_of_mac_addr_set;
+};
+
+enum mac_type {
+	CNSS_MAC_PROVISIONED,
+	CNSS_MAC_DERIVED,
+};
+
+static struct cnss_utils_priv {
+	struct cnss_unsafe_channel_list unsafe_channel_list;
+	struct cnss_dfs_nol_info dfs_nol_info;
+	/* generic mutex for unsafe channel */
+	struct mutex unsafe_channel_list_lock;
+	/* generic spin-lock for dfs_nol info */
+	spinlock_t dfs_nol_info_lock;
+	int driver_load_cnt;
+	struct cnss_wlan_mac_addr wlan_mac_addr;
+	struct cnss_wlan_mac_addr wlan_der_mac_addr;
+	enum cnss_utils_cc_src cc_source;
+	struct dentry *root_dentry;
+} *cnss_utils_priv;
+
+int cnss_utils_set_wlan_unsafe_channel(struct device *dev,
+				       u16 *unsafe_ch_list, u16 ch_count)
+{
+	struct cnss_utils_priv *priv = cnss_utils_priv;
+
+	if (!priv)
+		return -EINVAL;
+
+	mutex_lock(&priv->unsafe_channel_list_lock);
+	if ((!unsafe_ch_list) || (ch_count > CNSS_MAX_CH_NUM)) {
+		mutex_unlock(&priv->unsafe_channel_list_lock);
+		return -EINVAL;
+	}
+
+	priv->unsafe_channel_list.unsafe_ch_count = ch_count;
+
+	if (ch_count == 0)
+		goto end;
+
+	memcpy(priv->unsafe_channel_list.unsafe_ch_list,
+	       unsafe_ch_list, ch_count * sizeof(u16));
+
+end:
+	mutex_unlock(&priv->unsafe_channel_list_lock);
+
+	return 0;
+}
+EXPORT_SYMBOL(cnss_utils_set_wlan_unsafe_channel);
+
+int cnss_utils_get_wlan_unsafe_channel(struct device *dev,
+				       u16 *unsafe_ch_list,
+				       u16 *ch_count, u16 buf_len)
+{
+	struct cnss_utils_priv *priv = cnss_utils_priv;
+
+	if (!priv)
+		return -EINVAL;
+
+	mutex_lock(&priv->unsafe_channel_list_lock);
+	if (!unsafe_ch_list || !ch_count) {
+		mutex_unlock(&priv->unsafe_channel_list_lock);
+		return -EINVAL;
+	}
+
+	if (buf_len <
+	    (priv->unsafe_channel_list.unsafe_ch_count * sizeof(u16))) {
+		mutex_unlock(&priv->unsafe_channel_list_lock);
+		return -ENOMEM;
+	}
+
+	*ch_count = priv->unsafe_channel_list.unsafe_ch_count;
+	memcpy(unsafe_ch_list, priv->unsafe_channel_list.unsafe_ch_list,
+	       priv->unsafe_channel_list.unsafe_ch_count * sizeof(u16));
+	mutex_unlock(&priv->unsafe_channel_list_lock);
+
+	return 0;
+}
+EXPORT_SYMBOL(cnss_utils_get_wlan_unsafe_channel);
+
+int cnss_utils_wlan_set_dfs_nol(struct device *dev,
+				const void *info, u16 info_len)
+{
+	void *temp;
+	void *old_nol_info;
+	struct cnss_dfs_nol_info *dfs_info;
+	struct cnss_utils_priv *priv = cnss_utils_priv;
+
+	if (!priv)
+		return -EINVAL;
+
+	if (!info || !info_len)
+		return -EINVAL;
+
+	temp = kmalloc(info_len, GFP_ATOMIC);
+	if (!temp)
+		return -ENOMEM;
+
+	memcpy(temp, info, info_len);
+	spin_lock_bh(&priv->dfs_nol_info_lock);
+	dfs_info = &priv->dfs_nol_info;
+	old_nol_info = dfs_info->dfs_nol_info;
+	dfs_info->dfs_nol_info = temp;
+	dfs_info->dfs_nol_info_len = info_len;
+	spin_unlock_bh(&priv->dfs_nol_info_lock);
+	kfree(old_nol_info);
+
+	return 0;
+}
+EXPORT_SYMBOL(cnss_utils_wlan_set_dfs_nol);
+
+int cnss_utils_wlan_get_dfs_nol(struct device *dev,
+				void *info, u16 info_len)
+{
+	int len;
+	struct cnss_dfs_nol_info *dfs_info;
+	struct cnss_utils_priv *priv = cnss_utils_priv;
+
+	if (!priv)
+		return -EINVAL;
+
+	if (!info || !info_len)
+		return -EINVAL;
+
+	spin_lock_bh(&priv->dfs_nol_info_lock);
+
+	dfs_info = &priv->dfs_nol_info;
+	if (!dfs_info->dfs_nol_info ||
+	    dfs_info->dfs_nol_info_len == 0) {
+		spin_unlock_bh(&priv->dfs_nol_info_lock);
+		return -ENOENT;
+	}
+
+	len = min(info_len, dfs_info->dfs_nol_info_len);
+	memcpy(info, dfs_info->dfs_nol_info, len);
+	spin_unlock_bh(&priv->dfs_nol_info_lock);
+
+	return len;
+}
+EXPORT_SYMBOL(cnss_utils_wlan_get_dfs_nol);
+
+void cnss_utils_increment_driver_load_cnt(struct device *dev)
+{
+	struct cnss_utils_priv *priv = cnss_utils_priv;
+
+	if (!priv)
+		return;
+
+	++(priv->driver_load_cnt);
+}
+EXPORT_SYMBOL(cnss_utils_increment_driver_load_cnt);
+
+int cnss_utils_get_driver_load_cnt(struct device *dev)
+{
+	struct cnss_utils_priv *priv = cnss_utils_priv;
+
+	if (!priv)
+		return -EINVAL;
+
+	return priv->driver_load_cnt;
+}
+EXPORT_SYMBOL(cnss_utils_get_driver_load_cnt);
+
+static int set_wlan_mac_address(const u8 *mac_list, const uint32_t len,
+				enum mac_type type)
+{
+	struct cnss_utils_priv *priv = cnss_utils_priv;
+	u32 no_of_mac_addr;
+	struct cnss_wlan_mac_addr *addr = NULL;
+	int iter;
+	u8 *temp = NULL;
+
+	if (!priv)
+		return -EINVAL;
+
+	if (len == 0 || (len % ETH_ALEN) != 0) {
+		pr_err("Invalid length %u\n", len);
+		return -EINVAL;
+	}
+
+	no_of_mac_addr = len / ETH_ALEN;
+	if (no_of_mac_addr > MAX_NO_OF_MAC_ADDR) {
+		pr_err("Exceeds maximum supported MAC addresses: max %u, got %u\n",
+		       MAX_NO_OF_MAC_ADDR, no_of_mac_addr);
+		return -EINVAL;
+	}
+
+	if (type == CNSS_MAC_PROVISIONED)
+		addr = &priv->wlan_mac_addr;
+	else
+		addr = &priv->wlan_der_mac_addr;
+
+	if (addr->no_of_mac_addr_set) {
+		pr_err("WLAN MAC address is already set, num %d type %d\n",
+		       addr->no_of_mac_addr_set, type);
+		return 0;
+	}
+
+	addr->no_of_mac_addr_set = no_of_mac_addr;
+	temp = &addr->mac_addr[0][0];
+
+	for (iter = 0; iter < no_of_mac_addr;
+	     ++iter, temp += ETH_ALEN, mac_list += ETH_ALEN) {
+		ether_addr_copy(temp, mac_list);
+		pr_debug("MAC_ADDR:%02x:%02x:%02x:%02x:%02x:%02x\n",
+			 temp[0], temp[1], temp[2],
+			 temp[3], temp[4], temp[5]);
+	}
+	return 0;
+}
+
+int cnss_utils_set_wlan_mac_address(const u8 *mac_list, const uint32_t len)
+{
+	return set_wlan_mac_address(mac_list, len, CNSS_MAC_PROVISIONED);
+}
+EXPORT_SYMBOL(cnss_utils_set_wlan_mac_address);
+
+int cnss_utils_set_wlan_derived_mac_address(
+				const u8 *mac_list, const uint32_t len)
+{
+	return set_wlan_mac_address(mac_list, len, CNSS_MAC_DERIVED);
+}
+EXPORT_SYMBOL(cnss_utils_set_wlan_derived_mac_address);
+
+static u8 *get_wlan_mac_address(struct device *dev,
+				u32 *num, enum mac_type type)
+{
+	struct cnss_utils_priv *priv = cnss_utils_priv;
+	struct cnss_wlan_mac_addr *addr = NULL;
+
+	if (!priv)
+		goto out;
+
+	if (type == CNSS_MAC_PROVISIONED)
+		addr = &priv->wlan_mac_addr;
+	else
+		addr = &priv->wlan_der_mac_addr;
+
+	if (!addr->no_of_mac_addr_set) {
+		pr_err("WLAN MAC address is not set, type %d\n", type);
+		goto out;
+	}
+	*num = addr->no_of_mac_addr_set;
+	return &addr->mac_addr[0][0];
+
+out:
+	*num = 0;
+	return NULL;
+}
+
+u8 *cnss_utils_get_wlan_mac_address(struct device *dev, uint32_t *num)
+{
+	return get_wlan_mac_address(dev, num, CNSS_MAC_PROVISIONED);
+}
+EXPORT_SYMBOL(cnss_utils_get_wlan_mac_address);
+
+u8 *cnss_utils_get_wlan_derived_mac_address(
+			struct device *dev, uint32_t *num)
+{
+	return get_wlan_mac_address(dev, num, CNSS_MAC_DERIVED);
+}
+EXPORT_SYMBOL(cnss_utils_get_wlan_derived_mac_address);
+
+void cnss_utils_set_cc_source(struct device *dev,
+			      enum cnss_utils_cc_src cc_source)
+{
+	struct cnss_utils_priv *priv = cnss_utils_priv;
+
+	if (!priv)
+		return;
+
+	priv->cc_source = cc_source;
+}
+EXPORT_SYMBOL(cnss_utils_set_cc_source);
+
+enum cnss_utils_cc_src cnss_utils_get_cc_source(struct device *dev)
+{
+	struct cnss_utils_priv *priv = cnss_utils_priv;
+
+	if (!priv)
+		return -EINVAL;
+
+	return priv->cc_source;
+}
+EXPORT_SYMBOL(cnss_utils_get_cc_source);
+
+static ssize_t cnss_utils_mac_write(struct file *fp,
+				    const char __user *user_buf,
+				    size_t count, loff_t *off)
+{
+	struct cnss_utils_priv *priv =
+		((struct seq_file *)fp->private_data)->private;
+	char buf[128];
+	char *input, *mac_type, *mac_address;
+	u8 *dest_mac;
+	u8 val;
+	const char *delim = " \n";
+	size_t len = 0;
+	char temp[3] = "";
+
+	len = min_t(size_t, count, sizeof(buf) - 1);
+	if (copy_from_user(buf, user_buf, len))
+		return -EFAULT;
+	buf[len] = '\0';
+
+	input = buf;
+
+	mac_type = strsep(&input, delim);
+	if (!mac_type)
+		return -EINVAL;
+	if (!input)
+		return -EINVAL;
+
+	mac_address = strsep(&input, delim);
+	if (!mac_address)
+		return -EINVAL;
+	if (strncmp("0x", mac_address, MAC_PREFIX_LEN)) {
+		pr_err("Invalid MAC prefix\n");
+		return -EINVAL;
+	}
+
+	len = strlen(mac_address);
+	mac_address += MAC_PREFIX_LEN;
+	len -= MAC_PREFIX_LEN;
+	if (len < ETH_ALEN * 2 || len > ETH_ALEN * 2 * MAX_NO_OF_MAC_ADDR ||
+	    len % (ETH_ALEN * 2) != 0) {
+		pr_err("Invalid MAC address length %zu\n", len);
+		return -EINVAL;
+	}
+
+	if (!strcmp("provisioned", mac_type)) {
+		dest_mac = &priv->wlan_mac_addr.mac_addr[0][0];
+		priv->wlan_mac_addr.no_of_mac_addr_set = len / (ETH_ALEN * 2);
+	} else if (!strcmp("derived", mac_type)) {
+		dest_mac = &priv->wlan_der_mac_addr.mac_addr[0][0];
+		priv->wlan_der_mac_addr.no_of_mac_addr_set =
+			len / (ETH_ALEN * 2);
+	} else {
+		pr_err("Invalid MAC address type %s\n", mac_type);
+		return -EINVAL;
+	}
+
+	/* each output byte consumes two hex characters */
+	for (; len >= 2; len -= 2) {
+		temp[0] = *mac_address++;
+		temp[1] = *mac_address++;
+		if (kstrtou8(temp, 16, &val))
+			return -EINVAL;
+		*dest_mac++ = val;
+	}
+	return count;
+}
+
+static int cnss_utils_mac_show(struct seq_file *s, void *data)
+{
+	u8 mac[6];
+	int i;
+	struct cnss_utils_priv *priv = s->private;
+	struct cnss_wlan_mac_addr *addr = NULL;
+
+	addr = &priv->wlan_mac_addr;
+	if (addr->no_of_mac_addr_set) {
+		seq_puts(s, "\nProvisioned MAC addresses\n");
+		for (i = 0; i < addr->no_of_mac_addr_set; i++) {
+			ether_addr_copy(mac, addr->mac_addr[i]);
+			seq_printf(s, "MAC_ADDR:%02x:%02x:%02x:%02x:%02x:%02x\n",
+				   mac[0], mac[1], mac[2],
+				   mac[3], mac[4], mac[5]);
+		}
+	}
+
+	addr = &priv->wlan_der_mac_addr;
+	if (addr->no_of_mac_addr_set) {
+		seq_puts(s, "\nDerived MAC addresses\n");
+		for (i = 0; i < addr->no_of_mac_addr_set; i++) {
+			ether_addr_copy(mac, addr->mac_addr[i]);
+			seq_printf(s, "MAC_ADDR:%02x:%02x:%02x:%02x:%02x:%02x\n",
+				   mac[0], mac[1], mac[2],
+				   mac[3], mac[4], mac[5]);
+		}
+	}
+
+	return 0;
+}
+
+static int cnss_utils_mac_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, cnss_utils_mac_show, inode->i_private);
+}
+
+static const struct file_operations cnss_utils_mac_fops = {
+	.read		= seq_read,
+	.write		= cnss_utils_mac_write,
+	.release	= single_release,
+	.open		= cnss_utils_mac_open,
+	.owner		= THIS_MODULE,
+	.llseek		= seq_lseek,
+};
+
+static int cnss_utils_debugfs_create(struct cnss_utils_priv *priv)
+{
+	int ret = 0;
+	struct dentry *root_dentry;
+
+	root_dentry = debugfs_create_dir("cnss_utils", NULL);
+
+	if (IS_ERR(root_dentry)) {
+		ret = PTR_ERR(root_dentry);
+		pr_err("Unable to create debugfs %d\n", ret);
+		goto out;
+	}
+	priv->root_dentry = root_dentry;
+	debugfs_create_file("mac_address", 0600, root_dentry, priv,
+			    &cnss_utils_mac_fops);
+out:
+	return ret;
+}
+
+static int __init cnss_utils_init(void)
+{
+	struct cnss_utils_priv *priv = NULL;
+
+	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+
+	priv->cc_source = CNSS_UTILS_SOURCE_CORE;
+
+	mutex_init(&priv->unsafe_channel_list_lock);
+	spin_lock_init(&priv->dfs_nol_info_lock);
+	cnss_utils_debugfs_create(priv);
+	cnss_utils_priv = priv;
+
+	return 0;
+}
+
+static void __exit cnss_utils_exit(void)
+{
+	debugfs_remove_recursive(cnss_utils_priv->root_dentry);
+	kfree(cnss_utils_priv);
+	cnss_utils_priv = NULL;
+}
+
+module_init(cnss_utils_init);
+module_exit(cnss_utils_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("CNSS Utilities Driver");
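The debugfs file registered above accepts writes of the form "<provisioned|derived> 0x<hex bytes>", e.g. writing "provisioned 0x00037f123456" to /sys/kernel/debug/cnss_utils/mac_address; kernel-side users call the exported setters instead. A brief sketch (the addresses are placeholders):

/* Sketch: provision two WLAN MAC addresses from another kernel
 * module through the exported API.
 */
static int sketch_set_macs(void)
{
	static const u8 macs[2][ETH_ALEN] = {
		{ 0x00, 0x03, 0x7f, 0x12, 0x34, 0x56 },
		{ 0x00, 0x03, 0x7f, 0x12, 0x34, 0x57 },
	};

	return cnss_utils_set_wlan_mac_address(&macs[0][0], sizeof(macs));
}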
diff -Nruw linux-4.4.115-fbx/drivers/net/wireless/cnss_utils./Kconfig linux-4.4.115-fbx/drivers/net/wireless/cnss_utils/Kconfig
--- linux-4.4.115-fbx/drivers/net/wireless/cnss_utils./Kconfig	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/net/wireless/cnss_utils/Kconfig	2019-01-22 16:16:25.663265967 +0100
@@ -0,0 +1,6 @@
+config CNSS_UTILS
+	bool "CNSS utilities support"
+	---help---
+	  Add CNSS utilities support for the WLAN driver module.
+	  This feature enables the WLAN driver to use the CNSS utilities
+	  APIs to set and get WLAN related information.
\ No newline at end of file
diff -Nruw linux-4.4.115-fbx/drivers/net/wireless/cnss_utils./Makefile linux-4.4.115-fbx/drivers/net/wireless/cnss_utils/Makefile
--- linux-4.4.115-fbx/drivers/net/wireless/cnss_utils./Makefile	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/net/wireless/cnss_utils/Makefile	2019-01-22 16:16:25.663265967 +0100
@@ -0,0 +1 @@
+obj-$(CONFIG_CNSS_UTILS) += cnss_utils.o
diff -Nruw linux-4.4.115-fbx/drivers/net/wireless/wcnss./Makefile linux-4.4.115-fbx/drivers/net/wireless/wcnss/Makefile
--- linux-4.4.115-fbx/drivers/net/wireless/wcnss./Makefile	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/net/wireless/wcnss/Makefile	2019-01-22 16:16:25.991268937 +0100
@@ -0,0 +1,6 @@
+
+# Makefile for WCNSS triple-play driver
+
+wcnsscore-objs += wcnss_wlan.o wcnss_vreg.o
+
+obj-$(CONFIG_WCNSS_CORE) += wcnsscore.o
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/nfc/nq-nci.c	2019-01-22 16:16:25.999269009 +0100
@@ -0,0 +1,1250 @@
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/reboot.h>
+#include <linux/slab.h>
+#include <linux/irq.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/gpio.h>
+#include <linux/spinlock.h>
+#include <linux/of_gpio.h>
+#include <linux/of_device.h>
+#include <linux/uaccess.h>
+#include "nq-nci.h"
+#include <linux/clk.h>
+#ifdef CONFIG_COMPAT
+#include <linux/compat.h>
+#endif
+
+struct nqx_platform_data {
+	unsigned int irq_gpio;
+	unsigned int en_gpio;
+	unsigned int clkreq_gpio;
+	unsigned int firm_gpio;
+	unsigned int ese_gpio;
+	const char *clk_src_name;
+};
+
+static const struct of_device_id msm_match_table[] = {
+	{.compatible = "qcom,nq-nci"},
+	{}
+};
+
+MODULE_DEVICE_TABLE(of, msm_match_table);
+
+#define MAX_BUFFER_SIZE			(320)
+#define WAKEUP_SRC_TIMEOUT		(2000)
+#define MAX_RETRY_COUNT			3
+
+struct nqx_dev {
+	wait_queue_head_t	read_wq;
+	struct	mutex		read_mutex;
+	struct	i2c_client	*client;
+	struct	miscdevice	nqx_device;
+	union  nqx_uinfo	nqx_info;
+	/* NFC GPIO variables */
+	unsigned int		irq_gpio;
+	unsigned int		en_gpio;
+	unsigned int		firm_gpio;
+	unsigned int		clkreq_gpio;
+	unsigned int		ese_gpio;
+	/* NFC VEN pin state powered by Nfc */
+	bool			nfc_ven_enabled;
+	/* NFC_IRQ state */
+	bool			irq_enabled;
+	/* NFC_IRQ wake-up state */
+	bool			irq_wake_up;
+	spinlock_t		irq_enabled_lock;
+	unsigned int		count_irq;
+	/* Initial CORE RESET notification */
+	unsigned int		core_reset_ntf;
+	/* CLK control */
+	bool			clk_run;
+	struct	clk		*s_clk;
+	/* read buffer */
+	size_t kbuflen;
+	u8 *kbuf;
+	struct nqx_platform_data *pdata;
+};
+
+static int nfcc_reboot(struct notifier_block *notifier, unsigned long val,
+			void *v);
+/*clock enable function*/
+static int nqx_clock_select(struct nqx_dev *nqx_dev);
+/*clock disable function*/
+static int nqx_clock_deselect(struct nqx_dev *nqx_dev);
+static struct notifier_block nfcc_notifier = {
+	.notifier_call	= nfcc_reboot,
+	.next			= NULL,
+	.priority		= 0
+};
+
+static unsigned int	disable_ctrl;
+
+static void nqx_init_stat(struct nqx_dev *nqx_dev)
+{
+	nqx_dev->count_irq = 0;
+}
+
+static void nqx_disable_irq(struct nqx_dev *nqx_dev)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&nqx_dev->irq_enabled_lock, flags);
+	if (nqx_dev->irq_enabled) {
+		disable_irq_nosync(nqx_dev->client->irq);
+		nqx_dev->irq_enabled = false;
+	}
+	spin_unlock_irqrestore(&nqx_dev->irq_enabled_lock, flags);
+}
+
+/**
+ * nqx_enable_irq()
+ * @nqx_dev:	nqx device structure
+ *
+ * Enable the interrupt if it is not already enabled.
+ *
+ * Return: void
+ */
+static void nqx_enable_irq(struct nqx_dev *nqx_dev)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&nqx_dev->irq_enabled_lock, flags);
+	if (!nqx_dev->irq_enabled) {
+		nqx_dev->irq_enabled = true;
+		enable_irq(nqx_dev->client->irq);
+	}
+	spin_unlock_irqrestore(&nqx_dev->irq_enabled_lock, flags);
+}
+
+static irqreturn_t nqx_dev_irq_handler(int irq, void *dev_id)
+{
+	struct nqx_dev *nqx_dev = dev_id;
+	unsigned long flags;
+
+	if (device_may_wakeup(&nqx_dev->client->dev))
+		pm_wakeup_event(&nqx_dev->client->dev, WAKEUP_SRC_TIMEOUT);
+
+	nqx_disable_irq(nqx_dev);
+	spin_lock_irqsave(&nqx_dev->irq_enabled_lock, flags);
+	nqx_dev->count_irq++;
+	spin_unlock_irqrestore(&nqx_dev->irq_enabled_lock, flags);
+	wake_up(&nqx_dev->read_wq);
+
+	return IRQ_HANDLED;
+}
+
+static ssize_t nfc_read(struct file *filp, char __user *buf,
+					size_t count, loff_t *offset)
+{
+	struct nqx_dev *nqx_dev = filp->private_data;
+	unsigned char *tmp = NULL;
+	int ret;
+	int irq_gpio_val = 0;
+
+	if (!nqx_dev) {
+		ret = -ENODEV;
+		goto out;
+	}
+
+	if (count > nqx_dev->kbuflen)
+		count = nqx_dev->kbuflen;
+
+	dev_dbg(&nqx_dev->client->dev, "%s : reading %zu bytes.\n",
+			__func__, count);
+
+	mutex_lock(&nqx_dev->read_mutex);
+
+	irq_gpio_val = gpio_get_value(nqx_dev->irq_gpio);
+	if (irq_gpio_val == 0) {
+		if (filp->f_flags & O_NONBLOCK) {
+			dev_err(&nqx_dev->client->dev,
+				"f_flags has O_NONBLOCK. EAGAIN\n");
+			ret = -EAGAIN;
+			goto err;
+		}
+		while (1) {
+			ret = 0;
+			if (!nqx_dev->irq_enabled) {
+				nqx_dev->irq_enabled = true;
+				enable_irq(nqx_dev->client->irq);
+			}
+			if (!gpio_get_value(nqx_dev->irq_gpio)) {
+				ret = wait_event_interruptible(nqx_dev->read_wq,
+					!nqx_dev->irq_enabled);
+			}
+			if (ret)
+				goto err;
+			nqx_disable_irq(nqx_dev);
+
+			if (gpio_get_value(nqx_dev->irq_gpio))
+				break;
+			dev_err_ratelimited(&nqx_dev->client->dev, "gpio is low, no need to read data\n");
+		}
+	}
+
+	tmp = nqx_dev->kbuf;
+	if (!tmp) {
+		dev_err(&nqx_dev->client->dev,
+			"%s: device doesn't exist anymore\n", __func__);
+		ret = -ENODEV;
+		goto err;
+	}
+	memset(tmp, 0x00, count);
+
+	/* Read data */
+	ret = i2c_master_recv(nqx_dev->client, tmp, count);
+	if (ret < 0) {
+		dev_err(&nqx_dev->client->dev,
+			"%s: i2c_master_recv returned %d\n", __func__, ret);
+		goto err;
+	}
+	if (ret > count) {
+		dev_err(&nqx_dev->client->dev,
+			"%s: received too many bytes from i2c (%d)\n",
+			__func__, ret);
+		ret = -EIO;
+		goto err;
+	}
+#ifdef NFC_KERNEL_BU
+	dev_dbg(&nqx_dev->client->dev, "%s : NfcNciRx %x %x %x\n",
+		__func__, tmp[0], tmp[1], tmp[2]);
+#endif
+	if (copy_to_user(buf, tmp, ret)) {
+		dev_warn(&nqx_dev->client->dev,
+			"%s : failed to copy to user space\n", __func__);
+		ret = -EFAULT;
+		goto err;
+	}
+	mutex_unlock(&nqx_dev->read_mutex);
+	return ret;
+
+err:
+	mutex_unlock(&nqx_dev->read_mutex);
+out:
+	return ret;
+}
+
+static ssize_t nfc_write(struct file *filp, const char __user *buf,
+				size_t count, loff_t *offset)
+{
+	struct nqx_dev *nqx_dev = filp->private_data;
+	char *tmp = NULL;
+	int ret = 0;
+
+	if (!nqx_dev) {
+		ret = -ENODEV;
+		goto out;
+	}
+	if (count > nqx_dev->kbuflen) {
+		dev_err(&nqx_dev->client->dev, "%s: out of memory\n",
+			__func__);
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	tmp = memdup_user(buf, count);
+	if (IS_ERR(tmp)) {
+		dev_err(&nqx_dev->client->dev, "%s: memdup_user failed\n",
+			__func__);
+		ret = PTR_ERR(tmp);
+		goto out;
+	}
+
+	ret = i2c_master_send(nqx_dev->client, tmp, count);
+	if (ret != count) {
+		dev_dbg(&nqx_dev->client->dev,
+		"%s: failed to write %d\n", __func__, ret);
+		ret = -EIO;
+		goto out_free;
+	}
+#ifdef NFC_KERNEL_BU
+	dev_dbg(&nqx_dev->client->dev,
+			"%s : i2c-%d: NfcNciTx %x %x %x\n",
+			__func__, iminor(file_inode(filp)),
+			tmp[0], tmp[1], tmp[2]);
+#endif
+	usleep_range(1000, 1100);
+out_free:
+	kfree(tmp);
+out:
+	return ret;
+}
+
+/**
+ * nqx_standby_write()
+ * @nqx_dev:	nqx device structure
+ * @buf:	pointer to data buffer
+ * @len:	number of bytes to transfer
+ *
+ * Write the data buffer over I2C, retrying if the
+ * NFCC is in standby mode.
+ *
+ * Return: # of bytes written or -ve value in case of error
+ */
+static int nqx_standby_write(struct nqx_dev *nqx_dev,
+				const unsigned char *buf, size_t len)
+{
+	int ret = -EINVAL;
+	int retry_cnt;
+
+	for (retry_cnt = 1; retry_cnt <= MAX_RETRY_COUNT; retry_cnt++) {
+		ret = i2c_master_send(nqx_dev->client, buf, len);
+		if (ret < 0) {
+			dev_dbg(&nqx_dev->client->dev,
+				"%s: write failed, Maybe in Standby Mode - Retry(%d)\n",
+				 __func__, retry_cnt);
+			usleep_range(1000, 1100);
+		} else if (ret == len)
+			break;
+	}
+	return ret;
+}
+
+/*
+ * Power management of the eSE
+ * NFC & eSE ON : NFC_EN high and eSE_pwr_req high.
+ * NFC OFF & eSE ON : NFC_EN high and eSE_pwr_req high.
+ * NFC OFF & eSE OFF : NFC_EN low and eSE_pwr_req low.
+ */
+static int nqx_ese_pwr(struct nqx_dev *nqx_dev, unsigned long int arg)
+{
+	int r = -1;
+	const unsigned char svdd_off_cmd_warn[] =  {0x2F, 0x31, 0x01, 0x01};
+	const unsigned char svdd_off_cmd_done[] =  {0x2F, 0x31, 0x01, 0x00};
+
+	if (!gpio_is_valid(nqx_dev->ese_gpio)) {
+		dev_err(&nqx_dev->client->dev,
+			"%s: ese_gpio is not valid\n", __func__);
+		return -EINVAL;
+	}
+
+	if (arg == 0) {
+		/*
+		 * We want to power on the eSE and to do so we need the
+		 * eSE_pwr_req pin and the NFC_EN pin to be high
+		 */
+		if (gpio_get_value(nqx_dev->ese_gpio)) {
+			dev_dbg(&nqx_dev->client->dev, "ese_gpio is already high\n");
+			r = 0;
+		} else {
+			/**
+			 * Let's store the NFC_EN pin state
+			 * only if the eSE is not yet on
+			 */
+			nqx_dev->nfc_ven_enabled =
+					gpio_get_value(nqx_dev->en_gpio);
+			if (!nqx_dev->nfc_ven_enabled) {
+				gpio_set_value(nqx_dev->en_gpio, 1);
+				/* hardware dependent delay */
+				usleep_range(1000, 1100);
+			}
+			gpio_set_value(nqx_dev->ese_gpio, 1);
+			if (gpio_get_value(nqx_dev->ese_gpio)) {
+				dev_dbg(&nqx_dev->client->dev, "ese_gpio is enabled\n");
+				r = 0;
+			}
+		}
+	} else if (arg == 1) {
+		if (nqx_dev->nfc_ven_enabled &&
+			((nqx_dev->nqx_info.info.chip_type == NFCC_NQ_220) ||
+			(nqx_dev->nqx_info.info.chip_type == NFCC_PN66T))) {
+			/**
+			 * Let's inform the CLF we're
+			 * powering off the eSE
+			 */
+			r = nqx_standby_write(nqx_dev, svdd_off_cmd_warn,
+						sizeof(svdd_off_cmd_warn));
+			if (r < 0) {
+				dev_err(&nqx_dev->client->dev,
+					"%s: write failed after max retry\n",
+					 __func__);
+				return -ENXIO;
+			}
+			dev_dbg(&nqx_dev->client->dev,
+				"%s: svdd_off_cmd_warn sent\n", __func__);
+
+			/* let's power down the eSE */
+			gpio_set_value(nqx_dev->ese_gpio, 0);
+			dev_dbg(&nqx_dev->client->dev,
+				"%s: nqx_dev->ese_gpio set to 0\n", __func__);
+
+			/**
+			 * Time needed for the SVDD capacitor
+			 * to get discharged
+			 */
+			usleep_range(8000, 8100);
+
+			/* Let's inform the CLF the eSE is now off */
+			r = nqx_standby_write(nqx_dev, svdd_off_cmd_done,
+						sizeof(svdd_off_cmd_done));
+			if (r < 0) {
+				dev_err(&nqx_dev->client->dev,
+					"%s: write failed after max retry\n",
+					 __func__);
+				return -ENXIO;
+			}
+			dev_dbg(&nqx_dev->client->dev,
+				"%s: svdd_off_cmd_done sent\n", __func__);
+		} else {
+			/**
+			 * In case the NFC is off,
+			 * there's no need to send the i2c commands
+			 */
+			gpio_set_value(nqx_dev->ese_gpio, 0);
+		}
+
+		if (!gpio_get_value(nqx_dev->ese_gpio)) {
+			dev_dbg(&nqx_dev->client->dev, "ese_gpio is disabled\n");
+			r = 0;
+		}
+
+		if (!nqx_dev->nfc_ven_enabled) {
+			/* hardware dependent delay */
+			usleep_range(1000, 1100);
+			dev_dbg(&nqx_dev->client->dev, "disabling en_gpio\n");
+			gpio_set_value(nqx_dev->en_gpio, 0);
+		}
+	} else if (arg == 3) {
+		r = gpio_get_value(nqx_dev->ese_gpio);
+	}
+	return r;
+}
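+
+/*
+ * Hedged usage sketch for nqx_ese_pwr() (the userspace side is an
+ * assumption; only the arg values and the misc device name "nq-nci"
+ * come from this driver):
+ *
+ *	int fd = open("/dev/nq-nci", O_RDWR);
+ *	ioctl(fd, ESE_SET_PWR, 0);		// arg 0: power the eSE on
+ *	ioctl(fd, ESE_SET_PWR, 1);		// arg 1: power the eSE off
+ *	int on = ioctl(fd, ESE_GET_PWR, 0);	// reads back ese_gpio (arg 3 path)
+ */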
+
+static int nfc_open(struct inode *inode, struct file *filp)
+{
+	int ret = 0;
+	struct nqx_dev *nqx_dev = container_of(filp->private_data,
+				struct nqx_dev, nqx_device);
+
+	filp->private_data = nqx_dev;
+	nqx_init_stat(nqx_dev);
+
+	dev_dbg(&nqx_dev->client->dev,
+			"%s: %d,%d\n", __func__, imajor(inode), iminor(inode));
+	return ret;
+}
+
+/*
+ * nfc_ioctl_power_states() - power control
+ * @filp:	pointer to the file descriptor
+ * @arg:	mode that we want to move to
+ *
+ * Device power control. Depending on the arg value, device moves to
+ * different states
+ * (arg = 0): NFC_ENABLE	GPIO = 0, FW_DL GPIO = 0
+ * (arg = 1): NFC_ENABLE	GPIO = 1, FW_DL GPIO = 0
+ * (arg = 2): FW_DL GPIO = 1
+ *
+ * Return: -ENOIOCTLCMD if arg is not supported, 0 in any other case
+ */
+int nfc_ioctl_power_states(struct file *filp, unsigned long arg)
+{
+	int r = 0;
+	struct nqx_dev *nqx_dev = filp->private_data;
+
+	if (arg == 0) {
+		/*
+		 * We are attempting a hardware reset so let us disable
+		 * interrupts to avoid spurious notifications to upper
+		 * layers.
+		 */
+		nqx_disable_irq(nqx_dev);
+		dev_dbg(&nqx_dev->client->dev,
+			"gpio_set_value disable: %s: info: %p\n",
+			__func__, nqx_dev);
+		if (gpio_is_valid(nqx_dev->firm_gpio)) {
+			gpio_set_value(nqx_dev->firm_gpio, 0);
+			usleep_range(10000, 10100);
+		}
+
+		if (gpio_is_valid(nqx_dev->ese_gpio)) {
+			if (!gpio_get_value(nqx_dev->ese_gpio)) {
+				dev_dbg(&nqx_dev->client->dev, "disabling en_gpio\n");
+				gpio_set_value(nqx_dev->en_gpio, 0);
+				usleep_range(10000, 10100);
+			} else {
+				dev_dbg(&nqx_dev->client->dev, "keeping en_gpio high\n");
+			}
+		} else {
+			dev_dbg(&nqx_dev->client->dev, "ese_gpio invalid, set en_gpio to low\n");
+			gpio_set_value(nqx_dev->en_gpio, 0);
+			usleep_range(10000, 10100);
+		}
+		r = nqx_clock_deselect(nqx_dev);
+		if (r < 0)
+			dev_err(&nqx_dev->client->dev, "unable to disable clock\n");
+		nqx_dev->nfc_ven_enabled = false;
+	} else if (arg == 1) {
+		nqx_enable_irq(nqx_dev);
+		dev_dbg(&nqx_dev->client->dev,
+			"gpio_set_value enable: %s: info: %p\n",
+			__func__, nqx_dev);
+		if (gpio_is_valid(nqx_dev->firm_gpio)) {
+			gpio_set_value(nqx_dev->firm_gpio, 0);
+			usleep_range(10000, 10100);
+		}
+		gpio_set_value(nqx_dev->en_gpio, 1);
+		usleep_range(10000, 10100);
+		r = nqx_clock_select(nqx_dev);
+		if (r < 0)
+			dev_err(&nqx_dev->client->dev, "unable to enable clock\n");
+		nqx_dev->nfc_ven_enabled = true;
+	} else if (arg == 2) {
+		/*
+		 * We are switching to Download Mode, toggle the enable pin
+		 * in order to set the NFCC in the new mode
+		 */
+		 */
+		if (gpio_is_valid(nqx_dev->ese_gpio)) {
+			if (gpio_get_value(nqx_dev->ese_gpio)) {
+				dev_err(&nqx_dev->client->dev, "FW download forbidden while ese is on\n");
+				return -EBUSY; /* Device or resource busy */
+			}
+		}
+		gpio_set_value(nqx_dev->en_gpio, 1);
+		usleep_range(10000, 10100);
+		if (gpio_is_valid(nqx_dev->firm_gpio)) {
+			gpio_set_value(nqx_dev->firm_gpio, 1);
+			usleep_range(10000, 10100);
+		}
+		gpio_set_value(nqx_dev->en_gpio, 0);
+		usleep_range(10000, 10100);
+		gpio_set_value(nqx_dev->en_gpio, 1);
+		usleep_range(10000, 10100);
+	} else {
+		r = -ENOIOCTLCMD;
+	}
+
+	return r;
+}
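+
+/*
+ * Illustrative call sequence for the power states above (the userspace
+ * side is an assumption; the arg values 0/1/2 are the ones handled by
+ * this driver):
+ *
+ *	ioctl(fd, NFC_SET_PWR, 1);	// VEN high, ref clock on: normal mode
+ *	ioctl(fd, NFC_SET_PWR, 2);	// FW_DL high + VEN toggle: download mode
+ *	ioctl(fd, NFC_SET_PWR, 0);	// VEN low (unless the eSE holds it), clock off
+ */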
+
+#ifdef CONFIG_COMPAT
+static long nfc_compat_ioctl(struct file *pfile, unsigned int cmd,
+				unsigned long arg)
+{
+	long r = 0;
+
+	arg = (compat_u64)arg;
+	switch (cmd) {
+	case NFC_SET_PWR:
+		/* propagate return codes, as the native ioctl path does */
+		r = nfc_ioctl_power_states(pfile, arg);
+		break;
+	case ESE_SET_PWR:
+		r = nqx_ese_pwr(pfile->private_data, arg);
+		break;
+	case ESE_GET_PWR:
+		r = nqx_ese_pwr(pfile->private_data, 3);
+		break;
+	case SET_RX_BLOCK:
+		break;
+	case SET_EMULATOR_TEST_POINT:
+		break;
+	default:
+		r = -ENOTTY;
+	}
+	return r;
+}
+#endif
+
+/*
+ * nfc_ioctl_core_reset_ntf()
+ * @filp:       pointer to the file descriptor
+ *
+ * Allows callers to determine if a CORE_RESET_NTF has arrived
+ *
+ * Return: the value of variable core_reset_ntf
+ */
+int nfc_ioctl_core_reset_ntf(struct file *filp)
+{
+	struct nqx_dev *nqx_dev = filp->private_data;
+
+	dev_dbg(&nqx_dev->client->dev, "%s: returning = %d\n", __func__,
+		nqx_dev->core_reset_ntf);
+	return nqx_dev->core_reset_ntf;
+}
+
+/*
+ * nfc_ioctl_nfcc_info()
+ * @filp:	pointer to the file descriptor
+ *
+ * Report the NQ chipset and firmware version details.
+ *
+ * Return: the encoded nqx_info word
+ */
+unsigned int nfc_ioctl_nfcc_info(struct file *filp, unsigned long arg)
+{
+	unsigned int r = 0;
+	struct nqx_dev *nqx_dev = filp->private_data;
+
+	r = nqx_dev->nqx_info.i;
+	dev_dbg(&nqx_dev->client->dev,
+		"nqx nfc : nfc_ioctl_nfcc_info r = %d\n", r);
+
+	return r;
+}
+
+static long nfc_ioctl(struct file *pfile, unsigned int cmd,
+			unsigned long arg)
+{
+	int r = 0;
+
+	switch (cmd) {
+	case NFC_SET_PWR:
+		r = nfc_ioctl_power_states(pfile, arg);
+		break;
+	case ESE_SET_PWR:
+		r = nqx_ese_pwr(pfile->private_data, arg);
+		break;
+	case ESE_GET_PWR:
+		r = nqx_ese_pwr(pfile->private_data, 3);
+		break;
+	case SET_RX_BLOCK:
+		break;
+	case SET_EMULATOR_TEST_POINT:
+		break;
+	case NFCC_INITIAL_CORE_RESET_NTF:
+		r = nfc_ioctl_core_reset_ntf(pfile);
+		break;
+	case NFCC_GET_INFO:
+		r = nfc_ioctl_nfcc_info(pfile, arg);
+		break;
+	default:
+		r = -ENOIOCTLCMD;
+	}
+	return r;
+}
+
+static const struct file_operations nfc_dev_fops = {
+	.owner = THIS_MODULE,
+	.llseek = no_llseek,
+	.read  = nfc_read,
+	.write = nfc_write,
+	.open = nfc_open,
+	.unlocked_ioctl = nfc_ioctl,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl = nfc_compat_ioctl
+#endif
+};
+
+/* Check for availability of NQ_ NFC controller hardware */
+static int nfcc_hw_check(struct i2c_client *client, struct nqx_dev *nqx_dev)
+{
+	int ret = 0;
+
+	unsigned char raw_nci_reset_cmd[] =  {0x20, 0x00, 0x01, 0x00};
+	unsigned char raw_nci_init_cmd[] =   {0x20, 0x01, 0x00};
+	unsigned char nci_init_rsp[28];
+	unsigned char nci_reset_rsp[6];
+	unsigned char init_rsp_len = 0;
+	unsigned int enable_gpio = nqx_dev->en_gpio;
+
+	/* making sure that the NFCC starts in a clean state. */
+	gpio_set_value(enable_gpio, 0); /* ULPM: Disable */
+	/* hardware dependent delay */
+	usleep_range(10000, 10100);
+	gpio_set_value(enable_gpio, 1); /* HPD: Enable */
+	/* hardware dependent delay */
+	usleep_range(10000, 10100);
+
+	/* send NCI CORE RESET CMD with Keep Config parameters */
+	ret = i2c_master_send(client, raw_nci_reset_cmd,
+						sizeof(raw_nci_reset_cmd));
+	if (ret < 0) {
+		dev_err(&client->dev,
+		"%s: - i2c_master_send Error\n", __func__);
+		goto err_nfcc_hw_check;
+	}
+	/* hardware dependent delay */
+	msleep(30);
+
+	/* Read Response of RESET command */
+	ret = i2c_master_recv(client, nci_reset_rsp,
+		sizeof(nci_reset_rsp));
+	if (ret < 0) {
+		dev_err(&client->dev,
+		"%s: - i2c_master_recv Error\n", __func__);
+		goto err_nfcc_hw_check;
+	}
+	ret = nqx_standby_write(nqx_dev, raw_nci_init_cmd,
+				sizeof(raw_nci_init_cmd));
+	if (ret < 0) {
+		dev_err(&client->dev,
+		"%s: - i2c_master_send Error\n", __func__);
+		goto err_nfcc_core_init_fail;
+	}
+	/* hardware dependent delay */
+	msleep(30);
+	/* Read Response of INIT command */
+	ret = i2c_master_recv(client, nci_init_rsp,
+		sizeof(nci_init_rsp));
+	if (ret < 0) {
+		dev_err(&client->dev,
+		"%s: - i2c_master_recv Error\n", __func__);
+		goto err_nfcc_core_init_fail;
+	}
+	init_rsp_len = 2 + nci_init_rsp[2]; /* payload + len */
+	/* guard against a response longer than the local buffer */
+	if (init_rsp_len > PAYLOAD_HEADER_LENGTH &&
+		init_rsp_len < sizeof(nci_init_rsp)) {
+		nqx_dev->nqx_info.info.chip_type =
+				nci_init_rsp[init_rsp_len - 3];
+		nqx_dev->nqx_info.info.rom_version =
+				nci_init_rsp[init_rsp_len - 2];
+		nqx_dev->nqx_info.info.fw_major =
+				nci_init_rsp[init_rsp_len - 1];
+		nqx_dev->nqx_info.info.fw_minor =
+				nci_init_rsp[init_rsp_len];
+	}
+	dev_dbg(&client->dev,
+		"%s: - nq - reset cmd answer : NfcNciRx %x %x %x\n",
+		__func__, nci_reset_rsp[0],
+		nci_reset_rsp[1], nci_reset_rsp[2]);
+
+	dev_dbg(&nqx_dev->client->dev, "NQ NFCC chip_type = %x\n",
+		nqx_dev->nqx_info.info.chip_type);
+	dev_dbg(&nqx_dev->client->dev, "NQ fw version = %x.%x.%x\n",
+		nqx_dev->nqx_info.info.rom_version,
+		nqx_dev->nqx_info.info.fw_major,
+		nqx_dev->nqx_info.info.fw_minor);
+
+	switch (nqx_dev->nqx_info.info.chip_type) {
+	case NFCC_NQ_210:
+		dev_dbg(&client->dev,
+		"%s: ## NFCC == NQ210 ##\n", __func__);
+		break;
+	case NFCC_NQ_220:
+		dev_dbg(&client->dev,
+		"%s: ## NFCC == NQ220 ##\n", __func__);
+		break;
+	case NFCC_NQ_310:
+		dev_dbg(&client->dev,
+		"%s: ## NFCC == NQ310 ##\n", __func__);
+		break;
+	case NFCC_NQ_330:
+		dev_dbg(&client->dev,
+		"%s: ## NFCC == NQ330 ##\n", __func__);
+		break;
+	case NFCC_PN66T:
+		dev_dbg(&client->dev,
+		"%s: ## NFCC == PN66T ##\n", __func__);
+		break;
+	default:
+		dev_err(&client->dev,
+		"%s: - NFCC HW not Supported\n", __func__);
+		break;
+	}
+
+	/* Disable NFC by default to save power on boot */
+	gpio_set_value(enable_gpio, 0); /* ULPM: Disable */
+	ret = 0;
+	goto done;
+
+err_nfcc_core_init_fail:
+	dev_err(&client->dev,
+	"%s: - nq - reset cmd answer : NfcNciRx %x %x %x\n",
+	__func__, nci_reset_rsp[0],
+	nci_reset_rsp[1], nci_reset_rsp[2]);
+
+err_nfcc_hw_check:
+	ret = -ENXIO;
+	dev_err(&client->dev,
+		"%s: - NFCC HW not available\n", __func__);
+done:
+	return ret;
+}
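+
+/*
+ * Worked example of the CORE_INIT response parsing above (the byte value
+ * is purely illustrative): if the payload length nci_init_rsp[2] is 0x11,
+ * then init_rsp_len = 0x13, and chip_type, rom_version, fw_major and
+ * fw_minor are taken from indices 0x10..0x13, i.e. the last four bytes of
+ * the 3-byte header plus 0x11-byte payload.
+ */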
+
+/*
+ * Routine to enable the clock.
+ * This routine can be extended to select from multiple
+ * sources based on clk_src_name.
+ */
+static int nqx_clock_select(struct nqx_dev *nqx_dev)
+{
+	int r = 0;
+
+	nqx_dev->s_clk = clk_get(&nqx_dev->client->dev, "ref_clk");
+
+	/* clk_get() reports failure as an ERR_PTR(), not only as NULL */
+	if (IS_ERR_OR_NULL(nqx_dev->s_clk))
+		goto err_clk;
+
+	if (nqx_dev->clk_run == false)
+		r = clk_prepare_enable(nqx_dev->s_clk);
+
+	if (r)
+		goto err_clk;
+
+	nqx_dev->clk_run = true;
+
+	return r;
+
+err_clk:
+	r = -1;
+	return r;
+}
+/*
+ * Routine to disable the clock.
+ */
+static int nqx_clock_deselect(struct nqx_dev *nqx_dev)
+{
+	int r = -1;
+
+	if (nqx_dev->s_clk != NULL) {
+		if (nqx_dev->clk_run == true) {
+			clk_disable_unprepare(nqx_dev->s_clk);
+			nqx_dev->clk_run = false;
+		}
+		return 0;
+	}
+	return r;
+}
+
+static int nfc_parse_dt(struct device *dev, struct nqx_platform_data *pdata)
+{
+	int r = 0;
+	struct device_node *np = dev->of_node;
+
+	pdata->en_gpio = of_get_named_gpio(np, "qcom,nq-ven", 0);
+	if ((!gpio_is_valid(pdata->en_gpio)))
+		return -EINVAL;
+	disable_ctrl = pdata->en_gpio;
+
+	pdata->irq_gpio = of_get_named_gpio(np, "qcom,nq-irq", 0);
+	if ((!gpio_is_valid(pdata->irq_gpio)))
+		return -EINVAL;
+
+	pdata->firm_gpio = of_get_named_gpio(np, "qcom,nq-firm", 0);
+	if (!gpio_is_valid(pdata->firm_gpio)) {
+		dev_warn(dev,
+			"FIRM GPIO <OPTIONAL> error getting from OF node\n");
+		pdata->firm_gpio = -EINVAL;
+	}
+
+	pdata->ese_gpio = of_get_named_gpio(np, "qcom,nq-esepwr", 0);
+	if (!gpio_is_valid(pdata->ese_gpio)) {
+		dev_warn(dev,
+			"ese GPIO <OPTIONAL> error getting from OF node\n");
+		pdata->ese_gpio = -EINVAL;
+	}
+
+	r = of_property_read_string(np, "qcom,clk-src", &pdata->clk_src_name);
+
+	pdata->clkreq_gpio = of_get_named_gpio(np, "qcom,nq-clkreq", 0);
+
+	if (r)
+		return -EINVAL;
+	return r;
+}
+
+static inline int gpio_input_init(const struct device * const dev,
+			const int gpio, const char * const gpio_name)
+{
+	int r = gpio_request(gpio, gpio_name);
+
+	if (r) {
+		dev_err(dev, "unable to request gpio [%d]\n", gpio);
+		return r;
+	}
+
+	r = gpio_direction_input(gpio);
+	if (r)
+		dev_err(dev, "unable to set direction for gpio [%d]\n", gpio);
+
+	return r;
+}
+
+static int nqx_probe(struct i2c_client *client,
+			const struct i2c_device_id *id)
+{
+	int r = 0;
+	int irqn = 0;
+	struct nqx_platform_data *platform_data;
+	struct nqx_dev *nqx_dev;
+
+	dev_dbg(&client->dev, "%s: enter\n", __func__);
+	if (client->dev.of_node) {
+		platform_data = devm_kzalloc(&client->dev,
+			sizeof(struct nqx_platform_data), GFP_KERNEL);
+		if (!platform_data) {
+			r = -ENOMEM;
+			goto err_platform_data;
+		}
+		r = nfc_parse_dt(&client->dev, platform_data);
+		if (r)
+			goto err_free_data;
+	} else
+		platform_data = client->dev.platform_data;
+
+	dev_dbg(&client->dev,
+		"%s, inside nfc-nci flags = %x\n",
+		__func__, client->flags);
+
+	if (platform_data == NULL) {
+		dev_err(&client->dev, "%s: failed\n", __func__);
+		r = -ENODEV;
+		goto err_platform_data;
+	}
+	if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
+		dev_err(&client->dev, "%s: need I2C_FUNC_I2C\n", __func__);
+		r = -ENODEV;
+		goto err_free_data;
+	}
+	nqx_dev = kzalloc(sizeof(*nqx_dev), GFP_KERNEL);
+	if (nqx_dev == NULL) {
+		r = -ENOMEM;
+		goto err_free_data;
+	}
+	nqx_dev->client = client;
+	nqx_dev->kbuflen = MAX_BUFFER_SIZE;
+	nqx_dev->kbuf = kzalloc(MAX_BUFFER_SIZE, GFP_KERNEL);
+	if (!nqx_dev->kbuf) {
+		dev_err(&client->dev,
+			"failed to allocate memory for nqx_dev->kbuf\n");
+		r = -ENOMEM;
+		goto err_free_dev;
+	}
+
+	if (gpio_is_valid(platform_data->en_gpio)) {
+		r = gpio_request(platform_data->en_gpio, "nfc_reset_gpio");
+		if (r) {
+			dev_err(&client->dev,
+			"%s: unable to request nfc reset gpio [%d]\n",
+				__func__,
+				platform_data->en_gpio);
+			goto err_mem;
+		}
+		r = gpio_direction_output(platform_data->en_gpio, 0);
+		if (r) {
+			dev_err(&client->dev,
+				"%s: unable to set direction for nfc reset gpio [%d]\n",
+					__func__,
+					platform_data->en_gpio);
+			goto err_en_gpio;
+		}
+	} else {
+		dev_err(&client->dev,
+		"%s: nfc reset gpio not provided\n", __func__);
+		goto err_mem;
+	}
+
+	if (gpio_is_valid(platform_data->irq_gpio)) {
+		r = gpio_request(platform_data->irq_gpio, "nfc_irq_gpio");
+		if (r) {
+			dev_err(&client->dev, "%s: unable to request nfc irq gpio [%d]\n",
+				__func__, platform_data->irq_gpio);
+			goto err_en_gpio;
+		}
+		r = gpio_direction_input(platform_data->irq_gpio);
+		if (r) {
+			dev_err(&client->dev,
+			"%s: unable to set direction for nfc irq gpio [%d]\n",
+				__func__,
+				platform_data->irq_gpio);
+			goto err_irq_gpio;
+		}
+		irqn = gpio_to_irq(platform_data->irq_gpio);
+		if (irqn < 0) {
+			r = irqn;
+			goto err_irq_gpio;
+		}
+		client->irq = irqn;
+	} else {
+		dev_err(&client->dev, "%s: irq gpio not provided\n", __func__);
+		goto err_en_gpio;
+	}
+	if (gpio_is_valid(platform_data->firm_gpio)) {
+		r = gpio_request(platform_data->firm_gpio,
+			"nfc_firm_gpio");
+		if (r) {
+			dev_err(&client->dev,
+				"%s: unable to request nfc firmware gpio [%d]\n",
+				__func__, platform_data->firm_gpio);
+			goto err_irq_gpio;
+		}
+		r = gpio_direction_output(platform_data->firm_gpio, 0);
+		if (r) {
+			dev_err(&client->dev,
+			"%s: cannot set direction for nfc firmware gpio [%d]\n",
+			__func__, platform_data->firm_gpio);
+			goto err_firm_gpio;
+		}
+	} else {
+		dev_err(&client->dev,
+			"%s: firm gpio not provided\n", __func__);
+		goto err_irq_gpio;
+	}
+	if (gpio_is_valid(platform_data->ese_gpio)) {
+		r = gpio_request(platform_data->ese_gpio,
+				"nfc-ese_pwr");
+		if (r) {
+			nqx_dev->ese_gpio = -EINVAL;
+			dev_err(&client->dev,
+				"%s: unable to request nfc ese gpio [%d]\n",
+					__func__, platform_data->ese_gpio);
+			/* ese gpio optional so we should continue */
+		} else {
+			nqx_dev->ese_gpio = platform_data->ese_gpio;
+			r = gpio_direction_output(platform_data->ese_gpio, 0);
+			if (r) {
+				/*
+				 * free ese gpio and set invalid
+				 * to avoid further use
+				 */
+				gpio_free(platform_data->ese_gpio);
+				nqx_dev->ese_gpio = -EINVAL;
+				dev_err(&client->dev,
+					"%s: cannot set direction for nfc ese gpio [%d]\n",
+					__func__, platform_data->ese_gpio);
+				/* ese gpio optional so we should continue */
+			}
+		}
+	} else {
+		nqx_dev->ese_gpio = -EINVAL;
+		dev_err(&client->dev,
+			"%s: ese gpio not provided\n", __func__);
+		/* ese gpio optional so we should continue */
+	}
+	if (gpio_is_valid(platform_data->clkreq_gpio)) {
+		r = gpio_request(platform_data->clkreq_gpio,
+			"nfc_clkreq_gpio");
+		if (r) {
+			dev_err(&client->dev,
+				"%s: unable to request nfc clkreq gpio [%d]\n",
+				__func__, platform_data->clkreq_gpio);
+			goto err_ese_gpio;
+		}
+		r = gpio_direction_input(platform_data->clkreq_gpio);
+		if (r) {
+			dev_err(&client->dev,
+			"%s: cannot set direction for nfc clkreq gpio [%d]\n",
+			__func__, platform_data->clkreq_gpio);
+			goto err_clkreq_gpio;
+		}
+	} else {
+		dev_err(&client->dev,
+			"%s: clkreq gpio not provided\n", __func__);
+		goto err_ese_gpio;
+	}
+
+	nqx_dev->en_gpio = platform_data->en_gpio;
+	nqx_dev->irq_gpio = platform_data->irq_gpio;
+	nqx_dev->firm_gpio  = platform_data->firm_gpio;
+	nqx_dev->clkreq_gpio = platform_data->clkreq_gpio;
+	nqx_dev->pdata = platform_data;
+
+	/* init mutex and queues */
+	init_waitqueue_head(&nqx_dev->read_wq);
+	mutex_init(&nqx_dev->read_mutex);
+	spin_lock_init(&nqx_dev->irq_enabled_lock);
+
+	nqx_dev->nqx_device.minor = MISC_DYNAMIC_MINOR;
+	nqx_dev->nqx_device.name = "nq-nci";
+	nqx_dev->nqx_device.fops = &nfc_dev_fops;
+
+	r = misc_register(&nqx_dev->nqx_device);
+	if (r) {
+		dev_err(&client->dev, "%s: misc_register failed\n", __func__);
+		goto err_misc_register;
+	}
+
+	/* NFC_INT IRQ */
+	nqx_dev->irq_enabled = true;
+	r = request_irq(client->irq, nqx_dev_irq_handler,
+			  IRQF_TRIGGER_HIGH, client->name, nqx_dev);
+	if (r) {
+		dev_err(&client->dev, "%s: request_irq failed\n", __func__);
+		goto err_request_irq_failed;
+	}
+	nqx_disable_irq(nqx_dev);
+
+	/*
+	 * To be efficient we need to test whether the NFCC hardware is
+	 * physically present before attempting further hardware
+	 * initialisation.
+	 */
+	r = nfcc_hw_check(client, nqx_dev);
+	if (r) {
+		/* make sure NFCC is not enabled */
+		gpio_set_value(platform_data->en_gpio, 0);
+		/* We don't think there is hardware switch NFC OFF */
+		goto err_request_hw_check_failed;
+	}
+
+	/* Register reboot notifier here */
+	r = register_reboot_notifier(&nfcc_notifier);
+	if (r) {
+		dev_err(&client->dev,
+			"%s: cannot register reboot notifier(err = %d)\n",
+			__func__, r);
+		/*
+		 * nfcc_hw_check() does no memory allocation,
+		 * so we can reuse the same goto target here.
+		 */
+		goto err_request_hw_check_failed;
+	}
+
+#ifdef NFC_KERNEL_BU
+	r = nqx_clock_select(nqx_dev);
+	if (r < 0) {
+		dev_err(&client->dev,
+			"%s: nqx_clock_select failed\n", __func__);
+		goto err_clock_en_failed;
+	}
+	gpio_set_value(platform_data->en_gpio, 1);
+#endif
+	device_init_wakeup(&client->dev, true);
+	device_set_wakeup_capable(&client->dev, true);
+	i2c_set_clientdata(client, nqx_dev);
+	nqx_dev->irq_wake_up = false;
+
+	dev_info(&client->dev,
+		"%s: probing NFCC NQxxx exited successfully\n",
+		 __func__);
+	return 0;
+
+#ifdef NFC_KERNEL_BU
+err_clock_en_failed:
+	unregister_reboot_notifier(&nfcc_notifier);
+#endif
+err_request_hw_check_failed:
+	free_irq(client->irq, nqx_dev);
+err_request_irq_failed:
+	misc_deregister(&nqx_dev->nqx_device);
+err_misc_register:
+	mutex_destroy(&nqx_dev->read_mutex);
+err_clkreq_gpio:
+	gpio_free(platform_data->clkreq_gpio);
+err_ese_gpio:
+	/* optional gpio, not sure was configured in probe */
+	if (nqx_dev->ese_gpio > 0)
+		gpio_free(platform_data->ese_gpio);
+err_firm_gpio:
+	gpio_free(platform_data->firm_gpio);
+err_irq_gpio:
+	gpio_free(platform_data->irq_gpio);
+err_en_gpio:
+	gpio_free(platform_data->en_gpio);
+err_mem:
+	kfree(nqx_dev->kbuf);
+err_free_dev:
+	kfree(nqx_dev);
+err_free_data:
+	if (client->dev.of_node)
+		devm_kfree(&client->dev, platform_data);
+err_platform_data:
+	dev_err(&client->dev,
+	"%s: probing nqxx failed, check hardware\n",
+		 __func__);
+	return r;
+}
+
+static int nqx_remove(struct i2c_client *client)
+{
+	int ret = 0;
+	struct nqx_dev *nqx_dev;
+
+	nqx_dev = i2c_get_clientdata(client);
+	if (!nqx_dev) {
+		dev_err(&client->dev,
+		"%s: device doesn't exist anymore\n", __func__);
+		ret = -ENODEV;
+		goto err;
+	}
+
+	unregister_reboot_notifier(&nfcc_notifier);
+	free_irq(client->irq, nqx_dev);
+	misc_deregister(&nqx_dev->nqx_device);
+	mutex_destroy(&nqx_dev->read_mutex);
+	gpio_free(nqx_dev->clkreq_gpio);
+	/* optional gpio, not sure was configured in probe */
+	if (nqx_dev->ese_gpio > 0)
+		gpio_free(nqx_dev->ese_gpio);
+	gpio_free(nqx_dev->firm_gpio);
+	gpio_free(nqx_dev->irq_gpio);
+	gpio_free(nqx_dev->en_gpio);
+	kfree(nqx_dev->kbuf);
+	if (client->dev.of_node)
+		devm_kfree(&client->dev, nqx_dev->pdata);
+
+	kfree(nqx_dev);
+err:
+	return ret;
+}
+
+static int nqx_suspend(struct device *device)
+{
+	struct i2c_client *client = to_i2c_client(device);
+	struct nqx_dev *nqx_dev = i2c_get_clientdata(client);
+
+	if (device_may_wakeup(&client->dev) && nqx_dev->irq_enabled) {
+		if (!enable_irq_wake(client->irq))
+			nqx_dev->irq_wake_up = true;
+	}
+	return 0;
+}
+
+static int nqx_resume(struct device *device)
+{
+	struct i2c_client *client = to_i2c_client(device);
+	struct nqx_dev *nqx_dev = i2c_get_clientdata(client);
+
+	if (device_may_wakeup(&client->dev) && nqx_dev->irq_wake_up) {
+		if (!disable_irq_wake(client->irq))
+			nqx_dev->irq_wake_up = false;
+	}
+	return 0;
+}
+
+static const struct i2c_device_id nqx_id[] = {
+	{"nqx-i2c", 0},
+	{}
+};
+
+static const struct dev_pm_ops nfc_pm_ops = {
+	SET_SYSTEM_SLEEP_PM_OPS(nqx_suspend, nqx_resume)
+};
+
+static struct i2c_driver nqx = {
+	.id_table = nqx_id,
+	.probe = nqx_probe,
+	.remove = nqx_remove,
+	.driver = {
+		.owner = THIS_MODULE,
+		.name = "nq-nci",
+		.of_match_table = msm_match_table,
+		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
+		.pm = &nfc_pm_ops,
+	},
+};
+
+static int nfcc_reboot(struct notifier_block *notifier, unsigned long val,
+			  void *v)
+{
+	gpio_set_value(disable_ctrl, 1);
+	return NOTIFY_OK;
+}
+
+/*
+ * module load/unload record keeping
+ */
+static int __init nqx_dev_init(void)
+{
+	return i2c_add_driver(&nqx);
+}
+module_init(nqx_dev_init);
+
+static void __exit nqx_dev_exit(void)
+{
+	unregister_reboot_notifier(&nfcc_notifier);
+	i2c_del_driver(&nqx);
+}
+module_exit(nqx_dev_exit);
+
+MODULE_DESCRIPTION("NFC nqx");
+MODULE_LICENSE("GPL v2");
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/nfc/nq-nci.h	2019-01-22 16:16:25.999269009 +0100
@@ -0,0 +1,54 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __NQ_NCI_H
+#define __NQ_NCI_H
+
+#include <linux/i2c.h>
+#include <linux/types.h>
+#include <linux/version.h>
+
+#include <linux/semaphore.h>
+#include <linux/completion.h>
+
+#include <linux/ioctl.h>
+#include <linux/miscdevice.h>
+#include <linux/nfcinfo.h>
+
+#define NFC_SET_PWR			_IOW(0xE9, 0x01, unsigned int)
+#define ESE_SET_PWR			_IOW(0xE9, 0x02, unsigned int)
+#define ESE_GET_PWR			_IOR(0xE9, 0x03, unsigned int)
+#define SET_RX_BLOCK			_IOW(0xE9, 0x04, unsigned int)
+#define SET_EMULATOR_TEST_POINT		_IOW(0xE9, 0x05, unsigned int)
+#define NFCC_INITIAL_CORE_RESET_NTF	_IOW(0xE9, 0x10, unsigned int)
+
+#define NFC_RX_BUFFER_CNT_START		(0x0)
+#define PAYLOAD_HEADER_LENGTH		(0x3)
+#define PAYLOAD_LENGTH_MAX		(256)
+#define BYTE				(0x8)
+#define NCI_IDENTIFIER			(0x10)
+
+enum nfcc_initial_core_reset_ntf {
+	TIMEDOUT_INITIAL_CORE_RESET_NTF = 0,	/* 0 */
+	ARRIVED_INITIAL_CORE_RESET_NTF,		/* 1 */
+	DEFAULT_INITIAL_CORE_RESET_NTF,		/* 2 */
+};
+
+enum nfcc_chip_variant {
+	NFCC_NQ_210			= 0x48,	/**< NFCC NQ210 */
+	NFCC_NQ_220			= 0x58,	/**< NFCC NQ220 */
+	NFCC_NQ_310			= 0x40,	/**< NFCC NQ310 */
+	NFCC_NQ_330			= 0x51,	/**< NFCC NQ330 */
+	NFCC_PN66T			= 0x18,	/**< NFCC PN66T */
+	NFCC_NOT_SUPPORTED	        = 0xFF	/**< NFCC is not supported */
+};
+#endif
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/of/of_batterydata.c	2019-01-22 16:16:26.019269190 +0100
@@ -0,0 +1,453 @@
+/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt)	"%s: " fmt, __func__
+
+#include <linux/err.h>
+#include <linux/of.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/batterydata-lib.h>
+#include <linux/power_supply.h>
+
+static int of_batterydata_read_lut(const struct device_node *np,
+			int max_cols, int max_rows, int *ncols, int *nrows,
+			int *col_legend_data, int *row_legend_data,
+			int *lut_data)
+{
+	struct property *prop;
+	const __be32 *data;
+	int cols, rows, size, i, j, *out_values;
+
+	prop = of_find_property(np, "qcom,lut-col-legend", NULL);
+	if (!prop) {
+		pr_err("%s: No col legend found\n", np->name);
+		return -EINVAL;
+	} else if (!prop->value) {
+		pr_err("%s: No col legend value found, np->name\n", np->name);
+		return -ENODATA;
+	} else if (prop->length > max_cols * sizeof(int)) {
+		pr_err("%s: Too many columns\n", np->name);
+		return -EINVAL;
+	}
+
+	cols = prop->length/sizeof(int);
+	*ncols = cols;
+	data = prop->value;
+	for (i = 0; i < cols; i++)
+		*col_legend_data++ = be32_to_cpup(data++);
+
+	prop = of_find_property(np, "qcom,lut-row-legend", NULL);
+	if (!prop || row_legend_data == NULL) {
+		/* single row lut */
+		rows = 1;
+	} else if (!prop->value) {
+		pr_err("%s: No row legend value found\n", np->name);
+		return -ENODATA;
+	} else if (prop->length > max_rows * sizeof(int)) {
+		pr_err("%s: Too many rows\n", np->name);
+		return -EINVAL;
+	} else {
+		rows = prop->length/sizeof(int);
+		*nrows = rows;
+		data = prop->value;
+		for (i = 0; i < rows; i++)
+			*row_legend_data++ = be32_to_cpup(data++);
+	}
+
+	prop = of_find_property(np, "qcom,lut-data", NULL);
+	if (!prop) {
+		pr_err("prop 'qcom,lut-data' not found\n");
+		return -EINVAL;
+	}
+	data = prop->value;
+	size = prop->length/sizeof(int);
+	if (size != cols * rows) {
+		pr_err("%s: data size mismatch, %dx%d != %d\n",
+				np->name, cols, rows, size);
+		return -EINVAL;
+	}
+	for (i = 0; i < rows; i++) {
+		out_values = lut_data + (max_cols * i);
+		for (j = 0; j < cols; j++) {
+			*out_values++ = be32_to_cpup(data++);
+			pr_debug("Value = %d\n", *(out_values-1));
+		}
+	}
+
+	return 0;
+}
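+
+/*
+ * Illustrative device-tree shape consumed by of_batterydata_read_lut()
+ * (values are made up): a 2-row x 3-column LUT, where qcom,lut-data must
+ * carry exactly rows * cols entries:
+ *
+ *	qcom,lut-col-legend = <0 25 60>;
+ *	qcom,lut-row-legend = <100 50>;
+ *	qcom,lut-data = <3000 3100 3200>,
+ *			<2900 3000 3100>;
+ *
+ * When qcom,lut-row-legend is absent (or no row buffer is passed), the
+ * LUT is treated as a single row.
+ */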
+
+static int of_batterydata_read_sf_lut(struct device_node *data_node,
+				const char *name, struct sf_lut *lut)
+{
+	struct device_node *node = of_find_node_by_name(data_node, name);
+	int rc;
+
+	if (!lut) {
+		pr_debug("No lut provided, skipping\n");
+		return 0;
+	} else if (!node) {
+		pr_err("Couldn't find %s node.\n", name);
+		return -EINVAL;
+	}
+
+	rc = of_batterydata_read_lut(node, PC_CC_COLS, PC_CC_ROWS,
+			&lut->cols, &lut->rows, lut->row_entries,
+			lut->percent, *lut->sf);
+	if (rc) {
+		pr_err("Failed to read %s node.\n", name);
+		return rc;
+	}
+
+	return 0;
+}
+
+static int of_batterydata_read_pc_temp_ocv_lut(struct device_node *data_node,
+				const char *name, struct pc_temp_ocv_lut *lut)
+{
+	struct device_node *node = of_find_node_by_name(data_node, name);
+	int rc;
+
+	if (!lut) {
+		pr_debug("No lut provided, skipping\n");
+		return 0;
+	} else if (!node) {
+		pr_err("Couldn't find %s node.\n", name);
+		return -EINVAL;
+	}
+	rc = of_batterydata_read_lut(node, PC_TEMP_COLS, PC_TEMP_ROWS,
+			&lut->cols, &lut->rows, lut->temp, lut->percent,
+			*lut->ocv);
+	if (rc) {
+		pr_err("Failed to read %s node.\n", name);
+		return rc;
+	}
+
+	return 0;
+}
+
+static int of_batterydata_read_ibat_temp_acc_lut(struct device_node *data_node,
+			const char *name, struct ibat_temp_acc_lut *lut)
+{
+	struct device_node *node = of_find_node_by_name(data_node, name);
+	int rc;
+
+	if (!lut) {
+		pr_debug("No lut provided, skipping\n");
+		return 0;
+	} else if (!node) {
+		pr_debug("Couldn't find %s node.\n", name);
+		return 0;
+	}
+	rc = of_batterydata_read_lut(node, ACC_TEMP_COLS, ACC_IBAT_ROWS,
+			&lut->cols, &lut->rows, lut->temp, lut->ibat,
+			*lut->acc);
+	if (rc) {
+		pr_err("Failed to read %s node.\n", name);
+		return rc;
+	}
+
+	return 0;
+}
+
+static int of_batterydata_read_single_row_lut(struct device_node *data_node,
+				const char *name, struct single_row_lut *lut)
+{
+	struct device_node *node = of_find_node_by_name(data_node, name);
+	int rc;
+
+	if (!lut) {
+		pr_debug("No lut provided, skipping\n");
+		return 0;
+	} else if (!node) {
+		pr_err("Couldn't find %s node.\n", name);
+		return -EINVAL;
+	}
+
+	rc = of_batterydata_read_lut(node, MAX_SINGLE_LUT_COLS, 1,
+			&lut->cols, NULL, lut->x, NULL, lut->y);
+	if (rc) {
+		pr_err("Failed to read %s node.\n", name);
+		return rc;
+	}
+
+	return 0;
+}
+
+static int of_batterydata_read_batt_id_kohm(const struct device_node *np,
+				const char *propname, struct batt_ids *batt_ids)
+{
+	struct property *prop;
+	const __be32 *data;
+	int num, i, *id_kohm = batt_ids->kohm;
+
+	prop = of_find_property(np, "qcom,batt-id-kohm", NULL);
+	if (!prop) {
+		pr_err("%s: No battery id resistor found\n", np->name);
+		return -EINVAL;
+	} else if (!prop->value) {
+		pr_err("%s: No battery id resistor value found, np->name\n",
+						np->name);
+		return -ENODATA;
+	} else if (prop->length > MAX_BATT_ID_NUM * sizeof(__be32)) {
+		pr_err("%s: Too many battery id resistors\n", np->name);
+		return -EINVAL;
+	}
+
+	num = prop->length/sizeof(__be32);
+	batt_ids->num = num;
+	data = prop->value;
+	for (i = 0; i < num; i++)
+		*id_kohm++ = be32_to_cpup(data++);
+
+	return 0;
+}
+
+#define OF_PROP_READ(property, qpnp_dt_property, node, rc, optional)	\
+do {									\
+	if (rc)								\
+		break;							\
+	rc = of_property_read_u32(node, "qcom," qpnp_dt_property,	\
+					&property);			\
+									\
+	if ((rc == -EINVAL) && optional) {				\
+		property = -EINVAL;					\
+		rc = 0;							\
+	} else if (rc) {						\
+		pr_err("Error reading " #qpnp_dt_property		\
+				" property rc = %d\n", rc);		\
+	}								\
+} while (0)
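+
+/*
+ * Example expansion of OF_PROP_READ() (the property here is one actually
+ * used below; the value is hypothetical):
+ *
+ *	OF_PROP_READ(batt_data->fcc, "fcc-mah", node, rc, false);
+ *
+ * reads the u32 DT property "qcom,fcc-mah" into batt_data->fcc. With
+ * optional == true, a missing property stores -EINVAL in the destination
+ * and leaves rc untouched at 0 instead of reporting an error.
+ */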
+
+static int of_batterydata_load_battery_data(struct device_node *node,
+				int best_id_kohm,
+				struct bms_battery_data *batt_data)
+{
+	int rc;
+
+	rc = of_batterydata_read_single_row_lut(node, "qcom,fcc-temp-lut",
+			batt_data->fcc_temp_lut);
+	if (rc)
+		return rc;
+
+	rc = of_batterydata_read_pc_temp_ocv_lut(node,
+			"qcom,pc-temp-ocv-lut",
+			batt_data->pc_temp_ocv_lut);
+	if (rc)
+		return rc;
+
+	rc = of_batterydata_read_sf_lut(node, "qcom,rbatt-sf-lut",
+			batt_data->rbatt_sf_lut);
+	if (rc)
+		return rc;
+
+	rc = of_batterydata_read_ibat_temp_acc_lut(node, "qcom,ibat-acc-lut",
+						batt_data->ibat_acc_lut);
+	if (rc)
+		return rc;
+
+	rc = of_property_read_string(node, "qcom,battery-type",
+					&batt_data->battery_type);
+	if (rc) {
+		pr_err("Error reading qcom,battery-type property rc=%d\n", rc);
+		batt_data->battery_type = NULL;
+		return rc;
+	}
+
+	OF_PROP_READ(batt_data->fcc, "fcc-mah", node, rc, false);
+	OF_PROP_READ(batt_data->default_rbatt_mohm,
+			"default-rbatt-mohm", node, rc, false);
+	OF_PROP_READ(batt_data->rbatt_capacitive_mohm,
+			"rbatt-capacitive-mohm", node, rc, false);
+	OF_PROP_READ(batt_data->flat_ocv_threshold_uv,
+			"flat-ocv-threshold-uv", node, rc, true);
+	OF_PROP_READ(batt_data->max_voltage_uv,
+			"max-voltage-uv", node, rc, true);
+	OF_PROP_READ(batt_data->cutoff_uv, "v-cutoff-uv", node, rc, true);
+	OF_PROP_READ(batt_data->iterm_ua, "chg-term-ua", node, rc, true);
+	OF_PROP_READ(batt_data->fastchg_current_ma,
+			"fastchg-current-ma", node, rc, true);
+	OF_PROP_READ(batt_data->fg_cc_cv_threshold_mv,
+			"fg-cc-cv-threshold-mv", node, rc, true);
+
+	batt_data->batt_id_kohm = best_id_kohm;
+
+	return rc;
+}
+
+static int64_t of_batterydata_convert_battery_id_kohm(int batt_id_uv,
+				int rpull_up, int vadc_vdd)
+{
+	int64_t resistor_value_kohm, denom;
+
+	if (batt_id_uv == 0) {
+		/* vadc not correct or batt id line grounded, report 0 kohms */
+		return 0;
+	}
+	/* calculate the battery id resistance reported via ADC */
+	denom = div64_s64(vadc_vdd * 1000000LL, batt_id_uv) - 1000000LL;
+
+	if (denom == 0) {
+		/* batt id connector might be open, return 0 kohms */
+		return 0;
+	}
+	resistor_value_kohm = div64_s64(rpull_up * 1000000LL + denom/2, denom);
+
+	pr_debug("batt id voltage = %d, resistor value = %lld\n",
+			batt_id_uv, resistor_value_kohm);
+
+	return resistor_value_kohm;
+}
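+
+/*
+ * Worked example for the divider math above (all numbers hypothetical):
+ * with vadc_vdd = 1800000 uV, batt_id_uv = 600000 uV and rpull_up = 100
+ * kohm, denom = (1800000 * 1000000 / 600000) - 1000000 = 2000000, so
+ * resistor_value_kohm = (100 * 1000000 + 1000000) / 2000000 truncates to
+ * 50, matching the analytic form R_id = R_pull * Vid / (Vdd - Vid).
+ */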
+
+struct device_node *of_batterydata_get_best_profile(
+		const struct device_node *batterydata_container_node,
+		int batt_id_kohm, const char *batt_type)
+{
+	struct batt_ids batt_ids;
+	struct device_node *node, *best_node = NULL;
+	const char *battery_type = NULL;
+	int delta = 0, best_delta = 0, best_id_kohm = 0, id_range_pct,
+		i = 0, rc = 0, limit = 0;
+	bool in_range = false;
+
+	/* read battery id range percentage for best profile */
+	rc = of_property_read_u32(batterydata_container_node,
+			"qcom,batt-id-range-pct", &id_range_pct);
+
+	if (rc) {
+		if (rc == -EINVAL) {
+			id_range_pct = 0;
+		} else {
+			pr_err("failed to read battery id range\n");
+			return ERR_PTR(-ENXIO);
+		}
+	}
+
+	/*
+	 * Find the battery data with a battery id resistor closest to this one
+	 */
+	for_each_child_of_node(batterydata_container_node, node) {
+		if (batt_type != NULL) {
+			rc = of_property_read_string(node, "qcom,battery-type",
+							&battery_type);
+			if (!rc && strcmp(battery_type, batt_type) == 0) {
+				best_node = node;
+				best_id_kohm = batt_id_kohm;
+				break;
+			}
+		} else {
+			rc = of_batterydata_read_batt_id_kohm(node,
+							"qcom,batt-id-kohm",
+							&batt_ids);
+			if (rc)
+				continue;
+			for (i = 0; i < batt_ids.num; i++) {
+				delta = abs(batt_ids.kohm[i] - batt_id_kohm);
+				limit = (batt_ids.kohm[i] * id_range_pct) / 100;
+				in_range = (delta <= limit);
+				/*
+				 * Check if the delta is the lowest one
+				 * and also if the limits are in range
+				 * before selecting the best node.
+				 */
+				if ((delta < best_delta || !best_node)
+					&& in_range) {
+					best_node = node;
+					best_delta = delta;
+					best_id_kohm = batt_ids.kohm[i];
+				}
+			}
+		}
+	}
+
+	if (best_node == NULL) {
+		pr_err("No battery data found\n");
+		return best_node;
+	}
+
+	/* check that profile id is in range of the measured batt_id */
+	if (abs(best_id_kohm - batt_id_kohm) >
+			((best_id_kohm * id_range_pct) / 100)) {
+		pr_err("out of range: profile id %d batt id %d pct %d",
+			best_id_kohm, batt_id_kohm, id_range_pct);
+		return NULL;
+	}
+
+	rc = of_property_read_string(best_node, "qcom,battery-type",
+							&battery_type);
+	if (!rc)
+		pr_info("%s found\n", battery_type);
+	else
+		pr_info("%s found\n", best_node->name);
+
+	return best_node;
+}
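+
+/*
+ * Worked example of the in-range test above (numbers are made up): with
+ * qcom,batt-id-range-pct = 15, a profile entry of 75 kohm and a measured
+ * batt_id_kohm of 68, limit = 75 * 15 / 100 = 11 and delta = 7, so the
+ * entry is in range and then competes on the lowest delta.
+ */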
+
+int of_batterydata_read_data(struct device_node *batterydata_container_node,
+				struct bms_battery_data *batt_data,
+				int batt_id_uv)
+{
+	struct device_node *node, *best_node;
+	struct batt_ids batt_ids;
+	const char *battery_type = NULL;
+	int delta, best_delta, batt_id_kohm, rpull_up_kohm,
+		vadc_vdd_uv, best_id_kohm, i, rc = 0;
+
+	node = batterydata_container_node;
+	OF_PROP_READ(rpull_up_kohm, "rpull-up-kohm", node, rc, false);
+	OF_PROP_READ(vadc_vdd_uv, "vref-batt-therm", node, rc, false);
+	if (rc)
+		return rc;
+
+	batt_id_kohm = of_batterydata_convert_battery_id_kohm(batt_id_uv,
+					rpull_up_kohm, vadc_vdd_uv);
+	best_node = NULL;
+	best_delta = 0;
+	best_id_kohm = 0;
+
+	/*
+	 * Find the battery data with a battery id resistor closest to this one
+	 */
+	for_each_child_of_node(batterydata_container_node, node) {
+		rc = of_batterydata_read_batt_id_kohm(node,
+						"qcom,batt-id-kohm",
+						&batt_ids);
+		if (rc)
+			continue;
+		for (i = 0; i < batt_ids.num; i++) {
+			delta = abs(batt_ids.kohm[i] - batt_id_kohm);
+			if (delta < best_delta || !best_node) {
+				best_node = node;
+				best_delta = delta;
+				best_id_kohm = batt_ids.kohm[i];
+			}
+		}
+	}
+
+	if (best_node == NULL) {
+		pr_err("No battery data found\n");
+		return -ENODATA;
+	}
+	rc = of_property_read_string(best_node, "qcom,battery-type",
+							&battery_type);
+	if (!rc)
+		pr_info("%s loaded\n", battery_type);
+	else
+		pr_info("%s loaded\n", best_node->name);
+
+	return of_batterydata_load_battery_data(best_node,
+					best_id_kohm, batt_data);
+}
+
+MODULE_LICENSE("GPL v2");
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/of/of_slimbus.c	2019-01-22 16:16:26.019269190 +0100
@@ -0,0 +1,90 @@
+/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/* OF helpers for SLIMbus */
+#include <linux/slimbus/slimbus.h>
+#include <linux/irq.h>
+#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/of_slimbus.h>
+
+int of_register_slim_devices(struct slim_controller *ctrl)
+{
+	struct device_node *node;
+	struct slim_boardinfo *binfo = NULL;
+	struct slim_boardinfo *temp;
+	int n = 0;
+	int ret = 0;
+
+	if (!ctrl->dev.of_node)
+		return -EINVAL;
+
+	for_each_child_of_node(ctrl->dev.of_node, node) {
+		struct property *prop;
+		struct slim_device *slim;
+		char *name;
+
+		prop = of_find_property(node, "elemental-addr", NULL);
+		if (!prop || prop->length != 6) {
+			dev_err(&ctrl->dev, "of_slim: invalid E-addr");
+			continue;
+		}
+		name = kzalloc(SLIMBUS_NAME_SIZE, GFP_KERNEL);
+		if (!name) {
+			dev_err(&ctrl->dev, "of_slim: out of memory");
+			ret = -ENOMEM;
+			goto of_slim_err;
+		}
+		if (of_modalias_node(node, name, SLIMBUS_NAME_SIZE) < 0) {
+			dev_err(&ctrl->dev, "of_slim: modalias failure on %s\n",
+				node->full_name);
+			kfree(name);
+			continue;
+		}
+		slim = kzalloc(sizeof(struct slim_device), GFP_KERNEL);
+		if (!slim) {
+			dev_err(&ctrl->dev, "of_slim: out of memory");
+			ret = -ENOMEM;
+			kfree(name);
+			goto of_slim_err;
+		}
+		memcpy(slim->e_addr, prop->value, 6);
+
+		temp = krealloc(binfo, (n + 1) * sizeof(struct slim_boardinfo),
+					GFP_KERNEL);
+		if (!temp) {
+			dev_err(&ctrl->dev, "out of memory");
+			kfree(name);
+			kfree(slim);
+			ret = -ENOMEM;
+			goto of_slim_err;
+		}
+		binfo = temp;
+
+		slim->dev.of_node = of_node_get(node);
+		slim->name = (const char *)name;
+		binfo[n].bus_num = ctrl->nr;
+		binfo[n].slim_slave = slim;
+		n++;
+	}
+	ret = slim_register_board_info(binfo, n);
+	if (!ret)
+		goto of_slim_ret;
+of_slim_err:
+	while (n-- > 0) {
+		kfree(binfo[n].slim_slave->name);
+		kfree(binfo[n].slim_slave);
+	}
+of_slim_ret:
+	kfree(binfo);
+	return ret;
+}
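+
+/*
+ * Sketch of a child node this parser expects (node and compatible names
+ * are illustrative; only the 6-byte "elemental-addr" property is mandated
+ * by the code above):
+ *
+ *	slim_codec {
+ *		compatible = "qcom,example-slim-slave";
+ *		elemental-addr = [00 01 a0 00 17 02];
+ *	};
+ */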
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/pci/host/pci-msm.c	2019-10-29 09:26:24.557212123 +0100
@@ -0,0 +1,7338 @@
+/* Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/*
+ * MSM PCIe controller driver.
+ */
+
+#include <linux/module.h>
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/gpio.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/of_pci.h>
+#include <linux/pci.h>
+#include <linux/iommu.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+#include <linux/regulator/rpm-smd-regulator.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/of_gpio.h>
+#include <linux/clk/msm-clk.h>
+#include <linux/reset.h>
+#include <linux/msm-bus.h>
+#include <linux/msm-bus-board.h>
+#include <linux/debugfs.h>
+#include <linux/uaccess.h>
+#include <linux/io.h>
+#include <linux/msi.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/pm_wakeup.h>
+#include <linux/compiler.h>
+#include <soc/qcom/scm.h>
+#include <linux/ipc_logging.h>
+#include <linux/msm_pcie.h>
+
+#ifdef CONFIG_ARCH_MDMCALIFORNIUM
+#define PCIE_VENDOR_ID_RCP		0x17cb
+#define PCIE_DEVICE_ID_RCP		0x0302
+
+#define PCIE20_L1SUB_CONTROL1		0x158
+#define PCIE20_PARF_DBI_BASE_ADDR	0x350
+#define PCIE20_PARF_SLV_ADDR_SPACE_SIZE	0x358
+
+#define TX_BASE 0x200
+#define RX_BASE 0x400
+#define PCS_BASE 0x800
+#define PCS_MISC_BASE 0x600
+
+#elif defined(CONFIG_ARCH_MSM8998)
+#define PCIE_VENDOR_ID_RCP		0x17cb
+#define PCIE_DEVICE_ID_RCP		0x0105
+
+#define PCIE20_L1SUB_CONTROL1		0x1E4
+#define PCIE20_PARF_DBI_BASE_ADDR       0x350
+#define PCIE20_PARF_SLV_ADDR_SPACE_SIZE 0x358
+
+#define TX_BASE 0
+#define RX_BASE 0
+#define PCS_BASE 0x800
+#define PCS_MISC_BASE 0
+
+#else
+#define PCIE_VENDOR_ID_RCP		0x17cb
+#define PCIE_DEVICE_ID_RCP		0x0104
+
+#define PCIE20_L1SUB_CONTROL1		0x158
+#define PCIE20_PARF_DBI_BASE_ADDR	0x168
+#define PCIE20_PARF_SLV_ADDR_SPACE_SIZE	0x16C
+
+#define TX_BASE 0x1000
+#define RX_BASE 0x1200
+#define PCS_BASE 0x1400
+#define PCS_MISC_BASE 0
+#endif
+
+#define TX(n, m) (TX_BASE + (n) * (m) * 0x1000)
+#define RX(n, m) (RX_BASE + (n) * (m) * 0x1000)
+#define PCS_PORT(n, m) (PCS_BASE + (n) * (m) * 0x1000)
+#define PCS_MISC_PORT(n, m) (PCS_MISC_BASE + (n) * (m) * 0x1000)
+
+#define QSERDES_COM_BG_TIMER			0x00C
+#define QSERDES_COM_SSC_EN_CENTER		0x010
+#define QSERDES_COM_SSC_ADJ_PER1		0x014
+#define QSERDES_COM_SSC_ADJ_PER2		0x018
+#define QSERDES_COM_SSC_PER1			0x01C
+#define QSERDES_COM_SSC_PER2			0x020
+#define QSERDES_COM_SSC_STEP_SIZE1		0x024
+#define QSERDES_COM_SSC_STEP_SIZE2		0x028
+#define QSERDES_COM_BIAS_EN_CLKBUFLR_EN		0x034
+#define QSERDES_COM_CLK_ENABLE1			0x038
+#define QSERDES_COM_SYS_CLK_CTRL		0x03C
+#define QSERDES_COM_SYSCLK_BUF_ENABLE		0x040
+#define QSERDES_COM_PLL_IVCO			0x048
+#define QSERDES_COM_LOCK_CMP1_MODE0		0x04C
+#define QSERDES_COM_LOCK_CMP2_MODE0		0x050
+#define QSERDES_COM_LOCK_CMP3_MODE0		0x054
+#define QSERDES_COM_BG_TRIM			0x070
+#define QSERDES_COM_CLK_EP_DIV			0x074
+#define QSERDES_COM_CP_CTRL_MODE0		0x078
+#define QSERDES_COM_PLL_RCTRL_MODE0		0x084
+#define QSERDES_COM_PLL_CCTRL_MODE0		0x090
+#define QSERDES_COM_SYSCLK_EN_SEL		0x0AC
+#define QSERDES_COM_RESETSM_CNTRL		0x0B4
+#define QSERDES_COM_RESTRIM_CTRL		0x0BC
+#define QSERDES_COM_RESCODE_DIV_NUM		0x0C4
+#define QSERDES_COM_LOCK_CMP_EN			0x0C8
+#define QSERDES_COM_DEC_START_MODE0		0x0D0
+#define QSERDES_COM_DIV_FRAC_START1_MODE0	0x0DC
+#define QSERDES_COM_DIV_FRAC_START2_MODE0	0x0E0
+#define QSERDES_COM_DIV_FRAC_START3_MODE0	0x0E4
+#define QSERDES_COM_INTEGLOOP_GAIN0_MODE0	0x108
+#define QSERDES_COM_INTEGLOOP_GAIN1_MODE0	0x10C
+#define QSERDES_COM_VCO_TUNE_CTRL		0x124
+#define QSERDES_COM_VCO_TUNE_MAP		0x128
+#define QSERDES_COM_VCO_TUNE1_MODE0		0x12C
+#define QSERDES_COM_VCO_TUNE2_MODE0		0x130
+#define QSERDES_COM_VCO_TUNE_TIMER1		0x144
+#define QSERDES_COM_VCO_TUNE_TIMER2		0x148
+#define QSERDES_COM_BG_CTRL			0x170
+#define QSERDES_COM_CLK_SELECT			0x174
+#define QSERDES_COM_HSCLK_SEL			0x178
+#define QSERDES_COM_CORECLK_DIV			0x184
+#define QSERDES_COM_CORE_CLK_EN			0x18C
+#define QSERDES_COM_C_READY_STATUS		0x190
+#define QSERDES_COM_CMN_CONFIG			0x194
+#define QSERDES_COM_SVS_MODE_CLK_SEL		0x19C
+#define QSERDES_COM_DEBUG_BUS0			0x1A0
+#define QSERDES_COM_DEBUG_BUS1			0x1A4
+#define QSERDES_COM_DEBUG_BUS2			0x1A8
+#define QSERDES_COM_DEBUG_BUS3			0x1AC
+#define QSERDES_COM_DEBUG_BUS_SEL		0x1B0
+
+#define QSERDES_TX_N_RES_CODE_LANE_OFFSET(n, m)		(TX(n, m) + 0x4C)
+#define QSERDES_TX_N_DEBUG_BUS_SEL(n, m)		(TX(n, m) + 0x64)
+#define QSERDES_TX_N_HIGHZ_TRANSCEIVEREN_BIAS_DRVR_EN(n, m) (TX(n, m) + 0x68)
+#define QSERDES_TX_N_LANE_MODE(n, m)			(TX(n, m) + 0x94)
+#define QSERDES_TX_N_RCV_DETECT_LVL_2(n, m)		(TX(n, m) + 0xAC)
+
+#define QSERDES_RX_N_UCDR_SO_GAIN_HALF(n, m)		(RX(n, m) + 0x010)
+#define QSERDES_RX_N_UCDR_SO_GAIN(n, m)			(RX(n, m) + 0x01C)
+#define QSERDES_RX_N_UCDR_SO_SATURATION_AND_ENABLE(n, m) (RX(n, m) + 0x048)
+#define QSERDES_RX_N_RX_EQU_ADAPTOR_CNTRL2(n, m)	(RX(n, m) + 0x0D8)
+#define QSERDES_RX_N_RX_EQU_ADAPTOR_CNTRL3(n, m)	(RX(n, m) + 0x0DC)
+#define QSERDES_RX_N_RX_EQU_ADAPTOR_CNTRL4(n, m)	(RX(n, m) + 0x0E0)
+#define QSERDES_RX_N_SIGDET_ENABLES(n, m)		(RX(n, m) + 0x110)
+#define QSERDES_RX_N_SIGDET_DEGLITCH_CNTRL(n, m)	(RX(n, m) + 0x11C)
+#define QSERDES_RX_N_SIGDET_LVL(n, m)			(RX(n, m) + 0x118)
+#define QSERDES_RX_N_RX_BAND(n, m)			(RX(n, m) + 0x120)
+
+#define PCIE_MISC_N_DEBUG_BUS_BYTE0_INDEX(n, m)	(PCS_MISC_PORT(n, m) + 0x00)
+#define PCIE_MISC_N_DEBUG_BUS_BYTE1_INDEX(n, m)	(PCS_MISC_PORT(n, m) + 0x04)
+#define PCIE_MISC_N_DEBUG_BUS_BYTE2_INDEX(n, m)	(PCS_MISC_PORT(n, m) + 0x08)
+#define PCIE_MISC_N_DEBUG_BUS_BYTE3_INDEX(n, m)	(PCS_MISC_PORT(n, m) + 0x0C)
+#define PCIE_MISC_N_DEBUG_BUS_0_STATUS(n, m)	(PCS_MISC_PORT(n, m) + 0x14)
+#define PCIE_MISC_N_DEBUG_BUS_1_STATUS(n, m)	(PCS_MISC_PORT(n, m) + 0x18)
+#define PCIE_MISC_N_DEBUG_BUS_2_STATUS(n, m)	(PCS_MISC_PORT(n, m) + 0x1C)
+#define PCIE_MISC_N_DEBUG_BUS_3_STATUS(n, m)	(PCS_MISC_PORT(n, m) + 0x20)
+
+#define PCIE_N_SW_RESET(n, m)			(PCS_PORT(n, m) + 0x00)
+#define PCIE_N_POWER_DOWN_CONTROL(n, m)		(PCS_PORT(n, m) + 0x04)
+#define PCIE_N_START_CONTROL(n, m)		(PCS_PORT(n, m) + 0x08)
+#define PCIE_N_TXDEEMPH_M6DB_V0(n, m)		(PCS_PORT(n, m) + 0x24)
+#define PCIE_N_TXDEEMPH_M3P5DB_V0(n, m)		(PCS_PORT(n, m) + 0x28)
+#define PCIE_N_ENDPOINT_REFCLK_DRIVE(n, m)	(PCS_PORT(n, m) + 0x54)
+#define PCIE_N_RX_IDLE_DTCT_CNTRL(n, m)		(PCS_PORT(n, m) + 0x58)
+#define PCIE_N_POWER_STATE_CONFIG1(n, m)	(PCS_PORT(n, m) + 0x60)
+#define PCIE_N_POWER_STATE_CONFIG4(n, m)	(PCS_PORT(n, m) + 0x6C)
+#define PCIE_N_PWRUP_RESET_DLY_TIME_AUXCLK(n, m)	(PCS_PORT(n, m) + 0xA0)
+#define PCIE_N_LP_WAKEUP_DLY_TIME_AUXCLK(n, m)	(PCS_PORT(n, m) + 0xA4)
+#define PCIE_N_PLL_LOCK_CHK_DLY_TIME(n, m)	(PCS_PORT(n, m) + 0xA8)
+#define PCIE_N_TEST_CONTROL4(n, m)		(PCS_PORT(n, m) + 0x11C)
+#define PCIE_N_TEST_CONTROL5(n, m)		(PCS_PORT(n, m) + 0x120)
+#define PCIE_N_TEST_CONTROL6(n, m)		(PCS_PORT(n, m) + 0x124)
+#define PCIE_N_TEST_CONTROL7(n, m)		(PCS_PORT(n, m) + 0x128)
+#define PCIE_N_PCS_STATUS(n, m)			(PCS_PORT(n, m) + 0x174)
+#define PCIE_N_DEBUG_BUS_0_STATUS(n, m)		(PCS_PORT(n, m) + 0x198)
+#define PCIE_N_DEBUG_BUS_1_STATUS(n, m)		(PCS_PORT(n, m) + 0x19C)
+#define PCIE_N_DEBUG_BUS_2_STATUS(n, m)		(PCS_PORT(n, m) + 0x1A0)
+#define PCIE_N_DEBUG_BUS_3_STATUS(n, m)		(PCS_PORT(n, m) + 0x1A4)
+#define PCIE_N_LP_WAKEUP_DLY_TIME_AUXCLK_MSB(n, m)	(PCS_PORT(n, m) + 0x1A8)
+#define PCIE_N_OSC_DTCT_ACTIONS(n, m)			(PCS_PORT(n, m) + 0x1AC)
+#define PCIE_N_SIGDET_CNTRL(n, m)			(PCS_PORT(n, m) + 0x1B0)
+#define PCIE_N_L1SS_WAKEUP_DLY_TIME_AUXCLK_LSB(n, m)	(PCS_PORT(n, m) + 0x1DC)
+#define PCIE_N_L1SS_WAKEUP_DLY_TIME_AUXCLK_MSB(n, m)	(PCS_PORT(n, m) + 0x1E0)
+
+#define PCIE_COM_SW_RESET		0x400
+#define PCIE_COM_POWER_DOWN_CONTROL	0x404
+#define PCIE_COM_START_CONTROL		0x408
+#define PCIE_COM_DEBUG_BUS_BYTE0_INDEX	0x438
+#define PCIE_COM_DEBUG_BUS_BYTE1_INDEX	0x43C
+#define PCIE_COM_DEBUG_BUS_BYTE2_INDEX	0x440
+#define PCIE_COM_DEBUG_BUS_BYTE3_INDEX	0x444
+#define PCIE_COM_PCS_READY_STATUS	0x448
+#define PCIE_COM_DEBUG_BUS_0_STATUS	0x45C
+#define PCIE_COM_DEBUG_BUS_1_STATUS	0x460
+#define PCIE_COM_DEBUG_BUS_2_STATUS	0x464
+#define PCIE_COM_DEBUG_BUS_3_STATUS	0x468
+
+#define PCIE20_PARF_SYS_CTRL		0x00
+#define PCIE20_PARF_PM_STTS		0x24
+#define PCIE20_PARF_PCS_DEEMPH		0x34
+#define PCIE20_PARF_PCS_SWING		0x38
+#define PCIE20_PARF_PHY_CTRL		0x40
+#define PCIE20_PARF_PHY_REFCLK		0x4C
+#define PCIE20_PARF_CONFIG_BITS		0x50
+#define PCIE20_PARF_TEST_BUS		0xE4
+#define PCIE20_PARF_MHI_CLOCK_RESET_CTRL	0x174
+#define PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT	0x1A8
+#define PCIE20_PARF_LTSSM		0x1B0
+#define PCIE20_PARF_INT_ALL_STATUS	0x224
+#define PCIE20_PARF_INT_ALL_CLEAR	0x228
+#define PCIE20_PARF_INT_ALL_MASK	0x22C
+#define PCIE20_PARF_SID_OFFSET		0x234
+#define PCIE20_PARF_BDF_TRANSLATE_CFG	0x24C
+#define PCIE20_PARF_BDF_TRANSLATE_N	0x250
+
+#define PCIE20_ELBI_VERSION		0x00
+#define PCIE20_ELBI_SYS_CTRL		0x04
+#define PCIE20_ELBI_SYS_STTS		0x08
+
+#define PCIE20_CAP			0x70
+#define PCIE20_CAP_DEVCTRLSTATUS	(PCIE20_CAP + 0x08)
+#define PCIE20_CAP_LINKCTRLSTATUS	(PCIE20_CAP + 0x10)
+
+#define PCIE20_COMMAND_STATUS		0x04
+#define PCIE20_HEADER_TYPE		0x0C
+#define PCIE20_BUSNUMBERS		0x18
+#define PCIE20_MEMORY_BASE_LIMIT	0x20
+#define PCIE20_BRIDGE_CTRL		0x3C
+#define PCIE20_DEVICE_CONTROL_STATUS	0x78
+#define PCIE20_DEVICE_CONTROL2_STATUS2	0x98
+
+#define PCIE20_AUX_CLK_FREQ_REG		0xB40
+#define PCIE20_ACK_F_ASPM_CTRL_REG	0x70C
+#define PCIE20_ACK_N_FTS		0xff00
+
+#define PCIE20_PLR_IATU_VIEWPORT	0x900
+#define PCIE20_PLR_IATU_CTRL1		0x904
+#define PCIE20_PLR_IATU_CTRL2		0x908
+#define PCIE20_PLR_IATU_LBAR		0x90C
+#define PCIE20_PLR_IATU_UBAR		0x910
+#define PCIE20_PLR_IATU_LAR		0x914
+#define PCIE20_PLR_IATU_LTAR		0x918
+#define PCIE20_PLR_IATU_UTAR		0x91c
+
+#define PCIE20_CTRL1_TYPE_CFG0		0x04
+#define PCIE20_CTRL1_TYPE_CFG1		0x05
+
+#define PCIE20_CAP_ID			0x10
+#define L1SUB_CAP_ID			0x1E
+
+#define PCIE_CAP_PTR_OFFSET		0x34
+#define PCIE_EXT_CAP_OFFSET		0x100
+
+#define PCIE20_AER_UNCORR_ERR_STATUS_REG	0x104
+#define PCIE20_AER_CORR_ERR_STATUS_REG		0x110
+#define PCIE20_AER_ROOT_ERR_STATUS_REG		0x130
+#define PCIE20_AER_ERR_SRC_ID_REG		0x134
+
+#define RD 0
+#define WR 1
+#define MSM_PCIE_ERROR (-1)
+
+#define PERST_PROPAGATION_DELAY_US_MIN		1000
+#define PERST_PROPAGATION_DELAY_US_MAX		1005
+#define SWITCH_DELAY_MAX			20
+#define REFCLK_STABILIZATION_DELAY_US_MIN	1000
+#define REFCLK_STABILIZATION_DELAY_US_MAX	1005
+#define LINK_UP_TIMEOUT_US_MIN			5000
+#define LINK_UP_TIMEOUT_US_MAX			5100
+#define LINK_UP_CHECK_MAX_COUNT			20
+#define PHY_STABILIZATION_DELAY_US_MIN		995
+#define PHY_STABILIZATION_DELAY_US_MAX		1005
+#define POWER_DOWN_DELAY_US_MIN			10
+#define POWER_DOWN_DELAY_US_MAX			11
+#define LINKDOWN_INIT_WAITING_US_MIN		995
+#define LINKDOWN_INIT_WAITING_US_MAX		1005
+#define LINKDOWN_WAITING_US_MIN			4900
+#define LINKDOWN_WAITING_US_MAX			5100
+#define LINKDOWN_WAITING_COUNT			200
+
+#define PHY_READY_TIMEOUT_COUNT		   10
+#define XMLH_LINK_UP				  0x400
+#define MAX_LINK_RETRIES 5
+#define MAX_BUS_NUM 3
+#define MAX_PROP_SIZE 32
+#define MAX_RC_NAME_LEN 15
+#define MSM_PCIE_MAX_VREG 4
+#define MSM_PCIE_MAX_CLK 9
+#define MSM_PCIE_MAX_PIPE_CLK 1
+#define MAX_RC_NUM 3
+#define MAX_DEVICE_NUM 20
+#define MAX_SHORT_BDF_NUM 16
+#define PCIE_TLP_RD_SIZE 0x5
+#define PCIE_MSI_NR_IRQS 256
+#define MSM_PCIE_MAX_MSI 32
+#define MAX_MSG_LEN 80
+#define PCIE_LOG_PAGES (50)
+#define PCIE_CONF_SPACE_DW			1024
+#define PCIE_CLEAR				0xDEADBEEF
+#define PCIE_LINK_DOWN				0xFFFFFFFF
+
+#define MSM_PCIE_MAX_RESET 4
+#define MSM_PCIE_MAX_PIPE_RESET 1
+
+#define MSM_PCIE_MSI_PHY		0xa0000000
+#define PCIE20_MSI_CTRL_ADDR		(0x820)
+#define PCIE20_MSI_CTRL_UPPER_ADDR	(0x824)
+#define PCIE20_MSI_CTRL_INTR_EN		(0x828)
+#define PCIE20_MSI_CTRL_INTR_MASK	(0x82C)
+#define PCIE20_MSI_CTRL_INTR_STATUS	(0x830)
+#define PCIE20_MSI_CTRL_MAX		8
+
+/* PM control options */
+#define PM_IRQ			 0x1
+#define PM_CLK			 0x2
+#define PM_GPIO			0x4
+#define PM_VREG			0x8
+#define PM_PIPE_CLK		  0x10
+#define PM_ALL (PM_IRQ | PM_CLK | PM_GPIO | PM_VREG | PM_PIPE_CLK)
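+/*
+ * Illustrative example (not from the original source): a caller that
+ * only needs to gate clocks and regulators would pass
+ * (PM_CLK | PM_VREG), while PM_ALL selects every resource type.
+ */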
+
+#ifdef CONFIG_PHYS_ADDR_T_64BIT
+#define PCIE_UPPER_ADDR(addr) ((u32)((addr) >> 32))
+#else
+#define PCIE_UPPER_ADDR(addr) (0x0)
+#endif
+#define PCIE_LOWER_ADDR(addr) ((u32)((addr) & 0xffffffff))
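+/*
+ * Example (illustrative only): splitting a 64-bit physical address,
+ * e.g. addr = 0x123456789 gives PCIE_UPPER_ADDR(addr) = 0x1 and
+ * PCIE_LOWER_ADDR(addr) = 0x23456789, matching the 32-bit
+ * upper/lower register pairs programmed below (e.g. for MSI and
+ * iATU addresses).
+ */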
+
+/* Config Space Offsets */
+#define BDF_OFFSET(bus, devfn) \
+	(((bus) << 24) | ((devfn) << 16))
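+/*
+ * Example (illustrative only): BDF_OFFSET(0x01, 0x00) evaluates to
+ * 0x01000000, i.e. the bus number lands in bits [31:24] and devfn
+ * (device in bits [7:3], function in bits [2:0]) in bits [23:16].
+ */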
+
+#define PCIE_GEN_DBG(x...) do { \
+	if (msm_pcie_debug_mask) \
+		pr_alert(x); \
+	} while (0)
+
+#define PCIE_DBG(dev, fmt, arg...) do {			 \
+	if ((dev) && (dev)->ipc_log_long)   \
+		ipc_log_string((dev)->ipc_log_long, \
+			"DBG1:%s: " fmt, __func__, arg); \
+	if ((dev) && (dev)->ipc_log)   \
+		ipc_log_string((dev)->ipc_log, "%s: " fmt, __func__, arg); \
+	if (msm_pcie_debug_mask)   \
+		pr_alert("%s: " fmt, __func__, arg);		  \
+	} while (0)
+
+#define PCIE_DBG2(dev, fmt, arg...) do {			 \
+	if ((dev) && (dev)->ipc_log)   \
+		ipc_log_string((dev)->ipc_log, "DBG2:%s: " fmt, __func__, arg);\
+	if (msm_pcie_debug_mask)   \
+		pr_alert("%s: " fmt, __func__, arg);              \
+	} while (0)
+
+#define PCIE_DBG3(dev, fmt, arg...) do {			 \
+	if ((dev) && (dev)->ipc_log)   \
+		ipc_log_string((dev)->ipc_log, "DBG3:%s: " fmt, __func__, arg);\
+	if (msm_pcie_debug_mask)   \
+		pr_alert("%s: " fmt, __func__, arg);              \
+	} while (0)
+
+#define PCIE_DUMP(dev, fmt, arg...) do {			\
+	if ((dev) && (dev)->ipc_log_dump) \
+		ipc_log_string((dev)->ipc_log_dump, \
+			"DUMP:%s: " fmt, __func__, arg); \
+	} while (0)
+
+#define PCIE_DBG_FS(dev, fmt, arg...) do {			\
+	if ((dev) && (dev)->ipc_log_dump) \
+		ipc_log_string((dev)->ipc_log_dump, \
+			"DBG_FS:%s: " fmt, __func__, arg); \
+	pr_alert("%s: " fmt, __func__, arg); \
+	} while (0)
+
+#define PCIE_INFO(dev, fmt, arg...) do {			 \
+	if ((dev) && (dev)->ipc_log_long)   \
+		ipc_log_string((dev)->ipc_log_long, \
+			"INFO:%s: " fmt, __func__, arg); \
+	if ((dev) && (dev)->ipc_log)   \
+		ipc_log_string((dev)->ipc_log, "%s: " fmt, __func__, arg); \
+	pr_info("%s: " fmt, __func__, arg);  \
+	} while (0)
+
+#define PCIE_ERR(dev, fmt, arg...) do {			 \
+	if ((dev) && (dev)->ipc_log_long)   \
+		ipc_log_string((dev)->ipc_log_long, \
+			"ERR:%s: " fmt, __func__, arg); \
+	if ((dev) && (dev)->ipc_log)   \
+		ipc_log_string((dev)->ipc_log, "%s: " fmt, __func__, arg); \
+	pr_err("%s: " fmt, __func__, arg);  \
+	} while (0)
+
+enum msm_pcie_res {
+	MSM_PCIE_RES_PARF,
+	MSM_PCIE_RES_PHY,
+	MSM_PCIE_RES_DM_CORE,
+	MSM_PCIE_RES_ELBI,
+	MSM_PCIE_RES_CONF,
+	MSM_PCIE_RES_IO,
+	MSM_PCIE_RES_BARS,
+	MSM_PCIE_RES_TCSR,
+	MSM_PCIE_MAX_RES,
+};
+
+enum msm_pcie_irq {
+	MSM_PCIE_INT_MSI,
+	MSM_PCIE_INT_A,
+	MSM_PCIE_INT_B,
+	MSM_PCIE_INT_C,
+	MSM_PCIE_INT_D,
+	MSM_PCIE_INT_PLS_PME,
+	MSM_PCIE_INT_PME_LEGACY,
+	MSM_PCIE_INT_PLS_ERR,
+	MSM_PCIE_INT_AER_LEGACY,
+	MSM_PCIE_INT_LINK_UP,
+	MSM_PCIE_INT_LINK_DOWN,
+	MSM_PCIE_INT_BRIDGE_FLUSH_N,
+	MSM_PCIE_INT_GLOBAL_INT,
+	MSM_PCIE_MAX_IRQ,
+};
+
+enum msm_pcie_irq_event {
+	MSM_PCIE_INT_EVT_LINK_DOWN = 1,
+	MSM_PCIE_INT_EVT_BME,
+	MSM_PCIE_INT_EVT_PM_TURNOFF,
+	MSM_PCIE_INT_EVT_DEBUG,
+	MSM_PCIE_INT_EVT_LTR,
+	MSM_PCIE_INT_EVT_MHI_Q6,
+	MSM_PCIE_INT_EVT_MHI_A7,
+	MSM_PCIE_INT_EVT_DSTATE_CHANGE,
+	MSM_PCIE_INT_EVT_L1SUB_TIMEOUT,
+	MSM_PCIE_INT_EVT_MMIO_WRITE,
+	MSM_PCIE_INT_EVT_CFG_WRITE,
+	MSM_PCIE_INT_EVT_BRIDGE_FLUSH_N,
+	MSM_PCIE_INT_EVT_LINK_UP,
+	MSM_PCIE_INT_EVT_AER_LEGACY,
+	MSM_PCIE_INT_EVT_AER_ERR,
+	MSM_PCIE_INT_EVT_PME_LEGACY,
+	MSM_PCIE_INT_EVT_PLS_PME,
+	MSM_PCIE_INT_EVT_INTD,
+	MSM_PCIE_INT_EVT_INTC,
+	MSM_PCIE_INT_EVT_INTB,
+	MSM_PCIE_INT_EVT_INTA,
+	MSM_PCIE_INT_EVT_EDMA,
+	MSM_PCIE_INT_EVT_MSI_0,
+	MSM_PCIE_INT_EVT_MSI_1,
+	MSM_PCIE_INT_EVT_MSI_2,
+	MSM_PCIE_INT_EVT_MSI_3,
+	MSM_PCIE_INT_EVT_MSI_4,
+	MSM_PCIE_INT_EVT_MSI_5,
+	MSM_PCIE_INT_EVT_MSI_6,
+	MSM_PCIE_INT_EVT_MSI_7,
+	MSM_PCIE_INT_EVT_MAX = 30,
+};
+
+enum msm_pcie_gpio {
+	MSM_PCIE_GPIO_PERST,
+	MSM_PCIE_GPIO_WAKE,
+	MSM_PCIE_GPIO_EP,
+	MSM_PCIE_MAX_GPIO
+};
+
+enum msm_pcie_link_status {
+	MSM_PCIE_LINK_DEINIT,
+	MSM_PCIE_LINK_ENABLED,
+	MSM_PCIE_LINK_DISABLED
+};
+
+enum msm_pcie_boot_option {
+	MSM_PCIE_NO_PROBE_ENUMERATION = BIT(0),
+	MSM_PCIE_NO_WAKE_ENUMERATION = BIT(1)
+};
+
+/* gpio info structure */
+struct msm_pcie_gpio_info_t {
+	char	*name;
+	uint32_t   num;
+	bool	 out;
+	uint32_t   on;
+	uint32_t   init;
+	bool	required;
+};
+
+/* voltage regulator info structure */
+struct msm_pcie_vreg_info_t {
+	struct regulator  *hdl;
+	char		  *name;
+	uint32_t	     max_v;
+	uint32_t	     min_v;
+	uint32_t	     opt_mode;
+	bool		   required;
+};
+
+/* reset info structure */
+struct msm_pcie_reset_info_t {
+	struct reset_control *hdl;
+	char *name;
+	bool required;
+};
+
+/* clock info structure */
+struct msm_pcie_clk_info_t {
+	struct clk  *hdl;
+	char	  *name;
+	u32	   freq;
+	bool	config_mem;
+	bool	  required;
+};
+
+/* resource info structure */
+struct msm_pcie_res_info_t {
+	char		*name;
+	struct resource *resource;
+	void __iomem    *base;
+};
+
+/* irq info structure */
+struct msm_pcie_irq_info_t {
+	char		  *name;
+	uint32_t	    num;
+};
+
+/* phy info structure */
+struct msm_pcie_phy_info_t {
+	u32	offset;
+	u32	val;
+	u32	delay;
+};
+
+/* PCIe device info structure */
+struct msm_pcie_device_info {
+	u32			bdf;
+	struct pci_dev		*dev;
+	short			short_bdf;
+	u32			sid;
+	int			domain;
+	void __iomem		*conf_base;
+	unsigned long		phy_address;
+	u32			dev_ctrlstts_offset;
+	struct msm_pcie_register_event *event_reg;
+	bool			registered;
+};
+
+/* msm pcie device structure */
+struct msm_pcie_dev_t {
+	struct platform_device	 *pdev;
+	struct pci_dev *dev;
+	struct regulator *gdsc;
+	struct regulator *gdsc_smmu;
+	struct msm_pcie_vreg_info_t  vreg[MSM_PCIE_MAX_VREG];
+	struct msm_pcie_gpio_info_t  gpio[MSM_PCIE_MAX_GPIO];
+	struct msm_pcie_clk_info_t   clk[MSM_PCIE_MAX_CLK];
+	struct msm_pcie_clk_info_t   pipeclk[MSM_PCIE_MAX_PIPE_CLK];
+	struct msm_pcie_res_info_t   res[MSM_PCIE_MAX_RES];
+	struct msm_pcie_irq_info_t   irq[MSM_PCIE_MAX_IRQ];
+	struct msm_pcie_irq_info_t	msi[MSM_PCIE_MAX_MSI];
+	struct msm_pcie_reset_info_t reset[MSM_PCIE_MAX_RESET];
+	struct msm_pcie_reset_info_t pipe_reset[MSM_PCIE_MAX_PIPE_RESET];
+
+	void __iomem		     *parf;
+	void __iomem		     *phy;
+	void __iomem		     *elbi;
+	void __iomem		     *dm_core;
+	void __iomem		     *conf;
+	void __iomem		     *bars;
+	void __iomem		     *tcsr;
+
+	uint32_t			    axi_bar_start;
+	uint32_t			    axi_bar_end;
+
+	struct resource		   *dev_mem_res;
+	struct resource		   *dev_io_res;
+
+	uint32_t			    wake_n;
+	uint32_t			    vreg_n;
+	uint32_t			    gpio_n;
+	uint32_t			    parf_deemph;
+	uint32_t			    parf_swing;
+
+	bool				 cfg_access;
+	spinlock_t			 cfg_lock;
+	unsigned long		    irqsave_flags;
+	struct mutex		     setup_lock;
+
+	struct irq_domain		*irq_domain;
+	DECLARE_BITMAP(msi_irq_in_use, PCIE_MSI_NR_IRQS);
+	uint32_t			   msi_gicm_addr;
+	uint32_t			   msi_gicm_base;
+	bool				 use_msi;
+
+	enum msm_pcie_link_status    link_status;
+	bool				 user_suspend;
+	bool                         disable_pc;
+	struct pci_saved_state	     *saved_state;
+
+	struct wakeup_source	     ws;
+	struct msm_bus_scale_pdata   *bus_scale_table;
+	uint32_t			   bus_client;
+
+	bool				l0s_supported;
+	bool				l1_supported;
+	bool				 l1ss_supported;
+	bool				common_clk_en;
+	bool				clk_power_manage_en;
+	bool				 aux_clk_sync;
+	bool				aer_enable;
+	bool				smmu_exist;
+	uint32_t			smmu_sid_base;
+	uint32_t			   n_fts;
+	bool				 ext_ref_clk;
+	bool				common_phy;
+	uint32_t			   ep_latency;
+	uint32_t			switch_latency;
+	uint32_t			wr_halt_size;
+	uint32_t			cpl_timeout;
+	uint32_t			current_bdf;
+	short				current_short_bdf;
+	uint32_t			perst_delay_us_min;
+	uint32_t			perst_delay_us_max;
+	uint32_t			tlp_rd_size;
+	bool				linkdown_panic;
+	uint32_t			boot_option;
+
+	uint32_t			   rc_idx;
+	uint32_t			phy_ver;
+	bool				drv_ready;
+	bool				 enumerated;
+	struct work_struct	     handle_wake_work;
+	struct mutex		     recovery_lock;
+	spinlock_t                   linkdown_lock;
+	spinlock_t                   wakeup_lock;
+	spinlock_t			global_irq_lock;
+	spinlock_t			aer_lock;
+	ulong				linkdown_counter;
+	ulong				link_turned_on_counter;
+	ulong				link_turned_off_counter;
+	ulong				rc_corr_counter;
+	ulong				rc_non_fatal_counter;
+	ulong				rc_fatal_counter;
+	ulong				ep_corr_counter;
+	ulong				ep_non_fatal_counter;
+	ulong				ep_fatal_counter;
+	bool				 suspending;
+	ulong				wake_counter;
+	u32				num_active_ep;
+	u32				num_ep;
+	bool				pending_ep_reg;
+	u32				phy_len;
+	u32				port_phy_len;
+	struct msm_pcie_phy_info_t	*phy_sequence;
+	struct msm_pcie_phy_info_t	*port_phy_sequence;
+	u32		ep_shadow[MAX_DEVICE_NUM][PCIE_CONF_SPACE_DW];
+	u32				  rc_shadow[PCIE_CONF_SPACE_DW];
+	bool				 shadow_en;
+	bool				bridge_found;
+	struct msm_pcie_register_event *event_reg;
+	unsigned int			scm_dev_id;
+	bool				 power_on;
+	void				 *ipc_log;
+	void				*ipc_log_long;
+	void				*ipc_log_dump;
+	bool				use_19p2mhz_aux_clk;
+	bool				use_pinctrl;
+	struct pinctrl			*pinctrl;
+	struct pinctrl_state		*pins_default;
+	struct pinctrl_state		*pins_sleep;
+	struct msm_pcie_device_info   pcidev_table[MAX_DEVICE_NUM];
+};
+
+/* debug mask sys interface */
+static int msm_pcie_debug_mask;
+module_param_named(debug_mask, msm_pcie_debug_mask,
+			    int, S_IRUGO | S_IWUSR | S_IWGRP);
+
+/* debugfs values */
+static u32 rc_sel;
+static u32 base_sel;
+static u32 wr_offset;
+static u32 wr_mask;
+static u32 wr_value;
+static ulong corr_counter_limit = 5;
+
+/* counter to keep track if common PHY needs to be configured */
+static u32 num_rc_on;
+
+/* global lock for PCIe common PHY */
+static struct mutex com_phy_lock;
+
+/* global lock for PCIe enumeration */
+static struct mutex enumerate_lock;
+
+/* Table to track info of PCIe devices */
+static struct msm_pcie_device_info
+	msm_pcie_dev_tbl[MAX_RC_NUM * MAX_DEVICE_NUM];
+
+/* PCIe driver state */
+struct pcie_drv_sta {
+	u32 rc_num;
+	struct mutex drv_lock;
+} pcie_drv;
+
+/* msm pcie device data */
+static struct msm_pcie_dev_t msm_pcie_dev[MAX_RC_NUM];
+
+/* regulators */
+static struct msm_pcie_vreg_info_t msm_pcie_vreg_info[MSM_PCIE_MAX_VREG] = {
+	{NULL, "vreg-3.3", 0, 0, 0, false},
+	{NULL, "vreg-1.8", 1800000, 1800000, 14000, true},
+	{NULL, "vreg-0.9", 1000000, 1000000, 40000, true},
+	{NULL, "vreg-cx", 0, 0, 0, false}
+};
+
+/* GPIOs */
+static struct msm_pcie_gpio_info_t msm_pcie_gpio_info[MSM_PCIE_MAX_GPIO] = {
+	{"perst-gpio",		0, 1, 0, 0, 1},
+	{"wake-gpio",		0, 0, 0, 0, 0},
+	{"qcom,ep-gpio",	0, 1, 1, 0, 0}
+};
+
+/* resets */
+static struct msm_pcie_reset_info_t
+msm_pcie_reset_info[MAX_RC_NUM][MSM_PCIE_MAX_RESET] = {
+	{
+		{NULL, "pcie_phy_reset", false},
+		{NULL, "pcie_phy_com_reset", false},
+		{NULL, "pcie_phy_nocsr_com_phy_reset", false},
+		{NULL, "pcie_0_phy_reset", false}
+	},
+	{
+		{NULL, "pcie_phy_reset", false},
+		{NULL, "pcie_phy_com_reset", false},
+		{NULL, "pcie_phy_nocsr_com_phy_reset", false},
+		{NULL, "pcie_1_phy_reset", false}
+	},
+	{
+		{NULL, "pcie_phy_reset", false},
+		{NULL, "pcie_phy_com_reset", false},
+		{NULL, "pcie_phy_nocsr_com_phy_reset", false},
+		{NULL, "pcie_2_phy_reset", false}
+	}
+};
+
+/* pipe reset  */
+static struct msm_pcie_reset_info_t
+msm_pcie_pipe_reset_info[MAX_RC_NUM][MSM_PCIE_MAX_PIPE_RESET] = {
+	{
+		{NULL, "pcie_0_phy_pipe_reset", false}
+	},
+	{
+		{NULL, "pcie_1_phy_pipe_reset", false}
+	},
+	{
+		{NULL, "pcie_2_phy_pipe_reset", false}
+	}
+};
+
+/* clocks */
+static struct msm_pcie_clk_info_t
+	msm_pcie_clk_info[MAX_RC_NUM][MSM_PCIE_MAX_CLK] = {
+	{
+	{NULL, "pcie_0_ref_clk_src", 0, false, false},
+	{NULL, "pcie_0_aux_clk", 1010000, false, true},
+	{NULL, "pcie_0_cfg_ahb_clk", 0, false, true},
+	{NULL, "pcie_0_mstr_axi_clk", 0, true, true},
+	{NULL, "pcie_0_slv_axi_clk", 0, true, true},
+	{NULL, "pcie_0_ldo", 0, false, true},
+	{NULL, "pcie_0_smmu_clk", 0, false, false},
+	{NULL, "pcie_phy_cfg_ahb_clk", 0, false, false},
+	{NULL, "pcie_phy_aux_clk", 0, false, false}
+	},
+	{
+	{NULL, "pcie_1_ref_clk_src", 0, false, false},
+	{NULL, "pcie_1_aux_clk", 1010000, false, true},
+	{NULL, "pcie_1_cfg_ahb_clk", 0, false, true},
+	{NULL, "pcie_1_mstr_axi_clk", 0, true, true},
+	{NULL, "pcie_1_slv_axi_clk", 0, true,  true},
+	{NULL, "pcie_1_ldo", 0, false, true},
+	{NULL, "pcie_1_smmu_clk", 0, false, false},
+	{NULL, "pcie_phy_cfg_ahb_clk", 0, false, false},
+	{NULL, "pcie_phy_aux_clk", 0, false, false}
+	},
+	{
+	{NULL, "pcie_2_ref_clk_src", 0, false, false},
+	{NULL, "pcie_2_aux_clk", 1010000, false, true},
+	{NULL, "pcie_2_cfg_ahb_clk", 0, false, true},
+	{NULL, "pcie_2_mstr_axi_clk", 0, true, true},
+	{NULL, "pcie_2_slv_axi_clk", 0, true, true},
+	{NULL, "pcie_2_ldo", 0, false, true},
+	{NULL, "pcie_2_smmu_clk", 0, false, false},
+	{NULL, "pcie_phy_cfg_ahb_clk", 0, false, false},
+	{NULL, "pcie_phy_aux_clk", 0, false, false}
+	}
+};
+
+/* Pipe Clocks */
+static struct msm_pcie_clk_info_t
+	msm_pcie_pipe_clk_info[MAX_RC_NUM][MSM_PCIE_MAX_PIPE_CLK] = {
+	{
+	{NULL, "pcie_0_pipe_clk", 125000000, true, true},
+	},
+	{
+	{NULL, "pcie_1_pipe_clk", 125000000, true, true},
+	},
+	{
+	{NULL, "pcie_2_pipe_clk", 125000000, true, true},
+	}
+};
+
+/* resources */
+static const struct msm_pcie_res_info_t msm_pcie_res_info[MSM_PCIE_MAX_RES] = {
+	{"parf",	0, 0},
+	{"phy",     0, 0},
+	{"dm_core",	0, 0},
+	{"elbi",	0, 0},
+	{"conf",	0, 0},
+	{"io",		0, 0},
+	{"bars",	0, 0},
+	{"tcsr",	0, 0}
+};
+
+/* irqs */
+static const struct msm_pcie_irq_info_t msm_pcie_irq_info[MSM_PCIE_MAX_IRQ] = {
+	{"int_msi",	0},
+	{"int_a",	0},
+	{"int_b",	0},
+	{"int_c",	0},
+	{"int_d",	0},
+	{"int_pls_pme",		0},
+	{"int_pme_legacy",	0},
+	{"int_pls_err",		0},
+	{"int_aer_legacy",	0},
+	{"int_pls_link_up",	0},
+	{"int_pls_link_down",	0},
+	{"int_bridge_flush_n",	0},
+	{"int_global_int",	0}
+};
+
+/* MSIs */
+static const struct msm_pcie_irq_info_t msm_pcie_msi_info[MSM_PCIE_MAX_MSI] = {
+	{"msi_0", 0}, {"msi_1", 0}, {"msi_2", 0}, {"msi_3", 0},
+	{"msi_4", 0}, {"msi_5", 0}, {"msi_6", 0}, {"msi_7", 0},
+	{"msi_8", 0}, {"msi_9", 0}, {"msi_10", 0}, {"msi_11", 0},
+	{"msi_12", 0}, {"msi_13", 0}, {"msi_14", 0}, {"msi_15", 0},
+	{"msi_16", 0}, {"msi_17", 0}, {"msi_18", 0}, {"msi_19", 0},
+	{"msi_20", 0}, {"msi_21", 0}, {"msi_22", 0}, {"msi_23", 0},
+	{"msi_24", 0}, {"msi_25", 0}, {"msi_26", 0}, {"msi_27", 0},
+	{"msi_28", 0}, {"msi_29", 0}, {"msi_30", 0}, {"msi_31", 0}
+};
+
+#ifdef CONFIG_ARM
+#define PCIE_BUS_PRIV_DATA(bus) \
+	(((struct pci_sys_data *)(bus)->sysdata)->private_data)
+
+static struct pci_sys_data msm_pcie_sys_data[MAX_RC_NUM];
+
+static inline void *msm_pcie_setup_sys_data(struct msm_pcie_dev_t *dev)
+{
+	msm_pcie_sys_data[dev->rc_idx].domain = dev->rc_idx;
+	msm_pcie_sys_data[dev->rc_idx].private_data = dev;
+
+	return &msm_pcie_sys_data[dev->rc_idx];
+}
+
+static inline void msm_pcie_fixup_irqs(struct msm_pcie_dev_t *dev)
+{
+	pci_fixup_irqs(pci_common_swizzle, of_irq_parse_and_map_pci);
+}
+#else
+#define PCIE_BUS_PRIV_DATA(bus) \
+	((struct msm_pcie_dev_t *)((bus)->sysdata))
+
+static inline void *msm_pcie_setup_sys_data(struct msm_pcie_dev_t *dev)
+{
+	return dev;
+}
+
+static inline void msm_pcie_fixup_irqs(struct msm_pcie_dev_t *dev)
+{
+}
+#endif
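+
+/*
+ * A minimal usage sketch for the accessor above (illustrative,
+ * assuming a struct pci_bus *bus handed in by the PCI core):
+ *
+ *	struct msm_pcie_dev_t *pcie_dev = PCIE_BUS_PRIV_DATA(bus);
+ */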
+
+static inline void msm_pcie_write_reg(void *base, u32 offset, u32 value)
+{
+	writel_relaxed(value, base + offset);
+	wmb(); /* ensure the register write goes through to hardware */
+}
+
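+/*
+ * Read-modify-write of a register bit-field. The field position is
+ * taken from the lowest set bit of the mask, so callers pass val
+ * unshifted; e.g. (illustrative) mask = 0x00FF0000 yields shift 16
+ * and writes val into bits [23:16]. The value must fit within the
+ * mask width.
+ */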
+static inline void msm_pcie_write_reg_field(void *base, u32 offset,
+	const u32 mask, u32 val)
+{
+	u32 shift = find_first_bit((void *)&mask, 32);
+	u32 tmp = readl_relaxed(base + offset);
+
+	tmp &= ~mask; /* clear written bits */
+	val = tmp | (val << shift);
+	writel_relaxed(val, base + offset);
+	wmb(); /* ensure the register write goes through to hardware */
+}
+
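+/*
+ * Note (based on the MSM clock framework): CLKFLAG_NORETAIN_MEM and
+ * CLKFLAG_NORETAIN_PERIPH request that the clock controller not
+ * retain the block's core/peripheral memory while the clock is off,
+ * trading retention for power savings.
+ */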
+static inline void msm_pcie_config_clock_mem(struct msm_pcie_dev_t *dev,
+	struct msm_pcie_clk_info_t *info)
+{
+	int ret;
+
+	ret = clk_set_flags(info->hdl, CLKFLAG_NORETAIN_MEM);
+	if (ret)
+		PCIE_ERR(dev,
+			"PCIe: RC%d can't configure core memory for clk %s: %d.\n",
+			dev->rc_idx, info->name, ret);
+	else
+		PCIE_DBG2(dev,
+			"PCIe: RC%d configured core memory for clk %s.\n",
+			dev->rc_idx, info->name);
+
+	ret = clk_set_flags(info->hdl, CLKFLAG_NORETAIN_PERIPH);
+	if (ret)
+		PCIE_ERR(dev,
+			"PCIe: RC%d can't configure peripheral memory for clk %s: %d.\n",
+			dev->rc_idx, info->name, ret);
+	else
+		PCIE_DBG2(dev,
+			"PCIe: RC%d configured peripheral memory for clk %s.\n",
+			dev->rc_idx, info->name);
+}
+
+#if defined(CONFIG_ARCH_FSM9010)
+#define PCIE20_PARF_PHY_STTS         0x3c
+#define PCIE2_PHY_RESET_CTRL         0x44
+#define PCIE20_PARF_PHY_REFCLK_CTRL2 0xa0
+#define PCIE20_PARF_PHY_REFCLK_CTRL3 0xa4
+#define PCIE20_PARF_PCS_SWING_CTRL1  0x88
+#define PCIE20_PARF_PCS_SWING_CTRL2  0x8c
+#define PCIE20_PARF_PCS_DEEMPH1      0x74
+#define PCIE20_PARF_PCS_DEEMPH2      0x78
+#define PCIE20_PARF_PCS_DEEMPH3      0x7c
+#define PCIE20_PARF_CONFIGBITS       0x84
+#define PCIE20_PARF_PHY_CTRL3        0x94
+#define PCIE20_PARF_PCS_CTRL         0x80
+
+#define TX_AMP_VAL                   127
+#define PHY_RX0_EQ_GEN1_VAL          0
+#define PHY_RX0_EQ_GEN2_VAL          4
+#define TX_DEEMPH_GEN1_VAL           24
+#define TX_DEEMPH_GEN2_3_5DB_VAL     24
+#define TX_DEEMPH_GEN2_6DB_VAL       34
+#define PHY_TX0_TERM_OFFST_VAL       0
+
+static inline void pcie_phy_dump(struct msm_pcie_dev_t *dev)
+{
+}
+
+static inline void pcie20_phy_reset(struct msm_pcie_dev_t *dev, uint32_t assert)
+{
+	msm_pcie_write_reg_field(dev->phy, PCIE2_PHY_RESET_CTRL,
+					 BIT(0), (assert) ? 1 : 0);
+}
+
+static void pcie_phy_init(struct msm_pcie_dev_t *dev)
+{
+	PCIE_DBG(dev, "RC%d: Initializing 28LP SNS phy - 100MHz\n",
+		dev->rc_idx);
+
+	/* Assert Phy SW Reset */
+	pcie20_phy_reset(dev, 1);
+
+	/* Program SSP ENABLE */
+	if (readl_relaxed(dev->phy + PCIE20_PARF_PHY_REFCLK_CTRL2) & BIT(0))
+		msm_pcie_write_reg_field(dev->phy, PCIE20_PARF_PHY_REFCLK_CTRL2,
+								 BIT(0), 0);
+	if ((readl_relaxed(dev->phy + PCIE20_PARF_PHY_REFCLK_CTRL3) &
+								 BIT(0)) == 0)
+		msm_pcie_write_reg_field(dev->phy, PCIE20_PARF_PHY_REFCLK_CTRL3,
+								 BIT(0), 1);
+	/* Program Tx Amplitude */
+	if ((readl_relaxed(dev->phy + PCIE20_PARF_PCS_SWING_CTRL1) &
+		(BIT(6)|BIT(5)|BIT(4)|BIT(3)|BIT(2)|BIT(1)|BIT(0))) !=
+				TX_AMP_VAL)
+		msm_pcie_write_reg_field(dev->phy, PCIE20_PARF_PCS_SWING_CTRL1,
+			BIT(6)|BIT(5)|BIT(4)|BIT(3)|BIT(2)|BIT(1)|BIT(0),
+				TX_AMP_VAL);
+	if ((readl_relaxed(dev->phy + PCIE20_PARF_PCS_SWING_CTRL2) &
+		(BIT(6)|BIT(5)|BIT(4)|BIT(3)|BIT(2)|BIT(1)|BIT(0))) !=
+				TX_AMP_VAL)
+		msm_pcie_write_reg_field(dev->phy, PCIE20_PARF_PCS_SWING_CTRL2,
+			BIT(6)|BIT(5)|BIT(4)|BIT(3)|BIT(2)|BIT(1)|BIT(0),
+				TX_AMP_VAL);
+	/* Program De-Emphasis */
+	if ((readl_relaxed(dev->phy + PCIE20_PARF_PCS_DEEMPH1) &
+			(BIT(5)|BIT(4)|BIT(3)|BIT(2)|BIT(1)|BIT(0))) !=
+				TX_DEEMPH_GEN2_6DB_VAL)
+		msm_pcie_write_reg_field(dev->phy, PCIE20_PARF_PCS_DEEMPH1,
+			BIT(5)|BIT(4)|BIT(3)|BIT(2)|BIT(1)|BIT(0),
+				TX_DEEMPH_GEN2_6DB_VAL);
+
+	if ((readl_relaxed(dev->phy + PCIE20_PARF_PCS_DEEMPH2) &
+			(BIT(5)|BIT(4)|BIT(3)|BIT(2)|BIT(1)|BIT(0))) !=
+				TX_DEEMPH_GEN2_3_5DB_VAL)
+		msm_pcie_write_reg_field(dev->phy, PCIE20_PARF_PCS_DEEMPH2,
+			BIT(5)|BIT(4)|BIT(3)|BIT(2)|BIT(1)|BIT(0),
+				TX_DEEMPH_GEN2_3_5DB_VAL);
+
+	if ((readl_relaxed(dev->phy + PCIE20_PARF_PCS_DEEMPH3) &
+			(BIT(5)|BIT(4)|BIT(3)|BIT(2)|BIT(1)|BIT(0))) !=
+				TX_DEEMPH_GEN1_VAL)
+		msm_pcie_write_reg_field(dev->phy, PCIE20_PARF_PCS_DEEMPH3,
+			BIT(5)|BIT(4)|BIT(3)|BIT(2)|BIT(1)|BIT(0),
+				TX_DEEMPH_GEN1_VAL);
+
+	/* Program Rx_Eq */
+	if ((readl_relaxed(dev->phy + PCIE20_PARF_CONFIGBITS) &
+			(BIT(2)|BIT(1)|BIT(0))) != PHY_RX0_EQ_GEN1_VAL)
+		msm_pcie_write_reg_field(dev->phy, PCIE20_PARF_CONFIGBITS,
+				 BIT(2)|BIT(1)|BIT(0), PHY_RX0_EQ_GEN1_VAL);
+
+	/* Program Tx0_term_offset */
+	if ((readl_relaxed(dev->phy + PCIE20_PARF_PHY_CTRL3) &
+			(BIT(4)|BIT(3)|BIT(2)|BIT(1)|BIT(0))) !=
+				PHY_TX0_TERM_OFFST_VAL)
+		msm_pcie_write_reg_field(dev->phy, PCIE20_PARF_PHY_CTRL3,
+			 BIT(4)|BIT(3)|BIT(2)|BIT(1)|BIT(0),
+				PHY_TX0_TERM_OFFST_VAL);
+
+	/* Program REF_CLK source */
+	msm_pcie_write_reg_field(dev->phy, PCIE20_PARF_PHY_REFCLK_CTRL2, BIT(1),
+		(dev->ext_ref_clk) ? 1 : 0);
+	/* disable Tx2Rx Loopback */
+	if (readl_relaxed(dev->phy + PCIE20_PARF_PCS_CTRL) & BIT(1))
+		msm_pcie_write_reg_field(dev->phy, PCIE20_PARF_PCS_CTRL,
+								 BIT(1), 0);
+	/* De-assert Phy SW Reset */
+	pcie20_phy_reset(dev, 0);
+}
+
+static bool pcie_phy_is_ready(struct msm_pcie_dev_t *dev)
+{
+	/* read PCIE20_PARF_PHY_STTS twice */
+	readl_relaxed(dev->phy + PCIE20_PARF_PHY_STTS);
+	if (readl_relaxed(dev->phy + PCIE20_PARF_PHY_STTS) & BIT(0))
+		return false;
+	else
+		return true;
+}
+#else
+static void pcie_phy_dump_test_cntrl(struct msm_pcie_dev_t *dev,
+					u32 cntrl4_val, u32 cntrl5_val,
+					u32 cntrl6_val, u32 cntrl7_val)
+{
+	msm_pcie_write_reg(dev->phy,
+		PCIE_N_TEST_CONTROL4(dev->rc_idx, dev->common_phy), cntrl4_val);
+	msm_pcie_write_reg(dev->phy,
+		PCIE_N_TEST_CONTROL5(dev->rc_idx, dev->common_phy), cntrl5_val);
+	msm_pcie_write_reg(dev->phy,
+		PCIE_N_TEST_CONTROL6(dev->rc_idx, dev->common_phy), cntrl6_val);
+	msm_pcie_write_reg(dev->phy,
+		PCIE_N_TEST_CONTROL7(dev->rc_idx, dev->common_phy), cntrl7_val);
+
+	PCIE_DUMP(dev,
+		"PCIe: RC%d PCIE_N_TEST_CONTROL4: 0x%x\n", dev->rc_idx,
+		readl_relaxed(dev->phy +
+			PCIE_N_TEST_CONTROL4(dev->rc_idx,
+				dev->common_phy)));
+	PCIE_DUMP(dev,
+		"PCIe: RC%d PCIE_N_TEST_CONTROL5: 0x%x\n", dev->rc_idx,
+		readl_relaxed(dev->phy +
+			PCIE_N_TEST_CONTROL5(dev->rc_idx,
+				dev->common_phy)));
+	PCIE_DUMP(dev,
+		"PCIe: RC%d PCIE_N_TEST_CONTROL6: 0x%x\n", dev->rc_idx,
+		readl_relaxed(dev->phy +
+			PCIE_N_TEST_CONTROL6(dev->rc_idx,
+				dev->common_phy)));
+	PCIE_DUMP(dev,
+		"PCIe: RC%d PCIE_N_TEST_CONTROL7: 0x%x\n", dev->rc_idx,
+		readl_relaxed(dev->phy +
+			PCIE_N_TEST_CONTROL7(dev->rc_idx,
+				dev->common_phy)));
+	PCIE_DUMP(dev,
+		"PCIe: RC%d PCIE_N_DEBUG_BUS_0_STATUS: 0x%x\n", dev->rc_idx,
+		readl_relaxed(dev->phy +
+			PCIE_N_DEBUG_BUS_0_STATUS(dev->rc_idx,
+				dev->common_phy)));
+	PCIE_DUMP(dev,
+		"PCIe: RC%d PCIE_N_DEBUG_BUS_1_STATUS: 0x%x\n", dev->rc_idx,
+		readl_relaxed(dev->phy +
+			PCIE_N_DEBUG_BUS_1_STATUS(dev->rc_idx,
+				dev->common_phy)));
+	PCIE_DUMP(dev,
+		"PCIe: RC%d PCIE_N_DEBUG_BUS_2_STATUS: 0x%x\n", dev->rc_idx,
+		readl_relaxed(dev->phy +
+			PCIE_N_DEBUG_BUS_2_STATUS(dev->rc_idx,
+				dev->common_phy)));
+	PCIE_DUMP(dev,
+		"PCIe: RC%d PCIE_N_DEBUG_BUS_3_STATUS: 0x%x\n\n", dev->rc_idx,
+		readl_relaxed(dev->phy +
+			PCIE_N_DEBUG_BUS_3_STATUS(dev->rc_idx,
+				dev->common_phy)));
+}
+
+static void pcie_phy_dump(struct msm_pcie_dev_t *dev)
+{
+	int i, size;
+	u32 write_val;
+
+	if (dev->phy_ver >= 0x20) {
+		PCIE_DUMP(dev, "PCIe: RC%d PHY dump is not supported\n",
+			dev->rc_idx);
+		return;
+	}
+
+	PCIE_DUMP(dev, "PCIe: RC%d PHY testbus\n", dev->rc_idx);
+
+	pcie_phy_dump_test_cntrl(dev, 0x18, 0x19, 0x1A, 0x1B);
+	pcie_phy_dump_test_cntrl(dev, 0x1C, 0x1D, 0x1E, 0x1F);
+	pcie_phy_dump_test_cntrl(dev, 0x20, 0x21, 0x22, 0x23);
+
+	for (i = 0; i < 3; i++) {
+		write_val = 0x1 + i;
+		msm_pcie_write_reg(dev->phy,
+			QSERDES_TX_N_DEBUG_BUS_SEL(dev->rc_idx,
+				dev->common_phy), write_val);
+		PCIE_DUMP(dev,
+			"PCIe: RC%d QSERDES_TX_N_DEBUG_BUS_SEL: 0x%x\n",
+			dev->rc_idx,
+			readl_relaxed(dev->phy +
+				QSERDES_TX_N_DEBUG_BUS_SEL(dev->rc_idx,
+					dev->common_phy)));
+
+		pcie_phy_dump_test_cntrl(dev, 0x30, 0x31, 0x32, 0x33);
+	}
+
+	pcie_phy_dump_test_cntrl(dev, 0, 0, 0, 0);
+
+	if (dev->phy_ver >= 0x10 && dev->phy_ver < 0x20) {
+		pcie_phy_dump_test_cntrl(dev, 0x01, 0x02, 0x03, 0x0A);
+		pcie_phy_dump_test_cntrl(dev, 0x0E, 0x0F, 0x12, 0x13);
+		pcie_phy_dump_test_cntrl(dev, 0, 0, 0, 0);
+
+		for (i = 0; i < 8; i += 4) {
+			write_val = 0x1 + i;
+			msm_pcie_write_reg(dev->phy,
+				PCIE_MISC_N_DEBUG_BUS_BYTE0_INDEX(dev->rc_idx,
+					dev->common_phy), write_val);
+			msm_pcie_write_reg(dev->phy,
+				PCIE_MISC_N_DEBUG_BUS_BYTE1_INDEX(dev->rc_idx,
+					dev->common_phy), write_val + 1);
+			msm_pcie_write_reg(dev->phy,
+				PCIE_MISC_N_DEBUG_BUS_BYTE2_INDEX(dev->rc_idx,
+					dev->common_phy), write_val + 2);
+			msm_pcie_write_reg(dev->phy,
+				PCIE_MISC_N_DEBUG_BUS_BYTE3_INDEX(dev->rc_idx,
+					dev->common_phy), write_val + 3);
+
+			PCIE_DUMP(dev,
+				"PCIe: RC%d to PCIE_MISC_N_DEBUG_BUS_BYTE0_INDEX: 0x%x\n",
+				dev->rc_idx,
+				readl_relaxed(dev->phy +
+					PCIE_MISC_N_DEBUG_BUS_BYTE0_INDEX(
+						dev->rc_idx, dev->common_phy)));
+			PCIE_DUMP(dev,
+				"PCIe: RC%d to PCIE_MISC_N_DEBUG_BUS_BYTE1_INDEX: 0x%x\n",
+				dev->rc_idx,
+				readl_relaxed(dev->phy +
+					PCIE_MISC_N_DEBUG_BUS_BYTE1_INDEX(
+						dev->rc_idx, dev->common_phy)));
+			PCIE_DUMP(dev,
+				"PCIe: RC%d to PCIE_MISC_N_DEBUG_BUS_BYTE2_INDEX: 0x%x\n",
+				dev->rc_idx,
+				readl_relaxed(dev->phy +
+					PCIE_MISC_N_DEBUG_BUS_BYTE2_INDEX(
+						dev->rc_idx, dev->common_phy)));
+			PCIE_DUMP(dev,
+				"PCIe: RC%d to PCIE_MISC_N_DEBUG_BUS_BYTE3_INDEX: 0x%x\n",
+				dev->rc_idx,
+				readl_relaxed(dev->phy +
+					PCIE_MISC_N_DEBUG_BUS_BYTE3_INDEX(
+						dev->rc_idx, dev->common_phy)));
+			PCIE_DUMP(dev,
+				"PCIe: RC%d PCIE_MISC_N_DEBUG_BUS_0_STATUS: 0x%x\n",
+				dev->rc_idx,
+				readl_relaxed(dev->phy +
+					PCIE_MISC_N_DEBUG_BUS_0_STATUS(
+						dev->rc_idx, dev->common_phy)));
+			PCIE_DUMP(dev,
+				"PCIe: RC%d PCIE_MISC_N_DEBUG_BUS_1_STATUS: 0x%x\n",
+				dev->rc_idx,
+				readl_relaxed(dev->phy +
+					PCIE_MISC_N_DEBUG_BUS_1_STATUS(
+						dev->rc_idx, dev->common_phy)));
+			PCIE_DUMP(dev,
+				"PCIe: RC%d PCIE_MISC_N_DEBUG_BUS_2_STATUS: 0x%x\n",
+				dev->rc_idx,
+				readl_relaxed(dev->phy +
+					PCIE_MISC_N_DEBUG_BUS_2_STATUS(
+						dev->rc_idx, dev->common_phy)));
+			PCIE_DUMP(dev,
+				"PCIe: RC%d PCIE_MISC_N_DEBUG_BUS_3_STATUS: 0x%x\n",
+				dev->rc_idx,
+				readl_relaxed(dev->phy +
+					PCIE_MISC_N_DEBUG_BUS_3_STATUS(
+						dev->rc_idx, dev->common_phy)));
+		}
+
+		msm_pcie_write_reg(dev->phy,
+			PCIE_MISC_N_DEBUG_BUS_BYTE0_INDEX(
+				dev->rc_idx, dev->common_phy), 0);
+		msm_pcie_write_reg(dev->phy,
+			PCIE_MISC_N_DEBUG_BUS_BYTE1_INDEX(
+				dev->rc_idx, dev->common_phy), 0);
+		msm_pcie_write_reg(dev->phy,
+			PCIE_MISC_N_DEBUG_BUS_BYTE2_INDEX(
+				dev->rc_idx, dev->common_phy), 0);
+		msm_pcie_write_reg(dev->phy,
+			PCIE_MISC_N_DEBUG_BUS_BYTE3_INDEX(
+				dev->rc_idx, dev->common_phy), 0);
+	}
+
+	for (i = 0; i < 2; i++) {
+		write_val = 0x2 + i;
+
+		msm_pcie_write_reg(dev->phy, QSERDES_COM_DEBUG_BUS_SEL,
+			write_val);
+
+		PCIE_DUMP(dev,
+			"PCIe: RC%d to QSERDES_COM_DEBUG_BUS_SEL: 0x%x\n",
+			dev->rc_idx,
+			readl_relaxed(dev->phy + QSERDES_COM_DEBUG_BUS_SEL));
+		PCIE_DUMP(dev,
+			"PCIe: RC%d QSERDES_COM_DEBUG_BUS0: 0x%x\n",
+			dev->rc_idx,
+			readl_relaxed(dev->phy + QSERDES_COM_DEBUG_BUS0));
+		PCIE_DUMP(dev,
+			"PCIe: RC%d QSERDES_COM_DEBUG_BUS1: 0x%x\n",
+			dev->rc_idx,
+			readl_relaxed(dev->phy + QSERDES_COM_DEBUG_BUS1));
+		PCIE_DUMP(dev,
+			"PCIe: RC%d QSERDES_COM_DEBUG_BUS2: 0x%x\n",
+			dev->rc_idx,
+			readl_relaxed(dev->phy + QSERDES_COM_DEBUG_BUS2));
+		PCIE_DUMP(dev,
+			"PCIe: RC%d QSERDES_COM_DEBUG_BUS3: 0x%x\n\n",
+			dev->rc_idx,
+			readl_relaxed(dev->phy + QSERDES_COM_DEBUG_BUS3));
+	}
+
+	msm_pcie_write_reg(dev->phy, QSERDES_COM_DEBUG_BUS_SEL, 0);
+
+	if (dev->common_phy) {
+		msm_pcie_write_reg(dev->phy, PCIE_COM_DEBUG_BUS_BYTE0_INDEX,
+			0x01);
+		msm_pcie_write_reg(dev->phy, PCIE_COM_DEBUG_BUS_BYTE1_INDEX,
+			0x02);
+		msm_pcie_write_reg(dev->phy, PCIE_COM_DEBUG_BUS_BYTE2_INDEX,
+			0x03);
+		msm_pcie_write_reg(dev->phy, PCIE_COM_DEBUG_BUS_BYTE3_INDEX,
+			0x04);
+
+		PCIE_DUMP(dev,
+			"PCIe: RC%d to PCIE_COM_DEBUG_BUS_BYTE0_INDEX: 0x%x\n",
+			dev->rc_idx,
+			readl_relaxed(dev->phy +
+				PCIE_COM_DEBUG_BUS_BYTE0_INDEX));
+		PCIE_DUMP(dev,
+			"PCIe: RC%d to PCIE_COM_DEBUG_BUS_BYTE1_INDEX: 0x%x\n",
+			dev->rc_idx,
+			readl_relaxed(dev->phy +
+				PCIE_COM_DEBUG_BUS_BYTE1_INDEX));
+		PCIE_DUMP(dev,
+			"PCIe: RC%d to PCIE_COM_DEBUG_BUS_BYTE2_INDEX: 0x%x\n",
+			dev->rc_idx,
+			readl_relaxed(dev->phy +
+				PCIE_COM_DEBUG_BUS_BYTE2_INDEX));
+		PCIE_DUMP(dev,
+			"PCIe: RC%d to PCIE_COM_DEBUG_BUS_BYTE3_INDEX: 0x%x\n",
+			dev->rc_idx,
+			readl_relaxed(dev->phy +
+				PCIE_COM_DEBUG_BUS_BYTE3_INDEX));
+		PCIE_DUMP(dev,
+			"PCIe: RC%d PCIE_COM_DEBUG_BUS_0_STATUS: 0x%x\n",
+			dev->rc_idx,
+			readl_relaxed(dev->phy +
+				PCIE_COM_DEBUG_BUS_0_STATUS));
+		PCIE_DUMP(dev,
+			"PCIe: RC%d PCIE_COM_DEBUG_BUS_1_STATUS: 0x%x\n",
+			dev->rc_idx,
+			readl_relaxed(dev->phy +
+				PCIE_COM_DEBUG_BUS_1_STATUS));
+		PCIE_DUMP(dev,
+			"PCIe: RC%d PCIE_COM_DEBUG_BUS_2_STATUS: 0x%x\n",
+			dev->rc_idx,
+			readl_relaxed(dev->phy +
+				PCIE_COM_DEBUG_BUS_2_STATUS));
+		PCIE_DUMP(dev,
+			"PCIe: RC%d PCIE_COM_DEBUG_BUS_3_STATUS: 0x%x\n",
+			dev->rc_idx,
+			readl_relaxed(dev->phy +
+				PCIE_COM_DEBUG_BUS_3_STATUS));
+
+		msm_pcie_write_reg(dev->phy, PCIE_COM_DEBUG_BUS_BYTE0_INDEX,
+			0x05);
+
+		PCIE_DUMP(dev,
+			"PCIe: RC%d to PCIE_COM_DEBUG_BUS_BYTE0_INDEX: 0x%x\n",
+			dev->rc_idx,
+			readl_relaxed(dev->phy +
+				PCIE_COM_DEBUG_BUS_BYTE0_INDEX));
+		PCIE_DUMP(dev,
+			"PCIe: RC%d PCIE_COM_DEBUG_BUS_0_STATUS: 0x%x\n\n",
+			dev->rc_idx,
+			readl_relaxed(dev->phy +
+				PCIE_COM_DEBUG_BUS_0_STATUS));
+	}
+
+	size = resource_size(dev->res[MSM_PCIE_RES_PHY].resource);
+	for (i = 0; i < size; i += 32) {
+		PCIE_DUMP(dev,
+			"PCIe PHY of RC%d: 0x%04x %08x %08x %08x %08x %08x %08x %08x %08x\n",
+			dev->rc_idx, i,
+			readl_relaxed(dev->phy + i),
+			readl_relaxed(dev->phy + (i + 4)),
+			readl_relaxed(dev->phy + (i + 8)),
+			readl_relaxed(dev->phy + (i + 12)),
+			readl_relaxed(dev->phy + (i + 16)),
+			readl_relaxed(dev->phy + (i + 20)),
+			readl_relaxed(dev->phy + (i + 24)),
+			readl_relaxed(dev->phy + (i + 28)));
+	}
+}
+
+#ifdef CONFIG_ARCH_MDMCALIFORNIUM
+static void pcie_phy_init(struct msm_pcie_dev_t *dev)
+{
+	u8 common_phy;
+
+	PCIE_DBG(dev,
+		"RC%d: Initializing MDM 14nm QMP phy - 19.2MHz with Common Mode Clock (SSC ON)\n",
+		dev->rc_idx);
+
+	if (dev->common_phy)
+		common_phy = 1;
+	else
+		common_phy = 0;
+
+	msm_pcie_write_reg(dev->phy,
+		PCIE_N_SW_RESET(dev->rc_idx, common_phy),
+		0x01);
+	msm_pcie_write_reg(dev->phy,
+		PCIE_N_POWER_DOWN_CONTROL(dev->rc_idx, common_phy),
+		0x03);
+
+	msm_pcie_write_reg(dev->phy, QSERDES_COM_BIAS_EN_CLKBUFLR_EN, 0x18);
+	msm_pcie_write_reg(dev->phy, QSERDES_COM_CLK_ENABLE1, 0x10);
+
+	msm_pcie_write_reg(dev->phy,
+			QSERDES_TX_N_LANE_MODE(dev->rc_idx, common_phy), 0x06);
+
+	msm_pcie_write_reg(dev->phy, QSERDES_COM_LOCK_CMP_EN, 0x01);
+	msm_pcie_write_reg(dev->phy, QSERDES_COM_VCO_TUNE_MAP, 0x00);
+	msm_pcie_write_reg(dev->phy, QSERDES_COM_VCO_TUNE_TIMER1, 0xFF);
+	msm_pcie_write_reg(dev->phy, QSERDES_COM_VCO_TUNE_TIMER2, 0x1F);
+	msm_pcie_write_reg(dev->phy, QSERDES_COM_BG_TRIM, 0x0F);
+	msm_pcie_write_reg(dev->phy, QSERDES_COM_PLL_IVCO, 0x0F);
+	msm_pcie_write_reg(dev->phy, QSERDES_COM_HSCLK_SEL, 0x00);
+	msm_pcie_write_reg(dev->phy, QSERDES_COM_SVS_MODE_CLK_SEL, 0x01);
+	msm_pcie_write_reg(dev->phy, QSERDES_COM_CORE_CLK_EN, 0x20);
+	msm_pcie_write_reg(dev->phy, QSERDES_COM_CORECLK_DIV, 0x0A);
+	msm_pcie_write_reg(dev->phy, QSERDES_COM_BG_TIMER, 0x09);
+
+	if (dev->tcsr) {
+		PCIE_DBG(dev, "RC%d: TCSR PHY clock scheme is 0x%x\n",
+			dev->rc_idx, readl_relaxed(dev->tcsr));
+
+		if (readl_relaxed(dev->tcsr) & (BIT(1) | BIT(0)))
+			msm_pcie_write_reg(dev->phy,
+					QSERDES_COM_SYSCLK_EN_SEL, 0x0A);
+		else
+			msm_pcie_write_reg(dev->phy,
+					QSERDES_COM_SYSCLK_EN_SEL, 0x04);
+	}
+
+	msm_pcie_write_reg(dev->phy, QSERDES_COM_DEC_START_MODE0, 0x82);
+	msm_pcie_write_reg(dev->phy, QSERDES_COM_DIV_FRAC_START3_MODE0, 0x03);
+	msm_pcie_write_reg(dev->phy, QSERDES_COM_DIV_FRAC_START2_MODE0, 0x55);
+	msm_pcie_write_reg(dev->phy, QSERDES_COM_DIV_FRAC_START1_MODE0, 0x55);
+	msm_pcie_write_reg(dev->phy, QSERDES_COM_LOCK_CMP3_MODE0, 0x00);
+	msm_pcie_write_reg(dev->phy, QSERDES_COM_LOCK_CMP2_MODE0, 0x0D);
+	msm_pcie_write_reg(dev->phy, QSERDES_COM_LOCK_CMP1_MODE0, 0x04);
+	msm_pcie_write_reg(dev->phy, QSERDES_COM_CLK_SELECT, 0x33);
+	msm_pcie_write_reg(dev->phy, QSERDES_COM_SYS_CLK_CTRL, 0x02);
+	msm_pcie_write_reg(dev->phy, QSERDES_COM_SYSCLK_BUF_ENABLE, 0x1F);
+	msm_pcie_write_reg(dev->phy, QSERDES_COM_CP_CTRL_MODE0, 0x0B);
+	msm_pcie_write_reg(dev->phy, QSERDES_COM_PLL_RCTRL_MODE0, 0x16);
+	msm_pcie_write_reg(dev->phy, QSERDES_COM_PLL_CCTRL_MODE0, 0x28);
+	msm_pcie_write_reg(dev->phy, QSERDES_COM_INTEGLOOP_GAIN1_MODE0, 0x00);
+	msm_pcie_write_reg(dev->phy, QSERDES_COM_INTEGLOOP_GAIN0_MODE0, 0x80);
+
+	msm_pcie_write_reg(dev->phy, QSERDES_COM_SSC_EN_CENTER, 0x01);
+	msm_pcie_write_reg(dev->phy, QSERDES_COM_SSC_PER1, 0x31);
+	msm_pcie_write_reg(dev->phy, QSERDES_COM_SSC_PER2, 0x01);
+	msm_pcie_write_reg(dev->phy, QSERDES_COM_SSC_ADJ_PER1, 0x02);
+	msm_pcie_write_reg(dev->phy, QSERDES_COM_SSC_ADJ_PER2, 0x00);
+	msm_pcie_write_reg(dev->phy, QSERDES_COM_SSC_STEP_SIZE1, 0x2f);
+	msm_pcie_write_reg(dev->phy, QSERDES_COM_SSC_STEP_SIZE2, 0x19);
+
+	msm_pcie_write_reg(dev->phy,
+		QSERDES_TX_N_HIGHZ_TRANSCEIVEREN_BIAS_DRVR_EN(dev->rc_idx,
+		common_phy), 0x45);
+
+	msm_pcie_write_reg(dev->phy, QSERDES_COM_CMN_CONFIG, 0x06);
+
+	msm_pcie_write_reg(dev->phy,
+		QSERDES_TX_N_RES_CODE_LANE_OFFSET(dev->rc_idx, common_phy),
+		0x02);
+	msm_pcie_write_reg(dev->phy,
+		QSERDES_TX_N_RCV_DETECT_LVL_2(dev->rc_idx, common_phy),
+		0x12);
+
+	msm_pcie_write_reg(dev->phy,
+		QSERDES_RX_N_SIGDET_ENABLES(dev->rc_idx, common_phy),
+		0x1C);
+	msm_pcie_write_reg(dev->phy,
+		QSERDES_RX_N_SIGDET_DEGLITCH_CNTRL(dev->rc_idx, common_phy),
+		0x14);
+	msm_pcie_write_reg(dev->phy,
+		QSERDES_RX_N_RX_EQU_ADAPTOR_CNTRL2(dev->rc_idx, common_phy),
+		0x01);
+	msm_pcie_write_reg(dev->phy,
+		QSERDES_RX_N_RX_EQU_ADAPTOR_CNTRL3(dev->rc_idx, common_phy),
+		0x00);
+	msm_pcie_write_reg(dev->phy,
+		QSERDES_RX_N_RX_EQU_ADAPTOR_CNTRL4(dev->rc_idx, common_phy),
+		0xDB);
+	msm_pcie_write_reg(dev->phy,
+		QSERDES_RX_N_UCDR_SO_SATURATION_AND_ENABLE(dev->rc_idx,
+		common_phy),
+		0x4B);
+	msm_pcie_write_reg(dev->phy,
+		QSERDES_RX_N_UCDR_SO_GAIN(dev->rc_idx, common_phy),
+		0x04);
+	msm_pcie_write_reg(dev->phy,
+		QSERDES_RX_N_UCDR_SO_GAIN_HALF(dev->rc_idx, common_phy),
+		0x04);
+
+	msm_pcie_write_reg(dev->phy, QSERDES_COM_CLK_EP_DIV, 0x19);
+
+	msm_pcie_write_reg(dev->phy,
+		PCIE_N_ENDPOINT_REFCLK_DRIVE(dev->rc_idx, common_phy),
+		0x04);
+	msm_pcie_write_reg(dev->phy,
+		PCIE_N_OSC_DTCT_ACTIONS(dev->rc_idx, common_phy),
+		0x00);
+	msm_pcie_write_reg(dev->phy,
+		PCIE_N_PWRUP_RESET_DLY_TIME_AUXCLK(dev->rc_idx, common_phy),
+		0x40);
+	msm_pcie_write_reg(dev->phy,
+		PCIE_N_L1SS_WAKEUP_DLY_TIME_AUXCLK_MSB(dev->rc_idx, common_phy),
+		0x00);
+	msm_pcie_write_reg(dev->phy,
+		PCIE_N_L1SS_WAKEUP_DLY_TIME_AUXCLK_LSB(dev->rc_idx, common_phy),
+		0x40);
+	msm_pcie_write_reg(dev->phy,
+		PCIE_N_LP_WAKEUP_DLY_TIME_AUXCLK_MSB(dev->rc_idx, common_phy),
+		0x00);
+	msm_pcie_write_reg(dev->phy,
+		PCIE_N_LP_WAKEUP_DLY_TIME_AUXCLK(dev->rc_idx, common_phy),
+		0x40);
+	msm_pcie_write_reg(dev->phy,
+		PCIE_N_PLL_LOCK_CHK_DLY_TIME(dev->rc_idx, common_phy),
+		0x73);
+	msm_pcie_write_reg(dev->phy,
+		QSERDES_RX_N_SIGDET_LVL(dev->rc_idx, common_phy),
+		0x99);
+	msm_pcie_write_reg(dev->phy,
+		PCIE_N_TXDEEMPH_M6DB_V0(dev->rc_idx, common_phy),
+		0x15);
+	msm_pcie_write_reg(dev->phy,
+		PCIE_N_TXDEEMPH_M3P5DB_V0(dev->rc_idx, common_phy),
+		0x0E);
+
+	msm_pcie_write_reg(dev->phy,
+		PCIE_N_SIGDET_CNTRL(dev->rc_idx, common_phy),
+		0x07);
+
+	msm_pcie_write_reg(dev->phy,
+		PCIE_N_SW_RESET(dev->rc_idx, common_phy),
+		0x00);
+	msm_pcie_write_reg(dev->phy,
+		PCIE_N_START_CONTROL(dev->rc_idx, common_phy),
+		0x03);
+}
+
+static void pcie_pcs_port_phy_init(struct msm_pcie_dev_t *dev)
+{
+}
+
+static bool pcie_phy_is_ready(struct msm_pcie_dev_t *dev)
+{
+	if (readl_relaxed(dev->phy +
+		PCIE_N_PCS_STATUS(dev->rc_idx, dev->common_phy)) & BIT(6))
+		return false;
+	else
+		return true;
+}
+#else
+static void pcie_phy_init(struct msm_pcie_dev_t *dev)
+{
+	int i;
+	struct msm_pcie_phy_info_t *phy_seq;
+
+	PCIE_DBG(dev,
+		"RC%d: Initializing 14nm QMP phy - 19.2MHz with Common Mode Clock (SSC ON)\n",
+		dev->rc_idx);
+
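+	/*
+	 * If a PHY init table was provided (typically parsed from
+	 * devicetree), replay it verbatim and skip the hard-coded
+	 * defaults below.
+	 */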
+	if (dev->phy_sequence) {
+		i =  dev->phy_len;
+		phy_seq = dev->phy_sequence;
+		while (i--) {
+			msm_pcie_write_reg(dev->phy,
+				phy_seq->offset,
+				phy_seq->val);
+			if (phy_seq->delay)
+				usleep_range(phy_seq->delay,
+					phy_seq->delay + 1);
+			phy_seq++;
+		}
+		return;
+	}
+
+	if (dev->common_phy)
+		msm_pcie_write_reg(dev->phy, PCIE_COM_POWER_DOWN_CONTROL, 0x01);
+
+	msm_pcie_write_reg(dev->phy, QSERDES_COM_BIAS_EN_CLKBUFLR_EN, 0x1C);
+	msm_pcie_write_reg(dev->phy, QSERDES_COM_CLK_ENABLE1, 0x10);
+	msm_pcie_write_reg(dev->phy, QSERDES_COM_CLK_SELECT, 0x33);
+	msm_pcie_write_reg(dev->phy, QSERDES_COM_CMN_CONFIG, 0x06);
+	msm_pcie_write_reg(dev->phy, QSERDES_COM_LOCK_CMP_EN, 0x42);
+	msm_pcie_write_reg(dev->phy, QSERDES_COM_VCO_TUNE_MAP, 0x00);
+	msm_pcie_write_reg(dev->phy, QSERDES_COM_VCO_TUNE_TIMER1, 0xFF);
+	msm_pcie_write_reg(dev->phy, QSERDES_COM_VCO_TUNE_TIMER2, 0x1F);
+	msm_pcie_write_reg(dev->phy, QSERDES_COM_HSCLK_SEL, 0x01);
+	msm_pcie_write_reg(dev->phy, QSERDES_COM_SVS_MODE_CLK_SEL, 0x01);
+	msm_pcie_write_reg(dev->phy, QSERDES_COM_CORE_CLK_EN, 0x00);
+	msm_pcie_write_reg(dev->phy, QSERDES_COM_CORECLK_DIV, 0x0A);
+	msm_pcie_write_reg(dev->phy, QSERDES_COM_BG_TIMER, 0x09);
+	msm_pcie_write_reg(dev->phy, QSERDES_COM_DEC_START_MODE0, 0x82);
+	msm_pcie_write_reg(dev->phy, QSERDES_COM_DIV_FRAC_START3_MODE0, 0x03);
+	msm_pcie_write_reg(dev->phy, QSERDES_COM_DIV_FRAC_START2_MODE0, 0x55);
+	msm_pcie_write_reg(dev->phy, QSERDES_COM_DIV_FRAC_START1_MODE0, 0x55);
+	msm_pcie_write_reg(dev->phy, QSERDES_COM_LOCK_CMP3_MODE0, 0x00);
+	msm_pcie_write_reg(dev->phy, QSERDES_COM_LOCK_CMP2_MODE0, 0x1A);
+	msm_pcie_write_reg(dev->phy, QSERDES_COM_LOCK_CMP1_MODE0, 0x0A);
+	msm_pcie_write_reg(dev->phy, QSERDES_COM_CLK_SELECT, 0x33);
+	msm_pcie_write_reg(dev->phy, QSERDES_COM_SYS_CLK_CTRL, 0x02);
+	msm_pcie_write_reg(dev->phy, QSERDES_COM_SYSCLK_BUF_ENABLE, 0x1F);
+	msm_pcie_write_reg(dev->phy, QSERDES_COM_SYSCLK_EN_SEL, 0x04);
+	msm_pcie_write_reg(dev->phy, QSERDES_COM_CP_CTRL_MODE0, 0x0B);
+	msm_pcie_write_reg(dev->phy, QSERDES_COM_PLL_RCTRL_MODE0, 0x16);
+	msm_pcie_write_reg(dev->phy, QSERDES_COM_PLL_CCTRL_MODE0, 0x28);
+	msm_pcie_write_reg(dev->phy, QSERDES_COM_INTEGLOOP_GAIN1_MODE0, 0x00);
+	msm_pcie_write_reg(dev->phy, QSERDES_COM_INTEGLOOP_GAIN0_MODE0, 0x80);
+	msm_pcie_write_reg(dev->phy, QSERDES_COM_SSC_EN_CENTER, 0x01);
+	msm_pcie_write_reg(dev->phy, QSERDES_COM_SSC_PER1, 0x31);
+	msm_pcie_write_reg(dev->phy, QSERDES_COM_SSC_PER2, 0x01);
+	msm_pcie_write_reg(dev->phy, QSERDES_COM_SSC_ADJ_PER1, 0x02);
+	msm_pcie_write_reg(dev->phy, QSERDES_COM_SSC_ADJ_PER2, 0x00);
+	msm_pcie_write_reg(dev->phy, QSERDES_COM_SSC_STEP_SIZE1, 0x2f);
+	msm_pcie_write_reg(dev->phy, QSERDES_COM_SSC_STEP_SIZE2, 0x19);
+
+	msm_pcie_write_reg(dev->phy, QSERDES_COM_RESCODE_DIV_NUM, 0x15);
+	msm_pcie_write_reg(dev->phy, QSERDES_COM_BG_TRIM, 0x0F);
+	msm_pcie_write_reg(dev->phy, QSERDES_COM_PLL_IVCO, 0x0F);
+
+	msm_pcie_write_reg(dev->phy, QSERDES_COM_CLK_EP_DIV, 0x19);
+	msm_pcie_write_reg(dev->phy, QSERDES_COM_CLK_ENABLE1, 0x10);
+
+	if (dev->phy_ver == 0x3) {
+		msm_pcie_write_reg(dev->phy, QSERDES_COM_HSCLK_SEL, 0x00);
+		msm_pcie_write_reg(dev->phy, QSERDES_COM_RESCODE_DIV_NUM, 0x40);
+	}
+
+	if (dev->common_phy) {
+		msm_pcie_write_reg(dev->phy, PCIE_COM_SW_RESET, 0x00);
+		msm_pcie_write_reg(dev->phy, PCIE_COM_START_CONTROL, 0x03);
+	}
+}
+
+static void pcie_pcs_port_phy_init(struct msm_pcie_dev_t *dev)
+{
+	int i;
+	struct msm_pcie_phy_info_t *phy_seq;
+	u8 common_phy;
+
+	if (dev->phy_ver >= 0x20)
+		return;
+
+	PCIE_DBG(dev, "RC%d: Initializing PCIe PHY Port\n", dev->rc_idx);
+
+	if (dev->common_phy)
+		common_phy = 1;
+	else
+		common_phy = 0;
+
+	if (dev->port_phy_sequence) {
+		i =  dev->port_phy_len;
+		phy_seq = dev->port_phy_sequence;
+		while (i--) {
+			msm_pcie_write_reg(dev->phy,
+				phy_seq->offset,
+				phy_seq->val);
+			if (phy_seq->delay)
+				usleep_range(phy_seq->delay,
+					phy_seq->delay + 1);
+			phy_seq++;
+		}
+		return;
+	}
+
+	msm_pcie_write_reg(dev->phy,
+		QSERDES_TX_N_HIGHZ_TRANSCEIVEREN_BIAS_DRVR_EN(dev->rc_idx,
+		common_phy), 0x45);
+	msm_pcie_write_reg(dev->phy,
+		QSERDES_TX_N_LANE_MODE(dev->rc_idx, common_phy),
+		0x06);
+
+	msm_pcie_write_reg(dev->phy,
+		QSERDES_RX_N_SIGDET_ENABLES(dev->rc_idx, common_phy),
+		0x1C);
+	msm_pcie_write_reg(dev->phy,
+		QSERDES_RX_N_SIGDET_LVL(dev->rc_idx, common_phy),
+		0x17);
+	msm_pcie_write_reg(dev->phy,
+		QSERDES_RX_N_RX_EQU_ADAPTOR_CNTRL2(dev->rc_idx, common_phy),
+		0x01);
+	msm_pcie_write_reg(dev->phy,
+		QSERDES_RX_N_RX_EQU_ADAPTOR_CNTRL3(dev->rc_idx, common_phy),
+		0x00);
+	msm_pcie_write_reg(dev->phy,
+		QSERDES_RX_N_RX_EQU_ADAPTOR_CNTRL4(dev->rc_idx, common_phy),
+		0xDB);
+	msm_pcie_write_reg(dev->phy,
+		QSERDES_RX_N_RX_BAND(dev->rc_idx, common_phy),
+		0x18);
+	msm_pcie_write_reg(dev->phy,
+		QSERDES_RX_N_UCDR_SO_GAIN(dev->rc_idx, common_phy),
+		0x04);
+	msm_pcie_write_reg(dev->phy,
+		QSERDES_RX_N_UCDR_SO_GAIN_HALF(dev->rc_idx, common_phy),
+		0x04);
+	msm_pcie_write_reg(dev->phy,
+		PCIE_N_RX_IDLE_DTCT_CNTRL(dev->rc_idx, common_phy),
+		0x4C);
+	msm_pcie_write_reg(dev->phy,
+		PCIE_N_PWRUP_RESET_DLY_TIME_AUXCLK(dev->rc_idx, common_phy),
+		0x00);
+	msm_pcie_write_reg(dev->phy,
+		PCIE_N_LP_WAKEUP_DLY_TIME_AUXCLK(dev->rc_idx, common_phy),
+		0x01);
+	msm_pcie_write_reg(dev->phy,
+		PCIE_N_PLL_LOCK_CHK_DLY_TIME(dev->rc_idx, common_phy),
+		0x05);
+	msm_pcie_write_reg(dev->phy,
+		QSERDES_RX_N_UCDR_SO_SATURATION_AND_ENABLE(dev->rc_idx,
+		common_phy), 0x4B);
+	msm_pcie_write_reg(dev->phy,
+		QSERDES_RX_N_SIGDET_DEGLITCH_CNTRL(dev->rc_idx, common_phy),
+		0x14);
+
+	msm_pcie_write_reg(dev->phy,
+		PCIE_N_ENDPOINT_REFCLK_DRIVE(dev->rc_idx, common_phy),
+		0x05);
+	msm_pcie_write_reg(dev->phy,
+		PCIE_N_POWER_DOWN_CONTROL(dev->rc_idx, common_phy),
+		0x02);
+	msm_pcie_write_reg(dev->phy,
+		PCIE_N_POWER_STATE_CONFIG4(dev->rc_idx, common_phy),
+		0x00);
+	msm_pcie_write_reg(dev->phy,
+		PCIE_N_POWER_STATE_CONFIG1(dev->rc_idx, common_phy),
+		0xA3);
+
+	if (dev->phy_ver == 0x3) {
+		msm_pcie_write_reg(dev->phy,
+			QSERDES_RX_N_SIGDET_LVL(dev->rc_idx, common_phy),
+			0x19);
+
+		msm_pcie_write_reg(dev->phy,
+			PCIE_N_TXDEEMPH_M3P5DB_V0(dev->rc_idx, common_phy),
+			0x0E);
+	}
+
+	msm_pcie_write_reg(dev->phy,
+		PCIE_N_POWER_DOWN_CONTROL(dev->rc_idx, common_phy),
+		0x03);
+	usleep_range(POWER_DOWN_DELAY_US_MIN, POWER_DOWN_DELAY_US_MAX);
+
+	msm_pcie_write_reg(dev->phy,
+		PCIE_N_SW_RESET(dev->rc_idx, common_phy),
+		0x00);
+	msm_pcie_write_reg(dev->phy,
+		PCIE_N_START_CONTROL(dev->rc_idx, common_phy),
+		0x0A);
+}
+
+static bool pcie_phy_is_ready(struct msm_pcie_dev_t *dev)
+{
+	if (dev->phy_ver >= 0x20) {
+		if (readl_relaxed(dev->phy +
+			PCIE_N_PCS_STATUS(dev->rc_idx, dev->common_phy)) &
+					BIT(6))
+			return false;
+		else
+			return true;
+	}
+
+	if (!(readl_relaxed(dev->phy + PCIE_COM_PCS_READY_STATUS) & 0x1))
+		return false;
+	else
+		return true;
+}
+#endif /* CONFIG_ARCH_MDMCALIFORNIUM */
+#endif /* CONFIG_ARCH_FSM9010 */
+
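+/*
+ * Ask the secure world (via an SCM call) to restore this PCIe
+ * controller's secure configuration, e.g. after hardware state was
+ * lost across a power collapse. Both a non-zero return code and a
+ * non-zero scm_ret are treated as failure.
+ */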
+static int msm_pcie_restore_sec_config(struct msm_pcie_dev_t *dev)
+{
+	int ret;
+	u64 scm_ret;
+
+	if (!dev) {
+		pr_err("PCIe: the input pcie dev is NULL.\n");
+		return -ENODEV;
+	}
+
+	ret = scm_restore_sec_cfg(dev->scm_dev_id, 0, &scm_ret);
+	if (ret || scm_ret) {
+		PCIE_ERR(dev,
+			"PCIe: RC%d failed(%d) to restore sec config, scm_ret=%llu\n",
+			dev->rc_idx, ret, scm_ret);
+		return ret ? ret : -EINVAL;
+	}
+
+	return 0;
+}
+
+static inline int msm_pcie_check_align(struct msm_pcie_dev_t *dev,
+						u32 offset)
+{
+	if (offset % 4) {
+		PCIE_ERR(dev,
+			"PCIe: RC%d: offset 0x%x is not correctly aligned\n",
+			dev->rc_idx, offset);
+		return MSM_PCIE_ERROR;
+	}
+
+	return 0;
+}
+
+static bool msm_pcie_confirm_linkup(struct msm_pcie_dev_t *dev,
+						bool check_sw_stts,
+						bool check_ep,
+						void __iomem *ep_conf)
+{
+	u32 val;
+
+	if (check_sw_stts && (dev->link_status != MSM_PCIE_LINK_ENABLED)) {
+		PCIE_DBG(dev, "PCIe: The link of RC %d is not enabled.\n",
+			dev->rc_idx);
+		return false;
+	}
+
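+	/*
+	 * Offset 0x80 is PCIE20_CAP_LINKCTRLSTATUS (PCIE20_CAP + 0x10);
+	 * BIT(29) is the Data Link Layer Link Active bit of the Link
+	 * Status register.
+	 */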
+	if (!(readl_relaxed(dev->dm_core + 0x80) & BIT(29))) {
+		PCIE_DBG(dev, "PCIe: The link of RC %d is not up.\n",
+			dev->rc_idx);
+		return false;
+	}
+
+	val = readl_relaxed(dev->dm_core);
+	PCIE_DBG(dev, "PCIe: device ID and vender ID of RC %d are 0x%x.\n",
+		dev->rc_idx, val);
+	if (val == PCIE_LINK_DOWN) {
+		PCIE_ERR(dev,
+			"PCIe: The link of RC %d is not really up; device ID and vender ID of RC %d are 0x%x.\n",
+			dev->rc_idx, dev->rc_idx, val);
+		return false;
+	}
+
+	if (check_ep) {
+		val = readl_relaxed(ep_conf);
+		PCIE_DBG(dev,
+			"PCIe: device ID and vender ID of EP of RC %d are 0x%x.\n",
+			dev->rc_idx, val);
+		if (val == PCIE_LINK_DOWN) {
+			PCIE_ERR(dev,
+				"PCIe: The link of RC %d is not really up; device ID and vender ID of EP of RC %d are 0x%x.\n",
+				dev->rc_idx, dev->rc_idx, val);
+			return false;
+		}
+	}
+
+	return true;
+}
+
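+/*
+ * Replay the shadowed config-space writes for the RC (rc == true) or
+ * for every endpoint that is still linked up, restoring registers
+ * lost while the link or power was down. Shadow entries still holding
+ * PCIE_CLEAR were never written and are skipped.
+ */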
+static void msm_pcie_cfg_recover(struct msm_pcie_dev_t *dev, bool rc)
+{
+	int i, j;
+	u32 val = 0;
+	u32 *shadow;
+	void *cfg = dev->conf;
+
+	for (i = 0; i < MAX_DEVICE_NUM; i++) {
+		if (!rc && !dev->pcidev_table[i].bdf)
+			break;
+		if (rc) {
+			cfg = dev->dm_core;
+			shadow = dev->rc_shadow;
+		} else {
+			if (!msm_pcie_confirm_linkup(dev, false, true,
+				dev->pcidev_table[i].conf_base))
+				continue;
+
+			shadow = dev->ep_shadow[i];
+			PCIE_DBG(dev,
+				"PCIe Device: %02x:%02x.%01x\n",
+				dev->pcidev_table[i].bdf >> 24,
+				dev->pcidev_table[i].bdf >> 19 & 0x1f,
+				dev->pcidev_table[i].bdf >> 16 & 0x07);
+		}
+		for (j = PCIE_CONF_SPACE_DW - 1; j >= 0; j--) {
+			val = shadow[j];
+			if (val != PCIE_CLEAR) {
+				PCIE_DBG3(dev,
+					"PCIe: before recovery:cfg 0x%x:0x%x\n",
+					j * 4, readl_relaxed(cfg + j * 4));
+				PCIE_DBG3(dev,
+					"PCIe: shadow_dw[%d]:cfg 0x%x:0x%x\n",
+					j, j * 4, val);
+				writel_relaxed(val, cfg + j * 4);
+				wmb(); /* ensure the write lands before readback */
+				PCIE_DBG3(dev,
+					"PCIe: after recovery:cfg 0x%x:0x%x\n\n",
+					j * 4, readl_relaxed(cfg + j * 4));
+			}
+		}
+		if (rc)
+			break;
+
+		pci_save_state(dev->pcidev_table[i].dev);
+		cfg += SZ_4K;
+	}
+}
+
+static void msm_pcie_write_mask(void __iomem *addr,
+				uint32_t clear_mask, uint32_t set_mask)
+{
+	uint32_t val;
+
+	val = (readl_relaxed(addr) & ~clear_mask) | set_mask;
+	writel_relaxed(val, addr);
+	wmb();  /* ensure data is written to hardware register */
+}
+
+static void pcie_parf_dump(struct msm_pcie_dev_t *dev)
+{
+	int i, size;
+	u32 original;
+
+	PCIE_DUMP(dev, "PCIe: RC%d PARF testbus\n", dev->rc_idx);
+
+	original = readl_relaxed(dev->parf + PCIE20_PARF_SYS_CTRL);
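+	/* bits [23:16] of PARF_SYS_CTRL select which testbus to sample */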
+	for (i = 1; i <= 0x1A; i++) {
+		msm_pcie_write_mask(dev->parf + PCIE20_PARF_SYS_CTRL,
+				0xFF0000, i << 16);
+		PCIE_DUMP(dev,
+			"RC%d: PARF_SYS_CTRL: 0%08x PARF_TEST_BUS: 0%08x\n",
+			dev->rc_idx,
+			readl_relaxed(dev->parf + PCIE20_PARF_SYS_CTRL),
+			readl_relaxed(dev->parf + PCIE20_PARF_TEST_BUS));
+	}
+	writel_relaxed(original, dev->parf + PCIE20_PARF_SYS_CTRL);
+
+	PCIE_DUMP(dev, "PCIe: RC%d PARF register dump\n", dev->rc_idx);
+
+	size = resource_size(dev->res[MSM_PCIE_RES_PARF].resource);
+	for (i = 0; i < size; i += 32) {
+		PCIE_DUMP(dev,
+			"RC%d: 0x%04x %08x %08x %08x %08x %08x %08x %08x %08x\n",
+			dev->rc_idx, i,
+			readl_relaxed(dev->parf + i),
+			readl_relaxed(dev->parf + (i + 4)),
+			readl_relaxed(dev->parf + (i + 8)),
+			readl_relaxed(dev->parf + (i + 12)),
+			readl_relaxed(dev->parf + (i + 16)),
+			readl_relaxed(dev->parf + (i + 20)),
+			readl_relaxed(dev->parf + (i + 24)),
+			readl_relaxed(dev->parf + (i + 28)));
+	}
+}
+
+static void msm_pcie_show_status(struct msm_pcie_dev_t *dev)
+{
+	PCIE_DBG_FS(dev, "PCIe: RC%d is %s enumerated\n",
+		dev->rc_idx, dev->enumerated ? "" : "not");
+	PCIE_DBG_FS(dev, "PCIe: link is %s\n",
+		(dev->link_status == MSM_PCIE_LINK_ENABLED)
+		? "enabled" : "disabled");
+	PCIE_DBG_FS(dev, "cfg_access is %s allowed\n",
+		dev->cfg_access ? "" : "not");
+	PCIE_DBG_FS(dev, "use_msi is %d\n",
+		dev->use_msi);
+	PCIE_DBG_FS(dev, "use_pinctrl is %d\n",
+		dev->use_pinctrl);
+	PCIE_DBG_FS(dev, "use_19p2mhz_aux_clk is %d\n",
+		dev->use_19p2mhz_aux_clk);
+	PCIE_DBG_FS(dev, "user_suspend is %d\n",
+		dev->user_suspend);
+	PCIE_DBG_FS(dev, "num_ep: %d\n",
+		dev->num_ep);
+	PCIE_DBG_FS(dev, "num_active_ep: %d\n",
+		dev->num_active_ep);
+	PCIE_DBG_FS(dev, "pending_ep_reg: %s\n",
+		dev->pending_ep_reg ? "true" : "false");
+	PCIE_DBG_FS(dev, "phy_len is %d",
+		dev->phy_len);
+	PCIE_DBG_FS(dev, "port_phy_len is %d",
+		dev->port_phy_len);
+	PCIE_DBG_FS(dev, "disable_pc is %d",
+		dev->disable_pc);
+	PCIE_DBG_FS(dev, "l0s_supported is %s supported\n",
+		dev->l0s_supported ? "" : "not");
+	PCIE_DBG_FS(dev, "l1_supported is %s supported\n",
+		dev->l1_supported ? "" : "not");
+	PCIE_DBG_FS(dev, "l1ss_supported is %s supported\n",
+		dev->l1ss_supported ? "" : "not");
+	PCIE_DBG_FS(dev, "common_clk_en is %d\n",
+		dev->common_clk_en);
+	PCIE_DBG_FS(dev, "clk_power_manage_en is %d\n",
+		dev->clk_power_manage_en);
+	PCIE_DBG_FS(dev, "aux_clk_sync is %d\n",
+		dev->aux_clk_sync);
+	PCIE_DBG_FS(dev, "AER is %s enable\n",
+		dev->aer_enable ? "" : "not");
+	PCIE_DBG_FS(dev, "ext_ref_clk is %d\n",
+		dev->ext_ref_clk);
+	PCIE_DBG_FS(dev, "boot_option is 0x%x\n",
+		dev->boot_option);
+	PCIE_DBG_FS(dev, "phy_ver is %d\n",
+		dev->phy_ver);
+	PCIE_DBG_FS(dev, "drv_ready is %d\n",
+		dev->drv_ready);
+	PCIE_DBG_FS(dev, "linkdown_panic is %d\n",
+		dev->linkdown_panic);
+	PCIE_DBG_FS(dev, "the link is %s suspending\n",
+		dev->suspending ? "" : "not");
+	PCIE_DBG_FS(dev, "shadow is %s enabled\n",
+		dev->shadow_en ? "" : "not");
+	PCIE_DBG_FS(dev, "the power of RC is %s on\n",
+		dev->power_on ? "" : "not");
+	PCIE_DBG_FS(dev, "msi_gicm_addr: 0x%x\n",
+		dev->msi_gicm_addr);
+	PCIE_DBG_FS(dev, "msi_gicm_base: 0x%x\n",
+		dev->msi_gicm_base);
+	PCIE_DBG_FS(dev, "bus_client: %d\n",
+		dev->bus_client);
+	PCIE_DBG_FS(dev, "current short bdf: %d\n",
+		dev->current_short_bdf);
+	PCIE_DBG_FS(dev, "smmu does %s exist\n",
+		dev->smmu_exist ? "" : "not");
+	PCIE_DBG_FS(dev, "smmu_sid_base: 0x%x\n",
+		dev->smmu_sid_base);
+	PCIE_DBG_FS(dev, "n_fts: %d\n",
+		dev->n_fts);
+	PCIE_DBG_FS(dev, "common_phy: %d\n",
+		dev->common_phy);
+	PCIE_DBG_FS(dev, "ep_latency: %dms\n",
+		dev->ep_latency);
+	PCIE_DBG_FS(dev, "switch_latency: %dms\n",
+		dev->switch_latency);
+	PCIE_DBG_FS(dev, "wr_halt_size: 0x%x\n",
+		dev->wr_halt_size);
+	PCIE_DBG_FS(dev, "cpl_timeout: 0x%x\n",
+		dev->cpl_timeout);
+	PCIE_DBG_FS(dev, "current_bdf: 0x%x\n",
+		dev->current_bdf);
+	PCIE_DBG_FS(dev, "perst_delay_us_min: %dus\n",
+		dev->perst_delay_us_min);
+	PCIE_DBG_FS(dev, "perst_delay_us_max: %dus\n",
+		dev->perst_delay_us_max);
+	PCIE_DBG_FS(dev, "tlp_rd_size: 0x%x\n",
+		dev->tlp_rd_size);
+	PCIE_DBG_FS(dev, "rc_corr_counter: %lu\n",
+		dev->rc_corr_counter);
+	PCIE_DBG_FS(dev, "rc_non_fatal_counter: %lu\n",
+		dev->rc_non_fatal_counter);
+	PCIE_DBG_FS(dev, "rc_fatal_counter: %lu\n",
+		dev->rc_fatal_counter);
+	PCIE_DBG_FS(dev, "ep_corr_counter: %lu\n",
+		dev->ep_corr_counter);
+	PCIE_DBG_FS(dev, "ep_non_fatal_counter: %lu\n",
+		dev->ep_non_fatal_counter);
+	PCIE_DBG_FS(dev, "ep_fatal_counter: %lu\n",
+		dev->ep_fatal_counter);
+	PCIE_DBG_FS(dev, "linkdown_counter: %lu\n",
+		dev->linkdown_counter);
+	PCIE_DBG_FS(dev, "wake_counter: %lu\n",
+		dev->wake_counter);
+	PCIE_DBG_FS(dev, "link_turned_on_counter: %lu\n",
+		dev->link_turned_on_counter);
+	PCIE_DBG_FS(dev, "link_turned_off_counter: %lu\n",
+		dev->link_turned_off_counter);
+}
+
+static void msm_pcie_shadow_dump(struct msm_pcie_dev_t *dev, bool rc)
+{
+	int i, j;
+	u32 val = 0;
+	u32 *shadow;
+
+	for (i = 0; i < MAX_DEVICE_NUM; i++) {
+		if (!rc && !dev->pcidev_table[i].bdf)
+			break;
+		if (rc) {
+			shadow = dev->rc_shadow;
+		} else {
+			shadow = dev->ep_shadow[i];
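+			/*
+			 * bdf packs bus[31:24], device[23:19] and
+			 * function[18:16]; the shifts below just unpack
+			 * those fields for display.
+			 */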
+			PCIE_DBG_FS(dev, "PCIe Device: %02x:%02x.%01x\n",
+				dev->pcidev_table[i].bdf >> 24,
+				dev->pcidev_table[i].bdf >> 19 & 0x1f,
+				dev->pcidev_table[i].bdf >> 16 & 0x07);
+		}
+		for (j = 0; j < PCIE_CONF_SPACE_DW; j++) {
+			val = shadow[j];
+			if (val != PCIE_CLEAR) {
+				PCIE_DBG_FS(dev,
+					"PCIe: shadow_dw[%d]:cfg 0x%x:0x%x\n",
+					j, j * 4, val);
+			}
+		}
+		if (rc)
+			break;
+	}
+}
+
+static void msm_pcie_sel_debug_testcase(struct msm_pcie_dev_t *dev,
+					u32 testcase)
+{
+	int ret, i;
+	u32 base_sel_size = 0;
+	u32 val = 0;
+	u32 current_offset = 0;
+	u32 ep_l1sub_ctrl1_offset = 0;
+	u32 ep_l1sub_cap_reg1_offset = 0;
+	u32 ep_link_ctrlstts_offset = 0;
+	u32 ep_dev_ctrl2stts2_offset = 0;
+
+	if (testcase >= 5 && testcase <= 10) {
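+		/*
+		 * Walk the endpoint's standard capability list looking for
+		 * the PCI Express capability; the Link Control/Status and
+		 * Device Control 2/Status 2 registers sit at offsets 0x10
+		 * and 0x28 from its base.
+		 */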
+		current_offset =
+			readl_relaxed(dev->conf + PCIE_CAP_PTR_OFFSET) & 0xff;
+
+		while (current_offset) {
+			val = readl_relaxed(dev->conf + current_offset);
+			if ((val & 0xff) == PCIE20_CAP_ID) {
+				ep_link_ctrlstts_offset = current_offset +
+								0x10;
+				ep_dev_ctrl2stts2_offset = current_offset +
+								0x28;
+				break;
+			}
+			current_offset = (val >> 8) & 0xff;
+		}
+
+		if (!ep_link_ctrlstts_offset)
+			PCIE_DBG(dev,
+				"RC%d endpoint does not support PCIe capability registers\n",
+				dev->rc_idx);
+		else
+			PCIE_DBG(dev,
+				"RC%d: ep_link_ctrlstts_offset: 0x%x\n",
+				dev->rc_idx, ep_link_ctrlstts_offset);
+	}
+
+	switch (testcase) {
+	case 0: /* output status */
+		PCIE_DBG_FS(dev, "\n\nPCIe: Status for RC%d:\n",
+			dev->rc_idx);
+		msm_pcie_show_status(dev);
+		break;
+	case 1: /* disable link */
+		PCIE_DBG_FS(dev,
+			"\n\nPCIe: RC%d: disable link\n\n", dev->rc_idx);
+		ret = msm_pcie_pm_control(MSM_PCIE_SUSPEND, 0,
+			dev->dev, NULL,
+			MSM_PCIE_CONFIG_NO_CFG_RESTORE);
+		if (ret)
+			PCIE_DBG_FS(dev, "PCIe:%s:failed to disable link\n",
+				__func__);
+		else
+			PCIE_DBG_FS(dev, "PCIe:%s:disabled link\n",
+				__func__);
+		break;
+	case 2: /* enable link and recover config space for RC and EP */
+		PCIE_DBG_FS(dev,
+			"\n\nPCIe: RC%d: enable link and recover config space\n\n",
+			dev->rc_idx);
+		ret = msm_pcie_pm_control(MSM_PCIE_RESUME, 0,
+			dev->dev, NULL,
+			MSM_PCIE_CONFIG_NO_CFG_RESTORE);
+		if (ret)
+			PCIE_DBG_FS(dev, "PCIe:%s:failed to enable link\n",
+				__func__);
+		else {
+			PCIE_DBG_FS(dev, "PCIe:%s:enabled link\n", __func__);
+			msm_pcie_recover_config(dev->dev);
+		}
+		break;
+	case 3: /*
+		 * disable and enable link, recover config space for
+		 * RC and EP
+		 */
+		PCIE_DBG_FS(dev,
+			"\n\nPCIe: RC%d: disable and enable link then recover config space\n\n",
+			dev->rc_idx);
+		ret = msm_pcie_pm_control(MSM_PCIE_SUSPEND, 0,
+			dev->dev, NULL,
+			MSM_PCIE_CONFIG_NO_CFG_RESTORE);
+		if (ret)
+			PCIE_DBG_FS(dev, "PCIe:%s:failed to disable link\n",
+				__func__);
+		else
+			PCIE_DBG_FS(dev, "PCIe:%s:disabled link\n", __func__);
+		ret = msm_pcie_pm_control(MSM_PCIE_RESUME, 0,
+			dev->dev, NULL,
+			MSM_PCIE_CONFIG_NO_CFG_RESTORE);
+		if (ret)
+			PCIE_DBG_FS(dev, "PCIe:%s:failed to enable link\n",
+				__func__);
+		else {
+			PCIE_DBG_FS(dev, "PCIe:%s:enabled link\n", __func__);
+			msm_pcie_recover_config(dev->dev);
+		}
+		break;
+	case 4: /* dump shadow registers for RC and EP */
+		PCIE_DBG_FS(dev,
+			"\n\nPCIe: RC%d: dumping RC shadow registers\n",
+			dev->rc_idx);
+		msm_pcie_shadow_dump(dev, true);
+
+		PCIE_DBG_FS(dev,
+			"\n\nPCIe: RC%d: dumping EP shadow registers\n",
+			dev->rc_idx);
+		msm_pcie_shadow_dump(dev, false);
+		break;
+	case 5: /* disable L0s */
+		PCIE_DBG_FS(dev, "\n\nPCIe: RC%d: disable L0s\n\n",
+			dev->rc_idx);
+		msm_pcie_write_mask(dev->dm_core +
+				PCIE20_CAP_LINKCTRLSTATUS,
+				BIT(0), 0);
+		msm_pcie_write_mask(dev->conf +
+				ep_link_ctrlstts_offset,
+				BIT(0), 0);
+		if (dev->shadow_en) {
+			dev->rc_shadow[PCIE20_CAP_LINKCTRLSTATUS / 4] =
+					readl_relaxed(dev->dm_core +
+					PCIE20_CAP_LINKCTRLSTATUS);
+			dev->ep_shadow[0][ep_link_ctrlstts_offset / 4] =
+					readl_relaxed(dev->conf +
+					ep_link_ctrlstts_offset);
+		}
+		PCIE_DBG_FS(dev, "PCIe: RC's CAP_LINKCTRLSTATUS:0x%x\n",
+			readl_relaxed(dev->dm_core +
+			PCIE20_CAP_LINKCTRLSTATUS));
+		PCIE_DBG_FS(dev, "PCIe: EP's CAP_LINKCTRLSTATUS:0x%x\n",
+			readl_relaxed(dev->conf +
+			ep_link_ctrlstts_offset));
+		break;
+	case 6: /* enable L0s */
+		PCIE_DBG_FS(dev, "\n\nPCIe: RC%d: enable L0s\n\n",
+			dev->rc_idx);
+		msm_pcie_write_mask(dev->dm_core +
+				PCIE20_CAP_LINKCTRLSTATUS,
+				0, BIT(0));
+		msm_pcie_write_mask(dev->conf +
+				ep_link_ctrlstts_offset,
+				0, BIT(0));
+		if (dev->shadow_en) {
+			dev->rc_shadow[PCIE20_CAP_LINKCTRLSTATUS / 4] =
+					readl_relaxed(dev->dm_core +
+					PCIE20_CAP_LINKCTRLSTATUS);
+			dev->ep_shadow[0][ep_link_ctrlstts_offset / 4] =
+					readl_relaxed(dev->conf +
+					ep_link_ctrlstts_offset);
+		}
+		PCIE_DBG_FS(dev, "PCIe: RC's CAP_LINKCTRLSTATUS:0x%x\n",
+			readl_relaxed(dev->dm_core +
+			PCIE20_CAP_LINKCTRLSTATUS));
+		PCIE_DBG_FS(dev, "PCIe: EP's CAP_LINKCTRLSTATUS:0x%x\n",
+			readl_relaxed(dev->conf +
+			ep_link_ctrlstts_offset));
+		break;
+	case 7: /* disable L1 */
+		PCIE_DBG_FS(dev, "\n\nPCIe: RC%d: disable L1\n\n",
+			dev->rc_idx);
+		msm_pcie_write_mask(dev->dm_core +
+				PCIE20_CAP_LINKCTRLSTATUS,
+				BIT(1), 0);
+		msm_pcie_write_mask(dev->conf +
+				ep_link_ctrlstts_offset,
+				BIT(1), 0);
+		if (dev->shadow_en) {
+			dev->rc_shadow[PCIE20_CAP_LINKCTRLSTATUS / 4] =
+					readl_relaxed(dev->dm_core +
+					PCIE20_CAP_LINKCTRLSTATUS);
+			dev->ep_shadow[0][ep_link_ctrlstts_offset / 4] =
+					readl_relaxed(dev->conf +
+					ep_link_ctrlstts_offset);
+		}
+		PCIE_DBG_FS(dev, "PCIe: RC's CAP_LINKCTRLSTATUS:0x%x\n",
+			readl_relaxed(dev->dm_core +
+			PCIE20_CAP_LINKCTRLSTATUS));
+		PCIE_DBG_FS(dev, "PCIe: EP's CAP_LINKCTRLSTATUS:0x%x\n",
+			readl_relaxed(dev->conf +
+			ep_link_ctrlstts_offset));
+		break;
+	case 8: /* enable L1 */
+		PCIE_DBG_FS(dev, "\n\nPCIe: RC%d: enable L1\n\n",
+			dev->rc_idx);
+		msm_pcie_write_mask(dev->dm_core +
+				PCIE20_CAP_LINKCTRLSTATUS,
+				0, BIT(1));
+		msm_pcie_write_mask(dev->conf +
+				ep_link_ctrlstts_offset,
+				0, BIT(1));
+		if (dev->shadow_en) {
+			dev->rc_shadow[PCIE20_CAP_LINKCTRLSTATUS / 4] =
+					readl_relaxed(dev->dm_core +
+					PCIE20_CAP_LINKCTRLSTATUS);
+			dev->ep_shadow[0][ep_link_ctrlstts_offset / 4] =
+					readl_relaxed(dev->conf +
+					ep_link_ctrlstts_offset);
+		}
+		PCIE_DBG_FS(dev, "PCIe: RC's CAP_LINKCTRLSTATUS:0x%x\n",
+			readl_relaxed(dev->dm_core +
+			PCIE20_CAP_LINKCTRLSTATUS));
+		PCIE_DBG_FS(dev, "PCIe: EP's CAP_LINKCTRLSTATUS:0x%x\n",
+			readl_relaxed(dev->conf +
+			ep_link_ctrlstts_offset));
+		break;
+	case 9: /* disable L1ss */
+		PCIE_DBG_FS(dev, "\n\nPCIe: RC%d: disable L1ss\n\n",
+			dev->rc_idx);
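+		/*
+		 * Walk the extended capability list: bits [15:0] of each
+		 * header hold the capability ID and bits [31:20] the
+		 * next-capability pointer.
+		 */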
+		current_offset = PCIE_EXT_CAP_OFFSET;
+		while (current_offset) {
+			val = readl_relaxed(dev->conf + current_offset);
+			if ((val & 0xffff) == L1SUB_CAP_ID) {
+				ep_l1sub_ctrl1_offset =
+						current_offset + 0x8;
+				break;
+			}
+			current_offset = val >> 20;
+		}
+		if (!ep_l1sub_ctrl1_offset) {
+			PCIE_DBG_FS(dev,
+				"PCIe: RC%d endpoint does not support l1ss registers\n",
+				dev->rc_idx);
+			break;
+		}
+
+		PCIE_DBG_FS(dev, "PCIe: RC%d: ep_l1sub_ctrl1_offset: 0x%x\n",
+				dev->rc_idx, ep_l1sub_ctrl1_offset);
+
+		msm_pcie_write_reg_field(dev->dm_core,
+					PCIE20_L1SUB_CONTROL1,
+					0xf, 0);
+		msm_pcie_write_mask(dev->dm_core +
+					PCIE20_DEVICE_CONTROL2_STATUS2,
+					BIT(10), 0);
+		msm_pcie_write_reg_field(dev->conf,
+					ep_l1sub_ctrl1_offset,
+					0xf, 0);
+		msm_pcie_write_mask(dev->conf +
+					ep_dev_ctrl2stts2_offset,
+					BIT(10), 0);
+		if (dev->shadow_en) {
+			dev->rc_shadow[PCIE20_L1SUB_CONTROL1 / 4] =
+				readl_relaxed(dev->dm_core +
+				PCIE20_L1SUB_CONTROL1);
+			dev->rc_shadow[PCIE20_DEVICE_CONTROL2_STATUS2 / 4] =
+				readl_relaxed(dev->dm_core +
+				PCIE20_DEVICE_CONTROL2_STATUS2);
+			dev->ep_shadow[0][ep_l1sub_ctrl1_offset / 4] =
+				readl_relaxed(dev->conf +
+				ep_l1sub_ctrl1_offset);
+			dev->ep_shadow[0][ep_dev_ctrl2stts2_offset / 4] =
+				readl_relaxed(dev->conf +
+				ep_dev_ctrl2stts2_offset);
+		}
+		PCIE_DBG_FS(dev, "PCIe: RC's L1SUB_CONTROL1:0x%x\n",
+			readl_relaxed(dev->dm_core +
+			PCIE20_L1SUB_CONTROL1));
+		PCIE_DBG_FS(dev, "PCIe: RC's DEVICE_CONTROL2_STATUS2:0x%x\n",
+			readl_relaxed(dev->dm_core +
+			PCIE20_DEVICE_CONTROL2_STATUS2));
+		PCIE_DBG_FS(dev, "PCIe: EP's L1SUB_CONTROL1:0x%x\n",
+			readl_relaxed(dev->conf +
+			ep_l1sub_ctrl1_offset));
+		PCIE_DBG_FS(dev, "PCIe: EP's DEVICE_CONTROL2_STATUS2:0x%x\n",
+			readl_relaxed(dev->conf +
+			ep_dev_ctrl2stts2_offset));
+		break;
+	case 10: /* enable L1ss */
+		PCIE_DBG_FS(dev, "\n\nPCIe: RC%d: enable L1ss\n\n",
+			dev->rc_idx);
+		current_offset = PCIE_EXT_CAP_OFFSET;
+		while (current_offset) {
+			val = readl_relaxed(dev->conf + current_offset);
+			if ((val & 0xffff) == L1SUB_CAP_ID) {
+				ep_l1sub_cap_reg1_offset =
+						current_offset + 0x4;
+				ep_l1sub_ctrl1_offset =
+						current_offset + 0x8;
+				break;
+			}
+			current_offset = val >> 20;
+		}
+		if (!ep_l1sub_ctrl1_offset) {
+			PCIE_DBG_FS(dev,
+				"PCIe: RC%d endpoint does not support l1ss registers\n",
+				dev->rc_idx);
+			break;
+		}
+
+		val = readl_relaxed(dev->conf +
+				ep_l1sub_cap_reg1_offset);
+
+		PCIE_DBG_FS(dev, "PCIe: EP's L1SUB_CAPABILITY_REG_1: 0x%x\n",
+			val);
+		PCIE_DBG_FS(dev, "PCIe: RC%d: ep_l1sub_ctrl1_offset: 0x%x\n",
+			dev->rc_idx, ep_l1sub_ctrl1_offset);
+
+		val &= 0xf;
+
+		msm_pcie_write_reg_field(dev->dm_core,
+					PCIE20_L1SUB_CONTROL1,
+					0xf, val);
+		msm_pcie_write_mask(dev->dm_core +
+					PCIE20_DEVICE_CONTROL2_STATUS2,
+					0, BIT(10));
+		msm_pcie_write_reg_field(dev->conf,
+					ep_l1sub_ctrl1_offset,
+					0xf, val);
+		msm_pcie_write_mask(dev->conf +
+					ep_dev_ctrl2stts2_offset,
+					0, BIT(10));
+		if (dev->shadow_en) {
+			dev->rc_shadow[PCIE20_L1SUB_CONTROL1 / 4] =
+				readl_relaxed(dev->dm_core +
+					PCIE20_L1SUB_CONTROL1);
+			dev->rc_shadow[PCIE20_DEVICE_CONTROL2_STATUS2 / 4] =
+				readl_relaxed(dev->dm_core +
+				PCIE20_DEVICE_CONTROL2_STATUS2);
+			dev->ep_shadow[0][ep_l1sub_ctrl1_offset / 4] =
+				readl_relaxed(dev->conf +
+				ep_l1sub_ctrl1_offset);
+			dev->ep_shadow[0][ep_dev_ctrl2stts2_offset / 4] =
+				readl_relaxed(dev->conf +
+				ep_dev_ctrl2stts2_offset);
+		}
+		PCIE_DBG_FS(dev, "PCIe: RC's L1SUB_CONTROL1:0x%x\n",
+			readl_relaxed(dev->dm_core +
+			PCIE20_L1SUB_CONTROL1));
+		PCIE_DBG_FS(dev, "PCIe: RC's DEVICE_CONTROL2_STATUS2:0x%x\n",
+			readl_relaxed(dev->dm_core +
+			PCIE20_DEVICE_CONTROL2_STATUS2));
+		PCIE_DBG_FS(dev, "PCIe: EP's L1SUB_CONTROL1:0x%x\n",
+			readl_relaxed(dev->conf +
+			ep_l1sub_ctrl1_offset));
+		PCIE_DBG_FS(dev, "PCIe: EP's DEVICE_CONTROL2_STATUS2:0x%x\n",
+			readl_relaxed(dev->conf +
+			ep_dev_ctrl2stts2_offset));
+		break;
+	case 11: /* enumerate PCIe */
+		PCIE_DBG_FS(dev, "\n\nPCIe: attempting to enumerate RC%d\n\n",
+			dev->rc_idx);
+		if (dev->enumerated)
+			PCIE_DBG_FS(dev, "PCIe: RC%d is already enumerated\n",
+				dev->rc_idx);
+		else {
+			if (!msm_pcie_enumerate(dev->rc_idx))
+				PCIE_DBG_FS(dev,
+					"PCIe: RC%d is successfully enumerated\n",
+					dev->rc_idx);
+			else
+				PCIE_DBG_FS(dev,
+					"PCIe: RC%d enumeration failed\n",
+					dev->rc_idx);
+		}
+		break;
+	case 12: /* write a value to a register */
+		PCIE_DBG_FS(dev,
+			"\n\nPCIe: RC%d: writing a value to a register\n\n",
+			dev->rc_idx);
+
+		if (!base_sel) {
+			PCIE_DBG_FS(dev, "Invalid base_sel: 0x%x\n", base_sel);
+			break;
+		}
+
+		if (((base_sel - 1) >= MSM_PCIE_MAX_RES) ||
+					(!dev->res[base_sel - 1].resource)) {
+			PCIE_DBG_FS(dev, "PCIe: RC%d Resource does not exist\n",
+								dev->rc_idx);
+			break;
+		}
+
+		PCIE_DBG_FS(dev,
+			"base: %s: 0x%p\nwr_offset: 0x%x\nwr_mask: 0x%x\nwr_value: 0x%x\n",
+			dev->res[base_sel - 1].name,
+			dev->res[base_sel - 1].base,
+			wr_offset, wr_mask, wr_value);
+
+		base_sel_size = resource_size(dev->res[base_sel - 1].resource);
+
+		if (wr_offset > base_sel_size - 4 ||
+			msm_pcie_check_align(dev, wr_offset))
+			PCIE_DBG_FS(dev,
+				"PCIe: RC%d: Invalid wr_offset: 0x%x. wr_offset should be no more than 0x%x\n",
+				dev->rc_idx, wr_offset, base_sel_size - 4);
+		else
+			msm_pcie_write_reg_field(dev->res[base_sel - 1].base,
+				wr_offset, wr_mask, wr_value);
+
+		break;
+	case 13: /* dump all registers of base_sel */
+		if (!base_sel) {
+			PCIE_DBG_FS(dev, "Invalid base_sel: 0x%x\n", base_sel);
+			break;
+		}
+
+		if (((base_sel - 1) >= MSM_PCIE_MAX_RES) ||
+					(!dev->res[base_sel - 1].resource)) {
+			PCIE_DBG_FS(dev, "PCIe: RC%d Resource does not exist\n",
+								dev->rc_idx);
+			break;
+		}
+
+		if (base_sel - 1 == MSM_PCIE_RES_PARF) {
+			pcie_parf_dump(dev);
+			break;
+		} else if (base_sel - 1 == MSM_PCIE_RES_PHY) {
+			pcie_phy_dump(dev);
+			break;
+		} else if (base_sel - 1 == MSM_PCIE_RES_CONF) {
+			base_sel_size = 0x1000;
+		} else {
+			base_sel_size = resource_size(
+				dev->res[base_sel - 1].resource);
+		}
+
+		PCIE_DBG_FS(dev, "\n\nPCIe: Dumping %s Registers for RC%d\n\n",
+			dev->res[base_sel - 1].name, dev->rc_idx);
+
+		for (i = 0; i < base_sel_size; i += 32) {
+			PCIE_DBG_FS(dev,
+			"0x%04x %08x %08x %08x %08x %08x %08x %08x %08x\n",
+			i, readl_relaxed(dev->res[base_sel - 1].base + i),
+			readl_relaxed(dev->res[base_sel - 1].base + (i + 4)),
+			readl_relaxed(dev->res[base_sel - 1].base + (i + 8)),
+			readl_relaxed(dev->res[base_sel - 1].base + (i + 12)),
+			readl_relaxed(dev->res[base_sel - 1].base + (i + 16)),
+			readl_relaxed(dev->res[base_sel - 1].base + (i + 20)),
+			readl_relaxed(dev->res[base_sel - 1].base + (i + 24)),
+			readl_relaxed(dev->res[base_sel - 1].base + (i + 28)));
+		}
+		break;
+	default:
+		PCIE_DBG_FS(dev, "Invalid testcase: %d.\n", testcase);
+		break;
+	}
+}
+
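+/*
+ * Entry point for other kernel code to drive the same testcases as the
+ * debugfs "case" node. For instance, a hypothetical caller could dump the
+ * status of its own root complex with:
+ *
+ *	msm_pcie_debug_info(pdev, 0, 0, 0, 0, 0);
+ *
+ * base is only consumed for options 12 and 13; offset, mask and value
+ * only for option 12.
+ */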
+int msm_pcie_debug_info(struct pci_dev *dev, u32 option, u32 base,
+			u32 offset, u32 mask, u32 value)
+{
+	int ret = 0;
+	struct msm_pcie_dev_t *pdev = NULL;
+
+	if (!dev) {
+		pr_err("PCIe: the input pci dev is NULL.\n");
+		return -ENODEV;
+	}
+
+	pdev = PCIE_BUS_PRIV_DATA(dev->bus);
+
+	if (option == 12 || option == 13) {
+		if (!base || base > 5) {
+			PCIE_DBG_FS(pdev, "Invalid base_sel: 0x%x\n", base);
+			PCIE_DBG_FS(pdev,
+				"PCIe: base_sel is still 0x%x\n", base_sel);
+			return -EINVAL;
+		} else {
+			base_sel = base;
+			PCIE_DBG_FS(pdev,
+				"PCIe: base_sel is now 0x%x\n", base_sel);
+		}
+
+		if (option == 12) {
+			wr_offset = offset;
+			wr_mask = mask;
+			wr_value = value;
+
+			PCIE_DBG_FS(pdev,
+				"PCIe: wr_offset is now 0x%x\n", wr_offset);
+			PCIE_DBG_FS(pdev,
+				"PCIe: wr_mask is now 0x%x\n", wr_mask);
+			PCIE_DBG_FS(pdev,
+				"PCIe: wr_value is now 0x%x\n", wr_value);
+		}
+	}
+
+	rc_sel = 1 << pdev->rc_idx;
+
+	msm_pcie_sel_debug_testcase(pdev, option);
+
+	return ret;
+}
+EXPORT_SYMBOL(msm_pcie_debug_info);
+
+#ifdef CONFIG_SYSFS
+static ssize_t msm_pcie_enumerate_store(struct device *dev,
+					struct device_attribute *attr,
+					const char *buf, size_t count)
+{
+	struct msm_pcie_dev_t *pcie_dev = (struct msm_pcie_dev_t *)
+						dev_get_drvdata(dev);
+
+	if (pcie_dev)
+		msm_pcie_enumerate(pcie_dev->rc_idx);
+
+	return count;
+}
+
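+/*
+ * Writing anything to the "enumerate" attribute triggers enumeration of
+ * this root complex; the written value itself is ignored.
+ */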
+static DEVICE_ATTR(enumerate, S_IWUSR, NULL, msm_pcie_enumerate_store);
+
+static void msm_pcie_sysfs_init(struct msm_pcie_dev_t *dev)
+{
+	int ret;
+
+	ret = device_create_file(&dev->pdev->dev, &dev_attr_enumerate);
+	if (ret)
+		PCIE_DBG_FS(dev,
+			"RC%d: failed to create sysfs enumerate node\n",
+			dev->rc_idx);
+}
+
+static void msm_pcie_sysfs_exit(struct msm_pcie_dev_t *dev)
+{
+	if (dev->pdev)
+		device_remove_file(&dev->pdev->dev, &dev_attr_enumerate);
+}
+#else
+static void msm_pcie_sysfs_init(struct msm_pcie_dev_t *dev)
+{
+}
+
+static void msm_pcie_sysfs_exit(struct msm_pcie_dev_t *dev)
+{
+}
+#endif
+
+#ifdef CONFIG_DEBUG_FS
+static struct dentry *dent_msm_pcie;
+static struct dentry *dfile_rc_sel;
+static struct dentry *dfile_case;
+static struct dentry *dfile_base_sel;
+static struct dentry *dfile_linkdown_panic;
+static struct dentry *dfile_wr_offset;
+static struct dentry *dfile_wr_mask;
+static struct dentry *dfile_wr_value;
+static struct dentry *dfile_boot_option;
+static struct dentry *dfile_aer_enable;
+static struct dentry *dfile_corr_counter_limit;
+
+static u32 rc_sel_max;
+
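+/*
+ * The debugfs nodes created in msm_pcie_debugfs_init() below drive
+ * msm_pcie_sel_debug_testcase(). A typical session (assuming debugfs is
+ * mounted at /sys/kernel/debug) might look like:
+ *
+ *	echo 1 > /sys/kernel/debug/pci-msm/rc_sel	(bitmask: test RC0)
+ *	echo 0 > /sys/kernel/debug/pci-msm/case		(run testcase 0)
+ *
+ * Note that all of the write handlers below parse their input as decimal,
+ * even though the resulting values are echoed back in hex.
+ */
+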
+static ssize_t msm_pcie_cmd_debug(struct file *file,
+				const char __user *buf,
+				size_t count, loff_t *ppos)
+{
+	unsigned long ret;
+	char str[MAX_MSG_LEN];
+	unsigned int testcase = 0;
+	int i;
+	u32 size = sizeof(str) < count ? sizeof(str) : count;
+
+	memset(str, 0, size);
+	ret = copy_from_user(str, buf, size);
+	if (ret)
+		return -EFAULT;
+
+	for (i = 0; i < size && (str[i] >= '0') && (str[i] <= '9'); ++i)
+		testcase = (testcase * 10) + (str[i] - '0');
+
+	if (!rc_sel)
+		rc_sel = 1;
+
+	pr_alert("PCIe: TEST: %d\n", testcase);
+
+	for (i = 0; i < MAX_RC_NUM; i++) {
+		if (!((rc_sel >> i) & 0x1))
+			continue;
+		msm_pcie_sel_debug_testcase(&msm_pcie_dev[i], testcase);
+	}
+
+	return count;
+}
+
+const struct file_operations msm_pcie_cmd_debug_ops = {
+	.write = msm_pcie_cmd_debug,
+};
+
+static ssize_t msm_pcie_set_rc_sel(struct file *file,
+				const char __user *buf,
+				size_t count, loff_t *ppos)
+{
+	unsigned long ret;
+	char str[MAX_MSG_LEN];
+	int i;
+	u32 new_rc_sel = 0;
+	u32 size = sizeof(str) < count ? sizeof(str) : count;
+
+	memset(str, 0, size);
+	ret = copy_from_user(str, buf, size);
+	if (ret)
+		return -EFAULT;
+
+	for (i = 0; i < size && (str[i] >= '0') && (str[i] <= '9'); ++i)
+		new_rc_sel = (new_rc_sel * 10) + (str[i] - '0');
+
+	if ((!new_rc_sel) || (new_rc_sel > rc_sel_max)) {
+		pr_alert("PCIe: invalid value for rc_sel: 0x%x\n", new_rc_sel);
+		pr_alert("PCIe: rc_sel is still 0x%x\n", rc_sel ? rc_sel : 0x1);
+	} else {
+		rc_sel = new_rc_sel;
+		pr_alert("PCIe: rc_sel is now: 0x%x\n", rc_sel);
+	}
+
+	pr_alert("PCIe: the following RC(s) will be tested:\n");
+	for (i = 0; i < MAX_RC_NUM; i++) {
+		if (!rc_sel) {
+			pr_alert("RC %d\n", i);
+			break;
+		} else if (rc_sel & (1 << i)) {
+			pr_alert("RC %d\n", i);
+		}
+	}
+
+	return count;
+}
+
+const struct file_operations msm_pcie_rc_sel_ops = {
+	.write = msm_pcie_set_rc_sel,
+};
+
+static ssize_t msm_pcie_set_base_sel(struct file *file,
+				const char __user *buf,
+				size_t count, loff_t *ppos)
+{
+	unsigned long ret;
+	char str[MAX_MSG_LEN];
+	int i;
+	u32 new_base_sel = 0;
+	char *base_sel_name;
+	u32 size = sizeof(str) < count ? sizeof(str) : count;
+
+	memset(str, 0, size);
+	ret = copy_from_user(str, buf, size);
+	if (ret)
+		return -EFAULT;
+
+	for (i = 0; i < size && (str[i] >= '0') && (str[i] <= '9'); ++i)
+		new_base_sel = (new_base_sel * 10) + (str[i] - '0');
+
+	if (!new_base_sel || new_base_sel > 5) {
+		pr_alert("PCIe: invalid value for base_sel: 0x%x\n",
+			new_base_sel);
+		pr_alert("PCIe: base_sel is still 0x%x\n", base_sel);
+	} else {
+		base_sel = new_base_sel;
+		pr_alert("PCIe: base_sel is now 0x%x\n", base_sel);
+	}
+
+	switch (base_sel) {
+	case 1:
+		base_sel_name = "PARF";
+		break;
+	case 2:
+		base_sel_name = "PHY";
+		break;
+	case 3:
+		base_sel_name = "RC CONFIG SPACE";
+		break;
+	case 4:
+		base_sel_name = "ELBI";
+		break;
+	case 5:
+		base_sel_name = "EP CONFIG SPACE";
+		break;
+	default:
+		base_sel_name = "INVALID";
+		break;
+	}
+
+	pr_alert("%s\n", base_sel_name);
+
+	return count;
+}
+
+const struct file_operations msm_pcie_base_sel_ops = {
+	.write = msm_pcie_set_base_sel,
+};
+
+static ssize_t msm_pcie_set_linkdown_panic(struct file *file,
+				const char __user *buf,
+				size_t count, loff_t *ppos)
+{
+	unsigned long ret;
+	char str[MAX_MSG_LEN];
+	u32 new_linkdown_panic = 0;
+	int i;
+
+	memset(str, 0, sizeof(str));
+	ret = copy_from_user(str, buf, sizeof(str));
+	if (ret)
+		return -EFAULT;
+
+	for (i = 0; i < sizeof(str) && (str[i] >= '0') && (str[i] <= '9'); ++i)
+		new_linkdown_panic = (new_linkdown_panic * 10) + (str[i] - '0');
+
+	if (new_linkdown_panic <= 1) {
+		for (i = 0; i < MAX_RC_NUM; i++) {
+			if (!rc_sel) {
+				msm_pcie_dev[0].linkdown_panic =
+					new_linkdown_panic;
+				PCIE_DBG_FS(&msm_pcie_dev[0],
+					"PCIe: RC0: linkdown_panic is now %d\n",
+					msm_pcie_dev[0].linkdown_panic);
+				break;
+			} else if (rc_sel & (1 << i)) {
+				msm_pcie_dev[i].linkdown_panic =
+					new_linkdown_panic;
+				PCIE_DBG_FS(&msm_pcie_dev[i],
+					"PCIe: RC%d: linkdown_panic is now %d\n",
+					i, msm_pcie_dev[i].linkdown_panic);
+			}
+		}
+	} else {
+		pr_err("PCIe: Invalid input for linkdown_panic: %d. Please enter 0 or 1.\n",
+			new_linkdown_panic);
+	}
+
+	return count;
+}
+
+const struct file_operations msm_pcie_linkdown_panic_ops = {
+	.write = msm_pcie_set_linkdown_panic,
+};
+
+static ssize_t msm_pcie_set_wr_offset(struct file *file,
+				const char __user *buf,
+				size_t count, loff_t *ppos)
+{
+	unsigned long ret;
+	char str[MAX_MSG_LEN];
+	int i;
+	u32 size = sizeof(str) < count ? sizeof(str) : count;
+
+	memset(str, 0, size);
+	ret = copy_from_user(str, buf, size);
+	if (ret)
+		return -EFAULT;
+
+	wr_offset = 0;
+	for (i = 0; i < size && (str[i] >= '0') && (str[i] <= '9'); ++i)
+		wr_offset = (wr_offset * 10) + (str[i] - '0');
+
+	pr_alert("PCIe: wr_offset is now 0x%x\n", wr_offset);
+
+	return count;
+}
+
+const struct file_operations msm_pcie_wr_offset_ops = {
+	.write = msm_pcie_set_wr_offset,
+};
+
+static ssize_t msm_pcie_set_wr_mask(struct file *file,
+				const char __user *buf,
+				size_t count, loff_t *ppos)
+{
+	unsigned long ret;
+	char str[MAX_MSG_LEN];
+	int i;
+	u32 size = sizeof(str) < count ? sizeof(str) : count;
+
+	memset(str, 0, size);
+	ret = copy_from_user(str, buf, size);
+	if (ret)
+		return -EFAULT;
+
+	wr_mask = 0;
+	for (i = 0; i < size && (str[i] >= '0') && (str[i] <= '9'); ++i)
+		wr_mask = (wr_mask * 10) + (str[i] - '0');
+
+	pr_alert("PCIe: wr_mask is now 0x%x\n", wr_mask);
+
+	return count;
+}
+
+const struct file_operations msm_pcie_wr_mask_ops = {
+	.write = msm_pcie_set_wr_mask,
+};
+
+static ssize_t msm_pcie_set_wr_value(struct file *file,
+				const char __user *buf,
+				size_t count, loff_t *ppos)
+{
+	unsigned long ret;
+	char str[MAX_MSG_LEN];
+	int i;
+	u32 size = sizeof(str) < count ? sizeof(str) : count;
+
+	memset(str, 0, size);
+	ret = copy_from_user(str, buf, size);
+	if (ret)
+		return -EFAULT;
+
+	wr_value = 0;
+	for (i = 0; i < size && (str[i] >= '0') && (str[i] <= '9'); ++i)
+		wr_value = (wr_value * 10) + (str[i] - '0');
+
+	pr_alert("PCIe: wr_value is now 0x%x\n", wr_value);
+
+	return count;
+}
+
+const struct file_operations msm_pcie_wr_value_ops = {
+	.write = msm_pcie_set_wr_value,
+};
+
+static ssize_t msm_pcie_set_boot_option(struct file *file,
+				const char __user *buf,
+				size_t count, loff_t *ppos)
+{
+	unsigned long ret;
+	char str[MAX_MSG_LEN];
+	u32 new_boot_option = 0;
+	int i;
+
+	memset(str, 0, sizeof(str));
+	ret = copy_from_user(str, buf, sizeof(str));
+	if (ret)
+		return -EFAULT;
+
+	for (i = 0; i < sizeof(str) && (str[i] >= '0') && (str[i] <= '9'); ++i)
+		new_boot_option = (new_boot_option * 10) + (str[i] - '0');
+
+	if (new_boot_option <= 1) {
+		for (i = 0; i < MAX_RC_NUM; i++) {
+			if (!rc_sel) {
+				msm_pcie_dev[0].boot_option = new_boot_option;
+				PCIE_DBG_FS(&msm_pcie_dev[0],
+					"PCIe: RC0: boot_option is now 0x%x\n",
+					msm_pcie_dev[0].boot_option);
+				break;
+			} else if (rc_sel & (1 << i)) {
+				msm_pcie_dev[i].boot_option = new_boot_option;
+				PCIE_DBG_FS(&msm_pcie_dev[i],
+					"PCIe: RC%d: boot_option is now 0x%x\n",
+					i, msm_pcie_dev[i].boot_option);
+			}
+		}
+	} else {
+		pr_err("PCIe: Invalid input for boot_option: 0x%x.\n",
+			new_boot_option);
+	}
+
+	return count;
+}
+
+const struct file_operations msm_pcie_boot_option_ops = {
+	.write = msm_pcie_set_boot_option,
+};
+
+static ssize_t msm_pcie_set_aer_enable(struct file *file,
+				const char __user *buf,
+				size_t count, loff_t *ppos)
+{
+	unsigned long ret;
+	char str[MAX_MSG_LEN];
+	u32 new_aer_enable = 0;
+	u32 temp_rc_sel;
+	int i;
+
+	memset(str, 0, sizeof(str));
+	ret = copy_from_user(str, buf, sizeof(str));
+	if (ret)
+		return -EFAULT;
+
+	for (i = 0; i < sizeof(str) && (str[i] >= '0') && (str[i] <= '9'); ++i)
+		new_aer_enable = (new_aer_enable * 10) + (str[i] - '0');
+
+	if (new_aer_enable > 1) {
+		pr_err(
+			"PCIe: Invalid input for aer_enable: %d. Please enter 0 or 1.\n",
+			new_aer_enable);
+		return count;
+	}
+
+	if (rc_sel)
+		temp_rc_sel = rc_sel;
+	else
+		temp_rc_sel = 0x1;
+
+	for (i = 0; i < MAX_RC_NUM; i++) {
+		if (temp_rc_sel & (1 << i)) {
+			msm_pcie_dev[i].aer_enable = new_aer_enable;
+			PCIE_DBG_FS(&msm_pcie_dev[i],
+				"PCIe: RC%d: aer_enable is now %d\n",
+				i, msm_pcie_dev[i].aer_enable);
+
+			msm_pcie_write_mask(msm_pcie_dev[i].dm_core +
+					PCIE20_BRIDGE_CTRL,
+					new_aer_enable ? 0 : BIT(16),
+					new_aer_enable ? BIT(16) : 0);
+
+			PCIE_DBG_FS(&msm_pcie_dev[i],
+				"RC%d: PCIE20_BRIDGE_CTRL: 0x%x\n", i,
+				readl_relaxed(msm_pcie_dev[i].dm_core +
+					PCIE20_BRIDGE_CTRL));
+		}
+	}
+
+	return count;
+}
+
+const struct file_operations msm_pcie_aer_enable_ops = {
+	.write = msm_pcie_set_aer_enable,
+};
+
+static ssize_t msm_pcie_set_corr_counter_limit(struct file *file,
+				const char __user *buf,
+				size_t count, loff_t *ppos)
+{
+	unsigned long ret;
+	char str[MAX_MSG_LEN];
+	int i;
+	u32 size = sizeof(str) < count ? sizeof(str) : count;
+
+	memset(str, 0, size);
+	ret = copy_from_user(str, buf, size);
+	if (ret)
+		return -EFAULT;
+
+	corr_counter_limit = 0;
+	for (i = 0; i < size && (str[i] >= '0') && (str[i] <= '9'); ++i)
+		corr_counter_limit = (corr_counter_limit * 10) + (str[i] - '0');
+
+	pr_info("PCIe: corr_counter_limit is now %lu\n", corr_counter_limit);
+
+	return count;
+}
+
+const struct file_operations msm_pcie_corr_counter_limit_ops = {
+	.write = msm_pcie_set_corr_counter_limit,
+};
+
+static void msm_pcie_debugfs_init(void)
+{
+	rc_sel_max = (0x1 << MAX_RC_NUM) - 1;
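+	/* default to writing the entire dword in testcase 12 */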
+	wr_mask = 0xffffffff;
+
+	dent_msm_pcie = debugfs_create_dir("pci-msm", 0);
+	if (IS_ERR(dent_msm_pcie)) {
+		pr_err("PCIe: fail to create the folder for debug_fs.\n");
+		return;
+	}
+
+	dfile_rc_sel = debugfs_create_file("rc_sel", 0664,
+					dent_msm_pcie, 0,
+					&msm_pcie_rc_sel_ops);
+	if (!dfile_rc_sel || IS_ERR(dfile_rc_sel)) {
+		pr_err("PCIe: fail to create the file for debug_fs rc_sel.\n");
+		goto rc_sel_error;
+	}
+
+	dfile_case = debugfs_create_file("case", 0664,
+					dent_msm_pcie, 0,
+					&msm_pcie_cmd_debug_ops);
+	if (!dfile_case || IS_ERR(dfile_case)) {
+		pr_err("PCIe: fail to create the file for debug_fs case.\n");
+		goto case_error;
+	}
+
+	dfile_base_sel = debugfs_create_file("base_sel", 0664,
+					dent_msm_pcie, 0,
+					&msm_pcie_base_sel_ops);
+	if (!dfile_base_sel || IS_ERR(dfile_base_sel)) {
+		pr_err("PCIe: fail to create the file for debug_fs base_sel.\n");
+		goto base_sel_error;
+	}
+
+	dfile_linkdown_panic = debugfs_create_file("linkdown_panic", 0644,
+					dent_msm_pcie, 0,
+					&msm_pcie_linkdown_panic_ops);
+	if (!dfile_linkdown_panic || IS_ERR(dfile_linkdown_panic)) {
+		pr_err("PCIe: fail to create the file for debug_fs linkdown_panic.\n");
+		goto linkdown_panic_error;
+	}
+
+	dfile_wr_offset = debugfs_create_file("wr_offset", 0664,
+					dent_msm_pcie, 0,
+					&msm_pcie_wr_offset_ops);
+	if (!dfile_wr_offset || IS_ERR(dfile_wr_offset)) {
+		pr_err("PCIe: fail to create the file for debug_fs wr_offset.\n");
+		goto wr_offset_error;
+	}
+
+	dfile_wr_mask = debugfs_create_file("wr_mask", 0664,
+					dent_msm_pcie, 0,
+					&msm_pcie_wr_mask_ops);
+	if (!dfile_wr_mask || IS_ERR(dfile_wr_mask)) {
+		pr_err("PCIe: fail to create the file for debug_fs wr_mask.\n");
+		goto wr_mask_error;
+	}
+
+	dfile_wr_value = debugfs_create_file("wr_value", 0664,
+					dent_msm_pcie, 0,
+					&msm_pcie_wr_value_ops);
+	if (!dfile_wr_value || IS_ERR(dfile_wr_value)) {
+		pr_err("PCIe: fail to create the file for debug_fs wr_value.\n");
+		goto wr_value_error;
+	}
+
+	dfile_boot_option = debugfs_create_file("boot_option", 0664,
+					dent_msm_pcie, 0,
+					&msm_pcie_boot_option_ops);
+	if (!dfile_boot_option || IS_ERR(dfile_boot_option)) {
+		pr_err("PCIe: fail to create the file for debug_fs boot_option.\n");
+		goto boot_option_error;
+	}
+
+	dfile_aer_enable = debugfs_create_file("aer_enable", 0664,
+					dent_msm_pcie, 0,
+					&msm_pcie_aer_enable_ops);
+	if (!dfile_aer_enable || IS_ERR(dfile_aer_enable)) {
+		pr_err("PCIe: fail to create the file for debug_fs aer_enable.\n");
+		goto aer_enable_error;
+	}
+
+	dfile_corr_counter_limit = debugfs_create_file("corr_counter_limit",
+					0664, dent_msm_pcie, 0,
+					&msm_pcie_corr_counter_limit_ops);
+	if (!dfile_corr_counter_limit || IS_ERR(dfile_corr_counter_limit)) {
+		pr_err("PCIe: fail to create the file for debug_fs corr_counter_limit.\n");
+		goto corr_counter_limit_error;
+	}
+	return;
+
+corr_counter_limit_error:
+	debugfs_remove(dfile_aer_enable);
+aer_enable_error:
+	debugfs_remove(dfile_boot_option);
+boot_option_error:
+	debugfs_remove(dfile_wr_value);
+wr_value_error:
+	debugfs_remove(dfile_wr_mask);
+wr_mask_error:
+	debugfs_remove(dfile_wr_offset);
+wr_offset_error:
+	debugfs_remove(dfile_linkdown_panic);
+linkdown_panic_error:
+	debugfs_remove(dfile_base_sel);
+base_sel_error:
+	debugfs_remove(dfile_case);
+case_error:
+	debugfs_remove(dfile_rc_sel);
+rc_sel_error:
+	debugfs_remove(dent_msm_pcie);
+}
+
+static void msm_pcie_debugfs_exit(void)
+{
+	debugfs_remove(dfile_rc_sel);
+	debugfs_remove(dfile_case);
+	debugfs_remove(dfile_base_sel);
+	debugfs_remove(dfile_linkdown_panic);
+	debugfs_remove(dfile_wr_offset);
+	debugfs_remove(dfile_wr_mask);
+	debugfs_remove(dfile_wr_value);
+	debugfs_remove(dfile_boot_option);
+	debugfs_remove(dfile_aer_enable);
+	debugfs_remove(dfile_corr_counter_limit);
+}
+#else
+static void msm_pcie_debugfs_init(void)
+{
+}
+
+static void msm_pcie_debugfs_exit(void)
+{
+}
+#endif
+
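+/*
+ * BIT(29) of the combined Link Control/Status dword is bit 13 of the Link
+ * Status register, i.e. Data Link Layer Link Active.
+ */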
+static inline int msm_pcie_is_link_up(struct msm_pcie_dev_t *dev)
+{
+	return readl_relaxed(dev->dm_core +
+			PCIE20_CAP_LINKCTRLSTATUS) & BIT(29);
+}
+
+/**
+ * msm_pcie_iatu_config - configure outbound address translation region
+ * @dev: root complex
+ * @nr: region number
+ * @type: target transaction type, see PCIE20_CTRL1_TYPE_xxx
+ * @host_addr: region start address on host
+ * @host_end: region end address (low 32 bits) on host;
+ *	upper 32 bits are the same as for @host_addr
+ * @target_addr: region start address on target
+ */
+static void msm_pcie_iatu_config(struct msm_pcie_dev_t *dev, int nr, u8 type,
+				unsigned long host_addr, u32 host_end,
+				unsigned long target_addr)
+{
+	void __iomem *pcie20 = dev->dm_core;
+
+	if (dev->shadow_en) {
+		dev->rc_shadow[PCIE20_PLR_IATU_VIEWPORT / 4] =
+			nr;
+		dev->rc_shadow[PCIE20_PLR_IATU_CTRL1 / 4] =
+			type;
+		dev->rc_shadow[PCIE20_PLR_IATU_LBAR / 4] =
+			lower_32_bits(host_addr);
+		dev->rc_shadow[PCIE20_PLR_IATU_UBAR / 4] =
+			upper_32_bits(host_addr);
+		dev->rc_shadow[PCIE20_PLR_IATU_LAR / 4] =
+			host_end;
+		dev->rc_shadow[PCIE20_PLR_IATU_LTAR / 4] =
+			lower_32_bits(target_addr);
+		dev->rc_shadow[PCIE20_PLR_IATU_UTAR / 4] =
+			upper_32_bits(target_addr);
+		dev->rc_shadow[PCIE20_PLR_IATU_CTRL2 / 4] =
+			BIT(31);
+	}
+
+	/* select region */
+	writel_relaxed(nr, pcie20 + PCIE20_PLR_IATU_VIEWPORT);
+	/* ensure the viewport selection is observed before programming it */
+	wmb();
+
+	/* switch off region before changing it */
+	writel_relaxed(0, pcie20 + PCIE20_PLR_IATU_CTRL2);
+	/* and wait till it propagates to the hardware */
+	wmb();
+
+	writel_relaxed(type, pcie20 + PCIE20_PLR_IATU_CTRL1);
+	writel_relaxed(lower_32_bits(host_addr),
+		       pcie20 + PCIE20_PLR_IATU_LBAR);
+	writel_relaxed(upper_32_bits(host_addr),
+		       pcie20 + PCIE20_PLR_IATU_UBAR);
+	writel_relaxed(host_end, pcie20 + PCIE20_PLR_IATU_LAR);
+	writel_relaxed(lower_32_bits(target_addr),
+		       pcie20 + PCIE20_PLR_IATU_LTAR);
+	writel_relaxed(upper_32_bits(target_addr),
+		       pcie20 + PCIE20_PLR_IATU_UTAR);
+	wmb();
+	writel_relaxed(BIT(31), pcie20 + PCIE20_PLR_IATU_CTRL2);
+
+	/* ensure that changes propagated to the hardware */
+	wmb();
+
+	if (dev->enumerated) {
+		PCIE_DBG2(dev, "IATU for Endpoint %02x:%02x.%01x\n",
+			dev->pcidev_table[nr].bdf >> 24,
+			dev->pcidev_table[nr].bdf >> 19 & 0x1f,
+			dev->pcidev_table[nr].bdf >> 16 & 0x07);
+		PCIE_DBG2(dev, "PCIE20_PLR_IATU_VIEWPORT:0x%x\n",
+			readl_relaxed(dev->dm_core + PCIE20_PLR_IATU_VIEWPORT));
+		PCIE_DBG2(dev, "PCIE20_PLR_IATU_CTRL1:0x%x\n",
+			readl_relaxed(dev->dm_core + PCIE20_PLR_IATU_CTRL1));
+		PCIE_DBG2(dev, "PCIE20_PLR_IATU_LBAR:0x%x\n",
+			readl_relaxed(dev->dm_core + PCIE20_PLR_IATU_LBAR));
+		PCIE_DBG2(dev, "PCIE20_PLR_IATU_UBAR:0x%x\n",
+			readl_relaxed(dev->dm_core + PCIE20_PLR_IATU_UBAR));
+		PCIE_DBG2(dev, "PCIE20_PLR_IATU_LAR:0x%x\n",
+			readl_relaxed(dev->dm_core + PCIE20_PLR_IATU_LAR));
+		PCIE_DBG2(dev, "PCIE20_PLR_IATU_LTAR:0x%x\n",
+			readl_relaxed(dev->dm_core + PCIE20_PLR_IATU_LTAR));
+		PCIE_DBG2(dev, "PCIE20_PLR_IATU_UTAR:0x%x\n",
+			readl_relaxed(dev->dm_core + PCIE20_PLR_IATU_UTAR));
+		PCIE_DBG2(dev, "PCIE20_PLR_IATU_CTRL2:0x%x\n\n",
+			readl_relaxed(dev->dm_core + PCIE20_PLR_IATU_CTRL2));
+	}
+}
+
+/**
+ * msm_pcie_cfg_bdf - configure for config access
+ * @dev: root complex
+ * @bus: PCI bus number
+ * @devfn: PCI dev and function number
+ *
+ * Remaps region 0, if required, for config access of the proper type
+ * (CFG0 for bus 1, CFG1 for other buses).
+ * Caches the current device BDF to speed up repeated accesses.
+ */
+static void msm_pcie_cfg_bdf(struct msm_pcie_dev_t *dev, u8 bus, u8 devfn)
+{
+	struct resource *axi_conf = dev->res[MSM_PCIE_RES_CONF].resource;
+	u32 bdf  = BDF_OFFSET(bus, devfn);
+	u8 type = bus == 1 ? PCIE20_CTRL1_TYPE_CFG0 : PCIE20_CTRL1_TYPE_CFG1;
+
+	if (dev->current_bdf == bdf)
+		return;
+
+	msm_pcie_iatu_config(dev, 0, type,
+			axi_conf->start,
+			axi_conf->start + SZ_4K - 1,
+			bdf);
+
+	dev->current_bdf = bdf;
+}
+
+static inline void msm_pcie_save_shadow(struct msm_pcie_dev_t *dev,
+					u32 word_offset, u32 wr_val,
+					u32 bdf, bool rc)
+{
+	int i, j;
+	u32 max_dev = MAX_RC_NUM * MAX_DEVICE_NUM;
+
+	if (rc) {
+		dev->rc_shadow[word_offset / 4] = wr_val;
+	} else {
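+		/*
+		 * The first write to an unknown BDF claims a slot in both
+		 * the global and the per-RC device tables; a hit at i > 0
+		 * means more than one device, i.e. a bridge below the
+		 * root port.
+		 */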
+		for (i = 0; i < MAX_DEVICE_NUM; i++) {
+			if (!dev->pcidev_table[i].bdf) {
+				for (j = 0; j < max_dev; j++)
+					if (!msm_pcie_dev_tbl[j].bdf) {
+						msm_pcie_dev_tbl[j].bdf = bdf;
+						break;
+					}
+				dev->pcidev_table[i].bdf = bdf;
+				if ((!dev->bridge_found) && (i > 0))
+					dev->bridge_found = true;
+			}
+			if (dev->pcidev_table[i].bdf == bdf) {
+				dev->ep_shadow[i][word_offset / 4] = wr_val;
+				break;
+			}
+		}
+	}
+}
+
+static inline int msm_pcie_oper_conf(struct pci_bus *bus, u32 devfn, int oper,
+				     int where, int size, u32 *val)
+{
+	uint32_t word_offset, byte_offset, mask;
+	uint32_t rd_val, wr_val;
+	struct msm_pcie_dev_t *dev;
+	void __iomem *config_base;
+	bool rc = false;
+	u32 rc_idx;
+	int rv = 0;
+	u32 bdf = BDF_OFFSET(bus->number, devfn);
+	int i;
+
+	dev = PCIE_BUS_PRIV_DATA(bus);
+
+	if (!dev) {
+		pr_err("PCIe: No device found for this bus.\n");
+		*val = ~0;
+		rv = PCIBIOS_DEVICE_NOT_FOUND;
+		goto out;
+	}
+
+	rc_idx = dev->rc_idx;
+	rc = (bus->number == 0);
+
+	spin_lock_irqsave(&dev->cfg_lock, dev->irqsave_flags);
+
+	if (!dev->cfg_access) {
+		PCIE_DBG3(dev,
+			"Access denied for RC%d %d:0x%02x + 0x%04x[%d]\n",
+			rc_idx, bus->number, devfn, where, size);
+		*val = ~0;
+		rv = PCIBIOS_DEVICE_NOT_FOUND;
+		goto unlock;
+	}
+
+	if (rc && (devfn != 0)) {
+		PCIE_DBG3(dev, "RC%d invalid %s - bus %d devfn %d\n", rc_idx,
+			 (oper == RD) ? "rd" : "wr", bus->number, devfn);
+		*val = ~0;
+		rv = PCIBIOS_DEVICE_NOT_FOUND;
+		goto unlock;
+	}
+
+	if (dev->link_status != MSM_PCIE_LINK_ENABLED) {
+		PCIE_DBG3(dev,
+			"Access to RC%d %d:0x%02x + 0x%04x[%d] is denied because link is down\n",
+			rc_idx, bus->number, devfn, where, size);
+		*val = ~0;
+		rv = PCIBIOS_DEVICE_NOT_FOUND;
+		goto unlock;
+	}
+
+	/* check if the link is up for endpoint */
+	if (!rc && !msm_pcie_is_link_up(dev)) {
+		PCIE_ERR(dev,
+			"PCIe: RC%d %s fail, link down - bus %d devfn %d\n",
+				rc_idx, (oper == RD) ? "rd" : "wr",
+				bus->number, devfn);
+		*val = ~0;
+		rv = PCIBIOS_DEVICE_NOT_FOUND;
+		goto unlock;
+	}
+
+	if (!rc && !dev->enumerated)
+		msm_pcie_cfg_bdf(dev, bus->number, devfn);
+
+	word_offset = where & ~0x3;
+	byte_offset = where & 0x3;
+	mask = ((u32)~0 >> (8 * (4 - size))) << (8 * byte_offset);
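+	/*
+	 * For example, a 2-byte access at where = 0x6 yields word_offset 0x4,
+	 * byte_offset 2 and mask 0xffff0000.
+	 */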
+
+	if (rc || !dev->enumerated) {
+		config_base = rc ? dev->dm_core : dev->conf;
+	} else {
+		for (i = 0; i < MAX_DEVICE_NUM; i++) {
+			if (dev->pcidev_table[i].bdf == bdf) {
+				config_base = dev->pcidev_table[i].conf_base;
+				break;
+			}
+		}
+		if (i == MAX_DEVICE_NUM) {
+			*val = ~0;
+			rv = PCIBIOS_DEVICE_NOT_FOUND;
+			goto unlock;
+		}
+	}
+
+	rd_val = readl_relaxed(config_base + word_offset);
+
+	if (oper == RD) {
+		*val = ((rd_val & mask) >> (8 * byte_offset));
+		PCIE_DBG3(dev,
+			"RC%d %d:0x%02x + 0x%04x[%d] -> 0x%08x; rd 0x%08x\n",
+			rc_idx, bus->number, devfn, where, size, *val, rd_val);
+	} else {
+		wr_val = (rd_val & ~mask) |
+				((*val << (8 * byte_offset)) & mask);
+
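+		/*
+		 * On the root port, offset 0x3c shares its dword with
+		 * Bridge Control (0x3e); bits 16/17 appear to force
+		 * Parity Error Response and SERR# Enable on every write
+		 * to that dword.
+		 */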
+		if ((bus->number == 0) && (where == 0x3c))
+			wr_val = wr_val | (3 << 16);
+
+		writel_relaxed(wr_val, config_base + word_offset);
+		wmb(); /* ensure config data is written to hardware register */
+
+		if (dev->shadow_en) {
+			if (rd_val == PCIE_LINK_DOWN &&
+				(readl_relaxed(config_base) == PCIE_LINK_DOWN))
+				PCIE_ERR(dev,
+					"Read of RC%d %d:0x%02x + 0x%04x[%d] is all FFs\n",
+					rc_idx, bus->number, devfn,
+					where, size);
+			else
+				msm_pcie_save_shadow(dev, word_offset, wr_val,
+					bdf, rc);
+		}
+
+		PCIE_DBG3(dev,
+			"RC%d %d:0x%02x + 0x%04x[%d] <- 0x%08x; rd 0x%08x val 0x%08x\n",
+			rc_idx, bus->number, devfn, where, size,
+			wr_val, rd_val, *val);
+	}
+
+unlock:
+	spin_unlock_irqrestore(&dev->cfg_lock, dev->irqsave_flags);
+out:
+	return rv;
+}
+
+static int msm_pcie_rd_conf(struct pci_bus *bus, u32 devfn, int where,
+			    int size, u32 *val)
+{
+	int ret = msm_pcie_oper_conf(bus, devfn, RD, where, size, val);
+
+	if ((bus->number == 0) && (where == PCI_CLASS_REVISION)) {
+		*val = (*val & 0xff) | (PCI_CLASS_BRIDGE_PCI << 16);
+		PCIE_GEN_DBG("change class for RC:0x%x\n", *val);
+	}
+
+	return ret;
+}
+
+static int msm_pcie_wr_conf(struct pci_bus *bus, u32 devfn,
+			    int where, int size, u32 val)
+{
+	return msm_pcie_oper_conf(bus, devfn, WR, where, size, &val);
+}
+
+static struct pci_ops msm_pcie_ops = {
+	.read = msm_pcie_rd_conf,
+	.write = msm_pcie_wr_conf,
+};
+
+static int msm_pcie_gpio_init(struct msm_pcie_dev_t *dev)
+{
+	int rc = 0, i;
+	struct msm_pcie_gpio_info_t *info;
+
+	PCIE_DBG(dev, "RC%d\n", dev->rc_idx);
+
+	for (i = 0; i < dev->gpio_n; i++) {
+		info = &dev->gpio[i];
+
+		if (!info->num)
+			continue;
+
+		rc = gpio_request(info->num, info->name);
+		if (rc) {
+			PCIE_ERR(dev, "PCIe: RC%d can't get gpio %s; %d\n",
+				dev->rc_idx, info->name, rc);
+			break;
+		}
+
+		if (info->out)
+			rc = gpio_direction_output(info->num, info->init);
+		else
+			rc = gpio_direction_input(info->num);
+		if (rc) {
+			PCIE_ERR(dev,
+				"PCIe: RC%d can't set direction for GPIO %s:%d\n",
+				dev->rc_idx, info->name, rc);
+			gpio_free(info->num);
+			break;
+		}
+	}
+
+	if (rc)
+		while (i--)
+			gpio_free(dev->gpio[i].num);
+
+	return rc;
+}
+
+static void msm_pcie_gpio_deinit(struct msm_pcie_dev_t *dev)
+{
+	int i;
+
+	PCIE_DBG(dev, "RC%d\n", dev->rc_idx);
+
+	for (i = 0; i < dev->gpio_n; i++)
+		gpio_free(dev->gpio[i].num);
+}
+
+int msm_pcie_vreg_init(struct msm_pcie_dev_t *dev)
+{
+	int i, rc = 0;
+	struct regulator *vreg;
+	struct msm_pcie_vreg_info_t *info;
+
+	PCIE_DBG(dev, "RC%d: entry\n", dev->rc_idx);
+
+	for (i = 0; i < MSM_PCIE_MAX_VREG; i++) {
+		info = &dev->vreg[i];
+		vreg = info->hdl;
+
+		if (!vreg)
+			continue;
+
+		PCIE_DBG2(dev, "RC%d Vreg %s is being enabled\n",
+			dev->rc_idx, info->name);
+		if (info->max_v) {
+			rc = regulator_set_voltage(vreg,
+						   info->min_v, info->max_v);
+			if (rc) {
+				PCIE_ERR(dev,
+					"PCIe: RC%d can't set voltage for %s: %d\n",
+					dev->rc_idx, info->name, rc);
+				break;
+			}
+		}
+
+		if (info->opt_mode) {
+			rc = regulator_set_load(vreg, info->opt_mode);
+			if (rc < 0) {
+				PCIE_ERR(dev,
+					"PCIe: RC%d can't set mode for %s: %d\n",
+					dev->rc_idx, info->name, rc);
+				break;
+			}
+		}
+
+		rc = regulator_enable(vreg);
+		if (rc) {
+			PCIE_ERR(dev,
+				"PCIe: RC%d can't enable regulator %s: %d\n",
+				dev->rc_idx, info->name, rc);
+			break;
+		}
+	}
+
+	if (rc)
+		while (i--) {
+			struct regulator *hdl = dev->vreg[i].hdl;
+
+			if (hdl) {
+				regulator_disable(hdl);
+				if (!strcmp(dev->vreg[i].name, "vreg-cx")) {
+					PCIE_DBG(dev,
+						"RC%d: Removing %s vote.\n",
+						dev->rc_idx,
+						dev->vreg[i].name);
+					regulator_set_voltage(hdl,
+						RPM_REGULATOR_CORNER_NONE,
+						INT_MAX);
+				}
+			}
+		}
+
+	PCIE_DBG(dev, "RC%d: exit\n", dev->rc_idx);
+
+	return rc;
+}
+
+static void msm_pcie_vreg_deinit(struct msm_pcie_dev_t *dev)
+{
+	int i;
+
+	PCIE_DBG(dev, "RC%d: entry\n", dev->rc_idx);
+
+	for (i = MSM_PCIE_MAX_VREG - 1; i >= 0; i--) {
+		if (dev->vreg[i].hdl) {
+			PCIE_DBG(dev, "Vreg %s is being disabled\n",
+				dev->vreg[i].name);
+			regulator_disable(dev->vreg[i].hdl);
+
+			if (!strcmp(dev->vreg[i].name, "vreg-cx")) {
+				PCIE_DBG(dev,
+					"RC%d: Removing %s vote.\n",
+					dev->rc_idx,
+					dev->vreg[i].name);
+				regulator_set_voltage(dev->vreg[i].hdl,
+					RPM_REGULATOR_CORNER_NONE,
+					INT_MAX);
+			}
+		}
+	}
+
+	PCIE_DBG(dev, "RC%d: exit\n", dev->rc_idx);
+}
+
+static int msm_pcie_clk_init(struct msm_pcie_dev_t *dev)
+{
+	int i, rc = 0;
+	struct msm_pcie_clk_info_t *info;
+	struct msm_pcie_reset_info_t *reset_info;
+
+	PCIE_DBG(dev, "RC%d: entry\n", dev->rc_idx);
+
+	rc = regulator_enable(dev->gdsc);
+
+	if (rc) {
+		PCIE_ERR(dev, "PCIe: fail to enable GDSC for RC%d (%s)\n",
+			dev->rc_idx, dev->pdev->name);
+		return rc;
+	}
+
+	if (dev->gdsc_smmu) {
+		rc = regulator_enable(dev->gdsc_smmu);
+
+		if (rc) {
+			PCIE_ERR(dev,
+				"PCIe: fail to enable SMMU GDSC for RC%d (%s)\n",
+				dev->rc_idx, dev->pdev->name);
+			return rc;
+		}
+	}
+
+	PCIE_DBG(dev, "PCIe: requesting bus vote for RC%d\n", dev->rc_idx);
+	if (dev->bus_client) {
+		rc = msm_bus_scale_client_update_request(dev->bus_client, 1);
+		if (rc) {
+			PCIE_ERR(dev,
+				"PCIe: fail to set bus bandwidth for RC%d:%d.\n",
+				dev->rc_idx, rc);
+			return rc;
+		} else {
+			PCIE_DBG2(dev,
+				"PCIe: set bus bandwidth for RC%d.\n",
+				dev->rc_idx);
+		}
+	}
+
+	for (i = 0; i < MSM_PCIE_MAX_CLK; i++) {
+		info = &dev->clk[i];
+
+		if (!info->hdl)
+			continue;
+
+		if (info->config_mem)
+			msm_pcie_config_clock_mem(dev, info);
+
+		if (info->freq) {
+			rc = clk_set_rate(info->hdl, info->freq);
+			if (rc) {
+				PCIE_ERR(dev,
+					"PCIe: RC%d can't set rate for clk %s: %d.\n",
+					dev->rc_idx, info->name, rc);
+				break;
+			} else {
+				PCIE_DBG2(dev,
+					"PCIe: RC%d set rate for clk %s.\n",
+					dev->rc_idx, info->name);
+			}
+		}
+
+		rc = clk_prepare_enable(info->hdl);
+
+		if (rc)
+			PCIE_ERR(dev, "PCIe: RC%d failed to enable clk %s\n",
+				dev->rc_idx, info->name);
+		else
+			PCIE_DBG2(dev, "enable clk %s for RC%d.\n",
+				info->name, dev->rc_idx);
+	}
+
+	if (rc) {
+		PCIE_DBG(dev, "RC%d disable clocks for error handling.\n",
+			dev->rc_idx);
+		while (i--) {
+			struct clk *hdl = dev->clk[i].hdl;
+
+			if (hdl)
+				clk_disable_unprepare(hdl);
+		}
+
+		if (dev->gdsc_smmu)
+			regulator_disable(dev->gdsc_smmu);
+
+		regulator_disable(dev->gdsc);
+	}
+
+	for (i = 0; i < MSM_PCIE_MAX_RESET; i++) {
+		reset_info = &dev->reset[i];
+		if (reset_info->hdl) {
+			rc = reset_control_deassert(reset_info->hdl);
+			if (rc)
+				PCIE_ERR(dev,
+					"PCIe: RC%d failed to deassert reset for %s.\n",
+					dev->rc_idx, reset_info->name);
+			else
+				PCIE_DBG2(dev,
+					"PCIe: RC%d successfully deasserted reset for %s.\n",
+					dev->rc_idx, reset_info->name);
+		}
+	}
+
+	PCIE_DBG(dev, "RC%d: exit\n", dev->rc_idx);
+
+	return rc;
+}
+
+static void msm_pcie_clk_deinit(struct msm_pcie_dev_t *dev)
+{
+	int i;
+	int rc;
+
+	PCIE_DBG(dev, "RC%d: entry\n", dev->rc_idx);
+
+	for (i = 0; i < MSM_PCIE_MAX_CLK; i++)
+		if (dev->clk[i].hdl)
+			clk_disable_unprepare(dev->clk[i].hdl);
+
+	if (dev->bus_client) {
+		PCIE_DBG(dev, "PCIe: removing bus vote for RC%d\n",
+			dev->rc_idx);
+
+		rc = msm_bus_scale_client_update_request(dev->bus_client, 0);
+		if (rc)
+			PCIE_ERR(dev,
+				"PCIe: fail to relinquish bus bandwidth for RC%d:%d.\n",
+				dev->rc_idx, rc);
+		else
+			PCIE_DBG(dev,
+				"PCIe: relinquish bus bandwidth for RC%d.\n",
+				dev->rc_idx);
+	}
+
+	if (dev->gdsc_smmu)
+		regulator_disable(dev->gdsc_smmu);
+
+	regulator_disable(dev->gdsc);
+
+	PCIE_DBG(dev, "RC%d: exit\n", dev->rc_idx);
+}
+
+static int msm_pcie_pipe_clk_init(struct msm_pcie_dev_t *dev)
+{
+	int i, rc = 0;
+	struct msm_pcie_clk_info_t *info;
+	struct msm_pcie_reset_info_t *pipe_reset_info;
+
+	PCIE_DBG(dev, "RC%d: entry\n", dev->rc_idx);
+
+	for (i = 0; i < MSM_PCIE_MAX_PIPE_CLK; i++) {
+		info = &dev->pipeclk[i];
+
+		if (!info->hdl)
+			continue;
+
+		if (info->config_mem)
+			msm_pcie_config_clock_mem(dev, info);
+
+		if (info->freq) {
+			rc = clk_set_rate(info->hdl, info->freq);
+			if (rc) {
+				PCIE_ERR(dev,
+					"PCIe: RC%d can't set rate for clk %s: %d.\n",
+					dev->rc_idx, info->name, rc);
+				break;
+			} else {
+				PCIE_DBG2(dev,
+					"PCIe: RC%d set rate for clk %s: %d.\n",
+					dev->rc_idx, info->name, rc);
+			}
+		}
+
+		rc = clk_prepare_enable(info->hdl);
+
+		if (rc)
+			PCIE_ERR(dev, "PCIe: RC%d failed to enable clk %s.\n",
+				dev->rc_idx, info->name);
+		else
+			PCIE_DBG2(dev, "RC%d enabled pipe clk %s.\n",
+				dev->rc_idx, info->name);
+	}
+
+	if (rc) {
+		PCIE_DBG(dev, "RC%d disable pipe clocks for error handling.\n",
+			dev->rc_idx);
+		while (i--)
+			if (dev->pipeclk[i].hdl)
+				clk_disable_unprepare(dev->pipeclk[i].hdl);
+	}
+
+	for (i = 0; i < MSM_PCIE_MAX_PIPE_RESET; i++) {
+		pipe_reset_info = &dev->pipe_reset[i];
+		if (pipe_reset_info->hdl) {
+			rc = reset_control_deassert(
+					pipe_reset_info->hdl);
+			if (rc)
+				PCIE_ERR(dev,
+					"PCIe: RC%d failed to deassert pipe reset for %s.\n",
+					dev->rc_idx, pipe_reset_info->name);
+			else
+				PCIE_DBG2(dev,
+					"PCIe: RC%d successfully deasserted pipe reset for %s.\n",
+					dev->rc_idx, pipe_reset_info->name);
+		}
+	}
+
+	PCIE_DBG(dev, "RC%d: exit\n", dev->rc_idx);
+
+	return rc;
+}
+
+static void msm_pcie_pipe_clk_deinit(struct msm_pcie_dev_t *dev)
+{
+	int i;
+
+	PCIE_DBG(dev, "RC%d: entry\n", dev->rc_idx);
+
+	for (i = 0; i < MSM_PCIE_MAX_PIPE_CLK; i++)
+		if (dev->pipeclk[i].hdl)
+			clk_disable_unprepare(
+				dev->pipeclk[i].hdl);
+
+	PCIE_DBG(dev, "RC%d: exit\n", dev->rc_idx);
+}
+
+static void msm_pcie_iatu_config_all_ep(struct msm_pcie_dev_t *dev)
+{
+	int i;
+	u8 type;
+	struct msm_pcie_device_info *dev_table = dev->pcidev_table;
+
+	for (i = 0; i < MAX_DEVICE_NUM; i++) {
+		if (!dev_table[i].bdf)
+			break;
+
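+		/* CFG0 for the device on bus 1, CFG1 for anything deeper */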
+		type = dev_table[i].bdf >> 24 == 0x1 ?
+			PCIE20_CTRL1_TYPE_CFG0 : PCIE20_CTRL1_TYPE_CFG1;
+
+		msm_pcie_iatu_config(dev, i, type, dev_table[i].phy_address,
+			dev_table[i].phy_address + SZ_4K - 1,
+			dev_table[i].bdf);
+	}
+}
+
+static void msm_pcie_config_controller(struct msm_pcie_dev_t *dev)
+{
+	int i;
+
+	PCIE_DBG(dev, "RC%d\n", dev->rc_idx);
+
+	/*
+	 * program and enable address translation region 0 (device config
+	 * address space); region type config;
+	 * axi config address range to device config address range
+	 */
+	if (dev->enumerated) {
+		msm_pcie_iatu_config_all_ep(dev);
+	} else {
+		dev->current_bdf = 0; /* to force IATU re-config */
+		msm_pcie_cfg_bdf(dev, 1, 0);
+	}
+
+	/* configure N_FTS */
+	PCIE_DBG2(dev, "Original PCIE20_ACK_F_ASPM_CTRL_REG:0x%x\n",
+		readl_relaxed(dev->dm_core + PCIE20_ACK_F_ASPM_CTRL_REG));
+	if (!dev->n_fts)
+		msm_pcie_write_mask(dev->dm_core + PCIE20_ACK_F_ASPM_CTRL_REG,
+					0, BIT(15));
+	else
+		msm_pcie_write_mask(dev->dm_core + PCIE20_ACK_F_ASPM_CTRL_REG,
+					PCIE20_ACK_N_FTS,
+					dev->n_fts << 8);
+
+	if (dev->shadow_en)
+		dev->rc_shadow[PCIE20_ACK_F_ASPM_CTRL_REG / 4] =
+			readl_relaxed(dev->dm_core +
+			PCIE20_ACK_F_ASPM_CTRL_REG);
+
+	PCIE_DBG2(dev, "Updated PCIE20_ACK_F_ASPM_CTRL_REG:0x%x\n",
+		readl_relaxed(dev->dm_core + PCIE20_ACK_F_ASPM_CTRL_REG));
+
+	/* configure AUX clock frequency register for PCIe core */
+	if (dev->use_19p2mhz_aux_clk)
+		msm_pcie_write_reg(dev->dm_core, PCIE20_AUX_CLK_FREQ_REG, 0x14);
+	else
+		msm_pcie_write_reg(dev->dm_core, PCIE20_AUX_CLK_FREQ_REG, 0x01);
+
+	/* configure the completion timeout value for PCIe core */
+	if (dev->cpl_timeout && dev->bridge_found)
+		msm_pcie_write_reg_field(dev->dm_core,
+					PCIE20_DEVICE_CONTROL2_STATUS2,
+					0xf, dev->cpl_timeout);
+
+	/* Enable AER on RC */
+	if (dev->aer_enable) {
+		msm_pcie_write_mask(dev->dm_core + PCIE20_BRIDGE_CTRL, 0,
+						BIT(16)|BIT(17));
+		msm_pcie_write_mask(dev->dm_core +  PCIE20_CAP_DEVCTRLSTATUS, 0,
+						BIT(3)|BIT(2)|BIT(1)|BIT(0));
+
+		PCIE_DBG(dev, "RC's PCIE20_CAP_DEVCTRLSTATUS:0x%x\n",
+			readl_relaxed(dev->dm_core + PCIE20_CAP_DEVCTRLSTATUS));
+	}
+
+	/* configure SMMU registers */
+	if (dev->smmu_exist) {
+		msm_pcie_write_reg(dev->parf,
+			PCIE20_PARF_BDF_TRANSLATE_CFG, 0);
+		msm_pcie_write_reg(dev->parf,
+			PCIE20_PARF_SID_OFFSET, 0);
+
+		if (dev->enumerated) {
+			for (i = 0; i < MAX_DEVICE_NUM; i++) {
+				if (dev->pcidev_table[i].dev &&
+					dev->pcidev_table[i].short_bdf) {
+					msm_pcie_write_reg(dev->parf,
+						PCIE20_PARF_BDF_TRANSLATE_N +
+						dev->pcidev_table[i].short_bdf
+						* 4,
+						dev->pcidev_table[i].bdf >> 16);
+				}
+			}
+		}
+	}
+}
+
+static void msm_pcie_config_link_state(struct msm_pcie_dev_t *dev)
+{
+	u32 val;
+	u32 current_offset;
+	u32 ep_l1sub_ctrl1_offset = 0;
+	u32 ep_l1sub_cap_reg1_offset = 0;
+	u32 ep_link_cap_offset = 0;
+	u32 ep_link_ctrlstts_offset = 0;
+	u32 ep_dev_ctrl2stts2_offset = 0;
+
+	/* Enable the AUX Clock and the Core Clock to be synchronous for L1SS */
+	if (!dev->aux_clk_sync && dev->l1ss_supported)
+		msm_pcie_write_mask(dev->parf +
+				PCIE20_PARF_SYS_CTRL, BIT(3), 0);
+
+	current_offset = readl_relaxed(dev->conf + PCIE_CAP_PTR_OFFSET) & 0xff;
+
+	while (current_offset) {
+		if (msm_pcie_check_align(dev, current_offset))
+			return;
+
+		val = readl_relaxed(dev->conf + current_offset);
+		if ((val & 0xff) == PCIE20_CAP_ID) {
+			ep_link_cap_offset = current_offset + 0x0c;
+			ep_link_ctrlstts_offset = current_offset + 0x10;
+			ep_dev_ctrl2stts2_offset = current_offset + 0x28;
+			break;
+		}
+		current_offset = (val >> 8) & 0xff;
+	}
+
+	if (!ep_link_cap_offset) {
+		PCIE_DBG(dev,
+			"RC%d endpoint does not support PCIe capability registers\n",
+			dev->rc_idx);
+		return;
+	}
+
+	PCIE_DBG(dev, "RC%d: ep_link_cap_offset: 0x%x\n",
+		dev->rc_idx, ep_link_cap_offset);
+
+	if (dev->common_clk_en) {
+		msm_pcie_write_mask(dev->dm_core + PCIE20_CAP_LINKCTRLSTATUS,
+					0, BIT(6));
+
+		msm_pcie_write_mask(dev->conf + ep_link_ctrlstts_offset,
+					0, BIT(6));
+
+		if (dev->shadow_en) {
+			dev->rc_shadow[PCIE20_CAP_LINKCTRLSTATUS / 4] =
+				readl_relaxed(dev->dm_core +
+					PCIE20_CAP_LINKCTRLSTATUS);
+
+			dev->ep_shadow[0][ep_link_ctrlstts_offset / 4] =
+				readl_relaxed(dev->conf +
+					ep_link_ctrlstts_offset);
+		}
+
+		PCIE_DBG2(dev, "RC's CAP_LINKCTRLSTATUS:0x%x\n",
+			readl_relaxed(dev->dm_core +
+			PCIE20_CAP_LINKCTRLSTATUS));
+		PCIE_DBG2(dev, "EP's CAP_LINKCTRLSTATUS:0x%x\n",
+			readl_relaxed(dev->conf + ep_link_ctrlstts_offset));
+	}
+
+	if (dev->clk_power_manage_en) {
+		val = readl_relaxed(dev->conf + ep_link_cap_offset);
+		if (val & BIT(18)) {
+			msm_pcie_write_mask(dev->conf + ep_link_ctrlstts_offset,
+						0, BIT(8));
+
+			if (dev->shadow_en)
+				dev->ep_shadow[0][ep_link_ctrlstts_offset / 4] =
+					readl_relaxed(dev->conf +
+						ep_link_ctrlstts_offset);
+
+			PCIE_DBG2(dev, "EP's CAP_LINKCTRLSTATUS:0x%x\n",
+				readl_relaxed(dev->conf +
+					ep_link_ctrlstts_offset));
+		}
+	}
+
+	if (dev->l0s_supported) {
+		msm_pcie_write_mask(dev->dm_core + PCIE20_CAP_LINKCTRLSTATUS,
+					0, BIT(0));
+		msm_pcie_write_mask(dev->conf + ep_link_ctrlstts_offset,
+					0, BIT(0));
+		if (dev->shadow_en) {
+			dev->rc_shadow[PCIE20_CAP_LINKCTRLSTATUS / 4] =
+						readl_relaxed(dev->dm_core +
+						PCIE20_CAP_LINKCTRLSTATUS);
+			dev->ep_shadow[0][ep_link_ctrlstts_offset / 4] =
+						readl_relaxed(dev->conf +
+						ep_link_ctrlstts_offset);
+		}
+		PCIE_DBG2(dev, "RC's CAP_LINKCTRLSTATUS:0x%x\n",
+			readl_relaxed(dev->dm_core +
+			PCIE20_CAP_LINKCTRLSTATUS));
+		PCIE_DBG2(dev, "EP's CAP_LINKCTRLSTATUS:0x%x\n",
+			readl_relaxed(dev->conf + ep_link_ctrlstts_offset));
+	}
+
+	if (dev->l1_supported) {
+		msm_pcie_write_mask(dev->dm_core + PCIE20_CAP_LINKCTRLSTATUS,
+					0, BIT(1));
+		msm_pcie_write_mask(dev->conf + ep_link_ctrlstts_offset,
+					0, BIT(1));
+		if (dev->shadow_en) {
+			dev->rc_shadow[PCIE20_CAP_LINKCTRLSTATUS / 4] =
+						readl_relaxed(dev->dm_core +
+						PCIE20_CAP_LINKCTRLSTATUS);
+			dev->ep_shadow[0][ep_link_ctrlstts_offset / 4] =
+						readl_relaxed(dev->conf +
+						ep_link_ctrlstts_offset);
+		}
+		PCIE_DBG2(dev, "RC's CAP_LINKCTRLSTATUS:0x%x\n",
+			readl_relaxed(dev->dm_core +
+			PCIE20_CAP_LINKCTRLSTATUS));
+		PCIE_DBG2(dev, "EP's CAP_LINKCTRLSTATUS:0x%x\n",
+			readl_relaxed(dev->conf + ep_link_ctrlstts_offset));
+	}
+
+	if (dev->l1ss_supported) {
+		current_offset = PCIE_EXT_CAP_OFFSET;
+		while (current_offset) {
+			if (msm_pcie_check_align(dev, current_offset))
+				return;
+
+			val = readl_relaxed(dev->conf + current_offset);
+			if ((val & 0xffff) == L1SUB_CAP_ID) {
+				ep_l1sub_cap_reg1_offset = current_offset + 0x4;
+				ep_l1sub_ctrl1_offset = current_offset + 0x8;
+				break;
+			}
+			current_offset = val >> 20;
+		}
+		if (!ep_l1sub_ctrl1_offset) {
+			PCIE_DBG(dev,
+				"RC%d endpoint does not support l1ss registers\n",
+				dev->rc_idx);
+			return;
+		}
+
+		val = readl_relaxed(dev->conf + ep_l1sub_cap_reg1_offset);
+
+		PCIE_DBG2(dev, "EP's L1SUB_CAPABILITY_REG_1: 0x%x\n", val);
+		PCIE_DBG2(dev, "RC%d: ep_l1sub_ctrl1_offset: 0x%x\n",
+				dev->rc_idx, ep_l1sub_ctrl1_offset);
+
+		val &= 0xf;
+
+		msm_pcie_write_reg_field(dev->dm_core, PCIE20_L1SUB_CONTROL1,
+					0xf, val);
+		msm_pcie_write_mask(dev->dm_core +
+					PCIE20_DEVICE_CONTROL2_STATUS2,
+					0, BIT(10));
+		msm_pcie_write_reg_field(dev->conf, ep_l1sub_ctrl1_offset,
+					0xf, val);
+		msm_pcie_write_mask(dev->conf + ep_dev_ctrl2stts2_offset,
+					0, BIT(10));
+		if (dev->shadow_en) {
+			dev->rc_shadow[PCIE20_L1SUB_CONTROL1 / 4] =
+					readl_relaxed(dev->dm_core +
+					PCIE20_L1SUB_CONTROL1);
+			dev->rc_shadow[PCIE20_DEVICE_CONTROL2_STATUS2 / 4] =
+					readl_relaxed(dev->dm_core +
+					PCIE20_DEVICE_CONTROL2_STATUS2);
+			dev->ep_shadow[0][ep_l1sub_ctrl1_offset / 4] =
+					readl_relaxed(dev->conf +
+					ep_l1sub_ctrl1_offset);
+			dev->ep_shadow[0][ep_dev_ctrl2stts2_offset / 4] =
+					readl_relaxed(dev->conf +
+					ep_dev_ctrl2stts2_offset);
+		}
+		PCIE_DBG2(dev, "RC's L1SUB_CONTROL1:0x%x\n",
+			readl_relaxed(dev->dm_core + PCIE20_L1SUB_CONTROL1));
+		PCIE_DBG2(dev, "RC's DEVICE_CONTROL2_STATUS2:0x%x\n",
+			readl_relaxed(dev->dm_core +
+			PCIE20_DEVICE_CONTROL2_STATUS2));
+		PCIE_DBG2(dev, "EP's L1SUB_CONTROL1:0x%x\n",
+			readl_relaxed(dev->conf + ep_l1sub_ctrl1_offset));
+		PCIE_DBG2(dev, "EP's DEVICE_CONTROL2_STATUS2:0x%x\n",
+			readl_relaxed(dev->conf +
+			ep_dev_ctrl2stts2_offset));
+	}
+}
+
+void msm_pcie_config_msi_controller(struct msm_pcie_dev_t *dev)
+{
+	int i;
+
+	PCIE_DBG(dev, "RC%d\n", dev->rc_idx);
+
+	/* program MSI controller and enable all interrupts */
+	writel_relaxed(MSM_PCIE_MSI_PHY, dev->dm_core + PCIE20_MSI_CTRL_ADDR);
+	writel_relaxed(0, dev->dm_core + PCIE20_MSI_CTRL_UPPER_ADDR);
+
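+	/*
+	 * The 12-byte stride skips over the per-group mask and status
+	 * registers that are assumed to follow each enable register.
+	 */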
+	for (i = 0; i < PCIE20_MSI_CTRL_MAX; i++)
+		writel_relaxed(~0, dev->dm_core +
+			       PCIE20_MSI_CTRL_INTR_EN + (i * 12));
+
+	/* ensure that hardware is configured before proceeding */
+	wmb();
+}
+
+static int msm_pcie_get_resources(struct msm_pcie_dev_t *dev,
+					struct platform_device *pdev)
+{
+	int i, len, cnt, ret = 0, size = 0;
+	struct msm_pcie_vreg_info_t *vreg_info;
+	struct msm_pcie_gpio_info_t *gpio_info;
+	struct msm_pcie_clk_info_t  *clk_info;
+	struct resource *res;
+	struct msm_pcie_res_info_t *res_info;
+	struct msm_pcie_irq_info_t *irq_info;
+	struct msm_pcie_irq_info_t *msi_info;
+	struct msm_pcie_reset_info_t *reset_info;
+	struct msm_pcie_reset_info_t *pipe_reset_info;
+	char prop_name[MAX_PROP_SIZE];
+	const __be32 *prop;
+	u32 *clkfreq = NULL;
+
+	PCIE_DBG(dev, "RC%d: entry\n", dev->rc_idx);
+
+	cnt = of_property_count_strings((&pdev->dev)->of_node,
+			"clock-names");
+	if (cnt > 0) {
+		clkfreq = kcalloc(MSM_PCIE_MAX_CLK + MSM_PCIE_MAX_PIPE_CLK,
+					sizeof(*clkfreq), GFP_KERNEL);
+		if (!clkfreq) {
+			PCIE_ERR(dev, "PCIe: memory alloc failed for RC%d\n",
+					dev->rc_idx);
+			return -ENOMEM;
+		}
+		ret = of_property_read_u32_array(
+			(&pdev->dev)->of_node,
+			"max-clock-frequency-hz", clkfreq, cnt);
+		if (ret) {
+			PCIE_ERR(dev,
+				"PCIe: invalid max-clock-frequency-hz property for RC%d:%d\n",
+				dev->rc_idx, ret);
+			goto out;
+		}
+	}
+
+	for (i = 0; i < MSM_PCIE_MAX_VREG; i++) {
+		vreg_info = &dev->vreg[i];
+		vreg_info->hdl =
+				devm_regulator_get(&pdev->dev, vreg_info->name);
+
+		if (PTR_ERR(vreg_info->hdl) == -EPROBE_DEFER) {
+			PCIE_DBG(dev, "EPROBE_DEFER for VReg:%s\n",
+				vreg_info->name);
+			ret = PTR_ERR(vreg_info->hdl);
+			goto out;
+		}
+
+		if (IS_ERR(vreg_info->hdl)) {
+			if (vreg_info->required) {
+				PCIE_DBG(dev, "Vreg %s doesn't exist\n",
+					vreg_info->name);
+				ret = PTR_ERR(vreg_info->hdl);
+				goto out;
+			} else {
+				PCIE_DBG(dev,
+					"Optional Vreg %s doesn't exist\n",
+					vreg_info->name);
+				vreg_info->hdl = NULL;
+			}
+		} else {
+			dev->vreg_n++;
+			snprintf(prop_name, MAX_PROP_SIZE,
+				"qcom,%s-voltage-level", vreg_info->name);
+			prop = of_get_property((&pdev->dev)->of_node,
+						prop_name, &len);
+			if (!prop || (len != (3 * sizeof(__be32)))) {
+				PCIE_DBG(dev, "%s %s property\n",
+					prop ? "invalid format" :
+					"no", prop_name);
+			} else {
+				vreg_info->max_v = be32_to_cpup(&prop[0]);
+				vreg_info->min_v = be32_to_cpup(&prop[1]);
+				vreg_info->opt_mode =
+					be32_to_cpup(&prop[2]);
+			}
+		}
+	}
+
+	dev->gdsc = devm_regulator_get(&pdev->dev, "gdsc-vdd");
+
+	if (IS_ERR(dev->gdsc)) {
+		PCIE_ERR(dev, "PCIe: RC%d Failed to get %s GDSC:%ld\n",
+			dev->rc_idx, dev->pdev->name, PTR_ERR(dev->gdsc));
+		if (PTR_ERR(dev->gdsc) == -EPROBE_DEFER)
+			PCIE_DBG(dev, "PCIe: EPROBE_DEFER for %s GDSC\n",
+					dev->pdev->name);
+		ret = PTR_ERR(dev->gdsc);
+		goto out;
+	}
+
+	dev->gdsc_smmu = devm_regulator_get(&pdev->dev, "gdsc-smmu");
+
+	if (IS_ERR(dev->gdsc_smmu)) {
+		PCIE_DBG(dev, "PCIe: RC%d SMMU GDSC does not exist",
+			dev->rc_idx);
+		dev->gdsc_smmu = NULL;
+	}
+
+	dev->gpio_n = 0;
+	for (i = 0; i < MSM_PCIE_MAX_GPIO; i++) {
+		gpio_info = &dev->gpio[i];
+		ret = of_get_named_gpio((&pdev->dev)->of_node,
+					gpio_info->name, 0);
+		if (ret >= 0) {
+			gpio_info->num = ret;
+			dev->gpio_n++;
+			PCIE_DBG(dev, "GPIO num for %s is %d\n",
+				gpio_info->name, gpio_info->num);
+		} else {
+			if (gpio_info->required) {
+				PCIE_ERR(dev,
+					"Could not get required GPIO %s\n",
+					gpio_info->name);
+				goto out;
+			} else {
+				PCIE_DBG(dev,
+					"Could not get optional GPIO %s\n",
+					gpio_info->name);
+			}
+		}
+		ret = 0;
+	}
+
+	of_get_property(pdev->dev.of_node, "qcom,phy-sequence", &size);
+	if (size) {
+		dev->phy_sequence = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
+
+		if (dev->phy_sequence) {
+			dev->phy_len =
+				size / sizeof(*dev->phy_sequence);
+
+			of_property_read_u32_array(pdev->dev.of_node,
+				"qcom,phy-sequence",
+				(unsigned int *)dev->phy_sequence,
+				size / sizeof(dev->phy_sequence->offset));
+		} else {
+			PCIE_ERR(dev,
+				"RC%d: Could not allocate memory for phy init sequence.\n",
+				dev->rc_idx);
+			ret = -ENOMEM;
+			goto out;
+		}
+	} else {
+		PCIE_DBG(dev, "RC%d: phy sequence is not present in DT\n",
+			dev->rc_idx);
+	}
+
+	of_get_property(pdev->dev.of_node, "qcom,port-phy-sequence", &size);
+	if (size) {
+		dev->port_phy_sequence = devm_kzalloc(&pdev->dev, size,
+							GFP_KERNEL);
+
+		if (dev->port_phy_sequence) {
+			dev->port_phy_len =
+				size / sizeof(*dev->port_phy_sequence);
+
+			of_property_read_u32_array(pdev->dev.of_node,
+				"qcom,port-phy-sequence",
+				(unsigned int *)dev->port_phy_sequence,
+				size / sizeof(dev->port_phy_sequence->offset));
+		} else {
+			PCIE_ERR(dev,
+				"RC%d: Could not allocate memory for port phy init sequence.\n",
+				dev->rc_idx);
+			ret = -ENOMEM;
+			goto out;
+		}
+	} else {
+		PCIE_DBG(dev, "RC%d: port phy sequence is not present in DT\n",
+			dev->rc_idx);
+	}
+
+	for (i = 0; i < MSM_PCIE_MAX_CLK; i++) {
+		clk_info = &dev->clk[i];
+
+		clk_info->hdl = devm_clk_get(&pdev->dev, clk_info->name);
+
+		if (IS_ERR(clk_info->hdl)) {
+			if (clk_info->required) {
+				PCIE_DBG(dev, "Clock %s isn't available:%ld\n",
+				clk_info->name, PTR_ERR(clk_info->hdl));
+				ret = PTR_ERR(clk_info->hdl);
+				goto out;
+			} else {
+				PCIE_DBG(dev, "Ignoring Clock %s\n",
+					clk_info->name);
+				clk_info->hdl = NULL;
+			}
+		} else {
+			if (clkfreq != NULL) {
+				clk_info->freq = clkfreq[i +
+					MSM_PCIE_MAX_PIPE_CLK];
+				PCIE_DBG(dev, "Freq of Clock %s is:%d\n",
+					clk_info->name, clk_info->freq);
+			}
+		}
+	}
+
+	for (i = 0; i < MSM_PCIE_MAX_PIPE_CLK; i++) {
+		clk_info = &dev->pipeclk[i];
+
+		clk_info->hdl = devm_clk_get(&pdev->dev, clk_info->name);
+
+		if (IS_ERR(clk_info->hdl)) {
+			if (clk_info->required) {
+				PCIE_DBG(dev, "Clock %s isn't available:%ld\n",
+				clk_info->name, PTR_ERR(clk_info->hdl));
+				ret = PTR_ERR(clk_info->hdl);
+				goto out;
+			} else {
+				PCIE_DBG(dev, "Ignoring Clock %s\n",
+					clk_info->name);
+				clk_info->hdl = NULL;
+			}
+		} else {
+			if (clkfreq != NULL) {
+				clk_info->freq = clkfreq[i];
+				PCIE_DBG(dev, "Freq of Clock %s is:%d\n",
+					clk_info->name, clk_info->freq);
+			}
+		}
+	}
+
+	for (i = 0; i < MSM_PCIE_MAX_RESET; i++) {
+		reset_info = &dev->reset[i];
+
+		reset_info->hdl = devm_reset_control_get(&pdev->dev,
+						reset_info->name);
+
+		if (IS_ERR(reset_info->hdl)) {
+			if (reset_info->required) {
+				PCIE_DBG(dev,
+					"Reset %s isn't available:%ld\n",
+					reset_info->name,
+					PTR_ERR(reset_info->hdl));
+
+				ret = PTR_ERR(reset_info->hdl);
+				reset_info->hdl = NULL;
+				goto out;
+			} else {
+				PCIE_DBG(dev, "Ignoring Reset %s\n",
+					reset_info->name);
+				reset_info->hdl = NULL;
+			}
+		}
+	}
+
+	for (i = 0; i < MSM_PCIE_MAX_PIPE_RESET; i++) {
+		pipe_reset_info = &dev->pipe_reset[i];
+
+		pipe_reset_info->hdl = devm_reset_control_get(&pdev->dev,
+						pipe_reset_info->name);
+
+		if (IS_ERR(pipe_reset_info->hdl)) {
+			if (pipe_reset_info->required) {
+				PCIE_DBG(dev,
+					"Pipe Reset %s isn't available:%ld\n",
+					pipe_reset_info->name,
+					PTR_ERR(pipe_reset_info->hdl));
+
+				ret = PTR_ERR(pipe_reset_info->hdl);
+				pipe_reset_info->hdl = NULL;
+				goto out;
+			} else {
+				PCIE_DBG(dev, "Ignoring Pipe Reset %s\n",
+					pipe_reset_info->name);
+				pipe_reset_info->hdl = NULL;
+			}
+		}
+	}
+
+	dev->bus_scale_table = msm_bus_cl_get_pdata(pdev);
+	if (!dev->bus_scale_table) {
+		PCIE_DBG(dev, "PCIe: No bus scale table for RC%d (%s)\n",
+			dev->rc_idx, dev->pdev->name);
+		dev->bus_client = 0;
+	} else {
+		dev->bus_client =
+			msm_bus_scale_register_client(dev->bus_scale_table);
+		if (!dev->bus_client) {
+			PCIE_ERR(dev,
+				"PCIe: Failed to register bus client for RC%d (%s)\n",
+				dev->rc_idx, dev->pdev->name);
+			msm_bus_cl_clear_pdata(dev->bus_scale_table);
+			ret = -ENODEV;
+			goto out;
+		}
+	}
+
+	for (i = 0; i < MSM_PCIE_MAX_RES; i++) {
+		res_info = &dev->res[i];
+
+		res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+							   res_info->name);
+
+		if (!res) {
+			PCIE_ERR(dev, "PCIe: RC%d can't get %s resource.\n",
+				dev->rc_idx, res_info->name);
+		} else {
+			PCIE_DBG(dev, "start addr for %s is %pa.\n",
+				res_info->name,	&res->start);
+
+			res_info->base = devm_ioremap(&pdev->dev,
+						res->start, resource_size(res));
+			if (!res_info->base) {
+				PCIE_ERR(dev, "PCIe: RC%d can't remap %s.\n",
+					dev->rc_idx, res_info->name);
+				ret = -ENOMEM;
+				goto out;
+			} else {
+				res_info->resource = res;
+			}
+		}
+	}
+
+	for (i = 0; i < MSM_PCIE_MAX_IRQ; i++) {
+		irq_info = &dev->irq[i];
+
+		res = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
+							   irq_info->name);
+
+		if (!res) {
+			PCIE_DBG(dev, "PCIe: RC%d can't find IRQ # for %s.\n",
+				dev->rc_idx, irq_info->name);
+		} else {
+			irq_info->num = res->start;
+			PCIE_DBG(dev, "IRQ # for %s is %d.\n", irq_info->name,
+					irq_info->num);
+		}
+	}
+
+	for (i = 0; i < MSM_PCIE_MAX_MSI; i++) {
+		msi_info = &dev->msi[i];
+
+		res = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
+							   msi_info->name);
+
+		if (!res) {
+			PCIE_DBG(dev, "PCIe: RC%d can't find IRQ # for %s.\n",
+				dev->rc_idx, msi_info->name);
+		} else {
+			msi_info->num = res->start;
+			PCIE_DBG(dev, "IRQ # for %s is %d.\n", msi_info->name,
+					msi_info->num);
+		}
+	}
+
+	/* All allocations succeeded */
+
+	if (dev->gpio[MSM_PCIE_GPIO_WAKE].num)
+		dev->wake_n = gpio_to_irq(dev->gpio[MSM_PCIE_GPIO_WAKE].num);
+	else
+		dev->wake_n = 0;
+
+	dev->parf = dev->res[MSM_PCIE_RES_PARF].base;
+	dev->phy = dev->res[MSM_PCIE_RES_PHY].base;
+	dev->elbi = dev->res[MSM_PCIE_RES_ELBI].base;
+	dev->dm_core = dev->res[MSM_PCIE_RES_DM_CORE].base;
+	dev->conf = dev->res[MSM_PCIE_RES_CONF].base;
+	dev->bars = dev->res[MSM_PCIE_RES_BARS].base;
+	dev->tcsr = dev->res[MSM_PCIE_RES_TCSR].base;
+	dev->dev_mem_res = dev->res[MSM_PCIE_RES_BARS].resource;
+	dev->dev_io_res = dev->res[MSM_PCIE_RES_IO].resource;
+	dev->dev_io_res->flags = IORESOURCE_IO;
+
+out:
+	kfree(clkfreq);
+
+	PCIE_DBG(dev, "RC%d: exit\n", dev->rc_idx);
+
+	return ret;
+}
+
+static void msm_pcie_release_resources(struct msm_pcie_dev_t *dev)
+{
+	dev->parf = NULL;
+	dev->elbi = NULL;
+	dev->dm_core = NULL;
+	dev->conf = NULL;
+	dev->bars = NULL;
+	dev->tcsr = NULL;
+	dev->dev_mem_res = NULL;
+	dev->dev_io_res = NULL;
+}
+
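+/*
+ * Power up one root complex and train the link: assert PERST, enable the
+ * supplies/clocks selected by @options, initialize the PHY, de-assert
+ * PERST and poll the ELBI status register for link-up. On failure the
+ * PHY, pipe clocks, core clocks and regulators are unwound in order.
+ */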
+int msm_pcie_enable(struct msm_pcie_dev_t *dev, u32 options)
+{
+	int ret = 0;
+	u32 val;
+	long retries = 0;
+	int link_check_count = 0;
+
+	PCIE_DBG(dev, "RC%d: entry\n", dev->rc_idx);
+
+	mutex_lock(&dev->setup_lock);
+
+	if (dev->link_status == MSM_PCIE_LINK_ENABLED) {
+		PCIE_ERR(dev, "PCIe: the link of RC%d is already enabled\n",
+			dev->rc_idx);
+		goto out;
+	}
+
+	/* assert PERST to keep the endpoint in reset */
+
+	PCIE_INFO(dev, "PCIe: Assert the reset of endpoint of RC%d.\n",
+		dev->rc_idx);
+	gpio_set_value(dev->gpio[MSM_PCIE_GPIO_PERST].num,
+				dev->gpio[MSM_PCIE_GPIO_PERST].on);
+	usleep_range(PERST_PROPAGATION_DELAY_US_MIN,
+				 PERST_PROPAGATION_DELAY_US_MAX);
+
+	/* enable power */
+
+	if (options & PM_VREG) {
+		ret = msm_pcie_vreg_init(dev);
+		if (ret)
+			goto out;
+	}
+
+	/* enable clocks */
+	if (options & PM_CLK) {
+		ret = msm_pcie_clk_init(dev);
+		wmb();
+		if (ret)
+			goto clk_fail;
+	}
+
+	if (dev->scm_dev_id) {
+		PCIE_DBG(dev, "RC%d: restoring sec config\n", dev->rc_idx);
+		msm_pcie_restore_sec_config(dev);
+	}
+
+	/* enable PCIe clocks and resets */
+	msm_pcie_write_mask(dev->parf + PCIE20_PARF_PHY_CTRL, BIT(0), 0);
+
+	/* change DBI base address */
+	writel_relaxed(0, dev->parf + PCIE20_PARF_DBI_BASE_ADDR);
+
+	writel_relaxed(0x365E, dev->parf + PCIE20_PARF_SYS_CTRL);
+
+	msm_pcie_write_mask(dev->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL,
+				0, BIT(4));
+
+	/* enable selected IRQ */
+	if (dev->irq[MSM_PCIE_INT_GLOBAL_INT].num) {
+		msm_pcie_write_reg(dev->parf, PCIE20_PARF_INT_ALL_MASK, 0);
+
+		msm_pcie_write_mask(dev->parf + PCIE20_PARF_INT_ALL_MASK, 0,
+					BIT(MSM_PCIE_INT_EVT_LINK_DOWN) |
+					BIT(MSM_PCIE_INT_EVT_AER_LEGACY) |
+					BIT(MSM_PCIE_INT_EVT_AER_ERR) |
+					BIT(MSM_PCIE_INT_EVT_MSI_0) |
+					BIT(MSM_PCIE_INT_EVT_MSI_1) |
+					BIT(MSM_PCIE_INT_EVT_MSI_2) |
+					BIT(MSM_PCIE_INT_EVT_MSI_3) |
+					BIT(MSM_PCIE_INT_EVT_MSI_4) |
+					BIT(MSM_PCIE_INT_EVT_MSI_5) |
+					BIT(MSM_PCIE_INT_EVT_MSI_6) |
+					BIT(MSM_PCIE_INT_EVT_MSI_7));
+
+		PCIE_DBG(dev, "PCIe: RC%d: PCIE20_PARF_INT_ALL_MASK: 0x%x\n",
+			dev->rc_idx,
+			readl_relaxed(dev->parf + PCIE20_PARF_INT_ALL_MASK));
+	}
+
+	if (dev->dev_mem_res->end - dev->dev_mem_res->start > SZ_16M)
+		writel_relaxed(SZ_32M, dev->parf +
+			PCIE20_PARF_SLV_ADDR_SPACE_SIZE);
+	else if (dev->dev_mem_res->end - dev->dev_mem_res->start > SZ_8M)
+		writel_relaxed(SZ_16M, dev->parf +
+			PCIE20_PARF_SLV_ADDR_SPACE_SIZE);
+	else
+		writel_relaxed(SZ_8M, dev->parf +
+			PCIE20_PARF_SLV_ADDR_SPACE_SIZE);
+
+	if (dev->use_msi) {
+		PCIE_DBG(dev, "RC%d: enable WR halt.\n", dev->rc_idx);
+		val = dev->wr_halt_size ? dev->wr_halt_size :
+			readl_relaxed(dev->parf +
+				PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT);
+
+		msm_pcie_write_reg(dev->parf,
+			PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT,
+			BIT(31) | val);
+
+		PCIE_DBG(dev,
+			"RC%d: PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT: 0x%x.\n",
+			dev->rc_idx,
+			readl_relaxed(dev->parf +
+				PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT));
+	}
+
+	mutex_lock(&com_phy_lock);
+	/* init PCIe PHY */
+	if (!num_rc_on)
+		pcie_phy_init(dev);
+
+	num_rc_on++;
+	mutex_unlock(&com_phy_lock);
+
+	if (options & PM_PIPE_CLK) {
+		usleep_range(PHY_STABILIZATION_DELAY_US_MIN,
+					 PHY_STABILIZATION_DELAY_US_MAX);
+		/* Enable the pipe clock */
+		ret = msm_pcie_pipe_clk_init(dev);
+		wmb();
+		if (ret)
+			goto link_fail;
+	}
+
+	PCIE_DBG(dev, "RC%d: waiting for phy ready...\n", dev->rc_idx);
+
+	do {
+		if (pcie_phy_is_ready(dev))
+			break;
+		retries++;
+		usleep_range(REFCLK_STABILIZATION_DELAY_US_MIN,
+					 REFCLK_STABILIZATION_DELAY_US_MAX);
+	} while (retries < PHY_READY_TIMEOUT_COUNT);
+
+	PCIE_DBG(dev, "RC%d: number of PHY retries:%ld.\n",
+		dev->rc_idx, retries);
+
+	if (pcie_phy_is_ready(dev))
+		PCIE_INFO(dev, "PCIe RC%d PHY is ready!\n", dev->rc_idx);
+	else {
+		PCIE_ERR(dev, "PCIe PHY RC%d failed to come up!\n",
+			dev->rc_idx);
+		ret = -ENODEV;
+		pcie_phy_dump(dev);
+		goto link_fail;
+	}
+
+	pcie_pcs_port_phy_init(dev);
+
+	if (dev->ep_latency)
+		usleep_range(dev->ep_latency * 1000, dev->ep_latency * 1000);
+
+	if (dev->gpio[MSM_PCIE_GPIO_EP].num)
+		gpio_set_value(dev->gpio[MSM_PCIE_GPIO_EP].num,
+				dev->gpio[MSM_PCIE_GPIO_EP].on);
+
+	/* de-assert PERST to bring the endpoint out of reset */
+
+	PCIE_INFO(dev, "PCIe: Release the reset of endpoint of RC%d.\n",
+		dev->rc_idx);
+	gpio_set_value(dev->gpio[MSM_PCIE_GPIO_PERST].num,
+				1 - dev->gpio[MSM_PCIE_GPIO_PERST].on);
+	usleep_range(dev->perst_delay_us_min, dev->perst_delay_us_max);
+
+	/* set max tlp read size */
+	msm_pcie_write_reg_field(dev->dm_core, PCIE20_DEVICE_CONTROL_STATUS,
+				0x7000, dev->tlp_rd_size);
+
+	/* enable link training */
+	msm_pcie_write_mask(dev->parf + PCIE20_PARF_LTSSM, 0, BIT(8));
+
+	PCIE_DBG(dev, "check if link is up\n");
+
+	/* Wait for up to 100ms for the link to come up */
+	do {
+		usleep_range(LINK_UP_TIMEOUT_US_MIN, LINK_UP_TIMEOUT_US_MAX);
+		val =  readl_relaxed(dev->elbi + PCIE20_ELBI_SYS_STTS);
+		PCIE_DBG(dev, "PCIe RC%d: LTSSM_STATE:0x%x\n",
+			dev->rc_idx, (val >> 12) & 0x3f);
+	} while ((!(val & XMLH_LINK_UP) ||
+		!msm_pcie_confirm_linkup(dev, false, false, NULL))
+		&& (link_check_count++ < LINK_UP_CHECK_MAX_COUNT));
+
+	if ((val & XMLH_LINK_UP) &&
+		msm_pcie_confirm_linkup(dev, false, false, NULL)) {
+		PCIE_DBG(dev, "Link is up after %d checks\n",
+			link_check_count);
+		PCIE_INFO(dev, "PCIe RC%d link initialized\n", dev->rc_idx);
+	} else {
+		PCIE_INFO(dev, "PCIe: Assert the reset of endpoint of RC%d.\n",
+			dev->rc_idx);
+		gpio_set_value(dev->gpio[MSM_PCIE_GPIO_PERST].num,
+			dev->gpio[MSM_PCIE_GPIO_PERST].on);
+		PCIE_ERR(dev, "PCIe RC%d link initialization failed\n",
+			dev->rc_idx);
+		ret = -ENODEV;
+		goto link_fail;
+	}
+
+	if (dev->switch_latency) {
+		PCIE_DBG(dev, "switch_latency: %dms\n",
+			dev->switch_latency);
+		if (dev->switch_latency <= SWITCH_DELAY_MAX)
+			usleep_range(dev->switch_latency * 1000,
+				dev->switch_latency * 1000);
+		else
+			msleep(dev->switch_latency);
+	}
+
+	msm_pcie_config_controller(dev);
+
+	if (!dev->msi_gicm_addr)
+		msm_pcie_config_msi_controller(dev);
+
+	msm_pcie_config_link_state(dev);
+
+	dev->link_status = MSM_PCIE_LINK_ENABLED;
+	dev->power_on = true;
+	dev->suspending = false;
+	dev->link_turned_on_counter++;
+
+	goto out;
+
+link_fail:
+	if (dev->gpio[MSM_PCIE_GPIO_EP].num)
+		gpio_set_value(dev->gpio[MSM_PCIE_GPIO_EP].num,
+				1 - dev->gpio[MSM_PCIE_GPIO_EP].on);
+	msm_pcie_write_reg(dev->phy,
+		PCIE_N_SW_RESET(dev->rc_idx, dev->common_phy), 0x1);
+	msm_pcie_write_reg(dev->phy,
+		PCIE_N_POWER_DOWN_CONTROL(dev->rc_idx, dev->common_phy), 0);
+
+	mutex_lock(&com_phy_lock);
+	num_rc_on--;
+	if (!num_rc_on && dev->common_phy) {
+		PCIE_DBG(dev, "PCIe: RC%d is powering down the common phy\n",
+			dev->rc_idx);
+		msm_pcie_write_reg(dev->phy, PCIE_COM_SW_RESET, 0x1);
+		msm_pcie_write_reg(dev->phy, PCIE_COM_POWER_DOWN_CONTROL, 0);
+	}
+	mutex_unlock(&com_phy_lock);
+
+	msm_pcie_pipe_clk_deinit(dev);
+	msm_pcie_clk_deinit(dev);
+clk_fail:
+	msm_pcie_vreg_deinit(dev);
+out:
+	mutex_unlock(&dev->setup_lock);
+
+	PCIE_DBG(dev, "RC%d: exit\n", dev->rc_idx);
+
+	return ret;
+}
+
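+/*
+ * Power down one root complex: assert PERST, reset and power off the
+ * port PHY (and the common PHY once the last RC goes down), then
+ * release the clocks and regulators selected by @options.
+ */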
+void msm_pcie_disable(struct msm_pcie_dev_t *dev, u32 options)
+{
+	PCIE_DBG(dev, "RC%d: entry\n", dev->rc_idx);
+
+	mutex_lock(&dev->setup_lock);
+
+	if (!dev->power_on) {
+		PCIE_DBG(dev,
+			"PCIe: the link of RC%d is already powered down.\n",
+			dev->rc_idx);
+		mutex_unlock(&dev->setup_lock);
+		return;
+	}
+
+	dev->link_status = MSM_PCIE_LINK_DISABLED;
+	dev->power_on = false;
+	dev->link_turned_off_counter++;
+
+	PCIE_INFO(dev, "PCIe: Assert the reset of endpoint of RC%d.\n",
+		dev->rc_idx);
+
+	gpio_set_value(dev->gpio[MSM_PCIE_GPIO_PERST].num,
+				dev->gpio[MSM_PCIE_GPIO_PERST].on);
+
+	msm_pcie_write_reg(dev->phy,
+		PCIE_N_SW_RESET(dev->rc_idx, dev->common_phy), 0x1);
+	msm_pcie_write_reg(dev->phy,
+		PCIE_N_POWER_DOWN_CONTROL(dev->rc_idx, dev->common_phy), 0);
+
+	mutex_lock(&com_phy_lock);
+	num_rc_on--;
+	if (!num_rc_on && dev->common_phy) {
+		PCIE_DBG(dev, "PCIe: RC%d is powering down the common phy\n",
+			dev->rc_idx);
+		msm_pcie_write_reg(dev->phy, PCIE_COM_SW_RESET, 0x1);
+		msm_pcie_write_reg(dev->phy, PCIE_COM_POWER_DOWN_CONTROL, 0);
+	}
+	mutex_unlock(&com_phy_lock);
+
+	if (options & PM_CLK) {
+		msm_pcie_write_mask(dev->parf + PCIE20_PARF_PHY_CTRL, 0,
+					BIT(0));
+		msm_pcie_clk_deinit(dev);
+	}
+
+	if (options & PM_VREG)
+		msm_pcie_vreg_deinit(dev);
+
+	if (options & PM_PIPE_CLK)
+		msm_pcie_pipe_clk_deinit(dev);
+
+	if (dev->gpio[MSM_PCIE_GPIO_EP].num)
+		gpio_set_value(dev->gpio[MSM_PCIE_GPIO_EP].num,
+				1 - dev->gpio[MSM_PCIE_GPIO_EP].on);
+
+	mutex_unlock(&dev->setup_lock);
+
+	PCIE_DBG(dev, "RC%d: exit\n", dev->rc_idx);
+}
+
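+/*
+ * Walk the endpoint's capability list to find its PCIe capability and
+ * enable all four error-reporting bits in its Device Control register
+ * so the endpoint forwards AER events to the root complex.
+ */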
+static void msm_pcie_config_ep_aer(struct msm_pcie_dev_t *dev,
+				struct msm_pcie_device_info *ep_dev_info)
+{
+	u32 val;
+	void __iomem *ep_base = ep_dev_info->conf_base;
+	u32 current_offset = readl_relaxed(ep_base + PCIE_CAP_PTR_OFFSET) &
+						0xff;
+
+	while (current_offset) {
+		if (msm_pcie_check_align(dev, current_offset))
+			return;
+
+		val = readl_relaxed(ep_base + current_offset);
+		if ((val & 0xff) == PCIE20_CAP_ID) {
+			ep_dev_info->dev_ctrlstts_offset =
+				current_offset + 0x8;
+			break;
+		}
+		current_offset = (val >> 8) & 0xff;
+	}
+
+	if (!ep_dev_info->dev_ctrlstts_offset) {
+		PCIE_DBG(dev,
+			"RC%d endpoint does not support PCIe cap registers\n",
+			dev->rc_idx);
+		return;
+	}
+
+	PCIE_DBG2(dev, "RC%d: EP dev_ctrlstts_offset: 0x%x\n",
+		dev->rc_idx, ep_dev_info->dev_ctrlstts_offset);
+
+	/* Enable AER on EP */
+	msm_pcie_write_mask(ep_base + ep_dev_info->dev_ctrlstts_offset, 0,
+				BIT(3)|BIT(2)|BIT(1)|BIT(0));
+
+	PCIE_DBG(dev, "EP's PCIE20_CAP_DEVCTRLSTATUS:0x%x\n",
+		readl_relaxed(ep_base + ep_dev_info->dev_ctrlstts_offset));
+}
+
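+/*
+ * bus_for_each_dev() callback: record an enumerated function in the
+ * global and per-RC device tables, program an iATU window for its
+ * configuration space, enable memory access and bus mastering on
+ * bridges, and set up AER reporting on the endpoint.
+ */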
+static int msm_pcie_config_device_table(struct device *dev, void *pdev)
+{
+	struct pci_dev *pcidev = to_pci_dev(dev);
+	struct msm_pcie_dev_t *pcie_dev = (struct msm_pcie_dev_t *) pdev;
+	struct msm_pcie_device_info *dev_table_t = pcie_dev->pcidev_table;
+	struct resource *axi_conf = pcie_dev->res[MSM_PCIE_RES_CONF].resource;
+	int ret = 0;
+	u32 rc_idx = pcie_dev->rc_idx;
+	u32 i, index;
+	u32 bdf = 0;
+	u8 type;
+	u32 h_type;
+	u32 bme;
+
+	if (!pcidev) {
+		PCIE_ERR(pcie_dev,
+			"PCIe: Did not find PCI device in list for RC%d.\n",
+			pcie_dev->rc_idx);
+		return -ENODEV;
+	} else {
+		PCIE_DBG(pcie_dev,
+			"PCI device found: vendor-id:0x%x device-id:0x%x\n",
+			pcidev->vendor, pcidev->device);
+	}
+
+	if (!pcidev->bus->number)
+		return ret;
+
+	bdf = BDF_OFFSET(pcidev->bus->number, pcidev->devfn);
+	type = pcidev->bus->number == 1 ?
+		PCIE20_CTRL1_TYPE_CFG0 : PCIE20_CTRL1_TYPE_CFG1;
+
+	for (i = 0; i < (MAX_RC_NUM * MAX_DEVICE_NUM); i++) {
+		if (msm_pcie_dev_tbl[i].bdf == bdf &&
+			!msm_pcie_dev_tbl[i].dev) {
+			for (index = 0; index < MAX_DEVICE_NUM; index++) {
+				if (dev_table_t[index].bdf == bdf) {
+					msm_pcie_dev_tbl[i].dev = pcidev;
+					msm_pcie_dev_tbl[i].domain = rc_idx;
+					msm_pcie_dev_tbl[i].conf_base =
+						pcie_dev->conf + index * SZ_4K;
+					msm_pcie_dev_tbl[i].phy_address =
+						axi_conf->start + index * SZ_4K;
+
+					dev_table_t[index].dev = pcidev;
+					dev_table_t[index].domain = rc_idx;
+					dev_table_t[index].conf_base =
+						pcie_dev->conf + index * SZ_4K;
+					dev_table_t[index].phy_address =
+						axi_conf->start + index * SZ_4K;
+
+					msm_pcie_iatu_config(pcie_dev, index,
+						type,
+						dev_table_t[index].phy_address,
+						dev_table_t[index].phy_address
+						+ SZ_4K - 1,
+						bdf);
+
+					h_type = readl_relaxed(
+						dev_table_t[index].conf_base +
+						PCIE20_HEADER_TYPE);
+
+					bme = readl_relaxed(
+						dev_table_t[index].conf_base +
+						PCIE20_COMMAND_STATUS);
+
+					if (h_type & (1 << 16)) {
+						pci_write_config_dword(pcidev,
+							PCIE20_COMMAND_STATUS,
+							bme | 0x06);
+					} else {
+						pcie_dev->num_ep++;
+						dev_table_t[index].registered =
+							false;
+					}
+
+					if (pcie_dev->num_ep > 1)
+						pcie_dev->pending_ep_reg = true;
+
+					msm_pcie_config_ep_aer(pcie_dev,
+						&dev_table_t[index]);
+
+					break;
+				}
+			}
+			if (index == MAX_DEVICE_NUM) {
+				PCIE_ERR(pcie_dev,
+					"RC%d PCI device table is full.\n",
+					rc_idx);
+				ret = index;
+			} else {
+				break;
+			}
+		} else if (msm_pcie_dev_tbl[i].bdf == bdf &&
+			pcidev == msm_pcie_dev_tbl[i].dev) {
+			break;
+		}
+	}
+	if (i == MAX_RC_NUM * MAX_DEVICE_NUM) {
+		PCIE_ERR(pcie_dev,
+			"Global PCI device table is full: %d elements.\n",
+			i);
+		PCIE_ERR(pcie_dev,
+			"Bus number is 0x%x\nDevice number is 0x%x\n",
+			pcidev->bus->number, pcidev->devfn);
+		ret = i;
+	}
+	return ret;
+}
+
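+/*
+ * Assign the next free short BDF to an endpoint, derive its SMMU stream
+ * ID from the RC index and that short BDF, and program the matching
+ * PARF BDF-translation register. Returns 0 or MSM_PCIE_ERROR.
+ */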
+int msm_pcie_configure_sid(struct device *dev, u32 *sid, int *domain)
+{
+	struct pci_dev *pcidev;
+	struct msm_pcie_dev_t *pcie_dev;
+	struct pci_bus *bus;
+	int i;
+	u32 bdf;
+
+	if (!dev) {
+		pr_err("%s: PCIe: endpoint device passed in is NULL\n",
+			__func__);
+		return MSM_PCIE_ERROR;
+	}
+
+	pcidev = to_pci_dev(dev);
+	if (!pcidev) {
+		pr_err("%s: PCIe: PCI device of endpoint is NULL\n",
+			__func__);
+		return MSM_PCIE_ERROR;
+	}
+
+	bus = pcidev->bus;
+	if (!bus) {
+		pr_err("%s: PCIe: Bus of PCI device is NULL\n",
+			__func__);
+		return MSM_PCIE_ERROR;
+	}
+
+	while (!pci_is_root_bus(bus))
+		bus = bus->parent;
+
+	pcie_dev = (struct msm_pcie_dev_t *)(bus->sysdata);
+	if (!pcie_dev) {
+		pr_err("%s: PCIe: Could not get PCIe structure\n",
+			__func__);
+		return MSM_PCIE_ERROR;
+	}
+
+	if (!pcie_dev->smmu_exist) {
+		PCIE_DBG(pcie_dev,
+			"PCIe: RC:%d: smmu does not exist\n",
+			pcie_dev->rc_idx);
+		return MSM_PCIE_ERROR;
+	}
+
+	PCIE_DBG(pcie_dev, "PCIe: RC%d: device address is: %p\n",
+		pcie_dev->rc_idx, dev);
+	PCIE_DBG(pcie_dev, "PCIe: RC%d: PCI device address is: %p\n",
+		pcie_dev->rc_idx, pcidev);
+
+	*domain = pcie_dev->rc_idx;
+
+	if (pcie_dev->current_short_bdf < (MAX_SHORT_BDF_NUM - 1)) {
+		pcie_dev->current_short_bdf++;
+	} else {
+		PCIE_ERR(pcie_dev,
+			"PCIe: RC%d: No more short BDF left\n",
+			pcie_dev->rc_idx);
+		return MSM_PCIE_ERROR;
+	}
+
+	bdf = BDF_OFFSET(pcidev->bus->number, pcidev->devfn);
+
+	for (i = 0; i < MAX_DEVICE_NUM; i++) {
+		if (pcie_dev->pcidev_table[i].bdf == bdf) {
+			*sid = pcie_dev->smmu_sid_base +
+				((pcie_dev->rc_idx << 4) |
+				pcie_dev->current_short_bdf);
+
+			msm_pcie_write_reg(pcie_dev->parf,
+				PCIE20_PARF_BDF_TRANSLATE_N +
+				pcie_dev->current_short_bdf * 4,
+				bdf >> 16);
+
+			pcie_dev->pcidev_table[i].sid = *sid;
+			pcie_dev->pcidev_table[i].short_bdf =
+				pcie_dev->current_short_bdf;
+			break;
+		}
+	}
+
+	if (i == MAX_DEVICE_NUM) {
+		pcie_dev->current_short_bdf--;
+		PCIE_ERR(pcie_dev,
+			"PCIe: RC%d could not find BDF:%d\n",
+			pcie_dev->rc_idx, bdf);
+		return MSM_PCIE_ERROR;
+	}
+
+	PCIE_DBG(pcie_dev,
+		"PCIe: RC%d: Device: %02x:%02x.%01x received SID %d\n",
+		pcie_dev->rc_idx,
+		bdf >> 24,
+		bdf >> 19 & 0x1f,
+		bdf >> 16 & 0x07,
+		*sid);
+
+	return 0;
+}
+EXPORT_SYMBOL(msm_pcie_configure_sid);
+
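+/*
+ * Enumerate one root complex: enable the link, create the root bus from
+ * the DT host bridge resources, scan and configure child devices, and
+ * populate the device tables. Returns -EPROBE_DEFER if the RC has not
+ * been probed yet; calling it again after enumeration only logs an error.
+ */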
+int msm_pcie_enumerate(u32 rc_idx)
+{
+	int ret = 0, bus_ret = 0, scan_ret = 0;
+	struct msm_pcie_dev_t *dev = &msm_pcie_dev[rc_idx];
+
+	mutex_lock(&enumerate_lock);
+
+	PCIE_DBG(dev, "Enumerate RC%d\n", rc_idx);
+
+	if (!dev->drv_ready) {
+		PCIE_DBG(dev, "RC%d has not been successfully probed yet\n",
+			rc_idx);
+		ret = -EPROBE_DEFER;
+		goto out;
+	}
+
+	if (!dev->enumerated) {
+		ret = msm_pcie_enable(dev, PM_ALL);
+
+		/* kick start ARM PCI configuration framework */
+		if (!ret) {
+			struct pci_dev *pcidev = NULL;
+			bool found = false;
+			struct pci_bus *bus;
+			resource_size_t iobase = 0;
+			u32 ids = readl_relaxed(msm_pcie_dev[rc_idx].dm_core);
+			u32 vendor_id = ids & 0xffff;
+			u32 device_id = (ids & 0xffff0000) >> 16;
+			LIST_HEAD(res);
+
+			PCIE_DBG(dev, "vendor-id:0x%x device_id:0x%x\n",
+					vendor_id, device_id);
+
+			ret = of_pci_get_host_bridge_resources(
+						dev->pdev->dev.of_node,
+						0, 0xff, &res, &iobase);
+			if (ret) {
+				PCIE_ERR(dev,
+					"PCIe: failed to get host bridge resources for RC%d: %d\n",
+					dev->rc_idx, ret);
+				goto out;
+			}
+
+			bus = pci_create_root_bus(&dev->pdev->dev, 0,
+						&msm_pcie_ops,
+						msm_pcie_setup_sys_data(dev),
+						&res);
+			if (!bus) {
+				PCIE_ERR(dev,
+					"PCIe: failed to create root bus for RC%d\n",
+					dev->rc_idx);
+				ret = -ENOMEM;
+				goto out;
+			}
+
+			scan_ret = pci_scan_child_bus(bus);
+			PCIE_DBG(dev,
+				"PCIe: RC%d: The max subordinate bus number discovered is %d\n",
+				dev->rc_idx, scan_ret);
+
+			msm_pcie_fixup_irqs(dev);
+			pci_assign_unassigned_bus_resources(bus);
+			pci_bus_add_devices(bus);
+
+			dev->enumerated = true;
+
+			msm_pcie_write_mask(dev->dm_core +
+				PCIE20_COMMAND_STATUS, 0, BIT(2)|BIT(1));
+
+			if (dev->cpl_timeout && dev->bridge_found)
+				msm_pcie_write_reg_field(dev->dm_core,
+					PCIE20_DEVICE_CONTROL2_STATUS2,
+					0xf, dev->cpl_timeout);
+
+			if (dev->shadow_en) {
+				u32 val = readl_relaxed(dev->dm_core +
+						PCIE20_COMMAND_STATUS);
+				PCIE_DBG(dev, "PCIE20_COMMAND_STATUS:0x%x\n",
+					val);
+				dev->rc_shadow[PCIE20_COMMAND_STATUS / 4] = val;
+			}
+
+			do {
+				pcidev = pci_get_device(vendor_id,
+					device_id, pcidev);
+				if (pcidev && (&msm_pcie_dev[rc_idx] ==
+					(struct msm_pcie_dev_t *)
+					PCIE_BUS_PRIV_DATA(pcidev->bus))) {
+					msm_pcie_dev[rc_idx].dev = pcidev;
+					found = true;
+					PCIE_DBG(&msm_pcie_dev[rc_idx],
+						"PCI device is found for RC%d\n",
+						rc_idx);
+				}
+			} while (!found && pcidev);
+
+			if (!pcidev) {
+				PCIE_ERR(dev,
+					"PCIe: Did not find PCI device for RC%d.\n",
+					dev->rc_idx);
+				ret = -ENODEV;
+				goto out;
+			}
+
+			bus_ret = bus_for_each_dev(&pci_bus_type, NULL, dev,
+					&msm_pcie_config_device_table);
+
+			if (bus_ret) {
+				PCIE_ERR(dev,
+					"PCIe: Failed to set up device table for RC%d\n",
+					dev->rc_idx);
+				ret = -ENODEV;
+				goto out;
+			}
+		} else {
+			PCIE_ERR(dev, "PCIe: failed to enable RC%d.\n",
+				dev->rc_idx);
+		}
+	} else {
+		PCIE_ERR(dev, "PCIe: RC%d has already been enumerated.\n",
+			dev->rc_idx);
+	}
+
+out:
+	mutex_unlock(&enumerate_lock);
+
+	return ret;
+}
+EXPORT_SYMBOL(msm_pcie_enumerate);
+
+static void msm_pcie_notify_client(struct msm_pcie_dev_t *dev,
+					enum msm_pcie_event event)
+{
+	if (dev->event_reg && dev->event_reg->callback &&
+		(dev->event_reg->events & event)) {
+		struct msm_pcie_notify *notify = &dev->event_reg->notify;
+
+		notify->event = event;
+		notify->user = dev->event_reg->user;
+		PCIE_DBG(dev, "PCIe: callback RC%d for event %d\n",
+			dev->rc_idx, event);
+		dev->event_reg->callback(notify);
+
+		if ((dev->event_reg->options & MSM_PCIE_CONFIG_NO_RECOVERY) &&
+				(event == MSM_PCIE_EVENT_LINKDOWN)) {
+			dev->user_suspend = true;
+			PCIE_DBG(dev,
+				"PCIe: Client of RC%d will recover the link later.\n",
+				dev->rc_idx);
+			return;
+		}
+	} else {
+		PCIE_DBG2(dev,
+			"PCIe: Client of RC%d does not have registration for event %d\n",
+			dev->rc_idx, event);
+	}
+}
+
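+/*
+ * Workqueue handler for the WAKE# interrupt: enumerate the RC if that
+ * has not happened yet and send linkup callbacks to registered clients.
+ */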
+static void handle_wake_func(struct work_struct *work)
+{
+	int i, ret;
+	struct msm_pcie_dev_t *dev = container_of(work, struct msm_pcie_dev_t,
+					handle_wake_work);
+
+	PCIE_DBG(dev, "PCIe: Wake work for RC%d\n", dev->rc_idx);
+
+	mutex_lock(&dev->recovery_lock);
+
+	if (!dev->enumerated) {
+		PCIE_DBG(dev,
+			"PCIe: Start enumeration for RC%d upon the wake from endpoint.\n",
+			dev->rc_idx);
+
+		ret = msm_pcie_enumerate(dev->rc_idx);
+		if (ret) {
+			PCIE_ERR(dev,
+				"PCIe: failed to enable RC%d upon wake request from the device.\n",
+				dev->rc_idx);
+			goto out;
+		}
+
+		if (dev->num_ep > 1) {
+			for (i = 0; i < MAX_DEVICE_NUM; i++) {
+				dev->event_reg = dev->pcidev_table[i].event_reg;
+
+				if ((dev->link_status == MSM_PCIE_LINK_ENABLED)
+					&& dev->event_reg &&
+					dev->event_reg->callback &&
+					(dev->event_reg->events &
+					MSM_PCIE_EVENT_LINKUP)) {
+					struct msm_pcie_notify *notify =
+						&dev->event_reg->notify;
+					notify->event = MSM_PCIE_EVENT_LINKUP;
+					notify->user = dev->event_reg->user;
+					PCIE_DBG(dev,
+						"PCIe: Linkup callback for RC%d after enumeration is successful in wake IRQ handling\n",
+						dev->rc_idx);
+					dev->event_reg->callback(notify);
+				}
+			}
+		} else {
+			if ((dev->link_status == MSM_PCIE_LINK_ENABLED) &&
+				dev->event_reg && dev->event_reg->callback &&
+				(dev->event_reg->events &
+				MSM_PCIE_EVENT_LINKUP)) {
+				struct msm_pcie_notify *notify =
+						&dev->event_reg->notify;
+				notify->event = MSM_PCIE_EVENT_LINKUP;
+				notify->user = dev->event_reg->user;
+				PCIE_DBG(dev,
+					"PCIe: Linkup callback for RC%d after enumeration is successful in wake IRQ handling\n",
+					dev->rc_idx);
+				dev->event_reg->callback(notify);
+			} else {
+				PCIE_DBG(dev,
+					"PCIe: Client of RC%d does not have registration for linkup event.\n",
+					dev->rc_idx);
+			}
+		}
+		goto out;
+	} else {
+		PCIE_ERR(dev,
+			"PCIe: The enumeration for RC%d has already been done.\n",
+			dev->rc_idx);
+		goto out;
+	}
+
+out:
+	mutex_unlock(&dev->recovery_lock);
+}
+
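+/*
+ * AER interrupt handler: read and clear the RC's correctable,
+ * uncorrectable and root error status, look up the reporting endpoint
+ * by source BDF, clear its status as well, and update the per-severity
+ * error counters.
+ */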
+static irqreturn_t handle_aer_irq(int irq, void *data)
+{
+	struct msm_pcie_dev_t *dev = data;
+
+	int corr_val = 0, uncorr_val = 0, rc_err_status = 0;
+	int ep_corr_val = 0, ep_uncorr_val = 0;
+	int rc_dev_ctrlstts = 0, ep_dev_ctrlstts = 0;
+	u32 ep_dev_ctrlstts_offset = 0;
+	int i, j, ep_src_bdf = 0;
+	void __iomem *ep_base = NULL;
+	unsigned long irqsave_flags;
+
+	PCIE_DBG2(dev,
+		"AER Interrupt handler fired for RC%d irq %d\nrc_corr_counter: %lu\nrc_non_fatal_counter: %lu\nrc_fatal_counter: %lu\nep_corr_counter: %lu\nep_non_fatal_counter: %lu\nep_fatal_counter: %lu\n",
+		dev->rc_idx, irq, dev->rc_corr_counter,
+		dev->rc_non_fatal_counter, dev->rc_fatal_counter,
+		dev->ep_corr_counter, dev->ep_non_fatal_counter,
+		dev->ep_fatal_counter);
+
+	spin_lock_irqsave(&dev->aer_lock, irqsave_flags);
+
+	if (dev->suspending) {
+		PCIE_DBG2(dev,
+			"PCIe: RC%d is currently suspending.\n",
+			dev->rc_idx);
+		spin_unlock_irqrestore(&dev->aer_lock, irqsave_flags);
+		return IRQ_HANDLED;
+	}
+
+	uncorr_val = readl_relaxed(dev->dm_core +
+				PCIE20_AER_UNCORR_ERR_STATUS_REG);
+	corr_val = readl_relaxed(dev->dm_core +
+				PCIE20_AER_CORR_ERR_STATUS_REG);
+	rc_err_status = readl_relaxed(dev->dm_core +
+				PCIE20_AER_ROOT_ERR_STATUS_REG);
+	rc_dev_ctrlstts = readl_relaxed(dev->dm_core +
+				PCIE20_CAP_DEVCTRLSTATUS);
+
+	if (uncorr_val)
+		PCIE_DBG(dev, "RC's PCIE20_AER_UNCORR_ERR_STATUS_REG:0x%x\n",
+				uncorr_val);
+	if (corr_val && (dev->rc_corr_counter < corr_counter_limit))
+		PCIE_DBG(dev, "RC's PCIE20_AER_CORR_ERR_STATUS_REG:0x%x\n",
+				corr_val);
+
+	if ((rc_dev_ctrlstts >> 18) & 0x1)
+		dev->rc_fatal_counter++;
+	if ((rc_dev_ctrlstts >> 17) & 0x1)
+		dev->rc_non_fatal_counter++;
+	if ((rc_dev_ctrlstts >> 16) & 0x1)
+		dev->rc_corr_counter++;
+
+	msm_pcie_write_mask(dev->dm_core + PCIE20_CAP_DEVCTRLSTATUS, 0,
+				BIT(18)|BIT(17)|BIT(16));
+
+	if (dev->link_status == MSM_PCIE_LINK_DISABLED) {
+		PCIE_DBG2(dev, "RC%d link is down\n", dev->rc_idx);
+		goto out;
+	}
+
+	for (i = 0; i < 2; i++) {
+		if (i)
+			ep_src_bdf = readl_relaxed(dev->dm_core +
+				PCIE20_AER_ERR_SRC_ID_REG) & ~0xffff;
+		else
+			ep_src_bdf = (readl_relaxed(dev->dm_core +
+				PCIE20_AER_ERR_SRC_ID_REG) & 0xffff) << 16;
+
+		if (!ep_src_bdf)
+			continue;
+
+		for (j = 0; j < MAX_DEVICE_NUM; j++) {
+			if (ep_src_bdf == dev->pcidev_table[j].bdf) {
+				PCIE_DBG2(dev,
+					"PCIe: %s Error from Endpoint: %02x:%02x.%01x\n",
+					i ? "Uncorrectable" : "Correctable",
+					dev->pcidev_table[j].bdf >> 24,
+					dev->pcidev_table[j].bdf >> 19 & 0x1f,
+					dev->pcidev_table[j].bdf >> 16 & 0x07);
+				ep_base = dev->pcidev_table[j].conf_base;
+				ep_dev_ctrlstts_offset = dev->
+					pcidev_table[j].dev_ctrlstts_offset;
+				break;
+			}
+		}
+
+		if (!ep_base) {
+			PCIE_ERR(dev,
+				"PCIe: RC%d no endpoint found for reported error\n",
+				dev->rc_idx);
+			goto out;
+		}
+
+		ep_uncorr_val = readl_relaxed(ep_base +
+					PCIE20_AER_UNCORR_ERR_STATUS_REG);
+		ep_corr_val = readl_relaxed(ep_base +
+					PCIE20_AER_CORR_ERR_STATUS_REG);
+		ep_dev_ctrlstts = readl_relaxed(ep_base +
+					ep_dev_ctrlstts_offset);
+
+		if (ep_uncorr_val)
+			PCIE_DBG(dev,
+				"EP's PCIE20_AER_UNCORR_ERR_STATUS_REG:0x%x\n",
+				ep_uncorr_val);
+		if (ep_corr_val && (dev->ep_corr_counter < corr_counter_limit))
+			PCIE_DBG(dev,
+				"EP's PCIE20_AER_CORR_ERR_STATUS_REG:0x%x\n",
+				ep_corr_val);
+
+		if ((ep_dev_ctrlstts >> 18) & 0x1)
+			dev->ep_fatal_counter++;
+		if ((ep_dev_ctrlstts >> 17) & 0x1)
+			dev->ep_non_fatal_counter++;
+		if ((ep_dev_ctrlstts >> 16) & 0x1)
+			dev->ep_corr_counter++;
+
+		msm_pcie_write_mask(ep_base + ep_dev_ctrlstts_offset, 0,
+					BIT(18)|BIT(17)|BIT(16));
+
+		msm_pcie_write_reg_field(ep_base,
+				PCIE20_AER_UNCORR_ERR_STATUS_REG,
+				0x3fff031, 0x3fff031);
+		msm_pcie_write_reg_field(ep_base,
+				PCIE20_AER_CORR_ERR_STATUS_REG,
+				0xf1c1, 0xf1c1);
+	}
+out:
+	if (((dev->rc_corr_counter < corr_counter_limit) &&
+		(dev->ep_corr_counter < corr_counter_limit)) ||
+		uncorr_val || ep_uncorr_val)
+		PCIE_DBG(dev, "RC's PCIE20_AER_ROOT_ERR_STATUS_REG:0x%x\n",
+				rc_err_status);
+	msm_pcie_write_reg_field(dev->dm_core,
+			PCIE20_AER_UNCORR_ERR_STATUS_REG,
+			0x3fff031, 0x3fff031);
+	msm_pcie_write_reg_field(dev->dm_core,
+			PCIE20_AER_CORR_ERR_STATUS_REG,
+			0xf1c1, 0xf1c1);
+	msm_pcie_write_reg_field(dev->dm_core,
+			PCIE20_AER_ROOT_ERR_STATUS_REG,
+			0x7f, 0x7f);
+
+	spin_unlock_irqrestore(&dev->aer_lock, irqsave_flags);
+	return IRQ_HANDLED;
+}
+
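+/*
+ * WAKE# GPIO interrupt: either kick off enumeration via the wake work
+ * item or notify registered clients with MSM_PCIE_EVENT_WAKEUP.
+ */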
+static irqreturn_t handle_wake_irq(int irq, void *data)
+{
+	struct msm_pcie_dev_t *dev = data;
+	unsigned long irqsave_flags;
+	int i;
+
+	spin_lock_irqsave(&dev->wakeup_lock, irqsave_flags);
+
+	dev->wake_counter++;
+	PCIE_DBG(dev, "PCIe: No. %ld wake IRQ for RC%d\n",
+			dev->wake_counter, dev->rc_idx);
+
+	PCIE_DBG2(dev, "PCIe WAKE is asserted by Endpoint of RC%d\n",
+		dev->rc_idx);
+
+	if (!dev->enumerated && !(dev->boot_option &
+		MSM_PCIE_NO_WAKE_ENUMERATION)) {
+		PCIE_DBG(dev, "Start enumerating RC%d\n", dev->rc_idx);
+		schedule_work(&dev->handle_wake_work);
+	} else {
+		PCIE_DBG2(dev, "Wake up RC%d\n", dev->rc_idx);
+		__pm_stay_awake(&dev->ws);
+		__pm_relax(&dev->ws);
+
+		if (dev->num_ep > 1) {
+			for (i = 0; i < MAX_DEVICE_NUM; i++) {
+				dev->event_reg =
+					dev->pcidev_table[i].event_reg;
+				msm_pcie_notify_client(dev,
+					MSM_PCIE_EVENT_WAKEUP);
+			}
+		} else {
+			msm_pcie_notify_client(dev, MSM_PCIE_EVENT_WAKEUP);
+		}
+	}
+
+	spin_unlock_irqrestore(&dev->wakeup_lock, irqsave_flags);
+
+	return IRQ_HANDLED;
+}
+
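+/*
+ * Linkdown interrupt: mark the link disabled, drop register shadowing,
+ * assert PERST and notify clients with MSM_PCIE_EVENT_LINKDOWN
+ * (or panic if the user selected linkdown_panic).
+ */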
+static irqreturn_t handle_linkdown_irq(int irq, void *data)
+{
+	struct msm_pcie_dev_t *dev = data;
+	unsigned long irqsave_flags;
+	int i;
+
+	spin_lock_irqsave(&dev->linkdown_lock, irqsave_flags);
+
+	dev->linkdown_counter++;
+
+	PCIE_DBG(dev,
+		"PCIe: No. %ld linkdown IRQ for RC%d.\n",
+		dev->linkdown_counter, dev->rc_idx);
+
+	if (!dev->enumerated || dev->link_status != MSM_PCIE_LINK_ENABLED) {
+		PCIE_DBG(dev,
+			"PCIe: Linkdown IRQ for RC%d when the link is not enabled\n",
+			dev->rc_idx);
+	} else if (dev->suspending) {
+		PCIE_DBG(dev,
+			"PCIe: the link of RC%d is suspending.\n",
+			dev->rc_idx);
+	} else {
+		dev->link_status = MSM_PCIE_LINK_DISABLED;
+		dev->shadow_en = false;
+
+		if (dev->linkdown_panic)
+			panic("User has chosen to panic on linkdown\n");
+
+		/* assert PERST */
+		gpio_set_value(dev->gpio[MSM_PCIE_GPIO_PERST].num,
+				dev->gpio[MSM_PCIE_GPIO_PERST].on);
+		PCIE_ERR(dev, "PCIe link is down for RC%d\n", dev->rc_idx);
+
+		if (dev->num_ep > 1) {
+			for (i = 0; i < MAX_DEVICE_NUM; i++) {
+				dev->event_reg =
+					dev->pcidev_table[i].event_reg;
+				msm_pcie_notify_client(dev,
+					MSM_PCIE_EVENT_LINKDOWN);
+			}
+		} else {
+			msm_pcie_notify_client(dev, MSM_PCIE_EVENT_LINKDOWN);
+		}
+	}
+
+	spin_unlock_irqrestore(&dev->linkdown_lock, irqsave_flags);
+
+	return IRQ_HANDLED;
+}
+
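+/*
+ * Demultiplex the core's MSI status registers: for every set bit, ack
+ * it by writing it back and dispatch the mapped virtual IRQ.
+ */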
+static irqreturn_t handle_msi_irq(int irq, void *data)
+{
+	int i, j;
+	unsigned long val;
+	struct msm_pcie_dev_t *dev = data;
+	void __iomem *ctrl_status;
+
+	PCIE_DUMP(dev, "irq: %d\n", irq);
+
+	/*
+	 * Check for set bits, clear each one by writing it back,
+	 * and trigger the corresponding irq.
+	 */
+	for (i = 0; i < PCIE20_MSI_CTRL_MAX; i++) {
+		ctrl_status = dev->dm_core +
+				PCIE20_MSI_CTRL_INTR_STATUS + (i * 12);
+
+		val = readl_relaxed(ctrl_status);
+		while (val) {
+			j = find_first_bit(&val, 32);
+			writel_relaxed(BIT(j), ctrl_status);
+			/* ensure that interrupt is cleared (acked) */
+			wmb();
+			generic_handle_irq(
+			   irq_find_mapping(dev->irq_domain, (j + (32*i)))
+			   );
+			val = readl_relaxed(ctrl_status);
+		}
+	}
+
+	return IRQ_HANDLED;
+}
+
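+/*
+ * Top-level PARF interrupt: read and clear INT_ALL_STATUS, then fan out
+ * to the linkdown and AER sub-handlers for the bits that are set.
+ */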
+static irqreturn_t handle_global_irq(int irq, void *data)
+{
+	int i;
+	struct msm_pcie_dev_t *dev = data;
+	unsigned long irqsave_flags;
+	u32 status = 0;
+
+	spin_lock_irqsave(&dev->global_irq_lock, irqsave_flags);
+
+	status = readl_relaxed(dev->parf + PCIE20_PARF_INT_ALL_STATUS) &
+			readl_relaxed(dev->parf + PCIE20_PARF_INT_ALL_MASK);
+
+	msm_pcie_write_mask(dev->parf + PCIE20_PARF_INT_ALL_CLEAR, 0, status);
+
+	PCIE_DBG2(dev, "RC%d: Global IRQ %d received: 0x%x\n",
+		dev->rc_idx, irq, status);
+
+	for (i = 0; i <= MSM_PCIE_INT_EVT_MAX; i++) {
+		if (status & BIT(i)) {
+			switch (i) {
+			case MSM_PCIE_INT_EVT_LINK_DOWN:
+				PCIE_DBG(dev,
+					"PCIe: RC%d: handle linkdown event.\n",
+					dev->rc_idx);
+				handle_linkdown_irq(irq, data);
+				break;
+			case MSM_PCIE_INT_EVT_AER_LEGACY:
+				PCIE_DBG(dev,
+					"PCIe: RC%d: AER legacy event.\n",
+					dev->rc_idx);
+				handle_aer_irq(irq, data);
+				break;
+			case MSM_PCIE_INT_EVT_AER_ERR:
+				PCIE_DBG(dev,
+					"PCIe: RC%d: AER event.\n",
+					dev->rc_idx);
+				handle_aer_irq(irq, data);
+				break;
+			default:
+				PCIE_DUMP(dev,
+					"PCIe: RC%d: Unexpected event %d is caught!\n",
+					dev->rc_idx, i);
+			}
+		}
+	}
+
+	spin_unlock_irqrestore(&dev->global_irq_lock, irqsave_flags);
+
+	return IRQ_HANDLED;
+}
+
+static void msm_pcie_unmap_qgic_addr(struct msm_pcie_dev_t *dev,
+					struct pci_dev *pdev)
+{
+	struct iommu_domain *domain = iommu_get_domain_for_dev(&pdev->dev);
+	int bypass_en = 0;
+
+	if (!domain) {
+		PCIE_DBG(dev,
+			"PCIe: RC%d: client does not have an iommu domain\n",
+			dev->rc_idx);
+		return;
+	}
+
+	iommu_domain_get_attr(domain, DOMAIN_ATTR_S1_BYPASS, &bypass_en);
+	if (!bypass_en) {
+		int ret;
+		phys_addr_t pcie_base_addr =
+			dev->res[MSM_PCIE_RES_DM_CORE].resource->start;
+		dma_addr_t iova = rounddown(pcie_base_addr, PAGE_SIZE);
+
+		ret = iommu_unmap(domain, iova, PAGE_SIZE);
+		if (ret != PAGE_SIZE)
+			PCIE_ERR(dev,
+				"PCIe: RC%d: failed to unmap QGIC address. ret = %d\n",
+				dev->rc_idx, ret);
+	}
+}
+
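+/*
+ * Release one MSI vector. For QGIC-routed MSIs the doorbell IOVA is
+ * unmapped when the last vector of the block is freed; in either case
+ * the vector's bit in msi_irq_in_use is cleared.
+ */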
+void msm_pcie_destroy_irq(unsigned int irq, struct pci_dev *pdev)
+{
+	int pos;
+	struct msi_desc *entry = irq_get_msi_desc(irq);
+	struct msi_desc *firstentry;
+	struct msm_pcie_dev_t *dev;
+	u32 nvec;
+	int firstirq;
+
+	if (!pdev)
+		pdev = irq_get_chip_data(irq);
+
+	if (!pdev) {
+		pr_err("PCIe: pci device is null. IRQ:%d\n", irq);
+		return;
+	}
+
+	dev = PCIE_BUS_PRIV_DATA(pdev->bus);
+	if (!dev) {
+		pr_err("PCIe: could not find RC. IRQ:%d\n", irq);
+		return;
+	}
+
+	if (!entry) {
+		PCIE_ERR(dev, "PCIe: RC%d: msi desc is null. IRQ:%d\n",
+			dev->rc_idx, irq);
+		return;
+	}
+
+	firstentry = first_pci_msi_entry(pdev);
+	if (!firstentry) {
+		PCIE_ERR(dev,
+			"PCIe: RC%d: firstentry msi desc is null. IRQ:%d\n",
+			dev->rc_idx, irq);
+		return;
+	}
+
+	firstirq = firstentry->irq;
+	nvec = (1 << entry->msi_attrib.multiple);
+
+	if (dev->msi_gicm_addr) {
+		PCIE_DBG(dev, "destroy QGIC based irq %d\n", irq);
+
+		if (irq < firstirq || irq > firstirq + nvec - 1) {
+			PCIE_ERR(dev,
+				"Could not find irq: %d in RC%d MSI table\n",
+				irq, dev->rc_idx);
+			return;
+		}
+
+		if (irq == firstirq + nvec - 1)
+			msm_pcie_unmap_qgic_addr(dev, pdev);
+		pos = irq - firstirq;
+	} else {
+		PCIE_DBG(dev, "destroy default MSI irq %d\n", irq);
+		pos = irq - irq_find_mapping(dev->irq_domain, 0);
+	}
+
+	PCIE_DBG(dev, "RC%d\n", dev->rc_idx);
+
+	PCIE_DBG(dev, "Before clear_bit pos:%d msi_irq_in_use:%ld\n",
+		pos, *dev->msi_irq_in_use);
+	clear_bit(pos, dev->msi_irq_in_use);
+	PCIE_DBG(dev, "After clear_bit pos:%d msi_irq_in_use:%ld\n",
+		pos, *dev->msi_irq_in_use);
+}
+
+/* hookup to linux pci msi framework */
+void arch_teardown_msi_irq(unsigned int irq)
+{
+	PCIE_GEN_DBG("irq %d deallocated\n", irq);
+	msm_pcie_destroy_irq(irq, NULL);
+}
+
+void arch_teardown_msi_irqs(struct pci_dev *dev)
+{
+	struct msi_desc *entry;
+	struct msm_pcie_dev_t *pcie_dev = PCIE_BUS_PRIV_DATA(dev->bus);
+
+	PCIE_DBG(pcie_dev, "RC:%d EP: vendor_id:0x%x device_id:0x%x\n",
+		pcie_dev->rc_idx, dev->vendor, dev->device);
+
+	pcie_dev->use_msi = false;
+
+	list_for_each_entry(entry, &dev->dev.msi_list, list) {
+		int i, nvec;
+		if (entry->irq == 0)
+			continue;
+		nvec = 1 << entry->msi_attrib.multiple;
+		for (i = 0; i < nvec; i++)
+			msm_pcie_destroy_irq(entry->irq + i, dev);
+	}
+}
+
+static void msm_pcie_msi_nop(struct irq_data *d)
+{
+}
+
+static struct irq_chip pcie_msi_chip = {
+	.name = "msm-pcie-msi",
+	.irq_ack = msm_pcie_msi_nop,
+	.irq_enable = unmask_msi_irq,
+	.irq_disable = mask_msi_irq,
+	.irq_mask = mask_msi_irq,
+	.irq_unmask = unmask_msi_irq,
+};
+
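+/* Claim a free bit in the MSI bitmap and map it in the IRQ domain. */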
+static int msm_pcie_create_irq(struct msm_pcie_dev_t *dev)
+{
+	int irq, pos;
+
+	PCIE_DBG(dev, "RC%d\n", dev->rc_idx);
+
+again:
+	pos = find_first_zero_bit(dev->msi_irq_in_use, PCIE_MSI_NR_IRQS);
+
+	if (pos >= PCIE_MSI_NR_IRQS)
+		return -ENOSPC;
+
+	PCIE_DBG(dev, "pos:%d msi_irq_in_use:%ld\n", pos, *dev->msi_irq_in_use);
+
+	if (test_and_set_bit(pos, dev->msi_irq_in_use))
+		goto again;
+	else
+		PCIE_DBG(dev, "test_and_set_bit is successful pos=%d\n", pos);
+
+	irq = irq_create_mapping(dev->irq_domain, pos);
+	if (!irq) {
+		clear_bit(pos, dev->msi_irq_in_use);
+		return -EINVAL;
+	}
+
+	return irq;
+}
+
+static int arch_setup_msi_irq_default(struct pci_dev *pdev,
+		struct msi_desc *desc, int nvec)
+{
+	int irq;
+	struct msi_msg msg;
+	struct msm_pcie_dev_t *dev = PCIE_BUS_PRIV_DATA(pdev->bus);
+
+	PCIE_DBG(dev, "RC%d\n", dev->rc_idx);
+
+	irq = msm_pcie_create_irq(dev);
+
+	PCIE_DBG(dev, "IRQ %d is allocated.\n", irq);
+
+	if (irq < 0)
+		return irq;
+
+	PCIE_DBG(dev, "irq %d allocated\n", irq);
+
+	irq_set_chip_data(irq, pdev);
+	irq_set_msi_desc(irq, desc);
+
+	/* write msi vector and data */
+	msg.address_hi = 0;
+	msg.address_lo = MSM_PCIE_MSI_PHY;
+	msg.data = irq - irq_find_mapping(dev->irq_domain, 0);
+	write_msi_msg(irq, &msg);
+
+	return 0;
+}
+
+static int msm_pcie_create_irq_qgic(struct msm_pcie_dev_t *dev)
+{
+	int irq, pos;
+
+	PCIE_DBG(dev, "RC%d\n", dev->rc_idx);
+
+again:
+	pos = find_first_zero_bit(dev->msi_irq_in_use, PCIE_MSI_NR_IRQS);
+
+	if (pos >= PCIE_MSI_NR_IRQS)
+		return -ENOSPC;
+
+	PCIE_DBG(dev, "pos:%d msi_irq_in_use:%ld\n", pos, *dev->msi_irq_in_use);
+
+	if (test_and_set_bit(pos, dev->msi_irq_in_use))
+		goto again;
+	else
+		PCIE_DBG(dev, "test_and_set_bit is successful pos=%d\n", pos);
+
+	if (pos >= MSM_PCIE_MAX_MSI) {
+		PCIE_ERR(dev,
+			"PCIe: RC%d: pos %d is not less than %d\n",
+			dev->rc_idx, pos, MSM_PCIE_MAX_MSI);
+		clear_bit(pos, dev->msi_irq_in_use);
+		return MSM_PCIE_ERROR;
+	}
+
+	irq = dev->msi[pos].num;
+	if (!irq) {
+		PCIE_ERR(dev, "PCIe: RC%d failed to create QGIC MSI IRQ.\n",
+			dev->rc_idx);
+		clear_bit(pos, dev->msi_irq_in_use);
+		return -EINVAL;
+	}
+
+	return irq;
+}
+
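+/*
+ * Map the QGIC doorbell page into the client's IOMMU domain (skipped
+ * when stage 1 is in bypass) and rewrite the MSI message address to
+ * point at the resulting IOVA.
+ */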
+static int msm_pcie_map_qgic_addr(struct msm_pcie_dev_t *dev,
+					struct pci_dev *pdev,
+					struct msi_msg *msg)
+{
+	struct iommu_domain *domain = iommu_get_domain_for_dev(&pdev->dev);
+	struct iommu_domain_geometry geometry;
+	int ret, fastmap_en = 0, bypass_en = 0;
+	dma_addr_t iova;
+	phys_addr_t gicm_db_offset;
+
+	msg->address_hi = 0;
+	msg->address_lo = dev->msi_gicm_addr;
+
+	if (!domain) {
+		PCIE_DBG(dev,
+			"PCIe: RC%d: client does not have an iommu domain\n",
+			dev->rc_idx);
+		return 0;
+	}
+
+	iommu_domain_get_attr(domain, DOMAIN_ATTR_S1_BYPASS, &bypass_en);
+
+	PCIE_DBG(dev,
+		"PCIe: RC%d: Stage 1 is %s for endpoint: %04x:%02x\n",
+		dev->rc_idx, bypass_en ? "bypass" : "enabled",
+		pdev->bus->number, pdev->devfn);
+
+	if (bypass_en)
+		return 0;
+
+	iommu_domain_get_attr(domain, DOMAIN_ATTR_FAST, &fastmap_en);
+	if (fastmap_en) {
+		iommu_domain_get_attr(domain, DOMAIN_ATTR_GEOMETRY, &geometry);
+		iova = geometry.aperture_start;
+		PCIE_DBG(dev,
+			"PCIe: RC%d: Use client's IOVA 0x%llx to map QGIC MSI address\n",
+			dev->rc_idx, iova);
+	} else {
+		phys_addr_t pcie_base_addr;
+
+		/*
+		 * Use PCIe DBI address as the IOVA since client cannot
+		 * use this address for their IOMMU mapping. This will
+		 * prevent any conflicts between PCIe host and
+		 * client's mapping.
+		 */
+		pcie_base_addr = dev->res[MSM_PCIE_RES_DM_CORE].resource->start;
+		iova = rounddown(pcie_base_addr, PAGE_SIZE);
+	}
+
+	ret = iommu_map(domain, iova, rounddown(dev->msi_gicm_addr, PAGE_SIZE),
+			PAGE_SIZE, IOMMU_READ | IOMMU_WRITE | IOMMU_DEVICE);
+	if (ret < 0) {
+		PCIE_ERR(dev,
+			"PCIe: RC%d: ret: %d: Could not do iommu map for QGIC address\n",
+			dev->rc_idx, ret);
+		return -ENOMEM;
+	}
+
+	gicm_db_offset = dev->msi_gicm_addr -
+		rounddown(dev->msi_gicm_addr, PAGE_SIZE);
+	msg->address_lo = iova + gicm_db_offset;
+
+	return 0;
+}
+
+static int arch_setup_msi_irq_qgic(struct pci_dev *pdev,
+		struct msi_desc *desc, int nvec)
+{
+	int irq, index, ret, firstirq = 0;
+	struct msi_msg msg;
+	struct msm_pcie_dev_t *dev = PCIE_BUS_PRIV_DATA(pdev->bus);
+
+	PCIE_DBG(dev, "RC%d\n", dev->rc_idx);
+
+	for (index = 0; index < nvec; index++) {
+		irq = msm_pcie_create_irq_qgic(dev);
+		PCIE_DBG(dev, "irq %d is allocated\n", irq);
+
+		if (irq < 0)
+			return irq;
+
+		if (index == 0)
+			firstirq = irq;
+
+		irq_set_irq_type(irq, IRQ_TYPE_EDGE_RISING);
+	}
+
+	/* write msi vector and data */
+	irq_set_msi_desc(firstirq, desc);
+
+	ret = msm_pcie_map_qgic_addr(dev, pdev, &msg);
+	if (ret)
+		return ret;
+
+	msg.data = dev->msi_gicm_base + (firstirq - dev->msi[0].num);
+	write_msi_msg(firstirq, &msg);
+
+	return 0;
+}
+
+int arch_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *desc)
+{
+	struct msm_pcie_dev_t *dev = PCIE_BUS_PRIV_DATA(pdev->bus);
+
+	PCIE_DBG(dev, "RC%d\n", dev->rc_idx);
+
+	if (dev->msi_gicm_addr)
+		return arch_setup_msi_irq_qgic(pdev, desc, 1);
+	else
+		return arch_setup_msi_irq_default(pdev, desc, 1);
+}
+
+static int msm_pcie_get_msi_multiple(int nvec)
+{
+	int msi_multiple = 0;
+
+	while (nvec) {
+		nvec = nvec >> 1;
+		msi_multiple++;
+	}
+	PCIE_GEN_DBG("log2 number of MSI multiple:%d\n",
+		msi_multiple - 1);
+
+	return msi_multiple - 1;
+}
+
+int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
+{
+	struct msi_desc *entry;
+	int ret;
+	struct msm_pcie_dev_t *pcie_dev = PCIE_BUS_PRIV_DATA(dev->bus);
+
+	PCIE_DBG(pcie_dev, "RC%d\n", pcie_dev->rc_idx);
+
+	if (type != PCI_CAP_ID_MSI || nvec > 32)
+		return -ENOSPC;
+
+	PCIE_DBG(pcie_dev, "nvec = %d\n", nvec);
+
+	list_for_each_entry(entry, &dev->dev.msi_list, list) {
+		entry->msi_attrib.multiple =
+				msm_pcie_get_msi_multiple(nvec);
+
+		if (pcie_dev->msi_gicm_addr)
+			ret = arch_setup_msi_irq_qgic(dev, entry, nvec);
+		else
+			ret = arch_setup_msi_irq_default(dev, entry, nvec);
+
+		PCIE_DBG(pcie_dev, "ret from msi_irq: %d\n", ret);
+
+		if (ret < 0)
+			return ret;
+		if (ret > 0)
+			return -ENOSPC;
+	}
+
+	pcie_dev->use_msi = true;
+
+	return 0;
+}
+
+static int msm_pcie_msi_map(struct irq_domain *domain, unsigned int irq,
+	   irq_hw_number_t hwirq)
+{
+	irq_set_chip_and_handler(irq, &pcie_msi_chip, handle_simple_irq);
+	return 0;
+}
+
+static const struct irq_domain_ops msm_pcie_msi_ops = {
+	.map = msm_pcie_msi_map,
+};
+
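+/*
+ * Request all platform interrupts (linkdown, MSI, AER, global, WAKE#)
+ * and, for the default MSI path, create the linear IRQ domain that
+ * handle_msi_irq() dispatches into.
+ */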
+int32_t msm_pcie_irq_init(struct msm_pcie_dev_t *dev)
+{
+	int rc;
+	int msi_start = 0;
+	struct device *pdev = &dev->pdev->dev;
+
+	PCIE_DBG(dev, "RC%d\n", dev->rc_idx);
+
+	if (dev->rc_idx)
+		wakeup_source_init(&dev->ws, "RC1 pcie_wakeup_source");
+	else
+		wakeup_source_init(&dev->ws, "RC0 pcie_wakeup_source");
+
+	/* register handler for linkdown interrupt */
+	if (dev->irq[MSM_PCIE_INT_LINK_DOWN].num) {
+		rc = devm_request_irq(pdev,
+			dev->irq[MSM_PCIE_INT_LINK_DOWN].num,
+			handle_linkdown_irq,
+			IRQF_TRIGGER_RISING,
+			dev->irq[MSM_PCIE_INT_LINK_DOWN].name,
+			dev);
+		if (rc) {
+			PCIE_ERR(dev,
+				"PCIe: Unable to request linkdown interrupt:%d\n",
+				dev->irq[MSM_PCIE_INT_LINK_DOWN].num);
+			return rc;
+		}
+	}
+
+	/* register handler for physical MSI interrupt line */
+	if (dev->irq[MSM_PCIE_INT_MSI].num) {
+		rc = devm_request_irq(pdev,
+			dev->irq[MSM_PCIE_INT_MSI].num,
+			handle_msi_irq,
+			IRQF_TRIGGER_RISING,
+			dev->irq[MSM_PCIE_INT_MSI].name,
+			dev);
+		if (rc) {
+			PCIE_ERR(dev,
+				"PCIe: RC%d: Unable to request MSI interrupt\n",
+				dev->rc_idx);
+			return rc;
+		}
+	}
+
+	/* register handler for AER interrupt */
+	if (dev->irq[MSM_PCIE_INT_PLS_ERR].num) {
+		rc = devm_request_irq(pdev,
+				dev->irq[MSM_PCIE_INT_PLS_ERR].num,
+				handle_aer_irq,
+				IRQF_TRIGGER_RISING,
+				dev->irq[MSM_PCIE_INT_PLS_ERR].name,
+				dev);
+		if (rc) {
+			PCIE_ERR(dev,
+				"PCIe: RC%d: Unable to request aer pls_err interrupt: %d\n",
+				dev->rc_idx,
+				dev->irq[MSM_PCIE_INT_PLS_ERR].num);
+			return rc;
+		}
+	}
+
+	/* register handler for AER legacy interrupt */
+	if (dev->irq[MSM_PCIE_INT_AER_LEGACY].num) {
+		rc = devm_request_irq(pdev,
+				dev->irq[MSM_PCIE_INT_AER_LEGACY].num,
+				handle_aer_irq,
+				IRQF_TRIGGER_RISING,
+				dev->irq[MSM_PCIE_INT_AER_LEGACY].name,
+				dev);
+		if (rc) {
+			PCIE_ERR(dev,
+				"PCIe: RC%d: Unable to request aer aer_legacy interrupt: %d\n",
+				dev->rc_idx,
+				dev->irq[MSM_PCIE_INT_AER_LEGACY].num);
+			return rc;
+		}
+	}
+
+	if (dev->irq[MSM_PCIE_INT_GLOBAL_INT].num) {
+		rc = devm_request_irq(pdev,
+				dev->irq[MSM_PCIE_INT_GLOBAL_INT].num,
+				handle_global_irq,
+				IRQF_TRIGGER_RISING,
+				dev->irq[MSM_PCIE_INT_GLOBAL_INT].name,
+				dev);
+		if (rc) {
+			PCIE_ERR(dev,
+				"PCIe: RC%d: Unable to request global_int interrupt: %d\n",
+				dev->rc_idx,
+				dev->irq[MSM_PCIE_INT_GLOBAL_INT].num);
+			return rc;
+		}
+	}
+
+	/* register handler for PCIE_WAKE_N interrupt line */
+	if (dev->wake_n) {
+		rc = devm_request_irq(pdev,
+				dev->wake_n, handle_wake_irq,
+				IRQF_TRIGGER_FALLING, "msm_pcie_wake", dev);
+		if (rc) {
+			PCIE_ERR(dev,
+				"PCIe: RC%d: Unable to request wake interrupt\n",
+				dev->rc_idx);
+			return rc;
+		}
+
+		INIT_WORK(&dev->handle_wake_work, handle_wake_func);
+
+		rc = enable_irq_wake(dev->wake_n);
+		if (rc) {
+			PCIE_ERR(dev,
+				"PCIe: RC%d: Unable to enable wake interrupt\n",
+				dev->rc_idx);
+			return rc;
+		}
+	}
+
+	/* Create a virtual domain of interrupts */
+	if (!dev->msi_gicm_addr) {
+		dev->irq_domain = irq_domain_add_linear(dev->pdev->dev.of_node,
+			PCIE_MSI_NR_IRQS, &msm_pcie_msi_ops, dev);
+
+		if (!dev->irq_domain) {
+			PCIE_ERR(dev,
+				"PCIe: RC%d: Unable to initialize irq domain\n",
+				dev->rc_idx);
+
+			if (dev->wake_n)
+				disable_irq(dev->wake_n);
+
+			return -ENOMEM;
+		}
+
+		msi_start = irq_create_mapping(dev->irq_domain, 0);
+	}
+
+	return 0;
+}
+
+void msm_pcie_irq_deinit(struct msm_pcie_dev_t *dev)
+{
+	PCIE_DBG(dev, "RC%d\n", dev->rc_idx);
+
+	wakeup_source_trash(&dev->ws);
+
+	if (dev->wake_n)
+		disable_irq(dev->wake_n);
+}
+
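+/*
+ * Platform probe for one root complex: reads the RC's cell-index and
+ * the qcom,* feature properties from DT (L0s/L1/L1ss support, clock
+ * options, PHY version, latencies and timeouts).
+ */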
+static int msm_pcie_probe(struct platform_device *pdev)
+{
+	int ret = 0;
+	int rc_idx = -1;
+	int i, j;
+
+	PCIE_GEN_DBG("%s\n", __func__);
+
+	mutex_lock(&pcie_drv.drv_lock);
+
+	ret = of_property_read_u32((&pdev->dev)->of_node,
+				"cell-index", &rc_idx);
+	if (ret) {
+		PCIE_GEN_DBG("Did not find RC index.\n");
+		goto out;
+	} else {
+		if (rc_idx >= MAX_RC_NUM) {
+			pr_err(
+				"PCIe: Invalid RC Index %d (max supported = %d)\n",
+				rc_idx, MAX_RC_NUM);
+			goto out;
+		}
+		pcie_drv.rc_num++;
+		PCIE_DBG(&msm_pcie_dev[rc_idx], "PCIe: RC index is %d.\n",
+			rc_idx);
+	}
+
+	msm_pcie_dev[rc_idx].l0s_supported =
+		of_property_read_bool((&pdev->dev)->of_node,
+				"qcom,l0s-supported");
+	PCIE_DBG(&msm_pcie_dev[rc_idx], "L0s is %s supported.\n",
+		msm_pcie_dev[rc_idx].l0s_supported ? "" : "not");
+	msm_pcie_dev[rc_idx].l1_supported =
+		of_property_read_bool((&pdev->dev)->of_node,
+				"qcom,l1-supported");
+	PCIE_DBG(&msm_pcie_dev[rc_idx], "L1 is %s supported.\n",
+		msm_pcie_dev[rc_idx].l1_supported ? "" : "not");
+	msm_pcie_dev[rc_idx].l1ss_supported =
+		of_property_read_bool((&pdev->dev)->of_node,
+				"qcom,l1ss-supported");
+	PCIE_DBG(&msm_pcie_dev[rc_idx], "L1ss is %s supported.\n",
+		msm_pcie_dev[rc_idx].l1ss_supported ? "" : "not");
+	msm_pcie_dev[rc_idx].common_clk_en =
+		of_property_read_bool((&pdev->dev)->of_node,
+				"qcom,common-clk-en");
+	PCIE_DBG(&msm_pcie_dev[rc_idx], "Common clock is %s enabled.\n",
+		msm_pcie_dev[rc_idx].common_clk_en ? "" : "not");
+	msm_pcie_dev[rc_idx].clk_power_manage_en =
+		of_property_read_bool((&pdev->dev)->of_node,
+				"qcom,clk-power-manage-en");
+	PCIE_DBG(&msm_pcie_dev[rc_idx],
+		"Clock power management is %s enabled.\n",
+		msm_pcie_dev[rc_idx].clk_power_manage_en ? "" : "not");
+	msm_pcie_dev[rc_idx].aux_clk_sync =
+		of_property_read_bool((&pdev->dev)->of_node,
+				"qcom,aux-clk-sync");
+	PCIE_DBG(&msm_pcie_dev[rc_idx],
+		"AUX clock is %s synchronous to Core clock.\n",
+		msm_pcie_dev[rc_idx].aux_clk_sync ? "" : "not");
+
+	msm_pcie_dev[rc_idx].use_19p2mhz_aux_clk =
+		of_property_read_bool((&pdev->dev)->of_node,
+				"qcom,use-19p2mhz-aux-clk");
+	PCIE_DBG(&msm_pcie_dev[rc_idx],
+		"AUX clock frequency is %s 19.2MHz.\n",
+		msm_pcie_dev[rc_idx].use_19p2mhz_aux_clk ? "" : "not");
+
+	msm_pcie_dev[rc_idx].smmu_exist =
+		of_property_read_bool((&pdev->dev)->of_node,
+				"qcom,smmu-exist");
+	PCIE_DBG(&msm_pcie_dev[rc_idx],
+		"SMMU does %s exist.\n",
+		msm_pcie_dev[rc_idx].smmu_exist ? "" : "not");
+
+	msm_pcie_dev[rc_idx].smmu_sid_base = 0;
+	ret = of_property_read_u32((&pdev->dev)->of_node, "qcom,smmu-sid-base",
+				&msm_pcie_dev[rc_idx].smmu_sid_base);
+	if (ret)
+		PCIE_DBG(&msm_pcie_dev[rc_idx],
+			"RC%d SMMU sid base not found\n",
+			msm_pcie_dev[rc_idx].rc_idx);
+	else
+		PCIE_DBG(&msm_pcie_dev[rc_idx],
+			"RC%d: qcom,smmu-sid-base: 0x%x.\n",
+			msm_pcie_dev[rc_idx].rc_idx,
+			msm_pcie_dev[rc_idx].smmu_sid_base);
+
+	msm_pcie_dev[rc_idx].boot_option = 0;
+	ret = of_property_read_u32((&pdev->dev)->of_node, "qcom,boot-option",
+				&msm_pcie_dev[rc_idx].boot_option);
+	PCIE_DBG(&msm_pcie_dev[rc_idx],
+		"PCIe: RC%d boot option is 0x%x.\n",
+		rc_idx, msm_pcie_dev[rc_idx].boot_option);
+
+	msm_pcie_dev[rc_idx].phy_ver = 1;
+	ret = of_property_read_u32((&pdev->dev)->of_node,
+				"qcom,pcie-phy-ver",
+				&msm_pcie_dev[rc_idx].phy_ver);
+	if (ret)
+		PCIE_DBG(&msm_pcie_dev[rc_idx],
+			"RC%d: pcie-phy-ver does not exist.\n",
+			msm_pcie_dev[rc_idx].rc_idx);
+	else
+		PCIE_DBG(&msm_pcie_dev[rc_idx],
+			"RC%d: pcie-phy-ver: %d.\n",
+			msm_pcie_dev[rc_idx].rc_idx,
+			msm_pcie_dev[rc_idx].phy_ver);
+
+	msm_pcie_dev[rc_idx].n_fts = 0;
+	ret = of_property_read_u32((&pdev->dev)->of_node,
+				"qcom,n-fts",
+				&msm_pcie_dev[rc_idx].n_fts);
+
+	if (ret)
+		PCIE_DBG(&msm_pcie_dev[rc_idx],
+			"n-fts does not exist. ret=%d\n", ret);
+	else
+		PCIE_DBG(&msm_pcie_dev[rc_idx], "n-fts: 0x%x.\n",
+				msm_pcie_dev[rc_idx].n_fts);
+
+	msm_pcie_dev[rc_idx].common_phy =
+		of_property_read_bool((&pdev->dev)->of_node,
+				"qcom,common-phy");
+	PCIE_DBG(&msm_pcie_dev[rc_idx],
+		"PCIe: RC%d: Common PHY does %s exist.\n",
+		rc_idx, msm_pcie_dev[rc_idx].common_phy ? "" : "not");
+
+	msm_pcie_dev[rc_idx].ext_ref_clk =
+		of_property_read_bool((&pdev->dev)->of_node,
+				"qcom,ext-ref-clk");
+	PCIE_DBG(&msm_pcie_dev[rc_idx], "ref clk is %s.\n",
+		msm_pcie_dev[rc_idx].ext_ref_clk ? "external" : "internal");
+
+	msm_pcie_dev[rc_idx].ep_latency = 0;
+	ret = of_property_read_u32((&pdev->dev)->of_node,
+				"qcom,ep-latency",
+				&msm_pcie_dev[rc_idx].ep_latency);
+	if (ret)
+		PCIE_DBG(&msm_pcie_dev[rc_idx],
+			"RC%d: ep-latency does not exist.\n",
+			rc_idx);
+	else
+		PCIE_DBG(&msm_pcie_dev[rc_idx], "RC%d: ep-latency: 0x%x.\n",
+			rc_idx, msm_pcie_dev[rc_idx].ep_latency);
+
+	msm_pcie_dev[rc_idx].switch_latency = 0;
+	ret = of_property_read_u32((&pdev->dev)->of_node,
+					"qcom,switch-latency",
+					&msm_pcie_dev[rc_idx].switch_latency);
+
+	if (ret)
+		PCIE_DBG(&msm_pcie_dev[rc_idx],
+				"RC%d: switch-latency does not exist.\n",
+				rc_idx);
+	else
+		PCIE_DBG(&msm_pcie_dev[rc_idx],
+				"RC%d: switch-latency: 0x%x.\n",
+				rc_idx, msm_pcie_dev[rc_idx].switch_latency);
+
+	msm_pcie_dev[rc_idx].wr_halt_size = 0;
+	ret = of_property_read_u32(pdev->dev.of_node,
+				"qcom,wr-halt-size",
+				&msm_pcie_dev[rc_idx].wr_halt_size);
+	if (ret)
+		PCIE_DBG(&msm_pcie_dev[rc_idx],
+			"RC%d: wr-halt-size not specified in dt. Use default value.\n",
+			rc_idx);
+	else
+		PCIE_DBG(&msm_pcie_dev[rc_idx], "RC%d: wr-halt-size: 0x%x.\n",
+			rc_idx, msm_pcie_dev[rc_idx].wr_halt_size);
+
+	msm_pcie_dev[rc_idx].cpl_timeout = 0;
+	ret = of_property_read_u32((&pdev->dev)->of_node,
+				"qcom,cpl-timeout",
+				&msm_pcie_dev[rc_idx].cpl_timeout);
+	if (ret)
+		PCIE_DBG(&msm_pcie_dev[rc_idx],
+			"RC%d: Using default cpl-timeout.\n",
+			rc_idx);
+	else
+		PCIE_DBG(&msm_pcie_dev[rc_idx], "RC%d: cpl-timeout: 0x%x.\n",
+			rc_idx, msm_pcie_dev[rc_idx].cpl_timeout);
+
+	msm_pcie_dev[rc_idx].perst_delay_us_min =
+		PERST_PROPAGATION_DELAY_US_MIN;
+	ret = of_property_read_u32(pdev->dev.of_node,
+				"qcom,perst-delay-us-min",
+				&msm_pcie_dev[rc_idx].perst_delay_us_min);
+	if (ret)
+		PCIE_DBG(&msm_pcie_dev[rc_idx],
+			"RC%d: perst-delay-us-min does not exist. Use default value %dus.\n",
+			rc_idx, msm_pcie_dev[rc_idx].perst_delay_us_min);
+	else
+		PCIE_DBG(&msm_pcie_dev[rc_idx],
+			"RC%d: perst-delay-us-min: %dus.\n",
+			rc_idx, msm_pcie_dev[rc_idx].perst_delay_us_min);
+
+	msm_pcie_dev[rc_idx].perst_delay_us_max =
+		PERST_PROPAGATION_DELAY_US_MAX;
+	ret = of_property_read_u32(pdev->dev.of_node,
+				"qcom,perst-delay-us-max",
+				&msm_pcie_dev[rc_idx].perst_delay_us_max);
+	if (ret)
+		PCIE_DBG(&msm_pcie_dev[rc_idx],
+			"RC%d: perst-delay-us-max does not exist. Use default value %dus.\n",
+			rc_idx, msm_pcie_dev[rc_idx].perst_delay_us_max);
+	else
+		PCIE_DBG(&msm_pcie_dev[rc_idx],
+			"RC%d: perst-delay-us-max: %dus.\n",
+			rc_idx, msm_pcie_dev[rc_idx].perst_delay_us_max);
+
+	msm_pcie_dev[rc_idx].tlp_rd_size = PCIE_TLP_RD_SIZE;
+	ret = of_property_read_u32(pdev->dev.of_node,
+				"qcom,tlp-rd-size",
+				&msm_pcie_dev[rc_idx].tlp_rd_size);
+	if (ret)
+		PCIE_DBG(&msm_pcie_dev[rc_idx],
+			"RC%d: tlp-rd-size does not exist. tlp-rd-size: 0x%x.\n",
+			rc_idx, msm_pcie_dev[rc_idx].tlp_rd_size);
+	else
+		PCIE_DBG(&msm_pcie_dev[rc_idx], "RC%d: tlp-rd-size: 0x%x.\n",
+			rc_idx, msm_pcie_dev[rc_idx].tlp_rd_size);
+
+	msm_pcie_dev[rc_idx].msi_gicm_addr = 0;
+	msm_pcie_dev[rc_idx].msi_gicm_base = 0;
+	ret = of_property_read_u32((&pdev->dev)->of_node,
+				"qcom,msi-gicm-addr",
+				&msm_pcie_dev[rc_idx].msi_gicm_addr);
+
+	if (ret) {
+		PCIE_DBG(&msm_pcie_dev[rc_idx], "%s",
+			"msi-gicm-addr does not exist.\n");
+	} else {
+		PCIE_DBG(&msm_pcie_dev[rc_idx], "msi-gicm-addr: 0x%x.\n",
+				msm_pcie_dev[rc_idx].msi_gicm_addr);
+
+		ret = of_property_read_u32((&pdev->dev)->of_node,
+				"qcom,msi-gicm-base",
+				&msm_pcie_dev[rc_idx].msi_gicm_base);
+
+		if (ret) {
+			PCIE_ERR(&msm_pcie_dev[rc_idx],
+				"PCIe: RC%d: msi-gicm-base does not exist.\n",
+				rc_idx);
+			goto decrease_rc_num;
+		} else {
+			PCIE_DBG(&msm_pcie_dev[rc_idx], "msi-gicm-base: 0x%x\n",
+					msm_pcie_dev[rc_idx].msi_gicm_base);
+		}
+	}
+
+	msm_pcie_dev[rc_idx].scm_dev_id = 0;
+	ret = of_property_read_u32((&pdev->dev)->of_node,
+				"qcom,scm-dev-id",
+				&msm_pcie_dev[rc_idx].scm_dev_id);
+
+	msm_pcie_dev[rc_idx].rc_idx = rc_idx;
+	msm_pcie_dev[rc_idx].pdev = pdev;
+	msm_pcie_dev[rc_idx].vreg_n = 0;
+	msm_pcie_dev[rc_idx].gpio_n = 0;
+	msm_pcie_dev[rc_idx].parf_deemph = 0;
+	msm_pcie_dev[rc_idx].parf_swing = 0;
+	msm_pcie_dev[rc_idx].link_status = MSM_PCIE_LINK_DEINIT;
+	msm_pcie_dev[rc_idx].user_suspend = false;
+	msm_pcie_dev[rc_idx].disable_pc = false;
+	msm_pcie_dev[rc_idx].saved_state = NULL;
+	msm_pcie_dev[rc_idx].enumerated = false;
+	msm_pcie_dev[rc_idx].num_active_ep = 0;
+	msm_pcie_dev[rc_idx].num_ep = 0;
+	msm_pcie_dev[rc_idx].pending_ep_reg = false;
+	msm_pcie_dev[rc_idx].phy_len = 0;
+	msm_pcie_dev[rc_idx].port_phy_len = 0;
+	msm_pcie_dev[rc_idx].phy_sequence = NULL;
+	msm_pcie_dev[rc_idx].port_phy_sequence = NULL;
+	msm_pcie_dev[rc_idx].event_reg = NULL;
+	msm_pcie_dev[rc_idx].linkdown_counter = 0;
+	msm_pcie_dev[rc_idx].link_turned_on_counter = 0;
+	msm_pcie_dev[rc_idx].link_turned_off_counter = 0;
+	msm_pcie_dev[rc_idx].rc_corr_counter = 0;
+	msm_pcie_dev[rc_idx].rc_non_fatal_counter = 0;
+	msm_pcie_dev[rc_idx].rc_fatal_counter = 0;
+	msm_pcie_dev[rc_idx].ep_corr_counter = 0;
+	msm_pcie_dev[rc_idx].ep_non_fatal_counter = 0;
+	msm_pcie_dev[rc_idx].ep_fatal_counter = 0;
+	msm_pcie_dev[rc_idx].suspending = false;
+	msm_pcie_dev[rc_idx].wake_counter = 0;
+	msm_pcie_dev[rc_idx].aer_enable = true;
+	msm_pcie_dev[rc_idx].power_on = false;
+	msm_pcie_dev[rc_idx].current_short_bdf = 0;
+	msm_pcie_dev[rc_idx].use_msi = false;
+	msm_pcie_dev[rc_idx].use_pinctrl = false;
+	msm_pcie_dev[rc_idx].linkdown_panic = false;
+	msm_pcie_dev[rc_idx].bridge_found = false;
+	memcpy(msm_pcie_dev[rc_idx].vreg, msm_pcie_vreg_info,
+				sizeof(msm_pcie_vreg_info));
+	memcpy(msm_pcie_dev[rc_idx].gpio, msm_pcie_gpio_info,
+				sizeof(msm_pcie_gpio_info));
+	memcpy(msm_pcie_dev[rc_idx].clk, msm_pcie_clk_info[rc_idx],
+				sizeof(msm_pcie_clk_info[rc_idx]));
+	memcpy(msm_pcie_dev[rc_idx].pipeclk, msm_pcie_pipe_clk_info[rc_idx],
+				sizeof(msm_pcie_pipe_clk_info[rc_idx]));
+	memcpy(msm_pcie_dev[rc_idx].res, msm_pcie_res_info,
+				sizeof(msm_pcie_res_info));
+	memcpy(msm_pcie_dev[rc_idx].irq, msm_pcie_irq_info,
+				sizeof(msm_pcie_irq_info));
+	memcpy(msm_pcie_dev[rc_idx].msi, msm_pcie_msi_info,
+				sizeof(msm_pcie_msi_info));
+	memcpy(msm_pcie_dev[rc_idx].reset, msm_pcie_reset_info[rc_idx],
+				sizeof(msm_pcie_reset_info[rc_idx]));
+	memcpy(msm_pcie_dev[rc_idx].pipe_reset,
+			msm_pcie_pipe_reset_info[rc_idx],
+			sizeof(msm_pcie_pipe_reset_info[rc_idx]));
+	msm_pcie_dev[rc_idx].shadow_en = true;
+	for (i = 0; i < PCIE_CONF_SPACE_DW; i++)
+		msm_pcie_dev[rc_idx].rc_shadow[i] = PCIE_CLEAR;
+	for (i = 0; i < MAX_DEVICE_NUM; i++)
+		for (j = 0; j < PCIE_CONF_SPACE_DW; j++)
+			msm_pcie_dev[rc_idx].ep_shadow[i][j] = PCIE_CLEAR;
+	for (i = 0; i < MAX_DEVICE_NUM; i++) {
+		msm_pcie_dev[rc_idx].pcidev_table[i].bdf = 0;
+		msm_pcie_dev[rc_idx].pcidev_table[i].dev = NULL;
+		msm_pcie_dev[rc_idx].pcidev_table[i].short_bdf = 0;
+		msm_pcie_dev[rc_idx].pcidev_table[i].sid = 0;
+		msm_pcie_dev[rc_idx].pcidev_table[i].domain = rc_idx;
+		msm_pcie_dev[rc_idx].pcidev_table[i].conf_base = 0;
+		msm_pcie_dev[rc_idx].pcidev_table[i].phy_address = 0;
+		msm_pcie_dev[rc_idx].pcidev_table[i].dev_ctrlstts_offset = 0;
+		msm_pcie_dev[rc_idx].pcidev_table[i].event_reg = NULL;
+		msm_pcie_dev[rc_idx].pcidev_table[i].registered = true;
+	}
+
+	dev_set_drvdata(&msm_pcie_dev[rc_idx].pdev->dev, &msm_pcie_dev[rc_idx]);
+
+	ret = msm_pcie_get_resources(&msm_pcie_dev[rc_idx],
+				msm_pcie_dev[rc_idx].pdev);
+
+	if (ret)
+		goto decrease_rc_num;
+
+	msm_pcie_dev[rc_idx].pinctrl = devm_pinctrl_get(&pdev->dev);
+	if (IS_ERR_OR_NULL(msm_pcie_dev[rc_idx].pinctrl))
+		PCIE_ERR(&msm_pcie_dev[rc_idx],
+			"PCIe: RC%d failed to get pinctrl\n",
+			rc_idx);
+	else
+		msm_pcie_dev[rc_idx].use_pinctrl = true;
+
+	if (msm_pcie_dev[rc_idx].use_pinctrl) {
+		msm_pcie_dev[rc_idx].pins_default =
+			pinctrl_lookup_state(msm_pcie_dev[rc_idx].pinctrl,
+						"default");
+		if (IS_ERR(msm_pcie_dev[rc_idx].pins_default)) {
+			PCIE_ERR(&msm_pcie_dev[rc_idx],
+				"PCIe: RC%d could not get pinctrl default state\n",
+				rc_idx);
+			msm_pcie_dev[rc_idx].pins_default = NULL;
+		}
+
+		msm_pcie_dev[rc_idx].pins_sleep =
+			pinctrl_lookup_state(msm_pcie_dev[rc_idx].pinctrl,
+						"sleep");
+		if (IS_ERR(msm_pcie_dev[rc_idx].pins_sleep)) {
+			PCIE_ERR(&msm_pcie_dev[rc_idx],
+				"PCIe: RC%d could not get pinctrl sleep state\n",
+				rc_idx);
+			msm_pcie_dev[rc_idx].pins_sleep = NULL;
+		}
+	}
+
+	ret = msm_pcie_gpio_init(&msm_pcie_dev[rc_idx]);
+	if (ret) {
+		msm_pcie_release_resources(&msm_pcie_dev[rc_idx]);
+		goto decrease_rc_num;
+	}
+
+	ret = msm_pcie_irq_init(&msm_pcie_dev[rc_idx]);
+	if (ret) {
+		msm_pcie_release_resources(&msm_pcie_dev[rc_idx]);
+		msm_pcie_gpio_deinit(&msm_pcie_dev[rc_idx]);
+		goto decrease_rc_num;
+	}
+
+	msm_pcie_sysfs_init(&msm_pcie_dev[rc_idx]);
+
+	msm_pcie_dev[rc_idx].drv_ready = true;
+
+	if (msm_pcie_dev[rc_idx].boot_option &
+			MSM_PCIE_NO_PROBE_ENUMERATION) {
+		PCIE_DBG(&msm_pcie_dev[rc_idx],
+			"PCIe: RC%d will be enumerated by client or endpoint.\n",
+			rc_idx);
+		mutex_unlock(&pcie_drv.drv_lock);
+		return 0;
+	}
+
+	ret = msm_pcie_enumerate(rc_idx);
+
+	if (ret)
+		PCIE_ERR(&msm_pcie_dev[rc_idx],
+			"PCIe: RC%d is not enabled during bootup; it will be enumerated upon client request.\n",
+			rc_idx);
+	else
+		PCIE_ERR(&msm_pcie_dev[rc_idx], "RC%d is enabled during bootup\n",
+			rc_idx);
+
+	PCIE_DBG(&msm_pcie_dev[rc_idx], "PCIe probed %s\n",
+		dev_name(&(pdev->dev)));
+
+	mutex_unlock(&pcie_drv.drv_lock);
+	return 0;
+
+decrease_rc_num:
+	pcie_drv.rc_num--;
+out:
+	if (rc_idx < 0 || rc_idx >= MAX_RC_NUM)
+		pr_err("PCIe: Invalid RC index %d. Driver probe failed\n",
+			rc_idx);
+	else
+		PCIE_ERR(&msm_pcie_dev[rc_idx],
+			"PCIe: Driver probe failed for RC%d:%d\n",
+			rc_idx, ret);
+
+	mutex_unlock(&pcie_drv.drv_lock);
+
+	return ret;
+}
+
+static int msm_pcie_remove(struct platform_device *pdev)
+{
+	int ret = 0;
+	int rc_idx;
+
+	PCIE_GEN_DBG("PCIe:%s.\n", __func__);
+
+	mutex_lock(&pcie_drv.drv_lock);
+
+	ret = of_property_read_u32((&pdev->dev)->of_node,
+				"cell-index", &rc_idx);
+	if (ret) {
+		pr_err("%s: Did not find RC index.\n", __func__);
+		goto out;
+	} else if (rc_idx < 0 || rc_idx >= MAX_RC_NUM) {
+		pr_err("%s: Invalid RC index %d.\n", __func__, rc_idx);
+		ret = -EINVAL;
+		goto out;
+	} else {
+		pcie_drv.rc_num--;
+		PCIE_GEN_DBG("%s: RC index is 0x%x.\n", __func__, rc_idx);
+	}
+
+	msm_pcie_irq_deinit(&msm_pcie_dev[rc_idx]);
+	msm_pcie_vreg_deinit(&msm_pcie_dev[rc_idx]);
+	msm_pcie_clk_deinit(&msm_pcie_dev[rc_idx]);
+	msm_pcie_gpio_deinit(&msm_pcie_dev[rc_idx]);
+	msm_pcie_release_resources(&msm_pcie_dev[rc_idx]);
+
+out:
+	mutex_unlock(&pcie_drv.drv_lock);
+
+	return ret;
+}
+
+static const struct of_device_id msm_pcie_match[] = {
+	{ .compatible = "qcom,pci-msm" },
+	{}
+};
+
+static struct platform_driver msm_pcie_driver = {
+	.probe	= msm_pcie_probe,
+	.remove	= msm_pcie_remove,
+	.driver	= {
+		.name		= "pci-msm",
+		.owner		= THIS_MODULE,
+		.of_match_table	= msm_pcie_match,
+	},
+};
+
+int __init pcie_init(void)
+{
+	int ret = 0, i;
+	char rc_name[MAX_RC_NAME_LEN];
+
+	pr_alert("pcie:%s.\n", __func__);
+
+	pcie_drv.rc_num = 0;
+	mutex_init(&pcie_drv.drv_lock);
+	mutex_init(&com_phy_lock);
+	mutex_init(&enumerate_lock);
+
+	for (i = 0; i < MAX_RC_NUM; i++) {
+		snprintf(rc_name, MAX_RC_NAME_LEN, "pcie%d-short", i);
+		msm_pcie_dev[i].ipc_log =
+			ipc_log_context_create(PCIE_LOG_PAGES, rc_name, 0);
+		if (msm_pcie_dev[i].ipc_log == NULL)
+			pr_err("%s: unable to create IPC log context for %s\n",
+				__func__, rc_name);
+		else
+			PCIE_DBG(&msm_pcie_dev[i],
+				"PCIe IPC logging is enabled for RC%d\n",
+				i);
+		snprintf(rc_name, MAX_RC_NAME_LEN, "pcie%d-long", i);
+		msm_pcie_dev[i].ipc_log_long =
+			ipc_log_context_create(PCIE_LOG_PAGES, rc_name, 0);
+		if (msm_pcie_dev[i].ipc_log_long == NULL)
+			pr_err("%s: unable to create IPC log context for %s\n",
+				__func__, rc_name);
+		else
+			PCIE_DBG(&msm_pcie_dev[i],
+				"PCIe IPC logging %s is enabled for RC%d\n",
+				rc_name, i);
+		snprintf(rc_name, MAX_RC_NAME_LEN, "pcie%d-dump", i);
+		msm_pcie_dev[i].ipc_log_dump =
+			ipc_log_context_create(PCIE_LOG_PAGES, rc_name, 0);
+		if (msm_pcie_dev[i].ipc_log_dump == NULL)
+			pr_err("%s: unable to create IPC log context for %s\n",
+				__func__, rc_name);
+		else
+			PCIE_DBG(&msm_pcie_dev[i],
+				"PCIe IPC logging %s is enabled for RC%d\n",
+				rc_name, i);
+		spin_lock_init(&msm_pcie_dev[i].cfg_lock);
+		msm_pcie_dev[i].cfg_access = true;
+		mutex_init(&msm_pcie_dev[i].setup_lock);
+		mutex_init(&msm_pcie_dev[i].recovery_lock);
+		spin_lock_init(&msm_pcie_dev[i].linkdown_lock);
+		spin_lock_init(&msm_pcie_dev[i].wakeup_lock);
+		spin_lock_init(&msm_pcie_dev[i].global_irq_lock);
+		spin_lock_init(&msm_pcie_dev[i].aer_lock);
+		msm_pcie_dev[i].drv_ready = false;
+	}
+	for (i = 0; i < MAX_RC_NUM * MAX_DEVICE_NUM; i++) {
+		msm_pcie_dev_tbl[i].bdf = 0;
+		msm_pcie_dev_tbl[i].dev = NULL;
+		msm_pcie_dev_tbl[i].short_bdf = 0;
+		msm_pcie_dev_tbl[i].sid = 0;
+		msm_pcie_dev_tbl[i].domain = -1;
+		msm_pcie_dev_tbl[i].conf_base = 0;
+		msm_pcie_dev_tbl[i].phy_address = 0;
+		msm_pcie_dev_tbl[i].dev_ctrlstts_offset = 0;
+		msm_pcie_dev_tbl[i].event_reg = NULL;
+		msm_pcie_dev_tbl[i].registered = true;
+	}
+
+	msm_pcie_debugfs_init();
+
+	ret = platform_driver_register(&msm_pcie_driver);
+
+	return ret;
+}
+
+static void __exit pcie_exit(void)
+{
+	int i;
+
+	PCIE_GEN_DBG("pcie:%s.\n", __func__);
+
+	platform_driver_unregister(&msm_pcie_driver);
+
+	msm_pcie_debugfs_exit();
+
+	for (i = 0; i < MAX_RC_NUM; i++)
+		msm_pcie_sysfs_exit(&msm_pcie_dev[i]);
+}
+
+subsys_initcall_sync(pcie_init);
+module_exit(pcie_exit);
+
+/* The RC does not report the right class; set it to PCI_CLASS_BRIDGE_PCI. */
+static void msm_pcie_fixup_early(struct pci_dev *dev)
+{
+	struct msm_pcie_dev_t *pcie_dev = PCIE_BUS_PRIV_DATA(dev->bus);
+	PCIE_DBG(pcie_dev, "hdr_type %d\n", dev->hdr_type);
+	if (dev->hdr_type == 1)
+		dev->class = (dev->class & 0xff) | (PCI_CLASS_BRIDGE_PCI << 8);
+}
+DECLARE_PCI_FIXUP_EARLY(PCIE_VENDOR_ID_RCP, PCIE_DEVICE_ID_RCP,
+			msm_pcie_fixup_early);
+
+/* Suspend the PCIe link */
+static int msm_pcie_pm_suspend(struct pci_dev *dev,
+			void *user, void *data, u32 options)
+{
+	int ret = 0;
+	u32 val = 0;
+	int ret_l23;
+	unsigned long irqsave_flags;
+	struct msm_pcie_dev_t *pcie_dev = PCIE_BUS_PRIV_DATA(dev->bus);
+
+	PCIE_DBG(pcie_dev, "RC%d: entry\n", pcie_dev->rc_idx);
+
+	spin_lock_irqsave(&pcie_dev->aer_lock, irqsave_flags);
+	pcie_dev->suspending = true;
+	spin_unlock_irqrestore(&pcie_dev->aer_lock, irqsave_flags);
+
+	if (!pcie_dev->power_on) {
+		PCIE_DBG(pcie_dev,
+			"PCIe: power of RC%d has been turned off.\n",
+			pcie_dev->rc_idx);
+		return ret;
+	}
+
+	if (dev && !(options & MSM_PCIE_CONFIG_NO_CFG_RESTORE)
+		&& msm_pcie_confirm_linkup(pcie_dev, true, true,
+			pcie_dev->conf)) {
+		ret = pci_save_state(dev);
+		pcie_dev->saved_state =	pci_store_saved_state(dev);
+	}
+	if (ret) {
+		PCIE_ERR(pcie_dev, "PCIe: failed to save state of RC%d:%d.\n",
+			pcie_dev->rc_idx, ret);
+		pcie_dev->suspending = false;
+		return ret;
+	}
+
+	spin_lock_irqsave(&pcie_dev->cfg_lock,
+				pcie_dev->irqsave_flags);
+	pcie_dev->cfg_access = false;
+	spin_unlock_irqrestore(&pcie_dev->cfg_lock,
+				pcie_dev->irqsave_flags);
+
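+	/*
+	 * Writing BIT(4) of ELBI_SYS_CTRL requests the link enter L2/L3: the
+	 * RC sends PME_Turn_Off, and entry to L23 is confirmed by polling
+	 * BIT(5) of PARF_PM_STTS below.
+	 */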
+	msm_pcie_write_mask(pcie_dev->elbi + PCIE20_ELBI_SYS_CTRL, 0,
+				BIT(4));
+
+	PCIE_DBG(pcie_dev, "RC%d: PME_TURNOFF_MSG is sent out\n",
+		pcie_dev->rc_idx);
+
+	ret_l23 = readl_poll_timeout((pcie_dev->parf
+		+ PCIE20_PARF_PM_STTS), val, (val & BIT(5)), 10000, 100000);
+
+	/* check L23_Ready */
+	PCIE_DBG(pcie_dev, "RC%d: PCIE20_PARF_PM_STTS is 0x%x.\n",
+		pcie_dev->rc_idx,
+		readl_relaxed(pcie_dev->parf + PCIE20_PARF_PM_STTS));
+	if (!ret_l23)
+		PCIE_DBG(pcie_dev, "RC%d: PM_Enter_L23 is received\n",
+			pcie_dev->rc_idx);
+	else
+		PCIE_DBG(pcie_dev, "RC%d: PM_Enter_L23 is NOT received\n",
+			pcie_dev->rc_idx);
+
+	if (pcie_dev->use_pinctrl && pcie_dev->pins_sleep)
+		pinctrl_select_state(pcie_dev->pinctrl,
+					pcie_dev->pins_sleep);
+
+	msm_pcie_disable(pcie_dev, PM_PIPE_CLK | PM_CLK | PM_VREG);
+
+	PCIE_DBG(pcie_dev, "RC%d: exit\n", pcie_dev->rc_idx);
+
+	return ret;
+}
+
+static void msm_pcie_fixup_suspend(struct pci_dev *dev)
+{
+	int ret;
+	struct msm_pcie_dev_t *pcie_dev = PCIE_BUS_PRIV_DATA(dev->bus);
+
+	PCIE_DBG(pcie_dev, "RC%d\n", pcie_dev->rc_idx);
+
+	if (pcie_dev->link_status != MSM_PCIE_LINK_ENABLED)
+		return;
+
+	spin_lock_irqsave(&pcie_dev->cfg_lock,
+				pcie_dev->irqsave_flags);
+	if (pcie_dev->disable_pc) {
+		PCIE_DBG(pcie_dev,
+			"RC%d: Skip suspend because of user request\n",
+			pcie_dev->rc_idx);
+		spin_unlock_irqrestore(&pcie_dev->cfg_lock,
+				pcie_dev->irqsave_flags);
+		return;
+	}
+	spin_unlock_irqrestore(&pcie_dev->cfg_lock,
+				pcie_dev->irqsave_flags);
+
+	mutex_lock(&pcie_dev->recovery_lock);
+
+	ret = msm_pcie_pm_suspend(dev, NULL, NULL, 0);
+	if (ret)
+		PCIE_ERR(pcie_dev, "PCIe: RC%d got failure in suspend:%d.\n",
+			pcie_dev->rc_idx, ret);
+
+	mutex_unlock(&pcie_dev->recovery_lock);
+}
+DECLARE_PCI_FIXUP_SUSPEND(PCIE_VENDOR_ID_RCP, PCIE_DEVICE_ID_RCP,
+			  msm_pcie_fixup_suspend);
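+
+/*
+ * These SUSPEND/RESUME fixups, matched on the RC's vendor/device ID, hook
+ * the PCI core's power transitions for the root port; the platform driver
+ * itself registers no dev_pm_ops.
+ */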
+
+/* Resume the PCIe link */
+static int msm_pcie_pm_resume(struct pci_dev *dev,
+			void *user, void *data, u32 options)
+{
+	int ret;
+	struct msm_pcie_dev_t *pcie_dev = PCIE_BUS_PRIV_DATA(dev->bus);
+
+	PCIE_DBG(pcie_dev, "RC%d: entry\n", pcie_dev->rc_idx);
+
+	if (pcie_dev->use_pinctrl && pcie_dev->pins_default)
+		pinctrl_select_state(pcie_dev->pinctrl,
+					pcie_dev->pins_default);
+
+	spin_lock_irqsave(&pcie_dev->cfg_lock,
+				pcie_dev->irqsave_flags);
+	pcie_dev->cfg_access = true;
+	spin_unlock_irqrestore(&pcie_dev->cfg_lock,
+				pcie_dev->irqsave_flags);
+
+	ret = msm_pcie_enable(pcie_dev, PM_PIPE_CLK | PM_CLK | PM_VREG);
+	if (ret) {
+		PCIE_ERR(pcie_dev,
+			"PCIe: RC%d failed to enable PCIe link in resume.\n",
+			pcie_dev->rc_idx);
+		return ret;
+	} else {
+		pcie_dev->suspending = false;
+		PCIE_DBG(pcie_dev,
+			"dev->bus->number = %d dev->bus->primary = %d\n",
+			 dev->bus->number, dev->bus->primary);
+
+		if (!(options & MSM_PCIE_CONFIG_NO_CFG_RESTORE)) {
+			if (pcie_dev->saved_state) {
+				PCIE_DBG(pcie_dev,
+					 "RC%d: entry of PCI framework restore state\n",
+					 pcie_dev->rc_idx);
+
+				pci_load_and_free_saved_state(dev,
+						      &pcie_dev->saved_state);
+				pci_restore_state(dev);
+
+				PCIE_DBG(pcie_dev,
+					 "RC%d: exit of PCI framework restore state\n",
+					 pcie_dev->rc_idx);
+			} else {
+				PCIE_DBG(pcie_dev,
+					 "RC%d: restore rc config space using shadow recovery\n",
+					 pcie_dev->rc_idx);
+				msm_pcie_cfg_recover(pcie_dev, true);
+			}
+		}
+	}
+
+	if (pcie_dev->bridge_found) {
+		PCIE_DBG(pcie_dev,
+			"RC%d: entry of PCIe recover config\n",
+			pcie_dev->rc_idx);
+
+		msm_pcie_recover_config(dev);
+
+		PCIE_DBG(pcie_dev,
+			"RC%d: exit of PCIe recover config\n",
+			pcie_dev->rc_idx);
+	}
+
+	PCIE_DBG(pcie_dev, "RC%d: exit\n", pcie_dev->rc_idx);
+
+	return ret;
+}
+
+void msm_pcie_fixup_resume(struct pci_dev *dev)
+{
+	int ret;
+	struct msm_pcie_dev_t *pcie_dev = PCIE_BUS_PRIV_DATA(dev->bus);
+
+	PCIE_DBG(pcie_dev, "RC%d\n", pcie_dev->rc_idx);
+
+	if ((pcie_dev->link_status != MSM_PCIE_LINK_DISABLED) ||
+		pcie_dev->user_suspend)
+		return;
+
+	mutex_lock(&pcie_dev->recovery_lock);
+	ret = msm_pcie_pm_resume(dev, NULL, NULL, 0);
+	if (ret)
+		PCIE_ERR(pcie_dev,
+			"PCIe: RC%d got failure in fixup resume:%d.\n",
+			pcie_dev->rc_idx, ret);
+
+	mutex_unlock(&pcie_dev->recovery_lock);
+}
+DECLARE_PCI_FIXUP_RESUME(PCIE_VENDOR_ID_RCP, PCIE_DEVICE_ID_RCP,
+				 msm_pcie_fixup_resume);
+
+void msm_pcie_fixup_resume_early(struct pci_dev *dev)
+{
+	int ret;
+	struct msm_pcie_dev_t *pcie_dev = PCIE_BUS_PRIV_DATA(dev->bus);
+
+	PCIE_DBG(pcie_dev, "RC%d\n", pcie_dev->rc_idx);
+
+	if ((pcie_dev->link_status != MSM_PCIE_LINK_DISABLED) ||
+		pcie_dev->user_suspend)
+		return;
+
+	mutex_lock(&pcie_dev->recovery_lock);
+	ret = msm_pcie_pm_resume(dev, NULL, NULL, 0);
+	if (ret)
+		PCIE_ERR(pcie_dev, "PCIe: RC%d got failure in resume:%d.\n",
+			pcie_dev->rc_idx, ret);
+
+	mutex_unlock(&pcie_dev->recovery_lock);
+}
+DECLARE_PCI_FIXUP_RESUME_EARLY(PCIE_VENDOR_ID_RCP, PCIE_DEVICE_ID_RCP,
+				 msm_pcie_fixup_resume_early);
+
+int msm_pcie_pm_control(enum msm_pcie_pm_opt pm_opt, u32 busnr, void *user,
+			void *data, u32 options)
+{
+	int i, ret = 0;
+	struct pci_dev *dev;
+	u32 rc_idx = 0;
+	struct msm_pcie_dev_t *pcie_dev;
+
+	PCIE_GEN_DBG("PCIe: pm_opt:%d;busnr:%d;options:%d\n",
+		pm_opt, busnr, options);
+
+	if (!user) {
+		pr_err("PCIe: endpoint device is NULL\n");
+		ret = -ENODEV;
+		goto out;
+	}
+
+	pcie_dev = PCIE_BUS_PRIV_DATA(((struct pci_dev *)user)->bus);
+
+	if (pcie_dev) {
+		rc_idx = pcie_dev->rc_idx;
+		PCIE_DBG(pcie_dev,
+			"PCIe: RC%d: pm_opt:%d;busnr:%d;options:%d\n",
+			rc_idx, pm_opt, busnr, options);
+	} else {
+		pr_err("PCIe: did not find RC for pci endpoint device.\n");
+		ret = -ENODEV;
+		goto out;
+	}
+
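+	/*
+	 * A busnr of zero skips the bus-number validation below; otherwise
+	 * the endpoint must match the bus number recorded in its BDF entry.
+	 */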
+	for (i = 0; i < MAX_DEVICE_NUM; i++) {
+		if (!busnr)
+			break;
+		if (user == pcie_dev->pcidev_table[i].dev) {
+			if (busnr == pcie_dev->pcidev_table[i].bdf >> 24) {
+				break;
+			} else {
+				PCIE_ERR(pcie_dev,
+					"PCIe: RC%d: bus number %d does not match with the expected value %d\n",
+					pcie_dev->rc_idx, busnr,
+					pcie_dev->pcidev_table[i].bdf >> 24);
+				ret = MSM_PCIE_ERROR;
+				goto out;
+			}
+		}
+	}
+
+	if (i == MAX_DEVICE_NUM) {
+		PCIE_ERR(pcie_dev,
+			"PCIe: RC%d: endpoint device was not found in device table\n",
+			pcie_dev->rc_idx);
+		ret = MSM_PCIE_ERROR;
+		goto out;
+	}
+
+	dev = msm_pcie_dev[rc_idx].dev;
+
+	if (!msm_pcie_dev[rc_idx].drv_ready) {
+		PCIE_ERR(&msm_pcie_dev[rc_idx],
+			"RC%d has not been successfully probed yet\n",
+			rc_idx);
+		return -EPROBE_DEFER;
+	}
+
+	switch (pm_opt) {
+	case MSM_PCIE_SUSPEND:
+		PCIE_DBG(&msm_pcie_dev[rc_idx],
+			"User of RC%d requests to suspend the link\n", rc_idx);
+		if (msm_pcie_dev[rc_idx].link_status != MSM_PCIE_LINK_ENABLED)
+			PCIE_DBG(&msm_pcie_dev[rc_idx],
+				"PCIe: RC%d: requested to suspend when link is not enabled:%d.\n",
+				rc_idx, msm_pcie_dev[rc_idx].link_status);
+
+		if (!msm_pcie_dev[rc_idx].power_on) {
+			PCIE_ERR(&msm_pcie_dev[rc_idx],
+				"PCIe: RC%d: requested to suspend when link is powered down:%d.\n",
+				rc_idx, msm_pcie_dev[rc_idx].link_status);
+			break;
+		}
+
+		if (msm_pcie_dev[rc_idx].pending_ep_reg) {
+			PCIE_DBG(&msm_pcie_dev[rc_idx],
+				"PCIe: RC%d: request to suspend the link is rejected\n",
+				rc_idx);
+			break;
+		}
+
+		if (pcie_dev->num_active_ep) {
+			PCIE_DBG(pcie_dev,
+				"RC%d: an EP requested to suspend the link, but other EPs are still active: %d\n",
+				pcie_dev->rc_idx, pcie_dev->num_active_ep);
+			return ret;
+		}
+
+		msm_pcie_dev[rc_idx].user_suspend = true;
+
+		mutex_lock(&msm_pcie_dev[rc_idx].recovery_lock);
+
+		ret = msm_pcie_pm_suspend(dev, user, data, options);
+		if (ret) {
+			PCIE_ERR(&msm_pcie_dev[rc_idx],
+				"PCIe: RC%d: user failed to suspend the link.\n",
+				rc_idx);
+			msm_pcie_dev[rc_idx].user_suspend = false;
+		}
+
+		mutex_unlock(&msm_pcie_dev[rc_idx].recovery_lock);
+		break;
+	case MSM_PCIE_RESUME:
+		PCIE_DBG(&msm_pcie_dev[rc_idx],
+			"User of RC%d requests to resume the link\n", rc_idx);
+		if (msm_pcie_dev[rc_idx].link_status !=
+					MSM_PCIE_LINK_DISABLED) {
+			PCIE_ERR(&msm_pcie_dev[rc_idx],
+				"PCIe: RC%d: requested to resume when link is not disabled:%d. Number of active EP(s): %d\n",
+				rc_idx, msm_pcie_dev[rc_idx].link_status,
+				msm_pcie_dev[rc_idx].num_active_ep);
+			break;
+		}
+
+		mutex_lock(&msm_pcie_dev[rc_idx].recovery_lock);
+		ret = msm_pcie_pm_resume(dev, user, data, options);
+		if (ret) {
+			PCIE_ERR(&msm_pcie_dev[rc_idx],
+				"PCIe: RC%d: user failed to resume the link.\n",
+				rc_idx);
+		} else {
+			PCIE_DBG(&msm_pcie_dev[rc_idx],
+				"PCIe: RC%d: user successfully resumed the link.\n",
+				rc_idx);
+
+			msm_pcie_dev[rc_idx].user_suspend = false;
+		}
+
+		mutex_unlock(&msm_pcie_dev[rc_idx].recovery_lock);
+
+		break;
+	case MSM_PCIE_DISABLE_PC:
+		PCIE_DBG(&msm_pcie_dev[rc_idx],
+			"User of RC%d requests to keep the link always alive.\n",
+			rc_idx);
+		spin_lock_irqsave(&msm_pcie_dev[rc_idx].cfg_lock,
+				msm_pcie_dev[rc_idx].irqsave_flags);
+		if (msm_pcie_dev[rc_idx].suspending) {
+			PCIE_ERR(&msm_pcie_dev[rc_idx],
+				"PCIe: RC%d Link has been suspended before request\n",
+				rc_idx);
+			ret = MSM_PCIE_ERROR;
+		} else {
+			msm_pcie_dev[rc_idx].disable_pc = true;
+		}
+		spin_unlock_irqrestore(&msm_pcie_dev[rc_idx].cfg_lock,
+				msm_pcie_dev[rc_idx].irqsave_flags);
+		break;
+	case MSM_PCIE_ENABLE_PC:
+		PCIE_DBG(&msm_pcie_dev[rc_idx],
+			"User of RC%d cancels the request to keep the link always alive.\n",
+			rc_idx);
+		spin_lock_irqsave(&msm_pcie_dev[rc_idx].cfg_lock,
+				msm_pcie_dev[rc_idx].irqsave_flags);
+		msm_pcie_dev[rc_idx].disable_pc = false;
+		spin_unlock_irqrestore(&msm_pcie_dev[rc_idx].cfg_lock,
+				msm_pcie_dev[rc_idx].irqsave_flags);
+		break;
+	default:
+		PCIE_ERR(&msm_pcie_dev[rc_idx],
+			"PCIe: RC%d: unsupported pm operation:%d.\n",
+			rc_idx, pm_opt);
+		ret = -ENODEV;
+		goto out;
+	}
+
+out:
+	return ret;
+}
+EXPORT_SYMBOL(msm_pcie_pm_control);
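+
+/*
+ * Example (sketch, hypothetical client): an endpoint driver that owns "pdev"
+ * on this bus could park and later revive the link with:
+ *
+ *	msm_pcie_pm_control(MSM_PCIE_SUSPEND, pdev->bus->number, pdev,
+ *			    NULL, 0);
+ *	msm_pcie_pm_control(MSM_PCIE_RESUME, pdev->bus->number, pdev,
+ *			    NULL, 0);
+ */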
+
+int msm_pcie_register_event(struct msm_pcie_register_event *reg)
+{
+	int i, ret = 0;
+	struct msm_pcie_dev_t *pcie_dev;
+
+	if (!reg) {
+		pr_err("PCIe: Event registration is NULL\n");
+		return -ENODEV;
+	}
+
+	if (!reg->user) {
+		pr_err("PCIe: User of event registration is NULL\n");
+		return -ENODEV;
+	}
+
+	pcie_dev = PCIE_BUS_PRIV_DATA(((struct pci_dev *)reg->user)->bus);
+
+	if (!pcie_dev) {
+		PCIE_ERR(pcie_dev, "%s",
+			"PCIe: did not find RC for pci endpoint device.\n");
+		return -ENODEV;
+	}
+
+	if (pcie_dev->num_ep > 1) {
+		for (i = 0; i < MAX_DEVICE_NUM; i++) {
+			if (reg->user ==
+				pcie_dev->pcidev_table[i].dev) {
+				pcie_dev->event_reg =
+					pcie_dev->pcidev_table[i].event_reg;
+
+				if (!pcie_dev->event_reg) {
+					pcie_dev->pcidev_table[i].registered =
+						true;
+
+					pcie_dev->num_active_ep++;
+					PCIE_DBG(pcie_dev,
+						"PCIe: RC%d: number of active EP(s): %d.\n",
+						pcie_dev->rc_idx,
+						pcie_dev->num_active_ep);
+				}
+
+				pcie_dev->event_reg = reg;
+				pcie_dev->pcidev_table[i].event_reg = reg;
+				PCIE_DBG(pcie_dev,
+					"Event 0x%x is registered for RC %d\n",
+					reg->events,
+					pcie_dev->rc_idx);
+
+				break;
+			}
+		}
+
+		if (pcie_dev->pending_ep_reg) {
+			for (i = 0; i < MAX_DEVICE_NUM; i++)
+				if (!pcie_dev->pcidev_table[i].registered)
+					break;
+
+			if (i == MAX_DEVICE_NUM)
+				pcie_dev->pending_ep_reg = false;
+		}
+	} else {
+		pcie_dev->event_reg = reg;
+		PCIE_DBG(pcie_dev,
+			"Event 0x%x is registered for RC %d\n", reg->events,
+			pcie_dev->rc_idx);
+	}
+
+	return ret;
+}
+EXPORT_SYMBOL(msm_pcie_register_event);
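+
+/*
+ * Example (sketch): registering for link events; only the .events and .user
+ * fields are shown, and the MSM_PCIE_EVENT_LINKDOWN flag is assumed from
+ * this driver's public header.
+ *
+ *	static struct msm_pcie_register_event pcie_event;
+ *
+ *	pcie_event.events = MSM_PCIE_EVENT_LINKDOWN;
+ *	pcie_event.user = pdev;		(the endpoint's struct pci_dev)
+ *	ret = msm_pcie_register_event(&pcie_event);
+ */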
+
+int msm_pcie_deregister_event(struct msm_pcie_register_event *reg)
+{
+	int i, ret = 0;
+	struct msm_pcie_dev_t *pcie_dev;
+
+	if (!reg) {
+		pr_err("PCIe: Event deregistration is NULL\n");
+		return -ENODEV;
+	}
+
+	if (!reg->user) {
+		pr_err("PCIe: User of event deregistration is NULL\n");
+		return -ENODEV;
+	}
+
+	pcie_dev = PCIE_BUS_PRIV_DATA(((struct pci_dev *)reg->user)->bus);
+
+	if (!pcie_dev) {
+		PCIE_ERR(pcie_dev, "%s",
+			"PCIe: did not find RC for pci endpoint device.\n");
+		return -ENODEV;
+	}
+
+	if (pcie_dev->num_ep > 1) {
+		for (i = 0; i < MAX_DEVICE_NUM; i++) {
+			if (reg->user == pcie_dev->pcidev_table[i].dev) {
+				if (pcie_dev->pcidev_table[i].event_reg) {
+					pcie_dev->num_active_ep--;
+					PCIE_DBG(pcie_dev,
+						"PCIe: RC%d: number of active EP(s) left: %d.\n",
+						pcie_dev->rc_idx,
+						pcie_dev->num_active_ep);
+				}
+
+				pcie_dev->event_reg = NULL;
+				pcie_dev->pcidev_table[i].event_reg = NULL;
+				PCIE_DBG(pcie_dev,
+					"Event is deregistered for RC %d\n",
+					pcie_dev->rc_idx);
+
+				break;
+			}
+		}
+	} else {
+		pcie_dev->event_reg = NULL;
+		PCIE_DBG(pcie_dev, "Event is deregistered for RC %d\n",
+				pcie_dev->rc_idx);
+	}
+
+	return ret;
+}
+EXPORT_SYMBOL(msm_pcie_deregister_event);
+
+int msm_pcie_recover_config(struct pci_dev *dev)
+{
+	int ret = 0;
+	struct msm_pcie_dev_t *pcie_dev;
+
+	if (dev) {
+		pcie_dev = PCIE_BUS_PRIV_DATA(dev->bus);
+		PCIE_DBG(pcie_dev,
+			"Recovery for the link of RC%d\n", pcie_dev->rc_idx);
+	} else {
+		pr_err("PCIe: the input pci dev is NULL.\n");
+		return -ENODEV;
+	}
+
+	if (msm_pcie_confirm_linkup(pcie_dev, true, true, pcie_dev->conf)) {
+		PCIE_DBG(pcie_dev,
+			"Recover config space of RC%d and its EP\n",
+			pcie_dev->rc_idx);
+		pcie_dev->shadow_en = false;
+		PCIE_DBG(pcie_dev, "Recover RC%d\n", pcie_dev->rc_idx);
+		msm_pcie_cfg_recover(pcie_dev, true);
+		PCIE_DBG(pcie_dev, "Recover EP of RC%d\n", pcie_dev->rc_idx);
+		msm_pcie_cfg_recover(pcie_dev, false);
+		PCIE_DBG(pcie_dev,
+			"Refreshing the saved config space in PCI framework for RC%d and its EP\n",
+			pcie_dev->rc_idx);
+		pci_save_state(pcie_dev->dev);
+		pci_save_state(dev);
+		pcie_dev->shadow_en = true;
+		PCIE_DBG(pcie_dev, "Turn on shadow for RC%d\n",
+			pcie_dev->rc_idx);
+	} else {
+		PCIE_ERR(pcie_dev,
+			"PCIe: the link of RC%d is not up yet; can't recover config space.\n",
+			pcie_dev->rc_idx);
+		ret = -ENODEV;
+	}
+
+	return ret;
+}
+EXPORT_SYMBOL(msm_pcie_recover_config);
+
+int msm_pcie_shadow_control(struct pci_dev *dev, bool enable)
+{
+	int ret = 0;
+	struct msm_pcie_dev_t *pcie_dev;
+
+	if (dev) {
+		pcie_dev = PCIE_BUS_PRIV_DATA(dev->bus);
+		PCIE_DBG(pcie_dev,
+			"User requests to %s shadow\n",
+			enable ? "enable" : "disable");
+	} else {
+		pr_err("PCIe: the input pci dev is NULL.\n");
+		return -ENODEV;
+	}
+
+	PCIE_DBG(pcie_dev,
+		"The shadowing of RC%d is %s enabled currently.\n",
+		pcie_dev->rc_idx, pcie_dev->shadow_en ? "" : "not");
+
+	pcie_dev->shadow_en = enable;
+
+	PCIE_DBG(pcie_dev,
+		"Shadowing of RC%d is turned %s upon user's request.\n",
+		pcie_dev->rc_idx, enable ? "on" : "off");
+
+	return ret;
+}
+EXPORT_SYMBOL(msm_pcie_shadow_control);
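+
+/*
+ * Example (sketch): a client issuing config-space writes that should not be
+ * captured in the shadow could bracket the access:
+ *
+ *	msm_pcie_shadow_control(pdev, false);
+ *	...raw config accesses...
+ *	msm_pcie_shadow_control(pdev, true);
+ */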
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/perf/perf_event_armv8.c	2019-10-29 09:26:24.577212318 +0100
@@ -0,0 +1,761 @@
+/*
+ * PMU support
+ *
+ * Copyright (C) 2012 ARM Limited
+ * Author: Will Deacon <will.deacon@arm.com>
+ *
+ * This code is based heavily on the ARMv7 perf event code.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <asm/irq_regs.h>
+#include <asm/perf_event.h>
+
+#include <linux/of.h>
+#include <linux/perf/arm_pmu.h>
+#include <linux/platform_device.h>
+
+/*
+ * ARMv8 PMUv3 Performance Events handling code.
+ * Common event types.
+ */
+enum armv8_pmuv3_perf_types {
+	/* Required events. */
+	ARMV8_PMUV3_PERFCTR_PMNC_SW_INCR			= 0x00,
+	ARMV8_PMUV3_PERFCTR_L1_DCACHE_REFILL			= 0x03,
+	ARMV8_PMUV3_PERFCTR_L1_DCACHE_ACCESS			= 0x04,
+	ARMV8_PMUV3_PERFCTR_PC_BRANCH_MIS_PRED			= 0x10,
+	ARMV8_PMUV3_PERFCTR_CLOCK_CYCLES			= 0x11,
+	ARMV8_PMUV3_PERFCTR_PC_BRANCH_PRED			= 0x12,
+
+	/* At least one of the following is required. */
+	ARMV8_PMUV3_PERFCTR_INSTR_EXECUTED			= 0x08,
+	ARMV8_PMUV3_PERFCTR_OP_SPEC				= 0x1B,
+
+	/* Common architectural events. */
+	ARMV8_PMUV3_PERFCTR_MEM_READ				= 0x06,
+	ARMV8_PMUV3_PERFCTR_MEM_WRITE				= 0x07,
+	ARMV8_PMUV3_PERFCTR_EXC_TAKEN				= 0x09,
+	ARMV8_PMUV3_PERFCTR_EXC_EXECUTED			= 0x0A,
+	ARMV8_PMUV3_PERFCTR_CID_WRITE				= 0x0B,
+	ARMV8_PMUV3_PERFCTR_PC_WRITE				= 0x0C,
+	ARMV8_PMUV3_PERFCTR_PC_IMM_BRANCH			= 0x0D,
+	ARMV8_PMUV3_PERFCTR_PC_PROC_RETURN			= 0x0E,
+	ARMV8_PMUV3_PERFCTR_MEM_UNALIGNED_ACCESS		= 0x0F,
+	ARMV8_PMUV3_PERFCTR_TTBR_WRITE				= 0x1C,
+
+	/* Common microarchitectural events. */
+	ARMV8_PMUV3_PERFCTR_L1_ICACHE_REFILL			= 0x01,
+	ARMV8_PMUV3_PERFCTR_ITLB_REFILL				= 0x02,
+	ARMV8_PMUV3_PERFCTR_DTLB_REFILL				= 0x05,
+	ARMV8_PMUV3_PERFCTR_MEM_ACCESS				= 0x13,
+	ARMV8_PMUV3_PERFCTR_L1_ICACHE_ACCESS			= 0x14,
+	ARMV8_PMUV3_PERFCTR_L1_DCACHE_WB			= 0x15,
+	ARMV8_PMUV3_PERFCTR_L2_CACHE_ACCESS			= 0x16,
+	ARMV8_PMUV3_PERFCTR_L2_CACHE_REFILL			= 0x17,
+	ARMV8_PMUV3_PERFCTR_L2_CACHE_WB				= 0x18,
+	ARMV8_PMUV3_PERFCTR_BUS_ACCESS				= 0x19,
+	ARMV8_PMUV3_PERFCTR_MEM_ERROR				= 0x1A,
+	ARMV8_PMUV3_PERFCTR_BUS_CYCLES				= 0x1D,
+};
+
+/* ARMv8 Cortex-A53 specific event types. */
+enum armv8_a53_pmu_perf_types {
+	ARMV8_A53_PERFCTR_PREFETCH_LINEFILL			= 0xC2,
+};
+
+/* ARMv8 Cortex-A57 specific event types. */
+enum armv8_a57_perf_types {
+	ARMV8_A57_PERFCTR_L1_DCACHE_ACCESS_LD			= 0x40,
+	ARMV8_A57_PERFCTR_L1_DCACHE_ACCESS_ST			= 0x41,
+	ARMV8_A57_PERFCTR_L1_DCACHE_REFILL_LD			= 0x42,
+	ARMV8_A57_PERFCTR_L1_DCACHE_REFILL_ST			= 0x43,
+	ARMV8_A57_PERFCTR_DTLB_REFILL_LD			= 0x4c,
+	ARMV8_A57_PERFCTR_DTLB_REFILL_ST			= 0x4d,
+};
+
+/* PMUv3 HW events mapping. */
+const unsigned armv8_pmuv3_perf_map[PERF_COUNT_HW_MAX] = {
+	PERF_MAP_ALL_UNSUPPORTED,
+	[PERF_COUNT_HW_CPU_CYCLES]		= ARMV8_PMUV3_PERFCTR_CLOCK_CYCLES,
+	[PERF_COUNT_HW_INSTRUCTIONS]		= ARMV8_PMUV3_PERFCTR_INSTR_EXECUTED,
+	[PERF_COUNT_HW_CACHE_REFERENCES]	= ARMV8_PMUV3_PERFCTR_L1_DCACHE_ACCESS,
+	[PERF_COUNT_HW_CACHE_MISSES]		= ARMV8_PMUV3_PERFCTR_L1_DCACHE_REFILL,
+	[PERF_COUNT_HW_BRANCH_MISSES]		= ARMV8_PMUV3_PERFCTR_PC_BRANCH_MIS_PRED,
+};
+
+/* ARM Cortex-A53 HW events mapping. */
+static const unsigned armv8_a53_perf_map[PERF_COUNT_HW_MAX] = {
+	PERF_MAP_ALL_UNSUPPORTED,
+	[PERF_COUNT_HW_CPU_CYCLES]		= ARMV8_PMUV3_PERFCTR_CLOCK_CYCLES,
+	[PERF_COUNT_HW_INSTRUCTIONS]		= ARMV8_PMUV3_PERFCTR_INSTR_EXECUTED,
+	[PERF_COUNT_HW_CACHE_REFERENCES]	= ARMV8_PMUV3_PERFCTR_L1_DCACHE_ACCESS,
+	[PERF_COUNT_HW_CACHE_MISSES]		= ARMV8_PMUV3_PERFCTR_L1_DCACHE_REFILL,
+	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= ARMV8_PMUV3_PERFCTR_PC_WRITE,
+	[PERF_COUNT_HW_BRANCH_MISSES]		= ARMV8_PMUV3_PERFCTR_PC_BRANCH_MIS_PRED,
+	[PERF_COUNT_HW_BUS_CYCLES]		= ARMV8_PMUV3_PERFCTR_BUS_CYCLES,
+};
+
+static const unsigned armv8_a57_perf_map[PERF_COUNT_HW_MAX] = {
+	PERF_MAP_ALL_UNSUPPORTED,
+	[PERF_COUNT_HW_CPU_CYCLES]		= ARMV8_PMUV3_PERFCTR_CLOCK_CYCLES,
+	[PERF_COUNT_HW_INSTRUCTIONS]		= ARMV8_PMUV3_PERFCTR_INSTR_EXECUTED,
+	[PERF_COUNT_HW_CACHE_REFERENCES]	= ARMV8_PMUV3_PERFCTR_L1_DCACHE_ACCESS,
+	[PERF_COUNT_HW_CACHE_MISSES]		= ARMV8_PMUV3_PERFCTR_L1_DCACHE_REFILL,
+	[PERF_COUNT_HW_BRANCH_MISSES]		= ARMV8_PMUV3_PERFCTR_PC_BRANCH_MIS_PRED,
+	[PERF_COUNT_HW_BUS_CYCLES]		= ARMV8_PMUV3_PERFCTR_BUS_CYCLES,
+};
+
+const unsigned armv8_pmuv3_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
+						[PERF_COUNT_HW_CACHE_OP_MAX]
+						[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
+	PERF_CACHE_MAP_ALL_UNSUPPORTED,
+
+	[C(L1D)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV8_PMUV3_PERFCTR_L1_DCACHE_ACCESS,
+	[C(L1D)][C(OP_READ)][C(RESULT_MISS)]	= ARMV8_PMUV3_PERFCTR_L1_DCACHE_REFILL,
+	[C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV8_PMUV3_PERFCTR_L1_DCACHE_ACCESS,
+	[C(L1D)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV8_PMUV3_PERFCTR_L1_DCACHE_REFILL,
+
+	[C(BPU)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV8_PMUV3_PERFCTR_PC_BRANCH_PRED,
+	[C(BPU)][C(OP_READ)][C(RESULT_MISS)]	= ARMV8_PMUV3_PERFCTR_PC_BRANCH_MIS_PRED,
+	[C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV8_PMUV3_PERFCTR_PC_BRANCH_PRED,
+	[C(BPU)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV8_PMUV3_PERFCTR_PC_BRANCH_MIS_PRED,
+};
+
+static const unsigned armv8_a53_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
+					      [PERF_COUNT_HW_CACHE_OP_MAX]
+					      [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
+	PERF_CACHE_MAP_ALL_UNSUPPORTED,
+
+	[C(L1D)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV8_PMUV3_PERFCTR_L1_DCACHE_ACCESS,
+	[C(L1D)][C(OP_READ)][C(RESULT_MISS)]	= ARMV8_PMUV3_PERFCTR_L1_DCACHE_REFILL,
+	[C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV8_PMUV3_PERFCTR_L1_DCACHE_ACCESS,
+	[C(L1D)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV8_PMUV3_PERFCTR_L1_DCACHE_REFILL,
+	[C(L1D)][C(OP_PREFETCH)][C(RESULT_MISS)] = ARMV8_A53_PERFCTR_PREFETCH_LINEFILL,
+
+	[C(L1I)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV8_PMUV3_PERFCTR_L1_ICACHE_ACCESS,
+	[C(L1I)][C(OP_READ)][C(RESULT_MISS)]	= ARMV8_PMUV3_PERFCTR_L1_ICACHE_REFILL,
+
+	[C(ITLB)][C(OP_READ)][C(RESULT_MISS)]	= ARMV8_PMUV3_PERFCTR_ITLB_REFILL,
+
+	[C(BPU)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV8_PMUV3_PERFCTR_PC_BRANCH_PRED,
+	[C(BPU)][C(OP_READ)][C(RESULT_MISS)]	= ARMV8_PMUV3_PERFCTR_PC_BRANCH_MIS_PRED,
+	[C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV8_PMUV3_PERFCTR_PC_BRANCH_PRED,
+	[C(BPU)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV8_PMUV3_PERFCTR_PC_BRANCH_MIS_PRED,
+};
+
+static const unsigned armv8_a57_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
+					      [PERF_COUNT_HW_CACHE_OP_MAX]
+					      [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
+	PERF_CACHE_MAP_ALL_UNSUPPORTED,
+
+	[C(L1D)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV8_A57_PERFCTR_L1_DCACHE_ACCESS_LD,
+	[C(L1D)][C(OP_READ)][C(RESULT_MISS)]	= ARMV8_A57_PERFCTR_L1_DCACHE_REFILL_LD,
+	[C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV8_A57_PERFCTR_L1_DCACHE_ACCESS_ST,
+	[C(L1D)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV8_A57_PERFCTR_L1_DCACHE_REFILL_ST,
+
+	[C(L1I)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV8_PMUV3_PERFCTR_L1_ICACHE_ACCESS,
+	[C(L1I)][C(OP_READ)][C(RESULT_MISS)]	= ARMV8_PMUV3_PERFCTR_L1_ICACHE_REFILL,
+
+	[C(DTLB)][C(OP_READ)][C(RESULT_MISS)]	= ARMV8_A57_PERFCTR_DTLB_REFILL_LD,
+	[C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV8_A57_PERFCTR_DTLB_REFILL_ST,
+
+	[C(ITLB)][C(OP_READ)][C(RESULT_MISS)]	= ARMV8_PMUV3_PERFCTR_ITLB_REFILL,
+
+	[C(BPU)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV8_PMUV3_PERFCTR_PC_BRANCH_PRED,
+	[C(BPU)][C(OP_READ)][C(RESULT_MISS)]	= ARMV8_PMUV3_PERFCTR_PC_BRANCH_MIS_PRED,
+	[C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV8_PMUV3_PERFCTR_PC_BRANCH_PRED,
+	[C(BPU)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV8_PMUV3_PERFCTR_PC_BRANCH_MIS_PRED,
+};
+
+/*
+ * Perf Events' indices
+ */
+#define	ARMV8_IDX_CYCLE_COUNTER	0
+#define	ARMV8_IDX_COUNTER0	1
+#define	ARMV8_IDX_COUNTER_LAST(cpu_pmu) \
+	(ARMV8_IDX_CYCLE_COUNTER + cpu_pmu->num_events - 1)
+
+#define	ARMV8_MAX_COUNTERS	32
+#define	ARMV8_COUNTER_MASK	(ARMV8_MAX_COUNTERS - 1)
+
+/*
+ * ARMv8 low level PMU access
+ */
+
+/*
+ * Perf Event to low level counters mapping
+ */
+#define	ARMV8_IDX_TO_COUNTER(x)	\
+	(((x) - ARMV8_IDX_COUNTER0) & ARMV8_COUNTER_MASK)
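+/*
+ * Index 0 is the dedicated cycle counter (PMCCNTR); the generic event
+ * counters start at ARMV8_IDX_COUNTER0, so hardware counter N is selected
+ * via PMSELR as (idx - ARMV8_IDX_COUNTER0).
+ */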
+
+/*
+ * Per-CPU PMCR: config reg
+ */
+#define ARMV8_PMCR_E		(1 << 0) /* Enable all counters */
+#define ARMV8_PMCR_P		(1 << 1) /* Reset all counters */
+#define ARMV8_PMCR_C		(1 << 2) /* Cycle counter reset */
+#define ARMV8_PMCR_D		(1 << 3) /* CCNT counts every 64th cpu cycle */
+#define ARMV8_PMCR_X		(1 << 4) /* Export to ETM */
+#define ARMV8_PMCR_DP		(1 << 5) /* Disable CCNT if non-invasive debug */
+#define	ARMV8_PMCR_N_SHIFT	11	 /* Number of counters supported */
+#define	ARMV8_PMCR_N_MASK	0x1f
+#define	ARMV8_PMCR_MASK		0x3f	 /* Mask for writable bits */
+
+/*
+ * PMOVSR: counters overflow flag status reg
+ */
+#define	ARMV8_OVSR_MASK		0xffffffff	/* Mask for writable bits */
+#define	ARMV8_OVERFLOWED_MASK	ARMV8_OVSR_MASK
+
+/*
+ * PMXEVTYPER: Event selection reg
+ */
+#define	ARMV8_EVTYPE_MASK	0xc80003ff	/* Mask for writable bits */
+#define	ARMV8_EVTYPE_EVENT	0x3ff		/* Mask for EVENT bits */
+
+/*
+ * Event filters for PMUv3
+ */
+#define	ARMV8_EXCLUDE_EL1	(1 << 31)
+#define	ARMV8_EXCLUDE_EL0	(1 << 30)
+#define	ARMV8_INCLUDE_EL2	(1 << 27)
+
+struct arm_pmu_and_idle_nb {
+	struct arm_pmu *cpu_pmu;
+	struct notifier_block perf_cpu_idle_nb;
+};
+
+static inline u32 armv8pmu_pmcr_read(void)
+{
+	return armv8pmu_pmcr_read_reg();
+}
+
+inline void armv8pmu_pmcr_write(u32 val)
+{
+	val &= ARMV8_PMCR_MASK;
+	isb();
+	armv8pmu_pmcr_write_reg(val);
+}
+
+static inline int armv8pmu_has_overflowed(u32 pmovsr)
+{
+	return pmovsr & ARMV8_OVERFLOWED_MASK;
+}
+
+static inline int armv8pmu_counter_valid(struct arm_pmu *cpu_pmu, int idx)
+{
+	return idx >= ARMV8_IDX_CYCLE_COUNTER &&
+		idx <= ARMV8_IDX_COUNTER_LAST(cpu_pmu);
+}
+
+static inline int armv8pmu_counter_has_overflowed(u32 pmnc, int idx)
+{
+	return pmnc & BIT(ARMV8_IDX_TO_COUNTER(idx));
+}
+
+static inline int armv8pmu_select_counter(int idx)
+{
+	u32 counter = ARMV8_IDX_TO_COUNTER(idx);
+	armv8pmu_pmselr_write_reg(counter);
+	isb();
+
+	return idx;
+}
+
+static inline u32 armv8pmu_read_counter(struct perf_event *event)
+{
+	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
+	struct hw_perf_event *hwc = &event->hw;
+	int idx = hwc->idx;
+	u32 value = 0;
+
+	if (!armv8pmu_counter_valid(cpu_pmu, idx))
+		pr_err("CPU%u reading wrong counter %d\n",
+			smp_processor_id(), idx);
+	else if (idx == ARMV8_IDX_CYCLE_COUNTER)
+		value = armv8pmu_pmccntr_read_reg();
+	else if (armv8pmu_select_counter(idx) == idx)
+		value = armv8pmu_pmxevcntr_read_reg();
+
+	return value;
+}
+
+static inline void armv8pmu_write_counter(struct perf_event *event, u32 value)
+{
+	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
+	struct hw_perf_event *hwc = &event->hw;
+	int idx = hwc->idx;
+
+	if (!armv8pmu_counter_valid(cpu_pmu, idx))
+		pr_err("CPU%u writing wrong counter %d\n",
+			smp_processor_id(), idx);
+	else if (idx == ARMV8_IDX_CYCLE_COUNTER)
+		armv8pmu_pmccntr_write_reg(value);
+	else if (armv8pmu_select_counter(idx) == idx)
+		armv8pmu_pmxevcntr_write_reg(value);
+}
+
+inline void armv8pmu_write_evtype(int idx, u32 val)
+{
+	if (armv8pmu_select_counter(idx) == idx) {
+		val &= ARMV8_EVTYPE_MASK;
+		armv8pmu_pmxevtyper_write_reg(val);
+	}
+}
+
+inline int armv8pmu_enable_counter(int idx)
+{
+	u32 counter = ARMV8_IDX_TO_COUNTER(idx);
+	armv8pmu_pmcntenset_write_reg(BIT(counter));
+	return idx;
+}
+
+inline int armv8pmu_disable_counter(int idx)
+{
+	u32 counter = ARMV8_IDX_TO_COUNTER(idx);
+	armv8pmu_pmcntenclr_write_reg(BIT(counter));
+	return idx;
+}
+
+inline int armv8pmu_enable_intens(int idx)
+{
+	u32 counter = ARMV8_IDX_TO_COUNTER(idx);
+	armv8pmu_pmintenset_write_reg(BIT(counter));
+	return idx;
+}
+
+inline int armv8pmu_disable_intens(int idx)
+{
+	u32 counter = ARMV8_IDX_TO_COUNTER(idx);
+	armv8pmu_pmintenclr_write_reg(BIT(counter));
+	isb();
+	/* Clear the overflow flag in case an interrupt is pending. */
+	armv8pmu_pmovsclr_write_reg(BIT(counter));
+	isb();
+
+	return idx;
+}
+
+inline u32 armv8pmu_getreset_flags(void)
+{
+	u32 value;
+
+	/* Read */
+	value = armv8pmu_pmovsclr_read_reg();
+
+	/* Write to clear flags */
+	value &= ARMV8_OVSR_MASK;
+	armv8pmu_pmovsclr_write_reg(value);
+
+	return value;
+}
+
+static void armv8pmu_enable_event(struct perf_event *event)
+{
+	unsigned long flags;
+	struct hw_perf_event *hwc = &event->hw;
+	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
+	struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
+	int idx = hwc->idx;
+
+	/*
+	 * Enable counter and interrupt, and set the counter to count
+	 * the event that we're interested in.
+	 */
+	raw_spin_lock_irqsave(&events->pmu_lock, flags);
+
+	/*
+	 * Disable counter
+	 */
+	armv8pmu_disable_counter(idx);
+
+	/*
+	 * Set event (if destined for PMNx counters).
+	 */
+	armv8pmu_write_evtype(idx, hwc->config_base);
+
+	/*
+	 * Enable interrupt for this counter
+	 */
+	armv8pmu_enable_intens(idx);
+
+	/*
+	 * Enable counter
+	 */
+	armv8pmu_enable_counter(idx);
+
+	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
+}
+
+static void armv8pmu_disable_event(struct perf_event *event)
+{
+	unsigned long flags;
+	struct hw_perf_event *hwc = &event->hw;
+	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
+	struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
+	int idx = hwc->idx;
+
+	/*
+	 * Disable counter and interrupt
+	 */
+	raw_spin_lock_irqsave(&events->pmu_lock, flags);
+
+	/*
+	 * Disable counter
+	 */
+	armv8pmu_disable_counter(idx);
+
+	/*
+	 * Disable interrupt for this counter
+	 */
+	armv8pmu_disable_intens(idx);
+
+	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
+}
+
+static irqreturn_t armv8pmu_handle_irq(int irq_num, void *dev)
+{
+	u32 pmovsr;
+	struct perf_sample_data data;
+	struct arm_pmu *cpu_pmu = (struct arm_pmu *)dev;
+	struct pmu_hw_events *cpuc = this_cpu_ptr(cpu_pmu->hw_events);
+	struct pt_regs *regs;
+	int idx;
+
+	/*
+	 * Get and reset the IRQ flags
+	 */
+	pmovsr = armv8pmu_getreset_flags();
+
+	/*
+	 * Did an overflow occur?
+	 */
+	if (!armv8pmu_has_overflowed(pmovsr))
+		return IRQ_NONE;
+
+	/*
+	 * Handle the counter(s) overflow(s)
+	 */
+	regs = get_irq_regs();
+
+	for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
+		struct perf_event *event = cpuc->events[idx];
+		struct hw_perf_event *hwc;
+
+		/* Ignore if we don't have an event. */
+		if (!event)
+			continue;
+
+		/*
+		 * We have a single interrupt for all counters. Check that
+		 * each counter has overflowed before we process it.
+		 */
+		if (!armv8pmu_counter_has_overflowed(pmovsr, idx))
+			continue;
+
+		hwc = &event->hw;
+		armpmu_event_update(event);
+		perf_sample_data_init(&data, 0, hwc->last_period);
+		if (!armpmu_event_set_period(event))
+			continue;
+
+		if (perf_event_overflow(event, &data, regs))
+			cpu_pmu->disable(event);
+	}
+
+	/*
+	 * Handle the pending perf events.
+	 *
+	 * Note: this call *must* be run with interrupts disabled. For
+	 * platforms that can have the PMU interrupts raised as an NMI, this
+	 * will not work.
+	 */
+	irq_work_run();
+
+	return IRQ_HANDLED;
+}
+
+static void armv8pmu_start(struct arm_pmu *cpu_pmu)
+{
+	unsigned long flags;
+	struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
+
+	raw_spin_lock_irqsave(&events->pmu_lock, flags);
+	/* Enable all counters */
+	armv8pmu_pmcr_write(armv8pmu_pmcr_read() | ARMV8_PMCR_E);
+	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
+}
+
+static void armv8pmu_stop(struct arm_pmu *cpu_pmu)
+{
+	unsigned long flags;
+	struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
+
+	raw_spin_lock_irqsave(&events->pmu_lock, flags);
+	/* Disable all counters */
+	armv8pmu_pmcr_write(armv8pmu_pmcr_read() & ~ARMV8_PMCR_E);
+	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
+}
+
+static int armv8pmu_get_event_idx(struct pmu_hw_events *cpuc,
+				  struct perf_event *event)
+{
+	int idx;
+	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
+	struct hw_perf_event *hwc = &event->hw;
+	unsigned long evtype = hwc->config_base & ARMV8_EVTYPE_EVENT;
+
+	/* Place the first cycle counter request into the cycle counter. */
+	if (evtype == ARMV8_PMUV3_PERFCTR_CLOCK_CYCLES) {
+		if (!test_and_set_bit(ARMV8_IDX_CYCLE_COUNTER, cpuc->used_mask))
+			return ARMV8_IDX_CYCLE_COUNTER;
+	}
+
+	/*
+	 * For anything other than a cycle counter, try and use
+	 * the events counters
+	 */
+	for (idx = ARMV8_IDX_COUNTER0; idx < cpu_pmu->num_events; ++idx) {
+		if (!test_and_set_bit(idx, cpuc->used_mask))
+			return idx;
+	}
+
+	/* The counters are all in use. */
+	return -EAGAIN;
+}
+
+/*
+ * Add an event filter to a given event. This will only work for PMUv2 PMUs.
+ */
+static int armv8pmu_set_event_filter(struct hw_perf_event *event,
+				     struct perf_event_attr *attr)
+{
+	unsigned long config_base = 0;
+
+	if (attr->exclude_user)
+		config_base |= ARMV8_EXCLUDE_EL0;
+	if (attr->exclude_kernel)
+		config_base |= ARMV8_EXCLUDE_EL1;
+	if (!attr->exclude_hv)
+		config_base |= ARMV8_INCLUDE_EL2;
+
+	/*
+	 * Install the filter into config_base as this is used to
+	 * construct the event type.
+	 */
+	event->config_base = config_base;
+
+	return 0;
+}
+
+#ifdef CONFIG_PERF_EVENTS_USERMODE
+static void armv8pmu_init_usermode(void)
+{
+	/* Enable full userspace access: EN, SW, CR and ER bits (0xF). */
+	armv8pmu_pmuserenr_write_reg(0xF);
+
+}
+#else
+static inline void armv8pmu_init_usermode(void)
+{
+	/* Disable access from userspace. */
+	armv8pmu_pmuserenr_write_reg(0);
+
+}
+#endif
+
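+/*
+ * On idle entry, read back each active event that set exclude_idle so its
+ * count is up to date before the CPU idles; discounting the idle period
+ * itself is assumed to happen in this tree's arm_pmu core.
+ */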
+static void armv8pmu_idle_update(struct arm_pmu *cpu_pmu)
+{
+	struct pmu_hw_events *hw_events;
+	struct perf_event *event;
+	int idx;
+
+	if (!cpu_pmu)
+		return;
+
+	hw_events = this_cpu_ptr(cpu_pmu->hw_events);
+
+	if (!hw_events)
+		return;
+
+	for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
+
+		if (!test_bit(idx, hw_events->used_mask))
+			continue;
+
+		event = hw_events->events[idx];
+
+		if (!event || !event->attr.exclude_idle ||
+				event->state != PERF_EVENT_STATE_ACTIVE)
+			continue;
+
+		cpu_pmu->pmu.read(event);
+	}
+}
+
+static void armv8pmu_reset(void *info)
+{
+	struct arm_pmu *cpu_pmu = (struct arm_pmu *)info;
+	u32 idx, nb_cnt = cpu_pmu->num_events;
+
+	/* The counter and interrupt enable registers are unknown at reset. */
+	for (idx = ARMV8_IDX_CYCLE_COUNTER; idx < nb_cnt; ++idx) {
+		armv8pmu_disable_counter(idx);
+		armv8pmu_disable_intens(idx);
+	}
+
+	/* Initialize & Reset PMNC: C and P bits. */
+	armv8pmu_pmcr_write(armv8pmu_pmcr_read() | ARMV8_PMCR_P | ARMV8_PMCR_C);
+
+	armv8pmu_init_usermode();
+}
+
+static int armv8_pmuv3_map_event(struct perf_event *event)
+{
+	return armpmu_map_event(event, &armv8_pmuv3_perf_map,
+				&armv8_pmuv3_perf_cache_map,
+				ARMV8_EVTYPE_EVENT);
+}
+
+static int armv8_a53_map_event(struct perf_event *event)
+{
+	return armpmu_map_event(event, &armv8_a53_perf_map,
+				&armv8_a53_perf_cache_map,
+				ARMV8_EVTYPE_EVENT);
+}
+
+static int armv8_a57_map_event(struct perf_event *event)
+{
+	return armpmu_map_event(event, &armv8_a57_perf_map,
+				&armv8_a57_perf_cache_map,
+				ARMV8_EVTYPE_EVENT);
+}
+
+static void armv8pmu_read_num_pmnc_events(void *info)
+{
+	int *nb_cnt = info;
+
+	/* Read the nb of CNTx counters supported from PMNC */
+	*nb_cnt = (armv8pmu_pmcr_read() >> ARMV8_PMCR_N_SHIFT) & ARMV8_PMCR_N_MASK;
+
+	/* Add the CPU cycles counter */
+	*nb_cnt += 1;
+}
+
+static int perf_cpu_idle_notifier(struct notifier_block *nb,
+				unsigned long action, void *data)
+{
+	struct arm_pmu_and_idle_nb *pmu_nb = container_of(nb,
+				struct arm_pmu_and_idle_nb, perf_cpu_idle_nb);
+
+	if (action == IDLE_START)
+		armv8pmu_idle_update(pmu_nb->cpu_pmu);
+
+	return NOTIFY_OK;
+}
+
+int armv8pmu_probe_num_events(struct arm_pmu *arm_pmu)
+{
+	int ret;
+	struct arm_pmu_and_idle_nb *pmu_idle_nb;
+
+	pmu_idle_nb = devm_kzalloc(&arm_pmu->plat_device->dev,
+					sizeof(*pmu_idle_nb), GFP_KERNEL);
+	if (!pmu_idle_nb)
+		return -ENOMEM;
+
+	pmu_idle_nb->cpu_pmu = arm_pmu;
+	pmu_idle_nb->perf_cpu_idle_nb.notifier_call = perf_cpu_idle_notifier;
+	idle_notifier_register(&pmu_idle_nb->perf_cpu_idle_nb);
+
+	ret = smp_call_function_any(&arm_pmu->supported_cpus,
+				    armv8pmu_read_num_pmnc_events,
+				    &arm_pmu->num_events, 1);
+	if (ret)
+		idle_notifier_unregister(&pmu_idle_nb->perf_cpu_idle_nb);
+	return ret;
+}
+
+void armv8_pmu_init(struct arm_pmu *cpu_pmu)
+{
+	cpu_pmu->handle_irq		= armv8pmu_handle_irq;
+	cpu_pmu->enable			= armv8pmu_enable_event;
+	cpu_pmu->disable		= armv8pmu_disable_event;
+	cpu_pmu->read_counter		= armv8pmu_read_counter;
+	cpu_pmu->write_counter		= armv8pmu_write_counter;
+	cpu_pmu->get_event_idx		= armv8pmu_get_event_idx;
+	cpu_pmu->start			= armv8pmu_start;
+	cpu_pmu->stop			= armv8pmu_stop;
+	cpu_pmu->reset			= armv8pmu_reset;
+	cpu_pmu->max_period		= (1LLU << 32) - 1;
+	cpu_pmu->set_event_filter	= armv8pmu_set_event_filter;
+}
+
+static int armv8_pmuv3_init(struct arm_pmu *cpu_pmu)
+{
+	armv8_pmu_init(cpu_pmu);
+	cpu_pmu->name			= "armv8_pmuv3";
+	cpu_pmu->map_event		= armv8_pmuv3_map_event;
+	return armv8pmu_probe_num_events(cpu_pmu);
+}
+
+static int armv8_a53_pmu_init(struct arm_pmu *cpu_pmu)
+{
+	armv8_pmu_init(cpu_pmu);
+	cpu_pmu->name			= "armv8_cortex_a53";
+	cpu_pmu->map_event		= armv8_a53_map_event;
+	return armv8pmu_probe_num_events(cpu_pmu);
+}
+
+static int armv8_a57_pmu_init(struct arm_pmu *cpu_pmu)
+{
+	armv8_pmu_init(cpu_pmu);
+	cpu_pmu->name			= "armv8_cortex_a57";
+	cpu_pmu->map_event		= armv8_a57_map_event;
+	return armv8pmu_probe_num_events(cpu_pmu);
+}
+
+static const struct of_device_id armv8_pmu_of_device_ids[] = {
+	{.compatible = "arm,armv8-pmuv3",	.data = armv8_pmuv3_init},
+	{.compatible = "arm,cortex-a53-pmu",	.data = armv8_a53_pmu_init},
+	{.compatible = "arm,cortex-a57-pmu",	.data = armv8_a57_pmu_init},
+#ifdef CONFIG_ARCH_MSM8996
+	{.compatible = "qcom,kryo-pmuv3", .data = kryo_pmu_init},
+#endif
+	{},
+};
+
+static int armv8_pmu_device_probe(struct platform_device *pdev)
+{
+	return arm_pmu_device_probe(pdev, armv8_pmu_of_device_ids, NULL);
+}
+
+static struct platform_driver armv8_pmu_driver = {
+	.driver		= {
+		.name	= "armv8-pmu",
+		.of_match_table = armv8_pmu_of_device_ids,
+	},
+	.probe		= armv8_pmu_device_probe,
+};
+
+static int __init register_armv8_pmu_driver(void)
+{
+	return platform_driver_register(&armv8_pmu_driver);
+}
+device_initcall(register_armv8_pmu_driver);
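+
+/*
+ * Example (sketch) of a device-tree node matched by the table above; the
+ * interrupt specifier is illustrative only:
+ *
+ *	pmu {
+ *		compatible = "arm,armv8-pmuv3";
+ *		interrupts = <1 7 0xf04>;
+ *	};
+ */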
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/phy/phy-qcom-ufs-qmp-v3-660.c	2019-01-22 16:16:26.079269734 +0100
@@ -0,0 +1,260 @@
+/*
+ * Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include "phy-qcom-ufs-qmp-v3-660.h"
+
+#define UFS_PHY_NAME "ufs_phy_qmp_v3_660"
+
+static
+int ufs_qcom_phy_qmp_v3_660_phy_calibrate(struct ufs_qcom_phy *ufs_qcom_phy,
+					bool is_rate_B)
+{
+	int err;
+	int tbl_size_A, tbl_size_B;
+	struct ufs_qcom_phy_calibration *tbl_A, *tbl_B;
+	u8 major = ufs_qcom_phy->host_ctrl_rev_major;
+	u16 minor = ufs_qcom_phy->host_ctrl_rev_minor;
+	u16 step = ufs_qcom_phy->host_ctrl_rev_step;
+
+	tbl_size_B = ARRAY_SIZE(phy_cal_table_rate_B);
+	tbl_B = phy_cal_table_rate_B;
+
+	if ((major == 0x3) && (minor == 0x001) && (step == 0x001)) {
+		tbl_A = phy_cal_table_rate_A_3_1_1;
+		tbl_size_A = ARRAY_SIZE(phy_cal_table_rate_A_3_1_1);
+	} else {
+		dev_err(ufs_qcom_phy->dev,
+			"%s: Unknown UFS-PHY version (major 0x%x minor 0x%x step 0x%x), no calibration values\n",
+			__func__, major, minor, step);
+		err = -ENODEV;
+		goto out;
+	}
+
+	err = ufs_qcom_phy_calibrate(ufs_qcom_phy,
+				     tbl_A, tbl_size_A,
+				     tbl_B, tbl_size_B,
+				     is_rate_B);
+
+	if (err)
+		dev_err(ufs_qcom_phy->dev,
+			"%s: ufs_qcom_phy_calibrate() failed %d\n",
+			__func__, err);
+
+out:
+	return err;
+}
+
+static int ufs_qcom_phy_qmp_v3_660_init(struct phy *generic_phy)
+{
+	struct ufs_qcom_phy_qmp_v3_660 *phy = phy_get_drvdata(generic_phy);
+	struct ufs_qcom_phy *phy_common = &phy->common_cfg;
+	int err;
+
+	err = ufs_qcom_phy_init_clks(generic_phy, phy_common);
+	if (err) {
+		dev_err(phy_common->dev, "%s: ufs_qcom_phy_init_clks() failed %d\n",
+			__func__, err);
+		goto out;
+	}
+
+	err = ufs_qcom_phy_init_vregulators(generic_phy, phy_common);
+	if (err) {
+		dev_err(phy_common->dev, "%s: ufs_qcom_phy_init_vregulators() failed %d\n",
+			__func__, err);
+		goto out;
+	}
+
+out:
+	return err;
+}
+
+static
+void ufs_qcom_phy_qmp_v3_660_power_control(struct ufs_qcom_phy *phy,
+					 bool power_ctrl)
+{
+	if (!power_ctrl) {
+		/* apply analog power collapse */
+		writel_relaxed(0x0, phy->mmio + UFS_PHY_POWER_DOWN_CONTROL);
+		/*
+		 * Make sure that PHY knows its analog rail is going to be
+		 * powered OFF.
+		 */
+		mb();
+	} else {
+		/* bring PHY out of analog power collapse */
+		writel_relaxed(0x1, phy->mmio + UFS_PHY_POWER_DOWN_CONTROL);
+
+		/*
+		 * Before any transaction involving the PHY, ensure it knows
+		 * that its analog rail is powered ON.
+		 */
+		mb();
+	}
+}
+
+static inline
+void ufs_qcom_phy_qmp_v3_660_set_tx_lane_enable(struct ufs_qcom_phy *phy,
+						   u32 val)
+{
+	/*
+	 * The v3 PHY does not have a TX_LANE_ENABLE register. Provide an
+	 * empty implementation so no error is propagated to the caller.
+	 */
+}
+
+static
+void ufs_qcom_phy_qmp_v3_660_ctrl_rx_linecfg(struct ufs_qcom_phy *phy,
+						bool ctrl)
+{
+	u32 temp;
+
+	temp = readl_relaxed(phy->mmio + UFS_PHY_LINECFG_DISABLE);
+
+	if (ctrl) /* enable RX LineCfg */
+		temp &= ~UFS_PHY_RX_LINECFG_DISABLE_BIT;
+	else /* disable RX LineCfg */
+		temp |= UFS_PHY_RX_LINECFG_DISABLE_BIT;
+
+	writel_relaxed(temp, phy->mmio + UFS_PHY_LINECFG_DISABLE);
+	/* Make sure the RX LineCfg configuration is applied before we return */
+	mb();
+}
+
+static inline void ufs_qcom_phy_qmp_v3_660_start_serdes(
+					struct ufs_qcom_phy *phy)
+{
+	u32 tmp;
+
+	tmp = readl_relaxed(phy->mmio + UFS_PHY_PHY_START);
+	tmp &= ~MASK_SERDES_START;
+	tmp |= (1 << OFFSET_SERDES_START);
+	writel_relaxed(tmp, phy->mmio + UFS_PHY_PHY_START);
+	/* Ensure register value is committed */
+	mb();
+}
+
+static int ufs_qcom_phy_qmp_v3_660_is_pcs_ready(
+				struct ufs_qcom_phy *phy_common)
+{
+	int err = 0;
+	u32 val;
+
+	err = readl_poll_timeout(phy_common->mmio + UFS_PHY_PCS_READY_STATUS,
+		val, (val & MASK_PCS_READY), 10, 1000000);
+	if (err)
+		dev_err(phy_common->dev, "%s: poll for pcs failed err = %d\n",
+			__func__, err);
+	return err;
+}
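
readl_poll_timeout() here (linux/iopoll.h) re-reads the status register
until MASK_PCS_READY is set, sleeping roughly 10 us between reads and
returning -ETIMEDOUT once the 1000000 us budget expires. An illustrative,
simplified open-coded equivalent:

	/* Illustrative expansion of the poll above. */
	static int pcs_ready_poll_sketch(struct ufs_qcom_phy *phy)
	{
		unsigned long deadline = jiffies + HZ;	/* ~1 s */
		u32 val;

		for (;;) {
			val = readl_relaxed(phy->mmio +
					    UFS_PHY_PCS_READY_STATUS);
			if (val & MASK_PCS_READY)
				return 0;
			if (time_after(jiffies, deadline))
				return -ETIMEDOUT;
			usleep_range(10, 20);	/* sleep_us = 10 */
		}
	}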
+
+static void ufs_qcom_phy_qmp_v3_660_dbg_register_dump(
+					struct ufs_qcom_phy *phy)
+{
+	ufs_qcom_phy_dump_regs(phy, COM_BASE, COM_SIZE,
+					"PHY QSERDES COM Registers ");
+	ufs_qcom_phy_dump_regs(phy, PHY_BASE, PHY_SIZE,
+					"PHY Registers ");
+	ufs_qcom_phy_dump_regs(phy, RX_BASE, RX_SIZE,
+					"PHY RX0 Registers ");
+	ufs_qcom_phy_dump_regs(phy, TX_BASE, TX_SIZE,
+					"PHY TX0 Registers ");
+}
+
+struct phy_ops ufs_qcom_phy_qmp_v3_660_phy_ops = {
+	.init		= ufs_qcom_phy_qmp_v3_660_init,
+	.exit		= ufs_qcom_phy_exit,
+	.power_on	= ufs_qcom_phy_power_on,
+	.power_off	= ufs_qcom_phy_power_off,
+	.owner		= THIS_MODULE,
+};
+
+struct ufs_qcom_phy_specific_ops phy_v3_660_ops = {
+	.calibrate_phy		= ufs_qcom_phy_qmp_v3_660_phy_calibrate,
+	.start_serdes		= ufs_qcom_phy_qmp_v3_660_start_serdes,
+	.is_physical_coding_sublayer_ready =
+				ufs_qcom_phy_qmp_v3_660_is_pcs_ready,
+	.set_tx_lane_enable	= ufs_qcom_phy_qmp_v3_660_set_tx_lane_enable,
+	.ctrl_rx_linecfg	= ufs_qcom_phy_qmp_v3_660_ctrl_rx_linecfg,
+	.power_control		= ufs_qcom_phy_qmp_v3_660_power_control,
+	.dbg_register_dump	= ufs_qcom_phy_qmp_v3_660_dbg_register_dump,
+};
+
+static int ufs_qcom_phy_qmp_v3_660_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct phy *generic_phy;
+	struct ufs_qcom_phy_qmp_v3_660 *phy;
+	int err = 0;
+
+	phy = devm_kzalloc(dev, sizeof(*phy), GFP_KERNEL);
+	if (!phy) {
+		err = -ENOMEM;
+		goto out;
+	}
+
+	generic_phy = ufs_qcom_phy_generic_probe(pdev, &phy->common_cfg,
+				&ufs_qcom_phy_qmp_v3_660_phy_ops,
+				&phy_v3_660_ops);
+
+	if (!generic_phy) {
+		dev_err(dev, "%s: ufs_qcom_phy_generic_probe() failed\n",
+			__func__);
+		err = -EIO;
+		goto out;
+	}
+
+	phy_set_drvdata(generic_phy, phy);
+
+	strlcpy(phy->common_cfg.name, UFS_PHY_NAME,
+		sizeof(phy->common_cfg.name));
+
+out:
+	return err;
+}
+
+static int ufs_qcom_phy_qmp_v3_660_remove(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct phy *generic_phy = to_phy(dev);
+	struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(generic_phy);
+	int err = 0;
+
+	err = ufs_qcom_phy_remove(generic_phy, ufs_qcom_phy);
+	if (err)
+		dev_err(dev, "%s: ufs_qcom_phy_remove failed = %d\n",
+			__func__, err);
+
+	return err;
+}
+
+static const struct of_device_id ufs_qcom_phy_qmp_v3_660_of_match[] = {
+	{.compatible = "qcom,ufs-phy-qmp-v3-660"},
+	{},
+};
+MODULE_DEVICE_TABLE(of, ufs_qcom_phy_qmp_v3_660_of_match);
+
+static struct platform_driver ufs_qcom_phy_qmp_v3_660_driver = {
+	.probe = ufs_qcom_phy_qmp_v3_660_probe,
+	.remove = ufs_qcom_phy_qmp_v3_660_remove,
+	.driver = {
+		.of_match_table = ufs_qcom_phy_qmp_v3_660_of_match,
+		.name = "ufs_qcom_phy_qmp_v3_660",
+		.owner = THIS_MODULE,
+	},
+};
+
+module_platform_driver(ufs_qcom_phy_qmp_v3_660_driver);
+
+MODULE_DESCRIPTION("Universal Flash Storage (UFS) QCOM PHY QMP v3 660");
+MODULE_LICENSE("GPL v2");
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/phy/phy-qcom-ufs-qmp-v3-660.h	2019-01-22 16:16:26.079269734 +0100
@@ -0,0 +1,283 @@
+/*
+ * Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef UFS_QCOM_PHY_QMP_V3_660_H_
+#define UFS_QCOM_PHY_QMP_V3_660_H_
+
+#include "phy-qcom-ufs-i.h"
+
+/* QCOM UFS PHY control registers */
+#define COM_BASE	0x000
+#define COM_OFF(x)	(COM_BASE + x)
+#define COM_SIZE	0x1C0
+
+#define TX_BASE		0x400
+#define TX_OFF(x)	(TX_BASE + x)
+#define TX_SIZE		0x128
+
+#define RX_BASE		0x600
+#define RX_OFF(x)	(RX_BASE + x)
+#define RX_SIZE		0x1FC
+
+#define PHY_BASE	0xC00
+#define PHY_OFF(x)	(PHY_BASE + x)
+#define PHY_SIZE	0x1B4
+
+/* UFS PHY QSERDES COM registers */
+#define QSERDES_COM_ATB_SEL1			COM_OFF(0x00)
+#define QSERDES_COM_ATB_SEL2			COM_OFF(0x04)
+#define QSERDES_COM_FREQ_UPDATE			COM_OFF(0x08)
+#define QSERDES_COM_BG_TIMER			COM_OFF(0x0C)
+#define QSERDES_COM_SSC_EN_CENTER		COM_OFF(0x10)
+#define QSERDES_COM_SSC_ADJ_PER1		COM_OFF(0x14)
+#define QSERDES_COM_SSC_ADJ_PER2		COM_OFF(0x18)
+#define QSERDES_COM_SSC_PER1			COM_OFF(0x1C)
+#define QSERDES_COM_SSC_PER2			COM_OFF(0x20)
+#define QSERDES_COM_SSC_STEP_SIZE1		COM_OFF(0x24)
+#define QSERDES_COM_SSC_STEP_SIZE2		COM_OFF(0x28)
+#define QSERDES_COM_POST_DIV			COM_OFF(0x2C)
+#define QSERDES_COM_POST_DIV_MUX		COM_OFF(0x30)
+#define QSERDES_COM_BIAS_EN_CLKBUFLR_EN		COM_OFF(0x34)
+#define QSERDES_COM_CLK_ENABLE1			COM_OFF(0x38)
+#define QSERDES_COM_SYS_CLK_CTRL		COM_OFF(0x3C)
+#define QSERDES_COM_SYSCLK_BUF_ENABLE		COM_OFF(0x40)
+#define QSERDES_COM_PLL_EN			COM_OFF(0x44)
+#define QSERDES_COM_PLL_IVCO			COM_OFF(0x48)
+#define QSERDES_COM_LOCK_CMP1_MODE0		COM_OFF(0X4C)
+#define QSERDES_COM_LOCK_CMP2_MODE0		COM_OFF(0X50)
+#define QSERDES_COM_LOCK_CMP3_MODE0		COM_OFF(0X54)
+#define QSERDES_COM_LOCK_CMP1_MODE1		COM_OFF(0X58)
+#define QSERDES_COM_LOCK_CMP2_MODE1		COM_OFF(0X5C)
+#define QSERDES_COM_LOCK_CMP3_MODE1		COM_OFF(0X60)
+#define QSERDES_COM_CMD_RSVD0			COM_OFF(0x64)
+#define QSERDES_COM_EP_CLOCK_DETECT_CTRL	COM_OFF(0x68)
+#define QSERDES_COM_SYSCLK_DET_COMP_STATUS	COM_OFF(0x6C)
+#define QSERDES_COM_BG_TRIM			COM_OFF(0x70)
+#define QSERDES_COM_CLK_EP_DIV			COM_OFF(0x74)
+#define QSERDES_COM_CP_CTRL_MODE0		COM_OFF(0x78)
+#define QSERDES_COM_CP_CTRL_MODE1		COM_OFF(0x7C)
+#define QSERDES_COM_CMN_RSVD1			COM_OFF(0x80)
+#define QSERDES_COM_PLL_RCTRL_MODE0		COM_OFF(0x84)
+#define QSERDES_COM_PLL_RCTRL_MODE1		COM_OFF(0x88)
+#define QSERDES_COM_CMN_RSVD2			COM_OFF(0x8C)
+#define QSERDES_COM_PLL_CCTRL_MODE0		COM_OFF(0x90)
+#define QSERDES_COM_PLL_CCTRL_MODE1		COM_OFF(0x94)
+#define QSERDES_COM_CMN_RSVD3			COM_OFF(0x98)
+#define QSERDES_COM_PLL_CNTRL			COM_OFF(0x9C)
+#define QSERDES_COM_PHASE_SEL_CTRL		COM_OFF(0xA0)
+#define QSERDES_COM_PHASE_SEL_DC		COM_OFF(0xA4)
+#define QSERDES_COM_BIAS_EN_CTRL_BY_PSM		COM_OFF(0xA8)
+#define QSERDES_COM_SYSCLK_EN_SEL		COM_OFF(0xAC)
+#define QSERDES_COM_CML_SYSCLK_SEL		COM_OFF(0xB0)
+#define QSERDES_COM_RESETSM_CNTRL		COM_OFF(0xB4)
+#define QSERDES_COM_RESETSM_CNTRL2		COM_OFF(0xB8)
+#define QSERDES_COM_RESTRIM_CTRL		COM_OFF(0xBC)
+#define QSERDES_COM_RESTRIM_CTRL2		COM_OFF(0xC0)
+#define QSERDES_COM_LOCK_CMP_EN			COM_OFF(0xC8)
+#define QSERDES_COM_LOCK_CMP_CFG		COM_OFF(0xCC)
+#define QSERDES_COM_DEC_START_MODE0		COM_OFF(0xD0)
+#define QSERDES_COM_DEC_START_MODE1		COM_OFF(0xD4)
+#define QSERDES_COM_VCOCAL_DEADMAN_CTRL		COM_OFF(0xD8)
+#define QSERDES_COM_DIV_FRAC_START1_MODE0	COM_OFF(0xDC)
+#define QSERDES_COM_DIV_FRAC_START2_MODE0	COM_OFF(0xE0)
+#define QSERDES_COM_DIV_FRAC_START3_MODE0	COM_OFF(0xE4)
+#define QSERDES_COM_DIV_FRAC_START1_MODE1	COM_OFF(0xE8)
+#define QSERDES_COM_DIV_FRAC_START2_MODE1	COM_OFF(0xEC)
+#define QSERDES_COM_DIV_FRAC_START3_MODE1	COM_OFF(0xF0)
+#define QSERDES_COM_VCO_TUNE_MINVAL1		COM_OFF(0xF4)
+#define QSERDES_COM_VCO_TUNE_MINVAL2		COM_OFF(0xF8)
+#define QSERDES_COM_CMN_RSVD4			COM_OFF(0xFC)
+#define QSERDES_COM_INTEGLOOP_INITVAL		COM_OFF(0x100)
+#define QSERDES_COM_INTEGLOOP_EN		COM_OFF(0x104)
+#define QSERDES_COM_INTEGLOOP_GAIN0_MODE0	COM_OFF(0x108)
+#define QSERDES_COM_INTEGLOOP_GAIN1_MODE0	COM_OFF(0x10C)
+#define QSERDES_COM_INTEGLOOP_GAIN0_MODE1	COM_OFF(0x110)
+#define QSERDES_COM_INTEGLOOP_GAIN1_MODE1	COM_OFF(0x114)
+#define QSERDES_COM_VCO_TUNE_MAXVAL1		COM_OFF(0x118)
+#define QSERDES_COM_VCO_TUNE_MAXVAL2		COM_OFF(0x11C)
+#define QSERDES_COM_RES_TRIM_CONTROL2		COM_OFF(0x120)
+#define QSERDES_COM_VCO_TUNE_CTRL		COM_OFF(0x124)
+#define QSERDES_COM_VCO_TUNE_MAP		COM_OFF(0x128)
+#define QSERDES_COM_VCO_TUNE1_MODE0		COM_OFF(0x12C)
+#define QSERDES_COM_VCO_TUNE2_MODE0		COM_OFF(0x130)
+#define QSERDES_COM_VCO_TUNE1_MODE1		COM_OFF(0x134)
+#define QSERDES_COM_VCO_TUNE2_MODE1		COM_OFF(0x138)
+#define QSERDES_COM_VCO_TUNE_INITVAL1		COM_OFF(0x13C)
+#define QSERDES_COM_VCO_TUNE_INITVAL2		COM_OFF(0x140)
+#define QSERDES_COM_VCO_TUNE_TIMER1		COM_OFF(0x144)
+#define QSERDES_COM_VCO_TUNE_TIMER2		COM_OFF(0x148)
+#define QSERDES_COM_SAR				COM_OFF(0x14C)
+#define QSERDES_COM_SAR_CLK			COM_OFF(0x150)
+#define QSERDES_COM_SAR_CODE_OUT_STATUS		COM_OFF(0x154)
+#define QSERDES_COM_SAR_CODE_READY_STATUS	COM_OFF(0x158)
+#define QSERDES_COM_CMN_STATUS			COM_OFF(0x15C)
+#define QSERDES_COM_RESET_SM_STATUS		COM_OFF(0x160)
+#define QSERDES_COM_RESTRIM_CODE_STATUS		COM_OFF(0x164)
+#define QSERDES_COM_PLLCAL_CODE1_STATUS		COM_OFF(0x168)
+#define QSERDES_COM_PLLCAL_CODE2_STATUS		COM_OFF(0x16C)
+#define QSERDES_COM_BG_CTRL			COM_OFF(0x170)
+#define QSERDES_COM_CLK_SELECT			COM_OFF(0x174)
+#define QSERDES_COM_HSCLK_SEL			COM_OFF(0x178)
+#define QSERDES_COM_INTEGLOOP_BINCODE_STATUS	COM_OFF(0x17C)
+#define QSERDES_COM_PLL_ANALOG			COM_OFF(0x180)
+#define QSERDES_COM_CORECLK_DIV			COM_OFF(0x184)
+#define QSERDES_COM_SW_RESET			COM_OFF(0x188)
+#define QSERDES_COM_CORE_CLK_EN			COM_OFF(0x18C)
+#define QSERDES_COM_C_READY_STATUS		COM_OFF(0x190)
+#define QSERDES_COM_CMN_CONFIG			COM_OFF(0x194)
+#define QSERDES_COM_CMN_RATE_OVERRIDE		COM_OFF(0x198)
+#define QSERDES_COM_SVS_MODE_CLK_SEL		COM_OFF(0x19C)
+#define QSERDES_COM_DEBUG_BUS0			COM_OFF(0x1A0)
+#define QSERDES_COM_DEBUG_BUS1			COM_OFF(0x1A4)
+#define QSERDES_COM_DEBUG_BUS2			COM_OFF(0x1A8)
+#define QSERDES_COM_DEBUG_BUS3			COM_OFF(0x1AC)
+#define QSERDES_COM_DEBUG_BUS_SEL		COM_OFF(0x1B0)
+#define QSERDES_COM_CMN_MISC1			COM_OFF(0x1B4)
+#define QSERDES_COM_CORECLK_DIV_MODE1		COM_OFF(0x1BC)
+#define QSERDES_COM_CMN_RSVD5			COM_OFF(0x1C0)
+
+/* UFS PHY registers */
+#define UFS_PHY_PHY_START			PHY_OFF(0x00)
+#define UFS_PHY_POWER_DOWN_CONTROL		PHY_OFF(0x04)
+#define UFS_PHY_TX_LARGE_AMP_DRV_LVL		PHY_OFF(0x34)
+#define UFS_PHY_TX_SMALL_AMP_DRV_LVL		PHY_OFF(0x3C)
+#define UFS_PHY_RX_MIN_STALL_NOCONFIG_TIME_CAP	PHY_OFF(0xCC)
+#define UFS_PHY_LINECFG_DISABLE			PHY_OFF(0x138)
+#define UFS_PHY_RX_SYM_RESYNC_CTRL		PHY_OFF(0x13C)
+#define UFS_PHY_RX_SIGDET_CTRL2			PHY_OFF(0x148)
+#define UFS_PHY_RX_PWM_GEAR_BAND		PHY_OFF(0x154)
+#define UFS_PHY_PCS_READY_STATUS		PHY_OFF(0x168)
+
+/* UFS PHY TX registers */
+#define QSERDES_TX_HIGHZ_TRANSCEIVER_BIAS_DRVR_EN	TX_OFF(0x68)
+#define QSERDES_TX_LANE_MODE				TX_OFF(0x94)
+
+/* UFS PHY RX registers */
+#define QSERDES_RX_UCDR_SVS_SO_GAIN_HALF	RX_OFF(0x30)
+#define QSERDES_RX_UCDR_SVS_SO_GAIN_QUARTER	RX_OFF(0x34)
+#define QSERDES_RX_UCDR_SVS_SO_GAIN_EIGHTH	RX_OFF(0x38)
+#define QSERDES_RX_UCDR_SVS_SO_GAIN		RX_OFF(0x3C)
+#define QSERDES_RX_UCDR_FASTLOCK_FO_GAIN	RX_OFF(0x40)
+#define QSERDES_RX_UCDR_SO_SATURATION_ENABLE	RX_OFF(0x48)
+#define QSERDES_RX_RX_TERM_BW			RX_OFF(0x90)
+#define QSERDES_RX_RX_EQ_GAIN1_LSB		RX_OFF(0xC4)
+#define QSERDES_RX_RX_EQ_GAIN1_MSB		RX_OFF(0xC8)
+#define QSERDES_RX_RX_EQ_GAIN2_LSB		RX_OFF(0xCC)
+#define QSERDES_RX_RX_EQ_GAIN2_MSB		RX_OFF(0xD0)
+#define QSERDES_RX_RX_EQU_ADAPTOR_CNTRL2	RX_OFF(0xD8)
+#define QSERDES_RX_SIGDET_CNTRL			RX_OFF(0x114)
+#define QSERDES_RX_SIGDET_LVL			RX_OFF(0x118)
+#define QSERDES_RX_SIGDET_DEGLITCH_CNTRL	RX_OFF(0x11C)
+#define QSERDES_RX_RX_INTERFACE_MODE		RX_OFF(0x12C)
+
+#define UFS_PHY_RX_LINECFG_DISABLE_BIT		BIT(1)
+
+/*
+ * This structure represents the v3 660 specific phy.
+ * common_cfg MUST remain the first field in this structure
+ * in case extra fields are added. This way, when calling
+ * get_ufs_qcom_phy() of generic phy, we can extract the
+ * common phy structure (struct ufs_qcom_phy) out of it
+ * regardless of the relevant specific phy.
+ */
+struct ufs_qcom_phy_qmp_v3_660 {
+	struct ufs_qcom_phy common_cfg;
+};
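
Because common_cfg is the first member, the conversion documented above
degenerates to a cast; a sketch of the recovery step that
get_ufs_qcom_phy() is assumed to rely on:

	/* Sketch: recovering the specific PHY from the embedded common one. */
	static inline struct ufs_qcom_phy_qmp_v3_660 *
	to_v3_660_phy(struct ufs_qcom_phy *common)
	{
		return container_of(common, struct ufs_qcom_phy_qmp_v3_660,
				    common_cfg);
	}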
+
+static struct ufs_qcom_phy_calibration phy_cal_table_rate_A_3_1_1[] = {
+	UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_POWER_DOWN_CONTROL, 0x01),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_CMN_CONFIG, 0x0e),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_SYSCLK_EN_SEL, 0x14),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_CLK_SELECT, 0x30),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_SYS_CLK_CTRL, 0x02),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_BIAS_EN_CLKBUFLR_EN, 0x08),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_BG_TIMER, 0x0a),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_HSCLK_SEL, 0x00),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_CORECLK_DIV, 0x0a),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_CORECLK_DIV_MODE1, 0x0a),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_LOCK_CMP_EN, 0x01),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_VCO_TUNE_CTRL, 0x00),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_RESETSM_CNTRL, 0x20),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_CORE_CLK_EN, 0x00),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_LOCK_CMP_CFG, 0x00),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_VCO_TUNE_TIMER1, 0xff),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_VCO_TUNE_TIMER2, 0x3f),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_VCO_TUNE_MAP, 0x04),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_SVS_MODE_CLK_SEL, 0x05),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_DEC_START_MODE0, 0x82),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_DIV_FRAC_START1_MODE0, 0x00),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_DIV_FRAC_START2_MODE0, 0x00),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_DIV_FRAC_START3_MODE0, 0x00),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_CP_CTRL_MODE0, 0x0b),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_RCTRL_MODE0, 0x16),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_CCTRL_MODE0, 0x28),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_INTEGLOOP_GAIN0_MODE0, 0x80),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_INTEGLOOP_GAIN1_MODE0, 0x00),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_VCO_TUNE1_MODE0, 0x28),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_VCO_TUNE2_MODE0, 0x02),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_LOCK_CMP1_MODE0, 0xff),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_LOCK_CMP2_MODE0, 0x0c),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_LOCK_CMP3_MODE0, 0x00),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_DEC_START_MODE1, 0x98),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_DIV_FRAC_START1_MODE1, 0x00),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_DIV_FRAC_START2_MODE1, 0x00),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_DIV_FRAC_START3_MODE1, 0x00),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_CP_CTRL_MODE1, 0x0b),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_RCTRL_MODE1, 0x16),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_CCTRL_MODE1, 0x28),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_INTEGLOOP_GAIN0_MODE1, 0x80),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_INTEGLOOP_GAIN1_MODE1, 0x00),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_VCO_TUNE1_MODE1, 0xd6),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_VCO_TUNE2_MODE1, 0x00),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_LOCK_CMP1_MODE1, 0x32),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_LOCK_CMP2_MODE1, 0x0f),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_LOCK_CMP3_MODE1, 0x00),
+
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_TX_HIGHZ_TRANSCEIVER_BIAS_DRVR_EN, 0x45),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_TX_LANE_MODE, 0x06),
+
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_SIGDET_LVL, 0x24),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_SIGDET_CNTRL, 0x0F),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_INTERFACE_MODE, 0x40),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_SIGDET_DEGLITCH_CNTRL, 0x1E),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_UCDR_FASTLOCK_FO_GAIN, 0x0B),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_TERM_BW, 0x5B),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_EQ_GAIN1_LSB, 0xFF),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_EQ_GAIN1_MSB, 0x3F),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_EQ_GAIN2_LSB, 0xFF),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_EQ_GAIN2_MSB, 0x3F),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_EQU_ADAPTOR_CNTRL2, 0x0D),
+
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_IVCO, 0x0F),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_BG_TRIM, 0x0F),
+	UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_RX_PWM_GEAR_BAND, 0x15),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_UCDR_SVS_SO_GAIN_HALF, 0x04),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_UCDR_SVS_SO_GAIN_QUARTER, 0x04),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_UCDR_SVS_SO_GAIN, 0x04),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_UCDR_SO_SATURATION_ENABLE, 0x4B),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_VCO_TUNE_INITVAL1, 0xFF),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_VCO_TUNE_INITVAL2, 0x00),
+	UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_RX_SIGDET_CTRL2, 0x6c),
+	UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_TX_LARGE_AMP_DRV_LVL, 0x0A),
+	UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_TX_SMALL_AMP_DRV_LVL, 0x02),
+	UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_RX_MIN_STALL_NOCONFIG_TIME_CAP, 0x28),
+	UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_RX_SYM_RESYNC_CTRL, 0x03),
+};
+
+static struct ufs_qcom_phy_calibration phy_cal_table_rate_B[] = {
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_VCO_TUNE_MAP, 0x44),
+};
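
Each entry above pairs a register offset with the value to program;
UFS_QCOM_PHY_CAL_ENTRY() (from phy-qcom-ufs-i.h) is assumed to expand to a
plain designated initializer along these lines:

	/* Assumed shape of a calibration entry and its init macro. */
	struct ufs_qcom_phy_calibration {
		u32 reg_offset;
		u32 cfg_value;
	};

	#define UFS_QCOM_PHY_CAL_ENTRY(reg, val)	\
		{					\
			.reg_offset = reg,		\
			.cfg_value = val,		\
		}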
+
+#endif
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/phy/phy-qcom-ufs-qmp-v3.c	2019-01-22 16:16:26.079269734 +0100
@@ -0,0 +1,298 @@
+/*
+ * Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include "phy-qcom-ufs-qmp-v3.h"
+
+#define UFS_PHY_NAME "ufs_phy_qmp_v3"
+
+static
+int ufs_qcom_phy_qmp_v3_phy_calibrate(struct ufs_qcom_phy *ufs_qcom_phy,
+					bool is_rate_B)
+{
+	int err;
+	int tbl_size_A, tbl_size_B;
+	struct ufs_qcom_phy_calibration *tbl_A, *tbl_B;
+	u8 major = ufs_qcom_phy->host_ctrl_rev_major;
+	u16 minor = ufs_qcom_phy->host_ctrl_rev_minor;
+	u16 step = ufs_qcom_phy->host_ctrl_rev_step;
+
+	tbl_size_B = ARRAY_SIZE(phy_cal_table_rate_B);
+	tbl_B = phy_cal_table_rate_B;
+
+	if ((major == 0x3) && (minor == 0x000) && (step == 0x0000)) {
+		tbl_A = phy_cal_table_rate_A_3_0_0;
+		tbl_size_A = ARRAY_SIZE(phy_cal_table_rate_A_3_0_0);
+	} else if ((major == 0x3) && (minor == 0x001) && (step == 0x0000)) {
+		tbl_A = phy_cal_table_rate_A_3_1_0;
+		tbl_size_A = ARRAY_SIZE(phy_cal_table_rate_A_3_1_0);
+	} else {
+		dev_err(ufs_qcom_phy->dev,
+			"%s: Unknown UFS-PHY version (major 0x%x minor 0x%x step 0x%x), no calibration values\n",
+			__func__, major, minor, step);
+		err = -ENODEV;
+		goto out;
+	}
+
+	err = ufs_qcom_phy_calibrate(ufs_qcom_phy,
+				     tbl_A, tbl_size_A,
+				     tbl_B, tbl_size_B,
+				     is_rate_B);
+
+	if (err)
+		dev_err(ufs_qcom_phy->dev,
+			"%s: ufs_qcom_phy_calibrate() failed %d\n",
+			__func__, err);
+
+out:
+	return err;
+}
+
+static int ufs_qcom_phy_qmp_v3_init(struct phy *generic_phy)
+{
+	struct ufs_qcom_phy_qmp_v3 *phy = phy_get_drvdata(generic_phy);
+	struct ufs_qcom_phy *phy_common = &phy->common_cfg;
+	int err;
+
+	err = ufs_qcom_phy_init_clks(generic_phy, phy_common);
+	if (err) {
+		dev_err(phy_common->dev, "%s: ufs_qcom_phy_init_clks() failed %d\n",
+			__func__, err);
+		goto out;
+	}
+
+	err = ufs_qcom_phy_init_vregulators(generic_phy, phy_common);
+	if (err) {
+		dev_err(phy_common->dev, "%s: ufs_qcom_phy_init_vregulators() failed %d\n",
+			__func__, err);
+		goto out;
+	}
+
+out:
+	return err;
+}
+
+static
+void ufs_qcom_phy_qmp_v3_power_control(struct ufs_qcom_phy *phy,
+					 bool power_ctrl)
+{
+	if (!power_ctrl) {
+		/* apply analog power collapse */
+		writel_relaxed(0x0, phy->mmio + UFS_PHY_POWER_DOWN_CONTROL);
+		/*
+		 * Make sure that PHY knows its analog rail is going to be
+		 * powered OFF.
+		 */
+		mb();
+	} else {
+		/* bring PHY out of analog power collapse */
+		writel_relaxed(0x1, phy->mmio + UFS_PHY_POWER_DOWN_CONTROL);
+
+		/*
+		 * Before any transaction involving the PHY, ensure it knows
+		 * that its analog rail is powered ON.
+		 */
+		mb();
+	}
+}
+
+static inline
+void ufs_qcom_phy_qmp_v3_set_tx_lane_enable(struct ufs_qcom_phy *phy, u32 val)
+{
+	/*
+	 * The v3 PHY does not have a TX_LANE_ENABLE register. Provide an
+	 * empty implementation so no error is propagated to the caller.
+	 */
+}
+
+static
+void ufs_qcom_phy_qmp_v3_ctrl_rx_linecfg(struct ufs_qcom_phy *phy, bool ctrl)
+{
+	u32 temp;
+
+	temp = readl_relaxed(phy->mmio + UFS_PHY_LINECFG_DISABLE);
+
+	if (ctrl) /* enable RX LineCfg */
+		temp &= ~UFS_PHY_RX_LINECFG_DISABLE_BIT;
+	else /* disable RX LineCfg */
+		temp |= UFS_PHY_RX_LINECFG_DISABLE_BIT;
+
+	writel_relaxed(temp, phy->mmio + UFS_PHY_LINECFG_DISABLE);
+	/* Make sure the RX LineCfg configuration is applied before we return */
+	mb();
+}
+
+static inline void ufs_qcom_phy_qmp_v3_start_serdes(struct ufs_qcom_phy *phy)
+{
+	u32 tmp;
+
+	tmp = readl_relaxed(phy->mmio + UFS_PHY_PHY_START);
+	tmp &= ~MASK_SERDES_START;
+	tmp |= (1 << OFFSET_SERDES_START);
+	writel_relaxed(tmp, phy->mmio + UFS_PHY_PHY_START);
+	/* Ensure register value is committed */
+	mb();
+}
+
+static int ufs_qcom_phy_qmp_v3_is_pcs_ready(struct ufs_qcom_phy *phy_common)
+{
+	int err = 0;
+	u32 val;
+
+	err = readl_poll_timeout(phy_common->mmio + UFS_PHY_PCS_READY_STATUS,
+		val, (val & MASK_PCS_READY), 10, 1000000);
+	if (err)
+		dev_err(phy_common->dev, "%s: poll for pcs failed err = %d\n",
+			__func__, err);
+
+	return err;
+}
+
+static
+int ufs_qcom_phy_qmp_v3_configure_lpm(struct ufs_qcom_phy *ufs_qcom_phy,
+					bool enable)
+{
+	int err = 0;
+	int tbl_size;
+	struct ufs_qcom_phy_calibration *tbl = NULL;
+
+	/* The default low power mode configuration is SVS2 */
+	if (enable) {
+		tbl_size = ARRAY_SIZE(phy_cal_table_svs2_enable);
+		tbl = phy_cal_table_svs2_enable;
+	} else {
+		tbl_size = ARRAY_SIZE(phy_cal_table_svs2_disable);
+		tbl = phy_cal_table_svs2_disable;
+	}
+
+	if (!tbl) {
+		dev_err(ufs_qcom_phy->dev, "%s: tbl for SVS2 %s is NULL\n",
+			__func__, enable ? "enable" : "disable");
+		err = -EINVAL;
+		goto out;
+	}
+
+	ufs_qcom_phy_write_tbl(ufs_qcom_phy, tbl, tbl_size);
+
+	/* flush buffered writes */
+	mb();
+
+out:
+	return err;
+}
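
The LPM hook above swaps the core-clock dividers, integration-loop gains
and lock comparators between the nominal and SVS2 corners; a hypothetical
caller-side sequence (the clock-scaling step is platform specific and only
named for illustration):

	/* Hypothetical ordering when scaling down to the SVS2 corner. */
	static int scale_down_sketch(struct ufs_qcom_phy *phy)
	{
		int err;

		/* Reprogram the PHY for the low corner first ... */
		err = ufs_qcom_phy_qmp_v3_configure_lpm(phy, true);
		if (err)
			return err;

		/* ... then lower the core/link clocks (not shown). */
		return 0;
	}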
+
+static void ufs_qcom_phy_qmp_v3_dbg_register_dump(struct ufs_qcom_phy *phy)
+{
+	ufs_qcom_phy_dump_regs(phy, COM_BASE, COM_SIZE,
+					"PHY QSERDES COM Registers ");
+	ufs_qcom_phy_dump_regs(phy, PHY_BASE, PHY_SIZE,
+					"PHY Registers ");
+	ufs_qcom_phy_dump_regs(phy, RX_BASE(0), RX_SIZE,
+					"PHY RX0 Registers ");
+	ufs_qcom_phy_dump_regs(phy, TX_BASE(0), TX_SIZE,
+					"PHY TX0 Registers ");
+	ufs_qcom_phy_dump_regs(phy, RX_BASE(1), RX_SIZE,
+					"PHY RX1 Registers ");
+	ufs_qcom_phy_dump_regs(phy, TX_BASE(1), TX_SIZE,
+					"PHY TX1 Registers ");
+}
+
+struct phy_ops ufs_qcom_phy_qmp_v3_phy_ops = {
+	.init		= ufs_qcom_phy_qmp_v3_init,
+	.exit		= ufs_qcom_phy_exit,
+	.power_on	= ufs_qcom_phy_power_on,
+	.power_off	= ufs_qcom_phy_power_off,
+	.owner		= THIS_MODULE,
+};
+
+struct ufs_qcom_phy_specific_ops phy_v3_ops = {
+	.calibrate_phy		= ufs_qcom_phy_qmp_v3_phy_calibrate,
+	.start_serdes		= ufs_qcom_phy_qmp_v3_start_serdes,
+	.is_physical_coding_sublayer_ready = ufs_qcom_phy_qmp_v3_is_pcs_ready,
+	.set_tx_lane_enable	= ufs_qcom_phy_qmp_v3_set_tx_lane_enable,
+	.ctrl_rx_linecfg	= ufs_qcom_phy_qmp_v3_ctrl_rx_linecfg,
+	.power_control		= ufs_qcom_phy_qmp_v3_power_control,
+	.configure_lpm		= ufs_qcom_phy_qmp_v3_configure_lpm,
+	.dbg_register_dump	= ufs_qcom_phy_qmp_v3_dbg_register_dump,
+};
+
+static int ufs_qcom_phy_qmp_v3_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct phy *generic_phy;
+	struct ufs_qcom_phy_qmp_v3 *phy;
+	int err = 0;
+
+	phy = devm_kzalloc(dev, sizeof(*phy), GFP_KERNEL);
+	if (!phy) {
+		err = -ENOMEM;
+		goto out;
+	}
+
+	generic_phy = ufs_qcom_phy_generic_probe(pdev, &phy->common_cfg,
+				&ufs_qcom_phy_qmp_v3_phy_ops, &phy_v3_ops);
+
+	if (!generic_phy) {
+		dev_err(dev, "%s: ufs_qcom_phy_generic_probe() failed\n",
+			__func__);
+		err = -EIO;
+		goto out;
+	}
+
+	phy_set_drvdata(generic_phy, phy);
+
+	strlcpy(phy->common_cfg.name, UFS_PHY_NAME,
+		sizeof(phy->common_cfg.name));
+
+out:
+	return err;
+}
+
+static int ufs_qcom_phy_qmp_v3_remove(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct phy *generic_phy = to_phy(dev);
+	struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(generic_phy);
+	int err = 0;
+
+	err = ufs_qcom_phy_remove(generic_phy, ufs_qcom_phy);
+	if (err)
+		dev_err(dev, "%s: ufs_qcom_phy_remove failed = %d\n",
+			__func__, err);
+
+	return err;
+}
+
+static const struct of_device_id ufs_qcom_phy_qmp_v3_of_match[] = {
+	{.compatible = "qcom,ufs-phy-qmp-v3"},
+	{},
+};
+MODULE_DEVICE_TABLE(of, ufs_qcom_phy_qmp_v3_of_match);
+
+static struct platform_driver ufs_qcom_phy_qmp_v3_driver = {
+	.probe = ufs_qcom_phy_qmp_v3_probe,
+	.remove = ufs_qcom_phy_qmp_v3_remove,
+	.driver = {
+		.of_match_table = ufs_qcom_phy_qmp_v3_of_match,
+		.name = "ufs_qcom_phy_qmp_v3",
+		.owner = THIS_MODULE,
+	},
+};
+
+module_platform_driver(ufs_qcom_phy_qmp_v3_driver);
+
+MODULE_DESCRIPTION("Universal Flash Storage (UFS) QCOM PHY QMP v3");
+MODULE_LICENSE("GPL v2");
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/phy/phy-qcom-ufs-qmp-v3.h	2019-01-22 16:16:26.079269734 +0100
@@ -0,0 +1,387 @@
+/*
+ * Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef UFS_QCOM_PHY_QMP_V3_H_
+#define UFS_QCOM_PHY_QMP_V3_H_
+
+#include "phy-qcom-ufs-i.h"
+
+/* QCOM UFS PHY control registers */
+#define COM_BASE	0x000
+#define COM_SIZE	0x18C
+#define PHY_BASE	0xC00
+#define PHY_SIZE	0x1DC
+#define TX_BASE(n)	(0x400 + (0x400 * n))
+#define TX_SIZE		0x128
+#define RX_BASE(n)	(0x600 + (0x400 * n))
+#define RX_SIZE		0x1FC
+#define COM_OFF(x)	(COM_BASE + x)
+#define PHY_OFF(x)	(PHY_BASE + x)
+#define TX_OFF(n, x)	(TX_BASE(n) + x)
+#define RX_OFF(n, x)	(RX_BASE(n) + x)
+
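The 0x400 lane stride means every per-lane register sits a fixed distance
from its lane-0 twin; a worked example against the defines below:

	/*
	 * TX_BASE(0) = 0x400 + 0x400 * 0 = 0x400
	 * TX_BASE(1) = 0x400 + 0x400 * 1 = 0x800
	 * RX_BASE(1) = 0x600 + 0x400 * 1 = 0xA00
	 * so TX_OFF(1, 0x8C)   = 0x88C (QSERDES_TX1_LANE_MODE_1)
	 * and RX_OFF(1, 0x108) = 0xB08 (QSERDES_RX1_SIGDET_LVL)
	 */
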
+/* UFS PHY QSERDES COM registers */
+#define QSERDES_COM_ATB_SEL1			COM_OFF(0x00)
+#define QSERDES_COM_ATB_SEL2			COM_OFF(0x04)
+#define QSERDES_COM_FREQ_UPDATE			COM_OFF(0x08)
+#define QSERDES_COM_BG_TIMER			COM_OFF(0x0C)
+#define QSERDES_COM_SSC_EN_CENTER		COM_OFF(0x10)
+#define QSERDES_COM_SSC_ADJ_PER1		COM_OFF(0x14)
+#define QSERDES_COM_SSC_ADJ_PER2		COM_OFF(0x18)
+#define QSERDES_COM_SSC_PER1			COM_OFF(0x1C)
+#define QSERDES_COM_SSC_PER2			COM_OFF(0x20)
+#define QSERDES_COM_SSC_STEP_SIZE1		COM_OFF(0x24)
+#define QSERDES_COM_SSC_STEP_SIZE2		COM_OFF(0x28)
+#define QSERDES_COM_POST_DIV			COM_OFF(0x2C)
+#define QSERDES_COM_POST_DIV_MUX		COM_OFF(0x30)
+#define QSERDES_COM_BIAS_EN_CLKBUFLR_EN		COM_OFF(0x34)
+#define QSERDES_COM_CLK_ENABLE1			COM_OFF(0x38)
+#define QSERDES_COM_SYS_CLK_CTRL		COM_OFF(0x3C)
+#define QSERDES_COM_SYSCLK_BUF_ENABLE		COM_OFF(0x40)
+#define QSERDES_COM_PLL_EN			COM_OFF(0x44)
+#define QSERDES_COM_PLL_IVCO			COM_OFF(0x48)
+#define QSERDES_COM_CMN_IETRIM			COM_OFF(0x4C)
+#define QSERDES_COM_CMN_IPTRIM			COM_OFF(0x50)
+#define QSERDES_COM_EP_CLOCK_DETECT_CTR		COM_OFF(0x54)
+#define QSERDES_COM_SYSCLK_DET_COMP_STATUS	COM_OFF(0x58)
+#define QSERDES_COM_CLK_EP_DIV			COM_OFF(0x5C)
+#define QSERDES_COM_CP_CTRL_MODE0		COM_OFF(0x60)
+#define QSERDES_COM_CP_CTRL_MODE1		COM_OFF(0x64)
+#define QSERDES_COM_PLL_RCTRL_MODE0		COM_OFF(0x68)
+#define QSERDES_COM_PLL_RCTRL_MODE1		COM_OFF(0x6C)
+#define QSERDES_COM_PLL_CCTRL_MODE0		COM_OFF(0x70)
+#define QSERDES_COM_PLL_CCTRL_MODE1		COM_OFF(0x74)
+#define QSERDES_COM_PLL_CNTRL			COM_OFF(0x78)
+#define QSERDES_COM_BIAS_EN_CTRL_BY_PSM		COM_OFF(0x7C)
+#define QSERDES_COM_SYSCLK_EN_SEL		COM_OFF(0x80)
+#define QSERDES_COM_CML_SYSCLK_SEL		COM_OFF(0x84)
+#define QSERDES_COM_RESETSM_CNTRL		COM_OFF(0x88)
+#define QSERDES_COM_RESETSM_CNTRL2		COM_OFF(0x8C)
+#define QSERDES_COM_LOCK_CMP_EN			COM_OFF(0x90)
+#define QSERDES_COM_LOCK_CMP_CFG		COM_OFF(0x94)
+#define QSERDES_COM_LOCK_CMP1_MODE0		COM_OFF(0x98)
+#define QSERDES_COM_LOCK_CMP2_MODE0		COM_OFF(0x9C)
+#define QSERDES_COM_LOCK_CMP3_MODE0		COM_OFF(0xA0)
+#define QSERDES_COM_LOCK_CMP1_MODE1		COM_OFF(0xA4)
+#define QSERDES_COM_LOCK_CMP2_MODE1		COM_OFF(0xA8)
+#define QSERDES_COM_LOCK_CMP3_MODE1		COM_OFF(0xAC)
+#define QSERDES_COM_DEC_START_MODE0		COM_OFF(0xB0)
+#define QSERDES_COM_DEC_START_MODE1		COM_OFF(0xB4)
+#define QSERDES_COM_DIV_FRAC_START1_MODE0	COM_OFF(0xB8)
+#define QSERDES_COM_DIV_FRAC_START2_MODE0	COM_OFF(0xBC)
+#define QSERDES_COM_DIV_FRAC_START3_MODE0	COM_OFF(0xC0)
+#define QSERDES_COM_DIV_FRAC_START1_MODE1	COM_OFF(0xC4)
+#define QSERDES_COM_DIV_FRAC_START2_MODE1	COM_OFF(0xC8)
+#define QSERDES_COM_DIV_FRAC_START3_MODE1	COM_OFF(0xCC)
+#define QSERDES_COM_INTEGLOOP_INITVAL		COM_OFF(0xD0)
+#define QSERDES_COM_INTEGLOOP_EN		COM_OFF(0xD4)
+#define QSERDES_COM_INTEGLOOP_GAIN0_MODE0	COM_OFF(0xD8)
+#define QSERDES_COM_INTEGLOOP_GAIN1_MODE0	COM_OFF(0xDC)
+#define QSERDES_COM_INTEGLOOP_GAIN0_MODE1	COM_OFF(0xE0)
+#define QSERDES_COM_INTEGLOOP_GAIN1_MODE1	COM_OFF(0xE4)
+#define QSERDES_COM_VCOCAL_DEADMAN_CTRL		COM_OFF(0xE8)
+#define QSERDES_COM_VCO_TUNE_CTRL		COM_OFF(0xEC)
+#define QSERDES_COM_VCO_TUNE_MAP		COM_OFF(0xF0)
+#define QSERDES_COM_VCO_TUNE1_MODE0		COM_OFF(0xF4)
+#define QSERDES_COM_VCO_TUNE2_MODE0		COM_OFF(0xF8)
+#define QSERDES_COM_VCO_TUNE1_MODE1		COM_OFF(0xFC)
+#define QSERDES_COM_VCO_TUNE2_MODE1		COM_OFF(0x100)
+#define QSERDES_COM_VCO_TUNE_INITVAL1		COM_OFF(0x104)
+#define QSERDES_COM_VCO_TUNE_INITVAL2		COM_OFF(0x108)
+#define QSERDES_COM_VCO_TUNE_MINVAL1		COM_OFF(0x10C)
+#define QSERDES_COM_VCO_TUNE_MINVAL2		COM_OFF(0x110)
+#define QSERDES_COM_VCO_TUNE_MAXVAL1		COM_OFF(0x114)
+#define QSERDES_COM_VCO_TUNE_MAXVAL2		COM_OFF(0x118)
+#define QSERDES_COM_VCO_TUNE_TIMER1		COM_OFF(0x11C)
+#define QSERDES_COM_VCO_TUNE_TIMER2		COM_OFF(0x120)
+#define QSERDES_COM_CMN_STATUS			COM_OFF(0x124)
+#define QSERDES_COM_RESET_SM_STATUS		COM_OFF(0x128)
+#define QSERDES_COM_RESTRIM_CODE_STATUS		COM_OFF(0x12C)
+#define QSERDES_COM_PLLCAL_CODE1_STATUS		COM_OFF(0x130)
+#define QSERDES_COM_PLLCAL_CODE2_STATUS		COM_OFF(0x134)
+#define QSERDES_COM_CLK_SELECT			COM_OFF(0x138)
+#define QSERDES_COM_HSCLK_SEL			COM_OFF(0x13C)
+#define QSERDES_COM_INTEGLOOP_BINCODE_STATUS	COM_OFF(0x140)
+#define QSERDES_COM_PLL_ANALOG			COM_OFF(0x144)
+#define QSERDES_COM_CORECLK_DIV_MODE0		COM_OFF(0x148)
+#define QSERDES_COM_CORECLK_DIV_MODE1		COM_OFF(0x14C)
+#define QSERDES_COM_SW_RESET			COM_OFF(0x150)
+#define QSERDES_COM_CORE_CLK_EN			COM_OFF(0x154)
+#define QSERDES_COM_C_READY_STATUS		COM_OFF(0x158)
+#define QSERDES_COM_CMN_CONFIG			COM_OFF(0x15C)
+#define QSERDES_COM_CMN_RATE_OVERRIDE		COM_OFF(0x160)
+#define QSERDES_COM_SVS_MODE_CLK_SEL		COM_OFF(0x164)
+#define QSERDES_COM_DEBUG_BUS0			COM_OFF(0x168)
+#define QSERDES_COM_DEBUG_BUS1			COM_OFF(0x16C)
+#define QSERDES_COM_DEBUG_BUS2			COM_OFF(0x170)
+#define QSERDES_COM_DEBUG_BUS3			COM_OFF(0x174)
+#define QSERDES_COM_DEBUG_BUS_SEL		COM_OFF(0x178)
+#define QSERDES_COM_CMN_MISC1			COM_OFF(0x17C)
+#define QSERDES_COM_CMN_MISC2			COM_OFF(0x180)
+#define QSERDES_COM_CMN_MODE			COM_OFF(0x184)
+#define QSERDES_COM_CMN_VREG_SEL		COM_OFF(0x188)
+
+/* UFS PHY registers */
+#define UFS_PHY_PHY_START			PHY_OFF(0x00)
+#define UFS_PHY_POWER_DOWN_CONTROL		PHY_OFF(0x04)
+#define UFS_PHY_TIMER_20US_CORECLK_STEPS_MSB	PHY_OFF(0x08)
+#define UFS_PHY_TIMER_20US_CORECLK_STEPS_LSB	PHY_OFF(0x0C)
+#define UFS_PHY_TX_LARGE_AMP_DRV_LVL		PHY_OFF(0x2C)
+#define UFS_PHY_TX_SMALL_AMP_DRV_LVL		PHY_OFF(0x34)
+#define UFS_PHY_LINECFG_DISABLE			PHY_OFF(0x130)
+#define UFS_PHY_RX_SYM_RESYNC_CTRL		PHY_OFF(0x134)
+#define UFS_PHY_RX_MIN_HIBERN8_TIME		PHY_OFF(0x138)
+#define UFS_PHY_RX_SIGDET_CTRL1			PHY_OFF(0x13C)
+#define UFS_PHY_RX_SIGDET_CTRL2			PHY_OFF(0x140)
+#define UFS_PHY_RX_PWM_GEAR_BAND		PHY_OFF(0x14C)
+#define UFS_PHY_PCS_READY_STATUS		PHY_OFF(0x160)
+#define UFS_PHY_TX_MID_TERM_CTRL1		PHY_OFF(0x1BC)
+#define UFS_PHY_MULTI_LANE_CTRL1		PHY_OFF(0x1C4)
+
+/* UFS PHY TX registers */
+#define QSERDES_TX0_TRANSCEIVER_BIAS_EN		TX_OFF(0, 0x5C)
+#define QSERDES_TX0_LANE_MODE_1			TX_OFF(0, 0x8C)
+#define QSERDES_TX0_LANE_MODE_2			TX_OFF(0, 0x90)
+#define QSERDES_TX0_LANE_MODE_3			TX_OFF(0, 0x94)
+
+#define QSERDES_TX1_LANE_MODE_1			TX_OFF(1, 0x8C)
+
+/* UFS PHY RX registers */
+#define QSERDES_RX0_UCDR_SVS_SO_GAIN_HALF		RX_OFF(0, 0x24)
+#define QSERDES_RX0_UCDR_SVS_SO_GAIN_QUARTER		RX_OFF(0, 0x28)
+#define QSERDES_RX0_UCDR_SVS_SO_GAIN			RX_OFF(0, 0x2C)
+#define QSERDES_RX0_UCDR_FASTLOCK_FO_GAIN		RX_OFF(0, 0x30)
+#define QSERDES_RX0_UCDR_SO_SATURATION_AND_ENABLE	RX_OFF(0, 0x34)
+#define QSERDES_RX0_UCDR_FASTLOCK_COUNT_LOW		RX_OFF(0, 0x3C)
+#define QSERDES_RX0_UCDR_PI_CONTROLS			RX_OFF(0, 0x44)
+#define QSERDES_RX0_RX_TERM_BW				RX_OFF(0, 0x7C)
+#define QSERDES_RX0_RX_EQ_GAIN2_LSB			RX_OFF(0, 0xC8)
+#define QSERDES_RX0_RX_EQ_GAIN2_MSB			RX_OFF(0, 0xCC)
+#define QSERDES_RX0_RX_EQU_ADAPTOR_CNTRL1		RX_OFF(0, 0xD0)
+#define QSERDES_RX0_RX_EQU_ADAPTOR_CNTRL2		RX_OFF(0, 0xD4)
+#define QSERDES_RX0_RX_EQU_ADAPTOR_CNTRL3		RX_OFF(0, 0xD8)
+#define QSERDES_RX0_RX_EQU_ADAPTOR_CNTRL4		RX_OFF(0, 0xDC)
+#define QSERDES_RX0_SIGDET_CNTRL			RX_OFF(0, 0x104)
+#define QSERDES_RX0_SIGDET_LVL				RX_OFF(0, 0x108)
+#define QSERDES_RX0_SIGDET_DEGLITCH_CNTRL		RX_OFF(0, 0x10C)
+#define QSERDES_RX0_RX_INTERFACE_MODE			RX_OFF(0, 0x11C)
+
+#define QSERDES_RX1_UCDR_SVS_SO_GAIN_HALF		RX_OFF(1, 0x24)
+#define QSERDES_RX1_UCDR_SVS_SO_GAIN_QUARTER		RX_OFF(1, 0x28)
+#define QSERDES_RX1_UCDR_SVS_SO_GAIN			RX_OFF(1, 0x2C)
+#define QSERDES_RX1_UCDR_FASTLOCK_FO_GAIN		RX_OFF(1, 0x30)
+#define QSERDES_RX1_UCDR_SO_SATURATION_AND_ENABLE	RX_OFF(1, 0x34)
+#define QSERDES_RX1_UCDR_FASTLOCK_COUNT_LOW		RX_OFF(1, 0x3C)
+#define QSERDES_RX1_UCDR_PI_CONTROLS			RX_OFF(1, 0x44)
+#define QSERDES_RX1_RX_TERM_BW				RX_OFF(1, 0x7C)
+#define QSERDES_RX1_RX_EQU_ADAPTOR_CNTRL2		RX_OFF(1, 0xD4)
+#define QSERDES_RX1_RX_EQU_ADAPTOR_CNTRL3		RX_OFF(1, 0xD8)
+#define QSERDES_RX1_RX_EQU_ADAPTOR_CNTRL4		RX_OFF(1, 0xDC)
+#define QSERDES_RX1_SIGDET_CNTRL			RX_OFF(1, 0x104)
+#define QSERDES_RX1_SIGDET_LVL				RX_OFF(1, 0x108)
+#define QSERDES_RX1_SIGDET_DEGLITCH_CNTRL		RX_OFF(1, 0x10C)
+#define QSERDES_RX1_RX_INTERFACE_MODE			RX_OFF(1, 0x11C)
+
+#define UFS_PHY_RX_LINECFG_DISABLE_BIT		BIT(1)
+
+/*
+ * This structure represents the v3 specific phy.
+ * common_cfg MUST remain the first field in this structure
+ * in case extra fields are added. This way, when calling
+ * get_ufs_qcom_phy() of generic phy, we can extract the
+ * common phy structure (struct ufs_qcom_phy) out of it
+ * regardless of the relevant specific phy.
+ */
+struct ufs_qcom_phy_qmp_v3 {
+	struct ufs_qcom_phy common_cfg;
+};
+
+static struct ufs_qcom_phy_calibration phy_cal_table_rate_A_3_0_0[] = {
+	UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_POWER_DOWN_CONTROL, 0x01),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_CMN_CONFIG, 0x06),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_SYSCLK_EN_SEL, 0xD5),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_CLK_SELECT, 0x30),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_SYS_CLK_CTRL, 0x02),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_BIAS_EN_CLKBUFLR_EN, 0x04),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_BG_TIMER, 0x0A),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_HSCLK_SEL, 0x00),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_LOCK_CMP_EN, 0x01),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_VCO_TUNE_CTRL, 0x00),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_CORE_CLK_EN, 0x00),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_VCO_TUNE_MAP, 0x04),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_SVS_MODE_CLK_SEL, 0x05),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_IVCO, 0x0F),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_VCO_TUNE_INITVAL1, 0xFF),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_VCO_TUNE_INITVAL2, 0x00),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_DEC_START_MODE0, 0x82),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_CP_CTRL_MODE0, 0x08),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_RCTRL_MODE0, 0x16),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_CCTRL_MODE0, 0x34),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_INTEGLOOP_GAIN0_MODE0, 0x3F),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_INTEGLOOP_GAIN1_MODE0, 0x00),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_VCO_TUNE1_MODE0, 0xCB),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_VCO_TUNE2_MODE0, 0x01),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_LOCK_CMP1_MODE0, 0xFF),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_LOCK_CMP2_MODE0, 0x0C),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_DEC_START_MODE1, 0x98),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_CP_CTRL_MODE1, 0x08),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_RCTRL_MODE1, 0x16),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_CCTRL_MODE1, 0x34),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_INTEGLOOP_GAIN0_MODE1, 0x3F),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_INTEGLOOP_GAIN1_MODE1, 0x00),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_VCO_TUNE1_MODE1, 0xB2),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_VCO_TUNE2_MODE1, 0x00),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_LOCK_CMP1_MODE1, 0x32),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_LOCK_CMP2_MODE1, 0x0F),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_TX0_LANE_MODE_1, 0x06),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX0_SIGDET_LVL, 0x24),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX0_SIGDET_CNTRL, 0x0F),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX0_SIGDET_DEGLITCH_CNTRL, 0x1E),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX0_RX_INTERFACE_MODE, 0x40),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX0_UCDR_FASTLOCK_FO_GAIN, 0x0B),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX0_RX_TERM_BW, 0x5B),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX0_RX_EQU_ADAPTOR_CNTRL2, 0x06),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX0_RX_EQU_ADAPTOR_CNTRL3, 0x04),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX0_RX_EQU_ADAPTOR_CNTRL4, 0x1D),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX0_UCDR_SVS_SO_GAIN_HALF, 0x04),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX0_UCDR_SVS_SO_GAIN_QUARTER, 0x04),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX0_UCDR_SVS_SO_GAIN, 0x04),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX0_UCDR_SO_SATURATION_AND_ENABLE, 0x4B),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX0_UCDR_PI_CONTROLS, 0x81),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX0_UCDR_FASTLOCK_COUNT_LOW, 0x80),
+	UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_RX_SIGDET_CTRL2, 0x6E),
+	UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_TX_LARGE_AMP_DRV_LVL, 0x0A),
+	UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_TX_SMALL_AMP_DRV_LVL, 0x02),
+	UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_RX_SYM_RESYNC_CTRL, 0x03),
+	UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_RX_MIN_HIBERN8_TIME, 0x9A), /* 8 us */
+};
+
+static struct ufs_qcom_phy_calibration phy_cal_table_rate_A_3_1_0[] = {
+	UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_POWER_DOWN_CONTROL, 0x01),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_CMN_CONFIG, 0x06),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_SYSCLK_EN_SEL, 0xD5),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_RESETSM_CNTRL, 0x20),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_CLK_SELECT, 0x30),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_SYS_CLK_CTRL, 0x02),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_BIAS_EN_CLKBUFLR_EN, 0x04),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_BG_TIMER, 0x0A),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_HSCLK_SEL, 0x00),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_LOCK_CMP_EN, 0x01),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_VCO_TUNE_CTRL, 0x00),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_CORE_CLK_EN, 0x00),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_VCO_TUNE_MAP, 0x04),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_SVS_MODE_CLK_SEL, 0x05),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_IVCO, 0x0F),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_VCO_TUNE_INITVAL1, 0xFF),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_VCO_TUNE_INITVAL2, 0x00),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_DEC_START_MODE0, 0x82),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_CP_CTRL_MODE0, 0x06),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_RCTRL_MODE0, 0x16),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_CCTRL_MODE0, 0x36),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_INTEGLOOP_GAIN0_MODE0, 0x3F),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_INTEGLOOP_GAIN1_MODE0, 0x00),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_VCO_TUNE1_MODE0, 0xDA),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_VCO_TUNE2_MODE0, 0x01),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_LOCK_CMP1_MODE0, 0xFF),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_LOCK_CMP2_MODE0, 0x0C),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_DEC_START_MODE1, 0x98),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_CP_CTRL_MODE1, 0x06),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_RCTRL_MODE1, 0x16),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_CCTRL_MODE1, 0x36),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_INTEGLOOP_GAIN0_MODE1, 0x3F),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_INTEGLOOP_GAIN1_MODE1, 0x00),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_VCO_TUNE1_MODE1, 0xC1),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_VCO_TUNE2_MODE1, 0x00),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_LOCK_CMP1_MODE1, 0x32),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_LOCK_CMP2_MODE1, 0x0F),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_TX0_LANE_MODE_1, 0x06),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX0_SIGDET_LVL, 0x24),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX0_SIGDET_CNTRL, 0x0F),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX0_SIGDET_DEGLITCH_CNTRL, 0x1E),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX0_RX_INTERFACE_MODE, 0x40),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX0_UCDR_FASTLOCK_FO_GAIN, 0x0B),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX0_RX_TERM_BW, 0x5B),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX0_RX_EQU_ADAPTOR_CNTRL2, 0x06),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX0_RX_EQU_ADAPTOR_CNTRL3, 0x04),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX0_RX_EQU_ADAPTOR_CNTRL4, 0x1D),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX0_UCDR_SVS_SO_GAIN_HALF, 0x04),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX0_UCDR_SVS_SO_GAIN_QUARTER, 0x04),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX0_UCDR_SVS_SO_GAIN, 0x04),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX0_UCDR_SO_SATURATION_AND_ENABLE, 0x4B),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX0_UCDR_PI_CONTROLS, 0x81),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX0_UCDR_FASTLOCK_COUNT_LOW, 0x80),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_TX1_LANE_MODE_1, 0x06),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX1_SIGDET_LVL, 0x24),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX1_SIGDET_CNTRL, 0x0F),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX1_SIGDET_DEGLITCH_CNTRL, 0x1E),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX1_RX_INTERFACE_MODE, 0x40),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX1_UCDR_FASTLOCK_FO_GAIN, 0x0B),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX1_RX_TERM_BW, 0x5B),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX1_RX_EQU_ADAPTOR_CNTRL2, 0x06),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX1_RX_EQU_ADAPTOR_CNTRL3, 0x04),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX1_RX_EQU_ADAPTOR_CNTRL4, 0x1D),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX1_UCDR_SVS_SO_GAIN_HALF, 0x04),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX1_UCDR_SVS_SO_GAIN_QUARTER, 0x04),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX1_UCDR_SVS_SO_GAIN, 0x04),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX1_UCDR_SO_SATURATION_AND_ENABLE, 0x4B),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX1_UCDR_PI_CONTROLS, 0x81),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX1_UCDR_FASTLOCK_COUNT_LOW, 0x80),
+	UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_MULTI_LANE_CTRL1, 0x02),
+	UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_RX_SIGDET_CTRL2, 0x6E),
+	UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_TX_LARGE_AMP_DRV_LVL, 0x0A),
+	UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_TX_SMALL_AMP_DRV_LVL, 0x02),
+	UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_RX_SYM_RESYNC_CTRL, 0x03),
+	UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_TX_MID_TERM_CTRL1, 0x43),
+	UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_RX_SIGDET_CTRL1, 0x0F),
+	UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_RX_MIN_HIBERN8_TIME, 0x9A), /* 8 us */
+};
+
+static struct ufs_qcom_phy_calibration phy_cal_table_rate_B[] = {
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_VCO_TUNE_MAP, 0x44),
+};
+
+static struct ufs_qcom_phy_calibration phy_cal_table_svs2_enable[] = {
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_CORECLK_DIV_MODE0, 0x14),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_CORECLK_DIV_MODE1, 0x14),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_SVS_MODE_CLK_SEL, 0x0a),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_INTEGLOOP_GAIN0_MODE0, 0x7e),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_INTEGLOOP_GAIN1_MODE0, 0x00),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_LOCK_CMP1_MODE0, 0x7f),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_LOCK_CMP2_MODE0, 0x06),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_INTEGLOOP_GAIN0_MODE1, 0x7e),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_INTEGLOOP_GAIN1_MODE1, 0x00),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_LOCK_CMP1_MODE1, 0x99),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_LOCK_CMP2_MODE1, 0x07),
+	UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_TIMER_20US_CORECLK_STEPS_MSB, 0x0b),
+	UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_TIMER_20US_CORECLK_STEPS_LSB, 0x66),
+};
+
+static struct ufs_qcom_phy_calibration phy_cal_table_svs2_disable[] = {
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_CORECLK_DIV_MODE0, 0x0a),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_CORECLK_DIV_MODE1, 0x0a),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_SVS_MODE_CLK_SEL, 0x05),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_INTEGLOOP_GAIN0_MODE0, 0x3f),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_INTEGLOOP_GAIN1_MODE0, 0x00),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_LOCK_CMP1_MODE0, 0xff),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_LOCK_CMP2_MODE0, 0x0c),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_INTEGLOOP_GAIN0_MODE1, 0x3f),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_INTEGLOOP_GAIN1_MODE1, 0x00),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_LOCK_CMP1_MODE1, 0x32),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_LOCK_CMP2_MODE1, 0x0f),
+	UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_TIMER_20US_CORECLK_STEPS_MSB, 0x16),
+	UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_TIMER_20US_CORECLK_STEPS_LSB, 0xcc),
+};
+
+#endif
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/phy/phy-qcom-ufs-qrbtc-v2.c	2019-01-22 16:16:26.079269734 +0100
@@ -0,0 +1,205 @@
+/*
+ * Copyright (c) 2013-2016, Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include "phy-qcom-ufs-qrbtc-v2.h"
+
+#define UFS_PHY_NAME "ufs_phy_qrbtc_v2"
+
+static
+int ufs_qcom_phy_qrbtc_v2_phy_calibrate(struct ufs_qcom_phy *ufs_qcom_phy,
+					bool is_rate_B)
+{
+	int err;
+	int tbl_size_A;
+	struct ufs_qcom_phy_calibration *tbl_A;
+	struct ufs_qcom_phy_qrbtc_v2 *qrbtc_phy = container_of(ufs_qcom_phy,
+				struct ufs_qcom_phy_qrbtc_v2, common_cfg);
+
+	writel_relaxed(0x15f, qrbtc_phy->u11_regs + U11_UFS_RESET_REG_OFFSET);
+
+	/* A 50 ms delay is required for the reset to stabilize */
+	usleep_range(50000, 50100);
+	writel_relaxed(0x0, qrbtc_phy->u11_regs + U11_UFS_RESET_REG_OFFSET);
+
+	/* Set R3PC REF CLK */
+	writel_relaxed(0x80, qrbtc_phy->u11_regs + U11_QRBTC_CONTROL_OFFSET);
+
+	tbl_A = phy_cal_table_rate_A;
+	tbl_size_A = ARRAY_SIZE(phy_cal_table_rate_A);
+
+	err = ufs_qcom_phy_calibrate(ufs_qcom_phy,
+				     tbl_A, tbl_size_A,
+				     NULL, 0,
+				     false);
+
+	if (err)
+		dev_err(ufs_qcom_phy->dev,
+			"%s: ufs_qcom_phy_calibrate() failed %d\n",
+			__func__, err);
+
+	return err;
+}
+
+static int
+ufs_qcom_phy_qrbtc_v2_is_pcs_ready(struct ufs_qcom_phy *phy_common)
+{
+	int err = 0;
+	u32 val;
+	struct ufs_qcom_phy_qrbtc_v2 *qrbtc_phy = container_of(phy_common,
+				struct ufs_qcom_phy_qrbtc_v2, common_cfg);
+
+	/*
+	 * The value we are polling for is 0x3D, the OR of the following
+	 * fields:
+	 * RESET_SM state:  0x05
+	 * RESTRIMDONE bit: BIT(3) = 0x08
+	 * PLLLOCK bit:     BIT(4) = 0x10
+	 * READY bit:       BIT(5) = 0x20
+	 * 0x05 | 0x08 | 0x10 | 0x20 = 0x3D
+	 */
+	#define QSERDES_COM_RESET_SM_REG_POLL_VAL	0x3D
+	err = readl_poll_timeout(phy_common->mmio + QSERDES_COM_RESET_SM,
+		val, (val == QSERDES_COM_RESET_SM_REG_POLL_VAL), 10, 1000000);
+
+	if (err)
+		dev_err(phy_common->dev, "%s: poll for pcs failed err = %d\n",
+			__func__, err);
+
+	writel_relaxed(0x100, qrbtc_phy->u11_regs + U11_QRBTC_TX_CLK_CTRL);
+
+	return err;
+}
+
+static void ufs_qcom_phy_qrbtc_v2_start_serdes(struct ufs_qcom_phy *phy)
+{
+	u32 temp;
+
+	writel_relaxed(0x01, phy->mmio + UFS_PHY_POWER_DOWN_CONTROL_OFFSET);
+
+	temp = readl_relaxed(phy->mmio + UFS_PHY_PHY_START_OFFSET);
+	temp |= 0x1;
+	writel_relaxed(temp, phy->mmio + UFS_PHY_PHY_START_OFFSET);
+
+	/* Ensure register value is committed */
+	mb();
+}
+
+static int ufs_qcom_phy_qrbtc_v2_init(struct phy *generic_phy)
+{
+	return 0;
+}
+
+struct phy_ops ufs_qcom_phy_qrbtc_v2_phy_ops = {
+	.init		= ufs_qcom_phy_qrbtc_v2_init,
+	.exit		= ufs_qcom_phy_exit,
+	.owner		= THIS_MODULE,
+};
+
+struct ufs_qcom_phy_specific_ops phy_qrbtc_v2_ops = {
+	.calibrate_phy		= ufs_qcom_phy_qrbtc_v2_phy_calibrate,
+	.start_serdes		= ufs_qcom_phy_qrbtc_v2_start_serdes,
+	.is_physical_coding_sublayer_ready =
+				ufs_qcom_phy_qrbtc_v2_is_pcs_ready,
+};
+
+static int ufs_qcom_phy_qrbtc_v2_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct phy *generic_phy;
+	struct ufs_qcom_phy_qrbtc_v2 *phy;
+	struct resource *res;
+	int err = 0;
+
+	phy = devm_kzalloc(dev, sizeof(*phy), GFP_KERNEL);
+	if (!phy) {
+		err = -ENOMEM;
+		goto out;
+	}
+
+	generic_phy = ufs_qcom_phy_generic_probe(pdev, &phy->common_cfg,
+		&ufs_qcom_phy_qrbtc_v2_phy_ops, &phy_qrbtc_v2_ops);
+
+	if (!generic_phy) {
+		dev_err(dev, "%s: ufs_qcom_phy_generic_probe() failed\n",
+			__func__);
+		err = -EIO;
+		goto out;
+	}
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "u11_user");
+	if (!res) {
+		dev_err(dev, "%s: u11_user resource not found\n", __func__);
+		err = -EINVAL;
+		goto out;
+	}
+
+	phy->u11_regs = devm_ioremap_resource(dev, res);
+	if (IS_ERR_OR_NULL(phy->u11_regs)) {
+		if (IS_ERR(phy->u11_regs)) {
+			err = PTR_ERR(phy->u11_regs);
+			phy->u11_regs = NULL;
+			dev_err(dev, "%s: ioremap for phy_mem resource failed %d\n",
+				__func__, err);
+		} else {
+			dev_err(dev, "%s: ioremap for phy_mem resource failed\n",
+				__func__);
+			err = -ENOMEM;
+		}
+		goto out;
+	}
+
+	phy_set_drvdata(generic_phy, phy);
+
+	strlcpy(phy->common_cfg.name, UFS_PHY_NAME,
+		sizeof(phy->common_cfg.name));
+
+out:
+	return err;
+}
+
+static int ufs_qcom_phy_qrbtc_v2_remove(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct phy *generic_phy = to_phy(dev);
+	struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(generic_phy);
+	int err = 0;
+
+	err = ufs_qcom_phy_remove(generic_phy, ufs_qcom_phy);
+	if (err)
+		dev_err(dev, "%s: ufs_qcom_phy_remove failed = %d\n",
+			__func__, err);
+
+	return err;
+}
+
+static const struct of_device_id ufs_qcom_phy_qrbtc_v2_of_match[] = {
+	{.compatible = "qcom,ufs-phy-qrbtc-v2"},
+	{},
+};
+MODULE_DEVICE_TABLE(of, ufs_qcom_phy_qrbtc_v2_of_match);
+
+static struct platform_driver ufs_qcom_phy_qrbtc_v2_driver = {
+	.probe = ufs_qcom_phy_qrbtc_v2_probe,
+	.remove = ufs_qcom_phy_qrbtc_v2_remove,
+	.driver = {
+		.of_match_table = ufs_qcom_phy_qrbtc_v2_of_match,
+		.name = "ufs_qcom_phy_qrbtc_v2",
+		.owner = THIS_MODULE,
+	},
+};
+
+module_platform_driver(ufs_qcom_phy_qrbtc_v2_driver);
+
+MODULE_DESCRIPTION("Universal Flash Storage (UFS) QCOM PHY QRBTC V2");
+MODULE_LICENSE("GPL v2");
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/phy/phy-qcom-ufs-qrbtc-v2.h	2019-01-22 16:16:26.079269734 +0100
@@ -0,0 +1,116 @@
+/*
+ * Copyright (c) 2013-2016, Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef UFS_QCOM_PHY_QRBTC_V2_H_
+#define UFS_QCOM_PHY_QRBTC_V2_H_
+
+#include "phy-qcom-ufs-i.h"
+
+/* QCOM UFS PHY control registers */
+#define COM_OFF(x)	(0x000 + x)
+#define PHY_OFF(x)	(0x700 + x)
+#define PHY_USR(x)	(x)
+
+#define UFS_PHY_PHY_START_OFFSET		PHY_OFF(0x00)
+#define UFS_PHY_POWER_DOWN_CONTROL_OFFSET	PHY_OFF(0x04)
+
+#define	QSERDES_COM_BIAS_EN_CLKBUFLR_EN_OFFSET	COM_OFF(0x20)
+#define QSERDES_COM_SYSCLK_EN_SEL		COM_OFF(0x38)
+#define QSERDES_COM_SYS_CLK_CTRL		COM_OFF(0x00)
+#define QSERDES_COM_RES_CODE_TXBAND		COM_OFF(0x3C)
+#define QSERDES_COM_PLL_VCOTAIL_EN		COM_OFF(0x04)
+#define QSERDES_COM_PLL_CNTRL			COM_OFF(0x14)
+#define QSERDES_COM_PLL_CLKEPDIV		COM_OFF(0xB0)
+#define QSERDES_COM_RESETSM_CNTRL		COM_OFF(0x40)
+#define QSERDES_COM_PLL_RXTXEPCLK_EN		COM_OFF(0xA8)
+#define QSERDES_COM_PLL_CRCTRL			COM_OFF(0xAC)
+#define QSERDES_COM_DEC_START1			COM_OFF(0x64)
+#define QSERDES_COM_DEC_START2			COM_OFF(0xA4)
+#define QSERDES_COM_DIV_FRAC_START1		COM_OFF(0x98)
+#define QSERDES_COM_DIV_FRAC_START2		COM_OFF(0x9C)
+#define QSERDES_COM_DIV_FRAC_START3		COM_OFF(0xA0)
+#define QSERDES_COM_PLLLOCK_CMP1		COM_OFF(0x44)
+#define QSERDES_COM_PLLLOCK_CMP2		COM_OFF(0x48)
+#define QSERDES_COM_PLLLOCK_CMP3		COM_OFF(0x4C)
+#define QSERDES_COM_PLLLOCK_CMP_EN		COM_OFF(0x50)
+#define QSERDES_COM_PLL_IP_SETI			COM_OFF(0x18)
+#define QSERDES_COM_PLL_CP_SETI			COM_OFF(0x24)
+#define QSERDES_COM_PLL_IP_SETP			COM_OFF(0x28)
+#define QSERDES_COM_PLL_CP_SETP			COM_OFF(0x2C)
+#define QSERDES_COM_RESET_SM			COM_OFF(0xBC)
+#define QSERDES_COM_PWM_CNTRL1			COM_OFF(0x280)
+#define QSERDES_COM_PWM_CNTRL2			COM_OFF(0x284)
+#define QSERDES_COM_PWM_NDIV			COM_OFF(0x288)
+#define QSERDES_COM_CDR_CONTROL			COM_OFF(0x200)
+#define QSERDES_COM_CDR_CONTROL_HALF		COM_OFF(0x298)
+#define QSERDES_COM_CDR_CONTROL_QUARTER		COM_OFF(0x29C)
+#define QSERDES_COM_SIGDET_CNTRL		COM_OFF(0x234)
+#define QSERDES_COM_SIGDET_CNTRL2		COM_OFF(0x28C)
+#define QSERDES_COM_UFS_CNTRL			COM_OFF(0x290)
+
+/* QRBTC V2 USER REGISTERS */
+#define U11_UFS_RESET_REG_OFFSET		PHY_USR(0x4)
+#define U11_QRBTC_CONTROL_OFFSET		PHY_USR(0x18)
+#define U11_QRBTC_TX_CLK_CTRL			PHY_USR(0x20)
+
+static struct ufs_qcom_phy_calibration phy_cal_table_rate_A[] = {
+	UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_PHY_START_OFFSET, 0x00),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_BIAS_EN_CLKBUFLR_EN_OFFSET, 0x3F),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_SYSCLK_EN_SEL, 0x03),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_SYS_CLK_CTRL, 0x16),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_RES_CODE_TXBAND, 0xC0),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_VCOTAIL_EN, 0x03),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_CNTRL, 0x88),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_CLKEPDIV, 0x03),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_RESETSM_CNTRL, 0x30),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_RXTXEPCLK_EN, 0x10),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_CRCTRL, 0x94),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_DEC_START1, 0x98),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_DEC_START2, 0x02),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_DIV_FRAC_START1, 0x8C),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_DIV_FRAC_START2, 0xAE),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_DIV_FRAC_START3, 0x1F),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLLLOCK_CMP1, 0xF7),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLLLOCK_CMP2, 0x13),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLLLOCK_CMP3, 0x00),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLLLOCK_CMP_EN, 0x01),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_IP_SETI, 0x01),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_CP_SETI, 0x3B),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_IP_SETP, 0x0A),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_CP_SETP, 0x04),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PWM_CNTRL1, 0xCF),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PWM_CNTRL2, 0x61),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PWM_NDIV, 0x4F),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_CDR_CONTROL, 0xF2),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_CDR_CONTROL_HALF, 0x2A),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_CDR_CONTROL_QUARTER, 0x2A),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_SIGDET_CNTRL, 0xC0),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_SIGDET_CNTRL2, 0x07),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_UFS_CNTRL, 0x18),
+};
+
+/*
+ * This structure represents the qrbtc-v2 specific PHY.
+ * common_cfg MUST remain the first field in this structure even if
+ * extra fields are added, so that get_ufs_qcom_phy(), called on the
+ * generic phy, can extract the common PHY structure
+ * (struct ufs_qcom_phy) regardless of which specific PHY wraps it.
+ */
+struct ufs_qcom_phy_qrbtc_v2 {
+	struct ufs_qcom_phy common_cfg;
+	void __iomem *u11_regs;
+};
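+
+/*
+ * A minimal sketch of why the field order matters: with common_cfg
+ * first, the generic and the specific PHY pointers can be converted
+ * into one another, e.g.
+ *
+ *	struct ufs_qcom_phy *common = get_ufs_qcom_phy(generic_phy);
+ *	struct ufs_qcom_phy_qrbtc_v2 *phy =
+ *		container_of(common, struct ufs_qcom_phy_qrbtc_v2,
+ *			     common_cfg);
+ */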
+
+#endif
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/pinctrl/qcom/pinctrl-msm8998.c	2019-01-22 16:16:26.111270024 +0100
@@ -0,0 +1,1933 @@
+/*
+ * Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pinctrl/pinctrl.h>
+
+#include "pinctrl-msm.h"
+
+#define FUNCTION(fname)			                \
+	[msm_mux_##fname] = {		                \
+		.name = #fname,				\
+		.groups = fname##_groups,               \
+		.ngroups = ARRAY_SIZE(fname##_groups),	\
+	}
+
+#define NORTH	0x500000
+#define WEST	0x100000
+#define EAST	0x900000
+#define REG_SIZE 0x1000
+#define PINGROUP(id, base, f1, f2, f3, f4, f5, f6, f7, f8, f9)	\
+	{					        \
+		.name = "gpio" #id,			\
+		.pins = gpio##id##_pins,		\
+		.npins = (unsigned)ARRAY_SIZE(gpio##id##_pins),	\
+		.funcs = (int[]){			\
+			msm_mux_gpio, /* gpio mode */	\
+			msm_mux_##f1,			\
+			msm_mux_##f2,			\
+			msm_mux_##f3,			\
+			msm_mux_##f4,			\
+			msm_mux_##f5,			\
+			msm_mux_##f6,			\
+			msm_mux_##f7,			\
+			msm_mux_##f8,			\
+			msm_mux_##f9			\
+		},				        \
+		.nfuncs = 10,				\
+		.ctl_reg = base + REG_SIZE * id,	\
+		.io_reg = base + 0x4 + REG_SIZE * id,		\
+		.intr_cfg_reg = base + 0x8 + REG_SIZE * id,	\
+		.intr_status_reg = base + 0xc + REG_SIZE * id,	\
+		.intr_target_reg = base + 0x8 + REG_SIZE * id,	\
+		.mux_bit = 2,			\
+		.pull_bit = 0,			\
+		.drv_bit = 6,			\
+		.oe_bit = 9,			\
+		.in_bit = 0,			\
+		.out_bit = 1,			\
+		.intr_enable_bit = 0,		\
+		.intr_status_bit = 0,		\
+		.intr_target_bit = 5,		\
+		.intr_target_kpss_val = 3,  \
+		.intr_raw_status_bit = 4,	\
+		.intr_polarity_bit = 1,		\
+		.intr_detection_bit = 2,	\
+		.intr_detection_width = 2,	\
+	}
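+
+/*
+ * Each GPIO owns a REG_SIZE (0x1000) register window inside its tile
+ * (NORTH/WEST/EAST bases above): ctl at +0x0, io at +0x4, intr_cfg at
+ * +0x8 and intr_status at +0xc. For example, gpio10 in the EAST tile
+ * gets ctl_reg = 0x900000 + 10 * 0x1000 = 0x90a000. The interrupt
+ * target field lives in the intr_cfg register, which is why
+ * intr_target_reg equals intr_cfg_reg.
+ */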
+
+#define SDC_QDSD_PINGROUP(pg_name, ctl, pull, drv)	\
+	{					        \
+		.name = #pg_name,			\
+		.pins = pg_name##_pins,			\
+		.npins = (unsigned)ARRAY_SIZE(pg_name##_pins),	\
+		.ctl_reg = ctl,				\
+		.io_reg = 0,				\
+		.intr_cfg_reg = 0,			\
+		.intr_status_reg = 0,			\
+		.intr_target_reg = 0,			\
+		.mux_bit = -1,				\
+		.pull_bit = pull,			\
+		.drv_bit = drv,				\
+		.oe_bit = -1,				\
+		.in_bit = -1,				\
+		.out_bit = -1,				\
+		.intr_enable_bit = -1,			\
+		.intr_status_bit = -1,			\
+		.intr_target_bit = -1,			\
+		.intr_raw_status_bit = -1,		\
+		.intr_polarity_bit = -1,		\
+		.intr_detection_bit = -1,		\
+		.intr_detection_width = -1,		\
+	}
+
+#define UFS_RESET(pg_name, offset)				\
+	{					        \
+		.name = #pg_name,			\
+		.pins = pg_name##_pins,			\
+		.npins = (unsigned)ARRAY_SIZE(pg_name##_pins),	\
+		.ctl_reg = offset,			\
+		.io_reg = offset + 0x4,			\
+		.intr_cfg_reg = 0,			\
+		.intr_status_reg = 0,			\
+		.intr_target_reg = 0,			\
+		.mux_bit = -1,				\
+		.pull_bit = 3,				\
+		.drv_bit = 0,				\
+		.oe_bit = -1,				\
+		.in_bit = -1,				\
+		.out_bit = 0,				\
+		.intr_enable_bit = -1,			\
+		.intr_status_bit = -1,			\
+		.intr_target_bit = -1,			\
+		.intr_raw_status_bit = -1,		\
+		.intr_polarity_bit = -1,		\
+		.intr_detection_bit = -1,		\
+		.intr_detection_width = -1,		\
+	}
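+
+/*
+ * The SDC and UFS_RESET pad groups are not muxable and cannot generate
+ * interrupts; the -1 sentinels above mark the fields that do not apply
+ * to these pads.
+ */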
+static const struct pinctrl_pin_desc msm8998_pins[] = {
+	PINCTRL_PIN(0, "GPIO_0"),
+	PINCTRL_PIN(1, "GPIO_1"),
+	PINCTRL_PIN(2, "GPIO_2"),
+	PINCTRL_PIN(3, "GPIO_3"),
+	PINCTRL_PIN(4, "GPIO_4"),
+	PINCTRL_PIN(5, "GPIO_5"),
+	PINCTRL_PIN(6, "GPIO_6"),
+	PINCTRL_PIN(7, "GPIO_7"),
+	PINCTRL_PIN(8, "GPIO_8"),
+	PINCTRL_PIN(9, "GPIO_9"),
+	PINCTRL_PIN(10, "GPIO_10"),
+	PINCTRL_PIN(11, "GPIO_11"),
+	PINCTRL_PIN(12, "GPIO_12"),
+	PINCTRL_PIN(13, "GPIO_13"),
+	PINCTRL_PIN(14, "GPIO_14"),
+	PINCTRL_PIN(15, "GPIO_15"),
+	PINCTRL_PIN(16, "GPIO_16"),
+	PINCTRL_PIN(17, "GPIO_17"),
+	PINCTRL_PIN(18, "GPIO_18"),
+	PINCTRL_PIN(19, "GPIO_19"),
+	PINCTRL_PIN(20, "GPIO_20"),
+	PINCTRL_PIN(21, "GPIO_21"),
+	PINCTRL_PIN(22, "GPIO_22"),
+	PINCTRL_PIN(23, "GPIO_23"),
+	PINCTRL_PIN(24, "GPIO_24"),
+	PINCTRL_PIN(25, "GPIO_25"),
+	PINCTRL_PIN(26, "GPIO_26"),
+	PINCTRL_PIN(27, "GPIO_27"),
+	PINCTRL_PIN(28, "GPIO_28"),
+	PINCTRL_PIN(29, "GPIO_29"),
+	PINCTRL_PIN(30, "GPIO_30"),
+	PINCTRL_PIN(31, "GPIO_31"),
+	PINCTRL_PIN(32, "GPIO_32"),
+	PINCTRL_PIN(33, "GPIO_33"),
+	PINCTRL_PIN(34, "GPIO_34"),
+	PINCTRL_PIN(35, "GPIO_35"),
+	PINCTRL_PIN(36, "GPIO_36"),
+	PINCTRL_PIN(37, "GPIO_37"),
+	PINCTRL_PIN(38, "GPIO_38"),
+	PINCTRL_PIN(39, "GPIO_39"),
+	PINCTRL_PIN(40, "GPIO_40"),
+	PINCTRL_PIN(41, "GPIO_41"),
+	PINCTRL_PIN(42, "GPIO_42"),
+	PINCTRL_PIN(43, "GPIO_43"),
+	PINCTRL_PIN(44, "GPIO_44"),
+	PINCTRL_PIN(45, "GPIO_45"),
+	PINCTRL_PIN(46, "GPIO_46"),
+	PINCTRL_PIN(47, "GPIO_47"),
+	PINCTRL_PIN(48, "GPIO_48"),
+	PINCTRL_PIN(49, "GPIO_49"),
+	PINCTRL_PIN(50, "GPIO_50"),
+	PINCTRL_PIN(51, "GPIO_51"),
+	PINCTRL_PIN(52, "GPIO_52"),
+	PINCTRL_PIN(53, "GPIO_53"),
+	PINCTRL_PIN(54, "GPIO_54"),
+	PINCTRL_PIN(55, "GPIO_55"),
+	PINCTRL_PIN(56, "GPIO_56"),
+	PINCTRL_PIN(57, "GPIO_57"),
+	PINCTRL_PIN(58, "GPIO_58"),
+	PINCTRL_PIN(59, "GPIO_59"),
+	PINCTRL_PIN(60, "GPIO_60"),
+	PINCTRL_PIN(61, "GPIO_61"),
+	PINCTRL_PIN(62, "GPIO_62"),
+	PINCTRL_PIN(63, "GPIO_63"),
+	PINCTRL_PIN(64, "GPIO_64"),
+	PINCTRL_PIN(65, "GPIO_65"),
+	PINCTRL_PIN(66, "GPIO_66"),
+	PINCTRL_PIN(67, "GPIO_67"),
+	PINCTRL_PIN(68, "GPIO_68"),
+	PINCTRL_PIN(69, "GPIO_69"),
+	PINCTRL_PIN(70, "GPIO_70"),
+	PINCTRL_PIN(71, "GPIO_71"),
+	PINCTRL_PIN(72, "GPIO_72"),
+	PINCTRL_PIN(73, "GPIO_73"),
+	PINCTRL_PIN(74, "GPIO_74"),
+	PINCTRL_PIN(75, "GPIO_75"),
+	PINCTRL_PIN(76, "GPIO_76"),
+	PINCTRL_PIN(77, "GPIO_77"),
+	PINCTRL_PIN(78, "GPIO_78"),
+	PINCTRL_PIN(79, "GPIO_79"),
+	PINCTRL_PIN(80, "GPIO_80"),
+	PINCTRL_PIN(81, "GPIO_81"),
+	PINCTRL_PIN(82, "GPIO_82"),
+	PINCTRL_PIN(83, "GPIO_83"),
+	PINCTRL_PIN(84, "GPIO_84"),
+	PINCTRL_PIN(85, "GPIO_85"),
+	PINCTRL_PIN(86, "GPIO_86"),
+	PINCTRL_PIN(87, "GPIO_87"),
+	PINCTRL_PIN(88, "GPIO_88"),
+	PINCTRL_PIN(89, "GPIO_89"),
+	PINCTRL_PIN(90, "GPIO_90"),
+	PINCTRL_PIN(91, "GPIO_91"),
+	PINCTRL_PIN(92, "GPIO_92"),
+	PINCTRL_PIN(93, "GPIO_93"),
+	PINCTRL_PIN(94, "GPIO_94"),
+	PINCTRL_PIN(95, "GPIO_95"),
+	PINCTRL_PIN(96, "GPIO_96"),
+	PINCTRL_PIN(97, "GPIO_97"),
+	PINCTRL_PIN(98, "GPIO_98"),
+	PINCTRL_PIN(99, "GPIO_99"),
+	PINCTRL_PIN(100, "GPIO_100"),
+	PINCTRL_PIN(101, "GPIO_101"),
+	PINCTRL_PIN(102, "GPIO_102"),
+	PINCTRL_PIN(103, "GPIO_103"),
+	PINCTRL_PIN(104, "GPIO_104"),
+	PINCTRL_PIN(105, "GPIO_105"),
+	PINCTRL_PIN(106, "GPIO_106"),
+	PINCTRL_PIN(107, "GPIO_107"),
+	PINCTRL_PIN(108, "GPIO_108"),
+	PINCTRL_PIN(109, "GPIO_109"),
+	PINCTRL_PIN(110, "GPIO_110"),
+	PINCTRL_PIN(111, "GPIO_111"),
+	PINCTRL_PIN(112, "GPIO_112"),
+	PINCTRL_PIN(113, "GPIO_113"),
+	PINCTRL_PIN(114, "GPIO_114"),
+	PINCTRL_PIN(115, "GPIO_115"),
+	PINCTRL_PIN(116, "GPIO_116"),
+	PINCTRL_PIN(117, "GPIO_117"),
+	PINCTRL_PIN(118, "GPIO_118"),
+	PINCTRL_PIN(119, "GPIO_119"),
+	PINCTRL_PIN(120, "GPIO_120"),
+	PINCTRL_PIN(121, "GPIO_121"),
+	PINCTRL_PIN(122, "GPIO_122"),
+	PINCTRL_PIN(123, "GPIO_123"),
+	PINCTRL_PIN(124, "GPIO_124"),
+	PINCTRL_PIN(125, "GPIO_125"),
+	PINCTRL_PIN(126, "GPIO_126"),
+	PINCTRL_PIN(127, "GPIO_127"),
+	PINCTRL_PIN(128, "GPIO_128"),
+	PINCTRL_PIN(129, "GPIO_129"),
+	PINCTRL_PIN(130, "GPIO_130"),
+	PINCTRL_PIN(131, "GPIO_131"),
+	PINCTRL_PIN(132, "GPIO_132"),
+	PINCTRL_PIN(133, "GPIO_133"),
+	PINCTRL_PIN(134, "GPIO_134"),
+	PINCTRL_PIN(135, "GPIO_135"),
+	PINCTRL_PIN(136, "GPIO_136"),
+	PINCTRL_PIN(137, "GPIO_137"),
+	PINCTRL_PIN(138, "GPIO_138"),
+	PINCTRL_PIN(139, "GPIO_139"),
+	PINCTRL_PIN(140, "GPIO_140"),
+	PINCTRL_PIN(141, "GPIO_141"),
+	PINCTRL_PIN(142, "GPIO_142"),
+	PINCTRL_PIN(143, "GPIO_143"),
+	PINCTRL_PIN(144, "GPIO_144"),
+	PINCTRL_PIN(145, "GPIO_145"),
+	PINCTRL_PIN(146, "GPIO_146"),
+	PINCTRL_PIN(147, "GPIO_147"),
+	PINCTRL_PIN(148, "GPIO_148"),
+	PINCTRL_PIN(149, "GPIO_149"),
+	PINCTRL_PIN(150, "SDC2_CLK"),
+	PINCTRL_PIN(151, "SDC2_CMD"),
+	PINCTRL_PIN(152, "SDC2_DATA"),
+	PINCTRL_PIN(153, "UFS_RESET"),
+};
+
+#define DECLARE_MSM_GPIO_PINS(pin) \
+	static const unsigned int gpio##pin##_pins[] = { pin }
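+/*
+ * DECLARE_MSM_GPIO_PINS(0), for example, expands to:
+ *	static const unsigned int gpio0_pins[] = { 0 };
+ */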
+DECLARE_MSM_GPIO_PINS(0);
+DECLARE_MSM_GPIO_PINS(1);
+DECLARE_MSM_GPIO_PINS(2);
+DECLARE_MSM_GPIO_PINS(3);
+DECLARE_MSM_GPIO_PINS(4);
+DECLARE_MSM_GPIO_PINS(5);
+DECLARE_MSM_GPIO_PINS(6);
+DECLARE_MSM_GPIO_PINS(7);
+DECLARE_MSM_GPIO_PINS(8);
+DECLARE_MSM_GPIO_PINS(9);
+DECLARE_MSM_GPIO_PINS(10);
+DECLARE_MSM_GPIO_PINS(11);
+DECLARE_MSM_GPIO_PINS(12);
+DECLARE_MSM_GPIO_PINS(13);
+DECLARE_MSM_GPIO_PINS(14);
+DECLARE_MSM_GPIO_PINS(15);
+DECLARE_MSM_GPIO_PINS(16);
+DECLARE_MSM_GPIO_PINS(17);
+DECLARE_MSM_GPIO_PINS(18);
+DECLARE_MSM_GPIO_PINS(19);
+DECLARE_MSM_GPIO_PINS(20);
+DECLARE_MSM_GPIO_PINS(21);
+DECLARE_MSM_GPIO_PINS(22);
+DECLARE_MSM_GPIO_PINS(23);
+DECLARE_MSM_GPIO_PINS(24);
+DECLARE_MSM_GPIO_PINS(25);
+DECLARE_MSM_GPIO_PINS(26);
+DECLARE_MSM_GPIO_PINS(27);
+DECLARE_MSM_GPIO_PINS(28);
+DECLARE_MSM_GPIO_PINS(29);
+DECLARE_MSM_GPIO_PINS(30);
+DECLARE_MSM_GPIO_PINS(31);
+DECLARE_MSM_GPIO_PINS(32);
+DECLARE_MSM_GPIO_PINS(33);
+DECLARE_MSM_GPIO_PINS(34);
+DECLARE_MSM_GPIO_PINS(35);
+DECLARE_MSM_GPIO_PINS(36);
+DECLARE_MSM_GPIO_PINS(37);
+DECLARE_MSM_GPIO_PINS(38);
+DECLARE_MSM_GPIO_PINS(39);
+DECLARE_MSM_GPIO_PINS(40);
+DECLARE_MSM_GPIO_PINS(41);
+DECLARE_MSM_GPIO_PINS(42);
+DECLARE_MSM_GPIO_PINS(43);
+DECLARE_MSM_GPIO_PINS(44);
+DECLARE_MSM_GPIO_PINS(45);
+DECLARE_MSM_GPIO_PINS(46);
+DECLARE_MSM_GPIO_PINS(47);
+DECLARE_MSM_GPIO_PINS(48);
+DECLARE_MSM_GPIO_PINS(49);
+DECLARE_MSM_GPIO_PINS(50);
+DECLARE_MSM_GPIO_PINS(51);
+DECLARE_MSM_GPIO_PINS(52);
+DECLARE_MSM_GPIO_PINS(53);
+DECLARE_MSM_GPIO_PINS(54);
+DECLARE_MSM_GPIO_PINS(55);
+DECLARE_MSM_GPIO_PINS(56);
+DECLARE_MSM_GPIO_PINS(57);
+DECLARE_MSM_GPIO_PINS(58);
+DECLARE_MSM_GPIO_PINS(59);
+DECLARE_MSM_GPIO_PINS(60);
+DECLARE_MSM_GPIO_PINS(61);
+DECLARE_MSM_GPIO_PINS(62);
+DECLARE_MSM_GPIO_PINS(63);
+DECLARE_MSM_GPIO_PINS(64);
+DECLARE_MSM_GPIO_PINS(65);
+DECLARE_MSM_GPIO_PINS(66);
+DECLARE_MSM_GPIO_PINS(67);
+DECLARE_MSM_GPIO_PINS(68);
+DECLARE_MSM_GPIO_PINS(69);
+DECLARE_MSM_GPIO_PINS(70);
+DECLARE_MSM_GPIO_PINS(71);
+DECLARE_MSM_GPIO_PINS(72);
+DECLARE_MSM_GPIO_PINS(73);
+DECLARE_MSM_GPIO_PINS(74);
+DECLARE_MSM_GPIO_PINS(75);
+DECLARE_MSM_GPIO_PINS(76);
+DECLARE_MSM_GPIO_PINS(77);
+DECLARE_MSM_GPIO_PINS(78);
+DECLARE_MSM_GPIO_PINS(79);
+DECLARE_MSM_GPIO_PINS(80);
+DECLARE_MSM_GPIO_PINS(81);
+DECLARE_MSM_GPIO_PINS(82);
+DECLARE_MSM_GPIO_PINS(83);
+DECLARE_MSM_GPIO_PINS(84);
+DECLARE_MSM_GPIO_PINS(85);
+DECLARE_MSM_GPIO_PINS(86);
+DECLARE_MSM_GPIO_PINS(87);
+DECLARE_MSM_GPIO_PINS(88);
+DECLARE_MSM_GPIO_PINS(89);
+DECLARE_MSM_GPIO_PINS(90);
+DECLARE_MSM_GPIO_PINS(91);
+DECLARE_MSM_GPIO_PINS(92);
+DECLARE_MSM_GPIO_PINS(93);
+DECLARE_MSM_GPIO_PINS(94);
+DECLARE_MSM_GPIO_PINS(95);
+DECLARE_MSM_GPIO_PINS(96);
+DECLARE_MSM_GPIO_PINS(97);
+DECLARE_MSM_GPIO_PINS(98);
+DECLARE_MSM_GPIO_PINS(99);
+DECLARE_MSM_GPIO_PINS(100);
+DECLARE_MSM_GPIO_PINS(101);
+DECLARE_MSM_GPIO_PINS(102);
+DECLARE_MSM_GPIO_PINS(103);
+DECLARE_MSM_GPIO_PINS(104);
+DECLARE_MSM_GPIO_PINS(105);
+DECLARE_MSM_GPIO_PINS(106);
+DECLARE_MSM_GPIO_PINS(107);
+DECLARE_MSM_GPIO_PINS(108);
+DECLARE_MSM_GPIO_PINS(109);
+DECLARE_MSM_GPIO_PINS(110);
+DECLARE_MSM_GPIO_PINS(111);
+DECLARE_MSM_GPIO_PINS(112);
+DECLARE_MSM_GPIO_PINS(113);
+DECLARE_MSM_GPIO_PINS(114);
+DECLARE_MSM_GPIO_PINS(115);
+DECLARE_MSM_GPIO_PINS(116);
+DECLARE_MSM_GPIO_PINS(117);
+DECLARE_MSM_GPIO_PINS(118);
+DECLARE_MSM_GPIO_PINS(119);
+DECLARE_MSM_GPIO_PINS(120);
+DECLARE_MSM_GPIO_PINS(121);
+DECLARE_MSM_GPIO_PINS(122);
+DECLARE_MSM_GPIO_PINS(123);
+DECLARE_MSM_GPIO_PINS(124);
+DECLARE_MSM_GPIO_PINS(125);
+DECLARE_MSM_GPIO_PINS(126);
+DECLARE_MSM_GPIO_PINS(127);
+DECLARE_MSM_GPIO_PINS(128);
+DECLARE_MSM_GPIO_PINS(129);
+DECLARE_MSM_GPIO_PINS(130);
+DECLARE_MSM_GPIO_PINS(131);
+DECLARE_MSM_GPIO_PINS(132);
+DECLARE_MSM_GPIO_PINS(133);
+DECLARE_MSM_GPIO_PINS(134);
+DECLARE_MSM_GPIO_PINS(135);
+DECLARE_MSM_GPIO_PINS(136);
+DECLARE_MSM_GPIO_PINS(137);
+DECLARE_MSM_GPIO_PINS(138);
+DECLARE_MSM_GPIO_PINS(139);
+DECLARE_MSM_GPIO_PINS(140);
+DECLARE_MSM_GPIO_PINS(141);
+DECLARE_MSM_GPIO_PINS(142);
+DECLARE_MSM_GPIO_PINS(143);
+DECLARE_MSM_GPIO_PINS(144);
+DECLARE_MSM_GPIO_PINS(145);
+DECLARE_MSM_GPIO_PINS(146);
+DECLARE_MSM_GPIO_PINS(147);
+DECLARE_MSM_GPIO_PINS(148);
+DECLARE_MSM_GPIO_PINS(149);
+
+static const unsigned int sdc2_clk_pins[] = { 150 };
+static const unsigned int sdc2_cmd_pins[] = { 151 };
+static const unsigned int sdc2_data_pins[] = { 152 };
+static const unsigned int ufs_reset_pins[] = { 153 };
+
+enum msm8998_functions {
+	msm_mux_blsp_spi1,
+	msm_mux_blsp_uim1_a,
+	msm_mux_blsp_uart1_a,
+	msm_mux_blsp_i2c1,
+	msm_mux_blsp_spi8,
+	msm_mux_blsp_uart8_a,
+	msm_mux_blsp_uim8_a,
+	msm_mux_qdss_cti0_b,
+	msm_mux_blsp_i2c8,
+	msm_mux_ddr_bist,
+	msm_mux_atest_tsens2,
+	msm_mux_atest_usb1,
+	msm_mux_blsp_spi4,
+	msm_mux_blsp_uart1_b,
+	msm_mux_blsp_uim1_b,
+	msm_mux_wlan1_adc1,
+	msm_mux_atest_usb13,
+	msm_mux_bimc_dte1,
+	msm_mux_tsif1_sync,
+	msm_mux_wlan1_adc0,
+	msm_mux_atest_usb12,
+	msm_mux_bimc_dte0,
+	msm_mux_mdp_vsync_a,
+	msm_mux_blsp_i2c4,
+	msm_mux_atest_gpsadc1,
+	msm_mux_wlan2_adc1,
+	msm_mux_atest_usb11,
+	msm_mux_edp_lcd,
+	msm_mux_dbg_out,
+	msm_mux_atest_gpsadc0,
+	msm_mux_wlan2_adc0,
+	msm_mux_atest_usb10,
+	msm_mux_mdp_vsync,
+	msm_mux_m_voc,
+	msm_mux_cam_mclk,
+	msm_mux_pll_bypassnl,
+	msm_mux_qdss_gpio0,
+	msm_mux_pll_reset,
+	msm_mux_qdss_gpio1,
+	msm_mux_qdss_gpio2,
+	msm_mux_qdss_gpio3,
+	msm_mux_cci_i2c,
+	msm_mux_qdss_gpio4,
+	msm_mux_phase_flag14,
+	msm_mux_qdss_gpio5,
+	msm_mux_phase_flag15,
+	msm_mux_qdss_gpio6,
+	msm_mux_qdss_gpio7,
+	msm_mux_cci_timer4,
+	msm_mux_blsp2_spi,
+	msm_mux_qdss_gpio11,
+	msm_mux_qdss_gpio12,
+	msm_mux_qdss_gpio13,
+	msm_mux_qdss_gpio14,
+	msm_mux_qdss_gpio15,
+	msm_mux_cci_timer0,
+	msm_mux_qdss_gpio8,
+	msm_mux_vsense_data0,
+	msm_mux_cci_timer1,
+	msm_mux_qdss_gpio,
+	msm_mux_vsense_data1,
+	msm_mux_cci_timer2,
+	msm_mux_blsp1_spi_b,
+	msm_mux_qdss_gpio9,
+	msm_mux_vsense_mode,
+	msm_mux_cci_timer3,
+	msm_mux_cci_async,
+	msm_mux_blsp1_spi_a,
+	msm_mux_qdss_gpio10,
+	msm_mux_vsense_clkout,
+	msm_mux_hdmi_rcv,
+	msm_mux_hdmi_cec,
+	msm_mux_blsp_spi2,
+	msm_mux_blsp_uart2_a,
+	msm_mux_blsp_uim2_a,
+	msm_mux_pwr_modem,
+	msm_mux_hdmi_ddc,
+	msm_mux_blsp_i2c2,
+	msm_mux_pwr_nav,
+	msm_mux_pwr_crypto,
+	msm_mux_hdmi_hot,
+	msm_mux_edp_hot,
+	msm_mux_pci_e0,
+	msm_mux_jitter_bist,
+	msm_mux_agera_pll,
+	msm_mux_atest_tsens,
+	msm_mux_usb_phy,
+	msm_mux_lpass_slimbus,
+	msm_mux_sd_write,
+	msm_mux_tsif1_error,
+	msm_mux_blsp_spi6,
+	msm_mux_blsp_uart3_b,
+	msm_mux_blsp_uim3_b,
+	msm_mux_blsp_i2c6,
+	msm_mux_bt_reset,
+	msm_mux_blsp_spi3,
+	msm_mux_blsp_uart3_a,
+	msm_mux_blsp_uim3_a,
+	msm_mux_blsp_i2c3,
+	msm_mux_blsp_spi9,
+	msm_mux_blsp_uart9_a,
+	msm_mux_blsp_uim9_a,
+	msm_mux_blsp10_spi_b,
+	msm_mux_qdss_cti0_a,
+	msm_mux_blsp_i2c9,
+	msm_mux_blsp10_spi_a,
+	msm_mux_blsp_spi7,
+	msm_mux_blsp_uart7_a,
+	msm_mux_blsp_uim7_a,
+	msm_mux_blsp_i2c7,
+	msm_mux_qua_mi2s,
+	msm_mux_blsp10_spi,
+	msm_mux_gcc_gp1_a,
+	msm_mux_ssc_irq,
+	msm_mux_blsp_spi11,
+	msm_mux_blsp_uart8_b,
+	msm_mux_blsp_uim8_b,
+	msm_mux_gcc_gp2_a,
+	msm_mux_qdss_cti1_a,
+	msm_mux_gcc_gp3_a,
+	msm_mux_blsp_i2c11,
+	msm_mux_cri_trng0,
+	msm_mux_cri_trng1,
+	msm_mux_cri_trng,
+	msm_mux_pri_mi2s,
+	msm_mux_sp_cmu,
+	msm_mux_blsp_spi10,
+	msm_mux_blsp_uart7_b,
+	msm_mux_blsp_uim7_b,
+	msm_mux_pri_mi2s_ws,
+	msm_mux_blsp_i2c10,
+	msm_mux_spkr_i2s,
+	msm_mux_audio_ref,
+	msm_mux_blsp9_spi,
+	msm_mux_tsense_pwm1,
+	msm_mux_tsense_pwm2,
+	msm_mux_btfm_slimbus,
+	msm_mux_phase_flag0,
+	msm_mux_ter_mi2s,
+	msm_mux_phase_flag7,
+	msm_mux_phase_flag8,
+	msm_mux_phase_flag9,
+	msm_mux_phase_flag4,
+	msm_mux_gcc_gp1_b,
+	msm_mux_sec_mi2s,
+	msm_mux_blsp_spi12,
+	msm_mux_blsp_uart9_b,
+	msm_mux_blsp_uim9_b,
+	msm_mux_gcc_gp2_b,
+	msm_mux_gcc_gp3_b,
+	msm_mux_blsp_i2c12,
+	msm_mux_blsp_spi5,
+	msm_mux_blsp_uart2_b,
+	msm_mux_blsp_uim2_b,
+	msm_mux_blsp_i2c5,
+	msm_mux_tsif1_clk,
+	msm_mux_phase_flag10,
+	msm_mux_tsif1_en,
+	msm_mux_mdp_vsync0,
+	msm_mux_mdp_vsync1,
+	msm_mux_mdp_vsync2,
+	msm_mux_mdp_vsync3,
+	msm_mux_blsp1_spi,
+	msm_mux_tgu_ch0,
+	msm_mux_qdss_cti1_b,
+	msm_mux_tsif1_data,
+	msm_mux_sdc4_cmd,
+	msm_mux_tgu_ch1,
+	msm_mux_phase_flag1,
+	msm_mux_tsif2_error,
+	msm_mux_sdc43,
+	msm_mux_vfr_1,
+	msm_mux_phase_flag2,
+	msm_mux_tsif2_clk,
+	msm_mux_sdc4_clk,
+	msm_mux_tsif2_en,
+	msm_mux_sdc42,
+	msm_mux_sd_card,
+	msm_mux_tsif2_data,
+	msm_mux_sdc41,
+	msm_mux_tsif2_sync,
+	msm_mux_sdc40,
+	msm_mux_phase_flag3,
+	msm_mux_mdp_vsync_b,
+	msm_mux_ldo_en,
+	msm_mux_ldo_update,
+	msm_mux_blsp_uart8,
+	msm_mux_blsp11_i2c,
+	msm_mux_prng_rosc,
+	msm_mux_phase_flag5,
+	msm_mux_uim2_data,
+	msm_mux_uim2_clk,
+	msm_mux_uim2_reset,
+	msm_mux_uim2_present,
+	msm_mux_uim1_data,
+	msm_mux_uim1_clk,
+	msm_mux_uim1_reset,
+	msm_mux_uim1_present,
+	msm_mux_uim_batt,
+	msm_mux_phase_flag16,
+	msm_mux_nav_dr,
+	msm_mux_phase_flag11,
+	msm_mux_phase_flag12,
+	msm_mux_phase_flag13,
+	msm_mux_atest_char,
+	msm_mux_adsp_ext,
+	msm_mux_phase_flag17,
+	msm_mux_atest_char3,
+	msm_mux_phase_flag18,
+	msm_mux_atest_char2,
+	msm_mux_phase_flag19,
+	msm_mux_atest_char1,
+	msm_mux_phase_flag20,
+	msm_mux_atest_char0,
+	msm_mux_phase_flag21,
+	msm_mux_phase_flag22,
+	msm_mux_phase_flag23,
+	msm_mux_phase_flag24,
+	msm_mux_phase_flag25,
+	msm_mux_modem_tsync,
+	msm_mux_nav_pps,
+	msm_mux_phase_flag26,
+	msm_mux_phase_flag27,
+	msm_mux_qlink_request,
+	msm_mux_phase_flag28,
+	msm_mux_qlink_enable,
+	msm_mux_phase_flag6,
+	msm_mux_phase_flag29,
+	msm_mux_phase_flag30,
+	msm_mux_phase_flag31,
+	msm_mux_pa_indicator,
+	msm_mux_ssbi1,
+	msm_mux_isense_dbg,
+	msm_mux_mss_lte,
+	msm_mux_gpio,
+	msm_mux_NA,
+};
+
+static const char * const gpio_groups[] = {
+	"gpio0", "gpio1", "gpio2", "gpio3", "gpio4", "gpio5", "gpio6", "gpio7",
+	"gpio8", "gpio9", "gpio10", "gpio11", "gpio12", "gpio13", "gpio14",
+	"gpio15", "gpio16", "gpio17", "gpio18", "gpio19", "gpio20", "gpio21",
+	"gpio22", "gpio23", "gpio24", "gpio25", "gpio26", "gpio27", "gpio28",
+	"gpio29", "gpio30", "gpio31", "gpio32", "gpio33", "gpio34", "gpio35",
+	"gpio36", "gpio37", "gpio38", "gpio39", "gpio40", "gpio41", "gpio42",
+	"gpio43", "gpio44", "gpio45", "gpio46", "gpio47", "gpio48", "gpio49",
+	"gpio50", "gpio51", "gpio52", "gpio53", "gpio54", "gpio55", "gpio56",
+	"gpio57", "gpio58", "gpio59", "gpio60", "gpio61", "gpio62", "gpio63",
+	"gpio64", "gpio65", "gpio66", "gpio67", "gpio68", "gpio69", "gpio70",
+	"gpio71", "gpio72", "gpio73", "gpio74", "gpio75", "gpio76", "gpio77",
+	"gpio78", "gpio79", "gpio80", "gpio81", "gpio82", "gpio83", "gpio84",
+	"gpio85", "gpio86", "gpio87", "gpio88", "gpio89", "gpio90", "gpio91",
+	"gpio92", "gpio93", "gpio94", "gpio95", "gpio96", "gpio97", "gpio98",
+	"gpio99", "gpio100", "gpio101", "gpio102", "gpio103", "gpio104",
+	"gpio105", "gpio106", "gpio107", "gpio108", "gpio109", "gpio110",
+	"gpio111", "gpio112", "gpio113", "gpio114", "gpio115", "gpio116",
+	"gpio117", "gpio118", "gpio119", "gpio120", "gpio121", "gpio122",
+	"gpio123", "gpio124", "gpio125", "gpio126", "gpio127", "gpio128",
+	"gpio129", "gpio130", "gpio131", "gpio132", "gpio133", "gpio134",
+	"gpio135", "gpio136", "gpio137", "gpio138", "gpio139", "gpio140",
+	"gpio141", "gpio142", "gpio143", "gpio144", "gpio145", "gpio146",
+	"gpio147", "gpio148", "gpio149",
+};
+static const char * const blsp_spi1_groups[] = {
+	"gpio0", "gpio1", "gpio2", "gpio3",
+};
+static const char * const blsp_uim1_a_groups[] = {
+	"gpio0", "gpio1",
+};
+static const char * const blsp_uart1_a_groups[] = {
+	"gpio0", "gpio1", "gpio2", "gpio3",
+};
+static const char * const blsp_i2c1_groups[] = {
+	"gpio2", "gpio3",
+};
+static const char * const blsp_spi8_groups[] = {
+	"gpio4", "gpio5", "gpio6", "gpio7",
+};
+static const char * const blsp_uart8_a_groups[] = {
+	"gpio4", "gpio5", "gpio6", "gpio7",
+};
+static const char * const blsp_uim8_a_groups[] = {
+	"gpio4", "gpio5",
+};
+static const char * const qdss_cti0_b_groups[] = {
+	"gpio4", "gpio5",
+};
+static const char * const blsp_i2c8_groups[] = {
+	"gpio6", "gpio7",
+};
+static const char * const ddr_bist_groups[] = {
+	"gpio7", "gpio8", "gpio9", "gpio10",
+};
+static const char * const atest_tsens2_groups[] = {
+	"gpio7",
+};
+static const char * const atest_usb1_groups[] = {
+	"gpio7",
+};
+static const char * const blsp_spi4_groups[] = {
+	"gpio8", "gpio9", "gpio10", "gpio11",
+};
+static const char * const blsp_uart1_b_groups[] = {
+	"gpio8", "gpio9", "gpio10", "gpio11",
+};
+static const char * const blsp_uim1_b_groups[] = {
+	"gpio8", "gpio9",
+};
+static const char * const wlan1_adc1_groups[] = {
+	"gpio8",
+};
+static const char * const atest_usb13_groups[] = {
+	"gpio8",
+};
+static const char * const bimc_dte1_groups[] = {
+	"gpio8", "gpio10",
+};
+static const char * const tsif1_sync_groups[] = {
+	"gpio9",
+};
+static const char * const wlan1_adc0_groups[] = {
+	"gpio9",
+};
+static const char * const atest_usb12_groups[] = {
+	"gpio9",
+};
+static const char * const bimc_dte0_groups[] = {
+	"gpio9", "gpio11",
+};
+static const char * const mdp_vsync_a_groups[] = {
+	"gpio10", "gpio11",
+};
+static const char * const blsp_i2c4_groups[] = {
+	"gpio10", "gpio11",
+};
+static const char * const atest_gpsadc1_groups[] = {
+	"gpio10",
+};
+static const char * const wlan2_adc1_groups[] = {
+	"gpio10",
+};
+static const char * const atest_usb11_groups[] = {
+	"gpio10",
+};
+static const char * const edp_lcd_groups[] = {
+	"gpio11",
+};
+static const char * const dbg_out_groups[] = {
+	"gpio11",
+};
+static const char * const atest_gpsadc0_groups[] = {
+	"gpio11",
+};
+static const char * const wlan2_adc0_groups[] = {
+	"gpio11",
+};
+static const char * const atest_usb10_groups[] = {
+	"gpio11",
+};
+static const char * const mdp_vsync_groups[] = {
+	"gpio12",
+};
+static const char * const m_voc_groups[] = {
+	"gpio12",
+};
+static const char * const cam_mclk_groups[] = {
+	"gpio13", "gpio14", "gpio15", "gpio16",
+};
+static const char * const pll_bypassnl_groups[] = {
+	"gpio13",
+};
+static const char * const qdss_gpio0_groups[] = {
+	"gpio13", "gpio117",
+};
+static const char * const pll_reset_groups[] = {
+	"gpio14",
+};
+static const char * const qdss_gpio1_groups[] = {
+	"gpio14", "gpio118",
+};
+static const char * const qdss_gpio2_groups[] = {
+	"gpio15", "gpio119",
+};
+static const char * const qdss_gpio3_groups[] = {
+	"gpio16", "gpio120",
+};
+static const char * const cci_i2c_groups[] = {
+	"gpio17", "gpio18", "gpio19", "gpio20",
+};
+static const char * const qdss_gpio4_groups[] = {
+	"gpio17", "gpio121",
+};
+static const char * const phase_flag14_groups[] = {
+	"gpio18",
+};
+static const char * const qdss_gpio5_groups[] = {
+	"gpio18", "gpio122",
+};
+static const char * const phase_flag15_groups[] = {
+	"gpio19",
+};
+static const char * const qdss_gpio6_groups[] = {
+	"gpio19", "gpio41",
+};
+static const char * const qdss_gpio7_groups[] = {
+	"gpio20", "gpio42",
+};
+static const char * const cci_timer4_groups[] = {
+	"gpio25",
+};
+static const char * const blsp2_spi_groups[] = {
+	"gpio25", "gpio29", "gpio30",
+};
+static const char * const qdss_gpio11_groups[] = {
+	"gpio25", "gpio79",
+};
+static const char * const qdss_gpio12_groups[] = {
+	"gpio26", "gpio80",
+};
+static const char * const qdss_gpio13_groups[] = {
+	"gpio27", "gpio93",
+};
+static const char * const qdss_gpio14_groups[] = {
+	"gpio28", "gpio43",
+};
+static const char * const qdss_gpio15_groups[] = {
+	"gpio29", "gpio44",
+};
+static const char * const cci_timer0_groups[] = {
+	"gpio21",
+};
+static const char * const qdss_gpio8_groups[] = {
+	"gpio21", "gpio75",
+};
+static const char * const vsense_data0_groups[] = {
+	"gpio21",
+};
+static const char * const cci_timer1_groups[] = {
+	"gpio22",
+};
+static const char * const qdss_gpio_groups[] = {
+	"gpio22", "gpio30", "gpio123", "gpio124",
+};
+static const char * const vsense_data1_groups[] = {
+	"gpio22",
+};
+static const char * const cci_timer2_groups[] = {
+	"gpio23",
+};
+static const char * const blsp1_spi_b_groups[] = {
+	"gpio23", "gpio28",
+};
+static const char * const qdss_gpio9_groups[] = {
+	"gpio23", "gpio76",
+};
+static const char * const vsense_mode_groups[] = {
+	"gpio23",
+};
+static const char * const cci_timer3_groups[] = {
+	"gpio24",
+};
+static const char * const cci_async_groups[] = {
+	"gpio24", "gpio25", "gpio26",
+};
+static const char * const blsp1_spi_a_groups[] = {
+	"gpio24", "gpio27",
+};
+static const char * const qdss_gpio10_groups[] = {
+	"gpio24", "gpio77",
+};
+static const char * const vsense_clkout_groups[] = {
+	"gpio24",
+};
+static const char * const hdmi_rcv_groups[] = {
+	"gpio30",
+};
+static const char * const hdmi_cec_groups[] = {
+	"gpio31",
+};
+static const char * const blsp_spi2_groups[] = {
+	"gpio31", "gpio32", "gpio33", "gpio34",
+};
+static const char * const blsp_uart2_a_groups[] = {
+	"gpio31", "gpio32", "gpio33", "gpio34",
+};
+static const char * const blsp_uim2_a_groups[] = {
+	"gpio31", "gpio34",
+};
+static const char * const pwr_modem_groups[] = {
+	"gpio31",
+};
+static const char * const hdmi_ddc_groups[] = {
+	"gpio32", "gpio33",
+};
+static const char * const blsp_i2c2_groups[] = {
+	"gpio32", "gpio33",
+};
+static const char * const pwr_nav_groups[] = {
+	"gpio32",
+};
+static const char * const pwr_crypto_groups[] = {
+	"gpio33",
+};
+static const char * const hdmi_hot_groups[] = {
+	"gpio34",
+};
+static const char * const edp_hot_groups[] = {
+	"gpio34",
+};
+static const char * const pci_e0_groups[] = {
+	"gpio35", "gpio36", "gpio37",
+};
+static const char * const jitter_bist_groups[] = {
+	"gpio35",
+};
+static const char * const agera_pll_groups[] = {
+	"gpio36", "gpio37",
+};
+static const char * const atest_tsens_groups[] = {
+	"gpio36",
+};
+static const char * const usb_phy_groups[] = {
+	"gpio38",
+};
+static const char * const lpass_slimbus_groups[] = {
+	"gpio39", "gpio70", "gpio71", "gpio72",
+};
+static const char * const sd_write_groups[] = {
+	"gpio40",
+};
+static const char * const tsif1_error_groups[] = {
+	"gpio40",
+};
+static const char * const blsp_spi6_groups[] = {
+	"gpio41", "gpio42", "gpio43", "gpio44",
+};
+static const char * const blsp_uart3_b_groups[] = {
+	"gpio41", "gpio42", "gpio43", "gpio44",
+};
+static const char * const blsp_uim3_b_groups[] = {
+	"gpio41", "gpio42",
+};
+static const char * const blsp_i2c6_groups[] = {
+	"gpio43", "gpio44",
+};
+static const char * const bt_reset_groups[] = {
+	"gpio45",
+};
+static const char * const blsp_spi3_groups[] = {
+	"gpio45", "gpio46", "gpio47", "gpio48",
+};
+static const char * const blsp_uart3_a_groups[] = {
+	"gpio45", "gpio46", "gpio47", "gpio48",
+};
+static const char * const blsp_uim3_a_groups[] = {
+	"gpio45", "gpio46",
+};
+static const char * const blsp_i2c3_groups[] = {
+	"gpio47", "gpio48",
+};
+static const char * const blsp_spi9_groups[] = {
+	"gpio49", "gpio50", "gpio51", "gpio52",
+};
+static const char * const blsp_uart9_a_groups[] = {
+	"gpio49", "gpio50", "gpio51", "gpio52",
+};
+static const char * const blsp_uim9_a_groups[] = {
+	"gpio49", "gpio50",
+};
+static const char * const blsp10_spi_b_groups[] = {
+	"gpio49", "gpio50",
+};
+static const char * const qdss_cti0_a_groups[] = {
+	"gpio49", "gpio50",
+};
+static const char * const blsp_i2c9_groups[] = {
+	"gpio51", "gpio52",
+};
+static const char * const blsp10_spi_a_groups[] = {
+	"gpio51", "gpio52",
+};
+static const char * const blsp_spi7_groups[] = {
+	"gpio53", "gpio54", "gpio55", "gpio56",
+};
+static const char * const blsp_uart7_a_groups[] = {
+	"gpio53", "gpio54", "gpio55", "gpio56",
+};
+static const char * const blsp_uim7_a_groups[] = {
+	"gpio53", "gpio54",
+};
+static const char * const blsp_i2c7_groups[] = {
+	"gpio55", "gpio56",
+};
+static const char * const qua_mi2s_groups[] = {
+	"gpio57", "gpio58", "gpio59", "gpio60", "gpio61", "gpio62", "gpio63",
+};
+static const char * const blsp10_spi_groups[] = {
+	"gpio57",
+};
+static const char * const gcc_gp1_a_groups[] = {
+	"gpio57",
+};
+static const char * const ssc_irq_groups[] = {
+	"gpio58", "gpio59", "gpio60", "gpio61", "gpio62", "gpio63", "gpio78",
+	"gpio79", "gpio80", "gpio117", "gpio118", "gpio119", "gpio120",
+	"gpio121", "gpio122", "gpio123", "gpio124", "gpio125",
+};
+static const char * const blsp_spi11_groups[] = {
+	"gpio58", "gpio59", "gpio60", "gpio61",
+};
+static const char * const blsp_uart8_b_groups[] = {
+	"gpio58", "gpio59", "gpio60", "gpio61",
+};
+static const char * const blsp_uim8_b_groups[] = {
+	"gpio58", "gpio59",
+};
+static const char * const gcc_gp2_a_groups[] = {
+	"gpio58",
+};
+static const char * const qdss_cti1_a_groups[] = {
+	"gpio58", "gpio59",
+};
+static const char * const gcc_gp3_a_groups[] = {
+	"gpio59",
+};
+static const char * const blsp_i2c11_groups[] = {
+	"gpio60", "gpio61",
+};
+static const char * const cri_trng0_groups[] = {
+	"gpio60",
+};
+static const char * const cri_trng1_groups[] = {
+	"gpio61",
+};
+static const char * const cri_trng_groups[] = {
+	"gpio62",
+};
+static const char * const pri_mi2s_groups[] = {
+	"gpio64", "gpio65", "gpio67", "gpio68",
+};
+static const char * const sp_cmu_groups[] = {
+	"gpio64",
+};
+static const char * const blsp_spi10_groups[] = {
+	"gpio65", "gpio66", "gpio67", "gpio68",
+};
+static const char * const blsp_uart7_b_groups[] = {
+	"gpio65", "gpio66", "gpio67", "gpio68",
+};
+static const char * const blsp_uim7_b_groups[] = {
+	"gpio65", "gpio66",
+};
+static const char * const pri_mi2s_ws_groups[] = {
+	"gpio66",
+};
+static const char * const blsp_i2c10_groups[] = {
+	"gpio67", "gpio68",
+};
+static const char * const spkr_i2s_groups[] = {
+	"gpio69", "gpio70", "gpio71", "gpio72",
+};
+static const char * const audio_ref_groups[] = {
+	"gpio69",
+};
+static const char * const blsp9_spi_groups[] = {
+	"gpio70", "gpio71", "gpio72",
+};
+static const char * const tsense_pwm1_groups[] = {
+	"gpio71",
+};
+static const char * const tsense_pwm2_groups[] = {
+	"gpio71",
+};
+static const char * const btfm_slimbus_groups[] = {
+	"gpio73", "gpio74",
+};
+static const char * const phase_flag0_groups[] = {
+	"gpio73",
+};
+static const char * const ter_mi2s_groups[] = {
+	"gpio74", "gpio75", "gpio76", "gpio77", "gpio78",
+};
+static const char * const phase_flag7_groups[] = {
+	"gpio74",
+};
+static const char * const phase_flag8_groups[] = {
+	"gpio75",
+};
+static const char * const phase_flag9_groups[] = {
+	"gpio76",
+};
+static const char * const phase_flag4_groups[] = {
+	"gpio77",
+};
+static const char * const gcc_gp1_b_groups[] = {
+	"gpio78",
+};
+static const char * const sec_mi2s_groups[] = {
+	"gpio79", "gpio80", "gpio81", "gpio82", "gpio83",
+};
+static const char * const blsp_spi12_groups[] = {
+	"gpio81", "gpio82", "gpio83", "gpio84",
+};
+static const char * const blsp_uart9_b_groups[] = {
+	"gpio81", "gpio82", "gpio83", "gpio84",
+};
+static const char * const blsp_uim9_b_groups[] = {
+	"gpio81", "gpio82",
+};
+static const char * const gcc_gp2_b_groups[] = {
+	"gpio81",
+};
+static const char * const gcc_gp3_b_groups[] = {
+	"gpio82",
+};
+static const char * const blsp_i2c12_groups[] = {
+	"gpio83", "gpio84",
+};
+static const char * const blsp_spi5_groups[] = {
+	"gpio85", "gpio86", "gpio87", "gpio88",
+};
+static const char * const blsp_uart2_b_groups[] = {
+	"gpio85", "gpio86", "gpio87", "gpio88",
+};
+static const char * const blsp_uim2_b_groups[] = {
+	"gpio85", "gpio86",
+};
+static const char * const blsp_i2c5_groups[] = {
+	"gpio87", "gpio88",
+};
+static const char * const tsif1_clk_groups[] = {
+	"gpio89",
+};
+static const char * const phase_flag10_groups[] = {
+	"gpio89",
+};
+static const char * const tsif1_en_groups[] = {
+	"gpio90",
+};
+static const char * const mdp_vsync0_groups[] = {
+	"gpio90",
+};
+static const char * const mdp_vsync1_groups[] = {
+	"gpio90",
+};
+static const char * const mdp_vsync2_groups[] = {
+	"gpio90",
+};
+static const char * const mdp_vsync3_groups[] = {
+	"gpio90",
+};
+static const char * const blsp1_spi_groups[] = {
+	"gpio90",
+};
+static const char * const tgu_ch0_groups[] = {
+	"gpio90",
+};
+static const char * const qdss_cti1_b_groups[] = {
+	"gpio90", "gpio91",
+};
+static const char * const tsif1_data_groups[] = {
+	"gpio91",
+};
+static const char * const sdc4_cmd_groups[] = {
+	"gpio91",
+};
+static const char * const tgu_ch1_groups[] = {
+	"gpio91",
+};
+static const char * const phase_flag1_groups[] = {
+	"gpio91",
+};
+static const char * const tsif2_error_groups[] = {
+	"gpio92",
+};
+static const char * const sdc43_groups[] = {
+	"gpio92",
+};
+static const char * const vfr_1_groups[] = {
+	"gpio92",
+};
+static const char * const phase_flag2_groups[] = {
+	"gpio92",
+};
+static const char * const tsif2_clk_groups[] = {
+	"gpio93",
+};
+static const char * const sdc4_clk_groups[] = {
+	"gpio93",
+};
+static const char * const tsif2_en_groups[] = {
+	"gpio94",
+};
+static const char * const sdc42_groups[] = {
+	"gpio94",
+};
+static const char * const sd_card_groups[] = {
+	"gpio95",
+};
+static const char * const tsif2_data_groups[] = {
+	"gpio95",
+};
+static const char * const sdc41_groups[] = {
+	"gpio95",
+};
+static const char * const tsif2_sync_groups[] = {
+	"gpio96",
+};
+static const char * const sdc40_groups[] = {
+	"gpio96",
+};
+static const char * const phase_flag3_groups[] = {
+	"gpio96",
+};
+static const char * const mdp_vsync_b_groups[] = {
+	"gpio97", "gpio98",
+};
+static const char * const ldo_en_groups[] = {
+	"gpio97",
+};
+static const char * const ldo_update_groups[] = {
+	"gpio98",
+};
+static const char * const blsp_uart8_groups[] = {
+	"gpio100", "gpio101",
+};
+static const char * const blsp11_i2c_groups[] = {
+	"gpio102", "gpio103",
+};
+static const char * const prng_rosc_groups[] = {
+	"gpio102",
+};
+static const char * const phase_flag5_groups[] = {
+	"gpio103",
+};
+static const char * const uim2_data_groups[] = {
+	"gpio105",
+};
+static const char * const uim2_clk_groups[] = {
+	"gpio106",
+};
+static const char * const uim2_reset_groups[] = {
+	"gpio107",
+};
+static const char * const uim2_present_groups[] = {
+	"gpio108",
+};
+static const char * const uim1_data_groups[] = {
+	"gpio109",
+};
+static const char * const uim1_clk_groups[] = {
+	"gpio110",
+};
+static const char * const uim1_reset_groups[] = {
+	"gpio111",
+};
+static const char * const uim1_present_groups[] = {
+	"gpio112",
+};
+static const char * const uim_batt_groups[] = {
+	"gpio113",
+};
+static const char * const phase_flag16_groups[] = {
+	"gpio114",
+};
+static const char * const nav_dr_groups[] = {
+	"gpio115",
+};
+static const char * const phase_flag11_groups[] = {
+	"gpio115",
+};
+static const char * const phase_flag12_groups[] = {
+	"gpio116",
+};
+static const char * const phase_flag13_groups[] = {
+	"gpio117",
+};
+static const char * const atest_char_groups[] = {
+	"gpio117",
+};
+static const char * const adsp_ext_groups[] = {
+	"gpio118",
+};
+static const char * const phase_flag17_groups[] = {
+	"gpio118",
+};
+static const char * const atest_char3_groups[] = {
+	"gpio118",
+};
+static const char * const phase_flag18_groups[] = {
+	"gpio119",
+};
+static const char * const atest_char2_groups[] = {
+	"gpio119",
+};
+static const char * const phase_flag19_groups[] = {
+	"gpio120",
+};
+static const char * const atest_char1_groups[] = {
+	"gpio120",
+};
+static const char * const phase_flag20_groups[] = {
+	"gpio121",
+};
+static const char * const atest_char0_groups[] = {
+	"gpio121",
+};
+static const char * const phase_flag21_groups[] = {
+	"gpio122",
+};
+static const char * const phase_flag22_groups[] = {
+	"gpio123",
+};
+static const char * const phase_flag23_groups[] = {
+	"gpio124",
+};
+static const char * const phase_flag24_groups[] = {
+	"gpio125",
+};
+static const char * const phase_flag25_groups[] = {
+	"gpio126",
+};
+static const char * const modem_tsync_groups[] = {
+	"gpio128",
+};
+static const char * const nav_pps_groups[] = {
+	"gpio128",
+};
+static const char * const phase_flag26_groups[] = {
+	"gpio128",
+};
+static const char * const phase_flag27_groups[] = {
+	"gpio129",
+};
+static const char * const qlink_request_groups[] = {
+	"gpio130",
+};
+static const char * const phase_flag28_groups[] = {
+	"gpio130",
+};
+static const char * const qlink_enable_groups[] = {
+	"gpio131",
+};
+static const char * const phase_flag6_groups[] = {
+	"gpio131",
+};
+static const char * const phase_flag29_groups[] = {
+	"gpio132",
+};
+static const char * const phase_flag30_groups[] = {
+	"gpio133",
+};
+static const char * const phase_flag31_groups[] = {
+	"gpio134",
+};
+static const char * const pa_indicator_groups[] = {
+	"gpio135",
+};
+static const char * const ssbi1_groups[] = {
+	"gpio142",
+};
+static const char * const isense_dbg_groups[] = {
+	"gpio143",
+};
+static const char * const mss_lte_groups[] = {
+	"gpio144", "gpio145",
+};
+
+static const struct msm_function msm8998_functions[] = {
+	FUNCTION(blsp_spi1),
+	FUNCTION(gpio),
+	FUNCTION(blsp_uim1_a),
+	FUNCTION(blsp_uart1_a),
+	FUNCTION(blsp_i2c1),
+	FUNCTION(blsp_spi8),
+	FUNCTION(blsp_uart8_a),
+	FUNCTION(blsp_uim8_a),
+	FUNCTION(qdss_cti0_b),
+	FUNCTION(blsp_i2c8),
+	FUNCTION(ddr_bist),
+	FUNCTION(atest_tsens2),
+	FUNCTION(atest_usb1),
+	FUNCTION(blsp_spi4),
+	FUNCTION(blsp_uart1_b),
+	FUNCTION(blsp_uim1_b),
+	FUNCTION(wlan1_adc1),
+	FUNCTION(atest_usb13),
+	FUNCTION(bimc_dte1),
+	FUNCTION(tsif1_sync),
+	FUNCTION(wlan1_adc0),
+	FUNCTION(atest_usb12),
+	FUNCTION(bimc_dte0),
+	FUNCTION(mdp_vsync_a),
+	FUNCTION(blsp_i2c4),
+	FUNCTION(atest_gpsadc1),
+	FUNCTION(wlan2_adc1),
+	FUNCTION(atest_usb11),
+	FUNCTION(edp_lcd),
+	FUNCTION(dbg_out),
+	FUNCTION(atest_gpsadc0),
+	FUNCTION(wlan2_adc0),
+	FUNCTION(atest_usb10),
+	FUNCTION(mdp_vsync),
+	FUNCTION(m_voc),
+	FUNCTION(cam_mclk),
+	FUNCTION(pll_bypassnl),
+	FUNCTION(qdss_gpio0),
+	FUNCTION(pll_reset),
+	FUNCTION(qdss_gpio1),
+	FUNCTION(qdss_gpio2),
+	FUNCTION(qdss_gpio3),
+	FUNCTION(cci_i2c),
+	FUNCTION(qdss_gpio4),
+	FUNCTION(phase_flag14),
+	FUNCTION(qdss_gpio5),
+	FUNCTION(phase_flag15),
+	FUNCTION(qdss_gpio6),
+	FUNCTION(qdss_gpio7),
+	FUNCTION(cci_timer4),
+	FUNCTION(blsp2_spi),
+	FUNCTION(qdss_gpio11),
+	FUNCTION(qdss_gpio12),
+	FUNCTION(qdss_gpio13),
+	FUNCTION(qdss_gpio14),
+	FUNCTION(qdss_gpio15),
+	FUNCTION(cci_timer0),
+	FUNCTION(qdss_gpio8),
+	FUNCTION(vsense_data0),
+	FUNCTION(cci_timer1),
+	FUNCTION(qdss_gpio),
+	FUNCTION(vsense_data1),
+	FUNCTION(cci_timer2),
+	FUNCTION(blsp1_spi_b),
+	FUNCTION(qdss_gpio9),
+	FUNCTION(vsense_mode),
+	FUNCTION(cci_timer3),
+	FUNCTION(cci_async),
+	FUNCTION(blsp1_spi_a),
+	FUNCTION(qdss_gpio10),
+	FUNCTION(vsense_clkout),
+	FUNCTION(hdmi_rcv),
+	FUNCTION(hdmi_cec),
+	FUNCTION(blsp_spi2),
+	FUNCTION(blsp_uart2_a),
+	FUNCTION(blsp_uim2_a),
+	FUNCTION(pwr_modem),
+	FUNCTION(hdmi_ddc),
+	FUNCTION(blsp_i2c2),
+	FUNCTION(pwr_nav),
+	FUNCTION(pwr_crypto),
+	FUNCTION(hdmi_hot),
+	FUNCTION(edp_hot),
+	FUNCTION(pci_e0),
+	FUNCTION(jitter_bist),
+	FUNCTION(agera_pll),
+	FUNCTION(atest_tsens),
+	FUNCTION(usb_phy),
+	FUNCTION(lpass_slimbus),
+	FUNCTION(sd_write),
+	FUNCTION(tsif1_error),
+	FUNCTION(blsp_spi6),
+	FUNCTION(blsp_uart3_b),
+	FUNCTION(blsp_uim3_b),
+	FUNCTION(blsp_i2c6),
+	FUNCTION(bt_reset),
+	FUNCTION(blsp_spi3),
+	FUNCTION(blsp_uart3_a),
+	FUNCTION(blsp_uim3_a),
+	FUNCTION(blsp_i2c3),
+	FUNCTION(blsp_spi9),
+	FUNCTION(blsp_uart9_a),
+	FUNCTION(blsp_uim9_a),
+	FUNCTION(blsp10_spi_b),
+	FUNCTION(qdss_cti0_a),
+	FUNCTION(blsp_i2c9),
+	FUNCTION(blsp10_spi_a),
+	FUNCTION(blsp_spi7),
+	FUNCTION(blsp_uart7_a),
+	FUNCTION(blsp_uim7_a),
+	FUNCTION(blsp_i2c7),
+	FUNCTION(qua_mi2s),
+	FUNCTION(blsp10_spi),
+	FUNCTION(gcc_gp1_a),
+	FUNCTION(ssc_irq),
+	FUNCTION(blsp_spi11),
+	FUNCTION(blsp_uart8_b),
+	FUNCTION(blsp_uim8_b),
+	FUNCTION(gcc_gp2_a),
+	FUNCTION(qdss_cti1_a),
+	FUNCTION(gcc_gp3_a),
+	FUNCTION(blsp_i2c11),
+	FUNCTION(cri_trng0),
+	FUNCTION(cri_trng1),
+	FUNCTION(cri_trng),
+	FUNCTION(pri_mi2s),
+	FUNCTION(sp_cmu),
+	FUNCTION(blsp_spi10),
+	FUNCTION(blsp_uart7_b),
+	FUNCTION(blsp_uim7_b),
+	FUNCTION(pri_mi2s_ws),
+	FUNCTION(blsp_i2c10),
+	FUNCTION(spkr_i2s),
+	FUNCTION(audio_ref),
+	FUNCTION(blsp9_spi),
+	FUNCTION(tsense_pwm1),
+	FUNCTION(tsense_pwm2),
+	FUNCTION(btfm_slimbus),
+	FUNCTION(phase_flag0),
+	FUNCTION(ter_mi2s),
+	FUNCTION(phase_flag7),
+	FUNCTION(phase_flag8),
+	FUNCTION(phase_flag9),
+	FUNCTION(phase_flag4),
+	FUNCTION(gcc_gp1_b),
+	FUNCTION(sec_mi2s),
+	FUNCTION(blsp_spi12),
+	FUNCTION(blsp_uart9_b),
+	FUNCTION(blsp_uim9_b),
+	FUNCTION(gcc_gp2_b),
+	FUNCTION(gcc_gp3_b),
+	FUNCTION(blsp_i2c12),
+	FUNCTION(blsp_spi5),
+	FUNCTION(blsp_uart2_b),
+	FUNCTION(blsp_uim2_b),
+	FUNCTION(blsp_i2c5),
+	FUNCTION(tsif1_clk),
+	FUNCTION(phase_flag10),
+	FUNCTION(tsif1_en),
+	FUNCTION(mdp_vsync0),
+	FUNCTION(mdp_vsync1),
+	FUNCTION(mdp_vsync2),
+	FUNCTION(mdp_vsync3),
+	FUNCTION(blsp1_spi),
+	FUNCTION(tgu_ch0),
+	FUNCTION(qdss_cti1_b),
+	FUNCTION(tsif1_data),
+	FUNCTION(sdc4_cmd),
+	FUNCTION(tgu_ch1),
+	FUNCTION(phase_flag1),
+	FUNCTION(tsif2_error),
+	FUNCTION(sdc43),
+	FUNCTION(vfr_1),
+	FUNCTION(phase_flag2),
+	FUNCTION(tsif2_clk),
+	FUNCTION(sdc4_clk),
+	FUNCTION(tsif2_en),
+	FUNCTION(sdc42),
+	FUNCTION(sd_card),
+	FUNCTION(tsif2_data),
+	FUNCTION(sdc41),
+	FUNCTION(tsif2_sync),
+	FUNCTION(sdc40),
+	FUNCTION(phase_flag3),
+	FUNCTION(mdp_vsync_b),
+	FUNCTION(ldo_en),
+	FUNCTION(ldo_update),
+	FUNCTION(blsp_uart8),
+	FUNCTION(blsp11_i2c),
+	FUNCTION(prng_rosc),
+	FUNCTION(phase_flag5),
+	FUNCTION(uim2_data),
+	FUNCTION(uim2_clk),
+	FUNCTION(uim2_reset),
+	FUNCTION(uim2_present),
+	FUNCTION(uim1_data),
+	FUNCTION(uim1_clk),
+	FUNCTION(uim1_reset),
+	FUNCTION(uim1_present),
+	FUNCTION(uim_batt),
+	FUNCTION(phase_flag16),
+	FUNCTION(nav_dr),
+	FUNCTION(phase_flag11),
+	FUNCTION(phase_flag12),
+	FUNCTION(phase_flag13),
+	FUNCTION(atest_char),
+	FUNCTION(adsp_ext),
+	FUNCTION(phase_flag17),
+	FUNCTION(atest_char3),
+	FUNCTION(phase_flag18),
+	FUNCTION(atest_char2),
+	FUNCTION(phase_flag19),
+	FUNCTION(atest_char1),
+	FUNCTION(phase_flag20),
+	FUNCTION(atest_char0),
+	FUNCTION(phase_flag21),
+	FUNCTION(phase_flag22),
+	FUNCTION(phase_flag23),
+	FUNCTION(phase_flag24),
+	FUNCTION(phase_flag25),
+	FUNCTION(modem_tsync),
+	FUNCTION(nav_pps),
+	FUNCTION(phase_flag26),
+	FUNCTION(phase_flag27),
+	FUNCTION(qlink_request),
+	FUNCTION(phase_flag28),
+	FUNCTION(qlink_enable),
+	FUNCTION(phase_flag6),
+	FUNCTION(phase_flag29),
+	FUNCTION(phase_flag30),
+	FUNCTION(phase_flag31),
+	FUNCTION(pa_indicator),
+	FUNCTION(ssbi1),
+	FUNCTION(isense_dbg),
+	FUNCTION(mss_lte),
+};
+
+static const struct msm_pingroup msm8998_groups[] = {
+	PINGROUP(0, EAST, blsp_spi1, blsp_uart1_a, blsp_uim1_a, NA, NA, NA, NA,
+		 NA, NA),
+	PINGROUP(1, EAST, blsp_spi1, blsp_uart1_a, blsp_uim1_a, NA, NA, NA, NA,
+		 NA, NA),
+	PINGROUP(2, EAST, blsp_spi1, blsp_uart1_a, blsp_i2c1, NA, NA, NA, NA,
+		 NA, NA),
+	PINGROUP(3, EAST, blsp_spi1, blsp_uart1_a, blsp_i2c1, NA, NA, NA, NA,
+		 NA, NA),
+	PINGROUP(4, WEST, blsp_spi8, blsp_uart8_a, blsp_uim8_a, NA,
+		 qdss_cti0_b, NA, NA, NA, NA),
+	PINGROUP(5, WEST, blsp_spi8, blsp_uart8_a, blsp_uim8_a, NA,
+		 qdss_cti0_b, NA, NA, NA, NA),
+	PINGROUP(6, WEST, blsp_spi8, blsp_uart8_a, blsp_i2c8, NA, NA, NA, NA,
+		 NA, NA),
+	PINGROUP(7, WEST, blsp_spi8, blsp_uart8_a, blsp_i2c8, ddr_bist, NA,
+		 atest_tsens2, atest_usb1, NA, NA),
+	PINGROUP(8, EAST, blsp_spi4, blsp_uart1_b, blsp_uim1_b, NA, ddr_bist,
+		 NA, wlan1_adc1, atest_usb13, bimc_dte1),
+	PINGROUP(9, EAST, blsp_spi4, blsp_uart1_b, blsp_uim1_b, tsif1_sync,
+		 ddr_bist, NA, wlan1_adc0, atest_usb12, bimc_dte0),
+	PINGROUP(10, EAST, mdp_vsync_a, blsp_spi4, blsp_uart1_b, blsp_i2c4,
+		 ddr_bist, atest_gpsadc1, wlan2_adc1, atest_usb11, bimc_dte1),
+	PINGROUP(11, EAST, mdp_vsync_a, edp_lcd, blsp_spi4, blsp_uart1_b,
+		 blsp_i2c4, dbg_out, atest_gpsadc0, wlan2_adc0, atest_usb10),
+	PINGROUP(12, EAST, mdp_vsync, m_voc, NA, NA, NA, NA, NA, NA, NA),
+	PINGROUP(13, EAST, cam_mclk, pll_bypassnl, qdss_gpio0, NA, NA, NA, NA,
+		 NA, NA),
+	PINGROUP(14, EAST, cam_mclk, pll_reset, qdss_gpio1, NA, NA, NA, NA,
+		 NA, NA),
+	PINGROUP(15, EAST, cam_mclk, qdss_gpio2, NA, NA, NA, NA, NA, NA, NA),
+	PINGROUP(16, EAST, cam_mclk, qdss_gpio3, NA, NA, NA, NA, NA, NA, NA),
+	PINGROUP(17, EAST, cci_i2c, qdss_gpio4, NA, NA, NA, NA, NA, NA, NA),
+	PINGROUP(18, EAST, cci_i2c, phase_flag14, qdss_gpio5, NA, NA, NA, NA,
+		 NA, NA),
+	PINGROUP(19, EAST, cci_i2c, phase_flag15, qdss_gpio6, NA, NA, NA, NA,
+		 NA, NA),
+	PINGROUP(20, EAST, cci_i2c, qdss_gpio7, NA, NA, NA, NA, NA, NA, NA),
+	PINGROUP(21, EAST, cci_timer0, NA, qdss_gpio8, vsense_data0, NA, NA,
+		 NA, NA, NA),
+	PINGROUP(22, EAST, cci_timer1, NA, qdss_gpio, vsense_data1, NA, NA, NA,
+		 NA, NA),
+	PINGROUP(23, EAST, cci_timer2, blsp1_spi_b, qdss_gpio9, vsense_mode,
+		 NA, NA, NA, NA, NA),
+	PINGROUP(24, EAST, cci_timer3, cci_async, blsp1_spi_a, NA, qdss_gpio10,
+		 vsense_clkout, NA, NA, NA),
+	PINGROUP(25, EAST, cci_timer4, cci_async, blsp2_spi, NA, qdss_gpio11,
+		 NA, NA, NA, NA),
+	PINGROUP(26, EAST, cci_async, qdss_gpio12, NA, NA, NA, NA, NA, NA, NA),
+	PINGROUP(27, EAST, blsp1_spi_a, qdss_gpio13, NA, NA, NA, NA, NA, NA,
+		 NA),
+	PINGROUP(28, EAST, blsp1_spi_b, qdss_gpio14, NA, NA, NA, NA, NA, NA,
+		 NA),
+	PINGROUP(29, EAST, blsp2_spi, NA, qdss_gpio15, NA, NA, NA, NA, NA, NA),
+	PINGROUP(30, EAST, hdmi_rcv, blsp2_spi, qdss_gpio, NA, NA, NA, NA, NA,
+		 NA),
+	PINGROUP(31, EAST, hdmi_cec, blsp_spi2, blsp_uart2_a, blsp_uim2_a,
+		 pwr_modem, NA, NA, NA, NA),
+	PINGROUP(32, EAST, hdmi_ddc, blsp_spi2, blsp_uart2_a, blsp_i2c2,
+		 pwr_nav, NA, NA, NA, NA),
+	PINGROUP(33, EAST, hdmi_ddc, blsp_spi2, blsp_uart2_a, blsp_i2c2,
+		 pwr_crypto, NA, NA, NA, NA),
+	PINGROUP(34, EAST, hdmi_hot, edp_hot, blsp_spi2, blsp_uart2_a,
+		 blsp_uim2_a, NA, NA, NA, NA),
+	PINGROUP(35, NORTH, pci_e0, jitter_bist, NA, NA, NA, NA, NA, NA, NA),
+	PINGROUP(36, NORTH, pci_e0, agera_pll, NA, atest_tsens, NA, NA, NA, NA,
+		 NA),
+	PINGROUP(37, NORTH, agera_pll, NA, NA, NA, NA, NA, NA, NA, NA),
+	PINGROUP(38, WEST, usb_phy, NA, NA, NA, NA, NA, NA, NA, NA),
+	PINGROUP(39, WEST, lpass_slimbus, NA, NA, NA, NA, NA, NA, NA, NA),
+	PINGROUP(40, EAST, sd_write, tsif1_error, NA, NA, NA, NA, NA, NA, NA),
+	PINGROUP(41, EAST, blsp_spi6, blsp_uart3_b, blsp_uim3_b, NA,
+		 qdss_gpio6, NA, NA, NA, NA),
+	PINGROUP(42, EAST, blsp_spi6, blsp_uart3_b, blsp_uim3_b, NA,
+		 qdss_gpio7, NA, NA, NA, NA),
+	PINGROUP(43, EAST, blsp_spi6, blsp_uart3_b, blsp_i2c6, NA, qdss_gpio14,
+		 NA, NA, NA, NA),
+	PINGROUP(44, EAST, blsp_spi6, blsp_uart3_b, blsp_i2c6, NA, qdss_gpio15,
+		 NA, NA, NA, NA),
+	PINGROUP(45, EAST, blsp_spi3, blsp_uart3_a, blsp_uim3_a, NA, NA, NA,
+		 NA, NA, NA),
+	PINGROUP(46, EAST, blsp_spi3, blsp_uart3_a, blsp_uim3_a, NA, NA, NA,
+		 NA, NA, NA),
+	PINGROUP(47, EAST, blsp_spi3, blsp_uart3_a, blsp_i2c3, NA, NA, NA, NA,
+		 NA, NA),
+	PINGROUP(48, EAST, blsp_spi3, blsp_uart3_a, blsp_i2c3, NA, NA, NA, NA,
+		 NA, NA),
+	PINGROUP(49, NORTH, blsp_spi9, blsp_uart9_a, blsp_uim9_a, blsp10_spi_b,
+		 qdss_cti0_a, NA, NA, NA, NA),
+	PINGROUP(50, NORTH, blsp_spi9, blsp_uart9_a, blsp_uim9_a, blsp10_spi_b,
+		 qdss_cti0_a, NA, NA, NA, NA),
+	PINGROUP(51, NORTH, blsp_spi9, blsp_uart9_a, blsp_i2c9, blsp10_spi_a,
+		 NA, NA, NA, NA, NA),
+	PINGROUP(52, NORTH, blsp_spi9, blsp_uart9_a, blsp_i2c9, blsp10_spi_a,
+		 NA, NA, NA, NA, NA),
+	PINGROUP(53, WEST, blsp_spi7, blsp_uart7_a, blsp_uim7_a, NA, NA, NA,
+		 NA, NA, NA),
+	PINGROUP(54, WEST, blsp_spi7, blsp_uart7_a, blsp_uim7_a, NA, NA, NA,
+		 NA, NA, NA),
+	PINGROUP(55, WEST, blsp_spi7, blsp_uart7_a, blsp_i2c7, NA, NA, NA, NA,
+		 NA, NA),
+	PINGROUP(56, WEST, blsp_spi7, blsp_uart7_a, blsp_i2c7, NA, NA, NA, NA,
+		 NA, NA),
+	PINGROUP(57, WEST, qua_mi2s, blsp10_spi, gcc_gp1_a, NA, NA, NA, NA, NA,
+		 NA),
+	PINGROUP(58, WEST, qua_mi2s, blsp_spi11, blsp_uart8_b, blsp_uim8_b,
+		 gcc_gp2_a, NA, qdss_cti1_a, NA, NA),
+	PINGROUP(59, WEST, qua_mi2s, blsp_spi11, blsp_uart8_b, blsp_uim8_b,
+		 gcc_gp3_a, NA, qdss_cti1_a, NA, NA),
+	PINGROUP(60, WEST, qua_mi2s, blsp_spi11, blsp_uart8_b, blsp_i2c11,
+		 cri_trng0, NA, NA, NA, NA),
+	PINGROUP(61, WEST, qua_mi2s, blsp_spi11, blsp_uart8_b, blsp_i2c11,
+		 cri_trng1, NA, NA, NA, NA),
+	PINGROUP(62, WEST, qua_mi2s, cri_trng, NA, NA, NA, NA, NA, NA, NA),
+	PINGROUP(63, WEST, qua_mi2s, NA, NA, NA, NA, NA, NA, NA, NA),
+	PINGROUP(64, WEST, pri_mi2s, sp_cmu, NA, NA, NA, NA, NA, NA, NA),
+	PINGROUP(65, WEST, pri_mi2s, blsp_spi10, blsp_uart7_b, blsp_uim7_b, NA,
+		 NA, NA, NA, NA),
+	PINGROUP(66, WEST, pri_mi2s_ws, blsp_spi10, blsp_uart7_b, blsp_uim7_b,
+		 NA, NA, NA, NA, NA),
+	PINGROUP(67, WEST, pri_mi2s, blsp_spi10, blsp_uart7_b, blsp_i2c10, NA,
+		 NA, NA, NA, NA),
+	PINGROUP(68, WEST, pri_mi2s, blsp_spi10, blsp_uart7_b, blsp_i2c10, NA,
+		 NA, NA, NA, NA),
+	PINGROUP(69, WEST, spkr_i2s, audio_ref, NA, NA, NA, NA, NA, NA, NA),
+	PINGROUP(70, WEST, lpass_slimbus, spkr_i2s, blsp9_spi, NA, NA, NA, NA,
+		 NA, NA),
+	PINGROUP(71, WEST, lpass_slimbus, spkr_i2s, blsp9_spi, tsense_pwm1,
+		 tsense_pwm2, NA, NA, NA, NA),
+	PINGROUP(72, WEST, lpass_slimbus, spkr_i2s, blsp9_spi, NA, NA, NA, NA,
+		 NA, NA),
+	PINGROUP(73, WEST, btfm_slimbus, phase_flag0, NA, NA, NA, NA, NA, NA,
+		 NA),
+	PINGROUP(74, WEST, btfm_slimbus, ter_mi2s, phase_flag7, NA, NA, NA, NA,
+		 NA, NA),
+	PINGROUP(75, WEST, ter_mi2s, phase_flag8, qdss_gpio8, NA, NA, NA, NA,
+		 NA, NA),
+	PINGROUP(76, WEST, ter_mi2s, phase_flag9, qdss_gpio9, NA, NA, NA, NA,
+		 NA, NA),
+	PINGROUP(77, WEST, ter_mi2s, phase_flag4, qdss_gpio10, NA, NA, NA, NA,
+		 NA, NA),
+	PINGROUP(78, WEST, ter_mi2s, gcc_gp1_b, NA, NA, NA, NA, NA, NA, NA),
+	PINGROUP(79, WEST, sec_mi2s, NA, qdss_gpio11, NA, NA, NA, NA, NA, NA),
+	PINGROUP(80, WEST, sec_mi2s, NA, qdss_gpio12, NA, NA, NA, NA, NA, NA),
+	PINGROUP(81, WEST, sec_mi2s, blsp_spi12, blsp_uart9_b, blsp_uim9_b,
+		 gcc_gp2_b, NA, NA, NA, NA),
+	PINGROUP(82, WEST, sec_mi2s, blsp_spi12, blsp_uart9_b, blsp_uim9_b,
+		 gcc_gp3_b, NA, NA, NA, NA),
+	PINGROUP(83, WEST, sec_mi2s, blsp_spi12, blsp_uart9_b, blsp_i2c12, NA,
+		 NA, NA, NA, NA),
+	PINGROUP(84, WEST, blsp_spi12, blsp_uart9_b, blsp_i2c12, NA, NA, NA,
+		 NA, NA, NA),
+	PINGROUP(85, EAST, blsp_spi5, blsp_uart2_b, blsp_uim2_b, NA, NA, NA,
+		 NA, NA, NA),
+	PINGROUP(86, EAST, blsp_spi5, blsp_uart2_b, blsp_uim2_b, NA, NA, NA,
+		 NA, NA, NA),
+	PINGROUP(87, EAST, blsp_spi5, blsp_uart2_b, blsp_i2c5, NA, NA, NA, NA,
+		 NA, NA),
+	PINGROUP(88, EAST, blsp_spi5, blsp_uart2_b, blsp_i2c5, NA, NA, NA, NA,
+		 NA, NA),
+	PINGROUP(89, EAST, tsif1_clk, phase_flag10, NA, NA, NA, NA, NA, NA,
+		 NA),
+	PINGROUP(90, EAST, tsif1_en, mdp_vsync0, mdp_vsync1, mdp_vsync2,
+		 mdp_vsync3, blsp1_spi, tgu_ch0, qdss_cti1_b, NA),
+	PINGROUP(91, EAST, tsif1_data, sdc4_cmd, tgu_ch1, phase_flag1,
+		 qdss_cti1_b, NA, NA, NA, NA),
+	PINGROUP(92, EAST, tsif2_error, sdc43, vfr_1, phase_flag2, NA, NA, NA,
+		 NA, NA),
+	PINGROUP(93, EAST, tsif2_clk, sdc4_clk, NA, qdss_gpio13, NA, NA, NA,
+		 NA, NA),
+	PINGROUP(94, EAST, tsif2_en, sdc42, NA, NA, NA, NA, NA, NA, NA),
+	PINGROUP(95, EAST, tsif2_data, sdc41, NA, NA, NA, NA, NA, NA, NA),
+	PINGROUP(96, EAST, tsif2_sync, sdc40, phase_flag3, NA, NA, NA, NA, NA,
+		 NA),
+	PINGROUP(97, WEST, NA, mdp_vsync_b, ldo_en, NA, NA, NA, NA, NA, NA),
+	PINGROUP(98, WEST, NA, mdp_vsync_b, ldo_update, NA, NA, NA, NA, NA,
+		 NA),
+	PINGROUP(99, WEST, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	PINGROUP(100, WEST, NA, NA, blsp_uart8, NA, NA, NA, NA, NA, NA),
+	PINGROUP(101, WEST, NA, blsp_uart8, NA, NA, NA, NA, NA, NA, NA),
+	PINGROUP(102, WEST, NA, blsp11_i2c, prng_rosc, NA, NA, NA, NA, NA,
+		 NA),
+	PINGROUP(103, WEST, NA, blsp11_i2c, phase_flag5, NA, NA, NA, NA, NA,
+		 NA),
+	PINGROUP(104, WEST, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	PINGROUP(105, NORTH, uim2_data, NA, NA, NA, NA, NA, NA, NA, NA),
+	PINGROUP(106, NORTH, uim2_clk, NA, NA, NA, NA, NA, NA, NA, NA),
+	PINGROUP(107, NORTH, uim2_reset, NA, NA, NA, NA, NA, NA, NA, NA),
+	PINGROUP(108, NORTH, uim2_present, NA, NA, NA, NA, NA, NA, NA, NA),
+	PINGROUP(109, NORTH, uim1_data, NA, NA, NA, NA, NA, NA, NA, NA),
+	PINGROUP(110, NORTH, uim1_clk, NA, NA, NA, NA, NA, NA, NA, NA),
+	PINGROUP(111, NORTH, uim1_reset, NA, NA, NA, NA, NA, NA, NA, NA),
+	PINGROUP(112, NORTH, uim1_present, NA, NA, NA, NA, NA, NA, NA, NA),
+	PINGROUP(113, NORTH, uim_batt, NA, NA, NA, NA, NA, NA, NA, NA),
+	PINGROUP(114, WEST, NA, NA, phase_flag16, NA, NA, NA, NA, NA, NA),
+	PINGROUP(115, WEST, NA, nav_dr, phase_flag11, NA, NA, NA, NA, NA, NA),
+	PINGROUP(116, WEST, phase_flag12, NA, NA, NA, NA, NA, NA, NA, NA),
+	PINGROUP(117, EAST, phase_flag13, qdss_gpio0, atest_char, NA, NA, NA,
+		 NA, NA, NA),
+	PINGROUP(118, EAST, adsp_ext, phase_flag17, qdss_gpio1, atest_char3,
+		 NA, NA, NA, NA, NA),
+	PINGROUP(119, EAST, phase_flag18, qdss_gpio2, atest_char2, NA, NA, NA,
+		 NA, NA, NA),
+	PINGROUP(120, EAST, phase_flag19, qdss_gpio3, atest_char1, NA, NA, NA,
+		 NA, NA, NA),
+	PINGROUP(121, EAST, phase_flag20, qdss_gpio4, atest_char0, NA, NA, NA,
+		 NA, NA, NA),
+	PINGROUP(122, EAST, phase_flag21, qdss_gpio5, NA, NA, NA, NA, NA, NA,
+		 NA),
+	PINGROUP(123, EAST, phase_flag22, qdss_gpio, NA, NA, NA, NA, NA, NA,
+		 NA),
+	PINGROUP(124, EAST, phase_flag23, qdss_gpio, NA, NA, NA, NA, NA, NA,
+		 NA),
+	PINGROUP(125, EAST, phase_flag24, NA, NA, NA, NA, NA, NA, NA, NA),
+	PINGROUP(126, EAST, phase_flag25, NA, NA, NA, NA, NA, NA, NA, NA),
+	PINGROUP(127, WEST, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	PINGROUP(128, WEST, modem_tsync, nav_pps, phase_flag26, NA, NA, NA,
+		 NA, NA, NA),
+	PINGROUP(129, WEST, phase_flag27, NA, NA, NA, NA, NA, NA, NA, NA),
+	PINGROUP(130, NORTH, qlink_request, phase_flag28, NA, NA, NA, NA, NA,
+		 NA, NA),
+	PINGROUP(131, NORTH, qlink_enable, phase_flag6, NA, NA, NA, NA, NA,
+		 NA, NA),
+	PINGROUP(132, WEST, NA, phase_flag29, NA, NA, NA, NA, NA, NA, NA),
+	PINGROUP(133, WEST, phase_flag30, NA, NA, NA, NA, NA, NA, NA, NA),
+	PINGROUP(134, WEST, phase_flag31, NA, NA, NA, NA, NA, NA, NA, NA),
+	PINGROUP(135, WEST, NA, pa_indicator, NA, NA, NA, NA, NA, NA, NA),
+	PINGROUP(136, WEST, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	PINGROUP(137, WEST, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	PINGROUP(138, WEST, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	PINGROUP(139, WEST, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	PINGROUP(140, WEST, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	PINGROUP(141, WEST, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	PINGROUP(142, WEST, NA, ssbi1, NA, NA, NA, NA, NA, NA, NA),
+	PINGROUP(143, WEST, isense_dbg, NA, NA, NA, NA, NA, NA, NA, NA),
+	PINGROUP(144, WEST, mss_lte, NA, NA, NA, NA, NA, NA, NA, NA),
+	PINGROUP(145, WEST, mss_lte, NA, NA, NA, NA, NA, NA, NA, NA),
+	PINGROUP(146, WEST, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	PINGROUP(147, WEST, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	PINGROUP(148, WEST, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	PINGROUP(149, WEST, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	SDC_QDSD_PINGROUP(sdc2_clk, 0x999000, 14, 6),
+	SDC_QDSD_PINGROUP(sdc2_cmd, 0x999000, 11, 3),
+	SDC_QDSD_PINGROUP(sdc2_data, 0x999000, 9, 0),
+	UFS_RESET(ufs_reset, 0x19d000),
+};
+
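+/* SoC description consumed by the common MSM TLMM pinctrl core. */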
+static const struct msm_pinctrl_soc_data msm8998_pinctrl = {
+	.pins = msm8998_pins,
+	.npins = ARRAY_SIZE(msm8998_pins),
+	.functions = msm8998_functions,
+	.nfunctions = ARRAY_SIZE(msm8998_functions),
+	.groups = msm8998_groups,
+	.ngroups = ARRAY_SIZE(msm8998_groups),
+	.ngpios = 153,
+};
+
+static int msm8998_pinctrl_probe(struct platform_device *pdev)
+{
+	return msm_pinctrl_probe(pdev, &msm8998_pinctrl);
+}
+
+static const struct of_device_id msm8998_pinctrl_of_match[] = {
+	{ .compatible = "qcom,msm8998-pinctrl", },
+	{ },
+};
+
+static struct platform_driver msm8998_pinctrl_driver = {
+	.driver = {
+		.name = "msm8998-pinctrl",
+		.owner = THIS_MODULE,
+		.of_match_table = msm8998_pinctrl_of_match,
+	},
+	.probe = msm8998_pinctrl_probe,
+	.remove = msm_pinctrl_remove,
+};
+
+static int __init msm8998_pinctrl_init(void)
+{
+	return platform_driver_register(&msm8998_pinctrl_driver);
+}
+arch_initcall(msm8998_pinctrl_init);
+
+static void __exit msm8998_pinctrl_exit(void)
+{
+	platform_driver_unregister(&msm8998_pinctrl_driver);
+}
+module_exit(msm8998_pinctrl_exit);
+
+MODULE_DESCRIPTION("QTI msm8998 pinctrl driver");
+MODULE_LICENSE("GPL v2");
+MODULE_DEVICE_TABLE(of, msm8998_pinctrl_of_match);
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/pinctrl/qcom/pinctrl-wcd.c	2019-01-22 16:16:26.115270060 +0100
@@ -0,0 +1,443 @@
+/*
+ * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/gpio.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/pinctrl/pinconf-generic.h>
+#include <linux/pinctrl/pinconf.h>
+#include <linux/pinctrl/pinmux.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/mfd/wcd934x/registers.h>
+
+#include "../core.h"
+#include "../pinctrl-utils.h"
+
+#define WCD_REG_DIR_CTL WCD934X_CHIP_TIER_CTRL_GPIO_CTL_OE
+#define WCD_REG_VAL_CTL WCD934X_CHIP_TIER_CTRL_GPIO_CTL_DATA
+#define WCD_GPIO_PULL_UP       1
+#define WCD_GPIO_PULL_DOWN     2
+#define WCD_GPIO_BIAS_DISABLE  3
+#define WCD_GPIO_STRING_LEN    20
+
+/**
+ * struct wcd_gpio_pad - keep current GPIO settings
+ * @offset: offset of gpio.
+ * @is_valid: Set to false, when GPIO in high Z state.
+ * @value: value of a pin
+ * @output_enabled: Set to true if GPIO is output and false if it is input
+ * @pullup: Constant current which flow through GPIO output buffer.
+ * @strength: Drive strength of a pin
+ */
+struct wcd_gpio_pad {
+	u16  offset;
+	bool is_valid;
+	bool value;
+	bool output_enabled;
+	unsigned int pullup;
+	unsigned int strength;
+};
+
+struct wcd_gpio_priv {
+	struct device *dev;
+	struct regmap *map;
+	struct pinctrl_dev *ctrl;
+	struct gpio_chip chip;
+};
+
+static inline struct wcd_gpio_priv *to_gpio_state(struct gpio_chip *chip)
+{
+	return container_of(chip, struct wcd_gpio_priv, chip);
+}
+
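+/*
+ * Each WCD GPIO is a single bit in a shared control register; the pad
+ * offset selects that bit within WCD_REG_DIR_CTL/WCD_REG_VAL_CTL.
+ */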
+static int wcd_gpio_read(struct wcd_gpio_priv *priv_data,
+			  struct wcd_gpio_pad *pad, unsigned int addr)
+{
+	unsigned int val;
+	int ret;
+
+	ret = regmap_read(priv_data->map, addr, &val);
+	if (ret < 0)
+		dev_err(priv_data->dev, "%s: read 0x%x failed\n",
+			__func__, addr);
+	else
+		ret = (val >> pad->offset) & 0x1;
+
+	return ret;
+}
+
+static int wcd_gpio_write(struct wcd_gpio_priv *priv_data,
+			   struct wcd_gpio_pad *pad, unsigned int addr,
+			   unsigned int val)
+{
+	int ret;
+
+	ret = regmap_update_bits(priv_data->map, addr, (1 << pad->offset),
+					val << pad->offset);
+	if (ret < 0)
+		dev_err(priv_data->dev, "write 0x%x failed\n", addr);
+
+	return ret;
+}
+
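+/* Every pin is exposed to the pinctrl core as its own single-pin group. */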
+static int wcd_get_groups_count(struct pinctrl_dev *pctldev)
+{
+	return pctldev->desc->npins;
+}
+
+static const char *wcd_get_group_name(struct pinctrl_dev *pctldev,
+		unsigned pin)
+{
+	return pctldev->desc->pins[pin].name;
+}
+
+static int wcd_get_group_pins(struct pinctrl_dev *pctldev, unsigned pin,
+		const unsigned **pins, unsigned *num_pins)
+{
+	*pins = &pctldev->desc->pins[pin].number;
+	*num_pins = 1;
+	return 0;
+}
+
+static const struct pinctrl_ops wcd_pinctrl_ops = {
+	.get_groups_count       = wcd_get_groups_count,
+	.get_group_name         = wcd_get_group_name,
+	.get_group_pins         = wcd_get_group_pins,
+	.dt_node_to_map         = pinconf_generic_dt_node_to_map_group,
+	.dt_free_map            = pinctrl_utils_dt_free_map,
+};
+
+static int wcd_config_get(struct pinctrl_dev *pctldev,
+				unsigned int pin, unsigned long *config)
+{
+	unsigned param = pinconf_to_config_param(*config);
+	struct wcd_gpio_pad *pad;
+	unsigned arg;
+
+	pad = pctldev->desc->pins[pin].drv_data;
+
+	switch (param) {
+	case PIN_CONFIG_BIAS_PULL_DOWN:
+		arg = pad->pullup == WCD_GPIO_PULL_DOWN;
+		break;
+	case PIN_CONFIG_BIAS_DISABLE:
+		arg = pad->pullup == WCD_GPIO_BIAS_DISABLE;
+		break;
+	case PIN_CONFIG_BIAS_PULL_UP:
+		arg = pad->pullup == WCD_GPIO_PULL_UP;
+		break;
+	case PIN_CONFIG_BIAS_HIGH_IMPEDANCE:
+		arg = !pad->is_valid;
+		break;
+	case PIN_CONFIG_INPUT_ENABLE:
+		arg = !pad->output_enabled;
+		break;
+	case PIN_CONFIG_OUTPUT:
+		arg = pad->value;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	*config = pinconf_to_config_packed(param, arg);
+	return 0;
+}
+
+static int wcd_config_set(struct pinctrl_dev *pctldev, unsigned int pin,
+				unsigned long *configs, unsigned nconfs)
+{
+	struct wcd_gpio_priv *priv_data = pinctrl_dev_get_drvdata(pctldev);
+	struct wcd_gpio_pad *pad;
+	unsigned param, arg;
+	int i, ret;
+
+	pad = pctldev->desc->pins[pin].drv_data;
+
+	for (i = 0; i < nconfs; i++) {
+		param = pinconf_to_config_param(configs[i]);
+		arg = pinconf_to_config_argument(configs[i]);
+
+		dev_dbg(priv_data->dev, "%s: param: %d arg: %d",
+			__func__, param, arg);
+
+		switch (param) {
+		case PIN_CONFIG_BIAS_DISABLE:
+			pad->pullup = WCD_GPIO_BIAS_DISABLE;
+			break;
+		case PIN_CONFIG_BIAS_PULL_UP:
+			pad->pullup = WCD_GPIO_PULL_UP;
+			break;
+		case PIN_CONFIG_BIAS_PULL_DOWN:
+			pad->pullup = WCD_GPIO_PULL_DOWN;
+			break;
+		case PIN_CONFIG_BIAS_HIGH_IMPEDANCE:
+			pad->is_valid = false;
+			break;
+		case PIN_CONFIG_INPUT_ENABLE:
+			pad->output_enabled = false;
+			break;
+		case PIN_CONFIG_OUTPUT:
+			pad->output_enabled = true;
+			pad->value = arg;
+			break;
+		case PIN_CONFIG_DRIVE_STRENGTH:
+			pad->strength = arg;
+			break;
+		default:
+			ret = -EINVAL;
+			goto done;
+		}
+	}
+
+	if (pad->output_enabled) {
+		ret = wcd_gpio_write(priv_data, pad, WCD_REG_DIR_CTL,
+				     pad->output_enabled);
+		if (ret < 0)
+			goto done;
+		ret = wcd_gpio_write(priv_data, pad, WCD_REG_VAL_CTL,
+				     pad->value);
+	} else {
+		ret = wcd_gpio_write(priv_data, pad, WCD_REG_DIR_CTL,
+				     pad->output_enabled);
+	}
+done:
+	return ret;
+}
+
+static const struct pinconf_ops wcd_pinconf_ops = {
+	.is_generic  = true,
+	.pin_config_group_get = wcd_config_get,
+	.pin_config_group_set = wcd_config_set,
+};
+
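+/*
+ * gpiolib callbacks are layered on the pinconf operations: direction and
+ * output-value changes are all funneled through wcd_config_set().
+ */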
+static int wcd_gpio_direction_input(struct gpio_chip *chip, unsigned pin)
+{
+	struct wcd_gpio_priv *priv_data = to_gpio_state(chip);
+	unsigned long config;
+
+	config = pinconf_to_config_packed(PIN_CONFIG_INPUT_ENABLE, 1);
+
+	return wcd_config_set(priv_data->ctrl, pin, &config, 1);
+}
+
+static int wcd_gpio_direction_output(struct gpio_chip *chip,
+				      unsigned pin, int val)
+{
+	struct wcd_gpio_priv *priv_data = to_gpio_state(chip);
+	unsigned long config;
+
+	config = pinconf_to_config_packed(PIN_CONFIG_OUTPUT, val);
+
+	return wcd_config_set(priv_data->ctrl, pin, &config, 1);
+}
+
+static int wcd_gpio_get(struct gpio_chip *chip, unsigned pin)
+{
+	struct wcd_gpio_priv *priv_data = to_gpio_state(chip);
+	struct wcd_gpio_pad *pad;
+	int value;
+
+	pad = priv_data->ctrl->desc->pins[pin].drv_data;
+
+	if (!pad->is_valid)
+		return -EINVAL;
+
+	value = wcd_gpio_read(priv_data, pad, WCD_REG_VAL_CTL);
+	return value;
+}
+
+static void wcd_gpio_set(struct gpio_chip *chip, unsigned pin, int value)
+{
+	struct wcd_gpio_priv *priv_data = to_gpio_state(chip);
+	unsigned long config;
+
+	config = pinconf_to_config_packed(PIN_CONFIG_OUTPUT, value);
+
+	wcd_config_set(priv_data->ctrl, pin, &config, 1);
+}
+
+static const struct gpio_chip wcd_gpio_chip = {
+	.direction_input  = wcd_gpio_direction_input,
+	.direction_output = wcd_gpio_direction_output,
+	.get = wcd_gpio_get,
+	.set = wcd_gpio_set,
+};
+
+static int wcd_pinctrl_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct pinctrl_pin_desc *pindesc;
+	struct pinctrl_desc *pctrldesc;
+	struct wcd_gpio_pad *pad, *pads;
+	struct wcd_gpio_priv *priv_data;
+	int ret, i, j;
+	u32 npins;
+	char **name;
+
+	ret = of_property_read_u32(dev->of_node, "qcom,num-gpios", &npins);
+	if (ret) {
+		dev_err(dev, "%s: Looking up %s property in node %s failed\n",
+			__func__, "qcom,num-gpios", dev->of_node->full_name);
+		ret = -EINVAL;
+		goto err_priv_alloc;
+	}
+	if (!npins) {
+		dev_err(dev, "%s: number of pins is 0\n", __func__);
+		ret = -EINVAL;
+		goto err_priv_alloc;
+	}
+
+	priv_data = devm_kzalloc(dev, sizeof(*priv_data), GFP_KERNEL);
+	if (!priv_data) {
+		ret = -ENOMEM;
+		goto err_priv_alloc;
+	}
+
+	priv_data->dev = dev;
+	priv_data->map = dev_get_regmap(dev->parent, NULL);
+	if (!priv_data->map) {
+		dev_err(dev, "%s: failed to get regmap\n", __func__);
+		ret = -EINVAL;
+		goto err_regmap;
+	}
+
+	pindesc = devm_kcalloc(dev, npins, sizeof(*pindesc), GFP_KERNEL);
+	if (!pindesc) {
+		ret = -ENOMEM;
+		goto err_pindesc_alloc;
+	}
+
+	pads = devm_kcalloc(dev, npins, sizeof(*pads), GFP_KERNEL);
+	if (!pads) {
+		ret = -ENOMEM;
+		goto err_pads_alloc;
+	}
+
+	pctrldesc = devm_kzalloc(dev, sizeof(*pctrldesc), GFP_KERNEL);
+	if (!pctrldesc) {
+		ret = -ENOMEM;
+		goto err_pinctrl_alloc;
+	}
+
+	pctrldesc->pctlops = &wcd_pinctrl_ops;
+	pctrldesc->confops = &wcd_pinconf_ops;
+	pctrldesc->owner = THIS_MODULE;
+	pctrldesc->name = dev_name(dev);
+	pctrldesc->pins = pindesc;
+	pctrldesc->npins = npins;
+
+	name = devm_kcalloc(dev, npins, sizeof(char *), GFP_KERNEL);
+	if (!name) {
+		ret = -ENOMEM;
+		goto err_name_alloc;
+	}
+	for (i = 0; i < npins; i++, pindesc++) {
+		name[i] = devm_kzalloc(dev, sizeof(char) * WCD_GPIO_STRING_LEN,
+				       GFP_KERNEL);
+		if (!name[i]) {
+			ret = -ENOMEM;
+			goto err_pin;
+		}
+		pad = &pads[i];
+		pindesc->drv_data = pad;
+		pindesc->number = i;
+		snprintf(name[i], (WCD_GPIO_STRING_LEN - 1), "gpio%d", (i+1));
+		pindesc->name = name[i];
+		pad->offset = i;
+		pad->is_valid  = true;
+	}
+
+	priv_data->chip = wcd_gpio_chip;
+	priv_data->chip.dev = dev;
+	priv_data->chip.base = -1;
+	priv_data->chip.ngpio = npins;
+	priv_data->chip.label = dev_name(dev);
+	priv_data->chip.of_gpio_n_cells = 2;
+	priv_data->chip.can_sleep = false;
+
+	priv_data->ctrl = pinctrl_register(pctrldesc, dev, priv_data);
+	if (IS_ERR(priv_data->ctrl)) {
+		dev_err(dev, "%s: failed to register to pinctrl\n", __func__);
+		ret = PTR_ERR(priv_data->ctrl);
+		goto err_pin;
+	}
+
+	ret = gpiochip_add(&priv_data->chip);
+	if (ret) {
+		dev_err(dev, "%s: can't add gpio chip\n", __func__);
+		goto err_chip;
+	}
+
+	ret = gpiochip_add_pin_range(&priv_data->chip, dev_name(dev), 0, 0,
+				     npins);
+	if (ret) {
+		dev_err(dev, "%s: failed to add pin range\n", __func__);
+		goto err_range;
+	}
+	platform_set_drvdata(pdev, priv_data);
+
+	return 0;
+
+err_range:
+	gpiochip_remove(&priv_data->chip);
+err_chip:
+	pinctrl_unregister(priv_data->ctrl);
+err_pin:
+	for (j = 0; j < i; j++)
+		devm_kfree(dev, name[j]);
+	devm_kfree(dev, name);
+err_name_alloc:
+	devm_kfree(dev, pctrldesc);
+err_pinctrl_alloc:
+	devm_kfree(dev, pads);
+err_pads_alloc:
+	devm_kfree(dev, pindesc);
+err_pindesc_alloc:
+err_regmap:
+	devm_kfree(dev, priv_data);
+err_priv_alloc:
+	return ret;
+}
+
+static int wcd_pinctrl_remove(struct platform_device *pdev)
+{
+	struct wcd_gpio_priv *priv_data = platform_get_drvdata(pdev);
+
+	gpiochip_remove(&priv_data->chip);
+	pinctrl_unregister(priv_data->ctrl);
+
+	return 0;
+}
+
+static const struct of_device_id wcd_pinctrl_of_match[] = {
+	{ .compatible = "qcom,wcd-pinctrl" },
+	{ },
+};
+
+MODULE_DEVICE_TABLE(of, wcd_pinctrl_of_match);
+
+static struct platform_driver wcd_pinctrl_driver = {
+	.driver = {
+		   .name = "qcom-wcd-pinctrl",
+		   .of_match_table = wcd_pinctrl_of_match,
+	},
+	.probe = wcd_pinctrl_probe,
+	.remove = wcd_pinctrl_remove,
+};
+
+module_platform_driver(wcd_pinctrl_driver);
+
+MODULE_DESCRIPTION("Qualcomm Technologies, Inc WCD GPIO pin control driver");
+MODULE_LICENSE("GPL v2");
diff -Nruw linux-4.4.115-fbx/drivers/platform/msm./ep_pcie/Makefile linux-4.4.115-fbx/drivers/platform/msm/ep_pcie/Makefile
--- linux-4.4.115-fbx/drivers/platform/msm./ep_pcie/Makefile	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/platform/msm/ep_pcie/Makefile	2019-01-22 16:16:26.147270350 +0100
@@ -0,0 +1,3 @@
+obj-$(CONFIG_EP_PCIE) += ep_pcie.o
+obj-$(CONFIG_EP_PCIE_HW) += ep_pcie_core.o ep_pcie_phy.o ep_pcie_dbg.o
+
diff -Nruw linux-4.4.115-fbx/drivers/platform/msm./gpio-usbdetect.c linux-4.4.115-fbx/drivers/platform/msm/gpio-usbdetect.c
--- linux-4.4.115-fbx/drivers/platform/msm./gpio-usbdetect.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/platform/msm/gpio-usbdetect.c	2019-01-22 16:16:26.147270350 +0100
@@ -0,0 +1,252 @@
+/* Copyright (c) 2013-2015,2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/of_platform.h>
+#include <linux/interrupt.h>
+#include <linux/of_gpio.h>
+#include <linux/gpio.h>
+#include <linux/extcon.h>
+#include <linux/regulator/consumer.h>
+
+struct gpio_usbdetect {
+	struct platform_device	*pdev;
+	struct regulator	*vin;
+	int			vbus_det_irq;
+	int			id_det_irq;
+	int			gpio;
+	struct extcon_dev	*extcon_dev;
+	int			vbus_state;
+	bool			id_state;
+};
+
+static const unsigned int gpio_usb_extcon_table[] = {
+	EXTCON_USB,
+	EXTCON_USB_HOST,
+	EXTCON_USB_CC,
+	EXTCON_USB_SPEED,
+	EXTCON_NONE,
+};
+
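+/* VBUS IRQ handler: sample the detect GPIO and report the state via extcon. */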
+static irqreturn_t gpio_usbdetect_vbus_irq(int irq, void *data)
+{
+	struct gpio_usbdetect *usb = data;
+
+	usb->vbus_state = gpio_get_value(usb->gpio);
+	if (usb->vbus_state) {
+		dev_dbg(&usb->pdev->dev, "setting vbus notification\n");
+		extcon_set_cable_state_(usb->extcon_dev, EXTCON_USB_SPEED, 1);
+		extcon_set_cable_state_(usb->extcon_dev, EXTCON_USB, 1);
+	} else {
+		dev_dbg(&usb->pdev->dev, "setting vbus removed notification\n");
+		extcon_set_cable_state_(usb->extcon_dev, EXTCON_USB, 0);
+	}
+
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t gpio_usbdetect_id_irq(int irq, void *data)
+{
+	struct gpio_usbdetect *usb = data;
+	int ret;
+
+	ret = irq_get_irqchip_state(irq, IRQCHIP_STATE_LINE_LEVEL,
+			&usb->id_state);
+	if (ret < 0) {
+		dev_err(&usb->pdev->dev, "unable to read ID IRQ LINE\n");
+		return IRQ_HANDLED;
+	}
+
+	return IRQ_WAKE_THREAD;
+}
+
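+/*
+ * Threaded half of the ID IRQ: the hard handler latched the line level in
+ * usb->id_state; switch between USB host and peripheral roles accordingly.
+ */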
+static irqreturn_t gpio_usbdetect_id_irq_thread(int irq, void *data)
+{
+	struct gpio_usbdetect *usb = data;
+	bool curr_id_state;
+	static int prev_id_state = -EINVAL;
+
+	curr_id_state = usb->id_state;
+	if (curr_id_state == prev_id_state) {
+		dev_dbg(&usb->pdev->dev, "no change in ID state\n");
+		return IRQ_HANDLED;
+	}
+
+	if (curr_id_state) {
+		dev_dbg(&usb->pdev->dev, "stopping usb host\n");
+		extcon_set_cable_state_(usb->extcon_dev, EXTCON_USB_HOST, 0);
+		enable_irq(usb->vbus_det_irq);
+	} else {
+		dev_dbg(&usb->pdev->dev, "starting usb HOST\n");
+		disable_irq(usb->vbus_det_irq);
+		extcon_set_cable_state_(usb->extcon_dev, EXTCON_USB_SPEED, 1);
+		extcon_set_cable_state_(usb->extcon_dev, EXTCON_USB_HOST, 1);
+	}
+
+	prev_id_state = curr_id_state;
+	return IRQ_HANDLED;
+}
+
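+/* Bitmap 0x3 marks the first two cables (USB, USB-HOST) as mutually exclusive. */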
+static const u32 gpio_usb_extcon_exclusive[] = {0x3, 0};
+
+static int gpio_usbdetect_probe(struct platform_device *pdev)
+{
+	struct gpio_usbdetect *usb;
+	int rc;
+
+	usb = devm_kzalloc(&pdev->dev, sizeof(*usb), GFP_KERNEL);
+	if (!usb)
+		return -ENOMEM;
+
+	usb->pdev = pdev;
+
+	usb->extcon_dev = devm_extcon_dev_allocate(&pdev->dev,
+			gpio_usb_extcon_table);
+	if (IS_ERR(usb->extcon_dev)) {
+		dev_err(&pdev->dev, "failed to allocate an extcon device\n");
+		return PTR_ERR(usb->extcon_dev);
+	}
+
+	usb->extcon_dev->mutually_exclusive = gpio_usb_extcon_exclusive;
+	rc = devm_extcon_dev_register(&pdev->dev, usb->extcon_dev);
+	if (rc) {
+		dev_err(&pdev->dev, "failed to register extcon device\n");
+		return rc;
+	}
+
+	if (of_get_property(pdev->dev.of_node, "vin-supply", NULL)) {
+		usb->vin = devm_regulator_get(&pdev->dev, "vin");
+		if (IS_ERR(usb->vin)) {
+			dev_err(&pdev->dev, "Failed to get VIN regulator: %ld\n",
+				PTR_ERR(usb->vin));
+			return PTR_ERR(usb->vin);
+		}
+	}
+
+	if (usb->vin) {
+		rc = regulator_enable(usb->vin);
+		if (rc) {
+			dev_err(&pdev->dev, "Failed to enable VIN regulator: %d\n",
+				rc);
+			return rc;
+		}
+	}
+
+	usb->gpio = of_get_named_gpio(pdev->dev.of_node,
+				"qcom,vbus-det-gpio", 0);
+	if (usb->gpio < 0) {
+		dev_err(&pdev->dev, "Failed to get gpio: %d\n", usb->gpio);
+		rc = usb->gpio;
+		goto error;
+	}
+
+	rc = devm_gpio_request(&pdev->dev, usb->gpio, "vbus-det-gpio");
+	if (rc < 0) {
+		dev_err(&pdev->dev, "Failed to request gpio: %d\n", rc);
+		goto error;
+	}
+
+	usb->vbus_det_irq = gpio_to_irq(usb->gpio);
+	if (usb->vbus_det_irq < 0) {
+		dev_err(&pdev->dev, "get vbus_det_irq failed\n");
+		rc = usb->vbus_det_irq;
+		goto error;
+	}
+
+	rc = devm_request_threaded_irq(&pdev->dev, usb->vbus_det_irq,
+				NULL, gpio_usbdetect_vbus_irq,
+			      IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING |
+			      IRQF_ONESHOT, "vbus_det_irq", usb);
+	if (rc) {
+		dev_err(&pdev->dev, "request for vbus_det_irq failed: %d\n",
+			rc);
+		goto error;
+	}
+
+	usb->id_det_irq = platform_get_irq_byname(pdev, "pmic_id_irq");
+	if (usb->id_det_irq < 0) {
+		dev_err(&pdev->dev, "get id_det_irq failed\n");
+		rc = usb->id_det_irq;
+		goto error;
+	}
+
+	rc = devm_request_threaded_irq(&pdev->dev, usb->id_det_irq,
+				gpio_usbdetect_id_irq,
+				gpio_usbdetect_id_irq_thread,
+				IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING |
+				IRQF_ONESHOT, "id_det_irq", usb);
+	if (rc) {
+		dev_err(&pdev->dev, "request for id_det_irq failed: %d\n", rc);
+		goto error;
+	}
+
+	enable_irq_wake(usb->vbus_det_irq);
+	enable_irq_wake(usb->id_det_irq);
+	dev_set_drvdata(&pdev->dev, usb);
+
+	if (usb->id_det_irq) {
+		gpio_usbdetect_id_irq(usb->id_det_irq, usb);
+		if (!usb->id_state) {
+			gpio_usbdetect_id_irq_thread(usb->id_det_irq, usb);
+			return 0;
+		}
+	}
+
+	/* Read and report initial VBUS state */
+	gpio_usbdetect_vbus_irq(usb->vbus_det_irq, usb);
+
+	return 0;
+
+error:
+	if (usb->vin)
+		regulator_disable(usb->vin);
+	return rc;
+}
+
+static int gpio_usbdetect_remove(struct platform_device *pdev)
+{
+	struct gpio_usbdetect *usb = dev_get_drvdata(&pdev->dev);
+
+	disable_irq_wake(usb->vbus_det_irq);
+	disable_irq(usb->vbus_det_irq);
+	disable_irq_wake(usb->id_det_irq);
+	disable_irq(usb->id_det_irq);
+	if (usb->vin)
+		regulator_disable(usb->vin);
+
+	return 0;
+}
+
+static const struct of_device_id of_match_table[] = {
+	{ .compatible = "qcom,gpio-usbdetect", },
+	{}
+};
+
+static struct platform_driver gpio_usbdetect_driver = {
+	.driver		= {
+		.name	= "qcom,gpio-usbdetect",
+		.of_match_table = of_match_table,
+	},
+	.probe		= gpio_usbdetect_probe,
+	.remove		= gpio_usbdetect_remove,
+};
+
+module_driver(gpio_usbdetect_driver, platform_driver_register,
+		platform_driver_unregister);
+
+MODULE_DESCRIPTION("GPIO USB VBUS Detection driver");
+MODULE_LICENSE("GPL v2");
diff -Nruw linux-4.4.115-fbx/drivers/platform/msm./gsi/Makefile linux-4.4.115-fbx/drivers/platform/msm/gsi/Makefile
--- linux-4.4.115-fbx/drivers/platform/msm./gsi/Makefile	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/platform/msm/gsi/Makefile	2019-01-22 16:16:26.147270350 +0100
@@ -0,0 +1 @@
+obj-$(CONFIG_GSI) += gsi.o gsi_dbg.o
diff -Nruw linux-4.4.115-fbx/drivers/platform/msm./ipa/ipa_clients/Makefile linux-4.4.115-fbx/drivers/platform/msm/ipa/ipa_clients/Makefile
--- linux-4.4.115-fbx/drivers/platform/msm./ipa/ipa_clients/Makefile	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/platform/msm/ipa/ipa_clients/Makefile	2019-01-22 16:16:26.147270350 +0100
@@ -0,0 +1,2 @@
+obj-$(CONFIG_IPA3) += ipa_usb.o odu_bridge.o ipa_mhi_client.o ipa_uc_offload.o
+obj-$(CONFIG_IPA) += odu_bridge.o ipa_mhi_client.o ipa_uc_offload.o
diff -Nruw linux-4.4.115-fbx/drivers/platform/msm./ipa/ipa_v2/Makefile linux-4.4.115-fbx/drivers/platform/msm/ipa/ipa_v2/Makefile
--- linux-4.4.115-fbx/drivers/platform/msm./ipa/ipa_v2/Makefile	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/platform/msm/ipa/ipa_v2/Makefile	2019-01-22 16:16:26.151270386 +0100
@@ -0,0 +1,8 @@
+CFLAGS_ipa.o = -I$(src)
+
+obj-$(CONFIG_IPA) += ipat.o
+ipat-y := ipa.o ipa_debugfs.o ipa_hdr.o ipa_flt.o ipa_rt.o ipa_dp.o ipa_client.o \
+	ipa_utils.o ipa_nat.o ipa_intf.o teth_bridge.o ipa_interrupts.o \
+	ipa_uc.o ipa_uc_wdi.o ipa_dma.o ipa_uc_mhi.o ipa_mhi.o ipa_uc_ntn.o
+
+obj-$(CONFIG_RMNET_IPA) += rmnet_ipa.o ipa_qmi_service_v01.o ipa_qmi_service.o rmnet_ipa_fd_ioctl.o
diff -Nruw linux-4.4.115-fbx/drivers/platform/msm./ipa/ipa_v3/ipahal/Makefile linux-4.4.115-fbx/drivers/platform/msm/ipa/ipa_v3/ipahal/Makefile
--- linux-4.4.115-fbx/drivers/platform/msm./ipa/ipa_v3/ipahal/Makefile	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/platform/msm/ipa/ipa_v3/ipahal/Makefile	2019-01-22 16:16:26.175270603 +0100
@@ -0,0 +1,3 @@
+obj-$(CONFIG_IPA3) += ipa_hal.o
+
+ipa_hal-y := ipahal.o ipahal_reg.o ipahal_fltrt.o
diff -Nruw linux-4.4.115-fbx/drivers/platform/msm./ipa/ipa_v3/Makefile linux-4.4.115-fbx/drivers/platform/msm/ipa/ipa_v3/Makefile
--- linux-4.4.115-fbx/drivers/platform/msm./ipa/ipa_v3/Makefile	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/platform/msm/ipa/ipa_v3/Makefile	2019-01-22 16:16:26.171270567 +0100
@@ -0,0 +1,10 @@
+obj-$(CONFIG_IPA3) += ipahal/
+
+obj-$(CONFIG_IPA3) += ipat.o
+ipat-y := ipa.o ipa_debugfs.o ipa_hdr.o ipa_flt.o ipa_rt.o ipa_dp.o ipa_client.o \
+	ipa_utils.o ipa_nat.o ipa_intf.o teth_bridge.o ipa_interrupts.o \
+	ipa_uc.o ipa_uc_wdi.o ipa_dma.o ipa_uc_mhi.o ipa_mhi.o ipa_uc_ntn.o
+
+CFLAGS_ipa.o = -I$(src)
+
+obj-$(CONFIG_RMNET_IPA3) += rmnet_ipa.o ipa_qmi_service_v01.o ipa_qmi_service.o rmnet_ipa_fd_ioctl.o
diff -Nruw linux-4.4.115-fbx/drivers/platform/msm./ipa/Makefile linux-4.4.115-fbx/drivers/platform/msm/ipa/Makefile
--- linux-4.4.115-fbx/drivers/platform/msm./ipa/Makefile	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/platform/msm/ipa/Makefile	2019-01-22 16:16:26.147270350 +0100
@@ -0,0 +1,5 @@
+obj-$(CONFIG_IPA) += ipa_v2/ ipa_clients/ ipa_common
+obj-$(CONFIG_IPA3) += ipa_v3/ ipa_clients/ ipa_common
+obj-$(CONFIG_IPA_UT) += test/
+
+ipa_common += ipa_api.o ipa_rm.o ipa_rm_dependency_graph.o ipa_rm_peers_list.o ipa_rm_resource.o ipa_rm_inactivity_timer.o
diff -Nruw linux-4.4.115-fbx/drivers/platform/msm./ipa/test/Makefile linux-4.4.115-fbx/drivers/platform/msm/ipa/test/Makefile
--- linux-4.4.115-fbx/drivers/platform/msm./ipa/test/Makefile	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/platform/msm/ipa/test/Makefile	2019-01-22 16:16:26.179270639 +0100
@@ -0,0 +1,2 @@
+obj-$(CONFIG_IPA_UT) += ipa_ut_mod.o
+ipa_ut_mod-y := ipa_ut_framework.o ipa_test_example.o ipa_test_mhi.o ipa_test_dma.o
diff -Nruw linux-4.4.115-fbx/drivers/platform/msm./Kconfig linux-4.4.115-fbx/drivers/platform/msm/Kconfig
--- linux-4.4.115-fbx/drivers/platform/msm./Kconfig	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/platform/msm/Kconfig	2019-10-29 09:26:24.589212436 +0100
@@ -0,0 +1,217 @@
+menu "Qualcomm MSM specific device drivers"
+	depends on ARCH_QCOM
+
+config QPNP_REVID
+	tristate "QPNP Revision ID Peripheral"
+	depends on SPMI
+	help
+	  Say 'y' here to include support for the Qualcomm Technologies, Inc.
+	  QPNP REVID peripheral. REVID prints out the PMIC type and revision
+	  numbers in the kernel log along with the PMIC option status. The PMIC
+	  type is mapped to a QTI chip part number and logged as well.
+
+config QPNP_COINCELL
+	tristate "QPNP coincell charger support"
+	depends on SPMI
+	help
+	  This driver supports the QPNP coincell peripheral found inside of
+	  Qualcomm Technologies, Inc. QPNP PMIC devices.  The coincell charger
+	  provides a means to charge a coincell battery or backup capacitor
+	  which is used to maintain PMIC register state when the main battery is
+	  removed from the mobile device.
+
+config SPS
+	bool "SPS support"
+	select GENERIC_ALLOCATOR
+	help
+	  The SPS (Smart Peripheral Switch) is a DMA engine.
+	  It can move data in the following modes:
+		1. Peripheral-to-Peripheral.
+		2. Peripheral-to-Memory.
+		3. Memory-to-Memory.
+
+config SPS_SUPPORT_BAMDMA
+	bool "SPS support BAM DMA"
+	depends on SPS
+	default n
+	help
+	  The BAM-DMA is used for Memory-to-Memory transfers.
+	  The main use case is RPC between processors.
+	  The BAM-DMA hardware has 2 registers sets:
+		1. A BAM HW like all the peripherals.
+		2. A DMA channel configuration (i.e. channel priority).
+
+config SPS_SUPPORT_NDP_BAM
+	bool "SPS support NDP BAM"
+	depends on SPS
+	default n
+	help
+	  No-Data-Path BAM is used to improve BAM performance.
+
+config EP_PCIE
+	bool "PCIe Endpoint mode support"
+	select GENERIC_ALLOCATOR
+	help
+	  PCIe controller is in endpoint mode.
+	  It supports the APIs to clients as a service layer, and allows
+	  clients to enable/disable PCIe link, configure the address
+	  mapping for the access to host memory, trigger wake interrupt
+	  on host side to wake up host, and trigger MSI to host side.
+
+config EP_PCIE_HW
+	bool "PCIe Endpoint HW driver"
+	depends on EP_PCIE
+	help
+	  PCIe endpoint HW specific implementation.
+	  It supports:
+		1. link training with Root Complex.
+		2. Address mapping.
+		3. Sideband signaling.
+		4. Power management.
+
+config IPA
+	tristate "IPA support"
+	depends on SPS && NET
+	help
+	  This driver supports the Internet Packet Accelerator (IPA) core.
+	  IPA is a programmable protocol processor HW block.
+	  It is designed to support generic HW processing of UL/DL IP packets
+	  for various use cases independent of radio technology.
+	  The driver supports client connection and configuration
+	  for the IPA core.
+	  Kernel and user-space processes can call the IPA driver
+	  to configure IPA core.
+
+config RMNET_IPA
+	tristate "IPA RMNET WWAN Network Device"
+	depends on IPA && MSM_QMI_INTERFACE
+	help
+	  This WWAN Network Driver implements a network stack class device.
+	  It supports embedded data transfer from A7 to Q6, configures the
+	  IPA HW for the RmNet Data Driver and also handles the exchange of
+	  QMI messages between the A7 and Q6 IPA drivers.
+
+config GSI
+	bool "GSI support"
+	help
+	  This driver provides the transport needed to talk to the
+	  IPA core. It replaces the BAM transport used previously.
+
+	  The GSI connects to a peripheral component via uniform TLV
+	  interface, and allows it to interface with other peripherals
+	  and CPUs over various types of interfaces such as MHI, xDCI,
+	  xHCI, GPI, WDI, Ethernet, etc.
+
+config IPA3
+	tristate "IPA3 support"
+	depends on GSI && NET
+	help
+	  This driver supports the Internet Packet Accelerator (IPA3) core.
+	  IPA is a programmable protocol processor HW block.
+	  It is designed to support generic HW processing of UL/DL IP packets
+	  for various use cases independent of radio technology.
+	  The driver supports client connection and configuration
+	  for the IPA core.
+	  Kernel and user-space processes can call the IPA driver
+	  to configure IPA core.
+
+config RMNET_IPA3
+	tristate "IPA3 RMNET WWAN Network Device"
+	depends on IPA3 && MSM_QMI_INTERFACE
+	help
+	  This WWAN Network Driver implements a network stack class device.
+	  It supports embedded data transfer from A7 to Q6, configures the
+	  IPA HW for the RmNet Data Driver and also handles the exchange of
+	  QMI messages between the A7 and Q6 IPA drivers.
+
+config IPA_UT
+	tristate "IPA Unit-Test Framework and Test Suites"
+	depends on IPA3 && DEBUG_FS
+	help
+	  This Module implements IPA in-kernel test framework.
+	  The framework supports defining and running tests, grouped
+	  into suites according to the sub-unit of the IPA being tested.
+	  The user interface to run and control the tests is debugfs file
+	  system.
+
+config GPIO_USB_DETECT
+	tristate "GPIO-based USB VBUS Detection"
+	depends on POWER_SUPPLY
+	help
+	  This driver supports external USB VBUS detection circuitry whose
+	  output is connected to a GPIO. The driver in turn notifies the
+	  USB driver of VBUS presence/disconnection using the extcon
+	  framework.
+
+config MSM_MHI
+	tristate "Modem Host Interface Driver"
+	help
+	  This kernel module is used to interact with PCIe endpoints
+	  supporting MHI protocol. MHI is a data transmission protocol
+	  involving communication between a host and a device over shared
+	  memory. The MHI driver manages the shared memory by use of logical
+	  unidirectional channels.
+
+config MSM_MHI_UCI
+	tristate "MHI Userspace Control Interface Driver"
+	depends on MSM_MHI
+	help
+	  This module enables userspace software clients to communicate
+	  with devices supporting the MHI protocol. Userspace clients
+	  may open the device nodes exposed by MHI UCI and perform
+	  read, write and ioctl operations to communicate with the
+	  attached device.
+
+config MSM_MHI_DEBUG
+	bool "MHI debug support"
+	depends on MSM_MHI
+	help
+	  Say yes here to enable debugging support in the MHI transport
+	  and individual MHI client drivers. This option may impact
+	  throughput as individual MHI packets and state transitions
+	  will be logged.
+
+config MSM_MHI_DEV
+	tristate "Modem Device Interface Driver"
+	depends on EP_PCIE && IPA
+	help
+	  This kernel module is used to interact with PCIe Root complex
+	  supporting MHI protocol. MHI is a data transmission protocol
+	  involving communication between a host and a device over shared
+	  memory. MHI interacts with the IPA for supporting transfers
+	  on the HW accelerated channels between Host and device.
+
+config MSM_11AD
+	tristate "Platform driver for 11ad chip"
+	depends on PCI
+	depends on PCI_MSM
+	default m
+	---help---
+	  This module adds the required platform support for the wireless
+	  adapter based on the Qualcomm Technologies, Inc. 11ad chip,
+	  integrated into the MSM platform.
+
+	  If you choose to build it as a module, it will be called
+	  msm_11ad_proxy.
+
+config SEEMP_CORE
+	tristate "SEEMP Core"
+	help
+	  This option enables Qualcomm Snapdragon Smart Protection to detect
+	  anomalies in various activities. It records task activities in
+	  a log and rates the actions according to whether a typical user would
+	  use the tools.
+
+config USB_BAM
+	bool "USB BAM Driver"
+	depends on SPS && USB_GADGET
+	help
+	  Enabling this option adds the USB BAM Driver.
+	  The USB BAM driver supports SPS Peripheral-to-Peripheral
+	  transfers between USB and other peripherals.
+
+config MSM_EXT_DISPLAY
+	bool "MSM External Display Driver"
+	help
+	  Enabling this option adds the MSM External Display Driver.
+	  The external display driver was added to support the communication
+	  between the external display driver and its counterparts.
+
+endmenu
diff -Nruw linux-4.4.115-fbx/drivers/platform/msm./Makefile linux-4.4.115-fbx/drivers/platform/msm/Makefile
--- linux-4.4.115-fbx/drivers/platform/msm./Makefile	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/platform/msm/Makefile	2019-10-29 09:26:24.589212436 +0100
@@ -0,0 +1,18 @@
+#
+# Makefile for the MSM specific device drivers.
+#
+obj-$(CONFIG_QPNP_REVID) += qpnp-revid.o
+obj-$(CONFIG_QPNP_COINCELL) += qpnp-coincell.o
+obj-$(CONFIG_MSM_MHI) += mhi/
+obj-$(CONFIG_MSM_MHI_UCI) += mhi_uci/
+obj-$(CONFIG_SPS) += sps/
+obj-$(CONFIG_GSI) += gsi/
+obj-$(CONFIG_IPA) += ipa/
+obj-$(CONFIG_IPA3) += ipa/
+obj-$(CONFIG_EP_PCIE) += ep_pcie/
+obj-$(CONFIG_GPIO_USB_DETECT) += gpio-usbdetect.o
+obj-$(CONFIG_MSM_11AD) += msm_11ad/
+obj-$(CONFIG_SEEMP_CORE) += seemp_core/
+obj-$(CONFIG_USB_BAM) += usb_bam.o
+obj-$(CONFIG_MSM_MHI_DEV) += mhi_dev/
+obj-$(CONFIG_MSM_EXT_DISPLAY) += msm_ext_display.o
diff -Nruw linux-4.4.115-fbx/drivers/platform/msm./mhi/Makefile linux-4.4.115-fbx/drivers/platform/msm/mhi/Makefile
--- linux-4.4.115-fbx/drivers/platform/msm./mhi/Makefile	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/platform/msm/mhi/Makefile	2019-01-22 16:16:26.179270639 +0100
@@ -0,0 +1,15 @@
+# Makefile for MHI driver
+obj-y += mhi_main.o
+obj-y += mhi_iface.o
+obj-y += mhi_init.o
+obj-y += mhi_isr.o
+obj-y += mhi_mmio_ops.o
+obj-y += mhi_ring_ops.o
+obj-y += mhi_states.o
+obj-y += mhi_sys.o
+obj-y += mhi_bhi.o
+obj-y += mhi_pm.o
+obj-y += mhi_ssr.o
+obj-y += mhi_event.o
+CFLAGS_mhi_iface.o := -I$(src)
+CFLAGS_mhi_ssr.o := -I$(src)
diff -Nruw linux-4.4.115-fbx/drivers/platform/msm./mhi_dev/Makefile linux-4.4.115-fbx/drivers/platform/msm/mhi_dev/Makefile
--- linux-4.4.115-fbx/drivers/platform/msm./mhi_dev/Makefile	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/platform/msm/mhi_dev/Makefile	2019-01-22 16:16:26.183270676 +0100
@@ -0,0 +1,6 @@
+# Makefile for MHI driver
+obj-y += mhi_mmio.o
+obj-y += mhi.o
+obj-y += mhi_ring.o
+obj-y += mhi_uci.o
+obj-y += mhi_sm.o
diff -Nruw linux-4.4.115-fbx/drivers/platform/msm./mhi_uci/Makefile linux-4.4.115-fbx/drivers/platform/msm/mhi_uci/Makefile
--- linux-4.4.115-fbx/drivers/platform/msm./mhi_uci/Makefile	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/platform/msm/mhi_uci/Makefile	2019-01-22 16:16:26.183270676 +0100
@@ -0,0 +1,2 @@
+# Makefile for MHI UCI driver
+obj-y += mhi_uci.o
diff -Nruw linux-4.4.115-fbx/drivers/platform/msm./msm_11ad/Makefile linux-4.4.115-fbx/drivers/platform/msm/msm_11ad/Makefile
--- linux-4.4.115-fbx/drivers/platform/msm./msm_11ad/Makefile	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/platform/msm/msm_11ad/Makefile	2019-01-22 16:16:26.183270676 +0100
@@ -0,0 +1,9 @@
+obj-$(CONFIG_MSM_11AD) += msm_11ad_proxy.o
+
+msm_11ad_proxy-y := msm_11ad.o
+subdir-ccflags-y += -D__CHECK_ENDIAN__
+
+# need to locate wil_platform.h
+WIL_11AD_PATH = drivers/net/wireless/ath/wil6210
+subdir-ccflags-y += -I$(WIL_11AD_PATH)
+
diff -Nruw linux-4.4.115-fbx/drivers/platform/msm./msm_ext_display.c linux-4.4.115-fbx/drivers/platform/msm/msm_ext_display.c
--- linux-4.4.115-fbx/drivers/platform/msm./msm_ext_display.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/platform/msm/msm_ext_display.c	2019-10-29 09:26:24.617212710 +0100
@@ -0,0 +1,905 @@
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt)	"%s: " fmt, __func__
+
+#include <linux/slab.h>
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/iopoll.h>
+#include <linux/types.h>
+#include <linux/switch.h>
+#include <linux/of_platform.h>
+#include <linux/msm_ext_display.h>
+
+struct msm_ext_disp_list {
+	struct msm_ext_disp_init_data *data;
+	struct list_head list;
+};
+
+struct msm_ext_disp {
+	struct platform_device *pdev;
+	enum msm_ext_disp_type current_disp;
+	struct msm_ext_disp_audio_codec_ops *ops;
+	struct switch_dev hdmi_sdev;
+	struct switch_dev audio_sdev;
+	bool ack_enabled;
+	bool audio_session_on;
+	struct list_head display_list;
+	struct mutex lock;
+	struct completion hpd_comp;
+	bool update_audio;
+	u32 flags;
+};
+
+static int msm_ext_disp_get_intf_data(struct msm_ext_disp *ext_disp,
+		enum msm_ext_disp_type type,
+		struct msm_ext_disp_init_data **data);
+static int msm_ext_disp_update_audio_ops(struct msm_ext_disp *ext_disp,
+		enum msm_ext_disp_type type,
+		enum msm_ext_disp_cable_state state, u32 flags);
+
+static int msm_ext_disp_switch_dev_register(struct msm_ext_disp *ext_disp)
+{
+	int ret = 0;
+
+	if (!ext_disp) {
+		pr_err("Invalid params\n");
+		ret = -EINVAL;
+		goto end;
+	}
+
+	memset(&ext_disp->hdmi_sdev, 0x0, sizeof(ext_disp->hdmi_sdev));
+	ext_disp->hdmi_sdev.name = "hdmi";
+	ret = switch_dev_register(&ext_disp->hdmi_sdev);
+	if (ret) {
+		pr_err("hdmi switch registration failed\n");
+		goto end;
+	}
+
+	memset(&ext_disp->audio_sdev, 0x0, sizeof(ext_disp->audio_sdev));
+	ext_disp->audio_sdev.name = "hdmi_audio";
+	ret = switch_dev_register(&ext_disp->audio_sdev);
+	if (ret) {
+		pr_err("hdmi_audio switch registration failed");
+		goto hdmi_audio_failure;
+	}
+
+	pr_debug("Display switch registration pass\n");
+
+	return ret;
+
+hdmi_audio_failure:
+	switch_dev_unregister(&ext_disp->hdmi_sdev);
+end:
+	return ret;
+}
+
+static void msm_ext_disp_switch_dev_unregister(struct msm_ext_disp *ext_disp)
+{
+	if (!ext_disp) {
+		pr_err("Invalid params\n");
+		goto end;
+	}
+
+	switch_dev_unregister(&ext_disp->hdmi_sdev);
+	switch_dev_unregister(&ext_disp->audio_sdev);
+
+end:
+	return;
+}
+
+static const char *msm_ext_disp_name(enum msm_ext_disp_type type)
+{
+	switch (type) {
+	case EXT_DISPLAY_TYPE_HDMI:	return "EXT_DISPLAY_TYPE_HDMI";
+	case EXT_DISPLAY_TYPE_DP:	return "EXT_DISPLAY_TYPE_DP";
+	default: return "???";
+	}
+}
+
+static int msm_ext_disp_add_intf_data(struct msm_ext_disp *ext_disp,
+		struct msm_ext_disp_init_data *data)
+{
+	struct msm_ext_disp_list *node;
+
+	if (!ext_disp && !data) {
+		pr_err("Invalid params\n");
+		return -EINVAL;
+	}
+
+	node = kzalloc(sizeof(*node), GFP_KERNEL);
+	if (!node)
+		return -ENOMEM;
+
+	node->data = data;
+	list_add(&node->list, &ext_disp->display_list);
+
+	pr_debug("Added new display (%s)\n",
+			msm_ext_disp_name(data->type));
+
+	return 0;
+}
+
+static int msm_ext_disp_get_intf_data(struct msm_ext_disp *ext_disp,
+		enum msm_ext_disp_type type,
+		struct msm_ext_disp_init_data **data)
+{
+	int ret = 0;
+	struct msm_ext_disp_list *node;
+	struct list_head *position = NULL;
+
+	if (!ext_disp || !data || type < EXT_DISPLAY_TYPE_HDMI ||
+			type >=  EXT_DISPLAY_TYPE_MAX) {
+		pr_err("Invalid params\n");
+		ret = -EINVAL;
+		goto end;
+	}
+
+	*data = NULL;
+	list_for_each(position, &ext_disp->display_list) {
+		node = list_entry(position, struct msm_ext_disp_list, list);
+		if (node->data->type == type) {
+			pr_debug("Found display (%s)\n",
+					msm_ext_disp_name(type));
+			*data = node->data;
+			break;
+		}
+	}
+
+	if (!*data) {
+		pr_debug("Display not found (%s)\n",
+				msm_ext_disp_name(type));
+		ret = -ENODEV;
+	}
+
+end:
+	return ret;
+}
+
+static int msm_ext_disp_send_cable_notification(struct msm_ext_disp *ext_disp,
+		enum msm_ext_disp_cable_state new_state)
+{
+	int state = EXT_DISPLAY_CABLE_STATE_MAX;
+
+	if (!ext_disp) {
+		pr_err("Invalid params\n");
+		return -EINVAL;
+	}
+
+	state = ext_disp->hdmi_sdev.state;
+	switch_set_state(&ext_disp->hdmi_sdev, !!new_state);
+
+	pr_debug("Cable state %s %d\n",
+			ext_disp->hdmi_sdev.state == state ?
+			"is same" : "switched to",
+			ext_disp->hdmi_sdev.state);
+
+	return ext_disp->hdmi_sdev.state == state ? 0 : 1;
+}
+
+static int msm_ext_disp_send_audio_notification(struct msm_ext_disp *ext_disp,
+		enum msm_ext_disp_cable_state new_state)
+{
+	int state = EXT_DISPLAY_CABLE_STATE_MAX;
+
+	if (!ext_disp) {
+		pr_err("Invalid params\n");
+		return -EINVAL;
+	}
+
+	state = ext_disp->audio_sdev.state;
+	switch_set_state(&ext_disp->audio_sdev, !!new_state);
+
+	pr_debug("Audio state %s %d\n",
+			ext_disp->audio_sdev.state == state ?
+			"is same" : "switched to",
+			ext_disp->audio_sdev.state);
+
+	return ext_disp->audio_sdev.state == state ? 0 : 1;
+}
+
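+/*
+ * Notify userspace of the display cable state and, unless the caller asked
+ * for asynchronous handling, block up to 5s for the HPD acknowledgement.
+ */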
+static int msm_ext_disp_process_display(struct msm_ext_disp *ext_disp,
+		enum msm_ext_disp_type type,
+		enum msm_ext_disp_cable_state state, u32 flags)
+{
+	int ret = 0;
+
+	if (!(flags & (MSM_EXT_DISP_HPD_VIDEO
+		       | MSM_EXT_DISP_HPD_ASYNC_VIDEO))) {
+		pr_debug("skipping video setup for display (%s)\n",
+			msm_ext_disp_name(type));
+		goto end;
+	}
+
+	if (state == EXT_DISPLAY_CABLE_CONNECT)
+		ext_disp->current_disp = type;
+	else
+		ext_disp->current_disp = EXT_DISPLAY_TYPE_MAX;
+
+	reinit_completion(&ext_disp->hpd_comp);
+
+	ret = msm_ext_disp_send_cable_notification(ext_disp, state);
+
+	/* a positive return value means the cable state node was switched */
+	if ((ret <= 0) ||
+		(flags & MSM_EXT_DISP_HPD_ASYNC_VIDEO)) {
+		pr_debug("not waiting for display\n");
+		goto end;
+	}
+
+	ret = wait_for_completion_timeout(&ext_disp->hpd_comp, HZ * 5);
+	if (!ret) {
+		pr_err("display timeout\n");
+		ret = -EINVAL;
+		goto end;
+	}
+
+end:
+	return (ret >= 0) ? 0 : -EINVAL;
+}
+
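+/*
+ * Same handshake for the audio switch node: wait up to 2s for the audio
+ * acknowledgement when ack support is enabled and the HPD is synchronous.
+ */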
+static int msm_ext_disp_process_audio(struct msm_ext_disp *ext_disp,
+		enum msm_ext_disp_type type,
+		enum msm_ext_disp_cable_state state, u32 flags)
+{
+	int ret = 0;
+
+	if (!(flags & (MSM_EXT_DISP_HPD_AUDIO
+		       | MSM_EXT_DISP_HPD_ASYNC_AUDIO))) {
+		pr_debug("skipping audio setup for display (%s)\n",
+			msm_ext_disp_name(type));
+		goto end;
+	}
+
+	reinit_completion(&ext_disp->hpd_comp);
+
+	ret = msm_ext_disp_send_audio_notification(ext_disp, state);
+
+	/* positive ret value means audio node was switched */
+	if ((ret <= 0) || !ext_disp->ack_enabled ||
+		(flags & MSM_EXT_DISP_HPD_ASYNC_AUDIO)) {
+		pr_debug("not waiting for audio\n");
+		goto end;
+	}
+
+	ret = wait_for_completion_timeout(&ext_disp->hpd_comp, HZ * 2);
+	if (!ret) {
+		pr_err("audio timeout\n");
+		ret = -EINVAL;
+		goto end;
+	}
+
+end:
+	return (ret >= 0) ? 0 : -EINVAL;
+}
+
+static bool msm_ext_disp_validate_connect(struct msm_ext_disp *ext_disp,
+		enum msm_ext_disp_type type, u32 flags)
+{
+	/* allow new connections */
+	if (ext_disp->current_disp == EXT_DISPLAY_TYPE_MAX)
+		goto end;
+
+	/* if already connected, block a new connection */
+	if (ext_disp->current_disp != type)
+		return false;
+end:
+	return true;
+}
+
+static bool msm_ext_disp_validate_disconnect(struct msm_ext_disp *ext_disp,
+		enum msm_ext_disp_type type, u32 flags)
+{
+	/* check if nothing connected */
+	if (ext_disp->current_disp == EXT_DISPLAY_TYPE_MAX)
+		return false;
+
+	/* check if a different display's request */
+	if (ext_disp->current_disp != type)
+		return false;
+
+	return true;
+}
+
+static int msm_ext_disp_hpd(struct platform_device *pdev,
+		enum msm_ext_disp_type type,
+		enum msm_ext_disp_cable_state state,
+		u32 flags)
+{
+	int ret = 0;
+	struct msm_ext_disp *ext_disp = NULL;
+
+	if (!pdev) {
+		pr_err("Invalid platform device\n");
+		return -EINVAL;
+	}
+
+	ext_disp = platform_get_drvdata(pdev);
+	if (!ext_disp) {
+		pr_err("Invalid drvdata\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&ext_disp->lock);
+
+	pr_debug("HPD for display (%s), NEW STATE = %d, flags = %d\n",
+			msm_ext_disp_name(type), state, flags);
+
+	if (state < EXT_DISPLAY_CABLE_DISCONNECT ||
+			state >= EXT_DISPLAY_CABLE_STATE_MAX) {
+		pr_err("Invalid HPD state (%d)\n", state);
+		ret = -EINVAL;
+		goto end;
+	}
+
+	ext_disp->flags = flags;
+
+	if (state == EXT_DISPLAY_CABLE_CONNECT) {
+		if (!msm_ext_disp_validate_connect(ext_disp, type, flags)) {
+			pr_err("Display interface (%s) already connected\n",
+				msm_ext_disp_name(ext_disp->current_disp));
+			ret = -EINVAL;
+			goto end;
+		}
+
+		ret = msm_ext_disp_process_display(ext_disp, type, state,
+			flags);
+		if (ret)
+			goto end;
+
+		ret = msm_ext_disp_update_audio_ops(ext_disp, type, state,
+			flags);
+		if (ret)
+			goto end;
+
+		ret = msm_ext_disp_process_audio(ext_disp, type, state,
+			flags);
+		if (ret)
+			goto end;
+	} else {
+		if (!msm_ext_disp_validate_disconnect(ext_disp, type, flags)) {
+			pr_err("Display interface (%s) not connected\n",
+				msm_ext_disp_name(type));
+			ret = -EINVAL;
+			goto end;
+		}
+
+		msm_ext_disp_process_audio(ext_disp, type, state, flags);
+		msm_ext_disp_update_audio_ops(ext_disp, type, state, flags);
+		msm_ext_disp_process_display(ext_disp, type, state, flags);
+	}
+
+	pr_debug("Hpd (%d) for display (%s)\n", state,
+			msm_ext_disp_name(type));
+
+end:
+	mutex_unlock(&ext_disp->lock);
+
+	return ret;
+}
+
+static int msm_ext_disp_get_intf_data_helper(struct platform_device *pdev,
+		struct msm_ext_disp_init_data **data)
+{
+	int ret = 0;
+	struct msm_ext_disp *ext_disp = NULL;
+
+	if (!pdev) {
+		pr_err("No platform device\n");
+		ret = -ENODEV;
+		goto end;
+	}
+
+	ext_disp = platform_get_drvdata(pdev);
+	if (!ext_disp) {
+		pr_err("No drvdata found\n");
+		ret = -ENODEV;
+		goto end;
+	}
+
+	if (ext_disp->current_disp == EXT_DISPLAY_TYPE_MAX) {
+		ret = -EINVAL;
+		pr_err("No display connected\n");
+		goto end;
+	}
+
+	ret = msm_ext_disp_get_intf_data(ext_disp, ext_disp->current_disp,
+			data);
+end:
+	return ret;
+}
+
+static int msm_ext_disp_cable_status(struct platform_device *pdev, u32 vote)
+{
+	int ret = 0;
+	struct msm_ext_disp_init_data *data = NULL;
+
+	ret = msm_ext_disp_get_intf_data_helper(pdev, &data);
+	if (ret || !data)
+		goto end;
+
+	ret = data->codec_ops.cable_status(data->pdev, vote);
+
+end:
+	return ret;
+}
+
+static int msm_ext_disp_get_audio_edid_blk(struct platform_device *pdev,
+	struct msm_ext_disp_audio_edid_blk *blk)
+{
+	int ret = 0;
+	struct msm_ext_disp_init_data *data = NULL;
+
+	ret = msm_ext_disp_get_intf_data_helper(pdev, &data);
+	if (ret || !data)
+		goto end;
+
+	ret = data->codec_ops.get_audio_edid_blk(data->pdev, blk);
+
+end:
+	return ret;
+}
+
+static int msm_ext_disp_audio_info_setup(struct platform_device *pdev,
+	struct msm_ext_disp_audio_setup_params *params)
+{
+	int ret = 0;
+	struct msm_ext_disp_init_data *data = NULL;
+	struct msm_ext_disp *ext_disp = NULL;
+
+	ret = msm_ext_disp_get_intf_data_helper(pdev, &data);
+	if (ret || !data)
+		goto end;
+
+	ext_disp = platform_get_drvdata(pdev);
+	if (!ext_disp) {
+		pr_err("No drvdata found\n");
+		ret = -EINVAL;
+		goto end;
+	}
+
+	ext_disp->audio_session_on = true;
+
+	ret = data->codec_ops.audio_info_setup(data->pdev, params);
+
+end:
+	return ret;
+}
+
+static void msm_ext_disp_teardown_done(struct platform_device *pdev)
+{
+	int ret = 0;
+	struct msm_ext_disp_init_data *data = NULL;
+	struct msm_ext_disp *ext_disp = NULL;
+
+	ret = msm_ext_disp_get_intf_data_helper(pdev, &data);
+	if (ret || !data) {
+		pr_err("invalid input");
+		return;
+	}
+
+	ext_disp = platform_get_drvdata(pdev);
+	if (!ext_disp) {
+		pr_err("No drvdata found\n");
+		return;
+	}
+
+	if (data->codec_ops.teardown_done)
+		data->codec_ops.teardown_done(data->pdev);
+
+	ext_disp->audio_session_on = false;
+
+	pr_debug("%s tearing down audio\n",
+		msm_ext_disp_name(ext_disp->current_disp));
+
+	complete_all(&ext_disp->hpd_comp);
+}
+
+static int msm_ext_disp_audio_ack(struct platform_device *pdev, u32 ack)
+{
+	u32 ack_hpd;
+	int ret = 0;
+	struct msm_ext_disp *ext_disp = NULL;
+
+	if (!pdev) {
+		pr_err("Invalid platform device\n");
+		return -EINVAL;
+	}
+
+	ext_disp = platform_get_drvdata(pdev);
+	if (!ext_disp) {
+		pr_err("Invalid drvdata\n");
+		return -EINVAL;
+	}
+
+	if (ack & AUDIO_ACK_SET_ENABLE) {
+		ext_disp->ack_enabled = ack & AUDIO_ACK_ENABLE ?
+			true : false;
+
+		pr_debug("audio ack feature %s\n",
+			ext_disp->ack_enabled ? "enabled" : "disabled");
+		goto end;
+	}
+
+	if (!ext_disp->ack_enabled)
+		goto end;
+
+	ack_hpd = ack & AUDIO_ACK_CONNECT;
+
+	pr_debug("%s acknowledging audio (%d)\n",
+		msm_ext_disp_name(ext_disp->current_disp), ack_hpd);
+
+	if (!ext_disp->audio_session_on)
+		complete_all(&ext_disp->hpd_comp);
+end:
+	return ret;
+}
+
+static int msm_ext_disp_get_intf_id(struct platform_device *pdev)
+{
+	int ret = 0;
+	struct msm_ext_disp *ext_disp = NULL;
+
+	if (!pdev) {
+		pr_err("No platform device\n");
+		ret = -ENODEV;
+		goto end;
+	}
+
+	ext_disp = platform_get_drvdata(pdev);
+	if (!ext_disp) {
+		pr_err("No drvdata found\n");
+		ret = -ENODEV;
+		goto end;
+	}
+
+	ret = ext_disp->current_disp;
+
+end:
+	return ret;
+}
+
+static int msm_ext_disp_update_audio_ops(struct msm_ext_disp *ext_disp,
+		enum msm_ext_disp_type type,
+		enum msm_ext_disp_cable_state state, u32 flags)
+{
+	int ret = 0;
+	struct msm_ext_disp_audio_codec_ops *ops = ext_disp->ops;
+
+	ext_disp->update_audio = false;
+
+	if (!(flags & MSM_EXT_DISP_HPD_AUDIO)) {
+		pr_debug("skipping audio ops setup for display (%s)\n",
+			msm_ext_disp_name(type));
+		goto end;
+	}
+
+	if (!ops) {
+		pr_err("Invalid audio ops\n");
+		if (state == EXT_DISPLAY_CABLE_CONNECT) {
+			/* update audio ops once audio codec gets registered */
+			ext_disp->update_audio = true;
+		}
+		ret = -EINVAL;
+		goto end;
+	}
+
+	if (state == EXT_DISPLAY_CABLE_CONNECT) {
+		ops->audio_info_setup = msm_ext_disp_audio_info_setup;
+		ops->get_audio_edid_blk = msm_ext_disp_get_audio_edid_blk;
+		ops->cable_status = msm_ext_disp_cable_status;
+		ops->get_intf_id = msm_ext_disp_get_intf_id;
+		ops->teardown_done = msm_ext_disp_teardown_done;
+		ops->acknowledge = msm_ext_disp_audio_ack;
+	} else {
+		ops->audio_info_setup = NULL;
+		ops->get_audio_edid_blk = NULL;
+		ops->cable_status = NULL;
+		ops->get_intf_id = NULL;
+		ops->teardown_done = NULL;
+		ops->acknowledge = NULL;
+	}
+end:
+	return ret;
+}
+
+static int msm_ext_disp_notify(struct platform_device *pdev,
+		enum msm_ext_disp_cable_state state)
+{
+	int ret = 0;
+	struct msm_ext_disp *ext_disp = NULL;
+
+	if (!pdev) {
+		pr_err("Invalid platform device\n");
+		ret = -EINVAL;
+		goto end;
+	}
+
+	ext_disp = platform_get_drvdata(pdev);
+	if (!ext_disp) {
+		pr_err("Invalid drvdata\n");
+		ret = -EINVAL;
+		goto end;
+	}
+
+	if (state < EXT_DISPLAY_CABLE_DISCONNECT ||
+	    state >= EXT_DISPLAY_CABLE_STATE_MAX) {
+		pr_err("Invalid state (%d)\n", state);
+		ret = -EINVAL;
+		goto end;
+	}
+
+	pr_debug("%s notifying hpd (%d)\n",
+		msm_ext_disp_name(ext_disp->current_disp), state);
+
+	complete_all(&ext_disp->hpd_comp);
+end:
+	return ret;
+}
+
+int msm_hdmi_register_audio_codec(struct platform_device *pdev,
+	struct msm_ext_disp_audio_codec_ops *ops)
+{
+	return msm_ext_disp_register_audio_codec(pdev, ops);
+}
+
+int msm_ext_disp_register_audio_codec(struct platform_device *pdev,
+		struct msm_ext_disp_audio_codec_ops *ops)
+{
+	int ret = 0;
+	struct msm_ext_disp *ext_disp = NULL;
+	struct msm_ext_disp_list *node = NULL;
+
+	if (!pdev || !ops) {
+		pr_err("Invalid params\n");
+		return -EINVAL;
+	}
+
+	ext_disp = platform_get_drvdata(pdev);
+	if (!ext_disp) {
+		pr_err("Invalid drvdata\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&ext_disp->lock);
+
+	if ((ext_disp->current_disp != EXT_DISPLAY_TYPE_MAX)
+			&& ext_disp->ops) {
+		pr_err("Codec already registered\n");
+		mutex_unlock(&ext_disp->lock);
+		return -EINVAL;
+	}
+
+	ext_disp->ops = ops;
+
+	mutex_unlock(&ext_disp->lock);
+
+	list_for_each_entry(node, &ext_disp->display_list, list) {
+		struct msm_ext_disp_init_data *data = node->data;
+
+		if (data->codec_ops.codec_ready)
+			data->codec_ops.codec_ready(data->pdev);
+	}
+
+	pr_debug("audio codec registered\n");
+
+	mutex_lock(&ext_disp->lock);
+	if (ext_disp->update_audio) {
+		msm_ext_disp_update_audio_ops(ext_disp, ext_disp->current_disp,
+				EXT_DISPLAY_CABLE_CONNECT, ext_disp->flags);
+
+		msm_ext_disp_process_audio(ext_disp, ext_disp->current_disp,
+				EXT_DISPLAY_CABLE_CONNECT, ext_disp->flags);
+
+		ext_disp->update_audio = false;
+	}
+	mutex_unlock(&ext_disp->lock);
+
+	return ret;
+}
+
+static int msm_ext_disp_validate_intf(struct msm_ext_disp_init_data *init_data)
+{
+	if (!init_data) {
+		pr_err("Invalid init_data\n");
+		return -EINVAL;
+	}
+
+	if (!init_data->pdev) {
+		pr_err("Invalid display intf pdev\n");
+		return -EINVAL;
+	}
+
+	if (!init_data->codec_ops.get_audio_edid_blk ||
+			!init_data->codec_ops.cable_status ||
+			!init_data->codec_ops.audio_info_setup) {
+		pr_err("Invalid codec operation pointers\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
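+/*
+ * Called by display drivers (HDMI/DP) to register themselves; hands back
+ * the hpd/notify entry points they must invoke on cable events.
+ */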
+int msm_ext_disp_register_intf(struct platform_device *pdev,
+		struct msm_ext_disp_init_data *init_data)
+{
+	int ret = 0;
+	struct msm_ext_disp_init_data *data = NULL;
+	struct msm_ext_disp *ext_disp = NULL;
+
+	if (!pdev || !init_data) {
+		pr_err("Invalid params\n");
+		return -EINVAL;
+	}
+
+	ext_disp = platform_get_drvdata(pdev);
+	if (!ext_disp) {
+		pr_err("Invalid drvdata\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&ext_disp->lock);
+
+	ret = msm_ext_disp_validate_intf(init_data);
+	if (ret)
+		goto end;
+
+	ret = msm_ext_disp_get_intf_data(ext_disp, init_data->type, &data);
+	if (!ret) {
+		pr_debug("Display (%s) already registered\n",
+				msm_ext_disp_name(init_data->type));
+		goto end;
+	}
+
+	ret = msm_ext_disp_add_intf_data(ext_disp, init_data);
+	if (ret)
+		goto end;
+
+	init_data->intf_ops.hpd = msm_ext_disp_hpd;
+	init_data->intf_ops.notify = msm_ext_disp_notify;
+
+	pr_debug("Display (%s) registered\n",
+			msm_ext_disp_name(init_data->type));
+
+	mutex_unlock(&ext_disp->lock);
+
+	return ret;
+
+end:
+	mutex_unlock(&ext_disp->lock);
+
+	return ret;
+}
+
+static int msm_ext_disp_probe(struct platform_device *pdev)
+{
+	int ret = 0;
+	struct device_node *of_node = NULL;
+	struct msm_ext_disp *ext_disp = NULL;
+
+	if (!pdev) {
+		pr_err("No platform device found\n");
+		ret = -ENODEV;
+		goto end;
+	}
+
+	of_node = pdev->dev.of_node;
+	if (!of_node) {
+		pr_err("No device node found\n");
+		ret = -ENODEV;
+		goto end;
+	}
+
+	ext_disp = devm_kzalloc(&pdev->dev, sizeof(*ext_disp), GFP_KERNEL);
+	if (!ext_disp) {
+		ret = -ENOMEM;
+		goto end;
+	}
+
+	platform_set_drvdata(pdev, ext_disp);
+	ext_disp->pdev = pdev;
+
+	ret = msm_ext_disp_switch_dev_register(ext_disp);
+	if (ret)
+		goto switch_dev_failure;
+
+	ret = of_platform_populate(of_node, NULL, NULL, &pdev->dev);
+	if (ret) {
+		pr_err("Failed to add child devices. Error = %d\n", ret);
+		goto child_node_failure;
+	} else {
+		pr_debug("Added child devices\n");
+	}
+
+	mutex_init(&ext_disp->lock);
+
+	INIT_LIST_HEAD(&ext_disp->display_list);
+	init_completion(&ext_disp->hpd_comp);
+	ext_disp->current_disp = EXT_DISPLAY_TYPE_MAX;
+	ext_disp->flags = 0;
+	ext_disp->update_audio = false;
+
+	return ret;
+
+child_node_failure:
+	msm_ext_disp_switch_dev_unregister(ext_disp);
+switch_dev_failure:
+	devm_kfree(&ext_disp->pdev->dev, ext_disp);
+end:
+	return ret;
+}
+
+static int msm_ext_disp_remove(struct platform_device *pdev)
+{
+	int ret = 0;
+	struct msm_ext_disp *ext_disp = NULL;
+
+	if (!pdev) {
+		pr_err("No platform device\n");
+		ret = -ENODEV;
+		goto end;
+	}
+
+	ext_disp = platform_get_drvdata(pdev);
+	if (!ext_disp) {
+		pr_err("No drvdata found\n");
+		ret = -ENODEV;
+		goto end;
+	}
+
+	msm_ext_disp_switch_dev_unregister(ext_disp);
+
+	mutex_destroy(&ext_disp->lock);
+	devm_kfree(&ext_disp->pdev->dev, ext_disp);
+
+end:
+	return ret;
+}
+
+static const struct of_device_id msm_ext_dt_match[] = {
+	{.compatible = "qcom,msm-ext-disp",},
+	{ /* Sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, msm_ext_dt_match);
+
+static struct platform_driver this_driver = {
+	.probe = msm_ext_disp_probe,
+	.remove = msm_ext_disp_remove,
+	.driver = {
+		.name = "msm-ext-disp",
+		.of_match_table = msm_ext_dt_match,
+	},
+};
+
+static int __init msm_ext_disp_init(void)
+{
+	int ret = 0;
+
+	ret = platform_driver_register(&this_driver);
+	if (ret)
+		pr_err("failed, ret = %d\n", ret);
+
+	return ret;
+}
+
+static void __exit msm_ext_disp_exit(void)
+{
+	platform_driver_unregister(&this_driver);
+}
+
+subsys_initcall(msm_ext_disp_init);
+module_exit(msm_ext_disp_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("MSM External Display");
+
diff -Nruw linux-4.4.115-fbx/drivers/platform/msm./qpnp-coincell.c linux-4.4.115-fbx/drivers/platform/msm/qpnp-coincell.c
--- linux-4.4.115-fbx/drivers/platform/msm./qpnp-coincell.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/platform/msm/qpnp-coincell.c	2019-01-22 16:16:26.187270712 +0100
@@ -0,0 +1,283 @@
+/* Copyright (c) 2013-2015, 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/kernel.h>
+#include <linux/regmap.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/spmi.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+
+#define QPNP_COINCELL_DRIVER_NAME "qcom,qpnp-coincell"
+
+struct qpnp_coincell {
+	struct platform_device	*pdev;
+	struct regmap		*regmap;
+	u16			base_addr;
+};
+
+#define QPNP_COINCELL_REG_TYPE		0x04
+#define QPNP_COINCELL_REG_SUBTYPE	0x05
+#define QPNP_COINCELL_REG_RSET		0x44
+#define QPNP_COINCELL_REG_VSET		0x45
+#define QPNP_COINCELL_REG_ENABLE	0x46
+
+#define QPNP_COINCELL_TYPE		0x02
+#define QPNP_COINCELL_SUBTYPE		0x20
+#define QPNP_COINCELL_ENABLE		0x80
+#define QPNP_COINCELL_DISABLE		0x00
+
+static const int qpnp_rset_map[] = {2100, 1700, 1200, 800};
+static const int qpnp_vset_map[] = {2500, 3200, 3100, 3000};
+
+static int qpnp_coincell_set_resistance(struct qpnp_coincell *chip, int rset)
+{
+	int i, rc;
+	u8 reg;
+
+	for (i = 0; i < ARRAY_SIZE(qpnp_rset_map); i++)
+		if (rset == qpnp_rset_map[i])
+			break;
+
+	if (i >= ARRAY_SIZE(qpnp_rset_map)) {
+		pr_err("invalid rset=%d value\n", rset);
+		return -EINVAL;
+	}
+
+	reg = i;
+	rc = regmap_write(chip->regmap,
+			  chip->base_addr + QPNP_COINCELL_REG_RSET, reg);
+	if (rc)
+		dev_err(&chip->pdev->dev,
+			"%s: could not write to RSET register, rc=%d\n",
+			__func__, rc);
+
+	return rc;
+}
+
+static int qpnp_coincell_set_voltage(struct qpnp_coincell *chip, int vset)
+{
+	int i, rc;
+	u8 reg;
+
+	for (i = 0; i < ARRAY_SIZE(qpnp_vset_map); i++)
+		if (vset == qpnp_vset_map[i])
+			break;
+
+	if (i >= ARRAY_SIZE(qpnp_vset_map)) {
+		pr_err("invalid vset=%d value\n", vset);
+		return -EINVAL;
+	}
+
+	reg = i;
+	rc = regmap_write(chip->regmap,
+			  chip->base_addr + QPNP_COINCELL_REG_VSET, reg);
+	if (rc)
+		dev_err(&chip->pdev->dev,
+			"%s: could not write to VSET register, rc=%d\n",
+			__func__, rc);
+
+	return rc;
+}
+
+static int qpnp_coincell_set_charge(struct qpnp_coincell *chip, bool enabled)
+{
+	int rc;
+	u8 reg;
+
+	reg = enabled ? QPNP_COINCELL_ENABLE : QPNP_COINCELL_DISABLE;
+	rc = regmap_write(chip->regmap,
+			  chip->base_addr + QPNP_COINCELL_REG_ENABLE, reg);
+	if (rc)
+		dev_err(&chip->pdev->dev,
+			"%s: could not write to ENABLE register, rc=%d\n",
+			__func__, rc);
+
+	return rc;
+}
+
+static void qpnp_coincell_charger_show_state(struct qpnp_coincell *chip)
+{
+	int rc, rset, vset, temp;
+	bool enabled;
+	u8 reg[QPNP_COINCELL_REG_ENABLE - QPNP_COINCELL_REG_RSET + 1];
+
+	rc = regmap_bulk_read(chip->regmap,
+			      chip->base_addr + QPNP_COINCELL_REG_RSET, reg,
+			      ARRAY_SIZE(reg));
+	if (rc) {
+		dev_err(&chip->pdev->dev,
+			"%s: could not read RSET register, rc=%d\n",
+			__func__, rc);
+		return;
+	}
+
+	temp = reg[QPNP_COINCELL_REG_RSET - QPNP_COINCELL_REG_RSET];
+	if (temp >= ARRAY_SIZE(qpnp_rset_map)) {
+		dev_err(&chip->pdev->dev,
+			"unknown RSET=0x%02X register value\n",
+			temp);
+		return;
+	}
+	rset = qpnp_rset_map[temp];
+
+	temp = reg[QPNP_COINCELL_REG_VSET - QPNP_COINCELL_REG_RSET];
+	if (temp >= ARRAY_SIZE(qpnp_vset_map)) {
+		dev_err(&chip->pdev->dev,
+			"unknown VSET=0x%02X register value\n",
+			temp);
+		return;
+	}
+	vset = qpnp_vset_map[temp];
+
+	temp = reg[QPNP_COINCELL_REG_ENABLE - QPNP_COINCELL_REG_RSET];
+	enabled = temp & QPNP_COINCELL_ENABLE;
+
+	pr_info("enabled=%c, voltage=%d mV, resistance=%d ohm\n",
+		(enabled ? 'Y' : 'N'), vset, rset);
+}
+
+static int qpnp_coincell_check_type(struct qpnp_coincell *chip)
+{
+	int rc;
+	u8 type[2];
+
+	rc = regmap_bulk_read(chip->regmap,
+			      chip->base_addr + QPNP_COINCELL_REG_TYPE, type,
+			      2);
+	if (rc) {
+		dev_err(&chip->pdev->dev,
+			"%s: could not read type register, rc=%d\n",
+			__func__, rc);
+		return rc;
+	}
+
+	if (type[0] != QPNP_COINCELL_TYPE || type[1] != QPNP_COINCELL_SUBTYPE) {
+		dev_err(&chip->pdev->dev,
+			"%s: invalid type=0x%02X or subtype=0x%02X register value\n",
+			__func__, type[0], type[1]);
+		return -ENODEV;
+	}
+
+	return rc;
+}
+
+static int qpnp_coincell_probe(struct platform_device *pdev)
+{
+	struct device_node *node = pdev->dev.of_node;
+	struct qpnp_coincell *chip;
+	unsigned int base;
+	u32 temp;
+	int rc = 0;
+
+	if (!node) {
+		dev_err(&pdev->dev, "%s: device node missing\n", __func__);
+		return -ENODEV;
+	}
+
+	chip = devm_kzalloc(&pdev->dev, sizeof(*chip), GFP_KERNEL);
+	if (!chip)
+		return -ENOMEM;
+
+	chip->regmap = dev_get_regmap(pdev->dev.parent, NULL);
+	if (!chip->regmap) {
+		dev_err(&pdev->dev, "Couldn't get parent's regmap\n");
+		return -EINVAL;
+	}
+	chip->pdev = pdev;
+
+	rc = of_property_read_u32(node, "reg", &base);
+	if (rc < 0) {
+		dev_err(&pdev->dev,
+			"Couldn't find reg in node = %s rc = %d\n",
+			node->full_name, rc);
+		return rc;
+	}
+	chip->base_addr = base;
+
+	rc = qpnp_coincell_check_type(chip);
+	if (rc)
+		return rc;
+
+	rc = of_property_read_u32(node, "qcom,rset-ohms", &temp);
+	if (!rc) {
+		rc = qpnp_coincell_set_resistance(chip, temp);
+		if (rc)
+			return rc;
+	}
+
+	rc = of_property_read_u32(node, "qcom,vset-millivolts", &temp);
+	if (!rc) {
+		rc = qpnp_coincell_set_voltage(chip, temp);
+		if (rc)
+			return rc;
+	}
+
+	rc = of_property_read_u32(node, "qcom,charge-enable", &temp);
+	if (!rc) {
+		rc = qpnp_coincell_set_charge(chip, temp);
+		if (rc)
+			return rc;
+	}
+
+	qpnp_coincell_charger_show_state(chip);
+
+	return 0;
+}
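+
+/*
+ * Illustrative device-tree node (editorial sketch): only the compatible
+ * string and property names are taken from this driver; the unit
+ * address, parent bus and values shown are hypothetical.
+ *
+ *	qcom,coincell@2800 {
+ *		compatible = "qcom,qpnp-coincell";
+ *		reg = <0x2800>;
+ *		qcom,rset-ohms = <2100>;
+ *		qcom,vset-millivolts = <3000>;
+ *		qcom,charge-enable = <1>;
+ *	};
+ *
+ * With these values the probe above programs RSET index 0
+ * (qpnp_rset_map[0] == 2100) and VSET index 3 (qpnp_vset_map[3] == 3000),
+ * then enables charging.
+ */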
+
+static int qpnp_coincell_remove(struct platform_device *pdev)
+{
+	return 0;
+}
+
+static const struct of_device_id qpnp_coincell_match_table[] = {
+	{ .compatible = QPNP_COINCELL_DRIVER_NAME, },
+	{}
+};
+
+static const struct platform_device_id qpnp_coincell_id[] = {
+	{ QPNP_COINCELL_DRIVER_NAME, 0 },
+	{}
+};
+MODULE_DEVICE_TABLE(platform, qpnp_coincell_id);
+
+static struct platform_driver qpnp_coincell_driver = {
+	.driver	= {
+		.name		= QPNP_COINCELL_DRIVER_NAME,
+		.of_match_table	= qpnp_coincell_match_table,
+		.owner		= THIS_MODULE,
+	},
+	.probe		= qpnp_coincell_probe,
+	.remove		= qpnp_coincell_remove,
+	.id_table	= qpnp_coincell_id,
+};
+
+static int __init qpnp_coincell_init(void)
+{
+	return platform_driver_register(&qpnp_coincell_driver);
+}
+
+static void __exit qpnp_coincell_exit(void)
+{
+	platform_driver_unregister(&qpnp_coincell_driver);
+}
+
+MODULE_DESCRIPTION("QPNP PMIC coincell charger driver");
+MODULE_LICENSE("GPL v2");
+
+module_init(qpnp_coincell_init);
+module_exit(qpnp_coincell_exit);
diff -Nruw linux-4.4.115-fbx/drivers/platform/msm./qpnp-revid.c linux-4.4.115-fbx/drivers/platform/msm/qpnp-revid.c
--- linux-4.4.115-fbx/drivers/platform/msm./qpnp-revid.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/platform/msm/qpnp-revid.c	2019-01-22 16:16:26.187270712 +0100
@@ -0,0 +1,277 @@
+/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/spmi.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/err.h>
+#include <linux/qpnp/qpnp-revid.h>
+#include <linux/of.h>
+
+#define REVID_REVISION1	0x0
+#define REVID_REVISION2	0x1
+#define REVID_REVISION3	0x2
+#define REVID_REVISION4	0x3
+#define REVID_TYPE	0x4
+#define REVID_SUBTYPE	0x5
+#define REVID_STATUS1	0x8
+#define REVID_SPARE_0	0x60
+#define REVID_TP_REV	0xf1
+#define REVID_FAB_ID	0xf2
+
+#define QPNP_REVID_DEV_NAME "qcom,qpnp-revid"
+
+static const char *const pmic_names[] = {
+	[0] =	"Unknown PMIC",
+	[PM8941_SUBTYPE] = "PM8941",
+	[PM8841_SUBTYPE] = "PM8841",
+	[PM8019_SUBTYPE] = "PM8019",
+	[PM8226_SUBTYPE] = "PM8226",
+	[PM8110_SUBTYPE] = "PM8110",
+	[PMA8084_SUBTYPE] = "PMA8084",
+	[PMI8962_SUBTYPE] = "PMI8962",
+	[PMD9635_SUBTYPE] = "PMD9635",
+	[PM8994_SUBTYPE] = "PM8994",
+	[PMI8994_SUBTYPE] = "PMI8994",
+	[PM8916_SUBTYPE] = "PM8916",
+	[PM8004_SUBTYPE] = "PM8004",
+	[PM8909_SUBTYPE] = "PM8909",
+	[PM2433_SUBTYPE] = "PM2433",
+	[PMD9655_SUBTYPE] = "PMD9655",
+	[PM8950_SUBTYPE] = "PM8950",
+	[PMI8950_SUBTYPE] = "PMI8950",
+	[PMK8001_SUBTYPE] = "PMK8001",
+	[PMI8996_SUBTYPE] = "PMI8996",
+	[PM8998_SUBTYPE] = "PM8998",
+	[PMI8998_SUBTYPE] = "PMI8998",
+	[PM8005_SUBTYPE] = "PM8005",
+	[PM8937_SUBTYPE] = "PM8937",
+	[PM660L_SUBTYPE] = "PM660L",
+	[PM660_SUBTYPE] = "PM660",
+	[PMI8937_SUBTYPE] = "PMI8937",
+};
+
+struct revid_chip {
+	struct list_head	link;
+	struct device_node	*dev_node;
+	struct pmic_revid_data	data;
+};
+
+static LIST_HEAD(revid_chips);
+static DEFINE_MUTEX(revid_chips_lock);
+
+static const struct of_device_id qpnp_revid_match_table[] = {
+	{ .compatible = QPNP_REVID_DEV_NAME },
+	{}
+};
+
+static u8 qpnp_read_byte(struct regmap *regmap, u16 addr)
+{
+	int rc;
+	int val;
+
+	rc = regmap_read(regmap, addr, &val);
+	if (rc) {
+		pr_err("read failed rc=%d\n", rc);
+		return 0;
+	}
+	return (u8)val;
+}
+
+/**
+ * get_revid_data - Return the revision information of PMIC
+ * @dev_node: Pointer to the revid peripheral of the PMIC for which
+ *		revision information is seeked
+ *
+ * CONTEXT: Should be called in non atomic context
+ *
+ * RETURNS: pointer to struct pmic_revid_data filled with the information
+ *		about the PMIC revision
+ */
+struct pmic_revid_data *get_revid_data(struct device_node *dev_node)
+{
+	struct revid_chip *revid_chip;
+
+	if (!dev_node)
+		return ERR_PTR(-EINVAL);
+
+	mutex_lock(&revid_chips_lock);
+	list_for_each_entry(revid_chip, &revid_chips, link) {
+		if (dev_node == revid_chip->dev_node) {
+			mutex_unlock(&revid_chips_lock);
+			return &revid_chip->data;
+		}
+	}
+	mutex_unlock(&revid_chips_lock);
+	return ERR_PTR(-EINVAL);
+}
+EXPORT_SYMBOL(get_revid_data);
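+
+/*
+ * Illustrative consumer sketch (not part of this file): a driver holding
+ * a phandle to this revid node can branch on the PMIC subtype. The
+ * "qcom,pmic-revid" property name is a common convention but is an
+ * assumption here, as is the function name.
+ */
+#if 0
+static bool example_is_pmi8998(struct device_node *node)
+{
+	struct device_node *revid_node;
+	struct pmic_revid_data *revid;
+
+	revid_node = of_parse_phandle(node, "qcom,pmic-revid", 0);
+	if (!revid_node)
+		return false;
+
+	revid = get_revid_data(revid_node);
+	of_node_put(revid_node);
+	if (IS_ERR(revid))
+		return false;
+
+	return revid->pmic_subtype == PMI8998_SUBTYPE;
+}
+#endif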
+
+#define PM8941_PERIPHERAL_SUBTYPE	0x01
+#define PM8226_PERIPHERAL_SUBTYPE	0x04
+#define PMD9655_PERIPHERAL_SUBTYPE	0x0F
+#define PMI8950_PERIPHERAL_SUBTYPE	0x11
+#define PMI8937_PERIPHERAL_SUBTYPE	0x37
+static size_t build_pmic_string(char *buf, size_t n, int sid,
+		u8 subtype, u8 rev1, u8 rev2, u8 rev3, u8 rev4)
+{
+	size_t pos = 0;
+	/*
+	 * In early versions of PM8941 and PM8226, the major revision number
+	 * started incrementing from 0 (eg 0 = v1.0, 1 = v2.0).
+	 * Increment the major revision number here if the chip is an early
+	 * version of PM8941 or PM8226.
+	 */
+	if (((int)subtype == PM8941_PERIPHERAL_SUBTYPE
+			|| (int)subtype == PM8226_PERIPHERAL_SUBTYPE)
+			&& rev4 < 0x02)
+		rev4++;
+
+	pos += snprintf(buf + pos, n - pos, "PMIC@SID%d", sid);
+	if (subtype >= ARRAY_SIZE(pmic_names) || subtype == 0)
+		pos += snprintf(buf + pos, n - pos, ": %s (subtype: 0x%02X)",
+				pmic_names[0], subtype);
+	else
+		pos += snprintf(buf + pos, n - pos, ": %s",
+				pmic_names[subtype]);
+	pos += snprintf(buf + pos, n - pos, " v%d.%d", rev4, rev3);
+	if (rev2 || rev1)
+		pos += snprintf(buf + pos, n - pos, ".%d", rev2);
+	if (rev1)
+		pos += snprintf(buf + pos, n - pos, ".%d", rev1);
+	return pos;
+}
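+
+/*
+ * Worked example (editorial): for sid=2, subtype=PM8950_SUBTYPE, rev4=2,
+ * rev3=0, rev2=0 and rev1=0, the function above produces
+ * "PMIC@SID2: PM8950 v2.0" (assuming PM8950's subtype differs from the
+ * PM8941/PM8226 special cases); the ".rev2" and ".rev1" suffixes are
+ * appended only when those revisions are non-zero.
+ */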
+
+#define PMIC_PERIPHERAL_TYPE		0x51
+#define PMIC_STRING_MAXLENGTH		80
+static int qpnp_revid_probe(struct platform_device *pdev)
+{
+	u8 rev1, rev2, rev3, rev4, pmic_type, pmic_subtype, pmic_status;
+	u8 option1, option2, option3, option4, spare0;
+	unsigned int base;
+	int rc, fab_id, tp_rev;
+	char pmic_string[PMIC_STRING_MAXLENGTH] = {'\0'};
+	struct revid_chip *revid_chip;
+	struct regmap *regmap;
+
+	regmap = dev_get_regmap(pdev->dev.parent, NULL);
+	if (!regmap) {
+		dev_err(&pdev->dev, "Couldn't get parent's regmap\n");
+		return -EINVAL;
+	}
+
+	rc = of_property_read_u32(pdev->dev.of_node, "reg", &base);
+	if (rc < 0) {
+		dev_err(&pdev->dev,
+			"Couldn't find reg in node = %s rc = %d\n",
+			pdev->dev.of_node->full_name, rc);
+		return rc;
+	}
+	pmic_type = qpnp_read_byte(regmap, base + REVID_TYPE);
+	if (pmic_type != PMIC_PERIPHERAL_TYPE) {
+		pr_err("Invalid REVID peripheral type: %02X\n", pmic_type);
+		return -EINVAL;
+	}
+
+	rev1 = qpnp_read_byte(regmap, base + REVID_REVISION1);
+	rev2 = qpnp_read_byte(regmap, base + REVID_REVISION2);
+	rev3 = qpnp_read_byte(regmap, base + REVID_REVISION3);
+	rev4 = qpnp_read_byte(regmap, base + REVID_REVISION4);
+
+	pmic_subtype = qpnp_read_byte(regmap, base + REVID_SUBTYPE);
+	if (pmic_subtype != PMD9655_PERIPHERAL_SUBTYPE)
+		pmic_status = qpnp_read_byte(regmap, base + REVID_STATUS1);
+	else
+		pmic_status = 0;
+
+	/* special case for PMI8937 */
+	if (pmic_subtype == PMI8950_PERIPHERAL_SUBTYPE) {
+		/* read spare register */
+		spare0 = qpnp_read_byte(regmap, base + REVID_SPARE_0);
+		if (spare0)
+			pmic_subtype = PMI8937_PERIPHERAL_SUBTYPE;
+	}
+
+	if (of_property_read_bool(pdev->dev.of_node, "qcom,fab-id-valid"))
+		fab_id = qpnp_read_byte(regmap, base + REVID_FAB_ID);
+	else
+		fab_id = -EINVAL;
+
+	if (of_property_read_bool(pdev->dev.of_node, "qcom,tp-rev-valid"))
+		tp_rev = qpnp_read_byte(regmap, base + REVID_TP_REV);
+	else
+		tp_rev = -EINVAL;
+
+	revid_chip = devm_kzalloc(&pdev->dev, sizeof(struct revid_chip),
+						GFP_KERNEL);
+	if (!revid_chip)
+		return -ENOMEM;
+
+	revid_chip->dev_node = pdev->dev.of_node;
+	revid_chip->data.rev1 = rev1;
+	revid_chip->data.rev2 = rev2;
+	revid_chip->data.rev3 = rev3;
+	revid_chip->data.rev4 = rev4;
+	revid_chip->data.pmic_subtype = pmic_subtype;
+	revid_chip->data.pmic_type = pmic_type;
+	revid_chip->data.fab_id = fab_id;
+	revid_chip->data.tp_rev = tp_rev;
+
+	if (pmic_subtype < ARRAY_SIZE(pmic_names))
+		revid_chip->data.pmic_name = pmic_names[pmic_subtype];
+	else
+		revid_chip->data.pmic_name = pmic_names[0];
+
+	mutex_lock(&revid_chips_lock);
+	list_add(&revid_chip->link, &revid_chips);
+	mutex_unlock(&revid_chips_lock);
+
+	option1 = pmic_status & 0x3;
+	option2 = (pmic_status >> 2) & 0x3;
+	option3 = (pmic_status >> 4) & 0x3;
+	option4 = (pmic_status >> 6) & 0x3;
+
+	build_pmic_string(pmic_string, PMIC_STRING_MAXLENGTH,
+			  to_spmi_device(pdev->dev.parent)->usid,
+			  pmic_subtype, rev1, rev2, rev3, rev4);
+	pr_info("%s options: %d, %d, %d, %d\n",
+			pmic_string, option1, option2, option3, option4);
+	return 0;
+}
+
+static struct platform_driver qpnp_revid_driver = {
+	.probe	= qpnp_revid_probe,
+	.driver	= {
+		.name		= QPNP_REVID_DEV_NAME,
+		.owner		= THIS_MODULE,
+		.of_match_table	= qpnp_revid_match_table,
+	},
+};
+
+static int __init qpnp_revid_init(void)
+{
+	return platform_driver_register(&qpnp_revid_driver);
+}
+
+static void __exit qpnp_revid_exit(void)
+{
+	platform_driver_unregister(&qpnp_revid_driver);
+}
+
+subsys_initcall(qpnp_revid_init);
+module_exit(qpnp_revid_exit);
+
+MODULE_DESCRIPTION("QPNP REVID DRIVER");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:" QPNP_REVID_DEV_NAME);
diff -Nruw linux-4.4.115-fbx/drivers/platform/msm./seemp_core/Makefile linux-4.4.115-fbx/drivers/platform/msm/seemp_core/Makefile
--- linux-4.4.115-fbx/drivers/platform/msm./seemp_core/Makefile	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/platform/msm/seemp_core/Makefile	2019-01-22 16:16:26.187270712 +0100
@@ -0,0 +1,3 @@
+ccflags-y += -Iinclude/linux
+obj-$(CONFIG_SEEMP_CORE) += seemp_core.o
+seemp_core-objs := seemp_logk.o seemp_ringbuf.o seemp_event_encoder.o
\ No newline at end of file
diff -Nruw linux-4.4.115-fbx/drivers/platform/msm./seemp_core/seemp_event_encoder.c linux-4.4.115-fbx/drivers/platform/msm/seemp_core/seemp_event_encoder.c
--- linux-4.4.115-fbx/drivers/platform/msm./seemp_core/seemp_event_encoder.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/platform/msm/seemp_core/seemp_event_encoder.c	2019-01-22 16:16:26.187270712 +0100
@@ -0,0 +1,181 @@
+/*
+ * Copyright (c) 2015, 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define PROVIDE_PARAM_ID
+
+#include "seemp_logk.h"
+#include <linux/seemp_param_id.h>
+#include "seemp_event_encoder.h"
+
+static char *scan_id(char *s);
+static void encode_seemp_section(char *section_start, char *section_eq,
+				char *section_end, bool param, bool numeric,
+				int id, __s32 numeric_value);
+
+static void check_param_range(char *section_eq, bool param,
+	bool *numeric, int val_len, __s32 *numeric_value)
+{
+	long long_value = 0;
+
+	if (param && *numeric) {
+		/* check length is 2..6 chars, i.e. in [-99999, 999999] */
+		*numeric = (val_len >= 2) && (val_len <= 6);
+		if (*numeric) {
+			if (kstrtol(section_eq + 1, 10, &long_value) != 0) {
+				*numeric = false;
+			} else {
+				*numeric_value = (__s32)long_value;
+				/* check that the value fits in 16 bits */
+				*numeric = (long_value >= -32768) &&
+					(long_value <= 32767);
+			}
+		}
+	}
+}
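+
+/*
+ * Worked example (editorial): for a value string "-1234", val_len is 5,
+ * which passes the 2..6 length check; kstrtol() yields -1234, which lies
+ * in [-32768, 32767], so *numeric stays true and *numeric_value becomes
+ * -1234. For "999999" (val_len 6), kstrtol() succeeds but the 16-bit
+ * range check clears *numeric again.
+ */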
+
+void encode_seemp_params(struct seemp_logk_blk *blk)
+{
+	struct seemp_logk_blk tmp;
+	char *s = 0;
+	char *msg_section_start = 0;
+	char *msg_section_eq = 0;
+	char *msg_s = 0;
+
+	memcpy(tmp.payload.msg, blk->payload.msg, BLK_MAX_MSG_SZ);
+	s = tmp.payload.msg + 1;
+	tmp.payload.msg[BLK_MAX_MSG_SZ - 1] = 0; /* zero-terminate */
+
+	while (true) {
+		char *section_start = s;
+		char *section_eq    = scan_id(s);
+		bool  param         = (section_eq - section_start >= 2) &&
+			(*section_eq == '=') && (section_eq[1] != ' ');
+		bool  numeric       = false;
+		int   id            = -1;
+		__s32 numeric_value = 0;
+		int id_len;
+		int val_len;
+		char ch;
+
+		if (param) {
+			id = param_id_index(section_start, section_eq);
+
+			if (id < 0)
+				param = false;
+		}
+
+		if (!param) {
+			s = section_eq;
+			while ((*s != 0) && (*s != ','))
+				s++;
+		} else {
+			s = section_eq + 1; /* equal sign */
+			numeric = (*s == '-') || ((*s >= '0') && (*s <= '9'));
+
+			if (numeric)
+				s++; /* first char of number */
+
+			while ((*s != 0) && (*s != ',')) {
+				if (*s == '=')
+					param   = false;
+				else if (!((*s >= '0') && (*s <= '9')))
+					numeric = false;
+
+				s++;
+			}
+
+			if (param) {
+				id_len  = section_eq - section_start;
+				val_len = s - (section_eq + 1);
+				param = (id_len >= 2) && (id_len <= 31)
+							&& (val_len <= 31);
+				ch = *s;
+				*s = 0;
+
+				check_param_range(section_eq, param,
+					&numeric, val_len, &numeric_value);
+				*s = ch;
+			}
+		}
+
+		msg_section_start = blk->payload.msg + (section_start -
+				tmp.payload.msg);
+		msg_section_eq = blk->payload.msg + (section_eq -
+				tmp.payload.msg);
+		msg_s = blk->payload.msg + (s - tmp.payload.msg);
+		encode_seemp_section(msg_section_start, msg_section_eq,
+				msg_s, param, numeric, id, numeric_value);
+
+		if (*s == 0)
+			break;
+
+		s++;
+	}
+
+	blk->len = s - tmp.payload.msg;
+}
+
+static char *scan_id(char *s)
+{
+	while ((*s == '_') ||
+		((*s >= 'A') && (*s <= 'Z')) ||
+		((*s >= 'a') && (*s <= 'z'))) {
+		s++;
+	}
+
+	return s;
+}
+
+static void encode_seemp_section(char *section_start, char *section_eq,
+				char *section_end, bool param, bool numeric,
+				int id, __s32 numeric_value)
+{
+	param = param && (section_eq + 1 < section_end);
+
+	if (!param) {
+		/* Encode skip section */
+		int  skip_len	= section_end - section_start;
+		char skip_len_hi = skip_len & 0xE0;
+		char skip_len_lo = skip_len & 0x1F;
+
+		if (skip_len < 32) {
+			section_start[-1] = 0xC0 | skip_len_lo;
+							/* [1:1:0:0 0000] */
+		} else {
+			section_start[-1] = 0xE0 | skip_len_lo;
+							/* [1:1:1:0 0000] */
+
+			if (skip_len_hi & 0x20)
+				section_start[0] |= 0x80;
+
+			if (skip_len_hi & 0x40)
+				section_start[1] |= 0x80;
+
+			if (skip_len_hi & 0x80)
+				section_start[2] |= 0x80;
+		}
+	} else {
+		/* Encode ID=VALUE section */
+		char id_len            = section_eq  - section_start;
+		char value_len         = section_end - (section_eq + 1);
+
+		section_start[-1]      = 0x00 | id_len;
+		*(__s16 *)section_start = id;
+		section_eq[0]          = (!numeric ? 0x80 : 0x00) | value_len;
+
+		if (numeric)
+			*(__s16 *)(section_eq + 1) = numeric_value;
+	}
+}
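+
+/*
+ * Worked example (editorial): a skip section of 40 bytes has
+ * skip_len_lo = 40 & 0x1f = 0x08 and skip_len_hi = 40 & 0xe0 = 0x20, so
+ * the byte before the section becomes 0xe0 | 0x08 = 0xe8 and bit 7 of
+ * section_start[0] is set to carry the 0x20 bit. A skip of 20 bytes fits
+ * in five bits and is encoded as the single byte 0xc0 | 20 = 0xd4.
+ */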
diff -Nruw linux-4.4.115-fbx/drivers/platform/msm./seemp_core/seemp_event_encoder.h linux-4.4.115-fbx/drivers/platform/msm/seemp_core/seemp_event_encoder.h
--- linux-4.4.115-fbx/drivers/platform/msm./seemp_core/seemp_event_encoder.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/platform/msm/seemp_core/seemp_event_encoder.h	2019-01-22 16:16:26.187270712 +0100
@@ -0,0 +1,21 @@
+/*
+ * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __SEEMP_EVENT_ENCODER_H__
+#define __SEEMP_EVENT_ENCODER_H__
+
+#include "seemp_logk.h"
+
+void encode_seemp_params(struct seemp_logk_blk *blk);
+
+#endif /* __SEEMP_EVENT_ENCODER_H__ */
diff -Nruw linux-4.4.115-fbx/drivers/platform/msm./seemp_core/seemp_logk.c linux-4.4.115-fbx/drivers/platform/msm/seemp_core/seemp_logk.c
--- linux-4.4.115-fbx/drivers/platform/msm./seemp_core/seemp_logk.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/platform/msm/seemp_core/seemp_logk.c	2019-01-22 16:16:26.187270712 +0100
@@ -0,0 +1,687 @@
+/*
+ * Copyright (c) 2014-2015, 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "seemp: %s: " fmt, __func__
+
+#include "seemp_logk.h"
+#include "seemp_ringbuf.h"
+
+#ifndef VM_RESERVED
+#define VM_RESERVED (VM_DONTEXPAND | VM_DONTDUMP)
+#endif
+
+#define MASK_BUFFER_SIZE 256
+#define FOUR_MB 4
+#define YEAR_BASE 1900
+
+static struct seemp_logk_dev *slogk_dev;
+
+static unsigned int ring_sz = FOUR_MB;
+
+/*
+ * default is best-effort: apps do not get blocked
+ */
+static unsigned int block_apps;
+
+
+/*
+ * When this flag is set, kmalloc is used for the ring buffer
+ * allocation; otherwise vmalloc is used (the default).
+ * kmalloc has a limit of 4MB.
+ */
+unsigned int kmalloc_flag;
+
+static struct class *cl;
+
+static rwlock_t filter_lock;
+static struct seemp_source_mask *pmask;
+static unsigned int num_sources;
+
+static long seemp_logk_reserve_rdblks(
+		struct seemp_logk_dev *sdev, unsigned long arg);
+static long seemp_logk_set_mask(unsigned long arg);
+static long seemp_logk_set_mapping(unsigned long arg);
+static long seemp_logk_check_filter(unsigned long arg);
+
+void* (*seemp_logk_kernel_begin)(char **buf);
+
+void (*seemp_logk_kernel_end)(void *blck);
+
+/*
+ * kernel logging is done in four steps:
+ * (1)  fetch a block, fill everything except payload.
+ * (2)  return payload pointer to the caller.
+ * (3)  caller fills its data directly into the payload area.
+ * (4)  caller invokes finish_record() to finish writing.
+ */
+void *seemp_logk_kernel_start_record(char **buf)
+{
+	struct seemp_logk_blk *blk;
+	struct timespec now;
+	struct tm ts;
+	int idx;
+	int ret;
+
+	DEFINE_WAIT(write_wait);
+	ret = 0;
+	idx = 0;
+	now = current_kernel_time();
+	blk = ringbuf_fetch_wr_block(slogk_dev);
+	if (!blk) {
+		/*
+		 * there is no blk to write;
+		 * if block_apps == 0, quietly return
+		 */
+		if (!block_apps) {
+			*buf = NULL;
+			return NULL;
+		}
+		/*else wait for the blks to be available*/
+		while (1) {
+			mutex_lock(&slogk_dev->lock);
+			prepare_to_wait(&slogk_dev->writers_wq,
+				&write_wait, TASK_INTERRUPTIBLE);
+			ret = (slogk_dev->num_write_avail_blks <= 0);
+			if (!ret) {
+				/* don't have to wait*/
+				break;
+			}
+			mutex_unlock(&slogk_dev->lock);
+			if (signal_pending(current)) {
+				ret = -EINTR;
+				break;
+			}
+			schedule();
+		}
+
+		finish_wait(&slogk_dev->writers_wq, &write_wait);
+		if (ret)
+			return NULL;
+
+		idx = slogk_dev->write_idx;
+		slogk_dev->write_idx =
+			(slogk_dev->write_idx + 1) % slogk_dev->num_tot_blks;
+		slogk_dev->num_write_avail_blks--;
+		slogk_dev->num_write_in_prog_blks++;
+		slogk_dev->num_writers++;
+
+		blk = &slogk_dev->ring[idx];
+		/*mark block invalid*/
+		blk->status = 0x0;
+		mutex_unlock(&slogk_dev->lock);
+	}
+
+	blk->version = OBSERVER_VERSION;
+	blk->pid = current->tgid;
+	blk->tid = current->pid;
+	blk->uid = (current_uid()).val;
+	blk->sec = now.tv_sec;
+	blk->nsec = now.tv_nsec;
+	strlcpy(blk->appname, current->comm, TASK_COMM_LEN);
+	time_to_tm(now.tv_sec, 0, &ts);
+	ts.tm_year += YEAR_BASE;
+	ts.tm_mon += 1;
+
+	snprintf(blk->ts, TS_SIZE, "%04ld-%02d-%02d %02d:%02d:%02d",
+			ts.tm_year, ts.tm_mon, ts.tm_mday,
+			ts.tm_hour, ts.tm_min, ts.tm_sec);
+
+	*buf = blk->payload.msg;
+
+	return blk;
+}
+
+void seemp_logk_kernel_end_record(void *blck)
+{
+	struct seemp_logk_blk *blk = (struct seemp_logk_blk *)blck;
+
+	if (blk) {
+		/*update status at the very end*/
+		blk->status |= 0x1;
+		blk->uid =  (current_uid()).val;
+
+		ringbuf_finish_writer(slogk_dev, blk);
+	}
+}
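+
+/*
+ * Illustrative in-kernel usage sketch (editorial): callers are expected
+ * to go through the seemp_logk_kernel_begin/seemp_logk_kernel_end hooks,
+ * which point at the two functions above once seemp_logk_attach() has
+ * run. The event text written here is hypothetical.
+ */
+#if 0
+static void example_log_event(void)
+{
+	char *buf;
+	void *blk;
+
+	if (!seemp_logk_kernel_begin)
+		return;
+
+	blk = seemp_logk_kernel_begin(&buf);
+	if (!blk)
+		return; /* ring full in best-effort mode: drop the event */
+
+	scnprintf(buf, BLK_MAX_MSG_SZ, "example_event,value=%d", 42);
+	seemp_logk_kernel_end(blk);
+}
+#endif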
+
+static int seemp_logk_usr_record(const char __user *buf, size_t count)
+{
+	struct seemp_logk_blk *blk;
+	struct seemp_logk_blk usr_blk;
+	struct seemp_logk_blk *local_blk;
+	struct timespec now;
+	struct tm ts;
+	int idx, ret;
+
+	DEFINE_WAIT(write_wait);
+	if (buf) {
+		local_blk = (struct seemp_logk_blk *)buf;
+		if (copy_from_user(&usr_blk.pid, &local_blk->pid,
+					sizeof(usr_blk.pid)) != 0)
+			return -EFAULT;
+		if (copy_from_user(&usr_blk.tid, &local_blk->tid,
+					sizeof(usr_blk.tid)) != 0)
+			return -EFAULT;
+		if (copy_from_user(&usr_blk.uid, &local_blk->uid,
+					sizeof(usr_blk.uid)) != 0)
+			return -EFAULT;
+		if (copy_from_user(&usr_blk.len, &local_blk->len,
+					sizeof(usr_blk.len)) != 0)
+			return -EFAULT;
+		if (copy_from_user(&usr_blk.payload, &local_blk->payload,
+					sizeof(struct blk_payload)) != 0)
+			return -EFAULT;
+	} else {
+		return -EFAULT;
+	}
+	idx = ret = 0;
+	now = current_kernel_time();
+	blk = ringbuf_fetch_wr_block(slogk_dev);
+	if (!blk) {
+		if (!block_apps)
+			return 0;
+		while (1) {
+			mutex_lock(&slogk_dev->lock);
+			prepare_to_wait(&slogk_dev->writers_wq,
+					&write_wait,
+					TASK_INTERRUPTIBLE);
+			ret = (slogk_dev->num_write_avail_blks <= 0);
+			if (!ret)
+				break;
+			mutex_unlock(&slogk_dev->lock);
+			if (signal_pending(current)) {
+				ret = -EINTR;
+				break;
+			}
+			schedule();
+		}
+		finish_wait(&slogk_dev->writers_wq, &write_wait);
+		if (ret)
+			return -EINTR;
+
+		idx = slogk_dev->write_idx;
+		slogk_dev->write_idx =
+			(slogk_dev->write_idx + 1) % slogk_dev->num_tot_blks;
+		slogk_dev->num_write_avail_blks--;
+		slogk_dev->num_write_in_prog_blks++;
+		slogk_dev->num_writers++;
+		blk = &slogk_dev->ring[idx];
+		/*mark block invalid*/
+		blk->status = 0x0;
+		mutex_unlock(&slogk_dev->lock);
+	}
+	if (usr_blk.len > sizeof(struct blk_payload) - 1)
+		usr_blk.len = sizeof(struct blk_payload) - 1;
+
+	memcpy(&blk->payload, &usr_blk.payload, sizeof(struct blk_payload));
+	blk->pid = usr_blk.pid;
+	blk->uid = usr_blk.uid;
+	blk->tid = usr_blk.tid;
+	blk->sec = now.tv_sec;
+	blk->nsec = now.tv_nsec;
+	time_to_tm(now.tv_sec, 0, &ts);
+	ts.tm_year += YEAR_BASE;
+	ts.tm_mon += 1;
+	snprintf(blk->ts, TS_SIZE, "%02ld-%02d-%02d %02d:%02d:%02d",
+			ts.tm_year, ts.tm_mon, ts.tm_mday,
+			ts.tm_hour, ts.tm_min, ts.tm_sec);
+	strlcpy(blk->appname, current->comm, TASK_COMM_LEN);
+	blk->status |= 0x1;
+	ringbuf_finish_writer(slogk_dev, blk);
+	return ret;
+}
+
+static void seemp_logk_attach(void)
+{
+	seemp_logk_kernel_end = seemp_logk_kernel_end_record;
+	seemp_logk_kernel_begin = seemp_logk_kernel_start_record;
+}
+
+static void seemp_logk_detach(void)
+{
+	seemp_logk_kernel_begin = NULL;
+	seemp_logk_kernel_end = NULL;
+}
+
+static ssize_t
+seemp_logk_write(struct file *file, const char __user *buf, size_t count,
+		loff_t *ppos)
+{
+	return seemp_logk_usr_record(buf, count);
+}
+
+static int
+seemp_logk_open(struct inode *inode, struct file *filp)
+{
+	int ret;
+
+	/*disallow seeks on this file*/
+	ret = nonseekable_open(inode, filp);
+	if (ret) {
+		pr_err("ret= %d\n", ret);
+		return ret;
+	}
+
+	slogk_dev->minor = iminor(inode);
+	filp->private_data = slogk_dev;
+
+	return 0;
+}
+
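+/*
+ * Note (editorial): the return value below is inverted relative to the
+ * raw bit: a cleared bit in the mask vector enables the source (see how
+ * seemp_logk_set_mask() stores the result in pmask[i].isOn).
+ */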
+static bool seemp_logk_get_bit_from_vector(__u8 *pVec, __u32 index)
+{
+	unsigned int byte_num = index / 8;
+	unsigned int bit_num = index % 8;
+	unsigned char byte;
+
+	if (byte_num >= MASK_BUFFER_SIZE)
+		return false;
+
+	byte = pVec[byte_num];
+
+	return !(byte & (1 << bit_num));
+}
+
+static long seemp_logk_ioctl(struct file *filp, unsigned int cmd,
+		unsigned long arg)
+{
+	struct seemp_logk_dev *sdev;
+	int ret;
+
+	sdev = (struct seemp_logk_dev *) filp->private_data;
+
+	if (cmd == SEEMP_CMD_RESERVE_RDBLKS) {
+		return seemp_logk_reserve_rdblks(sdev, arg);
+	} else if (cmd == SEEMP_CMD_RELEASE_RDBLKS) {
+		mutex_lock(&sdev->lock);
+		sdev->read_idx = (sdev->read_idx + sdev->num_read_in_prog_blks)
+			% sdev->num_tot_blks;
+		sdev->num_write_avail_blks += sdev->num_read_in_prog_blks;
+		ret = sdev->num_read_in_prog_blks;
+		sdev->num_read_in_prog_blks = 0;
+		/*wake up any waiting writers*/
+		mutex_unlock(&sdev->lock);
+		if (ret && block_apps)
+			wake_up_interruptible(&sdev->writers_wq);
+	} else if (cmd == SEEMP_CMD_GET_RINGSZ) {
+		if (copy_to_user((unsigned int *)arg, &sdev->ring_sz,
+				sizeof(unsigned int)))
+			return -EFAULT;
+	} else if (cmd == SEEMP_CMD_GET_BLKSZ) {
+		if (copy_to_user((unsigned int *)arg, &sdev->blk_sz,
+				sizeof(unsigned int)))
+			return -EFAULT;
+	} else if (cmd == SEEMP_CMD_SET_MASK) {
+		return seemp_logk_set_mask(arg);
+	} else if (cmd == SEEMP_CMD_SET_MAPPING) {
+		return seemp_logk_set_mapping(arg);
+	} else if (cmd == SEEMP_CMD_CHECK_FILTER) {
+		return seemp_logk_check_filter(arg);
+	} else {
+		pr_err("Invalid Request %X\n", cmd);
+		return -ENOIOCTLCMD;
+	}
+	return 0;
+}
+
+static long seemp_logk_reserve_rdblks(
+		struct seemp_logk_dev *sdev, unsigned long arg)
+{
+	int ret;
+	struct read_range rrange;
+
+	DEFINE_WAIT(read_wait);
+	mutex_lock(&sdev->lock);
+	if (sdev->num_writers > 0 || sdev->num_read_avail_blks <= 0) {
+		ret = -EPERM;
+		pr_debug("(reserve): blocking, cannot read.\n");
+		pr_debug("num_writers=%d num_read_avail_blks=%d\n",
+				sdev->num_writers,
+				sdev->num_read_avail_blks);
+		mutex_unlock(&sdev->lock);
+		/*
+		 * unlock the device
+		 * wait on a wait queue
+		 * after wait, grab the dev lock again
+		 */
+		while (1) {
+			mutex_lock(&sdev->lock);
+			prepare_to_wait(&sdev->readers_wq, &read_wait,
+					TASK_INTERRUPTIBLE);
+			ret = (sdev->num_writers > 0 ||
+					sdev->num_read_avail_blks <= 0);
+			if (!ret) {
+				/*don't have to wait*/
+				break;
+			}
+			mutex_unlock(&sdev->lock);
+			if (signal_pending(current)) {
+				ret = -EINTR;
+				break;
+			}
+			schedule();
+		}
+
+		finish_wait(&sdev->readers_wq, &read_wait);
+		if (ret)
+			return -EINTR;
+	}
+
+	/*sdev->lock is held at this point*/
+	sdev->num_read_in_prog_blks = sdev->num_read_avail_blks;
+	sdev->num_read_avail_blks = 0;
+	rrange.start_idx = sdev->read_idx;
+	rrange.num = sdev->num_read_in_prog_blks;
+	mutex_unlock(&sdev->lock);
+
+	if (copy_to_user((unsigned int *)arg, &rrange,
+			sizeof(struct read_range)))
+		return -EFAULT;
+
+	return 0;
+}
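+
+/*
+ * Illustrative user-space reader loop (editorial sketch; error handling
+ * omitted). The device node name follows seemp_LOGK_DEV_NAME, so the
+ * path below assumes a standard /dev layout:
+ *
+ *	int fd = open("/dev/seemplog", O_RDWR);
+ *	unsigned int ring_sz, blk_sz;
+ *
+ *	ioctl(fd, SEEMP_CMD_GET_RINGSZ, &ring_sz);
+ *	ioctl(fd, SEEMP_CMD_GET_BLKSZ, &blk_sz);
+ *	char *ring = mmap(NULL, ring_sz, PROT_READ | PROT_WRITE,
+ *			  MAP_SHARED, fd, 0);
+ *	for (;;) {
+ *		struct read_range rr;
+ *
+ *		ioctl(fd, SEEMP_CMD_RESERVE_RDBLKS, &rr);
+ *		... consume rr.num blocks starting at blk_sz * rr.start_idx,
+ *		    wrapping at ring_sz ...
+ *		ioctl(fd, SEEMP_CMD_RELEASE_RDBLKS, 0);
+ *	}
+ */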
+
+static long seemp_logk_set_mask(unsigned long arg)
+{
+	__u8 buffer[MASK_BUFFER_SIZE];
+	int i;
+	unsigned int num_elements;
+
+	if (copy_from_user(&num_elements,
+		(unsigned int __user *) arg, sizeof(unsigned int)))
+		return -EFAULT;
+
+	read_lock(&filter_lock);
+	if (num_sources == 0) {
+		read_unlock(&filter_lock);
+		return -EINVAL;
+	}
+
+	if (num_elements == 0 ||
+		MASK_BUFFER_SIZE < DIV_ROUND_UP(num_sources, 8)) {
+		read_unlock(&filter_lock);
+		return -EINVAL;
+	}
+
+	if (copy_from_user(buffer,
+			(__u8 *)arg, DIV_ROUND_UP(num_sources, 8))) {
+		read_unlock(&filter_lock);
+		return -EFAULT;
+	}
+
+	read_unlock(&filter_lock);
+	write_lock(&filter_lock);
+	if (num_elements != num_sources) {
+		write_unlock(&filter_lock);
+		return -EPERM;
+	}
+
+	for (i = 0; i < num_sources; i++) {
+		pmask[i].isOn =
+				seemp_logk_get_bit_from_vector(
+					(__u8 *)buffer, i);
+	}
+	write_unlock(&filter_lock);
+	return 0;
+}
+
+static long seemp_logk_set_mapping(unsigned long arg)
+{
+	__u32 num_elements;
+	__u32 *pbuffer;
+	int i;
+	struct seemp_source_mask *pnewmask;
+
+	if (copy_from_user(&num_elements,
+					(__u32 __user *)arg, sizeof(__u32)))
+		return -EFAULT;
+
+	if (num_elements == 0 || num_elements >
+			UINT_MAX / sizeof(struct seemp_source_mask))
+		return -EFAULT;
+
+	write_lock(&filter_lock);
+	if (pmask != NULL) {
+		/*
+		 * Mask is getting set again.
+		 * seemp_core was probably restarted.
+		 */
+		struct seemp_source_mask *ptempmask;
+
+		num_sources = 0;
+		ptempmask = pmask;
+		pmask = NULL;
+		kfree(ptempmask);
+	}
+	write_unlock(&filter_lock);
+	pbuffer = kmalloc(sizeof(struct seemp_source_mask)
+				* num_elements, GFP_KERNEL);
+	if (!pbuffer)
+		return -ENOMEM;
+
+	/*
+	 * Use our new table as scratch space for now.
+	 * We copy an ordered list of hash values into our buffer.
+	 */
+	if (copy_from_user(pbuffer, &((__u32 __user *)arg)[1],
+					num_elements*sizeof(unsigned int))) {
+		kfree(pbuffer);
+		return -EFAULT;
+	}
+	/*
+	 * We arrange the user data into a more usable form.
+	 * This is done in-place.
+	 */
+	pnewmask = (struct seemp_source_mask *) pbuffer;
+	for (i = num_elements - 1; i >= 0; i--) {
+		pnewmask[i].hash = pbuffer[i];
+		/* Observer is off by default*/
+		pnewmask[i].isOn = 0;
+	}
+	write_lock(&filter_lock);
+	pmask = pnewmask;
+	num_sources = num_elements;
+	write_unlock(&filter_lock);
+	return 0;
+}
+
+static long seemp_logk_check_filter(unsigned long arg)
+{
+	int i;
+	unsigned int hash = (unsigned int) arg;
+
+	/*
+	 * This lock may be a bit long.
+	 * If it is a problem, it can be fixed.
+	 */
+	read_lock(&filter_lock);
+	for (i = 0; i < num_sources; i++) {
+		if (hash == pmask[i].hash) {
+			int result = pmask[i].isOn;
+
+			read_unlock(&filter_lock);
+			return result;
+		}
+	}
+	read_unlock(&filter_lock);
+	return 0;
+}
+
+static int seemp_logk_mmap(struct file *filp,
+		struct vm_area_struct *vma)
+{
+	int ret;
+	char *vptr;
+	unsigned long length, pfn;
+	unsigned long start = vma->vm_start;
+
+	length = vma->vm_end - vma->vm_start;
+
+	if (length > (unsigned long) slogk_dev->ring_sz) {
+		pr_err("len check failed\n");
+		return -EIO;
+	}
+
+	vma->vm_flags |= VM_RESERVED | VM_SHARED;
+	vptr = (char *) slogk_dev->ring;
+	ret = 0;
+
+	if (kmalloc_flag) {
+		ret = remap_pfn_range(vma,
+				start,
+				virt_to_phys((void *)
+				((unsigned long)slogk_dev->ring)) >> PAGE_SHIFT,
+				length,
+				vma->vm_page_prot);
+		if (ret != 0) {
+			pr_err("remap_pfn_range() fails with ret = %d\n",
+				ret);
+			return -EAGAIN;
+		}
+	} else {
+		while (length > 0) {
+			pfn = vmalloc_to_pfn(vptr);
+
+			ret = remap_pfn_range(vma, start, pfn, PAGE_SIZE,
+					vma->vm_page_prot);
+			if (ret < 0) {
+				pr_err("remap_pfn_range() fails with ret = %d\n",
+					ret);
+				return ret;
+			}
+			start += PAGE_SIZE;
+			vptr += PAGE_SIZE;
+			length -= PAGE_SIZE;
+		}
+	}
+
+	return 0;
+}
+
+static const struct file_operations seemp_logk_fops = {
+	.write = seemp_logk_write,
+	.open = seemp_logk_open,
+	.unlocked_ioctl = seemp_logk_ioctl,
+	.compat_ioctl = seemp_logk_ioctl,
+	.mmap = seemp_logk_mmap,
+};
+
+__init int seemp_logk_init(void)
+{
+	int ret;
+	int devno = 0;
+
+	num_sources = 0;
+	kmalloc_flag = 0;
+	block_apps = 0;
+	pmask = NULL;
+
+	if (kmalloc_flag && ring_sz > FOUR_MB) {
+		pr_err("kmalloc cannot allocate > 4MB\n");
+		return -ENOMEM;
+	}
+
+	ring_sz = ring_sz * SZ_1M;
+	if (ring_sz == 0) {
+		pr_err("Invalid ring_sz=%u\n", ring_sz);
+		return -EINVAL;
+	}
+
+	slogk_dev = kmalloc(sizeof(*slogk_dev), GFP_KERNEL);
+	if (slogk_dev == NULL)
+		return -ENOMEM;
+
+	slogk_dev->ring_sz = ring_sz;
+	slogk_dev->blk_sz = sizeof(struct seemp_logk_blk);
+	/*initialize ping-pong buffers*/
+	ret = ringbuf_init(slogk_dev);
+	if (ret < 0) {
+		pr_err("Init Failed, ret = %d\n", ret);
+		goto pingpong_fail;
+	}
+
+	ret = alloc_chrdev_region(&devno, 0, seemp_LOGK_NUM_DEVS,
+			seemp_LOGK_DEV_NAME);
+	if (ret < 0) {
+		pr_err("alloc_chrdev_region failed with ret = %d\n",
+				ret);
+		goto register_fail;
+	}
+
+	slogk_dev->major = MAJOR(devno);
+
+	pr_debug("logk: major# = %d\n", slogk_dev->major);
+
+	cl = class_create(THIS_MODULE, seemp_LOGK_DEV_NAME);
+	if (IS_ERR(cl)) {
+		pr_err("class create failed\n");
+		goto cdev_fail;
+	}
+	if (IS_ERR(device_create(cl, NULL, devno, NULL,
+			seemp_LOGK_DEV_NAME))) {
+		pr_err("device create failed\n");
+		goto class_destroy_fail;
+	}
+	cdev_init(&(slogk_dev->cdev), &seemp_logk_fops);
+
+	slogk_dev->cdev.owner = THIS_MODULE;
+	ret = cdev_add(&(slogk_dev->cdev), MKDEV(slogk_dev->major, 0), 1);
+	if (ret) {
+		pr_err("cdev_add failed with ret = %d", ret);
+		goto class_destroy_fail;
+	}
+
+	seemp_logk_attach();
+	mutex_init(&slogk_dev->lock);
+	init_waitqueue_head(&slogk_dev->readers_wq);
+	init_waitqueue_head(&slogk_dev->writers_wq);
+	rwlock_init(&filter_lock);
+	return 0;
+class_destroy_fail:
+	class_destroy(cl);
+cdev_fail:
+	unregister_chrdev_region(devno, seemp_LOGK_NUM_DEVS);
+register_fail:
+	ringbuf_cleanup(slogk_dev);
+pingpong_fail:
+	kfree(slogk_dev);
+	return -EPERM;
+}
+
+__exit void seemp_logk_cleanup(void)
+{
+	dev_t devno = MKDEV(slogk_dev->major, slogk_dev->minor);
+
+	seemp_logk_detach();
+
+	cdev_del(&slogk_dev->cdev);
+
+	unregister_chrdev_region(devno, seemp_LOGK_NUM_DEVS);
+	ringbuf_cleanup(slogk_dev);
+	kfree(slogk_dev);
+
+	kfree(pmask);
+	pmask = NULL;
+}
+
+module_init(seemp_logk_init);
+module_exit(seemp_logk_cleanup);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("seemp Observer");
+
diff -Nruw linux-4.4.115-fbx/drivers/platform/msm./seemp_core/seemp_logk.h linux-4.4.115-fbx/drivers/platform/msm/seemp_core/seemp_logk.h
--- linux-4.4.115-fbx/drivers/platform/msm./seemp_core/seemp_logk.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/platform/msm/seemp_core/seemp_logk.h	2019-01-22 16:16:26.187270712 +0100
@@ -0,0 +1,161 @@
+/*
+ * Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __SEEMP_LOGK_H__
+#define __SEEMP_LOGK_H__
+
+#define OBSERVER_VERSION 0x01
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/ioctl.h>
+#include <linux/cdev.h>
+#include <linux/device.h>
+#include <linux/poll.h>
+#include <linux/wait.h>
+#include <linux/fs.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/mm.h>
+#include <linux/sched.h>
+#include <linux/mutex.h>
+#include <linux/vmalloc.h>
+#include <asm/ioctls.h>
+
+#define seemp_LOGK_NUM_DEVS 1
+#define seemp_LOGK_DEV_NAME "seemplog"
+
+/*
+ * The logcat driver on Android uses four 256 KB ring buffers;
+ * here, we use two ring buffers of the same size, which we
+ * consider reasonable.
+ */
+#define FULL_BUF_SIZE (64 * 1024 * 1024)
+#define HALF_BUF_SIZE (32 * 1024 * 1024)
+#define FULL_BLOCKS (8 * 1024)
+#define HALF_BLOCKS (4 * 1024)
+
+#define READER_NOT_READY 0
+#define READER_READY 1
+
+#define MAGIC 'z'
+
+#define SEEMP_CMD_RESERVE_RDBLKS     _IOR(MAGIC, 1, int)
+#define SEEMP_CMD_RELEASE_RDBLKS     _IO(MAGIC, 2)
+#define SEEMP_CMD_GET_RINGSZ     _IOR(MAGIC, 3, int)
+#define SEEMP_CMD_GET_BLKSZ     _IOR(MAGIC, 4, int)
+#define SEEMP_CMD_SET_MASK          _IO(MAGIC, 5)
+#define SEEMP_CMD_SET_MAPPING       _IO(MAGIC, 6)
+#define SEEMP_CMD_CHECK_FILTER      _IOR(MAGIC, 7, int)
+
+struct read_range {
+	int start_idx;
+	int num;
+};
+
+struct seemp_logk_dev {
+	unsigned int major;
+	unsigned int minor;
+
+	struct cdev cdev;
+	struct class *cls;
+	/*the full buffer*/
+	struct seemp_logk_blk *ring;
+	/*an array of blks*/
+	unsigned int ring_sz;
+	unsigned int blk_sz;
+
+	int num_tot_blks;
+
+	int num_write_avail_blks;
+	int num_write_in_prog_blks;
+
+	int num_read_avail_blks;
+	int num_read_in_prog_blks;
+
+	int num_writers;
+
+	/*
+	 * there is always one reader
+	 * which is the observer daemon
+	 * therefore there is no necessity
+	 * for num_readers variable
+	 */
+
+	/*
+	 * read_idx and write_idx advance from zero to num_tot_blks - 1
+	 * and wrap back to zero, driven by the reader's and writers'
+	 * accesses
+	 */
+	int read_idx;
+
+	int write_idx;
+
+	/*
+	 * wait queues
+	 * readers_wq: implement wait for readers
+	 * writers_wq: implement wait for writers
+	 *
+	 * whether writers are blocked or not is driven by the policy:
+	 * case 1: (best_effort_logging == 1)
+	 *         writers are not blocked, and
+	 *         when there is no mem in the ring to store logs,
+	 *         the logs are simply dropped.
+	 * case 2: (best_effort_logging == 0)
+	 *         when there is no mem in the ring to store logs,
+	 *         the process gets blocked until there is space.
+	 */
+	wait_queue_head_t readers_wq;
+	wait_queue_head_t writers_wq;
+
+	/*
+	 * protects everything in the device,
+	 * including the ring buffer and all the num_* counters
+	 */
+	struct mutex lock;
+};
+
+#define BLK_SIZE       256
+#define BLK_HDR_SIZE   64
+#define TS_SIZE        20
+#define BLK_MAX_MSG_SZ (BLK_SIZE - BLK_HDR_SIZE)
+
+struct blk_payload {
+	__u32 api_id;  /* event API id */
+	char  msg[BLK_MAX_MSG_SZ]; /* event parameters */
+} __packed;
+
+struct seemp_logk_blk {
+	__u8  status;  /* bits: 0->valid/invalid; 1-7: unused as of now! */
+	__u16 len;     /* length of the payload */
+	__u8  version; /* version number */
+	__s32 pid;     /* generating process's pid */
+	__s32 uid;     /* generating process's uid - app specific */
+	__s32 tid;     /* generating process's tid */
+	__s32 sec;     /* seconds since Epoch */
+	__s32 nsec;    /* nanoseconds */
+	char        ts[TS_SIZE];  /* Time Stamp */
+	char        appname[TASK_COMM_LEN];
+	struct blk_payload payload;
+} __packed;
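+
+/*
+ * Layout check (editorial): with TASK_COMM_LEN == 16, the packed header
+ * fields above total 60 bytes (1 + 2 + 1 + 5*4 + 20 + 16) and the
+ * payload is 4 + BLK_MAX_MSG_SZ == 196 bytes, so a block is exactly
+ * BLK_SIZE == 256 bytes; BLK_HDR_SIZE (64) leaves 4 bytes of headroom
+ * over the actual 60-byte header.
+ */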
+
+
+extern unsigned int kmalloc_flag;
+
+struct seemp_source_mask {
+	__u32       hash;
+	bool        isOn;
+};
+#endif
diff -Nruw linux-4.4.115-fbx/drivers/platform/msm./seemp_core/seemp_ringbuf.c linux-4.4.115-fbx/drivers/platform/msm/seemp_core/seemp_ringbuf.c
--- linux-4.4.115-fbx/drivers/platform/msm./seemp_core/seemp_ringbuf.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/platform/msm/seemp_core/seemp_ringbuf.c	2019-01-22 16:16:26.187270712 +0100
@@ -0,0 +1,164 @@
+/*
+ * Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "seemp: %s: " fmt, __func__
+
+#include "seemp_logk.h"
+#include "seemp_ringbuf.h"
+#include "seemp_event_encoder.h"
+
+/*initial function no need to hold ring_lock*/
+int ringbuf_init(struct seemp_logk_dev *sdev)
+{
+	char *buf;
+	unsigned long virt_addr;
+
+	if (kmalloc_flag) {
+		sdev->ring = kmalloc(sdev->ring_sz, GFP_KERNEL);
+		if (sdev->ring == NULL) {
+			pr_err("kmalloc failed, ring_sz= %d\n", sdev->ring_sz);
+			return -ENOMEM;
+		}
+
+		buf = (char *)sdev->ring;
+
+		/*reserve kmalloc memory as pages to make them remapable*/
+		for (virt_addr = (unsigned long)buf;
+				virt_addr < (unsigned long)buf + sdev->ring_sz;
+				virt_addr += PAGE_SIZE) {
+				SetPageReserved(virt_to_page((virt_addr)));
+		}
+	} else {
+		sdev->ring = vmalloc(sdev->ring_sz);
+		if (sdev->ring == NULL) {
+			pr_err("vmalloc failed, ring_sz = %d\n", sdev->ring_sz);
+			return -ENOMEM;
+		}
+		buf = (char *)sdev->ring;
+
+		/*reserve vmalloc memory as pages to make them remapable*/
+		for (virt_addr = (unsigned long)buf;
+				virt_addr < (unsigned long)buf + sdev->ring_sz;
+				virt_addr += PAGE_SIZE) {
+			SetPageReserved(vmalloc_to_page(
+				(unsigned long *) virt_addr));
+		}
+	}
+
+	memset(sdev->ring, 0, sdev->ring_sz);
+
+	sdev->num_tot_blks = (sdev->ring_sz / BLK_SIZE);
+	sdev->num_writers = 0;
+	sdev->write_idx = 0;
+	sdev->read_idx = 0;
+
+	sdev->num_write_avail_blks = sdev->num_tot_blks;
+	/*no. of blocks available for write*/
+	sdev->num_write_in_prog_blks = 0;
+	/*no. of blocks held by writers to perform writes*/
+
+	sdev->num_read_avail_blks = 0;
+	/*no. of blocks ready for read*/
+	sdev->num_read_in_prog_blks = 0;
+	/*no. of blocks held by the reader to perform read*/
+
+	return 0;
+}
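+
+/*
+ * Block lifecycle (editorial summary of the counters initialized above):
+ * a block moves write_avail -> write_in_prog on ringbuf_fetch_wr_block(),
+ * write_in_prog -> read_avail on ringbuf_finish_writer(), read_avail ->
+ * read_in_prog on the RESERVE ioctl, and read_in_prog -> write_avail on
+ * the RELEASE ioctl; the four num_* counters always sum to num_tot_blks.
+ */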
+
+void ringbuf_cleanup(struct seemp_logk_dev *sdev)
+{
+	unsigned long virt_addr;
+
+	if (kmalloc_flag) {
+		for (virt_addr = (unsigned long)sdev->ring;
+			virt_addr < (unsigned long)sdev->ring + sdev->ring_sz;
+			virt_addr += PAGE_SIZE) {
+			/*clear all pages*/
+			ClearPageReserved(virt_to_page((unsigned long *)
+				virt_addr));
+		}
+		kfree(sdev->ring);
+	} else {
+		for (virt_addr = (unsigned long)sdev->ring;
+			virt_addr < (unsigned long)sdev->ring + sdev->ring_sz;
+			virt_addr += PAGE_SIZE) {
+			/*clear all pages*/
+			ClearPageReserved(vmalloc_to_page((unsigned long *)
+				virt_addr));
+		}
+		vfree(sdev->ring);
+	}
+}
+
+struct seemp_logk_blk *ringbuf_fetch_wr_block(struct seemp_logk_dev *sdev)
+{
+	struct seemp_logk_blk *blk = NULL;
+	int idx;
+
+	mutex_lock(&sdev->lock);
+	if (sdev->num_write_avail_blks == 0) {
+		mutex_unlock(&sdev->lock);
+		return NULL;
+	}
+
+	idx = sdev->write_idx;
+	sdev->write_idx = (sdev->write_idx + 1) % sdev->num_tot_blks;
+	sdev->num_write_avail_blks--;
+	sdev->num_write_in_prog_blks++;
+	sdev->num_writers++;
+
+	blk = &sdev->ring[idx];
+	blk->status = 0x0;
+
+	mutex_unlock(&sdev->lock);
+	return blk;
+}
+
+void ringbuf_finish_writer(struct seemp_logk_dev *sdev,
+				struct seemp_logk_blk *blk)
+{
+	/* Encode seemp parameters in multi-threaded mode (before mutex lock) */
+	encode_seemp_params(blk);
+
+	/*
+	 * finish writing...
+	 * the calling process will no longer access this block.
+	 */
+	mutex_lock(&sdev->lock);
+
+	sdev->num_writers--;
+	sdev->num_write_in_prog_blks--;
+	sdev->num_read_avail_blks++;
+
+	/*wake up any readers*/
+	if (sdev->num_writers == 0)
+		wake_up_interruptible(&sdev->readers_wq);
+
+	mutex_unlock(&sdev->lock);
+}
+
+int ringbuf_count_marked(struct seemp_logk_dev *sdev)
+{
+	int i;
+	unsigned int marked;
+
+	mutex_lock(&sdev->lock);
+	for (marked = 0, i = 0; i < sdev->num_tot_blks; i++)
+		if (sdev->ring[i].status & 0x1)
+			marked++;
+	mutex_unlock(&sdev->lock);
+
+	return marked;
+}
diff -Nruw linux-4.4.115-fbx/drivers/platform/msm./seemp_core/seemp_ringbuf.h linux-4.4.115-fbx/drivers/platform/msm/seemp_core/seemp_ringbuf.h
--- linux-4.4.115-fbx/drivers/platform/msm./seemp_core/seemp_ringbuf.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/platform/msm/seemp_core/seemp_ringbuf.h	2019-01-22 16:16:26.187270712 +0100
@@ -0,0 +1,29 @@
+/*
+ * Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __SEEMP_RINGBUF_H__
+#define __SEEMP_RINGBUF_H__
+
+/*
+ * This header exports the ring buffer ("ping-pong") API
+ */
+
+int ringbuf_init(struct seemp_logk_dev *sdev);
+struct seemp_logk_blk *ringbuf_fetch_wr_block(struct seemp_logk_dev *sdev);
+void ringbuf_finish_writer(struct seemp_logk_dev *sdev,
+				struct seemp_logk_blk *blk);
+void ringbuf_cleanup(struct seemp_logk_dev *sdev);
+int ringbuf_count_marked(struct seemp_logk_dev *sdev);
+
+#endif
diff -Nruw linux-4.4.115-fbx/drivers/platform/msm./sps/bam.c linux-4.4.115-fbx/drivers/platform/msm/sps/bam.c
--- linux-4.4.115-fbx/drivers/platform/msm./sps/bam.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/platform/msm/sps/bam.c	2019-01-22 16:16:26.187270712 +0100
@@ -0,0 +1,2344 @@
+/* Copyright (c) 2011-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/* Bus-Access-Manager (BAM) Hardware manager. */
+
+#include <linux/types.h>	/* u32 */
+#include <linux/kernel.h>	/* pr_info() */
+#include <linux/io.h>		/* ioread32() */
+#include <linux/bitops.h>	/* find_first_bit() */
+#include <linux/errno.h>	/* ENODEV */
+#include <linux/memory.h>
+
+#include "bam.h"
+#include "sps_bam.h"
+
+/* Valid BAM hardware version range. */
+#define BAM_MIN_VERSION 2
+#define BAM_MAX_VERSION 0x2f
+
+#ifdef CONFIG_SPS_SUPPORT_NDP_BAM
+
+/* Maximum number of execution environments */
+#define BAM_MAX_EES 8
+
+/*
+ * BAM hardware register bitmasks.
+ * Format: <register>_<field>
+ */
+/* CTRL */
+#define BAM_MESS_ONLY_CANCEL_WB               0x100000
+#define CACHE_MISS_ERR_RESP_EN                 0x80000
+#define LOCAL_CLK_GATING                       0x60000
+#define IBC_DISABLE                            0x10000
+#define BAM_CACHED_DESC_STORE                   0x8000
+#define BAM_DESC_CACHE_SEL                      0x6000
+#define BAM_EN_ACCUM                              0x10
+#define BAM_EN                                     0x2
+#define BAM_SW_RST                                 0x1
+
+/* REVISION */
+#define BAM_INACTIV_TMR_BASE                0xff000000
+#define BAM_CMD_DESC_EN                       0x800000
+#define BAM_DESC_CACHE_DEPTH                  0x600000
+#define BAM_NUM_INACTIV_TMRS                  0x100000
+#define BAM_INACTIV_TMRS_EXST                  0x80000
+#define BAM_HIGH_FREQUENCY_BAM                 0x40000
+#define BAM_HAS_NO_BYPASS                      0x20000
+#define BAM_SECURED                            0x10000
+#define BAM_USE_VMIDMT                          0x8000
+#define BAM_AXI_ACTIVE                          0x4000
+#define BAM_CE_BUFFER_SIZE                      0x3000
+#define BAM_NUM_EES                              0xf00
+#define BAM_REVISION                              0xff
+
+/* SW_REVISION */
+#define BAM_MAJOR                           0xf0000000
+#define BAM_MINOR                            0xfff0000
+#define BAM_STEP                                0xffff
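+
+/*
+ * Illustrative helper (editorial sketch; the driver's real register
+ * accessors live elsewhere in this file): masked fields such as
+ * BAM_MAJOR or BAM_NUM_EES can be extracted by shifting the value down
+ * to the mask's lowest set bit.
+ */
+#if 0
+static inline u32 bam_field(u32 val, u32 mask)
+{
+	return (val & mask) >> (ffs(mask) - 1);
+}
+/* e.g. bam_field(sw_rev, BAM_MAJOR) yields the major version nibble */
+#endif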
+
+/* NUM_PIPES */
+#define BAM_NON_PIPE_GRP                    0xff000000
+#define BAM_PERIPH_NON_PIPE_GRP               0xff0000
+#define BAM_DATA_ADDR_BUS_WIDTH                 0xc000
+#define BAM_NUM_PIPES                             0xff
+
+/* TIMER */
+#define BAM_TIMER                               0xffff
+
+/* TIMER_CTRL */
+#define TIMER_RST                           0x80000000
+#define TIMER_RUN                           0x40000000
+#define TIMER_MODE                          0x20000000
+#define TIMER_TRSHLD                            0xffff
+
+/* DESC_CNT_TRSHLD */
+#define BAM_DESC_CNT_TRSHLD                     0xffff
+
+/* IRQ_SRCS */
+#define BAM_IRQ                         0x80000000
+#define P_IRQ                           0x7fffffff
+
+/* IRQ_STTS */
+#define IRQ_STTS_BAM_TIMER_IRQ                         0x10
+#define IRQ_STTS_BAM_EMPTY_IRQ                          0x8
+#define IRQ_STTS_BAM_ERROR_IRQ                          0x4
+#define IRQ_STTS_BAM_HRESP_ERR_IRQ                      0x2
+
+/* IRQ_CLR */
+#define IRQ_CLR_BAM_TIMER_IRQ                          0x10
+#define IRQ_CLR_BAM_EMPTY_CLR                           0x8
+#define IRQ_CLR_BAM_ERROR_CLR                           0x4
+#define IRQ_CLR_BAM_HRESP_ERR_CLR                       0x2
+
+/* IRQ_EN */
+#define IRQ_EN_BAM_TIMER_IRQ                           0x10
+#define IRQ_EN_BAM_EMPTY_EN                             0x8
+#define IRQ_EN_BAM_ERROR_EN                             0x4
+#define IRQ_EN_BAM_HRESP_ERR_EN                         0x2
+
+/* AHB_MASTER_ERR_CTRLS */
+#define AHB_MASTER_ERR_CTRLS_BAM_ERR_HVMID         0x7c0000
+#define AHB_MASTER_ERR_CTRLS_BAM_ERR_DIRECT_MODE    0x20000
+#define AHB_MASTER_ERR_CTRLS_BAM_ERR_HCID           0x1f000
+#define AHB_MASTER_ERR_CTRLS_BAM_ERR_HPROT            0xf00
+#define AHB_MASTER_ERR_CTRLS_BAM_ERR_HBURST            0xe0
+#define AHB_MASTER_ERR_CTRLS_BAM_ERR_HSIZE             0x18
+#define AHB_MASTER_ERR_CTRLS_BAM_ERR_HWRITE             0x4
+#define AHB_MASTER_ERR_CTRLS_BAM_ERR_HTRANS             0x3
+
+/* TRUST_REG  */
+#define LOCK_EE_CTRL                            0x2000
+#define BAM_VMID                                0x1f00
+#define BAM_RST_BLOCK                             0x80
+#define BAM_EE                                     0x7
+
+/* TEST_BUS_SEL */
+#define BAM_SW_EVENTS_ZERO                    0x200000
+#define BAM_SW_EVENTS_SEL                     0x180000
+#define BAM_DATA_ERASE                         0x40000
+#define BAM_DATA_FLUSH                         0x20000
+#define BAM_CLK_ALWAYS_ON                      0x10000
+#define BAM_TESTBUS_SEL                           0x7f
+
+/* CNFG_BITS */
+#define CNFG_BITS_AOS_OVERFLOW_PRVNT		 0x80000000
+#define CNFG_BITS_MULTIPLE_EVENTS_DESC_AVAIL_EN  0x40000000
+#define CNFG_BITS_MULTIPLE_EVENTS_SIZE_EN        0x20000000
+#define CNFG_BITS_BAM_ZLT_W_CD_SUPPORT           0x10000000
+#define CNFG_BITS_BAM_CD_ENABLE                   0x8000000
+#define CNFG_BITS_BAM_AU_ACCUMED                  0x4000000
+#define CNFG_BITS_BAM_PSM_P_HD_DATA               0x2000000
+#define CNFG_BITS_BAM_REG_P_EN                    0x1000000
+#define CNFG_BITS_BAM_WB_DSC_AVL_P_RST             0x800000
+#define CNFG_BITS_BAM_WB_RETR_SVPNT                0x400000
+#define CNFG_BITS_BAM_WB_CSW_ACK_IDL               0x200000
+#define CNFG_BITS_BAM_WB_BLK_CSW                   0x100000
+#define CNFG_BITS_BAM_WB_P_RES                      0x80000
+#define CNFG_BITS_BAM_SI_P_RES                      0x40000
+#define CNFG_BITS_BAM_AU_P_RES                      0x20000
+#define CNFG_BITS_BAM_PSM_P_RES                     0x10000
+#define CNFG_BITS_BAM_PSM_CSW_REQ                    0x8000
+#define CNFG_BITS_BAM_SB_CLK_REQ                     0x4000
+#define CNFG_BITS_BAM_IBC_DISABLE                    0x2000
+#define CNFG_BITS_BAM_NO_EXT_P_RST                   0x1000
+#define CNFG_BITS_BAM_FULL_PIPE                       0x800
+#define CNFG_BITS_BAM_PIPE_CNFG                         0x4
+
+/* PIPE_ATTR_EEn */
+#define BAM_ENABLED                              0x80000000
+#define P_ATTR                                   0x7fffffff
+
+/* P_ctrln */
+#define P_LOCK_GROUP                          0x1f0000
+#define P_WRITE_NWD                              0x800
+#define P_PREFETCH_LIMIT                         0x600
+#define P_AUTO_EOB_SEL                           0x180
+#define P_AUTO_EOB                                0x40
+#define P_SYS_MODE                                0x20
+#define P_SYS_STRM                                0x10
+#define P_DIRECTION                                0x8
+#define P_EN                                       0x2
+
+/* P_RSTn */
+#define P_RST_P_SW_RST                             0x1
+
+/* P_HALTn */
+#define P_HALT_P_PIPE_EMPTY			   0x8
+#define P_HALT_P_LAST_DESC_ZLT                     0x4
+#define P_HALT_P_PROD_HALTED                       0x2
+#define P_HALT_P_HALT                              0x1
+
+/* P_TRUST_REGn */
+#define BAM_P_VMID                              0x1f00
+#define BAM_P_SUP_GROUP                           0xf8
+#define BAM_P_EE                                   0x7
+
+/* P_IRQ_STTSn */
+#define P_IRQ_STTS_P_HRESP_ERR_IRQ                0x80
+#define P_IRQ_STTS_P_PIPE_RST_ERR_IRQ             0x40
+#define P_IRQ_STTS_P_TRNSFR_END_IRQ               0x20
+#define P_IRQ_STTS_P_ERR_IRQ                      0x10
+#define P_IRQ_STTS_P_OUT_OF_DESC_IRQ               0x8
+#define P_IRQ_STTS_P_WAKE_IRQ                      0x4
+#define P_IRQ_STTS_P_TIMER_IRQ                     0x2
+#define P_IRQ_STTS_P_PRCSD_DESC_IRQ                0x1
+
+/* P_IRQ_CLRn */
+#define P_IRQ_CLR_P_HRESP_ERR_CLR                 0x80
+#define P_IRQ_CLR_P_PIPE_RST_ERR_CLR              0x40
+#define P_IRQ_CLR_P_TRNSFR_END_CLR                0x20
+#define P_IRQ_CLR_P_ERR_CLR                       0x10
+#define P_IRQ_CLR_P_OUT_OF_DESC_CLR                0x8
+#define P_IRQ_CLR_P_WAKE_CLR                       0x4
+#define P_IRQ_CLR_P_TIMER_CLR                      0x2
+#define P_IRQ_CLR_P_PRCSD_DESC_CLR                 0x1
+
+/* P_IRQ_ENn */
+#define P_IRQ_EN_P_HRESP_ERR_EN                   0x80
+#define P_IRQ_EN_P_PIPE_RST_ERR_EN                0x40
+#define P_IRQ_EN_P_TRNSFR_END_EN                  0x20
+#define P_IRQ_EN_P_ERR_EN                         0x10
+#define P_IRQ_EN_P_OUT_OF_DESC_EN                  0x8
+#define P_IRQ_EN_P_WAKE_EN                         0x4
+#define P_IRQ_EN_P_TIMER_EN                        0x2
+#define P_IRQ_EN_P_PRCSD_DESC_EN                   0x1
+
+/* P_TIMERn */
+#define P_TIMER_P_TIMER                         0xffff
+
+/* P_TIMER_ctrln */
+#define P_TIMER_RST                         0x80000000
+#define P_TIMER_RUN                         0x40000000
+#define P_TIMER_MODE                        0x20000000
+#define P_TIMER_TRSHLD                          0xffff
+
+/* P_PRDCR_SDBNDn */
+#define P_PRDCR_SDBNDn_BAM_P_SB_UPDATED      0x1000000
+#define P_PRDCR_SDBNDn_BAM_P_TOGGLE           0x100000
+#define P_PRDCR_SDBNDn_BAM_P_CTRL              0xf0000
+#define P_PRDCR_SDBNDn_BAM_P_BYTES_FREE         0xffff
+
+/* P_CNSMR_SDBNDn */
+#define P_CNSMR_SDBNDn_BAM_P_SB_UPDATED      0x1000000
+#define P_CNSMR_SDBNDn_BAM_P_WAIT_4_ACK       0x800000
+#define P_CNSMR_SDBNDn_BAM_P_ACK_TOGGLE       0x400000
+#define P_CNSMR_SDBNDn_BAM_P_ACK_TOGGLE_R     0x200000
+#define P_CNSMR_SDBNDn_BAM_P_TOGGLE           0x100000
+#define P_CNSMR_SDBNDn_BAM_P_CTRL              0xf0000
+#define P_CNSMR_SDBNDn_BAM_P_BYTES_AVAIL        0xffff
+
+/* P_EVNT_regn */
+#define P_BYTES_CONSUMED                    0xffff0000
+#define P_DESC_FIFO_PEER_OFST                   0xffff
+
+/* P_SW_ofstsn */
+#define SW_OFST_IN_DESC                     0xffff0000
+#define SW_DESC_OFST                            0xffff
+
+/* P_EVNT_GEN_TRSHLDn */
+#define P_EVNT_GEN_TRSHLD_P_TRSHLD              0xffff
+
+/* P_FIFO_sizesn */
+#define P_DATA_FIFO_SIZE                    0xffff0000
+#define P_DESC_FIFO_SIZE                        0xffff
+
+#define P_RETR_CNTXT_RETR_DESC_OFST            0xffff0000
+#define P_RETR_CNTXT_RETR_OFST_IN_DESC             0xffff
+#define P_SI_CNTXT_SI_DESC_OFST                    0xffff
+#define P_DF_CNTXT_WB_ACCUMULATED              0xffff0000
+#define P_DF_CNTXT_DF_DESC_OFST                    0xffff
+#define P_AU_PSM_CNTXT_1_AU_PSM_ACCUMED        0xffff0000
+#define P_AU_PSM_CNTXT_1_AU_ACKED                  0xffff
+#define P_PSM_CNTXT_2_PSM_DESC_VALID           0x80000000
+#define P_PSM_CNTXT_2_PSM_DESC_IRQ             0x40000000
+#define P_PSM_CNTXT_2_PSM_DESC_IRQ_DONE        0x20000000
+#define P_PSM_CNTXT_2_PSM_GENERAL_BITS         0x1e000000
+#define P_PSM_CNTXT_2_PSM_CONS_STATE            0x1c00000
+#define P_PSM_CNTXT_2_PSM_PROD_SYS_STATE         0x380000
+#define P_PSM_CNTXT_2_PSM_PROD_B2B_STATE          0x70000
+#define P_PSM_CNTXT_2_PSM_DESC_SIZE                0xffff
+#define P_PSM_CNTXT_4_PSM_DESC_OFST            0xffff0000
+#define P_PSM_CNTXT_4_PSM_SAVED_ACCUMED_SIZE       0xffff
+#define P_PSM_CNTXT_5_PSM_BLOCK_BYTE_CNT       0xffff0000
+#define P_PSM_CNTXT_5_PSM_OFST_IN_DESC             0xffff
+
+#else
+
+/* Maximum number of execution environments */
+#define BAM_MAX_EES 4
+
+/**
+ * BAM hardware register bitmasks.
+ * Format: <register>_<field>
+ */
+/* CTRL */
+#define IBC_DISABLE                            0x10000
+#define BAM_CACHED_DESC_STORE                   0x8000
+#define BAM_DESC_CACHE_SEL                      0x6000
+/* BAM_PERIPH_IRQ_SIC_SEL is an obsolete field; this bit is now reserved */
+#define BAM_PERIPH_IRQ_SIC_SEL                  0x1000
+#define BAM_EN_ACCUM                              0x10
+#define BAM_EN                                     0x2
+#define BAM_SW_RST                                 0x1
+
+/* REVISION */
+#define BAM_INACTIV_TMR_BASE                0xff000000
+#define BAM_INACTIV_TMRS_EXST                  0x80000
+#define BAM_HIGH_FREQUENCY_BAM                 0x40000
+#define BAM_HAS_NO_BYPASS                      0x20000
+#define BAM_SECURED                            0x10000
+#define BAM_NUM_EES                              0xf00
+#define BAM_REVISION                              0xff
+
+/* NUM_PIPES */
+#define BAM_NON_PIPE_GRP                    0xff000000
+#define BAM_PERIPH_NON_PIPE_GRP               0xff0000
+#define BAM_DATA_ADDR_BUS_WIDTH                 0xc000
+#define BAM_NUM_PIPES                             0xff
+
+/* DESC_CNT_TRSHLD */
+#define BAM_DESC_CNT_TRSHLD                     0xffff
+
+/* IRQ_SRCS */
+#define BAM_IRQ                         0x80000000
+#define P_IRQ                           0x7fffffff
+
+#define IRQ_STTS_BAM_EMPTY_IRQ                          0x8
+#define IRQ_STTS_BAM_ERROR_IRQ                          0x4
+#define IRQ_STTS_BAM_HRESP_ERR_IRQ                      0x2
+#define IRQ_CLR_BAM_EMPTY_CLR                           0x8
+#define IRQ_CLR_BAM_ERROR_CLR                           0x4
+#define IRQ_CLR_BAM_HRESP_ERR_CLR                       0x2
+#define IRQ_EN_BAM_EMPTY_EN                             0x8
+#define IRQ_EN_BAM_ERROR_EN                             0x4
+#define IRQ_EN_BAM_HRESP_ERR_EN                         0x2
+#define IRQ_SIC_SEL_BAM_IRQ_SIC_SEL              0x80000000
+#define IRQ_SIC_SEL_P_IRQ_SIC_SEL                0x7fffffff
+#define AHB_MASTER_ERR_CTRLS_BAM_ERR_HVMID         0x7c0000
+#define AHB_MASTER_ERR_CTRLS_BAM_ERR_DIRECT_MODE    0x20000
+#define AHB_MASTER_ERR_CTRLS_BAM_ERR_HCID           0x1f000
+#define AHB_MASTER_ERR_CTRLS_BAM_ERR_HPROT            0xf00
+#define AHB_MASTER_ERR_CTRLS_BAM_ERR_HBURST            0xe0
+#define AHB_MASTER_ERR_CTRLS_BAM_ERR_HSIZE             0x18
+#define AHB_MASTER_ERR_CTRLS_BAM_ERR_HWRITE             0x4
+#define AHB_MASTER_ERR_CTRLS_BAM_ERR_HTRANS             0x3
+#define CNFG_BITS_BAM_AU_ACCUMED                  0x4000000
+#define CNFG_BITS_BAM_PSM_P_HD_DATA               0x2000000
+#define CNFG_BITS_BAM_REG_P_EN                    0x1000000
+#define CNFG_BITS_BAM_WB_DSC_AVL_P_RST             0x800000
+#define CNFG_BITS_BAM_WB_RETR_SVPNT                0x400000
+#define CNFG_BITS_BAM_WB_CSW_ACK_IDL               0x200000
+#define CNFG_BITS_BAM_WB_BLK_CSW                   0x100000
+#define CNFG_BITS_BAM_WB_P_RES                      0x80000
+#define CNFG_BITS_BAM_SI_P_RES                      0x40000
+#define CNFG_BITS_BAM_AU_P_RES                      0x20000
+#define CNFG_BITS_BAM_PSM_P_RES                     0x10000
+#define CNFG_BITS_BAM_PSM_CSW_REQ                    0x8000
+#define CNFG_BITS_BAM_SB_CLK_REQ                     0x4000
+#define CNFG_BITS_BAM_IBC_DISABLE                    0x2000
+#define CNFG_BITS_BAM_NO_EXT_P_RST                   0x1000
+#define CNFG_BITS_BAM_FULL_PIPE                       0x800
+#define CNFG_BITS_BAM_PIPE_CNFG                         0x4
+
+/* TEST_BUS_SEL */
+#define BAM_DATA_ERASE                         0x40000
+#define BAM_DATA_FLUSH                         0x20000
+#define BAM_CLK_ALWAYS_ON                      0x10000
+#define BAM_TESTBUS_SEL                           0x7f
+
+/* TRUST_REG  */
+#define BAM_VMID                                0x1f00
+#define BAM_RST_BLOCK                             0x80
+#define BAM_EE                                     0x3
+
+/* P_TRUST_REGn */
+#define BAM_P_VMID                              0x1f00
+#define BAM_P_EE                                   0x3
+
+/* P_PRDCR_SDBNDn */
+#define P_PRDCR_SDBNDn_BAM_P_SB_UPDATED      0x1000000
+#define P_PRDCR_SDBNDn_BAM_P_TOGGLE           0x100000
+#define P_PRDCR_SDBNDn_BAM_P_CTRL              0xf0000
+#define P_PRDCR_SDBNDn_BAM_P_BYTES_FREE         0xffff
+/* P_CNSMR_SDBNDn */
+#define P_CNSMR_SDBNDn_BAM_P_SB_UPDATED      0x1000000
+#define P_CNSMR_SDBNDn_BAM_P_WAIT_4_ACK       0x800000
+#define P_CNSMR_SDBNDn_BAM_P_ACK_TOGGLE       0x400000
+#define P_CNSMR_SDBNDn_BAM_P_ACK_TOGGLE_R     0x200000
+#define P_CNSMR_SDBNDn_BAM_P_TOGGLE           0x100000
+#define P_CNSMR_SDBNDn_BAM_P_CTRL              0xf0000
+#define P_CNSMR_SDBNDn_BAM_P_BYTES_AVAIL        0xffff
+
+/* P_ctrln */
+#define P_PREFETCH_LIMIT                         0x600
+#define P_AUTO_EOB_SEL                           0x180
+#define P_AUTO_EOB                                0x40
+#define P_SYS_MODE                             0x20
+#define P_SYS_STRM                             0x10
+#define P_DIRECTION                             0x8
+#define P_EN                                    0x2
+
+#define P_RST_P_SW_RST                                 0x1
+
+#define P_HALT_P_PROD_HALTED                           0x2
+#define P_HALT_P_HALT                                  0x1
+
+#define P_IRQ_STTS_P_TRNSFR_END_IRQ                   0x20
+#define P_IRQ_STTS_P_ERR_IRQ                          0x10
+#define P_IRQ_STTS_P_OUT_OF_DESC_IRQ                   0x8
+#define P_IRQ_STTS_P_WAKE_IRQ                          0x4
+#define P_IRQ_STTS_P_TIMER_IRQ                         0x2
+#define P_IRQ_STTS_P_PRCSD_DESC_IRQ                    0x1
+
+#define P_IRQ_CLR_P_TRNSFR_END_CLR                    0x20
+#define P_IRQ_CLR_P_ERR_CLR                           0x10
+#define P_IRQ_CLR_P_OUT_OF_DESC_CLR                    0x8
+#define P_IRQ_CLR_P_WAKE_CLR                           0x4
+#define P_IRQ_CLR_P_TIMER_CLR                          0x2
+#define P_IRQ_CLR_P_PRCSD_DESC_CLR                     0x1
+
+#define P_IRQ_EN_P_TRNSFR_END_EN                      0x20
+#define P_IRQ_EN_P_ERR_EN                             0x10
+#define P_IRQ_EN_P_OUT_OF_DESC_EN                      0x8
+#define P_IRQ_EN_P_WAKE_EN                             0x4
+#define P_IRQ_EN_P_TIMER_EN                            0x2
+#define P_IRQ_EN_P_PRCSD_DESC_EN                       0x1
+
+#define P_TIMER_P_TIMER                             0xffff
+
+/* P_TIMER_ctrln */
+#define P_TIMER_RST                0x80000000
+#define P_TIMER_RUN                0x40000000
+#define P_TIMER_MODE               0x20000000
+#define P_TIMER_TRSHLD                 0xffff
+
+/* P_EVNT_regn */
+#define P_BYTES_CONSUMED             0xffff0000
+#define P_DESC_FIFO_PEER_OFST            0xffff
+
+/* P_SW_ofstsn */
+#define SW_OFST_IN_DESC              0xffff0000
+#define SW_DESC_OFST                     0xffff
+
+#define P_EVNT_GEN_TRSHLD_P_TRSHLD                  0xffff
+
+/* P_FIFO_sizesn */
+#define P_DATA_FIFO_SIZE           0xffff0000
+#define P_DESC_FIFO_SIZE               0xffff
+
+#define P_RETR_CNTXT_RETR_DESC_OFST            0xffff0000
+#define P_RETR_CNTXT_RETR_OFST_IN_DESC             0xffff
+#define P_SI_CNTXT_SI_DESC_OFST                    0xffff
+#define P_AU_PSM_CNTXT_1_AU_PSM_ACCUMED        0xffff0000
+#define P_AU_PSM_CNTXT_1_AU_ACKED                  0xffff
+#define P_PSM_CNTXT_2_PSM_DESC_VALID           0x80000000
+#define P_PSM_CNTXT_2_PSM_DESC_IRQ             0x40000000
+#define P_PSM_CNTXT_2_PSM_DESC_IRQ_DONE        0x20000000
+#define P_PSM_CNTXT_2_PSM_GENERAL_BITS         0x1e000000
+#define P_PSM_CNTXT_2_PSM_CONS_STATE            0x1c00000
+#define P_PSM_CNTXT_2_PSM_PROD_SYS_STATE         0x380000
+#define P_PSM_CNTXT_2_PSM_PROD_B2B_STATE          0x70000
+#define P_PSM_CNTXT_2_PSM_DESC_SIZE                0xffff
+#define P_PSM_CNTXT_4_PSM_DESC_OFST            0xffff0000
+#define P_PSM_CNTXT_4_PSM_SAVED_ACCUMED_SIZE       0xffff
+#define P_PSM_CNTXT_5_PSM_BLOCK_BYTE_CNT       0xffff0000
+#define P_PSM_CNTXT_5_PSM_OFST_IN_DESC             0xffff
+#endif
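+
+/*
+ * Illustrative sketch (not part of the driver): each <register>_<field>
+ * macro above is a contiguous bitmask, so a field value is recovered by
+ * masking the raw register value and shifting right by the mask's lowest
+ * set bit. For a hypothetical raw REVISION value of 0x00000405:
+ *
+ *	u32 rev = raw & BAM_REVISION;		-> 0x05
+ *	u32 ees = (raw & BAM_NUM_EES) >> 8;	-> 0x04
+ *
+ * bam_read_reg_field() below derives the shift at run time with
+ * find_first_bit() instead of hard-coding it per field.
+ */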
+
+#define BAM_ERROR   (-1)
+
+enum bam_regs {
+	CTRL,
+	REVISION,
+	SW_REVISION,
+	NUM_PIPES,
+	TIMER,
+	TIMER_CTRL,
+	DESC_CNT_TRSHLD,
+	IRQ_SRCS,
+	IRQ_SRCS_MSK,
+	IRQ_SRCS_UNMASKED,
+	IRQ_STTS,
+	IRQ_CLR,
+	IRQ_EN,
+	IRQ_SIC_SEL,
+	AHB_MASTER_ERR_CTRLS,
+	AHB_MASTER_ERR_ADDR,
+	AHB_MASTER_ERR_ADDR_MSB,
+	AHB_MASTER_ERR_DATA,
+	IRQ_DEST,
+	PERIPH_IRQ_DEST,
+	TRUST_REG,
+	TEST_BUS_SEL,
+	TEST_BUS_REG,
+	CNFG_BITS,
+	IRQ_SRCS_EE,
+	IRQ_SRCS_MSK_EE,
+	IRQ_SRCS_UNMASKED_EE,
+	PIPE_ATTR_EE,
+	P_CTRL,
+	P_RST,
+	P_HALT,
+	P_IRQ_STTS,
+	P_IRQ_CLR,
+	P_IRQ_EN,
+	P_TIMER,
+	P_TIMER_CTRL,
+	P_PRDCR_SDBND,
+	P_CNSMR_SDBND,
+	P_EVNT_DEST_ADDR,
+	P_EVNT_DEST_ADDR_MSB,
+	P_EVNT_REG,
+	P_SW_OFSTS,
+	P_DATA_FIFO_ADDR,
+	P_DATA_FIFO_ADDR_MSB,
+	P_DESC_FIFO_ADDR,
+	P_DESC_FIFO_ADDR_MSB,
+	P_EVNT_GEN_TRSHLD,
+	P_FIFO_SIZES,
+	P_IRQ_DEST_ADDR,
+	P_RETR_CNTXT,
+	P_SI_CNTXT,
+	P_DF_CNTXT,
+	P_AU_PSM_CNTXT_1,
+	P_PSM_CNTXT_2,
+	P_PSM_CNTXT_3,
+	P_PSM_CNTXT_3_MSB,
+	P_PSM_CNTXT_4,
+	P_PSM_CNTXT_5,
+	P_TRUST_REG,
+	BAM_MAX_REGS,
+};
+
+static u32 bam_regmap[][BAM_MAX_REGS] = {
+	{ /* LEGACY BAM */
+			[CTRL] = 0xf80,
+			[REVISION] = 0xf84,
+			[NUM_PIPES] = 0xfbc,
+			[DESC_CNT_TRSHLD] = 0xf88,
+			[IRQ_SRCS] = 0xf8c,
+			[IRQ_SRCS_MSK] = 0xf90,
+			[IRQ_SRCS_UNMASKED] = 0xfb0,
+			[IRQ_STTS] = 0xf94,
+			[IRQ_CLR] = 0xf98,
+			[IRQ_EN] = 0xf9c,
+			[IRQ_SIC_SEL] = 0xfa0,
+			[AHB_MASTER_ERR_CTRLS] = 0xfa4,
+			[AHB_MASTER_ERR_ADDR] = 0xfa8,
+			[AHB_MASTER_ERR_DATA] = 0xfac,
+			[IRQ_DEST] = 0xfb4,
+			[PERIPH_IRQ_DEST] = 0xfb8,
+			[TRUST_REG] = 0xff0,
+			[TEST_BUS_SEL] = 0xff4,
+			[TEST_BUS_REG] = 0xff8,
+			[CNFG_BITS] = 0xffc,
+			[IRQ_SRCS_EE] = 0x1800,
+			[IRQ_SRCS_MSK_EE] = 0x1804,
+			[IRQ_SRCS_UNMASKED_EE] = 0x1808,
+			[P_CTRL] = 0x0,
+			[P_RST] = 0x4,
+			[P_HALT] = 0x8,
+			[P_IRQ_STTS] = 0x10,
+			[P_IRQ_CLR] = 0x14,
+			[P_IRQ_EN] = 0x18,
+			[P_TIMER] = 0x1c,
+			[P_TIMER_CTRL] = 0x20,
+			[P_PRDCR_SDBND] = 0x24,
+			[P_CNSMR_SDBND] = 0x28,
+			[P_EVNT_DEST_ADDR] = 0x102c,
+			[P_EVNT_REG] = 0x1018,
+			[P_SW_OFSTS] = 0x1000,
+			[P_DATA_FIFO_ADDR] = 0x1024,
+			[P_DESC_FIFO_ADDR] = 0x101c,
+			[P_EVNT_GEN_TRSHLD] = 0x1028,
+			[P_FIFO_SIZES] = 0x1020,
+			[P_IRQ_DEST_ADDR] = 0x103c,
+			[P_RETR_CNTXT] = 0x1034,
+			[P_SI_CNTXT] = 0x1038,
+			[P_AU_PSM_CNTXT_1] = 0x1004,
+			[P_PSM_CNTXT_2] = 0x1008,
+			[P_PSM_CNTXT_3] = 0x100c,
+			[P_PSM_CNTXT_4] = 0x1010,
+			[P_PSM_CNTXT_5] = 0x1014,
+			[P_TRUST_REG] = 0x30,
+	},
+	{ /* NDP BAM */
+			[CTRL] = 0x0,
+			[REVISION] = 0x4,
+			[SW_REVISION] = 0x80,
+			[NUM_PIPES] = 0x3c,
+			[TIMER] = 0x40,
+			[TIMER_CTRL] = 0x44,
+			[DESC_CNT_TRSHLD] = 0x8,
+			[IRQ_SRCS] = 0xc,
+			[IRQ_SRCS_MSK] = 0x10,
+			[IRQ_SRCS_UNMASKED] = 0x30,
+			[IRQ_STTS] = 0x14,
+			[IRQ_CLR] = 0x18,
+			[IRQ_EN] = 0x1c,
+			[AHB_MASTER_ERR_CTRLS] = 0x24,
+			[AHB_MASTER_ERR_ADDR] = 0x28,
+			[AHB_MASTER_ERR_ADDR_MSB] = 0x104,
+			[AHB_MASTER_ERR_DATA] = 0x2c,
+			[TRUST_REG] = 0x70,
+			[TEST_BUS_SEL] = 0x74,
+			[TEST_BUS_REG] = 0x78,
+			[CNFG_BITS] = 0x7c,
+			[IRQ_SRCS_EE] = 0x800,
+			[IRQ_SRCS_MSK_EE] = 0x804,
+			[IRQ_SRCS_UNMASKED_EE] = 0x808,
+			[PIPE_ATTR_EE] = 0x80c,
+			[P_CTRL] = 0x1000,
+			[P_RST] = 0x1004,
+			[P_HALT] = 0x1008,
+			[P_IRQ_STTS] = 0x1010,
+			[P_IRQ_CLR] = 0x1014,
+			[P_IRQ_EN] = 0x1018,
+			[P_TIMER] = 0x101c,
+			[P_TIMER_CTRL] = 0x1020,
+			[P_PRDCR_SDBND] = 0x1024,
+			[P_CNSMR_SDBND] = 0x1028,
+			[P_EVNT_DEST_ADDR] = 0x182c,
+			[P_EVNT_DEST_ADDR_MSB] = 0x1934,
+			[P_EVNT_REG] = 0x1818,
+			[P_SW_OFSTS] = 0x1800,
+			[P_DATA_FIFO_ADDR] = 0x1824,
+			[P_DATA_FIFO_ADDR_MSB] = 0x1924,
+			[P_DESC_FIFO_ADDR] = 0x181c,
+			[P_DESC_FIFO_ADDR_MSB] = 0x1914,
+			[P_EVNT_GEN_TRSHLD] = 0x1828,
+			[P_FIFO_SIZES] = 0x1820,
+			[P_RETR_CNTXT] = 0x1834,
+			[P_SI_CNTXT] = 0x1838,
+			[P_DF_CNTXT] = 0x1830,
+			[P_AU_PSM_CNTXT_1] = 0x1804,
+			[P_PSM_CNTXT_2] = 0x1808,
+			[P_PSM_CNTXT_3] = 0x180c,
+			[P_PSM_CNTXT_3_MSB] = 0x1904,
+			[P_PSM_CNTXT_4] = 0x1810,
+			[P_PSM_CNTXT_5] = 0x1814,
+			[P_TRUST_REG] = 0x1030,
+	},
+	{ /* 4K OFFSETs */
+			[CTRL] = 0x0,
+			[REVISION] = 0x1000,
+			[SW_REVISION] = 0x1004,
+			[NUM_PIPES] = 0x1008,
+			[TIMER] = 0x40,
+			[TIMER_CTRL] = 0x44,
+			[DESC_CNT_TRSHLD] = 0x8,
+			[IRQ_SRCS] = 0x3010,
+			[IRQ_SRCS_MSK] = 0x3014,
+			[IRQ_SRCS_UNMASKED] = 0x3018,
+			[IRQ_STTS] = 0x14,
+			[IRQ_CLR] = 0x18,
+			[IRQ_EN] = 0x1c,
+			[AHB_MASTER_ERR_CTRLS] = 0x1024,
+			[AHB_MASTER_ERR_ADDR] = 0x1028,
+			[AHB_MASTER_ERR_ADDR_MSB] = 0x1104,
+			[AHB_MASTER_ERR_DATA] = 0x102c,
+			[TRUST_REG] = 0x2000,
+			[TEST_BUS_SEL] = 0x1010,
+			[TEST_BUS_REG] = 0x1014,
+			[CNFG_BITS] = 0x7c,
+			[IRQ_SRCS_EE] = 0x3000,
+			[IRQ_SRCS_MSK_EE] = 0x3004,
+			[IRQ_SRCS_UNMASKED_EE] = 0x3008,
+			[PIPE_ATTR_EE] = 0x300c,
+			[P_CTRL] = 0x13000,
+			[P_RST] = 0x13004,
+			[P_HALT] = 0x13008,
+			[P_IRQ_STTS] = 0x13010,
+			[P_IRQ_CLR] = 0x13014,
+			[P_IRQ_EN] = 0x13018,
+			[P_TIMER] = 0x1301c,
+			[P_TIMER_CTRL] = 0x13020,
+			[P_PRDCR_SDBND] = 0x13024,
+			[P_CNSMR_SDBND] = 0x13028,
+			[P_EVNT_DEST_ADDR] = 0x1382c,
+			[P_EVNT_DEST_ADDR_MSB] = 0x13934,
+			[P_EVNT_REG] = 0x13818,
+			[P_SW_OFSTS] = 0x13800,
+			[P_DATA_FIFO_ADDR] = 0x13824,
+			[P_DATA_FIFO_ADDR_MSB] = 0x13924,
+			[P_DESC_FIFO_ADDR] = 0x1381c,
+			[P_DESC_FIFO_ADDR_MSB] = 0x13914,
+			[P_EVNT_GEN_TRSHLD] = 0x13828,
+			[P_FIFO_SIZES] = 0x13820,
+			[P_RETR_CNTXT] = 0x13834,
+			[P_SI_CNTXT] = 0x13838,
+			[P_DF_CNTXT] = 0x13830,
+			[P_AU_PSM_CNTXT_1] = 0x13804,
+			[P_PSM_CNTXT_2] = 0x13808,
+			[P_PSM_CNTXT_3] = 0x1380c,
+			[P_PSM_CNTXT_3_MSB] = 0x13904,
+			[P_PSM_CNTXT_4] = 0x13810,
+			[P_PSM_CNTXT_5] = 0x13814,
+			[P_TRUST_REG] = 0x2020,
+	},
+};
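+
+/*
+ * Illustrative note (assuming bam_type selects a row of bam_regmap): a
+ * flat register address is the bam_regmap[bam_type][reg] base plus a
+ * per-pipe or per-EE stride. On an NDP BAM, for example, the P_CTRL
+ * register of pipe 3 resolves to 0x1000 + (0x1000 * 3) = 0x4000, which
+ * is exactly what bam_get_register_offset() below computes.
+ */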
+
+/* BAM non-secure reset control */
+enum bam_nonsecure_reset {
+	BAM_NONSECURE_RESET_ENABLE  = 0,
+	BAM_NONSECURE_RESET_DISABLE = 1,
+};
+
+static inline u32 bam_get_register_offset(void *base, enum bam_regs reg,
+								u32 param)
+{
+	u32 index = (u32)BAM_ERROR, offset = 0;
+	u32 *ptr_reg = bam_regmap[bam_type];
+	struct sps_bam *dev = to_sps_bam_dev(base);
+
+	if ((dev == NULL) || (&dev->base != base)) {
+		SPS_ERR(sps, "%s:Failed to get dev for base addr 0x%pK\n",
+				__func__, base);
+		return SPS_ERROR;
+	}
+
+	if (reg >= CTRL && reg < IRQ_SRCS_EE)
+		index = 0;
+	if (reg >= IRQ_SRCS_EE && reg < P_CTRL)
+		index = (bam_type == SPS_BAM_NDP_4K) ? 0x1000 : 0x80;
+	if (reg >= P_CTRL && reg < P_TRUST_REG) {
+		if (bam_type == SPS_BAM_LEGACY) {
+			if (reg >= P_EVNT_DEST_ADDR)
+				index = 0x40;
+			else
+				index = 0x80;
+		} else
+			index = 0x1000;
+	} else if (P_TRUST_REG == reg) {
+		if (bam_type == SPS_BAM_LEGACY)
+			index = 0x80;
+		else
+			index = (bam_type == SPS_BAM_NDP_4K) ? 0x4 : 0x1000;
+	}
+	if (index == (u32)BAM_ERROR) {
+		SPS_ERR(dev, "%s:Failed to find register offset index\n",
+			__func__);
+		return index;
+	}
+
+	offset = *(ptr_reg + reg) + (index * param);
+	return offset;
+}
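+
+/*
+ * Worked example (illustrative): for an EE-indexed register such as
+ * IRQ_SRCS_EE, the NDP map above gives 0x800 + (0x80 * ee), so EE 1
+ * resolves to 0x880; the 4K-offset map instead uses a 0x1000 stride,
+ * giving 0x3000 + 0x1000 = 0x4000 for the same lookup.
+ */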
+
+/**
+ * Read a BAM register, with debug info.
+ *
+ * @base - bam base virtual address.
+ * @reg - register identifier.
+ * @param - pipe or EE index used to compute the register offset.
+ *
+ * @return u32
+ */
+static inline u32 bam_read_reg(void *base, enum bam_regs reg, u32 param)
+{
+	u32 val, offset = 0;
+	struct sps_bam *dev = to_sps_bam_dev(base);
+
+	if ((dev == NULL) || (&dev->base != base)) {
+		SPS_ERR(sps, "%s:Failed to get dev for base addr 0x%pK\n",
+				__func__, base);
+		return SPS_ERROR;
+	}
+	offset = bam_get_register_offset(base, reg, param);
+	if (offset == (u32)BAM_ERROR) {
+		SPS_ERR(dev, "%s:Failed to get the register offset\n",
+			__func__);
+		return offset;
+	}
+	val = ioread32(dev->base + offset);
+	SPS_DBG(dev, "sps:bam 0x%pK(va) offset 0x%x reg 0x%x r_val 0x%x.\n",
+			dev->base, offset, reg, val);
+	return val;
+}
+
+/**
+ * Read a BAM register masked field, with debug info.
+ *
+ * @base - bam base virtual address.
+ * @reg - register identifier.
+ * @param - pipe or EE index used to compute the register offset.
+ * @mask - register bitmask.
+ *
+ * @return u32
+ */
+static inline u32 bam_read_reg_field(void *base, enum bam_regs reg, u32 param,
+								const u32 mask)
+{
+	u32 val, shift, offset = 0;
+	struct sps_bam *dev = to_sps_bam_dev(base);
+
+	if ((dev == NULL) || (&dev->base != base)) {
+		SPS_ERR(sps, "%s:Failed to get dev for base addr 0x%pK\n",
+				__func__, base);
+		return SPS_ERROR;
+	}
+	shift = find_first_bit((void *)&mask, 32);
+	offset = bam_get_register_offset(base, reg, param);
+	if (offset == (u32)BAM_ERROR) {
+		SPS_ERR(dev, "%s:Failed to get the register offset\n",
+			__func__);
+		return offset;
+	}
+	val = ioread32(dev->base + offset);
+	val &= mask;		/* clear other bits */
+	val >>= shift;
+	SPS_DBG(dev, "sps:bam 0x%pK(va) read reg 0x%x mask 0x%x r_val 0x%x.\n",
+			dev->base, offset, mask, val);
+	return val;
+}
+
+/**
+ * Write a BAM register, with debug info.
+ *
+ * @base - bam base virtual address.
+ * @reg - register identifier.
+ * @param - pipe or EE index used to compute the register offset.
+ * @val - value to write.
+ */
+static inline void bam_write_reg(void *base, enum bam_regs reg,
+						u32 param, u32 val)
+{
+	u32 offset = 0;
+	struct sps_bam *dev = to_sps_bam_dev(base);
+
+	if ((dev == NULL) || (&dev->base != base)) {
+		SPS_ERR(sps, "%s:Failed to get dev for base addr 0x%pK\n",
+				__func__, base);
+		return;
+	}
+	offset = bam_get_register_offset(base, reg, param);
+	if (offset == (u32)BAM_ERROR) {
+		SPS_ERR(dev, "%s:Failed to get the register offset\n",
+			__func__);
+		return;
+	}
+	iowrite32(val, dev->base + offset);
+	SPS_DBG(dev, "sps:bam 0x%pK(va) write reg 0x%x w_val 0x%x.\n",
+			dev->base, offset, val);
+}
+
+/**
+ * Write a BAM register masked field, with debug info.
+ *
+ * @base - bam base virtual address.
+ * @reg - register identifier.
+ * @param - pipe or EE index used to compute the register offset.
+ * @mask - register bitmask.
+ * @val - field value to write.
+ */
+ */
+static inline void bam_write_reg_field(void *base, enum bam_regs reg,
+					u32 param, const u32 mask, u32 val)
+{
+	u32 tmp, shift, offset = 0;
+	struct sps_bam *dev = to_sps_bam_dev(base);
+
+	if ((dev == NULL) || (&dev->base != base)) {
+		SPS_ERR(sps, "%s:Failed to get dev for base addr 0x%pK\n",
+				__func__, base);
+		return;
+	}
+	shift = find_first_bit((void *)&mask, 32);
+	offset = bam_get_register_offset(base, reg, param);
+	if (offset == (u32)BAM_ERROR) {
+		SPS_ERR(dev, "%s:Failed to get the register offset\n",
+			__func__);
+		return;
+	}
+	tmp = ioread32(dev->base + offset);
+
+	tmp &= ~mask;		/* clear written bits */
+	val = tmp | (val << shift);
+	iowrite32(val, dev->base + offset);
+	SPS_DBG(dev, "sps:bam 0x%pK(va) write reg 0x%x w_val 0x%x.\n",
+			dev->base, offset, val);
+}
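+
+/*
+ * Worked example (illustrative): bam_write_reg_field(base, P_CTRL, pipe,
+ * P_EN, 1) performs a read-modify-write. With P_EN = 0x2 (shift 1) and a
+ * current P_CTRL value of 0x8:
+ *
+ *	tmp = 0x8;		read back from the register
+ *	tmp &= ~0x2;		clear the field  -> 0x8
+ *	tmp |= 1 << 1;		insert new value -> 0xa
+ *
+ * so only the masked field changes and all other bits are preserved.
+ */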
+
+/**
+ * Initialize a BAM device
+ *
+ */
+int bam_init(void *base, u32 ee,
+		u16 summing_threshold,
+		u32 irq_mask, u32 *version,
+		u32 *num_pipes, u32 options)
+{
+	u32 cfg_bits;
+	u32 ver = 0;
+	struct sps_bam *dev = to_sps_bam_dev(base);
+
+	if ((dev == NULL) || (&dev->base != base)) {
+		SPS_ERR(sps, "%s:Failed to get dev for base addr 0x%pK\n",
+				__func__, base);
+		return SPS_ERROR;
+	}
+
+	SPS_DBG3(dev, "sps:%s:bam=%pa 0x%pK(va).ee=%d.", __func__,
+			BAM_ID(dev), dev->base, ee);
+
+	ver = bam_read_reg_field(base, REVISION, 0, BAM_REVISION);
+
+	if ((ver < BAM_MIN_VERSION) || (ver > BAM_MAX_VERSION)) {
+		SPS_ERR(dev, "sps:bam 0x%pK(va) Invalid BAM REVISION 0x%x.\n",
+				dev->base, ver);
+		return -ENODEV;
+	}
+
+	SPS_DBG(dev, "sps:REVISION of BAM 0x%pK is 0x%x.\n",
+			dev->base, ver);
+
+	if (summing_threshold == 0) {
+		summing_threshold = 4;
+		SPS_ERR(dev,
+			"sps:bam 0x%pK(va) summing_threshold is zero,use default 4.\n",
+			dev->base);
+	}
+
+	if (options & SPS_BAM_NO_EXT_P_RST)
+		cfg_bits = 0xffffffff & ~(3 << 11);
+	else
+		cfg_bits = 0xffffffff & ~(1 << 11);
+
+	bam_write_reg_field(base, CTRL, 0, BAM_SW_RST, 1);
+	/* No delay needed */
+	bam_write_reg_field(base, CTRL, 0, BAM_SW_RST, 0);
+
+	bam_write_reg_field(base, CTRL, 0, BAM_EN, 1);
+
+#ifdef CONFIG_SPS_SUPPORT_NDP_BAM
+	bam_write_reg_field(base, CTRL, 0, CACHE_MISS_ERR_RESP_EN, 0);
+
+	if (options & SPS_BAM_NO_LOCAL_CLK_GATING)
+		bam_write_reg_field(base, CTRL, 0, LOCAL_CLK_GATING, 0);
+	else
+		bam_write_reg_field(base, CTRL, 0, LOCAL_CLK_GATING, 1);
+
+	if (enhd_pipe) {
+		if (options & SPS_BAM_CANCEL_WB)
+			bam_write_reg_field(base, CTRL, 0,
+					BAM_MESS_ONLY_CANCEL_WB, 1);
+		else
+			bam_write_reg_field(base, CTRL, 0,
+					BAM_MESS_ONLY_CANCEL_WB, 0);
+	}
+#endif
+	bam_write_reg(base, DESC_CNT_TRSHLD, 0, summing_threshold);
+
+	bam_write_reg(base, CNFG_BITS, 0, cfg_bits);
+
+	/*
+	 * Enable the global BAM interrupt for error conditions,
+	 * filtered with the mask.
+	 * Note: pipe interrupts stay disabled until the per-pipe
+	 * P_IRQ_ENn registers are set.
+	 */
+	bam_write_reg_field(base, IRQ_SRCS_MSK_EE, ee, BAM_IRQ, 1);
+
+	bam_write_reg(base, IRQ_EN, 0, irq_mask);
+
+	*num_pipes = bam_read_reg_field(base, NUM_PIPES, 0, BAM_NUM_PIPES);
+
+	*version = ver;
+
+	return 0;
+}
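+
+/*
+ * Usage sketch (illustrative; the mask value and locals are hypothetical,
+ * and base must point at the sps_bam's base field, as the dev checks
+ * above require):
+ *
+ *	u32 ver, pipes;
+ *
+ *	if (bam_init(base, ee, 0, 0xffffffff, &ver, &pipes, 0))
+ *		return -ENODEV;
+ *
+ * A zero summing_threshold falls back to the default of 4; on success
+ * *version holds the BAM revision and *num_pipes the pipe count.
+ */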
+
+/**
+ * Set BAM global execution environment
+ *
+ * @base - BAM virtual base address
+ *
+ * @ee - BAM execution environment index
+ *
+ * @vmid - virtual master identifier
+ *
+ * @reset - enable/disable BAM global software reset
+ */
+static void bam_set_ee(void *base, u32 ee, u32 vmid,
+			enum bam_nonsecure_reset reset)
+{
+	bam_write_reg_field(base, TRUST_REG, 0, BAM_EE, ee);
+	bam_write_reg_field(base, TRUST_REG, 0, BAM_VMID, vmid);
+	bam_write_reg_field(base, TRUST_REG, 0, BAM_RST_BLOCK, reset);
+}
+
+/**
+ * Set the pipe execution environment
+ *
+ * @base - BAM virtual base address
+ *
+ * @pipe - pipe index
+ *
+ * @ee - BAM execution environment index
+ *
+ * @vmid - virtual master identifier
+ */
+static void bam_pipe_set_ee(void *base, u32 pipe, u32 ee, u32 vmid)
+{
+	bam_write_reg_field(base, P_TRUST_REG, pipe, BAM_P_EE, ee);
+	bam_write_reg_field(base, P_TRUST_REG, pipe, BAM_P_VMID, vmid);
+}
+
+/**
+ * Initialize BAM device security execution environment
+ */
+int bam_security_init(void *base, u32 ee, u32 vmid, u32 pipe_mask)
+{
+	u32 version;
+	u32 num_pipes;
+	u32 mask;
+	u32 pipe;
+	struct sps_bam *dev = to_sps_bam_dev(base);
+
+	if ((dev == NULL) || (&dev->base != base)) {
+		SPS_ERR(sps, "%s:Failed to get dev for base addr 0x%pK\n",
+				__func__, base);
+		return SPS_ERROR;
+	}
+
+	SPS_DBG3(dev, "sps:%s:bam=%pa 0x%pK(va).", __func__,
+			BAM_ID(dev), dev->base);
+
+	/*
+	 * Discover the hardware version number and the number of pipes
+	 * supported by this BAM
+	 */
+	version = bam_read_reg_field(base, REVISION, 0, BAM_REVISION);
+	num_pipes = bam_read_reg_field(base, NUM_PIPES, 0, BAM_NUM_PIPES);
+	if (version < 3 || version > 0x1F) {
+		SPS_ERR(dev,
+			"sps:bam 0x%pK(va) security is not supported for this BAM version 0x%x.\n",
+			dev->base, version);
+		return -ENODEV;
+	}
+
+	if (num_pipes > BAM_MAX_PIPES) {
+		SPS_ERR(dev,
+		"sps:bam 0x%pK(va) the number of pipes is more than the maximum number allowed.\n",
+			dev->base);
+		return -ENODEV;
+	}
+
+	for (pipe = 0, mask = 1; pipe < num_pipes; pipe++, mask <<= 1)
+		if ((mask & pipe_mask) != 0)
+			bam_pipe_set_ee(base, pipe, ee, vmid);
+
+	/* If MSbit is set, assign top-level interrupt to this EE */
+	mask = 1UL << 31;
+	if ((mask & pipe_mask) != 0)
+		bam_set_ee(base, ee, vmid, BAM_NONSECURE_RESET_ENABLE);
+
+	return 0;
+}
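+
+/*
+ * Illustrative note: pipe_mask doubles as a control word here. Assuming
+ * fewer than 32 pipes, a mask of 0x80000003 assigns pipes 0 and 1 to the
+ * given EE, and, because bit 31 is set, also routes the top-level BAM
+ * interrupt to that EE via bam_set_ee().
+ */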
+
+#ifdef CONFIG_SPS_SUPPORT_NDP_BAM
+static inline u32 bam_get_pipe_attr(void *base, u32 ee, bool global)
+{
+	u32 val;
+
+	if (global)
+		val = bam_read_reg_field(base, PIPE_ATTR_EE, ee, BAM_ENABLED);
+	else
+		val = bam_read_reg_field(base, PIPE_ATTR_EE, ee, P_ATTR);
+
+	return val;
+}
+#else
+static inline u32 bam_get_pipe_attr(void *base, u32 ee, bool global)
+{
+	return 0;
+}
+#endif
+
+/**
+ * Verify that a BAM device is enabled and gather its hardware
+ * configuration.
+ *
+ */
+int bam_check(void *base, u32 *version, u32 ee, u32 *num_pipes)
+{
+	u32 ver = 0;
+	u32 enabled = 0;
+	struct sps_bam *dev = to_sps_bam_dev(base);
+
+	if ((dev == NULL) || (&dev->base != base)) {
+		SPS_ERR(sps, "%s:Failed to get dev for base addr 0x%pK\n",
+				__func__, base);
+		return SPS_ERROR;
+	}
+
+	SPS_DBG3(dev, "sps:%s:bam=%pa 0x%pK(va).",
+			__func__, BAM_ID(dev), dev->base);
+
+	if (!enhd_pipe)
+		enabled = bam_read_reg_field(base, CTRL, 0, BAM_EN);
+	else
+		enabled = bam_get_pipe_attr(base, ee, true);
+
+	if (!enabled) {
+		SPS_ERR(dev, "sps:%s:bam 0x%pK(va) is not enabled.\n",
+				__func__, dev->base);
+		return -ENODEV;
+	}
+
+	ver = bam_read_reg(base, REVISION, 0) & BAM_REVISION;
+
+	/*
+	 *  Discover the hardware version number and the number of pipes
+	 *  supported by this BAM
+	 */
+	*num_pipes = bam_read_reg_field(base, NUM_PIPES, 0, BAM_NUM_PIPES);
+	*version = ver;
+
+	/* Check BAM version */
+	if ((ver < BAM_MIN_VERSION) || (ver > BAM_MAX_VERSION)) {
+		SPS_ERR(dev, "sps:%s:bam 0x%pK(va) Invalid BAM version 0x%x.\n",
+				__func__, dev->base, ver);
+		return -ENODEV;
+	}
+
+	return 0;
+}
+
+/**
+ * Disable a BAM device
+ *
+ */
+void bam_exit(void *base, u32 ee)
+{
+	struct sps_bam *dev = to_sps_bam_dev(base);
+
+	if ((dev == NULL) || (&dev->base != base)) {
+		SPS_ERR(sps, "%s:Failed to get dev for base addr 0x%pK\n",
+				__func__, base);
+		return;
+	}
+	SPS_DBG3(dev, "sps:%s:bam=%pa 0x%pK(va).ee=%d.",
+			__func__, BAM_ID(dev), dev->base, ee);
+
+	bam_write_reg_field(base, IRQ_SRCS_MSK_EE, ee, BAM_IRQ, 0);
+
+	bam_write_reg(base, IRQ_EN, 0, 0);
+
+	/* Disable the BAM */
+	bam_write_reg_field(base, CTRL, 0, BAM_EN, 0);
+}
+
+/**
+ * Output BAM register content
+ * including the TEST_BUS register content under
+ * different TEST_BUS_SEL values.
+ */
+void bam_output_register_content(void *base, u32 ee)
+{
+	u32 num_pipes;
+	u32 i;
+	u32 pipe_attr = 0;
+	struct sps_bam *dev = to_sps_bam_dev(base);
+
+	if ((dev == NULL) || (&dev->base != base)) {
+		SPS_ERR(sps, "%s:Failed to get dev for base addr 0x%pK\n",
+				__func__, base);
+		return;
+	}
+
+	print_bam_test_bus_reg(base, 0);
+
+	print_bam_selected_reg(base, BAM_MAX_EES);
+
+	num_pipes = bam_read_reg_field(base, NUM_PIPES, 0,
+					BAM_NUM_PIPES);
+	SPS_INFO(dev, "sps:bam %pa 0x%pK(va) has %d pipes.",
+			BAM_ID(dev), dev->base, num_pipes);
+
+	pipe_attr = enhd_pipe ?
+		bam_get_pipe_attr(base, ee, false) : 0x0;
+
+	if (!enhd_pipe || !pipe_attr) {
+		for (i = 0; i < num_pipes; i++)
+			print_bam_pipe_selected_reg(base, i);
+	} else {
+		for (i = 0; i < num_pipes; i++) {
+			if (pipe_attr & (1UL << i))
+				print_bam_pipe_selected_reg(base, i);
+		}
+	}
+}
+
+/**
+ * Get BAM IRQ source and clear global IRQ status
+ */
+u32 bam_check_irq_source(void *base, u32 ee, u32 mask,
+				enum sps_callback_case *cb_case)
+{
+	u32 source = 0, clr = 0;
+	struct sps_bam *dev = to_sps_bam_dev(base);
+
+	if ((dev == NULL) || (&dev->base != base)) {
+		SPS_ERR(sps, "%s:Failed to get dev for base addr 0x%pK\n",
+				__func__, base);
+		return SPS_ERROR;
+	}
+	source = bam_read_reg(base, IRQ_SRCS_EE, ee);
+	clr = source & (1UL << 31);
+
+	if (clr) {
+		u32 status = bam_read_reg(base, IRQ_STTS, 0);
+
+		if (status & IRQ_STTS_BAM_ERROR_IRQ) {
+			SPS_ERR(dev,
+				"sps:bam %pa 0x%pK(va);bam irq status=0x%x.\nsps: BAM_ERROR_IRQ\n",
+				BAM_ID(dev), dev->base, status);
+			bam_output_register_content(base, ee);
+			*cb_case = SPS_CALLBACK_BAM_ERROR_IRQ;
+		} else if (status & IRQ_STTS_BAM_HRESP_ERR_IRQ) {
+			SPS_ERR(dev,
+				"sps:bam %pa 0x%pK(va);bam irq status=0x%x.\nsps: BAM_HRESP_ERR_IRQ\n",
+				BAM_ID(dev), dev->base, status);
+			bam_output_register_content(base, ee);
+			*cb_case = SPS_CALLBACK_BAM_HRESP_ERR_IRQ;
+#ifdef CONFIG_SPS_SUPPORT_NDP_BAM
+		} else if (status & IRQ_STTS_BAM_TIMER_IRQ) {
+			SPS_DBG1(dev,
+				"sps:bam 0x%pK(va);receive BAM_TIMER_IRQ\n",
+					dev->base);
+			*cb_case = SPS_CALLBACK_BAM_TIMER_IRQ;
+#endif
+		} else {
+			SPS_INFO(dev,
+				"sps:bam %pa 0x%pK(va);bam irq status=0x%x.\n",
+				BAM_ID(dev), dev->base, status);
+		}
+
+		bam_write_reg(base, IRQ_CLR, 0, status);
+	}
+
+	source &= (mask|(1UL << 31));
+	return source;
+}
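+
+/*
+ * Usage sketch (illustrative): a top-level handler would typically do
+ *
+ *	enum sps_callback_case cb;
+ *	u32 srcs = bam_check_irq_source(base, ee, pipe_mask, &cb);
+ *
+ * and then service every pipe whose bit is set in srcs; bit 31 flags the
+ * global BAM IRQ, whose status has already been cleared above.
+ */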
+
+/*
+ * Reset a BAM pipe
+ */
+void bam_pipe_reset(void *base, u32 pipe)
+{
+	struct sps_bam *dev = to_sps_bam_dev(base);
+
+	if ((dev == NULL) || (&dev->base != base)) {
+		SPS_ERR(sps, "%s:Failed to get dev for base addr 0x%pK\n",
+				__func__, base);
+		return;
+	}
+	SPS_DBG2(dev, "sps:%s:bam=%pa 0x%pK(va).pipe=%d.",
+			__func__, BAM_ID(dev), dev->base, pipe);
+
+	bam_write_reg(base, P_RST, pipe, 1);
+	wmb(); /* ensure pipe is reset */
+	bam_write_reg(base, P_RST, pipe, 0);
+	wmb(); /* ensure pipe reset is de-asserted*/
+}
+
+/*
+ * Disable a BAM pipe
+ */
+void bam_disable_pipe(void *base, u32 pipe)
+{
+	struct sps_bam *dev = to_sps_bam_dev(base);
+
+	if ((dev == NULL) || (&dev->base != base)) {
+		SPS_ERR(sps, "%s:Failed to get dev for base addr 0x%pK\n",
+				__func__, base);
+		return;
+	}
+	SPS_DBG2(dev, "sps:%s:bam=0x%pK(va).pipe=%d.", __func__, base, pipe);
+	bam_write_reg_field(base, P_CTRL, pipe, P_EN, 0);
+	wmb(); /* ensure pipe is disabled */
+}
+
+/*
+ * Check if the last desc is ZLT
+ */
+bool bam_pipe_check_zlt(void *base, u32 pipe)
+{
+	struct sps_bam *dev = to_sps_bam_dev(base);
+
+	if ((dev == NULL) || (&dev->base != base)) {
+		SPS_ERR(sps, "%s:Failed to get dev for base addr 0x%pK\n",
+				__func__, base);
+		return false;
+	}
+
+	if (bam_read_reg_field(base, P_HALT, pipe, P_HALT_P_LAST_DESC_ZLT)) {
+		SPS_DBG(dev,
+			"sps:%s:bam=0x%pK(va).pipe=%d: the last desc is ZLT.",
+			__func__, base, pipe);
+		return true;
+	}
+
+	SPS_DBG(dev,
+		"sps:%s:bam=0x%pK(va).pipe=%d: the last desc is not ZLT.",
+		__func__, base, pipe);
+	return false;
+}
+
+/*
+ * Check if desc FIFO is empty
+ */
+bool bam_pipe_check_pipe_empty(void *base, u32 pipe)
+{
+	struct sps_bam *dev = to_sps_bam_dev(base);
+
+	if ((dev == NULL) || (&dev->base != base)) {
+		SPS_ERR(sps, "%s:Failed to get dev for base addr 0x%pK\n",
+				__func__, base);
+		return false;
+	}
+
+	if (bam_read_reg_field(base, P_HALT, pipe, P_HALT_P_PIPE_EMPTY)) {
+		SPS_DBG(dev,
+			"sps:%s:bam=0x%pK(va).pipe=%d: desc FIFO is empty.",
+			__func__, base, pipe);
+		return true;
+	}
+
+	SPS_DBG(dev,
+		"sps:%s:bam=0x%pK(va).pipe=%d: desc FIFO is not empty.",
+		__func__, base, pipe);
+	return false;
+}
+
+/**
+ * Initialize a BAM pipe
+ */
+int bam_pipe_init(void *base, u32 pipe, struct bam_pipe_parameters *param,
+					u32 ee)
+{
+	struct sps_bam *dev = to_sps_bam_dev(base);
+
+	if ((dev == NULL) || (&dev->base != base)) {
+		SPS_ERR(sps, "%s:Failed to get dev for base addr 0x%pK\n",
+				__func__, base);
+		return SPS_ERROR;
+	}
+	SPS_DBG2(dev, "sps:%s:bam=%pa 0x%pK(va).pipe=%d.",
+			__func__, BAM_ID(dev), dev->base, pipe);
+
+	/* Reset the BAM pipe */
+	bam_write_reg(base, P_RST, pipe, 1);
+	/* No delay needed */
+	bam_write_reg(base, P_RST, pipe, 0);
+
+	/* Enable the Pipe Interrupt at the BAM level */
+	bam_write_reg_field(base, IRQ_SRCS_MSK_EE, ee, (1 << pipe), 1);
+
+	bam_write_reg(base, P_IRQ_EN, pipe, param->pipe_irq_mask);
+
+	bam_write_reg_field(base, P_CTRL, pipe, P_DIRECTION, param->dir);
+	bam_write_reg_field(base, P_CTRL, pipe, P_SYS_MODE, param->mode);
+
+	bam_write_reg(base, P_EVNT_GEN_TRSHLD, pipe, param->event_threshold);
+
+	bam_write_reg(base, P_DESC_FIFO_ADDR, pipe,
+			SPS_GET_LOWER_ADDR(param->desc_base));
+	bam_write_reg_field(base, P_FIFO_SIZES, pipe, P_DESC_FIFO_SIZE,
+			    param->desc_size);
+
+	bam_write_reg_field(base, P_CTRL, pipe, P_SYS_STRM,
+			    param->stream_mode);
+
+#ifdef CONFIG_SPS_SUPPORT_NDP_BAM
+	if (SPS_LPAE && SPS_GET_UPPER_ADDR(param->desc_base))
+		bam_write_reg(base, P_DESC_FIFO_ADDR_MSB, pipe,
+				SPS_GET_UPPER_ADDR(param->desc_base));
+
+	bam_write_reg_field(base, P_CTRL, pipe, P_LOCK_GROUP,
+				param->lock_group);
+
+	SPS_DBG(dev, "sps:bam=0x%pK(va).pipe=%d.lock_group=%d.\n",
+			dev->base, pipe, param->lock_group);
+#endif
+
+	if (param->mode == BAM_PIPE_MODE_BAM2BAM) {
+		u32 peer_dest_addr = param->peer_phys_addr +
+				      bam_get_register_offset(base, P_EVNT_REG,
+						      param->peer_pipe);
+
+		bam_write_reg(base, P_DATA_FIFO_ADDR, pipe,
+			      SPS_GET_LOWER_ADDR(param->data_base));
+		bam_write_reg_field(base, P_FIFO_SIZES, pipe,
+				    P_DATA_FIFO_SIZE, param->data_size);
+
+		bam_write_reg(base, P_EVNT_DEST_ADDR, pipe, peer_dest_addr);
+
+		SPS_DBG2(dev,
+			"sps:bam=0x%pK(va).pipe=%d.peer_bam=0x%x.peer_pipe=%d.\n",
+			dev->base, pipe,
+			(u32) param->peer_phys_addr,
+			param->peer_pipe);
+
+#ifdef CONFIG_SPS_SUPPORT_NDP_BAM
+		if (SPS_LPAE && SPS_GET_UPPER_ADDR(param->data_base)) {
+			bam_write_reg(base, P_EVNT_DEST_ADDR_MSB, pipe, 0x0);
+			bam_write_reg(base, P_DATA_FIFO_ADDR_MSB, pipe,
+				      SPS_GET_UPPER_ADDR(param->data_base));
+		}
+
+		bam_write_reg_field(base, P_CTRL, pipe, P_WRITE_NWD,
+					param->write_nwd);
+
+		SPS_DBG(dev, "sps:%s WRITE_NWD bit for this bam2bam pipe.",
+			param->write_nwd ? "Set" : "Do not set");
+#endif
+	}
+
+	/* Pipe Enable - at last */
+	bam_write_reg_field(base, P_CTRL, pipe, P_EN, 1);
+
+	return 0;
+}
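+
+/*
+ * Configuration sketch (illustrative; the enum values for dir and mode
+ * are assumed counterparts of BAM_PIPE_MODE_BAM2BAM from bam.h, and all
+ * numeric values are hypothetical). A system-mode pipe might be set up as:
+ *
+ *	struct bam_pipe_parameters p = {
+ *		.dir = BAM_PIPE_CONSUMER,
+ *		.mode = BAM_PIPE_MODE_SYSTEM,
+ *		.desc_base = desc_fifo_phys,
+ *		.desc_size = 0x800,
+ *		.pipe_irq_mask = P_IRQ_EN_P_PRCSD_DESC_EN,
+ *		.event_threshold = 0x10,
+ *	};
+ *	bam_pipe_init(base, pipe, &p, ee);
+ *
+ * BAM2BAM mode additionally needs the data FIFO and the peer BAM's
+ * physical address and pipe number, as handled above.
+ */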
+
+/**
+ * Reset the BAM pipe
+ *
+ */
+void bam_pipe_exit(void *base, u32 pipe, u32 ee)
+{
+	struct sps_bam *dev = to_sps_bam_dev(base);
+
+	if ((dev == NULL) || (&dev->base != base)) {
+		SPS_ERR(sps, "%s:Failed to get dev for base addr 0x%pK\n",
+				__func__, base);
+		return;
+	}
+	SPS_DBG2(dev, "sps:%s:bam=%pa 0x%pK(va).pipe=%d.",
+			__func__, BAM_ID(dev), dev->base, pipe);
+
+	bam_write_reg(base, P_IRQ_EN, pipe, 0);
+
+	/* Disable the Pipe Interrupt at the BAM level */
+	bam_write_reg_field(base, IRQ_SRCS_MSK_EE, ee, (1 << pipe), 0);
+
+	/* Pipe Disable */
+	bam_write_reg_field(base, P_CTRL, pipe, P_EN, 0);
+}
+
+/**
+ * Enable a BAM pipe
+ *
+ */
+void bam_pipe_enable(void *base, u32 pipe)
+{
+	struct sps_bam *dev = to_sps_bam_dev(base);
+
+	if ((dev == NULL) || (&dev->base != base)) {
+		SPS_ERR(sps, "%s:Failed to get dev for base addr 0x%pK\n",
+				__func__, base);
+		return;
+	}
+	SPS_DBG2(dev, "sps:%s:bam=%pa 0x%pK(va).pipe=%d.",
+			__func__, BAM_ID(dev), dev->base, pipe);
+
+	if (bam_read_reg_field(base, P_CTRL, pipe, P_EN))
+		SPS_DBG2(dev, "sps:bam=0x%pK(va).pipe=%d is already enabled.\n",
+				dev->base, pipe);
+	else
+		bam_write_reg_field(base, P_CTRL, pipe, P_EN, 1);
+}
+
+/**
+ * Disable a BAM pipe
+ *
+ */
+void bam_pipe_disable(void *base, u32 pipe)
+{
+	struct sps_bam *dev = to_sps_bam_dev(base);
+
+	if ((dev == NULL) || (&dev->base != base)) {
+		SPS_ERR(sps, "%s:Failed to get dev for base addr 0x%pK\n",
+				__func__, base);
+		return;
+	}
+	SPS_DBG2(dev, "sps:%s:bam=%pa 0x%pK(va).pipe=%d.",
+			__func__, BAM_ID(dev), dev->base, pipe);
+
+	bam_write_reg_field(base, P_CTRL, pipe, P_EN, 0);
+}
+
+/**
+ * Check if a BAM pipe is enabled.
+ *
+ */
+int bam_pipe_is_enabled(void *base, u32 pipe)
+{
+	return bam_read_reg_field(base, P_CTRL, pipe, P_EN);
+}
+
+/**
+ * Configure interrupt for a BAM pipe
+ *
+ */
+void bam_pipe_set_irq(void *base, u32 pipe, enum bam_enable irq_en,
+		      u32 src_mask, u32 ee)
+{
+	struct sps_bam *dev = to_sps_bam_dev(base);
+
+	if ((dev == NULL) || (&dev->base != base)) {
+		SPS_ERR(sps, "%s:Failed to get dev for base addr 0x%pK\n",
+				__func__, base);
+		return;
+	}
+	SPS_DBG2(dev,
+		"sps:%s:bam=%pa 0x%pK(va).pipe=%d; irq_en:%d; src_mask:0x%x; ee:%d.\n",
+			__func__, BAM_ID(dev), dev->base, pipe,
+			irq_en, src_mask, ee);
+	if (src_mask & BAM_PIPE_IRQ_RST_ERROR) {
+		if (enhd_pipe)
+			bam_write_reg_field(base, IRQ_EN, 0,
+					IRQ_EN_BAM_ERROR_EN, 0);
+		else {
+			src_mask &= ~BAM_PIPE_IRQ_RST_ERROR;
+			SPS_DBG2(dev,
+				"sps:%s:SPS_O_RST_ERROR is not supported\n",
+				__func__);
+		}
+	}
+	if (src_mask & BAM_PIPE_IRQ_HRESP_ERROR) {
+		if (enhd_pipe)
+			bam_write_reg_field(base, IRQ_EN, 0,
+					IRQ_EN_BAM_HRESP_ERR_EN, 0);
+		else {
+			src_mask &= ~BAM_PIPE_IRQ_HRESP_ERROR;
+			SPS_DBG2(dev,
+				"sps:%s:SPS_O_HRESP_ERROR is not supported\n",
+				__func__);
+		}
+	}
+
+	bam_write_reg(base, P_IRQ_EN, pipe, src_mask);
+	bam_write_reg_field(base, IRQ_SRCS_MSK_EE, ee, (1 << pipe), irq_en);
+}
+
+/**
+ * Configure a BAM pipe for satellite MTI use
+ *
+ */
+void bam_pipe_satellite_mti(void *base, u32 pipe, u32 irq_gen_addr, u32 ee)
+{
+	bam_write_reg(base, P_IRQ_EN, pipe, 0);
+#ifndef CONFIG_SPS_SUPPORT_NDP_BAM
+	bam_write_reg(base, P_IRQ_DEST_ADDR, pipe, irq_gen_addr);
+	bam_write_reg_field(base, IRQ_SIC_SEL, 0, (1 << pipe), 1);
+#endif
+	bam_write_reg_field(base, IRQ_SRCS_MSK, 0, (1 << pipe), 1);
+}
+
+/**
+ * Configure MTI for a BAM pipe
+ *
+ */
+void bam_pipe_set_mti(void *base, u32 pipe, enum bam_enable irq_en,
+		      u32 src_mask, u32 irq_gen_addr)
+{
+	/*
+	 * MTI use is only supported on BAMs when global config is controlled
+	 * by a remote processor.
+	 * Consequently, the global configuration register to enable SIC (MTI)
+	 * support cannot be accessed.
+	 * The remote processor must be relied upon to enable the SIC and the
+	 * interrupt. Since the remote processor enables both SIC and interrupt,
+	 * the interrupt enable mask must be set to zero for polling mode.
+	 */
+#ifndef CONFIG_SPS_SUPPORT_NDP_BAM
+	bam_write_reg(base, P_IRQ_DEST_ADDR, pipe, irq_gen_addr);
+#endif
+	if (!irq_en)
+		src_mask = 0;
+
+	bam_write_reg(base, P_IRQ_EN, pipe, src_mask);
+}
+
+/**
+ * Get and Clear BAM pipe IRQ status
+ *
+ */
+u32 bam_pipe_get_and_clear_irq_status(void *base, u32 pipe)
+{
+	u32 status;
+
+	status = bam_read_reg(base, P_IRQ_STTS, pipe);
+	bam_write_reg(base, P_IRQ_CLR, pipe, status);
+
+	return status;
+}
+
+/**
+ * Set write offset for a BAM pipe
+ *
+ */
+void bam_pipe_set_desc_write_offset(void *base, u32 pipe, u32 next_write)
+{
+	/*
+	 * It is not necessary to perform a read-modify-write masking to write
+	 * the P_DESC_FIFO_PEER_OFST value, since the other field in the
+	 * register (P_BYTES_CONSUMED) is read-only.
+	 */
+	bam_write_reg_field(base, P_EVNT_REG, pipe, P_DESC_FIFO_PEER_OFST,
+			    next_write);
+}
+
+/**
+ * Get write offset for a BAM pipe
+ *
+ */
+u32 bam_pipe_get_desc_write_offset(void *base, u32 pipe)
+{
+	return bam_read_reg_field(base, P_EVNT_REG, pipe,
+				  P_DESC_FIFO_PEER_OFST);
+}
+
+/**
+ * Get read offset for a BAM pipe
+ *
+ */
+u32 bam_pipe_get_desc_read_offset(void *base, u32 pipe)
+{
+	return bam_read_reg_field(base, P_SW_OFSTS, pipe, SW_DESC_OFST);
+}
+
+/**
+ * Configure inactivity timer count for a BAM pipe
+ *
+ */
+void bam_pipe_timer_config(void *base, u32 pipe, enum bam_pipe_timer_mode mode,
+			 u32 timeout_count)
+{
+	u32 for_all_pipes = 0;
+
+#ifdef CONFIG_SPS_SUPPORT_NDP_BAM
+	for_all_pipes = bam_read_reg_field(base, REVISION, 0,
+						BAM_NUM_INACTIV_TMRS);
+#endif
+
+	if (for_all_pipes) {
+#ifdef CONFIG_SPS_SUPPORT_NDP_BAM
+		bam_write_reg_field(base, TIMER_CTRL, 0, TIMER_MODE, mode);
+		bam_write_reg_field(base, TIMER_CTRL, 0, TIMER_TRSHLD,
+				    timeout_count);
+#endif
+	} else {
+		bam_write_reg_field(base, P_TIMER_CTRL, pipe, P_TIMER_MODE,
+					mode);
+		bam_write_reg_field(base, P_TIMER_CTRL, pipe, P_TIMER_TRSHLD,
+				    timeout_count);
+	}
+}
+
+/**
+ * Reset inactivity timer for a BAM pipe
+ *
+ */
+void bam_pipe_timer_reset(void *base, u32 pipe)
+{
+	u32 for_all_pipes = 0;
+
+#ifdef CONFIG_SPS_SUPPORT_NDP_BAM
+	for_all_pipes = bam_read_reg_field(base, REVISION, 0,
+						BAM_NUM_INACTIV_TMRS);
+#endif
+
+	if (for_all_pipes) {
+#ifdef CONFIG_SPS_SUPPORT_NDP_BAM
+		/* reset */
+		bam_write_reg_field(base, TIMER_CTRL, 0, TIMER_RST, 0);
+		/* active */
+		bam_write_reg_field(base, TIMER_CTRL, 0, TIMER_RST, 1);
+#endif
+	} else {
+		/* reset */
+		bam_write_reg_field(base, P_TIMER_CTRL, pipe, P_TIMER_RST, 0);
+		/* active */
+		bam_write_reg_field(base, P_TIMER_CTRL, pipe, P_TIMER_RST, 1);
+	}
+}
+
+/**
+ * Get inactivity timer count for a BAM pipe
+ *
+ */
+u32 bam_pipe_timer_get_count(void *base, u32 pipe)
+{
+	return bam_read_reg(base, P_TIMER, pipe);
+}
+
+/* halt and un-halt a pipe */
+void bam_pipe_halt(void *base, u32 pipe, bool halt)
+{
+	if (halt)
+		bam_write_reg_field(base, P_HALT, pipe, P_HALT_P_HALT, 1);
+	else
+		bam_write_reg_field(base, P_HALT, pipe, P_HALT_P_HALT, 0);
+}
+
+/* output the content of BAM-level registers */
+void print_bam_reg(void *virt_addr)
+{
+	int i, n, index = 0;
+	u32 *bam = (u32 *) virt_addr;
+	u32 ctrl;
+	u32 ver;
+	u32 pipes;
+	u32 offset = 0;
+
+	if (bam == NULL)
+		return;
+
+#ifdef CONFIG_SPS_SUPPORT_NDP_BAM
+	if (bam_type == SPS_BAM_NDP_4K) {
+		ctrl = bam[0x0 / 4];
+		ver = bam[0x1000 / 4];
+		pipes = bam[0x1008 / 4];
+	} else {
+		ctrl = bam[0x0 / 4];
+		ver = bam[0x4 / 4];
+		pipes = bam[0x3c / 4];
+	}
+#else
+	ctrl = bam[0xf80 / 4];
+	ver = bam[0xf84 / 4];
+	pipes = bam[0xfbc / 4];
+#endif
+
+	SPS_DUMP("%s",
+		"\nsps:<bam-begin> --- Content of BAM-level registers---\n");
+
+	SPS_DUMP("BAM_CTRL: 0x%x.\n", ctrl);
+	SPS_DUMP("BAM_REVISION: 0x%x.\n", ver);
+	SPS_DUMP("NUM_PIPES: 0x%x.\n", pipes);
+
+#ifdef CONFIG_SPS_SUPPORT_NDP_BAM
+	if (bam_type == SPS_BAM_NDP_4K)
+		offset = 0x301c;
+	else
+		offset = 0x80;
+	for (i = 0x0; i < offset; i += 0x10)
+#else
+	for (i = 0xf80; i < 0x1000; i += 0x10)
+#endif
+		SPS_DUMP("bam addr 0x%x: 0x%x,0x%x,0x%x,0x%x.\n", i,
+			bam[i / 4], bam[(i / 4) + 1],
+			bam[(i / 4) + 2], bam[(i / 4) + 3]);
+
+#ifdef CONFIG_SPS_SUPPORT_NDP_BAM
+	if (bam_type == SPS_BAM_NDP_4K) {
+		offset = 0x3000;
+		index = 0x1000;
+	} else {
+		offset = 0x800;
+		index = 0x80;
+	}
+	for (i = offset, n = 0; n++ < 8; i += index)
+#else
+	for (i = 0x1800, n = 0; n++ < 4; i += 0x80)
+#endif
+		SPS_DUMP("bam addr 0x%x: 0x%x,0x%x,0x%x,0x%x.\n", i,
+			bam[i / 4], bam[(i / 4) + 1],
+			bam[(i / 4) + 2], bam[(i / 4) + 3]);
+
+	SPS_DUMP("%s",
+		"\nsps:<bam-begin> --- Content of BAM-level registers ---\n");
+}
+
+/* output the content of BAM pipe registers */
+void print_bam_pipe_reg(void *virt_addr, u32 pipe_index)
+{
+	int i;
+	u32 *bam = (u32 *) virt_addr;
+	u32 pipe = pipe_index;
+	u32 offset = 0;
+
+	if (bam == NULL)
+		return;
+
+	SPS_DUMP("\nsps:<pipe-begin> --- Content of Pipe %d registers ---\n",
+			pipe);
+
+	SPS_DUMP("%s", "-- Pipe Management Registers --\n");
+
+#ifdef CONFIG_SPS_SUPPORT_NDP_BAM
+	if (bam_type == SPS_BAM_NDP_4K)
+		offset = 0x13000;
+	else
+		offset = 0x1000;
+	for (i = offset + 0x1000 * pipe; i < offset + 0x1000 * pipe + 0x80;
+	    i += 0x10)
+#else
+	for (i = 0x0000 + 0x80 * pipe; i < 0x0000 + 0x80 * (pipe + 1);
+	    i += 0x10)
+#endif
+		SPS_DUMP("bam addr 0x%x: 0x%x,0x%x,0x%x,0x%x.\n", i,
+			bam[i / 4], bam[(i / 4) + 1],
+			bam[(i / 4) + 2], bam[(i / 4) + 3]);
+
+	SPS_DUMP("%s",
+		"-- Pipe Configuration and Internal State Registers --\n");
+
+#ifdef CONFIG_SPS_SUPPORT_NDP_BAM
+	if (bam_type == SPS_BAM_NDP_4K)
+		offset = 0x13800;
+	else
+		offset = 0x1800;
+	for (i = offset + 0x1000 * pipe; i < offset + 0x1000 * pipe + 0x40;
+		i += 0x10)
+#else
+	for (i = 0x1000 + 0x40 * pipe; i < 0x1000 + 0x40 * (pipe + 1);
+	    i += 0x10)
+#endif
+		SPS_DUMP("bam addr 0x%x: 0x%x,0x%x,0x%x,0x%x.\n", i,
+			bam[i / 4], bam[(i / 4) + 1],
+			bam[(i / 4) + 2], bam[(i / 4) + 3]);
+
+	SPS_DUMP("\nsps:<pipe-end> --- Content of Pipe %d registers ---\n",
+			pipe);
+}
+
+/* output the content of selected BAM-level registers */
+void print_bam_selected_reg(void *virt_addr, u32 ee)
+{
+	void *base = virt_addr;
+
+	u32 bam_ctrl;
+	u32 bam_revision;
+	u32 bam_rev_num;
+	u32 bam_rev_ee_num;
+
+	u32 bam_num_pipes;
+	u32 bam_pipe_num;
+	u32 bam_data_addr_bus_width;
+
+	u32 bam_desc_cnt_trshld;
+	u32 bam_desc_cnt_trd_val;
+
+	u32 bam_irq_en;
+	u32 bam_irq_stts;
+
+	u32 bam_irq_src_ee = 0;
+	u32 bam_irq_msk_ee = 0;
+	u32 bam_irq_unmsk_ee = 0;
+	u32 bam_pipe_attr_ee = 0;
+
+	u32 bam_ahb_err_ctrl;
+	u32 bam_ahb_err_addr;
+	u32 bam_ahb_err_data;
+	u32 bam_cnfg_bits;
+
+	u32 bam_sw_rev = 0;
+	u32 bam_timer = 0;
+	u32 bam_timer_ctrl = 0;
+	u32 bam_ahb_err_addr_msb = 0;
+
+	if (base == NULL)
+		return;
+
+	bam_ctrl = bam_read_reg(base, CTRL, 0);
+	bam_revision = bam_read_reg(base, REVISION, 0);
+	bam_rev_num = bam_read_reg_field(base, REVISION, 0, BAM_REVISION);
+	bam_rev_ee_num = bam_read_reg_field(base, REVISION, 0, BAM_NUM_EES);
+
+	bam_num_pipes = bam_read_reg(base, NUM_PIPES, 0);
+	bam_pipe_num = bam_read_reg_field(base, NUM_PIPES, 0, BAM_NUM_PIPES);
+	bam_data_addr_bus_width = bam_read_reg_field(base, NUM_PIPES, 0,
+					BAM_DATA_ADDR_BUS_WIDTH);
+
+	bam_desc_cnt_trshld = bam_read_reg(base, DESC_CNT_TRSHLD, 0);
+	bam_desc_cnt_trd_val = bam_read_reg_field(base, DESC_CNT_TRSHLD, 0,
+					BAM_DESC_CNT_TRSHLD);
+
+	bam_irq_en = bam_read_reg(base, IRQ_EN, 0);
+	bam_irq_stts = bam_read_reg(base, IRQ_STTS, 0);
+
+	if (ee < BAM_MAX_EES) {
+		bam_irq_src_ee = bam_read_reg(base, IRQ_SRCS_EE, ee);
+		bam_irq_msk_ee = bam_read_reg(base, IRQ_SRCS_MSK_EE, ee);
+		bam_irq_unmsk_ee = bam_read_reg(base, IRQ_SRCS_UNMASKED_EE, ee);
+	}
+
+	bam_ahb_err_ctrl = bam_read_reg(base, AHB_MASTER_ERR_CTRLS, 0);
+	bam_ahb_err_addr = bam_read_reg(base, AHB_MASTER_ERR_ADDR, 0);
+	bam_ahb_err_data = bam_read_reg(base, AHB_MASTER_ERR_DATA, 0);
+	bam_cnfg_bits = bam_read_reg(base, CNFG_BITS, 0);
+
+#ifdef CONFIG_SPS_SUPPORT_NDP_BAM
+	bam_sw_rev = bam_read_reg(base, SW_REVISION, 0);
+	bam_timer = bam_read_reg(base, TIMER, 0);
+	bam_timer_ctrl = bam_read_reg(base, TIMER_CTRL, 0);
+	bam_ahb_err_addr_msb = SPS_LPAE ?
+		bam_read_reg(base, AHB_MASTER_ERR_ADDR_MSB, 0) : 0;
+	if (ee < BAM_MAX_EES)
+		bam_pipe_attr_ee = enhd_pipe ?
+			bam_read_reg(base, PIPE_ATTR_EE, ee) : 0x0;
+#endif
+
+	SPS_DUMP("%s", "\nsps:<bam-begin> --- BAM-level registers ---\n\n");
+
+	SPS_DUMP("BAM_CTRL: 0x%x\n", bam_ctrl);
+	SPS_DUMP("BAM_REVISION: 0x%x\n", bam_revision);
+	SPS_DUMP("    REVISION: 0x%x\n", bam_rev_num);
+	SPS_DUMP("    NUM_EES: %d\n", bam_rev_ee_num);
+	SPS_DUMP("BAM_SW_REVISION: 0x%x\n", bam_sw_rev);
+	SPS_DUMP("BAM_NUM_PIPES: %d\n", bam_num_pipes);
+	SPS_DUMP("BAM_DATA_ADDR_BUS_WIDTH: %d\n",
+			((bam_data_addr_bus_width == 0x0) ? 32 : 36));
+	SPS_DUMP("    NUM_PIPES: %d\n", bam_pipe_num);
+	SPS_DUMP("BAM_DESC_CNT_TRSHLD: 0x%x\n", bam_desc_cnt_trshld);
+	SPS_DUMP("    DESC_CNT_TRSHLD: 0x%x (%d)\n", bam_desc_cnt_trd_val,
+			bam_desc_cnt_trd_val);
+
+	SPS_DUMP("BAM_IRQ_EN: 0x%x\n", bam_irq_en);
+	SPS_DUMP("BAM_IRQ_STTS: 0x%x\n", bam_irq_stts);
+
+	if (ee < BAM_MAX_EES) {
+		SPS_DUMP("BAM_IRQ_SRCS_EE(%d): 0x%x\n", ee, bam_irq_src_ee);
+		SPS_DUMP("BAM_IRQ_SRCS_MSK_EE(%d): 0x%x\n", ee, bam_irq_msk_ee);
+		SPS_DUMP("BAM_IRQ_SRCS_UNMASKED_EE(%d): 0x%x\n", ee,
+				bam_irq_unmsk_ee);
+		SPS_DUMP("BAM_PIPE_ATTR_EE(%d): 0x%x\n", ee, bam_pipe_attr_ee);
+	}
+
+	SPS_DUMP("BAM_AHB_MASTER_ERR_CTRLS: 0x%x\n", bam_ahb_err_ctrl);
+	SPS_DUMP("BAM_AHB_MASTER_ERR_ADDR: 0x%x\n", bam_ahb_err_addr);
+	SPS_DUMP("BAM_AHB_MASTER_ERR_ADDR_MSB: 0x%x\n", bam_ahb_err_addr_msb);
+	SPS_DUMP("BAM_AHB_MASTER_ERR_DATA: 0x%x\n", bam_ahb_err_data);
+
+	SPS_DUMP("BAM_CNFG_BITS: 0x%x\n", bam_cnfg_bits);
+	SPS_DUMP("BAM_TIMER: 0x%x\n", bam_timer);
+	SPS_DUMP("BAM_TIMER_CTRL: 0x%x\n", bam_timer_ctrl);
+
+	SPS_DUMP("%s", "\nsps:<bam-end> --- BAM-level registers ---\n\n");
+}
+
+/* output the content of selected BAM pipe registers */
+void print_bam_pipe_selected_reg(void *virt_addr, u32 pipe_index)
+{
+	void *base = virt_addr;
+	u32 pipe = pipe_index;
+
+	u32 p_ctrl;
+	u32 p_sys_mode;
+	u32 p_direction;
+	u32 p_lock_group = 0;
+
+	u32 p_irq_en;
+	u32 p_irq_stts;
+	u32 p_irq_stts_eot;
+	u32 p_irq_stts_int;
+
+	u32 p_prd_sdbd;
+	u32 p_bytes_free;
+	u32 p_prd_ctrl;
+	u32 p_prd_toggle;
+	u32 p_prd_sb_updated;
+
+	u32 p_con_sdbd;
+	u32 p_bytes_avail;
+	u32 p_con_ctrl;
+	u32 p_con_toggle;
+	u32 p_con_ack_toggle;
+	u32 p_con_ack_toggle_r;
+	u32 p_con_wait_4_ack;
+	u32 p_con_sb_updated;
+
+	u32 p_sw_offset;
+	u32 p_read_pointer;
+	u32 p_evnt_reg;
+	u32 p_write_pointer;
+
+	u32 p_evnt_dest;
+	u32 p_evnt_dest_msb = 0;
+	u32 p_desc_fifo_addr;
+	u32 p_desc_fifo_addr_msb = 0;
+	u32 p_desc_fifo_size;
+	u32 p_data_fifo_addr;
+	u32 p_data_fifo_addr_msb = 0;
+	u32 p_data_fifo_size;
+	u32 p_fifo_sizes;
+
+	u32 p_evnt_trd;
+	u32 p_evnt_trd_val;
+
+	u32 p_retr_ct;
+	u32 p_retr_offset;
+	u32 p_si_ct;
+	u32 p_si_offset;
+	u32 p_df_ct = 0;
+	u32 p_df_offset = 0;
+	u32 p_au_ct1;
+	u32 p_psm_ct2;
+	u32 p_psm_ct3;
+	u32 p_psm_ct3_msb = 0;
+	u32 p_psm_ct4;
+	u32 p_psm_ct5;
+
+	u32 p_timer;
+	u32 p_timer_ctrl;
+
+	if (base == NULL)
+		return;
+
+	p_ctrl = bam_read_reg(base, P_CTRL, pipe);
+	p_sys_mode = bam_read_reg_field(base, P_CTRL, pipe, P_SYS_MODE);
+	p_direction = bam_read_reg_field(base, P_CTRL, pipe, P_DIRECTION);
+
+	p_irq_en = bam_read_reg(base, P_IRQ_EN, pipe);
+	p_irq_stts = bam_read_reg(base, P_IRQ_STTS, pipe);
+	p_irq_stts_eot = bam_read_reg_field(base, P_IRQ_STTS, pipe,
+					P_IRQ_STTS_P_TRNSFR_END_IRQ);
+	p_irq_stts_int = bam_read_reg_field(base, P_IRQ_STTS, pipe,
+					P_IRQ_STTS_P_PRCSD_DESC_IRQ);
+
+	p_prd_sdbd = bam_read_reg(base, P_PRDCR_SDBND, pipe);
+	p_bytes_free = bam_read_reg_field(base, P_PRDCR_SDBND, pipe,
+					P_PRDCR_SDBNDn_BAM_P_BYTES_FREE);
+	p_prd_ctrl = bam_read_reg_field(base, P_PRDCR_SDBND, pipe,
+					P_PRDCR_SDBNDn_BAM_P_CTRL);
+	p_prd_toggle = bam_read_reg_field(base, P_PRDCR_SDBND, pipe,
+					P_PRDCR_SDBNDn_BAM_P_TOGGLE);
+	p_prd_sb_updated = bam_read_reg_field(base, P_PRDCR_SDBND, pipe,
+					P_PRDCR_SDBNDn_BAM_P_SB_UPDATED);
+	p_con_sdbd = bam_read_reg(base, P_CNSMR_SDBND, pipe);
+	p_bytes_avail = bam_read_reg_field(base, P_CNSMR_SDBND, pipe,
+					P_CNSMR_SDBNDn_BAM_P_BYTES_AVAIL);
+	p_con_ctrl = bam_read_reg_field(base, P_CNSMR_SDBND, pipe,
+					P_CNSMR_SDBNDn_BAM_P_CTRL);
+	p_con_toggle = bam_read_reg_field(base, P_CNSMR_SDBND, pipe,
+					P_CNSMR_SDBNDn_BAM_P_TOGGLE);
+	p_con_ack_toggle = bam_read_reg_field(base, P_CNSMR_SDBND, pipe,
+					P_CNSMR_SDBNDn_BAM_P_ACK_TOGGLE);
+	p_con_ack_toggle_r = bam_read_reg_field(base, P_CNSMR_SDBND, pipe,
+					P_CNSMR_SDBNDn_BAM_P_ACK_TOGGLE_R);
+	p_con_wait_4_ack = bam_read_reg_field(base, P_CNSMR_SDBND, pipe,
+					P_CNSMR_SDBNDn_BAM_P_WAIT_4_ACK);
+	p_con_sb_updated = bam_read_reg_field(base, P_CNSMR_SDBND, pipe,
+					P_CNSMR_SDBNDn_BAM_P_SB_UPDATED);
+
+	p_sw_offset = bam_read_reg(base, P_SW_OFSTS, pipe);
+	p_read_pointer = bam_read_reg_field(base, P_SW_OFSTS, pipe,
+						SW_DESC_OFST);
+	p_evnt_reg = bam_read_reg(base, P_EVNT_REG, pipe);
+	p_write_pointer = bam_read_reg_field(base, P_EVNT_REG, pipe,
+						P_DESC_FIFO_PEER_OFST);
+
+	p_evnt_dest = bam_read_reg(base, P_EVNT_DEST_ADDR, pipe);
+	p_desc_fifo_addr = bam_read_reg(base, P_DESC_FIFO_ADDR, pipe);
+	p_desc_fifo_size = bam_read_reg_field(base, P_FIFO_SIZES, pipe,
+						P_DESC_FIFO_SIZE);
+	p_data_fifo_addr = bam_read_reg(base, P_DATA_FIFO_ADDR, pipe);
+	p_data_fifo_size = bam_read_reg_field(base, P_FIFO_SIZES, pipe,
+						P_DATA_FIFO_SIZE);
+	p_fifo_sizes = bam_read_reg(base, P_FIFO_SIZES, pipe);
+
+	p_evnt_trd = bam_read_reg(base, P_EVNT_GEN_TRSHLD, pipe);
+	p_evnt_trd_val = bam_read_reg_field(base, P_EVNT_GEN_TRSHLD, pipe,
+					P_EVNT_GEN_TRSHLD_P_TRSHLD);
+
+	p_retr_ct = bam_read_reg(base, P_RETR_CNTXT, pipe);
+	p_retr_offset = bam_read_reg_field(base, P_RETR_CNTXT, pipe,
+					P_RETR_CNTXT_RETR_DESC_OFST);
+	p_si_ct = bam_read_reg(base, P_SI_CNTXT, pipe);
+	p_si_offset = bam_read_reg_field(base, P_SI_CNTXT, pipe,
+					P_SI_CNTXT_SI_DESC_OFST);
+	p_au_ct1 = bam_read_reg(base, P_AU_PSM_CNTXT_1, pipe);
+	p_psm_ct2 = bam_read_reg(base, P_PSM_CNTXT_2, pipe);
+	p_psm_ct3 = bam_read_reg(base, P_PSM_CNTXT_3, pipe);
+	p_psm_ct4 = bam_read_reg(base, P_PSM_CNTXT_4, pipe);
+	p_psm_ct5 = bam_read_reg(base, P_PSM_CNTXT_5, pipe);
+
+	p_timer = bam_read_reg(base, P_TIMER, pipe);
+	p_timer_ctrl = bam_read_reg(base, P_TIMER_CTRL, pipe);
+
+#ifdef CONFIG_SPS_SUPPORT_NDP_BAM
+	p_evnt_dest_msb = SPS_LPAE ?
+		bam_read_reg(base, P_EVNT_DEST_ADDR_MSB, pipe) : 0;
+
+	p_desc_fifo_addr_msb = SPS_LPAE ?
+		bam_read_reg(base, P_DESC_FIFO_ADDR_MSB, pipe) : 0;
+	p_data_fifo_addr_msb = SPS_LPAE ?
+		bam_read_reg(base, P_DATA_FIFO_ADDR_MSB, pipe) : 0;
+
+	p_psm_ct3_msb = SPS_LPAE ?
+		bam_read_reg(base, P_PSM_CNTXT_3_MSB, pipe) : 0;
+	p_lock_group = bam_read_reg_field(base, P_CTRL, pipe, P_LOCK_GROUP);
+	p_df_ct = bam_read_reg(base, P_DF_CNTXT, pipe);
+	p_df_offset = bam_read_reg_field(base, P_DF_CNTXT, pipe,
+					P_DF_CNTXT_DF_DESC_OFST);
+#endif
+
+	SPS_DUMP("\nsps:<pipe-begin> --- Registers of Pipe %d ---\n\n", pipe);
+
+	SPS_DUMP("BAM_P_CTRL: 0x%x\n", p_ctrl);
+	SPS_DUMP("    SYS_MODE: %d\n", p_sys_mode);
+	if (p_direction)
+		SPS_DUMP("    DIRECTION:%d->Producer\n", p_direction);
+	else
+		SPS_DUMP("    DIRECTION:%d->Consumer\n", p_direction);
+	SPS_DUMP("    LOCK_GROUP: 0x%x (%d)\n", p_lock_group, p_lock_group);
+
+	SPS_DUMP("BAM_P_IRQ_EN: 0x%x\n", p_irq_en);
+	SPS_DUMP("BAM_P_IRQ_STTS: 0x%x\n", p_irq_stts);
+	SPS_DUMP("    TRNSFR_END_IRQ(EOT): 0x%x\n", p_irq_stts_eot);
+	SPS_DUMP("    PRCSD_DESC_IRQ(INT): 0x%x\n", p_irq_stts_int);
+
+	SPS_DUMP("BAM_P_PRDCR_SDBND: 0x%x\n", p_prd_sdbd);
+	SPS_DUMP("    BYTES_FREE: 0x%x (%d)\n", p_bytes_free, p_bytes_free);
+	SPS_DUMP("    CTRL: 0x%x\n", p_prd_ctrl);
+	SPS_DUMP("    TOGGLE: %d\n", p_prd_toggle);
+	SPS_DUMP("    SB_UPDATED: %d\n", p_prd_sb_updated);
+	SPS_DUMP("BAM_P_CNSMR_SDBND: 0x%x\n", p_con_sdbd);
+	SPS_DUMP("    WAIT_4_ACK: %d\n", p_con_wait_4_ack);
+	SPS_DUMP("    BYTES_AVAIL: 0x%x (%d)\n", p_bytes_avail, p_bytes_avail);
+	SPS_DUMP("    CTRL: 0x%x\n", p_con_ctrl);
+	SPS_DUMP("    TOGGLE: %d\n", p_con_toggle);
+	SPS_DUMP("    ACK_TOGGLE: %d\n", p_con_ack_toggle);
+	SPS_DUMP("    ACK_TOGGLE_R: %d\n", p_con_ack_toggle_r);
+	SPS_DUMP("    SB_UPDATED: %d\n", p_con_sb_updated);
+
+	SPS_DUMP("BAM_P_SW_DESC_OFST: 0x%x\n", p_sw_offset);
+	SPS_DUMP("    SW_DESC_OFST: 0x%x\n", p_read_pointer);
+	SPS_DUMP("BAM_P_EVNT_REG: 0x%x\n", p_evnt_reg);
+	SPS_DUMP("    DESC_FIFO_PEER_OFST: 0x%x\n", p_write_pointer);
+
+	SPS_DUMP("BAM_P_RETR_CNTXT: 0x%x\n", p_retr_ct);
+	SPS_DUMP("    RETR_OFFSET: 0x%x\n", p_retr_offset);
+	SPS_DUMP("BAM_P_SI_CNTXT: 0x%x\n", p_si_ct);
+	SPS_DUMP("    SI_OFFSET: 0x%x\n", p_si_offset);
+	SPS_DUMP("BAM_P_DF_CNTXT: 0x%x\n", p_df_ct);
+	SPS_DUMP("    DF_OFFSET: 0x%x\n", p_df_offset);
+
+	SPS_DUMP("BAM_P_DESC_FIFO_ADDR: 0x%x\n", p_desc_fifo_addr);
+	SPS_DUMP("BAM_P_DESC_FIFO_ADDR_MSB: 0x%x\n", p_desc_fifo_addr_msb);
+	SPS_DUMP("BAM_P_DATA_FIFO_ADDR: 0x%x\n", p_data_fifo_addr);
+	SPS_DUMP("BAM_P_DATA_FIFO_ADDR_MSB: 0x%x\n", p_data_fifo_addr_msb);
+	SPS_DUMP("BAM_P_FIFO_SIZES: 0x%x\n", p_fifo_sizes);
+	SPS_DUMP("    DESC_FIFO_SIZE: 0x%x (%d)\n", p_desc_fifo_size,
+							p_desc_fifo_size);
+	SPS_DUMP("    DATA_FIFO_SIZE: 0x%x (%d)\n", p_data_fifo_size,
+							p_data_fifo_size);
+
+	SPS_DUMP("BAM_P_EVNT_DEST_ADDR: 0x%x\n", p_evnt_dest);
+	SPS_DUMP("BAM_P_EVNT_DEST_ADDR_MSB: 0x%x\n", p_evnt_dest_msb);
+	SPS_DUMP("BAM_P_EVNT_GEN_TRSHLD: 0x%x\n", p_evnt_trd);
+	SPS_DUMP("    EVNT_GEN_TRSHLD: 0x%x (%d)\n", p_evnt_trd_val,
+							p_evnt_trd_val);
+
+	SPS_DUMP("BAM_P_AU_PSM_CNTXT_1: 0x%x\n", p_au_ct1);
+	SPS_DUMP("BAM_P_PSM_CNTXT_2: 0x%x\n", p_psm_ct2);
+	SPS_DUMP("BAM_P_PSM_CNTXT_3: 0x%x\n", p_psm_ct3);
+	SPS_DUMP("BAM_P_PSM_CNTXT_3_MSB: 0x%x\n", p_psm_ct3_msb);
+	SPS_DUMP("BAM_P_PSM_CNTXT_4: 0x%x\n", p_psm_ct4);
+	SPS_DUMP("BAM_P_PSM_CNTXT_5: 0x%x\n", p_psm_ct5);
+	SPS_DUMP("BAM_P_TIMER: 0x%x\n", p_timer);
+	SPS_DUMP("BAM_P_TIMER_CTRL: 0x%x\n", p_timer_ctrl);
+
+	SPS_DUMP("\nsps:<pipe-end> --- Registers of Pipe %d ---\n\n", pipe);
+}
+
+/* output descriptor FIFO of a pipe */
+void print_bam_pipe_desc_fifo(void *virt_addr, u32 pipe_index, u32 option)
+{
+	void *base = virt_addr;
+	u32 pipe = pipe_index;
+	u32 desc_fifo_addr;
+	u32 desc_fifo_size;
+	u32 *desc_fifo;
+	int i;
+	char desc_info[MAX_MSG_LEN];
+
+	if (base == NULL)
+		return;
+
+	desc_fifo_addr = bam_read_reg(base, P_DESC_FIFO_ADDR, pipe);
+	desc_fifo_size = bam_read_reg_field(base, P_FIFO_SIZES, pipe,
+						P_DESC_FIFO_SIZE);
+
+	if (desc_fifo_addr == 0) {
+		SPS_ERR(sps, "sps:%s:desc FIFO address of Pipe %d is NULL.\n",
+			__func__, pipe);
+		return;
+	} else if (desc_fifo_size == 0) {
+		SPS_ERR(sps, "sps:%s:desc FIFO size of Pipe %d is 0.\n",
+			__func__, pipe);
+		return;
+	}
+
+	SPS_DUMP("\nsps:<desc-begin> --- descriptor FIFO of Pipe %d -----\n\n",
+			pipe);
+
+	SPS_DUMP("BAM_P_DESC_FIFO_ADDR: 0x%x\n"
+		"BAM_P_DESC_FIFO_SIZE: 0x%x (%d)\n\n",
+		desc_fifo_addr, desc_fifo_size, desc_fifo_size);
+
+	desc_fifo = (u32 *) phys_to_virt(desc_fifo_addr);
+
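+	/*
+	 * option semantics, as implemented below: 100 dumps the first
+	 * 64 bytes of the data block referenced by each descriptor;
+	 * any other non-zero value dumps a window of up to option * 128
+	 * bytes of descriptors around the current read offset; 0 dumps
+	 * the whole descriptor FIFO.
+	 */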
+	if (option == 100) {
+		SPS_DUMP("%s",
+			"----- start of data blocks -----\n");
+		for (i = 0; i < desc_fifo_size; i += 8) {
+			u32 *data_block_vir;
+			u32 data_block_phy = desc_fifo[i / 4];
+
+			if (data_block_phy) {
+				data_block_vir =
+					(u32 *) phys_to_virt(data_block_phy);
+
+				SPS_DUMP("desc addr:0x%x; data addr:0x%x:\n",
+					desc_fifo_addr + i, data_block_phy);
+				SPS_DUMP("0x%x, 0x%x, 0x%x, 0x%x\n",
+					data_block_vir[0], data_block_vir[1],
+					data_block_vir[2], data_block_vir[3]);
+				SPS_DUMP("0x%x, 0x%x, 0x%x, 0x%x\n",
+					data_block_vir[4], data_block_vir[5],
+					data_block_vir[6], data_block_vir[7]);
+				SPS_DUMP("0x%x, 0x%x, 0x%x, 0x%x\n",
+					data_block_vir[8], data_block_vir[9],
+					data_block_vir[10], data_block_vir[11]);
+				SPS_DUMP("0x%x, 0x%x, 0x%x, 0x%x\n\n",
+					data_block_vir[12], data_block_vir[13],
+					data_block_vir[14], data_block_vir[15]);
+			}
+		}
+		SPS_DUMP("%s",
+			"----- end of data blocks -----\n");
+	} else if (option) {
+		u32 size = option * 128;
+		u32 current_desc = bam_pipe_get_desc_read_offset(base,
+								pipe_index);
+		u32 begin = 0;
+		u32 end = desc_fifo_size;
+
+		if (current_desc > size / 2)
+			begin = current_desc - size / 2;
+
+		if (desc_fifo_size > current_desc + size / 2)
+			end = current_desc + size / 2;
+
+		SPS_DUMP("%s",
+			"------------ begin of partial FIFO ------------\n\n");
+
+		SPS_DUMP("%s",
+			"desc addr; desc content; desc flags\n");
+		for (i = begin; i < end; i += 0x8) {
+			u32 offset;
+			u32 flags = desc_fifo[(i / 4) + 1] >> 16;
+
+			memset(desc_info, 0, sizeof(desc_info));
+			offset = scnprintf(desc_info, 40, "0x%x: 0x%x, 0x%x: ",
+				desc_fifo_addr + i,
+				desc_fifo[i / 4], desc_fifo[(i / 4) + 1]);
+
+			if (flags & SPS_IOVEC_FLAG_INT)
+				offset += scnprintf(desc_info + offset, 5,
+							"INT ");
+			if (flags & SPS_IOVEC_FLAG_EOT)
+				offset += scnprintf(desc_info + offset, 5,
+							"EOT ");
+			if (flags & SPS_IOVEC_FLAG_EOB)
+				offset += scnprintf(desc_info + offset, 5,
+							"EOB ");
+			if (flags & SPS_IOVEC_FLAG_NWD)
+				offset += scnprintf(desc_info + offset, 5,
+							"NWD ");
+			if (flags & SPS_IOVEC_FLAG_CMD)
+				offset += scnprintf(desc_info + offset, 5,
+							"CMD ");
+			if (flags & SPS_IOVEC_FLAG_LOCK)
+				offset += scnprintf(desc_info + offset, 5,
+							"LCK ");
+			if (flags & SPS_IOVEC_FLAG_UNLOCK)
+				offset += scnprintf(desc_info + offset, 5,
+							"UNL ");
+			if (flags & SPS_IOVEC_FLAG_IMME)
+				offset += scnprintf(desc_info + offset, 5,
+							"IMM ");
+
+			SPS_DUMP("%s\n", desc_info);
+		}
+
+		SPS_DUMP("%s",
+			"\n------------  end of partial FIFO  ------------\n");
+	} else {
+		SPS_DUMP("%s",
+			"---------------- begin of FIFO ----------------\n\n");
+
+		for (i = 0; i < desc_fifo_size; i += 0x10)
+			SPS_DUMP("addr 0x%x: 0x%x, 0x%x, 0x%x, 0x%x.\n",
+				desc_fifo_addr + i,
+				desc_fifo[i / 4], desc_fifo[(i / 4) + 1],
+				desc_fifo[(i / 4) + 2], desc_fifo[(i / 4) + 3]);
+
+		SPS_DUMP("%s",
+			"\n----------------  end of FIFO  ----------------\n");
+	}
+
+	SPS_DUMP("\nsps:<desc-end> --- descriptor FIFO of Pipe %d -----\n\n",
+			pipe);
+}
+
+/* output BAM_TEST_BUS_REG with specified TEST_BUS_SEL */
+void print_bam_test_bus_reg(void *base, u32 tb_sel)
+{
+	u32 i;
+	u32 test_bus_selection[] = {0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7,
+			0x8, 0x9, 0xa, 0xb, 0xc, 0xd, 0xe, 0xf,
+			0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
+			0x20, 0x21, 0x22, 0x23,
+			0x41, 0x42, 0x43, 0x44, 0x45, 0x46};
+	u32 size = sizeof(test_bus_selection) / sizeof(u32);
+
+	if (base == NULL) {
+		SPS_ERR(sps, "sps:%s:BAM is NULL.\n", __func__);
+		return;
+	}
+
+	if (tb_sel) {
+		SPS_DUMP("\nsps:Specified TEST_BUS_SEL value: 0x%x\n", tb_sel);
+		bam_write_reg_field(base, TEST_BUS_SEL, 0, BAM_TESTBUS_SEL,
+					tb_sel);
+		SPS_DUMP("sps:BAM_TEST_BUS_REG:0x%x for TEST_BUS_SEL:0x%x\n\n",
+			bam_read_reg(base, TEST_BUS_REG, 0),
+			bam_read_reg_field(base, TEST_BUS_SEL, 0,
+						BAM_TESTBUS_SEL));
+	}
+
+	SPS_DUMP("%s", "\nsps:<testbus-begin> --- BAM TEST_BUS dump -----\n\n");
+
+	/* output other selections */
+	for (i = 0; i < size; i++) {
+		bam_write_reg_field(base, TEST_BUS_SEL, 0, BAM_TESTBUS_SEL,
+					test_bus_selection[i]);
+
+		SPS_DUMP("sps:TEST_BUS_REG:0x%x\t  TEST_BUS_SEL:0x%x\n",
+			bam_read_reg(base, TEST_BUS_REG, 0),
+			bam_read_reg_field(base, TEST_BUS_SEL, 0,
+					BAM_TESTBUS_SEL));
+	}
+
+	SPS_DUMP("%s", "\nsps:<testbus-end> --- BAM TEST_BUS dump -----\n\n");
+}
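+
+/*
+ * Illustrative usage of the dump helpers above (not part of the driver),
+ * e.g. from a debug hook:
+ *
+ *	print_bam_pipe_desc_fifo(bam_virt_base, 4, 0);	// option 0: whole FIFO
+ *	print_bam_test_bus_reg(bam_virt_base, 0);	// tb_sel 0: table only
+ */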
diff -Nruw linux-4.4.115-fbx/drivers/platform/msm./sps/bam.h linux-4.4.115-fbx/drivers/platform/msm/sps/bam.h
--- linux-4.4.115-fbx/drivers/platform/msm./sps/bam.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/platform/msm/sps/bam.h	2019-01-22 16:16:26.187270712 +0100
@@ -0,0 +1,447 @@
+/* Copyright (c) 2011-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/* Bus-Access-Manager (BAM) Hardware manager functions API. */
+
+#ifndef _BAM_H_
+#define _BAM_H_
+
+#include <linux/types.h>	/* u32 */
+#include <linux/io.h>		/* ioread32() */
+#include <linux/bitops.h>	/* find_first_bit() */
+#include "spsi.h"
+
+/* Pipe mode */
+enum bam_pipe_mode {
+	BAM_PIPE_MODE_BAM2BAM = 0,	/* BAM to BAM */
+	BAM_PIPE_MODE_SYSTEM = 1,	/* BAM to/from System Memory */
+};
+
+/* Pipe direction */
+enum bam_pipe_dir {
+	/* The Pipe Reads data from data-fifo or system-memory */
+	BAM_PIPE_CONSUMER = 0,
+	/* The Pipe Writes data to data-fifo or system-memory */
+	BAM_PIPE_PRODUCER = 1,
+};
+
+/* Stream mode Type */
+enum bam_stream_mode {
+	BAM_STREAM_MODE_DISABLE = 0,
+	BAM_STREAM_MODE_ENABLE = 1,
+};
+
+/* NWD written Type */
+enum bam_write_nwd {
+	BAM_WRITE_NWD_DISABLE = 0,
+	BAM_WRITE_NWD_ENABLE = 1,
+};
+
+
+/* Enable Type */
+enum bam_enable {
+	BAM_DISABLE = 0,
+	BAM_ENABLE = 1,
+};
+
+/* Pipe timer mode */
+enum bam_pipe_timer_mode {
+	BAM_PIPE_TIMER_ONESHOT = 0,
+	BAM_PIPE_TIMER_PERIODIC = 1,
+};
+
+struct transfer_descriptor {
+	u32 addr;	/* Buffer physical address */
+	u32 size:16;	/* Buffer size in bytes */
+	u32 flags:16;	/* Flag bitmask (see SPS_IOVEC_FLAG_ #defines) */
+}  __packed;
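+
+/*
+ * Illustrative only (not part of the driver): a single 8-byte hardware
+ * descriptor for a 512-byte buffer, flagged end-of-transfer plus
+ * interrupt, would be written into the descriptor FIFO as
+ *
+ *	struct transfer_descriptor desc = {
+ *		.addr  = buf_phys,	// hypothetical physical address
+ *		.size  = 512,
+ *		.flags = SPS_IOVEC_FLAG_EOT | SPS_IOVEC_FLAG_INT,
+ *	};
+ *
+ * assuming the SPS_IOVEC_FLAG_ values fit the 16-bit flags field, as the
+ * comment above implies.
+ */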
+
+/* BAM pipe initialization parameters */
+struct bam_pipe_parameters {
+	u16 event_threshold;
+	u32 pipe_irq_mask;
+	enum bam_pipe_dir dir;
+	enum bam_pipe_mode mode;
+	enum bam_write_nwd write_nwd;
+	phys_addr_t desc_base;	/* Physical address of descriptor FIFO */
+	u32 desc_size;	/* Size (bytes) of descriptor FIFO */
+	u32 lock_group;	/* The lock group this pipe belongs to */
+	enum bam_stream_mode stream_mode;
+	u32 ee;		/* BAM execution environment index */
+
+	/* The following are only valid if mode is BAM2BAM */
+	u32 peer_phys_addr;
+	u32 peer_pipe;
+	phys_addr_t data_base;	/* Physical address of data FIFO */
+	u32 data_size;	/* Size (bytes) of data FIFO */
+};
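+
+/*
+ * Minimal sketch of filling these parameters for a system-mode consumer
+ * pipe (illustrative; desc_fifo_phys and pipe_index are placeholders,
+ * not driver symbols):
+ *
+ *	struct bam_pipe_parameters hw_params;
+ *
+ *	memset(&hw_params, 0, sizeof(hw_params));
+ *	hw_params.dir = BAM_PIPE_CONSUMER;
+ *	hw_params.mode = BAM_PIPE_MODE_SYSTEM;
+ *	hw_params.desc_base = desc_fifo_phys;
+ *	hw_params.desc_size = 0x800;
+ *	hw_params.ee = 0;
+ *	bam_pipe_init(base, pipe_index, &hw_params, 0);
+ *
+ * The BAM-to-BAM fields (peer_*, data_*) stay zero here, matching how
+ * sps_bam_pipe_connect() only fills them for BAM2BAM mode.
+ */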
+
+/**
+ * Initialize a BAM device
+ *
+ * This function initializes a BAM device.
+ *
+ * @base - BAM virtual base address.
+ *
+ * @ee - BAM execution environment index
+ *
+ * @summing_threshold - summing threshold (global for all pipes)
+ *
+ * @irq_mask - error interrupts mask
+ *
+ * @version - return BAM hardware version
+ *
+ * @num_pipes - return number of pipes
+ *
+ * @options - BAM configuration options
+ *
+ * @return 0 on success, negative value on error
+ *
+ */
+int bam_init(void *base,
+		u32 ee,
+		u16 summing_threshold,
+		u32 irq_mask, u32 *version,
+		u32 *num_pipes, u32 options);
+
+/**
+ * Initialize BAM device security execution environment
+ *
+ * @base - BAM virtual base address.
+ *
+ * @ee - BAM execution environment index
+ *
+ * @vmid - virtual master identifier
+ *
+ * @pipe_mask - bit mask of pipes to assign to EE
+ *
+ * @return 0 on success, negative value on error
+ *
+ */
+int bam_security_init(void *base, u32 ee, u32 vmid, u32 pipe_mask);
+
+/**
+ * Check a BAM device
+ *
+ * This function verifies that a BAM device is enabled and gathers
+ *    the hardware configuration.
+ *
+ * @base - BAM virtual base address.
+ *
+ * @version - return BAM hardware version
+ *
+ * @ee - BAM execution environment index
+ *
+ * @num_pipes - return number of pipes
+ *
+ * @return 0 on success, negative value on error
+ *
+ */
+int bam_check(void *base, u32 *version, u32 ee, u32 *num_pipes);
+
+/**
+ * Disable a BAM device
+ *
+ * This function disables a BAM device.
+ *
+ * @base - BAM virtual base address.
+ *
+ * @ee - BAM execution environment index
+ *
+ */
+void bam_exit(void *base, u32 ee);
+
+/**
+ * This function prints BAM register content
+ * including TEST_BUS and PIPE register content.
+ *
+ * @base - BAM virtual base address.
+ *
+ * @ee - BAM execution environment index
+ */
+void bam_output_register_content(void *base, u32 ee);
+
+
+/**
+ * Get BAM IRQ source and clear global IRQ status
+ *
+ * This function gets the BAM IRQ source and clears the global IRQ
+ * status if it is non-zero.
+ *
+ * @base - BAM virtual base address.
+ *
+ * @ee - BAM execution environment index
+ *
+ * @mask - active pipes mask.
+ *
+ * @cb_case - callback case.
+ *
+ * @return IRQ status
+ *
+ */
+u32 bam_check_irq_source(void *base, u32 ee, u32 mask,
+				enum sps_callback_case *cb_case);
+
+
+/**
+ * Initialize a BAM pipe
+ *
+ * This function initializes a BAM pipe.
+ *
+ * @base - BAM virtual base address.
+ *
+ * @pipe - pipe index
+ *
+ * @param - bam pipe parameters.
+ *
+ * @ee - BAM execution environment index
+ *
+ * @return 0 on success, negative value on error
+ *
+ */
+int bam_pipe_init(void *base, u32 pipe, struct bam_pipe_parameters *param,
+					u32 ee);
+
+/**
+ * Reset the BAM pipe
+ *
+ * This function resets the BAM pipe.
+ *
+ * @base - BAM virtual base address.
+ *
+ * @pipe - pipe index
+ *
+ * @ee - BAM execution environment index
+ *
+ */
+void bam_pipe_exit(void *base, u32 pipe, u32 ee);
+
+/**
+ * Enable a BAM pipe
+ *
+ * This function enables a BAM pipe.
+ *
+ * @base - BAM virtual base address.
+ *
+ * @pipe - pipe index
+ *
+ */
+void bam_pipe_enable(void *base, u32 pipe);
+
+/**
+ * Disable a BAM pipe
+ *
+ * This function disables a BAM pipe.
+ *
+ * @base - BAM virtual base address.
+ *
+ * @pipe - pipe index
+ *
+ */
+void bam_pipe_disable(void *base, u32 pipe);
+
+/**
+ * Get a BAM pipe enable state
+ *
+ * This function determines if a BAM pipe is enabled.
+ *
+ * @base - BAM virtual base address.
+ *
+ * @pipe - pipe index
+ *
+ * @return true if enabled, false if disabled
+ *
+ */
+int bam_pipe_is_enabled(void *base, u32 pipe);
+
+/**
+ * Configure interrupt for a BAM pipe
+ *
+ * This function configures the interrupt for a BAM pipe.
+ *
+ * @base - BAM virtual base address.
+ *
+ * @pipe - pipe index
+ *
+ * @irq_en - enable or disable interrupt
+ *
+ * @src_mask - interrupt source mask, set regardless of whether
+ *    interrupt is disabled
+ *
+ * @ee - BAM execution environment index
+ *
+ */
+void bam_pipe_set_irq(void *base, u32 pipe, enum bam_enable irq_en,
+		      u32 src_mask, u32 ee);
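+
+/*
+ * Illustrative call (not part of the driver): enable the EOT and
+ * processed-descriptor interrupt sources for pipe 2 on EE 0, using the
+ * BAM_PIPE_IRQ_ bits from enum bam_pipe_irq:
+ *
+ *	bam_pipe_set_irq(base, 2, BAM_ENABLE,
+ *			 BAM_PIPE_IRQ_EOT | BAM_PIPE_IRQ_DESC_INT, 0);
+ */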
+
+/**
+ * Configure a BAM pipe for satellite MTI use
+ *
+ * This function configures a BAM pipe for satellite MTI use.
+ *
+ * @base - BAM virtual base address.
+ *
+ * @pipe - pipe index
+ *
+ * @irq_gen_addr - physical address written to generate MTI
+ *
+ * @ee - BAM execution environment index
+ *
+ */
+void bam_pipe_satellite_mti(void *base, u32 pipe, u32 irq_gen_addr, u32 ee);
+
+/**
+ * Configure MTI for a BAM pipe
+ *
+ * This function configures the MTI interrupt for a BAM pipe.
+ *
+ * @base - BAM virtual base address.
+ *
+ * @pipe - pipe index
+ *
+ * @irq_en - enable or disable interrupt
+ *
+ * @src_mask - interrupt source mask, set regardless of whether
+ *    interrupt is disabled
+ *
+ * @irq_gen_addr - physical address written to generate MTI
+ *
+ */
+void bam_pipe_set_mti(void *base, u32 pipe, enum bam_enable irq_en,
+		      u32 src_mask, u32 irq_gen_addr);
+
+/**
+ * Get and Clear BAM pipe IRQ status
+ *
+ * This function gets and clears BAM pipe IRQ status.
+ *
+ * @base - BAM virtual base address.
+ *
+ * @pipe - pipe index
+ *
+ * @return IRQ status
+ *
+ */
+u32 bam_pipe_get_and_clear_irq_status(void *base, u32 pipe);
+
+/**
+ * Set write offset for a BAM pipe
+ *
+ * This function sets the write offset for a BAM pipe.  This is
+ *    the offset that is maintained by software in system mode.
+ *
+ * @base - BAM virtual base address.
+ *
+ * @pipe - pipe index
+ *
+ * @next_write - descriptor FIFO write offset
+ *
+ */
+void bam_pipe_set_desc_write_offset(void *base, u32 pipe, u32 next_write);
+
+/**
+ * Get write offset for a BAM pipe
+ *
+ * This function gets the write offset for a BAM pipe.  This is
+ *    the offset that is maintained by the pipe's peer pipe or by software.
+ *
+ * @base - BAM virtual base address.
+ *
+ * @pipe - pipe index
+ *
+ * @return descriptor FIFO write offset
+ *
+ */
+u32 bam_pipe_get_desc_write_offset(void *base, u32 pipe);
+
+/**
+ * Get read offset for a BAM pipe
+ *
+ * This function gets the read offset for a BAM pipe.  This is
+ *    the offset that is maintained by the pipe in system mode.
+ *
+ * @base - BAM virtual base address.
+ *
+ * @pipe - pipe index
+ *
+ * @return descriptor FIFO read offset
+ *
+ */
+u32 bam_pipe_get_desc_read_offset(void *base, u32 pipe);
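+
+/*
+ * Illustrative use of the two offsets (not part of the driver): the
+ * number of descriptor-FIFO bytes the pipe has not yet consumed,
+ * assuming fifo_size is the descriptor FIFO size in bytes:
+ *
+ *	u32 rd = bam_pipe_get_desc_read_offset(base, pipe);
+ *	u32 wr = bam_pipe_get_desc_write_offset(base, pipe);
+ *	u32 pending = (wr >= rd) ? (wr - rd) : (fifo_size - rd + wr);
+ */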
+
+/**
+ * Configure inactivity timer count for a BAM pipe
+ *
+ * This function configures the inactivity timer count for a BAM pipe.
+ *
+ * @base - BAM virtual base address.
+ *
+ * @pipe - pipe index
+ *
+ * @mode - timer operating mode
+ *
+ * @timeout_count - timeout count
+ *
+ */
+void bam_pipe_timer_config(void *base, u32 pipe,
+			   enum bam_pipe_timer_mode mode,
+			   u32 timeout_count);
+
+/**
+ * Reset inactivity timer for a BAM pipe
+ *
+ * This function resets the inactivity timer count for a BAM pipe.
+ *
+ * @base - BAM virtual base address.
+ *
+ * @pipe - pipe index
+ *
+ */
+void bam_pipe_timer_reset(void *base, u32 pipe);
+
+/**
+ * Get inactivity timer count for a BAM pipe
+ *
+ * This function gets the inactivity timer count for a BAM pipe.
+ *
+ * @base - BAM virtual base address.
+ *
+ * @pipe - pipe index
+ *
+ * @return inactivity timer count
+ *
+ */
+u32 bam_pipe_timer_get_count(void *base, u32 pipe);
+
+/*
+ * bam_pipe_check_zlt - Check if the last desc is ZLT.
+ * @base:	BAM virtual address
+ * @pipe:	pipe index
+ *
+ * This function checks if the last desc in the desc FIFO is a ZLT desc.
+ *
+ * @return true if the last desc in the desc FIFO is a ZLT desc. Otherwise
+ *  return false.
+ */
+bool bam_pipe_check_zlt(void *base, u32 pipe);
+
+/*
+ * bam_pipe_check_pipe_empty - Check if desc FIFO is empty.
+ * @base:	BAM virtual address
+ * @pipe:	pipe index
+ *
+ * This function checks if the desc FIFO of this pipe is empty.
+ *
+ * @return true if desc FIFO is empty. Otherwise return false.
+ */
+bool bam_pipe_check_pipe_empty(void *base, u32 pipe);
+#endif				/* _BAM_H_ */
diff -Nruw linux-4.4.115-fbx/drivers/platform/msm./sps/Makefile linux-4.4.115-fbx/drivers/platform/msm/sps/Makefile
--- linux-4.4.115-fbx/drivers/platform/msm./sps/Makefile	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/platform/msm/sps/Makefile	2019-01-22 16:16:26.187270712 +0100
@@ -0,0 +1,2 @@
+obj-y += bam.o sps_bam.o sps.o sps_dma.o sps_map.o sps_mem.o sps_rm.o
+
diff -Nruw linux-4.4.115-fbx/drivers/platform/msm./sps/sps_bam.c linux-4.4.115-fbx/drivers/platform/msm/sps/sps_bam.c
--- linux-4.4.115-fbx/drivers/platform/msm./sps/sps_bam.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/platform/msm/sps/sps_bam.c	2019-10-29 09:26:24.617212710 +0100
@@ -0,0 +1,2497 @@
+/* Copyright (c) 2011-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/types.h>	/* u32 */
+#include <linux/kernel.h>	/* pr_info() */
+#include <linux/mutex.h>	/* mutex */
+#include <linux/list.h>		/* list_head */
+#include <linux/slab.h>		/* kzalloc() */
+#include <linux/interrupt.h>	/* request_irq() */
+#include <linux/memory.h>	/* memset */
+#include <linux/vmalloc.h>
+
+#include "sps_bam.h"
+#include "bam.h"
+#include "spsi.h"
+
+/* All BAM global IRQ sources */
+#define BAM_IRQ_ALL (BAM_DEV_IRQ_HRESP_ERROR | BAM_DEV_IRQ_ERROR |   \
+	BAM_DEV_IRQ_TIMER)
+
+/* BAM device state flags */
+#define BAM_STATE_INIT     (1UL << 1)
+#define BAM_STATE_IRQ      (1UL << 2)
+#define BAM_STATE_ENABLED  (1UL << 3)
+#define BAM_STATE_BAM2BAM  (1UL << 4)
+#define BAM_STATE_MTI      (1UL << 5)
+#define BAM_STATE_REMOTE   (1UL << 6)
+
+/* Mask for valid hardware descriptor flags */
+#define BAM_IOVEC_FLAG_MASK   \
+	(SPS_IOVEC_FLAG_INT | SPS_IOVEC_FLAG_EOT | SPS_IOVEC_FLAG_EOB |   \
+	SPS_IOVEC_FLAG_NWD | SPS_IOVEC_FLAG_CMD | SPS_IOVEC_FLAG_LOCK |   \
+	SPS_IOVEC_FLAG_UNLOCK | SPS_IOVEC_FLAG_IMME)
+
+/* Mask for invalid BAM-to-BAM pipe options */
+#define BAM2BAM_O_INVALID   \
+	(SPS_O_DESC_DONE | \
+	 SPS_O_EOT | \
+	 SPS_O_POLL | \
+	 SPS_O_NO_Q | \
+	 SPS_O_ACK_TRANSFERS)
+
+/**
+ * Pipe/client pointer value indicating pipe is allocated, but no client has
+ * been assigned
+ */
+#define BAM_PIPE_UNASSIGNED   ((struct sps_pipe *)((~0x0ul) - 0x88888888))
+
+/* Check whether pipe has been assigned */
+#define BAM_PIPE_IS_ASSIGNED(p)  \
+	(((p) != NULL) && ((p) != BAM_PIPE_UNASSIGNED))
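+
+/*
+ * A dev->pipes[] entry therefore has three states:
+ *   NULL                - pipe is free
+ *   BAM_PIPE_UNASSIGNED - allocated by sps_bam_pipe_alloc(), no client yet
+ *   any other pointer   - assigned to a client's sps_pipe struct
+ */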
+
+/* Is MTI use supported for a specific BAM version? */
+#define BAM_VERSION_MTI_SUPPORT(ver)   (ver <= 2)
+
+/* Event option<->event translation table entry */
+struct sps_bam_opt_event_table {
+	enum sps_event event_id;
+	enum sps_option option;
+	enum bam_pipe_irq pipe_irq;
+};
+
+static const struct sps_bam_opt_event_table opt_event_table[] = {
+	{SPS_EVENT_EOT, SPS_O_EOT, BAM_PIPE_IRQ_EOT},
+	{SPS_EVENT_DESC_DONE, SPS_O_DESC_DONE, BAM_PIPE_IRQ_DESC_INT},
+	{SPS_EVENT_WAKEUP, SPS_O_WAKEUP, BAM_PIPE_IRQ_WAKE},
+	{SPS_EVENT_INACTIVE, SPS_O_INACTIVE, BAM_PIPE_IRQ_TIMER},
+	{SPS_EVENT_OUT_OF_DESC, SPS_O_OUT_OF_DESC,
+		BAM_PIPE_IRQ_OUT_OF_DESC},
+	{SPS_EVENT_ERROR, SPS_O_ERROR, BAM_PIPE_IRQ_ERROR},
+	{SPS_EVENT_RST_ERROR, SPS_O_RST_ERROR, BAM_PIPE_IRQ_RST_ERROR},
+	{SPS_EVENT_HRESP_ERROR, SPS_O_HRESP_ERROR, BAM_PIPE_IRQ_HRESP_ERROR}
+};
+
+/* Pipe event source handler */
+static void pipe_handler(struct sps_bam *dev,
+			struct sps_pipe *pipe);
+
+/**
+ * Pipe transfer event (EOT, DESC_DONE) source handler.
+ * This function is called by pipe_handler() and other functions to process the
+ * descriptor FIFO.
+ */
+static void pipe_handler_eot(struct sps_bam *dev,
+			   struct sps_pipe *pipe);
+
+/**
+ * BAM driver initialization
+ */
+int sps_bam_driver_init(u32 options)
+{
+	int n;
+
+	/*
+	 * Check that SPS_O_ and BAM_PIPE_IRQ_ values are identical.
+	 * This is required so that the raw pipe IRQ status can be passed
+	 * to the client in the SPS_EVENT_IRQ.
+	 */
+	for (n = 0; n < ARRAY_SIZE(opt_event_table); n++) {
+		if ((u32)opt_event_table[n].option !=
+			(u32)opt_event_table[n].pipe_irq) {
+			SPS_ERR(sps, "sps:SPS_O 0x%x != HAL IRQ 0x%x\n",
+				opt_event_table[n].option,
+				opt_event_table[n].pipe_irq);
+			return SPS_ERROR;
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * Check BAM interrupt
+ */
+int sps_bam_check_irq(struct sps_bam *dev)
+{
+	struct sps_pipe *pipe;
+	u32 source;
+	unsigned long flags = 0;
+	int ret = 0;
+
+	SPS_DBG1(dev, "sps:%s:bam=%pa.\n", __func__, BAM_ID(dev));
+
+	spin_lock_irqsave(&dev->isr_lock, flags);
+
+polling:
+	/* Get BAM interrupt source(s) */
+	if ((dev->state & BAM_STATE_MTI) == 0) {
+		u32 mask = dev->pipe_active_mask;
+		enum sps_callback_case cb_case;
+		source = bam_check_irq_source(&dev->base, dev->props.ee,
+						mask, &cb_case);
+
+		SPS_DBG1(dev, "sps:bam=%pa;source=0x%x;mask=0x%x.\n",
+				BAM_ID(dev), source, mask);
+
+		if ((source == 0) &&
+			(dev->props.options & SPS_BAM_RES_CONFIRM)) {
+			SPS_DBG2(dev,
+				"sps: BAM %pa has no source (source = 0x%x).\n",
+				BAM_ID(dev), source);
+
+			spin_unlock_irqrestore(&dev->isr_lock, flags);
+			return SPS_ERROR;
+		}
+
+		if ((source & (1UL << 31)) && (dev->props.callback)) {
+			SPS_DBG1(dev, "sps:bam=%pa;callback for case %d.\n",
+				BAM_ID(dev), cb_case);
+			dev->props.callback(cb_case, dev->props.user);
+		}
+
+		/* Mask any non-local source */
+		source &= dev->pipe_active_mask;
+	} else {
+		/* If MTIs are used, must poll each active pipe */
+		source = dev->pipe_active_mask;
+
+		SPS_DBG1(dev, "sps:MTI:bam=%pa;source=0x%x.\n",
+				BAM_ID(dev), source);
+	}
+
+	/* Process active pipe sources */
+	pipe = list_first_entry(&dev->pipes_q, struct sps_pipe, list);
+
+	list_for_each_entry(pipe, &dev->pipes_q, list) {
+		/* Check this pipe's bit in the source mask */
+		if (BAM_PIPE_IS_ASSIGNED(pipe)
+				&& (!pipe->disconnecting)
+				&& (source & pipe->pipe_index_mask)) {
+			/* This pipe has an interrupt pending */
+			pipe_handler(dev, pipe);
+			source &= ~pipe->pipe_index_mask;
+		}
+		if (source == 0)
+			break;
+	}
+
+	/* Process any inactive pipe sources */
+	if (source) {
+		SPS_ERR(dev, "sps:IRQ from BAM %pa inactive pipe(s) 0x%x\n",
+			BAM_ID(dev), source);
+		dev->irq_from_disabled_pipe++;
+	}
+
+	if (dev->props.options & SPS_BAM_RES_CONFIRM) {
+		u32 mask = dev->pipe_active_mask;
+		enum sps_callback_case cb_case;
+		source = bam_check_irq_source(&dev->base, dev->props.ee,
+						mask, &cb_case);
+
+		SPS_DBG1(dev,
+			"sps:check if there is any new IRQ coming:bam=%pa;source=0x%x;mask=0x%x.\n",
+				BAM_ID(dev), source, mask);
+
+		if ((source & (1UL << 31)) && (dev->props.callback)) {
+			SPS_DBG1(dev, "sps:bam=%pa;callback for case %d.\n",
+				BAM_ID(dev), cb_case);
+			dev->props.callback(cb_case, dev->props.user);
+		}
+
+		if (source)
+			goto polling;
+	}
+
+	spin_unlock_irqrestore(&dev->isr_lock, flags);
+
+	return ret;
+}
+
+/**
+ * BAM interrupt service routine
+ *
+ * This function is the BAM interrupt service routine.
+ *
+ * @ctxt - pointer to ISR's registered argument
+ *
+ * @return void
+ */
+static irqreturn_t bam_isr(int irq, void *ctxt)
+{
+	struct sps_bam *dev = ctxt;
+
+	SPS_DBG1(dev, "sps:bam_isr: bam:%pa; IRQ #:%d.\n",
+		BAM_ID(dev), irq);
+
+	if (dev->props.options & SPS_BAM_RES_CONFIRM) {
+		if (dev->props.callback) {
+			bool ready = false;
+			dev->props.callback(SPS_CALLBACK_BAM_RES_REQ, &ready);
+			if (ready) {
+				SPS_DBG1(dev,
+					"sps:bam_isr: handle IRQ for bam:%pa IRQ #:%d.\n",
+					BAM_ID(dev), irq);
+				if (sps_bam_check_irq(dev))
+					SPS_DBG2(dev,
+						"sps:bam_isr: callback bam:%pa IRQ #:%d to poll the pipes.\n",
+						BAM_ID(dev), irq);
+				dev->props.callback(SPS_CALLBACK_BAM_RES_REL,
+							&ready);
+			} else {
+				SPS_DBG1(dev,
+					"sps:bam_isr: BAM is not ready and thus skip IRQ for bam:%pa IRQ #:%d.\n",
+					BAM_ID(dev), irq);
+			}
+		} else {
+			SPS_ERR(dev,
+				"sps:Client of BAM %pa requires confirmation but does not register callback\n",
+				BAM_ID(dev));
+		}
+	} else {
+		sps_bam_check_irq(dev);
+	}
+
+	return IRQ_HANDLED;
+}
+
+/**
+ * BAM device enable
+ */
+int sps_bam_enable(struct sps_bam *dev)
+{
+	u32 num_pipes;
+	u32 irq_mask;
+	int result = 0;
+	int rc;
+	int MTIenabled;
+
+	/* Is this BAM enabled? */
+	if ((dev->state & BAM_STATE_ENABLED))
+		return 0;	/* Yes, so no work to do */
+
+	/* Is there any access to this BAM? */
+	if ((dev->props.manage & SPS_BAM_MGR_ACCESS_MASK) == SPS_BAM_MGR_NONE) {
+		SPS_ERR(dev, "sps:No local access to BAM %pa\n", BAM_ID(dev));
+		return SPS_ERROR;
+	}
+
+	/* Set interrupt handling */
+	if ((dev->props.options & SPS_BAM_OPT_IRQ_DISABLED) != 0 ||
+	    dev->props.irq == SPS_IRQ_INVALID) {
+		/* Disable the BAM interrupt */
+		irq_mask = 0;
+		dev->state &= ~BAM_STATE_IRQ;
+	} else {
+		/* Register BAM ISR */
+		if (dev->props.irq > 0) {
+			if (dev->props.options & SPS_BAM_RES_CONFIRM) {
+				result = request_irq(dev->props.irq,
+					(irq_handler_t) bam_isr,
+					IRQF_TRIGGER_RISING, "sps", dev);
+				SPS_DBG3(dev,
+					"sps:BAM %pa uses edge for IRQ# %d\n",
+					BAM_ID(dev), dev->props.irq);
+			} else {
+				result = request_irq(dev->props.irq,
+					(irq_handler_t) bam_isr,
+					IRQF_TRIGGER_HIGH, "sps", dev);
+				SPS_DBG3(dev,
+					"sps:BAM %pa uses level for IRQ# %d\n",
+					BAM_ID(dev), dev->props.irq);
+			}
+		} else {
+			SPS_DBG3(dev,
+				"sps:BAM %pa does not have an valid IRQ# %d\n",
+				BAM_ID(dev), dev->props.irq);
+		}
+
+		if (result) {
+			SPS_ERR(dev, "sps:Failed to enable BAM %pa IRQ %d\n",
+				BAM_ID(dev), dev->props.irq);
+			return SPS_ERROR;
+		}
+
+		/* Enable the BAM interrupt */
+		irq_mask = BAM_IRQ_ALL;
+		dev->state |= BAM_STATE_IRQ;
+
+		/* Register BAM IRQ for apps wakeup */
+		if (dev->props.options & SPS_BAM_OPT_IRQ_WAKEUP) {
+			result = enable_irq_wake(dev->props.irq);
+
+			if (result) {
+				SPS_ERR(dev,
+					"sps:Fail to enable wakeup irq for BAM %pa IRQ %d\n",
+					BAM_ID(dev), dev->props.irq);
+				return SPS_ERROR;
+			} else
+				SPS_DBG3(dev,
+					"sps:Enable wakeup irq for BAM %pa IRQ %d\n",
+					BAM_ID(dev), dev->props.irq);
+		}
+	}
+
+	/* Is global BAM control managed by the local processor? */
+	num_pipes = 0;
+	if ((dev->props.manage & SPS_BAM_MGR_DEVICE_REMOTE) == 0)
+		/* Yes, so initialize the BAM device */
+		rc = bam_init(&dev->base,
+				  dev->props.ee,
+				  (u16) dev->props.summing_threshold,
+				  irq_mask,
+				  &dev->version, &num_pipes,
+				  dev->props.options);
+	else
+		/* No, so just verify that it is enabled */
+		rc = bam_check(&dev->base, &dev->version,
+				dev->props.ee, &num_pipes);
+
+	if (rc) {
+		SPS_ERR(dev, "sps:Fail to init BAM %pa IRQ %d\n",
+			BAM_ID(dev), dev->props.irq);
+		return SPS_ERROR;
+	}
+
+	/* Check if this BAM supports MTIs (Message Triggered Interrupts) or
+	 * multiple EEs (Execution Environments).
+	 * MTI and EE support are mutually exclusive.
+	 */
+	MTIenabled = BAM_VERSION_MTI_SUPPORT(dev->version);
+
+	if ((dev->props.manage & SPS_BAM_MGR_DEVICE_REMOTE) != 0 &&
+			(dev->props.manage & SPS_BAM_MGR_MULTI_EE) != 0 &&
+			dev->props.ee == 0 && MTIenabled) {
+		/*
+		 * BAM global is owned by remote processor and local processor
+		 * must use MTI. Thus, force EE index to a non-zero value to
+		 * ensure that EE zero globals can't be modified.
+		 */
+		SPS_ERR(dev,
+			"sps:%s:EE for satellite BAM must be set to non-zero.\n",
+			__func__);
+		return SPS_ERROR;
+	}
+
+	/*
+	 * Enable MTI use (message triggered interrupt)
+	 * if local processor does not control the global BAM config
+	 * and this BAM supports MTIs.
+	 */
+	if ((dev->state & BAM_STATE_IRQ) != 0 &&
+		(dev->props.manage & SPS_BAM_MGR_DEVICE_REMOTE) != 0 &&
+		MTIenabled) {
+		if (dev->props.irq_gen_addr == 0 ||
+		    dev->props.irq_gen_addr == SPS_ADDR_INVALID) {
+			SPS_ERR(dev,
+				"sps:MTI destination address not specified for BAM %pa\n",
+				BAM_ID(dev));
+			return SPS_ERROR;
+		}
+		dev->state |= BAM_STATE_MTI;
+	}
+
+	if (num_pipes) {
+		dev->props.num_pipes = num_pipes;
+		SPS_DBG3(dev,
+			"sps:BAM %pa number of pipes reported by hw: %d\n",
+				 BAM_ID(dev), dev->props.num_pipes);
+	}
+
+	/* Check EE index */
+	if (!MTIenabled && dev->props.ee >= SPS_BAM_NUM_EES) {
+		SPS_ERR(dev, "sps:Invalid EE BAM %pa: %d\n", BAM_ID(dev),
+				dev->props.ee);
+		return SPS_ERROR;
+	}
+
+	/*
+	 * Process EE configuration parameters,
+	 * if specified in the properties
+	 */
+	if (!MTIenabled && dev->props.sec_config == SPS_BAM_SEC_DO_CONFIG) {
+		struct sps_bam_sec_config_props *p_sec =
+						dev->props.p_sec_config_props;
+		if (p_sec == NULL) {
+			SPS_ERR(dev,
+				"sps:EE config table is not specified for BAM %pa\n",
+				BAM_ID(dev));
+			return SPS_ERROR;
+		}
+
+		/*
+		 * Set restricted pipes based on the pipes assigned to local EE
+		 */
+		dev->props.restricted_pipes =
+					~p_sec->ees[dev->props.ee].pipe_mask;
+
+		/*
+		 * If local processor manages the BAM, perform the EE
+		 * configuration
+		 */
+		if ((dev->props.manage & SPS_BAM_MGR_DEVICE_REMOTE) == 0) {
+			u32 ee;
+			u32 pipe_mask;
+			int n, i;
+
+			/*
+			 * Verify that there are no overlapping pipe
+			 * assignments
+			 */
+			for (n = 0; n < SPS_BAM_NUM_EES - 1; n++) {
+				for (i = n + 1; i < SPS_BAM_NUM_EES; i++) {
+					if ((p_sec->ees[n].pipe_mask &
+						p_sec->ees[i].pipe_mask) != 0) {
+						SPS_ERR(dev,
+							"sps:Overlapping pipe assignments for BAM %pa: EEs %d and %d\n",
+							BAM_ID(dev), n, i);
+						return SPS_ERROR;
+					}
+				}
+			}
+
+			for (ee = 0; ee < SPS_BAM_NUM_EES; ee++) {
+				/*
+				 * MSbit specifies EE for the global (top-level)
+				 * BAM interrupt
+				 */
+				pipe_mask = p_sec->ees[ee].pipe_mask;
+				if (ee == dev->props.ee)
+					pipe_mask |= (1UL << 31);
+				else
+					pipe_mask &= ~(1UL << 31);
+
+				bam_security_init(&dev->base, ee,
+						p_sec->ees[ee].vmid, pipe_mask);
+			}
+		}
+	}
+
+	/*
+	 * If local processor manages the BAM and the BAM supports MTIs
+	 * but does not support multiple EEs, set all restricted pipes
+	 * to MTI mode.
+	 */
+	if ((dev->props.manage & SPS_BAM_MGR_DEVICE_REMOTE) == 0
+			&& MTIenabled) {
+		u32 pipe_index;
+		u32 pipe_mask;
+		for (pipe_index = 0, pipe_mask = 1;
+		    pipe_index < dev->props.num_pipes;
+		    pipe_index++, pipe_mask <<= 1) {
+			if ((pipe_mask & dev->props.restricted_pipes) == 0)
+				continue;	/* This is a local pipe */
+
+			/*
+			 * Enable MTI with destination address of zero
+			 * (and source mask zero). Pipe is in reset,
+			 * so no interrupt will be generated.
+			 */
+			bam_pipe_satellite_mti(&dev->base, pipe_index, 0,
+						       dev->props.ee);
+		}
+	}
+
+	dev->state |= BAM_STATE_ENABLED;
+
+	if (!dev->props.constrained_logging ||
+		(dev->props.constrained_logging && dev->props.logging_number)) {
+		if (dev->props.logging_number > 0)
+			dev->props.logging_number--;
+		SPS_INFO(dev,
+			"sps:BAM %pa (va:0x%pK) enabled: ver:0x%x, number of pipes:%d\n",
+			BAM_ID(dev), dev->base, dev->version,
+			dev->props.num_pipes);
+	} else
+		SPS_DBG3(dev,
+			"sps:BAM %pa (va:0x%pK) enabled: ver:0x%x, number of pipes:%d\n",
+			BAM_ID(dev), dev->base, dev->version,
+			dev->props.num_pipes);
+
+	return 0;
+}
+
+/**
+ * BAM device disable
+ *
+ */
+int sps_bam_disable(struct sps_bam *dev)
+{
+	if ((dev->state & BAM_STATE_ENABLED) == 0)
+		return 0;
+
+	/* Is there any access to this BAM? */
+	if ((dev->props.manage & SPS_BAM_MGR_ACCESS_MASK) == SPS_BAM_MGR_NONE) {
+		SPS_ERR(dev, "sps:No local access to BAM %pa\n", BAM_ID(dev));
+		return SPS_ERROR;
+	}
+
+	/* Is this BAM controlled by the local processor? */
+	if ((dev->props.manage & SPS_BAM_MGR_DEVICE_REMOTE)) {
+		/* No, so just mark it disabled */
+		dev->state &= ~BAM_STATE_ENABLED;
+		if ((dev->state & BAM_STATE_IRQ) && (dev->props.irq > 0)) {
+			free_irq(dev->props.irq, dev);
+			dev->state &= ~BAM_STATE_IRQ;
+		}
+		return 0;
+	}
+
+	/* Disable BAM (interrupts) */
+	if ((dev->state & BAM_STATE_IRQ)) {
+		bam_exit(&dev->base, dev->props.ee);
+
+		/* Deregister BAM ISR */
+		if ((dev->state & BAM_STATE_IRQ))
+			if (dev->props.irq > 0)
+				free_irq(dev->props.irq, dev);
+		dev->state &= ~BAM_STATE_IRQ;
+	}
+
+	dev->state &= ~BAM_STATE_ENABLED;
+
+	SPS_DBG3(dev, "sps:BAM %pa disabled\n", BAM_ID(dev));
+
+	return 0;
+}
+
+/**
+ * BAM device initialization
+ */
+int sps_bam_device_init(struct sps_bam *dev)
+{
+	if (dev->props.virt_addr == NULL) {
+		SPS_ERR(dev, "sps:%s:NULL BAM virtual address\n", __func__);
+		return SPS_ERROR;
+	}
+	dev->base = (void *) dev->props.virt_addr;
+
+	if (dev->props.num_pipes == 0) {
+		/* Assume max number of pipes until BAM registers can be read */
+		dev->props.num_pipes = BAM_MAX_PIPES;
+		SPS_DBG3(dev, "sps:BAM %pa: assuming max number of pipes: %d\n",
+			BAM_ID(dev), dev->props.num_pipes);
+	}
+
+	/* Init BAM state data */
+	dev->state = 0;
+	dev->pipe_active_mask = 0;
+	dev->pipe_remote_mask = 0;
+	INIT_LIST_HEAD(&dev->pipes_q);
+
+	spin_lock_init(&dev->isr_lock);
+
+	spin_lock_init(&dev->connection_lock);
+
+	if ((dev->props.options & SPS_BAM_OPT_ENABLE_AT_BOOT))
+		if (sps_bam_enable(dev)) {
+			SPS_ERR(dev, "sps:%s:Fail to enable bam device\n",
+					__func__);
+			return SPS_ERROR;
+		}
+
+	SPS_DBG3(dev, "sps:BAM device: phys %pa IRQ %d\n",
+			BAM_ID(dev), dev->props.irq);
+
+	return 0;
+}
+
+/**
+ * BAM device de-initialization
+ *
+ */
+int sps_bam_device_de_init(struct sps_bam *dev)
+{
+	int result;
+
+	SPS_DBG3(dev, "sps:BAM device DEINIT: phys %pa IRQ %d\n",
+		BAM_ID(dev), dev->props.irq);
+
+	result = sps_bam_disable(dev);
+
+	return result;
+}
+
+/**
+ * BAM device reset
+ *
+ */
+int sps_bam_reset(struct sps_bam *dev)
+{
+	struct sps_pipe *pipe;
+	u32 pipe_index;
+	int result;
+
+	SPS_DBG3(dev, "sps:BAM device RESET: phys %pa IRQ %d\n",
+		BAM_ID(dev), dev->props.irq);
+
+	/* If BAM is enabled, then disable */
+	result = 0;
+	if ((dev->state & BAM_STATE_ENABLED)) {
+		/* Verify that no pipes are currently allocated */
+		for (pipe_index = 0; pipe_index < dev->props.num_pipes;
+		      pipe_index++) {
+			pipe = dev->pipes[pipe_index];
+			if (BAM_PIPE_IS_ASSIGNED(pipe)) {
+				SPS_ERR(dev,
+					"sps:BAM device %pa RESET failed: pipe %d in use\n",
+					BAM_ID(dev), pipe_index);
+				result = SPS_ERROR;
+				break;
+			}
+		}
+
+		if (result == 0)
+			result = sps_bam_disable(dev);
+	}
+
+	/* BAM will be reset as part of the enable process */
+	if (result == 0)
+		result = sps_bam_enable(dev);
+
+	return result;
+}
+
+/**
+ * Clear the BAM pipe state struct
+ *
+ * This function clears the BAM pipe state struct.
+ *
+ * @pipe - pointer to client pipe struct
+ *
+ */
+static void pipe_clear(struct sps_pipe *pipe)
+{
+	INIT_LIST_HEAD(&pipe->list);
+
+	pipe->state = 0;
+	pipe->pipe_index = SPS_BAM_PIPE_INVALID;
+	pipe->pipe_index_mask = 0;
+	pipe->irq_mask = 0;
+	pipe->mode = -1;
+	pipe->num_descs = 0;
+	pipe->desc_size = 0;
+	pipe->disconnecting = false;
+	pipe->late_eot = false;
+	memset(&pipe->sys, 0, sizeof(pipe->sys));
+	INIT_LIST_HEAD(&pipe->sys.events_q);
+}
+
+/**
+ * Allocate a BAM pipe
+ *
+ */
+u32 sps_bam_pipe_alloc(struct sps_bam *dev, u32 pipe_index)
+{
+	u32 pipe_mask;
+
+	if (pipe_index == SPS_BAM_PIPE_INVALID) {
+		/* Allocate a pipe from the BAM */
+		if ((dev->props.manage & SPS_BAM_MGR_PIPE_NO_ALLOC)) {
+			SPS_ERR(dev,
+				"sps:Restricted from allocating pipes on BAM %pa\n",
+				BAM_ID(dev));
+			return SPS_BAM_PIPE_INVALID;
+		}
+		for (pipe_index = 0, pipe_mask = 1;
+		    pipe_index < dev->props.num_pipes;
+		    pipe_index++, pipe_mask <<= 1) {
+			if ((pipe_mask & dev->props.restricted_pipes))
+				continue;	/* This is a restricted pipe */
+
+			if (dev->pipes[pipe_index] == NULL)
+				break;	/* Found an available pipe */
+		}
+		if (pipe_index >= dev->props.num_pipes) {
+			SPS_ERR(dev, "sps:Fail to allocate pipe on BAM %pa\n",
+				BAM_ID(dev));
+			return SPS_BAM_PIPE_INVALID;
+		}
+	} else {
+		/* Check that client-specified pipe is available */
+		if (pipe_index >= dev->props.num_pipes) {
+			SPS_ERR(dev,
+				"sps:Invalid pipe %d for allocate on BAM %pa\n",
+				pipe_index, BAM_ID(dev));
+			return SPS_BAM_PIPE_INVALID;
+		}
+		if ((dev->props.restricted_pipes & (1UL << pipe_index))) {
+			SPS_ERR(dev, "sps:BAM %pa pipe %d is not local\n",
+				BAM_ID(dev), pipe_index);
+			return SPS_BAM_PIPE_INVALID;
+		}
+		if (dev->pipes[pipe_index] != NULL) {
+			SPS_ERR(dev,
+				"sps:Pipe %d already allocated on BAM %pa\n",
+				pipe_index, BAM_ID(dev));
+			return SPS_BAM_PIPE_INVALID;
+		}
+	}
+
+	/* Mark pipe as allocated */
+	dev->pipes[pipe_index] = BAM_PIPE_UNASSIGNED;
+
+	return pipe_index;
+}
+
+/**
+ * Free a BAM pipe
+ *
+ */
+void sps_bam_pipe_free(struct sps_bam *dev, u32 pipe_index)
+{
+	struct sps_pipe *pipe;
+
+	if (pipe_index >= dev->props.num_pipes) {
+		SPS_ERR(dev, "sps:Invalid BAM %pa pipe: %d\n", BAM_ID(dev),
+				pipe_index);
+		return;
+	}
+
+	/* Get the client pipe struct and mark the pipe free */
+	pipe = dev->pipes[pipe_index];
+	dev->pipes[pipe_index] = NULL;
+
+	/* Is the pipe currently allocated? */
+	if (pipe == NULL) {
+		SPS_ERR(dev,
+			"sps:Attempt to free unallocated pipe %d on BAM %pa\n",
+			pipe_index, BAM_ID(dev));
+		return;
+	}
+
+	if (pipe == BAM_PIPE_UNASSIGNED)
+		return;		/* Never assigned, so no work to do */
+
+	/* Return pending items to appropriate pools */
+	if (!list_empty(&pipe->sys.events_q)) {
+		struct sps_q_event *sps_event;
+
+		SPS_ERR(dev,
+			"sps:Disconnect BAM %pa pipe %d with events pending\n",
+			BAM_ID(dev), pipe_index);
+
+		sps_event = list_entry((&pipe->sys.events_q)->next,
+				typeof(*sps_event), list);
+
+		while (&sps_event->list != (&pipe->sys.events_q)) {
+			struct sps_q_event *sps_event_delete = sps_event;
+
+			list_del(&sps_event->list);
+			sps_event = list_entry(sps_event->list.next,
+					typeof(*sps_event), list);
+			kfree(sps_event_delete);
+		}
+	}
+
+	/* Clear the BAM pipe state struct */
+	pipe_clear(pipe);
+}
+
+/**
+ * Establish BAM pipe connection
+ *
+ */
+int sps_bam_pipe_connect(struct sps_pipe *bam_pipe,
+			 const struct sps_bam_connect_param *params)
+{
+	struct bam_pipe_parameters hw_params;
+	struct sps_bam *dev;
+	const struct sps_connection *map = bam_pipe->map;
+	const struct sps_conn_end_pt *map_pipe;
+	const struct sps_conn_end_pt *other_pipe;
+	void *desc_buf = NULL;
+	u32 pipe_index;
+	int result;
+
+	/* Clear the client pipe state and hw init struct */
+	pipe_clear(bam_pipe);
+	memset(&hw_params, 0, sizeof(hw_params));
+
+	/* Initialize the BAM state struct */
+	bam_pipe->mode = params->mode;
+
+	/* Set pipe streaming mode */
+	if ((params->options & SPS_O_STREAMING) == 0)
+		hw_params.stream_mode = BAM_STREAM_MODE_DISABLE;
+	else
+		hw_params.stream_mode = BAM_STREAM_MODE_ENABLE;
+
+	/* Determine which end point to connect */
+	if (bam_pipe->mode == SPS_MODE_SRC) {
+		map_pipe = &map->src;
+		other_pipe = &map->dest;
+		hw_params.dir = BAM_PIPE_PRODUCER;
+	} else {
+		map_pipe = &map->dest;
+		other_pipe = &map->src;
+		hw_params.dir = BAM_PIPE_CONSUMER;
+	}
+
+	/* Process map parameters */
+	dev = map_pipe->bam;
+	pipe_index = map_pipe->pipe_index;
+
+	SPS_DBG2(dev,
+		"sps:BAM %pa; pipe %d; mode:%d; options:0x%x.\n",
+		BAM_ID(dev), pipe_index, params->mode, params->options);
+
+	if (pipe_index >= dev->props.num_pipes) {
+		SPS_ERR(dev, "sps:Invalid BAM %pa pipe: %d\n", BAM_ID(dev),
+				pipe_index);
+		return SPS_ERROR;
+	}
+	hw_params.event_threshold = (u16) map_pipe->event_threshold;
+	hw_params.ee = dev->props.ee;
+	hw_params.lock_group = map_pipe->lock_group;
+
+	/* Verify that control of this pipe is allowed */
+	if ((dev->props.manage & SPS_BAM_MGR_PIPE_NO_CTRL) ||
+	    (dev->props.restricted_pipes & (1UL << pipe_index))) {
+		SPS_ERR(dev, "sps:BAM %pa pipe %d is not local\n",
+			BAM_ID(dev), pipe_index);
+		return SPS_ERROR;
+	}
+
+	/* Control without configuration permission is not supported yet */
+	if ((dev->props.manage & SPS_BAM_MGR_PIPE_NO_CONFIG)) {
+		SPS_ERR(dev,
+			"sps:BAM %pa pipe %d remote config is not supported\n",
+			BAM_ID(dev), pipe_index);
+		return SPS_ERROR;
+	}
+
+	/* Determine operational mode */
+	if (other_pipe->bam != NULL) {
+		unsigned long iova;
+		struct sps_bam *peer_bam = (struct sps_bam *)(other_pipe->bam);
+		/* BAM-to-BAM mode */
+		bam_pipe->state |= BAM_STATE_BAM2BAM;
+		hw_params.mode = BAM_PIPE_MODE_BAM2BAM;
+
+		if (dev->props.options & SPS_BAM_SMMU_EN) {
+			if (bam_pipe->mode == SPS_MODE_SRC)
+				iova = bam_pipe->connect.dest_iova;
+			else
+				iova = bam_pipe->connect.source_iova;
+			SPS_DBG2(dev,
+				"sps:BAM %pa pipe %d uses IOVA 0x%lx.\n",
+				 BAM_ID(dev), pipe_index, iova);
+			hw_params.peer_phys_addr = (u32)iova;
+		} else {
+			hw_params.peer_phys_addr = peer_bam->props.phys_addr;
+		}
+
+		hw_params.peer_pipe = other_pipe->pipe_index;
+
+		/* Verify FIFO buffers are allocated for BAM-to-BAM pipes */
+		if (map->desc.phys_base == SPS_ADDR_INVALID ||
+		    map->data.phys_base == SPS_ADDR_INVALID ||
+		    map->desc.size == 0 || map->data.size == 0) {
+			SPS_ERR(dev,
+				"sps:FIFO buffers are not allocated for BAM %pa pipe %d.\n",
+				BAM_ID(dev), pipe_index);
+			return SPS_ERROR;
+		}
+
+		if (dev->props.options & SPS_BAM_SMMU_EN) {
+			hw_params.data_base =
+				(phys_addr_t)bam_pipe->connect.data.iova;
+			SPS_DBG2(dev,
+				"sps:BAM %pa pipe %d uses IOVA 0x%lx for data FIFO.\n",
+				 BAM_ID(dev), pipe_index,
+				 bam_pipe->connect.data.iova);
+		} else {
+			hw_params.data_base = map->data.phys_base;
+		}
+
+		hw_params.data_size = map->data.size;
+
+		/* Clear the data FIFO for debug */
+		if (map->data.base != NULL && bam_pipe->mode == SPS_MODE_SRC)
+			memset_io(map->data.base, 0, hw_params.data_size);
+
+		/* set NWD bit for BAM2BAM producer pipe */
+		if (bam_pipe->mode == SPS_MODE_SRC) {
+			if ((params->options & SPS_O_WRITE_NWD) == 0)
+				hw_params.write_nwd = BAM_WRITE_NWD_DISABLE;
+			else
+				hw_params.write_nwd = BAM_WRITE_NWD_ENABLE;
+		}
+	} else {
+		/* System mode */
+		hw_params.mode = BAM_PIPE_MODE_SYSTEM;
+		bam_pipe->sys.desc_buf = map->desc.base;
+		bam_pipe->sys.desc_offset = 0;
+		bam_pipe->sys.acked_offset = 0;
+	}
+
+	/* Initialize the client pipe state */
+	bam_pipe->pipe_index = pipe_index;
+	bam_pipe->pipe_index_mask = 1UL << pipe_index;
+
+	/* Get virtual address for descriptor FIFO */
+	if (map->desc.phys_base != SPS_ADDR_INVALID) {
+		if (map->desc.size < (2 * sizeof(struct sps_iovec))) {
+			SPS_ERR(dev,
+				"sps:Invalid descriptor FIFO size for BAM %pa pipe %d: %d\n",
+				BAM_ID(dev), pipe_index, map->desc.size);
+			return SPS_ERROR;
+		}
+		desc_buf = map->desc.base;
+
+		/*
+		 * Note that descriptor base and size will be left zero from
+		 * the memset() above if the physical address was invalid.
+		 * This allows a satellite driver to set the FIFO as
+		 * local memory for system mode.
+		 */
+
+		if (dev->props.options & SPS_BAM_SMMU_EN) {
+			hw_params.desc_base =
+				(phys_addr_t)bam_pipe->connect.desc.iova;
+			SPS_DBG2(dev,
+				"sps:BAM %pa pipe %d uses IOVA 0x%lx for desc FIFO.\n",
+				 BAM_ID(dev), pipe_index,
+				 bam_pipe->connect.desc.iova);
+		} else {
+			hw_params.desc_base = map->desc.phys_base;
+		}
+
+		hw_params.desc_size = map->desc.size;
+	}
+
+	/* Configure the descriptor FIFO for both operational modes */
+	if (desc_buf != NULL)
+		if (bam_pipe->mode == SPS_MODE_SRC ||
+		    hw_params.mode == BAM_PIPE_MODE_SYSTEM)
+			memset_io(desc_buf, 0, hw_params.desc_size);
+
+	bam_pipe->desc_size = hw_params.desc_size;
+	bam_pipe->num_descs = bam_pipe->desc_size / sizeof(struct sps_iovec);
+
+	result = SPS_ERROR;
+	/* Ensure that the BAM is enabled */
+	if ((dev->state & BAM_STATE_ENABLED) == 0)
+		if (sps_bam_enable(dev))
+			goto exit_init_err;
+
+	/* Check pipe allocation */
+	if (dev->pipes[pipe_index] != BAM_PIPE_UNASSIGNED) {
+		SPS_ERR(dev, "sps:Invalid pipe %d on BAM %pa for connect\n",
+			pipe_index, BAM_ID(dev));
+		return SPS_ERROR;
+	}
+
+	if (bam_pipe_is_enabled(&dev->base, pipe_index)) {
+		if (params->options & SPS_O_NO_DISABLE)
+			SPS_DBG2(dev,
+				"sps:BAM %pa pipe %d is already enabled.\n",
+				BAM_ID(dev), pipe_index);
+		else {
+			SPS_ERR(dev, "sps:BAM %pa pipe %d sharing violation\n",
+				BAM_ID(dev), pipe_index);
+			return SPS_ERROR;
+		}
+	}
+
+	if (bam_pipe_init(&dev->base, pipe_index, &hw_params, dev->props.ee)) {
+		SPS_ERR(dev, "sps:BAM %pa pipe %d init error\n",
+			BAM_ID(dev), pipe_index);
+		goto exit_err;
+	}
+
+	/* Assign pipe to client */
+	dev->pipes[pipe_index] = bam_pipe;
+
+	/* Process configuration parameters */
+	if (params->options != 0 ||
+	    (bam_pipe->state & BAM_STATE_BAM2BAM) == 0) {
+		/* Process init-time only parameters */
+		u32 irq_gen_addr;
+
+		/* Set interrupt mode */
+		irq_gen_addr = SPS_ADDR_INVALID;
+		if ((params->options & SPS_O_IRQ_MTI))
+			/* Client has directly specified the MTI address */
+			irq_gen_addr = params->irq_gen_addr;
+		else if ((dev->state & BAM_STATE_MTI))
+			/* This BAM has MTI use enabled */
+			irq_gen_addr = dev->props.irq_gen_addr;
+
+		if (irq_gen_addr != SPS_ADDR_INVALID) {
+			/*
+			 * No checks - assume BAM is already setup for
+			 * MTI generation,
+			 * or the pipe will be set to satellite control.
+			 */
+			bam_pipe->state |= BAM_STATE_MTI;
+			bam_pipe->irq_gen_addr = irq_gen_addr;
+		}
+
+		/* Process runtime parameters */
+		if (sps_bam_pipe_set_params(dev, pipe_index,
+					  params->options)) {
+			dev->pipes[pipe_index] = BAM_PIPE_UNASSIGNED;
+			goto exit_err;
+		}
+	}
+
+	/* Indicate initialization is complete */
+	dev->pipes[pipe_index] = bam_pipe;
+	dev->pipe_active_mask |= 1UL << pipe_index;
+	list_add_tail(&bam_pipe->list, &dev->pipes_q);
+
+	SPS_DBG2(dev,
+		"sps:BAM %pa; pipe %d; pipe_index_mask:0x%x; pipe_active_mask:0x%x.\n",
+		BAM_ID(dev), pipe_index,
+		bam_pipe->pipe_index_mask, dev->pipe_active_mask);
+
+	bam_pipe->state |= BAM_STATE_INIT;
+	result = 0;
+exit_err:
+	if (result) {
+		if (params->options & SPS_O_NO_DISABLE)
+			SPS_DBG2(dev, "sps:BAM %pa pipe %d connection exits\n",
+				BAM_ID(dev), pipe_index);
+		else
+			bam_pipe_exit(&dev->base, pipe_index, dev->props.ee);
+	}
+exit_init_err:
+	if (result) {
+		/* Clear the client pipe state */
+		pipe_clear(bam_pipe);
+	}
+
+	return result;
+}
+
+/**
+ * Disconnect a BAM pipe connection
+ *
+ */
+int sps_bam_pipe_disconnect(struct sps_bam *dev, u32 pipe_index)
+{
+	struct sps_pipe *pipe;
+	int result;
+	unsigned long flags;
+
+	if (pipe_index >= dev->props.num_pipes) {
+		SPS_ERR(dev, "sps:Invalid BAM %pa pipe: %d\n", BAM_ID(dev),
+				pipe_index);
+		return SPS_ERROR;
+	}
+
+	/* Deallocate and reset the BAM pipe */
+	pipe = dev->pipes[pipe_index];
+	if (BAM_PIPE_IS_ASSIGNED(pipe)) {
+		if ((dev->pipe_active_mask & (1UL << pipe_index))) {
+			spin_lock_irqsave(&dev->isr_lock, flags);
+			list_del(&pipe->list);
+			dev->pipe_active_mask &= ~(1UL << pipe_index);
+			spin_unlock_irqrestore(&dev->isr_lock, flags);
+		}
+		dev->pipe_remote_mask &= ~(1UL << pipe_index);
+		if (pipe->connect.options & SPS_O_NO_DISABLE)
+			SPS_DBG2(dev, "sps:BAM %pa pipe %d exits.\n",
+				BAM_ID(dev), pipe_index);
+		else
+			bam_pipe_exit(&dev->base, pipe_index, dev->props.ee);
+		if (pipe->sys.desc_cache != NULL) {
+			u32 size = pipe->num_descs * sizeof(void *);
+			if (pipe->desc_size + size <= PAGE_SIZE) {
+				if (dev->props.options & SPS_BAM_HOLD_MEM)
+					memset(pipe->sys.desc_cache, 0,
+						pipe->desc_size + size);
+				else
+					kfree(pipe->sys.desc_cache);
+			} else {
+				vfree(pipe->sys.desc_cache);
+			}
+			pipe->sys.desc_cache = NULL;
+		}
+		dev->pipes[pipe_index] = BAM_PIPE_UNASSIGNED;
+		pipe_clear(pipe);
+		result = 0;
+	} else {
+		result = SPS_ERROR;
+	}
+
+	if (result)
+		SPS_ERR(dev, "sps:BAM %pa pipe %d already disconnected\n",
+			BAM_ID(dev), pipe_index);
+
+	return result;
+}
+
+/**
+ * Set BAM pipe interrupt enable state
+ *
+ * This function sets the interrupt enable state for a BAM pipe.
+ *
+ * @dev - pointer to BAM device descriptor
+ *
+ * @pipe_index - pipe index
+ *
+ * @poll - true if SPS_O_POLL is set, false otherwise
+ *
+ */
+static void pipe_set_irq(struct sps_bam *dev, u32 pipe_index,
+				 u32 poll)
+{
+	struct sps_pipe *pipe = dev->pipes[pipe_index];
+	enum bam_enable irq_enable;
+
+	SPS_DBG2(dev,
+		"sps:BAM:%pa; pipe %d; poll:%d, irq_mask:0x%x; pipe state:0x%x; dev state:0x%x.\n",
+		BAM_ID(dev), pipe_index, poll, pipe->irq_mask,
+		pipe->state, dev->state);
+
+	if (poll == 0 && pipe->irq_mask != 0 &&
+	    (dev->state & BAM_STATE_IRQ)) {
+		if ((pipe->state & BAM_STATE_BAM2BAM) != 0 &&
+		    (pipe->state & BAM_STATE_IRQ) == 0) {
+			/*
+			 * If enabling the interrupt for a BAM-to-BAM pipe,
+			 * clear the existing interrupt status
+			 */
+			(void)bam_pipe_get_and_clear_irq_status(&dev->base,
+							   pipe_index);
+		}
+		pipe->state |= BAM_STATE_IRQ;
+		irq_enable = BAM_ENABLE;
+		pipe->polled = false;
+	} else {
+		pipe->state &= ~BAM_STATE_IRQ;
+		irq_enable = BAM_DISABLE;
+		pipe->polled = true;
+		if (poll == 0 && pipe->irq_mask)
+			SPS_DBG2(dev,
+				"sps:BAM %pa pipe %d forced to use polling\n",
+				 BAM_ID(dev), pipe_index);
+	}
+	if ((pipe->state & BAM_STATE_MTI) == 0)
+		bam_pipe_set_irq(&dev->base, pipe_index, irq_enable,
+					 pipe->irq_mask, dev->props.ee);
+	else
+		bam_pipe_set_mti(&dev->base, pipe_index, irq_enable,
+					 pipe->irq_mask, pipe->irq_gen_addr);
+
+}
+
+/**
+ * Set BAM pipe parameters
+ *
+ */
+int sps_bam_pipe_set_params(struct sps_bam *dev, u32 pipe_index, u32 options)
+{
+	struct sps_pipe *pipe = dev->pipes[pipe_index];
+	u32 mask;
+	int wake_up_is_one_shot;
+	int no_queue;
+	int ack_xfers;
+	u32 size;
+	int n;
+
+	SPS_DBG2(dev, "sps:BAM %pa pipe %d opt 0x%x\n",
+		BAM_ID(dev), pipe_index, options);
+
+	/* Capture some options */
+	wake_up_is_one_shot = ((options & SPS_O_WAKEUP_IS_ONESHOT));
+	no_queue = ((options & SPS_O_NO_Q));
+	ack_xfers = ((options & SPS_O_ACK_TRANSFERS));
+
+	pipe->hybrid = options & SPS_O_HYBRID;
+	pipe->late_eot = options & SPS_O_LATE_EOT;
+
+	/* Create interrupt source mask */
+	mask = 0;
+	for (n = 0; n < ARRAY_SIZE(opt_event_table); n++) {
+		/* Is client registering for this event? */
+		if ((options & opt_event_table[n].option) == 0)
+			continue;	/* No */
+
+		mask |= opt_event_table[n].pipe_irq;
+	}
+
+#ifdef SPS_BAM_STATISTICS
+	/* Is an illegal mode change specified? */
+	if (pipe->sys.desc_wr_count > 0 &&
+	    (no_queue != pipe->sys.no_queue
+	     || ack_xfers != pipe->sys.ack_xfers)) {
+		SPS_ERR(dev,
+			"sps:Queue/ack mode change after transfer: BAM %pa pipe %d opt 0x%x\n",
+			BAM_ID(dev), pipe_index, options);
+		return SPS_ERROR;
+	}
+#endif /* SPS_BAM_STATISTICS */
+
+	/* Is client setting invalid options for a BAM-to-BAM connection? */
+	if ((pipe->state & BAM_STATE_BAM2BAM) &&
+	    (options & BAM2BAM_O_INVALID)) {
+		SPS_ERR(dev,
+			"sps:Invalid option for BAM-to-BAM: BAM %pa pipe %d opt 0x%x\n",
+			BAM_ID(dev), pipe_index, options);
+		return SPS_ERROR;
+	}
+
+	/* Allocate descriptor FIFO cache if NO_Q option is disabled */
+	if (!no_queue && pipe->sys.desc_cache == NULL && pipe->num_descs > 0
+	    && (pipe->state & BAM_STATE_BAM2BAM) == 0) {
+		/* Allocate both descriptor cache and user pointer array */
+		size = pipe->num_descs * sizeof(void *);
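+		/*
+		 * The cache is one allocation: a desc_size-byte shadow of
+		 * the descriptor FIFO followed by one user pointer per
+		 * descriptor; user_ptrs is pointed at that tail below.
+		 */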
+
+		if (pipe->desc_size + size <= PAGE_SIZE) {
+			if ((dev->props.options &
+						SPS_BAM_HOLD_MEM)) {
+				if (dev->desc_cache_pointers[pipe_index]) {
+					pipe->sys.desc_cache =
+						dev->desc_cache_pointers
+							[pipe_index];
+				} else {
+					pipe->sys.desc_cache =
+						kzalloc(pipe->desc_size + size,
+								GFP_KERNEL);
+					dev->desc_cache_pointers[pipe_index] =
+							pipe->sys.desc_cache;
+				}
+			} else {
+				pipe->sys.desc_cache =
+						kzalloc(pipe->desc_size + size,
+							GFP_KERNEL);
+			}
+			if (pipe->sys.desc_cache == NULL) {
+				SPS_ERR(dev,
+					"sps:No memory for pipe%d of BAM %pa\n",
+						pipe_index, BAM_ID(dev));
+				return -ENOMEM;
+			}
+		} else {
+			pipe->sys.desc_cache =
+				vzalloc(pipe->desc_size + size);
+
+			if (pipe->sys.desc_cache == NULL) {
+				SPS_ERR(dev,
+					"sps:No memory for pipe %d of BAM %pa\n",
+					pipe_index, BAM_ID(dev));
+				return -ENOMEM;
+			}
+		}
+
+		if (pipe->sys.desc_cache == NULL) {
+			/*** MUST BE LAST POINT OF FAILURE (see below) *****/
+			SPS_ERR(dev,
+				"sps:Desc cache error: BAM %pa pipe %d: %d\n",
+				BAM_ID(dev), pipe_index,
+				pipe->desc_size + size);
+			return SPS_ERROR;
+		}
+		pipe->sys.user_ptrs = (void **)(pipe->sys.desc_cache +
+						 pipe->desc_size);
+		pipe->sys.cache_offset = pipe->sys.acked_offset;
+	}
+
+	/*
+	 * No failures beyond this point. The allocation above is the last
+	 * point of failure, so no free() handling is needed.
+	 */
+
+	/* Enable/disable the pipe's interrupt sources */
+	pipe->irq_mask = mask;
+	pipe_set_irq(dev, pipe_index, (options & SPS_O_POLL));
+
+	/* Store software feature enables */
+	pipe->wake_up_is_one_shot = wake_up_is_one_shot;
+	pipe->sys.no_queue = no_queue;
+	pipe->sys.ack_xfers = ack_xfers;
+
+	return 0;
+}
+
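+/*
+ * Illustrative sketch (not part of the original patch): a minimal client
+ * configuration for a system-mode pipe that wants EOT events and explicit
+ * descriptor ACKing.  The pipe index (5) is a hypothetical value.
+ */
+#if 0	/* example only */
+static int example_configure_pipe(struct sps_bam *dev)
+{
+	/* EOT interrupts; client retires descriptors via get_iovec() */
+	u32 options = SPS_O_EOT | SPS_O_ACK_TRANSFERS;
+
+	return sps_bam_pipe_set_params(dev, 5, options);
+}
+#endif
+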
+/**
+ * Enable a BAM pipe
+ *
+ */
+int sps_bam_pipe_enable(struct sps_bam *dev, u32 pipe_index)
+{
+	struct sps_pipe *pipe = dev->pipes[pipe_index];
+
+	/* Enable the BAM pipe */
+	bam_pipe_enable(&dev->base, pipe_index);
+	pipe->state |= BAM_STATE_ENABLED;
+
+	return 0;
+}
+
+/**
+ * Disable a BAM pipe
+ *
+ */
+int sps_bam_pipe_disable(struct sps_bam *dev, u32 pipe_index)
+{
+	struct sps_pipe *pipe = dev->pipes[pipe_index];
+
+	/* Disable the BAM pipe */
+	if (pipe->connect.options & SPS_O_NO_DISABLE)
+		SPS_DBG2(dev, "sps:BAM %pa pipe %d enters disable state\n",
+			BAM_ID(dev), pipe_index);
+	else
+		bam_pipe_disable(&dev->base, pipe_index);
+
+	pipe->state &= ~BAM_STATE_ENABLED;
+
+	return 0;
+}
+
+/**
+ * Register an event for a BAM pipe
+ *
+ */
+int sps_bam_pipe_reg_event(struct sps_bam *dev,
+			   u32 pipe_index,
+			   struct sps_register_event *reg)
+{
+	struct sps_pipe *pipe = dev->pipes[pipe_index];
+	struct sps_bam_event_reg *event_reg;
+	int n;
+
+	if (pipe->sys.no_queue && reg->xfer_done != NULL &&
+	    reg->mode != SPS_TRIGGER_CALLBACK) {
+		SPS_ERR(dev,
+			"sps:Only callback events support for NO_Q: BAM %pa pipe %d mode %d\n",
+			BAM_ID(dev), pipe_index, reg->mode);
+		return SPS_ERROR;
+	}
+
+	for (n = 0; n < ARRAY_SIZE(opt_event_table); n++) {
+		int index;
+
+		/* Is client registering for this event? */
+		if ((reg->options & opt_event_table[n].option) == 0)
+			continue;	/* No */
+
+		index = SPS_EVENT_INDEX(opt_event_table[n].event_id);
+		if (index < 0)
+			SPS_ERR(dev,
+				"sps:Negative event index: BAM %pa pipe %d mode %d\n",
+				BAM_ID(dev), pipe_index, reg->mode);
+		else {
+			event_reg = &pipe->sys.event_regs[index];
+			event_reg->xfer_done = reg->xfer_done;
+			event_reg->callback = reg->callback;
+			event_reg->mode = reg->mode;
+			event_reg->user = reg->user;
+		}
+	}
+
+	return 0;
+}
+
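+/*
+ * Illustrative sketch (not part of the original patch): registering a
+ * callback for EOT events.  example_eot_cb and the surrounding names are
+ * hypothetical; the struct fields match those consumed above.
+ */
+#if 0	/* example only */
+static void example_eot_cb(struct sps_event_notify *notify)
+{
+	/* notify->data.transfer.iovec holds the completed descriptor */
+}
+
+static int example_register_eot(struct sps_bam *dev, u32 pipe_index)
+{
+	struct sps_register_event reg = {
+		.options = SPS_O_EOT,
+		.mode = SPS_TRIGGER_CALLBACK,
+		.callback = example_eot_cb,
+		.xfer_done = NULL,
+		.user = NULL,
+	};
+
+	return sps_bam_pipe_reg_event(dev, pipe_index, &reg);
+}
+#endif
+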
+/**
+ * Submit a transfer of a single buffer to a BAM pipe
+ *
+ */
+int sps_bam_pipe_transfer_one(struct sps_bam *dev,
+				    u32 pipe_index, u32 addr, u32 size,
+				    void *user, u32 flags)
+{
+	struct sps_pipe *pipe = dev->pipes[pipe_index];
+	struct sps_iovec *desc;
+	struct sps_iovec iovec;
+	u32 next_write;
+	static bool show_recom;
+
+	SPS_DBG(dev, "sps:BAM %pa pipe %d addr 0x%x size 0x%x flags 0x%x\n",
+			BAM_ID(dev), pipe_index, addr, size, flags);
+
+	/* Is this a BAM-to-BAM or satellite connection? */
+	if ((pipe->state & (BAM_STATE_BAM2BAM | BAM_STATE_REMOTE))) {
+		SPS_ERR(dev, "sps:Transfer on BAM-to-BAM: BAM %pa pipe %d\n",
+			BAM_ID(dev), pipe_index);
+		return SPS_ERROR;
+	}
+
+	/*
+	 * Client identifier (user pointer) is not supported for
+	 * SPS_O_NO_Q option.
+	 */
+	if (pipe->sys.no_queue && user != NULL) {
+		SPS_ERR(dev, "sps:User pointer arg non-NULL: BAM %pa pipe %d\n",
+			BAM_ID(dev), pipe_index);
+		return SPS_ERROR;
+	}
+
+	/* Determine if descriptor can be queued */
+	next_write = pipe->sys.desc_offset + sizeof(struct sps_iovec);
+	if (next_write >= pipe->desc_size)
+		next_write = 0;
+
+	if (next_write == pipe->sys.acked_offset) {
+		/*
+		 * If pipe is polled and client is not ACK'ing descriptors,
+		 * perform polling operation so that any outstanding ACKs
+		 * can occur.
+		 */
+		if (!pipe->sys.ack_xfers && pipe->polled) {
+			pipe_handler_eot(dev, pipe);
+			if (next_write == pipe->sys.acked_offset) {
+				if (!show_recom) {
+					show_recom = true;
+					SPS_ERR(dev,
+						"sps:Client of BAM %pa pipe %d is recommended to have flow control\n",
+						BAM_ID(dev), pipe_index);
+				}
+
+				SPS_DBG1(dev,
+					"sps:Descriptor FIFO is full for BAM %pa pipe %d after pipe_handler_eot\n",
+					BAM_ID(dev), pipe_index);
+				return SPS_ERROR;
+			}
+		} else {
+			if (!show_recom) {
+				show_recom = true;
+				SPS_ERR(dev,
+					"sps:Client of BAM %pa pipe %d is recommended to have flow control.\n",
+					BAM_ID(dev), pipe_index);
+			}
+
+			SPS_DBG1(dev,
+				"sps:Descriptor FIFO is full for BAM %pa pipe %d\n",
+				BAM_ID(dev), pipe_index);
+			return SPS_ERROR;
+		}
+	}
+
+	/* Create descriptor */
+	if (!pipe->sys.no_queue)
+		desc = (struct sps_iovec *) (pipe->sys.desc_cache +
+					      pipe->sys.desc_offset);
+	else
+		desc = &iovec;
+
+	desc->addr = addr;
+	desc->size = size;
+
+	if ((flags & SPS_IOVEC_FLAG_DEFAULT) == 0) {
+		desc->flags = (flags & BAM_IOVEC_FLAG_MASK)
+				| DESC_UPPER_ADDR(flags);
+	} else {
+		if (pipe->mode == SPS_MODE_SRC)
+			desc->flags = SPS_IOVEC_FLAG_INT
+					| DESC_UPPER_ADDR(flags);
+		else
+			desc->flags = (SPS_IOVEC_FLAG_INT | SPS_IOVEC_FLAG_EOT)
+					| DESC_UPPER_ADDR(flags);
+	}
+
+#ifdef SPS_BAM_STATISTICS
+	if ((flags & SPS_IOVEC_FLAG_INT))
+		pipe->sys.int_flags++;
+	if ((flags & SPS_IOVEC_FLAG_EOT))
+		pipe->sys.eot_flags++;
+#endif /* SPS_BAM_STATISTICS */
+
+	/* Update hardware descriptor FIFO - should result in burst */
+	*((struct sps_iovec *) (pipe->sys.desc_buf + pipe->sys.desc_offset))
+		= *desc;
+
+	/* Record user pointer value */
+	if (!pipe->sys.no_queue) {
+		u32 index = pipe->sys.desc_offset / sizeof(struct sps_iovec);
+		pipe->sys.user_ptrs[index] = user;
+#ifdef SPS_BAM_STATISTICS
+		if (user != NULL)
+			pipe->sys.user_ptrs_count++;
+#endif /* SPS_BAM_STATISTICS */
+	}
+
+	/* Update descriptor write offset */
+	pipe->sys.desc_offset = next_write;
+
+#ifdef SPS_BAM_STATISTICS
+	/* Update statistics */
+	pipe->sys.desc_wr_count++;
+#endif /* SPS_BAM_STATISTICS */
+
+	/* Notify pipe */
+	if ((flags & SPS_IOVEC_FLAG_NO_SUBMIT) == 0) {
+		wmb(); /* Memory Barrier */
+		bam_pipe_set_desc_write_offset(&dev->base, pipe_index,
+					       next_write);
+	}
+
+	if (dev->ipc_loglevel == 0)
+		SPS_DBG(dev,
+			"sps:%s: BAM phy addr:%pa; pipe %d; write pointer to tell HW: 0x%x; write pointer read from HW: 0x%x\n",
+			__func__, BAM_ID(dev), pipe_index, next_write,
+			bam_pipe_get_desc_write_offset(&dev->base, pipe_index));
+
+	return 0;
+}
+
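+/*
+ * Illustrative sketch (not part of the original patch): queueing a single
+ * buffer with default flags.  buf_phys/buf_len are hypothetical caller
+ * values; a physical address is expected, not a kernel virtual one.
+ */
+#if 0	/* example only */
+static int example_submit_one(struct sps_bam *dev, u32 pipe_index,
+			      u32 buf_phys, u32 buf_len)
+{
+	/*
+	 * SPS_IOVEC_FLAG_DEFAULT lets the driver pick INT (producer) or
+	 * INT|EOT (consumer) based on the pipe mode, per the logic above.
+	 */
+	return sps_bam_pipe_transfer_one(dev, pipe_index, buf_phys,
+					 buf_len, NULL,
+					 SPS_IOVEC_FLAG_DEFAULT);
+}
+#endif
+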
+/**
+ * Submit a transfer to a BAM pipe
+ *
+ */
+int sps_bam_pipe_transfer(struct sps_bam *dev,
+			 u32 pipe_index, struct sps_transfer *transfer)
+{
+	struct sps_iovec *iovec;
+	u32 count;
+	u32 flags;
+	void *user;
+	int n;
+	int result;
+	struct sps_pipe *pipe = dev->pipes[pipe_index];
+
+	if (transfer->iovec_count == 0) {
+		SPS_ERR(dev, "sps:iovec count zero: BAM %pa pipe %d\n",
+			BAM_ID(dev), pipe_index);
+		return SPS_ERROR;
+	}
+
+	if (!pipe->sys.ack_xfers && pipe->polled) {
+		sps_bam_pipe_get_unused_desc_num(dev, pipe_index,
+					&count);
+		count = pipe->desc_size / sizeof(struct sps_iovec) - count - 1;
+	} else
+		sps_bam_get_free_count(dev, pipe_index, &count);
+
+	if (count < transfer->iovec_count) {
+		SPS_ERR(dev,
+			"sps:Insufficient free desc: BAM %pa pipe %d: %d\n",
+			BAM_ID(dev), pipe_index, count);
+		return SPS_ERROR;
+	}
+
+	user = NULL;		/* NULL for all except last descriptor */
+	for (n = (int)transfer->iovec_count - 1, iovec = transfer->iovec;
+	    n >= 0; n--, iovec++) {
+		if (n > 0) {
+			/* This is *not* the last descriptor */
+			flags = iovec->flags | SPS_IOVEC_FLAG_NO_SUBMIT;
+		} else {
+			/* This *is* the last descriptor */
+			flags = iovec->flags;
+			user = transfer->user;
+		}
+		result = sps_bam_pipe_transfer_one(dev, pipe_index,
+						 iovec->addr,
+						 iovec->size, user,
+						 flags);
+		if (result)
+			return SPS_ERROR;
+	}
+
+	return 0;
+}
+
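+/*
+ * Illustrative sketch (not part of the original patch): submitting a
+ * two-buffer scatter list in one call.  The iovec array is assumed to be
+ * filled in by the caller; only the last descriptor carries 'user'.
+ */
+#if 0	/* example only */
+static int example_submit_list(struct sps_bam *dev, u32 pipe_index,
+			       struct sps_iovec iov[2], void *user)
+{
+	struct sps_transfer xfer = {
+		.iovec = iov,
+		.iovec_count = 2,
+		.user = user,
+	};
+
+	return sps_bam_pipe_transfer(dev, pipe_index, &xfer);
+}
+#endif
+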
+int sps_bam_pipe_inject_zlt(struct sps_bam *dev, u32 pipe_index)
+{
+	struct sps_pipe *pipe = dev->pipes[pipe_index];
+	struct sps_iovec *desc;
+	u32 read_p, write_p, next_write;
+
+	if (pipe->state & BAM_STATE_BAM2BAM)
+		SPS_DBG2(dev, "sps: BAM-to-BAM pipe: BAM %pa pipe %d\n",
+			BAM_ID(dev), pipe_index);
+	else
+		SPS_DBG2(dev, "sps: BAM-to-System pipe: BAM %pa pipe %d\n",
+			BAM_ID(dev), pipe_index);
+
+	if (!(pipe->state & BAM_STATE_ENABLED)) {
+		SPS_ERR(dev,
+			"sps: BAM %pa pipe %d is not enabled.\n",
+			BAM_ID(dev), pipe_index);
+		return SPS_ERROR;
+	}
+
+	read_p = bam_pipe_get_desc_read_offset(&dev->base, pipe_index);
+	write_p = bam_pipe_get_desc_write_offset(&dev->base, pipe_index);
+
+	SPS_DBG2(dev,
+		"sps: BAM %pa pipe %d: read pointer:0x%x; write pointer:0x%x.\n",
+		BAM_ID(dev), pipe_index, read_p, write_p);
+
+	if (read_p == write_p) {
+		SPS_ERR(dev,
+			"sps: BAM %pa pipe %d: read pointer 0x%x is already equal to write pointer.\n",
+			BAM_ID(dev), pipe_index, read_p);
+		return SPS_ERROR;
+	}
+
+	next_write = write_p + sizeof(struct sps_iovec);
+	if (next_write >= pipe->desc_size) {
+		SPS_DBG2(dev,
+			"sps: BAM %pa pipe %d: next write is 0x%x: wrap around.\n",
+			BAM_ID(dev), pipe_index, next_write);
+		next_write = 0;
+	}
+
+	desc = (struct sps_iovec *) (pipe->connect.desc.base + write_p);
+	desc->addr = 0;
+	desc->size = 0;
+	desc->flags = SPS_IOVEC_FLAG_EOT;
+
+	bam_pipe_set_desc_write_offset(&dev->base, pipe_index,
+					       next_write);
+	wmb(); /* update write pointer in HW */
+	SPS_DBG2(dev,
+		"sps: BAM %pa pipe %d: write pointer to tell HW: 0x%x; write pointer read from HW: 0x%x\n",
+		BAM_ID(dev), pipe_index, next_write,
+		bam_pipe_get_desc_write_offset(&dev->base, pipe_index));
+
+	return 0;
+}
+
+/**
+ * Allocate an event tracking struct
+ *
+ * This function returns the pipe's embedded event tracking struct,
+ * zeroed for reuse; no heap allocation takes place.
+ *
+ * @pipe - pointer to pipe state
+ *
+ * @event_reg - pointer to event registration (currently unused)
+ *
+ * @return - pointer to event notification struct, or NULL
+ *
+ */
+static struct sps_q_event *alloc_event(struct sps_pipe *pipe,
+					struct sps_bam_event_reg *event_reg)
+{
+	struct sps_q_event *event;
+
+	/* A callback event object is registered, so trigger with payload */
+	event = &pipe->sys.event;
+	memset(event, 0, sizeof(*event));
+
+	return event;
+}
+
+/**
+ * Trigger an event notification
+ *
+ * This function triggers an event notification.
+ *
+ * @dev - pointer to BAM device descriptor
+ *
+ * @pipe - pointer to pipe state
+ *
+ * @event_reg - pointer to event registration
+ *
+ * @sps_event - pointer to event struct
+ *
+ */
+static void trigger_event(struct sps_bam *dev,
+			  struct sps_pipe *pipe,
+			  struct sps_bam_event_reg *event_reg,
+			  struct sps_q_event *sps_event)
+{
+	if (sps_event == NULL) {
+		SPS_DBG1(dev, "%s", "sps:trigger_event.sps_event is NULL.\n");
+		return;
+	}
+
+	if (event_reg->xfer_done) {
+		complete(event_reg->xfer_done);
+		SPS_DBG(dev, "sps:trigger_event.done=%d.\n",
+			event_reg->xfer_done->done);
+	}
+
+	if (event_reg->callback) {
+		SPS_DBG(dev, "%s", "sps:trigger_event.using callback.\n");
+		event_reg->callback(&sps_event->notify);
+	}
+}
+
+/**
+ * Handle a BAM pipe's generic interrupt sources
+ *
+ * This function creates the event notification for a BAM pipe's
+ *    generic interrupt sources.  The caller of this function must lock the BAM
+ *    device's mutex.
+ *
+ * @dev - pointer to BAM device descriptor
+ *
+ * @pipe - pointer to pipe state
+ *
+ * @event_id - event identifier enum
+ *
+ */
+static void pipe_handler_generic(struct sps_bam *dev,
+			       struct sps_pipe *pipe,
+			       enum sps_event event_id)
+{
+	struct sps_bam_event_reg *event_reg;
+	struct sps_q_event *sps_event;
+	int index;
+
+	index = SPS_EVENT_INDEX(event_id);
+	if (index < 0 || index >= SPS_EVENT_INDEX(SPS_EVENT_MAX))
+		return;
+
+	event_reg = &pipe->sys.event_regs[index];
+	sps_event = alloc_event(pipe, event_reg);
+	if (sps_event != NULL) {
+		sps_event->notify.event_id = event_id;
+		sps_event->notify.user = event_reg->user;
+		trigger_event(dev, pipe, event_reg, sps_event);
+	}
+}
+
+/**
+ * Handle a BAM pipe's WAKEUP interrupt sources
+ *
+ * This function creates the event notification for a BAM pipe's
+ *    WAKEUP interrupt source.  The caller of this function must lock the BAM
+ *    device's mutex.
+ *
+ * @dev - pointer to BAM device descriptor
+ *
+ * @pipe - pointer to pipe state
+ *
+ */
+static void pipe_handler_wakeup(struct sps_bam *dev, struct sps_pipe *pipe)
+{
+	struct sps_bam_event_reg *event_reg;
+	struct sps_q_event *event;
+	u32 pipe_index = pipe->pipe_index;
+
+	if (pipe->wake_up_is_one_shot) {
+		SPS_DBG2(dev,
+			"sps:BAM:%pa pipe %d wake_up_is_one_shot; irq_mask:0x%x.\n",
+			BAM_ID(dev), pipe_index, pipe->irq_mask);
+		/* Disable the pipe WAKEUP interrupt source */
+		pipe->irq_mask &= ~BAM_PIPE_IRQ_WAKE;
+		pipe_set_irq(dev, pipe_index, pipe->polled);
+	}
+
+	event_reg = &pipe->sys.event_regs[SPS_EVENT_INDEX(SPS_EVENT_WAKEUP)];
+	event = alloc_event(pipe, event_reg);
+	if (event != NULL) {
+		event->notify.event_id = SPS_EVENT_WAKEUP;
+		event->notify.user = event_reg->user;
+		trigger_event(dev, pipe, event_reg, event);
+	}
+}
+
+/**
+ * Handle a BAM pipe's EOT/INT interrupt sources
+ *
+ * This function creates the event notification for a BAM pipe's EOT interrupt
+ *    source.  The caller of this function must lock the BAM device's mutex.
+ *
+ * @dev - pointer to BAM device descriptor
+ *
+ * @pipe - pointer to pipe state
+ *
+ */
+static void pipe_handler_eot(struct sps_bam *dev, struct sps_pipe *pipe)
+{
+	struct sps_bam_event_reg *event_reg;
+	struct sps_q_event *event;
+	struct sps_iovec *desc;
+	struct sps_iovec *cache;
+	void **user;
+	u32 *update_offset;
+	u32 pipe_index = pipe->pipe_index;
+	u32 offset;
+	u32 end_offset;
+	enum sps_event event_id;
+	u32 flags;
+	u32 enabled;
+	int producer = (pipe->mode == SPS_MODE_SRC);
+
+	if (pipe->sys.handler_eot) {
+		/*
+		 * This can happen if the pipe is configured for polling
+		 * (IRQ disabled) and callback event generation.
+		 * The client may perform a get_iovec() inside the callback.
+		 */
+		SPS_DBG(dev,
+			"sps:%s; still handling EOT for pipe %d.\n",
+			__func__, pipe->pipe_index);
+		return;
+	}
+
+	pipe->sys.handler_eot = true;
+
+	/* Get offset of last descriptor completed by the pipe */
+	end_offset = bam_pipe_get_desc_read_offset(&dev->base, pipe_index);
+
+	if (dev->ipc_loglevel == 0)
+		SPS_DBG(dev,
+			"sps:%s; pipe index:%d; read pointer:0x%x; write pointer:0x%x; sys.acked_offset:0x%x.\n",
+			__func__, pipe->pipe_index, end_offset,
+			bam_pipe_get_desc_write_offset(&dev->base, pipe_index),
+			pipe->sys.acked_offset);
+
+	if (producer && pipe->late_eot) {
+		struct sps_iovec *desc_end;
+		if (end_offset == 0)
+			desc_end = (struct sps_iovec *)(pipe->sys.desc_buf
+				+ pipe->desc_size - sizeof(struct sps_iovec));
+		else
+			desc_end = (struct sps_iovec *)	(pipe->sys.desc_buf
+				+ end_offset - sizeof(struct sps_iovec));
+
+		if (!(desc_end->flags & SPS_IOVEC_FLAG_EOT)) {
+			if (end_offset == 0)
+				end_offset = pipe->desc_size
+					- sizeof(struct sps_iovec);
+			else
+				end_offset -= sizeof(struct sps_iovec);
+		}
+	}
+
+	/* If no queue, then do not generate any events */
+	if (pipe->sys.no_queue) {
+		if (!pipe->sys.ack_xfers) {
+			/* Client is not ACK'ing transfers, so do it now */
+			pipe->sys.acked_offset = end_offset;
+		}
+		pipe->sys.handler_eot = false;
+		SPS_DBG(dev,
+			"sps:%s; pipe %d has no queue.\n",
+			__func__, pipe->pipe_index);
+		return;
+	}
+
+	/*
+	 * Get offset of last descriptor processed by software,
+	 * and update to the last descriptor completed by the pipe
+	 */
+	if (!pipe->sys.ack_xfers) {
+		update_offset = &pipe->sys.acked_offset;
+		offset = *update_offset;
+	} else {
+		update_offset = &pipe->sys.cache_offset;
+		offset = *update_offset;
+	}
+
+	/* Are there any completed descriptors to process? */
+	if (offset == end_offset) {
+		pipe->sys.handler_eot = false;
+		SPS_DBG(dev,
+			"sps:%s; there is no completed desc to process for pipe %d.\n",
+			__func__, pipe->pipe_index);
+		return;
+	}
+
+	/* Determine enabled events */
+	enabled = 0;
+	if ((pipe->irq_mask & SPS_O_EOT))
+		enabled |= SPS_IOVEC_FLAG_EOT;
+
+	if ((pipe->irq_mask & SPS_O_DESC_DONE))
+		enabled |= SPS_IOVEC_FLAG_INT;
+
+	/*
+	 * For producer pipe, update the cached descriptor byte count and flags.
+	 * For consumer pipe, the BAM does not update the descriptors, so just
+	 * use the cached copies.
+	 */
+	if (producer) {
+		/*
+		 * Do copies in a tight loop to increase chance of
+		 * multi-descriptor burst accesses on the bus
+		 */
+		struct sps_iovec *desc_end;
+
+		/* Set starting point for copy */
+		desc = (struct sps_iovec *) (pipe->sys.desc_buf + offset);
+		cache =	(struct sps_iovec *) (pipe->sys.desc_cache + offset);
+
+		/* Fetch all completed descriptors to end of FIFO (wrap) */
+		if (end_offset < offset) {
+			desc_end = (struct sps_iovec *)
+				   (pipe->sys.desc_buf + pipe->desc_size);
+			while (desc < desc_end)
+				*cache++ = *desc++;
+
+			desc = (void *)pipe->sys.desc_buf;
+			cache = (void *)pipe->sys.desc_cache;
+		}
+
+		/* Fetch all remaining completed descriptors (no wrap) */
+		desc_end = (struct sps_iovec *)	(pipe->sys.desc_buf +
+						 end_offset);
+		while (desc < desc_end)
+			*cache++ = *desc++;
+	}
+
+	/* Process all completed descriptors */
+	cache = (struct sps_iovec *) (pipe->sys.desc_cache + offset);
+	user = &pipe->sys.user_ptrs[offset / sizeof(struct sps_iovec)];
+	for (;;) {
+		SPS_DBG(dev,
+			"sps:%s; pipe index:%d; iovec addr:0x%x; size:0x%x; flags:0x%x; enabled:0x%x; *user is %s NULL.\n",
+			__func__, pipe->pipe_index, cache->addr,
+			cache->size, cache->flags, enabled,
+			(*user == NULL) ? "" : "not");
+
+		/*
+		 * Increment offset to next descriptor and update pipe offset
+		 * so a client callback can fetch the I/O vector.
+		 */
+		offset += sizeof(struct sps_iovec);
+		if (offset >= pipe->desc_size)
+			/* Roll to start of descriptor FIFO */
+			offset = 0;
+
+		*update_offset = offset;
+#ifdef SPS_BAM_STATISTICS
+		pipe->sys.desc_rd_count++;
+#endif /* SPS_BAM_STATISTICS */
+
+		/* Did client request notification for this descriptor? */
+		flags = cache->flags & enabled;
+		if (*user != NULL || flags) {
+			int index;
+
+			if ((flags & SPS_IOVEC_FLAG_EOT))
+				event_id = SPS_EVENT_EOT;
+			else
+				event_id = SPS_EVENT_DESC_DONE;
+
+			index = SPS_EVENT_INDEX(event_id);
+			event_reg = &pipe->sys.event_regs[index];
+			event = alloc_event(pipe, event_reg);
+			if (event != NULL) {
+				/*
+				 * Store the descriptor and user pointer
+				 * in the notification
+				 */
+				event->notify.data.transfer.iovec = *cache;
+				event->notify.data.transfer.user = *user;
+
+				event->notify.event_id = event_id;
+				event->notify.user = event_reg->user;
+				trigger_event(dev, pipe, event_reg, event);
+			} else {
+				SPS_ERR(dev,
+					"sps: %s: pipe %d: event is NULL.\n",
+					__func__, pipe->pipe_index);
+			}
+#ifdef SPS_BAM_STATISTICS
+			if (*user != NULL)
+				pipe->sys.user_found++;
+#endif /* SPS_BAM_STATISTICS */
+		}
+
+		/* Increment to next descriptor */
+		if (offset == end_offset)
+			break;	/* No more descriptors */
+
+		if (offset) {
+			cache++;
+			user++;
+		} else {
+			cache = (void *)pipe->sys.desc_cache;
+			user = pipe->sys.user_ptrs;
+		}
+	}
+
+	pipe->sys.handler_eot = false;
+}
+
+/**
+ * Handle a BAM pipe's interrupt sources
+ *
+ * This function handles a BAM pipe's interrupt sources.
+ *    The caller of this function must lock the BAM device's mutex.
+ *
+ * @dev - pointer to BAM device descriptor
+ *
+ * @pipe_index - pipe index
+ *
+ * @return void
+ *
+ */
+static void pipe_handler(struct sps_bam *dev, struct sps_pipe *pipe)
+{
+	u32 pipe_index;
+	u32 status;
+	enum sps_event event_id;
+
+	/* Get interrupt sources and ack all */
+	pipe_index = pipe->pipe_index;
+	status = bam_pipe_get_and_clear_irq_status(&dev->base, pipe_index);
+
+	SPS_DBG(dev, "sps:pipe_handler.bam %pa.pipe %d.status=0x%x.\n",
+			BAM_ID(dev), pipe_index, status);
+
+	/* Check for enabled interrupt sources */
+	status &= pipe->irq_mask;
+	if (status == 0)
+		/* No enabled interrupt sources are active */
+		return;
+
+	/*
+	 * Process the interrupt sources in order of frequency of occurrence.
+	 * Check for early exit opportunities.
+	 */
+
+	if ((status & (SPS_O_EOT | SPS_O_DESC_DONE)) &&
+	    (pipe->state & BAM_STATE_BAM2BAM) == 0) {
+		pipe_handler_eot(dev, pipe);
+		if (pipe->sys.no_queue) {
+			/*
+			 * EOT handler will not generate any event if there
+			 * is no queue,
+			 * so generate "empty" (no descriptor) event
+			 */
+			if ((status & SPS_O_EOT))
+				event_id = SPS_EVENT_EOT;
+			else
+				event_id = SPS_EVENT_DESC_DONE;
+
+			pipe_handler_generic(dev, pipe, event_id);
+		}
+		status &= ~(SPS_O_EOT | SPS_O_DESC_DONE);
+		if (status == 0)
+			return;
+	}
+
+	if ((status & SPS_O_WAKEUP)) {
+		pipe_handler_wakeup(dev, pipe);
+		status &= ~SPS_O_WAKEUP;
+		if (status == 0)
+			return;
+	}
+
+	if ((status & SPS_O_INACTIVE)) {
+		pipe_handler_generic(dev, pipe, SPS_EVENT_INACTIVE);
+		status &= ~SPS_O_INACTIVE;
+		if (status == 0)
+			return;
+	}
+
+	if ((status & SPS_O_OUT_OF_DESC)) {
+		pipe_handler_generic(dev, pipe,
+					     SPS_EVENT_OUT_OF_DESC);
+		status &= ~SPS_O_OUT_OF_DESC;
+		if (status == 0)
+			return;
+	}
+
+	if ((status & SPS_O_RST_ERROR) && enhd_pipe) {
+		SPS_ERR(dev, "sps:bam %pa ;pipe 0x%x irq status=0x%x.\n"
+				"sps: BAM_PIPE_IRQ_RST_ERROR\n",
+				BAM_ID(dev), pipe_index, status);
+		bam_output_register_content(&dev->base, dev->props.ee);
+		pipe_handler_generic(dev, pipe,
+					     SPS_EVENT_RST_ERROR);
+		status &= ~SPS_O_RST_ERROR;
+		if (status == 0)
+			return;
+	}
+
+	if ((status & SPS_O_HRESP_ERROR) && enhd_pipe) {
+		SPS_ERR(dev, "sps:bam %pa ;pipe 0x%x irq status=0x%x.\n"
+				"sps: BAM_PIPE_IRQ_HRESP_ERROR\n",
+				BAM_ID(dev), pipe_index, status);
+		bam_output_register_content(&dev->base, dev->props.ee);
+		pipe_handler_generic(dev, pipe,
+					     SPS_EVENT_HRESP_ERROR);
+		status &= ~SPS_O_HRESP_ERROR;
+		if (status == 0)
+			return;
+	}
+
+	if ((status & SPS_EVENT_ERROR))
+		pipe_handler_generic(dev, pipe, SPS_EVENT_ERROR);
+}
+
+/**
+ * Get a BAM pipe event
+ *
+ */
+int sps_bam_pipe_get_event(struct sps_bam *dev,
+			   u32 pipe_index, struct sps_event_notify *notify)
+{
+	struct sps_pipe *pipe = dev->pipes[pipe_index];
+	struct sps_q_event *event_queue;
+
+	if (pipe->sys.no_queue) {
+		SPS_ERR(dev,
+			"sps:Invalid connection for event: BAM %pa pipe %d context 0x%pK\n",
+			BAM_ID(dev), pipe_index, pipe);
+		notify->event_id = SPS_EVENT_INVALID;
+		return SPS_ERROR;
+	}
+
+	/* If pipe is polled, perform polling operation */
+	if (pipe->polled && (pipe->state & BAM_STATE_BAM2BAM) == 0)
+		pipe_handler_eot(dev, pipe);
+
+	/* Pull an event off the synchronous event queue */
+	if (list_empty(&pipe->sys.events_q)) {
+		event_queue = NULL;
+		SPS_DBG(dev, "sps:events_q of bam %pa is empty.\n",
+							BAM_ID(dev));
+	} else {
+		SPS_DBG(dev, "sps:events_q of bam %pa is not empty.\n",
+			BAM_ID(dev));
+		event_queue =
+		list_first_entry(&pipe->sys.events_q, struct sps_q_event,
+				 list);
+		list_del(&event_queue->list);
+	}
+
+	/* Update client's event buffer */
+	if (event_queue == NULL) {
+		/* No event queued, so set client's event to "invalid" */
+		notify->event_id = SPS_EVENT_INVALID;
+	} else {
+		/*
+		 * Copy event into client's buffer and return the event
+		 * to the pool
+		 */
+		*notify = event_queue->notify;
+		kfree(event_queue);
+#ifdef SPS_BAM_STATISTICS
+		pipe->sys.get_events++;
+#endif /* SPS_BAM_STATISTICS */
+	}
+
+	return 0;
+}
+
+/**
+ * Get processed I/O vector
+ */
+int sps_bam_pipe_get_iovec(struct sps_bam *dev, u32 pipe_index,
+			   struct sps_iovec *iovec)
+{
+	struct sps_pipe *pipe = dev->pipes[pipe_index];
+	struct sps_iovec *desc;
+	u32 read_offset;
+
+	/* Is this a valid pipe configured for get_iovec use? */
+	if (!pipe->sys.ack_xfers ||
+	    (pipe->state & BAM_STATE_BAM2BAM) != 0 ||
+	    (pipe->state & BAM_STATE_REMOTE)) {
+		return SPS_ERROR;
+	}
+
+	/* If pipe is polled and queue is enabled, perform polling operation */
+	if ((pipe->polled || pipe->hybrid) && !pipe->sys.no_queue) {
+		SPS_DBG(dev,
+			"sps:%s; BAM: %pa; pipe index:%d; polled is %d; hybrid is %d.\n",
+			__func__, BAM_ID(dev), pipe_index,
+			pipe->polled, pipe->hybrid);
+		pipe_handler_eot(dev, pipe);
+	}
+
+	/* Is there a completed descriptor? */
+	if (pipe->sys.no_queue)
+		read_offset =
+		bam_pipe_get_desc_read_offset(&dev->base, pipe_index);
+	else
+		read_offset = pipe->sys.cache_offset;
+
+	if (read_offset == pipe->sys.acked_offset) {
+		/* No, so clear the iovec to indicate FIFO is empty */
+		memset(iovec, 0, sizeof(*iovec));
+		SPS_DBG(dev,
+			"sps:%s; BAM: %pa; pipe index:%d; no iovec to process.\n",
+			__func__, BAM_ID(dev), pipe_index);
+		return 0;
+	}
+
+	/* Fetch next descriptor */
+	desc = (struct sps_iovec *) (pipe->sys.desc_buf +
+				     pipe->sys.acked_offset);
+	*iovec = *desc;
+#ifdef SPS_BAM_STATISTICS
+	pipe->sys.get_iovecs++;
+#endif /* SPS_BAM_STATISTICS */
+
+	/* Update read/ACK offset */
+	pipe->sys.acked_offset += sizeof(struct sps_iovec);
+	if (pipe->sys.acked_offset >= pipe->desc_size)
+		pipe->sys.acked_offset = 0;
+
+	SPS_DBG(dev,
+		"sps:%s; pipe index:%d; iovec addr:0x%x; size:0x%x; flags:0x%x; acked_offset:0x%x.\n",
+		__func__, pipe->pipe_index, desc->addr,
+		desc->size, desc->flags, pipe->sys.acked_offset);
+
+	return 0;
+}
+
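+/*
+ * Illustrative sketch (not part of the original patch): draining the
+ * processed descriptors of an ACK-mode (SPS_O_ACK_TRANSFERS) pipe.  A
+ * fully zeroed iovec marks an empty FIFO, per the function above.
+ */
+#if 0	/* example only */
+static void example_drain_iovecs(struct sps_bam *dev, u32 pipe_index)
+{
+	struct sps_iovec iovec;
+
+	while (sps_bam_pipe_get_iovec(dev, pipe_index, &iovec) == 0) {
+		if (!iovec.addr && !iovec.size && !iovec.flags)
+			break;	/* FIFO empty */
+		/* process iovec.addr / iovec.size here */
+	}
+}
+#endif
+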
+/**
+ * Determine whether a BAM pipe descriptor FIFO is empty
+ *
+ */
+int sps_bam_pipe_is_empty(struct sps_bam *dev, u32 pipe_index,
+				u32 *empty)
+{
+	struct sps_pipe *pipe = dev->pipes[pipe_index];
+	u32 end_offset;
+	u32 acked_offset;
+
+	/* Is this a satellite connection? */
+	if ((pipe->state & BAM_STATE_REMOTE)) {
+		SPS_ERR(dev, "sps:Is empty on remote: BAM %pa pipe %d\n",
+			BAM_ID(dev), pipe_index);
+		return SPS_ERROR;
+	}
+
+	/* Get offset of last descriptor completed by the pipe */
+	end_offset = bam_pipe_get_desc_read_offset(&dev->base, pipe_index);
+
+	if ((pipe->state & BAM_STATE_BAM2BAM) == 0)
+		/* System mode */
+		acked_offset = pipe->sys.acked_offset;
+	else
+		/* BAM-to-BAM */
+		acked_offset = bam_pipe_get_desc_write_offset(&dev->base,
+							  pipe_index);
+
+	/* Determine descriptor FIFO state */
+	if (end_offset == acked_offset) {
+		*empty = true;
+	} else {
+		if ((pipe->state & BAM_STATE_BAM2BAM) == 0) {
+			*empty = false;
+			SPS_DBG1(dev,
+				"sps:%s; pipe index:%d; this sys2bam pipe is NOT empty.\n",
+				__func__, pipe->pipe_index);
+			return 0;
+		}
+		if (bam_pipe_check_zlt(&dev->base, pipe_index)) {
+			bool p_idc;
+			u32 next_write;
+
+			p_idc = bam_pipe_check_pipe_empty(&dev->base,
+								pipe_index);
+
+			next_write = acked_offset + sizeof(struct sps_iovec);
+			if (next_write >= pipe->desc_size)
+				next_write = 0;
+
+			if (next_write == end_offset) {
+				*empty = true;
+				if (!p_idc)
+					SPS_DBG3(dev,
+						"sps:BAM %pa pipe %d pipe empty checking for ZLT.\n",
+						BAM_ID(dev), pipe_index);
+			} else {
+				*empty = false;
+			}
+		} else {
+			*empty = false;
+		}
+	}
+
+	SPS_DBG1(dev,
+		"sps:%s; pipe index:%d; this pipe is %s empty.\n",
+		__func__, pipe->pipe_index, *empty ? "" : "NOT");
+
+	return 0;
+}
+
+/**
+ * Get number of free slots in a BAM pipe descriptor FIFO
+ *
+ */
+int sps_bam_get_free_count(struct sps_bam *dev, u32 pipe_index,
+				 u32 *count)
+{
+	struct sps_pipe *pipe = dev->pipes[pipe_index];
+	u32 next_write;
+	u32 free;
+
+	/* Is this a BAM-to-BAM or satellite connection? */
+	if ((pipe->state & (BAM_STATE_BAM2BAM | BAM_STATE_REMOTE))) {
+		SPS_ERR(dev,
+			"sps:Free count on BAM-to-BAM or remote: BAM %pa pipe %d\n",
+			BAM_ID(dev), pipe_index);
+		*count = 0;
+		return SPS_ERROR;
+	}
+
+	/* Determine descriptor FIFO state */
+	next_write = pipe->sys.desc_offset + sizeof(struct sps_iovec);
+	if (next_write >= pipe->desc_size)
+		next_write = 0;
+
+	if (pipe->sys.acked_offset >= next_write)
+		free = pipe->sys.acked_offset - next_write;
+	else
+		free = pipe->desc_size - next_write + pipe->sys.acked_offset;
+
+	free /= sizeof(struct sps_iovec);
+	*count = free;
+
+	return 0;
+}
+
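+/*
+ * Worked example (illustrative, not part of the original patch), assuming
+ * sizeof(struct sps_iovec) == 8: with a 16-slot FIFO (desc_size = 0x80),
+ * desc_offset = 0x20 and acked_offset = 0x10, next_write is 0x28 and
+ * free = (0x80 - 0x28 + 0x10) / 8 = 13 slots.  One slot is always held
+ * back so that a full FIFO is distinguishable from an empty one.
+ */
+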
+/**
+ * Set BAM pipe to satellite ownership
+ *
+ */
+int sps_bam_set_satellite(struct sps_bam *dev, u32 pipe_index)
+{
+	struct sps_pipe *pipe = dev->pipes[pipe_index];
+
+	/*
+	 * Switching to satellite control is only supported on the processor
+	 * that controls the BAM global config on multi-EE BAMs.
+	 */
+	if ((dev->props.manage & SPS_BAM_MGR_MULTI_EE) == 0 ||
+	    (dev->props.manage & SPS_BAM_MGR_DEVICE_REMOTE)) {
+		SPS_ERR(dev,
+			"sps:Cannot grant satellite control to BAM %pa pipe %d\n",
+			BAM_ID(dev), pipe_index);
+		return SPS_ERROR;
+	}
+
+	/* Is this pipe locally controlled? */
+	if ((dev->pipe_active_mask & (1UL << pipe_index)) == 0) {
+		SPS_ERR(dev, "sps:BAM %pa pipe %d not local and active\n",
+			BAM_ID(dev), pipe_index);
+		return SPS_ERROR;
+	}
+
+	/* Disable local interrupts for this pipe */
+	if (!pipe->polled)
+		bam_pipe_set_irq(&dev->base, pipe_index, BAM_DISABLE,
+					 pipe->irq_mask, dev->props.ee);
+
+	if (BAM_VERSION_MTI_SUPPORT(dev->version)) {
+		/*
+		 * Set pipe to MTI interrupt mode.
+		 * Must be performed after IRQ disable,
+		 * because it is necessary to re-enable the IRQ to enable
+		 * MTI generation.
+		 * Set both pipe IRQ mask and MTI dest address to zero.
+		 */
+		if ((pipe->state & BAM_STATE_MTI) == 0 || pipe->polled) {
+			bam_pipe_satellite_mti(&dev->base, pipe_index, 0,
+						       dev->props.ee);
+			pipe->state |= BAM_STATE_MTI;
+		}
+	}
+
+	/* Indicate satellite control */
+	list_del(&pipe->list);
+	dev->pipe_active_mask &= ~(1UL << pipe_index);
+	dev->pipe_remote_mask |= pipe->pipe_index_mask;
+	pipe->state |= BAM_STATE_REMOTE;
+
+	return 0;
+}
+
+/**
+ * Perform BAM pipe timer control
+ *
+ */
+int sps_bam_pipe_timer_ctrl(struct sps_bam *dev,
+			    u32 pipe_index,
+			    struct sps_timer_ctrl *timer_ctrl,
+			    struct sps_timer_result *timer_result)
+{
+	enum bam_pipe_timer_mode mode;
+	int result = 0;
+
+	/* Is this pipe locally controlled? */
+	if ((dev->pipe_active_mask & (1UL << pipe_index)) == 0) {
+		SPS_ERR(dev, "sps:BAM %pa pipe %d not local and active\n",
+			BAM_ID(dev), pipe_index);
+		return SPS_ERROR;
+	}
+
+	/* Perform the timer operation */
+	switch (timer_ctrl->op) {
+	case SPS_TIMER_OP_CONFIG:
+		mode = (timer_ctrl->mode == SPS_TIMER_MODE_ONESHOT) ?
+			BAM_PIPE_TIMER_ONESHOT :
+			BAM_PIPE_TIMER_PERIODIC;
+		bam_pipe_timer_config(&dev->base, pipe_index, mode,
+				    timer_ctrl->timeout_msec * 8);
+		break;
+	case SPS_TIMER_OP_RESET:
+		bam_pipe_timer_reset(&dev->base, pipe_index);
+		break;
+	case SPS_TIMER_OP_READ:
+		break;
+	default:
+		result = SPS_ERROR;
+		break;
+	}
+
+	/* Provide the current timer value */
+	if (timer_result != NULL)
+		timer_result->current_timer =
+			bam_pipe_timer_get_count(&dev->base, pipe_index);
+
+	return result;
+}
+
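+/*
+ * Illustrative sketch (not part of the original patch): arming a one-shot
+ * inactivity timer of 10 ms and reading back the current count.  Field
+ * names follow the sps_timer_ctrl/sps_timer_result usage above.
+ */
+#if 0	/* example only */
+static int example_arm_timer(struct sps_bam *dev, u32 pipe_index)
+{
+	struct sps_timer_ctrl ctrl = {
+		.op = SPS_TIMER_OP_CONFIG,
+		.mode = SPS_TIMER_MODE_ONESHOT,
+		.timeout_msec = 10,
+	};
+	struct sps_timer_result result;
+
+	return sps_bam_pipe_timer_ctrl(dev, pipe_index, &ctrl, &result);
+}
+#endif
+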
+/**
+ * Get the number of unused descriptors in the descriptor FIFO
+ * of a pipe
+ */
+int sps_bam_pipe_get_unused_desc_num(struct sps_bam *dev, u32 pipe_index,
+					u32 *desc_num)
+{
+	u32 sw_offset, peer_offset, fifo_size;
+	u32 desc_size = sizeof(struct sps_iovec);
+	struct sps_pipe *pipe = dev->pipes[pipe_index];
+
+	if (pipe == NULL)
+		return SPS_ERROR;
+
+	fifo_size = pipe->desc_size;
+
+	sw_offset = bam_pipe_get_desc_read_offset(&dev->base, pipe_index);
+	if ((dev->props.options & SPS_BAM_CACHED_WP) &&
+		!(pipe->state & BAM_STATE_BAM2BAM)) {
+		peer_offset = pipe->sys.desc_offset;
+		SPS_DBG(dev,
+			"sps:BAM %pa pipe %d: peer offset in cache:0x%x\n",
+			BAM_ID(dev), pipe_index, peer_offset);
+	} else {
+		peer_offset = bam_pipe_get_desc_write_offset(&dev->base,
+				pipe_index);
+	}
+
+	if (sw_offset <= peer_offset)
+		*desc_num = (peer_offset - sw_offset) / desc_size;
+	else
+		*desc_num = (peer_offset + fifo_size - sw_offset) / desc_size;
+
+	return 0;
+}
+
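+/*
+ * Illustrative note (not part of the original patch): this routine counts
+ * descriptors the peer has written but the pipe has not yet consumed,
+ * whereas sps_bam_get_free_count() counts slots software may still fill.
+ * sps_bam_pipe_transfer() combines the two views for polled pipes:
+ */
+#if 0	/* example only, mirrors the logic in sps_bam_pipe_transfer() */
+static u32 example_free_slots_polled(struct sps_bam *dev, u32 pipe_index,
+				     struct sps_pipe *pipe)
+{
+	u32 unused;
+
+	sps_bam_pipe_get_unused_desc_num(dev, pipe_index, &unused);
+	/* total slots minus pending descriptors minus the reserved slot */
+	return pipe->desc_size / sizeof(struct sps_iovec) - unused - 1;
+}
+#endif
+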
+/*
+ * Check if a pipe of a BAM has any pending descriptor
+ */
+bool sps_bam_pipe_pending_desc(struct sps_bam *dev, u32 pipe_index)
+{
+	u32 sw_offset, peer_offset;
+
+	sw_offset = bam_pipe_get_desc_read_offset(&dev->base, pipe_index);
+	peer_offset = bam_pipe_get_desc_write_offset(&dev->base, pipe_index);
+
+	if (sw_offset == peer_offset)
+		return false;
+	else
+		return true;
+}
diff -Nruw linux-4.4.115-fbx/drivers/platform/msm./sps/sps_bam.h linux-4.4.115-fbx/drivers/platform/msm/sps/sps_bam.h
--- linux-4.4.115-fbx/drivers/platform/msm./sps/sps_bam.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/platform/msm/sps/sps_bam.h	2019-01-22 16:16:26.187270712 +0100
@@ -0,0 +1,616 @@
+/* Copyright (c) 2011-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/*
+ * Function and data structure declarations for SPS BAM handling.
+ */
+
+
+#ifndef _SPSBAM_H_
+#define _SPSBAM_H_
+
+#include <linux/types.h>
+#include <linux/completion.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/spinlock.h>
+
+#include "spsi.h"
+
+#define BAM_HANDLE_INVALID         0
+
+#define to_sps_bam_dev(x) \
+	container_of((x), struct sps_bam, base)
+
+enum bam_irq {
+	BAM_DEV_IRQ_RDY_TO_SLEEP = 0x00000001,
+	BAM_DEV_IRQ_HRESP_ERROR = 0x00000002,
+	BAM_DEV_IRQ_ERROR = 0x00000004,
+	BAM_DEV_IRQ_TIMER = 0x00000010,
+};
+
+/* Pipe interrupt mask */
+enum bam_pipe_irq {
+	/* BAM finishes descriptor which has INT bit selected */
+	BAM_PIPE_IRQ_DESC_INT = 0x00000001,
+	/* Inactivity timer expires */
+	BAM_PIPE_IRQ_TIMER = 0x00000002,
+	/* Wakeup peripheral (e.g. USB) */
+	BAM_PIPE_IRQ_WAKE = 0x00000004,
+	/* Producer - no free space for adding a descriptor */
+	/* Consumer - no descriptors for processing */
+	BAM_PIPE_IRQ_OUT_OF_DESC = 0x00000008,
+	/* Pipe Error interrupt */
+	BAM_PIPE_IRQ_ERROR = 0x00000010,
+	/* End-Of-Transfer */
+	BAM_PIPE_IRQ_EOT = 0x00000020,
+	/* Pipe RESET unsuccessful */
+	BAM_PIPE_IRQ_RST_ERROR = 0x00000040,
+	/* Erroneous HRESP from the AHB master */
+	BAM_PIPE_IRQ_HRESP_ERROR = 0x00000080,
+};
+
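+/*
+ * Illustrative sketch (not part of the original patch): the IRQ sources
+ * above are single bits, so a pipe mask is composed by OR-ing them and a
+ * status word is tested with a bitwise AND.
+ */
+#if 0	/* example only */
+static bool example_is_eot_or_error(u32 irq_status)
+{
+	const u32 mask = BAM_PIPE_IRQ_EOT | BAM_PIPE_IRQ_ERROR;
+
+	return (irq_status & mask) != 0;
+}
+#endif
+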
+/* Halt Type */
+enum bam_halt {
+	BAM_HALT_OFF = 0,
+	BAM_HALT_ON = 1,
+};
+
+/* Threshold values of the DMA channels */
+enum bam_dma_thresh_dma {
+	BAM_DMA_THRESH_512 = 0x3,
+	BAM_DMA_THRESH_256 = 0x2,
+	BAM_DMA_THRESH_128 = 0x1,
+	BAM_DMA_THRESH_64 = 0x0,
+};
+
+/* Weight values of the DMA channels */
+enum bam_dma_weight_dma {
+	BAM_DMA_WEIGHT_HIGH = 7,
+	BAM_DMA_WEIGHT_MED = 3,
+	BAM_DMA_WEIGHT_LOW = 1,
+	BAM_DMA_WEIGHT_DEFAULT = BAM_DMA_WEIGHT_LOW,
+	BAM_DMA_WEIGHT_DISABLE = 0,
+};
+
+
+/* Invalid pipe index value */
+#define SPS_BAM_PIPE_INVALID  ((u32)(-1))
+
+/* Parameters for sps_bam_pipe_connect() */
+struct sps_bam_connect_param {
+	/* which end point must be initialized */
+	enum sps_mode mode;
+
+	/* OR'd connection end point options (see SPS_O defines) */
+	u32 options;
+
+	/* SETPEND/MTI interrupt generation parameters */
+	u32 irq_gen_addr;
+	u32 irq_gen_data;
+
+};
+
+/* Event registration struct */
+struct sps_bam_event_reg {
+	/* Client's event object handle */
+	struct completion *xfer_done;
+	void (*callback)(struct sps_event_notify *notify);
+
+	/* Event trigger mode */
+	enum sps_trigger mode;
+
+	/* User pointer that will be provided in event payload data */
+	void *user;
+
+};
+
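+/*
+ * Illustrative sketch (not part of the original patch): trigger_event()
+ * in sps_bam.c fires whichever of xfer_done/callback is non-NULL, so a
+ * blocking client registers a completion instead of a callback.
+ * SPS_TRIGGER_WAIT is assumed here from enum sps_trigger.
+ */
+#if 0	/* example only */
+static struct completion example_done;
+
+static void example_init_wait_reg(struct sps_bam_event_reg *event_reg)
+{
+	init_completion(&example_done);
+	event_reg->xfer_done = &example_done;
+	event_reg->callback = NULL;
+	event_reg->mode = SPS_TRIGGER_WAIT;
+	event_reg->user = NULL;
+}
+#endif
+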
+/* Descriptor FIFO cache entry */
+struct sps_bam_desc_cache {
+	struct sps_iovec iovec;
+	void *user; /* User pointer registered with this transfer */
+};
+
+/* Forward declaration */
+struct sps_bam;
+
+/* System mode control */
+struct sps_bam_sys_mode {
+	/* Descriptor FIFO control */
+	u8 *desc_buf; /* Descriptor FIFO for BAM pipe */
+	u32 desc_offset; /* Next new descriptor to be written to hardware */
+	u32 acked_offset; /* Next descriptor to be retired by software */
+
+	/* Descriptor cache control (!no_queue only) */
+	u8 *desc_cache; /* Software cache of descriptor FIFO contents */
+	u32 cache_offset; /* Next descriptor to be cached (ack_xfers only) */
+
+	/* User pointers associated with cached descriptors */
+	void **user_ptrs;
+
+	/* Event handling */
+	struct sps_bam_event_reg event_regs[SPS_EVENT_INDEX(SPS_EVENT_MAX)];
+	struct list_head events_q;
+
+	struct sps_q_event event;	/* Temp storage for event creation */
+	int no_queue;	/* Whether events are queued */
+	int ack_xfers;	/* Whether client must ACK all descriptors */
+	int handler_eot; /* Whether EOT handling is in progress (debug) */
+
+	/* Statistics */
+#ifdef SPS_BAM_STATISTICS
+	u32 desc_wr_count;
+	u32 desc_rd_count;
+	u32 user_ptrs_count;
+	u32 user_found;
+	u32 int_flags;
+	u32 eot_flags;
+	u32 callback_events;
+	u32 wait_events;
+	u32 queued_events;
+	u32 get_events;
+	u32 get_iovecs;
+#endif /* SPS_BAM_STATISTICS */
+};
+
+/* BAM pipe descriptor */
+struct sps_pipe {
+	struct list_head list;
+
+	/* Client state */
+	u32 client_state;
+	struct sps_bam *bam;
+	struct sps_connect connect;
+	const struct sps_connection *map;
+
+	/* Pipe parameters */
+	u32 state;
+	u32 pipe_index;
+	u32 pipe_index_mask;
+	u32 irq_mask;
+	int polled;
+	int hybrid;
+	bool late_eot;
+	u32 irq_gen_addr;
+	enum sps_mode mode;
+	u32 num_descs; /* Size (number of elements) of descriptor FIFO */
+	u32 desc_size; /* Size (bytes) of descriptor FIFO */
+	int wake_up_is_one_shot; /* Whether WAKEUP event is a one-shot or not */
+
+	/* System mode control */
+	struct sps_bam_sys_mode sys;
+
+	bool disconnecting;
+};
+
+/* BAM device descriptor */
+struct sps_bam {
+	struct list_head list;
+
+	/* BAM device properties, including connection defaults */
+	struct sps_bam_props props;
+
+	/* BAM device state */
+	u32 state;
+	struct mutex lock;
+	void *base; /* BAM virtual base address */
+	u32 version;
+	spinlock_t isr_lock;
+	spinlock_t connection_lock;
+	unsigned long irqsave_flags;
+
+	/* Pipe state */
+	u32 pipe_active_mask;
+	u32 pipe_remote_mask;
+	struct sps_pipe *pipes[BAM_MAX_PIPES];
+	struct list_head pipes_q;
+
+	/* Statistics */
+	u32 irq_from_disabled_pipe;
+	u32 event_trigger_failures;
+
+	void *ipc_log0;
+	void *ipc_log1;
+	void *ipc_log2;
+	void *ipc_log3;
+	void *ipc_log4;
+
+	u32 ipc_loglevel;
+
+	/* Desc cache pointers */
+	u8 *desc_cache_pointers[BAM_MAX_PIPES];
+};
+
+/**
+ * BAM driver initialization
+ *
+ * This function initializes the BAM driver.
+ *
+ * @options - driver options bitflags (see SPS_OPT_*)
+ *
+ * @return 0 on success, negative value on error
+ *
+ */
+int sps_bam_driver_init(u32 options);
+
+/**
+ * BAM device initialization
+ *
+ * This function initializes a BAM device.
+ *
+ * @dev - pointer to BAM device descriptor
+ *
+ * @return 0 on success, negative value on error
+ *
+ */
+int sps_bam_device_init(struct sps_bam *dev);
+
+/**
+ * BAM device de-initialization
+ *
+ * This function de-initializes a BAM device.
+ *
+ * @dev - pointer to BAM device descriptor
+ *
+ * @return 0 on success, negative value on error
+ *
+ */
+int sps_bam_device_de_init(struct sps_bam *dev);
+
+/**
+ * BAM device reset
+ *
+ * This function resets a BAM device.
+ *
+ * @dev - pointer to BAM device descriptor
+ *
+ * @return 0 on success, negative value on error
+ *
+ */
+int sps_bam_reset(struct sps_bam *dev);
+
+/**
+ * BAM device enable
+ *
+ * This function enables a BAM device.
+ *
+ * @dev - pointer to BAM device descriptor
+ *
+ * @return 0 on success, negative value on error
+ *
+ */
+int sps_bam_enable(struct sps_bam *dev);
+
+/**
+ * BAM device disable
+ *
+ * This function disables a BAM device.
+ *
+ * @dev - pointer to BAM device descriptor
+ *
+ * @return 0 on success, negative value on error
+ *
+ */
+int sps_bam_disable(struct sps_bam *dev);
+
+/**
+ * Allocate a BAM pipe
+ *
+ * This function allocates a BAM pipe.
+ *
+ * @dev - pointer to BAM device descriptor
+ *
+ * @pipe_index - client-specified pipe index, or SPS_BAM_PIPE_INVALID if
+ *    any available pipe is acceptable
+ *
+ * @return - allocated pipe index, or SPS_BAM_PIPE_INVALID on error
+ *
+ */
+u32 sps_bam_pipe_alloc(struct sps_bam *dev, u32 pipe_index);
+
+/**
+ * Free a BAM pipe
+ *
+ * This function frees a BAM pipe.
+ *
+ * @dev - pointer to BAM device descriptor
+ *
+ * @pipe_index - pipe index
+ *
+ */
+void sps_bam_pipe_free(struct sps_bam *dev, u32 pipe_index);
+
+/**
+ * Establish BAM pipe connection
+ *
+ * This function establishes a connection for a BAM pipe (end point).
+ *
+ * @client - pointer to client pipe state struct
+ *
+ * @params - connection parameters
+ *
+ * @return 0 on success, negative value on error
+ *
+ */
+int sps_bam_pipe_connect(struct sps_pipe *client,
+			const struct sps_bam_connect_param *params);
+
+/**
+ * Disconnect a BAM pipe connection
+ *
+ * This function disconnects a connection for a BAM pipe (end point).
+ *
+ * @dev - pointer to BAM device descriptor
+ *
+ * @pipe_index - pipe index
+ *
+ * @return 0 on success, negative value on error
+ *
+ */
+int sps_bam_pipe_disconnect(struct sps_bam *dev, u32 pipe_index);
+
+/**
+ * Set BAM pipe parameters
+ *
+ * This function sets parameters for a BAM pipe.
+ *
+ * @dev - pointer to BAM device descriptor
+ *
+ * @pipe_index - pipe index
+ *
+ * @options - bitflag options (see SPS_O_*)
+ *
+ * @return 0 on success, negative value on error
+ *
+ */
+int sps_bam_pipe_set_params(struct sps_bam *dev, u32 pipe_index, u32 options);
+
+/**
+ * Enable a BAM pipe
+ *
+ * This function enables a BAM pipe.  Note that this function
+ *    is separate from the pipe connect function to allow proper
+ *    sequencing of consumer enable followed by producer enable.
+ *
+ * @dev - pointer to BAM device descriptor
+ *
+ * @pipe_index - pipe index
+ *
+ * @return 0 on success, negative value on error
+ *
+ */
+int sps_bam_pipe_enable(struct sps_bam *dev, u32 pipe_index);
+
+/**
+ * Disable a BAM pipe
+ *
+ * This function disables a BAM pipe.
+ *
+ * @dev - pointer to BAM device descriptor
+ *
+ * @pipe_index - pipe index
+ *
+ * @return 0 on success, negative value on error
+ *
+ */
+int sps_bam_pipe_disable(struct sps_bam *dev, u32 pipe_index);
+
+/**
+ * Register an event for a BAM pipe
+ *
+ * This function registers an event for a BAM pipe.
+ *
+ * @dev - pointer to BAM device descriptor
+ *
+ * @pipe_index - pipe index
+ *
+ * @reg - pointer to event registration struct
+ *
+ * @return 0 on success, negative value on error
+ *
+ */
+int sps_bam_pipe_reg_event(struct sps_bam *dev, u32 pipe_index,
+			   struct sps_register_event *reg);
+
+/**
+ * Submit a transfer of a single buffer to a BAM pipe
+ *
+ * This function submits a transfer of a single buffer to a BAM pipe.
+ *
+ * @dev - pointer to BAM device descriptor
+ *
+ * @pipe_index - pipe index
+ *
+ * @addr - physical address of buffer to transfer
+ *
+ * @size - number of bytes to transfer
+ *
+ * @user - user pointer to register for event
+ *
+ * @flags - descriptor flags (see SPS_IOVEC_FLAG defines)
+ *
+ * @return 0 on success, negative value on error
+ *
+ */
+int sps_bam_pipe_transfer_one(struct sps_bam *dev, u32 pipe_index, u32 addr,
+			      u32 size, void *user, u32 flags);
+
+/**
+ * Submit a transfer to a BAM pipe
+ *
+ * This function submits a transfer to a BAM pipe.
+ *
+ * @dev - pointer to BAM device descriptor
+ *
+ * @pipe_index - pipe index
+ *
+ * @transfer - pointer to transfer struct
+ *
+ * @return 0 on success, negative value on error
+ *
+ */
+int sps_bam_pipe_transfer(struct sps_bam *dev, u32 pipe_index,
+			 struct sps_transfer *transfer);
+
+/**
+ * Get a BAM pipe event
+ *
+ * This function polls for a BAM pipe event.
+ *
+ * @dev - pointer to BAM device descriptor
+ *
+ * @pipe_index - pipe index
+ *
+ * @notify - pointer to event notification struct
+ *
+ * @return 0 on success, negative value on error
+ *
+ */
+int sps_bam_pipe_get_event(struct sps_bam *dev, u32 pipe_index,
+			   struct sps_event_notify *notify);
+
+/**
+ * Get processed I/O vector
+ *
+ * This function fetches the next processed I/O vector.
+ *
+ * @dev - pointer to BAM device descriptor
+ *
+ * @pipe_index - pipe index
+ *
+ * @iovec - Pointer to I/O vector struct (output).
+ *   This struct will be zeroed if there are no more processed I/O vectors.
+ *
+ * @return 0 on success, negative value on error
+ */
+int sps_bam_pipe_get_iovec(struct sps_bam *dev, u32 pipe_index,
+			   struct sps_iovec *iovec);
+
+/**
+ * Determine whether a BAM pipe descriptor FIFO is empty
+ *
+ * This function returns the empty state of a BAM pipe descriptor FIFO.
+ *
+ * The pipe mutex must be locked before calling this function.
+ *
+ * @dev - pointer to BAM device descriptor
+ *
+ * @pipe_index - pipe index
+ *
+ * @empty - pointer to client's empty status word (boolean)
+ *
+ * @return 0 on success, negative value on error
+ *
+ */
+int sps_bam_pipe_is_empty(struct sps_bam *dev, u32 pipe_index, u32 *empty);
+
+/**
+ * Get number of free slots in a BAM pipe descriptor FIFO
+ *
+ * This function returns the number of free slots in a BAM pipe descriptor FIFO.
+ *
+ * The pipe mutex must be locked before calling this function.
+ *
+ * @dev - pointer to BAM device descriptor
+ *
+ * @pipe_index - pipe index
+ *
+ * @count - pointer to count status
+ *
+ * @return 0 on success, negative value on error
+ *
+ */
+int sps_bam_get_free_count(struct sps_bam *dev, u32 pipe_index, u32 *count);
+
+/**
+ * Set BAM pipe to satellite ownership
+ *
+ * This function sets the BAM pipe to satellite ownership.
+ *
+ * @dev - pointer to BAM device descriptor
+ *
+ * @pipe_index - pipe index
+ *
+ * @return 0 on success, negative value on error
+ *
+ */
+int sps_bam_set_satellite(struct sps_bam *dev, u32 pipe_index);
+
+/**
+ * Perform BAM pipe timer control
+ *
+ * This function performs BAM pipe timer control operations.
+ *
+ * @dev - pointer to BAM device descriptor
+ *
+ * @pipe_index - pipe index
+ *
+ * @timer_ctrl - Pointer to timer control specification
+ *
+ * @timer_result - Pointer to buffer for timer operation result.
+ *    This argument can be NULL if no result is expected for the operation.
+ *    If non-NULL, the current timer value will always be provided.
+ *
+ * @return 0 on success, negative value on error
+ *
+ */
+int sps_bam_pipe_timer_ctrl(struct sps_bam *dev, u32 pipe_index,
+			    struct sps_timer_ctrl *timer_ctrl,
+			    struct sps_timer_result *timer_result);
+
+
+/**
+ * Get the number of unused descriptors in the descriptor FIFO
+ * of a pipe
+ *
+ * @dev - pointer to BAM device descriptor
+ *
+ * @pipe_index - pipe index
+ *
+ * @desc_num - number of unused descriptors
+ *
+ */
+int sps_bam_pipe_get_unused_desc_num(struct sps_bam *dev, u32 pipe_index,
+					u32 *desc_num);
+
+/*
+ * sps_bam_check_irq - check IRQ of a BAM device.
+ * @dev - pointer to BAM device descriptor
+ *
+ * This function checks any pending interrupt of a BAM device.
+ *
+ * Return: 0 on success, negative value on error
+ */
+int sps_bam_check_irq(struct sps_bam *dev);
+
+/*
+ * sps_bam_pipe_pending_desc - checking pending descriptor.
+ * @dev:	BAM device handle
+ * @pipe_index:	pipe index
+ *
+ * This function checks if a pipe of a BAM has any pending descriptor.
+ *
+ * @return true if there is any desc pending
+ */
+bool sps_bam_pipe_pending_desc(struct sps_bam *dev, u32 pipe_index);
+
+/*
+ * sps_bam_pipe_inject_zlt - inject a ZLT with EOT.
+ * @dev:	BAM device handle
+ * @pipe_index:	pipe index
+ *
+ * This function injects a ZLT with EOT for a pipe of a BAM.
+ *
+ * Return: 0 on success, negative value on error
+ */
+int sps_bam_pipe_inject_zlt(struct sps_bam *dev, u32 pipe_index);
+#endif	/* _SPSBAM_H_ */
diff -Nruw linux-4.4.115-fbx/drivers/platform/msm./sps/sps.c linux-4.4.115-fbx/drivers/platform/msm/sps/sps.c
--- linux-4.4.115-fbx/drivers/platform/msm./sps/sps.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/platform/msm/sps/sps.c	2019-10-29 09:26:24.617212710 +0100
@@ -0,0 +1,3066 @@
+/* Copyright (c) 2011-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/* Smart-Peripheral-Switch (SPS) Module. */
+
+#include <linux/types.h>	/* u32 */
+#include <linux/kernel.h>	/* pr_info() */
+#include <linux/module.h>	/* module_init() */
+#include <linux/slab.h>		/* kzalloc() */
+#include <linux/mutex.h>	/* mutex */
+#include <linux/device.h>	/* device */
+#include <linux/fs.h>		/* alloc_chrdev_region() */
+#include <linux/list.h>		/* list_head */
+#include <linux/memory.h>	/* memset */
+#include <linux/io.h>		/* ioremap() */
+#include <linux/clk.h>		/* clk_enable() */
+#include <linux/platform_device.h>	/* platform_get_resource_byname() */
+#include <linux/debugfs.h>
+#include <linux/uaccess.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+
+#include "sps_bam.h"
+#include "spsi.h"
+#include "sps_core.h"
+
+#define SPS_DRV_NAME "msm_sps"	/* must match the platform_device name */
+
+/**
+ *  SPS driver state
+ */
+struct sps_drv *sps;
+
+u32 d_type;
+bool enhd_pipe;
+bool imem;
+enum sps_bam_type bam_type;
+enum sps_bam_type bam_types[] = {SPS_BAM_LEGACY, SPS_BAM_NDP, SPS_BAM_NDP_4K};
+
+static void sps_device_de_init(void);
+
+#ifdef CONFIG_DEBUG_FS
+u8 debugfs_record_enabled;
+u8 logging_option;
+u8 debug_level_option;
+u8 print_limit_option;
+u8 reg_dump_option;
+u32 testbus_sel;
+u32 bam_pipe_sel;
+u32 desc_option;
+/**
+ * Selects the log level (0 to 3), giving fine-grained control over
+ * logging across all BAM use cases.
+ */
+u32 log_level_sel;
+
+static char *debugfs_buf;
+static u32 debugfs_buf_size;
+static u32 debugfs_buf_used;
+static int wraparound;
+static struct mutex sps_debugfs_lock;
+
+struct dentry *dent;
+struct dentry *dfile_info;
+struct dentry *dfile_logging_option;
+struct dentry *dfile_debug_level_option;
+struct dentry *dfile_print_limit_option;
+struct dentry *dfile_reg_dump_option;
+struct dentry *dfile_testbus_sel;
+struct dentry *dfile_bam_pipe_sel;
+struct dentry *dfile_desc_option;
+struct dentry *dfile_bam_addr;
+struct dentry *dfile_log_level_sel;
+
+static struct sps_bam *phy2bam(phys_addr_t phys_addr);
+
+/* record debug info for debugfs */
+void sps_debugfs_record(const char *msg)
+{
+	mutex_lock(&sps_debugfs_lock);
+	if (debugfs_record_enabled) {
+		if (debugfs_buf_used + MAX_MSG_LEN >= debugfs_buf_size) {
+			debugfs_buf_used = 0;
+			wraparound = true;
+		}
+		debugfs_buf_used += scnprintf(debugfs_buf + debugfs_buf_used,
+				debugfs_buf_size - debugfs_buf_used,
+				"%s", msg);
+
+		if (wraparound)
+			scnprintf(debugfs_buf + debugfs_buf_used,
+					debugfs_buf_size - debugfs_buf_used,
+					"\n**** end line of sps log ****\n\n");
+	}
+	mutex_unlock(&sps_debugfs_lock);
+}
+
+/* read the recorded debug info to userspace */
+static ssize_t sps_read_info(struct file *file, char __user *ubuf,
+		size_t count, loff_t *ppos)
+{
+	int ret = 0;
+	int size;
+
+	mutex_lock(&sps_debugfs_lock);
+	if (debugfs_record_enabled) {
+		if (wraparound)
+			size = debugfs_buf_size - MAX_MSG_LEN;
+		else
+			size = debugfs_buf_used;
+
+		ret = simple_read_from_buffer(ubuf, count, ppos,
+				debugfs_buf, size);
+	}
+	mutex_unlock(&sps_debugfs_lock);
+
+	return ret;
+}
+
+/*
+ * set the buffer size (in KB) for debug info
+ */
+static ssize_t sps_set_info(struct file *file, const char __user *buf,
+				 size_t count, loff_t *ppos)
+{
+	unsigned long missing;
+	char str[MAX_MSG_LEN];
+	int i;
+	u32 buf_size_kb = 0;
+	u32 new_buf_size;
+	u32 size = sizeof(str) < count ? sizeof(str) : count;
+
+	memset(str, 0, sizeof(str));
+	missing = copy_from_user(str, buf, size);
+	if (missing)
+		return -EFAULT;
+
+	for (i = 0; i < sizeof(str) && (str[i] >= '0') && (str[i] <= '9'); ++i)
+		buf_size_kb = (buf_size_kb * 10) + (str[i] - '0');
+
+	pr_info("sps:debugfs: input buffer size is %dKB\n", buf_size_kb);
+
+	if ((logging_option == 0) || (logging_option == 2)) {
+		pr_info("sps:debugfs: need to first turn on recording.\n");
+		return -EFAULT;
+	}
+
+	if (buf_size_kb < 1) {
+		pr_info("sps:debugfs: buffer size should be "
+			"no less than 1KB.\n");
+		return -EFAULT;
+	}
+
+	if (buf_size_kb > (INT_MAX/SZ_1K)) {
+		pr_err("sps:debugfs: buffer size is too large\n");
+		return -EFAULT;
+	}
+
+	new_buf_size = buf_size_kb * SZ_1K;
+
+	mutex_lock(&sps_debugfs_lock);
+	if (debugfs_record_enabled) {
+		if (debugfs_buf_size == new_buf_size) {
+			/* need do nothing */
+			pr_info("sps:debugfs: input buffer size "
+				"is the same as before.\n");
+			mutex_unlock(&sps_debugfs_lock);
+			return count;
+		} else {
+			/* release the current buffer */
+			debugfs_record_enabled = false;
+			debugfs_buf_used = 0;
+			wraparound = false;
+			kfree(debugfs_buf);
+			debugfs_buf = NULL;
+		}
+	}
+
+	/* allocate new buffer */
+	debugfs_buf_size = new_buf_size;
+
+	debugfs_buf = kzalloc(debugfs_buf_size, GFP_KERNEL);
+	if (!debugfs_buf) {
+		debugfs_buf_size = 0;
+		pr_err("sps:fail to allocate memory for debug_fs.\n");
+		mutex_unlock(&sps_debugfs_lock);
+		return -ENOMEM;
+	}
+
+	debugfs_buf_used = 0;
+	wraparound = false;
+	debugfs_record_enabled = true;
+	mutex_unlock(&sps_debugfs_lock);
+
+	return count;
+}
+
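+/*
+ * Usage note (illustrative, not part of the original patch): once
+ * recording is enabled through the logging option, writing a decimal
+ * size in KB to this file sizes the record buffer.  Paths assume the
+ * default debugfs mount and directory layout:
+ *
+ *	echo 1 > /sys/kernel/debug/sps/logging_option
+ *	echo 32 > /sys/kernel/debug/sps/info
+ *	cat /sys/kernel/debug/sps/info
+ */
+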
+const struct file_operations sps_info_ops = {
+	.read = sps_read_info,
+	.write = sps_set_info,
+};
+
+/* return the current logging option to userspace */
+static ssize_t sps_read_logging_option(struct file *file, char __user *ubuf,
+		size_t count, loff_t *ppos)
+{
+	char value[MAX_MSG_LEN];
+	int nbytes;
+
+	nbytes = snprintf(value, MAX_MSG_LEN, "%d\n", logging_option);
+
+	return simple_read_from_buffer(ubuf, count, ppos, value, nbytes);
+}
+
+/*
+ * set the logging option
+ */
+static ssize_t sps_set_logging_option(struct file *file, const char __user *buf,
+				 size_t count, loff_t *ppos)
+{
+	unsigned long missing;
+	char str[MAX_MSG_LEN];
+	int i;
+	u8 option = 0;
+	u32 size = sizeof(str) < count ? sizeof(str) : count;
+
+	memset(str, 0, sizeof(str));
+	missing = copy_from_user(str, buf, size);
+	if (missing)
+		return -EFAULT;
+
+	for (i = 0; i < sizeof(str) && (str[i] >= '0') && (str[i] <= '9'); ++i)
+		option = (option * 10) + (str[i] - '0');
+
+	pr_info("sps:debugfs: try to change logging option to %d\n", option);
+
+	if (option > 3) {
+		pr_err("sps:debugfs: invalid logging option:%d\n", option);
+		return count;
+	}
+
+	mutex_lock(&sps_debugfs_lock);
+	if (((option == 0) || (option == 2)) &&
+		((logging_option == 1) || (logging_option == 3))) {
+		debugfs_record_enabled = false;
+		kfree(debugfs_buf);
+		debugfs_buf = NULL;
+		debugfs_buf_used = 0;
+		debugfs_buf_size = 0;
+		wraparound = false;
+	}
+
+	logging_option = option;
+	mutex_unlock(&sps_debugfs_lock);
+
+	return count;
+}
+
+const struct file_operations sps_logging_option_ops = {
+	.read = sps_read_logging_option,
+	.write = sps_set_logging_option,
+};
+
+/*
+ * input the bam physical address
+ */
+static ssize_t sps_set_bam_addr(struct file *file, const char __user *buf,
+				 size_t count, loff_t *ppos)
+{
+	unsigned long missing;
+	char str[MAX_MSG_LEN];
+	u32 i;
+	u32 bam_addr = 0;
+	struct sps_bam *bam;
+	u32 num_pipes = 0;
+	void *vir_addr;
+	u32 size = sizeof(str) < count ? sizeof(str) : count;
+
+	memset(str, 0, sizeof(str));
+	missing = copy_from_user(str, buf, size);
+	if (missing)
+		return -EFAULT;
+
+	for (i = 0; i < sizeof(str) && (str[i] >= '0') && (str[i] <= '9'); ++i)
+		bam_addr = (bam_addr * 10) + (str[i] - '0');
+
+	pr_info("sps:debugfs:input BAM physical address:0x%x\n", bam_addr);
+
+	bam = phy2bam(bam_addr);
+
+	if (bam == NULL) {
+		pr_err("sps:debugfs:BAM 0x%x is not registered.", bam_addr);
+		return count;
+	} else {
+		vir_addr = &bam->base;
+		num_pipes = bam->props.num_pipes;
+		if (log_level_sel <= SPS_IPC_MAX_LOGLEVEL)
+			bam->ipc_loglevel = log_level_sel;
+	}
+
+	switch (reg_dump_option) {
+	case 1: /* output all registers of this BAM */
+		print_bam_reg(bam->base);
+		for (i = 0; i < num_pipes; i++)
+			print_bam_pipe_reg(bam->base, i);
+		break;
+	case 2: /* output BAM-level registers */
+		print_bam_reg(bam->base);
+		break;
+	case 3: /* output selected BAM-level registers */
+		print_bam_selected_reg(vir_addr, bam->props.ee);
+		break;
+	case 4: /* output selected registers of all pipes */
+		for (i = 0; i < num_pipes; i++)
+			print_bam_pipe_selected_reg(vir_addr, i);
+		break;
+	case 5: /* output selected registers of selected pipes */
+		for (i = 0; i < num_pipes; i++)
+			if (bam_pipe_sel & (1UL << i))
+				print_bam_pipe_selected_reg(vir_addr, i);
+		break;
+	case 6: /* output selected registers of typical pipes */
+		print_bam_pipe_selected_reg(vir_addr, 4);
+		print_bam_pipe_selected_reg(vir_addr, 5);
+		break;
+	case 7: /* output desc FIFO of all pipes */
+		for (i = 0; i < num_pipes; i++)
+			print_bam_pipe_desc_fifo(vir_addr, i, 0);
+		break;
+	case 8: /* output desc FIFO of selected pipes */
+		for (i = 0; i < num_pipes; i++)
+			if (bam_pipe_sel & (1UL << i))
+				print_bam_pipe_desc_fifo(vir_addr, i, 0);
+		break;
+	case 9: /* output desc FIFO of typical pipes */
+		print_bam_pipe_desc_fifo(vir_addr, 4, 0);
+		print_bam_pipe_desc_fifo(vir_addr, 5, 0);
+		break;
+	case 10: /* output selected registers and desc FIFO of all pipes */
+		for (i = 0; i < num_pipes; i++) {
+			print_bam_pipe_selected_reg(vir_addr, i);
+			print_bam_pipe_desc_fifo(vir_addr, i, 0);
+		}
+		break;
+	case 11: /* output selected registers and desc FIFO of selected pipes */
+		for (i = 0; i < num_pipes; i++)
+			if (bam_pipe_sel & (1UL << i)) {
+				print_bam_pipe_selected_reg(vir_addr, i);
+				print_bam_pipe_desc_fifo(vir_addr, i, 0);
+			}
+		break;
+	case 12: /* output selected registers and desc FIFO of typical pipes */
+		print_bam_pipe_selected_reg(vir_addr, 4);
+		print_bam_pipe_desc_fifo(vir_addr, 4, 0);
+		print_bam_pipe_selected_reg(vir_addr, 5);
+		print_bam_pipe_desc_fifo(vir_addr, 5, 0);
+		break;
+	case 13: /* output BAM_TEST_BUS_REG */
+		if (testbus_sel)
+			print_bam_test_bus_reg(vir_addr, testbus_sel);
+		else {
+			pr_info("sps:output TEST_BUS_REG for all TEST_BUS_SEL");
+			print_bam_test_bus_reg(vir_addr, testbus_sel);
+		}
+		break;
+	case 14: /* output partial desc FIFO of selected pipes */
+		if (desc_option == 0)
+			desc_option = 1;
+		for (i = 0; i < num_pipes; i++)
+			if (bam_pipe_sel & (1UL << i))
+				print_bam_pipe_desc_fifo(vir_addr, i,
+							desc_option);
+		break;
+	case 15: /* output partial data blocks of descriptors */
+		for (i = 0; i < num_pipes; i++)
+			if (bam_pipe_sel & (1UL << i))
+				print_bam_pipe_desc_fifo(vir_addr, i, 100);
+		break;
+	case 16: /* output all registers of selected pipes */
+		for (i = 0; i < num_pipes; i++)
+			if (bam_pipe_sel & (1UL << i))
+				print_bam_pipe_reg(bam->base, i);
+		break;
+	case 91: /* output testbus register, BAM global registers
+			and registers of all pipes */
+		print_bam_test_bus_reg(vir_addr, testbus_sel);
+		print_bam_selected_reg(vir_addr, bam->props.ee);
+		for (i = 0; i < num_pipes; i++)
+			print_bam_pipe_selected_reg(vir_addr, i);
+		break;
+	case 92: /* output testbus register, BAM global registers
+			and registers of selected pipes */
+		print_bam_test_bus_reg(vir_addr, testbus_sel);
+		print_bam_selected_reg(vir_addr, bam->props.ee);
+		for (i = 0; i < num_pipes; i++)
+			if (bam_pipe_sel & (1UL << i))
+				print_bam_pipe_selected_reg(vir_addr, i);
+		break;
+	case 93: /* output registers and partial desc FIFOs
+			of selected pipes: format 1 */
+		if (desc_option == 0)
+			desc_option = 1;
+		print_bam_test_bus_reg(vir_addr, testbus_sel);
+		print_bam_selected_reg(vir_addr, bam->props.ee);
+		for (i = 0; i < num_pipes; i++)
+			if (bam_pipe_sel & (1UL << i))
+				print_bam_pipe_selected_reg(vir_addr, i);
+		for (i = 0; i < num_pipes; i++)
+			if (bam_pipe_sel & (1UL << i))
+				print_bam_pipe_desc_fifo(vir_addr, i,
+							desc_option);
+		break;
+	case 94: /* output registers and partial desc FIFOs
+			of selected pipes: format 2 */
+		if (desc_option == 0)
+			desc_option = 1;
+		print_bam_test_bus_reg(vir_addr, testbus_sel);
+		print_bam_selected_reg(vir_addr, bam->props.ee);
+		for (i = 0; i < num_pipes; i++)
+			if (bam_pipe_sel & (1UL << i)) {
+				print_bam_pipe_selected_reg(vir_addr, i);
+				print_bam_pipe_desc_fifo(vir_addr, i,
+							desc_option);
+			}
+		break;
+	case 95: /* output registers and desc FIFOs
+			of selected pipes: format 1 */
+		print_bam_test_bus_reg(vir_addr, testbus_sel);
+		print_bam_selected_reg(vir_addr, bam->props.ee);
+		for (i = 0; i < num_pipes; i++)
+			if (bam_pipe_sel & (1UL << i))
+				print_bam_pipe_selected_reg(vir_addr, i);
+		for (i = 0; i < num_pipes; i++)
+			if (bam_pipe_sel & (1UL << i))
+				print_bam_pipe_desc_fifo(vir_addr, i, 0);
+		break;
+	case 96: /* output registers and desc FIFOs
+			of selected pipes: format 2 */
+		print_bam_test_bus_reg(vir_addr, testbus_sel);
+		print_bam_selected_reg(vir_addr, bam->props.ee);
+		for (i = 0; i < num_pipes; i++)
+			if (bam_pipe_sel & (1UL << i)) {
+				print_bam_pipe_selected_reg(vir_addr, i);
+				print_bam_pipe_desc_fifo(vir_addr, i, 0);
+			}
+		break;
+	case 97: /* output registers, desc FIFOs and partial data blocks
+			of selected pipes: format 1 */
+		print_bam_test_bus_reg(vir_addr, testbus_sel);
+		print_bam_selected_reg(vir_addr, bam->props.ee);
+		for (i = 0; i < num_pipes; i++)
+			if (bam_pipe_sel & (1UL << i))
+				print_bam_pipe_selected_reg(vir_addr, i);
+		for (i = 0; i < num_pipes; i++)
+			if (bam_pipe_sel & (1UL << i))
+				print_bam_pipe_desc_fifo(vir_addr, i, 0);
+		for (i = 0; i < num_pipes; i++)
+			if (bam_pipe_sel & (1UL << i))
+				print_bam_pipe_desc_fifo(vir_addr, i, 100);
+		break;
+	case 98: /* output registers, desc FIFOs and partial data blocks
+			of selected pipes: format 2 */
+		print_bam_test_bus_reg(vir_addr, testbus_sel);
+		print_bam_selected_reg(vir_addr, bam->props.ee);
+		for (i = 0; i < num_pipes; i++)
+			if (bam_pipe_sel & (1UL << i)) {
+				print_bam_pipe_selected_reg(vir_addr, i);
+				print_bam_pipe_desc_fifo(vir_addr, i, 0);
+				print_bam_pipe_desc_fifo(vir_addr, i, 100);
+			}
+		break;
+	case 99: /* output all registers, desc FIFOs and partial data blocks */
+		print_bam_test_bus_reg(vir_addr, testbus_sel);
+		print_bam_reg(bam->base);
+		for (i = 0; i < num_pipes; i++)
+			print_bam_pipe_reg(bam->base, i);
+		print_bam_selected_reg(vir_addr, bam->props.ee);
+		for (i = 0; i < num_pipes; i++)
+			print_bam_pipe_selected_reg(vir_addr, i);
+		for (i = 0; i < num_pipes; i++)
+			print_bam_pipe_desc_fifo(vir_addr, i, 0);
+		for (i = 0; i < num_pipes; i++)
+			print_bam_pipe_desc_fifo(vir_addr, i, 100);
+		break;
+	default:
+		pr_info("sps:no dump option is chosen yet.");
+	}
+
+	return count;
+}
+
+const struct file_operations sps_bam_addr_ops = {
+	.write = sps_set_bam_addr,
+};
+
+static void sps_debugfs_init(void)
+{
+	debugfs_record_enabled = false;
+	logging_option = 0;
+	debug_level_option = 0;
+	print_limit_option = 0;
+	reg_dump_option = 0;
+	testbus_sel = 0;
+	bam_pipe_sel = 0;
+	desc_option = 0;
+	debugfs_buf_size = 0;
+	debugfs_buf_used = 0;
+	wraparound = false;
+	log_level_sel = SPS_IPC_MAX_LOGLEVEL + 1;
+
+	dent = debugfs_create_dir("sps", NULL);
+	if (IS_ERR_OR_NULL(dent)) {
+		pr_err("sps:fail to create the folder for debug_fs.\n");
+		return;
+	}
+
+	dfile_info = debugfs_create_file("info", 0664, dent, NULL,
+			&sps_info_ops);
+	if (!dfile_info || IS_ERR(dfile_info)) {
+		pr_err("sps:fail to create the file for debug_fs info.\n");
+		goto info_err;
+	}
+
+	dfile_logging_option = debugfs_create_file("logging_option", 0664,
+			dent, NULL, &sps_logging_option_ops);
+	if (!dfile_logging_option || IS_ERR(dfile_logging_option)) {
+		pr_err("sps:fail to create the file for debug_fs logging_option.\n");
+		goto logging_option_err;
+	}
+
+	dfile_debug_level_option = debugfs_create_u8("debug_level_option",
+					0664, dent, &debug_level_option);
+	if (!dfile_debug_level_option || IS_ERR(dfile_debug_level_option)) {
+		pr_err("sps:fail to create the file for debug_fs "
+			"debug_level_option.\n");
+		goto debug_level_option_err;
+	}
+
+	dfile_print_limit_option = debugfs_create_u8("print_limit_option",
+					0664, dent, &print_limit_option);
+	if (!dfile_print_limit_option || IS_ERR(dfile_print_limit_option)) {
+		pr_err("sps:fail to create the file for debug_fs "
+			"print_limit_option.\n");
+		goto print_limit_option_err;
+	}
+
+	dfile_reg_dump_option = debugfs_create_u8("reg_dump_option", 0664,
+						dent, &reg_dump_option);
+	if (!dfile_reg_dump_option || IS_ERR(dfile_reg_dump_option)) {
+		pr_err("sps:fail to create the file for debug_fs "
+			"reg_dump_option.\n");
+		goto reg_dump_option_err;
+	}
+
+	dfile_testbus_sel = debugfs_create_u32("testbus_sel", 0664,
+						dent, &testbus_sel);
+	if (!dfile_testbus_sel || IS_ERR(dfile_testbus_sel)) {
+		pr_err("sps:fail to create debug_fs file for testbus_sel.\n");
+		goto testbus_sel_err;
+	}
+
+	dfile_bam_pipe_sel = debugfs_create_u32("bam_pipe_sel", 0664,
+						dent, &bam_pipe_sel);
+	if (!dfile_bam_pipe_sel || IS_ERR(dfile_bam_pipe_sel)) {
+		pr_err("sps:fail to create debug_fs file for bam_pipe_sel.\n");
+		goto bam_pipe_sel_err;
+	}
+
+	dfile_desc_option = debugfs_create_u32("desc_option", 0664,
+						dent, &desc_option);
+	if (!dfile_desc_option || IS_ERR(dfile_desc_option)) {
+		pr_err("sps:fail to create debug_fs file for desc_option.\n");
+		goto desc_option_err;
+	}
+
+	dfile_bam_addr = debugfs_create_file("bam_addr", 0664,
+			dent, NULL, &sps_bam_addr_ops);
+	if (!dfile_bam_addr || IS_ERR(dfile_bam_addr)) {
+		pr_err("sps:fail to create the file for debug_fs bam_addr.\n");
+		goto bam_addr_err;
+	}
+
+	dfile_log_level_sel = debugfs_create_u32("log_level_sel", 0664,
+						dent, &log_level_sel);
+	if (!dfile_log_level_sel || IS_ERR(dfile_log_level_sel)) {
+		pr_err("sps:fail to create debug_fs file for log_level_sel.\n");
+		goto bam_log_level_err;
+	}
+
+	mutex_init(&sps_debugfs_lock);
+
+	return;
+
+bam_log_level_err:
+	debugfs_remove(dfile_bam_addr);
+bam_addr_err:
+	debugfs_remove(dfile_desc_option);
+desc_option_err:
+	debugfs_remove(dfile_bam_pipe_sel);
+bam_pipe_sel_err:
+	debugfs_remove(dfile_testbus_sel);
+testbus_sel_err:
+	debugfs_remove(dfile_reg_dump_option);
+reg_dump_option_err:
+	debugfs_remove(dfile_print_limit_option);
+print_limit_option_err:
+	debugfs_remove(dfile_debug_level_option);
+debug_level_option_err:
+	debugfs_remove(dfile_logging_option);
+logging_option_err:
+	debugfs_remove(dfile_info);
+info_err:
+	debugfs_remove(dent);
+}
+
+static void sps_debugfs_exit(void)
+{
+	if (dfile_info)
+		debugfs_remove(dfile_info);
+	if (dfile_logging_option)
+		debugfs_remove(dfile_logging_option);
+	if (dfile_debug_level_option)
+		debugfs_remove(dfile_debug_level_option);
+	if (dfile_print_limit_option)
+		debugfs_remove(dfile_print_limit_option);
+	if (dfile_reg_dump_option)
+		debugfs_remove(dfile_reg_dump_option);
+	if (dfile_testbus_sel)
+		debugfs_remove(dfile_testbus_sel);
+	if (dfile_bam_pipe_sel)
+		debugfs_remove(dfile_bam_pipe_sel);
+	if (dfile_desc_option)
+		debugfs_remove(dfile_desc_option);
+	if (dfile_bam_addr)
+		debugfs_remove(dfile_bam_addr);
+	if (dfile_log_level_sel)
+		debugfs_remove(dfile_log_level_sel);
+	if (dent)
+		debugfs_remove(dent);
+	kfree(debugfs_buf);
+	debugfs_buf = NULL;
+}
+#endif
+
+/* Get the debug info of BAM registers and descriptor FIFOs */
+int sps_get_bam_debug_info(unsigned long dev, u32 option, u32 para,
+		u32 tb_sel, u32 desc_sel)
+{
+	int res = 0;
+	struct sps_bam *bam;
+	u32 i;
+	u32 num_pipes = 0;
+	void *vir_addr;
+
+	if (dev == 0) {
+		SPS_ERR(sps,
+			"sps:%s:device handle should not be 0.\n", __func__);
+		return SPS_ERROR;
+	}
+
+	if (sps == NULL || !sps->is_ready) {
+		SPS_DBG3(sps, "sps:%s:sps driver is not ready.\n", __func__);
+		return -EPROBE_DEFER;
+	}
+
+	mutex_lock(&sps->lock);
+	/* Search for the target BAM device */
+	bam = sps_h2bam(dev);
+	if (bam == NULL) {
+		pr_err("sps:Can't find any BAM with handle 0x%lx.", dev);
+		mutex_unlock(&sps->lock);
+		return SPS_ERROR;
+	}
+	mutex_unlock(&sps->lock);
+
+	vir_addr = &bam->base;
+	num_pipes = bam->props.num_pipes;
+
+	SPS_DUMP("sps:<bam-addr> dump BAM:%pa.\n", &bam->props.phys_addr);
+
+	switch (option) {
+	case 1: /* output all registers of this BAM */
+		print_bam_reg(bam->base);
+		for (i = 0; i < num_pipes; i++)
+			print_bam_pipe_reg(bam->base, i);
+		break;
+	case 2: /* output BAM-level registers */
+		print_bam_reg(bam->base);
+		break;
+	case 3: /* output selected BAM-level registers */
+		print_bam_selected_reg(vir_addr, bam->props.ee);
+		break;
+	case 4: /* output selected registers of all pipes */
+		for (i = 0; i < num_pipes; i++)
+			print_bam_pipe_selected_reg(vir_addr, i);
+		break;
+	case 5: /* output selected registers of selected pipes */
+		for (i = 0; i < num_pipes; i++)
+			if (para & (1UL << i))
+				print_bam_pipe_selected_reg(vir_addr, i);
+		break;
+	case 6: /* output selected registers of typical pipes */
+		print_bam_pipe_selected_reg(vir_addr, 4);
+		print_bam_pipe_selected_reg(vir_addr, 5);
+		break;
+	case 7: /* output desc FIFO of all pipes */
+		for (i = 0; i < num_pipes; i++)
+			print_bam_pipe_desc_fifo(vir_addr, i, 0);
+		break;
+	case 8: /* output desc FIFO of selected pipes */
+		for (i = 0; i < num_pipes; i++)
+			if (para & (1UL << i))
+				print_bam_pipe_desc_fifo(vir_addr, i, 0);
+		break;
+	case 9: /* output desc FIFO of typical pipes */
+		print_bam_pipe_desc_fifo(vir_addr, 4, 0);
+		print_bam_pipe_desc_fifo(vir_addr, 5, 0);
+		break;
+	case 10: /* output selected registers and desc FIFO of all pipes */
+		for (i = 0; i < num_pipes; i++) {
+			print_bam_pipe_selected_reg(vir_addr, i);
+			print_bam_pipe_desc_fifo(vir_addr, i, 0);
+		}
+		break;
+	case 11: /* output selected registers and desc FIFO of selected pipes */
+		for (i = 0; i < num_pipes; i++)
+			if (para & (1UL << i)) {
+				print_bam_pipe_selected_reg(vir_addr, i);
+				print_bam_pipe_desc_fifo(vir_addr, i, 0);
+			}
+		break;
+	case 12: /* output selected registers and desc FIFO of typical pipes */
+		print_bam_pipe_selected_reg(vir_addr, 4);
+		print_bam_pipe_desc_fifo(vir_addr, 4, 0);
+		print_bam_pipe_selected_reg(vir_addr, 5);
+		print_bam_pipe_desc_fifo(vir_addr, 5, 0);
+		break;
+	case 13: /* output BAM_TEST_BUS_REG */
+		if (tb_sel)
+			print_bam_test_bus_reg(vir_addr, tb_sel);
+		else
+			pr_info("sps:TEST_BUS_SEL should NOT be zero.");
+		break;
+	case 14: /* output partial desc FIFO of selected pipes */
+		if (desc_sel == 0)
+			desc_sel = 1;
+		for (i = 0; i < num_pipes; i++)
+			if (para & (1UL << i))
+				print_bam_pipe_desc_fifo(vir_addr, i,
+							desc_sel);
+		break;
+	case 15: /* output partial data blocks of descriptors */
+		for (i = 0; i < num_pipes; i++)
+			if (para & (1UL << i))
+				print_bam_pipe_desc_fifo(vir_addr, i, 100);
+		break;
+	case 16: /* output all registers of selected pipes */
+		for (i = 0; i < num_pipes; i++)
+			if (para & (1UL << i))
+				print_bam_pipe_reg(bam->base, i);
+		break;
+	case 91: /* output testbus register, BAM global registers
+			and registers of all pipes */
+		print_bam_test_bus_reg(vir_addr, tb_sel);
+		print_bam_selected_reg(vir_addr, bam->props.ee);
+		for (i = 0; i < num_pipes; i++)
+			print_bam_pipe_selected_reg(vir_addr, i);
+		break;
+	case 92: /* output testbus register, BAM global registers
+			and registers of selected pipes */
+		print_bam_test_bus_reg(vir_addr, tb_sel);
+		print_bam_selected_reg(vir_addr, bam->props.ee);
+		for (i = 0; i < num_pipes; i++)
+			if (para & (1UL << i))
+				print_bam_pipe_selected_reg(vir_addr, i);
+		break;
+	case 93: /* output registers and partial desc FIFOs
+			of selected pipes: format 1 */
+		if (desc_sel == 0)
+			desc_sel = 1;
+		print_bam_test_bus_reg(vir_addr, tb_sel);
+		print_bam_selected_reg(vir_addr, bam->props.ee);
+		for (i = 0; i < num_pipes; i++)
+			if (para & (1UL << i))
+				print_bam_pipe_selected_reg(vir_addr, i);
+		for (i = 0; i < num_pipes; i++)
+			if (para & (1UL << i))
+				print_bam_pipe_desc_fifo(vir_addr, i,
+							desc_sel);
+		break;
+	case 94: /* output registers and partial desc FIFOs
+			of selected pipes: format 2 */
+		if (desc_sel == 0)
+			desc_sel = 1;
+		print_bam_test_bus_reg(vir_addr, tb_sel);
+		print_bam_selected_reg(vir_addr, bam->props.ee);
+		for (i = 0; i < num_pipes; i++)
+			if (para & (1UL << i)) {
+				print_bam_pipe_selected_reg(vir_addr, i);
+				print_bam_pipe_desc_fifo(vir_addr, i,
+							desc_sel);
+			}
+		break;
+	case 95: /* output registers and desc FIFOs
+			of selected pipes: format 1 */
+		print_bam_test_bus_reg(vir_addr, tb_sel);
+		print_bam_selected_reg(vir_addr, bam->props.ee);
+		for (i = 0; i < num_pipes; i++)
+			if (para & (1UL << i))
+				print_bam_pipe_selected_reg(vir_addr, i);
+		for (i = 0; i < num_pipes; i++)
+			if (para & (1UL << i))
+				print_bam_pipe_desc_fifo(vir_addr, i, 0);
+		break;
+	case 96: /* output registers and desc FIFOs
+			of selected pipes: format 2 */
+		print_bam_test_bus_reg(vir_addr, tb_sel);
+		print_bam_selected_reg(vir_addr, bam->props.ee);
+		for (i = 0; i < num_pipes; i++)
+			if (para & (1UL << i)) {
+				print_bam_pipe_selected_reg(vir_addr, i);
+				print_bam_pipe_desc_fifo(vir_addr, i, 0);
+			}
+		break;
+	case 97: /* output registers, desc FIFOs and partial data blocks
+			of selected pipes: format 1 */
+		print_bam_test_bus_reg(vir_addr, tb_sel);
+		print_bam_selected_reg(vir_addr, bam->props.ee);
+		for (i = 0; i < num_pipes; i++)
+			if (para & (1UL << i))
+				print_bam_pipe_selected_reg(vir_addr, i);
+		for (i = 0; i < num_pipes; i++)
+			if (para & (1UL << i))
+				print_bam_pipe_desc_fifo(vir_addr, i, 0);
+		for (i = 0; i < num_pipes; i++)
+			if (para & (1UL << i))
+				print_bam_pipe_desc_fifo(vir_addr, i, 100);
+		break;
+	case 98: /* output registers, desc FIFOs and partial data blocks
+			of selected pipes: format 2 */
+		print_bam_test_bus_reg(vir_addr, tb_sel);
+		print_bam_selected_reg(vir_addr, bam->props.ee);
+		for (i = 0; i < num_pipes; i++)
+			if (para & (1UL << i)) {
+				print_bam_pipe_selected_reg(vir_addr, i);
+				print_bam_pipe_desc_fifo(vir_addr, i, 0);
+				print_bam_pipe_desc_fifo(vir_addr, i, 100);
+			}
+		break;
+	case 99: /* output all registers, desc FIFOs and partial data blocks */
+		print_bam_test_bus_reg(vir_addr, tb_sel);
+		print_bam_reg(bam->base);
+		for (i = 0; i < num_pipes; i++)
+			print_bam_pipe_reg(bam->base, i);
+		print_bam_selected_reg(vir_addr, bam->props.ee);
+		for (i = 0; i < num_pipes; i++)
+			print_bam_pipe_selected_reg(vir_addr, i);
+		for (i = 0; i < num_pipes; i++)
+			print_bam_pipe_desc_fifo(vir_addr, i, 0);
+		for (i = 0; i < num_pipes; i++)
+			print_bam_pipe_desc_fifo(vir_addr, i, 100);
+		break;
+	default:
+		pr_info("sps:no option is chosen yet.");
+	}
+
+	return res;
+}
+EXPORT_SYMBOL(sps_get_bam_debug_info);
+
+/**
+ * Initialize SPS device
+ *
+ * This function initializes the SPS device.
+ *
+ * @return 0 on success, negative value on error
+ *
+ */
+static int sps_device_init(void)
+{
+	int result;
+	bool success;
+#ifdef CONFIG_SPS_SUPPORT_BAMDMA
+	struct sps_bam_props bamdma_props = {0};
+#endif
+
+	SPS_DBG3(sps, "sps:%s.", __func__);
+
+	success = false;
+
+	result = sps_mem_init(sps->pipemem_phys_base, sps->pipemem_size);
+	if (result) {
+		SPS_ERR(sps, "sps:%s:SPS memory init failed", __func__);
+		goto exit_err;
+	}
+
+	INIT_LIST_HEAD(&sps->bams_q);
+	mutex_init(&sps->lock);
+
+	if (sps_rm_init(&sps->connection_ctrl, sps->options)) {
+		SPS_ERR(sps, "sps:%s:Fail to init SPS resource manager",
+				__func__);
+		goto exit_err;
+	}
+
+	result = sps_bam_driver_init(sps->options);
+	if (result) {
+		SPS_ERR(sps, "sps:%s:SPS BAM driver init failed", __func__);
+		goto exit_err;
+	}
+
+	/* Initialize the BAM DMA device */
+#ifdef CONFIG_SPS_SUPPORT_BAMDMA
+	bamdma_props.phys_addr = sps->bamdma_bam_phys_base;
+	bamdma_props.virt_addr = ioremap(sps->bamdma_bam_phys_base,
+					 sps->bamdma_bam_size);
+
+	if (!bamdma_props.virt_addr) {
+		SPS_ERR(sps, "sps:%s:Fail to IO map BAM-DMA BAM registers.\n",
+				__func__);
+		goto exit_err;
+	}
+
+	SPS_DBG3(sps, "sps:bamdma_bam.phys=%pa.virt=0x%pK.",
+		&bamdma_props.phys_addr,
+		bamdma_props.virt_addr);
+
+	bamdma_props.periph_phys_addr =	sps->bamdma_dma_phys_base;
+	bamdma_props.periph_virt_size = sps->bamdma_dma_size;
+	bamdma_props.periph_virt_addr = ioremap(sps->bamdma_dma_phys_base,
+						sps->bamdma_dma_size);
+
+	if (!bamdma_props.periph_virt_addr) {
+		SPS_ERR(sps, "sps:%s:Fail to IO map BAM-DMA peripheral reg.\n",
+				__func__);
+		goto exit_err;
+	}
+
+	SPS_DBG3(sps, "sps:bamdma_dma.phys=%pa.virt=0x%pK.",
+		&bamdma_props.periph_phys_addr,
+		bamdma_props.periph_virt_addr);
+
+	bamdma_props.irq = sps->bamdma_irq;
+
+	bamdma_props.event_threshold = 0x10;	/* Pipe event threshold */
+	bamdma_props.summing_threshold = 0x10;	/* BAM event threshold */
+
+	bamdma_props.options = SPS_BAM_OPT_BAMDMA;
+	bamdma_props.restricted_pipes =	sps->bamdma_restricted_pipes;
+
+	result = sps_dma_init(&bamdma_props);
+	if (result) {
+		SPS_ERR(sps, "sps:%s:SPS BAM DMA driver init failed", __func__);
+		goto exit_err;
+	}
+#endif /* CONFIG_SPS_SUPPORT_BAMDMA */
+
+	result = sps_map_init(NULL, sps->options);
+	if (result) {
+		SPS_ERR(sps,
+			"sps:%s:SPS connection mapping init failed", __func__);
+		goto exit_err;
+	}
+
+	success = true;
+exit_err:
+	if (!success) {
+#ifdef CONFIG_SPS_SUPPORT_BAMDMA
+		sps_device_de_init();
+#endif
+		return SPS_ERROR;
+	}
+
+	return 0;
+}
+
+/**
+ * De-initialize SPS device
+ *
+ * This function de-initializes the SPS device.
+ *
+ */
+static void sps_device_de_init(void)
+{
+	SPS_DBG3(sps, "sps:%s.", __func__);
+
+	if (sps != NULL) {
+#ifdef CONFIG_SPS_SUPPORT_BAMDMA
+		sps_dma_de_init();
+#endif
+		/* Are there any remaining BAM registrations? */
+		if (!list_empty(&sps->bams_q))
+			SPS_ERR(sps,
+				"sps:%s:BAMs are still registered", __func__);
+
+		sps_map_de_init();
+	}
+
+	sps_mem_de_init();
+}
+
+/**
+ * Initialize client state context
+ *
+ * This function initializes a client state context struct.
+ *
+ * @client - Pointer to client state context
+ *
+ * @return 0 on success, negative value on error
+ *
+ */
+static int sps_client_init(struct sps_pipe *client)
+{
+	SPS_DBG(sps, "sps:%s.", __func__);
+
+	if (client == NULL)
+		return -EINVAL;
+
+	/*
+	 * NOTE: Cannot store any state within the SPS driver because
+	 * the driver init function may not have been called yet.
+	 */
+	memset(client, 0, sizeof(*client));
+	sps_rm_config_init(&client->connect);
+
+	client->client_state = SPS_STATE_DISCONNECT;
+	client->bam = NULL;
+
+	return 0;
+}
+
+/**
+ * De-initialize client state context
+ *
+ * This function de-initializes a client state context struct.
+ *
+ * @client - Pointer to client state context
+ *
+ * @return 0 on success, negative value on error
+ *
+ */
+static int sps_client_de_init(struct sps_pipe *client)
+{
+	SPS_DBG(sps, "sps:%s.", __func__);
+
+	if (client->client_state != SPS_STATE_DISCONNECT) {
+		SPS_ERR(sps, "sps:De-init client in connected state: 0x%x",
+				   client->client_state);
+		return SPS_ERROR;
+	}
+
+	client->bam = NULL;
+	client->map = NULL;
+	memset(&client->connect, 0, sizeof(client->connect));
+
+	return 0;
+}
+
+/**
+ * Find the BAM device from the physical address
+ *
+ * This function finds a BAM device in the BAM registration list that
+ * matches the specified physical address.
+ *
+ * @phys_addr - physical address of the BAM
+ *
+ * @return - pointer to the BAM device struct, or NULL on error
+ *
+ */
+static struct sps_bam *phy2bam(phys_addr_t phys_addr)
+{
+	struct sps_bam *bam;
+
+	SPS_DBG2(sps, "sps:%s.", __func__);
+
+	list_for_each_entry(bam, &sps->bams_q, list) {
+		if (bam->props.phys_addr == phys_addr)
+			return bam;
+	}
+
+	return NULL;
+}
+
+/**
+ * Find the handle of a BAM device based on the physical address
+ *
+ * This function finds a BAM device in the BAM registration list that
+ * matches the specified physical address, and returns its handle.
+ *
+ * @phys_addr - physical address of the BAM
+ *
+ * @handle - device handle of the BAM
+ *
+ * @return 0 on success, negative value on error
+ *
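+ * A minimal usage sketch (hypothetical caller; 'bam_phys' stands for a
+ * BAM that was registered earlier via sps_register_bam_device()):
+ *
+ *	unsigned long h;
+ *
+ *	if (sps_phy2h(bam_phys, &h) == 0)
+ *		sps_device_reset(h);
+ *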
+ */
+int sps_phy2h(phys_addr_t phys_addr, unsigned long *handle)
+{
+	struct sps_bam *bam;
+
+	SPS_DBG2(sps, "sps:%s.", __func__);
+
+	if (sps == NULL || !sps->is_ready) {
+		SPS_DBG3(sps, "sps:%s:sps driver is not ready.\n", __func__);
+		return -EPROBE_DEFER;
+	}
+
+	if (handle == NULL) {
+		SPS_ERR(sps, "sps:%s:handle is NULL.\n", __func__);
+		return SPS_ERROR;
+	}
+
+	list_for_each_entry(bam, &sps->bams_q, list) {
+		if (bam->props.phys_addr == phys_addr) {
+			*handle = (uintptr_t) bam;
+			return 0;
+		}
+	}
+
+	SPS_ERR(sps,
+		"sps: BAM device %pa is not registered yet.\n", &phys_addr);
+
+	return -ENODEV;
+}
+EXPORT_SYMBOL(sps_phy2h);
+
+/**
+ * Setup desc/data FIFO for bam-to-bam connection
+ *
+ * @mem_buffer - Pointer to struct for allocated memory properties.
+ *
+ * @addr - address of FIFO
+ *
+ * @size - FIFO size
+ *
+ * @use_offset - use address offset instead of absolute address
+ *
+ * @return 0 on success, negative value on error
+ *
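+ * A minimal sketch, assuming 'off' is a byte offset inside the pipe
+ * memory and 'cfg' is the struct sps_connect being prepared:
+ *
+ *	struct sps_mem_buffer fifo;
+ *
+ *	if (sps_setup_bam2bam_fifo(&fifo, off, 0x100, 1) == 0)
+ *		cfg.desc = fifo;
+ *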
+ */
+int sps_setup_bam2bam_fifo(struct sps_mem_buffer *mem_buffer,
+		  u32 addr, u32 size, int use_offset)
+{
+	SPS_DBG1(sps, "sps:%s.", __func__);
+
+	if ((mem_buffer == NULL) || (size == 0)) {
+		SPS_ERR(sps, "sps:%s:invalid buffer address or size.",
+				__func__);
+		return SPS_ERROR;
+	}
+
+	if (sps == NULL || !sps->is_ready) {
+		SPS_DBG3(sps, "sps:%s:sps driver is not ready.\n", __func__);
+		return -EPROBE_DEFER;
+	}
+
+	if (use_offset) {
+		if ((addr + size) <= sps->pipemem_size)
+			mem_buffer->phys_base = sps->pipemem_phys_base + addr;
+		else {
+			SPS_ERR(sps,
+				"sps:%s:requested mem is out of pipe mem range.\n",
+				__func__);
+			return SPS_ERROR;
+		}
+	} else {
+		if (addr >= sps->pipemem_phys_base &&
+			(addr + size) <= (sps->pipemem_phys_base
+						+ sps->pipemem_size))
+			mem_buffer->phys_base = addr;
+		else {
+			SPS_ERR(sps,
+				"sps:%s:requested mem is out of pipe mem range.\n",
+				__func__);
+			return SPS_ERROR;
+		}
+	}
+
+	mem_buffer->base = spsi_get_mem_ptr(mem_buffer->phys_base);
+	mem_buffer->size = size;
+
+	memset(mem_buffer->base, 0, mem_buffer->size);
+
+	return 0;
+}
+EXPORT_SYMBOL(sps_setup_bam2bam_fifo);
+
+/**
+ * Find the BAM device from the handle
+ *
+ * This function finds a BAM device in the BAM registration list that
+ * matches the specified device handle.
+ *
+ * @h - device handle of the BAM
+ *
+ * @return - pointer to the BAM device struct, or NULL on error
+ *
+ */
+struct sps_bam *sps_h2bam(unsigned long h)
+{
+	struct sps_bam *bam;
+
+	SPS_DBG1(sps, "sps:%s: BAM handle:0x%lx.", __func__, h);
+
+	if (h == SPS_DEV_HANDLE_MEM || h == SPS_DEV_HANDLE_INVALID)
+		return NULL;
+
+	list_for_each_entry(bam, &sps->bams_q, list) {
+		if ((uintptr_t) bam == h)
+			return bam;
+	}
+
+	SPS_ERR(sps, "sps:Can't find BAM device for handle 0x%lx.", h);
+
+	return NULL;
+}
+
+/**
+ * Lock BAM device
+ *
+ * This function obtains the BAM spinlock on the client's connection.
+ *
+ * @pipe - pointer to client pipe state
+ *
+ * @return pointer to BAM device struct, or NULL on error
+ *
+ */
+static struct sps_bam *sps_bam_lock(struct sps_pipe *pipe)
+{
+	struct sps_bam *bam;
+	u32 pipe_index;
+
+	bam = pipe->bam;
+	if (bam == NULL) {
+		SPS_ERR(sps, "sps:%s:Connection is not in connected state.",
+				__func__);
+		return NULL;
+	}
+
+	spin_lock_irqsave(&bam->connection_lock, bam->irqsave_flags);
+
+	/* Verify client owns this pipe */
+	pipe_index = pipe->pipe_index;
+	if (pipe_index >= bam->props.num_pipes ||
+	    pipe != bam->pipes[pipe_index]) {
+		SPS_ERR(bam,
+			"sps:Client not owner of BAM %pa pipe: %d (max %d)",
+			&bam->props.phys_addr, pipe_index,
+			bam->props.num_pipes);
+		spin_unlock_irqrestore(&bam->connection_lock,
+						bam->irqsave_flags);
+		return NULL;
+	}
+
+	return bam;
+}
+
+/**
+ * Unlock BAM device
+ *
+ * This function releases the BAM spinlock on the client's connection.
+ *
+ * @bam - pointer to BAM device struct
+ *
+ */
+static inline void sps_bam_unlock(struct sps_bam *bam)
+{
+	spin_unlock_irqrestore(&bam->connection_lock, bam->irqsave_flags);
+}
+
+/**
+ * Connect an SPS connection end point
+ *
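+ * A minimal sketch of a system-memory-to-BAM connection ('bam_hdl' is a
+ * hypothetical handle obtained from sps_register_bam_device()):
+ *
+ *	struct sps_pipe *pipe = sps_alloc_endpoint();
+ *	struct sps_connect cfg;
+ *
+ *	sps_get_config(pipe, &cfg);
+ *	cfg.source = SPS_DEV_HANDLE_MEM;
+ *	cfg.destination = bam_hdl;
+ *	cfg.mode = SPS_MODE_DEST;
+ *	if (sps_connect(pipe, &cfg))
+ *		sps_free_endpoint(pipe);
+ *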
+ */
+int sps_connect(struct sps_pipe *h, struct sps_connect *connect)
+{
+	struct sps_pipe *pipe = h;
+	unsigned long dev;
+	struct sps_bam *bam;
+	int result;
+
+	if (h == NULL) {
+		SPS_ERR(sps, "sps:%s:pipe is NULL.\n", __func__);
+		return SPS_ERROR;
+	} else if (connect == NULL) {
+		SPS_ERR(sps, "sps:%s:connection is NULL.\n", __func__);
+		return SPS_ERROR;
+	}
+
+	if (sps == NULL)
+		return -ENODEV;
+
+	if (!sps->is_ready) {
+		SPS_ERR(sps, "sps:%s:sps driver is not ready.\n", __func__);
+		return -EAGAIN;
+	}
+
+	if ((connect->lock_group != SPSRM_CLEAR)
+		&& (connect->lock_group > BAM_MAX_P_LOCK_GROUP_NUM)) {
+		SPS_ERR(sps,
+			"sps:%s:The value of pipe lock group is invalid.\n",
+			__func__);
+		return SPS_ERROR;
+	}
+
+	mutex_lock(&sps->lock);
+	/*
+	 * Must lock the BAM device at the top level function, so must
+	 * determine which BAM is the target for the connection
+	 */
+	if (connect->mode == SPS_MODE_SRC)
+		dev = connect->source;
+	else
+		dev = connect->destination;
+
+	bam = sps_h2bam(dev);
+	if (bam == NULL) {
+		SPS_ERR(sps, "sps:Invalid BAM device handle: 0x%lx", dev);
+		result = SPS_ERROR;
+		goto exit_err;
+	}
+
+	mutex_lock(&bam->lock);
+	SPS_DBG2(bam, "sps:sps_connect: bam %pa src 0x%lx dest 0x%lx mode %s",
+			BAM_ID(bam),
+			connect->source,
+			connect->destination,
+			connect->mode == SPS_MODE_SRC ? "SRC" : "DEST");
+
+	/* Allocate resources for the specified connection */
+	pipe->connect = *connect;
+	result = sps_rm_state_change(pipe, SPS_STATE_ALLOCATE);
+	if (result) {
+		mutex_unlock(&bam->lock);
+		goto exit_err;
+	}
+
+	/* Configure the connection */
+	result = sps_rm_state_change(pipe, SPS_STATE_CONNECT);
+	mutex_unlock(&bam->lock);
+	if (result) {
+		sps_disconnect(h);
+		goto exit_err;
+	}
+
+exit_err:
+	mutex_unlock(&sps->lock);
+
+	return result;
+}
+EXPORT_SYMBOL(sps_connect);
+
+/**
+ * Disconnect an SPS connection end point
+ *
+ * This function disconnects an SPS connection end point.
+ * The SPS hardware associated with that end point will be disabled.
+ * For a connection involving system memory (SPS_DEV_HANDLE_MEM), all
+ * connection resources are deallocated.  For a peripheral-to-peripheral
+ * connection, the resources associated with the connection will not be
+ * deallocated until both end points are closed.
+ *
+ * The client must call sps_connect() for the handle before calling
+ * this function.
+ *
+ * @h - client context for SPS connection end point
+ *
+ * @return 0 on success, negative value on error
+ *
+ */
+int sps_disconnect(struct sps_pipe *h)
+{
+	struct sps_pipe *pipe = h;
+	struct sps_pipe *check;
+	struct sps_bam *bam;
+	int result;
+
+	if (pipe == NULL) {
+		SPS_ERR(sps, "sps:%s:Invalid pipe.", __func__);
+		return SPS_ERROR;
+	}
+
+	bam = pipe->bam;
+	if (bam == NULL) {
+		SPS_ERR(sps,
+			"sps:%s:BAM device of this pipe is NULL.", __func__);
+		return SPS_ERROR;
+	}
+
+	SPS_DBG2(bam,
+		"sps:sps_disconnect: bam %pa src 0x%lx dest 0x%lx mode %s",
+		BAM_ID(bam),
+		pipe->connect.source,
+		pipe->connect.destination,
+		pipe->connect.mode == SPS_MODE_SRC ? "SRC" : "DEST");
+
+	result = SPS_ERROR;
+	/* Cross-check client with map table */
+	if (pipe->connect.mode == SPS_MODE_SRC)
+		check = pipe->map->client_src;
+	else
+		check = pipe->map->client_dest;
+
+	if (check != pipe) {
+		SPS_ERR(sps, "sps:%s:Client context is corrupt", __func__);
+		goto exit_err;
+	}
+
+	/* Disconnect the BAM pipe */
+	mutex_lock(&bam->lock);
+	result = sps_rm_state_change(pipe, SPS_STATE_DISCONNECT);
+	mutex_unlock(&bam->lock);
+	if (result)
+		goto exit_err;
+
+	sps_rm_config_init(&pipe->connect);
+	result = 0;
+
+exit_err:
+
+	return result;
+}
+EXPORT_SYMBOL(sps_disconnect);
+
+/**
+ * Register an event object for an SPS connection end point
+ *
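+ * A minimal sketch registering a callback for end-of-transfer events
+ * ('my_eot_cb' is a hypothetical handler supplied by the client):
+ *
+ *	struct sps_register_event ev = {
+ *		.options = SPS_O_EOT,
+ *		.mode = SPS_TRIGGER_CALLBACK,
+ *		.callback = my_eot_cb,
+ *	};
+ *
+ *	sps_register_event(pipe, &ev);
+ *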
+ */
+int sps_register_event(struct sps_pipe *h, struct sps_register_event *reg)
+{
+	struct sps_pipe *pipe = h;
+	struct sps_bam *bam;
+	int result;
+
+	if (h == NULL) {
+		SPS_ERR(sps, "sps:%s:pipe is NULL.\n", __func__);
+		return SPS_ERROR;
+	} else if (reg == NULL) {
+		SPS_ERR(sps, "sps:%s:registered event is NULL.\n", __func__);
+		return SPS_ERROR;
+	}
+
+	if (sps == NULL)
+		return -ENODEV;
+
+	if (!sps->is_ready) {
+		SPS_ERR(sps, "sps:%s:sps driver not ready.\n", __func__);
+		return -EAGAIN;
+	}
+
+	bam = sps_bam_lock(pipe);
+	if (bam == NULL)
+		return SPS_ERROR;
+
+	SPS_DBG2(bam, "sps:%s; events:%d.\n", __func__, reg->options);
+
+	result = sps_bam_pipe_reg_event(bam, pipe->pipe_index, reg);
+	sps_bam_unlock(bam);
+	if (result)
+		SPS_ERR(bam,
+			"sps:Fail to register event for BAM %pa pipe %d",
+			&pipe->bam->props.phys_addr, pipe->pipe_index);
+
+	return result;
+}
+EXPORT_SYMBOL(sps_register_event);
+
+/**
+ * Enable an SPS connection end point
+ *
+ */
+int sps_flow_on(struct sps_pipe *h)
+{
+	struct sps_pipe *pipe = h;
+	struct sps_bam *bam;
+	int result = 0;
+
+	if (h == NULL) {
+		SPS_ERR(sps, "sps:%s:pipe is NULL.\n", __func__);
+		return SPS_ERROR;
+	}
+
+	bam = sps_bam_lock(pipe);
+	if (bam == NULL)
+		return SPS_ERROR;
+
+	SPS_DBG2(bam, "sps:%s.\n", __func__);
+
+	bam_pipe_halt(&bam->base, pipe->pipe_index, false);
+
+	sps_bam_unlock(bam);
+
+	return result;
+}
+EXPORT_SYMBOL(sps_flow_on);
+
+/**
+ * Disable an SPS connection end point
+ *
+ */
+int sps_flow_off(struct sps_pipe *h, enum sps_flow_off mode)
+{
+	struct sps_pipe *pipe = h;
+	struct sps_bam *bam;
+	int result = 0;
+
+	if (h == NULL) {
+		SPS_ERR(sps, "sps:%s:pipe is NULL.\n", __func__);
+		return SPS_ERROR;
+	}
+
+	bam = sps_bam_lock(pipe);
+	if (bam == NULL)
+		return SPS_ERROR;
+
+	SPS_DBG2(bam, "sps:%s.\n", __func__);
+
+	bam_pipe_halt(&bam->base, pipe->pipe_index, true);
+
+	sps_bam_unlock(bam);
+
+	return result;
+}
+EXPORT_SYMBOL(sps_flow_off);
+
+/**
+ * Check if the flags on a descriptor/iovec are valid
+ *
+ * @flags - flags on a descriptor/iovec
+ *
+ * @return 0 on success, negative value on error
+ *
+ */
+static int sps_check_iovec_flags(u32 flags)
+{
+	if ((flags & SPS_IOVEC_FLAG_NWD) &&
+		!(flags & (SPS_IOVEC_FLAG_EOT | SPS_IOVEC_FLAG_CMD))) {
+		SPS_ERR(sps,
+			"sps:%s:NWD is only valid with EOT or CMD.\n",
+			__func__);
+		return SPS_ERROR;
+	} else if ((flags & SPS_IOVEC_FLAG_EOT) &&
+		(flags & SPS_IOVEC_FLAG_CMD)) {
+		SPS_ERR(sps,
+			"sps:%s:EOT and CMD are not allowed to coexist.\n",
+			__func__);
+		return SPS_ERROR;
+	} else if (!(flags & SPS_IOVEC_FLAG_CMD) &&
+		(flags & (SPS_IOVEC_FLAG_LOCK | SPS_IOVEC_FLAG_UNLOCK))) {
+		static const char err_msg[] =
+		"pipe lock/unlock flags are only valid with Command Descriptor";
+		SPS_ERR(sps, "sps:%s.\n", err_msg);
+		return SPS_ERROR;
+	} else if ((flags & SPS_IOVEC_FLAG_LOCK) &&
+		(flags & SPS_IOVEC_FLAG_UNLOCK)) {
+		static const char err_msg[] =
+		"Can't lock and unlock a pipe by the same Command Descriptor";
+		SPS_ERR(sps, "sps:%s.\n", err_msg);
+		return SPS_ERROR;
+	} else if ((flags & SPS_IOVEC_FLAG_IMME) &&
+		(flags & SPS_IOVEC_FLAG_CMD)) {
+		SPS_ERR(sps,
+			"sps:%s:Immediate and CMD are not allowed to coexist.\n",
+			__func__);
+		return SPS_ERROR;
+	} else if ((flags & SPS_IOVEC_FLAG_IMME) &&
+		(flags & SPS_IOVEC_FLAG_NWD)) {
+		SPS_ERR(sps,
+			"sps:%s:Immediate and NWD are not allowed to coexist.\n",
+			__func__);
+		return SPS_ERROR;
+	}
+
+	return 0;
+}
+
+/**
+ * Perform a DMA transfer on an SPS connection end point
+ *
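+ * A minimal sketch queuing a two-descriptor list ('vec_phys' is the
+ * hypothetical DMA address of the 'vec' array):
+ *
+ *	struct sps_iovec vec[2];	/* filled in by the caller */
+ *	struct sps_transfer xfer = {
+ *		.iovec = vec,
+ *		.iovec_phys = vec_phys,
+ *		.iovec_count = 2,
+ *	};
+ *
+ *	sps_transfer(pipe, &xfer);
+ *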
+ */
+int sps_transfer(struct sps_pipe *h, struct sps_transfer *transfer)
+{
+	struct sps_pipe *pipe = h;
+	struct sps_bam *bam;
+	int result;
+	struct sps_iovec *iovec;
+	int i;
+
+	if (h == NULL) {
+		SPS_ERR(sps, "sps:%s:pipe is NULL.\n", __func__);
+		return SPS_ERROR;
+	} else if (transfer == NULL) {
+		SPS_ERR(sps, "sps:%s:transfer is NULL.\n", __func__);
+		return SPS_ERROR;
+	} else if (transfer->iovec == NULL) {
+		SPS_ERR(sps, "sps:%s:iovec list is NULL.\n", __func__);
+		return SPS_ERROR;
+	} else if (transfer->iovec_count == 0) {
+		SPS_ERR(sps, "sps:%s:iovec list is empty.\n", __func__);
+		return SPS_ERROR;
+	} else if (transfer->iovec_phys == 0) {
+		SPS_ERR(sps,
+			"sps:%s:iovec list address is invalid.\n", __func__);
+		return SPS_ERROR;
+	}
+
+	/* Verify content of IOVECs */
+	iovec = transfer->iovec;
+	for (i = 0; i < transfer->iovec_count; i++) {
+		u32 flags = iovec->flags;
+
+		if (iovec->size > SPS_IOVEC_MAX_SIZE) {
+			SPS_ERR(sps,
+				"sps:%s:iovec size is invalid.\n", __func__);
+			return SPS_ERROR;
+		}
+
+		if (sps_check_iovec_flags(flags))
+			return SPS_ERROR;
+
+		iovec++;
+	}
+
+	bam = sps_bam_lock(pipe);
+	if (bam == NULL)
+		return SPS_ERROR;
+
+	SPS_DBG(bam, "sps:%s.\n", __func__);
+
+	result = sps_bam_pipe_transfer(bam, pipe->pipe_index, transfer);
+
+	sps_bam_unlock(bam);
+
+	return result;
+}
+EXPORT_SYMBOL(sps_transfer);
+
+/**
+ * Perform a single DMA transfer on an SPS connection end point
+ *
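+ * A minimal sketch queuing one buffer and requesting an interrupt on
+ * end-of-transfer ('dma_addr' and 'len' are hypothetical):
+ *
+ *	sps_transfer_one(pipe, dma_addr, len, NULL,
+ *			 SPS_IOVEC_FLAG_INT | SPS_IOVEC_FLAG_EOT);
+ *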
+ */
+int sps_transfer_one(struct sps_pipe *h, phys_addr_t addr, u32 size,
+		     void *user, u32 flags)
+{
+	struct sps_pipe *pipe = h;
+	struct sps_bam *bam;
+	int result;
+
+	if (h == NULL) {
+		SPS_ERR(sps, "sps:%s:pipe is NULL.\n", __func__);
+		return SPS_ERROR;
+	}
+
+	if (sps_check_iovec_flags(flags))
+		return SPS_ERROR;
+
+	bam = sps_bam_lock(pipe);
+	if (bam == NULL)
+		return SPS_ERROR;
+
+	SPS_DBG(bam, "sps:%s.\n", __func__);
+
+	result = sps_bam_pipe_transfer_one(bam, pipe->pipe_index,
+				SPS_GET_LOWER_ADDR(addr), size, user,
+				DESC_FLAG_WORD(flags, addr));
+
+	sps_bam_unlock(bam);
+
+	return result;
+}
+EXPORT_SYMBOL(sps_transfer_one);
+
+/**
+ * Read event queue for an SPS connection end point
+ *
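+ * A minimal polling sketch (assumes the pipe was configured with
+ * SPS_O_POLL so events are queued rather than delivered by callback):
+ *
+ *	struct sps_event_notify note;
+ *
+ *	if (sps_get_event(pipe, &note) == 0 &&
+ *	    note.event_id == SPS_EVENT_EOT)
+ *		;	/* a transfer completed */
+ *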
+ */
+int sps_get_event(struct sps_pipe *h, struct sps_event_notify *notify)
+{
+	struct sps_pipe *pipe = h;
+	struct sps_bam *bam;
+	int result;
+
+	if (h == NULL) {
+		SPS_ERR(sps, "sps:%s:pipe is NULL.\n", __func__);
+		return SPS_ERROR;
+	} else if (notify == NULL) {
+		SPS_ERR(sps, "sps:%s:event_notify is NULL.\n", __func__);
+		return SPS_ERROR;
+	}
+
+	bam = sps_bam_lock(pipe);
+	if (bam == NULL)
+		return SPS_ERROR;
+
+	SPS_DBG1(bam, "sps:%s.\n", __func__);
+
+	result = sps_bam_pipe_get_event(bam, pipe->pipe_index, notify);
+	sps_bam_unlock(bam);
+
+	return result;
+}
+EXPORT_SYMBOL(sps_get_event);
+
+/**
+ * Determine whether an SPS connection end point FIFO is empty
+ *
+ */
+int sps_is_pipe_empty(struct sps_pipe *h, u32 *empty)
+{
+	struct sps_pipe *pipe = h;
+	struct sps_bam *bam;
+	int result;
+
+	if (h == NULL) {
+		SPS_ERR(sps, "sps:%s:pipe is NULL.\n", __func__);
+		return SPS_ERROR;
+	} else if (empty == NULL) {
+		SPS_ERR(sps, "sps:%s:result pointer is NULL.\n", __func__);
+		return SPS_ERROR;
+	}
+
+	bam = sps_bam_lock(pipe);
+	if (bam == NULL)
+		return SPS_ERROR;
+
+	SPS_DBG1(bam, "sps:%s.\n", __func__);
+
+	result = sps_bam_pipe_is_empty(bam, pipe->pipe_index, empty);
+	sps_bam_unlock(bam);
+
+	return result;
+}
+EXPORT_SYMBOL(sps_is_pipe_empty);
+
+/**
+ * Get number of free transfer entries for an SPS connection end point
+ *
+ */
+int sps_get_free_count(struct sps_pipe *h, u32 *count)
+{
+	struct sps_pipe *pipe = h;
+	struct sps_bam *bam;
+	int result;
+
+	if (h == NULL) {
+		SPS_ERR(sps, "sps:%s:pipe is NULL.\n", __func__);
+		return SPS_ERROR;
+	} else if (count == NULL) {
+		SPS_ERR(sps, "sps:%s:result pointer is NULL.\n", __func__);
+		return SPS_ERROR;
+	}
+
+	bam = sps_bam_lock(pipe);
+	if (bam == NULL)
+		return SPS_ERROR;
+
+	SPS_DBG(bam, "sps:%s.\n", __func__);
+
+	result = sps_bam_get_free_count(bam, pipe->pipe_index, count);
+	sps_bam_unlock(bam);
+
+	return result;
+}
+EXPORT_SYMBOL(sps_get_free_count);
+
+/**
+ * Reset an SPS BAM device
+ *
+ */
+int sps_device_reset(unsigned long dev)
+{
+	struct sps_bam *bam;
+	int result;
+
+	if (dev == 0) {
+		SPS_ERR(sps,
+			"sps:%s:device handle should not be 0.\n", __func__);
+		return SPS_ERROR;
+	}
+
+	if (sps == NULL || !sps->is_ready) {
+		SPS_DBG3(sps, "sps:%s:sps driver is not ready.\n", __func__);
+		return -EPROBE_DEFER;
+	}
+
+	mutex_lock(&sps->lock);
+	/* Search for the target BAM device */
+	bam = sps_h2bam(dev);
+	if (bam == NULL) {
+		SPS_ERR(sps, "sps:Invalid BAM device handle: 0x%lx", dev);
+		result = SPS_ERROR;
+		goto exit_err;
+	}
+
+	SPS_DBG3(bam, "sps:%s.\n", __func__);
+
+	mutex_lock(&bam->lock);
+	result = sps_bam_reset(bam);
+	mutex_unlock(&bam->lock);
+	if (result) {
+		SPS_ERR(sps, "sps:Fail to reset BAM device: 0x%lx", dev);
+		goto exit_err;
+	}
+
+exit_err:
+	mutex_unlock(&sps->lock);
+
+	return result;
+}
+EXPORT_SYMBOL(sps_device_reset);
+
+/**
+ * Get the configuration parameters for an SPS connection end point
+ *
+ */
+int sps_get_config(struct sps_pipe *h, struct sps_connect *config)
+{
+	struct sps_pipe *pipe = h;
+
+	if (h == NULL) {
+		SPS_ERR(sps, "sps:%s:pipe is NULL.\n", __func__);
+		return SPS_ERROR;
+	} else if (config == NULL) {
+		SPS_ERR(sps, "sps:%s:config pointer is NULL.\n", __func__);
+		return SPS_ERROR;
+	}
+
+	if (pipe->bam == NULL)
+		SPS_DBG(sps, "sps:%s.\n", __func__);
+	else
+		SPS_DBG(pipe->bam,
+			"sps:%s; BAM: %pa; pipe index:%d; options:0x%x.\n",
+			__func__, BAM_ID(pipe->bam), pipe->pipe_index,
+			pipe->connect.options);
+
+	/* Copy current client connection state */
+	*config = pipe->connect;
+
+	return 0;
+}
+EXPORT_SYMBOL(sps_get_config);
+
+/**
+ * Set the configuration parameters for an SPS connection end point
+ *
+ */
+int sps_set_config(struct sps_pipe *h, struct sps_connect *config)
+{
+	struct sps_pipe *pipe = h;
+	struct sps_bam *bam;
+	int result;
+
+	if (h == NULL) {
+		SPS_ERR(sps, "sps:%s:pipe is NULL.\n", __func__);
+		return SPS_ERROR;
+	} else if (config == NULL) {
+		SPS_ERR(sps, "sps:%s:config pointer is NULL.\n", __func__);
+		return SPS_ERROR;
+	}
+
+	bam = sps_bam_lock(pipe);
+	if (bam == NULL) {
+		SPS_ERR(sps, "sps:%s:BAM is NULL.\n", __func__);
+		return SPS_ERROR;
+	}
+
+	SPS_DBG(bam, "sps:%s; BAM: %pa; pipe index:%d, config-options:0x%x.\n",
+		__func__, BAM_ID(bam), pipe->pipe_index, config->options);
+
+	result = sps_bam_pipe_set_params(bam, pipe->pipe_index,
+					 config->options);
+	if (result == 0)
+		pipe->connect.options = config->options;
+	sps_bam_unlock(bam);
+
+	return result;
+}
+EXPORT_SYMBOL(sps_set_config);
+
+/**
+ * Set ownership of an SPS connection end point
+ *
+ */
+int sps_set_owner(struct sps_pipe *h, enum sps_owner owner,
+		  struct sps_satellite *connect)
+{
+	struct sps_pipe *pipe = h;
+	struct sps_bam *bam;
+	int result;
+
+	if (h == NULL) {
+		SPS_ERR(sps, "sps:%s:pipe is NULL.\n", __func__);
+		return SPS_ERROR;
+	} else if (connect == NULL) {
+		SPS_ERR(sps, "sps:%s:connection is NULL.\n", __func__);
+		return SPS_ERROR;
+	}
+
+	if (owner != SPS_OWNER_REMOTE) {
+		SPS_ERR(sps, "sps:Unsupported ownership state: %d", owner);
+		return SPS_ERROR;
+	}
+
+	bam = sps_bam_lock(pipe);
+	if (bam == NULL)
+		return SPS_ERROR;
+
+	SPS_DBG(bam, "sps:%s; BAM: %pa; pipe index:%d.\n",
+		__func__, BAM_ID(bam), pipe->pipe_index);
+
+	result = sps_bam_set_satellite(bam, pipe->pipe_index);
+	if (result)
+		goto exit_err;
+
+	/* Return satellite connect info */
+
+	if (pipe->connect.mode == SPS_MODE_SRC) {
+		connect->dev = pipe->map->src.bam_phys;
+		connect->pipe_index = pipe->map->src.pipe_index;
+	} else {
+		connect->dev = pipe->map->dest.bam_phys;
+		connect->pipe_index = pipe->map->dest.pipe_index;
+	}
+	connect->config = SPS_CONFIG_SATELLITE;
+	connect->options = (enum sps_option) 0;
+
+exit_err:
+	sps_bam_unlock(bam);
+
+	return result;
+}
+EXPORT_SYMBOL(sps_set_owner);
+
+/**
+ * Allocate memory from the SPS Pipe-Memory.
+ *
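+ * A minimal sketch allocating a descriptor FIFO from local pipe memory
+ * for a connection under construction ('cfg' is a struct sps_connect):
+ *
+ *	struct sps_mem_buffer desc_fifo = { .size = 0x100 };
+ *
+ *	if (sps_alloc_mem(pipe, SPS_MEM_LOCAL, &desc_fifo) == 0)
+ *		cfg.desc = desc_fifo;
+ *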
+ */
+int sps_alloc_mem(struct sps_pipe *h, enum sps_mem mem,
+		  struct sps_mem_buffer *mem_buffer)
+{
+	if (sps == NULL)
+		return -ENODEV;
+
+	if (!sps->is_ready) {
+		SPS_ERR(sps, "sps:%s:sps driver is not ready.", __func__);
+		return -EAGAIN;
+	}
+
+	if (mem_buffer == NULL || mem_buffer->size == 0) {
+		SPS_ERR(sps, "sps:%s:invalid memory buffer address or size",
+				__func__);
+		return SPS_ERROR;
+	}
+
+	if (h == NULL)
+		SPS_DBG2(sps,
+			"sps:%s:allocate pipe memory before setup pipe",
+			__func__);
+	else
+		SPS_DBG2(sps,
+			"sps:allocate pipe memory for pipe %d", h->pipe_index);
+
+	mem_buffer->phys_base = sps_mem_alloc_io(mem_buffer->size);
+	if (mem_buffer->phys_base == SPS_ADDR_INVALID) {
+		SPS_ERR(sps, "sps:%s:invalid address of allocated memory",
+				__func__);
+		return SPS_ERROR;
+	}
+
+	mem_buffer->base = spsi_get_mem_ptr(mem_buffer->phys_base);
+
+	return 0;
+}
+EXPORT_SYMBOL(sps_alloc_mem);
+
+/**
+ * Free memory from the SPS Pipe-Memory.
+ *
+ */
+int sps_free_mem(struct sps_pipe *h, struct sps_mem_buffer *mem_buffer)
+{
+	SPS_DBG(sps, "sps:%s.", __func__);
+
+	if (mem_buffer == NULL || mem_buffer->phys_base == SPS_ADDR_INVALID) {
+		SPS_ERR(sps, "sps:%s:invalid memory to free", __func__);
+		return SPS_ERROR;
+	}
+
+	if (h == NULL)
+		SPS_DBG2(sps, "sps:%s:free pipe memory.", __func__);
+	else
+		SPS_DBG2(sps,
+			"sps:free pipe memory for pipe %d.", h->pipe_index);
+
+	sps_mem_free_io(mem_buffer->phys_base, mem_buffer->size);
+
+	return 0;
+}
+EXPORT_SYMBOL(sps_free_mem);
+
+/**
+ * Get the number of unused descriptors in the descriptor FIFO
+ * of a pipe
+ *
+ */
+int sps_get_unused_desc_num(struct sps_pipe *h, u32 *desc_num)
+{
+	struct sps_pipe *pipe = h;
+	struct sps_bam *bam;
+	int result;
+
+	if (h == NULL) {
+		SPS_ERR(sps, "sps:%s:pipe is NULL.\n", __func__);
+		return SPS_ERROR;
+	} else if (desc_num == NULL) {
+		SPS_ERR(sps, "sps:%s:result pointer is NULL.\n", __func__);
+		return SPS_ERROR;
+	}
+
+	bam = sps_bam_lock(pipe);
+	if (bam == NULL)
+		return SPS_ERROR;
+
+	SPS_DBG(bam, "sps:%s; BAM: %pa; pipe index:%d.\n",
+		__func__, BAM_ID(bam), pipe->pipe_index);
+
+	result = sps_bam_pipe_get_unused_desc_num(bam, pipe->pipe_index,
+						desc_num);
+
+	sps_bam_unlock(bam);
+
+	return result;
+}
+EXPORT_SYMBOL(sps_get_unused_desc_num);
+
+/**
+ * Vote for or relinquish BAM DMA clock
+ *
+ */
+int sps_ctrl_bam_dma_clk(bool clk_on)
+{
+	int ret;
+
+	if (sps == NULL || !sps->is_ready) {
+		SPS_DBG3(sps, "sps:%s:sps driver is not ready.\n", __func__);
+		return -EPROBE_DEFER;
+	}
+
+	if (clk_on) {
+		SPS_DBG1(sps, "%s", "sps:vote for bam dma clk.\n");
+		ret = clk_prepare_enable(sps->bamdma_clk);
+		if (ret) {
+			SPS_ERR(sps,
+				"sps:fail to enable bamdma_clk:ret=%d\n", ret);
+			return ret;
+		}
+	} else {
+		SPS_DBG1(sps, "%s", "sps:relinquish bam dma clk.\n");
+		clk_disable_unprepare(sps->bamdma_clk);
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(sps_ctrl_bam_dma_clk);
+
+/**
+ * Register a BAM device
+ *
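+ * A minimal sketch, assuming 'res' and 'irq' come from the platform
+ * device of a hypothetical peripheral BAM:
+ *
+ *	struct sps_bam_props props = {
+ *		.phys_addr = res->start,
+ *		.virt_size = resource_size(res),
+ *		.irq = irq,
+ *		.summing_threshold = 0x10,
+ *	};
+ *	unsigned long bam_hdl;
+ *
+ *	sps_register_bam_device(&props, &bam_hdl);
+ *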
+ */
+int sps_register_bam_device(const struct sps_bam_props *bam_props,
+				unsigned long *dev_handle)
+{
+	struct sps_bam *bam = NULL;
+	void *virt_addr = NULL;
+	char bam_name[MAX_MSG_LEN];
+	u32 manage;
+	int ok;
+	int result;
+
+	if (bam_props == NULL) {
+		SPS_ERR(sps, "sps:%s:bam_props is NULL.\n", __func__);
+		return SPS_ERROR;
+	} else if (dev_handle == NULL) {
+		SPS_ERR(sps, "sps:%s:device handle is NULL.\n", __func__);
+		return SPS_ERROR;
+	}
+
+	if (sps == NULL) {
+		pr_err("sps:%s:sps driver is not ready.\n", __func__);
+		return -EPROBE_DEFER;
+	}
+
+	SPS_DBG3(sps, "sps:%s: Client requests to register BAM %pa.\n",
+		__func__, &bam_props->phys_addr);
+
+	/* BAM-DMA is registered internally during power-up */
+	if ((!sps->is_ready) && !(bam_props->options & SPS_BAM_OPT_BAMDMA)) {
+		SPS_ERR(sps, "sps:%s:sps driver not ready.\n", __func__);
+		return -EAGAIN;
+	}
+
+	/* Check BAM parameters */
+	manage = bam_props->manage & SPS_BAM_MGR_ACCESS_MASK;
+	if (manage != SPS_BAM_MGR_NONE) {
+		if (bam_props->virt_addr == NULL && bam_props->virt_size == 0) {
+			SPS_ERR(sps, "sps:Invalid properties for BAM: %pa",
+					   &bam_props->phys_addr);
+			return SPS_ERROR;
+		}
+	}
+	if ((bam_props->manage & SPS_BAM_MGR_DEVICE_REMOTE) == 0) {
+		/* BAM global is configured by local processor */
+		if (bam_props->summing_threshold == 0) {
+			SPS_ERR(sps,
+				"sps:Invalid device ctrl properties for BAM: %pa",
+				&bam_props->phys_addr);
+			return SPS_ERROR;
+		}
+	}
+	manage = bam_props->manage &
+		  (SPS_BAM_MGR_PIPE_NO_CONFIG | SPS_BAM_MGR_PIPE_NO_CTRL);
+
+	/* In case of error */
+	*dev_handle = SPS_DEV_HANDLE_INVALID;
+	result = SPS_ERROR;
+
+	mutex_lock(&sps->lock);
+	/* Is this BAM already registered? */
+	bam = phy2bam(bam_props->phys_addr);
+	if (bam != NULL) {
+		mutex_unlock(&sps->lock);
+		SPS_ERR(sps, "sps:BAM is already registered: %pa",
+				&bam->props.phys_addr);
+		result = -EEXIST;
+		bam = NULL;   /* Avoid error clean-up kfree(bam) */
+		goto exit_err;
+	}
+
+	/* Perform virtual mapping if required */
+	if ((bam_props->manage & SPS_BAM_MGR_ACCESS_MASK) !=
+	    SPS_BAM_MGR_NONE && bam_props->virt_addr == NULL) {
+		/* Map the memory region */
+		virt_addr = ioremap(bam_props->phys_addr, bam_props->virt_size);
+		if (virt_addr == NULL) {
+			SPS_ERR(sps,
+				"sps:Unable to map BAM IO mem:%pa size:0x%x",
+				&bam_props->phys_addr, bam_props->virt_size);
+			goto exit_err;
+		}
+	}
+
+	bam = kzalloc(sizeof(*bam), GFP_KERNEL);
+	if (bam == NULL) {
+		SPS_ERR(sps,
+			"sps:Unable to allocate BAM device state: size 0x%zu",
+			sizeof(*bam));
+		goto exit_err;
+	}
+
+	mutex_init(&bam->lock);
+	mutex_lock(&bam->lock);
+
+	/* Copy configuration to BAM device descriptor */
+	bam->props = *bam_props;
+	if (virt_addr != NULL)
+		bam->props.virt_addr = virt_addr;
+
+	snprintf(bam_name, sizeof(bam_name), "sps_bam_%pa_0",
+					&bam->props.phys_addr);
+	bam->ipc_log0 = ipc_log_context_create(SPS_IPC_LOGPAGES,
+							bam_name, 0);
+	if (!bam->ipc_log0)
+		SPS_ERR(sps, "%s : unable to create IPC Logging 0 for bam %pa",
+					__func__, &bam->props.phys_addr);
+
+	snprintf(bam_name, sizeof(bam_name), "sps_bam_%pa_1",
+					&bam->props.phys_addr);
+	bam->ipc_log1 = ipc_log_context_create(SPS_IPC_LOGPAGES,
+							bam_name, 0);
+	if (!bam->ipc_log1)
+		SPS_ERR(sps, "%s : unable to create IPC Logging 1 for bam %pa",
+					__func__, &bam->props.phys_addr);
+
+	snprintf(bam_name, sizeof(bam_name), "sps_bam_%pa_2",
+					&bam->props.phys_addr);
+	bam->ipc_log2 = ipc_log_context_create(SPS_IPC_LOGPAGES,
+							bam_name, 0);
+	if (!bam->ipc_log2)
+		SPS_ERR(sps, "%s : unable to create IPC Logging 2 for bam %pa",
+					__func__, &bam->props.phys_addr);
+
+	snprintf(bam_name, sizeof(bam_name), "sps_bam_%pa_3",
+					&bam->props.phys_addr);
+	bam->ipc_log3 = ipc_log_context_create(SPS_IPC_LOGPAGES,
+							bam_name, 0);
+	if (!bam->ipc_log3)
+		SPS_ERR(sps, "%s : unable to create IPC Logging 3 for bam %pa",
+					__func__, &bam->props.phys_addr);
+
+	snprintf(bam_name, sizeof(bam_name), "sps_bam_%pa_4",
+					&bam->props.phys_addr);
+	bam->ipc_log4 = ipc_log_context_create(SPS_IPC_LOGPAGES,
+							bam_name, 0);
+	if (!bam->ipc_log4)
+		SPS_ERR(sps, "%s : unable to create IPC Logging 4 for bam %pa",
+					__func__, &bam->props.phys_addr);
+
+	if (bam_props->ipc_loglevel)
+		bam->ipc_loglevel = bam_props->ipc_loglevel;
+	else
+		bam->ipc_loglevel = SPS_IPC_DEFAULT_LOGLEVEL;
+
+	ok = sps_bam_device_init(bam);
+	mutex_unlock(&bam->lock);
+	if (ok) {
+		SPS_ERR(bam, "sps:Fail to init BAM device: phys %pa",
+			&bam->props.phys_addr);
+		goto exit_err;
+	}
+
+	/* Add BAM to the list */
+	list_add_tail(&bam->list, &sps->bams_q);
+	*dev_handle = (uintptr_t) bam;
+
+	result = 0;
+exit_err:
+	mutex_unlock(&sps->lock);
+
+	if (result) {
+		if (bam != NULL) {
+			if (virt_addr != NULL)
+				iounmap(bam->props.virt_addr);
+			kfree(bam);
+		}
+
+		return result;
+	}
+
+	/* If this BAM is attached to a BAM-DMA, init the BAM-DMA device */
+#ifdef CONFIG_SPS_SUPPORT_BAMDMA
+	if ((bam->props.options & SPS_BAM_OPT_BAMDMA)) {
+		if (sps_dma_device_init((uintptr_t) bam)) {
+			bam->props.options &= ~SPS_BAM_OPT_BAMDMA;
+			sps_deregister_bam_device((uintptr_t) bam);
+			SPS_ERR(bam, "sps:Fail to init BAM-DMA BAM: phys %pa",
+				&bam->props.phys_addr);
+			return SPS_ERROR;
+		}
+	}
+#endif /* CONFIG_SPS_SUPPORT_BAMDMA */
+
+	SPS_INFO(bam, "sps:BAM %pa is registered.", &bam->props.phys_addr);
+
+	return 0;
+}
+EXPORT_SYMBOL(sps_register_bam_device);
+
+/**
+ * Deregister a BAM device
+ *
+ */
+int sps_deregister_bam_device(unsigned long dev_handle)
+{
+	struct sps_bam *bam;
+	int n;
+
+	if (dev_handle == 0) {
+		SPS_ERR(sps, "sps:%s:device handle should not be 0.\n",
+				__func__);
+		return SPS_ERROR;
+	}
+
+	bam = sps_h2bam(dev_handle);
+	if (bam == NULL) {
+		SPS_ERR(sps, "sps:%s:did not find a BAM for this handle",
+				__func__);
+		return SPS_ERROR;
+	}
+
+	SPS_DBG3(sps, "sps:%s: SPS deregister BAM: phys %pa.",
+		__func__, &bam->props.phys_addr);
+
+	if (bam->props.options & SPS_BAM_HOLD_MEM) {
+		for (n = 0; n < BAM_MAX_PIPES; n++)
+			kfree(bam->desc_cache_pointers[n]);
+	}
+
+	/* If this BAM is attached to a BAM-DMA, de-init the BAM-DMA device */
+#ifdef CONFIG_SPS_SUPPORT_BAMDMA
+	if ((bam->props.options & SPS_BAM_OPT_BAMDMA)) {
+		mutex_lock(&bam->lock);
+		(void)sps_dma_device_de_init((uintptr_t) bam);
+		bam->props.options &= ~SPS_BAM_OPT_BAMDMA;
+		mutex_unlock(&bam->lock);
+	}
+#endif
+
+	/* Remove the BAM from the registration list */
+	mutex_lock(&sps->lock);
+	list_del(&bam->list);
+	mutex_unlock(&sps->lock);
+
+	/* De-init the BAM and free resources */
+	mutex_lock(&bam->lock);
+	sps_bam_device_de_init(bam);
+	mutex_unlock(&bam->lock);
+	ipc_log_context_destroy(bam->ipc_log0);
+	ipc_log_context_destroy(bam->ipc_log1);
+	ipc_log_context_destroy(bam->ipc_log2);
+	ipc_log_context_destroy(bam->ipc_log3);
+	ipc_log_context_destroy(bam->ipc_log4);
+	if (bam->props.virt_size)
+		(void)iounmap(bam->props.virt_addr);
+
+	kfree(bam);
+
+	return 0;
+}
+EXPORT_SYMBOL(sps_deregister_bam_device);
+
+/**
+ * Get processed I/O vector (completed transfers)
+ *
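+ * A minimal drain-loop sketch (a zeroed iovec address is taken here to
+ * mean the descriptor FIFO is empty):
+ *
+ *	struct sps_iovec iov;
+ *
+ *	while (sps_get_iovec(pipe, &iov) == 0 && iov.addr)
+ *		;	/* process one completed descriptor */
+ *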
+ */
+int sps_get_iovec(struct sps_pipe *h, struct sps_iovec *iovec)
+{
+	struct sps_pipe *pipe = h;
+	struct sps_bam *bam;
+	int result;
+
+	if (h == NULL) {
+		SPS_ERR(sps, "sps:%s:pipe is NULL.\n", __func__);
+		return SPS_ERROR;
+	} else if (iovec == NULL) {
+		SPS_ERR(sps, "sps:%s:iovec pointer is NULL.\n", __func__);
+		return SPS_ERROR;
+	}
+
+	bam = sps_bam_lock(pipe);
+	if (bam == NULL) {
+		SPS_ERR(sps, "sps:%s:BAM is not found by handle.\n", __func__);
+		return SPS_ERROR;
+	}
+
+	SPS_DBG(bam, "sps:%s; BAM: %pa; pipe index:%d.\n",
+		__func__, BAM_ID(bam), pipe->pipe_index);
+
+	/* Get an iovec from the BAM pipe descriptor FIFO */
+	result = sps_bam_pipe_get_iovec(bam, pipe->pipe_index, iovec);
+	sps_bam_unlock(bam);
+
+	return result;
+}
+EXPORT_SYMBOL(sps_get_iovec);
+
+/**
+ * Perform timer control
+ *
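+ * A minimal sketch reading the pipe timer (SPS_TIMER_OP_READ is assumed
+ * from the sps_timer_op enumeration):
+ *
+ *	struct sps_timer_ctrl tc = { .op = SPS_TIMER_OP_READ };
+ *	struct sps_timer_result tr;
+ *
+ *	sps_timer_ctrl(pipe, &tc, &tr);
+ *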
+ */
+int sps_timer_ctrl(struct sps_pipe *h,
+			struct sps_timer_ctrl *timer_ctrl,
+			struct sps_timer_result *timer_result)
+{
+	struct sps_pipe *pipe = h;
+	struct sps_bam *bam;
+	int result;
+
+	if (h == NULL) {
+		SPS_ERR(sps, "sps:%s:pipe is NULL.\n", __func__);
+		return SPS_ERROR;
+	} else if (timer_ctrl == NULL) {
+		SPS_ERR(sps, "sps:%s:timer_ctrl pointer is NULL.\n", __func__);
+		return SPS_ERROR;
+	} else if (timer_result == NULL) {
+		SPS_DBG(sps, "sps:%s:no result to return.\n", __func__);
+	}
+
+	bam = sps_bam_lock(pipe);
+	if (bam == NULL) {
+		SPS_ERR(sps, "sps:%s:BAM is not found by handle.\n", __func__);
+		return SPS_ERROR;
+	}
+
+	SPS_DBG2(bam, "sps:%s; BAM: %pa; pipe index:%d.\n",
+		__func__, BAM_ID(bam), pipe->pipe_index);
+
+	/* Perform the BAM pipe timer control operation */
+	result = sps_bam_pipe_timer_ctrl(bam, pipe->pipe_index, timer_ctrl,
+					 timer_result);
+	sps_bam_unlock(bam);
+
+	return result;
+}
+EXPORT_SYMBOL(sps_timer_ctrl);
+
+/*
+ * Reset a BAM pipe
+ */
+int sps_pipe_reset(unsigned long dev, u32 pipe)
+{
+	struct sps_bam *bam;
+
+	if (!dev) {
+		SPS_ERR(sps, "sps:%s:BAM handle is NULL.\n", __func__);
+		return SPS_ERROR;
+	}
+
+	if (pipe >= BAM_MAX_PIPES) {
+		SPS_ERR(sps, "sps:%s:pipe index is invalid.\n", __func__);
+		return SPS_ERROR;
+	}
+
+	bam = sps_h2bam(dev);
+	if (bam == NULL) {
+		SPS_ERR(sps, "sps:%s:BAM is not found by handle.\n", __func__);
+		return SPS_ERROR;
+	}
+
+	SPS_DBG2(bam, "sps:%s; BAM: %pa; pipe index:%d.\n",
+		__func__, BAM_ID(bam), pipe);
+
+	bam_pipe_reset(&bam->base, pipe);
+
+	return 0;
+}
+EXPORT_SYMBOL(sps_pipe_reset);
+
+/*
+ * Disable a BAM pipe
+ */
+int sps_pipe_disable(unsigned long dev, u32 pipe)
+{
+	struct sps_bam *bam;
+
+	if (!dev) {
+		SPS_ERR(sps, "sps:%s:BAM handle is NULL.\n", __func__);
+		return SPS_ERROR;
+	}
+
+	if (pipe >= BAM_MAX_PIPES) {
+		SPS_ERR(sps, "sps:%s:pipe index is invalid.\n", __func__);
+		return SPS_ERROR;
+	}
+
+	bam = sps_h2bam(dev);
+	if (bam == NULL) {
+		SPS_ERR(sps, "sps:%s:BAM is not found by handle.\n", __func__);
+		return SPS_ERROR;
+	}
+
+	SPS_DBG(bam, "sps:%s; BAM: %pa; pipe index:%d.\n",
+		__func__, BAM_ID(bam), pipe);
+
+	bam_disable_pipe(&bam->base, pipe);
+
+	return 0;
+}
+EXPORT_SYMBOL(sps_pipe_disable);
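+
+/*
+ * Illustrative recovery sketch (the ordering is an assumption, not a
+ * requirement stated by this driver): both helpers take the BAM device
+ * handle and a pipe index below BAM_MAX_PIPES, so a stuck pipe can be
+ * disabled and then reset through the same handle:
+ *
+ *	if (sps_pipe_disable(bam_handle, pipe_index) == 0)
+ *		(void)sps_pipe_reset(bam_handle, pipe_index);
+ */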
+
+/*
+ * Check pending descriptors in the descriptor FIFO
+ * of a pipe
+ */
+int sps_pipe_pending_desc(unsigned long dev, u32 pipe, bool *pending)
+{
+
+	struct sps_bam *bam;
+
+	if (!dev) {
+		SPS_ERR(sps, "sps:%s:BAM handle is NULL.\n", __func__);
+		return SPS_ERROR;
+	}
+
+	if (pipe >= BAM_MAX_PIPES) {
+		SPS_ERR(sps, "sps:%s:pipe index is invalid.\n", __func__);
+		return SPS_ERROR;
+	}
+
+	if (!pending) {
+		SPS_ERR(sps, "sps:%s:input flag is NULL.\n", __func__);
+		return SPS_ERROR;
+	}
+
+	bam = sps_h2bam(dev);
+	if (bam == NULL) {
+		SPS_ERR(sps, "sps:%s:BAM is not found by handle.\n", __func__);
+		return SPS_ERROR;
+	}
+
+	SPS_DBG(bam, "sps:%s; BAM: %pa; pipe index:%d.\n",
+		__func__, BAM_ID(bam), pipe);
+
+	*pending = sps_bam_pipe_pending_desc(bam, pipe);
+
+	return 0;
+}
+EXPORT_SYMBOL(sps_pipe_pending_desc);
+
+/*
+ * Process any pending IRQ of a BAM
+ */
+int sps_bam_process_irq(unsigned long dev)
+{
+	struct sps_bam *bam;
+	int ret = 0;
+
+	if (!dev) {
+		SPS_ERR(sps, "sps:%s:BAM handle is NULL.\n", __func__);
+		return SPS_ERROR;
+	}
+
+	bam = sps_h2bam(dev);
+	if (bam == NULL) {
+		SPS_ERR(sps, "sps:%s:BAM is not found by handle.\n", __func__);
+		return SPS_ERROR;
+	}
+
+	SPS_DBG1(bam, "sps:%s; BAM: %pa.\n", __func__, BAM_ID(bam));
+
+	ret = sps_bam_check_irq(bam);
+
+	return ret;
+}
+EXPORT_SYMBOL(sps_bam_process_irq);
+
+/*
+ * Get address info of a BAM
+ */
+int sps_get_bam_addr(unsigned long dev, phys_addr_t *base,
+				u32 *size)
+{
+	struct sps_bam *bam;
+
+	if (!dev) {
+		SPS_ERR(sps, "sps:%s:BAM handle is NULL.\n", __func__);
+		return SPS_ERROR;
+	}
+
+	bam = sps_h2bam(dev);
+	if (bam == NULL) {
+		SPS_ERR(sps, "sps:%s:BAM is not found by handle.\n", __func__);
+		return SPS_ERROR;
+	}
+
+	*base = bam->props.phys_addr;
+	*size = bam->props.virt_size;
+
+	SPS_DBG2(bam, "sps:%s; BAM: %pa; base:%pa; size:%d.\n",
+		__func__, BAM_ID(bam), base, *size);
+
+	return 0;
+}
+EXPORT_SYMBOL(sps_get_bam_addr);
+
+/*
+ * Inject a ZLT with EOT for a BAM pipe
+ */
+int sps_pipe_inject_zlt(unsigned long dev, u32 pipe_index)
+{
+	struct sps_bam *bam;
+	int rc;
+
+	if (!dev) {
+		SPS_ERR(sps, "sps:%s:BAM handle is NULL.\n", __func__);
+		return SPS_ERROR;
+	}
+
+	if (pipe_index >= BAM_MAX_PIPES) {
+		SPS_ERR(sps, "sps:%s:pipe index is invalid.\n", __func__);
+		return SPS_ERROR;
+	}
+
+	bam = sps_h2bam(dev);
+	if (bam == NULL) {
+		SPS_ERR(sps, "sps:%s:BAM is not found by handle.\n", __func__);
+		return SPS_ERROR;
+	}
+
+	SPS_DBG(bam, "sps:%s; BAM: %pa; pipe index:%d.\n",
+		__func__, BAM_ID(bam), pipe_index);
+
+	rc = sps_bam_pipe_inject_zlt(bam, pipe_index);
+	if (rc)
+		SPS_ERR(bam, "sps:%s:failed to inject a ZLT.\n", __func__);
+
+	return rc;
+}
+EXPORT_SYMBOL(sps_pipe_inject_zlt);
+
+/**
+ * Allocate client state context
+ *
+ */
+struct sps_pipe *sps_alloc_endpoint(void)
+{
+	struct sps_pipe *ctx = NULL;
+
+	SPS_DBG(sps, "sps:%s.", __func__);
+
+	ctx = kzalloc(sizeof(struct sps_pipe), GFP_KERNEL);
+	if (ctx == NULL) {
+		SPS_ERR(sps, "sps:%s:Fail to allocate pipe context.",
+				__func__);
+		return NULL;
+	}
+
+	sps_client_init(ctx);
+
+	return ctx;
+}
+EXPORT_SYMBOL(sps_alloc_endpoint);
+
+/**
+ * Free client state context
+ *
+ */
+int sps_free_endpoint(struct sps_pipe *ctx)
+{
+	int res;
+
+	SPS_DBG(sps, "sps:%s.", __func__);
+
+	if (ctx == NULL) {
+		SPS_ERR(sps, "sps:%s:pipe is NULL.\n", __func__);
+		return SPS_ERROR;
+	}
+
+	res = sps_client_de_init(ctx);
+
+	if (res == 0)
+		kfree(ctx);
+
+	return res;
+}
+EXPORT_SYMBOL(sps_free_endpoint);
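+
+/*
+ * Illustrative pairing of the two endpoint calls above:
+ * sps_alloc_endpoint() returns NULL on failure, and sps_free_endpoint()
+ * frees the context only when sps_client_de_init() reports success, so
+ * its return value should be checked:
+ *
+ *	struct sps_pipe *pipe = sps_alloc_endpoint();
+ *
+ *	if (pipe != NULL) {
+ *		// ... sps_connect()/sps_disconnect(), transfers, etc. ...
+ *		if (sps_free_endpoint(pipe))
+ *			pr_err("sps: endpoint still in use\n");
+ *	}
+ */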
+
+/**
+ * Platform Driver.
+ */
+static int get_platform_data(struct platform_device *pdev)
+{
+	struct resource *resource;
+	struct msm_sps_platform_data *pdata;
+
+	SPS_DBG3(sps, "sps:%s.", __func__);
+
+	pdata = pdev->dev.platform_data;
+
+	if (pdata == NULL) {
+		SPS_ERR(sps, "sps:%s:inavlid platform data.\n", __func__);
+		sps->bamdma_restricted_pipes = 0;
+		return -EINVAL;
+	} else {
+		sps->bamdma_restricted_pipes = pdata->bamdma_restricted_pipes;
+		SPS_DBG3(sps, "sps:bamdma_restricted_pipes=0x%x.\n",
+			sps->bamdma_restricted_pipes);
+	}
+
+	resource  = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+						 "pipe_mem");
+	if (resource) {
+		sps->pipemem_phys_base = resource->start;
+		sps->pipemem_size = resource_size(resource);
+		SPS_DBG3(sps, "sps:pipemem.base=%pa,size=0x%x.\n",
+			&sps->pipemem_phys_base,
+			sps->pipemem_size);
+	}
+
+#ifdef CONFIG_SPS_SUPPORT_BAMDMA
+	resource  = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+						 "bamdma_bam");
+	if (resource) {
+		sps->bamdma_bam_phys_base = resource->start;
+		sps->bamdma_bam_size = resource_size(resource);
+		SPS_DBG(sps, "sps:bamdma_bam.base=%pa,size=0x%x.",
+			&sps->bamdma_bam_phys_base,
+			sps->bamdma_bam_size);
+	}
+
+	resource  = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+						 "bamdma_dma");
+	if (resource) {
+		sps->bamdma_dma_phys_base = resource->start;
+		sps->bamdma_dma_size = resource_size(resource);
+		SPS_DBG(sps, "sps:bamdma_dma.base=%pa,size=0x%x.",
+			&sps->bamdma_dma_phys_base,
+			sps->bamdma_dma_size);
+	}
+
+	resource  = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
+						 "bamdma_irq");
+	if (resource) {
+		sps->bamdma_irq = resource->start;
+		SPS_DBG(sps, "sps:bamdma_irq=%d.", sps->bamdma_irq);
+	}
+#endif
+
+	return 0;
+}
+
+/**
+ * Read data from device tree
+ */
+static int get_device_tree_data(struct platform_device *pdev)
+{
+#ifdef CONFIG_SPS_SUPPORT_BAMDMA
+	struct resource *resource;
+
+	SPS_DBG(sps, "sps:%s.", __func__);
+
+	if (of_property_read_u32((&pdev->dev)->of_node,
+				"qcom,bam-dma-res-pipes",
+				&sps->bamdma_restricted_pipes))
+		SPS_DBG(sps,
+			"sps:%s:No restricted bamdma pipes on this target.\n",
+			__func__);
+	else
+		SPS_DBG(sps, "sps:bamdma_restricted_pipes=0x%x.",
+			sps->bamdma_restricted_pipes);
+
+	resource  = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (resource) {
+		sps->bamdma_bam_phys_base = resource->start;
+		sps->bamdma_bam_size = resource_size(resource);
+		SPS_DBG(sps, "sps:bamdma_bam.base=%pa,size=0x%x.",
+			&sps->bamdma_bam_phys_base,
+			sps->bamdma_bam_size);
+	} else {
+		SPS_ERR(sps, "sps:%s:BAM DMA BAM mem unavailable.", __func__);
+		return -ENODEV;
+	}
+
+	resource  = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+	if (resource) {
+		sps->bamdma_dma_phys_base = resource->start;
+		sps->bamdma_dma_size = resource_size(resource);
+		SPS_DBG(sps, "sps:bamdma_dma.base=%pa,size=0x%x.",
+			&sps->bamdma_dma_phys_base,
+			sps->bamdma_dma_size);
+	} else {
+		SPS_ERR(sps, "sps:%s:BAM DMA mem unavailable.", __func__);
+		return -ENODEV;
+	}
+
+	resource = platform_get_resource(pdev, IORESOURCE_MEM, 2);
+	if (resource) {
+		imem = true;
+		sps->pipemem_phys_base = resource->start;
+		sps->pipemem_size = resource_size(resource);
+		SPS_DBG(sps, "sps:pipemem.base=%pa,size=0x%x.",
+			&sps->pipemem_phys_base,
+			sps->pipemem_size);
+	} else {
+		imem = false;
+		SPS_DBG(sps, "sps:%s:No pipe memory on this target.\n",
+				__func__);
+	}
+
+	resource  = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+	if (resource) {
+		sps->bamdma_irq = resource->start;
+		SPS_DBG(sps, "sps:bamdma_irq=%d.", sps->bamdma_irq);
+	} else {
+		SPS_ERR(sps, "sps:%s:BAM DMA IRQ unavailable.", __func__);
+		return -ENODEV;
+	}
+#endif
+
+	if (of_property_read_u32((&pdev->dev)->of_node,
+				"qcom,device-type",
+				&d_type)) {
+		d_type = 3;
+		SPS_DBG3(sps, "sps:default device type %d.\n", d_type);
+	} else
+		SPS_DBG3(sps, "sps:device type is %d.", d_type);
+
+	enhd_pipe = of_property_read_bool((&pdev->dev)->of_node,
+			"qcom,pipe-attr-ee");
+	SPS_DBG3(sps, "sps:PIPE_ATTR_EE is %s supported.\n",
+			(enhd_pipe ? "" : "not"));
+
+	return 0;
+}
+
+static const struct of_device_id msm_sps_match[] = {
+	{	.compatible = "qcom,msm_sps",
+		.data = &bam_types[SPS_BAM_NDP]
+	},
+	{	.compatible = "qcom,msm_sps_4k",
+		.data = &bam_types[SPS_BAM_NDP_4K]
+	},
+	{}
+};
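+
+/*
+ * Illustrative device-tree fragment (a hypothetical node, shown only to
+ * tie the match table to the properties parsed in get_device_tree_data()
+ * above):
+ *
+ *	qcom,sps {
+ *		compatible = "qcom,msm_sps";
+ *		qcom,device-type = <3>;
+ *		qcom,pipe-attr-ee;
+ *	};
+ */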
+
+static int msm_sps_probe(struct platform_device *pdev)
+{
+	int ret = -ENODEV;
+
+	SPS_DBG3(sps, "sps:%s.", __func__);
+
+	if (pdev->dev.of_node) {
+		const struct of_device_id *match;
+
+		if (get_device_tree_data(pdev)) {
+			SPS_ERR(sps,
+				"sps:%s:Fail to get data from device tree.",
+				__func__);
+			return -ENODEV;
+		} else
+			SPS_DBG(sps, "%s", "sps:get data from device tree.");
+
+		match = of_match_device(msm_sps_match, &pdev->dev);
+		if (match) {
+			bam_type = *((enum sps_bam_type *)(match->data));
+			SPS_DBG3(sps, "sps:BAM type is:%d\n", bam_type);
+		} else {
+			bam_type = SPS_BAM_NDP;
+			SPS_DBG3(sps, "sps:use default BAM type:%d\n",
+				bam_type);
+		}
+	} else {
+		d_type = 0;
+		if (get_platform_data(pdev)) {
+			SPS_ERR(sps, "sps:%s:Fail to get platform data.",
+				__func__);
+			return -ENODEV;
+		} else
+			SPS_DBG(sps, "%s", "sps:get platform data.");
+		bam_type = SPS_BAM_LEGACY;
+	}
+
+	/* Create Device */
+	sps->dev_class = class_create(THIS_MODULE, SPS_DRV_NAME);
+
+	ret = alloc_chrdev_region(&sps->dev_num, 0, 1, SPS_DRV_NAME);
+	if (ret) {
+		SPS_ERR(sps, "sps:%s:alloc_chrdev_region err.", __func__);
+		goto alloc_chrdev_region_err;
+	}
+
+	sps->dev = device_create(sps->dev_class, NULL, sps->dev_num, sps,
+				SPS_DRV_NAME);
+	if (IS_ERR(sps->dev)) {
+		SPS_ERR(sps, "sps:%s:device_create err.", __func__);
+		goto device_create_err;
+	}
+
+	if (pdev->dev.of_node)
+		sps->dev->of_node = pdev->dev.of_node;
+
+	if (!d_type) {
+		sps->pmem_clk = clk_get(sps->dev, "mem_clk");
+		if (IS_ERR(sps->pmem_clk)) {
+			if (PTR_ERR(sps->pmem_clk) == -EPROBE_DEFER)
+				ret = -EPROBE_DEFER;
+			else
+				SPS_ERR(sps, "sps:%s:fail to get pmem_clk.",
+					__func__);
+			goto pmem_clk_err;
+		} else {
+			ret = clk_prepare_enable(sps->pmem_clk);
+			if (ret) {
+				SPS_ERR(sps,
+					"sps:%s:failed to enable pmem_clk.",
+					__func__);
+				goto pmem_clk_en_err;
+			}
+		}
+	}
+
+#ifdef CONFIG_SPS_SUPPORT_BAMDMA
+	sps->dfab_clk = clk_get(sps->dev, "dfab_clk");
+	if (IS_ERR(sps->dfab_clk)) {
+		if (PTR_ERR(sps->dfab_clk) == -EPROBE_DEFER)
+			ret = -EPROBE_DEFER;
+		else
+			SPS_ERR(sps, "sps:%s:fail to get dfab_clk.", __func__);
+		goto dfab_clk_err;
+	} else {
+		ret = clk_set_rate(sps->dfab_clk, 64000000);
+		if (ret) {
+			SPS_ERR(sps, "sps:%s:failed to set dfab_clk rate.",
+				__func__);
+			clk_put(sps->dfab_clk);
+			goto dfab_clk_err;
+		}
+	}
+
+	sps->bamdma_clk = clk_get(sps->dev, "dma_bam_pclk");
+	if (IS_ERR(sps->bamdma_clk)) {
+		if (PTR_ERR(sps->bamdma_clk) == -EPROBE_DEFER)
+			ret = -EPROBE_DEFER;
+		else
+			SPS_ERR(sps, "sps:%s:fail to get bamdma_clk.",
+				__func__);
+		clk_put(sps->dfab_clk);
+		goto dfab_clk_err;
+	} else {
+		ret = clk_prepare_enable(sps->bamdma_clk);
+		if (ret) {
+			SPS_ERR(sps, "sps:failed to enable bamdma_clk. ret=%d",
+									ret);
+			clk_put(sps->bamdma_clk);
+			clk_put(sps->dfab_clk);
+			goto dfab_clk_err;
+		}
+	}
+
+	ret = clk_prepare_enable(sps->dfab_clk);
+	if (ret) {
+		SPS_ERR(sps, "sps:failed to enable dfab_clk. ret=%d", ret);
+		clk_disable_unprepare(sps->bamdma_clk);
+		clk_put(sps->bamdma_clk);
+		clk_put(sps->dfab_clk);
+		goto dfab_clk_err;
+	}
+#endif
+	ret = sps_device_init();
+	if (ret) {
+		SPS_ERR(sps, "sps:%s:sps_device_init err.", __func__);
+
+#ifdef CONFIG_SPS_SUPPORT_BAMDMA
+		clk_disable_unprepare(sps->dfab_clk);
+		clk_disable_unprepare(sps->bamdma_clk);
+		clk_put(sps->bamdma_clk);
+		clk_put(sps->dfab_clk);
+#endif
+		goto dfab_clk_err;
+	}
+#ifdef CONFIG_SPS_SUPPORT_BAMDMA
+	clk_disable_unprepare(sps->dfab_clk);
+	clk_disable_unprepare(sps->bamdma_clk);
+#endif
+	sps->is_ready = true;
+
+	SPS_INFO(sps, "%s", "sps:sps is ready.\n");
+
+	return 0;
+dfab_clk_err:
+	if (!d_type)
+		clk_disable_unprepare(sps->pmem_clk);
+pmem_clk_en_err:
+	if (!d_type)
+		clk_put(sps->pmem_clk);
+pmem_clk_err:
+	device_destroy(sps->dev_class, sps->dev_num);
+device_create_err:
+	unregister_chrdev_region(sps->dev_num, 1);
+alloc_chrdev_region_err:
+	class_destroy(sps->dev_class);
+
+	return ret;
+}
+
+static int msm_sps_remove(struct platform_device *pdev)
+{
+	SPS_DBG3(sps, "sps:%s.\n", __func__);
+
+	device_destroy(sps->dev_class, sps->dev_num);
+	unregister_chrdev_region(sps->dev_num, 1);
+	class_destroy(sps->dev_class);
+	sps_device_de_init();
+
+	clk_put(sps->dfab_clk);
+	if (!d_type)
+		clk_put(sps->pmem_clk);
+	clk_put(sps->bamdma_clk);
+
+	return 0;
+}
+
+static struct platform_driver msm_sps_driver = {
+	.probe          = msm_sps_probe,
+	.driver		= {
+		.name	= SPS_DRV_NAME,
+		.owner	= THIS_MODULE,
+		.of_match_table = msm_sps_match,
+		.suppress_bind_attrs = true,
+	},
+	.remove		= msm_sps_remove,
+};
+
+/**
+ * Module Init.
+ */
+static int __init sps_init(void)
+{
+	int ret;
+
+#ifdef CONFIG_DEBUG_FS
+	sps_debugfs_init();
+#endif
+
+	pr_debug("sps:%s.", __func__);
+
+	/* Allocate the SPS driver state struct */
+	sps = kzalloc(sizeof(*sps), GFP_KERNEL);
+	if (sps == NULL)
+		return -ENOMEM;
+
+	sps->ipc_log0 = ipc_log_context_create(SPS_IPC_LOGPAGES,
+							"sps_ipc_log0", 0);
+	if (!sps->ipc_log0)
+		pr_err("Failed to create IPC log0\n");
+	sps->ipc_log1 = ipc_log_context_create(SPS_IPC_LOGPAGES,
+							"sps_ipc_log1", 0);
+	if (!sps->ipc_log1)
+		pr_err("Failed to create IPC log1\n");
+	sps->ipc_log2 = ipc_log_context_create(SPS_IPC_LOGPAGES,
+							"sps_ipc_log2", 0);
+	if (!sps->ipc_log2)
+		pr_err("Failed to create IPC log2\n");
+	sps->ipc_log3 = ipc_log_context_create(SPS_IPC_LOGPAGES,
+							"sps_ipc_log3", 0);
+	if (!sps->ipc_log3)
+		pr_err("Failed to create IPC log3\n");
+	sps->ipc_log4 = ipc_log_context_create(SPS_IPC_LOGPAGES *
+				SPS_IPC_REG_DUMP_FACTOR, "sps_ipc_log4", 0);
+	if (!sps->ipc_log4)
+		pr_err("Failed to create IPC log4\n");
+
+	ret = platform_driver_register(&msm_sps_driver);
+
+	return ret;
+}
+
+/**
+ * Module Exit.
+ */
+static void __exit sps_exit(void)
+{
+	pr_debug("sps:%s.", __func__);
+
+	platform_driver_unregister(&msm_sps_driver);
+
+	if (sps != NULL) {
+		kfree(sps);
+		sps = NULL;
+	}
+
+#ifdef CONFIG_DEBUG_FS
+	sps_debugfs_exit();
+#endif
+}
+
+arch_initcall(sps_init);
+module_exit(sps_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Smart Peripheral Switch (SPS)");
diff -Nruw linux-4.4.115-fbx/drivers/platform/msm./sps/sps_core.h linux-4.4.115-fbx/drivers/platform/msm/sps/sps_core.h
--- linux-4.4.115-fbx/drivers/platform/msm./sps/sps_core.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/platform/msm/sps/sps_core.h	2019-01-22 16:16:26.187270712 +0100
@@ -0,0 +1,102 @@
+/* Copyright (c) 2011, 2013, 2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/*
+ * Function and data structure declarations.
+ */
+
+#ifndef _SPS_CORE_H_
+#define _SPS_CORE_H_
+
+#include <linux/types.h>	/* u32 */
+#include <linux/mutex.h>	/* mutex */
+#include <linux/list.h>		/* list_head */
+
+#include "spsi.h"
+#include "sps_bam.h"
+
+/* Connection state definitions */
+#define SPS_STATE_DEF(x)   ('S' | ('P' << 8) | ('S' << 16) | ((x) << 24))
+#define IS_SPS_STATE_OK(x) \
+	(((x)->client_state & 0x00ffffff) == SPS_STATE_DEF(0))
+
+/* Configuration indicating satellite connection */
+#define SPS_CONFIG_SATELLITE  0x11111111
+
+/* Client connection state */
+#define SPS_STATE_DISCONNECT  0
+#define SPS_STATE_ALLOCATE    SPS_STATE_DEF(1)
+#define SPS_STATE_CONNECT     SPS_STATE_DEF(2)
+#define SPS_STATE_ENABLE      SPS_STATE_DEF(3)
+#define SPS_STATE_DISABLE     SPS_STATE_DEF(4)
+
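+/*
+ * Worked example of the encoding above: with 'S' = 0x53 and 'P' = 0x50,
+ * SPS_STATE_DEF(x) evaluates to ((x) << 24) | 0x535053, so
+ * SPS_STATE_CONNECT is 0x02535053.  IS_SPS_STATE_OK() masks off the
+ * state byte and checks that the low three bytes still carry the "SPS"
+ * signature, i.e. that they equal SPS_STATE_DEF(0) = 0x00535053.
+ */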
+
+/**
+ * Find the BAM device from the handle
+ *
+ * This function finds a BAM device in the BAM registration list that
+ * matches the specified device handle.
+ *
+ * @h - device handle of the BAM
+ *
+ * @return - pointer to the BAM device struct, or NULL on error
+ *
+ */
+struct sps_bam *sps_h2bam(unsigned long h);
+
+/**
+ * Initialize resource manager module
+ *
+ * This function initializes the resource manager module.
+ *
+ * @rm - pointer to resource manager struct
+ *
+ * @options - driver options bitflags (see SPS_OPT_*)
+ *
+ * @return 0 on success, negative value on error
+ *
+ */
+int sps_rm_init(struct sps_rm *rm, u32 options);
+
+/**
+ * De-initialize resource manager module
+ *
+ * This function de-initializes the resource manager module.
+ *
+ */
+void sps_rm_de_init(void);
+
+/**
+ * Initialize client state context
+ *
+ * This function initializes a client state context struct.
+ *
+ * @connect - pointer to client connection state struct
+ *
+ */
+void sps_rm_config_init(struct sps_connect *connect);
+
+/**
+ * Process connection state change
+ *
+ * This function processes a connection state change.
+ *
+ * @pipe - pointer to pipe context
+ *
+ * @state - new state for connection
+ *
+ * @return 0 on success, negative value on error
+ *
+ */
+int sps_rm_state_change(struct sps_pipe *pipe, u32 state);
+
+#endif				/* _SPS_CORE_H_ */
diff -Nruw linux-4.4.115-fbx/drivers/platform/msm./sps/sps_dma.c linux-4.4.115-fbx/drivers/platform/msm/sps/sps_dma.c
--- linux-4.4.115-fbx/drivers/platform/msm./sps/sps_dma.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/platform/msm/sps/sps_dma.c	2019-10-29 09:26:24.617212710 +0100
@@ -0,0 +1,924 @@
+/* Copyright (c) 2011-2013, 2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/* BAM-DMA Manager. */
+
+#ifdef CONFIG_SPS_SUPPORT_BAMDMA
+
+#include <linux/export.h>
+#include <linux/memory.h>	/* memset */
+
+#include "spsi.h"
+#include "bam.h"
+#include "sps_bam.h"		/* bam_dma_thresh_dma */
+#include "sps_core.h"		/* sps_h2bam() */
+
+/**
+ * registers
+ */
+
+#define DMA_ENBL			(0x00000000)
+#ifdef CONFIG_SPS_SUPPORT_NDP_BAM
+#define DMA_REVISION			(0x00000004)
+#define DMA_CONFIG			(0x00000008)
+#define DMA_CHNL_CONFIG(n)		(0x00001000 + 4096 * (n))
+#else
+#define DMA_CHNL_CONFIG(n)		(0x00000004 + 4 * (n))
+#define DMA_CONFIG			(0x00000040)
+#endif
+
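+/*
+ * Worked example of the channel-config offsets above: on an NDP BAM the
+ * per-channel registers sit in 4 KB blocks, so DMA_CHNL_CONFIG(2) is
+ * 0x1000 + 4096 * 2 = 0x3000; on a legacy BAM they are packed as
+ * consecutive words, so DMA_CHNL_CONFIG(2) is 0x4 + 4 * 2 = 0xc.
+ */
+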
+/**
+ * masks
+ */
+
+/* DMA_CHNL_confign */
+#ifdef CONFIG_SPS_SUPPORT_NDP_BAM
+#define DMA_CHNL_PRODUCER_PIPE_ENABLED	0x40000
+#define DMA_CHNL_CONSUMER_PIPE_ENABLED	0x20000
+#endif
+#define DMA_CHNL_HALT_DONE		0x10000
+#define DMA_CHNL_HALT			0x1000
+#define DMA_CHNL_ENABLE                 0x100
+#define DMA_CHNL_ACT_THRESH             0x30
+#define DMA_CHNL_WEIGHT                 0x7
+
+/* DMA_CONFIG */
+#define TESTBUS_SELECT                  0x3
+
+/**
+ * Write register with debug info.
+ *
+ * @base - bam base virtual address.
+ * @offset - register offset.
+ * @val - value to write.
+ *
+ */
+static inline void dma_write_reg(void *base, u32 offset, u32 val)
+{
+	iowrite32(val, base + offset);
+	SPS_DBG(sps, "sps:bamdma: write reg 0x%x w_val 0x%x.", offset, val);
+}
+
+/**
+ * Write register masked field with debug info.
+ *
+ * @base - bam base virtual address.
+ * @offset - register offset.
+ * @mask - register bitmask.
+ * @val - value to write.
+ *
+ */
+static inline void dma_write_reg_field(void *base, u32 offset,
+				       const u32 mask, u32 val)
+{
+	u32 shift = find_first_bit((void *)&mask, 32);
+	u32 tmp = ioread32(base + offset);
+
+	tmp &= ~mask;		/* clear written bits */
+	val = tmp | (val << shift);
+	iowrite32(val, base + offset);
+	SPS_DBG(sps, "sps:bamdma: write reg 0x%x w_val 0x%x.", offset, val);
+}
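+
+/*
+ * Worked example of the masked write above: for DMA_CHNL_ACT_THRESH
+ * (mask 0x30) find_first_bit() yields a shift of 4, so writing the
+ * value 2 sets bits 0x20; for DMA_CHNL_WEIGHT (mask 0x7) the shift is
+ * 0.  The bits covered by the mask are cleared and rewritten while the
+ * rest of the register value is preserved.
+ */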
+
+/* Round max number of pipes to nearest multiple of 2 */
+#define DMA_MAX_PIPES         ((BAM_MAX_PIPES / 2) * 2)
+
+/* Maximum number of BAM-DMAs supported */
+#define MAX_BAM_DMA_DEVICES   1
+
+/* Maximum number of BAMs that will be registered */
+#define MAX_BAM_DMA_BAMS      1
+
+/* Pipe enable check values */
+#define DMA_PIPES_STATE_DIFF     0
+#define DMA_PIPES_BOTH_DISABLED  1
+#define DMA_PIPES_BOTH_ENABLED   2
+
+/* Even pipe is tx/dest/input/write, odd pipe is rx/src/output/read */
+#define DMA_PIPE_IS_DEST(p)   (((p) & 1) == 0)
+#define DMA_PIPE_IS_SRC(p)    (((p) & 1) != 0)
+
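+/*
+ * Worked example of the pairing above: channel n owns pipes 2n and
+ * 2n + 1, so channel 3 uses pipe 6 as its dest/input/write end and
+ * pipe 7 as its src/output/read end; this is the same pairing that
+ * sps_alloc_dma_chan() reports to clients.
+ */
+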
+/* BAM DMA pipe state */
+enum bamdma_pipe_state {
+	PIPE_INACTIVE = 0,
+	PIPE_ACTIVE
+};
+
+/* BAM DMA channel state */
+enum bamdma_chan_state {
+	DMA_CHAN_STATE_FREE = 0,
+	DMA_CHAN_STATE_ALLOC_EXT,	/* Client allocation */
+	DMA_CHAN_STATE_ALLOC_INT	/* Internal (resource mgr) allocation */
+};
+
+struct bamdma_chan {
+	/* Allocation state */
+	enum bamdma_chan_state state;
+
+	/* BAM DMA channel configuration parameters */
+	u32 threshold;
+	enum sps_dma_priority priority;
+
+	/* HWIO channel configuration parameters */
+	enum bam_dma_thresh_dma thresh;
+	enum bam_dma_weight_dma weight;
+
+};
+
+/* BAM DMA device state */
+struct bamdma_device {
+	/* BAM-DMA device state */
+	int enabled;
+	int local;
+
+	/* BAM device state */
+	struct sps_bam *bam;
+
+	/* BAM handle, for deregistration */
+	unsigned long h;
+
+	/* BAM DMA device virtual mapping */
+	void *virt_addr;
+	int virtual_mapped;
+	phys_addr_t phys_addr;
+	void *hwio;
+
+	/* BAM DMA pipe/channel state */
+	u32 num_pipes;
+	enum bamdma_pipe_state pipes[DMA_MAX_PIPES];
+	struct bamdma_chan chans[DMA_MAX_PIPES / 2];
+
+};
+
+/* BAM-DMA devices */
+static struct bamdma_device bam_dma_dev[MAX_BAM_DMA_DEVICES];
+static struct mutex bam_dma_lock;
+
+/*
+ * The BAM DMA module registers all BAMs in the BSP properties, but only
+ * uses the first BAM-DMA device for allocations.  References to the others
+ * are stored in the following data array.
+ */
+static int num_bams;
+static unsigned long bam_handles[MAX_BAM_DMA_BAMS];
+
+/**
+ * Find BAM-DMA device
+ *
+ * This function finds the BAM-DMA device associated with the BAM handle.
+ *
+ * @h - BAM handle
+ *
+ * @return - pointer to BAM-DMA device, or NULL on error
+ *
+ */
+static struct bamdma_device *sps_dma_find_device(unsigned long h)
+{
+	return &bam_dma_dev[0];
+}
+
+/**
+ * BAM DMA device enable
+ *
+ * This function enables a BAM DMA device and the associated BAM.
+ *
+ * @dev - pointer to BAM DMA device context
+ *
+ * @return 0 on success, negative value on error
+ *
+ */
+static int sps_dma_device_enable(struct bamdma_device *dev)
+{
+	if (dev->enabled)
+		return 0;
+
+	/*
+	 *  If the BAM-DMA device is locally controlled then enable BAM-DMA
+	 *  device
+	 */
+	if (dev->local)
+		dma_write_reg(dev->virt_addr, DMA_ENBL, 1);
+
+	/* Enable BAM device */
+	if (sps_bam_enable(dev->bam)) {
+		SPS_ERR(sps, "sps:Failed to enable BAM DMA's BAM: %pa",
+			&dev->phys_addr);
+		return SPS_ERROR;
+	}
+
+	dev->enabled = true;
+
+	return 0;
+}
+
+/**
+ * BAM DMA device disable
+ *
+ * This function disables a BAM DMA device and the associated BAM.
+ *
+ * @dev - pointer to BAM DMA device context
+ *
+ * @return 0 on success, negative value on error
+ *
+ */
+static int sps_dma_device_disable(struct bamdma_device *dev)
+{
+	u32 pipe_index;
+
+	if (!dev->enabled)
+		return 0;
+
+	/* Do not disable if channels active */
+	for (pipe_index = 0; pipe_index < dev->num_pipes; pipe_index++) {
+		if (dev->pipes[pipe_index] != PIPE_INACTIVE)
+			break;
+	}
+
+	if (pipe_index < dev->num_pipes) {
+		SPS_ERR(sps,
+			"sps:Fail to disable BAM-DMA %pa:channels are active",
+			&dev->phys_addr);
+		return SPS_ERROR;
+	}
+
+	dev->enabled = false;
+
+	/* Disable BAM device */
+	if (sps_bam_disable(dev->bam)) {
+		SPS_ERR(sps,
+			"sps:Fail to disable BAM-DMA BAM:%pa", &dev->phys_addr);
+		return SPS_ERROR;
+	}
+
+	/* Is the BAM-DMA device locally controlled? */
+	if (dev->local)
+		/* Disable BAM-DMA device */
+		dma_write_reg(dev->virt_addr, DMA_ENBL, 0);
+
+	return 0;
+}
+
+/**
+ * Initialize BAM DMA device
+ *
+ */
+int sps_dma_device_init(unsigned long h)
+{
+	struct bamdma_device *dev;
+	struct sps_bam_props *props;
+	int result = SPS_ERROR;
+
+	mutex_lock(&bam_dma_lock);
+
+	/* Find a free BAM-DMA device slot */
+	dev = NULL;
+	if (bam_dma_dev[0].bam != NULL) {
+		SPS_ERR(sps,
+			"sps:%s:BAM-DMA BAM device is already initialized.",
+			__func__);
+		goto exit_err;
+	} else {
+		dev = &bam_dma_dev[0];
+	}
+
+	/* Record BAM */
+	memset(dev, 0, sizeof(*dev));
+	dev->h = h;
+	dev->bam = sps_h2bam(h);
+
+	if (dev->bam == NULL) {
+		SPS_ERR(sps,
+			"sps:%s:BAM-DMA BAM device is not found from the handle.",
+			__func__);
+		goto exit_err;
+	}
+
+	/* Map the BAM DMA device into virtual space, if necessary */
+	props = &dev->bam->props;
+	dev->phys_addr = props->periph_phys_addr;
+	if (props->periph_virt_addr != NULL) {
+		dev->virt_addr = props->periph_virt_addr;
+		dev->virtual_mapped = false;
+	} else {
+		if (props->periph_virt_size == 0) {
+			SPS_ERR(sps,
+				"sps:Unable to map BAM DMA IO memory: %pa %x",
+				&dev->phys_addr, props->periph_virt_size);
+			goto exit_err;
+		}
+
+		dev->virt_addr = ioremap(dev->phys_addr,
+					  props->periph_virt_size);
+		if (dev->virt_addr == NULL) {
+			SPS_ERR(sps,
+				"sps:Unable to map BAM DMA IO memory: %pa %x",
+				&dev->phys_addr, props->periph_virt_size);
+			goto exit_err;
+		}
+		dev->virtual_mapped = true;
+	}
+	dev->hwio = (void *) dev->virt_addr;
+
+	/* Is the BAM-DMA device locally controlled? */
+	if ((props->manage & SPS_BAM_MGR_DEVICE_REMOTE) == 0) {
+		SPS_DBG3(sps, "sps:BAM-DMA is controlled locally: %pa",
+			&dev->phys_addr);
+		dev->local = true;
+	} else {
+		SPS_DBG3(sps, "sps:BAM-DMA is controlled remotely: %pa",
+			&dev->phys_addr);
+		dev->local = false;
+	}
+
+	/*
+	 * Enable the BAM DMA and determine the number of pipes/channels.
+	 * Leave the BAM-DMA enabled, since it is always a shared device.
+	 */
+	if (sps_dma_device_enable(dev))
+		goto exit_err;
+
+	dev->num_pipes = dev->bam->props.num_pipes;
+
+	result = 0;
+exit_err:
+	if (result) {
+		if (dev != NULL) {
+			if (dev->virtual_mapped)
+				iounmap(dev->virt_addr);
+
+			dev->bam = NULL;
+		}
+	}
+
+	mutex_unlock(&bam_dma_lock);
+
+	return result;
+}
+
+/**
+ * De-initialize BAM DMA device
+ *
+ */
+int sps_dma_device_de_init(unsigned long h)
+{
+	struct bamdma_device *dev;
+	u32 pipe_index;
+	u32 chan;
+	int result = 0;
+
+	mutex_lock(&bam_dma_lock);
+
+	dev = sps_dma_find_device(h);
+	if (dev == NULL) {
+		SPS_ERR(sps, "sps:BAM-DMA: not registered: %lx", h);
+		result = SPS_ERROR;
+		goto exit_err;
+	}
+
+	/* Check for channel leaks */
+	for (chan = 0; chan < dev->num_pipes / 2; chan++) {
+		if (dev->chans[chan].state != DMA_CHAN_STATE_FREE) {
+			SPS_ERR(sps, "sps:BAM-DMA: channel not free: %d", chan);
+			result = SPS_ERROR;
+			dev->chans[chan].state = DMA_CHAN_STATE_FREE;
+		}
+	}
+	for (pipe_index = 0; pipe_index < dev->num_pipes; pipe_index++) {
+		if (dev->pipes[pipe_index] != PIPE_INACTIVE) {
+			SPS_ERR(sps, "sps:BAM-DMA: pipe not inactive: %d",
+					pipe_index);
+			result = SPS_ERROR;
+			dev->pipes[pipe_index] = PIPE_INACTIVE;
+		}
+	}
+
+	/* Disable BAM and BAM-DMA */
+	if (sps_dma_device_disable(dev))
+		result = SPS_ERROR;
+
+	dev->h = BAM_HANDLE_INVALID;
+	dev->bam = NULL;
+	if (dev->virtual_mapped)
+		iounmap(dev->virt_addr);
+
+exit_err:
+	mutex_unlock(&bam_dma_lock);
+
+	return result;
+}
+
+/**
+ * Initialize BAM DMA module
+ *
+ */
+int sps_dma_init(const struct sps_bam_props *bam_props)
+{
+	struct sps_bam_props props;
+	const struct sps_bam_props *bam_reg;
+	unsigned long h;
+
+	/* Init local data */
+	memset(&bam_dma_dev, 0, sizeof(bam_dma_dev));
+	num_bams = 0;
+	memset(bam_handles, 0, sizeof(bam_handles));
+
+	/* Create a mutex to control access to the BAM-DMA devices */
+	mutex_init(&bam_dma_lock);
+
+	/* Are there any BAM DMA devices? */
+	if (bam_props == NULL)
+		return 0;
+
+	/*
+	 * Registers all BAMs in the BSP properties, but only uses the first
+	 * BAM-DMA device for allocations.
+	 */
+	if (bam_props->phys_addr) {
+		/* Force multi-EE option for all BAM-DMAs */
+		bam_reg = bam_props;
+		if ((bam_props->options & SPS_BAM_OPT_BAMDMA) &&
+		    (bam_props->manage & SPS_BAM_MGR_MULTI_EE) == 0) {
+			SPS_DBG(sps,
+				"sps:Setting multi-EE options for BAM-DMA: %pa",
+				&bam_props->phys_addr);
+			props = *bam_props;
+			props.manage |= SPS_BAM_MGR_MULTI_EE;
+			bam_reg = &props;
+		}
+
+		/* Register the BAM */
+		if (sps_register_bam_device(bam_reg, &h)) {
+			SPS_ERR(sps,
+				"sps:Fail to register BAM-DMA BAM device: "
+					"phys %pa", &bam_props->phys_addr);
+			return SPS_ERROR;
+		}
+
+		/* Record the BAM so that it may be deregistered later */
+		if (num_bams < MAX_BAM_DMA_BAMS) {
+			bam_handles[num_bams] = h;
+			num_bams++;
+		} else {
+			SPS_ERR(sps, "sps:BAM-DMA: BAM limit exceeded: %d",
+					num_bams);
+			return SPS_ERROR;
+		}
+	} else {
+		SPS_ERR(sps,
+			"sps:%s:BAM-DMA phys_addr is zero.",
+			__func__);
+		return SPS_ERROR;
+	}
+
+	return 0;
+}
+
+/**
+ * De-initialize BAM DMA module
+ *
+ */
+void sps_dma_de_init(void)
+{
+	int n;
+
+	/* De-initialize the BAM devices */
+	for (n = 0; n < num_bams; n++)
+		sps_deregister_bam_device(bam_handles[n]);
+
+	/* Clear local data */
+	memset(&bam_dma_dev, 0, sizeof(bam_dma_dev));
+	num_bams = 0;
+	memset(bam_handles, 0, sizeof(bam_handles));
+}
+
+/**
+ * Allocate a BAM DMA channel
+ *
+ */
+int sps_alloc_dma_chan(const struct sps_alloc_dma_chan *alloc,
+		       struct sps_dma_chan *chan_info)
+{
+	struct bamdma_device *dev;
+	struct bamdma_chan *chan;
+	u32 pipe_index;
+	enum bam_dma_thresh_dma thresh = (enum bam_dma_thresh_dma) 0;
+	enum bam_dma_weight_dma weight = (enum bam_dma_weight_dma) 0;
+	int result = SPS_ERROR;
+
+	if (alloc == NULL || chan_info == NULL) {
+		SPS_ERR(sps,
+			"sps:%s:invalid parameters", __func__);
+		return SPS_ERROR;
+	}
+
+	/* Translate threshold and priority to hwio values */
+	if (alloc->threshold != SPS_DMA_THRESHOLD_DEFAULT) {
+		if (alloc->threshold >= 512)
+			thresh = BAM_DMA_THRESH_512;
+		else if (alloc->threshold >= 256)
+			thresh = BAM_DMA_THRESH_256;
+		else if (alloc->threshold >= 128)
+			thresh = BAM_DMA_THRESH_128;
+		else
+			thresh = BAM_DMA_THRESH_64;
+	}
+
+	weight = alloc->priority;
+
+	if ((u32)alloc->priority > (u32)BAM_DMA_WEIGHT_HIGH) {
+		SPS_ERR(sps, "sps:BAM-DMA: invalid priority: %x",
+						alloc->priority);
+		return SPS_ERROR;
+	}
+
+	mutex_lock(&bam_dma_lock);
+
+	dev = sps_dma_find_device(alloc->dev);
+	if (dev == NULL) {
+		SPS_ERR(sps, "sps:BAM-DMA: invalid BAM handle: %lx",
+							alloc->dev);
+		goto exit_err;
+	}
+
+	/* Search for a free set of pipes */
+	for (pipe_index = 0, chan = dev->chans;
+	      pipe_index < dev->num_pipes; pipe_index += 2, chan++) {
+		if (chan->state == DMA_CHAN_STATE_FREE) {
+			/* Just check pipes for safety */
+			if (dev->pipes[pipe_index] != PIPE_INACTIVE ||
+			    dev->pipes[pipe_index + 1] != PIPE_INACTIVE) {
+				SPS_ERR(sps, "sps:BAM-DMA: channel %d state "
+					"error:%d %d",
+					pipe_index / 2, dev->pipes[pipe_index],
+				 dev->pipes[pipe_index + 1]);
+				goto exit_err;
+			}
+			break; /* Found free pipe */
+		}
+	}
+
+	if (pipe_index >= dev->num_pipes) {
+		SPS_ERR(sps, "sps:BAM-DMA: no free channel. num_pipes = %d",
+			dev->num_pipes);
+		goto exit_err;
+	}
+
+	chan->state = DMA_CHAN_STATE_ALLOC_EXT;
+
+	/* Store config values for use when pipes are activated */
+	chan = &dev->chans[pipe_index / 2];
+	chan->threshold = alloc->threshold;
+	chan->thresh = thresh;
+	chan->priority = alloc->priority;
+	chan->weight = weight;
+
+	SPS_DBG3(sps, "sps:sps_alloc_dma_chan. pipe %d.\n", pipe_index);
+
+	/* Report allocated pipes to client */
+	chan_info->dev = dev->h;
+	/* Dest/input/write pipe */
+	chan_info->dest_pipe_index = pipe_index;
+	/* Source/output/read pipe */
+	chan_info->src_pipe_index = pipe_index + 1;
+
+	result = 0;
+exit_err:
+	mutex_unlock(&bam_dma_lock);
+
+	return result;
+}
+EXPORT_SYMBOL(sps_alloc_dma_chan);
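+
+/*
+ * Illustrative allocation sketch; only the dev, threshold and priority
+ * fields used by the function above are shown, and the default values
+ * below are the ones it tests for:
+ *
+ *	struct sps_alloc_dma_chan alloc = {
+ *		.dev = sps_dma_get_bam_handle(),
+ *		.threshold = SPS_DMA_THRESHOLD_DEFAULT,
+ *		.priority = SPS_DMA_PRI_DEFAULT,
+ *	};
+ *	struct sps_dma_chan chan;
+ *
+ *	if (sps_alloc_dma_chan(&alloc, &chan) == 0) {
+ *		// chan.dest_pipe_index is the even pipe of the channel,
+ *		// chan.src_pipe_index the odd pipe right after it
+ *	}
+ */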
+
+/**
+ * Free a BAM DMA channel
+ *
+ */
+int sps_free_dma_chan(struct sps_dma_chan *chan)
+{
+	struct bamdma_device *dev;
+	u32 pipe_index;
+	int result = 0;
+
+	if (chan == NULL) {
+		SPS_ERR(sps,
+			"sps:%s:chan is NULL", __func__);
+		return SPS_ERROR;
+	}
+
+	mutex_lock(&bam_dma_lock);
+
+	dev = sps_dma_find_device(chan->dev);
+	if (dev == NULL) {
+		SPS_ERR(sps, "sps:BAM-DMA: invalid BAM handle: %lx", chan->dev);
+		result = SPS_ERROR;
+		goto exit_err;
+	}
+
+	/* Verify the pipe indices */
+	pipe_index = chan->dest_pipe_index;
+	if (pipe_index >= dev->num_pipes || (pipe_index & 1) ||
+	    (pipe_index + 1) != chan->src_pipe_index) {
+		SPS_ERR(sps,
+			"sps:sps_free_dma_chan. Invalid pipe indices. num_pipes=%d. dest=%d. src=%d.",
+			dev->num_pipes,
+			chan->dest_pipe_index,
+			chan->src_pipe_index);
+		result = SPS_ERROR;
+		goto exit_err;
+	}
+
+	/* Are both pipes inactive? */
+	if (dev->chans[pipe_index / 2].state != DMA_CHAN_STATE_ALLOC_EXT ||
+	    dev->pipes[pipe_index] != PIPE_INACTIVE ||
+	    dev->pipes[pipe_index + 1] != PIPE_INACTIVE) {
+		SPS_ERR(sps,
+			"sps:BAM-DMA: attempt to free active chan %d: %d %d",
+			pipe_index / 2, dev->pipes[pipe_index],
+			dev->pipes[pipe_index + 1]);
+		result = SPS_ERROR;
+		goto exit_err;
+	}
+
+	/* Free the channel */
+	dev->chans[pipe_index / 2].state = DMA_CHAN_STATE_FREE;
+
+exit_err:
+	mutex_unlock(&bam_dma_lock);
+
+	return result;
+}
+EXPORT_SYMBOL(sps_free_dma_chan);
+
+/**
+ * Check the enable state of a BAM DMA channel's pipe pair
+ *
+ * This function checks whether both pipes of a BAM DMA channel are
+ * enabled, both disabled, or in different states.
+ *
+ * @dev - pointer to BAM-DMA device descriptor
+ *
+ * @pipe_index - index of either pipe of the channel
+ *
+ * @return one of the DMA_PIPES_* check values
+ *
+ */
+static u32 sps_dma_check_pipes(struct bamdma_device *dev, u32 pipe_index)
+{
+	u32 pipe_in;
+	u32 pipe_out;
+	int enabled_in;
+	int enabled_out;
+	u32 check;
+
+	pipe_in = pipe_index & ~1;
+	pipe_out = pipe_in + 1;
+	enabled_in = bam_pipe_is_enabled(&dev->bam->base, pipe_in);
+	enabled_out = bam_pipe_is_enabled(&dev->bam->base, pipe_out);
+
+	if (!enabled_in && !enabled_out)
+		check = DMA_PIPES_BOTH_DISABLED;
+	else if (enabled_in && enabled_out)
+		check = DMA_PIPES_BOTH_ENABLED;
+	else
+		check = DMA_PIPES_STATE_DIFF;
+
+	return check;
+}
+
+/**
+ * Allocate a BAM DMA pipe
+ *
+ */
+int sps_dma_pipe_alloc(void *bam_arg, u32 pipe_index, enum sps_mode dir)
+{
+	struct sps_bam *bam = bam_arg;
+	struct bamdma_device *dev;
+	struct bamdma_chan *chan;
+	u32 channel;
+	int result = SPS_ERROR;
+
+	if (bam == NULL) {
+		SPS_ERR(sps, "%s", "sps:BAM context is NULL");
+		return SPS_ERROR;
+	}
+
+	/* Check pipe direction */
+	if ((DMA_PIPE_IS_DEST(pipe_index) && dir != SPS_MODE_DEST) ||
+	    (DMA_PIPE_IS_SRC(pipe_index) && dir != SPS_MODE_SRC)) {
+		SPS_ERR(sps, "sps:BAM-DMA: wrong direction for BAM %pa pipe %d",
+			&bam->props.phys_addr, pipe_index);
+		return SPS_ERROR;
+	}
+
+	mutex_lock(&bam_dma_lock);
+
+	dev = sps_dma_find_device((unsigned long) bam);
+	if (dev == NULL) {
+		SPS_ERR(sps, "sps:BAM-DMA: invalid BAM: %pa",
+			&bam->props.phys_addr);
+		goto exit_err;
+	}
+	if (pipe_index >= dev->num_pipes) {
+		SPS_ERR(sps, "sps:BAM-DMA: BAM %pa invalid pipe: %d",
+			&bam->props.phys_addr, pipe_index);
+		goto exit_err;
+	}
+	if (dev->pipes[pipe_index] != PIPE_INACTIVE) {
+		SPS_ERR(sps, "sps:BAM-DMA: BAM %pa pipe %d already active",
+			&bam->props.phys_addr, pipe_index);
+		goto exit_err;
+	}
+
+	/* Mark pipe active */
+	dev->pipes[pipe_index] = PIPE_ACTIVE;
+
+	/* If channel is not allocated, make an internal allocation */
+	channel = pipe_index / 2;
+	chan = &dev->chans[channel];
+	if (chan->state != DMA_CHAN_STATE_ALLOC_EXT &&
+	    chan->state != DMA_CHAN_STATE_ALLOC_INT) {
+		chan->state = DMA_CHAN_STATE_ALLOC_INT;
+	}
+
+	result = 0;
+exit_err:
+	mutex_unlock(&bam_dma_lock);
+
+	return result;
+}
+
+/**
+ * Enable a BAM DMA pipe
+ *
+ */
+int sps_dma_pipe_enable(void *bam_arg, u32 pipe_index)
+{
+	struct sps_bam *bam = bam_arg;
+	struct bamdma_device *dev;
+	struct bamdma_chan *chan;
+	u32 channel;
+	int result = SPS_ERROR;
+
+	SPS_DBG3(sps, "sps:sps_dma_pipe_enable.pipe %d", pipe_index);
+
+	mutex_lock(&bam_dma_lock);
+
+	dev = sps_dma_find_device((unsigned long) bam);
+	if (dev == NULL) {
+		SPS_ERR(sps, "sps:%s:BAM-DMA: invalid BAM", __func__);
+		goto exit_err;
+	}
+	if (pipe_index >= dev->num_pipes) {
+		SPS_ERR(sps, "sps:BAM-DMA: BAM %pa invalid pipe: %d",
+			&bam->props.phys_addr, pipe_index);
+		goto exit_err;
+	}
+	if (dev->pipes[pipe_index] != PIPE_ACTIVE) {
+		SPS_ERR(sps, "sps:BAM-DMA: BAM %pa pipe %d not active",
+			&bam->props.phys_addr, pipe_index);
+		goto exit_err;
+	}
+
+	/*
+	 * The channel must be enabled when the dest/input/write pipe
+	 * is enabled
+	 */
+	if (DMA_PIPE_IS_DEST(pipe_index)) {
+		/* Configure and enable the channel */
+		channel = pipe_index / 2;
+		chan = &dev->chans[channel];
+
+		if (chan->threshold != SPS_DMA_THRESHOLD_DEFAULT)
+			dma_write_reg_field(dev->virt_addr,
+					    DMA_CHNL_CONFIG(channel),
+					    DMA_CHNL_ACT_THRESH,
+					    chan->thresh);
+
+		if (chan->priority != SPS_DMA_PRI_DEFAULT)
+			dma_write_reg_field(dev->virt_addr,
+					    DMA_CHNL_CONFIG(channel),
+					    DMA_CHNL_WEIGHT,
+					    chan->weight);
+
+		dma_write_reg_field(dev->virt_addr,
+				    DMA_CHNL_CONFIG(channel),
+				    DMA_CHNL_ENABLE, 1);
+	}
+
+	result = 0;
+exit_err:
+	mutex_unlock(&bam_dma_lock);
+
+	return result;
+}
+
+/**
+ * Deactivate a BAM DMA pipe
+ *
+ * This function deactivates a BAM DMA pipe.
+ *
+ * @dev - pointer to BAM-DMA device descriptor
+ *
+ * @bam - pointer to BAM device descriptor
+ *
+ * @pipe_index - pipe index
+ *
+ * @return 0 on success, negative value on error
+ *
+ */
+static int sps_dma_deactivate_pipe_atomic(struct bamdma_device *dev,
+					  struct sps_bam *bam,
+					  u32 pipe_index)
+{
+	u32 channel;
+
+	if (dev->bam != bam)
+		return SPS_ERROR;
+	if (pipe_index >= dev->num_pipes)
+		return SPS_ERROR;
+	if (dev->pipes[pipe_index] != PIPE_ACTIVE)
+		return SPS_ERROR;	/* Pipe is not active */
+
+	SPS_DBG3(sps, "sps:BAM-DMA: deactivate pipe %d", pipe_index);
+
+	/* Mark pipe inactive */
+	dev->pipes[pipe_index] = PIPE_INACTIVE;
+
+	/*
+	 * The channel must be disabled when either pipe is disabled, so just
+	 * always disable it regardless of the other pipe's state
+	 */
+	channel = pipe_index / 2;
+	dma_write_reg_field(dev->virt_addr, DMA_CHNL_CONFIG(channel),
+			    DMA_CHNL_ENABLE, 0);
+
+	/* If the peer pipe is also inactive, release the channel state */
+	if (sps_dma_check_pipes(dev, pipe_index) == DMA_PIPES_BOTH_DISABLED) {
+		/* Free channel if allocated internally */
+		if (dev->chans[channel].state == DMA_CHAN_STATE_ALLOC_INT)
+			dev->chans[channel].state = DMA_CHAN_STATE_FREE;
+	}
+
+	return 0;
+}
+
+/**
+ * Free a BAM DMA pipe
+ *
+ */
+int sps_dma_pipe_free(void *bam_arg, u32 pipe_index)
+{
+	struct bamdma_device *dev;
+	struct sps_bam *bam = bam_arg;
+	int result;
+
+	mutex_lock(&bam_dma_lock);
+
+	dev = sps_dma_find_device((unsigned long) bam);
+	if (dev == NULL) {
+		SPS_ERR(sps, "sps:%s:BAM-DMA: invalid BAM", __func__);
+		result = SPS_ERROR;
+		goto exit_err;
+	}
+
+	result = sps_dma_deactivate_pipe_atomic(dev, bam, pipe_index);
+
+exit_err:
+	mutex_unlock(&bam_dma_lock);
+
+	return result;
+}
+
+/**
+ * Get the BAM handle for BAM-DMA.
+ *
+ * The BAM handle should be used as the source/destination in sps_connect().
+ *
+ * @return bam handle on success, zero on error
+ */
+unsigned long sps_dma_get_bam_handle(void)
+{
+	return (unsigned long)bam_dma_dev[0].bam;
+}
+EXPORT_SYMBOL(sps_dma_get_bam_handle);
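+
+/*
+ * Note on the return value above: the handle is the BAM device pointer
+ * of the single BAM-DMA slot cast to unsigned long, so it reads as zero
+ * until sps_dma_device_init() has populated bam_dma_dev[0]; callers
+ * should treat a zero return as "BAM-DMA not available".
+ */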
+
+/**
+ * Free the BAM handle for BAM-DMA.
+ *
+ */
+void sps_dma_free_bam_handle(unsigned long h)
+{
+}
+EXPORT_SYMBOL(sps_dma_free_bam_handle);
+
+#endif /* CONFIG_SPS_SUPPORT_BAMDMA */
diff -Nruw linux-4.4.115-fbx/drivers/platform/msm./sps/spsi.h linux-4.4.115-fbx/drivers/platform/msm/sps/spsi.h
--- linux-4.4.115-fbx/drivers/platform/msm./sps/spsi.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/platform/msm/sps/spsi.h	2019-01-22 16:16:26.187270712 +0100
@@ -0,0 +1,530 @@
+/* Copyright (c) 2011-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/**
+ * Smart-Peripheral-Switch (SPS) internal API.
+ */
+
+#ifndef _SPSI_H_
+#define _SPSI_H_
+
+#include <linux/types.h>	/* u32 */
+#include <linux/list.h>		/* list_head */
+#include <linux/kernel.h>	/* pr_info() */
+#include <linux/compiler.h>
+#include <linux/ratelimit.h>
+#include <linux/ipc_logging.h>
+
+#include <linux/msm-sps.h>
+
+#include "sps_map.h"
+
+#if defined(CONFIG_PHYS_ADDR_T_64BIT) || defined(CONFIG_ARM_LPAE)
+#define SPS_LPAE (true)
+#else
+#define SPS_LPAE (false)
+#endif
+
+#define BAM_MAX_PIPES              31
+#define BAM_MAX_P_LOCK_GROUP_NUM   31
+
+/* Adjust for offset of struct sps_q_event */
+#define SPS_EVENT_INDEX(e)    ((e) - 1)
+#define SPS_ERROR -1
+
+/* BAM identifier used in log messages */
+#define BAM_ID(dev)       (&(dev)->props.phys_addr)
+
+/* "Clear" value for the connection parameter struct */
+#define SPSRM_CLEAR     0xccccccccUL
+#define SPSRM_ADDR_CLR \
+	((sizeof(int) == sizeof(long)) ? 0 : (SPSRM_CLEAR << 32))
+
+#define MAX_MSG_LEN 80
+#define SPS_IPC_LOGPAGES 10
+#define SPS_IPC_REG_DUMP_FACTOR 3
+#define SPS_IPC_DEFAULT_LOGLEVEL 3
+#define SPS_IPC_MAX_LOGLEVEL 4
+
+/* Connection mapping control struct */
+struct sps_rm {
+	struct list_head connections_q;
+	struct mutex lock;
+};
+
+/* SPS driver state struct */
+struct sps_drv {
+	struct class *dev_class;
+	dev_t dev_num;
+	struct device *dev;
+	struct clk *pmem_clk;
+	struct clk *bamdma_clk;
+	struct clk *dfab_clk;
+
+	int is_ready;
+
+	/* Platform data */
+	phys_addr_t pipemem_phys_base;
+	u32 pipemem_size;
+	phys_addr_t bamdma_bam_phys_base;
+	u32 bamdma_bam_size;
+	phys_addr_t bamdma_dma_phys_base;
+	u32 bamdma_dma_size;
+	u32 bamdma_irq;
+	u32 bamdma_restricted_pipes;
+
+	/* Driver options bitflags (see SPS_OPT_*) */
+	u32 options;
+
+	/* Mutex to protect BAM and connection queues */
+	struct mutex lock;
+
+	/* BAM devices */
+	struct list_head bams_q;
+
+	char *hal_bam_version;
+
+	/* Connection control state */
+	struct sps_rm connection_ctrl;
+
+	void *ipc_log0;
+	void *ipc_log1;
+	void *ipc_log2;
+	void *ipc_log3;
+	void *ipc_log4;
+
+	u32 ipc_loglevel;
+};
+
+extern struct sps_drv *sps;
+extern u32 d_type;
+extern bool enhd_pipe;
+extern bool imem;
+extern enum sps_bam_type bam_type;
+
+#ifdef CONFIG_DEBUG_FS
+extern u8 debugfs_record_enabled;
+extern u8 logging_option;
+extern u8 debug_level_option;
+extern u8 print_limit_option;
+
+#define SPS_IPC(idx, dev, msg, args...) do { \
+		if (dev) { \
+			if ((idx == 0) && (dev)->ipc_log0) \
+				ipc_log_string((dev)->ipc_log0, \
+					"%s: " msg, __func__, args); \
+			else if ((idx == 1) && (dev)->ipc_log1) \
+				ipc_log_string((dev)->ipc_log1, \
+					"%s: " msg, __func__, args); \
+			else if ((idx == 2) && (dev)->ipc_log2) \
+				ipc_log_string((dev)->ipc_log2, \
+					"%s: " msg, __func__, args); \
+			else if ((idx == 3) && (dev)->ipc_log3) \
+				ipc_log_string((dev)->ipc_log3, \
+					"%s: " msg, __func__, args); \
+			else if ((idx == 4) && (dev)->ipc_log4) \
+				ipc_log_string((dev)->ipc_log4, \
+					"%s: " msg, __func__, args); \
+			else \
+				pr_debug("sps: no such IPC logging index!\n"); \
+		} \
+	} while (0)
+#define SPS_DUMP(msg, args...) do {					\
+		SPS_IPC(4, sps, msg, args); \
+		if (sps) { \
+			if (sps->ipc_log4 == NULL) \
+				pr_info(msg, ##args);	\
+		} \
+	} while (0)
+#define SPS_ERR(dev, msg, args...) do {					\
+		if (logging_option != 1) {	\
+			if (unlikely(print_limit_option > 2))	\
+				pr_err_ratelimited(msg, ##args);	\
+			else	\
+				pr_err(msg, ##args);	\
+		}	\
+		SPS_IPC(3, dev, msg, args); \
+	} while (0)
+#define SPS_INFO(dev, msg, args...) do {				\
+		if (logging_option != 1) {	\
+			if (unlikely(print_limit_option > 1))	\
+				pr_info_ratelimited(msg, ##args);	\
+			else	\
+				pr_info(msg, ##args);	\
+		}	\
+		SPS_IPC(3, dev, msg, args); \
+	} while (0)
+#define SPS_DBG(dev, msg, args...) do {					\
+		if ((unlikely(logging_option > 1))	\
+			&& (unlikely(debug_level_option > 3))) {\
+			if (unlikely(print_limit_option > 0))	\
+				pr_info_ratelimited(msg, ##args);	\
+			else	\
+				pr_info(msg, ##args);	\
+		} else	\
+			pr_debug(msg, ##args);	\
+		if (dev) { \
+			if ((dev)->ipc_loglevel <= 0)	\
+				SPS_IPC(0, dev, msg, args); \
+		}	\
+	} while (0)
+#define SPS_DBG1(dev, msg, args...) do {				\
+		if ((unlikely(logging_option > 1))	\
+			&& (unlikely(debug_level_option > 2))) {\
+			if (unlikely(print_limit_option > 0))	\
+				pr_info_ratelimited(msg, ##args);	\
+			else	\
+				pr_info(msg, ##args);	\
+		} else	\
+			pr_debug(msg, ##args);	\
+		if (dev) { \
+			if ((dev)->ipc_loglevel <= 1)	\
+				SPS_IPC(1, dev, msg, args);	\
+		}	\
+	} while (0)
+#define SPS_DBG2(dev, msg, args...) do {				\
+		if ((unlikely(logging_option > 1))	\
+			&& (unlikely(debug_level_option > 1))) {\
+			if (unlikely(print_limit_option > 0))	\
+				pr_info_ratelimited(msg, ##args);	\
+			else	\
+				pr_info(msg, ##args);	\
+		} else	\
+			pr_debug(msg, ##args);	\
+		if (dev) { \
+			if ((dev)->ipc_loglevel <= 2)	\
+				SPS_IPC(2, dev, msg, args); \
+		}	\
+	} while (0)
+#define SPS_DBG3(dev, msg, args...) do {				\
+		if ((unlikely(logging_option > 1))	\
+			&& (unlikely(debug_level_option > 0))) {\
+			if (unlikely(print_limit_option > 0))	\
+				pr_info_ratelimited(msg, ##args);	\
+			else	\
+				pr_info(msg, ##args);	\
+		} else	\
+			pr_debug(msg, ##args);	\
+		if (dev) { \
+			if ((dev)->ipc_loglevel <= 3)	\
+				SPS_IPC(3, dev, msg, args); \
+		}	\
+	} while (0)
+#else
+#define	SPS_DBG3(x...)		pr_debug(x)
+#define	SPS_DBG2(x...)		pr_debug(x)
+#define	SPS_DBG1(x...)		pr_debug(x)
+#define	SPS_DBG(x...)		pr_debug(x)
+#define	SPS_INFO(x...)		pr_info(x)
+#define	SPS_ERR(x...)		pr_err(x)
+#define	SPS_DUMP(x...)		pr_info(x)
+#endif
+
+/* End point parameters */
+struct sps_conn_end_pt {
+	unsigned long dev;		/* Device handle of BAM */
+	phys_addr_t bam_phys;		/* Physical address of BAM. */
+	u32 pipe_index;		/* Pipe index */
+	u32 event_threshold;	/* Pipe event threshold */
+	u32 lock_group;	/* The lock group this pipe belongs to */
+	void *bam;
+};
+
+/* Connection bookkeeping descriptor struct */
+struct sps_connection {
+	struct list_head list;
+
+	/* Source end point parameters */
+	struct sps_conn_end_pt src;
+
+	/* Destination end point parameters */
+	struct sps_conn_end_pt dest;
+
+	/* Resource parameters */
+	struct sps_mem_buffer desc;	/* Descriptor FIFO */
+	struct sps_mem_buffer data;	/* Data FIFO (BAM-to-BAM mode only) */
+	u32 config;		/* Client specified connection configuration */
+
+	/* Connection state */
+	void *client_src;
+	void *client_dest;
+	int refs;		/* Reference counter */
+
+	/* Dynamically allocated resources, if required */
+	u32 alloc_src_pipe;	/* Source pipe index */
+	u32 alloc_dest_pipe;	/* Destination pipe index */
+	/* Physical address of descriptor FIFO */
+	phys_addr_t alloc_desc_base;
+	phys_addr_t alloc_data_base;	/* Physical address of data FIFO */
+};
+
+/* Event bookkeeping descriptor struct */
+struct sps_q_event {
+	struct list_head list;
+	/* Event payload data */
+	struct sps_event_notify notify;
+};
+
+/* Memory heap statistics */
+struct sps_mem_stats {
+	u32 base_addr;
+	u32 size;
+	u32 blocks_used;
+	u32 bytes_used;
+	u32 max_bytes_used;
+};
+
+enum sps_bam_type {
+	SPS_BAM_LEGACY,
+	SPS_BAM_NDP,
+	SPS_BAM_NDP_4K
+};
+
+#ifdef CONFIG_DEBUG_FS
+/* record debug info for debugfs */
+void sps_debugfs_record(const char *);
+#endif
+
+/* output the content of BAM-level registers */
+void print_bam_reg(void *);
+
+/* output the content of BAM pipe registers */
+void print_bam_pipe_reg(void *, u32);
+
+/* output the content of selected BAM-level registers */
+void print_bam_selected_reg(void *, u32);
+
+/* output the content of selected BAM pipe registers */
+void print_bam_pipe_selected_reg(void *, u32);
+
+/* output descriptor FIFO of a pipe */
+void print_bam_pipe_desc_fifo(void *, u32, u32);
+
+/* output BAM_TEST_BUS_REG */
+void print_bam_test_bus_reg(void *, u32);
+
+/* halt and un-halt a pipe */
+void bam_pipe_halt(void *, u32, bool);
+
+/**
+ * Translate physical to virtual address
+ *
+ * This Function translates physical to virtual address.
+ *
+ * @phys_addr - physical address to translate
+ *
+ * @return virtual memory pointer
+ *
+ */
+void *spsi_get_mem_ptr(phys_addr_t phys_addr);
+
+/**
+ * Allocate I/O (pipe) memory
+ *
+ * This function allocates target I/O (pipe) memory.
+ *
+ * @bytes - number of bytes to allocate
+ *
+ * @return physical address of allocated memory, or SPS_ADDR_INVALID on error
+ */
+phys_addr_t sps_mem_alloc_io(u32 bytes);
+
+/**
+ * Free I/O (pipe) memory
+ *
+ * This function frees target I/O (pipe) memory.
+ *
+ * @phys_addr - physical address of memory to free
+ *
+ * @bytes - number of bytes to free.
+ */
+void sps_mem_free_io(phys_addr_t phys_addr, u32 bytes);
+
+/**
+ * Find matching connection mapping
+ *
+ * This function searches for a connection mapping that matches the
+ * parameters supplied by the client.  If a match is found, the client's
+ * parameter struct is updated with the values specified in the mapping.
+ *
+ * @connect - pointer to client connection parameters
+ *
+ * @return 0 if match is found, negative value otherwise
+ *
+ */
+int sps_map_find(struct sps_connect *connect);
+
+/**
+ * Allocate a BAM DMA pipe
+ *
+ * This function allocates a BAM DMA pipe, and is intended to be called
+ * internally from the BAM resource manager.  Allocation implies that
+ * the pipe has been referenced by a client Connect() and is in use.
+ *
+ * BAM DMA is permissive with activations, and allows a pipe to be allocated
+ * with or without a client-initiated allocation.  This allows the client to
+ * specify exactly which pipe should be used directly through the Connect() API.
+ * sps_dma_alloc_chan() does not allow the client to specify the pipes/channel.
+ *
+ * @bam - pointer to BAM device descriptor
+ *
+ * @pipe_index - pipe index
+ *
+ * @dir - pipe direction
+ *
+ * @return 0 on success, negative value on error
+ */
+int sps_dma_pipe_alloc(void *bam, u32 pipe_index, enum sps_mode dir);
+
+/**
+ * Enable a BAM DMA pipe
+ *
+ * This function enables the channel associated with a BAM DMA pipe, and
+ * is intended to be called internally from the BAM resource manager.
+ * Enable must occur *after* the pipe has been enabled so that proper
+ * sequencing between pipe and DMA channel enables can be enforced.
+ *
+ * @bam - pointer to BAM device descriptor
+ *
+ * @pipe_index - pipe index
+ *
+ * @return 0 on success, negative value on error
+ *
+ */
+int sps_dma_pipe_enable(void *bam, u32 pipe_index);
+
+/**
+ * Free a BAM DMA pipe
+ *
+ * This function disables and frees a BAM DMA pipe, and is intended to be
+ * called internally from the BAM resource manager.  This must occur *after*
+ * the pipe has been disabled/reset so that proper sequencing between pipe and
+ * DMA channel resets can be enforced.
+ *
+ * @bam_arg - pointer to BAM device descriptor
+ *
+ * @pipe_index - pipe index
+ *
+ * @return 0 on success, negative value on error
+ *
+ */
+int sps_dma_pipe_free(void *bam, u32 pipe_index);
+
+/**
+ * Initialize driver memory module
+ *
+ * This function initializes the driver memory module.
+ *
+ * @pipemem_phys_base - Pipe-Memory physical base.
+ *
+ * @pipemem_size - Pipe-Memory size.
+ *
+ * @return 0 on success, negative value on error
+ *
+ */
+int sps_mem_init(phys_addr_t pipemem_phys_base, u32 pipemem_size);
+
+/**
+ * De-initialize driver memory module
+ *
+ * This function de-initializes the driver memory module.
+ *
+ * @return 0 on success, negative value on error
+ *
+ */
+int sps_mem_de_init(void);
+
+/**
+ * Initialize BAM DMA module
+ *
+ * This function initializes the BAM DMA module.
+ *
+ * @bam_props - pointer to BAM DMA devices BSP configuration properties
+ *
+ * @return 0 on success, negative value on error
+ *
+ */
+int sps_dma_init(const struct sps_bam_props *bam_props);
+
+/**
+ * De-initialize BAM DMA module
+ *
+ * This function de-initializes the SPS BAM DMA module.
+ *
+ */
+void sps_dma_de_init(void);
+
+/**
+ * Initialize BAM DMA device
+ *
+ * This function initializes a BAM DMA device.
+ *
+ * @h - BAM handle
+ *
+ * @return 0 on success, negative value on error
+ *
+ */
+int sps_dma_device_init(unsigned long h);
+
+/**
+ * De-initialize BAM DMA device
+ *
+ * This function de-initializes a BAM DMA device.
+ *
+ * @h - BAM handle
+ *
+ * @return 0 on success, negative value on error
+ *
+ */
+int sps_dma_device_de_init(unsigned long h);
+
+/**
+ * Initialize connection mapping module
+ *
+ * This function initializes the SPS connection mapping module.
+ *
+ * @map_props - pointer to connection mapping BSP configuration properties
+ *
+ * @options - driver options bitflags (see SPS_OPT_*)
+ *
+ * @return 0 on success, negative value on error
+ *
+ */
+
+int sps_map_init(const struct sps_map *map_props, u32 options);
+
+/**
+ * De-initialize connection mapping module
+ *
+ * This function de-initializes the SPS connection mapping module.
+ *
+ */
+void sps_map_de_init(void);
+
+/*
+ * bam_pipe_reset - reset a BAM pipe.
+ * @base:	BAM virtual address
+ * @pipe:	pipe index
+ *
+ * This function resets a BAM pipe.
+ */
+void bam_pipe_reset(void *base, u32 pipe);
+
+/*
+ * bam_disable_pipe - disable a BAM pipe.
+ * @base:	BAM virtual address
+ * @pipe:	pipe index
+ *
+ * This function disables a BAM pipe.
+ */
+void bam_disable_pipe(void *base, u32 pipe);
+#endif	/* _SPSI_H_ */
diff -Nruw linux-4.4.115-fbx/drivers/platform/msm./sps/sps_map.c linux-4.4.115-fbx/drivers/platform/msm/sps/sps_map.c
--- linux-4.4.115-fbx/drivers/platform/msm./sps/sps_map.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/platform/msm/sps/sps_map.c	2019-01-22 16:16:26.187270712 +0100
@@ -0,0 +1,139 @@
+/* Copyright (c) 2011-2013, 2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/**
+ * Connection mapping table management for SPS device driver.
+ */
+
+#include <linux/types.h>	/* u32 */
+#include <linux/kernel.h>	/* pr_info() */
+#include <linux/memory.h>	/* memset */
+
+#include "spsi.h"
+
+/* Module state */
+struct sps_map_state {
+	const struct sps_map *maps;
+	u32 num_maps;
+	u32 options;
+};
+
+static struct sps_map_state sps_maps;
+
+/**
+ * Initialize connection mapping module
+ *
+ */
+int sps_map_init(const struct sps_map *map_props, u32 options)
+{
+	const struct sps_map *maps;
+
+	/* Are there any connection mappings? */
+	memset(&sps_maps, 0, sizeof(sps_maps));
+	if (map_props == NULL)
+		return 0;
+
+	/* Init the module state */
+	sps_maps.maps = map_props;
+	sps_maps.options = options;
+	for (maps = sps_maps.maps;; maps++, sps_maps.num_maps++)
+		if (maps->src.periph_class == SPS_CLASS_INVALID &&
+		    maps->src.periph_phy_addr == SPS_ADDR_INVALID)
+			break;
+
+	SPS_DBG(sps, "sps: %d mappings", sps_maps.num_maps);
+
+	return 0;
+}
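+
+/*
+ * Illustrative shape of a BSP mapping table (hypothetical entries): the
+ * counting loop above relies on a terminating sentinel whose source
+ * class and physical address are both invalid:
+ *
+ *	static const struct sps_map my_maps[] = {
+ *		// ... one entry per fixed connection ...
+ *		{ .src = { .periph_class = SPS_CLASS_INVALID,
+ *			   .periph_phy_addr = SPS_ADDR_INVALID } },
+ *	};
+ */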
+
+/**
+ * De-initialize connection mapping module
+ *
+ */
+void sps_map_de_init(void)
+{
+	memset(&sps_maps, 0, sizeof(sps_maps));
+}
+
+/**
+ * Find matching connection mapping
+ *
+ */
+int sps_map_find(struct sps_connect *connect)
+{
+	const struct sps_map *map;
+	u32 i;
+	void *desc;
+	void *data;
+
+	/* Are there any connection mappings? */
+	if (sps_maps.num_maps == 0)
+		return SPS_ERROR;
+
+	/* Search the mapping table for a match to the specified connection */
+	for (i = sps_maps.num_maps, map = sps_maps.maps;
+	    i > 0; i--, map++)
+		if (map->src.periph_class == (u32) connect->source &&
+		    map->dest.periph_class == (u32) connect->destination
+		    && map->config == (u32) connect->config)
+			break;
+
+	if (i == 0)
+		return SPS_ERROR;
+
+	/*
+	 * Before modifying client parameter struct, perform all
+	 * operations that might fail
+	 */
+	desc = spsi_get_mem_ptr(map->desc_base);
+	if (desc == NULL) {
+		SPS_ERR(sps,
+			"sps:Cannot get virt addr for I/O buffer: %pa\n",
+			&map->desc_base);
+		return SPS_ERROR;
+	}
+
+	if (map->data_size > 0 && map->data_base != SPS_ADDR_INVALID) {
+		data = spsi_get_mem_ptr(map->data_base);
+		if (data == NULL) {
+			SPS_ERR(sps,
+				"sps:Can't get virt addr for I/O buffer: %pa",
+				&map->data_base);
+			return SPS_ERROR;
+		}
+	} else {
+		data = NULL;
+	}
+
+	/* Copy mapping values to client parameter struct */
+	if (connect->source != SPS_DEV_HANDLE_MEM)
+		connect->src_pipe_index = map->src.pipe_index;
+
+	if (connect->destination != SPS_DEV_HANDLE_MEM)
+		connect->dest_pipe_index = map->dest.pipe_index;
+
+	if (connect->mode == SPS_MODE_SRC)
+		connect->event_thresh = map->src.event_thresh;
+	else
+		connect->event_thresh = map->dest.event_thresh;
+
+	connect->desc.size = map->desc_size;
+	connect->desc.phys_base = map->desc_base;
+	connect->desc.base = desc;
+	if (map->data_size > 0 && map->data_base != SPS_ADDR_INVALID) {
+		connect->data.size = map->data_size;
+		connect->data.phys_base = map->data_base;
+		connect->data.base = data;
+	}
+
+	return 0;
+}
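sps_map_find() only reads the lookup keys (source, destination, config) and, on a hit, fills in the pipe indexes, event threshold, and FIFO descriptors from the matching table entry. A hedged usage sketch (the class values are placeholders, not real peripheral classes):

static int example_lookup(struct sps_connect *c)
{
	c->source      = 1;		/* hypothetical source class */
	c->destination = 2;		/* hypothetical destination class */
	c->config      = 0x10;		/* must match a table entry */
	c->mode        = SPS_MODE_SRC;

	if (sps_map_find(c))
		return SPS_ERROR;	/* no mapping matched */

	/* c->desc and c->data now describe the FIFOs from the table */
	return 0;
}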
diff -Nruw linux-4.4.115-fbx/drivers/platform/msm./sps/sps_map.h linux-4.4.115-fbx/drivers/platform/msm/sps/sps_map.h
--- linux-4.4.115-fbx/drivers/platform/msm./sps/sps_map.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/platform/msm/sps/sps_map.h	2019-01-22 16:16:26.187270712 +0100
@@ -0,0 +1,46 @@
+/* Copyright (c) 2011,2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/* SPS driver mapping table data declarations. */
+
+
+#ifndef _SPS_MAP_H_
+#define _SPS_MAP_H_
+
+#include <linux/types.h>	/* u32 */
+
+/* End point parameters */
+struct sps_map_end_point {
+	u32 periph_class;	/* Peripheral device enumeration class */
+	phys_addr_t periph_phy_addr;	/* Peripheral base address */
+	u32 pipe_index;		/* Pipe index */
+	u32 event_thresh;	/* Pipe event threshold */
+};
+
+/* Mapping connection descriptor */
+struct sps_map {
+	/* Source end point parameters */
+	struct sps_map_end_point src;
+
+	/* Destination end point parameters */
+	struct sps_map_end_point dest;
+
+	/* Resource parameters */
+	u32 config;	 /* Configuration (stream) identifier */
+	phys_addr_t desc_base;	 /* Physical address of descriptor FIFO */
+	u32 desc_size;	 /* Size (bytes) of descriptor FIFO */
+	phys_addr_t data_base;	 /* Physical address of data FIFO */
+	u32 data_size;	 /* Size (bytes) of data FIFO */
+
+};
+
+#endif /* _SPS_MAP_H_ */
diff -Nruw linux-4.4.115-fbx/drivers/platform/msm./sps/sps_mem.c linux-4.4.115-fbx/drivers/platform/msm/sps/sps_mem.c
--- linux-4.4.115-fbx/drivers/platform/msm./sps/sps_mem.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/platform/msm/sps/sps_mem.c	2019-10-29 09:26:24.617212710 +0100
@@ -0,0 +1,174 @@
+/* Copyright (c) 2011-2013, 2015, 2017, The Linux Foundation.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/**
+ * Pipe-Memory allocation/free management.
+ */
+
+#include <linux/types.h>	/* u32 */
+#include <linux/kernel.h>	/* pr_info() */
+#include <linux/io.h>		/* ioremap() */
+#include <linux/mutex.h>	/* mutex */
+#include <linux/list.h>		/* list_head */
+#include <linux/genalloc.h>	/* gen_pool_alloc() */
+#include <linux/errno.h>	/* ENOMEM */
+
+#include "sps_bam.h"
+#include "spsi.h"
+
+static phys_addr_t iomem_phys;
+static void *iomem_virt;
+static u32 iomem_size;
+static u32 iomem_offset;
+static struct gen_pool *pool;
+static u32 nid = 0xaa;
+
+/* Debug */
+static u32 total_alloc;
+static u32 total_free;
+
+/**
+ * Translate physical to virtual address
+ *
+ */
+void *spsi_get_mem_ptr(phys_addr_t phys_addr)
+{
+	void *virt = NULL;
+
+	if ((phys_addr >= iomem_phys) &&
+	    (phys_addr < (iomem_phys + iomem_size))) {
+		virt = (u8 *) iomem_virt + (phys_addr - iomem_phys);
+	} else {
+		virt = phys_to_virt(phys_addr);
+		SPS_ERR(sps, "sps:spsi_get_mem_ptr.invalid phys addr=%pa.",
+			&phys_addr);
+	}
+	return virt;
+}
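For concreteness: if sps_mem_init() mapped pipe memory at iomem_phys = 0x40000000 with iomem_size = 0x4000 (illustrative values only), then phys_addr = 0x40000100 translates to iomem_virt + 0x100. Any address outside that window falls through to phys_to_virt() and is logged as an error, since pipe memory is the only region this helper is expected to serve.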
+
+/**
+ * Allocate I/O (pipe) memory
+ *
+ */
+phys_addr_t sps_mem_alloc_io(u32 bytes)
+{
+	phys_addr_t phys_addr = SPS_ADDR_INVALID;
+	unsigned long virt_addr = 0;
+
+	virt_addr = gen_pool_alloc(pool, bytes);
+	if (virt_addr) {
+		iomem_offset = virt_addr - (uintptr_t) iomem_virt;
+		phys_addr = iomem_phys + iomem_offset;
+		total_alloc += bytes;
+	} else {
+		SPS_ERR(sps, "sps:gen_pool_alloc %d bytes fail.", bytes);
+		return SPS_ADDR_INVALID;
+	}
+
+	SPS_DBG3(sps, "sps:sps_mem_alloc_io.phys=%pa.virt=0x%lx.size=0x%x.",
+		&phys_addr, virt_addr, bytes);
+
+	return phys_addr;
+}
+
+/**
+ * Free I/O memory
+ *
+ */
+void sps_mem_free_io(phys_addr_t phys_addr, u32 bytes)
+{
+	unsigned long virt_addr = 0;
+
+	iomem_offset = phys_addr - iomem_phys;
+	virt_addr = (uintptr_t) iomem_virt + iomem_offset;
+
+	SPS_DBG3(sps, "sps:sps_mem_free_io.phys=%pa.virt=0x%lx.size=0x%x.",
+		&phys_addr, virt_addr, bytes);
+
+	gen_pool_free(pool, virt_addr, bytes);
+	total_free += bytes;
+}
+
+/**
+ * Initialize driver memory module
+ *
+ */
+int sps_mem_init(phys_addr_t pipemem_phys_base, u32 pipemem_size)
+{
+	int res;
+
+	/* 2^8 = 256 bytes: the minimal desc-fifo and data-fifo allocation. */
+	int min_alloc_order = 8;
+
+	if ((d_type == 0) || (d_type == 2) || imem) {
+		iomem_phys = pipemem_phys_base;
+		iomem_size = pipemem_size;
+
+		if (iomem_phys == 0) {
+			SPS_ERR(sps, "sps:%s:Invalid Pipe-Mem address",
+				__func__);
+			return SPS_ERROR;
+		} else {
+			iomem_virt = ioremap(iomem_phys, iomem_size);
+			if (!iomem_virt) {
+				SPS_ERR(sps,
+				"sps:%s:Failed to IO map pipe memory.\n",
+					__func__);
+				return -ENOMEM;
+			}
+		}
+
+		iomem_offset = 0;
+		SPS_DBG(sps,
+			"sps:sps_mem_init.iomem_phys=%pa,iomem_virt=0x%pK.",
+			&iomem_phys, iomem_virt);
+	}
+
+	pool = gen_pool_create(min_alloc_order, nid);
+
+	if (!pool) {
+		SPS_ERR(sps, "sps:%s:Failed to create a new memory pool.\n",
+								__func__);
+		return -ENOMEM;
+	}
+
+	if ((d_type == 0) || (d_type == 2) || imem) {
+		res = gen_pool_add(pool, (uintptr_t)iomem_virt,
+				iomem_size, nid);
+		if (res)
+			return res;
+	}
+
+	return 0;
+}
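sps_mem_init() follows the standard genalloc pattern: create a pool whose minimum allocation order covers the smallest FIFO, then hand it the ioremapped pipe-memory window. The same pattern in isolation, as a self-contained sketch with placeholder parameters:

#include <linux/genalloc.h>

static struct gen_pool *example_pool_setup(void *virt, size_t size)
{
	/* order 8: allocations are rounded up to 256-byte chunks */
	struct gen_pool *p = gen_pool_create(8, -1);

	if (!p)
		return NULL;

	/* hand the whole window to the allocator (-1 = any NUMA node) */
	if (gen_pool_add(p, (unsigned long)virt, size, -1)) {
		gen_pool_destroy(p);
		return NULL;
	}
	return p;
}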
+
+/**
+ * De-initialize driver memory module
+ *
+ */
+int sps_mem_de_init(void)
+{
+	if (iomem_virt != NULL) {
+		gen_pool_destroy(pool);
+		pool = NULL;
+		iounmap(iomem_virt);
+		iomem_virt = NULL;
+	}
+
+	if (total_alloc == total_free)
+		return 0;
+	else {
+		SPS_ERR(sps, "sps:%s:some memory not free", __func__);
+		return SPS_ERROR;
+	}
+}
diff -Nruw linux-4.4.115-fbx/drivers/platform/msm./sps/sps_rm.c linux-4.4.115-fbx/drivers/platform/msm/sps/sps_rm.c
--- linux-4.4.115-fbx/drivers/platform/msm./sps/sps_rm.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/platform/msm/sps/sps_rm.c	2019-10-29 09:26:24.617212710 +0100
@@ -0,0 +1,852 @@
+/* Copyright (c) 2011-2015, 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/* Resource management for the SPS device driver. */
+
+#include <linux/types.h>	/* u32 */
+#include <linux/kernel.h>	/* pr_info() */
+#include <linux/mutex.h>	/* mutex */
+#include <linux/list.h>		/* list_head */
+#include <linux/slab.h>		/* kzalloc() */
+#include <linux/memory.h>	/* memset */
+#include <linux/interrupt.h>
+
+#include "spsi.h"
+#include "sps_core.h"
+
+/* Max BAM FIFO sizes */
+#define SPSRM_MAX_DESC_FIFO_SIZE    0xffff
+#define SPSRM_MAX_DATA_FIFO_SIZE    0xffff
+
+/* Connection control struct pointer */
+static struct sps_rm *sps_rm;
+
+/**
+ * Initialize resource manager module
+ */
+int sps_rm_init(struct sps_rm *rm, u32 options)
+{
+	/* Set the resource manager state struct pointer */
+	sps_rm = rm;
+
+	/* Initialize the state struct */
+	INIT_LIST_HEAD(&sps_rm->connections_q);
+	mutex_init(&sps_rm->lock);
+
+	return 0;
+}
+
+/**
+ * Initialize client state context
+ *
+ */
+void sps_rm_config_init(struct sps_connect *connect)
+{
+	memset(connect, SPSRM_CLEAR, sizeof(*connect));
+}
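Because SPSRM_CLEAR-filled fields are what the matching code below treats as "use default / don't care", a client must initialize its connect struct through this helper before setting only the fields it cares about. A hedged sketch of the expected call sequence:

static void example_client_config(struct sps_pipe *pipe,
				  unsigned long src, unsigned long dest)
{
	struct sps_connect *c = &pipe->connect;

	/* fill every field with the SPSRM_CLEAR pattern first */
	sps_rm_config_init(c);

	c->source      = src;
	c->destination = dest;
	c->mode        = SPS_MODE_SRC;
	/* anything left at SPSRM_CLEAR is defaulted by the RM */
}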
+
+/**
+ * Remove reference to connection mapping
+ *
+ * This function removes a reference from a connection mapping struct.
+ *
+ * @map - pointer to connection mapping struct
+ *
+ */
+static void sps_rm_remove_ref(struct sps_connection *map)
+{
+	/* Free this connection */
+	map->refs--;
+	if (map->refs <= 0) {
+		if (map->client_src != NULL || map->client_dest != NULL)
+			SPS_ERR(sps,
+				"sps:%s:Failed to allocate connection struct",
+				__func__);
+
+		list_del(&map->list);
+		kfree(map);
+	}
+}
+
+/**
+ * Compare map to connect parameters
+ *
+ * This function compares client connect parameters to an allocated
+ * connection mapping.
+ *
+ * @cfg - client connect parameters
+ *
+ * @map - allocated connection mapping
+ *
+ * @return - true if match, false otherwise
+ *
+ */
+static int sps_rm_map_match(const struct sps_connect *cfg,
+			    const struct sps_connection *map)
+{
+	if (cfg->source != map->src.dev ||
+	    cfg->destination != map->dest.dev)
+		return false;
+
+	if (cfg->src_pipe_index != SPSRM_CLEAR &&
+	    cfg->src_pipe_index != map->src.pipe_index)
+		return false;
+
+	if (cfg->dest_pipe_index != SPSRM_CLEAR &&
+	    cfg->dest_pipe_index != map->dest.pipe_index)
+		return false;
+
+	if (cfg->config != map->config)
+		return false;
+
+	if (cfg->desc.size != SPSRM_CLEAR) {
+		if (cfg->desc.size != map->desc.size)
+			return false;
+
+		if (cfg->desc.phys_base != (SPSRM_CLEAR|SPSRM_ADDR_CLR) &&
+		    cfg->desc.base != (void *)(SPSRM_CLEAR|SPSRM_ADDR_CLR) &&
+		    (cfg->desc.phys_base != map->desc.phys_base ||
+		     cfg->desc.base != map->desc.base)) {
+			return false;
+		}
+	}
+
+	if (cfg->data.size != SPSRM_CLEAR) {
+		if (cfg->data.size != map->data.size)
+			return false;
+
+		if (cfg->data.phys_base != (SPSRM_CLEAR|SPSRM_ADDR_CLR) &&
+		    cfg->data.base != (void *)(SPSRM_CLEAR|SPSRM_ADDR_CLR) &&
+		    (cfg->data.phys_base != map->data.phys_base ||
+		     cfg->data.base != map->data.base))
+			return false;
+	}
+
+	return true;
+}
+
+/**
+ * Find unconnected mapping
+ *
+ * This function finds an allocated but not yet connected mapping.
+ *
+ * @pipe - client context for SPS connection end point
+ *
+ * @return - pointer to allocated connection mapping, or NULL if not found
+ *
+ */
+static struct sps_connection *find_unconnected(struct sps_pipe *pipe)
+{
+	struct sps_connect *cfg = &pipe->connect;
+	struct sps_connection *map;
+
+	/* Has this connection already been allocated? */
+	list_for_each_entry(map, &sps_rm->connections_q, list) {
+		if (sps_rm_map_match(cfg, map))
+			if ((cfg->mode == SPS_MODE_SRC
+			     && map->client_src == NULL)
+			    || (cfg->mode != SPS_MODE_SRC
+				&& map->client_dest == NULL))
+				return map;	/* Found */
+	}
+
+	return NULL;		/* Not Found */
+}
+
+/**
+ * Assign connection to client
+ *
+ * This function assigns a connection to a client.
+ *
+ * @pipe - client context for SPS connection end point
+ *
+ * @map - connection mapping
+ *
+ * @return 0 on success, negative value on error
+ *
+ */
+static int sps_rm_assign(struct sps_pipe *pipe,
+			 struct sps_connection *map)
+{
+	struct sps_connect *cfg = &pipe->connect;
+
+	/* Check ownership and BAM */
+	if ((cfg->mode == SPS_MODE_SRC && map->client_src != NULL) ||
+	    (cfg->mode != SPS_MODE_SRC && map->client_dest != NULL)) {
+		SPS_ERR(sps,
+			"sps:%s:The end point is already connected.\n",
+			__func__);
+		return SPS_ERROR;
+	}
+
+	/* Check whether this end point is a BAM (not memory) */
+	if ((cfg->mode == SPS_MODE_SRC && map->src.bam == NULL) ||
+	    (cfg->mode != SPS_MODE_SRC && map->dest.bam == NULL)) {
+		SPS_ERR(sps, "sps:%s:The end point is empty.\n", __func__);
+		return SPS_ERROR;
+	}
+
+	/* Record the connection assignment */
+	if (cfg->mode == SPS_MODE_SRC) {
+		map->client_src = pipe;
+		pipe->bam = map->src.bam;
+		pipe->pipe_index = map->src.pipe_index;
+		if (pipe->connect.event_thresh != SPSRM_CLEAR)
+			map->src.event_threshold = pipe->connect.event_thresh;
+		if (pipe->connect.lock_group != SPSRM_CLEAR)
+			map->src.lock_group = pipe->connect.lock_group;
+	} else {
+		map->client_dest = pipe;
+		pipe->bam = map->dest.bam;
+		pipe->pipe_index = map->dest.pipe_index;
+		if (pipe->connect.event_thresh != SPSRM_CLEAR)
+			map->dest.event_threshold =
+			pipe->connect.event_thresh;
+		if (pipe->connect.lock_group != SPSRM_CLEAR)
+			map->dest.lock_group = pipe->connect.lock_group;
+	}
+	pipe->map = map;
+
+	SPS_DBG(pipe->bam, "sps:sps_rm_assign.bam %pa.pipe_index=%d\n",
+			BAM_ID(pipe->bam), pipe->pipe_index);
+
+	/* Copy parameters to client connect state */
+	pipe->connect.src_pipe_index = map->src.pipe_index;
+	pipe->connect.dest_pipe_index = map->dest.pipe_index;
+	pipe->connect.desc = map->desc;
+	pipe->connect.data = map->data;
+
+	pipe->client_state = SPS_STATE_ALLOCATE;
+
+	return 0;
+}
+
+/**
+ * Free connection mapping resources
+ *
+ * This function frees the resources held by a connection mapping.
+ *
+ * @map - pointer to connection mapping struct
+ *
+ */
+static void sps_rm_free_map_rsrc(struct sps_connection *map)
+{
+	struct sps_bam *bam;
+
+	if (map->client_src != NULL || map->client_dest != NULL)
+		return;
+
+	if (map->alloc_src_pipe != SPS_BAM_PIPE_INVALID) {
+		bam = map->src.bam;
+		sps_bam_pipe_free(bam, map->src.pipe_index);
+
+		/* Is this a BAM-DMA pipe? */
+#ifdef CONFIG_SPS_SUPPORT_BAMDMA
+		if ((bam->props.options & SPS_BAM_OPT_BAMDMA))
+			/* Deallocate and free the BAM-DMA channel */
+			sps_dma_pipe_free(bam, map->src.pipe_index);
+#endif
+		map->alloc_src_pipe = SPS_BAM_PIPE_INVALID;
+		map->src.pipe_index = SPS_BAM_PIPE_INVALID;
+	}
+	if (map->alloc_dest_pipe != SPS_BAM_PIPE_INVALID) {
+		bam = map->dest.bam;
+		sps_bam_pipe_free(bam, map->dest.pipe_index);
+
+		/* Is this a BAM-DMA pipe? */
+#ifdef CONFIG_SPS_SUPPORT_BAMDMA
+		if ((bam->props.options & SPS_BAM_OPT_BAMDMA)) {
+			/* Deallocate the BAM-DMA channel */
+			sps_dma_pipe_free(bam, map->dest.pipe_index);
+		}
+#endif
+		map->alloc_dest_pipe = SPS_BAM_PIPE_INVALID;
+		map->dest.pipe_index = SPS_BAM_PIPE_INVALID;
+	}
+	if (map->alloc_desc_base != SPS_ADDR_INVALID) {
+		sps_mem_free_io(map->alloc_desc_base, map->desc.size);
+
+		map->alloc_desc_base = SPS_ADDR_INVALID;
+		map->desc.phys_base = SPS_ADDR_INVALID;
+	}
+	if (map->alloc_data_base != SPS_ADDR_INVALID) {
+		sps_mem_free_io(map->alloc_data_base, map->data.size);
+
+		map->alloc_data_base = SPS_ADDR_INVALID;
+		map->data.phys_base = SPS_ADDR_INVALID;
+	}
+}
+
+/**
+ * Init connection mapping from client connect
+ *
+ * This function initializes a connection mapping from the client's
+ * connect parameters.
+ *
+ * @map - connection mapping struct
+ *
+ * @cfg - client connect parameters
+ *
+ */
+static void sps_rm_init_map(struct sps_connection *map,
+			    const struct sps_connect *cfg)
+{
+	/* Clear the connection mapping struct */
+	memset(map, 0, sizeof(*map));
+	map->desc.phys_base = SPS_ADDR_INVALID;
+	map->data.phys_base = SPS_ADDR_INVALID;
+	map->alloc_desc_base = SPS_ADDR_INVALID;
+	map->alloc_data_base = SPS_ADDR_INVALID;
+	map->alloc_src_pipe = SPS_BAM_PIPE_INVALID;
+	map->alloc_dest_pipe = SPS_BAM_PIPE_INVALID;
+
+	/* Copy client required parameters */
+	map->src.dev = cfg->source;
+	map->dest.dev = cfg->destination;
+	map->desc.size = cfg->desc.size;
+	map->data.size = cfg->data.size;
+	map->config = cfg->config;
+
+	/* Did client specify descriptor FIFO? */
+	if (map->desc.size != SPSRM_CLEAR &&
+	    cfg->desc.phys_base != (SPSRM_CLEAR|SPSRM_ADDR_CLR) &&
+	    cfg->desc.base != (void *)(SPSRM_CLEAR|SPSRM_ADDR_CLR))
+		map->desc = cfg->desc;
+
+	/* Did client specify data FIFO? */
+	if (map->data.size != SPSRM_CLEAR &&
+	    cfg->data.phys_base != (SPSRM_CLEAR|SPSRM_ADDR_CLR) &&
+	    cfg->data.base != (void *)(SPSRM_CLEAR|SPSRM_ADDR_CLR))
+		map->data = cfg->data;
+
+	/* Did client specify source pipe? */
+	if (cfg->src_pipe_index != SPSRM_CLEAR)
+		map->src.pipe_index = cfg->src_pipe_index;
+	else
+		map->src.pipe_index = SPS_BAM_PIPE_INVALID;
+
+
+	/* Did client specify destination pipe? */
+	if (cfg->dest_pipe_index != SPSRM_CLEAR)
+		map->dest.pipe_index = cfg->dest_pipe_index;
+	else
+		map->dest.pipe_index = SPS_BAM_PIPE_INVALID;
+}
+
+/**
+ * Create a new connection mapping
+ *
+ * This function creates a new connection mapping.
+ *
+ * @pipe - client context for SPS connection end point
+ *
+ * @return - pointer to allocated connection mapping, or NULL on error
+ *
+ */
+static struct sps_connection *sps_rm_create(struct sps_pipe *pipe)
+{
+	struct sps_connection *map;
+	struct sps_bam *bam;
+	u32 desc_size;
+	u32 data_size;
+	enum sps_mode dir;
+	int success = false;
+
+	/* Allocate new connection */
+	map = kzalloc(sizeof(*map), GFP_KERNEL);
+	if (map == NULL) {
+		SPS_ERR(sps,
+			"sps:%s:Failed to allocate connection struct",
+			__func__);
+		return NULL;
+	}
+
+	/* Initialize connection struct */
+	sps_rm_init_map(map, &pipe->connect);
+	dir = pipe->connect.mode;
+
+	/* On any failure below, jump to exit_err to release partial state */
+	success = false;
+	/* Get BAMs */
+	map->src.bam = sps_h2bam(map->src.dev);
+	if (map->src.bam == NULL) {
+		if (map->src.dev != SPS_DEV_HANDLE_MEM) {
+			SPS_ERR(sps, "sps:Invalid BAM handle: %pa",
+							&map->src.dev);
+			goto exit_err;
+		}
+		map->src.pipe_index = SPS_BAM_PIPE_INVALID;
+	}
+	map->dest.bam = sps_h2bam(map->dest.dev);
+	if (map->dest.bam == NULL) {
+		if (map->dest.dev != SPS_DEV_HANDLE_MEM) {
+			SPS_ERR(sps, "sps:Invalid BAM handle: %pa",
+							&map->dest.dev);
+			goto exit_err;
+		}
+		map->dest.pipe_index = SPS_BAM_PIPE_INVALID;
+	}
+
+	/* Check the BAM device for the pipe */
+	if ((dir == SPS_MODE_SRC && map->src.bam == NULL) ||
+	    (dir != SPS_MODE_SRC && map->dest.bam == NULL)) {
+		SPS_ERR(sps, "sps:Invalid BAM endpt: dir %d src %pa dest %pa",
+			dir, &map->src.dev, &map->dest.dev);
+		goto exit_err;
+	}
+
+	/* Allocate pipes and copy BAM parameters */
+	if (map->src.bam != NULL) {
+		/* Allocate the pipe */
+		bam = map->src.bam;
+		map->alloc_src_pipe = sps_bam_pipe_alloc(bam,
+							map->src.pipe_index);
+		if (map->alloc_src_pipe == SPS_BAM_PIPE_INVALID)
+			goto exit_err;
+		map->src.pipe_index = map->alloc_src_pipe;
+
+		/* Is this a BAM-DMA pipe? */
+#ifdef CONFIG_SPS_SUPPORT_BAMDMA
+		if ((bam->props.options & SPS_BAM_OPT_BAMDMA)) {
+			int rc;
+			/* Allocate the BAM-DMA channel */
+			rc = sps_dma_pipe_alloc(bam, map->src.pipe_index,
+						 SPS_MODE_SRC);
+			if (rc) {
+				SPS_ERR(bam,
+					"sps:Failed to alloc BAM-DMA pipe: %d",
+					map->src.pipe_index);
+				goto exit_err;
+			}
+		}
+#endif
+		map->src.bam_phys = bam->props.phys_addr;
+		map->src.event_threshold = bam->props.event_threshold;
+	}
+	if (map->dest.bam != NULL) {
+		/* Allocate the pipe */
+		bam = map->dest.bam;
+		map->alloc_dest_pipe = sps_bam_pipe_alloc(bam,
+							 map->dest.pipe_index);
+		if (map->alloc_dest_pipe == SPS_BAM_PIPE_INVALID)
+			goto exit_err;
+
+		map->dest.pipe_index = map->alloc_dest_pipe;
+
+		/* Is this a BAM-DMA pipe? */
+#ifdef CONFIG_SPS_SUPPORT_BAMDMA
+		if ((bam->props.options & SPS_BAM_OPT_BAMDMA)) {
+			int rc;
+			/* Allocate the BAM-DMA channel */
+			rc = sps_dma_pipe_alloc(bam, map->dest.pipe_index,
+					       SPS_MODE_DEST);
+			if (rc) {
+				SPS_ERR(bam,
+					"sps:Failed to alloc BAM-DMA pipe: %d",
+					map->dest.pipe_index);
+				goto exit_err;
+			}
+		}
+#endif
+		map->dest.bam_phys = bam->props.phys_addr;
+		map->dest.event_threshold =
+		bam->props.event_threshold;
+	}
+
+	/* Get default FIFO sizes */
+	desc_size = 0;
+	data_size = 0;
+	if (map->src.bam != NULL) {
+		bam = map->src.bam;
+		desc_size = bam->props.desc_size;
+		data_size = bam->props.data_size;
+	}
+	if (map->dest.bam != NULL) {
+		bam = map->dest.bam;
+		if (bam->props.desc_size > desc_size)
+			desc_size = bam->props.desc_size;
+		if (bam->props.data_size > data_size)
+			data_size = bam->props.data_size;
+	}
+
+	/* Set FIFO sizes */
+	if (map->desc.size == SPSRM_CLEAR)
+		map->desc.size = desc_size;
+	if (map->src.bam != NULL && map->dest.bam != NULL) {
+		/* BAM-to-BAM requires data FIFO */
+		if (map->data.size == SPSRM_CLEAR)
+			map->data.size = data_size;
+	} else {
+		map->data.size = 0;
+	}
+	if (map->desc.size > SPSRM_MAX_DESC_FIFO_SIZE) {
+		SPS_ERR(sps, "sps:Invalid desc FIFO size: 0x%x",
+						map->desc.size);
+		goto exit_err;
+	}
+	if (map->src.bam != NULL && map->dest.bam != NULL &&
+	    map->data.size > SPSRM_MAX_DATA_FIFO_SIZE) {
+		SPS_ERR(sps, "sps:Invalid data FIFO size: 0x%x",
+						map->data.size);
+		goto exit_err;
+	}
+
+	/* Allocate descriptor FIFO if necessary */
+	if (map->desc.size && map->desc.phys_base == SPS_ADDR_INVALID) {
+		map->alloc_desc_base = sps_mem_alloc_io(map->desc.size);
+		if (map->alloc_desc_base == SPS_ADDR_INVALID) {
+			SPS_ERR(sps, "sps:I/O memory allocation failure:0x%x",
+				map->desc.size);
+			goto exit_err;
+		}
+		map->desc.phys_base = map->alloc_desc_base;
+		map->desc.base = spsi_get_mem_ptr(map->desc.phys_base);
+		if (map->desc.base == NULL) {
+			SPS_ERR(sps,
+				"sps:Cannot get virt addr for I/O buffer:%pa",
+				&map->desc.phys_base);
+			goto exit_err;
+		}
+	}
+
+	/* Allocate data FIFO if necessary */
+	if (map->data.size && map->data.phys_base == SPS_ADDR_INVALID) {
+		map->alloc_data_base = sps_mem_alloc_io(map->data.size);
+		if (map->alloc_data_base == SPS_ADDR_INVALID) {
+			SPS_ERR(sps, "sps:I/O memory allocation failure:0x%x",
+				map->data.size);
+			goto exit_err;
+		}
+		map->data.phys_base = map->alloc_data_base;
+		map->data.base = spsi_get_mem_ptr(map->data.phys_base);
+		if (map->data.base == NULL) {
+			SPS_ERR(sps,
+				"sps:Cannot get virt addr for I/O buffer:%pa",
+				&map->data.phys_base);
+			goto exit_err;
+		}
+	}
+
+	/* Attempt to assign this connection to the client */
+	if (sps_rm_assign(pipe, map)) {
+		SPS_ERR(sps,
+		"sps:%s:failed to assign a connection to the client.\n",
+			__func__);
+		goto exit_err;
+	}
+
+	/* Initialization was successful */
+	success = true;
+exit_err:
+
+	/* If initialization failed, free resources */
+	if (!success) {
+		sps_rm_free_map_rsrc(map);
+		kfree(map);
+		return NULL;
+	}
+
+	return map;
+}
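sps_rm_create() uses a single exit label plus a success flag so that every failure path tears down whatever was allocated up to that point. The idiom in isolation, with hypothetical names (requires <linux/slab.h>):

struct foo { int dummy; };

static int acquire_resources(struct foo *f)	/* hypothetical helper */
{
	return 0;
}

static struct foo *example_create(void)
{
	struct foo *f = kzalloc(sizeof(*f), GFP_KERNEL);
	int success = false;

	if (f == NULL)
		return NULL;

	if (acquire_resources(f))
		goto exit_err;

	success = true;
exit_err:
	if (!success) {
		kfree(f);
		f = NULL;
	}
	return f;
}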
+
+/**
+ * Free connection mapping
+ *
+ * This function frees a connection mapping.
+ *
+ * @pipe - client context for SPS connection end point
+ *
+ * @return 0 on success, negative value on error
+ *
+ */
+static int sps_rm_free(struct sps_pipe *pipe)
+{
+	struct sps_connection *map = (void *)pipe->map;
+	struct sps_connect *cfg = &pipe->connect;
+
+	mutex_lock(&sps_rm->lock);
+
+	/* Free this connection */
+	if (cfg->mode == SPS_MODE_SRC)
+		map->client_src = NULL;
+	else
+		map->client_dest = NULL;
+
+	pipe->map = NULL;
+	pipe->client_state = SPS_STATE_DISCONNECT;
+	sps_rm_free_map_rsrc(map);
+
+	sps_rm_remove_ref(map);
+
+	mutex_unlock(&sps_rm->lock);
+
+	return 0;
+}
+
+/**
+ * Allocate an SPS connection end point
+ *
+ * This function allocates resources and initializes a BAM connection.
+ *
+ * @pipe - client context for SPS connection end point
+ *
+ * @return 0 on success, negative value on error
+ *
+ */
+static int sps_rm_alloc(struct sps_pipe *pipe)
+{
+	struct sps_connection *map;
+	int result = SPS_ERROR;
+
+	if (pipe->connect.sps_reserved != SPSRM_CLEAR) {
+		/*
+		 * Client did not call sps_get_config() to init
+		 * struct sps_connect, so only use legacy members.
+		 */
+		unsigned long source = pipe->connect.source;
+		unsigned long destination = pipe->connect.destination;
+		enum sps_mode mode = pipe->connect.mode;
+		u32 config = pipe->connect.config;
+		memset(&pipe->connect, SPSRM_CLEAR,
+			      sizeof(pipe->connect));
+		pipe->connect.source = source;
+		pipe->connect.destination = destination;
+		pipe->connect.mode = mode;
+		pipe->connect.config = config;
+	}
+	if (pipe->connect.config == SPSRM_CLEAR)
+		pipe->connect.config = SPS_CONFIG_DEFAULT;
+
+	/*
+	 * If the configuration is not the default, the client is specifying
+	 * a connection mapping. Find a matching mapping, or fail. If a match
+	 * is found, the client's connect struct will be updated with all of
+	 * the mapping's values.
+	 */
+	if (pipe->connect.config != SPS_CONFIG_DEFAULT) {
+		if (sps_map_find(&pipe->connect)) {
+			SPS_ERR(sps,
+				"sps:%s:Failed to find connection mapping",
+								__func__);
+			return SPS_ERROR;
+		}
+	}
+
+	mutex_lock(&sps_rm->lock);
+	/* Check client state */
+	if (IS_SPS_STATE_OK(pipe)) {
+		SPS_ERR(sps,
+			"sps:%s:Client connection already allocated",
+							__func__);
+		goto exit_err;
+	}
+
+	/* Are the connection resources already allocated? */
+	map = find_unconnected(pipe);
+	if (map != NULL) {
+		/* Attempt to assign this connection to the client */
+		if (sps_rm_assign(pipe, map))
+			/* Assignment failed, so must allocate new */
+			map = NULL;
+	}
+
+	/* Allocate a new connection if necessary */
+	if (map == NULL) {
+		map = sps_rm_create(pipe);
+		if (map == NULL) {
+			SPS_ERR(sps,
+				"sps:%s:Failed to allocate connection",
+							__func__);
+			goto exit_err;
+		}
+		list_add_tail(&map->list, &sps_rm->connections_q);
+	}
+
+	/* Add the connection to the allocated queue */
+	map->refs++;
+
+	/* Initialization was successful */
+	result = 0;
+exit_err:
+	mutex_unlock(&sps_rm->lock);
+
+	if (result)
+		return SPS_ERROR;
+
+	return 0;
+}
+
+/**
+ * Disconnect an SPS connection end point
+ *
+ * This function frees resources and de-initializes a BAM connection.
+ *
+ * @pipe - client context for SPS connection end point
+ *
+ * @return 0 on success, negative value on error
+ *
+ */
+static int sps_rm_disconnect(struct sps_pipe *pipe)
+{
+	sps_rm_free(pipe);
+	return 0;
+}
+
+/**
+ * Process connection state change
+ *
+ * This function processes a connection state change.
+ *
+ * @pipe - pointer to client context
+ *
+ * @state - new state for connection
+ *
+ * @return 0 on success, negative value on error
+ *
+ */
+int sps_rm_state_change(struct sps_pipe *pipe, u32 state)
+{
+	int auto_enable = false;
+	int result;
+
+	/* Allocate the pipe */
+	if (pipe->client_state == SPS_STATE_DISCONNECT &&
+	    state == SPS_STATE_ALLOCATE) {
+		if (sps_rm_alloc(pipe)) {
+			SPS_ERR(pipe->bam,
+				"sps:Fail to allocate resource for BAM 0x%pK pipe %d.\n",
+					pipe->bam, pipe->pipe_index);
+			return SPS_ERROR;
+		}
+	}
+
+	/* Configure the pipe */
+	if (pipe->client_state == SPS_STATE_ALLOCATE &&
+	    state == SPS_STATE_CONNECT) {
+		/* Connect the BAM pipe */
+		struct sps_bam_connect_param params;
+		memset(&params, 0, sizeof(params));
+		params.mode = pipe->connect.mode;
+		if (pipe->connect.options != SPSRM_CLEAR) {
+			params.options = pipe->connect.options;
+			params.irq_gen_addr = pipe->connect.irq_gen_addr;
+			params.irq_gen_data = pipe->connect.irq_gen_data;
+		}
+		result = sps_bam_pipe_connect(pipe, &params);
+		if (result) {
+			SPS_ERR(pipe->bam,
+				"sps:Failed to connect BAM 0x%pK pipe %d",
+					pipe->bam, pipe->pipe_index);
+			return SPS_ERROR;
+		}
+		pipe->client_state = SPS_STATE_CONNECT;
+
+		/* Set auto-enable for system-mode connections */
+		if (pipe->connect.source == SPS_DEV_HANDLE_MEM ||
+		    pipe->connect.destination == SPS_DEV_HANDLE_MEM) {
+			if (pipe->map->desc.size != 0 &&
+			    pipe->map->desc.phys_base != SPS_ADDR_INVALID)
+				auto_enable = true;
+		}
+	}
+
+	/* Enable the pipe data flow */
+	if (pipe->client_state == SPS_STATE_CONNECT &&
+	    !(state == SPS_STATE_DISABLE
+	      || state == SPS_STATE_DISCONNECT)
+	    && (state == SPS_STATE_ENABLE || auto_enable
+		|| (pipe->connect.options & SPS_O_AUTO_ENABLE))) {
+		result = sps_bam_pipe_enable(pipe->bam, pipe->pipe_index);
+		if (result) {
+			SPS_ERR(pipe->bam,
+				"sps:Failed to set BAM %pa pipe %d flow on",
+				&pipe->bam->props.phys_addr,
+				pipe->pipe_index);
+			return SPS_ERROR;
+		}
+
+		/* Is this a BAM-DMA pipe? */
+#ifdef CONFIG_SPS_SUPPORT_BAMDMA
+		if ((pipe->bam->props.options & SPS_BAM_OPT_BAMDMA)) {
+			/* Activate the BAM-DMA channel */
+			result = sps_dma_pipe_enable(pipe->bam,
+						     pipe->pipe_index);
+			if (result) {
+				SPS_ERR(pipe->bam,
+					"sps:Failed to activate BAM-DMA"
+					" pipe: %d", pipe->pipe_index);
+				return SPS_ERROR;
+			}
+		}
+#endif
+		pipe->client_state = SPS_STATE_ENABLE;
+	}
+
+	/* Disable the pipe data flow */
+	if (pipe->client_state == SPS_STATE_ENABLE &&
+	    (state == SPS_STATE_DISABLE	|| state == SPS_STATE_DISCONNECT)) {
+		result = sps_bam_pipe_disable(pipe->bam, pipe->pipe_index);
+		if (result) {
+			SPS_ERR(pipe->bam,
+				"sps:Failed to set BAM %pa pipe %d flow off",
+				&pipe->bam->props.phys_addr,
+				pipe->pipe_index);
+			return SPS_ERROR;
+		}
+		pipe->client_state = SPS_STATE_CONNECT;
+	}
+
+	/* Disconnect the BAM pipe */
+	if (pipe->client_state == SPS_STATE_CONNECT &&
+	    state == SPS_STATE_DISCONNECT) {
+		struct sps_connection *map;
+		struct sps_bam *bam = pipe->bam;
+		unsigned long flags;
+		u32 pipe_index;
+
+		if (pipe->connect.mode == SPS_MODE_SRC)
+			pipe_index = pipe->map->src.pipe_index;
+		else
+			pipe_index = pipe->map->dest.pipe_index;
+
+		if (bam->props.irq > 0)
+			synchronize_irq(bam->props.irq);
+
+		spin_lock_irqsave(&bam->isr_lock, flags);
+		pipe->disconnecting = true;
+		spin_unlock_irqrestore(&bam->isr_lock, flags);
+		result = sps_bam_pipe_disconnect(pipe->bam, pipe_index);
+		if (result) {
+			SPS_ERR(pipe->bam,
+				"sps:Failed to disconnect BAM %pa pipe %d",
+				&pipe->bam->props.phys_addr,
+				pipe->pipe_index);
+			return SPS_ERROR;
+		}
+
+		/* Clear map state */
+		map = (void *)pipe->map;
+		if (pipe->connect.mode == SPS_MODE_SRC)
+			map->client_src = NULL;
+		else if (pipe->connect.mode == SPS_MODE_DEST)
+			map->client_dest = NULL;
+
+		sps_rm_disconnect(pipe);
+
+		/* Clear the client state */
+		pipe->map = NULL;
+		pipe->bam = NULL;
+		pipe->client_state = SPS_STATE_DISCONNECT;
+	}
+
+	return 0;
+}
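Taken together, the blocks above implement a small per-pipe state machine. The legal transitions, with the call that performs each, are:

  DISCONNECT -> ALLOCATE    sps_rm_alloc()
  ALLOCATE   -> CONNECT     sps_bam_pipe_connect()
  CONNECT    -> ENABLE      sps_bam_pipe_enable() (explicit, auto-enable,
                            or SPS_O_AUTO_ENABLE)
  ENABLE     -> CONNECT     sps_bam_pipe_disable()
  CONNECT    -> DISCONNECT  sps_bam_pipe_disconnect() + sps_rm_disconnect()

A single request may traverse several arcs in one call: for example, state == SPS_STATE_DISCONNECT first disables an enabled pipe and then disconnects it.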
diff -Nruw linux-4.4.115-fbx/drivers/power/qcom./apm.c linux-4.4.115-fbx/drivers/power/qcom/apm.c
--- linux-4.4.115-fbx/drivers/power/qcom./apm.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/power/qcom/apm.c	2019-01-22 16:16:26.223271038 +0100
@@ -0,0 +1,1059 @@
+/*
+ * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/of_device.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/power/qcom/apm.h>
+#include <soc/qcom/scm.h>
+#include <linux/arm-smccc.h>
+
+/*
+ *        VDD_APCC
+ * =============================================================
+ *       |      VDD_MX                  |                    |
+ *       |    ==========================|=============       |
+ *    ___|___   ___|___    ___|___   ___|___    ___|___   ___|___
+ *   |       | |       |  |       | |       |  |       | |       |
+ *   | APCC  | | MX HS |  | MX HS | | APCC  |  | MX HS | | APCC  |
+ *   |  HS   | |       |  |       | |  HS   |  |       | |  HS   |
+ *   |_______| |_______|  |_______| |_______|  |_______| |_______|
+ *       |_________|          |_________|         |__________|
+ *            |                    |                    |
+ *      ______|_____         ______|_____        _______|_____
+ *     |            |       |            |      |             |
+ *     |            |       |            |      |             |
+ *     |  CPU MEM   |       |   L2 MEM   |      |    L3 MEM   |
+ *     |   Arrays   |       |   Arrays   |      |    Arrays   |
+ *     |            |       |            |      |             |
+ *     |____________|       |____________|      |_____________|
+ *
+ */
+
+/* Register value definitions */
+#define APCS_GFMUXA_SEL_VAL            0x13
+#define APCS_GFMUXA_DESEL_VAL          0x03
+#define MSM_APM_MX_MODE_VAL            0x00
+#define MSM_APM_APCC_MODE_VAL          0x10
+#define MSM_APM_MX_DONE_VAL            0x00
+#define MSM_APM_APCC_DONE_VAL          0x03
+#define MSM_APM_OVERRIDE_SEL_VAL       0xb0
+#define MSM_APM_SEC_CLK_SEL_VAL        0x30
+#define SPM_EVENT_SET_VAL              0x01
+#define SPM_EVENT_CLEAR_VAL            0x00
+
+/* Register bit mask definitions */
+#define MSM_APM_CTL_STS_MASK            0x0f
+
+/* Register offset definitions */
+#define APCC_APM_MODE              0x00000098
+#define APCC_APM_CTL_STS           0x000000a8
+#define APCS_SPARE                 0x00000068
+#define APCS_VERSION               0x00000fd0
+
+#define HMSS_VERSION_1P2           0x10020000
+
+#define MSM_APM_SWITCH_TIMEOUT_US  10
+#define SPM_WAKEUP_DELAY_US        2
+#define SPM_EVENT_NUM              6
+
+#define MSM_APM_DRIVER_NAME        "qcom,msm-apm"
+
+
+enum {
+	CLOCK_ASSERT_ENABLE,
+	CLOCK_ASSERT_DISABLE,
+	CLOCK_ASSERT_TOGGLE,
+};
+
+enum {
+	MSM8996_ID,
+	MSM8996PRO_ID,
+	MSM8953_ID,
+};
+
+struct msm_apm_ctrl_dev {
+	struct list_head	list;
+	struct device		*dev;
+	enum msm_apm_supply	supply;
+	spinlock_t		lock;
+	void __iomem		*reg_base;
+	void __iomem		*apcs_csr_base;
+	void __iomem		**apcs_spm_events_addr;
+	void __iomem		*apc0_pll_ctl_addr;
+	void __iomem		*apc1_pll_ctl_addr;
+	bool			clk_src_override;
+	u32			version;
+	struct dentry		*debugfs;
+	u32			msm_id;
+};
+
+#if defined(CONFIG_DEBUG_FS)
+static struct dentry *apm_debugfs_base;
+#endif
+
+static DEFINE_MUTEX(apm_ctrl_list_mutex);
+static LIST_HEAD(apm_ctrl_list);
+
+/*
+ * Get the resources associated with the APM controller from device tree
+ * and remap all I/O addresses that are relevant to this HW revision.
+ */
+static int msm_apm_ctrl_devm_ioremap(struct platform_device *pdev,
+				     struct msm_apm_ctrl_dev *ctrl)
+{
+	struct device *dev = &pdev->dev;
+	struct resource *res;
+	static const char *res_name[SPM_EVENT_NUM] = {
+		"apc0-l2-spm",
+		"apc1-l2-spm",
+		"apc0-cpu0-spm",
+		"apc0-cpu1-spm",
+		"apc1-cpu0-spm",
+		"apc1-cpu1-spm"
+	};
+	int i, ret = 0;
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pm-apcc-glb");
+	if (!res) {
+		dev_err(dev, "Missing PM APCC Global register physical address");
+		return -EINVAL;
+	}
+	ctrl->reg_base = devm_ioremap(dev, res->start, resource_size(res));
+	if (!ctrl->reg_base) {
+		dev_err(dev, "Failed to map PM APCC Global registers\n");
+		return -ENOMEM;
+	}
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "apcs-csr");
+	if (!res) {
+		dev_err(dev, "Missing APCS CSR physical base address");
+		return -EINVAL;
+	}
+	ctrl->apcs_csr_base = devm_ioremap(dev, res->start, resource_size(res));
+	if (!ctrl->apcs_csr_base) {
+		dev_err(dev, "Failed to map APCS CSR registers\n");
+		return -ENOMEM;
+	}
+
+	ctrl->clk_src_override = of_property_read_bool(dev->of_node,
+					       "qcom,clock-source-override");
+
+	if (ctrl->clk_src_override)
+		dev_info(dev, "overriding clock sources across APM switch\n");
+
+	ctrl->version = readl_relaxed(ctrl->apcs_csr_base + APCS_VERSION);
+
+	if (ctrl->version >= HMSS_VERSION_1P2)
+		return ret;
+
+	ctrl->apcs_spm_events_addr = devm_kzalloc(&pdev->dev,
+						  SPM_EVENT_NUM
+						  * sizeof(void __iomem *),
+						  GFP_KERNEL);
+	if (!ctrl->apcs_spm_events_addr) {
+		dev_err(dev, "Failed to allocate memory for APCS SPM event registers\n");
+		return -ENOMEM;
+	}
+
+	for (i = 0; i < SPM_EVENT_NUM; i++) {
+		res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+						   res_name[i]);
+		if (!res) {
+			dev_err(dev, "Missing address for %s\n", res_name[i]);
+			ret = -EINVAL;
+			goto free_events;
+		}
+
+		ctrl->apcs_spm_events_addr[i] = devm_ioremap(dev, res->start,
+						resource_size(res));
+		if (!ctrl->apcs_spm_events_addr[i]) {
+			dev_err(dev, "Failed to map %s\n", res_name[i]);
+			ret = -ENOMEM;
+			goto free_events;
+		}
+
+		dev_dbg(dev, "%s event phys: %pa virt:0x%p\n", res_name[i],
+			&res->start, ctrl->apcs_spm_events_addr[i]);
+	}
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+					   "apc0-pll-ctl");
+	if (!res) {
+		dev_err(dev, "Missing APC0 PLL CTL physical address\n");
+		ret = -EINVAL;
+		goto free_events;
+	}
+
+	ctrl->apc0_pll_ctl_addr = devm_ioremap(dev,
+					   res->start,
+					   resource_size(res));
+	if (!ctrl->apc0_pll_ctl_addr) {
+		dev_err(dev, "Failed to map APC0 PLL CTL register\n");
+		ret = -ENOMEM;
+		goto free_events;
+	}
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+					   "apc1-pll-ctl");
+	if (!res) {
+		dev_err(dev, "Missing APC1 PLL CTL physical address\n");
+		ret = -EINVAL;
+		goto free_events;
+	}
+
+	ctrl->apc1_pll_ctl_addr = devm_ioremap(dev,
+					   res->start,
+					   resource_size(res));
+	if (!ctrl->apc1_pll_ctl_addr) {
+		dev_err(dev, "Failed to map APC1 PLL CTL register\n");
+		ret = -ENOMEM;
+		goto free_events;
+	}
+
+	return ret;
+
+free_events:
+	devm_kfree(dev, ctrl->apcs_spm_events_addr);
+	return ret;
+}
+
+/* MSM8953 register offset definition */
+#define MSM8953_APM_DLY_CNTR		0x2ac
+
+/* Register field shift definitions */
+#define APM_CTL_SEL_SWITCH_DLY_SHIFT	0
+#define APM_CTL_RESUME_CLK_DLY_SHIFT	8
+#define APM_CTL_HALT_CLK_DLY_SHIFT	16
+#define APM_CTL_POST_HALT_DLY_SHIFT	24
+
+/* Register field mask definitions */
+#define APM_CTL_SEL_SWITCH_DLY_MASK	GENMASK(7, 0)
+#define APM_CTL_RESUME_CLK_DLY_MASK	GENMASK(15, 8)
+#define APM_CTL_HALT_CLK_DLY_MASK	GENMASK(23, 16)
+#define APM_CTL_POST_HALT_DLY_MASK	GENMASK(31, 24)
+
+/*
+ * Get the resources associated with the MSM8953 APM controller from
+ * device tree, remap all I/O addresses, and program the initial
+ * register configuration required for the MSM8953 APM controller device.
+ */
+static int msm8953_apm_ctrl_init(struct platform_device *pdev,
+				 struct msm_apm_ctrl_dev *ctrl)
+{
+	struct device *dev = &pdev->dev;
+	struct resource *res;
+	u32 delay_counter, val = 0, regval = 0;
+	int rc = 0;
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pm-apcc-glb");
+	if (!res) {
+		dev_err(dev, "Missing PM APCC Global register physical address\n");
+		return -ENODEV;
+	}
+	ctrl->reg_base = devm_ioremap(dev, res->start, resource_size(res));
+	if (!ctrl->reg_base) {
+		dev_err(dev, "Failed to map PM APCC Global registers\n");
+		return -ENOMEM;
+	}
+
+	/*
+	 * Initial APM register configuration required before starting
+	 * APM HW controller.
+	 */
+	regval = readl_relaxed(ctrl->reg_base + MSM8953_APM_DLY_CNTR);
+	val = regval;
+
+	if (of_find_property(dev->of_node, "qcom,apm-post-halt-delay", NULL)) {
+		rc = of_property_read_u32(dev->of_node,
+				"qcom,apm-post-halt-delay", &delay_counter);
+		if (rc < 0) {
+			dev_err(dev, "apm-post-halt-delay read failed, rc = %d",
+				rc);
+			return rc;
+		}
+
+		val &= ~APM_CTL_POST_HALT_DLY_MASK;
+		val |= (delay_counter << APM_CTL_POST_HALT_DLY_SHIFT)
+			& APM_CTL_POST_HALT_DLY_MASK;
+	}
+
+	if (of_find_property(dev->of_node, "qcom,apm-halt-clk-delay", NULL)) {
+		rc = of_property_read_u32(dev->of_node,
+				"qcom,apm-halt-clk-delay", &delay_counter);
+		if (rc < 0) {
+			dev_err(dev, "apm-halt-clk-delay read failed, rc = %d",
+				rc);
+			return rc;
+		}
+
+		val &= ~APM_CTL_HALT_CLK_DLY_MASK;
+		val |= (delay_counter << APM_CTL_HALT_CLK_DLY_SHIFT)
+			& APM_CTL_HALT_CLK_DLY_MASK;
+	}
+
+	if (of_find_property(dev->of_node, "qcom,apm-resume-clk-delay", NULL)) {
+		rc = of_property_read_u32(dev->of_node,
+				"qcom,apm-resume-clk-delay", &delay_counter);
+		if (rc < 0) {
+			dev_err(dev, "apm-resume-clk-delay read failed, rc = %d",
+				rc);
+			return rc;
+		}
+
+		val &= ~APM_CTL_RESUME_CLK_DLY_MASK;
+		val |= (delay_counter << APM_CTL_RESUME_CLK_DLY_SHIFT)
+			& APM_CTL_RESUME_CLK_DLY_MASK;
+	}
+
+	if (of_find_property(dev->of_node, "qcom,apm-sel-switch-delay", NULL)) {
+		rc = of_property_read_u32(dev->of_node,
+				"qcom,apm-sel-switch-delay", &delay_counter);
+		if (rc < 0) {
+			dev_err(dev, "apm-sel-switch-delay read failed, rc = %d",
+				rc);
+			return rc;
+		}
+
+		val &= ~APM_CTL_SEL_SWITCH_DLY_MASK;
+		val |= (delay_counter << APM_CTL_SEL_SWITCH_DLY_SHIFT)
+			& APM_CTL_SEL_SWITCH_DLY_MASK;
+	}
+
+	if (val != regval) {
+		writel_relaxed(val, ctrl->reg_base + MSM8953_APM_DLY_CNTR);
+		/* make sure write completes before return */
+		mb();
+	}
+
+	return rc;
+}
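Each of the four optional DT delay properties above updates one byte-wide field of MSM8953_APM_DLY_CNTR with the same read-modify-write shape. A hypothetical helper that factors out that shape (not part of the driver):

static u32 apm_dly_set_field(u32 regval, u32 counter, u32 shift, u32 mask)
{
	regval &= ~mask;
	regval |= (counter << shift) & mask;
	return regval;
}

so that, for example, the post-halt block reduces to:

	val = apm_dly_set_field(val, delay_counter,
				APM_CTL_POST_HALT_DLY_SHIFT,
				APM_CTL_POST_HALT_DLY_MASK);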
+
+static int msm_apm_secure_clock_source_override(
+			struct msm_apm_ctrl_dev *ctrl_dev, bool enable)
+{
+	int ret;
+
+	if (ctrl_dev->clk_src_override) {
+		ret = __invoke_psci_fn_smc(0xC4000020, 3, enable ?
+					   CLOCK_ASSERT_ENABLE :
+					   CLOCK_ASSERT_DISABLE, 0);
+		if (ret)
+			dev_err(ctrl_dev->dev, "PSCI request to switch to %s clock source failed\n",
+				enable ? "GPLL0" : "original");
+	}
+
+	return 0;
+}
+
+static int msm8996_apm_wait_for_switch(struct msm_apm_ctrl_dev *ctrl_dev,
+					u32 done_val)
+{
+	int timeout = MSM_APM_SWITCH_TIMEOUT_US;
+	u32 regval;
+
+	while (timeout > 0) {
+		regval = readl_relaxed(ctrl_dev->reg_base + APCC_APM_CTL_STS);
+		if ((regval & MSM_APM_CTL_STS_MASK) == done_val)
+			break;
+
+		udelay(1);
+		timeout--;
+	}
+
+	if (timeout == 0) {
+		dev_err(ctrl_dev->dev, "%s switch timed out. APCC_APM_CTL_STS=0x%x\n",
+			done_val == MSM_APM_MX_DONE_VAL
+				? "APCC to MX" : "MX to APCC",
+			regval);
+		return -ETIMEDOUT;
+	}
+
+	return 0;
+}
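The loop above is the classic poll-with-timeout shape: read the status register once per microsecond until its low bits equal the expected done value, for at most MSM_APM_SWITCH_TIMEOUT_US iterations. The callers hold a spinlock with interrupts off, so if the generic helper from <linux/iopoll.h> were used instead, it would have to be the atomic variant; an equivalent sketch:

#include <linux/iopoll.h>

static int example_wait_for_switch(void __iomem *sts, u32 done_val)
{
	u32 regval;

	/* poll every 1 us; give up after MSM_APM_SWITCH_TIMEOUT_US */
	return readl_poll_timeout_atomic(sts, regval,
			(regval & MSM_APM_CTL_STS_MASK) == done_val,
			1, MSM_APM_SWITCH_TIMEOUT_US);
}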
+
+static int msm8996_apm_switch_to_mx(struct msm_apm_ctrl_dev *ctrl_dev)
+{
+	unsigned long flags;
+	int i, ret;
+
+	mutex_lock(&scm_lmh_lock);
+	spin_lock_irqsave(&ctrl_dev->lock, flags);
+
+	ret = msm_apm_secure_clock_source_override(ctrl_dev, true);
+	if (ret)
+		goto done;
+
+	/* Perform revision-specific programming steps */
+	if (ctrl_dev->version < HMSS_VERSION_1P2) {
+		/* Clear SPM events */
+		for (i = 0; i < SPM_EVENT_NUM; i++)
+			writel_relaxed(SPM_EVENT_CLEAR_VAL,
+				       ctrl_dev->apcs_spm_events_addr[i]);
+
+		udelay(SPM_WAKEUP_DELAY_US);
+
+		/* Switch APC/CBF to GPLL0 clock */
+		writel_relaxed(APCS_GFMUXA_SEL_VAL,
+			       ctrl_dev->apcs_csr_base + APCS_SPARE);
+		ndelay(200);
+		writel_relaxed(MSM_APM_OVERRIDE_SEL_VAL,
+			       ctrl_dev->apc0_pll_ctl_addr);
+		ndelay(200);
+		writel_relaxed(MSM_APM_OVERRIDE_SEL_VAL,
+			       ctrl_dev->apc1_pll_ctl_addr);
+
+		/* Ensure writes complete before proceeding */
+		mb();
+	}
+
+	/* Switch arrays to MX supply and wait for its completion */
+	writel_relaxed(MSM_APM_MX_MODE_VAL, ctrl_dev->reg_base +
+		       APCC_APM_MODE);
+
+	/* Ensure write above completes before delaying */
+	mb();
+
+	ret = msm8996_apm_wait_for_switch(ctrl_dev, MSM_APM_MX_DONE_VAL);
+
+	/* Perform revision-specific programming steps */
+	if (ctrl_dev->version < HMSS_VERSION_1P2) {
+		/* Switch APC/CBF clocks to original source */
+		writel_relaxed(APCS_GFMUXA_DESEL_VAL,
+			       ctrl_dev->apcs_csr_base + APCS_SPARE);
+		ndelay(200);
+		writel_relaxed(MSM_APM_SEC_CLK_SEL_VAL,
+			       ctrl_dev->apc0_pll_ctl_addr);
+		ndelay(200);
+		writel_relaxed(MSM_APM_SEC_CLK_SEL_VAL,
+			       ctrl_dev->apc1_pll_ctl_addr);
+
+		/* Complete clock source switch before SPM event sequence */
+		mb();
+
+		/* Set SPM events */
+		for (i = 0; i < SPM_EVENT_NUM; i++)
+			writel_relaxed(SPM_EVENT_SET_VAL,
+				       ctrl_dev->apcs_spm_events_addr[i]);
+	}
+
+	/*
+	 * Ensure that HMSS v1.0/v1.1 register writes are completed before
+	 * bailing out in the case of a switching time out.
+	 */
+	if (ret)
+		goto done;
+
+	ret = msm_apm_secure_clock_source_override(ctrl_dev, false);
+	if (ret)
+		goto done;
+
+	ctrl_dev->supply = MSM_APM_SUPPLY_MX;
+	dev_dbg(ctrl_dev->dev, "APM supply switched to MX\n");
+
+done:
+	spin_unlock_irqrestore(&ctrl_dev->lock, flags);
+	mutex_unlock(&scm_lmh_lock);
+
+	return ret;
+}
+
+static int msm8996_apm_switch_to_apcc(struct msm_apm_ctrl_dev *ctrl_dev)
+{
+	unsigned long flags;
+	int i, ret;
+
+	mutex_lock(&scm_lmh_lock);
+	spin_lock_irqsave(&ctrl_dev->lock, flags);
+
+	ret = msm_apm_secure_clock_source_override(ctrl_dev, true);
+	if (ret)
+		goto done;
+
+	/* Perform revision-specific programming steps */
+	if (ctrl_dev->version < HMSS_VERSION_1P2) {
+		/* Clear SPM events */
+		for (i = 0; i < SPM_EVENT_NUM; i++)
+			writel_relaxed(SPM_EVENT_CLEAR_VAL,
+				       ctrl_dev->apcs_spm_events_addr[i]);
+
+		udelay(SPM_WAKEUP_DELAY_US);
+
+		/* Switch APC/CBF to GPLL0 clock */
+		writel_relaxed(APCS_GFMUXA_SEL_VAL,
+			       ctrl_dev->apcs_csr_base + APCS_SPARE);
+		ndelay(200);
+		writel_relaxed(MSM_APM_OVERRIDE_SEL_VAL,
+			       ctrl_dev->apc0_pll_ctl_addr);
+		ndelay(200);
+		writel_relaxed(MSM_APM_OVERRIDE_SEL_VAL,
+			       ctrl_dev->apc1_pll_ctl_addr);
+
+		/* Ensure previous writes complete before proceeding */
+		mb();
+	}
+
+	/* Switch arrays to APCC supply and wait for its completion */
+	writel_relaxed(MSM_APM_APCC_MODE_VAL, ctrl_dev->reg_base +
+		       APCC_APM_MODE);
+
+	/* Ensure write above completes before delaying */
+	mb();
+
+	ret = msm8996_apm_wait_for_switch(ctrl_dev, MSM_APM_APCC_DONE_VAL);
+
+	/* Perform revision-specific programming steps */
+	if (ctrl_dev->version < HMSS_VERSION_1P2) {
+		/* Set SPM events */
+		for (i = 0; i < SPM_EVENT_NUM; i++)
+			writel_relaxed(SPM_EVENT_SET_VAL,
+				       ctrl_dev->apcs_spm_events_addr[i]);
+
+		/* Complete SPM event sequence before clock source switch */
+		mb();
+
+		/* Switch APC/CBF clocks to original source */
+		writel_relaxed(APCS_GFMUXA_DESEL_VAL,
+			       ctrl_dev->apcs_csr_base + APCS_SPARE);
+		ndelay(200);
+		writel_relaxed(MSM_APM_SEC_CLK_SEL_VAL,
+			       ctrl_dev->apc0_pll_ctl_addr);
+		ndelay(200);
+		writel_relaxed(MSM_APM_SEC_CLK_SEL_VAL,
+			       ctrl_dev->apc1_pll_ctl_addr);
+	}
+
+	/*
+	 * Ensure that HMSS v1.0/v1.1 register writes are completed before
+	 * bailing out in the case of a switching time out.
+	 */
+	if (ret)
+		goto done;
+
+	ret = msm_apm_secure_clock_source_override(ctrl_dev, false);
+	if (ret)
+		goto done;
+
+	ctrl_dev->supply = MSM_APM_SUPPLY_APCC;
+	dev_dbg(ctrl_dev->dev, "APM supply switched to APCC\n");
+
+done:
+	spin_unlock_irqrestore(&ctrl_dev->lock, flags);
+	mutex_unlock(&scm_lmh_lock);
+
+	return ret;
+}
+
+static int msm8996pro_apm_switch_to_mx(struct msm_apm_ctrl_dev *ctrl_dev)
+{
+	unsigned long flags;
+	int ret;
+
+	spin_lock_irqsave(&ctrl_dev->lock, flags);
+
+	/* Switch arrays to MX supply and wait for its completion */
+	writel_relaxed(MSM_APM_MX_MODE_VAL, ctrl_dev->reg_base +
+		       APCC_APM_MODE);
+
+	/* Ensure write above completes before delaying */
+	mb();
+
+	ret = msm8996_apm_wait_for_switch(ctrl_dev, MSM_APM_MX_DONE_VAL);
+	if (ret)
+		goto done;
+
+	ctrl_dev->supply = MSM_APM_SUPPLY_MX;
+	dev_dbg(ctrl_dev->dev, "APM supply switched to MX\n");
+
+done:
+	spin_unlock_irqrestore(&ctrl_dev->lock, flags);
+
+	return ret;
+}
+
+static int msm8996pro_apm_switch_to_apcc(struct msm_apm_ctrl_dev *ctrl_dev)
+{
+	unsigned long flags;
+	int ret;
+
+	spin_lock_irqsave(&ctrl_dev->lock, flags);
+
+	/* Switch arrays to APCC supply and wait for its completion */
+	writel_relaxed(MSM_APM_APCC_MODE_VAL, ctrl_dev->reg_base +
+		       APCC_APM_MODE);
+
+	/* Ensure write above completes before delaying */
+	mb();
+
+	ret = msm8996_apm_wait_for_switch(ctrl_dev, MSM_APM_APCC_DONE_VAL);
+	if (ret)
+		goto done;
+
+	ctrl_dev->supply = MSM_APM_SUPPLY_APCC;
+	dev_dbg(ctrl_dev->dev, "APM supply switched to APCC\n");
+
+done:
+	spin_unlock_irqrestore(&ctrl_dev->lock, flags);
+
+	return ret;
+}
+
+/* MSM8953 register value definitions */
+#define MSM8953_APM_MX_MODE_VAL            0x00
+#define MSM8953_APM_APCC_MODE_VAL          0x02
+#define MSM8953_APM_MX_DONE_VAL            0x00
+#define MSM8953_APM_APCC_DONE_VAL          0x03
+
+/* MSM8953 register offset definitions */
+#define MSM8953_APCC_APM_MODE              0x000002a8
+#define MSM8953_APCC_APM_CTL_STS           0x000002b0
+
+/* 8953 constants */
+#define MSM8953_APM_SWITCH_TIMEOUT_US      500
+
+/* Register bit mask definitions */
+#define MSM8953_APM_CTL_STS_MASK           0x1f
+
+static int msm8953_apm_switch_to_mx(struct msm_apm_ctrl_dev *ctrl_dev)
+{
+	int timeout = MSM8953_APM_SWITCH_TIMEOUT_US;
+	u32 regval;
+	int ret = 0;
+	unsigned long flags;
+
+	spin_lock_irqsave(&ctrl_dev->lock, flags);
+
+	/* Switch arrays to MX supply and wait for its completion */
+	writel_relaxed(MSM8953_APM_MX_MODE_VAL, ctrl_dev->reg_base +
+		       MSM8953_APCC_APM_MODE);
+
+	/* Ensure write above completes before delaying */
+	mb();
+
+	while (timeout > 0) {
+		regval = readl_relaxed(ctrl_dev->reg_base +
+					MSM8953_APCC_APM_CTL_STS);
+		if ((regval & MSM8953_APM_CTL_STS_MASK) ==
+				MSM8953_APM_MX_DONE_VAL)
+			break;
+
+		udelay(1);
+		timeout--;
+	}
+
+	if (timeout == 0) {
+		ret = -ETIMEDOUT;
+		dev_err(ctrl_dev->dev, "APCC to MX APM switch timed out. APCC_APM_CTL_STS=0x%x\n",
+			regval);
+	} else {
+		ctrl_dev->supply = MSM_APM_SUPPLY_MX;
+		dev_dbg(ctrl_dev->dev, "APM supply switched to MX\n");
+	}
+
+	spin_unlock_irqrestore(&ctrl_dev->lock, flags);
+
+	return ret;
+}
+
+static int msm8953_apm_switch_to_apcc(struct msm_apm_ctrl_dev *ctrl_dev)
+{
+	int timeout = MSM8953_APM_SWITCH_TIMEOUT_US;
+	u32 regval;
+	int ret = 0;
+	unsigned long flags;
+
+	spin_lock_irqsave(&ctrl_dev->lock, flags);
+
+	/* Switch arrays to APCC supply and wait for its completion */
+	writel_relaxed(MSM8953_APM_APCC_MODE_VAL, ctrl_dev->reg_base +
+		       MSM8953_APCC_APM_MODE);
+
+	/* Ensure write above completes before delaying */
+	mb();
+
+	while (timeout > 0) {
+		regval = readl_relaxed(ctrl_dev->reg_base +
+					MSM8953_APCC_APM_CTL_STS);
+		if ((regval & MSM8953_APM_CTL_STS_MASK) ==
+				MSM8953_APM_APCC_DONE_VAL)
+			break;
+
+		udelay(1);
+		timeout--;
+	}
+
+	if (timeout == 0) {
+		ret = -ETIMEDOUT;
+		dev_err(ctrl_dev->dev, "MX to APCC APM switch timed out. APCC_APM_CTL_STS=0x%x\n",
+			regval);
+	} else {
+		ctrl_dev->supply = MSM_APM_SUPPLY_APCC;
+		dev_dbg(ctrl_dev->dev, "APM supply switched to APCC\n");
+	}
+
+	spin_unlock_irqrestore(&ctrl_dev->lock, flags);
+
+	return ret;
+}
+
+static int msm_apm_switch_to_mx(struct msm_apm_ctrl_dev *ctrl_dev)
+{
+	int ret = 0;
+
+	switch (ctrl_dev->msm_id) {
+	case MSM8996_ID:
+		ret = msm8996_apm_switch_to_mx(ctrl_dev);
+		break;
+	case MSM8996PRO_ID:
+		ret = msm8996pro_apm_switch_to_mx(ctrl_dev);
+		break;
+	case MSM8953_ID:
+		ret = msm8953_apm_switch_to_mx(ctrl_dev);
+		break;
+	}
+
+	return ret;
+}
+
+static int msm_apm_switch_to_apcc(struct msm_apm_ctrl_dev *ctrl_dev)
+{
+	int ret = 0;
+
+	switch (ctrl_dev->msm_id) {
+	case MSM8996_ID:
+		ret = msm8996_apm_switch_to_apcc(ctrl_dev);
+		break;
+	case MSM8996PRO_ID:
+		ret = msm8996pro_apm_switch_to_apcc(ctrl_dev);
+		break;
+	case MSM8953_ID:
+		ret = msm8953_apm_switch_to_apcc(ctrl_dev);
+		break;
+	}
+
+	return ret;
+}
+
+/**
+ * msm_apm_get_supply() - Returns the supply that is currently
+ *			powering the memory arrays
+ * @ctrl_dev:                   Pointer to an MSM APM controller device
+ *
+ * Returns the supply currently selected by the APM.
+ */
+int msm_apm_get_supply(struct msm_apm_ctrl_dev *ctrl_dev)
+{
+	return ctrl_dev->supply;
+}
+EXPORT_SYMBOL(msm_apm_get_supply);
+
+/**
+ * msm_apm_set_supply() - Perform the necessary steps to switch the voltage
+ *                        source of the memory arrays to a given supply
+ * @ctrl_dev:                   Pointer to an MSM APM controller device
+ * @supply:                     Power rail to use as supply for the memory
+ *                              arrays
+ *
+ * Returns 0 on success, -ETIMEDOUT on APM switch timeout, or -EPERM if
+ * the supply is not supported.
+ */
+int msm_apm_set_supply(struct msm_apm_ctrl_dev *ctrl_dev,
+		       enum msm_apm_supply supply)
+{
+	int ret;
+
+	switch (supply) {
+	case MSM_APM_SUPPLY_APCC:
+		ret = msm_apm_switch_to_apcc(ctrl_dev);
+		break;
+	case MSM_APM_SUPPLY_MX:
+		ret = msm_apm_switch_to_mx(ctrl_dev);
+		break;
+	default:
+		ret = -EPERM;
+		break;
+	}
+
+	return ret;
+}
+EXPORT_SYMBOL(msm_apm_set_supply);
+
+/**
+ * msm_apm_ctrl_dev_get() - get a handle to the MSM APM controller linked to
+ *                          the device in device tree
+ * @dev:                    Pointer to the device
+ *
+ * The device must specify "qcom,apm-ctrl" property in its device tree
+ * node which points to an MSM APM controller device node.
+ *
+ * Returns an MSM APM controller handle if successful or ERR_PTR on any error.
+ * If the APM controller device hasn't probed yet, ERR_PTR(-EPROBE_DEFER) is
+ * returned.
+ */
+struct msm_apm_ctrl_dev *msm_apm_ctrl_dev_get(struct device *dev)
+{
+	struct msm_apm_ctrl_dev *ctrl_dev = NULL;
+	struct msm_apm_ctrl_dev *dev_found = ERR_PTR(-EPROBE_DEFER);
+	struct device_node *ctrl_node;
+
+	if (!dev || !dev->of_node) {
+		pr_err("Invalid device node\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	ctrl_node = of_parse_phandle(dev->of_node, "qcom,apm-ctrl", 0);
+	if (!ctrl_node) {
+		pr_err("Could not find qcom,apm-ctrl property in %s\n",
+		       dev->of_node->full_name);
+		return ERR_PTR(-ENXIO);
+	}
+
+	mutex_lock(&apm_ctrl_list_mutex);
+	list_for_each_entry(ctrl_dev, &apm_ctrl_list, list) {
+		if (ctrl_dev->dev && ctrl_dev->dev->of_node == ctrl_node) {
+			dev_found = ctrl_dev;
+			break;
+		}
+	}
+	mutex_unlock(&apm_ctrl_list_mutex);
+
+	of_node_put(ctrl_node);
+	return dev_found;
+}
+EXPORT_SYMBOL(msm_apm_ctrl_dev_get);
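A consumer (for instance, a CPU regulator driver) is expected to resolve the controller once at probe time and then switch supplies as needed. A hedged usage sketch, assuming the consumer's DT node carries the qcom,apm-ctrl phandle:

static int example_apm_consumer(struct device *dev)
{
	struct msm_apm_ctrl_dev *apm = msm_apm_ctrl_dev_get(dev);

	if (IS_ERR(apm))
		return PTR_ERR(apm);	/* may be -EPROBE_DEFER */

	if (msm_apm_get_supply(apm) != MSM_APM_SUPPLY_MX)
		return msm_apm_set_supply(apm, MSM_APM_SUPPLY_MX);

	return 0;
}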
+
+#if defined(CONFIG_DEBUG_FS)
+
+static int apm_supply_dbg_open(struct inode *inode, struct file *filep)
+{
+	filep->private_data = inode->i_private;
+
+	return 0;
+}
+
+static ssize_t apm_supply_dbg_read(struct file *filep, char __user *ubuf,
+				   size_t count, loff_t *ppos)
+{
+	struct msm_apm_ctrl_dev *ctrl_dev = filep->private_data;
+	char buf[10];
+	int len;
+
+	if (!ctrl_dev) {
+		pr_err("invalid apm ctrl handle\n");
+		return -ENODEV;
+	}
+
+	if (ctrl_dev->supply == MSM_APM_SUPPLY_APCC)
+		len = snprintf(buf, sizeof(buf), "APCC\n");
+	else if (ctrl_dev->supply == MSM_APM_SUPPLY_MX)
+		len = snprintf(buf, sizeof(buf), "MX\n");
+	else
+		len = snprintf(buf, sizeof(buf), "ERR\n");
+
+	return simple_read_from_buffer(ubuf, count, ppos, buf, len);
+}
+
+static const struct file_operations apm_supply_fops = {
+	.open = apm_supply_dbg_open,
+	.read = apm_supply_dbg_read,
+};
+
+static void apm_debugfs_base_init(void)
+{
+	apm_debugfs_base = debugfs_create_dir("msm-apm", NULL);
+
+	if (IS_ERR_OR_NULL(apm_debugfs_base))
+		pr_err("msm-apm debugfs base directory creation failed\n");
+}
+
+static void apm_debugfs_init(struct msm_apm_ctrl_dev *ctrl_dev)
+{
+	struct dentry *temp;
+
+	if (IS_ERR_OR_NULL(apm_debugfs_base)) {
+		pr_err("Base directory missing, cannot create apm debugfs nodes\n");
+		return;
+	}
+
+	ctrl_dev->debugfs = debugfs_create_dir(dev_name(ctrl_dev->dev),
+					       apm_debugfs_base);
+	if (IS_ERR_OR_NULL(ctrl_dev->debugfs)) {
+		pr_err("%s debugfs directory creation failed\n",
+		       dev_name(ctrl_dev->dev));
+		return;
+	}
+
+	temp = debugfs_create_file("supply", 0444, ctrl_dev->debugfs,
+				   ctrl_dev, &apm_supply_fops);
+	if (IS_ERR_OR_NULL(temp)) {
+		pr_err("supply mode creation failed\n");
+		return;
+	}
+}
+
+static void apm_debugfs_deinit(struct msm_apm_ctrl_dev *ctrl_dev)
+{
+	if (!IS_ERR_OR_NULL(ctrl_dev->debugfs))
+		debugfs_remove_recursive(ctrl_dev->debugfs);
+}
+
+static void apm_debugfs_base_remove(void)
+{
+	debugfs_remove_recursive(apm_debugfs_base);
+}
+#else
+
+static void apm_debugfs_base_init(void)
+{}
+
+static void apm_debugfs_init(struct msm_apm_ctrl_dev *ctrl_dev)
+{}
+
+static void apm_debugfs_deinit(struct msm_apm_ctrl_dev *ctrl_dev)
+{}
+
+static void apm_debugfs_base_remove(void)
+{}
+
+#endif
+
+static const struct of_device_id msm_apm_match_table[] = {
+	{
+		.compatible = "qcom,msm-apm",
+		.data = (void *)(uintptr_t)MSM8996_ID,
+	},
+	{
+		.compatible = "qcom,msm8996pro-apm",
+		.data = (void *)(uintptr_t)MSM8996PRO_ID,
+	},
+	{
+		.compatible = "qcom,msm8953-apm",
+		.data = (void *)(uintptr_t)MSM8953_ID,
+	},
+	{}
+};
+
+static int msm_apm_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct msm_apm_ctrl_dev *ctrl;
+	const struct of_device_id *match;
+	int ret = 0;
+
+	dev_dbg(dev, "probing MSM Array Power Mux driver\n");
+
+	if (!dev->of_node) {
+		dev_err(dev, "Device tree node is missing\n");
+		return -ENODEV;
+	}
+
+	match = of_match_device(msm_apm_match_table, dev);
+	if (!match)
+		return -ENODEV;
+
+	ctrl = devm_kzalloc(dev, sizeof(*ctrl), GFP_KERNEL);
+	if (!ctrl)
+		return -ENOMEM;
+
+	INIT_LIST_HEAD(&ctrl->list);
+	spin_lock_init(&ctrl->lock);
+	ctrl->dev = dev;
+	ctrl->msm_id = (uintptr_t)match->data;
+	platform_set_drvdata(pdev, ctrl);
+
+	switch (ctrl->msm_id) {
+	case MSM8996_ID:
+	case MSM8996PRO_ID:
+		ret = msm_apm_ctrl_devm_ioremap(pdev, ctrl);
+		if (ret) {
+			dev_err(dev, "Failed to add APM controller device\n");
+			return ret;
+		}
+		break;
+	case MSM8953_ID:
+		ret = msm8953_apm_ctrl_init(pdev, ctrl);
+		if (ret) {
+			dev_err(dev, "Failed to initialize APM controller device: ret=%d\n",
+				ret);
+			return ret;
+		}
+		break;
+	default:
+		dev_err(dev, "unable to add APM controller device for msm_id:%d\n",
+			ctrl->msm_id);
+		return -ENODEV;
+	}
+
+	apm_debugfs_init(ctrl);
+	mutex_lock(&apm_ctrl_list_mutex);
+	list_add_tail(&ctrl->list, &apm_ctrl_list);
+	mutex_unlock(&apm_ctrl_list_mutex);
+
+	dev_dbg(dev, "MSM Array Power Mux driver probe successful");
+
+	return ret;
+}
+
+static int msm_apm_remove(struct platform_device *pdev)
+{
+	struct msm_apm_ctrl_dev *ctrl_dev;
+
+	ctrl_dev = platform_get_drvdata(pdev);
+	if (ctrl_dev) {
+		mutex_lock(&apm_ctrl_list_mutex);
+		list_del(&ctrl_dev->list);
+		mutex_unlock(&apm_ctrl_list_mutex);
+		apm_debugfs_deinit(ctrl_dev);
+	}
+
+	return 0;
+}
+
+static struct platform_driver msm_apm_driver = {
+	.driver		= {
+		.name		= MSM_APM_DRIVER_NAME,
+		.of_match_table	= msm_apm_match_table,
+		.owner		= THIS_MODULE,
+	},
+	.probe		= msm_apm_probe,
+	.remove		= msm_apm_remove,
+};
+
+static int __init msm_apm_init(void)
+{
+	apm_debugfs_base_init();
+	return platform_driver_register(&msm_apm_driver);
+}
+
+static void __exit msm_apm_exit(void)
+{
+	platform_driver_unregister(&msm_apm_driver);
+	apm_debugfs_base_remove();
+}
+
+arch_initcall(msm_apm_init);
+module_exit(msm_apm_exit);
+
+MODULE_DESCRIPTION("MSM Array Power Mux driver");
+MODULE_LICENSE("GPL v2");
diff -Nruw linux-4.4.115-fbx/drivers/power/qcom./debug_core.c linux-4.4.115-fbx/drivers/power/qcom/debug_core.c
--- linux-4.4.115-fbx/drivers/power/qcom./debug_core.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/power/qcom/debug_core.c	2019-01-22 16:16:26.223271038 +0100
@@ -0,0 +1,330 @@
+/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/string.h>
+#include <linux/debugfs.h>
+#include <linux/ctype.h>
+#include <linux/cpu.h>
+#include "soc/qcom/msm-core.h"
+
+#define MAX_PSTATES 50
+#define NUM_OF_PENTRY 3 /* number of variables for ptable node */
+#define NUM_OF_EENTRY 2 /* number of variables for enable node */
+
+enum arg_offset {
+	CPU_OFFSET,
+	FREQ_OFFSET,
+	POWER_OFFSET,
+};
+
+struct core_debug {
+	int cpu;
+	struct cpu_pstate_pwr *head;
+	int enabled;
+	int len;
+	struct cpu_pwr_stats *ptr;
+	struct cpu_pstate_pwr *driver_data;
+	int driver_len;
+};
+
+static DEFINE_PER_CPU(struct core_debug, c_dgfs);
+static struct cpu_pwr_stats *msm_core_data;
+static struct debugfs_blob_wrapper help_msg = {
+	.data =
+"MSM CORE Debug-FS Support\n"
+"\n"
+"Hierarchy schema\n"
+"/sys/kernel/debug/msm_core\n"
+"  /help        - Static help text\n"
+"  /ptable      - write to p-state table\n"
+"  /enable      - enable the written p-state table\n"
+"  /ptable_dump - Dump the debug ptable\n"
+"\n"
+"Usage\n"
+" Input test frequency and power information in ptable:\n"
+" echo \"0 300000 120\" > ptable\n"
+" format: <cpu> <frequency in khz> <power>\n"
+"\n"
+" Enable the ptable for the cpu:\n"
+" echo \"0 1\" > enable\n"
+" format: <cpu> <1 to enable, 0 to disable>\n"
+" Note: Writing 0 to disable will reset/clear the ptable\n"
+"\n"
+" Dump the entire ptable:\n"
+" cat ptable\n"
+" -----  CPU0 - Enabled ---------\n"
+"     Freq       Power\n"
+"     700000       120\n"
+"-----  CPU0 - Live numbers -----\n"
+"   Freq       Power\n"
+"   300000      218\n"
+" -----  CPU1 - Written ---------\n"
+"     Freq       Power\n"
+"     700000       120\n"
+" Ptable dump will dump the status of the table as well\n"
+" It shows:\n"
+" Enabled -> for a cpu that debug ptable enabled\n"
+" Written -> for a cpu that has debug ptable values written\n"
+"            but not enabled\n"
+"\n",
+
+};
+
+static void add_to_ptable(unsigned int *arg)
+{
+	struct core_debug *node;
+	int i, cpu = arg[CPU_OFFSET];
+	uint32_t freq = arg[FREQ_OFFSET];
+	uint32_t power = arg[POWER_OFFSET];
+
+	if (!cpu_possible(cpu))
+		return;
+
+	if ((freq == 0) || (power == 0)) {
+		pr_warn("Incorrect power data\n");
+		return;
+	}
+
+	node = &per_cpu(c_dgfs, cpu);
+
+	if (node->len >= MAX_PSTATES) {
+		pr_warn("Dropped ptable update - no space left.\n");
+		return;
+	}
+
+	if (!node->head) {
+		node->head = kzalloc(sizeof(struct cpu_pstate_pwr) *
+				     (MAX_PSTATES + 1),
+					GFP_KERNEL);
+		if (!node->head)
+			return;
+	}
+
+	for (i = 0; i < node->len; i++) {
+		if (node->head[i].freq == freq) {
+			node->head[i].power = power;
+			return;
+		}
+	}
+
+	/*
+	 * Insert a new frequency; existing entries may need to shift
+	 * to keep the table in ascending order.
+	 */
+	for (i = MAX_PSTATES - 1; i > 0; i--) {
+		if (node->head[i-1].freq > freq) {
+			node->head[i].freq = node->head[i-1].freq;
+			node->head[i].power = node->head[i-1].power;
+		} else if (node->head[i-1].freq != 0) {
+			break;
+		}
+	}
+
+	if (node->len < MAX_PSTATES) {
+		node->head[i].freq = freq;
+		node->head[i].power = power;
+		node->len++;
+	}
+
+	if (node->ptr)
+		node->ptr->len = node->len;
+}
+
+static int split_ptable_args(char *line, unsigned int *arg, uint32_t n)
+{
+	char *args;
+	int i;
+	int ret = 0;
+
+	for (i = 0; i < n; i++) {
+		if (!line)
+			break;
+		args = strsep(&line, " ");
+		ret = kstrtouint(args, 10, &arg[i]);
+		if (ret)
+			return ret;
+	}
+	return ret;
+}
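+
+/*
+ * For example, the string "0 300000 120" from the help text above parses
+ * into arg[] = { 0 (cpu), 300000 (freq in khz), 120 (power) }.
+ */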
+
+static ssize_t msm_core_ptable_write(struct file *file,
+		const char __user *ubuf, size_t len, loff_t *offp)
+{
+	char *kbuf;
+	int ret;
+	unsigned int arg[3];
+
+	if (len == 0)
+		return 0;
+
+	kbuf = kzalloc(len + 1, GFP_KERNEL);
+	if (!kbuf)
+		return -ENOMEM;
+
+	if (copy_from_user(kbuf, ubuf, len)) {
+		ret = -EFAULT;
+		goto done;
+	}
+	kbuf[len] = '\0';
+	ret = split_ptable_args(kbuf, arg, NUM_OF_PENTRY);
+	if (!ret) {
+		add_to_ptable(arg);
+		ret = len;
+	}
+done:
+	kfree(kbuf);
+	return ret;
+}
+
+static void print_table(struct seq_file *m, struct cpu_pstate_pwr *c_n,
+		int len)
+{
+	int i;
+
+	seq_puts(m, "   Freq       Power\n");
+	for (i = 0; i < len; i++)
+		seq_printf(m, "  %d       %u\n", c_n[i].freq,
+				c_n[i].power);
+}
+
+static int msm_core_ptable_read(struct seq_file *m, void *data)
+{
+	int cpu;
+	struct core_debug *node;
+
+	for_each_possible_cpu(cpu) {
+		node = &per_cpu(c_dgfs, cpu);
+		if (node->head) {
+			seq_printf(m, "-----  CPU%d - %s - Debug -------\n",
+			cpu, node->enabled == 1 ? "Enabled" : "Written");
+			print_table(m, node->head, node->len);
+		}
+		if (msm_core_data[cpu].ptable) {
+			seq_printf(m, "--- CPU%d - Live numbers at %ldC---\n",
+			cpu, node->ptr->temp);
+			print_table(m, msm_core_data[cpu].ptable,
+					node->driver_len);
+		}
+	}
+	return 0;
+}
+
+static ssize_t msm_core_enable_write(struct file *file,
+		const char __user *ubuf, size_t len, loff_t *offp)
+{
+	char *kbuf;
+	int ret;
+	unsigned int arg[3];
+	int cpu;
+
+	if (len == 0)
+		return 0;
+
+	kbuf = kzalloc(len + 1, GFP_KERNEL);
+	if (!kbuf)
+		return -ENOMEM;
+
+	if (copy_from_user(kbuf, ubuf, len)) {
+		ret = -EFAULT;
+		goto done;
+	}
+	kbuf[len] = '\0';
+	ret = split_ptable_args(kbuf, arg, NUM_OF_EENTRY);
+	if (ret)
+		goto done;
+	cpu = arg[CPU_OFFSET];
+
+	if (cpu_possible(cpu)) {
+		struct core_debug *node = &per_cpu(c_dgfs, cpu);
+
+		if (arg[FREQ_OFFSET]) {
+			msm_core_data[cpu].ptable = node->head;
+			msm_core_data[cpu].len = node->len;
+		} else {
+			msm_core_data[cpu].ptable = node->driver_data;
+			msm_core_data[cpu].len = node->driver_len;
+			node->len = 0;
+		}
+		node->enabled = arg[FREQ_OFFSET];
+	}
+	ret = len;
+	blocking_notifier_call_chain(
+			get_power_update_notifier(), cpu, NULL);
+
+done:
+	kfree(kbuf);
+	return ret;
+}
+
+static const struct file_operations msm_core_enable_ops = {
+	.write = msm_core_enable_write,
+};
+
+static int msm_core_dump_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, msm_core_ptable_read, inode->i_private);
+}
+
+static const struct file_operations msm_core_ptable_ops = {
+	.open = msm_core_dump_open,
+	.read = seq_read,
+	.write = msm_core_ptable_write,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
+
+int msm_core_debug_init(void)
+{
+	struct dentry *dir = NULL;
+	struct dentry *file = NULL;
+	int i;
+
+	msm_core_data = get_cpu_pwr_stats();
+	/* a 'goto fail' here would return PTR_ERR(file = NULL), i.e. 0 */
+	if (!msm_core_data)
+		return -ENODEV;
+
+	dir = debugfs_create_dir("msm_core", NULL);
+	if (IS_ERR_OR_NULL(dir))
+		return PTR_ERR(dir);
+
+	file = debugfs_create_file("enable",
+			S_IRUSR|S_IRGRP|S_IWUSR|S_IWGRP, dir, NULL,
+			&msm_core_enable_ops);
+	if (IS_ERR_OR_NULL(file))
+		goto fail;
+
+	file = debugfs_create_file("ptable",
+			S_IRUSR|S_IRGRP|S_IWUSR|S_IWGRP, dir, NULL,
+			&msm_core_ptable_ops);
+	if (IS_ERR_OR_NULL(file))
+		goto fail;
+
+	help_msg.size = strlen(help_msg.data);
+	file = debugfs_create_blob("help", S_IRUGO, dir, &help_msg);
+	if (IS_ERR_OR_NULL(file))
+		goto fail;
+
+	for (i = 0; i < num_possible_cpus(); i++) {
+		per_cpu(c_dgfs, i).ptr = &msm_core_data[i];
+		per_cpu(c_dgfs, i).driver_data = msm_core_data[i].ptable;
+		per_cpu(c_dgfs, i).driver_len = msm_core_data[i].len;
+	}
+	return 0;
+fail:
+	debugfs_remove(dir);
+	return PTR_ERR(file);
+}
+late_initcall(msm_core_debug_init);
diff -Nruw linux-4.4.115-fbx/drivers/power/qcom./Kconfig linux-4.4.115-fbx/drivers/power/qcom/Kconfig
--- linux-4.4.115-fbx/drivers/power/qcom./Kconfig	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/power/qcom/Kconfig	2019-01-22 16:16:26.223271038 +0100
@@ -0,0 +1,66 @@
+config MSM_PM
+	depends on PM
+	select MSM_IDLE_STATS if DEBUG_FS
+	select CPU_IDLE_MULTIPLE_DRIVERS
+	bool "Qualcomm platform specific PM driver"
+	help
+	  Platform specific power driver to manage cores and L2
+	  low power modes. It interfaces with various system
+	  drivers and puts the cores into low power modes.
+
+config MSM_NOPM
+	default y if !PM
+	bool
+	help
+	  This enables bare-minimum support for power management at the
+	  platform level, i.e. WFI.
+
+config APSS_CORE_EA
+	depends on CPU_FREQ && PM_OPP
+	bool "Qualcomm Technology Inc specific power aware driver"
+	help
+	  Platform specific power aware driver to provide power
+	  and temperature information to the scheduler.
+
+config MSM_APM
+	bool "Qualcomm Technologies, Inc. platform specific APM driver"
+	help
+	  Platform specific driver to manage the power source of
+	  memory arrays. Interfaces with regulator drivers to ensure
+	  SRAM Vmin requirements are met across different performance
+	  levels.
+
+if MSM_PM
+menuconfig MSM_IDLE_STATS
+	bool "Collect idle statistics"
+	help
+	  Collect idle statistics for the various core low power modes
+	  and export them in /proc/msm_pm_stats. Users can read this
+	  data to determine which low power modes the cores entered
+	  and how many times each mode was entered.
+
+if MSM_IDLE_STATS
+
+config MSM_IDLE_STATS_FIRST_BUCKET
+	int "First bucket time"
+	default 62500
+	help
+	  Upper time limit in nanoseconds of first bucket.
+
+config MSM_IDLE_STATS_BUCKET_SHIFT
+	int "Bucket shift"
+	default 2
+
+config MSM_IDLE_STATS_BUCKET_COUNT
+	int "Bucket count"
+	default 10
+
+config MSM_SUSPEND_STATS_FIRST_BUCKET
+	int "First bucket time for suspend"
+	default 1000000000
+	help
+	  Upper time limit in nanoseconds of first bucket of the
+	  histogram.  This is for collecting statistics on suspend.
+
+endif # MSM_IDLE_STATS
+endif # MSM_PM
diff -Nruw linux-4.4.115-fbx/drivers/power/qcom./lpm-stats.c linux-4.4.115-fbx/drivers/power/qcom/lpm-stats.c
--- linux-4.4.115-fbx/drivers/power/qcom./lpm-stats.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/power/qcom/lpm-stats.c	2019-10-29 09:26:24.629212827 +0100
@@ -0,0 +1,874 @@
+/* Copyright (c) 2012-2016, 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/spinlock.h>
+#include <linux/uaccess.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#include <linux/debugfs.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/suspend.h>
+#include <soc/qcom/spm.h>
+#include <soc/qcom/pm.h>
+#include <soc/qcom/lpm-stats.h>
+
+#define MAX_STR_LEN 256
+#define MAX_TIME_LEN 20
+const char *lpm_stats_reset = "reset";
+const char *lpm_stats_suspend = "suspend";
+
+struct lpm_sleep_time {
+	struct kobj_attribute ts_attr;
+	unsigned int cpu;
+};
+
+struct level_stats {
+	const char *name;
+	struct lpm_stats *owner;
+	int64_t first_bucket_time;
+	int bucket[CONFIG_MSM_IDLE_STATS_BUCKET_COUNT];
+	int64_t min_time[CONFIG_MSM_IDLE_STATS_BUCKET_COUNT];
+	int64_t max_time[CONFIG_MSM_IDLE_STATS_BUCKET_COUNT];
+	int success_count;
+	int failed_count;
+	int64_t total_time;
+	uint64_t enter_time;
+};
+
+static struct level_stats suspend_time_stats;
+
+static DEFINE_PER_CPU_SHARED_ALIGNED(struct lpm_stats, cpu_stats);
+
+static uint64_t get_total_sleep_time(unsigned int cpu_id)
+{
+	struct lpm_stats *stats = &per_cpu(cpu_stats, cpu_id);
+	int i;
+	uint64_t ret = 0;
+
+	for (i = 0; i < stats->num_levels; i++)
+		ret += stats->time_stats[i].total_time;
+
+	return ret;
+}
+
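+/*
+ * Bucket layout sketch: with the default Kconfig values
+ * (MSM_IDLE_STATS_FIRST_BUCKET = 62500 ns, MSM_IDLE_STATS_BUCKET_SHIFT = 2),
+ * bucket i holds sleep times below 62500 << (2 * i) ns, i.e. roughly
+ * 62.5us, 250us, 1ms, 4ms, ..., with the last bucket absorbing the rest.
+ */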
+static void update_level_stats(struct level_stats *stats, uint64_t t,
+				bool success)
+{
+	uint64_t bt;
+	int i;
+
+	if (!success) {
+		stats->failed_count++;
+		return;
+	}
+
+	stats->success_count++;
+	stats->total_time += t;
+	bt = t;
+	do_div(bt, stats->first_bucket_time);
+
+	if (bt < 1ULL << (CONFIG_MSM_IDLE_STATS_BUCKET_SHIFT *
+			(CONFIG_MSM_IDLE_STATS_BUCKET_COUNT - 1)))
+		i = DIV_ROUND_UP(fls((uint32_t)bt),
+			CONFIG_MSM_IDLE_STATS_BUCKET_SHIFT);
+	else
+		i = CONFIG_MSM_IDLE_STATS_BUCKET_COUNT - 1;
+
+	if (i >= CONFIG_MSM_IDLE_STATS_BUCKET_COUNT)
+		i = CONFIG_MSM_IDLE_STATS_BUCKET_COUNT - 1;
+
+	stats->bucket[i]++;
+
+	if (t < stats->min_time[i] || !stats->max_time[i])
+		stats->min_time[i] = t;
+	if (t > stats->max_time[i])
+		stats->max_time[i] = t;
+	return;
+}
+
+static void level_stats_print(struct seq_file *m, struct level_stats *stats)
+{
+	int i = 0;
+	int64_t bucket_time = 0;
+	char seqs[MAX_STR_LEN] = {0};
+	int64_t s = stats->total_time;
+	uint32_t ns = do_div(s, NSEC_PER_SEC);
+
+	snprintf(seqs, MAX_STR_LEN,
+		"[%s] %s:\n"
+		"  success count: %7d\n"
+		"  total success time: %lld.%09u\n",
+		stats->owner->name,
+		stats->name,
+		stats->success_count,
+		s, ns);
+	seq_puts(m, seqs);
+
+	if (stats->failed_count) {
+		snprintf(seqs, MAX_STR_LEN, "  failed count: %7d\n",
+			stats->failed_count);
+		seq_puts(m, seqs);
+	}
+
+	bucket_time = stats->first_bucket_time;
+	for (i = 0;
+		i < CONFIG_MSM_IDLE_STATS_BUCKET_COUNT - 1;
+		i++) {
+		s = bucket_time;
+		ns = do_div(s, NSEC_PER_SEC);
+		snprintf(seqs, MAX_STR_LEN,
+			"\t<%6lld.%09u: %7d (%lld-%lld)\n",
+			s, ns, stats->bucket[i],
+				stats->min_time[i],
+				stats->max_time[i]);
+		seq_puts(m, seqs);
+		bucket_time <<= CONFIG_MSM_IDLE_STATS_BUCKET_SHIFT;
+	}
+	snprintf(seqs, MAX_STR_LEN,
+		"\t>=%5lld.%09u:%8d (%lld-%lld)\n",
+		s, ns, stats->bucket[i],
+		stats->min_time[i],
+		stats->max_time[i]);
+	seq_puts(m, seqs);
+}
+
+static int level_stats_file_show(struct seq_file *m, void *v)
+{
+	struct level_stats *stats = NULL;
+
+	if (!m->private)
+		return -EINVAL;
+
+	stats = (struct level_stats *) m->private;
+
+	level_stats_print(m, stats);
+
+	return 0;
+}
+
+static int level_stats_file_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, level_stats_file_show, inode->i_private);
+}
+
+static void level_stats_print_all(struct seq_file *m, struct lpm_stats *stats)
+{
+	struct list_head *centry = NULL;
+	struct lpm_stats *pos = NULL;
+	int i = 0;
+
+	for (i = 0; i < stats->num_levels; i++)
+		level_stats_print(m, &stats->time_stats[i]);
+
+	if (list_empty(&stats->child))
+		return;
+
+	centry = &stats->child;
+	list_for_each_entry(pos, centry, sibling) {
+		level_stats_print_all(m, pos);
+	}
+}
+
+static void level_stats_reset(struct level_stats *stats)
+{
+	memset(stats->bucket, 0, sizeof(stats->bucket));
+	memset(stats->min_time, 0, sizeof(stats->min_time));
+	memset(stats->max_time, 0, sizeof(stats->max_time));
+	stats->success_count = 0;
+	stats->failed_count = 0;
+	stats->total_time = 0;
+}
+
+static void level_stats_reset_all(struct lpm_stats *stats)
+{
+	struct list_head *centry = NULL;
+	struct lpm_stats *pos = NULL;
+	int i = 0;
+
+	for (i = 0; i < stats->num_levels; i++)
+		level_stats_reset(&stats->time_stats[i]);
+
+	if (list_empty(&stats->child))
+		return;
+
+	centry = &stats->child;
+	list_for_each_entry(pos, centry, sibling) {
+		level_stats_reset_all(pos);
+	}
+}
+
+static int lpm_stats_file_show(struct seq_file *m, void *v)
+{
+	struct lpm_stats *stats = (struct lpm_stats *)m->private;
+
+	if (!m->private) {
+		pr_err("%s: Invalid pdata, Cannot print stats\n", __func__);
+		return -EINVAL;
+	}
+
+	level_stats_print_all(m, stats);
+	level_stats_print(m, &suspend_time_stats);
+
+	return 0;
+}
+
+static int lpm_stats_file_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, lpm_stats_file_show, inode->i_private);
+}
+
+static ssize_t level_stats_file_write(struct file *file,
+	const char __user *buffer, size_t count, loff_t *off)
+{
+	char buf[MAX_STR_LEN] = {0};
+	struct inode *in = file->f_inode;
+	struct level_stats *stats = (struct level_stats *)in->i_private;
+	size_t len = strnlen(lpm_stats_reset, MAX_STR_LEN);
+
+	if (!stats)
+		return -EINVAL;
+
+	if (count != len+1)
+		return -EINVAL;
+
+	if (copy_from_user(buf, buffer, len))
+		return -EFAULT;
+
+	if (strcmp(buf, lpm_stats_reset))
+		return -EINVAL;
+
+	level_stats_reset(stats);
+
+	return count;
+}
+
+static ssize_t lpm_stats_file_write(struct file *file,
+	const char __user *buffer, size_t count, loff_t *off)
+{
+	char buf[MAX_STR_LEN] = {0};
+	struct inode *in = file->f_inode;
+	struct lpm_stats *stats = (struct lpm_stats *)in->i_private;
+	size_t len = strnlen(lpm_stats_reset, MAX_STR_LEN);
+
+	if (!stats)
+		return -EINVAL;
+
+	if (count != len+1)
+		return -EINVAL;
+
+	if (copy_from_user(buf, buffer, len))
+		return -EFAULT;
+
+	if (strcmp(buf, lpm_stats_reset))
+		return -EINVAL;
+
+	level_stats_reset_all(stats);
+
+	return count;
+}
+
+int lifo_stats_file_show(struct seq_file *m, void *v)
+{
+	struct lpm_stats *stats = NULL;
+	struct list_head *centry = NULL;
+	struct lpm_stats *pos = NULL;
+	char seqs[MAX_STR_LEN] = {0};
+
+	if (!m->private)
+		return -EINVAL;
+
+	stats = (struct lpm_stats *)m->private;
+
+	if (list_empty(&stats->child)) {
+		pr_err("%s: ERROR: Lifo level with no children.\n",
+			__func__);
+		return -EINVAL;
+	}
+
+	centry = &stats->child;
+	list_for_each_entry(pos, centry, sibling) {
+		snprintf(seqs, MAX_STR_LEN,
+			"%s:\n"
+			"\tLast-In:%u\n"
+			"\tFirst-Out:%u\n",
+			pos->name,
+			pos->lifo.last_in,
+			pos->lifo.first_out);
+		seq_puts(m, seqs);
+	}
+	return 0;
+}
+
+static int lifo_stats_file_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, lifo_stats_file_show, inode->i_private);
+}
+
+static void lifo_stats_reset_all(struct lpm_stats *stats)
+{
+	struct list_head *centry = NULL;
+	struct lpm_stats *pos = NULL;
+
+	centry = &stats->child;
+	list_for_each_entry(pos, centry, sibling) {
+		pos->lifo.last_in = 0;
+		pos->lifo.first_out = 0;
+		if (!list_empty(&pos->child))
+			lifo_stats_reset_all(pos);
+	}
+}
+
+static ssize_t lifo_stats_file_write(struct file *file,
+	const char __user *buffer, size_t count, loff_t *off)
+{
+	char buf[MAX_STR_LEN] = {0};
+	struct inode *in = file->f_inode;
+	struct lpm_stats *stats = (struct lpm_stats *)in->i_private;
+	size_t len = strnlen(lpm_stats_reset, MAX_STR_LEN);
+
+	if (!stats)
+		return -EINVAL;
+
+	if (count != len+1)
+		return -EINVAL;
+
+	if (copy_from_user(buf, buffer, len))
+		return -EFAULT;
+
+	if (strcmp(buf, lpm_stats_reset))
+		return -EINVAL;
+
+	lifo_stats_reset_all(stats);
+
+	return count;
+}
+
+static const struct file_operations level_stats_fops = {
+	.owner	  = THIS_MODULE,
+	.open	  = level_stats_file_open,
+	.read	  = seq_read,
+	.release  = single_release,
+	.llseek   = no_llseek,
+	.write	  = level_stats_file_write,
+};
+
+static const struct file_operations lpm_stats_fops = {
+	.owner	  = THIS_MODULE,
+	.open	  = lpm_stats_file_open,
+	.read	  = seq_read,
+	.release  = single_release,
+	.llseek   = no_llseek,
+	.write	  = lpm_stats_file_write,
+};
+
+static const struct file_operations lifo_stats_fops = {
+	.owner	  = THIS_MODULE,
+	.open	  = lifo_stats_file_open,
+	.read	  = seq_read,
+	.release  = single_release,
+	.llseek   = no_llseek,
+	.write	  = lifo_stats_file_write,
+};
+
+static void update_last_in_stats(struct lpm_stats *stats)
+{
+	struct list_head *centry = NULL;
+	struct lpm_stats *pos = NULL;
+
+	if (list_empty(&stats->child))
+		return;
+
+	centry = &stats->child;
+	list_for_each_entry(pos, centry, sibling) {
+		if (cpumask_test_cpu(smp_processor_id(), &pos->mask)) {
+			pos->lifo.last_in++;
+			return;
+		}
+	}
+	WARN(1, "Should not reach here\n");
+}
+
+static void update_first_out_stats(struct lpm_stats *stats)
+{
+	struct list_head *centry = NULL;
+	struct lpm_stats *pos = NULL;
+
+	if (list_empty(&stats->child))
+		return;
+
+	centry = &stats->child;
+	list_for_each_entry(pos, centry, sibling) {
+		if (cpumask_test_cpu(smp_processor_id(), &pos->mask)) {
+			pos->lifo.first_out++;
+			return;
+		}
+	}
+	WARN(1, "Should not reach here\n");
+}
+
+static inline void update_exit_stats(struct lpm_stats *stats, uint32_t index,
+					bool success)
+{
+	uint64_t exit_time = 0;
+
+	/* Update time stats only when exit is preceded by enter */
+	exit_time = stats->sleep_time;
+	update_level_stats(&stats->time_stats[index], exit_time,
+					success);
+}
+
+static int config_level(const char *name, const char **levels,
+	int num_levels, struct lpm_stats *parent, struct lpm_stats *stats)
+{
+	int i = 0;
+	struct dentry *directory = NULL;
+	const char *rootname = "lpm_stats";
+	const char *dirname = rootname;
+
+	strlcpy(stats->name, name, MAX_STR_LEN);
+	stats->num_levels = num_levels;
+	stats->parent = parent;
+	INIT_LIST_HEAD(&stats->sibling);
+	INIT_LIST_HEAD(&stats->child);
+
+	stats->time_stats = kzalloc(sizeof(struct level_stats) *
+				num_levels, GFP_KERNEL);
+	if (!stats->time_stats) {
+		pr_err("%s: Insufficient memory for %s level time stats\n",
+			__func__, name);
+		return -ENOMEM;
+	}
+
+	if (parent) {
+		list_add_tail(&stats->sibling, &parent->child);
+		directory = parent->directory;
+		dirname = name;
+	}
+
+	stats->directory = debugfs_create_dir(dirname, directory);
+	if (!stats->directory) {
+		pr_err("%s: Unable to create %s debugfs directory\n",
+			__func__, dirname);
+		kfree(stats->time_stats);
+		return -EPERM;
+	}
+
+	for (i = 0; i < num_levels; i++) {
+		stats->time_stats[i].name = levels[i];
+		stats->time_stats[i].owner = stats;
+		stats->time_stats[i].first_bucket_time =
+			CONFIG_MSM_IDLE_STATS_FIRST_BUCKET;
+		stats->time_stats[i].enter_time = 0;
+
+		if (!debugfs_create_file(stats->time_stats[i].name, S_IRUGO,
+			stats->directory, (void *)&stats->time_stats[i],
+			&level_stats_fops)) {
+			pr_err("%s: Unable to create %s %s level-stats file\n",
+				__func__, stats->name,
+				stats->time_stats[i].name);
+			kfree(stats->time_stats);
+			return -EPERM;
+		}
+	}
+
+	if (!debugfs_create_file("stats", S_IRUGO, stats->directory,
+		(void *)stats, &lpm_stats_fops)) {
+		pr_err("%s: Unable to create %s's overall 'stats' file\n",
+			__func__, stats->name);
+		kfree(stats->time_stats);
+		return -EPERM;
+	}
+
+	return 0;
+}
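+
+/*
+ * Resulting debugfs layout sketch (directory names depend on what callers
+ * pass in; "cpu0" is an example):
+ *
+ *	/sys/kernel/debug/lpm_stats/	root cluster
+ *		<level>, ...		per-level stats files
+ *		stats			aggregate of the subtree
+ *		cpu0/
+ *			<level>, ...
+ *			stats
+ */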
+
+static ssize_t total_sleep_time_show(struct kobject *kobj,
+		struct kobj_attribute *attr, char *buf)
+{
+	struct lpm_sleep_time *cpu_sleep_time = container_of(attr,
+			struct lpm_sleep_time, ts_attr);
+	unsigned int cpu = cpu_sleep_time->cpu;
+	uint64_t total_time = get_total_sleep_time(cpu);
+	uint32_t remainder;
+
+	/*
+	 * Evaluate do_div() before the snprintf() call: it modifies
+	 * total_time, and C does not order argument evaluation.
+	 */
+	remainder = do_div(total_time, NSEC_PER_SEC);
+	return snprintf(buf, MAX_TIME_LEN, "%llu.%09u\n", total_time,
+			remainder);
+}
+
+static struct kobject *local_module_kobject(void)
+{
+	struct kobject *kobj;
+
+	kobj = kset_find_obj(module_kset, KBUILD_MODNAME);
+
+	if (!kobj) {
+		int err;
+		struct module_kobject *mk;
+
+		mk = kzalloc(sizeof(*mk), GFP_KERNEL);
+		if (!mk)
+			return ERR_PTR(-ENOMEM);
+
+		mk->mod = THIS_MODULE;
+		mk->kobj.kset = module_kset;
+
+		err = kobject_init_and_add(&mk->kobj, &module_ktype, NULL,
+				"%s", KBUILD_MODNAME);
+
+		if (err) {
+			kobject_put(&mk->kobj);
+			kfree(mk);
+			pr_err("%s: cannot create kobject for %s\n",
+					__func__, KBUILD_MODNAME);
+			return ERR_PTR(err);
+		}
+
+		kobject_get(&mk->kobj);
+		kobj = &mk->kobj;
+	}
+
+	return kobj;
+}
+
+static int create_sysfs_node(unsigned int cpu, struct lpm_stats *stats)
+{
+	struct kobject *cpu_kobj = NULL;
+	struct lpm_sleep_time *ts = NULL;
+	struct kobject *stats_kobj;
+	char cpu_name[] = "cpuXX";
+	int ret = -ENOMEM;
+
+	stats_kobj = local_module_kobject();
+
+	if (IS_ERR_OR_NULL(stats_kobj))
+		return PTR_ERR(stats_kobj);
+
+	snprintf(cpu_name, sizeof(cpu_name), "cpu%u", cpu);
+	cpu_kobj = kobject_create_and_add(cpu_name, stats_kobj);
+	if (!cpu_kobj)
+		return -ENOMEM;
+
+	ts = kzalloc(sizeof(*ts), GFP_KERNEL);
+	if (!ts)
+		goto failed;
+
+	sysfs_attr_init(&ts->ts_attr.attr);
+	ts->ts_attr.attr.name = "total_sleep_time_secs";
+	ts->ts_attr.attr.mode = 0444;
+	ts->ts_attr.show = total_sleep_time_show;
+	ts->ts_attr.store = NULL;
+	ts->cpu = cpu;
+
+	ret = sysfs_create_file(cpu_kobj, &ts->ts_attr.attr);
+	if (ret)
+		goto failed;
+
+	return 0;
+
+failed:
+	kfree(ts);
+	kobject_put(cpu_kobj);
+	return ret;
+}
+
+static struct lpm_stats *config_cpu_level(const char *name,
+	const char **levels, int num_levels, struct lpm_stats *parent,
+	struct cpumask *mask)
+{
+	int cpu = 0;
+	struct lpm_stats *pstats = NULL;
+	struct lpm_stats *stats = NULL;
+
+	for (pstats = parent; pstats; pstats = pstats->parent)
+		cpumask_or(&pstats->mask, &pstats->mask, mask);
+
+	for_each_cpu(cpu, mask) {
+		int ret = 0;
+		char cpu_name[MAX_STR_LEN] = {0};
+
+		stats = &per_cpu(cpu_stats, cpu);
+		snprintf(cpu_name, MAX_STR_LEN, "%s%d", name, cpu);
+		cpumask_set_cpu(cpu, &stats->mask);
+
+		stats->is_cpu = true;
+
+		ret = config_level(cpu_name, levels, num_levels, parent,
+					stats);
+		if (ret) {
+			pr_err("%s: Unable to create %s stats\n",
+				__func__, cpu_name);
+			return ERR_PTR(ret);
+		}
+
+		ret = create_sysfs_node(cpu, stats);
+
+		if (ret) {
+			pr_err("Could not create the sysfs node\n");
+			return ERR_PTR(ret);
+		}
+	}
+
+	return stats;
+}
+
+static void config_suspend_level(struct lpm_stats *stats)
+{
+	suspend_time_stats.name = lpm_stats_suspend;
+	suspend_time_stats.owner = stats;
+	suspend_time_stats.first_bucket_time =
+			CONFIG_MSM_SUSPEND_STATS_FIRST_BUCKET;
+	suspend_time_stats.enter_time = 0;
+	suspend_time_stats.success_count = 0;
+	suspend_time_stats.failed_count = 0;
+
+	if (!debugfs_create_file(suspend_time_stats.name, S_IRUGO,
+		stats->directory, (void *)&suspend_time_stats,
+		&level_stats_fops))
+		pr_err("%s: Unable to create %s Suspend stats file\n",
+			__func__, stats->name);
+}
+
+static struct lpm_stats *config_cluster_level(const char *name,
+	const char **levels, int num_levels, struct lpm_stats *parent)
+{
+	struct lpm_stats *stats = NULL;
+	int ret = 0;
+
+	stats = kzalloc(sizeof(struct lpm_stats), GFP_KERNEL);
+	if (!stats) {
+		pr_err("%s: Insufficient memory for %s stats\n",
+			__func__, name);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	stats->is_cpu = false;
+
+	ret = config_level(name, levels, num_levels, parent, stats);
+	if (ret) {
+		pr_err("%s: Unable to create %s stats\n", __func__,
+			name);
+		kfree(stats);
+		return ERR_PTR(ret);
+	}
+
+	if (!debugfs_create_file("lifo", S_IRUGO, stats->directory,
+		(void *)stats, &lifo_stats_fops)) {
+		pr_err("%s: Unable to create %s lifo stats file\n",
+			__func__, stats->name);
+		kfree(stats);
+		return ERR_PTR(-EPERM);
+	}
+
+	if (!parent)
+		config_suspend_level(stats);
+
+	return stats;
+}
+
+static void cleanup_stats(struct lpm_stats *stats)
+{
+	struct list_head *centry = NULL;
+	struct lpm_stats *pos = NULL;
+	struct lpm_stats *n = NULL;
+
+	centry = &stats->child;
+	list_for_each_entry_safe_reverse(pos, n, centry, sibling) {
+		if (!list_empty(&pos->child)) {
+			cleanup_stats(pos);
+			continue;
+		}
+
+		list_del_init(&pos->child);
+
+		kfree(pos->time_stats);
+		if (!pos->is_cpu)
+			kfree(pos);
+	}
+	kfree(stats->time_stats);
+	kfree(stats);
+}
+
+static void lpm_stats_cleanup(struct lpm_stats *stats)
+{
+	struct lpm_stats *pstats = stats;
+
+	if (!pstats)
+		return;
+
+	while (pstats->parent)
+		pstats = pstats->parent;
+
+	debugfs_remove_recursive(pstats->directory);
+
+	cleanup_stats(pstats);
+}
+
+/**
+ * lpm_stats_config_level() - API to configure levels stats.
+ *
+ * @name:	Name of the cluster/cpu.
+ * @levels:	Low power mode level names.
+ * @num_levels:	Number of levels supported.
+ * @parent:	Pointer to the parent's lpm_stats object.
+ * @mask:	cpumask, if configuring cpu stats, else NULL.
+ *
+ * Function to communicate the low power mode levels supported by
+ * cpus or a cluster.
+ *
+ * Return: Pointer to the lpm_stats object or ERR_PTR(-ERRNO)
+ */
+struct lpm_stats *lpm_stats_config_level(const char *name,
+	const char **levels, int num_levels, struct lpm_stats *parent,
+	struct cpumask *mask)
+{
+	struct lpm_stats *stats = NULL;
+
+	if (!levels || num_levels <= 0 || IS_ERR(parent)) {
+		pr_err("%s: Invalid input\n\t\tlevels = %p\n\t\t"
+			"num_levels = %d\n\t\tparent = %ld\n",
+			__func__, levels, num_levels, PTR_ERR(parent));
+		return ERR_PTR(-EINVAL);
+	}
+
+	if (mask)
+		stats = config_cpu_level(name, levels, num_levels, parent,
+						mask);
+	else
+		stats = config_cluster_level(name, levels, num_levels,
+						parent);
+
+	if (IS_ERR(stats)) {
+		lpm_stats_cleanup(parent);
+		return stats;
+	}
+
+	return stats;
+}
+EXPORT_SYMBOL(lpm_stats_config_level);
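+
+/*
+ * Illustrative call sequence (level names and the cpumask are examples):
+ * an LPM driver registers the cluster first, then its cpus beneath it:
+ *
+ *	static const char *lvls[] = { "wfi", "retention", "pc" };
+ *	cl = lpm_stats_config_level("L2", lvls, 3, NULL, NULL);
+ *	cpu = lpm_stats_config_level("cpu", lvls, 3, cl, &cluster_mask);
+ */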
+
+/**
+ * lpm_stats_cluster_enter() - API to communicate the lpm level a cluster
+ * is prepared to enter.
+ *
+ * @stats:	Pointer to the cluster's lpm_stats object.
+ * @index:	Index of the lpm level that the cluster is going to enter.
+ *
+ * Function to communicate the low power mode level that the cluster is
+ * prepared to enter.
+ */
+void lpm_stats_cluster_enter(struct lpm_stats *stats, uint32_t index)
+{
+	if (IS_ERR_OR_NULL(stats))
+		return;
+
+	update_last_in_stats(stats);
+}
+EXPORT_SYMBOL(lpm_stats_cluster_enter);
+
+/**
+ * lpm_stats_cluster_exit() - API to communicate the lpm level a cluster
+ * exited.
+ *
+ * @stats:	Pointer to the cluster's lpm_stats object.
+ * @index:	Index of the cluster lpm level.
+ * @success:	Success/Failure of the low power mode execution.
+ *
+ * Function to communicate the low power mode level that the cluster
+ * exited.
+ */
+void lpm_stats_cluster_exit(struct lpm_stats *stats, uint32_t index,
+				bool success)
+{
+	if (IS_ERR_OR_NULL(stats))
+		return;
+
+	update_exit_stats(stats, index, success);
+
+	update_first_out_stats(stats);
+}
+EXPORT_SYMBOL(lpm_stats_cluster_exit);
+
+/**
+ * lpm_stats_cpu_enter() - API to communicate the lpm level a cpu
+ * is prepared to enter.
+ *
+ * @index:	cpu's lpm level index.
+ * @time:	time in ns when the cpu enters the low power level.
+ *
+ * Function to communicate the low power mode level that the cpu is
+ * prepared to enter.
+ */
+void lpm_stats_cpu_enter(uint32_t index, uint64_t time)
+{
+	struct lpm_stats *stats = &(*this_cpu_ptr(&(cpu_stats)));
+
+	stats->sleep_time = time;
+
+	if (!stats->time_stats)
+		return;
+
+}
+EXPORT_SYMBOL(lpm_stats_cpu_enter);
+
+/**
+ * lpm_stats_cpu_exit() - API to communicate the lpm level that the cpu exited.
+ *
+ * @index:	cpu's lpm level index.
+ * @time:	time in ns when the cpu exits the low power level.
+ * @success:	Success/Failure of the low power mode execution.
+ *
+ * Function to communicate the low power mode level that the cpu exited.
+ */
+void lpm_stats_cpu_exit(uint32_t index, uint64_t time, bool success)
+{
+	struct lpm_stats *stats = &(*this_cpu_ptr(&(cpu_stats)));
+
+	if (!stats->time_stats)
+		return;
+
+	stats->sleep_time = time - stats->sleep_time;
+
+	update_exit_stats(stats, index, success);
+}
+EXPORT_SYMBOL(lpm_stats_cpu_exit);
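+
+/*
+ * Typical cpuidle integration sketch (the sched_clock() timestamp source
+ * is an assumption; any monotonic ns clock works):
+ *
+ *	lpm_stats_cpu_enter(idx, sched_clock());
+ *	... enter the low power mode ...
+ *	lpm_stats_cpu_exit(idx, sched_clock(), success);
+ *
+ * so that the residency of level idx is accumulated into its buckets.
+ */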
+
+/**
+ * lpm_stats_suspend_enter() - API to communicate system entering suspend.
+ *
+ * Function to communicate that the system is ready to enter suspend.
+ */
+void lpm_stats_suspend_enter(void)
+{
+	struct timespec ts;
+
+	getnstimeofday(&ts);
+	suspend_time_stats.enter_time = timespec_to_ns(&ts);
+}
+EXPORT_SYMBOL(lpm_stats_suspend_enter);
+
+/**
+ * lpm_stats_suspend_exit() - API to communicate system exiting suspend.
+ *
+ * Function to communicate that the system exited suspend.
+ */
+void lpm_stats_suspend_exit(void)
+{
+	struct timespec ts;
+	uint64_t exit_time = 0;
+
+	getnstimeofday(&ts);
+	exit_time = timespec_to_ns(&ts) - suspend_time_stats.enter_time;
+	update_level_stats(&suspend_time_stats, exit_time, true);
+}
+EXPORT_SYMBOL(lpm_stats_suspend_exit);
diff -Nruw linux-4.4.115-fbx/drivers/power/qcom./Makefile linux-4.4.115-fbx/drivers/power/qcom/Makefile
--- linux-4.4.115-fbx/drivers/power/qcom./Makefile	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/power/qcom/Makefile	2019-01-22 16:16:26.223271038 +0100
@@ -0,0 +1,3 @@
+obj-$(CONFIG_MSM_IDLE_STATS)	+= lpm-stats.o
+obj-$(CONFIG_APSS_CORE_EA)	+= msm-core.o debug_core.o
+obj-$(CONFIG_MSM_APM)		+= apm.o
diff -Nruw linux-4.4.115-fbx/drivers/power/qcom./msm-core.c linux-4.4.115-fbx/drivers/power/qcom/msm-core.c
--- linux-4.4.115-fbx/drivers/power/qcom./msm-core.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/power/qcom/msm-core.c	2019-01-22 16:16:26.223271038 +0100
@@ -0,0 +1,1136 @@
+/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/cpu.h>
+#include <linux/cpufreq.h>
+#include <linux/err.h>
+#include <linux/fs.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/kthread.h>
+#include <linux/kernel.h>
+#include <linux/miscdevice.h>
+#include <linux/module.h>
+#include <linux/msm-core-interface.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/pm_opp.h>
+#include <linux/platform_device.h>
+#include <linux/pm_opp.h>
+#include <linux/slab.h>
+#include <linux/suspend.h>
+#include <linux/thermal.h>
+#include <linux/types.h>
+#include <linux/uaccess.h>
+#include <linux/uio_driver.h>
+#include <asm/smp_plat.h>
+#include <stdbool.h>
+#define CREATE_TRACE_POINTS
+#include <trace/events/trace_msm_core.h>
+
+#define TEMP_BASE_POINT 35
+#define TEMP_MAX_POINT 95
+#define CPU_HOTPLUG_LIMIT 80
+#define CPU_BIT_MASK(cpu) BIT(cpu)
+#define DEFAULT_TEMP 40
+#define DEFAULT_LOW_HYST_TEMP 10
+#define DEFAULT_HIGH_HYST_TEMP 5
+#define CLUSTER_OFFSET_FOR_MPIDR 8
+#define MAX_CORES_PER_CLUSTER 4
+#define MAX_NUM_OF_CLUSTERS 2
+#define NUM_OF_CORNERS 10
+#define DEFAULT_SCALING_FACTOR 1
+
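+/*
+ * Generates allocate_2d_array_<type>(): a TEMP_DATA_POINTS x idx array of
+ * <type>, returned fully zeroed, or an ERR_PTR on allocation failure. Used
+ * below via ALLOCATE_2D_ARRAY(uint32_t) for the per-temperature power table.
+ */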
+#define ALLOCATE_2D_ARRAY(type)\
+static type **allocate_2d_array_##type(int idx)\
+{\
+	int i;\
+	type **ptr = NULL;\
+	if (!idx) \
+		return ERR_PTR(-EINVAL);\
+	ptr = kzalloc(sizeof(*ptr) * TEMP_DATA_POINTS, \
+				GFP_KERNEL);\
+	if (!ptr) { \
+		return ERR_PTR(-ENOMEM); \
+	} \
+	for (i = 0; i < TEMP_DATA_POINTS; i++) { \
+		ptr[i] = kzalloc(sizeof(*ptr[i]) * \
+					idx, GFP_KERNEL);\
+		if (!ptr[i]) {\
+			goto done;\
+		} \
+	} \
+	return ptr;\
+done:\
+	for (i = 0; i < TEMP_DATA_POINTS; i++) \
+		kfree(ptr[i]);\
+	kfree(ptr);\
+	return ERR_PTR(-ENOMEM);\
+}
+
+struct cpu_activity_info {
+	int cpu;
+	int mpidr;
+	long temp;
+	int sensor_id;
+	struct sensor_threshold hi_threshold;
+	struct sensor_threshold low_threshold;
+	struct cpu_static_info *sp;
+};
+
+struct cpu_static_info {
+	uint32_t **power;
+	cpumask_t mask;
+	struct cpufreq_frequency_table *table;
+	uint32_t *voltage;
+	uint32_t num_of_freqs;
+};
+
+static DEFINE_MUTEX(policy_update_mutex);
+static DEFINE_MUTEX(kthread_update_mutex);
+static DEFINE_SPINLOCK(update_lock);
+static struct delayed_work sampling_work;
+static struct completion sampling_completion;
+static struct task_struct *sampling_task;
+static int low_hyst_temp;
+static int high_hyst_temp;
+static struct platform_device *msm_core_pdev;
+static struct cpu_activity_info activity[NR_CPUS];
+DEFINE_PER_CPU(struct cpu_pstate_pwr *, ptable);
+static struct cpu_pwr_stats cpu_stats[NR_CPUS];
+static uint32_t scaling_factor;
+ALLOCATE_2D_ARRAY(uint32_t);
+
+static int poll_ms;
+module_param_named(polling_interval, poll_ms, int,
+		S_IRUGO | S_IWUSR | S_IWGRP);
+
+static int disabled;
+module_param_named(disabled, disabled, int,
+		S_IRUGO | S_IWUSR | S_IWGRP);
+static bool in_suspend;
+static bool activate_power_table;
+static int max_throttling_temp = 80; /* in C */
+module_param_named(throttling_temp, max_throttling_temp, int,
+		S_IRUGO | S_IWUSR | S_IWGRP);
+
+/*
+ * Cannot be called from an interrupt context
+ */
+static void set_and_activate_threshold(uint32_t sensor_id,
+	struct sensor_threshold *threshold)
+{
+	if (sensor_set_trip(sensor_id, threshold)) {
+		pr_err("%s: Error in setting trip %d\n",
+			KBUILD_MODNAME, threshold->trip);
+		return;
+	}
+
+	if (sensor_activate_trip(sensor_id, threshold, true)) {
+		sensor_cancel_trip(sensor_id, threshold);
+		pr_err("%s: Error in enabling trip %d\n",
+			KBUILD_MODNAME, threshold->trip);
+		return;
+	}
+}
+
+static void set_threshold(struct cpu_activity_info *cpu_node)
+{
+	if (cpu_node->sensor_id < 0)
+		return;
+
+	/*
+	 * Before operating on the threshold structure which is used by
+	 * thermal core ensure that the sensor is disabled to prevent
+	 * incorrect operations on the sensor list maintained by thermal code.
+	 */
+	sensor_activate_trip(cpu_node->sensor_id,
+			&cpu_node->hi_threshold, false);
+	sensor_activate_trip(cpu_node->sensor_id,
+			&cpu_node->low_threshold, false);
+
+	cpu_node->hi_threshold.temp = (cpu_node->temp + high_hyst_temp) *
+					scaling_factor;
+	cpu_node->low_threshold.temp = (cpu_node->temp - low_hyst_temp) *
+					scaling_factor;
+
+	/*
+	 * Set the threshold only if we are below the hotplug limit.
+	 * Adding more work in this high temperature range seems to
+	 * make hotplug notifications fail.
+	 */
+	if (cpu_node->hi_threshold.temp < (CPU_HOTPLUG_LIMIT * scaling_factor))
+		set_and_activate_threshold(cpu_node->sensor_id,
+			&cpu_node->hi_threshold);
+
+	set_and_activate_threshold(cpu_node->sensor_id,
+		&cpu_node->low_threshold);
+}
+
+static void samplequeue_handle(struct work_struct *work)
+{
+	complete(&sampling_completion);
+}
+
+/* May be called from an interrupt context */
+static void core_temp_notify(enum thermal_trip_type type,
+		int temp, void *data)
+{
+	struct cpu_activity_info *cpu_node =
+		(struct cpu_activity_info *) data;
+
+	temp /= scaling_factor;
+
+	trace_temp_notification(cpu_node->sensor_id,
+		type, temp, cpu_node->temp);
+
+	cpu_node->temp = temp;
+
+	complete(&sampling_completion);
+}
+
+static void repopulate_stats(int cpu)
+{
+	int i;
+	struct cpu_activity_info *cpu_node = &activity[cpu];
+	int temp_point;
+	struct cpu_pstate_pwr *pt =  per_cpu(ptable, cpu);
+
+	if (!pt)
+		return;
+
+	if (cpu_node->temp < TEMP_BASE_POINT)
+		temp_point = 0;
+	else if (cpu_node->temp > TEMP_MAX_POINT)
+		temp_point = TEMP_DATA_POINTS - 1;
+	else
+		temp_point = (cpu_node->temp - TEMP_BASE_POINT) / 5;
+
+	cpu_stats[cpu].temp = cpu_node->temp;
+	for (i = 0; i < cpu_node->sp->num_of_freqs; i++)
+		pt[i].power = cpu_node->sp->power[temp_point][i];
+
+	trace_cpu_stats(cpu, cpu_stats[cpu].temp, pt[0].power,
+			pt[cpu_node->sp->num_of_freqs-1].power);
+}
+
+void trigger_cpu_pwr_stats_calc(void)
+{
+	int cpu;
+	static long prev_temp[NR_CPUS];
+	struct cpu_activity_info *cpu_node;
+	int temp;
+
+	if (disabled)
+		return;
+
+	spin_lock(&update_lock);
+
+	for_each_online_cpu(cpu) {
+		cpu_node = &activity[cpu];
+		if (cpu_node->sensor_id < 0)
+			continue;
+
+		if (cpu_node->temp == prev_temp[cpu]) {
+			sensor_get_temp(cpu_node->sensor_id, &temp);
+			cpu_node->temp = temp / scaling_factor;
+		}
+
+		prev_temp[cpu] = cpu_node->temp;
+
+		/*
+		 * Do not populate/update stats before policy and ptable have
+		 * been updated.
+		 */
+		if (activate_power_table && cpu_stats[cpu].ptable
+			&& cpu_node->sp->table)
+			repopulate_stats(cpu);
+	}
+	spin_unlock(&update_lock);
+}
+EXPORT_SYMBOL(trigger_cpu_pwr_stats_calc);
+
+void set_cpu_throttled(cpumask_t *mask, bool throttling)
+{
+	int cpu;
+
+	if (!mask)
+		return;
+
+	spin_lock(&update_lock);
+	for_each_cpu(cpu, mask)
+		cpu_stats[cpu].throttling = throttling;
+	spin_unlock(&update_lock);
+}
+EXPORT_SYMBOL(set_cpu_throttled);
+
+static void update_related_freq_table(struct cpufreq_policy *policy)
+{
+	int cpu, num_of_freqs;
+	struct cpufreq_frequency_table *table;
+
+	table = cpufreq_frequency_get_table(policy->cpu);
+	if (!table) {
+		pr_err("Couldn't get freq table for cpu%d\n",
+				policy->cpu);
+		return;
+	}
+
+	for (num_of_freqs = 0; table[num_of_freqs].frequency !=
+			CPUFREQ_TABLE_END;)
+		num_of_freqs++;
+
+	/*
+	 * Synchronous cores within a cluster share the same
+	 * policy. Since the cpufreq table is not initialized for
+	 * all of these cores, copy the same table to all the
+	 * related cpus.
+	 */
+	for_each_cpu(cpu, policy->related_cpus) {
+		activity[cpu].sp->table = table;
+		activity[cpu].sp->num_of_freqs = num_of_freqs;
+	}
+}
+
+static __ref int do_sampling(void *data)
+{
+	int cpu;
+	struct cpu_activity_info *cpu_node;
+	static int prev_temp[NR_CPUS];
+
+	while (!kthread_should_stop()) {
+		wait_for_completion(&sampling_completion);
+		cancel_delayed_work(&sampling_work);
+
+		mutex_lock(&kthread_update_mutex);
+		if (in_suspend)
+			goto unlock;
+
+		trigger_cpu_pwr_stats_calc();
+
+		for_each_online_cpu(cpu) {
+			cpu_node = &activity[cpu];
+			if (prev_temp[cpu] != cpu_node->temp) {
+				prev_temp[cpu] = cpu_node->temp;
+				set_threshold(cpu_node);
+				trace_temp_threshold(cpu, cpu_node->temp,
+					cpu_node->hi_threshold.temp /
+					scaling_factor,
+					cpu_node->low_threshold.temp /
+					scaling_factor);
+			}
+		}
+		if (!poll_ms)
+			goto unlock;
+
+		schedule_delayed_work(&sampling_work,
+			msecs_to_jiffies(poll_ms));
+unlock:
+		mutex_unlock(&kthread_update_mutex);
+	}
+	return 0;
+}
+
+static void clear_static_power(struct cpu_static_info *sp)
+{
+	int i;
+
+	if (!sp)
+		return;
+
+	if (cpumask_first(&sp->mask) < num_possible_cpus())
+		return;
+
+	for (i = 0; i < TEMP_DATA_POINTS; i++)
+		kfree(sp->power[i]);
+	kfree(sp->power);
+	kfree(sp);
+}
+
+BLOCKING_NOTIFIER_HEAD(msm_core_stats_notifier_list);
+
+struct blocking_notifier_head *get_power_update_notifier(void)
+{
+	return &msm_core_stats_notifier_list;
+}
+
+int register_cpu_pwr_stats_ready_notifier(struct notifier_block *nb)
+{
+	return blocking_notifier_chain_register(&msm_core_stats_notifier_list,
+						nb);
+}
+
+static int update_userspace_power(struct sched_params __user *argp)
+{
+	int i;
+	int ret;
+	int cpu = -1;
+	struct cpu_activity_info *node;
+	struct cpu_static_info *sp, *clear_sp;
+	int cpumask, cluster, mpidr;
+	bool pdata_valid[NR_CPUS] = {0};
+
+	get_user(cpumask, &argp->cpumask);
+	get_user(cluster, &argp->cluster);
+	mpidr = cluster << 8;
+
+	pr_debug("%s: cpumask %d, cluster: %d\n", __func__, cpumask,
+					cluster);
+	for (i = 0; i < MAX_CORES_PER_CLUSTER; i++, cpumask >>= 1) {
+		if (!(cpumask & 0x01))
+			continue;
+
+		mpidr |= i;
+		for_each_possible_cpu(cpu) {
+			if (cpu_logical_map(cpu) == mpidr)
+				break;
+		}
+	}
+
+	if ((cpu < 0) || (cpu >= num_possible_cpus()))
+		return -EINVAL;
+
+	node = &activity[cpu];
+	/*
+	 * Allocate new memory to copy the cpumask-specific power
+	 * information into.
+	 */
+	sp = kzalloc(sizeof(*sp), GFP_KERNEL);
+	if (!sp)
+		return -ENOMEM;
+
+	mutex_lock(&policy_update_mutex);
+	sp->power = allocate_2d_array_uint32_t(node->sp->num_of_freqs);
+	if (IS_ERR_OR_NULL(sp->power)) {
+		mutex_unlock(&policy_update_mutex);
+		ret = PTR_ERR(sp->power);
+		kfree(sp);
+		return ret;
+	}
+	sp->num_of_freqs = node->sp->num_of_freqs;
+	sp->voltage = node->sp->voltage;
+	sp->table = node->sp->table;
+
+	for (i = 0; i < TEMP_DATA_POINTS; i++) {
+		ret = copy_from_user(sp->power[i], &argp->power[i][0],
+			sizeof(sp->power[i][0]) * node->sp->num_of_freqs);
+		if (ret)
+			goto failed;
+	}
+
+	/*
+	 * Copy the same power values for all the cpus in the cpumask
+	 * argp->cpumask within the cluster (argp->cluster).
+	 */
+	get_user(cpumask, &argp->cpumask);
+	spin_lock(&update_lock);
+	for (i = 0; i < MAX_CORES_PER_CLUSTER; i++, cpumask >>= 1) {
+		if (!(cpumask & 0x01))
+			continue;
+		mpidr = (cluster << CLUSTER_OFFSET_FOR_MPIDR);
+		mpidr |= i;
+		for_each_possible_cpu(cpu) {
+			if (!(cpu_logical_map(cpu) == mpidr))
+				continue;
+
+			node = &activity[cpu];
+			clear_sp = node->sp;
+			node->sp = sp;
+			cpumask_set_cpu(cpu, &sp->mask);
+			if (clear_sp) {
+				cpumask_clear_cpu(cpu, &clear_sp->mask);
+				clear_static_power(clear_sp);
+			}
+			cpu_stats[cpu].ptable = per_cpu(ptable, cpu);
+			repopulate_stats(cpu);
+			pdata_valid[cpu] = true;
+		}
+	}
+	spin_unlock(&update_lock);
+	mutex_unlock(&policy_update_mutex);
+
+	for_each_possible_cpu(cpu) {
+		if (!pdata_valid[cpu])
+			continue;
+
+		blocking_notifier_call_chain(
+			&msm_core_stats_notifier_list, cpu, NULL);
+	}
+
+	activate_power_table = true;
+	return 0;
+
+failed:
+	mutex_unlock(&policy_update_mutex);
+	for (i = 0; i < TEMP_DATA_POINTS; i++)
+		kfree(sp->power[i]);
+	kfree(sp->power);
+	kfree(sp);
+	return ret;
+}
+
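+/*
+ * Illustrative userspace sequence (the misc device node name comes from
+ * msm_core_device.name below; the variable names are made up):
+ *
+ *	fd = open("/dev/<msm_core_device.name>", O_RDWR);
+ *	ioctl(fd, EA_VOLT, &params);	fetch voltage/frequency tables
+ *	ioctl(fd, EA_LEAKAGE, &params);	push per-temperature power data
+ */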
+static long msm_core_ioctl(struct file *file, unsigned int cmd,
+		unsigned long arg)
+{
+	long ret = 0;
+	struct cpu_activity_info *node = NULL;
+	struct sched_params __user *argp = (struct sched_params __user *)arg;
+	int i, cpu = num_possible_cpus();
+	int mpidr, cluster, cpumask;
+
+	if (!argp)
+		return -EINVAL;
+
+	get_user(cluster, &argp->cluster);
+	mpidr = (cluster << (MAX_CORES_PER_CLUSTER *
+			MAX_NUM_OF_CLUSTERS));
+	get_user(cpumask, &argp->cpumask);
+
+	switch (cmd) {
+	case EA_LEAKAGE:
+		ret = update_userspace_power(argp);
+		if (ret)
+			pr_err("Userspace power update failed with %ld\n", ret);
+		break;
+	case EA_VOLT:
+		for (i = 0; cpumask > 0; i++, cpumask >>= 1) {
+			for_each_possible_cpu(cpu) {
+				if (cpu_logical_map(cpu) == (mpidr | i))
+					break;
+			}
+		}
+		if (cpu >= num_possible_cpus())
+			break;
+
+		mutex_lock(&policy_update_mutex);
+		node = &activity[cpu];
+		if (!node->sp->table) {
+			ret = -EINVAL;
+			goto unlock;
+		}
+		ret = copy_to_user((void __user *)&argp->voltage[0],
+				node->sp->voltage,
+				sizeof(uint32_t) * node->sp->num_of_freqs);
+		if (ret)
+			break;
+		for (i = 0; i < node->sp->num_of_freqs; i++) {
+			ret = copy_to_user((void __user *)&argp->freq[i],
+					&node->sp->table[i].frequency,
+					sizeof(uint32_t));
+			if (ret)
+				break;
+		}
+unlock:
+		mutex_unlock(&policy_update_mutex);
+		break;
+	default:
+		break;
+	}
+
+	return ret;
+}
+
+#ifdef CONFIG_COMPAT
+static long msm_core_compat_ioctl(struct file *file, unsigned int cmd,
+		unsigned long arg)
+{
+	arg = (unsigned long)compat_ptr(arg);
+	return msm_core_ioctl(file, cmd, arg);
+}
+#endif
+
+static int msm_core_open(struct inode *inode, struct file *file)
+{
+	return 0;
+}
+
+static int msm_core_release(struct inode *inode, struct file *file)
+{
+	return 0;
+}
+
+static inline void init_sens_threshold(struct sensor_threshold *threshold,
+		enum thermal_trip_type trip, long temp,
+		void *data)
+{
+	threshold->trip = trip;
+	threshold->temp = temp;
+	threshold->data = data;
+	threshold->notify = (void *)core_temp_notify;
+}
+
+static int msm_core_stats_init(struct device *dev, int cpu)
+{
+	int i;
+	struct cpu_activity_info *cpu_node;
+	struct cpu_pstate_pwr *pstate = NULL;
+
+	cpu_node = &activity[cpu];
+	cpu_stats[cpu].cpu = cpu;
+	cpu_stats[cpu].temp = cpu_node->temp;
+	cpu_stats[cpu].throttling = false;
+
+	cpu_stats[cpu].len = cpu_node->sp->num_of_freqs;
+	pstate = devm_kzalloc(dev,
+		sizeof(*pstate) * cpu_node->sp->num_of_freqs,
+		GFP_KERNEL);
+	if (!pstate)
+		return -ENOMEM;
+
+	for (i = 0; i < cpu_node->sp->num_of_freqs; i++)
+		pstate[i].freq = cpu_node->sp->table[i].frequency;
+
+	per_cpu(ptable, cpu) = pstate;
+
+	return 0;
+}
+
+static int msm_core_task_init(struct device *dev)
+{
+	init_completion(&sampling_completion);
+	sampling_task = kthread_run(do_sampling, NULL, "msm-core:sampling");
+	if (IS_ERR(sampling_task)) {
+		pr_err("Failed to create do_sampling err: %ld\n",
+				PTR_ERR(sampling_task));
+		return PTR_ERR(sampling_task);
+	}
+	return 0;
+}
+
+struct cpu_pwr_stats *get_cpu_pwr_stats(void)
+{
+	return cpu_stats;
+}
+EXPORT_SYMBOL(get_cpu_pwr_stats);
+
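+/*
+ * Worked example for the V * V * f computation below (numbers illustrative):
+ * with voltage = 1000 mV and frequency = 1000000 kHz (1 GHz), the three
+ * do_div(..., 1000) steps yield 1000, i.e. a relative number proportional
+ * to V^2 * f rather than an absolute power in mW.
+ */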
+static int msm_get_power_values(int cpu, struct cpu_static_info *sp)
+{
+	int i = 0, j;
+	int ret = 0;
+	uint64_t power;
+
+	/*
+	 * Calculate the dynamic power spent at every frequency using:
+	 *	Power = V * V * f
+	 * where V = voltage for the frequency
+	 *       f = frequency
+	 */
+	sp->power = allocate_2d_array_uint32_t(sp->num_of_freqs);
+	if (IS_ERR_OR_NULL(sp->power))
+		return PTR_ERR(sp->power);
+
+	for (i = 0; i < TEMP_DATA_POINTS; i++) {
+		for (j = 0; j < sp->num_of_freqs; j++) {
+			power = sp->voltage[j] *
+						sp->table[j].frequency;
+			do_div(power, 1000);
+			do_div(power, 1000);
+			power *= sp->voltage[j];
+			do_div(power, 1000);
+			sp->power[i][j] = power;
+		}
+	}
+	return ret;
+}
+
+static int msm_get_voltage_levels(struct device *dev, int cpu,
+		struct cpu_static_info *sp)
+{
+	unsigned int *voltage;
+	int i;
+	int corner;
+	struct dev_pm_opp *opp;
+	struct device *cpu_dev = get_cpu_device(cpu);
+	/*
+	 * Convert the CPR corner value to the average of the A53
+	 * and A57 voltage values.
+	 */
+	int average_voltage[NUM_OF_CORNERS] = {0, 746, 841, 843, 940, 953, 976,
+			1024, 1090, 1100};
+
+	if (!cpu_dev)
+		return -ENODEV;
+
+	voltage = devm_kzalloc(dev,
+			sizeof(*voltage) * sp->num_of_freqs, GFP_KERNEL);
+
+	if (!voltage)
+		return -ENOMEM;
+
+	rcu_read_lock();
+	for (i = 0; i < sp->num_of_freqs; i++) {
+		opp = dev_pm_opp_find_freq_exact(cpu_dev,
+				sp->table[i].frequency * 1000, true);
+		corner = dev_pm_opp_get_voltage(opp);
+
+		if (corner > 400000)
+			voltage[i] = corner / 1000;
+		else if (corner > 0 && corner < ARRAY_SIZE(average_voltage))
+			voltage[i] = average_voltage[corner];
+		else
+			voltage[i]
+			     = average_voltage[ARRAY_SIZE(average_voltage) - 1];
+	}
+	rcu_read_unlock();
+
+	sp->voltage = voltage;
+	return 0;
+}
+
+static int msm_core_dyn_pwr_init(struct platform_device *pdev,
+				int cpu)
+{
+	int ret = 0;
+
+	if (!activity[cpu].sp->table)
+		return 0;
+
+	ret = msm_get_voltage_levels(&pdev->dev, cpu, activity[cpu].sp);
+	if (ret)
+		return ret;
+
+	ret = msm_get_power_values(cpu, activity[cpu].sp);
+
+	return ret;
+}
+
+static int msm_core_tsens_init(struct device_node *node, int cpu)
+{
+	int ret = 0;
+	char *key = NULL;
+	struct device_node *phandle;
+	const char *sensor_type = NULL;
+	struct cpu_activity_info *cpu_node = &activity[cpu];
+	int temp;
+
+	if (!node)
+		return -ENODEV;
+
+	key = "sensor";
+	phandle = of_parse_phandle(node, key, 0);
+	if (!phandle) {
+		pr_info("%s: No sensor mapping found for the core\n",
+				__func__);
+		/* Do not treat this as an error as some targets might have
+		 * temperature notification only in userspace.
+		 * Use default temperature for the core. Userspace might
+		 * update the temperature once it is up.
+		 */
+		cpu_node->sensor_id = -ENODEV;
+		cpu_node->temp = DEFAULT_TEMP;
+		return 0;
+	}
+
+	key = "qcom,sensor-name";
+	ret = of_property_read_string(phandle, key,
+				&sensor_type);
+	if (ret) {
+		pr_err("%s: Cannot read tsens id\n", __func__);
+		return ret;
+	}
+
+	cpu_node->sensor_id = sensor_get_id((char *)sensor_type);
+	if (cpu_node->sensor_id < 0)
+		return cpu_node->sensor_id;
+
+	key = "qcom,scaling-factor";
+	ret = of_property_read_u32(phandle, key,
+				&scaling_factor);
+	if (ret) {
+		pr_info("%s: Cannot read tsens scaling factor\n", __func__);
+		scaling_factor = DEFAULT_SCALING_FACTOR;
+	}
+
+	ret = sensor_get_temp(cpu_node->sensor_id, &temp);
+	if (ret)
+		return ret;
+
+	cpu_node->temp = temp / scaling_factor;
+
+	init_sens_threshold(&cpu_node->hi_threshold,
+			THERMAL_TRIP_CONFIGURABLE_HI,
+			(cpu_node->temp + high_hyst_temp) * scaling_factor,
+			(void *)cpu_node);
+	init_sens_threshold(&cpu_node->low_threshold,
+			THERMAL_TRIP_CONFIGURABLE_LOW,
+			(cpu_node->temp - low_hyst_temp) * scaling_factor,
+			(void *)cpu_node);
+
+	return ret;
+}
+
+static int msm_core_mpidr_init(struct device_node *phandle)
+{
+	int ret = 0;
+	char *key = NULL;
+	int mpidr;
+
+	key = "reg";
+	ret = of_property_read_u32(phandle, key,
+				&mpidr);
+	if (ret) {
+		pr_err("%s: Cannot read mpidr\n", __func__);
+		return ret;
+	}
+	return mpidr;
+}
+
+static int msm_core_cpu_policy_handler(struct notifier_block *nb,
+		unsigned long val, void *data)
+{
+	struct cpufreq_policy *policy = data;
+	struct cpu_activity_info *cpu_info = &activity[policy->cpu];
+	int cpu;
+	int ret;
+
+	if (cpu_info->sp->table)
+		return NOTIFY_OK;
+
+	switch (val) {
+	case CPUFREQ_CREATE_POLICY:
+		mutex_lock(&policy_update_mutex);
+		update_related_freq_table(policy);
+
+		for_each_cpu(cpu, policy->related_cpus) {
+			ret = msm_core_dyn_pwr_init(msm_core_pdev, cpu);
+			if (ret)
+				pr_debug("voltage-pwr table update failed\n");
+
+			ret = msm_core_stats_init(&msm_core_pdev->dev, cpu);
+			if (ret)
+				pr_debug("Stats table update failed\n");
+		}
+		mutex_unlock(&policy_update_mutex);
+		break;
+	default:
+		break;
+	}
+	return NOTIFY_OK;
+}
+
+struct notifier_block cpu_policy = {
+	.notifier_call = msm_core_cpu_policy_handler
+};
+
+static int system_suspend_handler(struct notifier_block *nb,
+				unsigned long val, void *data)
+{
+	int cpu;
+
+	mutex_lock(&kthread_update_mutex);
+	switch (val) {
+	case PM_POST_HIBERNATION:
+	case PM_POST_SUSPEND:
+	case PM_POST_RESTORE:
+		/*
+		 * Set completion event to read temperature and repopulate
+		 * stats
+		 */
+		in_suspend = 0;
+		complete(&sampling_completion);
+		break;
+	case PM_HIBERNATION_PREPARE:
+	case PM_SUSPEND_PREPARE:
+		/*
+		 * cancel delayed work to be able to restart immediately
+		 * after system resume
+		 */
+		in_suspend = 1;
+		cancel_delayed_work(&sampling_work);
+		/*
+		 * cancel TSENS interrupts as we do not want to wake up from
+		 * suspend to take care of repopulate stats while the system is
+		 * in suspend
+		 */
+		for_each_possible_cpu(cpu) {
+			if (activity[cpu].sensor_id < 0)
+				continue;
+
+			sensor_activate_trip(activity[cpu].sensor_id,
+				&activity[cpu].hi_threshold, false);
+			sensor_activate_trip(activity[cpu].sensor_id,
+				&activity[cpu].low_threshold, false);
+		}
+		break;
+	default:
+		break;
+	}
+	mutex_unlock(&kthread_update_mutex);
+
+	return NOTIFY_OK;
+}
+
+static int msm_core_freq_init(void)
+{
+	int cpu;
+	struct cpufreq_policy *policy;
+
+	for_each_possible_cpu(cpu) {
+		activity[cpu].sp = kzalloc(sizeof(*(activity[cpu].sp)),
+				GFP_KERNEL);
+		if (!activity[cpu].sp)
+			return -ENOMEM;
+	}
+
+	for_each_online_cpu(cpu) {
+		if (activity[cpu].sp->table)
+			continue;
+
+		policy = cpufreq_cpu_get(cpu);
+		if (!policy)
+			continue;
+
+		update_related_freq_table(policy);
+		cpufreq_cpu_put(policy);
+	}
+
+	return 0;
+}
+
+static int msm_core_params_init(struct platform_device *pdev)
+{
+	int ret = 0;
+	unsigned long cpu = 0;
+	struct device_node *child_node = NULL;
+	struct device_node *ea_node = NULL;
+	char *key = NULL;
+	int mpidr;
+
+	for_each_possible_cpu(cpu) {
+		child_node = of_get_cpu_node(cpu, NULL);
+
+		if (!child_node)
+			continue;
+
+		mpidr = msm_core_mpidr_init(child_node);
+		if (mpidr < 0)
+			return mpidr;
+
+		if (cpu >= num_possible_cpus())
+			continue;
+
+		activity[cpu].mpidr = mpidr;
+
+		key = "qcom,ea";
+		ea_node = of_parse_phandle(child_node, key, 0);
+		if (!ea_node) {
+			pr_err("%s Couldn't find the ea_node for cpu%lu\n",
+				__func__, cpu);
+			return -ENODEV;
+		}
+
+		ret = msm_core_tsens_init(ea_node, cpu);
+		if (ret)
+			return ret;
+
+		if (!activity[cpu].sp->table)
+			continue;
+
+		ret = msm_core_dyn_pwr_init(msm_core_pdev, cpu);
+		if (ret)
+			pr_debug("voltage-pwr table update failed\n");
+
+		ret = msm_core_stats_init(&msm_core_pdev->dev, cpu);
+		if (ret)
+			pr_debug("Stats table update failed\n");
+	}
+
+	return 0;
+}
+
+static const struct file_operations msm_core_ops = {
+	.owner = THIS_MODULE,
+	.unlocked_ioctl = msm_core_ioctl,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl = msm_core_compat_ioctl,
+#endif
+	.open = msm_core_open,
+	.release = msm_core_release,
+};
+
+static struct miscdevice msm_core_device = {
+	.minor = MISC_DYNAMIC_MINOR,
+	.name = "pta",
+	.fops = &msm_core_ops
+};
+
+static void free_dyn_memory(void)
+{
+	int i, cpu;
+
+	for_each_possible_cpu(cpu) {
+		if (activity[cpu].sp) {
+			for (i = 0; i < TEMP_DATA_POINTS; i++) {
+				if (!activity[cpu].sp->power)
+					break;
+
+				kfree(activity[cpu].sp->power[i]);
+			}
+		}
+		kfree(activity[cpu].sp);
+	}
+}
+
+static int uio_init(struct platform_device *pdev)
+{
+	int ret = 0;
+	struct uio_info *info = NULL;
+	struct resource *clnt_res = NULL;
+	u32 ea_mem_size = 0;
+	phys_addr_t ea_mem_physical = 0;
+
+	clnt_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!clnt_res) {
+		pr_err("resource not found\n");
+		return -ENODEV;
+	}
+
+	info = devm_kzalloc(&pdev->dev, sizeof(struct uio_info), GFP_KERNEL);
+	if (!info)
+		return -ENOMEM;
+
+	ea_mem_size = resource_size(clnt_res);
+	ea_mem_physical = clnt_res->start;
+
+	if (ea_mem_size == 0) {
+		pr_err("msm-core: memory size is zero");
+		return -EINVAL;
+	}
+
+	/* Setup device */
+	info->name = clnt_res->name;
+	info->version = "1.0";
+	info->mem[0].addr = ea_mem_physical;
+	info->mem[0].size = ea_mem_size;
+	info->mem[0].memtype = UIO_MEM_PHYS;
+
+	ret = uio_register_device(&pdev->dev, info);
+	if (ret) {
+		pr_err("uio register failed ret=%d", ret);
+		return ret;
+	}
+	dev_set_drvdata(&pdev->dev, info);
+
+	return 0;
+}
+
+static int msm_core_dev_probe(struct platform_device *pdev)
+{
+	int ret = 0;
+	char *key = NULL;
+	struct device_node *node;
+	int cpu;
+	struct uio_info *info;
+
+	if (!pdev)
+		return -ENODEV;
+
+	msm_core_pdev = pdev;
+	node = pdev->dev.of_node;
+	if (!node)
+		return -ENODEV;
+
+	key = "qcom,low-hyst-temp";
+	ret = of_property_read_u32(node, key, &low_hyst_temp);
+	if (ret)
+		low_hyst_temp = DEFAULT_LOW_HYST_TEMP;
+
+	key = "qcom,high-hyst-temp";
+	ret = of_property_read_u32(node, key, &high_hyst_temp);
+	if (ret)
+		high_hyst_temp = DEFAULT_HIGH_HYST_TEMP;
+
+	key = "qcom,polling-interval";
+	ret = of_property_read_u32(node, key, &poll_ms);
+	if (ret)
+		pr_info("msm-core initialized without polling period\n");
+
+	key = "qcom,throttling-temp";
+	ret = of_property_read_u32(node, key, &max_throttling_temp);
+
+	ret = uio_init(pdev);
+	if (ret)
+		return ret;
+
+	ret = msm_core_freq_init();
+	if (ret)
+		goto failed;
+
+	ret = misc_register(&msm_core_device);
+	if (ret) {
+		pr_err("%s: Error registering device %d\n", __func__, ret);
+		goto failed;
+	}
+
+	ret = msm_core_params_init(pdev);
+	if (ret)
+		goto failed;
+
+	INIT_DEFERRABLE_WORK(&sampling_work, samplequeue_handle);
+	ret = msm_core_task_init(&pdev->dev);
+	if (ret)
+		goto failed;
+
+	for_each_possible_cpu(cpu)
+		set_threshold(&activity[cpu]);
+
+	schedule_delayed_work(&sampling_work, msecs_to_jiffies(0));
+	cpufreq_register_notifier(&cpu_policy, CPUFREQ_POLICY_NOTIFIER);
+	pm_notifier(system_suspend_handler, 0);
+	return 0;
+failed:
+	info = dev_get_drvdata(&pdev->dev);
+	uio_unregister_device(info);
+	free_dyn_memory();
+	return ret;
+}
+
+static int msm_core_remove(struct platform_device *pdev)
+{
+	int cpu;
+	struct uio_info *info = dev_get_drvdata(&pdev->dev);
+
+	uio_unregister_device(info);
+
+	for_each_possible_cpu(cpu) {
+		if (activity[cpu].sensor_id < 0)
+			continue;
+
+		sensor_cancel_trip(activity[cpu].sensor_id,
+				&activity[cpu].hi_threshold);
+		sensor_cancel_trip(activity[cpu].sensor_id,
+				&activity[cpu].low_threshold);
+	}
+	free_dyn_memory();
+	misc_deregister(&msm_core_device);
+	return 0;
+}
+
+static struct of_device_id msm_core_match_table[] = {
+	{.compatible = "qcom,apss-core-ea"},
+	{},
+};
+
+static struct platform_driver msm_core_driver = {
+	.probe = msm_core_dev_probe,
+	.driver = {
+		.name = "msm_core",
+		.owner = THIS_MODULE,
+		.of_match_table = msm_core_match_table,
+		},
+	.remove = msm_core_remove,
+};
+
+static int __init msm_core_init(void)
+{
+	return platform_driver_register(&msm_core_driver);
+}
+late_initcall(msm_core_init);
diff -Nruw linux-4.4.115-fbx/drivers/power/supply./Kconfig linux-4.4.115-fbx/drivers/power/supply/Kconfig
--- linux-4.4.115-fbx/drivers/power/supply./Kconfig	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/power/supply/Kconfig	2019-01-22 16:16:26.227271074 +0100
@@ -0,0 +1 @@
+source "drivers/power/supply/qcom/Kconfig"
diff -Nruw linux-4.4.115-fbx/drivers/power/supply./Makefile linux-4.4.115-fbx/drivers/power/supply/Makefile
--- linux-4.4.115-fbx/drivers/power/supply./Makefile	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/power/supply/Makefile	2019-01-22 16:16:26.227271074 +0100
@@ -0,0 +1 @@
+obj-y	+= qcom/
diff -Nruw linux-4.4.115-fbx/drivers/power/supply./qcom/battery.c linux-4.4.115-fbx/drivers/power/supply/qcom/battery.c
--- linux-4.4.115-fbx/drivers/power/supply./qcom/battery.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/power/supply/qcom/battery.c	2019-10-29 09:26:24.629212827 +0100
@@ -0,0 +1,1164 @@
+/* Copyright (c) 2017 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "QCOM-BATT: %s: " fmt, __func__
+
+#include <linux/device.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/power_supply.h>
+#include <linux/interrupt.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/qpnp/qpnp-revid.h>
+#include <linux/printk.h>
+#include <linux/pm_wakeup.h>
+#include <linux/slab.h>
+#include <linux/pmic-voter.h>
+
+#define DRV_MAJOR_VERSION	1
+#define DRV_MINOR_VERSION	0
+
+#define CHG_STATE_VOTER			"CHG_STATE_VOTER"
+#define TAPER_END_VOTER			"TAPER_END_VOTER"
+#define PL_TAPER_EARLY_BAD_VOTER	"PL_TAPER_EARLY_BAD_VOTER"
+#define PARALLEL_PSY_VOTER		"PARALLEL_PSY_VOTER"
+#define PL_HW_ABSENT_VOTER		"PL_HW_ABSENT_VOTER"
+#define PL_VOTER			"PL_VOTER"
+#define RESTRICT_CHG_VOTER		"RESTRICT_CHG_VOTER"
+#define ICL_CHANGE_VOTER		"ICL_CHANGE_VOTER"
+#define PL_INDIRECT_VOTER		"PL_INDIRECT_VOTER"
+#define USBIN_I_VOTER			"USBIN_I_VOTER"
+
+struct pl_data {
+	int			pl_mode;
+	int			slave_pct;
+	int			taper_pct;
+	int			slave_fcc_ua;
+	int			restricted_current;
+	bool			restricted_charging_enabled;
+	struct votable		*fcc_votable;
+	struct votable		*fv_votable;
+	struct votable		*pl_disable_votable;
+	struct votable		*pl_awake_votable;
+	struct votable		*hvdcp_hw_inov_dis_votable;
+	struct votable		*usb_icl_votable;
+	struct votable		*pl_enable_votable_indirect;
+	struct delayed_work	status_change_work;
+	struct work_struct	pl_disable_forever_work;
+	struct delayed_work	pl_taper_work;
+	struct power_supply	*main_psy;
+	struct power_supply	*pl_psy;
+	struct power_supply	*batt_psy;
+	struct power_supply	*usb_psy;
+	int			charge_type;
+	int			total_settled_ua;
+	int			pl_settled_ua;
+	struct class		qcom_batt_class;
+	struct wakeup_source	*pl_ws;
+	struct notifier_block	nb;
+};
+
+struct pl_data *the_chip;
+
+enum print_reason {
+	PR_PARALLEL	= BIT(0),
+};
+
+static int debug_mask;
+module_param_named(debug_mask, debug_mask, int, S_IRUSR | S_IWUSR);
+
+#define pl_dbg(chip, reason, fmt, ...)				\
+	do {								\
+		if (debug_mask & (reason))				\
+			pr_info(fmt, ##__VA_ARGS__);	\
+		else							\
+			pr_debug(fmt, ##__VA_ARGS__);		\
+	} while (0)
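+/*
+ * pl_dbg() promotes a message to pr_info() when the matching bit (e.g.
+ * PR_PARALLEL) is set in the debug_mask module parameter; otherwise it
+ * falls back to pr_debug().
+ */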
+
+enum {
+	VER = 0,
+	SLAVE_PCT,
+	RESTRICT_CHG_ENABLE,
+	RESTRICT_CHG_CURRENT,
+};
+
+/*******
+ * ICL *
+********/
+static void split_settled(struct pl_data *chip)
+{
+	int slave_icl_pct, total_current_ua;
+	int slave_ua = 0, main_settled_ua = 0;
+	union power_supply_propval pval = {0, };
+	int rc, total_settled_ua = 0;
+
+	if ((chip->pl_mode != POWER_SUPPLY_PL_USBIN_USBIN)
+		&& (chip->pl_mode != POWER_SUPPLY_PL_USBIN_USBIN_EXT))
+		return;
+
+	if (!chip->main_psy)
+		return;
+
+	if (!get_effective_result_locked(chip->pl_disable_votable)) {
+		/* read the aicl settled value */
+		rc = power_supply_get_property(chip->main_psy,
+			       POWER_SUPPLY_PROP_INPUT_CURRENT_SETTLED, &pval);
+		if (rc < 0) {
+			pr_err("Couldn't get aicl settled value rc=%d\n", rc);
+			return;
+		}
+		main_settled_ua = pval.intval;
+		/* slave gets 10 percent points less for ICL */
+		slave_icl_pct = max(0, chip->slave_pct - 10);
+		slave_ua = ((main_settled_ua + chip->pl_settled_ua)
+						* slave_icl_pct) / 100;
+		total_settled_ua = main_settled_ua + chip->pl_settled_ua;
+	}
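+	/*
+	 * Illustrative numbers: with slave_pct = 50 the slave is offered
+	 * 40% (50 - 10) of the previously settled input current, e.g.
+	 * 40% of 2000 mA = 800 mA, and the main charger keeps the rest.
+	 */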
+
+	total_current_ua = get_effective_result_locked(chip->usb_icl_votable);
+	if (total_current_ua < 0) {
+		if (!chip->usb_psy)
+			chip->usb_psy = power_supply_get_by_name("usb");
+		if (!chip->usb_psy) {
+			pr_err("Couldn't get usbpsy while splitting settled\n");
+			return;
+		}
+		/* no client is voting, so get the total current from charger */
+		rc = power_supply_get_property(chip->usb_psy,
+			POWER_SUPPLY_PROP_HW_CURRENT_MAX, &pval);
+		if (rc < 0) {
+			pr_err("Couldn't get max current rc=%d\n", rc);
+			return;
+		}
+		total_current_ua = pval.intval;
+	}
+
+	/*
+	 * If there is an increase in slave share
+	 * (Also handles parallel enable case)
+	 *	Set Main ICL then slave ICL
+	 * else
+	 * (Also handles parallel disable case)
+	 *	Set slave ICL then main ICL.
+	 */
+	if (slave_ua > chip->pl_settled_ua) {
+		pval.intval = total_current_ua - slave_ua;
+		/* Set ICL on main charger */
+		rc = power_supply_set_property(chip->main_psy,
+				POWER_SUPPLY_PROP_CURRENT_MAX, &pval);
+		if (rc < 0) {
+			pr_err("Couldn't change slave suspend state rc=%d\n",
+					rc);
+			return;
+		}
+
+		/* set the parallel ICL; it may be 0 mA when pl is disabled */
+		pval.intval = slave_ua;
+		rc = power_supply_set_property(chip->pl_psy,
+				POWER_SUPPLY_PROP_CURRENT_MAX, &pval);
+		if (rc < 0) {
+			pr_err("Couldn't set parallel icl, rc=%d\n", rc);
+			return;
+		}
+	} else {
+		/* set the parallel ICL; it may be 0 mA when pl is disabled */
+		pval.intval = slave_ua;
+		rc = power_supply_set_property(chip->pl_psy,
+				POWER_SUPPLY_PROP_CURRENT_MAX, &pval);
+		if (rc < 0) {
+			pr_err("Couldn't set parallel icl, rc=%d\n", rc);
+			return;
+		}
+
+		pval.intval = total_current_ua - slave_ua;
+		/* Set ICL on main charger */
+		rc = power_supply_set_property(chip->main_psy,
+				POWER_SUPPLY_PROP_CURRENT_MAX, &pval);
+		if (rc < 0) {
+			pr_err("Couldn't change slave suspend state rc=%d\n",
+					rc);
+			return;
+		}
+	}
+
+	chip->total_settled_ua = total_settled_ua;
+	chip->pl_settled_ua = slave_ua;
+
+	pl_dbg(chip, PR_PARALLEL,
+		"Split total_current_ua=%d main_settled_ua=%d slave_ua=%d\n",
+		total_current_ua, main_settled_ua, slave_ua);
+}
+
+static ssize_t version_show(struct class *c, struct class_attribute *attr,
+			char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%d.%d\n",
+			DRV_MAJOR_VERSION, DRV_MINOR_VERSION);
+}
+
+/*************
+* SLAVE PCT *
+**************/
+static ssize_t slave_pct_show(struct class *c, struct class_attribute *attr,
+			char *ubuf)
+{
+	struct pl_data *chip = container_of(c, struct pl_data,
+			qcom_batt_class);
+
+	return snprintf(ubuf, PAGE_SIZE, "%d\n", chip->slave_pct);
+}
+
+static ssize_t slave_pct_store(struct class *c, struct class_attribute *attr,
+			const char *ubuf, size_t count)
+{
+	struct pl_data *chip = container_of(c, struct pl_data,
+			qcom_batt_class);
+	unsigned long val;
+
+	if (kstrtoul(ubuf, 10, &val))
+		return -EINVAL;
+
+	chip->slave_pct = val;
+	rerun_election(chip->fcc_votable);
+	rerun_election(chip->fv_votable);
+	split_settled(chip);
+
+	return count;
+}
+
+/**********************
+* RESTRICTED CHARGING *
+***********************/
+static ssize_t restrict_chg_show(struct class *c, struct class_attribute *attr,
+			char *ubuf)
+{
+	struct pl_data *chip = container_of(c, struct pl_data,
+			qcom_batt_class);
+
+	return snprintf(ubuf, PAGE_SIZE, "%d\n",
+			chip->restricted_charging_enabled);
+}
+
+static ssize_t restrict_chg_store(struct class *c, struct class_attribute *attr,
+			const char *ubuf, size_t count)
+{
+	struct pl_data *chip = container_of(c, struct pl_data,
+			qcom_batt_class);
+	unsigned long val;
+
+	if (kstrtoul(ubuf, 10, &val))
+		return -EINVAL;
+
+	if (chip->restricted_charging_enabled == !!val)
+		goto no_change;
+
+	chip->restricted_charging_enabled = !!val;
+
+	/* disable parallel charger in case of restricted charging */
+	vote(chip->pl_disable_votable, RESTRICT_CHG_VOTER,
+				chip->restricted_charging_enabled, 0);
+
+	vote(chip->fcc_votable, RESTRICT_CHG_VOTER,
+				chip->restricted_charging_enabled,
+				chip->restricted_current);
+
+no_change:
+	return count;
+}
+
+static ssize_t restrict_cur_show(struct class *c, struct class_attribute *attr,
+			char *ubuf)
+{
+	struct pl_data *chip = container_of(c, struct pl_data,
+			qcom_batt_class);
+
+	return snprintf(ubuf, PAGE_SIZE, "%d\n", chip->restricted_current);
+}
+
+static ssize_t restrict_cur_store(struct class *c, struct class_attribute *attr,
+			const char *ubuf, size_t count)
+{
+	struct pl_data *chip = container_of(c, struct pl_data,
+			qcom_batt_class);
+	unsigned long val;
+
+	if (kstrtoul(ubuf, 10, &val))
+		return -EINVAL;
+
+	chip->restricted_current = val;
+
+	vote(chip->fcc_votable, RESTRICT_CHG_VOTER,
+				chip->restricted_charging_enabled,
+				chip->restricted_current);
+
+	return count;
+}
+
+static struct class_attribute pl_attributes[] = {
+	[VER]			= __ATTR_RO(version),
+	[SLAVE_PCT]		= __ATTR(parallel_pct, S_IRUGO | S_IWUSR,
+					slave_pct_show, slave_pct_store),
+	[RESTRICT_CHG_ENABLE]	= __ATTR(restricted_charging, S_IRUGO | S_IWUSR,
+					restrict_chg_show, restrict_chg_store),
+	[RESTRICT_CHG_CURRENT]	= __ATTR(restricted_current, S_IRUGO | S_IWUSR,
+					restrict_cur_show, restrict_cur_store),
+	__ATTR_NULL,
+};
+
+/***********
+ *  TAPER  *
+************/
+#define MINIMUM_PARALLEL_FCC_UA		500000
+#define PL_TAPER_WORK_DELAY_MS		500
+#define TAPER_RESIDUAL_PCT		75
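+/*
+ * Each pass of the taper work scales taper_pct by TAPER_RESIDUAL_PCT, so
+ * the slave's share decays geometrically: 100% -> 75% -> 56% -> 42% ...
+ * until its FCC drops below MINIMUM_PARALLEL_FCC_UA and parallel charging
+ * is stopped.
+ */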
+static void pl_taper_work(struct work_struct *work)
+{
+	struct pl_data *chip = container_of(work, struct pl_data,
+						pl_taper_work.work);
+	union power_supply_propval pval = {0, };
+	int rc;
+
+	/* exit immediately if parallel is disabled */
+	if (get_effective_result(chip->pl_disable_votable)) {
+		pl_dbg(chip, PR_PARALLEL, "terminating parallel not in progress\n");
+		goto done;
+	}
+
+	pl_dbg(chip, PR_PARALLEL, "entering parallel taper work slave_fcc = %d\n",
+			chip->slave_fcc_ua);
+	if (chip->slave_fcc_ua < MINIMUM_PARALLEL_FCC_UA) {
+		pl_dbg(chip, PR_PARALLEL, "terminating parallel's share lower than 500mA\n");
+		vote(chip->pl_disable_votable, TAPER_END_VOTER, true, 0);
+		goto done;
+	}
+
+	rc = power_supply_get_property(chip->batt_psy,
+			       POWER_SUPPLY_PROP_CHARGE_TYPE, &pval);
+	if (rc < 0) {
+		pr_err("Couldn't get batt charge type rc=%d\n", rc);
+		goto done;
+	}
+
+	chip->charge_type = pval.intval;
+	if (pval.intval == POWER_SUPPLY_CHARGE_TYPE_TAPER) {
+		pl_dbg(chip, PR_PARALLEL, "master is taper charging; reducing slave FCC\n");
+
+		vote(chip->pl_awake_votable, TAPER_END_VOTER, true, 0);
+		/* Reduce the taper percent by 25 percent */
+		chip->taper_pct = chip->taper_pct * TAPER_RESIDUAL_PCT / 100;
+		rerun_election(chip->fcc_votable);
+		pl_dbg(chip, PR_PARALLEL, "taper entry scheduling work after %d ms\n",
+				PL_TAPER_WORK_DELAY_MS);
+		schedule_delayed_work(&chip->pl_taper_work,
+				msecs_to_jiffies(PL_TAPER_WORK_DELAY_MS));
+		return;
+	}
+
+	/*
+	 * Master back to Fast Charge, get out of this round of taper reduction
+	 */
+	pl_dbg(chip, PR_PARALLEL, "master is fast charging; waiting for next taper\n");
+
+done:
+	vote(chip->pl_awake_votable, TAPER_END_VOTER, false, 0);
+}
+
+/*********
+ *  FCC  *
+**********/
+#define EFFICIENCY_PCT	80
+static void get_fcc_split(struct pl_data *chip, int total_ua,
+			int *master_ua, int *slave_ua)
+{
+	int rc, effective_total_ua, slave_limited_ua, hw_cc_delta_ua = 0,
+		icl_ua, adapter_uv, bcl_ua;
+	union power_supply_propval pval = {0, };
+
+	rc = power_supply_get_property(chip->main_psy,
+			       POWER_SUPPLY_PROP_FCC_DELTA, &pval);
+	if (rc < 0)
+		hw_cc_delta_ua = 0;
+	else
+		hw_cc_delta_ua = pval.intval;
+
+	bcl_ua = INT_MAX;
+	if (chip->pl_mode == POWER_SUPPLY_PL_USBMID_USBMID) {
+		rc = power_supply_get_property(chip->main_psy,
+			       POWER_SUPPLY_PROP_INPUT_CURRENT_SETTLED, &pval);
+		if (rc < 0) {
+			pr_err("Couldn't get aicl settled value rc=%d\n", rc);
+			return;
+		}
+		icl_ua = pval.intval;
+
+		rc = power_supply_get_property(chip->main_psy,
+			       POWER_SUPPLY_PROP_INPUT_VOLTAGE_SETTLED, &pval);
+		if (rc < 0) {
+			pr_err("Couldn't get adaptive voltage rc=%d\n", rc);
+			return;
+		}
+		adapter_uv = pval.intval;
+
+		bcl_ua = div64_s64((s64)icl_ua * adapter_uv * EFFICIENCY_PCT,
+			(s64)get_effective_result(chip->fv_votable) * 100);
+	}
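+	/*
+	 * bcl_ua above is a power-balance estimate of the charge current
+	 * the input can sustain: (ICL * Vadapter * efficiency) / Vfloat.
+	 * For example, 1.5 A at 9 V with 80% efficiency and a 4.4 V float
+	 * voltage supports roughly 2.45 A.
+	 */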
+
+	effective_total_ua = max(0, total_ua + hw_cc_delta_ua);
+	slave_limited_ua = min(effective_total_ua, bcl_ua);
+	*slave_ua = (slave_limited_ua * chip->slave_pct) / 100;
+	/*
+	 * In the USBIN_USBIN configuration with internal rsense, the
+	 * parallel charger's current goes through the main charger's
+	 * BATFET, so keep the main charger's FCC at the votable result.
+	 */
+	if (chip->pl_mode == POWER_SUPPLY_PL_USBIN_USBIN)
+		*master_ua = max(0, total_ua);
+	else
+		*master_ua = max(0, total_ua - *slave_ua);
+
+	*slave_ua = (*slave_ua * chip->taper_pct) / 100;
+}
+
+static int pl_fcc_vote_callback(struct votable *votable, void *data,
+			int total_fcc_ua, const char *client)
+{
+	struct pl_data *chip = data;
+	union power_supply_propval pval = {0, };
+	int rc, master_fcc_ua = total_fcc_ua, slave_fcc_ua = 0;
+
+	if (total_fcc_ua < 0)
+		return 0;
+
+	if (!chip->main_psy)
+		return 0;
+
+	if (chip->pl_mode == POWER_SUPPLY_PL_NONE
+	    || get_effective_result_locked(chip->pl_disable_votable)) {
+		pval.intval = total_fcc_ua;
+		rc = power_supply_set_property(chip->main_psy,
+				POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX,
+				&pval);
+		if (rc < 0)
+			pr_err("Couldn't set main fcc, rc=%d\n", rc);
+		return rc;
+	}
+
+	if (chip->pl_mode != POWER_SUPPLY_PL_NONE) {
+		get_fcc_split(chip, total_fcc_ua,
+			&master_fcc_ua, &slave_fcc_ua);
+
+		/*
+		 * If there is an increase in slave share
+		 * (Also handles parallel enable case)
+		 *	Set Main ICL then slave FCC
+		 * else
+		 * (Also handles parallel disable case)
+		 *	Set slave ICL then main FCC.
+		 */
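+		/*
+		 * Rationale (sketch): always lowering one charger before
+		 * raising the other keeps the combined FCC from transiently
+		 * exceeding the total granted by the election.
+		 */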
+		if (slave_fcc_ua > chip->slave_fcc_ua) {
+			pval.intval = master_fcc_ua;
+			rc = power_supply_set_property(chip->main_psy,
+				POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX,
+				&pval);
+			if (rc < 0) {
+				pr_err("Could not set main fcc, rc=%d\n", rc);
+				return rc;
+			}
+
+			pval.intval = slave_fcc_ua;
+			rc = power_supply_set_property(chip->pl_psy,
+				POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX,
+				&pval);
+			if (rc < 0) {
+				pr_err("Couldn't set parallel fcc, rc=%d\n",
+						rc);
+				return rc;
+			}
+
+			chip->slave_fcc_ua = slave_fcc_ua;
+		} else {
+			pval.intval = slave_fcc_ua;
+			rc = power_supply_set_property(chip->pl_psy,
+				POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX,
+				&pval);
+			if (rc < 0) {
+				pr_err("Couldn't set parallel fcc, rc=%d\n",
+						rc);
+				return rc;
+			}
+
+			chip->slave_fcc_ua = slave_fcc_ua;
+
+			pval.intval = master_fcc_ua;
+			rc = power_supply_set_property(chip->main_psy,
+				POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX,
+				&pval);
+			if (rc < 0) {
+				pr_err("Could not set main fcc, rc=%d\n", rc);
+				return rc;
+			}
+		}
+	}
+
+	pl_dbg(chip, PR_PARALLEL, "master_fcc=%d slave_fcc=%d distribution=(%d/%d)\n",
+		   master_fcc_ua, slave_fcc_ua,
+		   (master_fcc_ua * 100) / total_fcc_ua,
+		   (slave_fcc_ua * 100) / total_fcc_ua);
+
+	return 0;
+}
+
+#define PARALLEL_FLOAT_VOLTAGE_DELTA_UV 50000
+static int pl_fv_vote_callback(struct votable *votable, void *data,
+			int fv_uv, const char *client)
+{
+	struct pl_data *chip = data;
+	union power_supply_propval pval = {0, };
+	int rc = 0;
+
+	if (fv_uv < 0)
+		return 0;
+
+	if (!chip->main_psy)
+		return 0;
+
+	pval.intval = fv_uv;
+
+	rc = power_supply_set_property(chip->main_psy,
+			POWER_SUPPLY_PROP_VOLTAGE_MAX, &pval);
+	if (rc < 0) {
+		pr_err("Couldn't set main fv, rc=%d\n", rc);
+		return rc;
+	}
+
+	if (chip->pl_mode != POWER_SUPPLY_PL_NONE) {
+		pval.intval += PARALLEL_FLOAT_VOLTAGE_DELTA_UV;
+		rc = power_supply_set_property(chip->pl_psy,
+				POWER_SUPPLY_PROP_VOLTAGE_MAX, &pval);
+		if (rc < 0) {
+			pr_err("Couldn't set float on parallel rc=%d\n", rc);
+			return rc;
+		}
+	}
+
+	return 0;
+}
+
+#define ICL_STEP_UA	25000
+#define PL_DELAY_MS     3000
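+/*
+ * AICL rerun sketch: when the new ICL is above the settled value, the
+ * callback below first programs a limit one ICL_STEP_UA below the settled
+ * current and then applies the effective ICL, prompting the hardware to
+ * re-run its input current negotiation.
+ */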
+static int usb_icl_vote_callback(struct votable *votable, void *data,
+			int icl_ua, const char *client)
+{
+	int rc;
+	struct pl_data *chip = data;
+	union power_supply_propval pval = {0, };
+	bool rerun_aicl = false;
+
+	if (!chip->main_psy)
+		return 0;
+
+	if (client == NULL)
+		icl_ua = INT_MAX;
+
+	/*
+	 * Disable parallel for new ICL vote - the call to split_settled will
+	 * ensure that all the input current limit gets assigned to the main
+	 * charger.
+	 */
+	vote(chip->pl_disable_votable, ICL_CHANGE_VOTER, true, 0);
+
+	/*
+	 * if (ICL <= 1400 mA)
+	 *	disable the parallel charger using USBIN_I_VOTER
+	 * else
+	 *	instead of re-enabling here, rely on status_change_work
+	 *	(triggered on AICL completion, or scheduled from here)
+	 *	to vote USBIN_I_VOTER based on the settled current.
+	 */
+	if (icl_ua <= 1400000)
+		vote(chip->pl_enable_votable_indirect, USBIN_I_VOTER, false, 0);
+	else
+		schedule_delayed_work(&chip->status_change_work,
+						msecs_to_jiffies(PL_DELAY_MS));
+
+	/* rerun AICL */
+	/* get the settled current */
+	rc = power_supply_get_property(chip->main_psy,
+			       POWER_SUPPLY_PROP_INPUT_CURRENT_SETTLED,
+			       &pval);
+	if (rc < 0) {
+		pr_err("Couldn't get aicl settled value rc=%d\n", rc);
+		return rc;
+	}
+
+	/* rerun AICL if new ICL is above settled ICL */
+	if (icl_ua > pval.intval)
+		rerun_aicl = true;
+
+	if (rerun_aicl) {
+		/* set a lower ICL */
+		pval.intval = max(pval.intval - ICL_STEP_UA, ICL_STEP_UA);
+		power_supply_set_property(chip->main_psy,
+				POWER_SUPPLY_PROP_CURRENT_MAX,
+				&pval);
+	}
+
+	/* set the effective ICL */
+	pval.intval = icl_ua;
+	power_supply_set_property(chip->main_psy,
+			POWER_SUPPLY_PROP_CURRENT_MAX,
+			&pval);
+
+	vote(chip->pl_disable_votable, ICL_CHANGE_VOTER, false, 0);
+
+	return 0;
+}
+
+static void pl_disable_forever_work(struct work_struct *work)
+{
+	struct pl_data *chip = container_of(work,
+			struct pl_data, pl_disable_forever_work);
+
+	/* Disable Parallel charger forever */
+	vote(chip->pl_disable_votable, PL_HW_ABSENT_VOTER, true, 0);
+
+	/* Re-enable autonomous mode */
+	if (chip->hvdcp_hw_inov_dis_votable)
+		vote(chip->hvdcp_hw_inov_dis_votable, PL_VOTER, false, 0);
+}
+
+static int pl_disable_vote_callback(struct votable *votable,
+		void *data, int pl_disable, const char *client)
+{
+	struct pl_data *chip = data;
+	union power_supply_propval pval = {0, };
+	int rc;
+
+	chip->taper_pct = 100;
+	chip->total_settled_ua = 0;
+	chip->pl_settled_ua = 0;
+
+	if (!pl_disable) { /* enable */
+		rc = power_supply_get_property(chip->pl_psy,
+				POWER_SUPPLY_PROP_CHARGE_TYPE, &pval);
+		if (rc == -ENODEV) {
+			/*
+			 * -ENODEV is returned only if parallel chip
+			 * is not present in the system.
+			 * Disable parallel charger forever.
+			 */
+			schedule_work(&chip->pl_disable_forever_work);
+			return rc;
+		}
+
+		rerun_election(chip->fv_votable);
+		rerun_election(chip->fcc_votable);
+		/*
+		 * Enable will be called with a valid pl_psy always. The
+		 * PARALLEL_PSY_VOTER keeps it disabled unless a pl_psy
+		 * is seen.
+		 */
+		pval.intval = 0;
+		rc = power_supply_set_property(chip->pl_psy,
+				POWER_SUPPLY_PROP_INPUT_SUSPEND, &pval);
+		if (rc < 0)
+			pr_err("Couldn't change slave suspend state rc=%d\n",
+				rc);
+
+		if ((chip->pl_mode == POWER_SUPPLY_PL_USBIN_USBIN)
+			|| (chip->pl_mode == POWER_SUPPLY_PL_USBIN_USBIN_EXT))
+			split_settled(chip);
+		/*
+		 * we could have been enabled while in taper mode,
+		 *  start the taper work if so
+		 */
+		rc = power_supply_get_property(chip->batt_psy,
+				       POWER_SUPPLY_PROP_CHARGE_TYPE, &pval);
+		if (rc < 0) {
+			pr_err("Couldn't get batt charge type rc=%d\n", rc);
+		} else {
+			if (pval.intval == POWER_SUPPLY_CHARGE_TYPE_TAPER) {
+				pl_dbg(chip, PR_PARALLEL,
+					"pl enabled in Taper scheduing work\n");
+				schedule_delayed_work(&chip->pl_taper_work, 0);
+			}
+		}
+	} else {
+		if ((chip->pl_mode == POWER_SUPPLY_PL_USBIN_USBIN)
+			|| (chip->pl_mode == POWER_SUPPLY_PL_USBIN_USBIN_EXT))
+			split_settled(chip);
+
+		/* pl_psy may be NULL while in the disable branch */
+		if (chip->pl_psy) {
+			pval.intval = 1;
+			rc = power_supply_set_property(chip->pl_psy,
+					POWER_SUPPLY_PROP_INPUT_SUSPEND, &pval);
+			if (rc < 0)
+				pr_err("Couldn't change slave suspend state rc=%d\n",
+					rc);
+		}
+		rerun_election(chip->fcc_votable);
+		rerun_election(chip->fv_votable);
+	}
+
+	pl_dbg(chip, PR_PARALLEL, "parallel charging %s\n",
+		   pl_disable ? "disabled" : "enabled");
+
+	return 0;
+}
+
+static int pl_enable_indirect_vote_callback(struct votable *votable,
+			void *data, int pl_enable, const char *client)
+{
+	struct pl_data *chip = data;
+
+	vote(chip->pl_disable_votable, PL_INDIRECT_VOTER, !pl_enable, 0);
+
+	return 0;
+}
+
+static int pl_awake_vote_callback(struct votable *votable,
+			void *data, int awake, const char *client)
+{
+	struct pl_data *chip = data;
+
+	if (awake)
+		__pm_stay_awake(chip->pl_ws);
+	else
+		__pm_relax(chip->pl_ws);
+
+	pr_debug("client: %s awake: %d\n", client, awake);
+	return 0;
+}
+
+static bool is_main_available(struct pl_data *chip)
+{
+	if (chip->main_psy)
+		return true;
+
+	chip->main_psy = power_supply_get_by_name("main");
+
+	return !!chip->main_psy;
+}
+
+static bool is_batt_available(struct pl_data *chip)
+{
+	if (!chip->batt_psy)
+		chip->batt_psy = power_supply_get_by_name("battery");
+
+	if (!chip->batt_psy)
+		return false;
+
+	return true;
+}
+
+static bool is_parallel_available(struct pl_data *chip)
+{
+	union power_supply_propval pval = {0, };
+	int rc;
+
+	if (chip->pl_psy)
+		return true;
+
+	chip->pl_psy = power_supply_get_by_name("parallel");
+	if (!chip->pl_psy)
+		return false;
+
+	rc = power_supply_get_property(chip->pl_psy,
+			       POWER_SUPPLY_PROP_PARALLEL_MODE, &pval);
+	if (rc < 0) {
+		pr_err("Couldn't get parallel mode from parallel rc=%d\n",
+				rc);
+		return false;
+	}
+	/*
+	 * Note that pl_mode will be updated to anything other than a _NONE
+	 * only after pl_psy is found. IOW pl_mode != _NONE implies that
+	 * pl_psy is present and valid.
+	 */
+	chip->pl_mode = pval.intval;
+
+	/* Disable autonomous voltage increments for USBIN-USBIN */
+	if ((chip->pl_mode == POWER_SUPPLY_PL_USBIN_USBIN)
+		|| (chip->pl_mode == POWER_SUPPLY_PL_USBIN_USBIN_EXT)) {
+		if (!chip->hvdcp_hw_inov_dis_votable)
+			chip->hvdcp_hw_inov_dis_votable =
+					find_votable("HVDCP_HW_INOV_DIS");
+		if (chip->hvdcp_hw_inov_dis_votable)
+			/* Disable hardware autonomous INOV */
+			vote(chip->hvdcp_hw_inov_dis_votable, PL_VOTER,
+					true, 0);
+		else
+			return false;
+	}
+
+	vote(chip->pl_disable_votable, PARALLEL_PSY_VOTER, false, 0);
+
+	return true;
+}
+
+static void handle_main_charge_type(struct pl_data *chip)
+{
+	union power_supply_propval pval = {0, };
+	int rc;
+
+	rc = power_supply_get_property(chip->batt_psy,
+			       POWER_SUPPLY_PROP_CHARGE_TYPE, &pval);
+	if (rc < 0) {
+		pr_err("Couldn't get batt charge type rc=%d\n", rc);
+		return;
+	}
+
+	/* a state that is neither fast nor taper disables parallel */
+	if ((pval.intval != POWER_SUPPLY_CHARGE_TYPE_FAST)
+		&& (pval.intval != POWER_SUPPLY_CHARGE_TYPE_TAPER)) {
+		vote(chip->pl_disable_votable, CHG_STATE_VOTER, true, 0);
+		chip->taper_pct = 100;
+		vote(chip->pl_disable_votable, TAPER_END_VOTER, false, 0);
+		vote(chip->pl_disable_votable, PL_TAPER_EARLY_BAD_VOTER,
+				false, 0);
+		chip->charge_type = pval.intval;
+		return;
+	}
+
+	/* handle taper charge entry */
+	if (chip->charge_type == POWER_SUPPLY_CHARGE_TYPE_FAST
+		&& (pval.intval == POWER_SUPPLY_CHARGE_TYPE_TAPER)) {
+		chip->charge_type = pval.intval;
+		pl_dbg(chip, PR_PARALLEL, "taper entry scheduling work\n");
+		schedule_delayed_work(&chip->pl_taper_work, 0);
+		return;
+	}
+
+	/* handle fast/taper charge entry */
+	if (pval.intval == POWER_SUPPLY_CHARGE_TYPE_TAPER
+			|| pval.intval == POWER_SUPPLY_CHARGE_TYPE_FAST) {
+		pl_dbg(chip, PR_PARALLEL, "chg_state enabling parallel\n");
+		vote(chip->pl_disable_votable, CHG_STATE_VOTER, false, 0);
+		chip->charge_type = pval.intval;
+		return;
+	}
+
+	/* remember the new state only if it isn't any of the above */
+	chip->charge_type = pval.intval;
+}
+
+#define MIN_ICL_CHANGE_DELTA_UA		300000
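+/*
+ * USBIN_I_VOTER gating: parallel charging is only enabled indirectly when
+ * roughly more than 1.4 A of input current has settled or been requested;
+ * below that, or when the main input is current-limited, the slave stays
+ * disabled.
+ */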
+static void handle_settled_icl_change(struct pl_data *chip)
+{
+	union power_supply_propval pval = {0, };
+	int new_total_settled_ua;
+	int rc;
+	int main_settled_ua;
+	int main_limited;
+	int total_current_ua;
+
+	total_current_ua = get_effective_result_locked(chip->usb_icl_votable);
+
+	/*
+	 * call aicl split only when USBIN_USBIN and enabled
+	 * and if aicl changed
+	 */
+	rc = power_supply_get_property(chip->main_psy,
+			       POWER_SUPPLY_PROP_INPUT_CURRENT_SETTLED,
+			       &pval);
+	if (rc < 0) {
+		pr_err("Couldn't get aicl settled value rc=%d\n", rc);
+		return;
+	}
+	main_settled_ua = pval.intval;
+
+	rc = power_supply_get_property(chip->batt_psy,
+			       POWER_SUPPLY_PROP_INPUT_CURRENT_LIMITED,
+			       &pval);
+	if (rc < 0) {
+		pr_err("Couldn't get aicl settled value rc=%d\n", rc);
+		return;
+	}
+	main_limited = pval.intval;
+
+	if ((main_limited && (main_settled_ua + chip->pl_settled_ua) < 1400000)
+			|| (main_settled_ua == 0)
+			|| ((total_current_ua >= 0) &&
+				(total_current_ua <= 1400000)))
+		vote(chip->pl_enable_votable_indirect, USBIN_I_VOTER, false, 0);
+	else
+		vote(chip->pl_enable_votable_indirect, USBIN_I_VOTER, true, 0);
+
+
+	if (get_effective_result(chip->pl_disable_votable))
+		return;
+
+	if (chip->pl_mode == POWER_SUPPLY_PL_USBIN_USBIN
+			|| chip->pl_mode == POWER_SUPPLY_PL_USBIN_USBIN_EXT) {
+		/*
+		 * call aicl split only when USBIN_USBIN and enabled
+		 * and if settled current has changed by more than 300mA
+		 */
+
+		new_total_settled_ua = main_settled_ua + chip->pl_settled_ua;
+		pl_dbg(chip, PR_PARALLEL,
+			"total_settled_ua=%d settled_ua=%d new_total_settled_ua=%d\n",
+			chip->total_settled_ua, pval.intval,
+			new_total_settled_ua);
+
+		/* If ICL change is small skip splitting */
+		if (abs(new_total_settled_ua - chip->total_settled_ua)
+						> MIN_ICL_CHANGE_DELTA_UA)
+			split_settled(chip);
+	} else {
+		rerun_election(chip->fcc_votable);
+	}
+}
+
+static void handle_parallel_in_taper(struct pl_data *chip)
+{
+	union power_supply_propval pval = {0, };
+	int rc;
+
+	if (get_effective_result_locked(chip->pl_disable_votable))
+		return;
+
+	if (!chip->pl_psy)
+		return;
+
+	rc = power_supply_get_property(chip->pl_psy,
+			       POWER_SUPPLY_PROP_CHARGE_TYPE, &pval);
+	if (rc < 0) {
+		pr_err("Couldn't get pl charge type rc=%d\n", rc);
+		return;
+	}
+
+	/*
+	 * If the parallel charger is ever seen in taper mode, that is an
+	 * anomaly and we disable the parallel charger.
+	 */
+	if (pval.intval == POWER_SUPPLY_CHARGE_TYPE_TAPER) {
+		vote(chip->pl_disable_votable, PL_TAPER_EARLY_BAD_VOTER,
+				true, 0);
+		return;
+	}
+}
+
+static void status_change_work(struct work_struct *work)
+{
+	struct pl_data *chip = container_of(work,
+			struct pl_data, status_change_work.work);
+
+	if (!chip->main_psy && is_main_available(chip)) {
+		/*
+		 * re-run election for FCC/FV/ICL once main_psy
+		 * is available to ensure all votes are reflected
+		 * on hardware
+		 */
+		rerun_election(chip->usb_icl_votable);
+		rerun_election(chip->fcc_votable);
+		rerun_election(chip->fv_votable);
+	}
+
+	if (!chip->main_psy)
+		return;
+
+	if (!is_batt_available(chip))
+		return;
+
+	is_parallel_available(chip);
+
+	handle_main_charge_type(chip);
+	handle_settled_icl_change(chip);
+	handle_parallel_in_taper(chip);
+}
+
+static int pl_notifier_call(struct notifier_block *nb,
+		unsigned long ev, void *v)
+{
+	struct power_supply *psy = v;
+	struct pl_data *chip = container_of(nb, struct pl_data, nb);
+
+	if (ev != PSY_EVENT_PROP_CHANGED)
+		return NOTIFY_OK;
+
+	if ((strcmp(psy->desc->name, "parallel") == 0)
+	    || (strcmp(psy->desc->name, "battery") == 0)
+	    || (strcmp(psy->desc->name, "main") == 0))
+		schedule_delayed_work(&chip->status_change_work, 0);
+
+	return NOTIFY_OK;
+}
+
+static int pl_register_notifier(struct pl_data *chip)
+{
+	int rc;
+
+	chip->nb.notifier_call = pl_notifier_call;
+	rc = power_supply_reg_notifier(&chip->nb);
+	if (rc < 0) {
+		pr_err("Couldn't register psy notifier rc = %d\n", rc);
+		return rc;
+	}
+
+	return 0;
+}
+
+static int pl_determine_initial_status(struct pl_data *chip)
+{
+	status_change_work(&chip->status_change_work.work);
+	return 0;
+}
+
+#define DEFAULT_RESTRICTED_CURRENT_UA	1000000
+int qcom_batt_init(void)
+{
+	struct pl_data *chip;
+	int rc = 0;
+
+	/* initialize just once */
+	if (the_chip) {
+		pr_err("was initialized earlier Failing now\n");
+		return -EINVAL;
+	}
+
+	chip = kzalloc(sizeof(*chip), GFP_KERNEL);
+	if (!chip)
+		return -ENOMEM;
+	chip->slave_pct = 50;
+	chip->restricted_current = DEFAULT_RESTRICTED_CURRENT_UA;
+
+	chip->pl_ws = wakeup_source_register("qcom-battery");
+	if (!chip->pl_ws) {
+		rc = -ENOMEM;
+		goto cleanup;
+	}
+
+	chip->fcc_votable = create_votable("FCC", VOTE_MIN,
+					pl_fcc_vote_callback,
+					chip);
+	if (IS_ERR(chip->fcc_votable)) {
+		rc = PTR_ERR(chip->fcc_votable);
+		goto release_wakeup_source;
+	}
+
+	chip->fv_votable = create_votable("FV", VOTE_MAX,
+					pl_fv_vote_callback,
+					chip);
+	if (IS_ERR(chip->fv_votable)) {
+		rc = PTR_ERR(chip->fv_votable);
+		goto destroy_votable;
+	}
+
+	chip->usb_icl_votable = create_votable("USB_ICL", VOTE_MIN,
+					usb_icl_vote_callback,
+					chip);
+	if (IS_ERR(chip->usb_icl_votable)) {
+		rc = PTR_ERR(chip->usb_icl_votable);
+		goto destroy_votable;
+	}
+
+	chip->pl_disable_votable = create_votable("PL_DISABLE", VOTE_SET_ANY,
+					pl_disable_vote_callback,
+					chip);
+	if (IS_ERR(chip->pl_disable_votable)) {
+		rc = PTR_ERR(chip->pl_disable_votable);
+		goto destroy_votable;
+	}
+	vote(chip->pl_disable_votable, CHG_STATE_VOTER, true, 0);
+	vote(chip->pl_disable_votable, TAPER_END_VOTER, false, 0);
+	vote(chip->pl_disable_votable, PARALLEL_PSY_VOTER, true, 0);
+
+	chip->pl_awake_votable = create_votable("PL_AWAKE", VOTE_SET_ANY,
+					pl_awake_vote_callback,
+					chip);
+	if (IS_ERR(chip->pl_awake_votable)) {
+		rc = PTR_ERR(chip->pl_awake_votable);
+		goto destroy_votable;
+	}
+
+	chip->pl_enable_votable_indirect = create_votable("PL_ENABLE_INDIRECT",
+					VOTE_SET_ANY,
+					pl_enable_indirect_vote_callback,
+					chip);
+	if (IS_ERR(chip->pl_enable_votable_indirect)) {
+		rc = PTR_ERR(chip->pl_enable_votable_indirect);
+		goto destroy_votable;
+	}
+
+	vote(chip->pl_disable_votable, PL_INDIRECT_VOTER, true, 0);
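+	/*
+	 * Parallel charging starts out disabled here: CHG_STATE_VOTER,
+	 * PARALLEL_PSY_VOTER and PL_INDIRECT_VOTER each vote "disable" until
+	 * the battery is fast charging, a parallel psy is found, and enough
+	 * input current is available.
+	 */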
+
+	INIT_DELAYED_WORK(&chip->status_change_work, status_change_work);
+	INIT_DELAYED_WORK(&chip->pl_taper_work, pl_taper_work);
+	INIT_WORK(&chip->pl_disable_forever_work, pl_disable_forever_work);
+
+	rc = pl_register_notifier(chip);
+	if (rc < 0) {
+		pr_err("Couldn't register psy notifier rc = %d\n", rc);
+		goto unreg_notifier;
+	}
+
+	rc = pl_determine_initial_status(chip);
+	if (rc < 0) {
+		pr_err("Couldn't determine initial status rc=%d\n", rc);
+		goto unreg_notifier;
+	}
+
+	chip->qcom_batt_class.name = "qcom-battery";
+	chip->qcom_batt_class.owner = THIS_MODULE;
+	chip->qcom_batt_class.class_attrs = pl_attributes;
+
+	rc = class_register(&chip->qcom_batt_class);
+	if (rc < 0) {
+		pr_err("couldn't register pl_data sysfs class rc = %d\n", rc);
+		goto unreg_notifier;
+	}
+
+	the_chip = chip;
+
+	return 0;
+
+unreg_notifier:
+	power_supply_unreg_notifier(&chip->nb);
+destroy_votable:
+	destroy_votable(chip->pl_enable_votable_indirect);
+	destroy_votable(chip->pl_awake_votable);
+	destroy_votable(chip->pl_disable_votable);
+	destroy_votable(chip->fv_votable);
+	destroy_votable(chip->fcc_votable);
+	destroy_votable(chip->usb_icl_votable);
+release_wakeup_source:
+	wakeup_source_unregister(chip->pl_ws);
+cleanup:
+	kfree(chip);
+	return rc;
+}
+
+void qcom_batt_deinit(void)
+{
+	struct pl_data *chip = the_chip;
+
+	if (chip == NULL)
+		return;
+
+	cancel_delayed_work_sync(&chip->status_change_work);
+	cancel_delayed_work_sync(&chip->pl_taper_work);
+	cancel_work_sync(&chip->pl_disable_forever_work);
+
+	power_supply_unreg_notifier(&chip->nb);
+	destroy_votable(chip->pl_enable_votable_indirect);
+	destroy_votable(chip->pl_awake_votable);
+	destroy_votable(chip->pl_disable_votable);
+	destroy_votable(chip->fv_votable);
+	destroy_votable(chip->fcc_votable);
+	wakeup_source_unregister(chip->pl_ws);
+	the_chip = NULL;
+	kfree(chip);
+}
diff -Nruw linux-4.4.115-fbx/drivers/power/supply./qcom/battery_current_limit.c linux-4.4.115-fbx/drivers/power/supply/qcom/battery_current_limit.c
--- linux-4.4.115-fbx/drivers/power/supply./qcom/battery_current_limit.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/power/supply/qcom/battery_current_limit.c	2019-01-22 16:16:26.227271074 +0100
@@ -0,0 +1,1842 @@
+/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#define pr_fmt(fmt)	"%s: " fmt, __func__
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/platform_device.h>
+#include <linux/errno.h>
+#include <linux/device.h>
+#include <linux/power_supply.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/cpufreq.h>
+#include <linux/qpnp/qpnp-adc.h>
+#include <linux/cpu.h>
+#include <linux/msm_bcl.h>
+#include <linux/power_supply.h>
+#include <linux/cpumask.h>
+#include <linux/msm_thermal.h>
+
+#define CREATE_TRACE_POINTS
+#define _BCL_SW_TRACE
+#include <trace/trace_thermal.h>
+
+#define BCL_DEV_NAME "battery_current_limit"
+#define BCL_NAME_LENGTH 20
+/*
+ * Default BCL poll interval 1000 msec
+ */
+#define BCL_POLL_INTERVAL 1000
+/*
+ * Minimum BCL poll interval 10 msec
+ */
+#define MIN_BCL_POLL_INTERVAL 10
+#define BATTERY_VOLTAGE_MIN 3400
+#define BTM_8084_FREQ_MITIG_LIMIT 1958400
+#define MAX_CPU_NAME 10
+
+#define BCL_FETCH_DT_U32(_dev, _key, _search_str, _ret, _out) do { \
+		_key = _search_str; \
+		_ret = of_property_read_u32(_dev, _key, &_out); \
+	} while (0)
+
+/*
+ * Battery Current Limit Enable or Not
+ */
+enum bcl_device_mode {
+	BCL_DEVICE_DISABLED = 0,
+	BCL_DEVICE_ENABLED,
+};
+
+/*
+ * Battery Current Limit Iavail Threshold Mode set
+ */
+enum bcl_iavail_threshold_mode {
+	BCL_IAVAIL_THRESHOLD_DISABLED = 0,
+	BCL_IAVAIL_THRESHOLD_ENABLED,
+};
+
+/*
+ * Battery Current Limit Iavail Threshold Mode
+ */
+enum bcl_iavail_threshold_type {
+	BCL_LOW_THRESHOLD_TYPE = 0,
+	BCL_HIGH_THRESHOLD_TYPE,
+	BCL_THRESHOLD_TYPE_MAX,
+};
+
+enum bcl_monitor_type {
+	BCL_IAVAIL_MONITOR_TYPE,
+	BCL_IBAT_MONITOR_TYPE,
+	BCL_IBAT_PERIPH_MONITOR_TYPE,
+	BCL_MONITOR_TYPE_MAX,
+};
+
+enum bcl_adc_monitor_mode {
+	BCL_MONITOR_DISABLED,
+	BCL_VPH_MONITOR_MODE,
+	BCL_IBAT_MONITOR_MODE,
+	BCL_IBAT_HIGH_LOAD_MODE,
+	BCL_MONITOR_MODE_MAX,
+};
+
+static const char *bcl_type[BCL_MONITOR_TYPE_MAX] = {"bcl", "btm",
+		"bcl_peripheral"};
+int adc_timer_val_usec[] = {
+	[ADC_MEAS1_INTERVAL_0MS] = 0,
+	[ADC_MEAS1_INTERVAL_1P0MS] = 1000,
+	[ADC_MEAS1_INTERVAL_2P0MS] = 2000,
+	[ADC_MEAS1_INTERVAL_3P9MS] = 3900,
+	[ADC_MEAS1_INTERVAL_7P8MS] = 7800,
+	[ADC_MEAS1_INTERVAL_15P6MS] = 15600,
+	[ADC_MEAS1_INTERVAL_31P3MS] = 31300,
+	[ADC_MEAS1_INTERVAL_62P5MS] = 62500,
+	[ADC_MEAS1_INTERVAL_125MS] = 125000,
+	[ADC_MEAS1_INTERVAL_250MS] = 250000,
+	[ADC_MEAS1_INTERVAL_500MS] = 500000,
+	[ADC_MEAS1_INTERVAL_1S] = 1000000,
+	[ADC_MEAS1_INTERVAL_2S] = 2000000,
+	[ADC_MEAS1_INTERVAL_4S] = 4000000,
+	[ADC_MEAS1_INTERVAL_8S] = 8000000,
+	[ADC_MEAS1_INTERVAL_16S] = 16000000,
+};
+
+/**
+ * BCL control block
+ *
+ */
+struct bcl_context {
+	/* BCL device */
+	struct device *dev;
+
+	/* BCL related config parameter */
+	/* BCL mode enable or not */
+	enum bcl_device_mode bcl_mode;
+	/* BCL monitoring Iavail or Ibat */
+	enum bcl_monitor_type bcl_monitor_type;
+	/* BCL Iavail Threshold Activate or Not */
+	enum bcl_iavail_threshold_mode
+				bcl_threshold_mode[BCL_THRESHOLD_TYPE_MAX];
+	/* BCL Iavail Threshold value in milli Amp */
+	int bcl_threshold_value_ma[BCL_THRESHOLD_TYPE_MAX];
+	/* BCL Type */
+	char bcl_type[BCL_NAME_LENGTH];
+	/* BCL poll in msec */
+	int bcl_poll_interval_msec;
+
+	/* BCL realtime value based on poll */
+	/* BCL realtime vbat in mV*/
+	int bcl_vbat_mv;
+	/* BCL realtime rbat in mOhms*/
+	int bcl_rbat_mohm;
+	/*BCL realtime iavail in milli Amp*/
+	int bcl_iavail;
+	/*BCL vbatt min in mV*/
+	int bcl_vbat_min;
+	/* BCL period poll delay work structure  */
+	struct delayed_work bcl_iavail_work;
+	/* For non-bms target */
+	bool bcl_no_bms;
+	/* The max CPU frequency the BTM restricts during high load */
+	uint32_t btm_freq_max;
+	/* Indicates whether there is a high load */
+	enum bcl_adc_monitor_mode btm_mode;
+	/* battery current high load clr threshold */
+	int btm_low_threshold_uv;
+	/* battery current high load threshold */
+	int btm_high_threshold_uv;
+	/* ADC battery current polling timer interval */
+	enum qpnp_adc_meas_timer_1 btm_adc_interval;
+	/* Ibat ADC config parameters */
+	struct qpnp_adc_tm_chip *btm_adc_tm_dev;
+	struct qpnp_vadc_chip *btm_vadc_dev;
+	int btm_ibat_chan;
+	struct qpnp_adc_tm_btm_param btm_ibat_adc_param;
+	uint32_t btm_uv_to_ua_numerator;
+	uint32_t btm_uv_to_ua_denominator;
+	/* Vph ADC config parameters */
+	int btm_vph_chan;
+	uint32_t btm_vph_high_thresh;
+	uint32_t btm_vph_low_thresh;
+	struct qpnp_adc_tm_btm_param btm_vph_adc_param;
+	/* Low temp min freq limit requested by thermal */
+	uint32_t thermal_freq_limit;
+	/* state of charge notifier */
+	struct notifier_block psy_nb;
+	struct work_struct soc_mitig_work;
+
+	/* BCL Peripheral monitor parameters */
+	struct bcl_threshold ibat_high_thresh;
+	struct bcl_threshold ibat_low_thresh;
+	struct bcl_threshold vbat_high_thresh;
+	struct bcl_threshold vbat_low_thresh;
+	uint32_t bcl_p_freq_max;
+	struct workqueue_struct *bcl_hotplug_wq;
+	struct device_clnt_data *hotplug_handle;
+	struct device_clnt_data *cpufreq_handle[NR_CPUS];
+};
+
+enum bcl_threshold_state {
+	BCL_LOW_THRESHOLD = 0,
+	BCL_HIGH_THRESHOLD,
+	BCL_THRESHOLD_DISABLED,
+};
+
+static struct bcl_context *gbcl;
+static enum bcl_threshold_state bcl_vph_state = BCL_THRESHOLD_DISABLED,
+		bcl_ibat_state = BCL_THRESHOLD_DISABLED,
+		bcl_soc_state = BCL_THRESHOLD_DISABLED;
+static DEFINE_MUTEX(bcl_notify_mutex);
+static uint32_t bcl_hotplug_request, bcl_hotplug_mask, bcl_soc_hotplug_mask;
+static uint32_t bcl_frequency_mask;
+static struct work_struct bcl_hotplug_work;
+static DEFINE_MUTEX(bcl_hotplug_mutex);
+static DEFINE_MUTEX(bcl_cpufreq_mutex);
+static bool bcl_hotplug_enabled;
+static uint32_t battery_soc_val = 100;
+static uint32_t soc_low_threshold;
+static const char bcl_psy_name[] = "bcl";
+
+static void bcl_handle_hotplug(struct work_struct *work)
+{
+	int ret = 0, cpu = 0;
+	union device_request curr_req;
+
+	trace_bcl_sw_mitigation_event("start hotplug mitigation");
+	mutex_lock(&bcl_hotplug_mutex);
+
+	if (bcl_soc_state == BCL_LOW_THRESHOLD
+		|| bcl_vph_state == BCL_LOW_THRESHOLD)
+		bcl_hotplug_request = bcl_soc_hotplug_mask;
+	else if (bcl_ibat_state == BCL_HIGH_THRESHOLD)
+		bcl_hotplug_request = bcl_hotplug_mask;
+	else
+		bcl_hotplug_request = 0;
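+	/*
+	 * Mask selection above: a low SoC or low Vph picks
+	 * bcl_soc_hotplug_mask, a high Ibat picks bcl_hotplug_mask, and any
+	 * other state requests no cores offline.
+	 */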
+
+	cpumask_clear(&curr_req.offline_mask);
+	for_each_possible_cpu(cpu) {
+		if (bcl_hotplug_request & BIT(cpu))
+			cpumask_set_cpu(cpu, &curr_req.offline_mask);
+	}
+	trace_bcl_sw_mitigation("Start hotplug CPU", bcl_hotplug_request);
+	ret = devmgr_client_request_mitigation(
+		gbcl->hotplug_handle,
+		HOTPLUG_MITIGATION_REQ,
+		&curr_req);
+	if (ret) {
+		pr_err("hotplug request failed. err:%d\n", ret);
+		goto handle_hotplug_exit;
+	}
+
+handle_hotplug_exit:
+	mutex_unlock(&bcl_hotplug_mutex);
+	trace_bcl_sw_mitigation_event("stop hotplug mitigation");
+}
+
+static void update_cpu_freq(void)
+{
+	int cpu, ret = 0;
+	union device_request cpufreq_req;
+
+	trace_bcl_sw_mitigation_event("Start Frequency Mitigate");
+	mutex_lock(&bcl_cpufreq_mutex);
+	cpufreq_req.freq.max_freq = UINT_MAX;
+	cpufreq_req.freq.min_freq = CPUFREQ_MIN_NO_MITIGATION;
+
+	if (bcl_vph_state == BCL_LOW_THRESHOLD
+		|| bcl_ibat_state == BCL_HIGH_THRESHOLD
+		|| bcl_soc_state == BCL_LOW_THRESHOLD) {
+		cpufreq_req.freq.max_freq = (gbcl->bcl_monitor_type
+			== BCL_IBAT_MONITOR_TYPE) ? gbcl->btm_freq_max
+			: gbcl->bcl_p_freq_max;
+	}
+
+	for_each_possible_cpu(cpu) {
+		if (!(bcl_frequency_mask & BIT(cpu)))
+			continue;
+		pr_debug("Requesting Max freq:%u for CPU%d\n",
+			cpufreq_req.freq.max_freq, cpu);
+		trace_bcl_sw_mitigation("Frequency Mitigate CPU", cpu);
+		ret = devmgr_client_request_mitigation(
+			gbcl->cpufreq_handle[cpu],
+			CPUFREQ_MITIGATION_REQ, &cpufreq_req);
+		if (ret)
+			pr_err("Error updating freq for CPU%d. ret:%d\n",
+				cpu, ret);
+	}
+	mutex_unlock(&bcl_cpufreq_mutex);
+	trace_bcl_sw_mitigation_event("End Frequency Mitigation");
+}
+
+static void soc_mitigate(struct work_struct *work)
+{
+	if (bcl_hotplug_enabled)
+		queue_work(gbcl->bcl_hotplug_wq, &bcl_hotplug_work);
+	update_cpu_freq();
+}
+
+static int get_and_evaluate_battery_soc(void)
+{
+	static struct power_supply *batt_psy;
+	union power_supply_propval ret = {0,};
+	int battery_percentage;
+	enum bcl_threshold_state prev_soc_state;
+
+	if (!batt_psy)
+		batt_psy = power_supply_get_by_name("battery");
+	if (batt_psy) {
+		if (power_supply_get_property(batt_psy,
+				POWER_SUPPLY_PROP_CAPACITY, &ret))
+			return NOTIFY_OK;
+		battery_percentage = ret.intval;
+		battery_soc_val = battery_percentage;
+		pr_debug("Battery SOC reported:%d", battery_soc_val);
+		trace_bcl_sw_mitigation("SoC reported", battery_soc_val);
+		prev_soc_state = bcl_soc_state;
+		bcl_soc_state = (battery_soc_val <= soc_low_threshold) ?
+					BCL_LOW_THRESHOLD : BCL_HIGH_THRESHOLD;
+		if (bcl_soc_state == prev_soc_state)
+			return NOTIFY_OK;
+		trace_bcl_sw_mitigation_event(
+			(bcl_soc_state == BCL_LOW_THRESHOLD)
+			? "trigger SoC mitigation"
+			: "clear SoC mitigation");
+		schedule_work(&gbcl->soc_mitig_work);
+	}
+	return NOTIFY_OK;
+}
+
+static int power_supply_callback(struct notifier_block *nb,
+				  unsigned long event, void *data)
+{
+	struct power_supply *psy = data;
+
+	if (gbcl->bcl_mode != BCL_DEVICE_ENABLED) {
+		pr_debug("BCL is not enabled\n");
+		return NOTIFY_OK;
+	}
+
+	if (strcmp(psy->desc->name, "battery"))
+		return NOTIFY_OK;
+
+	return get_and_evaluate_battery_soc();
+}
+
+static int bcl_get_battery_voltage(int *vbatt_mv)
+{
+	static struct power_supply *psy;
+	union power_supply_propval ret = {0,};
+
+	if (psy == NULL) {
+		psy = power_supply_get_by_name("battery");
+		if (psy == NULL) {
+			pr_err("failed to get ps battery\n");
+			return -EINVAL;
+		}
+	}
+
+	if (power_supply_get_property(psy, POWER_SUPPLY_PROP_VOLTAGE_NOW, &ret))
+		return -EINVAL;
+
+	if (ret.intval <= 0)
+		return -EINVAL;
+
+	*vbatt_mv = ret.intval / 1000;
+	return 0;
+}
+
+
+static int bcl_get_resistance(int *rbatt_mohm)
+{
+	static struct power_supply *psy;
+	union power_supply_propval ret = {0,};
+
+	if (psy == NULL) {
+		psy =
+		power_supply_get_by_name(gbcl->bcl_no_bms ? "battery" : "bms");
+		if (psy == NULL) {
+			pr_err("failed to get ps %s\n",
+				gbcl->bcl_no_bms ? "battery" : "bms");
+			return -EINVAL;
+		}
+	}
+	if (power_supply_get_property(psy, POWER_SUPPLY_PROP_RESISTANCE, &ret))
+		return -EINVAL;
+
+	if (ret.intval < 1000)
+		return -EINVAL;
+
+	*rbatt_mohm = ret.intval / 1000;
+
+	return 0;
+}
+
+/*
+ * BCL iavail calculation and trigger notification to user space
+ * if iavail cross threshold
+ */
+static void bcl_calculate_iavail_trigger(void)
+{
+	int iavail_ma = 0;
+	int vbatt_mv;
+	int rbatt_mohm;
+	bool threshold_cross = false;
+
+	if (!gbcl) {
+		pr_err("called before initialization\n");
+		return;
+	}
+
+	if (bcl_get_battery_voltage(&vbatt_mv))
+		return;
+
+	if (bcl_get_resistance(&rbatt_mohm))
+		return;
+
+	iavail_ma = (vbatt_mv - gbcl->bcl_vbat_min) * 1000 / rbatt_mohm;
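+	/*
+	 * Example: vbatt = 3800 mV, vbat_min = 3400 mV and rbatt = 100 mOhm
+	 * give iavail = (3800 - 3400) * 1000 / 100 = 4000 mA.
+	 */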
+
+	gbcl->bcl_rbat_mohm = rbatt_mohm;
+	gbcl->bcl_vbat_mv = vbatt_mv;
+	gbcl->bcl_iavail = iavail_ma;
+
+	pr_debug("iavail %d, vbatt %d rbatt %d\n", iavail_ma, vbatt_mv,
+			rbatt_mohm);
+
+	if ((gbcl->bcl_threshold_mode[BCL_HIGH_THRESHOLD_TYPE] ==
+				BCL_IAVAIL_THRESHOLD_ENABLED)
+		&& (iavail_ma >=
+		gbcl->bcl_threshold_value_ma[BCL_HIGH_THRESHOLD_TYPE]))
+		threshold_cross = true;
+	else if ((gbcl->bcl_threshold_mode[BCL_LOW_THRESHOLD_TYPE]
+				== BCL_IAVAIL_THRESHOLD_ENABLED)
+		&& (iavail_ma <=
+		gbcl->bcl_threshold_value_ma[BCL_LOW_THRESHOLD_TYPE]))
+		threshold_cross = true;
+
+	if (threshold_cross)
+		sysfs_notify(&gbcl->dev->kobj, NULL, "type");
+}
+
+/*
+ * BCL iavail work
+ */
+static void bcl_iavail_work(struct work_struct *work)
+{
+	struct bcl_context *bcl = container_of(work,
+			struct bcl_context, bcl_iavail_work.work);
+
+	if (gbcl->bcl_mode == BCL_DEVICE_ENABLED) {
+		bcl_calculate_iavail_trigger();
+		/* restart the delayed work for calculating iavail */
+		schedule_delayed_work(&bcl->bcl_iavail_work,
+			msecs_to_jiffies(bcl->bcl_poll_interval_msec));
+	}
+}
+
+static void bcl_ibat_notify(enum bcl_threshold_state thresh_type)
+{
+	bcl_ibat_state = thresh_type;
+	if (bcl_hotplug_enabled)
+		queue_work(gbcl->bcl_hotplug_wq, &bcl_hotplug_work);
+	update_cpu_freq();
+}
+
+static void bcl_vph_notify(enum bcl_threshold_state thresh_type)
+{
+	bcl_vph_state = thresh_type;
+	if (bcl_hotplug_enabled)
+		queue_work(gbcl->bcl_hotplug_wq, &bcl_hotplug_work);
+	update_cpu_freq();
+}
+
+int bcl_voltage_notify(bool is_high_thresh)
+{
+	int ret = 0;
+
+	if (!gbcl) {
+		pr_err("BCL Driver not configured\n");
+		return -EINVAL;
+	}
+	if (gbcl->bcl_mode == BCL_DEVICE_ENABLED) {
+		pr_err("BCL Driver is enabled\n");
+		return -EINVAL;
+	}
+
+	trace_bcl_sw_mitigation_event((is_high_thresh)
+		? "vbat High trip notify"
+		: "vbat Low trip notify");
+	bcl_vph_notify((is_high_thresh) ? BCL_HIGH_THRESHOLD
+			: BCL_LOW_THRESHOLD);
+	return ret;
+}
+EXPORT_SYMBOL(bcl_voltage_notify);
+
+int bcl_current_notify(bool is_high_thresh)
+{
+	int ret = 0;
+
+	if (!gbcl) {
+		pr_err("BCL Driver not configured\n");
+		return -EINVAL;
+	}
+	if (gbcl->bcl_mode == BCL_DEVICE_ENABLED) {
+		pr_err("BCL Driver is enabled\n");
+		return -EINVAL;
+	}
+
+	trace_bcl_sw_mitigation_event((is_high_thresh)
+		? "ibat High trip notify"
+		: "ibat Low trip notify");
+	bcl_ibat_notify((is_high_thresh) ? BCL_HIGH_THRESHOLD
+			: BCL_LOW_THRESHOLD);
+	return ret;
+}
+EXPORT_SYMBOL(bcl_current_notify);
+
+static void bcl_ibat_notification(enum qpnp_tm_state state, void *ctx);
+static void bcl_vph_notification(enum qpnp_tm_state state, void *ctx);
+static int bcl_config_ibat_adc(struct bcl_context *bcl,
+			enum bcl_iavail_threshold_type thresh_type);
+static int bcl_config_vph_adc(struct bcl_context *bcl,
+			enum bcl_iavail_threshold_type thresh_type)
+{
+	int ret = 0;
+
+	if (bcl->bcl_mode == BCL_DEVICE_DISABLED
+		|| bcl->bcl_monitor_type != BCL_IBAT_MONITOR_TYPE)
+		return -EINVAL;
+
+	switch (thresh_type) {
+	case BCL_HIGH_THRESHOLD_TYPE:
+		bcl->btm_vph_adc_param.state_request = ADC_TM_HIGH_THR_ENABLE;
+		break;
+	case BCL_LOW_THRESHOLD_TYPE:
+		bcl->btm_vph_adc_param.state_request = ADC_TM_LOW_THR_ENABLE;
+		break;
+	default:
+		pr_err("Invalid threshold type:%d\n", thresh_type);
+		return -EINVAL;
+	}
+	bcl->btm_vph_adc_param.low_thr = bcl->btm_vph_low_thresh;
+	bcl->btm_vph_adc_param.high_thr = bcl->btm_vph_high_thresh;
+	bcl->btm_vph_adc_param.timer_interval =
+			adc_timer_val_usec[ADC_MEAS1_INTERVAL_1S];
+	bcl->btm_vph_adc_param.btm_ctx = bcl;
+	bcl->btm_vph_adc_param.threshold_notification = bcl_vph_notification;
+	bcl->btm_vph_adc_param.channel = bcl->btm_vph_chan;
+
+	ret = qpnp_adc_tm_channel_measure(bcl->btm_adc_tm_dev,
+			&bcl->btm_vph_adc_param);
+	if (ret < 0)
+		pr_err("Error configuring BTM for Vph. ret:%d\n", ret);
+	else
+		pr_debug("Vph config. poll:%d high_uv:%d(%s) low_uv:%d(%s)\n",
+		    bcl->btm_vph_adc_param.timer_interval,
+		    bcl->btm_vph_adc_param.high_thr,
+		    (bcl->btm_vph_adc_param.state_request ==
+			ADC_TM_HIGH_THR_ENABLE) ? "enabled" : "disabled",
+		    bcl->btm_vph_adc_param.low_thr,
+		    (bcl->btm_vph_adc_param.state_request ==
+			ADC_TM_LOW_THR_ENABLE) ? "enabled" : "disabled");
+
+	return ret;
+}
+
+static int current_to_voltage(struct bcl_context *bcl, int ua)
+{
+	return DIV_ROUND_CLOSEST(ua * bcl->btm_uv_to_ua_denominator,
+			bcl->btm_uv_to_ua_numerator);
+}
+
+static int voltage_to_current(struct bcl_context *bcl, int uv)
+{
+	return DIV_ROUND_CLOSEST(uv * bcl->btm_uv_to_ua_numerator,
+			bcl->btm_uv_to_ua_denominator);
+}
+
+static int adc_time_to_uSec(struct bcl_context *bcl,
+		enum qpnp_adc_meas_timer_1 t)
+{
+	return adc_timer_val_usec[t];
+}
+
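+/*
+ * Map a requested measurement interval (in usec) to the largest supported
+ * QPNP ADC timer setting whose period does not exceed it.  Index 0 is the
+ * continuous-measurement setting, which this driver does not allow.
+ */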
+static int uSec_to_adc_time(struct bcl_context *bcl, int us)
+{
+	int i;
+
+	for (i = ARRAY_SIZE(adc_timer_val_usec) - 1;
+		i >= 0 && adc_timer_val_usec[i] > us; i--)
+		;
+
+	/* disallow continuous mode */
+	if (i <= 0)
+		return -EINVAL;
+
+	return i;
+}
+
+static int vph_disable(void)
+{
+	int ret = 0;
+
+	ret = qpnp_adc_tm_disable_chan_meas(gbcl->btm_adc_tm_dev,
+			&gbcl->btm_vph_adc_param);
+	if (ret) {
+		pr_err("Error disabling ADC. err:%d\n", ret);
+		gbcl->bcl_mode = BCL_DEVICE_ENABLED;
+		gbcl->btm_mode = BCL_VPH_MONITOR_MODE;
+		goto vph_disable_exit;
+	}
+	bcl_vph_notify(BCL_THRESHOLD_DISABLED);
+	gbcl->btm_mode = BCL_MONITOR_DISABLED;
+
+vph_disable_exit:
+	return ret;
+}
+
+static int ibat_disable(void)
+{
+	int ret = 0;
+
+	ret = qpnp_adc_tm_disable_chan_meas(gbcl->btm_adc_tm_dev,
+			&gbcl->btm_ibat_adc_param);
+	if (ret) {
+		pr_err("Error disabling ADC. err:%d\n", ret);
+		gbcl->bcl_mode = BCL_DEVICE_ENABLED;
+		gbcl->btm_mode = BCL_IBAT_MONITOR_MODE;
+		goto ibat_disable_exit;
+	}
+	bcl_ibat_notify(BCL_THRESHOLD_DISABLED);
+
+ibat_disable_exit:
+	return ret;
+}
+
+static void bcl_periph_ibat_notify(enum bcl_trip_type type, int trip_temp,
+	void *data)
+{
+	if (type == BCL_HIGH_TRIP)
+		bcl_ibat_notify(BCL_HIGH_THRESHOLD);
+	else
+		bcl_ibat_notify(BCL_LOW_THRESHOLD);
+}
+
+static void bcl_periph_vbat_notify(enum bcl_trip_type type, int trip_temp,
+		void *data)
+{
+	if (type == BCL_HIGH_TRIP)
+		bcl_vph_notify(BCL_HIGH_THRESHOLD);
+	else
+		bcl_vph_notify(BCL_LOW_THRESHOLD);
+}
+
+static void bcl_periph_mode_set(enum bcl_device_mode mode)
+{
+	int ret = 0;
+
+	if (mode == BCL_DEVICE_ENABLED) {
+		/*
+		 * The power supply monitor won't send a callback until the
+		 * power state changes, so read the current SoC now and
+		 * mitigate if necessary.
+		 */
+		get_and_evaluate_battery_soc();
+		ret = power_supply_reg_notifier(&gbcl->psy_nb);
+		if (ret < 0) {
+			pr_err("Unable to register soc notifier rc = %d\n",
+				ret);
+			return;
+		}
+		ret = msm_bcl_set_threshold(BCL_PARAM_CURRENT, BCL_HIGH_TRIP,
+			&gbcl->ibat_high_thresh);
+		if (ret) {
+			pr_err("Error setting Ibat high threshold. err:%d\n",
+				ret);
+			return;
+		}
+		ret = msm_bcl_set_threshold(BCL_PARAM_CURRENT, BCL_LOW_TRIP,
+			&gbcl->ibat_low_thresh);
+		if (ret) {
+			pr_err("Error setting Ibat low threshold. err:%d\n",
+				ret);
+			return;
+		}
+		ret = msm_bcl_set_threshold(BCL_PARAM_VOLTAGE, BCL_LOW_TRIP,
+			 &gbcl->vbat_low_thresh);
+		if (ret) {
+			pr_err("Error setting Vbat low threshold. err:%d\n",
+				ret);
+			return;
+		}
+		ret = msm_bcl_set_threshold(BCL_PARAM_VOLTAGE, BCL_HIGH_TRIP,
+			 &gbcl->vbat_high_thresh);
+		if (ret) {
+			pr_err("Error setting Vbat high threshold. err:%d\n",
+				ret);
+			return;
+		}
+		ret = msm_bcl_enable();
+		if (ret) {
+			pr_err("Error enabling BCL\n");
+			return;
+		}
+		gbcl->btm_mode = BCL_VPH_MONITOR_MODE;
+	} else {
+		power_supply_unreg_notifier(&gbcl->psy_nb);
+		ret = msm_bcl_disable();
+		if (ret) {
+			pr_err("Error disabling BCL\n");
+			return;
+		}
+		gbcl->btm_mode = BCL_MONITOR_DISABLED;
+		bcl_soc_state = BCL_THRESHOLD_DISABLED;
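+		/*
+		 * Report the benign thresholds (Vph high, Ibat low) so any
+		 * frequency or hotplug mitigation applied earlier is
+		 * released before monitoring stops.
+		 */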
+		bcl_vph_notify(BCL_HIGH_THRESHOLD);
+		bcl_ibat_notify(BCL_LOW_THRESHOLD);
+		bcl_handle_hotplug(NULL);
+	}
+}
+
+static void ibat_mode_set(enum bcl_device_mode mode)
+{
+	int ret = 0;
+
+	if (mode == BCL_DEVICE_ENABLED) {
+		gbcl->btm_mode = BCL_VPH_MONITOR_MODE;
+		ret = bcl_config_vph_adc(gbcl, BCL_LOW_THRESHOLD_TYPE);
+		if (ret) {
+			pr_err("Vph config error. ret:%d\n", ret);
+			gbcl->bcl_mode = BCL_DEVICE_DISABLED;
+			gbcl->btm_mode = BCL_MONITOR_DISABLED;
+			return;
+		}
+	} else {
+		switch (gbcl->btm_mode) {
+		case BCL_IBAT_MONITOR_MODE:
+		case BCL_IBAT_HIGH_LOAD_MODE:
+			ret = ibat_disable();
+			if (ret)
+				return;
+			ret = vph_disable();
+			if (ret)
+				return;
+			break;
+		case BCL_VPH_MONITOR_MODE:
+			ret = vph_disable();
+			if (ret)
+				return;
+			break;
+		case BCL_MONITOR_DISABLED:
+		default:
+			break;
+		}
+		gbcl->btm_mode = BCL_MONITOR_DISABLED;
+	}
+}
+
+static void bcl_vph_notification(enum qpnp_tm_state state, void *ctx)
+{
+	struct bcl_context *bcl = ctx;
+	int ret = 0;
+
+	mutex_lock(&bcl_notify_mutex);
+	if (bcl->btm_mode == BCL_MONITOR_DISABLED)
+		goto unlock_and_exit;
+
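+	/*
+	 * Vph trip state machine: a low-Vph trip hands off to Ibat current
+	 * monitoring (arming a high-Vph recovery threshold), while a
+	 * high-Vph trip stops Ibat monitoring and re-arms the low-Vph
+	 * threshold.
+	 */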
+	switch (state) {
+	case ADC_TM_LOW_STATE:
+		if (bcl->btm_mode != BCL_VPH_MONITOR_MODE) {
+			pr_err("Low thresh received with invalid btm mode:%d\n",
+				bcl->btm_mode);
+			ibat_mode_set(BCL_DEVICE_DISABLED);
+			goto unlock_and_exit;
+		}
+		pr_debug("Initiating Ibat current monitoring\n");
+		bcl_vph_notify(BCL_LOW_THRESHOLD);
+		bcl_config_ibat_adc(gbcl, BCL_HIGH_THRESHOLD_TYPE);
+		bcl_config_vph_adc(gbcl, BCL_HIGH_THRESHOLD_TYPE);
+		bcl->btm_mode = BCL_IBAT_MONITOR_MODE;
+		break;
+	case ADC_TM_HIGH_STATE:
+		if (bcl->btm_mode != BCL_IBAT_MONITOR_MODE
+			&& bcl->btm_mode != BCL_IBAT_HIGH_LOAD_MODE) {
+			pr_err("High thresh received with invalid btm mode:%d\n"
+				, bcl->btm_mode);
+			ibat_mode_set(BCL_DEVICE_DISABLED);
+			goto unlock_and_exit;
+		}
+		pr_debug("Exiting Ibat current monitoring\n");
+		bcl->btm_mode = BCL_VPH_MONITOR_MODE;
+		ret = ibat_disable();
+		if (ret) {
+			pr_err("Error disabling ibat ADC. err:%d\n", ret);
+			goto unlock_and_exit;
+		}
+		bcl_vph_notify(BCL_HIGH_THRESHOLD);
+		bcl_config_vph_adc(gbcl, BCL_LOW_THRESHOLD_TYPE);
+		break;
+	default:
+		goto set_thresh;
+	}
+unlock_and_exit:
+	mutex_unlock(&bcl_notify_mutex);
+	return;
+
+set_thresh:
+	mutex_unlock(&bcl_notify_mutex);
+	bcl_config_vph_adc(gbcl, BCL_HIGH_THRESHOLD_TYPE);
+}
+
+/*
+ * Set BCL mode
+ */
+static void bcl_mode_set(enum bcl_device_mode mode)
+{
+	if (!gbcl)
+		return;
+	if (gbcl->bcl_mode == mode)
+		return;
+
+	gbcl->bcl_mode = mode;
+	switch (gbcl->bcl_monitor_type) {
+	case BCL_IAVAIL_MONITOR_TYPE:
+		if (mode == BCL_DEVICE_ENABLED)
+			schedule_delayed_work(&gbcl->bcl_iavail_work, 0);
+		else
+			cancel_delayed_work_sync(&(gbcl->bcl_iavail_work));
+		break;
+	case BCL_IBAT_MONITOR_TYPE:
+		ibat_mode_set(mode);
+		break;
+	case BCL_IBAT_PERIPH_MONITOR_TYPE:
+		bcl_periph_mode_set(mode);
+		break;
+	default:
+		pr_err("Invalid monitor type:%d\n", gbcl->bcl_monitor_type);
+		break;
+	}
+}
+
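+/*
+ * Generate a sysfs show() handler that prints a single driver variable
+ * with the given format.
+ */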
+#define show_bcl(name, variable, format) \
+static ssize_t \
+name##_show(struct device *dev, struct device_attribute *attr, char *buf) \
+{ \
+	if (gbcl) \
+		return snprintf(buf, PAGE_SIZE, format, variable); \
+	else \
+		return -EPERM; \
+}
+
+show_bcl(type, gbcl->bcl_type, "%s\n")
+show_bcl(vbat, gbcl->bcl_vbat_mv, "%d\n")
+show_bcl(rbat, gbcl->bcl_rbat_mohm, "%d\n")
+show_bcl(iavail, gbcl->bcl_iavail, "%d\n")
+show_bcl(vbat_min, gbcl->bcl_vbat_min, "%d\n")
+show_bcl(poll_interval, gbcl->bcl_poll_interval_msec, "%d\n")
+show_bcl(high_ua, (gbcl->bcl_monitor_type == BCL_IBAT_MONITOR_TYPE) ?
+	voltage_to_current(gbcl, gbcl->btm_high_threshold_uv)
+	: gbcl->ibat_high_thresh.trip_value, "%d\n")
+show_bcl(low_ua, (gbcl->bcl_monitor_type == BCL_IBAT_MONITOR_TYPE) ?
+	voltage_to_current(gbcl, gbcl->btm_low_threshold_uv)
+	: gbcl->ibat_low_thresh.trip_value, "%d\n")
+show_bcl(adc_interval_us, (gbcl->bcl_monitor_type == BCL_IBAT_MONITOR_TYPE) ?
+	adc_time_to_uSec(gbcl, gbcl->btm_adc_interval) : 0, "%d\n")
+show_bcl(freq_max, (gbcl->bcl_monitor_type == BCL_IBAT_MONITOR_TYPE) ?
+	gbcl->btm_freq_max : gbcl->bcl_p_freq_max, "%u\n")
+show_bcl(vph_high, (gbcl->bcl_monitor_type == BCL_IBAT_MONITOR_TYPE) ?
+	gbcl->btm_vph_high_thresh : gbcl->vbat_high_thresh.trip_value, "%d\n")
+show_bcl(vph_low, (gbcl->bcl_monitor_type == BCL_IBAT_MONITOR_TYPE) ?
+	gbcl->btm_vph_low_thresh : gbcl->vbat_low_thresh.trip_value, "%d\n")
+show_bcl(freq_limit, gbcl->thermal_freq_limit, "%u\n")
+show_bcl(vph_state, bcl_vph_state, "%d\n")
+show_bcl(ibat_state, bcl_ibat_state, "%d\n")
+show_bcl(hotplug_mask, bcl_hotplug_mask, "%d\n")
+show_bcl(hotplug_soc_mask, bcl_soc_hotplug_mask, "%d\n")
+show_bcl(hotplug_status, bcl_hotplug_request, "%d\n")
+show_bcl(soc_low_thresh, soc_low_threshold, "%d\n")
+
+static ssize_t
+mode_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	if (!gbcl)
+		return -EPERM;
+
+	return snprintf(buf, PAGE_SIZE, "%s\n",
+		gbcl->bcl_mode == BCL_DEVICE_ENABLED ? "enabled"
+			: "disabled");
+}
+
+static ssize_t
+mode_store(struct device *dev, struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	if (!gbcl)
+		return -EPERM;
+
+	if (!strcmp(buf, "enable")) {
+		bcl_mode_set(BCL_DEVICE_ENABLED);
+		pr_info("bcl enabled\n");
+	} else if (!strcmp(buf, "disable")) {
+		bcl_mode_set(BCL_DEVICE_DISABLED);
+		pr_info("bcl disabled\n");
+	} else {
+		return -EINVAL;
+	}
+
+	return count;
+}
+
+static ssize_t
+poll_interval_store(struct device *dev,
+			struct device_attribute *attr,
+			const char *buf, size_t count)
+{
+	int value = 0, ret = 0;
+
+	if (!gbcl)
+		return -EPERM;
+
+	ret = kstrtoint(buf, 10, &value);
+	if (ret)
+		return ret;
+
+	if (value < MIN_BCL_POLL_INTERVAL)
+		return -EINVAL;
+
+	gbcl->bcl_poll_interval_msec = value;
+
+	return count;
+}
+
+static ssize_t vbat_min_store(struct device *dev,
+			struct device_attribute *attr,
+			const char *buf, size_t count)
+{
+	int value = 0;
+	int ret = 0;
+
+	if (!gbcl)
+		return -EPERM;
+
+	ret = kstrtoint(buf, 10, &value);
+
+	if (ret || (value < 0)) {
+		pr_err("Incorrect vbatt min value\n");
+		return -EINVAL;
+	}
+
+	gbcl->bcl_vbat_min = value;
+	return count;
+}
+
+static ssize_t iavail_low_threshold_mode_show(struct device *dev,
+			struct device_attribute *attr, char *buf)
+{
+	if (!gbcl)
+		return -EPERM;
+
+	return snprintf(buf, PAGE_SIZE, "%s\n",
+		gbcl->bcl_threshold_mode[BCL_LOW_THRESHOLD_TYPE]
+		== BCL_IAVAIL_THRESHOLD_ENABLED ? "enabled" : "disabled");
+}
+
+static ssize_t iavail_low_threshold_mode_store(struct device *dev,
+					struct device_attribute *attr,
+					const char *buf, size_t count)
+{
+	if (!gbcl)
+		return -EPERM;
+
+	if (!strcmp(buf, "enable"))
+		gbcl->bcl_threshold_mode[BCL_LOW_THRESHOLD_TYPE]
+			= BCL_IAVAIL_THRESHOLD_ENABLED;
+	else if (!strcmp(buf, "disable"))
+		gbcl->bcl_threshold_mode[BCL_LOW_THRESHOLD_TYPE]
+			= BCL_IAVAIL_THRESHOLD_DISABLED;
+	else
+		return -EINVAL;
+
+	return count;
+}
+
+static ssize_t iavail_high_threshold_mode_show(struct device *dev,
+			struct device_attribute *attr, char *buf)
+{
+	if (!gbcl)
+		return -EPERM;
+
+	return snprintf(buf, PAGE_SIZE, "%s\n",
+		gbcl->bcl_threshold_mode[BCL_HIGH_THRESHOLD_TYPE]
+		== BCL_IAVAIL_THRESHOLD_ENABLED ? "enabled" : "disabled");
+}
+
+static ssize_t iavail_high_threshold_mode_store(struct device *dev,
+					struct device_attribute *attr,
+					const char *buf, size_t count)
+{
+	if (!gbcl)
+		return -EPERM;
+
+	if (!strcmp(buf, "enable"))
+		gbcl->bcl_threshold_mode[BCL_HIGH_THRESHOLD_TYPE]
+			= BCL_IAVAIL_THRESHOLD_ENABLED;
+	else if (!strcmp(buf, "disable"))
+		gbcl->bcl_threshold_mode[BCL_HIGH_THRESHOLD_TYPE]
+			= BCL_IAVAIL_THRESHOLD_DISABLED;
+	else
+		return -EINVAL;
+
+	return count;
+}
+
+static ssize_t iavail_low_threshold_value_show(struct device *dev,
+			struct device_attribute *attr, char *buf)
+{
+	if (!gbcl)
+		return -EPERM;
+
+	return snprintf(buf, PAGE_SIZE, "%d\n",
+		gbcl->bcl_threshold_value_ma[BCL_LOW_THRESHOLD_TYPE]);
+}
+
+static ssize_t iavail_low_threshold_value_store(struct device *dev,
+					struct device_attribute *attr,
+					const char *buf, size_t count)
+{
+	int val = 0;
+	int ret = 0;
+
+	if (!gbcl)
+		return -EPERM;
+
+	ret = kstrtoint(buf, 10, &val);
+
+	if (ret || (val < 0)) {
+		pr_err("Incorrect available current threshold value\n");
+		return -EINVAL;
+	}
+
+	gbcl->bcl_threshold_value_ma[BCL_LOW_THRESHOLD_TYPE] = val;
+
+	return count;
+}
+
+static ssize_t iavail_high_threshold_value_show(struct device *dev,
+			struct device_attribute *attr, char *buf)
+{
+	if (!gbcl)
+		return -EPERM;
+
+	return snprintf(buf, PAGE_SIZE, "%d\n",
+		gbcl->bcl_threshold_value_ma[BCL_HIGH_THRESHOLD_TYPE]);
+}
+
+static ssize_t iavail_high_threshold_value_store(struct device *dev,
+					struct device_attribute *attr,
+					const char *buf, size_t count)
+{
+	int val = 0;
+	int ret = 0;
+
+	if (!gbcl)
+		return -EPERM;
+	ret = kstrtoint(buf, 10, &val);
+
+	if (ret || (val < 0)) {
+		pr_err("Incorrect available current threshold value\n");
+		return -EINVAL;
+	}
+
+	gbcl->bcl_threshold_value_ma[BCL_HIGH_THRESHOLD_TYPE] = val;
+
+	return count;
+}
+
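+/*
+ * Parse a non-negative decimal value from a sysfs write.  Threshold and
+ * mask updates are only accepted while BCL mitigation is disabled.
+ */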
+static int convert_to_int(const char *buf, int *val)
+{
+	int ret = 0;
+
+	if (!gbcl)
+		return -EPERM;
+	if (gbcl->bcl_mode != BCL_DEVICE_DISABLED) {
+		pr_err("BCL is not disabled\n");
+		return -EINVAL;
+	}
+
+	ret = kstrtoint(buf, 10, val);
+	if (ret || (*val < 0)) {
+		pr_err("Invalid threshold %s val:%d ret:%d\n", buf, *val,
+			ret);
+		return -EINVAL;
+	}
+
+	return ret;
+}
+
+static ssize_t high_ua_store(struct device *dev,
+					struct device_attribute *attr,
+					const char *buf, size_t count)
+{
+	int val = 0;
+	int ret = 0;
+
+	ret = convert_to_int(buf, &val);
+	if (ret)
+		return ret;
+
+	if (gbcl->bcl_monitor_type == BCL_IBAT_MONITOR_TYPE)
+		gbcl->btm_high_threshold_uv = current_to_voltage(gbcl, val);
+	else
+		gbcl->ibat_high_thresh.trip_value = val;
+
+	return count;
+}
+
+static ssize_t low_ua_store(struct device *dev,
+					struct device_attribute *attr,
+					const char *buf, size_t count)
+{
+	int val = 0;
+	int ret = 0;
+
+	ret = convert_to_int(buf, &val);
+	if (ret)
+		return ret;
+
+	if (gbcl->bcl_monitor_type == BCL_IBAT_MONITOR_TYPE)
+		gbcl->btm_low_threshold_uv = current_to_voltage(gbcl, val);
+	else
+		gbcl->ibat_low_thresh.trip_value = val;
+
+	return count;
+}
+
+static ssize_t freq_max_store(struct device *dev,
+					struct device_attribute *attr,
+					const char *buf, size_t count)
+{
+	int val = 0;
+	int ret = 0;
+	uint32_t *freq_lim = NULL;
+
+	ret = convert_to_int(buf, &val);
+	if (ret)
+		return ret;
+	freq_lim = (gbcl->bcl_monitor_type == BCL_IBAT_MONITOR_TYPE) ?
+			&gbcl->btm_freq_max : &gbcl->bcl_p_freq_max;
+	*freq_lim = max_t(uint32_t, val, gbcl->thermal_freq_limit);
+
+	return count;
+}
+
+static ssize_t vph_low_store(struct device *dev,
+					struct device_attribute *attr,
+					const char *buf, size_t count)
+{
+	int val = 0;
+	int ret = 0;
+	int *thresh = NULL;
+
+	ret = convert_to_int(buf, &val);
+	if (ret)
+		return ret;
+
+	thresh = (gbcl->bcl_monitor_type == BCL_IBAT_MONITOR_TYPE)
+			? (int *)&gbcl->btm_vph_low_thresh
+			: &gbcl->vbat_low_thresh.trip_value;
+	*thresh = val;
+
+	return count;
+}
+
+static ssize_t vph_high_store(struct device *dev,
+					struct device_attribute *attr,
+					const char *buf, size_t count)
+{
+	int val = 0;
+	int ret = 0;
+	int *thresh = NULL;
+
+	ret = convert_to_int(buf, &val);
+	if (ret)
+		return ret;
+
+	thresh = (gbcl->bcl_monitor_type == BCL_IBAT_MONITOR_TYPE)
+			? (int *)&gbcl->btm_vph_high_thresh
+			: &gbcl->vbat_high_thresh.trip_value;
+	*thresh = val;
+
+	return count;
+}
+
+static ssize_t hotplug_mask_store(struct device *dev,
+					struct device_attribute *attr,
+					const char *buf, size_t count)
+{
+	int ret = 0, val = 0;
+
+	ret = convert_to_int(buf, &val);
+	if (ret)
+		return ret;
+
+	bcl_hotplug_mask = val;
+	pr_info("bcl hotplug mask updated to %d\n", bcl_hotplug_mask);
+
+	if (!bcl_hotplug_mask && !bcl_soc_hotplug_mask)
+		bcl_hotplug_enabled = false;
+	else
+		bcl_hotplug_enabled = true;
+
+	return count;
+}
+
+static ssize_t hotplug_soc_mask_store(struct device *dev,
+					struct device_attribute *attr,
+					const char *buf, size_t count)
+{
+	int ret = 0, val = 0;
+
+	ret = convert_to_int(buf, &val);
+	if (ret)
+		return ret;
+
+	bcl_soc_hotplug_mask = val;
+	pr_info("bcl soc hotplug mask updated to %d\n", bcl_soc_hotplug_mask);
+
+	if (!bcl_hotplug_mask && !bcl_soc_hotplug_mask)
+		bcl_hotplug_enabled = false;
+	else
+		bcl_hotplug_enabled = true;
+
+	return count;
+}
+
+static ssize_t soc_low_thresh_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	int val = 0;
+	int ret = 0;
+
+	ret = convert_to_int(buf, &val);
+	if (ret)
+		return ret;
+
+	soc_low_threshold = val;
+	pr_info("bcl soc low threshold updated to %d\n", soc_low_threshold);
+
+	return count;
+}
+
+/*
+ * BCL device attributes
+ */
+static struct device_attribute bcl_dev_attr[] = {
+	__ATTR(type, 0444, type_show, NULL),
+	__ATTR(iavail, 0444, iavail_show, NULL),
+	__ATTR(vbat_min, 0644, vbat_min_show, vbat_min_store),
+	__ATTR(vbat, 0444, vbat_show, NULL),
+	__ATTR(rbat, 0444, rbat_show, NULL),
+	__ATTR(mode, 0644, mode_show, mode_store),
+	__ATTR(poll_interval, 0644,
+		poll_interval_show, poll_interval_store),
+	__ATTR(iavail_low_threshold_mode, 0644,
+		iavail_low_threshold_mode_show,
+		iavail_low_threshold_mode_store),
+	__ATTR(iavail_high_threshold_mode, 0644,
+		iavail_high_threshold_mode_show,
+		iavail_high_threshold_mode_store),
+	__ATTR(iavail_low_threshold_value, 0644,
+		iavail_low_threshold_value_show,
+		iavail_low_threshold_value_store),
+	__ATTR(iavail_high_threshold_value, 0644,
+		iavail_high_threshold_value_show,
+		iavail_high_threshold_value_store),
+};
+
+static struct device_attribute btm_dev_attr[] = {
+	__ATTR(type, 0444, type_show, NULL),
+	__ATTR(mode, 0644, mode_show, mode_store),
+	__ATTR(vph_state, 0444, vph_state_show, NULL),
+	__ATTR(ibat_state, 0444, ibat_state_show, NULL),
+	__ATTR(high_threshold_ua, 0644, high_ua_show, high_ua_store),
+	__ATTR(low_threshold_ua, 0644, low_ua_show, low_ua_store),
+	__ATTR(adc_interval_us, 0444, adc_interval_us_show, NULL),
+	__ATTR(freq_max, 0644, freq_max_show, freq_max_store),
+	__ATTR(vph_high_thresh_uv, 0644, vph_high_show, vph_high_store),
+	__ATTR(vph_low_thresh_uv, 0644, vph_low_show, vph_low_store),
+	__ATTR(thermal_freq_limit, 0444, freq_limit_show, NULL),
+	__ATTR(hotplug_status, 0444, hotplug_status_show, NULL),
+	__ATTR(hotplug_mask, 0644, hotplug_mask_show, hotplug_mask_store),
+	__ATTR(hotplug_soc_mask, 0644, hotplug_soc_mask_show,
+		hotplug_soc_mask_store),
+	__ATTR(soc_low_thresh, 0644, soc_low_thresh_show, soc_low_thresh_store),
+};
+
+static int create_bcl_sysfs(struct bcl_context *bcl)
+{
+	int result = 0, num_attr = 0, i;
+	struct device_attribute *attr_ptr = NULL;
+
+	switch (bcl->bcl_monitor_type) {
+	case BCL_IAVAIL_MONITOR_TYPE:
+		num_attr = ARRAY_SIZE(bcl_dev_attr);
+		attr_ptr = bcl_dev_attr;
+		break;
+	case BCL_IBAT_MONITOR_TYPE:
+	case BCL_IBAT_PERIPH_MONITOR_TYPE:
+		num_attr = ARRAY_SIZE(btm_dev_attr);
+		attr_ptr = btm_dev_attr;
+		break;
+	default:
+		pr_err("Invalid monitor type:%d\n", bcl->bcl_monitor_type);
+		return -EINVAL;
+	}
+
+	for (i = 0; i < num_attr; i++) {
+		result = device_create_file(bcl->dev, &attr_ptr[i]);
+		if (result < 0)
+			return result;
+	}
+
+	return result;
+}
+
+static void remove_bcl_sysfs(struct bcl_context *bcl)
+{
+	int num_attr = 0, i;
+	struct device_attribute *attr_ptr = NULL;
+
+	switch (bcl->bcl_monitor_type) {
+	case BCL_IAVAIL_MONITOR_TYPE:
+		num_attr = ARRAY_SIZE(bcl_dev_attr);
+		attr_ptr = bcl_dev_attr;
+		break;
+	case BCL_IBAT_MONITOR_TYPE:
+	case BCL_IBAT_PERIPH_MONITOR_TYPE:
+		num_attr = ARRAY_SIZE(btm_dev_attr);
+		attr_ptr = btm_dev_attr;
+		break;
+	default:
+		pr_err("Invalid monitor type:%d\n", bcl->bcl_monitor_type);
+		return;
+	}
+
+	for (i = 0; i < num_attr; i++)
+		device_remove_file(bcl->dev, &attr_ptr[i]);
+}
+
+static int bcl_config_ibat_adc(struct bcl_context *bcl,
+		enum bcl_iavail_threshold_type thresh_type)
+{
+	int ret = 0;
+
+	if (bcl->bcl_mode == BCL_DEVICE_DISABLED
+		|| bcl->bcl_monitor_type != BCL_IBAT_MONITOR_TYPE)
+		return -EINVAL;
+
+	switch (thresh_type) {
+	case BCL_HIGH_THRESHOLD_TYPE:
+		bcl->btm_ibat_adc_param.state_request = ADC_TM_HIGH_THR_ENABLE;
+		break;
+	case BCL_LOW_THRESHOLD_TYPE:
+		bcl->btm_ibat_adc_param.state_request = ADC_TM_LOW_THR_ENABLE;
+		break;
+	default:
+		pr_err("Invalid threshold type:%d\n", thresh_type);
+		return -EINVAL;
+	}
+
+	bcl->btm_ibat_adc_param.low_thr = bcl->btm_low_threshold_uv;
+	bcl->btm_ibat_adc_param.high_thr = bcl->btm_high_threshold_uv;
+	bcl->btm_ibat_adc_param.timer_interval = bcl->btm_adc_interval;
+	bcl->btm_ibat_adc_param.btm_ctx = bcl;
+	bcl->btm_ibat_adc_param.threshold_notification = bcl_ibat_notification;
+	bcl->btm_ibat_adc_param.channel = bcl->btm_ibat_chan;
+
+	ret = qpnp_adc_tm_channel_measure(bcl->btm_adc_tm_dev,
+			&bcl->btm_ibat_adc_param);
+	if (ret < 0)
+		pr_err("Error configuring BTM. ret:%d\n", ret);
+	else
+		pr_debug("BTM config. poll:%d high_uv:%d(%s) low_uv:%d(%s)\n",
+		    bcl->btm_adc_interval,
+		    bcl->btm_ibat_adc_param.high_thr,
+		    (bcl->btm_ibat_adc_param.state_request ==
+			ADC_TM_HIGH_THR_ENABLE) ? "enabled" : "disabled",
+		    bcl->btm_ibat_adc_param.low_thr,
+		    (bcl->btm_ibat_adc_param.state_request ==
+			ADC_TM_LOW_THR_ENABLE) ? "enabled" : "disabled");
+	return ret;
+}
+
+static void bcl_ibat_notification(enum qpnp_tm_state state, void *ctx)
+{
+	struct bcl_context *bcl = ctx;
+	int ret = 0;
+
+	mutex_lock(&bcl_notify_mutex);
+	if (bcl->btm_mode == BCL_MONITOR_DISABLED ||
+		bcl->btm_mode == BCL_VPH_MONITOR_MODE)
+		goto unlock_and_return;
+
+	switch (state) {
+	case ADC_TM_LOW_STATE:
+		if (bcl->btm_mode != BCL_IBAT_HIGH_LOAD_MODE)
+			goto set_ibat_threshold;
+		pr_debug("ibat low load enter\n");
+		bcl->btm_mode = BCL_IBAT_MONITOR_MODE;
+		bcl_ibat_notify(BCL_LOW_THRESHOLD);
+		break;
+	case ADC_TM_HIGH_STATE:
+		if (bcl->btm_mode != BCL_IBAT_MONITOR_MODE)
+			goto set_ibat_threshold;
+		pr_debug("ibat high load enter\n");
+		bcl->btm_mode = BCL_IBAT_HIGH_LOAD_MODE;
+		bcl_ibat_notify(BCL_HIGH_THRESHOLD);
+		break;
+	default:
+		pr_err("Invalid threshold state:%d\n", state);
+		bcl_config_ibat_adc(bcl, BCL_HIGH_THRESHOLD_TYPE);
+		goto unlock_and_return;
+	}
+
+set_ibat_threshold:
+	ret = bcl_config_ibat_adc(bcl, (state == ADC_TM_LOW_STATE) ?
+		BCL_HIGH_THRESHOLD_TYPE : BCL_LOW_THRESHOLD_TYPE);
+	if (ret < 0)
+		pr_err("Error configuring %s thresh. err:%d\n",
+			(state == ADC_TM_LOW_STATE) ? "high" : "low", ret);
+unlock_and_return:
+	mutex_unlock(&bcl_notify_mutex);
+}
+
+static int bcl_suspend(struct device *dev)
+{
+	int ret = 0;
+	struct bcl_context *bcl = dev_get_drvdata(dev);
+
+	if (bcl->bcl_monitor_type == BCL_IBAT_MONITOR_TYPE &&
+		bcl->bcl_mode == BCL_DEVICE_ENABLED) {
+		switch (bcl->btm_mode) {
+		case BCL_IBAT_MONITOR_MODE:
+		case BCL_IBAT_HIGH_LOAD_MODE:
+			ret = ibat_disable();
+			if (!ret)
+				vph_disable();
+			break;
+		case BCL_VPH_MONITOR_MODE:
+			vph_disable();
+			break;
+		case BCL_MONITOR_DISABLED:
+		default:
+			break;
+		}
+	}
+	return 0;
+}
+
+static int bcl_resume(struct device *dev)
+{
+	struct bcl_context *bcl = dev_get_drvdata(dev);
+
+	if (bcl->bcl_monitor_type == BCL_IBAT_MONITOR_TYPE &&
+		bcl->bcl_mode == BCL_DEVICE_ENABLED) {
+		bcl->btm_mode = BCL_VPH_MONITOR_MODE;
+		bcl_config_vph_adc(bcl, BCL_LOW_THRESHOLD_TYPE);
+	}
+	return 0;
+}
+
+static void get_vdd_rstr_freq(struct bcl_context *bcl,
+				struct device_node *ibat_node)
+{
+	int ret = 0;
+	struct device_node *phandle = NULL;
+	char *key = NULL;
+
+	key = "qcom,thermal-handle";
+	phandle = of_parse_phandle(ibat_node, key, 0);
+	if (!phandle) {
+		pr_err("Thermal handle not present\n");
+		ret = -ENODEV;
+		goto vdd_rstr_exit;
+	}
+	key = "qcom,levels";
+	ret = of_property_read_u32_index(phandle, key, 0,
+					&bcl->thermal_freq_limit);
+	if (ret) {
+		pr_err("Error reading property %s. ret:%d\n", key, ret);
+		goto vdd_rstr_exit;
+	}
+
+vdd_rstr_exit:
+	if (ret)
+		bcl->thermal_freq_limit = BTM_8084_FREQ_MITIG_LIMIT;
+}
+
+static int probe_bcl_periph_prop(struct bcl_context *bcl)
+{
+	int ret = 0;
+	struct device_node *ibat_node = NULL, *dev_node = bcl->dev->of_node;
+	char *key = NULL;
+
+	key = "qcom,ibat-monitor";
+	ibat_node = of_find_node_by_name(dev_node, key);
+	if (!ibat_node) {
+		ret = -ENODEV;
+		goto ibat_probe_exit;
+	}
+
+	BCL_FETCH_DT_U32(ibat_node, key, "qcom,low-threshold-uamp", ret,
+		bcl->ibat_low_thresh.trip_value);
+	if (ret)
+		goto ibat_probe_exit;
+	BCL_FETCH_DT_U32(ibat_node, key, "qcom,high-threshold-uamp", ret,
+		bcl->ibat_high_thresh.trip_value);
+	if (ret)
+		goto ibat_probe_exit;
+
+	BCL_FETCH_DT_U32(ibat_node, key, "qcom,vph-high-threshold-uv", ret,
+		bcl->vbat_high_thresh.trip_value);
+	if (ret)
+		goto ibat_probe_exit;
+	BCL_FETCH_DT_U32(ibat_node, key, "qcom,vph-low-threshold-uv", ret,
+		bcl->vbat_low_thresh.trip_value);
+	if (ret)
+		goto ibat_probe_exit;
+	BCL_FETCH_DT_U32(ibat_node, key, "qcom,soc-low-threshold", ret,
+		soc_low_threshold);
+	if (ret)
+		goto ibat_probe_exit;
+
+	bcl->vbat_high_thresh.trip_notify
+		= bcl->vbat_low_thresh.trip_notify = bcl_periph_vbat_notify;
+	bcl->vbat_high_thresh.trip_data
+		= bcl->vbat_low_thresh.trip_data = (void *) bcl;
+	bcl->ibat_high_thresh.trip_notify
+		= bcl->ibat_low_thresh.trip_notify = bcl_periph_ibat_notify;
+	bcl->ibat_high_thresh.trip_data
+		= bcl->ibat_low_thresh.trip_data = (void *) bcl;
+
+	if (bcl_frequency_mask) {
+		BCL_FETCH_DT_U32(ibat_node, key, "qcom,mitigation-freq-khz",
+			ret, bcl->bcl_p_freq_max);
+		if (ret)
+			goto ibat_probe_exit;
+		get_vdd_rstr_freq(bcl, ibat_node);
+	} else {
+		bcl->bcl_p_freq_max = UINT_MAX;
+		bcl->thermal_freq_limit = 0;
+	}
+
+	bcl->bcl_p_freq_max = max(bcl->bcl_p_freq_max, bcl->thermal_freq_limit);
+
+	bcl->btm_mode = BCL_MONITOR_DISABLED;
+	bcl->bcl_monitor_type = BCL_IBAT_PERIPH_MONITOR_TYPE;
+	snprintf(bcl->bcl_type, BCL_NAME_LENGTH, "%s",
+			bcl_type[BCL_IBAT_PERIPH_MONITOR_TYPE]);
+
+ibat_probe_exit:
+	if (ret && ret != -EPROBE_DEFER)
+		dev_info(bcl->dev, "%s:%s Error reading key:%s. ret = %d\n",
+				KBUILD_MODNAME, __func__, key, ret);
+
+	return ret;
+}
+
+static int probe_btm_properties(struct bcl_context *bcl)
+{
+	int ret = 0, curr_ua = 0;
+	int adc_interval_us;
+	struct device_node *ibat_node = NULL, *dev_node = bcl->dev->of_node;
+	char *key = NULL;
+
+	key = "qcom,ibat-monitor";
+	ibat_node = of_find_node_by_name(dev_node, key);
+	if (!ibat_node) {
+		ret = -ENODEV;
+		goto btm_probe_exit;
+	}
+
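+	/*
+	 * Numerator/denominator of the uV-to-uA conversion ratio used by
+	 * current_to_voltage()/voltage_to_current().
+	 */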
+	key = "qcom,uv-to-ua-numerator";
+	ret = of_property_read_u32(ibat_node, key,
+			&bcl->btm_uv_to_ua_numerator);
+	if (ret < 0)
+		goto btm_probe_exit;
+
+	key = "qcom,uv-to-ua-denominator";
+	ret = of_property_read_u32(ibat_node, key,
+			&bcl->btm_uv_to_ua_denominator);
+	if (ret < 0)
+		goto btm_probe_exit;
+
+	key = "qcom,low-threshold-uamp";
+	ret = of_property_read_u32(ibat_node, key, &curr_ua);
+	if (ret < 0)
+		goto btm_probe_exit;
+	bcl->btm_low_threshold_uv = current_to_voltage(bcl, curr_ua);
+
+	key = "qcom,high-threshold-uamp";
+	ret = of_property_read_u32(ibat_node, key, &curr_ua);
+	if (ret < 0)
+		goto btm_probe_exit;
+	bcl->btm_high_threshold_uv = current_to_voltage(bcl, curr_ua);
+
+	key = "qcom,ibat-channel";
+	ret = of_property_read_u32(ibat_node, key, &bcl->btm_ibat_chan);
+	if (ret < 0)
+		goto btm_probe_exit;
+
+	key = "qcom,adc-interval-usec";
+	ret = of_property_read_u32(ibat_node, key, &adc_interval_us);
+	if (ret < 0)
+		goto btm_probe_exit;
+	bcl->btm_adc_interval = uSec_to_adc_time(bcl, adc_interval_us);
+
+	key = "qcom,vph-channel";
+	ret = of_property_read_u32(ibat_node, key, &bcl->btm_vph_chan);
+	if (ret < 0)
+		goto btm_probe_exit;
+
+	key = "qcom,vph-high-threshold-uv";
+	ret = of_property_read_u32(ibat_node, key, &bcl->btm_vph_high_thresh);
+	if (ret < 0)
+		goto btm_probe_exit;
+
+	key = "qcom,vph-low-threshold-uv";
+	ret = of_property_read_u32(ibat_node, key, &bcl->btm_vph_low_thresh);
+	if (ret < 0)
+		goto btm_probe_exit;
+
+	key = "ibat-threshold";
+	bcl->btm_adc_tm_dev = qpnp_get_adc_tm(bcl->dev, key);
+	if (IS_ERR(bcl->btm_adc_tm_dev)) {
+		ret = PTR_ERR(bcl->btm_adc_tm_dev);
+		goto btm_probe_exit;
+	}
+
+	key = "ibat";
+	bcl->btm_vadc_dev = qpnp_get_vadc(bcl->dev, key);
+	if (IS_ERR(bcl->btm_vadc_dev)) {
+		ret = PTR_ERR(bcl->btm_vadc_dev);
+		goto btm_probe_exit;
+	}
+
+	if (bcl_frequency_mask) {
+		key = "qcom,mitigation-freq-khz";
+		ret = of_property_read_u32(ibat_node, key, &bcl->btm_freq_max);
+		if (ret < 0)
+			goto btm_probe_exit;
+		get_vdd_rstr_freq(bcl, ibat_node);
+	} else {
+		bcl->btm_freq_max = UINT_MAX;
+		bcl->thermal_freq_limit = 0;
+	}
+	bcl->btm_freq_max = max(bcl->btm_freq_max, bcl->thermal_freq_limit);
+
+	bcl->btm_mode = BCL_MONITOR_DISABLED;
+	bcl->bcl_monitor_type = BCL_IBAT_MONITOR_TYPE;
+	snprintf(bcl->bcl_type, BCL_NAME_LENGTH, "%s",
+			bcl_type[BCL_IBAT_MONITOR_TYPE]);
+
+btm_probe_exit:
+	if (ret && ret != -EPROBE_DEFER)
+		dev_info(bcl->dev, "%s:%s Error reading key:%s. ret = %d\n",
+				KBUILD_MODNAME, __func__, key, ret);
+
+	return ret;
+}
+
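+/*
+ * Build a CPU bitmask from a device-tree phandle list by matching each
+ * phandle against the possible CPUs' device-tree nodes.
+ */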
+static uint32_t get_mask_from_core_handle(struct platform_device *pdev,
+						const char *key)
+{
+	struct device_node *core_phandle = NULL;
+	int i = 0, cpu = 0;
+	uint32_t mask = 0;
+
+	core_phandle = of_parse_phandle(pdev->dev.of_node,
+			key, i++);
+	while (core_phandle) {
+		for_each_possible_cpu(cpu) {
+			if (of_get_cpu_node(cpu, NULL) == core_phandle) {
+				mask |= BIT(cpu);
+				break;
+			}
+		}
+		core_phandle = of_parse_phandle(pdev->dev.of_node,
+			key, i++);
+	}
+
+	return mask;
+}
+
+static int bcl_probe(struct platform_device *pdev)
+{
+	struct bcl_context *bcl = NULL;
+	int ret = 0;
+	enum bcl_device_mode bcl_mode = BCL_DEVICE_DISABLED;
+	char cpu_str[MAX_CPU_NAME];
+	int cpu;
+
+	bcl = devm_kzalloc(&pdev->dev, sizeof(struct bcl_context), GFP_KERNEL);
+	if (!bcl)
+		return -ENOMEM;
+
+	/* Initialize default BCL parameters */
+	if (of_property_read_bool(pdev->dev.of_node, "qcom,bcl-enable"))
+		bcl_mode = BCL_DEVICE_ENABLED;
+	else
+		bcl_mode = BCL_DEVICE_DISABLED;
+	bcl->bcl_mode = BCL_DEVICE_DISABLED;
+	bcl->dev = &pdev->dev;
+	bcl->bcl_monitor_type = BCL_IAVAIL_MONITOR_TYPE;
+	bcl->bcl_threshold_mode[BCL_LOW_THRESHOLD_TYPE] =
+					BCL_IAVAIL_THRESHOLD_DISABLED;
+	bcl->bcl_threshold_mode[BCL_HIGH_THRESHOLD_TYPE] =
+					BCL_IAVAIL_THRESHOLD_DISABLED;
+	bcl->bcl_threshold_value_ma[BCL_LOW_THRESHOLD_TYPE] = 0;
+	bcl->bcl_threshold_value_ma[BCL_HIGH_THRESHOLD_TYPE] = 0;
+	bcl->bcl_vbat_min = BATTERY_VOLTAGE_MIN;
+	snprintf(bcl->bcl_type, BCL_NAME_LENGTH, "%s",
+			bcl_type[BCL_IAVAIL_MONITOR_TYPE]);
+	bcl->bcl_poll_interval_msec = BCL_POLL_INTERVAL;
+
+	if (of_property_read_bool(pdev->dev.of_node, "qcom,bcl-no-bms"))
+		bcl->bcl_no_bms = true;
+	else
+		bcl->bcl_no_bms = false;
+
+	bcl_frequency_mask = get_mask_from_core_handle(pdev,
+					 "qcom,bcl-freq-control-list");
+	bcl_hotplug_mask = get_mask_from_core_handle(pdev,
+					 "qcom,bcl-hotplug-list");
+	bcl_soc_hotplug_mask = get_mask_from_core_handle(pdev,
+					 "qcom,bcl-soc-hotplug-list");
+
+	if (!bcl_hotplug_mask && !bcl_soc_hotplug_mask)
+		bcl_hotplug_enabled = false;
+	else
+		bcl_hotplug_enabled = true;
+
+	if (of_property_read_bool(pdev->dev.of_node,
+		"qcom,bcl-framework-interface"))
+		ret = probe_bcl_periph_prop(bcl);
+	else
+		ret = probe_btm_properties(bcl);
+
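+	/*
+	 * Only probe deferral is fatal here: on any other property error
+	 * the driver falls back to the default iavail monitor configured
+	 * above.
+	 */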
+	if (ret == -EPROBE_DEFER)
+		return ret;
+	ret = create_bcl_sysfs(bcl);
+	if (ret < 0) {
+		pr_err("Cannot create bcl sysfs\n");
+		return ret;
+	}
+	INIT_WORK(&bcl->soc_mitig_work, soc_mitigate);
+	bcl->psy_nb.notifier_call = power_supply_callback;
+	bcl->bcl_hotplug_wq = alloc_workqueue("bcl_hotplug_wq", WQ_HIGHPRI, 0);
+	if (!bcl->bcl_hotplug_wq) {
+		pr_err("Workqueue alloc failed\n");
+		return -ENOMEM;
+	}
+
+	/* Initialize mitigation KTM interface */
+	if (num_possible_cpus() > 1) {
+		bcl->hotplug_handle = devmgr_register_mitigation_client(
+					&pdev->dev, HOTPLUG_DEVICE, NULL);
+		if (IS_ERR(bcl->hotplug_handle)) {
+			ret = PTR_ERR(bcl->hotplug_handle);
+			pr_err("Error registering for hotplug. ret:%d\n", ret);
+			return ret;
+		}
+	}
+	for_each_possible_cpu(cpu) {
+		if (!(bcl_frequency_mask & BIT(cpu)))
+			continue;
+		snprintf(cpu_str, MAX_CPU_NAME, "cpu%d", cpu);
+		bcl->cpufreq_handle[cpu] = devmgr_register_mitigation_client(
+					&pdev->dev, cpu_str, NULL);
+		if (IS_ERR(bcl->cpufreq_handle[cpu])) {
+			ret = PTR_ERR(bcl->cpufreq_handle[cpu]);
+			pr_err("Error registering for cpufreq. ret:%d\n", ret);
+			return ret;
+		}
+	}
+
+	gbcl = bcl;
+	platform_set_drvdata(pdev, bcl);
+	INIT_DEFERRABLE_WORK(&bcl->bcl_iavail_work, bcl_iavail_work);
+	INIT_WORK(&bcl_hotplug_work, bcl_handle_hotplug);
+	if (bcl_mode == BCL_DEVICE_ENABLED)
+		bcl_mode_set(bcl_mode);
+
+	return 0;
+}
+
+static int bcl_remove(struct platform_device *pdev)
+{
+	int cpu;
+
+	/* De-register KTM handle */
+	power_supply_unreg_notifier(&gbcl->psy_nb);
+	if (gbcl->hotplug_handle)
+		devmgr_unregister_mitigation_client(&pdev->dev,
+			gbcl->hotplug_handle);
+	for_each_possible_cpu(cpu) {
+		if (gbcl->cpufreq_handle[cpu])
+			devmgr_unregister_mitigation_client(&pdev->dev,
+			gbcl->cpufreq_handle[cpu]);
+	}
+	remove_bcl_sysfs(gbcl);
+	if (gbcl->bcl_hotplug_wq)
+		destroy_workqueue(gbcl->bcl_hotplug_wq);
+	platform_set_drvdata(pdev, NULL);
+	return 0;
+}
+
+static const struct of_device_id bcl_match_table[] = {
+	{.compatible = "qcom,bcl"},
+	{},
+};
+
+static const struct dev_pm_ops bcl_pm_ops = {
+	.resume         = bcl_resume,
+	.suspend        = bcl_suspend,
+};
+
+static struct platform_driver bcl_driver = {
+	.probe  = bcl_probe,
+	.remove = bcl_remove,
+	.driver = {
+		.name           = BCL_DEV_NAME,
+		.owner          = THIS_MODULE,
+		.of_match_table = bcl_match_table,
+		.pm             = &bcl_pm_ops,
+	},
+};
+
+static int __init bcl_init(void)
+{
+	return platform_driver_register(&bcl_driver);
+}
+
+static void __exit bcl_exit(void)
+{
+	platform_driver_unregister(&bcl_driver);
+}
+
+late_initcall(bcl_init);
+module_exit(bcl_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("battery current limit driver");
+MODULE_ALIAS("platform:" BCL_DEV_NAME);
diff -Nruw linux-4.4.115-fbx/drivers/power/supply./qcom/battery.h linux-4.4.115-fbx/drivers/power/supply/qcom/battery.h
--- linux-4.4.115-fbx/drivers/power/supply./qcom/battery.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/power/supply/qcom/battery.h	2019-01-22 16:16:26.227271074 +0100
@@ -0,0 +1,17 @@
+/* Copyright (c) 2017 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __BATTERY_H
+#define __BATTERY_H
+int qcom_batt_init(void);
+void qcom_batt_deinit(void);
+#endif /* __BATTERY_H */
diff -Nruw linux-4.4.115-fbx/drivers/power/supply./qcom/bcl_peripheral.c linux-4.4.115-fbx/drivers/power/supply/qcom/bcl_peripheral.c
--- linux-4.4.115-fbx/drivers/power/supply./qcom/bcl_peripheral.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/power/supply/qcom/bcl_peripheral.c	2019-01-22 16:16:26.227271074 +0100
@@ -0,0 +1,1367 @@
+/*
+ * Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "%s:%s " fmt, KBUILD_MODNAME, __func__
+
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/workqueue.h>
+#include <linux/kernel.h>
+#include <linux/regmap.h>
+#include <linux/io.h>
+#include <linux/err.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/spmi.h>
+#include <linux/platform_device.h>
+#include <linux/mutex.h>
+#include <linux/msm_bcl.h>
+#include <linux/power_supply.h>
+#include <soc/qcom/scm.h>
+#include <linux/slab.h>
+#include <asm/cacheflush.h>
+
+#define CREATE_TRACE_POINTS
+#define _BCL_HW_TRACE
+#include <trace/trace_thermal.h>
+
+#define BCL_DRIVER_NAME         "bcl_peripheral"
+#define BCL_VBAT_INT_NAME       "bcl-low-vbat-int"
+#define BCL_IBAT_INT_NAME       "bcl-high-ibat-int"
+#define BCL_PARAM_MAX_ATTR      3
+
+#define BCL_MONITOR_EN          0x46
+#define BCL_VBAT_VALUE          0x54
+#define BCL_IBAT_VALUE          0x55
+#define BCL_VBAT_MIN            0x58
+#define BCL_IBAT_MAX            0x59
+#define BCL_V_GAIN_BAT          0x60
+#define BCL_I_GAIN_RSENSE       0x61
+#define BCL_I_OFFSET_RSENSE     0x62
+#define BCL_I_GAIN_BATFET       0x63
+#define BCL_I_OFFSET_BATFET     0x64
+#define BCL_I_SENSE_SRC         0x65
+#define BCL_VBAT_MIN_CLR        0x66
+#define BCL_IBAT_MAX_CLR        0x67
+#define BCL_VBAT_TRIP           0x68
+#define BCL_IBAT_TRIP           0x69
+
+#define BCL_8998_VBAT_VALUE     0x58
+#define BCL_8998_IBAT_VALUE     0x59
+#define BCL_8998_VBAT_MIN       0x5C
+#define BCL_8998_IBAT_MAX       0x5D
+#define BCL_8998_MAX_MIN_CLR    0x48
+#define BCL_8998_IBAT_MAX_CLR   3
+#define BCL_8998_VBAT_MIN_CLR   2
+#define BCL_8998_VBAT_ADC_LOW   0x72
+#define BCL_8998_VBAT_COMP_LOW  0x75
+#define BCL_8998_VBAT_COMP_TLOW 0x76
+#define BCL_8998_IBAT_HIGH      0x78
+#define BCL_8998_IBAT_TOO_HIGH  0x79
+#define BCL_8998_LMH_CFG        0xA3
+#define BCL_8998_BCL_CFG        0x6A
+#define LMH_8998_INT_POL_HIGH   0x12
+#define LMH_8998_INT_EN         0x15
+
+#define BCL_8998_VBAT_SCALING   39000
+#define BCL_8998_IBAT_SCALING   80000
+#define BCL_VBAT_LOW_THRESHOLD  0x7 /* 3.1V */
+#define BCL_VBAT_TLOW_THRESHOLD 0x5 /* 2.9V */
+#define BCL_IBAT_HIGH_THRESH_UA 4300000
+#define BCL_LMH_CFG_VAL         0x3
+#define BCL_CFG_VAL             0x81
+#define LMH_INT_VAL             0x7
+
+#define BCL_CONSTANT_NUM        32
+#define BCL_READ_RETRY_LIMIT    3
+#define VAL_CP_REG_BUF_LEN      3
+#define VAL_REG_BUF_OFFSET      0
+#define VAL_CP_REG_BUF_OFFSET   2
+#define PON_SPARE_FULL_CURRENT		0x0
+#define PON_SPARE_DERATED_CURRENT	0x1
+
+#define LMH_DCVSH               0x10
+#define LMH_NODE_DCVS           0x44435653 /* DCVS */
+#define LMH_SUB_FN_BCL          0x42434C00 /* BCL */
+#define LMH_CLUSTER_0           0x6370302D /* cpAG */
+#define LMH_CLUSTER_1           0x6370312D /* cpAU */
+#define LMH_ALGO_ENABLE         0x454E424C /* ENBL */
+
+#define READ_CONV_FACTOR(_node, _key, _val, _ret, _dest) do { \
+		_ret = of_property_read_u32(_node, _key, &_val); \
+		if (_ret) { \
+			pr_err("Error reading key:%s. err:%d\n", _key, _ret); \
+			goto bcl_dev_exit; \
+		} \
+		_dest = _val; \
+	} while (0)
+
+#define READ_OPTIONAL_PROP(_node, _key, _val, _ret, _dest) do { \
+		_ret = of_property_read_u32(_node, _key, &_val); \
+		if (_ret && _ret != -EINVAL) { \
+			pr_err("Error reading key:%s. err:%d\n", _key, _ret); \
+			goto bcl_dev_exit; \
+		} else if (!_ret) { \
+			_dest = _val; \
+		} \
+	} while (0)
+
+enum bcl_monitor_state {
+	BCL_PARAM_INACTIVE,
+	BCL_PARAM_MONITOR,
+	BCL_PARAM_POLLING,
+};
+
+enum bcl_hw_type {
+	BCL_PMI8994,
+	BCL_PMI8998,
+	BCL_VERSION_MAX,
+};
+
+struct bcl_peripheral_data {
+	struct bcl_param_data   *param_data;
+	struct bcl_driver_ops   ops;
+	enum bcl_monitor_state  state;
+	struct delayed_work     poll_work;
+	int                     irq_num;
+	int                     high_trip;
+	int                     low_trip;
+	int                     trip_val;
+	int                     scaling_factor;
+	int                     offset_factor_num;
+	int                     offset_factor_den;
+	int                     offset;
+	int                     gain_factor_num;
+	int                     gain_factor_den;
+	int                     gain;
+	uint32_t                polling_delay_ms;
+	int			inhibit_derating_ua;
+	int (*read_max)         (int *adc_value);
+	int (*clear_max)        (void);
+	struct mutex            state_trans_lock;
+};
+
+struct bcl_device {
+	bool				enabled;
+	struct device			*dev;
+	struct platform_device		*pdev;
+	struct regmap			*regmap;
+	uint16_t			base_addr;
+	uint16_t			pon_spare_addr;
+	uint16_t			fg_lmh_addr;
+	int				i_src;
+	struct bcl_peripheral_data	param[BCL_PARAM_MAX];
+};
+
+static struct bcl_device *bcl_perph;
+static struct power_supply_desc bcl_psy_d;
+static struct power_supply *bcl_psy;
+static const char bcl_psy_name[] = "fg_adc";
+static bool calibration_done;
+static DEFINE_MUTEX(bcl_access_mutex);
+static DEFINE_MUTEX(bcl_enable_mutex);
+static enum bcl_hw_type bcl_perph_version;
+
+static int bcl_read_multi_register(int16_t reg_offset, uint8_t *data, int len)
+{
+	int ret = 0, trace_len = 0;
+
+	if (!bcl_perph) {
+		pr_err("BCL device not initialized\n");
+		return -EINVAL;
+	}
+	ret = regmap_bulk_read(bcl_perph->regmap,
+			       (bcl_perph->base_addr + reg_offset), data, len);
+	if (ret < 0) {
+		pr_err("Error reading register %d. err:%d", reg_offset, ret);
+		return ret;
+	}
+	while (trace_len < len) {
+		trace_bcl_hw_reg_access("Read",
+			bcl_perph->base_addr + reg_offset + trace_len,
+			data[trace_len]);
+		trace_len++;
+	}
+
+	return ret;
+}
+
+static int bcl_read_register(int16_t reg_offset, uint8_t *data)
+{
+	return bcl_read_multi_register(reg_offset, data, 1);
+}
+
+static int bcl_write_general_register(int16_t reg_offset,
+					uint16_t base, uint8_t data)
+{
+	int ret = 0;
+
+	if (!bcl_perph) {
+		pr_err("BCL device not initialized\n");
+		return -EINVAL;
+	}
+	ret = regmap_write(bcl_perph->regmap, (base + reg_offset), data);
+	if (ret < 0) {
+		pr_err("Error writing register %d. err:%d\n", reg_offset, ret);
+		return ret;
+	}
+	pr_debug("wrote 0x%02x to 0x%04x\n", data, base + reg_offset);
+	trace_bcl_hw_reg_access("write", base + reg_offset, data);
+
+	return ret;
+}
+
+static int bcl_write_register(int16_t reg_offset, uint8_t data)
+{
+	return bcl_write_general_register(reg_offset,
+			bcl_perph->base_addr, data);
+}
+
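+/*
+ * ADC conversion helpers: PMI8994 readings are corrected with per-part
+ * gain/offset factors, while PMI8998 uses a fixed scaling constant.
+ */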
+static void convert_vbat_to_adc_val(int *val)
+{
+	struct bcl_peripheral_data *perph_data = NULL;
+
+	switch (bcl_perph_version) {
+	case BCL_PMI8994:
+		if (!bcl_perph)
+			return;
+		perph_data = &bcl_perph->param[BCL_PARAM_VOLTAGE];
+		*val = (*val * 100
+			/ (100 + (perph_data->gain_factor_num
+			* perph_data->gain) * BCL_CONSTANT_NUM
+			/ perph_data->gain_factor_den))
+			/ perph_data->scaling_factor;
+		break;
+	case BCL_PMI8998:
+		*val = *val / BCL_8998_VBAT_SCALING;
+		break;
+	default:
+		break;
+	}
+
+	return;
+}
+
+static void convert_adc_to_vbat_val(int *val)
+{
+	struct bcl_peripheral_data *perph_data = NULL;
+
+	switch (bcl_perph_version) {
+	case BCL_PMI8994:
+		if (!bcl_perph)
+			return;
+		perph_data = &bcl_perph->param[BCL_PARAM_VOLTAGE];
+		*val = ((*val + 2) * perph_data->scaling_factor)
+			* (100 + (perph_data->gain_factor_num
+			* perph_data->gain)
+			* BCL_CONSTANT_NUM  / perph_data->gain_factor_den)
+			/ 100;
+		break;
+	case BCL_PMI8998:
+		*val = *val * BCL_8998_VBAT_SCALING;
+		break;
+	default:
+		break;
+	}
+
+	return;
+}
+
+static void convert_ibat_to_adc_val(int *val)
+{
+	struct bcl_peripheral_data *perph_data = NULL;
+
+	switch (bcl_perph_version) {
+	case BCL_PMI8994:
+		if (!bcl_perph)
+			return;
+		perph_data = &bcl_perph->param[BCL_PARAM_CURRENT];
+		*val = (*val * 100
+			/ (100 + (perph_data->gain_factor_num
+			* perph_data->gain)
+			* BCL_CONSTANT_NUM / perph_data->gain_factor_den)
+			- (perph_data->offset_factor_num * perph_data->offset)
+			/ perph_data->offset_factor_den)
+			/  perph_data->scaling_factor;
+		break;
+	case BCL_PMI8998:
+		*val = *val / BCL_8998_IBAT_SCALING;
+		break;
+	default:
+		break;
+	}
+
+	return;
+}
+
+static void convert_adc_to_ibat_val(int *val)
+{
+	struct bcl_peripheral_data *perph_data = NULL;
+
+	switch (bcl_perph_version) {
+	case BCL_PMI8994:
+		if (!bcl_perph)
+			return;
+		perph_data = &bcl_perph->param[BCL_PARAM_CURRENT];
+		*val = (*val * perph_data->scaling_factor
+			+ (perph_data->offset_factor_num * perph_data->offset)
+			/ perph_data->offset_factor_den)
+			* (100 + (perph_data->gain_factor_num
+			* perph_data->gain) * BCL_CONSTANT_NUM /
+			perph_data->gain_factor_den) / 100;
+		break;
+	case BCL_PMI8998:
+		*val = *val * BCL_8998_IBAT_SCALING;
+		break;
+	default:
+		break;
+	}
+
+	return;
+}
+
+static int bcl_set_high_vbat(int thresh_value)
+{
+	bcl_perph->param[BCL_PARAM_VOLTAGE].high_trip = thresh_value;
+	return 0;
+}
+
+static int bcl_set_low_ibat(int thresh_value)
+{
+	bcl_perph->param[BCL_PARAM_CURRENT].low_trip = thresh_value;
+	return 0;
+}
+
+static int bcl_set_high_ibat(int thresh_value)
+{
+	int ret = 0, ibat_ua;
+	int8_t val = 0;
+	int too_high_thresh = BCL_IBAT_HIGH_THRESH_UA;
+
+	ibat_ua = thresh_value;
+	convert_ibat_to_adc_val(&thresh_value);
+	pr_debug("Setting Ibat high trip:%d. ADC_val:%d\n", ibat_ua,
+			thresh_value);
+	val = (int8_t)thresh_value;
+	ret = bcl_write_register((bcl_perph_version == BCL_PMI8994) ?
+		BCL_IBAT_TRIP : BCL_8998_IBAT_HIGH, val);
+	if (ret) {
+		pr_err("Error accessing BCL peripheral. err:%d\n", ret);
+		return ret;
+	}
+	bcl_perph->param[BCL_PARAM_CURRENT].high_trip = thresh_value;
+	if (bcl_perph_version == BCL_PMI8998) {
+		convert_ibat_to_adc_val(&too_high_thresh);
+		pr_debug("Setting Ibat too high trip:%d. ADC_val:%d\n",
+			BCL_IBAT_HIGH_THRESH_UA, too_high_thresh);
+		val = (int8_t)too_high_thresh;
+		ret = bcl_write_register(BCL_8998_IBAT_TOO_HIGH, val);
+		if (ret) {
+			pr_err("Error accessing BCL peripheral. err:%d\n", ret);
+			return ret;
+		}
+	}
+
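+	/*
+	 * When configured, also publish the full and derated current
+	 * thresholds via the PON spare registers (the derated value
+	 * accounts for the PBS dead time, see the Ibat interrupt handler).
+	 */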
+	if (bcl_perph->param[BCL_PARAM_CURRENT].inhibit_derating_ua == 0
+			|| bcl_perph->pon_spare_addr == 0)
+		return ret;
+
+	ret = bcl_write_general_register(PON_SPARE_FULL_CURRENT,
+			bcl_perph->pon_spare_addr, val);
+	if (ret) {
+		pr_debug("Error accessing PON register. err:%d\n", ret);
+		return ret;
+	}
+	thresh_value = ibat_ua
+		- bcl_perph->param[BCL_PARAM_CURRENT].inhibit_derating_ua;
+	convert_ibat_to_adc_val(&thresh_value);
+	val = (int8_t)thresh_value;
+	ret = bcl_write_general_register(PON_SPARE_DERATED_CURRENT,
+			bcl_perph->pon_spare_addr, val);
+	if (ret) {
+		pr_debug("Error accessing PON register. err:%d\n", ret);
+		return ret;
+	}
+
+	return ret;
+}
+
+static int bcl_set_low_vbat(int thresh_value)
+{
+	int ret = 0, vbat_uv;
+	int8_t val = 0;
+
+	vbat_uv = thresh_value;
+	convert_vbat_to_adc_val(&thresh_value);
+	pr_debug("Setting Vbat low trip:%d. ADC_val:%d\n", vbat_uv,
+			thresh_value);
+	val = (int8_t)thresh_value;
+	ret = bcl_write_register((bcl_perph_version == BCL_PMI8994)
+		? BCL_VBAT_TRIP : BCL_8998_VBAT_ADC_LOW, val);
+	if (ret) {
+		pr_err("Error accessing BCL peripheral. err:%d\n", ret);
+		return ret;
+	}
+	if (bcl_perph_version == BCL_PMI8998) {
+		ret = bcl_write_register(BCL_8998_VBAT_COMP_LOW,
+			BCL_VBAT_LOW_THRESHOLD);
+		if (ret) {
+			pr_err("Error accessing BCL peripheral. err:%d\n", ret);
+			return ret;
+		}
+		pr_debug("Setting Vbat low comparator threshold:0x%x.\n",
+			BCL_VBAT_LOW_THRESHOLD);
+		ret = bcl_write_register(BCL_8998_VBAT_COMP_TLOW,
+			BCL_VBAT_TLOW_THRESHOLD);
+		if (ret) {
+			pr_err("Error accessing BCL peripheral. err:%d\n", ret);
+			return ret;
+		}
+		pr_debug("Setting Vbat too low comparator threshold:0x%x.\n",
+			BCL_VBAT_TLOW_THRESHOLD);
+	}
+	bcl_perph->param[BCL_PARAM_VOLTAGE].low_trip = thresh_value;
+
+	return ret;
+}
+
+static void bcl_lmh_dcvs_enable(void)
+{
+	struct scm_desc desc_arg;
+	uint32_t *payload = NULL;
+
+	payload = kzalloc(sizeof(uint32_t) * 5, GFP_KERNEL);
+	if (!payload)
+		return;
+
+	payload[0] = LMH_SUB_FN_BCL;
+	payload[1] = 0; /* unused sub-algorithm */
+	payload[2] = LMH_ALGO_ENABLE;
+	payload[3] = 1; /* number of values */
+	payload[4] = 1;
+
+	desc_arg.args[0] = SCM_BUFFER_PHYS(payload);
+	desc_arg.args[1] = sizeof(uint32_t) * 5;
+	desc_arg.args[2] = LMH_NODE_DCVS;
+	desc_arg.args[3] = LMH_CLUSTER_0;
+	desc_arg.args[4] = 0; /* version */
+	desc_arg.arginfo = SCM_ARGS(5, SCM_RO, SCM_VAL, SCM_VAL,
+				SCM_VAL, SCM_VAL);
+
+	dmac_flush_range(payload, (void *)payload + 5 * (sizeof(uint32_t)));
+	if (scm_call2(SCM_SIP_FNID(SCM_SVC_LMH, LMH_DCVSH),
+			&desc_arg))
+		pr_err("Error enabling LMH BCL monitoringfor cluster0\n");
+
+	desc_arg.args[3] = LMH_CLUSTER_1;
+	if (scm_call2(SCM_SIP_FNID(SCM_SVC_LMH, LMH_DCVSH),
+			&desc_arg))
+		pr_err("Error enabling LMH BCL monitoringfor cluster1\n");
+
+	kfree(payload);
+}
+
+static int bcl_access_monitor_enable(bool enable)
+{
+	int ret = 0, i = 0;
+	struct bcl_peripheral_data *perph_data = NULL;
+	static bool hw_enabled;
+
+	mutex_lock(&bcl_enable_mutex);
+	if (enable == bcl_perph->enabled)
+		goto access_exit;
+
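+	/* The LMH-DCVS assisted BCL hardware only needs enabling once. */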
+	if ((bcl_perph_version == BCL_PMI8998) && !hw_enabled && enable) {
+		bcl_lmh_dcvs_enable();
+		hw_enabled = true;
+	}
+
+	for (; i < BCL_PARAM_MAX; i++) {
+		perph_data = &bcl_perph->param[i];
+		mutex_lock(&perph_data->state_trans_lock);
+		if (enable) {
+			switch (perph_data->state) {
+			case BCL_PARAM_INACTIVE:
+				trace_bcl_hw_state_event(
+					(i == BCL_PARAM_VOLTAGE)
+					? "Voltage Inactive to Monitor"
+					: "Current Inactive to Monitor",
+					0);
+				enable_irq(perph_data->irq_num);
+				break;
+			case BCL_PARAM_POLLING:
+			case BCL_PARAM_MONITOR:
+			default:
+				break;
+			}
+			perph_data->state = BCL_PARAM_MONITOR;
+		} else {
+			switch (perph_data->state) {
+			case BCL_PARAM_MONITOR:
+				trace_bcl_hw_state_event(
+					(i == BCL_PARAM_VOLTAGE)
+					? "Voltage Monitor to Inactive"
+					: "Current Monitor to Inactive",
+					0);
+				disable_irq_nosync(perph_data->irq_num);
+				/* Fall through to clear the poll work */
+			case BCL_PARAM_INACTIVE:
+			case BCL_PARAM_POLLING:
+				cancel_delayed_work_sync(
+					&perph_data->poll_work);
+				break;
+			default:
+				break;
+			}
+			perph_data->state = BCL_PARAM_INACTIVE;
+		}
+		mutex_unlock(&perph_data->state_trans_lock);
+	}
+	bcl_perph->enabled = enable;
+
+access_exit:
+	mutex_unlock(&bcl_enable_mutex);
+	return ret;
+}
+
+static int bcl_monitor_enable(void)
+{
+	trace_bcl_hw_event("BCL Enable");
+	return bcl_access_monitor_enable(true);
+}
+
+static int bcl_monitor_disable(void)
+{
+	trace_bcl_hw_event("BCL Disable");
+	return bcl_access_monitor_enable(false);
+}
+
+static int bcl_read_ibat_high_trip(int *thresh_value)
+{
+	int ret = 0;
+	int8_t val = 0;
+
+	*thresh_value = (int)val;
+	ret = bcl_read_register((bcl_perph_version == BCL_PMI8994) ?
+		BCL_IBAT_TRIP : BCL_8998_IBAT_HIGH, &val);
+	if (ret) {
+		pr_err("BCL register read error. err:%d\n", ret);
+		ret = 0;
+		val = bcl_perph->param[BCL_PARAM_CURRENT].high_trip;
+		*thresh_value = (int)val;
+	} else {
+		*thresh_value = (int)val;
+		convert_adc_to_ibat_val(thresh_value);
+		pr_debug("Reading Ibat high trip:%d. ADC_val:%d\n",
+				*thresh_value, val);
+	}
+
+	return ret;
+}
+
+static int bcl_read_ibat_low_trip(int *thresh_value)
+{
+	*thresh_value = bcl_perph->param[BCL_PARAM_CURRENT].low_trip;
+	return 0;
+}
+
+static int bcl_read_vbat_low_trip(int *thresh_value)
+{
+	int ret = 0;
+	int8_t val = 0;
+
+	*thresh_value = (int)val;
+	ret = bcl_read_register((bcl_perph_version == BCL_PMI8994)
+			? BCL_VBAT_TRIP	: BCL_8998_VBAT_ADC_LOW,
+			&val);
+	if (ret) {
+		pr_err("BCL register read error. err:%d\n", ret);
+		ret = 0;
+		*thresh_value = bcl_perph->param[BCL_PARAM_VOLTAGE].low_trip;
+	} else {
+		*thresh_value = (int)val;
+		convert_adc_to_vbat_val(thresh_value);
+		pr_debug("Reading Ibat high trip:%d. ADC_val:%d\n",
+				*thresh_value, val);
+	}
+
+	return ret;
+}
+
+static int bcl_read_vbat_high_trip(int *thresh_value)
+{
+	*thresh_value = bcl_perph->param[BCL_PARAM_VOLTAGE].high_trip;
+	return 0;
+}
+
+static int bcl_clear_vbat_min(void)
+{
+	int ret  = 0;
+
+	if (bcl_perph_version == BCL_PMI8994)
+		ret = bcl_write_register(BCL_VBAT_MIN_CLR, BIT(7));
+	else
+		ret = bcl_write_register(BCL_8998_MAX_MIN_CLR,
+			BIT(BCL_8998_VBAT_MIN_CLR));
+	if (ret)
+		pr_err("Error in clearing vbat min reg. err:%d", ret);
+
+	return ret;
+}
+
+static int bcl_clear_ibat_max(void)
+{
+	int ret  = 0;
+
+	if (bcl_perph_version == BCL_PMI8994)
+		ret = bcl_write_register(BCL_IBAT_MAX_CLR, BIT(7));
+	else
+		ret = bcl_write_register(BCL_8998_MAX_MIN_CLR,
+			BIT(BCL_8998_IBAT_MAX_CLR));
+	if (ret)
+		pr_err("Error in clearing ibat max reg. err:%d", ret);
+
+	return ret;
+}
+
+static int bcl_read_ibat_max(int *adc_value)
+{
+	int ret = 0, timeout = 0;
+	int8_t val[VAL_CP_REG_BUF_LEN] = {0};
+
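+	/*
+	 * The value register is shadowed by a copy register two bytes
+	 * away; re-read until both agree so a byte sampled mid-update is
+	 * never used.
+	 */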
+	*adc_value = (int)val[VAL_REG_BUF_OFFSET];
+	do {
+		ret = bcl_read_multi_register(
+			(bcl_perph_version == BCL_PMI8994) ? BCL_IBAT_MAX
+			: BCL_8998_IBAT_MAX, val,
+			VAL_CP_REG_BUF_LEN);
+		if (ret) {
+			pr_err("BCL register read error. err:%d\n", ret);
+			goto bcl_read_exit;
+		}
+	} while (val[VAL_REG_BUF_OFFSET] != val[VAL_CP_REG_BUF_OFFSET]
+		&& timeout++ < BCL_READ_RETRY_LIMIT);
+	if (val[VAL_REG_BUF_OFFSET] != val[VAL_CP_REG_BUF_OFFSET]) {
+		ret = -ENODEV;
+		goto bcl_read_exit;
+	}
+	*adc_value = (int)val[VAL_REG_BUF_OFFSET];
+	convert_adc_to_ibat_val(adc_value);
+	pr_debug("Ibat Max:%d. ADC_val:%d\n", *adc_value,
+			val[VAL_REG_BUF_OFFSET]);
+	trace_bcl_hw_sensor_reading("Ibat Max[uA]", *adc_value);
+
+bcl_read_exit:
+	return ret;
+}
+
+static int bcl_read_vbat_min(int *adc_value)
+{
+	int ret = 0, timeout = 0;
+	int8_t val[VAL_CP_REG_BUF_LEN] = {0};
+
+	*adc_value = (int)val[VAL_REG_BUF_OFFSET];
+	do {
+		ret = bcl_read_multi_register(
+			(bcl_perph_version == BCL_PMI8994) ? BCL_VBAT_MIN
+			: BCL_8998_VBAT_MIN, val,
+			VAL_CP_REG_BUF_LEN);
+		if (ret) {
+			pr_err("BCL register read error. err:%d\n", ret);
+			goto bcl_read_exit;
+		}
+	} while (val[VAL_REG_BUF_OFFSET] != val[VAL_CP_REG_BUF_OFFSET]
+		&& timeout++ < BCL_READ_RETRY_LIMIT);
+	if (val[VAL_REG_BUF_OFFSET] != val[VAL_CP_REG_BUF_OFFSET]) {
+		ret = -ENODEV;
+		goto bcl_read_exit;
+	}
+	*adc_value = (int)val[VAL_REG_BUF_OFFSET];
+	convert_adc_to_vbat_val(adc_value);
+	pr_debug("Vbat Min:%d. ADC_val:%d\n", *adc_value,
+			val[VAL_REG_BUF_OFFSET]);
+	trace_bcl_hw_sensor_reading("vbat Min[uV]", *adc_value);
+
+bcl_read_exit:
+	return ret;
+}
+
+static int bcl_read_ibat(int *adc_value)
+{
+	int ret = 0, timeout = 0;
+	int8_t val[VAL_CP_REG_BUF_LEN] = {0};
+
+	*adc_value = (int)val[VAL_REG_BUF_OFFSET];
+	do {
+		ret = bcl_read_multi_register(
+			(bcl_perph_version == BCL_PMI8994) ? BCL_IBAT_VALUE
+			: BCL_8998_IBAT_VALUE, val,
+			VAL_CP_REG_BUF_LEN);
+		if (ret) {
+			pr_err("BCL register read error. err:%d\n", ret);
+			goto bcl_read_exit;
+		}
+	} while (val[VAL_REG_BUF_OFFSET] != val[VAL_CP_REG_BUF_OFFSET]
+		&& timeout++ < BCL_READ_RETRY_LIMIT);
+	if (val[VAL_REG_BUF_OFFSET] != val[VAL_CP_REG_BUF_OFFSET]) {
+		ret = -ENODEV;
+		goto bcl_read_exit;
+	}
+	*adc_value = (int)val[VAL_REG_BUF_OFFSET];
+	convert_adc_to_ibat_val(adc_value);
+	pr_debug("Read Ibat:%d. ADC_val:%d\n", *adc_value,
+			val[VAL_REG_BUF_OFFSET]);
+	trace_bcl_hw_sensor_reading("ibat[uA]", *adc_value);
+
+bcl_read_exit:
+	return ret;
+}
+
+static int bcl_read_vbat(int *adc_value)
+{
+	int ret = 0, timeout = 0;
+	int8_t val[VAL_CP_REG_BUF_LEN] = {0};
+
+	*adc_value = (int)val[VAL_REG_BUF_OFFSET];
+	do {
+		ret = bcl_read_multi_register(
+			(bcl_perph_version == BCL_PMI8994) ? BCL_VBAT_VALUE :
+			BCL_8998_VBAT_VALUE, val,
+			VAL_CP_REG_BUF_LEN);
+		if (ret) {
+			pr_err("BCL register read error. err:%d\n", ret);
+			goto bcl_read_exit;
+		}
+	} while (val[VAL_REG_BUF_OFFSET] != val[VAL_CP_REG_BUF_OFFSET]
+		&& timeout++ < BCL_READ_RETRY_LIMIT);
+	if (val[VAL_REG_BUF_OFFSET] != val[VAL_CP_REG_BUF_OFFSET]) {
+		ret = -ENODEV;
+		goto bcl_read_exit;
+	}
+	*adc_value = (int)val[VAL_REG_BUF_OFFSET];
+	convert_adc_to_vbat_val(adc_value);
+	pr_debug("Read Vbat:%d. ADC_val:%d\n", *adc_value,
+			val[VAL_REG_BUF_OFFSET]);
+	trace_bcl_hw_sensor_reading("vbat[uV]", *adc_value);
+
+bcl_read_exit:
+	return ret;
+}
+
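+/*
+ * Post-trip polling: after a trip interrupt the IRQ is left disabled and
+ * the parameter is polled until it crosses back over its clear threshold,
+ * at which point monitoring (and the interrupt) is re-armed.
+ */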
+static void bcl_poll_ibat_low(struct work_struct *work)
+{
+	int ret = 0, val = 0;
+	struct bcl_peripheral_data *perph_data =
+		&bcl_perph->param[BCL_PARAM_CURRENT];
+
+	trace_bcl_hw_event("ibat poll low. Enter");
+	mutex_lock(&perph_data->state_trans_lock);
+	if (perph_data->state != BCL_PARAM_POLLING) {
+		pr_err("Invalid ibat state %d\n", perph_data->state);
+		goto exit_ibat;
+	}
+
+	ret = perph_data->read_max(&val);
+	if (ret) {
+		pr_err("Error in reading ibat. err:%d\n", ret);
+		goto reschedule_ibat;
+	}
+	ret = perph_data->clear_max();
+	if (ret)
+		pr_err("Error clearing max ibat reg. err:%d\n", ret);
+	if (val <= perph_data->low_trip) {
+		pr_debug("Ibat reached low clear trip. ibat:%d\n", val);
+		trace_bcl_hw_state_event("Polling to Monitor. Ibat[uA]:", val);
+		trace_bcl_hw_mitigation("Ibat low trip. Ibat[uA]", val);
+		perph_data->ops.notify(perph_data->param_data, val,
+			BCL_LOW_TRIP);
+		perph_data->state = BCL_PARAM_MONITOR;
+		enable_irq(perph_data->irq_num);
+	} else {
+		goto reschedule_ibat;
+	}
+
+exit_ibat:
+	mutex_unlock(&perph_data->state_trans_lock);
+	trace_bcl_hw_event("ibat poll low. Exit");
+	return;
+
+reschedule_ibat:
+	mutex_unlock(&perph_data->state_trans_lock);
+	schedule_delayed_work(&perph_data->poll_work,
+		msecs_to_jiffies(perph_data->polling_delay_ms));
+	trace_bcl_hw_event("ibat poll low. Exit");
+	return;
+}
+
+static void bcl_poll_vbat_high(struct work_struct *work)
+{
+	int ret = 0, val = 0;
+	struct bcl_peripheral_data *perph_data =
+		&bcl_perph->param[BCL_PARAM_VOLTAGE];
+
+	trace_bcl_hw_event("vbat poll high. Enter");
+	mutex_lock(&perph_data->state_trans_lock);
+	if (perph_data->state != BCL_PARAM_POLLING) {
+		pr_err("Invalid vbat state %d\n", perph_data->state);
+		goto exit_vbat;
+	}
+
+	ret = perph_data->read_max(&val);
+	if (ret) {
+		pr_err("Error in reading vbat. err:%d\n", ret);
+		goto reschedule_vbat;
+	}
+	ret = perph_data->clear_max();
+	if (ret)
+		pr_err("Error clearing min vbat reg. err:%d\n", ret);
+	if (val >= perph_data->high_trip) {
+		pr_debug("Vbat reached high clear trip. vbat:%d\n", val);
+		trace_bcl_hw_state_event("Polling to Monitor. vbat[uV]:", val);
+		trace_bcl_hw_mitigation("vbat high trip. vbat[uV]", val);
+		perph_data->ops.notify(perph_data->param_data, val,
+			BCL_HIGH_TRIP);
+		perph_data->state = BCL_PARAM_MONITOR;
+		enable_irq(perph_data->irq_num);
+	} else {
+		goto reschedule_vbat;
+	}
+
+exit_vbat:
+	mutex_unlock(&perph_data->state_trans_lock);
+	trace_bcl_hw_event("vbat poll high. Exit");
+	return;
+
+reschedule_vbat:
+	mutex_unlock(&perph_data->state_trans_lock);
+	schedule_delayed_work(&perph_data->poll_work,
+		msecs_to_jiffies(perph_data->polling_delay_ms));
+	trace_bcl_hw_event("vbat poll high. Exit");
+	return;
+}
+
+static irqreturn_t bcl_handle_ibat(int irq, void *data)
+{
+	int thresh_value = 0, ret = 0;
+	struct bcl_peripheral_data *perph_data =
+		(struct bcl_peripheral_data *)data;
+
+	trace_bcl_hw_mitigation_event("Ibat interrupted");
+	mutex_lock(&perph_data->state_trans_lock);
+	if (perph_data->state == BCL_PARAM_MONITOR) {
+		ret = perph_data->read_max(&perph_data->trip_val);
+		if (ret) {
+			pr_err("Error reading max/min reg. err:%d\n", ret);
+			goto exit_intr;
+		}
+		ret = perph_data->clear_max();
+		if (ret)
+			pr_err("Error clearing max/min reg. err:%d\n", ret);
+		thresh_value = perph_data->high_trip;
+		convert_adc_to_ibat_val(&thresh_value);
+		/* Derate the PBS trip threshold to account for dead time */
+		thresh_value -= perph_data->inhibit_derating_ua;
+		if (perph_data->trip_val < thresh_value) {
+			pr_debug("False Ibat high trip. ibat:%d ibat_thresh_val:%d\n",
+				perph_data->trip_val, thresh_value);
+			trace_bcl_hw_event("Ibat invalid interrupt");
+			goto exit_intr;
+		}
+		pr_debug("Ibat reached high trip. ibat:%d\n",
+				perph_data->trip_val);
+		trace_bcl_hw_state_event("Monitor to Polling. ibat[uA]:",
+				perph_data->trip_val);
+		disable_irq_nosync(perph_data->irq_num);
+		perph_data->state = BCL_PARAM_POLLING;
+		trace_bcl_hw_mitigation("ibat high trip. ibat[uA]",
+				perph_data->trip_val);
+		perph_data->ops.notify(perph_data->param_data,
+			perph_data->trip_val, BCL_HIGH_TRIP);
+		schedule_delayed_work(&perph_data->poll_work,
+			msecs_to_jiffies(perph_data->polling_delay_ms));
+	} else {
+		pr_debug("Ignoring interrupt\n");
+		trace_bcl_hw_event("Ibat Ignoring interrupt");
+	}
+
+exit_intr:
+	mutex_unlock(&perph_data->state_trans_lock);
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t bcl_handle_vbat(int irq, void *data)
+{
+	int thresh_value = 0, ret = 0;
+	struct bcl_peripheral_data *perph_data =
+		(struct bcl_peripheral_data *)data;
+
+	trace_bcl_hw_mitigation_event("Vbat Interrupted");
+	mutex_lock(&perph_data->state_trans_lock);
+	if (perph_data->state == BCL_PARAM_MONITOR) {
+		ret = perph_data->read_max(&perph_data->trip_val);
+		if (ret) {
+			pr_err("Error reading max/min reg. err:%d\n", ret);
+			goto exit_intr;
+		}
+		ret = perph_data->clear_max();
+		if (ret)
+			pr_err("Error clearing max/min reg. err:%d\n", ret);
+		thresh_value = perph_data->low_trip;
+		convert_adc_to_vbat_val(&thresh_value);
+		if (perph_data->trip_val > thresh_value) {
+			pr_debug("False vbat min trip. vbat:%d vbat_thresh_val:%d\n",
+				perph_data->trip_val, thresh_value);
+			trace_bcl_hw_event("Vbat Invalid interrupt");
+			goto exit_intr;
+		}
+		pr_debug("Vbat reached Low trip. vbat:%d\n",
+			perph_data->trip_val);
+		trace_bcl_hw_state_event("Monitor to Polling. vbat[uV]:",
+				perph_data->trip_val);
+		disable_irq_nosync(perph_data->irq_num);
+		perph_data->state = BCL_PARAM_POLLING;
+		trace_bcl_hw_mitigation("vbat low trip. vbat[uV]",
+				perph_data->trip_val);
+		perph_data->ops.notify(perph_data->param_data,
+			perph_data->trip_val, BCL_LOW_TRIP);
+		schedule_delayed_work(&perph_data->poll_work,
+			msecs_to_jiffies(perph_data->polling_delay_ms));
+	} else {
+		pr_debug("Ignoring interrupt\n");
+		trace_bcl_hw_event("Vbat Ignoring interrupt");
+	}
+
+exit_intr:
+	mutex_unlock(&perph_data->state_trans_lock);
+	return IRQ_HANDLED;
+}
+
+static int bcl_get_devicetree_data(struct platform_device *pdev)
+{
+	int ret = 0, irq_num = 0, temp_val = 0;
+	char *key = NULL;
+	const __be32 *prop = NULL;
+	struct device_node *dev_node = pdev->dev.of_node;
+
+	prop = of_get_address_by_name(dev_node, "fg_user_adc", 0, 0);
+	if (prop) {
+		bcl_perph->base_addr = be32_to_cpu(*prop);
+		pr_debug("fg_user_adc@%04x\n", bcl_perph->base_addr);
+	} else {
+		dev_err(&pdev->dev, "No fg_user_adc registers found\n");
+		return -EINVAL;
+	}
+
+	prop = of_get_address_by_name(dev_node,
+			"pon_spare", 0, 0);
+	if (prop) {
+		bcl_perph->pon_spare_addr = be32_to_cpu(*prop);
+		pr_debug("pon_spare@%04x\n", bcl_perph->pon_spare_addr);
+	}
+
+	/* Register SPMI peripheral interrupt */
+	irq_num = platform_get_irq_byname(pdev, BCL_VBAT_INT_NAME);
+	if (irq_num < 0) {
+		pr_err("Invalid vbat IRQ\n");
+		ret = -ENXIO;
+		goto bcl_dev_exit;
+	}
+	bcl_perph->param[BCL_PARAM_VOLTAGE].irq_num = irq_num;
+	irq_num = platform_get_irq_byname(pdev, BCL_IBAT_INT_NAME);
+	if (irq_num < 0) {
+		pr_err("Invalid ibat IRQ\n");
+		ret = -ENXIO;
+		goto bcl_dev_exit;
+	}
+	bcl_perph->param[BCL_PARAM_CURRENT].irq_num = irq_num;
+
+	if (bcl_perph_version == BCL_PMI8994) {
+		/* Get VADC and IADC scaling factor */
+		key = "qcom,vbat-scaling-factor";
+		READ_CONV_FACTOR(dev_node, key, temp_val, ret,
+			bcl_perph->param[BCL_PARAM_VOLTAGE].scaling_factor);
+		key = "qcom,vbat-gain-numerator";
+		READ_CONV_FACTOR(dev_node, key, temp_val, ret,
+			bcl_perph->param[BCL_PARAM_VOLTAGE].gain_factor_num);
+		key = "qcom,vbat-gain-denominator";
+		READ_CONV_FACTOR(dev_node, key, temp_val, ret,
+			bcl_perph->param[BCL_PARAM_VOLTAGE].gain_factor_den);
+		key = "qcom,ibat-scaling-factor";
+		READ_CONV_FACTOR(dev_node, key, temp_val, ret,
+			bcl_perph->param[BCL_PARAM_CURRENT].scaling_factor);
+		key = "qcom,ibat-offset-numerator";
+		READ_CONV_FACTOR(dev_node, key, temp_val, ret,
+			bcl_perph->param[BCL_PARAM_CURRENT].offset_factor_num);
+		key = "qcom,ibat-offset-denominator";
+		READ_CONV_FACTOR(dev_node, key, temp_val, ret,
+			bcl_perph->param[BCL_PARAM_CURRENT].offset_factor_den);
+		key = "qcom,ibat-gain-numerator";
+		READ_CONV_FACTOR(dev_node, key, temp_val, ret,
+			bcl_perph->param[BCL_PARAM_CURRENT].gain_factor_num);
+		key = "qcom,ibat-gain-denominator";
+		READ_CONV_FACTOR(dev_node, key, temp_val, ret,
+			bcl_perph->param[BCL_PARAM_CURRENT].gain_factor_den);
+		key = "qcom,inhibit-derating-ua";
+		READ_OPTIONAL_PROP(dev_node, key, temp_val, ret,
+			bcl_perph->param[BCL_PARAM_CURRENT].
+			inhibit_derating_ua);
+	} else {
+		prop = of_get_address_by_name(dev_node,
+			"fg_lmh", 0, 0);
+		if (prop) {
+			bcl_perph->fg_lmh_addr = be32_to_cpu(*prop);
+			pr_debug("fg_lmh@%04x\n", bcl_perph->fg_lmh_addr);
+		} else {
+			return -ENODEV;
+		}
+	}
+	key = "qcom,vbat-polling-delay-ms";
+	READ_CONV_FACTOR(dev_node, key, temp_val, ret,
+		bcl_perph->param[BCL_PARAM_VOLTAGE].polling_delay_ms);
+	key = "qcom,ibat-polling-delay-ms";
+	READ_CONV_FACTOR(dev_node, key, temp_val, ret,
+		bcl_perph->param[BCL_PARAM_CURRENT].polling_delay_ms);
+
+bcl_dev_exit:
+	return ret;
+}
+
+static int bcl_calibrate(void)
+{
+	int ret = 0;
+	int8_t i_src = 0, val = 0;
+
+	ret = bcl_read_register(BCL_I_SENSE_SRC, &i_src);
+	if (ret) {
+		pr_err("Error reading current sense reg. err:%d\n", ret);
+		goto bcl_cal_exit;
+	}
+
+	ret = bcl_read_register((i_src & 0x01) ? BCL_I_GAIN_RSENSE
+		: BCL_I_GAIN_BATFET, &val);
+	if (ret) {
+		pr_err("Error reading %s current gain. err:%d\n",
+			(i_src & 0x01) ? "rsense" : "batfet", ret);
+		goto bcl_cal_exit;
+	}
+	bcl_perph->param[BCL_PARAM_CURRENT].gain = val;
+	ret = bcl_read_register((i_src & 0x01) ? BCL_I_OFFSET_RSENSE
+		: BCL_I_OFFSET_BATFET, &val);
+	if (ret) {
+		pr_err("Error reading %s current offset. err:%d\n",
+			(i_src & 0x01) ? "rsense" : "batfet", ret);
+		goto bcl_cal_exit;
+	}
+	bcl_perph->param[BCL_PARAM_CURRENT].offset = val;
+	ret = bcl_read_register(BCL_V_GAIN_BAT, &val);
+	if (ret) {
+		pr_err("Error reading vbat gain. err:%d\n", ret);
+		goto bcl_cal_exit;
+	}
+	bcl_perph->param[BCL_PARAM_VOLTAGE].gain = val;
+
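+	/* If the current-sense source changed while enabled, re-apply trips */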
+	if (((i_src & 0x01) != bcl_perph->i_src)
+		&& (bcl_perph->enabled)) {
+		bcl_set_low_vbat(bcl_perph->param[BCL_PARAM_VOLTAGE]
+				.low_trip);
+		bcl_set_high_ibat(bcl_perph->param[BCL_PARAM_CURRENT]
+				.high_trip);
+		bcl_perph->i_src = i_src;
+	}
+
+bcl_cal_exit:
+	return ret;
+}
+
+static void power_supply_callback(struct power_supply *psy)
+{
+	static struct power_supply *bms_psy;
+	int ret = 0;
+
+	if (calibration_done)
+		return;
+
+	if (!bms_psy)
+		bms_psy = power_supply_get_by_name("bms");
+	if (bms_psy) {
+		calibration_done = true;
+		trace_bcl_hw_event("Recalibrate callback");
+		ret = bcl_calibrate();
+		if (ret)
+			pr_err("Could not read calibration values. err:%d\n",
+				ret);
+	}
+}
+
+static int bcl_psy_get_property(struct power_supply *psy,
+				enum power_supply_property prop,
+				union power_supply_propval *val)
+{
+	return 0;
+}
+
+static int bcl_psy_set_property(struct power_supply *psy,
+				enum power_supply_property prop,
+				const union power_supply_propval *val)
+{
+	return -EINVAL;
+}
+
+static int bcl_update_data(void)
+{
+	int ret = 0;
+
+	bcl_perph->param[BCL_PARAM_VOLTAGE].ops.read = bcl_read_vbat;
+	bcl_perph->param[BCL_PARAM_VOLTAGE].ops.get_high_trip
+		= bcl_read_vbat_high_trip;
+	bcl_perph->param[BCL_PARAM_VOLTAGE].ops.get_low_trip
+		= bcl_read_vbat_low_trip;
+	bcl_perph->param[BCL_PARAM_VOLTAGE].ops.set_high_trip
+		= bcl_set_high_vbat;
+	bcl_perph->param[BCL_PARAM_VOLTAGE].ops.set_low_trip
+		= bcl_set_low_vbat;
+	bcl_perph->param[BCL_PARAM_VOLTAGE].ops.enable
+		 = bcl_monitor_enable;
+	bcl_perph->param[BCL_PARAM_VOLTAGE].ops.disable
+		= bcl_monitor_disable;
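+	/*
+	 * Note: for vbat the max hooks track the minimum reading, since
+	 * undervoltage is the hazardous direction for the voltage param.
+	 */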
+	bcl_perph->param[BCL_PARAM_VOLTAGE].read_max
+		 = bcl_read_vbat_min;
+	bcl_perph->param[BCL_PARAM_VOLTAGE].clear_max
+		 = bcl_clear_vbat_min;
+
+	bcl_perph->param[BCL_PARAM_CURRENT].ops.read = bcl_read_ibat;
+	bcl_perph->param[BCL_PARAM_CURRENT].ops.get_high_trip
+		= bcl_read_ibat_high_trip;
+	bcl_perph->param[BCL_PARAM_CURRENT].ops.get_low_trip
+		= bcl_read_ibat_low_trip;
+	bcl_perph->param[BCL_PARAM_CURRENT].ops.set_high_trip
+		 = bcl_set_high_ibat;
+	bcl_perph->param[BCL_PARAM_CURRENT].ops.set_low_trip
+		 = bcl_set_low_ibat;
+	bcl_perph->param[BCL_PARAM_CURRENT].ops.enable
+		= bcl_monitor_enable;
+	bcl_perph->param[BCL_PARAM_CURRENT].ops.disable
+		= bcl_monitor_disable;
+	bcl_perph->param[BCL_PARAM_CURRENT].read_max
+		= bcl_read_ibat_max;
+	bcl_perph->param[BCL_PARAM_CURRENT].clear_max
+		= bcl_clear_ibat_max;
+
+	bcl_perph->param[BCL_PARAM_VOLTAGE].param_data = msm_bcl_register_param(
+		BCL_PARAM_VOLTAGE, &bcl_perph->param[BCL_PARAM_VOLTAGE].ops,
+		"vbat");
+	if (!bcl_perph->param[BCL_PARAM_VOLTAGE].param_data) {
+		pr_err("register Vbat failed.\n");
+		ret = -ENODEV;
+		goto update_data_exit;
+	}
+	bcl_perph->param[BCL_PARAM_CURRENT].param_data = msm_bcl_register_param(
+		BCL_PARAM_CURRENT, &bcl_perph->param[BCL_PARAM_CURRENT].ops,
+		"ibat");
+	if (!bcl_perph->param[BCL_PARAM_CURRENT].param_data) {
+		pr_err("register Ibat failed.\n");
+		ret = -ENODEV;
+		goto update_data_exit;
+	}
+	INIT_DELAYED_WORK(&bcl_perph->param[BCL_PARAM_VOLTAGE].poll_work,
+		bcl_poll_vbat_high);
+	INIT_DELAYED_WORK(&bcl_perph->param[BCL_PARAM_CURRENT].poll_work,
+		bcl_poll_ibat_low);
+	mutex_init(&bcl_perph->param[BCL_PARAM_CURRENT].state_trans_lock);
+	mutex_init(&bcl_perph->param[BCL_PARAM_VOLTAGE].state_trans_lock);
+
+update_data_exit:
+	return ret;
+}
+
+static int bcl_probe(struct platform_device *pdev)
+{
+	int ret = 0;
+	struct power_supply_config bcl_psy_cfg = {};
+
+	bcl_perph = devm_kzalloc(&pdev->dev, sizeof(struct bcl_device),
+			GFP_KERNEL);
+	if (!bcl_perph) {
+		pr_err("Memory alloc failed\n");
+		return -ENOMEM;
+	}
+	bcl_perph->regmap = dev_get_regmap(pdev->dev.parent, NULL);
+	if (!bcl_perph->regmap) {
+		dev_err(&pdev->dev, "Couldn't get parent's regmap\n");
+		return -EINVAL;
+	}
+	bcl_perph->pdev = pdev;
+	bcl_perph->dev = &(pdev->dev);
+
+	ret = bcl_get_devicetree_data(pdev);
+	if (ret) {
+		pr_err("Device tree data fetch error. err:%d\n", ret);
+		goto bcl_probe_exit;
+	}
+	if (bcl_perph_version == BCL_PMI8994) {
+		ret = bcl_calibrate();
+		if (ret) {
+			pr_debug("Could not read calibration values. err:%d\n",
+				ret);
+			goto bcl_probe_exit;
+		}
+		bcl_psy_d.name = bcl_psy_name;
+		bcl_psy_d.type = POWER_SUPPLY_TYPE_BMS;
+		bcl_psy_d.get_property = bcl_psy_get_property;
+		bcl_psy_d.set_property = bcl_psy_set_property;
+		bcl_psy_d.num_properties = 0;
+		bcl_psy_d.external_power_changed = power_supply_callback;
+
+		bcl_psy_cfg.num_supplicants = 0;
+		bcl_psy_cfg.drv_data = bcl_perph;
+
+		bcl_psy = devm_power_supply_register(&pdev->dev, &bcl_psy_d,
+				&bcl_psy_cfg);
+		if (IS_ERR(bcl_psy)) {
+			ret = PTR_ERR(bcl_psy);
+			pr_err("Unable to register bcl_psy rc = %d\n", ret);
+			goto bcl_probe_exit;
+		}
+	} else {
+		bcl_write_register(BCL_8998_LMH_CFG, BCL_LMH_CFG_VAL);
+		bcl_write_register(BCL_8998_BCL_CFG, BCL_CFG_VAL);
+		bcl_write_general_register(LMH_8998_INT_POL_HIGH,
+			bcl_perph->fg_lmh_addr, LMH_INT_VAL);
+		bcl_write_general_register(LMH_8998_INT_EN,
+			bcl_perph->fg_lmh_addr, LMH_INT_VAL);
+	}
+
+	ret = bcl_update_data();
+	if (ret) {
+		pr_err("Update data failed. err:%d\n", ret);
+		goto bcl_probe_exit;
+	}
+	mutex_lock(&bcl_perph->param[BCL_PARAM_VOLTAGE].state_trans_lock);
+	ret = devm_request_threaded_irq(&pdev->dev,
+			bcl_perph->param[BCL_PARAM_VOLTAGE].irq_num,
+			NULL, bcl_handle_vbat,
+			IRQF_TRIGGER_RISING | IRQF_ONESHOT,
+			"bcl_vbat_interrupt",
+			&bcl_perph->param[BCL_PARAM_VOLTAGE]);
+	if (ret) {
+		dev_err(&pdev->dev, "Error requesting VBAT irq. err:%d\n",
+			ret);
+		mutex_unlock(
+			&bcl_perph->param[BCL_PARAM_VOLTAGE].state_trans_lock);
+		goto bcl_probe_exit;
+	}
+	/*
+	 * BCL is enabled by default in hardware.
+	 * Disable BCL monitoring till a valid threshold is set by APPS
+	 */
+	disable_irq_nosync(bcl_perph->param[BCL_PARAM_VOLTAGE].irq_num);
+	mutex_unlock(&bcl_perph->param[BCL_PARAM_VOLTAGE].state_trans_lock);
+
+	mutex_lock(&bcl_perph->param[BCL_PARAM_CURRENT].state_trans_lock);
+	ret = devm_request_threaded_irq(&pdev->dev,
+			bcl_perph->param[BCL_PARAM_CURRENT].irq_num,
+			NULL, bcl_handle_ibat,
+			IRQF_TRIGGER_RISING | IRQF_ONESHOT,
+			"bcl_ibat_interrupt",
+			&bcl_perph->param[BCL_PARAM_CURRENT]);
+	if (ret) {
+		dev_err(&pdev->dev, "Error requesting IBAT irq. err:%d\n",
+			ret);
+		mutex_unlock(
+			&bcl_perph->param[BCL_PARAM_CURRENT].state_trans_lock);
+		goto bcl_probe_exit;
+	}
+	disable_irq_nosync(bcl_perph->param[BCL_PARAM_CURRENT].irq_num);
+	mutex_unlock(&bcl_perph->param[BCL_PARAM_CURRENT].state_trans_lock);
+
+	dev_set_drvdata(&pdev->dev, bcl_perph);
+	ret = bcl_write_register(BCL_MONITOR_EN, BIT(7));
+	if (ret) {
+		pr_err("Error accessing BCL peripheral. err:%d\n", ret);
+		goto bcl_probe_exit;
+	}
+
+	return 0;
+
+bcl_probe_exit:
+	bcl_perph = NULL;
+	return ret;
+}
+
+static int bcl_remove(struct platform_device *pdev)
+{
+	int ret = 0, i;
+
+	ret = bcl_monitor_disable();
+	if (ret)
+		pr_err("Error disabling BCL. err:%d\n", ret);
+
+	for (i = 0; i < BCL_PARAM_MAX; i++) {
+		if (!bcl_perph->param[i].param_data)
+			continue;
+
+		ret = msm_bcl_unregister_param(bcl_perph->param[i].param_data);
+		if (ret)
+			pr_err("Error unregistering with Framework. err:%d\n",
+					ret);
+	}
+
+	return 0;
+}
+
+static struct of_device_id bcl_match[] = {
+	{	.compatible	= "qcom,msm-bcl",
+		.data		= (void *) BCL_PMI8994,
+	},
+	{	.compatible	= "qcom,msm-bcl-lmh",
+		.data		= (void *) BCL_PMI8998,
+	},
+	{},
+};
+
+static struct platform_driver bcl_driver = {
+	.probe	= bcl_probe,
+	.remove	= bcl_remove,
+	.driver	= {
+		.name		= BCL_DRIVER_NAME,
+		.owner		= THIS_MODULE,
+		.of_match_table	= bcl_match,
+	},
+};
+
+static int __init bcl_perph_init(void)
+{
+	struct device_node *comp_node;
+
+	comp_node = of_find_matching_node(NULL, bcl_match);
+	bcl_perph_version = BCL_PMI8994;
+	if (comp_node) {
+		const struct of_device_id *match = of_match_node(bcl_match,
+							comp_node);
+		if (!match) {
+			pr_err("Couldn't find a match\n");
+			goto plt_register;
+		}
+		bcl_perph_version = (enum bcl_hw_type)match->data;
+		of_node_put(comp_node);
+	}
+
+plt_register:
+	return platform_driver_register(&bcl_driver);
+}
+
+static void __exit bcl_perph_exit(void)
+{
+	platform_driver_unregister(&bcl_driver);
+}
+fs_initcall(bcl_perph_init);
+module_exit(bcl_perph_exit);
+MODULE_ALIAS("platform:" BCL_DRIVER_NAME);
diff -Nruw linux-4.4.115-fbx/drivers/power/supply./qcom/fg-core.h linux-4.4.115-fbx/drivers/power/supply/qcom/fg-core.h
--- linux-4.4.115-fbx/drivers/power/supply./qcom/fg-core.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/power/supply/qcom/fg-core.h	2019-01-22 16:16:26.227271074 +0100
@@ -0,0 +1,525 @@
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __FG_CORE_H__
+#define __FG_CORE_H__
+
+#include <linux/alarmtimer.h>
+#include <linux/atomic.h>
+#include <linux/bitops.h>
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/power_supply.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+#include <linux/string_helpers.h>
+#include <linux/types.h>
+#include <linux/uaccess.h>
+#include <linux/pmic-voter.h>
+
+#define fg_dbg(chip, reason, fmt, ...)			\
+	do {							\
+		if (*chip->debug_mask & (reason))		\
+			pr_info(fmt, ##__VA_ARGS__);	\
+		else						\
+			pr_debug(fmt, ##__VA_ARGS__);	\
+	} while (0)
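+
+/*
+ * Example (illustrative): with FG_STATUS set in *chip->debug_mask,
+ * fg_dbg(chip, FG_STATUS, "msoc=%d\n", msoc) logs via pr_info();
+ * otherwise it falls through to pr_debug().
+ */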
+
+#define is_between(left, right, value) \
+		(((left) >= (right) && (left) >= (value) \
+			&& (value) >= (right)) \
+		|| ((left) <= (right) && (left) <= (value) \
+			&& (value) <= (right)))
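+
+/*
+ * The bounds may be passed in either order, e.g. is_between(0, 10, 5)
+ * and is_between(10, 0, 5) both evaluate true.
+ */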
+
+/* Awake votable reasons */
+#define SRAM_READ		"fg_sram_read"
+#define SRAM_WRITE		"fg_sram_write"
+#define PROFILE_LOAD		"fg_profile_load"
+#define TTF_PRIMING		"fg_ttf_priming"
+
+/* Delta BSOC irq votable reasons */
+#define DELTA_BSOC_IRQ_VOTER	"fg_delta_bsoc_irq"
+
+/* Battery missing irq votable reasons */
+#define BATT_MISS_IRQ_VOTER	"fg_batt_miss_irq"
+
+#define DEBUG_PRINT_BUFFER_SIZE		64
+/* 3 byte address + 1 space character */
+#define ADDR_LEN			4
+/* Format is 'XX ' */
+#define CHARS_PER_ITEM			3
+/* 4 data items per line */
+#define ITEMS_PER_LINE			4
+#define MAX_LINE_LENGTH			(ADDR_LEN + (ITEMS_PER_LINE *	\
+					CHARS_PER_ITEM) + 1)
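+/* i.e. 4 + (4 * 3) + 1 = 17 characters per dumped line */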
+
+#define FG_SRAM_ADDRESS_MAX		255
+#define FG_SRAM_LEN			504
+#define PROFILE_LEN			224
+#define PROFILE_COMP_LEN		148
+#define BUCKET_COUNT			8
+#define BUCKET_SOC_PCT			(256 / BUCKET_COUNT)
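+/* i.e. each of the 8 cycle-counter buckets spans 32 raw SOC codes (12.5%) */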
+
+#define KI_COEFF_MAX			62200
+#define KI_COEFF_SOC_LEVELS		3
+
+#define SLOPE_LIMIT_COEFF_MAX		31
+
+#define BATT_THERM_NUM_COEFFS		3
+
+#define MAX_CC_STEPS			20
+
+/* Debug flag definitions */
+enum fg_debug_flag {
+	FG_IRQ			= BIT(0), /* Show interrupts */
+	FG_STATUS		= BIT(1), /* Show FG status changes */
+	FG_POWER_SUPPLY		= BIT(2), /* Show POWER_SUPPLY */
+	FG_SRAM_WRITE		= BIT(3), /* Show SRAM writes */
+	FG_SRAM_READ		= BIT(4), /* Show SRAM reads */
+	FG_BUS_WRITE		= BIT(5), /* Show REGMAP writes */
+	FG_BUS_READ		= BIT(6), /* Show REGMAP reads */
+	FG_CAP_LEARN		= BIT(7), /* Show capacity learning */
+	FG_TTF			= BIT(8), /* Show time to full */
+};
+
+/* SRAM access */
+enum sram_access_flags {
+	FG_IMA_DEFAULT	= 0,
+	FG_IMA_ATOMIC	= BIT(0),
+	FG_IMA_NO_WLOCK	= BIT(1),
+};
+
+/* JEITA */
+enum jeita_levels {
+	JEITA_COLD = 0,
+	JEITA_COOL,
+	JEITA_WARM,
+	JEITA_HOT,
+	NUM_JEITA_LEVELS,
+};
+
+/* FG irqs */
+enum fg_irq_index {
+	MSOC_FULL_IRQ = 0,
+	MSOC_HIGH_IRQ,
+	MSOC_EMPTY_IRQ,
+	MSOC_LOW_IRQ,
+	MSOC_DELTA_IRQ,
+	BSOC_DELTA_IRQ,
+	SOC_READY_IRQ,
+	SOC_UPDATE_IRQ,
+	BATT_TEMP_DELTA_IRQ,
+	BATT_MISSING_IRQ,
+	ESR_DELTA_IRQ,
+	VBATT_LOW_IRQ,
+	VBATT_PRED_DELTA_IRQ,
+	DMA_GRANT_IRQ,
+	MEM_XCP_IRQ,
+	IMA_RDY_IRQ,
+	FG_IRQ_MAX,
+};
+
+/*
+ * List of FG_SRAM parameters. Please add a parameter only if it is either an
+ * entry used to configure an entity (e.g. termination current), which might
+ * need some encoding, or an entry that will be read from SRAM and decoded
+ * (e.g. CC_SOC_SW) for SW to use in various places. For
+ * generic read/writes to SRAM registers, please use fg_sram_read/write APIs
+ * directly without adding an entry here.
+ */
+enum fg_sram_param_id {
+	FG_SRAM_BATT_SOC = 0,
+	FG_SRAM_FULL_SOC,
+	FG_SRAM_VOLTAGE_PRED,
+	FG_SRAM_OCV,
+	FG_SRAM_ESR,
+	FG_SRAM_RSLOW,
+	FG_SRAM_ALG_FLAGS,
+	FG_SRAM_CC_SOC,
+	FG_SRAM_CC_SOC_SW,
+	FG_SRAM_ACT_BATT_CAP,
+	FG_SRAM_TIMEBASE,
+	/* Entries below here are configurable during initialization */
+	FG_SRAM_CUTOFF_VOLT,
+	FG_SRAM_EMPTY_VOLT,
+	FG_SRAM_VBATT_LOW,
+	FG_SRAM_FLOAT_VOLT,
+	FG_SRAM_VBATT_FULL,
+	FG_SRAM_ESR_TIMER_DISCHG_MAX,
+	FG_SRAM_ESR_TIMER_DISCHG_INIT,
+	FG_SRAM_ESR_TIMER_CHG_MAX,
+	FG_SRAM_ESR_TIMER_CHG_INIT,
+	FG_SRAM_ESR_PULSE_THRESH,
+	FG_SRAM_SYS_TERM_CURR,
+	FG_SRAM_CHG_TERM_CURR,
+	FG_SRAM_CHG_TERM_BASE_CURR,
+	FG_SRAM_CUTOFF_CURR,
+	FG_SRAM_DELTA_MSOC_THR,
+	FG_SRAM_DELTA_BSOC_THR,
+	FG_SRAM_RECHARGE_SOC_THR,
+	FG_SRAM_RECHARGE_VBATT_THR,
+	FG_SRAM_KI_COEFF_MED_DISCHG,
+	FG_SRAM_KI_COEFF_HI_DISCHG,
+	FG_SRAM_KI_COEFF_FULL_SOC,
+	FG_SRAM_ESR_TIGHT_FILTER,
+	FG_SRAM_ESR_BROAD_FILTER,
+	FG_SRAM_SLOPE_LIMIT,
+	FG_SRAM_MAX,
+};
+
+struct fg_sram_param {
+	u16 addr_word;
+	int addr_byte;
+	u8  len;
+	int value;
+	int numrtr;
+	int denmtr;
+	int offset;
+	void (*encode)(struct fg_sram_param *sp, enum fg_sram_param_id id,
+		int val, u8 *buf);
+	int (*decode)(struct fg_sram_param *sp, enum fg_sram_param_id id,
+		int val);
+};
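+
+/*
+ * encode()/decode() callbacks typically apply a linear transform such as
+ * raw = val * numrtr / denmtr (plus offset); the exact form is left to
+ * each parameter's callback.
+ */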
+
+enum fg_alg_flag_id {
+	ALG_FLAG_SOC_LT_OTG_MIN = 0,
+	ALG_FLAG_SOC_LT_RECHARGE,
+	ALG_FLAG_IBATT_LT_ITERM,
+	ALG_FLAG_IBATT_GT_HPM,
+	ALG_FLAG_IBATT_GT_UPM,
+	ALG_FLAG_VBATT_LT_RECHARGE,
+	ALG_FLAG_VBATT_GT_VFLOAT,
+	ALG_FLAG_MAX,
+};
+
+struct fg_alg_flag {
+	char	*name;
+	u8	bit;
+	bool	invalid;
+};
+
+enum wa_flags {
+	PMI8998_V1_REV_WA = BIT(0),
+	PM660_TSMC_OSC_WA = BIT(1),
+};
+
+enum slope_limit_status {
+	LOW_TEMP_DISCHARGE = 0,
+	LOW_TEMP_CHARGE,
+	HIGH_TEMP_DISCHARGE,
+	HIGH_TEMP_CHARGE,
+	SLOPE_LIMIT_NUM_COEFFS,
+};
+
+enum esr_filter_status {
+	ROOM_TEMP = 1,
+	LOW_TEMP,
+	RELAX_TEMP,
+};
+
+enum esr_timer_config {
+	TIMER_RETRY = 0,
+	TIMER_MAX,
+	NUM_ESR_TIMERS,
+};
+
+enum ttf_mode {
+	TTF_MODE_NORMAL = 0,
+	TTF_MODE_QNOVO,
+};
+
+/* DT parameters for FG device */
+struct fg_dt_props {
+	bool	force_load_profile;
+	bool	hold_soc_while_full;
+	bool	linearize_soc;
+	bool	auto_recharge_soc;
+	int	cutoff_volt_mv;
+	int	empty_volt_mv;
+	int	vbatt_low_thr_mv;
+	int	chg_term_curr_ma;
+	int	chg_term_base_curr_ma;
+	int	sys_term_curr_ma;
+	int	cutoff_curr_ma;
+	int	delta_soc_thr;
+	int	recharge_soc_thr;
+	int	recharge_volt_thr_mv;
+	int	rsense_sel;
+	int	esr_timer_charging[NUM_ESR_TIMERS];
+	int	esr_timer_awake[NUM_ESR_TIMERS];
+	int	esr_timer_asleep[NUM_ESR_TIMERS];
+	int	rconn_mohms;
+	int	esr_clamp_mohms;
+	int	cl_start_soc;
+	int	cl_max_temp;
+	int	cl_min_temp;
+	int	cl_max_cap_inc;
+	int	cl_max_cap_dec;
+	int	cl_max_cap_limit;
+	int	cl_min_cap_limit;
+	int	jeita_hyst_temp;
+	int	batt_temp_delta;
+	int	esr_flt_switch_temp;
+	int	esr_tight_flt_upct;
+	int	esr_broad_flt_upct;
+	int	esr_tight_lt_flt_upct;
+	int	esr_broad_lt_flt_upct;
+	int	esr_flt_rt_switch_temp;
+	int	esr_tight_rt_flt_upct;
+	int	esr_broad_rt_flt_upct;
+	int	slope_limit_temp;
+	int	esr_pulse_thresh_ma;
+	int	esr_meas_curr_ma;
+	int	ki_coeff_full_soc_dischg;
+	int	jeita_thresholds[NUM_JEITA_LEVELS];
+	int	ki_coeff_soc[KI_COEFF_SOC_LEVELS];
+	int	ki_coeff_med_dischg[KI_COEFF_SOC_LEVELS];
+	int	ki_coeff_hi_dischg[KI_COEFF_SOC_LEVELS];
+	int	slope_limit_coeffs[SLOPE_LIMIT_NUM_COEFFS];
+	u8	batt_therm_coeffs[BATT_THERM_NUM_COEFFS];
+};
+
+/* parameters from battery profile */
+struct fg_batt_props {
+	const char	*batt_type_str;
+	char		*batt_profile;
+	int		float_volt_uv;
+	int		vbatt_full_mv;
+	int		fastchg_curr_ma;
+};
+
+struct fg_cyc_ctr_data {
+	bool		en;
+	bool		started[BUCKET_COUNT];
+	u16		count[BUCKET_COUNT];
+	u8		last_soc[BUCKET_COUNT];
+	int		id;
+	struct mutex	lock;
+};
+
+struct fg_cap_learning {
+	bool		active;
+	int		init_cc_soc_sw;
+	int64_t		nom_cap_uah;
+	int64_t		init_cc_uah;
+	int64_t		final_cc_uah;
+	int64_t		learned_cc_uah;
+	struct mutex	lock;
+};
+
+struct fg_irq_info {
+	const char		*name;
+	const irq_handler_t	handler;
+	bool			wakeable;
+	int			irq;
+};
+
+struct fg_circ_buf {
+	int	arr[10];
+	int	size;
+	int	head;
+};
+
+struct fg_cc_step_data {
+	int arr[MAX_CC_STEPS];
+	int sel;
+};
+
+struct fg_pt {
+	s32 x;
+	s32 y;
+};
+
+struct ttf {
+	struct fg_circ_buf	ibatt;
+	struct fg_circ_buf	vbatt;
+	struct fg_cc_step_data	cc_step;
+	struct mutex		lock;
+	int			mode;
+	int			last_ttf;
+	s64			last_ms;
+};
+
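+/* each tuple is - <x, ln(x / 1000) * 1000>, e.g. <2000, 693> since ln(2) ~= 0.693 */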
+static const struct fg_pt fg_ln_table[] = {
+	{ 1000,		0 },
+	{ 2000,		693 },
+	{ 4000,		1386 },
+	{ 6000,		1792 },
+	{ 8000,		2079 },
+	{ 16000,	2773 },
+	{ 32000,	3466 },
+	{ 64000,	4159 },
+	{ 128000,	4852 },
+};
+
+/* each tuple is - <temperature in degC, Timebase> */
+static const struct fg_pt fg_tsmc_osc_table[] = {
+	{ -20,		395064 },
+	{ -10,		398114 },
+	{   0,		401669 },
+	{  10,		404641 },
+	{  20,		408856 },
+	{  25,		412449 },
+	{  30,		416532 },
+	{  40,		420289 },
+	{  50,		425020 },
+	{  60,		430160 },
+	{  70,		434175 },
+	{  80,		439475 },
+	{  90,		444992 },
+};
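+/* Intermediate temperatures are presumably interpolated via fg_lerp(). */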
+
+struct fg_chip {
+	struct device		*dev;
+	struct pmic_revid_data	*pmic_rev_id;
+	struct regmap		*regmap;
+	struct dentry		*dfs_root;
+	struct power_supply	*fg_psy;
+	struct power_supply	*batt_psy;
+	struct power_supply	*usb_psy;
+	struct power_supply	*dc_psy;
+	struct power_supply	*parallel_psy;
+	struct power_supply	*pc_port_psy;
+	struct iio_channel	*batt_id_chan;
+	struct iio_channel	*die_temp_chan;
+	struct fg_memif		*sram;
+	struct fg_irq_info	*irqs;
+	struct votable		*awake_votable;
+	struct votable		*delta_bsoc_irq_en_votable;
+	struct votable		*batt_miss_irq_en_votable;
+	struct fg_sram_param	*sp;
+	struct fg_alg_flag	*alg_flags;
+	int			*debug_mask;
+	char			batt_profile[PROFILE_LEN];
+	struct fg_dt_props	dt;
+	struct fg_batt_props	bp;
+	struct fg_cyc_ctr_data	cyc_ctr;
+	struct notifier_block	nb;
+	struct fg_cap_learning  cl;
+	struct ttf		ttf;
+	struct mutex		bus_lock;
+	struct mutex		sram_rw_lock;
+	struct mutex		charge_full_lock;
+	struct mutex		qnovo_esr_ctrl_lock;
+	spinlock_t		suspend_lock;
+	u32			batt_soc_base;
+	u32			batt_info_base;
+	u32			mem_if_base;
+	u32			rradc_base;
+	u32			wa_flags;
+	int			batt_id_ohms;
+	int			ki_coeff_full_soc;
+	int			charge_status;
+	int			prev_charge_status;
+	int			charge_done;
+	int			charge_type;
+	int			online_status;
+	int			last_soc;
+	int			last_batt_temp;
+	int			health;
+	int			maint_soc;
+	int			delta_soc;
+	int			last_msoc;
+	int			last_recharge_volt_mv;
+	int			delta_temp_irq_count;
+	int			esr_timer_charging_default[NUM_ESR_TIMERS];
+	enum slope_limit_status	slope_limit_sts;
+	enum esr_filter_status	esr_flt_sts;
+	bool			profile_available;
+	bool			profile_loaded;
+	bool			battery_missing;
+	bool			fg_restarting;
+	bool			charge_full;
+	bool			recharge_soc_adjusted;
+	bool			ki_coeff_dischg_en;
+	bool			esr_fcc_ctrl_en;
+	bool			soc_reporting_ready;
+	bool			esr_flt_cold_temp_en;
+	bool			slope_limit_en;
+	bool			use_ima_single_mode;
+	bool			qnovo_enable;
+	bool			suspended;
+	struct completion	soc_update;
+	struct completion	soc_ready;
+	struct delayed_work	profile_load_work;
+	struct work_struct	status_change_work;
+	struct delayed_work	ttf_work;
+	struct delayed_work	sram_dump_work;
+	struct work_struct	esr_filter_work;
+	struct alarm		esr_filter_alarm;
+	ktime_t			last_delta_temp_time;
+};
+
+/* Debugfs data structures are below */
+
+/* Log buffer */
+struct fg_log_buffer {
+	size_t		rpos;
+	size_t		wpos;
+	size_t		len;
+	char		data[0];
+};
+
+/* transaction parameters */
+struct fg_trans {
+	struct fg_chip		*chip;
+	struct mutex		fg_dfs_lock; /* Prevent thread concurrency */
+	struct fg_log_buffer	*log;
+	u32			cnt;
+	u16			addr;
+	u32			offset;
+	u8			*data;
+};
+
+struct fg_dbgfs {
+	struct debugfs_blob_wrapper	help_msg;
+	struct fg_chip			*chip;
+	struct dentry			*root;
+	u32				cnt;
+	u32				addr;
+};
+
+extern int fg_sram_write(struct fg_chip *chip, u16 address, u8 offset,
+			u8 *val, int len, int flags);
+extern int fg_sram_read(struct fg_chip *chip, u16 address, u8 offset,
+			u8 *val, int len, int flags);
+extern int fg_sram_masked_write(struct fg_chip *chip, u16 address, u8 offset,
+			u8 mask, u8 val, int flags);
+extern int fg_interleaved_mem_read(struct fg_chip *chip, u16 address,
+			u8 offset, u8 *val, int len);
+extern int fg_interleaved_mem_write(struct fg_chip *chip, u16 address,
+			u8 offset, u8 *val, int len, bool atomic_access);
+extern int fg_read(struct fg_chip *chip, int addr, u8 *val, int len);
+extern int fg_write(struct fg_chip *chip, int addr, u8 *val, int len);
+extern int fg_masked_write(struct fg_chip *chip, int addr, u8 mask, u8 val);
+extern int fg_ima_init(struct fg_chip *chip);
+extern int fg_clear_ima_errors_if_any(struct fg_chip *chip, bool check_hw_sts);
+extern int fg_clear_dma_errors_if_any(struct fg_chip *chip);
+extern int fg_debugfs_create(struct fg_chip *chip);
+extern void fill_string(char *str, size_t str_len, u8 *buf, int buf_len);
+extern void dump_sram(u8 *buf, int addr, int len);
+extern int64_t twos_compliment_extend(int64_t val, int s_bit_pos);
+extern s64 fg_float_decode(u16 val);
+extern bool is_input_present(struct fg_chip *chip);
+extern bool is_qnovo_en(struct fg_chip *chip);
+extern void fg_circ_buf_add(struct fg_circ_buf *, int);
+extern void fg_circ_buf_clr(struct fg_circ_buf *);
+extern int fg_circ_buf_avg(struct fg_circ_buf *, int *);
+extern int fg_circ_buf_median(struct fg_circ_buf *, int *);
+extern int fg_lerp(const struct fg_pt *, size_t, s32, s32 *);
+#endif
diff -Nruw linux-4.4.115-fbx/drivers/power/supply./qcom/fg-memif.c linux-4.4.115-fbx/drivers/power/supply/qcom/fg-memif.c
--- linux-4.4.115-fbx/drivers/power/supply./qcom/fg-memif.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/power/supply/qcom/fg-memif.c	2019-01-22 16:16:26.227271074 +0100
@@ -0,0 +1,780 @@
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt)	"FG: %s: " fmt, __func__
+
+#include "fg-core.h"
+#include "fg-reg.h"
+
+/* Generic definitions */
+#define RETRY_COUNT		3
+#define BYTES_PER_SRAM_WORD	4
+
+enum {
+	FG_READ = 0,
+	FG_WRITE,
+};
+
+static int fg_set_address(struct fg_chip *chip, u16 address)
+{
+	u8 buffer[2];
+	int rc;
+
+	buffer[0] = address & 0xFF;
+	/* MSB has to be written zero */
+	buffer[1] = 0;
+
+	rc = fg_write(chip, MEM_IF_ADDR_LSB(chip), buffer, 2);
+	if (rc < 0) {
+		pr_err("failed to write to 0x%04X, rc=%d\n",
+			MEM_IF_ADDR_LSB(chip), rc);
+		return rc;
+	}
+
+	return rc;
+}
+
+static int fg_config_access_mode(struct fg_chip *chip, bool access, bool burst)
+{
+	int rc;
+	u8 intf_ctl = 0;
+
+	fg_dbg(chip, FG_SRAM_READ | FG_SRAM_WRITE, "access: %d burst: %d\n",
+		access, burst);
+
+	WARN_ON(burst && chip->use_ima_single_mode);
+	intf_ctl = ((access == FG_WRITE) ? IMA_WR_EN_BIT : 0) |
+			(burst ? MEM_ACS_BURST_BIT : 0);
+
+	rc = fg_masked_write(chip, MEM_IF_IMA_CTL(chip), IMA_CTL_MASK,
+			intf_ctl);
+	if (rc < 0) {
+		pr_err("failed to write to 0x%04x, rc=%d\n",
+			MEM_IF_IMA_CTL(chip), rc);
+		return -EIO;
+	}
+
+	return rc;
+}
+
+static int fg_run_iacs_clear_sequence(struct fg_chip *chip)
+{
+	u8 val, hw_sts, exp_sts;
+	int rc, tries = 250;
+
+	/*
+	 * Values to write for running the IACS clear sequence come from
+	 * the hardware documentation.
+	 */
+	rc = fg_masked_write(chip, MEM_IF_IMA_CFG(chip),
+			IACS_CLR_BIT | STATIC_CLK_EN_BIT,
+			IACS_CLR_BIT | STATIC_CLK_EN_BIT);
+	if (rc < 0) {
+		pr_err("failed to write 0x%04x, rc=%d\n", MEM_IF_IMA_CFG(chip),
+			rc);
+		return rc;
+	}
+
+	rc = fg_config_access_mode(chip, FG_READ, false);
+	if (rc < 0) {
+		pr_err("failed to write to 0x%04x, rc=%d\n",
+			MEM_IF_IMA_CTL(chip), rc);
+		return rc;
+	}
+
+	rc = fg_masked_write(chip, MEM_IF_MEM_INTF_CFG(chip),
+				MEM_ACCESS_REQ_BIT | IACS_SLCT_BIT,
+				MEM_ACCESS_REQ_BIT | IACS_SLCT_BIT);
+	if (rc < 0) {
+		pr_err("failed to set ima_req_access bit rc=%d\n", rc);
+		return rc;
+	}
+
+	/* Delay for the clock to reach FG */
+	usleep_range(35, 40);
+
+	while (1) {
+		val = 0;
+		rc = fg_write(chip, MEM_IF_ADDR_MSB(chip), &val, 1);
+		if (rc < 0) {
+			pr_err("failed to write 0x%04x, rc=%d\n",
+				MEM_IF_ADDR_MSB(chip), rc);
+			return rc;
+		}
+
+		val = 0;
+		rc = fg_write(chip, MEM_IF_WR_DATA3(chip), &val, 1);
+		if (rc < 0) {
+			pr_err("failed to write 0x%04x, rc=%d\n",
+				MEM_IF_WR_DATA3(chip), rc);
+			return rc;
+		}
+
+		rc = fg_read(chip, MEM_IF_RD_DATA3(chip), &val, 1);
+		if (rc < 0) {
+			pr_err("failed to read 0x%04x, rc=%d\n",
+				MEM_IF_RD_DATA3(chip), rc);
+			return rc;
+		}
+
+		/* Delay for IMA hardware to clear */
+		usleep_range(35, 40);
+
+		rc = fg_read(chip, MEM_IF_IMA_HW_STS(chip), &hw_sts, 1);
+		if (rc < 0) {
+			pr_err("failed to read ima_hw_sts rc=%d\n", rc);
+			return rc;
+		}
+
+		if (hw_sts != 0)
+			continue;
+
+		rc = fg_read(chip, MEM_IF_IMA_EXP_STS(chip), &exp_sts, 1);
+		if (rc < 0) {
+			pr_err("failed to read ima_exp_sts rc=%d\n", rc);
+			return rc;
+		}
+
+		if (exp_sts == 0 || !(--tries))
+			break;
+	}
+
+	if (!tries)
+		pr_err("Failed to clear the error. hw_sts: %x exp_sts: %x\n",
+			hw_sts, exp_sts);
+
+	rc = fg_masked_write(chip, MEM_IF_IMA_CFG(chip), IACS_CLR_BIT, 0);
+	if (rc < 0) {
+		pr_err("failed to write 0x%04x, rc=%d\n", MEM_IF_IMA_CFG(chip),
+			rc);
+		return rc;
+	}
+
+	udelay(5);
+
+	rc = fg_masked_write(chip, MEM_IF_MEM_INTF_CFG(chip),
+				MEM_ACCESS_REQ_BIT | IACS_SLCT_BIT, 0);
+	if (rc < 0) {
+		pr_err("failed to write to 0x%04x, rc=%d\n",
+			MEM_IF_MEM_INTF_CFG(chip), rc);
+		return rc;
+	}
+
+	/* Delay before next transaction is attempted */
+	usleep_range(35, 40);
+	fg_dbg(chip, FG_SRAM_READ | FG_SRAM_WRITE, "IACS clear sequence complete\n");
+	return rc;
+}
+
+int fg_clear_dma_errors_if_any(struct fg_chip *chip)
+{
+	int rc;
+	u8 dma_sts;
+	bool error_present;
+
+	rc = fg_read(chip, MEM_IF_DMA_STS(chip), &dma_sts, 1);
+	if (rc < 0) {
+		pr_err("failed to read addr=0x%04x, rc=%d\n",
+			MEM_IF_DMA_STS(chip), rc);
+		return rc;
+	}
+	fg_dbg(chip, FG_STATUS, "dma_sts: %x\n", dma_sts);
+
+	error_present = dma_sts & (DMA_WRITE_ERROR_BIT | DMA_READ_ERROR_BIT);
+	rc = fg_masked_write(chip, MEM_IF_DMA_CTL(chip), DMA_CLEAR_LOG_BIT,
+			error_present ? DMA_CLEAR_LOG_BIT : 0);
+	if (rc < 0) {
+		pr_err("failed to write addr=0x%04x, rc=%d\n",
+			MEM_IF_DMA_CTL(chip), rc);
+		return rc;
+	}
+
+	return 0;
+}
+
+int fg_clear_ima_errors_if_any(struct fg_chip *chip, bool check_hw_sts)
+{
+	int rc = 0;
+	u8 err_sts, exp_sts = 0, hw_sts = 0;
+	bool run_err_clr_seq = false;
+
+	rc = fg_read(chip, MEM_IF_IMA_EXP_STS(chip), &exp_sts, 1);
+	if (rc < 0) {
+		pr_err("failed to read ima_exp_sts rc=%d\n", rc);
+		return rc;
+	}
+
+	rc = fg_read(chip, MEM_IF_IMA_HW_STS(chip), &hw_sts, 1);
+	if (rc < 0) {
+		pr_err("failed to read ima_hw_sts rc=%d\n", rc);
+		return rc;
+	}
+
+	rc = fg_read(chip, MEM_IF_IMA_ERR_STS(chip), &err_sts, 1);
+	if (rc < 0) {
+		pr_err("failed to read ima_err_sts rc=%d\n", rc);
+		return rc;
+	}
+
+	fg_dbg(chip, FG_SRAM_READ | FG_SRAM_WRITE, "ima_err_sts=%x ima_exp_sts=%x ima_hw_sts=%x\n",
+		err_sts, exp_sts, hw_sts);
+
+	if (check_hw_sts) {
+		/*
+		 * Lower nibble should be equal to upper nibble before SRAM
+		 * transactions begin from the SW side. If they are unequal, then
+		 * the error clear sequence should be run irrespective of IMA
+		 * exception errors.
+		 */
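+		/* e.g. hw_sts = 0x21 -> lower nibble 0x1 != upper nibble 0x2 */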
+		if ((hw_sts & 0x0F) != hw_sts >> 4) {
+			pr_err("IMA HW not in correct state, hw_sts=%x\n",
+				hw_sts);
+			run_err_clr_seq = true;
+		}
+	}
+
+	if (exp_sts & (IACS_ERR_BIT | XCT_TYPE_ERR_BIT | DATA_RD_ERR_BIT |
+		DATA_WR_ERR_BIT | ADDR_BURST_WRAP_BIT | ADDR_STABLE_ERR_BIT)) {
+		pr_err("IMA exception bit set, exp_sts=%x\n", exp_sts);
+		run_err_clr_seq = true;
+	}
+
+	if (run_err_clr_seq) {
+		/* clear the error */
+		rc = fg_run_iacs_clear_sequence(chip);
+		if (rc < 0) {
+			pr_err("failed to run iacs clear sequence rc=%d\n", rc);
+			return rc;
+		}
+
+		/* Retry again as there was an error in the transaction */
+		return -EAGAIN;
+	}
+
+	return rc;
+}
+
+static int fg_check_iacs_ready(struct fg_chip *chip)
+{
+	int rc = 0, tries = 250;
+	u8 ima_opr_sts = 0;
+
+	/*
+	 * Additional delay to make sure IACS ready bit is set after
+	 * Read/Write operation.
+	 */
+
+	usleep_range(30, 35);
+	while (1) {
+		rc = fg_read(chip, MEM_IF_IMA_OPR_STS(chip), &ima_opr_sts, 1);
+		if (rc < 0) {
+			pr_err("failed to read 0x%04x, rc=%d\n",
+				MEM_IF_IMA_OPR_STS(chip), rc);
+			return rc;
+		}
+
+		if (ima_opr_sts & IACS_RDY_BIT)
+			break;
+
+		if (!(--tries))
+			break;
+
+		/* delay for iacs_ready to be asserted */
+		usleep_range(5000, 7000);
+	}
+
+	if (!tries) {
+		pr_err("IACS_RDY not set, opr_sts: %d\n", ima_opr_sts);
+		/* check for error condition */
+		rc = fg_clear_ima_errors_if_any(chip, false);
+		if (rc < 0) {
+			if (rc != -EAGAIN)
+				pr_err("Failed to check for ima errors rc=%d\n",
+					rc);
+			return rc;
+		}
+
+		return -EBUSY;
+	}
+
+	return 0;
+}
+
+static int __fg_interleaved_mem_write(struct fg_chip *chip, u16 address,
+				int offset, u8 *val, int len)
+{
+	int rc = 0, i;
+	u8 *ptr = val, byte_enable = 0, num_bytes = 0;
+
+	fg_dbg(chip, FG_SRAM_WRITE, "length %d addr=%02X offset=%d\n", len,
+		address, offset);
+
+	while (len > 0) {
+		num_bytes = (offset + len) > BYTES_PER_SRAM_WORD ?
+				(BYTES_PER_SRAM_WORD - offset) : len;
+
+		/* write to byte_enable */
+		for (i = offset; i < (offset + num_bytes); i++)
+			byte_enable |= BIT(i);
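+		/* e.g. offset = 1, num_bytes = 2 -> byte_enable = 0x06 */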
+
+		rc = fg_write(chip, MEM_IF_IMA_BYTE_EN(chip), &byte_enable, 1);
+		if (rc < 0) {
+			pr_err("Unable to write to byte_en_reg rc=%d\n",
+				rc);
+			return rc;
+		}
+
+		/* write data */
+		rc = fg_write(chip, MEM_IF_WR_DATA0(chip) + offset, ptr,
+				num_bytes);
+		if (rc < 0) {
+			pr_err("failed to write to 0x%04x, rc=%d\n",
+				MEM_IF_WR_DATA0(chip) + offset, rc);
+			return rc;
+		}
+
+		/*
+		 * The last-byte WR_DATA3 starts the write transaction.
+		 * Write a dummy value to WR_DATA3 if it does not have
+		 * valid data. This dummy data is not written to the
+		 * SRAM as byte_en for WR_DATA3 is not set.
+		 */
+		if (!(byte_enable & BIT(3))) {
+			u8 dummy_byte = 0x0;
+
+			rc = fg_write(chip, MEM_IF_WR_DATA3(chip), &dummy_byte,
+					1);
+			if (rc < 0) {
+				pr_err("failed to write dummy-data to WR_DATA3 rc=%d\n",
+					rc);
+				return rc;
+			}
+		}
+
+		/* check for error condition */
+		rc = fg_clear_ima_errors_if_any(chip, false);
+		if (rc < 0) {
+			if (rc == -EAGAIN)
+				pr_err("IMA error cleared, address [%d %d] len %d\n",
+					address, offset, len);
+			else
+				pr_err("Failed to check for ima errors rc=%d\n",
+					rc);
+			return rc;
+		}
+
+		ptr += num_bytes;
+		len -= num_bytes;
+		offset = byte_enable = 0;
+
+		if (chip->use_ima_single_mode && len) {
+			address++;
+			rc = fg_set_address(chip, address);
+			if (rc < 0) {
+				pr_err("failed to set address rc = %d\n", rc);
+				return rc;
+			}
+		}
+
+		rc = fg_check_iacs_ready(chip);
+		if (rc < 0) {
+			pr_debug("IACS_RDY failed rc=%d\n", rc);
+			return rc;
+		}
+	}
+
+	return rc;
+}
+
+static int __fg_interleaved_mem_read(struct fg_chip *chip, u16 address,
+				int offset, u8 *val, int len)
+{
+	int rc = 0, total_len;
+	u8 *rd_data = val, num_bytes;
+	char str[DEBUG_PRINT_BUFFER_SIZE];
+
+	fg_dbg(chip, FG_SRAM_READ, "length %d addr=%02X\n", len, address);
+
+	total_len = len;
+	while (len > 0) {
+		num_bytes = (offset + len) > BYTES_PER_SRAM_WORD ?
+				(BYTES_PER_SRAM_WORD - offset) : len;
+		rc = fg_read(chip, MEM_IF_RD_DATA0(chip) + offset, rd_data,
+				num_bytes);
+		if (rc < 0) {
+			pr_err("failed to read 0x%04x, rc=%d\n",
+				MEM_IF_RD_DATA0(chip) + offset, rc);
+			return rc;
+		}
+
+		rd_data += num_bytes;
+		len -= num_bytes;
+		offset = 0;
+
+		/* check for error condition */
+		rc = fg_clear_ima_errors_if_any(chip, false);
+		if (rc < 0) {
+			if (rc == -EAGAIN)
+				pr_err("IMA error cleared, address [%d %d] len %d\n",
+					address, offset, len);
+			else
+				pr_err("Failed to check for ima errors rc=%d\n",
+					rc);
+			return rc;
+		}
+
+		if (chip->use_ima_single_mode) {
+			if (len) {
+				address++;
+				rc = fg_set_address(chip, address);
+				if (rc < 0) {
+					pr_err("failed to set address rc = %d\n",
+						rc);
+					return rc;
+				}
+			}
+		} else {
+			if (len && len < BYTES_PER_SRAM_WORD) {
+				/*
+				 * Move to single mode. Changing address is not
+				 * required here as it must be in burst mode.
+				 * Address will get incremented internally by FG
+				 * HW once the MSB of RD_DATA is read.
+				 */
+				rc = fg_config_access_mode(chip, FG_READ,
+								false);
+				if (rc < 0) {
+					pr_err("failed to move to single mode rc=%d\n",
+						rc);
+					return -EIO;
+				}
+			}
+		}
+
+		rc = fg_check_iacs_ready(chip);
+		if (rc < 0) {
+			pr_debug("IACS_RDY failed rc=%d\n", rc);
+			return rc;
+		}
+	}
+
+	if (*chip->debug_mask & FG_SRAM_READ) {
+		fill_string(str, DEBUG_PRINT_BUFFER_SIZE, val, total_len);
+		pr_info("data read: %s\n", str);
+	}
+
+	return rc;
+}
+
+static int fg_get_mem_access_status(struct fg_chip *chip, bool *status)
+{
+	int rc;
+	u8 mem_if_sts;
+
+	rc = fg_read(chip, MEM_IF_MEM_INTF_CFG(chip), &mem_if_sts, 1);
+	if (rc < 0) {
+		pr_err("failed to read rif_mem status rc=%d\n", rc);
+		return rc;
+	}
+
+	*status = mem_if_sts & MEM_ACCESS_REQ_BIT;
+	return 0;
+}
+
+static bool is_mem_access_available(struct fg_chip *chip, int access)
+{
+	bool rif_mem_sts = true;
+	int rc, time_count = 0;
+
+	while (1) {
+		rc = fg_get_mem_access_status(chip, &rif_mem_sts);
+		if (rc < 0)
+			return false;
+
+		/* Inverted logic: the request bit clears when access is available */
+		if (!rif_mem_sts)
+			break;
+
+		fg_dbg(chip, FG_SRAM_READ | FG_SRAM_WRITE, "MEM_ACCESS_REQ is not clear yet for IMA_%s\n",
+			access ? "write" : "read");
+
+		/*
+		 * Try this no more than 4 times. If MEM_ACCESS_REQ is not
+		 * clear, then return an error instead of waiting for it again.
+		 */
+		if (time_count > 4) {
+			pr_err("Tried 4 times (~16ms) polling MEM_ACCESS_REQ\n");
+			return false;
+		}
+
+		/* Wait for 4ms before reading MEM_ACCESS_REQ again */
+		usleep_range(4000, 4100);
+		time_count++;
+	}
+	return true;
+}
+
+static int fg_interleaved_mem_config(struct fg_chip *chip, u8 *val,
+		u16 address, int offset, int len, bool access)
+{
+	int rc = 0;
+	bool burst_mode = false;
+
+	if (!is_mem_access_available(chip, access))
+		return -EBUSY;
+
+	/* configure for IMA access */
+	rc = fg_masked_write(chip, MEM_IF_MEM_INTF_CFG(chip),
+				MEM_ACCESS_REQ_BIT | IACS_SLCT_BIT,
+				MEM_ACCESS_REQ_BIT | IACS_SLCT_BIT);
+	if (rc < 0) {
+		pr_err("failed to set ima_req_access bit rc=%d\n", rc);
+		return rc;
+	}
+
+	/* configure for the read/write, single/burst mode */
+	burst_mode = chip->use_ima_single_mode ? false : ((offset + len) > 4);
+	rc = fg_config_access_mode(chip, access, burst_mode);
+	if (rc < 0) {
+		pr_err("failed to set memory access rc = %d\n", rc);
+		return rc;
+	}
+
+	rc = fg_check_iacs_ready(chip);
+	if (rc < 0) {
+		pr_err_ratelimited("IACS_RDY failed rc=%d\n", rc);
+		return rc;
+	}
+
+	rc = fg_set_address(chip, address);
+	if (rc < 0) {
+		pr_err("failed to set address rc = %d\n", rc);
+		return rc;
+	}
+
+	if (access == FG_READ) {
+		rc = fg_check_iacs_ready(chip);
+		if (rc < 0) {
+			pr_debug("IACS_RDY failed rc=%d\n", rc);
+			return rc;
+		}
+	}
+
+	return rc;
+}
+
+static int fg_get_beat_count(struct fg_chip *chip, u8 *count)
+{
+	int rc;
+
+	rc = fg_read(chip, MEM_IF_FG_BEAT_COUNT(chip), count, 1);
+	*count &= BEAT_COUNT_MASK;
+	return rc;
+}
+
+int fg_interleaved_mem_read(struct fg_chip *chip, u16 address, u8 offset,
+				u8 *val, int len)
+{
+	int rc = 0, ret;
+	u8 start_beat_count, end_beat_count, count = 0;
+	bool retry = false;
+
+	if (offset > 3) {
+		pr_err("offset too large %d\n", offset);
+		return -EINVAL;
+	}
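+
+	/*
+	 * The beat count presumably advances whenever the FG hardware
+	 * updates SRAM; if it changes across a read, the data may be torn
+	 * and the transaction is retried.
+	 */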
+
+retry:
+	if (count >= RETRY_COUNT) {
+		pr_err("Tried %d times\n", RETRY_COUNT);
+		retry = false;
+		goto out;
+	}
+
+	rc = fg_interleaved_mem_config(chip, val, address, offset, len,
+					FG_READ);
+	if (rc < 0) {
+		pr_err("failed to configure SRAM for IMA rc = %d\n", rc);
+		count++;
+		retry = true;
+		goto out;
+	}
+
+	/* read the start beat count */
+	rc = fg_get_beat_count(chip, &start_beat_count);
+	if (rc < 0) {
+		pr_err("failed to read beat count rc=%d\n", rc);
+		count++;
+		retry = true;
+		goto out;
+	}
+
+	/* read data */
+	rc = __fg_interleaved_mem_read(chip, address, offset, val, len);
+	if (rc < 0) {
+		count++;
+		if (rc == -EAGAIN) {
+			pr_err("IMA read failed retry_count = %d\n", count);
+			goto retry;
+		}
+		pr_err("failed to read SRAM address rc = %d\n", rc);
+		retry = true;
+		goto out;
+	}
+
+	/* read the end beat count */
+	rc = fg_get_beat_count(chip, &end_beat_count);
+	if (rc < 0) {
+		pr_err("failed to read beat count rc=%d\n", rc);
+		count++;
+		retry = true;
+		goto out;
+	}
+
+	fg_dbg(chip, FG_SRAM_READ, "Start beat_count = %x End beat_count = %x\n",
+		start_beat_count, end_beat_count);
+
+	if (start_beat_count != end_beat_count) {
+		fg_dbg(chip, FG_SRAM_READ, "Beat counts (%d/%d) do not match - retry transaction\n",
+			start_beat_count, end_beat_count);
+		count++;
+		retry = true;
+	}
+out:
+	/* Release IMA access */
+	ret = fg_masked_write(chip, MEM_IF_MEM_INTF_CFG(chip),
+				MEM_ACCESS_REQ_BIT | IACS_SLCT_BIT, 0);
+	if (rc < 0 && ret < 0) {
+		pr_err("failed to reset IMA access bit ret = %d\n", ret);
+		return ret;
+	}
+
+	if (retry) {
+		retry = false;
+		goto retry;
+	}
+
+	return rc;
+}
+
+int fg_interleaved_mem_write(struct fg_chip *chip, u16 address, u8 offset,
+				u8 *val, int len, bool atomic_access)
+{
+	int rc = 0, ret;
+	u8 start_beat_count, end_beat_count, count = 0;
+	bool retry = false;
+
+	if (offset > 3) {
+		pr_err("offset too large %d\n", offset);
+		return -EINVAL;
+	}
+
+retry:
+	if (count >= RETRY_COUNT) {
+		pr_err("Tried %d times\n", RETRY_COUNT);
+		retry = false;
+		goto out;
+	}
+
+	rc = fg_interleaved_mem_config(chip, val, address, offset, len,
+					FG_WRITE);
+	if (rc < 0) {
+		pr_err("failed to configure SRAM for IMA rc = %d\n", rc);
+		count++;
+		retry = true;
+		goto out;
+	}
+
+	/* read the start beat count */
+	rc = fg_get_beat_count(chip, &start_beat_count);
+	if (rc < 0) {
+		pr_err("failed to read beat count rc=%d\n", rc);
+		count++;
+		retry = true;
+		goto out;
+	}
+
+	/* write data */
+	rc = __fg_interleaved_mem_write(chip, address, offset, val, len);
+	if (rc < 0) {
+		count++;
+		if (rc == -EAGAIN) {
+			pr_err("IMA write failed retry_count = %d\n", count);
+			goto retry;
+		}
+		pr_err("failed to write SRAM address rc = %d\n", rc);
+		retry = true;
+		goto out;
+	}
+
+	/* read the end beat count */
+	rc = fg_get_beat_count(chip, &end_beat_count);
+	if (rc < 0) {
+		pr_err("failed to read beat count rc=%d\n", rc);
+		count++;
+		retry = true;
+		goto out;
+	}
+
+	if (atomic_access && start_beat_count != end_beat_count)
+		pr_err("Start beat_count = %x End beat_count = %x\n",
+			start_beat_count, end_beat_count);
+out:
+	/* Release IMA access */
+	ret = fg_masked_write(chip, MEM_IF_MEM_INTF_CFG(chip),
+				MEM_ACCESS_REQ_BIT | IACS_SLCT_BIT, 0);
+	if (rc < 0 && ret < 0) {
+		pr_err("failed to reset IMA access bit ret = %d\n", ret);
+		return ret;
+	}
+
+	if (retry) {
+		retry = false;
+		goto retry;
+	}
+
+	/* Return the error we got before releasing memory access */
+	return rc;
+}
+
+int fg_ima_init(struct fg_chip *chip)
+{
+	int rc;
+
+	/*
+	 * Change the FG_MEM_INT interrupt to track IACS_READY
+	 * condition instead of end-of-transaction. This makes sure
+	 * that the next transaction starts only after the hw is ready.
+	 */
+	rc = fg_masked_write(chip, MEM_IF_IMA_CFG(chip), IACS_INTR_SRC_SLCT_BIT,
+				IACS_INTR_SRC_SLCT_BIT);
+	if (rc < 0) {
+		pr_err("failed to configure interrupt source %d\n", rc);
+		return rc;
+	}
+
+	/* Clear DMA errors if any before clearing IMA errors */
+	rc = fg_clear_dma_errors_if_any(chip);
+	if (rc < 0) {
+		pr_err("Error in checking DMA errors rc:%d\n", rc);
+		return rc;
+	}
+
+	/* Clear IMA errors if any before SRAM transactions can begin */
+	rc = fg_clear_ima_errors_if_any(chip, true);
+	if (rc < 0 && rc != -EAGAIN) {
+		pr_err("Error in checking IMA errors rc:%d\n", rc);
+		return rc;
+	}
+
+	return 0;
+}
diff -Nruw linux-4.4.115-fbx/drivers/power/supply./qcom/fg-reg.h linux-4.4.115-fbx/drivers/power/supply/qcom/fg-reg.h
--- linux-4.4.115-fbx/drivers/power/supply./qcom/fg-reg.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/power/supply/qcom/fg-reg.h	2019-01-22 16:16:26.227271074 +0100
@@ -0,0 +1,329 @@
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __FG_REG_H__
+#define __FG_REG_H__
+
+/* FG_ADC_RR register definitions used only for READ */
+#define ADC_RR_FAKE_BATT_LOW_LSB(chip)		(chip->rradc_base + 0x58)
+#define ADC_RR_FAKE_BATT_HIGH_LSB(chip)		(chip->rradc_base + 0x5A)
+
+/* FG_BATT_SOC register definitions */
+#define BATT_SOC_FG_ALG_STS(chip)		(chip->batt_soc_base + 0x06)
+#define BATT_SOC_FG_ALG_AUX_STS0(chip)		(chip->batt_soc_base + 0x07)
+#define BATT_SOC_SLEEP_SHUTDOWN_STS(chip)	(chip->batt_soc_base + 0x08)
+#define BATT_SOC_FG_MONOTONIC_SOC(chip)		(chip->batt_soc_base + 0x09)
+#define BATT_SOC_FG_MONOTONIC_SOC_CP(chip)	(chip->batt_soc_base + 0x0A)
+#define BATT_SOC_INT_RT_STS(chip)		(chip->batt_soc_base + 0x10)
+#define BATT_SOC_EN_CTL(chip)			(chip->batt_soc_base + 0x46)
+#define BATT_SOC_RESTART(chip)			(chip->batt_soc_base + 0x48)
+#define BATT_SOC_STS_CLR(chip)			(chip->batt_soc_base + 0x4A)
+#define BATT_SOC_LOW_PWR_CFG(chip)		(chip->batt_soc_base + 0x52)
+#define BATT_SOC_LOW_PWR_STS(chip)		(chip->batt_soc_base + 0x56)
+
+/* BATT_SOC_INT_RT_STS */
+#define MSOC_EMPTY_BIT				BIT(5)
+
+/* BATT_SOC_EN_CTL */
+#define FG_ALGORITHM_EN_BIT			BIT(7)
+
+/* BATT_SOC_RESTART */
+#define RESTART_GO_BIT				BIT(0)
+
+/* FG_BATT_INFO register definitions */
+#define BATT_INFO_BATT_TEMP_STS(chip)		(chip->batt_info_base + 0x06)
+#define BATT_INFO_SYS_BATT(chip)		(chip->batt_info_base + 0x07)
+#define BATT_INFO_FG_STS(chip)			(chip->batt_info_base + 0x09)
+#define BATT_INFO_INT_RT_STS(chip)		(chip->batt_info_base + 0x10)
+#define BATT_INFO_BATT_REM_LATCH(chip)		(chip->batt_info_base + 0x4F)
+#define BATT_INFO_BATT_TEMP_LSB(chip)		(chip->batt_info_base + 0x50)
+#define BATT_INFO_BATT_TEMP_MSB(chip)		(chip->batt_info_base + 0x51)
+#define BATT_INFO_BATT_TEMP_CFG(chip)		(chip->batt_info_base + 0x56)
+#define BATT_INFO_BATT_TMPR_INTR(chip)		(chip->batt_info_base + 0x59)
+#define BATT_INFO_THERM_C1(chip)		(chip->batt_info_base + 0x5C)
+#define BATT_INFO_THERM_C2(chip)		(chip->batt_info_base + 0x5D)
+#define BATT_INFO_THERM_C3(chip)		(chip->batt_info_base + 0x5E)
+#define BATT_INFO_THERM_HALF_RANGE(chip)	(chip->batt_info_base + 0x5F)
+#define BATT_INFO_JEITA_CTLS(chip)		(chip->batt_info_base + 0x61)
+#define BATT_INFO_JEITA_TOO_COLD(chip)		(chip->batt_info_base + 0x62)
+#define BATT_INFO_JEITA_COLD(chip)		(chip->batt_info_base + 0x63)
+#define BATT_INFO_JEITA_HOT(chip)		(chip->batt_info_base + 0x64)
+#define BATT_INFO_JEITA_TOO_HOT(chip)		(chip->batt_info_base + 0x65)
+
+/* only for v1.1 */
+#define BATT_INFO_ESR_CFG(chip)			(chip->batt_info_base + 0x69)
+/* starting from v2.0 */
+#define BATT_INFO_ESR_GENERAL_CFG(chip)		(chip->batt_info_base + 0x68)
+#define BATT_INFO_ESR_PULL_DN_CFG(chip)		(chip->batt_info_base + 0x69)
+#define BATT_INFO_ESR_FAST_CRG_CFG(chip)	(chip->batt_info_base + 0x6A)
+
+#define BATT_INFO_BATT_MISS_CFG(chip)		(chip->batt_info_base + 0x6B)
+#define BATT_INFO_WATCHDOG_COUNT(chip)		(chip->batt_info_base + 0x70)
+#define BATT_INFO_WATCHDOG_CFG(chip)		(chip->batt_info_base + 0x71)
+#define BATT_INFO_IBATT_SENSING_CFG(chip)	(chip->batt_info_base + 0x73)
+#define BATT_INFO_QNOVO_CFG(chip)		(chip->batt_info_base + 0x74)
+#define BATT_INFO_QNOVO_SCALER(chip)		(chip->batt_info_base + 0x75)
+
+/* starting from v2.0 */
+#define BATT_INFO_CRG_SERVICES(chip)		(chip->batt_info_base + 0x90)
+
+/* Following LSB/MSB address are for v2.0 and above; v1.1 have them swapped */
+#define BATT_INFO_VBATT_LSB(chip)		(chip->batt_info_base + 0xA0)
+#define BATT_INFO_VBATT_MSB(chip)		(chip->batt_info_base + 0xA1)
+#define BATT_INFO_IBATT_LSB(chip)		(chip->batt_info_base + 0xA2)
+#define BATT_INFO_IBATT_MSB(chip)		(chip->batt_info_base + 0xA3)
+#define BATT_INFO_ESR_LSB(chip)			(chip->batt_info_base + 0xA4)
+#define BATT_INFO_ESR_MSB(chip)			(chip->batt_info_base + 0xA5)
+#define BATT_INFO_VBATT_LSB_CP(chip)		(chip->batt_info_base + 0xA6)
+#define BATT_INFO_VBATT_MSB_CP(chip)		(chip->batt_info_base + 0xA7)
+#define BATT_INFO_IBATT_LSB_CP(chip)		(chip->batt_info_base + 0xA8)
+#define BATT_INFO_IBATT_MSB_CP(chip)		(chip->batt_info_base + 0xA9)
+#define BATT_INFO_ESR_LSB_CP(chip)		(chip->batt_info_base + 0xAA)
+#define BATT_INFO_ESR_MSB_CP(chip)		(chip->batt_info_base + 0xAB)
+#define BATT_INFO_VADC_LSB(chip)		(chip->batt_info_base + 0xAC)
+#define BATT_INFO_VADC_MSB(chip)		(chip->batt_info_base + 0xAD)
+#define BATT_INFO_IADC_LSB(chip)		(chip->batt_info_base + 0xAE)
+#define BATT_INFO_IADC_MSB(chip)		(chip->batt_info_base + 0xAF)
+#define BATT_INFO_TM_MISC(chip)			(chip->batt_info_base + 0xE5)
+#define BATT_INFO_TM_MISC1(chip)		(chip->batt_info_base + 0xE6)
+
+/* BATT_INFO_BATT_TEMP_STS */
+#define JEITA_TOO_HOT_STS_BIT			BIT(7)
+#define JEITA_HOT_STS_BIT			BIT(6)
+#define JEITA_COLD_STS_BIT			BIT(5)
+#define JEITA_TOO_COLD_STS_BIT			BIT(4)
+#define BATT_TEMP_DELTA_BIT			BIT(1)
+#define BATT_TEMP_AVAIL_BIT			BIT(0)
+
+/* BATT_INFO_SYS_BATT */
+#define BATT_REM_LATCH_STS_BIT			BIT(4)
+#define BATT_MISSING_HW_BIT			BIT(2)
+#define BATT_MISSING_ALG_BIT			BIT(1)
+#define BATT_MISSING_CMP_BIT			BIT(0)
+
+/* BATT_INFO_FG_STS */
+#define FG_WD_RESET_BIT				BIT(7)
+/* This bit is not present in v1.1 */
+#define FG_CRG_TRM_BIT				BIT(0)
+
+/* BATT_INFO_INT_RT_STS */
+#define BT_TMPR_DELTA_BIT			BIT(6)
+#define WDOG_EXP_BIT				BIT(5)
+#define BT_ATTN_BIT				BIT(4)
+#define BT_MISS_BIT				BIT(3)
+#define ESR_DELTA_BIT				BIT(2)
+#define VBT_LOW_BIT				BIT(1)
+#define VBT_PRD_DELTA_BIT			BIT(0)
+
+/* BATT_INFO_BATT_REM_LATCH */
+#define BATT_REM_LATCH_CLR_BIT			BIT(7)
+
+/* BATT_INFO_BATT_TEMP_LSB/MSB */
+#define BATT_TEMP_LSB_MASK			GENMASK(7, 0)
+#define BATT_TEMP_MSB_MASK			GENMASK(2, 0)
+
+/* BATT_INFO_BATT_TEMP_CFG */
+#define JEITA_TEMP_HYST_MASK			GENMASK(5, 4)
+#define JEITA_TEMP_HYST_SHIFT			4
+#define JEITA_TEMP_NO_HYST			0x0
+#define JEITA_TEMP_HYST_1C			0x1
+#define JEITA_TEMP_HYST_2C			0x2
+#define JEITA_TEMP_HYST_3C			0x3
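+
+/*
+ * Example (comment only, illustrative): selecting 2 degC of JEITA
+ * hysteresis boils down to a masked register write with the macros above,
+ * using the fg_masked_write() helper added later in this patch:
+ *
+ *	fg_masked_write(chip, BATT_INFO_BATT_TEMP_CFG(chip),
+ *			JEITA_TEMP_HYST_MASK,
+ *			JEITA_TEMP_HYST_2C << JEITA_TEMP_HYST_SHIFT);
+ */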
+
+/* BATT_INFO_BATT_TMPR_INTR */
+#define CHANGE_THOLD_MASK			GENMASK(1, 0)
+#define BTEMP_DELTA_2K				0x0
+#define BTEMP_DELTA_4K				0x1
+#define BTEMP_DELTA_6K				0x2
+#define BTEMP_DELTA_10K				0x3
+
+/* BATT_INFO_THERM_C1/C2/C3 */
+#define BATT_INFO_THERM_COEFF_MASK		GENMASK(7, 0)
+
+/* BATT_INFO_THERM_HALF_RANGE */
+#define BATT_INFO_THERM_TEMP_MASK		GENMASK(7, 0)
+
+/* BATT_INFO_JEITA_CTLS */
+#define JEITA_STS_CLEAR_BIT			BIT(0)
+
+/* BATT_INFO_JEITA_TOO_COLD/COLD/HOT/TOO_HOT */
+#define JEITA_THOLD_MASK			GENMASK(7, 0)
+
+/* BATT_INFO_ESR_CFG */
+#define CFG_ACTIVE_PD_MASK			GENMASK(2, 1)
+#define CFG_FCC_DEC_MASK			GENMASK(4, 3)
+
+/* BATT_INFO_ESR_GENERAL_CFG */
+#define ESR_DEEP_TAPER_EN_BIT			BIT(0)
+
+/* BATT_INFO_ESR_PULL_DN_CFG */
+#define ESR_PULL_DOWN_IVAL_MASK			GENMASK(3, 2)
+#define ESR_PULL_DOWN_IVAL_SHIFT		2
+#define ESR_MEAS_CUR_60MA			0x0
+#define ESR_MEAS_CUR_120MA			0x1
+#define ESR_MEAS_CUR_180MA			0x2
+#define ESR_MEAS_CUR_240MA			0x3
+#define ESR_PULL_DOWN_MODE_MASK			GENMASK(1, 0)
+#define ESR_NO_PULL_DOWN			0x0
+#define ESR_STATIC_PULL_DOWN			0x1
+#define ESR_CRG_DSC_PULL_DOWN			0x2
+#define ESR_DSC_PULL_DOWN			0x3
+
+/* BATT_INFO_ESR_FAST_CRG_CFG */
+#define ESR_FAST_CRG_IVAL_MASK			GENMASK(3, 1)
+#define ESR_FCC_300MA				0x0
+#define ESR_FCC_600MA				0x1
+#define ESR_FCC_1A				0x2
+#define ESR_FCC_2A				0x3
+#define ESR_FCC_3A				0x4
+#define ESR_FCC_4A				0x5
+#define ESR_FCC_5A				0x6
+#define ESR_FCC_6A				0x7
+#define ESR_FAST_CRG_CTL_EN_BIT			BIT(0)
+
+/* BATT_INFO_BATT_MISS_CFG */
+#define BM_THERM_TH_MASK			GENMASK(5, 4)
+#define RES_TH_0P75_MOHM			0x0
+#define RES_TH_1P00_MOHM			0x1
+#define RES_TH_1P50_MOHM			0x2
+#define RES_TH_3P00_MOHM			0x3
+#define BM_BATT_ID_TH_MASK			GENMASK(3, 2)
+#define BM_FROM_THERM_BIT			BIT(1)
+#define BM_FROM_BATT_ID_BIT			BIT(0)
+
+/* BATT_INFO_WATCHDOG_COUNT */
+#define WATCHDOG_COUNTER			GENMASK(7, 0)
+
+/* BATT_INFO_WATCHDOG_CFG */
+#define RESET_CAPABLE_BIT			BIT(2)
+#define PET_CTRL_BIT				BIT(1)
+#define ENABLE_CTRL_BIT				BIT(0)
+
+/* BATT_INFO_IBATT_SENSING_CFG */
+#define ADC_BITSTREAM_INV_BIT			BIT(4)
+#define SOURCE_SELECT_MASK			GENMASK(1, 0)
+#define SRC_SEL_BATFET				0x0
+#define SRC_SEL_BATFET_SMB			0x2
+#define SRC_SEL_RESERVED			0x3
+
+/* BATT_INFO_QNOVO_CFG */
+#define LD_REG_FORCE_CTL_BIT			BIT(2)
+#define LD_REG_CTRL_FORCE_HIGH			LD_REG_FORCE_CTL_BIT
+#define LD_REG_CTRL_FORCE_LOW			0
+#define LD_REG_CTRL_BIT				BIT(1)
+#define LD_REG_CTRL_REGISTER			LD_REG_CTRL_BIT
+#define LD_REG_CTRL_LOGIC			0
+#define BIT_STREAM_CFG_BIT			BIT(0)
+
+/* BATT_INFO_QNOVO_SCALER */
+#define QNOVO_SCALER_MASK			GENMASK(7, 0)
+
+/* BATT_INFO_CRG_SERVICES */
+#define FG_CRC_TRM_EN_BIT			BIT(0)
+
+/* BATT_INFO_VBATT_LSB/MSB */
+#define VBATT_MASK				GENMASK(7, 0)
+
+/* BATT_INFO_IBATT_LSB/MSB */
+#define IBATT_MASK				GENMASK(7, 0)
+
+/* BATT_INFO_ESR_LSB/MSB */
+#define ESR_LSB_MASK				GENMASK(7, 0)
+#define ESR_MSB_MASK				GENMASK(5, 0)
+
+/* BATT_INFO_VADC_LSB/MSB */
+#define VADC_LSB_MASK				GENMASK(7, 0)
+#define VADC_MSB_MASK				GENMASK(6, 0)
+
+/* BATT_INFO_IADC_LSB/MSB */
+#define IADC_LSB_MASK				GENMASK(7, 0)
+#define IADC_MSB_MASK				GENMASK(6, 0)
+
+/* BATT_INFO_TM_MISC */
+#define FORCE_SEQ_RESP_TOGGLE_BIT		BIT(6)
+#define ALG_DIRECT_VALID_DATA_BIT		BIT(5)
+#define ALG_DIRECT_MODE_EN_BIT			BIT(4)
+#define BATT_VADC_CONV_BIT			BIT(3)
+#define BATT_IADC_CONV_BIT			BIT(2)
+#define ADC_ENABLE_REG_CTRL_BIT			BIT(1)
+#define WDOG_FORCE_EXP_BIT			BIT(0)
+/* only for v1.1 */
+#define ESR_PULSE_FORCE_CTRL_BIT		BIT(7)
+
+/* BATT_INFO_TM_MISC1 */
+/* for v2.0 and above */
+#define ESR_REQ_CTL_BIT				BIT(1)
+#define ESR_REQ_CTL_EN_BIT			BIT(0)
+
+/* FG_MEM_IF register and bit definitions */
+#define MEM_IF_INT_RT_STS(chip)			((chip->mem_if_base) + 0x10)
+#define MEM_IF_MEM_INTF_CFG(chip)		((chip->mem_if_base) + 0x50)
+#define MEM_IF_IMA_CTL(chip)			((chip->mem_if_base) + 0x51)
+#define MEM_IF_IMA_CFG(chip)			((chip->mem_if_base) + 0x52)
+#define MEM_IF_IMA_OPR_STS(chip)		((chip->mem_if_base) + 0x54)
+#define MEM_IF_IMA_EXP_STS(chip)		((chip->mem_if_base) + 0x55)
+#define MEM_IF_IMA_HW_STS(chip)			((chip->mem_if_base) + 0x56)
+#define MEM_IF_FG_BEAT_COUNT(chip)		((chip->mem_if_base) + 0x57)
+#define MEM_IF_IMA_ERR_STS(chip)		((chip->mem_if_base) + 0x5F)
+#define MEM_IF_IMA_BYTE_EN(chip)		((chip->mem_if_base) + 0x60)
+#define MEM_IF_ADDR_LSB(chip)			((chip->mem_if_base) + 0x61)
+#define MEM_IF_ADDR_MSB(chip)			((chip->mem_if_base) + 0x62)
+#define MEM_IF_WR_DATA0(chip)			((chip->mem_if_base) + 0x63)
+#define MEM_IF_WR_DATA3(chip)			((chip->mem_if_base) + 0x66)
+#define MEM_IF_RD_DATA0(chip)			((chip->mem_if_base) + 0x67)
+#define MEM_IF_RD_DATA3(chip)			((chip->mem_if_base) + 0x6A)
+#define MEM_IF_DMA_STS(chip)			((chip->mem_if_base) + 0x70)
+#define MEM_IF_DMA_CTL(chip)			((chip->mem_if_base) + 0x71)
+
+/* MEM_IF_INT_RT_STS */
+#define MEM_XCP_BIT				BIT(1)
+
+/* MEM_IF_MEM_INTF_CFG */
+#define MEM_ACCESS_REQ_BIT			BIT(7)
+#define IACS_SLCT_BIT				BIT(5)
+
+/* MEM_IF_IMA_CTL */
+#define MEM_ACS_BURST_BIT			BIT(7)
+#define IMA_WR_EN_BIT				BIT(6)
+#define IMA_CTL_MASK				GENMASK(7, 6)
+
+/* MEM_IF_IMA_CFG */
+#define IACS_CLR_BIT				BIT(2)
+#define IACS_INTR_SRC_SLCT_BIT			BIT(3)
+#define STATIC_CLK_EN_BIT			BIT(4)
+
+/* MEM_IF_IMA_OPR_STS */
+#define IACS_RDY_BIT				BIT(1)
+
+/* MEM_IF_IMA_EXP_STS */
+#define IACS_ERR_BIT				BIT(0)
+#define XCT_TYPE_ERR_BIT			BIT(1)
+#define DATA_RD_ERR_BIT				BIT(3)
+#define DATA_WR_ERR_BIT				BIT(4)
+#define ADDR_BURST_WRAP_BIT			BIT(5)
+#define ADDR_STABLE_ERR_BIT			BIT(7)
+
+/* MEM_IF_IMA_ERR_STS */
+#define ADDR_STBL_ERR_BIT			BIT(7)
+#define WR_ACS_ERR_BIT				BIT(6)
+#define RD_ACS_ERR_BIT				BIT(5)
+
+/* MEM_IF_FG_BEAT_COUNT */
+#define BEAT_COUNT_MASK				GENMASK(3, 0)
+
+/* MEM_IF_DMA_STS */
+#define DMA_WRITE_ERROR_BIT			BIT(1)
+#define DMA_READ_ERROR_BIT			BIT(2)
+
+/* MEM_IF_DMA_CTL */
+#define DMA_CLEAR_LOG_BIT			BIT(0)
+#endif
diff -Nruw linux-4.4.115/drivers/power/supply/qcom/fg-util.c linux-4.4.115-fbx/drivers/power/supply/qcom/fg-util.c
--- linux-4.4.115/drivers/power/supply/qcom/fg-util.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/power/supply/qcom/fg-util.c	2019-01-22 16:16:26.227271074 +0100
@@ -0,0 +1,963 @@
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/sort.h>
+#include "fg-core.h"
+
+void fg_circ_buf_add(struct fg_circ_buf *buf, int val)
+{
+	buf->arr[buf->head] = val;
+	buf->head = (buf->head + 1) % ARRAY_SIZE(buf->arr);
+	buf->size = min(++buf->size, (int)ARRAY_SIZE(buf->arr));
+}
+
+void fg_circ_buf_clr(struct fg_circ_buf *buf)
+{
+	memset(buf, 0, sizeof(*buf));
+}
+
+int fg_circ_buf_avg(struct fg_circ_buf *buf, int *avg)
+{
+	s64 result = 0;
+	int i;
+
+	if (buf->size == 0)
+		return -ENODATA;
+
+	for (i = 0; i < buf->size; i++)
+		result += buf->arr[i];
+
+	*avg = div_s64(result, buf->size);
+	return 0;
+}
+
+static int cmp_int(const void *a, const void *b)
+{
+	return *(int *)a - *(int *)b;
+}
+
+int fg_circ_buf_median(struct fg_circ_buf *buf, int *median)
+{
+	int *temp;
+
+	if (buf->size == 0)
+		return -ENODATA;
+
+	if (buf->size == 1) {
+		*median = buf->arr[0];
+		return 0;
+	}
+
+	temp = kmalloc_array(buf->size, sizeof(*temp), GFP_KERNEL);
+	if (!temp)
+		return -ENOMEM;
+
+	memcpy(temp, buf->arr, buf->size * sizeof(*temp));
+	sort(temp, buf->size, sizeof(*temp), cmp_int, NULL);
+
+	if (buf->size % 2)
+		*median = temp[buf->size / 2];
+	else
+		*median = (temp[buf->size / 2 - 1] + temp[buf->size / 2]) / 2;
+
+	kfree(temp);
+	return 0;
+}
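+
+/*
+ * Usage sketch (comment only): feed samples into a circular buffer, then
+ * pull the running average and median back out. Both helpers return
+ * -ENODATA on an empty buffer.
+ *
+ *	struct fg_circ_buf buf;
+ *	int avg, median;
+ *
+ *	fg_circ_buf_clr(&buf);
+ *	fg_circ_buf_add(&buf, 100);
+ *	fg_circ_buf_add(&buf, 300);
+ *	fg_circ_buf_add(&buf, 200);
+ *	fg_circ_buf_avg(&buf, &avg);		avg is now 200
+ *	fg_circ_buf_median(&buf, &median);	median is now 200
+ */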
+
+int fg_lerp(const struct fg_pt *pts, size_t tablesize, s32 input, s32 *output)
+{
+	int i;
+	s64 temp;
+
+	if (pts == NULL) {
+		pr_err("Table is NULL\n");
+		return -EINVAL;
+	}
+
+	if (tablesize < 1) {
+		pr_err("Table has no entries\n");
+		return -ENOENT;
+	}
+
+	if (tablesize == 1) {
+		*output = pts[0].y;
+		return 0;
+	}
+
+	if (pts[0].x > pts[1].x) {
+		pr_err("Table is not in acending order\n");
+		return -EINVAL;
+	}
+
+	if (input <= pts[0].x) {
+		*output = pts[0].y;
+		return 0;
+	}
+
+	if (input >= pts[tablesize - 1].x) {
+		*output = pts[tablesize - 1].y;
+		return 0;
+	}
+
+	for (i = 1; i < tablesize; i++) {
+		if (input >= pts[i].x)
+			continue;
+
+		temp = (s64)(pts[i].y - pts[i - 1].y) *
+						(s64)(input - pts[i - 1].x);
+		temp = div_s64(temp, pts[i].x - pts[i - 1].x);
+		*output = temp + pts[i - 1].y;
+		return 0;
+	}
+
+	return -EINVAL;
+}
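+
+/*
+ * Worked example (comment only): with a two-point table
+ * { {.x = 0, .y = 0}, {.x = 100, .y = 1000} }, fg_lerp() clamps inputs
+ * below 0 to y = 0 and above 100 to y = 1000; an input of 25 falls
+ * between the points and interpolates to
+ * (1000 - 0) * (25 - 0) / (100 - 0) + 0 = 250.
+ */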
+
+static struct fg_dbgfs dbgfs_data = {
+	.help_msg = {
+	.data =
+	"FG Debug-FS support\n"
+	"\n"
+	"Hierarchy schema:\n"
+	"/sys/kernel/debug/fg_sram\n"
+	"       /help            -- Static help text\n"
+	"       /address  -- Starting register address for reads or writes\n"
+	"       /count    -- Number of registers to read (only used for reads)\n"
+	"       /data     -- Initiates the SRAM read (formatted output)\n"
+	"\n",
+	},
+};
+
+static bool is_usb_present(struct fg_chip *chip)
+{
+	union power_supply_propval pval = {0, };
+	int rc;
+
+	if (!chip->usb_psy)
+		chip->usb_psy = power_supply_get_by_name("usb");
+
+	if (!chip->usb_psy)
+		return false;
+
+	rc = power_supply_get_property(chip->usb_psy,
+			POWER_SUPPLY_PROP_PRESENT, &pval);
+	if (rc < 0)
+		return false;
+
+	return pval.intval != 0;
+}
+
+static bool is_dc_present(struct fg_chip *chip)
+{
+	union power_supply_propval pval = {0, };
+	int rc;
+
+	if (!chip->dc_psy)
+		chip->dc_psy = power_supply_get_by_name("dc");
+
+	if (!chip->dc_psy)
+		return false;
+
+	rc = power_supply_get_property(chip->dc_psy,
+			POWER_SUPPLY_PROP_PRESENT, &pval);
+	if (rc < 0)
+		return false;
+
+	return pval.intval != 0;
+}
+
+bool is_input_present(struct fg_chip *chip)
+{
+	return is_usb_present(chip) || is_dc_present(chip);
+}
+
+bool is_qnovo_en(struct fg_chip *chip)
+{
+	union power_supply_propval pval = {0, };
+	int rc;
+
+	if (!chip->batt_psy)
+		chip->batt_psy = power_supply_get_by_name("battery");
+
+	if (!chip->batt_psy)
+		return false;
+
+	rc = power_supply_get_property(chip->batt_psy,
+			POWER_SUPPLY_PROP_CHARGE_QNOVO_ENABLE, &pval);
+	if (rc < 0)
+		return false;
+
+	return pval.intval != 0;
+}
+
+#define EXPONENT_SHIFT		11
+#define EXPONENT_OFFSET		-9
+#define MANTISSA_SIGN_BIT	10
+#define MICRO_UNIT		1000000
+s64 fg_float_decode(u16 val)
+{
+	s8 exponent;
+	s32 mantissa;
+
+	/* mantissa bits are shifted out during sign extension */
+	exponent = ((s16)val >> EXPONENT_SHIFT) + EXPONENT_OFFSET;
+	/* exponent bits are shifted out during sign extension */
+	mantissa = sign_extend32(val, MANTISSA_SIGN_BIT) * MICRO_UNIT;
+
+	if (exponent < 0)
+		return (s64)mantissa >> -exponent;
+
+	return (s64)mantissa << exponent;
+}
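+
+/*
+ * Worked example (comment only): the 16-bit float packs a signed exponent
+ * in bits 15:11 (offset by EXPONENT_OFFSET, i.e. -9) and a signed mantissa
+ * in bits 10:0, scaled to micro-units. For val = 0x4801 the exponent field
+ * is 9, so exponent = 9 - 9 = 0, and the mantissa is 1, giving
+ * 1 * 1000000 << 0 = 1000000. For val = 0x4FFF the mantissa field 0x7FF
+ * sign-extends to -1, giving -1000000.
+ */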
+
+void fill_string(char *str, size_t str_len, u8 *buf, int buf_len)
+{
+	int pos = 0;
+	int i;
+
+	for (i = 0; i < buf_len; i++) {
+		pos += scnprintf(str + pos, str_len - pos, "%02x", buf[i]);
+		if (i < buf_len - 1)
+			pos += scnprintf(str + pos, str_len - pos, " ");
+	}
+}
+
+void dump_sram(u8 *buf, int addr, int len)
+{
+	int i;
+	char str[16];
+
+	/*
+	 * Length passed should be in multiple of 4 as each FG SRAM word
+	 * holds 4 bytes. To keep this simple, even if a length which is
+	 * not a multiple of 4 bytes or less than 4 bytes is passed, SRAM
+	 * registers dumped will be always in multiple of 4 bytes.
+	 */
+	for (i = 0; i < len; i += 4) {
+		str[0] = '\0';
+		fill_string(str, sizeof(str), buf + i, 4);
+		pr_info("%03d %s\n", addr + (i / 4), str);
+	}
+}
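+
+/*
+ * Example (comment only): dump_sram(buf, 19, 8) logs two 4-byte SRAM
+ * words, one word address per line, e.g.:
+ *
+ *	019 cd 00 e5 04
+ *	020 ff 2a 00 00
+ */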
+
+static inline bool fg_sram_address_valid(u16 address, int len)
+{
+	if (address > FG_SRAM_ADDRESS_MAX)
+		return false;
+
+	if ((address + DIV_ROUND_UP(len, 4)) > FG_SRAM_ADDRESS_MAX + 1)
+		return false;
+
+	return true;
+}
+
+#define SOC_UPDATE_WAIT_MS	1500
+int fg_sram_write(struct fg_chip *chip, u16 address, u8 offset,
+			u8 *val, int len, int flags)
+{
+	int rc = 0, tries = 0;
+	bool atomic_access = false;
+
+	if (!chip)
+		return -ENXIO;
+
+	if (chip->battery_missing)
+		return -ENODATA;
+
+	if (!fg_sram_address_valid(address, len))
+		return -EFAULT;
+
+	if (!(flags & FG_IMA_NO_WLOCK))
+		vote(chip->awake_votable, SRAM_WRITE, true, 0);
+	mutex_lock(&chip->sram_rw_lock);
+
+	if ((flags & FG_IMA_ATOMIC) && chip->irqs[SOC_UPDATE_IRQ].irq) {
+		/*
+		 * This interrupt needs to be enabled only when it is
+		 * required. It is kept disabled at all other times.
+		 */
+		reinit_completion(&chip->soc_update);
+		enable_irq(chip->irqs[SOC_UPDATE_IRQ].irq);
+		atomic_access = true;
+	} else {
+		flags = FG_IMA_DEFAULT;
+	}
+
+	/*
+	 * Atomic access means waiting for the SOC_UPDATE interrupt from
+	 * FG_ALG and doing the transaction after that. This makes sure
+	 * that no SOC update happens while an IMA write is in progress.
+	 * The SOC_UPDATE interrupt fires every FG cycle (~1.47 seconds).
+	 */
+	if (atomic_access) {
+		for (tries = 0; tries < 2; tries++) {
+			/* Wait for SOC_UPDATE completion */
+			rc = wait_for_completion_interruptible_timeout(
+				&chip->soc_update,
+				msecs_to_jiffies(SOC_UPDATE_WAIT_MS));
+			if (rc > 0) {
+				rc = 0;
+				break;
+			} else if (!rc) {
+				rc = -ETIMEDOUT;
+			}
+		}
+
+		if (rc < 0) {
+			pr_err("wait for soc_update timed out rc=%d\n", rc);
+			goto out;
+		}
+	}
+
+	rc = fg_interleaved_mem_write(chip, address, offset, val, len,
+			atomic_access);
+	if (rc < 0)
+		pr_err("Error in writing SRAM address 0x%x[%d], rc=%d\n",
+			address, offset, rc);
+out:
+	if (atomic_access)
+		disable_irq_nosync(chip->irqs[SOC_UPDATE_IRQ].irq);
+
+	mutex_unlock(&chip->sram_rw_lock);
+	if (!(flags & FG_IMA_NO_WLOCK))
+		vote(chip->awake_votable, SRAM_WRITE, false, 0);
+	return rc;
+}
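+
+/*
+ * Usage sketch (comment only): a one-byte SRAM write fenced against the
+ * next SOC_UPDATE via FG_IMA_ATOMIC. EXAMPLE_WORD and EXAMPLE_OFFSET are
+ * made-up names for illustration.
+ *
+ *	u8 val = 0x01;
+ *	int rc = fg_sram_write(chip, EXAMPLE_WORD, EXAMPLE_OFFSET, &val, 1,
+ *				FG_IMA_ATOMIC);
+ *	if (rc < 0)
+ *		pr_err("atomic SRAM write failed, rc=%d\n", rc);
+ */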
+
+int fg_sram_read(struct fg_chip *chip, u16 address, u8 offset,
+			u8 *val, int len, int flags)
+{
+	int rc = 0;
+
+	if (!chip)
+		return -ENXIO;
+
+	if (chip->battery_missing)
+		return -ENODATA;
+
+	if (!fg_sram_address_valid(address, len))
+		return -EFAULT;
+
+	if (!(flags & FG_IMA_NO_WLOCK))
+		vote(chip->awake_votable, SRAM_READ, true, 0);
+	mutex_lock(&chip->sram_rw_lock);
+
+	rc = fg_interleaved_mem_read(chip, address, offset, val, len);
+	if (rc < 0)
+		pr_err("Error in reading SRAM address 0x%x[%d], rc=%d\n",
+			address, offset, rc);
+
+	mutex_unlock(&chip->sram_rw_lock);
+	if (!(flags & FG_IMA_NO_WLOCK))
+		vote(chip->awake_votable, SRAM_READ, false, 0);
+	return rc;
+}
+
+int fg_sram_masked_write(struct fg_chip *chip, u16 address, u8 offset,
+			u8 mask, u8 val, int flags)
+{
+	int rc = 0;
+	u8 buf[4];
+
+	rc = fg_sram_read(chip, address, 0, buf, 4, flags);
+	if (rc < 0) {
+		pr_err("sram read failed: address=%03X, rc=%d\n", address, rc);
+		return rc;
+	}
+
+	buf[offset] &= ~mask;
+	buf[offset] |= val & mask;
+
+	rc = fg_sram_write(chip, address, 0, buf, 4, flags);
+	if (rc < 0) {
+		pr_err("sram write failed: address=%03X, rc=%d\n", address, rc);
+		return rc;
+	}
+
+	return rc;
+}
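+
+/*
+ * Usage sketch (comment only): update bits 3:0 of byte 2 of an SRAM word
+ * without disturbing its neighbours. The helper reads the full 4-byte
+ * word, patches byte 'offset' and writes the word back; EXAMPLE_WORD is
+ * a made-up address.
+ *
+ *	rc = fg_sram_masked_write(chip, EXAMPLE_WORD, 2, GENMASK(3, 0),
+ *				0x5, FG_IMA_DEFAULT);
+ */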
+
+int fg_read(struct fg_chip *chip, int addr, u8 *val, int len)
+{
+	int rc, i;
+
+	if (!chip || !chip->regmap)
+		return -ENXIO;
+
+	rc = regmap_bulk_read(chip->regmap, addr, val, len);
+
+	if (rc < 0) {
+		dev_err(chip->dev, "regmap_read failed for address %04x rc=%d\n",
+			addr, rc);
+		return rc;
+	}
+
+	if (*chip->debug_mask & FG_BUS_READ) {
+		pr_info("length %d addr=%04x\n", len, addr);
+		for (i = 0; i < len; i++)
+			pr_info("val[%d]: %02x\n", i, val[i]);
+	}
+
+	return 0;
+}
+
+int fg_write(struct fg_chip *chip, int addr, u8 *val, int len)
+{
+	int rc, i;
+	bool sec_access = false;
+
+	if (!chip || !chip->regmap)
+		return -ENXIO;
+
+	mutex_lock(&chip->bus_lock);
+	sec_access = (addr & 0x00FF) > 0xD0;
+	if (sec_access) {
+		rc = regmap_write(chip->regmap, (addr & 0xFF00) | 0xD0, 0xA5);
+		if (rc < 0) {
+			dev_err(chip->dev, "regmap_write failed for address %x rc=%d\n",
+				addr, rc);
+			goto out;
+		}
+	}
+
+	if (len > 1)
+		rc = regmap_bulk_write(chip->regmap, addr, val, len);
+	else
+		rc = regmap_write(chip->regmap, addr, *val);
+
+	if (rc < 0) {
+		dev_err(chip->dev, "regmap_write failed for address %04x rc=%d\n",
+			addr, rc);
+		goto out;
+	}
+
+	if (*chip->debug_mask & FG_BUS_WRITE) {
+		pr_info("length %d addr=%04x\n", len, addr);
+		for (i = 0; i < len; i++)
+			pr_info("val[%d]: %02x\n", i, val[i]);
+	}
+out:
+	mutex_unlock(&chip->bus_lock);
+	return rc;
+}
+
+int fg_masked_write(struct fg_chip *chip, int addr, u8 mask, u8 val)
+{
+	int rc;
+	bool sec_access = false;
+
+	if (!chip || !chip->regmap)
+		return -ENXIO;
+
+	mutex_lock(&chip->bus_lock);
+	sec_access = (addr & 0x00FF) > 0xD0;
+	if (sec_access) {
+		rc = regmap_write(chip->regmap, (addr & 0xFF00) | 0xD0, 0xA5);
+		if (rc < 0) {
+			dev_err(chip->dev, "regmap_write failed for address %x rc=%d\n",
+				addr, rc);
+			goto out;
+		}
+	}
+
+	rc = regmap_update_bits(chip->regmap, addr, mask, val);
+	if (rc < 0) {
+		dev_err(chip->dev, "regmap_update_bits failed for address %04x rc=%d\n",
+			addr, rc);
+		goto out;
+	}
+
+	fg_dbg(chip, FG_BUS_WRITE, "addr=%04x mask: %02x val: %02x\n", addr,
+		mask, val);
+out:
+	mutex_unlock(&chip->bus_lock);
+	return rc;
+}
+
+int64_t twos_compliment_extend(int64_t val, int sign_bit_pos)
+{
+	int i, nbytes = DIV_ROUND_UP(sign_bit_pos, 8);
+	int64_t mask, val_out;
+
+	val_out = val;
+	mask = 1 << sign_bit_pos;
+	if (val & mask) {
+		for (i = 8; i > nbytes; i--) {
+			mask = 0xFFLL << ((i - 1) * 8);
+			val_out |= mask;
+		}
+
+		if ((nbytes * 8) - 1 > sign_bit_pos) {
+			mask = 1 << sign_bit_pos;
+			for (i = 1; i <= (nbytes * 8) - sign_bit_pos; i++)
+				val_out |= mask << i;
+		}
+	}
+
+	pr_debug("nbytes: %d val: %llx val_out: %llx\n", nbytes, val, val_out);
+	return val_out;
+}
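+
+/*
+ * Worked example (comment only): twos_compliment_extend(0x400, 10) sees
+ * the sign bit (bit 10) set, fills every bit above it and returns
+ * 0xFFFFFFFFFFFFFC00, i.e. -1024. twos_compliment_extend(0x3FF, 10)
+ * returns 0x3FF unchanged, since the sign bit is clear.
+ */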
+
+/* All the debugfs related functions are defined below */
+static int fg_sram_dfs_open(struct inode *inode, struct file *file)
+{
+	struct fg_log_buffer *log;
+	struct fg_trans *trans;
+	u8 *data_buf;
+
+	size_t logbufsize = SZ_4K;
+	size_t databufsize = SZ_4K;
+
+	if (!dbgfs_data.chip) {
+		pr_err("Not initialized data\n");
+		return -EINVAL;
+	}
+
+	/* Per file "transaction" data */
+	trans = devm_kzalloc(dbgfs_data.chip->dev, sizeof(*trans), GFP_KERNEL);
+	if (!trans)
+		return -ENOMEM;
+
+	/* Allocate log buffer */
+	log = devm_kzalloc(dbgfs_data.chip->dev, logbufsize, GFP_KERNEL);
+	if (!log)
+		return -ENOMEM;
+
+	log->rpos = 0;
+	log->wpos = 0;
+	log->len = logbufsize - sizeof(*log);
+
+	/* Allocate data buffer */
+	data_buf = devm_kzalloc(dbgfs_data.chip->dev, databufsize, GFP_KERNEL);
+	if (!data_buf)
+		return -ENOMEM;
+
+	trans->log = log;
+	trans->data = data_buf;
+	trans->cnt = dbgfs_data.cnt;
+	trans->addr = dbgfs_data.addr;
+	trans->chip = dbgfs_data.chip;
+	trans->offset = trans->addr;
+	mutex_init(&trans->fg_dfs_lock);
+
+	file->private_data = trans;
+	return 0;
+}
+
+static int fg_sram_dfs_close(struct inode *inode, struct file *file)
+{
+	struct fg_trans *trans = file->private_data;
+
+	if (trans && trans->log && trans->data) {
+		file->private_data = NULL;
+		mutex_destroy(&trans->fg_dfs_lock);
+		devm_kfree(trans->chip->dev, trans->log);
+		devm_kfree(trans->chip->dev, trans->data);
+		devm_kfree(trans->chip->dev, trans);
+	}
+
+	return 0;
+}
+
+/**
+ * print_to_log: format a string and place into the log buffer
+ * @log: The log buffer to place the result into.
+ * @fmt: The format string to use.
+ * @...: The arguments for the format string.
+ *
+ * The return value is the number of characters written to @log buffer
+ * not including the trailing '\0'.
+ */
+static int print_to_log(struct fg_log_buffer *log, const char *fmt, ...)
+{
+	va_list args;
+	int cnt;
+	char *buf = &log->data[log->wpos];
+	size_t size = log->len - log->wpos;
+
+	va_start(args, fmt);
+	cnt = vscnprintf(buf, size, fmt, args);
+	va_end(args);
+
+	log->wpos += cnt;
+	return cnt;
+}
+
+/**
+ * write_next_line_to_log: Writes a single "line" of data into the log buffer
+ * @trans: Pointer to SRAM transaction data.
+ * @offset: SRAM address offset to start reading from.
+ * @pcnt: Pointer to 'cnt' variable.  Indicates the number of bytes to read.
+ *
+ * The 'offset' is a 12-bit SRAM address.
+ *
+ * On a successful read, the pcnt is decremented by the number of data
+ * bytes read from the SRAM.  When the cnt reaches 0, all requested bytes have
+ * been read.
+ */
+static int write_next_line_to_log(struct fg_trans *trans, int offset,
+				size_t *pcnt)
+{
+	int i;
+	u8 data[ITEMS_PER_LINE];
+	u16 address;
+	struct fg_log_buffer *log = trans->log;
+	int cnt = 0;
+	int items_to_read = min(ARRAY_SIZE(data), *pcnt);
+	int items_to_log = min(ITEMS_PER_LINE, items_to_read);
+
+	/* Buffer needs enough space for an entire line */
+	if ((log->len - log->wpos) < MAX_LINE_LENGTH)
+		goto done;
+
+	memcpy(data, trans->data + (offset - trans->addr), items_to_read);
+	*pcnt -= items_to_read;
+
+	/* the address is now in words and increments by 1 per line */
+	address = trans->addr + ((offset - trans->addr) / ITEMS_PER_LINE);
+	cnt = print_to_log(log, "%3.3d ", address & 0xfff);
+	if (cnt == 0)
+		goto done;
+
+	/* Log the data items */
+	for (i = 0; i < items_to_log; ++i) {
+		cnt = print_to_log(log, "%2.2X ", data[i]);
+		if (cnt == 0)
+			goto done;
+	}
+
+	/* If the last character was a space, then replace it with a newline */
+	if (log->wpos > 0 && log->data[log->wpos - 1] == ' ')
+		log->data[log->wpos - 1] = '\n';
+
+done:
+	return cnt;
+}
+
+/**
+ * get_log_data - reads data from SRAM and saves to the log buffer
+ * @trans: Pointer to SRAM transaction data.
+ *
+ * Returns the number of "items" read or SPMI error code for read failures.
+ */
+static int get_log_data(struct fg_trans *trans)
+{
+	int cnt, rc;
+	int last_cnt;
+	int items_read;
+	int total_items_read = 0;
+	u32 offset = trans->offset;
+	size_t item_cnt = trans->cnt;
+	struct fg_log_buffer *log = trans->log;
+
+	if (item_cnt == 0)
+		return 0;
+
+	if (item_cnt > SZ_4K) {
+		pr_err("Reading too many bytes\n");
+		return -EINVAL;
+	}
+
+	pr_debug("addr: %d offset: %d count: %d\n", trans->addr, trans->offset,
+		trans->cnt);
+	rc = fg_sram_read(trans->chip, trans->addr, 0,
+			trans->data, trans->cnt, 0);
+	if (rc < 0) {
+		pr_err("SRAM read failed: rc = %d\n", rc);
+		return rc;
+	}
+	/* Reset the log buffer 'pointers' */
+	log->wpos = log->rpos = 0;
+
+	/* Keep reading data until the log is full */
+	do {
+		last_cnt = item_cnt;
+		cnt = write_next_line_to_log(trans, offset, &item_cnt);
+		items_read = last_cnt - item_cnt;
+		offset += items_read;
+		total_items_read += items_read;
+	} while (cnt && item_cnt > 0);
+
+	/* Adjust the transaction offset and count */
+	trans->cnt = item_cnt;
+	trans->offset += total_items_read;
+
+	return total_items_read;
+}
+
+/**
+ * fg_sram_dfs_reg_read: reads value(s) from SRAM and fills the user's buffer
+ *  with a byte array (coded as a string)
+ * @file: file pointer
+ * @buf: where to put the result
+ * @count: maximum space available in @buf
+ * @ppos: starting position
+ * @return number of user bytes read, or negative error value
+ */
+static ssize_t fg_sram_dfs_reg_read(struct file *file, char __user *buf,
+	size_t count, loff_t *ppos)
+{
+	struct fg_trans *trans = file->private_data;
+	struct fg_log_buffer *log = trans->log;
+	size_t ret;
+	size_t len;
+
+	mutex_lock(&trans->fg_dfs_lock);
+	/* Is the log buffer empty? */
+	if (log->rpos >= log->wpos) {
+		if (get_log_data(trans) <= 0) {
+			len = 0;
+			goto unlock_mutex;
+		}
+	}
+
+	len = min(count, log->wpos - log->rpos);
+
+	ret = copy_to_user(buf, &log->data[log->rpos], len);
+	if (ret == len) {
+		pr_err("error copy sram register values to user\n");
+		len = -EFAULT;
+		goto unlock_mutex;
+	}
+
+	/* 'ret' is the number of bytes not copied */
+	len -= ret;
+
+	*ppos += len;
+	log->rpos += len;
+
+unlock_mutex:
+	mutex_unlock(&trans->fg_dfs_lock);
+	return len;
+}
+
+/**
+ * fg_sram_dfs_reg_write: write user's byte array (coded as string) to SRAM.
+ * @file: file pointer
+ * @buf: user data to be written.
+ * @count: maximum space available in @buf
+ * @ppos: starting position
+ * @return number of user bytes written, or negative error value
+ */
+static ssize_t fg_sram_dfs_reg_write(struct file *file, const char __user *buf,
+			size_t count, loff_t *ppos)
+{
+	int bytes_read;
+	int data;
+	int pos = 0;
+	int cnt = 0;
+	u8  *values;
+	char *kbuf;
+	size_t ret = 0;
+	struct fg_trans *trans = file->private_data;
+	u32 address = trans->addr;
+
+	mutex_lock(&trans->fg_dfs_lock);
+	/* Make a copy of the user data */
+	kbuf = kmalloc(count + 1, GFP_KERNEL);
+	if (!kbuf) {
+		ret = -ENOMEM;
+		goto unlock_mutex;
+	}
+
+	ret = copy_from_user(kbuf, buf, count);
+	if (ret == count) {
+		pr_err("failed to copy data from user\n");
+		ret = -EFAULT;
+		goto free_buf;
+	}
+
+	count -= ret;
+	*ppos += count;
+	kbuf[count] = '\0';
+
+	/* Reuse the text buffer to hold the parsed raw data */
+	values = kbuf;
+
+	/* Parse the data in the buffer.  It should be a string of numbers */
+	while ((pos < count) &&
+		sscanf(kbuf + pos, "%i%n", &data, &bytes_read) == 1) {
+		/*
+		 * A single parsed token should not exceed 5 characters for
+		 * this to stay functionally correct. Also make sure that
+		 * pos never overflows beyond INT_MAX.
+		 */
+		if (bytes_read > 5 || bytes_read > INT_MAX - pos) {
+			cnt = 0;
+			ret = -EINVAL;
+			break;
+		}
+		pos += bytes_read;
+		values[cnt++] = data & 0xff;
+	}
+
+	if (!cnt)
+		goto free_buf;
+
+	pr_debug("address %d, count %d\n", address, cnt);
+	/* Perform the write(s) */
+
+	ret = fg_sram_write(trans->chip, address, 0, values, cnt, 0);
+	if (ret) {
+		pr_err("SRAM write failed, err = %zu\n", ret);
+	} else {
+		ret = count;
+		trans->offset += cnt > 4 ? 4 : cnt;
+	}
+
+free_buf:
+	kfree(kbuf);
+unlock_mutex:
+	mutex_unlock(&trans->fg_dfs_lock);
+	return ret;
+}
+
+static const struct file_operations fg_sram_dfs_reg_fops = {
+	.open		= fg_sram_dfs_open,
+	.release	= fg_sram_dfs_close,
+	.read		= fg_sram_dfs_reg_read,
+	.write		= fg_sram_dfs_reg_write,
+};
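+
+/*
+ * Usage sketch (comment only, assuming debugfs is mounted at
+ * /sys/kernel/debug): read three SRAM words starting at address 0x13,
+ * then write two bytes back to the same address.
+ *
+ *	echo 0x13 > /sys/kernel/debug/fg/sram/address
+ *	echo 3 > /sys/kernel/debug/fg/sram/count
+ *	cat /sys/kernel/debug/fg/sram/data
+ *	echo 0xAB 0xCD > /sys/kernel/debug/fg/sram/data
+ */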
+
+/*
+ * fg_sram_debugfs_create: adds a new fg_sram debugfs entry
+ * @return zero on success
+ */
+static int fg_sram_debugfs_create(struct fg_chip *chip)
+{
+	struct dentry *dfs_sram;
+	struct dentry *file;
+	mode_t dfs_mode = S_IRUSR | S_IWUSR;
+
+	pr_debug("Creating FG_SRAM debugfs file-system\n");
+	dfs_sram = debugfs_create_dir("sram", chip->dfs_root);
+	if (!dfs_sram) {
+		pr_err("error creating fg sram dfs rc=%ld\n",
+		       (long)dfs_sram);
+		return -ENOMEM;
+	}
+
+	dbgfs_data.help_msg.size = strlen(dbgfs_data.help_msg.data);
+	file = debugfs_create_blob("help", S_IRUGO, dfs_sram,
+					&dbgfs_data.help_msg);
+	if (!file) {
+		pr_err("error creating help entry\n");
+		goto err_remove_fs;
+	}
+
+	dbgfs_data.chip = chip;
+
+	file = debugfs_create_u32("count", dfs_mode, dfs_sram,
+					&(dbgfs_data.cnt));
+	if (!file) {
+		pr_err("error creating 'count' entry\n");
+		goto err_remove_fs;
+	}
+
+	file = debugfs_create_x32("address", dfs_mode, dfs_sram,
+					&(dbgfs_data.addr));
+	if (!file) {
+		pr_err("error creating 'address' entry\n");
+		goto err_remove_fs;
+	}
+
+	file = debugfs_create_file("data", dfs_mode, dfs_sram, &dbgfs_data,
+					&fg_sram_dfs_reg_fops);
+	if (!file) {
+		pr_err("error creating 'data' entry\n");
+		goto err_remove_fs;
+	}
+
+	return 0;
+
+err_remove_fs:
+	debugfs_remove_recursive(dfs_sram);
+	return -ENOMEM;
+}
+
+static int fg_alg_flags_open(struct inode *inode, struct file *file)
+{
+	file->private_data = inode->i_private;
+	return 0;
+}
+
+static ssize_t fg_alg_flags_read(struct file *file, char __user *userbuf,
+				 size_t count, loff_t *ppos)
+{
+	struct fg_chip *chip = file->private_data;
+	char buf[512];
+	u8 alg_flags = 0;
+	int rc, i, len;
+
+	rc = fg_sram_read(chip, chip->sp[FG_SRAM_ALG_FLAGS].addr_word,
+			  chip->sp[FG_SRAM_ALG_FLAGS].addr_byte, &alg_flags, 1,
+			  FG_IMA_DEFAULT);
+	if (rc < 0) {
+		pr_err("failed to read algorithm flags rc=%d\n", rc);
+		return -EFAULT;
+	}
+
+	len = 0;
+	for (i = 0; i < ALG_FLAG_MAX; ++i) {
+		if (len > ARRAY_SIZE(buf) - 1)
+			return -EFAULT;
+		if (chip->alg_flags[i].invalid)
+			continue;
+
+		len += snprintf(buf + len, sizeof(buf) - sizeof(*buf) * len,
+				"%s = %d\n", chip->alg_flags[i].name,
+				(bool)(alg_flags & chip->alg_flags[i].bit));
+	}
+
+	return simple_read_from_buffer(userbuf, count, ppos, buf, len);
+}
+
+static const struct file_operations fg_alg_flags_fops = {
+	.open = fg_alg_flags_open,
+	.read = fg_alg_flags_read,
+};
+
+int fg_debugfs_create(struct fg_chip *chip)
+{
+	int rc;
+
+	pr_debug("Creating debugfs file-system\n");
+	chip->dfs_root = debugfs_create_dir("fg", NULL);
+	if (IS_ERR_OR_NULL(chip->dfs_root)) {
+		if (PTR_ERR(chip->dfs_root) == -ENODEV)
+			pr_err("debugfs is not enabled in the kernel\n");
+		else
+			pr_err("error creating fg dfs root rc=%ld\n",
+			       (long)chip->dfs_root);
+		return -ENODEV;
+	}
+
+	rc = fg_sram_debugfs_create(chip);
+	if (rc < 0) {
+		pr_err("failed to create sram dfs rc=%d\n", rc);
+		goto err_remove_fs;
+	}
+
+	if (!debugfs_create_file("alg_flags", S_IRUSR, chip->dfs_root, chip,
+				 &fg_alg_flags_fops)) {
+		pr_err("failed to create alg_flags file\n");
+		goto err_remove_fs;
+	}
+
+	return 0;
+
+err_remove_fs:
+	debugfs_remove_recursive(chip->dfs_root);
+	return -ENOMEM;
+}
diff -Nruw linux-4.4.115/drivers/power/supply/qcom/Kconfig linux-4.4.115-fbx/drivers/power/supply/qcom/Kconfig
--- linux-4.4.115/drivers/power/supply/qcom/Kconfig	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/power/supply/qcom/Kconfig	2019-01-22 16:16:26.227271074 +0100
@@ -0,0 +1,103 @@
+menu "Qualcomm Technologies Inc Charger and Fuel Gauge support"
+
+config QPNP_SMBCHARGER
+	tristate "QPNP SMB Charger driver"
+	depends on MFD_SPMI_PMIC
+	help
+	  Say Y here to enable the dual path switch mode battery charger which
+	  supports USB detection and battery charging up to 3A.
+	  The driver also offers relevant information to userspace via the
+	  power supply framework.
+
+config QPNP_FG
+	tristate "QPNP fuel gauge driver"
+	depends on MFD_SPMI_PMIC
+	help
+	  Say Y here to enable the Fuel Gauge driver. This adds support for
+	  battery fuel gauging and state of charge of battery connected to the
+	  fuel gauge. The state of charge is reported through a BMS power
+	  supply property and also sends uevents when the capacity is updated.
+
+config QPNP_FG_GEN3
+	tristate "QPNP GEN3 fuel gauge driver"
+	depends on MFD_SPMI_PMIC
+	help
+	  Say Y here to enable the GEN3 Fuel Gauge driver. This adds support
+	  for battery fuel gauging and state of charge of battery connected to
+	  the fuel gauge. The state of charge is reported through a BMS power
+	  supply property and also sends uevents when the capacity is updated.
+
+config SMB135X_CHARGER
+	tristate "SMB135X Battery Charger"
+	depends on I2C
+	help
+	  Say Y to include support for SMB135X Battery Charger.
+	  SMB135X is a dual path switching mode charger capable of charging
+	  the battery with 3Amps of current.
+	  The driver supports charger enable/disable.
+	  The driver reports the charger status via the power supply framework.
+	  A charger status change triggers an IRQ via the device STAT pin.
+
+config SMB1351_USB_CHARGER
+	tristate "smb1351 usb charger (with VBUS detection)"
+	depends on I2C
+	help
+	  Say Y to enable support for the SMB1351 switching mode based charger.
+	  The driver supports charging control (enable/disable) and
+	  charge-current limiting. It also provides USB VBUS detection and
+	  notification support. The driver controls SMB1351 via I2C and
+	  supports a device-tree interface.
+
+config MSM_BCL_CTL
+	bool "BCL Framework driver"
+	help
+	  Say Y here to enable this BCL Framework driver. This driver provides
+	  interface, which can be used by the BCL h/w drivers to implement the
+	  basic functionalities. This framework abstracts the underlying
+	  hardware for the top level modules.
+
+config MSM_BCL_PERIPHERAL_CTL
+	bool "BCL driver to control the PMIC BCL peripheral"
+	depends on SPMI
+	depends on MSM_BCL_CTL
+	help
+	  Say Y here to enable this BCL PMIC peripheral driver. This driver
+	  provides routines to configure and monitor the BCL
+	  PMIC peripheral.
+
+config BATTERY_BCL
+	tristate "Battery Current Limit driver"
+	depends on THERMAL_MONITOR
+	help
+	  Say Y here to enable support for the battery current limit
+	  device. The BCL driver will poll the BMS if the thermal daemon
+	  enables BCL. It will notify the thermal daemon if IBat crosses
+	  the Imax threshold.
+
+config QPNP_SMB2
+	tristate "SMB2 Battery Charger"
+	depends on MFD_SPMI_PMIC
+	help
+	  Enables support for the SMB2 charging peripheral
+
+config SMB138X_CHARGER
+	tristate "SMB138X Battery Charger"
+	depends on MFD_I2C_PMIC
+	help
+	  Say Y to include support for SMB138X Battery Charger.
+	  SMB1380 is a dual phase 6A battery charger, and SMB1381 is a single
+	  phase 5A battery charger.
+	  The driver supports charger enable/disable.
+	  The driver reports the charger status via the power supply framework.
+	  A charger status change triggers an IRQ via the device STAT pin.
+
+config QPNP_QNOVO
+	bool "QPNP QNOVO driver"
+	depends on MFD_SPMI_PMIC
+	help
+	  Say Y here to enable the Qnovo pulse charging engine. Qnovo driver
+	  accepts pulse parameters via sysfs entries and programs the hardware
+	  module. It also allows userspace code to read diagnostics of voltage
+	  and current measured during certain phases of the pulses.
+
+endmenu
diff -Nruw linux-4.4.115/drivers/power/supply/qcom/Makefile linux-4.4.115-fbx/drivers/power/supply/qcom/Makefile
--- linux-4.4.115/drivers/power/supply/qcom/Makefile	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/power/supply/qcom/Makefile	2019-01-22 16:16:26.227271074 +0100
@@ -0,0 +1,11 @@
+obj-$(CONFIG_QPNP_SMBCHARGER)	+= qpnp-smbcharger.o batterydata-lib.o pmic-voter.o
+obj-$(CONFIG_QPNP_FG)		+= qpnp-fg.o
+obj-$(CONFIG_QPNP_FG_GEN3)     += qpnp-fg-gen3.o fg-memif.o fg-util.o
+obj-$(CONFIG_SMB135X_CHARGER)   += smb135x-charger.o pmic-voter.o
+obj-$(CONFIG_SMB1351_USB_CHARGER) +=  battery.o smb1351-charger.o pmic-voter.o
+obj-$(CONFIG_MSM_BCL_CTL)	+= msm_bcl.o
+obj-$(CONFIG_MSM_BCL_PERIPHERAL_CTL) += bcl_peripheral.o
+obj-$(CONFIG_BATTERY_BCL) += battery_current_limit.o
+obj-$(CONFIG_QPNP_SMB2)		+= step-chg-jeita.o battery.o qpnp-smb2.o smb-lib.o pmic-voter.o storm-watch.o
+obj-$(CONFIG_SMB138X_CHARGER)	+= battery.o smb138x-charger.o smb-lib.o pmic-voter.o storm-watch.o
+obj-$(CONFIG_QPNP_QNOVO)	+= battery.o qpnp-qnovo.o
diff -Nruw linux-4.4.115/drivers/power/supply/qcom/msm_bcl.c linux-4.4.115-fbx/drivers/power/supply/qcom/msm_bcl.c
--- linux-4.4.115/drivers/power/supply/qcom/msm_bcl.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/power/supply/qcom/msm_bcl.c	2019-01-22 16:16:26.227271074 +0100
@@ -0,0 +1,374 @@
+/* Copyright (c) 2014, 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) "%s:%s " fmt, KBUILD_MODNAME, __func__
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/workqueue.h>
+#include <linux/kernel.h>
+#include <linux/io.h>
+#include <linux/err.h>
+#include <linux/of.h>
+#include <linux/sysfs.h>
+#include <linux/mutex.h>
+#include <linux/msm_bcl.h>
+#include <linux/slab.h>
+
+#define BCL_PARAM_MAX_ATTR      3
+
+#define BCL_DEFINE_RO_PARAM(_attr, _name, _attr_gp, _index) \
+	_attr.attr.name = __stringify(_name); \
+	_attr.attr.mode = 0444; \
+	_attr.show = _name##_show; \
+	_attr_gp.attrs[_index] = &_attr.attr;
+
+static struct bcl_param_data *bcl[BCL_PARAM_MAX];
+
+static ssize_t high_trip_show(struct kobject *kobj, struct kobj_attribute *attr,
+				char *buf)
+{
+	int val = 0, ret = 0;
+	struct bcl_param_data *dev_param = container_of(attr,
+			struct bcl_param_data, high_trip_attr);
+
+	if (!dev_param->registered)
+		return -ENODEV;
+
+	ret = dev_param->ops->get_high_trip(&val);
+	if (ret) {
+		pr_err("High trip value read failed. err:%d\n", ret);
+		return ret;
+	}
+
+	return snprintf(buf, PAGE_SIZE, "%d\n", val);
+}
+
+static ssize_t low_trip_show(struct kobject *kobj, struct kobj_attribute *attr,
+				char *buf)
+{
+	int val = 0, ret = 0;
+	struct bcl_param_data *dev_param = container_of(attr,
+			struct bcl_param_data, low_trip_attr);
+
+	if (!dev_param->registered)
+		return -ENODEV;
+
+	ret = dev_param->ops->get_low_trip(&val);
+	if (ret) {
+		pr_err("Low trip value read failed. err:%d\n", ret);
+		return ret;
+	}
+
+	return snprintf(buf, PAGE_SIZE, "%d\n", val);
+}
+
+static ssize_t value_show(struct kobject *kobj, struct kobj_attribute *attr,
+				char *buf)
+{
+	int32_t val = 0, ret = 0;
+	struct bcl_param_data *dev_param = container_of(attr,
+			struct bcl_param_data, val_attr);
+
+	if (!dev_param->registered)
+		return -ENODEV;
+
+	ret = dev_param->ops->read(&val);
+	if (ret) {
+		pr_err("Value read failed. err:%d\n", ret);
+		return ret;
+	}
+	dev_param->last_read_val = val;
+
+	return snprintf(buf, PAGE_SIZE, "%d\n", val);
+}
+
+int msm_bcl_set_threshold(enum bcl_param param_type,
+	enum bcl_trip_type trip_type, struct bcl_threshold *inp_thresh)
+{
+	int ret = 0;
+
+	if (param_type >= BCL_PARAM_MAX || !bcl[param_type]
+		|| !bcl[param_type]->registered) {
+		pr_err("BCL not initialized\n");
+		return -EINVAL;
+	}
+	if ((!inp_thresh)
+		|| (inp_thresh->trip_value < 0)
+		|| (!inp_thresh->trip_notify)
+		|| (trip_type >= BCL_TRIP_MAX)) {
+		pr_err("Invalid Input\n");
+		return -EINVAL;
+	}
+
+	bcl[param_type]->thresh[trip_type] = inp_thresh;
+	if (trip_type == BCL_HIGH_TRIP) {
+		bcl[param_type]->high_trip = inp_thresh->trip_value;
+		ret = bcl[param_type]->ops->set_high_trip(
+			inp_thresh->trip_value);
+	} else {
+		bcl[param_type]->low_trip = inp_thresh->trip_value;
+		ret = bcl[param_type]->ops->set_low_trip(
+			inp_thresh->trip_value);
+	}
+	if (ret) {
+		pr_err("Error setting trip%d for param%d. err:%d\n", trip_type,
+				 param_type, ret);
+		return ret;
+	}
+
+	return ret;
+}
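+
+/*
+ * Usage sketch (comment only): arm a high trip on the current parameter.
+ * example_ibat_notify is a made-up callback name; the registered h/w
+ * driver invokes it through bcl_thresh_notify() when the level is crossed.
+ *
+ *	static struct bcl_threshold ibat_high = {
+ *		.trip_value = 4200,
+ *		.trip_notify = example_ibat_notify,
+ *		.trip_data = NULL,
+ *	};
+ *
+ *	msm_bcl_set_threshold(BCL_PARAM_CURRENT, BCL_HIGH_TRIP, &ibat_high);
+ */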
+
+static int bcl_thresh_notify(struct bcl_param_data *param_data, int val,
+					enum bcl_trip_type trip_type)
+{
+	if (!param_data || trip_type >= BCL_TRIP_MAX
+		|| !param_data->registered) {
+		pr_err("Invalid input\n");
+		return -EINVAL;
+	}
+
+	param_data->thresh[trip_type]->trip_notify(trip_type, val,
+		param_data->thresh[trip_type]->trip_data);
+
+	return 0;
+}
+
+static int bcl_add_sysfs_nodes(enum bcl_param param_type);
+struct bcl_param_data *msm_bcl_register_param(enum bcl_param param_type,
+	struct bcl_driver_ops *param_ops, char *name)
+{
+	int ret = 0;
+
+	if (param_type >= BCL_PARAM_MAX
+		|| !bcl[param_type] || !param_ops || !name
+		|| !param_ops->read || !param_ops->set_high_trip
+		|| !param_ops->get_high_trip || !param_ops->set_low_trip
+		|| !param_ops->get_low_trip || !param_ops->enable
+		|| !param_ops->disable) {
+		pr_err("Invalid input\n");
+		return NULL;
+	}
+	if (bcl[param_type]->registered) {
+		pr_err("param%d already initialized\n", param_type);
+		return NULL;
+	}
+
+	ret = bcl_add_sysfs_nodes(param_type);
+	if (ret) {
+		pr_err("Error creating sysfs nodes. err:%d\n", ret);
+		return NULL;
+	}
+	bcl[param_type]->ops = param_ops;
+	bcl[param_type]->registered = true;
+	strlcpy(bcl[param_type]->name, name, BCL_NAME_MAX_LEN);
+	param_ops->notify = bcl_thresh_notify;
+
+	return bcl[param_type];
+}
+
+int msm_bcl_unregister_param(struct bcl_param_data *param_data)
+{
+	int i = 0, ret = -EINVAL;
+
+	if (!bcl[i] || !param_data) {
+		pr_err("Invalid input\n");
+		return ret;
+	}
+	for (; i < BCL_PARAM_MAX; i++) {
+		if (param_data != bcl[i])
+			continue;
+		bcl[i]->ops->disable();
+		bcl[i]->registered = false;
+		ret = 0;
+		break;
+	}
+
+	return ret;
+}
+
+int msm_bcl_disable(void)
+{
+	int ret = 0, i = 0;
+
+	if (!bcl[i]) {
+		pr_err("BCL not initialized\n");
+		return -EINVAL;
+	}
+
+	for (; i < BCL_PARAM_MAX; i++) {
+		if (!bcl[i]->registered)
+			continue;
+		ret = bcl[i]->ops->disable();
+		if (ret) {
+			pr_err("Error in disabling interrupt. param:%d err%d\n",
+				i, ret);
+			return ret;
+		}
+	}
+
+	return ret;
+}
+
+int msm_bcl_enable(void)
+{
+	int ret = 0, i = 0;
+	struct bcl_param_data *param_data = NULL;
+
+	if (!bcl[i] || !bcl[BCL_PARAM_VOLTAGE]->thresh
+		|| !bcl[BCL_PARAM_CURRENT]->thresh) {
+		pr_err("BCL not initialized\n");
+		return -EINVAL;
+	}
+
+	for (; i < BCL_PARAM_MAX; i++) {
+		if (!bcl[i]->registered)
+			continue;
+		param_data = bcl[i];
+		ret = param_data->ops->set_high_trip(param_data->high_trip);
+		if (ret) {
+			pr_err("Error setting high trip. param:%d. err:%d",
+				i, ret);
+			return ret;
+		}
+		ret = param_data->ops->set_low_trip(param_data->low_trip);
+		if (ret) {
+			pr_err("Error setting low trip. param:%d. err:%d",
+				i, ret);
+			return ret;
+		}
+		ret = param_data->ops->enable();
+		if (ret) {
+			pr_err("Error enabling interrupt. param:%d. err:%d",
+				i, ret);
+			return ret;
+		}
+	}
+
+	return ret;
+}
+
+int msm_bcl_read(enum bcl_param param_type, int *value)
+{
+	int ret = 0;
+
+	if (!value || param_type >= BCL_PARAM_MAX) {
+		pr_err("Invalid input\n");
+		return -EINVAL;
+	}
+	if (!bcl[param_type] || !bcl[param_type]->registered) {
+		pr_err("BCL driver not initialized\n");
+		return -ENOSYS;
+	}
+
+	ret = bcl[param_type]->ops->read(value);
+	if (ret) {
+		pr_err("Error reading param%d. err:%d\n", param_type, ret);
+		return ret;
+	}
+	bcl[param_type]->last_read_val = *value;
+
+	return ret;
+}
+
+static struct class msm_bcl_class = {
+	.name = "msm_bcl",
+};
+
+static int bcl_add_sysfs_nodes(enum bcl_param param_type)
+{
+	char *param_name[BCL_PARAM_MAX] = {"voltage", "current"};
+	int ret = 0;
+
+	bcl[param_type]->device.class = &msm_bcl_class;
+	dev_set_name(&bcl[param_type]->device, "%s", param_name[param_type]);
+	ret = device_register(&bcl[param_type]->device);
+	if (ret) {
+		pr_err("Error registering device %s. err:%d\n",
+			param_name[param_type], ret);
+		return ret;
+	}
+	bcl[param_type]->bcl_attr_gp.attrs = kzalloc(sizeof(struct attribute *)
+		* (BCL_PARAM_MAX_ATTR + 1), GFP_KERNEL);
+	if (!bcl[param_type]->bcl_attr_gp.attrs) {
+		pr_err("Sysfs attribute create failed.\n");
+		ret = -ENOMEM;
+		goto add_sysfs_exit;
+	}
+	BCL_DEFINE_RO_PARAM(bcl[param_type]->val_attr, value,
+		bcl[param_type]->bcl_attr_gp, 0);
+	BCL_DEFINE_RO_PARAM(bcl[param_type]->high_trip_attr, high_trip,
+		bcl[param_type]->bcl_attr_gp, 1);
+	BCL_DEFINE_RO_PARAM(bcl[param_type]->low_trip_attr, low_trip,
+		bcl[param_type]->bcl_attr_gp, 2);
+	bcl[param_type]->bcl_attr_gp.attrs[BCL_PARAM_MAX_ATTR] = NULL;
+
+	ret = sysfs_create_group(&bcl[param_type]->device.kobj,
+		&bcl[param_type]->bcl_attr_gp);
+	if (ret) {
+		pr_err("Failure to create sysfs nodes. err:%d", ret);
+		goto add_sysfs_exit;
+	}
+
+add_sysfs_exit:
+	return ret;
+}
+
+static int msm_bcl_init(void)
+{
+	int ret = 0, i = 0;
+
+	for (; i < BCL_PARAM_MAX; i++) {
+		bcl[i] = kzalloc(sizeof(struct bcl_param_data),
+			GFP_KERNEL);
+		if (!bcl[i]) {
+			pr_err("kzalloc failed\n");
+			while ((--i) >= 0)
+				kfree(bcl[i]);
+			return -ENOMEM;
+		}
+	}
+
+	return ret;
+}
+
+
+static int __init msm_bcl_init_driver(void)
+{
+	int ret = 0;
+
+	ret = msm_bcl_init();
+	if (ret) {
+		pr_err("msm bcl init failed. err:%d\n", ret);
+		return ret;
+	}
+	return class_register(&msm_bcl_class);
+}
+
+static void __exit bcl_exit(void)
+{
+	int i = 0;
+
+	for (; i < BCL_PARAM_MAX; i++) {
+		sysfs_remove_group(&bcl[i]->device.kobj,
+			&bcl[i]->bcl_attr_gp);
+		kfree(bcl[i]->bcl_attr_gp.attrs);
+		kfree(bcl[i]);
+	}
+	class_unregister(&msm_bcl_class);
+}
+
+fs_initcall(msm_bcl_init_driver);
+module_exit(bcl_exit);
diff -Nruw linux-4.4.115/drivers/power/supply/qcom/pmic-voter.c linux-4.4.115-fbx/drivers/power/supply/qcom/pmic-voter.c
--- linux-4.4.115/drivers/power/supply/qcom/pmic-voter.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/power/supply/qcom/pmic-voter.c	2019-01-22 16:16:26.227271074 +0100
@@ -0,0 +1,695 @@
+/* Copyright (c) 2015-2017 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/debugfs.h>
+#include <linux/spinlock.h>
+#include <linux/errno.h>
+#include <linux/bitops.h>
+#include <linux/printk.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+
+#include <linux/pmic-voter.h>
+
+#define NUM_MAX_CLIENTS		16
+#define DEBUG_FORCE_CLIENT	"DEBUG_FORCE_CLIENT"
+
+static DEFINE_SPINLOCK(votable_list_slock);
+static LIST_HEAD(votable_list);
+
+static struct dentry *debug_root;
+
+struct client_vote {
+	bool	enabled;
+	int	value;
+};
+
+struct votable {
+	const char		*name;
+	struct list_head	list;
+	struct client_vote	votes[NUM_MAX_CLIENTS];
+	int			num_clients;
+	int			type;
+	int			effective_client_id;
+	int			effective_result;
+	struct mutex		vote_lock;
+	void			*data;
+	int			(*callback)(struct votable *votable,
+						void *data,
+						int effective_result,
+						const char *effective_client);
+	char			*client_strs[NUM_MAX_CLIENTS];
+	bool			voted_on;
+	struct dentry		*root;
+	struct dentry		*status_ent;
+	u32			force_val;
+	struct dentry		*force_val_ent;
+	bool			force_active;
+	struct dentry		*force_active_ent;
+};
+
+/**
+ * vote_set_any()
+ * @votable:	votable object
+ * @client_id:	client number of the latest voter
+ * @eff_res:	set to 0 or 1 based on the voting
+ * @eff_id:	Always returns the client_id argument
+ *
+ * Note that for a SET_ANY voter, the value is always the same as enabled.
+ * There is no notion of a voter abstaining from the election. Hence there
+ * is never a situation where the effective_id is invalid during the election.
+ *
+ * Context:
+ *	Must be called with the votable->vote_lock held
+ */
+static void vote_set_any(struct votable *votable, int client_id,
+				int *eff_res, int *eff_id)
+{
+	int i;
+
+	*eff_res = 0;
+
+	for (i = 0; i < votable->num_clients && votable->client_strs[i]; i++)
+		*eff_res |= votable->votes[i].enabled;
+
+	*eff_id = client_id;
+}
+
+/**
+ * vote_min() -
+ * @votable:	votable object
+ * @client_id:	client number of the latest voter
+ * @eff_res:	sets this to the min. of all the values amongst enabled voters.
+ *		If there is no enabled client, this is set to INT_MAX
+ * @eff_id:	sets this to the client id that has the min value amongst all
+ *		the enabled clients. If there is no enabled client, sets this
+ *		to -EINVAL
+ *
+ * Context:
+ *	Must be called with the votable->vote_lock held
+ */
+static void vote_min(struct votable *votable, int client_id,
+				int *eff_res, int *eff_id)
+{
+	int i;
+
+	*eff_res = INT_MAX;
+	*eff_id = -EINVAL;
+	for (i = 0; i < votable->num_clients && votable->client_strs[i]; i++) {
+		if (votable->votes[i].enabled
+			&& *eff_res > votable->votes[i].value) {
+			*eff_res = votable->votes[i].value;
+			*eff_id = i;
+		}
+	}
+	if (*eff_id == -EINVAL)
+		*eff_res = -EINVAL;
+}
+
+/**
+ * vote_max() -
+ * @votable:	votable object
+ * @client_id:	client number of the latest voter
+ * @eff_res:	sets this to the max. of all the values amongst enabled voters.
+ *		If there is no enabled client, this is set to -EINVAL
+ * @eff_id:	sets this to the client id that has the max value amongst all
+ *		the enabled clients. If there is no enabled client, sets this to
+ *		-EINVAL
+ *
+ * Context:
+ *	Must be called with the votable->vote_lock held
+ */
+static void vote_max(struct votable *votable, int client_id,
+				int *eff_res, int *eff_id)
+{
+	int i;
+
+	*eff_res = INT_MIN;
+	*eff_id = -EINVAL;
+	for (i = 0; i < votable->num_clients && votable->client_strs[i]; i++) {
+		if (votable->votes[i].enabled &&
+				*eff_res < votable->votes[i].value) {
+			*eff_res = votable->votes[i].value;
+			*eff_id = i;
+		}
+	}
+	if (*eff_id == -EINVAL)
+		*eff_res = -EINVAL;
+}
+
+static int get_client_id(struct votable *votable, const char *client_str)
+{
+	int i;
+
+	for (i = 0; i < votable->num_clients; i++) {
+		if (votable->client_strs[i]
+		 && (strcmp(votable->client_strs[i], client_str) == 0))
+			return i;
+	}
+
+	/* new client */
+	for (i = 0; i < votable->num_clients; i++) {
+		if (!votable->client_strs[i]) {
+			votable->client_strs[i]
+				= kstrdup(client_str, GFP_KERNEL);
+			if (!votable->client_strs[i])
+				return -ENOMEM;
+			return i;
+		}
+	}
+	return -EINVAL;
+}
+
+static char *get_client_str(struct votable *votable, int client_id)
+{
+	if (client_id == -EINVAL)
+		return NULL;
+
+	 return votable->client_strs[client_id];
+}
+
+void lock_votable(struct votable *votable)
+{
+	mutex_lock(&votable->vote_lock);
+}
+
+void unlock_votable(struct votable *votable)
+{
+	mutex_unlock(&votable->vote_lock);
+}
+
+/**
+ * is_client_vote_enabled() -
+ * is_client_vote_enabled_locked() -
+ *		The unlocked and locked variants of getting whether a
+ *		client's vote is enabled.
+ * @votable:	the votable object
+ * @client_str: client of interest
+ *
+ * Returns:
+ *	True if the client's vote is enabled; false otherwise.
+ */
+bool is_client_vote_enabled_locked(struct votable *votable,
+							const char *client_str)
+{
+	int client_id = get_client_id(votable, client_str);
+
+	if (client_id < 0)
+		return false;
+
+	return votable->votes[client_id].enabled;
+}
+
+bool is_client_vote_enabled(struct votable *votable, const char *client_str)
+{
+	bool enabled;
+
+	lock_votable(votable);
+	enabled = is_client_vote_enabled_locked(votable, client_str);
+	unlock_votable(votable);
+	return enabled;
+}
+
+/**
+ * get_client_vote() -
+ * get_client_vote_locked() -
+ *		The unlocked and locked variants of getting a client's voted
+ *		value.
+ * @votable:	the votable object
+ * @client_str: client of interest
+ *
+ * Returns:
+ *	The value the client voted for. -EINVAL is returned if the client
+ *	is not enabled or the client is not found.
+ */
+int get_client_vote_locked(struct votable *votable, const char *client_str)
+{
+	int client_id = get_client_id(votable, client_str);
+
+	if (client_id < 0)
+		return -EINVAL;
+
+	if ((votable->type != VOTE_SET_ANY)
+		&& !votable->votes[client_id].enabled)
+		return -EINVAL;
+
+	return votable->votes[client_id].value;
+}
+
+int get_client_vote(struct votable *votable, const char *client_str)
+{
+	int value;
+
+	lock_votable(votable);
+	value = get_client_vote_locked(votable, client_str);
+	unlock_votable(votable);
+	return value;
+}
+
+/**
+ * get_effective_result() -
+ * get_effective_result_locked() -
+ *		The unlocked and locked variants of getting the effective value
+ *		amongst all the enabled voters.
+ *
+ * @votable:	the votable object
+ *
+ * Returns:
+ *	The effective result.
+ *	For MIN and MAX votables, returns -EINVAL when the votable
+ *	object has been created but no client has cast a vote, or when
+ *	the last enabled client disables its vote.
+ *	For a SET_ANY votable it returns 0 when no client has cast a vote,
+ *	because for SET_ANY there is no concept of abstaining from the
+ *	election; the votes of all clients of a SET_ANY votable default
+ *	to false.
+ */
+int get_effective_result_locked(struct votable *votable)
+{
+	if (votable->force_active)
+		return votable->force_val;
+
+	return votable->effective_result;
+}
+
+int get_effective_result(struct votable *votable)
+{
+	int value;
+
+	lock_votable(votable);
+	value = get_effective_result_locked(votable);
+	unlock_votable(votable);
+	return value;
+}
+
+/**
+ * get_effective_client() -
+ * get_effective_client_locked() -
+ *		The unlocked and locked variants of getting the effective client
+ *		amongst all the enabled voters.
+ *
+ * @votable:	the votable object
+ *
+ * Returns:
+ *	The effective client.
+ *	For MIN and MAX votables, returns NULL when the votable
+ *	object has been created but no client has cast a vote, or when
+ *	the last enabled client disables its vote.
+ *	For a SET_ANY votable it also returns NULL when no client has cast
+ *	a vote. But since for SET_ANY there is no concept of abstaining
+ *	from the election, the client that cast the only vote, or the
+ *	client that caused the result to change, is returned otherwise.
+ */
+const char *get_effective_client_locked(struct votable *votable)
+{
+	if (votable->force_active)
+		return DEBUG_FORCE_CLIENT;
+
+	return get_client_str(votable, votable->effective_client_id);
+}
+
+const char *get_effective_client(struct votable *votable)
+{
+	const char *client_str;
+
+	lock_votable(votable);
+	client_str = get_effective_client_locked(votable);
+	unlock_votable(votable);
+	return client_str;
+}
+
+/**
+ * vote() -
+ *
+ * @votable:	the votable object
+ * @client_str: the voting client
+ * @enabled:	This provides a means for the client to exclude itself from
+ *		the election. The client's val (the next argument) is
+ *		considered only when it has enabled its participation.
+ *		Note that this takes a different meaning for the SET_ANY type,
+ *		as there is no concept of abstaining from participation:
+ *		enabled is treated as the boolean value the client is voting.
+ * @val:	The vote value. This is ignored for SET_ANY votable types.
+ *		For MIN and MAX votable types this value is used as the
+ *		client's vote value when enabled is true; the value is
+ *		ignored if enabled is false.
+ *
+ * The callback is called only when there is a change in the election results or
+ * if it is the first time someone is voting.
+ *
+ * Returns:
+ *	The return from the callback when present and needs to be called
+ *	or zero.
+ */
+int vote(struct votable *votable, const char *client_str, bool enabled, int val)
+{
+	int effective_id = -EINVAL;
+	int effective_result;
+	int client_id;
+	int rc = 0;
+	bool similar_vote = false;
+
+	lock_votable(votable);
+
+	client_id = get_client_id(votable, client_str);
+	if (client_id < 0) {
+		rc = client_id;
+		goto out;
+	}
+
+	/*
+	 * for SET_ANY the val is to be ignored, set it
+	 * to enabled so that the election still works based on
+	 * value regardless of the type
+	 */
+	if (votable->type == VOTE_SET_ANY)
+		val = enabled;
+
+	if ((votable->votes[client_id].enabled == enabled) &&
+		(votable->votes[client_id].value == val)) {
+		pr_debug("%s: %s,%d same vote %s of val=%d\n",
+				votable->name,
+				client_str, client_id,
+				enabled ? "on" : "off",
+				val);
+		similar_vote = true;
+	}
+
+	votable->votes[client_id].enabled = enabled;
+	votable->votes[client_id].value = val;
+
+	if (similar_vote && votable->voted_on) {
+		pr_debug("%s: %s,%d Ignoring similar vote %s of val=%d\n",
+			votable->name,
+			client_str, client_id, enabled ? "on" : "off", val);
+		goto out;
+	}
+
+	pr_debug("%s: %s,%d voting %s of val=%d\n",
+		votable->name,
+		client_str, client_id, enabled ? "on" : "off", val);
+	switch (votable->type) {
+	case VOTE_MIN:
+		vote_min(votable, client_id, &effective_result, &effective_id);
+		break;
+	case VOTE_MAX:
+		vote_max(votable, client_id, &effective_result, &effective_id);
+		break;
+	case VOTE_SET_ANY:
+		vote_set_any(votable, client_id,
+				&effective_result, &effective_id);
+		break;
+	default:
+		/* don't return with the votable locked */
+		rc = -EINVAL;
+		goto out;
+	}
+
+	/*
+	 * Note that the callback is called with a NULL string and -EINVAL
+	 * result when there are no enabled votes
+	 */
+	if (!votable->voted_on
+			|| (effective_result != votable->effective_result)) {
+		votable->effective_client_id = effective_id;
+		votable->effective_result = effective_result;
+		pr_debug("%s: effective vote is now %d voted by %s,%d\n",
+			votable->name, effective_result,
+			get_client_str(votable, effective_id),
+			effective_id);
+		if (votable->callback && !votable->force_active)
+			rc = votable->callback(votable, votable->data,
+					effective_result,
+					get_client_str(votable, effective_id));
+	}
+
+	votable->voted_on = true;
+out:
+	unlock_votable(votable);
+	return rc;
+}
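+
+/*
+ * Illustrative usage (a minimal sketch, not part of this file; the
+ * votable name "FCC" and client string "USB_VOTER" are hypothetical):
+ *
+ *	struct votable *fcc = find_votable("FCC");
+ *
+ *	if (fcc)
+ *		vote(fcc, "USB_VOTER", true, 500000);
+ *
+ * With a VOTE_MIN votable the lowest enabled value wins, so this caps the
+ * effective result without overriding other clients' votes.
+ */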
+
+int rerun_election(struct votable *votable)
+{
+	int rc = 0;
+	int effective_result;
+
+	lock_votable(votable);
+	effective_result = get_effective_result_locked(votable);
+	if (votable->callback)
+		rc = votable->callback(votable,
+			votable->data,
+			effective_result,
+			get_client_str(votable, votable->effective_client_id));
+	unlock_votable(votable);
+	return rc;
+}
+
+struct votable *find_votable(const char *name)
+{
+	unsigned long flags;
+	struct votable *v;
+	bool found = false;
+
+	spin_lock_irqsave(&votable_list_slock, flags);
+	if (list_empty(&votable_list))
+		goto out;
+
+	list_for_each_entry(v, &votable_list, list) {
+		if (strcmp(v->name, name) == 0) {
+			found = true;
+			break;
+		}
+	}
+out:
+	spin_unlock_irqrestore(&votable_list_slock, flags);
+
+	if (found)
+		return v;
+	else
+		return NULL;
+}
+
+static int force_active_get(void *data, u64 *val)
+{
+	struct votable *votable = data;
+
+	*val = votable->force_active;
+
+	return 0;
+}
+
+static int force_active_set(void *data, u64 val)
+{
+	struct votable *votable = data;
+	int rc = 0;
+
+	lock_votable(votable);
+	votable->force_active = !!val;
+
+	if (!votable->callback)
+		goto out;
+
+	if (votable->force_active) {
+		rc = votable->callback(votable, votable->data,
+			votable->force_val,
+			DEBUG_FORCE_CLIENT);
+	} else {
+		rc = votable->callback(votable, votable->data,
+			votable->effective_result,
+			get_client_str(votable, votable->effective_client_id));
+	}
+out:
+	unlock_votable(votable);
+	return rc;
+}
+DEFINE_SIMPLE_ATTRIBUTE(votable_force_ops, force_active_get, force_active_set,
+		"%lld\n");
+
+static int show_votable_clients(struct seq_file *m, void *data)
+{
+	struct votable *votable = m->private;
+	int i;
+	char *type_str = "Unknown";
+	const char *effective_client_str;
+
+	lock_votable(votable);
+
+	for (i = 0; i < votable->num_clients; i++) {
+		if (votable->client_strs[i]) {
+			seq_printf(m, "%s: %s:\t\t\ten=%d v=%d\n",
+					votable->name,
+					votable->client_strs[i],
+					votable->votes[i].enabled,
+					votable->votes[i].value);
+		}
+	}
+
+	switch (votable->type) {
+	case VOTE_MIN:
+		type_str = "Min";
+		break;
+	case VOTE_MAX:
+		type_str = "Max";
+		break;
+	case VOTE_SET_ANY:
+		type_str = "Set_any";
+		break;
+	}
+
+	effective_client_str = get_effective_client_locked(votable);
+	seq_printf(m, "%s: effective=%s type=%s v=%d\n",
+			votable->name,
+			effective_client_str ? effective_client_str : "none",
+			type_str,
+			get_effective_result_locked(votable));
+	unlock_votable(votable);
+
+	return 0;
+}
+
+static int votable_status_open(struct inode *inode, struct file *file)
+{
+	struct votable *votable = inode->i_private;
+
+	return single_open(file, show_votable_clients, votable);
+}
+
+static const struct file_operations votable_status_ops = {
+	.owner		= THIS_MODULE,
+	.open		= votable_status_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+
+struct votable *create_votable(const char *name,
+				int votable_type,
+				int (*callback)(struct votable *votable,
+					void *data,
+					int effective_result,
+					const char *effective_client),
+				void *data)
+{
+	struct votable *votable;
+	unsigned long flags;
+
+	votable = find_votable(name);
+	if (votable)
+		return ERR_PTR(-EEXIST);
+
+	if (debug_root == NULL) {
+		debug_root = debugfs_create_dir("pmic-votable", NULL);
+		if (!debug_root) {
+			pr_err("Couldn't create debug dir\n");
+			return ERR_PTR(-ENOMEM);
+		}
+	}
+
+	if (votable_type >= NUM_VOTABLE_TYPES) {
+		pr_err("Invalid votable_type specified for voter\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	votable = kzalloc(sizeof(struct votable), GFP_KERNEL);
+	if (!votable)
+		return ERR_PTR(-ENOMEM);
+
+	votable->name = kstrdup(name, GFP_KERNEL);
+	if (!votable->name) {
+		kfree(votable);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	votable->num_clients = NUM_MAX_CLIENTS;
+	votable->callback = callback;
+	votable->type = votable_type;
+	votable->data = data;
+	mutex_init(&votable->vote_lock);
+
+	/*
+	 * Because effective_result and client states are invalid
+	 * before the first vote, initialize them to -EINVAL
+	 */
+	votable->effective_result = -EINVAL;
+	if (votable->type == VOTE_SET_ANY)
+		votable->effective_result = 0;
+	votable->effective_client_id = -EINVAL;
+
+	spin_lock_irqsave(&votable_list_slock, flags);
+	list_add(&votable->list, &votable_list);
+	spin_unlock_irqrestore(&votable_list_slock, flags);
+
+	votable->root = debugfs_create_dir(name, debug_root);
+	if (!votable->root) {
+		pr_err("Couldn't create debug dir %s\n", name);
+		kfree(votable->name);
+		kfree(votable);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	votable->status_ent = debugfs_create_file("status", S_IFREG | S_IRUGO,
+				  votable->root, votable,
+				  &votable_status_ops);
+	if (!votable->status_ent) {
+		pr_err("Couldn't create status dbg file for %s\n", name);
+		debugfs_remove_recursive(votable->root);
+		kfree(votable->name);
+		kfree(votable);
+		return ERR_PTR(-EEXIST);
+	}
+
+	votable->force_val_ent = debugfs_create_u32("force_val",
+					S_IFREG | S_IWUSR | S_IRUGO,
+					votable->root,
+					&(votable->force_val));
+
+	if (!votable->force_val_ent) {
+		pr_err("Couldn't create force_val dbg file for %s\n", name);
+		debugfs_remove_recursive(votable->root);
+		kfree(votable->name);
+		kfree(votable);
+		return ERR_PTR(-EEXIST);
+	}
+
+	votable->force_active_ent = debugfs_create_file("force_active",
+					S_IFREG | S_IRUGO,
+					votable->root, votable,
+					&votable_force_ops);
+	if (!votable->force_active_ent) {
+		pr_err("Couldn't create force_active dbg file for %s\n", name);
+		debugfs_remove_recursive(votable->root);
+		kfree(votable->name);
+		kfree(votable);
+		return ERR_PTR(-EEXIST);
+	}
+
+	return votable;
+}
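+
+/*
+ * A minimal creation sketch (hypothetical names): the callback is invoked
+ * with the new effective result whenever the election outcome changes.
+ *
+ *	static int fcc_cb(struct votable *votable, void *data,
+ *			int effective_result, const char *effective_client)
+ *	{
+ *		pr_debug("FCC is now %d, set by %s\n", effective_result,
+ *			effective_client);
+ *		return 0;
+ *	}
+ *
+ *	chip->fcc_votable = create_votable("FCC", VOTE_MIN, fcc_cb, chip);
+ *	if (IS_ERR(chip->fcc_votable))
+ *		return PTR_ERR(chip->fcc_votable);
+ */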
+
+void destroy_votable(struct votable *votable)
+{
+	unsigned long flags;
+	int i;
+
+	if (!votable)
+		return;
+
+	spin_lock_irqsave(&votable_list_slock, flags);
+	list_del(&votable->list);
+	spin_unlock_irqrestore(&votable_list_slock, flags);
+
+	debugfs_remove_recursive(votable->root);
+
+	for (i = 0; i < votable->num_clients && votable->client_strs[i]; i++)
+		kfree(votable->client_strs[i]);
+
+	kfree(votable->name);
+	kfree(votable);
+}
diff -Nruw linux-4.4.115-fbx/drivers/power/supply./qcom/qpnp-fg-gen3.c linux-4.4.115-fbx/drivers/power/supply/qcom/qpnp-fg-gen3.c
--- linux-4.4.115-fbx/drivers/power/supply./qcom/qpnp-fg-gen3.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/power/supply/qcom/qpnp-fg-gen3.c	2019-10-29 09:26:24.633212866 +0100
@@ -0,0 +1,5546 @@
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt)	"FG: %s: " fmt, __func__
+
+#include <linux/ktime.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/of_platform.h>
+#include <linux/of_batterydata.h>
+#include <linux/platform_device.h>
+#include <linux/iio/consumer.h>
+#include <linux/qpnp/qpnp-revid.h>
+#include "fg-core.h"
+#include "fg-reg.h"
+
+#define FG_GEN3_DEV_NAME	"qcom,fg-gen3"
+
+#define PERPH_SUBTYPE_REG		0x05
+#define FG_BATT_SOC_PMI8998		0x10
+#define FG_BATT_INFO_PMI8998		0x11
+#define FG_MEM_INFO_PMI8998		0x0D
+
+/* SRAM address and offset in ascending order */
+#define ESR_PULSE_THRESH_WORD		2
+#define ESR_PULSE_THRESH_OFFSET		3
+#define SLOPE_LIMIT_WORD		3
+#define SLOPE_LIMIT_OFFSET		0
+#define CUTOFF_CURR_WORD		4
+#define CUTOFF_CURR_OFFSET		0
+#define CUTOFF_VOLT_WORD		5
+#define CUTOFF_VOLT_OFFSET		0
+#define SYS_TERM_CURR_WORD		6
+#define SYS_TERM_CURR_OFFSET		0
+#define VBATT_FULL_WORD			7
+#define VBATT_FULL_OFFSET		0
+#define ESR_FILTER_WORD			8
+#define ESR_UPD_TIGHT_OFFSET		0
+#define ESR_UPD_BROAD_OFFSET		1
+#define ESR_UPD_TIGHT_LOW_TEMP_OFFSET	2
+#define ESR_UPD_BROAD_LOW_TEMP_OFFSET	3
+#define KI_COEFF_MED_DISCHG_WORD	9
+#define TIMEBASE_OFFSET			1
+#define KI_COEFF_MED_DISCHG_OFFSET	3
+#define KI_COEFF_HI_DISCHG_WORD		10
+#define KI_COEFF_HI_DISCHG_OFFSET	0
+#define KI_COEFF_LOW_DISCHG_WORD	10
+#define KI_COEFF_LOW_DISCHG_OFFSET	2
+#define KI_COEFF_FULL_SOC_WORD		12
+#define KI_COEFF_FULL_SOC_OFFSET	2
+#define DELTA_MSOC_THR_WORD		12
+#define DELTA_MSOC_THR_OFFSET		3
+#define DELTA_BSOC_THR_WORD		13
+#define DELTA_BSOC_THR_OFFSET		2
+#define RECHARGE_SOC_THR_WORD		14
+#define RECHARGE_SOC_THR_OFFSET		0
+#define CHG_TERM_CURR_WORD		14
+#define CHG_TERM_CURR_OFFSET		1
+#define EMPTY_VOLT_WORD			15
+#define EMPTY_VOLT_OFFSET		0
+#define VBATT_LOW_WORD			15
+#define VBATT_LOW_OFFSET		1
+#define ESR_TIMER_DISCHG_MAX_WORD	17
+#define ESR_TIMER_DISCHG_MAX_OFFSET	0
+#define ESR_TIMER_DISCHG_INIT_WORD	17
+#define ESR_TIMER_DISCHG_INIT_OFFSET	2
+#define ESR_TIMER_CHG_MAX_WORD		18
+#define ESR_TIMER_CHG_MAX_OFFSET	0
+#define ESR_TIMER_CHG_INIT_WORD		18
+#define ESR_TIMER_CHG_INIT_OFFSET	2
+#define ESR_EXTRACTION_ENABLE_WORD	19
+#define ESR_EXTRACTION_ENABLE_OFFSET	0
+#define PROFILE_LOAD_WORD		24
+#define PROFILE_LOAD_OFFSET		0
+#define ESR_RSLOW_DISCHG_WORD		34
+#define ESR_RSLOW_DISCHG_OFFSET		0
+#define ESR_RSLOW_CHG_WORD		51
+#define ESR_RSLOW_CHG_OFFSET		0
+#define NOM_CAP_WORD			58
+#define NOM_CAP_OFFSET			0
+#define ACT_BATT_CAP_BKUP_WORD		74
+#define ACT_BATT_CAP_BKUP_OFFSET	0
+#define CYCLE_COUNT_WORD		75
+#define CYCLE_COUNT_OFFSET		0
+#define PROFILE_INTEGRITY_WORD		79
+#define SW_CONFIG_OFFSET		0
+#define PROFILE_INTEGRITY_OFFSET	3
+#define BATT_SOC_WORD			91
+#define BATT_SOC_OFFSET			0
+#define FULL_SOC_WORD			93
+#define FULL_SOC_OFFSET			2
+#define MONOTONIC_SOC_WORD		94
+#define MONOTONIC_SOC_OFFSET		2
+#define CC_SOC_WORD			95
+#define CC_SOC_OFFSET			0
+#define CC_SOC_SW_WORD			96
+#define CC_SOC_SW_OFFSET		0
+#define VOLTAGE_PRED_WORD		97
+#define VOLTAGE_PRED_OFFSET		0
+#define OCV_WORD			97
+#define OCV_OFFSET			2
+#define ESR_WORD			99
+#define ESR_OFFSET			0
+#define RSLOW_WORD			101
+#define RSLOW_OFFSET			0
+#define ACT_BATT_CAP_WORD		117
+#define ACT_BATT_CAP_OFFSET		0
+#define LAST_BATT_SOC_WORD		119
+#define LAST_BATT_SOC_OFFSET		0
+#define LAST_MONOTONIC_SOC_WORD		119
+#define LAST_MONOTONIC_SOC_OFFSET	2
+#define ALG_FLAGS_WORD			120
+#define ALG_FLAGS_OFFSET		1
+
+/* v2 SRAM address and offset in ascending order */
+#define KI_COEFF_LOW_DISCHG_v2_WORD	9
+#define KI_COEFF_LOW_DISCHG_v2_OFFSET	3
+#define KI_COEFF_MED_DISCHG_v2_WORD	10
+#define KI_COEFF_MED_DISCHG_v2_OFFSET	0
+#define KI_COEFF_HI_DISCHG_v2_WORD	10
+#define KI_COEFF_HI_DISCHG_v2_OFFSET	1
+#define DELTA_BSOC_THR_v2_WORD		12
+#define DELTA_BSOC_THR_v2_OFFSET	3
+#define DELTA_MSOC_THR_v2_WORD		13
+#define DELTA_MSOC_THR_v2_OFFSET	0
+#define RECHARGE_SOC_THR_v2_WORD	14
+#define RECHARGE_SOC_THR_v2_OFFSET	1
+#define CHG_TERM_CURR_v2_WORD		15
+#define CHG_TERM_BASE_CURR_v2_OFFSET	0
+#define CHG_TERM_CURR_v2_OFFSET		1
+#define EMPTY_VOLT_v2_WORD		15
+#define EMPTY_VOLT_v2_OFFSET		3
+#define VBATT_LOW_v2_WORD		16
+#define VBATT_LOW_v2_OFFSET		0
+#define RECHARGE_VBATT_THR_v2_WORD	16
+#define RECHARGE_VBATT_THR_v2_OFFSET	1
+#define FLOAT_VOLT_v2_WORD		16
+#define FLOAT_VOLT_v2_OFFSET		2
+
+static int fg_decode_voltage_15b(struct fg_sram_param *sp,
+	enum fg_sram_param_id id, int val);
+static int fg_decode_value_16b(struct fg_sram_param *sp,
+	enum fg_sram_param_id id, int val);
+static int fg_decode_default(struct fg_sram_param *sp,
+	enum fg_sram_param_id id, int val);
+static int fg_decode_cc_soc(struct fg_sram_param *sp,
+	enum fg_sram_param_id id, int value);
+static void fg_encode_voltage(struct fg_sram_param *sp,
+	enum fg_sram_param_id id, int val_mv, u8 *buf);
+static void fg_encode_current(struct fg_sram_param *sp,
+	enum fg_sram_param_id id, int val_ma, u8 *buf);
+static void fg_encode_default(struct fg_sram_param *sp,
+	enum fg_sram_param_id id, int val, u8 *buf);
+
+static struct fg_irq_info fg_irqs[FG_IRQ_MAX];
+
+#define PARAM(_id, _addr_word, _addr_byte, _len, _num, _den, _offset,	\
+	      _enc, _dec)						\
+	[FG_SRAM_##_id] = {						\
+		.addr_word	= _addr_word,				\
+		.addr_byte	= _addr_byte,				\
+		.len		= _len,					\
+		.numrtr		= _num,					\
+		.denmtr		= _den,					\
+		.offset		= _offset,				\
+		.encode		= _enc,					\
+		.decode		= _dec,					\
+	}								\
+
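+/*
+ * For reference, PARAM(BATT_SOC, BATT_SOC_WORD, BATT_SOC_OFFSET, 4, 1, 1,
+ * 0, NULL, fg_decode_default) expands to:
+ *
+ *	[FG_SRAM_BATT_SOC] = {
+ *		.addr_word	= BATT_SOC_WORD,
+ *		.addr_byte	= BATT_SOC_OFFSET,
+ *		.len		= 4,
+ *		.numrtr		= 1,
+ *		.denmtr		= 1,
+ *		.offset		= 0,
+ *		.encode		= NULL,
+ *		.decode		= fg_decode_default,
+ *	}
+ */
+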
+static struct fg_sram_param pmi8998_v1_sram_params[] = {
+	PARAM(BATT_SOC, BATT_SOC_WORD, BATT_SOC_OFFSET, 4, 1, 1, 0, NULL,
+		fg_decode_default),
+	PARAM(FULL_SOC, FULL_SOC_WORD, FULL_SOC_OFFSET, 2, 1, 1, 0, NULL,
+		fg_decode_default),
+	PARAM(VOLTAGE_PRED, VOLTAGE_PRED_WORD, VOLTAGE_PRED_OFFSET, 2, 1000,
+		244141, 0, NULL, fg_decode_voltage_15b),
+	PARAM(OCV, OCV_WORD, OCV_OFFSET, 2, 1000, 244141, 0, NULL,
+		fg_decode_voltage_15b),
+	PARAM(ESR, ESR_WORD, ESR_OFFSET, 2, 1000, 244141, 0, fg_encode_default,
+		fg_decode_value_16b),
+	PARAM(RSLOW, RSLOW_WORD, RSLOW_OFFSET, 2, 1000, 244141, 0, NULL,
+		fg_decode_value_16b),
+	PARAM(ALG_FLAGS, ALG_FLAGS_WORD, ALG_FLAGS_OFFSET, 1, 1, 1, 0, NULL,
+		fg_decode_default),
+	PARAM(CC_SOC, CC_SOC_WORD, CC_SOC_OFFSET, 4, 1, 1, 0, NULL,
+		fg_decode_cc_soc),
+	PARAM(CC_SOC_SW, CC_SOC_SW_WORD, CC_SOC_SW_OFFSET, 4, 1, 1, 0, NULL,
+		fg_decode_cc_soc),
+	PARAM(ACT_BATT_CAP, ACT_BATT_CAP_BKUP_WORD, ACT_BATT_CAP_BKUP_OFFSET, 2,
+		1, 1, 0, NULL, fg_decode_default),
+	/* Entries below here are configurable during initialization */
+	PARAM(CUTOFF_VOLT, CUTOFF_VOLT_WORD, CUTOFF_VOLT_OFFSET, 2, 1000000,
+		244141, 0, fg_encode_voltage, NULL),
+	PARAM(EMPTY_VOLT, EMPTY_VOLT_WORD, EMPTY_VOLT_OFFSET, 1, 100000, 390625,
+		-2500, fg_encode_voltage, NULL),
+	PARAM(VBATT_LOW, VBATT_LOW_WORD, VBATT_LOW_OFFSET, 1, 100000, 390625,
+		-2500, fg_encode_voltage, NULL),
+	PARAM(VBATT_FULL, VBATT_FULL_WORD, VBATT_FULL_OFFSET, 2, 1000,
+		244141, 0, fg_encode_voltage, fg_decode_voltage_15b),
+	PARAM(SYS_TERM_CURR, SYS_TERM_CURR_WORD, SYS_TERM_CURR_OFFSET, 3,
+		1000000, 122070, 0, fg_encode_current, NULL),
+	PARAM(CHG_TERM_CURR, CHG_TERM_CURR_WORD, CHG_TERM_CURR_OFFSET, 1,
+		100000, 390625, 0, fg_encode_current, NULL),
+	PARAM(CUTOFF_CURR, CUTOFF_CURR_WORD, CUTOFF_CURR_OFFSET, 3,
+		1000000, 122070, 0, fg_encode_current, NULL),
+	PARAM(DELTA_MSOC_THR, DELTA_MSOC_THR_WORD, DELTA_MSOC_THR_OFFSET, 1,
+		2048, 100, 0, fg_encode_default, NULL),
+	PARAM(DELTA_BSOC_THR, DELTA_BSOC_THR_WORD, DELTA_BSOC_THR_OFFSET, 1,
+		2048, 100, 0, fg_encode_default, NULL),
+	PARAM(RECHARGE_SOC_THR, RECHARGE_SOC_THR_WORD, RECHARGE_SOC_THR_OFFSET,
+		1, 256, 100, 0, fg_encode_default, NULL),
+	PARAM(ESR_TIMER_DISCHG_MAX, ESR_TIMER_DISCHG_MAX_WORD,
+		ESR_TIMER_DISCHG_MAX_OFFSET, 2, 1, 1, 0, fg_encode_default,
+		NULL),
+	PARAM(ESR_TIMER_DISCHG_INIT, ESR_TIMER_DISCHG_INIT_WORD,
+		ESR_TIMER_DISCHG_INIT_OFFSET, 2, 1, 1, 0, fg_encode_default,
+		NULL),
+	PARAM(ESR_TIMER_CHG_MAX, ESR_TIMER_CHG_MAX_WORD,
+		ESR_TIMER_CHG_MAX_OFFSET, 2, 1, 1, 0, fg_encode_default, NULL),
+	PARAM(ESR_TIMER_CHG_INIT, ESR_TIMER_CHG_INIT_WORD,
+		ESR_TIMER_CHG_INIT_OFFSET, 2, 1, 1, 0, fg_encode_default, NULL),
+	PARAM(ESR_PULSE_THRESH, ESR_PULSE_THRESH_WORD, ESR_PULSE_THRESH_OFFSET,
+		1, 100000, 390625, 0, fg_encode_default, NULL),
+	PARAM(KI_COEFF_MED_DISCHG, KI_COEFF_MED_DISCHG_WORD,
+		KI_COEFF_MED_DISCHG_OFFSET, 1, 1000, 244141, 0,
+		fg_encode_default, NULL),
+	PARAM(KI_COEFF_HI_DISCHG, KI_COEFF_HI_DISCHG_WORD,
+		KI_COEFF_HI_DISCHG_OFFSET, 1, 1000, 244141, 0,
+		fg_encode_default, NULL),
+	PARAM(KI_COEFF_FULL_SOC, KI_COEFF_FULL_SOC_WORD,
+		KI_COEFF_FULL_SOC_OFFSET, 1, 1000, 244141, 0,
+		fg_encode_default, NULL),
+	PARAM(ESR_TIGHT_FILTER, ESR_FILTER_WORD, ESR_UPD_TIGHT_OFFSET,
+		1, 512, 1000000, 0, fg_encode_default, NULL),
+	PARAM(ESR_BROAD_FILTER, ESR_FILTER_WORD, ESR_UPD_BROAD_OFFSET,
+		1, 512, 1000000, 0, fg_encode_default, NULL),
+	PARAM(SLOPE_LIMIT, SLOPE_LIMIT_WORD, SLOPE_LIMIT_OFFSET, 1, 8192, 1000,
+		0, fg_encode_default, NULL),
+};
+
+static struct fg_sram_param pmi8998_v2_sram_params[] = {
+	PARAM(BATT_SOC, BATT_SOC_WORD, BATT_SOC_OFFSET, 4, 1, 1, 0, NULL,
+		fg_decode_default),
+	PARAM(FULL_SOC, FULL_SOC_WORD, FULL_SOC_OFFSET, 2, 1, 1, 0, NULL,
+		fg_decode_default),
+	PARAM(VOLTAGE_PRED, VOLTAGE_PRED_WORD, VOLTAGE_PRED_OFFSET, 2, 1000,
+		244141, 0, NULL, fg_decode_voltage_15b),
+	PARAM(OCV, OCV_WORD, OCV_OFFSET, 2, 1000, 244141, 0, NULL,
+		fg_decode_voltage_15b),
+	PARAM(ESR, ESR_WORD, ESR_OFFSET, 2, 1000, 244141, 0, fg_encode_default,
+		fg_decode_value_16b),
+	PARAM(RSLOW, RSLOW_WORD, RSLOW_OFFSET, 2, 1000, 244141, 0, NULL,
+		fg_decode_value_16b),
+	PARAM(ALG_FLAGS, ALG_FLAGS_WORD, ALG_FLAGS_OFFSET, 1, 1, 1, 0, NULL,
+		fg_decode_default),
+	PARAM(CC_SOC, CC_SOC_WORD, CC_SOC_OFFSET, 4, 1, 1, 0, NULL,
+		fg_decode_cc_soc),
+	PARAM(CC_SOC_SW, CC_SOC_SW_WORD, CC_SOC_SW_OFFSET, 4, 1, 1, 0, NULL,
+		fg_decode_cc_soc),
+	PARAM(ACT_BATT_CAP, ACT_BATT_CAP_BKUP_WORD, ACT_BATT_CAP_BKUP_OFFSET, 2,
+		1, 1, 0, NULL, fg_decode_default),
+	PARAM(TIMEBASE, KI_COEFF_MED_DISCHG_WORD, TIMEBASE_OFFSET, 2, 1000,
+		61000, 0, fg_encode_default, NULL),
+	/* Entries below here are configurable during initialization */
+	PARAM(CUTOFF_VOLT, CUTOFF_VOLT_WORD, CUTOFF_VOLT_OFFSET, 2, 1000000,
+		244141, 0, fg_encode_voltage, NULL),
+	PARAM(EMPTY_VOLT, EMPTY_VOLT_v2_WORD, EMPTY_VOLT_v2_OFFSET, 1, 1000,
+		15625, -2000, fg_encode_voltage, NULL),
+	PARAM(VBATT_LOW, VBATT_LOW_v2_WORD, VBATT_LOW_v2_OFFSET, 1, 1000,
+		15625, -2000, fg_encode_voltage, NULL),
+	PARAM(FLOAT_VOLT, FLOAT_VOLT_v2_WORD, FLOAT_VOLT_v2_OFFSET, 1, 1000,
+		15625, -2000, fg_encode_voltage, NULL),
+	PARAM(VBATT_FULL, VBATT_FULL_WORD, VBATT_FULL_OFFSET, 2, 1000,
+		244141, 0, fg_encode_voltage, fg_decode_voltage_15b),
+	PARAM(SYS_TERM_CURR, SYS_TERM_CURR_WORD, SYS_TERM_CURR_OFFSET, 3,
+		1000000, 122070, 0, fg_encode_current, NULL),
+	PARAM(CHG_TERM_CURR, CHG_TERM_CURR_v2_WORD, CHG_TERM_CURR_v2_OFFSET, 1,
+		100000, 390625, 0, fg_encode_current, NULL),
+	PARAM(CHG_TERM_BASE_CURR, CHG_TERM_CURR_v2_WORD,
+		CHG_TERM_BASE_CURR_v2_OFFSET, 1, 1024, 1000, 0,
+		fg_encode_current, NULL),
+	PARAM(CUTOFF_CURR, CUTOFF_CURR_WORD, CUTOFF_CURR_OFFSET, 3,
+		1000000, 122070, 0, fg_encode_current, NULL),
+	PARAM(DELTA_MSOC_THR, DELTA_MSOC_THR_v2_WORD, DELTA_MSOC_THR_v2_OFFSET,
+		1, 2048, 100, 0, fg_encode_default, NULL),
+	PARAM(DELTA_BSOC_THR, DELTA_BSOC_THR_v2_WORD, DELTA_BSOC_THR_v2_OFFSET,
+		1, 2048, 100, 0, fg_encode_default, NULL),
+	PARAM(RECHARGE_SOC_THR, RECHARGE_SOC_THR_v2_WORD,
+		RECHARGE_SOC_THR_v2_OFFSET, 1, 256, 100, 0, fg_encode_default,
+		NULL),
+	PARAM(RECHARGE_VBATT_THR, RECHARGE_VBATT_THR_v2_WORD,
+		RECHARGE_VBATT_THR_v2_OFFSET, 1, 1000, 15625, -2000,
+		fg_encode_voltage, NULL),
+	PARAM(ESR_TIMER_DISCHG_MAX, ESR_TIMER_DISCHG_MAX_WORD,
+		ESR_TIMER_DISCHG_MAX_OFFSET, 2, 1, 1, 0, fg_encode_default,
+		NULL),
+	PARAM(ESR_TIMER_DISCHG_INIT, ESR_TIMER_DISCHG_INIT_WORD,
+		ESR_TIMER_DISCHG_INIT_OFFSET, 2, 1, 1, 0, fg_encode_default,
+		NULL),
+	PARAM(ESR_TIMER_CHG_MAX, ESR_TIMER_CHG_MAX_WORD,
+		ESR_TIMER_CHG_MAX_OFFSET, 2, 1, 1, 0, fg_encode_default, NULL),
+	PARAM(ESR_TIMER_CHG_INIT, ESR_TIMER_CHG_INIT_WORD,
+		ESR_TIMER_CHG_INIT_OFFSET, 2, 1, 1, 0, fg_encode_default, NULL),
+	PARAM(ESR_PULSE_THRESH, ESR_PULSE_THRESH_WORD, ESR_PULSE_THRESH_OFFSET,
+		1, 100000, 390625, 0, fg_encode_default, NULL),
+	PARAM(KI_COEFF_MED_DISCHG, KI_COEFF_MED_DISCHG_v2_WORD,
+		KI_COEFF_MED_DISCHG_v2_OFFSET, 1, 1000, 244141, 0,
+		fg_encode_default, NULL),
+	PARAM(KI_COEFF_HI_DISCHG, KI_COEFF_HI_DISCHG_v2_WORD,
+		KI_COEFF_HI_DISCHG_v2_OFFSET, 1, 1000, 244141, 0,
+		fg_encode_default, NULL),
+	PARAM(KI_COEFF_FULL_SOC, KI_COEFF_FULL_SOC_WORD,
+		KI_COEFF_FULL_SOC_OFFSET, 1, 1000, 244141, 0,
+		fg_encode_default, NULL),
+	PARAM(ESR_TIGHT_FILTER, ESR_FILTER_WORD, ESR_UPD_TIGHT_OFFSET,
+		1, 512, 1000000, 0, fg_encode_default, NULL),
+	PARAM(ESR_BROAD_FILTER, ESR_FILTER_WORD, ESR_UPD_BROAD_OFFSET,
+		1, 512, 1000000, 0, fg_encode_default, NULL),
+	PARAM(SLOPE_LIMIT, SLOPE_LIMIT_WORD, SLOPE_LIMIT_OFFSET, 1, 8192, 1000,
+		0, fg_encode_default, NULL),
+};
+
+static struct fg_alg_flag pmi8998_v1_alg_flags[] = {
+	[ALG_FLAG_SOC_LT_OTG_MIN]	= {
+		.name	= "SOC_LT_OTG_MIN",
+		.bit	= BIT(0),
+	},
+	[ALG_FLAG_SOC_LT_RECHARGE]	= {
+		.name	= "SOC_LT_RECHARGE",
+		.bit	= BIT(1),
+	},
+	[ALG_FLAG_IBATT_LT_ITERM]	= {
+		.name	= "IBATT_LT_ITERM",
+		.bit	= BIT(2),
+	},
+	[ALG_FLAG_IBATT_GT_HPM]		= {
+		.name	= "IBATT_GT_HPM",
+		.bit	= BIT(3),
+	},
+	[ALG_FLAG_IBATT_GT_UPM]		= {
+		.name	= "IBATT_GT_UPM",
+		.bit	= BIT(4),
+	},
+	[ALG_FLAG_VBATT_LT_RECHARGE]	= {
+		.name	= "VBATT_LT_RECHARGE",
+		.bit	= BIT(5),
+	},
+	[ALG_FLAG_VBATT_GT_VFLOAT]	= {
+		.invalid = true,
+	},
+};
+
+static struct fg_alg_flag pmi8998_v2_alg_flags[] = {
+	[ALG_FLAG_SOC_LT_OTG_MIN]	= {
+		.name	= "SOC_LT_OTG_MIN",
+		.bit	= BIT(0),
+	},
+	[ALG_FLAG_SOC_LT_RECHARGE]	= {
+		.name	= "SOC_LT_RECHARGE",
+		.bit	= BIT(1),
+	},
+	[ALG_FLAG_IBATT_LT_ITERM]	= {
+		.name	= "IBATT_LT_ITERM",
+		.bit	= BIT(2),
+	},
+	[ALG_FLAG_IBATT_GT_HPM]		= {
+		.name	= "IBATT_GT_HPM",
+		.bit	= BIT(4),
+	},
+	[ALG_FLAG_IBATT_GT_UPM]		= {
+		.name	= "IBATT_GT_UPM",
+		.bit	= BIT(5),
+	},
+	[ALG_FLAG_VBATT_LT_RECHARGE]	= {
+		.name	= "VBATT_LT_RECHARGE",
+		.bit	= BIT(6),
+	},
+	[ALG_FLAG_VBATT_GT_VFLOAT]	= {
+		.name	= "VBATT_GT_VFLOAT",
+		.bit	= BIT(7),
+	},
+};
+
+static int fg_gen3_debug_mask;
+module_param_named(
+	debug_mask, fg_gen3_debug_mask, int, S_IRUSR | S_IWUSR
+);
+
+static bool fg_profile_dump;
+module_param_named(
+	profile_dump, fg_profile_dump, bool, S_IRUSR | S_IWUSR
+);
+
+static int fg_sram_dump_period_ms = 20000;
+module_param_named(
+	sram_dump_period_ms, fg_sram_dump_period_ms, int, S_IRUSR | S_IWUSR
+);
+
+static int fg_restart;
+static bool fg_sram_dump;
+
+/* All getters HERE */
+
+#define VOLTAGE_15BIT_MASK	GENMASK(14, 0)
+static int fg_decode_voltage_15b(struct fg_sram_param *sp,
+				enum fg_sram_param_id id, int value)
+{
+	value &= VOLTAGE_15BIT_MASK;
+	sp[id].value = div_u64((u64)value * sp[id].denmtr, sp[id].numrtr);
+	pr_debug("id: %d raw value: %x decoded value: %x\n", id, value,
+		sp[id].value);
+	return sp[id].value;
+}
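+
+/*
+ * Worked example: for OCV and VOLTAGE_PRED (numrtr = 1000, denmtr = 244141,
+ * i.e. ~244.141 uV per LSB), a raw reading of 0x3666 (13926) decodes to
+ * 13926 * 244141 / 1000 = 3399907 uV, roughly 3.4 V.
+ */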
+
+static int fg_decode_cc_soc(struct fg_sram_param *sp,
+				enum fg_sram_param_id id, int value)
+{
+	sp[id].value = div_s64((s64)value * sp[id].denmtr, sp[id].numrtr);
+	sp[id].value = sign_extend32(sp[id].value, 31);
+	pr_debug("id: %d raw value: %x decoded value: %x\n", id, value,
+		sp[id].value);
+	return sp[id].value;
+}
+
+static int fg_decode_value_16b(struct fg_sram_param *sp,
+				enum fg_sram_param_id id, int value)
+{
+	sp[id].value = div_u64((u64)(u16)value * sp[id].denmtr, sp[id].numrtr);
+	pr_debug("id: %d raw value: %x decoded value: %x\n", id, value,
+		sp[id].value);
+	return sp[id].value;
+}
+
+static int fg_decode_default(struct fg_sram_param *sp, enum fg_sram_param_id id,
+				int value)
+{
+	sp[id].value = value;
+	return sp[id].value;
+}
+
+static int fg_decode(struct fg_sram_param *sp, enum fg_sram_param_id id,
+			int value)
+{
+	if (!sp[id].decode) {
+		pr_err("No decoding function for parameter %d\n", id);
+		return -EINVAL;
+	}
+
+	return sp[id].decode(sp, id, value);
+}
+
+static void fg_encode_voltage(struct fg_sram_param *sp,
+				enum fg_sram_param_id  id, int val_mv, u8 *buf)
+{
+	int i, mask = 0xff;
+	int64_t temp;
+
+	val_mv += sp[id].offset;
+	temp = (int64_t)div_u64((u64)val_mv * sp[id].numrtr, sp[id].denmtr);
+	pr_debug("temp: %llx id: %d, val_mv: %d, buf: [ ", temp, id, val_mv);
+	for (i = 0; i < sp[id].len; i++) {
+		buf[i] = temp & mask;
+		temp >>= 8;
+		pr_debug("%x ", buf[i]);
+	}
+	pr_debug("]\n");
+}
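+
+/*
+ * Worked example: encoding CUTOFF_VOLT = 3400 mV with the v1 parameters
+ * (numrtr = 1000000, denmtr = 244141, offset = 0, len = 2) yields
+ * 3400 * 1000000 / 244141 = 13926 = 0x3666, stored little-endian as
+ * buf[] = { 0x66, 0x36 }.
+ */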
+
+static void fg_encode_current(struct fg_sram_param *sp,
+				enum fg_sram_param_id  id, int val_ma, u8 *buf)
+{
+	int i, mask = 0xff;
+	int64_t temp;
+	s64 current_ma;
+
+	current_ma = val_ma;
+	temp = (int64_t)div_s64(current_ma * sp[id].numrtr, sp[id].denmtr);
+	pr_debug("temp: %llx id: %d, val: %d, buf: [ ", temp, id, val_ma);
+	for (i = 0; i < sp[id].len; i++) {
+		buf[i] = temp & mask;
+		temp >>= 8;
+		pr_debug("%x ", buf[i]);
+	}
+	pr_debug("]\n");
+}
+
+static void fg_encode_default(struct fg_sram_param *sp,
+				enum fg_sram_param_id  id, int val, u8 *buf)
+{
+	int i, mask = 0xff;
+	int64_t temp;
+
+	temp = (int64_t)div_s64((s64)val * sp[id].numrtr, sp[id].denmtr);
+	pr_debug("temp: %llx id: %d, val: %d, buf: [ ", temp, id, val);
+	for (i = 0; i < sp[id].len; i++) {
+		buf[i] = temp & mask;
+		temp >>= 8;
+		pr_debug("%x ", buf[i]);
+	}
+	pr_debug("]\n");
+}
+
+static void fg_encode(struct fg_sram_param *sp, enum fg_sram_param_id id,
+			int val, u8 *buf)
+{
+	if (!sp[id].encode) {
+		pr_err("No encoding function for parameter %d\n", id);
+		return;
+	}
+
+	sp[id].encode(sp, id, val, buf);
+}
+
+/*
+ * Please make sure the *_sram_params table has an entry for the parameter
+ * obtained through this function. In addition to the address, offset and
+ * length from which this SRAM parameter is read, a decode function
+ * needs to be specified.
+ */
+static int fg_get_sram_prop(struct fg_chip *chip, enum fg_sram_param_id id,
+				int *val)
+{
+	int temp, rc, i;
+	u8 buf[4];
+
+	if (id < 0 || id >= FG_SRAM_MAX || chip->sp[id].len > sizeof(buf))
+		return -EINVAL;
+
+	if (chip->battery_missing)
+		return -ENODATA;
+
+	rc = fg_sram_read(chip, chip->sp[id].addr_word, chip->sp[id].addr_byte,
+		buf, chip->sp[id].len, FG_IMA_DEFAULT);
+	if (rc < 0) {
+		pr_err("Error reading address 0x%04x[%d] rc=%d\n",
+			chip->sp[id].addr_word, chip->sp[id].addr_byte, rc);
+		return rc;
+	}
+
+	for (i = 0, temp = 0; i < chip->sp[id].len; i++)
+		temp |= buf[i] << (8 * i);
+
+	*val = fg_decode(chip->sp, id, temp);
+	return 0;
+}
+
+#define CC_SOC_30BIT	GENMASK(29, 0)
+static int fg_get_charge_raw(struct fg_chip *chip, int *val)
+{
+	int rc, cc_soc;
+
+	rc = fg_get_sram_prop(chip, FG_SRAM_CC_SOC, &cc_soc);
+	if (rc < 0) {
+		pr_err("Error in getting CC_SOC, rc=%d\n", rc);
+		return rc;
+	}
+
+	*val = div_s64(cc_soc * chip->cl.nom_cap_uah, CC_SOC_30BIT);
+	return 0;
+}
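+
+/*
+ * Example: CC_SOC is a signed 30-bit fraction of the nominal capacity, so
+ * cc_soc = 1 << 29 (half scale) with nom_cap_uah = 3000000 works out to
+ * *val = 1500000 uAh.
+ */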
+
+#define BATT_SOC_32BIT	GENMASK(31, 0)
+static int fg_get_charge_counter_shadow(struct fg_chip *chip, int *val)
+{
+	int rc, batt_soc;
+
+	rc = fg_get_sram_prop(chip, FG_SRAM_BATT_SOC, &batt_soc);
+	if (rc < 0) {
+		pr_err("Error in getting BATT_SOC, rc=%d\n", rc);
+		return rc;
+	}
+
+	*val = div_u64((u32)batt_soc * chip->cl.learned_cc_uah, BATT_SOC_32BIT);
+	return 0;
+}
+
+static int fg_get_charge_counter(struct fg_chip *chip, int *val)
+{
+	int rc, cc_soc;
+
+	rc = fg_get_sram_prop(chip, FG_SRAM_CC_SOC_SW, &cc_soc);
+	if (rc < 0) {
+		pr_err("Error in getting CC_SOC_SW, rc=%d\n", rc);
+		return rc;
+	}
+
+	*val = div_s64(cc_soc * chip->cl.learned_cc_uah, CC_SOC_30BIT);
+	return 0;
+}
+
+static int fg_get_jeita_threshold(struct fg_chip *chip,
+				enum jeita_levels level, int *temp_decidegC)
+{
+	int rc;
+	u8 val;
+	u16 reg;
+
+	switch (level) {
+	case JEITA_COLD:
+		reg = BATT_INFO_JEITA_TOO_COLD(chip);
+		break;
+	case JEITA_COOL:
+		reg = BATT_INFO_JEITA_COLD(chip);
+		break;
+	case JEITA_WARM:
+		reg = BATT_INFO_JEITA_HOT(chip);
+		break;
+	case JEITA_HOT:
+		reg = BATT_INFO_JEITA_TOO_HOT(chip);
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	rc = fg_read(chip, reg, &val, 1);
+	if (rc < 0) {
+		pr_err("Error in reading jeita level %d, rc=%d\n", level, rc);
+		return rc;
+	}
+
+	/* Resolution is 0.5C. Base is -30C. */
+	*temp_decidegC = (((5 * val) / 10) - 30) * 10;
+	return 0;
+}
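+
+/*
+ * Example: a register value of 100 maps to ((5 * 100) / 10 - 30) * 10 =
+ * 200 decidegC, i.e. 20 degC, matching the 0.5 degC/LSB resolution and
+ * the -30 degC base.
+ */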
+
+#define BATT_TEMP_NUMR		1
+#define BATT_TEMP_DENR		1
+static int fg_get_battery_temp(struct fg_chip *chip, int *val)
+{
+	int rc = 0, temp;
+	u8 buf[2];
+
+	rc = fg_read(chip, BATT_INFO_BATT_TEMP_LSB(chip), buf, 2);
+	if (rc < 0) {
+		pr_err("failed to read addr=0x%04x, rc=%d\n",
+			BATT_INFO_BATT_TEMP_LSB(chip), rc);
+		return rc;
+	}
+
+	temp = ((buf[1] & BATT_TEMP_MSB_MASK) << 8) |
+		(buf[0] & BATT_TEMP_LSB_MASK);
+	temp = DIV_ROUND_CLOSEST(temp, 4);
+
+	/* Value is in Kelvin; Convert it to deciDegC */
+	temp = (temp - 273) * 10;
+	*val = temp;
+	return 0;
+}
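+
+/*
+ * Example: the raw reading is in units of 0.25 K, so 1224 becomes
+ * 1224 / 4 = 306 K and then (306 - 273) * 10 = 330 decidegC (33.0 degC).
+ */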
+
+static int fg_get_battery_resistance(struct fg_chip *chip, int *val)
+{
+	int rc, esr_uohms, rslow_uohms;
+
+	rc = fg_get_sram_prop(chip, FG_SRAM_ESR, &esr_uohms);
+	if (rc < 0) {
+		pr_err("failed to get ESR, rc=%d\n", rc);
+		return rc;
+	}
+
+	rc = fg_get_sram_prop(chip, FG_SRAM_RSLOW, &rslow_uohms);
+	if (rc < 0) {
+		pr_err("failed to get Rslow, rc=%d\n", rc);
+		return rc;
+	}
+
+	*val = esr_uohms + rslow_uohms;
+	return 0;
+}
+
+#define BATT_CURRENT_NUMR	488281
+#define BATT_CURRENT_DENR	1000
+static int fg_get_battery_current(struct fg_chip *chip, int *val)
+{
+	int rc = 0;
+	int64_t temp = 0;
+	u8 buf[2];
+
+	rc = fg_read(chip, BATT_INFO_IBATT_LSB(chip), buf, 2);
+	if (rc < 0) {
+		pr_err("failed to read addr=0x%04x, rc=%d\n",
+			BATT_INFO_IBATT_LSB(chip), rc);
+		return rc;
+	}
+
+	if (chip->wa_flags & PMI8998_V1_REV_WA)
+		temp = buf[0] << 8 | buf[1];
+	else
+		temp = buf[1] << 8 | buf[0];
+
+	pr_debug("buf: %x %x temp: %llx\n", buf[0], buf[1], temp);
+	/* Sign bit is bit 15 */
+	temp = twos_compliment_extend(temp, 15);
+	*val = div_s64((s64)temp * BATT_CURRENT_NUMR, BATT_CURRENT_DENR);
+	return 0;
+}
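+
+/*
+ * Example: at ~488.281 uA per LSB, a sign-extended raw value of 2048
+ * yields 2048 * 488281 / 1000 = 999999 uA, i.e. about 1 A.
+ */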
+
+#define BATT_VOLTAGE_NUMR	122070
+#define BATT_VOLTAGE_DENR	1000
+static int fg_get_battery_voltage(struct fg_chip *chip, int *val)
+{
+	int rc = 0;
+	u16 temp = 0;
+	u8 buf[2];
+
+	rc = fg_read(chip, BATT_INFO_VBATT_LSB(chip), buf, 2);
+	if (rc < 0) {
+		pr_err("failed to read addr=0x%04x, rc=%d\n",
+			BATT_INFO_VBATT_LSB(chip), rc);
+		return rc;
+	}
+
+	if (chip->wa_flags & PMI8998_V1_REV_WA)
+		temp = buf[0] << 8 | buf[1];
+	else
+		temp = buf[1] << 8 | buf[0];
+
+	pr_debug("buf: %x %x temp: %x\n", buf[0], buf[1], temp);
+	*val = div_u64((u64)temp * BATT_VOLTAGE_NUMR, BATT_VOLTAGE_DENR);
+	return 0;
+}
+
+#define MAX_TRIES_SOC		5
+static int fg_get_msoc_raw(struct fg_chip *chip, int *val)
+{
+	u8 cap[2];
+	int rc, tries = 0;
+
+	while (tries < MAX_TRIES_SOC) {
+		rc = fg_read(chip, BATT_SOC_FG_MONOTONIC_SOC(chip), cap, 2);
+		if (rc < 0) {
+			pr_err("failed to read addr=0x%04x, rc=%d\n",
+				BATT_SOC_FG_MONOTONIC_SOC(chip), rc);
+			return rc;
+		}
+
+		if (cap[0] == cap[1])
+			break;
+
+		tries++;
+	}
+
+	if (tries == MAX_TRIES_SOC) {
+		pr_err("shadow registers do not match\n");
+		return -EINVAL;
+	}
+
+	fg_dbg(chip, FG_POWER_SUPPLY, "raw: 0x%02x\n", cap[0]);
+	*val = cap[0];
+	return 0;
+}
+
+#define FULL_CAPACITY	100
+#define FULL_SOC_RAW	255
+static int fg_get_msoc(struct fg_chip *chip, int *msoc)
+{
+	int rc;
+
+	rc = fg_get_msoc_raw(chip, msoc);
+	if (rc < 0)
+		return rc;
+
+	/*
+	 * To get better endpoints for 0 and 100, tune the calculation by
+	 * discarding the values 0 and 255 while rounding off. The remaining
+	 * values, 1-254, are scaled to 1-99. DIV_ROUND_UP is not suitable
+	 * here as it would round any value higher than 252 up to 100.
+	 */
+	if (*msoc == FULL_SOC_RAW)
+		*msoc = 100;
+	else if (*msoc == 0)
+		*msoc = 0;
+	else
+		*msoc = DIV_ROUND_CLOSEST((*msoc - 1) * (FULL_CAPACITY - 2),
+				FULL_SOC_RAW - 2) + 1;
+	return 0;
+}
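+
+/*
+ * Example: a raw MSOC of 128 scales to
+ * DIV_ROUND_CLOSEST((128 - 1) * 98, 253) + 1 = 49 + 1 = 50, while the raw
+ * endpoints 0 and 255 map directly to 0 and 100.
+ */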
+
+static bool is_batt_empty(struct fg_chip *chip)
+{
+	u8 status;
+	int rc, vbatt_uv, msoc;
+
+	rc = fg_read(chip, BATT_SOC_INT_RT_STS(chip), &status, 1);
+	if (rc < 0) {
+		pr_err("failed to read addr=0x%04x, rc=%d\n",
+			BATT_SOC_INT_RT_STS(chip), rc);
+		return false;
+	}
+
+	if (!(status & MSOC_EMPTY_BIT))
+		return false;
+
+	rc = fg_get_battery_voltage(chip, &vbatt_uv);
+	if (rc < 0) {
+		pr_err("failed to get battery voltage, rc=%d\n", rc);
+		return false;
+	}
+
+	rc = fg_get_msoc(chip, &msoc);
+	if (!rc)
+		pr_warn("batt_soc_rt_sts: %x vbatt: %d uV msoc:%d\n", status,
+			vbatt_uv, msoc);
+
+	return vbatt_uv < chip->dt.cutoff_volt_mv * 1000;
+}
+
+static int fg_get_debug_batt_id(struct fg_chip *chip, int *batt_id)
+{
+	int rc;
+	u64 temp;
+	u8 buf[2];
+
+	rc = fg_read(chip, ADC_RR_FAKE_BATT_LOW_LSB(chip), buf, 2);
+	if (rc < 0) {
+		pr_err("failed to read addr=0x%04x, rc=%d\n",
+			ADC_RR_FAKE_BATT_LOW_LSB(chip), rc);
+		return rc;
+	}
+
+	/*
+	 * Fake battery threshold is encoded in the following format.
+	 * Threshold (code) = (battery_id in Ohms) * 0.00015 * 2^10 / 2.5
+	 */
+	temp = (buf[1] << 8 | buf[0]) * 2500000;
+	do_div(temp, 150 * 1024);
+	batt_id[0] = temp;
+	rc = fg_read(chip, ADC_RR_FAKE_BATT_HIGH_LSB(chip), buf, 2);
+	if (rc < 0) {
+		pr_err("failed to read addr=0x%04x, rc=%d\n",
+			ADC_RR_FAKE_BATT_HIGH_LSB(chip), rc);
+		return rc;
+	}
+
+	temp = (buf[1] << 8 | buf[0]) * 2500000;
+	do_div(temp, 150 * 1024);
+	batt_id[1] = temp;
+	pr_debug("debug batt_id range: [%d %d]\n", batt_id[0], batt_id[1]);
+	return 0;
+}
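+
+/*
+ * Example: inverting the encoding above, a code of 6144 corresponds to
+ * 6144 * 2500000 / (150 * 1024) = 100000 Ohms, i.e. a 100 kOhm debug
+ * battery id resistor.
+ */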
+
+static bool is_debug_batt_id(struct fg_chip *chip)
+{
+	int debug_batt_id[2], rc;
+
+	if (!chip->batt_id_ohms)
+		return false;
+
+	rc = fg_get_debug_batt_id(chip, debug_batt_id);
+	if (rc < 0) {
+		pr_err("Failed to get debug batt_id, rc=%d\n", rc);
+		return false;
+	}
+
+	if (is_between(debug_batt_id[0], debug_batt_id[1],
+		chip->batt_id_ohms)) {
+		fg_dbg(chip, FG_POWER_SUPPLY, "Debug battery id: %dohms\n",
+			chip->batt_id_ohms);
+		return true;
+	}
+
+	return false;
+}
+
+#define DEBUG_BATT_SOC	67
+#define BATT_MISS_SOC	50
+#define EMPTY_SOC	0
+static int fg_get_prop_capacity(struct fg_chip *chip, int *val)
+{
+	int rc, msoc;
+
+	if (is_debug_batt_id(chip)) {
+		*val = DEBUG_BATT_SOC;
+		return 0;
+	}
+
+	if (chip->fg_restarting) {
+		*val = chip->last_soc;
+		return 0;
+	}
+
+	if (chip->battery_missing) {
+		*val = BATT_MISS_SOC;
+		return 0;
+	}
+
+	if (is_batt_empty(chip)) {
+		*val = EMPTY_SOC;
+		return 0;
+	}
+
+	if (chip->charge_full) {
+		*val = FULL_CAPACITY;
+		return 0;
+	}
+
+	rc = fg_get_msoc(chip, &msoc);
+	if (rc < 0)
+		return rc;
+
+	if (chip->dt.linearize_soc && chip->delta_soc > 0)
+		*val = chip->maint_soc;
+	else
+		*val = msoc;
+	return 0;
+}
+
+#define DEFAULT_BATT_TYPE	"Unknown Battery"
+#define MISSING_BATT_TYPE	"Missing Battery"
+#define LOADING_BATT_TYPE	"Loading Battery"
+static const char *fg_get_battery_type(struct fg_chip *chip)
+{
+	if (chip->battery_missing)
+		return MISSING_BATT_TYPE;
+
+	if (chip->bp.batt_type_str) {
+		if (chip->profile_loaded)
+			return chip->bp.batt_type_str;
+		else if (chip->profile_available)
+			return LOADING_BATT_TYPE;
+	}
+
+	return DEFAULT_BATT_TYPE;
+}
+
+static int fg_batt_missing_config(struct fg_chip *chip, bool enable)
+{
+	int rc;
+
+	rc = fg_masked_write(chip, BATT_INFO_BATT_MISS_CFG(chip),
+			BM_FROM_BATT_ID_BIT, enable ? BM_FROM_BATT_ID_BIT : 0);
+	if (rc < 0)
+		pr_err("Error in writing to %04x, rc=%d\n",
+			BATT_INFO_BATT_MISS_CFG(chip), rc);
+	return rc;
+}
+
+static int fg_get_batt_id(struct fg_chip *chip)
+{
+	int rc, ret, batt_id = 0;
+
+	if (!chip->batt_id_chan)
+		return -EINVAL;
+
+	rc = fg_batt_missing_config(chip, false);
+	if (rc < 0) {
+		pr_err("Error in disabling BMD, rc=%d\n", rc);
+		return rc;
+	}
+
+	rc = iio_read_channel_processed(chip->batt_id_chan, &batt_id);
+	if (rc < 0) {
+		pr_err("Error in reading batt_id channel, rc:%d\n", rc);
+		goto out;
+	}
+
+	/* Wait for 200ms before enabling BMD again */
+	msleep(200);
+
+	fg_dbg(chip, FG_STATUS, "batt_id: %d\n", batt_id);
+	chip->batt_id_ohms = batt_id;
+out:
+	ret = fg_batt_missing_config(chip, true);
+	if (ret < 0) {
+		pr_err("Error in enabling BMD, ret=%d\n", ret);
+		return ret;
+	}
+
+	vote(chip->batt_miss_irq_en_votable, BATT_MISS_IRQ_VOTER, true, 0);
+	return rc;
+}
+
+static int fg_get_batt_profile(struct fg_chip *chip)
+{
+	struct device_node *node = chip->dev->of_node;
+	struct device_node *batt_node, *profile_node;
+	const char *data;
+	int rc, len;
+
+	batt_node = of_find_node_by_name(node, "qcom,battery-data");
+	if (!batt_node) {
+		pr_err("Batterydata not available\n");
+		return -ENXIO;
+	}
+
+	profile_node = of_batterydata_get_best_profile(batt_node,
+				chip->batt_id_ohms / 1000, NULL);
+	if (IS_ERR(profile_node))
+		return PTR_ERR(profile_node);
+
+	if (!profile_node) {
+		pr_err("couldn't find profile handle\n");
+		return -ENODATA;
+	}
+
+	rc = of_property_read_string(profile_node, "qcom,battery-type",
+			&chip->bp.batt_type_str);
+	if (rc < 0) {
+		pr_err("battery type unavailable, rc:%d\n", rc);
+		return rc;
+	}
+
+	rc = of_property_read_u32(profile_node, "qcom,max-voltage-uv",
+			&chip->bp.float_volt_uv);
+	if (rc < 0) {
+		pr_err("battery float voltage unavailable, rc:%d\n", rc);
+		chip->bp.float_volt_uv = -EINVAL;
+	}
+
+	rc = of_property_read_u32(profile_node, "qcom,fastchg-current-ma",
+			&chip->bp.fastchg_curr_ma);
+	if (rc < 0) {
+		pr_err("battery fastchg current unavailable, rc:%d\n", rc);
+		chip->bp.fastchg_curr_ma = -EINVAL;
+	}
+
+	rc = of_property_read_u32(profile_node, "qcom,fg-cc-cv-threshold-mv",
+			&chip->bp.vbatt_full_mv);
+	if (rc < 0) {
+		pr_err("battery cc_cv threshold unavailable, rc:%d\n", rc);
+		chip->bp.vbatt_full_mv = -EINVAL;
+	}
+
+	data = of_get_property(profile_node, "qcom,fg-profile-data", &len);
+	if (!data) {
+		pr_err("No profile data available\n");
+		return -ENODATA;
+	}
+
+	if (len != PROFILE_LEN) {
+		pr_err("battery profile incorrect size: %d\n", len);
+		return -EINVAL;
+	}
+
+	chip->profile_available = true;
+	memcpy(chip->batt_profile, data, len);
+
+	return 0;
+}
+
+static inline void get_batt_temp_delta(int delta, u8 *val)
+{
+	switch (delta) {
+	case 2:
+		*val = BTEMP_DELTA_2K;
+		break;
+	case 4:
+		*val = BTEMP_DELTA_4K;
+		break;
+	case 6:
+		*val = BTEMP_DELTA_6K;
+		break;
+	case 10:
+		*val = BTEMP_DELTA_10K;
+		break;
+	default:
+		*val = BTEMP_DELTA_2K;
+		break;
+	}
+}
+
+static inline void get_esr_meas_current(int curr_ma, u8 *val)
+{
+	switch (curr_ma) {
+	case 60:
+		*val = ESR_MEAS_CUR_60MA;
+		break;
+	case 120:
+		*val = ESR_MEAS_CUR_120MA;
+		break;
+	case 180:
+		*val = ESR_MEAS_CUR_180MA;
+		break;
+	case 240:
+		*val = ESR_MEAS_CUR_240MA;
+		break;
+	default:
+		*val = ESR_MEAS_CUR_120MA;
+		break;
+	}
+
+	*val <<= ESR_PULL_DOWN_IVAL_SHIFT;
+}
+
+static int fg_set_esr_timer(struct fg_chip *chip, int cycles_init,
+				int cycles_max, bool charging, int flags)
+{
+	u8 buf[2];
+	int rc, timer_max, timer_init;
+
+	if (cycles_init < 0 || cycles_max < 0)
+		return 0;
+
+	if (charging) {
+		timer_max = FG_SRAM_ESR_TIMER_CHG_MAX;
+		timer_init = FG_SRAM_ESR_TIMER_CHG_INIT;
+	} else {
+		timer_max = FG_SRAM_ESR_TIMER_DISCHG_MAX;
+		timer_init = FG_SRAM_ESR_TIMER_DISCHG_INIT;
+	}
+
+	fg_encode(chip->sp, timer_max, cycles_max, buf);
+	rc = fg_sram_write(chip,
+			chip->sp[timer_max].addr_word,
+			chip->sp[timer_max].addr_byte, buf,
+			chip->sp[timer_max].len, flags);
+	if (rc < 0) {
+		pr_err("Error in writing esr_timer_dischg_max, rc=%d\n",
+			rc);
+		return rc;
+	}
+
+	fg_encode(chip->sp, timer_init, cycles_init, buf);
+	rc = fg_sram_write(chip,
+			chip->sp[timer_init].addr_word,
+			chip->sp[timer_init].addr_byte, buf,
+			chip->sp[timer_init].len, flags);
+	if (rc < 0) {
+		pr_err("Error in writing esr_timer_dischg_init, rc=%d\n",
+			rc);
+		return rc;
+	}
+
+	fg_dbg(chip, FG_STATUS, "esr_%s_timer set to %d/%d\n",
+		charging ? "charging" : "discharging", cycles_init, cycles_max);
+	return 0;
+}
+
+/* Other functions HERE */
+
+static void fg_notify_charger(struct fg_chip *chip)
+{
+	union power_supply_propval prop = {0, };
+	int rc;
+
+	if (!chip->batt_psy)
+		return;
+
+	if (!chip->profile_available)
+		return;
+
+	prop.intval = chip->bp.float_volt_uv;
+	rc = power_supply_set_property(chip->batt_psy,
+			POWER_SUPPLY_PROP_VOLTAGE_MAX, &prop);
+	if (rc < 0) {
+		pr_err("Error in setting voltage_max property on batt_psy, rc=%d\n",
+			rc);
+		return;
+	}
+
+	prop.intval = chip->bp.fastchg_curr_ma * 1000;
+	rc = power_supply_set_property(chip->batt_psy,
+			POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX, &prop);
+	if (rc < 0) {
+		pr_err("Error in setting constant_charge_current_max property on batt_psy, rc=%d\n",
+			rc);
+		return;
+	}
+
+	fg_dbg(chip, FG_STATUS, "Notified charger on float voltage and FCC\n");
+}
+
+static int fg_batt_miss_irq_en_cb(struct votable *votable, void *data,
+					int enable, const char *client)
+{
+	struct fg_chip *chip = data;
+
+	if (!chip->irqs[BATT_MISSING_IRQ].irq)
+		return 0;
+
+	if (enable) {
+		enable_irq(chip->irqs[BATT_MISSING_IRQ].irq);
+		enable_irq_wake(chip->irqs[BATT_MISSING_IRQ].irq);
+	} else {
+		disable_irq_wake(chip->irqs[BATT_MISSING_IRQ].irq);
+		disable_irq_nosync(chip->irqs[BATT_MISSING_IRQ].irq);
+	}
+
+	return 0;
+}
+
+static int fg_delta_bsoc_irq_en_cb(struct votable *votable, void *data,
+					int enable, const char *client)
+{
+	struct fg_chip *chip = data;
+
+	if (!chip->irqs[BSOC_DELTA_IRQ].irq)
+		return 0;
+
+	if (enable) {
+		enable_irq(chip->irqs[BSOC_DELTA_IRQ].irq);
+		enable_irq_wake(chip->irqs[BSOC_DELTA_IRQ].irq);
+	} else {
+		disable_irq_wake(chip->irqs[BSOC_DELTA_IRQ].irq);
+		disable_irq_nosync(chip->irqs[BSOC_DELTA_IRQ].irq);
+	}
+
+	return 0;
+}
+
+static int fg_awake_cb(struct votable *votable, void *data, int awake,
+			const char *client)
+{
+	struct fg_chip *chip = data;
+
+	if (awake)
+		pm_stay_awake(chip->dev);
+	else
+		pm_relax(chip->dev);
+
+	pr_debug("client: %s awake: %d\n", client, awake);
+	return 0;
+}
+
+static bool batt_psy_initialized(struct fg_chip *chip)
+{
+	if (chip->batt_psy)
+		return true;
+
+	chip->batt_psy = power_supply_get_by_name("battery");
+	if (!chip->batt_psy)
+		return false;
+
+	/* batt_psy is initialized, set the fcc and fv */
+	fg_notify_charger(chip);
+
+	return true;
+}
+
+static bool usb_psy_initialized(struct fg_chip *chip)
+{
+	if (chip->usb_psy)
+		return true;
+
+	chip->usb_psy = power_supply_get_by_name("usb");
+	if (!chip->usb_psy)
+		return false;
+
+	return true;
+}
+
+static bool pc_port_psy_initialized(struct fg_chip *chip)
+{
+	if (chip->pc_port_psy)
+		return true;
+
+	chip->pc_port_psy = power_supply_get_by_name("pc_port");
+	if (!chip->pc_port_psy)
+		return false;
+
+	return true;
+}
+
+static bool dc_psy_initialized(struct fg_chip *chip)
+{
+	if (chip->dc_psy)
+		return true;
+
+	chip->dc_psy = power_supply_get_by_name("dc");
+	if (!chip->dc_psy)
+		return false;
+
+	return true;
+}
+
+static bool is_parallel_charger_available(struct fg_chip *chip)
+{
+	if (!chip->parallel_psy)
+		chip->parallel_psy = power_supply_get_by_name("parallel");
+
+	if (!chip->parallel_psy)
+		return false;
+
+	return true;
+}
+
+static int fg_prime_cc_soc_sw(struct fg_chip *chip, int cc_soc_sw)
+{
+	int rc;
+
+	rc = fg_sram_write(chip, chip->sp[FG_SRAM_CC_SOC_SW].addr_word,
+		chip->sp[FG_SRAM_CC_SOC_SW].addr_byte, (u8 *)&cc_soc_sw,
+		chip->sp[FG_SRAM_CC_SOC_SW].len, FG_IMA_ATOMIC);
+	if (rc < 0)
+		pr_err("Error in writing cc_soc_sw, rc=%d\n", rc);
+	else
+		fg_dbg(chip, FG_STATUS, "cc_soc_sw: %x\n", cc_soc_sw);
+
+	return rc;
+}
+
+static int fg_save_learned_cap_to_sram(struct fg_chip *chip)
+{
+	int16_t cc_mah;
+	int rc;
+
+	if (chip->battery_missing || !chip->cl.learned_cc_uah)
+		return -EPERM;
+
+	cc_mah = div64_s64(chip->cl.learned_cc_uah, 1000);
+	/* Write to a backup register to use across reboot */
+	rc = fg_sram_write(chip, chip->sp[FG_SRAM_ACT_BATT_CAP].addr_word,
+			chip->sp[FG_SRAM_ACT_BATT_CAP].addr_byte, (u8 *)&cc_mah,
+			chip->sp[FG_SRAM_ACT_BATT_CAP].len, FG_IMA_DEFAULT);
+	if (rc < 0) {
+		pr_err("Error in writing act_batt_cap_bkup, rc=%d\n", rc);
+		return rc;
+	}
+
+	/* Write to actual capacity register for coulomb counter operation */
+	rc = fg_sram_write(chip, ACT_BATT_CAP_WORD, ACT_BATT_CAP_OFFSET,
+			(u8 *)&cc_mah, chip->sp[FG_SRAM_ACT_BATT_CAP].len,
+			FG_IMA_DEFAULT);
+	if (rc < 0) {
+		pr_err("Error in writing act_batt_cap, rc=%d\n", rc);
+		return rc;
+	}
+
+	fg_dbg(chip, FG_CAP_LEARN, "learned capacity %llduah/%dmah stored\n",
+		chip->cl.learned_cc_uah, cc_mah);
+	return 0;
+}
+
+#define CAPACITY_DELTA_DECIPCT	500
+static int fg_load_learned_cap_from_sram(struct fg_chip *chip)
+{
+	int rc, act_cap_mah;
+	int64_t delta_cc_uah, pct_nom_cap_uah;
+
+	rc = fg_get_sram_prop(chip, FG_SRAM_ACT_BATT_CAP, &act_cap_mah);
+	if (rc < 0) {
+		pr_err("Error in getting ACT_BATT_CAP, rc=%d\n", rc);
+		return rc;
+	}
+
+	chip->cl.learned_cc_uah = act_cap_mah * 1000;
+
+	if (chip->cl.learned_cc_uah != chip->cl.nom_cap_uah) {
+		if (chip->cl.learned_cc_uah == 0)
+			chip->cl.learned_cc_uah = chip->cl.nom_cap_uah;
+
+		delta_cc_uah = abs(chip->cl.learned_cc_uah -
+					chip->cl.nom_cap_uah);
+		pct_nom_cap_uah = div64_s64((int64_t)chip->cl.nom_cap_uah *
+				CAPACITY_DELTA_DECIPCT, 1000);
+		/*
+		 * If the learned capacity is out of range by 50% from the
+		 * nominal capacity, then overwrite the learned capacity with
+		 * the nominal capacity.
+		 */
+		if (chip->cl.nom_cap_uah && delta_cc_uah > pct_nom_cap_uah) {
+			fg_dbg(chip, FG_CAP_LEARN, "learned_cc_uah: %lld is higher than expected, capping it to nominal: %lld\n",
+				chip->cl.learned_cc_uah, chip->cl.nom_cap_uah);
+			chip->cl.learned_cc_uah = chip->cl.nom_cap_uah;
+		}
+
+		rc = fg_save_learned_cap_to_sram(chip);
+		if (rc < 0)
+			pr_err("Error in saving learned_cc_uah, rc=%d\n", rc);
+	}
+
+	fg_dbg(chip, FG_CAP_LEARN, "learned_cc_uah:%lld nom_cap_uah: %lld\n",
+		chip->cl.learned_cc_uah, chip->cl.nom_cap_uah);
+	return 0;
+}
+
+static bool is_temp_valid_cap_learning(struct fg_chip *chip)
+{
+	int rc, batt_temp;
+
+	rc = fg_get_battery_temp(chip, &batt_temp);
+	if (rc < 0) {
+		pr_err("Error in getting batt_temp\n");
+		return false;
+	}
+
+	if (batt_temp > chip->dt.cl_max_temp ||
+		batt_temp < chip->dt.cl_min_temp) {
+		fg_dbg(chip, FG_CAP_LEARN, "batt temp %d out of range [%d %d]\n",
+			batt_temp, chip->dt.cl_min_temp, chip->dt.cl_max_temp);
+		return false;
+	}
+
+	return true;
+}
+
+#define QNOVO_CL_SKEW_DECIPCT	-30
+static void fg_cap_learning_post_process(struct fg_chip *chip)
+{
+	int64_t max_inc_val, min_dec_val, old_cap;
+	int rc;
+
+	if (is_qnovo_en(chip)) {
+		fg_dbg(chip, FG_CAP_LEARN, "applying skew %d on current learnt capacity %lld\n",
+			QNOVO_CL_SKEW_DECIPCT, chip->cl.final_cc_uah);
+		chip->cl.final_cc_uah = chip->cl.final_cc_uah *
+						(1000 + QNOVO_CL_SKEW_DECIPCT);
+		do_div(chip->cl.final_cc_uah, 1000);
+	}
+
+	max_inc_val = chip->cl.learned_cc_uah
+			* (1000 + chip->dt.cl_max_cap_inc);
+	do_div(max_inc_val, 1000);
+
+	min_dec_val = chip->cl.learned_cc_uah
+			* (1000 - chip->dt.cl_max_cap_dec);
+	do_div(min_dec_val, 1000);
+
+	old_cap = chip->cl.learned_cc_uah;
+	if (chip->cl.final_cc_uah > max_inc_val)
+		chip->cl.learned_cc_uah = max_inc_val;
+	else if (chip->cl.final_cc_uah < min_dec_val)
+		chip->cl.learned_cc_uah = min_dec_val;
+	else
+		chip->cl.learned_cc_uah =
+			chip->cl.final_cc_uah;
+
+	if (chip->dt.cl_max_cap_limit) {
+		max_inc_val = (int64_t)chip->cl.nom_cap_uah * (1000 +
+				chip->dt.cl_max_cap_limit);
+		do_div(max_inc_val, 1000);
+		if (chip->cl.final_cc_uah > max_inc_val) {
+			fg_dbg(chip, FG_CAP_LEARN, "learning capacity %lld goes above max limit %lld\n",
+				chip->cl.final_cc_uah, max_inc_val);
+			chip->cl.learned_cc_uah = max_inc_val;
+		}
+	}
+
+	if (chip->dt.cl_min_cap_limit) {
+		min_dec_val = (int64_t)chip->cl.nom_cap_uah * (1000 -
+				chip->dt.cl_min_cap_limit);
+		do_div(min_dec_val, 1000);
+		if (chip->cl.final_cc_uah < min_dec_val) {
+			fg_dbg(chip, FG_CAP_LEARN, "learning capacity %lld goes below min limit %lld\n",
+				chip->cl.final_cc_uah, min_dec_val);
+			chip->cl.learned_cc_uah = min_dec_val;
+		}
+	}
+
+	rc = fg_save_learned_cap_to_sram(chip);
+	if (rc < 0)
+		pr_err("Error in saving learned_cc_uah, rc=%d\n", rc);
+
+	fg_dbg(chip, FG_CAP_LEARN, "final cc_uah = %lld, learned capacity %lld -> %lld uah\n",
+		chip->cl.final_cc_uah, old_cap, chip->cl.learned_cc_uah);
+}
+
+static int fg_cap_learning_process_full_data(struct fg_chip *chip)
+{
+	int rc, cc_soc_sw, cc_soc_delta_pct;
+	int64_t delta_cc_uah;
+
+	rc = fg_get_sram_prop(chip, FG_SRAM_CC_SOC_SW, &cc_soc_sw);
+	if (rc < 0) {
+		pr_err("Error in getting CC_SOC_SW, rc=%d\n", rc);
+		return rc;
+	}
+
+	cc_soc_delta_pct =
+		div64_s64((int64_t)(cc_soc_sw - chip->cl.init_cc_soc_sw) * 100,
+			CC_SOC_30BIT);
+
+	/* If the delta is < 50%, then skip processing full data */
+	if (cc_soc_delta_pct < 50) {
+		pr_err("cc_soc_delta_pct: %d\n", cc_soc_delta_pct);
+		return -ERANGE;
+	}
+
+	delta_cc_uah = div64_s64(chip->cl.learned_cc_uah * cc_soc_delta_pct,
+				100);
+	chip->cl.final_cc_uah = chip->cl.init_cc_uah + delta_cc_uah;
+	fg_dbg(chip, FG_CAP_LEARN, "Current cc_soc=%d cc_soc_delta_pct=%d total_cc_uah=%lld\n",
+		cc_soc_sw, cc_soc_delta_pct, chip->cl.final_cc_uah);
+	return 0;
+}
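+
+/*
+ * Example: if learning began with init_cc_soc_sw at 20% of CC_SOC_30BIT
+ * and cc_soc_sw now reads 95%, cc_soc_delta_pct is 75; with
+ * learned_cc_uah = 3000000 that adds delta_cc_uah = 2250000 uAh on top of
+ * init_cc_uah.
+ */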
+
+static int fg_cap_learning_begin(struct fg_chip *chip, u32 batt_soc)
+{
+	int rc, cc_soc_sw, batt_soc_msb;
+
+	batt_soc_msb = batt_soc >> 24;
+	if (DIV_ROUND_CLOSEST(batt_soc_msb * 100, FULL_SOC_RAW) >
+		chip->dt.cl_start_soc) {
+		fg_dbg(chip, FG_CAP_LEARN, "Battery SOC %d is high!, not starting\n",
+			batt_soc_msb);
+		return -EINVAL;
+	}
+
+	chip->cl.init_cc_uah = div64_s64(chip->cl.learned_cc_uah * batt_soc_msb,
+					FULL_SOC_RAW);
+
+	/* Prime cc_soc_sw with battery SOC when capacity learning begins */
+	cc_soc_sw = div64_s64((int64_t)batt_soc * CC_SOC_30BIT,
+				BATT_SOC_32BIT);
+	rc = fg_prime_cc_soc_sw(chip, cc_soc_sw);
+	if (rc < 0) {
+		pr_err("Error in writing cc_soc_sw, rc=%d\n", rc);
+		goto out;
+	}
+
+	chip->cl.init_cc_soc_sw = cc_soc_sw;
+	fg_dbg(chip, FG_CAP_LEARN, "Capacity learning started @ battery SOC %d init_cc_soc_sw:%d\n",
+		batt_soc_msb, chip->cl.init_cc_soc_sw);
+out:
+	return rc;
+}
+
+static int fg_cap_learning_done(struct fg_chip *chip)
+{
+	int rc, cc_soc_sw;
+
+	rc = fg_cap_learning_process_full_data(chip);
+	if (rc < 0) {
+		pr_err("Error in processing cap learning full data, rc=%d\n",
+			rc);
+		goto out;
+	}
+
+	/* Write a FULL value to cc_soc_sw */
+	cc_soc_sw = CC_SOC_30BIT;
+	rc = fg_prime_cc_soc_sw(chip, cc_soc_sw);
+	if (rc < 0) {
+		pr_err("Error in writing cc_soc_sw, rc=%d\n", rc);
+		goto out;
+	}
+
+	fg_cap_learning_post_process(chip);
+out:
+	return rc;
+}
+
+static void fg_cap_learning_update(struct fg_chip *chip)
+{
+	int rc, batt_soc, batt_soc_msb, cc_soc_sw;
+	bool input_present = is_input_present(chip);
+	bool prime_cc = false;
+
+	mutex_lock(&chip->cl.lock);
+
+	if (!is_temp_valid_cap_learning(chip) || !chip->cl.learned_cc_uah ||
+		chip->battery_missing) {
+		fg_dbg(chip, FG_CAP_LEARN, "Aborting cap_learning %lld\n",
+			chip->cl.learned_cc_uah);
+		chip->cl.active = false;
+		chip->cl.init_cc_uah = 0;
+		goto out;
+	}
+
+	if (chip->charge_status == chip->prev_charge_status)
+		goto out;
+
+	rc = fg_get_sram_prop(chip, FG_SRAM_BATT_SOC, &batt_soc);
+	if (rc < 0) {
+		pr_err("Error in getting ACT_BATT_CAP, rc=%d\n", rc);
+		goto out;
+	}
+
+	batt_soc_msb = (u32)batt_soc >> 24;
+	fg_dbg(chip, FG_CAP_LEARN, "Chg_status: %d cl_active: %d batt_soc: %d\n",
+		chip->charge_status, chip->cl.active, batt_soc_msb);
+
+	/* Initialize the starting point of learning capacity */
+	if (!chip->cl.active) {
+		if (chip->charge_status == POWER_SUPPLY_STATUS_CHARGING) {
+			rc = fg_cap_learning_begin(chip, batt_soc);
+			chip->cl.active = (rc == 0);
+		} else {
+			if ((chip->charge_status ==
+					POWER_SUPPLY_STATUS_DISCHARGING) ||
+					chip->charge_done)
+				prime_cc = true;
+		}
+	} else {
+		if (chip->charge_done) {
+			rc = fg_cap_learning_done(chip);
+			if (rc < 0)
+				pr_err("Error in completing capacity learning, rc=%d\n",
+					rc);
+
+			chip->cl.active = false;
+			chip->cl.init_cc_uah = 0;
+		}
+
+		if (chip->charge_status == POWER_SUPPLY_STATUS_DISCHARGING) {
+			if (!input_present) {
+				fg_dbg(chip, FG_CAP_LEARN, "Capacity learning aborted @ battery SOC %d\n",
+					 batt_soc_msb);
+				chip->cl.active = false;
+				chip->cl.init_cc_uah = 0;
+				prime_cc = true;
+			}
+		}
+
+		if (chip->charge_status == POWER_SUPPLY_STATUS_NOT_CHARGING) {
+			if (is_qnovo_en(chip) && input_present) {
+				/*
+				 * Don't abort the capacity learning when qnovo
+				 * is enabled and input is present where the
+				 * charging status can go to "not charging"
+				 * intermittently.
+				 */
+			} else {
+				fg_dbg(chip, FG_CAP_LEARN, "Capacity learning aborted @ battery SOC %d\n",
+					batt_soc_msb);
+				chip->cl.active = false;
+				chip->cl.init_cc_uah = 0;
+				prime_cc = true;
+			}
+		}
+	}
+
+	/*
+	 * Prime CC_SOC_SW when the device is not charging or during charge
+	 * termination when the capacity learning is not active.
+	 */
+
+	if (prime_cc) {
+		if (chip->charge_done)
+			cc_soc_sw = CC_SOC_30BIT;
+		else
+			cc_soc_sw = div_u64((u32)batt_soc *
+					CC_SOC_30BIT, BATT_SOC_32BIT);
+
+		rc = fg_prime_cc_soc_sw(chip, cc_soc_sw);
+		if (rc < 0)
+			pr_err("Error in writing cc_soc_sw, rc=%d\n",
+				rc);
+	}
+
+out:
+	mutex_unlock(&chip->cl.lock);
+}
+
+#define KI_COEFF_MED_DISCHG_DEFAULT	1500
+#define KI_COEFF_HI_DISCHG_DEFAULT	2200
+static int fg_adjust_ki_coeff_dischg(struct fg_chip *chip)
+{
+	int rc, i, msoc;
+	int ki_coeff_med = KI_COEFF_MED_DISCHG_DEFAULT;
+	int ki_coeff_hi = KI_COEFF_HI_DISCHG_DEFAULT;
+	u8 val;
+
+	if (!chip->ki_coeff_dischg_en)
+		return 0;
+
+	rc = fg_get_prop_capacity(chip, &msoc);
+	if (rc < 0) {
+		pr_err("Error in getting capacity, rc=%d\n", rc);
+		return rc;
+	}
+
+	if (chip->charge_status == POWER_SUPPLY_STATUS_DISCHARGING) {
+		for (i = KI_COEFF_SOC_LEVELS - 1; i >= 0; i--) {
+			if (msoc < chip->dt.ki_coeff_soc[i]) {
+				ki_coeff_med = chip->dt.ki_coeff_med_dischg[i];
+				ki_coeff_hi = chip->dt.ki_coeff_hi_dischg[i];
+			}
+		}
+	}
+
+	fg_encode(chip->sp, FG_SRAM_KI_COEFF_MED_DISCHG, ki_coeff_med, &val);
+	rc = fg_sram_write(chip,
+			chip->sp[FG_SRAM_KI_COEFF_MED_DISCHG].addr_word,
+			chip->sp[FG_SRAM_KI_COEFF_MED_DISCHG].addr_byte, &val,
+			chip->sp[FG_SRAM_KI_COEFF_MED_DISCHG].len,
+			FG_IMA_DEFAULT);
+	if (rc < 0) {
+		pr_err("Error in writing ki_coeff_med, rc=%d\n", rc);
+		return rc;
+	}
+
+	fg_encode(chip->sp, FG_SRAM_KI_COEFF_HI_DISCHG, ki_coeff_hi, &val);
+	rc = fg_sram_write(chip,
+			chip->sp[FG_SRAM_KI_COEFF_HI_DISCHG].addr_word,
+			chip->sp[FG_SRAM_KI_COEFF_HI_DISCHG].addr_byte, &val,
+			chip->sp[FG_SRAM_KI_COEFF_HI_DISCHG].len,
+			FG_IMA_DEFAULT);
+	if (rc < 0) {
+		pr_err("Error in writing ki_coeff_hi, rc=%d\n", rc);
+		return rc;
+	}
+
+	fg_dbg(chip, FG_STATUS, "Wrote ki_coeff_med %d ki_coeff_hi %d\n",
+		ki_coeff_med, ki_coeff_hi);
+	return 0;
+}
+
+#define KI_COEFF_FULL_SOC_DEFAULT	733
+static int fg_adjust_ki_coeff_full_soc(struct fg_chip *chip, int batt_temp)
+{
+	int rc, ki_coeff_full_soc;
+	u8 val;
+
+	if (batt_temp < 0)
+		ki_coeff_full_soc = 0;
+	else if (chip->charge_status == POWER_SUPPLY_STATUS_DISCHARGING)
+		ki_coeff_full_soc = chip->dt.ki_coeff_full_soc_dischg;
+	else
+		ki_coeff_full_soc = KI_COEFF_FULL_SOC_DEFAULT;
+
+	if (chip->ki_coeff_full_soc == ki_coeff_full_soc)
+		return 0;
+
+	fg_encode(chip->sp, FG_SRAM_KI_COEFF_FULL_SOC, ki_coeff_full_soc, &val);
+	rc = fg_sram_write(chip,
+			chip->sp[FG_SRAM_KI_COEFF_FULL_SOC].addr_word,
+			chip->sp[FG_SRAM_KI_COEFF_FULL_SOC].addr_byte, &val,
+			chip->sp[FG_SRAM_KI_COEFF_FULL_SOC].len,
+			FG_IMA_DEFAULT);
+	if (rc < 0) {
+		pr_err("Error in writing ki_coeff_full_soc, rc=%d\n", rc);
+		return rc;
+	}
+
+	chip->ki_coeff_full_soc = ki_coeff_full_soc;
+	fg_dbg(chip, FG_STATUS, "Wrote ki_coeff_full_soc %d\n",
+		ki_coeff_full_soc);
+	return 0;
+}
+
+static int fg_set_recharge_voltage(struct fg_chip *chip, int voltage_mv)
+{
+	u8 buf;
+	int rc;
+
+	if (chip->dt.auto_recharge_soc)
+		return 0;
+
+	/* This configuration is available only for pmicobalt v2.0 and above */
+	if (chip->wa_flags & PMI8998_V1_REV_WA)
+		return 0;
+
+	if (voltage_mv == chip->last_recharge_volt_mv)
+		return 0;
+
+	fg_dbg(chip, FG_STATUS, "Setting recharge voltage to %dmV\n",
+		voltage_mv);
+	fg_encode(chip->sp, FG_SRAM_RECHARGE_VBATT_THR, voltage_mv, &buf);
+	rc = fg_sram_write(chip,
+			chip->sp[FG_SRAM_RECHARGE_VBATT_THR].addr_word,
+			chip->sp[FG_SRAM_RECHARGE_VBATT_THR].addr_byte,
+			&buf, chip->sp[FG_SRAM_RECHARGE_VBATT_THR].len,
+			FG_IMA_DEFAULT);
+	if (rc < 0) {
+		pr_err("Error in writing recharge_vbatt_thr, rc=%d\n",
+			rc);
+		return rc;
+	}
+
+	chip->last_recharge_volt_mv = voltage_mv;
+	return 0;
+}
+
+static int fg_configure_full_soc(struct fg_chip *chip, int bsoc)
+{
+	int rc;
+	u8 full_soc[2] = {0xFF, 0xFF};
+
+	/*
+	 * Once the SOC masking condition is cleared, FULL_SOC and
+	 * MONOTONIC_SOC need to be updated to reflect that. Write the
+	 * battery SOC to FULL_SOC and a full value to MONOTONIC_SOC.
+	 */
+	rc = fg_sram_write(chip, FULL_SOC_WORD, FULL_SOC_OFFSET,
+			(u8 *)&bsoc, 2, FG_IMA_ATOMIC);
+	if (rc < 0) {
+		pr_err("failed to write full_soc rc=%d\n", rc);
+		return rc;
+	}
+
+	rc = fg_sram_write(chip, MONOTONIC_SOC_WORD, MONOTONIC_SOC_OFFSET,
+			full_soc, 2, FG_IMA_ATOMIC);
+	if (rc < 0) {
+		pr_err("failed to write monotonic_soc rc=%d\n", rc);
+		return rc;
+	}
+
+	return 0;
+}
+
+#define AUTO_RECHG_VOLT_LOW_LIMIT_MV	3700
+static int fg_charge_full_update(struct fg_chip *chip)
+{
+	union power_supply_propval prop = {0, };
+	int rc, msoc, bsoc, recharge_soc, msoc_raw;
+
+	if (!chip->dt.hold_soc_while_full)
+		return 0;
+
+	if (!batt_psy_initialized(chip))
+		return 0;
+
+	mutex_lock(&chip->charge_full_lock);
+	vote(chip->delta_bsoc_irq_en_votable, DELTA_BSOC_IRQ_VOTER,
+		chip->charge_done, 0);
+	rc = power_supply_get_property(chip->batt_psy, POWER_SUPPLY_PROP_HEALTH,
+		&prop);
+	if (rc < 0) {
+		pr_err("Error in getting battery health, rc=%d\n", rc);
+		goto out;
+	}
+
+	chip->health = prop.intval;
+	recharge_soc = chip->dt.recharge_soc_thr;
+	recharge_soc = DIV_ROUND_CLOSEST(recharge_soc * FULL_SOC_RAW,
+				FULL_CAPACITY);
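+	/*
+	 * Example conversion (assuming FULL_SOC_RAW = 255 and FULL_CAPACITY
+	 * = 100): a 98% recharge threshold becomes
+	 * DIV_ROUND_CLOSEST(98 * 255, 100) = 250 in raw monotonic SOC units.
+	 */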
+	rc = fg_get_sram_prop(chip, FG_SRAM_BATT_SOC, &bsoc);
+	if (rc < 0) {
+		pr_err("Error in getting BATT_SOC, rc=%d\n", rc);
+		goto out;
+	}
+
+	/* We need the 2 most significant bytes here */
+	bsoc = (u32)bsoc >> 16;
+	rc = fg_get_msoc_raw(chip, &msoc_raw);
+	if (rc < 0) {
+		pr_err("Error in getting msoc_raw, rc=%d\n", rc);
+		goto out;
+	}
+	msoc = DIV_ROUND_CLOSEST(msoc_raw * FULL_CAPACITY, FULL_SOC_RAW);
+
+	fg_dbg(chip, FG_STATUS, "msoc: %d bsoc: %x health: %d status: %d full: %d\n",
+		msoc, bsoc, chip->health, chip->charge_status,
+		chip->charge_full);
+	if (chip->charge_done && !chip->charge_full) {
+		if (msoc >= 99 && chip->health == POWER_SUPPLY_HEALTH_GOOD) {
+			fg_dbg(chip, FG_STATUS, "Setting charge_full to true\n");
+			chip->charge_full = true;
+			/*
+			 * Lower the recharge voltage so that VBAT_LT_RECHG
+			 * signal will not be asserted soon.
+			 */
+			rc = fg_set_recharge_voltage(chip,
+					AUTO_RECHG_VOLT_LOW_LIMIT_MV);
+			if (rc < 0) {
+				pr_err("Error in reducing recharge voltage, rc=%d\n",
+					rc);
+				goto out;
+			}
+		} else {
+			fg_dbg(chip, FG_STATUS, "Terminated charging @ SOC %d\n",
+				msoc);
+		}
+	} else if ((msoc_raw <= recharge_soc || !chip->charge_done)
+			&& chip->charge_full) {
+		if (chip->dt.linearize_soc) {
+			chip->delta_soc = FULL_CAPACITY - msoc;
+
+			/*
+			 * The delta SOC is spread out over every 10% change
+			 * in monotonic SOC. No more than 9% can be spread
+			 * across the 0-100 range once the first 10% is
+			 * skipped.
+			 */
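+			/*
+			 * Example: if full is declared at msoc = 94, delta_soc
+			 * is 6 and maint_soc starts at 100; the extra 6% is
+			 * then bled off by fg_update_maint_soc() as msoc
+			 * crosses each multiple of 10 on the way down.
+			 */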
+			if (chip->delta_soc > 9) {
+				chip->delta_soc = 0;
+				chip->maint_soc = 0;
+			} else {
+				chip->maint_soc = FULL_CAPACITY;
+				chip->last_msoc = msoc;
+			}
+		}
+
+		/*
+		 * Raise the recharge voltage so that the VBAT_LT_RECHG signal
+		 * will be asserted soon, as the battery SOC has dropped below
+		 * the recharge SOC threshold.
+		 */
+		rc = fg_set_recharge_voltage(chip,
+					chip->dt.recharge_volt_thr_mv);
+		if (rc < 0) {
+			pr_err("Error in setting recharge voltage, rc=%d\n",
+				rc);
+			goto out;
+		}
+
+		/*
+		 * If charge_done is still set, wait for recharging or
+		 * discharging to happen.
+		 */
+		if (chip->charge_done)
+			goto out;
+
+		rc = fg_configure_full_soc(chip, bsoc);
+		if (rc < 0)
+			goto out;
+
+		chip->charge_full = false;
+		fg_dbg(chip, FG_STATUS, "msoc_raw = %d bsoc: %d recharge_soc: %d delta_soc: %d\n",
+			msoc_raw, bsoc >> 8, recharge_soc, chip->delta_soc);
+	}
+
+out:
+	mutex_unlock(&chip->charge_full_lock);
+	return rc;
+}
+
+#define RCONN_CONFIG_BIT	BIT(0)
+static int fg_rconn_config(struct fg_chip *chip)
+{
+	int rc, esr_uohms;
+	u64 scaling_factor;
+	u32 val = 0;
+
+	if (!chip->dt.rconn_mohms)
+		return 0;
+
+	rc = fg_sram_read(chip, PROFILE_INTEGRITY_WORD,
+			SW_CONFIG_OFFSET, (u8 *)&val, 1, FG_IMA_DEFAULT);
+	if (rc < 0) {
+		pr_err("Error in reading SW_CONFIG_OFFSET, rc=%d\n", rc);
+		return rc;
+	}
+
+	if (val & RCONN_CONFIG_BIT) {
+		fg_dbg(chip, FG_STATUS, "Rconn already configured: %x\n", val);
+		return 0;
+	}
+
+	rc = fg_get_sram_prop(chip, FG_SRAM_ESR, &esr_uohms);
+	if (rc < 0) {
+		pr_err("failed to get ESR, rc=%d\n", rc);
+		return rc;
+	}
+
+	scaling_factor = div64_u64((u64)esr_uohms * 1000,
+				esr_uohms + (chip->dt.rconn_mohms * 1000));
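+	/*
+	 * Example: with ESR = 100000 uOhm and rconn_mohms = 10, the factor
+	 * is 100000 * 1000 / (100000 + 10000) = 909, so the Rslow
+	 * coefficients below are scaled to roughly 91% of their profile
+	 * values.
+	 */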
+
+	rc = fg_sram_read(chip, ESR_RSLOW_CHG_WORD,
+			ESR_RSLOW_CHG_OFFSET, (u8 *)&val, 1, FG_IMA_DEFAULT);
+	if (rc < 0) {
+		pr_err("Error in reading ESR_RSLOW_CHG_OFFSET, rc=%d\n", rc);
+		return rc;
+	}
+
+	val *= scaling_factor;
+	do_div(val, 1000);
+	rc = fg_sram_write(chip, ESR_RSLOW_CHG_WORD,
+			ESR_RSLOW_CHG_OFFSET, (u8 *)&val, 1, FG_IMA_DEFAULT);
+	if (rc < 0) {
+		pr_err("Error in writing ESR_RSLOW_CHG_OFFSET, rc=%d\n", rc);
+		return rc;
+	}
+	fg_dbg(chip, FG_STATUS, "esr_rslow_chg modified to %x\n", val & 0xFF);
+
+	rc = fg_sram_read(chip, ESR_RSLOW_DISCHG_WORD,
+			ESR_RSLOW_DISCHG_OFFSET, (u8 *)&val, 1, FG_IMA_DEFAULT);
+	if (rc < 0) {
+		pr_err("Error in reading ESR_RSLOW_DISCHG_OFFSET, rc=%d\n", rc);
+		return rc;
+	}
+
+	val *= scaling_factor;
+	do_div(val, 1000);
+	rc = fg_sram_write(chip, ESR_RSLOW_DISCHG_WORD,
+			ESR_RSLOW_DISCHG_OFFSET, (u8 *)&val, 1, FG_IMA_DEFAULT);
+	if (rc < 0) {
+		pr_err("Error in writing ESR_RSLOW_DISCHG_OFFSET, rc=%d\n", rc);
+		return rc;
+	}
+	fg_dbg(chip, FG_STATUS, "esr_rslow_dischg modified to %x\n",
+		val & 0xFF);
+
+	val = RCONN_CONFIG_BIT;
+	rc = fg_sram_write(chip, PROFILE_INTEGRITY_WORD,
+			SW_CONFIG_OFFSET, (u8 *)&val, 1, FG_IMA_DEFAULT);
+	if (rc < 0) {
+		pr_err("Error in writing SW_CONFIG_OFFSET, rc=%d\n", rc);
+		return rc;
+	}
+
+	return 0;
+}
+
+static int fg_set_jeita_threshold(struct fg_chip *chip,
+				enum jeita_levels level, int temp_decidegC)
+{
+	int rc;
+	u8 val;
+	u16 reg;
+
+	if (temp_decidegC < -300 || temp_decidegC > 970)
+		return -EINVAL;
+
+	/* Resolution is 0.5C. Base is -30C. */
+	val = DIV_ROUND_CLOSEST(((temp_decidegC / 10) + 30) * 10, 5);
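+	/*
+	 * Worked example (using the 0.5 C/LSB, -30 C base encoding above):
+	 * temp_decidegC = 250 (25.0 C) encodes to ((250 / 10) + 30) * 10 / 5
+	 * = 110 (0x6E). Note that temp_decidegC / 10 truncates, so values
+	 * are effectively programmed at 1 C granularity despite the 0.5 C
+	 * LSB.
+	 */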
+	switch (level) {
+	case JEITA_COLD:
+		reg = BATT_INFO_JEITA_TOO_COLD(chip);
+		break;
+	case JEITA_COOL:
+		reg = BATT_INFO_JEITA_COLD(chip);
+		break;
+	case JEITA_WARM:
+		reg = BATT_INFO_JEITA_HOT(chip);
+		break;
+	case JEITA_HOT:
+		reg = BATT_INFO_JEITA_TOO_HOT(chip);
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	rc = fg_write(chip, reg, &val, 1);
+	if (rc < 0) {
+		pr_err("Error in setting jeita level %d, rc=%d\n", level, rc);
+		return rc;
+	}
+
+	return 0;
+}
+
+static int fg_set_constant_chg_voltage(struct fg_chip *chip, int volt_uv)
+{
+	u8 buf[2];
+	int rc;
+
+	if (volt_uv <= 0 || volt_uv > 15590000) {
+		pr_err("Invalid voltage %d\n", volt_uv);
+		return -EINVAL;
+	}
+
+	fg_encode(chip->sp, FG_SRAM_VBATT_FULL, volt_uv, buf);
+
+	rc = fg_sram_write(chip, chip->sp[FG_SRAM_VBATT_FULL].addr_word,
+		chip->sp[FG_SRAM_VBATT_FULL].addr_byte, buf,
+		chip->sp[FG_SRAM_VBATT_FULL].len, FG_IMA_DEFAULT);
+	if (rc < 0) {
+		pr_err("Error in writing vbatt_full, rc=%d\n", rc);
+		return rc;
+	}
+
+	return 0;
+}
+
+static int fg_set_recharge_soc(struct fg_chip *chip, int recharge_soc)
+{
+	u8 buf;
+	int rc;
+
+	if (!chip->dt.auto_recharge_soc)
+		return 0;
+
+	if (recharge_soc < 0 || recharge_soc > FULL_CAPACITY)
+		return 0;
+
+	fg_encode(chip->sp, FG_SRAM_RECHARGE_SOC_THR, recharge_soc, &buf);
+	rc = fg_sram_write(chip,
+			chip->sp[FG_SRAM_RECHARGE_SOC_THR].addr_word,
+			chip->sp[FG_SRAM_RECHARGE_SOC_THR].addr_byte, &buf,
+			chip->sp[FG_SRAM_RECHARGE_SOC_THR].len, FG_IMA_DEFAULT);
+	if (rc < 0) {
+		pr_err("Error in writing recharge_soc_thr, rc=%d\n", rc);
+		return rc;
+	}
+
+	return 0;
+}
+
+static int fg_adjust_recharge_soc(struct fg_chip *chip)
+{
+	int rc, msoc, recharge_soc, new_recharge_soc = 0;
+	bool recharge_soc_status;
+
+	if (!chip->dt.auto_recharge_soc)
+		return 0;
+
+	recharge_soc = chip->dt.recharge_soc_thr;
+	recharge_soc_status = chip->recharge_soc_adjusted;
+	/*
+	 * If the input is present and charging has been terminated, adjust
+	 * the recharge SOC threshold based on the monotonic SOC at which
+	 * the charge termination happened.
+	 */
+	if (is_input_present(chip)) {
+		if (chip->charge_done) {
+			if (!chip->recharge_soc_adjusted) {
+				/* Get raw monotonic SOC for calculation */
+				rc = fg_get_msoc(chip, &msoc);
+				if (rc < 0) {
+					pr_err("Error in getting msoc, rc=%d\n",
+						rc);
+					return rc;
+				}
+
+				/* Adjust the recharge_soc threshold */
+				new_recharge_soc = msoc - (FULL_CAPACITY -
+								recharge_soc);
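+				/*
+				 * Example: with a 95% threshold, termination
+				 * at msoc = 97 moves the threshold to
+				 * 97 - (100 - 95) = 92, preserving the same
+				 * 5% drop before a recharge that termination
+				 * at exactly 100% would give.
+				 */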
+				chip->recharge_soc_adjusted = true;
+			} else {
+				/* adjusted already, do nothing */
+				return 0;
+			}
+		} else {
+			if (!chip->recharge_soc_adjusted)
+				return 0;
+
+			/* Restore the default value */
+			new_recharge_soc = recharge_soc;
+			chip->recharge_soc_adjusted = false;
+		}
+	} else {
+		/* Restore the default value */
+		new_recharge_soc = recharge_soc;
+		chip->recharge_soc_adjusted = false;
+	}
+
+	rc = fg_set_recharge_soc(chip, new_recharge_soc);
+	if (rc < 0) {
+		chip->recharge_soc_adjusted = recharge_soc_status;
+		pr_err("Couldn't set resume SOC for FG, rc=%d\n", rc);
+		return rc;
+	}
+
+	fg_dbg(chip, FG_STATUS, "resume soc set to %d\n", new_recharge_soc);
+	return 0;
+}
+
+static int fg_adjust_recharge_voltage(struct fg_chip *chip)
+{
+	int rc, recharge_volt_mv;
+
+	if (chip->dt.auto_recharge_soc)
+		return 0;
+
+	fg_dbg(chip, FG_STATUS, "health: %d chg_status: %d chg_done: %d\n",
+		chip->health, chip->charge_status, chip->charge_done);
+
+	recharge_volt_mv = chip->dt.recharge_volt_thr_mv;
+
+	/* Lower the recharge voltage in soft JEITA */
+	if (chip->health == POWER_SUPPLY_HEALTH_WARM ||
+			chip->health == POWER_SUPPLY_HEALTH_COOL)
+		recharge_volt_mv -= 200;
+
+	rc = fg_set_recharge_voltage(chip, recharge_volt_mv);
+	if (rc < 0) {
+		pr_err("Error in setting recharge_voltage, rc=%d\n",
+			rc);
+		return rc;
+	}
+
+	return 0;
+}
+
+static int fg_slope_limit_config(struct fg_chip *chip, int batt_temp)
+{
+	enum slope_limit_status status;
+	int rc;
+	u8 buf;
+
+	if (!chip->slope_limit_en)
+		return 0;
+
+	if (chip->charge_status == POWER_SUPPLY_STATUS_CHARGING ||
+		chip->charge_status == POWER_SUPPLY_STATUS_FULL) {
+		if (batt_temp < chip->dt.slope_limit_temp)
+			status = LOW_TEMP_CHARGE;
+		else
+			status = HIGH_TEMP_CHARGE;
+	} else {
+		if (batt_temp < chip->dt.slope_limit_temp)
+			status = LOW_TEMP_DISCHARGE;
+		else
+			status = HIGH_TEMP_DISCHARGE;
+	}
+
+	if (chip->slope_limit_sts == status)
+		return 0;
+
+	fg_encode(chip->sp, FG_SRAM_SLOPE_LIMIT,
+		chip->dt.slope_limit_coeffs[status], &buf);
+	rc = fg_sram_write(chip, chip->sp[FG_SRAM_SLOPE_LIMIT].addr_word,
+			chip->sp[FG_SRAM_SLOPE_LIMIT].addr_byte, &buf,
+			chip->sp[FG_SRAM_SLOPE_LIMIT].len, FG_IMA_DEFAULT);
+	if (rc < 0) {
+		pr_err("Error in configuring slope_limit coefficient, rc=%d\n",
+			rc);
+		return rc;
+	}
+
+	chip->slope_limit_sts = status;
+	fg_dbg(chip, FG_STATUS, "Slope limit status: %d value: %x\n", status,
+		buf);
+	return 0;
+}
+
+static int __fg_esr_filter_config(struct fg_chip *chip,
+				enum esr_filter_status esr_flt_sts)
+{
+	u8 esr_tight_flt, esr_broad_flt;
+	int esr_tight_flt_upct, esr_broad_flt_upct;
+	int rc;
+
+	if (esr_flt_sts == chip->esr_flt_sts)
+		return 0;
+
+	if (esr_flt_sts == ROOM_TEMP) {
+		esr_tight_flt_upct = chip->dt.esr_tight_flt_upct;
+		esr_broad_flt_upct = chip->dt.esr_broad_flt_upct;
+	} else if (esr_flt_sts == LOW_TEMP) {
+		esr_tight_flt_upct = chip->dt.esr_tight_lt_flt_upct;
+		esr_broad_flt_upct = chip->dt.esr_broad_lt_flt_upct;
+	} else if (esr_flt_sts == RELAX_TEMP) {
+		esr_tight_flt_upct = chip->dt.esr_tight_rt_flt_upct;
+		esr_broad_flt_upct = chip->dt.esr_broad_rt_flt_upct;
+	} else {
+		pr_err("Unknown esr filter config\n");
+		return 0;
+	}
+
+	fg_encode(chip->sp, FG_SRAM_ESR_TIGHT_FILTER, esr_tight_flt_upct,
+		&esr_tight_flt);
+	rc = fg_sram_write(chip, chip->sp[FG_SRAM_ESR_TIGHT_FILTER].addr_word,
+			chip->sp[FG_SRAM_ESR_TIGHT_FILTER].addr_byte,
+			&esr_tight_flt,
+			chip->sp[FG_SRAM_ESR_TIGHT_FILTER].len, FG_IMA_DEFAULT);
+	if (rc < 0) {
+		pr_err("Error in writing ESR tight filter, rc=%d\n", rc);
+		return rc;
+	}
+
+	fg_encode(chip->sp, FG_SRAM_ESR_BROAD_FILTER, esr_broad_flt_upct,
+		&esr_broad_flt);
+	rc = fg_sram_write(chip, chip->sp[FG_SRAM_ESR_BROAD_FILTER].addr_word,
+			chip->sp[FG_SRAM_ESR_BROAD_FILTER].addr_byte,
+			&esr_broad_flt,
+			chip->sp[FG_SRAM_ESR_BROAD_FILTER].len, FG_IMA_DEFAULT);
+	if (rc < 0) {
+		pr_err("Error in writing ESR broad filter, rc=%d\n", rc);
+		return rc;
+	}
+
+	chip->esr_flt_sts = esr_flt_sts;
+	fg_dbg(chip, FG_STATUS, "applied ESR filter %d values\n", esr_flt_sts);
+	return 0;
+}
+
+#define DT_IRQ_COUNT			3
+#define DELTA_TEMP_IRQ_TIME_MS		300000
+#define ESR_FILTER_ALARM_TIME_MS	900000
+static int fg_esr_filter_config(struct fg_chip *chip, int batt_temp,
+				bool override)
+{
+	enum esr_filter_status esr_flt_sts = ROOM_TEMP;
+	bool qnovo_en, input_present, count_temp_irq = false;
+	s64 time_ms;
+	int rc;
+
+	/*
+	 * If the battery temperature is below -21 C (-210 decidegC), then
+	 * skip modifying the ESR filter.
+	 */
+	if (batt_temp < -210)
+		return 0;
+
+	qnovo_en = is_qnovo_en(chip);
+	input_present = is_input_present(chip);
+
+	/*
+	 * If Qnovo is enabled, after hitting a lower battery temperature of
+	 * say 6 C, count the delta battery temperature interrupts for a
+	 * certain period of time when the battery temperature increases.
+	 * Switch to relaxed filter coefficients once the temperature increase
+	 * is qualified so that ESR accuracy can be improved.
+	 */
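+	/*
+	 * Concretely: the counting logic below promotes the filter status to
+	 * RELAX_TEMP once DT_IRQ_COUNT (3) delta-temperature interrupts with
+	 * rising temperature arrive within DELTA_TEMP_IRQ_TIME_MS (300 s);
+	 * the alarm armed at the end of this function then re-evaluates the
+	 * filter via esr_filter_work() after ESR_FILTER_ALARM_TIME_MS (900 s).
+	 */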
+	if (qnovo_en && !override) {
+		if (input_present) {
+			if (chip->esr_flt_sts == RELAX_TEMP) {
+				/* do nothing */
+				return 0;
+			}
+
+			count_temp_irq = true;
+			if (chip->delta_temp_irq_count) {
+				/* Don't count when temperature is dropping. */
+				if (batt_temp <= chip->last_batt_temp)
+					count_temp_irq = false;
+			} else {
+				/*
+				 * Starting point for counting. Check if the
+				 * temperature is qualified.
+				 */
+				if (batt_temp > chip->dt.esr_flt_rt_switch_temp)
+					count_temp_irq = false;
+				else
+					chip->last_delta_temp_time =
+						ktime_get();
+			}
+		} else {
+			chip->delta_temp_irq_count = 0;
+			rc = alarm_try_to_cancel(&chip->esr_filter_alarm);
+			if (rc < 0)
+				pr_err("Couldn't cancel esr_filter_alarm\n");
+		}
+	}
+
+	/*
+	 * If the battery temperature is less than 10 C (default), apply the
+	 * ESR low temperature tight and broad filter values to the ESR room
+	 * temperature tight and broad filters. If it is higher than 10 C,
+	 * restore the room temperature ESR filter coefficients instead.
+	 */
+	if (batt_temp > chip->dt.esr_flt_switch_temp)
+		esr_flt_sts = ROOM_TEMP;
+	else
+		esr_flt_sts = LOW_TEMP;
+
+	if (count_temp_irq) {
+		time_ms = ktime_ms_delta(ktime_get(),
+				chip->last_delta_temp_time);
+		chip->delta_temp_irq_count++;
+		fg_dbg(chip, FG_STATUS, "dt_irq_count: %d\n",
+			chip->delta_temp_irq_count);
+
+		if (chip->delta_temp_irq_count >= DT_IRQ_COUNT
+			&& time_ms <= DELTA_TEMP_IRQ_TIME_MS) {
+			fg_dbg(chip, FG_STATUS, "%d interrupts in %lld ms\n",
+				chip->delta_temp_irq_count, time_ms);
+			esr_flt_sts = RELAX_TEMP;
+		}
+	}
+
+	rc = __fg_esr_filter_config(chip, esr_flt_sts);
+	if (rc < 0)
+		return rc;
+
+	if (esr_flt_sts == RELAX_TEMP)
+		alarm_start_relative(&chip->esr_filter_alarm,
+			ms_to_ktime(ESR_FILTER_ALARM_TIME_MS));
+
+	return 0;
+}
+
+#define FG_ESR_FILTER_RESTART_MS	60000
+static void esr_filter_work(struct work_struct *work)
+{
+	struct fg_chip *chip = container_of(work,
+			struct fg_chip, esr_filter_work);
+	int rc, batt_temp;
+
+	rc = fg_get_battery_temp(chip, &batt_temp);
+	if (rc < 0) {
+		pr_err("Error in getting batt_temp\n");
+		alarm_start_relative(&chip->esr_filter_alarm,
+			ms_to_ktime(FG_ESR_FILTER_RESTART_MS));
+		/* batt_temp is unset on failure, so skip the filter update */
+		goto out;
+	}
+
+	rc = fg_esr_filter_config(chip, batt_temp, true);
+	if (rc < 0) {
+		pr_err("Error in configuring ESR filter rc:%d\n", rc);
+		alarm_start_relative(&chip->esr_filter_alarm,
+			ms_to_ktime(FG_ESR_FILTER_RESTART_MS));
+	}
+
+out:
+	chip->delta_temp_irq_count = 0;
+	pm_relax(chip->dev);
+}
+
+static enum alarmtimer_restart fg_esr_filter_alarm_cb(struct alarm *alarm,
+							ktime_t now)
+{
+	struct fg_chip *chip = container_of(alarm, struct fg_chip,
+					esr_filter_alarm);
+
+	fg_dbg(chip, FG_STATUS, "ESR filter alarm triggered %lld\n",
+		ktime_to_ms(now));
+	/*
+	 * We cannot vote for awake votable here as that takes a mutex lock
+	 * and this is executed in an atomic context.
+	 */
+	pm_stay_awake(chip->dev);
+	schedule_work(&chip->esr_filter_work);
+
+	return ALARMTIMER_NORESTART;
+}
+
+static int fg_esr_fcc_config(struct fg_chip *chip)
+{
+	union power_supply_propval prop = {0, };
+	int rc;
+	bool parallel_en = false, qnovo_en;
+
+	if (is_parallel_charger_available(chip)) {
+		rc = power_supply_get_property(chip->parallel_psy,
+			POWER_SUPPLY_PROP_CHARGING_ENABLED, &prop);
+		if (rc < 0) {
+			pr_err("Error in reading charging_enabled from parallel_psy, rc=%d\n",
+				rc);
+			return rc;
+		}
+		parallel_en = prop.intval;
+	}
+
+	qnovo_en = is_qnovo_en(chip);
+
+	fg_dbg(chip, FG_POWER_SUPPLY, "chg_sts: %d par_en: %d qnov_en: %d esr_fcc_ctrl_en: %d\n",
+		chip->charge_status, parallel_en, qnovo_en,
+		chip->esr_fcc_ctrl_en);
+
+	if (chip->charge_status == POWER_SUPPLY_STATUS_CHARGING &&
+			(parallel_en || qnovo_en)) {
+		if (chip->esr_fcc_ctrl_en)
+			return 0;
+
+		/*
+		 * When parallel charging or Qnovo is enabled, configure ESR
+		 * FCC to 300mA to trigger an ESR pulse. Without this, FG can
+		 * request the main charger to increase FCC when it is supposed
+		 * to decrease it.
+		 */
+		rc = fg_masked_write(chip, BATT_INFO_ESR_FAST_CRG_CFG(chip),
+				ESR_FAST_CRG_IVAL_MASK |
+				ESR_FAST_CRG_CTL_EN_BIT,
+				ESR_FCC_300MA | ESR_FAST_CRG_CTL_EN_BIT);
+		if (rc < 0) {
+			pr_err("Error in writing to %04x, rc=%d\n",
+				BATT_INFO_ESR_FAST_CRG_CFG(chip), rc);
+			return rc;
+		}
+
+		chip->esr_fcc_ctrl_en = true;
+	} else {
+		if (!chip->esr_fcc_ctrl_en)
+			return 0;
+
+		/*
+		 * If we're here, then it means either the device is not in
+		 * charging state or parallel charging / Qnovo is disabled.
+		 * Disable ESR fast charge current control in SW.
+		 */
+		rc = fg_masked_write(chip, BATT_INFO_ESR_FAST_CRG_CFG(chip),
+				ESR_FAST_CRG_CTL_EN_BIT, 0);
+		if (rc < 0) {
+			pr_err("Error in writing to %04x, rc=%d\n",
+				BATT_INFO_ESR_FAST_CRG_CFG(chip), rc);
+			return rc;
+		}
+
+		chip->esr_fcc_ctrl_en = false;
+	}
+
+	fg_dbg(chip, FG_STATUS, "esr_fcc_ctrl_en set to %d\n",
+		chip->esr_fcc_ctrl_en);
+	return 0;
+}
+
+static int fg_esr_timer_config(struct fg_chip *chip, bool sleep)
+{
+	int rc, cycles_init, cycles_max;
+	bool end_of_charge = false;
+
+	end_of_charge = is_input_present(chip) && chip->charge_done;
+	fg_dbg(chip, FG_STATUS, "sleep: %d eoc: %d\n", sleep, end_of_charge);
+
+	/* ESR discharging timer configuration */
+	cycles_init = sleep ? chip->dt.esr_timer_asleep[TIMER_RETRY] :
+			chip->dt.esr_timer_awake[TIMER_RETRY];
+	if (end_of_charge)
+		cycles_init = 0;
+
+	cycles_max = sleep ? chip->dt.esr_timer_asleep[TIMER_MAX] :
+			chip->dt.esr_timer_awake[TIMER_MAX];
+
+	rc = fg_set_esr_timer(chip, cycles_init, cycles_max, false,
+		sleep ? FG_IMA_NO_WLOCK : FG_IMA_DEFAULT);
+	if (rc < 0) {
+		pr_err("Error in setting discharge ESR timer, rc=%d\n", rc);
+		return rc;
+	}
+
+	/* ESR charging timer configuration */
+	cycles_init = cycles_max = -EINVAL;
+	if (end_of_charge || sleep) {
+		cycles_init = chip->dt.esr_timer_charging[TIMER_RETRY];
+		cycles_max = chip->dt.esr_timer_charging[TIMER_MAX];
+	} else if (is_input_present(chip)) {
+		cycles_init = chip->esr_timer_charging_default[TIMER_RETRY];
+		cycles_max = chip->esr_timer_charging_default[TIMER_MAX];
+	}
+
+	rc = fg_set_esr_timer(chip, cycles_init, cycles_max, true,
+		sleep ? FG_IMA_NO_WLOCK : FG_IMA_DEFAULT);
+	if (rc < 0) {
+		pr_err("Error in setting charge ESR timer, rc=%d\n", rc);
+		return rc;
+	}
+
+	return 0;
+}
+
+static void fg_ttf_update(struct fg_chip *chip)
+{
+	int rc;
+	int delay_ms;
+	union power_supply_propval prop = {0, };
+	int online = 0;
+
+	if (usb_psy_initialized(chip)) {
+		rc = power_supply_get_property(chip->usb_psy,
+			POWER_SUPPLY_PROP_ONLINE, &prop);
+		if (rc < 0) {
+			pr_err("Couldn't read usb ONLINE prop rc=%d\n", rc);
+			return;
+		}
+
+		online = online || prop.intval;
+	}
+
+	if (pc_port_psy_initialized(chip)) {
+		rc = power_supply_get_property(chip->pc_port_psy,
+			POWER_SUPPLY_PROP_ONLINE, &prop);
+		if (rc < 0) {
+			pr_err("Couldn't read pc_port ONLINE prop rc=%d\n", rc);
+			return;
+		}
+
+		online = online || prop.intval;
+	}
+
+	if (dc_psy_initialized(chip)) {
+		rc = power_supply_get_property(chip->dc_psy,
+			POWER_SUPPLY_PROP_ONLINE, &prop);
+		if (rc < 0) {
+			pr_err("Couldn't read dc ONLINE prop rc=%d\n", rc);
+			return;
+		}
+
+		online = online || prop.intval;
+	}
+
+	if (chip->online_status == online)
+		return;
+
+	chip->online_status = online;
+	if (online)
+		/* wait 35 seconds for the input to settle */
+		delay_ms = 35000;
+	else
+		/* wait 5 seconds for current to settle during discharge */
+		delay_ms = 5000;
+
+	vote(chip->awake_votable, TTF_PRIMING, true, 0);
+	cancel_delayed_work_sync(&chip->ttf_work);
+	mutex_lock(&chip->ttf.lock);
+	fg_circ_buf_clr(&chip->ttf.ibatt);
+	fg_circ_buf_clr(&chip->ttf.vbatt);
+	chip->ttf.last_ttf = 0;
+	chip->ttf.last_ms = 0;
+	mutex_unlock(&chip->ttf.lock);
+	schedule_delayed_work(&chip->ttf_work, msecs_to_jiffies(delay_ms));
+}
+
+static void restore_cycle_counter(struct fg_chip *chip)
+{
+	int rc = 0, i;
+	u8 data[2];
+
+	if (!chip->cyc_ctr.en)
+		return;
+
+	mutex_lock(&chip->cyc_ctr.lock);
+	for (i = 0; i < BUCKET_COUNT; i++) {
+		rc = fg_sram_read(chip, CYCLE_COUNT_WORD + (i / 2),
+				CYCLE_COUNT_OFFSET + (i % 2) * 2, data, 2,
+				FG_IMA_DEFAULT);
+		if (rc < 0)
+			pr_err("failed to read bucket %d rc=%d\n", i, rc);
+		else
+			chip->cyc_ctr.count[i] = data[0] | data[1] << 8;
+	}
+	mutex_unlock(&chip->cyc_ctr.lock);
+}
+
+static void clear_cycle_counter(struct fg_chip *chip)
+{
+	int rc = 0, i;
+
+	if (!chip->cyc_ctr.en)
+		return;
+
+	mutex_lock(&chip->cyc_ctr.lock);
+	memset(chip->cyc_ctr.count, 0, sizeof(chip->cyc_ctr.count));
+	for (i = 0; i < BUCKET_COUNT; i++) {
+		chip->cyc_ctr.started[i] = false;
+		chip->cyc_ctr.last_soc[i] = 0;
+	}
+	rc = fg_sram_write(chip, CYCLE_COUNT_WORD, CYCLE_COUNT_OFFSET,
+			(u8 *)&chip->cyc_ctr.count,
+			sizeof(chip->cyc_ctr.count),
+			FG_IMA_DEFAULT);
+	if (rc < 0)
+		pr_err("failed to clear cycle counter rc=%d\n", rc);
+
+	mutex_unlock(&chip->cyc_ctr.lock);
+}
+
+static int fg_inc_store_cycle_ctr(struct fg_chip *chip, int bucket)
+{
+	int rc = 0;
+	u16 cyc_count;
+	u8 data[2];
+
+	if (bucket < 0 || (bucket > BUCKET_COUNT - 1))
+		return 0;
+
+	cyc_count = chip->cyc_ctr.count[bucket];
+	cyc_count++;
+	data[0] = cyc_count & 0xFF;
+	data[1] = cyc_count >> 8;
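+	/*
+	 * Each 16-bit counter is packed two per SRAM word: bucket 5, for
+	 * example, lands at word CYCLE_COUNT_WORD + 2 with byte offset
+	 * CYCLE_COUNT_OFFSET + 2, matching the read back in
+	 * restore_cycle_counter().
+	 */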
+
+	rc = fg_sram_write(chip, CYCLE_COUNT_WORD + (bucket / 2),
+			CYCLE_COUNT_OFFSET + (bucket % 2) * 2, data, 2,
+			FG_IMA_DEFAULT);
+	if (rc < 0) {
+		pr_err("failed to write BATT_CYCLE[%d] rc=%d\n",
+			bucket, rc);
+		return rc;
+	}
+
+	chip->cyc_ctr.count[bucket] = cyc_count;
+	fg_dbg(chip, FG_STATUS, "Stored count %d in bucket %d\n", cyc_count,
+		bucket);
+
+	return rc;
+}
+
+static void fg_cycle_counter_update(struct fg_chip *chip)
+{
+	int rc = 0, bucket, i, batt_soc;
+
+	if (!chip->cyc_ctr.en)
+		return;
+
+	mutex_lock(&chip->cyc_ctr.lock);
+	rc = fg_get_sram_prop(chip, FG_SRAM_BATT_SOC, &batt_soc);
+	if (rc < 0) {
+		pr_err("Failed to read battery soc rc: %d\n", rc);
+		goto out;
+	}
+
+	/* We need only the most significant byte here */
+	batt_soc = (u32)batt_soc >> 24;
+
+	/* Find out which bucket the SOC falls in */
+	bucket = batt_soc / BUCKET_SOC_PCT;
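+	/*
+	 * A sketch of the mapping (assuming BUCKET_SOC_PCT = 256 /
+	 * BUCKET_COUNT, i.e. 32 with 8 buckets): an MSB battery SOC of 100
+	 * (~39% of 255) falls into bucket 100 / 32 = 3.
+	 */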
+
+	if (chip->charge_status == POWER_SUPPLY_STATUS_CHARGING) {
+		if (!chip->cyc_ctr.started[bucket]) {
+			chip->cyc_ctr.started[bucket] = true;
+			chip->cyc_ctr.last_soc[bucket] = batt_soc;
+		}
+	} else if (chip->charge_done || !is_input_present(chip)) {
+		for (i = 0; i < BUCKET_COUNT; i++) {
+			if (chip->cyc_ctr.started[i] &&
+				batt_soc > chip->cyc_ctr.last_soc[i] + 2) {
+				rc = fg_inc_store_cycle_ctr(chip, i);
+				if (rc < 0)
+					pr_err("Error in storing cycle_ctr rc: %d\n",
+						rc);
+				chip->cyc_ctr.last_soc[i] = 0;
+				chip->cyc_ctr.started[i] = false;
+			}
+		}
+	}
+
+	fg_dbg(chip, FG_STATUS, "batt_soc: %d bucket: %d chg_status: %d\n",
+		batt_soc, bucket, chip->charge_status);
+out:
+	mutex_unlock(&chip->cyc_ctr.lock);
+}
+
+static int fg_get_cycle_count(struct fg_chip *chip)
+{
+	int count;
+
+	if (!chip->cyc_ctr.en)
+		return 0;
+
+	if ((chip->cyc_ctr.id <= 0) || (chip->cyc_ctr.id > BUCKET_COUNT))
+		return -EINVAL;
+
+	mutex_lock(&chip->cyc_ctr.lock);
+	count = chip->cyc_ctr.count[chip->cyc_ctr.id - 1];
+	mutex_unlock(&chip->cyc_ctr.lock);
+	return count;
+}
+
+static void status_change_work(struct work_struct *work)
+{
+	struct fg_chip *chip = container_of(work,
+			struct fg_chip, status_change_work);
+	union power_supply_propval prop = {0, };
+	int rc, batt_temp;
+
+	if (!batt_psy_initialized(chip)) {
+		fg_dbg(chip, FG_STATUS, "Charger not available?!\n");
+		goto out;
+	}
+
+	rc = power_supply_get_property(chip->batt_psy, POWER_SUPPLY_PROP_STATUS,
+			&prop);
+	if (rc < 0) {
+		pr_err("Error in getting charging status, rc=%d\n", rc);
+		goto out;
+	}
+
+	chip->charge_status = prop.intval;
+	rc = power_supply_get_property(chip->batt_psy,
+			POWER_SUPPLY_PROP_CHARGE_TYPE, &prop);
+	if (rc < 0) {
+		pr_err("Error in getting charge type, rc=%d\n", rc);
+		goto out;
+	}
+
+	chip->charge_type = prop.intval;
+	rc = power_supply_get_property(chip->batt_psy,
+			POWER_SUPPLY_PROP_CHARGE_DONE, &prop);
+	if (rc < 0) {
+		pr_err("Error in getting charge_done, rc=%d\n", rc);
+		goto out;
+	}
+
+	chip->charge_done = prop.intval;
+	fg_cycle_counter_update(chip);
+	fg_cap_learning_update(chip);
+
+	rc = fg_charge_full_update(chip);
+	if (rc < 0)
+		pr_err("Error in charge_full_update, rc=%d\n", rc);
+
+	rc = fg_adjust_recharge_soc(chip);
+	if (rc < 0)
+		pr_err("Error in adjusting recharge_soc, rc=%d\n", rc);
+
+	rc = fg_adjust_recharge_voltage(chip);
+	if (rc < 0)
+		pr_err("Error in adjusting recharge_voltage, rc=%d\n", rc);
+
+	rc = fg_adjust_ki_coeff_dischg(chip);
+	if (rc < 0)
+		pr_err("Error in adjusting ki_coeff_dischg, rc=%d\n", rc);
+
+	rc = fg_esr_fcc_config(chip);
+	if (rc < 0)
+		pr_err("Error in adjusting FCC for ESR, rc=%d\n", rc);
+
+	rc = fg_get_battery_temp(chip, &batt_temp);
+	if (!rc) {
+		rc = fg_slope_limit_config(chip, batt_temp);
+		if (rc < 0)
+			pr_err("Error in configuring slope limiter rc:%d\n",
+				rc);
+
+		rc = fg_adjust_ki_coeff_full_soc(chip, batt_temp);
+		if (rc < 0)
+			pr_err("Error in configuring ki_coeff_full_soc rc:%d\n",
+				rc);
+	}
+
+	fg_ttf_update(chip);
+	chip->prev_charge_status = chip->charge_status;
+out:
+	fg_dbg(chip, FG_POWER_SUPPLY, "charge_status:%d charge_type:%d charge_done:%d\n",
+		chip->charge_status, chip->charge_type, chip->charge_done);
+	pm_relax(chip->dev);
+}
+
+static int fg_bp_params_config(struct fg_chip *chip)
+{
+	int rc = 0;
+	u8 buf;
+
+	/* This SRAM register is only present in v2.0 and above */
+	if (!(chip->wa_flags & PMI8998_V1_REV_WA) &&
+					chip->bp.float_volt_uv > 0) {
+		fg_encode(chip->sp, FG_SRAM_FLOAT_VOLT,
+			chip->bp.float_volt_uv / 1000, &buf);
+		rc = fg_sram_write(chip, chip->sp[FG_SRAM_FLOAT_VOLT].addr_word,
+			chip->sp[FG_SRAM_FLOAT_VOLT].addr_byte, &buf,
+			chip->sp[FG_SRAM_FLOAT_VOLT].len, FG_IMA_DEFAULT);
+		if (rc < 0) {
+			pr_err("Error in writing float_volt, rc=%d\n", rc);
+			return rc;
+		}
+	}
+
+	if (chip->bp.vbatt_full_mv > 0) {
+		rc = fg_set_constant_chg_voltage(chip,
+				chip->bp.vbatt_full_mv * 1000);
+		if (rc < 0)
+			return rc;
+	}
+
+	return rc;
+}
+
+#define PROFILE_LOAD_BIT	BIT(0)
+#define BOOTLOADER_LOAD_BIT	BIT(1)
+#define BOOTLOADER_RESTART_BIT	BIT(2)
+#define HLOS_RESTART_BIT	BIT(3)
+static bool is_profile_load_required(struct fg_chip *chip)
+{
+	u8 buf[PROFILE_COMP_LEN], val;
+	bool profiles_same = false;
+	int rc;
+
+	rc = fg_sram_read(chip, PROFILE_INTEGRITY_WORD,
+			PROFILE_INTEGRITY_OFFSET, &val, 1, FG_IMA_DEFAULT);
+	if (rc < 0) {
+		pr_err("failed to read profile integrity rc=%d\n", rc);
+		return false;
+	}
+
+	/* Check if integrity bit is set */
+	if (val & PROFILE_LOAD_BIT) {
+		fg_dbg(chip, FG_STATUS, "Battery profile integrity bit is set\n");
+
+		/* Whitelist the values */
+		val &= ~PROFILE_LOAD_BIT;
+		if (val != HLOS_RESTART_BIT && val != BOOTLOADER_LOAD_BIT &&
+			val != (BOOTLOADER_LOAD_BIT | BOOTLOADER_RESTART_BIT)) {
+			val |= PROFILE_LOAD_BIT;
+			pr_warn("Garbage value in profile integrity word: 0x%x\n",
+				val);
+			return true;
+		}
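+		/*
+		 * With PROFILE_LOAD_BIT masked off, the accepted residues
+		 * are HLOS_RESTART_BIT (0x8), BOOTLOADER_LOAD_BIT (0x2) and
+		 * BOOTLOADER_LOAD_BIT | BOOTLOADER_RESTART_BIT (0x6);
+		 * anything else is treated as a corrupted integrity word
+		 * above.
+		 */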
+
+		rc = fg_sram_read(chip, PROFILE_LOAD_WORD, PROFILE_LOAD_OFFSET,
+				buf, PROFILE_COMP_LEN, FG_IMA_DEFAULT);
+		if (rc < 0) {
+			pr_err("Error in reading battery profile, rc:%d\n", rc);
+			return false;
+		}
+		profiles_same = memcmp(chip->batt_profile, buf,
+					PROFILE_COMP_LEN) == 0;
+		if (profiles_same) {
+			fg_dbg(chip, FG_STATUS, "Battery profile is same, not loading it\n");
+			return false;
+		}
+
+		if (!chip->dt.force_load_profile) {
+			pr_warn("Profiles don't match, skipping loading since force_load_profile is disabled\n");
+			if (fg_profile_dump) {
+				pr_info("FG: loaded profile:\n");
+				dump_sram(buf, PROFILE_LOAD_WORD,
+					PROFILE_COMP_LEN);
+				pr_info("FG: available profile:\n");
+				dump_sram(chip->batt_profile, PROFILE_LOAD_WORD,
+					PROFILE_LEN);
+			}
+			return false;
+		}
+
+		fg_dbg(chip, FG_STATUS, "Profiles are different, loading the correct one\n");
+	} else {
+		fg_dbg(chip, FG_STATUS, "Profile integrity bit is not set\n");
+		if (fg_profile_dump) {
+			pr_info("FG: profile to be loaded:\n");
+			dump_sram(chip->batt_profile, PROFILE_LOAD_WORD,
+				PROFILE_LEN);
+		}
+	}
+	return true;
+}
+
+static void fg_update_batt_profile(struct fg_chip *chip)
+{
+	int rc, offset;
+	u8 val;
+
+	rc = fg_sram_read(chip, PROFILE_INTEGRITY_WORD,
+			SW_CONFIG_OFFSET, &val, 1, FG_IMA_DEFAULT);
+	if (rc < 0) {
+		pr_err("Error in reading SW_CONFIG_OFFSET, rc=%d\n", rc);
+		return;
+	}
+
+	/*
+	 * If RCONN has not been updated, there is no need to update the
+	 * battery profile. Otherwise, update the battery profile so that
+	 * the profile modified by the bootloader or HLOS matches the
+	 * profile read from the device tree.
+	 */
+	if (!(val & RCONN_CONFIG_BIT))
+		return;
+
+	rc = fg_sram_read(chip, ESR_RSLOW_CHG_WORD,
+			ESR_RSLOW_CHG_OFFSET, &val, 1, FG_IMA_DEFAULT);
+	if (rc < 0) {
+		pr_err("Error in reading ESR_RSLOW_CHG_OFFSET, rc=%d\n", rc);
+		return;
+	}
+	offset = (ESR_RSLOW_CHG_WORD - PROFILE_LOAD_WORD) * 4
+			+ ESR_RSLOW_CHG_OFFSET;
+	chip->batt_profile[offset] = val;
+
+	rc = fg_sram_read(chip, ESR_RSLOW_DISCHG_WORD,
+			ESR_RSLOW_DISCHG_OFFSET, &val, 1, FG_IMA_DEFAULT);
+	if (rc < 0) {
+		pr_err("Error in reading ESR_RSLOW_DISCHG_OFFSET, rc=%d\n", rc);
+		return;
+	}
+	offset = (ESR_RSLOW_DISCHG_WORD - PROFILE_LOAD_WORD) * 4
+			+ ESR_RSLOW_DISCHG_OFFSET;
+	chip->batt_profile[offset] = val;
+}
+
+static void clear_battery_profile(struct fg_chip *chip)
+{
+	u8 val = 0;
+	int rc;
+
+	rc = fg_sram_write(chip, PROFILE_INTEGRITY_WORD,
+			PROFILE_INTEGRITY_OFFSET, &val, 1, FG_IMA_DEFAULT);
+	if (rc < 0)
+		pr_err("failed to write profile integrity rc=%d\n", rc);
+}
+
+#define SOC_READY_WAIT_MS		2000
+static int __fg_restart(struct fg_chip *chip)
+{
+	int rc, msoc;
+	bool tried_again = false;
+
+	rc = fg_get_prop_capacity(chip, &msoc);
+	if (rc < 0) {
+		pr_err("Error in getting capacity, rc=%d\n", rc);
+		return rc;
+	}
+
+	chip->last_soc = msoc;
+	chip->fg_restarting = true;
+	reinit_completion(&chip->soc_ready);
+	rc = fg_masked_write(chip, BATT_SOC_RESTART(chip), RESTART_GO_BIT,
+			RESTART_GO_BIT);
+	if (rc < 0) {
+		pr_err("Error in writing to %04x, rc=%d\n",
+			BATT_SOC_RESTART(chip), rc);
+		goto out;
+	}
+
+wait:
+	rc = wait_for_completion_interruptible_timeout(&chip->soc_ready,
+		msecs_to_jiffies(SOC_READY_WAIT_MS));
+
+	/* If we were interrupted, wait one more time. */
+	if (rc == -ERESTARTSYS && !tried_again) {
+		tried_again = true;
+		goto wait;
+	} else if (rc <= 0) {
+		pr_err("wait for soc_ready timed out rc=%d\n", rc);
+	}
+
+	rc = fg_masked_write(chip, BATT_SOC_RESTART(chip), RESTART_GO_BIT, 0);
+	if (rc < 0) {
+		pr_err("Error in writing to %04x, rc=%d\n",
+			BATT_SOC_RESTART(chip), rc);
+		goto out;
+	}
+out:
+	chip->fg_restarting = false;
+	return rc;
+}
+
+static void profile_load_work(struct work_struct *work)
+{
+	struct fg_chip *chip = container_of(work,
+				struct fg_chip,
+				profile_load_work.work);
+	u8 buf[2], val;
+	int rc;
+
+	vote(chip->awake_votable, PROFILE_LOAD, true, 0);
+
+	rc = fg_get_batt_id(chip);
+	if (rc < 0) {
+		pr_err("Error in getting battery id, rc:%d\n", rc);
+		goto out;
+	}
+
+	rc = fg_get_batt_profile(chip);
+	if (rc < 0) {
+		pr_warn("profile for batt_id=%dKOhms not found; using OTP, rc:%d\n",
+			chip->batt_id_ohms / 1000, rc);
+		goto out;
+	}
+
+	if (!chip->profile_available)
+		goto out;
+
+	fg_update_batt_profile(chip);
+
+	if (!is_profile_load_required(chip))
+		goto done;
+
+	clear_cycle_counter(chip);
+	mutex_lock(&chip->cl.lock);
+	chip->cl.learned_cc_uah = 0;
+	chip->cl.active = false;
+	mutex_unlock(&chip->cl.lock);
+
+	fg_dbg(chip, FG_STATUS, "profile loading started\n");
+	rc = fg_masked_write(chip, BATT_SOC_RESTART(chip), RESTART_GO_BIT, 0);
+	if (rc < 0) {
+		pr_err("Error in writing to %04x, rc=%d\n",
+			BATT_SOC_RESTART(chip), rc);
+		goto out;
+	}
+
+	/* load battery profile */
+	rc = fg_sram_write(chip, PROFILE_LOAD_WORD, PROFILE_LOAD_OFFSET,
+			chip->batt_profile, PROFILE_LEN, FG_IMA_ATOMIC);
+	if (rc < 0) {
+		pr_err("Error in writing battery profile, rc:%d\n", rc);
+		goto out;
+	}
+
+	rc = __fg_restart(chip);
+	if (rc < 0) {
+		pr_err("Error in restarting FG, rc=%d\n", rc);
+		goto out;
+	}
+
+	fg_dbg(chip, FG_STATUS, "SOC is ready\n");
+
+	/* Set the profile integrity bit */
+	val = HLOS_RESTART_BIT | PROFILE_LOAD_BIT;
+	rc = fg_sram_write(chip, PROFILE_INTEGRITY_WORD,
+			PROFILE_INTEGRITY_OFFSET, &val, 1, FG_IMA_DEFAULT);
+	if (rc < 0) {
+		pr_err("failed to write profile integrity rc=%d\n", rc);
+		goto out;
+	}
+
+done:
+	rc = fg_bp_params_config(chip);
+	if (rc < 0)
+		pr_err("Error in configuring battery profile params, rc:%d\n",
+			rc);
+
+	rc = fg_sram_read(chip, NOM_CAP_WORD, NOM_CAP_OFFSET, buf, 2,
+			FG_IMA_DEFAULT);
+	if (rc < 0) {
+		pr_err("Error in reading %04x[%d] rc=%d\n", NOM_CAP_WORD,
+			NOM_CAP_OFFSET, rc);
+	} else {
+		chip->cl.nom_cap_uah = (int)(buf[0] | buf[1] << 8) * 1000;
+		rc = fg_load_learned_cap_from_sram(chip);
+		if (rc < 0)
+			pr_err("Error in loading capacity learning data, rc:%d\n",
+				rc);
+	}
+
+	rc = fg_rconn_config(chip);
+	if (rc < 0)
+		pr_err("Error in configuring Rconn, rc=%d\n", rc);
+
+	batt_psy_initialized(chip);
+	fg_notify_charger(chip);
+	chip->profile_loaded = true;
+	fg_dbg(chip, FG_STATUS, "profile loaded successfully\n");
+out:
+	chip->soc_reporting_ready = true;
+	vote(chip->awake_votable, PROFILE_LOAD, false, 0);
+}
+
+static void sram_dump_work(struct work_struct *work)
+{
+	struct fg_chip *chip = container_of(work, struct fg_chip,
+					    sram_dump_work.work);
+	u8 buf[FG_SRAM_LEN];
+	int rc;
+	s64 timestamp_ms, quotient;
+	s32 remainder;
+
+	rc = fg_sram_read(chip, 0, 0, buf, FG_SRAM_LEN, FG_IMA_DEFAULT);
+	if (rc < 0) {
+		pr_err("Error in reading FG SRAM, rc:%d\n", rc);
+		goto resched;
+	}
+
+	timestamp_ms = ktime_to_ms(ktime_get_boottime());
+	quotient = div_s64_rem(timestamp_ms, 1000, &remainder);
+	fg_dbg(chip, FG_STATUS, "SRAM Dump Started at %lld.%d\n",
+		quotient, remainder);
+	dump_sram(buf, 0, FG_SRAM_LEN);
+	timestamp_ms = ktime_to_ms(ktime_get_boottime());
+	quotient = div_s64_rem(timestamp_ms, 1000, &remainder);
+	fg_dbg(chip, FG_STATUS, "SRAM Dump done at %lld.%d\n",
+		quotient, remainder);
+resched:
+	schedule_delayed_work(&chip->sram_dump_work,
+			msecs_to_jiffies(fg_sram_dump_period_ms));
+}
+
+static int fg_sram_dump_sysfs(const char *val, const struct kernel_param *kp)
+{
+	int rc;
+	struct power_supply *bms_psy;
+	struct fg_chip *chip;
+	bool old_val = fg_sram_dump;
+
+	rc = param_set_bool(val, kp);
+	if (rc) {
+		pr_err("Unable to set fg_sram_dump: %d\n", rc);
+		return rc;
+	}
+
+	if (fg_sram_dump == old_val)
+		return 0;
+
+	bms_psy = power_supply_get_by_name("bms");
+	if (!bms_psy) {
+		pr_err("bms psy not found\n");
+		return -ENODEV;
+	}
+
+	chip = power_supply_get_drvdata(bms_psy);
+	if (fg_sram_dump)
+		schedule_delayed_work(&chip->sram_dump_work,
+				msecs_to_jiffies(fg_sram_dump_period_ms));
+	else
+		cancel_delayed_work_sync(&chip->sram_dump_work);
+
+	return 0;
+}
+
+static struct kernel_param_ops fg_sram_dump_ops = {
+	.set = fg_sram_dump_sysfs,
+	.get = param_get_bool,
+};
+
+module_param_cb(sram_dump_en, &fg_sram_dump_ops, &fg_sram_dump, 0644);
+
+static int fg_restart_sysfs(const char *val, const struct kernel_param *kp)
+{
+	int rc;
+	struct power_supply *bms_psy;
+	struct fg_chip *chip;
+
+	rc = param_set_int(val, kp);
+	if (rc) {
+		pr_err("Unable to set fg_restart: %d\n", rc);
+		return rc;
+	}
+
+	if (fg_restart != 1) {
+		pr_err("Bad value %d\n", fg_restart);
+		return -EINVAL;
+	}
+
+	bms_psy = power_supply_get_by_name("bms");
+	if (!bms_psy) {
+		pr_err("bms psy not found\n");
+		return 0;
+	}
+
+	chip = power_supply_get_drvdata(bms_psy);
+	rc = __fg_restart(chip);
+	if (rc < 0) {
+		pr_err("Error in restarting FG, rc=%d\n", rc);
+		return rc;
+	}
+
+	pr_info("FG restart done\n");
+	return rc;
+}
+
+static struct kernel_param_ops fg_restart_ops = {
+	.set = fg_restart_sysfs,
+	.get = param_get_int,
+};
+
+module_param_cb(restart, &fg_restart_ops, &fg_restart, 0644);
+
+#define HOURS_TO_SECONDS	3600
+#define OCV_SLOPE_UV		10869
+#define MILLI_UNIT		1000
+#define MICRO_UNIT		1000000
+#define NANO_UNIT		1000000000
+static int fg_get_time_to_full_locked(struct fg_chip *chip, int *val)
+{
+	int rc, ibatt_avg, vbatt_avg, rbatt, msoc, full_soc, act_cap_mah,
+		i_cc2cv, soc_cc2cv, tau, divisor, iterm, ttf_mode,
+		i, soc_per_step, msoc_this_step, msoc_next_step,
+		ibatt_this_step, t_predicted_this_step, ttf_slope,
+		t_predicted_cv, t_predicted = 0;
+	s64 delta_ms;
+
+	if (chip->bp.float_volt_uv <= 0) {
+		pr_err("battery profile is not loaded\n");
+		return -ENODATA;
+	}
+
+	if (!batt_psy_initialized(chip)) {
+		fg_dbg(chip, FG_TTF, "charger is not available\n");
+		return -ENODATA;
+	}
+
+	rc = fg_get_prop_capacity(chip, &msoc);
+	if (rc < 0) {
+		pr_err("failed to get msoc rc=%d\n", rc);
+		return rc;
+	}
+	fg_dbg(chip, FG_TTF, "msoc=%d\n", msoc);
+
+	/* the battery is considered full if the SOC is 100% */
+	if (msoc >= 100) {
+		*val = 0;
+		return 0;
+	}
+
+	if (is_qnovo_en(chip))
+		ttf_mode = TTF_MODE_QNOVO;
+	else
+		ttf_mode = TTF_MODE_NORMAL;
+
+	/* when switching TTF algorithms the TTF needs to be reset */
+	if (chip->ttf.mode != ttf_mode) {
+		fg_circ_buf_clr(&chip->ttf.ibatt);
+		fg_circ_buf_clr(&chip->ttf.vbatt);
+		chip->ttf.last_ttf = 0;
+		chip->ttf.last_ms = 0;
+		chip->ttf.mode = ttf_mode;
+	}
+
+	/* at least 10 samples are required to produce a stable IBATT */
+	if (chip->ttf.ibatt.size < 10) {
+		*val = -1;
+		return 0;
+	}
+
+	rc = fg_circ_buf_median(&chip->ttf.ibatt, &ibatt_avg);
+	if (rc < 0) {
+		pr_err("failed to get IBATT AVG rc=%d\n", rc);
+		return rc;
+	}
+
+	rc = fg_circ_buf_median(&chip->ttf.vbatt, &vbatt_avg);
+	if (rc < 0) {
+		pr_err("failed to get VBATT AVG rc=%d\n", rc);
+		return rc;
+	}
+
+	ibatt_avg = -ibatt_avg / MILLI_UNIT;
+	vbatt_avg /= MILLI_UNIT;
+
+	/* clamp ibatt_avg to iterm */
+	if (ibatt_avg < abs(chip->dt.sys_term_curr_ma))
+		ibatt_avg = abs(chip->dt.sys_term_curr_ma);
+
+	fg_dbg(chip, FG_TTF, "ibatt_avg=%d\n", ibatt_avg);
+	fg_dbg(chip, FG_TTF, "vbatt_avg=%d\n", vbatt_avg);
+
+	rc = fg_get_battery_resistance(chip, &rbatt);
+	if (rc < 0) {
+		pr_err("failed to get battery resistance rc=%d\n", rc);
+		return rc;
+	}
+
+	rbatt /= MILLI_UNIT;
+	fg_dbg(chip, FG_TTF, "rbatt=%d\n", rbatt);
+
+	rc = fg_get_sram_prop(chip, FG_SRAM_ACT_BATT_CAP, &act_cap_mah);
+	if (rc < 0) {
+		pr_err("failed to get ACT_BATT_CAP rc=%d\n", rc);
+		return rc;
+	}
+
+	rc = fg_get_sram_prop(chip, FG_SRAM_FULL_SOC, &full_soc);
+	if (rc < 0) {
+		pr_err("failed to get full soc rc=%d\n", rc);
+		return rc;
+	}
+	full_soc = DIV_ROUND_CLOSEST(((u16)full_soc >> 8) * FULL_CAPACITY,
+								FULL_SOC_RAW);
+	act_cap_mah = full_soc * act_cap_mah / 100;
+	fg_dbg(chip, FG_TTF, "act_cap_mah=%d\n", act_cap_mah);
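+	/*
+	 * Example (assuming FULL_SOC_RAW = 255): a FULL_SOC upper byte of
+	 * 250 scales to DIV_ROUND_CLOSEST(250 * 100, 255) = 98, so a nominal
+	 * 3000 mAh ACT_BATT_CAP is derated to 2940 mAh for this estimate.
+	 */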
+
+	/* estimated battery current at the CC to CV transition */
+	switch (chip->ttf.mode) {
+	case TTF_MODE_NORMAL:
+		i_cc2cv = ibatt_avg * vbatt_avg /
+			max(MILLI_UNIT, chip->bp.float_volt_uv / MILLI_UNIT);
+		break;
+	case TTF_MODE_QNOVO:
+		i_cc2cv = min(
+			chip->ttf.cc_step.arr[MAX_CC_STEPS - 1] / MILLI_UNIT,
+			ibatt_avg * vbatt_avg /
+			max(MILLI_UNIT, chip->bp.float_volt_uv / MILLI_UNIT));
+		break;
+	default:
+		pr_err("TTF mode %d is not supported\n", chip->ttf.mode);
+		break;
+	}
+	fg_dbg(chip, FG_TTF, "i_cc2cv=%d\n", i_cc2cv);
+
+	/* if we are already in CV state then we can skip estimating CC */
+	if (chip->charge_type == POWER_SUPPLY_CHARGE_TYPE_TAPER)
+		goto cv_estimate;
+
+	/* estimated SOC at the CC to CV transition */
+	soc_cc2cv = DIV_ROUND_CLOSEST(rbatt * i_cc2cv, OCV_SLOPE_UV);
+	soc_cc2cv = 100 - soc_cc2cv;
+	fg_dbg(chip, FG_TTF, "soc_cc2cv=%d\n", soc_cc2cv);
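+	/*
+	 * Example: with rbatt = 150 mOhm and i_cc2cv = 2000 mA, the IR drop
+	 * rbatt * i_cc2cv = 300000 uV spans 300000 / OCV_SLOPE_UV ~= 28% of
+	 * SOC, so the CV entry point is estimated at 100 - 28 = 72% SOC.
+	 */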
+
+	switch (chip->ttf.mode) {
+	case TTF_MODE_NORMAL:
+		if (soc_cc2cv - msoc <= 0)
+			goto cv_estimate;
+
+		divisor = max(100, (ibatt_avg + i_cc2cv) / 2 * 100);
+		t_predicted = div_s64((s64)act_cap_mah * (soc_cc2cv - msoc) *
+						HOURS_TO_SECONDS, divisor);
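+		/*
+		 * E.g. act_cap_mah = 3000, msoc = 50, soc_cc2cv = 72 and an
+		 * average CC current of (2000 + 1800) / 2 = 1900 mA give
+		 * 3000 * 22 * 3600 / 190000 ~= 1250 s for the CC phase.
+		 */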
+		break;
+	case TTF_MODE_QNOVO:
+		soc_per_step = 100 / MAX_CC_STEPS;
+		for (i = msoc / soc_per_step; i < MAX_CC_STEPS - 1; ++i) {
+			msoc_next_step = (i + 1) * soc_per_step;
+			if (i == msoc / soc_per_step)
+				msoc_this_step = msoc;
+			else
+				msoc_this_step = i * soc_per_step;
+
+			/* scale ibatt by 85% to account for discharge pulses */
+			ibatt_this_step = min(
+					chip->ttf.cc_step.arr[i] / MILLI_UNIT,
+					ibatt_avg) * 85 / 100;
+			divisor = max(100, ibatt_this_step * 100);
+			t_predicted_this_step = div_s64((s64)act_cap_mah *
+					(msoc_next_step - msoc_this_step) *
+					HOURS_TO_SECONDS, divisor);
+			t_predicted += t_predicted_this_step;
+			fg_dbg(chip, FG_TTF, "[%d, %d] ma=%d t=%d\n",
+				msoc_this_step, msoc_next_step,
+				ibatt_this_step, t_predicted_this_step);
+		}
+		break;
+	default:
+		pr_err("TTF mode %d is not supported\n", chip->ttf.mode);
+		break;
+	}
+
+cv_estimate:
+	fg_dbg(chip, FG_TTF, "t_predicted_cc=%d\n", t_predicted);
+
+	iterm = max(100, abs(chip->dt.sys_term_curr_ma) + 200);
+	fg_dbg(chip, FG_TTF, "iterm=%d\n", iterm);
+
+	if (chip->charge_type == POWER_SUPPLY_CHARGE_TYPE_TAPER)
+		tau = max(MILLI_UNIT, ibatt_avg * MILLI_UNIT / iterm);
+	else
+		tau = max(MILLI_UNIT, i_cc2cv * MILLI_UNIT / iterm);
+
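+	/*
+	 * A sketch of the lookup (assuming fg_ln_table maps this
+	 * MILLI_UNIT-scaled ratio to its natural log, also scaled by
+	 * MILLI_UNIT): i_cc2cv = 1500 mA and iterm = 700 mA give tau = 2142
+	 * before the lookup and ~762 after it, since ln(2.142) ~= 0.762.
+	 */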
+	rc = fg_lerp(fg_ln_table, ARRAY_SIZE(fg_ln_table), tau, &tau);
+	if (rc < 0) {
+		pr_err("failed to interpolate tau rc=%d\n", rc);
+		return rc;
+	}
+
+	/* tau is scaled linearly from 95% to 100% SOC */
+	if (msoc >= 95)
+		tau = tau * 2 * (100 - msoc) / 10;
+
+	fg_dbg(chip, FG_TTF, "tau=%d\n", tau);
+	t_predicted_cv = div_s64((s64)act_cap_mah * rbatt * tau *
+						HOURS_TO_SECONDS, NANO_UNIT);
+	fg_dbg(chip, FG_TTF, "t_predicted_cv=%d\n", t_predicted_cv);
+	t_predicted += t_predicted_cv;
+
+	fg_dbg(chip, FG_TTF, "t_predicted_prefilter=%d\n", t_predicted);
+	if (chip->ttf.last_ms != 0) {
+		delta_ms = ktime_ms_delta(ktime_get_boottime(),
+					  ms_to_ktime(chip->ttf.last_ms));
+		if (delta_ms > 10000) {
+			ttf_slope = div64_s64(
+				(s64)(t_predicted - chip->ttf.last_ttf) *
+				MICRO_UNIT, delta_ms);
+			if (ttf_slope > -100)
+				ttf_slope = -100;
+			else if (ttf_slope < -2000)
+				ttf_slope = -2000;
+
+			t_predicted = div_s64(
+				(s64)ttf_slope * delta_ms, MICRO_UNIT) +
+				chip->ttf.last_ttf;
+			fg_dbg(chip, FG_TTF, "ttf_slope=%d\n", ttf_slope);
+		} else {
+			t_predicted = chip->ttf.last_ttf;
+		}
+	}
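+	/*
+	 * The ttf_slope clamp above keeps the filtered TTF falling at
+	 * between 0.1x and 2x of real time: a slope of -1000 is exactly one
+	 * second of TTF per second of wall clock, e.g. -10 s over a 10000 ms
+	 * window gives -10 * MICRO_UNIT / 10000 = -1000.
+	 */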
+
+	/* clamp the ttf to 0 */
+	if (t_predicted < 0)
+		t_predicted = 0;
+
+	fg_dbg(chip, FG_TTF, "t_predicted_postfilter=%d\n", t_predicted);
+	*val = t_predicted;
+	return 0;
+}
+
+static int fg_get_time_to_full(struct fg_chip *chip, int *val)
+{
+	int rc;
+
+	mutex_lock(&chip->ttf.lock);
+	rc = fg_get_time_to_full_locked(chip, val);
+	mutex_unlock(&chip->ttf.lock);
+	return rc;
+}
+
+#define CENTI_ICORRECT_C0	105
+#define CENTI_ICORRECT_C1	20
+static int fg_get_time_to_empty(struct fg_chip *chip, int *val)
+{
+	int rc, ibatt_avg, msoc, full_soc, act_cap_mah, divisor;
+
+	rc = fg_circ_buf_median(&chip->ttf.ibatt, &ibatt_avg);
+	if (rc < 0) {
+		/* try to get instantaneous current */
+		rc = fg_get_battery_current(chip, &ibatt_avg);
+		if (rc < 0) {
+			pr_err("failed to get battery current, rc=%d\n", rc);
+			return rc;
+		}
+	}
+
+	ibatt_avg /= MILLI_UNIT;
+	/* clamp ibatt_avg to 100mA */
+	if (ibatt_avg < 100)
+		ibatt_avg = 100;
+
+	rc = fg_get_prop_capacity(chip, &msoc);
+	if (rc < 0) {
+		pr_err("Error in getting capacity, rc=%d\n", rc);
+		return rc;
+	}
+
+	rc = fg_get_sram_prop(chip, FG_SRAM_ACT_BATT_CAP, &act_cap_mah);
+	if (rc < 0) {
+		pr_err("Error in getting ACT_BATT_CAP, rc=%d\n", rc);
+		return rc;
+	}
+
+	rc = fg_get_sram_prop(chip, FG_SRAM_FULL_SOC, &full_soc);
+	if (rc < 0) {
+		pr_err("failed to get full soc rc=%d\n", rc);
+		return rc;
+	}
+	full_soc = DIV_ROUND_CLOSEST(((u16)full_soc >> 8) * FULL_CAPACITY,
+								FULL_SOC_RAW);
+	act_cap_mah = full_soc * act_cap_mah / 100;
+
+	divisor = CENTI_ICORRECT_C0 * 100 + CENTI_ICORRECT_C1 * msoc;
+	divisor = ibatt_avg * divisor / 100;
+	divisor = max(100, divisor);
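+	/*
+	 * Worked example: msoc = 50 and ibatt_avg = 500 mA give a corrected
+	 * divisor of 500 * (105 * 100 + 20 * 50) / 100 = 57500, so a 3000
+	 * mAh pack predicts 3000 * 50 * 3600 / 57500 ~= 9391 s to empty; the
+	 * CENTI_ICORRECT terms inflate the assumed drain as SOC rises.
+	 */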
+	*val = act_cap_mah * msoc * HOURS_TO_SECONDS / divisor;
+	return 0;
+}
+
+static int fg_update_maint_soc(struct fg_chip *chip)
+{
+	int rc = 0, msoc;
+
+	if (!chip->dt.linearize_soc)
+		return 0;
+
+	mutex_lock(&chip->charge_full_lock);
+	if (chip->delta_soc <= 0)
+		goto out;
+
+	rc = fg_get_msoc(chip, &msoc);
+	if (rc < 0) {
+		pr_err("Error in getting msoc, rc=%d\n", rc);
+		goto out;
+	}
+
+	if (msoc > chip->maint_soc) {
+		/*
+		 * When the monotonic SOC goes above maintenance SOC, we should
+		 * stop showing the maintenance SOC.
+		 */
+		chip->delta_soc = 0;
+		chip->maint_soc = 0;
+	} else if (msoc <= chip->last_msoc) {
+		/* MSOC is decreasing. Decrease maintenance SOC as well */
+		chip->maint_soc -= 1;
+		if (!(msoc % 10)) {
+			/*
+			 * Reduce the maintenance SOC additionally by 1 whenever
+			 * it crosses a SOC multiple of 10.
+			 */
+			chip->maint_soc -= 1;
+			chip->delta_soc -= 1;
+		}
+	}
+
+	fg_dbg(chip, FG_IRQ, "msoc: %d last_msoc: %d maint_soc: %d delta_soc: %d\n",
+		msoc, chip->last_msoc, chip->maint_soc, chip->delta_soc);
+	chip->last_msoc = msoc;
+out:
+	mutex_unlock(&chip->charge_full_lock);
+	return rc;
+}
+
+static int fg_esr_validate(struct fg_chip *chip)
+{
+	int rc, esr_uohms;
+	u8 buf[2];
+
+	if (chip->dt.esr_clamp_mohms <= 0)
+		return 0;
+
+	rc = fg_get_sram_prop(chip, FG_SRAM_ESR, &esr_uohms);
+	if (rc < 0) {
+		pr_err("failed to get ESR, rc=%d\n", rc);
+		return rc;
+	}
+
+	if (esr_uohms >= chip->dt.esr_clamp_mohms * 1000) {
+		pr_debug("ESR %d is > ESR_clamp\n", esr_uohms);
+		return 0;
+	}
+
+	esr_uohms = chip->dt.esr_clamp_mohms * 1000;
+	fg_encode(chip->sp, FG_SRAM_ESR, esr_uohms, buf);
+	rc = fg_sram_write(chip, chip->sp[FG_SRAM_ESR].addr_word,
+			chip->sp[FG_SRAM_ESR].addr_byte, buf,
+			chip->sp[FG_SRAM_ESR].len, FG_IMA_DEFAULT);
+	if (rc < 0) {
+		pr_err("Error in writing ESR, rc=%d\n", rc);
+		return rc;
+	}
+
+	fg_dbg(chip, FG_STATUS, "ESR clamped to %duOhms\n", esr_uohms);
+	return 0;
+}
+
+static int fg_force_esr_meas(struct fg_chip *chip)
+{
+	int rc;
+	int esr_uohms;
+
+	mutex_lock(&chip->qnovo_esr_ctrl_lock);
+	/* force esr extraction enable */
+	rc = fg_sram_masked_write(chip, ESR_EXTRACTION_ENABLE_WORD,
+			ESR_EXTRACTION_ENABLE_OFFSET, BIT(0), BIT(0),
+			FG_IMA_DEFAULT);
+	if (rc < 0) {
+		pr_err("failed to enable esr extn rc=%d\n", rc);
+		goto out;
+	}
+
+	rc = fg_masked_write(chip, BATT_INFO_QNOVO_CFG(chip),
+			LD_REG_CTRL_BIT, 0);
+	if (rc < 0) {
+		pr_err("Error in configuring qnovo_cfg rc=%d\n", rc);
+		goto out;
+	}
+
+	rc = fg_masked_write(chip, BATT_INFO_TM_MISC1(chip),
+			ESR_REQ_CTL_BIT | ESR_REQ_CTL_EN_BIT,
+			ESR_REQ_CTL_BIT | ESR_REQ_CTL_EN_BIT);
+	if (rc < 0) {
+		pr_err("Error in configuring force ESR rc=%d\n", rc);
+		goto out;
+	}
+
+	/*
+	 * Release and grab the lock again after 1.5 seconds so that prepare
+	 * callback can succeed if the request comes in between.
+	 */
+	mutex_unlock(&chip->qnovo_esr_ctrl_lock);
+
+	/* wait 1.5 seconds for hw to measure ESR */
+	msleep(1500);
+
+	mutex_lock(&chip->qnovo_esr_ctrl_lock);
+	rc = fg_masked_write(chip, BATT_INFO_TM_MISC1(chip),
+			ESR_REQ_CTL_BIT | ESR_REQ_CTL_EN_BIT,
+			0);
+	if (rc < 0) {
+		pr_err("Error in restoring force ESR rc=%d\n", rc);
+		goto out;
+	}
+
+	/* If qnovo is disabled, then leave ESR extraction enabled */
+	if (!chip->qnovo_enable)
+		goto done;
+
+	rc = fg_masked_write(chip, BATT_INFO_QNOVO_CFG(chip),
+			LD_REG_CTRL_BIT, LD_REG_CTRL_BIT);
+	if (rc < 0) {
+		pr_err("Error in restoring qnovo_cfg rc=%d\n", rc);
+		goto out;
+	}
+
+	/* force esr extraction disable */
+	rc = fg_sram_masked_write(chip, ESR_EXTRACTION_ENABLE_WORD,
+			ESR_EXTRACTION_ENABLE_OFFSET, BIT(0), 0,
+			FG_IMA_DEFAULT);
+	if (rc < 0) {
+		pr_err("failed to disable esr extn rc=%d\n", rc);
+		goto out;
+	}
+
+done:
+	fg_get_battery_resistance(chip, &esr_uohms);
+	fg_dbg(chip, FG_STATUS, "ESR uohms = %d\n", esr_uohms);
+out:
+	mutex_unlock(&chip->qnovo_esr_ctrl_lock);
+	return rc;
+}
+
+static int fg_prepare_for_qnovo(struct fg_chip *chip, int qnovo_enable)
+{
+	int rc = 0;
+
+	mutex_lock(&chip->qnovo_esr_ctrl_lock);
+	/* force esr extraction disable when qnovo enables */
+	rc = fg_sram_masked_write(chip, ESR_EXTRACTION_ENABLE_WORD,
+			ESR_EXTRACTION_ENABLE_OFFSET,
+			BIT(0), qnovo_enable ? 0 : BIT(0),
+			FG_IMA_DEFAULT);
+	if (rc < 0) {
+		pr_err("Error in configuring esr extraction rc=%d\n", rc);
+		goto out;
+	}
+
+	rc = fg_masked_write(chip, BATT_INFO_QNOVO_CFG(chip),
+			LD_REG_CTRL_BIT,
+			qnovo_enable ? LD_REG_CTRL_BIT : 0);
+	if (rc < 0) {
+		pr_err("Error in configuring qnovo_cfg rc=%d\n", rc);
+		goto out;
+	}
+
+	fg_dbg(chip, FG_STATUS, "%s for Qnovo\n",
+		qnovo_enable ? "Prepared" : "Unprepared");
+	chip->qnovo_enable = qnovo_enable;
+out:
+	mutex_unlock(&chip->qnovo_esr_ctrl_lock);
+	return rc;
+}
+
+static void ttf_work(struct work_struct *work)
+{
+	struct fg_chip *chip = container_of(work, struct fg_chip,
+					    ttf_work.work);
+	int rc, ibatt_now, vbatt_now, ttf;
+	ktime_t ktime_now;
+
+	mutex_lock(&chip->ttf.lock);
+	if (chip->charge_status != POWER_SUPPLY_STATUS_CHARGING &&
+			chip->charge_status != POWER_SUPPLY_STATUS_DISCHARGING)
+		goto end_work;
+
+	rc = fg_get_battery_current(chip, &ibatt_now);
+	if (rc < 0) {
+		pr_err("failed to get battery current, rc=%d\n", rc);
+		goto end_work;
+	}
+
+	rc = fg_get_battery_voltage(chip, &vbatt_now);
+	if (rc < 0) {
+		pr_err("failed to get battery voltage, rc=%d\n", rc);
+		goto end_work;
+	}
+
+	fg_circ_buf_add(&chip->ttf.ibatt, ibatt_now);
+	fg_circ_buf_add(&chip->ttf.vbatt, vbatt_now);
+
+	if (chip->charge_status == POWER_SUPPLY_STATUS_CHARGING) {
+		rc = fg_get_time_to_full_locked(chip, &ttf);
+		if (rc < 0) {
+			pr_err("failed to get ttf, rc=%d\n", rc);
+			goto end_work;
+		}
+
+		/* keep the wake lock and prime the IBATT and VBATT buffers */
+		if (ttf < 0) {
+			/* delay for one FG cycle */
+			schedule_delayed_work(&chip->ttf_work,
+							msecs_to_jiffies(1500));
+			mutex_unlock(&chip->ttf.lock);
+			return;
+		}
+
+		/* update the TTF reference point every minute */
+		ktime_now = ktime_get_boottime();
+		if (ktime_ms_delta(ktime_now,
+				   ms_to_ktime(chip->ttf.last_ms)) > 60000 ||
+				   chip->ttf.last_ms == 0) {
+			chip->ttf.last_ttf = ttf;
+			chip->ttf.last_ms = ktime_to_ms(ktime_now);
+		}
+	}
+
+	/* recurse every 10 seconds */
+	schedule_delayed_work(&chip->ttf_work, msecs_to_jiffies(10000));
+end_work:
+	vote(chip->awake_votable, TTF_PRIMING, false, 0);
+	mutex_unlock(&chip->ttf.lock);
+}
+
+/* PSY CALLBACKS STAY HERE */
+
+static int fg_psy_get_property(struct power_supply *psy,
+				       enum power_supply_property psp,
+				       union power_supply_propval *pval)
+{
+	struct fg_chip *chip = power_supply_get_drvdata(psy);
+	int rc = 0;
+
+	switch (psp) {
+	case POWER_SUPPLY_PROP_CAPACITY:
+		rc = fg_get_prop_capacity(chip, &pval->intval);
+		break;
+	case POWER_SUPPLY_PROP_CAPACITY_RAW:
+		rc = fg_get_msoc_raw(chip, &pval->intval);
+		break;
+	case POWER_SUPPLY_PROP_VOLTAGE_NOW:
+		if (chip->battery_missing)
+			pval->intval = 3700000;
+		else
+			rc = fg_get_battery_voltage(chip, &pval->intval);
+		break;
+	case POWER_SUPPLY_PROP_CURRENT_NOW:
+		rc = fg_get_battery_current(chip, &pval->intval);
+		break;
+	case POWER_SUPPLY_PROP_TEMP:
+		rc = fg_get_battery_temp(chip, &pval->intval);
+		break;
+	case POWER_SUPPLY_PROP_COLD_TEMP:
+		rc = fg_get_jeita_threshold(chip, JEITA_COLD, &pval->intval);
+		if (rc < 0) {
+			pr_err("Error in reading jeita_cold, rc=%d\n", rc);
+			return rc;
+		}
+		break;
+	case POWER_SUPPLY_PROP_COOL_TEMP:
+		rc = fg_get_jeita_threshold(chip, JEITA_COOL, &pval->intval);
+		if (rc < 0) {
+			pr_err("Error in reading jeita_cool, rc=%d\n", rc);
+			return rc;
+		}
+		break;
+	case POWER_SUPPLY_PROP_WARM_TEMP:
+		rc = fg_get_jeita_threshold(chip, JEITA_WARM, &pval->intval);
+		if (rc < 0) {
+			pr_err("Error in reading jeita_warm, rc=%d\n", rc);
+			return rc;
+		}
+		break;
+	case POWER_SUPPLY_PROP_HOT_TEMP:
+		rc = fg_get_jeita_threshold(chip, JEITA_HOT, &pval->intval);
+		if (rc < 0) {
+			pr_err("Error in reading jeita_hot, rc=%d\n", rc);
+			return rc;
+		}
+		break;
+	case POWER_SUPPLY_PROP_RESISTANCE:
+		rc = fg_get_battery_resistance(chip, &pval->intval);
+		break;
+	case POWER_SUPPLY_PROP_VOLTAGE_OCV:
+		rc = fg_get_sram_prop(chip, FG_SRAM_OCV, &pval->intval);
+		break;
+	case POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN:
+		pval->intval = chip->cl.nom_cap_uah;
+		break;
+	case POWER_SUPPLY_PROP_RESISTANCE_ID:
+		pval->intval = chip->batt_id_ohms;
+		break;
+	case POWER_SUPPLY_PROP_BATTERY_TYPE:
+		pval->strval = fg_get_battery_type(chip);
+		break;
+	case POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN:
+		pval->intval = chip->bp.float_volt_uv;
+		break;
+	case POWER_SUPPLY_PROP_CYCLE_COUNT:
+		pval->intval = fg_get_cycle_count(chip);
+		break;
+	case POWER_SUPPLY_PROP_CYCLE_COUNT_ID:
+		pval->intval = chip->cyc_ctr.id;
+		break;
+	case POWER_SUPPLY_PROP_CHARGE_NOW_RAW:
+		rc = fg_get_charge_raw(chip, &pval->intval);
+		break;
+	case POWER_SUPPLY_PROP_CHARGE_NOW:
+		pval->intval = chip->cl.init_cc_uah;
+		break;
+	case POWER_SUPPLY_PROP_CHARGE_FULL:
+		pval->intval = chip->cl.learned_cc_uah;
+		break;
+	case POWER_SUPPLY_PROP_CHARGE_COUNTER:
+		rc = fg_get_charge_counter(chip, &pval->intval);
+		break;
+	case POWER_SUPPLY_PROP_CHARGE_COUNTER_SHADOW:
+		rc = fg_get_charge_counter_shadow(chip, &pval->intval);
+		break;
+	case POWER_SUPPLY_PROP_TIME_TO_FULL_AVG:
+		rc = fg_get_time_to_full(chip, &pval->intval);
+		break;
+	case POWER_SUPPLY_PROP_TIME_TO_EMPTY_AVG:
+		rc = fg_get_time_to_empty(chip, &pval->intval);
+		break;
+	case POWER_SUPPLY_PROP_SOC_REPORTING_READY:
+		pval->intval = chip->soc_reporting_ready;
+		break;
+	case POWER_SUPPLY_PROP_DEBUG_BATTERY:
+		pval->intval = is_debug_batt_id(chip);
+		break;
+	case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE:
+		rc = fg_get_sram_prop(chip, FG_SRAM_VBATT_FULL, &pval->intval);
+		break;
+	case POWER_SUPPLY_PROP_CC_STEP:
+		if ((chip->ttf.cc_step.sel >= 0) &&
+				(chip->ttf.cc_step.sel < MAX_CC_STEPS)) {
+			pval->intval =
+				chip->ttf.cc_step.arr[chip->ttf.cc_step.sel];
+		} else {
+			pr_err("cc_step_sel %d is out of bounds [0, %d]\n",
+				chip->ttf.cc_step.sel, MAX_CC_STEPS - 1);
+			return -EINVAL;
+		}
+		break;
+	case POWER_SUPPLY_PROP_CC_STEP_SEL:
+		pval->intval = chip->ttf.cc_step.sel;
+		break;
+	default:
+		pr_err("unsupported property %d\n", psp);
+		rc = -EINVAL;
+		break;
+	}
+
+	if (rc < 0)
+		return -ENODATA;
+
+	return 0;
+}
+
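+/*
+ * power_supply set_property callback. Only the properties whitelisted
+ * in fg_property_is_writeable() are writable from sysfs; in-kernel
+ * clients (e.g. the Qnovo driver voting on CHARGE_QNOVO_ENABLE) may
+ * call this path directly.
+ */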
+static int fg_psy_set_property(struct power_supply *psy,
+				  enum power_supply_property psp,
+				  const union power_supply_propval *pval)
+{
+	struct fg_chip *chip = power_supply_get_drvdata(psy);
+	int rc = 0;
+
+	switch (psp) {
+	case POWER_SUPPLY_PROP_CYCLE_COUNT_ID:
+		if ((pval->intval > 0) && (pval->intval <= BUCKET_COUNT)) {
+			chip->cyc_ctr.id = pval->intval;
+		} else {
+			pr_err("rejecting invalid cycle_count_id = %d\n",
+				pval->intval);
+			return -EINVAL;
+		}
+		break;
+	case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE:
+		rc = fg_set_constant_chg_voltage(chip, pval->intval);
+		break;
+	case POWER_SUPPLY_PROP_RESISTANCE:
+		rc = fg_force_esr_meas(chip);
+		break;
+	case POWER_SUPPLY_PROP_CHARGE_QNOVO_ENABLE:
+		rc = fg_prepare_for_qnovo(chip, pval->intval);
+		break;
+	case POWER_SUPPLY_PROP_CC_STEP:
+		if ((chip->ttf.cc_step.sel >= 0) &&
+				(chip->ttf.cc_step.sel < MAX_CC_STEPS)) {
+			chip->ttf.cc_step.arr[chip->ttf.cc_step.sel] =
+								pval->intval;
+		} else {
+			pr_err("cc_step_sel %d is out of bounds [0, %d]\n",
+				chip->ttf.cc_step.sel, MAX_CC_STEPS - 1);
+			return -EINVAL;
+		}
+		break;
+	case POWER_SUPPLY_PROP_CC_STEP_SEL:
+		if ((pval->intval >= 0) && (pval->intval < MAX_CC_STEPS)) {
+			chip->ttf.cc_step.sel = pval->intval;
+		} else {
+			pr_err("cc_step_sel %d is out of bounds [0, %d]\n",
+				pval->intval, MAX_CC_STEPS - 1);
+			return -EINVAL;
+		}
+		break;
+	case POWER_SUPPLY_PROP_CHARGE_FULL:
+		if (chip->cl.active) {
+			pr_warn("Capacity learning active!\n");
+			return 0;
+		}
+		if (pval->intval <= 0 || pval->intval > chip->cl.nom_cap_uah) {
+			pr_err("charge_full is out of bounds\n");
+			return -EINVAL;
+		}
+		chip->cl.learned_cc_uah = pval->intval;
+		rc = fg_save_learned_cap_to_sram(chip);
+		if (rc < 0)
+			pr_err("Error in saving learned_cc_uah, rc=%d\n", rc);
+		break;
+	case POWER_SUPPLY_PROP_COLD_TEMP:
+		rc = fg_set_jeita_threshold(chip, JEITA_COLD, pval->intval);
+		if (rc < 0) {
+			pr_err("Error in writing jeita_cold, rc=%d\n", rc);
+			return rc;
+		}
+		break;
+	case POWER_SUPPLY_PROP_COOL_TEMP:
+		rc = fg_set_jeita_threshold(chip, JEITA_COOL, pval->intval);
+		if (rc < 0) {
+			pr_err("Error in writing jeita_cool, rc=%d\n", rc);
+			return rc;
+		}
+		break;
+	case POWER_SUPPLY_PROP_WARM_TEMP:
+		rc = fg_set_jeita_threshold(chip, JEITA_WARM, pval->intval);
+		if (rc < 0) {
+			pr_err("Error in writing jeita_warm, rc=%d\n", rc);
+			return rc;
+		}
+		break;
+	case POWER_SUPPLY_PROP_HOT_TEMP:
+		rc = fg_set_jeita_threshold(chip, JEITA_HOT, pval->intval);
+		if (rc < 0) {
+			pr_err("Error in writing jeita_hot, rc=%d\n", rc);
+			return rc;
+		}
+		break;
+	default:
+		break;
+	}
+
+	return rc;
+}
+
+static int fg_property_is_writeable(struct power_supply *psy,
+						enum power_supply_property psp)
+{
+	switch (psp) {
+	case POWER_SUPPLY_PROP_CYCLE_COUNT_ID:
+	case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE:
+	case POWER_SUPPLY_PROP_CC_STEP:
+	case POWER_SUPPLY_PROP_CC_STEP_SEL:
+	case POWER_SUPPLY_PROP_CHARGE_FULL:
+	case POWER_SUPPLY_PROP_COLD_TEMP:
+	case POWER_SUPPLY_PROP_COOL_TEMP:
+	case POWER_SUPPLY_PROP_WARM_TEMP:
+	case POWER_SUPPLY_PROP_HOT_TEMP:
+		return 1;
+	default:
+		break;
+	}
+
+	return 0;
+}
+
+static void fg_external_power_changed(struct power_supply *psy)
+{
+	pr_debug("power supply changed\n");
+}
+
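+/*
+ * Power-supply change notifier. This can run in atomic context, so it
+ * only filters events (suspended, wrong supply, work already queued)
+ * and defers all real processing to status_change_work.
+ */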
+static int fg_notifier_cb(struct notifier_block *nb,
+		unsigned long event, void *data)
+{
+	struct power_supply *psy = data;
+	struct fg_chip *chip = container_of(nb, struct fg_chip, nb);
+
+	spin_lock(&chip->suspend_lock);
+	if (chip->suspended) {
+		/* Return if we are still suspended */
+		spin_unlock(&chip->suspend_lock);
+		return NOTIFY_OK;
+	}
+	spin_unlock(&chip->suspend_lock);
+
+	if (event != PSY_EVENT_PROP_CHANGED)
+		return NOTIFY_OK;
+
+	if (work_pending(&chip->status_change_work))
+		return NOTIFY_OK;
+
+	if ((strcmp(psy->desc->name, "battery") == 0)
+		|| (strcmp(psy->desc->name, "parallel") == 0)
+		|| (strcmp(psy->desc->name, "usb") == 0)) {
+		/*
+		 * We cannot vote for awake votable here as that takes
+		 * a mutex lock and this is executed in an atomic context.
+		 */
+		pm_stay_awake(chip->dev);
+		schedule_work(&chip->status_change_work);
+	}
+
+	return NOTIFY_OK;
+}
+
+static enum power_supply_property fg_psy_props[] = {
+	POWER_SUPPLY_PROP_CAPACITY,
+	POWER_SUPPLY_PROP_CAPACITY_RAW,
+	POWER_SUPPLY_PROP_TEMP,
+	POWER_SUPPLY_PROP_COLD_TEMP,
+	POWER_SUPPLY_PROP_COOL_TEMP,
+	POWER_SUPPLY_PROP_WARM_TEMP,
+	POWER_SUPPLY_PROP_HOT_TEMP,
+	POWER_SUPPLY_PROP_VOLTAGE_NOW,
+	POWER_SUPPLY_PROP_VOLTAGE_OCV,
+	POWER_SUPPLY_PROP_CURRENT_NOW,
+	POWER_SUPPLY_PROP_RESISTANCE_ID,
+	POWER_SUPPLY_PROP_RESISTANCE,
+	POWER_SUPPLY_PROP_BATTERY_TYPE,
+	POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN,
+	POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN,
+	POWER_SUPPLY_PROP_CYCLE_COUNT,
+	POWER_SUPPLY_PROP_CYCLE_COUNT_ID,
+	POWER_SUPPLY_PROP_CHARGE_NOW_RAW,
+	POWER_SUPPLY_PROP_CHARGE_NOW,
+	POWER_SUPPLY_PROP_CHARGE_FULL,
+	POWER_SUPPLY_PROP_CHARGE_COUNTER,
+	POWER_SUPPLY_PROP_CHARGE_COUNTER_SHADOW,
+	POWER_SUPPLY_PROP_TIME_TO_FULL_AVG,
+	POWER_SUPPLY_PROP_TIME_TO_EMPTY_AVG,
+	POWER_SUPPLY_PROP_SOC_REPORTING_READY,
+	POWER_SUPPLY_PROP_DEBUG_BATTERY,
+	POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE,
+	POWER_SUPPLY_PROP_CC_STEP,
+	POWER_SUPPLY_PROP_CC_STEP_SEL,
+};
+
+static const struct power_supply_desc fg_psy_desc = {
+	.name = "bms",
+	.type = POWER_SUPPLY_TYPE_BMS,
+	.properties = fg_psy_props,
+	.num_properties = ARRAY_SIZE(fg_psy_props),
+	.get_property = fg_psy_get_property,
+	.set_property = fg_psy_set_property,
+	.external_power_changed = fg_external_power_changed,
+	.property_is_writeable = fg_property_is_writeable,
+};
+
+/* INIT FUNCTIONS STAY HERE */
+
+#define DEFAULT_ESR_CHG_TIMER_RETRY	8
+#define DEFAULT_ESR_CHG_TIMER_MAX	16
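+/*
+ * One-time hardware initialization: pushes the DT-derived parameters
+ * (cutoff/empty voltage, termination currents, SoC delta thresholds,
+ * JEITA levels, ESR timers and filters) into FG SRAM and registers.
+ * Each write failure aborts init so the gauge never runs with a
+ * partially applied configuration.
+ */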
+static int fg_hw_init(struct fg_chip *chip)
+{
+	int rc;
+	u8 buf[4], val;
+
+	fg_encode(chip->sp, FG_SRAM_CUTOFF_VOLT, chip->dt.cutoff_volt_mv, buf);
+	rc = fg_sram_write(chip, chip->sp[FG_SRAM_CUTOFF_VOLT].addr_word,
+			chip->sp[FG_SRAM_CUTOFF_VOLT].addr_byte, buf,
+			chip->sp[FG_SRAM_CUTOFF_VOLT].len, FG_IMA_DEFAULT);
+	if (rc < 0) {
+		pr_err("Error in writing cutoff_volt, rc=%d\n", rc);
+		return rc;
+	}
+
+	fg_encode(chip->sp, FG_SRAM_EMPTY_VOLT, chip->dt.empty_volt_mv, buf);
+	rc = fg_sram_write(chip, chip->sp[FG_SRAM_EMPTY_VOLT].addr_word,
+			chip->sp[FG_SRAM_EMPTY_VOLT].addr_byte, buf,
+			chip->sp[FG_SRAM_EMPTY_VOLT].len, FG_IMA_DEFAULT);
+	if (rc < 0) {
+		pr_err("Error in writing empty_volt, rc=%d\n", rc);
+		return rc;
+	}
+
+	fg_encode(chip->sp, FG_SRAM_CHG_TERM_CURR, chip->dt.chg_term_curr_ma,
+		buf);
+	rc = fg_sram_write(chip, chip->sp[FG_SRAM_CHG_TERM_CURR].addr_word,
+			chip->sp[FG_SRAM_CHG_TERM_CURR].addr_byte, buf,
+			chip->sp[FG_SRAM_CHG_TERM_CURR].len, FG_IMA_DEFAULT);
+	if (rc < 0) {
+		pr_err("Error in writing chg_term_curr, rc=%d\n", rc);
+		return rc;
+	}
+
+	fg_encode(chip->sp, FG_SRAM_SYS_TERM_CURR, chip->dt.sys_term_curr_ma,
+		buf);
+	rc = fg_sram_write(chip, chip->sp[FG_SRAM_SYS_TERM_CURR].addr_word,
+			chip->sp[FG_SRAM_SYS_TERM_CURR].addr_byte, buf,
+			chip->sp[FG_SRAM_SYS_TERM_CURR].len, FG_IMA_DEFAULT);
+	if (rc < 0) {
+		pr_err("Error in writing sys_term_curr, rc=%d\n", rc);
+		return rc;
+	}
+
+	fg_encode(chip->sp, FG_SRAM_CUTOFF_CURR, chip->dt.cutoff_curr_ma,
+		buf);
+	rc = fg_sram_write(chip, chip->sp[FG_SRAM_CUTOFF_CURR].addr_word,
+			chip->sp[FG_SRAM_CUTOFF_CURR].addr_byte, buf,
+			chip->sp[FG_SRAM_CUTOFF_CURR].len, FG_IMA_DEFAULT);
+	if (rc < 0) {
+		pr_err("Error in writing cutoff_curr, rc=%d\n", rc);
+		return rc;
+	}
+
+	if (!(chip->wa_flags & PMI8998_V1_REV_WA)) {
+		fg_encode(chip->sp, FG_SRAM_CHG_TERM_BASE_CURR,
+			chip->dt.chg_term_base_curr_ma, buf);
+		rc = fg_sram_write(chip,
+				chip->sp[FG_SRAM_CHG_TERM_BASE_CURR].addr_word,
+				chip->sp[FG_SRAM_CHG_TERM_BASE_CURR].addr_byte,
+				buf, chip->sp[FG_SRAM_CHG_TERM_BASE_CURR].len,
+				FG_IMA_DEFAULT);
+		if (rc < 0) {
+			pr_err("Error in writing chg_term_base_curr, rc=%d\n",
+				rc);
+			return rc;
+		}
+	}
+
+	if (chip->dt.vbatt_low_thr_mv > 0) {
+		fg_encode(chip->sp, FG_SRAM_VBATT_LOW,
+			chip->dt.vbatt_low_thr_mv, buf);
+		rc = fg_sram_write(chip, chip->sp[FG_SRAM_VBATT_LOW].addr_word,
+				chip->sp[FG_SRAM_VBATT_LOW].addr_byte, buf,
+				chip->sp[FG_SRAM_VBATT_LOW].len,
+				FG_IMA_DEFAULT);
+		if (rc < 0) {
+			pr_err("Error in writing vbatt_low_thr, rc=%d\n", rc);
+			return rc;
+		}
+	}
+
+	if (chip->dt.delta_soc_thr > 0 && chip->dt.delta_soc_thr < 100) {
+		fg_encode(chip->sp, FG_SRAM_DELTA_MSOC_THR,
+			chip->dt.delta_soc_thr, buf);
+		rc = fg_sram_write(chip,
+				chip->sp[FG_SRAM_DELTA_MSOC_THR].addr_word,
+				chip->sp[FG_SRAM_DELTA_MSOC_THR].addr_byte,
+				buf, chip->sp[FG_SRAM_DELTA_MSOC_THR].len,
+				FG_IMA_DEFAULT);
+		if (rc < 0) {
+			pr_err("Error in writing delta_msoc_thr, rc=%d\n", rc);
+			return rc;
+		}
+
+		fg_encode(chip->sp, FG_SRAM_DELTA_BSOC_THR,
+			chip->dt.delta_soc_thr, buf);
+		rc = fg_sram_write(chip,
+				chip->sp[FG_SRAM_DELTA_BSOC_THR].addr_word,
+				chip->sp[FG_SRAM_DELTA_BSOC_THR].addr_byte,
+				buf, chip->sp[FG_SRAM_DELTA_BSOC_THR].len,
+				FG_IMA_DEFAULT);
+		if (rc < 0) {
+			pr_err("Error in writing delta_bsoc_thr, rc=%d\n", rc);
+			return rc;
+		}
+	}
+
+	/*
+	 * Configure the battery thermal coefficients c1, c2 and c3 only
+	 * if the first coefficient is non-zero, i.e. they were actually
+	 * provided via device tree.
+	 */
+	if (chip->dt.batt_therm_coeffs[0] > 0) {
+		rc = fg_write(chip, BATT_INFO_THERM_C1(chip),
+			chip->dt.batt_therm_coeffs, BATT_THERM_NUM_COEFFS);
+		if (rc < 0) {
+			pr_err("Error in writing battery thermal coefficients, rc=%d\n",
+				rc);
+			return rc;
+		}
+	}
+
+	if (chip->dt.recharge_soc_thr > 0 && chip->dt.recharge_soc_thr < 100) {
+		rc = fg_set_recharge_soc(chip, chip->dt.recharge_soc_thr);
+		if (rc < 0) {
+			pr_err("Error in setting recharge_soc, rc=%d\n", rc);
+			return rc;
+		}
+	}
+
+	if (chip->dt.recharge_volt_thr_mv > 0) {
+		rc = fg_set_recharge_voltage(chip,
+			chip->dt.recharge_volt_thr_mv);
+		if (rc < 0) {
+			pr_err("Error in setting recharge_voltage, rc=%d\n",
+				rc);
+			return rc;
+		}
+	}
+
+	if (chip->dt.rsense_sel >= SRC_SEL_BATFET &&
+			chip->dt.rsense_sel < SRC_SEL_RESERVED) {
+		rc = fg_masked_write(chip, BATT_INFO_IBATT_SENSING_CFG(chip),
+				SOURCE_SELECT_MASK, chip->dt.rsense_sel);
+		if (rc < 0) {
+			pr_err("Error in writing rsense_sel, rc=%d\n", rc);
+			return rc;
+		}
+	}
+
+	rc = fg_set_jeita_threshold(chip, JEITA_COLD,
+		chip->dt.jeita_thresholds[JEITA_COLD] * 10);
+	if (rc < 0) {
+		pr_err("Error in writing jeita_cold, rc=%d\n", rc);
+		return rc;
+	}
+
+	rc = fg_set_jeita_threshold(chip, JEITA_COOL,
+		chip->dt.jeita_thresholds[JEITA_COOL] * 10);
+	if (rc < 0) {
+		pr_err("Error in writing jeita_cool, rc=%d\n", rc);
+		return rc;
+	}
+
+	rc = fg_set_jeita_threshold(chip, JEITA_WARM,
+		chip->dt.jeita_thresholds[JEITA_WARM] * 10);
+	if (rc < 0) {
+		pr_err("Error in writing jeita_warm, rc=%d\n", rc);
+		return rc;
+	}
+
+	rc = fg_set_jeita_threshold(chip, JEITA_HOT,
+		chip->dt.jeita_thresholds[JEITA_HOT] * 10);
+	if (rc < 0) {
+		pr_err("Error in writing jeita_hot, rc=%d\n", rc);
+		return rc;
+	}
+
+	if (chip->pmic_rev_id->pmic_subtype == PMI8998_SUBTYPE) {
+		chip->esr_timer_charging_default[TIMER_RETRY] =
+			DEFAULT_ESR_CHG_TIMER_RETRY;
+		chip->esr_timer_charging_default[TIMER_MAX] =
+			DEFAULT_ESR_CHG_TIMER_MAX;
+	} else {
+		/* We don't need this for pm660 at present */
+		chip->esr_timer_charging_default[TIMER_RETRY] = -EINVAL;
+		chip->esr_timer_charging_default[TIMER_MAX] = -EINVAL;
+	}
+
+	rc = fg_set_esr_timer(chip, chip->dt.esr_timer_charging[TIMER_RETRY],
+		chip->dt.esr_timer_charging[TIMER_MAX], true, FG_IMA_DEFAULT);
+	if (rc < 0) {
+		pr_err("Error in setting ESR timer, rc=%d\n", rc);
+		return rc;
+	}
+
+	rc = fg_set_esr_timer(chip, chip->dt.esr_timer_awake[TIMER_RETRY],
+		chip->dt.esr_timer_awake[TIMER_MAX], false, FG_IMA_DEFAULT);
+	if (rc < 0) {
+		pr_err("Error in setting ESR timer, rc=%d\n", rc);
+		return rc;
+	}
+
+	restore_cycle_counter(chip);
+
+	if (chip->dt.jeita_hyst_temp >= 0) {
+		val = chip->dt.jeita_hyst_temp << JEITA_TEMP_HYST_SHIFT;
+		rc = fg_masked_write(chip, BATT_INFO_BATT_TEMP_CFG(chip),
+			JEITA_TEMP_HYST_MASK, val);
+		if (rc < 0) {
+			pr_err("Error in writing batt_temp_cfg, rc=%d\n", rc);
+			return rc;
+		}
+	}
+
+	get_batt_temp_delta(chip->dt.batt_temp_delta, &val);
+	rc = fg_masked_write(chip, BATT_INFO_BATT_TMPR_INTR(chip),
+			CHANGE_THOLD_MASK, val);
+	if (rc < 0) {
+		pr_err("Error in writing batt_temp_delta, rc=%d\n", rc);
+		return rc;
+	}
+
+	fg_encode(chip->sp, FG_SRAM_ESR_TIGHT_FILTER,
+		chip->dt.esr_tight_flt_upct, buf);
+	rc = fg_sram_write(chip, chip->sp[FG_SRAM_ESR_TIGHT_FILTER].addr_word,
+			chip->sp[FG_SRAM_ESR_TIGHT_FILTER].addr_byte, buf,
+			chip->sp[FG_SRAM_ESR_TIGHT_FILTER].len, FG_IMA_DEFAULT);
+	if (rc < 0) {
+		pr_err("Error in writing ESR tight filter, rc=%d\n", rc);
+		return rc;
+	}
+
+	fg_encode(chip->sp, FG_SRAM_ESR_BROAD_FILTER,
+		chip->dt.esr_broad_flt_upct, buf);
+	rc = fg_sram_write(chip, chip->sp[FG_SRAM_ESR_BROAD_FILTER].addr_word,
+			chip->sp[FG_SRAM_ESR_BROAD_FILTER].addr_byte, buf,
+			chip->sp[FG_SRAM_ESR_BROAD_FILTER].len, FG_IMA_DEFAULT);
+	if (rc < 0) {
+		pr_err("Error in writing ESR broad filter, rc=%d\n", rc);
+		return rc;
+	}
+
+	fg_encode(chip->sp, FG_SRAM_ESR_PULSE_THRESH,
+		chip->dt.esr_pulse_thresh_ma, buf);
+	rc = fg_sram_write(chip, chip->sp[FG_SRAM_ESR_PULSE_THRESH].addr_word,
+			chip->sp[FG_SRAM_ESR_PULSE_THRESH].addr_byte, buf,
+			chip->sp[FG_SRAM_ESR_PULSE_THRESH].len, FG_IMA_DEFAULT);
+	if (rc < 0) {
+		pr_err("Error in writing esr_pulse_thresh_ma, rc=%d\n", rc);
+		return rc;
+	}
+
+	get_esr_meas_current(chip->dt.esr_meas_curr_ma, &val);
+	rc = fg_masked_write(chip, BATT_INFO_ESR_PULL_DN_CFG(chip),
+			ESR_PULL_DOWN_IVAL_MASK, val);
+	if (rc < 0) {
+		pr_err("Error in writing esr_meas_curr_ma, rc=%d\n", rc);
+		return rc;
+	}
+
+	if (is_debug_batt_id(chip)) {
+		val = ESR_NO_PULL_DOWN;
+		rc = fg_masked_write(chip, BATT_INFO_ESR_PULL_DN_CFG(chip),
+			ESR_PULL_DOWN_MODE_MASK, val);
+		if (rc < 0) {
+			pr_err("Error in writing esr_pull_down, rc=%d\n", rc);
+			return rc;
+		}
+	}
+
+	return 0;
+}
+
+static int fg_memif_init(struct fg_chip *chip)
+{
+	return fg_ima_init(chip);
+}
+
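+/*
+ * PM660 parts from the TSMC fab (PM660_TSMC_OSC_WA) have an oscillator
+ * that drifts with die temperature; compensate by interpolating a new
+ * timebase from fg_tsmc_osc_table and rewriting it in SRAM.
+ */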
+static int fg_adjust_timebase(struct fg_chip *chip)
+{
+	int rc = 0, die_temp;
+	s32 time_base = 0;
+	u8 buf[2] = {0};
+
+	if ((chip->wa_flags & PM660_TSMC_OSC_WA) && chip->die_temp_chan) {
+		rc = iio_read_channel_processed(chip->die_temp_chan, &die_temp);
+		if (rc < 0) {
+			pr_err("Error in reading die_temp, rc:%d\n", rc);
+			return rc;
+		}
+
+		rc = fg_lerp(fg_tsmc_osc_table, ARRAY_SIZE(fg_tsmc_osc_table),
+					die_temp / 1000, &time_base);
+		if (rc < 0) {
+			pr_err("Error to lookup fg_tsmc_osc_table rc=%d\n", rc);
+			return rc;
+		}
+
+		fg_encode(chip->sp, FG_SRAM_TIMEBASE, time_base, buf);
+		rc = fg_sram_write(chip,
+			chip->sp[FG_SRAM_TIMEBASE].addr_word,
+			chip->sp[FG_SRAM_TIMEBASE].addr_byte, buf,
+			chip->sp[FG_SRAM_TIMEBASE].len, FG_IMA_DEFAULT);
+		if (rc < 0) {
+			pr_err("Error in writing timebase, rc=%d\n", rc);
+			return rc;
+		}
+	}
+
+	return 0;
+}
+
+/* INTERRUPT HANDLERS STAY HERE */
+
+static irqreturn_t fg_mem_xcp_irq_handler(int irq, void *data)
+{
+	struct fg_chip *chip = data;
+	u8 status;
+	int rc;
+
+	rc = fg_read(chip, MEM_IF_INT_RT_STS(chip), &status, 1);
+	if (rc < 0) {
+		pr_err("failed to read addr=0x%04x, rc=%d\n",
+			MEM_IF_INT_RT_STS(chip), rc);
+		return IRQ_HANDLED;
+	}
+
+	fg_dbg(chip, FG_IRQ, "irq %d triggered, status:%d\n", irq, status);
+
+	mutex_lock(&chip->sram_rw_lock);
+	rc = fg_clear_dma_errors_if_any(chip);
+	if (rc < 0)
+		pr_err("Error in clearing DMA error, rc=%d\n", rc);
+
+	if (status & MEM_XCP_BIT) {
+		rc = fg_clear_ima_errors_if_any(chip, true);
+		if (rc < 0 && rc != -EAGAIN)
+			pr_err("Error in checking IMA errors rc:%d\n", rc);
+	}
+
+	mutex_unlock(&chip->sram_rw_lock);
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t fg_vbatt_low_irq_handler(int irq, void *data)
+{
+	struct fg_chip *chip = data;
+
+	fg_dbg(chip, FG_IRQ, "irq %d triggered\n", irq);
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t fg_batt_missing_irq_handler(int irq, void *data)
+{
+	struct fg_chip *chip = data;
+	u8 status;
+	int rc;
+
+	rc = fg_read(chip, BATT_INFO_INT_RT_STS(chip), &status, 1);
+	if (rc < 0) {
+		pr_err("failed to read addr=0x%04x, rc=%d\n",
+			BATT_INFO_INT_RT_STS(chip), rc);
+		return IRQ_HANDLED;
+	}
+
+	fg_dbg(chip, FG_IRQ, "irq %d triggered sts:%d\n", irq, status);
+	chip->battery_missing = (status & BT_MISS_BIT);
+
+	if (chip->battery_missing) {
+		chip->profile_available = false;
+		chip->profile_loaded = false;
+		chip->soc_reporting_ready = false;
+		return IRQ_HANDLED;
+	}
+
+	clear_battery_profile(chip);
+	schedule_delayed_work(&chip->profile_load_work, 0);
+
+	if (chip->fg_psy)
+		power_supply_changed(chip->fg_psy);
+
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t fg_delta_batt_temp_irq_handler(int irq, void *data)
+{
+	struct fg_chip *chip = data;
+	union power_supply_propval prop = {0, };
+	int rc, batt_temp;
+
+	rc = fg_get_battery_temp(chip, &batt_temp);
+	if (rc < 0) {
+		pr_err("Error in getting batt_temp\n");
+		return IRQ_HANDLED;
+	}
+	fg_dbg(chip, FG_IRQ, "irq %d triggered bat_temp: %d\n", irq, batt_temp);
+
+	rc = fg_esr_filter_config(chip, batt_temp, false);
+	if (rc < 0)
+		pr_err("Error in configuring ESR filter rc:%d\n", rc);
+
+	rc = fg_slope_limit_config(chip, batt_temp);
+	if (rc < 0)
+		pr_err("Error in configuring slope limiter rc:%d\n", rc);
+
+	rc = fg_adjust_ki_coeff_full_soc(chip, batt_temp);
+	if (rc < 0)
+		pr_err("Error in configuring ki_coeff_full_soc rc:%d\n", rc);
+
+	if (!batt_psy_initialized(chip)) {
+		chip->last_batt_temp = batt_temp;
+		return IRQ_HANDLED;
+	}
+
+	power_supply_get_property(chip->batt_psy, POWER_SUPPLY_PROP_HEALTH,
+		&prop);
+	chip->health = prop.intval;
+
+	if (abs(chip->last_batt_temp - batt_temp) > 30)
+		pr_warn("Battery temperature last:%d current: %d\n",
+			chip->last_batt_temp, batt_temp);
+
+	if (chip->last_batt_temp != batt_temp) {
+		rc = fg_adjust_timebase(chip);
+		if (rc < 0)
+			pr_err("Error in adjusting timebase, rc=%d\n", rc);
+
+		rc = fg_adjust_recharge_voltage(chip);
+		if (rc < 0)
+			pr_err("Error in adjusting recharge_voltage, rc=%d\n",
+				rc);
+
+		chip->last_batt_temp = batt_temp;
+		power_supply_changed(chip->batt_psy);
+	}
+
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t fg_first_est_irq_handler(int irq, void *data)
+{
+	struct fg_chip *chip = data;
+
+	fg_dbg(chip, FG_IRQ, "irq %d triggered\n", irq);
+	complete_all(&chip->soc_ready);
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t fg_soc_update_irq_handler(int irq, void *data)
+{
+	struct fg_chip *chip = data;
+
+	fg_dbg(chip, FG_IRQ, "irq %d triggered\n", irq);
+	complete_all(&chip->soc_update);
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t fg_delta_bsoc_irq_handler(int irq, void *data)
+{
+	struct fg_chip *chip = data;
+	int rc;
+
+	fg_dbg(chip, FG_IRQ, "irq %d triggered\n", irq);
+	rc = fg_charge_full_update(chip);
+	if (rc < 0)
+		pr_err("Error in charge_full_update, rc=%d\n", rc);
+
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t fg_delta_msoc_irq_handler(int irq, void *data)
+{
+	struct fg_chip *chip = data;
+	int rc;
+
+	fg_dbg(chip, FG_IRQ, "irq %d triggered\n", irq);
+	fg_cycle_counter_update(chip);
+
+	if (chip->cl.active)
+		fg_cap_learning_update(chip);
+
+	rc = fg_charge_full_update(chip);
+	if (rc < 0)
+		pr_err("Error in charge_full_update, rc=%d\n", rc);
+
+	rc = fg_adjust_ki_coeff_dischg(chip);
+	if (rc < 0)
+		pr_err("Error in adjusting ki_coeff_dischg, rc=%d\n", rc);
+
+	rc = fg_update_maint_soc(chip);
+	if (rc < 0)
+		pr_err("Error in updating maint_soc, rc=%d\n", rc);
+
+	rc = fg_esr_validate(chip);
+	if (rc < 0)
+		pr_err("Error in validating ESR, rc=%d\n", rc);
+
+	rc = fg_adjust_timebase(chip);
+	if (rc < 0)
+		pr_err("Error in adjusting timebase, rc=%d\n", rc);
+
+	if (batt_psy_initialized(chip))
+		power_supply_changed(chip->batt_psy);
+
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t fg_empty_soc_irq_handler(int irq, void *data)
+{
+	struct fg_chip *chip = data;
+
+	fg_dbg(chip, FG_IRQ, "irq %d triggered\n", irq);
+	if (batt_psy_initialized(chip))
+		power_supply_changed(chip->batt_psy);
+
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t fg_soc_irq_handler(int irq, void *data)
+{
+	struct fg_chip *chip = data;
+
+	fg_dbg(chip, FG_IRQ, "irq %d triggered\n", irq);
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t fg_dummy_irq_handler(int irq, void *data)
+{
+	pr_debug("irq %d triggered\n", irq);
+	return IRQ_HANDLED;
+}
+
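+/*
+ * Static IRQ descriptor table, indexed by the FG IRQ enum. Interrupts
+ * the driver does not act on are wired to fg_dummy_irq_handler so they
+ * are still acknowledged and logged.
+ */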
+static struct fg_irq_info fg_irqs[FG_IRQ_MAX] = {
+	/* BATT_SOC irqs */
+	[MSOC_FULL_IRQ] = {
+		.name		= "msoc-full",
+		.handler	= fg_soc_irq_handler,
+	},
+	[MSOC_HIGH_IRQ] = {
+		.name		= "msoc-high",
+		.handler	= fg_soc_irq_handler,
+		.wakeable	= true,
+	},
+	[MSOC_EMPTY_IRQ] = {
+		.name		= "msoc-empty",
+		.handler	= fg_empty_soc_irq_handler,
+		.wakeable	= true,
+	},
+	[MSOC_LOW_IRQ] = {
+		.name		= "msoc-low",
+		.handler	= fg_soc_irq_handler,
+		.wakeable	= true,
+	},
+	[MSOC_DELTA_IRQ] = {
+		.name		= "msoc-delta",
+		.handler	= fg_delta_msoc_irq_handler,
+		.wakeable	= true,
+	},
+	[BSOC_DELTA_IRQ] = {
+		.name		= "bsoc-delta",
+		.handler	= fg_delta_bsoc_irq_handler,
+		.wakeable	= true,
+	},
+	[SOC_READY_IRQ] = {
+		.name		= "soc-ready",
+		.handler	= fg_first_est_irq_handler,
+		.wakeable	= true,
+	},
+	[SOC_UPDATE_IRQ] = {
+		.name		= "soc-update",
+		.handler	= fg_soc_update_irq_handler,
+	},
+	/* BATT_INFO irqs */
+	[BATT_TEMP_DELTA_IRQ] = {
+		.name		= "batt-temp-delta",
+		.handler	= fg_delta_batt_temp_irq_handler,
+		.wakeable	= true,
+	},
+	[BATT_MISSING_IRQ] = {
+		.name		= "batt-missing",
+		.handler	= fg_batt_missing_irq_handler,
+		.wakeable	= true,
+	},
+	[ESR_DELTA_IRQ] = {
+		.name		= "esr-delta",
+		.handler	= fg_dummy_irq_handler,
+	},
+	[VBATT_LOW_IRQ] = {
+		.name		= "vbatt-low",
+		.handler	= fg_vbatt_low_irq_handler,
+		.wakeable	= true,
+	},
+	[VBATT_PRED_DELTA_IRQ] = {
+		.name		= "vbatt-pred-delta",
+		.handler	= fg_dummy_irq_handler,
+	},
+	/* MEM_IF irqs */
+	[DMA_GRANT_IRQ] = {
+		.name		= "dma-grant",
+		.handler	= fg_dummy_irq_handler,
+	},
+	[MEM_XCP_IRQ] = {
+		.name		= "mem-xcp",
+		.handler	= fg_mem_xcp_irq_handler,
+	},
+	[IMA_RDY_IRQ] = {
+		.name		= "ima-rdy",
+		.handler	= fg_dummy_irq_handler,
+	},
+};
+
+static int fg_get_irq_index_byname(const char *name)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(fg_irqs); i++) {
+		if (strcmp(fg_irqs[i].name, name) == 0)
+			return i;
+	}
+
+	pr_err("%s is not in irq list\n", name);
+	return -ENOENT;
+}
+
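+/*
+ * Match each DT "interrupt-names" string to its fg_irqs[] slot by
+ * name, request a threaded oneshot handler for it, and arm wakeable
+ * interrupts with enable_irq_wake().
+ */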
+static int fg_register_interrupts(struct fg_chip *chip)
+{
+	struct device_node *child, *node = chip->dev->of_node;
+	struct property *prop;
+	const char *name;
+	int rc, irq, irq_index;
+
+	for_each_available_child_of_node(node, child) {
+		of_property_for_each_string(child, "interrupt-names", prop,
+						name) {
+			irq = of_irq_get_byname(child, name);
+			if (irq < 0) {
+				dev_err(chip->dev, "failed to get irq %s irq:%d\n",
+					name, irq);
+				return irq;
+			}
+
+			irq_index = fg_get_irq_index_byname(name);
+			if (irq_index < 0)
+				return irq_index;
+
+			rc = devm_request_threaded_irq(chip->dev, irq, NULL,
+					fg_irqs[irq_index].handler,
+					IRQF_ONESHOT, name, chip);
+			if (rc < 0) {
+				dev_err(chip->dev, "failed to register irq handler for %s rc:%d\n",
+					name, rc);
+				return rc;
+			}
+
+			fg_irqs[irq_index].irq = irq;
+			if (fg_irqs[irq_index].wakeable)
+				enable_irq_wake(fg_irqs[irq_index].irq);
+		}
+	}
+
+	return 0;
+}
+
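+/*
+ * Read a fixed-length u32 array property. A property that is absent
+ * altogether (element count returns -EINVAL) is not an error; a
+ * property that is present but of the wrong length is.
+ */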
+static int fg_parse_dt_property_u32_array(struct device_node *node,
+				const char *prop_name, int *buf, int len)
+{
+	int rc;
+
+	rc = of_property_count_elems_of_size(node, prop_name, sizeof(u32));
+	if (rc < 0) {
+		if (rc == -EINVAL)
+			return 0;
+		else
+			return rc;
+	} else if (rc != len) {
+		pr_err("Incorrect length %d for %s, expected %d\n", rc,
+			prop_name, len);
+		return -EINVAL;
+	}
+
+	rc = of_property_read_u32_array(node, prop_name, buf, len);
+	if (rc < 0) {
+		pr_err("Error in reading %s, rc=%d\n", prop_name, rc);
+		return rc;
+	}
+
+	return 0;
+}
+
+static int fg_parse_slope_limit_coefficients(struct fg_chip *chip)
+{
+	struct device_node *node = chip->dev->of_node;
+	int rc, i;
+
+	rc = of_property_read_u32(node, "qcom,slope-limit-temp-threshold",
+			&chip->dt.slope_limit_temp);
+	if (rc < 0)
+		return 0;
+
+	rc = fg_parse_dt_property_u32_array(node, "qcom,slope-limit-coeffs",
+		chip->dt.slope_limit_coeffs, SLOPE_LIMIT_NUM_COEFFS);
+	if (rc < 0)
+		return rc;
+
+	for (i = 0; i < SLOPE_LIMIT_NUM_COEFFS; i++) {
+		if (chip->dt.slope_limit_coeffs[i] > SLOPE_LIMIT_COEFF_MAX ||
+			chip->dt.slope_limit_coeffs[i] < 0) {
+			pr_err("Incorrect slope limit coefficient\n");
+			return -EINVAL;
+		}
+	}
+
+	chip->slope_limit_en = true;
+	return 0;
+}
+
+static int fg_parse_ki_coefficients(struct fg_chip *chip)
+{
+	struct device_node *node = chip->dev->of_node;
+	int rc, i, temp;
+
+	rc = of_property_read_u32(node, "qcom,ki-coeff-full-dischg", &temp);
+	if (!rc)
+		chip->dt.ki_coeff_full_soc_dischg = temp;
+
+	rc = fg_parse_dt_property_u32_array(node, "qcom,ki-coeff-soc-dischg",
+		chip->dt.ki_coeff_soc, KI_COEFF_SOC_LEVELS);
+	if (rc < 0)
+		return rc;
+
+	rc = fg_parse_dt_property_u32_array(node, "qcom,ki-coeff-med-dischg",
+		chip->dt.ki_coeff_med_dischg, KI_COEFF_SOC_LEVELS);
+	if (rc < 0)
+		return rc;
+
+	rc = fg_parse_dt_property_u32_array(node, "qcom,ki-coeff-hi-dischg",
+		chip->dt.ki_coeff_hi_dischg, KI_COEFF_SOC_LEVELS);
+	if (rc < 0)
+		return rc;
+
+	for (i = 0; i < KI_COEFF_SOC_LEVELS; i++) {
+		if (chip->dt.ki_coeff_soc[i] < 0 ||
+			chip->dt.ki_coeff_soc[i] > FULL_CAPACITY) {
+			pr_err("Error in ki_coeff_soc_dischg values\n");
+			return -EINVAL;
+		}
+
+		if (chip->dt.ki_coeff_med_dischg[i] < 0 ||
+			chip->dt.ki_coeff_med_dischg[i] > KI_COEFF_MAX) {
+			pr_err("Error in ki_coeff_med_dischg values\n");
+			return -EINVAL;
+		}
+
+		if (chip->dt.ki_coeff_hi_dischg[i] < 0 ||
+			chip->dt.ki_coeff_hi_dischg[i] > KI_COEFF_MAX) {
+			pr_err("Error in ki_coeff_hi_dischg values\n");
+			return -EINVAL;
+		}
+	}
+	chip->ki_coeff_dischg_en = true;
+	return 0;
+}
+
+#define DEFAULT_CUTOFF_VOLT_MV		3200
+#define DEFAULT_EMPTY_VOLT_MV		2850
+#define DEFAULT_RECHARGE_VOLT_MV	4250
+#define DEFAULT_CHG_TERM_CURR_MA	100
+#define DEFAULT_CHG_TERM_BASE_CURR_MA	75
+#define DEFAULT_SYS_TERM_CURR_MA	-125
+#define DEFAULT_CUTOFF_CURR_MA		500
+#define DEFAULT_DELTA_SOC_THR		1
+#define DEFAULT_RECHARGE_SOC_THR	95
+#define DEFAULT_BATT_TEMP_COLD		0
+#define DEFAULT_BATT_TEMP_COOL		5
+#define DEFAULT_BATT_TEMP_WARM		45
+#define DEFAULT_BATT_TEMP_HOT		50
+#define DEFAULT_CL_START_SOC		15
+#define DEFAULT_CL_MIN_TEMP_DECIDEGC	150
+#define DEFAULT_CL_MAX_TEMP_DECIDEGC	450
+#define DEFAULT_CL_MAX_INC_DECIPERC	5
+#define DEFAULT_CL_MAX_DEC_DECIPERC	100
+#define DEFAULT_CL_MIN_LIM_DECIPERC	0
+#define DEFAULT_CL_MAX_LIM_DECIPERC	0
+#define BTEMP_DELTA_LOW			2
+#define BTEMP_DELTA_HIGH		10
+#define DEFAULT_ESR_FLT_TEMP_DECIDEGC	100
+#define DEFAULT_ESR_TIGHT_FLT_UPCT	3907
+#define DEFAULT_ESR_BROAD_FLT_UPCT	99610
+#define DEFAULT_ESR_TIGHT_LT_FLT_UPCT	30000
+#define DEFAULT_ESR_BROAD_LT_FLT_UPCT	30000
+#define DEFAULT_ESR_FLT_RT_DECIDEGC	60
+#define DEFAULT_ESR_TIGHT_RT_FLT_UPCT	5860
+#define DEFAULT_ESR_BROAD_RT_FLT_UPCT	156250
+#define DEFAULT_ESR_CLAMP_MOHMS		20
+#define DEFAULT_ESR_PULSE_THRESH_MA	110
+#define DEFAULT_ESR_MEAS_CURR_MA	120
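+/*
+ * Illustrative (hypothetical) DT fragment for the optional properties
+ * parsed below; any property left out falls back to the DEFAULT_*
+ * constants above:
+ *
+ *	qcom,fg-cutoff-voltage = <3400>;
+ *	qcom,fg-jeita-thresholds = <0 5 45 50>;
+ *	qcom,fg-esr-timer-charging = <8 16>;
+ *	qcom,hold-soc-while-full;
+ */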
+static int fg_parse_dt(struct fg_chip *chip)
+{
+	struct device_node *child, *revid_node, *node = chip->dev->of_node;
+	u32 base, temp;
+	u8 subtype;
+	int rc;
+
+	if (!node) {
+		dev_err(chip->dev, "device tree node missing\n");
+		return -ENXIO;
+	}
+
+	revid_node = of_parse_phandle(node, "qcom,pmic-revid", 0);
+	if (!revid_node) {
+		pr_err("Missing qcom,pmic-revid property - driver failed\n");
+		return -EINVAL;
+	}
+
+	chip->pmic_rev_id = get_revid_data(revid_node);
+	if (IS_ERR_OR_NULL(chip->pmic_rev_id)) {
+		pr_err("Unable to get pmic_revid rc=%ld\n",
+			PTR_ERR(chip->pmic_rev_id));
+		/*
+		 * The revid peripheral must be registered; any failure
+		 * here only indicates that the rev-id module has not
+		 * probed yet.
+		 */
+		return -EPROBE_DEFER;
+	}
+
+	pr_debug("PMIC subtype %d Digital major %d\n",
+		chip->pmic_rev_id->pmic_subtype, chip->pmic_rev_id->rev4);
+
+	switch (chip->pmic_rev_id->pmic_subtype) {
+	case PMI8998_SUBTYPE:
+		if (chip->pmic_rev_id->rev4 < PMI8998_V2P0_REV4) {
+			chip->sp = pmi8998_v1_sram_params;
+			chip->alg_flags = pmi8998_v1_alg_flags;
+			chip->wa_flags |= PMI8998_V1_REV_WA;
+		} else if (chip->pmic_rev_id->rev4 == PMI8998_V2P0_REV4) {
+			chip->sp = pmi8998_v2_sram_params;
+			chip->alg_flags = pmi8998_v2_alg_flags;
+		} else {
+			return -EINVAL;
+		}
+		break;
+	case PM660_SUBTYPE:
+		chip->sp = pmi8998_v2_sram_params;
+		chip->alg_flags = pmi8998_v2_alg_flags;
+		chip->use_ima_single_mode = true;
+		if (chip->pmic_rev_id->fab_id == PM660_FAB_ID_TSMC)
+			chip->wa_flags |= PM660_TSMC_OSC_WA;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	if (of_get_available_child_count(node) == 0) {
+		dev_err(chip->dev, "No child nodes specified!\n");
+		return -ENXIO;
+	}
+
+	for_each_available_child_of_node(node, child) {
+		rc = of_property_read_u32(child, "reg", &base);
+		if (rc < 0) {
+			dev_err(chip->dev, "reg not specified in node %s, rc=%d\n",
+				child->full_name, rc);
+			return rc;
+		}
+
+		rc = fg_read(chip, base + PERPH_SUBTYPE_REG, &subtype, 1);
+		if (rc < 0) {
+			dev_err(chip->dev, "Couldn't read subtype for base %d, rc=%d\n",
+				base, rc);
+			return rc;
+		}
+
+		switch (subtype) {
+		case FG_BATT_SOC_PMI8998:
+			chip->batt_soc_base = base;
+			break;
+		case FG_BATT_INFO_PMI8998:
+			chip->batt_info_base = base;
+			break;
+		case FG_MEM_INFO_PMI8998:
+			chip->mem_if_base = base;
+			break;
+		default:
+			dev_err(chip->dev, "Invalid peripheral subtype 0x%x\n",
+				subtype);
+			return -ENXIO;
+		}
+	}
+
+	rc = of_property_read_u32(node, "qcom,rradc-base", &base);
+	if (rc < 0) {
+		dev_err(chip->dev, "rradc-base not specified, rc=%d\n", rc);
+		return rc;
+	}
+	chip->rradc_base = base;
+
+	/* Read all the optional properties below */
+	rc = of_property_read_u32(node, "qcom,fg-cutoff-voltage", &temp);
+	if (rc < 0)
+		chip->dt.cutoff_volt_mv = DEFAULT_CUTOFF_VOLT_MV;
+	else
+		chip->dt.cutoff_volt_mv = temp;
+
+	rc = of_property_read_u32(node, "qcom,fg-empty-voltage", &temp);
+	if (rc < 0)
+		chip->dt.empty_volt_mv = DEFAULT_EMPTY_VOLT_MV;
+	else
+		chip->dt.empty_volt_mv = temp;
+
+	rc = of_property_read_u32(node, "qcom,fg-vbatt-low-thr", &temp);
+	if (rc < 0)
+		chip->dt.vbatt_low_thr_mv = -EINVAL;
+	else
+		chip->dt.vbatt_low_thr_mv = temp;
+
+	rc = of_property_read_u32(node, "qcom,fg-chg-term-current", &temp);
+	if (rc < 0)
+		chip->dt.chg_term_curr_ma = DEFAULT_CHG_TERM_CURR_MA;
+	else
+		chip->dt.chg_term_curr_ma = temp;
+
+	rc = of_property_read_u32(node, "qcom,fg-sys-term-current", &temp);
+	if (rc < 0)
+		chip->dt.sys_term_curr_ma = DEFAULT_SYS_TERM_CURR_MA;
+	else
+		chip->dt.sys_term_curr_ma = temp;
+
+	rc = of_property_read_u32(node, "qcom,fg-chg-term-base-current", &temp);
+	if (rc < 0)
+		chip->dt.chg_term_base_curr_ma = DEFAULT_CHG_TERM_BASE_CURR_MA;
+	else
+		chip->dt.chg_term_base_curr_ma = temp;
+
+	rc = of_property_read_u32(node, "qcom,fg-cutoff-current", &temp);
+	if (rc < 0)
+		chip->dt.cutoff_curr_ma = DEFAULT_CUTOFF_CURR_MA;
+	else
+		chip->dt.cutoff_curr_ma = temp;
+
+	rc = of_property_read_u32(node, "qcom,fg-delta-soc-thr", &temp);
+	if (rc < 0)
+		chip->dt.delta_soc_thr = DEFAULT_DELTA_SOC_THR;
+	else
+		chip->dt.delta_soc_thr = temp;
+
+	rc = of_property_read_u32(node, "qcom,fg-recharge-soc-thr", &temp);
+	if (rc < 0)
+		chip->dt.recharge_soc_thr = DEFAULT_RECHARGE_SOC_THR;
+	else
+		chip->dt.recharge_soc_thr = temp;
+
+	rc = of_property_read_u32(node, "qcom,fg-recharge-voltage", &temp);
+	if (rc < 0)
+		chip->dt.recharge_volt_thr_mv = DEFAULT_RECHARGE_VOLT_MV;
+	else
+		chip->dt.recharge_volt_thr_mv = temp;
+
+	chip->dt.auto_recharge_soc = of_property_read_bool(node,
+					"qcom,fg-auto-recharge-soc");
+
+	rc = of_property_read_u32(node, "qcom,fg-rsense-sel", &temp);
+	if (rc < 0)
+		chip->dt.rsense_sel = SRC_SEL_BATFET_SMB;
+	else
+		chip->dt.rsense_sel = (u8)temp & SOURCE_SELECT_MASK;
+
+	chip->dt.jeita_thresholds[JEITA_COLD] = DEFAULT_BATT_TEMP_COLD;
+	chip->dt.jeita_thresholds[JEITA_COOL] = DEFAULT_BATT_TEMP_COOL;
+	chip->dt.jeita_thresholds[JEITA_WARM] = DEFAULT_BATT_TEMP_WARM;
+	chip->dt.jeita_thresholds[JEITA_HOT] = DEFAULT_BATT_TEMP_HOT;
+	if (of_property_count_elems_of_size(node, "qcom,fg-jeita-thresholds",
+		sizeof(u32)) == NUM_JEITA_LEVELS) {
+		rc = of_property_read_u32_array(node,
+				"qcom,fg-jeita-thresholds",
+				chip->dt.jeita_thresholds, NUM_JEITA_LEVELS);
+		if (rc < 0)
+			pr_warn("Error reading Jeita thresholds, default values will be used rc:%d\n",
+				rc);
+	}
+
+	if (of_property_count_elems_of_size(node,
+		"qcom,battery-thermal-coefficients",
+		sizeof(u8)) == BATT_THERM_NUM_COEFFS) {
+		rc = of_property_read_u8_array(node,
+				"qcom,battery-thermal-coefficients",
+				chip->dt.batt_therm_coeffs,
+				BATT_THERM_NUM_COEFFS);
+		if (rc < 0)
+			pr_warn("Error reading battery thermal coefficients, rc:%d\n",
+				rc);
+	}
+
+	rc = fg_parse_dt_property_u32_array(node, "qcom,fg-esr-timer-charging",
+		chip->dt.esr_timer_charging, NUM_ESR_TIMERS);
+	if (rc < 0) {
+		chip->dt.esr_timer_charging[TIMER_RETRY] = -EINVAL;
+		chip->dt.esr_timer_charging[TIMER_MAX] = -EINVAL;
+	}
+
+	rc = fg_parse_dt_property_u32_array(node, "qcom,fg-esr-timer-awake",
+		chip->dt.esr_timer_awake, NUM_ESR_TIMERS);
+	if (rc < 0) {
+		chip->dt.esr_timer_awake[TIMER_RETRY] = -EINVAL;
+		chip->dt.esr_timer_awake[TIMER_MAX] = -EINVAL;
+	}
+
+	rc = fg_parse_dt_property_u32_array(node, "qcom,fg-esr-timer-asleep",
+		chip->dt.esr_timer_asleep, NUM_ESR_TIMERS);
+	if (rc < 0) {
+		chip->dt.esr_timer_asleep[TIMER_RETRY] = -EINVAL;
+		chip->dt.esr_timer_asleep[TIMER_MAX] = -EINVAL;
+	}
+
+	chip->cyc_ctr.en = of_property_read_bool(node, "qcom,cycle-counter-en");
+	if (chip->cyc_ctr.en)
+		chip->cyc_ctr.id = 1;
+
+	chip->dt.force_load_profile = of_property_read_bool(node,
+					"qcom,fg-force-load-profile");
+
+	rc = of_property_read_u32(node, "qcom,cl-start-capacity", &temp);
+	if (rc < 0)
+		chip->dt.cl_start_soc = DEFAULT_CL_START_SOC;
+	else
+		chip->dt.cl_start_soc = temp;
+
+	rc = of_property_read_u32(node, "qcom,cl-min-temp", &temp);
+	if (rc < 0)
+		chip->dt.cl_min_temp = DEFAULT_CL_MIN_TEMP_DECIDEGC;
+	else
+		chip->dt.cl_min_temp = temp;
+
+	rc = of_property_read_u32(node, "qcom,cl-max-temp", &temp);
+	if (rc < 0)
+		chip->dt.cl_max_temp = DEFAULT_CL_MAX_TEMP_DECIDEGC;
+	else
+		chip->dt.cl_max_temp = temp;
+
+	rc = of_property_read_u32(node, "qcom,cl-max-increment", &temp);
+	if (rc < 0)
+		chip->dt.cl_max_cap_inc = DEFAULT_CL_MAX_INC_DECIPERC;
+	else
+		chip->dt.cl_max_cap_inc = temp;
+
+	rc = of_property_read_u32(node, "qcom,cl-max-decrement", &temp);
+	if (rc < 0)
+		chip->dt.cl_max_cap_dec = DEFAULT_CL_MAX_DEC_DECIPERC;
+	else
+		chip->dt.cl_max_cap_dec = temp;
+
+	rc = of_property_read_u32(node, "qcom,cl-min-limit", &temp);
+	if (rc < 0)
+		chip->dt.cl_min_cap_limit = DEFAULT_CL_MIN_LIM_DECIPERC;
+	else
+		chip->dt.cl_min_cap_limit = temp;
+
+	rc = of_property_read_u32(node, "qcom,cl-max-limit", &temp);
+	if (rc < 0)
+		chip->dt.cl_max_cap_limit = DEFAULT_CL_MAX_LIM_DECIPERC;
+	else
+		chip->dt.cl_max_cap_limit = temp;
+
+	rc = of_property_read_u32(node, "qcom,fg-jeita-hyst-temp", &temp);
+	if (rc < 0)
+		chip->dt.jeita_hyst_temp = -EINVAL;
+	else
+		chip->dt.jeita_hyst_temp = temp;
+
+	rc = of_property_read_u32(node, "qcom,fg-batt-temp-delta", &temp);
+	if (rc < 0)
+		chip->dt.batt_temp_delta = -EINVAL;
+	else if (temp > BTEMP_DELTA_LOW && temp <= BTEMP_DELTA_HIGH)
+		chip->dt.batt_temp_delta = temp;
+
+	chip->dt.hold_soc_while_full = of_property_read_bool(node,
+					"qcom,hold-soc-while-full");
+
+	chip->dt.linearize_soc = of_property_read_bool(node,
+					"qcom,linearize-soc");
+
+	rc = fg_parse_ki_coefficients(chip);
+	if (rc < 0)
+		pr_err("Error in parsing Ki coefficients, rc=%d\n", rc);
+
+	rc = of_property_read_u32(node, "qcom,fg-rconn-mohms", &temp);
+	if (!rc)
+		chip->dt.rconn_mohms = temp;
+
+	rc = of_property_read_u32(node, "qcom,fg-esr-filter-switch-temp",
+			&temp);
+	if (rc < 0)
+		chip->dt.esr_flt_switch_temp = DEFAULT_ESR_FLT_TEMP_DECIDEGC;
+	else
+		chip->dt.esr_flt_switch_temp = temp;
+
+	rc = of_property_read_u32(node, "qcom,fg-esr-tight-filter-micro-pct",
+			&temp);
+	if (rc < 0)
+		chip->dt.esr_tight_flt_upct = DEFAULT_ESR_TIGHT_FLT_UPCT;
+	else
+		chip->dt.esr_tight_flt_upct = temp;
+
+	rc = of_property_read_u32(node, "qcom,fg-esr-broad-filter-micro-pct",
+			&temp);
+	if (rc < 0)
+		chip->dt.esr_broad_flt_upct = DEFAULT_ESR_BROAD_FLT_UPCT;
+	else
+		chip->dt.esr_broad_flt_upct = temp;
+
+	rc = of_property_read_u32(node, "qcom,fg-esr-tight-lt-filter-micro-pct",
+			&temp);
+	if (rc < 0)
+		chip->dt.esr_tight_lt_flt_upct = DEFAULT_ESR_TIGHT_LT_FLT_UPCT;
+	else
+		chip->dt.esr_tight_lt_flt_upct = temp;
+
+	rc = of_property_read_u32(node, "qcom,fg-esr-broad-lt-filter-micro-pct",
+			&temp);
+	if (rc < 0)
+		chip->dt.esr_broad_lt_flt_upct = DEFAULT_ESR_BROAD_LT_FLT_UPCT;
+	else
+		chip->dt.esr_broad_lt_flt_upct = temp;
+
+	rc = of_property_read_u32(node, "qcom,fg-esr-rt-filter-switch-temp",
+			&temp);
+	if (rc < 0)
+		chip->dt.esr_flt_rt_switch_temp = DEFAULT_ESR_FLT_RT_DECIDEGC;
+	else
+		chip->dt.esr_flt_rt_switch_temp = temp;
+
+	rc = of_property_read_u32(node, "qcom,fg-esr-tight-rt-filter-micro-pct",
+			&temp);
+	if (rc < 0)
+		chip->dt.esr_tight_rt_flt_upct = DEFAULT_ESR_TIGHT_RT_FLT_UPCT;
+	else
+		chip->dt.esr_tight_rt_flt_upct = temp;
+
+	rc = of_property_read_u32(node, "qcom,fg-esr-broad-rt-filter-micro-pct",
+			&temp);
+	if (rc < 0)
+		chip->dt.esr_broad_rt_flt_upct = DEFAULT_ESR_BROAD_RT_FLT_UPCT;
+	else
+		chip->dt.esr_broad_rt_flt_upct = temp;
+
+	rc = fg_parse_slope_limit_coefficients(chip);
+	if (rc < 0)
+		pr_err("Error in parsing slope limit coeffs, rc=%d\n", rc);
+
+	rc = of_property_read_u32(node, "qcom,fg-esr-clamp-mohms", &temp);
+	if (rc < 0)
+		chip->dt.esr_clamp_mohms = DEFAULT_ESR_CLAMP_MOHMS;
+	else
+		chip->dt.esr_clamp_mohms = temp;
+
+	chip->dt.esr_pulse_thresh_ma = DEFAULT_ESR_PULSE_THRESH_MA;
+	rc = of_property_read_u32(node, "qcom,fg-esr-pulse-thresh-ma", &temp);
+	if (!rc) {
+		/* ESR pulse qualification threshold range is 1-997 mA */
+		if (temp > 0 && temp <= 997)
+			chip->dt.esr_pulse_thresh_ma = temp;
+	}
+
+	chip->dt.esr_meas_curr_ma = DEFAULT_ESR_MEAS_CURR_MA;
+	rc = of_property_read_u32(node, "qcom,fg-esr-meas-curr-ma", &temp);
+	if (!rc) {
+		/* ESR measurement current range is 60-240 mA */
+		if (temp >= 60 && temp <= 240)
+			chip->dt.esr_meas_curr_ma = temp;
+	}
+
+	return 0;
+}
+
+static void fg_cleanup(struct fg_chip *chip)
+{
+	alarm_try_to_cancel(&chip->esr_filter_alarm);
+	power_supply_unreg_notifier(&chip->nb);
+	debugfs_remove_recursive(chip->dfs_root);
+	if (chip->awake_votable)
+		destroy_votable(chip->awake_votable);
+
+	if (chip->delta_bsoc_irq_en_votable)
+		destroy_votable(chip->delta_bsoc_irq_en_votable);
+
+	if (chip->batt_miss_irq_en_votable)
+		destroy_votable(chip->batt_miss_irq_en_votable);
+
+	if (chip->batt_id_chan)
+		iio_channel_release(chip->batt_id_chan);
+
+	dev_set_drvdata(chip->dev, NULL);
+}
+
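+/*
+ * Probe order matters: regmap and IIO channels first (both can defer
+ * probing), then votables and DT parsing, then SRAM/hardware init, and
+ * only then the power supply, notifier and interrupts, keeping
+ * callbacks from firing before the state they rely on exists.
+ */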
+static int fg_gen3_probe(struct platform_device *pdev)
+{
+	struct fg_chip *chip;
+	struct power_supply_config fg_psy_cfg;
+	int rc, msoc, volt_uv, batt_temp;
+
+	chip = devm_kzalloc(&pdev->dev, sizeof(*chip), GFP_KERNEL);
+	if (!chip)
+		return -ENOMEM;
+
+	chip->dev = &pdev->dev;
+	chip->debug_mask = &fg_gen3_debug_mask;
+	chip->irqs = fg_irqs;
+	chip->charge_status = -EINVAL;
+	chip->prev_charge_status = -EINVAL;
+	chip->ki_coeff_full_soc = -EINVAL;
+	chip->online_status = -EINVAL;
+	chip->regmap = dev_get_regmap(chip->dev->parent, NULL);
+	if (!chip->regmap) {
+		dev_err(chip->dev, "Parent regmap is unavailable\n");
+		return -ENXIO;
+	}
+
+	chip->batt_id_chan = iio_channel_get(chip->dev, "rradc_batt_id");
+	if (IS_ERR(chip->batt_id_chan)) {
+		if (PTR_ERR(chip->batt_id_chan) != -EPROBE_DEFER)
+			pr_err("batt_id_chan unavailable %ld\n",
+				PTR_ERR(chip->batt_id_chan));
+		rc = PTR_ERR(chip->batt_id_chan);
+		chip->batt_id_chan = NULL;
+		return rc;
+	}
+
+	rc = of_property_match_string(chip->dev->of_node,
+				"io-channel-names", "rradc_die_temp");
+	if (rc >= 0) {
+		chip->die_temp_chan = iio_channel_get(chip->dev,
+						"rradc_die_temp");
+		if (IS_ERR(chip->die_temp_chan)) {
+			if (PTR_ERR(chip->die_temp_chan) != -EPROBE_DEFER)
+				pr_err("rradc_die_temp unavailable %ld\n",
+					PTR_ERR(chip->die_temp_chan));
+			rc = PTR_ERR(chip->die_temp_chan);
+			chip->die_temp_chan = NULL;
+			return rc;
+		}
+	}
+
+	chip->awake_votable = create_votable("FG_WS", VOTE_SET_ANY, fg_awake_cb,
+					chip);
+	if (IS_ERR(chip->awake_votable)) {
+		rc = PTR_ERR(chip->awake_votable);
+		chip->awake_votable = NULL;
+		goto exit;
+	}
+
+	chip->delta_bsoc_irq_en_votable = create_votable("FG_DELTA_BSOC_IRQ",
+						VOTE_SET_ANY,
+						fg_delta_bsoc_irq_en_cb, chip);
+	if (IS_ERR(chip->delta_bsoc_irq_en_votable)) {
+		rc = PTR_ERR(chip->delta_bsoc_irq_en_votable);
+		chip->delta_bsoc_irq_en_votable = NULL;
+		goto exit;
+	}
+
+	chip->batt_miss_irq_en_votable = create_votable("FG_BATT_MISS_IRQ",
+						VOTE_SET_ANY,
+						fg_batt_miss_irq_en_cb, chip);
+	if (IS_ERR(chip->batt_miss_irq_en_votable)) {
+		rc = PTR_ERR(chip->batt_miss_irq_en_votable);
+		chip->batt_miss_irq_en_votable = NULL;
+		goto exit;
+	}
+
+	rc = fg_parse_dt(chip);
+	if (rc < 0) {
+		dev_err(chip->dev, "Error in reading DT parameters, rc:%d\n",
+			rc);
+		goto exit;
+	}
+
+	mutex_init(&chip->bus_lock);
+	mutex_init(&chip->sram_rw_lock);
+	mutex_init(&chip->cyc_ctr.lock);
+	mutex_init(&chip->cl.lock);
+	mutex_init(&chip->ttf.lock);
+	mutex_init(&chip->charge_full_lock);
+	mutex_init(&chip->qnovo_esr_ctrl_lock);
+	spin_lock_init(&chip->suspend_lock);
+	init_completion(&chip->soc_update);
+	init_completion(&chip->soc_ready);
+	INIT_DELAYED_WORK(&chip->profile_load_work, profile_load_work);
+	INIT_WORK(&chip->status_change_work, status_change_work);
+	INIT_DELAYED_WORK(&chip->ttf_work, ttf_work);
+	INIT_DELAYED_WORK(&chip->sram_dump_work, sram_dump_work);
+	INIT_WORK(&chip->esr_filter_work, esr_filter_work);
+	alarm_init(&chip->esr_filter_alarm, ALARM_BOOTTIME,
+			fg_esr_filter_alarm_cb);
+
+	rc = fg_memif_init(chip);
+	if (rc < 0) {
+		dev_err(chip->dev, "Error in initializing FG_MEMIF, rc:%d\n",
+			rc);
+		goto exit;
+	}
+
+	rc = fg_hw_init(chip);
+	if (rc < 0) {
+		dev_err(chip->dev, "Error in initializing FG hardware, rc:%d\n",
+			rc);
+		goto exit;
+	}
+
+	platform_set_drvdata(pdev, chip);
+
+	/* Register the power supply */
+	fg_psy_cfg.drv_data = chip;
+	fg_psy_cfg.of_node = NULL;
+	fg_psy_cfg.supplied_to = NULL;
+	fg_psy_cfg.num_supplicants = 0;
+	chip->fg_psy = devm_power_supply_register(chip->dev, &fg_psy_desc,
+			&fg_psy_cfg);
+	if (IS_ERR(chip->fg_psy)) {
+		pr_err("failed to register fg_psy rc = %ld\n",
+				PTR_ERR(chip->fg_psy));
+		rc = PTR_ERR(chip->fg_psy);
+		goto exit;
+	}
+
+	chip->nb.notifier_call = fg_notifier_cb;
+	rc = power_supply_reg_notifier(&chip->nb);
+	if (rc < 0) {
+		pr_err("Couldn't register psy notifier rc = %d\n", rc);
+		goto exit;
+	}
+
+	rc = fg_register_interrupts(chip);
+	if (rc < 0) {
+		dev_err(chip->dev, "Error in registering interrupts, rc:%d\n",
+			rc);
+		goto exit;
+	}
+
+	/* Keep SOC_UPDATE_IRQ disabled until we require it */
+	if (fg_irqs[SOC_UPDATE_IRQ].irq)
+		disable_irq_nosync(fg_irqs[SOC_UPDATE_IRQ].irq);
+
+	/* Keep BSOC_DELTA_IRQ disabled until we require it */
+	vote(chip->delta_bsoc_irq_en_votable, DELTA_BSOC_IRQ_VOTER, false, 0);
+
+	/* Keep BATT_MISSING_IRQ disabled until we require it */
+	vote(chip->batt_miss_irq_en_votable, BATT_MISS_IRQ_VOTER, false, 0);
+
+	rc = fg_debugfs_create(chip);
+	if (rc < 0) {
+		dev_err(chip->dev, "Error in creating debugfs entries, rc:%d\n",
+			rc);
+		goto exit;
+	}
+
+	rc = fg_get_battery_voltage(chip, &volt_uv);
+	if (!rc)
+		rc = fg_get_prop_capacity(chip, &msoc);
+
+	if (!rc)
+		rc = fg_get_battery_temp(chip, &batt_temp);
+
+	if (!rc) {
+		pr_info("battery SOC:%d voltage: %duV temp: %d id: %dkOhms\n",
+			msoc, volt_uv, batt_temp, chip->batt_id_ohms / 1000);
+		rc = fg_esr_filter_config(chip, batt_temp, false);
+		if (rc < 0)
+			pr_err("Error in configuring ESR filter rc:%d\n", rc);
+	}
+
+	device_init_wakeup(chip->dev, true);
+	schedule_delayed_work(&chip->profile_load_work, 0);
+
+	pr_debug("FG GEN3 driver probed successfully\n");
+	return 0;
+exit:
+	fg_cleanup(chip);
+	return rc;
+}
+
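+/*
+ * System suspend/resume: the suspended flag is flipped under
+ * suspend_lock so fg_notifier_cb() drops PSY events that race with
+ * suspend; the periodic TTF and SRAM-dump work is parked here and
+ * re-armed on resume.
+ */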
+static int fg_gen3_suspend(struct device *dev)
+{
+	struct fg_chip *chip = dev_get_drvdata(dev);
+	int rc;
+
+	spin_lock(&chip->suspend_lock);
+	chip->suspended = true;
+	spin_unlock(&chip->suspend_lock);
+
+	rc = fg_esr_timer_config(chip, true);
+	if (rc < 0)
+		pr_err("Error in configuring ESR timer, rc=%d\n", rc);
+
+	cancel_delayed_work_sync(&chip->ttf_work);
+	if (fg_sram_dump)
+		cancel_delayed_work_sync(&chip->sram_dump_work);
+	return 0;
+}
+
+static int fg_gen3_resume(struct device *dev)
+{
+	struct fg_chip *chip = dev_get_drvdata(dev);
+	int rc;
+
+	rc = fg_esr_timer_config(chip, false);
+	if (rc < 0)
+		pr_err("Error in configuring ESR timer, rc=%d\n", rc);
+
+	schedule_delayed_work(&chip->ttf_work, 0);
+	if (fg_sram_dump)
+		schedule_delayed_work(&chip->sram_dump_work,
+				msecs_to_jiffies(fg_sram_dump_period_ms));
+
+	if (!work_pending(&chip->status_change_work)) {
+		pm_stay_awake(chip->dev);
+		schedule_work(&chip->status_change_work);
+	}
+
+	spin_lock(&chip->suspend_lock);
+	chip->suspended = false;
+	spin_unlock(&chip->suspend_lock);
+
+	return 0;
+}
+
+static const struct dev_pm_ops fg_gen3_pm_ops = {
+	.suspend	= fg_gen3_suspend,
+	.resume		= fg_gen3_resume,
+};
+
+static int fg_gen3_remove(struct platform_device *pdev)
+{
+	struct fg_chip *chip = dev_get_drvdata(&pdev->dev);
+
+	fg_cleanup(chip);
+	return 0;
+}
+
+static void fg_gen3_shutdown(struct platform_device *pdev)
+{
+	struct fg_chip *chip = dev_get_drvdata(&pdev->dev);
+	int rc, bsoc;
+
+	if (chip->charge_full) {
+		rc = fg_get_sram_prop(chip, FG_SRAM_BATT_SOC, &bsoc);
+		if (rc < 0) {
+			pr_err("Error in getting BATT_SOC, rc=%d\n", rc);
+			return;
+		}
+
+		/* We need the 2 most significant bytes here */
+		bsoc = (u32)bsoc >> 16;
+
+		rc = fg_configure_full_soc(chip, bsoc);
+		if (rc < 0) {
+			pr_err("Error in configuring full_soc, rc=%d\n", rc);
+			return;
+		}
+	}
+}
+
+static const struct of_device_id fg_gen3_match_table[] = {
+	{.compatible = FG_GEN3_DEV_NAME},
+	{},
+};
+
+static struct platform_driver fg_gen3_driver = {
+	.driver = {
+		.name = FG_GEN3_DEV_NAME,
+		.owner = THIS_MODULE,
+		.of_match_table = fg_gen3_match_table,
+		.pm		= &fg_gen3_pm_ops,
+	},
+	.probe		= fg_gen3_probe,
+	.remove		= fg_gen3_remove,
+	.shutdown	= fg_gen3_shutdown,
+};
+
+static int __init fg_gen3_init(void)
+{
+	return platform_driver_register(&fg_gen3_driver);
+}
+
+static void __exit fg_gen3_exit(void)
+{
+	platform_driver_unregister(&fg_gen3_driver);
+}
+
+module_init(fg_gen3_init);
+module_exit(fg_gen3_exit);
+
+MODULE_DESCRIPTION("QPNP Fuel gauge GEN3 driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:" FG_GEN3_DEV_NAME);
diff -Nruw linux-4.4.115/drivers/power/supply/qcom/qpnp-qnovo.c linux-4.4.115-fbx/drivers/power/supply/qcom/qpnp-qnovo.c
--- linux-4.4.115/drivers/power/supply/qcom/qpnp-qnovo.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/power/supply/qcom/qpnp-qnovo.c	2019-01-22 16:16:26.231271110 +0100
@@ -0,0 +1,1757 @@
+/* Copyright (c) 2016-2017 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/power_supply.h>
+#include <linux/interrupt.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/qpnp/qpnp-revid.h>
+#include <linux/pmic-voter.h>
+#include <linux/delay.h>
+
+#define QNOVO_REVISION1		0x00
+#define QNOVO_REVISION2		0x01
+#define QNOVO_PERPH_TYPE	0x04
+#define QNOVO_PERPH_SUBTYPE	0x05
+#define QNOVO_PTTIME_STS	0x07
+#define QNOVO_PTRAIN_STS	0x08
+#define QNOVO_ERROR_STS		0x09
+#define QNOVO_ERROR_BIT		BIT(0)
+#define QNOVO_ERROR_STS2	0x0A
+#define QNOVO_ERROR_CHARGING_DISABLED	BIT(1)
+#define QNOVO_INT_RT_STS	0x10
+#define QNOVO_INT_SET_TYPE	0x11
+#define QNOVO_INT_POLARITY_HIGH	0x12
+#define QNOVO_INT_POLARITY_LOW	0x13
+#define QNOVO_INT_LATCHED_CLR	0x14
+#define QNOVO_INT_EN_SET	0x15
+#define QNOVO_INT_EN_CLR	0x16
+#define QNOVO_INT_LATCHED_STS	0x18
+#define QNOVO_INT_PENDING_STS	0x19
+#define QNOVO_INT_MID_SEL	0x1A
+#define QNOVO_INT_PRIORITY	0x1B
+#define QNOVO_PE_CTRL		0x40
+#define QNOVO_PREST1_CTRL	0x41
+#define QNOVO_PPULS1_LSB_CTRL	0x42
+#define QNOVO_PPULS1_MSB_CTRL	0x43
+#define QNOVO_NREST1_CTRL	0x44
+#define QNOVO_NPULS1_CTRL	0x45
+#define QNOVO_PPCNT_CTRL	0x46
+#define QNOVO_VLIM1_LSB_CTRL	0x47
+#define QNOVO_VLIM1_MSB_CTRL	0x48
+#define QNOVO_PTRAIN_EN		0x49
+#define QNOVO_PTRAIN_EN_BIT	BIT(0)
+#define QNOVO_PE_CTRL2		0x4A
+#define QNOVO_PREST2_LSB_CTRL	0x50
+#define QNOVO_PREST2_MSB_CTRL	0x51
+#define QNOVO_PPULS2_LSB_CTRL	0x52
+#define QNOVO_PPULS2_MSB_CTRL	0x53
+#define QNOVO_NREST2_CTRL	0x54
+#define QNOVO_NPULS2_CTRL	0x55
+#define QNOVO_VLIM2_LSB_CTRL	0x56
+#define QNOVO_VLIM2_MSB_CTRL	0x57
+#define QNOVO_PVOLT1_LSB	0x60
+#define QNOVO_PVOLT1_MSB	0x61
+#define QNOVO_PCUR1_LSB		0x62
+#define QNOVO_PCUR1_MSB		0x63
+#define QNOVO_PVOLT2_LSB	0x70
+#define QNOVO_PVOLT2_MSB	0x71
+#define QNOVO_RVOLT2_LSB	0x72
+#define QNOVO_RVOLT2_MSB	0x73
+#define QNOVO_PCUR2_LSB		0x74
+#define QNOVO_PCUR2_MSB		0x75
+#define QNOVO_SCNT		0x80
+#define QNOVO_VMAX_LSB		0x90
+#define QNOVO_VMAX_MSB		0x91
+#define QNOVO_SNUM		0x92
+
+/* Registers ending in 0 imply external rsense */
+#define QNOVO_IADC_OFFSET_0	0xA0
+#define QNOVO_IADC_OFFSET_1	0xA1
+#define QNOVO_IADC_GAIN_0	0xA2
+#define QNOVO_IADC_GAIN_1	0xA3
+#define QNOVO_VADC_OFFSET	0xA4
+#define QNOVO_VADC_GAIN		0xA5
+#define QNOVO_IADC_GAIN_2	0xA6
+#define QNOVO_SPARE		0xA7
+#define QNOVO_STRM_CTRL		0xA8
+#define QNOVO_IADC_OFFSET_OVR_VAL	0xA9
+#define QNOVO_IADC_OFFSET_OVR		0xAA
+
+#define QNOVO_DISABLE_CHARGING		0xAB
+#define ERR_SWITCHER_DISABLED		BIT(7)
+#define ERR_JEITA_SOFT_CONDITION	BIT(6)
+#define ERR_BAT_OV			BIT(5)
+#define ERR_CV_MODE			BIT(4)
+#define ERR_BATTERY_MISSING		BIT(3)
+#define ERR_SAFETY_TIMER_EXPIRED	BIT(2)
+#define ERR_CHARGING_DISABLED		BIT(1)
+#define ERR_JEITA_HARD_CONDITION	BIT(0)
+
+#define QNOVO_TR_IADC_OFFSET_0	0xF1
+#define QNOVO_TR_IADC_OFFSET_1	0xF2
+
+#define DRV_MAJOR_VERSION	1
+#define DRV_MINOR_VERSION	0
+
+#define IADC_LSB_NA	2441400
+#define VADC_LSB_NA	1220700
+#define GAIN_LSB_FACTOR	976560
+
+#define USER_VOTER		"user_voter"
+#define SHUTDOWN_VOTER		"shutdown_voter"
+#define OK_TO_QNOVO_VOTER	"ok_to_qnovo_voter"
+
+#define QNOVO_VOTER		"qnovo_voter"
+#define FG_AVAILABLE_VOTER	"FG_AVAILABLE_VOTER"
+#define QNOVO_OVERALL_VOTER	"QNOVO_OVERALL_VOTER"
+#define QNI_PT_VOTER		"QNI_PT_VOTER"
+#define ESR_VOTER		"ESR_VOTER"
+
+#define HW_OK_TO_QNOVO_VOTER	"HW_OK_TO_QNOVO_VOTER"
+#define CHG_READY_VOTER		"CHG_READY_VOTER"
+#define USB_READY_VOTER		"USB_READY_VOTER"
+#define DC_READY_VOTER		"DC_READY_VOTER"
+
+#define PT_RESTART_VOTER	"PT_RESTART_VOTER"
+
+struct qnovo_dt_props {
+	bool			external_rsense;
+	struct device_node	*revid_dev_node;
+	bool			enable_for_dc;
+};
+
+struct qnovo {
+	int			base;
+	struct mutex		write_lock;
+	struct regmap		*regmap;
+	struct qnovo_dt_props	dt;
+	struct device		*dev;
+	struct votable		*disable_votable;
+	struct votable		*pt_dis_votable;
+	struct votable		*not_ok_to_qnovo_votable;
+	struct votable		*chg_ready_votable;
+	struct votable		*awake_votable;
+	struct class		qnovo_class;
+	struct pmic_revid_data	*pmic_rev_id;
+	u32			wa_flags;
+	s64			external_offset_nA;
+	s64			internal_offset_nA;
+	s64			offset_nV;
+	s64			external_i_gain_mega;
+	s64			internal_i_gain_mega;
+	s64			v_gain_mega;
+	struct notifier_block	nb;
+	struct power_supply	*batt_psy;
+	struct power_supply	*bms_psy;
+	struct power_supply	*usb_psy;
+	struct power_supply	*dc_psy;
+	struct work_struct	status_change_work;
+	int			fv_uV_request;
+	int			fcc_uA_request;
+	int			usb_present;
+	int			dc_present;
+	struct delayed_work	usb_debounce_work;
+	struct delayed_work	dc_debounce_work;
+
+	struct delayed_work	ptrain_restart_work;
+};
+
+static int debug_mask;
+module_param_named(debug_mask, debug_mask, int, 0600);
+
+#define qnovo_dbg(chip, reason, fmt, ...)				\
+	do {								\
+		if (debug_mask & (reason))				\
+			dev_info(chip->dev, fmt, ##__VA_ARGS__);	\
+		else							\
+			dev_dbg(chip->dev, fmt, ##__VA_ARGS__);		\
+	} while (0)
+
+static bool is_secure(struct qnovo *chip, int addr)
+{
+	/* assume everything at or above 0x40 is secure */
+	return (bool)(addr >= 0x40);
+}
+
+static int qnovo_read(struct qnovo *chip, u16 addr, u8 *buf, int len)
+{
+	return regmap_bulk_read(chip->regmap, chip->base + addr, buf, len);
+}
+
+static int qnovo_masked_write(struct qnovo *chip, u16 addr, u8 mask, u8 val)
+{
+	int rc = 0;
+
+	mutex_lock(&chip->write_lock);
+	if (is_secure(chip, addr)) {
+		rc = regmap_write(chip->regmap,
+				((chip->base + addr) & ~(0xFF)) | 0xD0, 0xA5);
+		if (rc < 0)
+			goto unlock;
+	}
+
+	rc = regmap_update_bits(chip->regmap, chip->base + addr, mask, val);
+
+unlock:
+	mutex_unlock(&chip->write_lock);
+	return rc;
+}
+
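+/*
+ * Bulk write that honours secure addresses: before each write at or
+ * above 0x40 the peripheral is unlocked by writing 0xA5 to offset 0xD0
+ * of its 256-byte page (the usual SPMI SEC_ACCESS sequence).
+ */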
+static int qnovo_write(struct qnovo *chip, u16 addr, u8 *buf, int len)
+{
+	int i, rc = 0;
+	bool is_start_secure, is_end_secure;
+
+	is_start_secure = is_secure(chip, addr);
+	is_end_secure = is_secure(chip, addr + len);
+
+	if (!is_start_secure && !is_end_secure) {
+		mutex_lock(&chip->write_lock);
+		rc = regmap_bulk_write(chip->regmap, chip->base + addr,
+					buf, len);
+		goto unlock;
+	}
+
+	mutex_lock(&chip->write_lock);
+	for (i = addr; i < addr + len; i++) {
+		if (is_secure(chip, i)) {
+			rc = regmap_write(chip->regmap,
+				((chip->base + i) & ~(0xFF)) | 0xD0, 0xA5);
+			if (rc < 0)
+				goto unlock;
+		}
+		rc = regmap_write(chip->regmap, chip->base + i, buf[i - addr]);
+		if (rc < 0)
+			goto unlock;
+	}
+
+unlock:
+	mutex_unlock(&chip->write_lock);
+	return rc;
+}
+
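+/*
+ * Peer power supplies ("battery", "bms", "usb", "dc") may register
+ * after this driver probes, so resolve them lazily by name and cache
+ * the handle on the first successful lookup.
+ */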
+static bool is_batt_available(struct qnovo *chip)
+{
+	if (!chip->batt_psy)
+		chip->batt_psy = power_supply_get_by_name("battery");
+
+	if (!chip->batt_psy)
+		return false;
+
+	return true;
+}
+
+static bool is_fg_available(struct qnovo *chip)
+{
+	if (!chip->bms_psy)
+		chip->bms_psy = power_supply_get_by_name("bms");
+
+	if (!chip->bms_psy)
+		return false;
+
+	return true;
+}
+
+static bool is_usb_available(struct qnovo *chip)
+{
+	if (!chip->usb_psy)
+		chip->usb_psy = power_supply_get_by_name("usb");
+
+	if (!chip->usb_psy)
+		return false;
+
+	return true;
+}
+
+static bool is_dc_available(struct qnovo *chip)
+{
+	if (!chip->dc_psy)
+		chip->dc_psy = power_supply_get_by_name("dc");
+
+	if (!chip->dc_psy)
+		return false;
+
+	return true;
+}
+
+static int qnovo_batt_psy_update(struct qnovo *chip, bool disable)
+{
+	union power_supply_propval pval = {0};
+	int rc = 0;
+
+	if (!is_batt_available(chip))
+		return -EINVAL;
+
+	if (chip->fv_uV_request != -EINVAL) {
+		pval.intval = disable ? -EINVAL : chip->fv_uV_request;
+		rc = power_supply_set_property(chip->batt_psy,
+			POWER_SUPPLY_PROP_VOLTAGE_QNOVO,
+			&pval);
+		if (rc < 0) {
+			pr_err("Couldn't set prop qnovo_fv rc = %d\n", rc);
+			return -EINVAL;
+		}
+	}
+
+	if (chip->fcc_uA_request != -EINVAL) {
+		pval.intval = disable ? -EINVAL : chip->fcc_uA_request;
+		rc = power_supply_set_property(chip->batt_psy,
+			POWER_SUPPLY_PROP_CURRENT_QNOVO,
+			&pval);
+		if (rc < 0) {
+			pr_err("Couldn't set prop qnovo_fcc rc = %d\n", rc);
+			return -EINVAL;
+		}
+	}
+
+	return rc;
+}
+
+static int qnovo_disable_cb(struct votable *votable, void *data, int disable,
+					const char *client)
+{
+	struct qnovo *chip = data;
+	union power_supply_propval pval = {0};
+	int rc;
+
+	if (!is_batt_available(chip))
+		return -EINVAL;
+
+	pval.intval = !disable;
+	rc = power_supply_set_property(chip->batt_psy,
+			POWER_SUPPLY_PROP_CHARGE_QNOVO_ENABLE,
+			&pval);
+	if (rc < 0) {
+		pr_err("Couldn't set prop qnovo_enable rc = %d\n", rc);
+		return -EINVAL;
+	}
+
+	/*
+	 * propagate the enable to the fuel gauge only when it is
+	 * available; FG_AVAILABLE_VOTER keeps qnovo disabled until then
+	 */
+
+	if (is_fg_available(chip))
+		power_supply_set_property(chip->bms_psy,
+				POWER_SUPPLY_PROP_CHARGE_QNOVO_ENABLE,
+				&pval);
+
+	vote(chip->pt_dis_votable, QNOVO_OVERALL_VOTER, disable, 0);
+	rc = qnovo_batt_psy_update(chip, disable);
+	return rc;
+}
+
+static int pt_dis_votable_cb(struct votable *votable, void *data, int disable,
+					const char *client)
+{
+	struct qnovo *chip = data;
+	int rc;
+
+	if (disable) {
+		cancel_delayed_work_sync(&chip->ptrain_restart_work);
+		vote(chip->awake_votable, PT_RESTART_VOTER, false, 0);
+	}
+
+	rc = qnovo_masked_write(chip, QNOVO_PTRAIN_EN, QNOVO_PTRAIN_EN_BIT,
+				 disable ? 0 : QNOVO_PTRAIN_EN_BIT);
+	if (rc < 0) {
+		dev_err(chip->dev, "Couldn't %s pulse train rc=%d\n",
+			disable ? "disable" : "enable", rc);
+		return rc;
+	}
+
+	if (!disable) {
+		vote(chip->awake_votable, PT_RESTART_VOTER, true, 0);
+		schedule_delayed_work(&chip->ptrain_restart_work,
+				msecs_to_jiffies(20));
+	}
+
+	return 0;
+}
+
+static int not_ok_to_qnovo_cb(struct votable *votable, void *data,
+					int not_ok_to_qnovo,
+					const char *client)
+{
+	struct qnovo *chip = data;
+
+	vote(chip->disable_votable, OK_TO_QNOVO_VOTER, not_ok_to_qnovo, 0);
+	if (not_ok_to_qnovo)
+		vote(chip->disable_votable, USER_VOTER, true, 0);
+
+	kobject_uevent(&chip->dev->kobj, KOBJ_CHANGE);
+	return 0;
+}
+
+static int chg_ready_cb(struct votable *votable, void *data, int ready,
+					const char *client)
+{
+	struct qnovo *chip = data;
+
+	vote(chip->not_ok_to_qnovo_votable, CHG_READY_VOTER, !ready, 0);
+
+	return 0;
+}
+
+static int awake_cb(struct votable *votable, void *data, int awake,
+					const char *client)
+{
+	struct qnovo *chip = data;
+
+	if (awake)
+		pm_stay_awake(chip->dev);
+	else
+		pm_relax(chip->dev);
+
+	return 0;
+}
+
+static int qnovo_parse_dt(struct qnovo *chip)
+{
+	struct device_node *node = chip->dev->of_node;
+	int rc;
+
+	if (!node) {
+		pr_err("device tree node missing\n");
+		return -EINVAL;
+	}
+
+	rc = of_property_read_u32(node, "reg", &chip->base);
+	if (rc < 0) {
+		pr_err("Couldn't read base rc = %d\n", rc);
+		return rc;
+	}
+
+	chip->dt.external_rsense = of_property_read_bool(node,
+			"qcom,external-rsense");
+
+	chip->dt.revid_dev_node = of_parse_phandle(node, "qcom,pmic-revid", 0);
+	if (!chip->dt.revid_dev_node) {
+		pr_err("Missing qcom,pmic-revid property - driver failed\n");
+		return -EINVAL;
+	}
+	chip->dt.enable_for_dc = of_property_read_bool(node,
+					"qcom,enable-for-dc");
+
+	return 0;
+}
+
+enum {
+	VER = 0,
+	OK_TO_QNOVO,
+	QNOVO_ENABLE,
+	PT_ENABLE,
+	FV_REQUEST,
+	FCC_REQUEST,
+	PE_CTRL_REG,
+	PE_CTRL2_REG,
+	PTRAIN_STS_REG,
+	INT_RT_STS_REG,
+	ERR_STS2_REG,
+	PREST1,
+	PPULS1,
+	NREST1,
+	NPULS1,
+	PPCNT,
+	VLIM1,
+	PVOLT1,
+	PCUR1,
+	PTTIME,
+	PREST2,
+	PPULS2,
+	NREST2,
+	NPULS2,
+	VLIM2,
+	PVOLT2,
+	RVOLT2,
+	PCUR2,
+	SCNT,
+	VMAX,
+	SNUM,
+	VBATT,
+	IBATT,
+	BATTTEMP,
+	BATTSOC,
+};
+
+struct param_info {
+	char	*name;
+	int	start_addr;
+	int	num_regs;
+	int	reg_to_unit_multiplier;
+	int	reg_to_unit_divider;
+	int	reg_to_unit_offset;
+	int	min_val;
+	int	max_val;
+	char	*units_str;
+};
+
+static struct param_info params[] = {
+	[PT_ENABLE] = {
+		.name			= "PT_ENABLE",
+		.start_addr		= QNOVO_PTRAIN_EN,
+		.num_regs		= 1,
+		.units_str		= "",
+	},
+	[FV_REQUEST] = {
+		.units_str		= "uV",
+	},
+	[FCC_REQUEST] = {
+		.units_str		= "uA",
+	},
+	[PE_CTRL_REG] = {
+		.name			= "CTRL_REG",
+		.start_addr		= QNOVO_PE_CTRL,
+		.num_regs		= 1,
+		.units_str		= "",
+	},
+	[PE_CTRL2_REG] = {
+		.name			= "PE_CTRL2_REG",
+		.start_addr		= QNOVO_PE_CTRL2,
+		.num_regs		= 1,
+		.units_str		= "",
+	},
+	[PTRAIN_STS_REG] = {
+		.name			= "PTRAIN_STS",
+		.start_addr		= QNOVO_PTRAIN_STS,
+		.num_regs		= 1,
+		.units_str		= "",
+	},
+	[INT_RT_STS_REG] = {
+		.name			= "INT_RT_STS",
+		.start_addr		= QNOVO_INT_RT_STS,
+		.num_regs		= 1,
+		.units_str		= "",
+	},
+	[ERR_STS2_REG] = {
+		.name			= "RAW_CHGR_ERR",
+		.start_addr		= QNOVO_ERROR_STS2,
+		.num_regs		= 1,
+		.units_str		= "",
+	},
+	[PREST1] = {
+		.name			= "PREST1",
+		.start_addr		= QNOVO_PREST1_CTRL,
+		.num_regs		= 1,
+		.reg_to_unit_multiplier	= 5,
+		.reg_to_unit_divider	= 1,
+		.min_val		= 5,
+		.max_val		= 255,
+		.units_str		= "mS",
+	},
+	[PPULS1] = {
+		.name			= "PPULS1",
+		.start_addr		= QNOVO_PPULS1_LSB_CTRL,
+		.num_regs		= 2,
+		.reg_to_unit_multiplier	= 1600, /* converts to uC */
+		.reg_to_unit_divider	= 1,
+		.min_val		= 30000,
+		.max_val		= 65535000,
+		.units_str		= "uC",
+	},
+	[NREST1] = {
+		.name			= "NREST1",
+		.start_addr		= QNOVO_NREST1_CTRL,
+		.num_regs		= 1,
+		.reg_to_unit_multiplier	= 5,
+		.reg_to_unit_divider	= 1,
+		.min_val		= 5,
+		.max_val		= 255,
+		.units_str		= "mS",
+	},
+	[NPULS1] = {
+		.name			= "NPULS1",
+		.start_addr		= QNOVO_NPULS1_CTRL,
+		.num_regs		= 1,
+		.reg_to_unit_multiplier	= 5,
+		.reg_to_unit_divider	= 1,
+		.min_val		= 0,
+		.max_val		= 255,
+		.units_str		= "mS",
+	},
+	[PPCNT] = {
+		.name			= "PPCNT",
+		.start_addr		= QNOVO_PPCNT_CTRL,
+		.num_regs		= 1,
+		.reg_to_unit_multiplier	= 1,
+		.reg_to_unit_divider	= 1,
+		.min_val		= 1,
+		.max_val		= 255,
+		.units_str		= "pulses",
+	},
+	[VLIM1] = {
+		.name			= "VLIM1",
+		.start_addr		= QNOVO_VLIM1_LSB_CTRL,
+		.num_regs		= 2,
+		.reg_to_unit_multiplier	= 610350, /* converts to nV */
+		.reg_to_unit_divider	= 1,
+		.min_val		= 2200000,
+		.max_val		= 4500000,
+		.units_str		= "uV",
+	},
+	[PVOLT1] = {
+		.name			= "PVOLT1",
+		.start_addr		= QNOVO_PVOLT1_LSB,
+		.num_regs		= 2,
+		.reg_to_unit_multiplier	= 610350, /* converts to nV */
+		.reg_to_unit_divider	= 1,
+		.units_str		= "uV",
+	},
+	[PCUR1] = {
+		.name			= "PCUR1",
+		.start_addr		= QNOVO_PCUR1_LSB,
+		.num_regs		= 2,
+		.reg_to_unit_multiplier	= 1220700, /* converts to nA */
+		.reg_to_unit_divider	= 1,
+		.units_str		= "uA",
+	},
+	[PTTIME] = {
+		.name			= "PTTIME",
+		.start_addr		= QNOVO_PTTIME_STS,
+		.num_regs		= 1,
+		.reg_to_unit_multiplier	= 2,
+		.reg_to_unit_divider	= 1,
+		.units_str		= "S",
+	},
+	[PREST2] = {
+		.name			= "PREST2",
+		.start_addr		= QNOVO_PREST2_LSB_CTRL,
+		.num_regs		= 2,
+		.reg_to_unit_multiplier	= 5,
+		.reg_to_unit_divider	= 1,
+		.min_val		= 5,
+		.max_val		= 65535,
+		.units_str		= "mS",
+	},
+	[PPULS2] = {
+		.name			= "PPULS2",
+		.start_addr		= QNOVO_PPULS2_LSB_CTRL,
+		.num_regs		= 2,
+		.reg_to_unit_multiplier	= 1600, /* converts to uC */
+		.reg_to_unit_divider	= 1,
+		.min_val		= 30000,
+		.max_val		= 65535000,
+		.units_str		= "uC",
+	},
+	[NREST2] = {
+		.name			= "NREST2",
+		.start_addr		= QNOVO_NREST2_CTRL,
+		.num_regs		= 1,
+		.reg_to_unit_multiplier	= 5,
+		.reg_to_unit_divider	= 1,
+		.reg_to_unit_offset	= -5,
+		.min_val		= 5,
+		.max_val		= 255,
+		.units_str		= "mS",
+	},
+	[NPULS2] = {
+		.name			= "NPULS2",
+		.start_addr		= QNOVO_NPULS2_CTRL,
+		.num_regs		= 1,
+		.reg_to_unit_multiplier	= 5,
+		.reg_to_unit_divider	= 1,
+		.min_val		= 0,
+		.max_val		= 255,
+		.units_str		= "mS",
+	},
+	[VLIM2] = {
+		.name			= "VLIM2",
+		.start_addr		= QNOVO_VLIM2_LSB_CTRL,
+		.num_regs		= 2,
+		.reg_to_unit_multiplier	= 610350, /* converts to nV */
+		.reg_to_unit_divider	= 1,
+		.min_val		= 2200000,
+		.max_val		= 4500000,
+		.units_str		= "uV",
+	},
+	[PVOLT2] = {
+		.name			= "PVOLT2",
+		.start_addr		= QNOVO_PVOLT2_LSB,
+		.num_regs		= 2,
+		.reg_to_unit_multiplier	= 610350, /* converts to nV */
+		.reg_to_unit_divider	= 1,
+		.units_str		= "uV",
+	},
+	[RVOLT2] = {
+		.name			= "RVOLT2",
+		.start_addr		= QNOVO_RVOLT2_LSB,
+		.num_regs		= 2,
+		.reg_to_unit_multiplier	= 610350,
+		.reg_to_unit_divider	= 1,
+		.units_str		= "uV",
+	},
+	[PCUR2] = {
+		.name			= "PCUR2",
+		.start_addr		= QNOVO_PCUR2_LSB,
+		.num_regs		= 2,
+		.reg_to_unit_multiplier	= 1220700, /* converts to nA */
+		.reg_to_unit_divider	= 1,
+		.units_str		= "uA",
+	},
+	[SCNT] = {
+		.name			= "SCNT",
+		.start_addr		= QNOVO_SCNT,
+		.num_regs		= 1,
+		.reg_to_unit_multiplier	= 1,
+		.reg_to_unit_divider	= 1,
+		.min_val		= 0,
+		.max_val		= 255,
+		.units_str		= "pulses",
+	},
+	[VMAX] = {
+		.name			= "VMAX",
+		.start_addr		= QNOVO_VMAX_LSB,
+		.num_regs		= 2,
+		.reg_to_unit_multiplier	= 814000, /* converts to nV */
+		.reg_to_unit_divider	= 1,
+		.units_str		= "uV",
+	},
+	[SNUM] = {
+		.name			= "SNUM",
+		.start_addr		= QNOVO_SNUM,
+		.num_regs		= 1,
+		.reg_to_unit_multiplier	= 1,
+		.reg_to_unit_divider	= 1,
+		.units_str		= "pulses",
+	},
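+	/*
+	 * for the battery entries below, start_addr holds a power_supply
+	 * property id rather than a Qnovo register address
+	 */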
+	[VBATT]	= {
+		.name			= "POWER_SUPPLY_PROP_VOLTAGE_NOW",
+		.start_addr		= POWER_SUPPLY_PROP_VOLTAGE_NOW,
+		.units_str		= "uV",
+	},
+	[IBATT]	= {
+		.name			= "POWER_SUPPLY_PROP_CURRENT_NOW",
+		.start_addr		= POWER_SUPPLY_PROP_CURRENT_NOW,
+		.units_str		= "uA",
+	},
+	[BATTTEMP] = {
+		.name			= "POWER_SUPPLY_PROP_TEMP",
+		.start_addr		= POWER_SUPPLY_PROP_TEMP,
+		.units_str		= "deciDegC",
+	},
+	[BATTSOC] = {
+		.name			= "POWER_SUPPLY_PROP_CAPACITY",
+		.start_addr		= POWER_SUPPLY_PROP_CAPACITY,
+		.units_str		= "%",
+	},
+};
+
+static struct class_attribute qnovo_attributes[];
+
+static ssize_t version_show(struct class *c, struct class_attribute *attr,
+			char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%d.%d\n",
+			DRV_MAJOR_VERSION, DRV_MINOR_VERSION);
+}
+
+static ssize_t ok_to_qnovo_show(struct class *c, struct class_attribute *attr,
+			char *buf)
+{
+	struct qnovo *chip = container_of(c, struct qnovo, qnovo_class);
+	int val = get_effective_result(chip->not_ok_to_qnovo_votable);
+
+	return snprintf(buf, PAGE_SIZE, "%d\n", !val);
+}
+
+static ssize_t qnovo_enable_show(struct class *c, struct class_attribute *attr,
+			char *ubuf)
+{
+	struct qnovo *chip = container_of(c, struct qnovo, qnovo_class);
+	int val = get_effective_result(chip->disable_votable);
+
+	return snprintf(ubuf, PAGE_SIZE, "%d\n", !val);
+}
+
+static ssize_t qnovo_enable_store(struct class *c, struct class_attribute *attr,
+			const char *ubuf, size_t count)
+{
+	struct qnovo *chip = container_of(c, struct qnovo, qnovo_class);
+	unsigned long val;
+
+	if (kstrtoul(ubuf, 0, &val))
+		return -EINVAL;
+
+	vote(chip->disable_votable, USER_VOTER, !val, 0);
+
+	return count;
+}
+
+static ssize_t pt_enable_show(struct class *c, struct class_attribute *attr,
+			char *ubuf)
+{
+	struct qnovo *chip = container_of(c, struct qnovo, qnovo_class);
+	int val = get_effective_result(chip->pt_dis_votable);
+
+	return snprintf(ubuf, PAGE_SIZE, "%d\n", !val);
+}
+
+static ssize_t pt_enable_store(struct class *c, struct class_attribute *attr,
+			const char *ubuf, size_t count)
+{
+	struct qnovo *chip = container_of(c, struct qnovo, qnovo_class);
+	unsigned long val;
+
+	if (kstrtoul(ubuf, 0, &val))
+		return -EINVAL;
+
+	/* val == 0 means userspace wants the pulse train disabled, so vote true */
+	vote(chip->pt_dis_votable, QNI_PT_VOTER, !val, 0);
+
+	return count;
+}
+
+static ssize_t val_show(struct class *c, struct class_attribute *attr,
+			char *ubuf)
+{
+	struct qnovo *chip = container_of(c, struct qnovo, qnovo_class);
+	int i = attr - qnovo_attributes;
+	int val = 0;
+
+	if (i == FV_REQUEST)
+		val = chip->fv_uV_request;
+
+	if (i == FCC_REQUEST)
+		val = chip->fcc_uA_request;
+
+	return snprintf(ubuf, PAGE_SIZE, "%d\n", val);
+}
+
+static ssize_t val_store(struct class *c, struct class_attribute *attr,
+			const char *ubuf, size_t count)
+{
+	struct qnovo *chip = container_of(c, struct qnovo, qnovo_class);
+	int i = attr - qnovo_attributes;
+	unsigned long val;
+
+	if (kstrtoul(ubuf, 0, &val))
+		return -EINVAL;
+
+	if (i == FV_REQUEST)
+		chip->fv_uV_request = val;
+
+	if (i == FCC_REQUEST)
+		chip->fcc_uA_request = val;
+
+	if (!get_effective_result(chip->disable_votable))
+		qnovo_batt_psy_update(chip, false);
+
+	return count;
+}
+
+static ssize_t reg_show(struct class *c, struct class_attribute *attr,
+			char *ubuf)
+{
+	int i = attr - qnovo_attributes;
+	struct qnovo *chip = container_of(c, struct qnovo, qnovo_class);
+	u8 buf[2] = {0, 0};
+	u16 regval;
+	int rc;
+
+	rc = qnovo_read(chip, params[i].start_addr, buf, params[i].num_regs);
+	if (rc < 0) {
+		pr_err("Couldn't read %s rc = %d\n", params[i].name, rc);
+		return -EINVAL;
+	}
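+	/* multi-byte registers are little-endian: buf[0] holds the LSB */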
+	regval = buf[1] << 8 | buf[0];
+
+	return snprintf(ubuf, PAGE_SIZE, "0x%04x\n", regval);
+}
+
+static ssize_t reg_store(struct class *c, struct class_attribute *attr,
+			const char *ubuf, size_t count)
+{
+	int i = attr - qnovo_attributes;
+	struct qnovo *chip = container_of(c, struct qnovo, qnovo_class);
+	u8 buf[2] = {0, 0};
+	unsigned long val;
+	int rc;
+
+	if (kstrtoul(ubuf, 0, &val))
+		return -EINVAL;
+
+	buf[0] = val & 0xFF;
+	buf[1] = (val >> 8) & 0xFF;
+
+	rc = qnovo_write(chip, params[i].start_addr, buf, params[i].num_regs);
+	if (rc < 0) {
+		pr_err("Couldn't write %s rc = %d\n", params[i].name, rc);
+		return -EINVAL;
+	}
+	return count;
+}
+
+static ssize_t time_show(struct class *c, struct class_attribute *attr,
+		char *ubuf)
+{
+	int i = attr - qnovo_attributes;
+	struct qnovo *chip = container_of(c, struct qnovo, qnovo_class);
+	u8 buf[2] = {0, 0};
+	u16 regval;
+	int val;
+	int rc;
+
+	rc = qnovo_read(chip, params[i].start_addr, buf, params[i].num_regs);
+	if (rc < 0) {
+		pr_err("Couldn't read %s rc = %d\n", params[i].name, rc);
+		return -EINVAL;
+	}
+	regval = buf[1] << 8 | buf[0];
+
+	val = ((regval * params[i].reg_to_unit_multiplier)
+			/ params[i].reg_to_unit_divider)
+		- params[i].reg_to_unit_offset;
+
+	return snprintf(ubuf, PAGE_SIZE, "%d\n", val);
+}
+
+static ssize_t time_store(struct class *c, struct class_attribute *attr,
+		       const char *ubuf, size_t count)
+{
+	int i = attr - qnovo_attributes;
+	struct qnovo *chip = container_of(c, struct qnovo, qnovo_class);
+	u8 buf[2] = {0, 0};
+	u16 regval;
+	unsigned long val;
+	int rc;
+
+	if (kstrtoul(ubuf, 0, &val))
+		return -EINVAL;
+
+	if (val < params[i].min_val || val > params[i].max_val) {
+		pr_err("Out of Range %d%s for %s\n", (int)val,
+				params[i].units_str,
+				params[i].name);
+		return -ERANGE;
+	}
+
+	regval = (((int)val + params[i].reg_to_unit_offset)
+			* params[i].reg_to_unit_divider)
+		/ params[i].reg_to_unit_multiplier;
+	buf[0] = regval & 0xFF;
+	buf[1] = (regval >> 8) & 0xFF;
+
+	rc = qnovo_write(chip, params[i].start_addr, buf, params[i].num_regs);
+	if (rc < 0) {
+		pr_err("Couldn't write %s rc = %d\n", params[i].name, rc);
+		return -EINVAL;
+	}
+
+	return count;
+}
+
+static ssize_t current_show(struct class *c, struct class_attribute *attr,
+				char *ubuf)
+{
+	int i = attr - qnovo_attributes;
+	struct qnovo *chip = container_of(c, struct qnovo, qnovo_class);
+	u8 buf[2] = {0, 0};
+	int rc;
+	int comp_val_uA;
+	s64 regval_nA;
+	s64 gain, offset_nA, comp_val_nA;
+
+	rc = qnovo_read(chip, params[i].start_addr, buf, params[i].num_regs);
+	if (rc < 0) {
+		pr_err("Couldn't read %s rc = %d\n", params[i].name, rc);
+		return -EINVAL;
+	}
+
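+	/*
+	 * the reading is a 14-bit two's complement value: sign-extend
+	 * bit 13 so the s16 cast below yields the right sign
+	 */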
+	if (buf[1] & BIT(5))
+		buf[1] |= GENMASK(7, 6);
+
+	regval_nA = (s16)(buf[1] << 8 | buf[0]);
+	regval_nA = div_s64(regval_nA * params[i].reg_to_unit_multiplier,
+					params[i].reg_to_unit_divider)
+			- params[i].reg_to_unit_offset;
+
+	if (chip->dt.external_rsense) {
+		offset_nA = chip->external_offset_nA;
+		gain = chip->external_i_gain_mega;
+	} else {
+		offset_nA = chip->internal_offset_nA;
+		gain = chip->internal_i_gain_mega;
+	}
+
+	comp_val_nA = div_s64(regval_nA * gain, 1000000) - offset_nA;
+	comp_val_uA = div_s64(comp_val_nA, 1000);
+
+	return snprintf(ubuf, PAGE_SIZE, "%d\n", comp_val_uA);
+}
+
+static ssize_t voltage_show(struct class *c, struct class_attribute *attr,
+				char *ubuf)
+{
+	int i = attr - qnovo_attributes;
+	struct qnovo *chip = container_of(c, struct qnovo, qnovo_class);
+	u8 buf[2] = {0, 0};
+	int rc;
+	int comp_val_uV;
+	s64 regval_nV;
+	s64 gain, offset_nV, comp_val_nV;
+
+	rc = qnovo_read(chip, params[i].start_addr, buf, params[i].num_regs);
+	if (rc < 0) {
+		pr_err("Couldn't read %s rc = %d\n", params[i].name, rc);
+		return -EINVAL;
+	}
+	regval_nV = buf[1] << 8 | buf[0];
+	regval_nV = div_s64(regval_nV * params[i].reg_to_unit_multiplier,
+					params[i].reg_to_unit_divider)
+			- params[i].reg_to_unit_offset;
+
+	offset_nV = chip->offset_nV;
+	gain = chip->v_gain_mega;
+
+	comp_val_nV = div_s64(regval_nV * gain, 1000000) + offset_nV;
+	comp_val_uV = div_s64(comp_val_nV, 1000);
+
+	return snprintf(ubuf, PAGE_SIZE, "%d\n", comp_val_uV);
+}
+
+static ssize_t voltage_store(struct class *c, struct class_attribute *attr,
+		       const char *ubuf, size_t count)
+{
+	int i = attr - qnovo_attributes;
+	struct qnovo *chip = container_of(c, struct qnovo, qnovo_class);
+	u8 buf[2] = {0, 0};
+	int rc;
+	unsigned long val_uV;
+	s64 regval_nV;
+	s64 gain, offset_nV;
+
+	if (kstrtoul(ubuf, 0, &val_uV))
+		return -EINVAL;
+
+	if (val_uV < params[i].min_val || val_uV > params[i].max_val) {
+		pr_err("Out of Range %d%s for %s\n", (int)val_uV,
+				params[i].units_str,
+				params[i].name);
+		return -ERANGE;
+	}
+
+	offset_nV = chip->offset_nV;
+	gain = chip->v_gain_mega;
+
+	regval_nV = (s64)val_uV * 1000 - offset_nV;
+	regval_nV = div_s64(regval_nV * 1000000, gain);
+
+	regval_nV = div_s64((regval_nV + params[i].reg_to_unit_offset)
+			* params[i].reg_to_unit_divider,
+			params[i].reg_to_unit_multiplier);
+	buf[0] = regval_nV & 0xFF;
+	buf[1] = ((u64)regval_nV >> 8) & 0xFF;
+
+	rc = qnovo_write(chip, params[i].start_addr, buf, params[i].num_regs);
+	if (rc < 0) {
+		pr_err("Couldn't write %s rc = %d\n", params[i].name, rc);
+		return -EINVAL;
+	}
+
+	return count;
+}
+
+static ssize_t coulomb_show(struct class *c, struct class_attribute *attr,
+				char *ubuf)
+{
+	int i = attr - qnovo_attributes;
+	struct qnovo *chip = container_of(c, struct qnovo, qnovo_class);
+	u8 buf[2] = {0, 0};
+	int rc;
+	int comp_val_uC;
+	s64 regval_uC, gain;
+
+	rc = qnovo_read(chip, params[i].start_addr, buf, params[i].num_regs);
+	if (rc < 0) {
+		pr_err("Couldn't read %s rc = %d\n", params[i].name, rc);
+		return -EINVAL;
+	}
+	regval_uC = buf[1] << 8 | buf[0];
+	regval_uC = div_s64(regval_uC * params[i].reg_to_unit_multiplier,
+					params[i].reg_to_unit_divider)
+			- params[i].reg_to_unit_offset;
+
+	if (chip->dt.external_rsense)
+		gain = chip->external_i_gain_mega;
+	else
+		gain = chip->internal_i_gain_mega;
+
+	comp_val_uC = div_s64(regval_uC * gain, 1000000);
+	return snprintf(ubuf, PAGE_SIZE, "%d\n", comp_val_uC);
+}
+
+static ssize_t coulomb_store(struct class *c, struct class_attribute *attr,
+		       const char *ubuf, size_t count)
+{
+	int i = attr - qnovo_attributes;
+	struct qnovo *chip = container_of(c, struct qnovo, qnovo_class);
+	u8 buf[2] = {0, 0};
+	int rc;
+	unsigned long val_uC;
+	s64 regval;
+	s64 gain;
+
+	if (kstrtoul(ubuf, 0, &val_uC))
+		return -EINVAL;
+
+	if (val_uC < params[i].min_val || val_uC > params[i].max_val) {
+		pr_err("Out of Range %d%s for %s\n", (int)val_uC,
+				params[i].units_str,
+				params[i].name);
+		return -ERANGE;
+	}
+
+	if (chip->dt.external_rsense)
+		gain = chip->external_i_gain_mega;
+	else
+		gain = chip->internal_i_gain_mega;
+
+	regval = div_s64((s64)val_uC * 1000000, gain);
+
+	regval = div_s64((regval + params[i].reg_to_unit_offset)
+			* params[i].reg_to_unit_divider,
+			params[i].reg_to_unit_multiplier);
+
+	buf[0] = regval & 0xFF;
+	buf[1] = ((u64)regval >> 8) & 0xFF;
+
+	rc = qnovo_write(chip, params[i].start_addr, buf, params[i].num_regs);
+	if (rc < 0) {
+		pr_err("Couldn't write %s rc = %d\n", params[i].name, rc);
+		return -EINVAL;
+	}
+
+	return count;
+}
+
+static ssize_t batt_prop_show(struct class *c, struct class_attribute *attr,
+				char *ubuf)
+{
+	int i = attr - qnovo_attributes;
+	struct qnovo *chip = container_of(c, struct qnovo, qnovo_class);
+	int rc = -EINVAL;
+	int prop = params[i].start_addr;
+	union power_supply_propval pval = {0};
+
+	if (!is_batt_available(chip))
+		return -EINVAL;
+
+	rc = power_supply_get_property(chip->batt_psy, prop, &pval);
+	if (rc < 0) {
+		pr_err("Couldn't read battery prop %s rc = %d\n",
+				params[i].name, rc);
+		return -EINVAL;
+	}
+
+	return snprintf(ubuf, PAGE_SIZE, "%d\n", pval.intval);
+}
+
+static struct class_attribute qnovo_attributes[] = {
+	[VER]			= __ATTR_RO(version),
+	[OK_TO_QNOVO]		= __ATTR_RO(ok_to_qnovo),
+	[QNOVO_ENABLE]		= __ATTR_RW(qnovo_enable),
+	[PT_ENABLE]		= __ATTR_RW(pt_enable),
+	[FV_REQUEST]		= __ATTR(fv_uV_request, 0644,
+					val_show, val_store),
+	[FCC_REQUEST]		= __ATTR(fcc_uA_request, 0644,
+					val_show, val_store),
+	[PE_CTRL_REG]		= __ATTR(PE_CTRL_REG, 0644,
+					reg_show, reg_store),
+	[PE_CTRL2_REG]		= __ATTR(PE_CTRL2_REG, 0644,
+					reg_show, reg_store),
+	[PTRAIN_STS_REG]	= __ATTR(PTRAIN_STS_REG, 0444,
+					reg_show, NULL),
+	[INT_RT_STS_REG]	= __ATTR(INT_RT_STS_REG, 0444,
+					reg_show, NULL),
+	[ERR_STS2_REG]		= __ATTR(ERR_STS2_REG, 0444,
+					reg_show, NULL),
+	[PREST1]		= __ATTR(PREST1_mS, 0644,
+					time_show, time_store),
+	[PPULS1]		= __ATTR(PPULS1_uC, 0644,
+					coulomb_show, coulomb_store),
+	[NREST1]		= __ATTR(NREST1_mS, 0644,
+					time_show, time_store),
+	[NPULS1]		= __ATTR(NPULS1_mS, 0644,
+					time_show, time_store),
+	[PPCNT]			= __ATTR(PPCNT, 0644,
+					time_show, time_store),
+	[VLIM1]			= __ATTR(VLIM1_uV, 0644,
+					voltage_show, voltage_store),
+	[PVOLT1]		= __ATTR(PVOLT1_uV, 0444,
+					voltage_show, NULL),
+	[PCUR1]			= __ATTR(PCUR1_uA, 0444,
+					current_show, NULL),
+	[PTTIME]		= __ATTR(PTTIME_S, 0444,
+					time_show, NULL),
+	[PREST2]		= __ATTR(PREST2_mS, 0644,
+					time_show, time_store),
+	[PPULS2]		= __ATTR(PPULS2_uC, 0644,
+					coulomb_show, coulomb_store),
+	[NREST2]		= __ATTR(NREST2_mS, 0644,
+					time_show, time_store),
+	[NPULS2]		= __ATTR(NPULS2_mS, 0644,
+					time_show, time_store),
+	[VLIM2]			= __ATTR(VLIM2_uV, 0644,
+					voltage_show, voltage_store),
+	[PVOLT2]		= __ATTR(PVOLT2_uV, 0444,
+					voltage_show, NULL),
+	[RVOLT2]		= __ATTR(RVOLT2_uV, 0444,
+					voltage_show, NULL),
+	[PCUR2]			= __ATTR(PCUR2_uA, 0444,
+					current_show, NULL),
+	[SCNT]			= __ATTR(SCNT, 0644,
+					time_show, time_store),
+	[VMAX]			= __ATTR(VMAX_uV, 0444,
+					voltage_show, NULL),
+	[SNUM]			= __ATTR(SNUM, 0444,
+					time_show, NULL),
+	[VBATT]			= __ATTR(VBATT_uV, 0444,
+					batt_prop_show, NULL),
+	[IBATT]			= __ATTR(IBATT_uA, 0444,
+					batt_prop_show, NULL),
+	[BATTTEMP]		= __ATTR(BATTTEMP_deciDegC, 0444,
+					batt_prop_show, NULL),
+	[BATTSOC]		= __ATTR(BATTSOC, 0444,
+					batt_prop_show, NULL),
+	__ATTR_NULL,
+};
+
+static int qnovo_update_status(struct qnovo *chip)
+{
+	u8 val = 0;
+	int rc;
+	bool hw_ok_to_qnovo;
+
+	rc = qnovo_read(chip, QNOVO_ERROR_STS2, &val, 1);
+	if (rc < 0) {
+		pr_err("Couldn't read error sts rc = %d\n", rc);
+		hw_ok_to_qnovo = false;
+	} else {
+		/*
+		 * For CV mode keep qnovo enabled, userspace is expected to
+		 * disable it after few runs
+		 */
+		hw_ok_to_qnovo = (val == ERR_CV_MODE || val == 0);
+	}
+
+	vote(chip->not_ok_to_qnovo_votable, HW_OK_TO_QNOVO_VOTER,
+					!hw_ok_to_qnovo, 0);
+	return 0;
+}
+
+static void usb_debounce_work(struct work_struct *work)
+{
+	struct qnovo *chip = container_of(work,
+				struct qnovo, usb_debounce_work.work);
+
+	vote(chip->chg_ready_votable, USB_READY_VOTER, true, 0);
+	vote(chip->awake_votable, USB_READY_VOTER, false, 0);
+}
+
+static void dc_debounce_work(struct work_struct *work)
+{
+	struct qnovo *chip = container_of(work,
+				struct qnovo, dc_debounce_work.work);
+
+	vote(chip->chg_ready_votable, DC_READY_VOTER, true, 0);
+	vote(chip->awake_votable, DC_READY_VOTER, false, 0);
+}
+
+#define DEBOUNCE_MS 15000  /* 15 seconds */
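+/*
+ * charger insertion is debounced: the charger is declared ready (and
+ * qnovo allowed to engage) only after the supply has been present for
+ * DEBOUNCE_MS
+ */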
+static void status_change_work(struct work_struct *work)
+{
+	struct qnovo *chip = container_of(work,
+			struct qnovo, status_change_work);
+	union power_supply_propval pval;
+	bool usb_present = false, dc_present = false;
+	int rc;
+
+	if (is_fg_available(chip))
+		vote(chip->disable_votable, FG_AVAILABLE_VOTER, false, 0);
+
+	if (is_usb_available(chip)) {
+		rc = power_supply_get_property(chip->usb_psy,
+				POWER_SUPPLY_PROP_PRESENT, &pval);
+		usb_present = (rc < 0) ? 0 : pval.intval;
+	}
+
+	if (chip->usb_present && !usb_present) {
+		/* removal */
+		chip->usb_present = 0;
+		cancel_delayed_work_sync(&chip->usb_debounce_work);
+		vote(chip->awake_votable, USB_READY_VOTER, false, 0);
+		vote(chip->chg_ready_votable, USB_READY_VOTER, false, 0);
+	} else if (!chip->usb_present && usb_present) {
+		/* insertion */
+		chip->usb_present = 1;
+		vote(chip->awake_votable, USB_READY_VOTER, true, 0);
+		schedule_delayed_work(&chip->usb_debounce_work,
+				msecs_to_jiffies(DEBOUNCE_MS));
+	}
+
+	if (is_dc_available(chip)) {
+		rc = power_supply_get_property(chip->dc_psy,
+			POWER_SUPPLY_PROP_PRESENT,
+			&pval);
+		dc_present = (rc < 0) ? 0 : pval.intval;
+	}
+
+	if (usb_present)
+		dc_present = 0;
+
+	/* disable qnovo for dc path by forcing dc_present = 0 always */
+	if (!chip->dt.enable_for_dc)
+		dc_present = 0;
+
+	if (chip->dc_present && !dc_present) {
+		/* removal */
+		chip->dc_present = 0;
+		cancel_delayed_work_sync(&chip->dc_debounce_work);
+		vote(chip->awake_votable, DC_READY_VOTER, false, 0);
+		vote(chip->chg_ready_votable, DC_READY_VOTER, false, 0);
+	} else if (!chip->dc_present && dc_present) {
+		/* insertion */
+		chip->dc_present = 1;
+		vote(chip->awake_votable, DC_READY_VOTER, true, 0);
+		schedule_delayed_work(&chip->dc_debounce_work,
+				msecs_to_jiffies(DEBOUNCE_MS));
+	}
+
+	qnovo_update_status(chip);
+}
+
+static void ptrain_restart_work(struct work_struct *work)
+{
+	struct qnovo *chip = container_of(work,
+				struct qnovo, ptrain_restart_work.work);
+	u8 pt_t1, pt_t2;
+	int rc;
+	u8 pt_en;
+
+	rc = qnovo_read(chip, QNOVO_PTRAIN_EN, &pt_en, 1);
+	if (rc < 0) {
+		dev_err(chip->dev, "Couldn't read QNOVO_PTRAIN_EN rc = %d\n",
+				rc);
+		goto clean_up;
+	}
+
+	if (!pt_en) {
+		rc = qnovo_masked_write(chip, QNOVO_PTRAIN_EN,
+				QNOVO_PTRAIN_EN_BIT, QNOVO_PTRAIN_EN_BIT);
+		if (rc < 0) {
+			dev_err(chip->dev, "Couldn't enable pulse train rc=%d\n",
+					rc);
+			goto clean_up;
+		}
+		/* sleep 20ms for the pulse trains to restart and settle */
+		msleep(20);
+	}
+
+	rc = qnovo_read(chip, QNOVO_PTTIME_STS, &pt_t1, 1);
+	if (rc < 0) {
+		dev_err(chip->dev, "Couldn't read QNOVO_PTTIME_STS rc = %d\n",
+			rc);
+		goto clean_up;
+	}
+
+	/* pttime increments every 2 seconds */
+	msleep(2100);
+
+	rc = qnovo_read(chip, QNOVO_PTTIME_STS, &pt_t2, 1);
+	if (rc < 0) {
+		dev_err(chip->dev, "Couldn't read QNOVO_PTTIME_STS rc = %d\n",
+			rc);
+		goto clean_up;
+	}
+
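+	/*
+	 * the pulse train timer advanced, so pulsing is active and
+	 * nothing needs restarting
+	 */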
+	if (pt_t1 != pt_t2)
+		goto clean_up;
+
+	/* Toggle pt enable to restart pulse train */
+	rc = qnovo_masked_write(chip, QNOVO_PTRAIN_EN, QNOVO_PTRAIN_EN_BIT, 0);
+	if (rc < 0) {
+		dev_err(chip->dev, "Couldn't disable pulse train rc=%d\n", rc);
+		goto clean_up;
+	}
+	msleep(1000);
+	rc = qnovo_masked_write(chip, QNOVO_PTRAIN_EN, QNOVO_PTRAIN_EN_BIT,
+				QNOVO_PTRAIN_EN_BIT);
+	if (rc < 0) {
+		dev_err(chip->dev, "Couldn't enable pulse train rc=%d\n", rc);
+		goto clean_up;
+	}
+
+clean_up:
+	vote(chip->awake_votable, PT_RESTART_VOTER, false, 0);
+}
+
+static int qnovo_notifier_call(struct notifier_block *nb,
+		unsigned long ev, void *v)
+{
+	struct power_supply *psy = v;
+	struct qnovo *chip = container_of(nb, struct qnovo, nb);
+
+	if (ev != PSY_EVENT_PROP_CHANGED)
+		return NOTIFY_OK;
+
+	if (strcmp(psy->desc->name, "battery") == 0
+		|| strcmp(psy->desc->name, "bms") == 0
+		|| strcmp(psy->desc->name, "usb") == 0
+		|| strcmp(psy->desc->name, "dc") == 0)
+		schedule_work(&chip->status_change_work);
+
+	return NOTIFY_OK;
+}
+
+static irqreturn_t handle_ptrain_done(int irq, void *data)
+{
+	struct qnovo *chip = data;
+	union power_supply_propval pval = {0};
+
+	qnovo_update_status(chip);
+
+	/*
+	 * hw resets pt_en bit once ptrain_done triggers.
+	 * vote on behalf of QNI to disable it such that
+	 * once QNI enables it, the votable state changes
+	 * and the callback that sets it is indeed invoked
+	 */
+	vote(chip->pt_dis_votable, QNI_PT_VOTER, true, 0);
+
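+	/* hold the pulse train off while the fuel gauge takes an ESR reading */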
+	vote(chip->pt_dis_votable, ESR_VOTER, true, 0);
+	if (is_fg_available(chip)
+		&& !get_client_vote(chip->disable_votable, USER_VOTER)
+		&& !get_effective_result(chip->not_ok_to_qnovo_votable))
+		power_supply_set_property(chip->bms_psy,
+				POWER_SUPPLY_PROP_RESISTANCE,
+				&pval);
+
+	vote(chip->pt_dis_votable, ESR_VOTER, false, 0);
+	kobject_uevent(&chip->dev->kobj, KOBJ_CHANGE);
+	return IRQ_HANDLED;
+}
+
+static int qnovo_hw_init(struct qnovo *chip)
+{
+	int rc;
+	u8 iadc_offset_external, iadc_offset_internal;
+	u8 iadc_gain_external, iadc_gain_internal;
+	u8 vadc_offset, vadc_gain;
+	u8 val;
+
+	vote(chip->chg_ready_votable, USB_READY_VOTER, false, 0);
+	vote(chip->chg_ready_votable, DC_READY_VOTER, false, 0);
+
+	vote(chip->disable_votable, USER_VOTER, true, 0);
+	vote(chip->disable_votable, FG_AVAILABLE_VOTER, true, 0);
+
+	vote(chip->pt_dis_votable, QNI_PT_VOTER, true, 0);
+	vote(chip->pt_dis_votable, QNOVO_OVERALL_VOTER, true, 0);
+	vote(chip->pt_dis_votable, ESR_VOTER, false, 0);
+
+	val = 0;
+	rc = qnovo_write(chip, QNOVO_STRM_CTRL, &val, 1);
+	if (rc < 0) {
+		pr_err("Couldn't write iadc bitstream control rc = %d\n", rc);
+		return rc;
+	}
+
+	rc = qnovo_read(chip, QNOVO_IADC_OFFSET_0, &iadc_offset_external, 1);
+	if (rc < 0) {
+		pr_err("Couldn't read iadc exernal offset rc = %d\n", rc);
+		return rc;
+	}
+
+	/* stored as an 8 bit 2's complement signed integer */
+	val = -1 * iadc_offset_external;
+	rc = qnovo_write(chip, QNOVO_TR_IADC_OFFSET_0, &val, 1);
+	if (rc < 0) {
+		pr_err("Couldn't write iadc offset rc = %d\n", rc);
+		return rc;
+	}
+
+	rc = qnovo_read(chip, QNOVO_IADC_OFFSET_1, &iadc_offset_internal, 1);
+	if (rc < 0) {
+		pr_err("Couldn't read iadc internal offset rc = %d\n", rc);
+		return rc;
+	}
+
+	/* stored as an 8 bit 2's complement signed integer */
+	val = -1 * iadc_offset_internal;
+	rc = qnovo_write(chip, QNOVO_TR_IADC_OFFSET_1, &val, 1);
+	if (rc < 0) {
+		pr_err("Couldn't write iadc offset rc = %d\n", rc);
+		return rc;
+	}
+
+	rc = qnovo_read(chip, QNOVO_IADC_GAIN_0, &iadc_gain_external, 1);
+	if (rc < 0) {
+		pr_err("Couldn't read iadc external gain rc = %d\n", rc);
+		return rc;
+	}
+
+	rc = qnovo_read(chip, QNOVO_IADC_GAIN_1, &iadc_gain_internal, 1);
+	if (rc < 0) {
+		pr_err("Couldn't read iadc internal gain rc = %d\n", rc);
+		return rc;
+	}
+
+	rc = qnovo_read(chip, QNOVO_VADC_OFFSET, &vadc_offset, 1);
+	if (rc < 0) {
+		pr_err("Couldn't read vadc offset rc = %d\n", rc);
+		return rc;
+	}
+
+	rc = qnovo_read(chip, QNOVO_VADC_GAIN, &vadc_gain, 1);
+	if (rc < 0) {
+		pr_err("Couldn't read vadc external gain rc = %d\n", rc);
+		return rc;
+	}
+
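+	/*
+	 * trim registers are signed LSB counts; the gains computed here
+	 * are fixed point scaled by 1e6 (hence the "_mega" suffix)
+	 */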
+	chip->external_offset_nA = (s64)(s8)iadc_offset_external * IADC_LSB_NA;
+	chip->internal_offset_nA = (s64)(s8)iadc_offset_internal * IADC_LSB_NA;
+	chip->offset_nV = (s64)(s8)vadc_offset * VADC_LSB_NA;
+	chip->external_i_gain_mega
+		= 1000000000 + (s64)(s8)iadc_gain_external * GAIN_LSB_FACTOR;
+	chip->external_i_gain_mega
+		= div_s64(chip->external_i_gain_mega, 1000);
+	chip->internal_i_gain_mega
+		= 1000000000 + (s64)(s8)iadc_gain_internal * GAIN_LSB_FACTOR;
+	chip->internal_i_gain_mega
+		= div_s64(chip->internal_i_gain_mega, 1000);
+	chip->v_gain_mega = 1000000000 + (s64)(s8)vadc_gain * GAIN_LSB_FACTOR;
+	chip->v_gain_mega = div_s64(chip->v_gain_mega, 1000);
+
+	/* allow charger error conditions to disable qnovo, CV mode excluded */
+	val = ERR_SWITCHER_DISABLED | ERR_JEITA_SOFT_CONDITION | ERR_BAT_OV |
+		ERR_BATTERY_MISSING | ERR_SAFETY_TIMER_EXPIRED |
+		ERR_CHARGING_DISABLED | ERR_JEITA_HARD_CONDITION;
+	rc = qnovo_write(chip, QNOVO_DISABLE_CHARGING, &val, 1);
+	if (rc < 0) {
+		pr_err("Couldn't write QNOVO_DISABLE_CHARGING rc = %d\n", rc);
+		return rc;
+	}
+
+	return 0;
+}
+
+static int qnovo_register_notifier(struct qnovo *chip)
+{
+	int rc;
+
+	chip->nb.notifier_call = qnovo_notifier_call;
+	rc = power_supply_reg_notifier(&chip->nb);
+	if (rc < 0) {
+		pr_err("Couldn't register psy notifier rc = %d\n", rc);
+		return rc;
+	}
+
+	return 0;
+}
+
+static int qnovo_determine_initial_status(struct qnovo *chip)
+{
+	status_change_work(&chip->status_change_work);
+	return 0;
+}
+
+static int qnovo_request_interrupts(struct qnovo *chip)
+{
+	int rc = 0;
+	int irq_ptrain_done = of_irq_get_byname(chip->dev->of_node,
+						"ptrain-done");
+
+	if (irq_ptrain_done < 0) {
+		pr_err("Couldn't get ptrain-done irq rc = %d\n",
+					irq_ptrain_done);
+		return irq_ptrain_done;
+	}
+
+	rc = devm_request_threaded_irq(chip->dev, irq_ptrain_done, NULL,
+					handle_ptrain_done,
+					IRQF_ONESHOT, "ptrain-done", chip);
+	if (rc < 0) {
+		pr_err("Couldn't request irq %d rc = %d\n",
+					irq_ptrain_done, rc);
+		return rc;
+	}
+
+	enable_irq_wake(irq_ptrain_done);
+
+	return rc;
+}
+
+static int qnovo_probe(struct platform_device *pdev)
+{
+	struct qnovo *chip;
+	int rc = 0;
+
+	chip = devm_kzalloc(&pdev->dev, sizeof(*chip), GFP_KERNEL);
+	if (!chip)
+		return -ENOMEM;
+
+	chip->fv_uV_request = -EINVAL;
+	chip->fcc_uA_request = -EINVAL;
+	chip->dev = &pdev->dev;
+	mutex_init(&chip->write_lock);
+
+	chip->regmap = dev_get_regmap(chip->dev->parent, NULL);
+	if (!chip->regmap) {
+		pr_err("parent regmap is missing\n");
+		return -EINVAL;
+	}
+
+	rc = qnovo_parse_dt(chip);
+	if (rc < 0) {
+		pr_err("Couldn't parse device tree rc=%d\n", rc);
+		return rc;
+	}
+
+	/* set driver data before requesting resources that may use it */
+	platform_set_drvdata(pdev, chip);
+
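+	/* all votables are VOTE_SET_ANY: the result is the OR of all client votes */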
+	chip->disable_votable = create_votable("QNOVO_DISABLE", VOTE_SET_ANY,
+					qnovo_disable_cb, chip);
+	if (IS_ERR(chip->disable_votable)) {
+		rc = PTR_ERR(chip->disable_votable);
+		goto cleanup;
+	}
+
+	chip->pt_dis_votable = create_votable("QNOVO_PT_DIS", VOTE_SET_ANY,
+					pt_dis_votable_cb, chip);
+	if (IS_ERR(chip->pt_dis_votable)) {
+		rc = PTR_ERR(chip->pt_dis_votable);
+		goto destroy_disable_votable;
+	}
+
+	chip->not_ok_to_qnovo_votable = create_votable("QNOVO_NOT_OK",
+					VOTE_SET_ANY,
+					not_ok_to_qnovo_cb, chip);
+	if (IS_ERR(chip->not_ok_to_qnovo_votable)) {
+		rc = PTR_ERR(chip->not_ok_to_qnovo_votable);
+		goto destroy_pt_dis_votable;
+	}
+
+	chip->chg_ready_votable = create_votable("QNOVO_CHG_READY",
+					VOTE_SET_ANY,
+					chg_ready_cb, chip);
+	if (IS_ERR(chip->chg_ready_votable)) {
+		rc = PTR_ERR(chip->chg_ready_votable);
+		goto destroy_not_ok_to_qnovo_votable;
+	}
+
+	chip->awake_votable = create_votable("QNOVO_AWAKE", VOTE_SET_ANY,
+					awake_cb, chip);
+	if (IS_ERR(chip->awake_votable)) {
+		rc = PTR_ERR(chip->awake_votable);
+		goto destroy_chg_ready_votable;
+	}
+
+	INIT_WORK(&chip->status_change_work, status_change_work);
+	INIT_DELAYED_WORK(&chip->dc_debounce_work, dc_debounce_work);
+	INIT_DELAYED_WORK(&chip->usb_debounce_work, usb_debounce_work);
+	INIT_DELAYED_WORK(&chip->ptrain_restart_work, ptrain_restart_work);
+
+	rc = qnovo_hw_init(chip);
+	if (rc < 0) {
+		pr_err("Couldn't initialize hardware rc=%d\n", rc);
+		goto destroy_awake_votable;
+	}
+
+	rc = qnovo_register_notifier(chip);
+	if (rc < 0) {
+		pr_err("Couldn't register psy notifier rc = %d\n", rc);
+		goto unreg_notifier;
+	}
+
+	rc = qnovo_determine_initial_status(chip);
+	if (rc < 0) {
+		pr_err("Couldn't determine initial status rc=%d\n", rc);
+		goto unreg_notifier;
+	}
+
+	rc = qnovo_request_interrupts(chip);
+	if (rc < 0) {
+		pr_err("Couldn't request interrupts rc=%d\n", rc);
+		goto unreg_notifier;
+	}
+	chip->qnovo_class.name = "qnovo";
+	chip->qnovo_class.owner = THIS_MODULE;
+	chip->qnovo_class.class_attrs = qnovo_attributes;
+
+	rc = class_register(&chip->qnovo_class);
+	if (rc < 0) {
+		pr_err("couldn't register qnovo sysfs class rc = %d\n", rc);
+		goto unreg_notifier;
+	}
+
+	device_init_wakeup(chip->dev, true);
+
+	return rc;
+
+unreg_notifier:
+	power_supply_unreg_notifier(&chip->nb);
+destroy_awake_votable:
+	destroy_votable(chip->awake_votable);
+destroy_chg_ready_votable:
+	destroy_votable(chip->chg_ready_votable);
+destroy_not_ok_to_qnovo_votable:
+	destroy_votable(chip->not_ok_to_qnovo_votable);
+destroy_pt_dis_votable:
+	destroy_votable(chip->pt_dis_votable);
+destroy_disable_votable:
+	destroy_votable(chip->disable_votable);
+cleanup:
+	platform_set_drvdata(pdev, NULL);
+	return rc;
+}
+
+static int qnovo_remove(struct platform_device *pdev)
+{
+	struct qnovo *chip = platform_get_drvdata(pdev);
+
+	class_unregister(&chip->qnovo_class);
+	power_supply_unreg_notifier(&chip->nb);
+	destroy_votable(chip->chg_ready_votable);
+	destroy_votable(chip->not_ok_to_qnovo_votable);
+	destroy_votable(chip->pt_dis_votable);
+	destroy_votable(chip->disable_votable);
+	platform_set_drvdata(pdev, NULL);
+	return 0;
+}
+
+static void qnovo_shutdown(struct platform_device *pdev)
+{
+	struct qnovo *chip = platform_get_drvdata(pdev);
+
+	vote(chip->not_ok_to_qnovo_votable, SHUTDOWN_VOTER, true, 0);
+}
+
+static const struct of_device_id match_table[] = {
+	{ .compatible = "qcom,qpnp-qnovo", },
+	{ },
+};
+
+static struct platform_driver qnovo_driver = {
+	.driver		= {
+		.name		= "qcom,qnovo-driver",
+		.owner		= THIS_MODULE,
+		.of_match_table	= match_table,
+	},
+	.probe		= qnovo_probe,
+	.remove		= qnovo_remove,
+	.shutdown	= qnovo_shutdown,
+};
+module_platform_driver(qnovo_driver);
+
+MODULE_DESCRIPTION("QPNP Qnovo Driver");
+MODULE_LICENSE("GPL v2");
diff -Nruw linux-4.4.115/drivers/power/supply/qcom/qpnp-smb2.c linux-4.4.115-fbx/drivers/power/supply/qcom/qpnp-smb2.c
--- linux-4.4.115/drivers/power/supply/qcom/qpnp-smb2.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/power/supply/qcom/qpnp-smb2.c	2019-10-29 09:26:24.633212866 +0100
@@ -0,0 +1,2490 @@
+/* Copyright (c) 2016-2017 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/power_supply.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/log2.h>
+#include <linux/qpnp/qpnp-revid.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/of_regulator.h>
+#include <linux/regulator/machine.h>
+#include "smb-reg.h"
+#include "smb-lib.h"
+#include "storm-watch.h"
+#include <linux/pmic-voter.h>
+
+#define SMB2_DEFAULT_WPWR_UW	8000000
+
+static struct smb_params v1_params = {
+	.fcc			= {
+		.name	= "fast charge current",
+		.reg	= FAST_CHARGE_CURRENT_CFG_REG,
+		.min_u	= 0,
+		.max_u	= 4500000,
+		.step_u	= 25000,
+	},
+	.fv			= {
+		.name	= "float voltage",
+		.reg	= FLOAT_VOLTAGE_CFG_REG,
+		.min_u	= 3487500,
+		.max_u	= 4920000,
+		.step_u	= 7500,
+	},
+	.usb_icl		= {
+		.name	= "usb input current limit",
+		.reg	= USBIN_CURRENT_LIMIT_CFG_REG,
+		.min_u	= 0,
+		.max_u	= 4800000,
+		.step_u	= 25000,
+	},
+	.icl_stat		= {
+		.name	= "input current limit status",
+		.reg	= ICL_STATUS_REG,
+		.min_u	= 0,
+		.max_u	= 4800000,
+		.step_u	= 25000,
+	},
+	.otg_cl			= {
+		.name	= "usb otg current limit",
+		.reg	= OTG_CURRENT_LIMIT_CFG_REG,
+		.min_u	= 250000,
+		.max_u	= 2000000,
+		.step_u	= 250000,
+	},
+	.dc_icl			= {
+		.name	= "dc input current limit",
+		.reg	= DCIN_CURRENT_LIMIT_CFG_REG,
+		.min_u	= 0,
+		.max_u	= 6000000,
+		.step_u	= 25000,
+	},
+	.dc_icl_pt_lv		= {
+		.name	= "dc icl PT <8V",
+		.reg	= ZIN_ICL_PT_REG,
+		.min_u	= 0,
+		.max_u	= 3000000,
+		.step_u	= 25000,
+	},
+	.dc_icl_pt_hv		= {
+		.name	= "dc icl PT >8V",
+		.reg	= ZIN_ICL_PT_HV_REG,
+		.min_u	= 0,
+		.max_u	= 3000000,
+		.step_u	= 25000,
+	},
+	.dc_icl_div2_lv		= {
+		.name	= "dc icl div2 <5.5V",
+		.reg	= ZIN_ICL_LV_REG,
+		.min_u	= 0,
+		.max_u	= 3000000,
+		.step_u	= 25000,
+	},
+	.dc_icl_div2_mid_lv	= {
+		.name	= "dc icl div2 5.5-6.5V",
+		.reg	= ZIN_ICL_MID_LV_REG,
+		.min_u	= 0,
+		.max_u	= 3000000,
+		.step_u	= 25000,
+	},
+	.dc_icl_div2_mid_hv	= {
+		.name	= "dc icl div2 6.5-8.0V",
+		.reg	= ZIN_ICL_MID_HV_REG,
+		.min_u	= 0,
+		.max_u	= 3000000,
+		.step_u	= 25000,
+	},
+	.dc_icl_div2_hv		= {
+		.name	= "dc icl div2 >8.0V",
+		.reg	= ZIN_ICL_HV_REG,
+		.min_u	= 0,
+		.max_u	= 3000000,
+		.step_u	= 25000,
+	},
+	.jeita_cc_comp		= {
+		.name	= "jeita fcc reduction",
+		.reg	= JEITA_CCCOMP_CFG_REG,
+		.min_u	= 0,
+		.max_u	= 1575000,
+		.step_u	= 25000,
+	},
+	.freq_buck		= {
+		.name	= "buck switching frequency",
+		.reg	= CFG_BUCKBOOST_FREQ_SELECT_BUCK_REG,
+		.min_u	= 600,
+		.max_u	= 2000,
+		.step_u	= 200,
+	},
+	.freq_boost		= {
+		.name	= "boost switching frequency",
+		.reg	= CFG_BUCKBOOST_FREQ_SELECT_BOOST_REG,
+		.min_u	= 600,
+		.max_u	= 2000,
+		.step_u	= 200,
+	},
+};
+
+static struct smb_params pm660_params = {
+	.freq_buck		= {
+		.name	= "buck switching frequency",
+		.reg	= FREQ_CLK_DIV_REG,
+		.min_u	= 600,
+		.max_u	= 1600,
+		.set_proc = smblib_set_chg_freq,
+	},
+	.freq_boost		= {
+		.name	= "boost switching frequency",
+		.reg	= FREQ_CLK_DIV_REG,
+		.min_u	= 600,
+		.max_u	= 1600,
+		.set_proc = smblib_set_chg_freq,
+	},
+};
+
+struct smb_dt_props {
+	int	usb_icl_ua;
+	int	dc_icl_ua;
+	int	boost_threshold_ua;
+	int	wipower_max_uw;
+	int	min_freq_khz;
+	int	max_freq_khz;
+	struct	device_node *revid_dev_node;
+	int	float_option;
+	int	chg_inhibit_thr_mv;
+	bool	no_battery;
+	bool	hvdcp_disable;
+	bool	auto_recharge_soc;
+	int	wd_bark_time;
+};
+
+struct smb2 {
+	struct smb_charger	chg;
+	struct dentry		*dfs_root;
+	struct smb_dt_props	dt;
+	bool			bad_part;
+};
+
+static int __debug_mask;
+module_param_named(
+	debug_mask, __debug_mask, int, S_IRUSR | S_IWUSR
+);
+
+static int __weak_chg_icl_ua = 500000;
+module_param_named(
+	weak_chg_icl_ua, __weak_chg_icl_ua, int, S_IRUSR | S_IWUSR);
+
+static int __try_sink_enabled = 1;
+module_param_named(
+	try_sink_enabled, __try_sink_enabled, int, 0600
+);
+
+#define MICRO_1P5A		1500000
+#define MICRO_P1A		100000
+#define OTG_DEFAULT_DEGLITCH_TIME_MS	50
+#define MIN_WD_BARK_TIME		16
+#define DEFAULT_WD_BARK_TIME		64
+#define BITE_WDOG_TIMEOUT_8S		0x3
+#define BARK_WDOG_TIMEOUT_MASK		GENMASK(3, 2)
+#define BARK_WDOG_TIMEOUT_SHIFT		2
+static int smb2_parse_dt(struct smb2 *chip)
+{
+	struct smb_charger *chg = &chip->chg;
+	struct device_node *node = chg->dev->of_node;
+	int rc, byte_len;
+
+	if (!node) {
+		pr_err("device tree node missing\n");
+		return -EINVAL;
+	}
+
+	chg->step_chg_enabled = of_property_read_bool(node,
+				"qcom,step-charging-enable");
+
+	chg->sw_jeita_enabled = of_property_read_bool(node,
+				"qcom,sw-jeita-enable");
+
+	rc = of_property_read_u32(node, "qcom,wd-bark-time-secs",
+					&chip->dt.wd_bark_time);
+	if (rc < 0 || chip->dt.wd_bark_time < MIN_WD_BARK_TIME)
+		chip->dt.wd_bark_time = DEFAULT_WD_BARK_TIME;
+
+	chip->dt.no_battery = of_property_read_bool(node,
+						"qcom,batteryless-platform");
+
+	rc = of_property_read_u32(node,
+				"qcom,fcc-max-ua", &chg->batt_profile_fcc_ua);
+	if (rc < 0)
+		chg->batt_profile_fcc_ua = -EINVAL;
+
+	rc = of_property_read_u32(node,
+				"qcom,fv-max-uv", &chg->batt_profile_fv_uv);
+	if (rc < 0)
+		chg->batt_profile_fv_uv = -EINVAL;
+
+	rc = of_property_read_u32(node,
+				"qcom,usb-icl-ua", &chip->dt.usb_icl_ua);
+	if (rc < 0)
+		chip->dt.usb_icl_ua = -EINVAL;
+
+	rc = of_property_read_u32(node,
+				"qcom,otg-cl-ua", &chg->otg_cl_ua);
+	if (rc < 0)
+		chg->otg_cl_ua = MICRO_1P5A;
+
+	rc = of_property_read_u32(node,
+				"qcom,dc-icl-ua", &chip->dt.dc_icl_ua);
+	if (rc < 0)
+		chip->dt.dc_icl_ua = -EINVAL;
+
+	rc = of_property_read_u32(node,
+				"qcom,boost-threshold-ua",
+				&chip->dt.boost_threshold_ua);
+	if (rc < 0)
+		chip->dt.boost_threshold_ua = MICRO_P1A;
+
+	rc = of_property_read_u32(node,
+				"qcom,min-freq-khz",
+				&chip->dt.min_freq_khz);
+	if (rc < 0)
+		chip->dt.min_freq_khz = -EINVAL;
+
+	rc = of_property_read_u32(node,
+				"qcom,max-freq-khz",
+				&chip->dt.max_freq_khz);
+	if (rc < 0)
+		chip->dt.max_freq_khz = -EINVAL;
+
+	rc = of_property_read_u32(node, "qcom,wipower-max-uw",
+				&chip->dt.wipower_max_uw);
+	if (rc < 0)
+		chip->dt.wipower_max_uw = -EINVAL;
+
+	if (of_find_property(node, "qcom,thermal-mitigation", &byte_len)) {
+		chg->thermal_mitigation = devm_kzalloc(chg->dev, byte_len,
+			GFP_KERNEL);
+
+		if (chg->thermal_mitigation == NULL)
+			return -ENOMEM;
+
+		chg->thermal_levels = byte_len / sizeof(u32);
+		rc = of_property_read_u32_array(node,
+				"qcom,thermal-mitigation",
+				chg->thermal_mitigation,
+				chg->thermal_levels);
+		if (rc < 0) {
+			dev_err(chg->dev,
+				"Couldn't read threm limits rc = %d\n", rc);
+			return rc;
+		}
+	}
+
+	of_property_read_u32(node, "qcom,float-option", &chip->dt.float_option);
+	if (chip->dt.float_option < 0 || chip->dt.float_option > 4) {
+		pr_err("qcom,float-option is out of range [0, 4]\n");
+		return -EINVAL;
+	}
+
+	chip->dt.hvdcp_disable = of_property_read_bool(node,
+						"qcom,hvdcp-disable");
+
+	of_property_read_u32(node, "qcom,chg-inhibit-threshold-mv",
+				&chip->dt.chg_inhibit_thr_mv);
+	if ((chip->dt.chg_inhibit_thr_mv < 0 ||
+		chip->dt.chg_inhibit_thr_mv > 300)) {
+		pr_err("qcom,chg-inhibit-threshold-mv is incorrect\n");
+		return -EINVAL;
+	}
+
+	chip->dt.auto_recharge_soc = of_property_read_bool(node,
+						"qcom,auto-recharge-soc");
+
+	chg->micro_usb_mode = of_property_read_bool(node, "qcom,micro-usb");
+
+	chg->dcp_icl_ua = chip->dt.usb_icl_ua;
+
+	chg->suspend_input_on_debug_batt = of_property_read_bool(node,
+					"qcom,suspend-input-on-debug-batt");
+
+	rc = of_property_read_u32(node, "qcom,otg-deglitch-time-ms",
+					&chg->otg_delay_ms);
+	if (rc < 0)
+		chg->otg_delay_ms = OTG_DEFAULT_DEGLITCH_TIME_MS;
+
+	return 0;
+}
+
+/************************
+ * USB PSY REGISTRATION *
+ ************************/
+
+static enum power_supply_property smb2_usb_props[] = {
+	POWER_SUPPLY_PROP_PRESENT,
+	POWER_SUPPLY_PROP_ONLINE,
+	POWER_SUPPLY_PROP_VOLTAGE_MAX,
+	POWER_SUPPLY_PROP_VOLTAGE_NOW,
+	POWER_SUPPLY_PROP_PD_CURRENT_MAX,
+	POWER_SUPPLY_PROP_CURRENT_MAX,
+	POWER_SUPPLY_PROP_TYPE,
+	POWER_SUPPLY_PROP_TYPEC_MODE,
+	POWER_SUPPLY_PROP_TYPEC_POWER_ROLE,
+	POWER_SUPPLY_PROP_TYPEC_CC_ORIENTATION,
+	POWER_SUPPLY_PROP_PD_ALLOWED,
+	POWER_SUPPLY_PROP_PD_ACTIVE,
+	POWER_SUPPLY_PROP_INPUT_CURRENT_SETTLED,
+	POWER_SUPPLY_PROP_INPUT_CURRENT_NOW,
+	POWER_SUPPLY_PROP_BOOST_CURRENT,
+	POWER_SUPPLY_PROP_PE_START,
+	POWER_SUPPLY_PROP_CTM_CURRENT_MAX,
+	POWER_SUPPLY_PROP_HW_CURRENT_MAX,
+	POWER_SUPPLY_PROP_REAL_TYPE,
+	POWER_SUPPLY_PROP_PR_SWAP,
+	POWER_SUPPLY_PROP_PD_VOLTAGE_MAX,
+	POWER_SUPPLY_PROP_PD_VOLTAGE_MIN,
+	POWER_SUPPLY_PROP_SDP_CURRENT_MAX,
+};
+
+static int smb2_usb_get_prop(struct power_supply *psy,
+		enum power_supply_property psp,
+		union power_supply_propval *val)
+{
+	struct smb2 *chip = power_supply_get_drvdata(psy);
+	struct smb_charger *chg = &chip->chg;
+	int rc = 0;
+
+	switch (psp) {
+	case POWER_SUPPLY_PROP_PRESENT:
+		if (chip->bad_part)
+			val->intval = 1;
+		else
+			rc = smblib_get_prop_usb_present(chg, val);
+		break;
+	case POWER_SUPPLY_PROP_ONLINE:
+		rc = smblib_get_prop_usb_online(chg, val);
+		if (!val->intval)
+			break;
+
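+		/*
+		 * a default-current SDP is reported on the pc_port
+		 * supply instead of here
+		 */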
+		if ((chg->typec_mode == POWER_SUPPLY_TYPEC_SOURCE_DEFAULT ||
+			chg->micro_usb_mode) &&
+			chg->real_charger_type == POWER_SUPPLY_TYPE_USB)
+			val->intval = 0;
+		else
+			val->intval = 1;
+		if (chg->real_charger_type == POWER_SUPPLY_TYPE_UNKNOWN)
+			val->intval = 0;
+		break;
+	case POWER_SUPPLY_PROP_VOLTAGE_MAX:
+		rc = smblib_get_prop_usb_voltage_max(chg, val);
+		break;
+	case POWER_SUPPLY_PROP_VOLTAGE_NOW:
+		rc = smblib_get_prop_usb_voltage_now(chg, val);
+		break;
+	case POWER_SUPPLY_PROP_PD_CURRENT_MAX:
+		val->intval = get_client_vote(chg->usb_icl_votable, PD_VOTER);
+		break;
+	case POWER_SUPPLY_PROP_CURRENT_MAX:
+		rc = smblib_get_prop_input_current_settled(chg, val);
+		break;
+	case POWER_SUPPLY_PROP_TYPE:
+		val->intval = POWER_SUPPLY_TYPE_USB_PD;
+		break;
+	case POWER_SUPPLY_PROP_REAL_TYPE:
+		if (chip->bad_part)
+			val->intval = POWER_SUPPLY_TYPE_USB_PD;
+		else
+			val->intval = chg->real_charger_type;
+		break;
+	case POWER_SUPPLY_PROP_TYPEC_MODE:
+		if (chg->micro_usb_mode)
+			val->intval = POWER_SUPPLY_TYPEC_NONE;
+		else if (chip->bad_part)
+			val->intval = POWER_SUPPLY_TYPEC_SOURCE_DEFAULT;
+		else
+			val->intval = chg->typec_mode;
+		break;
+	case POWER_SUPPLY_PROP_TYPEC_POWER_ROLE:
+		if (chg->micro_usb_mode)
+			val->intval = POWER_SUPPLY_TYPEC_PR_NONE;
+		else
+			rc = smblib_get_prop_typec_power_role(chg, val);
+		break;
+	case POWER_SUPPLY_PROP_TYPEC_CC_ORIENTATION:
+		if (chg->micro_usb_mode)
+			val->intval = 0;
+		else
+			rc = smblib_get_prop_typec_cc_orientation(chg, val);
+		break;
+	case POWER_SUPPLY_PROP_PD_ALLOWED:
+		rc = smblib_get_prop_pd_allowed(chg, val);
+		break;
+	case POWER_SUPPLY_PROP_PD_ACTIVE:
+		val->intval = chg->pd_active;
+		break;
+	case POWER_SUPPLY_PROP_INPUT_CURRENT_SETTLED:
+		rc = smblib_get_prop_input_current_settled(chg, val);
+		break;
+	case POWER_SUPPLY_PROP_INPUT_CURRENT_NOW:
+		rc = smblib_get_prop_usb_current_now(chg, val);
+		break;
+	case POWER_SUPPLY_PROP_BOOST_CURRENT:
+		val->intval = chg->boost_current_ua;
+		break;
+	case POWER_SUPPLY_PROP_PD_IN_HARD_RESET:
+		rc = smblib_get_prop_pd_in_hard_reset(chg, val);
+		break;
+	case POWER_SUPPLY_PROP_PD_USB_SUSPEND_SUPPORTED:
+		val->intval = chg->system_suspend_supported;
+		break;
+	case POWER_SUPPLY_PROP_PE_START:
+		rc = smblib_get_pe_start(chg, val);
+		break;
+	case POWER_SUPPLY_PROP_CTM_CURRENT_MAX:
+		val->intval = get_client_vote(chg->usb_icl_votable, CTM_VOTER);
+		break;
+	case POWER_SUPPLY_PROP_HW_CURRENT_MAX:
+		rc = smblib_get_charge_current(chg, &val->intval);
+		break;
+	case POWER_SUPPLY_PROP_PR_SWAP:
+		rc = smblib_get_prop_pr_swap_in_progress(chg, val);
+		break;
+	case POWER_SUPPLY_PROP_PD_VOLTAGE_MAX:
+		val->intval = chg->voltage_max_uv;
+		break;
+	case POWER_SUPPLY_PROP_PD_VOLTAGE_MIN:
+		val->intval = chg->voltage_min_uv;
+		break;
+	case POWER_SUPPLY_PROP_SDP_CURRENT_MAX:
+		val->intval = get_client_vote(chg->usb_icl_votable,
+					      USB_PSY_VOTER);
+		break;
+	default:
+		pr_err("get prop %d is not supported in usb\n", psp);
+		rc = -EINVAL;
+		break;
+	}
+	if (rc < 0) {
+		pr_debug("Couldn't get prop %d rc = %d\n", psp, rc);
+		return -ENODATA;
+	}
+	return 0;
+}
+
+static int smb2_usb_set_prop(struct power_supply *psy,
+		enum power_supply_property psp,
+		const union power_supply_propval *val)
+{
+	struct smb2 *chip = power_supply_get_drvdata(psy);
+	struct smb_charger *chg = &chip->chg;
+	int rc = 0;
+
+	mutex_lock(&chg->lock);
+	if (!chg->typec_present) {
+		rc = -EINVAL;
+		goto unlock;
+	}
+
+	switch (psp) {
+	case POWER_SUPPLY_PROP_PD_CURRENT_MAX:
+		rc = smblib_set_prop_pd_current_max(chg, val);
+		break;
+	case POWER_SUPPLY_PROP_TYPEC_POWER_ROLE:
+		rc = smblib_set_prop_typec_power_role(chg, val);
+		break;
+	case POWER_SUPPLY_PROP_PD_ACTIVE:
+		rc = smblib_set_prop_pd_active(chg, val);
+		break;
+	case POWER_SUPPLY_PROP_PD_IN_HARD_RESET:
+		rc = smblib_set_prop_pd_in_hard_reset(chg, val);
+		break;
+	case POWER_SUPPLY_PROP_PD_USB_SUSPEND_SUPPORTED:
+		chg->system_suspend_supported = val->intval;
+		break;
+	case POWER_SUPPLY_PROP_BOOST_CURRENT:
+		rc = smblib_set_prop_boost_current(chg, val);
+		break;
+	case POWER_SUPPLY_PROP_CTM_CURRENT_MAX:
+		rc = vote(chg->usb_icl_votable, CTM_VOTER,
+						val->intval >= 0, val->intval);
+		break;
+	case POWER_SUPPLY_PROP_PR_SWAP:
+		rc = smblib_set_prop_pr_swap_in_progress(chg, val);
+		break;
+	case POWER_SUPPLY_PROP_PD_VOLTAGE_MAX:
+		rc = smblib_set_prop_pd_voltage_max(chg, val);
+		break;
+	case POWER_SUPPLY_PROP_PD_VOLTAGE_MIN:
+		rc = smblib_set_prop_pd_voltage_min(chg, val);
+		break;
+	case POWER_SUPPLY_PROP_SDP_CURRENT_MAX:
+		rc = smblib_set_prop_sdp_current_max(chg, val);
+		break;
+	default:
+		pr_err("set prop %d is not supported\n", psp);
+		rc = -EINVAL;
+		break;
+	}
+
+unlock:
+	mutex_unlock(&chg->lock);
+	return rc;
+}
+
+static int smb2_usb_prop_is_writeable(struct power_supply *psy,
+		enum power_supply_property psp)
+{
+	switch (psp) {
+	case POWER_SUPPLY_PROP_CTM_CURRENT_MAX:
+		return 1;
+	default:
+		break;
+	}
+
+	return 0;
+}
+
+static int smb2_init_usb_psy(struct smb2 *chip)
+{
+	struct power_supply_config usb_cfg = {};
+	struct smb_charger *chg = &chip->chg;
+
+	chg->usb_psy_desc.name			= "usb";
+	chg->usb_psy_desc.type			= POWER_SUPPLY_TYPE_USB_PD;
+	chg->usb_psy_desc.properties		= smb2_usb_props;
+	chg->usb_psy_desc.num_properties	= ARRAY_SIZE(smb2_usb_props);
+	chg->usb_psy_desc.get_property		= smb2_usb_get_prop;
+	chg->usb_psy_desc.set_property		= smb2_usb_set_prop;
+	chg->usb_psy_desc.property_is_writeable	= smb2_usb_prop_is_writeable;
+
+	usb_cfg.drv_data = chip;
+	usb_cfg.of_node = chg->dev->of_node;
+	chg->usb_psy = power_supply_register(chg->dev,
+						  &chg->usb_psy_desc,
+						  &usb_cfg);
+	if (IS_ERR(chg->usb_psy)) {
+		pr_err("Couldn't register USB power supply\n");
+		return PTR_ERR(chg->usb_psy);
+	}
+
+	return 0;
+}
+
+/********************************
+ * USB PC_PORT PSY REGISTRATION *
+ ********************************/
+static enum power_supply_property smb2_usb_port_props[] = {
+	POWER_SUPPLY_PROP_TYPE,
+	POWER_SUPPLY_PROP_ONLINE,
+	POWER_SUPPLY_PROP_VOLTAGE_MAX,
+	POWER_SUPPLY_PROP_CURRENT_MAX,
+};
+
+static int smb2_usb_port_get_prop(struct power_supply *psy,
+		enum power_supply_property psp,
+		union power_supply_propval *val)
+{
+	struct smb2 *chip = power_supply_get_drvdata(psy);
+	struct smb_charger *chg = &chip->chg;
+	int rc = 0;
+
+	switch (psp) {
+	case POWER_SUPPLY_PROP_TYPE:
+		val->intval = POWER_SUPPLY_TYPE_USB;
+		break;
+	case POWER_SUPPLY_PROP_ONLINE:
+		rc = smblib_get_prop_usb_online(chg, val);
+		if (!val->intval)
+			break;
+
+		if ((chg->typec_mode == POWER_SUPPLY_TYPEC_SOURCE_DEFAULT ||
+			chg->micro_usb_mode) &&
+			chg->real_charger_type == POWER_SUPPLY_TYPE_USB)
+			val->intval = 1;
+		else
+			val->intval = 0;
+		break;
+	case POWER_SUPPLY_PROP_VOLTAGE_MAX:
+		val->intval = 5000000;
+		break;
+	case POWER_SUPPLY_PROP_CURRENT_MAX:
+		rc = smblib_get_prop_input_current_settled(chg, val);
+		break;
+	default:
+		pr_err_ratelimited("Get prop %d is not supported in pc_port\n",
+				psp);
+		return -EINVAL;
+	}
+
+	if (rc < 0) {
+		pr_debug("Couldn't get prop %d rc = %d\n", psp, rc);
+		return -ENODATA;
+	}
+
+	return 0;
+}
+
+static int smb2_usb_port_set_prop(struct power_supply *psy,
+		enum power_supply_property psp,
+		const union power_supply_propval *val)
+{
+	int rc = 0;
+
+	switch (psp) {
+	default:
+		pr_err_ratelimited("Set prop %d is not supported in pc_port\n",
+				psp);
+		rc = -EINVAL;
+		break;
+	}
+
+	return rc;
+}
+
+static const struct power_supply_desc usb_port_psy_desc = {
+	.name		= "pc_port",
+	.type		= POWER_SUPPLY_TYPE_USB,
+	.properties	= smb2_usb_port_props,
+	.num_properties	= ARRAY_SIZE(smb2_usb_port_props),
+	.get_property	= smb2_usb_port_get_prop,
+	.set_property	= smb2_usb_port_set_prop,
+};
+
+static int smb2_init_usb_port_psy(struct smb2 *chip)
+{
+	struct power_supply_config usb_port_cfg = {};
+	struct smb_charger *chg = &chip->chg;
+
+	usb_port_cfg.drv_data = chip;
+	usb_port_cfg.of_node = chg->dev->of_node;
+	chg->usb_port_psy = power_supply_register(chg->dev,
+						  &usb_port_psy_desc,
+						  &usb_port_cfg);
+	if (IS_ERR(chg->usb_port_psy)) {
+		pr_err("Couldn't register USB pc_port power supply\n");
+		return PTR_ERR(chg->usb_port_psy);
+	}
+
+	return 0;
+}
+
+/*****************************
+ * USB MAIN PSY REGISTRATION *
+ *****************************/
+
+static enum power_supply_property smb2_usb_main_props[] = {
+	POWER_SUPPLY_PROP_VOLTAGE_MAX,
+	POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX,
+	POWER_SUPPLY_PROP_TYPE,
+	POWER_SUPPLY_PROP_INPUT_CURRENT_SETTLED,
+	POWER_SUPPLY_PROP_INPUT_VOLTAGE_SETTLED,
+	POWER_SUPPLY_PROP_FCC_DELTA,
+	POWER_SUPPLY_PROP_CURRENT_MAX,
+	/*
+	 * TODO move the TEMP and TEMP_MAX properties here,
+	 * and update the thermal balancer to look here
+	 */
+};
+
+static int smb2_usb_main_get_prop(struct power_supply *psy,
+		enum power_supply_property psp,
+		union power_supply_propval *val)
+{
+	struct smb2 *chip = power_supply_get_drvdata(psy);
+	struct smb_charger *chg = &chip->chg;
+	int rc = 0;
+
+	switch (psp) {
+	case POWER_SUPPLY_PROP_VOLTAGE_MAX:
+		rc = smblib_get_charge_param(chg, &chg->param.fv, &val->intval);
+		break;
+	case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX:
+		rc = smblib_get_charge_param(chg, &chg->param.fcc,
+							&val->intval);
+		break;
+	case POWER_SUPPLY_PROP_TYPE:
+		val->intval = POWER_SUPPLY_TYPE_MAIN;
+		break;
+	case POWER_SUPPLY_PROP_INPUT_CURRENT_SETTLED:
+		rc = smblib_get_prop_input_current_settled(chg, val);
+		break;
+	case POWER_SUPPLY_PROP_INPUT_VOLTAGE_SETTLED:
+		rc = smblib_get_prop_input_voltage_settled(chg, val);
+		break;
+	case POWER_SUPPLY_PROP_FCC_DELTA:
+		rc = smblib_get_prop_fcc_delta(chg, val);
+		break;
+	case POWER_SUPPLY_PROP_CURRENT_MAX:
+		rc = smblib_get_icl_current(chg, &val->intval);
+		break;
+	default:
+		pr_debug("get prop %d is not supported in usb-main\n", psp);
+		rc = -EINVAL;
+		break;
+	}
+	if (rc < 0) {
+		pr_debug("Couldn't get prop %d rc = %d\n", psp, rc);
+		return -ENODATA;
+	}
+	return 0;
+}
+
+static int smb2_usb_main_set_prop(struct power_supply *psy,
+		enum power_supply_property psp,
+		const union power_supply_propval *val)
+{
+	struct smb2 *chip = power_supply_get_drvdata(psy);
+	struct smb_charger *chg = &chip->chg;
+	int rc = 0;
+
+	switch (psp) {
+	case POWER_SUPPLY_PROP_VOLTAGE_MAX:
+		rc = smblib_set_charge_param(chg, &chg->param.fv, val->intval);
+		break;
+	case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX:
+		rc = smblib_set_charge_param(chg, &chg->param.fcc, val->intval);
+		break;
+	case POWER_SUPPLY_PROP_CURRENT_MAX:
+		rc = smblib_set_icl_current(chg, val->intval);
+		break;
+	default:
+		pr_err("set prop %d is not supported\n", psp);
+		rc = -EINVAL;
+		break;
+	}
+
+	return rc;
+}
+
+static const struct power_supply_desc usb_main_psy_desc = {
+	.name		= "main",
+	.type		= POWER_SUPPLY_TYPE_MAIN,
+	.properties	= smb2_usb_main_props,
+	.num_properties	= ARRAY_SIZE(smb2_usb_main_props),
+	.get_property	= smb2_usb_main_get_prop,
+	.set_property	= smb2_usb_main_set_prop,
+};
+
+static int smb2_init_usb_main_psy(struct smb2 *chip)
+{
+	struct power_supply_config usb_main_cfg = {};
+	struct smb_charger *chg = &chip->chg;
+
+	usb_main_cfg.drv_data = chip;
+	usb_main_cfg.of_node = chg->dev->of_node;
+	chg->usb_main_psy = power_supply_register(chg->dev,
+						  &usb_main_psy_desc,
+						  &usb_main_cfg);
+	if (IS_ERR(chg->usb_main_psy)) {
+		pr_err("Couldn't register USB main power supply\n");
+		return PTR_ERR(chg->usb_main_psy);
+	}
+
+	return 0;
+}
+
+/*************************
+ * DC PSY REGISTRATION   *
+ *************************/
+
+static enum power_supply_property smb2_dc_props[] = {
+	POWER_SUPPLY_PROP_INPUT_SUSPEND,
+	POWER_SUPPLY_PROP_PRESENT,
+	POWER_SUPPLY_PROP_ONLINE,
+	POWER_SUPPLY_PROP_CURRENT_MAX,
+	POWER_SUPPLY_PROP_REAL_TYPE,
+};
+
+static int smb2_dc_get_prop(struct power_supply *psy,
+		enum power_supply_property psp,
+		union power_supply_propval *val)
+{
+	struct smb2 *chip = power_supply_get_drvdata(psy);
+	struct smb_charger *chg = &chip->chg;
+	int rc = 0;
+
+	switch (psp) {
+	case POWER_SUPPLY_PROP_INPUT_SUSPEND:
+		val->intval = get_effective_result(chg->dc_suspend_votable);
+		break;
+	case POWER_SUPPLY_PROP_PRESENT:
+		rc = smblib_get_prop_dc_present(chg, val);
+		break;
+	case POWER_SUPPLY_PROP_ONLINE:
+		rc = smblib_get_prop_dc_online(chg, val);
+		break;
+	case POWER_SUPPLY_PROP_CURRENT_MAX:
+		rc = smblib_get_prop_dc_current_max(chg, val);
+		break;
+	case POWER_SUPPLY_PROP_REAL_TYPE:
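+		/* the DC input is modelled as a WiPower wireless source */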
+		val->intval = POWER_SUPPLY_TYPE_WIPOWER;
+		break;
+	default:
+		return -EINVAL;
+	}
+	if (rc < 0) {
+		pr_debug("Couldn't get prop %d rc = %d\n", psp, rc);
+		return -ENODATA;
+	}
+	return 0;
+}
+
+static int smb2_dc_set_prop(struct power_supply *psy,
+		enum power_supply_property psp,
+		const union power_supply_propval *val)
+{
+	struct smb2 *chip = power_supply_get_drvdata(psy);
+	struct smb_charger *chg = &chip->chg;
+	int rc = 0;
+
+	switch (psp) {
+	case POWER_SUPPLY_PROP_INPUT_SUSPEND:
+		rc = vote(chg->dc_suspend_votable, WBC_VOTER,
+				(bool)val->intval, 0);
+		break;
+	case POWER_SUPPLY_PROP_CURRENT_MAX:
+		rc = smblib_set_prop_dc_current_max(chg, val);
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return rc;
+}
+
+static int smb2_dc_prop_is_writeable(struct power_supply *psy,
+		enum power_supply_property psp)
+{
+	int rc;
+
+	switch (psp) {
+	case POWER_SUPPLY_PROP_CURRENT_MAX:
+		rc = 1;
+		break;
+	default:
+		rc = 0;
+		break;
+	}
+
+	return rc;
+}
+
+static const struct power_supply_desc dc_psy_desc = {
+	.name = "dc",
+	.type = POWER_SUPPLY_TYPE_WIRELESS,
+	.properties = smb2_dc_props,
+	.num_properties = ARRAY_SIZE(smb2_dc_props),
+	.get_property = smb2_dc_get_prop,
+	.set_property = smb2_dc_set_prop,
+	.property_is_writeable = smb2_dc_prop_is_writeable,
+};
+
+static int smb2_init_dc_psy(struct smb2 *chip)
+{
+	struct power_supply_config dc_cfg = {};
+	struct smb_charger *chg = &chip->chg;
+
+	dc_cfg.drv_data = chip;
+	dc_cfg.of_node = chg->dev->of_node;
+	chg->dc_psy = power_supply_register(chg->dev,
+						  &dc_psy_desc,
+						  &dc_cfg);
+	if (IS_ERR(chg->dc_psy)) {
+		pr_err("Couldn't register USB power supply\n");
+		return PTR_ERR(chg->dc_psy);
+	}
+
+	return 0;
+}
+
+/*************************
+ * BATT PSY REGISTRATION *
+ *************************/
+
+static enum power_supply_property smb2_batt_props[] = {
+	POWER_SUPPLY_PROP_INPUT_SUSPEND,
+	POWER_SUPPLY_PROP_STATUS,
+	POWER_SUPPLY_PROP_HEALTH,
+	POWER_SUPPLY_PROP_PRESENT,
+	POWER_SUPPLY_PROP_CHARGE_TYPE,
+	POWER_SUPPLY_PROP_CAPACITY,
+	POWER_SUPPLY_PROP_SYSTEM_TEMP_LEVEL,
+	POWER_SUPPLY_PROP_CHARGER_TEMP,
+	POWER_SUPPLY_PROP_CHARGER_TEMP_MAX,
+	POWER_SUPPLY_PROP_INPUT_CURRENT_LIMITED,
+	POWER_SUPPLY_PROP_VOLTAGE_NOW,
+	POWER_SUPPLY_PROP_VOLTAGE_MAX,
+	POWER_SUPPLY_PROP_VOLTAGE_QNOVO,
+	POWER_SUPPLY_PROP_CURRENT_NOW,
+	POWER_SUPPLY_PROP_CURRENT_QNOVO,
+	POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX,
+	POWER_SUPPLY_PROP_TEMP,
+	POWER_SUPPLY_PROP_TECHNOLOGY,
+	POWER_SUPPLY_PROP_STEP_CHARGING_ENABLED,
+	POWER_SUPPLY_PROP_SW_JEITA_ENABLED,
+	POWER_SUPPLY_PROP_CHARGE_DONE,
+	POWER_SUPPLY_PROP_PARALLEL_DISABLE,
+	POWER_SUPPLY_PROP_SET_SHIP_MODE,
+	POWER_SUPPLY_PROP_DIE_HEALTH,
+	POWER_SUPPLY_PROP_RERUN_AICL,
+	POWER_SUPPLY_PROP_DP_DM,
+	POWER_SUPPLY_PROP_CHARGE_COUNTER,
+};
+
+static int smb2_batt_get_prop(struct power_supply *psy,
+		enum power_supply_property psp,
+		union power_supply_propval *val)
+{
+	struct smb_charger *chg = power_supply_get_drvdata(psy);
+	int rc = 0;
+	union power_supply_propval pval = {0, };
+
+	switch (psp) {
+	case POWER_SUPPLY_PROP_STATUS:
+		rc = smblib_get_prop_batt_status(chg, val);
+		break;
+	case POWER_SUPPLY_PROP_HEALTH:
+		rc = smblib_get_prop_batt_health(chg, val);
+		break;
+	case POWER_SUPPLY_PROP_PRESENT:
+		rc = smblib_get_prop_batt_present(chg, val);
+		break;
+	case POWER_SUPPLY_PROP_INPUT_SUSPEND:
+		rc = smblib_get_prop_input_suspend(chg, val);
+		break;
+	case POWER_SUPPLY_PROP_CHARGE_TYPE:
+		rc = smblib_get_prop_batt_charge_type(chg, val);
+		break;
+	case POWER_SUPPLY_PROP_CAPACITY:
+		rc = smblib_get_prop_batt_capacity(chg, val);
+		break;
+	case POWER_SUPPLY_PROP_SYSTEM_TEMP_LEVEL:
+		rc = smblib_get_prop_system_temp_level(chg, val);
+		break;
+	case POWER_SUPPLY_PROP_CHARGER_TEMP:
+		/* do not query RRADC if charger is not present */
+		rc = smblib_get_prop_usb_present(chg, &pval);
+		if (rc < 0)
+			pr_err("Couldn't get usb present rc=%d\n", rc);
+
+		rc = -ENODATA;
+		if (pval.intval)
+			rc = smblib_get_prop_charger_temp(chg, val);
+		break;
+	case POWER_SUPPLY_PROP_CHARGER_TEMP_MAX:
+		rc = smblib_get_prop_charger_temp_max(chg, val);
+		break;
+	case POWER_SUPPLY_PROP_INPUT_CURRENT_LIMITED:
+		rc = smblib_get_prop_input_current_limited(chg, val);
+		break;
+	case POWER_SUPPLY_PROP_STEP_CHARGING_ENABLED:
+		val->intval = chg->step_chg_enabled;
+		break;
+	case POWER_SUPPLY_PROP_SW_JEITA_ENABLED:
+		val->intval = chg->sw_jeita_enabled;
+		break;
+	case POWER_SUPPLY_PROP_VOLTAGE_NOW:
+		rc = smblib_get_prop_batt_voltage_now(chg, val);
+		break;
+	case POWER_SUPPLY_PROP_VOLTAGE_MAX:
+		val->intval = get_client_vote(chg->fv_votable,
+				BATT_PROFILE_VOTER);
+		break;
+	case POWER_SUPPLY_PROP_CHARGE_QNOVO_ENABLE:
+		rc = smblib_get_prop_charge_qnovo_enable(chg, val);
+		break;
+	case POWER_SUPPLY_PROP_VOLTAGE_QNOVO:
+		val->intval = get_client_vote_locked(chg->fv_votable,
+				QNOVO_VOTER);
+		break;
+	case POWER_SUPPLY_PROP_CURRENT_NOW:
+		rc = smblib_get_prop_batt_current_now(chg, val);
+		break;
+	case POWER_SUPPLY_PROP_CURRENT_QNOVO:
+		val->intval = get_client_vote_locked(chg->fcc_votable,
+				QNOVO_VOTER);
+		break;
+	case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX:
+		val->intval = get_client_vote(chg->fcc_votable,
+					      BATT_PROFILE_VOTER);
+		break;
+	case POWER_SUPPLY_PROP_TEMP:
+		rc = smblib_get_prop_batt_temp(chg, val);
+		break;
+	case POWER_SUPPLY_PROP_TECHNOLOGY:
+		val->intval = POWER_SUPPLY_TECHNOLOGY_LION;
+		break;
+	case POWER_SUPPLY_PROP_CHARGE_DONE:
+		rc = smblib_get_prop_batt_charge_done(chg, val);
+		break;
+	case POWER_SUPPLY_PROP_PARALLEL_DISABLE:
+		val->intval = get_client_vote(chg->pl_disable_votable,
+					      USER_VOTER);
+		break;
+	case POWER_SUPPLY_PROP_SET_SHIP_MODE:
+		/* Not in ship mode as long as device is active */
+		val->intval = 0;
+		break;
+	case POWER_SUPPLY_PROP_DIE_HEALTH:
+		rc = smblib_get_prop_die_health(chg, val);
+		break;
+	case POWER_SUPPLY_PROP_DP_DM:
+		val->intval = chg->pulse_cnt;
+		break;
+	case POWER_SUPPLY_PROP_RERUN_AICL:
+		val->intval = 0;
+		break;
+	case POWER_SUPPLY_PROP_CHARGE_COUNTER:
+		rc = smblib_get_prop_batt_charge_counter(chg, val);
+		break;
+	default:
+		pr_err("batt power supply prop %d not supported\n", psp);
+		return -EINVAL;
+	}
+
+	if (rc < 0) {
+		pr_debug("Couldn't get prop %d rc = %d\n", psp, rc);
+		return -ENODATA;
+	}
+
+	return 0;
+}
+
+static int smb2_batt_set_prop(struct power_supply *psy,
+		enum power_supply_property prop,
+		const union power_supply_propval *val)
+{
+	int rc = 0;
+	struct smb_charger *chg = power_supply_get_drvdata(psy);
+
+	switch (prop) {
+	case POWER_SUPPLY_PROP_INPUT_SUSPEND:
+		rc = smblib_set_prop_input_suspend(chg, val);
+		break;
+	case POWER_SUPPLY_PROP_SYSTEM_TEMP_LEVEL:
+		rc = smblib_set_prop_system_temp_level(chg, val);
+		break;
+	case POWER_SUPPLY_PROP_CAPACITY:
+		rc = smblib_set_prop_batt_capacity(chg, val);
+		break;
+	case POWER_SUPPLY_PROP_PARALLEL_DISABLE:
+		vote(chg->pl_disable_votable, USER_VOTER, (bool)val->intval, 0);
+		break;
+	case POWER_SUPPLY_PROP_VOLTAGE_MAX:
+		chg->batt_profile_fv_uv = val->intval;
+		vote(chg->fv_votable, BATT_PROFILE_VOTER, true, val->intval);
+		break;
+	case POWER_SUPPLY_PROP_CHARGE_QNOVO_ENABLE:
+		rc = smblib_set_prop_charge_qnovo_enable(chg, val);
+		break;
+	case POWER_SUPPLY_PROP_VOLTAGE_QNOVO:
+		if (val->intval == -EINVAL) {
+			vote(chg->fv_votable, BATT_PROFILE_VOTER,
+					true, chg->batt_profile_fv_uv);
+			vote(chg->fv_votable, QNOVO_VOTER, false, 0);
+		} else {
+			vote(chg->fv_votable, QNOVO_VOTER, true, val->intval);
+			vote(chg->fv_votable, BATT_PROFILE_VOTER, false, 0);
+		}
+		break;
+	case POWER_SUPPLY_PROP_CURRENT_QNOVO:
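+		/*
+		 * Keep parallel charging disabled while Qnovo requests less
+		 * than 2 A; a value of -EINVAL drops Qnovo's vote and
+		 * restores the battery-profile FCC.
+		 */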
+		vote(chg->pl_disable_votable, PL_QNOVO_VOTER,
+			val->intval != -EINVAL && val->intval < 2000000, 0);
+		if (val->intval == -EINVAL) {
+			vote(chg->fcc_votable, BATT_PROFILE_VOTER,
+					true, chg->batt_profile_fcc_ua);
+			vote(chg->fcc_votable, QNOVO_VOTER, false, 0);
+		} else {
+			vote(chg->fcc_votable, QNOVO_VOTER, true, val->intval);
+			vote(chg->fcc_votable, BATT_PROFILE_VOTER, false, 0);
+		}
+		break;
+	case POWER_SUPPLY_PROP_STEP_CHARGING_ENABLED:
+		chg->step_chg_enabled = !!val->intval;
+		break;
+	case POWER_SUPPLY_PROP_SW_JEITA_ENABLED:
+		if (chg->sw_jeita_enabled != (!!val->intval)) {
+			rc = smblib_disable_hw_jeita(chg, !!val->intval);
+			if (rc == 0)
+				chg->sw_jeita_enabled = !!val->intval;
+		}
+		break;
+	case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX:
+		chg->batt_profile_fcc_ua = val->intval;
+		vote(chg->fcc_votable, BATT_PROFILE_VOTER, true, val->intval);
+		break;
+	case POWER_SUPPLY_PROP_SET_SHIP_MODE:
+		/* Not in ship mode as long as the device is active */
+		if (!val->intval)
+			break;
+		if (chg->pl.psy)
+			power_supply_set_property(chg->pl.psy,
+				POWER_SUPPLY_PROP_SET_SHIP_MODE, val);
+		rc = smblib_set_prop_ship_mode(chg, val);
+		break;
+	case POWER_SUPPLY_PROP_RERUN_AICL:
+		rc = smblib_rerun_aicl(chg);
+		break;
+	case POWER_SUPPLY_PROP_DP_DM:
+		rc = smblib_dp_dm(chg, val->intval);
+		break;
+	case POWER_SUPPLY_PROP_INPUT_CURRENT_LIMITED:
+		rc = smblib_set_prop_input_current_limited(chg, val);
+		break;
+	default:
+		rc = -EINVAL;
+	}
+
+	return rc;
+}
+
+static int smb2_batt_prop_is_writeable(struct power_supply *psy,
+		enum power_supply_property psp)
+{
+	switch (psp) {
+	case POWER_SUPPLY_PROP_INPUT_SUSPEND:
+	case POWER_SUPPLY_PROP_SYSTEM_TEMP_LEVEL:
+	case POWER_SUPPLY_PROP_CAPACITY:
+	case POWER_SUPPLY_PROP_PARALLEL_DISABLE:
+	case POWER_SUPPLY_PROP_DP_DM:
+	case POWER_SUPPLY_PROP_RERUN_AICL:
+	case POWER_SUPPLY_PROP_INPUT_CURRENT_LIMITED:
+	case POWER_SUPPLY_PROP_STEP_CHARGING_ENABLED:
+	case POWER_SUPPLY_PROP_SW_JEITA_ENABLED:
+		return 1;
+	default:
+		break;
+	}
+
+	return 0;
+}
+
+static const struct power_supply_desc batt_psy_desc = {
+	.name = "battery",
+	.type = POWER_SUPPLY_TYPE_BATTERY,
+	.properties = smb2_batt_props,
+	.num_properties = ARRAY_SIZE(smb2_batt_props),
+	.get_property = smb2_batt_get_prop,
+	.set_property = smb2_batt_set_prop,
+	.property_is_writeable = smb2_batt_prop_is_writeable,
+};
+
+static int smb2_init_batt_psy(struct smb2 *chip)
+{
+	struct power_supply_config batt_cfg = {};
+	struct smb_charger *chg = &chip->chg;
+	int rc = 0;
+
+	batt_cfg.drv_data = chg;
+	batt_cfg.of_node = chg->dev->of_node;
+	chg->batt_psy = power_supply_register(chg->dev,
+						   &batt_psy_desc,
+						   &batt_cfg);
+	if (IS_ERR(chg->batt_psy)) {
+		pr_err("Couldn't register battery power supply\n");
+		return PTR_ERR(chg->batt_psy);
+	}
+
+	return rc;
+}
+
+/*******************************
+ * VBUS REGULATOR REGISTRATION *
+ *******************************/
+
+static struct regulator_ops smb2_vbus_reg_ops = {
+	.enable = smblib_vbus_regulator_enable,
+	.disable = smblib_vbus_regulator_disable,
+	.is_enabled = smblib_vbus_regulator_is_enabled,
+};
+
+static int smb2_init_vbus_regulator(struct smb2 *chip)
+{
+	struct smb_charger *chg = &chip->chg;
+	struct regulator_config cfg = {};
+	int rc = 0;
+
+	chg->vbus_vreg = devm_kzalloc(chg->dev, sizeof(*chg->vbus_vreg),
+				      GFP_KERNEL);
+	if (!chg->vbus_vreg)
+		return -ENOMEM;
+
+	cfg.dev = chg->dev;
+	cfg.driver_data = chip;
+
+	chg->vbus_vreg->rdesc.owner = THIS_MODULE;
+	chg->vbus_vreg->rdesc.type = REGULATOR_VOLTAGE;
+	chg->vbus_vreg->rdesc.ops = &smb2_vbus_reg_ops;
+	chg->vbus_vreg->rdesc.of_match = "qcom,smb2-vbus";
+	chg->vbus_vreg->rdesc.name = "qcom,smb2-vbus";
+
+	chg->vbus_vreg->rdev = devm_regulator_register(chg->dev,
+						&chg->vbus_vreg->rdesc, &cfg);
+	if (IS_ERR(chg->vbus_vreg->rdev)) {
+		rc = PTR_ERR(chg->vbus_vreg->rdev);
+		chg->vbus_vreg->rdev = NULL;
+		if (rc != -EPROBE_DEFER)
+			pr_err("Couldn't register VBUS regualtor rc=%d\n", rc);
+	}
+
+	return rc;
+}
+
+/********************************
+ * VCONN REGULATOR REGISTRATION *
+ ********************************/
+
+static struct regulator_ops smb2_vconn_reg_ops = {
+	.enable = smblib_vconn_regulator_enable,
+	.disable = smblib_vconn_regulator_disable,
+	.is_enabled = smblib_vconn_regulator_is_enabled,
+};
+
+static int smb2_init_vconn_regulator(struct smb2 *chip)
+{
+	struct smb_charger *chg = &chip->chg;
+	struct regulator_config cfg = {};
+	int rc = 0;
+
+	if (chg->micro_usb_mode)
+		return 0;
+
+	chg->vconn_vreg = devm_kzalloc(chg->dev, sizeof(*chg->vconn_vreg),
+				      GFP_KERNEL);
+	if (!chg->vconn_vreg)
+		return -ENOMEM;
+
+	cfg.dev = chg->dev;
+	cfg.driver_data = chip;
+
+	chg->vconn_vreg->rdesc.owner = THIS_MODULE;
+	chg->vconn_vreg->rdesc.type = REGULATOR_VOLTAGE;
+	chg->vconn_vreg->rdesc.ops = &smb2_vconn_reg_ops;
+	chg->vconn_vreg->rdesc.of_match = "qcom,smb2-vconn";
+	chg->vconn_vreg->rdesc.name = "qcom,smb2-vconn";
+
+	chg->vconn_vreg->rdev = devm_regulator_register(chg->dev,
+						&chg->vconn_vreg->rdesc, &cfg);
+	if (IS_ERR(chg->vconn_vreg->rdev)) {
+		rc = PTR_ERR(chg->vconn_vreg->rdev);
+		chg->vconn_vreg->rdev = NULL;
+		if (rc != -EPROBE_DEFER)
+			pr_err("Couldn't register VCONN regualtor rc=%d\n", rc);
+	}
+
+	return rc;
+}
+
+/***************************
+ * HARDWARE INITIALIZATION *
+ ***************************/
+static int smb2_config_wipower_input_power(struct smb2 *chip, int uw)
+{
+	int rc;
+	int ua;
+	struct smb_charger *chg = &chip->chg;
+	s64 nw = (s64)uw * 1000;
+
+	if (uw < 0)
+		return 0;
+
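+	/*
+	 * P[nW] = V[mV] * I[uA], so dividing the nanowatt budget by each
+	 * zone's maximum millivolts yields that zone's input current
+	 * limit in microamps.
+	 */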
+	ua = div_s64(nw, ZIN_ICL_PT_MAX_MV);
+	rc = smblib_set_charge_param(chg, &chg->param.dc_icl_pt_lv, ua);
+	if (rc < 0) {
+		pr_err("Couldn't configure dc_icl_pt_lv rc = %d\n", rc);
+		return rc;
+	}
+
+	ua = div_s64(nw, ZIN_ICL_PT_HV_MAX_MV);
+	rc = smblib_set_charge_param(chg, &chg->param.dc_icl_pt_hv, ua);
+	if (rc < 0) {
+		pr_err("Couldn't configure dc_icl_pt_hv rc = %d\n", rc);
+		return rc;
+	}
+
+	ua = div_s64(nw, ZIN_ICL_LV_MAX_MV);
+	rc = smblib_set_charge_param(chg, &chg->param.dc_icl_div2_lv, ua);
+	if (rc < 0) {
+		pr_err("Couldn't configure dc_icl_div2_lv rc = %d\n", rc);
+		return rc;
+	}
+
+	ua = div_s64(nw, ZIN_ICL_MID_LV_MAX_MV);
+	rc = smblib_set_charge_param(chg, &chg->param.dc_icl_div2_mid_lv, ua);
+	if (rc < 0) {
+		pr_err("Couldn't configure dc_icl_div2_mid_lv rc = %d\n", rc);
+		return rc;
+	}
+
+	ua = div_s64(nw, ZIN_ICL_MID_HV_MAX_MV);
+	rc = smblib_set_charge_param(chg, &chg->param.dc_icl_div2_mid_hv, ua);
+	if (rc < 0) {
+		pr_err("Couldn't configure dc_icl_div2_mid_hv rc = %d\n", rc);
+		return rc;
+	}
+
+	ua = div_s64(nw, ZIN_ICL_HV_MAX_MV);
+	rc = smblib_set_charge_param(chg, &chg->param.dc_icl_div2_hv, ua);
+	if (rc < 0) {
+		pr_err("Couldn't configure dc_icl_div2_hv rc = %d\n", rc);
+		return rc;
+	}
+
+	return 0;
+}
+
+static int smb2_configure_typec(struct smb_charger *chg)
+{
+	int rc;
+
+	/*
+	 * trigger the usb-typec-change interrupt only when the CC state
+	 * changes
+	 */
+	rc = smblib_write(chg, TYPE_C_INTRPT_ENB_REG,
+			  TYPEC_CCSTATE_CHANGE_INT_EN_BIT);
+	if (rc < 0) {
+		dev_err(chg->dev,
+			"Couldn't configure Type-C interrupts rc=%d\n", rc);
+		return rc;
+	}
+
+	/*
+	 * disable Type-C factory mode and stay in Attached.SRC state when VCONN
+	 * over-current happens
+	 */
+	rc = smblib_masked_write(chg, TYPE_C_CFG_REG,
+			FACTORY_MODE_DETECTION_EN_BIT | VCONN_OC_CFG_BIT, 0);
+	if (rc < 0) {
+		dev_err(chg->dev, "Couldn't configure Type-C rc=%d\n", rc);
+		return rc;
+	}
+
+	/* increase VCONN softstart */
+	rc = smblib_masked_write(chg, TYPE_C_CFG_2_REG,
+			VCONN_SOFTSTART_CFG_MASK, VCONN_SOFTSTART_CFG_MASK);
+	if (rc < 0) {
+		dev_err(chg->dev, "Couldn't increase VCONN softstart rc=%d\n",
+			rc);
+		return rc;
+	}
+
+	/* disable try.SINK mode and legacy cable IRQs */
+	rc = smblib_masked_write(chg, TYPE_C_CFG_3_REG, EN_TRYSINK_MODE_BIT |
+				TYPEC_NONCOMPLIANT_LEGACY_CABLE_INT_EN_BIT |
+				TYPEC_LEGACY_CABLE_INT_EN_BIT, 0);
+	if (rc < 0) {
+		dev_err(chg->dev, "Couldn't set Type-C config rc=%d\n", rc);
+		return rc;
+	}
+
+	return rc;
+}
+
+static int smb2_disable_typec(struct smb_charger *chg)
+{
+	int rc;
+
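+	/*
+	 * The FSM is parked in idle around each mode change: first while
+	 * re-selecting Type-C mode, then again while switching the port
+	 * over to micro-USB, with settling delays at every step.
+	 */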
+	/* Move to typeC mode */
+	/* configure FSM in idle state and disable UFP_ENABLE bit */
+	rc = smblib_masked_write(chg, TYPE_C_INTRPT_ENB_SOFTWARE_CTRL_REG,
+			TYPEC_DISABLE_CMD_BIT | UFP_EN_CMD_BIT,
+			TYPEC_DISABLE_CMD_BIT);
+	if (rc < 0) {
+		dev_err(chg->dev, "Couldn't put FSM in idle rc=%d\n", rc);
+		return rc;
+	}
+
+	/* wait for FSM to enter idle state */
+	msleep(200);
+	/* configure TypeC mode */
+	rc = smblib_masked_write(chg, TYPE_C_CFG_REG,
+			TYPE_C_OR_U_USB_BIT, 0);
+	if (rc < 0) {
+		dev_err(chg->dev, "Couldn't enable micro USB mode rc=%d\n", rc);
+		return rc;
+	}
+
+	/* wait for mode change before enabling FSM */
+	usleep_range(10000, 11000);
+	/* release FSM from idle state */
+	rc = smblib_masked_write(chg, TYPE_C_INTRPT_ENB_SOFTWARE_CTRL_REG,
+			TYPEC_DISABLE_CMD_BIT, 0);
+	if (rc < 0) {
+		dev_err(chg->dev, "Couldn't release FSM rc=%d\n", rc);
+		return rc;
+	}
+
+	/* wait for FSM to start */
+	msleep(100);
+	/* move to uUSB mode */
+	/* configure FSM in idle state */
+	rc = smblib_masked_write(chg, TYPE_C_INTRPT_ENB_SOFTWARE_CTRL_REG,
+			TYPEC_DISABLE_CMD_BIT, TYPEC_DISABLE_CMD_BIT);
+	if (rc < 0) {
+		dev_err(chg->dev, "Couldn't put FSM in idle rc=%d\n", rc);
+		return rc;
+	}
+
+	/* wait for FSM to enter idle state */
+	msleep(200);
+	/* configure micro USB mode */
+	rc = smblib_masked_write(chg, TYPE_C_CFG_REG,
+			TYPE_C_OR_U_USB_BIT, TYPE_C_OR_U_USB_BIT);
+	if (rc < 0) {
+		dev_err(chg->dev, "Couldn't enable micro USB mode rc=%d\n", rc);
+		return rc;
+	}
+
+	/* wait for mode change before enabling FSM */
+	usleep_range(10000, 11000);
+	/* release FSM from idle state */
+	rc = smblib_masked_write(chg, TYPE_C_INTRPT_ENB_SOFTWARE_CTRL_REG,
+			TYPEC_DISABLE_CMD_BIT, 0);
+	if (rc < 0) {
+		dev_err(chg->dev, "Couldn't release FSM rc=%d\n", rc);
+		return rc;
+	}
+
+	return rc;
+}
+
+static int smb2_init_hw(struct smb2 *chip)
+{
+	struct smb_charger *chg = &chip->chg;
+	int rc;
+	u8 stat, val;
+
+	if (chip->dt.no_battery)
+		chg->fake_capacity = 50;
+
+	if (chg->batt_profile_fcc_ua < 0)
+		smblib_get_charge_param(chg, &chg->param.fcc,
+				&chg->batt_profile_fcc_ua);
+
+	if (chg->batt_profile_fv_uv < 0)
+		smblib_get_charge_param(chg, &chg->param.fv,
+				&chg->batt_profile_fv_uv);
+
+	smblib_get_charge_param(chg, &chg->param.usb_icl,
+				&chg->default_icl_ua);
+	if (chip->dt.usb_icl_ua < 0)
+		chip->dt.usb_icl_ua = chg->default_icl_ua;
+
+	if (chip->dt.dc_icl_ua < 0)
+		smblib_get_charge_param(chg, &chg->param.dc_icl,
+					&chip->dt.dc_icl_ua);
+
+	if (chip->dt.min_freq_khz > 0) {
+		chg->param.freq_buck.min_u = chip->dt.min_freq_khz;
+		chg->param.freq_boost.min_u = chip->dt.min_freq_khz;
+	}
+
+	if (chip->dt.max_freq_khz > 0) {
+		chg->param.freq_buck.max_u = chip->dt.max_freq_khz;
+		chg->param.freq_boost.max_u = chip->dt.max_freq_khz;
+	}
+
+	/* set a slower soft start setting for OTG */
+	rc = smblib_masked_write(chg, DC_ENG_SSUPPLY_CFG2_REG,
+				ENG_SSUPPLY_IVREF_OTG_SS_MASK, OTG_SS_SLOW);
+	if (rc < 0) {
+		pr_err("Couldn't set otg soft start rc=%d\n", rc);
+		return rc;
+	}
+
+	/* set OTG current limit */
+	rc = smblib_set_charge_param(chg, &chg->param.otg_cl,
+				(chg->wa_flags & OTG_WA) ?
+				chg->param.otg_cl.min_u : chg->otg_cl_ua);
+	if (rc < 0) {
+		pr_err("Couldn't set otg current limit rc=%d\n", rc);
+		return rc;
+	}
+
+	chg->boost_threshold_ua = chip->dt.boost_threshold_ua;
+
+	rc = smblib_read(chg, APSD_RESULT_STATUS_REG, &stat);
+	if (rc < 0) {
+		pr_err("Couldn't read APSD_RESULT_STATUS rc=%d\n", rc);
+		return rc;
+	}
+
+	smblib_rerun_apsd_if_required(chg);
+
+	/* clear the ICL override if it is set */
+	rc = smblib_icl_override(chg, false);
+	if (rc < 0) {
+		pr_err("Couldn't disable ICL override rc=%d\n", rc);
+		return rc;
+	}
+
+	/* votes must be cast before configuring software control */
+	/* vote 0mA on usb_icl for non battery platforms */
+	vote(chg->usb_icl_votable,
+		DEFAULT_VOTER, chip->dt.no_battery, 0);
+	vote(chg->dc_suspend_votable,
+		DEFAULT_VOTER, chip->dt.no_battery, 0);
+	vote(chg->fcc_votable,
+		BATT_PROFILE_VOTER, true, chg->batt_profile_fcc_ua);
+	vote(chg->fv_votable,
+		BATT_PROFILE_VOTER, true, chg->batt_profile_fv_uv);
+	vote(chg->dc_icl_votable,
+		DEFAULT_VOTER, true, chip->dt.dc_icl_ua);
+	vote(chg->hvdcp_disable_votable_indirect, PD_INACTIVE_VOTER,
+			true, 0);
+	vote(chg->hvdcp_disable_votable_indirect, VBUS_CC_SHORT_VOTER,
+			true, 0);
+	vote(chg->hvdcp_disable_votable_indirect, DEFAULT_VOTER,
+		chip->dt.hvdcp_disable, 0);
+	vote(chg->pd_disallowed_votable_indirect, CC_DETACHED_VOTER,
+			true, 0);
+	vote(chg->pd_disallowed_votable_indirect, HVDCP_TIMEOUT_VOTER,
+			true, 0);
+	vote(chg->pd_disallowed_votable_indirect, MICRO_USB_VOTER,
+			chg->micro_usb_mode, 0);
+	vote(chg->hvdcp_enable_votable, MICRO_USB_VOTER,
+			chg->micro_usb_mode, 0);
+
+	/*
+	 * AICL configuration:
+	 * start from the minimum setting and disable the AICL ADC
+	 */
+	rc = smblib_masked_write(chg, USBIN_AICL_OPTIONS_CFG_REG,
+			USBIN_AICL_START_AT_MAX_BIT
+				| USBIN_AICL_ADC_EN_BIT, 0);
+	if (rc < 0) {
+		dev_err(chg->dev, "Couldn't configure AICL rc=%d\n", rc);
+		return rc;
+	}
+
+	/* Configure charge enable for software control; active high */
+	rc = smblib_masked_write(chg, CHGR_CFG2_REG,
+				 CHG_EN_POLARITY_BIT |
+				 CHG_EN_SRC_BIT, 0);
+	if (rc < 0) {
+		dev_err(chg->dev, "Couldn't configure charger rc=%d\n", rc);
+		return rc;
+	}
+
+	/* enable the charging path */
+	rc = vote(chg->chg_disable_votable, DEFAULT_VOTER, false, 0);
+	if (rc < 0) {
+		dev_err(chg->dev, "Couldn't enable charging rc=%d\n", rc);
+		return rc;
+	}
+
+	if (chg->micro_usb_mode)
+		rc = smb2_disable_typec(chg);
+	else
+		rc = smb2_configure_typec(chg);
+	if (rc < 0) {
+		dev_err(chg->dev,
+			"Couldn't configure Type-C interrupts rc=%d\n", rc);
+		return rc;
+	}
+
+	/* configure VCONN for software control */
+	rc = smblib_masked_write(chg, TYPE_C_INTRPT_ENB_SOFTWARE_CTRL_REG,
+				 VCONN_EN_SRC_BIT | VCONN_EN_VALUE_BIT,
+				 VCONN_EN_SRC_BIT);
+	if (rc < 0) {
+		dev_err(chg->dev,
+			"Couldn't configure VCONN for SW control rc=%d\n", rc);
+		return rc;
+	}
+
+	/* configure VBUS for software control */
+	rc = smblib_masked_write(chg, OTG_CFG_REG, OTG_EN_SRC_CFG_BIT, 0);
+	if (rc < 0) {
+		dev_err(chg->dev,
+			"Couldn't configure VBUS for SW control rc=%d\n", rc);
+		return rc;
+	}
+
+	val = (ilog2(chip->dt.wd_bark_time / 16) << BARK_WDOG_TIMEOUT_SHIFT) &
+						BARK_WDOG_TIMEOUT_MASK;
+	val |= BITE_WDOG_TIMEOUT_8S;
+	rc = smblib_masked_write(chg, SNARL_BARK_BITE_WD_CFG_REG,
+			BITE_WDOG_DISABLE_CHARGING_CFG_BIT |
+			BARK_WDOG_TIMEOUT_MASK | BITE_WDOG_TIMEOUT_MASK,
+			val);
+	if (rc) {
+		pr_err("Couldn't configue WD config rc=%d\n", rc);
+		return rc;
+	}
+
+	/* enable WD BARK and enable it on plugin */
+	rc = smblib_masked_write(chg, WD_CFG_REG,
+			WATCHDOG_TRIGGER_AFP_EN_BIT |
+			WDOG_TIMER_EN_ON_PLUGIN_BIT |
+			BARK_WDOG_INT_EN_BIT,
+			WDOG_TIMER_EN_ON_PLUGIN_BIT |
+			BARK_WDOG_INT_EN_BIT);
+	if (rc) {
+		pr_err("Couldn't configue WD config rc=%d\n", rc);
+		return rc;
+	}
+
+	/* configure wipower watts */
+	rc = smb2_config_wipower_input_power(chip, chip->dt.wipower_max_uw);
+	if (rc < 0) {
+		dev_err(chg->dev, "Couldn't configure wipower rc=%d\n", rc);
+		return rc;
+	}
+
+	/* disable SW STAT override */
+	rc = smblib_masked_write(chg, STAT_CFG_REG,
+				 STAT_SW_OVERRIDE_CFG_BIT, 0);
+	if (rc < 0) {
+		dev_err(chg->dev, "Couldn't disable SW STAT override rc=%d\n",
+			rc);
+		return rc;
+	}
+
+	/* disable h/w autonomous parallel charging control */
+	rc = smblib_masked_write(chg, MISC_CFG_REG,
+				 STAT_PARALLEL_1400MA_EN_CFG_BIT, 0);
+	if (rc < 0) {
+		dev_err(chg->dev,
+			"Couldn't disable h/w autonomous parallel control rc=%d\n",
+			rc);
+		return rc;
+	}
+
+	/*
+	 * allow the DRP.DFP time to run over by an extra tPDdebounce period
+	 */
+	rc = smblib_masked_write(chg, TAPER_TIMER_SEL_CFG_REG,
+				TYPEC_DRP_DFP_TIME_CFG_BIT,
+				TYPEC_DRP_DFP_TIME_CFG_BIT);
+	if (rc < 0) {
+		dev_err(chg->dev, "Couldn't configure DRP.DFP time rc=%d\n",
+			rc);
+		return rc;
+	}
+
+	/*
+	 * Configure float charger options. Going by the bit names: option 1
+	 * clears the mask (default float charger behaviour), 2 forces SDP
+	 * enumeration, 3 disables charging, and 4 suspends the input.
+	 */
+	switch (chip->dt.float_option) {
+	case 1:
+		rc = smblib_masked_write(chg, USBIN_OPTIONS_2_CFG_REG,
+				FLOAT_OPTIONS_MASK, 0);
+		break;
+	case 2:
+		rc = smblib_masked_write(chg, USBIN_OPTIONS_2_CFG_REG,
+				FLOAT_OPTIONS_MASK, FORCE_FLOAT_SDP_CFG_BIT);
+		break;
+	case 3:
+		rc = smblib_masked_write(chg, USBIN_OPTIONS_2_CFG_REG,
+				FLOAT_OPTIONS_MASK, FLOAT_DIS_CHGING_CFG_BIT);
+		break;
+	case 4:
+		rc = smblib_masked_write(chg, USBIN_OPTIONS_2_CFG_REG,
+				FLOAT_OPTIONS_MASK, SUSPEND_FLOAT_CFG_BIT);
+		break;
+	default:
+		rc = 0;
+		break;
+	}
+
+	if (rc < 0) {
+		dev_err(chg->dev, "Couldn't configure float charger options rc=%d\n",
+			rc);
+		return rc;
+	}
+
+	rc = smblib_read(chg, USBIN_OPTIONS_2_CFG_REG, &chg->float_cfg);
+	if (rc < 0) {
+		dev_err(chg->dev, "Couldn't read float charger options rc=%d\n",
+			rc);
+		return rc;
+	}
+
+	switch (chip->dt.chg_inhibit_thr_mv) {
+	case 50:
+		rc = smblib_masked_write(chg, CHARGE_INHIBIT_THRESHOLD_CFG_REG,
+				CHARGE_INHIBIT_THRESHOLD_MASK,
+				CHARGE_INHIBIT_THRESHOLD_50MV);
+		break;
+	case 100:
+		rc = smblib_masked_write(chg, CHARGE_INHIBIT_THRESHOLD_CFG_REG,
+				CHARGE_INHIBIT_THRESHOLD_MASK,
+				CHARGE_INHIBIT_THRESHOLD_100MV);
+		break;
+	case 200:
+		rc = smblib_masked_write(chg, CHARGE_INHIBIT_THRESHOLD_CFG_REG,
+				CHARGE_INHIBIT_THRESHOLD_MASK,
+				CHARGE_INHIBIT_THRESHOLD_200MV);
+		break;
+	case 300:
+		rc = smblib_masked_write(chg, CHARGE_INHIBIT_THRESHOLD_CFG_REG,
+				CHARGE_INHIBIT_THRESHOLD_MASK,
+				CHARGE_INHIBIT_THRESHOLD_300MV);
+		break;
+	case 0:
+		rc = smblib_masked_write(chg, CHGR_CFG2_REG,
+				CHARGER_INHIBIT_BIT, 0);
+		break;
+	default:
+		break;
+	}
+
+	if (rc < 0) {
+		dev_err(chg->dev, "Couldn't configure charge inhibit threshold rc=%d\n",
+			rc);
+		return rc;
+	}
+
+	if (chip->dt.auto_recharge_soc) {
+		rc = smblib_masked_write(chg, FG_UPDATE_CFG_2_SEL_REG,
+				SOC_LT_CHG_RECHARGE_THRESH_SEL_BIT |
+				VBT_LT_CHG_RECHARGE_THRESH_SEL_BIT,
+				VBT_LT_CHG_RECHARGE_THRESH_SEL_BIT);
+		if (rc < 0) {
+			dev_err(chg->dev, "Couldn't configure FG_UPDATE_CFG2_SEL_REG rc=%d\n",
+				rc);
+			return rc;
+		}
+	} else {
+		rc = smblib_masked_write(chg, FG_UPDATE_CFG_2_SEL_REG,
+				SOC_LT_CHG_RECHARGE_THRESH_SEL_BIT |
+				VBT_LT_CHG_RECHARGE_THRESH_SEL_BIT,
+				SOC_LT_CHG_RECHARGE_THRESH_SEL_BIT);
+		if (rc < 0) {
+			dev_err(chg->dev, "Couldn't configure FG_UPDATE_CFG2_SEL_REG rc=%d\n",
+				rc);
+			return rc;
+		}
+	}
+
+	if (chg->sw_jeita_enabled) {
+		rc = smblib_disable_hw_jeita(chg, true);
+		if (rc < 0) {
+			dev_err(chg->dev, "Couldn't set hw jeita rc=%d\n", rc);
+			return rc;
+		}
+	}
+
+	return rc;
+}
+
+static int smb2_post_init(struct smb2 *chip)
+{
+	struct smb_charger *chg = &chip->chg;
+	int rc;
+
+	/*
+	 * In case the USB path is suspended, we would have missed disabling
+	 * the ICL change interrupt because the interrupt may not have been
+	 * requested yet.
+	 */
+	rerun_election(chg->usb_icl_votable);
+
+	/* configure power role for dual-role */
+	rc = smblib_masked_write(chg, TYPE_C_INTRPT_ENB_SOFTWARE_CTRL_REG,
+				 TYPEC_POWER_ROLE_CMD_MASK, 0);
+	if (rc < 0) {
+		dev_err(chg->dev,
+			"Couldn't configure power role for DRP rc=%d\n", rc);
+		return rc;
+	}
+
+	rerun_election(chg->usb_irq_enable_votable);
+
+	return 0;
+}
+
+static int smb2_chg_config_init(struct smb2 *chip)
+{
+	struct smb_charger *chg = &chip->chg;
+	struct pmic_revid_data *pmic_rev_id;
+	struct device_node *revid_dev_node;
+
+	revid_dev_node = of_parse_phandle(chip->chg.dev->of_node,
+					  "qcom,pmic-revid", 0);
+	if (!revid_dev_node) {
+		pr_err("Missing qcom,pmic-revid property\n");
+		return -EINVAL;
+	}
+
+	pmic_rev_id = get_revid_data(revid_dev_node);
+	if (IS_ERR_OR_NULL(pmic_rev_id)) {
+		/*
+		 * the revid peripheral must be registered, any failure
+		 * here only indicates that the rev-id module has not
+		 * probed yet.
+		 */
+		return -EPROBE_DEFER;
+	}
+
+	switch (pmic_rev_id->pmic_subtype) {
+	case PMI8998_SUBTYPE:
+		chip->chg.smb_version = PMI8998_SUBTYPE;
+		chip->chg.wa_flags |= BOOST_BACK_WA | QC_AUTH_INTERRUPT_WA_BIT;
+		if (pmic_rev_id->rev4 == PMI8998_V1P1_REV4) /* PMI rev 1.1 */
+			chg->wa_flags |= QC_CHARGER_DETECTION_WA_BIT;
+		if (pmic_rev_id->rev4 == PMI8998_V2P0_REV4) /* PMI rev 2.0 */
+			chg->wa_flags |= TYPEC_CC2_REMOVAL_WA_BIT;
+		chg->chg_freq.freq_5V		= 600;
+		chg->chg_freq.freq_6V_8V	= 800;
+		chg->chg_freq.freq_9V		= 1000;
+		chg->chg_freq.freq_12V		= 1200;
+		chg->chg_freq.freq_removal	= 1000;
+		chg->chg_freq.freq_below_otg_threshold = 2000;
+		chg->chg_freq.freq_above_otg_threshold = 800;
+		break;
+	case PM660_SUBTYPE:
+		chip->chg.smb_version = PM660_SUBTYPE;
+		chip->chg.wa_flags |= BOOST_BACK_WA | OTG_WA | OV_IRQ_WA_BIT;
+		chg->param.freq_buck = pm660_params.freq_buck;
+		chg->param.freq_boost = pm660_params.freq_boost;
+		chg->chg_freq.freq_5V		= 650;
+		chg->chg_freq.freq_6V_8V	= 850;
+		chg->chg_freq.freq_9V		= 1050;
+		chg->chg_freq.freq_12V		= 1200;
+		chg->chg_freq.freq_removal	= 1050;
+		chg->chg_freq.freq_below_otg_threshold = 1600;
+		chg->chg_freq.freq_above_otg_threshold = 800;
+		break;
+	default:
+		pr_err("PMIC subtype %d not supported\n",
+				pmic_rev_id->pmic_subtype);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/****************************
+ * DETERMINE INITIAL STATUS *
+ ****************************/
+
+static int smb2_determine_initial_status(struct smb2 *chip)
+{
+	struct smb_irq_data irq_data = {chip, "determine-initial-status"};
+	struct smb_charger *chg = &chip->chg;
+
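+	/*
+	 * Invoke each handler once by hand so the driver's cached state
+	 * matches whatever the hardware latched before the IRQs were
+	 * requested.
+	 */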
+	if (chg->bms_psy)
+		smblib_suspend_on_debug_battery(chg);
+	smblib_handle_usb_plugin(0, &irq_data);
+	smblib_handle_usb_typec_change(0, &irq_data);
+	smblib_handle_usb_source_change(0, &irq_data);
+	smblib_handle_chg_state_change(0, &irq_data);
+	smblib_handle_icl_change(0, &irq_data);
+	smblib_handle_batt_temp_changed(0, &irq_data);
+	smblib_handle_wdog_bark(0, &irq_data);
+
+	return 0;
+}
+
+/**************************
+ * INTERRUPT REGISTRATION *
+ **************************/
+
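+/*
+ * Entries with a NULL handler are intentionally left unrequested by
+ * smb2_request_interrupt(); entries flagged "wake" are armed as wakeup
+ * sources.
+ */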
+static struct smb_irq_info smb2_irqs[] = {
+/* CHARGER IRQs */
+	[CHG_ERROR_IRQ] = {
+		.name		= "chg-error",
+		.handler	= smblib_handle_debug,
+	},
+	[CHG_STATE_CHANGE_IRQ] = {
+		.name		= "chg-state-change",
+		.handler	= smblib_handle_chg_state_change,
+		.wake		= true,
+	},
+	[STEP_CHG_STATE_CHANGE_IRQ] = {
+		.name		= "step-chg-state-change",
+		.handler	= NULL,
+	},
+	[STEP_CHG_SOC_UPDATE_FAIL_IRQ] = {
+		.name		= "step-chg-soc-update-fail",
+		.handler	= NULL,
+	},
+	[STEP_CHG_SOC_UPDATE_REQ_IRQ] = {
+		.name		= "step-chg-soc-update-request",
+		.handler	= NULL,
+	},
+/* OTG IRQs */
+	[OTG_FAIL_IRQ] = {
+		.name		= "otg-fail",
+		.handler	= smblib_handle_debug,
+	},
+	[OTG_OVERCURRENT_IRQ] = {
+		.name		= "otg-overcurrent",
+		.handler	= smblib_handle_otg_overcurrent,
+	},
+	[OTG_OC_DIS_SW_STS_IRQ] = {
+		.name		= "otg-oc-dis-sw-sts",
+		.handler	= smblib_handle_debug,
+	},
+	[TESTMODE_CHANGE_DET_IRQ] = {
+		.name		= "testmode-change-detect",
+		.handler	= smblib_handle_debug,
+	},
+/* BATTERY IRQs */
+	[BATT_TEMP_IRQ] = {
+		.name		= "bat-temp",
+		.handler	= smblib_handle_batt_temp_changed,
+		.wake		= true,
+	},
+	[BATT_OCP_IRQ] = {
+		.name		= "bat-ocp",
+		.handler	= smblib_handle_batt_psy_changed,
+	},
+	[BATT_OV_IRQ] = {
+		.name		= "bat-ov",
+		.handler	= smblib_handle_batt_psy_changed,
+	},
+	[BATT_LOW_IRQ] = {
+		.name		= "bat-low",
+		.handler	= smblib_handle_batt_psy_changed,
+	},
+	[BATT_THERM_ID_MISS_IRQ] = {
+		.name		= "bat-therm-or-id-missing",
+		.handler	= smblib_handle_batt_psy_changed,
+	},
+	[BATT_TERM_MISS_IRQ] = {
+		.name		= "bat-terminal-missing",
+		.handler	= smblib_handle_batt_psy_changed,
+	},
+/* USB INPUT IRQs */
+	[USBIN_COLLAPSE_IRQ] = {
+		.name		= "usbin-collapse",
+		.handler	= smblib_handle_debug,
+	},
+	[USBIN_LT_3P6V_IRQ] = {
+		.name		= "usbin-lt-3p6v",
+		.handler	= smblib_handle_debug,
+	},
+	[USBIN_UV_IRQ] = {
+		.name		= "usbin-uv",
+		.handler	= smblib_handle_usbin_uv,
+	},
+	[USBIN_OV_IRQ] = {
+		.name		= "usbin-ov",
+		.handler	= smblib_handle_debug,
+	},
+	[USBIN_PLUGIN_IRQ] = {
+		.name		= "usbin-plugin",
+		.handler	= smblib_handle_usb_plugin,
+		.wake		= true,
+	},
+	[USBIN_SRC_CHANGE_IRQ] = {
+		.name		= "usbin-src-change",
+		.handler	= smblib_handle_usb_source_change,
+		.wake		= true,
+	},
+	[USBIN_ICL_CHANGE_IRQ] = {
+		.name		= "usbin-icl-change",
+		.handler	= smblib_handle_icl_change,
+		.wake		= true,
+	},
+	[TYPE_C_CHANGE_IRQ] = {
+		.name		= "type-c-change",
+		.handler	= smblib_handle_usb_typec_change,
+		.wake		= true,
+	},
+/* DC INPUT IRQs */
+	[DCIN_COLLAPSE_IRQ] = {
+		.name		= "dcin-collapse",
+		.handler	= smblib_handle_debug,
+	},
+	[DCIN_LT_3P6V_IRQ] = {
+		.name		= "dcin-lt-3p6v",
+		.handler	= smblib_handle_debug,
+	},
+	[DCIN_UV_IRQ] = {
+		.name		= "dcin-uv",
+		.handler	= smblib_handle_debug,
+	},
+	[DCIN_OV_IRQ] = {
+		.name		= "dcin-ov",
+		.handler	= smblib_handle_debug,
+	},
+	[DCIN_PLUGIN_IRQ] = {
+		.name		= "dcin-plugin",
+		.handler	= smblib_handle_dc_plugin,
+		.wake		= true,
+	},
+	[DIV2_EN_DG_IRQ] = {
+		.name		= "div2-en-dg",
+		.handler	= smblib_handle_debug,
+	},
+	[DCIN_ICL_CHANGE_IRQ] = {
+		.name		= "dcin-icl-change",
+		.handler	= smblib_handle_debug,
+	},
+/* MISCELLANEOUS IRQs */
+	[WDOG_SNARL_IRQ] = {
+		.name		= "wdog-snarl",
+		.handler	= NULL,
+	},
+	[WDOG_BARK_IRQ] = {
+		.name		= "wdog-bark",
+		.handler	= smblib_handle_wdog_bark,
+		.wake		= true,
+	},
+	[AICL_FAIL_IRQ] = {
+		.name		= "aicl-fail",
+		.handler	= smblib_handle_debug,
+	},
+	[AICL_DONE_IRQ] = {
+		.name		= "aicl-done",
+		.handler	= smblib_handle_debug,
+	},
+	[HIGH_DUTY_CYCLE_IRQ] = {
+		.name		= "high-duty-cycle",
+		.handler	= smblib_handle_high_duty_cycle,
+		.wake		= true,
+	},
+	[INPUT_CURRENT_LIMIT_IRQ] = {
+		.name		= "input-current-limiting",
+		.handler	= smblib_handle_debug,
+	},
+	[TEMPERATURE_CHANGE_IRQ] = {
+		.name		= "temperature-change",
+		.handler	= smblib_handle_debug,
+	},
+	[SWITCH_POWER_OK_IRQ] = {
+		.name		= "switcher-power-ok",
+		.handler	= smblib_handle_switcher_power_ok,
+		.storm_data	= {true, 1000, 8},
+	},
+};
+
+static int smb2_get_irq_index_byname(const char *irq_name)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(smb2_irqs); i++) {
+		if (strcmp(smb2_irqs[i].name, irq_name) == 0)
+			return i;
+	}
+
+	return -ENOENT;
+}
+
+static int smb2_request_interrupt(struct smb2 *chip,
+				struct device_node *node, const char *irq_name)
+{
+	struct smb_charger *chg = &chip->chg;
+	int rc, irq, irq_index;
+	struct smb_irq_data *irq_data;
+
+	irq = of_irq_get_byname(node, irq_name);
+	if (irq < 0) {
+		pr_err("Couldn't get irq %s byname\n", irq_name);
+		return irq;
+	}
+
+	irq_index = smb2_get_irq_index_byname(irq_name);
+	if (irq_index < 0) {
+		pr_err("%s is not a defined irq\n", irq_name);
+		return irq_index;
+	}
+
+	if (!smb2_irqs[irq_index].handler)
+		return 0;
+
+	irq_data = devm_kzalloc(chg->dev, sizeof(*irq_data), GFP_KERNEL);
+	if (!irq_data)
+		return -ENOMEM;
+
+	irq_data->parent_data = chip;
+	irq_data->name = irq_name;
+	irq_data->storm_data = smb2_irqs[irq_index].storm_data;
+	mutex_init(&irq_data->storm_data.storm_lock);
+
+	rc = devm_request_threaded_irq(chg->dev, irq, NULL,
+					smb2_irqs[irq_index].handler,
+					IRQF_ONESHOT, irq_name, irq_data);
+	if (rc < 0) {
+		pr_err("Couldn't request irq %d\n", irq);
+		return rc;
+	}
+
+	smb2_irqs[irq_index].irq = irq;
+	smb2_irqs[irq_index].irq_data = irq_data;
+	if (smb2_irqs[irq_index].wake)
+		enable_irq_wake(irq);
+
+	return rc;
+}
+
+static int smb2_request_interrupts(struct smb2 *chip)
+{
+	struct smb_charger *chg = &chip->chg;
+	struct device_node *node = chg->dev->of_node;
+	struct device_node *child;
+	int rc = 0;
+	const char *name;
+	struct property *prop;
+
+	for_each_available_child_of_node(node, child) {
+		of_property_for_each_string(child, "interrupt-names",
+					    prop, name) {
+			rc = smb2_request_interrupt(chip, child, name);
+			if (rc < 0)
+				return rc;
+		}
+	}
+	if (chg->irq_info[USBIN_ICL_CHANGE_IRQ].irq)
+		chg->usb_icl_change_irq_enabled = true;
+
+	return rc;
+}
+
+static void smb2_free_interrupts(struct smb_charger *chg)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(smb2_irqs); i++) {
+		if (smb2_irqs[i].irq > 0) {
+			if (smb2_irqs[i].wake)
+				disable_irq_wake(smb2_irqs[i].irq);
+
+			devm_free_irq(chg->dev, smb2_irqs[i].irq,
+					smb2_irqs[i].irq_data);
+		}
+	}
+}
+
+static void smb2_disable_interrupts(struct smb_charger *chg)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(smb2_irqs); i++) {
+		if (smb2_irqs[i].irq > 0)
+			disable_irq(smb2_irqs[i].irq);
+	}
+}
+
+#if defined(CONFIG_DEBUG_FS)
+
+static int force_batt_psy_update_write(void *data, u64 val)
+{
+	struct smb_charger *chg = data;
+
+	power_supply_changed(chg->batt_psy);
+	return 0;
+}
+DEFINE_SIMPLE_ATTRIBUTE(force_batt_psy_update_ops, NULL,
+			force_batt_psy_update_write, "0x%02llx\n");
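+
+/*
+ * Writing any value forces a battery psy change notification, e.g.
+ * (assuming debugfs is mounted at /sys/kernel/debug):
+ *   echo 1 > /sys/kernel/debug/charger/force_batt_psy_update
+ */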
+
+static int force_usb_psy_update_write(void *data, u64 val)
+{
+	struct smb_charger *chg = data;
+
+	power_supply_changed(chg->usb_psy);
+	return 0;
+}
+DEFINE_SIMPLE_ATTRIBUTE(force_usb_psy_update_ops, NULL,
+			force_usb_psy_update_write, "0x%02llx\n");
+
+static int force_dc_psy_update_write(void *data, u64 val)
+{
+	struct smb_charger *chg = data;
+
+	power_supply_changed(chg->dc_psy);
+	return 0;
+}
+DEFINE_SIMPLE_ATTRIBUTE(force_dc_psy_update_ops, NULL,
+			force_dc_psy_update_write, "0x%02llx\n");
+
+static void smb2_create_debugfs(struct smb2 *chip)
+{
+	struct dentry *file;
+
+	chip->dfs_root = debugfs_create_dir("charger", NULL);
+	if (IS_ERR_OR_NULL(chip->dfs_root)) {
+		pr_err("Couldn't create charger debugfs rc=%ld\n",
+			(long)chip->dfs_root);
+		return;
+	}
+
+	file = debugfs_create_file("force_batt_psy_update", S_IRUSR | S_IWUSR,
+			    chip->dfs_root, chip, &force_batt_psy_update_ops);
+	if (IS_ERR_OR_NULL(file))
+		pr_err("Couldn't create force_batt_psy_update file rc=%ld\n",
+			(long)file);
+
+	file = debugfs_create_file("force_usb_psy_update", S_IRUSR | S_IWUSR,
+			    chip->dfs_root, chip, &force_usb_psy_update_ops);
+	if (IS_ERR_OR_NULL(file))
+		pr_err("Couldn't create force_usb_psy_update file rc=%ld\n",
+			(long)file);
+
+	file = debugfs_create_file("force_dc_psy_update", S_IRUSR | S_IWUSR,
+			    chip->dfs_root, chip, &force_dc_psy_update_ops);
+	if (IS_ERR_OR_NULL(file))
+		pr_err("Couldn't create force_dc_psy_update file rc=%ld\n",
+			(long)file);
+}
+
+#else
+
+static void smb2_create_debugfs(struct smb2 *chip)
+{}
+
+#endif
+
+static int smb2_probe(struct platform_device *pdev)
+{
+	struct smb2 *chip;
+	struct smb_charger *chg;
+	int rc = 0;
+	union power_supply_propval val;
+	int usb_present, batt_present, batt_health, batt_charge_type;
+
+	chip = devm_kzalloc(&pdev->dev, sizeof(*chip), GFP_KERNEL);
+	if (!chip)
+		return -ENOMEM;
+
+	chg = &chip->chg;
+	chg->dev = &pdev->dev;
+	chg->param = v1_params;
+	chg->debug_mask = &__debug_mask;
+	chg->try_sink_enabled = &__try_sink_enabled;
+	chg->weak_chg_icl_ua = &__weak_chg_icl_ua;
+	chg->mode = PARALLEL_MASTER;
+	chg->irq_info = smb2_irqs;
+	chg->name = "PMI";
+
+	chg->regmap = dev_get_regmap(chg->dev->parent, NULL);
+	if (!chg->regmap) {
+		pr_err("parent regmap is missing\n");
+		return -EINVAL;
+	}
+
+	rc = smb2_chg_config_init(chip);
+	if (rc < 0) {
+		if (rc != -EPROBE_DEFER)
+			pr_err("Couldn't setup chg_config rc=%d\n", rc);
+		return rc;
+	}
+
+	rc = smb2_parse_dt(chip);
+	if (rc < 0) {
+		pr_err("Couldn't parse device tree rc=%d\n", rc);
+		goto cleanup;
+	}
+
+	rc = smblib_init(chg);
+	if (rc < 0) {
+		pr_err("Smblib_init failed rc=%d\n", rc);
+		goto cleanup;
+	}
+
+	/* set driver data before resources request it */
+	platform_set_drvdata(pdev, chip);
+
+	rc = smb2_init_vbus_regulator(chip);
+	if (rc < 0) {
+		pr_err("Couldn't initialize vbus regulator rc=%d\n",
+			rc);
+		goto cleanup;
+	}
+
+	rc = smb2_init_vconn_regulator(chip);
+	if (rc < 0) {
+		pr_err("Couldn't initialize vconn regulator rc=%d\n",
+				rc);
+		goto cleanup;
+	}
+
+	/* extcon registration */
+	chg->extcon = devm_extcon_dev_allocate(chg->dev, smblib_extcon_cable);
+	if (IS_ERR(chg->extcon)) {
+		rc = PTR_ERR(chg->extcon);
+		dev_err(chg->dev, "failed to allocate extcon device rc=%d\n",
+				rc);
+		goto cleanup;
+	}
+
+	rc = devm_extcon_dev_register(chg->dev, chg->extcon);
+	if (rc < 0) {
+		dev_err(chg->dev, "failed to register extcon device rc=%d\n",
+				rc);
+		goto cleanup;
+	}
+
+	rc = smb2_init_hw(chip);
+	if (rc < 0) {
+		pr_err("Couldn't initialize hardware rc=%d\n", rc);
+		goto cleanup;
+	}
+
+	rc = smb2_init_dc_psy(chip);
+	if (rc < 0) {
+		pr_err("Couldn't initialize dc psy rc=%d\n", rc);
+		goto cleanup;
+	}
+
+	rc = smb2_init_usb_psy(chip);
+	if (rc < 0) {
+		pr_err("Couldn't initialize usb psy rc=%d\n", rc);
+		goto cleanup;
+	}
+
+	rc = smb2_init_usb_main_psy(chip);
+	if (rc < 0) {
+		pr_err("Couldn't initialize usb main psy rc=%d\n", rc);
+		goto cleanup;
+	}
+
+	rc = smb2_init_usb_port_psy(chip);
+	if (rc < 0) {
+		pr_err("Couldn't initialize usb pc_port psy rc=%d\n", rc);
+		goto cleanup;
+	}
+
+	rc = smb2_init_batt_psy(chip);
+	if (rc < 0) {
+		pr_err("Couldn't initialize batt psy rc=%d\n", rc);
+		goto cleanup;
+	}
+
+	rc = smb2_determine_initial_status(chip);
+	if (rc < 0) {
+		pr_err("Couldn't determine initial status rc=%d\n",
+			rc);
+		goto cleanup;
+	}
+
+	rc = smb2_request_interrupts(chip);
+	if (rc < 0) {
+		pr_err("Couldn't request interrupts rc=%d\n", rc);
+		goto cleanup;
+	}
+
+	rc = smb2_post_init(chip);
+	if (rc < 0) {
+		pr_err("Failed in post init rc=%d\n", rc);
+		goto cleanup;
+	}
+
+	smb2_create_debugfs(chip);
+
+	rc = smblib_get_prop_usb_present(chg, &val);
+	if (rc < 0) {
+		pr_err("Couldn't get usb present rc=%d\n", rc);
+		goto cleanup;
+	}
+	usb_present = val.intval;
+
+	rc = smblib_get_prop_batt_present(chg, &val);
+	if (rc < 0) {
+		pr_err("Couldn't get batt present rc=%d\n", rc);
+		goto cleanup;
+	}
+	batt_present = val.intval;
+
+	rc = smblib_get_prop_batt_health(chg, &val);
+	if (rc < 0) {
+		pr_err("Couldn't get batt health rc=%d\n", rc);
+		val.intval = POWER_SUPPLY_HEALTH_UNKNOWN;
+	}
+	batt_health = val.intval;
+
+	rc = smblib_get_prop_batt_charge_type(chg, &val);
+	if (rc < 0) {
+		pr_err("Couldn't get batt charge type rc=%d\n", rc);
+		goto cleanup;
+	}
+	batt_charge_type = val.intval;
+
+	device_init_wakeup(chg->dev, true);
+
+	pr_info("QPNP SMB2 probed successfully usb:present=%d type=%d batt:present = %d health = %d charge = %d\n",
+		usb_present, chg->real_charger_type,
+		batt_present, batt_health, batt_charge_type);
+	return rc;
+
+cleanup:
+	smb2_free_interrupts(chg);
+	if (chg->batt_psy)
+		power_supply_unregister(chg->batt_psy);
+	if (chg->usb_main_psy)
+		power_supply_unregister(chg->usb_main_psy);
+	if (chg->usb_psy)
+		power_supply_unregister(chg->usb_psy);
+	if (chg->usb_port_psy)
+		power_supply_unregister(chg->usb_port_psy);
+	if (chg->dc_psy)
+		power_supply_unregister(chg->dc_psy);
+	if (chg->vconn_vreg && chg->vconn_vreg->rdev)
+		devm_regulator_unregister(chg->dev, chg->vconn_vreg->rdev);
+	if (chg->vbus_vreg && chg->vbus_vreg->rdev)
+		devm_regulator_unregister(chg->dev, chg->vbus_vreg->rdev);
+
+	smblib_deinit(chg);
+
+	platform_set_drvdata(pdev, NULL);
+	return rc;
+}
+
+static int smb2_remove(struct platform_device *pdev)
+{
+	struct smb2 *chip = platform_get_drvdata(pdev);
+	struct smb_charger *chg = &chip->chg;
+
+	power_supply_unregister(chg->batt_psy);
+	power_supply_unregister(chg->usb_main_psy);
+	power_supply_unregister(chg->usb_psy);
+	power_supply_unregister(chg->usb_port_psy);
+	power_supply_unregister(chg->dc_psy);
+	if (chg->vconn_vreg && chg->vconn_vreg->rdev)
+		regulator_unregister(chg->vconn_vreg->rdev);
+	if (chg->vbus_vreg && chg->vbus_vreg->rdev)
+		regulator_unregister(chg->vbus_vreg->rdev);
+
+	platform_set_drvdata(pdev, NULL);
+	return 0;
+}
+
+static void smb2_shutdown(struct platform_device *pdev)
+{
+	struct smb2 *chip = platform_get_drvdata(pdev);
+	struct smb_charger *chg = &chip->chg;
+
+	/* disable all interrupts */
+	smb2_disable_interrupts(chg);
+
+	/* configure power role for UFP */
+	smblib_masked_write(chg, TYPE_C_INTRPT_ENB_SOFTWARE_CTRL_REG,
+				TYPEC_POWER_ROLE_CMD_MASK, UFP_EN_CMD_BIT);
+
+	/* force HVDCP to 5V */
+	smblib_masked_write(chg, USBIN_OPTIONS_1_CFG_REG,
+				HVDCP_AUTONOMOUS_MODE_EN_CFG_BIT, 0);
+	smblib_write(chg, CMD_HVDCP_2_REG, FORCE_5V_BIT);
+
+	/* force enable APSD */
+	smblib_masked_write(chg, USBIN_OPTIONS_1_CFG_REG,
+				 AUTO_SRC_DETECT_BIT, AUTO_SRC_DETECT_BIT);
+}
+
+static const struct of_device_id match_table[] = {
+	{ .compatible = "qcom,qpnp-smb2", },
+	{ },
+};
+
+static struct platform_driver smb2_driver = {
+	.driver		= {
+		.name		= "qcom,qpnp-smb2",
+		.owner		= THIS_MODULE,
+		.of_match_table	= match_table,
+	},
+	.probe		= smb2_probe,
+	.remove		= smb2_remove,
+	.shutdown	= smb2_shutdown,
+};
+module_platform_driver(smb2_driver);
+
+MODULE_DESCRIPTION("QPNP SMB2 Charger Driver");
+MODULE_LICENSE("GPL v2");
diff -Nruw linux-4.4.115/drivers/power/supply/qcom/smb138x-charger.c linux-4.4.115-fbx/drivers/power/supply/qcom/smb138x-charger.c
--- linux-4.4.115/drivers/power/supply/qcom/smb138x-charger.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/power/supply/qcom/smb138x-charger.c	2019-01-22 16:16:26.239271183 +0100
@@ -0,0 +1,1655 @@
+/* Copyright (c) 2016-2017 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "SMB138X: %s: " fmt, __func__
+
+#include <linux/device.h>
+#include <linux/iio/consumer.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
+#include <linux/power_supply.h>
+#include <linux/regmap.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+#include <linux/regulator/of_regulator.h>
+#include <linux/qpnp/qpnp-revid.h>
+#include "smb-reg.h"
+#include "smb-lib.h"
+#include "storm-watch.h"
+#include <linux/pmic-voter.h>
+
+#define SMB138X_DEFAULT_FCC_UA 1000000
+#define SMB138X_DEFAULT_ICL_UA 1500000
+
+/* Registers that are not common to be mentioned in smb-reg.h */
+#define SMB2CHG_MISC_ENG_SDCDC_CFG2	(MISC_BASE + 0xC1)
+#define ENG_SDCDC_SEL_OOB_VTH_BIT	BIT(0)
+
+#define SMB2CHG_MISC_ENG_SDCDC_CFG6	(MISC_BASE + 0xC5)
+#define DEAD_TIME_MASK			GENMASK(7, 4)
+#define HIGH_DEAD_TIME_MASK		GENMASK(7, 4)
+
+#define SMB2CHG_DC_TM_SREFGEN		(DCIN_BASE + 0xE2)
+#define STACKED_DIODE_EN_BIT		BIT(2)
+
+#define TDIE_AVG_COUNT	10
+#define MAX_SPEED_READING_TIMES		5
+
+enum {
+	OOB_COMP_WA_BIT = BIT(0),
+};
+
+static struct smb_params v1_params = {
+	.fcc		= {
+		.name	= "fast charge current",
+		.reg	= FAST_CHARGE_CURRENT_CFG_REG,
+		.min_u	= 0,
+		.max_u	= 6000000,
+		.step_u	= 25000,
+	},
+	.fv		= {
+		.name	= "float voltage",
+		.reg	= FLOAT_VOLTAGE_CFG_REG,
+		.min_u	= 2450000,
+		.max_u	= 4950000,
+		.step_u	= 10000,
+	},
+	.usb_icl	= {
+		.name	= "usb input current limit",
+		.reg	= USBIN_CURRENT_LIMIT_CFG_REG,
+		.min_u	= 0,
+		.max_u	= 6000000,
+		.step_u	= 25000,
+	},
+	.dc_icl		= {
+		.name	= "dc input current limit",
+		.reg	= DCIN_CURRENT_LIMIT_CFG_REG,
+		.min_u	= 0,
+		.max_u	= 6000000,
+		.step_u	= 25000,
+	},
+	.freq_buck	= {
+		.name	= "buck switching frequency",
+		.reg	= CFG_BUCKBOOST_FREQ_SELECT_BUCK_REG,
+		.min_u	= 500,
+		.max_u	= 2000,
+		.step_u	= 100,
+	},
+};
+
+struct smb_dt_props {
+	bool	suspend_input;
+	int	fcc_ua;
+	int	usb_icl_ua;
+	int	dc_icl_ua;
+	int	chg_temp_max_mdegc;
+	int	connector_temp_max_mdegc;
+	int	pl_mode;
+};
+
+struct smb138x {
+	struct smb_charger	chg;
+	struct smb_dt_props	dt;
+	struct power_supply	*parallel_psy;
+	u32			wa_flags;
+};
+
+static int __debug_mask;
+module_param_named(
+	debug_mask, __debug_mask, int, S_IRUSR | S_IWUSR
+);
+
+static irqreturn_t smb138x_handle_slave_chg_state_change(int irq, void *data)
+{
+	struct smb_irq_data *irq_data = data;
+	struct smb138x *chip = irq_data->parent_data;
+
+	if (chip->parallel_psy)
+		power_supply_changed(chip->parallel_psy);
+
+	return IRQ_HANDLED;
+}
+
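+/*
+ * The first MAX_SPEED_READING_TIMES calls take a single die-temperature
+ * reading so early callers get a quick answer; subsequent calls average
+ * TDIE_AVG_COUNT readings to smooth out ADC noise.
+ */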
+static int smb138x_get_prop_charger_temp(struct smb138x *chip,
+				 union power_supply_propval *val)
+{
+	union power_supply_propval pval;
+	int rc = 0, avg = 0, i;
+	struct smb_charger *chg = &chip->chg;
+	int die_avg_count;
+
+	if (chg->temp_speed_reading_count < MAX_SPEED_READING_TIMES) {
+		chg->temp_speed_reading_count++;
+		die_avg_count = 1;
+	} else {
+		die_avg_count = TDIE_AVG_COUNT;
+	}
+
+	for (i = 0; i < die_avg_count; i++) {
+		pval.intval = 0;
+		rc = smblib_get_prop_charger_temp(chg, &pval);
+		if (rc < 0) {
+			pr_err("Couldnt read chg temp at %dth iteration rc = %d\n",
+					i + 1, rc);
+			return rc;
+		}
+		avg += pval.intval;
+	}
+	val->intval = avg / die_avg_count;
+	return rc;
+}
+
+static int smb138x_parse_dt(struct smb138x *chip)
+{
+	struct smb_charger *chg = &chip->chg;
+	struct device_node *node = chg->dev->of_node;
+	int rc;
+
+	if (!node) {
+		pr_err("device tree node missing\n");
+		return -EINVAL;
+	}
+
+	rc = of_property_read_u32(node,
+				"qcom,parallel-mode", &chip->dt.pl_mode);
+	if (rc < 0)
+		chip->dt.pl_mode = POWER_SUPPLY_PL_USBMID_USBMID;
+
+	chip->dt.suspend_input = of_property_read_bool(node,
+				"qcom,suspend-input");
+
+	rc = of_property_read_u32(node,
+				"qcom,fcc-max-ua", &chip->dt.fcc_ua);
+	if (rc < 0)
+		chip->dt.fcc_ua = SMB138X_DEFAULT_FCC_UA;
+
+	rc = of_property_read_u32(node,
+				"qcom,usb-icl-ua", &chip->dt.usb_icl_ua);
+	if (rc < 0)
+		chip->dt.usb_icl_ua = SMB138X_DEFAULT_ICL_UA;
+
+	rc = of_property_read_u32(node,
+				"qcom,dc-icl-ua", &chip->dt.dc_icl_ua);
+	if (rc < 0)
+		chip->dt.dc_icl_ua = SMB138X_DEFAULT_ICL_UA;
+
+	rc = of_property_read_u32(node,
+				"qcom,charger-temp-max-mdegc",
+				&chip->dt.chg_temp_max_mdegc);
+	if (rc < 0)
+		chip->dt.chg_temp_max_mdegc = 80000;
+
+	rc = of_property_read_u32(node,
+				"qcom,connector-temp-max-mdegc",
+				&chip->dt.connector_temp_max_mdegc);
+	if (rc < 0)
+		chip->dt.connector_temp_max_mdegc = 105000;
+
+	return 0;
+}
+
+/************************
+ * USB PSY REGISTRATION *
+ ************************/
+
+static enum power_supply_property smb138x_usb_props[] = {
+	POWER_SUPPLY_PROP_PRESENT,
+	POWER_SUPPLY_PROP_ONLINE,
+	POWER_SUPPLY_PROP_VOLTAGE_MIN,
+	POWER_SUPPLY_PROP_VOLTAGE_MAX,
+	POWER_SUPPLY_PROP_VOLTAGE_NOW,
+	POWER_SUPPLY_PROP_CURRENT_MAX,
+	POWER_SUPPLY_PROP_TYPE,
+	POWER_SUPPLY_PROP_TYPEC_MODE,
+	POWER_SUPPLY_PROP_TYPEC_POWER_ROLE,
+	POWER_SUPPLY_PROP_TYPEC_CC_ORIENTATION,
+	POWER_SUPPLY_PROP_SDP_CURRENT_MAX,
+};
+
+static int smb138x_usb_get_prop(struct power_supply *psy,
+				enum power_supply_property prop,
+				union power_supply_propval *val)
+{
+	struct smb138x *chip = power_supply_get_drvdata(psy);
+	struct smb_charger *chg = &chip->chg;
+	int rc = 0;
+
+	switch (prop) {
+	case POWER_SUPPLY_PROP_PRESENT:
+		rc = smblib_get_prop_usb_present(chg, val);
+		break;
+	case POWER_SUPPLY_PROP_ONLINE:
+		rc = smblib_get_prop_usb_online(chg, val);
+		break;
+	case POWER_SUPPLY_PROP_VOLTAGE_MIN:
+		val->intval = chg->voltage_min_uv;
+		break;
+	case POWER_SUPPLY_PROP_VOLTAGE_MAX:
+		val->intval = chg->voltage_max_uv;
+		break;
+	case POWER_SUPPLY_PROP_VOLTAGE_NOW:
+		rc = smblib_get_prop_usb_voltage_now(chg, val);
+		break;
+	case POWER_SUPPLY_PROP_CURRENT_MAX:
+		val->intval = get_effective_result(chg->usb_icl_votable);
+		break;
+	case POWER_SUPPLY_PROP_TYPE:
+		val->intval = chg->usb_psy_desc.type;
+		break;
+	case POWER_SUPPLY_PROP_TYPEC_MODE:
+		val->intval = chg->typec_mode;
+		break;
+	case POWER_SUPPLY_PROP_TYPEC_POWER_ROLE:
+		rc = smblib_get_prop_typec_power_role(chg, val);
+		break;
+	case POWER_SUPPLY_PROP_TYPEC_CC_ORIENTATION:
+		rc = smblib_get_prop_typec_cc_orientation(chg, val);
+		break;
+	case POWER_SUPPLY_PROP_SDP_CURRENT_MAX:
+		val->intval = get_client_vote(chg->usb_icl_votable,
+					      USB_PSY_VOTER);
+		break;
+	default:
+		pr_err("get prop %d is not supported\n", prop);
+		return -EINVAL;
+	}
+
+	if (rc < 0) {
+		pr_debug("Couldn't get prop %d rc = %d\n", prop, rc);
+		return -ENODATA;
+	}
+
+	return rc;
+}
+
+static int smb138x_usb_set_prop(struct power_supply *psy,
+				enum power_supply_property prop,
+				const union power_supply_propval *val)
+{
+	struct smb138x *chip = power_supply_get_drvdata(psy);
+	struct smb_charger *chg = &chip->chg;
+	int rc = 0;
+
+	switch (prop) {
+	case POWER_SUPPLY_PROP_TYPEC_POWER_ROLE:
+		rc = smblib_set_prop_typec_power_role(chg, val);
+		break;
+	case POWER_SUPPLY_PROP_SDP_CURRENT_MAX:
+		rc = smblib_set_prop_sdp_current_max(chg, val);
+		break;
+	default:
+		pr_err("set prop %d is not supported\n", prop);
+		return -EINVAL;
+	}
+
+	return rc;
+}
+
+static int smb138x_usb_prop_is_writeable(struct power_supply *psy,
+					 enum power_supply_property prop)
+{
+	return 0;
+}
+
+static int smb138x_init_usb_psy(struct smb138x *chip)
+{
+	struct power_supply_config usb_cfg = {};
+	struct smb_charger *chg = &chip->chg;
+
+	chg->usb_psy_desc.name = "usb";
+	chg->usb_psy_desc.type = POWER_SUPPLY_TYPE_UNKNOWN;
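+	/* starts as UNKNOWN; updated once the attached charger is classified */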
+	chg->usb_psy_desc.properties = smb138x_usb_props;
+	chg->usb_psy_desc.num_properties = ARRAY_SIZE(smb138x_usb_props);
+	chg->usb_psy_desc.get_property = smb138x_usb_get_prop;
+	chg->usb_psy_desc.set_property = smb138x_usb_set_prop;
+	chg->usb_psy_desc.property_is_writeable = smb138x_usb_prop_is_writeable;
+
+	usb_cfg.drv_data = chip;
+	usb_cfg.of_node = chg->dev->of_node;
+	chg->usb_psy = devm_power_supply_register(chg->dev,
+						  &chg->usb_psy_desc,
+						  &usb_cfg);
+	if (IS_ERR(chg->usb_psy)) {
+		pr_err("Couldn't register USB power supply\n");
+		return PTR_ERR(chg->usb_psy);
+	}
+
+	return 0;
+}
+
+/*************************
+ * BATT PSY REGISTRATION *
+ *************************/
+
+static enum power_supply_property smb138x_batt_props[] = {
+	POWER_SUPPLY_PROP_INPUT_SUSPEND,
+	POWER_SUPPLY_PROP_STATUS,
+	POWER_SUPPLY_PROP_HEALTH,
+	POWER_SUPPLY_PROP_PRESENT,
+	POWER_SUPPLY_PROP_CHARGE_TYPE,
+	POWER_SUPPLY_PROP_CAPACITY,
+	POWER_SUPPLY_PROP_CHARGER_TEMP,
+	POWER_SUPPLY_PROP_CHARGER_TEMP_MAX,
+	POWER_SUPPLY_PROP_SET_SHIP_MODE,
+};
+
+static int smb138x_batt_get_prop(struct power_supply *psy,
+				 enum power_supply_property prop,
+				 union power_supply_propval *val)
+{
+	struct smb138x *chip = power_supply_get_drvdata(psy);
+	struct smb_charger *chg = &chip->chg;
+	int rc = 0;
+
+	switch (prop) {
+	case POWER_SUPPLY_PROP_STATUS:
+		rc = smblib_get_prop_batt_status(chg, val);
+		break;
+	case POWER_SUPPLY_PROP_HEALTH:
+		rc = smblib_get_prop_batt_health(chg, val);
+		break;
+	case POWER_SUPPLY_PROP_PRESENT:
+		rc = smblib_get_prop_batt_present(chg, val);
+		break;
+	case POWER_SUPPLY_PROP_INPUT_SUSPEND:
+		rc = smblib_get_prop_input_suspend(chg, val);
+		break;
+	case POWER_SUPPLY_PROP_CHARGE_TYPE:
+		rc = smblib_get_prop_batt_charge_type(chg, val);
+		break;
+	case POWER_SUPPLY_PROP_CAPACITY:
+		rc = smblib_get_prop_batt_capacity(chg, val);
+		break;
+	case POWER_SUPPLY_PROP_CHARGER_TEMP:
+		rc = smb138x_get_prop_charger_temp(chip, val);
+		break;
+	case POWER_SUPPLY_PROP_CHARGER_TEMP_MAX:
+		rc = smblib_get_prop_charger_temp_max(chg, val);
+		break;
+	case POWER_SUPPLY_PROP_SET_SHIP_MODE:
+		/* Not in ship mode as long as device is active */
+		val->intval = 0;
+		break;
+	default:
+		pr_err("batt power supply get prop %d not supported\n", prop);
+		return -EINVAL;
+	}
+
+	if (rc < 0) {
+		pr_debug("Couldn't get prop %d rc = %d\n", prop, rc);
+		return -ENODATA;
+	}
+
+	return rc;
+}
+
+static int smb138x_batt_set_prop(struct power_supply *psy,
+				 enum power_supply_property prop,
+				 const union power_supply_propval *val)
+{
+	struct smb138x *chip = power_supply_get_drvdata(psy);
+	struct smb_charger *chg = &chip->chg;
+	int rc = 0;
+
+	switch (prop) {
+	case POWER_SUPPLY_PROP_INPUT_SUSPEND:
+		rc = smblib_set_prop_input_suspend(chg, val);
+		break;
+	case POWER_SUPPLY_PROP_CAPACITY:
+		rc = smblib_set_prop_batt_capacity(chg, val);
+		break;
+	case POWER_SUPPLY_PROP_SET_SHIP_MODE:
+		/* Not in ship mode as long as the device is active */
+		if (!val->intval)
+			break;
+		rc = smblib_set_prop_ship_mode(chg, val);
+		break;
+	default:
+		pr_err("batt power supply set prop %d not supported\n", prop);
+		return -EINVAL;
+	}
+
+	return rc;
+}
+
+static int smb138x_batt_prop_is_writeable(struct power_supply *psy,
+					  enum power_supply_property prop)
+{
+	switch (prop) {
+	case POWER_SUPPLY_PROP_INPUT_SUSPEND:
+	case POWER_SUPPLY_PROP_CAPACITY:
+		return 1;
+	default:
+		break;
+	}
+
+	return 0;
+}
+
+static const struct power_supply_desc batt_psy_desc = {
+	.name			= "battery",
+	.type			= POWER_SUPPLY_TYPE_BATTERY,
+	.properties		= smb138x_batt_props,
+	.num_properties		= ARRAY_SIZE(smb138x_batt_props),
+	.get_property		= smb138x_batt_get_prop,
+	.set_property		= smb138x_batt_set_prop,
+	.property_is_writeable	= smb138x_batt_prop_is_writeable,
+};
+
+static int smb138x_init_batt_psy(struct smb138x *chip)
+{
+	struct power_supply_config batt_cfg = {};
+	struct smb_charger *chg = &chip->chg;
+	int rc = 0;
+
+	batt_cfg.drv_data = chip;
+	batt_cfg.of_node = chg->dev->of_node;
+	chg->batt_psy = devm_power_supply_register(chg->dev,
+						   &batt_psy_desc,
+						   &batt_cfg);
+	if (IS_ERR(chg->batt_psy)) {
+		pr_err("Couldn't register battery power supply\n");
+		return PTR_ERR(chg->batt_psy);
+	}
+
+	return rc;
+}
+
+/*****************************
+ * PARALLEL PSY REGISTRATION *
+ *****************************/
+
+static int smb138x_get_prop_connector_health(struct smb138x *chip)
+{
+	struct smb_charger *chg = &chip->chg;
+	int rc, lb_mdegc, ub_mdegc, rst_mdegc, connector_mdegc;
+
+	if (!chg->iio.connector_temp_chan ||
+		PTR_ERR(chg->iio.connector_temp_chan) == -EPROBE_DEFER)
+		chg->iio.connector_temp_chan = iio_channel_get(chg->dev,
+							"connector_temp");
+
+	if (IS_ERR(chg->iio.connector_temp_chan))
+		return POWER_SUPPLY_HEALTH_UNKNOWN;
+
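+	/*
+	 * The three threshold channels provide the warm, hot and overheat
+	 * boundaries used below to classify the measured connector
+	 * temperature.
+	 */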
+	rc = iio_read_channel_processed(chg->iio.connector_temp_thr1_chan,
+							&lb_mdegc);
+	if (rc < 0) {
+		pr_err("Couldn't read connector lower bound rc=%d\n", rc);
+		return POWER_SUPPLY_HEALTH_UNKNOWN;
+	}
+
+	rc = iio_read_channel_processed(chg->iio.connector_temp_thr2_chan,
+							&ub_mdegc);
+	if (rc < 0) {
+		pr_err("Couldn't read connector upper bound rc=%d\n", rc);
+		return POWER_SUPPLY_HEALTH_UNKNOWN;
+	}
+
+	rc = iio_read_channel_processed(chg->iio.connector_temp_thr3_chan,
+							&rst_mdegc);
+	if (rc < 0) {
+		pr_err("Couldn't read connector reset bound rc=%d\n", rc);
+		return POWER_SUPPLY_HEALTH_UNKNOWN;
+	}
+
+	rc = iio_read_channel_processed(chg->iio.connector_temp_chan,
+							&connector_mdegc);
+	if (rc < 0) {
+		pr_err("Couldn't read connector temperature rc=%d\n", rc);
+		return POWER_SUPPLY_HEALTH_UNKNOWN;
+	}
+
+	if (connector_mdegc < lb_mdegc)
+		return POWER_SUPPLY_HEALTH_COOL;
+	else if (connector_mdegc < ub_mdegc)
+		return POWER_SUPPLY_HEALTH_WARM;
+	else if (connector_mdegc < rst_mdegc)
+		return POWER_SUPPLY_HEALTH_HOT;
+
+	return POWER_SUPPLY_HEALTH_OVERHEAT;
+}
+
+static enum power_supply_property smb138x_parallel_props[] = {
+	POWER_SUPPLY_PROP_CHARGE_TYPE,
+	POWER_SUPPLY_PROP_CHARGING_ENABLED,
+	POWER_SUPPLY_PROP_PIN_ENABLED,
+	POWER_SUPPLY_PROP_INPUT_SUSPEND,
+	POWER_SUPPLY_PROP_INPUT_CURRENT_LIMITED,
+	POWER_SUPPLY_PROP_CURRENT_MAX,
+	POWER_SUPPLY_PROP_VOLTAGE_MAX,
+	POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX,
+	POWER_SUPPLY_PROP_CURRENT_NOW,
+	POWER_SUPPLY_PROP_CHARGER_TEMP,
+	POWER_SUPPLY_PROP_CHARGER_TEMP_MAX,
+	POWER_SUPPLY_PROP_MODEL_NAME,
+	POWER_SUPPLY_PROP_PARALLEL_MODE,
+	POWER_SUPPLY_PROP_CONNECTOR_HEALTH,
+	POWER_SUPPLY_PROP_SET_SHIP_MODE,
+};
+
+static int smb138x_parallel_get_prop(struct power_supply *psy,
+				     enum power_supply_property prop,
+				     union power_supply_propval *val)
+{
+	struct smb138x *chip = power_supply_get_drvdata(psy);
+	struct smb_charger *chg = &chip->chg;
+	int rc = 0;
+	u8 temp;
+
+	switch (prop) {
+	case POWER_SUPPLY_PROP_CHARGE_TYPE:
+		rc = smblib_get_prop_batt_charge_type(chg, val);
+		break;
+	case POWER_SUPPLY_PROP_CHARGING_ENABLED:
+		rc = smblib_read(chg, BATTERY_CHARGER_STATUS_5_REG,
+				 &temp);
+		if (rc >= 0)
+			val->intval = (bool)(temp & CHARGING_ENABLE_BIT);
+		break;
+	case POWER_SUPPLY_PROP_PIN_ENABLED:
+		rc = smblib_read(chg, BATTERY_CHARGER_STATUS_5_REG,
+				 &temp);
+		if (rc >= 0)
+			val->intval = !(temp & DISABLE_CHARGING_BIT);
+		break;
+	case POWER_SUPPLY_PROP_INPUT_SUSPEND:
+		rc = smblib_get_usb_suspend(chg, &val->intval);
+		break;
+	case POWER_SUPPLY_PROP_INPUT_CURRENT_LIMITED:
+		if ((chip->dt.pl_mode == POWER_SUPPLY_PL_USBIN_USBIN)
+		|| (chip->dt.pl_mode == POWER_SUPPLY_PL_USBIN_USBIN_EXT))
+			rc = smblib_get_prop_input_current_limited(chg, val);
+		else
+			val->intval = 0;
+		break;
+	case POWER_SUPPLY_PROP_CURRENT_MAX:
+		if ((chip->dt.pl_mode == POWER_SUPPLY_PL_USBIN_USBIN)
+		|| (chip->dt.pl_mode == POWER_SUPPLY_PL_USBIN_USBIN_EXT))
+			rc = smblib_get_charge_param(chg, &chg->param.usb_icl,
+				&val->intval);
+		else
+			val->intval = 0;
+		break;
+	case POWER_SUPPLY_PROP_VOLTAGE_MAX:
+		rc = smblib_get_charge_param(chg, &chg->param.fv, &val->intval);
+		break;
+	case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX:
+		rc = smblib_get_charge_param(chg, &chg->param.fcc,
+					     &val->intval);
+		break;
+	case POWER_SUPPLY_PROP_CURRENT_NOW:
+		rc = smblib_get_prop_slave_current_now(chg, val);
+		break;
+	case POWER_SUPPLY_PROP_CHARGER_TEMP:
+		rc = smb138x_get_prop_charger_temp(chip, val);
+		break;
+	case POWER_SUPPLY_PROP_CHARGER_TEMP_MAX:
+		rc = smblib_get_prop_charger_temp_max(chg, val);
+		break;
+	case POWER_SUPPLY_PROP_MODEL_NAME:
+		val->strval = "smb138x";
+		break;
+	case POWER_SUPPLY_PROP_PARALLEL_MODE:
+		val->intval = chip->dt.pl_mode;
+		break;
+	case POWER_SUPPLY_PROP_CONNECTOR_HEALTH:
+		val->intval = smb138x_get_prop_connector_health(chip);
+		break;
+	case POWER_SUPPLY_PROP_SET_SHIP_MODE:
+		/* Not in ship mode as long as device is active */
+		val->intval = 0;
+		break;
+	default:
+		pr_err("parallel power supply get prop %d not supported\n",
+			prop);
+		return -EINVAL;
+	}
+
+	if (rc < 0) {
+		pr_debug("Couldn't get prop %d rc = %d\n", prop, rc);
+		return -ENODATA;
+	}
+
+	return rc;
+}
+
+static int smb138x_set_parallel_suspend(struct smb138x *chip, bool suspend)
+{
+	struct smb_charger *chg = &chip->chg;
+	int rc = 0;
+
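+	/*
+	 * Run the watchdog only while the parallel charger is active; if it
+	 * cannot be reconfigured, fall back to suspending the input so the
+	 * parallel path never runs unsupervised.
+	 */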
+	rc = smblib_masked_write(chg, WD_CFG_REG, WDOG_TIMER_EN_BIT,
+				 suspend ? 0 : WDOG_TIMER_EN_BIT);
+	if (rc < 0) {
+		pr_err("Couldn't %s watchdog rc=%d\n",
+		       suspend ? "disable" : "enable", rc);
+		suspend = true;
+	}
+
+	rc = smblib_masked_write(chg, USBIN_CMD_IL_REG, USBIN_SUSPEND_BIT,
+				 suspend ? USBIN_SUSPEND_BIT : 0);
+	if (rc < 0) {
+		pr_err("Couldn't %s parallel charger rc=%d\n",
+		       suspend ? "suspend" : "resume", rc);
+		return rc;
+	}
+
+	return rc;
+}
+
+static int smb138x_parallel_set_prop(struct power_supply *psy,
+				     enum power_supply_property prop,
+				     const union power_supply_propval *val)
+{
+	struct smb138x *chip = power_supply_get_drvdata(psy);
+	struct smb_charger *chg = &chip->chg;
+	int rc = 0;
+
+	switch (prop) {
+	case POWER_SUPPLY_PROP_INPUT_SUSPEND:
+		rc = smb138x_set_parallel_suspend(chip, (bool)val->intval);
+		break;
+	case POWER_SUPPLY_PROP_CURRENT_MAX:
+		if ((chip->dt.pl_mode == POWER_SUPPLY_PL_USBIN_USBIN)
+		|| (chip->dt.pl_mode == POWER_SUPPLY_PL_USBIN_USBIN_EXT))
+			rc = smblib_set_charge_param(chg, &chg->param.usb_icl,
+				val->intval);
+		break;
+	case POWER_SUPPLY_PROP_VOLTAGE_MAX:
+		rc = smblib_set_charge_param(chg, &chg->param.fv, val->intval);
+		break;
+	case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX:
+		rc = smblib_set_charge_param(chg, &chg->param.fcc, val->intval);
+		break;
+	case POWER_SUPPLY_PROP_SET_SHIP_MODE:
+		/* Not in ship mode as long as the device is active */
+		if (!val->intval)
+			break;
+		rc = smblib_set_prop_ship_mode(chg, val);
+		break;
+	default:
+		pr_debug("parallel power supply set prop %d not supported\n",
+			prop);
+		return -EINVAL;
+	}
+
+	return rc;
+}
+
+static int smb138x_parallel_prop_is_writeable(struct power_supply *psy,
+					      enum power_supply_property prop)
+{
+	return 0;
+}
+
+static const struct power_supply_desc parallel_psy_desc = {
+	.name			= "parallel",
+	.type			= POWER_SUPPLY_TYPE_PARALLEL,
+	.properties		= smb138x_parallel_props,
+	.num_properties		= ARRAY_SIZE(smb138x_parallel_props),
+	.get_property		= smb138x_parallel_get_prop,
+	.set_property		= smb138x_parallel_set_prop,
+	.property_is_writeable	= smb138x_parallel_prop_is_writeable,
+};
+
+static int smb138x_init_parallel_psy(struct smb138x *chip)
+{
+	struct power_supply_config parallel_cfg = {};
+	struct smb_charger *chg = &chip->chg;
+
+	parallel_cfg.drv_data = chip;
+	parallel_cfg.of_node = chg->dev->of_node;
+	chip->parallel_psy = devm_power_supply_register(chg->dev,
+						   &parallel_psy_desc,
+						   &parallel_cfg);
+	if (IS_ERR(chip->parallel_psy)) {
+		pr_err("Couldn't register parallel power supply\n");
+		return PTR_ERR(chip->parallel_psy);
+	}
+
+	return 0;
+}
+
+/*******************************
+ * VBUS REGULATOR REGISTRATION *
+ *******************************/
+
+static struct regulator_ops smb138x_vbus_reg_ops = {
+	.enable		= smblib_vbus_regulator_enable,
+	.disable	= smblib_vbus_regulator_disable,
+	.is_enabled	= smblib_vbus_regulator_is_enabled,
+};
+
+static int smb138x_init_vbus_regulator(struct smb138x *chip)
+{
+	struct smb_charger *chg = &chip->chg;
+	struct regulator_config cfg = {};
+	int rc = 0;
+
+	chg->vbus_vreg = devm_kzalloc(chg->dev, sizeof(*chg->vbus_vreg),
+				      GFP_KERNEL);
+	if (!chg->vbus_vreg)
+		return -ENOMEM;
+
+	cfg.dev = chg->dev;
+	cfg.driver_data = chip;
+
+	chg->vbus_vreg->rdesc.owner = THIS_MODULE;
+	chg->vbus_vreg->rdesc.type = REGULATOR_VOLTAGE;
+	chg->vbus_vreg->rdesc.ops = &smb138x_vbus_reg_ops;
+	chg->vbus_vreg->rdesc.of_match = "qcom,smb138x-vbus";
+	chg->vbus_vreg->rdesc.name = "qcom,smb138x-vbus";
+
+	chg->vbus_vreg->rdev = devm_regulator_register(chg->dev,
+						&chg->vbus_vreg->rdesc, &cfg);
+	if (IS_ERR(chg->vbus_vreg->rdev)) {
+		rc = PTR_ERR(chg->vbus_vreg->rdev);
+		chg->vbus_vreg->rdev = NULL;
+		if (rc != -EPROBE_DEFER)
+			pr_err("Couldn't register VBUS regualtor rc=%d\n", rc);
+	}
+
+	return rc;
+}
+
+/********************************
+ * VCONN REGULATOR REGISTRATION *
+ ********************************/
+
+static struct regulator_ops smb138x_vconn_reg_ops = {
+	.enable		= smblib_vconn_regulator_enable,
+	.disable	= smblib_vconn_regulator_disable,
+	.is_enabled	= smblib_vconn_regulator_is_enabled,
+};
+
+static int smb138x_init_vconn_regulator(struct smb138x *chip)
+{
+	struct smb_charger *chg = &chip->chg;
+	struct regulator_config cfg = {};
+	int rc = 0;
+
+	chg->vconn_vreg = devm_kzalloc(chg->dev, sizeof(*chg->vconn_vreg),
+				      GFP_KERNEL);
+	if (!chg->vconn_vreg)
+		return -ENOMEM;
+
+	cfg.dev = chg->dev;
+	cfg.driver_data = chip;
+
+	chg->vconn_vreg->rdesc.owner = THIS_MODULE;
+	chg->vconn_vreg->rdesc.type = REGULATOR_VOLTAGE;
+	chg->vconn_vreg->rdesc.ops = &smb138x_vconn_reg_ops;
+	chg->vconn_vreg->rdesc.of_match = "qcom,smb138x-vconn";
+	chg->vconn_vreg->rdesc.name = "qcom,smb138x-vconn";
+
+	chg->vconn_vreg->rdev = devm_regulator_register(chg->dev,
+						&chg->vconn_vreg->rdesc, &cfg);
+	if (IS_ERR(chg->vconn_vreg->rdev)) {
+		rc = PTR_ERR(chg->vconn_vreg->rdev);
+		chg->vconn_vreg->rdev = NULL;
+		if (rc != -EPROBE_DEFER)
+			pr_err("Couldn't register VCONN regualtor rc=%d\n", rc);
+	}
+
+	return rc;
+}
+
+/***************************
+ * HARDWARE INITIALIZATION *
+ ***************************/
+
+#define MDEGC_3		3000
+#define MDEGC_15	15000
+static int smb138x_init_slave_hw(struct smb138x *chip)
+{
+	struct smb_charger *chg = &chip->chg;
+	int rc;
+
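+	/*
+	 * Workaround for SMB1381 rev 1.0 (see smb138x_setup_wa_flags):
+	 * raise the OOB comparator threshold and widen the SDCDC dead time.
+	 */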
+	if (chip->wa_flags & OOB_COMP_WA_BIT) {
+		rc = smblib_masked_write(chg, SMB2CHG_MISC_ENG_SDCDC_CFG2,
+					ENG_SDCDC_SEL_OOB_VTH_BIT,
+					ENG_SDCDC_SEL_OOB_VTH_BIT);
+		if (rc < 0) {
+			pr_err("Couldn't configure the OOB comp threshold rc = %d\n",
+									rc);
+			return rc;
+		}
+
+		rc = smblib_masked_write(chg, SMB2CHG_MISC_ENG_SDCDC_CFG6,
+				DEAD_TIME_MASK, HIGH_DEAD_TIME_MASK);
+		if (rc < 0) {
+			pr_err("Couldn't configure the sdcdc cfg 6 reg rc = %d\n",
+									rc);
+			return rc;
+		}
+	}
+
+	/* configure a fixed 700 kHz switching freq to avoid tdie errors */
+	rc = smblib_set_charge_param(chg, &chg->param.freq_buck, 700);
+	if (rc < 0) {
+		pr_err("Couldn't configure 700 kHz switch freq rc=%d\n", rc);
+		return rc;
+	}
+
+	/* enable watchdog bark and bite interrupts, and disable the watchdog */
+	rc = smblib_masked_write(chg, WD_CFG_REG, WDOG_TIMER_EN_BIT
+			| WDOG_TIMER_EN_ON_PLUGIN_BIT | BITE_WDOG_INT_EN_BIT
+			| BARK_WDOG_INT_EN_BIT,
+			BITE_WDOG_INT_EN_BIT | BARK_WDOG_INT_EN_BIT);
+	if (rc < 0) {
+		pr_err("Couldn't configure the watchdog rc=%d\n", rc);
+		return rc;
+	}
+
+	/* disable charging when watchdog bites */
+	rc = smblib_masked_write(chg, SNARL_BARK_BITE_WD_CFG_REG,
+				 BITE_WDOG_DISABLE_CHARGING_CFG_BIT,
+				 BITE_WDOG_DISABLE_CHARGING_CFG_BIT);
+	if (rc < 0) {
+		pr_err("Couldn't configure the watchdog bite rc=%d\n", rc);
+		return rc;
+	}
+
+	/* Disable OTG */
+	rc = smblib_masked_write(chg, CMD_OTG_REG, OTG_EN_BIT, 0);
+	if (rc < 0) {
+		pr_err("Couldn't disable OTG rc=%d\n", rc);
+		return rc;
+	}
+
+	/* suspend parallel charging */
+	rc = smb138x_set_parallel_suspend(chip, true);
+	if (rc < 0) {
+		pr_err("Couldn't suspend parallel charging rc=%d\n", rc);
+		return rc;
+	}
+
+	/* initialize FCC to 0 */
+	rc = smblib_set_charge_param(chg, &chg->param.fcc, 0);
+	if (rc < 0) {
+		pr_err("Couldn't set 0 FCC rc=%d\n", rc);
+		return rc;
+	}
+
+	/* enable the charging path */
+	rc = smblib_masked_write(chg, CHARGING_ENABLE_CMD_REG,
+				 CHARGING_ENABLE_CMD_BIT,
+				 CHARGING_ENABLE_CMD_BIT);
+	if (rc < 0) {
+		pr_err("Couldn't enable charging rc=%d\n", rc);
+		return rc;
+	}
+
+	/* configure charge enable for software control; active high */
+	rc = smblib_masked_write(chg, CHGR_CFG2_REG,
+				 CHG_EN_POLARITY_BIT | CHG_EN_SRC_BIT, 0);
+	if (rc < 0) {
+		pr_err("Couldn't configure charge enable source rc=%d\n",
+			rc);
+		return rc;
+	}
+
+	/* enable parallel current sensing */
+	rc = smblib_masked_write(chg, CFG_REG,
+				 VCHG_EN_CFG_BIT, VCHG_EN_CFG_BIT);
+	if (rc < 0) {
+		pr_err("Couldn't enable parallel current sensing rc=%d\n",
+			rc);
+		return rc;
+	}
+
+	/* enable stacked diode */
+	rc = smblib_write(chg, SMB2CHG_DC_TM_SREFGEN, STACKED_DIODE_EN_BIT);
+	if (rc < 0) {
+		pr_err("Couldn't enable stacked diode rc=%d\n", rc);
+		return rc;
+	}
+
+	/* initialize charger temperature threshold */
+	rc = iio_write_channel_processed(chg->iio.temp_max_chan,
+					chip->dt.chg_temp_max_mdegc);
+	if (rc < 0) {
+		pr_err("Couldn't set charger temp threshold rc=%d\n", rc);
+		return rc;
+	}
+
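+	/*
+	 * Stage the connector thresholds at max, max + 3 degC and
+	 * max + 15 degC; these become the warm/hot/overheat boundaries
+	 * reported by smb138x_get_prop_connector_health().
+	 */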
+	rc = iio_write_channel_processed(chg->iio.connector_temp_thr1_chan,
+				chip->dt.connector_temp_max_mdegc);
+	if (rc < 0) {
+		pr_err("Couldn't set connector temp threshold1 rc=%d\n", rc);
+		return rc;
+	}
+
+	rc = iio_write_channel_processed(chg->iio.connector_temp_thr2_chan,
+				chip->dt.connector_temp_max_mdegc + MDEGC_3);
+	if (rc < 0) {
+		pr_err("Couldn't set connector temp threshold2 rc=%d\n", rc);
+		return rc;
+	}
+
+	rc = iio_write_channel_processed(chg->iio.connector_temp_thr3_chan,
+				chip->dt.connector_temp_max_mdegc + MDEGC_15);
+	if (rc < 0) {
+		pr_err("Couldn't set connector temp threshold3 rc=%d\n", rc);
+		return rc;
+	}
+
+	return 0;
+}
+
+static int smb138x_init_hw(struct smb138x *chip)
+{
+	struct smb_charger *chg = &chip->chg;
+	int rc = 0;
+
+	/* votes must be cast before configuring software control */
+	vote(chg->dc_suspend_votable,
+		DEFAULT_VOTER, chip->dt.suspend_input, 0);
+	vote(chg->fcc_votable,
+		DEFAULT_VOTER, true, chip->dt.fcc_ua);
+	vote(chg->usb_icl_votable,
+		DCP_VOTER, true, chip->dt.usb_icl_ua);
+	vote(chg->dc_icl_votable,
+		DEFAULT_VOTER, true, chip->dt.dc_icl_ua);
+
+	chg->dcp_icl_ua = chip->dt.usb_icl_ua;
+
+	/* Disable OTG */
+	rc = smblib_masked_write(chg, CMD_OTG_REG, OTG_EN_BIT, 0);
+	if (rc < 0) {
+		pr_err("Couldn't disable OTG rc=%d\n", rc);
+		return rc;
+	}
+
+	/* Unsuspend USB input */
+	rc = smblib_masked_write(chg, USBIN_CMD_IL_REG, USBIN_SUSPEND_BIT, 0);
+	if (rc < 0) {
+		pr_err("Couldn't unsuspend USB, rc=%d\n", rc);
+		return rc;
+	}
+
+	/* configure a fixed 700 kHz switching freq to avoid tdie errors */
+	rc = smblib_set_charge_param(chg, &chg->param.freq_buck, 700);
+	if (rc < 0) {
+		pr_err("Couldn't configure 700 kHz switch freq rc=%d\n", rc);
+		return rc;
+	}
+
+	/* configure charge enable for software control; active high */
+	rc = smblib_masked_write(chg, CHGR_CFG2_REG,
+				 CHG_EN_POLARITY_BIT | CHG_EN_SRC_BIT, 0);
+	if (rc < 0) {
+		pr_err("Couldn't configure charge enable source rc=%d\n", rc);
+		return rc;
+	}
+
+	/* enable the charging path */
+	rc = vote(chg->chg_disable_votable, DEFAULT_VOTER, false, 0);
+	if (rc < 0) {
+		pr_err("Couldn't enable charging rc=%d\n", rc);
+		return rc;
+	}
+
+	/*
+	 * trigger the usb-typec-change interrupt only when the CC state
+	 * changes, or there was a VBUS error
+	 */
+	rc = smblib_write(chg, TYPE_C_INTRPT_ENB_REG,
+			    TYPEC_CCSTATE_CHANGE_INT_EN_BIT
+			  | TYPEC_VBUS_ERROR_INT_EN_BIT);
+	if (rc < 0) {
+		pr_err("Couldn't configure Type-C interrupts rc=%d\n", rc);
+		return rc;
+	}
+
+	/* configure VCONN for software control */
+	rc = smblib_masked_write(chg, TYPE_C_INTRPT_ENB_SOFTWARE_CTRL_REG,
+				 VCONN_EN_SRC_BIT | VCONN_EN_VALUE_BIT,
+				 VCONN_EN_SRC_BIT);
+	if (rc < 0) {
+		pr_err("Couldn't configure VCONN for SW control rc=%d\n", rc);
+		return rc;
+	}
+
+	/* configure VBUS for software control */
+	rc = smblib_masked_write(chg, OTG_CFG_REG, OTG_EN_SRC_CFG_BIT, 0);
+	if (rc < 0) {
+		pr_err("Couldn't configure VBUS for SW control rc=%d\n", rc);
+		return rc;
+	}
+
+	/* configure power role for dual-role */
+	rc = smblib_masked_write(chg, TYPE_C_INTRPT_ENB_SOFTWARE_CTRL_REG,
+				 TYPEC_POWER_ROLE_CMD_MASK, 0);
+	if (rc < 0) {
+		pr_err("Couldn't configure power role for DRP rc=%d\n", rc);
+		return rc;
+	}
+
+	if (chip->wa_flags & OOB_COMP_WA_BIT) {
+		rc = smblib_masked_write(chg, SMB2CHG_MISC_ENG_SDCDC_CFG2,
+					ENG_SDCDC_SEL_OOB_VTH_BIT,
+					ENG_SDCDC_SEL_OOB_VTH_BIT);
+		if (rc < 0) {
+			pr_err("Couldn't configure the OOB comp threshold rc = %d\n",
+									rc);
+			return rc;
+		}
+
+		rc = smblib_masked_write(chg, SMB2CHG_MISC_ENG_SDCDC_CFG6,
+				DEAD_TIME_MASK, HIGH_DEAD_TIME_MASK);
+		if (rc < 0) {
+			pr_err("Couldn't configure the sdcdc cfg 6 reg rc = %d\n",
+									rc);
+			return rc;
+		}
+	}
+
+	return rc;
+}
+
+static int smb138x_setup_wa_flags(struct smb138x *chip)
+{
+	struct pmic_revid_data *pmic_rev_id;
+	struct device_node *revid_dev_node;
+
+	revid_dev_node = of_parse_phandle(chip->chg.dev->of_node,
+					"qcom,pmic-revid", 0);
+	if (!revid_dev_node) {
+		pr_err("Missing qcom,pmic-revid property\n");
+		return -EINVAL;
+	}
+
+	pmic_rev_id = get_revid_data(revid_dev_node);
+	if (IS_ERR_OR_NULL(pmic_rev_id)) {
+		/*
+		 * the revid peripheral must be registered, any failure
+		 * here only indicates that the rev-id module has not
+		 * probed yet.
+		 */
+		return -EPROBE_DEFER;
+	}
+
+	switch (pmic_rev_id->pmic_subtype) {
+	case SMB1381_SUBTYPE:
+		if (pmic_rev_id->rev4 < 2) /* SMB1381 rev 1.0 */
+			chip->wa_flags |= OOB_COMP_WA_BIT;
+		break;
+	default:
+		pr_err("PMIC subtype %d not supported\n",
+				pmic_rev_id->pmic_subtype);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/****************************
+ * DETERMINE INITIAL STATUS *
+ ****************************/
+
+static irqreturn_t smb138x_handle_temperature_change(int irq, void *data)
+{
+	struct smb_irq_data *irq_data = data;
+	struct smb138x *chip = irq_data->parent_data;
+
+	power_supply_changed(chip->parallel_psy);
+	return IRQ_HANDLED;
+}
+
+static int smb138x_determine_initial_slave_status(struct smb138x *chip)
+{
+	struct smb_irq_data irq_data = {chip, "determine-initial-status"};
+
+	smb138x_handle_temperature_change(0, &irq_data);
+	return 0;
+}
+
+static int smb138x_determine_initial_status(struct smb138x *chip)
+{
+	struct smb_irq_data irq_data = {chip, "determine-initial-status"};
+
+	smblib_handle_usb_plugin(0, &irq_data);
+	smblib_handle_usb_typec_change(0, &irq_data);
+	smblib_handle_usb_source_change(0, &irq_data);
+	return 0;
+}
+
+/**************************
+ * INTERRUPT REGISTRATION *
+ **************************/
+
+static struct smb_irq_info smb138x_irqs[] = {
+/* CHARGER IRQs */
+	[CHG_ERROR_IRQ] = {
+		.name		= "chg-error",
+		.handler	= smblib_handle_debug,
+	},
+	[CHG_STATE_CHANGE_IRQ] = {
+		.name		= "chg-state-change",
+		.handler	= smb138x_handle_slave_chg_state_change,
+		.wake		= true,
+	},
+	[STEP_CHG_STATE_CHANGE_IRQ] = {
+		.name		= "step-chg-state-change",
+		.handler	= smblib_handle_debug,
+	},
+	[STEP_CHG_SOC_UPDATE_FAIL_IRQ] = {
+		.name		= "step-chg-soc-update-fail",
+		.handler	= smblib_handle_debug,
+	},
+	[STEP_CHG_SOC_UPDATE_REQ_IRQ] = {
+		.name		= "step-chg-soc-update-request",
+		.handler	= smblib_handle_debug,
+	},
+/* OTG IRQs */
+	[OTG_FAIL_IRQ] = {
+		.name		= "otg-fail",
+		.handler	= smblib_handle_debug,
+	},
+	[OTG_OVERCURRENT_IRQ] = {
+		.name		= "otg-overcurrent",
+		.handler	= smblib_handle_debug,
+	},
+	[OTG_OC_DIS_SW_STS_IRQ] = {
+		.name		= "otg-oc-dis-sw-sts",
+		.handler	= smblib_handle_debug,
+	},
+	[TESTMODE_CHANGE_DET_IRQ] = {
+		.name		= "testmode-change-detect",
+		.handler	= smblib_handle_debug,
+	},
+/* BATTERY IRQs */
+	[BATT_TEMP_IRQ] = {
+		.name		= "bat-temp",
+		.handler	= smblib_handle_batt_psy_changed,
+	},
+	[BATT_OCP_IRQ] = {
+		.name		= "bat-ocp",
+		.handler	= smblib_handle_batt_psy_changed,
+	},
+	[BATT_OV_IRQ] = {
+		.name		= "bat-ov",
+		.handler	= smblib_handle_batt_psy_changed,
+	},
+	[BATT_LOW_IRQ] = {
+		.name		= "bat-low",
+		.handler	= smblib_handle_batt_psy_changed,
+	},
+	[BATT_THERM_ID_MISS_IRQ] = {
+		.name		= "bat-therm-or-id-missing",
+		.handler	= smblib_handle_batt_psy_changed,
+	},
+	[BATT_TERM_MISS_IRQ] = {
+		.name		= "bat-terminal-missing",
+		.handler	= smblib_handle_batt_psy_changed,
+	},
+/* USB INPUT IRQs */
+	[USBIN_COLLAPSE_IRQ] = {
+		.name		= "usbin-collapse",
+		.handler	= smblib_handle_debug,
+	},
+	[USBIN_LT_3P6V_IRQ] = {
+		.name		= "usbin-lt-3p6v",
+		.handler	= smblib_handle_debug,
+	},
+	[USBIN_UV_IRQ] = {
+		.name		= "usbin-uv",
+		.handler	= smblib_handle_debug,
+	},
+	[USBIN_OV_IRQ] = {
+		.name		= "usbin-ov",
+		.handler	= smblib_handle_debug,
+	},
+	[USBIN_PLUGIN_IRQ] = {
+		.name		= "usbin-plugin",
+		.handler	= smblib_handle_usb_plugin,
+	},
+	[USBIN_SRC_CHANGE_IRQ] = {
+		.name		= "usbin-src-change",
+		.handler	= smblib_handle_usb_source_change,
+	},
+	[USBIN_ICL_CHANGE_IRQ] = {
+		.name		= "usbin-icl-change",
+		.handler	= smblib_handle_debug,
+	},
+	[TYPE_C_CHANGE_IRQ] = {
+		.name		= "type-c-change",
+		.handler	= smblib_handle_usb_typec_change,
+	},
+/* DC INPUT IRQs */
+	[DCIN_COLLAPSE_IRQ] = {
+		.name		= "dcin-collapse",
+		.handler	= smblib_handle_debug,
+	},
+	[DCIN_LT_3P6V_IRQ] = {
+		.name		= "dcin-lt-3p6v",
+		.handler	= smblib_handle_debug,
+	},
+	[DCIN_UV_IRQ] = {
+		.name		= "dcin-uv",
+		.handler	= smblib_handle_debug,
+	},
+	[DCIN_OV_IRQ] = {
+		.name		= "dcin-ov",
+		.handler	= smblib_handle_debug,
+	},
+	[DCIN_PLUGIN_IRQ] = {
+		.name		= "dcin-plugin",
+		.handler	= smblib_handle_debug,
+	},
+	[DIV2_EN_DG_IRQ] = {
+		.name		= "div2-en-dg",
+		.handler	= smblib_handle_debug,
+	},
+	[DCIN_ICL_CHANGE_IRQ] = {
+		.name		= "dcin-icl-change",
+		.handler	= smblib_handle_debug,
+	},
+/* MISCELLANEOUS IRQs */
+	[WDOG_SNARL_IRQ] = {
+		.name		= "wdog-snarl",
+		.handler	= smblib_handle_debug,
+	},
+	[WDOG_BARK_IRQ] = {
+		.name		= "wdog-bark",
+		.handler	= smblib_handle_wdog_bark,
+		.wake		= true,
+	},
+	[AICL_FAIL_IRQ] = {
+		.name		= "aicl-fail",
+		.handler	= smblib_handle_debug,
+	},
+	[AICL_DONE_IRQ] = {
+		.name		= "aicl-done",
+		.handler	= smblib_handle_debug,
+	},
+	[HIGH_DUTY_CYCLE_IRQ] = {
+		.name		= "high-duty-cycle",
+		.handler	= smblib_handle_debug,
+	},
+	[INPUT_CURRENT_LIMIT_IRQ] = {
+		.name		= "input-current-limiting",
+		.handler	= smblib_handle_debug,
+	},
+	[TEMPERATURE_CHANGE_IRQ] = {
+		.name		= "temperature-change",
+		.handler	= smb138x_handle_temperature_change,
+	},
+	[SWITCH_POWER_OK_IRQ] = {
+		.name		= "switcher-power-ok",
+		.handler	= smblib_handle_debug,
+	},
+};
+
+static int smb138x_get_irq_index_byname(const char *irq_name)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(smb138x_irqs); i++) {
+		if (strcmp(smb138x_irqs[i].name, irq_name) == 0)
+			return i;
+	}
+
+	return -ENOENT;
+}
+
+static int smb138x_request_interrupt(struct smb138x *chip,
+				     struct device_node *node,
+				     const char *irq_name)
+{
+	struct smb_charger *chg = &chip->chg;
+	int rc = 0, irq, irq_index;
+	struct smb_irq_data *irq_data;
+
+	irq = of_irq_get_byname(node, irq_name);
+	if (irq < 0) {
+		pr_err("Couldn't get irq %s byname\n", irq_name);
+		return irq;
+	}
+
+	irq_index = smb138x_get_irq_index_byname(irq_name);
+	if (irq_index < 0) {
+		pr_err("%s is not a defined irq\n", irq_name);
+		return irq_index;
+	}
+
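+	/* entries without a handler are intentionally not requested */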
+	if (!smb138x_irqs[irq_index].handler)
+		return 0;
+
+	irq_data = devm_kzalloc(chg->dev, sizeof(*irq_data), GFP_KERNEL);
+	if (!irq_data)
+		return -ENOMEM;
+
+	irq_data->parent_data = chip;
+	irq_data->name = irq_name;
+	irq_data->storm_data = smb138x_irqs[irq_index].storm_data;
+	mutex_init(&irq_data->storm_data.storm_lock);
+
+	rc = devm_request_threaded_irq(chg->dev, irq, NULL,
+					smb138x_irqs[irq_index].handler,
+					IRQF_ONESHOT, irq_name, irq_data);
+	if (rc < 0) {
+		pr_err("Couldn't request irq %d\n", irq);
+		return rc;
+	}
+
+	if (smb138x_irqs[irq_index].wake)
+		enable_irq_wake(irq);
+
+	return rc;
+}
+
+static int smb138x_request_interrupts(struct smb138x *chip)
+{
+	struct smb_charger *chg = &chip->chg;
+	struct device_node *node = chg->dev->of_node;
+	struct device_node *child;
+	int rc = 0;
+	const char *name;
+	struct property *prop;
+
+	for_each_available_child_of_node(node, child) {
+		of_property_for_each_string(child, "interrupt-names",
+					    prop, name) {
+			rc = smb138x_request_interrupt(chip, child, name);
+			if (rc < 0) {
+				pr_err("Couldn't request interrupt %s rc=%d\n",
+				       name, rc);
+				return rc;
+			}
+		}
+	}
+
+	return rc;
+}
+
+/*********
+ * PROBE *
+ *********/
+
+static int smb138x_master_probe(struct smb138x *chip)
+{
+	struct smb_charger *chg = &chip->chg;
+	int rc = 0;
+
+	chg->param = v1_params;
+
+	rc = smblib_init(chg);
+	if (rc < 0) {
+		pr_err("Couldn't initialize smblib rc=%d\n", rc);
+		return rc;
+	}
+
+	rc = smb138x_parse_dt(chip);
+	if (rc < 0) {
+		pr_err("Couldn't parse device tree rc=%d\n", rc);
+		return rc;
+	}
+
+	rc = smb138x_init_vbus_regulator(chip);
+	if (rc < 0) {
+		pr_err("Couldn't initialize vbus regulator rc=%d\n",
+			rc);
+		return rc;
+	}
+
+	rc = smb138x_init_vconn_regulator(chip);
+	if (rc < 0) {
+		pr_err("Couldn't initialize vconn regulator rc=%d\n",
+			rc);
+		return rc;
+	}
+
+	rc = smb138x_init_usb_psy(chip);
+	if (rc < 0) {
+		pr_err("Couldn't initialize usb psy rc=%d\n", rc);
+		return rc;
+	}
+
+	rc = smb138x_init_batt_psy(chip);
+	if (rc < 0) {
+		pr_err("Couldn't initialize batt psy rc=%d\n", rc);
+		return rc;
+	}
+
+	rc = smb138x_init_hw(chip);
+	if (rc < 0) {
+		pr_err("Couldn't initialize hardware rc=%d\n", rc);
+		return rc;
+	}
+
+	rc = smb138x_determine_initial_status(chip);
+	if (rc < 0) {
+		pr_err("Couldn't determine initial status rc=%d\n",
+			rc);
+		return rc;
+	}
+
+	rc = smb138x_request_interrupts(chip);
+	if (rc < 0) {
+		pr_err("Couldn't request interrupts rc=%d\n", rc);
+		return rc;
+	}
+
+	return rc;
+}
+
+static int smb138x_slave_probe(struct smb138x *chip)
+{
+	struct smb_charger *chg = &chip->chg;
+	int rc = 0;
+
+	chg->param = v1_params;
+
+	rc = smblib_init(chg);
+	if (rc < 0) {
+		pr_err("Couldn't initialize smblib rc=%d\n", rc);
+		goto cleanup;
+	}
+
+	chg->iio.temp_max_chan = iio_channel_get(chg->dev, "charger_temp_max");
+	if (IS_ERR(chg->iio.temp_max_chan)) {
+		rc = PTR_ERR(chg->iio.temp_max_chan);
+		goto cleanup;
+	}
+
+	chg->iio.connector_temp_thr1_chan = iio_channel_get(chg->dev,
+							"connector_temp_thr1");
+	if (IS_ERR(chg->iio.connector_temp_thr1_chan)) {
+		rc = PTR_ERR(chg->iio.connector_temp_thr1_chan);
+		goto cleanup;
+	}
+
+	chg->iio.connector_temp_thr2_chan = iio_channel_get(chg->dev,
+							"connector_temp_thr2");
+	if (IS_ERR(chg->iio.connector_temp_thr2_chan)) {
+		rc = PTR_ERR(chg->iio.connector_temp_thr2_chan);
+		goto cleanup;
+	}
+
+	chg->iio.connector_temp_thr3_chan = iio_channel_get(chg->dev,
+							"connector_temp_thr3");
+	if (IS_ERR(chg->iio.connector_temp_thr3_chan)) {
+		rc = PTR_ERR(chg->iio.connector_temp_thr3_chan);
+		goto cleanup;
+	}
+
+	rc = smb138x_parse_dt(chip);
+	if (rc < 0) {
+		pr_err("Couldn't parse device tree rc=%d\n", rc);
+		goto cleanup;
+	}
+
+	rc = smb138x_init_slave_hw(chip);
+	if (rc < 0) {
+		pr_err("Couldn't initialize hardware rc=%d\n", rc);
+		goto cleanup;
+	}
+
+	if ((chip->dt.pl_mode == POWER_SUPPLY_PL_USBIN_USBIN)
+		|| (chip->dt.pl_mode == POWER_SUPPLY_PL_USBIN_USBIN_EXT)) {
+		rc = smb138x_init_vbus_regulator(chip);
+		if (rc < 0) {
+			pr_err("Couldn't initialize vbus regulator rc=%d\n",
+				rc);
+			return rc;
+		}
+	}
+
+	rc = smb138x_init_parallel_psy(chip);
+	if (rc < 0) {
+		pr_err("Couldn't initialize parallel psy rc=%d\n", rc);
+		goto cleanup;
+	}
+
+	rc = smb138x_determine_initial_slave_status(chip);
+	if (rc < 0) {
+		pr_err("Couldn't determine initial status rc=%d\n", rc);
+		goto cleanup;
+	}
+
+	rc = smb138x_request_interrupts(chip);
+	if (rc < 0) {
+		pr_err("Couldn't request interrupts rc=%d\n", rc);
+		goto cleanup;
+	}
+
+	return rc;
+
+cleanup:
+	smblib_deinit(chg);
+	if (chip->parallel_psy)
+		power_supply_unregister(chip->parallel_psy);
+	if (chg->vbus_vreg && chg->vbus_vreg->rdev)
+		regulator_unregister(chg->vbus_vreg->rdev);
+	return rc;
+}
+
+static const struct of_device_id match_table[] = {
+	{
+		.compatible = "qcom,smb138x-charger",
+		.data = (void *) PARALLEL_MASTER
+	},
+	{
+		.compatible = "qcom,smb138x-parallel-slave",
+		.data = (void *) PARALLEL_SLAVE
+	},
+	{ },
+};
+
+static int smb138x_probe(struct platform_device *pdev)
+{
+	struct smb138x *chip;
+	const struct of_device_id *id;
+	int rc = 0;
+
+	chip = devm_kzalloc(&pdev->dev, sizeof(*chip), GFP_KERNEL);
+	if (!chip)
+		return -ENOMEM;
+
+	chip->chg.dev = &pdev->dev;
+	chip->chg.debug_mask = &__debug_mask;
+	chip->chg.irq_info = smb138x_irqs;
+	chip->chg.name = "SMB";
+
+	chip->chg.regmap = dev_get_regmap(chip->chg.dev->parent, NULL);
+	if (!chip->chg.regmap) {
+		pr_err("parent regmap is missing\n");
+		return -EINVAL;
+	}
+
+	id = of_match_device(of_match_ptr(match_table), chip->chg.dev);
+	if (!id) {
+		pr_err("Couldn't find a matching device\n");
+		return -ENODEV;
+	}
+
+	platform_set_drvdata(pdev, chip);
+
+	rc = smb138x_setup_wa_flags(chip);
+	if (rc < 0) {
+		if (rc != -EPROBE_DEFER)
+			pr_err("Couldn't setup wa flags rc = %d\n", rc);
+		return rc;
+	}
+
+	chip->chg.mode = (enum smb_mode) id->data;
+	switch (chip->chg.mode) {
+	case PARALLEL_MASTER:
+		rc = smb138x_master_probe(chip);
+		break;
+	case PARALLEL_SLAVE:
+		rc = smb138x_slave_probe(chip);
+		break;
+	default:
+		pr_err("Couldn't find a matching mode %d\n", chip->chg.mode);
+		rc = -EINVAL;
+		goto cleanup;
+	}
+
+	if (rc < 0) {
+		if (rc != -EPROBE_DEFER)
+			pr_err("Couldn't probe SMB138X rc=%d\n", rc);
+		goto cleanup;
+	}
+
+	pr_info("SMB138X probed successfully mode=%d\n", chip->chg.mode);
+	return rc;
+
+cleanup:
+	platform_set_drvdata(pdev, NULL);
+	return rc;
+}
+
+static int smb138x_remove(struct platform_device *pdev)
+{
+	platform_set_drvdata(pdev, NULL);
+	return 0;
+}
+
+static void smb138x_shutdown(struct platform_device *pdev)
+{
+	struct smb138x *chip = platform_get_drvdata(pdev);
+	struct smb_charger *chg = &chip->chg;
+	int rc;
+
+	/* Suspend charging */
+	rc = smb138x_set_parallel_suspend(chip, true);
+	if (rc < 0)
+		pr_err("Couldn't suspend charging rc=%d\n", rc);
+
+	/* Disable OTG */
+	rc = smblib_masked_write(chg, CMD_OTG_REG, OTG_EN_BIT, 0);
+	if (rc < 0)
+		pr_err("Couldn't disable OTG rc=%d\n", rc);
+}
+
+static struct platform_driver smb138x_driver = {
+	.driver	= {
+		.name		= "qcom,smb138x-charger",
+		.owner		= THIS_MODULE,
+		.of_match_table	= match_table,
+	},
+	.probe		= smb138x_probe,
+	.remove		= smb138x_remove,
+	.shutdown	= smb138x_shutdown,
+};
+module_platform_driver(smb138x_driver);
+
+MODULE_DESCRIPTION("QPNP SMB138X Charger Driver");
+MODULE_LICENSE("GPL v2");
diff -Nruw linux-4.4.115-fbx/drivers/power/supply./qcom/smb-lib.c linux-4.4.115-fbx/drivers/power/supply/qcom/smb-lib.c
--- linux-4.4.115-fbx/drivers/power/supply./qcom/smb-lib.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/power/supply/qcom/smb-lib.c	2019-10-29 09:26:24.637212906 +0100
@@ -0,0 +1,5095 @@
+/* Copyright (c) 2016-2017 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/device.h>
+#include <linux/regmap.h>
+#include <linux/delay.h>
+#include <linux/iio/consumer.h>
+#include <linux/power_supply.h>
+#include <linux/regulator/driver.h>
+#include <linux/qpnp/qpnp-revid.h>
+#include <linux/input/qpnp-power-on.h>
+#include <linux/irq.h>
+#include <linux/pmic-voter.h>
+#include "smb-lib.h"
+#include "smb-reg.h"
+#include "battery.h"
+#include "step-chg-jeita.h"
+#include "storm-watch.h"
+
+#define smblib_err(chg, fmt, ...)		\
+	pr_err("%s: %s: " fmt, chg->name,	\
+		__func__, ##__VA_ARGS__)
+
+#define smblib_dbg(chg, reason, fmt, ...)			\
+	do {							\
+		if (*chg->debug_mask & (reason))		\
+			pr_info("%s: %s: " fmt, chg->name,	\
+				__func__, ##__VA_ARGS__);	\
+		else						\
+			pr_debug("%s: %s: " fmt, chg->name,	\
+				__func__, ##__VA_ARGS__);	\
+	} while (0)
+
+static bool is_secure(struct smb_charger *chg, int addr)
+{
+	if (addr == SHIP_MODE_REG || addr == FREQ_CLK_DIV_REG)
+		return true;
+	/* assume everything at or above 0xA0 is secure */
+	return (bool)((addr & 0xFF) >= 0xA0);
+}
+
+int smblib_read(struct smb_charger *chg, u16 addr, u8 *val)
+{
+	unsigned int temp;
+	int rc = 0;
+
+	rc = regmap_read(chg->regmap, addr, &temp);
+	if (rc >= 0)
+		*val = (u8)temp;
+
+	return rc;
+}
+
+int smblib_multibyte_read(struct smb_charger *chg, u16 addr, u8 *val,
+				int count)
+{
+	return regmap_bulk_read(chg->regmap, addr, val, count);
+}
+
+int smblib_masked_write(struct smb_charger *chg, u16 addr, u8 mask, u8 val)
+{
+	int rc = 0;
+
+	mutex_lock(&chg->write_lock);
+	if (is_secure(chg, addr)) {
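+		/* unlock the peripheral's secure page via SEC_ACCESS (0xD0) */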
+		rc = regmap_write(chg->regmap, (addr & 0xFF00) | 0xD0, 0xA5);
+		if (rc < 0)
+			goto unlock;
+	}
+
+	rc = regmap_update_bits(chg->regmap, addr, mask, val);
+
+unlock:
+	mutex_unlock(&chg->write_lock);
+	return rc;
+}
+
+int smblib_write(struct smb_charger *chg, u16 addr, u8 val)
+{
+	int rc = 0;
+
+	mutex_lock(&chg->write_lock);
+
+	if (is_secure(chg, addr)) {
+		rc = regmap_write(chg->regmap, (addr & ~(0xFF)) | 0xD0, 0xA5);
+		if (rc < 0)
+			goto unlock;
+	}
+
+	rc = regmap_write(chg->regmap, addr, val);
+
+unlock:
+	mutex_unlock(&chg->write_lock);
+	return rc;
+}
+
+static int smblib_get_jeita_cc_delta(struct smb_charger *chg, int *cc_delta_ua)
+{
+	int rc, cc_minus_ua;
+	u8 stat;
+
+	rc = smblib_read(chg, BATTERY_CHARGER_STATUS_2_REG, &stat);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't read BATTERY_CHARGER_STATUS_2 rc=%d\n",
+			rc);
+		return rc;
+	}
+
+	if (!(stat & BAT_TEMP_STATUS_SOFT_LIMIT_MASK)) {
+		*cc_delta_ua = 0;
+		return 0;
+	}
+
+	rc = smblib_get_charge_param(chg, &chg->param.jeita_cc_comp,
+					&cc_minus_ua);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't get jeita cc minus rc=%d\n", rc);
+		return rc;
+	}
+
+	*cc_delta_ua = -cc_minus_ua;
+	return 0;
+}
+
+int smblib_icl_override(struct smb_charger *chg, bool override)
+{
+	int rc;
+
+	rc = smblib_masked_write(chg, USBIN_LOAD_CFG_REG,
+				ICL_OVERRIDE_AFTER_APSD_BIT,
+				override ? ICL_OVERRIDE_AFTER_APSD_BIT : 0);
+	if (rc < 0)
+		smblib_err(chg, "Couldn't override ICL rc=%d\n", rc);
+
+	return rc;
+}
+
+/********************
+ * REGISTER GETTERS *
+ ********************/
+
+int smblib_get_charge_param(struct smb_charger *chg,
+			    struct smb_chg_param *param, int *val_u)
+{
+	int rc = 0;
+	u8 val_raw;
+
+	rc = smblib_read(chg, param->reg, &val_raw);
+	if (rc < 0) {
+		smblib_err(chg, "%s: Couldn't read from 0x%04x rc=%d\n",
+			param->name, param->reg, rc);
+		return rc;
+	}
+
+	if (param->get_proc)
+		*val_u = param->get_proc(param, val_raw);
+	else
+		*val_u = val_raw * param->step_u + param->min_u;
+	smblib_dbg(chg, PR_REGISTER, "%s = %d (0x%02x)\n",
+		   param->name, *val_u, val_raw);
+
+	return rc;
+}
+
+int smblib_get_usb_suspend(struct smb_charger *chg, int *suspend)
+{
+	int rc = 0;
+	u8 temp;
+
+	rc = smblib_read(chg, USBIN_CMD_IL_REG, &temp);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't read USBIN_CMD_IL rc=%d\n", rc);
+		return rc;
+	}
+	*suspend = temp & USBIN_SUSPEND_BIT;
+
+	return rc;
+}
+
+struct apsd_result {
+	const char * const name;
+	const u8 bit;
+	const enum power_supply_type pst;
+};
+
+enum {
+	UNKNOWN,
+	SDP,
+	CDP,
+	DCP,
+	OCP,
+	FLOAT,
+	HVDCP2,
+	HVDCP3,
+	MAX_TYPES
+};
+
+static const struct apsd_result smblib_apsd_results[] = {
+	[UNKNOWN] = {
+		.name	= "UNKNOWN",
+		.bit	= 0,
+		.pst	= POWER_SUPPLY_TYPE_UNKNOWN
+	},
+	[SDP] = {
+		.name	= "SDP",
+		.bit	= SDP_CHARGER_BIT,
+		.pst	= POWER_SUPPLY_TYPE_USB
+	},
+	[CDP] = {
+		.name	= "CDP",
+		.bit	= CDP_CHARGER_BIT,
+		.pst	= POWER_SUPPLY_TYPE_USB_CDP
+	},
+	[DCP] = {
+		.name	= "DCP",
+		.bit	= DCP_CHARGER_BIT,
+		.pst	= POWER_SUPPLY_TYPE_USB_DCP
+	},
+	[OCP] = {
+		.name	= "OCP",
+		.bit	= OCP_CHARGER_BIT,
+		.pst	= POWER_SUPPLY_TYPE_USB_DCP
+	},
+	[FLOAT] = {
+		.name	= "FLOAT",
+		.bit	= FLOAT_CHARGER_BIT,
+		.pst	= POWER_SUPPLY_TYPE_USB_FLOAT
+	},
+	[HVDCP2] = {
+		.name	= "HVDCP2",
+		.bit	= DCP_CHARGER_BIT | QC_2P0_BIT,
+		.pst	= POWER_SUPPLY_TYPE_USB_HVDCP
+	},
+	[HVDCP3] = {
+		.name	= "HVDCP3",
+		.bit	= DCP_CHARGER_BIT | QC_3P0_BIT,
+		.pst	= POWER_SUPPLY_TYPE_USB_HVDCP_3,
+	},
+};
+
+static const struct apsd_result *smblib_get_apsd_result(struct smb_charger *chg)
+{
+	int rc, i;
+	u8 apsd_stat, stat;
+	const struct apsd_result *result = &smblib_apsd_results[UNKNOWN];
+
+	rc = smblib_read(chg, APSD_STATUS_REG, &apsd_stat);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't read APSD_STATUS rc=%d\n", rc);
+		return result;
+	}
+	smblib_dbg(chg, PR_REGISTER, "APSD_STATUS = 0x%02x\n", apsd_stat);
+
+	if (!(apsd_stat & APSD_DTC_STATUS_DONE_BIT))
+		return result;
+
+	rc = smblib_read(chg, APSD_RESULT_STATUS_REG, &stat);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't read APSD_RESULT_STATUS rc=%d\n",
+			rc);
+		return result;
+	}
+	stat &= APSD_RESULT_STATUS_MASK;
+
+	for (i = 0; i < ARRAY_SIZE(smblib_apsd_results); i++) {
+		if (smblib_apsd_results[i].bit == stat)
+			result = &smblib_apsd_results[i];
+	}
+
+	if (apsd_stat & QC_CHARGER_BIT) {
+		/* since it's a QC charger, return either HVDCP3 or HVDCP2 */
+		if (result != &smblib_apsd_results[HVDCP3])
+			result = &smblib_apsd_results[HVDCP2];
+	}
+
+	return result;
+}
+
+/********************
+ * REGISTER SETTERS *
+ ********************/
+
+static int chg_freq_list[] = {
+	9600, 9600, 6400, 4800, 3800, 3200, 2700, 2400, 2100, 1900, 1700,
+	1600, 1500, 1400, 1300, 1200,
+};
+
+int smblib_set_chg_freq(struct smb_chg_param *param,
+				int val_u, u8 *val_raw)
+{
+	u8 i;
+
+	if (val_u > param->max_u || val_u < param->min_u)
+		return -EINVAL;
+
+	/* Charger FSW is the configured frequency / 2 */
+	val_u *= 2;
+	for (i = 0; i < ARRAY_SIZE(chg_freq_list); i++) {
+		if (chg_freq_list[i] == val_u)
+			break;
+	}
+	if (i == ARRAY_SIZE(chg_freq_list)) {
+		pr_err("Invalid frequency %d Hz\n", val_u / 2);
+		return -EINVAL;
+	}
+
+	*val_raw = i;
+
+	return 0;
+}
+
+static int smblib_set_opt_freq_buck(struct smb_charger *chg, int fsw_khz)
+{
+	union power_supply_propval pval = {0, };
+	int rc = 0;
+
+	rc = smblib_set_charge_param(chg, &chg->param.freq_buck, fsw_khz);
+	if (rc < 0)
+		dev_err(chg->dev, "Error in setting freq_buck rc=%d\n", rc);
+
+	if (chg->mode == PARALLEL_MASTER && chg->pl.psy) {
+		pval.intval = fsw_khz;
+		/*
+		 * Some parallel charging implementations may not have
+		 * PROP_BUCK_FREQ property - they could be running
+		 * with a fixed frequency
+		 */
+		power_supply_set_property(chg->pl.psy,
+				POWER_SUPPLY_PROP_BUCK_FREQ, &pval);
+	}
+
+	return rc;
+}
+
+int smblib_set_charge_param(struct smb_charger *chg,
+			    struct smb_chg_param *param, int val_u)
+{
+	int rc = 0;
+	u8 val_raw;
+
+	if (param->set_proc) {
+		rc = param->set_proc(param, val_u, &val_raw);
+		if (rc < 0)
+			return -EINVAL;
+	} else {
+		if (val_u > param->max_u || val_u < param->min_u) {
+			smblib_err(chg, "%s: %d is out of range [%d, %d]\n",
+				param->name, val_u, param->min_u, param->max_u);
+			return -EINVAL;
+		}
+
+		val_raw = (val_u - param->min_u) / param->step_u;
+	}
+
+	rc = smblib_write(chg, param->reg, val_raw);
+	if (rc < 0) {
+		smblib_err(chg, "%s: Couldn't write 0x%02x to 0x%04x rc=%d\n",
+			param->name, val_raw, param->reg, rc);
+		return rc;
+	}
+
+	smblib_dbg(chg, PR_REGISTER, "%s = %d (0x%02x)\n",
+		   param->name, val_u, val_raw);
+
+	return rc;
+}
+
+int smblib_set_usb_suspend(struct smb_charger *chg, bool suspend)
+{
+	int rc = 0;
+	int irq = chg->irq_info[USBIN_ICL_CHANGE_IRQ].irq;
+
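+	/*
+	 * Mask the ICL-change interrupt while the input is suspended; ICL
+	 * changes are meaningless without an active input path.
+	 */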
+	if (suspend && irq) {
+		if (chg->usb_icl_change_irq_enabled) {
+			disable_irq_nosync(irq);
+			chg->usb_icl_change_irq_enabled = false;
+		}
+	}
+
+	rc = smblib_masked_write(chg, USBIN_CMD_IL_REG, USBIN_SUSPEND_BIT,
+				 suspend ? USBIN_SUSPEND_BIT : 0);
+	if (rc < 0)
+		smblib_err(chg, "Couldn't write %s to USBIN_SUSPEND_BIT rc=%d\n",
+			suspend ? "suspend" : "resume", rc);
+
+	if (!suspend && irq) {
+		if (!chg->usb_icl_change_irq_enabled) {
+			enable_irq(irq);
+			chg->usb_icl_change_irq_enabled = true;
+		}
+	}
+
+	return rc;
+}
+
+int smblib_set_dc_suspend(struct smb_charger *chg, bool suspend)
+{
+	int rc = 0;
+
+	rc = smblib_masked_write(chg, DCIN_CMD_IL_REG, DCIN_SUSPEND_BIT,
+				 suspend ? DCIN_SUSPEND_BIT : 0);
+	if (rc < 0)
+		smblib_err(chg, "Couldn't write %s to DCIN_SUSPEND_BIT rc=%d\n",
+			suspend ? "suspend" : "resume", rc);
+
+	return rc;
+}
+
+static int smblib_set_adapter_allowance(struct smb_charger *chg,
+					u8 allowed_voltage)
+{
+	int rc = 0;
+
+	/* PM660 supports at most 9V */
+	if (chg->smb_version == PM660_SUBTYPE) {
+		switch (allowed_voltage) {
+		case USBIN_ADAPTER_ALLOW_12V:
+		case USBIN_ADAPTER_ALLOW_9V_TO_12V:
+			allowed_voltage = USBIN_ADAPTER_ALLOW_9V;
+			break;
+		case USBIN_ADAPTER_ALLOW_5V_OR_12V:
+		case USBIN_ADAPTER_ALLOW_5V_OR_9V_TO_12V:
+			allowed_voltage = USBIN_ADAPTER_ALLOW_5V_OR_9V;
+			break;
+		case USBIN_ADAPTER_ALLOW_5V_TO_12V:
+			allowed_voltage = USBIN_ADAPTER_ALLOW_5V_TO_9V;
+			break;
+		}
+	}
+
+	rc = smblib_write(chg, USBIN_ADAPTER_ALLOW_CFG_REG, allowed_voltage);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't write 0x%02x to USBIN_ADAPTER_ALLOW_CFG rc=%d\n",
+			allowed_voltage, rc);
+		return rc;
+	}
+
+	return rc;
+}
+
+#define MICRO_5V	5000000
+#define MICRO_9V	9000000
+#define MICRO_12V	12000000
+static int smblib_set_usb_pd_allowed_voltage(struct smb_charger *chg,
+					int min_allowed_uv, int max_allowed_uv)
+{
+	int rc;
+	u8 allowed_voltage;
+
+	if (min_allowed_uv == MICRO_5V && max_allowed_uv == MICRO_5V) {
+		allowed_voltage = USBIN_ADAPTER_ALLOW_5V;
+		smblib_set_opt_freq_buck(chg, chg->chg_freq.freq_5V);
+	} else if (min_allowed_uv == MICRO_9V && max_allowed_uv == MICRO_9V) {
+		allowed_voltage = USBIN_ADAPTER_ALLOW_9V;
+		smblib_set_opt_freq_buck(chg, chg->chg_freq.freq_9V);
+	} else if (min_allowed_uv == MICRO_12V && max_allowed_uv == MICRO_12V) {
+		allowed_voltage = USBIN_ADAPTER_ALLOW_12V;
+		smblib_set_opt_freq_buck(chg, chg->chg_freq.freq_12V);
+	} else if (min_allowed_uv < MICRO_9V && max_allowed_uv <= MICRO_9V) {
+		allowed_voltage = USBIN_ADAPTER_ALLOW_5V_TO_9V;
+	} else if (min_allowed_uv < MICRO_9V && max_allowed_uv <= MICRO_12V) {
+		allowed_voltage = USBIN_ADAPTER_ALLOW_5V_TO_12V;
+	} else if (min_allowed_uv < MICRO_12V && max_allowed_uv <= MICRO_12V) {
+		allowed_voltage = USBIN_ADAPTER_ALLOW_9V_TO_12V;
+	} else {
+		smblib_err(chg, "invalid allowed voltage [%d, %d]\n",
+			min_allowed_uv, max_allowed_uv);
+		return -EINVAL;
+	}
+
+	rc = smblib_set_adapter_allowance(chg, allowed_voltage);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't configure adapter allowance rc=%d\n",
+				rc);
+		return rc;
+	}
+
+	return rc;
+}
+
+/********************
+ * HELPER FUNCTIONS *
+ ********************/
+static int smblib_request_dpdm(struct smb_charger *chg, bool enable)
+{
+	int rc = 0;
+
+	/* fetch the DPDM regulator */
+	if (!chg->dpdm_reg && of_get_property(chg->dev->of_node,
+				"dpdm-supply", NULL)) {
+		chg->dpdm_reg = devm_regulator_get(chg->dev, "dpdm");
+		if (IS_ERR(chg->dpdm_reg)) {
+			rc = PTR_ERR(chg->dpdm_reg);
+			smblib_err(chg, "Couldn't get dpdm regulator rc=%d\n",
+					rc);
+			chg->dpdm_reg = NULL;
+			return rc;
+		}
+	}
+
+	if (enable) {
+		if (chg->dpdm_reg && !regulator_is_enabled(chg->dpdm_reg)) {
+			smblib_dbg(chg, PR_MISC, "enabling DPDM regulator\n");
+			rc = regulator_enable(chg->dpdm_reg);
+			if (rc < 0)
+				smblib_err(chg,
+					"Couldn't enable dpdm regulator rc=%d\n",
+					rc);
+		}
+	} else {
+		if (chg->dpdm_reg && regulator_is_enabled(chg->dpdm_reg)) {
+			smblib_dbg(chg, PR_MISC, "disabling DPDM regulator\n");
+			rc = regulator_disable(chg->dpdm_reg);
+			if (rc < 0)
+				smblib_err(chg,
+					"Couldn't disable dpdm regulator rc=%d\n",
+					rc);
+		}
+	}
+
+	return rc;
+}
+
+static void smblib_rerun_apsd(struct smb_charger *chg)
+{
+	int rc;
+
+	smblib_dbg(chg, PR_MISC, "re-running APSD\n");
+	if (chg->wa_flags & QC_AUTH_INTERRUPT_WA_BIT) {
+		rc = smblib_masked_write(chg,
+				USBIN_SOURCE_CHANGE_INTRPT_ENB_REG,
+				AUTH_IRQ_EN_CFG_BIT, AUTH_IRQ_EN_CFG_BIT);
+		if (rc < 0)
+			smblib_err(chg, "Couldn't enable HVDCP auth IRQ rc=%d\n",
+									rc);
+	}
+
+	rc = smblib_masked_write(chg, CMD_APSD_REG,
+				APSD_RERUN_BIT, APSD_RERUN_BIT);
+	if (rc < 0)
+		smblib_err(chg, "Couldn't re-run APSD rc=%d\n", rc);
+}
+
+static const struct apsd_result *smblib_update_usb_type(struct smb_charger *chg)
+{
+	const struct apsd_result *apsd_result = smblib_get_apsd_result(chg);
+
+	/* if PD is active, APSD is disabled so won't have a valid result */
+	if (chg->pd_active) {
+		chg->real_charger_type = POWER_SUPPLY_TYPE_USB_PD;
+	} else {
+		/*
+		 * Update the real charger type only if it is not a FLOAT
+		 * charger that was already detected as an SDP.
+		 */
+		if (!(apsd_result->pst == POWER_SUPPLY_TYPE_USB_FLOAT &&
+			chg->real_charger_type == POWER_SUPPLY_TYPE_USB))
+			chg->real_charger_type = apsd_result->pst;
+	}
+
+	smblib_dbg(chg, PR_MISC, "APSD=%s PD=%d\n",
+					apsd_result->name, chg->pd_active);
+	return apsd_result;
+}
+
+static int smblib_notifier_call(struct notifier_block *nb,
+		unsigned long ev, void *v)
+{
+	struct power_supply *psy = v;
+	struct smb_charger *chg = container_of(nb, struct smb_charger, nb);
+
+	if (!strcmp(psy->desc->name, "bms")) {
+		if (!chg->bms_psy)
+			chg->bms_psy = psy;
+		if (ev == PSY_EVENT_PROP_CHANGED)
+			schedule_work(&chg->bms_update_work);
+	}
+
+	if (!chg->pl.psy && !strcmp(psy->desc->name, "parallel"))
+		chg->pl.psy = psy;
+
+	return NOTIFY_OK;
+}
+
+static int smblib_register_notifier(struct smb_charger *chg)
+{
+	int rc;
+
+	chg->nb.notifier_call = smblib_notifier_call;
+	rc = power_supply_reg_notifier(&chg->nb);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't register psy notifier rc = %d\n", rc);
+		return rc;
+	}
+
+	return 0;
+}
+
+int smblib_mapping_soc_from_field_value(struct smb_chg_param *param,
+					     int val_u, u8 *val_raw)
+{
+	if (val_u > param->max_u || val_u < param->min_u)
+		return -EINVAL;
+
+	*val_raw = val_u << 1;
+
+	return 0;
+}
+
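+/*
+ * The CC delta register folds a signed value into an unsigned field:
+ * raw values decoding above max_u represent negative deltas, hence the
+ * 2 * max_u adjustments in the helpers below.
+ */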
+int smblib_mapping_cc_delta_to_field_value(struct smb_chg_param *param,
+					   u8 val_raw)
+{
+	int val_u  = val_raw * param->step_u + param->min_u;
+
+	if (val_u > param->max_u)
+		val_u -= param->max_u * 2;
+
+	return val_u;
+}
+
+int smblib_mapping_cc_delta_from_field_value(struct smb_chg_param *param,
+					     int val_u, u8 *val_raw)
+{
+	if (val_u > param->max_u || val_u < param->min_u - param->max_u)
+		return -EINVAL;
+
+	val_u += param->max_u * 2 - param->min_u;
+	val_u %= param->max_u * 2;
+	*val_raw = val_u / param->step_u;
+
+	return 0;
+}
+
+static void smblib_uusb_removal(struct smb_charger *chg)
+{
+	int rc;
+	struct smb_irq_data *data;
+	struct storm_watch *wdata;
+
+	cancel_delayed_work_sync(&chg->pl_enable_work);
+
+	rc = smblib_request_dpdm(chg, false);
+	if (rc < 0)
+		smblib_err(chg, "Couldn't to disable DPDM rc=%d\n", rc);
+
+	if (chg->wa_flags & BOOST_BACK_WA) {
+		data = chg->irq_info[SWITCH_POWER_OK_IRQ].irq_data;
+		if (data) {
+			wdata = &data->storm_data;
+			update_storm_count(wdata, WEAK_CHG_STORM_COUNT);
+			vote(chg->usb_icl_votable, BOOST_BACK_VOTER, false, 0);
+			vote(chg->usb_icl_votable, WEAK_CHARGER_VOTER,
+					false, 0);
+		}
+	}
+	vote(chg->pl_disable_votable, PL_DELAY_VOTER, true, 0);
+	vote(chg->awake_votable, PL_DELAY_VOTER, false, 0);
+
+	/* reset both usbin current and voltage votes */
+	vote(chg->pl_enable_votable_indirect, USBIN_I_VOTER, false, 0);
+	vote(chg->pl_enable_votable_indirect, USBIN_V_VOTER, false, 0);
+	vote(chg->usb_icl_votable, SW_QC3_VOTER, false, 0);
+	vote(chg->usb_icl_votable, USBIN_USBIN_BOOST_VOTER, false, 0);
+	vote(chg->hvdcp_hw_inov_dis_votable, OV_VOTER, false, 0);
+
+	cancel_delayed_work_sync(&chg->hvdcp_detect_work);
+
+	if (chg->wa_flags & QC_AUTH_INTERRUPT_WA_BIT) {
+		/* re-enable AUTH_IRQ_EN_CFG_BIT */
+		rc = smblib_masked_write(chg,
+				USBIN_SOURCE_CHANGE_INTRPT_ENB_REG,
+				AUTH_IRQ_EN_CFG_BIT, AUTH_IRQ_EN_CFG_BIT);
+		if (rc < 0)
+			smblib_err(chg,
+				"Couldn't enable QC auth setting rc=%d\n", rc);
+	}
+
+	/* reconfigure allowed voltage for HVDCP */
+	rc = smblib_set_adapter_allowance(chg,
+			USBIN_ADAPTER_ALLOW_5V_OR_9V_TO_12V);
+	if (rc < 0)
+		smblib_err(chg, "Couldn't set USBIN_ADAPTER_ALLOW_5V_OR_9V_TO_12V rc=%d\n",
+			rc);
+
+	chg->voltage_min_uv = MICRO_5V;
+	chg->voltage_max_uv = MICRO_5V;
+	chg->usb_icl_delta_ua = 0;
+	chg->pulse_cnt = 0;
+	chg->uusb_apsd_rerun_done = false;
+
+	/* clear USB ICL vote for USB_PSY_VOTER */
+	rc = vote(chg->usb_icl_votable, USB_PSY_VOTER, false, 0);
+	if (rc < 0)
+		smblib_err(chg, "Couldn't un-vote for USB ICL rc=%d\n", rc);
+
+	/* clear USB ICL vote for DCP_VOTER */
+	rc = vote(chg->usb_icl_votable, DCP_VOTER, false, 0);
+	if (rc < 0)
+		smblib_err(chg,
+			"Couldn't un-vote DCP from USB ICL rc=%d\n", rc);
+}
+
+void smblib_suspend_on_debug_battery(struct smb_charger *chg)
+{
+	int rc;
+	union power_supply_propval val;
+
+	if (!chg->suspend_input_on_debug_batt)
+		return;
+
+	rc = power_supply_get_property(chg->bms_psy,
+			POWER_SUPPLY_PROP_DEBUG_BATTERY, &val);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't get debug battery prop rc=%d\n", rc);
+		return;
+	}
+
+	vote(chg->usb_icl_votable, DEBUG_BOARD_VOTER, val.intval, 0);
+	vote(chg->dc_suspend_votable, DEBUG_BOARD_VOTER, val.intval, 0);
+	if (val.intval)
+		pr_info("Input suspended: Fake battery\n");
+}
+
+int smblib_rerun_apsd_if_required(struct smb_charger *chg)
+{
+	union power_supply_propval val;
+	int rc;
+
+	rc = smblib_get_prop_usb_present(chg, &val);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't get usb present rc = %d\n", rc);
+		return rc;
+	}
+
+	if (!val.intval)
+		return 0;
+
+	rc = smblib_request_dpdm(chg, true);
+	if (rc < 0)
+		smblib_err(chg, "Couldn't to enable DPDM rc=%d\n", rc);
+
+	chg->uusb_apsd_rerun_done = true;
+	smblib_rerun_apsd(chg);
+
+	return 0;
+}
+
+static int smblib_get_hw_pulse_cnt(struct smb_charger *chg, int *count)
+{
+	int rc;
+	u8 val[2];
+
+	switch (chg->smb_version) {
+	case PMI8998_SUBTYPE:
+		rc = smblib_read(chg, QC_PULSE_COUNT_STATUS_REG, val);
+		if (rc) {
+			pr_err("failed to read QC_PULSE_COUNT_STATUS_REG rc=%d\n",
+					rc);
+			return rc;
+		}
+		*count = val[0] & QC_PULSE_COUNT_MASK;
+		break;
+	case PM660_SUBTYPE:
+		rc = smblib_multibyte_read(chg,
+				QC_PULSE_COUNT_STATUS_1_REG, val, 2);
+		if (rc) {
+			pr_err("failed to read QC_PULSE_COUNT_STATUS_1_REG rc=%d\n",
+					rc);
+			return rc;
+		}
+		*count = (val[1] << 8) | val[0];
+		break;
+	default:
+		smblib_dbg(chg, PR_PARALLEL, "unknown SMB chip %d\n",
+				chg->smb_version);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int smblib_get_pulse_cnt(struct smb_charger *chg, int *count)
+{
+	int rc;
+
+	/* Use software based pulse count if HW INOV is disabled */
+	if (get_effective_result(chg->hvdcp_hw_inov_dis_votable) > 0) {
+		*count = chg->pulse_cnt;
+		return 0;
+	}
+
+	/* Use h/w pulse count if autonomous mode is enabled */
+	rc = smblib_get_hw_pulse_cnt(chg, count);
+	if (rc < 0)
+		smblib_err(chg, "failed to read h/w pulse count rc=%d\n", rc);
+
+	return rc;
+}
+
+#define USBIN_25MA	25000
+#define USBIN_100MA	100000
+#define USBIN_150MA	150000
+#define USBIN_500MA	500000
+#define USBIN_900MA	900000
+
+static int set_sdp_current(struct smb_charger *chg, int icl_ua)
+{
+	int rc;
+	u8 icl_options;
+	const struct apsd_result *apsd_result = smblib_get_apsd_result(chg);
+
+	/* power source is SDP */
+	switch (icl_ua) {
+	case USBIN_100MA:
+		/* USB 2.0 100mA */
+		icl_options = 0;
+		break;
+	case USBIN_150MA:
+		/* USB 3.0 150mA */
+		icl_options = CFG_USB3P0_SEL_BIT;
+		break;
+	case USBIN_500MA:
+		/* USB 2.0 500mA */
+		icl_options = USB51_MODE_BIT;
+		break;
+	case USBIN_900MA:
+		/* USB 3.0 900mA */
+		icl_options = CFG_USB3P0_SEL_BIT | USB51_MODE_BIT;
+		break;
+	default:
+		smblib_err(chg, "ICL %duA isn't supported for SDP\n", icl_ua);
+		return -EINVAL;
+	}
+
+	if (chg->real_charger_type == POWER_SUPPLY_TYPE_USB &&
+		apsd_result->pst == POWER_SUPPLY_TYPE_USB_FLOAT) {
+		/*
+		 * If an SDP was initially detected as a FLOAT charger,
+		 * rewrite the float charger configuration to SDP.
+		 */
+		rc = smblib_masked_write(chg, USBIN_OPTIONS_2_CFG_REG,
+			FORCE_FLOAT_SDP_CFG_BIT, FORCE_FLOAT_SDP_CFG_BIT);
+		if (rc < 0) {
+			smblib_err(chg, "Couldn't set float ICL options rc=%d\n",
+						rc);
+			return rc;
+		}
+	}
+
+	rc = smblib_masked_write(chg, USBIN_ICL_OPTIONS_REG,
+		CFG_USB3P0_SEL_BIT | USB51_MODE_BIT, icl_options);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't set ICL options rc=%d\n", rc);
+		return rc;
+	}
+
+	return rc;
+}
+
+static int get_sdp_current(struct smb_charger *chg, int *icl_ua)
+{
+	int rc;
+	u8 icl_options;
+	bool usb3 = false;
+
+	rc = smblib_read(chg, USBIN_ICL_OPTIONS_REG, &icl_options);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't get ICL options rc=%d\n", rc);
+		return rc;
+	}
+
+	usb3 = (icl_options & CFG_USB3P0_SEL_BIT);
+
+	if (icl_options & USB51_MODE_BIT)
+		*icl_ua = usb3 ? USBIN_900MA : USBIN_500MA;
+	else
+		*icl_ua = usb3 ? USBIN_150MA : USBIN_100MA;
+
+	return rc;
+}
+
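+/*
+ * Program the USB input current limit. Requests of 25mA or less suspend
+ * the input entirely; an SDP behind a default-Rp source is programmed
+ * through the ICL_OPTIONS encoding, everything else through the
+ * high-current ICL register. The override bit is enforced only where the
+ * HW defaults must not apply, and the input is unsuspended last.
+ */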
+int smblib_set_icl_current(struct smb_charger *chg, int icl_ua)
+{
+	int rc = 0;
+	bool override;
+
+	/* suspend and return if 25mA or less is requested */
+	if (icl_ua < USBIN_25MA)
+		return smblib_set_usb_suspend(chg, true);
+
+	if (icl_ua == INT_MAX)
+		goto override_suspend_config;
+
+	/* configure current */
+	if (chg->typec_mode == POWER_SUPPLY_TYPEC_SOURCE_DEFAULT
+		&& (chg->real_charger_type == POWER_SUPPLY_TYPE_USB)) {
+		rc = set_sdp_current(chg, icl_ua);
+		if (rc < 0) {
+			smblib_err(chg, "Couldn't set SDP ICL rc=%d\n", rc);
+			goto enable_icl_changed_interrupt;
+		}
+	} else {
+		set_sdp_current(chg, 100000);
+		rc = smblib_set_charge_param(chg, &chg->param.usb_icl, icl_ua);
+		if (rc < 0) {
+			smblib_err(chg, "Couldn't set HC ICL rc=%d\n", rc);
+			goto enable_icl_changed_interrupt;
+		}
+	}
+
+override_suspend_config:
+	/* determine if override needs to be enforced */
+	override = true;
+	if (icl_ua == INT_MAX) {
+		/* remove override if no voters - hw defaults are desired */
+		override = false;
+	} else if (chg->typec_mode == POWER_SUPPLY_TYPEC_SOURCE_DEFAULT) {
+		if (chg->real_charger_type == POWER_SUPPLY_TYPE_USB)
+			/* For std cable with type = SDP never override */
+			override = false;
+		else if (chg->real_charger_type == POWER_SUPPLY_TYPE_USB_CDP
+			&& icl_ua == 1500000)
+			/*
+			 * For std cable with type = CDP override only if
+			 * current is not 1500mA
+			 */
+			override = false;
+	}
+
+	/* enforce override */
+	rc = smblib_masked_write(chg, USBIN_ICL_OPTIONS_REG,
+		USBIN_MODE_CHG_BIT, override ? USBIN_MODE_CHG_BIT : 0);
+	if (rc < 0)
+		smblib_err(chg, "Couldn't set USBIN_MODE_CHG rc=%d\n", rc);
+
+	rc = smblib_icl_override(chg, override);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't set ICL override rc=%d\n", rc);
+		goto enable_icl_changed_interrupt;
+	}
+
+	/* unsuspend after configuring current and override */
+	rc = smblib_set_usb_suspend(chg, false);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't resume input rc=%d\n", rc);
+		goto enable_icl_changed_interrupt;
+	}
+
+enable_icl_changed_interrupt:
+	return rc;
+}
+
+int smblib_get_icl_current(struct smb_charger *chg, int *icl_ua)
+{
+	int rc = 0;
+	u8 load_cfg;
+	bool override;
+
+	if ((chg->typec_mode == POWER_SUPPLY_TYPEC_SOURCE_DEFAULT
+		|| chg->micro_usb_mode)
+		&& (chg->usb_psy_desc.type == POWER_SUPPLY_TYPE_USB)) {
+		rc = get_sdp_current(chg, icl_ua);
+		if (rc < 0) {
+			smblib_err(chg, "Couldn't get SDP ICL rc=%d\n", rc);
+			return rc;
+		}
+	} else {
+		rc = smblib_read(chg, USBIN_LOAD_CFG_REG, &load_cfg);
+		if (rc < 0) {
+			smblib_err(chg, "Couldn't get load cfg rc=%d\n", rc);
+			return rc;
+		}
+		override = load_cfg & ICL_OVERRIDE_AFTER_APSD_BIT;
+		if (!override) {
+			/* HW defaults in effect - report no SW limit */
+			*icl_ua = INT_MAX;
+			return 0;
+		}
+
+		/* override is set */
+		rc = smblib_get_charge_param(chg, &chg->param.usb_icl, icl_ua);
+		if (rc < 0) {
+			smblib_err(chg, "Couldn't get HC ICL rc=%d\n", rc);
+			return rc;
+		}
+	}
+
+	return 0;
+}
+
+/*********************
+ * VOTABLE CALLBACKS *
+ *********************/
+
+static int smblib_dc_suspend_vote_callback(struct votable *votable, void *data,
+			int suspend, const char *client)
+{
+	struct smb_charger *chg = data;
+
+	/* resume input if suspend is invalid */
+	if (suspend < 0)
+		suspend = 0;
+
+	return smblib_set_dc_suspend(chg, (bool)suspend);
+}
+
+static int smblib_dc_icl_vote_callback(struct votable *votable, void *data,
+			int icl_ua, const char *client)
+{
+	struct smb_charger *chg = data;
+	int rc = 0;
+	bool suspend;
+
+	if (icl_ua < 0) {
+		smblib_dbg(chg, PR_MISC, "No Voter hence suspending\n");
+		icl_ua = 0;
+	}
+
+	suspend = (icl_ua < USBIN_25MA);
+	if (suspend)
+		goto suspend;
+
+	rc = smblib_set_charge_param(chg, &chg->param.dc_icl, icl_ua);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't set DC input current limit rc=%d\n",
+			rc);
+		return rc;
+	}
+
+suspend:
+	rc = vote(chg->dc_suspend_votable, USER_VOTER, suspend, 0);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't vote to %s DC rc=%d\n",
+			suspend ? "suspend" : "resume", rc);
+		return rc;
+	}
+	return rc;
+}
+
+static int smblib_pd_disallowed_votable_indirect_callback(
+	struct votable *votable, void *data, int disallowed, const char *client)
+{
+	struct smb_charger *chg = data;
+	int rc;
+
+	rc = vote(chg->pd_allowed_votable, PD_DISALLOWED_INDIRECT_VOTER,
+		!disallowed, 0);
+
+	return rc;
+}
+
+static int smblib_awake_vote_callback(struct votable *votable, void *data,
+			int awake, const char *client)
+{
+	struct smb_charger *chg = data;
+
+	if (awake)
+		pm_stay_awake(chg->dev);
+	else
+		pm_relax(chg->dev);
+
+	return 0;
+}
+
+static int smblib_chg_disable_vote_callback(struct votable *votable, void *data,
+			int chg_disable, const char *client)
+{
+	struct smb_charger *chg = data;
+	int rc;
+
+	rc = smblib_masked_write(chg, CHARGING_ENABLE_CMD_REG,
+				 CHARGING_ENABLE_CMD_BIT,
+				 chg_disable ? 0 : CHARGING_ENABLE_CMD_BIT);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't %s charging rc=%d\n",
+			chg_disable ? "disable" : "enable", rc);
+		return rc;
+	}
+
+	return 0;
+}
+
+static int smblib_hvdcp_enable_vote_callback(struct votable *votable,
+			void *data,
+			int hvdcp_enable, const char *client)
+{
+	struct smb_charger *chg = data;
+	int rc;
+	u8 val = HVDCP_AUTH_ALG_EN_CFG_BIT | HVDCP_EN_BIT;
+	u8 stat;
+
+	/* vote to enable/disable HW autonomous INOV */
+	vote(chg->hvdcp_hw_inov_dis_votable, client, !hvdcp_enable, 0);
+
+	/*
+	 * Disable the autonomous bit and auth bit for disabling hvdcp.
+	 * This ensures only qc 2.0 detection runs but no vbus
+	 * negotiation happens.
+	 */
+	if (!hvdcp_enable)
+		val = HVDCP_EN_BIT;
+
+	rc = smblib_masked_write(chg, USBIN_OPTIONS_1_CFG_REG,
+				 HVDCP_EN_BIT | HVDCP_AUTH_ALG_EN_CFG_BIT,
+				 val);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't %s hvdcp rc=%d\n",
+			hvdcp_enable ? "enable" : "disable", rc);
+		return rc;
+	}
+
+	rc = smblib_read(chg, APSD_STATUS_REG, &stat);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't read APSD status rc=%d\n", rc);
+		return rc;
+	}
+
+	/* re-run APSD if HVDCP was detected */
+	if (stat & QC_CHARGER_BIT)
+		smblib_rerun_apsd(chg);
+
+	return 0;
+}
+
+static int smblib_hvdcp_disable_indirect_vote_callback(struct votable *votable,
+			void *data, int hvdcp_disable, const char *client)
+{
+	struct smb_charger *chg = data;
+
+	vote(chg->hvdcp_enable_votable, HVDCP_INDIRECT_VOTER,
+			!hvdcp_disable, 0);
+
+	return 0;
+}
+
+static int smblib_apsd_disable_vote_callback(struct votable *votable,
+			void *data,
+			int apsd_disable, const char *client)
+{
+	struct smb_charger *chg = data;
+	int rc;
+
+	if (apsd_disable) {
+		rc = smblib_masked_write(chg, USBIN_OPTIONS_1_CFG_REG,
+							AUTO_SRC_DETECT_BIT,
+							0);
+		if (rc < 0) {
+			smblib_err(chg, "Couldn't disable APSD rc=%d\n", rc);
+			return rc;
+		}
+	} else {
+		rc = smblib_masked_write(chg, USBIN_OPTIONS_1_CFG_REG,
+							AUTO_SRC_DETECT_BIT,
+							AUTO_SRC_DETECT_BIT);
+		if (rc < 0) {
+			smblib_err(chg, "Couldn't enable APSD rc=%d\n", rc);
+			return rc;
+		}
+	}
+
+	return 0;
+}
+
+static int smblib_hvdcp_hw_inov_dis_vote_callback(struct votable *votable,
+				void *data, int disable, const char *client)
+{
+	struct smb_charger *chg = data;
+	int rc;
+
+	if (disable) {
+		/*
+		 * The pulse count register gets zeroed when autonomous mode
+		 * is disabled, so cache the count in pulse_cnt first.
+		 */
+		rc = smblib_get_hw_pulse_cnt(chg, &chg->pulse_cnt);
+		if (rc < 0) {
+			pr_err("failed to read QC_PULSE_COUNT_STATUS_REG rc=%d\n",
+					rc);
+			return rc;
+		}
+	}
+
+	rc = smblib_masked_write(chg, USBIN_OPTIONS_1_CFG_REG,
+			HVDCP_AUTONOMOUS_MODE_EN_CFG_BIT,
+			disable ? 0 : HVDCP_AUTONOMOUS_MODE_EN_CFG_BIT);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't %s hvdcp rc=%d\n",
+				disable ? "disable" : "enable", rc);
+		return rc;
+	}
+
+	return rc;
+}
+
+static int smblib_usb_irq_enable_vote_callback(struct votable *votable,
+				void *data, int enable, const char *client)
+{
+	struct smb_charger *chg = data;
+
+	if (!chg->irq_info[INPUT_CURRENT_LIMIT_IRQ].irq ||
+				!chg->irq_info[HIGH_DUTY_CYCLE_IRQ].irq)
+		return 0;
+
+	if (enable) {
+		enable_irq(chg->irq_info[INPUT_CURRENT_LIMIT_IRQ].irq);
+		enable_irq(chg->irq_info[HIGH_DUTY_CYCLE_IRQ].irq);
+	} else {
+		disable_irq(chg->irq_info[INPUT_CURRENT_LIMIT_IRQ].irq);
+		disable_irq(chg->irq_info[HIGH_DUTY_CYCLE_IRQ].irq);
+	}
+
+	return 0;
+}
+
+static int smblib_typec_irq_disable_vote_callback(struct votable *votable,
+				void *data, int disable, const char *client)
+{
+	struct smb_charger *chg = data;
+
+	if (!chg->irq_info[TYPE_C_CHANGE_IRQ].irq)
+		return 0;
+
+	if (disable)
+		disable_irq_nosync(chg->irq_info[TYPE_C_CHANGE_IRQ].irq);
+	else
+		enable_irq(chg->irq_info[TYPE_C_CHANGE_IRQ].irq);
+
+	return 0;
+}
+
+/*******************
+ * VCONN REGULATOR *
+ *******************/
+
+#define MAX_OTG_SS_TRIES 2
+static int _smblib_vconn_regulator_enable(struct regulator_dev *rdev)
+{
+	struct smb_charger *chg = rdev_get_drvdata(rdev);
+	int rc = 0;
+	u8 val;
+
+	/*
+	 * When enabling VCONN using the command register the CC pin must be
+	 * selected. VCONN should be supplied to the inactive CC pin hence using
+	 * the opposite of the CC_ORIENTATION_BIT.
+	 */
+	smblib_dbg(chg, PR_OTG, "enabling VCONN\n");
+	val = chg->typec_status[3] &
+			CC_ORIENTATION_BIT ? 0 : VCONN_EN_ORIENTATION_BIT;
+	rc = smblib_masked_write(chg, TYPE_C_INTRPT_ENB_SOFTWARE_CTRL_REG,
+				 VCONN_EN_VALUE_BIT | VCONN_EN_ORIENTATION_BIT,
+				 VCONN_EN_VALUE_BIT | val);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't enable vconn setting rc=%d\n", rc);
+		return rc;
+	}
+
+	return rc;
+}
+
+int smblib_vconn_regulator_enable(struct regulator_dev *rdev)
+{
+	struct smb_charger *chg = rdev_get_drvdata(rdev);
+	int rc = 0;
+
+	mutex_lock(&chg->vconn_oc_lock);
+	if (chg->vconn_en)
+		goto unlock;
+
+	rc = _smblib_vconn_regulator_enable(rdev);
+	if (rc >= 0)
+		chg->vconn_en = true;
+
+unlock:
+	mutex_unlock(&chg->vconn_oc_lock);
+	return rc;
+}
+
+static int _smblib_vconn_regulator_disable(struct regulator_dev *rdev)
+{
+	struct smb_charger *chg = rdev_get_drvdata(rdev);
+	int rc = 0;
+
+	smblib_dbg(chg, PR_OTG, "disabling VCONN\n");
+	rc = smblib_masked_write(chg, TYPE_C_INTRPT_ENB_SOFTWARE_CTRL_REG,
+				 VCONN_EN_VALUE_BIT, 0);
+	if (rc < 0)
+		smblib_err(chg, "Couldn't disable vconn regulator rc=%d\n", rc);
+
+	return rc;
+}
+
+int smblib_vconn_regulator_disable(struct regulator_dev *rdev)
+{
+	struct smb_charger *chg = rdev_get_drvdata(rdev);
+	int rc = 0;
+
+	mutex_lock(&chg->vconn_oc_lock);
+	if (!chg->vconn_en)
+		goto unlock;
+
+	rc = _smblib_vconn_regulator_disable(rdev);
+	if (rc >= 0)
+		chg->vconn_en = false;
+
+unlock:
+	mutex_unlock(&chg->vconn_oc_lock);
+	return rc;
+}
+
+int smblib_vconn_regulator_is_enabled(struct regulator_dev *rdev)
+{
+	struct smb_charger *chg = rdev_get_drvdata(rdev);
+	int ret;
+
+	mutex_lock(&chg->vconn_oc_lock);
+	ret = chg->vconn_en;
+	mutex_unlock(&chg->vconn_oc_lock);
+	return ret;
+}
+
+/*****************
+ * OTG REGULATOR *
+ *****************/
+#define MAX_RETRY		15
+#define MIN_DELAY_US		2000
+#define MAX_DELAY_US		9000
+static int otg_current[] = {250000, 500000, 1000000, 1500000};
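+/*
+ * OTG enable workaround: walk up the otg_current[] table, enabling OTG
+ * at each limit and polling OTG_STATUS for BOOST_SOFTSTART_DONE with an
+ * increasing delay. On success the configured otg_cl_ua is restored; on
+ * timeout OTG is disabled and the next larger limit is tried.
+ */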
+static int smblib_enable_otg_wa(struct smb_charger *chg)
+{
+	u8 stat;
+	int rc, i, retry_count = 0, min_delay = MIN_DELAY_US;
+
+	for (i = 0; i < ARRAY_SIZE(otg_current); i++) {
+		smblib_dbg(chg, PR_OTG, "enabling OTG with %duA\n",
+						otg_current[i]);
+		rc = smblib_set_charge_param(chg, &chg->param.otg_cl,
+						otg_current[i]);
+		if (rc < 0) {
+			smblib_err(chg, "Couldn't set otg limit rc=%d\n", rc);
+			return rc;
+		}
+
+		rc = smblib_write(chg, CMD_OTG_REG, OTG_EN_BIT);
+		if (rc < 0) {
+			smblib_err(chg, "Couldn't enable OTG rc=%d\n", rc);
+			return rc;
+		}
+
+		retry_count = 0;
+		min_delay = MIN_DELAY_US;
+		do {
+			usleep_range(min_delay, min_delay + 100);
+			rc = smblib_read(chg, OTG_STATUS_REG, &stat);
+			if (rc < 0) {
+				smblib_err(chg, "Couldn't read OTG status rc=%d\n",
+							rc);
+				goto out;
+			}
+
+			if (stat & BOOST_SOFTSTART_DONE_BIT) {
+				rc = smblib_set_charge_param(chg,
+					&chg->param.otg_cl, chg->otg_cl_ua);
+				if (rc < 0) {
+					smblib_err(chg, "Couldn't set otg limit rc=%d\n",
+							rc);
+					goto out;
+				}
+				break;
+			}
+			/* increase the delay for following iterations */
+			if (retry_count > 5)
+				min_delay = MAX_DELAY_US;
+
+		} while (retry_count++ < MAX_RETRY);
+
+		if (retry_count >= MAX_RETRY) {
+			smblib_dbg(chg, PR_OTG, "OTG enable failed with %duA\n",
+								otg_current[i]);
+			rc = smblib_write(chg, CMD_OTG_REG, 0);
+			if (rc < 0) {
+				smblib_err(chg, "disable OTG rc=%d\n", rc);
+				goto out;
+			}
+		} else {
+			smblib_dbg(chg, PR_OTG, "OTG enabled\n");
+			return 0;
+		}
+	}
+
+	/* every current limit was tried without soft-start completing */
+	rc = -EINVAL;
+out:
+	smblib_write(chg, CMD_OTG_REG, 0);
+	return rc;
+}
+
+static int _smblib_vbus_regulator_enable(struct regulator_dev *rdev)
+{
+	struct smb_charger *chg = rdev_get_drvdata(rdev);
+	int rc;
+
+	smblib_dbg(chg, PR_OTG, "halt 1 in 8 mode\n");
+	rc = smblib_masked_write(chg, OTG_ENG_OTG_CFG_REG,
+				 ENG_BUCKBOOST_HALT1_8_MODE_BIT,
+				 ENG_BUCKBOOST_HALT1_8_MODE_BIT);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't set OTG_ENG_OTG_CFG_REG rc=%d\n",
+			rc);
+		return rc;
+	}
+
+	smblib_dbg(chg, PR_OTG, "enabling OTG\n");
+
+	if (chg->wa_flags & OTG_WA) {
+		rc = smblib_enable_otg_wa(chg);
+		if (rc < 0)
+			smblib_err(chg, "Couldn't enable OTG rc=%d\n", rc);
+	} else {
+		rc = smblib_write(chg, CMD_OTG_REG, OTG_EN_BIT);
+		if (rc < 0)
+			smblib_err(chg, "Couldn't enable OTG rc=%d\n", rc);
+	}
+
+	return rc;
+}
+
+int smblib_vbus_regulator_enable(struct regulator_dev *rdev)
+{
+	struct smb_charger *chg = rdev_get_drvdata(rdev);
+	int rc = 0;
+
+	mutex_lock(&chg->otg_oc_lock);
+	if (chg->otg_en)
+		goto unlock;
+
+	if (!chg->usb_icl_votable) {
+		chg->usb_icl_votable = find_votable("USB_ICL");
+
+		if (!chg->usb_icl_votable)
+			return -EINVAL;
+	}
+	vote(chg->usb_icl_votable, USBIN_USBIN_BOOST_VOTER, true, 0);
+
+	rc = _smblib_vbus_regulator_enable(rdev);
+	if (rc >= 0)
+		chg->otg_en = true;
+	else
+		vote(chg->usb_icl_votable, USBIN_USBIN_BOOST_VOTER, false, 0);
+
+unlock:
+	mutex_unlock(&chg->otg_oc_lock);
+	return rc;
+}
+
+static int _smblib_vbus_regulator_disable(struct regulator_dev *rdev)
+{
+	struct smb_charger *chg = rdev_get_drvdata(rdev);
+	int rc;
+
+	if (chg->wa_flags & OTG_WA) {
+		/* set OTG current limit to minimum value */
+		rc = smblib_set_charge_param(chg, &chg->param.otg_cl,
+						chg->param.otg_cl.min_u);
+		if (rc < 0) {
+			smblib_err(chg,
+				"Couldn't set otg current limit rc=%d\n", rc);
+			return rc;
+		}
+	}
+
+	smblib_dbg(chg, PR_OTG, "disabling OTG\n");
+	rc = smblib_write(chg, CMD_OTG_REG, 0);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't disable OTG regulator rc=%d\n", rc);
+		return rc;
+	}
+
+	smblib_dbg(chg, PR_OTG, "start 1 in 8 mode\n");
+	rc = smblib_masked_write(chg, OTG_ENG_OTG_CFG_REG,
+				 ENG_BUCKBOOST_HALT1_8_MODE_BIT, 0);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't set OTG_ENG_OTG_CFG_REG rc=%d\n", rc);
+		return rc;
+	}
+
+	return 0;
+}
+
+int smblib_vbus_regulator_disable(struct regulator_dev *rdev)
+{
+	struct smb_charger *chg = rdev_get_drvdata(rdev);
+	int rc = 0;
+
+	mutex_lock(&chg->otg_oc_lock);
+	if (!chg->otg_en)
+		goto unlock;
+
+	rc = _smblib_vbus_regulator_disable(rdev);
+	if (rc >= 0)
+		chg->otg_en = false;
+
+	if (chg->usb_icl_votable)
+		vote(chg->usb_icl_votable, USBIN_USBIN_BOOST_VOTER, false, 0);
+unlock:
+	mutex_unlock(&chg->otg_oc_lock);
+	return rc;
+}
+
+int smblib_vbus_regulator_is_enabled(struct regulator_dev *rdev)
+{
+	struct smb_charger *chg = rdev_get_drvdata(rdev);
+	int ret;
+
+	mutex_lock(&chg->otg_oc_lock);
+	ret = chg->otg_en;
+	mutex_unlock(&chg->otg_oc_lock);
+	return ret;
+}
+
+/********************
+ * BATT PSY GETTERS *
+ ********************/
+
+int smblib_get_prop_input_suspend(struct smb_charger *chg,
+				  union power_supply_propval *val)
+{
+	val->intval
+		= (get_client_vote(chg->usb_icl_votable, USER_VOTER) == 0)
+		 && get_client_vote(chg->dc_suspend_votable, USER_VOTER);
+	return 0;
+}
+
+int smblib_get_prop_batt_present(struct smb_charger *chg,
+				union power_supply_propval *val)
+{
+	int rc;
+	u8 stat;
+
+	rc = smblib_read(chg, BATIF_BASE + INT_RT_STS_OFFSET, &stat);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't read BATIF_INT_RT_STS rc=%d\n", rc);
+		return rc;
+	}
+
+	val->intval = !(stat & (BAT_THERM_OR_ID_MISSING_RT_STS_BIT
+					| BAT_TERMINAL_MISSING_RT_STS_BIT));
+
+	return rc;
+}
+
+int smblib_get_prop_batt_capacity(struct smb_charger *chg,
+				  union power_supply_propval *val)
+{
+	int rc = -EINVAL;
+
+	if (chg->fake_capacity >= 0) {
+		val->intval = chg->fake_capacity;
+		return 0;
+	}
+
+	if (chg->bms_psy)
+		rc = power_supply_get_property(chg->bms_psy,
+				POWER_SUPPLY_PROP_CAPACITY, val);
+	return rc;
+}
+
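+/*
+ * Battery status: with no valid input, TERMINATE/INHIBIT map to FULL and
+ * everything else to DISCHARGING. With input present the charge-state
+ * machine is mapped directly, then refined via STATUS_7: if none of the
+ * charging-enable bits are set (and Qnovo pulse-train is off), report
+ * NOT_CHARGING instead of CHARGING.
+ */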
+int smblib_get_prop_batt_status(struct smb_charger *chg,
+				union power_supply_propval *val)
+{
+	union power_supply_propval pval = {0, };
+	bool usb_online, dc_online, qnovo_en;
+	u8 stat, pt_en_cmd;
+	int rc;
+
+	rc = smblib_get_prop_usb_online(chg, &pval);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't get usb online property rc=%d\n",
+			rc);
+		return rc;
+	}
+	usb_online = (bool)pval.intval;
+
+	rc = smblib_get_prop_dc_online(chg, &pval);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't get dc online property rc=%d\n",
+			rc);
+		return rc;
+	}
+	dc_online = (bool)pval.intval;
+
+	rc = smblib_read(chg, BATTERY_CHARGER_STATUS_1_REG, &stat);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't read BATTERY_CHARGER_STATUS_1 rc=%d\n",
+			rc);
+		return rc;
+	}
+	stat = stat & BATTERY_CHARGER_STATUS_MASK;
+
+	if (!usb_online && !dc_online) {
+		switch (stat) {
+		case TERMINATE_CHARGE:
+		case INHIBIT_CHARGE:
+			val->intval = POWER_SUPPLY_STATUS_FULL;
+			break;
+		default:
+			val->intval = POWER_SUPPLY_STATUS_DISCHARGING;
+			break;
+		}
+		return rc;
+	}
+
+	switch (stat) {
+	case TRICKLE_CHARGE:
+	case PRE_CHARGE:
+	case FAST_CHARGE:
+	case FULLON_CHARGE:
+	case TAPER_CHARGE:
+		val->intval = POWER_SUPPLY_STATUS_CHARGING;
+		break;
+	case TERMINATE_CHARGE:
+	case INHIBIT_CHARGE:
+		val->intval = POWER_SUPPLY_STATUS_FULL;
+		break;
+	case DISABLE_CHARGE:
+		val->intval = POWER_SUPPLY_STATUS_NOT_CHARGING;
+		break;
+	default:
+		val->intval = POWER_SUPPLY_STATUS_UNKNOWN;
+		break;
+	}
+
+	if (val->intval != POWER_SUPPLY_STATUS_CHARGING)
+		return 0;
+
+	rc = smblib_read(chg, BATTERY_CHARGER_STATUS_7_REG, &stat);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't read BATTERY_CHARGER_STATUS_2 rc=%d\n",
+				rc);
+			return rc;
+	}
+
+	stat &= ENABLE_TRICKLE_BIT | ENABLE_PRE_CHARGING_BIT |
+		 ENABLE_FAST_CHARGING_BIT | ENABLE_FULLON_MODE_BIT;
+
+	rc = smblib_read(chg, QNOVO_PT_ENABLE_CMD_REG, &pt_en_cmd);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't read QNOVO_PT_ENABLE_CMD_REG rc=%d\n",
+				rc);
+		return rc;
+	}
+
+	qnovo_en = (bool)(pt_en_cmd & QNOVO_PT_ENABLE_CMD_BIT);
+
+	/* ignore stat7 when qnovo is enabled */
+	if (!qnovo_en && !stat)
+		val->intval = POWER_SUPPLY_STATUS_NOT_CHARGING;
+
+	return 0;
+}
+
+int smblib_get_prop_batt_charge_type(struct smb_charger *chg,
+				union power_supply_propval *val)
+{
+	int rc;
+	u8 stat;
+
+	rc = smblib_read(chg, BATTERY_CHARGER_STATUS_1_REG, &stat);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't read BATTERY_CHARGER_STATUS_1 rc=%d\n",
+			rc);
+		return rc;
+	}
+
+	switch (stat & BATTERY_CHARGER_STATUS_MASK) {
+	case TRICKLE_CHARGE:
+	case PRE_CHARGE:
+		val->intval = POWER_SUPPLY_CHARGE_TYPE_TRICKLE;
+		break;
+	case FAST_CHARGE:
+	case FULLON_CHARGE:
+		val->intval = POWER_SUPPLY_CHARGE_TYPE_FAST;
+		break;
+	case TAPER_CHARGE:
+		val->intval = POWER_SUPPLY_CHARGE_TYPE_TAPER;
+		break;
+	default:
+		val->intval = POWER_SUPPLY_CHARGE_TYPE_NONE;
+	}
+
+	return rc;
+}
+
+int smblib_get_prop_batt_health(struct smb_charger *chg,
+				union power_supply_propval *val)
+{
+	union power_supply_propval pval;
+	int rc;
+	int effective_fv_uv;
+	u8 stat;
+
+	rc = smblib_read(chg, BATTERY_CHARGER_STATUS_2_REG, &stat);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't read BATTERY_CHARGER_STATUS_2 rc=%d\n",
+			rc);
+		return rc;
+	}
+	smblib_dbg(chg, PR_REGISTER, "BATTERY_CHARGER_STATUS_2 = 0x%02x\n",
+		   stat);
+
+	if (stat & CHARGER_ERROR_STATUS_BAT_OV_BIT) {
+		rc = smblib_get_prop_batt_voltage_now(chg, &pval);
+		if (!rc) {
+			/*
+			 * If Vbatt is within 40mV above Vfloat, then don't
+			 * treat it as overvoltage.
+			 */
+			effective_fv_uv = get_effective_result(chg->fv_votable);
+			if (pval.intval >= effective_fv_uv + 40000) {
+				val->intval = POWER_SUPPLY_HEALTH_OVERVOLTAGE;
+				smblib_err(chg, "battery over-voltage vbat_fg = %duV, fv = %duV\n",
+						pval.intval, effective_fv_uv);
+				goto done;
+			}
+		}
+	}
+
+	if (stat & BAT_TEMP_STATUS_TOO_COLD_BIT)
+		val->intval = POWER_SUPPLY_HEALTH_COLD;
+	else if (stat & BAT_TEMP_STATUS_TOO_HOT_BIT)
+		val->intval = POWER_SUPPLY_HEALTH_OVERHEAT;
+	else if (stat & BAT_TEMP_STATUS_COLD_SOFT_LIMIT_BIT)
+		val->intval = POWER_SUPPLY_HEALTH_COOL;
+	else if (stat & BAT_TEMP_STATUS_HOT_SOFT_LIMIT_BIT)
+		val->intval = POWER_SUPPLY_HEALTH_WARM;
+	else
+		val->intval = POWER_SUPPLY_HEALTH_GOOD;
+
+done:
+	return rc;
+}
+
+int smblib_get_prop_system_temp_level(struct smb_charger *chg,
+				union power_supply_propval *val)
+{
+	val->intval = chg->system_temp_level;
+	return 0;
+}
+
+int smblib_get_prop_input_current_limited(struct smb_charger *chg,
+				union power_supply_propval *val)
+{
+	u8 stat;
+	int rc;
+
+	if (chg->fake_input_current_limited >= 0) {
+		val->intval = chg->fake_input_current_limited;
+		return 0;
+	}
+
+	rc = smblib_read(chg, AICL_STATUS_REG, &stat);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't read AICL_STATUS rc=%d\n", rc);
+		return rc;
+	}
+	val->intval = (stat & SOFT_ILIMIT_BIT) || chg->is_hdc;
+	return 0;
+}
+
+int smblib_get_prop_batt_voltage_now(struct smb_charger *chg,
+				     union power_supply_propval *val)
+{
+	int rc;
+
+	if (!chg->bms_psy)
+		return -EINVAL;
+
+	rc = power_supply_get_property(chg->bms_psy,
+				       POWER_SUPPLY_PROP_VOLTAGE_NOW, val);
+	return rc;
+}
+
+int smblib_get_prop_batt_current_now(struct smb_charger *chg,
+				     union power_supply_propval *val)
+{
+	int rc;
+
+	if (!chg->bms_psy)
+		return -EINVAL;
+
+	rc = power_supply_get_property(chg->bms_psy,
+				       POWER_SUPPLY_PROP_CURRENT_NOW, val);
+	return rc;
+}
+
+int smblib_get_prop_batt_temp(struct smb_charger *chg,
+			      union power_supply_propval *val)
+{
+	int rc;
+
+	if (!chg->bms_psy)
+		return -EINVAL;
+
+	rc = power_supply_get_property(chg->bms_psy,
+				       POWER_SUPPLY_PROP_TEMP, val);
+	return rc;
+}
+
+int smblib_get_prop_batt_charge_done(struct smb_charger *chg,
+					union power_supply_propval *val)
+{
+	int rc;
+	u8 stat;
+
+	rc = smblib_read(chg, BATTERY_CHARGER_STATUS_1_REG, &stat);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't read BATTERY_CHARGER_STATUS_1 rc=%d\n",
+			rc);
+		return rc;
+	}
+
+	stat = stat & BATTERY_CHARGER_STATUS_MASK;
+	val->intval = (stat == TERMINATE_CHARGE);
+	return 0;
+}
+
+int smblib_get_prop_charge_qnovo_enable(struct smb_charger *chg,
+				  union power_supply_propval *val)
+{
+	int rc;
+	u8 stat;
+
+	rc = smblib_read(chg, QNOVO_PT_ENABLE_CMD_REG, &stat);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't read QNOVO_PT_ENABLE_CMD rc=%d\n",
+			rc);
+		return rc;
+	}
+
+	val->intval = (bool)(stat & QNOVO_PT_ENABLE_CMD_BIT);
+	return 0;
+}
+
+int smblib_get_prop_batt_charge_counter(struct smb_charger *chg,
+				     union power_supply_propval *val)
+{
+	int rc;
+
+	if (!chg->bms_psy)
+		return -EINVAL;
+
+	rc = power_supply_get_property(chg->bms_psy,
+				       POWER_SUPPLY_PROP_CHARGE_COUNTER, val);
+	return rc;
+}
+
+/***********************
+ * BATTERY PSY SETTERS *
+ ***********************/
+
+int smblib_set_prop_input_suspend(struct smb_charger *chg,
+				  const union power_supply_propval *val)
+{
+	int rc;
+
+	/* vote 0mA when suspended */
+	rc = vote(chg->usb_icl_votable, USER_VOTER, (bool)val->intval, 0);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't vote to %s USB rc=%d\n",
+			(bool)val->intval ? "suspend" : "resume", rc);
+		return rc;
+	}
+
+	rc = vote(chg->dc_suspend_votable, USER_VOTER, (bool)val->intval, 0);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't vote to %s DC rc=%d\n",
+			(bool)val->intval ? "suspend" : "resume", rc);
+		return rc;
+	}
+
+	power_supply_changed(chg->batt_psy);
+	return rc;
+}
+
+int smblib_set_prop_batt_capacity(struct smb_charger *chg,
+				  const union power_supply_propval *val)
+{
+	chg->fake_capacity = val->intval;
+
+	power_supply_changed(chg->batt_psy);
+
+	return 0;
+}
+
+int smblib_set_prop_system_temp_level(struct smb_charger *chg,
+				const union power_supply_propval *val)
+{
+	if (val->intval < 0)
+		return -EINVAL;
+
+	if (chg->thermal_levels <= 0)
+		return -EINVAL;
+
+	if (val->intval > chg->thermal_levels)
+		return -EINVAL;
+
+	chg->system_temp_level = val->intval;
+	/* disable parallel charge in case of system temp level */
+	vote(chg->pl_disable_votable, THERMAL_DAEMON_VOTER,
+			chg->system_temp_level ? true : false, 0);
+
+	if (chg->system_temp_level == chg->thermal_levels)
+		return vote(chg->chg_disable_votable,
+			THERMAL_DAEMON_VOTER, true, 0);
+
+	vote(chg->chg_disable_votable, THERMAL_DAEMON_VOTER, false, 0);
+	if (chg->system_temp_level == 0)
+		return vote(chg->fcc_votable, THERMAL_DAEMON_VOTER, false, 0);
+
+	vote(chg->fcc_votable, THERMAL_DAEMON_VOTER, true,
+			chg->thermal_mitigation[chg->system_temp_level]);
+	return 0;
+}
+
+int smblib_set_prop_charge_qnovo_enable(struct smb_charger *chg,
+				  const union power_supply_propval *val)
+{
+	int rc = 0;
+
+	rc = smblib_masked_write(chg, QNOVO_PT_ENABLE_CMD_REG,
+			QNOVO_PT_ENABLE_CMD_BIT,
+			val->intval ? QNOVO_PT_ENABLE_CMD_BIT : 0);
+	if (rc < 0) {
+		dev_err(chg->dev, "Couldn't enable qnovo rc=%d\n", rc);
+		return rc;
+	}
+
+	return rc;
+}
+
+int smblib_set_prop_input_current_limited(struct smb_charger *chg,
+				const union power_supply_propval *val)
+{
+	chg->fake_input_current_limited = val->intval;
+	return 0;
+}
+
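+/*
+ * Force an AICL rerun by briefly voting the ICL one step below the
+ * settled value (but never below a single step) and then removing the
+ * vote; the resulting ICL change restarts the AICL loop. Skipped while
+ * USBIN is suspended.
+ */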
+int smblib_rerun_aicl(struct smb_charger *chg)
+{
+	int rc, settled_icl_ua;
+	u8 stat;
+
+	rc = smblib_read(chg, POWER_PATH_STATUS_REG, &stat);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't read POWER_PATH_STATUS rc=%d\n",
+								rc);
+		return rc;
+	}
+
+	/* USB is suspended so skip re-running AICL */
+	if (stat & USBIN_SUSPEND_STS_BIT)
+		return rc;
+
+	smblib_dbg(chg, PR_MISC, "re-running AICL\n");
+	rc = smblib_get_charge_param(chg, &chg->param.icl_stat,
+			&settled_icl_ua);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't get settled ICL rc=%d\n", rc);
+		return rc;
+	}
+
+	vote(chg->usb_icl_votable, AICL_RERUN_VOTER, true,
+			max(settled_icl_ua - chg->param.usb_icl.step_u,
+				chg->param.usb_icl.step_u));
+	vote(chg->usb_icl_votable, AICL_RERUN_VOTER, false, 0);
+
+	return 0;
+}
+
+static int smblib_dp_pulse(struct smb_charger *chg)
+{
+	int rc;
+
+	/* QC 3.0 increment */
+	rc = smblib_masked_write(chg, CMD_HVDCP_2_REG, SINGLE_INCREMENT_BIT,
+			SINGLE_INCREMENT_BIT);
+	if (rc < 0)
+		smblib_err(chg, "Couldn't write to CMD_HVDCP_2_REG rc=%d\n",
+				rc);
+
+	return rc;
+}
+
+static int smblib_dm_pulse(struct smb_charger *chg)
+{
+	int rc;
+
+	/* QC 3.0 decrement */
+	rc = smblib_masked_write(chg, CMD_HVDCP_2_REG, SINGLE_DECREMENT_BIT,
+			SINGLE_DECREMENT_BIT);
+	if (rc < 0)
+		smblib_err(chg, "Couldn't write to CMD_HVDCP_2_REG rc=%d\n",
+				rc);
+
+	return rc;
+}
+
+static int smblib_force_vbus_voltage(struct smb_charger *chg, u8 val)
+{
+	int rc;
+
+	rc = smblib_masked_write(chg, CMD_HVDCP_2_REG, val, val);
+	if (rc < 0)
+		smblib_err(chg, "Couldn't write to CMD_HVDCP_2_REG rc=%d\n",
+				rc);
+
+	return rc;
+}
+
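+/*
+ * QC3 D+/D- pulsing and ICL stepping: each DP/DM pulse nudges VBUS by
+ * one 200mV step and is mirrored in pulse_cnt for the SW-counted case;
+ * ICL_DOWN shaves 100mA off the effective ICL via SW_QC3_VOTER,
+ * resetting the accumulated delta whenever another voter has taken over.
+ */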
+int smblib_dp_dm(struct smb_charger *chg, int val)
+{
+	int target_icl_ua, rc = 0;
+	union power_supply_propval pval;
+
+	switch (val) {
+	case POWER_SUPPLY_DP_DM_DP_PULSE:
+		rc = smblib_dp_pulse(chg);
+		if (!rc)
+			chg->pulse_cnt++;
+		smblib_dbg(chg, PR_PARALLEL, "DP_DM_DP_PULSE rc=%d cnt=%d\n",
+				rc, chg->pulse_cnt);
+		break;
+	case POWER_SUPPLY_DP_DM_DM_PULSE:
+		rc = smblib_dm_pulse(chg);
+		if (!rc && chg->pulse_cnt)
+			chg->pulse_cnt--;
+		smblib_dbg(chg, PR_PARALLEL, "DP_DM_DM_PULSE rc=%d cnt=%d\n",
+				rc, chg->pulse_cnt);
+		break;
+	case POWER_SUPPLY_DP_DM_ICL_DOWN:
+		target_icl_ua = get_effective_result(chg->usb_icl_votable);
+		if (target_icl_ua < 0) {
+			/* no client vote, get the ICL from charger */
+			rc = power_supply_get_property(chg->usb_psy,
+					POWER_SUPPLY_PROP_HW_CURRENT_MAX,
+					&pval);
+			if (rc < 0) {
+				smblib_err(chg,
+					"Couldn't get max current rc=%d\n",
+					rc);
+				return rc;
+			}
+			target_icl_ua = pval.intval;
+		}
+
+		/*
+		 * If a voter other than SW_QC3_VOTER has since changed the
+		 * effective USB_ICL, reset the accumulated delta and restart
+		 * the reduction from the new value.
+		 */
+		if (target_icl_ua != get_client_vote(chg->usb_icl_votable,
+							SW_QC3_VOTER))
+			chg->usb_icl_delta_ua = 0;
+
+		chg->usb_icl_delta_ua += 100000;
+		vote(chg->usb_icl_votable, SW_QC3_VOTER, true,
+						target_icl_ua - 100000);
+		smblib_dbg(chg, PR_PARALLEL, "ICL DOWN ICL=%d reduction=%d\n",
+				target_icl_ua, chg->usb_icl_delta_ua);
+		break;
+	case POWER_SUPPLY_DP_DM_FORCE_5V:
+		rc = smblib_force_vbus_voltage(chg, FORCE_5V_BIT);
+		if (rc < 0)
+			pr_err("Failed to force 5V\n");
+		break;
+	case POWER_SUPPLY_DP_DM_FORCE_9V:
+		rc = smblib_force_vbus_voltage(chg, FORCE_9V_BIT);
+		if (rc < 0)
+			pr_err("Failed to force 9V\n");
+		break;
+	case POWER_SUPPLY_DP_DM_FORCE_12V:
+		rc = smblib_force_vbus_voltage(chg, FORCE_12V_BIT);
+		if (rc < 0)
+			pr_err("Failed to force 12V\n");
+		break;
+	case POWER_SUPPLY_DP_DM_ICL_UP:
+	default:
+		break;
+	}
+
+	return rc;
+}
+
+int smblib_disable_hw_jeita(struct smb_charger *chg, bool disable)
+{
+	int rc;
+	u8 mask;
+
+	/*
+	 * Disable h/w based JEITA compensation if s/w JEITA is enabled
+	 */
+	mask = JEITA_EN_COLD_SL_FCV_BIT
+		| JEITA_EN_HOT_SL_FCV_BIT
+		| JEITA_EN_HOT_SL_CCC_BIT
+		| JEITA_EN_COLD_SL_CCC_BIT;
+	rc = smblib_masked_write(chg, JEITA_EN_CFG_REG, mask,
+			disable ? 0 : mask);
+	if (rc < 0) {
+		dev_err(chg->dev,
+			"Couldn't configure s/w jeita rc=%d\n",
+			rc);
+		return rc;
+	}
+	return 0;
+}
+
+/*******************
+ * DC PSY GETTERS *
+ *******************/
+
+int smblib_get_prop_dc_present(struct smb_charger *chg,
+				union power_supply_propval *val)
+{
+	int rc;
+	u8 stat;
+
+	rc = smblib_read(chg, DCIN_BASE + INT_RT_STS_OFFSET, &stat);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't read DCIN_RT_STS rc=%d\n", rc);
+		return rc;
+	}
+
+	val->intval = (bool)(stat & DCIN_PLUGIN_RT_STS_BIT);
+	return 0;
+}
+
+int smblib_get_prop_dc_online(struct smb_charger *chg,
+			       union power_supply_propval *val)
+{
+	int rc = 0;
+	u8 stat;
+
+	if (get_client_vote(chg->dc_suspend_votable, USER_VOTER)) {
+		val->intval = false;
+		return rc;
+	}
+
+	rc = smblib_read(chg, POWER_PATH_STATUS_REG, &stat);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't read POWER_PATH_STATUS rc=%d\n",
+			rc);
+		return rc;
+	}
+	smblib_dbg(chg, PR_REGISTER, "POWER_PATH_STATUS = 0x%02x\n",
+		   stat);
+
+	val->intval = (stat & USE_DCIN_BIT) &&
+		      (stat & VALID_INPUT_POWER_SOURCE_STS_BIT);
+
+	return rc;
+}
+
+int smblib_get_prop_dc_current_max(struct smb_charger *chg,
+				    union power_supply_propval *val)
+{
+	val->intval = get_effective_result_locked(chg->dc_icl_votable);
+	return 0;
+}
+
+/*******************
+ * DC PSY SETTERS *
+ *******************/
+
+int smblib_set_prop_dc_current_max(struct smb_charger *chg,
+				    const union power_supply_propval *val)
+{
+	int rc;
+
+	rc = vote(chg->dc_icl_votable, USER_VOTER, true, val->intval);
+	return rc;
+}
+
+/*******************
+ * USB PSY GETTERS *
+ *******************/
+
+int smblib_get_prop_usb_present(struct smb_charger *chg,
+				union power_supply_propval *val)
+{
+	int rc;
+	u8 stat;
+
+	rc = smblib_read(chg, USBIN_BASE + INT_RT_STS_OFFSET, &stat);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't read USBIN_RT_STS rc=%d\n", rc);
+		return rc;
+	}
+
+	val->intval = (bool)(stat & USBIN_PLUGIN_RT_STS_BIT);
+	return 0;
+}
+
+int smblib_get_prop_usb_online(struct smb_charger *chg,
+			       union power_supply_propval *val)
+{
+	int rc = 0;
+	u8 stat;
+
+	if (get_client_vote_locked(chg->usb_icl_votable, USER_VOTER) == 0) {
+		val->intval = false;
+		return rc;
+	}
+
+	rc = smblib_read(chg, POWER_PATH_STATUS_REG, &stat);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't read POWER_PATH_STATUS rc=%d\n",
+			rc);
+		return rc;
+	}
+	smblib_dbg(chg, PR_REGISTER, "POWER_PATH_STATUS = 0x%02x\n",
+		   stat);
+
+	val->intval = (stat & USE_USBIN_BIT) &&
+		      (stat & VALID_INPUT_POWER_SOURCE_STS_BIT);
+	return rc;
+}
+
+int smblib_get_prop_usb_voltage_max(struct smb_charger *chg,
+				    union power_supply_propval *val)
+{
+	switch (chg->real_charger_type) {
+	case POWER_SUPPLY_TYPE_USB_HVDCP:
+	case POWER_SUPPLY_TYPE_USB_PD:
+		if (chg->smb_version == PM660_SUBTYPE)
+			val->intval = MICRO_9V;
+		else
+			val->intval = MICRO_12V;
+		break;
+	default:
+		val->intval = MICRO_5V;
+		break;
+	}
+
+	return 0;
+}
+
+int smblib_get_prop_usb_voltage_now(struct smb_charger *chg,
+				    union power_supply_propval *val)
+{
+	if (!chg->iio.usbin_v_chan ||
+		PTR_ERR(chg->iio.usbin_v_chan) == -EPROBE_DEFER)
+		chg->iio.usbin_v_chan = iio_channel_get(chg->dev, "usbin_v");
+
+	if (IS_ERR(chg->iio.usbin_v_chan))
+		return PTR_ERR(chg->iio.usbin_v_chan);
+
+	return iio_read_channel_processed(chg->iio.usbin_v_chan, &val->intval);
+}
+
+int smblib_get_prop_usb_current_now(struct smb_charger *chg,
+				    union power_supply_propval *val)
+{
+	int rc = 0;
+
+	rc = smblib_get_prop_usb_present(chg, val);
+	if (rc < 0 || !val->intval)
+		return rc;
+
+	if (!chg->iio.usbin_i_chan ||
+		PTR_ERR(chg->iio.usbin_i_chan) == -EPROBE_DEFER)
+		chg->iio.usbin_i_chan = iio_channel_get(chg->dev, "usbin_i");
+
+	if (IS_ERR(chg->iio.usbin_i_chan))
+		return PTR_ERR(chg->iio.usbin_i_chan);
+
+	return iio_read_channel_processed(chg->iio.usbin_i_chan, &val->intval);
+}
+
+int smblib_get_prop_charger_temp(struct smb_charger *chg,
+				 union power_supply_propval *val)
+{
+	int rc;
+
+	if (!chg->iio.temp_chan ||
+		PTR_ERR(chg->iio.temp_chan) == -EPROBE_DEFER)
+		chg->iio.temp_chan = iio_channel_get(chg->dev, "charger_temp");
+
+	if (IS_ERR(chg->iio.temp_chan))
+		return PTR_ERR(chg->iio.temp_chan);
+
+	rc = iio_read_channel_processed(chg->iio.temp_chan, &val->intval);
+	val->intval /= 100;
+	return rc;
+}
+
+int smblib_get_prop_charger_temp_max(struct smb_charger *chg,
+				    union power_supply_propval *val)
+{
+	int rc;
+
+	if (!chg->iio.temp_max_chan ||
+		PTR_ERR(chg->iio.temp_max_chan) == -EPROBE_DEFER)
+		chg->iio.temp_max_chan = iio_channel_get(chg->dev,
+							 "charger_temp_max");
+	if (IS_ERR(chg->iio.temp_max_chan))
+		return PTR_ERR(chg->iio.temp_max_chan);
+
+	rc = iio_read_channel_processed(chg->iio.temp_max_chan, &val->intval);
+	val->intval /= 100;
+	return rc;
+}
+
+int smblib_get_prop_typec_cc_orientation(struct smb_charger *chg,
+					 union power_supply_propval *val)
+{
+	if (chg->typec_status[3] & CC_ATTACHED_BIT)
+		val->intval =
+			(bool)(chg->typec_status[3] & CC_ORIENTATION_BIT) + 1;
+	else
+		val->intval = 0;
+
+	return 0;
+}
+
+static const char * const smblib_typec_mode_name[] = {
+	[POWER_SUPPLY_TYPEC_NONE]		  = "NONE",
+	[POWER_SUPPLY_TYPEC_SOURCE_DEFAULT]	  = "SOURCE_DEFAULT",
+	[POWER_SUPPLY_TYPEC_SOURCE_MEDIUM]	  = "SOURCE_MEDIUM",
+	[POWER_SUPPLY_TYPEC_SOURCE_HIGH]	  = "SOURCE_HIGH",
+	[POWER_SUPPLY_TYPEC_NON_COMPLIANT]	  = "NON_COMPLIANT",
+	[POWER_SUPPLY_TYPEC_SINK]		  = "SINK",
+	[POWER_SUPPLY_TYPEC_SINK_POWERED_CABLE]   = "SINK_POWERED_CABLE",
+	[POWER_SUPPLY_TYPEC_SINK_DEBUG_ACCESSORY] = "SINK_DEBUG_ACCESSORY",
+	[POWER_SUPPLY_TYPEC_SINK_AUDIO_ADAPTER]   = "SINK_AUDIO_ADAPTER",
+	[POWER_SUPPLY_TYPEC_POWERED_CABLE_ONLY]   = "POWERED_CABLE_ONLY",
+};
+
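+/*
+ * In UFP mode the partner's Rp advertisement (Rd-std/Rd-1.5/Rd-3.0) maps
+ * to SOURCE_DEFAULT/MEDIUM/HIGH; in DFP mode the CC termination pattern
+ * distinguishes audio adapter, debug accessory, powered cable and plain
+ * sink.
+ */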
+static int smblib_get_prop_ufp_mode(struct smb_charger *chg)
+{
+	switch (chg->typec_status[0]) {
+	case UFP_TYPEC_RDSTD_BIT:
+		return POWER_SUPPLY_TYPEC_SOURCE_DEFAULT;
+	case UFP_TYPEC_RD1P5_BIT:
+		return POWER_SUPPLY_TYPEC_SOURCE_MEDIUM;
+	case UFP_TYPEC_RD3P0_BIT:
+		return POWER_SUPPLY_TYPEC_SOURCE_HIGH;
+	default:
+		break;
+	}
+
+	return POWER_SUPPLY_TYPEC_NONE;
+}
+
+static int smblib_get_prop_dfp_mode(struct smb_charger *chg)
+{
+	switch (chg->typec_status[1] & DFP_TYPEC_MASK) {
+	case DFP_RA_RA_BIT:
+		return POWER_SUPPLY_TYPEC_SINK_AUDIO_ADAPTER;
+	case DFP_RD_RD_BIT:
+		return POWER_SUPPLY_TYPEC_SINK_DEBUG_ACCESSORY;
+	case DFP_RD_RA_VCONN_BIT:
+		return POWER_SUPPLY_TYPEC_SINK_POWERED_CABLE;
+	case DFP_RD_OPEN_BIT:
+		return POWER_SUPPLY_TYPEC_SINK;
+	default:
+		break;
+	}
+
+	return POWER_SUPPLY_TYPEC_NONE;
+}
+
+static int smblib_get_prop_typec_mode(struct smb_charger *chg)
+{
+	if (chg->typec_status[3] & UFP_DFP_MODE_STATUS_BIT)
+		return smblib_get_prop_dfp_mode(chg);
+	else
+		return smblib_get_prop_ufp_mode(chg);
+}
+
+int smblib_get_prop_typec_power_role(struct smb_charger *chg,
+				     union power_supply_propval *val)
+{
+	int rc = 0;
+	u8 ctrl;
+
+	rc = smblib_read(chg, TYPE_C_INTRPT_ENB_SOFTWARE_CTRL_REG, &ctrl);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't read TYPE_C_INTRPT_ENB_SOFTWARE_CTRL rc=%d\n",
+			rc);
+		return rc;
+	}
+	smblib_dbg(chg, PR_REGISTER, "TYPE_C_INTRPT_ENB_SOFTWARE_CTRL = 0x%02x\n",
+		   ctrl);
+
+	if (ctrl & TYPEC_DISABLE_CMD_BIT) {
+		val->intval = POWER_SUPPLY_TYPEC_PR_NONE;
+		return rc;
+	}
+
+	switch (ctrl & (DFP_EN_CMD_BIT | UFP_EN_CMD_BIT)) {
+	case 0:
+		val->intval = POWER_SUPPLY_TYPEC_PR_DUAL;
+		break;
+	case DFP_EN_CMD_BIT:
+		val->intval = POWER_SUPPLY_TYPEC_PR_SOURCE;
+		break;
+	case UFP_EN_CMD_BIT:
+		val->intval = POWER_SUPPLY_TYPEC_PR_SINK;
+		break;
+	default:
+		val->intval = POWER_SUPPLY_TYPEC_PR_NONE;
+		smblib_err(chg, "unsupported power role 0x%02lx\n",
+			ctrl & (DFP_EN_CMD_BIT | UFP_EN_CMD_BIT));
+		return -EINVAL;
+	}
+
+	return rc;
+}
+
+int smblib_get_prop_pd_allowed(struct smb_charger *chg,
+			       union power_supply_propval *val)
+{
+	val->intval = get_effective_result(chg->pd_allowed_votable);
+	return 0;
+}
+
+int smblib_get_prop_input_current_settled(struct smb_charger *chg,
+					  union power_supply_propval *val)
+{
+	return smblib_get_charge_param(chg, &chg->param.icl_stat, &val->intval);
+}
+
+#define HVDCP3_STEP_UV	200000
+int smblib_get_prop_input_voltage_settled(struct smb_charger *chg,
+						union power_supply_propval *val)
+{
+	int rc, pulses;
+
+	switch (chg->real_charger_type) {
+	case POWER_SUPPLY_TYPE_USB_HVDCP_3:
+		rc = smblib_get_pulse_cnt(chg, &pulses);
+		if (rc < 0) {
+			smblib_err(chg,
+				"Couldn't read QC_PULSE_COUNT rc=%d\n", rc);
+			return 0;
+		}
+		val->intval = MICRO_5V + HVDCP3_STEP_UV * pulses;
+		break;
+	case POWER_SUPPLY_TYPE_USB_PD:
+		val->intval = chg->voltage_min_uv;
+		break;
+	default:
+		val->intval = MICRO_5V;
+		break;
+	}
+
+	return 0;
+}
+
+int smblib_get_prop_pd_in_hard_reset(struct smb_charger *chg,
+			       union power_supply_propval *val)
+{
+	val->intval = chg->pd_hard_reset;
+	return 0;
+}
+
+int smblib_get_pe_start(struct smb_charger *chg,
+			       union power_supply_propval *val)
+{
+	/*
+	 * The HVDCP timeout voter is the last one to allow PD; use its vote
+	 * to indicate the start of the PE engine.
+	 */
+	val->intval
+		= !get_client_vote_locked(chg->pd_disallowed_votable_indirect,
+			HVDCP_TIMEOUT_VOTER);
+	return 0;
+}
+
+int smblib_get_prop_die_health(struct smb_charger *chg,
+						union power_supply_propval *val)
+{
+	int rc;
+	u8 stat;
+
+	rc = smblib_read(chg, TEMP_RANGE_STATUS_REG, &stat);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't read TEMP_RANGE_STATUS_REG rc=%d\n",
+									rc);
+		return rc;
+	}
+
+	/* TEMP_RANGE bits are mutually exclusive */
+	switch (stat & TEMP_RANGE_MASK) {
+	case TEMP_BELOW_RANGE_BIT:
+		val->intval = POWER_SUPPLY_HEALTH_COOL;
+		break;
+	case TEMP_WITHIN_RANGE_BIT:
+		val->intval = POWER_SUPPLY_HEALTH_WARM;
+		break;
+	case TEMP_ABOVE_RANGE_BIT:
+		val->intval = POWER_SUPPLY_HEALTH_HOT;
+		break;
+	case ALERT_LEVEL_BIT:
+		val->intval = POWER_SUPPLY_HEALTH_OVERHEAT;
+		break;
+	default:
+		val->intval = POWER_SUPPLY_HEALTH_UNKNOWN;
+	}
+
+	return 0;
+}
+
+#define SDP_CURRENT_UA			500000
+#define CDP_CURRENT_UA			1500000
+#define DCP_CURRENT_UA			1500000
+#define HVDCP_CURRENT_UA		3000000
+#define TYPEC_DEFAULT_CURRENT_UA	900000
+#define TYPEC_MEDIUM_CURRENT_UA		1500000
+#define TYPEC_HIGH_CURRENT_UA		3000000
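+/*
+ * For DCP-class chargers the Rp advertisement sets the budget: Rp-3.0
+ * allows 3A, while medium and default advertisements fall back to the
+ * 1.5A DCP default.
+ */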
+static int get_rp_based_dcp_current(struct smb_charger *chg, int typec_mode)
+{
+	int rp_ua;
+
+	switch (typec_mode) {
+	case POWER_SUPPLY_TYPEC_SOURCE_HIGH:
+		rp_ua = TYPEC_HIGH_CURRENT_UA;
+		break;
+	case POWER_SUPPLY_TYPEC_SOURCE_MEDIUM:
+	case POWER_SUPPLY_TYPEC_SOURCE_DEFAULT:
+	/* fall through */
+	default:
+		rp_ua = DCP_CURRENT_UA;
+	}
+
+	return rp_ua;
+}
+
+/*******************
+ * USB PSY SETTERS *
+ *******************/
+
+int smblib_set_prop_pd_current_max(struct smb_charger *chg,
+				    const union power_supply_propval *val)
+{
+	int rc;
+
+	if (chg->pd_active)
+		rc = vote(chg->usb_icl_votable, PD_VOTER, true, val->intval);
+	else
+		rc = -EPERM;
+
+	return rc;
+}
+
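+/*
+ * Float charger handling: -ETIMEDOUT from the USB driver means
+ * enumeration never completed, so the budget comes from the Rp
+ * advertisement; a real current request means an SDP was mis-detected
+ * as FLOAT, so honor the request and reclassify the port as USB.
+ */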
+static int smblib_handle_usb_current(struct smb_charger *chg,
+					int usb_current)
+{
+	int rc = 0, rp_ua, typec_mode;
+
+	if (chg->real_charger_type == POWER_SUPPLY_TYPE_USB_FLOAT) {
+		if (usb_current == -ETIMEDOUT) {
+			/*
+			 * Valid FLOAT charger, report the current based
+			 * on Rp
+			 */
+			typec_mode = smblib_get_prop_typec_mode(chg);
+			rp_ua = get_rp_based_dcp_current(chg, typec_mode);
+			rc = vote(chg->usb_icl_votable, LEGACY_UNKNOWN_VOTER,
+								true, rp_ua);
+			if (rc < 0)
+				return rc;
+		} else {
+			/*
+			 * FLOAT charger detected as SDP by USB driver,
+			 * charge with the requested current and update the
+			 * real_charger_type
+			 */
+			chg->real_charger_type = POWER_SUPPLY_TYPE_USB;
+			rc = vote(chg->usb_icl_votable, USB_PSY_VOTER,
+						true, usb_current);
+			if (rc < 0)
+				return rc;
+			rc = vote(chg->usb_icl_votable, LEGACY_UNKNOWN_VOTER,
+							false, 0);
+			if (rc < 0)
+				return rc;
+		}
+	} else {
+		rc = vote(chg->usb_icl_votable, USB_PSY_VOTER,
+					true, usb_current);
+	}
+
+	return rc;
+}
+
+int smblib_set_prop_sdp_current_max(struct smb_charger *chg,
+				    const union power_supply_propval *val)
+{
+	int rc = 0;
+
+	if (!chg->pd_active) {
+		rc = smblib_handle_usb_current(chg, val->intval);
+	} else if (chg->system_suspend_supported) {
+		if (val->intval <= USBIN_25MA)
+			rc = vote(chg->usb_icl_votable,
+				PD_SUSPEND_SUPPORTED_VOTER, true, val->intval);
+		else
+			rc = vote(chg->usb_icl_votable,
+				PD_SUSPEND_SUPPORTED_VOTER, false, 0);
+	}
+	return rc;
+}
+
+int smblib_set_prop_boost_current(struct smb_charger *chg,
+				    const union power_supply_propval *val)
+{
+	int rc = 0;
+
+	rc = smblib_set_charge_param(chg, &chg->param.freq_boost,
+				val->intval <= chg->boost_threshold_ua ?
+				chg->chg_freq.freq_below_otg_threshold :
+				chg->chg_freq.freq_above_otg_threshold);
+	if (rc < 0) {
+		dev_err(chg->dev, "Error in setting freq_boost rc=%d\n", rc);
+		return rc;
+	}
+
+	chg->boost_current_ua = val->intval;
+	return rc;
+}
+
+int smblib_set_prop_typec_power_role(struct smb_charger *chg,
+				     const union power_supply_propval *val)
+{
+	int rc = 0;
+	u8 power_role;
+
+	switch (val->intval) {
+	case POWER_SUPPLY_TYPEC_PR_NONE:
+		power_role = TYPEC_DISABLE_CMD_BIT;
+		break;
+	case POWER_SUPPLY_TYPEC_PR_DUAL:
+		power_role = 0;
+		break;
+	case POWER_SUPPLY_TYPEC_PR_SINK:
+		power_role = UFP_EN_CMD_BIT;
+		break;
+	case POWER_SUPPLY_TYPEC_PR_SOURCE:
+		power_role = DFP_EN_CMD_BIT;
+		break;
+	default:
+		smblib_err(chg, "power role %d not supported\n", val->intval);
+		return -EINVAL;
+	}
+
+	if (power_role == UFP_EN_CMD_BIT) {
+		/* disable PBS workaround when forcing sink mode */
+		rc = smblib_write(chg, TM_IO_DTEST4_SEL, 0x0);
+		if (rc < 0) {
+			smblib_err(chg, "Couldn't write to TM_IO_DTEST4_SEL rc=%d\n",
+				rc);
+		}
+	} else {
+		/* restore it back to 0xA5 */
+		rc = smblib_write(chg, TM_IO_DTEST4_SEL, 0xA5);
+		if (rc < 0) {
+			smblib_err(chg, "Couldn't write to TM_IO_DTEST4_SEL rc=%d\n",
+				rc);
+		}
+	}
+
+	rc = smblib_masked_write(chg, TYPE_C_INTRPT_ENB_SOFTWARE_CTRL_REG,
+				 TYPEC_POWER_ROLE_CMD_MASK, power_role);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't write 0x%02x to TYPE_C_INTRPT_ENB_SOFTWARE_CTRL rc=%d\n",
+			power_role, rc);
+		return rc;
+	}
+
+	return rc;
+}
+
+int smblib_set_prop_pd_voltage_min(struct smb_charger *chg,
+				    const union power_supply_propval *val)
+{
+	int rc, min_uv;
+
+	min_uv = min(val->intval, chg->voltage_max_uv);
+	rc = smblib_set_usb_pd_allowed_voltage(chg, min_uv,
+					       chg->voltage_max_uv);
+	if (rc < 0) {
+		smblib_err(chg, "invalid max voltage %duV rc=%d\n",
+			val->intval, rc);
+		return rc;
+	}
+
+	chg->voltage_min_uv = min_uv;
+	power_supply_changed(chg->usb_main_psy);
+	return rc;
+}
+
+int smblib_set_prop_pd_voltage_max(struct smb_charger *chg,
+				    const union power_supply_propval *val)
+{
+	int rc, max_uv;
+
+	max_uv = max(val->intval, chg->voltage_min_uv);
+	rc = smblib_set_usb_pd_allowed_voltage(chg, chg->voltage_min_uv,
+					       max_uv);
+	if (rc < 0) {
+		smblib_err(chg, "invalid min voltage %duV rc=%d\n",
+			val->intval, rc);
+		return rc;
+	}
+
+	chg->voltage_max_uv = max_uv;
+	return rc;
+}
+
+static int __smblib_set_prop_pd_active(struct smb_charger *chg, bool pd_active)
+{
+	int rc;
+	bool orientation, sink_attached, hvdcp;
+	u8 stat;
+
+	chg->pd_active = pd_active;
+	if (chg->pd_active) {
+		vote(chg->apsd_disable_votable, PD_VOTER, true, 0);
+		vote(chg->pd_allowed_votable, PD_VOTER, true, 0);
+		vote(chg->usb_irq_enable_votable, PD_VOTER, true, 0);
+
+		/*
+		 * VCONN_EN_ORIENTATION_BIT controls whether to use CC1 or CC2
+		 * line when TYPEC_SPARE_CFG_BIT (CC pin selection s/w override)
+		 * is set or when VCONN_EN_VALUE_BIT is set.
+		 */
+		orientation = chg->typec_status[3] & CC_ORIENTATION_BIT;
+		rc = smblib_masked_write(chg,
+				TYPE_C_INTRPT_ENB_SOFTWARE_CTRL_REG,
+				VCONN_EN_ORIENTATION_BIT,
+				orientation ? 0 : VCONN_EN_ORIENTATION_BIT);
+		if (rc < 0)
+			smblib_err(chg,
+				"Couldn't enable vconn on CC line rc=%d\n", rc);
+
+		/* SW controlled CC_OUT */
+		rc = smblib_masked_write(chg, TAPER_TIMER_SEL_CFG_REG,
+				TYPEC_SPARE_CFG_BIT, TYPEC_SPARE_CFG_BIT);
+		if (rc < 0)
+			smblib_err(chg, "Couldn't enable SW cc_out rc=%d\n",
+									rc);
+
+		/*
+		 * Enforce 500mA for PD until the real vote comes in later.
+		 * It is guaranteed that pd_active is set prior to
+		 * pd_current_max
+		 */
+		rc = vote(chg->usb_icl_votable, PD_VOTER, true, USBIN_500MA);
+		if (rc < 0)
+			smblib_err(chg, "Couldn't vote for USB ICL rc=%d\n",
+									rc);
+
+		/* since PD was found the cable must be non-legacy */
+		vote(chg->usb_icl_votable, LEGACY_UNKNOWN_VOTER, false, 0);
+
+		/* clear USB ICL vote for DCP_VOTER */
+		rc = vote(chg->usb_icl_votable, DCP_VOTER, false, 0);
+		if (rc < 0)
+			smblib_err(chg, "Couldn't un-vote DCP from USB ICL rc=%d\n",
+									rc);
+
+		/* remove USB_PSY_VOTER */
+		rc = vote(chg->usb_icl_votable, USB_PSY_VOTER, false, 0);
+		if (rc < 0)
+			smblib_err(chg, "Couldn't unvote USB_PSY rc=%d\n", rc);
+	} else {
+		rc = smblib_read(chg, APSD_STATUS_REG, &stat);
+		if (rc < 0) {
+			smblib_err(chg, "Couldn't read APSD status rc=%d\n",
+									rc);
+			return rc;
+		}
+
+		hvdcp = stat & QC_CHARGER_BIT;
+		vote(chg->apsd_disable_votable, PD_VOTER, false, 0);
+		vote(chg->pd_allowed_votable, PD_VOTER, true, 0);
+		vote(chg->usb_irq_enable_votable, PD_VOTER, false, 0);
+		vote(chg->hvdcp_disable_votable_indirect, PD_INACTIVE_VOTER,
+								false, 0);
+
+		/* HW controlled CC_OUT */
+		rc = smblib_masked_write(chg, TAPER_TIMER_SEL_CFG_REG,
+							TYPEC_SPARE_CFG_BIT, 0);
+		if (rc < 0)
+			smblib_err(chg, "Couldn't enable HW cc_out rc=%d\n",
+									rc);
+
+		/*
+		 * This WA should only run for HVDCP. Non-legacy SDP/CDP could
+		 * draw more, but this WA will remove Rd causing VBUS to drop,
+		 * and data could be interrupted. Non-legacy DCP could also draw
+		 * more, but it may impact compliance.
+		 */
+		sink_attached = chg->typec_status[3] & UFP_DFP_MODE_STATUS_BIT;
+		if (!chg->typec_legacy_valid && !sink_attached && hvdcp)
+			schedule_work(&chg->legacy_detection_work);
+	}
+
+	smblib_update_usb_type(chg);
+	power_supply_changed(chg->usb_psy);
+	return rc;
+}
+
+int smblib_set_prop_pd_active(struct smb_charger *chg,
+			      const union power_supply_propval *val)
+{
+	if (!get_effective_result(chg->pd_allowed_votable))
+		return -EINVAL;
+
+	return __smblib_set_prop_pd_active(chg, val->intval);
+}
+
+int smblib_set_prop_ship_mode(struct smb_charger *chg,
+				const union power_supply_propval *val)
+{
+	int rc;
+
+	smblib_dbg(chg, PR_MISC, "Set ship mode: %d!!\n", !!val->intval);
+
+	rc = smblib_masked_write(chg, SHIP_MODE_REG, SHIP_MODE_EN_BIT,
+			!!val->intval ? SHIP_MODE_EN_BIT : 0);
+	if (rc < 0)
+		dev_err(chg->dev, "Couldn't %s ship mode, rc=%d\n",
+				!!val->intval ? "enable" : "disable", rc);
+
+	return rc;
+}
+
+int smblib_reg_block_update(struct smb_charger *chg,
+				struct reg_info *entry)
+{
+	int rc = 0;
+
+	while (entry && entry->reg) {
+		rc = smblib_read(chg, entry->reg, &entry->bak);
+		if (rc < 0) {
+			dev_err(chg->dev, "Error in reading %s rc=%d\n",
+				entry->desc, rc);
+			break;
+		}
+		entry->bak &= entry->mask;
+
+		rc = smblib_masked_write(chg, entry->reg,
+					 entry->mask, entry->val);
+		if (rc < 0) {
+			dev_err(chg->dev, "Error in writing %s rc=%d\n",
+				entry->desc, rc);
+			break;
+		}
+		entry++;
+	}
+
+	return rc;
+}
+
+int smblib_reg_block_restore(struct smb_charger *chg,
+				struct reg_info *entry)
+{
+	int rc = 0;
+
+	while (entry && entry->reg) {
+		rc = smblib_masked_write(chg, entry->reg,
+					 entry->mask, entry->bak);
+		if (rc < 0) {
+			dev_err(chg->dev, "Error in writing %s rc=%d\n",
+				entry->desc, rc);
+			break;
+		}
+		entry++;
+	}
+
+	return rc;
+}
+
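+/*
+ * Register overrides applied while the CC2 detach workaround is active;
+ * the .bak fields are filled by smblib_reg_block_update() so that
+ * smblib_reg_block_restore() can put the original values back.
+ */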
+static struct reg_info cc2_detach_settings[] = {
+	{
+		.reg	= TYPE_C_CFG_2_REG,
+		.mask	= TYPE_C_UFP_MODE_BIT | EN_TRY_SOURCE_MODE_BIT,
+		.val	= TYPE_C_UFP_MODE_BIT,
+		.desc	= "TYPE_C_CFG_2_REG",
+	},
+	{
+		.reg	= TYPE_C_CFG_3_REG,
+		.mask	= EN_TRYSINK_MODE_BIT,
+		.val	= 0,
+		.desc	= "TYPE_C_CFG_3_REG",
+	},
+	{
+		.reg	= TAPER_TIMER_SEL_CFG_REG,
+		.mask	= TYPEC_SPARE_CFG_BIT,
+		.val	= TYPEC_SPARE_CFG_BIT,
+		.desc	= "TAPER_TIMER_SEL_CFG_REG",
+	},
+	{
+		.reg	= TYPE_C_INTRPT_ENB_SOFTWARE_CTRL_REG,
+		.mask	= VCONN_EN_ORIENTATION_BIT,
+		.val	= 0,
+		.desc	= "TYPE_C_INTRPT_ENB_SOFTWARE_CTRL_REG",
+	},
+	{
+		.reg	= MISC_CFG_REG,
+		.mask	= TCC_DEBOUNCE_20MS_BIT,
+		.val	= TCC_DEBOUNCE_20MS_BIT,
+		.desc	= "Tccdebounce time"
+	},
+	{
+	},
+};
+
+static int smblib_cc2_sink_removal_enter(struct smb_charger *chg)
+{
+	int rc, ccout, ufp_mode;
+	u8 stat;
+
+	if ((chg->wa_flags & TYPEC_CC2_REMOVAL_WA_BIT) == 0)
+		return 0;
+
+	if (chg->cc2_detach_wa_active)
+		return 0;
+
+	rc = smblib_read(chg, TYPE_C_STATUS_4_REG, &stat);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't read TYPE_C_STATUS_4 rc=%d\n", rc);
+		return rc;
+	}
+
+	ccout = (stat & CC_ATTACHED_BIT) ?
+					(!!(stat & CC_ORIENTATION_BIT) + 1) : 0;
+	ufp_mode = (stat & TYPEC_DEBOUNCE_DONE_STATUS_BIT) ?
+					!(stat & UFP_DFP_MODE_STATUS_BIT) : 0;
+
+	if (ccout != 2)
+		return 0;
+
+	if (!ufp_mode)
+		return 0;
+
+	chg->cc2_detach_wa_active = true;
+	/* The CC2 removal WA will cause a type-c-change IRQ storm */
+	smblib_reg_block_update(chg, cc2_detach_settings);
+	schedule_work(&chg->rdstd_cc2_detach_work);
+	return rc;
+}
+
+static int smblib_cc2_sink_removal_exit(struct smb_charger *chg)
+{
+	if ((chg->wa_flags & TYPEC_CC2_REMOVAL_WA_BIT) == 0)
+		return 0;
+
+	if (!chg->cc2_detach_wa_active)
+		return 0;
+
+	chg->cc2_detach_wa_active = false;
+	cancel_work_sync(&chg->rdstd_cc2_detach_work);
+	smblib_reg_block_restore(chg, cc2_detach_settings);
+	return 0;
+}
+
+int smblib_set_prop_pd_in_hard_reset(struct smb_charger *chg,
+				const union power_supply_propval *val)
+{
+	int rc = 0;
+
+	if (chg->pd_hard_reset == val->intval)
+		return rc;
+
+	chg->pd_hard_reset = val->intval;
+	rc = smblib_masked_write(chg, TYPE_C_INTRPT_ENB_SOFTWARE_CTRL_REG,
+			EXIT_SNK_BASED_ON_CC_BIT,
+			(chg->pd_hard_reset) ? EXIT_SNK_BASED_ON_CC_BIT : 0);
+	if (rc < 0)
+		smblib_err(chg, "Couldn't set EXIT_SNK_BASED_ON_CC rc=%d\n",
+				rc);
+
+	vote(chg->apsd_disable_votable, PD_HARD_RESET_VOTER,
+							chg->pd_hard_reset, 0);
+
+	return rc;
+}
+
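+/*
+ * When moving from a JEITA soft limit back to the normal range with
+ * charging already terminated, toggle CHARGING_ENABLE_CMD so charging
+ * restarts with the normal-range parameters.
+ */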
+static int smblib_recover_from_soft_jeita(struct smb_charger *chg)
+{
+	u8 stat_1, stat_2;
+	int rc;
+
+	rc = smblib_read(chg, BATTERY_CHARGER_STATUS_1_REG, &stat_1);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't read BATTERY_CHARGER_STATUS_1 rc=%d\n",
+				rc);
+		return rc;
+	}
+
+	rc = smblib_read(chg, BATTERY_CHARGER_STATUS_2_REG, &stat_2);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't read BATTERY_CHARGER_STATUS_2 rc=%d\n",
+				rc);
+		return rc;
+	}
+
+	if ((chg->jeita_status && !(stat_2 & BAT_TEMP_STATUS_SOFT_LIMIT_MASK) &&
+		((stat_1 & BATTERY_CHARGER_STATUS_MASK) == TERMINATE_CHARGE))) {
+		/*
+		 * We are moving from JEITA soft -> Normal and charging
+		 * is terminated
+		 */
+		rc = smblib_write(chg, CHARGING_ENABLE_CMD_REG, 0);
+		if (rc < 0) {
+			smblib_err(chg, "Couldn't disable charging rc=%d\n",
+						rc);
+			return rc;
+		}
+		rc = smblib_write(chg, CHARGING_ENABLE_CMD_REG,
+						CHARGING_ENABLE_CMD_BIT);
+		if (rc < 0) {
+			smblib_err(chg, "Couldn't enable charging rc=%d\n",
+						rc);
+			return rc;
+		}
+	}
+
+	chg->jeita_status = stat_2 & BAT_TEMP_STATUS_SOFT_LIMIT_MASK;
+
+	return 0;
+}
+
+/************************
+ * USB MAIN PSY GETTERS *
+ ************************/
+int smblib_get_prop_fcc_delta(struct smb_charger *chg,
+				union power_supply_propval *val)
+{
+	int rc, jeita_cc_delta_ua = 0;
+
+	rc = smblib_get_jeita_cc_delta(chg, &jeita_cc_delta_ua);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't get jeita cc delta rc=%d\n", rc);
+		jeita_cc_delta_ua = 0;
+	}
+
+	val->intval = jeita_cc_delta_ua;
+	return 0;
+}
+
+/************************
+ * USB MAIN PSY SETTERS *
+ ************************/
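+/*
+ * Aggregate the input current budget: a PD contract takes precedence,
+ * then a QC2/QC3 adapter gets the fixed HVDCP budget; otherwise the
+ * budget comes from the BC1.2 result (for non-compliant cables) or the
+ * Rp advertisement, floored at the settled AICL value.
+ */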
+int smblib_get_charge_current(struct smb_charger *chg,
+				int *total_current_ua)
+{
+	const struct apsd_result *apsd_result = smblib_get_apsd_result(chg);
+	union power_supply_propval val = {0, };
+	int rc = 0, typec_source_rd, current_ua;
+	bool non_compliant;
+	u8 stat5;
+
+	if (chg->pd_active) {
+		*total_current_ua =
+			get_client_vote_locked(chg->usb_icl_votable, PD_VOTER);
+		return rc;
+	}
+
+	rc = smblib_read(chg, TYPE_C_STATUS_5_REG, &stat5);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't read TYPE_C_STATUS_5 rc=%d\n", rc);
+		return rc;
+	}
+	non_compliant = stat5 & TYPEC_NONCOMP_LEGACY_CABLE_STATUS_BIT;
+
+	/* get settled ICL */
+	rc = smblib_get_prop_input_current_settled(chg, &val);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't get settled ICL rc=%d\n", rc);
+		return rc;
+	}
+
+	typec_source_rd = smblib_get_prop_ufp_mode(chg);
+
+	/* QC 2.0/3.0 adapter */
+	if (apsd_result->bit & (QC_3P0_BIT | QC_2P0_BIT)) {
+		*total_current_ua = HVDCP_CURRENT_UA;
+		return 0;
+	}
+
+	if (non_compliant) {
+		switch (apsd_result->bit) {
+		case CDP_CHARGER_BIT:
+			current_ua = CDP_CURRENT_UA;
+			break;
+		case DCP_CHARGER_BIT:
+		case OCP_CHARGER_BIT:
+		case FLOAT_CHARGER_BIT:
+			current_ua = DCP_CURRENT_UA;
+			break;
+		default:
+			current_ua = 0;
+			break;
+		}
+
+		*total_current_ua = max(current_ua, val.intval);
+		return 0;
+	}
+
+	switch (typec_source_rd) {
+	case POWER_SUPPLY_TYPEC_SOURCE_DEFAULT:
+		switch (apsd_result->bit) {
+		case CDP_CHARGER_BIT:
+			current_ua = CDP_CURRENT_UA;
+			break;
+		case DCP_CHARGER_BIT:
+		case OCP_CHARGER_BIT:
+		case FLOAT_CHARGER_BIT:
+			current_ua = chg->default_icl_ua;
+			break;
+		default:
+			current_ua = 0;
+			break;
+		}
+		break;
+	case POWER_SUPPLY_TYPEC_SOURCE_MEDIUM:
+		current_ua = TYPEC_MEDIUM_CURRENT_UA;
+		break;
+	case POWER_SUPPLY_TYPEC_SOURCE_HIGH:
+		current_ua = TYPEC_HIGH_CURRENT_UA;
+		break;
+	case POWER_SUPPLY_TYPEC_NON_COMPLIANT:
+	case POWER_SUPPLY_TYPEC_NONE:
+	default:
+		current_ua = 0;
+		break;
+	}
+
+	*total_current_ua = max(current_ua, val.intval);
+	return 0;
+}
+
+/************************
+ * PARALLEL PSY GETTERS *
+ ************************/
+
+int smblib_get_prop_slave_current_now(struct smb_charger *chg,
+		union power_supply_propval *pval)
+{
+	if (IS_ERR_OR_NULL(chg->iio.batt_i_chan))
+		chg->iio.batt_i_chan = iio_channel_get(chg->dev, "batt_i");
+
+	if (IS_ERR(chg->iio.batt_i_chan))
+		return PTR_ERR(chg->iio.batt_i_chan);
+
+	return iio_read_channel_processed(chg->iio.batt_i_chan, &pval->intval);
+}
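+
+/*
+ * Editor's note: the "batt_i" IIO channel above is acquired lazily on
+ * first use and cached in chg->iio; like the other channels it is
+ * released in smblib_iio_deinit().
+ */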
+
+/**********************
+ * INTERRUPT HANDLERS *
+ **********************/
+
+irqreturn_t smblib_handle_debug(int irq, void *data)
+{
+	struct smb_irq_data *irq_data = data;
+	struct smb_charger *chg = irq_data->parent_data;
+
+	smblib_dbg(chg, PR_INTERRUPT, "IRQ: %s\n", irq_data->name);
+	return IRQ_HANDLED;
+}
+
+irqreturn_t smblib_handle_otg_overcurrent(int irq, void *data)
+{
+	struct smb_irq_data *irq_data = data;
+	struct smb_charger *chg = irq_data->parent_data;
+	int rc;
+	u8 stat;
+
+	rc = smblib_read(chg, OTG_BASE + INT_RT_STS_OFFSET, &stat);
+	if (rc < 0) {
+		dev_err(chg->dev, "Couldn't read OTG_INT_RT_STS rc=%d\n", rc);
+		return IRQ_HANDLED;
+	}
+
+	if (chg->wa_flags & OTG_WA) {
+		if (stat & OTG_OC_DIS_SW_STS_RT_STS_BIT)
+			smblib_err(chg, "OTG disabled by hw\n");
+
+		/* not handling software based hiccups for PM660 */
+		return IRQ_HANDLED;
+	}
+
+	if (stat & OTG_OVERCURRENT_RT_STS_BIT)
+		schedule_work(&chg->otg_oc_work);
+
+	return IRQ_HANDLED;
+}
+
+irqreturn_t smblib_handle_chg_state_change(int irq, void *data)
+{
+	struct smb_irq_data *irq_data = data;
+	struct smb_charger *chg = irq_data->parent_data;
+	u8 stat;
+	int rc;
+
+	smblib_dbg(chg, PR_INTERRUPT, "IRQ: %s\n", irq_data->name);
+
+	rc = smblib_read(chg, BATTERY_CHARGER_STATUS_1_REG, &stat);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't read BATTERY_CHARGER_STATUS_1 rc=%d\n",
+				rc);
+		return IRQ_HANDLED;
+	}
+
+	stat = stat & BATTERY_CHARGER_STATUS_MASK;
+	power_supply_changed(chg->batt_psy);
+	return IRQ_HANDLED;
+}
+
+irqreturn_t smblib_handle_batt_temp_changed(int irq, void *data)
+{
+	struct smb_irq_data *irq_data = data;
+	struct smb_charger *chg = irq_data->parent_data;
+	int rc;
+
+	rc = smblib_recover_from_soft_jeita(chg);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't recover chg from soft jeita rc=%d\n",
+				rc);
+		return IRQ_HANDLED;
+	}
+
+	rerun_election(chg->fcc_votable);
+	power_supply_changed(chg->batt_psy);
+	return IRQ_HANDLED;
+}
+
+irqreturn_t smblib_handle_batt_psy_changed(int irq, void *data)
+{
+	struct smb_irq_data *irq_data = data;
+	struct smb_charger *chg = irq_data->parent_data;
+
+	smblib_dbg(chg, PR_INTERRUPT, "IRQ: %s\n", irq_data->name);
+	power_supply_changed(chg->batt_psy);
+	return IRQ_HANDLED;
+}
+
+irqreturn_t smblib_handle_usb_psy_changed(int irq, void *data)
+{
+	struct smb_irq_data *irq_data = data;
+	struct smb_charger *chg = irq_data->parent_data;
+
+	smblib_dbg(chg, PR_INTERRUPT, "IRQ: %s\n", irq_data->name);
+	power_supply_changed(chg->usb_psy);
+	return IRQ_HANDLED;
+}
+
+irqreturn_t smblib_handle_usbin_uv(int irq, void *data)
+{
+	struct smb_irq_data *irq_data = data;
+	struct smb_charger *chg = irq_data->parent_data;
+	struct storm_watch *wdata;
+
+	smblib_dbg(chg, PR_INTERRUPT, "IRQ: %s\n", irq_data->name);
+	if (!chg->irq_info[SWITCH_POWER_OK_IRQ].irq_data)
+		return IRQ_HANDLED;
+
+	wdata = &chg->irq_info[SWITCH_POWER_OK_IRQ].irq_data->storm_data;
+	reset_storm_count(wdata);
+	return IRQ_HANDLED;
+}
+
+static void smblib_micro_usb_plugin(struct smb_charger *chg, bool vbus_rising)
+{
+	if (vbus_rising) {
+		/* use the typec flag even though it's not Type-C */
+		chg->typec_present = 1;
+	} else {
+		chg->typec_present = 0;
+		smblib_update_usb_type(chg);
+		extcon_set_cable_state_(chg->extcon, EXTCON_USB, false);
+		smblib_uusb_removal(chg);
+	}
+}
+
+void smblib_usb_plugin_hard_reset_locked(struct smb_charger *chg)
+{
+	int rc;
+	u8 stat;
+	bool vbus_rising;
+	struct smb_irq_data *data;
+	struct storm_watch *wdata;
+
+	rc = smblib_read(chg, USBIN_BASE + INT_RT_STS_OFFSET, &stat);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't read USB_INT_RT_STS rc=%d\n", rc);
+		return;
+	}
+
+	vbus_rising = (bool)(stat & USBIN_PLUGIN_RT_STS_BIT);
+
+	if (vbus_rising) {
+		smblib_cc2_sink_removal_exit(chg);
+	} else {
+		smblib_cc2_sink_removal_enter(chg);
+		if (chg->wa_flags & BOOST_BACK_WA) {
+			data = chg->irq_info[SWITCH_POWER_OK_IRQ].irq_data;
+			if (data) {
+				wdata = &data->storm_data;
+				update_storm_count(wdata,
+						WEAK_CHG_STORM_COUNT);
+				vote(chg->usb_icl_votable, BOOST_BACK_VOTER,
+						false, 0);
+				vote(chg->usb_icl_votable, WEAK_CHARGER_VOTER,
+						false, 0);
+			}
+		}
+	}
+
+	power_supply_changed(chg->usb_psy);
+	smblib_dbg(chg, PR_INTERRUPT, "IRQ: usbin-plugin %s\n",
+					vbus_rising ? "attached" : "detached");
+}
+
+#define PL_DELAY_MS			30000
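+/*
+ * Editor's note: on VBUS rising the handler below holds an awake vote
+ * (PL_DELAY_VOTER) and schedules pl_enable_work after PL_DELAY_MS, so
+ * parallel charging is only enabled ~30 s after insertion (presumably
+ * to let AICL settle first).
+ */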
+void smblib_usb_plugin_locked(struct smb_charger *chg)
+{
+	int rc;
+	u8 stat;
+	bool vbus_rising;
+	struct smb_irq_data *data;
+	struct storm_watch *wdata;
+
+	rc = smblib_read(chg, USBIN_BASE + INT_RT_STS_OFFSET, &stat);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't read USB_INT_RT_STS rc=%d\n", rc);
+		return;
+	}
+
+	vbus_rising = (bool)(stat & USBIN_PLUGIN_RT_STS_BIT);
+	smblib_set_opt_freq_buck(chg, vbus_rising ? chg->chg_freq.freq_5V :
+						chg->chg_freq.freq_removal);
+
+	if (vbus_rising) {
+		rc = smblib_request_dpdm(chg, true);
+		if (rc < 0)
+			smblib_err(chg, "Couldn't to enable DPDM rc=%d\n", rc);
+
+		/* Schedule work to enable parallel charger */
+		vote(chg->awake_votable, PL_DELAY_VOTER, true, 0);
+		schedule_delayed_work(&chg->pl_enable_work,
+					msecs_to_jiffies(PL_DELAY_MS));
+	} else {
+		if (chg->wa_flags & BOOST_BACK_WA) {
+			data = chg->irq_info[SWITCH_POWER_OK_IRQ].irq_data;
+			if (data) {
+				wdata = &data->storm_data;
+				update_storm_count(wdata,
+						WEAK_CHG_STORM_COUNT);
+				vote(chg->usb_icl_votable, BOOST_BACK_VOTER,
+						false, 0);
+				vote(chg->usb_icl_votable, WEAK_CHARGER_VOTER,
+						false, 0);
+			}
+		}
+
+		rc = smblib_request_dpdm(chg, false);
+		if (rc < 0)
+			smblib_err(chg, "Couldn't disable DPDM rc=%d\n", rc);
+	}
+
+	if (chg->micro_usb_mode)
+		smblib_micro_usb_plugin(chg, vbus_rising);
+
+	power_supply_changed(chg->usb_psy);
+	smblib_dbg(chg, PR_INTERRUPT, "IRQ: usbin-plugin %s\n",
+					vbus_rising ? "attached" : "detached");
+}
+
+irqreturn_t smblib_handle_usb_plugin(int irq, void *data)
+{
+	struct smb_irq_data *irq_data = data;
+	struct smb_charger *chg = irq_data->parent_data;
+
+	mutex_lock(&chg->lock);
+	if (chg->pd_hard_reset)
+		smblib_usb_plugin_hard_reset_locked(chg);
+	else
+		smblib_usb_plugin_locked(chg);
+	mutex_unlock(&chg->lock);
+	return IRQ_HANDLED;
+}
+
+#define USB_WEAK_INPUT_UA	1400000
+#define ICL_CHANGE_DELAY_MS	1000
+irqreturn_t smblib_handle_icl_change(int irq, void *data)
+{
+	u8 stat;
+	int rc, settled_ua, delay = ICL_CHANGE_DELAY_MS;
+	struct smb_irq_data *irq_data = data;
+	struct smb_charger *chg = irq_data->parent_data;
+
+	if (chg->mode == PARALLEL_MASTER) {
+		rc = smblib_read(chg, AICL_STATUS_REG, &stat);
+		if (rc < 0) {
+			smblib_err(chg, "Couldn't read AICL_STATUS rc=%d\n",
+					rc);
+			return IRQ_HANDLED;
+		}
+
+		rc = smblib_get_charge_param(chg, &chg->param.icl_stat,
+				&settled_ua);
+		if (rc < 0) {
+			smblib_err(chg, "Couldn't get ICL status rc=%d\n", rc);
+			return IRQ_HANDLED;
+		}
+
+		/* If AICL settled then schedule work now */
+		if ((settled_ua == get_effective_result(chg->usb_icl_votable))
+				|| (stat & AICL_DONE_BIT))
+			delay = 0;
+
+		cancel_delayed_work_sync(&chg->icl_change_work);
+		schedule_delayed_work(&chg->icl_change_work,
+						msecs_to_jiffies(delay));
+	}
+
+	return IRQ_HANDLED;
+}
+
+static void smblib_handle_slow_plugin_timeout(struct smb_charger *chg,
+					      bool rising)
+{
+	smblib_dbg(chg, PR_INTERRUPT, "IRQ: slow-plugin-timeout %s\n",
+		   rising ? "rising" : "falling");
+}
+
+static void smblib_handle_sdp_enumeration_done(struct smb_charger *chg,
+					       bool rising)
+{
+	smblib_dbg(chg, PR_INTERRUPT, "IRQ: sdp-enumeration-done %s\n",
+		   rising ? "rising" : "falling");
+}
+
+#define MICRO_10P3V	10300000
+static void smblib_check_ov_condition(struct smb_charger *chg)
+{
+	union power_supply_propval pval = {0, };
+	int rc;
+
+	if (chg->wa_flags & OV_IRQ_WA_BIT) {
+		rc = power_supply_get_property(chg->usb_psy,
+			POWER_SUPPLY_PROP_VOLTAGE_NOW, &pval);
+		if (rc < 0) {
+			smblib_err(chg, "Couldn't get current voltage, rc=%d\n",
+				rc);
+			return;
+		}
+
+		if (pval.intval > MICRO_10P3V) {
+			smblib_err(chg, "USBIN OV detected\n");
+			vote(chg->hvdcp_hw_inov_dis_votable, OV_VOTER, true,
+				0);
+			pval.intval = POWER_SUPPLY_DP_DM_FORCE_5V;
+			rc = power_supply_set_property(chg->batt_psy,
+				POWER_SUPPLY_PROP_DP_DM, &pval);
+			if (rc < 0)
+				smblib_err(chg,
+					"Couldn't force 5V rc=%d\n", rc);
+			return;
+		}
+	}
+}
+
+#define QC3_PULSES_FOR_6V	5
+#define QC3_PULSES_FOR_9V	20
+#define QC3_PULSES_FOR_12V	35
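+/*
+ * Editor's note: assuming the usual QC 3.0 step of 200 mV per D+ pulse
+ * from a 5 V base, the thresholds above map the pulse count to the
+ * adapter voltage range used to pick the buck frequency:
+ *   <  5 pulses -> below ~6 V -> freq_5V
+ *   < 20 pulses -> ~6-9 V     -> freq_6V_8V
+ *   < 35 pulses -> ~9-12 V    -> freq_9V
+ *   >= 35       -> ~12 V      -> freq_12V
+ */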
+static void smblib_hvdcp_adaptive_voltage_change(struct smb_charger *chg)
+{
+	int rc;
+	u8 stat;
+	int pulses;
+
+	smblib_check_ov_condition(chg);
+	power_supply_changed(chg->usb_main_psy);
+	if (chg->real_charger_type == POWER_SUPPLY_TYPE_USB_HVDCP) {
+		rc = smblib_read(chg, QC_CHANGE_STATUS_REG, &stat);
+		if (rc < 0) {
+			smblib_err(chg,
+				"Couldn't read QC_CHANGE_STATUS rc=%d\n", rc);
+			return;
+		}
+
+		switch (stat & QC_2P0_STATUS_MASK) {
+		case QC_5V_BIT:
+			smblib_set_opt_freq_buck(chg,
+					chg->chg_freq.freq_5V);
+			break;
+		case QC_9V_BIT:
+			smblib_set_opt_freq_buck(chg,
+					chg->chg_freq.freq_9V);
+			break;
+		case QC_12V_BIT:
+			smblib_set_opt_freq_buck(chg,
+					chg->chg_freq.freq_12V);
+			break;
+		default:
+			smblib_set_opt_freq_buck(chg,
+					chg->chg_freq.freq_removal);
+			break;
+		}
+	}
+
+	if (chg->real_charger_type == POWER_SUPPLY_TYPE_USB_HVDCP_3) {
+		rc = smblib_get_pulse_cnt(chg, &pulses);
+		if (rc < 0) {
+			smblib_err(chg,
+				"Couldn't read QC_PULSE_COUNT rc=%d\n", rc);
+			return;
+		}
+
+		if (pulses < QC3_PULSES_FOR_6V)
+			smblib_set_opt_freq_buck(chg,
+				chg->chg_freq.freq_5V);
+		else if (pulses < QC3_PULSES_FOR_9V)
+			smblib_set_opt_freq_buck(chg,
+				chg->chg_freq.freq_6V_8V);
+		else if (pulses < QC3_PULSES_FOR_12V)
+			smblib_set_opt_freq_buck(chg,
+				chg->chg_freq.freq_9V);
+		else
+			smblib_set_opt_freq_buck(chg,
+				chg->chg_freq.freq_12V);
+	}
+}
+
+/* triggers when HVDCP 3.0 authentication has finished */
+static void smblib_handle_hvdcp_3p0_auth_done(struct smb_charger *chg,
+					      bool rising)
+{
+	const struct apsd_result *apsd_result;
+	int rc;
+
+	if (!rising)
+		return;
+
+	if (chg->wa_flags & QC_AUTH_INTERRUPT_WA_BIT) {
+		/*
+		 * Disable AUTH_IRQ_EN_CFG_BIT to receive adapter voltage
+		 * change interrupt.
+		 */
+		rc = smblib_masked_write(chg,
+				USBIN_SOURCE_CHANGE_INTRPT_ENB_REG,
+				AUTH_IRQ_EN_CFG_BIT, 0);
+		if (rc < 0)
+			smblib_err(chg,
+				"Couldn't disable QC auth setting rc=%d\n",
+				rc);
+	}
+
+	if (chg->mode == PARALLEL_MASTER)
+		vote(chg->pl_enable_votable_indirect, USBIN_V_VOTER, true, 0);
+
+	/* the APSD done handler will set the USB supply type */
+	apsd_result = smblib_get_apsd_result(chg);
+	if (get_effective_result(chg->hvdcp_hw_inov_dis_votable)) {
+		if (apsd_result->pst == POWER_SUPPLY_TYPE_USB_HVDCP) {
+			/* force HVDCP2 to 9V if INOV is disabled */
+			rc = smblib_masked_write(chg, CMD_HVDCP_2_REG,
+					FORCE_9V_BIT, FORCE_9V_BIT);
+			if (rc < 0)
+				smblib_err(chg,
+					"Couldn't force 9V HVDCP rc=%d\n", rc);
+		}
+	}
+
+	smblib_dbg(chg, PR_INTERRUPT, "IRQ: hvdcp-3p0-auth-done rising; %s detected\n",
+		   apsd_result->name);
+}
+
+static void smblib_handle_hvdcp_check_timeout(struct smb_charger *chg,
+					      bool rising, bool qc_charger)
+{
+	const struct apsd_result *apsd_result = smblib_get_apsd_result(chg);
+
+	/* Hold off PD only until hvdcp 2.0 detection timeout */
+	if (rising) {
+		vote(chg->pd_disallowed_votable_indirect, HVDCP_TIMEOUT_VOTER,
+								false, 0);
+
+		/* enable HDC and ICL irq for QC2/3 charger */
+		if (qc_charger)
+			vote(chg->usb_irq_enable_votable, QC_VOTER, true, 0);
+
+		/*
+		 * HVDCP detection timeout done
+		 * If adapter is not QC2.0/QC3.0 - it is a plain old DCP.
+		 */
+		if (!qc_charger && (apsd_result->bit & DCP_CHARGER_BIT))
+			/* enforce DCP ICL if specified */
+			vote(chg->usb_icl_votable, DCP_VOTER,
+				chg->dcp_icl_ua != -EINVAL, chg->dcp_icl_ua);
+
+		/*
+		 * If PD is not allowed, set pd_active = false right here,
+		 * so that the HVDCP engine gets started.
+		 */
+		if (!get_effective_result(chg->pd_allowed_votable))
+			__smblib_set_prop_pd_active(chg, 0);
+	}
+
+	smblib_dbg(chg, PR_INTERRUPT, "IRQ: smblib_handle_hvdcp_check_timeout %s\n",
+		   rising ? "rising" : "falling");
+}
+
+/* triggers when HVDCP is detected */
+static void smblib_handle_hvdcp_detect_done(struct smb_charger *chg,
+					    bool rising)
+{
+	if (!rising)
+		return;
+
+	/* the APSD done handler will set the USB supply type */
+	cancel_delayed_work_sync(&chg->hvdcp_detect_work);
+	smblib_dbg(chg, PR_INTERRUPT, "IRQ: hvdcp-detect-done %s\n",
+		   rising ? "rising" : "falling");
+}
+
+static void smblib_force_legacy_icl(struct smb_charger *chg, int pst)
+{
+	int typec_mode;
+	int rp_ua;
+
+	/* while PD is active it should have complete ICL control */
+	if (chg->pd_active)
+		return;
+
+	switch (pst) {
+	case POWER_SUPPLY_TYPE_USB:
+		/*
+		 * USB_PSY will vote to increase the current to 500/900mA once
+		 * enumeration is done. Ensure that USB_PSY has at least voted
+		 * for 100mA before releasing the LEGACY_UNKNOWN vote
+		 */
+		if (!is_client_vote_enabled(chg->usb_icl_votable,
+								USB_PSY_VOTER))
+			vote(chg->usb_icl_votable, USB_PSY_VOTER, true, 100000);
+		vote(chg->usb_icl_votable, LEGACY_UNKNOWN_VOTER, false, 0);
+		break;
+	case POWER_SUPPLY_TYPE_USB_CDP:
+		vote(chg->usb_icl_votable, LEGACY_UNKNOWN_VOTER, true, 1500000);
+		break;
+	case POWER_SUPPLY_TYPE_USB_DCP:
+		typec_mode = smblib_get_prop_typec_mode(chg);
+		rp_ua = get_rp_based_dcp_current(chg, typec_mode);
+		vote(chg->usb_icl_votable, LEGACY_UNKNOWN_VOTER, true, rp_ua);
+		break;
+	case POWER_SUPPLY_TYPE_USB_FLOAT:
+		/*
+		 * limit ICL to 100mA, the USB driver will enumerate to check
+		 * if this is a SDP and appropriately set the current
+		 */
+		vote(chg->usb_icl_votable, LEGACY_UNKNOWN_VOTER, true, 100000);
+		break;
+	case POWER_SUPPLY_TYPE_USB_HVDCP:
+	case POWER_SUPPLY_TYPE_USB_HVDCP_3:
+		vote(chg->usb_icl_votable, LEGACY_UNKNOWN_VOTER, true, 3000000);
+		break;
+	default:
+		smblib_err(chg, "Unknown APSD %d; forcing 500mA\n", pst);
+		vote(chg->usb_icl_votable, LEGACY_UNKNOWN_VOTER, true, 500000);
+		break;
+	}
+}
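+
+/*
+ * Editor's summary of the LEGACY_UNKNOWN ICL defaults voted in
+ * smblib_force_legacy_icl() above:
+ *   SDP            100 mA (until USB_PSY votes 500/900 mA after enum)
+ *   CDP            1.5 A
+ *   DCP            Rp-based (get_rp_based_dcp_current)
+ *   FLOAT          100 mA, pending USB enumeration
+ *   HVDCP/HVDCP3   3 A
+ *   unknown        500 mA
+ */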
+
+static void smblib_notify_extcon_props(struct smb_charger *chg)
+{
+	union power_supply_propval val;
+
+	smblib_get_prop_typec_cc_orientation(chg, &val);
+	extcon_set_cable_state_(chg->extcon, EXTCON_USB_CC,
+					(val.intval == 2) ? 1 : 0);
+	extcon_set_cable_state_(chg->extcon, EXTCON_USB_SPEED, true);
+}
+
+static void smblib_notify_device_mode(struct smb_charger *chg, bool enable)
+{
+	if (enable)
+		smblib_notify_extcon_props(chg);
+
+	extcon_set_cable_state_(chg->extcon, EXTCON_USB, enable);
+}
+
+static void smblib_notify_usb_host(struct smb_charger *chg, bool enable)
+{
+	if (enable)
+		smblib_notify_extcon_props(chg);
+
+	extcon_set_cable_state_(chg->extcon, EXTCON_USB_HOST, enable);
+}
+
+#define HVDCP_DET_MS 2500
+static void smblib_handle_apsd_done(struct smb_charger *chg, bool rising)
+{
+	const struct apsd_result *apsd_result;
+
+	if (!rising)
+		return;
+
+	apsd_result = smblib_update_usb_type(chg);
+
+	if (!chg->typec_legacy_valid)
+		smblib_force_legacy_icl(chg, apsd_result->pst);
+
+	switch (apsd_result->bit) {
+	case SDP_CHARGER_BIT:
+	case CDP_CHARGER_BIT:
+		if (chg->micro_usb_mode)
+			extcon_set_cable_state_(chg->extcon, EXTCON_USB,
+					true);
+		if (chg->use_extcon)
+			smblib_notify_device_mode(chg, true);
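+		/* fall through */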
+	case OCP_CHARGER_BIT:
+	case FLOAT_CHARGER_BIT:
+		/* if not DCP then no HVDCP timeout happens; enable PD here */
+		vote(chg->pd_disallowed_votable_indirect, HVDCP_TIMEOUT_VOTER,
+				false, 0);
+		break;
+	case DCP_CHARGER_BIT:
+		if (chg->wa_flags & QC_CHARGER_DETECTION_WA_BIT)
+			schedule_delayed_work(&chg->hvdcp_detect_work,
+					      msecs_to_jiffies(HVDCP_DET_MS));
+		break;
+	default:
+		break;
+	}
+
+	smblib_dbg(chg, PR_INTERRUPT, "IRQ: apsd-done rising; %s detected\n",
+		   apsd_result->name);
+}
+
+irqreturn_t smblib_handle_usb_source_change(int irq, void *data)
+{
+	struct smb_irq_data *irq_data = data;
+	struct smb_charger *chg = irq_data->parent_data;
+	int rc = 0;
+	u8 stat;
+
+	rc = smblib_read(chg, APSD_STATUS_REG, &stat);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't read APSD_STATUS rc=%d\n", rc);
+		return IRQ_HANDLED;
+	}
+	smblib_dbg(chg, PR_REGISTER, "APSD_STATUS = 0x%02x\n", stat);
+
+	if (chg->micro_usb_mode && (stat & APSD_DTC_STATUS_DONE_BIT)
+			&& !chg->uusb_apsd_rerun_done) {
+		/*
+		 * Force re-run APSD to handle slow insertion related
+		 * charger-mis-detection.
+		 */
+		chg->uusb_apsd_rerun_done = true;
+		smblib_rerun_apsd(chg);
+		return IRQ_HANDLED;
+	}
+
+	smblib_handle_apsd_done(chg,
+		(bool)(stat & APSD_DTC_STATUS_DONE_BIT));
+
+	smblib_handle_hvdcp_detect_done(chg,
+		(bool)(stat & QC_CHARGER_BIT));
+
+	smblib_handle_hvdcp_check_timeout(chg,
+		(bool)(stat & HVDCP_CHECK_TIMEOUT_BIT),
+		(bool)(stat & QC_CHARGER_BIT));
+
+	smblib_handle_hvdcp_3p0_auth_done(chg,
+		(bool)(stat & QC_AUTH_DONE_STATUS_BIT));
+
+	smblib_handle_sdp_enumeration_done(chg,
+		(bool)(stat & ENUMERATION_DONE_BIT));
+
+	smblib_handle_slow_plugin_timeout(chg,
+		(bool)(stat & SLOW_PLUGIN_TIMEOUT_BIT));
+
+	smblib_hvdcp_adaptive_voltage_change(chg);
+
+	power_supply_changed(chg->usb_psy);
+
+	rc = smblib_read(chg, APSD_STATUS_REG, &stat);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't read APSD_STATUS rc=%d\n", rc);
+		return IRQ_HANDLED;
+	}
+	smblib_dbg(chg, PR_REGISTER, "APSD_STATUS = 0x%02x\n", stat);
+
+	return IRQ_HANDLED;
+}
+
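+/*
+ * Editor's sketch (not from the original source) of the Try.SNK flow
+ * implemented below, using the timing values from this function:
+ *
+ *   force SNK (Rd), shrink tCC debounce to ~20 ms, wait ~120 ms
+ *     |-- partner never shows Rp --------------------> TryWait.SRC
+ *     `-- partner shows Rp: switch to DRP, poll:
+ *           VBUS present + debounce done ------------> ATTACHED_SINK
+ *           partner drops Rp / we drift to source ---> TryWait.SRC
+ *
+ *   TryWait.SRC: force SRC (Rp), wait ~80 ms
+ *           partner presents Rd ---------------------> ATTACHED_SRC
+ *           otherwise -------------------------------> UNATTACHED_SINK
+ */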
+static int typec_try_sink(struct smb_charger *chg)
+{
+	union power_supply_propval val;
+	bool debounce_done, vbus_detected, sink;
+	u8 stat;
+	int exit_mode = ATTACHED_SRC, rc;
+
+	/* ignore typec interrupt while try.snk WIP */
+	chg->try_sink_active = true;
+
+	/* force SNK mode */
+	val.intval = POWER_SUPPLY_TYPEC_PR_SINK;
+	rc = smblib_set_prop_typec_power_role(chg, &val);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't set UFP mode rc=%d\n", rc);
+		goto try_sink_exit;
+	}
+
+	/* reduce tCC debounce time to ~20 ms */
+	rc = smblib_masked_write(chg, MISC_CFG_REG,
+			TCC_DEBOUNCE_20MS_BIT, TCC_DEBOUNCE_20MS_BIT);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't set MISC_CFG_REG rc=%d\n", rc);
+		goto try_sink_exit;
+	}
+
+	/*
+	 * give opportunity to the other side to be a SRC,
+	 * for tDRPTRY + Tccdebounce time
+	 */
+	msleep(120);
+
+	rc = smblib_read(chg, TYPE_C_STATUS_4_REG, &stat);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't read TYPE_C_STATUS_4 rc=%d\n",
+				rc);
+		goto try_sink_exit;
+	}
+
+	debounce_done = stat & TYPEC_DEBOUNCE_DONE_STATUS_BIT;
+
+	if (!debounce_done)
+		/*
+		 * The other side didn't switch to source; either it is
+		 * an adamant sink or it has been removed. Go back to
+		 * showing Rp.
+		 */
+		goto try_wait_src;
+
+	/*
+	 * We are in force sink mode and the other side has switched to
+	 * showing Rp. Config DRP in case the other side removes Rp so we
+	 * can quickly (20 ms) switch to showing our Rp. Note that the spec
+	 * requires us to show Rp for 80 ms while the DRP DFP residency is
+	 * just 54 ms, but 54 ms is plenty of time for us to react and
+	 * force Rp for the remaining 26 ms.
+	 */
+	val.intval = POWER_SUPPLY_TYPEC_PR_DUAL;
+	rc = smblib_set_prop_typec_power_role(chg, &val);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't set DFP mode rc=%d\n",
+				rc);
+		goto try_sink_exit;
+	}
+
+	/*
+	 * while other side is Rp, wait for VBUS from it; exit if other side
+	 * removes Rp
+	 */
+	do {
+		rc = smblib_read(chg, TYPE_C_STATUS_4_REG, &stat);
+		if (rc < 0) {
+			smblib_err(chg, "Couldn't read TYPE_C_STATUS_4 rc=%d\n",
+					rc);
+			goto try_sink_exit;
+		}
+
+		debounce_done = stat & TYPEC_DEBOUNCE_DONE_STATUS_BIT;
+		vbus_detected = stat & TYPEC_VBUS_STATUS_BIT;
+
+		/* Successfully transitioned to ATTACHED.SNK */
+		if (vbus_detected && debounce_done) {
+			exit_mode = ATTACHED_SINK;
+			goto try_sink_exit;
+		}
+
+		/*
+		 * Ensure we are still a sink, since DRP may put us in
+		 * source mode if the other side switches back to Rd
+		 */
+		sink = !(stat & UFP_DFP_MODE_STATUS_BIT);
+
+		usleep_range(1000, 2000);
+	} while (debounce_done && sink);
+
+try_wait_src:
+	/*
+	 * Transition to TryWait.SRC state and check whether the other side
+	 * still wants to be a SNK or has been removed.
+	 */
+	val.intval = POWER_SUPPLY_TYPEC_PR_SOURCE;
+	rc = smblib_set_prop_typec_power_role(chg, &val);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't set UFP mode rc=%d\n", rc);
+		goto try_sink_exit;
+	}
+
+	/* Need to be in this state for tDRPTRY time, 75 ms to 150 ms */
+	msleep(80);
+
+	rc = smblib_read(chg, TYPE_C_STATUS_4_REG, &stat);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't read TYPE_C_STATUS_4 rc=%d\n", rc);
+		goto try_sink_exit;
+	}
+
+	debounce_done = stat & TYPEC_DEBOUNCE_DONE_STATUS_BIT;
+
+	if (debounce_done)
+		/* the other side wants to be a sink */
+		exit_mode = ATTACHED_SRC;
+	else
+		/* the other side is detached */
+		exit_mode = UNATTACHED_SINK;
+
+try_sink_exit:
+	/* release forcing of SRC/SNK mode */
+	val.intval = POWER_SUPPLY_TYPEC_PR_DUAL;
+	rc = smblib_set_prop_typec_power_role(chg, &val);
+	if (rc < 0)
+		smblib_err(chg, "Couldn't set DFP mode rc=%d\n", rc);
+
+	/* revert tCC debounce time back to ~120 ms */
+	rc = smblib_masked_write(chg, MISC_CFG_REG, TCC_DEBOUNCE_20MS_BIT, 0);
+	if (rc < 0)
+		smblib_err(chg, "Couldn't set MISC_CFG_REG rc=%d\n", rc);
+
+	chg->try_sink_active = false;
+
+	return exit_mode;
+}
+
+static void typec_sink_insertion(struct smb_charger *chg)
+{
+	int exit_mode;
+
+	/*
+	 * Try.SNK entry condition: ATTACHWAIT.SRC state, with Rd-open or
+	 * Rd-Ra detected for tCCDebounce time.
+	 */
+
+	if (*chg->try_sink_enabled) {
+		exit_mode = typec_try_sink(chg);
+
+		if (exit_mode != ATTACHED_SRC) {
+			smblib_usb_typec_change(chg);
+			return;
+		}
+	}
+
+	/*
+	 * When a sink is inserted we should not wait on the HVDCP timeout
+	 * to enable PD.
+	 */
+	vote(chg->pd_disallowed_votable_indirect, HVDCP_TIMEOUT_VOTER,
+			false, 0);
+	if (chg->use_extcon) {
+		smblib_notify_usb_host(chg, true);
+		chg->otg_present = true;
+	}
+}
+
+static void typec_sink_removal(struct smb_charger *chg)
+{
+	smblib_set_charge_param(chg, &chg->param.freq_boost,
+			chg->chg_freq.freq_above_otg_threshold);
+	chg->boost_current_ua = 0;
+}
+
+static void smblib_handle_typec_removal(struct smb_charger *chg)
+{
+	int rc;
+	struct smb_irq_data *data;
+	struct storm_watch *wdata;
+
+	chg->cc2_detach_wa_active = false;
+
+	rc = smblib_request_dpdm(chg, false);
+	if (rc < 0)
+		smblib_err(chg, "Couldn't disable DPDM rc=%d\n", rc);
+
+	if (chg->wa_flags & BOOST_BACK_WA) {
+		data = chg->irq_info[SWITCH_POWER_OK_IRQ].irq_data;
+		if (data) {
+			wdata = &data->storm_data;
+			update_storm_count(wdata, WEAK_CHG_STORM_COUNT);
+			vote(chg->usb_icl_votable, BOOST_BACK_VOTER, false, 0);
+			vote(chg->usb_icl_votable, WEAK_CHARGER_VOTER,
+					false, 0);
+		}
+	}
+
+	/* reset APSD voters */
+	vote(chg->apsd_disable_votable, PD_HARD_RESET_VOTER, false, 0);
+	vote(chg->apsd_disable_votable, PD_VOTER, false, 0);
+
+	cancel_delayed_work_sync(&chg->pl_enable_work);
+	cancel_delayed_work_sync(&chg->hvdcp_detect_work);
+
+	/* reset input current limit voters */
+	vote(chg->usb_icl_votable, LEGACY_UNKNOWN_VOTER, true, 100000);
+	vote(chg->usb_icl_votable, PD_VOTER, false, 0);
+	vote(chg->usb_icl_votable, USB_PSY_VOTER, false, 0);
+	vote(chg->usb_icl_votable, DCP_VOTER, false, 0);
+	vote(chg->usb_icl_votable, PL_USBIN_USBIN_VOTER, false, 0);
+	vote(chg->usb_icl_votable, SW_QC3_VOTER, false, 0);
+
+	/* reset hvdcp voters */
+	vote(chg->hvdcp_disable_votable_indirect, VBUS_CC_SHORT_VOTER, true, 0);
+	vote(chg->hvdcp_disable_votable_indirect, PD_INACTIVE_VOTER, true, 0);
+	vote(chg->hvdcp_hw_inov_dis_votable, OV_VOTER, false, 0);
+
+	/* reset power delivery voters */
+	vote(chg->pd_allowed_votable, PD_VOTER, false, 0);
+	vote(chg->pd_disallowed_votable_indirect, CC_DETACHED_VOTER, true, 0);
+	vote(chg->pd_disallowed_votable_indirect, HVDCP_TIMEOUT_VOTER, true, 0);
+
+	/* reset usb irq voters */
+	vote(chg->usb_irq_enable_votable, PD_VOTER, false, 0);
+	vote(chg->usb_irq_enable_votable, QC_VOTER, false, 0);
+
+	/* reset parallel voters */
+	vote(chg->pl_disable_votable, PL_DELAY_VOTER, true, 0);
+	vote(chg->pl_enable_votable_indirect, USBIN_I_VOTER, false, 0);
+	vote(chg->pl_enable_votable_indirect, USBIN_V_VOTER, false, 0);
+	vote(chg->awake_votable, PL_DELAY_VOTER, false, 0);
+
+	vote(chg->usb_icl_votable, USBIN_USBIN_BOOST_VOTER, false, 0);
+	chg->vconn_attempts = 0;
+	chg->otg_attempts = 0;
+	chg->pulse_cnt = 0;
+	chg->usb_icl_delta_ua = 0;
+	chg->voltage_min_uv = MICRO_5V;
+	chg->voltage_max_uv = MICRO_5V;
+	chg->pd_active = 0;
+	chg->pd_hard_reset = 0;
+	chg->typec_legacy_valid = false;
+
+	/* write back the default FLOAT charger configuration */
+	rc = smblib_masked_write(chg, USBIN_OPTIONS_2_CFG_REG,
+				(u8)FLOAT_OPTIONS_MASK, chg->float_cfg);
+	if (rc < 0)
+		smblib_err(chg, "Couldn't write float charger options rc=%d\n",
+			rc);
+
+	/* reset back to 120 ms tCC debounce */
+	rc = smblib_masked_write(chg, MISC_CFG_REG, TCC_DEBOUNCE_20MS_BIT, 0);
+	if (rc < 0)
+		smblib_err(chg, "Couldn't set 120 ms tCC debounce rc=%d\n", rc);
+
+	/* enable APSD CC trigger for next insertion */
+	rc = smblib_masked_write(chg, TYPE_C_CFG_REG,
+				APSD_START_ON_CC_BIT, APSD_START_ON_CC_BIT);
+	if (rc < 0)
+		smblib_err(chg, "Couldn't enable APSD_START_ON_CC rc=%d\n", rc);
+
+	if (chg->wa_flags & QC_AUTH_INTERRUPT_WA_BIT) {
+		/* re-enable AUTH_IRQ_EN_CFG_BIT */
+		rc = smblib_masked_write(chg,
+				USBIN_SOURCE_CHANGE_INTRPT_ENB_REG,
+				AUTH_IRQ_EN_CFG_BIT, AUTH_IRQ_EN_CFG_BIT);
+		if (rc < 0)
+			smblib_err(chg,
+				"Couldn't enable QC auth setting rc=%d\n", rc);
+	}
+
+	/* reconfigure allowed voltage for HVDCP */
+	rc = smblib_set_adapter_allowance(chg,
+			USBIN_ADAPTER_ALLOW_5V_OR_9V_TO_12V);
+	if (rc < 0)
+		smblib_err(chg, "Couldn't set USBIN_ADAPTER_ALLOW_5V_OR_9V_TO_12V rc=%d\n",
+			rc);
+
+	/* enable DRP */
+	rc = smblib_masked_write(chg, TYPE_C_INTRPT_ENB_SOFTWARE_CTRL_REG,
+				 TYPEC_POWER_ROLE_CMD_MASK, 0);
+	if (rc < 0)
+		smblib_err(chg, "Couldn't enable DRP rc=%d\n", rc);
+
+	/* HW controlled CC_OUT */
+	rc = smblib_masked_write(chg, TAPER_TIMER_SEL_CFG_REG,
+							TYPEC_SPARE_CFG_BIT, 0);
+	if (rc < 0)
+		smblib_err(chg, "Couldn't enable HW cc_out rc=%d\n", rc);
+
+	/* restore crude sensor */
+	rc = smblib_write(chg, TM_IO_DTEST4_SEL, 0xA5);
+	if (rc < 0)
+		smblib_err(chg, "Couldn't restore crude sensor rc=%d\n", rc);
+
+	mutex_lock(&chg->vconn_oc_lock);
+	if (!chg->vconn_en)
+		goto unlock;
+
+	smblib_masked_write(chg, TYPE_C_INTRPT_ENB_SOFTWARE_CTRL_REG,
+				 VCONN_EN_VALUE_BIT, 0);
+	chg->vconn_en = false;
+
+unlock:
+	mutex_unlock(&chg->vconn_oc_lock);
+
+	/* clear exit sink based on cc */
+	rc = smblib_masked_write(chg, TYPE_C_INTRPT_ENB_SOFTWARE_CTRL_REG,
+						EXIT_SNK_BASED_ON_CC_BIT, 0);
+	if (rc < 0)
+		smblib_err(chg, "Couldn't clear exit_sink_based_on_cc rc=%d\n",
+				rc);
+
+	typec_sink_removal(chg);
+	smblib_update_usb_type(chg);
+
+	if (chg->use_extcon) {
+		if (chg->otg_present)
+			smblib_notify_usb_host(chg, false);
+		else
+			smblib_notify_device_mode(chg, false);
+	}
+	chg->otg_present = false;
+}
+
+static void smblib_handle_typec_insertion(struct smb_charger *chg)
+{
+	int rc;
+
+	vote(chg->pd_disallowed_votable_indirect, CC_DETACHED_VOTER, false, 0);
+
+	/* disable APSD CC trigger since CC is attached */
+	rc = smblib_masked_write(chg, TYPE_C_CFG_REG, APSD_START_ON_CC_BIT, 0);
+	if (rc < 0)
+		smblib_err(chg, "Couldn't disable APSD_START_ON_CC rc=%d\n",
+									rc);
+
+	if (chg->typec_status[3] & UFP_DFP_MODE_STATUS_BIT) {
+		typec_sink_insertion(chg);
+	} else {
+		rc = smblib_request_dpdm(chg, true);
+		if (rc < 0)
+			smblib_err(chg, "Couldn't to enable DPDM rc=%d\n", rc);
+		typec_sink_removal(chg);
+	}
+}
+
+static void smblib_handle_rp_change(struct smb_charger *chg, int typec_mode)
+{
+	int rp_ua;
+	const struct apsd_result *apsd = smblib_get_apsd_result(chg);
+
+	if ((apsd->pst != POWER_SUPPLY_TYPE_USB_DCP)
+		&& (apsd->pst != POWER_SUPPLY_TYPE_USB_FLOAT))
+		return;
+
+	/*
+	 * If APSD indicates FLOAT but the USB stack has detected SDP,
+	 * do not respond to Rp changes, since we cannot confirm that it
+	 * is a legacy cable.
+	 */
+	 */
+	if (chg->real_charger_type == POWER_SUPPLY_TYPE_USB)
+		return;
+	/*
+	 * We want the ICL vote @ 100mA for a FLOAT charger
+	 * until the detection by the USB stack is complete.
+	 * Ignore the Rp changes unless there is a
+	 * pre-existing valid vote.
+	 */
+	if (apsd->pst == POWER_SUPPLY_TYPE_USB_FLOAT &&
+		get_client_vote(chg->usb_icl_votable,
+			LEGACY_UNKNOWN_VOTER) <= 100000)
+		return;
+
+	/*
+	 * handle Rp change for DCP/FLOAT/OCP.
+	 * Update the current only if the Rp is different from
+	 * the last Rp value.
+	 */
+	smblib_dbg(chg, PR_MISC, "CC change old_mode=%d new_mode=%d\n",
+						chg->typec_mode, typec_mode);
+
+	rp_ua = get_rp_based_dcp_current(chg, typec_mode);
+	vote(chg->usb_icl_votable, LEGACY_UNKNOWN_VOTER, true, rp_ua);
+}
+
+static void smblib_handle_typec_cc_state_change(struct smb_charger *chg)
+{
+	int typec_mode;
+
+	if (chg->pr_swap_in_progress)
+		return;
+
+	typec_mode = smblib_get_prop_typec_mode(chg);
+	if (chg->typec_present && (typec_mode != chg->typec_mode))
+		smblib_handle_rp_change(chg, typec_mode);
+
+	chg->typec_mode = typec_mode;
+
+	if (!chg->typec_present && chg->typec_mode != POWER_SUPPLY_TYPEC_NONE) {
+		chg->typec_present = true;
+		smblib_dbg(chg, PR_MISC, "TypeC %s insertion\n",
+			smblib_typec_mode_name[chg->typec_mode]);
+		smblib_handle_typec_insertion(chg);
+	} else if (chg->typec_present &&
+				chg->typec_mode == POWER_SUPPLY_TYPEC_NONE) {
+		chg->typec_present = false;
+		smblib_dbg(chg, PR_MISC, "TypeC removal\n");
+		smblib_handle_typec_removal(chg);
+	}
+
+	smblib_dbg(chg, PR_INTERRUPT, "IRQ: cc-state-change; Type-C %s detected\n",
+				smblib_typec_mode_name[chg->typec_mode]);
+}
+
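+/*
+ * Editor's note: smblib_usb_typec_change() below snapshots the five
+ * TYPE_C_STATUS registers into chg->typec_status; index [3]
+ * (TYPE_C_STATUS_4) carries the UFP/DFP mode, VBUS error and VCONN
+ * over-current bits consulted elsewhere in this file.
+ */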
+void smblib_usb_typec_change(struct smb_charger *chg)
+{
+	int rc;
+
+	rc = smblib_multibyte_read(chg, TYPE_C_STATUS_1_REG,
+							chg->typec_status, 5);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't cache USB Type-C status rc=%d\n", rc);
+		return;
+	}
+
+	smblib_handle_typec_cc_state_change(chg);
+
+	if (chg->typec_status[3] & TYPEC_VBUS_ERROR_STATUS_BIT)
+		smblib_dbg(chg, PR_INTERRUPT, "IRQ: vbus-error\n");
+
+	if (chg->typec_status[3] & TYPEC_VCONN_OVERCURR_STATUS_BIT)
+		schedule_work(&chg->vconn_oc_work);
+
+	power_supply_changed(chg->usb_psy);
+}
+
+irqreturn_t smblib_handle_usb_typec_change(int irq, void *data)
+{
+	struct smb_irq_data *irq_data = data;
+	struct smb_charger *chg = irq_data->parent_data;
+
+	if (chg->micro_usb_mode) {
+		cancel_delayed_work_sync(&chg->uusb_otg_work);
+		vote(chg->awake_votable, OTG_DELAY_VOTER, true, 0);
+		smblib_dbg(chg, PR_INTERRUPT, "Scheduling OTG work\n");
+		schedule_delayed_work(&chg->uusb_otg_work,
+				msecs_to_jiffies(chg->otg_delay_ms));
+		return IRQ_HANDLED;
+	}
+
+	if (chg->cc2_detach_wa_active || chg->typec_en_dis_active ||
+					 chg->try_sink_active) {
+		smblib_dbg(chg, PR_MISC | PR_INTERRUPT, "Ignoring since %s active\n",
+			chg->cc2_detach_wa_active ? "cc2_detach_wa" :
+			chg->typec_en_dis_active ? "typec_en_dis" :
+			"try_sink");
+		return IRQ_HANDLED;
+	}
+
+	mutex_lock(&chg->lock);
+	smblib_usb_typec_change(chg);
+	mutex_unlock(&chg->lock);
+	return IRQ_HANDLED;
+}
+
+irqreturn_t smblib_handle_dc_plugin(int irq, void *data)
+{
+	struct smb_irq_data *irq_data = data;
+	struct smb_charger *chg = irq_data->parent_data;
+
+	power_supply_changed(chg->dc_psy);
+	return IRQ_HANDLED;
+}
+
+irqreturn_t smblib_handle_high_duty_cycle(int irq, void *data)
+{
+	struct smb_irq_data *irq_data = data;
+	struct smb_charger *chg = irq_data->parent_data;
+
+	chg->is_hdc = true;
+	/*
+	 * Disable the USB IRQ after the flag is set and re-enable it after
+	 * the flag is cleared in the delayed work queue, to avoid any IRQ
+	 * storm during the delay.
+	 */
+	if (chg->irq_info[HIGH_DUTY_CYCLE_IRQ].irq)
+		disable_irq_nosync(chg->irq_info[HIGH_DUTY_CYCLE_IRQ].irq);
+
+	schedule_delayed_work(&chg->clear_hdc_work, msecs_to_jiffies(60));
+
+	return IRQ_HANDLED;
+}
+
+static void smblib_bb_removal_work(struct work_struct *work)
+{
+	struct smb_charger *chg = container_of(work, struct smb_charger,
+						bb_removal_work.work);
+
+	vote(chg->usb_icl_votable, BOOST_BACK_VOTER, false, 0);
+	vote(chg->awake_votable, BOOST_BACK_VOTER, false, 0);
+}
+
+#define BOOST_BACK_UNVOTE_DELAY_MS		750
+#define BOOST_BACK_STORM_COUNT			3
+#define WEAK_CHG_STORM_COUNT			8
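+/*
+ * Editor's note: the storm handling below works in two stages. A first
+ * IRQ storm with no WEAK_CHARGER vote in place is treated as a weak
+ * charger: ICL is cut to *chg->weak_chg_icl_ua and the storm threshold
+ * is re-armed to BOOST_BACK_STORM_COUNT. A storm after that is treated
+ * as reverse boost: the input is suspended with a 0 mA ICL vote, which
+ * bb_removal_work lifts again after BOOST_BACK_UNVOTE_DELAY_MS.
+ */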
+irqreturn_t smblib_handle_switcher_power_ok(int irq, void *data)
+{
+	struct smb_irq_data *irq_data = data;
+	struct smb_charger *chg = irq_data->parent_data;
+	struct storm_watch *wdata = &irq_data->storm_data;
+	int rc, usb_icl;
+	u8 stat;
+
+	if (!(chg->wa_flags & BOOST_BACK_WA))
+		return IRQ_HANDLED;
+
+	rc = smblib_read(chg, POWER_PATH_STATUS_REG, &stat);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't read POWER_PATH_STATUS rc=%d\n", rc);
+		return IRQ_HANDLED;
+	}
+
+	/*
+	 * skip suspending input if it's already suspended by some other
+	 * voter
+	 */
+	usb_icl = get_effective_result(chg->usb_icl_votable);
+	if ((stat & USE_USBIN_BIT) && usb_icl >= 0 && usb_icl < USBIN_25MA)
+		return IRQ_HANDLED;
+
+	if (stat & USE_DCIN_BIT)
+		return IRQ_HANDLED;
+
+	if (is_storming(&irq_data->storm_data)) {
+		/* This could be a weak charger; reduce ICL */
+		if (!is_client_vote_enabled(chg->usb_icl_votable,
+						WEAK_CHARGER_VOTER)) {
+			smblib_err(chg,
+				"Weak charger detected: voting %dmA ICL\n",
+				*chg->weak_chg_icl_ua / 1000);
+			vote(chg->usb_icl_votable, WEAK_CHARGER_VOTER,
+					true, *chg->weak_chg_icl_ua);
+			/*
+			 * reset storm data and set the storm threshold
+			 * to 3 for reverse boost detection.
+			 */
+			update_storm_count(wdata, BOOST_BACK_STORM_COUNT);
+		} else {
+			smblib_err(chg,
+				"Reverse boost detected: voting 0mA to suspend input\n");
+			vote(chg->usb_icl_votable, BOOST_BACK_VOTER, true, 0);
+			vote(chg->awake_votable, BOOST_BACK_VOTER, true, 0);
+			/*
+			 * Remove the boost-back vote after a delay, to avoid
+			 * permanently suspending the input if the boost-back
+			 * condition is unintentionally hit.
+			 */
+			schedule_delayed_work(&chg->bb_removal_work,
+				msecs_to_jiffies(BOOST_BACK_UNVOTE_DELAY_MS));
+		}
+	}
+
+	return IRQ_HANDLED;
+}
+
+irqreturn_t smblib_handle_wdog_bark(int irq, void *data)
+{
+	struct smb_irq_data *irq_data = data;
+	struct smb_charger *chg = irq_data->parent_data;
+	int rc;
+
+	smblib_dbg(chg, PR_INTERRUPT, "IRQ: %s\n", irq_data->name);
+
+	rc = smblib_write(chg, BARK_BITE_WDOG_PET_REG, BARK_BITE_WDOG_PET_BIT);
+	if (rc < 0)
+		smblib_err(chg, "Couldn't pet the dog rc=%d\n", rc);
+
+	if (chg->step_chg_enabled || chg->sw_jeita_enabled)
+		power_supply_changed(chg->batt_psy);
+
+	return IRQ_HANDLED;
+}
+
+/**************************************
+ * Additional USB PSY getters/setters *
+ * that call interrupt functions      *
+ **************************************/
+
+int smblib_get_prop_pr_swap_in_progress(struct smb_charger *chg,
+				union power_supply_propval *val)
+{
+	val->intval = chg->pr_swap_in_progress;
+	return 0;
+}
+
+int smblib_set_prop_pr_swap_in_progress(struct smb_charger *chg,
+				const union power_supply_propval *val)
+{
+	int rc;
+
+	chg->pr_swap_in_progress = val->intval;
+	/*
+	 * Invoke the CC-changed handler to process any real removal that
+	 * happened while PR_SWAP was in progress.
+	 */
+	smblib_usb_typec_change(chg);
+	rc = smblib_masked_write(chg, MISC_CFG_REG, TCC_DEBOUNCE_20MS_BIT,
+			val->intval ? TCC_DEBOUNCE_20MS_BIT : 0);
+	if (rc < 0)
+		smblib_err(chg, "Couldn't set tCC debounce rc=%d\n", rc);
+	return 0;
+}
+
+/***************
+ * Work Queues *
+ ***************/
+static void smblib_uusb_otg_work(struct work_struct *work)
+{
+	struct smb_charger *chg = container_of(work, struct smb_charger,
+						uusb_otg_work.work);
+	int rc;
+	u8 stat;
+	bool otg;
+
+	rc = smblib_read(chg, TYPE_C_STATUS_3_REG, &stat);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't read TYPE_C_STATUS_3 rc=%d\n", rc);
+		goto out;
+	}
+
+	otg = !!(stat & (U_USB_GND_NOVBUS_BIT | U_USB_GND_BIT));
+	extcon_set_cable_state_(chg->extcon, EXTCON_USB_HOST, otg);
+	smblib_dbg(chg, PR_REGISTER, "TYPE_C_STATUS_3 = 0x%02x OTG=%d\n",
+			stat, otg);
+	power_supply_changed(chg->usb_psy);
+
+out:
+	vote(chg->awake_votable, OTG_DELAY_VOTER, false, 0);
+}
+
+static void smblib_hvdcp_detect_work(struct work_struct *work)
+{
+	struct smb_charger *chg = container_of(work, struct smb_charger,
+					       hvdcp_detect_work.work);
+
+	vote(chg->pd_disallowed_votable_indirect, HVDCP_TIMEOUT_VOTER,
+				false, 0);
+	power_supply_changed(chg->usb_psy);
+}
+
+static void bms_update_work(struct work_struct *work)
+{
+	struct smb_charger *chg = container_of(work, struct smb_charger,
+						bms_update_work);
+
+	smblib_suspend_on_debug_battery(chg);
+
+	if (chg->batt_psy)
+		power_supply_changed(chg->batt_psy);
+}
+
+static void clear_hdc_work(struct work_struct *work)
+{
+	struct smb_charger *chg = container_of(work, struct smb_charger,
+						clear_hdc_work.work);
+
+	chg->is_hdc = 0;
+	if (chg->irq_info[HIGH_DUTY_CYCLE_IRQ].irq)
+		enable_irq(chg->irq_info[HIGH_DUTY_CYCLE_IRQ].irq);
+}
+
+static void rdstd_cc2_detach_work(struct work_struct *work)
+{
+	int rc;
+	u8 stat4, stat5;
+	struct smb_charger *chg = container_of(work, struct smb_charger,
+						rdstd_cc2_detach_work);
+
+	if (!chg->cc2_detach_wa_active)
+		return;
+
+	/*
+	 * WA steps -
+	 * 1. Enable both UFP and DFP, wait for 10ms.
+	 * 2. Disable DFP, wait for 30ms.
+	 * 3. Removal detected if both TYPEC_DEBOUNCE_DONE_STATUS
+	 *    and TIMER_STAGE bits are gone, otherwise repeat all by
+	 *    work rescheduling.
+	 * Note, work will be cancelled when USB_PLUGIN rises.
+	 */
+
+	rc = smblib_masked_write(chg, TYPE_C_INTRPT_ENB_SOFTWARE_CTRL_REG,
+				 UFP_EN_CMD_BIT | DFP_EN_CMD_BIT,
+				 UFP_EN_CMD_BIT | DFP_EN_CMD_BIT);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't write TYPE_C_CTRL_REG rc=%d\n", rc);
+		return;
+	}
+
+	usleep_range(10000, 11000);
+
+	rc = smblib_masked_write(chg, TYPE_C_INTRPT_ENB_SOFTWARE_CTRL_REG,
+				 UFP_EN_CMD_BIT | DFP_EN_CMD_BIT,
+				 UFP_EN_CMD_BIT);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't write TYPE_C_CTRL_REG rc=%d\n", rc);
+		return;
+	}
+
+	usleep_range(30000, 31000);
+
+	rc = smblib_read(chg, TYPE_C_STATUS_4_REG, &stat4);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't read TYPE_C_STATUS_4 rc=%d\n", rc);
+		return;
+	}
+
+	rc = smblib_read(chg, TYPE_C_STATUS_5_REG, &stat5);
+	if (rc < 0) {
+		smblib_err(chg,
+			"Couldn't read TYPE_C_STATUS_5_REG rc=%d\n", rc);
+		return;
+	}
+
+	if ((stat4 & TYPEC_DEBOUNCE_DONE_STATUS_BIT)
+			|| (stat5 & TIMER_STAGE_2_BIT)) {
+		smblib_dbg(chg, PR_MISC, "rerunning DD=%d TS2BIT=%d\n",
+				(int)(stat4 & TYPEC_DEBOUNCE_DONE_STATUS_BIT),
+				(int)(stat5 & TIMER_STAGE_2_BIT));
+		goto rerun;
+	}
+
+	smblib_dbg(chg, PR_MISC, "Bingo CC2 Removal detected\n");
+	chg->cc2_detach_wa_active = false;
+	rc = smblib_masked_write(chg, TYPE_C_INTRPT_ENB_SOFTWARE_CTRL_REG,
+						EXIT_SNK_BASED_ON_CC_BIT, 0);
+	if (rc < 0)
+		smblib_err(chg, "Couldn't clear exit_sink_based_on_cc rc=%d\n",
+				rc);
+	smblib_reg_block_restore(chg, cc2_detach_settings);
+	mutex_lock(&chg->lock);
+	smblib_usb_typec_change(chg);
+	mutex_unlock(&chg->lock);
+	return;
+
+rerun:
+	schedule_work(&chg->rdstd_cc2_detach_work);
+}
+
+static void smblib_otg_oc_exit(struct smb_charger *chg, bool success)
+{
+	int rc;
+
+	chg->otg_attempts = 0;
+	if (!success) {
+		smblib_err(chg, "OTG soft start failed\n");
+		chg->otg_en = false;
+	}
+
+	smblib_dbg(chg, PR_OTG, "enabling VBUS < 1V check\n");
+	rc = smblib_masked_write(chg, OTG_CFG_REG,
+					QUICKSTART_OTG_FASTROLESWAP_BIT, 0);
+	if (rc < 0)
+		smblib_err(chg, "Couldn't enable VBUS < 1V check rc=%d\n", rc);
+}
+
+#define MAX_OC_FALLING_TRIES 10
+static void smblib_otg_oc_work(struct work_struct *work)
+{
+	struct smb_charger *chg = container_of(work, struct smb_charger,
+								otg_oc_work);
+	int rc, i;
+	u8 stat;
+
+	if (!chg->vbus_vreg || !chg->vbus_vreg->rdev)
+		return;
+
+	smblib_err(chg, "over-current detected on VBUS\n");
+	mutex_lock(&chg->otg_oc_lock);
+	if (!chg->otg_en)
+		goto unlock;
+
+	smblib_dbg(chg, PR_OTG, "disabling VBUS < 1V check\n");
+	smblib_masked_write(chg, OTG_CFG_REG,
+					QUICKSTART_OTG_FASTROLESWAP_BIT,
+					QUICKSTART_OTG_FASTROLESWAP_BIT);
+
+	/*
+	 * If 500ms has passed and another over-current interrupt has not
+	 * triggered then it is likely that the software based soft start was
+	 * successful and the VBUS < 1V restriction should be re-enabled.
+	 */
+	schedule_delayed_work(&chg->otg_ss_done_work, msecs_to_jiffies(500));
+
+	rc = _smblib_vbus_regulator_disable(chg->vbus_vreg->rdev);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't disable VBUS rc=%d\n", rc);
+		goto unlock;
+	}
+
+	if (++chg->otg_attempts > OTG_MAX_ATTEMPTS) {
+		cancel_delayed_work_sync(&chg->otg_ss_done_work);
+		smblib_err(chg, "OTG failed to enable after %d attempts\n",
+			   chg->otg_attempts - 1);
+		smblib_otg_oc_exit(chg, false);
+		goto unlock;
+	}
+
+	/*
+	 * The real time status should go low within 10 ms. Poll every 1-2 ms
+	 * to minimize the delay when re-enabling OTG.
+	 */
+	for (i = 0; i < MAX_OC_FALLING_TRIES; ++i) {
+		usleep_range(1000, 2000);
+		rc = smblib_read(chg, OTG_BASE + INT_RT_STS_OFFSET, &stat);
+		if (rc >= 0 && !(stat & OTG_OVERCURRENT_RT_STS_BIT))
+			break;
+	}
+
+	if (i >= MAX_OC_FALLING_TRIES) {
+		cancel_delayed_work_sync(&chg->otg_ss_done_work);
+		smblib_err(chg, "OTG OC did not fall after %dms\n",
+						2 * MAX_OC_FALLING_TRIES);
+		smblib_otg_oc_exit(chg, false);
+		goto unlock;
+	}
+
+	smblib_dbg(chg, PR_OTG, "OTG OC fell after %dms\n", 2 * i + 1);
+	rc = _smblib_vbus_regulator_enable(chg->vbus_vreg->rdev);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't enable VBUS rc=%d\n", rc);
+		goto unlock;
+	}
+
+unlock:
+	mutex_unlock(&chg->otg_oc_lock);
+}
+
+static void smblib_vconn_oc_work(struct work_struct *work)
+{
+	struct smb_charger *chg = container_of(work, struct smb_charger,
+								vconn_oc_work);
+	int rc, i;
+	u8 stat;
+
+	if (chg->micro_usb_mode)
+		return;
+
+	smblib_err(chg, "over-current detected on VCONN\n");
+	if (!chg->vconn_vreg || !chg->vconn_vreg->rdev)
+		return;
+
+	mutex_lock(&chg->vconn_oc_lock);
+	rc = _smblib_vconn_regulator_disable(chg->vconn_vreg->rdev);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't disable VCONN rc=%d\n", rc);
+		goto unlock;
+	}
+
+	if (++chg->vconn_attempts > VCONN_MAX_ATTEMPTS) {
+		smblib_err(chg, "VCONN failed to enable after %d attempts\n",
+			   chg->vconn_attempts - 1);
+		chg->vconn_en = false;
+		chg->vconn_attempts = 0;
+		goto unlock;
+	}
+
+	/*
+	 * The real time status should go low within 10 ms. Poll every 1-2 ms
+	 * to minimize the delay when re-enabling VCONN.
+	 */
+	for (i = 0; i < MAX_OC_FALLING_TRIES; ++i) {
+		usleep_range(1000, 2000);
+		rc = smblib_read(chg, TYPE_C_STATUS_4_REG, &stat);
+		if (rc >= 0 && !(stat & TYPEC_VCONN_OVERCURR_STATUS_BIT))
+			break;
+	}
+
+	if (i >= MAX_OC_FALLING_TRIES) {
+		smblib_err(chg, "VCONN OC did not fall after %dms\n",
+						2 * MAX_OC_FALLING_TRIES);
+		chg->vconn_en = false;
+		chg->vconn_attempts = 0;
+		goto unlock;
+	}
+
+	smblib_dbg(chg, PR_OTG, "VCONN OC fell after %dms\n", 2 * i + 1);
+	if (++chg->vconn_attempts > VCONN_MAX_ATTEMPTS) {
+		smblib_err(chg, "VCONN failed to enable after %d attempts\n",
+			   chg->vconn_attempts - 1);
+		chg->vconn_en = false;
+		goto unlock;
+	}
+
+	rc = _smblib_vconn_regulator_enable(chg->vconn_vreg->rdev);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't enable VCONN rc=%d\n", rc);
+		goto unlock;
+	}
+
+unlock:
+	mutex_unlock(&chg->vconn_oc_lock);
+}
+
+static void smblib_otg_ss_done_work(struct work_struct *work)
+{
+	struct smb_charger *chg = container_of(work, struct smb_charger,
+							otg_ss_done_work.work);
+	int rc;
+	bool success = false;
+	u8 stat;
+
+	mutex_lock(&chg->otg_oc_lock);
+	rc = smblib_read(chg, OTG_STATUS_REG, &stat);
+	if (rc < 0)
+		smblib_err(chg, "Couldn't read OTG status rc=%d\n", rc);
+	else if (stat & BOOST_SOFTSTART_DONE_BIT)
+		success = true;
+
+	smblib_otg_oc_exit(chg, success);
+	mutex_unlock(&chg->otg_oc_lock);
+}
+
+static void smblib_icl_change_work(struct work_struct *work)
+{
+	struct smb_charger *chg = container_of(work, struct smb_charger,
+							icl_change_work.work);
+	int rc, settled_ua;
+
+	rc = smblib_get_charge_param(chg, &chg->param.icl_stat, &settled_ua);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't get ICL status rc=%d\n", rc);
+		return;
+	}
+
+	power_supply_changed(chg->usb_main_psy);
+
+	smblib_dbg(chg, PR_INTERRUPT, "icl_settled=%d\n", settled_ua);
+}
+
+static void smblib_pl_enable_work(struct work_struct *work)
+{
+	struct smb_charger *chg = container_of(work, struct smb_charger,
+							pl_enable_work.work);
+
+	smblib_dbg(chg, PR_PARALLEL, "timer expired, enabling parallel\n");
+	vote(chg->pl_disable_votable, PL_DELAY_VOTER, false, 0);
+	vote(chg->awake_votable, PL_DELAY_VOTER, false, 0);
+}
+
+static void smblib_legacy_detection_work(struct work_struct *work)
+{
+	struct smb_charger *chg = container_of(work, struct smb_charger,
+							legacy_detection_work);
+	int rc;
+	u8 stat;
+	bool legacy, rp_high;
+
+	mutex_lock(&chg->lock);
+	chg->typec_en_dis_active = 1;
+	smblib_dbg(chg, PR_MISC, "running legacy unknown workaround\n");
+	rc = smblib_masked_write(chg,
+				TYPE_C_INTRPT_ENB_SOFTWARE_CTRL_REG,
+				TYPEC_DISABLE_CMD_BIT,
+				TYPEC_DISABLE_CMD_BIT);
+	if (rc < 0)
+		smblib_err(chg, "Couldn't disable type-c rc=%d\n", rc);
+
+	/* wait for the adapter to turn off VBUS */
+	msleep(1000);
+
+	smblib_dbg(chg, PR_MISC, "legacy workaround enabling typec\n");
+
+	rc = smblib_masked_write(chg,
+				TYPE_C_INTRPT_ENB_SOFTWARE_CTRL_REG,
+				TYPEC_DISABLE_CMD_BIT, 0);
+	if (rc < 0)
+		smblib_err(chg, "Couldn't enable type-c rc=%d\n", rc);
+
+	/* wait for type-c detection to complete */
+	msleep(400);
+
+	rc = smblib_read(chg, TYPE_C_STATUS_5_REG, &stat);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't read typec stat5 rc = %d\n", rc);
+		goto unlock;
+	}
+
+	chg->typec_legacy_valid = true;
+	vote(chg->usb_icl_votable, LEGACY_UNKNOWN_VOTER, false, 0);
+	legacy = stat & TYPEC_LEGACY_CABLE_STATUS_BIT;
+	rp_high = chg->typec_mode == POWER_SUPPLY_TYPEC_SOURCE_HIGH;
+	smblib_dbg(chg, PR_MISC, "legacy workaround done legacy = %d rp_high = %d\n",
+			legacy, rp_high);
+	if (!legacy || !rp_high)
+		vote(chg->hvdcp_disable_votable_indirect, VBUS_CC_SHORT_VOTER,
+								false, 0);
+
+unlock:
+	chg->typec_en_dis_active = 0;
+	smblib_usb_typec_change(chg);
+	mutex_unlock(&chg->lock);
+}
+
+static int smblib_create_votables(struct smb_charger *chg)
+{
+	int rc = 0;
+
+	chg->fcc_votable = find_votable("FCC");
+	if (chg->fcc_votable == NULL) {
+		rc = -EINVAL;
+		smblib_err(chg, "Couldn't find FCC votable rc=%d\n", rc);
+		return rc;
+	}
+
+	chg->fv_votable = find_votable("FV");
+	if (chg->fv_votable == NULL) {
+		rc = -EINVAL;
+		smblib_err(chg, "Couldn't find FV votable rc=%d\n", rc);
+		return rc;
+	}
+
+	chg->usb_icl_votable = find_votable("USB_ICL");
+	if (!chg->usb_icl_votable) {
+		rc = -EINVAL;
+		smblib_err(chg, "Couldn't find USB_ICL votable rc=%d\n", rc);
+		return rc;
+	}
+
+	chg->pl_disable_votable = find_votable("PL_DISABLE");
+	if (chg->pl_disable_votable == NULL) {
+		rc = -EINVAL;
+		smblib_err(chg, "Couldn't find votable PL_DISABLE rc=%d\n", rc);
+		return rc;
+	}
+
+	chg->pl_enable_votable_indirect = find_votable("PL_ENABLE_INDIRECT");
+	if (chg->pl_enable_votable_indirect == NULL) {
+		rc = -EINVAL;
+		smblib_err(chg,
+			"Couldn't find votable PL_ENABLE_INDIRECT rc=%d\n",
+			rc);
+		return rc;
+	}
+
+	vote(chg->pl_disable_votable, PL_DELAY_VOTER, true, 0);
+
+	chg->dc_suspend_votable = create_votable("DC_SUSPEND", VOTE_SET_ANY,
+					smblib_dc_suspend_vote_callback,
+					chg);
+	if (IS_ERR(chg->dc_suspend_votable)) {
+		rc = PTR_ERR(chg->dc_suspend_votable);
+		return rc;
+	}
+
+	chg->dc_icl_votable = create_votable("DC_ICL", VOTE_MIN,
+					smblib_dc_icl_vote_callback,
+					chg);
+	if (IS_ERR(chg->dc_icl_votable)) {
+		rc = PTR_ERR(chg->dc_icl_votable);
+		return rc;
+	}
+
+	chg->pd_disallowed_votable_indirect
+		= create_votable("PD_DISALLOWED_INDIRECT", VOTE_SET_ANY,
+			smblib_pd_disallowed_votable_indirect_callback, chg);
+	if (IS_ERR(chg->pd_disallowed_votable_indirect)) {
+		rc = PTR_ERR(chg->pd_disallowed_votable_indirect);
+		return rc;
+	}
+
+	chg->pd_allowed_votable = create_votable("PD_ALLOWED",
+					VOTE_SET_ANY, NULL, NULL);
+	if (IS_ERR(chg->pd_allowed_votable)) {
+		rc = PTR_ERR(chg->pd_allowed_votable);
+		return rc;
+	}
+
+	chg->awake_votable = create_votable("AWAKE", VOTE_SET_ANY,
+					smblib_awake_vote_callback,
+					chg);
+	if (IS_ERR(chg->awake_votable)) {
+		rc = PTR_ERR(chg->awake_votable);
+		return rc;
+	}
+
+	chg->chg_disable_votable = create_votable("CHG_DISABLE", VOTE_SET_ANY,
+					smblib_chg_disable_vote_callback,
+					chg);
+	if (IS_ERR(chg->chg_disable_votable)) {
+		rc = PTR_ERR(chg->chg_disable_votable);
+		return rc;
+	}
+
+
+	chg->hvdcp_disable_votable_indirect = create_votable(
+				"HVDCP_DISABLE_INDIRECT",
+				VOTE_SET_ANY,
+				smblib_hvdcp_disable_indirect_vote_callback,
+				chg);
+	if (IS_ERR(chg->hvdcp_disable_votable_indirect)) {
+		rc = PTR_ERR(chg->hvdcp_disable_votable_indirect);
+		return rc;
+	}
+
+	chg->hvdcp_enable_votable = create_votable("HVDCP_ENABLE",
+					VOTE_SET_ANY,
+					smblib_hvdcp_enable_vote_callback,
+					chg);
+	if (IS_ERR(chg->hvdcp_enable_votable)) {
+		rc = PTR_ERR(chg->hvdcp_enable_votable);
+		return rc;
+	}
+
+	chg->apsd_disable_votable = create_votable("APSD_DISABLE",
+					VOTE_SET_ANY,
+					smblib_apsd_disable_vote_callback,
+					chg);
+	if (IS_ERR(chg->apsd_disable_votable)) {
+		rc = PTR_ERR(chg->apsd_disable_votable);
+		return rc;
+	}
+
+	chg->hvdcp_hw_inov_dis_votable = create_votable("HVDCP_HW_INOV_DIS",
+					VOTE_SET_ANY,
+					smblib_hvdcp_hw_inov_dis_vote_callback,
+					chg);
+	if (IS_ERR(chg->hvdcp_hw_inov_dis_votable)) {
+		rc = PTR_ERR(chg->hvdcp_hw_inov_dis_votable);
+		return rc;
+	}
+
+	chg->usb_irq_enable_votable = create_votable("USB_IRQ_DISABLE",
+					VOTE_SET_ANY,
+					smblib_usb_irq_enable_vote_callback,
+					chg);
+	if (IS_ERR(chg->usb_irq_enable_votable)) {
+		rc = PTR_ERR(chg->usb_irq_enable_votable);
+		return rc;
+	}
+
+	chg->typec_irq_disable_votable = create_votable("TYPEC_IRQ_DISABLE",
+					VOTE_SET_ANY,
+					smblib_typec_irq_disable_vote_callback,
+					chg);
+	if (IS_ERR(chg->typec_irq_disable_votable)) {
+		rc = PTR_ERR(chg->typec_irq_disable_votable);
+		return rc;
+	}
+
+	return rc;
+}
+
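+/*
+ * Editor's note: the FCC, FV, USB_ICL and PL_* votables are only looked
+ * up with find_votable() in smblib_create_votables() above and are
+ * owned by other drivers; only the votables created there with
+ * create_votable() are destroyed below.
+ */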
+static void smblib_destroy_votables(struct smb_charger *chg)
+{
+	if (chg->dc_suspend_votable)
+		destroy_votable(chg->dc_suspend_votable);
+	if (chg->dc_icl_votable)
+		destroy_votable(chg->dc_icl_votable);
+	if (chg->pd_disallowed_votable_indirect)
+		destroy_votable(chg->pd_disallowed_votable_indirect);
+	if (chg->pd_allowed_votable)
+		destroy_votable(chg->pd_allowed_votable);
+	if (chg->awake_votable)
+		destroy_votable(chg->awake_votable);
+	if (chg->chg_disable_votable)
+		destroy_votable(chg->chg_disable_votable);
+	if (chg->hvdcp_disable_votable_indirect)
+		destroy_votable(chg->hvdcp_disable_votable_indirect);
+	if (chg->hvdcp_enable_votable)
+		destroy_votable(chg->hvdcp_enable_votable);
+	if (chg->apsd_disable_votable)
+		destroy_votable(chg->apsd_disable_votable);
+	if (chg->hvdcp_hw_inov_dis_votable)
+		destroy_votable(chg->hvdcp_hw_inov_dis_votable);
+	if (chg->usb_irq_enable_votable)
+		destroy_votable(chg->usb_irq_enable_votable);
+	if (chg->typec_irq_disable_votable)
+		destroy_votable(chg->typec_irq_disable_votable);
+}
+
+static void smblib_iio_deinit(struct smb_charger *chg)
+{
+	if (!IS_ERR_OR_NULL(chg->iio.temp_chan))
+		iio_channel_release(chg->iio.temp_chan);
+	if (!IS_ERR_OR_NULL(chg->iio.temp_max_chan))
+		iio_channel_release(chg->iio.temp_max_chan);
+	if (!IS_ERR_OR_NULL(chg->iio.usbin_i_chan))
+		iio_channel_release(chg->iio.usbin_i_chan);
+	if (!IS_ERR_OR_NULL(chg->iio.usbin_v_chan))
+		iio_channel_release(chg->iio.usbin_v_chan);
+	if (!IS_ERR_OR_NULL(chg->iio.batt_i_chan))
+		iio_channel_release(chg->iio.batt_i_chan);
+}
+
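+/*
+ * Editor's note: every work item initialized in smblib_init() below has
+ * a matching cancel in the PARALLEL_MASTER branch of smblib_deinit();
+ * the two lists should be kept in step when adding work items.
+ */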
+int smblib_init(struct smb_charger *chg)
+{
+	int rc = 0;
+
+	mutex_init(&chg->lock);
+	mutex_init(&chg->write_lock);
+	mutex_init(&chg->otg_oc_lock);
+	mutex_init(&chg->vconn_oc_lock);
+	INIT_WORK(&chg->bms_update_work, bms_update_work);
+	INIT_WORK(&chg->rdstd_cc2_detach_work, rdstd_cc2_detach_work);
+	INIT_DELAYED_WORK(&chg->hvdcp_detect_work, smblib_hvdcp_detect_work);
+	INIT_DELAYED_WORK(&chg->clear_hdc_work, clear_hdc_work);
+	INIT_WORK(&chg->otg_oc_work, smblib_otg_oc_work);
+	INIT_WORK(&chg->vconn_oc_work, smblib_vconn_oc_work);
+	INIT_DELAYED_WORK(&chg->otg_ss_done_work, smblib_otg_ss_done_work);
+	INIT_DELAYED_WORK(&chg->icl_change_work, smblib_icl_change_work);
+	INIT_DELAYED_WORK(&chg->pl_enable_work, smblib_pl_enable_work);
+	INIT_WORK(&chg->legacy_detection_work, smblib_legacy_detection_work);
+	INIT_DELAYED_WORK(&chg->uusb_otg_work, smblib_uusb_otg_work);
+	INIT_DELAYED_WORK(&chg->bb_removal_work, smblib_bb_removal_work);
+	chg->fake_capacity = -EINVAL;
+	chg->fake_input_current_limited = -EINVAL;
+
+	switch (chg->mode) {
+	case PARALLEL_MASTER:
+		rc = qcom_batt_init();
+		if (rc < 0) {
+			smblib_err(chg, "Couldn't init qcom_batt_init rc=%d\n",
+				rc);
+			return rc;
+		}
+
+		rc = qcom_step_chg_init(chg->step_chg_enabled,
+						chg->sw_jeita_enabled);
+		if (rc < 0) {
+			smblib_err(chg, "Couldn't init qcom_step_chg_init rc=%d\n",
+				rc);
+			return rc;
+		}
+
+		rc = smblib_create_votables(chg);
+		if (rc < 0) {
+			smblib_err(chg, "Couldn't create votables rc=%d\n",
+				rc);
+			return rc;
+		}
+
+		rc = smblib_register_notifier(chg);
+		if (rc < 0) {
+			smblib_err(chg,
+				"Couldn't register notifier rc=%d\n", rc);
+			return rc;
+		}
+
+		chg->bms_psy = power_supply_get_by_name("bms");
+		chg->pl.psy = power_supply_get_by_name("parallel");
+		break;
+	case PARALLEL_SLAVE:
+		break;
+	default:
+		smblib_err(chg, "Unsupported mode %d\n", chg->mode);
+		return -EINVAL;
+	}
+
+	return rc;
+}
+
+int smblib_deinit(struct smb_charger *chg)
+{
+	switch (chg->mode) {
+	case PARALLEL_MASTER:
+		cancel_work_sync(&chg->bms_update_work);
+		cancel_work_sync(&chg->rdstd_cc2_detach_work);
+		cancel_delayed_work_sync(&chg->hvdcp_detect_work);
+		cancel_delayed_work_sync(&chg->clear_hdc_work);
+		cancel_work_sync(&chg->otg_oc_work);
+		cancel_work_sync(&chg->vconn_oc_work);
+		cancel_delayed_work_sync(&chg->otg_ss_done_work);
+		cancel_delayed_work_sync(&chg->icl_change_work);
+		cancel_delayed_work_sync(&chg->pl_enable_work);
+		cancel_work_sync(&chg->legacy_detection_work);
+		cancel_delayed_work_sync(&chg->uusb_otg_work);
+		cancel_delayed_work_sync(&chg->bb_removal_work);
+		power_supply_unreg_notifier(&chg->nb);
+		smblib_destroy_votables(chg);
+		qcom_step_chg_deinit();
+		qcom_batt_deinit();
+		break;
+	case PARALLEL_SLAVE:
+		break;
+	default:
+		smblib_err(chg, "Unsupported mode %d\n", chg->mode);
+		return -EINVAL;
+	}
+
+	smblib_iio_deinit(chg);
+
+	return 0;
+}
diff -Nruw linux-4.4.115-fbx/drivers/power/supply./qcom/smb-lib.h linux-4.4.115-fbx/drivers/power/supply/qcom/smb-lib.h
--- linux-4.4.115-fbx/drivers/power/supply./qcom/smb-lib.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/power/supply/qcom/smb-lib.h	2019-10-29 09:26:24.641212945 +0100
@@ -0,0 +1,535 @@
+/* Copyright (c) 2016-2017 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __SMB2_CHARGER_H
+#define __SMB2_CHARGER_H
+#include <linux/types.h>
+#include <linux/interrupt.h>
+#include <linux/irqreturn.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/consumer.h>
+#include <linux/extcon.h>
+#include "storm-watch.h"
+
+enum print_reason {
+	PR_INTERRUPT	= BIT(0),
+	PR_REGISTER	= BIT(1),
+	PR_MISC		= BIT(2),
+	PR_PARALLEL	= BIT(3),
+	PR_OTG		= BIT(4),
+};
+
+#define DEFAULT_VOTER			"DEFAULT_VOTER"
+#define USER_VOTER			"USER_VOTER"
+#define PD_VOTER			"PD_VOTER"
+#define DCP_VOTER			"DCP_VOTER"
+#define QC_VOTER			"QC_VOTER"
+#define PL_USBIN_USBIN_VOTER		"PL_USBIN_USBIN_VOTER"
+#define USB_PSY_VOTER			"USB_PSY_VOTER"
+#define PL_TAPER_WORK_RUNNING_VOTER	"PL_TAPER_WORK_RUNNING_VOTER"
+#define PL_QNOVO_VOTER			"PL_QNOVO_VOTER"
+#define USBIN_V_VOTER			"USBIN_V_VOTER"
+#define CHG_STATE_VOTER			"CHG_STATE_VOTER"
+#define TYPEC_SRC_VOTER			"TYPEC_SRC_VOTER"
+#define TAPER_END_VOTER			"TAPER_END_VOTER"
+#define THERMAL_DAEMON_VOTER		"THERMAL_DAEMON_VOTER"
+#define CC_DETACHED_VOTER		"CC_DETACHED_VOTER"
+#define HVDCP_TIMEOUT_VOTER		"HVDCP_TIMEOUT_VOTER"
+#define PD_DISALLOWED_INDIRECT_VOTER	"PD_DISALLOWED_INDIRECT_VOTER"
+#define PD_HARD_RESET_VOTER		"PD_HARD_RESET_VOTER"
+#define VBUS_CC_SHORT_VOTER		"VBUS_CC_SHORT_VOTER"
+#define PD_INACTIVE_VOTER		"PD_INACTIVE_VOTER"
+#define BOOST_BACK_VOTER		"BOOST_BACK_VOTER"
+#define USBIN_USBIN_BOOST_VOTER		"USBIN_USBIN_BOOST_VOTER"
+#define HVDCP_INDIRECT_VOTER		"HVDCP_INDIRECT_VOTER"
+#define MICRO_USB_VOTER			"MICRO_USB_VOTER"
+#define DEBUG_BOARD_VOTER		"DEBUG_BOARD_VOTER"
+#define PD_SUSPEND_SUPPORTED_VOTER	"PD_SUSPEND_SUPPORTED_VOTER"
+#define PL_DELAY_VOTER			"PL_DELAY_VOTER"
+#define CTM_VOTER			"CTM_VOTER"
+#define SW_QC3_VOTER			"SW_QC3_VOTER"
+#define AICL_RERUN_VOTER		"AICL_RERUN_VOTER"
+#define LEGACY_UNKNOWN_VOTER		"LEGACY_UNKNOWN_VOTER"
+#define CC2_WA_VOTER			"CC2_WA_VOTER"
+#define QNOVO_VOTER			"QNOVO_VOTER"
+#define BATT_PROFILE_VOTER		"BATT_PROFILE_VOTER"
+#define OTG_DELAY_VOTER			"OTG_DELAY_VOTER"
+#define USBIN_I_VOTER			"USBIN_I_VOTER"
+#define WEAK_CHARGER_VOTER		"WEAK_CHARGER_VOTER"
+#define WBC_VOTER			"WBC_VOTER"
+#define OV_VOTER			"OV_VOTER"
+
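+/*
+ * Illustrative sketch (not part of the driver): each string above names a
+ * client of the pmic-voter framework.  Assuming the usual vote() API, a
+ * client would request and later release a 500mA USB input current limit
+ * like this:
+ *
+ *	vote(chg->usb_icl_votable, USER_VOTER, true, 500000);
+ *	vote(chg->usb_icl_votable, USER_VOTER, false, 0);
+ */
+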
+#define VCONN_MAX_ATTEMPTS	3
+#define OTG_MAX_ATTEMPTS	3
+#define BOOST_BACK_STORM_COUNT	3
+#define WEAK_CHG_STORM_COUNT	8
+
+enum smb_mode {
+	PARALLEL_MASTER = 0,
+	PARALLEL_SLAVE,
+	NUM_MODES,
+};
+
+enum {
+	QC_CHARGER_DETECTION_WA_BIT	= BIT(0),
+	BOOST_BACK_WA			= BIT(1),
+	TYPEC_CC2_REMOVAL_WA_BIT	= BIT(2),
+	QC_AUTH_INTERRUPT_WA_BIT	= BIT(3),
+	OTG_WA				= BIT(4),
+	OV_IRQ_WA_BIT			= BIT(5),
+};
+
+enum smb_irq_index {
+	CHG_ERROR_IRQ = 0,
+	CHG_STATE_CHANGE_IRQ,
+	STEP_CHG_STATE_CHANGE_IRQ,
+	STEP_CHG_SOC_UPDATE_FAIL_IRQ,
+	STEP_CHG_SOC_UPDATE_REQ_IRQ,
+	OTG_FAIL_IRQ,
+	OTG_OVERCURRENT_IRQ,
+	OTG_OC_DIS_SW_STS_IRQ,
+	TESTMODE_CHANGE_DET_IRQ,
+	BATT_TEMP_IRQ,
+	BATT_OCP_IRQ,
+	BATT_OV_IRQ,
+	BATT_LOW_IRQ,
+	BATT_THERM_ID_MISS_IRQ,
+	BATT_TERM_MISS_IRQ,
+	USBIN_COLLAPSE_IRQ,
+	USBIN_LT_3P6V_IRQ,
+	USBIN_UV_IRQ,
+	USBIN_OV_IRQ,
+	USBIN_PLUGIN_IRQ,
+	USBIN_SRC_CHANGE_IRQ,
+	USBIN_ICL_CHANGE_IRQ,
+	TYPE_C_CHANGE_IRQ,
+	DCIN_COLLAPSE_IRQ,
+	DCIN_LT_3P6V_IRQ,
+	DCIN_UV_IRQ,
+	DCIN_OV_IRQ,
+	DCIN_PLUGIN_IRQ,
+	DIV2_EN_DG_IRQ,
+	DCIN_ICL_CHANGE_IRQ,
+	WDOG_SNARL_IRQ,
+	WDOG_BARK_IRQ,
+	AICL_FAIL_IRQ,
+	AICL_DONE_IRQ,
+	HIGH_DUTY_CYCLE_IRQ,
+	INPUT_CURRENT_LIMIT_IRQ,
+	TEMPERATURE_CHANGE_IRQ,
+	SWITCH_POWER_OK_IRQ,
+	SMB_IRQ_MAX,
+};
+
+enum try_sink_exit_mode {
+	ATTACHED_SRC = 0,
+	ATTACHED_SINK,
+	UNATTACHED_SINK,
+};
+
+struct smb_irq_info {
+	const char			*name;
+	const irq_handler_t		handler;
+	const bool			wake;
+	const struct storm_watch	storm_data;
+	struct smb_irq_data		*irq_data;
+	int				irq;
+};
+
+static const unsigned int smblib_extcon_cable[] = {
+	EXTCON_USB,
+	EXTCON_USB_HOST,
+	EXTCON_USB_CC,
+	EXTCON_USB_SPEED,
+	EXTCON_NONE,
+};
+
+/*
+ * EXTCON_USB and EXTCON_USB_HOST are mutually exclusive: the 0x3 mask
+ * covers bits 0 and 1, i.e. the first two entries of smblib_extcon_cable[].
+ */
+static const u32 smblib_extcon_exclusive[] = {0x3, 0};
+
+struct smb_regulator {
+	struct regulator_dev	*rdev;
+	struct regulator_desc	rdesc;
+};
+
+struct smb_irq_data {
+	void			*parent_data;
+	const char		*name;
+	struct storm_watch	storm_data;
+};
+
+struct smb_chg_param {
+	const char	*name;
+	u16		reg;
+	int		min_u;
+	int		max_u;
+	int		step_u;
+	int		(*get_proc)(struct smb_chg_param *param,
+				    u8 val_raw);
+	int		(*set_proc)(struct smb_chg_param *param,
+				    int val_u,
+				    u8 *val_raw);
+};
+
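+/*
+ * A minimal sketch, assuming the default linear mapping that applies when
+ * no get_proc/set_proc override is supplied (illustration only):
+ *
+ *	val_u   = val_raw * param->step_u + param->min_u;
+ *	val_raw = (val_u - param->min_u) / param->step_u;
+ */
+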
+struct smb_chg_freq {
+	unsigned int		freq_5V;
+	unsigned int		freq_6V_8V;
+	unsigned int		freq_9V;
+	unsigned int		freq_12V;
+	unsigned int		freq_removal;
+	unsigned int		freq_below_otg_threshold;
+	unsigned int		freq_above_otg_threshold;
+};
+
+struct smb_params {
+	struct smb_chg_param	fcc;
+	struct smb_chg_param	fv;
+	struct smb_chg_param	usb_icl;
+	struct smb_chg_param	icl_stat;
+	struct smb_chg_param	otg_cl;
+	struct smb_chg_param	dc_icl;
+	struct smb_chg_param	dc_icl_pt_lv;
+	struct smb_chg_param	dc_icl_pt_hv;
+	struct smb_chg_param	dc_icl_div2_lv;
+	struct smb_chg_param	dc_icl_div2_mid_lv;
+	struct smb_chg_param	dc_icl_div2_mid_hv;
+	struct smb_chg_param	dc_icl_div2_hv;
+	struct smb_chg_param	jeita_cc_comp;
+	struct smb_chg_param	freq_buck;
+	struct smb_chg_param	freq_boost;
+};
+
+struct parallel_params {
+	struct power_supply	*psy;
+};
+
+struct smb_iio {
+	struct iio_channel	*temp_chan;
+	struct iio_channel	*temp_max_chan;
+	struct iio_channel	*usbin_i_chan;
+	struct iio_channel	*usbin_v_chan;
+	struct iio_channel	*batt_i_chan;
+	struct iio_channel	*connector_temp_chan;
+	struct iio_channel	*connector_temp_thr1_chan;
+	struct iio_channel	*connector_temp_thr2_chan;
+	struct iio_channel	*connector_temp_thr3_chan;
+};
+
+struct reg_info {
+	u16		reg;
+	u8		mask;
+	u8		val;
+	u8		bak;
+	const char	*desc;
+};
+
+struct smb_charger {
+	struct device		*dev;
+	char			*name;
+	struct regmap		*regmap;
+	struct smb_irq_info	*irq_info;
+	struct smb_params	param;
+	struct smb_iio		iio;
+	int			*debug_mask;
+	int			*try_sink_enabled;
+	enum smb_mode		mode;
+	struct smb_chg_freq	chg_freq;
+	int			smb_version;
+	int			otg_delay_ms;
+	int			*weak_chg_icl_ua;
+
+	/* locks */
+	struct mutex		lock;
+	struct mutex		write_lock;
+	struct mutex		ps_change_lock;
+	struct mutex		otg_oc_lock;
+	struct mutex		vconn_oc_lock;
+
+	/* power supplies */
+	struct power_supply		*batt_psy;
+	struct power_supply		*usb_psy;
+	struct power_supply		*dc_psy;
+	struct power_supply		*bms_psy;
+	struct power_supply_desc	usb_psy_desc;
+	struct power_supply		*usb_main_psy;
+	struct power_supply		*usb_port_psy;
+	enum power_supply_type		real_charger_type;
+
+	/* notifiers */
+	struct notifier_block	nb;
+
+	/* parallel charging */
+	struct parallel_params	pl;
+
+	/* regulators */
+	struct smb_regulator	*vbus_vreg;
+	struct smb_regulator	*vconn_vreg;
+	struct regulator	*dpdm_reg;
+
+	/* votables */
+	struct votable		*dc_suspend_votable;
+	struct votable		*fcc_votable;
+	struct votable		*fv_votable;
+	struct votable		*usb_icl_votable;
+	struct votable		*dc_icl_votable;
+	struct votable		*pd_disallowed_votable_indirect;
+	struct votable		*pd_allowed_votable;
+	struct votable		*awake_votable;
+	struct votable		*pl_disable_votable;
+	struct votable		*chg_disable_votable;
+	struct votable		*pl_enable_votable_indirect;
+	struct votable		*hvdcp_disable_votable_indirect;
+	struct votable		*hvdcp_enable_votable;
+	struct votable		*apsd_disable_votable;
+	struct votable		*hvdcp_hw_inov_dis_votable;
+	struct votable		*usb_irq_enable_votable;
+	struct votable		*typec_irq_disable_votable;
+
+	/* work */
+	struct work_struct	bms_update_work;
+	struct work_struct	rdstd_cc2_detach_work;
+	struct delayed_work	hvdcp_detect_work;
+	struct delayed_work	ps_change_timeout_work;
+	struct delayed_work	clear_hdc_work;
+	struct work_struct	otg_oc_work;
+	struct work_struct	vconn_oc_work;
+	struct delayed_work	otg_ss_done_work;
+	struct delayed_work	icl_change_work;
+	struct delayed_work	pl_enable_work;
+	struct work_struct	legacy_detection_work;
+	struct delayed_work	uusb_otg_work;
+	struct delayed_work	bb_removal_work;
+
+	/* cached status */
+	int			voltage_min_uv;
+	int			voltage_max_uv;
+	int			pd_active;
+	bool			system_suspend_supported;
+	int			boost_threshold_ua;
+	int			system_temp_level;
+	int			thermal_levels;
+	int			*thermal_mitigation;
+	int			dcp_icl_ua;
+	int			fake_capacity;
+	bool			step_chg_enabled;
+	bool			sw_jeita_enabled;
+	bool			is_hdc;
+	bool			chg_done;
+	bool			micro_usb_mode;
+	bool			otg_en;
+	bool			vconn_en;
+	bool			suspend_input_on_debug_batt;
+	int			otg_attempts;
+	int			vconn_attempts;
+	int			default_icl_ua;
+	int			otg_cl_ua;
+	bool			uusb_apsd_rerun_done;
+	bool			pd_hard_reset;
+	bool			typec_present;
+	u8			typec_status[5];
+	bool			typec_legacy_valid;
+	int			fake_input_current_limited;
+	bool			pr_swap_in_progress;
+	int			typec_mode;
+	int			usb_icl_change_irq_enabled;
+	u32			jeita_status;
+	u8			float_cfg;
+	bool			use_extcon;
+	bool			otg_present;
+
+	/* workaround flag */
+	u32			wa_flags;
+	bool			cc2_detach_wa_active;
+	bool			typec_en_dis_active;
+	bool			try_sink_active;
+	int			boost_current_ua;
+	int			temp_speed_reading_count;
+
+	/* extcon for VBUS / ID notification to USB for uUSB */
+	struct extcon_dev	*extcon;
+
+	/* battery profile */
+	int			batt_profile_fcc_ua;
+	int			batt_profile_fv_uv;
+
+	/* qnovo */
+	int			usb_icl_delta_ua;
+	int			pulse_cnt;
+};
+
+int smblib_read(struct smb_charger *chg, u16 addr, u8 *val);
+int smblib_masked_write(struct smb_charger *chg, u16 addr, u8 mask, u8 val);
+int smblib_write(struct smb_charger *chg, u16 addr, u8 val);
+
+int smblib_get_charge_param(struct smb_charger *chg,
+			    struct smb_chg_param *param, int *val_u);
+int smblib_get_usb_suspend(struct smb_charger *chg, int *suspend);
+
+int smblib_enable_charging(struct smb_charger *chg, bool enable);
+int smblib_set_charge_param(struct smb_charger *chg,
+			    struct smb_chg_param *param, int val_u);
+int smblib_set_usb_suspend(struct smb_charger *chg, bool suspend);
+int smblib_set_dc_suspend(struct smb_charger *chg, bool suspend);
+
+int smblib_mapping_soc_from_field_value(struct smb_chg_param *param,
+					     int val_u, u8 *val_raw);
+int smblib_mapping_cc_delta_to_field_value(struct smb_chg_param *param,
+					   u8 val_raw);
+int smblib_mapping_cc_delta_from_field_value(struct smb_chg_param *param,
+					     int val_u, u8 *val_raw);
+int smblib_set_chg_freq(struct smb_chg_param *param,
+				int val_u, u8 *val_raw);
+
+int smblib_vbus_regulator_enable(struct regulator_dev *rdev);
+int smblib_vbus_regulator_disable(struct regulator_dev *rdev);
+int smblib_vbus_regulator_is_enabled(struct regulator_dev *rdev);
+
+int smblib_vconn_regulator_enable(struct regulator_dev *rdev);
+int smblib_vconn_regulator_disable(struct regulator_dev *rdev);
+int smblib_vconn_regulator_is_enabled(struct regulator_dev *rdev);
+
+irqreturn_t smblib_handle_debug(int irq, void *data);
+irqreturn_t smblib_handle_otg_overcurrent(int irq, void *data);
+irqreturn_t smblib_handle_chg_state_change(int irq, void *data);
+irqreturn_t smblib_handle_batt_temp_changed(int irq, void *data);
+irqreturn_t smblib_handle_batt_psy_changed(int irq, void *data);
+irqreturn_t smblib_handle_usb_psy_changed(int irq, void *data);
+irqreturn_t smblib_handle_usbin_uv(int irq, void *data);
+irqreturn_t smblib_handle_usb_plugin(int irq, void *data);
+irqreturn_t smblib_handle_usb_source_change(int irq, void *data);
+irqreturn_t smblib_handle_icl_change(int irq, void *data);
+irqreturn_t smblib_handle_usb_typec_change(int irq, void *data);
+irqreturn_t smblib_handle_dc_plugin(int irq, void *data);
+irqreturn_t smblib_handle_high_duty_cycle(int irq, void *data);
+irqreturn_t smblib_handle_switcher_power_ok(int irq, void *data);
+irqreturn_t smblib_handle_wdog_bark(int irq, void *data);
+
+int smblib_get_prop_input_suspend(struct smb_charger *chg,
+				union power_supply_propval *val);
+int smblib_get_prop_batt_present(struct smb_charger *chg,
+				union power_supply_propval *val);
+int smblib_get_prop_batt_capacity(struct smb_charger *chg,
+				union power_supply_propval *val);
+int smblib_get_prop_batt_status(struct smb_charger *chg,
+				union power_supply_propval *val);
+int smblib_get_prop_batt_charge_type(struct smb_charger *chg,
+				union power_supply_propval *val);
+int smblib_get_prop_batt_charge_done(struct smb_charger *chg,
+				union power_supply_propval *val);
+int smblib_get_prop_batt_health(struct smb_charger *chg,
+				union power_supply_propval *val);
+int smblib_get_prop_system_temp_level(struct smb_charger *chg,
+				union power_supply_propval *val);
+int smblib_get_prop_input_current_limited(struct smb_charger *chg,
+				union power_supply_propval *val);
+int smblib_get_prop_batt_voltage_now(struct smb_charger *chg,
+				union power_supply_propval *val);
+int smblib_get_prop_batt_current_now(struct smb_charger *chg,
+				union power_supply_propval *val);
+int smblib_get_prop_batt_temp(struct smb_charger *chg,
+				union power_supply_propval *val);
+int smblib_get_prop_batt_charge_counter(struct smb_charger *chg,
+				union power_supply_propval *val);
+int smblib_set_prop_input_suspend(struct smb_charger *chg,
+				const union power_supply_propval *val);
+int smblib_set_prop_batt_capacity(struct smb_charger *chg,
+				const union power_supply_propval *val);
+int smblib_set_prop_system_temp_level(struct smb_charger *chg,
+				const union power_supply_propval *val);
+int smblib_set_prop_input_current_limited(struct smb_charger *chg,
+				const union power_supply_propval *val);
+
+int smblib_get_prop_dc_present(struct smb_charger *chg,
+				union power_supply_propval *val);
+int smblib_get_prop_dc_online(struct smb_charger *chg,
+				union power_supply_propval *val);
+int smblib_get_prop_dc_current_max(struct smb_charger *chg,
+				union power_supply_propval *val);
+int smblib_set_prop_dc_current_max(struct smb_charger *chg,
+				const union power_supply_propval *val);
+
+int smblib_get_prop_usb_present(struct smb_charger *chg,
+				union power_supply_propval *val);
+int smblib_get_prop_usb_online(struct smb_charger *chg,
+				union power_supply_propval *val);
+int smblib_get_prop_usb_suspend(struct smb_charger *chg,
+				union power_supply_propval *val);
+int smblib_get_prop_usb_voltage_max(struct smb_charger *chg,
+				union power_supply_propval *val);
+int smblib_get_prop_usb_voltage_now(struct smb_charger *chg,
+				union power_supply_propval *val);
+int smblib_get_prop_usb_current_now(struct smb_charger *chg,
+				union power_supply_propval *val);
+int smblib_get_prop_typec_cc_orientation(struct smb_charger *chg,
+				union power_supply_propval *val);
+int smblib_get_prop_typec_power_role(struct smb_charger *chg,
+				union power_supply_propval *val);
+int smblib_get_prop_pd_allowed(struct smb_charger *chg,
+				union power_supply_propval *val);
+int smblib_get_prop_input_current_settled(struct smb_charger *chg,
+				union power_supply_propval *val);
+int smblib_get_prop_input_voltage_settled(struct smb_charger *chg,
+				union power_supply_propval *val);
+int smblib_get_prop_pd_in_hard_reset(struct smb_charger *chg,
+			       union power_supply_propval *val);
+int smblib_get_pe_start(struct smb_charger *chg,
+			       union power_supply_propval *val);
+int smblib_get_prop_charger_temp(struct smb_charger *chg,
+				union power_supply_propval *val);
+int smblib_get_prop_charger_temp_max(struct smb_charger *chg,
+				union power_supply_propval *val);
+int smblib_get_prop_die_health(struct smb_charger *chg,
+			       union power_supply_propval *val);
+int smblib_get_prop_charge_qnovo_enable(struct smb_charger *chg,
+			       union power_supply_propval *val);
+int smblib_set_prop_pd_current_max(struct smb_charger *chg,
+				const union power_supply_propval *val);
+int smblib_set_prop_sdp_current_max(struct smb_charger *chg,
+				const union power_supply_propval *val);
+int smblib_set_prop_pd_voltage_max(struct smb_charger *chg,
+				const union power_supply_propval *val);
+int smblib_set_prop_pd_voltage_min(struct smb_charger *chg,
+				const union power_supply_propval *val);
+int smblib_set_prop_boost_current(struct smb_charger *chg,
+				const union power_supply_propval *val);
+int smblib_set_prop_typec_power_role(struct smb_charger *chg,
+				const union power_supply_propval *val);
+int smblib_set_prop_pd_active(struct smb_charger *chg,
+				const union power_supply_propval *val);
+int smblib_set_prop_pd_in_hard_reset(struct smb_charger *chg,
+				const union power_supply_propval *val);
+int smblib_get_prop_slave_current_now(struct smb_charger *chg,
+				union power_supply_propval *val);
+int smblib_set_prop_ship_mode(struct smb_charger *chg,
+				const union power_supply_propval *val);
+int smblib_set_prop_charge_qnovo_enable(struct smb_charger *chg,
+				const union power_supply_propval *val);
+void smblib_suspend_on_debug_battery(struct smb_charger *chg);
+int smblib_rerun_apsd_if_required(struct smb_charger *chg);
+int smblib_get_prop_fcc_delta(struct smb_charger *chg,
+				union power_supply_propval *val);
+int smblib_icl_override(struct smb_charger *chg, bool override);
+int smblib_dp_dm(struct smb_charger *chg, int val);
+int smblib_disable_hw_jeita(struct smb_charger *chg, bool disable);
+int smblib_rerun_aicl(struct smb_charger *chg);
+int smblib_set_icl_current(struct smb_charger *chg, int icl_ua);
+int smblib_get_icl_current(struct smb_charger *chg, int *icl_ua);
+int smblib_get_charge_current(struct smb_charger *chg, int *total_current_ua);
+int smblib_get_prop_pr_swap_in_progress(struct smb_charger *chg,
+				union power_supply_propval *val);
+int smblib_set_prop_pr_swap_in_progress(struct smb_charger *chg,
+				const union power_supply_propval *val);
+void smblib_usb_typec_change(struct smb_charger *chg);
+
+int smblib_init(struct smb_charger *chg);
+int smblib_deinit(struct smb_charger *chg);
+#endif /* __SMB2_CHARGER_H */
diff -Nruw linux-4.4.115/drivers/power/supply/qcom/smb-reg.h linux-4.4.115-fbx/drivers/power/supply/qcom/smb-reg.h
--- linux-4.4.115/drivers/power/supply/qcom/smb-reg.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/power/supply/qcom/smb-reg.h	2019-01-22 16:16:26.235271146 +0100
@@ -0,0 +1,1029 @@
+/* Copyright (c) 2016-2017 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __SMB2_CHARGER_REG_H
+#define __SMB2_CHARGER_REG_H
+
+#include <linux/bitops.h>
+
+#define CHGR_BASE	0x1000
+#define OTG_BASE	0x1100
+#define BATIF_BASE	0x1200
+#define USBIN_BASE	0x1300
+#define DCIN_BASE	0x1400
+#define MISC_BASE	0x1600
+#define CHGR_FREQ_BASE	0x1900
+
+#define PERPH_TYPE_OFFSET		0x04
+#define TYPE_MASK			GENMASK(7, 0)
+#define PERPH_SUBTYPE_OFFSET		0x05
+#define SUBTYPE_MASK			GENMASK(7, 0)
+#define INT_RT_STS_OFFSET		0x10
+
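+/*
+ * Register addresses are formed as peripheral base + offset.  For example,
+ * the real-time interrupt status register of the USBIN peripheral lives at
+ * USBIN_BASE + INT_RT_STS_OFFSET = 0x1300 + 0x10 = 0x1310.
+ */
+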
+/* CHGR Peripheral Registers */
+#define BATTERY_CHARGER_STATUS_1_REG	(CHGR_BASE + 0x06)
+#define BVR_INITIAL_RAMP_BIT		BIT(7)
+#define CC_SOFT_TERMINATE_BIT		BIT(6)
+#define STEP_CHARGING_STATUS_SHIFT	3
+#define STEP_CHARGING_STATUS_MASK	GENMASK(5, 3)
+#define BATTERY_CHARGER_STATUS_MASK	GENMASK(2, 0)
+enum {
+	TRICKLE_CHARGE = 0,
+	PRE_CHARGE,
+	FAST_CHARGE,
+	FULLON_CHARGE,
+	TAPER_CHARGE,
+	TERMINATE_CHARGE,
+	INHIBIT_CHARGE,
+	DISABLE_CHARGE,
+};
+
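+/*
+ * Illustrative sketch of decoding the charge state with smblib_read(),
+ * which is declared in smb-lib.h (error handling omitted):
+ *
+ *	u8 stat;
+ *
+ *	smblib_read(chg, BATTERY_CHARGER_STATUS_1_REG, &stat);
+ *	stat &= BATTERY_CHARGER_STATUS_MASK;
+ *
+ * stat now holds one of TRICKLE_CHARGE..DISABLE_CHARGE from the enum above.
+ */
+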
+#define BATTERY_CHARGER_STATUS_2_REG			(CHGR_BASE + 0x07)
+#define INPUT_CURRENT_LIMITED_BIT			BIT(7)
+#define CHARGER_ERROR_STATUS_SFT_EXPIRE_BIT		BIT(6)
+#define CHARGER_ERROR_STATUS_BAT_OV_BIT			BIT(5)
+#define CHARGER_ERROR_STATUS_BAT_TERM_MISSING_BIT	BIT(4)
+#define BAT_TEMP_STATUS_MASK				GENMASK(3, 0)
+#define BAT_TEMP_STATUS_SOFT_LIMIT_MASK			GENMASK(3, 2)
+#define BAT_TEMP_STATUS_HOT_SOFT_LIMIT_BIT		BIT(3)
+#define BAT_TEMP_STATUS_COLD_SOFT_LIMIT_BIT		BIT(2)
+#define BAT_TEMP_STATUS_TOO_HOT_BIT			BIT(1)
+#define BAT_TEMP_STATUS_TOO_COLD_BIT			BIT(0)
+
+#define CHG_OPTION_REG					(CHGR_BASE + 0x08)
+#define PIN_BIT						BIT(7)
+
+#define BATTERY_CHARGER_STATUS_3_REG			(CHGR_BASE + 0x09)
+#define FV_POST_JEITA_MASK				GENMASK(7, 0)
+
+#define BATTERY_CHARGER_STATUS_4_REG			(CHGR_BASE + 0x0A)
+#define CHARGE_CURRENT_POST_JEITA_MASK			GENMASK(7, 0)
+
+#define BATTERY_CHARGER_STATUS_5_REG			(CHGR_BASE + 0x0B)
+#define VALID_INPUT_POWER_SOURCE_BIT			BIT(7)
+#define DISABLE_CHARGING_BIT				BIT(6)
+#define FORCE_ZERO_CHARGE_CURRENT_BIT			BIT(5)
+#define CHARGING_ENABLE_BIT				BIT(4)
+#define TAPER_BIT					BIT(3)
+#define ENABLE_CHG_SENSORS_BIT				BIT(2)
+#define ENABLE_TAPER_SENSOR_BIT				BIT(1)
+#define TAPER_REGION_BIT				BIT(0)
+
+#define BATTERY_CHARGER_STATUS_6_REG			(CHGR_BASE + 0x0C)
+#define GF_BATT_OV_BIT					BIT(7)
+#define DROP_IN_BATTERY_VOLTAGE_REFERENCE_BIT		BIT(6)
+#define VBATT_LTET_RECHARGE_BIT				BIT(5)
+#define VBATT_GTET_INHIBIT_BIT				BIT(4)
+#define VBATT_GTET_FLOAT_VOLTAGE_BIT			BIT(3)
+#define BATT_GT_PRE_TO_FAST_BIT				BIT(2)
+#define BATT_GT_FULL_ON_BIT				BIT(1)
+#define VBATT_LT_2V_BIT					BIT(0)
+
+#define BATTERY_CHARGER_STATUS_7_REG			(CHGR_BASE + 0x0D)
+#define ENABLE_TRICKLE_BIT				BIT(7)
+#define ENABLE_PRE_CHARGING_BIT				BIT(6)
+#define ENABLE_FAST_CHARGING_BIT			BIT(5)
+#define ENABLE_FULLON_MODE_BIT				BIT(4)
+#define TOO_COLD_ADC_BIT				BIT(3)
+#define TOO_HOT_ADC_BIT					BIT(2)
+#define HOT_SL_ADC_BIT					BIT(1)
+#define COLD_SL_ADC_BIT					BIT(0)
+
+#define BATTERY_CHARGER_STATUS_8_REG			(CHGR_BASE + 0x0E)
+#define PRE_FAST_BIT					BIT(7)
+#define PRE_FULLON_BIT					BIT(6)
+#define PRE_RCHG_BIT					BIT(5)
+#define PRE_INHIBIT_BIT					BIT(4)
+#define PRE_OVRV_BIT					BIT(3)
+#define PRE_TERM_BIT					BIT(2)
+#define BAT_ID_BMISS_CMP_BIT				BIT(1)
+#define THERM_CMP_BIT					BIT(0)
+
+/* CHGR Interrupt Bits */
+#define CHGR_7_RT_STS_BIT				BIT(7)
+#define CHGR_6_RT_STS_BIT				BIT(6)
+#define FG_FVCAL_QUALIFIED_RT_STS_BIT			BIT(5)
+#define STEP_CHARGING_SOC_UPDATE_REQUEST_RT_STS_BIT	BIT(4)
+#define STEP_CHARGING_SOC_UPDATE_FAIL_RT_STS_BIT	BIT(3)
+#define STEP_CHARGING_STATE_CHANGE_RT_STS_BIT		BIT(2)
+#define CHARGING_STATE_CHANGE_RT_STS_BIT		BIT(1)
+#define CHGR_ERROR_RT_STS_BIT				BIT(0)
+
+#define STEP_CHG_SOC_VBATT_V_REG			(CHGR_BASE + 0x40)
+#define STEP_CHG_SOC_VBATT_V_MASK			GENMASK(7, 0)
+
+#define STEP_CHG_SOC_VBATT_V_UPDATE_REG			(CHGR_BASE + 0x41)
+#define STEP_CHG_SOC_VBATT_V_UPDATE_BIT			BIT(0)
+
+#define CHARGING_ENABLE_CMD_REG				(CHGR_BASE + 0x42)
+#define CHARGING_ENABLE_CMD_BIT				BIT(0)
+
+#define ALLOW_FAST_CHARGING_CMD_REG			(CHGR_BASE + 0x43)
+#define ALLOW_FAST_CHARGING_CMD_BIT			BIT(0)
+
+#define QNOVO_PT_ENABLE_CMD_REG				(CHGR_BASE + 0x44)
+#define QNOVO_PT_ENABLE_CMD_BIT				BIT(0)
+
+#define CHGR_CFG1_REG					(CHGR_BASE + 0x50)
+#define INCREASE_RCHG_TIMEOUT_CFG_BIT			BIT(1)
+#define LOAD_BAT_BIT					BIT(0)
+
+#define CHGR_CFG2_REG					(CHGR_BASE + 0x51)
+#define CHG_EN_SRC_BIT					BIT(7)
+#define CHG_EN_POLARITY_BIT				BIT(6)
+#define PRETOFAST_TRANSITION_CFG_BIT			BIT(5)
+#define BAT_OV_ECC_BIT					BIT(4)
+#define I_TERM_BIT					BIT(3)
+#define AUTO_RECHG_BIT					BIT(2)
+#define EN_ANALOG_DROP_IN_VBATT_BIT			BIT(1)
+#define CHARGER_INHIBIT_BIT				BIT(0)
+
+#define CHARGER_ENABLE_CFG_REG				(CHGR_BASE + 0x52)
+#define CHG_ENB_TIMEOUT_SETTING_BIT			BIT(1)
+#define FORCE_ZERO_CFG_BIT				BIT(0)
+
+#define CFG_REG						(CHGR_BASE + 0x53)
+#define CHG_OPTION_PIN_TRIM_BIT				BIT(7)
+#define BATN_SNS_CFG_BIT				BIT(4)
+#define CFG_TAPER_DIS_AFVC_BIT				BIT(3)
+#define BATFET_SHUTDOWN_CFG_BIT				BIT(2)
+#define VDISCHG_EN_CFG_BIT				BIT(1)
+#define VCHG_EN_CFG_BIT					BIT(0)
+
+#define CHARGER_SPARE_REG				(CHGR_BASE + 0x54)
+#define CHARGER_SPARE_MASK				GENMASK(5, 0)
+
+#define PRE_CHARGE_CURRENT_CFG_REG			(CHGR_BASE + 0x60)
+#define PRE_CHARGE_CURRENT_SETTING_MASK			GENMASK(5, 0)
+
+#define FAST_CHARGE_CURRENT_CFG_REG			(CHGR_BASE + 0x61)
+#define FAST_CHARGE_CURRENT_SETTING_MASK		GENMASK(7, 0)
+
+#define CHARGE_CURRENT_TERMINATION_CFG_REG		(CHGR_BASE + 0x62)
+#define ANALOG_CHARGE_CURRENT_TERMINATION_SETTING_MASK	GENMASK(2, 0)
+
+#define TCCC_CHARGE_CURRENT_TERMINATION_CFG_REG		(CHGR_BASE + 0x63)
+#define TCCC_CHARGE_CURRENT_TERMINATION_SETTING_MASK	GENMASK(3, 0)
+
+#define CHARGE_CURRENT_SOFTSTART_SETTING_CFG_REG	(CHGR_BASE + 0x64)
+#define CHARGE_CURRENT_SOFTSTART_SETTING_MASK		GENMASK(1, 0)
+
+#define FLOAT_VOLTAGE_CFG_REG				(CHGR_BASE + 0x70)
+#define FLOAT_VOLTAGE_SETTING_MASK			GENMASK(7, 0)
+
+#define AUTO_FLOAT_VOLTAGE_COMPENSATION_CFG_REG		(CHGR_BASE + 0x71)
+#define AUTO_FLOAT_VOLTAGE_COMPENSATION_MASK		GENMASK(2, 0)
+
+#define CHARGE_INHIBIT_THRESHOLD_CFG_REG		(CHGR_BASE + 0x72)
+#define CHARGE_INHIBIT_THRESHOLD_MASK			GENMASK(1, 0)
+#define CHARGE_INHIBIT_THRESHOLD_50MV			0
+#define CHARGE_INHIBIT_THRESHOLD_100MV			1
+#define CHARGE_INHIBIT_THRESHOLD_200MV			2
+#define CHARGE_INHIBIT_THRESHOLD_300MV			3
+
+#define RECHARGE_THRESHOLD_CFG_REG			(CHGR_BASE + 0x73)
+#define RECHARGE_THRESHOLD_MASK				GENMASK(1, 0)
+
+#define PRE_TO_FAST_CHARGE_THRESHOLD_CFG_REG		(CHGR_BASE + 0x74)
+#define PRE_TO_FAST_CHARGE_THRESHOLD_MASK		GENMASK(1, 0)
+
+#define FV_HYSTERESIS_CFG_REG				(CHGR_BASE + 0x75)
+#define FV_DROP_HYSTERESIS_CFG_MASK			GENMASK(7, 4)
+#define THRESH_HYSTERESIS_CFG_MASK			GENMASK(3, 0)
+
+#define FVC_CHARGE_INHIBIT_THRESHOLD_CFG_REG		(CHGR_BASE + 0x80)
+#define FVC_CHARGE_INHIBIT_THRESHOLD_MASK		GENMASK(5, 0)
+
+#define FVC_RECHARGE_THRESHOLD_CFG_REG			(CHGR_BASE + 0x81)
+#define FVC_RECHARGE_THRESHOLD_MASK			GENMASK(7, 0)
+
+#define FVC_PRE_TO_FAST_CHARGE_THRESHOLD_CFG_REG	(CHGR_BASE + 0x82)
+#define FVC_PRE_TO_FAST_CHARGE_THRESHOLD_MASK		GENMASK(7, 0)
+
+#define FVC_FULL_ON_THRESHOLD_CFG_REG			(CHGR_BASE + 0x83)
+#define FVC_FULL_ON_THRESHOLD_MASK			GENMASK(7, 0)
+
+#define FVC_CC_MODE_GLITCH_FILTER_SEL_CFG_REG		(CHGR_BASE + 0x84)
+#define FVC_CC_MODE_GLITCH_FILTER_SEL_MASK		GENMASK(1, 0)
+
+#define FVC_TERMINATION_GLITCH_FILTER_SEL_CFG_REG	(CHGR_BASE + 0x85)
+#define FVC_TERMINATION_GLITCH_FILTER_SEL_MASK		GENMASK(1, 0)
+
+#define JEITA_EN_CFG_REG		(CHGR_BASE + 0x90)
+#define JEITA_EN_HARDLIMIT_BIT		BIT(4)
+#define JEITA_EN_HOT_SL_FCV_BIT		BIT(3)
+#define JEITA_EN_COLD_SL_FCV_BIT	BIT(2)
+#define JEITA_EN_HOT_SL_CCC_BIT		BIT(1)
+#define JEITA_EN_COLD_SL_CCC_BIT	BIT(0)
+
+#define JEITA_FVCOMP_CFG_REG		(CHGR_BASE + 0x91)
+#define JEITA_FVCOMP_MASK		GENMASK(7, 0)
+
+#define JEITA_CCCOMP_CFG_REG		(CHGR_BASE + 0x92)
+#define JEITA_CCCOMP_MASK		GENMASK(7, 0)
+
+#define FV_CAL_CFG_REG			(CHGR_BASE + 0x76)
+#define FV_CALIBRATION_CFG_MASK		GENMASK(2, 0)
+
+#define FV_ADJUST_REG			(CHGR_BASE + 0x77)
+#define FLOAT_VOLTAGE_ADJUSTMENT_MASK	GENMASK(4, 0)
+
+#define FG_VADC_DISQ_THRESH_REG		(CHGR_BASE + 0x78)
+#define VADC_DISQUAL_THRESH_MASK	GENMASK(7, 0)
+
+#define FG_IADC_DISQ_THRESH_REG		(CHGR_BASE + 0x79)
+#define IADC_DISQUAL_THRESH_MASK	GENMASK(7, 0)
+
+#define FG_UPDATE_CFG_1_REG	(CHGR_BASE + 0x7A)
+#define BT_TMPR_TCOLD_BIT	BIT(7)
+#define BT_TMPR_COLD_BIT	BIT(6)
+#define BT_TMPR_HOT_BIT		BIT(5)
+#define BT_TMPR_THOT_BIT	BIT(4)
+#define CHG_DIE_TMPR_HOT_BIT	BIT(3)
+#define CHG_DIE_TMPR_THOT_BIT	BIT(2)
+#define SKIN_TMPR_HOT_BIT	BIT(1)
+#define SKIN_TMPR_THOT_BIT	BIT(0)
+
+#define FG_UPDATE_CFG_1_SEL_REG		(CHGR_BASE + 0x7B)
+#define BT_TMPR_TCOLD_SEL_BIT		BIT(7)
+#define BT_TMPR_COLD_SEL_BIT		BIT(6)
+#define BT_TMPR_HOT_SEL_BIT		BIT(5)
+#define BT_TMPR_THOT_SEL_BIT		BIT(4)
+#define CHG_DIE_TMPR_HOT_SEL_BIT	BIT(3)
+#define CHG_DIE_TMPR_THOT_SEL_BIT	BIT(2)
+#define SKIN_TMPR_HOT_SEL_BIT		BIT(1)
+#define SKIN_TMPR_THOT_SEL_BIT		BIT(0)
+
+#define FG_UPDATE_CFG_2_REG		(CHGR_BASE + 0x7C)
+#define SOC_LT_OTG_THRESH_BIT		BIT(3)
+#define SOC_LT_CHG_RECHARGE_THRESH_BIT	BIT(2)
+#define VBT_LT_CHG_RECHARGE_THRESH_BIT	BIT(1)
+#define IBT_LT_CHG_TERM_THRESH_BIT	BIT(0)
+
+#define FG_UPDATE_CFG_2_SEL_REG			(CHGR_BASE + 0x7D)
+#define SOC_LT_OTG_THRESH_SEL_BIT		BIT(3)
+#define SOC_LT_CHG_RECHARGE_THRESH_SEL_BIT	BIT(2)
+#define VBT_LT_CHG_RECHARGE_THRESH_SEL_BIT	BIT(1)
+#define IBT_LT_CHG_TERM_THRESH_SEL_BIT		BIT(0)
+
+#define FG_CHG_INTERFACE_CFG_REG	(CHGR_BASE + 0x7E)
+#define ESR_ISINK_CFG_MASK		GENMASK(7, 6)
+#define ESR_FASTCHG_DECR_CFG_MASK	GENMASK(5, 4)
+#define FG_CHARGER_INHIBIT_BIT		BIT(3)
+#define FG_BATFET_BIT			BIT(2)
+#define IADC_SYNC_CNV_BIT		BIT(1)
+#define VADC_SYNC_CNV_BIT		BIT(0)
+
+#define FG_CHG_INTERFACE_CFG_SEL_REG	(CHGR_BASE + 0x7F)
+#define ESR_ISINK_CFG_SEL_BIT		BIT(5)
+#define ESR_FASTCHG_DECR_CFG_SEL_BIT	BIT(4)
+#define FG_CHARGER_INHIBIT_SEL_BIT	BIT(3)
+#define FG_BATFET_SEL_BIT		BIT(2)
+#define IADC_SYNC_CNV_SEL_BIT		BIT(1)
+#define VADC_SYNC_CNV_SEL_BIT		BIT(0)
+
+#define CHGR_STEP_CHG_MODE_CFG_REG		(CHGR_BASE + 0xB0)
+#define STEP_CHARGING_SOC_FAIL_OPTION_BIT	BIT(3)
+#define STEP_CHARGING_MODE_SELECT_BIT		BIT(2)
+#define STEP_CHARGING_SOURCE_SELECT_BIT		BIT(1)
+#define STEP_CHARGING_ENABLE_BIT		BIT(0)
+
+#define STEP_CHG_UPDATE_REQUEST_TIMEOUT_CFG_REG		(CHGR_BASE + 0xB1)
+#define STEP_CHG_UPDATE_REQUEST_TIMEOUT_CFG_MASK	GENMASK(1, 0)
+#define STEP_CHG_UPDATE_REQUEST_TIMEOUT_5S		0
+#define STEP_CHG_UPDATE_REQUEST_TIMEOUT_10S		1
+#define STEP_CHG_UPDATE_REQUEST_TIMEOUT_20S		2
+#define STEP_CHG_UPDATE_REQUEST_TIMEOUT_40S		3
+
+#define STEP_CHG_UPDATE_FAIL_TIMEOUT_CFG_REG		(CHGR_BASE + 0xB2)
+#define STEP_CHG_UPDATE_FAIL_TIMEOUT_CFG_MASK		GENMASK(1, 0)
+#define STEP_CHG_UPDATE_FAIL_TIMEOUT_10S		0
+#define STEP_CHG_UPDATE_FAIL_TIMEOUT_30S		1
+#define STEP_CHG_UPDATE_FAIL_TIMEOUT_60S		2
+#define STEP_CHG_UPDATE_FAIL_TIMEOUT_120S		3
+
+#define STEP_CHG_SOC_OR_BATT_V_TH1_REG	(CHGR_BASE + 0xB3)
+#define STEP_CHG_SOC_OR_BATT_V_TH2_REG	(CHGR_BASE + 0xB4)
+#define STEP_CHG_SOC_OR_BATT_V_TH3_REG	(CHGR_BASE + 0xB5)
+#define STEP_CHG_SOC_OR_BATT_V_TH4_REG	(CHGR_BASE + 0xB6)
+#define STEP_CHG_CURRENT_DELTA1_REG	(CHGR_BASE + 0xB7)
+#define STEP_CHG_CURRENT_DELTA2_REG	(CHGR_BASE + 0xB8)
+#define STEP_CHG_CURRENT_DELTA3_REG	(CHGR_BASE + 0xB9)
+#define STEP_CHG_CURRENT_DELTA4_REG	(CHGR_BASE + 0xBA)
+#define STEP_CHG_CURRENT_DELTA5_REG	(CHGR_BASE + 0xBB)
+
+/* OTG Peripheral Registers */
+#define RID_CC_CONTROL_23_16_REG	(OTG_BASE + 0x06)
+#define RID_CC_CONTROL_23_BIT		BIT(7)
+#define VCONN_SOFTSTART_EN_BIT		BIT(6)
+#define VCONN_SFTST_CFG_MASK		GENMASK(5, 4)
+#define CONNECT_RIDCC_SENSOR_TO_CC_MASK	GENMASK(3, 2)
+#define EN_CC_1P1CLAMP_BIT		BIT(1)
+#define ENABLE_CRUDESEN_CC_1_BIT	BIT(0)
+
+#define RID_CC_CONTROL_15_8_REG		(OTG_BASE + 0x07)
+#define ENABLE_CRUDESEN_CC_0_BIT	BIT(7)
+#define EN_FMB_2P5UA_CC_MASK		GENMASK(6, 5)
+#define EN_ISRC_180UA_BIT		BIT(4)
+#define ENABLE_CURRENTSOURCE_CC_MASK	GENMASK(3, 2)
+#define EN_BANDGAP_RID_C_DET_BIT	BIT(1)
+#define ENABLE_RD_CC_1_BIT		BIT(0)
+
+#define RID_CC_CONTROL_7_0_REG		(OTG_BASE + 0x08)
+#define ENABLE_RD_CC_0_BIT		BIT(7)
+#define VCONN_ILIM500MA_BIT		BIT(6)
+#define EN_MICRO_USB_MODE_BIT		BIT(5)
+#define UFP_DFP_MODE_BIT		BIT(4)
+#define VCONN_EN_CC_MASK		GENMASK(3, 2)
+#define VREF_SEL_RIDCC_SENSOR_MASK	GENMASK(1, 0)
+
+#define OTG_STATUS_REG			(OTG_BASE + 0x09)
+#define BOOST_SOFTSTART_DONE_BIT	BIT(3)
+#define OTG_STATE_MASK			GENMASK(2, 0)
+#define OTG_STATE_ENABLED		0x2
+
+/* OTG Interrupt Bits */
+#define TESTMODE_CHANGE_DETECT_RT_STS_BIT	BIT(3)
+#define OTG_OC_DIS_SW_STS_RT_STS_BIT		BIT(2)
+#define OTG_OVERCURRENT_RT_STS_BIT		BIT(1)
+#define OTG_FAIL_RT_STS_BIT			BIT(0)
+
+#define CMD_OTG_REG			(OTG_BASE + 0x40)
+#define OTG_EN_BIT			BIT(0)
+
+#define BAT_UVLO_THRESHOLD_CFG_REG	(OTG_BASE + 0x51)
+#define BAT_UVLO_THRESHOLD_MASK		GENMASK(1, 0)
+
+#define OTG_CURRENT_LIMIT_CFG_REG	(OTG_BASE + 0x52)
+#define OTG_CURRENT_LIMIT_MASK		GENMASK(2, 0)
+
+#define OTG_CFG_REG			(OTG_BASE + 0x53)
+#define OTG_RESERVED_MASK		GENMASK(7, 6)
+#define DIS_OTG_ON_TLIM_BIT		BIT(5)
+#define QUICKSTART_OTG_FASTROLESWAP_BIT	BIT(4)
+#define INCREASE_DFP_TIME_BIT		BIT(3)
+#define ENABLE_OTG_IN_DEBUG_MODE_BIT	BIT(2)
+#define OTG_EN_SRC_CFG_BIT		BIT(1)
+#define CONCURRENT_MODE_CFG_BIT		BIT(0)
+
+#define OTG_ENG_OTG_CFG_REG		(OTG_BASE + 0xC0)
+#define ENG_BUCKBOOST_HALT1_8_MODE_BIT	BIT(0)
+
+/* BATIF Peripheral Registers */
+/* BATIF Interrupt Bits */
+#define BAT_7_RT_STS_BIT			BIT(7)
+#define BAT_6_RT_STS_BIT			BIT(6)
+#define BAT_TERMINAL_MISSING_RT_STS_BIT		BIT(5)
+#define BAT_THERM_OR_ID_MISSING_RT_STS_BIT	BIT(4)
+#define BAT_LOW_RT_STS_BIT			BIT(3)
+#define BAT_OV_RT_STS_BIT			BIT(2)
+#define BAT_OCP_RT_STS_BIT			BIT(1)
+#define BAT_TEMP_RT_STS_BIT			BIT(0)
+
+#define SHIP_MODE_REG			(BATIF_BASE + 0x40)
+#define SHIP_MODE_EN_BIT		BIT(0)
+
+#define BATOCP_THRESHOLD_CFG_REG	(BATIF_BASE + 0x50)
+#define BATOCP_ENABLE_CFG_BIT		BIT(3)
+#define BATOCP_THRESHOLD_MASK		GENMASK(2, 0)
+
+#define BATOCP_INTRPT_DELAY_TMR_CFG_REG	(BATIF_BASE + 0x51)
+#define BATOCP_INTRPT_TIMEOUT_MASK	GENMASK(5, 3)
+#define BATOCP_DELAY_TIMEOUT_MASK	GENMASK(2, 0)
+
+#define BATOCP_RESET_TMR_CFG_REG	(BATIF_BASE + 0x52)
+#define EN_BATOCP_RESET_TMR_BIT		BIT(3)
+#define BATOCP_RESET_TIMEOUT_MASK	GENMASK(2, 0)
+
+#define LOW_BATT_DETECT_EN_CFG_REG	(BATIF_BASE + 0x60)
+#define LOW_BATT_DETECT_EN_BIT		BIT(0)
+
+#define LOW_BATT_THRESHOLD_CFG_REG	(BATIF_BASE + 0x61)
+#define LOW_BATT_THRESHOLD_MASK		GENMASK(3, 0)
+
+#define BAT_FET_CFG_REG			(BATIF_BASE + 0x62)
+#define BAT_FET_CFG_BIT			BIT(0)
+
+#define BAT_MISS_SRC_CFG_REG		(BATIF_BASE + 0x70)
+#define BAT_MISS_ALG_EN_BIT		BIT(2)
+#define BAT_MISS_RESERVED_BIT		BIT(1)
+#define BAT_MISS_PIN_SRC_EN_BIT		BIT(0)
+
+#define BAT_MISS_ALG_OPTIONS_CFG_REG	(BATIF_BASE + 0x71)
+#define BAT_MISS_INPUT_PLUGIN_BIT	BIT(2)
+#define BAT_MISS_TMR_START_OPTION_BIT	BIT(1)
+#define BAT_MISS_POLL_EN_BIT		BIT(0)
+
+#define BAT_MISS_PIN_GF_CFG_REG		(BATIF_BASE + 0x72)
+#define BAT_MISS_PIN_GF_MASK		GENMASK(1, 0)
+
+/* USBIN Peripheral Registers */
+#define USBIN_INPUT_STATUS_REG		(USBIN_BASE + 0x06)
+#define USBIN_INPUT_STATUS_7_BIT	BIT(7)
+#define USBIN_INPUT_STATUS_6_BIT	BIT(6)
+#define USBIN_12V_BIT			BIT(5)
+#define USBIN_9V_TO_12V_BIT		BIT(4)
+#define USBIN_9V_BIT			BIT(3)
+#define USBIN_5V_TO_12V_BIT		BIT(2)
+#define USBIN_5V_TO_9V_BIT		BIT(1)
+#define USBIN_5V_BIT			BIT(0)
+#define QC_2P0_STATUS_MASK		GENMASK(2, 0)
+
+#define APSD_STATUS_REG			(USBIN_BASE + 0x07)
+#define APSD_STATUS_7_BIT		BIT(7)
+#define HVDCP_CHECK_TIMEOUT_BIT		BIT(6)
+#define SLOW_PLUGIN_TIMEOUT_BIT		BIT(5)
+#define ENUMERATION_DONE_BIT		BIT(4)
+#define VADP_CHANGE_DONE_AFTER_AUTH_BIT	BIT(3)
+#define QC_AUTH_DONE_STATUS_BIT		BIT(2)
+#define QC_CHARGER_BIT			BIT(1)
+#define APSD_DTC_STATUS_DONE_BIT	BIT(0)
+
+#define APSD_RESULT_STATUS_REG		(USBIN_BASE + 0x08)
+#define ICL_OVERRIDE_LATCH_BIT		BIT(7)
+#define APSD_RESULT_STATUS_MASK		GENMASK(6, 0)
+#define QC_3P0_BIT			BIT(6)
+#define QC_2P0_BIT			BIT(5)
+#define FLOAT_CHARGER_BIT		BIT(4)
+#define DCP_CHARGER_BIT			BIT(3)
+#define CDP_CHARGER_BIT			BIT(2)
+#define OCP_CHARGER_BIT			BIT(1)
+#define SDP_CHARGER_BIT			BIT(0)
+
+#define QC_CHANGE_STATUS_REG		(USBIN_BASE + 0x09)
+#define QC_CHANGE_STATUS_7_BIT		BIT(7)
+#define QC_CHANGE_STATUS_6_BIT		BIT(6)
+#define QC_9V_TO_12V_REASON_BIT		BIT(5)
+#define QC_5V_TO_9V_REASON_BIT		BIT(4)
+#define QC_CONTINUOUS_BIT		BIT(3)
+#define QC_12V_BIT			BIT(2)
+#define QC_9V_BIT			BIT(1)
+#define QC_5V_BIT			BIT(0)
+
+#define QC_PULSE_COUNT_STATUS_REG		(USBIN_BASE + 0x0A)
+#define QC_PULSE_COUNT_STATUS_7_BIT		BIT(7)
+#define QC_PULSE_COUNT_STATUS_6_BIT		BIT(6)
+#define QC_PULSE_COUNT_MASK			GENMASK(5, 0)
+
+#define TYPE_C_STATUS_1_REG			(USBIN_BASE + 0x0B)
+#define UFP_TYPEC_MASK				GENMASK(7, 5)
+#define UFP_TYPEC_RDSTD_BIT			BIT(7)
+#define UFP_TYPEC_RD1P5_BIT			BIT(6)
+#define UFP_TYPEC_RD3P0_BIT			BIT(5)
+#define UFP_TYPEC_FMB_255K_BIT			BIT(4)
+#define UFP_TYPEC_FMB_301K_BIT			BIT(3)
+#define UFP_TYPEC_FMB_523K_BIT			BIT(2)
+#define UFP_TYPEC_FMB_619K_BIT			BIT(1)
+#define UFP_TYPEC_OPEN_OPEN_BIT			BIT(0)
+
+#define TYPE_C_STATUS_2_REG			(USBIN_BASE + 0x0C)
+#define DFP_RA_OPEN_BIT				BIT(7)
+#define TIMER_STAGE_BIT				BIT(6)
+#define EXIT_UFP_MODE_BIT			BIT(5)
+#define EXIT_DFP_MODE_BIT			BIT(4)
+#define DFP_TYPEC_MASK				GENMASK(3, 0)
+#define DFP_RD_OPEN_BIT				BIT(3)
+#define DFP_RD_RA_VCONN_BIT			BIT(2)
+#define DFP_RD_RD_BIT				BIT(1)
+#define DFP_RA_RA_BIT				BIT(0)
+
+#define TYPE_C_STATUS_3_REG			(USBIN_BASE + 0x0D)
+#define ENABLE_BANDGAP_BIT			BIT(7)
+#define U_USB_GND_NOVBUS_BIT			BIT(6)
+#define U_USB_FLOAT_NOVBUS_BIT			BIT(5)
+#define U_USB_GND_BIT				BIT(4)
+#define U_USB_FMB1_BIT				BIT(3)
+#define U_USB_FLOAT1_BIT			BIT(2)
+#define U_USB_FMB2_BIT				BIT(1)
+#define U_USB_FLOAT2_BIT			BIT(0)
+
+#define TYPE_C_STATUS_4_REG			(USBIN_BASE + 0x0E)
+#define UFP_DFP_MODE_STATUS_BIT			BIT(7)
+#define TYPEC_VBUS_STATUS_BIT			BIT(6)
+#define TYPEC_VBUS_ERROR_STATUS_BIT		BIT(5)
+#define TYPEC_DEBOUNCE_DONE_STATUS_BIT		BIT(4)
+#define TYPEC_UFP_AUDIO_ADAPT_STATUS_BIT	BIT(3)
+#define TYPEC_VCONN_OVERCURR_STATUS_BIT		BIT(2)
+#define CC_ORIENTATION_BIT			BIT(1)
+#define CC_ATTACHED_BIT				BIT(0)
+
+#define TYPE_C_STATUS_5_REG			(USBIN_BASE + 0x0F)
+#define TRY_SOURCE_FAILED_BIT			BIT(6)
+#define TRY_SINK_FAILED_BIT			BIT(5)
+#define TIMER_STAGE_2_BIT			BIT(4)
+#define TYPEC_LEGACY_CABLE_STATUS_BIT		BIT(3)
+#define TYPEC_NONCOMP_LEGACY_CABLE_STATUS_BIT	BIT(2)
+#define TYPEC_TRYSOURCE_DETECT_STATUS_BIT	BIT(1)
+#define TYPEC_TRYSINK_DETECT_STATUS_BIT		BIT(0)
+
+/* USBIN Interrupt Bits */
+#define TYPE_C_CHANGE_RT_STS_BIT		BIT(7)
+#define USBIN_ICL_CHANGE_RT_STS_BIT		BIT(6)
+#define USBIN_SOURCE_CHANGE_RT_STS_BIT		BIT(5)
+#define USBIN_PLUGIN_RT_STS_BIT			BIT(4)
+#define USBIN_OV_RT_STS_BIT			BIT(3)
+#define USBIN_UV_RT_STS_BIT			BIT(2)
+#define USBIN_LT_3P6V_RT_STS_BIT		BIT(1)
+#define USBIN_COLLAPSE_RT_STS_BIT		BIT(0)
+
+#define QC_PULSE_COUNT_STATUS_1_REG		(USBIN_BASE + 0x30)
+
+#define USBIN_CMD_IL_REG			(USBIN_BASE + 0x40)
+#define BAT_2_SYS_FET_DIS_BIT			BIT(1)
+#define USBIN_SUSPEND_BIT			BIT(0)
+
+#define CMD_APSD_REG				(USBIN_BASE + 0x41)
+#define ICL_OVERRIDE_BIT			BIT(1)
+#define APSD_RERUN_BIT				BIT(0)
+
+#define CMD_HVDCP_2_REG				(USBIN_BASE + 0x43)
+#define RESTART_AICL_BIT			BIT(7)
+#define TRIGGER_AICL_BIT			BIT(6)
+#define FORCE_12V_BIT				BIT(5)
+#define FORCE_9V_BIT				BIT(4)
+#define FORCE_5V_BIT				BIT(3)
+#define IDLE_BIT				BIT(2)
+#define SINGLE_DECREMENT_BIT			BIT(1)
+#define SINGLE_INCREMENT_BIT			BIT(0)
+
+#define USB_MISC2_REG				(USBIN_BASE + 0x57)
+#define USB_MISC2_MASK				GENMASK(1, 0)
+
+#define TYPE_C_CFG_REG				(USBIN_BASE + 0x58)
+#define APSD_START_ON_CC_BIT			BIT(7)
+#define WAIT_FOR_APSD_BIT			BIT(6)
+#define FACTORY_MODE_DETECTION_EN_BIT		BIT(5)
+#define FACTORY_MODE_ICL_3A_4A_BIT		BIT(4)
+#define FACTORY_MODE_DIS_CHGING_CFG_BIT		BIT(3)
+#define SUSPEND_NON_COMPLIANT_CFG_BIT		BIT(2)
+#define VCONN_OC_CFG_BIT			BIT(1)
+#define TYPE_C_OR_U_USB_BIT			BIT(0)
+
+#define TYPE_C_CFG_2_REG			(USBIN_BASE + 0x59)
+#define TYPE_C_DFP_CURRSRC_MODE_BIT		BIT(7)
+#define VCONN_ILIM500MA_CFG_BIT			BIT(6)
+#define VCONN_SOFTSTART_CFG_MASK		GENMASK(5, 4)
+#define EN_TRY_SOURCE_MODE_BIT			BIT(3)
+#define USB_FACTORY_MODE_ENABLE_BIT		BIT(2)
+#define TYPE_C_UFP_MODE_BIT			BIT(1)
+#define EN_80UA_180UA_CUR_SOURCE_BIT		BIT(0)
+
+#define TYPE_C_CFG_3_REG			(USBIN_BASE + 0x5A)
+#define TVBUS_DEBOUNCE_BIT			BIT(7)
+#define TYPEC_LEGACY_CABLE_INT_EN_BIT		BIT(6)
+#define TYPEC_NONCOMPLIANT_LEGACY_CABLE_INT_EN_BIT		BIT(5)
+#define TYPEC_TRYSOURCE_DETECT_INT_EN_BIT	BIT(4)
+#define TYPEC_TRYSINK_DETECT_INT_EN_BIT		BIT(3)
+#define EN_TRYSINK_MODE_BIT			BIT(2)
+#define EN_LEGACY_CABLE_DETECTION_BIT		BIT(1)
+#define ALLOW_PD_DRING_UFP_TCCDB_BIT		BIT(0)
+
+#define USBIN_ADAPTER_ALLOW_CFG_REG		(USBIN_BASE + 0x60)
+#define USBIN_ADAPTER_ALLOW_MASK		GENMASK(3, 0)
+enum {
+	USBIN_ADAPTER_ALLOW_5V		= 0,
+	USBIN_ADAPTER_ALLOW_9V		= 2,
+	USBIN_ADAPTER_ALLOW_5V_OR_9V	= 3,
+	USBIN_ADAPTER_ALLOW_12V		= 4,
+	USBIN_ADAPTER_ALLOW_5V_OR_12V	= 5,
+	USBIN_ADAPTER_ALLOW_9V_TO_12V	= 6,
+	USBIN_ADAPTER_ALLOW_5V_OR_9V_TO_12V = 7,
+	USBIN_ADAPTER_ALLOW_5V_TO_9V	= 8,
+	USBIN_ADAPTER_ALLOW_5V_TO_12V	= 12,
+};
+
+#define USBIN_OPTIONS_1_CFG_REG			(USBIN_BASE + 0x62)
+#define CABLE_R_SEL_BIT				BIT(7)
+#define HVDCP_AUTH_ALG_EN_CFG_BIT		BIT(6)
+#define HVDCP_AUTONOMOUS_MODE_EN_CFG_BIT	BIT(5)
+#define INPUT_PRIORITY_BIT			BIT(4)
+#define AUTO_SRC_DETECT_BIT			BIT(3)
+#define HVDCP_EN_BIT				BIT(2)
+#define VADP_INCREMENT_VOLTAGE_LIMIT_BIT	BIT(1)
+#define VADP_TAPER_TIMER_EN_BIT			BIT(0)
+
+#define USBIN_OPTIONS_2_CFG_REG			(USBIN_BASE + 0x63)
+#define WIPWR_RST_EUD_CFG_BIT			BIT(7)
+#define SWITCHER_START_CFG_BIT			BIT(6)
+#define DCD_TIMEOUT_SEL_BIT			BIT(5)
+#define OCD_CURRENT_SEL_BIT			BIT(4)
+#define SLOW_PLUGIN_TIMER_EN_CFG_BIT		BIT(3)
+#define FLOAT_OPTIONS_MASK			GENMASK(2, 0)
+#define FLOAT_DIS_CHGING_CFG_BIT		BIT(2)
+#define SUSPEND_FLOAT_CFG_BIT			BIT(1)
+#define FORCE_FLOAT_SDP_CFG_BIT			BIT(0)
+
+#define TAPER_TIMER_SEL_CFG_REG			(USBIN_BASE + 0x64)
+#define TYPEC_SPARE_CFG_BIT			BIT(7)
+#define TYPEC_DRP_DFP_TIME_CFG_BIT		BIT(5)
+#define TAPER_TIMER_SEL_MASK			GENMASK(1, 0)
+
+#define USBIN_LOAD_CFG_REG			(USBIN_BASE + 0x65)
+#define USBIN_OV_CH_LOAD_OPTION_BIT		BIT(7)
+#define ICL_OVERRIDE_AFTER_APSD_BIT		BIT(4)
+
+#define USBIN_ICL_OPTIONS_REG			(USBIN_BASE + 0x66)
+#define CFG_USB3P0_SEL_BIT			BIT(2)
+#define USB51_MODE_BIT				BIT(1)
+#define USBIN_MODE_CHG_BIT			BIT(0)
+
+#define TYPE_C_INTRPT_ENB_REG			(USBIN_BASE + 0x67)
+#define TYPEC_CCOUT_DETACH_INT_EN_BIT		BIT(7)
+#define TYPEC_CCOUT_ATTACH_INT_EN_BIT		BIT(6)
+#define TYPEC_VBUS_ERROR_INT_EN_BIT		BIT(5)
+#define TYPEC_UFP_AUDIOADAPT_INT_EN_BIT		BIT(4)
+#define TYPEC_DEBOUNCE_DONE_INT_EN_BIT		BIT(3)
+#define TYPEC_CCSTATE_CHANGE_INT_EN_BIT		BIT(2)
+#define TYPEC_VBUS_DEASSERT_INT_EN_BIT		BIT(1)
+#define TYPEC_VBUS_ASSERT_INT_EN_BIT		BIT(0)
+
+#define TYPE_C_INTRPT_ENB_SOFTWARE_CTRL_REG	(USBIN_BASE + 0x68)
+#define EXIT_SNK_BASED_ON_CC_BIT		BIT(7)
+#define VCONN_EN_ORIENTATION_BIT		BIT(6)
+#define TYPEC_VCONN_OVERCURR_INT_EN_BIT		BIT(5)
+#define VCONN_EN_SRC_BIT			BIT(4)
+#define VCONN_EN_VALUE_BIT			BIT(3)
+#define TYPEC_POWER_ROLE_CMD_MASK		GENMASK(2, 0)
+#define UFP_EN_CMD_BIT				BIT(2)
+#define DFP_EN_CMD_BIT				BIT(1)
+#define TYPEC_DISABLE_CMD_BIT			BIT(0)
+
+#define USBIN_SOURCE_CHANGE_INTRPT_ENB_REG	(USBIN_BASE + 0x69)
+#define SLOW_IRQ_EN_CFG_BIT			BIT(5)
+#define ENUMERATION_IRQ_EN_CFG_BIT		BIT(4)
+#define VADP_IRQ_EN_CFG_BIT			BIT(3)
+#define AUTH_IRQ_EN_CFG_BIT			BIT(2)
+#define HVDCP_IRQ_EN_CFG_BIT			BIT(1)
+#define APSD_IRQ_EN_CFG_BIT			BIT(0)
+
+#define USBIN_CURRENT_LIMIT_CFG_REG		(USBIN_BASE + 0x70)
+#define USBIN_CURRENT_LIMIT_MASK		GENMASK(7, 0)
+
+#define USBIN_AICL_OPTIONS_CFG_REG		(USBIN_BASE + 0x80)
+#define SUSPEND_ON_COLLAPSE_USBIN_BIT		BIT(7)
+#define USBIN_AICL_HDC_EN_BIT			BIT(6)
+#define USBIN_AICL_START_AT_MAX_BIT		BIT(5)
+#define USBIN_AICL_RERUN_EN_BIT			BIT(4)
+#define USBIN_AICL_ADC_EN_BIT			BIT(3)
+#define USBIN_AICL_EN_BIT			BIT(2)
+#define USBIN_HV_COLLAPSE_RESPONSE_BIT		BIT(1)
+#define USBIN_LV_COLLAPSE_RESPONSE_BIT		BIT(0)
+
+#define USBIN_5V_AICL_THRESHOLD_CFG_REG		(USBIN_BASE + 0x81)
+#define USBIN_5V_AICL_THRESHOLD_CFG_MASK	GENMASK(2, 0)
+
+#define USBIN_9V_AICL_THRESHOLD_CFG_REG		(USBIN_BASE + 0x82)
+#define USBIN_9V_AICL_THRESHOLD_CFG_MASK	GENMASK(2, 0)
+
+#define USBIN_12V_AICL_THRESHOLD_CFG_REG	(USBIN_BASE + 0x83)
+#define USBIN_12V_AICL_THRESHOLD_CFG_MASK	GENMASK(2, 0)
+
+#define USBIN_CONT_AICL_THRESHOLD_CFG_REG	(USBIN_BASE + 0x84)
+#define USBIN_CONT_AICL_THRESHOLD_CFG_MASK	GENMASK(5, 0)
+
+/* DCIN Peripheral Registers */
+#define DCIN_INPUT_STATUS_REG		(DCIN_BASE + 0x06)
+#define DCIN_INPUT_STATUS_7_BIT		BIT(7)
+#define DCIN_INPUT_STATUS_6_BIT		BIT(6)
+#define DCIN_12V_BIT			BIT(5)
+#define DCIN_9V_TO_12V_BIT		BIT(4)
+#define DCIN_9V_BIT			BIT(3)
+#define DCIN_5V_TO_12V_BIT		BIT(2)
+#define DCIN_5V_TO_9V_BIT		BIT(1)
+#define DCIN_5V_BIT			BIT(0)
+
+#define WIPWR_STATUS_REG		(DCIN_BASE + 0x07)
+#define WIPWR_STATUS_7_BIT		BIT(7)
+#define WIPWR_STATUS_6_BIT		BIT(6)
+#define WIPWR_STATUS_5_BIT		BIT(5)
+#define DCIN_WIPWR_OV_DG_BIT		BIT(4)
+#define DIV2_EN_DG_BIT			BIT(3)
+#define SHUTDOWN_N_LATCH_BIT		BIT(2)
+#define CHG_OK_PIN_BIT			BIT(1)
+#define WIPWR_CHARGING_ENABLED_BIT	BIT(0)
+
+#define WIPWR_RANGE_STATUS_REG		(DCIN_BASE + 0x08)
+#define WIPWR_RANGE_STATUS_MASK		GENMASK(4, 0)
+
+/* DCIN Interrupt Bits */
+#define WIPWR_VOLTAGE_RANGE_RT_STS_BIT	BIT(7)
+#define DCIN_ICL_CHANGE_RT_STS_BIT	BIT(6)
+#define DIV2_EN_DG_RT_STS_BIT		BIT(5)
+#define DCIN_PLUGIN_RT_STS_BIT		BIT(4)
+#define DCIN_OV_RT_STS_BIT		BIT(3)
+#define DCIN_UV_RT_STS_BIT		BIT(2)
+#define DCIN_LT_3P6V_RT_STS_BIT		BIT(1)
+#define DCIN_COLLAPSE_RT_STS_BIT	BIT(0)
+
+#define DCIN_CMD_IL_REG				(DCIN_BASE + 0x40)
+#define WIRELESS_CHG_DIS_BIT			BIT(3)
+#define SHDN_N_CLEAR_CMD_BIT			BIT(2)
+#define SHDN_N_SET_CMD_BIT			BIT(1)
+#define DCIN_SUSPEND_BIT			BIT(0)
+
+#define DC_SPARE_REG				(DCIN_BASE + 0x58)
+#define DC_SPARE_MASK				GENMASK(3, 0)
+
+#define DCIN_ADAPTER_ALLOW_CFG_REG		(DCIN_BASE + 0x60)
+#define DCIN_ADAPTER_ALLOW_MASK			GENMASK(3, 0)
+
+#define DCIN_LOAD_CFG_REG			(DCIN_BASE + 0x65)
+#define DCIN_OV_CH_LOAD_OPTION_BIT		BIT(7)
+
+#define DCIN_CURRENT_LIMIT_CFG_REG		(DCIN_BASE + 0x70)
+#define DCIN_CURRENT_LIMIT_MASK			GENMASK(7, 0)
+
+#define DCIN_AICL_OPTIONS_CFG_REG		(DCIN_BASE + 0x80)
+#define SUSPEND_ON_COLLAPSE_DCIN_BIT		BIT(7)
+#define DCIN_AICL_HDC_EN_BIT			BIT(6)
+#define DCIN_AICL_START_AT_MAX_BIT		BIT(5)
+#define DCIN_AICL_RERUN_EN_BIT			BIT(4)
+#define DCIN_AICL_ADC_EN_BIT			BIT(3)
+#define DCIN_AICL_EN_BIT			BIT(2)
+#define DCIN_HV_COLLAPSE_RESPONSE_BIT		BIT(1)
+#define DCIN_LV_COLLAPSE_RESPONSE_BIT		BIT(0)
+
+#define DCIN_AICL_REF_SEL_CFG_REG		(DCIN_BASE + 0x81)
+#define DCIN_CONT_AICL_THRESHOLD_CFG_MASK	GENMASK(5, 0)
+
+#define DCIN_ICL_START_CFG_REG			(DCIN_BASE + 0x82)
+#define DCIN_ICL_START_CFG_BIT			BIT(0)
+
+#define DIV2_EN_GF_TIME_CFG_REG			(DCIN_BASE + 0x90)
+#define DIV2_EN_GF_TIME_CFG_MASK		GENMASK(1, 0)
+
+#define WIPWR_IRQ_TMR_CFG_REG			(DCIN_BASE + 0x91)
+#define WIPWR_IRQ_TMR_MASK			GENMASK(2, 0)
+
+#define ZIN_ICL_PT_REG				(DCIN_BASE + 0x92)
+#define ZIN_ICL_PT_MASK				GENMASK(7, 0)
+
+#define ZIN_ICL_LV_REG				(DCIN_BASE + 0x93)
+#define ZIN_ICL_LV_MASK				GENMASK(7, 0)
+
+#define ZIN_ICL_HV_REG				(DCIN_BASE + 0x94)
+#define ZIN_ICL_HV_MASK				GENMASK(7, 0)
+
+#define WI_PWR_OPTIONS_REG			(DCIN_BASE + 0x95)
+#define CHG_OK_BIT				BIT(7)
+#define WIPWR_UVLO_IRQ_OPT_BIT			BIT(6)
+#define BUCK_HOLDOFF_ENABLE_BIT			BIT(5)
+#define CHG_OK_HW_SW_SELECT_BIT			BIT(4)
+#define WIPWR_RST_ENABLE_BIT			BIT(3)
+#define DCIN_WIPWR_IRQ_SELECT_BIT		BIT(2)
+#define AICL_SWITCH_ENABLE_BIT			BIT(1)
+#define ZIN_ICL_ENABLE_BIT			BIT(0)
+
+#define ZIN_ICL_PT_HV_REG			(DCIN_BASE + 0x96)
+#define ZIN_ICL_PT_HV_MASK			GENMASK(7, 0)
+
+#define ZIN_ICL_MID_LV_REG			(DCIN_BASE + 0x97)
+#define ZIN_ICL_MID_LV_MASK			GENMASK(7, 0)
+
+#define ZIN_ICL_MID_HV_REG			(DCIN_BASE + 0x98)
+#define ZIN_ICL_MID_HV_MASK			GENMASK(7, 0)
+
+enum {
+	ZIN_ICL_PT_MAX_MV = 8000,
+	ZIN_ICL_PT_HV_MAX_MV = 9000,
+	ZIN_ICL_LV_MAX_MV = 5500,
+	ZIN_ICL_MID_LV_MAX_MV = 6500,
+	ZIN_ICL_MID_HV_MAX_MV = 8000,
+	ZIN_ICL_HV_MAX_MV = 11000,
+};
+
+#define DC_ENG_SSUPPLY_CFG2_REG			(DCIN_BASE + 0xC1)
+#define ENG_SSUPPLY_IVREF_OTG_SS_MASK		GENMASK(2, 0)
+#define OTG_SS_SLOW				0x3
+
+#define DC_ENG_SSUPPLY_CFG3_REG			(DCIN_BASE + 0xC2)
+#define ENG_SSUPPLY_HI_CAP_BIT			BIT(6)
+#define ENG_SSUPPLY_HI_RES_BIT			BIT(5)
+#define ENG_SSUPPLY_CFG_SKIP_TH_V0P2_BIT	BIT(3)
+#define ENG_SSUPPLY_CFG_SYSOV_TH_4P8_BIT	BIT(2)
+#define ENG_SSUPPLY_5V_OV_OPT_BIT		BIT(0)
+
+/* MISC Peripheral Registers */
+#define REVISION1_REG				(MISC_BASE + 0x00)
+#define DIG_MINOR_MASK				GENMASK(7, 0)
+
+#define REVISION2_REG				(MISC_BASE + 0x01)
+#define DIG_MAJOR_MASK				GENMASK(7, 0)
+
+#define REVISION3_REG				(MISC_BASE + 0x02)
+#define ANA_MINOR_MASK				GENMASK(7, 0)
+
+#define REVISION4_REG				(MISC_BASE + 0x03)
+#define ANA_MAJOR_MASK				GENMASK(7, 0)
+
+#define TEMP_RANGE_STATUS_REG			(MISC_BASE + 0x06)
+#define TEMP_RANGE_STATUS_7_BIT			BIT(7)
+#define THERM_REG_ACTIVE_BIT			BIT(6)
+#define TLIM_BIT				BIT(5)
+#define TEMP_RANGE_MASK				GENMASK(4, 1)
+#define ALERT_LEVEL_BIT				BIT(4)
+#define TEMP_ABOVE_RANGE_BIT			BIT(3)
+#define TEMP_WITHIN_RANGE_BIT			BIT(2)
+#define TEMP_BELOW_RANGE_BIT			BIT(1)
+#define THERMREG_DISABLED_BIT			BIT(0)
+
+#define ICL_STATUS_REG				(MISC_BASE + 0x07)
+#define INPUT_CURRENT_LIMIT_MASK		GENMASK(7, 0)
+
+#define ADAPTER_5V_ICL_STATUS_REG		(MISC_BASE + 0x08)
+#define ADAPTER_5V_ICL_MASK			GENMASK(7, 0)
+
+#define ADAPTER_9V_ICL_STATUS_REG		(MISC_BASE + 0x09)
+#define ADAPTER_9V_ICL_MASK			GENMASK(7, 0)
+
+#define AICL_STATUS_REG				(MISC_BASE + 0x0A)
+#define AICL_STATUS_7_BIT			BIT(7)
+#define SOFT_ILIMIT_BIT				BIT(6)
+#define HIGHEST_DC_BIT				BIT(5)
+#define USBIN_CH_COLLAPSE_BIT			BIT(4)
+#define DCIN_CH_COLLAPSE_BIT			BIT(3)
+#define ICL_IMIN_BIT				BIT(2)
+#define AICL_FAIL_BIT				BIT(1)
+#define AICL_DONE_BIT				BIT(0)
+
+#define POWER_PATH_STATUS_REG			(MISC_BASE + 0x0B)
+#define INPUT_SS_DONE_BIT			BIT(7)
+#define USBIN_SUSPEND_STS_BIT			BIT(6)
+#define DCIN_SUSPEND_STS_BIT			BIT(5)
+#define USE_USBIN_BIT				BIT(4)
+#define USE_DCIN_BIT				BIT(3)
+#define POWER_PATH_MASK				GENMASK(2, 1)
+#define VALID_INPUT_POWER_SOURCE_STS_BIT	BIT(0)
+
+#define WDOG_STATUS_REG				(MISC_BASE + 0x0C)
+#define WDOG_STATUS_7_BIT			BIT(7)
+#define WDOG_STATUS_6_BIT			BIT(6)
+#define WDOG_STATUS_5_BIT			BIT(5)
+#define WDOG_STATUS_4_BIT			BIT(4)
+#define WDOG_STATUS_3_BIT			BIT(3)
+#define WDOG_STATUS_2_BIT			BIT(2)
+#define WDOG_STATUS_1_BIT			BIT(1)
+#define BARK_BITE_STATUS_BIT			BIT(0)
+
+#define SYSOK_REASON_STATUS_REG			(MISC_BASE + 0x0D)
+#define SYSOK_REASON_DCIN_BIT			BIT(1)
+#define SYSOK_REASON_USBIN_BIT			BIT(0)
+
+/* MISC Interrupt Bits */
+#define SWITCHER_POWER_OK_RT_STS_BIT		BIT(7)
+#define TEMPERATURE_CHANGE_RT_STS_BIT		BIT(6)
+#define INPUT_CURRENT_LIMITING_RT_STS_BIT	BIT(5)
+#define HIGH_DUTY_CYCLE_RT_STS_BIT		BIT(4)
+#define AICL_DONE_RT_STS_BIT			BIT(3)
+#define AICL_FAIL_RT_STS_BIT			BIT(2)
+#define WDOG_BARK_RT_STS_BIT			BIT(1)
+#define WDOG_SNARL_RT_STS_BIT			BIT(0)
+
+#define WDOG_RST_REG				(MISC_BASE + 0x40)
+#define WDOG_RST_BIT				BIT(0)
+
+#define AFP_MODE_REG				(MISC_BASE + 0x41)
+#define AFP_MODE_EN_BIT				BIT(0)
+
+#define GSM_PA_ON_ADJ_EN_REG			(MISC_BASE + 0x42)
+#define GSM_PA_ON_ADJ_EN_BIT			BIT(0)
+
+#define BARK_BITE_WDOG_PET_REG			(MISC_BASE + 0x43)
+#define BARK_BITE_WDOG_PET_BIT			BIT(0)
+
+#define PHYON_CMD_REG				(MISC_BASE + 0x44)
+#define PHYON_CMD_BIT				BIT(0)
+
+#define SHDN_CMD_REG				(MISC_BASE + 0x45)
+#define SHDN_CMD_BIT				BIT(0)
+
+#define FINISH_COPY_COMMAND_REG			(MISC_BASE + 0x4F)
+#define START_COPY_BIT				BIT(0)
+
+#define WD_CFG_REG				(MISC_BASE + 0x51)
+#define WATCHDOG_TRIGGER_AFP_EN_BIT		BIT(7)
+#define BARK_WDOG_INT_EN_BIT			BIT(6)
+#define BITE_WDOG_INT_EN_BIT			BIT(5)
+#define SFT_AFTER_WDOG_IRQ_MASK			GENMASK(4, 3)
+#define WDOG_IRQ_SFT_BIT			BIT(2)
+#define WDOG_TIMER_EN_ON_PLUGIN_BIT		BIT(1)
+#define WDOG_TIMER_EN_BIT			BIT(0)
+
+#define MISC_CFG_REG				(MISC_BASE + 0x52)
+#define TCC_DEBOUNCE_20MS_BIT			BIT(5)
+#define STAT_PARALLEL_1400MA_EN_CFG_BIT		BIT(3)
+#define GSM_PA_ON_ADJ_SEL_BIT			BIT(0)
+
+#define SNARL_BARK_BITE_WD_CFG_REG		(MISC_BASE + 0x53)
+#define BITE_WDOG_DISABLE_CHARGING_CFG_BIT	BIT(7)
+#define SNARL_WDOG_TIMEOUT_MASK			GENMASK(6, 4)
+#define BARK_WDOG_TIMEOUT_MASK			GENMASK(3, 2)
+#define BITE_WDOG_TIMEOUT_MASK			GENMASK(1, 0)
+
+#define PHYON_CFG_REG				(MISC_BASE + 0x54)
+#define USBPHYON_PUSHPULL_CFG_BIT		BIT(1)
+#define PHYON_SW_SEL_BIT			BIT(0)
+
+#define CHGR_TRIM_OPTIONS_7_0_REG		(MISC_BASE + 0x55)
+#define TLIM_DIS_TBIT_BIT			BIT(0)
+
+#define CH_OV_OPTION_CFG_REG			(MISC_BASE + 0x56)
+#define OV_OPTION_TBIT_BIT			BIT(0)
+
+#define AICL_CFG_REG				(MISC_BASE + 0x60)
+#define TREG_ALLOW_DECREASE_BIT			BIT(1)
+#define AICL_HIGH_DC_INC_BIT			BIT(0)
+
+#define AICL_RERUN_TIME_CFG_REG			(MISC_BASE + 0x61)
+#define AICL_RERUN_TIME_MASK			GENMASK(1, 0)
+
+#define AICL_RERUN_TEMP_TIME_CFG_REG		(MISC_BASE + 0x62)
+#define AICL_RERUN_TEMP_TIME_MASK		GENMASK(1, 0)
+
+#define THERMREG_SRC_CFG_REG			(MISC_BASE + 0x70)
+#define SKIN_ADC_CFG_BIT			BIT(3)
+#define THERMREG_SKIN_ADC_SRC_EN_BIT		BIT(2)
+#define THERMREG_DIE_ADC_SRC_EN_BIT		BIT(1)
+#define THERMREG_DIE_CMP_SRC_EN_BIT		BIT(0)
+
+#define TREG_DIE_CMP_INC_CYCLE_TIME_CFG_REG	(MISC_BASE + 0x71)
+#define TREG_DIE_CMP_INC_CYCLE_TIME_MASK	GENMASK(1, 0)
+
+#define TREG_DIE_CMP_DEC_CYCLE_TIME_CFG_REG	(MISC_BASE + 0x72)
+#define TREG_DIE_CMP_DEC_CYCLE_TIME_MASK	GENMASK(1, 0)
+
+#define TREG_DIE_ADC_INC_CYCLE_TIME_CFG_REG	(MISC_BASE + 0x73)
+#define TREG_DIE_ADC_INC_CYCLE_TIME_MASK	GENMASK(1, 0)
+
+#define TREG_DIE_ADC_DEC_CYCLE_TIME_CFG_REG	(MISC_BASE + 0x74)
+#define TREG_DIE_ADC_DEC_CYCLE_TIME_MASK	GENMASK(1, 0)
+
+#define TREG_SKIN_ADC_INC_CYCLE_TIME_CFG_REG	(MISC_BASE + 0x75)
+#define TREG_SKIN_ADC_INC_CYCLE_TIME_MASK	GENMASK(1, 0)
+
+#define TREG_SKIN_ADC_DEC_CYCLE_TIME_CFG_REG	(MISC_BASE + 0x76)
+#define TREG_SKIN_ADC_DEC_CYCLE_TIME_MASK	GENMASK(1, 0)
+
+#define BUCK_OPTIONS_CFG_REG			(MISC_BASE + 0x80)
+#define CHG_EN_PIN_SUSPEND_CFG_BIT		BIT(6)
+#define HICCUP_OPTIONS_MASK			GENMASK(5, 4)
+#define INPUT_CURRENT_LIMIT_SOFTSTART_EN_BIT	BIT(3)
+#define HV_HIGH_DUTY_CYCLE_PROTECT_EN_BIT	BIT(2)
+#define BUCK_OC_PROTECT_EN_BIT			BIT(1)
+#define INPUT_MISS_POLL_EN_BIT			BIT(0)
+
+#define ICL_SOFTSTART_RATE_CFG_REG		(MISC_BASE + 0x81)
+#define ICL_SOFTSTART_RATE_MASK			GENMASK(1, 0)
+
+#define ICL_SOFTSTOP_RATE_CFG_REG		(MISC_BASE + 0x82)
+#define ICL_SOFTSTOP_RATE_MASK			GENMASK(1, 0)
+
+#define VSYS_MIN_SEL_CFG_REG			(MISC_BASE + 0x83)
+#define VSYS_MIN_SEL_MASK			GENMASK(1, 0)
+
+#define TRACKING_VOLTAGE_SEL_CFG_REG		(MISC_BASE + 0x84)
+#define TRACKING_VOLTAGE_SEL_BIT		BIT(0)
+
+#define STAT_CFG_REG				(MISC_BASE + 0x90)
+#define STAT_SW_OVERRIDE_VALUE_BIT		BIT(7)
+#define STAT_SW_OVERRIDE_CFG_BIT		BIT(6)
+#define STAT_PARALLEL_OFF_DG_CFG_MASK		GENMASK(5, 4)
+#define STAT_POLARITY_CFG_BIT			BIT(3)
+#define STAT_PARALLEL_CFG_BIT			BIT(2)
+#define STAT_FUNCTION_CFG_BIT			BIT(1)
+#define STAT_IRQ_PULSING_EN_BIT			BIT(0)
+
+#define LBC_EN_CFG_REG				(MISC_BASE + 0x91)
+#define LBC_DURING_CHARGING_CFG_BIT		BIT(1)
+#define LBC_EN_BIT				BIT(0)
+
+#define LBC_PERIOD_CFG_REG			(MISC_BASE + 0x92)
+#define LBC_PERIOD_MASK				GENMASK(2, 0)
+
+#define LBC_DUTY_CYCLE_CFG_REG			(MISC_BASE + 0x93)
+#define LBC_DUTY_CYCLE_MASK			GENMASK(2, 0)
+
+#define SYSOK_CFG_REG				(MISC_BASE + 0x94)
+#define SYSOK_PUSHPULL_CFG_BIT			BIT(5)
+#define SYSOK_B_OR_C_SEL_BIT			BIT(4)
+#define SYSOK_POL_BIT				BIT(3)
+#define SYSOK_OPTIONS_MASK			GENMASK(2, 0)
+
+#define CFG_BUCKBOOST_FREQ_SELECT_BUCK_REG	(MISC_BASE + 0xA0)
+#define CFG_BUCKBOOST_FREQ_SELECT_BOOST_REG	(MISC_BASE + 0xA1)
+
+#define TM_IO_DTEST4_SEL			(MISC_BASE + 0xE9)
+
+/* CHGR FREQ Peripheral registers */
+#define FREQ_CLK_DIV_REG			(CHGR_FREQ_BASE + 0x50)
+
+#endif /* __SMB2_CHARGER_REG_H */
diff -Nruw linux-4.4.115/drivers/power/supply/qcom/step-chg-jeita.c linux-4.4.115-fbx/drivers/power/supply/qcom/step-chg-jeita.c
--- linux-4.4.115/drivers/power/supply/qcom/step-chg-jeita.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/power/supply/qcom/step-chg-jeita.c	2019-01-22 16:16:26.239271183 +0100
@@ -0,0 +1,511 @@
+/* Copyright (c) 2017 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#define pr_fmt(fmt) "QCOM-STEPCHG: %s: " fmt, __func__
+
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/power_supply.h>
+#include <linux/slab.h>
+#include <linux/pmic-voter.h>
+#include "step-chg-jeita.h"
+
+#define MAX_STEP_CHG_ENTRIES	8
+#define STEP_CHG_VOTER		"STEP_CHG_VOTER"
+#define JEITA_VOTER		"JEITA_VOTER"
+
+#define is_between(left, right, value) \
+		(((left) >= (right) && (left) >= (value) \
+			&& (value) >= (right)) \
+		|| ((left) <= (right) && (left) <= (value) \
+			&& (value) <= (right)))
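+
+/*
+ * is_between() accepts its bounds in either order; with hypothetical
+ * values:
+ *	is_between(100, 200, 150)	-> true  (ascending bounds)
+ *	is_between(200, 100, 150)	-> true  (descending bounds)
+ *	is_between(100, 200, 250)	-> false
+ */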
+
+struct range_data {
+	u32 low_threshold;
+	u32 high_threshold;
+	u32 value;
+};
+
+struct step_chg_cfg {
+	u32			psy_prop;
+	char			*prop_name;
+	int			hysteresis;
+	struct range_data	fcc_cfg[MAX_STEP_CHG_ENTRIES];
+};
+
+struct jeita_fcc_cfg {
+	u32			psy_prop;
+	char			*prop_name;
+	int			hysteresis;
+	struct range_data	fcc_cfg[MAX_STEP_CHG_ENTRIES];
+};
+
+struct jeita_fv_cfg {
+	u32			psy_prop;
+	char			*prop_name;
+	int			hysteresis;
+	struct range_data	fv_cfg[MAX_STEP_CHG_ENTRIES];
+};
+
+struct step_chg_info {
+	ktime_t			step_last_update_time;
+	ktime_t			jeita_last_update_time;
+	bool			step_chg_enable;
+	bool			sw_jeita_enable;
+	int			jeita_fcc_index;
+	int			jeita_fv_index;
+	int			step_index;
+
+	struct votable		*fcc_votable;
+	struct votable		*fv_votable;
+	struct wakeup_source	*step_chg_ws;
+	struct power_supply	*batt_psy;
+	struct delayed_work	status_change_work;
+	struct notifier_block	nb;
+};
+
+static struct step_chg_info *the_chip;
+
+#define STEP_CHG_HYSTERISIS_DELAY_US		5000000 /* 5 secs */
+
+/*
+ * Step Charging Configuration
+ * Update the table based on the battery profile.
+ * Supports VBATT- and SOC-based sources.
+ * Ranges must be listed in increasing order and must not overlap.
+ */
+static struct step_chg_cfg step_chg_config = {
+	.psy_prop	= POWER_SUPPLY_PROP_VOLTAGE_NOW,
+	.prop_name	= "VBATT",
+	.hysteresis	= 100000, /* 100mV */
+	.fcc_cfg	= {
+		/* VBAT_LOW	VBAT_HIGH	FCC */
+		{3600000,	4000000,	3000000},
+		{4001000,	4200000,	2800000},
+		{4201000,	4400000,	2000000},
+	},
+	/*
+	 *	SOC STEP-CHG configuration example.
+	 *
+	 *	.psy_prop = POWER_SUPPLY_PROP_CAPACITY,
+	 *	.prop_name = "SOC",
+	 *	.fcc_cfg	= {
+	 *		//SOC_LOW	SOC_HIGH	FCC
+	 *		{20,		70,		3000000},
+	 *		{70,		90,		2750000},
+	 *		{90,		100,		2500000},
+	 *	},
+	 */
+};
+
+/*
+ * JEITA Charging Configuration
+ * Update the table based on the battery profile.
+ * Ensure that the TEMP ranges are also programmed in the hardware, so
+ * that an interrupt is issued and the resulting psy-changed notification
+ * makes us react immediately.
+ * Ranges must be listed in increasing order and must not overlap;
+ * gaps are allowed.
+ */
+static struct jeita_fcc_cfg jeita_fcc_config = {
+	.psy_prop	= POWER_SUPPLY_PROP_TEMP,
+	.prop_name	= "BATT_TEMP",
+	.hysteresis	= 10, /* 1degC hysteresis */
+	.fcc_cfg	= {
+		/* TEMP_LOW	TEMP_HIGH	FCC */
+		{0,		100,		600000},
+		{101,		200,		2000000},
+		{201,		450,		3000000},
+		{451,		550,		600000},
+	},
+};
+
+static struct jeita_fv_cfg jeita_fv_config = {
+	.psy_prop	= POWER_SUPPLY_PROP_TEMP,
+	.prop_name	= "BATT_TEMP",
+	.hysteresis	= 10, /* 1degC hysteresis */
+	.fv_cfg		= {
+		/* TEMP_LOW	TEMP_HIGH	FV */
+		{0,		100,		4200000},
+		{101,		450,		4400000},
+		{451,		550,		4200000},
+	},
+};
+
+static bool is_batt_available(struct step_chg_info *chip)
+{
+	if (!chip->batt_psy)
+		chip->batt_psy = power_supply_get_by_name("battery");
+
+	if (!chip->batt_psy)
+		return false;
+
+	return true;
+}
+
+static int get_val(struct range_data *range, int hysteresis, int current_index,
+		int threshold,
+		int *new_index, int *val)
+{
+	int i;
+
+	*new_index = -EINVAL;
+	/* first find the matching index without hysteresis */
+	for (i = 0; i < MAX_STEP_CHG_ENTRIES; i++)
+		if (is_between(range[i].low_threshold,
+			range[i].high_threshold, threshold)) {
+			*new_index = i;
+			*val = range[i].value;
+		}
+
+	/* if nothing was found, return -ENODATA */
+	if (*new_index == -EINVAL)
+		return -ENODATA;
+	/*
+	 * If we don't have a current_index, return this newly found
+	 * value. There is no hysteresis for an out-of-range to
+	 * in-range transition.
+	 */
+	if (current_index == -EINVAL)
+		return 0;
+
+	/*
+	 * Check for hysteresis if the new index is in the
+	 * neighbourhood of the current index.
+	 */
+	if (*new_index == current_index + 1) {
+		if (threshold < range[*new_index].low_threshold + hysteresis) {
+			/*
+			 * Stay at the current index; the threshold is
+			 * not higher by the hysteresis amount
+			 */
+			*new_index = current_index;
+			*val = range[current_index].value;
+		}
+	} else if (*new_index == current_index - 1) {
+		if (threshold > range[*new_index].high_threshold - hysteresis) {
+			/*
+			 * Stay at the current index; the threshold is
+			 * not lower by the hysteresis amount
+			 */
+			*new_index = current_index;
+			*val = range[current_index].value;
+		}
+	}
+	return 0;
+}
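+
+/*
+ * Worked example with the jeita_fcc_config table above (hysteresis = 10,
+ * i.e. 1 degC): moving up from index 2 (201..450) requires the reading
+ * to reach 451 + 10 = 461 (46.1 degC); at 455, get_val() keeps index 2
+ * and its 3000000 uA value. Dropping back from index 3 likewise
+ * requires the reading to fall to 450 - 10 = 440 or below.
+ */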
+
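+/*
+ * Returns 0 on success, a negative errno on failure, or a positive
+ * delay in microseconds after which the caller should run the handler
+ * again (the hysteresis window has not yet elapsed). handle_jeita()
+ * below follows the same convention.
+ */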
+static int handle_step_chg_config(struct step_chg_info *chip)
+{
+	union power_supply_propval pval = {0, };
+	int rc = 0, fcc_ua = 0;
+	u64 elapsed_us;
+
+	elapsed_us = ktime_us_delta(ktime_get(), chip->step_last_update_time);
+	if (elapsed_us < STEP_CHG_HYSTERISIS_DELAY_US)
+		goto reschedule;
+
+	rc = power_supply_get_property(chip->batt_psy,
+		POWER_SUPPLY_PROP_STEP_CHARGING_ENABLED, &pval);
+	if (rc < 0)
+		chip->step_chg_enable = 0;
+	else
+		chip->step_chg_enable = pval.intval;
+
+	if (!chip->step_chg_enable) {
+		if (chip->fcc_votable)
+			vote(chip->fcc_votable, STEP_CHG_VOTER, false, 0);
+		goto update_time;
+	}
+
+	rc = power_supply_get_property(chip->batt_psy,
+				step_chg_config.psy_prop, &pval);
+	if (rc < 0) {
+		pr_err("Couldn't read %s property rc=%d\n",
+				step_chg_config.prop_name, rc);
+		return rc;
+	}
+
+	rc = get_val(step_chg_config.fcc_cfg, step_chg_config.hysteresis,
+			chip->step_index,
+			pval.intval,
+			&chip->step_index,
+			&fcc_ua);
+	if (rc < 0) {
+		/* remove the vote if no step-based fcc is found */
+		if (chip->fcc_votable)
+			vote(chip->fcc_votable, STEP_CHG_VOTER, false, 0);
+		goto update_time;
+	}
+
+	if (!chip->fcc_votable)
+		chip->fcc_votable = find_votable("FCC");
+	if (!chip->fcc_votable)
+		return -EINVAL;
+
+	vote(chip->fcc_votable, STEP_CHG_VOTER, true, fcc_ua);
+
+	pr_debug("%s = %d Step-FCC = %duA\n",
+		step_chg_config.prop_name, pval.intval, fcc_ua);
+
+update_time:
+	chip->step_last_update_time = ktime_get();
+	return 0;
+
+reschedule:
+	/* reschedule 1000 us after the remaining time */
+	return (STEP_CHG_HYSTERISIS_DELAY_US - elapsed_us + 1000);
+}
+
+static int handle_jeita(struct step_chg_info *chip)
+{
+	union power_supply_propval pval = {0, };
+	int rc = 0, fcc_ua = 0, fv_uv = 0;
+	u64 elapsed_us;
+
+	rc = power_supply_get_property(chip->batt_psy,
+		POWER_SUPPLY_PROP_SW_JEITA_ENABLED, &pval);
+	if (rc < 0)
+		chip->sw_jeita_enable = 0;
+	else
+		chip->sw_jeita_enable = pval.intval;
+
+	if (!chip->sw_jeita_enable) {
+		if (chip->fcc_votable)
+			vote(chip->fcc_votable, JEITA_VOTER, false, 0);
+		if (chip->fv_votable)
+			vote(chip->fv_votable, JEITA_VOTER, false, 0);
+		return 0;
+	}
+
+	elapsed_us = ktime_us_delta(ktime_get(), chip->jeita_last_update_time);
+	if (elapsed_us < STEP_CHG_HYSTERISIS_DELAY_US)
+		goto reschedule;
+
+	rc = power_supply_get_property(chip->batt_psy,
+				jeita_fcc_config.psy_prop, &pval);
+	if (rc < 0) {
+		pr_err("Couldn't read %s property rc=%d\n",
+				jeita_fcc_config.prop_name, rc);
+		return rc;
+	}
+
+	rc = get_val(jeita_fcc_config.fcc_cfg, jeita_fcc_config.hysteresis,
+			chip->jeita_fcc_index,
+			pval.intval,
+			&chip->jeita_fcc_index,
+			&fcc_ua);
+	if (rc < 0) {
+		/* remove the vote if no JEITA-based FCC is found */
+		if (chip->fcc_votable)
+			vote(chip->fcc_votable, JEITA_VOTER, false, 0);
+		goto update_time;
+	}
+
+	if (!chip->fcc_votable)
+		chip->fcc_votable = find_votable("FCC");
+	if (!chip->fcc_votable)
+		/* changing FCC is a must */
+		return -EINVAL;
+
+	vote(chip->fcc_votable, JEITA_VOTER, true, fcc_ua);
+
+	rc = get_val(jeita_fv_config.fv_cfg, jeita_fv_config.hysteresis,
+			chip->jeita_fv_index,
+			pval.intval,
+			&chip->jeita_fv_index,
+			&fv_uv);
+	if (rc < 0) {
+		/* remove the vote if no JEITA-based FV is found */
+		if (chip->fv_votable)
+			vote(chip->fv_votable, JEITA_VOTER, false, 0);
+		goto update_time;
+	}
+
+	chip->fv_votable = find_votable("FV");
+	if (!chip->fv_votable)
+		goto update_time;
+
+	vote(chip->fv_votable, JEITA_VOTER, true, fv_uv);
+
+	pr_debug("%s = %d FCC = %duA FV = %duV\n",
+		jeita_fcc_config.prop_name, pval.intval, fcc_ua, fv_uv);
+
+update_time:
+	chip->jeita_last_update_time = ktime_get();
+	return 0;
+
+reschedule:
+	/* reschedule 1000 us after the remaining time */
+	return (STEP_CHG_HYSTERISIS_DELAY_US - elapsed_us + 1000);
+}
+
+static void status_change_work(struct work_struct *work)
+{
+	struct step_chg_info *chip = container_of(work,
+			struct step_chg_info, status_change_work.work);
+	int rc = 0;
+	int reschedule_us;
+	int reschedule_jeita_work_us = 0;
+	int reschedule_step_work_us = 0;
+	union power_supply_propval pval = {0, };
+
+	if (!is_batt_available(chip)) {
+		__pm_relax(chip->step_chg_ws);
+		return;
+	}
+
+	/* skip jeita and step if not charging */
+	rc = power_supply_get_property(chip->batt_psy,
+		POWER_SUPPLY_PROP_STATUS, &pval);
+	if (pval.intval != POWER_SUPPLY_STATUS_CHARGING) {
+		__pm_relax(chip->step_chg_ws);
+		return;
+	}
+
+	rc = handle_jeita(chip);
+	if (rc > 0)
+		reschedule_jeita_work_us = rc;
+	else if (rc < 0)
+		pr_err("Couldn't handle sw jeita rc = %d\n", rc);
+
+	rc = handle_step_chg_config(chip);
+	if (rc > 0)
+		reschedule_step_work_us = rc;
+	else if (rc < 0)
+		pr_err("Couldn't handle step rc = %d\n", rc);
+
+	reschedule_us = min(reschedule_jeita_work_us, reschedule_step_work_us);
+	if (reschedule_us == 0)
+		__pm_relax(chip->step_chg_ws);
+	else
+		schedule_delayed_work(&chip->status_change_work,
+				usecs_to_jiffies(reschedule_us));
+}
+
+static int step_chg_notifier_call(struct notifier_block *nb,
+		unsigned long ev, void *v)
+{
+	struct power_supply *psy = v;
+	struct step_chg_info *chip = container_of(nb, struct step_chg_info, nb);
+
+	if (ev != PSY_EVENT_PROP_CHANGED)
+		return NOTIFY_OK;
+
+	if (strcmp(psy->desc->name, "battery") == 0) {
+		__pm_stay_awake(chip->step_chg_ws);
+		schedule_delayed_work(&chip->status_change_work, 0);
+	}
+
+	return NOTIFY_OK;
+}
+
+static int step_chg_register_notifier(struct step_chg_info *chip)
+{
+	int rc;
+
+	chip->nb.notifier_call = step_chg_notifier_call;
+	rc = power_supply_reg_notifier(&chip->nb);
+	if (rc < 0) {
+		pr_err("Couldn't register psy notifier rc = %d\n", rc);
+		return rc;
+	}
+
+	return 0;
+}
+
+int qcom_step_chg_init(bool step_chg_enable, bool sw_jeita_enable)
+{
+	int rc;
+	struct step_chg_info *chip;
+
+	if (the_chip) {
+		pr_err("Already initialized\n");
+		return -EINVAL;
+	}
+
+	chip = kzalloc(sizeof(*chip), GFP_KERNEL);
+	if (!chip)
+		return -ENOMEM;
+
+	chip->step_chg_ws = wakeup_source_register("qcom-step-chg");
+	if (!chip->step_chg_ws) {
+		rc = -EINVAL;
+		goto cleanup;
+	}
+
+	chip->step_chg_enable = step_chg_enable;
+	chip->sw_jeita_enable = sw_jeita_enable;
+
+	chip->step_index = -EINVAL;
+	chip->jeita_fcc_index = -EINVAL;
+	chip->jeita_fv_index = -EINVAL;
+
+	if (step_chg_enable && (!step_chg_config.psy_prop ||
+				!step_chg_config.prop_name)) {
+		/* fail if step-chg configuration is invalid */
+		pr_err("Step-chg configuration not defined - fail\n");
+		rc = -ENODATA;
+		goto release_wakeup_source;
+	}
+
+	if (sw_jeita_enable && (!jeita_fcc_config.psy_prop ||
+				!jeita_fcc_config.prop_name)) {
+		/* fail if the SW JEITA FCC configuration is invalid */
+		pr_err("Jeita FCC TEMP configuration not defined - fail\n");
+		rc = -ENODATA;
+		goto release_wakeup_source;
+	}
+
+	if (sw_jeita_enable && (!jeita_fv_config.psy_prop ||
+				!jeita_fv_config.prop_name)) {
+		/* fail if the SW JEITA FV configuration is invalid */
+		pr_err("Jeita FV TEMP configuration not defined - fail\n");
+		rc = -ENODATA;
+		goto release_wakeup_source;
+	}
+
+	INIT_DELAYED_WORK(&chip->status_change_work, status_change_work);
+
+	rc = step_chg_register_notifier(chip);
+	if (rc < 0) {
+		pr_err("Couldn't register psy notifier rc = %d\n", rc);
+		goto release_wakeup_source;
+	}
+
+	the_chip = chip;
+
+	if (step_chg_enable)
+		pr_info("Step charging enabled. Using %s source\n",
+				step_chg_config.prop_name);
+
+	return 0;
+
+release_wakeup_source:
+	wakeup_source_unregister(chip->step_chg_ws);
+cleanup:
+	kfree(chip);
+	return rc;
+}
+
+void qcom_step_chg_deinit(void)
+{
+	struct step_chg_info *chip = the_chip;
+
+	if (!chip)
+		return;
+
+	cancel_delayed_work_sync(&chip->status_change_work);
+	power_supply_unreg_notifier(&chip->nb);
+	wakeup_source_unregister(chip->step_chg_ws);
+	the_chip = NULL;
+	kfree(chip);
+}
diff -Nruw linux-4.4.115-fbx/drivers/power/supply/qcom/step-chg-jeita.h linux-4.4.115-fbx/drivers/power/supply/qcom/step-chg-jeita.h
--- linux-4.4.115-fbx/drivers/power/supply/qcom/step-chg-jeita.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/power/supply/qcom/step-chg-jeita.h	2019-01-22 16:16:26.239271183 +0100
@@ -0,0 +1,17 @@
+/* Copyright (c) 2017 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __STEP_CHG_H__
+#define __STEP_CHG_H__
+int qcom_step_chg_init(bool, bool);
+void qcom_step_chg_deinit(void);
+#endif /* __STEP_CHG_H__ */
diff -Nruw linux-4.4.115-fbx/drivers/power/supply/qcom/storm-watch.c linux-4.4.115-fbx/drivers/power/supply/qcom/storm-watch.c
--- linux-4.4.115-fbx/drivers/power/supply/qcom/storm-watch.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/power/supply/qcom/storm-watch.c	2019-01-22 16:16:26.239271183 +0100
@@ -0,0 +1,76 @@
+/* Copyright (c) 2016-2017 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include "storm-watch.h"
+
+/**
+ * is_storming(): Check if an event is storming
+ *
+ * @data: Data for tracking an event storm
+ *
+ * Returns true if a storm has been detected, false otherwise.
+ */
+bool is_storming(struct storm_watch *data)
+{
+	ktime_t curr_kt, delta_kt;
+	bool is_storming = false;
+
+	if (!data)
+		return false;
+
+	if (!data->enabled)
+		return false;
+
+	/* max storm count must be greater than 0 */
+	if (data->max_storm_count <= 0)
+		return false;
+
+	/* the period threshold must be greater than 0ms */
+	if (data->storm_period_ms <= 0)
+		return false;
+
+	mutex_lock(&data->storm_lock);
+	curr_kt = ktime_get_boottime();
+	delta_kt = ktime_sub(curr_kt, data->last_kt);
+
+	if (ktime_to_ms(delta_kt) < data->storm_period_ms)
+		data->storm_count++;
+	else
+		data->storm_count = 0;
+
+	if (data->storm_count > data->max_storm_count) {
+		is_storming = true;
+		data->storm_count = 0;
+	}
+
+	data->last_kt = curr_kt;
+	mutex_unlock(&data->storm_lock);
+	return is_storming;
+}
+
+void reset_storm_count(struct storm_watch *data)
+{
+	mutex_lock(&data->storm_lock);
+	data->storm_count = 0;
+	mutex_unlock(&data->storm_lock);
+}
+
+void update_storm_count(struct storm_watch *data, int max_count)
+{
+	if (!data)
+		return;
+
+	mutex_lock(&data->storm_lock);
+	data->max_storm_count = max_count;
+	mutex_unlock(&data->storm_lock);
+}
diff -Nruw linux-4.4.115-fbx/drivers/power/supply/qcom/storm-watch.h linux-4.4.115-fbx/drivers/power/supply/qcom/storm-watch.h
--- linux-4.4.115-fbx/drivers/power/supply/qcom/storm-watch.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/power/supply/qcom/storm-watch.h	2019-01-22 16:16:26.239271183 +0100
@@ -0,0 +1,41 @@
+/* Copyright (c) 2016-2017 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __STORM_WATCH_H
+#define __STORM_WATCH_H
+#include <linux/ktime.h>
+#include <linux/mutex.h>
+
+/**
+ * struct storm_watch - data used to track an event storm
+ *
+ * @enabled:         When false, is_storming() always reports no storm.
+ * @storm_period_ms: The maximum time interval between two events. If this
+ *                   limit is exceeded, the event chain is broken and removed
+ *                   from consideration for a storm.
+ * @max_storm_count: The number of chained events required to trigger a storm.
+ * @storm_count:     The current number of chained events.
+ * @last_kt:         Kernel time of the last event seen.
+ * @storm_lock:      Mutex protecting the storm_watch data.
+ */
+struct storm_watch {
+	bool		enabled;
+	int		storm_period_ms;
+	int		max_storm_count;
+	int		storm_count;
+	ktime_t		last_kt;
+	struct mutex	storm_lock;
+};
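+
+/*
+ * Typical usage (an illustrative sketch; the embedding driver owns the
+ * instance and chooses the thresholds):
+ *
+ *	static struct storm_watch irq_storm = {
+ *		.enabled	 = true,
+ *		.storm_period_ms = 500,
+ *		.max_storm_count = 5,
+ *	};
+ *
+ * After mutex_init(&irq_storm.storm_lock), call is_storming(&irq_storm)
+ * from the event handler; it returns true once more than five
+ * consecutive events arrive less than 500 ms apart, at which point the
+ * caller can throttle or disable the event source.
+ */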
+
+bool is_storming(struct storm_watch *data);
+void reset_storm_count(struct storm_watch *data);
+void update_storm_count(struct storm_watch *data, int max_count);
+#endif
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/pwm/pwm-qpnp.c	2019-10-29 09:26:24.641212945 +0100
@@ -0,0 +1,2255 @@
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+/*
+ * Qualcomm Technologies, Inc. QPNP Pulse Width Modulation (PWM) driver
+ *
+ * The HW module is also called LPG (Light Pattern Generator).
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/spmi.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_address.h>
+#include <linux/radix-tree.h>
+#include <linux/qpnp/pwm.h>
+
+#define QPNP_LPG_DRIVER_NAME	"qcom,qpnp-pwm"
+#define QPNP_LPG_CHANNEL_BASE	"qpnp-lpg-channel-base"
+#define QPNP_LPG_LUT_BASE	"qpnp-lpg-lut-base"
+
+#define QPNP_PWM_MODE_ONLY_SUB_TYPE	0x0B
+#define QPNP_LPG_CHAN_SUB_TYPE		0x2
+#define QPNP_LPG_S_CHAN_SUB_TYPE	0x11
+
+/* LPG Control for LPG_PATTERN_CONFIG */
+#define QPNP_RAMP_DIRECTION_SHIFT	4
+#define QPNP_RAMP_DIRECTION_MASK	0x10
+#define QPNP_PATTERN_REPEAT_SHIFT	3
+#define QPNP_PATTERN_REPEAT_MASK	0x08
+#define QPNP_RAMP_TOGGLE_SHIFT		2
+#define QPNP_RAMP_TOGGLE_MASK		0x04
+#define QPNP_EN_PAUSE_HI_SHIFT		1
+#define QPNP_EN_PAUSE_HI_MASK		0x02
+#define QPNP_EN_PAUSE_LO_MASK		0x01
+
+/* LPG Control for LPG_PWM_SIZE_CLK */
+#define QPNP_PWM_SIZE_SHIFT_SUB_TYPE		2
+#define QPNP_PWM_SIZE_MASK_SUB_TYPE		0x4
+#define QPNP_PWM_FREQ_CLK_SELECT_MASK_SUB_TYPE	0x03
+#define QPNP_PWM_SIZE_9_BIT_SUB_TYPE		0x01
+
+#define QPNP_SET_PWM_CLK_SUB_TYPE(val, clk, pwm_size) \
+do { \
+	val = (clk + 1) & QPNP_PWM_FREQ_CLK_SELECT_MASK_SUB_TYPE; \
+	val |= (((pwm_size > 6 ? QPNP_PWM_SIZE_9_BIT_SUB_TYPE : 0) << \
+		QPNP_PWM_SIZE_SHIFT_SUB_TYPE) & QPNP_PWM_SIZE_MASK_SUB_TYPE); \
+} while (0)
+
+#define QPNP_GET_PWM_SIZE_SUB_TYPE(reg) ((reg & QPNP_PWM_SIZE_MASK_SUB_TYPE) \
+				>> QPNP_PWM_SIZE_SHIFT_SUB_TYPE)
+
+#define QPNP_PWM_SIZE_SHIFT			4
+#define QPNP_PWM_SIZE_MASK			0x30
+#define QPNP_PWM_FREQ_CLK_SELECT_MASK		0x03
+#define QPNP_MIN_PWM_BIT_SIZE		6
+#define QPNP_MAX_PWM_BIT_SIZE		9
+#define QPNP_PWM_SIZES_SUPPORTED	10
+
+#define QPNP_SET_PWM_CLK(val, clk, pwm_size) \
+do { \
+	val = (clk + 1) & QPNP_PWM_FREQ_CLK_SELECT_MASK; \
+	val |= (((pwm_size - QPNP_MIN_PWM_BIT_SIZE) << \
+		QPNP_PWM_SIZE_SHIFT) & QPNP_PWM_SIZE_MASK); \
+} while (0)
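+
+/*
+ * For example (hypothetical inputs), QPNP_SET_PWM_CLK(val, 2, 9) yields
+ * val = ((2 + 1) & 0x03) | (((9 - 6) << 4) & 0x30) = 0x33.
+ */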
+
+#define QPNP_GET_PWM_SIZE(reg) ((reg & QPNP_PWM_SIZE_MASK) \
+				>> QPNP_PWM_SIZE_SHIFT)
+
+/* LPG Control for LPG_PWM_FREQ_PREDIV_CLK */
+#define QPNP_PWM_FREQ_PRE_DIVIDE_SHIFT		5
+#define QPNP_PWM_FREQ_PRE_DIVIDE_MASK		0x60
+#define QPNP_PWM_FREQ_EXP_MASK			0x07
+
+#define QPNP_SET_PWM_FREQ_PREDIV(val, pre_div, pre_div_exp) \
+do { \
+	val = (pre_div << QPNP_PWM_FREQ_PRE_DIVIDE_SHIFT) & \
+				QPNP_PWM_FREQ_PRE_DIVIDE_MASK;	\
+	val |= (pre_div_exp & QPNP_PWM_FREQ_EXP_MASK);	\
+} while (0)
+
+/* LPG Control for LPG_PWM_TYPE_CONFIG */
+#define QPNP_EN_GLITCH_REMOVAL_SHIFT		5
+#define QPNP_EN_GLITCH_REMOVAL_MASK		0x20
+#define QPNP_EN_FULL_SCALE_SHIFT		3
+#define QPNP_EN_FULL_SCALE_MASK			0x08
+#define QPNP_EN_PHASE_STAGGER_SHIFT		2
+#define QPNP_EN_PHASE_STAGGER_MASK		0x04
+#define QPNP_PHASE_STAGGER_MASK			0x03
+
+/* LPG Control for PWM_VALUE_LSB */
+#define QPNP_PWM_VALUE_LSB_MASK			0xFF
+
+/* LPG Control for PWM_VALUE_MSB */
+#define QPNP_PWM_VALUE_MSB_SHIFT		8
+#define QPNP_PWM_VALUE_MSB_MASK			0x01
+
+/* LPG Control for ENABLE_CONTROL */
+#define QPNP_EN_PWM_HIGH_SHIFT			7
+#define QPNP_EN_PWM_HIGH_MASK			0x80
+#define QPNP_EN_PWM_LO_SHIFT			6
+#define QPNP_EN_PWM_LO_MASK			0x40
+#define QPNP_EN_PWM_OUTPUT_SHIFT		5
+#define QPNP_EN_PWM_OUTPUT_MASK			0x20
+#define QPNP_PWM_SRC_SELECT_SHIFT		2
+#define QPNP_PWM_SRC_SELECT_MASK		0x04
+#define QPNP_PWM_EN_RAMP_GEN_SHIFT		1
+#define QPNP_PWM_EN_RAMP_GEN_MASK		0x02
+
+/* LPG Control for PWM_SYNC */
+#define QPNP_PWM_SYNC_VALUE			0x01
+#define QPNP_PWM_SYNC_MASK			0x01
+
+/* LPG Control for RAMP_CONTROL */
+#define QPNP_RAMP_START_MASK			0x01
+
+#define QPNP_ENABLE_LUT_V0(value) (value |= QPNP_RAMP_START_MASK)
+#define QPNP_DISABLE_LUT_V0(value) (value &= ~QPNP_RAMP_START_MASK)
+#define QPNP_ENABLE_LUT_V1(value, id) (value |= BIT(id))
+
+/* LPG Control for RAMP_STEP_DURATION_LSB */
+#define QPNP_RAMP_STEP_DURATION_LSB_MASK	0xFF
+
+/* LPG Control for RAMP_STEP_DURATION_MSB */
+#define QPNP_RAMP_STEP_DURATION_MSB_SHIFT	8
+#define QPNP_RAMP_STEP_DURATION_MSB_MASK	0x01
+
+#define QPNP_PWM_1KHZ				1024
+#define QPNP_GET_RAMP_STEP_DURATION(ramp_time_ms) \
+		((ramp_time_ms * QPNP_PWM_1KHZ) / 1000)
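+
+/*
+ * The ramp step appears to be programmed in ticks of the 1024 Hz clock:
+ * a 100 ms step becomes (100 * 1024) / 1000 = 102 ticks.
+ */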
+
+/* LPG Control for PAUSE_HI_MULTIPLIER_LSB */
+#define QPNP_PAUSE_HI_MULTIPLIER_LSB_MASK	0xFF
+
+/* LPG Control for PAUSE_HI_MULTIPLIER_MSB */
+#define QPNP_PAUSE_HI_MULTIPLIER_MSB_SHIFT	8
+#define QPNP_PAUSE_HI_MULTIPLIER_MSB_MASK	0x1F
+
+/* LPG Control for PAUSE_LO_MULTIPLIER_LSB */
+#define QPNP_PAUSE_LO_MULTIPLIER_LSB_MASK	0xFF
+
+/* LPG Control for PAUSE_LO_MULTIPLIER_MSB */
+#define QPNP_PAUSE_LO_MULTIPLIER_MSB_SHIFT	8
+#define QPNP_PAUSE_LO_MULTIPLIER_MSB_MASK	0x1F
+
+/* LPG Control for HI_INDEX */
+#define QPNP_HI_INDEX_MASK			0x3F
+
+/* LPG Control for LO_INDEX */
+#define QPNP_LO_INDEX_MASK			0x3F
+
+/* LPG DTEST */
+#define QPNP_LPG_DTEST_LINE_MAX			4
+#define QPNP_LPG_DTEST_OUTPUT_MAX		5
+#define QPNP_LPG_DTEST_OUTPUT_MASK		0x07
+
+/* PWM DTEST */
+#define QPNP_PWM_DTEST_LINE_MAX			2
+#define QPNP_PWM_DTEST_OUTPUT_MAX		2
+#define QPNP_PWM_DTEST_OUTPUT_MASK		0x03
+
+#define NUM_CLOCKS				3
+#define QPNP_PWM_M_MAX				7
+#define NSEC_1024HZ	(NSEC_PER_SEC / 1024)
+#define NSEC_32768HZ	(NSEC_PER_SEC / 32768)
+#define NSEC_19P2MHZ	(NSEC_PER_SEC / 19200000)
+
+#define NUM_LPG_PRE_DIVIDE	4
+
+#define PRE_DIVIDE_1		1
+#define PRE_DIVIDE_3		3
+#define PRE_DIVIDE_5		5
+#define PRE_DIVIDE_6		6
+
+#define SPMI_LPG_REG_BASE_OFFSET	0x40
+#define SPMI_LPG_REVISION2_OFFSET	0x1
+#define SPMI_LPG_REV1_RAMP_CONTROL_OFFSET	0x86
+#define SPMI_LPG_SUB_TYPE_OFFSET	0x5
+#define SPMI_LPG_PWM_SYNC		0x7
+#define SPMI_LPG_REG_ADDR(b, n)	(b + SPMI_LPG_REG_BASE_OFFSET + (n))
+#define SPMI_MAX_BUF_LEN	8
+
+#define QPNP_PWM_LUT_NOT_SUPPORTED	0x1
+
+/* Supported PWM sizes */
+#define QPNP_PWM_SIZE_6_BIT		6
+#define QPNP_PWM_SIZE_7_BIT		7
+#define QPNP_PWM_SIZE_8_BIT		8
+#define QPNP_PWM_SIZE_9_BIT		9
+
+#define QPNP_PWM_SIZE_6_9_BIT		0x9
+#define QPNP_PWM_SIZE_7_8_BIT		0x6
+#define QPNP_PWM_SIZE_6_7_9_BIT		0xB
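+
+/*
+ * These values appear to encode each supported size as bit (size - 6):
+ * 0x9 = {6, 9} bits, 0x6 = {7, 8} bits, 0xB = {6, 7, 9} bits.
+ */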
+
+/*
+ * Registers that don't need to be cached are defined below from an offset
+ * of SPMI_LPG_REG_BASE_OFFSET.
+ */
+#define QPNP_LPG_SEC_ACCESS		0x90
+#define QPNP_LPG_DTEST			0xA2
+
+/* Supported time levels */
+enum time_level {
+	LVL_NSEC,
+	LVL_USEC,
+};
+
+/* LPG revisions */
+enum qpnp_lpg_revision {
+	QPNP_LPG_REVISION_0 = 0x0,
+	QPNP_LPG_REVISION_1 = 0x1,
+};
+
+/* LPG LUT MODE STATE */
+enum qpnp_lut_state {
+	QPNP_LUT_ENABLE = 0x0,
+	QPNP_LUT_DISABLE = 0x1,
+};
+
+/* PWM MODE STATE */
+enum qpnp_pwm_state {
+	QPNP_PWM_ENABLE = 0x0,
+	QPNP_PWM_DISABLE = 0x1,
+};
+
+/* SPMI LPG registers */
+enum qpnp_lpg_registers_list {
+	QPNP_LPG_PATTERN_CONFIG,
+	QPNP_LPG_PWM_SIZE_CLK,
+	QPNP_LPG_PWM_FREQ_PREDIV_CLK,
+	QPNP_LPG_PWM_TYPE_CONFIG,
+	QPNP_PWM_VALUE_LSB,
+	QPNP_PWM_VALUE_MSB,
+	QPNP_ENABLE_CONTROL,
+	QPNP_RAMP_CONTROL,
+	QPNP_RAMP_STEP_DURATION_LSB = QPNP_RAMP_CONTROL + 9,
+	QPNP_RAMP_STEP_DURATION_MSB,
+	QPNP_PAUSE_HI_MULTIPLIER_LSB,
+	QPNP_PAUSE_HI_MULTIPLIER_MSB,
+	QPNP_PAUSE_LO_MULTIPLIER_LSB,
+	QPNP_PAUSE_LO_MULTIPLIER_MSB,
+	QPNP_HI_INDEX,
+	QPNP_LO_INDEX,
+	QPNP_TOTAL_LPG_SPMI_REGISTERS
+};
+
+/*
+ * Formula from HSID:
+ * pause_time (hi/lo) = (pause_cnt - 1) * ramp_ms
+ * or, equivalently,
+ * pause_cnt = (pause_time / ramp_ms) + 1
+ */
+#define QPNP_SET_PAUSE_CNT(to_pause_cnt, from_pause, ramp_ms) \
+	(to_pause_cnt = (from_pause / (ramp_ms ? ramp_ms : 1)) + 1)
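+
+/*
+ * For example (hypothetical values), a 500 ms pause with a 50 ms ramp
+ * step gives pause_cnt = (500 / 50) + 1 = 11.
+ */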
+
+
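+/*
+ * pt_t[pre_div][clk] holds the effective tick period in ns for each
+ * pre-divide/clock pair, before the 2^m exponent is applied.
+ */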
+static unsigned int pt_t[NUM_LPG_PRE_DIVIDE][NUM_CLOCKS] = {
+	{	PRE_DIVIDE_1 * NSEC_1024HZ,
+		PRE_DIVIDE_1 * NSEC_32768HZ,
+		PRE_DIVIDE_1 * NSEC_19P2MHZ,
+	},
+	{	PRE_DIVIDE_3 * NSEC_1024HZ,
+		PRE_DIVIDE_3 * NSEC_32768HZ,
+		PRE_DIVIDE_3 * NSEC_19P2MHZ,
+	},
+	{	PRE_DIVIDE_5 * NSEC_1024HZ,
+		PRE_DIVIDE_5 * NSEC_32768HZ,
+		PRE_DIVIDE_5 * NSEC_19P2MHZ,
+	},
+	{	PRE_DIVIDE_6 * NSEC_1024HZ,
+		PRE_DIVIDE_6 * NSEC_32768HZ,
+		PRE_DIVIDE_6 * NSEC_19P2MHZ,
+	},
+};
+
+struct qpnp_lut_config {
+	u8	*duty_pct_list;
+	int	list_len;
+	int	ramp_index;
+	int	lo_index;
+	int	hi_index;
+	int	lut_pause_hi_cnt;
+	int	lut_pause_lo_cnt;
+	int	ramp_step_ms;
+	bool	ramp_direction;
+	bool	pattern_repeat;
+	bool	ramp_toggle;
+	bool	enable_pause_hi;
+	bool	enable_pause_lo;
+};
+
+struct qpnp_lpg_config {
+	struct qpnp_lut_config	lut_config;
+	u16			base_addr;
+	u16			lut_base_addr;
+	u16			lut_size;
+};
+
+struct _qpnp_pwm_config {
+	int				pwm_value;
+	int				pwm_period;	/* in microseconds */
+	int				pwm_duty;	/* in microseconds */
+	struct pwm_period_config	period;
+	int				supported_sizes;
+	int				force_pwm_size;
+	bool				update_period;
+};
+
+/* Public facing structure */
+struct qpnp_pwm_chip {
+	struct platform_device	*pdev;
+	struct regmap		*regmap;
+	struct pwm_chip         chip;
+	bool			enabled;
+	struct _qpnp_pwm_config	pwm_config;
+	struct qpnp_lpg_config	lpg_config;
+	enum pm_pwm_mode	pwm_mode;
+	spinlock_t		lpg_lock;
+	enum qpnp_lpg_revision	revision;
+	u8			sub_type;
+	u32			flags;
+	u8	qpnp_lpg_registers[QPNP_TOTAL_LPG_SPMI_REGISTERS];
+	int			channel_id;
+	const char		*channel_owner;
+	u32			dtest_line;
+	u32			dtest_output;
+	bool			in_test_mode;
+};
+
+/* Internal functions */
+static inline struct qpnp_pwm_chip *qpnp_pwm_from_pwm_dev(
+					struct pwm_device *pwm)
+{
+	return container_of(pwm->chip, struct qpnp_pwm_chip, chip);
+}
+
+static inline struct qpnp_pwm_chip *qpnp_pwm_from_pwm_chip(
+					struct pwm_chip *chip)
+{
+	return container_of(chip, struct qpnp_pwm_chip, chip);
+}
+
+static inline void qpnp_set_pattern_config(u8 *val,
+			struct qpnp_lut_config *lut_config)
+{
+	*val = lut_config->enable_pause_lo & QPNP_EN_PAUSE_LO_MASK;
+	*val |= (lut_config->enable_pause_hi << QPNP_EN_PAUSE_HI_SHIFT) &
+						QPNP_EN_PAUSE_HI_MASK;
+	*val |= (lut_config->ramp_toggle << QPNP_RAMP_TOGGLE_SHIFT) &
+						QPNP_RAMP_TOGGLE_MASK;
+	*val |= (lut_config->pattern_repeat << QPNP_PATTERN_REPEAT_SHIFT) &
+						QPNP_PATTERN_REPEAT_MASK;
+	*val |= (lut_config->ramp_direction << QPNP_RAMP_DIRECTION_SHIFT) &
+						QPNP_RAMP_DIRECTION_MASK;
+}
+
+static inline void qpnp_set_pwm_type_config(u8 *val, bool glitch,
+			bool full_scale, bool en_phase, bool phase)
+{
+	*val = phase;
+	*val |= (en_phase << QPNP_EN_PHASE_STAGGER_SHIFT) &
+				QPNP_EN_PHASE_STAGGER_MASK;
+	*val |= (full_scale << QPNP_EN_FULL_SCALE_SHIFT) &
+				QPNP_EN_FULL_SCALE_MASK;
+	*val |= (glitch << QPNP_EN_GLITCH_REMOVAL_SHIFT) &
+				QPNP_EN_GLITCH_REMOVAL_MASK;
+}
+
+static int qpnp_set_control(struct qpnp_pwm_chip *chip, bool pwm_hi,
+		bool pwm_lo, bool pwm_out, bool pwm_src, bool ramp_gen)
+{
+	int value;
+
+	value = (ramp_gen << QPNP_PWM_EN_RAMP_GEN_SHIFT) |
+		(pwm_src << QPNP_PWM_SRC_SELECT_SHIFT) |
+		(pwm_lo << QPNP_EN_PWM_LO_SHIFT) |
+		(pwm_hi << QPNP_EN_PWM_HIGH_SHIFT);
+	if (chip->sub_type != QPNP_LPG_S_CHAN_SUB_TYPE)
+		value |= (pwm_out << QPNP_EN_PWM_OUTPUT_SHIFT);
+	return value;
+}
+
+#define QPNP_ENABLE_LUT_CONTROL(chip) \
+	qpnp_set_control((chip), 0, 0, 0, 0, 1)
+#define QPNP_ENABLE_PWM_CONTROL(chip) \
+	qpnp_set_control((chip), 0, 0, 0, 1, 0)
+#define QPNP_ENABLE_PWM_MODE(chip) \
+	qpnp_set_control((chip), 1, 1, 1, 1, 0)
+#define QPNP_ENABLE_PWM_MODE_GPLED_CHANNEL(chip) \
+	qpnp_set_control((chip), 1, 1, 1, 1, 1)
+#define QPNP_ENABLE_LPG_MODE(chip) \
+	qpnp_set_control((chip), 1, 1, 1, 0, 1)
+#define QPNP_DISABLE_PWM_MODE(chip) \
+	qpnp_set_control((chip), 0, 0, 0, 1, 0)
+#define QPNP_DISABLE_LPG_MODE(chip) \
+	qpnp_set_control((chip), 0, 0, 0, 0, 1)
+#define QPNP_IS_PWM_CONFIG_SELECTED(val) (val & QPNP_PWM_SRC_SELECT_MASK)
+
+#define QPNP_ENABLE_PWM_MODE_ONLY_SUB_TYPE			0x80
+#define QPNP_DISABLE_PWM_MODE_ONLY_SUB_TYPE			0x0
+#define QPNP_PWM_MODE_ONLY_ENABLE_DISABLE_MASK_SUB_TYPE	0x80
+
+static inline void qpnp_convert_to_lut_flags(int *flags,
+				struct qpnp_lut_config *l_config)
+{
+	*flags = ((l_config->ramp_direction ? PM_PWM_LUT_RAMP_UP : 0) |
+		(l_config->pattern_repeat ? PM_PWM_LUT_LOOP : 0)|
+		(l_config->ramp_toggle ? PM_PWM_LUT_REVERSE : 0) |
+		(l_config->enable_pause_hi ? PM_PWM_LUT_PAUSE_HI_EN : 0) |
+		(l_config->enable_pause_lo ? PM_PWM_LUT_PAUSE_LO_EN : 0));
+}
+
+static inline void qpnp_set_lut_params(struct lut_params *l_params,
+		struct qpnp_lut_config *l_config, int s_idx, int size)
+{
+	l_params->start_idx = s_idx;
+	l_params->idx_len = size;
+	l_params->lut_pause_hi = l_config->lut_pause_hi_cnt;
+	l_params->lut_pause_lo = l_config->lut_pause_lo_cnt;
+	l_params->ramp_step_ms = l_config->ramp_step_ms;
+	qpnp_convert_to_lut_flags(&l_params->flags, l_config);
+}
+
+static void qpnp_lpg_save(u8 *u8p, u8 mask, u8 val)
+{
+	*u8p &= ~mask;
+	*u8p |= val & mask;
+}
+
+static int qpnp_lpg_save_and_write(u8 value, u8 mask, u8 *reg, u16 addr,
+				u16 size, struct qpnp_pwm_chip *chip)
+{
+	qpnp_lpg_save(reg, mask, value);
+
+	return regmap_bulk_write(chip->regmap, addr, reg, size);
+}
+
+/*
+ * PWM Frequency = Clock Frequency / (N * T)
+ *	or
+ * PWM Period = Clock Period * (N * T)
+ *	where
+ * N = 2^9 or 2^6 for 9-bit or 6-bit PWM size
+ * T = Pre-divide * 2^m, where m = 0..7 (exponent)
+ *
+ * This is the formula to figure out m for the best pre-divide and clock:
+ * (PWM Period / N) = (Pre-divide * Clock Period) * 2^m
+ */
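+/*
+ * Worked example (illustrative numbers): a requested period of 1 ms at
+ * the minimum 6-bit size gives PWM Period / N = 1000000 ns / 64
+ * = 15625 ns. The closest match in the search below is the 19.2 MHz
+ * clock (52 ns tick), pre-divide 5, m = 6: 5 * 52 ns * 2^6 = 16640 ns.
+ * The size adaptation that follows trades three exponent steps for
+ * three bits of resolution (n = 9, m = 3), for an actual period of
+ * 512 * 5 * 52 ns * 2^3 = 1064960 ns, about 6.5% above the request.
+ */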
+static void qpnp_lpg_calc_period(enum time_level tm_lvl,
+				unsigned int period_value,
+				struct qpnp_pwm_chip *chip)
+{
+	int		n, m, clk, div;
+	int		best_m, best_div, best_clk;
+	unsigned int	last_err, cur_err, min_err;
+	unsigned int	tmp_p, period_n;
+	int             supported_sizes = chip->pwm_config.supported_sizes;
+	int		force_pwm_size = chip->pwm_config.force_pwm_size;
+	struct pwm_period_config *period = &chip->pwm_config.period;
+
+	/* PWM Period / N */
+	if (supported_sizes == QPNP_PWM_SIZE_7_8_BIT)
+		n = 7;
+	else
+		n = 6;
+
+	if (tm_lvl == LVL_USEC) {
+		if (period_value < ((unsigned int)(-1) / NSEC_PER_USEC)) {
+			period_n = (period_value * NSEC_PER_USEC) >> n;
+		} else {
+			if (supported_sizes == QPNP_PWM_SIZE_7_8_BIT)
+				n = 8;
+			else
+				n = 9;
+			period_n = (period_value >> n) * NSEC_PER_USEC;
+		}
+	} else {
+		period_n = period_value >> n;
+	}
+
+	if (force_pwm_size != 0) {
+		if (n < force_pwm_size)
+			period_n = period_n >> (force_pwm_size - n);
+		else
+			period_n = period_n << (n - force_pwm_size);
+		n = force_pwm_size;
+		pr_info("LPG channel %d: pwm size forced to %d\n",
+					chip->channel_id, n);
+	}
+
+	min_err = last_err = (unsigned int)(-1);
+	best_m = 0;
+	best_clk = 0;
+	best_div = 0;
+	for (clk = 0; clk < NUM_CLOCKS; clk++) {
+		for (div = 0; div < NUM_LPG_PRE_DIVIDE; div++) {
+			/* period_n = (PWM Period / N) */
+			/* tmp_p = (Pre-divide * Clock Period) * 2^m */
+			tmp_p = pt_t[div][clk];
+			for (m = 0; m <= QPNP_PWM_M_MAX; m++) {
+				if (period_n > tmp_p)
+					cur_err = period_n - tmp_p;
+				else
+					cur_err = tmp_p - period_n;
+
+				if (cur_err < min_err) {
+					min_err = cur_err;
+					best_m = m;
+					best_clk = clk;
+					best_div = div;
+				}
+
+				if (m && cur_err > last_err)
+					/* Break for bigger cur_err */
+					break;
+
+				last_err = cur_err;
+				tmp_p <<= 1;
+			}
+		}
+	}
+
+	/* Adapt to optimal pwm size, the higher the resolution the better */
+	if (!force_pwm_size) {
+		if (supported_sizes == QPNP_PWM_SIZE_7_8_BIT) {
+			if (n == 7 && best_m >= 1) {
+				n += 1;
+				best_m -= 1;
+			}
+		} else if (n == 6) {
+			if (best_m >= 3) {
+				n += 3;
+				best_m -= 3;
+			} else if (best_m >= 1 && (
+				chip->sub_type != QPNP_PWM_MODE_ONLY_SUB_TYPE &&
+				chip->sub_type != QPNP_LPG_S_CHAN_SUB_TYPE)) {
+				n += 1;
+				best_m -= 1;
+			}
+		}
+	}
+
+	period->pwm_size = n;
+	period->clk = best_clk;
+	period->pre_div = best_div;
+	period->pre_div_exp = best_m;
+}
+
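+/*
+ * For example (hypothetical values), with a 9-bit size a 250000 ns duty
+ * in a 1000000 ns period gives pwm_value = (250000 << 9) / 1000000
+ * = 128, i.e. 25% of the 511 full-scale value.
+ */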
+static void qpnp_lpg_calc_pwm_value(struct _qpnp_pwm_config *pwm_config,
+				      unsigned int period_value,
+				      unsigned int duty_value)
+{
+	unsigned int max_pwm_value, tmp;
+
+	/* Figure out pwm_value with overflow handling */
+	tmp = 1 << (sizeof(tmp) * 8 - pwm_config->period.pwm_size);
+	if (duty_value < tmp) {
+		tmp = duty_value << pwm_config->period.pwm_size;
+		pwm_config->pwm_value = tmp / period_value;
+	} else {
+		tmp = period_value >> pwm_config->period.pwm_size;
+		pwm_config->pwm_value = duty_value / tmp;
+	}
+	max_pwm_value = (1 << pwm_config->period.pwm_size) - 1;
+	if (pwm_config->pwm_value > max_pwm_value)
+		pwm_config->pwm_value = max_pwm_value;
+	pr_debug("pwm_value: %d\n", pwm_config->pwm_value);
+}
+
+static int qpnp_lpg_change_table(struct qpnp_pwm_chip *chip,
+					int duty_pct[], int raw_value)
+{
+	unsigned int		pwm_value, max_pwm_value;
+	struct qpnp_lut_config	*lut = &chip->lpg_config.lut_config;
+	int			i, pwm_size, rc = 0;
+	int			burst_size = SPMI_MAX_BUF_LEN;
+	int			list_len = lut->list_len << 1;
+	int			offset = (lut->lo_index << 1) - 2;
+
+	pwm_size = QPNP_GET_PWM_SIZE(
+			chip->qpnp_lpg_registers[QPNP_LPG_PWM_SIZE_CLK]) +
+				QPNP_MIN_PWM_BIT_SIZE;
+
+	max_pwm_value = (1 << pwm_size) - 1;
+
+	if (unlikely(lut->list_len != (lut->hi_index - lut->lo_index + 1))) {
+		pr_err("LUT internal Data structure corruption detected\n");
+		pr_err("LUT list size: %d\n", lut->list_len);
+		pr_err("However, index size is: %d\n",
+				(lut->hi_index - lut->lo_index + 1));
+		return -EINVAL;
+	}
+
+	for (i = 0; i < lut->list_len; i++) {
+		if (raw_value)
+			pwm_value = duty_pct[i];
+		else
+			pwm_value = (duty_pct[i] << pwm_size) / 100;
+
+		if (pwm_value > max_pwm_value)
+			pwm_value = max_pwm_value;
+
+		if (chip->pwm_config.supported_sizes == QPNP_PWM_SIZE_7_8_BIT) {
+			lut->duty_pct_list[i] = pwm_value;
+		} else {
+			lut->duty_pct_list[i*2] = pwm_value;
+			lut->duty_pct_list[(i*2)+1] = (pwm_value >>
+			 QPNP_PWM_VALUE_MSB_SHIFT) & QPNP_PWM_VALUE_MSB_MASK;
+		}
+	}
+
+	/*
+	 * For the Keypad Backlight Lookup Table (KPDBL_LUT),
+	 * offset is lo_index.
+	 */
+	if (chip->pwm_config.supported_sizes == QPNP_PWM_SIZE_7_8_BIT)
+		offset = lut->lo_index;
+
+	/* Write with max allowable burst mode, each entry is of two bytes */
+	for (i = 0; i < list_len; i += burst_size) {
+		if (i + burst_size >= list_len)
+			burst_size = list_len - i;
+		rc = regmap_bulk_write(chip->regmap,
+			       chip->lpg_config.lut_base_addr + offset + i,
+			       lut->duty_pct_list + i,
+			       burst_size);
+	}
+
+	return rc;
+}
+
+static void qpnp_lpg_save_period(struct qpnp_pwm_chip *chip)
+{
+	u8 mask, val;
+	struct _qpnp_pwm_config	*pwm_config = &chip->pwm_config;
+
+	if (chip->sub_type == QPNP_PWM_MODE_ONLY_SUB_TYPE) {
+		QPNP_SET_PWM_CLK_SUB_TYPE(val, pwm_config->period.clk,
+				pwm_config->period.pwm_size);
+		mask = QPNP_PWM_SIZE_MASK_SUB_TYPE |
+				QPNP_PWM_FREQ_CLK_SELECT_MASK_SUB_TYPE;
+	} else {
+		QPNP_SET_PWM_CLK(val, pwm_config->period.clk,
+				pwm_config->period.pwm_size);
+		mask = QPNP_PWM_SIZE_MASK | QPNP_PWM_FREQ_CLK_SELECT_MASK;
+	}
+
+	qpnp_lpg_save(&chip->qpnp_lpg_registers[QPNP_LPG_PWM_SIZE_CLK],
+							mask, val);
+
+	QPNP_SET_PWM_FREQ_PREDIV(val, pwm_config->period.pre_div,
+					pwm_config->period.pre_div_exp);
+
+	mask = QPNP_PWM_FREQ_PRE_DIVIDE_MASK | QPNP_PWM_FREQ_EXP_MASK;
+
+	qpnp_lpg_save(&chip->qpnp_lpg_registers[QPNP_LPG_PWM_FREQ_PREDIV_CLK],
+								mask, val);
+}
+
+static int qpnp_lpg_save_pwm_value(struct qpnp_pwm_chip *chip)
+{
+	unsigned int		max_pwm_value;
+	int			pwm_size;
+	u8			mask, value;
+	struct _qpnp_pwm_config	*pwm_config = &chip->pwm_config;
+	struct qpnp_lpg_config	*lpg_config = &chip->lpg_config;
+	int rc;
+
+	if (chip->sub_type == QPNP_PWM_MODE_ONLY_SUB_TYPE)
+		pwm_size = QPNP_GET_PWM_SIZE_SUB_TYPE(
+			chip->qpnp_lpg_registers[QPNP_LPG_PWM_SIZE_CLK]) ?
+				QPNP_MAX_PWM_BIT_SIZE : QPNP_MIN_PWM_BIT_SIZE;
+	else
+		pwm_size = QPNP_GET_PWM_SIZE(
+			chip->qpnp_lpg_registers[QPNP_LPG_PWM_SIZE_CLK]) +
+				QPNP_MIN_PWM_BIT_SIZE;
+
+	max_pwm_value = (1 << pwm_size) - 1;
+
+	if (pwm_config->pwm_value > max_pwm_value)
+		pwm_config->pwm_value = max_pwm_value;
+
+	value = pwm_config->pwm_value;
+	mask = QPNP_PWM_VALUE_LSB_MASK;
+
+	pr_debug("pwm_lsb value:%d\n", value & mask);
+	rc = qpnp_lpg_save_and_write(value, mask,
+			&chip->qpnp_lpg_registers[QPNP_PWM_VALUE_LSB],
+			SPMI_LPG_REG_ADDR(lpg_config->base_addr,
+			QPNP_PWM_VALUE_LSB), 1, chip);
+	if (rc)
+		return rc;
+
+	value = (pwm_config->pwm_value >> QPNP_PWM_VALUE_MSB_SHIFT) &
+					QPNP_PWM_VALUE_MSB_MASK;
+
+	mask = QPNP_PWM_VALUE_MSB_MASK;
+
+	pr_debug("pwm_msb value:%d\n", value);
+	rc = qpnp_lpg_save_and_write(value, mask,
+			&chip->qpnp_lpg_registers[QPNP_PWM_VALUE_MSB],
+			SPMI_LPG_REG_ADDR(lpg_config->base_addr,
+			QPNP_PWM_VALUE_MSB), 1, chip);
+	if (rc)
+		return rc;
+
+	if (chip->sub_type == QPNP_PWM_MODE_ONLY_SUB_TYPE ||
+		chip->sub_type == QPNP_LPG_S_CHAN_SUB_TYPE) {
+		value = QPNP_PWM_SYNC_VALUE & QPNP_PWM_SYNC_MASK;
+		rc = regmap_write(chip->regmap,
+					SPMI_LPG_REG_ADDR(lpg_config->base_addr,
+						SPMI_LPG_PWM_SYNC),
+					value);
+	}
+
+	return rc;
+}
+
+static int qpnp_lpg_configure_pattern(struct qpnp_pwm_chip *chip)
+{
+	struct qpnp_lpg_config	*lpg_config = &chip->lpg_config;
+	struct qpnp_lut_config	*lut_config = &lpg_config->lut_config;
+	u8			value, mask;
+
+	qpnp_set_pattern_config(&value, lut_config);
+
+	mask = QPNP_RAMP_DIRECTION_MASK | QPNP_PATTERN_REPEAT_MASK |
+			QPNP_RAMP_TOGGLE_MASK | QPNP_EN_PAUSE_HI_MASK |
+			QPNP_EN_PAUSE_LO_MASK;
+
+	return qpnp_lpg_save_and_write(value, mask,
+		&chip->qpnp_lpg_registers[QPNP_LPG_PATTERN_CONFIG],
+		SPMI_LPG_REG_ADDR(lpg_config->base_addr,
+		QPNP_LPG_PATTERN_CONFIG), 1, chip);
+}
+
+static int qpnp_lpg_glitch_removal(struct qpnp_pwm_chip *chip, bool enable)
+{
+	struct qpnp_lpg_config	*lpg_config = &chip->lpg_config;
+	u8 value, mask;
+
+	qpnp_set_pwm_type_config(&value, enable ? 1 : 0, 0, 0, 0);
+
+	mask = QPNP_EN_GLITCH_REMOVAL_MASK | QPNP_EN_FULL_SCALE_MASK |
+			QPNP_EN_PHASE_STAGGER_MASK | QPNP_PHASE_STAGGER_MASK;
+
+	pr_debug("pwm_type_config: %d\n", value);
+	return qpnp_lpg_save_and_write(value, mask,
+		&chip->qpnp_lpg_registers[QPNP_LPG_PWM_TYPE_CONFIG],
+		SPMI_LPG_REG_ADDR(lpg_config->base_addr,
+		QPNP_LPG_PWM_TYPE_CONFIG), 1, chip);
+}
+
+static int qpnp_lpg_configure_pwm(struct qpnp_pwm_chip *chip)
+{
+	struct qpnp_lpg_config	*lpg_config = &chip->lpg_config;
+	int rc;
+
+	pr_debug("pwm_size_clk: %d\n",
+		chip->qpnp_lpg_registers[QPNP_LPG_PWM_SIZE_CLK]);
+	rc = regmap_write(chip->regmap,
+		SPMI_LPG_REG_ADDR(lpg_config->base_addr,
+		QPNP_LPG_PWM_SIZE_CLK),
+		chip->qpnp_lpg_registers[QPNP_LPG_PWM_SIZE_CLK]);
+
+	if (rc)
+		return rc;
+
+	pr_debug("pwm_freq_prediv_clk: %d\n",
+		chip->qpnp_lpg_registers[QPNP_LPG_PWM_FREQ_PREDIV_CLK]);
+	rc = regmap_write(chip->regmap,
+		SPMI_LPG_REG_ADDR(lpg_config->base_addr,
+		QPNP_LPG_PWM_FREQ_PREDIV_CLK),
+		chip->qpnp_lpg_registers[QPNP_LPG_PWM_FREQ_PREDIV_CLK]);
+	if (rc)
+		return rc;
+
+	/* Disable glitch removal when LPG/PWM is configured */
+	rc = qpnp_lpg_glitch_removal(chip, false);
+	if (rc) {
+		pr_err("Error in disabling glitch control, rc=%d\n", rc);
+		return rc;
+	}
+	return rc;
+}
+
+static int qpnp_configure_pwm_control(struct qpnp_pwm_chip *chip)
+{
+	struct qpnp_lpg_config	*lpg_config = &chip->lpg_config;
+	u8			value, mask;
+
+	if (chip->sub_type == QPNP_PWM_MODE_ONLY_SUB_TYPE)
+		return 0;
+
+	value = QPNP_ENABLE_PWM_CONTROL(chip);
+
+	mask = QPNP_EN_PWM_HIGH_MASK | QPNP_EN_PWM_LO_MASK |
+		QPNP_PWM_SRC_SELECT_MASK | QPNP_PWM_EN_RAMP_GEN_MASK;
+	if (chip->sub_type != QPNP_LPG_S_CHAN_SUB_TYPE)
+		mask |= QPNP_EN_PWM_OUTPUT_MASK;
+
+	return qpnp_lpg_save_and_write(value, mask,
+		&chip->qpnp_lpg_registers[QPNP_ENABLE_CONTROL],
+		SPMI_LPG_REG_ADDR(lpg_config->base_addr,
+		QPNP_ENABLE_CONTROL), 1, chip);
+}
+
+static int qpnp_configure_lpg_control(struct qpnp_pwm_chip *chip)
+{
+	struct qpnp_lpg_config	*lpg_config = &chip->lpg_config;
+	u8			value, mask;
+
+	value = QPNP_ENABLE_LUT_CONTROL(chip);
+
+	mask = QPNP_EN_PWM_HIGH_MASK | QPNP_EN_PWM_LO_MASK |
+		QPNP_PWM_SRC_SELECT_MASK | QPNP_PWM_EN_RAMP_GEN_MASK;
+	if (chip->sub_type != QPNP_LPG_S_CHAN_SUB_TYPE)
+		mask |= QPNP_EN_PWM_OUTPUT_MASK;
+
+	return qpnp_lpg_save_and_write(value, mask,
+		&chip->qpnp_lpg_registers[QPNP_ENABLE_CONTROL],
+		SPMI_LPG_REG_ADDR(lpg_config->base_addr,
+		QPNP_ENABLE_CONTROL), 1, chip);
+}
+
+static int qpnp_lpg_configure_ramp_step_duration(struct qpnp_pwm_chip *chip)
+{
+	struct qpnp_lpg_config	*lpg_config = &chip->lpg_config;
+	struct qpnp_lut_config	lut_config = lpg_config->lut_config;
+	int			rc, value;
+	u8			val, mask;
+
+	value = QPNP_GET_RAMP_STEP_DURATION(lut_config.ramp_step_ms);
+	val = value & QPNP_RAMP_STEP_DURATION_LSB_MASK;
+	mask = QPNP_RAMP_STEP_DURATION_LSB_MASK;
+
+	rc = qpnp_lpg_save_and_write(val, mask,
+		&chip->qpnp_lpg_registers[QPNP_RAMP_STEP_DURATION_LSB],
+		SPMI_LPG_REG_ADDR(lpg_config->base_addr,
+		QPNP_RAMP_STEP_DURATION_LSB), 1, chip);
+	if (rc)
+		return rc;
+
+	val = (value >> QPNP_RAMP_STEP_DURATION_MSB_SHIFT) &
+				QPNP_RAMP_STEP_DURATION_MSB_MASK;
+
+	mask = QPNP_RAMP_STEP_DURATION_MSB_MASK;
+
+	return qpnp_lpg_save_and_write(val, mask,
+		&chip->qpnp_lpg_registers[QPNP_RAMP_STEP_DURATION_MSB],
+		SPMI_LPG_REG_ADDR(lpg_config->base_addr,
+		QPNP_RAMP_STEP_DURATION_MSB), 1, chip);
+}
+
+static int qpnp_lpg_configure_pause(struct qpnp_pwm_chip *chip)
+{
+	struct qpnp_lpg_config	*lpg_config = &chip->lpg_config;
+	struct qpnp_lut_config	lut_config = lpg_config->lut_config;
+	u8			value, mask;
+	int			rc = 0;
+
+	if (lut_config.enable_pause_hi) {
+		value = lut_config.lut_pause_hi_cnt;
+		mask = QPNP_PAUSE_HI_MULTIPLIER_LSB_MASK;
+
+		rc = qpnp_lpg_save_and_write(value, mask,
+		&chip->qpnp_lpg_registers[QPNP_PAUSE_HI_MULTIPLIER_LSB],
+		SPMI_LPG_REG_ADDR(lpg_config->base_addr,
+		QPNP_PAUSE_HI_MULTIPLIER_LSB), 1, chip);
+		if (rc)
+			return rc;
+
+		value = (lut_config.lut_pause_hi_cnt >>
+			QPNP_PAUSE_HI_MULTIPLIER_MSB_SHIFT) &
+					QPNP_PAUSE_HI_MULTIPLIER_MSB_MASK;
+
+		mask = QPNP_PAUSE_HI_MULTIPLIER_MSB_MASK;
+
+		rc = qpnp_lpg_save_and_write(value, mask,
+		&chip->qpnp_lpg_registers[QPNP_PAUSE_HI_MULTIPLIER_MSB],
+		SPMI_LPG_REG_ADDR(lpg_config->base_addr,
+		QPNP_PAUSE_HI_MULTIPLIER_MSB), 1, chip);
+	} else {
+		value = 0;
+		mask = QPNP_PAUSE_HI_MULTIPLIER_LSB_MASK;
+
+		rc = qpnp_lpg_save_and_write(value, mask,
+		&chip->qpnp_lpg_registers[QPNP_PAUSE_HI_MULTIPLIER_LSB],
+		SPMI_LPG_REG_ADDR(lpg_config->base_addr,
+		QPNP_PAUSE_HI_MULTIPLIER_LSB), 1, chip);
+		if (rc)
+			return rc;
+
+		mask = QPNP_PAUSE_HI_MULTIPLIER_MSB_MASK;
+
+		rc = qpnp_lpg_save_and_write(value, mask,
+		&chip->qpnp_lpg_registers[QPNP_PAUSE_HI_MULTIPLIER_MSB],
+		SPMI_LPG_REG_ADDR(lpg_config->base_addr,
+		QPNP_PAUSE_HI_MULTIPLIER_MSB), 1, chip);
+		if (rc)
+			return rc;
+
+	}
+
+	if (lut_config.enable_pause_lo) {
+		value = lut_config.lut_pause_lo_cnt;
+		mask = QPNP_PAUSE_LO_MULTIPLIER_LSB_MASK;
+
+		rc = qpnp_lpg_save_and_write(value, mask,
+		&chip->qpnp_lpg_registers[QPNP_PAUSE_LO_MULTIPLIER_LSB],
+		SPMI_LPG_REG_ADDR(lpg_config->base_addr,
+		QPNP_PAUSE_LO_MULTIPLIER_LSB), 1, chip);
+		if (rc)
+			return rc;
+
+		value = (lut_config.lut_pause_lo_cnt >>
+				QPNP_PAUSE_LO_MULTIPLIER_MSB_SHIFT) &
+					QPNP_PAUSE_LO_MULTIPLIER_MSB_MASK;
+
+		mask = QPNP_PAUSE_LO_MULTIPLIER_MSB_MASK;
+
+		rc = qpnp_lpg_save_and_write(value, mask,
+		&chip->qpnp_lpg_registers[QPNP_PAUSE_LO_MULTIPLIER_MSB],
+		SPMI_LPG_REG_ADDR(lpg_config->base_addr,
+		QPNP_PAUSE_LO_MULTIPLIER_MSB), 1, chip);
+	} else {
+		value = 0;
+		mask = QPNP_PAUSE_LO_MULTIPLIER_LSB_MASK;
+
+		rc = qpnp_lpg_save_and_write(value, mask,
+		&chip->qpnp_lpg_registers[QPNP_PAUSE_LO_MULTIPLIER_LSB],
+		SPMI_LPG_REG_ADDR(lpg_config->base_addr,
+		QPNP_PAUSE_LO_MULTIPLIER_LSB), 1, chip);
+		if (rc)
+			return rc;
+
+		mask = QPNP_PAUSE_LO_MULTIPLIER_MSB_MASK;
+
+		rc = qpnp_lpg_save_and_write(value, mask,
+		&chip->qpnp_lpg_registers[QPNP_PAUSE_LO_MULTIPLIER_MSB],
+		SPMI_LPG_REG_ADDR(lpg_config->base_addr,
+		QPNP_PAUSE_LO_MULTIPLIER_MSB), 1, chip);
+		return rc;
+	}
+
+	return rc;
+}
+
+static int qpnp_lpg_configure_index(struct qpnp_pwm_chip *chip)
+{
+	struct qpnp_lpg_config	*lpg_config = &chip->lpg_config;
+	struct qpnp_lut_config	lut_config = lpg_config->lut_config;
+	u8			value, mask;
+	int			rc = 0;
+
+	value = lut_config.hi_index;
+	mask = QPNP_HI_INDEX_MASK;
+
+	rc = qpnp_lpg_save_and_write(value, mask,
+		&chip->qpnp_lpg_registers[QPNP_HI_INDEX],
+		SPMI_LPG_REG_ADDR(lpg_config->base_addr,
+		QPNP_HI_INDEX), 1, chip);
+	if (rc)
+		return rc;
+
+	value = lut_config.lo_index;
+	mask = QPNP_LO_INDEX_MASK;
+
+	rc = qpnp_lpg_save_and_write(value, mask,
+		&chip->qpnp_lpg_registers[QPNP_LO_INDEX],
+		SPMI_LPG_REG_ADDR(lpg_config->base_addr,
+		QPNP_LO_INDEX), 1, chip);
+
+	return rc;
+}
+
+static int qpnp_lpg_change_lut(struct qpnp_pwm_chip *chip)
+{
+	int	rc;
+
+	rc = qpnp_lpg_configure_pattern(chip);
+	if (rc) {
+		pr_err("Failed to configure LUT pattern\n");
+		return rc;
+	}
+	rc = qpnp_lpg_configure_pwm(chip);
+	if (rc) {
+		pr_err("Failed to configure PWM\n");
+		return rc;
+	}
+	rc = qpnp_configure_lpg_control(chip);
+	if (rc) {
+		pr_err("Failed to configure LPG control\n");
+		return rc;
+	}
+	rc = qpnp_lpg_configure_ramp_step_duration(chip);
+	if (rc) {
+		pr_err("Failed to configure ramp step duration\n");
+		return rc;
+	}
+	rc = qpnp_lpg_configure_pause(chip);
+	if (rc) {
+		pr_err("Failed to configure pause registers\n");
+		return rc;
+	}
+	rc = qpnp_lpg_configure_index(chip);
+	if (rc) {
+		pr_err("Failed to configure index registers\n");
+		return rc;
+	}
+	return rc;
+}
+
+static int qpnp_dtest_config(struct qpnp_pwm_chip *chip, bool enable)
+{
+	struct qpnp_lpg_config	*lpg_config = &chip->lpg_config;
+	u8			value;
+	u8			mask;
+	u16			addr;
+	int			rc = 0;
+
+	value = 0xA5;
+
+	addr = SPMI_LPG_REG_ADDR(lpg_config->base_addr, QPNP_LPG_SEC_ACCESS);
+
+	rc = regmap_write(chip->regmap, addr, value);
+
+	if (rc) {
+		pr_err("Couldn't set the access for test mode\n");
+		return rc;
+	}
+
+	addr = SPMI_LPG_REG_ADDR(lpg_config->base_addr,
+			QPNP_LPG_DTEST + chip->dtest_line - 1);
+
+	if (chip->sub_type == QPNP_PWM_MODE_ONLY_SUB_TYPE)
+		mask = QPNP_PWM_DTEST_OUTPUT_MASK;
+	else
+		mask = QPNP_LPG_DTEST_OUTPUT_MASK;
+
+	if (enable)
+		value = chip->dtest_output & mask;
+	else
+		value = 0;
+
+	pr_debug("Setting TEST mode for channel %d addr:%x value: %x\n",
+		chip->channel_id, addr, value);
+
+	rc = regmap_write(chip->regmap, addr, value);
+
+	return rc;
+}
+
+static int qpnp_lpg_configure_lut_state(struct qpnp_pwm_chip *chip,
+				enum qpnp_lut_state state)
+{
+	struct qpnp_lpg_config	*lpg_config = &chip->lpg_config;
+	u8			value1, value2, mask1, mask2;
+	u8			*reg1, *reg2;
+	u16			addr, addr1;
+	int			rc;
+	bool			test_enable;
+
+	value1 = chip->qpnp_lpg_registers[QPNP_RAMP_CONTROL];
+	reg1 = &chip->qpnp_lpg_registers[QPNP_RAMP_CONTROL];
+	reg2 = &chip->qpnp_lpg_registers[QPNP_ENABLE_CONTROL];
+	mask2 = QPNP_EN_PWM_HIGH_MASK | QPNP_EN_PWM_LO_MASK |
+		 QPNP_PWM_SRC_SELECT_MASK | QPNP_PWM_EN_RAMP_GEN_MASK;
+	if (chip->sub_type != QPNP_LPG_S_CHAN_SUB_TYPE)
+		mask2 |= QPNP_EN_PWM_OUTPUT_MASK;
+
+	if (chip->sub_type == QPNP_LPG_CHAN_SUB_TYPE
+		&& chip->revision == QPNP_LPG_REVISION_0) {
+		if (state == QPNP_LUT_ENABLE) {
+			QPNP_ENABLE_LUT_V0(value1);
+			value2 = QPNP_ENABLE_LPG_MODE(chip);
+		} else {
+			QPNP_DISABLE_LUT_V0(value1);
+			value2 = QPNP_DISABLE_LPG_MODE(chip);
+		}
+		mask1 = QPNP_RAMP_START_MASK;
+		addr1 = SPMI_LPG_REG_ADDR(lpg_config->base_addr,
+					QPNP_RAMP_CONTROL);
+	} else if ((chip->sub_type == QPNP_LPG_CHAN_SUB_TYPE
+			&& chip->revision == QPNP_LPG_REVISION_1)
+			|| chip->sub_type == QPNP_LPG_S_CHAN_SUB_TYPE) {
+		if (state == QPNP_LUT_ENABLE) {
+			QPNP_ENABLE_LUT_V1(value1,
+					lpg_config->lut_config.ramp_index);
+			value2 = QPNP_ENABLE_LPG_MODE(chip);
+		} else {
+			value2 = QPNP_DISABLE_LPG_MODE(chip);
+		}
+		mask1 = value1;
+		addr1 = lpg_config->lut_base_addr +
+			SPMI_LPG_REV1_RAMP_CONTROL_OFFSET;
+	} else {
+		pr_err("Unsupported LPG subtype 0x%02x, revision 0x%02x\n",
+			chip->sub_type, chip->revision);
+		return -EINVAL;
+	}
+
+	addr = SPMI_LPG_REG_ADDR(lpg_config->base_addr,
+				QPNP_ENABLE_CONTROL);
+
+	if (chip->in_test_mode) {
+		test_enable = (state == QPNP_LUT_ENABLE) ? 1 : 0;
+		rc = qpnp_dtest_config(chip, test_enable);
+		if (rc)
+			pr_err("Failed to configure TEST mode\n");
+	}
+
+	rc = qpnp_lpg_save_and_write(value2, mask2, reg2,
+					addr, 1, chip);
+	if (rc)
+		return rc;
+
+	if (state == QPNP_LUT_ENABLE
+		|| (chip->sub_type == QPNP_LPG_CHAN_SUB_TYPE
+		&& chip->revision == QPNP_LPG_REVISION_0))
+		rc = qpnp_lpg_save_and_write(value1, mask1, reg1,
+					addr1, 1, chip);
+	return rc;
+}
+
+static inline int qpnp_enable_pwm_mode(struct qpnp_pwm_chip *chip)
+{
+	if (chip->pwm_config.supported_sizes == QPNP_PWM_SIZE_7_8_BIT)
+		return QPNP_ENABLE_PWM_MODE_GPLED_CHANNEL(chip);
+	return QPNP_ENABLE_PWM_MODE(chip);
+}
+
+static int qpnp_lpg_configure_pwm_state(struct qpnp_pwm_chip *chip,
+					enum qpnp_pwm_state state)
+{
+	struct qpnp_lpg_config	*lpg_config = &chip->lpg_config;
+	u8			value, mask;
+	int			rc;
+	bool			test_enable;
+
+	if (chip->sub_type == QPNP_PWM_MODE_ONLY_SUB_TYPE) {
+		if (state == QPNP_PWM_ENABLE)
+			value = QPNP_ENABLE_PWM_MODE_ONLY_SUB_TYPE;
+		else
+			value = QPNP_DISABLE_PWM_MODE_ONLY_SUB_TYPE;
+
+		mask = QPNP_PWM_MODE_ONLY_ENABLE_DISABLE_MASK_SUB_TYPE;
+	} else {
+		if (state == QPNP_PWM_ENABLE)
+			value = qpnp_enable_pwm_mode(chip);
+		else
+			value = QPNP_DISABLE_PWM_MODE(chip);
+
+		mask = QPNP_EN_PWM_HIGH_MASK | QPNP_EN_PWM_LO_MASK |
+			QPNP_PWM_SRC_SELECT_MASK | QPNP_PWM_EN_RAMP_GEN_MASK;
+		if (chip->sub_type != QPNP_LPG_S_CHAN_SUB_TYPE)
+			mask |= QPNP_EN_PWM_OUTPUT_MASK;
+	}
+
+	if (chip->in_test_mode) {
+		test_enable = (state == QPNP_PWM_ENABLE) ? 1 : 0;
+		rc = qpnp_dtest_config(chip, test_enable);
+		if (rc)
+			pr_err("Failed to configure TEST mode\n");
+	}
+
+	pr_debug("pwm_enable_control: %d\n", value);
+	rc = qpnp_lpg_save_and_write(value, mask,
+		&chip->qpnp_lpg_registers[QPNP_ENABLE_CONTROL],
+		SPMI_LPG_REG_ADDR(lpg_config->base_addr,
+		QPNP_ENABLE_CONTROL), 1, chip);
+	if (rc)
+		goto out;
+
+	/*
+	 * Due to an LPG hardware bug, the PWM value has to be written
+	 * once more after PWM mode has been enabled.
+	 */
+	if (state == QPNP_PWM_ENABLE)
+		return qpnp_lpg_save_pwm_value(chip);
+
+out:
+	return rc;
+}
+
+static int _pwm_config(struct qpnp_pwm_chip *chip,
+				enum time_level tm_lvl,
+				int duty_value, int period_value)
+{
+	int rc;
+	struct _qpnp_pwm_config *pwm_config = &chip->pwm_config;
+	struct pwm_period_config *period = &pwm_config->period;
+
+	pwm_config->pwm_duty = (tm_lvl == LVL_USEC) ? duty_value :
+			duty_value / NSEC_PER_USEC;
+	qpnp_lpg_calc_pwm_value(pwm_config, period_value, duty_value);
+	rc = qpnp_lpg_save_pwm_value(chip);
+	if (rc)
+		goto out;
+
+	if (pwm_config->update_period) {
+		rc = qpnp_lpg_configure_pwm(chip);
+		if (rc)
+			goto out;
+		rc = qpnp_configure_pwm_control(chip);
+		if (rc)
+			goto out;
+		if (chip->enabled) {
+			rc = qpnp_lpg_configure_pwm_state(chip,
+					QPNP_PWM_ENABLE);
+			if (rc) {
+				pr_err("Error in configuring pwm state, rc=%d\n",
+						rc);
+				return rc;
+			}
+
+			/* Enable the glitch removal after PWM is enabled */
+			rc = qpnp_lpg_glitch_removal(chip, true);
+			if (rc) {
+				pr_err("Error in enabling glitch control, rc=%d\n",
+						rc);
+				return rc;
+			}
+		}
+	}
+	pr_debug("duty/period=%u/%u %s: pwm_value=%d (of %d)\n",
+		 (unsigned int)duty_value, (unsigned int)period_value,
+		 (tm_lvl == LVL_USEC) ? "usec" : "nsec",
+		 pwm_config->pwm_value, 1 << period->pwm_size);
+
+out:
+	return rc;
+}
+
+static int _pwm_lut_config(struct qpnp_pwm_chip *chip, int period_us,
+		int duty_pct[], struct lut_params lut_params)
+{
+	struct qpnp_lpg_config		*lpg_config;
+	struct qpnp_lut_config		*lut_config;
+	struct pwm_period_config	*period;
+	struct _qpnp_pwm_config		*pwm_config;
+	int				start_idx = lut_params.start_idx;
+	int				len = lut_params.idx_len;
+	int				flags = lut_params.flags;
+	int				raw_lut, ramp_step_ms;
+	int				rc = 0;
+
+	pwm_config = &chip->pwm_config;
+	lpg_config = &chip->lpg_config;
+	lut_config = &lpg_config->lut_config;
+	period = &pwm_config->period;
+
+	if (flags & PM_PWM_LUT_NO_TABLE)
+		goto after_table_write;
+
+	raw_lut = 0;
+	if (flags & PM_PWM_LUT_USE_RAW_VALUE)
+		raw_lut = 1;
+
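+	/*
+	 * The hardware LUT indices appear to be 1-based: entry 0 of
+	 * duty_pct[] lands at lo_index = start_idx + 1 (the byte offset
+	 * computed in qpnp_lpg_change_table() compensates for this).
+	 */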
+	lut_config->list_len = len;
+	lut_config->lo_index = start_idx + 1;
+	lut_config->hi_index = start_idx + len;
+
+	rc = qpnp_lpg_change_table(chip, duty_pct, raw_lut);
+	if (rc) {
+		pr_err("qpnp_lpg_change_table: rc=%d\n", rc);
+		return -EINVAL;
+	}
+
+after_table_write:
+	ramp_step_ms = lut_params.ramp_step_ms;
+
+	if (ramp_step_ms > PM_PWM_LUT_RAMP_STEP_TIME_MAX)
+		ramp_step_ms = PM_PWM_LUT_RAMP_STEP_TIME_MAX;
+
+	QPNP_SET_PAUSE_CNT(lut_config->lut_pause_lo_cnt,
+			lut_params.lut_pause_lo, ramp_step_ms);
+	if (lut_config->lut_pause_lo_cnt > PM_PWM_MAX_PAUSE_CNT)
+		lut_config->lut_pause_lo_cnt = PM_PWM_MAX_PAUSE_CNT;
+
+	QPNP_SET_PAUSE_CNT(lut_config->lut_pause_hi_cnt,
+			lut_params.lut_pause_hi, ramp_step_ms);
+	if (lut_config->lut_pause_hi_cnt > PM_PWM_MAX_PAUSE_CNT)
+		lut_config->lut_pause_hi_cnt = PM_PWM_MAX_PAUSE_CNT;
+
+	lut_config->ramp_step_ms = ramp_step_ms;
+
+	lut_config->ramp_direction  = !!(flags & PM_PWM_LUT_RAMP_UP);
+	lut_config->pattern_repeat  = !!(flags & PM_PWM_LUT_LOOP);
+	lut_config->ramp_toggle	    = !!(flags & PM_PWM_LUT_REVERSE);
+	lut_config->enable_pause_hi = !!(flags & PM_PWM_LUT_PAUSE_HI_EN);
+	lut_config->enable_pause_lo = !!(flags & PM_PWM_LUT_PAUSE_LO_EN);
+
+	rc = qpnp_lpg_change_lut(chip);
+
+	if (!rc && chip->enabled)
+		rc = qpnp_lpg_configure_lut_state(chip, QPNP_LUT_ENABLE);
+
+	return rc;
+}
+
+/* lpg_lock should be held while calling _pwm_enable() */
+static int _pwm_enable(struct qpnp_pwm_chip *chip)
+{
+	int rc = 0;
+
+	if (QPNP_IS_PWM_CONFIG_SELECTED(
+		chip->qpnp_lpg_registers[QPNP_ENABLE_CONTROL]) ||
+			chip->flags & QPNP_PWM_LUT_NOT_SUPPORTED) {
+		rc = qpnp_lpg_configure_pwm_state(chip, QPNP_PWM_ENABLE);
+		if (rc) {
+			pr_err("Failed to enable PWM mode, rc=%d\n", rc);
+			return rc;
+		}
+		rc = qpnp_lpg_glitch_removal(chip, true);
+		if (rc) {
+			pr_err("Failed to enable glitch removal, rc=%d\n", rc);
+			return rc;
+		}
+	} else {
+		rc = qpnp_lpg_configure_lut_state(chip, QPNP_LUT_ENABLE);
+	}
+
+	if (!rc)
+		chip->enabled = true;
+
+	return rc;
+}
+
+/* lpg_lock should be held while calling _pwm_change_mode() */
+static int _pwm_change_mode(struct qpnp_pwm_chip *chip, enum pm_pwm_mode mode)
+{
+	int rc;
+
+	if (mode == PM_PWM_MODE_LPG)
+		rc = qpnp_configure_lpg_control(chip);
+	else
+		rc = qpnp_configure_pwm_control(chip);
+
+	if (rc)
+		pr_err("Failed to change the mode\n");
+	return rc;
+}
+
+/* APIs */
+/**
+ * qpnp_pwm_free - free a PWM device
+ * @pwm_chip: the PWM chip
+ * @pwm: the PWM device
+ */
+static void qpnp_pwm_free(struct pwm_chip *pwm_chip,
+		struct pwm_device *pwm)
+{
+	struct qpnp_pwm_chip	*chip = qpnp_pwm_from_pwm_chip(pwm_chip);
+	unsigned long		flags;
+
+	spin_lock_irqsave(&chip->lpg_lock, flags);
+
+	qpnp_lpg_configure_pwm_state(chip, QPNP_PWM_DISABLE);
+	if (!(chip->flags & QPNP_PWM_LUT_NOT_SUPPORTED))
+		qpnp_lpg_configure_lut_state(chip, QPNP_LUT_DISABLE);
+
+	chip->enabled = false;
+	spin_unlock_irqrestore(&chip->lpg_lock, flags);
+}
+
+/**
+ * qpnp_pwm_config - change a PWM device configuration
+ * @pwm_chip: the PWM chip
+ * @pwm: the PWM device
+ * @duty_ns: duty cycle in nanoseconds
+ * @period_ns: period in nanoseconds
+ */
+static int qpnp_pwm_config(struct pwm_chip *pwm_chip,
+	struct pwm_device *pwm, int duty_ns, int period_ns)
+{
+	int rc;
+	unsigned long flags;
+	struct qpnp_pwm_chip *chip = qpnp_pwm_from_pwm_chip(pwm_chip);
+	int prev_period_us = chip->pwm_config.pwm_period;
+
+	if ((unsigned int)period_ns < PM_PWM_PERIOD_MIN * NSEC_PER_USEC) {
+		pr_err("Invalid pwm handle or parameters\n");
+		return -EINVAL;
+	}
+
+	spin_lock_irqsave(&chip->lpg_lock, flags);
+
+	chip->pwm_config.update_period = false;
+	if (prev_period_us > INT_MAX / NSEC_PER_USEC ||
+			prev_period_us * NSEC_PER_USEC != period_ns) {
+		qpnp_lpg_calc_period(LVL_NSEC, period_ns, chip);
+		qpnp_lpg_save_period(chip);
+		pwm->period = period_ns;
+		chip->pwm_config.pwm_period = period_ns / NSEC_PER_USEC;
+		chip->pwm_config.update_period = true;
+	}
+
+	rc = _pwm_config(chip, LVL_NSEC, duty_ns, period_ns);
+
+	spin_unlock_irqrestore(&chip->lpg_lock, flags);
+
+	if (rc)
+		pr_err("Failed to configure PWM mode\n");
+
+	return rc;
+}
+
+/**
+ * qpnp_pwm_enable - start a PWM output toggling
+ * @pwm_chip: the PWM chip
+ * @pwm: the PWM device
+ */
+static int qpnp_pwm_enable(struct pwm_chip *pwm_chip,
+		struct pwm_device *pwm)
+{
+	int rc;
+	struct qpnp_pwm_chip *chip = qpnp_pwm_from_pwm_chip(pwm_chip);
+	unsigned long flags;
+
+	spin_lock_irqsave(&chip->lpg_lock, flags);
+	rc = _pwm_enable(chip);
+	if (rc)
+		pr_err("Failed to enable PWM channel: %d\n", chip->channel_id);
+
+	spin_unlock_irqrestore(&chip->lpg_lock, flags);
+
+	return rc;
+}
+
+/**
+ * qpnp_pwm_disable - stop a PWM output toggling
+ * @pwm_chip: the PWM chip
+ * @pwm: the PWM device
+ */
+static void qpnp_pwm_disable(struct pwm_chip *pwm_chip,
+		struct pwm_device *pwm)
+{
+	struct qpnp_pwm_chip	*chip = qpnp_pwm_from_pwm_chip(pwm_chip);
+	unsigned long		flags;
+	int rc = 0;
+
+	spin_lock_irqsave(&chip->lpg_lock, flags);
+
+	if (QPNP_IS_PWM_CONFIG_SELECTED(
+		chip->qpnp_lpg_registers[QPNP_ENABLE_CONTROL]) ||
+			chip->flags & QPNP_PWM_LUT_NOT_SUPPORTED)
+		rc = qpnp_lpg_configure_pwm_state(chip,
+					QPNP_PWM_DISABLE);
+	else
+		rc = qpnp_lpg_configure_lut_state(chip,
+					QPNP_LUT_DISABLE);
+
+	if (!rc)
+		chip->enabled = false;
+
+	spin_unlock_irqrestore(&chip->lpg_lock, flags);
+
+	if (rc)
+		pr_err("Failed to disable PWM channel: %d\n",
+					chip->channel_id);
+}
+
+/**
+ * pwm_change_mode - Change the PWM mode configuration
+ * @pwm: the PWM device
+ * @mode: Mode selection value
+ */
+int pwm_change_mode(struct pwm_device *pwm, enum pm_pwm_mode mode)
+{
+	int rc = 0;
+	unsigned long flags;
+	struct qpnp_pwm_chip *chip;
+
+	if (pwm == NULL || IS_ERR(pwm) || pwm->chip == NULL) {
+		pr_err("Invalid pwm handle or no pwm_chip\n");
+		return -EINVAL;
+	}
+
+	if (mode < PM_PWM_MODE_PWM || mode > PM_PWM_MODE_LPG) {
+		pr_err("Invalid mode value\n");
+		return -EINVAL;
+	}
+
+	chip = qpnp_pwm_from_pwm_dev(pwm);
+
+	spin_lock_irqsave(&chip->lpg_lock, flags);
+	if (chip->pwm_mode != mode) {
+		rc = _pwm_change_mode(chip, mode);
+		if (rc) {
+			pr_err("Failed to change mode: %d, rc=%d\n", mode, rc);
+			goto unlock;
+		}
+		chip->pwm_mode = mode;
+		if (chip->enabled) {
+			rc = _pwm_enable(chip);
+			if (rc) {
+				pr_err("Failed to enable PWM, rc=%d\n", rc);
+				goto unlock;
+			}
+		}
+	}
+unlock:
+	spin_unlock_irqrestore(&chip->lpg_lock, flags);
+
+	return rc;
+}
+EXPORT_SYMBOL(pwm_change_mode);
+
+/**
+ * pwm_config_period - change PWM period
+ *
+ * @pwm: the PWM device
+ * @period: pointer to the new period configuration (struct pwm_period_config)
+ */
+int pwm_config_period(struct pwm_device *pwm,
+			     struct pwm_period_config *period)
+{
+	struct _qpnp_pwm_config	*pwm_config;
+	struct qpnp_lpg_config	*lpg_config;
+	struct qpnp_pwm_chip	*chip;
+	unsigned long		flags;
+	int			rc = 0;
+
+	if (pwm == NULL || IS_ERR(pwm) || period == NULL)
+		return -EINVAL;
+	if (pwm->chip == NULL)
+		return -ENODEV;
+
+	chip = qpnp_pwm_from_pwm_dev(pwm);
+	pwm_config = &chip->pwm_config;
+	lpg_config = &chip->lpg_config;
+
+	spin_lock_irqsave(&chip->lpg_lock, flags);
+
+	pwm_config->period.pwm_size = period->pwm_size;
+	pwm_config->period.clk = period->clk;
+	pwm_config->period.pre_div = period->pre_div;
+	pwm_config->period.pre_div_exp = period->pre_div_exp;
+
+	qpnp_lpg_save_period(chip);
+
+	rc = regmap_write(chip->regmap,
+			SPMI_LPG_REG_ADDR(lpg_config->base_addr,
+			QPNP_LPG_PWM_SIZE_CLK),
+			chip->qpnp_lpg_registers[QPNP_LPG_PWM_SIZE_CLK]);
+
+	if (rc) {
+		pr_err("Write failed: QPNP_LPG_PWM_SIZE_CLK register, rc: %d\n",
+									rc);
+		goto out_unlock;
+	}
+
+	rc = regmap_write(chip->regmap,
+		SPMI_LPG_REG_ADDR(lpg_config->base_addr,
+		QPNP_LPG_PWM_FREQ_PREDIV_CLK),
+		chip->qpnp_lpg_registers[QPNP_LPG_PWM_FREQ_PREDIV_CLK]);
+	if (rc)
+		pr_err("Failed to write to QPNP_LPG_PWM_FREQ_PREDIV_CLK register, rc=%d\n",
+									rc);
+
+out_unlock:
+	spin_unlock_irqrestore(&chip->lpg_lock, flags);
+	return rc;
+}
+EXPORT_SYMBOL(pwm_config_period);
+
+/**
+ * pwm_config_pwm_value - change a PWM device configuration
+ * @pwm: the PWM device
+ * @pwm_value: the duty cycle in raw PWM value (< 2^pwm_size)
+ */
+int pwm_config_pwm_value(struct pwm_device *pwm, int pwm_value)
+{
+	struct qpnp_lpg_config	*lpg_config;
+	struct _qpnp_pwm_config	*pwm_config;
+	struct qpnp_pwm_chip	*chip;
+	unsigned long		flags;
+	int			rc = 0;
+
+	if (pwm == NULL || IS_ERR(pwm)) {
+		pr_err("Invalid parameter passed\n");
+		return -EINVAL;
+	}
+
+	if (pwm->chip == NULL) {
+		pr_err("Invalid device handle\n");
+		return -ENODEV;
+	}
+
+	chip = qpnp_pwm_from_pwm_dev(pwm);
+	lpg_config = &chip->lpg_config;
+	pwm_config = &chip->pwm_config;
+
+	spin_lock_irqsave(&chip->lpg_lock, flags);
+
+	if (pwm_config->pwm_value == pwm_value)
+		goto out_unlock;
+
+	pwm_config->pwm_value = pwm_value;
+
+	rc = qpnp_lpg_save_pwm_value(chip);
+
+	if (rc)
+		pr_err("Could not update PWM value for channel %d rc=%d\n",
+						chip->channel_id, rc);
+
+out_unlock:
+	spin_unlock_irqrestore(&chip->lpg_lock, flags);
+	return rc;
+}
+EXPORT_SYMBOL(pwm_config_pwm_value);
+
+/**
+ * pwm_config_us - change a PWM device configuration
+ * @pwm: the PWM device
+ * @duty_us: duty cycle in microseconds
+ * @period_us: period in microseconds
+ */
+int pwm_config_us(struct pwm_device *pwm, int duty_us, int period_us)
+{
+	int rc;
+	unsigned long flags;
+	struct qpnp_pwm_chip *chip;
+
+	if (pwm == NULL || IS_ERR(pwm) ||
+		duty_us > period_us ||
+		(unsigned int)period_us > PM_PWM_PERIOD_MAX ||
+		(unsigned int)period_us < PM_PWM_PERIOD_MIN) {
+		pr_err("Invalid pwm handle or parameters\n");
+		return -EINVAL;
+	}
+
+	chip = qpnp_pwm_from_pwm_dev(pwm);
+
+	spin_lock_irqsave(&chip->lpg_lock, flags);
+
+	chip->pwm_config.update_period = false;
+	if (chip->pwm_config.pwm_period != period_us) {
+		qpnp_lpg_calc_period(LVL_USEC, period_us, chip);
+		qpnp_lpg_save_period(chip);
+		chip->pwm_config.pwm_period = period_us;
+		if ((unsigned int)period_us >
+		    (unsigned int)(-1) / NSEC_PER_USEC)
+			pwm->period = 0;
+		else
+			pwm->period = (unsigned int)period_us * NSEC_PER_USEC;
+		chip->pwm_config.update_period = true;
+	}
+
+	rc = _pwm_config(chip, LVL_USEC, duty_us, period_us);
+
+	spin_unlock_irqrestore(&chip->lpg_lock, flags);
+
+	if (rc)
+		pr_err("Failed to configure PWM mode\n");
+
+	return rc;
+}
+EXPORT_SYMBOL(pwm_config_us);
+
+/**
+ * pwm_lut_config - change LPG LUT device configuration
+ * @pwm: the PWM device
+ * @period_us: period in microseconds
+ * @duty_pct: array of duty cycles in percent, like 20, 50.
+ * @lut_params: Lookup table parameters
+ */
+int pwm_lut_config(struct pwm_device *pwm, int period_us,
+		int duty_pct[], struct lut_params lut_params)
+{
+	unsigned long flags;
+	struct qpnp_pwm_chip *chip;
+	int rc = 0;
+
+	if (pwm == NULL || IS_ERR(pwm) || !lut_params.idx_len) {
+		pr_err("Invalid pwm handle or idx_len=0\n");
+		return -EINVAL;
+	}
+
+	if (pwm->chip == NULL)
+		return -ENODEV;
+
+	if (duty_pct == NULL && !(lut_params.flags & PM_PWM_LUT_NO_TABLE)) {
+		pr_err("Invalid duty_pct with flag\n");
+		return -EINVAL;
+	}
+
+	chip = qpnp_pwm_from_pwm_dev(pwm);
+
+	if (chip->flags & QPNP_PWM_LUT_NOT_SUPPORTED) {
+		pr_err("LUT mode isn't supported\n");
+		return -EINVAL;
+	}
+
+	if ((lut_params.start_idx + lut_params.idx_len) >
+				chip->lpg_config.lut_size) {
+		pr_err("Exceed LUT limit\n");
+		return -EINVAL;
+	}
+
+	if ((unsigned int)period_us > PM_PWM_PERIOD_MAX ||
+	    (unsigned int)period_us < PM_PWM_PERIOD_MIN) {
+		pr_err("Period out of range\n");
+		return -EINVAL;
+	}
+
+	spin_lock_irqsave(&chip->lpg_lock, flags);
+
+	if (chip->pwm_config.pwm_period != period_us) {
+		qpnp_lpg_calc_period(LVL_USEC, period_us, chip);
+		qpnp_lpg_save_period(chip);
+		chip->pwm_config.pwm_period = period_us;
+	}
+
+	rc = _pwm_lut_config(chip, period_us, duty_pct, lut_params);
+
+	spin_unlock_irqrestore(&chip->lpg_lock, flags);
+
+	if (rc)
+		pr_err("Failed to configure LUT\n");
+
+	return rc;
+}
+EXPORT_SYMBOL(pwm_lut_config);
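+
+/*
+ * Illustrative consumer sketch (not part of this driver): a client such as
+ * an LED driver could program a looping brightness ramp with the exported
+ * helpers roughly as below.  The duty table contents, the 1000 us period,
+ * and the 50 ms ramp step are values chosen for the example only, and the
+ * pwm handle is assumed to have been obtained beforehand (e.g. via
+ * of_pwm_get()).
+ *
+ *	int rc, duty_pct[] = { 10, 30, 50, 70, 90 };
+ *	struct lut_params params = {
+ *		.start_idx = 0,
+ *		.idx_len = ARRAY_SIZE(duty_pct),
+ *		.ramp_step_ms = 50,
+ *		.flags = PM_PWM_LUT_LOOP | PM_PWM_LUT_RAMP_UP |
+ *			 PM_PWM_LUT_REVERSE,
+ *	};
+ *
+ *	rc = pwm_lut_config(pwm, 1000, duty_pct, params);
+ *	if (!rc)
+ *		rc = pwm_change_mode(pwm, PM_PWM_MODE_LPG);
+ *	if (!rc)
+ *		rc = pwm_enable(pwm);
+ */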
+
+static int qpnp_parse_pwm_dt_config(struct device_node *of_pwm_node,
+		struct device_node *of_parent, struct qpnp_pwm_chip *chip)
+{
+	int rc, period;
+
+	rc = of_property_read_u32(of_parent, "qcom,period", (u32 *)&period);
+	if (rc) {
+		pr_err("node is missing PWM Period prop");
+		return rc;
+	}
+
+	rc = of_property_read_u32(of_pwm_node, "qcom,duty",
+				&chip->pwm_config.pwm_duty);
+	if (rc) {
+		pr_err("node is missing PWM Duty prop");
+		return rc;
+	}
+
+	if (period < chip->pwm_config.pwm_duty || period > PM_PWM_PERIOD_MAX ||
+		period < PM_PWM_PERIOD_MIN) {
+		pr_err("Invalid pwm period(%d) or duty(%d)\n", period,
+			chip->pwm_config.pwm_duty);
+		return -EINVAL;
+	}
+
+	qpnp_lpg_calc_period(LVL_USEC, period, chip);
+	qpnp_lpg_save_period(chip);
+	chip->pwm_config.pwm_period = period;
+	chip->pwm_config.update_period = true;
+
+	rc = _pwm_config(chip, LVL_USEC, chip->pwm_config.pwm_duty, period);
+
+	return rc;
+}
+
+static int qpnp_parse_lpg_dt_config(struct device_node *of_lpg_node,
+		struct device_node *of_parent, struct qpnp_pwm_chip *chip)
+{
+	int rc, period, list_size, start_idx, *duty_pct_list;
+	struct qpnp_lpg_config	*lpg_config = &chip->lpg_config;
+	struct qpnp_lut_config	*lut_config = &lpg_config->lut_config;
+	struct lut_params	lut_params;
+
+	rc = of_property_read_u32(of_parent, "qcom,period", &period);
+	if (rc) {
+		pr_err("node is missing PWM Period prop\n");
+		return rc;
+	}
+
+	if (!of_get_property(of_lpg_node, "qcom,duty-percents", &list_size)) {
+		pr_err("node is missing duty-pct list\n");
+		return -EINVAL;
+	}
+
+	rc = of_property_read_u32(of_lpg_node, "cell-index", &start_idx);
+	if (rc) {
+		pr_err("Missing start index\n");
+		return rc;
+	}
+
+	list_size /= sizeof(u32);
+
+	if (list_size + start_idx > lpg_config->lut_size) {
+		pr_err("duty pct list size overflows\n");
+		return -EINVAL;
+	}
+
+	duty_pct_list = kcalloc(list_size, sizeof(*duty_pct_list), GFP_KERNEL);
+	if (!duty_pct_list)
+		return -ENOMEM;
+
+	rc = of_property_read_u32_array(of_lpg_node, "qcom,duty-percents",
+						duty_pct_list, list_size);
+	if (rc) {
+		pr_err("invalid or missing property: qcom,duty-pcts-list\n");
+		goto out;
+	}
+
+	/* Read optional properties */
+	rc = of_property_read_u32(of_lpg_node, "qcom,ramp-step-duration",
+				  &lut_config->ramp_step_ms);
+	if (rc && rc != -EINVAL)
+		goto out;
+
+	rc = of_property_read_u32(of_lpg_node, "qcom,lpg-lut-pause-hi",
+				  &lut_config->lut_pause_hi_cnt);
+	if (rc && rc != -EINVAL)
+		goto out;
+
+	rc = of_property_read_u32(of_lpg_node, "qcom,lpg-lut-pause-lo",
+				  &lut_config->lut_pause_lo_cnt);
+	if (rc && rc != -EINVAL)
+		goto out;
+
+	rc = of_property_read_u32(of_lpg_node, "qcom,lpg-lut-ramp-direction",
+				  (u32 *)&lut_config->ramp_direction);
+	if (rc && rc != -EINVAL)
+		goto out;
+
+	rc = of_property_read_u32(of_lpg_node, "qcom,lpg-lut-pattern-repeat",
+				  (u32 *)&lut_config->pattern_repeat);
+	if (rc && rc != -EINVAL)
+		goto out;
+
+	rc = of_property_read_u32(of_lpg_node, "qcom,lpg-lut-ramp-toggle",
+				  (u32 *)&lut_config->ramp_toggle);
+	if (rc && rc != -EINVAL)
+		goto out;
+
+	rc = of_property_read_u32(of_lpg_node, "qcom,lpg-lut-enable-pause-hi",
+				  (u32 *)&lut_config->enable_pause_hi);
+	if (rc && rc != -EINVAL)
+		goto out;
+
+	rc = of_property_read_u32(of_lpg_node, "qcom,lpg-lut-enable-pause-lo",
+				  (u32 *)&lut_config->enable_pause_lo);
+	if (rc && rc != -EINVAL)
+		goto out;
+	rc = 0;
+
+	qpnp_set_lut_params(&lut_params, lut_config, start_idx, list_size);
+
+	_pwm_lut_config(chip, period, duty_pct_list, lut_params);
+
+out:
+	kfree(duty_pct_list);
+	return rc;
+}
+
+static int qpnp_lpg_get_rev_subtype(struct qpnp_pwm_chip *chip)
+{
+	int rc;
+	uint val;
+
+	rc = regmap_read(chip->regmap,
+			 chip->lpg_config.base_addr + SPMI_LPG_SUB_TYPE_OFFSET,
+			 &val);
+
+	if (rc) {
+		pr_err("Couldn't read subtype rc: %d\n", rc);
+		goto out;
+	}
+	chip->sub_type = (u8)val;
+
+	rc = regmap_read(chip->regmap,
+			 chip->lpg_config.base_addr + SPMI_LPG_REVISION2_OFFSET,
+			 &val);
+
+	if (rc) {
+		pr_err("Couldn't read revision2 rc: %d\n", rc);
+		goto out;
+	}
+	chip->revision = (u8)val;
+
+	if (chip->revision < QPNP_LPG_REVISION_0 ||
+		chip->revision > QPNP_LPG_REVISION_1) {
+		pr_err("Unknown LPG revision detected, rev:%d\n",
+						chip->revision);
+		rc = -EINVAL;
+		goto out;
+	}
+
+	if (chip->sub_type != QPNP_PWM_MODE_ONLY_SUB_TYPE
+		&& chip->sub_type != QPNP_LPG_CHAN_SUB_TYPE
+		&& chip->sub_type != QPNP_LPG_S_CHAN_SUB_TYPE) {
+		pr_err("Unknown LPG/PWM subtype detected, subtype:%d\n",
+						chip->sub_type);
+		rc = -EINVAL;
+	}
+out:
+	pr_debug("LPG rev 0x%02x subtype 0x%02x rc: %d\n", chip->revision,
+		chip->sub_type, rc);
+	return rc;
+}
+
+/* Fill in lpg device elements based on values found in device tree. */
+static int qpnp_parse_dt_config(struct platform_device *pdev,
+					struct qpnp_pwm_chip *chip)
+{
+	int			rc, mode, lut_entry_size, list_size, i;
+	const char		*label;
+	const __be32		*prop;
+	u32			size;
+	struct device_node	*node;
+	int found_pwm_subnode = 0;
+	int found_lpg_subnode = 0;
+	struct device_node	*of_node = pdev->dev.of_node;
+	struct qpnp_lpg_config	*lpg_config = &chip->lpg_config;
+	struct qpnp_lut_config	*lut_config = &lpg_config->lut_config;
+	struct _qpnp_pwm_config	*pwm_config = &chip->pwm_config;
+	int			force_pwm_size = 0;
+	int			pwm_size_list[QPNP_PWM_SIZES_SUPPORTED];
+
+	rc = of_property_read_u32(of_node, "qcom,channel-id",
+				&chip->channel_id);
+	if (rc) {
+		dev_err(&pdev->dev, "%s: node is missing LPG channel id\n",
+								__func__);
+		return -EINVAL;
+	}
+
+	if (!of_get_property(of_node, "qcom,supported-sizes", &list_size)) {
+		pr_err("Missing qcom,supported-size list\n");
+		return -EINVAL;
+	}
+
+	list_size /= sizeof(u32);
+	if (list_size > QPNP_PWM_SIZES_SUPPORTED) {
+		pr_err(" qcom,supported-size list is too big\n");
+		return -EINVAL;
+	}
+
+	rc = of_property_read_u32_array(of_node, "qcom,supported-sizes",
+			pwm_size_list, list_size);
+
+	if (rc) {
+		pr_err("Invalid qcom,supported-size property\n");
+		return rc;
+	}
+
+	for (i = 0; i < list_size; i++) {
+		pwm_config->supported_sizes |=
+			(1 << (pwm_size_list[i] - QPNP_MIN_PWM_BIT_SIZE));
+	}
+
+	if (!(pwm_config->supported_sizes == QPNP_PWM_SIZE_6_9_BIT ||
+		pwm_config->supported_sizes == QPNP_PWM_SIZE_7_8_BIT ||
+		pwm_config->supported_sizes == QPNP_PWM_SIZE_6_7_9_BIT)) {
+		pr_err("PWM sizes list qcom,supported-size is not proper\n");
+		return -EINVAL;
+	}
+
+	/*
+	 * For certain LPG channels the PWM size can be forced, so that for
+	 * every requested PWM period the closest PWM frequency is selected
+	 * in qpnp_lpg_calc_period() for the forced PWM size.
+	 */
+	rc = of_property_read_u32(of_node, "qcom,force-pwm-size",
+				&force_pwm_size);
+	if (pwm_config->supported_sizes == QPNP_PWM_SIZE_7_8_BIT) {
+		if (!(force_pwm_size == QPNP_PWM_SIZE_7_BIT ||
+				force_pwm_size == QPNP_PWM_SIZE_8_BIT))
+			force_pwm_size = 0;
+	} else if (chip->sub_type == QPNP_PWM_MODE_ONLY_SUB_TYPE) {
+		if (!(force_pwm_size == QPNP_PWM_SIZE_6_BIT ||
+				force_pwm_size == QPNP_PWM_SIZE_9_BIT))
+			force_pwm_size = 0;
+	} else if (pwm_config->supported_sizes == QPNP_PWM_SIZE_6_7_9_BIT) {
+		if (!(force_pwm_size == QPNP_PWM_SIZE_6_BIT ||
+				force_pwm_size == QPNP_PWM_SIZE_7_BIT ||
+				force_pwm_size == QPNP_PWM_SIZE_9_BIT))
+			force_pwm_size = 0;
+	}
+
+	pwm_config->force_pwm_size = force_pwm_size;
+
+	prop = of_get_address_by_name(pdev->dev.of_node, QPNP_LPG_CHANNEL_BASE,
+			0, 0);
+	if (!prop) {
+		dev_err(&pdev->dev, "Couldnt find channel's base addr rc %d\n",
+				rc);
+		return rc;
+	}
+	lpg_config->base_addr = be32_to_cpu(*prop);
+
+	rc = qpnp_lpg_get_rev_subtype(chip);
+	if (rc)
+		return rc;
+
+	prop = of_get_address_by_name(pdev->dev.of_node, QPNP_LPG_LUT_BASE,
+			0, 0);
+	if (!prop) {
+		chip->flags |= QPNP_PWM_LUT_NOT_SUPPORTED;
+	} else {
+		lpg_config->lut_base_addr = be32_to_cpu(*prop);
+		rc = of_property_read_u32(of_node, "qcom,lpg-lut-size", &size);
+		if (rc < 0) {
+			dev_err(&pdev->dev, "Error reading qcom,lpg-lut-size, rc=%d\n",
+					rc);
+			return rc;
+		}
+
+		/*
+		 * Each entry of LUT is of 2 bytes for generic LUT and of 1 byte
+		 * for KPDBL/GLED LUT.
+		 */
+		lpg_config->lut_size = size >> 1;
+		lut_entry_size = sizeof(u16);
+
+		if (pwm_config->supported_sizes == QPNP_PWM_SIZE_7_8_BIT) {
+			lpg_config->lut_size = size;
+			lut_entry_size = sizeof(u8);
+		}
+
+		lut_config->duty_pct_list = kcalloc(lpg_config->lut_size,
+					lut_entry_size, GFP_KERNEL);
+		if (!lut_config->duty_pct_list)
+			return -ENOMEM;
+
+		rc = of_property_read_u32(of_node, "qcom,ramp-index",
+						&lut_config->ramp_index);
+		if (rc) {
+			pr_err("Missing LPG qcom,ramp-index property\n");
+			kfree(lut_config->duty_pct_list);
+			return rc;
+		}
+	}
+
+	rc = of_property_read_u32(of_node, "qcom,dtest-line",
+		&chip->dtest_line);
+	if (rc) {
+		chip->in_test_mode = 0;
+	} else {
+		rc = of_property_read_u32(of_node, "qcom,dtest-output",
+			&chip->dtest_output);
+		if (rc) {
+			pr_err("Missing DTEST output configuration\n");
+			return rc;
+		}
+		chip->in_test_mode = 1;
+	}
+
+	if (chip->in_test_mode) {
+		if ((chip->sub_type == QPNP_PWM_MODE_ONLY_SUB_TYPE) &&
+			(chip->dtest_line > QPNP_PWM_DTEST_LINE_MAX ||
+			chip->dtest_output > QPNP_PWM_DTEST_OUTPUT_MAX)) {
+			pr_err("DTEST line/output values are improper for PWM channel %d\n",
+				chip->channel_id);
+			return -EINVAL;
+		} else if (chip->dtest_line > QPNP_LPG_DTEST_LINE_MAX ||
+			chip->dtest_output > QPNP_LPG_DTEST_OUTPUT_MAX) {
+			pr_err("DTEST line/output values are improper for LPG channel %d\n",
+				chip->channel_id);
+			return -EINVAL;
+		}
+	}
+
+	for_each_child_of_node(of_node, node) {
+		rc = of_property_read_string(node, "label", &label);
+		if (rc) {
+			dev_err(&pdev->dev, "%s: Missing label property\n",
+								__func__);
+			goto out;
+		}
+		if (!strcmp(label, "pwm")) {
+			rc = qpnp_parse_pwm_dt_config(node, of_node, chip);
+			if (rc)
+				goto out;
+			found_pwm_subnode = 1;
+		} else if (!strcmp(label, "lpg") &&
+				!(chip->flags & QPNP_PWM_LUT_NOT_SUPPORTED)) {
+			rc = qpnp_parse_lpg_dt_config(node, of_node, chip);
+			if (rc)
+				goto out;
+			found_lpg_subnode = 1;
+		} else {
+			dev_err(&pdev->dev,
+				"%s: Invalid value for label prop\n",
+								__func__);
+		}
+	}
+
+	rc = of_property_read_u32(of_node, "qcom,mode-select", &mode);
+	if (rc)
+		goto read_opt_props;
+
+	if (mode > PM_PWM_MODE_LPG ||
+		(mode == PM_PWM_MODE_PWM && found_pwm_subnode == 0) ||
+		(mode == PM_PWM_MODE_LPG && found_lpg_subnode == 0)) {
+		dev_err(&pdev->dev, "%s: Invalid mode select\n", __func__);
+		rc = -EINVAL;
+		goto out;
+	}
+
+	chip->pwm_mode = mode;
+	_pwm_change_mode(chip, mode);
+	_pwm_enable(chip);
+
+read_opt_props:
+	/* Initialize optional config parameters from DT if provided */
+	of_property_read_string(of_node, "qcom,channel-owner",
+				&chip->channel_owner);
+
+	return 0;
+
+out:
+	kfree(lut_config->duty_pct_list);
+	return rc;
+}
+
+static struct pwm_ops qpnp_pwm_ops = {
+	.enable = qpnp_pwm_enable,
+	.disable = qpnp_pwm_disable,
+	.config = qpnp_pwm_config,
+	.free = qpnp_pwm_free,
+	.owner = THIS_MODULE,
+};
+
+static int qpnp_pwm_probe(struct platform_device *pdev)
+{
+	struct qpnp_pwm_chip	*pwm_chip;
+	int			rc;
+
+	pwm_chip = kzalloc(sizeof(*pwm_chip), GFP_KERNEL);
+	if (pwm_chip == NULL)
+		return -ENOMEM;
+
+	pwm_chip->regmap = dev_get_regmap(pdev->dev.parent, NULL);
+	if (!pwm_chip->regmap) {
+		dev_err(&pdev->dev, "Couldn't get parent's regmap\n");
+		rc = -EINVAL;
+		goto failed_config;
+	}
+
+	spin_lock_init(&pwm_chip->lpg_lock);
+
+	pwm_chip->pdev = pdev;
+	dev_set_drvdata(&pdev->dev, pwm_chip);
+
+	rc = qpnp_parse_dt_config(pdev, pwm_chip);
+
+	if (rc) {
+		pr_err("Failed parsing DT parameters, rc=%d\n", rc);
+		goto failed_config;
+	}
+
+	pwm_chip->chip.dev = &pdev->dev;
+	pwm_chip->chip.ops = &qpnp_pwm_ops;
+	pwm_chip->chip.base = -1;
+	pwm_chip->chip.npwm = 1;
+
+	rc = pwmchip_add(&pwm_chip->chip);
+	if (rc < 0) {
+		pr_err("pwmchip_add() failed: %d\n", rc);
+		goto failed_insert;
+	}
+
+	if (pwm_chip->channel_owner)
+		pwm_chip->chip.pwms[0].label = pwm_chip->channel_owner;
+
+	pr_debug("PWM device channel:%d probed successfully\n",
+		pwm_chip->channel_id);
+	return 0;
+
+failed_insert:
+	kfree(pwm_chip->lpg_config.lut_config.duty_pct_list);
+failed_config:
+	dev_set_drvdata(&pdev->dev, NULL);
+	kfree(pwm_chip);
+	return rc;
+}
+
+static int qpnp_pwm_remove(struct platform_device *pdev)
+{
+	struct qpnp_pwm_chip *pwm_chip;
+	struct qpnp_lpg_config *lpg_config;
+
+	pwm_chip = dev_get_drvdata(&pdev->dev);
+
+	dev_set_drvdata(&pdev->dev, NULL);
+
+	if (pwm_chip) {
+		lpg_config = &pwm_chip->lpg_config;
+		pwmchip_remove(&pwm_chip->chip);
+		kfree(lpg_config->lut_config.duty_pct_list);
+		kfree(pwm_chip);
+	}
+
+	return 0;
+}
+
+static const struct of_device_id spmi_match_table[] = {
+	{ .compatible = QPNP_LPG_DRIVER_NAME, },
+	{}
+};
+
+static const struct platform_device_id qpnp_lpg_id[] = {
+	{ QPNP_LPG_DRIVER_NAME, 0 },
+	{ }
+};
+MODULE_DEVICE_TABLE(platform, qpnp_lpg_id);
+
+static struct platform_driver qpnp_lpg_driver = {
+	.driver		= {
+		.name		= QPNP_LPG_DRIVER_NAME,
+		.of_match_table	= spmi_match_table,
+		.owner		= THIS_MODULE,
+	},
+	.probe		= qpnp_pwm_probe,
+	.remove		= qpnp_pwm_remove,
+	.id_table	= qpnp_lpg_id,
+};
+
+/**
+ * qpnp_lpg_init() - register the qpnp-lpg platform driver
+ */
+int __init qpnp_lpg_init(void)
+{
+	return platform_driver_register(&qpnp_lpg_driver);
+}
+
+static void __exit qpnp_lpg_exit(void)
+{
+	platform_driver_unregister(&qpnp_lpg_driver);
+}
+
+MODULE_DESCRIPTION("QPNP PMIC LPG driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:" QPNP_LPG_DRIVER_NAME);
+
+subsys_initcall(qpnp_lpg_init);
+module_exit(qpnp_lpg_exit);
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/regulator/cpr3-hmss-regulator.c	2019-10-29 09:26:24.645212984 +0100
@@ -0,0 +1,1835 @@
+/*
+ * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/bitops.h>
+#include <linux/debugfs.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/pm_opp.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/uaccess.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+#include <linux/regulator/of_regulator.h>
+#include <linux/regulator/msm-ldo-regulator.h>
+
+#include "cpr3-regulator.h"
+
+#define MSM8996_HMSS_FUSE_CORNERS	5
+
+/**
+ * struct cpr3_msm8996_hmss_fuses - HMSS specific fuse data for MSM8996
+ * @ro_sel:		Ring oscillator select fuse parameter value for each
+ *			fuse corner
+ * @init_voltage:	Initial (i.e. open-loop) voltage fuse parameter value
+ *			for each fuse corner (raw, not converted to a voltage)
+ * @target_quot:	CPR target quotient fuse parameter value for each fuse
+ *			corner
+ * @quot_offset:	CPR target quotient offset fuse parameter value for each
+ *			fuse corner (raw, not unpacked) used for target quotient
+ *			interpolation
+ * @speed_bin:		Application processor speed bin fuse parameter value for
+ *			the given chip
+ * @cbf_voltage_offset:	Voltage margin offset for the CBF regulator used on
+ *			MSM8996-Pro chips.
+ * @cpr_fusing_rev:	CPR fusing revision fuse parameter value
+ * @redundant_fusing:	Redundant fusing select fuse parameter value
+ * @limitation:		CPR limitation select fuse parameter value
+ * @partial_binning:	Chip partial binning fuse parameter value which defines
+ *			limitations found on a given chip
+ * @vdd_mx_ret_fuse:	Defines the logic retention voltage of VDD_MX
+ * @vdd_apcc_ret_fuse:	Defines the logic retention voltage of VDD_APCC
+ * @aging_init_quot_diff:	Initial quotient difference between CPR aging
+ *			min and max sensors measured at time of manufacturing
+ *
+ * This struct holds the values for all of the fuses read from memory.  The
+ * values for ro_sel, init_voltage, target_quot, and quot_offset come from
+ * either the primary or redundant fuse locations depending upon the value of
+ * redundant_fusing.
+ */
+struct cpr3_msm8996_hmss_fuses {
+	u64	ro_sel[MSM8996_HMSS_FUSE_CORNERS];
+	u64	init_voltage[MSM8996_HMSS_FUSE_CORNERS];
+	u64	target_quot[MSM8996_HMSS_FUSE_CORNERS];
+	u64	quot_offset[MSM8996_HMSS_FUSE_CORNERS];
+	u64	cbf_voltage_offset[MSM8996_HMSS_FUSE_CORNERS];
+	u64	speed_bin;
+	u64	cpr_fusing_rev;
+	u64	redundant_fusing;
+	u64	limitation;
+	u64	partial_binning;
+	u64	vdd_mx_ret_fuse;
+	u64	vdd_apcc_ret_fuse;
+	u64	aging_init_quot_diff;
+};
+
+/*
+ * Fuse combos 0 -  7 map to CPR fusing revision 0 - 7 with speed bin fuse = 0.
+ * Fuse combos 8 - 15 map to CPR fusing revision 0 - 7 with speed bin fuse = 1.
+ * Fuse combos 16 - 23 map to CPR fusing revision 0 - 7 with speed bin fuse = 2.
+ */
+#define CPR3_MSM8996_HMSS_FUSE_COMBO_COUNT	24
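+
+/*
+ * The combo index is computed in cpr3_msm8996_hmss_read_fuse_data() below
+ * as fuse_combo = cpr_fusing_rev + 8 * speed_bin.  As an illustration
+ * (values chosen for the example, not read from any chip): fusing
+ * revision 3 on a speed bin 2 part selects fuse combo 3 + 8 * 2 = 19.
+ */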
+
+/*
+ * Constants which define the name of each fuse corner.  Note that no actual
+ * fuses are defined for LowSVS.  However, a mapping from corner to LowSVS
+ * is required in order to perform target quotient interpolation properly.
+ */
+enum cpr3_msm8996_hmss_fuse_corner {
+	CPR3_MSM8996_HMSS_FUSE_CORNER_MINSVS	= 0,
+	CPR3_MSM8996_HMSS_FUSE_CORNER_LOWSVS	= 1,
+	CPR3_MSM8996_HMSS_FUSE_CORNER_SVS	= 2,
+	CPR3_MSM8996_HMSS_FUSE_CORNER_NOM	= 3,
+	CPR3_MSM8996_HMSS_FUSE_CORNER_TURBO	= 4,
+};
+
+static const char * const cpr3_msm8996_hmss_fuse_corner_name[] = {
+	[CPR3_MSM8996_HMSS_FUSE_CORNER_MINSVS]	= "MinSVS",
+	[CPR3_MSM8996_HMSS_FUSE_CORNER_LOWSVS]	= "LowSVS",
+	[CPR3_MSM8996_HMSS_FUSE_CORNER_SVS]	= "SVS",
+	[CPR3_MSM8996_HMSS_FUSE_CORNER_NOM]	= "NOM",
+	[CPR3_MSM8996_HMSS_FUSE_CORNER_TURBO]	= "TURBO",
+};
+
+/* CPR3 hardware thread IDs */
+#define MSM8996_HMSS_POWER_CLUSTER_THREAD_ID		0
+#define MSM8996_HMSS_PERFORMANCE_CLUSTER_THREAD_ID	1
+
+/*
+ * MSM8996 HMSS fuse parameter locations:
+ *
+ * Structs are organized with the following dimensions:
+ *	Outer:  0 or 1 for power or performance cluster
+ *	Middle: 0 to 3 for fuse corners from lowest to highest corner
+ *	Inner:  large enough to hold the longest set of parameter segments which
+ *		fully defines a fuse parameter, +1 (for NULL termination).
+ *		Each segment corresponds to a contiguous group of bits from a
+ *		single fuse row.  These segments are concatenated together in
+ *		order to form the full fuse parameter value.  The segments for
+ *		a given parameter may correspond to different fuse rows.
+ *
+ * Note that there are only physically 4 sets of fuse parameters which
+ * correspond to the MinSVS, SVS, NOM, and TURBO fuse corners.  However, the SVS
+ * quotient offset fuse is used to define the target quotient for the LowSVS
+ * fuse corner.  In order to utilize LowSVS, it must be treated as if it were a
+ * real fully defined fuse corner.  Thus, LowSVS fuse parameter locations are
+ * specified.  These locations duplicate the SVS values in order to simplify
+ * interpolation logic.
+ */
+static const struct cpr3_fuse_param
+msm8996_hmss_ro_sel_param[2][MSM8996_HMSS_FUSE_CORNERS][2] = {
+	[MSM8996_HMSS_POWER_CLUSTER_THREAD_ID] = {
+		{{66, 38, 41}, {} },
+		{{66, 38, 41}, {} },
+		{{66, 38, 41}, {} },
+		{{66, 34, 37}, {} },
+		{{66, 30, 33}, {} },
+	},
+	[MSM8996_HMSS_PERFORMANCE_CLUSTER_THREAD_ID] = {
+		{{64, 54, 57}, {} },
+		{{64, 54, 57}, {} },
+		{{64, 54, 57}, {} },
+		{{64, 50, 53}, {} },
+		{{64, 46, 49}, {} },
+	},
+};
+
+static const struct cpr3_fuse_param
+msm8996_hmss_init_voltage_param[2][MSM8996_HMSS_FUSE_CORNERS][3] = {
+	[MSM8996_HMSS_POWER_CLUSTER_THREAD_ID] = {
+		{{67,  0,  5}, {} },
+		{{66, 58, 63}, {} },
+		{{66, 58, 63}, {} },
+		{{66, 52, 57}, {} },
+		{{66, 46, 51}, {} },
+	},
+	[MSM8996_HMSS_PERFORMANCE_CLUSTER_THREAD_ID] = {
+		{{65, 16, 21}, {} },
+		{{65, 10, 15}, {} },
+		{{65, 10, 15}, {} },
+		{{65,  4,  9}, {} },
+		{{64, 62, 63}, {65,  0,  3}, {} },
+	},
+};
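+
+/*
+ * Reading the tables above (an illustration of the encoding, not an
+ * additional fuse definition): the performance-cluster TURBO initial
+ * voltage entry {{64, 62, 63}, {65,  0,  3}, {} } is built from two
+ * segments, bits 62..63 of fuse row 64 concatenated with bits 0..3 of
+ * fuse row 65, which together form the 6-bit initial voltage fuse value.
+ */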
+
+static const struct cpr3_fuse_param
+msm8996_hmss_target_quot_param[2][MSM8996_HMSS_FUSE_CORNERS][3] = {
+	[MSM8996_HMSS_POWER_CLUSTER_THREAD_ID] = {
+		{{67, 42, 53}, {} },
+		{{67, 30, 41}, {} },
+		{{67, 30, 41}, {} },
+		{{67, 18, 29}, {} },
+		{{67,  6, 17}, {} },
+	},
+	[MSM8996_HMSS_PERFORMANCE_CLUSTER_THREAD_ID] = {
+		{{65, 58, 63}, {66,  0,  5}, {} },
+		{{65, 46, 57}, {} },
+		{{65, 46, 57}, {} },
+		{{65, 34, 45}, {} },
+		{{65, 22, 33}, {} },
+	},
+};
+
+static const struct cpr3_fuse_param
+msm8996_hmss_quot_offset_param[2][MSM8996_HMSS_FUSE_CORNERS][3] = {
+	[MSM8996_HMSS_POWER_CLUSTER_THREAD_ID] = {
+		{{} },
+		{{} },
+		{{68,  6, 13}, {} },
+		{{67, 62, 63}, {68, 0, 5}, {} },
+		{{67, 54, 61}, {} },
+	},
+	[MSM8996_HMSS_PERFORMANCE_CLUSTER_THREAD_ID] = {
+		{{} },
+		{{} },
+		{{66, 22, 29}, {} },
+		{{66, 14, 21}, {} },
+		{{66,  6, 13}, {} },
+	},
+};
+
+/*
+ * This fuse is used to define if the redundant set of fuses should be used for
+ * any particular feature.  CPR is one such feature.  The redundant CPR fuses
+ * should be used if this fuse parameter has a value of 1.
+ */
+static const struct cpr3_fuse_param msm8996_redundant_fusing_param[] = {
+	{73, 61, 63},
+	{},
+};
+#define MSM8996_CPR_REDUNDANT_FUSING 1
+
+static const struct cpr3_fuse_param
+msm8996_hmss_redun_ro_sel_param[2][MSM8996_HMSS_FUSE_CORNERS][2] = {
+	[MSM8996_HMSS_POWER_CLUSTER_THREAD_ID] = {
+		{{76, 36, 39}, {} },
+		{{76, 32, 35}, {} },
+		{{76, 32, 35}, {} },
+		{{76, 28, 31}, {} },
+		{{76, 24, 27}, {} },
+	},
+	[MSM8996_HMSS_PERFORMANCE_CLUSTER_THREAD_ID] = {
+		{{74, 52, 55}, {} },
+		{{74, 48, 51}, {} },
+		{{74, 48, 51}, {} },
+		{{74, 44, 47}, {} },
+		{{74, 40, 43}, {} },
+	},
+};
+
+static const struct cpr3_fuse_param
+msm8996_hmss_redun_init_voltage_param[2][MSM8996_HMSS_FUSE_CORNERS][3] = {
+	[MSM8996_HMSS_POWER_CLUSTER_THREAD_ID] = {
+		{{76, 58, 63}, {} },
+		{{76, 52, 57}, {} },
+		{{76, 52, 57}, {} },
+		{{76, 46, 51}, {} },
+		{{76, 40, 45}, {} },
+	},
+	[MSM8996_HMSS_PERFORMANCE_CLUSTER_THREAD_ID] = {
+		{{75, 10, 15}, {} },
+		{{75,  4,  9}, {} },
+		{{75,  4,  9}, {} },
+		{{74, 62, 63}, {75,  0,  3}, {} },
+		{{74, 56, 61}, {} },
+	},
+};
+
+static const struct cpr3_fuse_param
+msm8996_hmss_redun_target_quot_param[2][MSM8996_HMSS_FUSE_CORNERS][2] = {
+	[MSM8996_HMSS_POWER_CLUSTER_THREAD_ID] = {
+		{{77, 36, 47}, {} },
+		{{77, 24, 35}, {} },
+		{{77, 24, 35}, {} },
+		{{77, 12, 23}, {} },
+		{{77,  0, 11}, {} },
+	},
+	[MSM8996_HMSS_PERFORMANCE_CLUSTER_THREAD_ID] = {
+		{{75, 52, 63}, {} },
+		{{75, 40, 51}, {} },
+		{{75, 40, 51}, {} },
+		{{75, 28, 39}, {} },
+		{{75, 16, 27}, {} },
+	},
+};
+
+static const struct cpr3_fuse_param
+msm8996_hmss_redun_quot_offset_param[2][MSM8996_HMSS_FUSE_CORNERS][2] = {
+	[MSM8996_HMSS_POWER_CLUSTER_THREAD_ID] = {
+		{{} },
+		{{} },
+		{{68, 11, 18}, {} },
+		{{77, 56, 63}, {} },
+		{{77, 48, 55}, {} },
+	},
+	[MSM8996_HMSS_PERFORMANCE_CLUSTER_THREAD_ID] = {
+		{{} },
+		{{} },
+		{{76, 16, 23}, {} },
+		{{76,  8, 15}, {} },
+		{{76,  0,  7}, {} },
+	},
+};
+
+static const struct cpr3_fuse_param msm8996_cpr_fusing_rev_param[] = {
+	{39, 51, 53},
+	{},
+};
+
+static const struct cpr3_fuse_param msm8996_hmss_speed_bin_param[] = {
+	{38, 29, 31},
+	{},
+};
+
+static const struct cpr3_fuse_param msm8996_cpr_limitation_param[] = {
+	{41, 31, 32},
+	{},
+};
+
+static const struct cpr3_fuse_param msm8996_vdd_mx_ret_param[] = {
+	{41, 2, 4},
+	{},
+};
+
+static const struct cpr3_fuse_param msm8996_vdd_apcc_ret_param[] = {
+	{41, 52, 54},
+	{},
+};
+
+static const struct cpr3_fuse_param msm8996_cpr_partial_binning_param[] = {
+	{39, 55, 59},
+	{},
+};
+
+static const struct cpr3_fuse_param
+msm8996_hmss_aging_init_quot_diff_param[] = {
+	{68, 14, 19},
+	{},
+};
+
+static const struct cpr3_fuse_param
+msm8996pro_hmss_voltage_offset_param[MSM8996_HMSS_FUSE_CORNERS][4] = {
+	{{68, 50, 52}, {41, 63, 63}, {} },
+	{{62, 30, 31}, {62, 63, 63}, {66, 45, 45}, {} },
+	{{61, 35, 36}, {61, 62, 63}, {} },
+	{{61, 26, 26}, {61, 32, 34}, {} },
+	{{61, 22, 25}, {} },
+};
+
+#define MSM8996PRO_SOC_ID			4
+
+/*
+ * Some initial msm8996 parts cannot be used in a meaningful way by software.
+ * Other parts can only be used when operating with CPR disabled (i.e. at the
+ * fused open-loop voltage) when no voltage interpolation is applied.  A fuse
+ * parameter is provided so that software can properly handle these limitations.
+ */
+enum msm8996_cpr_limitation {
+	MSM8996_CPR_LIMITATION_NONE = 0,
+	MSM8996_CPR_LIMITATION_UNSUPPORTED = 2,
+	MSM8996_CPR_LIMITATION_NO_CPR_OR_INTERPOLATION = 3,
+};
+
+/*
+ * Some initial msm8996 parts cannot be operated at low voltages.  A fuse
+ * parameter is provided so that software can properly handle these limitations.
+ */
+enum msm8996_cpr_partial_binning {
+	MSM8996_CPR_PARTIAL_BINNING_SVS = 11,
+	MSM8996_CPR_PARTIAL_BINNING_NOM = 12,
+};
+
+/* Additional MSM8996 specific data: */
+
+/* Open loop voltage fuse reference voltages in microvolts for MSM8996 v1/v2 */
+static const int msm8996_v1_v2_hmss_fuse_ref_volt[MSM8996_HMSS_FUSE_CORNERS] = {
+	605000,
+	745000, /* Placeholder entry for LowSVS */
+	745000,
+	905000,
+	1015000,
+};
+
+/* Open loop voltage fuse reference voltages in microvolts for MSM8996 v3 */
+static const int msm8996_v3_hmss_fuse_ref_volt[MSM8996_HMSS_FUSE_CORNERS] = {
+	605000,
+	745000, /* Placeholder entry for LowSVS */
+	745000,
+	905000,
+	1140000,
+};
+
+/*
+ * Open loop voltage fuse reference voltages in microvolts for MSM8996 v3 with
+ * speed_bin == 1 and cpr_fusing_rev >= 5.
+ */
+static const int msm8996_v3_speed_bin1_rev5_hmss_fuse_ref_volt[
+						MSM8996_HMSS_FUSE_CORNERS] = {
+	605000,
+	745000, /* Placeholder entry for LowSVS */
+	745000,
+	905000,
+	1040000,
+};
+
+/* Defines mapping from retention fuse values to voltages in microvolts */
+static const int msm8996_vdd_apcc_fuse_ret_volt[] = {
+	600000, 550000, 500000, 450000, 400000, 350000, 300000, 600000,
+};
+
+static const int msm8996_vdd_mx_fuse_ret_volt[] = {
+	700000, 650000, 580000, 550000, 490000, 490000, 490000, 490000,
+};
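+
+/*
+ * Example reading of the retention tables above, assuming the fuse value
+ * indexes the table directly: a fuse value of 2 selects 500000 uV for
+ * VDD_APCC and 580000 uV for VDD_MX.
+ */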
+
+#define MSM8996_HMSS_FUSE_STEP_VOLT		10000
+#define MSM8996_HMSS_VOLTAGE_FUSE_SIZE		6
+#define MSM8996PRO_HMSS_CBF_FUSE_STEP_VOLT	10000
+#define MSM8996PRO_HMSS_CBF_VOLTAGE_FUSE_SIZE	4
+#define MSM8996_HMSS_QUOT_OFFSET_SCALE		5
+#define MSM8996_HMSS_AGING_INIT_QUOT_DIFF_SCALE	2
+#define MSM8996_HMSS_AGING_INIT_QUOT_DIFF_SIZE	6
+
+#define MSM8996_HMSS_CPR_SENSOR_COUNT		25
+#define MSM8996_HMSS_THREAD0_SENSOR_MIN		0
+#define MSM8996_HMSS_THREAD0_SENSOR_MAX		14
+#define MSM8996_HMSS_THREAD1_SENSOR_MIN		15
+#define MSM8996_HMSS_THREAD1_SENSOR_MAX		24
+
+#define MSM8996_HMSS_CPR_CLOCK_RATE		19200000
+
+#define MSM8996_HMSS_AGING_SENSOR_ID		11
+#define MSM8996_HMSS_AGING_BYPASS_MASK0		(GENMASK(7, 0) & ~BIT(3))
+
+/* Use scaled gate count (GCNT) for aging measurements */
+#define MSM8996_HMSS_AGING_GCNT_SCALING_FACTOR	1500
+
+/**
+ * cpr3_msm8996_hmss_use_voltage_offset_fuse() - return if this part utilizes
+ *		voltage offset fuses or not
+ * @vreg:		Pointer to the CPR3 regulator
+ *
+ * Return: true if this part utilizes voltage offset fuses, else false
+ */
+static inline bool cpr3_msm8996_hmss_use_voltage_offset_fuse(
+					struct cpr3_regulator *vreg)
+{
+	struct cpr3_msm8996_hmss_fuses *fuse = vreg->platform_fuses;
+
+	return vreg->thread->ctrl->soc_revision == MSM8996PRO_SOC_ID
+	       && fuse->cpr_fusing_rev >= 1
+	       && of_property_read_bool(vreg->of_node, "qcom,is-cbf-regulator");
+}
+
+/**
+ * cpr3_msm8996_hmss_read_fuse_data() - load HMSS specific fuse parameter values
+ * @vreg:		Pointer to the CPR3 regulator
+ *
+ * This function allocates a cpr3_msm8996_hmss_fuses struct, fills it with
+ * values read out of hardware fuses, and finally copies common fuse values
+ * into the CPR3 regulator struct.
+ *
+ * Return: 0 on success, errno on failure
+ */
+static int cpr3_msm8996_hmss_read_fuse_data(struct cpr3_regulator *vreg)
+{
+	void __iomem *base = vreg->thread->ctrl->fuse_base;
+	struct cpr3_msm8996_hmss_fuses *fuse;
+	bool redundant;
+	int i, id, rc;
+
+	fuse = devm_kzalloc(vreg->thread->ctrl->dev, sizeof(*fuse), GFP_KERNEL);
+	if (!fuse)
+		return -ENOMEM;
+
+	rc = cpr3_read_fuse_param(base, msm8996_hmss_speed_bin_param,
+				&fuse->speed_bin);
+	if (rc) {
+		cpr3_err(vreg, "Unable to read speed bin fuse, rc=%d\n", rc);
+		return rc;
+	}
+	cpr3_info(vreg, "speed bin = %llu\n", fuse->speed_bin);
+
+	rc = cpr3_read_fuse_param(base, msm8996_cpr_fusing_rev_param,
+				&fuse->cpr_fusing_rev);
+	if (rc) {
+		cpr3_err(vreg, "Unable to read CPR fusing revision fuse, rc=%d\n",
+			rc);
+		return rc;
+	}
+	cpr3_info(vreg, "CPR fusing revision = %llu\n", fuse->cpr_fusing_rev);
+
+	rc = cpr3_read_fuse_param(base, msm8996_redundant_fusing_param,
+				&fuse->redundant_fusing);
+	if (rc) {
+		cpr3_err(vreg, "Unable to read redundant fusing config fuse, rc=%d\n",
+			rc);
+		return rc;
+	}
+
+	redundant = (fuse->redundant_fusing == MSM8996_CPR_REDUNDANT_FUSING);
+	cpr3_info(vreg, "using redundant fuses = %c\n",
+		redundant ? 'Y' : 'N');
+
+	rc = cpr3_read_fuse_param(base, msm8996_cpr_limitation_param,
+				&fuse->limitation);
+	if (rc) {
+		cpr3_err(vreg, "Unable to read CPR limitation fuse, rc=%d\n",
+			rc);
+		return rc;
+	}
+	cpr3_info(vreg, "CPR limitation = %s\n",
+		fuse->limitation == MSM8996_CPR_LIMITATION_UNSUPPORTED
+		? "unsupported chip" : fuse->limitation
+			  == MSM8996_CPR_LIMITATION_NO_CPR_OR_INTERPOLATION
+		? "CPR disabled and no interpolation" : "none");
+
+	rc = cpr3_read_fuse_param(base, msm8996_cpr_partial_binning_param,
+				&fuse->partial_binning);
+	if (rc) {
+		cpr3_err(vreg, "Unable to read partial binning fuse, rc=%d\n",
+			rc);
+		return rc;
+	}
+	cpr3_info(vreg, "CPR partial binning limitation = %s\n",
+		fuse->partial_binning == MSM8996_CPR_PARTIAL_BINNING_SVS
+			? "SVS min voltage"
+		: fuse->partial_binning == MSM8996_CPR_PARTIAL_BINNING_NOM
+			? "NOM min voltage"
+		: "none");
+
+	rc = cpr3_read_fuse_param(base, msm8996_vdd_mx_ret_param,
+				&fuse->vdd_mx_ret_fuse);
+	if (rc) {
+		cpr3_err(vreg, "Unable to read VDD_MX retention fuse, rc=%d\n",
+			rc);
+		return rc;
+	}
+
+	rc = cpr3_read_fuse_param(base, msm8996_vdd_apcc_ret_param,
+				&fuse->vdd_apcc_ret_fuse);
+	if (rc) {
+		cpr3_err(vreg, "Unable to read VDD_APCC retention fuse, rc=%d\n",
+			rc);
+		return rc;
+	}
+
+	cpr3_info(vreg, "Retention voltage fuses: VDD_MX = %llu, VDD_APCC = %llu\n",
+		  fuse->vdd_mx_ret_fuse, fuse->vdd_apcc_ret_fuse);
+
+	rc = cpr3_read_fuse_param(base, msm8996_hmss_aging_init_quot_diff_param,
+				&fuse->aging_init_quot_diff);
+	if (rc) {
+		cpr3_err(vreg, "Unable to read aging initial quotient difference fuse, rc=%d\n",
+			rc);
+		return rc;
+	}
+
+	id = vreg->thread->thread_id;
+
+	for (i = 0; i < MSM8996_HMSS_FUSE_CORNERS; i++) {
+		rc = cpr3_read_fuse_param(base,
+			redundant
+			    ? msm8996_hmss_redun_init_voltage_param[id][i]
+			    : msm8996_hmss_init_voltage_param[id][i],
+			&fuse->init_voltage[i]);
+		if (rc) {
+			cpr3_err(vreg, "Unable to read fuse-corner %d initial voltage fuse, rc=%d\n",
+				i, rc);
+			return rc;
+		}
+
+		rc = cpr3_read_fuse_param(base,
+			redundant
+			    ? msm8996_hmss_redun_target_quot_param[id][i]
+			    : msm8996_hmss_target_quot_param[id][i],
+			&fuse->target_quot[i]);
+		if (rc) {
+			cpr3_err(vreg, "Unable to read fuse-corner %d target quotient fuse, rc=%d\n",
+				i, rc);
+			return rc;
+		}
+
+		rc = cpr3_read_fuse_param(base,
+			redundant
+			    ? msm8996_hmss_redun_ro_sel_param[id][i]
+			    : msm8996_hmss_ro_sel_param[id][i],
+			&fuse->ro_sel[i]);
+		if (rc) {
+			cpr3_err(vreg, "Unable to read fuse-corner %d RO select fuse, rc=%d\n",
+				i, rc);
+			return rc;
+		}
+
+		rc = cpr3_read_fuse_param(base,
+			redundant
+			    ? msm8996_hmss_redun_quot_offset_param[id][i]
+			    : msm8996_hmss_quot_offset_param[id][i],
+			&fuse->quot_offset[i]);
+		if (rc) {
+			cpr3_err(vreg, "Unable to read fuse-corner %d quotient offset fuse, rc=%d\n",
+				i, rc);
+			return rc;
+		}
+	}
+
+	vreg->fuse_combo = fuse->cpr_fusing_rev + 8 * fuse->speed_bin;
+	if (vreg->fuse_combo >= CPR3_MSM8996_HMSS_FUSE_COMBO_COUNT) {
+		cpr3_err(vreg, "invalid CPR fuse combo = %d found\n",
+			vreg->fuse_combo);
+		return -EINVAL;
+	}
+
+	vreg->speed_bin_fuse	= fuse->speed_bin;
+	vreg->cpr_rev_fuse	= fuse->cpr_fusing_rev;
+	vreg->fuse_corner_count	= MSM8996_HMSS_FUSE_CORNERS;
+	vreg->platform_fuses	= fuse;
+
+	if (cpr3_msm8996_hmss_use_voltage_offset_fuse(vreg)) {
+		for (i = 0; i < MSM8996_HMSS_FUSE_CORNERS; i++) {
+			rc = cpr3_read_fuse_param(base,
+				msm8996pro_hmss_voltage_offset_param[i],
+				&fuse->cbf_voltage_offset[i]);
+			if (rc) {
+				cpr3_err(vreg, "Unable to read fuse-corner %d CBF voltage offset fuse, rc=%d\n",
+					i, rc);
+				return rc;
+			}
+		}
+	}
+
+	return 0;
+}
+
+/**
+ * cpr3_hmss_apply_fused_voltage_offset() - adjust the fused voltages for each
+ *		fuse corner according to voltage offset fuse values
+ * @vreg:		Pointer to the CPR3 regulator
+ * @fuse_volt:		Pointer to an array of the fused voltage values; must
+ *			have length equal to vreg->fuse_corner_count
+ *
+ * Voltage values in fuse_volt are modified in place.
+ *
+ * Return: 0 on success, errno on failure
+ */
+static int cpr3_hmss_apply_fused_voltage_offset(struct cpr3_regulator *vreg,
+		int *fuse_volt)
+{
+	struct cpr3_msm8996_hmss_fuses *fuse = vreg->platform_fuses;
+	int i;
+
+	if (!cpr3_msm8996_hmss_use_voltage_offset_fuse(vreg))
+		return 0;
+
+	for (i = 0; i < vreg->fuse_corner_count; i++)
+		fuse_volt[i] += cpr3_convert_open_loop_voltage_fuse(
+					0,
+					MSM8996PRO_HMSS_CBF_FUSE_STEP_VOLT,
+					fuse->cbf_voltage_offset[i],
+					MSM8996PRO_HMSS_CBF_VOLTAGE_FUSE_SIZE);
+
+	return 0;
+}
+
+/**
+ * cpr3_hmss_parse_corner_data() - parse HMSS corner data from device tree
+ *		properties of the CPR3 regulator's device node
+ * @vreg:		Pointer to the CPR3 regulator
+ *
+ * Return: 0 on success, errno on failure
+ */
+static int cpr3_hmss_parse_corner_data(struct cpr3_regulator *vreg)
+{
+	int rc;
+
+	rc = cpr3_parse_common_corner_data(vreg);
+	if (rc) {
+		cpr3_err(vreg, "error reading corner data, rc=%d\n", rc);
+		return rc;
+	}
+
+	return rc;
+}
+
+/**
+ * cpr3_msm8996_hmss_calculate_open_loop_voltages() - calculate the open-loop
+ *		voltage for each corner of a CPR3 regulator
+ * @vreg:		Pointer to the CPR3 regulator
+ *
+ * If open-loop voltage interpolation is allowed in both device tree and in
+ * hardware fuses, then this function calculates the open-loop voltage for a
+ * given corner using linear interpolation.  This interpolation is performed
+ * using the processor frequencies of the lower and higher Fmax corners along
+ * with their fused open-loop voltages.
+ *
+ * If open-loop voltage interpolation is not allowed, then this function uses
+ * the Fmax fused open-loop voltage for all of the corners associated with a
+ * given fuse corner.
+ *
+ * Return: 0 on success, errno on failure
+ */
+static int cpr3_msm8996_hmss_calculate_open_loop_voltages(
+			struct cpr3_regulator *vreg)
+{
+	struct device_node *node = vreg->of_node;
+	struct cpr3_msm8996_hmss_fuses *fuse = vreg->platform_fuses;
+	int rc = 0;
+	bool allow_interpolation;
+	u64 freq_low, volt_low, freq_high, volt_high;
+	int i, j, soc_revision;
+	const int *ref_volt;
+	int *fuse_volt;
+	int *fmax_corner;
+
+	fuse_volt = kcalloc(vreg->fuse_corner_count, sizeof(*fuse_volt),
+				GFP_KERNEL);
+	fmax_corner = kcalloc(vreg->fuse_corner_count, sizeof(*fmax_corner),
+				GFP_KERNEL);
+	if (!fuse_volt || !fmax_corner) {
+		rc = -ENOMEM;
+		goto done;
+	}
+
+	soc_revision = vreg->thread->ctrl->soc_revision;
+	if (soc_revision == 1 || soc_revision == 2)
+		ref_volt = msm8996_v1_v2_hmss_fuse_ref_volt;
+	else if (soc_revision == 3 && fuse->speed_bin == 1
+				   && fuse->cpr_fusing_rev >= 5)
+		ref_volt = msm8996_v3_speed_bin1_rev5_hmss_fuse_ref_volt;
+	else
+		ref_volt = msm8996_v3_hmss_fuse_ref_volt;
+
+	for (i = 0; i < vreg->fuse_corner_count; i++) {
+		fuse_volt[i] = cpr3_convert_open_loop_voltage_fuse(
+			ref_volt[i],
+			MSM8996_HMSS_FUSE_STEP_VOLT, fuse->init_voltage[i],
+			MSM8996_HMSS_VOLTAGE_FUSE_SIZE);
+
+		/* Log fused open-loop voltage values for debugging purposes. */
+		if (i != CPR3_MSM8996_HMSS_FUSE_CORNER_LOWSVS)
+			cpr3_info(vreg, "fused %6s: open-loop=%7d uV\n",
+				cpr3_msm8996_hmss_fuse_corner_name[i],
+				fuse_volt[i]);
+	}
+
+	if (cpr3_msm8996_hmss_use_voltage_offset_fuse(vreg)) {
+		rc = cpr3_hmss_apply_fused_voltage_offset(vreg, fuse_volt);
+		if (rc) {
+			cpr3_err(vreg, "could not apply CBF voltage offsets, rc=%d\n",
+				rc);
+			goto done;
+		}
+
+		for (i = 0; i < vreg->fuse_corner_count; i++)
+			cpr3_info(vreg, "fused %6s: CBF offset open-loop=%7d uV\n",
+					cpr3_msm8996_hmss_fuse_corner_name[i],
+					fuse_volt[i]);
+	}
+
+	rc = cpr3_adjust_fused_open_loop_voltages(vreg, fuse_volt);
+	if (rc) {
+		cpr3_err(vreg, "fused open-loop voltage adjustment failed, rc=%d\n",
+			rc);
+		goto done;
+	}
+
+	allow_interpolation = of_property_read_bool(node,
+				"qcom,allow-voltage-interpolation");
+
+	/*
+	 * No LowSVS open-loop voltage fuse exists.  Instead, intermediate
+	 * voltages are interpolated between MinSVS and SVS.  Set the LowSVS
+	 * voltage to be equal to the adjusted SVS voltage in order to avoid
+	 * triggering an incorrect condition violation in the following loop.
+	 */
+	fuse_volt[CPR3_MSM8996_HMSS_FUSE_CORNER_LOWSVS]
+		= fuse_volt[CPR3_MSM8996_HMSS_FUSE_CORNER_SVS];
+
+	for (i = 1; i < vreg->fuse_corner_count; i++) {
+		if (fuse_volt[i] < fuse_volt[i - 1]) {
+			cpr3_debug(vreg, "fuse corner %d voltage=%d uV < fuse corner %d voltage=%d uV; overriding: fuse corner %d voltage=%d\n",
+				i, fuse_volt[i], i - 1, fuse_volt[i - 1],
+				i, fuse_volt[i - 1]);
+			fuse_volt[i] = fuse_volt[i - 1];
+		}
+	}
+
+	if (fuse->limitation == MSM8996_CPR_LIMITATION_NO_CPR_OR_INTERPOLATION)
+		allow_interpolation = false;
+
+	if (!allow_interpolation) {
+		/* Use fused open-loop voltage for lower frequencies. */
+		for (i = 0; i < vreg->corner_count; i++)
+			vreg->corner[i].open_loop_volt
+				= fuse_volt[vreg->corner[i].cpr_fuse_corner];
+		goto done;
+	}
+
+	for (i = 0; i < vreg->fuse_corner_count; i++)
+		fmax_corner[i] = vreg->fuse_corner_map[i];
+
+	/*
+	 * Interpolation is not possible for corners mapped to the lowest fuse
+	 * corner so use the fuse corner value directly.
+	 */
+	for (i = 0; i <= fmax_corner[0]; i++)
+		vreg->corner[i].open_loop_volt = fuse_volt[0];
+
+	/*
+	 * Interpolation is not possible for corners mapped above the highest
+	 * fuse corner so use the fuse corner value directly.
+	 */
+	j = vreg->fuse_corner_count - 1;
+	for (i = fmax_corner[j] + 1; i < vreg->corner_count; i++)
+		vreg->corner[i].open_loop_volt = fuse_volt[j];
+
+	/*
+	 * Corner LowSVS should be skipped for voltage interpolation
+	 * since no fuse exists for it.  Instead, the lowest interpolation
+	 * should be between MinSVS and SVS.
+	 */
+	for (i = CPR3_MSM8996_HMSS_FUSE_CORNER_LOWSVS;
+	     i < vreg->fuse_corner_count - 1; i++) {
+		fmax_corner[i] = fmax_corner[i + 1];
+		fuse_volt[i] = fuse_volt[i + 1];
+	}
+
+	/* Interpolate voltages for the higher fuse corners. */
+	for (i = 1; i < vreg->fuse_corner_count - 1; i++) {
+		freq_low = vreg->corner[fmax_corner[i - 1]].proc_freq;
+		volt_low = fuse_volt[i - 1];
+		freq_high = vreg->corner[fmax_corner[i]].proc_freq;
+		volt_high = fuse_volt[i];
+
+		for (j = fmax_corner[i - 1] + 1; j <= fmax_corner[i]; j++)
+			vreg->corner[j].open_loop_volt = cpr3_interpolate(
+				freq_low, volt_low, freq_high, volt_high,
+				vreg->corner[j].proc_freq);
+	}
+
+done:
+	if (rc == 0) {
+		cpr3_debug(vreg, "unadjusted per-corner open-loop voltages:\n");
+		for (i = 0; i < vreg->corner_count; i++)
+			cpr3_debug(vreg, "open-loop[%2d] = %d uV\n", i,
+				vreg->corner[i].open_loop_volt);
+
+		rc = cpr3_adjust_open_loop_voltages(vreg);
+		if (rc)
+			cpr3_err(vreg, "open-loop voltage adjustment failed, rc=%d\n",
+				rc);
+	}
+
+	kfree(fuse_volt);
+	kfree(fmax_corner);
+	return rc;
+}
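+
+/*
+ * Worked example of the interpolation above (illustrative numbers, not
+ * fused values): with a lower Fmax corner at 1000 MHz fused for 745000 uV
+ * and a higher Fmax corner at 1500 MHz fused for 905000 uV, a corner at
+ * 1250 MHz is assigned 745000 + (905000 - 745000) * (1250 - 1000) /
+ * (1500 - 1000) = 825000 uV.
+ */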
+
+/**
+ * cpr3_msm8996_hmss_set_no_interpolation_quotients() - use the fused target
+ *		quotient values for lower frequencies.
+ * @vreg:		Pointer to the CPR3 regulator
+ * @volt_adjust:	Pointer to array of per-corner closed-loop adjustment
+ *			voltages
+ * @volt_adjust_fuse:	Pointer to array of per-fuse-corner closed-loop
+ *			adjustment voltages
+ * @ro_scale:		Pointer to array of per-fuse-corner RO scaling factor
+ *			values with units of QUOT/V
+ *
+ * Return: 0 on success, errno on failure
+ */
+static int cpr3_msm8996_hmss_set_no_interpolation_quotients(
+			struct cpr3_regulator *vreg, int *volt_adjust,
+			int *volt_adjust_fuse, int *ro_scale)
+{
+	struct cpr3_msm8996_hmss_fuses *fuse = vreg->platform_fuses;
+	u32 quot, ro;
+	int quot_adjust;
+	int i, fuse_corner;
+
+	for (i = 0; i < vreg->corner_count; i++) {
+		fuse_corner = vreg->corner[i].cpr_fuse_corner;
+		quot = fuse->target_quot[fuse_corner];
+		quot_adjust = cpr3_quot_adjustment(ro_scale[fuse_corner],
+				volt_adjust_fuse[fuse_corner] + volt_adjust[i]);
+		ro = fuse->ro_sel[fuse_corner];
+		vreg->corner[i].target_quot[ro] = quot + quot_adjust;
+		if (quot_adjust)
+			cpr3_debug(vreg, "adjusted corner %d RO%u target quot: %u --> %u (%d uV)\n",
+				i, ro, quot, vreg->corner[i].target_quot[ro],
+				volt_adjust_fuse[fuse_corner] + volt_adjust[i]);
+	}
+
+	return 0;
+}
+
+/**
+ * cpr3_msm8996_hmss_calculate_target_quotients() - calculate the CPR target
+ *		quotient for each corner of a CPR3 regulator
+ * @vreg:		Pointer to the CPR3 regulator
+ *
+ * If target quotient interpolation is allowed in both device tree and in
+ * hardware fuses, then this function calculates the target quotient for a
+ * given corner using linear interpolation.  This interpolation is performed
+ * using the processor frequencies of the lower and higher Fmax corners along
+ * with the fused target quotient and quotient offset of the higher Fmax corner.
+ *
+ * If target quotient interpolation is not allowed, then this function uses
+ * the Fmax fused target quotient for all of the corners associated with a
+ * given fuse corner.
+ *
+ * Return: 0 on success, errno on failure
+ */
+static int cpr3_msm8996_hmss_calculate_target_quotients(
+			struct cpr3_regulator *vreg)
+{
+	struct cpr3_msm8996_hmss_fuses *fuse = vreg->platform_fuses;
+	int rc;
+	bool allow_interpolation;
+	u64 freq_low, freq_high, prev_quot;
+	u64 *quot_low;
+	u64 *quot_high;
+	u32 quot, ro;
+	int i, j, fuse_corner, quot_adjust;
+	int *fmax_corner;
+	int *volt_adjust, *volt_adjust_fuse, *ro_scale;
+
+	/* Log fused quotient values for debugging purposes. */
+	cpr3_info(vreg, "fused MinSVS: quot[%2llu]=%4llu\n",
+		fuse->ro_sel[CPR3_MSM8996_HMSS_FUSE_CORNER_MINSVS],
+		fuse->target_quot[CPR3_MSM8996_HMSS_FUSE_CORNER_MINSVS]);
+	for (i = CPR3_MSM8996_HMSS_FUSE_CORNER_SVS;
+	     i <= CPR3_MSM8996_HMSS_FUSE_CORNER_TURBO; i++)
+		cpr3_info(vreg, "fused %6s: quot[%2llu]=%4llu, quot_offset[%2llu]=%4llu\n",
+			cpr3_msm8996_hmss_fuse_corner_name[i],
+			fuse->ro_sel[i], fuse->target_quot[i], fuse->ro_sel[i],
+			fuse->quot_offset[i] * MSM8996_HMSS_QUOT_OFFSET_SCALE);
+
+	allow_interpolation = of_property_read_bool(vreg->of_node,
+					"qcom,allow-quotient-interpolation");
+
+	if (fuse->limitation == MSM8996_CPR_LIMITATION_NO_CPR_OR_INTERPOLATION)
+		allow_interpolation = false;
+
+	volt_adjust = kcalloc(vreg->corner_count, sizeof(*volt_adjust),
+					GFP_KERNEL);
+	volt_adjust_fuse = kcalloc(vreg->fuse_corner_count,
+					sizeof(*volt_adjust_fuse), GFP_KERNEL);
+	ro_scale = kcalloc(vreg->fuse_corner_count, sizeof(*ro_scale),
+					GFP_KERNEL);
+	fmax_corner = kcalloc(vreg->fuse_corner_count, sizeof(*fmax_corner),
+					GFP_KERNEL);
+	quot_low = kcalloc(vreg->fuse_corner_count, sizeof(*quot_low),
+					GFP_KERNEL);
+	quot_high = kcalloc(vreg->fuse_corner_count, sizeof(*quot_high),
+					GFP_KERNEL);
+	if (!volt_adjust || !volt_adjust_fuse || !ro_scale ||
+	    !fmax_corner || !quot_low || !quot_high) {
+		rc = -ENOMEM;
+		goto done;
+	}
+
+	rc = cpr3_parse_closed_loop_voltage_adjustments(vreg, &fuse->ro_sel[0],
+				volt_adjust, volt_adjust_fuse, ro_scale);
+	if (rc) {
+		cpr3_err(vreg, "could not load closed-loop voltage adjustments, rc=%d\n",
+			rc);
+		goto done;
+	}
+
+	rc = cpr3_hmss_apply_fused_voltage_offset(vreg, volt_adjust_fuse);
+	if (rc) {
+		cpr3_err(vreg, "could not apply CBF voltage offsets, rc=%d\n",
+			rc);
+		goto done;
+	}
+
+	if (!allow_interpolation) {
+		/* Use fused target quotients for lower frequencies. */
+		rc = cpr3_msm8996_hmss_set_no_interpolation_quotients(vreg,
+				volt_adjust, volt_adjust_fuse, ro_scale);
+		goto done;
+	}
+
+	for (i = 0; i < vreg->fuse_corner_count; i++)
+		fmax_corner[i] = vreg->fuse_corner_map[i];
+
+	/*
+	 * Interpolation is not possible for corners mapped to the lowest fuse
+	 * corner so use the fuse corner value directly.
+	 */
+	i = CPR3_MSM8996_HMSS_FUSE_CORNER_MINSVS;
+	quot_adjust = cpr3_quot_adjustment(ro_scale[i], volt_adjust_fuse[i]);
+	quot = fuse->target_quot[i] + quot_adjust;
+	quot_high[i] = quot;
+	ro = fuse->ro_sel[i];
+	if (quot_adjust)
+		cpr3_debug(vreg, "adjusted fuse corner %d RO%u target quot: %llu --> %u (%d uV)\n",
+			i, ro, fuse->target_quot[i], quot, volt_adjust_fuse[i]);
+	for (i = 0; i <= fmax_corner[CPR3_MSM8996_HMSS_FUSE_CORNER_MINSVS]; i++)
+		vreg->corner[i].target_quot[ro] = quot;
+
+	/*
+	 * Interpolation is not possible for corners mapped above the highest
+	 * fuse corner so use the fuse corner value directly.
+	 */
+	j = vreg->fuse_corner_count - 1;
+	quot_adjust = cpr3_quot_adjustment(ro_scale[j], volt_adjust_fuse[j]);
+	quot = fuse->target_quot[j] + quot_adjust;
+	ro = fuse->ro_sel[j];
+	for (i = fmax_corner[j] + 1; i < vreg->corner_count; i++)
+		vreg->corner[i].target_quot[ro] = quot;
+
+	/*
+	 * The LowSVS target quotient is defined as:
+	 *	(SVS target quotient) - (the unpacked SVS quotient offset)
+	 * MinSVS, LowSVS, and SVS fuse corners all share the same RO so it is
+	 * possible to interpolate between their target quotient values.
+	 */
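+	/*
+	 * Purely illustrative example (not fused data): with an SVS target
+	 * quotient of 1000, a packed SVS quotient offset of 10, and an offset
+	 * scale of 5, the unpacked offset is 50 and the derived LowSVS
+	 * quotient is 1000 - 50 = 950.
+	 */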
+	i = CPR3_MSM8996_HMSS_FUSE_CORNER_LOWSVS;
+	quot_high[i] = fuse->target_quot[CPR3_MSM8996_HMSS_FUSE_CORNER_SVS]
+			- fuse->quot_offset[CPR3_MSM8996_HMSS_FUSE_CORNER_SVS]
+				* MSM8996_HMSS_QUOT_OFFSET_SCALE;
+	quot_low[i] = fuse->target_quot[CPR3_MSM8996_HMSS_FUSE_CORNER_MINSVS];
+	if (quot_high[i] < quot_low[i]) {
+		cpr3_info(vreg, "quot_lowsvs=%llu < quot_minsvs=%llu; overriding: quot_lowsvs=%llu\n",
+			quot_high[i], quot_low[i], quot_low[i]);
+		quot_high[i] = quot_low[i];
+	}
+	if (fuse->ro_sel[CPR3_MSM8996_HMSS_FUSE_CORNER_MINSVS]
+	    != fuse->ro_sel[CPR3_MSM8996_HMSS_FUSE_CORNER_SVS]) {
+		cpr3_info(vreg, "MinSVS RO=%llu != SVS RO=%llu; disabling LowSVS interpolation\n",
+			fuse->ro_sel[CPR3_MSM8996_HMSS_FUSE_CORNER_MINSVS],
+			fuse->ro_sel[CPR3_MSM8996_HMSS_FUSE_CORNER_SVS]);
+		quot_low[i] = quot_high[i];
+	}
+
+	for (i = CPR3_MSM8996_HMSS_FUSE_CORNER_SVS;
+	     i < vreg->fuse_corner_count; i++) {
+		quot_high[i] = fuse->target_quot[i];
+		if (fuse->ro_sel[i] == fuse->ro_sel[i - 1])
+			quot_low[i] = quot_high[i - 1];
+		else
+			quot_low[i] = quot_high[i]
+					- fuse->quot_offset[i]
+					  * MSM8996_HMSS_QUOT_OFFSET_SCALE;
+		if (quot_high[i] < quot_low[i]) {
+			cpr3_debug(vreg, "quot_high[%d]=%llu < quot_low[%d]=%llu; overriding: quot_high[%d]=%llu\n",
+				i, quot_high[i], i, quot_low[i],
+				i, quot_low[i]);
+			quot_high[i] = quot_low[i];
+		}
+	}
+
+	/* Perform per-fuse-corner target quotient adjustment */
+	for (i = 1; i < vreg->fuse_corner_count; i++) {
+		quot_adjust = cpr3_quot_adjustment(ro_scale[i],
+						   volt_adjust_fuse[i]);
+		if (quot_adjust) {
+			prev_quot = quot_high[i];
+			quot_high[i] += quot_adjust;
+			cpr3_debug(vreg, "adjusted fuse corner %d RO%llu target quot: %llu --> %llu (%d uV)\n",
+				i, fuse->ro_sel[i], prev_quot, quot_high[i],
+				volt_adjust_fuse[i]);
+		}
+
+		if (fuse->ro_sel[i] == fuse->ro_sel[i - 1])
+			quot_low[i] = quot_high[i - 1];
+		else
+			quot_low[i] += cpr3_quot_adjustment(ro_scale[i],
+						    volt_adjust_fuse[i - 1]);
+
+		if (quot_high[i] < quot_low[i]) {
+			cpr3_debug(vreg, "quot_high[%d]=%llu < quot_low[%d]=%llu after adjustment; overriding: quot_high[%d]=%llu\n",
+				i, quot_high[i], i, quot_low[i],
+				i, quot_low[i]);
+			quot_high[i] = quot_low[i];
+		}
+	}
+
+	/* Interpolate target quotients for the higher fuse corners. */
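+	/*
+	 * Sketch of the math behind cpr3_interpolate(): linear interpolation
+	 * between the two bounding Fmax points,
+	 *
+	 *   quot(f) = quot_low + (quot_high - quot_low)
+	 *             * (f - freq_low) / (freq_high - freq_low)
+	 */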
+	for (i = 1; i < vreg->fuse_corner_count; i++) {
+		freq_low = vreg->corner[fmax_corner[i - 1]].proc_freq;
+		freq_high = vreg->corner[fmax_corner[i]].proc_freq;
+
+		ro = fuse->ro_sel[i];
+		for (j = fmax_corner[i - 1] + 1; j <= fmax_corner[i]; j++)
+			vreg->corner[j].target_quot[ro] = cpr3_interpolate(
+				freq_low, quot_low[i], freq_high, quot_high[i],
+				vreg->corner[j].proc_freq);
+	}
+
+	/* Perform per-corner target quotient adjustment */
+	for (i = 0; i < vreg->corner_count; i++) {
+		fuse_corner = vreg->corner[i].cpr_fuse_corner;
+		ro = fuse->ro_sel[fuse_corner];
+		quot_adjust = cpr3_quot_adjustment(ro_scale[fuse_corner],
+						   volt_adjust[i]);
+		if (quot_adjust) {
+			prev_quot = vreg->corner[i].target_quot[ro];
+			vreg->corner[i].target_quot[ro] += quot_adjust;
+			cpr3_debug(vreg, "adjusted corner %d RO%u target quot: %llu --> %u (%d uV)\n",
+				i, ro, prev_quot,
+				vreg->corner[i].target_quot[ro],
+				volt_adjust[i]);
+		}
+	}
+
+	/* Ensure that target quotients increase monotonically */
+	for (i = 1; i < vreg->corner_count; i++) {
+		ro = fuse->ro_sel[vreg->corner[i].cpr_fuse_corner];
+		if (fuse->ro_sel[vreg->corner[i - 1].cpr_fuse_corner] == ro
+		    && vreg->corner[i].target_quot[ro]
+				< vreg->corner[i - 1].target_quot[ro]) {
+			cpr3_debug(vreg, "adjusted corner %d RO%u target quot=%u < adjusted corner %d RO%u target quot=%u; overriding: corner %d RO%u target quot=%u\n",
+				i, ro, vreg->corner[i].target_quot[ro],
+				i - 1, ro, vreg->corner[i - 1].target_quot[ro],
+				i, ro, vreg->corner[i - 1].target_quot[ro]);
+			vreg->corner[i].target_quot[ro]
+				= vreg->corner[i - 1].target_quot[ro];
+		}
+	}
+
+done:
+	kfree(volt_adjust);
+	kfree(volt_adjust_fuse);
+	kfree(ro_scale);
+	kfree(fmax_corner);
+	kfree(quot_low);
+	kfree(quot_high);
+	return rc;
+}
+
+/**
+ * cpr3_msm8996_partial_binning_override() - override the voltage and quotient
+ *		settings for low corners based upon the value of the partial
+ *		binning fuse
+ * @vreg:		Pointer to the CPR3 regulator
+ *
+ * Some parts are not able to operate at low voltages.  The partial binning
+ * fuse specifies if a given part has such limitations.
+ *
+ * Return: 0 on success, errno on failure
+ */
+static int cpr3_msm8996_partial_binning_override(struct cpr3_regulator *vreg)
+{
+	struct cpr3_msm8996_hmss_fuses *fuse = vreg->platform_fuses;
+	int i, fuse_corner, fmax_corner;
+
+	if (fuse->partial_binning == MSM8996_CPR_PARTIAL_BINNING_SVS)
+		fuse_corner = CPR3_MSM8996_HMSS_FUSE_CORNER_SVS;
+	else if (fuse->partial_binning == MSM8996_CPR_PARTIAL_BINNING_NOM)
+		fuse_corner = CPR3_MSM8996_HMSS_FUSE_CORNER_NOM;
+	else
+		return 0;
+
+	cpr3_info(vreg, "overriding voltages and quotients for all corners below %s Fmax\n",
+		cpr3_msm8996_hmss_fuse_corner_name[fuse_corner]);
+
+	fmax_corner = -1;
+	for (i = vreg->corner_count - 1; i >= 0; i--) {
+		if (vreg->corner[i].cpr_fuse_corner == fuse_corner) {
+			fmax_corner = i;
+			break;
+		}
+	}
+	if (fmax_corner < 0) {
+		cpr3_err(vreg, "could not find %s Fmax corner\n",
+			cpr3_msm8996_hmss_fuse_corner_name[fuse_corner]);
+		return -EINVAL;
+	}
+
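+	/*
+	 * Copy the Fmax corner's full parameter set over every lower corner.
+	 * Illustrative example: if the SVS Fmax corner is at index 3, then
+	 * corners 0..2 all take on corner 3's voltages and target quotients.
+	 */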
+	for (i = 0; i < fmax_corner; i++)
+		vreg->corner[i] = vreg->corner[fmax_corner];
+
+	return 0;
+}
+
+/**
+ * cpr3_hmss_print_settings() - print out HMSS CPR configuration settings into
+ *		the kernel log for debugging purposes
+ * @vreg:		Pointer to the CPR3 regulator
+ */
+static void cpr3_hmss_print_settings(struct cpr3_regulator *vreg)
+{
+	struct cpr3_corner *corner;
+	int i;
+
+	cpr3_debug(vreg, "Corner: Frequency (Hz), Fuse Corner, Floor (uV), Open-Loop (uV), Ceiling (uV)\n");
+	for (i = 0; i < vreg->corner_count; i++) {
+		corner = &vreg->corner[i];
+		cpr3_debug(vreg, "%3d: %10u, %2d, %7d, %7d, %7d\n",
+			i, corner->proc_freq, corner->cpr_fuse_corner,
+			corner->floor_volt, corner->open_loop_volt,
+			corner->ceiling_volt);
+	}
+
+	if (vreg->thread->ctrl->apm)
+		cpr3_debug(vreg, "APM threshold = %d uV, APM adjust = %d uV\n",
+			vreg->thread->ctrl->apm_threshold_volt,
+			vreg->thread->ctrl->apm_adj_volt);
+}
+
+/**
+ * cpr3_hmss_init_thread() - perform steps necessary to initialize the
+ *		configuration data for a CPR3 thread
+ * @thread:		Pointer to the CPR3 thread
+ *
+ * Return: 0 on success, errno on failure
+ */
+static int cpr3_hmss_init_thread(struct cpr3_thread *thread)
+{
+	int rc;
+
+	rc = cpr3_parse_common_thread_data(thread);
+	if (rc) {
+		cpr3_err(thread->ctrl, "thread %u unable to read CPR thread data from device tree, rc=%d\n",
+			thread->thread_id, rc);
+		return rc;
+	}
+
+	return 0;
+}
+
+#define MAX_VREG_NAME_SIZE 25
+/**
+ * cpr3_hmss_kvreg_init() - initialize HMSS Kryo Regulator data for a CPR3
+ *		regulator
+ * @vreg:		Pointer to the CPR3 regulator
+ *
+ * This function loads Kryo Regulator data from device tree if it is present
+ * and requests a handle to the appropriate Kryo regulator device. In addition,
+ * it initializes Kryo Regulator data originating from hardware fuses, such as
+ * the LDO retention voltage, and requests the Kryo retention regulator to
+ * be configured to that value.
+ *
+ * Return: 0 on success, errno on failure
+ */
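+/*
+ * Example supply wiring in the controller's device tree node (the phandle
+ * names are illustrative, not taken from a shipping device tree):
+ *
+ *	vdd-thread0-ldo-supply = <&kryo0_ldo_vreg>;
+ *	vdd-thread0-ldo-ret-supply = <&kryo0_ret_vreg>;
+ */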
+static int cpr3_hmss_kvreg_init(struct cpr3_regulator *vreg)
+{
+	struct cpr3_msm8996_hmss_fuses *fuse = vreg->platform_fuses;
+	struct device_node *node = vreg->of_node;
+	struct cpr3_controller *ctrl = vreg->thread->ctrl;
+	int id = vreg->thread->thread_id;
+	char kvreg_name_buf[MAX_VREG_NAME_SIZE];
+	int rc;
+
+	scnprintf(kvreg_name_buf, MAX_VREG_NAME_SIZE,
+		"vdd-thread%d-ldo-supply", id);
+
+	if (!of_find_property(ctrl->dev->of_node, kvreg_name_buf, NULL))
+		return 0;
+	else if (!of_find_property(node, "qcom,ldo-min-headroom-voltage", NULL))
+		return 0;
+
+	scnprintf(kvreg_name_buf, MAX_VREG_NAME_SIZE, "vdd-thread%d-ldo", id);
+
+	vreg->ldo_regulator = devm_regulator_get(ctrl->dev, kvreg_name_buf);
+	if (IS_ERR(vreg->ldo_regulator)) {
+		rc = PTR_ERR(vreg->ldo_regulator);
+		if (rc != -EPROBE_DEFER)
+			cpr3_err(vreg, "unable to request %s regulator, rc=%d\n",
+				 kvreg_name_buf, rc);
+		return rc;
+	}
+
+	vreg->ldo_regulator_bypass = BHS_MODE;
+
+	scnprintf(kvreg_name_buf, MAX_VREG_NAME_SIZE, "vdd-thread%d-ldo-ret",
+		  id);
+
+	vreg->ldo_ret_regulator = devm_regulator_get(ctrl->dev, kvreg_name_buf);
+	if (IS_ERR(vreg->ldo_ret_regulator)) {
+		rc = PTR_ERR(vreg->ldo_ret_regulator);
+		if (rc != -EPROBE_DEFER)
+			cpr3_err(vreg, "unable to request %s regulator, rc=%d\n",
+				 kvreg_name_buf, rc);
+		return rc;
+	}
+
+	if (!ctrl->system_supply_max_volt) {
+		cpr3_err(ctrl, "system-supply max voltage must be specified\n");
+		return -EINVAL;
+	}
+
+	rc = of_property_read_u32(node, "qcom,ldo-min-headroom-voltage",
+				&vreg->ldo_min_headroom_volt);
+	if (rc) {
+		cpr3_err(vreg, "error reading qcom,ldo-min-headroom-voltage, rc=%d\n",
+			rc);
+		return rc;
+	}
+
+	rc = of_property_read_u32(node, "qcom,ldo-max-headroom-voltage",
+				  &vreg->ldo_max_headroom_volt);
+	if (rc) {
+		cpr3_err(vreg, "error reading qcom,ldo-max-headroom-voltage, rc=%d\n",
+			 rc);
+		return rc;
+	}
+
+	rc = of_property_read_u32(node, "qcom,ldo-max-voltage",
+				&vreg->ldo_max_volt);
+	if (rc) {
+		cpr3_err(vreg, "error reading qcom,ldo-max-voltage, rc=%d\n",
+			rc);
+		return rc;
+	}
+
+	/* Determine the CPU retention voltage based on fused data */
+	vreg->ldo_ret_volt =
+		max(msm8996_vdd_apcc_fuse_ret_volt[fuse->vdd_apcc_ret_fuse],
+		    msm8996_vdd_mx_fuse_ret_volt[fuse->vdd_mx_ret_fuse]);
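+	/*
+	 * Taking the larger of the two fused values ensures that both the
+	 * APCC and MX retention requirements are satisfied simultaneously.
+	 */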
+
+	rc = regulator_set_voltage(vreg->ldo_ret_regulator, vreg->ldo_ret_volt,
+				   INT_MAX);
+	if (rc < 0) {
+		cpr3_err(vreg, "regulator_set_voltage(ldo_ret) == %d failed, rc=%d\n",
+			 vreg->ldo_ret_volt, rc);
+		return rc;
+	}
+
+	/* optional properties, do not error out if missing */
+	of_property_read_u32(node, "qcom,ldo-adjust-voltage",
+			     &vreg->ldo_adjust_volt);
+
+	vreg->ldo_mode_allowed = !of_property_read_bool(node,
+							"qcom,ldo-disable");
+
+	cpr3_info(vreg, "LDO min headroom=%d uV, LDO max headroom=%d uV, LDO adj=%d uV, LDO mode=%s, LDO retention=%d uV\n",
+		  vreg->ldo_min_headroom_volt,
+		  vreg->ldo_max_headroom_volt,
+		  vreg->ldo_adjust_volt,
+		  vreg->ldo_mode_allowed ? "allowed" : "disallowed",
+		  vreg->ldo_ret_volt);
+
+	return 0;
+}
+
+/**
+ * cpr3_hmss_mem_acc_init() - initialize mem-acc regulator data for
+ *		a CPR3 regulator
+ * @vreg:		Pointer to the CPR3 regulator
+ *
+ * This function loads mem-acc data from device tree to enable
+ * the control of mem-acc settings based upon the CPR3 regulator
+ * output voltage.
+ *
+ * Return: 0 on success, errno on failure
+ */
+static int cpr3_hmss_mem_acc_init(struct cpr3_regulator *vreg)
+{
+	struct cpr3_controller *ctrl = vreg->thread->ctrl;
+	int id = vreg->thread->thread_id;
+	char mem_acc_vreg_name_buf[MAX_VREG_NAME_SIZE];
+	int rc;
+
+	scnprintf(mem_acc_vreg_name_buf, MAX_VREG_NAME_SIZE,
+		  "mem-acc-thread%d-supply", id);
+
+	if (!of_find_property(ctrl->dev->of_node, mem_acc_vreg_name_buf,
+			      NULL)) {
+		cpr3_debug(vreg, "not using memory accelerator regulator\n");
+		return 0;
+	} else if (!of_property_read_bool(vreg->of_node, "qcom,uses-mem-acc")) {
+		return 0;
+	}
+
+	scnprintf(mem_acc_vreg_name_buf, MAX_VREG_NAME_SIZE,
+		  "mem-acc-thread%d", id);
+
+	vreg->mem_acc_regulator = devm_regulator_get(ctrl->dev,
+						     mem_acc_vreg_name_buf);
+	if (IS_ERR(vreg->mem_acc_regulator)) {
+		rc = PTR_ERR(vreg->mem_acc_regulator);
+		if (rc != -EPROBE_DEFER)
+			cpr3_err(vreg, "unable to request %s regulator, rc=%d\n",
+				 mem_acc_vreg_name_buf, rc);
+		return rc;
+	}
+
+	return 0;
+}
+
+/**
+ * cpr3_hmss_init_regulator() - perform all steps necessary to initialize the
+ *		configuration data for a CPR3 regulator
+ * @vreg:		Pointer to the CPR3 regulator
+ *
+ * Return: 0 on success, errno on failure
+ */
+static int cpr3_hmss_init_regulator(struct cpr3_regulator *vreg)
+{
+	struct cpr3_msm8996_hmss_fuses *fuse;
+	int rc;
+
+	rc = cpr3_msm8996_hmss_read_fuse_data(vreg);
+	if (rc) {
+		cpr3_err(vreg, "unable to read CPR fuse data, rc=%d\n", rc);
+		return rc;
+	}
+
+	rc = cpr3_hmss_kvreg_init(vreg);
+	if (rc) {
+		if (rc != -EPROBE_DEFER)
+			cpr3_err(vreg, "unable to initialize Kryo Regulator settings, rc=%d\n",
+				 rc);
+		return rc;
+	}
+
+	rc = cpr3_hmss_mem_acc_init(vreg);
+	if (rc) {
+		if (rc != -EPROBE_DEFER)
+			cpr3_err(vreg, "unable to initialize mem-acc regulator settings, rc=%d\n",
+				 rc);
+		return rc;
+	}
+
+	fuse = vreg->platform_fuses;
+	if (fuse->limitation == MSM8996_CPR_LIMITATION_UNSUPPORTED) {
+		cpr3_err(vreg, "this chip requires an unsupported voltage\n");
+		return -EPERM;
+	} else if (fuse->limitation
+			== MSM8996_CPR_LIMITATION_NO_CPR_OR_INTERPOLATION) {
+		vreg->thread->ctrl->cpr_allowed_hw = false;
+	}
+
+	rc = of_property_read_u32(vreg->of_node, "qcom,cpr-pd-bypass-mask",
+				&vreg->pd_bypass_mask);
+	if (rc) {
+		cpr3_err(vreg, "error reading qcom,cpr-pd-bypass-mask, rc=%d\n",
+			rc);
+		return rc;
+	}
+
+	rc = cpr3_hmss_parse_corner_data(vreg);
+	if (rc) {
+		cpr3_err(vreg, "unable to read CPR corner data from device tree, rc=%d\n",
+			rc);
+		return rc;
+	}
+
+	if (of_find_property(vreg->of_node, "qcom,cpr-dynamic-floor-corner",
+				NULL)) {
+		rc = cpr3_parse_array_property(vreg,
+			"qcom,cpr-dynamic-floor-corner",
+			1, &vreg->dynamic_floor_corner);
+		if (rc) {
+			cpr3_err(vreg, "error reading qcom,cpr-dynamic-floor-corner, rc=%d\n",
+				rc);
+			return rc;
+		}
+
+		if (vreg->dynamic_floor_corner <= 0) {
+			vreg->uses_dynamic_floor = false;
+		} else if (vreg->dynamic_floor_corner < CPR3_CORNER_OFFSET
+			   || vreg->dynamic_floor_corner
+				> vreg->corner_count - 1 + CPR3_CORNER_OFFSET) {
+			cpr3_err(vreg, "dynamic floor corner=%d not in range [%d, %d]\n",
+				vreg->dynamic_floor_corner, CPR3_CORNER_OFFSET,
+				vreg->corner_count - 1 + CPR3_CORNER_OFFSET);
+			return -EINVAL;
+		}
+
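+		/*
+		 * Device tree corner numbering starts at CPR3_CORNER_OFFSET;
+		 * convert it to the zero-based index used internally.
+		 */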
+		vreg->dynamic_floor_corner -= CPR3_CORNER_OFFSET;
+		vreg->uses_dynamic_floor = true;
+	}
+
+	rc = cpr3_msm8996_hmss_calculate_open_loop_voltages(vreg);
+	if (rc) {
+		cpr3_err(vreg, "unable to calculate open-loop voltages, rc=%d\n",
+			rc);
+		return rc;
+	}
+
+	rc = cpr3_limit_open_loop_voltages(vreg);
+	if (rc) {
+		cpr3_err(vreg, "unable to limit open-loop voltages, rc=%d\n",
+			rc);
+		return rc;
+	}
+
+	cpr3_open_loop_voltage_as_ceiling(vreg);
+
+	rc = cpr3_limit_floor_voltages(vreg);
+	if (rc) {
+		cpr3_err(vreg, "unable to limit floor voltages, rc=%d\n", rc);
+		return rc;
+	}
+
+	rc = cpr3_msm8996_hmss_calculate_target_quotients(vreg);
+	if (rc) {
+		cpr3_err(vreg, "unable to calculate target quotients, rc=%d\n",
+			rc);
+		return rc;
+	}
+
+	rc = cpr3_msm8996_partial_binning_override(vreg);
+	if (rc) {
+		cpr3_err(vreg, "unable to override voltages and quotients based on partial binning fuse, rc=%d\n",
+			rc);
+		return rc;
+	}
+
+	cpr3_hmss_print_settings(vreg);
+
+	return 0;
+}
+
+/**
+ * cpr3_hmss_init_aging() - perform HMSS CPR3 controller specific
+ *		aging initializations
+ * @ctrl:		Pointer to the CPR3 controller
+ *
+ * Return: 0 on success, errno on failure
+ */
+static int cpr3_hmss_init_aging(struct cpr3_controller *ctrl)
+{
+	struct cpr3_msm8996_hmss_fuses *fuse = NULL;
+	struct cpr3_regulator *vreg = NULL;
+	u32 aging_ro_scale;
+	int i, j, rc;
+
+	for (i = 0; i < ctrl->thread_count; i++) {
+		for (j = 0; j < ctrl->thread[i].vreg_count; j++) {
+			if (ctrl->thread[i].vreg[j].aging_allowed) {
+				ctrl->aging_required = true;
+				vreg = &ctrl->thread[i].vreg[j];
+				fuse = vreg->platform_fuses;
+				break;
+			}
+		}
+	}
+
+	if (!ctrl->aging_required || !fuse || !vreg)
+		return 0;
+
+	rc = cpr3_parse_array_property(vreg, "qcom,cpr-aging-ro-scaling-factor",
+					1, &aging_ro_scale);
+	if (rc)
+		return rc;
+
+	if (aging_ro_scale == 0) {
+		cpr3_err(ctrl, "aging RO scaling factor is invalid: %u\n",
+			aging_ro_scale);
+		return -EINVAL;
+	}
+
+	ctrl->aging_vdd_mode = REGULATOR_MODE_NORMAL;
+	ctrl->aging_complete_vdd_mode = REGULATOR_MODE_IDLE;
+
+	ctrl->aging_sensor_count = 1;
+	ctrl->aging_sensor = kzalloc(sizeof(*ctrl->aging_sensor), GFP_KERNEL);
+	if (!ctrl->aging_sensor)
+		return -ENOMEM;
+
+	ctrl->aging_sensor->sensor_id = MSM8996_HMSS_AGING_SENSOR_ID;
+	ctrl->aging_sensor->bypass_mask[0] = MSM8996_HMSS_AGING_BYPASS_MASK0;
+	ctrl->aging_sensor->ro_scale = aging_ro_scale;
+	ctrl->aging_gcnt_scaling_factor
+				= MSM8996_HMSS_AGING_GCNT_SCALING_FACTOR;
+
+	ctrl->aging_sensor->init_quot_diff
+		= cpr3_convert_open_loop_voltage_fuse(0,
+			MSM8996_HMSS_AGING_INIT_QUOT_DIFF_SCALE,
+			fuse->aging_init_quot_diff,
+			MSM8996_HMSS_AGING_INIT_QUOT_DIFF_SIZE);
+
+	cpr3_debug(ctrl, "sensor %u aging init quotient diff = %d, aging RO scale = %u QUOT/V\n",
+		ctrl->aging_sensor->sensor_id,
+		ctrl->aging_sensor->init_quot_diff,
+		ctrl->aging_sensor->ro_scale);
+
+	return 0;
+}
+
+/**
+ * cpr3_hmss_init_controller() - perform HMSS CPR3 controller specific
+ *		initializations
+ * @ctrl:		Pointer to the CPR3 controller
+ *
+ * Return: 0 on success, errno on failure
+ */
+static int cpr3_hmss_init_controller(struct cpr3_controller *ctrl)
+{
+	int i, rc;
+
+	rc = cpr3_parse_common_ctrl_data(ctrl);
+	if (rc) {
+		if (rc != -EPROBE_DEFER)
+			cpr3_err(ctrl, "unable to parse common controller data, rc=%d\n",
+				rc);
+		return rc;
+	}
+
+	ctrl->vdd_limit_regulator = devm_regulator_get(ctrl->dev, "vdd-limit");
+	if (IS_ERR(ctrl->vdd_limit_regulator)) {
+		rc = PTR_ERR(ctrl->vdd_limit_regulator);
+		if (rc != -EPROBE_DEFER)
+			cpr3_err(ctrl, "unable to request vdd-supply regulator, rc=%d\n",
+				rc);
+		return rc;
+	}
+
+	rc = of_property_read_u32(ctrl->dev->of_node,
+			"qcom,cpr-up-down-delay-time",
+			&ctrl->up_down_delay_time);
+	if (rc) {
+		cpr3_err(ctrl, "error reading property qcom,cpr-up-down-delay-time, rc=%d\n",
+			rc);
+		return rc;
+	}
+
+	/* No error check since this is an optional property. */
+	of_property_read_u32(ctrl->dev->of_node,
+			     "qcom,system-supply-max-voltage",
+			     &ctrl->system_supply_max_volt);
+
+	/* No error check since this is an optional property. */
+	of_property_read_u32(ctrl->dev->of_node, "qcom,cpr-clock-throttling",
+			&ctrl->proc_clock_throttle);
+
+	rc = cpr3_apm_init(ctrl);
+	if (rc) {
+		if (rc != -EPROBE_DEFER)
+			cpr3_err(ctrl, "unable to initialize APM settings, rc=%d\n",
+				rc);
+		return rc;
+	}
+
+	ctrl->sensor_count = MSM8996_HMSS_CPR_SENSOR_COUNT;
+
+	ctrl->sensor_owner = devm_kcalloc(ctrl->dev, ctrl->sensor_count,
+		sizeof(*ctrl->sensor_owner), GFP_KERNEL);
+	if (!ctrl->sensor_owner)
+		return -ENOMEM;
+
+	/* Specify sensor ownership */
+	for (i = MSM8996_HMSS_THREAD0_SENSOR_MIN;
+	     i <= MSM8996_HMSS_THREAD0_SENSOR_MAX; i++)
+		ctrl->sensor_owner[i] = 0;
+	for (i = MSM8996_HMSS_THREAD1_SENSOR_MIN;
+	     i <= MSM8996_HMSS_THREAD1_SENSOR_MAX; i++)
+		ctrl->sensor_owner[i] = 1;
+
+	ctrl->cpr_clock_rate = MSM8996_HMSS_CPR_CLOCK_RATE;
+	ctrl->ctrl_type = CPR_CTRL_TYPE_CPR3;
+	ctrl->supports_hw_closed_loop = true;
+	ctrl->use_hw_closed_loop = of_property_read_bool(ctrl->dev->of_node,
+						"qcom,cpr-hw-closed-loop");
+
+	if (ctrl->mem_acc_regulator) {
+		rc = of_property_read_u32(ctrl->dev->of_node,
+					  "qcom,mem-acc-supply-threshold-voltage",
+					  &ctrl->mem_acc_threshold_volt);
+		if (rc) {
+			cpr3_err(ctrl, "error reading property qcom,mem-acc-supply-threshold-voltage, rc=%d\n",
+				 rc);
+			return rc;
+		}
+
+		ctrl->mem_acc_threshold_volt =
+			CPR3_ROUND(ctrl->mem_acc_threshold_volt,
+				   ctrl->step_volt);
+
+		rc = of_property_read_u32_array(ctrl->dev->of_node,
+			"qcom,mem-acc-supply-corner-map",
+			&ctrl->mem_acc_corner_map[CPR3_MEM_ACC_LOW_CORNER],
+			CPR3_MEM_ACC_CORNERS);
+		if (rc) {
+			cpr3_err(ctrl, "error reading qcom,mem-acc-supply-corner-map, rc=%d\n",
+				 rc);
+			return rc;
+		}
+	}
+
+	return 0;
+}
+
+static int cpr3_hmss_regulator_suspend(struct platform_device *pdev,
+				pm_message_t state)
+{
+	struct cpr3_controller *ctrl = platform_get_drvdata(pdev);
+
+	return cpr3_regulator_suspend(ctrl);
+}
+
+static int cpr3_hmss_regulator_resume(struct platform_device *pdev)
+{
+	struct cpr3_controller *ctrl = platform_get_drvdata(pdev);
+
+	return cpr3_regulator_resume(ctrl);
+}
+
+/* Data corresponds to the SoC revision */
+static const struct of_device_id cpr_regulator_match_table[] = {
+	{
+		.compatible = "qcom,cpr3-msm8996-v1-hmss-regulator",
+		.data = (void *)(uintptr_t)1
+	},
+	{
+		.compatible = "qcom,cpr3-msm8996-v2-hmss-regulator",
+		.data = (void *)(uintptr_t)2
+	},
+	{
+		.compatible = "qcom,cpr3-msm8996-v3-hmss-regulator",
+		.data = (void *)(uintptr_t)3
+	},
+	{
+		.compatible = "qcom,cpr3-msm8996-hmss-regulator",
+		.data = (void *)(uintptr_t)3
+	},
+	{
+		.compatible = "qcom,cpr3-msm8996pro-hmss-regulator",
+		.data = (void *)(uintptr_t)MSM8996PRO_SOC_ID,
+	},
+	{}
+};
+
+static int cpr3_hmss_regulator_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	const struct of_device_id *match;
+	struct cpr3_controller *ctrl;
+	struct cpr3_regulator *vreg = NULL;
+	int i, j, rc;
+
+	if (!dev->of_node) {
+		dev_err(dev, "Device tree node is missing\n");
+		return -EINVAL;
+	}
+
+	ctrl = devm_kzalloc(dev, sizeof(*ctrl), GFP_KERNEL);
+	if (!ctrl)
+		return -ENOMEM;
+
+	ctrl->dev = dev;
+	/* Set to false later if anything precludes CPR operation. */
+	ctrl->cpr_allowed_hw = true;
+
+	rc = of_property_read_string(dev->of_node, "qcom,cpr-ctrl-name",
+					&ctrl->name);
+	if (rc) {
+		cpr3_err(ctrl, "unable to read qcom,cpr-ctrl-name, rc=%d\n",
+			rc);
+		return rc;
+	}
+
+	match = of_match_node(cpr_regulator_match_table, dev->of_node);
+	if (match)
+		ctrl->soc_revision = (uintptr_t)match->data;
+	else
+		cpr3_err(ctrl, "could not find compatible string match\n");
+
+	rc = cpr3_map_fuse_base(ctrl, pdev);
+	if (rc) {
+		cpr3_err(ctrl, "could not map fuse base address\n");
+		return rc;
+	}
+
+	rc = cpr3_allocate_threads(ctrl, MSM8996_HMSS_POWER_CLUSTER_THREAD_ID,
+		MSM8996_HMSS_PERFORMANCE_CLUSTER_THREAD_ID);
+	if (rc) {
+		cpr3_err(ctrl, "failed to allocate CPR thread array, rc=%d\n",
+			rc);
+		return rc;
+	}
+
+	if (ctrl->thread_count < 1) {
+		cpr3_err(ctrl, "thread nodes are missing\n");
+		return -EINVAL;
+	}
+
+	rc = cpr3_hmss_init_controller(ctrl);
+	if (rc) {
+		if (rc != -EPROBE_DEFER)
+			cpr3_err(ctrl, "failed to initialize CPR controller parameters, rc=%d\n",
+				rc);
+		return rc;
+	}
+
+	for (i = 0; i < ctrl->thread_count; i++) {
+		rc = cpr3_hmss_init_thread(&ctrl->thread[i]);
+		if (rc) {
+			cpr3_err(ctrl, "thread %u initialization failed, rc=%d\n",
+				ctrl->thread[i].thread_id, rc);
+			return rc;
+		}
+
+		for (j = 0; j < ctrl->thread[i].vreg_count; j++) {
+			vreg = &ctrl->thread[i].vreg[j];
+
+			rc = cpr3_hmss_init_regulator(vreg);
+			if (rc) {
+				cpr3_err(vreg, "regulator initialization failed, rc=%d\n",
+					rc);
+				return rc;
+			}
+		}
+	}
+
+	rc = cpr3_hmss_init_aging(ctrl);
+	if (rc) {
+		cpr3_err(ctrl, "failed to initialize aging configurations, rc=%d\n",
+			rc);
+		return rc;
+	}
+
+	platform_set_drvdata(pdev, ctrl);
+
+	return cpr3_regulator_register(pdev, ctrl);
+}
+
+static int cpr3_hmss_regulator_remove(struct platform_device *pdev)
+{
+	struct cpr3_controller *ctrl = platform_get_drvdata(pdev);
+
+	return cpr3_regulator_unregister(ctrl);
+}
+
+static struct platform_driver cpr3_hmss_regulator_driver = {
+	.driver		= {
+		.name		= "qcom,cpr3-hmss-regulator",
+		.of_match_table	= cpr_regulator_match_table,
+		.owner		= THIS_MODULE,
+	},
+	.probe		= cpr3_hmss_regulator_probe,
+	.remove		= cpr3_hmss_regulator_remove,
+	.suspend	= cpr3_hmss_regulator_suspend,
+	.resume		= cpr3_hmss_regulator_resume,
+};
+
+static int cpr_regulator_init(void)
+{
+	return platform_driver_register(&cpr3_hmss_regulator_driver);
+}
+
+static void cpr_regulator_exit(void)
+{
+	platform_driver_unregister(&cpr3_hmss_regulator_driver);
+}
+
+MODULE_DESCRIPTION("CPR3 HMSS regulator driver");
+MODULE_LICENSE("GPL v2");
+
+arch_initcall(cpr_regulator_init);
+module_exit(cpr_regulator_exit);
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/regulator/cpr3-mmss-regulator.c	2019-10-29 09:26:24.645212984 +0100
@@ -0,0 +1,1257 @@
+/*
+ * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/bitops.h>
+#include <linux/debugfs.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/pm_opp.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/uaccess.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+#include <linux/regulator/of_regulator.h>
+
+#include "cpr3-regulator.h"
+
+#define MSM8996_MMSS_FUSE_CORNERS	4
+
+/**
+ * struct cpr3_msm8996_mmss_fuses - MMSS specific fuse data for MSM8996
+ * @init_voltage:	Initial (i.e. open-loop) voltage fuse parameter value
+ *			for each fuse corner (raw, not converted to a voltage)
+ * @offset_voltage:	The closed-loop voltage margin adjustment fuse parameter
+ *			value for each fuse corner (raw, not converted to a
+ *			voltage)
+ * @speed_bin:		Graphics processor speed bin fuse parameter value for
+ *			the given chip
+ * @cpr_fusing_rev:	CPR fusing revision fuse parameter value
+ * @limitation:		CPR limitation select fuse parameter value
+ * @aging_init_quot_diff:	Initial quotient difference between CPR aging
+ *			min and max sensors measured at time of manufacturing
+ * @force_highest_corner:	Flag indicating that all corners must operate
+ *			at the voltage of the highest corner.  This is
+ *			applicable to MSM8998 only.
+ *
+ * This struct holds the values for all of the fuses read from memory.
+ */
+struct cpr3_msm8996_mmss_fuses {
+	u64	init_voltage[MSM8996_MMSS_FUSE_CORNERS];
+	u64	offset_voltage[MSM8996_MMSS_FUSE_CORNERS];
+	u64	speed_bin;
+	u64	cpr_fusing_rev;
+	u64	limitation;
+	u64	aging_init_quot_diff;
+	u64	force_highest_corner;
+};
+
+/* Fuse combos 0 -  7 map to CPR fusing revision 0 - 7 */
+#define CPR3_MSM8996_MMSS_FUSE_COMBO_COUNT	8
+
+/*
+ * Fuse combos 0 -  7 map to CPR fusing revision 0 - 7 with speed bin fuse = 0.
+ * Fuse combos 8 - 15 map to CPR fusing revision 0 - 7 with speed bin fuse = 1.
+ * Fuse combos 16 - 23 map to CPR fusing revision 0 - 7 with speed bin fuse = 2.
+ */
+#define CPR3_MSM8996PRO_MMSS_FUSE_COMBO_COUNT	24
+
+/* Fuse combos 0 -  7 map to CPR fusing revision 0 - 7 */
+#define CPR3_MSM8998_MMSS_FUSE_COMBO_COUNT	8
+
+/*
+ * MSM8996 MMSS fuse parameter locations:
+ *
+ * Structs are organized with the following dimensions:
+ *	Outer: 0 to 3 for fuse corners from lowest to highest corner
+ *	Inner: large enough to hold the longest set of parameter segments which
+ *		fully defines a fuse parameter, +1 (for NULL termination).
+ *		Each segment corresponds to a contiguous group of bits from a
+ *		single fuse row.  These segments are concatenated together in
+ *		order to form the full fuse parameter value.  The segments for
+ *		a given parameter may correspond to different fuse rows.
+ */
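+/*
+ * For example, the first entry below, {63, 55, 59}, denotes a single
+ * segment spanning bits 55..59 of fuse row 63, following the
+ * {row, bit_start, bit_end} layout of struct cpr3_fuse_param.
+ */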
+static const struct cpr3_fuse_param
+msm8996_mmss_init_voltage_param[MSM8996_MMSS_FUSE_CORNERS][2] = {
+	{{63, 55, 59}, {} },
+	{{63, 50, 54}, {} },
+	{{63, 45, 49}, {} },
+	{{63, 40, 44}, {} },
+};
+
+static const struct cpr3_fuse_param msm8996_cpr_fusing_rev_param[] = {
+	{39, 48, 50},
+	{},
+};
+
+static const struct cpr3_fuse_param msm8996_cpr_limitation_param[] = {
+	{41, 31, 32},
+	{},
+};
+
+static const struct cpr3_fuse_param
+msm8996_mmss_aging_init_quot_diff_param[] = {
+	{68, 26, 31},
+	{},
+};
+
+/* Offset voltages are defined for SVS and Turbo fuse corners only */
+static const struct cpr3_fuse_param
+msm8996_mmss_offset_voltage_param[MSM8996_MMSS_FUSE_CORNERS][2] = {
+	{{} },
+	{{66, 42, 44}, {} },
+	{{} },
+	{{64, 58, 61}, {} },
+};
+
+static const struct cpr3_fuse_param msm8996pro_mmss_speed_bin_param[] = {
+	{39, 60, 61},
+	{},
+};
+
+/* MSM8998 MMSS fuse parameter locations: */
+static const struct cpr3_fuse_param
+msm8998_mmss_init_voltage_param[MSM8996_MMSS_FUSE_CORNERS][2] = {
+	{{65, 39, 43}, {} },
+	{{65, 34, 38}, {} },
+	{{65, 29, 33}, {} },
+	{{65, 24, 28}, {} },
+};
+
+static const struct cpr3_fuse_param msm8998_cpr_fusing_rev_param[] = {
+	{39, 48, 50},
+	{},
+};
+
+static const struct cpr3_fuse_param msm8998_cpr_limitation_param[] = {
+	{41, 46, 47},
+	{},
+};
+
+static const struct cpr3_fuse_param
+msm8998_mmss_aging_init_quot_diff_param[] = {
+	{65, 60, 63},
+	{66, 0, 3},
+	{},
+};
+
+static const struct cpr3_fuse_param
+msm8998_mmss_offset_voltage_param[MSM8996_MMSS_FUSE_CORNERS][2] = {
+	{{65, 56, 59}, {} },
+	{{65, 52, 55}, {} },
+	{{65, 48, 51}, {} },
+	{{65, 44, 47}, {} },
+};
+
+static const struct cpr3_fuse_param
+msm8998_cpr_force_highest_corner_param[] = {
+	{100, 45, 45},
+	{},
+};
+
+#define MSM8996PRO_SOC_ID			4
+#define MSM8998_V1_SOC_ID			5
+#define MSM8998_V2_SOC_ID			6
+
+/*
+ * Some initial msm8996 parts cannot be used in a meaningful way by software.
+ * Other parts can only be used when operating with CPR disabled (i.e. at the
+ * fused open-loop voltage) when no voltage interpolation is applied.  A fuse
+ * parameter is provided so that software can properly handle these limitations.
+ */
+enum msm8996_cpr_limitation {
+	MSM8996_CPR_LIMITATION_NONE = 0,
+	MSM8996_CPR_LIMITATION_UNSUPPORTED = 2,
+	MSM8996_CPR_LIMITATION_NO_CPR_OR_INTERPOLATION = 3,
+};
+
+/* Additional MSM8996 specific data: */
+
+/* Open loop voltage fuse reference voltages in microvolts */
+static const int msm8996_mmss_fuse_ref_volt[MSM8996_MMSS_FUSE_CORNERS] = {
+	670000,
+	745000,
+	905000,
+	1015000,
+};
+
+static const int msm8996pro_mmss_fuse_ref_volt[MSM8996_MMSS_FUSE_CORNERS] = {
+	670000,
+	745000,
+	905000,
+	1065000,
+};
+
+static const int msm8998_v1_mmss_fuse_ref_volt[MSM8996_MMSS_FUSE_CORNERS] = {
+	528000,
+	656000,
+	812000,
+	932000,
+};
+
+static const int
+msm8998_v1_rev0_mmss_fuse_ref_volt[MSM8996_MMSS_FUSE_CORNERS] = {
+	632000,
+	768000,
+	896000,
+	1032000,
+};
+
+static const int msm8998_v2_mmss_fuse_ref_volt[MSM8996_MMSS_FUSE_CORNERS] = {
+	516000,
+	628000,
+	752000,
+	924000,
+};
+
+static const int
+msm8998_v2_rev0_mmss_fuse_ref_volt[MSM8996_MMSS_FUSE_CORNERS] = {
+	616000,
+	740000,
+	828000,
+	1024000,
+};
+
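+/*
+ * The voltage fuse fields are interpreted as sign-magnitude values (MSB is
+ * the sign bit) by cpr3_convert_open_loop_voltage_fuse(), which is why 0x1F
+ * is the minimum 5-bit value.  Illustrative conversion: a 670000 uV
+ * reference with fused value 0x1F (sign set, magnitude 15) yields
+ * 670000 - 15 * 10000 = 520000 uV.
+ */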
+#define MSM8996_MMSS_FUSE_STEP_VOLT		10000
+#define MSM8996_MMSS_OFFSET_FUSE_STEP_VOLT	10000
+#define MSM8996_MMSS_VOLTAGE_FUSE_SIZE		5
+#define MSM8996_MMSS_MIN_VOLTAGE_FUSE_VAL	0x1F
+#define MSM8996_MMSS_AGING_INIT_QUOT_DIFF_SCALE	2
+#define MSM8996_MMSS_AGING_INIT_QUOT_DIFF_SIZE	6
+
+#define MSM8996_MMSS_CPR_SENSOR_COUNT		35
+
+#define MSM8996_MMSS_CPR_CLOCK_RATE		19200000
+
+#define MSM8996_MMSS_AGING_SENSOR_ID		29
+#define MSM8996_MMSS_AGING_BYPASS_MASK0		(GENMASK(23, 0))
+
+/* Use scaled gate count (GCNT) for aging measurements */
+#define MSM8996_MMSS_AGING_GCNT_SCALING_FACTOR	1500
+
+#define MSM8998_MMSS_AGING_INIT_QUOT_DIFF_SCALE	1
+#define MSM8998_MMSS_AGING_INIT_QUOT_DIFF_SIZE	8
+
+#define MSM8998_MMSS_CPR_SENSOR_COUNT			35
+
+#define MSM8998_MMSS_AGING_SENSOR_ID			29
+#define MSM8998_MMSS_AGING_BYPASS_MASK0		(GENMASK(23, 0))
+
+#define MSM8998_MMSS_MAX_TEMP_POINTS			3
+#define MSM8998_MMSS_TEMP_SENSOR_ID_START		12
+#define MSM8998_MMSS_TEMP_SENSOR_ID_END		13
+
+/*
+ * Some initial msm8998 parts cannot be operated at low voltages.  The
+ * open-loop voltage fuses are reused to identify these parts so that software
+ * can properly handle the limitation.  0xF means that the next higher fuse
+ * corner should be used.  0xE means that the next higher fuse corner which
+ * does not have a voltage limitation should be used.
+ */
+enum msm8998_cpr_partial_binning {
+	MSM8998_CPR_PARTIAL_BINNING_NEXT_CORNER = 0xF,
+	MSM8998_CPR_PARTIAL_BINNING_SAFE_CORNER = 0xE,
+};
+
+/*
+ * The partial binning open-loop voltage fuse values only apply to the lowest
+ * two fuse corners (0 and 1, i.e. MinSVS and SVS).
+ */
+#define MSM8998_CPR_PARTIAL_BINNING_MAX_FUSE_CORNER	1
+
+static inline bool cpr3_ctrl_is_msm8998(const struct cpr3_controller *ctrl)
+{
+	return ctrl->soc_revision == MSM8998_V1_SOC_ID ||
+		ctrl->soc_revision == MSM8998_V2_SOC_ID;
+}
+
+/**
+ * cpr3_msm8996_mmss_read_fuse_data() - load MMSS specific fuse parameter values
+ * @vreg:		Pointer to the CPR3 regulator
+ *
+ * This function allocates a cpr3_msm8996_mmss_fuses struct, fills it with
+ * values read out of hardware fuses, and finally copies common fuse values
+ * into the regulator struct.
+ *
+ * Return: 0 on success, errno on failure
+ */
+static int cpr3_msm8996_mmss_read_fuse_data(struct cpr3_regulator *vreg)
+{
+	void __iomem *base = vreg->thread->ctrl->fuse_base;
+	struct cpr3_msm8996_mmss_fuses *fuse;
+	int i, rc, combo_max;
+
+	fuse = devm_kzalloc(vreg->thread->ctrl->dev, sizeof(*fuse), GFP_KERNEL);
+	if (!fuse)
+		return -ENOMEM;
+
+	if (vreg->thread->ctrl->soc_revision == MSM8996PRO_SOC_ID) {
+		rc = cpr3_read_fuse_param(base, msm8996pro_mmss_speed_bin_param,
+					&fuse->speed_bin);
+		if (rc) {
+			cpr3_err(vreg, "Unable to read speed bin fuse, rc=%d\n",
+				rc);
+			return rc;
+		}
+		cpr3_info(vreg, "speed bin = %llu\n", fuse->speed_bin);
+	}
+
+	rc = cpr3_read_fuse_param(base,
+			cpr3_ctrl_is_msm8998(vreg->thread->ctrl)
+				? msm8998_cpr_fusing_rev_param
+				: msm8996_cpr_fusing_rev_param,
+			&fuse->cpr_fusing_rev);
+	if (rc) {
+		cpr3_err(vreg, "Unable to read CPR fusing revision fuse, rc=%d\n",
+			rc);
+		return rc;
+	}
+	cpr3_info(vreg, "CPR fusing revision = %llu\n", fuse->cpr_fusing_rev);
+
+	rc = cpr3_read_fuse_param(base,
+			cpr3_ctrl_is_msm8998(vreg->thread->ctrl)
+				? msm8998_cpr_limitation_param
+				: msm8996_cpr_limitation_param,
+			&fuse->limitation);
+	if (rc) {
+		cpr3_err(vreg, "Unable to read CPR limitation fuse, rc=%d\n",
+			rc);
+		return rc;
+	}
+	cpr3_info(vreg, "CPR limitation = %s\n",
+		fuse->limitation == MSM8996_CPR_LIMITATION_UNSUPPORTED
+		? "unsupported chip" : fuse->limitation
+			  == MSM8996_CPR_LIMITATION_NO_CPR_OR_INTERPOLATION
+		? "CPR disabled and no interpolation" : "none");
+
+	rc = cpr3_read_fuse_param(base,
+			cpr3_ctrl_is_msm8998(vreg->thread->ctrl)
+				? msm8998_mmss_aging_init_quot_diff_param
+				: msm8996_mmss_aging_init_quot_diff_param,
+			&fuse->aging_init_quot_diff);
+	if (rc) {
+		cpr3_err(vreg, "Unable to read aging initial quotient difference fuse, rc=%d\n",
+			rc);
+		return rc;
+	}
+
+	for (i = 0; i < MSM8996_MMSS_FUSE_CORNERS; i++) {
+		rc = cpr3_read_fuse_param(base,
+			cpr3_ctrl_is_msm8998(vreg->thread->ctrl)
+				? msm8998_mmss_init_voltage_param[i]
+				: msm8996_mmss_init_voltage_param[i],
+			&fuse->init_voltage[i]);
+		if (rc) {
+			cpr3_err(vreg, "Unable to read fuse-corner %d initial voltage fuse, rc=%d\n",
+				i, rc);
+			return rc;
+		}
+
+		rc = cpr3_read_fuse_param(base,
+			cpr3_ctrl_is_msm8998(vreg->thread->ctrl)
+				? msm8998_mmss_offset_voltage_param[i]
+				: msm8996_mmss_offset_voltage_param[i],
+			&fuse->offset_voltage[i]);
+		if (rc) {
+			cpr3_err(vreg, "Unable to read fuse-corner %d offset voltage fuse, rc=%d\n",
+				i, rc);
+			return rc;
+		}
+	}
+
+	if (cpr3_ctrl_is_msm8998(vreg->thread->ctrl)) {
+		rc = cpr3_read_fuse_param(base,
+			msm8998_cpr_force_highest_corner_param,
+			&fuse->force_highest_corner);
+		if (rc) {
+			cpr3_err(vreg, "Unable to read CPR force highest corner fuse, rc=%d\n",
+				rc);
+			return rc;
+		}
+		if (fuse->force_highest_corner)
+			cpr3_info(vreg, "Fusing requires all operation at the highest corner\n");
+	}
+
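+	/*
+	 * Worked example of the MSM8996pro combo mapping documented at the
+	 * top of this file: combo = fusing_rev + 8 * speed_bin, so fusing
+	 * revision 3 on speed bin 2 selects fuse combo 19.
+	 */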
+	if (cpr3_ctrl_is_msm8998(vreg->thread->ctrl)) {
+		combo_max = CPR3_MSM8998_MMSS_FUSE_COMBO_COUNT;
+		vreg->fuse_combo = fuse->cpr_fusing_rev;
+	} else if (vreg->thread->ctrl->soc_revision == MSM8996PRO_SOC_ID) {
+		combo_max = CPR3_MSM8996PRO_MMSS_FUSE_COMBO_COUNT;
+		vreg->fuse_combo = fuse->cpr_fusing_rev + 8 * fuse->speed_bin;
+	} else {
+		combo_max = CPR3_MSM8996_MMSS_FUSE_COMBO_COUNT;
+		vreg->fuse_combo = fuse->cpr_fusing_rev;
+	}
+
+	if (vreg->fuse_combo >= combo_max) {
+		cpr3_err(vreg, "invalid CPR fuse combo = %d found, not in range 0 - %d\n",
+			vreg->fuse_combo, combo_max - 1);
+		return -EINVAL;
+	}
+
+	vreg->speed_bin_fuse	= fuse->speed_bin;
+	vreg->cpr_rev_fuse	= fuse->cpr_fusing_rev;
+	vreg->fuse_corner_count	= MSM8996_MMSS_FUSE_CORNERS;
+	vreg->platform_fuses	= fuse;
+
+	return 0;
+}
+
+/**
+ * cpr3_mmss_parse_corner_data() - parse MMSS corner data from device tree
+ *		properties of the regulator's device node
+ * @vreg:		Pointer to the CPR3 regulator
+ *
+ * Return: 0 on success, errno on failure
+ */
+static int cpr3_mmss_parse_corner_data(struct cpr3_regulator *vreg)
+{
+	int i, rc;
+	u32 *temp;
+
+	rc = cpr3_parse_common_corner_data(vreg);
+	if (rc) {
+		cpr3_err(vreg, "error reading corner data, rc=%d\n", rc);
+		return rc;
+	}
+
+	temp = kcalloc(vreg->corner_count * CPR3_RO_COUNT, sizeof(*temp),
+			GFP_KERNEL);
+	if (!temp)
+		return -ENOMEM;
+
+	rc = cpr3_parse_corner_array_property(vreg, "qcom,cpr-target-quotients",
+			CPR3_RO_COUNT, temp);
+	if (rc) {
+		cpr3_err(vreg, "could not load target quotients, rc=%d\n", rc);
+		goto done;
+	}
+
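+	/*
+	 * The flattened property holds CPR3_RO_COUNT consecutive values per
+	 * corner, so corner i's quotients begin at index i * CPR3_RO_COUNT.
+	 */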
+	for (i = 0; i < vreg->corner_count; i++)
+		memcpy(vreg->corner[i].target_quot, &temp[i * CPR3_RO_COUNT],
+			sizeof(*temp) * CPR3_RO_COUNT);
+
+done:
+	kfree(temp);
+	return rc;
+}
+
+/**
+ * cpr3_msm8996_mmss_adjust_target_quotients() - adjust the target quotients
+ *		for each corner according to device tree values and fuse values
+ * @vreg:		Pointer to the CPR3 regulator
+ *
+ * Return: 0 on success, errno on failure
+ */
+static int cpr3_msm8996_mmss_adjust_target_quotients(
+			struct cpr3_regulator *vreg)
+{
+	struct cpr3_msm8996_mmss_fuses *fuse = vreg->platform_fuses;
+	const struct cpr3_fuse_param (*offset_param)[2];
+	int *volt_offset;
+	int i, fuse_len, rc = 0;
+
+	volt_offset = kcalloc(vreg->fuse_corner_count, sizeof(*volt_offset),
+				GFP_KERNEL);
+	if (!volt_offset)
+		return -ENOMEM;
+
+	offset_param = cpr3_ctrl_is_msm8998(vreg->thread->ctrl)
+			? msm8998_mmss_offset_voltage_param
+			: msm8996_mmss_offset_voltage_param;
+	for (i = 0; i < vreg->fuse_corner_count; i++) {
+		fuse_len = offset_param[i][0].bit_end + 1
+			   - offset_param[i][0].bit_start;
+		volt_offset[i] = cpr3_convert_open_loop_voltage_fuse(
+			0, MSM8996_MMSS_OFFSET_FUSE_STEP_VOLT,
+			fuse->offset_voltage[i], fuse_len);
+		if (volt_offset[i])
+			cpr3_info(vreg, "fuse_corner[%d] offset=%7d uV\n",
+				i, volt_offset[i]);
+	}
+
+	rc = cpr3_adjust_target_quotients(vreg, volt_offset);
+	if (rc)
+		cpr3_err(vreg, "adjust target quotients failed, rc=%d\n", rc);
+
+	kfree(volt_offset);
+	return rc;
+}
+
+/**
+ * cpr3_msm8996_mmss_calculate_open_loop_voltages() - calculate the open-loop
+ *		voltage for each corner of a CPR3 regulator
+ * @vreg:		Pointer to the CPR3 regulator
+ *
+ * If open-loop voltage interpolation is allowed in both device tree and in
+ * hardware fuses, then this function calculates the open-loop voltage for a
+ * given corner using linear interpolation.  This interpolation is performed
+ * using the processor frequencies of the lower and higher Fmax corners along
+ * with their fused open-loop voltages.
+ *
+ * If open-loop voltage interpolation is not allowed, then this function uses
+ * the Fmax fused open-loop voltage for all of the corners associated with a
+ * given fuse corner.
+ *
+ * Return: 0 on success, errno on failure
+ */
+static int cpr3_msm8996_mmss_calculate_open_loop_voltages(
+			struct cpr3_regulator *vreg)
+{
+	struct device_node *node = vreg->of_node;
+	struct cpr3_msm8996_mmss_fuses *fuse = vreg->platform_fuses;
+	bool is_msm8998 = cpr3_ctrl_is_msm8998(vreg->thread->ctrl);
+	int rc = 0;
+	bool allow_interpolation;
+	u64 freq_low, volt_low, freq_high, volt_high, volt_init;
+	int i, j;
+	const int *ref_volt;
+	int *fuse_volt;
+	int *fmax_corner;
+
+	fuse_volt = kcalloc(vreg->fuse_corner_count, sizeof(*fuse_volt),
+				GFP_KERNEL);
+	fmax_corner = kcalloc(vreg->fuse_corner_count, sizeof(*fmax_corner),
+				GFP_KERNEL);
+	if (!fuse_volt || !fmax_corner) {
+		rc = -ENOMEM;
+		goto done;
+	}
+
+	if (vreg->thread->ctrl->soc_revision == MSM8998_V2_SOC_ID
+	    && fuse->cpr_fusing_rev == 0)
+		ref_volt = msm8998_v2_rev0_mmss_fuse_ref_volt;
+	else if (vreg->thread->ctrl->soc_revision == MSM8998_V2_SOC_ID)
+		ref_volt = msm8998_v2_mmss_fuse_ref_volt;
+	else if (vreg->thread->ctrl->soc_revision == MSM8998_V1_SOC_ID
+	    && fuse->cpr_fusing_rev == 0)
+		ref_volt = msm8998_v1_rev0_mmss_fuse_ref_volt;
+	else if (vreg->thread->ctrl->soc_revision == MSM8998_V1_SOC_ID)
+		ref_volt = msm8998_v1_mmss_fuse_ref_volt;
+	else if (vreg->thread->ctrl->soc_revision == MSM8996PRO_SOC_ID)
+		ref_volt = msm8996pro_mmss_fuse_ref_volt;
+	else
+		ref_volt = msm8996_mmss_fuse_ref_volt;
+
+	for (i = 0; i < vreg->fuse_corner_count; i++) {
+		volt_init = fuse->init_voltage[i];
+		/*
+		 * Handle partial binning on MSM8998 where the initial voltage
+		 * fuse is reused as a flag for partial binning needs.  Set the
+		 * open-loop voltage to the minimum possible value so that it
+		 * does not result in higher fuse corners getting forced to
+		 * higher open-loop voltages after monotonicity enforcement.
+		 */
+		if (is_msm8998 &&
+		    (volt_init == MSM8998_CPR_PARTIAL_BINNING_NEXT_CORNER ||
+		     volt_init == MSM8998_CPR_PARTIAL_BINNING_SAFE_CORNER) &&
+		    i <= MSM8998_CPR_PARTIAL_BINNING_MAX_FUSE_CORNER)
+			volt_init = MSM8996_MMSS_MIN_VOLTAGE_FUSE_VAL;
+
+		fuse_volt[i] = cpr3_convert_open_loop_voltage_fuse(ref_volt[i],
+			MSM8996_MMSS_FUSE_STEP_VOLT, volt_init,
+			MSM8996_MMSS_VOLTAGE_FUSE_SIZE);
+		cpr3_info(vreg, "fuse_corner[%d] open-loop=%7d uV\n",
+			i, fuse_volt[i]);
+	}
+
+	rc = cpr3_adjust_fused_open_loop_voltages(vreg, fuse_volt);
+	if (rc) {
+		cpr3_err(vreg, "fused open-loop voltage adjustment failed, rc=%d\n",
+			rc);
+		goto done;
+	}
+
+	allow_interpolation = of_property_read_bool(node,
+				"qcom,allow-voltage-interpolation");
+
+	for (i = 1; i < vreg->fuse_corner_count; i++) {
+		if (fuse_volt[i] < fuse_volt[i - 1]) {
+			cpr3_debug(vreg, "fuse corner %d voltage=%d uV < fuse corner %d voltage=%d uV; overriding: fuse corner %d voltage=%d\n",
+				i, fuse_volt[i], i - 1, fuse_volt[i - 1],
+				i, fuse_volt[i - 1]);
+			fuse_volt[i] = fuse_volt[i - 1];
+		}
+	}
+
+	if (fuse->limitation == MSM8996_CPR_LIMITATION_NO_CPR_OR_INTERPOLATION)
+		allow_interpolation = false;
+
+	if (!allow_interpolation) {
+		/* Use fused open-loop voltage for lower frequencies. */
+		for (i = 0; i < vreg->corner_count; i++)
+			vreg->corner[i].open_loop_volt
+				= fuse_volt[vreg->corner[i].cpr_fuse_corner];
+		goto done;
+	}
+
+	/* Determine highest corner mapped to each fuse corner */
+	j = vreg->fuse_corner_count - 1;
+	for (i = vreg->corner_count - 1; i >= 0; i--) {
+		if (vreg->corner[i].cpr_fuse_corner == j) {
+			fmax_corner[j] = i;
+			j--;
+		}
+	}
+	if (j >= 0) {
+		cpr3_err(vreg, "invalid fuse corner mapping\n");
+		rc = -EINVAL;
+		goto done;
+	}
+
+	/*
+	 * Interpolation is not possible for corners mapped to the lowest fuse
+	 * corner, so use the fuse corner value directly.
+	 */
+	for (i = 0; i <= fmax_corner[0]; i++)
+		vreg->corner[i].open_loop_volt = fuse_volt[0];
+
+	/* Interpolate voltages for the higher fuse corners. */
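+	/*
+	 * As a sketch, cpr3_interpolate() is linear in processor frequency:
+	 *
+	 *   V(f) = volt_low + (volt_high - volt_low)
+	 *          * (f - freq_low) / (freq_high - freq_low)
+	 */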
+	for (i = 1; i < vreg->fuse_corner_count; i++) {
+		freq_low = vreg->corner[fmax_corner[i - 1]].proc_freq;
+		volt_low = fuse_volt[i - 1];
+		freq_high = vreg->corner[fmax_corner[i]].proc_freq;
+		volt_high = fuse_volt[i];
+
+		for (j = fmax_corner[i - 1] + 1; j <= fmax_corner[i]; j++)
+			vreg->corner[j].open_loop_volt = cpr3_interpolate(
+				freq_low, volt_low, freq_high, volt_high,
+				vreg->corner[j].proc_freq);
+	}
+
+done:
+	if (rc == 0) {
+		cpr3_debug(vreg, "unadjusted per-corner open-loop voltages:\n");
+		for (i = 0; i < vreg->corner_count; i++)
+			cpr3_debug(vreg, "open-loop[%2d] = %d uV\n", i,
+				vreg->corner[i].open_loop_volt);
+
+		rc = cpr3_adjust_open_loop_voltages(vreg);
+		if (rc)
+			cpr3_err(vreg, "open-loop voltage adjustment failed, rc=%d\n",
+				rc);
+	}
+
+	kfree(fuse_volt);
+	kfree(fmax_corner);
+	return rc;
+}
+
+/**
+ * cpr3_msm8998_partial_binning_override() - override the voltage and quotient
+ *		settings for low corners based upon the special partial binning
+ *		open-loop voltage fuse values
+ * @vreg:		Pointer to the CPR3 regulator
+ *
+ * Some parts are not able to operate at low voltages.  The partial binning
+ * open-loop voltage fuse values specify if a given part has such limitations.
+ *
+ * Return: 0 on success, errno on failure
+ */
+static int cpr3_msm8998_partial_binning_override(struct cpr3_regulator *vreg)
+{
+	struct cpr3_msm8996_mmss_fuses *fuse = vreg->platform_fuses;
+	u64 next = MSM8998_CPR_PARTIAL_BINNING_NEXT_CORNER;
+	u64 safe = MSM8998_CPR_PARTIAL_BINNING_SAFE_CORNER;
+	u32 proc_freq;
+	struct cpr3_corner *corner;
+	struct cpr3_corner *safe_corner;
+	int i, j, low, high, safe_fuse_corner, max_fuse_corner;
+
+	if (!cpr3_ctrl_is_msm8998(vreg->thread->ctrl))
+		return 0;
+
+	/* Handle the force highest corner fuse. */
+	if (fuse->force_highest_corner) {
+		cpr3_info(vreg, "overriding CPR parameters for corners 0 to %d with quotients and voltages of corner %d\n",
+			vreg->corner_count - 2, vreg->corner_count - 1);
+		corner = &vreg->corner[vreg->corner_count - 1];
+		for (i = 0; i < vreg->corner_count - 1; i++) {
+			proc_freq = vreg->corner[i].proc_freq;
+			vreg->corner[i] = *corner;
+			vreg->corner[i].proc_freq = proc_freq;
+		}
+
+		/*
+		 * Return since the potential partial binning fuse values are
+		 * superseded by the force highest corner fuse value.
+		 */
+		return 0;
+	}
+
+	/*
+	 * Allow up to the max corner which can be fused with partial
+	 * binning values.
+	 */
+	max_fuse_corner = min(MSM8998_CPR_PARTIAL_BINNING_MAX_FUSE_CORNER,
+				vreg->fuse_corner_count - 2);
+
+	for (i = 0; i <= max_fuse_corner; i++) {
+		/* Determine which higher corners to override with (if any). */
+		if (fuse->init_voltage[i] != next
+		    && fuse->init_voltage[i] != safe)
+			continue;
+
+		for (j = i + 1; j <= max_fuse_corner; j++)
+			if (fuse->init_voltage[j] != next
+			    && fuse->init_voltage[j] != safe)
+				break;
+		safe_fuse_corner = j;
+
+		j = fuse->init_voltage[i] == next ? i + 1 : safe_fuse_corner;
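+		/*
+		 * A "next" fuse value borrows quotients from the immediately
+		 * higher corner, while a "safe" fuse value borrows them from
+		 * the first corner with no partial binning limitation; the
+		 * voltage limits always come from the safe corner.
+		 */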
+
+		low = i > 0 ? vreg->fuse_corner_map[i] : 0;
+		high = vreg->fuse_corner_map[i + 1] - 1;
+
+		cpr3_info(vreg, "overriding CPR parameters for corners %d to %d with quotients of corner %d and voltages of corner %d\n",
+			low, high, vreg->fuse_corner_map[j],
+			vreg->fuse_corner_map[safe_fuse_corner]);
+
+		corner = &vreg->corner[vreg->fuse_corner_map[j]];
+		safe_corner
+		       = &vreg->corner[vreg->fuse_corner_map[safe_fuse_corner]];
+
+		for (j = low; j <= high; j++) {
+			proc_freq = vreg->corner[j].proc_freq;
+			vreg->corner[j] = *corner;
+			vreg->corner[j].proc_freq = proc_freq;
+
+			vreg->corner[j].floor_volt
+				= safe_corner->floor_volt;
+			vreg->corner[j].ceiling_volt
+				= safe_corner->ceiling_volt;
+			vreg->corner[j].open_loop_volt
+				= safe_corner->open_loop_volt;
+			vreg->corner[j].abs_ceiling_volt
+				= safe_corner->abs_ceiling_volt;
+		}
+	}
+
+	return 0;
+}
+
+/**
+ * cpr3_mmss_print_settings() - print out MMSS CPR configuration settings into
+ *		the kernel log for debugging purposes
+ * @vreg:		Pointer to the CPR3 regulator
+ */
+static void cpr3_mmss_print_settings(struct cpr3_regulator *vreg)
+{
+	struct cpr3_corner *corner;
+	int i;
+
+	cpr3_debug(vreg, "Corner: Frequency (Hz), Fuse Corner, Floor (uV), Open-Loop (uV), Ceiling (uV)\n");
+	for (i = 0; i < vreg->corner_count; i++) {
+		corner = &vreg->corner[i];
+		cpr3_debug(vreg, "%3d: %10u, %2d, %7d, %7d, %7d\n",
+			i, corner->proc_freq, corner->cpr_fuse_corner,
+			corner->floor_volt, corner->open_loop_volt,
+			corner->ceiling_volt);
+	}
+}
+
+/**
+ * cpr3_mmss_init_aging() - perform MMSS CPR3 controller specific
+ *		aging initializations
+ * @ctrl:		Pointer to the CPR3 controller
+ *
+ * Return: 0 on success, errno on failure
+ */
+static int cpr3_mmss_init_aging(struct cpr3_controller *ctrl)
+{
+	struct cpr3_msm8996_mmss_fuses *fuse;
+	struct cpr3_regulator *vreg;
+	u32 aging_ro_scale;
+	int rc;
+
+	vreg = &ctrl->thread[0].vreg[0];
+
+	ctrl->aging_required = vreg->aging_allowed;
+	fuse = vreg->platform_fuses;
+
+	if (!ctrl->aging_required || !fuse)
+		return 0;
+
+	rc = cpr3_parse_array_property(vreg, "qcom,cpr-aging-ro-scaling-factor",
+			1, &aging_ro_scale);
+	if (rc)
+		return rc;
+
+	if (aging_ro_scale == 0) {
+		cpr3_err(ctrl, "aging RO scaling factor is invalid: %u\n",
+			aging_ro_scale);
+		return -EINVAL;
+	}
+
+	ctrl->aging_vdd_mode = REGULATOR_MODE_NORMAL;
+	ctrl->aging_complete_vdd_mode = REGULATOR_MODE_IDLE;
+
+	ctrl->aging_sensor_count = 1;
+	ctrl->aging_sensor = kzalloc(sizeof(*ctrl->aging_sensor), GFP_KERNEL);
+	if (!ctrl->aging_sensor)
+		return -ENOMEM;
+
+	ctrl->aging_sensor->ro_scale = aging_ro_scale;
+	ctrl->aging_gcnt_scaling_factor
+				= MSM8996_MMSS_AGING_GCNT_SCALING_FACTOR;
+
+	if (cpr3_ctrl_is_msm8998(ctrl)) {
+		ctrl->aging_sensor->sensor_id = MSM8998_MMSS_AGING_SENSOR_ID;
+		ctrl->aging_sensor->bypass_mask[0]
+					= MSM8998_MMSS_AGING_BYPASS_MASK0;
+		ctrl->aging_sensor->init_quot_diff
+			= cpr3_convert_open_loop_voltage_fuse(0,
+				MSM8998_MMSS_AGING_INIT_QUOT_DIFF_SCALE,
+				fuse->aging_init_quot_diff,
+				MSM8998_MMSS_AGING_INIT_QUOT_DIFF_SIZE);
+	} else {
+		ctrl->aging_sensor->sensor_id = MSM8996_MMSS_AGING_SENSOR_ID;
+		ctrl->aging_sensor->bypass_mask[0]
+					= MSM8996_MMSS_AGING_BYPASS_MASK0;
+		ctrl->aging_sensor->init_quot_diff
+			= cpr3_convert_open_loop_voltage_fuse(0,
+				MSM8996_MMSS_AGING_INIT_QUOT_DIFF_SCALE,
+				fuse->aging_init_quot_diff,
+				MSM8996_MMSS_AGING_INIT_QUOT_DIFF_SIZE);
+	}
+
+	cpr3_debug(ctrl, "sensor %u aging init quotient diff = %d, aging RO scale = %u QUOT/V\n",
+		ctrl->aging_sensor->sensor_id,
+		ctrl->aging_sensor->init_quot_diff,
+		ctrl->aging_sensor->ro_scale);
+
+	return 0;
+}
+
+/**
+ * cpr3_mmss_init_thread() - perform all steps necessary to initialize the
+ *		configuration data for a CPR3 thread
+ * @thread:		Pointer to the CPR3 thread
+ *
+ * Return: 0 on success, errno on failure
+ */
+static int cpr3_mmss_init_thread(struct cpr3_thread *thread)
+{
+	struct cpr3_regulator *vreg = &thread->vreg[0];
+	struct cpr3_msm8996_mmss_fuses *fuse;
+	int rc;
+
+	rc = cpr3_parse_common_thread_data(thread);
+	if (rc) {
+		cpr3_err(vreg, "unable to read CPR thread data from device tree, rc=%d\n",
+			rc);
+		return rc;
+	}
+
+	rc = cpr3_msm8996_mmss_read_fuse_data(vreg);
+	if (rc) {
+		cpr3_err(vreg, "unable to read CPR fuse data, rc=%d\n", rc);
+		return rc;
+	}
+
+	fuse = vreg->platform_fuses;
+	if (fuse->limitation == MSM8996_CPR_LIMITATION_UNSUPPORTED) {
+		cpr3_err(vreg, "this chip requires an unsupported voltage\n");
+		return -EPERM;
+	} else if (fuse->limitation
+			== MSM8996_CPR_LIMITATION_NO_CPR_OR_INTERPOLATION) {
+		thread->ctrl->cpr_allowed_hw = false;
+	}
+
+	rc = cpr3_mmss_parse_corner_data(vreg);
+	if (rc) {
+		cpr3_err(vreg, "unable to read CPR corner data from device tree, rc=%d\n",
+			rc);
+		return rc;
+	}
+
+	rc = cpr3_msm8996_mmss_adjust_target_quotients(vreg);
+	if (rc) {
+		cpr3_err(vreg, "unable to adjust target quotients, rc=%d\n",
+			rc);
+		return rc;
+	}
+
+	rc = cpr3_msm8996_mmss_calculate_open_loop_voltages(vreg);
+	if (rc) {
+		cpr3_err(vreg, "unable to calculate open-loop voltages, rc=%d\n",
+			rc);
+		return rc;
+	}
+
+	rc = cpr3_limit_open_loop_voltages(vreg);
+	if (rc) {
+		cpr3_err(vreg, "unable to limit open-loop voltages, rc=%d\n",
+			rc);
+		return rc;
+	}
+
+	cpr3_open_loop_voltage_as_ceiling(vreg);
+
+	rc = cpr3_limit_floor_voltages(vreg);
+	if (rc) {
+		cpr3_err(vreg, "unable to limit floor voltages, rc=%d\n", rc);
+		return rc;
+	}
+
+	if (cpr3_ctrl_is_msm8998(thread->ctrl)) {
+		rc = cpr4_parse_core_count_temp_voltage_adj(vreg, false);
+		if (rc) {
+			cpr3_err(vreg, "unable to parse temperature based voltage adjustments, rc=%d\n",
+				 rc);
+			return rc;
+		}
+	}
+
+	rc = cpr3_msm8998_partial_binning_override(vreg);
+	if (rc) {
+		cpr3_err(vreg, "unable to override CPR parameters based on partial binning fuse values, rc=%d\n",
+			rc);
+		return rc;
+	}
+
+	cpr3_mmss_print_settings(vreg);
+
+	return 0;
+}
+
+/**
+ * cpr4_mmss_parse_temp_adj_properties() - parse temperature based
+ *		adjustment properties from device tree
+ * @ctrl:	Pointer to the CPR3 controller
+ *
+ * Return: 0 on success, errno on failure
+ */
+static int cpr4_mmss_parse_temp_adj_properties(struct cpr3_controller *ctrl)
+{
+	struct device_node *of_node = ctrl->dev->of_node;
+	int rc, len, temp_point_count;
+
+	if (!of_find_property(of_node, "qcom,cpr-temp-point-map", &len))
+		return 0;
+
+	temp_point_count = len / sizeof(u32);
+	if (temp_point_count <= 0
+	    || temp_point_count > MSM8998_MMSS_MAX_TEMP_POINTS) {
+		cpr3_err(ctrl, "invalid number of temperature points %d > %d (max)\n",
+			 temp_point_count, MSM8998_MMSS_MAX_TEMP_POINTS);
+		return -EINVAL;
+	}
+
+	ctrl->temp_points = devm_kcalloc(ctrl->dev, temp_point_count,
+					sizeof(*ctrl->temp_points), GFP_KERNEL);
+	if (!ctrl->temp_points)
+		return -ENOMEM;
+
+	rc = of_property_read_u32_array(of_node, "qcom,cpr-temp-point-map",
+					ctrl->temp_points, temp_point_count);
+	if (rc) {
+		cpr3_err(ctrl, "error reading property qcom,cpr-temp-point-map, rc=%d\n",
+			 rc);
+		return rc;
+	}
+
+	/*
+	 * If t1, t2, and t3 are the temperature points, then the temperature
+	 * bands are: (-inf, t1], (t1, t2], (t2, t3], and (t3, inf).
+	 */
+	ctrl->temp_band_count = temp_point_count + 1;
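+	/*
+	 * Illustrative example: temperature points {25, 50, 85} yield the
+	 * four bands (-inf, 25], (25, 50], (50, 85], and (85, inf).
+	 */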
+
+	rc = of_property_read_u32(of_node, "qcom,cpr-initial-temp-band",
+				  &ctrl->initial_temp_band);
+	if (rc) {
+		cpr3_err(ctrl, "error reading qcom,cpr-initial-temp-band, rc=%d\n",
+			rc);
+		return rc;
+	}
+
+	if (ctrl->initial_temp_band >= ctrl->temp_band_count) {
+		cpr3_err(ctrl, "Initial temperature band value %d should be in range [0 - %d]\n",
+			ctrl->initial_temp_band, ctrl->temp_band_count - 1);
+		return -EINVAL;
+	}
+
+	ctrl->temp_sensor_id_start = MSM8998_MMSS_TEMP_SENSOR_ID_START;
+	ctrl->temp_sensor_id_end = MSM8998_MMSS_TEMP_SENSOR_ID_END;
+	ctrl->allow_temp_adj = true;
+
+	return rc;
+}
+
+/**
+ * cpr3_mmss_init_controller() - perform MMSS CPR3 controller specific
+ *		initializations
+ * @ctrl:		Pointer to the CPR3 controller
+ *
+ * Return: 0 on success, errno on failure
+ */
+static int cpr3_mmss_init_controller(struct cpr3_controller *ctrl)
+{
+	int rc;
+
+	rc = cpr3_parse_common_ctrl_data(ctrl);
+	if (rc) {
+		if (rc != -EPROBE_DEFER)
+			cpr3_err(ctrl, "unable to parse common controller data, rc=%d\n",
+				rc);
+		return rc;
+	}
+
+	if (cpr3_ctrl_is_msm8998(ctrl)) {
+		rc = cpr4_mmss_parse_temp_adj_properties(ctrl);
+		if (rc)
+			return rc;
+	}
+
+	ctrl->sensor_count = cpr3_ctrl_is_msm8998(ctrl)
+				? MSM8998_MMSS_CPR_SENSOR_COUNT
+				: MSM8996_MMSS_CPR_SENSOR_COUNT;
+
+	/*
+	 * MMSS only has one thread (0), so the zeroed array does not need
+	 * further modification.
+	 */
+	ctrl->sensor_owner = devm_kcalloc(ctrl->dev, ctrl->sensor_count,
+				sizeof(*ctrl->sensor_owner), GFP_KERNEL);
+	if (!ctrl->sensor_owner)
+		return -ENOMEM;
+
+	ctrl->cpr_clock_rate = MSM8996_MMSS_CPR_CLOCK_RATE;
+	ctrl->ctrl_type = cpr3_ctrl_is_msm8998(ctrl)
+				? CPR_CTRL_TYPE_CPR4 : CPR_CTRL_TYPE_CPR3;
+
+	if (ctrl->ctrl_type == CPR_CTRL_TYPE_CPR4) {
+		/*
+		 * Use fixed step quotient if specified otherwise use dynamic
+		 * calculated per RO step quotient
+		 */
+		of_property_read_u32(ctrl->dev->of_node,
+				     "qcom,cpr-step-quot-fixed",
+				     &ctrl->step_quot_fixed);
+		ctrl->use_dynamic_step_quot = !ctrl->step_quot_fixed;
+	}
+
+	ctrl->iface_clk = devm_clk_get(ctrl->dev, "iface_clk");
+	if (IS_ERR(ctrl->iface_clk)) {
+		rc = PTR_ERR(ctrl->iface_clk);
+		if (cpr3_ctrl_is_msm8998(ctrl)) {
+			/* iface_clk is optional for msm8998 */
+			ctrl->iface_clk = NULL;
+		} else if (rc == -EPROBE_DEFER) {
+			return rc;
+		} else {
+			cpr3_err(ctrl, "unable to request interface clock, rc=%d\n",
+				rc);
+			return rc;
+		}
+	}
+
+	ctrl->bus_clk = devm_clk_get(ctrl->dev, "bus_clk");
+	if (IS_ERR(ctrl->bus_clk)) {
+		rc = PTR_ERR(ctrl->bus_clk);
+		if (rc != -EPROBE_DEFER)
+			cpr3_err(ctrl, "unable request bus clock, rc=%d\n",
+				rc);
+		return rc;
+	}
+
+	return 0;
+}
+
+static int cpr3_mmss_regulator_suspend(struct platform_device *pdev,
+				pm_message_t state)
+{
+	struct cpr3_controller *ctrl = platform_get_drvdata(pdev);
+
+	return cpr3_regulator_suspend(ctrl);
+}
+
+static int cpr3_mmss_regulator_resume(struct platform_device *pdev)
+{
+	struct cpr3_controller *ctrl = platform_get_drvdata(pdev);
+
+	return cpr3_regulator_resume(ctrl);
+}
+
+/* Data corresponds to the SoC revision */
+static const struct of_device_id cpr_regulator_match_table[] = {
+	{
+		.compatible = "qcom,cpr3-msm8996-v1-mmss-regulator",
+		.data = (void *)(uintptr_t)1,
+	},
+	{
+		.compatible = "qcom,cpr3-msm8996-v2-mmss-regulator",
+		.data = (void *)(uintptr_t)2,
+	},
+	{
+		.compatible = "qcom,cpr3-msm8996-v3-mmss-regulator",
+		.data = (void *)(uintptr_t)3,
+	},
+	{
+		.compatible = "qcom,cpr3-msm8996-mmss-regulator",
+		.data = (void *)(uintptr_t)3,
+	},
+	{
+		.compatible = "qcom,cpr3-msm8996pro-mmss-regulator",
+		.data = (void *)(uintptr_t)MSM8996PRO_SOC_ID,
+	},
+	{
+		.compatible = "qcom,cpr4-msm8998-v1-mmss-regulator",
+		.data = (void *)(uintptr_t)MSM8998_V1_SOC_ID,
+	},
+	{
+		.compatible = "qcom,cpr4-msm8998-v2-mmss-regulator",
+		.data = (void *)(uintptr_t)MSM8998_V2_SOC_ID,
+	},
+	{
+		.compatible = "qcom,cpr4-msm8998-mmss-regulator",
+		.data = (void *)(uintptr_t)MSM8998_V2_SOC_ID,
+	},
+	{}
+};
+
+static int cpr3_mmss_regulator_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	const struct of_device_id *match;
+	struct cpr3_controller *ctrl;
+	int rc;
+
+	if (!dev->of_node) {
+		dev_err(dev, "Device tree node is missing\n");
+		return -EINVAL;
+	}
+
+	ctrl = devm_kzalloc(dev, sizeof(*ctrl), GFP_KERNEL);
+	if (!ctrl)
+		return -ENOMEM;
+
+	ctrl->dev = dev;
+	/* Set to false later if anything precludes CPR operation. */
+	ctrl->cpr_allowed_hw = true;
+
+	rc = of_property_read_string(dev->of_node, "qcom,cpr-ctrl-name",
+					&ctrl->name);
+	if (rc) {
+		cpr3_err(ctrl, "unable to read qcom,cpr-ctrl-name, rc=%d\n",
+			rc);
+		return rc;
+	}
+
+	match = of_match_node(cpr_regulator_match_table, dev->of_node);
+	if (match)
+		ctrl->soc_revision = (uintptr_t)match->data;
+	else
+		cpr3_err(ctrl, "could not find compatible string match\n");
+
+	rc = cpr3_map_fuse_base(ctrl, pdev);
+	if (rc) {
+		cpr3_err(ctrl, "could not map fuse base address\n");
+		return rc;
+	}
+
+	rc = cpr3_allocate_threads(ctrl, 0, 0);
+	if (rc) {
+		cpr3_err(ctrl, "failed to allocate CPR thread array, rc=%d\n",
+			rc);
+		return rc;
+	}
+
+	if (ctrl->thread_count != 1) {
+		cpr3_err(ctrl, "expected 1 thread but found %d\n",
+			ctrl->thread_count);
+		return -EINVAL;
+	} else if (ctrl->thread[0].vreg_count != 1) {
+		cpr3_err(ctrl, "expected 1 regulator but found %d\n",
+			ctrl->thread[0].vreg_count);
+		return -EINVAL;
+	}
+
+	rc = cpr3_mmss_init_controller(ctrl);
+	if (rc) {
+		if (rc != -EPROBE_DEFER)
+			cpr3_err(ctrl, "failed to initialize CPR controller parameters, rc=%d\n",
+				rc);
+		return rc;
+	}
+
+	rc = cpr3_mmss_init_thread(&ctrl->thread[0]);
+	if (rc) {
+		cpr3_err(&ctrl->thread[0].vreg[0], "thread initialization failed, rc=%d\n",
+			rc);
+		return rc;
+	}
+
+	rc = cpr3_mem_acc_init(&ctrl->thread[0].vreg[0]);
+	if (rc) {
+		cpr3_err(ctrl, "failed to initialize mem-acc configuration, rc=%d\n",
+			 rc);
+		return rc;
+	}
+
+	rc = cpr3_mmss_init_aging(ctrl);
+	if (rc) {
+		cpr3_err(ctrl, "failed to initialize aging configurations, rc=%d\n",
+			rc);
+		return rc;
+	}
+
+	platform_set_drvdata(pdev, ctrl);
+
+	return cpr3_regulator_register(pdev, ctrl);
+}
+
+static int cpr3_mmss_regulator_remove(struct platform_device *pdev)
+{
+	struct cpr3_controller *ctrl = platform_get_drvdata(pdev);
+
+	return cpr3_regulator_unregister(ctrl);
+}
+
+static struct platform_driver cpr3_mmss_regulator_driver = {
+	.driver		= {
+		.name		= "qcom,cpr3-mmss-regulator",
+		.of_match_table	= cpr_regulator_match_table,
+		.owner		= THIS_MODULE,
+	},
+	.probe		= cpr3_mmss_regulator_probe,
+	.remove		= cpr3_mmss_regulator_remove,
+	.suspend	= cpr3_mmss_regulator_suspend,
+	.resume		= cpr3_mmss_regulator_resume,
+};
+
+static int cpr_regulator_init(void)
+{
+	return platform_driver_register(&cpr3_mmss_regulator_driver);
+}
+
+static void cpr_regulator_exit(void)
+{
+	platform_driver_unregister(&cpr3_mmss_regulator_driver);
+}
+
+MODULE_DESCRIPTION("CPR3 MMSS regulator driver");
+MODULE_LICENSE("GPL v2");
+
+arch_initcall(cpr_regulator_init);
+module_exit(cpr_regulator_exit);
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/regulator/cpr3-regulator.c	2019-01-22 16:16:26.263271400 +0100
@@ -0,0 +1,6517 @@
+/*
+ * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/bitops.h>
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/ktime.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/pm_opp.h>
+#include <linux/slab.h>
+#include <linux/sort.h>
+#include <linux/string.h>
+#include <linux/uaccess.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+#include <linux/regulator/of_regulator.h>
+#include <linux/regulator/msm-ldo-regulator.h>
+
+#include <soc/qcom/spm.h>
+
+#include "cpr3-regulator.h"
+
+#define CPR3_REGULATOR_CORNER_INVALID	(-1)
+#define CPR3_RO_MASK			GENMASK(CPR3_RO_COUNT - 1, 0)
+
+/* CPR3 registers */
+#define CPR3_REG_CPR_CTL			0x4
+#define CPR3_CPR_CTL_LOOP_EN_MASK		BIT(0)
+#define CPR3_CPR_CTL_LOOP_ENABLE		BIT(0)
+#define CPR3_CPR_CTL_LOOP_DISABLE		0
+#define CPR3_CPR_CTL_IDLE_CLOCKS_MASK		GENMASK(5, 1)
+#define CPR3_CPR_CTL_IDLE_CLOCKS_SHIFT		1
+#define CPR3_CPR_CTL_COUNT_MODE_MASK		GENMASK(7, 6)
+#define CPR3_CPR_CTL_COUNT_MODE_SHIFT		6
+#define CPR3_CPR_CTL_COUNT_MODE_ALL_AT_ONCE_MIN	0
+#define CPR3_CPR_CTL_COUNT_MODE_ALL_AT_ONCE_MAX	1
+#define CPR3_CPR_CTL_COUNT_MODE_STAGGERED	2
+#define CPR3_CPR_CTL_COUNT_MODE_ALL_AT_ONCE_AGE	3
+#define CPR3_CPR_CTL_COUNT_REPEAT_MASK		GENMASK(31, 9)
+#define CPR3_CPR_CTL_COUNT_REPEAT_SHIFT		9
+
+#define CPR3_REG_CPR_STATUS			0x8
+#define CPR3_CPR_STATUS_BUSY_MASK		BIT(0)
+#define CPR3_CPR_STATUS_AGING_MEASUREMENT_MASK	BIT(1)
+
+/*
+ * This register is not present on controllers that support HW closed-loop,
+ * except for the CPR4 APSS controller.
+ */
+#define CPR3_REG_CPR_TIMER_AUTO_CONT		0xC
+
+#define CPR3_REG_CPR_STEP_QUOT			0x14
+#define CPR3_CPR_STEP_QUOT_MIN_MASK		GENMASK(5, 0)
+#define CPR3_CPR_STEP_QUOT_MIN_SHIFT		0
+#define CPR3_CPR_STEP_QUOT_MAX_MASK		GENMASK(11, 6)
+#define CPR3_CPR_STEP_QUOT_MAX_SHIFT		6
+
+#define CPR3_REG_GCNT(ro)			(0xA0 + 0x4 * (ro))
+
+#define CPR3_REG_SENSOR_BYPASS_WRITE(sensor)	(0xE0 + 0x4 * ((sensor) / 32))
+#define CPR3_REG_SENSOR_BYPASS_WRITE_BANK(bank)	(0xE0 + 0x4 * (bank))
+
+#define CPR3_REG_SENSOR_MASK_WRITE(sensor)	(0x120 + 0x4 * ((sensor) / 32))
+#define CPR3_REG_SENSOR_MASK_WRITE_BANK(bank)	(0x120 + 0x4 * (bank))
+#define CPR3_REG_SENSOR_MASK_READ(sensor)	(0x140 + 0x4 * ((sensor) / 32))
+
+#define CPR3_REG_SENSOR_OWNER(sensor)	(0x200 + 0x4 * (sensor))
+
+#define CPR3_REG_CONT_CMD		0x800
+#define CPR3_CONT_CMD_ACK		0x1
+#define CPR3_CONT_CMD_NACK		0x0
+
+#define CPR3_REG_THRESH(thread)		(0x808 + 0x440 * (thread))
+#define CPR3_THRESH_CONS_DOWN_MASK	GENMASK(3, 0)
+#define CPR3_THRESH_CONS_DOWN_SHIFT	0
+#define CPR3_THRESH_CONS_UP_MASK	GENMASK(7, 4)
+#define CPR3_THRESH_CONS_UP_SHIFT	4
+#define CPR3_THRESH_DOWN_THRESH_MASK	GENMASK(12, 8)
+#define CPR3_THRESH_DOWN_THRESH_SHIFT	8
+#define CPR3_THRESH_UP_THRESH_MASK	GENMASK(17, 13)
+#define CPR3_THRESH_UP_THRESH_SHIFT	13
+
+#define CPR3_REG_RO_MASK(thread)	(0x80C + 0x440 * (thread))
+
+#define CPR3_REG_RESULT0(thread)	(0x810 + 0x440 * (thread))
+#define CPR3_RESULT0_BUSY_MASK		BIT(0)
+#define CPR3_RESULT0_STEP_DN_MASK	BIT(1)
+#define CPR3_RESULT0_STEP_UP_MASK	BIT(2)
+#define CPR3_RESULT0_ERROR_STEPS_MASK	GENMASK(7, 3)
+#define CPR3_RESULT0_ERROR_STEPS_SHIFT	3
+#define CPR3_RESULT0_ERROR_MASK		GENMASK(19, 8)
+#define CPR3_RESULT0_ERROR_SHIFT	8
+#define CPR3_RESULT0_NEGATIVE_MASK	BIT(20)
+
+#define CPR3_REG_RESULT1(thread)	(0x814 + 0x440 * (thread))
+#define CPR3_RESULT1_QUOT_MIN_MASK	GENMASK(11, 0)
+#define CPR3_RESULT1_QUOT_MIN_SHIFT	0
+#define CPR3_RESULT1_QUOT_MAX_MASK	GENMASK(23, 12)
+#define CPR3_RESULT1_QUOT_MAX_SHIFT	12
+#define CPR3_RESULT1_RO_MIN_MASK	GENMASK(27, 24)
+#define CPR3_RESULT1_RO_MIN_SHIFT	24
+#define CPR3_RESULT1_RO_MAX_MASK	GENMASK(31, 28)
+#define CPR3_RESULT1_RO_MAX_SHIFT	28
+
+#define CPR3_REG_RESULT2(thread)		(0x818 + 0x440 * (thread))
+#define CPR3_RESULT2_STEP_QUOT_MIN_MASK		GENMASK(5, 0)
+#define CPR3_RESULT2_STEP_QUOT_MIN_SHIFT	0
+#define CPR3_RESULT2_STEP_QUOT_MAX_MASK		GENMASK(11, 6)
+#define CPR3_RESULT2_STEP_QUOT_MAX_SHIFT	6
+#define CPR3_RESULT2_SENSOR_MIN_MASK		GENMASK(23, 16)
+#define CPR3_RESULT2_SENSOR_MIN_SHIFT		16
+#define CPR3_RESULT2_SENSOR_MAX_MASK		GENMASK(31, 24)
+#define CPR3_RESULT2_SENSOR_MAX_SHIFT		24
+
+#define CPR3_REG_IRQ_EN			0x81C
+#define CPR3_REG_IRQ_CLEAR		0x820
+#define CPR3_REG_IRQ_STATUS		0x824
+#define CPR3_IRQ_UP			BIT(3)
+#define CPR3_IRQ_MID			BIT(2)
+#define CPR3_IRQ_DOWN			BIT(1)
+
+#define CPR3_REG_TARGET_QUOT(thread, ro) \
+					(0x840 + 0x440 * (thread) + 0x4 * (ro))
+
+/* Registers found only on controllers that support HW closed-loop. */
+#define CPR3_REG_PD_THROTTLE		0xE8
+#define CPR3_PD_THROTTLE_DISABLE	0x0
+
+#define CPR3_REG_HW_CLOSED_LOOP		0x3000
+#define CPR3_HW_CLOSED_LOOP_ENABLE	0x0
+#define CPR3_HW_CLOSED_LOOP_DISABLE	0x1
+
+#define CPR3_REG_CPR_TIMER_MID_CONT	0x3004
+#define CPR3_REG_CPR_TIMER_UP_DN_CONT	0x3008
+
+#define CPR3_REG_LAST_MEASUREMENT		0x7F8
+#define CPR3_LAST_MEASUREMENT_THREAD_DN_SHIFT	0
+#define CPR3_LAST_MEASUREMENT_THREAD_UP_SHIFT	4
+#define CPR3_LAST_MEASUREMENT_THREAD_DN(thread) \
+		(BIT(thread) << CPR3_LAST_MEASUREMENT_THREAD_DN_SHIFT)
+#define CPR3_LAST_MEASUREMENT_THREAD_UP(thread) \
+		(BIT(thread) << CPR3_LAST_MEASUREMENT_THREAD_UP_SHIFT)
+#define CPR3_LAST_MEASUREMENT_AGGR_DN		BIT(8)
+#define CPR3_LAST_MEASUREMENT_AGGR_MID		BIT(9)
+#define CPR3_LAST_MEASUREMENT_AGGR_UP		BIT(10)
+#define CPR3_LAST_MEASUREMENT_VALID		BIT(11)
+#define CPR3_LAST_MEASUREMENT_SAW_ERROR		BIT(12)
+#define CPR3_LAST_MEASUREMENT_PD_BYPASS_MASK	GENMASK(23, 16)
+#define CPR3_LAST_MEASUREMENT_PD_BYPASS_SHIFT	16
+
+/* CPR4 controller specific registers and bit definitions */
+#define CPR4_REG_CPR_TIMER_CLAMP			0x10
+#define CPR4_CPR_TIMER_CLAMP_THREAD_AGGREGATION_EN	BIT(27)
+
+#define CPR4_REG_MISC				0x700
+#define CPR4_MISC_RESET_STEP_QUOT_LOOP_EN	BIT(2)
+#define CPR4_MISC_MARGIN_TABLE_ROW_SELECT_MASK	GENMASK(23, 20)
+#define CPR4_MISC_MARGIN_TABLE_ROW_SELECT_SHIFT	20
+#define CPR4_MISC_TEMP_SENSOR_ID_START_MASK	GENMASK(27, 24)
+#define CPR4_MISC_TEMP_SENSOR_ID_START_SHIFT	24
+#define CPR4_MISC_TEMP_SENSOR_ID_END_MASK	GENMASK(31, 28)
+#define CPR4_MISC_TEMP_SENSOR_ID_END_SHIFT	28
+
+#define CPR4_REG_SAW_ERROR_STEP_LIMIT		0x7A4
+#define CPR4_SAW_ERROR_STEP_LIMIT_UP_MASK	GENMASK(4, 0)
+#define CPR4_SAW_ERROR_STEP_LIMIT_UP_SHIFT	0
+#define CPR4_SAW_ERROR_STEP_LIMIT_DN_MASK	GENMASK(9, 5)
+#define CPR4_SAW_ERROR_STEP_LIMIT_DN_SHIFT	5
+
+#define CPR4_REG_MARGIN_TEMP_CORE_TIMERS			0x7A8
+#define CPR4_MARGIN_TEMP_CORE_TIMERS_SETTLE_VOLTAGE_COUNT_MASK	GENMASK(28, 18)
+#define CPR4_MARGIN_TEMP_CORE_TIMERS_SETTLE_VOLTAGE_COUNT_SHIFT	18
+
+#define CPR4_REG_MARGIN_TEMP_CORE(core)		(0x7AC + 0x4 * (core))
+#define CPR4_MARGIN_TEMP_CORE_ADJ_MASK		GENMASK(7, 0)
+#define CPR4_MARGIN_TEMP_CORE_ADJ_SHIFT		8
+
+#define CPR4_REG_MARGIN_TEMP_POINT0N1		0x7F0
+#define CPR4_MARGIN_TEMP_POINT0_MASK		GENMASK(11, 0)
+#define CPR4_MARGIN_TEMP_POINT0_SHIFT		0
+#define CPR4_MARGIN_TEMP_POINT1_MASK		GENMASK(23, 12)
+#define CPR4_MARGIN_TEMP_POINT1_SHIFT		12
+#define CPR4_REG_MARGIN_TEMP_POINT2		0x7F4
+#define CPR4_MARGIN_TEMP_POINT2_MASK		GENMASK(11, 0)
+#define CPR4_MARGIN_TEMP_POINT2_SHIFT		0
+
+#define CPR4_REG_MARGIN_ADJ_CTL					0x7F8
+#define CPR4_MARGIN_ADJ_CTL_BOOST_EN				BIT(0)
+#define CPR4_MARGIN_ADJ_CTL_CORE_ADJ_EN				BIT(1)
+#define CPR4_MARGIN_ADJ_CTL_TEMP_ADJ_EN				BIT(2)
+#define CPR4_MARGIN_ADJ_CTL_TIMER_SETTLE_VOLTAGE_EN		BIT(3)
+#define CPR4_MARGIN_ADJ_CTL_HW_CLOSED_LOOP_EN_MASK		BIT(4)
+#define CPR4_MARGIN_ADJ_CTL_HW_CLOSED_LOOP_ENABLE		BIT(4)
+#define CPR4_MARGIN_ADJ_CTL_HW_CLOSED_LOOP_DISABLE		0
+#define CPR4_MARGIN_ADJ_CTL_PER_RO_KV_MARGIN_EN			BIT(7)
+#define CPR4_MARGIN_ADJ_CTL_KV_MARGIN_ADJ_EN			BIT(8)
+#define CPR4_MARGIN_ADJ_CTL_PMIC_STEP_SIZE_MASK			GENMASK(16, 12)
+#define CPR4_MARGIN_ADJ_CTL_PMIC_STEP_SIZE_SHIFT		12
+#define CPR4_MARGIN_ADJ_CTL_INITIAL_TEMP_BAND_MASK		GENMASK(21, 19)
+#define CPR4_MARGIN_ADJ_CTL_INITIAL_TEMP_BAND_SHIFT		19
+#define CPR4_MARGIN_ADJ_CTL_MAX_NUM_CORES_MASK			GENMASK(25, 22)
+#define CPR4_MARGIN_ADJ_CTL_MAX_NUM_CORES_SHIFT			22
+#define CPR4_MARGIN_ADJ_CTL_KV_MARGIN_ADJ_STEP_QUOT_MASK	GENMASK(31, 26)
+#define CPR4_MARGIN_ADJ_CTL_KV_MARGIN_ADJ_STEP_QUOT_SHIFT	26
+
+#define CPR4_REG_CPR_MASK_THREAD(thread)	(0x80C + 0x440 * (thread))
+#define CPR4_CPR_MASK_THREAD_DISABLE_THREAD		BIT(31)
+#define CPR4_CPR_MASK_THREAD_RO_MASK4THREAD_MASK	GENMASK(15, 0)
+
+/* CPRh controller specific registers and bit definitions */
+#define CPRH_REG_CORNER(corner)	(0x3A00 + 0x4 * (corner))
+#define CPRH_CORNER_INIT_VOLTAGE_MASK		GENMASK(7, 0)
+#define CPRH_CORNER_INIT_VOLTAGE_SHIFT		0
+#define CPRH_CORNER_FLOOR_VOLTAGE_MASK		GENMASK(15, 8)
+#define CPRH_CORNER_FLOOR_VOLTAGE_SHIFT		8
+#define CPRH_CORNER_QUOT_DELTA_MASK		GENMASK(24, 16)
+#define CPRH_CORNER_QUOT_DELTA_SHIFT		16
+#define CPRH_CORNER_RO_SEL_MASK			GENMASK(28, 25)
+#define CPRH_CORNER_RO_SEL_SHIFT		25
+#define CPRH_CORNER_CPR_CL_DISABLE	BIT(29)
+#define CPRH_CORNER_CORE_TEMP_MARGIN_DISABLE	BIT(30)
+#define CPRH_CORNER_LAST_KNOWN_VOLTAGE_ENABLE	BIT(31)
+#define CPRH_CORNER_INIT_VOLTAGE_MAX_VALUE	255
+#define CPRH_CORNER_FLOOR_VOLTAGE_MAX_VALUE	255
+#define CPRH_CORNER_QUOT_DELTA_MAX_VALUE	511
+
+#define CPRH_REG_CTL				0x3AA0
+#define CPRH_CTL_OSM_ENABLED			BIT(0)
+#define CPRH_CTL_BASE_VOLTAGE_MASK		GENMASK(10, 1)
+#define CPRH_CTL_BASE_VOLTAGE_SHIFT		1
+#define CPRH_CTL_INIT_MODE_MASK		GENMASK(16, 11)
+#define CPRH_CTL_INIT_MODE_SHIFT		11
+#define CPRH_CTL_MODE_SWITCH_DELAY_MASK		GENMASK(24, 17)
+#define CPRH_CTL_MODE_SWITCH_DELAY_SHIFT	17
+#define CPRH_CTL_VOLTAGE_MULTIPLIER_MASK	GENMASK(28, 25)
+#define CPRH_CTL_VOLTAGE_MULTIPLIER_SHIFT	25
+#define CPRH_CTL_LAST_KNOWN_VOLTAGE_MARGIN_MASK		GENMASK(31, 29)
+#define CPRH_CTL_LAST_KNOWN_VOLTAGE_MARGIN_SHIFT	29
+
+#define CPRH_REG_STATUS			0x3AA4
+#define CPRH_STATUS_CORNER			GENMASK(5, 0)
+#define CPRH_STATUS_CORNER_LAST_VOLT_MASK	GENMASK(17, 6)
+#define CPRH_STATUS_CORNER_LAST_VOLT_SHIFT	6
+
+#define CPRH_REG_CORNER_BAND	0x3AA8
+#define CPRH_CORNER_BAND_MASK		GENMASK(5, 0)
+#define CPRH_CORNER_BAND_SHIFT		6
+#define CPRH_CORNER_BAND_MAX_COUNT		4
+
+#define CPRH_MARGIN_TEMP_CORE_VBAND(core, vband) \
+	((vband) == 0 ? CPR4_REG_MARGIN_TEMP_CORE(core) \
+			: 0x3AB0 + 0x40 * ((vband) - 1) + 0x4 * (core))
+
+/*
+ * The amount of time to wait for the CPR controller to become idle when
+ * performing an aging measurement.
+ */
+#define CPR3_AGING_MEASUREMENT_TIMEOUT_NS	5000000
+
+/*
+ * The number of individual aging measurements to perform which are then
+ * averaged together in order to determine the final aging adjustment value.
+ */
+#define CPR3_AGING_MEASUREMENT_ITERATIONS	16
+
+/*
+ * Aging measurements for the aged and unaged ring oscillators take place a few
+ * microseconds apart.  If the vdd-supply voltage fluctuates between the two
+ * measurements, then the difference between them will be incorrect.  The
+ * difference could end up too high or too low.  This constant defines the
+ * number of lowest and highest measurements to ignore when averaging.
+ */
+#define CPR3_AGING_MEASUREMENT_FILTER		3
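+
+/*
+ * With CPR3_AGING_MEASUREMENT_ITERATIONS = 16 and a filter of 3, the
+ * 16 - 2 * 3 = 10 remaining measurements are averaged.
+ */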
+
+/*
+ * The number of times to attempt the full aging measurement sequence before
+ * declaring a measurement failure.
+ */
+#define CPR3_AGING_RETRY_COUNT			5
+
+/*
+ * The maximum time to wait in microseconds for a CPR register write to
+ * complete.
+ */
+#define CPR3_REGISTER_WRITE_DELAY_US		200
+
+/*
+ * The number of times the CPRh controller multiplies the mode switch
+ * delay before utilizing it.
+ */
+#define CPRH_MODE_SWITCH_DELAY_FACTOR 4
+
+/*
+ * The number of times the CPRh controller multiplies the delta quotient
+ * steps before utilizing it.
+ */
+#define CPRH_DELTA_QUOT_STEP_FACTOR 4
+
+/*
+ * The multiplier applied to scaling factor value used to derive GCNT
+ * for aging measurements.
+ */
+#define CPR3_AGING_GCNT_SCALING_UNITY	1000
+
+static DEFINE_MUTEX(cpr3_controller_list_mutex);
+static LIST_HEAD(cpr3_controller_list);
+static struct dentry *cpr3_debugfs_base;
+
+/**
+ * cpr3_read() - read four bytes from the memory address specified
+ * @ctrl:		Pointer to the CPR3 controller
+ * @offset:		Offset in bytes from the CPR3 controller's base address
+ *
+ * Return: memory address value
+ */
+static inline u32 cpr3_read(struct cpr3_controller *ctrl, u32 offset)
+{
+	if (!ctrl->cpr_enabled) {
+		cpr3_err(ctrl, "CPR register reads are not possible when CPR clocks are disabled\n");
+		return 0;
+	}
+
+	return readl_relaxed(ctrl->cpr_ctrl_base + offset);
+}
+
+/**
+ * cpr3_write() - write four bytes to the memory address specified
+ * @ctrl:		Pointer to the CPR3 controller
+ * @offset:		Offset in bytes from the CPR3 controller's base address
+ * @value:		Value to write to the memory address
+ *
+ * Return: none
+ */
+static inline void cpr3_write(struct cpr3_controller *ctrl, u32 offset,
+				u32 value)
+{
+	if (!ctrl->cpr_enabled) {
+		cpr3_err(ctrl, "CPR register writes are not possible when CPR clocks are disabled\n");
+		return;
+	}
+
+	writel_relaxed(value, ctrl->cpr_ctrl_base + offset);
+}
+
+/**
+ * cpr3_masked_write() - perform a read-modify-write sequence so that only
+ *		masked bits are modified
+ * @ctrl:		Pointer to the CPR3 controller
+ * @offset:		Offset in bytes from the CPR3 controller's base address
+ * @mask:		Mask identifying the bits that should be modified
+ * @value:		Value to write to the memory address
+ *
+ * Return: none
+ */
+static inline void cpr3_masked_write(struct cpr3_controller *ctrl, u32 offset,
+				u32 mask, u32 value)
+{
+	u32 reg_val, orig_val;
+
+	if (!ctrl->cpr_enabled) {
+		cpr3_err(ctrl, "CPR register writes are not possible when CPR clocks are disabled\n");
+		return;
+	}
+
+	reg_val = orig_val = readl_relaxed(ctrl->cpr_ctrl_base + offset);
+	reg_val &= ~mask;
+	reg_val |= value & mask;
+
+	if (reg_val != orig_val)
+		writel_relaxed(reg_val, ctrl->cpr_ctrl_base + offset);
+}
+
+/**
+ * cpr3_ctrl_loop_enable() - enable the CPR sensing loop for a given controller
+ * @ctrl:		Pointer to the CPR3 controller
+ *
+ * Return: none
+ */
+static inline void cpr3_ctrl_loop_enable(struct cpr3_controller *ctrl)
+{
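+	/*
+	 * Leave the sensing loop disabled while the aggregated corner
+	 * allows voltage boost.
+	 */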
+	if (ctrl->cpr_enabled && !(ctrl->aggr_corner.sdelta
+		&& ctrl->aggr_corner.sdelta->allow_boost))
+		cpr3_masked_write(ctrl, CPR3_REG_CPR_CTL,
+			CPR3_CPR_CTL_LOOP_EN_MASK, CPR3_CPR_CTL_LOOP_ENABLE);
+}
+
+/**
+ * cpr3_ctrl_loop_disable() - disable the CPR sensing loop for a given
+ *		controller
+ * @ctrl:		Pointer to the CPR3 controller
+ *
+ * Return: none
+ */
+static inline void cpr3_ctrl_loop_disable(struct cpr3_controller *ctrl)
+{
+	if (ctrl->cpr_enabled)
+		cpr3_masked_write(ctrl, CPR3_REG_CPR_CTL,
+			CPR3_CPR_CTL_LOOP_EN_MASK, CPR3_CPR_CTL_LOOP_DISABLE);
+}
+
+/**
+ * cpr3_clock_enable() - prepare and enable all clocks used by this CPR3
+ *		controller
+ * @ctrl:		Pointer to the CPR3 controller
+ *
+ * Return: 0 on success, errno on failure
+ */
+static int cpr3_clock_enable(struct cpr3_controller *ctrl)
+{
+	int rc;
+
+	rc = clk_prepare_enable(ctrl->bus_clk);
+	if (rc) {
+		cpr3_err(ctrl, "failed to enable bus clock, rc=%d\n", rc);
+		return rc;
+	}
+
+	rc = clk_prepare_enable(ctrl->iface_clk);
+	if (rc) {
+		cpr3_err(ctrl, "failed to enable interface clock, rc=%d\n", rc);
+		clk_disable_unprepare(ctrl->bus_clk);
+		return rc;
+	}
+
+	rc = clk_prepare_enable(ctrl->core_clk);
+	if (rc) {
+		cpr3_err(ctrl, "failed to enable core clock, rc=%d\n", rc);
+		clk_disable_unprepare(ctrl->iface_clk);
+		clk_disable_unprepare(ctrl->bus_clk);
+		return rc;
+	}
+
+	return 0;
+}
+
+/**
+ * cpr3_clock_disable() - disable and unprepare all clocks used by this CPR3
+ *		controller
+ * @ctrl:		Pointer to the CPR3 controller
+ *
+ * Return: none
+ */
+static void cpr3_clock_disable(struct cpr3_controller *ctrl)
+{
+	clk_disable_unprepare(ctrl->core_clk);
+	clk_disable_unprepare(ctrl->iface_clk);
+	clk_disable_unprepare(ctrl->bus_clk);
+}
+
+/**
+ * cpr3_ctrl_clear_cpr4_config() - clear the CPR4 register configuration
+ *		programmed for current aggregated corner of a given controller
+ * @ctrl:		Pointer to the CPR3 controller
+ *
+ * Return: 0 on success, errno on failure
+ */
+static inline int cpr3_ctrl_clear_cpr4_config(struct cpr3_controller *ctrl)
+{
+	struct cpr4_sdelta *aggr_sdelta = ctrl->aggr_corner.sdelta;
+	bool cpr_enabled = ctrl->cpr_enabled;
+	int i, rc = 0;
+
+	if (!aggr_sdelta || !(aggr_sdelta->allow_core_count_adj
+		|| aggr_sdelta->allow_temp_adj || aggr_sdelta->allow_boost))
+		/* cpr4 features are not enabled */
+		return 0;
+
+	/* Ensure that CPR clocks are enabled before writing to registers. */
+	if (!cpr_enabled) {
+		rc = cpr3_clock_enable(ctrl);
+		if (rc) {
+			cpr3_err(ctrl, "clock enable failed, rc=%d\n", rc);
+			return rc;
+		}
+		ctrl->cpr_enabled = true;
+	}
+
+	/*
+	 * Clear feature enable configuration made for current
+	 * aggregated corner.
+	 */
+	cpr3_masked_write(ctrl, CPR4_REG_MARGIN_ADJ_CTL,
+		CPR4_MARGIN_ADJ_CTL_MAX_NUM_CORES_MASK
+		| CPR4_MARGIN_ADJ_CTL_CORE_ADJ_EN
+		| CPR4_MARGIN_ADJ_CTL_TEMP_ADJ_EN
+		| CPR4_MARGIN_ADJ_CTL_KV_MARGIN_ADJ_EN
+		| CPR4_MARGIN_ADJ_CTL_BOOST_EN
+		| CPR4_MARGIN_ADJ_CTL_HW_CLOSED_LOOP_EN_MASK, 0);
+
+	cpr3_masked_write(ctrl, CPR4_REG_MISC,
+			CPR4_MISC_MARGIN_TABLE_ROW_SELECT_MASK,
+			0 << CPR4_MISC_MARGIN_TABLE_ROW_SELECT_SHIFT);
+
+	for (i = 0; i <= aggr_sdelta->max_core_count; i++) {
+		/* Clear voltage margin adjustments programmed in TEMP_COREi */
+		cpr3_write(ctrl, CPR4_REG_MARGIN_TEMP_CORE(i), 0);
+	}
+
+	/* Turn off CPR clocks if they were off before this function call. */
+	if (!cpr_enabled) {
+		cpr3_clock_disable(ctrl);
+		ctrl->cpr_enabled = false;
+	}
+
+	return 0;
+}
+
+/**
+ * cpr3_closed_loop_enable() - enable logical CPR closed-loop operation
+ * @ctrl:		Pointer to the CPR3 controller
+ *
+ * Return: 0 on success, errno on failure
+ */
+static int cpr3_closed_loop_enable(struct cpr3_controller *ctrl)
+{
+	int rc;
+
+	if (!ctrl->cpr_allowed_hw || !ctrl->cpr_allowed_sw) {
+		cpr3_err(ctrl, "cannot enable closed-loop CPR operation because it is disallowed\n");
+		return -EPERM;
+	} else if (ctrl->cpr_enabled) {
+		/* Already enabled */
+		return 0;
+	} else if (ctrl->cpr_suspended) {
+		/*
+		 * CPR must remain disabled as the system is entering suspend.
+		 */
+		return 0;
+	}
+
+	rc = cpr3_clock_enable(ctrl);
+	if (rc) {
+		cpr3_err(ctrl, "unable to enable CPR clocks, rc=%d\n", rc);
+		return rc;
+	}
+
+	ctrl->cpr_enabled = true;
+	cpr3_debug(ctrl, "CPR closed-loop operation enabled\n");
+
+	return 0;
+}
+
+/**
+ * cpr3_closed_loop_disable() - disable logical CPR closed-loop operation
+ * @ctrl:		Pointer to the CPR3 controller
+ *
+ * Return: 0 on success, errno on failure
+ */
+static inline int cpr3_closed_loop_disable(struct cpr3_controller *ctrl)
+{
+	if (!ctrl->cpr_enabled) {
+		/* Already disabled */
+		return 0;
+	}
+
+	cpr3_clock_disable(ctrl);
+	ctrl->cpr_enabled = false;
+	cpr3_debug(ctrl, "CPR closed-loop operation disabled\n");
+
+	return 0;
+}
+
+/**
+ * cpr3_regulator_get_gcnt() - returns the GCNT register value corresponding
+ *		to the clock rate and sensor time of the CPR3 controller
+ * @ctrl:		Pointer to the CPR3 controller
+ *
+ * Return: GCNT value
+ */
+static u32 cpr3_regulator_get_gcnt(struct cpr3_controller *ctrl)
+{
+	u64 temp;
+	unsigned int remainder;
+	u32 gcnt;
+
+	temp = (u64)ctrl->cpr_clock_rate * (u64)ctrl->sensor_time;
+	remainder = do_div(temp, 1000000000);
+	if (remainder)
+		temp++;
+	/*
+	 * GCNT == 0 corresponds to a single ref clock measurement interval, so
+	 * offset GCNT values by 1.
+	 */
+	gcnt = temp - 1;
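+	/*
+	 * Example (hypothetical values): cpr_clock_rate = 19200000 Hz and
+	 * sensor_time = 1000 ns give temp = 19.2 rounded up to 20, so
+	 * gcnt = 19 is programmed.
+	 */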
+
+	return gcnt;
+}
+
+/**
+ * cpr3_regulator_init_thread() - performs hardware initialization of CPR
+ *		thread registers
+ * @thread:		Pointer to the CPR3 thread
+ *
+ * CPR interface/bus clocks must be enabled before calling this function.
+ *
+ * Return: 0 on success, errno on failure
+ */
+static int cpr3_regulator_init_thread(struct cpr3_thread *thread)
+{
+	u32 reg;
+
+	reg = (thread->consecutive_up << CPR3_THRESH_CONS_UP_SHIFT)
+		& CPR3_THRESH_CONS_UP_MASK;
+	reg |= (thread->consecutive_down << CPR3_THRESH_CONS_DOWN_SHIFT)
+		& CPR3_THRESH_CONS_DOWN_MASK;
+	reg |= (thread->up_threshold << CPR3_THRESH_UP_THRESH_SHIFT)
+		& CPR3_THRESH_UP_THRESH_MASK;
+	reg |= (thread->down_threshold << CPR3_THRESH_DOWN_THRESH_SHIFT)
+		& CPR3_THRESH_DOWN_THRESH_MASK;
+
+	cpr3_write(thread->ctrl, CPR3_REG_THRESH(thread->thread_id), reg);
+
+	/*
+	 * Mask all ROs initially so that an unused thread doesn't contribute
+	 * to the closed-loop voltage.
+	 */
+	cpr3_write(thread->ctrl, CPR3_REG_RO_MASK(thread->thread_id),
+		CPR3_RO_MASK);
+
+	return 0;
+}
+
+/**
+ * cpr4_regulator_init_temp_points() - performs hardware initialization of CPR4
+ *		registers to track tsen temperature data and also specify the
+ *		temperature band range values to apply different voltage margins
+ * @ctrl:		Pointer to the CPR3 controller
+ *
+ * CPR interface/bus clocks must be enabled before calling this function.
+ *
+ * Return: 0 on success, errno on failure
+ */
+static int cpr4_regulator_init_temp_points(struct cpr3_controller *ctrl)
+{
+	if (!ctrl->allow_temp_adj)
+		return 0;
+
+	cpr3_masked_write(ctrl, CPR4_REG_MISC,
+				CPR4_MISC_TEMP_SENSOR_ID_START_MASK,
+				ctrl->temp_sensor_id_start
+				<< CPR4_MISC_TEMP_SENSOR_ID_START_SHIFT);
+
+	cpr3_masked_write(ctrl, CPR4_REG_MISC,
+				CPR4_MISC_TEMP_SENSOR_ID_END_MASK,
+				ctrl->temp_sensor_id_end
+				<< CPR4_MISC_TEMP_SENSOR_ID_END_SHIFT);
+
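+	/*
+	 * Temperature points for unused bands are programmed with the
+	 * largest positive value (0x7FF) so that the highest configured
+	 * band covers all remaining temperatures.
+	 */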
+	cpr3_masked_write(ctrl, CPR4_REG_MARGIN_TEMP_POINT2,
+		CPR4_MARGIN_TEMP_POINT2_MASK,
+		(ctrl->temp_band_count == 4 ? ctrl->temp_points[2] : 0x7FF)
+		<< CPR4_MARGIN_TEMP_POINT2_SHIFT);
+
+	cpr3_masked_write(ctrl, CPR4_REG_MARGIN_TEMP_POINT0N1,
+		CPR4_MARGIN_TEMP_POINT1_MASK,
+		(ctrl->temp_band_count >= 3 ? ctrl->temp_points[1] : 0x7FF)
+		<< CPR4_MARGIN_TEMP_POINT1_SHIFT);
+
+	cpr3_masked_write(ctrl, CPR4_REG_MARGIN_TEMP_POINT0N1,
+		CPR4_MARGIN_TEMP_POINT0_MASK,
+		(ctrl->temp_band_count >= 2 ? ctrl->temp_points[0] : 0x7FF)
+		<< CPR4_MARGIN_TEMP_POINT0_SHIFT);
+	return 0;
+}
+
+/**
+ * cpr3_regulator_init_cpr4() - performs hardware initialization at the
+ *		controller and thread level required for CPR4 operation.
+ * @ctrl:		Pointer to the CPR3 controller
+ *
+ * CPR interface/bus clocks must be enabled before calling this function.
+ * This function allocates sdelta structures and sdelta tables for aggregated
+ * corners of the controller and its threads.
+ *
+ * Return: 0 on success, errno on failure
+ */
+static int cpr3_regulator_init_cpr4(struct cpr3_controller *ctrl)
+{
+	struct cpr3_thread *thread;
+	struct cpr3_regulator *vreg;
+	struct cpr4_sdelta *sdelta;
+	int i, j, ctrl_max_core_count, thread_max_core_count, rc = 0;
+	bool ctrl_valid_sdelta, thread_valid_sdelta;
+	u32 pmic_step_size = 1;
+	int thread_id = 0;
+	u64 temp;
+
+	if (ctrl->reset_step_quot_loop_en)
+		cpr3_masked_write(ctrl, CPR4_REG_MISC,
+				CPR4_MISC_RESET_STEP_QUOT_LOOP_EN,
+				CPR4_MISC_RESET_STEP_QUOT_LOOP_EN);
+
+	if (ctrl->supports_hw_closed_loop) {
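+		/*
+		 * step_volt is in uV; when the SAW expects mV units, convert
+		 * the PMIC step size accordingly (otherwise 1 is programmed).
+		 */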
+		if (ctrl->saw_use_unit_mV)
+			pmic_step_size = ctrl->step_volt / 1000;
+		cpr3_masked_write(ctrl, CPR4_REG_MARGIN_ADJ_CTL,
+				  CPR4_MARGIN_ADJ_CTL_PMIC_STEP_SIZE_MASK,
+				  (pmic_step_size
+				  << CPR4_MARGIN_ADJ_CTL_PMIC_STEP_SIZE_SHIFT));
+
+		cpr3_masked_write(ctrl, CPR4_REG_SAW_ERROR_STEP_LIMIT,
+				  CPR4_SAW_ERROR_STEP_LIMIT_DN_MASK,
+				  (ctrl->down_error_step_limit
+					<< CPR4_SAW_ERROR_STEP_LIMIT_DN_SHIFT));
+
+		cpr3_masked_write(ctrl, CPR4_REG_SAW_ERROR_STEP_LIMIT,
+				  CPR4_SAW_ERROR_STEP_LIMIT_UP_MASK,
+				  (ctrl->up_error_step_limit
+					<< CPR4_SAW_ERROR_STEP_LIMIT_UP_SHIFT));
+
+		/*
+		 * Enable thread aggregation regardless of which threads are
+		 * enabled or disabled.
+		 */
+		cpr3_masked_write(ctrl, CPR4_REG_CPR_TIMER_CLAMP,
+				  CPR4_CPR_TIMER_CLAMP_THREAD_AGGREGATION_EN,
+				  CPR4_CPR_TIMER_CLAMP_THREAD_AGGREGATION_EN);
+
+		switch (ctrl->thread_count) {
+		case 0:
+			/* Disable both threads */
+			cpr3_masked_write(ctrl, CPR4_REG_CPR_MASK_THREAD(0),
+				CPR4_CPR_MASK_THREAD_DISABLE_THREAD
+				    | CPR4_CPR_MASK_THREAD_RO_MASK4THREAD_MASK,
+				CPR4_CPR_MASK_THREAD_DISABLE_THREAD
+				    | CPR4_CPR_MASK_THREAD_RO_MASK4THREAD_MASK);
+
+			cpr3_masked_write(ctrl, CPR4_REG_CPR_MASK_THREAD(1),
+				CPR4_CPR_MASK_THREAD_DISABLE_THREAD
+				    | CPR4_CPR_MASK_THREAD_RO_MASK4THREAD_MASK,
+				CPR4_CPR_MASK_THREAD_DISABLE_THREAD
+				    | CPR4_CPR_MASK_THREAD_RO_MASK4THREAD_MASK);
+			break;
+		case 1:
+			/* Disable unused thread */
+			thread_id = ctrl->thread[0].thread_id ? 0 : 1;
+			cpr3_masked_write(ctrl,
+				CPR4_REG_CPR_MASK_THREAD(thread_id),
+				CPR4_CPR_MASK_THREAD_DISABLE_THREAD
+				    | CPR4_CPR_MASK_THREAD_RO_MASK4THREAD_MASK,
+				CPR4_CPR_MASK_THREAD_DISABLE_THREAD
+				    | CPR4_CPR_MASK_THREAD_RO_MASK4THREAD_MASK);
+			break;
+		}
+	}
+
+	if (!ctrl->allow_core_count_adj && !ctrl->allow_temp_adj
+		&& !ctrl->allow_boost) {
+		/*
+		 * Skip the configuration below since none of these features
+		 * are enabled.
+		 */
+		return rc;
+	}
+
+	if (ctrl->supports_hw_closed_loop)
+		cpr3_masked_write(ctrl, CPR4_REG_MARGIN_ADJ_CTL,
+				  CPR4_MARGIN_ADJ_CTL_TIMER_SETTLE_VOLTAGE_EN,
+				  CPR4_MARGIN_ADJ_CTL_TIMER_SETTLE_VOLTAGE_EN);
+
+	cpr3_masked_write(ctrl, CPR4_REG_MARGIN_ADJ_CTL,
+			CPR4_MARGIN_ADJ_CTL_KV_MARGIN_ADJ_STEP_QUOT_MASK,
+			ctrl->step_quot_fixed
+			<< CPR4_MARGIN_ADJ_CTL_KV_MARGIN_ADJ_STEP_QUOT_SHIFT);
+
+	cpr3_masked_write(ctrl, CPR4_REG_MARGIN_ADJ_CTL,
+			CPR4_MARGIN_ADJ_CTL_PER_RO_KV_MARGIN_EN,
+			(ctrl->use_dynamic_step_quot
+			? CPR4_MARGIN_ADJ_CTL_PER_RO_KV_MARGIN_EN : 0));
+
+	cpr3_masked_write(ctrl, CPR4_REG_MARGIN_ADJ_CTL,
+			CPR4_MARGIN_ADJ_CTL_INITIAL_TEMP_BAND_MASK,
+			ctrl->initial_temp_band
+			<< CPR4_MARGIN_ADJ_CTL_INITIAL_TEMP_BAND_SHIFT);
+
+	rc = cpr4_regulator_init_temp_points(ctrl);
+	if (rc) {
+		cpr3_err(ctrl, "initialize temp points failed, rc=%d\n", rc);
+		return rc;
+	}
+
+	if (ctrl->voltage_settling_time) {
+		/*
+		 * Configure the settling timer used to account for
+		 * one VDD supply step.
+		 */
+		temp = (u64)ctrl->cpr_clock_rate
+				* (u64)ctrl->voltage_settling_time;
+		do_div(temp, 1000000000);
+		cpr3_masked_write(ctrl, CPR4_REG_MARGIN_TEMP_CORE_TIMERS,
+			CPR4_MARGIN_TEMP_CORE_TIMERS_SETTLE_VOLTAGE_COUNT_MASK,
+			temp
+			cpr3_err(ctrl, "unable to request bus clock, rc=%d\n",
+	}
+
+	/*
+	 * Allocate memory for cpr4_sdelta structure and sdelta table for
+	 * controller aggregated corner by finding the maximum core count
+	 * used by any cpr3 regulators.
+	 */
+	ctrl_max_core_count = 1;
+	ctrl_valid_sdelta = false;
+	for (i = 0; i < ctrl->thread_count; i++) {
+		thread = &ctrl->thread[i];
+
+		/*
+		 * Allocate memory for cpr4_sdelta structure and sdelta table
+		 * for thread aggregated corner by finding the maximum core
+		 * count used by any cpr3 regulators of the thread.
+		 */
+		thread_max_core_count = 1;
+		thread_valid_sdelta = false;
+		for (j = 0; j < thread->vreg_count; j++) {
+			vreg = &thread->vreg[j];
+			thread_max_core_count = max(thread_max_core_count,
+							vreg->max_core_count);
+			thread_valid_sdelta |= (vreg->allow_core_count_adj
+							| vreg->allow_temp_adj
+							| vreg->allow_boost);
+		}
+		if (thread_valid_sdelta) {
+			sdelta = devm_kzalloc(ctrl->dev, sizeof(*sdelta),
+					GFP_KERNEL);
+			if (!sdelta)
+				return -ENOMEM;
+
+			sdelta->table = devm_kcalloc(ctrl->dev,
+						thread_max_core_count
+						* ctrl->temp_band_count,
+						sizeof(*sdelta->table),
+						GFP_KERNEL);
+			if (!sdelta->table)
+				return -ENOMEM;
+
+			sdelta->boost_table = devm_kcalloc(ctrl->dev,
+						ctrl->temp_band_count,
+						sizeof(*sdelta->boost_table),
+						GFP_KERNEL);
+			if (!sdelta->boost_table)
+				return -ENOMEM;
+
+			thread->aggr_corner.sdelta = sdelta;
+		}
+
+		ctrl_valid_sdelta |= thread_valid_sdelta;
+		ctrl_max_core_count = max(ctrl_max_core_count,
+						thread_max_core_count);
+	}
+
+	if (ctrl_valid_sdelta) {
+		sdelta = devm_kzalloc(ctrl->dev, sizeof(*sdelta), GFP_KERNEL);
+		if (!sdelta)
+			return -ENOMEM;
+
+		sdelta->table = devm_kcalloc(ctrl->dev, ctrl_max_core_count
+					* ctrl->temp_band_count,
+					sizeof(*sdelta->table), GFP_KERNEL);
+		if (!sdelta->table)
+			return -ENOMEM;
+
+		sdelta->boost_table = devm_kcalloc(ctrl->dev,
+					ctrl->temp_band_count,
+					sizeof(*sdelta->boost_table),
+					GFP_KERNEL);
+		if (!sdelta->boost_table)
+			return -ENOMEM;
+
+		ctrl->aggr_corner.sdelta = sdelta;
+	}
+
+	return 0;
+}
+
+/**
+ * cpr3_write_temp_core_margin() - programs hardware SDELTA registers with
+ *		the voltage margin adjustments that need to be applied for
+ *		different online core-count and temperature bands.
+ * @ctrl:		Pointer to the CPR3 controller
+ * @addr:		SDELTA register address
+ * @temp_core_adj:	Array of voltage margin values for different temperature
+ *			bands.
+ *
+ * CPR interface/bus clocks must be enabled before calling this function.
+ *
+ * Return: none
+ */
+static void cpr3_write_temp_core_margin(struct cpr3_controller *ctrl,
+				 int addr, int *temp_core_adj)
+{
+	int i, margin_steps;
+	u32 reg = 0;
+
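+	/*
+	 * Clamp each band's margin adjustment to the range of an 8-bit
+	 * two's-complement value and pack it into bits [8i+7:8i] of the
+	 * SDELTA register.
+	 */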
+	for (i = 0; i < ctrl->temp_band_count; i++) {
+		margin_steps = max(min(temp_core_adj[i], 127), -128);
+		reg |= (margin_steps & CPR4_MARGIN_TEMP_CORE_ADJ_MASK) <<
+			(i * CPR4_MARGIN_TEMP_CORE_ADJ_SHIFT);
+	}
+
+	cpr3_write(ctrl, addr, reg);
+	cpr3_debug(ctrl, "sdelta offset=0x%08x, val=0x%08x\n", addr, reg);
+}
+
+/**
+ * cpr3_controller_program_sdelta() - programs hardware SDELTA registers with
+ *		the voltage margin adjustments that need to be applied at
+ *		different online core-count and temperature bands. Also,
+ *		programs hardware register configuration for per-online-core
+ *		and per-temperature based adjustments.
+ * @ctrl:		Pointer to the CPR3 controller
+ *
+ * CPR interface/bus clocks must be enabled before calling this function.
+ *
+ * Return: 0 on success, errno on failure
+ */
+static int cpr3_controller_program_sdelta(struct cpr3_controller *ctrl)
+{
+	struct cpr3_corner *corner = &ctrl->aggr_corner;
+	struct cpr4_sdelta *sdelta = corner->sdelta;
+	int i, index, max_core_count, rc = 0;
+	bool cpr_enabled = ctrl->cpr_enabled;
+
+	if (!sdelta)
+		/* cpr4_sdelta not defined for current aggregated corner */
+		return 0;
+
+	if (ctrl->supports_hw_closed_loop && ctrl->cpr_enabled) {
+		cpr3_masked_write(ctrl, CPR4_REG_MARGIN_ADJ_CTL,
+			CPR4_MARGIN_ADJ_CTL_HW_CLOSED_LOOP_EN_MASK,
+			(ctrl->use_hw_closed_loop && !sdelta->allow_boost)
+			? CPR4_MARGIN_ADJ_CTL_HW_CLOSED_LOOP_ENABLE : 0);
+	}
+
+	if (!sdelta->allow_core_count_adj && !sdelta->allow_temp_adj
+		&& !sdelta->allow_boost) {
+		/*
+		 * Per-online-core, per-temperature and voltage boost
+		 * adjustments are disabled for this aggregation corner.
+		 */
+		return 0;
+	}
+
+	/* Ensure that CPR clocks are enabled before writing to registers. */
+	if (!cpr_enabled) {
+		rc = cpr3_clock_enable(ctrl);
+		if (rc) {
+			cpr3_err(ctrl, "clock enable failed, rc=%d\n", rc);
+			return rc;
+		}
+		ctrl->cpr_enabled = true;
+	}
+
+	max_core_count = sdelta->max_core_count;
+
+	if (sdelta->allow_core_count_adj || sdelta->allow_temp_adj) {
+		if (sdelta->allow_core_count_adj) {
+			/* Program TEMP_CORE0 to the same margins as TEMP_CORE1 */
+			cpr3_write_temp_core_margin(ctrl,
+				CPR4_REG_MARGIN_TEMP_CORE(0),
+				&sdelta->table[0]);
+		}
+
+		for (i = 0; i < max_core_count; i++) {
+			index = i * sdelta->temp_band_count;
+			/*
+			 * Program TEMP_COREi with voltage margin adjustments
+			 * that need to be applied when the number of cores
+			 * becomes i.
+			 */
+			cpr3_write_temp_core_margin(ctrl,
+				CPR4_REG_MARGIN_TEMP_CORE(
+						sdelta->allow_core_count_adj
+						? i + 1 : max_core_count),
+						&sdelta->table[index]);
+		}
+	}
+
+	if (sdelta->allow_boost) {
+		/* Program only boost_num_cores row of SDELTA */
+		cpr3_write_temp_core_margin(ctrl,
+			CPR4_REG_MARGIN_TEMP_CORE(sdelta->boost_num_cores),
+					&sdelta->boost_table[0]);
+	}
+
+	if (!sdelta->allow_core_count_adj && !sdelta->allow_boost) {
+		cpr3_masked_write(ctrl, CPR4_REG_MISC,
+			CPR4_MISC_MARGIN_TABLE_ROW_SELECT_MASK,
+			max_core_count
+			<< CPR4_MISC_MARGIN_TABLE_ROW_SELECT_SHIFT);
+	}
+
+	cpr3_masked_write(ctrl, CPR4_REG_MARGIN_ADJ_CTL,
+		CPR4_MARGIN_ADJ_CTL_MAX_NUM_CORES_MASK
+		| CPR4_MARGIN_ADJ_CTL_CORE_ADJ_EN
+		| CPR4_MARGIN_ADJ_CTL_TEMP_ADJ_EN
+		| CPR4_MARGIN_ADJ_CTL_KV_MARGIN_ADJ_EN
+		| CPR4_MARGIN_ADJ_CTL_BOOST_EN,
+		max_core_count << CPR4_MARGIN_ADJ_CTL_MAX_NUM_CORES_SHIFT
+		| ((sdelta->allow_core_count_adj || sdelta->allow_boost)
+			? CPR4_MARGIN_ADJ_CTL_CORE_ADJ_EN : 0)
+		| ((sdelta->allow_temp_adj && ctrl->supports_hw_closed_loop
+			&& sdelta->allow_core_count_adj)
+			? CPR4_MARGIN_ADJ_CTL_TEMP_ADJ_EN : 0)
+		| (((ctrl->use_hw_closed_loop && !sdelta->allow_boost)
+		    || !ctrl->supports_hw_closed_loop)
+			? CPR4_MARGIN_ADJ_CTL_KV_MARGIN_ADJ_EN : 0)
+		| (sdelta->allow_boost
+			?  CPR4_MARGIN_ADJ_CTL_BOOST_EN : 0));
+
+	/*
+	 * Ensure that all previous CPR register writes have completed before
+	 * continuing.
+	 */
+	mb();
+
+	/* Turn off CPR clocks if they were off before this function call. */
+	if (!cpr_enabled) {
+		cpr3_clock_disable(ctrl);
+		ctrl->cpr_enabled = false;
+	}
+
+	return 0;
+}
+
+/**
+ * cpr3_regulator_set_base_target_quot() - configure the target quotient
+ *		for each RO of the CPR3 regulator for CPRh operation.
+ *		In particular, the quotient of the RO selected for operation
+ *		should correspond to the lowest target quotient across the
+ *		corners supported by the single regulator of the CPR3 thread.
+ * @vreg:		Pointer to the CPR3 regulator
+ * @base_quots:		Pointer to the base quotient array. The array must be
+ *			of size CPR3_RO_COUNT and it is populated with the
+ *			base quotient per-RO.
+ *
+ * Return: none
+ */
+static void cpr3_regulator_set_base_target_quot(struct cpr3_regulator *vreg,
+						u32 *base_quots)
+{
+	struct cpr3_controller *ctrl = vreg->thread->ctrl;
+	int i, j, ro_mask = CPR3_RO_MASK;
+	u32 min_quot;
+
+	for (i = 0; i < vreg->corner_count; i++)
+		ro_mask &= vreg->corner[i].ro_mask;
+
+	/* Unmask the ROs selected for active use. */
+	cpr3_write(ctrl, CPR3_REG_RO_MASK(vreg->thread->thread_id),
+		   ro_mask);
+
+	for (i = 0; i < CPR3_RO_COUNT; i++) {
+		for (j = 0, min_quot = INT_MAX; j < vreg->corner_count; j++)
+			if (vreg->corner[j].target_quot[i])
+				min_quot = min(min_quot,
+				       vreg->corner[j].target_quot[i]);
+
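+		/* No corner uses RO i, so program a target quotient of 0. */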
+		if (min_quot == INT_MAX)
+			min_quot = 0;
+
+		cpr3_write(ctrl,
+			   CPR3_REG_TARGET_QUOT(vreg->thread->thread_id, i),
+			   min_quot);
+
+		base_quots[i] = min_quot;
+	}
+}
+
+/**
+ * cpr3_regulator_init_cprh_corners() - configure the per-corner CPRh registers
+ * @vreg:		Pointer to the CPR3 regulator
+ *
+ * This function programs the controller registers which contain all information
+ * necessary to resolve the closed-loop voltage per-corner at runtime such as
+ * open-loop and floor voltages, target quotient delta, and RO select value.
+ * These registers also provide a means to disable closed-loop operation, core
+ * and temperature adjustments.
+ *
+ * Return: 0 on success, errno on failure
+ */
+static int cpr3_regulator_init_cprh_corners(struct cpr3_regulator *vreg)
+{
+	struct cpr3_controller *ctrl = vreg->thread->ctrl;
+	struct cpr3_corner *corner;
+	u32 reg, delta_quot_steps, ro_sel;
+	u32 *base_quots;
+	int open_loop_volt_steps, floor_volt_steps, i, j, rc = 0;
+
+	base_quots = kcalloc(CPR3_RO_COUNT, sizeof(*base_quots),
+			     GFP_KERNEL);
+	if (!base_quots)
+		return -ENOMEM;
+
+	cpr3_regulator_set_base_target_quot(vreg, base_quots);
+
+	for (i = 0; i < vreg->corner_count; i++) {
+		corner = &vreg->corner[i];
+		for (j = 0, ro_sel = INT_MAX; j < CPR3_RO_COUNT; j++) {
+			if (corner->target_quot[j]) {
+				ro_sel = j;
+				break;
+			}
+		}
+
+		if (ro_sel == INT_MAX) {
+			if (!corner->proc_freq) {
+				/*
+				 * Corner is not used as an active DCVS set
+				 * point; select RO 0 arbitrarily.
+				 */
+				ro_sel = 0;
+			} else {
+				cpr3_err(vreg, "corner=%d has invalid RO select value\n",
+					 i);
+				rc = -EINVAL;
+				goto free_base_quots;
+			}
+		}
+
+		open_loop_volt_steps = DIV_ROUND_UP(corner->open_loop_volt -
+						    ctrl->base_volt,
+						    ctrl->step_volt);
+		floor_volt_steps = DIV_ROUND_UP(corner->floor_volt -
+						ctrl->base_volt,
+						ctrl->step_volt);
+		delta_quot_steps = corner->proc_freq ?
+			DIV_ROUND_UP(corner->target_quot[ro_sel] -
+				     base_quots[ro_sel],
+				     CPRH_DELTA_QUOT_STEP_FACTOR) :
+			0;
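+		/*
+		 * Example (hypothetical values): with base_volt = 400000 uV,
+		 * step_volt = 4000 uV and open_loop_volt = 800000 uV, the
+		 * corner encodes 100 initial-voltage steps; a target quotient
+		 * 60 above the base RO quotient encodes DIV_ROUND_UP(60, 4) =
+		 * 15 delta quotient steps.
+		 */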
+
+		if (open_loop_volt_steps > CPRH_CORNER_INIT_VOLTAGE_MAX_VALUE ||
+		    floor_volt_steps > CPRH_CORNER_FLOOR_VOLTAGE_MAX_VALUE ||
+		    delta_quot_steps > CPRH_CORNER_QUOT_DELTA_MAX_VALUE) {
+			cpr3_err(ctrl, "invalid CPRh corner configuration: open_loop_volt_steps=%d (%d max), floor_volt_steps=%d (%d max), delta_quot_steps=%d (%d max)\n",
+				 open_loop_volt_steps,
+				 CPRH_CORNER_INIT_VOLTAGE_MAX_VALUE,
+				 floor_volt_steps,
+				 CPRH_CORNER_FLOOR_VOLTAGE_MAX_VALUE,
+				 delta_quot_steps,
+				 CPRH_CORNER_QUOT_DELTA_MAX_VALUE);
+			rc = -EINVAL;
+			goto free_base_quots;
+		}
+
+		reg = (open_loop_volt_steps << CPRH_CORNER_INIT_VOLTAGE_SHIFT)
+			& CPRH_CORNER_INIT_VOLTAGE_MASK;
+		reg |= (floor_volt_steps << CPRH_CORNER_FLOOR_VOLTAGE_SHIFT)
+			& CPRH_CORNER_FLOOR_VOLTAGE_MASK;
+		reg |= (delta_quot_steps << CPRH_CORNER_QUOT_DELTA_SHIFT)
+			& CPRH_CORNER_QUOT_DELTA_MASK;
+		reg |= (ro_sel << CPRH_CORNER_RO_SEL_SHIFT)
+			& CPRH_CORNER_RO_SEL_MASK;
+
+		if (corner->use_open_loop)
+			reg |= CPRH_CORNER_CPR_CL_DISABLE;
+
+		cpr3_debug(ctrl, "corner=%d open_loop_volt_steps=%d, floor_volt_steps=%d, delta_quot_steps=%d, base_volt=%d, step_volt=%d, base_quot=%d\n",
+			   i, open_loop_volt_steps, floor_volt_steps,
+			   delta_quot_steps, ctrl->base_volt,
+			   ctrl->step_volt, base_quots[ro_sel]);
+		cpr3_write(ctrl, CPRH_REG_CORNER(i), reg);
+	}
+
+free_base_quots:
+	kfree(base_quots);
+	return rc;
+}
+
+/**
+ * cprh_controller_program_sdelta() - programs hardware SDELTA registers with
+ *		the margins that need to be applied at different online
+ *		core-count and temperature bands for each corner band. Also,
+ *		programs hardware register configuration for core-count and
+ *		temp-based adjustments
+ *
+ * @ctrl:		Pointer to the CPR3 controller
+ *
+ * CPR interface/bus clocks must be enabled before calling this function.
+ *
+ * Return: none
+ */
+static void cprh_controller_program_sdelta(
+		struct cpr3_controller *ctrl)
+{
+	struct cpr3_regulator *vreg = &ctrl->thread[0].vreg[0];
+	struct cprh_corner_band *corner_band;
+	struct cpr4_sdelta *sdelta;
+	int i, j, index;
+	u32 reg = 0;
+
+	if (!vreg->allow_core_count_adj && !vreg->allow_temp_adj)
+		return;
+
+	cpr4_regulator_init_temp_points(ctrl);
+
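+	/*
+	 * Pack the first corner of each corner band into the 6-bit fields of
+	 * the CORNER_BAND register; unused band slots get the out-of-range
+	 * value corner_count + 1 so that they never match.
+	 */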
+	for (i = 0; i < CPRH_CORNER_BAND_MAX_COUNT; i++) {
+		reg |= (i < vreg->corner_band_count ?
+			vreg->corner_band[i].corner
+			& CPRH_CORNER_BAND_MASK :
+			vreg->corner_count + 1)
+			<< (i * CPRH_CORNER_BAND_SHIFT);
+	}
+
+	cpr3_write(ctrl, CPRH_REG_CORNER_BAND, reg);
+
+	for (i = 0; i < vreg->corner_band_count; i++) {
+		corner_band = &vreg->corner_band[i];
+		sdelta = corner_band->sdelta;
+
+		if (!sdelta->allow_core_count_adj && !sdelta->allow_temp_adj) {
+			/*
+			 * Per-online-core and per-temperature margin
+			 * adjustments are disabled for this corner band.
+			 */
+			continue;
+		}
+
+		if (vreg->allow_core_count_adj)
+			cpr3_write_temp_core_margin(ctrl,
+				    CPRH_MARGIN_TEMP_CORE_VBAND(0, i),
+				    &sdelta->table[0]);
+
+		for (j = 0; j < sdelta->max_core_count; j++) {
+			index = j * sdelta->temp_band_count;
+
+			cpr3_write_temp_core_margin(ctrl,
+				    CPRH_MARGIN_TEMP_CORE_VBAND(
+				    sdelta->allow_core_count_adj
+				    ? j + 1 : vreg->max_core_count, i),
+				    &sdelta->table[index]);
+		}
+	}
+
+	if (!vreg->allow_core_count_adj) {
+		cpr3_masked_write(ctrl, CPR4_REG_MISC,
+			CPR4_MISC_MARGIN_TABLE_ROW_SELECT_MASK,
+			vreg->max_core_count
+			<< CPR4_MISC_MARGIN_TABLE_ROW_SELECT_SHIFT);
+	}
+
+	cpr3_masked_write(ctrl, CPR4_REG_MARGIN_ADJ_CTL,
+		CPR4_MARGIN_ADJ_CTL_MAX_NUM_CORES_MASK
+		| CPR4_MARGIN_ADJ_CTL_CORE_ADJ_EN
+		| CPR4_MARGIN_ADJ_CTL_TEMP_ADJ_EN
+		| CPR4_MARGIN_ADJ_CTL_KV_MARGIN_ADJ_EN
+		| CPR4_MARGIN_ADJ_CTL_HW_CLOSED_LOOP_EN_MASK,
+		vreg->max_core_count << CPR4_MARGIN_ADJ_CTL_MAX_NUM_CORES_SHIFT
+		| ((vreg->allow_core_count_adj)
+		   ? CPR4_MARGIN_ADJ_CTL_CORE_ADJ_EN : 0)
+		| (vreg->allow_temp_adj ? CPR4_MARGIN_ADJ_CTL_TEMP_ADJ_EN : 0)
+		| ((ctrl->use_hw_closed_loop)
+		? CPR4_MARGIN_ADJ_CTL_KV_MARGIN_ADJ_EN : 0)
+		| (ctrl->use_hw_closed_loop
+		? CPR4_MARGIN_ADJ_CTL_HW_CLOSED_LOOP_ENABLE : 0));
+
+	/* Ensure that previous CPR register writes complete */
+	mb();
+}
+
+static int cprh_regulator_aging_adjust(struct cpr3_controller *ctrl);
+
+/**
+ * cpr3_regulator_init_cprh() - performs hardware initialization at the
+ *		controller and thread level required for CPRh operation.
+ * @ctrl:		Pointer to the CPR3 controller
+ *
+ * CPR interface/bus clocks must be enabled before calling this function.
+ *
+ * Return: 0 on success, errno on failure
+ */
+static int cpr3_regulator_init_cprh(struct cpr3_controller *ctrl)
+{
+	u32 reg, pmic_step_size = 1;
+	u64 temp;
+	int rc;
+
+	/* Single thread, single regulator supported */
+	if (ctrl->thread_count != 1) {
+		cpr3_err(ctrl, "expected 1 thread but found %d\n",
+			ctrl->thread_count);
+		return -EINVAL;
+	} else if (ctrl->thread[0].vreg_count != 1) {
+		cpr3_err(ctrl, "expected 1 regulator but found %d\n",
+			ctrl->thread[0].vreg_count);
+		return -EINVAL;
+	}
+
+	rc = cprh_regulator_aging_adjust(ctrl);
+	if (rc && rc != -ETIMEDOUT) {
+		/*
+		 * -ETIMEDOUT is excluded above so that initialization does not
+		 * fail when the aging measurement merely timed out because the
+		 * CPR sensors were not available; all other errors are fatal.
+		 */
+		cpr3_err(ctrl, "CPR aging adjustment failed, rc=%d\n", rc);
+		return rc;
+	}
+
+	cprh_controller_program_sdelta(ctrl);
+
+	rc = cpr3_regulator_init_cprh_corners(&ctrl->thread[0].vreg[0]);
+	if (rc) {
+		cpr3_err(ctrl, "failed to initialize CPRh corner registers\n");
+		return rc;
+	}
+
+	if (ctrl->reset_step_quot_loop_en)
+		cpr3_masked_write(ctrl, CPR4_REG_MISC,
+				CPR4_MISC_RESET_STEP_QUOT_LOOP_EN,
+				CPR4_MISC_RESET_STEP_QUOT_LOOP_EN);
+
+	if (ctrl->saw_use_unit_mV)
+		pmic_step_size = ctrl->step_volt / 1000;
+	cpr3_masked_write(ctrl, CPR4_REG_MARGIN_ADJ_CTL,
+				CPR4_MARGIN_ADJ_CTL_PMIC_STEP_SIZE_MASK,
+				(pmic_step_size
+				<< CPR4_MARGIN_ADJ_CTL_PMIC_STEP_SIZE_SHIFT));
+
+	cpr3_masked_write(ctrl, CPR4_REG_SAW_ERROR_STEP_LIMIT,
+				CPR4_SAW_ERROR_STEP_LIMIT_DN_MASK,
+				(ctrl->down_error_step_limit
+				<< CPR4_SAW_ERROR_STEP_LIMIT_DN_SHIFT));
+
+	cpr3_masked_write(ctrl, CPR4_REG_SAW_ERROR_STEP_LIMIT,
+				CPR4_SAW_ERROR_STEP_LIMIT_UP_MASK,
+				(ctrl->up_error_step_limit
+				<< CPR4_SAW_ERROR_STEP_LIMIT_UP_SHIFT));
+
+	cpr3_masked_write(ctrl, CPR4_REG_MARGIN_ADJ_CTL,
+			CPR4_MARGIN_ADJ_CTL_KV_MARGIN_ADJ_STEP_QUOT_MASK,
+			ctrl->step_quot_fixed
+			<< CPR4_MARGIN_ADJ_CTL_KV_MARGIN_ADJ_STEP_QUOT_SHIFT);
+
+	cpr3_masked_write(ctrl, CPR4_REG_MARGIN_ADJ_CTL,
+			CPR4_MARGIN_ADJ_CTL_PER_RO_KV_MARGIN_EN,
+			(ctrl->use_dynamic_step_quot
+			? CPR4_MARGIN_ADJ_CTL_PER_RO_KV_MARGIN_EN : 0));
+
+	if (ctrl->voltage_settling_time) {
+		/*
+		 * Configure the settling timer used to account for
+		 * one VDD supply step.
+		 */
+		temp = (u64)ctrl->cpr_clock_rate
+				* (u64)ctrl->voltage_settling_time;
+		do_div(temp, 1000000000);
+		cpr3_masked_write(ctrl, CPR4_REG_MARGIN_TEMP_CORE_TIMERS,
+			CPR4_MARGIN_TEMP_CORE_TIMERS_SETTLE_VOLTAGE_COUNT_MASK,
+			temp
+		  << CPR4_MARGIN_TEMP_CORE_TIMERS_SETTLE_VOLTAGE_COUNT_SHIFT);
+	}
+
+	if (ctrl->corner_switch_delay_time) {
+		/*
+		 * Configure the settling timer used to delay subsequent SAW
+		 * requests.  The CPRh controller multiplies the programmed
+		 * value by CPRH_MODE_SWITCH_DELAY_FACTOR, so that factor is
+		 * divided out here.
+		 */
+		temp = (u64)ctrl->cpr_clock_rate
+			* (u64)ctrl->corner_switch_delay_time;
+		do_div(temp, 1000000000);
+		do_div(temp, CPRH_MODE_SWITCH_DELAY_FACTOR);
+		cpr3_masked_write(ctrl, CPRH_REG_CTL,
+				  CPRH_CTL_MODE_SWITCH_DELAY_MASK,
+				  temp << CPRH_CTL_MODE_SWITCH_DELAY_SHIFT);
+	}
+
+	/*
+	 * Program base voltage and voltage multiplier values which
+	 * are used for floor and initial voltage calculations by the
+	 * CPRh controller.
+	 */
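+	/*
+	 * Example (hypothetical values): base_volt = 400000 uV and
+	 * step_volt = 4000 uV program a base of 100 steps and a voltage
+	 * multiplier of 4 (mV per step).
+	 */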
+	reg = (DIV_ROUND_UP(ctrl->base_volt, ctrl->step_volt)
+	       << CPRH_CTL_BASE_VOLTAGE_SHIFT)
+		& CPRH_CTL_BASE_VOLTAGE_MASK;
+	reg |= (DIV_ROUND_UP(ctrl->step_volt, 1000)
+		<< CPRH_CTL_VOLTAGE_MULTIPLIER_SHIFT)
+		& CPRH_CTL_VOLTAGE_MULTIPLIER_MASK;
+	/* Enable OSM block interface with CPR */
+	reg |= CPRH_CTL_OSM_ENABLED;
+	cpr3_masked_write(ctrl, CPRH_REG_CTL, CPRH_CTL_BASE_VOLTAGE_MASK
+			  | CPRH_CTL_VOLTAGE_MULTIPLIER_MASK
+			  | CPRH_CTL_OSM_ENABLED, reg);
+
+	/* Enable loop_en */
+	cpr3_ctrl_loop_enable(ctrl);
+
+	return 0;
+}
+
+/**
+ * cpr3_regulator_init_ctrl() - performs hardware initialization of CPR
+ *		controller registers
+ * @ctrl:		Pointer to the CPR3 controller
+ *
+ * Return: 0 on success, errno on failure
+ */
+static int cpr3_regulator_init_ctrl(struct cpr3_controller *ctrl)
+{
+	int i, j, k, m, rc;
+	u32 ro_used = 0;
+	u32 gcnt, cont_dly, up_down_dly, val;
+	u64 temp;
+	char *mode;
+
+	if (ctrl->core_clk) {
+		rc = clk_set_rate(ctrl->core_clk, ctrl->cpr_clock_rate);
+		if (rc) {
+			cpr3_err(ctrl, "clk_set_rate(core_clk, %u) failed, rc=%d\n",
+				ctrl->cpr_clock_rate, rc);
+			return rc;
+		}
+	}
+
+	rc = cpr3_clock_enable(ctrl);
+	if (rc) {
+		cpr3_err(ctrl, "clock enable failed, rc=%d\n", rc);
+		return rc;
+	}
+	ctrl->cpr_enabled = true;
+
+	/* Find all RO's used by any corner of any regulator. */
+	for (i = 0; i < ctrl->thread_count; i++)
+		for (j = 0; j < ctrl->thread[i].vreg_count; j++)
+			for (k = 0; k < ctrl->thread[i].vreg[j].corner_count;
+			     k++)
+				for (m = 0; m < CPR3_RO_COUNT; m++)
+					if (ctrl->thread[i].vreg[j].corner[k].
+					    target_quot[m])
+						ro_used |= BIT(m);
+
+	/* Configure the GCNT of the RO's that will be used */
+	gcnt = cpr3_regulator_get_gcnt(ctrl);
+	for (i = 0; i < CPR3_RO_COUNT; i++)
+		if (ro_used & BIT(i))
+			cpr3_write(ctrl, CPR3_REG_GCNT(i), gcnt);
+
+	/* Configure the loop delay time */
+	temp = (u64)ctrl->cpr_clock_rate * (u64)ctrl->loop_time;
+	do_div(temp, 1000000000);
+	cont_dly = temp;
+	if (ctrl->supports_hw_closed_loop
+		&& ctrl->ctrl_type == CPR_CTRL_TYPE_CPR3)
+		cpr3_write(ctrl, CPR3_REG_CPR_TIMER_MID_CONT, cont_dly);
+	else
+		cpr3_write(ctrl, CPR3_REG_CPR_TIMER_AUTO_CONT, cont_dly);
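+	/*
+	 * Example (hypothetical values): cpr_clock_rate = 19200000 Hz and
+	 * loop_time = 5000000 ns yield cont_dly = 96000 reference clock
+	 * cycles.
+	 */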
+
+	if (ctrl->ctrl_type == CPR_CTRL_TYPE_CPR3) {
+		temp = (u64)ctrl->cpr_clock_rate *
+				(u64)ctrl->up_down_delay_time;
+		do_div(temp, 1000000000);
+		up_down_dly = temp;
+		if (ctrl->supports_hw_closed_loop)
+			cpr3_write(ctrl, CPR3_REG_CPR_TIMER_UP_DN_CONT,
+				up_down_dly);
+		cpr3_debug(ctrl, "up_down_dly=%u, up_down_delay_time=%u ns\n",
+			up_down_dly, ctrl->up_down_delay_time);
+	}
+
+	cpr3_debug(ctrl, "cpr_clock_rate=%u Hz, sensor_time=%u ns, loop_time=%u ns, gcnt=%u, cont_dly=%u\n",
+		ctrl->cpr_clock_rate, ctrl->sensor_time, ctrl->loop_time,
+		gcnt, cont_dly);
+
+	/* Configure CPR sensor operation */
+	val = (ctrl->idle_clocks << CPR3_CPR_CTL_IDLE_CLOCKS_SHIFT)
+		& CPR3_CPR_CTL_IDLE_CLOCKS_MASK;
+	val |= (ctrl->count_mode << CPR3_CPR_CTL_COUNT_MODE_SHIFT)
+		& CPR3_CPR_CTL_COUNT_MODE_MASK;
+	val |= (ctrl->count_repeat << CPR3_CPR_CTL_COUNT_REPEAT_SHIFT)
+		& CPR3_CPR_CTL_COUNT_REPEAT_MASK;
+	cpr3_write(ctrl, CPR3_REG_CPR_CTL, val);
+
+	cpr3_debug(ctrl, "idle_clocks=%u, count_mode=%u, count_repeat=%u; CPR_CTL=0x%08X\n",
+		ctrl->idle_clocks, ctrl->count_mode, ctrl->count_repeat, val);
+
+	/* Configure CPR default step quotients */
+	val = (ctrl->step_quot_init_min << CPR3_CPR_STEP_QUOT_MIN_SHIFT)
+		& CPR3_CPR_STEP_QUOT_MIN_MASK;
+	val |= (ctrl->step_quot_init_max << CPR3_CPR_STEP_QUOT_MAX_SHIFT)
+		& CPR3_CPR_STEP_QUOT_MAX_MASK;
+	cpr3_write(ctrl, CPR3_REG_CPR_STEP_QUOT, val);
+
+	cpr3_debug(ctrl, "step_quot_min=%u, step_quot_max=%u; STEP_QUOT=0x%08X\n",
+		ctrl->step_quot_init_min, ctrl->step_quot_init_max, val);
+
+	/* Configure the CPR sensor ownership */
+	for (i = 0; i < ctrl->sensor_count; i++)
+		cpr3_write(ctrl, CPR3_REG_SENSOR_OWNER(i),
+			   ctrl->sensor_owner[i]);
+
+	/* Configure per-thread registers */
+	for (i = 0; i < ctrl->thread_count; i++) {
+		rc = cpr3_regulator_init_thread(&ctrl->thread[i]);
+		if (rc) {
+			cpr3_err(ctrl, "CPR thread register initialization failed, rc=%d\n",
+				rc);
+			return rc;
+		}
+	}
+
+	if (ctrl->supports_hw_closed_loop) {
+		if (ctrl->ctrl_type == CPR_CTRL_TYPE_CPR4 ||
+		    ctrl->ctrl_type == CPR_CTRL_TYPE_CPRH) {
+			cpr3_masked_write(ctrl, CPR4_REG_MARGIN_ADJ_CTL,
+				CPR4_MARGIN_ADJ_CTL_HW_CLOSED_LOOP_EN_MASK,
+				ctrl->use_hw_closed_loop
+				? CPR4_MARGIN_ADJ_CTL_HW_CLOSED_LOOP_ENABLE
+				: CPR4_MARGIN_ADJ_CTL_HW_CLOSED_LOOP_DISABLE);
+		} else if (ctrl->ctrl_type == CPR_CTRL_TYPE_CPR3) {
+			cpr3_write(ctrl, CPR3_REG_HW_CLOSED_LOOP,
+				ctrl->use_hw_closed_loop
+				? CPR3_HW_CLOSED_LOOP_ENABLE
+				: CPR3_HW_CLOSED_LOOP_DISABLE);
+
+			cpr3_debug(ctrl, "PD_THROTTLE=0x%08X\n",
+				ctrl->proc_clock_throttle);
+		}
+
+		if ((ctrl->use_hw_closed_loop ||
+		     ctrl->ctrl_type == CPR_CTRL_TYPE_CPR4) &&
+		    ctrl->ctrl_type != CPR_CTRL_TYPE_CPRH) {
+			rc = regulator_enable(ctrl->vdd_limit_regulator);
+			if (rc) {
+				cpr3_err(ctrl, "CPR limit regulator enable failed, rc=%d\n",
+					rc);
+				return rc;
+			}
+
+			if (ctrl->ctrl_type == CPR_CTRL_TYPE_CPR3) {
+				rc = msm_spm_avs_enable_irq(0,
+							   MSM_SPM_AVS_IRQ_MAX);
+				if (rc) {
+					cpr3_err(ctrl, "could not enable max IRQ, rc=%d\n",
+						rc);
+					return rc;
+				}
+			}
+		}
+	}
+
+	if (ctrl->ctrl_type == CPR_CTRL_TYPE_CPR4) {
+		rc = cpr3_regulator_init_cpr4(ctrl);
+		if (rc) {
+			cpr3_err(ctrl, "CPR4-specific controller initialization failed, rc=%d\n",
+				rc);
+			return rc;
+		}
+	} else if (ctrl->ctrl_type == CPR_CTRL_TYPE_CPRH) {
+		rc = cpr3_regulator_init_cprh(ctrl);
+		if (rc) {
+			cpr3_err(ctrl, "CPRh-specific controller initialization failed, rc=%d\n",
+				 rc);
+			return rc;
+		}
+	}
+
+	/* Ensure that all register writes complete before disabling clocks. */
+	wmb();
+
+	/* Keep CPR clocks on for CPRh full HW closed-loop operation */
+	if (ctrl->ctrl_type != CPR_CTRL_TYPE_CPRH) {
+		cpr3_clock_disable(ctrl);
+		ctrl->cpr_enabled = false;
+	}
+
+	if (!ctrl->cpr_allowed_sw || !ctrl->cpr_allowed_hw)
+		mode = "open-loop";
+	else if (ctrl->supports_hw_closed_loop &&
+		 ctrl->ctrl_type != CPR_CTRL_TYPE_CPRH)
+		mode = ctrl->use_hw_closed_loop
+			? "HW closed-loop" : "SW closed-loop";
+	else if (ctrl->supports_hw_closed_loop &&
+		 ctrl->ctrl_type == CPR_CTRL_TYPE_CPRH)
+		mode = ctrl->use_hw_closed_loop
+			? "full HW closed-loop" : "open-loop";
+	else
+		mode = "closed-loop";
+
+	cpr3_info(ctrl, "Default CPR mode = %s\n", mode);
+
+	return 0;
+}
+
+/**
+ * cpr3_regulator_set_target_quot() - configure the target quotient for each
+ *		RO of the CPR3 thread and set the RO mask
+ * @thread:		Pointer to the CPR3 thread
+ *
+ * Return: none
+ */
+static void cpr3_regulator_set_target_quot(struct cpr3_thread *thread)
+{
+	u32 new_quot, last_quot;
+	int i;
+
+	if (thread->aggr_corner.ro_mask == CPR3_RO_MASK
+	    && thread->last_closed_loop_aggr_corner.ro_mask == CPR3_RO_MASK) {
+		/* Avoid writing target quotients since all RO's are masked. */
+		return;
+	} else if (thread->aggr_corner.ro_mask == CPR3_RO_MASK) {
+		cpr3_write(thread->ctrl, CPR3_REG_RO_MASK(thread->thread_id),
+			CPR3_RO_MASK);
+		thread->last_closed_loop_aggr_corner.ro_mask = CPR3_RO_MASK;
+		/*
+		 * Only the RO_MASK register needs to be written since all
+		 * RO's are masked.
+		 */
+		return;
+	} else if (thread->aggr_corner.ro_mask
+			!= thread->last_closed_loop_aggr_corner.ro_mask) {
+		cpr3_write(thread->ctrl, CPR3_REG_RO_MASK(thread->thread_id),
+			thread->aggr_corner.ro_mask);
+	}
+
+	for (i = 0; i < CPR3_RO_COUNT; i++) {
+		new_quot = thread->aggr_corner.target_quot[i];
+		last_quot = thread->last_closed_loop_aggr_corner.target_quot[i];
+		if (new_quot != last_quot)
+			cpr3_write(thread->ctrl,
+				CPR3_REG_TARGET_QUOT(thread->thread_id, i),
+				new_quot);
+	}
+
+	thread->last_closed_loop_aggr_corner = thread->aggr_corner;
+}
+
+/**
+ * cpr3_update_vreg_closed_loop_volt() - update the last known settled
+ *		closed loop voltage for a CPR3 regulator
+ * @vreg:		Pointer to the CPR3 regulator
+ * @vdd_volt:		Last known settled voltage in microvolts for the
+ *			VDD supply
+ * @reg_last_measurement: Value read from the LAST_MEASUREMENT register
+ *
+ * Return: none
+ */
+static void cpr3_update_vreg_closed_loop_volt(struct cpr3_regulator *vreg,
+				int vdd_volt, u32 reg_last_measurement)
+{
+	bool step_dn, step_up, aggr_step_up, aggr_step_dn, aggr_step_mid;
+	bool valid, pd_valid, saw_error;
+	struct cpr3_controller *ctrl = vreg->thread->ctrl;
+	struct cpr3_corner *corner;
+	u32 id;
+
+	if (vreg->last_closed_loop_corner == CPR3_REGULATOR_CORNER_INVALID)
+		return;
+
+	corner = &vreg->corner[vreg->last_closed_loop_corner];
+
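+	/*
+	 * Decide whether the settled VDD voltage can be attributed to this
+	 * regulator's last closed-loop corner; if not, return without
+	 * updating corner->last_volt.
+	 */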
+	if (vreg->thread->last_closed_loop_aggr_corner.ro_mask
+	    == CPR3_RO_MASK || !vreg->aggregated) {
+		return;
+	} else if (!ctrl->cpr_enabled || !ctrl->last_corner_was_closed_loop) {
+		return;
+	} else if (ctrl->thread_count == 1
+		 && vdd_volt >= corner->floor_volt
+		 && vdd_volt <= corner->ceiling_volt) {
+		corner->last_volt = vdd_volt;
+		cpr3_debug(vreg, "last_volt updated: last_volt[%d]=%d, ceiling_volt[%d]=%d, floor_volt[%d]=%d\n",
+			   vreg->last_closed_loop_corner, corner->last_volt,
+			   vreg->last_closed_loop_corner,
+			   corner->ceiling_volt,
+			   vreg->last_closed_loop_corner,
+			   corner->floor_volt);
+		return;
+	} else if (!ctrl->supports_hw_closed_loop) {
+		return;
+	} else if (ctrl->ctrl_type != CPR_CTRL_TYPE_CPR3) {
+		corner->last_volt = vdd_volt;
+		cpr3_debug(vreg, "last_volt updated: last_volt[%d]=%d, ceiling_volt[%d]=%d, floor_volt[%d]=%d\n",
+			   vreg->last_closed_loop_corner, corner->last_volt,
+			   vreg->last_closed_loop_corner,
+			   corner->ceiling_volt,
+			   vreg->last_closed_loop_corner,
+			   corner->floor_volt);
+		return;
+	}
+
+	/* CPR clocks are on and HW closed loop is supported */
+	valid = !!(reg_last_measurement & CPR3_LAST_MEASUREMENT_VALID);
+	if (!valid) {
+		cpr3_debug(vreg, "CPR_LAST_VALID_MEASUREMENT=0x%X valid bit not set\n",
+			   reg_last_measurement);
+		return;
+	}
+
+	id = vreg->thread->thread_id;
+
+	step_dn
+	       = !!(reg_last_measurement & CPR3_LAST_MEASUREMENT_THREAD_DN(id));
+	step_up
+	       = !!(reg_last_measurement & CPR3_LAST_MEASUREMENT_THREAD_UP(id));
+	aggr_step_dn = !!(reg_last_measurement & CPR3_LAST_MEASUREMENT_AGGR_DN);
+	aggr_step_mid
+		= !!(reg_last_measurement & CPR3_LAST_MEASUREMENT_AGGR_MID);
+	aggr_step_up = !!(reg_last_measurement & CPR3_LAST_MEASUREMENT_AGGR_UP);
+	saw_error = !!(reg_last_measurement & CPR3_LAST_MEASUREMENT_SAW_ERROR);
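+	/*
+	 * pd_valid: at least one of this regulator's power domains was not
+	 * bypassed during the last measurement.
+	 */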
+	pd_valid
+	     = !((((reg_last_measurement & CPR3_LAST_MEASUREMENT_PD_BYPASS_MASK)
+		       >> CPR3_LAST_MEASUREMENT_PD_BYPASS_SHIFT)
+		      & vreg->pd_bypass_mask) == vreg->pd_bypass_mask);
+
+	if (!pd_valid) {
+		cpr3_debug(vreg, "CPR_LAST_VALID_MEASUREMENT=0x%X, all power domains bypassed\n",
+			   reg_last_measurement);
+		return;
+	} else if (step_dn && step_up) {
+		cpr3_err(vreg, "both up and down status bits set, CPR_LAST_VALID_MEASUREMENT=0x%X\n",
+			 reg_last_measurement);
+		return;
+	} else if (aggr_step_dn && step_dn && vdd_volt < corner->last_volt
+		   && vdd_volt >= corner->floor_volt) {
+		corner->last_volt = vdd_volt;
+	} else if (aggr_step_up && step_up && vdd_volt > corner->last_volt
+		   && vdd_volt <= corner->ceiling_volt) {
+		corner->last_volt = vdd_volt;
+	} else if (aggr_step_mid
+		   && vdd_volt >= corner->floor_volt
+		   && vdd_volt <= corner->ceiling_volt) {
+		corner->last_volt = vdd_volt;
+	} else if (saw_error && (vdd_volt == corner->ceiling_volt
+				 || vdd_volt == corner->floor_volt)) {
+		corner->last_volt = vdd_volt;
+	} else {
+		cpr3_debug(vreg, "last_volt not updated: last_volt[%d]=%d, ceiling_volt[%d]=%d, floor_volt[%d]=%d, vdd_volt=%d, CPR_LAST_VALID_MEASUREMENT=0x%X\n",
+			   vreg->last_closed_loop_corner, corner->last_volt,
+			   vreg->last_closed_loop_corner,
+			   corner->ceiling_volt,
+			   vreg->last_closed_loop_corner, corner->floor_volt,
+			   vdd_volt, reg_last_measurement);
+		return;
+	}
+
+	cpr3_debug(vreg, "last_volt updated: last_volt[%d]=%d, ceiling_volt[%d]=%d, floor_volt[%d]=%d, CPR_LAST_VALID_MEASUREMENT=0x%X\n",
+		   vreg->last_closed_loop_corner, corner->last_volt,
+		   vreg->last_closed_loop_corner, corner->ceiling_volt,
+		   vreg->last_closed_loop_corner, corner->floor_volt,
+		   reg_last_measurement);
+}
+
+/**
+ * cpr3_regulator_config_ldo_retention() - configure per-regulator LDO retention
+ *		mode
+ * @vreg:		Pointer to the CPR3 regulator to configure
+ * @ref_volt:		Reference voltage used to determine if LDO retention
+ *			mode can be allowed. It corresponds either to the
+ *			aggregated floor voltage or the next VDD supply setpoint
+ *
+ * This function determines if a CPR3 regulator's configuration satisfies safe
+ * operating voltages for LDO retention and uses the regulator_allow_bypass()
+ * interface on the LDO retention regulator to enable or disable this feature
+ * accordingly.
+ *
+ * Return: 0 on success, errno on failure
+ */
+static int cpr3_regulator_config_ldo_retention(struct cpr3_regulator *vreg,
+					int ref_volt)
+{
+	struct regulator *ldo_ret_reg = vreg->ldo_ret_regulator;
+	int retention_volt, rc;
+	enum msm_ldo_supply_mode mode;
+
+	if (!ldo_ret_reg) {
+		/* LDO retention regulator is not defined */
+		return 0;
+	}
+
+	retention_volt = regulator_get_voltage(ldo_ret_reg);
+	if (retention_volt < 0) {
+		cpr3_err(vreg, "regulator_get_voltage(ldo_ret) failed, rc=%d\n",
+			 retention_volt);
+		return retention_volt;
+	}
+
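+	/*
+	 * Permit LDO retention mode only when the reference voltage leaves at
+	 * least the minimum headroom above the retention voltage.
+	 */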
+	mode = ref_volt >= retention_volt + vreg->ldo_min_headroom_volt
+		? LDO_MODE : BHS_MODE;
+
+	rc = regulator_allow_bypass(ldo_ret_reg, mode);
+	if (rc)
+		cpr3_err(vreg, "regulator_allow_bypass(ldo_ret) == %s failed, rc=%d\n",
+			 mode ? "true" : "false", rc);
+
+	return rc;
+}
+
+/**
+ * cpr3_regulator_config_kryo_ldo_mem_acc() - configure the mem-acc regulator
+ *		corner based upon a future Kryo LDO regulator voltage setpoint
+ * @vreg:		Pointer to the CPR3 regulator
+ * @new_volt:		New voltage in microvolts that the LDO regulator needs
+ *			to end up at
+ *
+ * This function determines if a new LDO regulator set point will result
+ * in crossing the voltage threshold that requires reconfiguration of
+ * the mem-acc regulator associated with a CPR3 regulator and, if so, performs
+ * the sequence required to select the correct mem-acc corner.
+ *
+ * Return: 0 on success, errno on failure
+ */
+static int cpr3_regulator_config_kryo_ldo_mem_acc(struct cpr3_regulator *vreg,
+					     int new_volt)
+{
+	struct cpr3_controller *ctrl = vreg->thread->ctrl;
+	struct regulator *ldo_reg = vreg->ldo_regulator;
+	struct regulator *mem_acc_reg = vreg->mem_acc_regulator;
+	int mem_acc_volt = ctrl->mem_acc_threshold_volt;
+	int last_volt, safe_volt, mem_acc_corn, rc;
+	enum msm_apm_supply apm_mode;
+
+	if (!mem_acc_reg || !mem_acc_volt || !ldo_reg)
+		return 0;
+
+	apm_mode = msm_apm_get_supply(ctrl->apm);
+	if (apm_mode < 0) {
+		cpr3_err(ctrl, "APM get supply failed, rc=%d\n",
+			 apm_mode);
+		return apm_mode;
+	}
+
+	last_volt = regulator_get_voltage(ldo_reg);
+	if (last_volt < 0) {
+		cpr3_err(vreg, "regulator_get_voltage(ldo) failed, rc=%d\n",
+			 last_volt);
+		return last_volt;
+	}
+
+	if (((last_volt < mem_acc_volt && mem_acc_volt <= new_volt)
+	     || (last_volt >= mem_acc_volt && mem_acc_volt > new_volt))) {
+
+		if (apm_mode == ctrl->apm_high_supply)
+			safe_volt = min(vreg->ldo_max_volt, mem_acc_volt);
+		else
+			safe_volt = min(max(ctrl->system_supply_max_volt -
+					    vreg->ldo_max_headroom_volt,
+					    mem_acc_volt), vreg->ldo_max_volt);
+
+		rc = regulator_set_voltage(ldo_reg, safe_volt,
+					   max(new_volt, last_volt));
+		if (rc) {
+			cpr3_err(ctrl, "regulator_set_voltage(ldo) == %d failed, rc=%d\n",
+				 mem_acc_volt, rc);
+			return rc;
+		}
+
+		mem_acc_corn = new_volt < mem_acc_volt ?
+			ctrl->mem_acc_corner_map[CPR3_MEM_ACC_LOW_CORNER] :
+			ctrl->mem_acc_corner_map[CPR3_MEM_ACC_HIGH_CORNER];
+
+		rc = regulator_set_voltage(mem_acc_reg, mem_acc_corn,
+					   mem_acc_corn);
+		if (rc) {
+			cpr3_err(ctrl, "regulator_set_voltage(mem_acc) == %d failed, rc=%d\n",
+				 0, rc);
+			return rc;
+		}
+	}
+
+	return 0;
+}
+
+/**
+ * cpr3_regulator_kryo_bhs_prepare() - configure the Kryo LDO regulator
+ *		associated with a CPR3 regulator in preparation for BHS
+ *		mode switch.
+ * @vreg:		Pointer to the CPR3 regulator
+ * @vdd_volt:		Last known settled voltage in microvolts for the VDD
+ *			supply
+ * @vdd_ceiling_volt:	Last known aggregated ceiling voltage in microvolts for
+ *			the VDD supply
+ *
+ * This function performs the necessary steps prior to switching a Kryo LDO
+ * regulator to BHS mode (LDO bypassed mode).
+ *
+ * Return: 0 on success, errno on failure
+ */
+static int cpr3_regulator_kryo_bhs_prepare(struct cpr3_regulator *vreg,
+			       int vdd_volt, int vdd_ceiling_volt)
+{
+	struct regulator *ldo_reg = vreg->ldo_regulator;
+	int bhs_volt, rc;
+
+	bhs_volt = vdd_volt - vreg->ldo_min_headroom_volt;
+	if (bhs_volt > vreg->ldo_max_volt) {
+		cpr3_debug(vreg, "limited to LDO output of %d uV when switching to BHS mode\n",
+			   vreg->ldo_max_volt);
+		bhs_volt = vreg->ldo_max_volt;
+	}
+
+	rc = cpr3_regulator_config_kryo_ldo_mem_acc(vreg, bhs_volt);
+	if (rc) {
+		cpr3_err(vreg, "failed to configure mem-acc settings\n");
+		return rc;
+	}
+
+	rc = regulator_set_voltage(ldo_reg, bhs_volt, min(vdd_ceiling_volt,
+							  vreg->ldo_max_volt));
+	if (rc) {
+		cpr3_err(vreg, "regulator_set_voltage(ldo) == %d failed, rc=%d\n",
+			 bhs_volt, rc);
+		return rc;
+	}
+
+	return rc;
+}
+
+/**
+ * cpr3_regulator_set_bhs_mode() - configure the LDO regulator associated with
+ *		a CPR3 regulator to BHS mode
+ * @vreg:		Pointer to the CPR3 regulator
+ * @vdd_volt:		Last known settled voltage in microvolts for the VDD
+ *			supply
+ * @vdd_ceiling_volt:	Last known aggregated ceiling voltage in microvolts for
+ *			the VDD supply
+ *
+ * This function performs the necessary steps to switch an LDO regulator
+ * to BHS mode (LDO bypassed mode).
+ */
+static int cpr3_regulator_set_bhs_mode(struct cpr3_regulator *vreg,
+			       int vdd_volt, int vdd_ceiling_volt)
+{
+	struct regulator *ldo_reg = vreg->ldo_regulator;
+	int rc;
+
+	if (vreg->ldo_type == CPR3_LDO_KRYO) {
+		rc = cpr3_regulator_kryo_bhs_prepare(vreg, vdd_volt,
+				vdd_ceiling_volt);
+		if (rc) {
+			cpr3_err(vreg, "cpr3 regulator bhs mode prepare failed, rc=%d\n",
+				rc);
+			return rc;
+		}
+	}
+
+	rc = regulator_allow_bypass(ldo_reg, BHS_MODE);
+	if (rc) {
+		cpr3_err(vreg, "regulator_allow_bypass(bhs) == %s failed, rc=%d\n",
+			 BHS_MODE ? "true" : "false", rc);
+		return rc;
+	}
+	vreg->ldo_regulator_bypass = BHS_MODE;
+
+	return rc;
+}
+
+/**
+ * cpr3_regulator_ldo_apm_prepare() - configure LDO regulators associated
+ *		with each CPR3 regulator of a CPR3 controller in preparation
+ *		for an APM switch.
+ * @ctrl:		Pointer to the CPR3 controller
+ * @new_volt:		New voltage in microvolts that the VDD supply
+ *			needs to end up at
+ * @last_volt:		Last known voltage in microvolts for the VDD supply
+ * @aggr_corner:	Pointer to the CPR3 corner which corresponds to the max
+ *			corner aggregated from all CPR3 threads managed by the
+ *			CPR3 controller
+ *
+ * This function ensures LDO regulator hardware requirements are met before
+ * an APM switch is requested. The function must be called as the last step
+ * before switching the APM mode.
+ *
+ * Return: 0 on success, errno on failure
+ */
+static int cpr3_regulator_ldo_apm_prepare(struct cpr3_controller *ctrl,
+				int new_volt, int last_volt,
+				struct cpr3_corner *aggr_corner)
+{
+	struct cpr3_regulator *vreg;
+	struct cpr3_corner *current_corner;
+	enum msm_apm_supply apm_mode;
+	int i, j, safe_volt, max_volt, ldo_volt, ref_volt, rc;
+
+	apm_mode = msm_apm_get_supply(ctrl->apm);
+	if (apm_mode < 0) {
+		cpr3_err(ctrl, "APM get supply failed, rc=%d\n", apm_mode);
+		return apm_mode;
+	}
+
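+	/*
+	 * Preparation is only needed when the APM is currently on the high
+	 * supply and the new VDD voltage is below the APM threshold, i.e.
+	 * when a switch to the system-supply source is imminent.
+	 */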
+	if (apm_mode == ctrl->apm_low_supply ||
+	    new_volt >= ctrl->apm_threshold_volt)
+		return 0;
+
+	/*
+	 * Guarantee LDO maximum headroom is not violated when the APM is
+	 * switched to the system-supply source.
+	 */
+	for (i = 0; i < ctrl->thread_count; i++) {
+		for (j = 0; j < ctrl->thread[i].vreg_count; j++) {
+			vreg = &ctrl->thread[i].vreg[j];
+
+			if (!vreg->vreg_enabled || vreg->current_corner
+			    == CPR3_REGULATOR_CORNER_INVALID)
+				continue;
+
+			if (!vreg->ldo_regulator || !vreg->ldo_mode_allowed ||
+			    vreg->ldo_regulator_bypass == BHS_MODE)
+				continue;
+
+			/*
+			 * If the new VDD configuration does not satisfy
+			 * requirements for LDO usage, switch the regulator
+			 * to BHS mode. By doing so, the LDO maximum headroom
+			 * does not need to be enforced.
+			 */
+			current_corner = &vreg->corner[vreg->current_corner];
+			ldo_volt = current_corner->open_loop_volt
+				- vreg->ldo_adjust_volt;
+			ref_volt = ctrl->use_hw_closed_loop ?
+				aggr_corner->floor_volt :
+				new_volt;
+
+			if (ref_volt < ldo_volt + vreg->ldo_min_headroom_volt
+			    || ldo_volt < ctrl->system_supply_max_volt -
+			    vreg->ldo_max_headroom_volt ||
+			    ldo_volt > vreg->ldo_max_volt) {
+				rc = cpr3_regulator_set_bhs_mode(vreg,
+					 last_volt, aggr_corner->ceiling_volt);
+				if (rc)
+					return rc;
+				/*
+				 * Do not enforce LDO maximum headroom since the
+				 * regulator is now configured to BHS mode.
+				 */
+				continue;
+			}
+
+			safe_volt = min(max(ldo_volt,
+					    ctrl->system_supply_max_volt
+					    - vreg->ldo_max_headroom_volt),
+					vreg->ldo_max_volt);
+			max_volt = min(ctrl->system_supply_max_volt,
+				       vreg->ldo_max_volt);
+
+			rc = regulator_set_voltage(vreg->ldo_regulator,
+						   safe_volt, max_volt);
+			if (rc) {
+				cpr3_err(vreg, "regulator_set_voltage(ldo) == %d failed, rc=%d\n",
+					 safe_volt, rc);
+				return rc;
+			}
+		}
+	}
+
+	return 0;
+}
+
+/**
+ * cpr3_regulator_config_vreg_kryo_ldo() - configure the voltage and bypass
+ *		state for the Kryo LDO regulator associated with a single CPR3
+ *		regulator.
+ *
+ * @vreg:		Pointer to the CPR3 regulator
+ * @vdd_floor_volt:	Last known aggregated floor voltage in microvolts for
+ *			the VDD supply
+ * @vdd_ceiling_volt:	Last known aggregated ceiling voltage in microvolts for
+ *			the VDD supply
+ * @ref_volt:		Reference voltage in microvolts corresponds either to
+ *			the aggregated floor voltage or the next VDD supply
+ *			setpoint.
+ * @last_volt:		Last known voltage in microvolts for the VDD supply
+ *
+ * This function performs all relevant LDO or BHS configurations if a Kryo LDO
+ * regulator is specified.
+ *
+ * Return: 0 on success, errno on failure
+ */
+static int cpr3_regulator_config_vreg_kryo_ldo(struct cpr3_regulator *vreg,
+			  int vdd_floor_volt, int vdd_ceiling_volt,
+			  int ref_volt, int last_volt)
+{
+	struct cpr3_controller *ctrl = vreg->thread->ctrl;
+	struct regulator *ldo_reg = vreg->ldo_regulator;
+	struct cpr3_corner *current_corner;
+	enum msm_apm_supply apm_mode;
+	int rc, ldo_volt, final_ldo_volt, bhs_volt, max_volt, safe_volt;
+
+	current_corner = &vreg->corner[vreg->current_corner];
+	ldo_volt = current_corner->open_loop_volt
+		- vreg->ldo_adjust_volt;
+	bhs_volt = last_volt - vreg->ldo_min_headroom_volt;
+	max_volt = min(vdd_ceiling_volt, vreg->ldo_max_volt);
+
+	if (ref_volt >= ldo_volt + vreg->ldo_min_headroom_volt &&
+	    ldo_volt >= ctrl->system_supply_max_volt -
+	    vreg->ldo_max_headroom_volt &&
+	    bhs_volt >= ctrl->system_supply_max_volt -
+	    vreg->ldo_max_headroom_volt &&
+	    ldo_volt <= vreg->ldo_max_volt) {
+		/* LDO minimum and maximum headrooms satisfied */
+		apm_mode = msm_apm_get_supply(ctrl->apm);
+		if (apm_mode < 0) {
+			cpr3_err(ctrl, "APM get supply failed, rc=%d\n",
+				 apm_mode);
+			return apm_mode;
+		}
+
+		if (vreg->ldo_regulator_bypass == BHS_MODE) {
+			/*
+			 * BHS to LDO transition. Configure LDO output
+			 * to min(max LDO output, VDD - LDO headroom)
+			 * voltage if APM is on high supply source or
+			 * min(max(system-supply ceiling - LDO max headroom,
+			 * VDD - LDO headroom), max LDO output) if
+			 * APM is on low supply source, then switch
+			 * regulator mode.
+			 */
+			if (apm_mode == ctrl->apm_high_supply)
+				safe_volt = min(vreg->ldo_max_volt, bhs_volt);
+			else
+				safe_volt =
+					min(max(ctrl->system_supply_max_volt -
+						vreg->ldo_max_headroom_volt,
+						bhs_volt),
+					    vreg->ldo_max_volt);
+
+			rc = cpr3_regulator_config_kryo_ldo_mem_acc(vreg,
+							       safe_volt);
+			if (rc) {
+				cpr3_err(vreg, "failed to configure mem-acc settings\n");
+				return rc;
+			}
+
+			rc = regulator_set_voltage(ldo_reg, safe_volt,
+						   max_volt);
+			if (rc) {
+				cpr3_err(vreg, "regulator_set_voltage(ldo) == %d failed, rc=%d\n",
+					 safe_volt, rc);
+				return rc;
+			}
+
+			rc = regulator_allow_bypass(ldo_reg, LDO_MODE);
+			if (rc) {
+				cpr3_err(vreg, "regulator_allow_bypass(ldo) == %s failed, rc=%d\n",
+					 LDO_MODE ? "true" : "false", rc);
+				return rc;
+			}
+			vreg->ldo_regulator_bypass = LDO_MODE;
+		}
+
+		/* Configure final LDO output voltage */
+		if (apm_mode == ctrl->apm_high_supply)
+			final_ldo_volt = max(ldo_volt,
+					     vdd_ceiling_volt -
+					     vreg->ldo_max_headroom_volt);
+		else
+			final_ldo_volt = ldo_volt;
+
+		rc = cpr3_regulator_config_kryo_ldo_mem_acc(vreg,
+						       final_ldo_volt);
+		if (rc) {
+			cpr3_err(vreg, "failed to configure mem-acc settings\n");
+			return rc;
+		}
+
+		rc = regulator_set_voltage(ldo_reg, final_ldo_volt, max_volt);
+		if (rc) {
+			cpr3_err(vreg, "regulator_set_voltage(ldo) == %d failed, rc=%d\n",
+				 final_ldo_volt, rc);
+			return rc;
+		}
+	} else {
+		if (vreg->ldo_regulator_bypass == LDO_MODE) {
+			/* LDO to BHS transition */
+			rc = cpr3_regulator_set_bhs_mode(vreg, last_volt,
+							 vdd_ceiling_volt);
+			if (rc)
+				return rc;
+		}
+	}
+
+	return 0;
+}
+
+/**
+ * cpr3_regulator_config_vreg_ldo300() - configure the voltage and bypass state
+ *		for the LDO300 regulator associated with a single CPR3
+ *		regulator.
+ *
+ * @vreg:		Pointer to the CPR3 regulator
+ * @new_volt:		New voltage in microvolts that VDD supply needs to
+ *			end up at
+ * @vdd_ceiling_volt:	Last known aggregated ceiling voltage in microvolts for
+ *			the VDD supply
+ *
+ * This function performs all relevant LDO or BHS configurations for an LDO300
+ * type regulator.
+ *
+ * Return: 0 on success, errno on failure
+ */
+static int cpr3_regulator_config_vreg_ldo300(struct cpr3_regulator *vreg,
+		int new_volt, int vdd_ceiling_volt)
+{
+	struct regulator *ldo_reg = vreg->ldo_regulator;
+	struct cpr3_corner *corner;
+	enum msm_ldo_supply_mode mode;
+	int rc = 0;
+
+	corner = &vreg->corner[vreg->current_corner];
+	mode = corner->ldo_mode_allowed ? LDO_MODE : BHS_MODE;
+
+	if (mode == LDO_MODE) {
+		rc = regulator_set_voltage(ldo_reg, new_volt, vdd_ceiling_volt);
+		if (rc) {
+			cpr3_err(vreg, "regulator_set_voltage(ldo) == %d failed, rc=%d\n",
+				 new_volt, rc);
+			return rc;
+		}
+	}
+
+	if (vreg->ldo_regulator_bypass != mode) {
+		rc = regulator_allow_bypass(ldo_reg, mode);
+		if (rc) {
+			cpr3_err(vreg, "regulator_allow_bypass(%s) is failed, rc=%d\n",
+				 mode == LDO_MODE ? "ldo" : "bhs", rc);
+			return rc;
+		}
+		vreg->ldo_regulator_bypass = mode;
+	}
+
+	return rc;
+}
+
+/**
+ * cpr3_regulator_config_vreg_ldo() - configure the voltage and bypass state for
+ *		the LDO regulator associated with a single CPR3 regulator.
+ *
+ * @vreg:		Pointer to the CPR3 regulator
+ * @vdd_floor_volt:	Last known aggregated floor voltage in microvolts for
+ *			the VDD supply
+ * @vdd_ceiling_volt:	Last known aggregated ceiling voltage in microvolts for
+ *			the VDD supply
+ * @new_volt:		New voltage in microvolts that VDD supply needs to
+ *			end up at
+ * @last_volt:		Last known voltage in microvolts for the VDD supply
+ *
+ * This function identifies the type of LDO regulator associated with a CPR3
+ * regulator and invokes the LDO specific configuration functions.
+ *
+ * Return: 0 on success, errno on failure
+ */
+static int cpr3_regulator_config_vreg_ldo(struct cpr3_regulator *vreg,
+			  int vdd_floor_volt, int vdd_ceiling_volt,
+			  int new_volt, int last_volt)
+{
+	struct cpr3_controller *ctrl = vreg->thread->ctrl;
+	int ref_volt, rc;
+
+	ref_volt = ctrl->use_hw_closed_loop ? vdd_floor_volt :
+		new_volt;
+
+	rc = cpr3_regulator_config_ldo_retention(vreg, ref_volt);
+	if (rc)
+		return rc;
+
+	if (!vreg->vreg_enabled ||
+		vreg->current_corner == CPR3_REGULATOR_CORNER_INVALID)
+		return 0;
+
+	switch (vreg->ldo_type) {
+	case CPR3_LDO_KRYO:
+		rc = cpr3_regulator_config_vreg_kryo_ldo(vreg, vdd_floor_volt,
+				vdd_ceiling_volt, ref_volt, last_volt);
+		if (rc)
+			cpr3_err(vreg, "kryo ldo regulator config failed, rc=%d\n",
+				rc);
+		break;
+	case CPR3_LDO300:
+		rc = cpr3_regulator_config_vreg_ldo300(vreg, new_volt,
+				vdd_ceiling_volt);
+		if (rc)
+			cpr3_err(vreg, "ldo300 regulator config failed, rc=%d\n",
+				rc);
+		break;
+	default:
+		cpr3_err(vreg, "invalid ldo regulator type = %d\n",
+				vreg->ldo_type);
+		rc = -EINVAL;
+	}
+
+	return rc;
+}
+
+/**
+ * cpr3_regulator_config_ldo() - configure the voltage and bypass state for the
+ *		LDO regulator associated with each CPR3 regulator of a CPR3
+ *		controller
+ * @ctrl:		Pointer to the CPR3 controller
+ * @vdd_floor_volt:	Last known aggregated floor voltage in microvolts for
+ *			the VDD supply
+ * @vdd_ceiling_volt:	Last known aggregated ceiling voltage in microvolts for
+ *			the VDD supply
+ * @new_volt:		New voltage in microvolts that VDD supply needs to
+ *			end up at
+ * @last_volt:		Last known voltage in microvolts for the VDD supply
+ *
+ * Return: 0 on success, errno on failure
+ */
+static int cpr3_regulator_config_ldo(struct cpr3_controller *ctrl,
+				int vdd_floor_volt, int vdd_ceiling_volt,
+				int new_volt, int last_volt)
+{
+	struct cpr3_regulator *vreg;
+	int i, j, rc;
+
+	for (i = 0; i < ctrl->thread_count; i++) {
+		for (j = 0; j < ctrl->thread[i].vreg_count; j++) {
+			vreg = &ctrl->thread[i].vreg[j];
+
+			if (!vreg->ldo_regulator || !vreg->ldo_mode_allowed)
+				continue;
+
+			rc = cpr3_regulator_config_vreg_ldo(vreg,
+					vdd_floor_volt, vdd_ceiling_volt,
+					new_volt, last_volt);
+			if (rc)
+				return rc;
+		}
+	}
+
+	return 0;
+}
+
+/**
+ * cpr3_regulator_mem_acc_bhs_used() - determines if mem-acc regulators powered
+ *		through a BHS are associated with the CPR3 controller or any of
+ *		the CPR3 regulators it controls.
+ * @ctrl:		Pointer to the CPR3 controller
+ *
+ * This function determines if the CPR3 controller or any of its CPR3 regulators
+ * need to manage mem-acc regulators that are currently powered through a BHS
+ * and whose corner selection is based upon a particular voltage threshold.
+ *
+ * Return: true or false
+ */
+static bool cpr3_regulator_mem_acc_bhs_used(struct cpr3_controller *ctrl)
+{
+	struct cpr3_regulator *vreg;
+	int i, j;
+
+	if (!ctrl->mem_acc_threshold_volt)
+		return false;
+
+	if (ctrl->mem_acc_regulator)
+		return true;
+
+	for (i = 0; i < ctrl->thread_count; i++) {
+		for (j = 0; j < ctrl->thread[i].vreg_count; j++) {
+			vreg = &ctrl->thread[i].vreg[j];
+
+			if (vreg->mem_acc_regulator &&
+			    (!vreg->ldo_regulator ||
+			     vreg->ldo_regulator_bypass
+			     == BHS_MODE))
+				return true;
+		}
+	}
+
+	return false;
+}
+
+/**
+ * cpr3_regulator_config_bhs_mem_acc() - configure the mem-acc regulator
+ *		settings for hardware blocks currently powered through the BHS.
+ * @ctrl:		Pointer to the CPR3 controller
+ * @new_volt:		New voltage in microvolts that VDD supply needs to
+ *			end up at
+ * @last_volt:		Pointer to the last known voltage in microvolts for the
+ *			VDD supply
+ * @aggr_corner:	Pointer to the CPR3 corner which corresponds to the max
+ *			corner aggregated from all CPR3 threads managed by the
+ *			CPR3 controller
+ *
+ * This function programs the mem-acc regulator corners for CPR3 regulators
+ * whose LDO regulators are in bypassed state. The function also handles
+ * CPR3 controllers which utilize mem-acc regulators that operate independently
+ * from the LDO hardware and that must be programmed when the VDD supply
+ * crosses a particular voltage threshold.
+ *
+ * Return: 0 on success, errno on failure. If the VDD supply voltage is
+ * modified, last_volt is updated to reflect the new voltage setpoint.
+ */
+static int cpr3_regulator_config_bhs_mem_acc(struct cpr3_controller *ctrl,
+				     int new_volt, int *last_volt,
+				     struct cpr3_corner *aggr_corner)
+{
+	struct cpr3_regulator *vreg;
+	int i, j, rc, mem_acc_corn, safe_volt;
+	int mem_acc_volt = ctrl->mem_acc_threshold_volt;
+	int ref_volt;
+
+	if (!cpr3_regulator_mem_acc_bhs_used(ctrl))
+		return 0;
+
+	ref_volt = ctrl->use_hw_closed_loop ? aggr_corner->floor_volt :
+		new_volt;
+
+	if (((*last_volt < mem_acc_volt && mem_acc_volt <= ref_volt) ||
+	     (*last_volt >= mem_acc_volt && mem_acc_volt > ref_volt))) {
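+		/*
+		 * Step VDD to a voltage at or above the mem-acc threshold so
+		 * that the mem-acc corner can be switched safely before VDD
+		 * settles at its final value.
+		 */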
+		if (ref_volt < *last_volt)
+			safe_volt = max(mem_acc_volt, aggr_corner->last_volt);
+		else
+			safe_volt = max(mem_acc_volt, *last_volt);
+
+		rc = regulator_set_voltage(ctrl->vdd_regulator, safe_volt,
+					   new_volt < *last_volt ?
+					   ctrl->aggr_corner.ceiling_volt :
+					   new_volt);
+		if (rc) {
+			cpr3_err(ctrl, "regulator_set_voltage(vdd) == %d failed, rc=%d\n",
+				 safe_volt, rc);
+			return rc;
+		}
+
+		*last_volt = safe_volt;
+
+		mem_acc_corn = ref_volt < mem_acc_volt ?
+			ctrl->mem_acc_corner_map[CPR3_MEM_ACC_LOW_CORNER] :
+			ctrl->mem_acc_corner_map[CPR3_MEM_ACC_HIGH_CORNER];
+
+		if (ctrl->mem_acc_regulator) {
+			rc = regulator_set_voltage(ctrl->mem_acc_regulator,
+						   mem_acc_corn, mem_acc_corn);
+			if (rc) {
+				cpr3_err(ctrl, "regulator_set_voltage(mem_acc) == %d failed, rc=%d\n",
+					 mem_acc_corn, rc);
+				return rc;
+			}
+		}
+
+		for (i = 0; i < ctrl->thread_count; i++) {
+			for (j = 0; j < ctrl->thread[i].vreg_count; j++) {
+				vreg = &ctrl->thread[i].vreg[j];
+
+				if (!vreg->mem_acc_regulator ||
+				    (vreg->ldo_regulator &&
+				     vreg->ldo_regulator_bypass
+				     == LDO_MODE))
+					continue;
+
+				rc = regulator_set_voltage(
+					vreg->mem_acc_regulator, mem_acc_corn,
+					mem_acc_corn);
+				if (rc) {
+					cpr3_err(vreg, "regulator_set_voltage(mem_acc) == %d failed, rc=%d\n",
+						 mem_acc_corn, rc);
+					return rc;
+				}
+			}
+		}
+	}
+
+	return 0;
+}
+
+/**
+ * cpr3_regulator_switch_apm_mode() - switch the mode of the APM controller
+ *		associated with a given CPR3 controller
+ * @ctrl:		Pointer to the CPR3 controller
+ * @new_volt:		New voltage in microvolts that VDD supply needs to
+ *			end up at
+ * @last_volt:		Pointer to the last known voltage in microvolts for the
+ *			VDD supply
+ * @aggr_corner:	Pointer to the CPR3 corner which corresponds to the max
+ *			corner aggregated from all CPR3 threads managed by the
+ *			CPR3 controller
+ *
+ * This function requests a switch of the APM mode while guaranteeing
+ * any LDO regulator hardware requirements are satisfied. The function must
+ * be called once it is known a new VDD supply setpoint crosses the APM
+ * voltage threshold.
+ *
+ * Return: 0 on success, errno on failure. If the VDD supply voltage is
+ * modified, last_volt is updated to reflect the new voltage setpoint.
+ */
+static int cpr3_regulator_switch_apm_mode(struct cpr3_controller *ctrl,
+					  int new_volt, int *last_volt,
+					  struct cpr3_corner *aggr_corner)
+{
+	struct regulator *vdd = ctrl->vdd_regulator;
+	int apm_volt = ctrl->apm_threshold_volt;
+	int orig_last_volt = *last_volt;
+	int rc;
+
+	rc = regulator_set_voltage(vdd, apm_volt, apm_volt);
+	if (rc) {
+		cpr3_err(ctrl, "regulator_set_voltage(vdd) == %d failed, rc=%d\n",
+			 apm_volt, rc);
+		return rc;
+	}
+
+	*last_volt = apm_volt;
+
+	rc = cpr3_regulator_ldo_apm_prepare(ctrl, new_volt, *last_volt,
+					    aggr_corner);
+	if (rc) {
+		cpr3_err(ctrl, "unable to prepare LDO state for APM switch, rc=%d\n",
+			 rc);
+		return rc;
+	}
+
+	rc = msm_apm_set_supply(ctrl->apm, new_volt >= apm_volt
+				? ctrl->apm_high_supply : ctrl->apm_low_supply);
+	if (rc) {
+		cpr3_err(ctrl, "APM switch failed, rc=%d\n", rc);
+		/* Roll back the voltage. */
+		regulator_set_voltage(vdd, orig_last_volt, INT_MAX);
+		*last_volt = orig_last_volt;
+		return rc;
+	}
+	return 0;
+}
+
+/**
+ * cpr3_regulator_config_voltage_crossings() - configure APM and mem-acc
+ *		settings depending upon a new VDD supply setpoint
+ *
+ * @ctrl:		Pointer to the CPR3 controller
+ * @new_volt:		New voltage in microvolts that VDD supply needs to
+ *			end up at
+ * @last_volt:		Pointer to the last known voltage in microvolts for the
+ *			VDD supply
+ * @aggr_corner:	Pointer to the CPR3 corner which corresponds to the max
+ *			corner aggregated from all CPR3 threads managed by the
+ *			CPR3 controller
+ *
+ * This function handles the APM and mem-acc regulator reconfiguration if
+ * the new VDD supply voltage will result in crossing their respective voltage
+ * thresholds.
+ *
+ * Return: 0 on success, errno on failure. If the VDD supply voltage is
+ * modified, last_volt is updated to reflect the new voltage setpoint.
+ */
+static int cpr3_regulator_config_voltage_crossings(struct cpr3_controller *ctrl,
+				   int new_volt, int *last_volt,
+				   struct cpr3_corner *aggr_corner)
+{
+	bool apm_crossing = false, mem_acc_crossing = false;
+	bool mem_acc_bhs_used;
+	int apm_volt = ctrl->apm_threshold_volt;
+	int mem_acc_volt = ctrl->mem_acc_threshold_volt;
+	int ref_volt, rc;
+
+	if (ctrl->apm && apm_volt > 0
+	    && ((*last_volt < apm_volt && apm_volt <= new_volt)
+		|| (*last_volt >= apm_volt && apm_volt > new_volt)))
+		apm_crossing = true;
+
+	mem_acc_bhs_used = cpr3_regulator_mem_acc_bhs_used(ctrl);
+
+	ref_volt = ctrl->use_hw_closed_loop ? aggr_corner->floor_volt :
+		new_volt;
+
+	if (mem_acc_bhs_used &&
+	    (((*last_volt < mem_acc_volt && mem_acc_volt <= ref_volt) ||
+	      (*last_volt >= mem_acc_volt && mem_acc_volt > ref_volt))))
+		mem_acc_crossing = true;
+
+	if (apm_crossing && mem_acc_crossing) {
+		if ((new_volt < *last_volt && apm_volt >= mem_acc_volt) ||
+		    (new_volt >= *last_volt && apm_volt < mem_acc_volt)) {
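+			/*
+			 * Handle whichever threshold is crossed first as VDD
+			 * moves from last_volt toward new_volt: the APM switch
+			 * here, then the mem-acc reconfiguration.
+			 */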
+			rc = cpr3_regulator_switch_apm_mode(ctrl, new_volt,
+							    last_volt,
+							    aggr_corner);
+			if (rc) {
+				cpr3_err(ctrl, "unable to switch APM mode\n");
+				return rc;
+			}
+
+			rc = cpr3_regulator_config_bhs_mem_acc(ctrl, new_volt,
+						       last_volt, aggr_corner);
+			if (rc) {
+				cpr3_err(ctrl, "unable to configure BHS mem-acc settings\n");
+				return rc;
+			}
+		} else {
+			rc = cpr3_regulator_config_bhs_mem_acc(ctrl, new_volt,
+						       last_volt, aggr_corner);
+			if (rc) {
+				cpr3_err(ctrl, "unable to configure BHS mem-acc settings\n");
+				return rc;
+			}
+
+			rc = cpr3_regulator_switch_apm_mode(ctrl, new_volt,
+							    last_volt,
+							    aggr_corner);
+			if (rc) {
+				cpr3_err(ctrl, "unable to switch APM mode\n");
+				return rc;
+			}
+		}
+	} else if (apm_crossing) {
+		rc = cpr3_regulator_switch_apm_mode(ctrl, new_volt, last_volt,
+						    aggr_corner);
+		if (rc) {
+			cpr3_err(ctrl, "unable to switch APM mode\n");
+			return rc;
+		}
+	} else if (mem_acc_crossing) {
+		rc = cpr3_regulator_config_bhs_mem_acc(ctrl, new_volt,
+						       last_volt, aggr_corner);
+		if (rc) {
+			cpr3_err(ctrl, "unable to configure BHS mem-acc settings\n");
+			return rc;
+		}
+	}
+
+	return 0;
+}
+
+/**
+ * cpr3_regulator_config_mem_acc() - configure the corner of the mem-acc
+ *			regulator associated with the CPR3 controller
+ * @ctrl:		Pointer to the CPR3 controller
+ * @aggr_corner:	Pointer to the CPR3 corner which corresponds to the max
+ *			corner aggregated from all CPR3 threads managed by the
+ *			CPR3 controller
+ *
+ * Return: 0 on success, errno on failure
+ */
+static int cpr3_regulator_config_mem_acc(struct cpr3_controller *ctrl,
+					 struct cpr3_corner *aggr_corner)
+{
+	int rc;
+
+	if (ctrl->mem_acc_regulator && aggr_corner->mem_acc_volt) {
+		rc = regulator_set_voltage(ctrl->mem_acc_regulator,
+					   aggr_corner->mem_acc_volt,
+					   aggr_corner->mem_acc_volt);
+		if (rc) {
+			cpr3_err(ctrl, "regulator_set_voltage(mem_acc) == %d failed, rc=%d\n",
+				 aggr_corner->mem_acc_volt, rc);
+			return rc;
+		}
+	}
+
+	return 0;
+}
+
+/**
+ * cpr3_regulator_scale_vdd_voltage() - scale the CPR controlled VDD supply
+ *		voltage to the new level while satisfying any other hardware
+ *		requirements
+ * @ctrl:		Pointer to the CPR3 controller
+ * @new_volt:		New voltage in microvolts that VDD supply needs to end
+ *			up at
+ * @last_volt:		Last known voltage in microvolts for the VDD supply
+ * @aggr_corner:	Pointer to the CPR3 corner which corresponds to the max
+ *			corner aggregated from all CPR3 threads managed by the
+ *			CPR3 controller
+ *
+ * This function scales the CPR controlled VDD supply voltage from its
+ * current level to the new voltage that is specified.  If the supply is
+ * configured to use the APM and the APM threshold is crossed as a result of
+ * the voltage scaling, then this function also stops at the APM threshold,
+ * switches the APM source, and finally sets the final new voltage.
+ *
+ * Return: 0 on success, errno on failure
+ */
+static int cpr3_regulator_scale_vdd_voltage(struct cpr3_controller *ctrl,
+				int new_volt, int last_volt,
+				struct cpr3_corner *aggr_corner)
+{
+	struct regulator *vdd = ctrl->vdd_regulator;
+	int rc;
+
+	if (new_volt < last_volt) {
+		if (ctrl->support_ldo300_vreg) {
+			rc = cpr3_regulator_config_mem_acc(ctrl, aggr_corner);
+			if (rc)
+				return rc;
+		}
+
+		/* Decreasing VDD voltage */
+		rc = cpr3_regulator_config_ldo(ctrl, aggr_corner->floor_volt,
+					       ctrl->aggr_corner.ceiling_volt,
+					       new_volt, last_volt);
+		if (rc) {
+			cpr3_err(ctrl, "unable to configure LDO state, rc=%d\n",
+				 rc);
+			return rc;
+		}
+
+		if (!ctrl->support_ldo300_vreg) {
+			rc = cpr3_regulator_config_mem_acc(ctrl, aggr_corner);
+			if (rc)
+				return rc;
+		}
+	} else {
+		/* Increasing VDD voltage */
+		if (ctrl->system_regulator) {
+			rc = regulator_set_voltage(ctrl->system_regulator,
+				aggr_corner->system_volt, INT_MAX);
+			if (rc) {
+				cpr3_err(ctrl, "regulator_set_voltage(system) == %d failed, rc=%d\n",
+					aggr_corner->system_volt, rc);
+				return rc;
+			}
+		}
+	}
+
+	rc = cpr3_regulator_config_voltage_crossings(ctrl, new_volt, &last_volt,
+						     aggr_corner);
+	if (rc) {
+		cpr3_err(ctrl, "unable to handle voltage threshold crossing configurations, rc=%d\n",
+			 rc);
+		return rc;
+	}
+
+	/*
+	 * Subtract a small amount from the min_uV parameter so that the
+	 * set voltage request is not dropped by the framework due to being
+	 * duplicate.  This is needed in order to switch from hardware
+	 * closed-loop to open-loop successfully.
+	 */
+	rc = regulator_set_voltage(vdd, new_volt - (ctrl->cpr_enabled ? 0 : 1),
+				   aggr_corner->ceiling_volt);
+	if (rc) {
+		cpr3_err(ctrl, "regulator_set_voltage(vdd) == %d failed, rc=%d\n",
+			new_volt, rc);
+		return rc;
+	}
+
+	if (new_volt == last_volt && ctrl->supports_hw_closed_loop
+	    && ctrl->ctrl_type == CPR_CTRL_TYPE_CPR4) {
+		/*
+		 * CPR4 features enforce voltage reprogramming when the last
+		 * set voltage and new set voltage are same. This way, we can
+		 * ensure that SAW PMIC STATUS register is updated with newly
+		 * programmed voltage.
+		 */
+		rc = regulator_sync_voltage(vdd);
+		if (rc) {
+			cpr3_err(ctrl, "regulator_sync_voltage(vdd) == %d failed, rc=%d\n",
+				new_volt, rc);
+			return rc;
+		}
+	}
+
+	if (new_volt >= last_volt) {
+		/* Increasing VDD voltage */
+		rc = cpr3_regulator_config_ldo(ctrl, aggr_corner->floor_volt,
+					       aggr_corner->ceiling_volt,
+					       new_volt, new_volt);
+		if (rc) {
+			cpr3_err(ctrl, "unable to configure LDO state, rc=%d\n",
+				 rc);
+			return rc;
+		}
+
+		rc = cpr3_regulator_config_mem_acc(ctrl, aggr_corner);
+		if (rc)
+			return rc;
+	} else {
+		/* Decreasing VDD voltage */
+		if (ctrl->system_regulator) {
+			rc = regulator_set_voltage(ctrl->system_regulator,
+				aggr_corner->system_volt, INT_MAX);
+			if (rc) {
+				cpr3_err(ctrl, "regulator_set_voltage(system) == %d failed, rc=%d\n",
+					aggr_corner->system_volt, rc);
+				return rc;
+			}
+		}
+	}
+
+	return 0;
+}
+
+/**
+ * cpr3_regulator_get_dynamic_floor_volt() - returns the current dynamic floor
+ *		voltage based upon static configurations and the state of all
+ *		power domains during the last CPR measurement
+ * @ctrl:		Pointer to the CPR3 controller
+ * @reg_last_measurement: Value read from the LAST_MEASUREMENT register
+ *
+ * When using HW closed-loop, the dynamic floor voltage is always returned
+ * regardless of the current state of the power domains.
+ *
+ * Return: dynamic floor voltage in microvolts or 0 if dynamic floor is not
+ *         currently required
+ */
+static int cpr3_regulator_get_dynamic_floor_volt(struct cpr3_controller *ctrl,
+		u32 reg_last_measurement)
+{
+	int dynamic_floor_volt = 0;
+	struct cpr3_regulator *vreg;
+	bool valid, pd_valid;
+	u32 bypass_bits;
+	int i, j;
+
+	if (!ctrl->supports_hw_closed_loop)
+		return 0;
+
+	if (likely(!ctrl->use_hw_closed_loop)) {
+		valid = !!(reg_last_measurement & CPR3_LAST_MEASUREMENT_VALID);
+		bypass_bits
+		 = (reg_last_measurement & CPR3_LAST_MEASUREMENT_PD_BYPASS_MASK)
+			>> CPR3_LAST_MEASUREMENT_PD_BYPASS_SHIFT;
+	} else {
+		/*
+		 * Ensure that the dynamic floor voltage is always used for
+		 * HW closed-loop since the conditions below cannot be evaluated
+		 * after each CPR measurement.
+		 */
+		valid = false;
+		bypass_bits = 0;
+	}
+
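+	/*
+	 * Enforce a regulator's dynamic floor when the last measurement is not
+	 * valid or when all of its power domains were bypassed during that
+	 * measurement.
+	 */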
+	for (i = 0; i < ctrl->thread_count; i++) {
+		for (j = 0; j < ctrl->thread[i].vreg_count; j++) {
+			vreg = &ctrl->thread[i].vreg[j];
+
+			if (!vreg->uses_dynamic_floor)
+				continue;
+
+			pd_valid = !((bypass_bits & vreg->pd_bypass_mask)
+					== vreg->pd_bypass_mask);
+
+			if (!valid || !pd_valid)
+				dynamic_floor_volt = max(dynamic_floor_volt,
+					vreg->corner[
+					 vreg->dynamic_floor_corner].last_volt);
+		}
+	}
+
+	return dynamic_floor_volt;
+}
+
+/**
+ * cpr3_regulator_max_sdelta_diff() - returns the maximum voltage difference in
+ *		microvolts that can result from different operating conditions
+ *		for the specified sdelta struct
+ * @sdelta:		Pointer to the sdelta structure
+ * @step_volt:		Step size in microvolts between available set
+ *			points of the VDD supply.
+ *
+ * Return: voltage difference between the highest and lowest adjustments if
+ *	sdelta and sdelta->table are valid, else 0.
+ */
+static int cpr3_regulator_max_sdelta_diff(const struct cpr4_sdelta *sdelta,
+				int step_volt)
+{
+	int i, j, index, sdelta_min = INT_MAX, sdelta_max = INT_MIN;
+
+	if (!sdelta || !sdelta->table)
+		return 0;
+
+	for (i = 0; i < sdelta->max_core_count; i++) {
+		for (j = 0; j < sdelta->temp_band_count; j++) {
+			index = i * sdelta->temp_band_count + j;
+			sdelta_min = min(sdelta_min, sdelta->table[index]);
+			sdelta_max = max(sdelta_max, sdelta->table[index]);
+		}
+	}
+
+	return (sdelta_max - sdelta_min) * step_volt;
+}
+
+/**
+ * cpr3_regulator_aggregate_sdelta() - compare the open-loop voltages of the
+ *		current aggregated corner and the current corner of a given
+ *		regulator and adjust the sdelta structure data of the
+ *		aggregated corner
+ * @aggr_corner:	Pointer to accumulated aggregated corner which
+ *			is both an input and an output
+ * @corner:		Pointer to the corner to be aggregated with
+ *			aggr_corner
+ * @step_volt:		Step size in microvolts between available set
+ *			points of the VDD supply.
+ *
+ * Return: none
+ */
+static void cpr3_regulator_aggregate_sdelta(
+				struct cpr3_corner *aggr_corner,
+				const struct cpr3_corner *corner, int step_volt)
+{
+	struct cpr4_sdelta *aggr_sdelta, *sdelta;
+	int aggr_core_count, core_count, temp_band_count;
+	u32 aggr_index, index;
+	int i, j, sdelta_size, cap_steps, adjust_sdelta;
+
+	aggr_sdelta = aggr_corner->sdelta;
+	sdelta = corner->sdelta;
+
+	if (aggr_corner->open_loop_volt < corner->open_loop_volt) {
+		/*
+		 * This regulator is the new dominant one since its open-loop
+		 * requirement is higher than that of the previous dominant
+		 * regulator.  Calculate a cap voltage to limit the SDELTA
+		 * values so that the runtime (core-count/temp) adjustments do
+		 * not violate the other regulators' voltage requirements.  Use
+		 * the cpr4_sdelta values of the new dominant regulator.
+		 */
+		aggr_sdelta->cap_volt = min(aggr_sdelta->cap_volt,
+						(corner->open_loop_volt -
+						aggr_corner->open_loop_volt));
+
+		/* Clear old data in the sdelta table */
+		sdelta_size = aggr_sdelta->max_core_count
+					* aggr_sdelta->temp_band_count;
+
+		if (aggr_sdelta->allow_core_count_adj
+			|| aggr_sdelta->allow_temp_adj)
+			memset(aggr_sdelta->table, 0, sdelta_size
+					* sizeof(*aggr_sdelta->table));
+
+		if (sdelta->allow_temp_adj || sdelta->allow_core_count_adj) {
+			/* Copy new data in sdelta table */
+			sdelta_size = sdelta->max_core_count
+						* sdelta->temp_band_count;
+			if (sdelta->table)
+				memcpy(aggr_sdelta->table, sdelta->table,
+					sdelta_size * sizeof(*sdelta->table));
+		}
+
+		if (sdelta->allow_boost) {
+			memcpy(aggr_sdelta->boost_table, sdelta->boost_table,
+				sdelta->temp_band_count
+				* sizeof(*sdelta->boost_table));
+			aggr_sdelta->boost_num_cores = sdelta->boost_num_cores;
+		} else if (aggr_sdelta->allow_boost) {
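+			/*
+			 * Re-express the previous dominant regulator's boost
+			 * margins relative to the new, higher open-loop
+			 * voltage and clamp them to be non-positive.
+			 */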
+			for (i = 0; i < aggr_sdelta->temp_band_count; i++) {
+				adjust_sdelta = (corner->open_loop_volt
+						- aggr_corner->open_loop_volt)
+						/ step_volt;
+				aggr_sdelta->boost_table[i] += adjust_sdelta;
+				aggr_sdelta->boost_table[i]
+					= min(aggr_sdelta->boost_table[i], 0);
+			}
+		}
+
+		aggr_corner->open_loop_volt = corner->open_loop_volt;
+		aggr_sdelta->allow_temp_adj = sdelta->allow_temp_adj;
+		aggr_sdelta->allow_core_count_adj
+					= sdelta->allow_core_count_adj;
+		aggr_sdelta->max_core_count = sdelta->max_core_count;
+		aggr_sdelta->temp_band_count = sdelta->temp_band_count;
+	} else if (aggr_corner->open_loop_volt > corner->open_loop_volt) {
+		/*
+		 * Adjust the cap voltage if the open-loop requirement of the
+		 * new regulator is the next highest.
+		 */
+		aggr_sdelta->cap_volt = min(aggr_sdelta->cap_volt,
+						(aggr_corner->open_loop_volt
+						- corner->open_loop_volt));
+
+		if (sdelta->allow_boost) {
+			for (i = 0; i < aggr_sdelta->temp_band_count; i++) {
+				adjust_sdelta = (aggr_corner->open_loop_volt
+						- corner->open_loop_volt)
+						/ step_volt;
+				aggr_sdelta->boost_table[i] =
+					sdelta->boost_table[i] + adjust_sdelta;
+				aggr_sdelta->boost_table[i]
+					= min(aggr_sdelta->boost_table[i], 0);
+			}
+			aggr_sdelta->boost_num_cores = sdelta->boost_num_cores;
+		}
+	} else {
+		/*
+		 * Found another dominant regulator with the same open-loop
+		 * requirement.  Set the cap voltage to 0 and disable core-count
+		 * adjustments since they cannot be supported for both
+		 * regulators.  Keep temperature based adjustments enabled if
+		 * both regulators allow them, and choose the minimum margin
+		 * adjustment values between them.
+		 */
+		aggr_sdelta->cap_volt = 0;
+		aggr_sdelta->allow_core_count_adj = false;
+
+		if (aggr_sdelta->allow_temp_adj
+					&& sdelta->allow_temp_adj) {
+			aggr_core_count = aggr_sdelta->max_core_count - 1;
+			core_count = sdelta->max_core_count - 1;
+			temp_band_count = sdelta->temp_band_count;
+			for (j = 0; j < temp_band_count; j++) {
+				aggr_index = aggr_core_count * temp_band_count
+						+ j;
+				index = core_count * temp_band_count + j;
+				aggr_sdelta->table[aggr_index] =
+					min(aggr_sdelta->table[aggr_index],
+						sdelta->table[index]);
+			}
+		} else {
+			aggr_sdelta->allow_temp_adj = false;
+		}
+
+		if (sdelta->allow_boost) {
+			memcpy(aggr_sdelta->boost_table, sdelta->boost_table,
+				sdelta->temp_band_count
+				* sizeof(*sdelta->boost_table));
+			aggr_sdelta->boost_num_cores = sdelta->boost_num_cores;
+		}
+	}
+
+	/* Keep the non-dominant clients' boost enable state */
+	aggr_sdelta->allow_boost |= sdelta->allow_boost;
+	if (aggr_sdelta->allow_boost)
+		aggr_sdelta->allow_core_count_adj = false;
+
+	if (aggr_sdelta->cap_volt && aggr_sdelta->cap_volt != INT_MAX) {
+		core_count = aggr_sdelta->max_core_count;
+		temp_band_count = aggr_sdelta->temp_band_count;
+		/*
+		 * Convert cap voltage from uV to PMIC steps and use to limit
+		 * sdelta margin adjustments.
+		 */
+		cap_steps = aggr_sdelta->cap_volt / step_volt;
+		for (i = 0; i < core_count; i++) {
+			for (j = 0; j < temp_band_count; j++) {
+				index = i * temp_band_count + j;
+				aggr_sdelta->table[index] =
+						min(aggr_sdelta->table[index],
+							cap_steps);
+			}
+		}
+	}
+}
+
+/**
+ * cpr3_regulator_aggregate_corners() - aggregate two corners together
+ * @aggr_corner:		Pointer to accumulated aggregated corner which
+ *				is both an input and an output
+ * @corner:			Pointer to the corner to be aggregated with
+ *				aggr_corner
+ * @aggr_quot:			Flag indicating that target quotients should be
+ *				aggregated as well.
+ * @step_volt:			Step size in microvolts between available set
+ *				points of the VDD supply.
+ *
+ * Return: none
+ */
+static void cpr3_regulator_aggregate_corners(struct cpr3_corner *aggr_corner,
+			const struct cpr3_corner *corner, bool aggr_quot,
+			int step_volt)
+{
+	int i;
+
+	aggr_corner->ceiling_volt
+		= max(aggr_corner->ceiling_volt, corner->ceiling_volt);
+	aggr_corner->floor_volt
+		= max(aggr_corner->floor_volt, corner->floor_volt);
+	aggr_corner->last_volt
+		= max(aggr_corner->last_volt, corner->last_volt);
+	aggr_corner->system_volt
+		= max(aggr_corner->system_volt, corner->system_volt);
+	aggr_corner->mem_acc_volt
+		= max(aggr_corner->mem_acc_volt, corner->mem_acc_volt);
+	aggr_corner->irq_en |= corner->irq_en;
+	aggr_corner->use_open_loop |= corner->use_open_loop;
+	aggr_corner->ldo_mode_allowed |= corner->ldo_mode_allowed;
+
+	if (aggr_quot) {
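+		/*
+		 * An RO remains masked only if every aggregated corner masks
+		 * it; each target quotient is the maximum across corners.
+		 */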
+		aggr_corner->ro_mask &= corner->ro_mask;
+
+		for (i = 0; i < CPR3_RO_COUNT; i++)
+			aggr_corner->target_quot[i]
+				= max(aggr_corner->target_quot[i],
+				      corner->target_quot[i]);
+	}
+
+	if (aggr_corner->sdelta && corner->sdelta
+		&& (aggr_corner->sdelta->table
+		|| aggr_corner->sdelta->boost_table)) {
+		cpr3_regulator_aggregate_sdelta(aggr_corner, corner, step_volt);
+	} else {
+		aggr_corner->open_loop_volt
+			= max(aggr_corner->open_loop_volt,
+				corner->open_loop_volt);
+	}
+}
+
+/**
+ * cpr3_regulator_update_ctrl_state() - update the state of the CPR controller
+ *		to reflect the corners used by all CPR3 regulators as well as
+ *		the CPR operating mode
+ * @ctrl:		Pointer to the CPR3 controller
+ *
+ * This function aggregates the CPR parameters for all CPR3 regulators
+ * associated with the VDD supply.  Upon success, it sets the aggregated last
+ * known good voltage.
+ *
+ * The VDD supply voltage will not be physically configured unless this
+ * condition is met by at least one of the regulators of the controller:
+ * regulator->vreg_enabled == true &&
+ * regulator->current_corner != CPR3_REGULATOR_CORNER_INVALID
+ *
+ * CPR registers for the controller and each thread are updated as long as
+ * ctrl->cpr_enabled == true.
+ *
+ * Note, CPR3 controller lock must be held by the caller.
+ *
+ * Return: 0 on success, errno on failure
+ */
+static int _cpr3_regulator_update_ctrl_state(struct cpr3_controller *ctrl)
+{
+	struct cpr3_corner aggr_corner = {};
+	struct cpr3_thread *thread;
+	struct cpr3_regulator *vreg;
+	struct cpr4_sdelta *sdelta;
+	bool valid = false;
+	bool thread_valid;
+	int i, j, rc;
+	int new_volt, vdd_volt, dynamic_floor_volt, last_corner_volt = 0;
+	u32 reg_last_measurement = 0, sdelta_size;
+	int *sdelta_table, *boost_table;
+
+	if (ctrl->ctrl_type == CPR_CTRL_TYPE_CPR4) {
+		rc = cpr3_ctrl_clear_cpr4_config(ctrl);
+		if (rc) {
+			cpr3_err(ctrl, "failed to clear CPR4 configuration,rc=%d\n",
+				rc);
+			return rc;
+		}
+	}
+
+	cpr3_ctrl_loop_disable(ctrl);
+
+	vdd_volt = regulator_get_voltage(ctrl->vdd_regulator);
+	if (vdd_volt < 0) {
+		cpr3_err(ctrl, "regulator_get_voltage(vdd) failed, rc=%d\n",
+			 vdd_volt);
+		return vdd_volt;
+	}
+
+	if (ctrl->ctrl_type == CPR_CTRL_TYPE_CPR4) {
+		/*
+		 * Save the aggregated corner open-loop voltage that was
+		 * programmed during the last corner switch; it is used when
+		 * programming the new aggregated corner open-loop voltage.
+		 */
+		last_corner_volt = ctrl->aggr_corner.open_loop_volt;
+	}
+
+	if (ctrl->cpr_enabled && ctrl->use_hw_closed_loop &&
+		ctrl->ctrl_type == CPR_CTRL_TYPE_CPR3)
+		reg_last_measurement
+			= cpr3_read(ctrl, CPR3_REG_LAST_MEASUREMENT);
+
+	aggr_corner.sdelta = ctrl->aggr_corner.sdelta;
+	if (aggr_corner.sdelta) {
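+		/*
+		 * Zero the aggregated sdelta contents while preserving the
+		 * dynamically allocated table and boost_table buffers.
+		 */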
+		sdelta = aggr_corner.sdelta;
+		sdelta_table = sdelta->table;
+		if (sdelta_table) {
+			sdelta_size = sdelta->max_core_count *
+					sdelta->temp_band_count;
+			memset(sdelta_table, 0, sdelta_size
+					* sizeof(*sdelta_table));
+		}
+
+		boost_table = sdelta->boost_table;
+		if (boost_table)
+			memset(boost_table, 0, sdelta->temp_band_count
+					* sizeof(*boost_table));
+
+		memset(sdelta, 0, sizeof(*sdelta));
+		sdelta->table = sdelta_table;
+		sdelta->cap_volt = INT_MAX;
+		sdelta->boost_table = boost_table;
+	}
+
+	/* Aggregate the requests of all threads */
+	for (i = 0; i < ctrl->thread_count; i++) {
+		thread = &ctrl->thread[i];
+		thread_valid = false;
+
+		sdelta = thread->aggr_corner.sdelta;
+		if (sdelta) {
+			sdelta_table = sdelta->table;
+			if (sdelta_table) {
+				sdelta_size = sdelta->max_core_count *
+						sdelta->temp_band_count;
+				memset(sdelta_table, 0, sdelta_size
+						* sizeof(*sdelta_table));
+			}
+
+			boost_table = sdelta->boost_table;
+			if (boost_table)
+				memset(boost_table, 0, sdelta->temp_band_count
+						* sizeof(*boost_table));
+
+			memset(sdelta, 0, sizeof(*sdelta));
+			sdelta->table = sdelta_table;
+			sdelta->cap_volt = INT_MAX;
+			sdelta->boost_table = boost_table;
+		}
+
+		memset(&thread->aggr_corner, 0, sizeof(thread->aggr_corner));
+		thread->aggr_corner.sdelta = sdelta;
+		thread->aggr_corner.ro_mask = CPR3_RO_MASK;
+
+		for (j = 0; j < thread->vreg_count; j++) {
+			vreg = &thread->vreg[j];
+
+			if (ctrl->cpr_enabled && ctrl->use_hw_closed_loop)
+				cpr3_update_vreg_closed_loop_volt(vreg,
+						vdd_volt, reg_last_measurement);
+
+			if (!vreg->vreg_enabled
+			    || vreg->current_corner
+					    == CPR3_REGULATOR_CORNER_INVALID) {
+				/* Cannot participate in aggregation. */
+				vreg->aggregated = false;
+				continue;
+			} else {
+				vreg->aggregated = true;
+				thread_valid = true;
+			}
+
+			cpr3_regulator_aggregate_corners(&thread->aggr_corner,
+					&vreg->corner[vreg->current_corner],
+					true, ctrl->step_volt);
+		}
+
+		valid |= thread_valid;
+
+		if (thread_valid)
+			cpr3_regulator_aggregate_corners(&aggr_corner,
+					&thread->aggr_corner,
+					false, ctrl->step_volt);
+	}
+
+	if (valid && ctrl->cpr_allowed_hw && ctrl->cpr_allowed_sw) {
+		rc = cpr3_closed_loop_enable(ctrl);
+		if (rc) {
+			cpr3_err(ctrl, "could not enable CPR, rc=%d\n", rc);
+			return rc;
+		}
+	} else {
+		rc = cpr3_closed_loop_disable(ctrl);
+		if (rc) {
+			cpr3_err(ctrl, "could not disable CPR, rc=%d\n", rc);
+			return rc;
+		}
+	}
+
+	/* No threads are enabled with a valid corner so exit. */
+	if (!valid)
+		return 0;
+
+	/*
+	 * When using CPR hardware closed-loop, the voltage may vary anywhere
+	 * between the floor and ceiling voltage without software notification.
+	 * Therefore, it is required that the floor to ceiling range for the
+	 * aggregated corner not intersect the APM threshold voltage.  Adjust
+	 * the floor to ceiling range if this requirement is violated.
+	 *
+	 * The following algorithm is applied in the case that
+	 * floor < threshold <= ceiling:
+	 *	if open_loop >= threshold - adj, then floor = threshold
+	 *	else ceiling = threshold - step
+	 * where adj = an adjustment factor to ensure sufficient voltage margin
+	 * and step = VDD output step size
+	 *
+	 * The open-loop and last known voltages are also bounded by the new
+	 * floor or ceiling value as needed.
+	 */
+	if (ctrl->use_hw_closed_loop
+	    && aggr_corner.ceiling_volt >= ctrl->apm_threshold_volt
+	    && aggr_corner.floor_volt < ctrl->apm_threshold_volt) {
+
+		if (aggr_corner.open_loop_volt
+		    >= ctrl->apm_threshold_volt - ctrl->apm_adj_volt)
+			aggr_corner.floor_volt = ctrl->apm_threshold_volt;
+		else
+			aggr_corner.ceiling_volt
+				= ctrl->apm_threshold_volt - ctrl->step_volt;
+
+		aggr_corner.last_volt
+		    = max(aggr_corner.last_volt, aggr_corner.floor_volt);
+		aggr_corner.last_volt
+		    = min(aggr_corner.last_volt, aggr_corner.ceiling_volt);
+		aggr_corner.open_loop_volt
+		    = max(aggr_corner.open_loop_volt, aggr_corner.floor_volt);
+		aggr_corner.open_loop_volt
+		    = min(aggr_corner.open_loop_volt, aggr_corner.ceiling_volt);
+	}
+
+	if (ctrl->use_hw_closed_loop
+	    && aggr_corner.ceiling_volt >= ctrl->mem_acc_threshold_volt
+	    && aggr_corner.floor_volt < ctrl->mem_acc_threshold_volt) {
+		aggr_corner.floor_volt = ctrl->mem_acc_threshold_volt;
+		aggr_corner.last_volt = max(aggr_corner.last_volt,
+					     aggr_corner.floor_volt);
+		aggr_corner.open_loop_volt = max(aggr_corner.open_loop_volt,
+						  aggr_corner.floor_volt);
+	}
+
+	if (ctrl->use_hw_closed_loop) {
+		dynamic_floor_volt
+			= cpr3_regulator_get_dynamic_floor_volt(ctrl,
+							reg_last_measurement);
+		if (aggr_corner.floor_volt < dynamic_floor_volt) {
+			aggr_corner.floor_volt = dynamic_floor_volt;
+			aggr_corner.last_volt = max(aggr_corner.last_volt,
+							aggr_corner.floor_volt);
+			aggr_corner.open_loop_volt
+				= max(aggr_corner.open_loop_volt,
+					aggr_corner.floor_volt);
+			aggr_corner.ceiling_volt = max(aggr_corner.ceiling_volt,
+							aggr_corner.floor_volt);
+		}
+	}
+
+	if (ctrl->cpr_enabled && ctrl->last_corner_was_closed_loop) {
+		/*
+		 * Always program open-loop voltage for CPR4 controllers which
+		 * support hardware closed-loop.  Storing the last closed-loop
+		 * voltage in the corner structure can still help with
+		 * debugging.
+		 */
+		if (ctrl->ctrl_type == CPR_CTRL_TYPE_CPR3)
+			new_volt = aggr_corner.last_volt;
+		else if (ctrl->ctrl_type == CPR_CTRL_TYPE_CPR4
+			 && ctrl->supports_hw_closed_loop)
+			new_volt = aggr_corner.open_loop_volt;
+		else
+			new_volt = min(aggr_corner.last_volt +
+			      cpr3_regulator_max_sdelta_diff(aggr_corner.sdelta,
+							     ctrl->step_volt),
+				       aggr_corner.ceiling_volt);
+	} else {
+		new_volt = aggr_corner.open_loop_volt;
+		aggr_corner.last_volt = aggr_corner.open_loop_volt;
+	}
+
+	if (ctrl->ctrl_type == CPR_CTRL_TYPE_CPR4
+	    && ctrl->supports_hw_closed_loop) {
+		/*
+		 * Store the last aggregated corner open-loop voltage in
+		 * vdd_volt; it is used when programming the voltage required
+		 * by the current aggregated corner.
+		 */
+		vdd_volt = last_corner_volt;
+	}
+
+	cpr3_debug(ctrl, "setting new voltage=%d uV\n", new_volt);
+	rc = cpr3_regulator_scale_vdd_voltage(ctrl, new_volt,
+					      vdd_volt, &aggr_corner);
+	if (rc) {
+		cpr3_err(ctrl, "vdd voltage scaling failed, rc=%d\n", rc);
+		return rc;
+	}
+
+	/* Only update registers if CPR is enabled. */
+	if (ctrl->cpr_enabled) {
+		if (ctrl->use_hw_closed_loop) {
+			/* Hardware closed-loop */
+
+			/* Set ceiling and floor limits in hardware */
+			rc = regulator_set_voltage(ctrl->vdd_limit_regulator,
+				aggr_corner.floor_volt,
+				aggr_corner.ceiling_volt);
+			if (rc) {
+				cpr3_err(ctrl, "could not configure HW closed-loop voltage limits, rc=%d\n",
+					rc);
+				return rc;
+			}
+		} else {
+			/* Software closed-loop */
+
+			/*
+			 * Disable UP or DOWN interrupts when at ceiling or
+			 * floor respectively.
+			 */
+			if (new_volt == aggr_corner.floor_volt)
+				aggr_corner.irq_en &= ~CPR3_IRQ_DOWN;
+			if (new_volt == aggr_corner.ceiling_volt)
+				aggr_corner.irq_en &= ~CPR3_IRQ_UP;
+
+			cpr3_write(ctrl, CPR3_REG_IRQ_CLEAR,
+				CPR3_IRQ_UP | CPR3_IRQ_DOWN);
+			cpr3_write(ctrl, CPR3_REG_IRQ_EN, aggr_corner.irq_en);
+		}
+
+		for (i = 0; i < ctrl->thread_count; i++) {
+			cpr3_regulator_set_target_quot(&ctrl->thread[i]);
+
+			for (j = 0; j < ctrl->thread[i].vreg_count; j++) {
+				vreg = &ctrl->thread[i].vreg[j];
+
+				if (vreg->vreg_enabled)
+					vreg->last_closed_loop_corner
+						= vreg->current_corner;
+			}
+		}
+
+		if (ctrl->proc_clock_throttle) {
+			if (aggr_corner.ceiling_volt > aggr_corner.floor_volt
+			    && (ctrl->use_hw_closed_loop
+					|| new_volt < aggr_corner.ceiling_volt))
+				cpr3_write(ctrl, CPR3_REG_PD_THROTTLE,
+						ctrl->proc_clock_throttle);
+			else
+				cpr3_write(ctrl, CPR3_REG_PD_THROTTLE,
+						CPR3_PD_THROTTLE_DISABLE);
+		}
+
+		/*
+		 * Ensure that all CPR register writes complete before
+		 * re-enabling CPR loop operation.
+		 */
+		wmb();
+	} else if (ctrl->ctrl_type == CPR_CTRL_TYPE_CPR4
+		   && ctrl->vdd_limit_regulator) {
+		/* Set ceiling and floor limits in hardware */
+		rc = regulator_set_voltage(ctrl->vdd_limit_regulator,
+			aggr_corner.floor_volt,
+			aggr_corner.ceiling_volt);
+		if (rc) {
+			cpr3_err(ctrl, "could not configure HW closed-loop voltage limits, rc=%d\n",
+				rc);
+			return rc;
+		}
+	}
+
+	ctrl->aggr_corner = aggr_corner;
+
+	if (ctrl->allow_core_count_adj || ctrl->allow_temp_adj
+		|| ctrl->allow_boost) {
+		rc = cpr3_controller_program_sdelta(ctrl);
+		if (rc) {
+			cpr3_err(ctrl, "failed to program sdelta, rc=%d\n", rc);
+			return rc;
+		}
+	}
+
+	/*
+	 * Only enable the CPR controller if it is possible to set more than
+	 * one vdd-supply voltage.
+	 */
+	if (aggr_corner.ceiling_volt > aggr_corner.floor_volt &&
+			!aggr_corner.use_open_loop)
+		cpr3_ctrl_loop_enable(ctrl);
+
+	ctrl->last_corner_was_closed_loop = ctrl->cpr_enabled;
+	cpr3_debug(ctrl, "CPR configuration updated\n");
+
+	return 0;
+}
+
+/**
+ * cpr3_regulator_wait_for_idle() - wait for the CPR controller to no longer be
+ *		busy
+ * @ctrl:		Pointer to the CPR3 controller
+ * @max_wait_ns:	Max wait time in nanoseconds
+ *
+ * Return: 0 on success or -ETIMEDOUT if the controller was still busy after
+ *	   the maximum delay time
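+ *
+ * The wait is implemented by polling: CPR3_REG_CPR_STATUS is re-read every
+ * 50-100 us (via usleep_range()) until its BUSY bit clears or max_wait_ns
+ * elapses.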
+ */
+static int cpr3_regulator_wait_for_idle(struct cpr3_controller *ctrl,
+					s64 max_wait_ns)
+{
+	ktime_t start, end;
+	s64 time_ns;
+	u32 reg;
+
+	/*
+	 * Ensure that all previous CPR register writes have completed before
+	 * checking the status register.
+	 */
+	mb();
+
+	start = ktime_get();
+	do {
+		end = ktime_get();
+		time_ns = ktime_to_ns(ktime_sub(end, start));
+		if (time_ns > max_wait_ns) {
+			cpr3_err(ctrl, "CPR controller still busy after %lld us\n",
+				div_s64(time_ns, 1000));
+			return -ETIMEDOUT;
+		}
+		usleep_range(50, 100);
+		reg = cpr3_read(ctrl, CPR3_REG_CPR_STATUS);
+	} while (reg & CPR3_CPR_STATUS_BUSY_MASK);
+
+	return 0;
+}
+
+/**
+ * cmp_int() - integer comparison function to pass to sort() in order to
+ *		achieve ascending ordering
+ * @a:			First int value
+ * @b:			Second int value
+ *
+ * Return: >0 if a > b, 0 if a == b, <0 if a < b
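+ *
+ * Note: the plain subtraction below is safe only because the sorted values
+ * (scaled quotient deltas) are small in magnitude; it would overflow for
+ * operands near INT_MIN/INT_MAX.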
+ */
+static int cmp_int(const void *a, const void *b)
+{
+	return *(int *)a - *(int *)b;
+}
+
+/**
+ * cpr3_regulator_measure_aging() - measure the quotient difference for the
+ *		specified CPR aging sensor
+ * @ctrl:		Pointer to the CPR3 controller
+ * @aging_sensor:	Aging sensor to measure
+ *
+ * Note that vdd-supply must be configured to the aging reference voltage before
+ * calling this function.
+ *
+ * Return: 0 on success, errno on failure
+ */
+static int cpr3_regulator_measure_aging(struct cpr3_controller *ctrl,
+				struct cpr3_aging_sensor_info *aging_sensor)
+{
+	u32 mask, reg, result, quot_min, quot_max, sel_min, sel_max;
+	u32 quot_min_scaled, quot_max_scaled;
+	u32 gcnt, gcnt_ref, gcnt0_restore, gcnt1_restore, irq_restore;
+	u32 ro_mask_restore, cont_dly_restore, up_down_dly_restore = 0;
+	int quot_delta, quot_delta_scaled, quot_delta_scaled_sum;
+	int *quot_delta_results;
+	int rc, rc2, i, aging_measurement_count, filtered_count;
+	bool is_aging_measurement;
+
+	quot_delta_results = kcalloc(CPR3_AGING_MEASUREMENT_ITERATIONS,
+			sizeof(*quot_delta_results), GFP_KERNEL);
+	if (!quot_delta_results)
+		return -ENOMEM;
+
+	if (ctrl->ctrl_type == CPR_CTRL_TYPE_CPR4) {
+		rc = cpr3_ctrl_clear_cpr4_config(ctrl);
+		if (rc) {
+			cpr3_err(ctrl, "failed to clear CPR4 configuration,rc=%d\n",
+				rc);
+			kfree(quot_delta_results);
+			return rc;
+		}
+	}
+
+	cpr3_ctrl_loop_disable(ctrl);
+
+	/* Enable up, down, and mid CPR interrupts */
+	irq_restore = cpr3_read(ctrl, CPR3_REG_IRQ_EN);
+	cpr3_write(ctrl, CPR3_REG_IRQ_EN,
+			CPR3_IRQ_UP | CPR3_IRQ_DOWN | CPR3_IRQ_MID);
+
+	/* Ensure that the aging sensor is assigned to CPR thread 0 */
+	cpr3_write(ctrl, CPR3_REG_SENSOR_OWNER(aging_sensor->sensor_id), 0);
+
+	/* Switch from HW to SW closed-loop if necessary */
+	if (ctrl->supports_hw_closed_loop) {
+		if (ctrl->ctrl_type == CPR_CTRL_TYPE_CPR4 ||
+		    ctrl->ctrl_type == CPR_CTRL_TYPE_CPRH) {
+			cpr3_masked_write(ctrl, CPR4_REG_MARGIN_ADJ_CTL,
+				CPR4_MARGIN_ADJ_CTL_HW_CLOSED_LOOP_EN_MASK,
+				CPR4_MARGIN_ADJ_CTL_HW_CLOSED_LOOP_DISABLE);
+		} else if (ctrl->ctrl_type == CPR_CTRL_TYPE_CPR3) {
+			cpr3_write(ctrl, CPR3_REG_HW_CLOSED_LOOP,
+				CPR3_HW_CLOSED_LOOP_DISABLE);
+		}
+	}
+
+	/* Configure the GCNT for RO0 and RO1 that are used for aging */
+	gcnt0_restore = cpr3_read(ctrl, CPR3_REG_GCNT(0));
+	gcnt1_restore = cpr3_read(ctrl, CPR3_REG_GCNT(1));
+	gcnt_ref = cpr3_regulator_get_gcnt(ctrl);
+
+	gcnt = gcnt_ref;
+	if (ctrl->aging_gcnt_scaling_factor)
+		gcnt = gcnt_ref * ctrl->aging_gcnt_scaling_factor
+				/ CPR3_AGING_GCNT_SCALING_UNITY;
+
+	cpr3_write(ctrl, CPR3_REG_GCNT(0), gcnt);
+	cpr3_write(ctrl, CPR3_REG_GCNT(1), gcnt);
+
+	/* Unmask all RO's */
+	ro_mask_restore = cpr3_read(ctrl, CPR3_REG_RO_MASK(0));
+	cpr3_write(ctrl, CPR3_REG_RO_MASK(0), 0);
+
+	/*
+	 * Mask all sensors except for the one to measure and bypass all
+	 * sensors in collapsible domains.
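+	 *
+	 * For example (illustrative), sensor_id = 40 falls in bank
+	 * 40 / 32 = 1 as bit 40 % 32 = 8, so only that bit is cleared in
+	 * bank 1 while every other sensor remains masked.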
+	 */
+	for (i = 0; i <= ctrl->sensor_count / 32; i++) {
+		mask = GENMASK(min(31, ctrl->sensor_count - i * 32), 0);
+		if (aging_sensor->sensor_id / 32 >= i
+		    && aging_sensor->sensor_id / 32 < (i + 1))
+			mask &= ~BIT(aging_sensor->sensor_id % 32);
+		cpr3_write(ctrl, CPR3_REG_SENSOR_MASK_WRITE_BANK(i), mask);
+		cpr3_write(ctrl, CPR3_REG_SENSOR_BYPASS_WRITE_BANK(i),
+				aging_sensor->bypass_mask[i]);
+	}
+
+	/* Set CPR loop delays to 0 us */
+	if (ctrl->supports_hw_closed_loop
+		&& ctrl->ctrl_type == CPR_CTRL_TYPE_CPR3) {
+		cont_dly_restore = cpr3_read(ctrl, CPR3_REG_CPR_TIMER_MID_CONT);
+		up_down_dly_restore = cpr3_read(ctrl,
+						CPR3_REG_CPR_TIMER_UP_DN_CONT);
+		cpr3_write(ctrl, CPR3_REG_CPR_TIMER_MID_CONT, 0);
+		cpr3_write(ctrl, CPR3_REG_CPR_TIMER_UP_DN_CONT, 0);
+	} else {
+		cont_dly_restore = cpr3_read(ctrl,
+						CPR3_REG_CPR_TIMER_AUTO_CONT);
+		cpr3_write(ctrl, CPR3_REG_CPR_TIMER_AUTO_CONT, 0);
+	}
+
+	/* Set count mode to all-at-once min with no repeat */
+	cpr3_masked_write(ctrl, CPR3_REG_CPR_CTL,
+		CPR3_CPR_CTL_COUNT_MODE_MASK | CPR3_CPR_CTL_COUNT_REPEAT_MASK,
+		CPR3_CPR_CTL_COUNT_MODE_ALL_AT_ONCE_MIN
+			<< CPR3_CPR_CTL_COUNT_MODE_SHIFT);
+
+	cpr3_ctrl_loop_enable(ctrl);
+
+	rc = cpr3_regulator_wait_for_idle(ctrl,
+					CPR3_AGING_MEASUREMENT_TIMEOUT_NS);
+	if (rc)
+		goto cleanup;
+
+	/* Set count mode to all-at-once aging */
+	cpr3_masked_write(ctrl, CPR3_REG_CPR_CTL, CPR3_CPR_CTL_COUNT_MODE_MASK,
+			CPR3_CPR_CTL_COUNT_MODE_ALL_AT_ONCE_AGE
+				<< CPR3_CPR_CTL_COUNT_MODE_SHIFT);
+
+	aging_measurement_count = 0;
+	for (i = 0; i < CPR3_AGING_MEASUREMENT_ITERATIONS; i++) {
+		/* Send CONT_NACK */
+		cpr3_write(ctrl, CPR3_REG_CONT_CMD, CPR3_CONT_CMD_NACK);
+
+		rc = cpr3_regulator_wait_for_idle(ctrl,
+					CPR3_AGING_MEASUREMENT_TIMEOUT_NS);
+		if (rc)
+			goto cleanup;
+
+		/* Check for PAGE_IS_AGE flag in status register */
+		reg = cpr3_read(ctrl, CPR3_REG_CPR_STATUS);
+		is_aging_measurement
+			= reg & CPR3_CPR_STATUS_AGING_MEASUREMENT_MASK;
+
+		/* Read CPR measurement results */
+		result = cpr3_read(ctrl, CPR3_REG_RESULT1(0));
+		quot_min = (result & CPR3_RESULT1_QUOT_MIN_MASK)
+				>> CPR3_RESULT1_QUOT_MIN_SHIFT;
+		quot_max = (result & CPR3_RESULT1_QUOT_MAX_MASK)
+				>> CPR3_RESULT1_QUOT_MAX_SHIFT;
+		sel_min = (result & CPR3_RESULT1_RO_MIN_MASK)
+				>> CPR3_RESULT1_RO_MIN_SHIFT;
+		sel_max = (result & CPR3_RESULT1_RO_MAX_MASK)
+				>> CPR3_RESULT1_RO_MAX_SHIFT;
+
+		/*
+		 * Scale the quotients so that they are equivalent to the fused
+		 * values.  This accounts for the difference in measurement
+		 * interval times.
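+		 *
+		 * E.g. (illustrative) with gcnt_ref = 999 and gcnt = 499, a
+		 * measured quotient of 400 scales to 400 * 1000 / 500 = 800.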
+		 */
+		quot_min_scaled = quot_min * (gcnt_ref + 1) / (gcnt + 1);
+		quot_max_scaled = quot_max * (gcnt_ref + 1) / (gcnt + 1);
+
+		if (sel_max == 1) {
+			quot_delta = quot_max - quot_min;
+			quot_delta_scaled = quot_max_scaled - quot_min_scaled;
+		} else {
+			quot_delta = quot_min - quot_max;
+			quot_delta_scaled = quot_min_scaled - quot_max_scaled;
+		}
+
+		if (is_aging_measurement)
+			quot_delta_results[aging_measurement_count++]
+				= quot_delta_scaled;
+
+		cpr3_debug(ctrl, "aging results: page_is_age=%u, sel_min=%u, sel_max=%u, quot_min=%u, quot_max=%u, quot_delta=%d, quot_min_scaled=%u, quot_max_scaled=%u, quot_delta_scaled=%d\n",
+			is_aging_measurement, sel_min, sel_max, quot_min,
+			quot_max, quot_delta, quot_min_scaled, quot_max_scaled,
+			quot_delta_scaled);
+	}
+
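+	/*
+	 * Average the results after discarding the
+	 * CPR3_AGING_MEASUREMENT_FILTER lowest and highest outliers; e.g.
+	 * (illustrative) with a filter of 3 and 20 valid readings, the
+	 * middle 14 sorted values are averaged.
+	 */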
+	filtered_count
+		= aging_measurement_count - CPR3_AGING_MEASUREMENT_FILTER * 2;
+	if (filtered_count > 0) {
+		sort(quot_delta_results, aging_measurement_count,
+			sizeof(*quot_delta_results), cmp_int, NULL);
+
+		quot_delta_scaled_sum = 0;
+		for (i = 0; i < filtered_count; i++)
+			quot_delta_scaled_sum
+				+= quot_delta_results[i
+					+ CPR3_AGING_MEASUREMENT_FILTER];
+
+		aging_sensor->measured_quot_diff
+			= quot_delta_scaled_sum / filtered_count;
+		cpr3_info(ctrl, "average quotient delta=%d (count=%d)\n",
+			aging_sensor->measured_quot_diff,
+			filtered_count);
+	} else {
+		cpr3_err(ctrl, "%d aging measurements completed after %d iterations\n",
+			aging_measurement_count,
+			CPR3_AGING_MEASUREMENT_ITERATIONS);
+		rc = -EBUSY;
+	}
+
+cleanup:
+	kfree(quot_delta_results);
+
+	if (ctrl->ctrl_type == CPR_CTRL_TYPE_CPR4) {
+		rc2 = cpr3_ctrl_clear_cpr4_config(ctrl);
+		if (rc2) {
+			cpr3_err(ctrl, "failed to clear CPR4 configuration,rc=%d\n",
+				rc2);
+			rc = rc2;
+		}
+	}
+
+	cpr3_ctrl_loop_disable(ctrl);
+
+	cpr3_write(ctrl, CPR3_REG_IRQ_EN, irq_restore);
+
+	cpr3_write(ctrl, CPR3_REG_RO_MASK(0), ro_mask_restore);
+
+	cpr3_write(ctrl, CPR3_REG_GCNT(0), gcnt0_restore);
+	cpr3_write(ctrl, CPR3_REG_GCNT(1), gcnt1_restore);
+
+	if (ctrl->supports_hw_closed_loop
+		&& ctrl->ctrl_type == CPR_CTRL_TYPE_CPR3) {
+		cpr3_write(ctrl, CPR3_REG_CPR_TIMER_MID_CONT, cont_dly_restore);
+		cpr3_write(ctrl, CPR3_REG_CPR_TIMER_UP_DN_CONT,
+				up_down_dly_restore);
+	} else {
+		cpr3_write(ctrl, CPR3_REG_CPR_TIMER_AUTO_CONT,
+				cont_dly_restore);
+	}
+
+	for (i = 0; i <= ctrl->sensor_count / 32; i++) {
+		cpr3_write(ctrl, CPR3_REG_SENSOR_MASK_WRITE_BANK(i), 0);
+		cpr3_write(ctrl, CPR3_REG_SENSOR_BYPASS_WRITE_BANK(i), 0);
+	}
+
+	cpr3_masked_write(ctrl, CPR3_REG_CPR_CTL,
+		CPR3_CPR_CTL_COUNT_MODE_MASK | CPR3_CPR_CTL_COUNT_REPEAT_MASK,
+		(ctrl->count_mode << CPR3_CPR_CTL_COUNT_MODE_SHIFT)
+		| (ctrl->count_repeat << CPR3_CPR_CTL_COUNT_REPEAT_SHIFT));
+
+	cpr3_write(ctrl, CPR3_REG_SENSOR_OWNER(aging_sensor->sensor_id),
+			ctrl->sensor_owner[aging_sensor->sensor_id]);
+
+	cpr3_write(ctrl, CPR3_REG_IRQ_CLEAR,
+			CPR3_IRQ_UP | CPR3_IRQ_DOWN | CPR3_IRQ_MID);
+
+	if (ctrl->supports_hw_closed_loop) {
+		if (ctrl->ctrl_type == CPR_CTRL_TYPE_CPR4 ||
+		    ctrl->ctrl_type == CPR_CTRL_TYPE_CPRH) {
+			cpr3_masked_write(ctrl, CPR4_REG_MARGIN_ADJ_CTL,
+				CPR4_MARGIN_ADJ_CTL_HW_CLOSED_LOOP_EN_MASK,
+				ctrl->use_hw_closed_loop
+				? CPR4_MARGIN_ADJ_CTL_HW_CLOSED_LOOP_ENABLE
+				: CPR4_MARGIN_ADJ_CTL_HW_CLOSED_LOOP_DISABLE);
+		} else if (ctrl->ctrl_type == CPR_CTRL_TYPE_CPR3) {
+			cpr3_write(ctrl, CPR3_REG_HW_CLOSED_LOOP,
+				ctrl->use_hw_closed_loop
+				? CPR3_HW_CLOSED_LOOP_ENABLE
+				: CPR3_HW_CLOSED_LOOP_DISABLE);
+		}
+	}
+
+	return rc;
+}
+
+/**
+ * cpr3_regulator_readjust_volt_and_quot() - readjust the target quotients as
+ *		well as the floor, ceiling, and open-loop voltages for the
+ *		regulator by removing the old adjustment and adding the new one
+ * @vreg:		Pointer to the CPR3 regulator
+ * @old_adjust_volt:	Old aging adjustment voltage in microvolts
+ * @new_adjust_volt:	New aging adjustment voltage in microvolts
+ *
+ * Also reset the cached closed-loop voltage (last_volt) to equal the open-loop
+ * voltage for each corner.
+ *
+ * Return: None
+ */
+static void cpr3_regulator_readjust_volt_and_quot(struct cpr3_regulator *vreg,
+		int old_adjust_volt, int new_adjust_volt)
+{
+	unsigned long long temp;
+	int i, j, old_volt, new_volt, rounded_volt;
+
+	if (!vreg->aging_allowed)
+		return;
+
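+	/*
+	 * Each corner derates the aging adjustment by its aging_derate
+	 * factor, scaled so that 1000 corresponds to 1.0.  E.g.
+	 * (illustrative) adjust_volt = 15000 uV with aging_derate = 500
+	 * yields 15000 * 500 / 1000 = 7500 uV, which is then capped at
+	 * aging_max_adjust_volt.
+	 */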
+	for (i = 0; i < vreg->corner_count; i++) {
+		temp = (unsigned long long)old_adjust_volt
+			* (unsigned long long)vreg->corner[i].aging_derate;
+		do_div(temp, 1000);
+		old_volt = temp;
+
+		temp = (unsigned long long)new_adjust_volt
+			* (unsigned long long)vreg->corner[i].aging_derate;
+		do_div(temp, 1000);
+		new_volt = temp;
+
+		old_volt = min(vreg->aging_max_adjust_volt, old_volt);
+		new_volt = min(vreg->aging_max_adjust_volt, new_volt);
+
+		for (j = 0; j < CPR3_RO_COUNT; j++) {
+			if (vreg->corner[i].target_quot[j] != 0) {
+				vreg->corner[i].target_quot[j]
+					+= cpr3_quot_adjustment(
+						vreg->corner[i].ro_scale[j],
+						new_volt)
+					   - cpr3_quot_adjustment(
+						vreg->corner[i].ro_scale[j],
+						old_volt);
+			}
+		}
+
+		rounded_volt = CPR3_ROUND(new_volt,
+					vreg->thread->ctrl->step_volt);
+
+		if (!vreg->aging_allow_open_loop_adj)
+			rounded_volt = 0;
+
+		vreg->corner[i].ceiling_volt
+			= vreg->corner[i].unaged_ceiling_volt + rounded_volt;
+		vreg->corner[i].ceiling_volt = min(vreg->corner[i].ceiling_volt,
+					      vreg->corner[i].abs_ceiling_volt);
+		vreg->corner[i].floor_volt
+			= vreg->corner[i].unaged_floor_volt + rounded_volt;
+		vreg->corner[i].floor_volt = min(vreg->corner[i].floor_volt,
+						vreg->corner[i].ceiling_volt);
+		vreg->corner[i].open_loop_volt
+			= vreg->corner[i].unaged_open_loop_volt + rounded_volt;
+		vreg->corner[i].open_loop_volt
+			= min(vreg->corner[i].open_loop_volt,
+				vreg->corner[i].ceiling_volt);
+
+		vreg->corner[i].last_volt = vreg->corner[i].open_loop_volt;
+
+		cpr3_debug(vreg, "corner %d: applying %d uV closed-loop and %d uV open-loop voltage margin adjustment\n",
+			i, new_volt, rounded_volt);
+	}
+}
+
+/**
+ * cpr3_regulator_set_aging_ref_adjustment() - adjust target quotients for the
+ *		regulators managed by this CPR controller to account for aging
+ * @ctrl:		Pointer to the CPR3 controller
+ * @ref_adjust_volt:	New aging reference adjustment voltage in microvolts to
+ *			apply to all regulators managed by this CPR controller
+ *
+ * The existing aging adjustment as defined by ctrl->aging_ref_adjust_volt is
+ * first removed and then the new adjustment is applied.  Lastly, the value of
+ * ctrl->aging_ref_adjust_volt is updated to ref_adjust_volt.
+ */
+static void cpr3_regulator_set_aging_ref_adjustment(
+		struct cpr3_controller *ctrl, int ref_adjust_volt)
+{
+	struct cpr3_regulator *vreg;
+	int i, j;
+
+	for (i = 0; i < ctrl->thread_count; i++) {
+		for (j = 0; j < ctrl->thread[i].vreg_count; j++) {
+			vreg = &ctrl->thread[i].vreg[j];
+			cpr3_regulator_readjust_volt_and_quot(vreg,
+				ctrl->aging_ref_adjust_volt, ref_adjust_volt);
+			if (ctrl->ctrl_type == CPR_CTRL_TYPE_CPRH)
+				cprh_adjust_voltages_for_apm(vreg);
+		}
+	}
+
+	ctrl->aging_ref_adjust_volt = ref_adjust_volt;
+}
+
+/**
+ * cpr3_regulator_aging_adjust() - adjust the target quotients for regulators
+ *		based on the output of CPR aging sensors
+ * @ctrl:		Pointer to the CPR3 controller
+ *
+ * Return: 0 on success, errno on failure
+ */
+static int cpr3_regulator_aging_adjust(struct cpr3_controller *ctrl)
+{
+	struct cpr3_regulator *vreg;
+	struct cpr3_corner restore_aging_corner;
+	struct cpr3_corner *corner;
+	int *restore_current_corner;
+	bool *restore_vreg_enabled;
+	int i, j, id, rc, rc2, vreg_count, aging_volt, max_aging_volt = 0;
+	u32 reg;
+
+	if (!ctrl->aging_required || !ctrl->cpr_enabled
+	    || ctrl->aggr_corner.ceiling_volt == 0
+	    || ctrl->aggr_corner.ceiling_volt > ctrl->aging_ref_volt)
+		return 0;
+
+	for (i = 0, vreg_count = 0; i < ctrl->thread_count; i++) {
+		for (j = 0; j < ctrl->thread[i].vreg_count; j++) {
+			vreg = &ctrl->thread[i].vreg[j];
+			vreg_count++;
+
+			if (vreg->aging_allowed && vreg->vreg_enabled
+			    && vreg->current_corner > vreg->aging_corner)
+				return 0;
+		}
+	}
+
+	/* Verify that none of the aging sensors are currently masked. */
+	for (i = 0; i < ctrl->aging_sensor_count; i++) {
+		id = ctrl->aging_sensor[i].sensor_id;
+		reg = cpr3_read(ctrl, CPR3_REG_SENSOR_MASK_READ(id));
+		if (reg & BIT(id % 32))
+			return 0;
+	}
+
+	/*
+	 * Verify that the aging possible register (if specified) has an
+	 * acceptable value.
+	 */
+	if (ctrl->aging_possible_reg) {
+		reg = readl_relaxed(ctrl->aging_possible_reg);
+		reg &= ctrl->aging_possible_mask;
+		if (reg != ctrl->aging_possible_val)
+			return 0;
+	}
+
+	restore_current_corner = kcalloc(vreg_count,
+				sizeof(*restore_current_corner), GFP_KERNEL);
+	restore_vreg_enabled = kcalloc(vreg_count,
+				sizeof(*restore_vreg_enabled), GFP_KERNEL);
+	if (!restore_current_corner || !restore_vreg_enabled) {
+		kfree(restore_current_corner);
+		kfree(restore_vreg_enabled);
+		return -ENOMEM;
+	}
+
+	/* Force all regulators to the aging corner */
+	for (i = 0, vreg_count = 0; i < ctrl->thread_count; i++) {
+		for (j = 0; j < ctrl->thread[i].vreg_count; j++, vreg_count++) {
+			vreg = &ctrl->thread[i].vreg[j];
+
+			restore_current_corner[vreg_count]
+				= vreg->current_corner;
+			restore_vreg_enabled[vreg_count]
+				= vreg->vreg_enabled;
+
+			vreg->current_corner = vreg->aging_corner;
+			vreg->vreg_enabled = true;
+		}
+	}
+
+	/* Force one of the regulators to require the aging reference voltage */
+	vreg = &ctrl->thread[0].vreg[0];
+	corner = &vreg->corner[vreg->current_corner];
+	restore_aging_corner = *corner;
+	corner->ceiling_volt = ctrl->aging_ref_volt;
+	corner->floor_volt = ctrl->aging_ref_volt;
+	corner->open_loop_volt = ctrl->aging_ref_volt;
+	corner->last_volt = ctrl->aging_ref_volt;
+
+	/* Skip last_volt caching */
+	ctrl->last_corner_was_closed_loop = false;
+
+	/* Set the vdd supply voltage to the aging reference voltage */
+	rc = _cpr3_regulator_update_ctrl_state(ctrl);
+	if (rc) {
+		cpr3_err(ctrl, "unable to force vdd-supply to the aging reference voltage=%d uV, rc=%d\n",
+			ctrl->aging_ref_volt, rc);
+		goto cleanup;
+	}
+
+	if (ctrl->aging_vdd_mode) {
+		rc = regulator_set_mode(ctrl->vdd_regulator,
+					ctrl->aging_vdd_mode);
+		if (rc) {
+			cpr3_err(ctrl, "unable to configure vdd-supply for mode=%u, rc=%d\n",
+				ctrl->aging_vdd_mode, rc);
+			goto cleanup;
+		}
+	}
+
+	/* Perform aging measurement on all aging sensors */
+	for (i = 0; i < ctrl->aging_sensor_count; i++) {
+		for (j = 0; j < CPR3_AGING_RETRY_COUNT; j++) {
+			rc = cpr3_regulator_measure_aging(ctrl,
+					&ctrl->aging_sensor[i]);
+			if (!rc)
+				break;
+		}
+
+		if (!rc) {
+			aging_volt =
+				cpr3_voltage_adjustment(
+					ctrl->aging_sensor[i].ro_scale,
+					ctrl->aging_sensor[i].measured_quot_diff
+					- ctrl->aging_sensor[i].init_quot_diff);
+			max_aging_volt = max(max_aging_volt, aging_volt);
+		} else {
+			cpr3_err(ctrl, "CPR aging measurement failed after %d tries, rc=%d\n",
+				j, rc);
+			ctrl->aging_failed = true;
+			ctrl->aging_required = false;
+			goto cleanup;
+		}
+	}
+
+cleanup:
+	vreg = &ctrl->thread[0].vreg[0];
+	vreg->corner[vreg->current_corner] = restore_aging_corner;
+
+	for (i = 0, vreg_count = 0; i < ctrl->thread_count; i++) {
+		for (j = 0; j < ctrl->thread[i].vreg_count; j++, vreg_count++) {
+			vreg = &ctrl->thread[i].vreg[j];
+			vreg->current_corner
+				= restore_current_corner[vreg_count];
+			vreg->vreg_enabled = restore_vreg_enabled[vreg_count];
+		}
+	}
+
+	kfree(restore_current_corner);
+	kfree(restore_vreg_enabled);
+
+	/* Adjust the CPR target quotients according to the aging measurement */
+	if (!rc) {
+		cpr3_regulator_set_aging_ref_adjustment(ctrl, max_aging_volt);
+
+		cpr3_info(ctrl, "aging measurement successful; aging reference adjustment voltage=%d uV\n",
+			ctrl->aging_ref_adjust_volt);
+		ctrl->aging_succeeded = true;
+		ctrl->aging_required = false;
+	}
+
+	if (ctrl->aging_complete_vdd_mode) {
+		rc = regulator_set_mode(ctrl->vdd_regulator,
+					ctrl->aging_complete_vdd_mode);
+		if (rc)
+			cpr3_err(ctrl, "unable to configure vdd-supply for mode=%u, rc=%d\n",
+				ctrl->aging_complete_vdd_mode, rc);
+	}
+
+	/* Skip last_volt caching */
+	ctrl->last_corner_was_closed_loop = false;
+
+	/*
+	 * Restore vdd-supply to the voltage before the aging measurement and
+	 * restore the CPR3 controller hardware state.
+	 */
+	rc2 = _cpr3_regulator_update_ctrl_state(ctrl);
+
+	/* Skip last_volt caching for the next request */
+	ctrl->last_corner_was_closed_loop = false;
+
+	return rc ? rc : rc2;
+}
+
+/**
+ * cprh_regulator_aging_adjust() - adjust the target quotients and open-loop
+ *		voltages for CPRh regulators based on the output of CPR aging
+ *		sensors
+ * @ctrl:		Pointer to the CPR3 controller
+ *
+ * Return: 0 on success, errno on failure
+ */
+static int cprh_regulator_aging_adjust(struct cpr3_controller *ctrl)
+{
+	int i, j, id, rc, rc2, aging_volt, init_volt;
+	int max_aging_volt = 0;
+	u32 reg;
+
+	if (!ctrl->aging_required || !ctrl->cpr_enabled)
+		return 0;
+
+	if (!ctrl->vdd_regulator) {
+		cpr3_err(ctrl, "vdd-supply regulator missing\n");
+		return -ENODEV;
+	}
+
+	init_volt = regulator_get_voltage(ctrl->vdd_regulator);
+	if (init_volt < 0) {
+		cpr3_err(ctrl, "could not get vdd-supply voltage, rc=%d\n",
+			init_volt);
+		return init_volt;
+	}
+
+	if (init_volt > ctrl->aging_ref_volt) {
+		cpr3_info(ctrl, "unable to perform CPR aging measurement as vdd=%d uV > aging voltage=%d uV\n",
+			init_volt, ctrl->aging_ref_volt);
+		return 0;
+	}
+
+	/* Verify that none of the aging sensors are currently masked. */
+	for (i = 0; i < ctrl->aging_sensor_count; i++) {
+		id = ctrl->aging_sensor[i].sensor_id;
+		reg = cpr3_read(ctrl, CPR3_REG_SENSOR_MASK_READ(id));
+		if (reg & BIT(id % 32)) {
+			cpr3_info(ctrl, "unable to perform CPR aging measurement as CPR sensor %d is masked\n",
+				id);
+			return 0;
+		}
+	}
+
+	rc = regulator_set_voltage(ctrl->vdd_regulator, ctrl->aging_ref_volt,
+				INT_MAX);
+	if (rc) {
+		cpr3_err(ctrl, "unable to set vdd-supply to aging voltage=%d uV, rc=%d\n",
+			ctrl->aging_ref_volt, rc);
+		return rc;
+	}
+
+	if (ctrl->aging_vdd_mode) {
+		rc = regulator_set_mode(ctrl->vdd_regulator,
+					ctrl->aging_vdd_mode);
+		if (rc) {
+			cpr3_err(ctrl, "unable to configure vdd-supply for mode=%u, rc=%d\n",
+				ctrl->aging_vdd_mode, rc);
+			goto cleanup;
+		}
+	}
+
+	/* Perform aging measurement on all aging sensors */
+	for (i = 0; i < ctrl->aging_sensor_count; i++) {
+		for (j = 0; j < CPR3_AGING_RETRY_COUNT; j++) {
+			rc = cpr3_regulator_measure_aging(ctrl,
+					&ctrl->aging_sensor[i]);
+			if (!rc)
+				break;
+		}
+
+		if (!rc) {
+			aging_volt =
+				cpr3_voltage_adjustment(
+					ctrl->aging_sensor[i].ro_scale,
+					ctrl->aging_sensor[i].measured_quot_diff
+					- ctrl->aging_sensor[i].init_quot_diff);
+			max_aging_volt = max(max_aging_volt, aging_volt);
+		} else {
+			cpr3_err(ctrl, "CPR aging measurement failed after %d tries, rc=%d\n",
+				j, rc);
+			ctrl->aging_failed = true;
+			ctrl->aging_required = false;
+			goto cleanup;
+		}
+	}
+
+cleanup:
+	/* Adjust the CPR target quotients according to the aging measurement */
+	if (!rc) {
+		cpr3_regulator_set_aging_ref_adjustment(ctrl, max_aging_volt);
+
+		cpr3_info(ctrl, "aging measurement successful; aging reference adjustment voltage=%d uV\n",
+			ctrl->aging_ref_adjust_volt);
+		ctrl->aging_succeeded = true;
+		ctrl->aging_required = false;
+	}
+
+	rc2 = regulator_set_voltage(ctrl->vdd_regulator, init_volt, INT_MAX);
+	if (rc2) {
+		cpr3_err(ctrl, "unable to reset vdd-supply to initial voltage=%d uV, rc=%d\n",
+			init_volt, rc2);
+		return rc2;
+	}
+
+	if (ctrl->aging_complete_vdd_mode) {
+		rc2 = regulator_set_mode(ctrl->vdd_regulator,
+					ctrl->aging_complete_vdd_mode);
+		if (rc2) {
+			cpr3_err(ctrl, "unable to configure vdd-supply for mode=%u, rc=%d\n",
+				ctrl->aging_complete_vdd_mode, rc2);
+			return rc2;
+		}
+	}
+
+	return rc;
+}
+
+/**
+ * cpr3_regulator_update_ctrl_state() - update the state of the CPR controller
+ *		to reflect the corners used by all CPR3 regulators as well as
+ *		the CPR operating mode and perform aging adjustments if needed
+ * @ctrl:		Pointer to the CPR3 controller
+ *
+ * Note, CPR3 controller lock must be held by the caller.
+ *
+ * Return: 0 on success, errno on failure
+ */
+static int cpr3_regulator_update_ctrl_state(struct cpr3_controller *ctrl)
+{
+	int rc;
+
+	rc = _cpr3_regulator_update_ctrl_state(ctrl);
+	if (rc)
+		return rc;
+
+	return cpr3_regulator_aging_adjust(ctrl);
+}
+
+/**
+ * cpr3_regulator_set_voltage() - set the voltage corner for the CPR3 regulator
+ *			associated with the regulator device
+ * @rdev:		Regulator device pointer for the cpr3-regulator
+ * @corner:		New voltage corner to set (offset by CPR3_CORNER_OFFSET)
+ * @corner_max:		Maximum voltage corner allowed (offset by
+ *			CPR3_CORNER_OFFSET)
+ * @selector:		Pointer which is filled with the selector value for the
+ *			corner
+ *
+ * This function is passed as a callback function into the regulator ops that
+ * are registered for each cpr3-regulator device.  The VDD voltage will not be
+ * physically configured until both this function and cpr3_regulator_enable()
+ * are called.
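+ *
+ * Corner values exchanged with the regulator framework are offset by
+ * CPR3_CORNER_OFFSET; e.g., assuming an offset of 1 (illustrative), a
+ * framework corner of 5 maps to internal corner index 4.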
+ *
+ * Return: 0 on success, errno on failure
+ */
+static int cpr3_regulator_set_voltage(struct regulator_dev *rdev,
+		int corner, int corner_max, unsigned int *selector)
+{
+	struct cpr3_regulator *vreg = rdev_get_drvdata(rdev);
+	struct cpr3_controller *ctrl = vreg->thread->ctrl;
+	int rc = 0;
+	int last_corner;
+
+	corner -= CPR3_CORNER_OFFSET;
+	corner_max -= CPR3_CORNER_OFFSET;
+	*selector = corner;
+
+	mutex_lock(&ctrl->lock);
+
+	if (!vreg->vreg_enabled) {
+		vreg->current_corner = corner;
+		cpr3_debug(vreg, "stored corner=%d\n", corner);
+		goto done;
+	} else if (vreg->current_corner == corner) {
+		goto done;
+	}
+
+	last_corner = vreg->current_corner;
+	vreg->current_corner = corner;
+
+	rc = cpr3_regulator_update_ctrl_state(ctrl);
+	if (rc) {
+		cpr3_err(vreg, "could not update CPR state, rc=%d\n", rc);
+		vreg->current_corner = last_corner;
+	}
+
+	cpr3_debug(vreg, "set corner=%d\n", corner);
+done:
+	mutex_unlock(&ctrl->lock);
+
+	return rc;
+}
+
+/**
+ * cpr3_regulator_get_voltage() - get the voltage corner for the CPR3 regulator
+ *			associated with the regulator device
+ * @rdev:		Regulator device pointer for the cpr3-regulator
+ *
+ * This function is passed as a callback function into the regulator ops that
+ * are registered for each cpr3-regulator device.
+ *
+ * Return: voltage corner value offset by CPR3_CORNER_OFFSET
+ */
+static int cpr3_regulator_get_voltage(struct regulator_dev *rdev)
+{
+	struct cpr3_regulator *vreg = rdev_get_drvdata(rdev);
+
+	if (vreg->current_corner == CPR3_REGULATOR_CORNER_INVALID)
+		return CPR3_CORNER_OFFSET;
+	else
+		return vreg->current_corner + CPR3_CORNER_OFFSET;
+}
+
+/**
+ * cpr3_regulator_list_voltage() - return the voltage corner mapped to the
+ *			specified selector
+ * @rdev:		Regulator device pointer for the cpr3-regulator
+ * @selector:		Regulator selector
+ *
+ * This function is passed as a callback function into the regulator ops that
+ * are registered for each cpr3-regulator device.
+ *
+ * Return: voltage corner value offset by CPR3_CORNER_OFFSET
+ */
+static int cpr3_regulator_list_voltage(struct regulator_dev *rdev,
+		unsigned int selector)
+{
+	struct cpr3_regulator *vreg = rdev_get_drvdata(rdev);
+
+	if (selector < vreg->corner_count)
+		return selector + CPR3_CORNER_OFFSET;
+	else
+		return 0;
+}
+
+/**
+ * cpr3_regulator_list_corner_voltage() - return the ceiling voltage mapped to
+ *			the specified voltage corner
+ * @rdev:		Regulator device pointer for the cpr3-regulator
+ * @corner:		Voltage corner
+ *
+ * This function is passed as a callback function into the regulator ops that
+ * are registered for each cpr3-regulator device.
+ *
+ * Return: voltage value in microvolts or -EINVAL if the corner is out of range
+ */
+static int cpr3_regulator_list_corner_voltage(struct regulator_dev *rdev,
+		int corner)
+{
+	struct cpr3_regulator *vreg = rdev_get_drvdata(rdev);
+
+	corner -= CPR3_CORNER_OFFSET;
+
+	if (corner >= 0 && corner < vreg->corner_count)
+		return vreg->corner[corner].ceiling_volt;
+	else
+		return -EINVAL;
+}
+
+/**
+ * cpr3_regulator_is_enabled() - return the enable state of the CPR3 regulator
+ * @rdev:		Regulator device pointer for the cpr3-regulator
+ *
+ * This function is passed as a callback function into the regulator ops that
+ * are registered for each cpr3-regulator device.
+ *
+ * Return: true if regulator is enabled, false if regulator is disabled
+ */
+static int cpr3_regulator_is_enabled(struct regulator_dev *rdev)
+{
+	struct cpr3_regulator *vreg = rdev_get_drvdata(rdev);
+
+	return vreg->vreg_enabled;
+}
+
+/**
+ * cpr3_regulator_enable() - enable the CPR3 regulator
+ * @rdev:		Regulator device pointer for the cpr3-regulator
+ *
+ * This function is passed as a callback function into the regulator ops that
+ * are registered for each cpr3-regulator device.
+ *
+ * Return: 0 on success, errno on failure
+ */
+static int cpr3_regulator_enable(struct regulator_dev *rdev)
+{
+	struct cpr3_regulator *vreg = rdev_get_drvdata(rdev);
+	struct cpr3_controller *ctrl = vreg->thread->ctrl;
+	int rc = 0;
+
+	if (vreg->vreg_enabled)
+		return 0;
+
+	mutex_lock(&ctrl->lock);
+
+	if (ctrl->system_regulator) {
+		rc = regulator_enable(ctrl->system_regulator);
+		if (rc) {
+			cpr3_err(ctrl, "regulator_enable(system) failed, rc=%d\n",
+				rc);
+			goto done;
+		}
+	}
+
+	rc = regulator_enable(ctrl->vdd_regulator);
+	if (rc) {
+		cpr3_err(vreg, "regulator_enable(vdd) failed, rc=%d\n", rc);
+		goto done;
+	}
+
+	if (vreg->ldo_regulator) {
+		rc = regulator_enable(vreg->ldo_regulator);
+		if (rc) {
+			cpr3_err(vreg, "regulator_enable(ldo) failed, rc=%d\n",
+				 rc);
+			goto done;
+		}
+	}
+
+	vreg->vreg_enabled = true;
+	rc = cpr3_regulator_update_ctrl_state(ctrl);
+	if (rc) {
+		cpr3_err(vreg, "could not update CPR state, rc=%d\n", rc);
+		regulator_disable(ctrl->vdd_regulator);
+		vreg->vreg_enabled = false;
+		goto done;
+	}
+
+	cpr3_debug(vreg, "Enabled\n");
+done:
+	mutex_unlock(&ctrl->lock);
+
+	return rc;
+}
+
+/**
+ * cpr3_regulator_disable() - disable the CPR3 regulator
+ * @rdev:		Regulator device pointer for the cpr3-regulator
+ *
+ * This function is passed as a callback function into the regulator ops that
+ * are registered for each cpr3-regulator device.
+ *
+ * Return: 0 on success, errno on failure
+ */
+static int cpr3_regulator_disable(struct regulator_dev *rdev)
+{
+	struct cpr3_regulator *vreg = rdev_get_drvdata(rdev);
+	struct cpr3_controller *ctrl = vreg->thread->ctrl;
+	int rc, rc2;
+
+	if (!vreg->vreg_enabled)
+		return 0;
+
+	mutex_lock(&ctrl->lock);
+
+	if (vreg->ldo_regulator && vreg->ldo_regulator_bypass == LDO_MODE) {
+		rc = regulator_get_voltage(ctrl->vdd_regulator);
+		if (rc < 0) {
+			cpr3_err(vreg, "regulator_get_voltage(vdd) failed, rc=%d\n",
+				 rc);
+			goto done;
+		}
+
+		/* Switch back to BHS for safe operation */
+		rc = cpr3_regulator_set_bhs_mode(vreg, rc,
+				       ctrl->aggr_corner.ceiling_volt);
+		if (rc) {
+			cpr3_err(vreg, "unable to switch to BHS mode, rc=%d\n",
+				 rc);
+			goto done;
+		}
+	}
+
+	if (vreg->ldo_regulator) {
+		rc = regulator_disable(vreg->ldo_regulator);
+		if (rc) {
+			cpr3_err(vreg, "regulator_disable(ldo) failed, rc=%d\n",
+				 rc);
+			goto done;
+		}
+	}
+	rc = regulator_disable(ctrl->vdd_regulator);
+	if (rc) {
+		cpr3_err(vreg, "regulator_disable(vdd) failed, rc=%d\n", rc);
+		goto done;
+	}
+
+	vreg->vreg_enabled = false;
+	rc = cpr3_regulator_update_ctrl_state(ctrl);
+	if (rc) {
+		cpr3_err(vreg, "could not update CPR state, rc=%d\n", rc);
+		rc2 = regulator_enable(ctrl->vdd_regulator);
+		vreg->vreg_enabled = true;
+		goto done;
+	}
+
+	if (ctrl->system_regulator) {
+		rc = regulator_disable(ctrl->system_regulator);
+		if (rc) {
+			cpr3_err(ctrl, "regulator_disable(system) failed, rc=%d\n",
+				rc);
+			goto done;
+		}
+		if (ctrl->support_ldo300_vreg) {
+			rc = regulator_set_voltage(ctrl->system_regulator, 0,
+						INT_MAX);
+			if (rc)
+				cpr3_err(ctrl, "failed to set voltage on system rc=%d\n",
+					rc);
+			goto done;
+		}
+	}
+
+	cpr3_debug(vreg, "Disabled\n");
+done:
+	mutex_unlock(&ctrl->lock);
+
+	return rc;
+}
+
+static struct regulator_ops cpr3_regulator_ops = {
+	.enable			= cpr3_regulator_enable,
+	.disable		= cpr3_regulator_disable,
+	.is_enabled		= cpr3_regulator_is_enabled,
+	.set_voltage		= cpr3_regulator_set_voltage,
+	.get_voltage		= cpr3_regulator_get_voltage,
+	.list_voltage		= cpr3_regulator_list_voltage,
+	.list_corner_voltage	= cpr3_regulator_list_corner_voltage,
+};
+
+/**
+ * cprh_regulator_get_voltage() - get the voltage corner for the CPR3 regulator
+ *			associated with the regulator device
+ * @rdev:		Regulator device pointer for the cpr3-regulator
+ *
+ * This function is passed as a callback function into the regulator ops that
+ * are registered for each cpr3-regulator device of a CPRh controller. The
+ * corner is read directly from CPRh hardware register.
+ *
+ * Return: voltage corner value offset by CPR3_CORNER_OFFSET
+ */
+static int cprh_regulator_get_voltage(struct regulator_dev *rdev)
+{
+	struct cpr3_regulator *vreg = rdev_get_drvdata(rdev);
+	struct cpr3_controller *ctrl = vreg->thread->ctrl;
+	bool cpr_enabled;
+	u32 reg;
+	int rc;	/* cpr3_clock_enable() returns a negative errno on failure */
+
+	mutex_lock(&ctrl->lock);
+
+	cpr_enabled = ctrl->cpr_enabled;
+	if (!cpr_enabled) {
+		rc = cpr3_clock_enable(ctrl);
+		if (rc) {
+			cpr3_err(ctrl, "clock enable failed, rc=%d\n", rc);
+			mutex_unlock(&ctrl->lock);
+			return CPR3_REGULATOR_CORNER_INVALID;
+		}
+		ctrl->cpr_enabled = true;
+	}
+
+	reg = cpr3_read(vreg->thread->ctrl, CPRH_REG_STATUS);
+
+	if (!cpr_enabled) {
+		cpr3_clock_disable(ctrl);
+		ctrl->cpr_enabled = false;
+	}
+
+	mutex_unlock(&ctrl->lock);
+
+	return (reg & CPRH_STATUS_CORNER) + CPR3_CORNER_OFFSET;
+}
+
+static struct regulator_ops cprh_regulator_ops = {
+	.get_voltage		= cprh_regulator_get_voltage,
+	.list_corner_voltage	= cpr3_regulator_list_corner_voltage,
+};
+
+/**
+ * cpr3_print_result() - print CPR measurement results to the kernel log for
+ *		debugging purposes
+ * @thread:		Pointer to the CPR3 thread
+ *
+ * Return: None
+ */
+static void cpr3_print_result(struct cpr3_thread *thread)
+{
+	struct cpr3_controller *ctrl = thread->ctrl;
+	u32 result[3], busy, step_dn, step_up, error_steps, error, negative;
+	u32 quot_min, quot_max, ro_min, ro_max, step_quot_min, step_quot_max;
+	u32 sensor_min, sensor_max;
+	const char *sign;
+
+	result[0] = cpr3_read(ctrl, CPR3_REG_RESULT0(thread->thread_id));
+	result[1] = cpr3_read(ctrl, CPR3_REG_RESULT1(thread->thread_id));
+	result[2] = cpr3_read(ctrl, CPR3_REG_RESULT2(thread->thread_id));
+
+	busy = !!(result[0] & CPR3_RESULT0_BUSY_MASK);
+	step_dn = !!(result[0] & CPR3_RESULT0_STEP_DN_MASK);
+	step_up = !!(result[0] & CPR3_RESULT0_STEP_UP_MASK);
+	error_steps = (result[0] & CPR3_RESULT0_ERROR_STEPS_MASK)
+			>> CPR3_RESULT0_ERROR_STEPS_SHIFT;
+	error = (result[0] & CPR3_RESULT0_ERROR_MASK)
+			>> CPR3_RESULT0_ERROR_SHIFT;
+	negative = !!(result[0] & CPR3_RESULT0_NEGATIVE_MASK);
+
+	quot_min = (result[1] & CPR3_RESULT1_QUOT_MIN_MASK)
+			>> CPR3_RESULT1_QUOT_MIN_SHIFT;
+	quot_max = (result[1] & CPR3_RESULT1_QUOT_MAX_MASK)
+			>> CPR3_RESULT1_QUOT_MAX_SHIFT;
+	ro_min = (result[1] & CPR3_RESULT1_RO_MIN_MASK)
+			>> CPR3_RESULT1_RO_MIN_SHIFT;
+	ro_max = (result[1] & CPR3_RESULT1_RO_MAX_MASK)
+			>> CPR3_RESULT1_RO_MAX_SHIFT;
+
+	step_quot_min = (result[2] & CPR3_RESULT2_STEP_QUOT_MIN_MASK)
+			>> CPR3_RESULT2_STEP_QUOT_MIN_SHIFT;
+	step_quot_max = (result[2] & CPR3_RESULT2_STEP_QUOT_MAX_MASK)
+			>> CPR3_RESULT2_STEP_QUOT_MAX_SHIFT;
+	sensor_min = (result[2] & CPR3_RESULT2_SENSOR_MIN_MASK)
+			>> CPR3_RESULT2_SENSOR_MIN_SHIFT;
+	sensor_max = (result[2] & CPR3_RESULT2_SENSOR_MAX_MASK)
+			>> CPR3_RESULT2_SENSOR_MAX_SHIFT;
+
+	sign = negative ? "-" : "";
+	cpr3_debug(ctrl, "thread %u: busy=%u, step_dn=%u, step_up=%u, error_steps=%s%u, error=%s%u\n",
+		thread->thread_id, busy, step_dn, step_up, sign, error_steps,
+		sign, error);
+	cpr3_debug(ctrl, "thread %u: quot_min=%u, quot_max=%u, ro_min=%u, ro_max=%u\n",
+		thread->thread_id, quot_min, quot_max, ro_min, ro_max);
+	cpr3_debug(ctrl, "thread %u: step_quot_min=%u, step_quot_max=%u, sensor_min=%u, sensor_max=%u\n",
+		thread->thread_id, step_quot_min, step_quot_max, sensor_min,
+		sensor_max);
+}
+
+/**
+ * cpr3_thread_busy() - check whether the specified CPR3 thread is busy taking
+ *		a measurement
+ * @thread:		Pointer to the CPR3 thread
+ *
+ * Return: CPR3 busy status
+ */
+static bool cpr3_thread_busy(struct cpr3_thread *thread)
+{
+	u32 result;
+
+	result = cpr3_read(thread->ctrl, CPR3_REG_RESULT0(thread->thread_id));
+
+	return !!(result & CPR3_RESULT0_BUSY_MASK);
+}
+
+/**
+ * cpr3_irq_handler() - CPR interrupt handler callback function used for
+ *		software closed-loop operation
+ * @irq:		CPR interrupt number
+ * @data:		Private data corresponding to the CPR3 controller
+ *			pointer
+ *
+ * This function increases or decreases the vdd supply voltage based upon the
+ * CPR controller recommendation.
+ *
+ * Return: IRQ_HANDLED
+ */
+static irqreturn_t cpr3_irq_handler(int irq, void *data)
+{
+	struct cpr3_controller *ctrl = data;
+	struct cpr3_corner *aggr = &ctrl->aggr_corner;
+	u32 cont = CPR3_CONT_CMD_NACK;
+	u32 reg_last_measurement = 0;
+	struct cpr3_regulator *vreg;
+	struct cpr3_corner *corner;
+	unsigned long flags;
+	int i, j, new_volt, last_volt, dynamic_floor_volt, rc;
+	u32 irq_en, status, cpr_status, ctl;
+	bool up, down;
+
+	mutex_lock(&ctrl->lock);
+
+	if (!ctrl->cpr_enabled) {
+		cpr3_debug(ctrl, "CPR interrupt received but CPR is disabled\n");
+		mutex_unlock(&ctrl->lock);
+		return IRQ_HANDLED;
+	} else if (ctrl->use_hw_closed_loop) {
+		cpr3_debug(ctrl, "CPR interrupt received but CPR is using HW closed-loop\n");
+		goto done;
+	}
+
+	/*
+	 * CPR IRQ status checking and CPR controller disabling must happen
+	 * atomically and without intervening delay in order to avoid an
+	 * interrupt storm caused by the handler racing with the CPR
+	 * controller.
+	 */
+	local_irq_save(flags);
+	preempt_disable();
+
+	status = cpr3_read(ctrl, CPR3_REG_IRQ_STATUS);
+	up = status & CPR3_IRQ_UP;
+	down = status & CPR3_IRQ_DOWN;
+
+	if (!up && !down) {
+		/*
+		 * Toggle the CPR controller off and then back on since the
+		 * hardware and software states are out of sync.  This condition
+		 * occurs after an aging measurement completes as the CPR IRQ
+		 * physically triggers during the aging measurement but the
+		 * handler is stuck waiting on the mutex lock.
+		 */
+		cpr3_ctrl_loop_disable(ctrl);
+
+		local_irq_restore(flags);
+		preempt_enable();
+
+		/* Wait for the loop disable write to complete */
+		mb();
+
+		/* Wait for BUSY=1 and LOOP_EN=0 in CPR controller registers. */
+		for (i = 0; i < CPR3_REGISTER_WRITE_DELAY_US / 10; i++) {
+			cpr_status = cpr3_read(ctrl, CPR3_REG_CPR_STATUS);
+			ctl = cpr3_read(ctrl, CPR3_REG_CPR_CTL);
+			if (cpr_status & CPR3_CPR_STATUS_BUSY_MASK
+			    && (ctl & CPR3_CPR_CTL_LOOP_EN_MASK)
+					== CPR3_CPR_CTL_LOOP_DISABLE)
+				break;
+			udelay(10);
+		}
+		if (i == CPR3_REGISTER_WRITE_DELAY_US / 10)
+			cpr3_debug(ctrl, "CPR controller not disabled after %d us\n",
+				CPR3_REGISTER_WRITE_DELAY_US);
+
+		/* Clear interrupt status */
+		cpr3_write(ctrl, CPR3_REG_IRQ_CLEAR,
+			CPR3_IRQ_UP | CPR3_IRQ_DOWN);
+
+		/* Wait for the interrupt clearing write to complete */
+		mb();
+
+		/* Wait for IRQ_STATUS register to be cleared. */
+		for (i = 0; i < CPR3_REGISTER_WRITE_DELAY_US / 10; i++) {
+			status = cpr3_read(ctrl, CPR3_REG_IRQ_STATUS);
+			if (!(status & (CPR3_IRQ_UP | CPR3_IRQ_DOWN)))
+				break;
+			udelay(10);
+		}
+		if (i == CPR3_REGISTER_WRITE_DELAY_US / 10)
+			cpr3_debug(ctrl, "CPR interrupts not cleared after %d us\n",
+				CPR3_REGISTER_WRITE_DELAY_US);
+
+		cpr3_ctrl_loop_enable(ctrl);
+
+		cpr3_debug(ctrl, "CPR interrupt received but no up or down status bit is set\n");
+
+		mutex_unlock(&ctrl->lock);
+		return IRQ_HANDLED;
+	} else if (up && down) {
+		cpr3_debug(ctrl, "both up and down status bits set\n");
+		/* The up flag takes precedence over the down flag. */
+		down = false;
+	}
+
+	if (ctrl->supports_hw_closed_loop)
+		reg_last_measurement
+			= cpr3_read(ctrl, CPR3_REG_LAST_MEASUREMENT);
+	dynamic_floor_volt = cpr3_regulator_get_dynamic_floor_volt(ctrl,
+							reg_last_measurement);
+
+	local_irq_restore(flags);
+	preempt_enable();
+
+	irq_en = aggr->irq_en;
+	last_volt = aggr->last_volt;
+
+	for (i = 0; i < ctrl->thread_count; i++) {
+		if (cpr3_thread_busy(&ctrl->thread[i])) {
+			cpr3_debug(ctrl, "CPR thread %u busy when it should be waiting for SW cont\n",
+				ctrl->thread[i].thread_id);
+			goto done;
+		}
+	}
+
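+	/*
+	 * Step vdd by one step_volt in the recommended direction; e.g.
+	 * (illustrative) with step_volt = 10000 uV and last_volt =
+	 * 700000 uV, an UP recommendation targets 710000 uV.
+	 */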
+	new_volt = up ? last_volt + ctrl->step_volt
+		      : last_volt - ctrl->step_volt;
+
+	/* Re-enable UP/DOWN interrupt when its opposite is received. */
+	irq_en |= up ? CPR3_IRQ_DOWN : CPR3_IRQ_UP;
+
+	if (new_volt > aggr->ceiling_volt) {
+		new_volt = aggr->ceiling_volt;
+		irq_en &= ~CPR3_IRQ_UP;
+		cpr3_debug(ctrl, "limiting to ceiling=%d uV\n",
+			aggr->ceiling_volt);
+	} else if (new_volt < aggr->floor_volt) {
+		new_volt = aggr->floor_volt;
+		irq_en &= ~CPR3_IRQ_DOWN;
+		cpr3_debug(ctrl, "limiting to floor=%d uV\n", aggr->floor_volt);
+	}
+
+	if (down && new_volt < dynamic_floor_volt) {
+		/*
+		 * The vdd-supply voltage should not be decreased below the
+		 * dynamic floor voltage.  However, it is not necessary (and
+		 * counterproductive) to force the voltage up to this level
+		 * if it happened to be below it since the closed-loop voltage
+		 * must have gotten there in a safe manner while the power
+		 * domains for the CPR3 regulator imposing the dynamic floor
+		 * were not bypassed.
+		 */
+		new_volt = last_volt;
+		irq_en &= ~CPR3_IRQ_DOWN;
+		cpr3_debug(ctrl, "limiting to dynamic floor=%d uV\n",
+			dynamic_floor_volt);
+	}
+
+	for (i = 0; i < ctrl->thread_count; i++)
+		cpr3_print_result(&ctrl->thread[i]);
+
+	cpr3_debug(ctrl, "%s: new_volt=%d uV, last_volt=%d uV\n",
+		up ? "UP" : "DN", new_volt, last_volt);
+
+	if (ctrl->proc_clock_throttle && last_volt == aggr->ceiling_volt
+	    && new_volt < last_volt)
+		cpr3_write(ctrl, CPR3_REG_PD_THROTTLE,
+				ctrl->proc_clock_throttle);
+
+	if (new_volt != last_volt) {
+		rc = cpr3_regulator_scale_vdd_voltage(ctrl, new_volt,
+						      last_volt,
+						      aggr);
+		if (rc) {
+			cpr3_err(ctrl, "scale_vdd() failed to set vdd=%d uV, rc=%d\n",
+				 new_volt, rc);
+			goto done;
+		}
+		cont = CPR3_CONT_CMD_ACK;
+
+		/*
+		 * Update the closed-loop voltage for all regulators managed
+		 * by this CPR controller.
+		 */
+		for (i = 0; i < ctrl->thread_count; i++) {
+			for (j = 0; j < ctrl->thread[i].vreg_count; j++) {
+				vreg = &ctrl->thread[i].vreg[j];
+				cpr3_update_vreg_closed_loop_volt(vreg,
+					new_volt, reg_last_measurement);
+			}
+		}
+	}
+
+	if (ctrl->proc_clock_throttle && new_volt == aggr->ceiling_volt)
+		cpr3_write(ctrl, CPR3_REG_PD_THROTTLE,
+				CPR3_PD_THROTTLE_DISABLE);
+
+	corner = &ctrl->thread[0].vreg[0].corner[
+			ctrl->thread[0].vreg[0].current_corner];
+
+	if (irq_en != aggr->irq_en) {
+		aggr->irq_en = irq_en;
+		cpr3_write(ctrl, CPR3_REG_IRQ_EN, irq_en);
+	}
+
+	aggr->last_volt = new_volt;
+
+done:
+	/* Clear interrupt status */
+	cpr3_write(ctrl, CPR3_REG_IRQ_CLEAR, CPR3_IRQ_UP | CPR3_IRQ_DOWN);
+
+	/* ACK or NACK the CPR controller */
+	cpr3_write(ctrl, CPR3_REG_CONT_CMD, cont);
+
+	mutex_unlock(&ctrl->lock);
+	return IRQ_HANDLED;
+}
+
+/**
+ * cpr3_ceiling_irq_handler() - CPR ceiling reached interrupt handler callback
+ *		function used for hardware closed-loop operation
+ * @irq:		CPR ceiling interrupt number
+ * @data:		Private data corresponding to the CPR3 controller
+ *			pointer
+ *
+ * This function disables processor clock throttling and closed-loop operation
+ * when the ceiling voltage is reached.
+ *
+ * Return: IRQ_HANDLED
+ */
+static irqreturn_t cpr3_ceiling_irq_handler(int irq, void *data)
+{
+	struct cpr3_controller *ctrl = data;
+	int rc, volt;
+
+	mutex_lock(&ctrl->lock);
+
+	if (!ctrl->cpr_enabled) {
+		cpr3_debug(ctrl, "CPR ceiling interrupt received but CPR is disabled\n");
+		goto done;
+	} else if (!ctrl->use_hw_closed_loop) {
+		cpr3_debug(ctrl, "CPR ceiling interrupt received but CPR is using SW closed-loop\n");
+		goto done;
+	}
+
+	volt = regulator_get_voltage(ctrl->vdd_regulator);
+	if (volt < 0) {
+		cpr3_err(ctrl, "could not get vdd voltage, rc=%d\n", volt);
+		goto done;
+	} else if (volt != ctrl->aggr_corner.ceiling_volt) {
+		cpr3_debug(ctrl, "CPR ceiling interrupt received but vdd voltage: %d uV != ceiling voltage: %d uV\n",
+			volt, ctrl->aggr_corner.ceiling_volt);
+		goto done;
+	}
+
+	if (ctrl->ctrl_type == CPR_CTRL_TYPE_CPR3) {
+		/*
+		 * Since the ceiling voltage has been reached, disable processor
+		 * clock throttling as well as CPR closed-loop operation.
+		 */
+		cpr3_write(ctrl, CPR3_REG_PD_THROTTLE,
+				CPR3_PD_THROTTLE_DISABLE);
+		cpr3_ctrl_loop_disable(ctrl);
+		cpr3_debug(ctrl, "CPR closed-loop and throttling disabled\n");
+	}
+
+done:
+	rc = msm_spm_avs_clear_irq(0, MSM_SPM_AVS_IRQ_MAX);
+	if (rc)
+		cpr3_err(ctrl, "could not clear max IRQ, rc=%d\n", rc);
+
+	mutex_unlock(&ctrl->lock);
+	return IRQ_HANDLED;
+}
+
+/**
+ * cpr3_regulator_vreg_register() - register a regulator device for a CPR3
+ *		regulator
+ * @vreg:		Pointer to the CPR3 regulator
+ *
+ * This function initializes all of the regulator-framework-related
+ * structures and then calls regulator_register() for the CPR3 regulator.
+ *
+ * Return: 0 on success, errno on failure
+ */
+static int cpr3_regulator_vreg_register(struct cpr3_regulator *vreg)
+{
+	struct regulator_config config = {};
+	struct regulator_desc *rdesc;
+	struct regulator_init_data *init_data;
+	int rc;
+
+	init_data = of_get_regulator_init_data(vreg->thread->ctrl->dev,
+						vreg->of_node, &vreg->rdesc);
+	if (!init_data) {
+		cpr3_err(vreg, "regulator init data is missing\n");
+		return -EINVAL;
+	}
+
+	init_data->constraints.input_uV = init_data->constraints.max_uV;
+	rdesc			= &vreg->rdesc;
+	if (vreg->thread->ctrl->ctrl_type == CPR_CTRL_TYPE_CPRH) {
+		/* CPRh regulators are treated as always-on regulators */
+		rdesc->ops = &cprh_regulator_ops;
+	} else {
+		init_data->constraints.valid_ops_mask
+			|= REGULATOR_CHANGE_VOLTAGE | REGULATOR_CHANGE_STATUS;
+		rdesc->ops = &cpr3_regulator_ops;
+	}
+
+	rdesc->n_voltages	= vreg->corner_count;
+	rdesc->name		= init_data->constraints.name;
+	rdesc->owner		= THIS_MODULE;
+	rdesc->type		= REGULATOR_VOLTAGE;
+
+	config.dev		= vreg->thread->ctrl->dev;
+	config.driver_data	= vreg;
+	config.init_data	= init_data;
+	config.of_node		= vreg->of_node;
+
+	vreg->rdev = regulator_register(rdesc, &config);
+	if (IS_ERR(vreg->rdev)) {
+		rc = PTR_ERR(vreg->rdev);
+		cpr3_err(vreg, "regulator_register failed, rc=%d\n", rc);
+		return rc;
+	}
+
+	return 0;
+}
+
+static int debugfs_int_set(void *data, u64 val)
+{
+	*(int *)data = val;
+	return 0;
+}
+
+static int debugfs_int_get(void *data, u64 *val)
+{
+	*val = *(int *)data;
+	return 0;
+}
+DEFINE_SIMPLE_ATTRIBUTE(fops_int, debugfs_int_get, debugfs_int_set, "%lld\n");
+DEFINE_SIMPLE_ATTRIBUTE(fops_int_ro, debugfs_int_get, NULL, "%lld\n");
+DEFINE_SIMPLE_ATTRIBUTE(fops_int_wo, NULL, debugfs_int_set, "%lld\n");
+
+/**
+ * debugfs_create_int - create a debugfs file that is used to read and write a
+ *		signed int value
+ * @name:		Pointer to a string containing the name of the file to
+ *			create
+ * @mode:		The permissions that the file should have
+ * @parent:		Pointer to the parent dentry for this file.  This should
+ *			be a directory dentry if set.  If this parameter is
+ *			%NULL, then the file will be created in the root of the
+ *			debugfs filesystem.
+ * @value:		Pointer to the variable that the file should read to and
+ *			write from
+ *
+ * This function creates a file in debugfs with the given name that
+ * contains the value of the variable @value.  Depending on the permission
+ * bits set in @mode, the file can be read from and/or written to.
+ *
+ * This function will return a pointer to a dentry if it succeeds.  This
+ * pointer must be passed to the debugfs_remove() function when the file is
+ * to be removed.  If an error occurs, %NULL will be returned.
+ */
+static struct dentry *debugfs_create_int(const char *name, umode_t mode,
+				struct dentry *parent, int *value)
+{
+	/* if there are no write bits set, make read only */
+	if (!(mode & 0222))
+		return debugfs_create_file(name, mode, parent, value,
+					   &fops_int_ro);
+	/* if there are no read bits set, make write only */
+	if (!(mode & 0444))
+		return debugfs_create_file(name, mode, parent, value,
+					   &fops_int_wo);
+
+	return debugfs_create_file(name, mode, parent, value, &fops_int);
+}
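+
+/*
+ * Example usage (illustrative, directory name assumed): calling
+ * debugfs_create_int("step_volt", 0644, ctrl_dir, &ctrl->step_volt) creates
+ * a read/write file backed by fops_int, while a mode of 0444 would select
+ * fops_int_ro instead.
+ */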
+
+static int debugfs_bool_get(void *data, u64 *val)
+{
+	*val = *(bool *)data;
+	return 0;
+}
+DEFINE_SIMPLE_ATTRIBUTE(fops_bool_ro, debugfs_bool_get, NULL, "%lld\n");
+
+/**
+ * cpr3_debug_ldo_mode_allowed_set() - debugfs callback used to change the
+ *		value of the CPR3 regulator ldo_mode_allowed flag
+ * @data:		Pointer to private data which is equal to the CPR3
+ *			regulator pointer
+ * @val:		New value for ldo_mode_allowed
+ *
+ * Return: 0 on success, errno on failure
+ */
+static int cpr3_debug_ldo_mode_allowed_set(void *data, u64 val)
+{
+	struct cpr3_regulator *vreg = data;
+	struct cpr3_controller *ctrl = vreg->thread->ctrl;
+	bool allow = !!val;
+	int rc, vdd_volt;
+
+	mutex_lock(&ctrl->lock);
+
+	if (vreg->ldo_mode_allowed == allow)
+		goto done;
+
+	vreg->ldo_mode_allowed = allow;
+
+	if (!allow && vreg->ldo_regulator_bypass == LDO_MODE) {
+		vdd_volt = regulator_get_voltage(ctrl->vdd_regulator);
+		if (vdd_volt < 0) {
+			cpr3_err(vreg, "regulator_get_voltage(vdd) failed, rc=%d\n",
+				 vdd_volt);
+			goto done;
+		}
+
+		/* Switch back to BHS */
+		rc = cpr3_regulator_set_bhs_mode(vreg, vdd_volt,
+				       ctrl->aggr_corner.ceiling_volt);
+		if (rc) {
+			cpr3_err(vreg, "unable to switch to BHS mode, rc=%d\n",
+				 rc);
+			goto done;
+		}
+	} else {
+		rc = cpr3_regulator_update_ctrl_state(ctrl);
+		if (rc) {
+			cpr3_err(vreg, "could not change LDO mode=%s, rc=%d\n",
+				allow ? "allowed" : "disallowed", rc);
+			goto done;
+		}
+	}
+
+	cpr3_debug(vreg, "LDO mode=%s\n", allow ? "allowed" : "disallowed");
+
+done:
+	mutex_unlock(&ctrl->lock);
+	return 0;
+}
+
+/**
+ * cpr3_debug_ldo_mode_allowed_get() - debugfs callback used to retrieve the
+ *		value of the CPR3 regulator ldo_mode_allowed flag
+ * @data:		Pointer to private data which is equal to the CPR3
+ *			regulator pointer
+ * @val:		Output parameter written with a value of the
+ *			ldo_mode_allowed flag
+ *
+ * Return: 0 on success, errno on failure
+ */
+static int cpr3_debug_ldo_mode_allowed_get(void *data, u64 *val)
+{
+	struct cpr3_regulator *vreg = data;
+
+	*val = vreg->ldo_mode_allowed;
+
+	return 0;
+}
+DEFINE_SIMPLE_ATTRIBUTE(cpr3_debug_ldo_mode_allowed_fops,
+			cpr3_debug_ldo_mode_allowed_get,
+			cpr3_debug_ldo_mode_allowed_set,
+			"%llu\n");
+
+/**
+ * cpr3_debug_ldo_mode_get() - debugfs callback used to retrieve the state of
+ *		the CPR3 regulator's LDO
+ * @data:		Pointer to private data which is equal to the CPR3
+ *			regulator pointer
+ * @val:		Output parameter written with a value of 1 if using
+ *			LDO mode or 0 if the LDO is bypassed
+ *
+ * Return: 0 on success, errno on failure
+ */
+static int cpr3_debug_ldo_mode_get(void *data, u64 *val)
+{
+	struct cpr3_regulator *vreg = data;
+
+	*val = (vreg->ldo_regulator_bypass == LDO_MODE);
+
+	return 0;
+}
+DEFINE_SIMPLE_ATTRIBUTE(cpr3_debug_ldo_mode_fops, cpr3_debug_ldo_mode_get,
+			NULL, "%llu\n");
+
+/**
+ * struct cpr3_debug_corner_info - data structure used by the
+ *		cpr3_debugfs_create_corner_int function
+ * @vreg:		Pointer to the CPR3 regulator
+ * @index:		Pointer to the corner array index
+ * @member_offset:	Offset in bytes from the beginning of struct cpr3_corner
+ *			to the beginning of the value to be read from
+ * @corner:		Pointer to the CPR3 corner array
+ */
+struct cpr3_debug_corner_info {
+	struct cpr3_regulator	*vreg;
+	int			*index;
+	size_t			member_offset;
+	struct cpr3_corner	*corner;
+};
+
+static int cpr3_debug_corner_int_get(void *data, u64 *val)
+{
+	struct cpr3_debug_corner_info *info = data;
+	struct cpr3_controller *ctrl = info->vreg->thread->ctrl;
+	int i;
+
+	mutex_lock(&ctrl->lock);
+
+	i = *info->index;
+	if (i < 0)
+		i = 0;
+
+	*val = *(int *)((char *)&info->vreg->corner[i] + info->member_offset);
+
+	mutex_unlock(&ctrl->lock);
+
+	return 0;
+}
+DEFINE_SIMPLE_ATTRIBUTE(cpr3_debug_corner_int_fops, cpr3_debug_corner_int_get,
+			NULL, "%lld\n");
+
+/**
+ * cpr3_debugfs_create_corner_int - create a debugfs file that is used to read
+ *		a signed int value out of a CPR3 regulator's corner array
+ * @vreg:		Pointer to the CPR3 regulator
+ * @name:		Pointer to a string containing the name of the file to
+ *			create
+ * @mode:		The permissions that the file should have
+ * @parent:		Pointer to the parent dentry for this file.  This should
+ *			be a directory dentry if set.  If this parameter is
+ *			%NULL, then the file will be created in the root of the
+ *			debugfs filesystem.
+ * @index:		Pointer to the corner array index
+ * @member_offset:	Offset in bytes from the beginning of struct cpr3_corner
+ *			to the beginning of the value to be read from
+ *
+ * This function creates a file in debugfs with the given name that
+ * contains the value of the int type variable vreg->corner[index].member
+ * where member_offset == offsetof(struct cpr3_corner, member).
+ */
+static struct dentry *cpr3_debugfs_create_corner_int(
+		struct cpr3_regulator *vreg, const char *name, umode_t mode,
+		struct dentry *parent, int *index, size_t member_offset)
+{
+	struct cpr3_debug_corner_info *info;
+
+	info = devm_kzalloc(vreg->thread->ctrl->dev, sizeof(*info), GFP_KERNEL);
+	if (!info)
+		return NULL;
+
+	info->vreg = vreg;
+	info->index = index;
+	info->member_offset = member_offset;
+
+	return debugfs_create_file(name, mode, parent, info,
+				   &cpr3_debug_corner_int_fops);
+}
+
+static int cpr3_debug_quot_open(struct inode *inode, struct file *file)
+{
+	struct cpr3_debug_corner_info *info = inode->i_private;
+	struct cpr3_thread *thread = info->vreg->thread;
+	int size, i, pos;
+	u32 *quot;
+	char *buf;
+
+	/*
+	 * Max size:
+	 *  - 10 digits + ' ' or '\n' = 11 bytes per number
+	 *  - terminating '\0'
+	 */
+	size = CPR3_RO_COUNT * 11;
+	buf = kzalloc(size + 1, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	file->private_data = buf;
+
+	mutex_lock(&thread->ctrl->lock);
+
+	quot = info->corner[*info->index].target_quot;
+
+	for (i = 0, pos = 0; i < CPR3_RO_COUNT; i++)
+		pos += scnprintf(buf + pos, size - pos, "%u%c",
+			quot[i], i < CPR3_RO_COUNT - 1 ? ' ' : '\n');
+
+	mutex_unlock(&thread->ctrl->lock);
+
+	return nonseekable_open(inode, file);
+}
+
+static ssize_t cpr3_debug_quot_read(struct file *file, char __user *buf,
+		size_t len, loff_t *ppos)
+{
+	return simple_read_from_buffer(buf, len, ppos, file->private_data,
+					strlen(file->private_data));
+}
+
+static int cpr3_debug_quot_release(struct inode *inode, struct file *file)
+{
+	kfree(file->private_data);
+
+	return 0;
+}
+
+static const struct file_operations cpr3_debug_quot_fops = {
+	.owner	 = THIS_MODULE,
+	.open	 = cpr3_debug_quot_open,
+	.release = cpr3_debug_quot_release,
+	.read	 = cpr3_debug_quot_read,
+	.llseek  = no_llseek,
+};
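+
+/*
+ * Example output sketch (hypothetical quotient values): reading a
+ * target_quots file yields all CPR3_RO_COUNT target quotients for the
+ * selected corner on one space-separated, newline-terminated line:
+ *
+ *	0 0 812 0 745 0 0 0 0 0 0 0 0 0 0 0
+ *
+ * A value of 0 corresponds to a ring oscillator that is unused by the
+ * corner.
+ */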
+
+/**
+ * cpr3_regulator_debugfs_corner_add() - add debugfs files to expose
+ *		configuration data for the CPR corner
+ * @vreg:		Pointer to the CPR3 regulator
+ * @corner_dir:		Pointer to the parent corner dentry for the new files
+ * @index:		Pointer to the corner array index
+ *
+ * Return: none
+ */
+static void cpr3_regulator_debugfs_corner_add(struct cpr3_regulator *vreg,
+		struct dentry *corner_dir, int *index)
+{
+	struct cpr3_debug_corner_info *info;
+	struct dentry *temp;
+
+	temp = cpr3_debugfs_create_corner_int(vreg, "floor_volt", 0444,
+		corner_dir, index, offsetof(struct cpr3_corner, floor_volt));
+	if (IS_ERR_OR_NULL(temp)) {
+		cpr3_err(vreg, "floor_volt debugfs file creation failed\n");
+		return;
+	}
+
+	temp = cpr3_debugfs_create_corner_int(vreg, "ceiling_volt", 0444,
+		corner_dir, index, offsetof(struct cpr3_corner, ceiling_volt));
+	if (IS_ERR_OR_NULL(temp)) {
+		cpr3_err(vreg, "ceiling_volt debugfs file creation failed\n");
+		return;
+	}
+
+	temp = cpr3_debugfs_create_corner_int(vreg, "open_loop_volt", 0444,
+		corner_dir, index,
+		offsetof(struct cpr3_corner, open_loop_volt));
+	if (IS_ERR_OR_NULL(temp)) {
+		cpr3_err(vreg, "open_loop_volt debugfs file creation failed\n");
+		return;
+	}
+
+	temp = cpr3_debugfs_create_corner_int(vreg, "last_volt", 0444,
+		corner_dir, index, offsetof(struct cpr3_corner, last_volt));
+	if (IS_ERR_OR_NULL(temp)) {
+		cpr3_err(vreg, "last_volt debugfs file creation failed\n");
+		return;
+	}
+
+	info = devm_kzalloc(vreg->thread->ctrl->dev, sizeof(*info), GFP_KERNEL);
+	if (!info)
+		return;
+
+	info->vreg = vreg;
+	info->index = index;
+	info->corner = vreg->corner;
+
+	temp = debugfs_create_file("target_quots", 0444, corner_dir, info,
+				&cpr3_debug_quot_fops);
+	if (IS_ERR_OR_NULL(temp)) {
+		cpr3_err(vreg, "target_quots debugfs file creation failed\n");
+		return;
+	}
+}
+
+/**
+ * cpr3_debug_corner_index_set() - debugfs callback used to change the
+ *		value of the CPR3 regulator debug_corner index
+ * @data:		Pointer to private data which is equal to the CPR3
+ *			regulator pointer
+ * @val:		New value for debug_corner
+ *
+ * Return: 0 on success, errno on failure
+ */
+static int cpr3_debug_corner_index_set(void *data, u64 val)
+{
+	struct cpr3_regulator *vreg = data;
+
+	if (val < CPR3_CORNER_OFFSET || val > vreg->corner_count) {
+		cpr3_err(vreg, "invalid corner index %llu; allowed values: %d-%d\n",
+			val, CPR3_CORNER_OFFSET, vreg->corner_count);
+		return -EINVAL;
+	}
+
+	mutex_lock(&vreg->thread->ctrl->lock);
+	vreg->debug_corner = val - CPR3_CORNER_OFFSET;
+	mutex_unlock(&vreg->thread->ctrl->lock);
+
+	return 0;
+}
+
+/**
+ * cpr3_debug_corner_index_get() - debugfs callback used to retrieve
+ *		the value of the CPR3 regulator debug_corner index
+ * @data:		Pointer to private data which is equal to the CPR3
+ *			regulator pointer
+ * @val:		Output parameter written with the value of
+ *			debug_corner
+ *
+ * Return: 0 on success, errno on failure
+ */
+static int cpr3_debug_corner_index_get(void *data, u64 *val)
+{
+	struct cpr3_regulator *vreg = data;
+
+	*val = vreg->debug_corner + CPR3_CORNER_OFFSET;
+
+	return 0;
+}
+DEFINE_SIMPLE_ATTRIBUTE(cpr3_debug_corner_index_fops,
+			cpr3_debug_corner_index_get,
+			cpr3_debug_corner_index_set,
+			"%llu\n");
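+
+/*
+ * Illustrative debugfs interaction (hypothetical paths and values): the
+ * writable corner/index file selects which corner the sibling per-corner
+ * files report, using values from CPR3_CORNER_OFFSET upward:
+ *
+ *	echo 2 > /sys/kernel/debug/cpr3-regulator/<ctrl>/thread0/<vreg>/corner/index
+ *	cat /sys/kernel/debug/cpr3-regulator/<ctrl>/thread0/<vreg>/corner/floor_volt
+ */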
+
+/**
+ * cpr3_debug_current_corner_index_get() - debugfs callback used to retrieve
+ *		the value of the CPR3 regulator current_corner index
+ * @data:		Pointer to private data which is equal to the CPR3
+ *			regulator pointer
+ * @val:		Output parameter written with the value of
+ *			current_corner
+ *
+ * Return: 0 on success, errno on failure
+ */
+static int cpr3_debug_current_corner_index_get(void *data, u64 *val)
+{
+	struct cpr3_regulator *vreg = data;
+
+	*val = vreg->current_corner + CPR3_CORNER_OFFSET;
+
+	return 0;
+}
+DEFINE_SIMPLE_ATTRIBUTE(cpr3_debug_current_corner_index_fops,
+			cpr3_debug_current_corner_index_get,
+			NULL, "%llu\n");
+
+/**
+ * cpr3_regulator_debugfs_vreg_add() - add debugfs files to expose configuration
+ *		data for the CPR3 regulator
+ * @vreg:		Pointer to the CPR3 regulator
+ * @thread_dir:		CPR3 thread debugfs directory handle
+ *
+ * Return: none
+ */
+static void cpr3_regulator_debugfs_vreg_add(struct cpr3_regulator *vreg,
+				struct dentry *thread_dir)
+{
+	struct dentry *temp, *corner_dir, *vreg_dir;
+
+	vreg_dir = debugfs_create_dir(vreg->name, thread_dir);
+	if (IS_ERR_OR_NULL(vreg_dir)) {
+		cpr3_err(vreg, "%s debugfs directory creation failed\n",
+			vreg->name);
+		return;
+	}
+
+	temp = debugfs_create_int("speed_bin_fuse", 0444, vreg_dir,
+				  &vreg->speed_bin_fuse);
+	if (IS_ERR_OR_NULL(temp)) {
+		cpr3_err(vreg, "speed_bin_fuse debugfs file creation failed\n");
+		return;
+	}
+
+	temp = debugfs_create_int("cpr_rev_fuse", 0444, vreg_dir,
+				  &vreg->cpr_rev_fuse);
+	if (IS_ERR_OR_NULL(temp)) {
+		cpr3_err(vreg, "cpr_rev_fuse debugfs file creation failed\n");
+		return;
+	}
+
+	temp = debugfs_create_int("fuse_combo", 0444, vreg_dir,
+				  &vreg->fuse_combo);
+	if (IS_ERR_OR_NULL(temp)) {
+		cpr3_err(vreg, "fuse_combo debugfs file creation failed\n");
+		return;
+	}
+
+	if (vreg->ldo_regulator) {
+		temp = debugfs_create_file("ldo_mode", 0444, vreg_dir, vreg,
+					&cpr3_debug_ldo_mode_fops);
+		if (IS_ERR_OR_NULL(temp)) {
+			cpr3_err(vreg, "ldo_mode debugfs file creation failed\n");
+			return;
+		}
+
+		temp = debugfs_create_file("ldo_mode_allowed",
+				0644, vreg_dir, vreg,
+				&cpr3_debug_ldo_mode_allowed_fops);
+		if (IS_ERR_OR_NULL(temp)) {
+			cpr3_err(vreg, "ldo_mode_allowed debugfs file creation failed\n");
+			return;
+		}
+	}
+
+	temp = debugfs_create_int("corner_count", 0444, vreg_dir,
+				  &vreg->corner_count);
+	if (IS_ERR_OR_NULL(temp)) {
+		cpr3_err(vreg, "corner_count debugfs file creation failed\n");
+		return;
+	}
+
+	corner_dir = debugfs_create_dir("corner", vreg_dir);
+	if (IS_ERR_OR_NULL(corner_dir)) {
+		cpr3_err(vreg, "corner debugfs directory creation failed\n");
+		return;
+	}
+
+	temp = debugfs_create_file("index", 0644, corner_dir, vreg,
+				&cpr3_debug_corner_index_fops);
+	if (IS_ERR_OR_NULL(temp)) {
+		cpr3_err(vreg, "index debugfs file creation failed\n");
+		return;
+	}
+
+	cpr3_regulator_debugfs_corner_add(vreg, corner_dir,
+					&vreg->debug_corner);
+
+	corner_dir = debugfs_create_dir("current_corner", vreg_dir);
+	if (IS_ERR_OR_NULL(corner_dir)) {
+		cpr3_err(vreg, "current_corner debugfs directory creation failed\n");
+		return;
+	}
+
+	temp = debugfs_create_file("index", 0444, corner_dir, vreg,
+				&cpr3_debug_current_corner_index_fops);
+	if (IS_ERR_OR_NULL(temp)) {
+		cpr3_err(vreg, "index debugfs file creation failed\n");
+		return;
+	}
+
+	cpr3_regulator_debugfs_corner_add(vreg, corner_dir,
+					  &vreg->current_corner);
+}
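+
+/*
+ * Resulting per-regulator debugfs layout (sketch derived from the calls
+ * above):
+ *
+ *	<thread dir>/<vreg name>/
+ *		speed_bin_fuse, cpr_rev_fuse, fuse_combo, corner_count
+ *		ldo_mode, ldo_mode_allowed (only when an LDO regulator is used)
+ *		corner/		index (read-write) plus per-corner files
+ *		current_corner/	index (read-only) plus per-corner files
+ */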
+
+/**
+ * cpr3_regulator_debugfs_thread_add() - add debugfs files to expose
+ *		configuration data for the CPR thread
+ * @thread:		Pointer to the CPR3 thread
+ *
+ * Return: none
+ */
+static void cpr3_regulator_debugfs_thread_add(struct cpr3_thread *thread)
+{
+	struct cpr3_controller *ctrl = thread->ctrl;
+	struct dentry *aggr_dir, *temp, *thread_dir;
+	struct cpr3_debug_corner_info *info;
+	char buf[20];
+	int *index;
+	int i;
+
+	scnprintf(buf, sizeof(buf), "thread%u", thread->thread_id);
+	thread_dir = debugfs_create_dir(buf, thread->ctrl->debugfs);
+	if (IS_ERR_OR_NULL(thread_dir)) {
+		cpr3_err(ctrl, "thread %u debugfs directory creation failed\n",
+			thread->thread_id);
+		return;
+	}
+
+	aggr_dir = debugfs_create_dir("max_aggregated_params", thread_dir);
+	if (IS_ERR_OR_NULL(aggr_dir)) {
+		cpr3_err(ctrl, "thread %u max_aggregated_params debugfs directory creation failed\n",
+			thread->thread_id);
+		return;
+	}
+
+	temp = debugfs_create_int("floor_volt", 0444, aggr_dir,
+				  &thread->aggr_corner.floor_volt);
+	if (IS_ERR_OR_NULL(temp)) {
+		cpr3_err(ctrl, "thread %u aggr floor_volt debugfs file creation failed\n",
+			thread->thread_id);
+		return;
+	}
+
+	temp = debugfs_create_int("ceiling_volt", 0444, aggr_dir,
+				  &thread->aggr_corner.ceiling_volt);
+	if (IS_ERR_OR_NULL(temp)) {
+		cpr3_err(ctrl, "thread %u aggr ceiling_volt debugfs file creation failed\n",
+			thread->thread_id);
+		return;
+	}
+
+	temp = debugfs_create_int("open_loop_volt", 0444, aggr_dir,
+				  &thread->aggr_corner.open_loop_volt);
+	if (IS_ERR_OR_NULL(temp)) {
+		cpr3_err(ctrl, "thread %u aggr open_loop_volt debugfs file creation failed\n",
+			thread->thread_id);
+		return;
+	}
+
+	temp = debugfs_create_int("last_volt", 0444, aggr_dir,
+				  &thread->aggr_corner.last_volt);
+	if (IS_ERR_OR_NULL(temp)) {
+		cpr3_err(ctrl, "thread %u aggr last_volt debugfs file creation failed\n",
+			thread->thread_id);
+		return;
+	}
+
+	info = devm_kzalloc(thread->ctrl->dev, sizeof(*info), GFP_KERNEL);
+	index = devm_kzalloc(thread->ctrl->dev, sizeof(*index), GFP_KERNEL);
+	if (!info || !index)
+		return;
+	*index = 0;
+	info->vreg = &thread->vreg[0];
+	info->index = index;
+	info->corner = &thread->aggr_corner;
+
+	temp = debugfs_create_file("target_quots", 0444, aggr_dir, info,
+				&cpr3_debug_quot_fops);
+	if (IS_ERR_OR_NULL(temp)) {
+		cpr3_err(ctrl, "thread %u target_quots debugfs file creation failed\n",
+			thread->thread_id);
+		return;
+	}
+
+	for (i = 0; i < thread->vreg_count; i++)
+		cpr3_regulator_debugfs_vreg_add(&thread->vreg[i], thread_dir);
+}
+
+/**
+ * cpr3_debug_closed_loop_enable_set() - debugfs callback used to change the
+ *		value of the CPR controller cpr_allowed_sw flag which enables or
+ *		disables closed-loop operation
+ * @data:		Pointer to private data which is equal to the CPR
+ *			controller pointer
+ * @val:		New value for cpr_allowed_sw
+ *
+ * Return: 0 on success, errno on failure
+ */
+static int cpr3_debug_closed_loop_enable_set(void *data, u64 val)
+{
+	struct cpr3_controller *ctrl = data;
+	bool enable = !!val;
+	int rc;
+
+	mutex_lock(&ctrl->lock);
+
+	if (ctrl->cpr_allowed_sw == enable)
+		goto done;
+
+	if (enable && !ctrl->cpr_allowed_hw) {
+		cpr3_err(ctrl, "CPR closed-loop operation is not allowed\n");
+		goto done;
+	}
+
+	ctrl->cpr_allowed_sw = enable;
+
+	if (ctrl->ctrl_type == CPR_CTRL_TYPE_CPRH) {
+		cpr3_masked_write(ctrl, CPR4_REG_MARGIN_ADJ_CTL,
+			CPR4_MARGIN_ADJ_CTL_HW_CLOSED_LOOP_EN_MASK,
+			ctrl->cpr_allowed_sw && ctrl->use_hw_closed_loop
+			? CPR4_MARGIN_ADJ_CTL_HW_CLOSED_LOOP_ENABLE
+			: CPR4_MARGIN_ADJ_CTL_HW_CLOSED_LOOP_DISABLE);
+	} else {
+		rc = cpr3_regulator_update_ctrl_state(ctrl);
+		if (rc) {
+			cpr3_err(ctrl, "could not change CPR enable state=%u, rc=%d\n",
+				 enable, rc);
+			goto done;
+		}
+
+		if (ctrl->proc_clock_throttle && !ctrl->cpr_enabled) {
+			rc = cpr3_clock_enable(ctrl);
+			if (rc) {
+				cpr3_err(ctrl, "clock enable failed, rc=%d\n",
+					 rc);
+				goto done;
+			}
+			ctrl->cpr_enabled = true;
+
+			cpr3_write(ctrl, CPR3_REG_PD_THROTTLE,
+				   CPR3_PD_THROTTLE_DISABLE);
+
+			cpr3_clock_disable(ctrl);
+			ctrl->cpr_enabled = false;
+		}
+	}
+
+	if (ctrl->ctrl_type != CPR_CTRL_TYPE_CPRH) {
+		cpr3_debug(ctrl, "closed-loop=%s\n", enable ?
+			   "enabled" : "disabled");
+	} else {
+		cpr3_debug(ctrl, "closed-loop=%s\n", enable &&
+			   ctrl->use_hw_closed_loop ? "enabled" : "disabled");
+	}
+done:
+	mutex_unlock(&ctrl->lock);
+	return 0;
+}
+
+/**
+ * cpr3_debug_closed_loop_enable_get() - debugfs callback used to retrieve
+ *		the value of the CPR controller cpr_allowed_sw flag which
+ *		indicates if closed-loop operation is enabled
+ * @data:		Pointer to private data which is equal to the CPR
+ *			controller pointer
+ * @val:		Output parameter written with the value of
+ *			cpr_allowed_sw
+ *
+ * Return: 0 on success, errno on failure
+ */
+static int cpr3_debug_closed_loop_enable_get(void *data, u64 *val)
+{
+	struct cpr3_controller *ctrl = data;
+
+	*val = ctrl->cpr_allowed_sw;
+
+	return 0;
+}
+DEFINE_SIMPLE_ATTRIBUTE(cpr3_debug_closed_loop_enable_fops,
+			cpr3_debug_closed_loop_enable_get,
+			cpr3_debug_closed_loop_enable_set,
+			"%llu\n");
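+
+/*
+ * Illustrative usage sketch (hypothetical path): closed-loop operation
+ * may be toggled at runtime via the cpr_closed_loop_enable file created
+ * in cpr3_regulator_debugfs_ctrl_add():
+ *
+ *	echo 0 > /sys/kernel/debug/cpr3-regulator/<ctrl>/cpr_closed_loop_enable
+ *
+ * Note that writes of 1 only take effect when ctrl->cpr_allowed_hw is
+ * true.
+ */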
+
+/**
+ * cpr3_debug_hw_closed_loop_enable_set() - debugfs callback used to change the
+ *		value of the CPR controller use_hw_closed_loop flag which
+ *		switches between software closed-loop and hardware closed-loop
+ *		operation for CPR3 and CPR4 controllers and between open-loop
+ *		and full hardware closed-loop operation for CPRh controllers.
+ * @data:		Pointer to private data which is equal to the CPR
+ *			controller pointer
+ * @val:		New value for use_hw_closed_loop
+ *
+ * Return: 0 on success, errno on failure
+ */
+static int cpr3_debug_hw_closed_loop_enable_set(void *data, u64 val)
+{
+	struct cpr3_controller *ctrl = data;
+	bool use_hw_closed_loop = !!val;
+	struct cpr3_regulator *vreg;
+	bool cpr_enabled;
+	int i, j, k, rc;
+
+	mutex_lock(&ctrl->lock);
+
+	if (ctrl->use_hw_closed_loop == use_hw_closed_loop)
+		goto done;
+
+	if (ctrl->ctrl_type == CPR_CTRL_TYPE_CPR4) {
+		rc = cpr3_ctrl_clear_cpr4_config(ctrl);
+		if (rc) {
+			cpr3_err(ctrl, "failed to clear CPR4 configuration, rc=%d\n",
+				rc);
+			goto done;
+		}
+	}
+
+	if (ctrl->ctrl_type != CPR_CTRL_TYPE_CPRH)
+		cpr3_ctrl_loop_disable(ctrl);
+
+	ctrl->use_hw_closed_loop = use_hw_closed_loop;
+
+	cpr_enabled = ctrl->cpr_enabled;
+
+	/* Ensure that CPR clocks are enabled before writing to registers. */
+	if (!cpr_enabled) {
+		rc = cpr3_clock_enable(ctrl);
+		if (rc) {
+			cpr3_err(ctrl, "clock enable failed, rc=%d\n", rc);
+			goto done;
+		}
+		ctrl->cpr_enabled = true;
+	}
+
+	if (ctrl->use_hw_closed_loop && ctrl->ctrl_type != CPR_CTRL_TYPE_CPRH)
+		cpr3_write(ctrl, CPR3_REG_IRQ_EN, 0);
+
+	if (ctrl->ctrl_type == CPR_CTRL_TYPE_CPR4) {
+		cpr3_masked_write(ctrl, CPR4_REG_MARGIN_ADJ_CTL,
+			CPR4_MARGIN_ADJ_CTL_HW_CLOSED_LOOP_EN_MASK,
+			ctrl->use_hw_closed_loop
+			? CPR4_MARGIN_ADJ_CTL_HW_CLOSED_LOOP_ENABLE
+			: CPR4_MARGIN_ADJ_CTL_HW_CLOSED_LOOP_DISABLE);
+	} else if (ctrl->ctrl_type == CPR_CTRL_TYPE_CPRH) {
+		cpr3_masked_write(ctrl, CPR4_REG_MARGIN_ADJ_CTL,
+			CPR4_MARGIN_ADJ_CTL_HW_CLOSED_LOOP_EN_MASK,
+			ctrl->cpr_allowed_sw && ctrl->use_hw_closed_loop
+			? CPR4_MARGIN_ADJ_CTL_HW_CLOSED_LOOP_ENABLE
+			: CPR4_MARGIN_ADJ_CTL_HW_CLOSED_LOOP_DISABLE);
+	} else if (ctrl->ctrl_type == CPR_CTRL_TYPE_CPR3) {
+		cpr3_write(ctrl, CPR3_REG_HW_CLOSED_LOOP,
+			ctrl->use_hw_closed_loop
+			? CPR3_HW_CLOSED_LOOP_ENABLE
+			: CPR3_HW_CLOSED_LOOP_DISABLE);
+	}
+
+	/* Turn off CPR clocks if they were off before this function call. */
+	if (!cpr_enabled) {
+		cpr3_clock_disable(ctrl);
+		ctrl->cpr_enabled = false;
+	}
+
+	if (ctrl->use_hw_closed_loop && ctrl->ctrl_type == CPR_CTRL_TYPE_CPR3) {
+		rc = regulator_enable(ctrl->vdd_limit_regulator);
+		if (rc) {
+			cpr3_err(ctrl, "CPR limit regulator enable failed, rc=%d\n",
+				rc);
+			goto done;
+		}
+
+		rc = msm_spm_avs_enable_irq(0, MSM_SPM_AVS_IRQ_MAX);
+		if (rc) {
+			cpr3_err(ctrl, "could not enable max IRQ, rc=%d\n", rc);
+			goto done;
+		}
+	} else if (!ctrl->use_hw_closed_loop
+			&& ctrl->ctrl_type == CPR_CTRL_TYPE_CPR3) {
+		rc = regulator_disable(ctrl->vdd_limit_regulator);
+		if (rc) {
+			cpr3_err(ctrl, "CPR limit regulator disable failed, rc=%d\n",
+				rc);
+			goto done;
+		}
+
+		rc = msm_spm_avs_disable_irq(0, MSM_SPM_AVS_IRQ_MAX);
+		if (rc) {
+			cpr3_err(ctrl, "could not disable max IRQ, rc=%d\n",
+				rc);
+			goto done;
+		}
+	}
+
+	if (ctrl->ctrl_type != CPR_CTRL_TYPE_CPRH) {
+		/*
+		 * Due to APM and mem-acc floor restriction constraints,
+		 * the closed-loop voltage may be different when using
+		 * software closed-loop vs hardware closed-loop.  Therefore,
+		 * reset the cached closed-loop voltage for all corners to the
+		 * corresponding open-loop voltage when switching between
+		 * SW and HW closed-loop mode.
+		 */
+		for (i = 0; i < ctrl->thread_count; i++) {
+			for (j = 0; j < ctrl->thread[i].vreg_count; j++) {
+				vreg = &ctrl->thread[i].vreg[j];
+				for (k = 0; k < vreg->corner_count; k++)
+					vreg->corner[k].last_volt
+					= vreg->corner[k].open_loop_volt;
+			}
+		}
+
+		/* Skip last_volt caching */
+		ctrl->last_corner_was_closed_loop = false;
+
+		rc = cpr3_regulator_update_ctrl_state(ctrl);
+		if (rc) {
+			cpr3_err(ctrl, "could not change CPR HW closed-loop enable state=%u, rc=%d\n",
+				 use_hw_closed_loop, rc);
+			goto done;
+		}
+
+		cpr3_debug(ctrl, "CPR mode=%s\n",
+			   use_hw_closed_loop ?
+			   "HW closed-loop" : "SW closed-loop");
+	} else {
+		cpr3_debug(ctrl, "CPR mode=%s\n",
+			   ctrl->cpr_allowed_sw && use_hw_closed_loop ?
+			   "full HW closed-loop" : "open-loop");
+	}
+done:
+	mutex_unlock(&ctrl->lock);
+	return 0;
+}
+
+/**
+ * cpr3_debug_hw_closed_loop_enable_get() - debugfs callback used to retrieve
+ *		the value of the CPR controller use_hw_closed_loop flag which
+ *		indicates if hardware closed-loop operation is being used in
+ *		place of software closed-loop operation
+ * @data:		Pointer to private data which is equal to the CPR
+ *			controller pointer
+ * @val:		Output parameter written with the value of
+ *			use_hw_closed_loop
+ *
+ * Return: 0 on success, errno on failure
+ */
+static int cpr3_debug_hw_closed_loop_enable_get(void *data, u64 *val)
+{
+	struct cpr3_controller *ctrl = data;
+
+	*val = ctrl->use_hw_closed_loop;
+
+	return 0;
+}
+DEFINE_SIMPLE_ATTRIBUTE(cpr3_debug_hw_closed_loop_enable_fops,
+			cpr3_debug_hw_closed_loop_enable_get,
+			cpr3_debug_hw_closed_loop_enable_set,
+			"%llu\n");
+
+/**
+ * cpr3_debug_trigger_aging_measurement_set() - debugfs callback used to trigger
+ *		another CPR aging measurement
+ * @data:		Pointer to private data which is equal to the CPR
+ *			controller pointer
+ * @val:		Unused
+ *
+ * Return: 0 on success, errno on failure
+ */
+static int cpr3_debug_trigger_aging_measurement_set(void *data, u64 val)
+{
+	struct cpr3_controller *ctrl = data;
+	int rc;
+
+	mutex_lock(&ctrl->lock);
+
+	if (ctrl->ctrl_type == CPR_CTRL_TYPE_CPR4) {
+		rc = cpr3_ctrl_clear_cpr4_config(ctrl);
+		if (rc) {
+			cpr3_err(ctrl, "failed to clear CPR4 configuration, rc=%d\n",
+				rc);
+			goto done;
+		}
+	}
+
+	cpr3_ctrl_loop_disable(ctrl);
+
+	cpr3_regulator_set_aging_ref_adjustment(ctrl, INT_MAX);
+	ctrl->aging_required = true;
+	ctrl->aging_succeeded = false;
+	ctrl->aging_failed = false;
+
+	rc = cpr3_regulator_update_ctrl_state(ctrl);
+	if (rc) {
+		cpr3_err(ctrl, "could not update the CPR controller state, rc=%d\n",
+			rc);
+		goto done;
+	}
+
+done:
+	mutex_unlock(&ctrl->lock);
+	return 0;
+}
+DEFINE_SIMPLE_ATTRIBUTE(cpr3_debug_trigger_aging_measurement_fops,
+			NULL,
+			cpr3_debug_trigger_aging_measurement_set,
+			"%llu\n");
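+
+/*
+ * Illustrative usage sketch (hypothetical path): writing any value to
+ * aging_trigger re-arms an aging measurement; the outcome can then be
+ * inspected through the read-only aging files created alongside it:
+ *
+ *	echo 1 > /sys/kernel/debug/cpr3-regulator/<ctrl>/aging_trigger
+ *	cat /sys/kernel/debug/cpr3-regulator/<ctrl>/aging_adj_volt
+ *	cat /sys/kernel/debug/cpr3-regulator/<ctrl>/aging_succeeded
+ */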
+
+/**
+ * cpr3_regulator_debugfs_ctrl_add() - add debugfs files to expose configuration
+ *		data for the CPR controller
+ * @ctrl:		Pointer to the CPR3 controller
+ *
+ * Return: none
+ */
+static void cpr3_regulator_debugfs_ctrl_add(struct cpr3_controller *ctrl)
+{
+	struct dentry *temp, *aggr_dir;
+	int i;
+
+	/* Add cpr3-regulator base directory if it isn't present already. */
+	if (cpr3_debugfs_base == NULL) {
+		cpr3_debugfs_base = debugfs_create_dir("cpr3-regulator", NULL);
+		if (IS_ERR_OR_NULL(cpr3_debugfs_base)) {
+			cpr3_err(ctrl, "cpr3-regulator debugfs base directory creation failed\n");
+			cpr3_debugfs_base = NULL;
+			return;
+		}
+	}
+
+	ctrl->debugfs = debugfs_create_dir(ctrl->name, cpr3_debugfs_base);
+	if (IS_ERR_OR_NULL(ctrl->debugfs)) {
+		cpr3_err(ctrl, "cpr3-regulator controller debugfs directory creation failed\n");
+		return;
+	}
+
+	temp = debugfs_create_file("cpr_closed_loop_enable", 0644,
+					ctrl->debugfs, ctrl,
+					&cpr3_debug_closed_loop_enable_fops);
+	if (IS_ERR_OR_NULL(temp)) {
+		cpr3_err(ctrl, "cpr_closed_loop_enable debugfs file creation failed\n");
+		return;
+	}
+
+	if (ctrl->supports_hw_closed_loop) {
+		temp = debugfs_create_file("use_hw_closed_loop", 0644,
+					ctrl->debugfs, ctrl,
+					&cpr3_debug_hw_closed_loop_enable_fops);
+		if (IS_ERR_OR_NULL(temp)) {
+			cpr3_err(ctrl, "use_hw_closed_loop debugfs file creation failed\n");
+			return;
+		}
+	}
+
+	temp = debugfs_create_int("thread_count", 0444, ctrl->debugfs,
+				  &ctrl->thread_count);
+	if (IS_ERR_OR_NULL(temp)) {
+		cpr3_err(ctrl, "thread_count debugfs file creation failed\n");
+		return;
+	}
+
+	if (ctrl->apm) {
+		temp = debugfs_create_int("apm_threshold_volt", 0444,
+				ctrl->debugfs, &ctrl->apm_threshold_volt);
+		if (IS_ERR_OR_NULL(temp)) {
+			cpr3_err(ctrl, "apm_threshold_volt debugfs file creation failed\n");
+			return;
+		}
+	}
+
+	if (ctrl->aging_required || ctrl->aging_succeeded
+	    || ctrl->aging_failed) {
+		temp = debugfs_create_int("aging_adj_volt", 0444,
+				ctrl->debugfs, &ctrl->aging_ref_adjust_volt);
+		if (IS_ERR_OR_NULL(temp)) {
+			cpr3_err(ctrl, "aging_adj_volt debugfs file creation failed\n");
+			return;
+		}
+
+		temp = debugfs_create_file("aging_succeeded", 0444,
+			ctrl->debugfs, &ctrl->aging_succeeded, &fops_bool_ro);
+		if (IS_ERR_OR_NULL(temp)) {
+			cpr3_err(ctrl, "aging_succeeded debugfs file creation failed\n");
+			return;
+		}
+
+		temp = debugfs_create_file("aging_failed", 0444,
+			ctrl->debugfs, &ctrl->aging_failed, &fops_bool_ro);
+		if (IS_ERR_OR_NULL(temp)) {
+			cpr3_err(ctrl, "aging_failed debugfs file creation failed\n");
+			return;
+		}
+
+		temp = debugfs_create_file("aging_trigger", 0200,
+			ctrl->debugfs, ctrl,
+			&cpr3_debug_trigger_aging_measurement_fops);
+		if (IS_ERR_OR_NULL(temp)) {
+			cpr3_err(ctrl, "aging_trigger debugfs file creation failed\n");
+			return;
+		}
+	}
+
+	aggr_dir = debugfs_create_dir("max_aggregated_voltages", ctrl->debugfs);
+	if (IS_ERR_OR_NULL(aggr_dir)) {
+		cpr3_err(ctrl, "max_aggregated_voltages debugfs directory creation failed\n");
+		return;
+	}
+
+	temp = debugfs_create_int("floor_volt", 0444, aggr_dir,
+				  &ctrl->aggr_corner.floor_volt);
+	if (IS_ERR_OR_NULL(temp)) {
+		cpr3_err(ctrl, "aggr floor_volt debugfs file creation failed\n");
+		return;
+	}
+
+	temp = debugfs_create_int("ceiling_volt", 0444, aggr_dir,
+				  &ctrl->aggr_corner.ceiling_volt);
+	if (IS_ERR_OR_NULL(temp)) {
+		cpr3_err(ctrl, "aggr ceiling_volt debugfs file creation failed\n");
+		return;
+	}
+
+	temp = debugfs_create_int("open_loop_volt", 0444, aggr_dir,
+				  &ctrl->aggr_corner.open_loop_volt);
+	if (IS_ERR_OR_NULL(temp)) {
+		cpr3_err(ctrl, "aggr open_loop_volt debugfs file creation failed\n");
+		return;
+	}
+
+	temp = debugfs_create_int("last_volt", 0444, aggr_dir,
+				  &ctrl->aggr_corner.last_volt);
+	if (IS_ERR_OR_NULL(temp)) {
+		cpr3_err(ctrl, "aggr last_volt debugfs file creation failed\n");
+		return;
+	}
+
+	for (i = 0; i < ctrl->thread_count; i++)
+		cpr3_regulator_debugfs_thread_add(&ctrl->thread[i]);
+}
+
+/**
+ * cpr3_regulator_debugfs_ctrl_remove() - remove debugfs files for the CPR
+ *		controller
+ * @ctrl:		Pointer to the CPR3 controller
+ *
+ * Note, this function must be called after the controller has been removed from
+ * cpr3_controller_list and while the cpr3_controller_list_mutex lock is held.
+ *
+ * Return: none
+ */
+static void cpr3_regulator_debugfs_ctrl_remove(struct cpr3_controller *ctrl)
+{
+	if (list_empty(&cpr3_controller_list)) {
+		debugfs_remove_recursive(cpr3_debugfs_base);
+		cpr3_debugfs_base = NULL;
+	} else {
+		debugfs_remove_recursive(ctrl->debugfs);
+	}
+}
+
+/**
+ * cpr3_regulator_init_ctrl_data() - performs initialization of CPR controller
+ *					elements
+ * @ctrl:		Pointer to the CPR3 controller
+ *
+ * Return: 0 on success, errno on failure
+ */
+static int cpr3_regulator_init_ctrl_data(struct cpr3_controller *ctrl)
+{
+	/* Read the initial vdd voltage from hardware. */
+	ctrl->aggr_corner.last_volt
+		= regulator_get_voltage(ctrl->vdd_regulator);
+	if (ctrl->aggr_corner.last_volt < 0) {
+		cpr3_err(ctrl, "regulator_get_voltage(vdd) failed, rc=%d\n",
+				ctrl->aggr_corner.last_volt);
+		return ctrl->aggr_corner.last_volt;
+	}
+	ctrl->aggr_corner.open_loop_volt = ctrl->aggr_corner.last_volt;
+
+	return 0;
+}
+
+/**
+ * cpr3_regulator_init_vreg_data() - performs initialization of common CPR3
+ *		regulator elements and validates aging configurations
+ * @vreg:		Pointer to the CPR3 regulator
+ *
+ * Return: 0 on success, errno on failure
+ */
+static int cpr3_regulator_init_vreg_data(struct cpr3_regulator *vreg)
+{
+	int i, j;
+	bool init_aging;
+
+	vreg->current_corner = CPR3_REGULATOR_CORNER_INVALID;
+	vreg->last_closed_loop_corner = CPR3_REGULATOR_CORNER_INVALID;
+
+	init_aging = vreg->aging_allowed && vreg->thread->ctrl->aging_required;
+
+	for (i = 0; i < vreg->corner_count; i++) {
+		vreg->corner[i].last_volt = vreg->corner[i].open_loop_volt;
+		vreg->corner[i].irq_en = CPR3_IRQ_UP | CPR3_IRQ_DOWN;
+
+		vreg->corner[i].ro_mask = 0;
+		for (j = 0; j < CPR3_RO_COUNT; j++) {
+			if (vreg->corner[i].target_quot[j] == 0)
+				vreg->corner[i].ro_mask |= BIT(j);
+		}
+
+		if (init_aging) {
+			vreg->corner[i].unaged_floor_volt
+				= vreg->corner[i].floor_volt;
+			vreg->corner[i].unaged_ceiling_volt
+				= vreg->corner[i].ceiling_volt;
+			vreg->corner[i].unaged_open_loop_volt
+				= vreg->corner[i].open_loop_volt;
+		}
+
+		if (vreg->aging_allowed) {
+			if (vreg->corner[i].unaged_floor_volt <= 0) {
+				cpr3_err(vreg, "invalid unaged_floor_volt[%d] = %d\n",
+					i, vreg->corner[i].unaged_floor_volt);
+				return -EINVAL;
+			}
+			if (vreg->corner[i].unaged_ceiling_volt <= 0) {
+				cpr3_err(vreg, "invalid unaged_ceiling_volt[%d] = %d\n",
+					i, vreg->corner[i].unaged_ceiling_volt);
+				return -EINVAL;
+			}
+			if (vreg->corner[i].unaged_open_loop_volt <= 0) {
+				cpr3_err(vreg, "invalid unaged_open_loop_volt[%d] = %d\n",
+				      i, vreg->corner[i].unaged_open_loop_volt);
+				return -EINVAL;
+			}
+		}
+	}
+
+	if (vreg->aging_allowed && vreg->corner[vreg->aging_corner].ceiling_volt
+	    > vreg->thread->ctrl->aging_ref_volt) {
+		cpr3_err(vreg, "aging corner %d ceiling voltage = %d > aging ref voltage = %d uV\n",
+			vreg->aging_corner,
+			vreg->corner[vreg->aging_corner].ceiling_volt,
+			vreg->thread->ctrl->aging_ref_volt);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/**
+ * cpr3_regulator_suspend() - perform common required CPR3 power down steps
+ *		before the system enters suspend
+ * @ctrl:		Pointer to the CPR3 controller
+ *
+ * Return: 0 on success, errno on failure
+ */
+int cpr3_regulator_suspend(struct cpr3_controller *ctrl)
+{
+	int rc;
+
+	mutex_lock(&ctrl->lock);
+
+	if (ctrl->ctrl_type == CPR_CTRL_TYPE_CPR4) {
+		rc = cpr3_ctrl_clear_cpr4_config(ctrl);
+		if (rc) {
+			cpr3_err(ctrl, "failed to clear CPR4 configuration, rc=%d\n",
+				rc);
+			mutex_unlock(&ctrl->lock);
+			return rc;
+		}
+	}
+
+	cpr3_ctrl_loop_disable(ctrl);
+
+	if (ctrl->ctrl_type != CPR_CTRL_TYPE_CPRH) {
+		rc = cpr3_closed_loop_disable(ctrl);
+		if (rc)
+			cpr3_err(ctrl, "could not disable CPR, rc=%d\n", rc);
+
+		ctrl->cpr_suspended = true;
+	}
+
+	mutex_unlock(&ctrl->lock);
+	return 0;
+}
+
+/**
+ * cpr3_regulator_resume() - perform common required CPR3 power up steps after
+ *		the system resumes from suspend
+ * @ctrl:		Pointer to the CPR3 controller
+ *
+ * Return: 0 on success, errno on failure
+ */
+int cpr3_regulator_resume(struct cpr3_controller *ctrl)
+{
+	int rc;
+
+	mutex_lock(&ctrl->lock);
+
+	if (ctrl->ctrl_type != CPR_CTRL_TYPE_CPRH) {
+		ctrl->cpr_suspended = false;
+		rc = cpr3_regulator_update_ctrl_state(ctrl);
+		if (rc)
+			cpr3_err(ctrl, "could not enable CPR, rc=%d\n", rc);
+	} else {
+		cpr3_ctrl_loop_enable(ctrl);
+	}
+
+	mutex_unlock(&ctrl->lock);
+	return 0;
+}
+
+/**
+ * cpr3_regulator_cpu_hotplug_callback() - reset CPR IRQ affinity when a CPU is
+ *		brought online via hotplug
+ * @nb:			Pointer to the notifier block
+ * @action:		hotplug action
+ * @hcpu:		long value corresponding to the CPU number
+ *
+ * Return: NOTIFY_OK
+ */
+static int cpr3_regulator_cpu_hotplug_callback(struct notifier_block *nb,
+					    unsigned long action, void *hcpu)
+{
+	struct cpr3_controller *ctrl = container_of(nb, struct cpr3_controller,
+					cpu_hotplug_notifier);
+	int cpu = (long)hcpu;
+
+	action &= ~CPU_TASKS_FROZEN;
+
+	if (action == CPU_ONLINE
+	    && cpumask_test_cpu(cpu, &ctrl->irq_affinity_mask))
+		irq_set_affinity(ctrl->irq, &ctrl->irq_affinity_mask);
+
+	return NOTIFY_OK;
+}
+
+/**
+ * cpr3_regulator_validate_controller() - verify the data passed in via the
+ *		cpr3_controller data structure
+ * @ctrl:		Pointer to the CPR3 controller
+ *
+ * Return: 0 on success, errno on failure
+ */
+static int cpr3_regulator_validate_controller(struct cpr3_controller *ctrl)
+{
+	struct cpr3_thread *thread;
+	struct cpr3_regulator *vreg;
+	int i, j, allow_boost_vreg_count = 0;
+
+	if (!ctrl->vdd_regulator && ctrl->ctrl_type != CPR_CTRL_TYPE_CPRH) {
+		cpr3_err(ctrl, "vdd regulator missing\n");
+		return -EINVAL;
+	} else if (ctrl->sensor_count <= 0
+		   || ctrl->sensor_count > CPR3_MAX_SENSOR_COUNT) {
+		cpr3_err(ctrl, "invalid CPR sensor count=%d\n",
+			ctrl->sensor_count);
+		return -EINVAL;
+	} else if (!ctrl->sensor_owner) {
+		cpr3_err(ctrl, "CPR sensor ownership table missing\n");
+		return -EINVAL;
+	}
+
+	if (ctrl->aging_required) {
+		for (i = 0; i < ctrl->aging_sensor_count; i++) {
+			if (ctrl->aging_sensor[i].sensor_id
+			    >= ctrl->sensor_count) {
+				cpr3_err(ctrl, "aging_sensor[%d] id=%u is not in the value range 0-%d\n",
+					i, ctrl->aging_sensor[i].sensor_id,
+					ctrl->sensor_count - 1);
+				return -EINVAL;
+			}
+		}
+	}
+
+	for (i = 0; i < ctrl->thread_count; i++) {
+		thread = &ctrl->thread[i];
+		for (j = 0; j < thread->vreg_count; j++) {
+			vreg = &thread->vreg[j];
+			if (vreg->allow_boost)
+				allow_boost_vreg_count++;
+		}
+	}
+
+	if (allow_boost_vreg_count > 1) {
+		/*
+		 * Boost feature is not allowed to be used for more
+		 * than one CPR3 regulator of a CPR3 controller.
+		 */
+		cpr3_err(ctrl, "Boost feature is enabled for more than one regulator\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/**
+ * cpr3_panic_callback() - panic notification callback function. This function
+ *		is invoked when a kernel panic occurs.
+ * @nfb:	Notifier block pointer of CPR3 controller
+ * @event:	Value passed unmodified to notifier function
+ * @data:	Pointer passed unmodified to notifier function
+ *
+ * Return: NOTIFY_OK
+ */
+static int cpr3_panic_callback(struct notifier_block *nfb,
+			unsigned long event, void *data)
+{
+	struct cpr3_controller *ctrl = container_of(nfb,
+				struct cpr3_controller, panic_notifier);
+	struct cpr3_panic_regs_info *regs_info = ctrl->panic_regs_info;
+	struct cpr3_reg_info *reg;
+	int i = 0;
+
+	for (i = 0; i < regs_info->reg_count; i++) {
+		reg = &(regs_info->regs[i]);
+		reg->value = readl_relaxed(reg->virt_addr);
+		pr_debug("%s[0x%08x] = 0x%08x\n", reg->name, reg->addr,
+			reg->value);
+	}
+	/*
+	 * Barrier to ensure that the information has been updated in the
+	 * structure.
+	 */
+	mb();
+
+	return NOTIFY_OK;
+}
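+
+/*
+ * Example log line sketch (hypothetical register name and values): each
+ * entry captured above is emitted as "%s[0x%08x] = 0x%08x", e.g.
+ *
+ *	cpr_status[0x0b018100] = 0x00000001
+ */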
+
+/**
+ * cpr3_regulator_register() - register the regulators for a CPR3 controller and
+ *		perform CPR hardware initialization
+ * @pdev:		Platform device pointer for the CPR3 controller
+ * @ctrl:		Pointer to the CPR3 controller
+ *
+ * Return: 0 on success, errno on failure
+ */
+int cpr3_regulator_register(struct platform_device *pdev,
+			struct cpr3_controller *ctrl)
+{
+	struct device *dev = &pdev->dev;
+	struct resource *res;
+	int i, j, rc;
+
+	if (!dev->of_node) {
+		dev_err(dev, "%s: Device tree node is missing\n", __func__);
+		return -EINVAL;
+	}
+
+	if (!ctrl || !ctrl->name) {
+		dev_err(dev, "%s: CPR controller data is missing\n", __func__);
+		return -EINVAL;
+	}
+
+	rc = cpr3_regulator_validate_controller(ctrl);
+	if (rc) {
+		cpr3_err(ctrl, "controller validation failed, rc=%d\n", rc);
+		return rc;
+	}
+
+	mutex_init(&ctrl->lock);
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cpr_ctrl");
+	if (!res || !res->start) {
+		cpr3_err(ctrl, "CPR controller address is missing\n");
+		return -ENXIO;
+	}
+	ctrl->cpr_ctrl_base = devm_ioremap(dev, res->start, resource_size(res));
+
+	if (ctrl->aging_possible_mask) {
+		/*
+		 * Aging possible register address is required if an aging
+		 * possible mask has been specified.
+		 */
+		res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+						"aging_allowed");
+		if (!res || !res->start) {
+			cpr3_err(ctrl, "CPR aging allowed address is missing\n");
+			return -ENXIO;
+		}
+		ctrl->aging_possible_reg = devm_ioremap(dev, res->start,
+							resource_size(res));
+	}
+
+	if (ctrl->ctrl_type != CPR_CTRL_TYPE_CPRH) {
+		ctrl->irq = platform_get_irq_byname(pdev, "cpr");
+		if (ctrl->irq < 0) {
+			cpr3_err(ctrl, "missing CPR interrupt\n");
+			return ctrl->irq;
+		}
+	}
+
+	if (ctrl->supports_hw_closed_loop) {
+		rc = msm_spm_probe_done();
+		if (rc) {
+			if (rc != -EPROBE_DEFER)
+				cpr3_err(ctrl, "spm unavailable, rc=%d\n", rc);
+			return rc;
+		}
+
+		if (ctrl->ctrl_type == CPR_CTRL_TYPE_CPR3) {
+			ctrl->ceiling_irq = platform_get_irq_byname(pdev,
+						"ceiling");
+			if (ctrl->ceiling_irq < 0) {
+				cpr3_err(ctrl, "missing ceiling interrupt\n");
+				return ctrl->ceiling_irq;
+			}
+		}
+	}
+
+	if (ctrl->ctrl_type != CPR_CTRL_TYPE_CPRH) {
+		rc = cpr3_regulator_init_ctrl_data(ctrl);
+		if (rc) {
+			cpr3_err(ctrl, "CPR controller data initialization failed, rc=%d\n",
+				 rc);
+			return rc;
+		}
+	}
+
+	for (i = 0; i < ctrl->thread_count; i++) {
+		for (j = 0; j < ctrl->thread[i].vreg_count; j++) {
+			rc = cpr3_regulator_init_vreg_data(
+						&ctrl->thread[i].vreg[j]);
+			if (rc)
+				return rc;
+			cpr3_print_quots(&ctrl->thread[i].vreg[j]);
+		}
+	}
+
+	/*
+	 * Add the maximum possible aging voltage margin until it is possible
+	 * to perform an aging measurement.
+	 */
+	if (ctrl->aging_required)
+		cpr3_regulator_set_aging_ref_adjustment(ctrl, INT_MAX);
+
+	rc = cpr3_regulator_init_ctrl(ctrl);
+	if (rc) {
+		cpr3_err(ctrl, "CPR controller initialization failed, rc=%d\n",
+			rc);
+		return rc;
+	}
+
+	/* Register regulator devices for all threads. */
+	for (i = 0; i < ctrl->thread_count; i++) {
+		for (j = 0; j < ctrl->thread[i].vreg_count; j++) {
+			rc = cpr3_regulator_vreg_register(
+					&ctrl->thread[i].vreg[j]);
+			if (rc) {
+				cpr3_err(&ctrl->thread[i].vreg[j], "failed to register regulator, rc=%d\n",
+					rc);
+				goto free_regulators;
+			}
+		}
+	}
+
+	if (ctrl->ctrl_type != CPR_CTRL_TYPE_CPRH) {
+		rc = devm_request_threaded_irq(dev, ctrl->irq, NULL,
+					       cpr3_irq_handler,
+					       IRQF_ONESHOT |
+					       IRQF_TRIGGER_RISING,
+					       "cpr3", ctrl);
+		if (rc) {
+			cpr3_err(ctrl, "could not request IRQ %d, rc=%d\n",
+				 ctrl->irq, rc);
+			goto free_regulators;
+		}
+	}
+
+	if (ctrl->supports_hw_closed_loop &&
+	    ctrl->ctrl_type == CPR_CTRL_TYPE_CPR3) {
+		rc = devm_request_threaded_irq(dev, ctrl->ceiling_irq, NULL,
+			cpr3_ceiling_irq_handler,
+			IRQF_ONESHOT | IRQF_TRIGGER_RISING,
+			"cpr3_ceiling", ctrl);
+		if (rc) {
+			cpr3_err(ctrl, "could not request ceiling IRQ %d, rc=%d\n",
+				ctrl->ceiling_irq, rc);
+			goto free_regulators;
+		}
+	}
+
+	if (ctrl->irq && !cpumask_empty(&ctrl->irq_affinity_mask)) {
+		irq_set_affinity(ctrl->irq, &ctrl->irq_affinity_mask);
+
+		ctrl->cpu_hotplug_notifier.notifier_call
+			= cpr3_regulator_cpu_hotplug_callback;
+		register_hotcpu_notifier(&ctrl->cpu_hotplug_notifier);
+	}
+
+	mutex_lock(&cpr3_controller_list_mutex);
+	cpr3_regulator_debugfs_ctrl_add(ctrl);
+	list_add(&ctrl->list, &cpr3_controller_list);
+	mutex_unlock(&cpr3_controller_list_mutex);
+
+	if (ctrl->panic_regs_info) {
+		/* Register panic notification call back */
+		ctrl->panic_notifier.notifier_call = cpr3_panic_callback;
+		atomic_notifier_chain_register(&panic_notifier_list,
+			&ctrl->panic_notifier);
+	}
+
+	return 0;
+
+free_regulators:
+	for (i = 0; i < ctrl->thread_count; i++)
+		for (j = 0; j < ctrl->thread[i].vreg_count; j++)
+			if (!IS_ERR_OR_NULL(ctrl->thread[i].vreg[j].rdev))
+				regulator_unregister(
+					ctrl->thread[i].vreg[j].rdev);
+	return rc;
+}
+
+/**
+ * cpr3_regulator_unregister() - unregister the regulators for a CPR3 controller
+ *		and perform CPR hardware shutdown
+ * @ctrl:		Pointer to the CPR3 controller
+ *
+ * Return: 0 on success, errno on failure
+ */
+int cpr3_regulator_unregister(struct cpr3_controller *ctrl)
+{
+	int i, j, rc = 0;
+
+	mutex_lock(&cpr3_controller_list_mutex);
+	list_del(&ctrl->list);
+	cpr3_regulator_debugfs_ctrl_remove(ctrl);
+	mutex_unlock(&cpr3_controller_list_mutex);
+
+	if (ctrl->irq && !cpumask_empty(&ctrl->irq_affinity_mask))
+		unregister_hotcpu_notifier(&ctrl->cpu_hotplug_notifier);
+
+	if (ctrl->ctrl_type == CPR_CTRL_TYPE_CPR4) {
+		rc = cpr3_ctrl_clear_cpr4_config(ctrl);
+		if (rc)
+			cpr3_err(ctrl, "failed to clear CPR4 configuration, rc=%d\n",
+				rc);
+	}
+
+	cpr3_ctrl_loop_disable(ctrl);
+
+	cpr3_closed_loop_disable(ctrl);
+
+	if (ctrl->vdd_limit_regulator) {
+		regulator_disable(ctrl->vdd_limit_regulator);
+
+		if (ctrl->ctrl_type == CPR_CTRL_TYPE_CPR3)
+			msm_spm_avs_disable_irq(0, MSM_SPM_AVS_IRQ_MAX);
+	}
+
+	for (i = 0; i < ctrl->thread_count; i++)
+		for (j = 0; j < ctrl->thread[i].vreg_count; j++)
+			regulator_unregister(ctrl->thread[i].vreg[j].rdev);
+
+	if (ctrl->panic_notifier.notifier_call)
+		atomic_notifier_chain_unregister(&panic_notifier_list,
+			&ctrl->panic_notifier);
+
+	return 0;
+}
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/regulator/cpr3-regulator.h	2019-01-22 16:16:26.263271400 +0100
@@ -0,0 +1,1110 @@
+/*
+ * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __REGULATOR_CPR3_REGULATOR_H__
+#define __REGULATOR_CPR3_REGULATOR_H__
+
+#include <linux/clk.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/types.h>
+#include <linux/power/qcom/apm.h>
+#include <linux/regulator/driver.h>
+
+struct cpr3_controller;
+struct cpr3_thread;
+
+/**
+ * struct cpr3_fuse_param - defines one contiguous segment of a fuse parameter
+ *			    that is contained within a given row.
+ * @row:	Fuse row number
+ * @bit_start:	The first bit within the row of the fuse parameter segment
+ * @bit_end:	The last bit within the row of the fuse parameter segment
+ *
+ * Each fuse row is 64 bits in length.  bit_start and bit_end may take values
+ * from 0 to 63.  bit_start must be less than or equal to bit_end.
+ */
+struct cpr3_fuse_param {
+	unsigned int		row;
+	unsigned int		bit_start;
+	unsigned int		bit_end;
+};
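+
+/*
+ * Illustrative example (hypothetical values): a 7-bit fuse parameter
+ * segment occupying bits 34 through 40 of fuse row 61 would be described
+ * as:
+ *
+ *	struct cpr3_fuse_param example = { .row = 61, .bit_start = 34,
+ *					   .bit_end = 40 };
+ *
+ * A parameter that crosses a row boundary is described with one such
+ * segment per row.
+ */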
+
+/* Each CPR3 sensor has 16 ring oscillators */
+#define CPR3_RO_COUNT		16
+
+/* The maximum number of sensors that can be present on a single CPR loop. */
+#define CPR3_MAX_SENSOR_COUNT	256
+
+/* This constant is used when allocating array printing buffers. */
+#define MAX_CHARS_PER_INT	10
+
+/**
+ * struct cpr4_sdelta - CPR4 controller specific data structure for the sdelta
+ *			adjustment table which is used to adjust the VDD supply
+ *			voltage automatically based upon the temperature and/or
+ *			the number of online CPU cores.
+ * @allow_core_count_adj: Core count adjustments are allowed.
+ * @allow_temp_adj:	Temperature based adjustments are allowed.
+ * @max_core_count:	Maximum number of cores considered for core count
+ *			adjustment logic.
+ * @temp_band_count:	Number of temperature bands considered for temperature
+ *			based adjustment logic.
+ * @cap_volt:		Cap in microvolts applied to SDELTA margins when
+ *			multiple cpr3-regulators are defined for a single
+ *			controller.
+ * @table:		SDELTA table with per-online-core and temperature based
+ *			adjustments of size (max_core_count * temp_band_count)
+ *			Outer: core count
+ *			Inner: temperature band
+ *			Each element has units of VDD supply steps. Positive
+ *			values correspond to a reduction in voltage and negative
+ *			values correspond to an increase (this follows the SDELTA
+ *			register semantics).
+ * @allow_boost:	Voltage boost allowed.
+ * @boost_num_cores:	The number of online cores at which the boost voltage
+ *			adjustments will be applied
+ * @boost_table:	SDELTA table with boost voltage adjustments of size
+ *			temp_band_count. Each element has units of VDD supply
+ *			voltage and negative values correspond to an increase
+ *			voltage and negative value correspond to an increase
+ *			(this follows the SDELTA register semantics).
+ */
+struct cpr4_sdelta {
+	bool	allow_core_count_adj;
+	bool	allow_temp_adj;
+	int	max_core_count;
+	int	temp_band_count;
+	int	cap_volt;
+	int	*table;
+	bool	allow_boost;
+	int	boost_num_cores;
+	int	*boost_table;
+};
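+
+/*
+ * Illustrative indexing sketch, assuming the row-major table layout
+ * described above (outer index: core count, inner index: temperature
+ * band): the adjustment in VDD supply steps for 'cores' online cores in
+ * temperature band 'band' would be read as:
+ *
+ *	adj = sdelta->table[(cores - 1) * sdelta->temp_band_count + band];
+ *
+ * with cores ranging from 1 to max_core_count and band from 0 to
+ * temp_band_count - 1.
+ */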
+
+/**
+ * struct cpr3_corner - CPR3 virtual voltage corner data structure
+ * @floor_volt:		CPR closed-loop floor voltage in microvolts
+ * @ceiling_volt:	CPR closed-loop ceiling voltage in microvolts
+ * @open_loop_volt:	CPR open-loop voltage (i.e. initial voltage) in
+ *			microvolts
+ * @last_volt:		Last known settled CPR closed-loop voltage which is used
+ *			when switching to a new corner
+ * @abs_ceiling_volt:	The absolute CPR closed-loop ceiling voltage in
+ *			microvolts.  This is used to limit the ceiling_volt
+ *			value when it is increased as a result of aging
+ *			adjustment.
+ * @unaged_floor_volt:	The CPR closed-loop floor voltage in microvolts before
+ *			any aging adjustment is performed
+ * @unaged_ceiling_volt: The CPR closed-loop ceiling voltage in microvolts
+ *			before any aging adjustment is performed
+ * @unaged_open_loop_volt: The CPR open-loop voltage (i.e. initial voltage) in
+ *			microvolts before any aging adjustment is performed
+ * @system_volt:	The system-supply voltage in microvolts or corners or
+ *			levels
+ * @mem_acc_volt:	The mem-acc-supply voltage in corners
+ * @proc_freq:		Processor frequency in Hertz. For CPR rev. 3 and 4
+ *			controllers, this field is only used by platform specific
+ *			CPR3 driver for interpolation. For CPRh-compliant
+ *			controllers, this frequency is also utilized by the
+ *			clock driver to determine the corner to CPU clock
+ *			frequency mappings.
+ * @cpr_fuse_corner:	Fused corner index associated with this virtual corner
+ *			(only used by platform specific CPR3 driver for
+ *			mapping purposes)
+ * @target_quot:	Array of target quotient values to use for each ring
+ *			oscillator (RO) for this corner.  A value of 0 should be
+ *			specified as the target quotient for each RO that is
+ *			unused by this corner.
+ * @ro_scale:		Array of CPR ring oscillator (RO) scaling factors.  The
+ *			scaling factor for each RO is defined from RO0 to RO15
+ *			with units of QUOT/V.  A value of 0 may be specified for
+ *			an RO that is unused.
+ * @ro_mask:		Bitmap where each of the 16 LSBs indicate if the
+ *			corresponding ROs should be masked for this corner
+ * @irq_en:		Bitmap of the CPR interrupts to enable for this corner
+ * @aging_derate:	The amount to derate the aging voltage adjustment
+ *			determined for the reference corner in units of uV/mV.
+ *			E.g. a value of 900 would imply that the adjustment for
+ *			this corner should be 90% (900/1000) of that for the
+ *			reference corner.
+ * @use_open_loop:	Boolean indicating that open-loop (i.e. CPR disabled) as
+ *			opposed to closed-loop operation must be used for this
+ *			corner on CPRh controllers.
+ * @ldo_mode_allowed:	Boolean which indicates if LDO mode is allowed for this
+ *			corner. This field is applicable for CPR4 controllers
+ *			that manage LDO300 supply regulator.
+ * @sdelta:		The CPR4 controller specific data for this corner. This
+ *			field is applicable for CPR4 controllers.
+ *
+ * The value of last_volt is initialized inside of the cpr3_regulator_register()
+ * call with the open_loop_volt value.  It can later be updated to the settled
+ * VDD supply voltage.  The values for unaged_floor_volt, unaged_ceiling_volt,
+ * and unaged_open_loop_volt are initialized inside of cpr3_regulator_register()
+ * if ctrl->aging_required == true.  These three values must be pre-initialized
+ * if cpr3_regulator_register() is called with ctrl->aging_required == false and
+ * ctrl->aging_succeeded == true.
+ *
+ * The values of ro_mask and irq_en are initialized inside of the
+ * cpr3_regulator_register() call.
+ */
+struct cpr3_corner {
+	int			floor_volt;
+	int			ceiling_volt;
+	int			open_loop_volt;
+	int			last_volt;
+	int			abs_ceiling_volt;
+	int			unaged_floor_volt;
+	int			unaged_ceiling_volt;
+	int			unaged_open_loop_volt;
+	int			system_volt;
+	int			mem_acc_volt;
+	u32			proc_freq;
+	int			cpr_fuse_corner;
+	u32			target_quot[CPR3_RO_COUNT];
+	u32			ro_scale[CPR3_RO_COUNT];
+	u32			ro_mask;
+	u32			irq_en;
+	int			aging_derate;
+	bool			use_open_loop;
+	bool			ldo_mode_allowed;
+	struct cpr4_sdelta	*sdelta;
+};
+
+/**
+ * struct cprh_corner_band - CPRh controller specific data structure which
+ *			encapsulates the range of corners and the SDELTA
+ *			adjustment table to be applied to the corners within
+ *			the min and max bounds of the corner band.
+ * @corner:		Corner number which defines the corner band boundary
+ * @sdelta:		The SDELTA adjustment table which contains core-count
+ *			and temp based margin adjustments that are applicable
+ *			to the corner band.
+ */
+struct cprh_corner_band {
+	int			corner;
+	struct cpr4_sdelta	*sdelta;
+};
+
+/**
+ * enum cpr3_ldo_type - Constants which define the LDO supply regulator
+ *	types used to manage the subsystem component rail voltage.
+ * %CPR3_LDO_KRYO:	Kryo LDO regulator used to sub-regulate the HMSS
+ *			per-cluster voltage.
+ * %CPR3_LDO300:	LDO regulator used to sub-regulate the GFX voltage.
+ */
+enum cpr3_ldo_type {
+	CPR3_LDO_KRYO	= 0,
+	CPR3_LDO300	= 1,
+};
+
+/**
+ * struct cpr3_regulator - CPR3 logical regulator instance associated with a
+ *			given CPR3 hardware thread
+ * @of_node:		Device node associated with the device tree child node
+ *			of this CPR3 regulator
+ * @thread:		Pointer to the CPR3 thread which manages this CPR3
+ *			regulator
+ * @name:		Unique name for this CPR3 regulator which is filled
+ *			using the device tree regulator-name property
+ * @rdesc:		Regulator description for this CPR3 regulator
+ * @rdev:		Regulator device pointer for the regulator registered
+ *			for this CPR3 regulator
+ * @mem_acc_regulator:	Pointer to the optional mem-acc supply regulator used
+ *			to manage memory circuitry settings based upon CPR3
+ *			regulator output voltage.
+ * @ldo_regulator:	Pointer to the LDO supply regulator used to manage
+ *			per-cluster LDO voltage and bypass state
+ * @ldo_regulator_bypass: Cached copy of the LDO regulator bypass state
+ * @ldo_ret_regulator:	Pointer to the LDO retention supply regulator used to
+ *			manage LDO retention bypass state
+ * @corner:		Array of all corners supported by this CPR3 regulator
+ * @corner_count:	The number of elements in the corner array
+ * @corner_band:	Array of all corner bands supported by CPRh compatible
+ *			controllers
+ * @corner_band_count:	The number of elements in the corner band array
+ * @platform_fuses:	Pointer to platform specific CPR fuse data (only used by
+ *			platform specific CPR3 driver)
+ * @speed_bin_fuse:	Value read from the speed bin fuse parameter
+ * @speed_bins_supported: The number of speed bins supported by the device tree
+ *			configuration for this CPR3 regulator
+ * @cpr_rev_fuse:	Value read from the CPR fusing revision fuse parameter
+ * @fuse_combo:		Platform specific enum value identifying the specific
+ *			combination of fuse values found on a given chip
+ * @fuse_combos_supported: The number of fuse combinations supported by the
+ *			device tree configuration for this CPR3 regulator
+ * @fuse_corner_count:	Number of corners defined by fuse parameters
+ * @fuse_corner_map:	Array of length fuse_corner_count which specifies the
+ *			highest corner associated with each fuse corner.  Note
+ *			that each element must correspond to a valid corner
+ *			and that element values must be strictly increasing.
+ *			Also, it is acceptable for the lowest fuse corner to map
+ *			to a corner other than the lowest.  Likewise, it is
+ *			acceptable for the highest fuse corner to map to a
+ *			corner other than the highest.
+ * @fuse_combo_corner_sum: The sum of the corner counts across all fuse combos
+ * @fuse_combo_offset:	The device tree property array offset for the selected
+ *			fuse combo
+ * @speed_bin_corner_sum: The sum of the corner counts across all speed bins.
+ *			This may be specified as 0 if per speed bin parsing
+ *			support is not required.
+ * @speed_bin_offset:	The device tree property array offset for the selected
+ *			speed bin
+ * @fuse_combo_corner_band_sum: The sum of the corner band counts across all
+ *			fuse combos
+ * @fuse_combo_corner_band_offset: The device tree property array offset for
+ *			the corner band count corresponding to the selected
+ *			fuse combo
+ * @speed_bin_corner_band_sum: The sum of the corner band counts across all
+ *			speed bins. This may be specified as 0 if per speed bin
+ *			parsing support is not required
+ * @speed_bin_corner_band_offset: The device tree property array offset for the
+ *			corner band count corresponding to the selected speed
+ *			bin
+ * @pd_bypass_mask:	Bit mask of power domains associated with this CPR3
+ *			regulator
+ * @dynamic_floor_corner: Index identifying the voltage corner for the CPR3
+ *			regulator whose last_volt value should be used as the
+ *			global CPR floor voltage if all of the power domains
+ *			associated with this CPR3 regulator are bypassed
+ * @uses_dynamic_floor: Boolean flag indicating that dynamic_floor_corner should
+ *			be utilized for the CPR3 regulator
+ * @current_corner:	Index identifying the currently selected voltage corner
+ *			for the CPR3 regulator or less than 0 if no corner has
+ *			been requested
+ * @last_closed_loop_corner: Index identifying the last voltage corner for the
+ *			CPR3 regulator which was configured when operating in
+ *			CPR closed-loop mode or less than 0 if no corner has
+ *			been requested.  CPR registers are only written to when
+ *			using closed-loop mode.
+ * @aggregated:		Boolean flag indicating that this CPR3 regulator
+ *			participated in the last aggregation event
+ * @debug_corner:	Index identifying voltage corner used for displaying
+ *			corner configuration values in debugfs
+ * @ldo_type:		LDO regulator type.
+ * @ldo_min_headroom_volt: Minimum voltage difference in microvolts required
+ *			between the VDD supply voltage and the LDO output in
+ *			order for the LDO to operate
+ * @ldo_max_headroom_volt: Maximum voltage difference in microvolts between
+ *			the input and output of the active LDO hardware to
+ *			maintain optimum operability.
+ * @ldo_adjust_volt:	Voltage in microvolts used to offset margin assigned
+ *			to IR drop between PMIC and CPU
+ * @ldo_ret_volt:	The lowest supported CPU retention voltage in
+ *			microvolts. This voltage may vary part-to-part based
+ *			upon the value of hardware fuses.
+ * @ldo_max_volt:	The maximum physically supported LDO voltage in
+ *			microvolts
+ * @ldo_mode_allowed:	Boolean which indicates if LDO mode is allowed for this
+ *			CPR3 regulator
+ * @vreg_enabled:	Boolean defining the enable state of the CPR3
+ *			regulator's regulator within the regulator framework.
+ * @aging_allowed:	Boolean defining if CPR aging adjustments are allowed
+ *			for this CPR3 regulator given the fuse combo of the
+ *			device
+ * @aging_allow_open_loop_adj: Boolean defining if the open-loop voltage of each
+ *			corner of this regulator should be adjusted as a result
+ *			of an aging measurement.  This flag can be set to false
+ *			when the open-loop voltage adjustments have been
+ *			specified such that they include the maximum possible
+ *			aging adjustment.  This flag is only used if
+ *			aging_allowed == true.
+ * @aging_corner:	The corner that should be configured for this regulator
+ *			when an aging measurement is performed.
+ * @aging_max_adjust_volt: The maximum aging voltage margin in microvolts that
+ *			may be added to the target quotients of this regulator.
+ *			A value of 0 may be specified if this regulator does not
+ *			require any aging adjustment.
+ * @allow_core_count_adj: Core count adjustments are allowed for this regulator.
+ * @allow_temp_adj:	Temperature based adjustments are allowed for this
+ *			regulator.
+ * @max_core_count:	Maximum number of cores considered for core count
+ *			adjustment logic.
+ * @allow_boost:	Voltage boost allowed for this regulator.
+ *
+ * This structure contains both configuration and runtime state data.  The
+ * elements current_corner, last_closed_loop_corner, aggregated, debug_corner,
+ * ldo_mode_allowed, and vreg_enabled are state variables.
+ */
+struct cpr3_regulator {
+	struct device_node	*of_node;
+	struct cpr3_thread	*thread;
+	const char		*name;
+	struct regulator_desc	rdesc;
+	struct regulator_dev	*rdev;
+	struct regulator	*mem_acc_regulator;
+	struct regulator	*ldo_regulator;
+	bool			ldo_regulator_bypass;
+	struct regulator	*ldo_ret_regulator;
+	struct cpr3_corner	*corner;
+	int			corner_count;
+	struct cprh_corner_band *corner_band;
+	u32			corner_band_count;
+
+	void			*platform_fuses;
+	int			speed_bin_fuse;
+	int			speed_bins_supported;
+	int			cpr_rev_fuse;
+	int			fuse_combo;
+	int			fuse_combos_supported;
+	int			fuse_corner_count;
+	int			*fuse_corner_map;
+	int			fuse_combo_corner_sum;
+	int			fuse_combo_offset;
+	int			speed_bin_corner_sum;
+	int			speed_bin_offset;
+	int			fuse_combo_corner_band_sum;
+	int			fuse_combo_corner_band_offset;
+	int			speed_bin_corner_band_sum;
+	int			speed_bin_corner_band_offset;
+	u32			pd_bypass_mask;
+	int			dynamic_floor_corner;
+	bool			uses_dynamic_floor;
+
+	int			current_corner;
+	int			last_closed_loop_corner;
+	bool			aggregated;
+	int			debug_corner;
+	enum cpr3_ldo_type	ldo_type;
+	int			ldo_min_headroom_volt;
+	int			ldo_max_headroom_volt;
+	int			ldo_adjust_volt;
+	int			ldo_ret_volt;
+	int			ldo_max_volt;
+	bool			ldo_mode_allowed;
+	bool			vreg_enabled;
+
+	bool			aging_allowed;
+	bool			aging_allow_open_loop_adj;
+	int			aging_corner;
+	int			aging_max_adjust_volt;
+
+	bool			allow_core_count_adj;
+	bool			allow_temp_adj;
+	int			max_core_count;
+	bool			allow_boost;
+};
+
+/**
+ * struct cpr3_thread - CPR3 hardware thread data structure
+ * @thread_id:		Hardware thread ID
+ * @of_node:		Device node associated with the device tree child node
+ *			of this CPR3 thread
+ * @ctrl:		Pointer to the CPR3 controller which manages this thread
+ * @vreg:		Array of CPR3 regulators handled by the CPR3 thread
+ * @vreg_count:		Number of elements in the vreg array
+ * @aggr_corner:	CPR corner containing the in process aggregated voltage
+ *			and target quotient configurations which will be applied
+ * @last_closed_loop_aggr_corner: CPR corner containing the most recent
+ *			configurations which were written into hardware
+ *			registers when operating in closed loop mode (i.e. with
+ *			CPR enabled)
+ * @consecutive_up:	The number of consecutive CPR step up events needed
+ *			to trigger an up interrupt
+ * @consecutive_down:	The number of consecutive CPR step down events needed
+ *			to trigger a down interrupt
+ * @up_threshold:	The number of CPR error steps required to generate an up
+ *			event
+ * @down_threshold:	The number of CPR error steps required to generate a down
+ *			event
+ *
+ * This structure contains both configuration and runtime state data.  The
+ * elements aggr_corner and last_closed_loop_aggr_corner are state variables.
+ */
+struct cpr3_thread {
+	u32			thread_id;
+	struct device_node	*of_node;
+	struct cpr3_controller	*ctrl;
+	struct cpr3_regulator	*vreg;
+	int			vreg_count;
+	struct cpr3_corner	aggr_corner;
+	struct cpr3_corner	last_closed_loop_aggr_corner;
+
+	u32			consecutive_up;
+	u32			consecutive_down;
+	u32			up_threshold;
+	u32			down_threshold;
+};
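+
+/*
+ * Worked example of the threshold fields, based on the descriptions
+ * above (hypothetical values): with up_threshold = 2 and
+ * consecutive_up = 3, the controller must observe at least 2 error steps
+ * in the up direction on 3 consecutive measurements before an up
+ * interrupt is generated.
+ */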
+
+/* Per CPR controller data */
+/**
+ * enum cpr3_mem_acc_corners - Constants which define the number of mem-acc
+ *		regulator corners available in the mem-acc corner map array.
+ * %CPR3_MEM_ACC_LOW_CORNER:	Index in mem-acc corner map array mapping to the
+ *				mem-acc regulator corner
+ *				to be used for low voltage vdd supply
+ * %CPR3_MEM_ACC_HIGH_CORNER:	Index in mem-acc corner map array mapping to the
+ *				mem-acc regulator corner to be used for high
+ *				voltage vdd supply
+ * %CPR3_MEM_ACC_CORNERS:	Number of elements in the mem-acc corner map
+ *				array
+ */
+enum cpr3_mem_acc_corners {
+	CPR3_MEM_ACC_LOW_CORNER		= 0,
+	CPR3_MEM_ACC_HIGH_CORNER	= 1,
+	CPR3_MEM_ACC_CORNERS		= 2,
+};
+
+/**
+ * enum cpr3_count_mode - CPR3 controller count mode which defines the
+ *		method by which CPR sensor data is acquired
+ * %CPR3_COUNT_MODE_ALL_AT_ONCE_MIN:	Capture all CPR sensor readings
+ *					simultaneously and report the minimum
+ *					value seen in successive measurements
+ * %CPR3_COUNT_MODE_ALL_AT_ONCE_MAX:	Capture all CPR sensor readings
+ *					simultaneously and report the maximum
+ *					value seen in successive measurements
+ * %CPR3_COUNT_MODE_STAGGERED:		Read one sensor at a time in a
+ *					sequential fashion
+ * %CPR3_COUNT_MODE_ALL_AT_ONCE_AGE:	Capture all CPR aging sensor readings
+ *					simultaneously.
+ */
+enum cpr3_count_mode {
+	CPR3_COUNT_MODE_ALL_AT_ONCE_MIN	= 0,
+	CPR3_COUNT_MODE_ALL_AT_ONCE_MAX	= 1,
+	CPR3_COUNT_MODE_STAGGERED	= 2,
+	CPR3_COUNT_MODE_ALL_AT_ONCE_AGE	= 3,
+};
+
+/**
+ * enum cpr_controller_type - supported CPR controller hardware types
+ * %CPR_CTRL_TYPE_CPR3:	HW has CPR3 controller
+ * %CPR_CTRL_TYPE_CPR4:	HW has CPR4 controller
+ * %CPR_CTRL_TYPE_CPRH:	HW has CPRh controller
+ */
+enum cpr_controller_type {
+	CPR_CTRL_TYPE_CPR3,
+	CPR_CTRL_TYPE_CPR4,
+	CPR_CTRL_TYPE_CPRH,
+};
+
+/**
+ * struct cpr3_aging_sensor_info - CPR3 aging sensor information
+ * @sensor_id:		The index of the CPR3 sensor to be used in the aging
+ *			measurement.
+ * @ro_scale:		The CPR ring oscillator (RO) scaling factor for the
+ *			aging sensor with units of QUOT/V.
+ * @init_quot_diff:	The fused quotient difference between aged and un-aged
+ *			paths that was measured at manufacturing time.
+ * @measured_quot_diff: The quotient difference measured at runtime.
+ * @bypass_mask:	Bit mask of the CPR sensors that must be bypassed during
+ *			the aging measurement for this sensor
+ *
+ * This structure contains both configuration and runtime state data.  The
+ * element measured_quot_diff is a state variable.
+ */
+struct cpr3_aging_sensor_info {
+	u32			sensor_id;
+	u32			ro_scale;
+	int			init_quot_diff;
+	int			measured_quot_diff;
+	u32			bypass_mask[CPR3_MAX_SENSOR_COUNT / 32];
+};
+
+/**
+ * struct cpr3_reg_info - Register information data structure
+ * @name:	Register name
+ * @addr:	Register physical address
+ * @value:	Register content
+ * @virt_addr:	Register virtual address
+ *
+ * This data structure is used to dump some critical register contents
+ * when the device crashes due to a kernel panic.
+ */
+struct cpr3_reg_info {
+	const char	*name;
+	u32		addr;
+	u32		value;
+	void __iomem	*virt_addr;
+};
+
+/**
+ * struct cpr3_panic_regs_info - Data structure to dump critical register
+ *		contents.
+ * @reg_count:		Number of elements in the regs array
+ * @regs:		Array of critical registers information
+ *
+ * This data structure is used to dump critical register contents when
+ * the device crashes due to a kernel panic.
+ */
+struct cpr3_panic_regs_info {
+	int			reg_count;
+	struct cpr3_reg_info	*regs;
+};
+
+/**
+ * struct cpr3_controller - CPR3 controller data structure
+ * @dev:		Device pointer for the CPR3 controller device
+ * @name:		Unique name for the CPR3 controller
+ * @ctrl_id:		Controller ID corresponding to the VDD supply number
+ *			that this CPR3 controller manages.
+ * @cpr_ctrl_base:	Virtual address of the CPR3 controller base register
+ * @fuse_base:		Virtual address of fuse row 0
+ * @aging_possible_reg:	Virtual address of an optional platform-specific
+ *			register that must be read to determine if it is
+ *			possible to perform an aging measurement.
+ * @list:		list head used in a global cpr3-regulator list so that
+ *			cpr3-regulator structs can be found easily in RAM dumps
+ * @thread:		Array of CPR3 threads managed by the CPR3 controller
+ * @thread_count:	Number of elements in the thread array
+ * @sensor_owner:	Array of thread IDs indicating which thread owns a given
+ *			CPR sensor
+ * @sensor_count:	The number of CPR sensors found on the CPR loop managed
+ *			by this CPR controller.  Must be equal to the number of
+ *			elements in the sensor_owner array
+ * @soc_revision:	Revision number of the SoC.  This may be unused by
+ *			platforms that do not have different behavior for
+ *			different SoC revisions.
+ * @lock:		Mutex lock used to ensure mutual exclusion between
+ *			all of the threads associated with the controller
+ * @vdd_regulator:	Pointer to the VDD supply regulator which this CPR3
+ *			controller manages
+ * @system_regulator:	Pointer to the optional system-supply regulator upon
+ *			which the VDD supply regulator depends.
+ * @mem_acc_regulator:	Pointer to the optional mem-acc supply regulator used
+ *			to manage memory circuitry settings based upon the
+ *			VDD supply output voltage.
+ * @vdd_limit_regulator: Pointer to the VDD supply limit regulator which is used
+ *			for hardware closed-loop in order to specify ceiling and
+ *			floor voltage limits (platform specific)
+ * @system_supply_max_volt: Voltage in microvolts which corresponds to the
+ *			absolute ceiling voltage of the system-supply
+ * @mem_acc_threshold_volt: mem-acc threshold voltage in microvolts
+ * @mem_acc_corner_map: mem-acc regulator corners mapping to low and high
+ *			voltage mem-acc settings for the memories powered by
+ *			this CPR3 controller and its associated CPR3 regulators
+ * @mem_acc_crossover_volt: Voltage in microvolts corresponding to the voltage
+ *			that the VDD supply must be set to while a MEM ACC
+ *			switch is in progress. This element must be initialized
+ *			for CPRh controllers when a MEM ACC threshold voltage is
+ *			defined.
+ * @core_clk:		Pointer to the CPR3 controller core clock
+ * @iface_clk:		Pointer to the CPR3 interface clock (platform specific)
+ * @bus_clk:		Pointer to the CPR3 bus clock (platform specific)
+ * @irq:		CPR interrupt number
+ * @irq_affinity_mask:	The cpumask for the CPUs which the CPR interrupt should
+ *			have affinity for
+ * @cpu_hotplug_notifier: CPU hotplug notifier used to reset IRQ affinity when a
+ *			CPU is brought back online
+ * @ceiling_irq:	Interrupt number for the interrupt that is triggered
+ *			when hardware closed-loop attempts to exceed the ceiling
+ *			voltage
+ * @apm:		Handle to the array power mux (APM)
+ * @apm_threshold_volt:	Voltage in microvolts which defines the threshold
+ *			voltage to determine the APM supply selection for
+ *			each corner
+ * @apm_crossover_volt:	Voltage in microvolts corresponding to the voltage that
+ *			the VDD supply must be set to while an APM switch is in
+ *			progress. This element must be initialized for CPRh
+ *			controllers when an APM threshold voltage is defined
+ * @apm_adj_volt:	Minimum difference between APM threshold voltage and
+ *			open-loop voltage which allows the APM threshold voltage
+ *			to be used as a ceiling
+ * @apm_high_supply:	APM supply to configure if VDD voltage is greater than
+ *			or equal to the APM threshold voltage
+ * @apm_low_supply:	APM supply to configure if the VDD voltage is less than
+ *			the APM threshold voltage
+ * @base_volt:		Minimum voltage in microvolts supported by the VDD
+ *			supply managed by this CPR controller
+ * @corner_switch_delay_time: The delay time in nanoseconds used by the CPR
+ *			controller to wait for voltage settling before
+ *			acknowledging the OSM block after corner changes
+ * @cpr_clock_rate:	CPR reference clock frequency in Hz.
+ * @sensor_time:	The time in nanoseconds that each sensor takes to
+ *			perform a measurement.
+ * @loop_time:		The time in nanoseconds between consecutive CPR
+ *			measurements.
+ * @up_down_delay_time: The time to delay in nanoseconds between consecutive CPR
+ *			measurements when the last measurement recommended
+ *			increasing or decreasing the vdd-supply voltage.
+ *			(platform specific)
+ * @idle_clocks:	Number of CPR reference clock ticks that the CPR
+ *			controller waits in transitional states.
+ * @step_quot_init_min:	The default minimum CPR step quotient value.  The step
+ *			quotient is the number of additional ring oscillator
+ *			ticks observed when increasing one step in vdd-supply
+ *			output voltage.
+ * @step_quot_init_max:	The default maximum CPR step quotient value.
+ * @step_volt:		Step size in microvolts between available set points
+ *			of the VDD supply
+ * @down_error_step_limit: CPR4 hardware closed-loop down error step limit which
+ *			defines the maximum number of VDD supply regulator steps
+ *			that the voltage may be reduced as the result of a
+ *			single CPR measurement.
+ * @up_error_step_limit: CPR4 hardware closed-loop up error step limit which
+ *			defines the maximum number of VDD supply regulator steps
+ *			that the voltage may be increased as the result of a
+ *			single CPR measurement.
+ * @count_mode:		CPR controller count mode
+ * @count_repeat:	Number of times to perform consecutive sensor
+ *			measurements when using all-at-once count modes.
+ * @proc_clock_throttle: Defines the processor clock frequency throttling
+ *			register value to use.  This can be used to reduce the
+ *			clock frequency when a power domain exits a low power
+ *			mode until CPR settles at a new voltage.
+ *			(platform specific)
+ * @cpr_allowed_hw:	Boolean which indicates if closed-loop CPR operation is
+ *			permitted for a given chip based upon hardware fuse
+ *			values
+ * @cpr_allowed_sw:	Boolean which indicates if closed-loop CPR operation is
+ *			permitted based upon software policies
+ * @supports_hw_closed_loop: Boolean which indicates if this CPR3/4 controller
+ *			physically supports hardware closed-loop CPR operation
+ * @use_hw_closed_loop:	Boolean which indicates that this controller will be
+ *			using hardware closed-loop operation in place of
+ *			software closed-loop operation.
+ * @ctrl_type:		CPR controller type
+ * @saw_use_unit_mV:	Boolean which indicates that the unit used in the SAW
+ *			PVC interface is mV.
+ * @aggr_corner:	CPR corner containing the most recently aggregated
+ *			voltage configurations which are being used currently
+ * @cpr_enabled:	Boolean which indicates that the CPR controller is
+ *			enabled and operating in closed-loop mode.  CPR clocks
+ *			have been prepared and enabled whenever this flag is
+ *			true.
+ * @last_corner_was_closed_loop: Boolean indicating if the last known corners
+ *			were updated during closed loop operation.
+ * @cpr_suspended:	Boolean which indicates that CPR has been temporarily
+ *			disabled while entering system suspend.
+ * @debugfs:		Pointer to the debugfs directory of this CPR3 controller
+ * @aging_ref_volt:	Reference voltage in microvolts to configure when
+ *			performing CPR aging measurements.
+ * @aging_vdd_mode:	vdd-supply regulator mode to configure before performing
+ *			a CPR aging measurement.  It should be one of
+ *			REGULATOR_MODE_*.
+ * @aging_complete_vdd_mode: vdd-supply regulator mode to configure after
+ *			performing a CPR aging measurement.  It should be one of
+ *			REGULATOR_MODE_*.
+ * @aging_ref_adjust_volt: The reference aging voltage margin in microvolts that
+ *			should be added to the target quotients of the
+ *			regulators managed by this controller after derating.
+ * @aging_required:	Flag which indicates that a CPR aging measurement still
+ *			needs to be performed for this CPR3 controller.
+ * @aging_succeeded:	Flag which indicates that a CPR aging measurement has
+ *			completed successfully.
+ * @aging_failed:	Flag which indicates that a CPR aging measurement has
+ *			failed to complete successfully.
+ * @aging_sensor:	Array of CPR3 aging sensors which are used to perform
+ *			aging measurements at runtime.
+ * @aging_sensor_count:	Number of elements in the aging_sensor array
+ * @aging_possible_mask: Optional bitmask used to mask off the
+ *			aging_possible_reg register.
+ * @aging_possible_val:	Optional value that the masked aging_possible_reg
+ *			register must have in order for a CPR aging measurement
+ *			to be possible.
+ * @aging_gcnt_scaling_factor: The scaling factor used to derive the gate count
+ *			used for aging measurements. This value is divided by
+ *			1000 when used as shown in the below equation:
+ *			      Aging_GCNT = GCNT_REF * scaling_factor / 1000.
+ *			For example, a value of 1500 specifies that the gate
+ *			count (GCNT) used for aging measurement should be 1.5
+ *			times the reference gate count (GCNT_REF).
+ * @step_quot_fixed:	Fixed step quotient value used for target quotient
+ *			adjustment if use_dynamic_step_quot is not set.
+ *			This parameter is only relevant for CPR4 controllers
+ *			when using the per-online-core or per-temperature
+ *			adjustments.
+ * @initial_temp_band:	Temperature band used for calculation of base-line
+ *			target quotients (fused).
+ * @use_dynamic_step_quot: Boolean value which indicates that margin adjustment
+ *			of target quotient will be based on the step quotient
+ *			calculated dynamically in hardware for each RO.
+ * @allow_core_count_adj: Core count adjustments are allowed for this controller
+ * @allow_temp_adj:	Temperature based adjustments are allowed for
+ *			this controller
+ * @allow_boost:	Voltage boost allowed for this controller.
+ * @temp_band_count:	Number of temperature bands used for temperature based
+ *			adjustment logic
+ * @temp_points:	Array of temperature points in decidegrees Celsius used
+ *			to specify the ranges for selected temperature bands.
+ *			The array must have (temp_band_count - 1) elements
+ *			allocated.
+ * @temp_sensor_id_start: Start ID of temperature sensors used for temperature
+ *			based adjustments.
+ * @temp_sensor_id_end:	End ID of temperature sensors used for temperature
+ *			based adjustments.
+ * @voltage_settling_time: The time in nanoseconds that it takes for the
+ *			VDD supply voltage to settle after being increased or
+ *			decreased by step_volt microvolts which is used when
+ *			SDELTA voltage margin adjustments are applied.
+ * @panic_regs_info:	Array of panic registers information which provides the
+ *			list of registers to dump when the device crashes.
+ * @panic_notifier:	Notifier block registered to global panic notifier list.
+ * @support_ldo300_vreg: Boolean value which indicates that this CPR controller
+ *			manages an underlying LDO regulator of type LDO300.
+ * @reset_step_quot_loop_en: Boolean value which indicates that this CPR
+ *			controller should be configured to reset step_quot on
+ *			each loop_en = 0 transition. This configuration allows
+ *			the CPR controller to first use the default step_quot
+ *			and then later switch to the run-time calibrated
+ *			step_quot.
+ *
+ * This structure contains both configuration and runtime state data.  The
+ * elements cpr_allowed_sw, use_hw_closed_loop, aggr_corner, cpr_enabled,
+ * last_corner_was_closed_loop, cpr_suspended, aging_ref_adjust_volt,
+ * aging_required, aging_succeeded, and aging_failed are state variables.
+ *
+ * The apm* elements do not need to be initialized if the VDD supply managed by
+ * the CPR3 controller does not utilize an APM.
+ *
+ * The elements step_quot_fixed, initial_temp_band, allow_core_count_adj,
+ * allow_temp_adj and temp* need to be initialized for CPR4 controllers which
+ * are using per-online-core or per-temperature adjustments.
+ */
+struct cpr3_controller {
+	struct device		*dev;
+	const char		*name;
+	int			ctrl_id;
+	void __iomem		*cpr_ctrl_base;
+	void __iomem		*fuse_base;
+	void __iomem		*aging_possible_reg;
+	struct list_head	list;
+	struct cpr3_thread	*thread;
+	int			thread_count;
+	u8			*sensor_owner;
+	int			sensor_count;
+	int			soc_revision;
+	struct mutex		lock;
+	struct regulator	*vdd_regulator;
+	struct regulator	*system_regulator;
+	struct regulator	*mem_acc_regulator;
+	struct regulator	*vdd_limit_regulator;
+	int			system_supply_max_volt;
+	int			mem_acc_threshold_volt;
+	int			mem_acc_corner_map[CPR3_MEM_ACC_CORNERS];
+	int			mem_acc_crossover_volt;
+	struct clk		*core_clk;
+	struct clk		*iface_clk;
+	struct clk		*bus_clk;
+	int			irq;
+	struct cpumask		irq_affinity_mask;
+	struct notifier_block	cpu_hotplug_notifier;
+	int			ceiling_irq;
+	struct msm_apm_ctrl_dev *apm;
+	int			apm_threshold_volt;
+	int			apm_crossover_volt;
+	int			apm_adj_volt;
+	enum msm_apm_supply	apm_high_supply;
+	enum msm_apm_supply	apm_low_supply;
+	int			base_volt;
+	u32			corner_switch_delay_time;
+	u32			cpr_clock_rate;
+	u32			sensor_time;
+	u32			loop_time;
+	u32			up_down_delay_time;
+	u32			idle_clocks;
+	u32			step_quot_init_min;
+	u32			step_quot_init_max;
+	int			step_volt;
+	u32			down_error_step_limit;
+	u32			up_error_step_limit;
+	enum cpr3_count_mode	count_mode;
+	u32			count_repeat;
+	u32			proc_clock_throttle;
+	bool			cpr_allowed_hw;
+	bool			cpr_allowed_sw;
+	bool			supports_hw_closed_loop;
+	bool			use_hw_closed_loop;
+	enum cpr_controller_type ctrl_type;
+	bool			saw_use_unit_mV;
+	struct cpr3_corner	aggr_corner;
+	bool			cpr_enabled;
+	bool			last_corner_was_closed_loop;
+	bool			cpr_suspended;
+	struct dentry		*debugfs;
+
+	int			aging_ref_volt;
+	unsigned int		aging_vdd_mode;
+	unsigned int		aging_complete_vdd_mode;
+	int			aging_ref_adjust_volt;
+	bool			aging_required;
+	bool			aging_succeeded;
+	bool			aging_failed;
+	struct cpr3_aging_sensor_info *aging_sensor;
+	int			aging_sensor_count;
+	u32			aging_possible_mask;
+	u32			aging_possible_val;
+	u32			aging_gcnt_scaling_factor;
+
+	u32			step_quot_fixed;
+	u32			initial_temp_band;
+	bool			use_dynamic_step_quot;
+	bool			allow_core_count_adj;
+	bool			allow_temp_adj;
+	bool			allow_boost;
+	int			temp_band_count;
+	int			*temp_points;
+	u32			temp_sensor_id_start;
+	u32			temp_sensor_id_end;
+	u32			voltage_settling_time;
+	struct cpr3_panic_regs_info *panic_regs_info;
+	struct notifier_block	panic_notifier;
+	bool			support_ldo300_vreg;
+	bool			reset_step_quot_loop_en;
+};
+
+/* Used for rounding voltages up to the nearest physically available set point. */
+#define CPR3_ROUND(n, d) (DIV_ROUND_UP(n, d) * (d))
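+
+/*
+ * Editor's illustration (not part of the original patch): with a 5000 uV
+ * step size, CPR3_ROUND(1000001, 5000) = DIV_ROUND_UP(1000001, 5000) * 5000
+ * = 201 * 5000 = 1005000 uV, i.e. voltages are rounded up to the next
+ * available set point.
+ */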
+
+#define cpr3_err(cpr3_thread, message, ...) \
+	pr_err("%s: " message, (cpr3_thread)->name, ##__VA_ARGS__)
+#define cpr3_info(cpr3_thread, message, ...) \
+	pr_info("%s: " message, (cpr3_thread)->name, ##__VA_ARGS__)
+#define cpr3_debug(cpr3_thread, message, ...) \
+	pr_debug("%s: " message, (cpr3_thread)->name, ##__VA_ARGS__)
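+
+/*
+ * Illustrative usage (editor's sketch, not from the original patch): these
+ * macros prefix each message with the name of the passed-in object, e.g.
+ *
+ *	cpr3_err(vreg, "invalid corner=%d\n", corner);
+ *
+ * prints "<regulator name>: invalid corner=..." via pr_err().
+ */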
+
+/*
+ * Offset subtracted from voltage corner values passed in from the regulator
+ * framework in order to get internal voltage corner values.  This is needed
+ * since the regulator framework treats 0 as an error value at regulator
+ * registration time.
+ */
+#define CPR3_CORNER_OFFSET	1
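+
+/*
+ * Editor's note (illustrative, not part of the original patch): a corner
+ * value of 3 passed in by the regulator framework therefore maps to internal
+ * corner index 3 - CPR3_CORNER_OFFSET = 2, and corner value 0 is never used
+ * because the framework reserves it as an error value.
+ */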
+
+#ifdef CONFIG_REGULATOR_CPR3
+
+int cpr3_regulator_register(struct platform_device *pdev,
+			struct cpr3_controller *ctrl);
+int cpr3_regulator_unregister(struct cpr3_controller *ctrl);
+int cpr3_regulator_suspend(struct cpr3_controller *ctrl);
+int cpr3_regulator_resume(struct cpr3_controller *ctrl);
+
+int cpr3_allocate_threads(struct cpr3_controller *ctrl, u32 min_thread_id,
+			u32 max_thread_id);
+int cpr3_map_fuse_base(struct cpr3_controller *ctrl,
+			struct platform_device *pdev);
+int cpr3_read_fuse_param(void __iomem *fuse_base_addr,
+			const struct cpr3_fuse_param *param, u64 *param_value);
+int cpr3_convert_open_loop_voltage_fuse(int ref_volt, int step_volt, u32 fuse,
+			int fuse_len);
+u64 cpr3_interpolate(u64 x1, u64 y1, u64 x2, u64 y2, u64 x);
+int cpr3_parse_array_property(struct cpr3_regulator *vreg,
+			const char *prop_name, int tuple_size, u32 *out);
+int cpr3_parse_corner_array_property(struct cpr3_regulator *vreg,
+			const char *prop_name, int tuple_size, u32 *out);
+int cpr3_parse_corner_band_array_property(struct cpr3_regulator *vreg,
+			const char *prop_name, int tuple_size, u32 *out);
+int cpr3_parse_common_corner_data(struct cpr3_regulator *vreg);
+int cpr3_parse_thread_u32(struct cpr3_thread *thread, const char *propname,
+			u32 *out_value, u32 value_min, u32 value_max);
+int cpr3_parse_ctrl_u32(struct cpr3_controller *ctrl, const char *propname,
+			u32 *out_value, u32 value_min, u32 value_max);
+int cpr3_parse_common_thread_data(struct cpr3_thread *thread);
+int cpr3_parse_common_ctrl_data(struct cpr3_controller *ctrl);
+int cpr3_limit_open_loop_voltages(struct cpr3_regulator *vreg);
+void cpr3_open_loop_voltage_as_ceiling(struct cpr3_regulator *vreg);
+int cpr3_limit_floor_voltages(struct cpr3_regulator *vreg);
+void cpr3_print_quots(struct cpr3_regulator *vreg);
+int cpr3_adjust_fused_open_loop_voltages(struct cpr3_regulator *vreg,
+			int *fuse_volt);
+int cpr3_adjust_open_loop_voltages(struct cpr3_regulator *vreg);
+int cpr3_quot_adjustment(int ro_scale, int volt_adjust);
+int cpr3_voltage_adjustment(int ro_scale, int quot_adjust);
+int cpr3_parse_closed_loop_voltage_adjustments(struct cpr3_regulator *vreg,
+			u64 *ro_sel, int *volt_adjust,
+			int *volt_adjust_fuse, int *ro_scale);
+int cpr4_parse_core_count_temp_voltage_adj(struct cpr3_regulator *vreg,
+			bool use_corner_band);
+int cpr3_apm_init(struct cpr3_controller *ctrl);
+int cpr3_mem_acc_init(struct cpr3_regulator *vreg);
+void cprh_adjust_voltages_for_apm(struct cpr3_regulator *vreg);
+void cprh_adjust_voltages_for_mem_acc(struct cpr3_regulator *vreg);
+int cpr3_adjust_target_quotients(struct cpr3_regulator *vreg,
+			int *fuse_volt_adjust);
+
+#else
+
+static inline int cpr3_regulator_register(struct platform_device *pdev,
+			struct cpr3_controller *ctrl)
+{
+	return -ENXIO;
+}
+
+static inline int cpr3_regulator_unregister(struct cpr3_controller *ctrl)
+{
+	return -ENXIO;
+}
+
+static inline int cpr3_regulator_suspend(struct cpr3_controller *ctrl)
+{
+	return -ENXIO;
+}
+
+static inline int cpr3_regulator_resume(struct cpr3_controller *ctrl)
+{
+	return -ENXIO;
+}
+
+static inline int cpr3_get_thread_name(struct cpr3_thread *thread,
+			struct device_node *thread_node)
+{
+	return -EPERM;
+}
+
+static inline int cpr3_allocate_threads(struct cpr3_controller *ctrl,
+			u32 min_thread_id, u32 max_thread_id)
+{
+	return -EPERM;
+}
+
+static inline int cpr3_map_fuse_base(struct cpr3_controller *ctrl,
+			struct platform_device *pdev)
+{
+	return -ENXIO;
+}
+
+static inline int cpr3_read_fuse_param(void __iomem *fuse_base_addr,
+			const struct cpr3_fuse_param *param, u64 *param_value)
+{
+	return -EPERM;
+}
+
+static inline int cpr3_convert_open_loop_voltage_fuse(int ref_volt,
+			int step_volt, u32 fuse, int fuse_len)
+{
+	return -EPERM;
+}
+
+static inline u64 cpr3_interpolate(u64 x1, u64 y1, u64 x2, u64 y2, u64 x)
+{
+	return 0;
+}
+
+static inline int cpr3_parse_array_property(struct cpr3_regulator *vreg,
+			const char *prop_name, int tuple_size, u32 *out)
+{
+	return -EPERM;
+}
+
+static inline int cpr3_parse_corner_array_property(struct cpr3_regulator *vreg,
+			const char *prop_name, int tuple_size, u32 *out)
+{
+	return -EPERM;
+}
+
+static inline int cpr3_parse_corner_band_array_property(
+			struct cpr3_regulator *vreg, const char *prop_name,
+			int tuple_size, u32 *out)
+{
+	return -EPERM;
+}
+
+static inline int cpr3_parse_common_corner_data(struct cpr3_regulator *vreg)
+{
+	return -EPERM;
+}
+
+static inline int cpr3_parse_thread_u32(struct cpr3_thread *thread,
+			const char *propname, u32 *out_value, u32 value_min,
+			u32 value_max)
+{
+	return -EPERM;
+}
+
+static inline int cpr3_parse_ctrl_u32(struct cpr3_controller *ctrl,
+			const char *propname, u32 *out_value, u32 value_min,
+			u32 value_max)
+{
+	return -EPERM;
+}
+
+static inline int cpr3_parse_common_thread_data(struct cpr3_thread *thread)
+{
+	return -EPERM;
+}
+
+static inline int cpr3_parse_common_ctrl_data(struct cpr3_controller *ctrl)
+{
+	return -EPERM;
+}
+
+static inline int cpr3_limit_open_loop_voltages(struct cpr3_regulator *vreg)
+{
+	return -EPERM;
+}
+
+static inline void cpr3_open_loop_voltage_as_ceiling(
+			struct cpr3_regulator *vreg)
+{
+}
+
+static inline int cpr3_limit_floor_voltages(struct cpr3_regulator *vreg)
+{
+	return -EPERM;
+}
+
+static inline void cpr3_print_quots(struct cpr3_regulator *vreg)
+{
+}
+
+static inline int cpr3_adjust_fused_open_loop_voltages(
+			struct cpr3_regulator *vreg, int *fuse_volt)
+{
+	return -EPERM;
+}
+
+static inline int cpr3_adjust_open_loop_voltages(struct cpr3_regulator *vreg)
+{
+	return -EPERM;
+}
+
+static inline int cpr3_quot_adjustment(int ro_scale, int volt_adjust)
+{
+	return 0;
+}
+
+static inline int cpr3_voltage_adjustment(int ro_scale, int quot_adjust)
+{
+	return 0;
+}
+
+static inline int cpr3_parse_closed_loop_voltage_adjustments(
+			struct cpr3_regulator *vreg, u64 *ro_sel,
+			int *volt_adjust, int *volt_adjust_fuse, int *ro_scale)
+{
+	return 0;
+}
+
+static inline int cpr4_parse_core_count_temp_voltage_adj(
+			struct cpr3_regulator *vreg, bool use_corner_band)
+{
+	return 0;
+}
+
+static inline int cpr3_apm_init(struct cpr3_controller *ctrl)
+{
+	return 0;
+}
+
+static inline int cpr3_mem_acc_init(struct cpr3_regulator *vreg)
+{
+	return 0;
+}
+
+static inline void cprh_adjust_voltages_for_apm(struct cpr3_regulator *vreg)
+{
+}
+
+static inline void cprh_adjust_voltages_for_mem_acc(struct cpr3_regulator *vreg)
+{
+}
+
+static inline int cpr3_adjust_target_quotients(struct cpr3_regulator *vreg,
+			int *fuse_volt_adjust)
+{
+	return 0;
+}
+
+#endif /* CONFIG_REGULATOR_CPR3 */
+
+#endif /* __REGULATOR_CPR_REGULATOR_H__ */
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/regulator/cpr3-util.c	2019-01-22 16:16:26.263271400 +0100
@@ -0,0 +1,2413 @@
+/*
+ * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/*
+ * This file contains utility functions to be used by platform-specific CPR3
+ * regulator drivers.
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/cpumask.h>
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+
+#include "cpr3-regulator.h"
+
+#define BYTES_PER_FUSE_ROW		8
+#define MAX_FUSE_ROW_BIT		63
+
+#define CPR3_CONSECUTIVE_UP_DOWN_MIN	0
+#define CPR3_CONSECUTIVE_UP_DOWN_MAX	15
+#define CPR3_UP_DOWN_THRESHOLD_MIN	0
+#define CPR3_UP_DOWN_THRESHOLD_MAX	31
+#define CPR3_STEP_QUOT_MIN		0
+#define CPR3_STEP_QUOT_MAX		63
+#define CPR3_IDLE_CLOCKS_MIN		0
+#define CPR3_IDLE_CLOCKS_MAX		31
+
+/* This constant has units of uV/mV so 1000 corresponds to 100%. */
+#define CPR3_AGING_DERATE_UNITY		1000
+
+/**
+ * cpr3_allocate_regulators() - allocate and initialize CPR3 regulators for a
+ *		given thread based upon device tree data
+ * @thread:		Pointer to the CPR3 thread
+ *
+ * This function allocates the thread->vreg array based upon the number of
+ * device tree regulator subnodes.  It also initializes generic elements of each
+ * regulator struct such as name, of_node, and thread.
+ *
+ * Return: 0 on success, errno on failure
+ */
+static int cpr3_allocate_regulators(struct cpr3_thread *thread)
+{
+	struct device_node *node;
+	int i, rc;
+
+	thread->vreg_count = 0;
+
+	for_each_available_child_of_node(thread->of_node, node) {
+		thread->vreg_count++;
+	}
+
+	thread->vreg = devm_kcalloc(thread->ctrl->dev, thread->vreg_count,
+			sizeof(*thread->vreg), GFP_KERNEL);
+	if (!thread->vreg)
+		return -ENOMEM;
+
+	i = 0;
+	for_each_available_child_of_node(thread->of_node, node) {
+		thread->vreg[i].of_node = node;
+		thread->vreg[i].thread = thread;
+
+		rc = of_property_read_string(node, "regulator-name",
+						&thread->vreg[i].name);
+		if (rc) {
+			dev_err(thread->ctrl->dev, "could not find regulator name, rc=%d\n",
+				rc);
+			return rc;
+		}
+
+		i++;
+	}
+
+	return 0;
+}
+
+/**
+ * cpr3_allocate_threads() - allocate and initialize CPR3 threads for a given
+ *			     controller based upon device tree data
+ * @ctrl:		Pointer to the CPR3 controller
+ * @min_thread_id:	Minimum allowed hardware thread ID for this controller
+ * @max_thread_id:	Maximum allowed hardware thread ID for this controller
+ *
+ * This function allocates the ctrl->thread array based upon the number of
+ * device tree thread subnodes.  It also initializes generic elements of each
+ * thread struct such as thread_id, of_node, ctrl, and vreg array.
+ *
+ * Return: 0 on success, errno on failure
+ */
+int cpr3_allocate_threads(struct cpr3_controller *ctrl, u32 min_thread_id,
+			u32 max_thread_id)
+{
+	struct device *dev = ctrl->dev;
+	struct device_node *thread_node;
+	int i, j, rc;
+
+	ctrl->thread_count = 0;
+
+	for_each_available_child_of_node(dev->of_node, thread_node) {
+		ctrl->thread_count++;
+	}
+
+	ctrl->thread = devm_kcalloc(dev, ctrl->thread_count,
+			sizeof(*ctrl->thread), GFP_KERNEL);
+	if (!ctrl->thread)
+		return -ENOMEM;
+
+	i = 0;
+	for_each_available_child_of_node(dev->of_node, thread_node) {
+		ctrl->thread[i].of_node = thread_node;
+		ctrl->thread[i].ctrl = ctrl;
+
+		rc = of_property_read_u32(thread_node, "qcom,cpr-thread-id",
+					  &ctrl->thread[i].thread_id);
+		if (rc) {
+			dev_err(dev, "could not read DT property qcom,cpr-thread-id, rc=%d\n",
+				rc);
+			return rc;
+		}
+
+		if (ctrl->thread[i].thread_id < min_thread_id ||
+				ctrl->thread[i].thread_id > max_thread_id) {
+			dev_err(dev, "invalid thread id = %u; not within [%u, %u]\n",
+				ctrl->thread[i].thread_id, min_thread_id,
+				max_thread_id);
+			return -EINVAL;
+		}
+
+		/* Verify that the thread ID is unique for all child nodes. */
+		for (j = 0; j < i; j++) {
+			if (ctrl->thread[j].thread_id
+					== ctrl->thread[i].thread_id) {
+				dev_err(dev, "duplicate thread id = %u found\n",
+					ctrl->thread[i].thread_id);
+				return -EINVAL;
+			}
+		}
+
+		rc = cpr3_allocate_regulators(&ctrl->thread[i]);
+		if (rc)
+			return rc;
+
+		i++;
+	}
+
+	return 0;
+}
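+
+/*
+ * Illustrative device tree layout consumed by cpr3_allocate_threads() and
+ * cpr3_allocate_regulators() (editor's sketch; node and regulator names are
+ * hypothetical, only qcom,cpr-thread-id and regulator-name are taken from
+ * the code above):
+ *
+ *	thread@0 {
+ *		qcom,cpr-thread-id = <0>;
+ *
+ *		apc0-vreg {
+ *			regulator-name = "apc0_corner";
+ *		};
+ *	};
+ */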
+
+/**
+ * cpr3_map_fuse_base() - ioremap the base address of the fuse region
+ * @ctrl:	Pointer to the CPR3 controller
+ * @pdev:	Platform device pointer for the CPR3 controller
+ *
+ * Return: 0 on success, errno on failure
+ */
+int cpr3_map_fuse_base(struct cpr3_controller *ctrl,
+			struct platform_device *pdev)
+{
+	struct resource *res;
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "fuse_base");
+	if (!res || !res->start) {
+		dev_err(&pdev->dev, "fuse base address is missing\n");
+		return -ENXIO;
+	}
+
+	ctrl->fuse_base = devm_ioremap(&pdev->dev, res->start,
+						resource_size(res));
+	if (!ctrl->fuse_base) {
+		dev_err(&pdev->dev, "could not map fuse base address\n");
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+/**
+ * cpr3_read_fuse_param() - reads a CPR3 fuse parameter out of eFuses
+ * @fuse_base_addr:	Virtual memory address of the eFuse base address
+ * @param:		Null-terminated array of fuse param segments to read
+ *			from
+ * @param_value:	Output with value read from the eFuses
+ *
+ * This function reads from each of the parameter segments listed in the param
+ * array and concatenates their values together.  Reading stops when an element
+ * is reached which has all 0 struct values.  The total number of bits specified
+ * for the fuse parameter across all segments must be less than or equal to 64.
+ *
+ * Return: 0 on success, errno on failure
+ */
+int cpr3_read_fuse_param(void __iomem *fuse_base_addr,
+		const struct cpr3_fuse_param *param, u64 *param_value)
+{
+	u64 fuse_val, val;
+	int bits;
+	int bits_total = 0;
+
+	*param_value = 0;
+
+	while (param->row || param->bit_start || param->bit_end) {
+		if (param->bit_start > param->bit_end
+		    || param->bit_end > MAX_FUSE_ROW_BIT) {
+			pr_err("Invalid fuse parameter segment: row=%u, start=%u, end=%u\n",
+				param->row, param->bit_start, param->bit_end);
+			return -EINVAL;
+		}
+
+		bits = param->bit_end - param->bit_start + 1;
+		if (bits_total + bits > 64) {
+			pr_err("Invalid fuse parameter segments; total bits = %d\n",
+				bits_total + bits);
+			return -EINVAL;
+		}
+
+		fuse_val = readq_relaxed(fuse_base_addr
+					 + param->row * BYTES_PER_FUSE_ROW);
+		val = (fuse_val >> param->bit_start) & ((1ULL << bits) - 1);
+		*param_value |= val << bits_total;
+		bits_total += bits;
+
+		param++;
+	}
+
+	return 0;
+}
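+
+/*
+ * Illustrative fuse parameter table for cpr3_read_fuse_param() (editor's
+ * sketch; the row and bit positions are hypothetical).  Reading stops at the
+ * all-zero terminator, and the two segments below are concatenated, low
+ * bits first, into one 8-bit value:
+ *
+ *	static const struct cpr3_fuse_param example_param[] = {
+ *		{ .row = 65, .bit_start = 59, .bit_end = 63 },
+ *		{ .row = 66, .bit_start = 0,  .bit_end = 2 },
+ *		{ },
+ *	};
+ */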
+
+/**
+ * cpr3_convert_open_loop_voltage_fuse() - converts an open loop voltage fuse
+ *		value into an absolute voltage with units of microvolts
+ * @ref_volt:		Reference voltage in microvolts
+ * @step_volt:		The step size in microvolts of the fuse LSB
+ * @fuse:		Open loop voltage fuse value
+ * @fuse_len:		The bit length of the fuse value
+ *
+ * The MSB of the fuse parameter corresponds to a sign bit.  If it is set, then
+ * the lower bits correspond to the number of steps to go down from the
+ * reference voltage.  If it is not set, then the lower bits correspond to the
+ * number of steps to go up from the reference voltage.
+ *
+ * Return: open loop voltage in microvolts
+ */
+int cpr3_convert_open_loop_voltage_fuse(int ref_volt, int step_volt, u32 fuse,
+					int fuse_len)
+{
+	int sign, steps;
+
+	sign = (fuse & (1 << (fuse_len - 1))) ? -1 : 1;
+	steps = fuse & ((1 << (fuse_len - 1)) - 1);
+
+	return ref_volt + sign * steps * step_volt;
+}
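+
+/*
+ * Editor's worked example (values hypothetical): with ref_volt = 1000000,
+ * step_volt = 10000 and fuse_len = 6, a fuse value of 0x25 has the sign bit
+ * (bit 5) set and steps = 5, giving 1000000 - 5 * 10000 = 950000 uV, while
+ * a fuse value of 0x05 gives 1000000 + 5 * 10000 = 1050000 uV.
+ */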
+
+/**
+ * cpr3_interpolate() - performs linear interpolation
+ * @x1:		Lower known x value
+ * @y1:		Lower known y value
+ * @x2:		Upper known x value
+ * @y2:		Upper known y value
+ * @x:		Intermediate x value
+ *
+ * Returns y where (x, y) falls on the line between (x1, y1) and (x2, y2).
+ * It is required that x1 < x2, y1 <= y2, and x1 <= x <= x2.  If these
+ * conditions are not met, then y2 will be returned.
+ */
+u64 cpr3_interpolate(u64 x1, u64 y1, u64 x2, u64 y2, u64 x)
+{
+	u64 temp;
+
+	if (x1 >= x2 || y1 > y2 || x1 > x || x > x2)
+		return y2;
+
+	temp = (x2 - x) * (y2 - y1);
+	do_div(temp, (u32)(x2 - x1));
+
+	return y2 - temp;
+}
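+
+/*
+ * Editor's worked example (values hypothetical): cpr3_interpolate(100, 500,
+ * 200, 700, 150) computes temp = (200 - 150) * (700 - 500) = 10000, divides
+ * by (200 - 100) to get 100, and returns 700 - 100 = 600, i.e. the midpoint
+ * of the line between (100, 500) and (200, 700).
+ */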
+
+/**
+ * cpr3_parse_array_property() - fill an array from a portion of the values
+ *		specified for a device tree property
+ * @vreg:		Pointer to the CPR3 regulator
+ * @prop_name:		The name of the device tree property to read from
+ * @tuple_size:		The number of elements in each tuple
+ * @out:		Output data array which must be of size tuple_size
+ *
+ * cpr3_parse_common_corner_data() must be called for vreg before this function
+ * is called so that fuse combo and speed bin size elements are initialized.
+ *
+ * Three formats are supported for the device tree property:
+ * 1. Length == tuple_size
+ *	(reading begins at index 0)
+ * 2. Length == tuple_size * vreg->fuse_combos_supported
+ *	(reading begins at index tuple_size * vreg->fuse_combo)
+ * 3. Length == tuple_size * vreg->speed_bins_supported
+ *	(reading begins at index tuple_size * vreg->speed_bin_fuse)
+ *
+ * All other property lengths are treated as errors.
+ *
+ * Return: 0 on success, errno on failure
+ */
+int cpr3_parse_array_property(struct cpr3_regulator *vreg,
+		const char *prop_name, int tuple_size, u32 *out)
+{
+	struct device_node *node = vreg->of_node;
+	int len = 0;
+	int i, offset, rc;
+
+	if (!of_find_property(node, prop_name, &len)) {
+		cpr3_err(vreg, "property %s is missing\n", prop_name);
+		return -EINVAL;
+	}
+
+	if (len == tuple_size * sizeof(u32)) {
+		offset = 0;
+	} else if (len == tuple_size * vreg->fuse_combos_supported
+				     * sizeof(u32)) {
+		offset = tuple_size * vreg->fuse_combo;
+	} else if (vreg->speed_bins_supported > 0 &&
+		 len == tuple_size * vreg->speed_bins_supported * sizeof(u32)) {
+		offset = tuple_size * vreg->speed_bin_fuse;
+	} else {
+		if (vreg->speed_bins_supported > 0)
+			cpr3_err(vreg, "property %s has invalid length=%d, should be %zu, %zu, or %zu\n",
+				prop_name, len,
+				tuple_size * sizeof(u32),
+				tuple_size * vreg->speed_bins_supported
+					   * sizeof(u32),
+				tuple_size * vreg->fuse_combos_supported
+					   * sizeof(u32));
+		else
+			cpr3_err(vreg, "property %s has invalid length=%d, should be %zu or %zu\n",
+				prop_name, len,
+				tuple_size * sizeof(u32),
+				tuple_size * vreg->fuse_combos_supported
+					   * sizeof(u32));
+		return -EINVAL;
+	}
+
+	for (i = 0; i < tuple_size; i++) {
+		rc = of_property_read_u32_index(node, prop_name, offset + i,
+						&out[i]);
+		if (rc) {
+			cpr3_err(vreg, "error reading property %s, rc=%d\n",
+				prop_name, rc);
+			return rc;
+		}
+	}
+
+	return 0;
+}
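+
+/*
+ * Illustrative example (editor's sketch; the property name and sizes are
+ * hypothetical): with tuple_size = 2, fuse_combos_supported = 8 and
+ * speed_bins_supported = 4, a property "qcom,cpr-example" may hold 2 values
+ * (shared by all parts), 16 values (one tuple per fuse combo), or 8 values
+ * (one tuple per speed bin); any other length is rejected.
+ */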
+
+/**
+ * cpr3_parse_corner_array_property() - fill a per-corner array from a portion
+ *		of the values specified for a device tree property
+ * @vreg:		Pointer to the CPR3 regulator
+ * @prop_name:		The name of the device tree property to read from
+ * @tuple_size:		The number of elements in each per-corner tuple
+ * @out:		Output data array which must be of size:
+ *			tuple_size * vreg->corner_count
+ *
+ * cpr3_parse_common_corner_data() must be called for vreg before this function
+ * is called so that fuse combo and speed bin size elements are initialized.
+ *
+ * Three formats are supported for the device tree property:
+ * 1. Length == tuple_size * vreg->corner_count
+ *	(reading begins at index 0)
+ * 2. Length == tuple_size * vreg->fuse_combo_corner_sum
+ *	(reading begins at index tuple_size * vreg->fuse_combo_offset)
+ * 3. Length == tuple_size * vreg->speed_bin_corner_sum
+ *	(reading begins at index tuple_size * vreg->speed_bin_offset)
+ *
+ * All other property lengths are treated as errors.
+ *
+ * Return: 0 on success, errno on failure
+ */
+int cpr3_parse_corner_array_property(struct cpr3_regulator *vreg,
+		const char *prop_name, int tuple_size, u32 *out)
+{
+	struct device_node *node = vreg->of_node;
+	int len = 0;
+	int i, offset, rc;
+
+	if (!of_find_property(node, prop_name, &len)) {
+		cpr3_err(vreg, "property %s is missing\n", prop_name);
+		return -EINVAL;
+	}
+
+	if (len == tuple_size * vreg->corner_count * sizeof(u32)) {
+		offset = 0;
+	} else if (len == tuple_size * vreg->fuse_combo_corner_sum
+				     * sizeof(u32)) {
+		offset = tuple_size * vreg->fuse_combo_offset;
+	} else if (vreg->speed_bin_corner_sum > 0 &&
+		 len == tuple_size * vreg->speed_bin_corner_sum * sizeof(u32)) {
+		offset = tuple_size * vreg->speed_bin_offset;
+	} else {
+		if (vreg->speed_bin_corner_sum > 0)
+			cpr3_err(vreg, "property %s has invalid length=%d, should be %zu, %zu, or %zu\n",
+				prop_name, len,
+				tuple_size * vreg->corner_count * sizeof(u32),
+				tuple_size * vreg->speed_bin_corner_sum
+					   * sizeof(u32),
+				tuple_size * vreg->fuse_combo_corner_sum
+					   * sizeof(u32));
+		else
+			cpr3_err(vreg, "property %s has invalid length=%d, should be %zu or %zu\n",
+				prop_name, len,
+				tuple_size * vreg->corner_count * sizeof(u32),
+				tuple_size * vreg->fuse_combo_corner_sum
+					   * sizeof(u32));
+		return -EINVAL;
+	}
+
+	for (i = 0; i < tuple_size * vreg->corner_count; i++) {
+		rc = of_property_read_u32_index(node, prop_name, offset + i,
+						&out[i]);
+		if (rc) {
+			cpr3_err(vreg, "error reading property %s, rc=%d\n",
+				prop_name, rc);
+			return rc;
+		}
+	}
+
+	return 0;
+}
+
+/**
+ * cpr3_parse_corner_band_array_property() - fill a per-corner band array
+ *		from a portion of the values specified for a device tree
+ *		property
+ * @vreg:		Pointer to the CPR3 regulator
+ * @prop_name:		The name of the device tree property to read from
+ * @tuple_size:		The number of elements in each per-corner band tuple
+ * @out:		Output data array which must be of size:
+ *			tuple_size * vreg->corner_band_count
+ *
+ * cpr3_parse_common_corner_data() must be called for vreg before this function
+ * is called so that fuse combo and speed bin size elements are initialized.
+ * In addition, corner band fuse combo and speed bin sum and offset elements
+ * must be initialized prior to executing this function.
+ *
+ * Three formats are supported for the device tree property:
+ * 1. Length == tuple_size * vreg->corner_band_count
+ *	(reading begins at index 0)
+ * 2. Length == tuple_size * vreg->fuse_combo_corner_band_sum
+ *	(reading begins at index tuple_size *
+ *		vreg->fuse_combo_corner_band_offset)
+ * 3. Length == tuple_size * vreg->speed_bin_corner_band_sum
+ *	(reading begins at index tuple_size *
+ *		vreg->speed_bin_corner_band_offset)
+ *
+ * All other property lengths are treated as errors.
+ *
+ * Return: 0 on success, errno on failure
+ */
+int cpr3_parse_corner_band_array_property(struct cpr3_regulator *vreg,
+		const char *prop_name, int tuple_size, u32 *out)
+{
+	struct device_node *node = vreg->of_node;
+	int len = 0;
+	int i, offset, rc;
+
+	if (!of_find_property(node, prop_name, &len)) {
+		cpr3_err(vreg, "property %s is missing\n", prop_name);
+		return -EINVAL;
+	}
+
+	if (len == tuple_size * vreg->corner_band_count * sizeof(u32)) {
+		offset = 0;
+	} else if (len == tuple_size * vreg->fuse_combo_corner_band_sum
+				     * sizeof(u32)) {
+		offset = tuple_size * vreg->fuse_combo_corner_band_offset;
+	} else if (vreg->speed_bin_corner_band_sum > 0 &&
+		 len == tuple_size * vreg->speed_bin_corner_band_sum *
+		   sizeof(u32)) {
+		offset = tuple_size * vreg->speed_bin_corner_band_offset;
+	} else {
+		if (vreg->speed_bin_corner_band_sum > 0)
+			cpr3_err(vreg, "property %s has invalid length=%d, should be %zu, %zu, or %zu\n",
+				prop_name, len,
+				tuple_size * vreg->corner_band_count *
+				 sizeof(u32),
+				tuple_size * vreg->speed_bin_corner_band_sum
+					   * sizeof(u32),
+				tuple_size * vreg->fuse_combo_corner_band_sum
+					   * sizeof(u32));
+		else
+			cpr3_err(vreg, "property %s has invalid length=%d, should be %zu or %zu\n",
+				prop_name, len,
+				tuple_size * vreg->corner_band_count *
+				 sizeof(u32),
+				tuple_size * vreg->fuse_combo_corner_band_sum
+					   * sizeof(u32));
+		return -EINVAL;
+	}
+
+	for (i = 0; i < tuple_size * vreg->corner_band_count; i++) {
+		rc = of_property_read_u32_index(node, prop_name, offset + i,
+						&out[i]);
+		if (rc) {
+			cpr3_err(vreg, "error reading property %s, rc=%d\n",
+				prop_name, rc);
+			return rc;
+		}
+	}
+
+	return 0;
+}
+
+/**
+ * cpr3_parse_common_corner_data() - parse common CPR3 properties relating to
+ *		the corners supported by a CPR3 regulator from device tree
+ * @vreg:		Pointer to the CPR3 regulator
+ *
+ * This function reads, validates, and utilizes the following device tree
+ * properties: qcom,cpr-fuse-corners, qcom,cpr-fuse-combos, qcom,cpr-speed-bins,
+ * qcom,cpr-speed-bin-corners, qcom,cpr-corners, qcom,cpr-voltage-ceiling,
+ * qcom,cpr-voltage-floor, qcom,corner-frequencies,
+ * and qcom,cpr-corner-fmax-map.
+ *
+ * It initializes these CPR3 regulator elements: corner, corner_count,
+ * fuse_combos_supported, fuse_corner_map, and speed_bins_supported.  It
+ * initializes these elements for each corner: ceiling_volt, floor_volt,
+ * proc_freq, and cpr_fuse_corner.
+ *
+ * It requires that the following CPR3 regulator elements be initialized before
+ * being called: fuse_corner_count, fuse_combo, and speed_bin_fuse.
+ *
+ * Return: 0 on success, errno on failure
+ */
+int cpr3_parse_common_corner_data(struct cpr3_regulator *vreg)
+{
+	struct device_node *node = vreg->of_node;
+	struct cpr3_controller *ctrl = vreg->thread->ctrl;
+	u32 max_fuse_combos, fuse_corners, aging_allowed = 0;
+	u32 max_speed_bins = 0;
+	u32 *combo_corners;
+	u32 *speed_bin_corners;
+	u32 *temp;
+	int i, j, rc;
+
+	rc = of_property_read_u32(node, "qcom,cpr-fuse-corners", &fuse_corners);
+	if (rc) {
+		cpr3_err(vreg, "error reading property qcom,cpr-fuse-corners, rc=%d\n",
+			rc);
+		return rc;
+	}
+
+	if (vreg->fuse_corner_count != fuse_corners) {
+		cpr3_err(vreg, "device tree config supports %d fuse corners but the hardware has %d fuse corners\n",
+			fuse_corners, vreg->fuse_corner_count);
+		return -EINVAL;
+	}
+
+	rc = of_property_read_u32(node, "qcom,cpr-fuse-combos",
+				&max_fuse_combos);
+	if (rc) {
+		cpr3_err(vreg, "error reading property qcom,cpr-fuse-combos, rc=%d\n",
+			rc);
+		return rc;
+	}
+
+	/*
+	 * Sanity check against arbitrarily large value to avoid excessive
+	 * memory allocation.
+	 */
+	if (max_fuse_combos > 100 || max_fuse_combos == 0) {
+		cpr3_err(vreg, "qcom,cpr-fuse-combos is invalid: %u\n",
+			max_fuse_combos);
+		return -EINVAL;
+	}
+
+	if (vreg->fuse_combo >= max_fuse_combos) {
+		cpr3_err(vreg, "device tree config supports fuse combos 0-%u but the hardware has combo %d\n",
+			max_fuse_combos - 1, vreg->fuse_combo);
+		BUG();
+		return -EINVAL;
+	}
+
+	vreg->fuse_combos_supported = max_fuse_combos;
+
+	of_property_read_u32(node, "qcom,cpr-speed-bins", &max_speed_bins);
+
+	/*
+	 * Sanity check against arbitrarily large value to avoid excessive
+	 * memory allocation.
+	 */
+	if (max_speed_bins > 100) {
+		cpr3_err(vreg, "qcom,cpr-speed-bins is invalid: %u\n",
+			max_speed_bins);
+		return -EINVAL;
+	}
+
+	if (max_speed_bins && vreg->speed_bin_fuse >= max_speed_bins) {
+		cpr3_err(vreg, "device tree config supports speed bins 0-%u but the hardware has speed bin %d\n",
+			max_speed_bins - 1, vreg->speed_bin_fuse);
+		BUG();
+		return -EINVAL;
+	}
+
+	vreg->speed_bins_supported = max_speed_bins;
+
+	combo_corners = kcalloc(vreg->fuse_combos_supported,
+				sizeof(*combo_corners), GFP_KERNEL);
+	if (!combo_corners)
+		return -ENOMEM;
+
+	rc = of_property_read_u32_array(node, "qcom,cpr-corners", combo_corners,
+					vreg->fuse_combos_supported);
+	if (rc == -EOVERFLOW) {
+		/* Single value case */
+		rc = of_property_read_u32(node, "qcom,cpr-corners",
+					combo_corners);
+		for (i = 1; i < vreg->fuse_combos_supported; i++)
+			combo_corners[i] = combo_corners[0];
+	}
+	if (rc) {
+		cpr3_err(vreg, "error reading property qcom,cpr-corners, rc=%d\n",
+			rc);
+		kfree(combo_corners);
+		return rc;
+	}
+
+	vreg->fuse_combo_offset = 0;
+	vreg->fuse_combo_corner_sum = 0;
+	for (i = 0; i < vreg->fuse_combos_supported; i++) {
+		vreg->fuse_combo_corner_sum += combo_corners[i];
+		if (i < vreg->fuse_combo)
+			vreg->fuse_combo_offset += combo_corners[i];
+	}
+
+	vreg->corner_count = combo_corners[vreg->fuse_combo];
+
+	kfree(combo_corners);
+
+	vreg->speed_bin_offset = 0;
+	vreg->speed_bin_corner_sum = 0;
+	if (vreg->speed_bins_supported > 0) {
+		speed_bin_corners = kcalloc(vreg->speed_bins_supported,
+					sizeof(*speed_bin_corners), GFP_KERNEL);
+		if (!speed_bin_corners)
+			return -ENOMEM;
+
+		rc = of_property_read_u32_array(node,
+				"qcom,cpr-speed-bin-corners", speed_bin_corners,
+				vreg->speed_bins_supported);
+		if (rc) {
+			cpr3_err(vreg, "error reading property qcom,cpr-speed-bin-corners, rc=%d\n",
+				rc);
+			kfree(speed_bin_corners);
+			return rc;
+		}
+
+		for (i = 0; i < vreg->speed_bins_supported; i++) {
+			vreg->speed_bin_corner_sum += speed_bin_corners[i];
+			if (i < vreg->speed_bin_fuse)
+				vreg->speed_bin_offset += speed_bin_corners[i];
+		}
+
+		if (speed_bin_corners[vreg->speed_bin_fuse]
+		    != vreg->corner_count) {
+			cpr3_err(vreg, "qcom,cpr-corners and qcom,cpr-speed-bin-corners conflict on number of corners: %d vs %u\n",
+				vreg->corner_count,
+				speed_bin_corners[vreg->speed_bin_fuse]);
+			kfree(speed_bin_corners);
+			return -EINVAL;
+		}
+
+		kfree(speed_bin_corners);
+	}
+
+	/*
+	 * For CPRh compliant controllers two additional corners are
+	 * allocated to correspond to the APM crossover voltage and the MEM ACC
+	 * crossover voltage.
+	 */
+	vreg->corner = devm_kcalloc(ctrl->dev, ctrl->ctrl_type ==
+				    CPR_CTRL_TYPE_CPRH ?
+				    vreg->corner_count + 2 :
+				    vreg->corner_count,
+				    sizeof(*vreg->corner), GFP_KERNEL);
+	temp = kcalloc(vreg->corner_count, sizeof(*temp), GFP_KERNEL);
+	if (!vreg->corner || !temp) {
+		kfree(temp);
+		return -ENOMEM;
+	}
+
+	rc = cpr3_parse_corner_array_property(vreg, "qcom,cpr-voltage-ceiling",
+			1, temp);
+	if (rc)
+		goto free_temp;
+	for (i = 0; i < vreg->corner_count; i++) {
+		vreg->corner[i].ceiling_volt
+			= CPR3_ROUND(temp[i], ctrl->step_volt);
+		vreg->corner[i].abs_ceiling_volt = vreg->corner[i].ceiling_volt;
+	}
+
+	rc = cpr3_parse_corner_array_property(vreg, "qcom,cpr-voltage-floor",
+			1, temp);
+	if (rc)
+		goto free_temp;
+	for (i = 0; i < vreg->corner_count; i++)
+		vreg->corner[i].floor_volt
+			= CPR3_ROUND(temp[i], ctrl->step_volt);
+
+	/* Validate ceiling and floor values */
+	for (i = 0; i < vreg->corner_count; i++) {
+		if (vreg->corner[i].floor_volt
+		    > vreg->corner[i].ceiling_volt) {
+			cpr3_err(vreg, "CPR floor[%d]=%d > ceiling[%d]=%d uV\n",
+				i, vreg->corner[i].floor_volt,
+				i, vreg->corner[i].ceiling_volt);
+			rc = -EINVAL;
+			goto free_temp;
+		}
+	}
+
+	/* Load optional system-supply voltages */
+	if (of_find_property(vreg->of_node, "qcom,system-voltage", NULL)) {
+		rc = cpr3_parse_corner_array_property(vreg,
+			"qcom,system-voltage", 1, temp);
+		if (rc)
+			goto free_temp;
+		for (i = 0; i < vreg->corner_count; i++)
+			vreg->corner[i].system_volt = temp[i];
+	}
+
+	rc = cpr3_parse_corner_array_property(vreg, "qcom,corner-frequencies",
+			1, temp);
+	if (rc)
+		goto free_temp;
+	for (i = 0; i < vreg->corner_count; i++)
+		vreg->corner[i].proc_freq = temp[i];
+
+	/* Validate frequencies */
+	for (i = 1; i < vreg->corner_count; i++) {
+		if (vreg->corner[i].proc_freq
+		    < vreg->corner[i - 1].proc_freq) {
+			cpr3_err(vreg, "invalid frequency: freq[%d]=%u < freq[%d]=%u\n",
+				i, vreg->corner[i].proc_freq, i - 1,
+				vreg->corner[i - 1].proc_freq);
+			rc = -EINVAL;
+			goto free_temp;
+		}
+	}
+
+	vreg->fuse_corner_map = devm_kcalloc(ctrl->dev, vreg->fuse_corner_count,
+				    sizeof(*vreg->fuse_corner_map), GFP_KERNEL);
+	if (!vreg->fuse_corner_map) {
+		rc = -ENOMEM;
+		goto free_temp;
+	}
+
+	rc = cpr3_parse_array_property(vreg, "qcom,cpr-corner-fmax-map",
+		vreg->fuse_corner_count, temp);
+	if (rc)
+		goto free_temp;
+	for (i = 0; i < vreg->fuse_corner_count; i++) {
+		vreg->fuse_corner_map[i] = temp[i] - CPR3_CORNER_OFFSET;
+		if (temp[i] < CPR3_CORNER_OFFSET
+		    || temp[i] > vreg->corner_count + CPR3_CORNER_OFFSET) {
+			cpr3_err(vreg, "invalid corner value specified in qcom,cpr-corner-fmax-map: %u\n",
+				temp[i]);
+			rc = -EINVAL;
+			goto free_temp;
+		} else if (i > 0 && temp[i - 1] >= temp[i]) {
+			cpr3_err(vreg, "invalid corner %u less than or equal to previous corner %u\n",
+				temp[i], temp[i - 1]);
+			rc = -EINVAL;
+			goto free_temp;
+		}
+	}
+	if (temp[vreg->fuse_corner_count - 1] != vreg->corner_count)
+		cpr3_debug(vreg, "Note: highest Fmax corner %u in qcom,cpr-corner-fmax-map does not match highest supported corner %d\n",
+			temp[vreg->fuse_corner_count - 1],
+			vreg->corner_count);
+
+	for (i = 0; i < vreg->corner_count; i++) {
+		for (j = 0; j < vreg->fuse_corner_count; j++) {
+			if (i + CPR3_CORNER_OFFSET <= temp[j]) {
+				vreg->corner[i].cpr_fuse_corner = j;
+				break;
+			}
+		}
+		if (j == vreg->fuse_corner_count) {
+			/*
+			 * Handle the case where the highest fuse corner maps
+			 * to a corner below the highest corner.
+			 */
+			vreg->corner[i].cpr_fuse_corner
+				= vreg->fuse_corner_count - 1;
+		}
+	}
+
+	if (of_find_property(vreg->of_node,
+				"qcom,allow-aging-voltage-adjustment", NULL)) {
+		rc = cpr3_parse_array_property(vreg,
+			"qcom,allow-aging-voltage-adjustment",
+			1, &aging_allowed);
+		if (rc)
+			goto free_temp;
+
+		vreg->aging_allowed = aging_allowed;
+	}
+
+	if (of_find_property(vreg->of_node,
+		       "qcom,allow-aging-open-loop-voltage-adjustment", NULL)) {
+		rc = cpr3_parse_array_property(vreg,
+			"qcom,allow-aging-open-loop-voltage-adjustment",
+			1, &aging_allowed);
+		if (rc)
+			goto free_temp;
+
+		vreg->aging_allow_open_loop_adj = aging_allowed;
+	}
+
+	if (vreg->aging_allowed) {
+		if (ctrl->aging_ref_volt <= 0) {
+			cpr3_err(ctrl, "qcom,cpr-aging-ref-voltage must be specified\n");
+			rc = -EINVAL;
+			goto free_temp;
+		}
+
+		rc = cpr3_parse_array_property(vreg,
+			"qcom,cpr-aging-max-voltage-adjustment",
+			1, &vreg->aging_max_adjust_volt);
+		if (rc)
+			goto free_temp;
+
+		rc = cpr3_parse_array_property(vreg,
+			"qcom,cpr-aging-ref-corner", 1, &vreg->aging_corner);
+		if (rc) {
+			goto free_temp;
+		} else if (vreg->aging_corner < CPR3_CORNER_OFFSET
+			   || vreg->aging_corner > vreg->corner_count - 1
+							+ CPR3_CORNER_OFFSET) {
+			cpr3_err(vreg, "aging reference corner=%d not in range [%d, %d]\n",
+				vreg->aging_corner, CPR3_CORNER_OFFSET,
+				vreg->corner_count - 1 + CPR3_CORNER_OFFSET);
+			rc = -EINVAL;
+			goto free_temp;
+		}
+		vreg->aging_corner -= CPR3_CORNER_OFFSET;
+
+		if (of_find_property(vreg->of_node, "qcom,cpr-aging-derate",
+					NULL)) {
+			rc = cpr3_parse_corner_array_property(vreg,
+				"qcom,cpr-aging-derate", 1, temp);
+			if (rc)
+				goto free_temp;
+
+			for (i = 0; i < vreg->corner_count; i++)
+				vreg->corner[i].aging_derate = temp[i];
+		} else {
+			for (i = 0; i < vreg->corner_count; i++)
+				vreg->corner[i].aging_derate
+					= CPR3_AGING_DERATE_UNITY;
+		}
+	}
+
+free_temp:
+	kfree(temp);
+	return rc;
+}
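+
+/*
+ * Illustrative device tree fragment consumed by
+ * cpr3_parse_common_corner_data() (editor's sketch; all values are
+ * hypothetical, the property names come from the function above):
+ *
+ *	qcom,cpr-fuse-corners = <3>;
+ *	qcom,cpr-fuse-combos = <8>;
+ *	qcom,cpr-corners = <4>;
+ *	qcom,cpr-voltage-ceiling = <670000 745000 905000 1015000>;
+ *	qcom,cpr-voltage-floor = <545000 545000 595000 675000>;
+ *	qcom,corner-frequencies = <300000000 600000000 1000000000 1500000000>;
+ *	qcom,cpr-corner-fmax-map = <2 3 4>;
+ */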
+
+/**
+ * cpr3_parse_thread_u32() - parse the specified property from the CPR3 thread's
+ *		device tree node and verify that it is within the allowed limits
+ * @thread:		Pointer to the CPR3 thread
+ * @propname:		The name of the device tree property to read
+ * @out_value:		The output pointer to fill with the value read
+ * @value_min:		The minimum allowed property value
+ * @value_max:		The maximum allowed property value
+ *
+ * This function prints a verbose error message if the property is missing or
+ * has a value which is not within the specified range.
+ *
+ * Return: 0 on success, errno on failure
+ */
+int cpr3_parse_thread_u32(struct cpr3_thread *thread, const char *propname,
+		       u32 *out_value, u32 value_min, u32 value_max)
+{
+	int rc;
+
+	rc = of_property_read_u32(thread->of_node, propname, out_value);
+	if (rc) {
+		cpr3_err(thread->ctrl, "thread %u error reading property %s, rc=%d\n",
+			thread->thread_id, propname, rc);
+		return rc;
+	}
+
+	if (*out_value < value_min || *out_value > value_max) {
+		cpr3_err(thread->ctrl, "thread %u %s=%u is invalid; allowed range: [%u, %u]\n",
+			thread->thread_id, propname, *out_value, value_min,
+			value_max);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/**
+ * cpr3_parse_ctrl_u32() - parse the specified property from the CPR3
+ *		controller's device tree node and verify that it is within the
+ *		allowed limits
+ * @ctrl:		Pointer to the CPR3 controller
+ * @propname:		The name of the device tree property to read
+ * @out_value:		The output pointer to fill with the value read
+ * @value_min:		The minimum allowed property value
+ * @value_max:		The maximum allowed property value
+ *
+ * This function prints a verbose error message if the property is missing or
+ * has a value which is not within the specified range.
+ *
+ * Return: 0 on success, errno on failure
+ */
+int cpr3_parse_ctrl_u32(struct cpr3_controller *ctrl, const char *propname,
+		       u32 *out_value, u32 value_min, u32 value_max)
+{
+	int rc;
+
+	rc = of_property_read_u32(ctrl->dev->of_node, propname, out_value);
+	if (rc) {
+		cpr3_err(ctrl, "error reading property %s, rc=%d\n",
+			propname, rc);
+		return rc;
+	}
+
+	if (*out_value < value_min || *out_value > value_max) {
+		cpr3_err(ctrl, "%s=%u is invalid; allowed range: [%u, %u]\n",
+			propname, *out_value, value_min, value_max);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/**
+ * cpr3_parse_common_thread_data() - parse common CPR3 thread properties from
+ *		device tree
+ * @thread:		Pointer to the CPR3 thread
+ *
+ * Return: 0 on success, errno on failure
+ */
+int cpr3_parse_common_thread_data(struct cpr3_thread *thread)
+{
+	int rc;
+
+	rc = cpr3_parse_thread_u32(thread, "qcom,cpr-consecutive-up",
+			&thread->consecutive_up, CPR3_CONSECUTIVE_UP_DOWN_MIN,
+			CPR3_CONSECUTIVE_UP_DOWN_MAX);
+	if (rc)
+		return rc;
+
+	rc = cpr3_parse_thread_u32(thread, "qcom,cpr-consecutive-down",
+			&thread->consecutive_down, CPR3_CONSECUTIVE_UP_DOWN_MIN,
+			CPR3_CONSECUTIVE_UP_DOWN_MAX);
+	if (rc)
+		return rc;
+
+	rc = cpr3_parse_thread_u32(thread, "qcom,cpr-up-threshold",
+			&thread->up_threshold, CPR3_UP_DOWN_THRESHOLD_MIN,
+			CPR3_UP_DOWN_THRESHOLD_MAX);
+	if (rc)
+		return rc;
+
+	rc = cpr3_parse_thread_u32(thread, "qcom,cpr-down-threshold",
+			&thread->down_threshold, CPR3_UP_DOWN_THRESHOLD_MIN,
+			CPR3_UP_DOWN_THRESHOLD_MAX);
+	if (rc)
+		return rc;
+
+	return rc;
+}
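+
+/*
+ * Illustrative thread node properties parsed by
+ * cpr3_parse_common_thread_data() (editor's sketch; the values are
+ * hypothetical but lie within the allowed [0, 15] and [0, 31] ranges):
+ *
+ *	qcom,cpr-consecutive-up = <0>;
+ *	qcom,cpr-consecutive-down = <2>;
+ *	qcom,cpr-up-threshold = <2>;
+ *	qcom,cpr-down-threshold = <2>;
+ */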
+
+/**
+ * cpr3_parse_irq_affinity() - parse CPR IRQ affinity information
+ * @ctrl:		Pointer to the CPR3 controller
+ *
+ * Return: 0 on success, errno on failure
+ */
+static int cpr3_parse_irq_affinity(struct cpr3_controller *ctrl)
+{
+	struct device_node *cpu_node;
+	int i, cpu;
+	int len = 0;
+
+	if (!of_find_property(ctrl->dev->of_node, "qcom,cpr-interrupt-affinity",
+				&len)) {
+		/* No IRQ affinity required */
+		return 0;
+	}
+
+	len /= sizeof(u32);
+
+	for (i = 0; i < len; i++) {
+		cpu_node = of_parse_phandle(ctrl->dev->of_node,
+					    "qcom,cpr-interrupt-affinity", i);
+		if (!cpu_node) {
+			cpr3_err(ctrl, "could not find CPU node %d\n", i);
+			return -EINVAL;
+		}
+
+		for_each_possible_cpu(cpu) {
+			if (of_get_cpu_node(cpu, NULL) == cpu_node) {
+				cpumask_set_cpu(cpu, &ctrl->irq_affinity_mask);
+				break;
+			}
+		}
+		of_node_put(cpu_node);
+	}
+
+	return 0;
+}
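+
+/*
+ * Illustrative snippet (hypothetical CPU labels): each phandle listed in
+ * qcom,cpr-interrupt-affinity that matches a possible CPU is added to the
+ * controller's IRQ affinity mask:
+ *
+ *	qcom,cpr-interrupt-affinity = <&CPU4 &CPU5 &CPU6 &CPU7>;
+ */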
+
+static int cpr3_panic_notifier_init(struct cpr3_controller *ctrl)
+{
+	struct device_node *node = ctrl->dev->of_node;
+	struct cpr3_panic_regs_info *panic_regs_info;
+	struct cpr3_reg_info *regs;
+	int i, reg_count, len, rc = 0;
+
+	if (!of_find_property(node, "qcom,cpr-panic-reg-addr-list", &len)) {
+		/* panic register address list not specified */
+		return rc;
+	}
+
+	reg_count = len / sizeof(u32);
+	if (!reg_count) {
+		cpr3_err(ctrl, "qcom,cpr-panic-reg-addr-list has invalid len = %d\n",
+			len);
+		return -EINVAL;
+	}
+
+	if (!of_find_property(node, "qcom,cpr-panic-reg-name-list", NULL)) {
+		cpr3_err(ctrl, "property qcom,cpr-panic-reg-name-list not specified\n");
+		return -EINVAL;
+	}
+
+	len = of_property_count_strings(node, "qcom,cpr-panic-reg-name-list");
+	if (reg_count != len) {
+		cpr3_err(ctrl, "qcom,cpr-panic-reg-name-list should have %d strings\n",
+			reg_count);
+		return -EINVAL;
+	}
+
+	panic_regs_info = devm_kzalloc(ctrl->dev, sizeof(*panic_regs_info),
+					GFP_KERNEL);
+	if (!panic_regs_info)
+		return -ENOMEM;
+
+	regs = devm_kcalloc(ctrl->dev, reg_count, sizeof(*regs), GFP_KERNEL);
+	if (!regs)
+		return -ENOMEM;
+
+	for (i = 0; i < reg_count; i++) {
+		rc = of_property_read_string_index(node,
+				"qcom,cpr-panic-reg-name-list", i,
+				&(regs[i].name));
+		if (rc) {
+			cpr3_err(ctrl, "error reading property qcom,cpr-panic-reg-name-list, rc=%d\n",
+				rc);
+			return rc;
+		}
+
+		rc = of_property_read_u32_index(node,
+				"qcom,cpr-panic-reg-addr-list", i,
+				&(regs[i].addr));
+		if (rc) {
+			cpr3_err(ctrl, "error reading property qcom,cpr-panic-reg-addr-list, rc=%d\n",
+				rc);
+			return rc;
+		}
+		regs[i].virt_addr = devm_ioremap(ctrl->dev, regs[i].addr, 0x4);
+		if (!regs[i].virt_addr) {
+			pr_err("Unable to map panic register addr 0x%08x\n",
+				regs[i].addr);
+			return -EINVAL;
+		}
+		regs[i].value = 0xFFFFFFFF;
+	}
+
+	panic_regs_info->reg_count = reg_count;
+	panic_regs_info->regs = regs;
+	ctrl->panic_regs_info = panic_regs_info;
+
+	return rc;
+}
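+
+/*
+ * Illustrative snippet (hypothetical name and address): both list properties
+ * must have the same element count, one name per register address:
+ *
+ *	qcom,cpr-panic-reg-addr-list = <0x179cbaa4>;
+ *	qcom,cpr-panic-reg-name-list = "APSS_CPRH_STATUS_0";
+ */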
+
+/**
+ * cpr3_parse_common_ctrl_data() - parse common CPR3 controller properties from
+ *		device tree
+ * @ctrl:		Pointer to the CPR3 controller
+ *
+ * Return: 0 on success, errno on failure
+ */
+int cpr3_parse_common_ctrl_data(struct cpr3_controller *ctrl)
+{
+	int rc;
+
+	rc = cpr3_parse_ctrl_u32(ctrl, "qcom,cpr-sensor-time",
+			&ctrl->sensor_time, 0, UINT_MAX);
+	if (rc)
+		return rc;
+
+	rc = cpr3_parse_ctrl_u32(ctrl, "qcom,cpr-loop-time",
+			&ctrl->loop_time, 0, UINT_MAX);
+	if (rc)
+		return rc;
+
+	rc = cpr3_parse_ctrl_u32(ctrl, "qcom,cpr-idle-cycles",
+			&ctrl->idle_clocks, CPR3_IDLE_CLOCKS_MIN,
+			CPR3_IDLE_CLOCKS_MAX);
+	if (rc)
+		return rc;
+
+	rc = cpr3_parse_ctrl_u32(ctrl, "qcom,cpr-step-quot-init-min",
+			&ctrl->step_quot_init_min, CPR3_STEP_QUOT_MIN,
+			CPR3_STEP_QUOT_MAX);
+	if (rc)
+		return rc;
+
+	rc = cpr3_parse_ctrl_u32(ctrl, "qcom,cpr-step-quot-init-max",
+			&ctrl->step_quot_init_max, CPR3_STEP_QUOT_MIN,
+			CPR3_STEP_QUOT_MAX);
+	if (rc)
+		return rc;
+
+	rc = of_property_read_u32(ctrl->dev->of_node, "qcom,voltage-step",
+				&ctrl->step_volt);
+	if (rc) {
+		cpr3_err(ctrl, "error reading property qcom,voltage-step, rc=%d\n",
+			rc);
+		return rc;
+	}
+	if (ctrl->step_volt <= 0) {
+		cpr3_err(ctrl, "qcom,voltage-step=%d is invalid\n",
+			ctrl->step_volt);
+		return -EINVAL;
+	}
+
+	rc = cpr3_parse_ctrl_u32(ctrl, "qcom,cpr-count-mode",
+			&ctrl->count_mode, CPR3_COUNT_MODE_ALL_AT_ONCE_MIN,
+			CPR3_COUNT_MODE_STAGGERED);
+	if (rc)
+		return rc;
+
+	/* Count repeat is optional */
+	ctrl->count_repeat = 0;
+	of_property_read_u32(ctrl->dev->of_node, "qcom,cpr-count-repeat",
+			&ctrl->count_repeat);
+
+	ctrl->cpr_allowed_sw = of_property_read_bool(ctrl->dev->of_node,
+			"qcom,cpr-enable");
+
+	rc = cpr3_parse_irq_affinity(ctrl);
+	if (rc)
+		return rc;
+
+	/* Aging reference voltage is optional */
+	ctrl->aging_ref_volt = 0;
+	of_property_read_u32(ctrl->dev->of_node, "qcom,cpr-aging-ref-voltage",
+			&ctrl->aging_ref_volt);
+
+	/* Aging possible bitmask is optional */
+	ctrl->aging_possible_mask = 0;
+	of_property_read_u32(ctrl->dev->of_node,
+			"qcom,cpr-aging-allowed-reg-mask",
+			&ctrl->aging_possible_mask);
+
+	if (ctrl->aging_possible_mask) {
+		/*
+		 * Aging possible register value required if bitmask is
+		 * specified
+		 */
+		rc = cpr3_parse_ctrl_u32(ctrl,
+				"qcom,cpr-aging-allowed-reg-value",
+				&ctrl->aging_possible_val, 0, UINT_MAX);
+		if (rc)
+			return rc;
+	}
+
+	if (of_find_property(ctrl->dev->of_node, "clock-names", NULL)) {
+		ctrl->core_clk = devm_clk_get(ctrl->dev, "core_clk");
+		if (IS_ERR(ctrl->core_clk)) {
+			rc = PTR_ERR(ctrl->core_clk);
+			if (rc != -EPROBE_DEFER)
+				cpr3_err(ctrl, "unable request core clock, rc=%d\n",
+				rc);
+			return rc;
+		}
+	}
+
+	rc = cpr3_panic_notifier_init(ctrl);
+	if (rc)
+		return rc;
+
+	if (of_find_property(ctrl->dev->of_node, "vdd-supply", NULL)) {
+		ctrl->vdd_regulator = devm_regulator_get(ctrl->dev, "vdd");
+		if (IS_ERR(ctrl->vdd_regulator)) {
+			rc = PTR_ERR(ctrl->vdd_regulator);
+			if (rc != -EPROBE_DEFER)
+				cpr3_err(ctrl, "unable to request vdd regulator, rc=%d\n",
+					 rc);
+			return rc;
+		}
+	} else if (ctrl->ctrl_type == CPR_CTRL_TYPE_CPRH) {
+		/* vdd-supply is optional for CPRh controllers. */
+		ctrl->vdd_regulator = NULL;
+	} else {
+		cpr3_err(ctrl, "vdd supply is not defined\n");
+		return -ENODEV;
+	}
+
+	/*
+	 * Reset step_quot to default on each loop_en = 0 transition is
+	 * optional.
+	 */
+	ctrl->reset_step_quot_loop_en
+		= of_property_read_bool(ctrl->dev->of_node,
+					"qcom,cpr-reset-step-quot-loop-en");
+
+	/*
+	 * Regulator device handles are not necessary for CPRh controllers
+	 * since communication with the regulators is completely managed
+	 * in hardware.
+	 */
+	if (ctrl->ctrl_type == CPR_CTRL_TYPE_CPRH)
+		return rc;
+
+	ctrl->system_regulator = devm_regulator_get_optional(ctrl->dev,
+								"system");
+	if (IS_ERR(ctrl->system_regulator)) {
+		rc = PTR_ERR(ctrl->system_regulator);
+		if (rc != -EPROBE_DEFER) {
+			rc = 0;
+			ctrl->system_regulator = NULL;
+		} else {
+			return rc;
+		}
+	}
+
+	ctrl->mem_acc_regulator = devm_regulator_get_optional(ctrl->dev,
+							      "mem-acc");
+	if (IS_ERR(ctrl->mem_acc_regulator)) {
+		rc = PTR_ERR(ctrl->mem_acc_regulator);
+		if (rc != -EPROBE_DEFER) {
+			rc = 0;
+			ctrl->mem_acc_regulator = NULL;
+		} else {
+			return rc;
+		}
+	}
+
+	return rc;
+}
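+
+/*
+ * Illustrative controller node snippet covering the required properties
+ * parsed above (all values are hypothetical, not recommendations):
+ *
+ *	qcom,cpr-sensor-time = <1000>;
+ *	qcom,cpr-loop-time = <5000000>;
+ *	qcom,cpr-idle-cycles = <15>;
+ *	qcom,cpr-step-quot-init-min = <11>;
+ *	qcom,cpr-step-quot-init-max = <12>;
+ *	qcom,voltage-step = <5000>;
+ *	qcom,cpr-count-mode = <0>;
+ */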
+
+/**
+ * cpr3_limit_open_loop_voltages() - modify the open-loop voltage of each corner
+ *				so that it fits within the floor to ceiling
+ *				voltage range of the corner
+ * @vreg:		Pointer to the CPR3 regulator
+ *
+ * This function clips the open-loop voltage for each corner so that it is
+ * limited to the floor to ceiling range.  It also rounds each open-loop voltage
+ * so that it corresponds to a set point available to the underlying regulator.
+ *
+ * Return: 0 on success, errno on failure
+ */
+int cpr3_limit_open_loop_voltages(struct cpr3_regulator *vreg)
+{
+	int i, volt;
+
+	cpr3_debug(vreg, "open-loop voltages after trimming and rounding:\n");
+	for (i = 0; i < vreg->corner_count; i++) {
+		volt = CPR3_ROUND(vreg->corner[i].open_loop_volt,
+					vreg->thread->ctrl->step_volt);
+		if (volt < vreg->corner[i].floor_volt)
+			volt = vreg->corner[i].floor_volt;
+		else if (volt > vreg->corner[i].ceiling_volt)
+			volt = vreg->corner[i].ceiling_volt;
+		vreg->corner[i].open_loop_volt = volt;
+		cpr3_debug(vreg, "corner[%2d]: open-loop=%d uV\n", i, volt);
+	}
+
+	return 0;
+}
+
+/**
+ * cpr3_open_loop_voltage_as_ceiling() - configures the ceiling voltage for each
+ *		corner to equal the open-loop voltage if the relevant device
+ *		tree property is found for the CPR3 regulator
+ * @vreg:		Pointer to the CPR3 regulator
+ *
+ * This function assumes that the open-loop voltage for each corner has
+ * already been rounded to the nearest allowed set point and that it falls
+ * within the floor to ceiling range.
+ *
+ * Return: none
+ */
+void cpr3_open_loop_voltage_as_ceiling(struct cpr3_regulator *vreg)
+{
+	int i;
+
+	if (!of_property_read_bool(vreg->of_node,
+				"qcom,cpr-scaled-open-loop-voltage-as-ceiling"))
+		return;
+
+	for (i = 0; i < vreg->corner_count; i++)
+		vreg->corner[i].ceiling_volt
+			= vreg->corner[i].open_loop_volt;
+}
+
+/**
+ * cpr3_limit_floor_voltages() - raise the floor voltage of each corner so that
+ *		the optional maximum floor to ceiling voltage range specified in
+ *		device tree is satisfied
+ * @vreg:		Pointer to the CPR3 regulator
+ *
+ * This function also ensures that the open-loop voltage for each corner falls
+ * within the final floor to ceiling voltage range and that floor voltages
+ * increase monotonically.
+ *
+ * Return: 0 on success, errno on failure
+ */
+int cpr3_limit_floor_voltages(struct cpr3_regulator *vreg)
+{
+	char *prop = "qcom,cpr-floor-to-ceiling-max-range";
+	int i, floor_new;
+	u32 *floor_range;
+	int rc = 0;
+
+	if (!of_find_property(vreg->of_node, prop, NULL))
+		goto enforce_monotonicity;
+
+	floor_range = kcalloc(vreg->corner_count, sizeof(*floor_range),
+				GFP_KERNEL);
+	if (!floor_range)
+		return -ENOMEM;
+
+	rc = cpr3_parse_corner_array_property(vreg, prop, 1, floor_range);
+	if (rc)
+		goto free_floor_adjust;
+
+	for (i = 0; i < vreg->corner_count; i++) {
+		if ((s32)floor_range[i] >= 0) {
+			floor_new = CPR3_ROUND(vreg->corner[i].ceiling_volt
+							- floor_range[i],
+						vreg->thread->ctrl->step_volt);
+
+			vreg->corner[i].floor_volt = max(floor_new,
+						vreg->corner[i].floor_volt);
+			if (vreg->corner[i].open_loop_volt
+			    < vreg->corner[i].floor_volt)
+				vreg->corner[i].open_loop_volt
+					= vreg->corner[i].floor_volt;
+		}
+	}
+
+free_floor_adjust:
+	kfree(floor_range);
+
+enforce_monotonicity:
+	/* Ensure that floor voltages increase monotonically. */
+	for (i = 1; i < vreg->corner_count; i++) {
+		if (vreg->corner[i].floor_volt
+		    < vreg->corner[i - 1].floor_volt) {
+			cpr3_debug(vreg, "corner %d floor voltage=%d uV < corner %d voltage=%d uV; overriding: corner %d voltage=%d\n",
+				i, vreg->corner[i].floor_volt,
+				i - 1, vreg->corner[i - 1].floor_volt,
+				i, vreg->corner[i - 1].floor_volt);
+			vreg->corner[i].floor_volt
+				= vreg->corner[i - 1].floor_volt;
+
+			if (vreg->corner[i].open_loop_volt
+			    < vreg->corner[i].floor_volt)
+				vreg->corner[i].open_loop_volt
+					= vreg->corner[i].floor_volt;
+			if (vreg->corner[i].ceiling_volt
+			    < vreg->corner[i].floor_volt)
+				vreg->corner[i].ceiling_volt
+					= vreg->corner[i].floor_volt;
+		}
+	}
+
+	return rc;
+}
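+
+/*
+ * Illustrative example (hypothetical values): with
+ * qcom,cpr-floor-to-ceiling-max-range = <50000> for a corner whose ceiling
+ * is 900000 uV, original floor is 820000 uV, and step_volt is 5000 uV, the
+ * floor is raised to max(900000 - 50000, 820000) = 850000 uV.  A negative
+ * range entry leaves that corner's floor untouched.
+ */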
+
+/**
+ * cpr3_print_quots() - print CPR target quotients into the kernel log for
+ *		debugging purposes
+ * @vreg:		Pointer to the CPR3 regulator
+ *
+ * Return: none
+ */
+void cpr3_print_quots(struct cpr3_regulator *vreg)
+{
+	int i, j, pos;
+	size_t buflen;
+	char *buf;
+
+	buflen = sizeof(*buf) * CPR3_RO_COUNT * (MAX_CHARS_PER_INT + 2);
+	buf = kzalloc(buflen, GFP_KERNEL);
+	if (!buf)
+		return;
+
+	for (i = 0; i < vreg->corner_count; i++) {
+		for (j = 0, pos = 0; j < CPR3_RO_COUNT; j++)
+			pos += scnprintf(buf + pos, buflen - pos, " %u",
+				vreg->corner[i].target_quot[j]);
+		cpr3_debug(vreg, "target quots[%2d]:%s\n", i, buf);
+	}
+
+	kfree(buf);
+}
+
+/**
+ * cpr3_adjust_fused_open_loop_voltages() - adjust the fused open-loop voltages
+ *		for each fuse corner according to device tree values
+ * @vreg:		Pointer to the CPR3 regulator
+ * @fuse_volt:		Pointer to an array of the fused open-loop voltage
+ *			values
+ *
+ * Voltage values in fuse_volt are modified in place.
+ *
+ * Return: 0 on success, errno on failure
+ */
+int cpr3_adjust_fused_open_loop_voltages(struct cpr3_regulator *vreg,
+		int *fuse_volt)
+{
+	int i, rc, prev_volt;
+	int *volt_adjust;
+
+	if (!of_find_property(vreg->of_node,
+			"qcom,cpr-open-loop-voltage-fuse-adjustment", NULL)) {
+		/* No adjustment required. */
+		return 0;
+	}
+
+	volt_adjust = kcalloc(vreg->fuse_corner_count, sizeof(*volt_adjust),
+				GFP_KERNEL);
+	if (!volt_adjust)
+		return -ENOMEM;
+
+	rc = cpr3_parse_array_property(vreg,
+		"qcom,cpr-open-loop-voltage-fuse-adjustment",
+		vreg->fuse_corner_count, volt_adjust);
+	if (rc) {
+		cpr3_err(vreg, "could not load open-loop fused voltage adjustments, rc=%d\n",
+			rc);
+		goto done;
+	}
+
+	for (i = 0; i < vreg->fuse_corner_count; i++) {
+		if (volt_adjust[i]) {
+			prev_volt = fuse_volt[i];
+			fuse_volt[i] += volt_adjust[i];
+			cpr3_debug(vreg, "adjusted fuse corner %d open-loop voltage: %d --> %d uV\n",
+				i, prev_volt, fuse_volt[i]);
+		}
+	}
+
+done:
+	kfree(volt_adjust);
+	return rc;
+}
+
+/**
+ * cpr3_adjust_open_loop_voltages() - adjust the open-loop voltages for each
+ *		corner according to device tree values
+ * @vreg:		Pointer to the CPR3 regulator
+ *
+ * Return: 0 on success, errno on failure
+ */
+int cpr3_adjust_open_loop_voltages(struct cpr3_regulator *vreg)
+{
+	int i, rc, prev_volt, min_volt;
+	int *volt_adjust, *volt_diff;
+
+	if (!of_find_property(vreg->of_node,
+			"qcom,cpr-open-loop-voltage-adjustment", NULL)) {
+		/* No adjustment required. */
+		return 0;
+	}
+
+	volt_adjust = kcalloc(vreg->corner_count, sizeof(*volt_adjust),
+				GFP_KERNEL);
+	volt_diff = kcalloc(vreg->corner_count, sizeof(*volt_diff), GFP_KERNEL);
+	if (!volt_adjust || !volt_diff) {
+		rc = -ENOMEM;
+		goto done;
+	}
+
+	rc = cpr3_parse_corner_array_property(vreg,
+		"qcom,cpr-open-loop-voltage-adjustment", 1, volt_adjust);
+	if (rc) {
+		cpr3_err(vreg, "could not load open-loop voltage adjustments, rc=%d\n",
+			rc);
+		goto done;
+	}
+
+	for (i = 0; i < vreg->corner_count; i++) {
+		if (volt_adjust[i]) {
+			prev_volt = vreg->corner[i].open_loop_volt;
+			vreg->corner[i].open_loop_volt += volt_adjust[i];
+			cpr3_debug(vreg, "adjusted corner %d open-loop voltage: %d --> %d uV\n",
+				i, prev_volt, vreg->corner[i].open_loop_volt);
+		}
+	}
+
+	if (of_find_property(vreg->of_node,
+			"qcom,cpr-open-loop-voltage-min-diff", NULL)) {
+		rc = cpr3_parse_corner_array_property(vreg,
+			"qcom,cpr-open-loop-voltage-min-diff", 1, volt_diff);
+		if (rc) {
+			cpr3_err(vreg, "could not load minimum open-loop voltage differences, rc=%d\n",
+				rc);
+			goto done;
+		}
+	}
+
+	/*
+	 * Ensure that open-loop voltages increase monotonically with respect
+	 * to configurable minimum allowed differences.
+	 */
+	for (i = 1; i < vreg->corner_count; i++) {
+		min_volt = vreg->corner[i - 1].open_loop_volt + volt_diff[i];
+		if (vreg->corner[i].open_loop_volt < min_volt) {
+			cpr3_debug(vreg, "adjusted corner %d open-loop voltage=%d uV < corner %d voltage=%d uV + min diff=%d uV; overriding: corner %d voltage=%d\n",
+				i, vreg->corner[i].open_loop_volt,
+				i - 1, vreg->corner[i - 1].open_loop_volt,
+				volt_diff[i], i, min_volt);
+			vreg->corner[i].open_loop_volt = min_volt;
+		}
+	}
+
+done:
+	kfree(volt_diff);
+	kfree(volt_adjust);
+	return rc;
+}
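+
+/*
+ * Illustrative example (hypothetical values): if corner 3 ends up with an
+ * adjusted open-loop voltage of 905000 uV, corner 4 with 900000 uV, and the
+ * minimum difference for corner 4 is 0 uV, then min_volt = 905000 uV and
+ * corner 4 is overridden to 905000 uV to keep the sequence monotonic.
+ */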
+
+/**
+ * cpr3_quot_adjustment() - returns the quotient adjustment value resulting from
+ *		the specified voltage adjustment and RO scaling factor
+ * @ro_scale:		The CPR ring oscillator (RO) scaling factor with units
+ *			of QUOT/V
+ * @volt_adjust:	The amount to adjust the voltage by in units of
+ *			microvolts.  This value may be positive or negative.
+ */
+int cpr3_quot_adjustment(int ro_scale, int volt_adjust)
+{
+	unsigned long long temp;
+	int quot_adjust;
+	int sign = 1;
+
+	if (ro_scale < 0) {
+		sign = -sign;
+		ro_scale = -ro_scale;
+	}
+
+	if (volt_adjust < 0) {
+		sign = -sign;
+		volt_adjust = -volt_adjust;
+	}
+
+	temp = (unsigned long long)ro_scale * (unsigned long long)volt_adjust;
+	do_div(temp, 1000000);
+
+	quot_adjust = temp;
+	quot_adjust *= sign;
+
+	return quot_adjust;
+}
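+
+/*
+ * Worked example (hypothetical numbers): with ro_scale = 2000 QUOT/V and
+ * volt_adjust = -25000 uV, temp = 2000 * 25000 = 50000000; dividing by
+ * 1000000 gives 50, and the combined sign is negative, so the function
+ * returns a quotient adjustment of -50.
+ */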
+
+/**
+ * cpr3_voltage_adjustment() - returns the voltage adjustment value resulting
+ *		from the specified quotient adjustment and RO scaling factor
+ * @ro_scale:		The CPR ring oscillator (RO) scaling factor with units
+ *			of QUOT/V
+ * @quot_adjust:	The amount to adjust the quotient by in units of
+ *			QUOT.  This value may be positive or negative.
+ */
+int cpr3_voltage_adjustment(int ro_scale, int quot_adjust)
+{
+	unsigned long long temp;
+	int volt_adjust;
+	int sign = 1;
+
+	if (ro_scale < 0) {
+		sign = -sign;
+		ro_scale = -ro_scale;
+	}
+
+	if (quot_adjust < 0) {
+		sign = -sign;
+		quot_adjust = -quot_adjust;
+	}
+
+	if (ro_scale == 0)
+		return 0;
+
+	temp = (unsigned long long)quot_adjust * 1000000;
+	do_div(temp, ro_scale);
+
+	volt_adjust = temp;
+	volt_adjust *= sign;
+
+	return volt_adjust;
+}
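+
+/*
+ * Worked example (hypothetical numbers): with ro_scale = 2000 QUOT/V and
+ * quot_adjust = -50, temp = 50 * 1000000 = 50000000; dividing by 2000
+ * gives 25000 with a negative combined sign, so the function returns
+ * -25000 uV, the inverse of the cpr3_quot_adjustment() example above.
+ */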
+
+/**
+ * cpr3_parse_closed_loop_voltage_adjustments() - load per-fuse-corner and
+ *		per-corner closed-loop adjustment values from device tree
+ * @vreg:		Pointer to the CPR3 regulator
+ * @ro_sel:		Array of ring oscillator values selected for each
+ *			fuse corner
+ * @volt_adjust:	Pointer to array which will be filled with the
+ *			per-corner closed-loop adjustment voltages
+ * @volt_adjust_fuse:	Pointer to array which will be filled with the
+ *			per-fuse-corner closed-loop adjustment voltages
+ * @ro_scale:		Pointer to array which will be filled with the
+ *			per-fuse-corner RO scaling factor values with units of
+ *			QUOT/V
+ *
+ * Return: 0 on success, errno on failure
+ */
+int cpr3_parse_closed_loop_voltage_adjustments(
+			struct cpr3_regulator *vreg, u64 *ro_sel,
+			int *volt_adjust, int *volt_adjust_fuse, int *ro_scale)
+{
+	int i, rc;
+	u32 *ro_all_scale;
+
+	if (!of_find_property(vreg->of_node,
+			"qcom,cpr-closed-loop-voltage-adjustment", NULL)
+	    && !of_find_property(vreg->of_node,
+			"qcom,cpr-closed-loop-voltage-fuse-adjustment", NULL)
+	    && !vreg->aging_allowed) {
+		/* No adjustment required. */
+		return 0;
+	} else if (!of_find_property(vreg->of_node,
+			"qcom,cpr-ro-scaling-factor", NULL)) {
+		cpr3_err(vreg, "qcom,cpr-ro-scaling-factor is required for closed-loop voltage adjustment, but is missing\n");
+		return -EINVAL;
+	}
+
+	ro_all_scale = kcalloc(vreg->fuse_corner_count * CPR3_RO_COUNT,
+				sizeof(*ro_all_scale), GFP_KERNEL);
+	if (!ro_all_scale)
+		return -ENOMEM;
+
+	rc = cpr3_parse_array_property(vreg, "qcom,cpr-ro-scaling-factor",
+		vreg->fuse_corner_count * CPR3_RO_COUNT, ro_all_scale);
+	if (rc) {
+		cpr3_err(vreg, "could not load RO scaling factors, rc=%d\n",
+			rc);
+		goto done;
+	}
+
+	for (i = 0; i < vreg->fuse_corner_count; i++)
+		ro_scale[i] = ro_all_scale[i * CPR3_RO_COUNT + ro_sel[i]];
+
+	for (i = 0; i < vreg->corner_count; i++)
+		memcpy(vreg->corner[i].ro_scale,
+		 &ro_all_scale[vreg->corner[i].cpr_fuse_corner * CPR3_RO_COUNT],
+		 sizeof(*ro_all_scale) * CPR3_RO_COUNT);
+
+	if (of_find_property(vreg->of_node,
+			"qcom,cpr-closed-loop-voltage-fuse-adjustment", NULL)) {
+		rc = cpr3_parse_array_property(vreg,
+			"qcom,cpr-closed-loop-voltage-fuse-adjustment",
+			vreg->fuse_corner_count, volt_adjust_fuse);
+		if (rc) {
+			cpr3_err(vreg, "could not load closed-loop fused voltage adjustments, rc=%d\n",
+				rc);
+			goto done;
+		}
+	}
+
+	if (of_find_property(vreg->of_node,
+			"qcom,cpr-closed-loop-voltage-adjustment", NULL)) {
+		rc = cpr3_parse_corner_array_property(vreg,
+			"qcom,cpr-closed-loop-voltage-adjustment",
+			1, volt_adjust);
+		if (rc) {
+			cpr3_err(vreg, "could not load closed-loop voltage adjustments, rc=%d\n",
+				rc);
+			goto done;
+		}
+	}
+
+done:
+	kfree(ro_all_scale);
+	return rc;
+}
+
+/**
+ * cpr3_apm_init() - initialize APM data for a CPR3 controller
+ * @ctrl:		Pointer to the CPR3 controller
+ *
+ * This function loads memory array power mux (APM) data from device tree
+ * if it is present and requests a handle to the appropriate APM controller
+ * device.
+ *
+ * Return: 0 on success, errno on failure
+ */
+int cpr3_apm_init(struct cpr3_controller *ctrl)
+{
+	struct device_node *node = ctrl->dev->of_node;
+	int rc;
+
+	if (!of_find_property(node, "qcom,apm-ctrl", NULL)) {
+		/* No APM used */
+		return 0;
+	}
+
+	ctrl->apm = msm_apm_ctrl_dev_get(ctrl->dev);
+	if (IS_ERR(ctrl->apm)) {
+		rc = PTR_ERR(ctrl->apm);
+		if (rc != -EPROBE_DEFER)
+			cpr3_err(ctrl, "APM get failed, rc=%d\n", rc);
+		return rc;
+	}
+
+	rc = of_property_read_u32(node, "qcom,apm-threshold-voltage",
+				&ctrl->apm_threshold_volt);
+	if (rc) {
+		cpr3_err(ctrl, "error reading qcom,apm-threshold-voltage, rc=%d\n",
+			rc);
+		return rc;
+	}
+	ctrl->apm_threshold_volt
+		= CPR3_ROUND(ctrl->apm_threshold_volt, ctrl->step_volt);
+
+	/* No error check since this is an optional property. */
+	of_property_read_u32(node, "qcom,apm-hysteresis-voltage",
+				&ctrl->apm_adj_volt);
+	ctrl->apm_adj_volt = CPR3_ROUND(ctrl->apm_adj_volt, ctrl->step_volt);
+
+	ctrl->apm_high_supply = MSM_APM_SUPPLY_APCC;
+	ctrl->apm_low_supply = MSM_APM_SUPPLY_MX;
+
+	return 0;
+}
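+
+/*
+ * Illustrative snippet (hypothetical phandle and voltages):
+ *
+ *	qcom,apm-ctrl = <&apc_apm>;
+ *	qcom,apm-threshold-voltage = <800000>;
+ *	qcom,apm-hysteresis-voltage = <20000>;
+ */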
+
+/**
+ * cpr3_mem_acc_init() - initialize mem-acc regulator data for
+ *		a CPR3 regulator
+ * @vreg:		Pointer to the CPR3 regulator
+ *
+ * Return: 0 on success, errno on failure
+ */
+int cpr3_mem_acc_init(struct cpr3_regulator *vreg)
+{
+	struct cpr3_controller *ctrl = vreg->thread->ctrl;
+	u32 *temp;
+	int i, rc;
+
+	if (!ctrl->mem_acc_regulator) {
+		cpr3_info(ctrl, "not using memory accelerator regulator\n");
+		return 0;
+	}
+
+	temp = kcalloc(vreg->corner_count, sizeof(*temp), GFP_KERNEL);
+	if (!temp)
+		return -ENOMEM;
+
+	rc = cpr3_parse_corner_array_property(vreg, "qcom,mem-acc-voltage",
+					      1, temp);
+	if (rc) {
+		cpr3_err(ctrl, "could not load mem-acc corners, rc=%d\n", rc);
+	} else {
+		for (i = 0; i < vreg->corner_count; i++)
+			vreg->corner[i].mem_acc_volt = temp[i];
+	}
+
+	kfree(temp);
+	return rc;
+}
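+
+/*
+ * Illustrative snippet: qcom,mem-acc-voltage provides one entry per CPR
+ * corner (the example values below are hypothetical):
+ *
+ *	qcom,mem-acc-voltage = <1 1 2 2>;
+ */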
+
+/**
+ * cpr4_load_core_and_temp_adj() - parse the per-online-core and
+ *		per-temperature voltage adjustments for a given corner or
+ *		corner band from device tree.
+ * @vreg:	Pointer to the CPR3 regulator
+ * @num:	Corner number or corner band number
+ * @use_corner_band:	Boolean indicating if the CPR3 regulator supports
+ *			adjustments per corner band
+ *
+ * Return: 0 on success, errno on failure
+ */
+static int cpr4_load_core_and_temp_adj(struct cpr3_regulator *vreg,
+					int num, bool use_corner_band)
+{
+	struct cpr3_controller *ctrl = vreg->thread->ctrl;
+	struct cpr4_sdelta *sdelta;
+	int sdelta_size, i, j, pos, rc = 0;
+	char str[75];
+	size_t buflen;
+	char *buf;
+
+	sdelta = use_corner_band ? vreg->corner_band[num].sdelta :
+		vreg->corner[num].sdelta;
+
+	if (!sdelta->allow_core_count_adj && !sdelta->allow_temp_adj) {
+		/* corner doesn't need sdelta table */
+		sdelta->max_core_count = 0;
+		sdelta->temp_band_count = 0;
+		return rc;
+	}
+
+	sdelta_size = sdelta->max_core_count * sdelta->temp_band_count;
+	snprintf(str, sizeof(str), use_corner_band ?
+	 "corner_band=%d core_config_count=%d temp_band_count=%d sdelta_size=%d\n"
+	 : "corner=%d core_config_count=%d temp_band_count=%d sdelta_size=%d\n",
+		 num, sdelta->max_core_count,
+		 sdelta->temp_band_count, sdelta_size);
+
+	cpr3_debug(vreg, "%s", str);
+
+	sdelta->table = devm_kcalloc(ctrl->dev, sdelta_size,
+				sizeof(*sdelta->table), GFP_KERNEL);
+	if (!sdelta->table)
+		return -ENOMEM;
+
+	snprintf(str, sizeof(str), use_corner_band ?
+		 "qcom,cpr-corner-band%d-temp-core-voltage-adjustment" :
+		 "qcom,cpr-corner%d-temp-core-voltage-adjustment",
+		 num + CPR3_CORNER_OFFSET);
+
+	rc = cpr3_parse_array_property(vreg, str, sdelta_size,
+				sdelta->table);
+	if (rc) {
+		cpr3_err(vreg, "could not load %s, rc=%d\n", str, rc);
+		return rc;
+	}
+
+	/*
+	 * Convert sdelta margins from uV to PMIC steps and apply negation to
+	 * follow the SDELTA register semantics.
+	 */
+	for (i = 0; i < sdelta_size; i++)
+		sdelta->table[i] = -(sdelta->table[i] / ctrl->step_volt);
+
+	buflen = sizeof(*buf) * sdelta_size * (MAX_CHARS_PER_INT + 2);
+	buf = kzalloc(buflen, GFP_KERNEL);
+	if (!buf)
+		return rc;
+
+	for (i = 0; i < sdelta->max_core_count; i++) {
+		for (j = 0, pos = 0; j < sdelta->temp_band_count; j++)
+			pos += scnprintf(buf + pos, buflen - pos, " %u",
+			 sdelta->table[i * sdelta->temp_band_count + j]);
+		cpr3_debug(vreg, "sdelta[%d]:%s\n", i, buf);
+	}
+
+	kfree(buf);
+	return rc;
+}
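+
+/*
+ * Worked example of the uV-to-step conversion above (hypothetical values):
+ * with ctrl->step_volt = 5000 uV, a parsed margin of 10000 uV is stored as
+ * -(10000 / 5000) = -2 PMIC steps, while a margin of -10000 uV is stored
+ * as +2.
+ */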
+
+/**
+ * cpr4_parse_core_count_temp_voltage_adj() - parse configuration data for
+ *		per-online-core and per-temperature voltage adjustment for
+ *		a CPR3 regulator from device tree.
+ * @vreg:	Pointer to the CPR3 regulator
+ * @use_corner_band:	Boolean indicating if the CPR3 regulator supports
+ *			adjustments per corner band
+ *
+ * This function supports parsing of per-online-core and per-temperature
+ * adjustments per corner or per corner band. CPR controllers which support
+ * corner bands apply the same adjustments to all corners within a corner band.
+ *
+ * Return: 0 on success, errno on failure
+ */
+int cpr4_parse_core_count_temp_voltage_adj(
+			struct cpr3_regulator *vreg, bool use_corner_band)
+{
+	struct cpr3_controller *ctrl = vreg->thread->ctrl;
+	struct device_node *node = vreg->of_node;
+	struct cpr4_sdelta *sdelta;
+	int i, sdelta_table_count, rc = 0;
+	int *allow_core_count_adj = NULL, *allow_temp_adj = NULL;
+	char prop_str[75];
+
+	if (of_find_property(node, use_corner_band ?
+			     "qcom,corner-band-allow-temp-adjustment"
+			     : "qcom,corner-allow-temp-adjustment", NULL)) {
+		if (!ctrl->allow_temp_adj) {
+			cpr3_err(ctrl, "Temperature adjustment configurations missing\n");
+			return -EINVAL;
+		}
+
+		vreg->allow_temp_adj = true;
+	}
+
+	if (of_find_property(node, use_corner_band ?
+			     "qcom,corner-band-allow-core-count-adjustment"
+			     : "qcom,corner-allow-core-count-adjustment",
+			     NULL)) {
+		rc = of_property_read_u32(node, "qcom,max-core-count",
+				&vreg->max_core_count);
+		if (rc) {
+			cpr3_err(vreg, "error reading qcom,max-core-count, rc=%d\n",
+				rc);
+			return -EINVAL;
+		}
+
+		vreg->allow_core_count_adj = true;
+		ctrl->allow_core_count_adj = true;
+	}
+
+	if (!vreg->allow_temp_adj && !vreg->allow_core_count_adj) {
+		/*
+		 * Both per-online-core and temperature based adjustments are
+		 * disabled for this regulator.
+		 */
+		return 0;
+	} else if (!vreg->allow_core_count_adj) {
+		/*
+		 * Only per-temperature voltage adjustments are allowed.
+		 * Keep max core count value as 1 to allocate SDELTA.
+		 */
+		vreg->max_core_count = 1;
+	}
+
+	if (vreg->allow_core_count_adj) {
+		allow_core_count_adj = kcalloc(vreg->corner_count,
+					sizeof(*allow_core_count_adj),
+					GFP_KERNEL);
+		if (!allow_core_count_adj)
+			return -ENOMEM;
+
+		snprintf(prop_str, sizeof(prop_str), use_corner_band ?
+			 "qcom,corner-band-allow-core-count-adjustment" :
+			 "qcom,corner-allow-core-count-adjustment");
+
+		rc = use_corner_band ?
+			cpr3_parse_corner_band_array_property(vreg, prop_str,
+					      1, allow_core_count_adj) :
+			cpr3_parse_corner_array_property(vreg, prop_str,
+						 1, allow_core_count_adj);
+		if (rc) {
+			cpr3_err(vreg, "error reading %s, rc=%d\n", prop_str,
+				 rc);
+			goto done;
+		}
+	}
+
+	if (vreg->allow_temp_adj) {
+		allow_temp_adj = kcalloc(vreg->corner_count,
+					sizeof(*allow_temp_adj), GFP_KERNEL);
+		if (!allow_temp_adj) {
+			rc = -ENOMEM;
+			goto done;
+		}
+
+		snprintf(prop_str, sizeof(prop_str), use_corner_band ?
+			 "qcom,corner-band-allow-temp-adjustment" :
+			 "qcom,corner-allow-temp-adjustment");
+
+		rc = use_corner_band ?
+			cpr3_parse_corner_band_array_property(vreg, prop_str,
+						      1, allow_temp_adj) :
+			cpr3_parse_corner_array_property(vreg, prop_str,
+						 1, allow_temp_adj);
+		if (rc) {
+			cpr3_err(vreg, "error reading %s, rc=%d\n", prop_str,
+				 rc);
+			goto done;
+		}
+	}
+
+	sdelta_table_count = use_corner_band ? vreg->corner_band_count :
+		vreg->corner_count;
+
+	for (i = 0; i < sdelta_table_count; i++) {
+		sdelta = devm_kzalloc(ctrl->dev, sizeof(*sdelta),
+				      GFP_KERNEL);
+		if (!sdelta) {
+			rc = -ENOMEM;
+			goto done;
+		}
+
+		if (allow_core_count_adj)
+			sdelta->allow_core_count_adj = allow_core_count_adj[i];
+		if (allow_temp_adj)
+			sdelta->allow_temp_adj = allow_temp_adj[i];
+		sdelta->max_core_count = vreg->max_core_count;
+		sdelta->temp_band_count = ctrl->temp_band_count;
+
+		if (use_corner_band)
+			vreg->corner_band[i].sdelta = sdelta;
+		else
+			vreg->corner[i].sdelta = sdelta;
+
+		rc = cpr4_load_core_and_temp_adj(vreg, i, use_corner_band);
+		if (rc) {
+			cpr3_err(vreg, "corner/band %d core and temp adjustment loading failed, rc=%d\n",
+				 i, rc);
+			goto done;
+		}
+	}
+
+done:
+	kfree(allow_core_count_adj);
+	kfree(allow_temp_adj);
+
+	return rc;
+}
+
+/**
+ * cprh_adjust_voltages_for_apm() - adjust per-corner floor and ceiling voltages
+ *		so that they do not overlap the APM threshold voltage.
+ * @vreg:		Pointer to the CPR3 regulator
+ *
+ * The memory array power mux (APM) must be configured for a specific supply
+ * based upon where the VDD voltage lies with respect to the APM threshold
+ * voltage.  When using CPR hardware closed-loop, the voltage may vary anywhere
+ * between the floor and ceiling voltage without software notification.
+ * Therefore, it is required that the floor to ceiling range for every corner
+ * not intersect the APM threshold voltage.  This function adjusts the floor to
+ * ceiling range for each corner which violates this requirement.
+ *
+ * The following algorithm is applied:
+ *	if floor < threshold <= ceiling:
+ *		if open_loop >= threshold, then floor = max(floor,
+ *		threshold - adj)
+ *		else ceiling = threshold - step
+ * where:
+ *	adj = APM hysteresis voltage established to minimize the number of
+ *	      corners with artificially increased floor voltages
+ *	step = voltage in microvolts of a single step of the VDD supply
+ *
+ * The open-loop voltage is also bounded by the new floor or ceiling value as
+ * needed.
+ *
+ * Return: none
+ */
+void cprh_adjust_voltages_for_apm(struct cpr3_regulator *vreg)
+{
+	struct cpr3_controller *ctrl = vreg->thread->ctrl;
+	struct cpr3_corner *corner;
+	int i, adj, threshold, prev_ceiling, prev_floor, prev_open_loop;
+
+	if (!ctrl->apm_threshold_volt) {
+		/* APM not being used. */
+		return;
+	}
+
+	ctrl->apm_threshold_volt = CPR3_ROUND(ctrl->apm_threshold_volt,
+						ctrl->step_volt);
+	ctrl->apm_adj_volt = CPR3_ROUND(ctrl->apm_adj_volt, ctrl->step_volt);
+
+	threshold = ctrl->apm_threshold_volt;
+	adj = ctrl->apm_adj_volt;
+
+	for (i = 0; i < vreg->corner_count; i++) {
+		corner = &vreg->corner[i];
+
+		if (threshold <= corner->floor_volt
+		    || threshold > corner->ceiling_volt)
+			continue;
+
+		prev_floor = corner->floor_volt;
+		prev_ceiling = corner->ceiling_volt;
+		prev_open_loop = corner->open_loop_volt;
+
+		if (corner->open_loop_volt >= threshold) {
+			corner->floor_volt = max(corner->floor_volt,
+						 threshold - adj);
+			if (corner->open_loop_volt < corner->floor_volt)
+				corner->open_loop_volt = corner->floor_volt;
+		} else {
+			corner->ceiling_volt = threshold - ctrl->step_volt;
+		}
+
+		if (corner->floor_volt != prev_floor
+		    || corner->ceiling_volt != prev_ceiling
+		    || corner->open_loop_volt != prev_open_loop)
+			cpr3_debug(vreg, "APM threshold=%d, APM adj=%d changed corner %d voltages; prev: floor=%d, ceiling=%d, open-loop=%d; new: floor=%d, ceiling=%d, open-loop=%d\n",
+				threshold, adj, i, prev_floor, prev_ceiling,
+				prev_open_loop, corner->floor_volt,
+				corner->ceiling_volt, corner->open_loop_volt);
+	}
+}
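+
+/*
+ * Worked example (hypothetical values): with threshold = 800000 uV,
+ * adj = 20000 uV, and step = 5000 uV, a corner with floor = 760000 uV,
+ * ceiling = 830000 uV, and open-loop = 810000 uV straddles the threshold.
+ * Since open-loop >= threshold, the floor is raised to
+ * max(760000, 800000 - 20000) = 780000 uV.  Had the open-loop voltage been
+ * 790000 uV instead, the ceiling would have been lowered to
+ * 800000 - 5000 = 795000 uV.
+ */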
+
+/**
+ * cprh_adjust_voltages_for_mem_acc() - adjust per-corner floor and ceiling
+ *		voltages so that they do not intersect the MEM ACC threshold
+ *		voltage
+ * @vreg:		Pointer to the CPR3 regulator
+ *
+ * The following algorithm is applied:
+ *	if floor < threshold <= ceiling:
+ *		if open_loop >= threshold, then floor = threshold
+ *		else ceiling = threshold - step
+ * where:
+ *	step = voltage in microvolts of a single step of the VDD supply
+ *
+ * The open-loop voltage is also bounded by the new floor or ceiling value as
+ * needed.
+ *
+ * Return: none
+ */
+void cprh_adjust_voltages_for_mem_acc(struct cpr3_regulator *vreg)
+{
+	struct cpr3_controller *ctrl = vreg->thread->ctrl;
+	struct cpr3_corner *corner;
+	int i, threshold, prev_ceiling, prev_floor, prev_open_loop;
+
+	if (!ctrl->mem_acc_threshold_volt) {
+		/* MEM ACC not being used. */
+		return;
+	}
+
+	ctrl->mem_acc_threshold_volt = CPR3_ROUND(ctrl->mem_acc_threshold_volt,
+						ctrl->step_volt);
+
+	threshold = ctrl->mem_acc_threshold_volt;
+
+	for (i = 0; i < vreg->corner_count; i++) {
+		corner = &vreg->corner[i];
+
+		if (threshold <= corner->floor_volt
+		    || threshold > corner->ceiling_volt)
+			continue;
+
+		prev_floor = corner->floor_volt;
+		prev_ceiling = corner->ceiling_volt;
+		prev_open_loop = corner->open_loop_volt;
+
+		if (corner->open_loop_volt >= threshold) {
+			corner->floor_volt = max(corner->floor_volt, threshold);
+			if (corner->open_loop_volt < corner->floor_volt)
+				corner->open_loop_volt = corner->floor_volt;
+		} else {
+			corner->ceiling_volt = threshold - ctrl->step_volt;
+		}
+
+		if (corner->floor_volt != prev_floor
+		    || corner->ceiling_volt != prev_ceiling
+		    || corner->open_loop_volt != prev_open_loop)
+			cpr3_debug(vreg, "MEM ACC threshold=%d changed corner %d voltages; prev: floor=%d, ceiling=%d, open-loop=%d; new: floor=%d, ceiling=%d, open-loop=%d\n",
+				threshold, i, prev_floor, prev_ceiling,
+				prev_open_loop, corner->floor_volt,
+				corner->ceiling_volt, corner->open_loop_volt);
+	}
+}
+
+/**
+ * cpr3_apply_closed_loop_offset_voltages() - modify the closed-loop voltage
+ *		adjustments by the amounts that are needed for this
+ *		fuse combo
+ * @vreg:		Pointer to the CPR3 regulator
+ * @volt_adjust:	Array of closed-loop voltage adjustment values of length
+ *			vreg->corner_count which is further adjusted based upon
+ *			offset voltage fuse values.
+ * @fuse_volt_adjust:	Fused closed-loop voltage adjustment values of length
+ *			vreg->fuse_corner_count.
+ *
+ * Return: 0 on success, errno on failure
+ */
+static int cpr3_apply_closed_loop_offset_voltages(struct cpr3_regulator *vreg,
+			int *volt_adjust, int *fuse_volt_adjust)
+{
+	u32 *corner_map;
+	int rc = 0, i;
+
+	if (!of_find_property(vreg->of_node,
+		"qcom,cpr-fused-closed-loop-voltage-adjustment-map", NULL)) {
+		/* No closed-loop offset required. */
+		return 0;
+	}
+
+	corner_map = kcalloc(vreg->corner_count, sizeof(*corner_map),
+				GFP_KERNEL);
+	if (!corner_map)
+		return -ENOMEM;
+
+	rc = cpr3_parse_corner_array_property(vreg,
+		"qcom,cpr-fused-closed-loop-voltage-adjustment-map",
+		1, corner_map);
+	if (rc)
+		goto done;
+
+	for (i = 0; i < vreg->corner_count; i++) {
+		if (corner_map[i] == 0) {
+			continue;
+		} else if (corner_map[i] > vreg->fuse_corner_count) {
+			cpr3_err(vreg, "corner %d mapped to invalid fuse corner: %u\n",
+				i, corner_map[i]);
+			rc = -EINVAL;
+			goto done;
+		}
+
+		volt_adjust[i] += fuse_volt_adjust[corner_map[i] - 1];
+	}
+
+done:
+	kfree(corner_map);
+	return rc;
+}
+
+/**
+ * cpr3_enforce_inc_quotient_monotonicity() - Ensure that target quotients
+ *		increase monotonically from lower to higher corners
+ * @vreg:		Pointer to the CPR3 regulator
+ *
+ * Return: none
+ */
+static void cpr3_enforce_inc_quotient_monotonicity(struct cpr3_regulator *vreg)
+{
+	int i, j;
+
+	for (i = 1; i < vreg->corner_count; i++) {
+		for (j = 0; j < CPR3_RO_COUNT; j++) {
+			if (vreg->corner[i].target_quot[j]
+			    && vreg->corner[i].target_quot[j]
+					< vreg->corner[i - 1].target_quot[j]) {
+				cpr3_debug(vreg, "corner %d RO%u target quot=%u < corner %d RO%u target quot=%u; overriding: corner %d RO%u target quot=%u\n",
+					i, j,
+					vreg->corner[i].target_quot[j],
+					i - 1, j,
+					vreg->corner[i - 1].target_quot[j],
+					i, j,
+					vreg->corner[i - 1].target_quot[j]);
+				vreg->corner[i].target_quot[j]
+					= vreg->corner[i - 1].target_quot[j];
+			}
+		}
+	}
+}
+
+/**
+ * cpr3_enforce_dec_quotient_monotonicity() - Ensure that target quotients
+ *		decrease monotonically from higher to lower corners
+ * @vreg:		Pointer to the CPR3 regulator
+ *
+ * Return: none
+ */
+static void cpr3_enforce_dec_quotient_monotonicity(struct cpr3_regulator *vreg)
+{
+	int i, j;
+
+	for (i = vreg->corner_count - 2; i >= 0; i--) {
+		for (j = 0; j < CPR3_RO_COUNT; j++) {
+			if (vreg->corner[i + 1].target_quot[j]
+			    && vreg->corner[i].target_quot[j]
+					> vreg->corner[i + 1].target_quot[j]) {
+				cpr3_debug(vreg, "corner %d RO%u target quot=%u > corner %d RO%u target quot=%u; overriding: corner %d RO%u target quot=%u\n",
+					i, j,
+					vreg->corner[i].target_quot[j],
+					i + 1, j,
+					vreg->corner[i + 1].target_quot[j],
+					i, j,
+					vreg->corner[i + 1].target_quot[j]);
+				vreg->corner[i].target_quot[j]
+					= vreg->corner[i + 1].target_quot[j];
+			}
+		}
+	}
+}
+
+/**
+ * _cpr3_adjust_target_quotients() - adjust the target quotients for each
+ *		corner of the regulator according to input adjustment and
+ *		scaling arrays
+ * @vreg:		Pointer to the CPR3 regulator
+ * @volt_adjust:	Pointer to an array of closed-loop voltage adjustments
+ *			with units of microvolts.  The array must have
+ *			vreg->corner_count number of elements.
+ * @ro_scale:		Pointer to a flattened 2D array of RO scaling factors.
+ *			The array must have an inner dimension of CPR3_RO_COUNT
+ *			and an outer dimension of vreg->corner_count
+ * @label:		Null terminated string providing a label for the type
+ *			of adjustment.
+ *
+ * Return: true if any corners received a positive voltage adjustment (> 0),
+ *	   else false
+ */
+static bool _cpr3_adjust_target_quotients(struct cpr3_regulator *vreg,
+		const int *volt_adjust, const int *ro_scale, const char *label)
+{
+	int i, j, quot_adjust;
+	bool is_increasing = false;
+	u32 prev_quot;
+
+	for (i = 0; i < vreg->corner_count; i++) {
+		for (j = 0; j < CPR3_RO_COUNT; j++) {
+			if (vreg->corner[i].target_quot[j]) {
+				quot_adjust = cpr3_quot_adjustment(
+					ro_scale[i * CPR3_RO_COUNT + j],
+					volt_adjust[i]);
+				if (quot_adjust) {
+					prev_quot = vreg->corner[i].
+							target_quot[j];
+					vreg->corner[i].target_quot[j]
+						+= quot_adjust;
+					cpr3_debug(vreg, "adjusted corner %d RO%d target quot %s: %u --> %u (%d uV)\n",
+						i, j, label, prev_quot,
+						vreg->corner[i].target_quot[j],
+						volt_adjust[i]);
+				}
+			}
+		}
+		if (volt_adjust[i] > 0)
+			is_increasing = true;
+	}
+
+	return is_increasing;
+}
+
+/**
+ * cpr3_adjust_target_quotients() - adjust the target quotients for each
+ *			corner according to device tree values and fuse values
+ * @vreg:		Pointer to the CPR3 regulator
+ * @fuse_volt_adjust:	Fused closed-loop voltage adjustment values of length
+ *			vreg->fuse_corner_count. This parameter could be null
+ *			pointer when no fused adjustments are needed.
+ *
+ * Return: 0 on success, errno on failure
+ */
+int cpr3_adjust_target_quotients(struct cpr3_regulator *vreg,
+			int *fuse_volt_adjust)
+{
+	int i, rc;
+	int *volt_adjust, *ro_scale;
+	bool explicit_adjustment, fused_adjustment, is_increasing;
+
+	explicit_adjustment = of_find_property(vreg->of_node,
+		"qcom,cpr-closed-loop-voltage-adjustment", NULL);
+	fused_adjustment = of_find_property(vreg->of_node,
+		"qcom,cpr-fused-closed-loop-voltage-adjustment-map", NULL);
+
+	if (!explicit_adjustment && !fused_adjustment && !vreg->aging_allowed) {
+		/* No adjustment required. */
+		return 0;
+	} else if (!of_find_property(vreg->of_node,
+			"qcom,cpr-ro-scaling-factor", NULL)) {
+		cpr3_err(vreg, "qcom,cpr-ro-scaling-factor is required for closed-loop voltage adjustment, but is missing\n");
+		return -EINVAL;
+	}
+
+	volt_adjust = kcalloc(vreg->corner_count, sizeof(*volt_adjust),
+				GFP_KERNEL);
+	ro_scale = kcalloc(vreg->corner_count * CPR3_RO_COUNT,
+				sizeof(*ro_scale), GFP_KERNEL);
+	if (!volt_adjust || !ro_scale) {
+		rc = -ENOMEM;
+		goto done;
+	}
+
+	rc = cpr3_parse_corner_array_property(vreg,
+			"qcom,cpr-ro-scaling-factor", CPR3_RO_COUNT, ro_scale);
+	if (rc) {
+		cpr3_err(vreg, "could not load RO scaling factors, rc=%d\n",
+			rc);
+		goto done;
+	}
+
+	for (i = 0; i < vreg->corner_count; i++)
+		memcpy(vreg->corner[i].ro_scale, &ro_scale[i * CPR3_RO_COUNT],
+			sizeof(*ro_scale) * CPR3_RO_COUNT);
+
+	if (explicit_adjustment) {
+		rc = cpr3_parse_corner_array_property(vreg,
+			"qcom,cpr-closed-loop-voltage-adjustment",
+			1, volt_adjust);
+		if (rc) {
+			cpr3_err(vreg, "could not load closed-loop voltage adjustments, rc=%d\n",
+				rc);
+			goto done;
+		}
+
+		_cpr3_adjust_target_quotients(vreg, volt_adjust, ro_scale,
+			"from DT");
+		cpr3_enforce_inc_quotient_monotonicity(vreg);
+	}
+
+	if (fused_adjustment && fuse_volt_adjust) {
+		memset(volt_adjust, 0,
+			sizeof(*volt_adjust) * vreg->corner_count);
+
+		rc = cpr3_apply_closed_loop_offset_voltages(vreg, volt_adjust,
+				fuse_volt_adjust);
+		if (rc) {
+			cpr3_err(vreg, "could not apply fused closed-loop voltage reductions, rc=%d\n",
+				rc);
+			goto done;
+		}
+
+		is_increasing = _cpr3_adjust_target_quotients(vreg, volt_adjust,
+					ro_scale, "from fuse");
+		if (is_increasing)
+			cpr3_enforce_inc_quotient_monotonicity(vreg);
+		else
+			cpr3_enforce_dec_quotient_monotonicity(vreg);
+	}
+
+done:
+	kfree(volt_adjust);
+	kfree(ro_scale);
+	return rc;
+}
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/regulator/cprh-kbss-regulator.c	2019-10-29 09:26:24.645212984 +0100
@@ -0,0 +1,2456 @@
+/*
+ * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/bitops.h>
+#include <linux/debugfs.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/pm_opp.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/uaccess.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+#include <linux/regulator/of_regulator.h>
+
+#include "cpr3-regulator.h"
+
+#define MSM8998_KBSS_FUSE_CORNERS	4
+#define SDM660_KBSS_FUSE_CORNERS	5
+#define SDM630_POWER_KBSS_FUSE_CORNERS	3
+#define SDM630_PERF_KBSS_FUSE_CORNERS	5
+
+/**
+ * struct cprh_kbss_fuses - KBSS specific fuse data
+ * @ro_sel:		Ring oscillator select fuse parameter value for each
+ *			fuse corner
+ * @init_voltage:	Initial (i.e. open-loop) voltage fuse parameter value
+ *			for each fuse corner (raw, not converted to a voltage)
+ * @target_quot:	CPR target quotient fuse parameter value for each fuse
+ *			corner
+ * @quot_offset:	CPR target quotient offset fuse parameter value for each
+ *			fuse corner (raw, not unpacked) used for target quotient
+ *			interpolation
+ * @speed_bin:		Application processor speed bin fuse parameter value for
+ *			the given chip
+ * @cpr_fusing_rev:	CPR fusing revision fuse parameter value
+ * @force_highest_corner:	Flag indicating that all corners must operate
+ *			at the voltage of the highest corner.  This is
+ *			applicable to MSM8998 only.
+ * @aging_init_quot_diff:	Initial quotient difference between CPR aging
+ *			min and max sensors measured at time of manufacturing
+ *
+ * This struct holds the values for all of the fuses read from memory.
+ */
+struct cprh_kbss_fuses {
+	u64	*ro_sel;
+	u64	*init_voltage;
+	u64	*target_quot;
+	u64	*quot_offset;
+	u64	speed_bin;
+	u64	cpr_fusing_rev;
+	u64	force_highest_corner;
+	u64	aging_init_quot_diff;
+};
+
+/*
+ * Fuse combos 0 - 7 map to CPR fusing revision 0 - 7 with speed bin fuse = 0.
+ * Fuse combos 8 - 15 map to CPR fusing revision 0 - 7 with speed bin fuse = 1.
+ * Fuse combos 16 - 23 map to CPR fusing revision 0 - 7 with speed bin fuse = 2.
+ * Fuse combos 24 - 31 map to CPR fusing revision 0 - 7 with speed bin fuse = 3.
+ * Fuse combos 32 - 39 map to CPR fusing revision 0 - 7 with speed bin fuse = 4.
+ *
+ * Only a subset of these combos is valid on each SoC, as reflected by the
+ * per-SoC combo counts below.
+ */
+#define CPRH_MSM8998_KBSS_FUSE_COMBO_COUNT	32
+#define CPRH_SDM660_KBSS_FUSE_COMBO_COUNT	40
+#define CPRH_SDM630_KBSS_FUSE_COMBO_COUNT	24
+
+/*
+ * Constants which define the name of each fuse corner.
+ */
+enum cprh_msm8998_kbss_fuse_corner {
+	CPRH_MSM8998_KBSS_FUSE_CORNER_LOWSVS		= 0,
+	CPRH_MSM8998_KBSS_FUSE_CORNER_SVS		= 1,
+	CPRH_MSM8998_KBSS_FUSE_CORNER_NOM		= 2,
+	CPRH_MSM8998_KBSS_FUSE_CORNER_TURBO_L1	= 3,
+};
+
+static const char * const cprh_msm8998_kbss_fuse_corner_name[] = {
+	[CPRH_MSM8998_KBSS_FUSE_CORNER_LOWSVS]	= "LowSVS",
+	[CPRH_MSM8998_KBSS_FUSE_CORNER_SVS]		= "SVS",
+	[CPRH_MSM8998_KBSS_FUSE_CORNER_NOM]		= "NOM",
+	[CPRH_MSM8998_KBSS_FUSE_CORNER_TURBO_L1]	= "TURBO_L1",
+};
+
+enum cprh_sdm660_power_kbss_fuse_corner {
+	CPRH_SDM660_POWER_KBSS_FUSE_CORNER_LOWSVS	= 0,
+	CPRH_SDM660_POWER_KBSS_FUSE_CORNER_SVS		= 1,
+	CPRH_SDM660_POWER_KBSS_FUSE_CORNER_SVSPLUS	= 2,
+	CPRH_SDM660_POWER_KBSS_FUSE_CORNER_NOM		= 3,
+	CPRH_SDM660_POWER_KBSS_FUSE_CORNER_TURBO_L1	= 4,
+};
+
+static const char * const cprh_sdm660_power_kbss_fuse_corner_name[] = {
+	[CPRH_SDM660_POWER_KBSS_FUSE_CORNER_LOWSVS]	= "LowSVS",
+	[CPRH_SDM660_POWER_KBSS_FUSE_CORNER_SVS]	= "SVS",
+	[CPRH_SDM660_POWER_KBSS_FUSE_CORNER_SVSPLUS]	= "SVSPLUS",
+	[CPRH_SDM660_POWER_KBSS_FUSE_CORNER_NOM]	= "NOM",
+	[CPRH_SDM660_POWER_KBSS_FUSE_CORNER_TURBO_L1]	= "TURBO_L1",
+};
+
+enum cprh_sdm660_perf_kbss_fuse_corner {
+	CPRH_SDM660_PERF_KBSS_FUSE_CORNER_SVS		= 0,
+	CPRH_SDM660_PERF_KBSS_FUSE_CORNER_SVSPLUS	= 1,
+	CPRH_SDM660_PERF_KBSS_FUSE_CORNER_NOM		= 2,
+	CPRH_SDM660_PERF_KBSS_FUSE_CORNER_TURBO		= 3,
+	CPRH_SDM660_PERF_KBSS_FUSE_CORNER_TURBO_L2	= 4,
+};
+
+static const char * const cprh_sdm660_perf_kbss_fuse_corner_name[] = {
+	[CPRH_SDM660_PERF_KBSS_FUSE_CORNER_SVS]		= "SVS",
+	[CPRH_SDM660_PERF_KBSS_FUSE_CORNER_SVSPLUS]	= "SVSPLUS",
+	[CPRH_SDM660_PERF_KBSS_FUSE_CORNER_NOM]		= "NOM",
+	[CPRH_SDM660_PERF_KBSS_FUSE_CORNER_TURBO]	= "TURBO",
+	[CPRH_SDM660_PERF_KBSS_FUSE_CORNER_TURBO_L2]	= "TURBO_L2",
+};
+
+enum cprh_sdm630_power_kbss_fuse_corner {
+	CPRH_SDM630_POWER_KBSS_FUSE_CORNER_LOWSVS	= 0,
+	CPRH_SDM630_POWER_KBSS_FUSE_CORNER_SVSPLUS	= 1,
+	CPRH_SDM630_POWER_KBSS_FUSE_CORNER_TURBO_L1	= 2,
+};
+
+static const char * const cprh_sdm630_power_kbss_fuse_corner_name[] = {
+	[CPRH_SDM630_POWER_KBSS_FUSE_CORNER_LOWSVS]	= "LowSVS",
+	[CPRH_SDM630_POWER_KBSS_FUSE_CORNER_SVSPLUS]	= "SVSPLUS",
+	[CPRH_SDM630_POWER_KBSS_FUSE_CORNER_TURBO_L1]	= "TURBO_L1",
+};
+
+enum cprh_sdm630_perf_kbss_fuse_corner {
+	CPRH_SDM630_PERF_KBSS_FUSE_CORNER_LOWSVS	= 0,
+	CPRH_SDM630_PERF_KBSS_FUSE_CORNER_SVSPLUS	= 1,
+	CPRH_SDM630_PERF_KBSS_FUSE_CORNER_NOM		= 2,
+	CPRH_SDM630_PERF_KBSS_FUSE_CORNER_TURBO		= 3,
+	CPRH_SDM630_PERF_KBSS_FUSE_CORNER_TURBO_L2	= 4,
+};
+
+static const char * const cprh_sdm630_perf_kbss_fuse_corner_name[] = {
+	[CPRH_SDM630_PERF_KBSS_FUSE_CORNER_LOWSVS]	= "LowSVS",
+	[CPRH_SDM630_PERF_KBSS_FUSE_CORNER_SVSPLUS]	= "SVSPLUS",
+	[CPRH_SDM630_PERF_KBSS_FUSE_CORNER_NOM]		= "NOM",
+	[CPRH_SDM630_PERF_KBSS_FUSE_CORNER_TURBO]	= "TURBO",
+	[CPRH_SDM630_PERF_KBSS_FUSE_CORNER_TURBO_L2]	= "TURBO_L2",
+};
+
+/* KBSS cluster IDs */
+#define CPRH_KBSS_POWER_CLUSTER_ID 0
+#define CPRH_KBSS_PERFORMANCE_CLUSTER_ID 1
+
+/* KBSS controller IDs */
+#define CPRH_KBSS_MIN_CONTROLLER_ID 0
+#define CPRH_KBSS_MAX_CONTROLLER_ID 1
+
+/*
+ * MSM8998 KBSS fuse parameter locations:
+ *
+ * Structs are organized with the following dimensions:
+ *	Outer:  0 or 1 for power or performance cluster
+ *	Middle: 0 to 3 for fuse corners from lowest to highest corner
+ *	Inner:  large enough to hold the longest set of parameter segments which
+ *		fully defines a fuse parameter, +1 (for NULL termination).
+ *		Each segment corresponds to a contiguous group of bits from a
+ *		single fuse row.  These segments are concatenated together in
+ *		order to form the full fuse parameter value.  The segments for
+ *		a given parameter may correspond to different fuse rows.
+ *
+ */
+static const struct cpr3_fuse_param
+msm8998_kbss_ro_sel_param[2][MSM8998_KBSS_FUSE_CORNERS][2] = {
+	[CPRH_KBSS_POWER_CLUSTER_ID] = {
+		{{67, 12, 15}, {} },
+		{{67,  8, 11}, {} },
+		{{67,  4,  7}, {} },
+		{{67,  0,  3}, {} },
+	},
+	[CPRH_KBSS_PERFORMANCE_CLUSTER_ID] = {
+		{{69, 26, 29}, {} },
+		{{69, 22, 25}, {} },
+		{{69, 18, 21}, {} },
+		{{69, 14, 17}, {} },
+	},
+};
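+
+/*
+ * Reading example: each {row, start bit, end bit} triple above selects one
+ * contiguous bit field from a fuse row.  {67, 12, 15}, for instance, denotes
+ * bits 15:12 of fuse row 67, i.e. the 4-bit RO select value for the lowest
+ * power-cluster fuse corner.
+ */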
+
+static const struct cpr3_fuse_param
+sdm660_kbss_ro_sel_param[2][SDM660_KBSS_FUSE_CORNERS][3] = {
+	[CPRH_KBSS_POWER_CLUSTER_ID] = {
+		{{67, 12, 15}, {} },
+		{{67,  8, 11}, {} },
+		{{65, 56, 59}, {} },
+		{{67,  4,  7}, {} },
+		{{67,  0,  3}, {} },
+	},
+	[CPRH_KBSS_PERFORMANCE_CLUSTER_ID] = {
+		{{68, 61, 63}, {69,  0,  0} },
+		{{69,  1,  4}, {} },
+		{{68, 57, 60}, {} },
+		{{68, 53, 56}, {} },
+		{{66, 14, 17}, {} },
+	},
+};
+
+static const struct cpr3_fuse_param
+sdm630_kbss_ro_sel_param[2][SDM630_PERF_KBSS_FUSE_CORNERS][3] = {
+	[CPRH_KBSS_POWER_CLUSTER_ID] = {
+		{{67, 12, 15}, {} },
+		{{65, 56, 59}, {} },
+		{{67,  0,  3}, {} },
+	},
+	[CPRH_KBSS_PERFORMANCE_CLUSTER_ID] = {
+		{{68, 61, 63}, {69,  0,  0} },
+		{{69,  1,  4}, {} },
+		{{68, 57, 60}, {} },
+		{{68, 53, 56}, {} },
+		{{66, 14, 17}, {} },
+	},
+};
+
+static const struct cpr3_fuse_param
+msm8998_kbss_init_voltage_param[2][MSM8998_KBSS_FUSE_CORNERS][2] = {
+	[CPRH_KBSS_POWER_CLUSTER_ID] = {
+		{{67, 34, 39}, {} },
+		{{67, 28, 33}, {} },
+		{{67, 22, 27}, {} },
+		{{67, 16, 21}, {} },
+	},
+	[CPRH_KBSS_PERFORMANCE_CLUSTER_ID] = {
+		{{69, 48, 53}, {} },
+		{{69, 42, 47}, {} },
+		{{69, 36, 41}, {} },
+		{{69, 30, 35}, {} },
+	},
+};
+
+static const struct cpr3_fuse_param
+sdm660_kbss_init_voltage_param[2][SDM660_KBSS_FUSE_CORNERS][2] = {
+	[CPRH_KBSS_POWER_CLUSTER_ID] = {
+		{{67, 34, 39}, {} },
+		{{67, 28, 33}, {} },
+		{{71,  3,  8}, {} },
+		{{67, 22, 27}, {} },
+		{{67, 16, 21}, {} },
+	},
+	[CPRH_KBSS_PERFORMANCE_CLUSTER_ID] = {
+		{{69, 17, 22}, {} },
+		{{69, 23, 28}, {} },
+		{{69, 11, 16}, {} },
+		{{69,  5, 10}, {} },
+		{{70, 42, 47}, {} },
+	},
+};
+
+static const struct cpr3_fuse_param
+sdm630_kbss_init_voltage_param[2][SDM630_PERF_KBSS_FUSE_CORNERS][2] = {
+	[CPRH_KBSS_POWER_CLUSTER_ID] = {
+		{{67, 34, 39}, {} },
+		{{71,  3,  8}, {} },
+		{{67, 16, 21}, {} },
+	},
+	[CPRH_KBSS_PERFORMANCE_CLUSTER_ID] = {
+		{{69, 17, 22}, {} },
+		{{69, 23, 28}, {} },
+		{{69, 11, 16}, {} },
+		{{69,  5, 10}, {} },
+		{{70, 42, 47}, {} },
+	},
+};
+
+static const struct cpr3_fuse_param
+msm8998_kbss_target_quot_param[2][MSM8998_KBSS_FUSE_CORNERS][3] = {
+	[CPRH_KBSS_POWER_CLUSTER_ID] = {
+		{{68, 18, 29}, {} },
+		{{68,  6, 17}, {} },
+		{{67, 58, 63}, {68,  0,  5} },
+		{{67, 46, 57}, {} },
+	},
+	[CPRH_KBSS_PERFORMANCE_CLUSTER_ID] = {
+		{{70, 32, 43}, {} },
+		{{70, 20, 31}, {} },
+		{{70,  8, 19}, {} },
+		{{69, 60, 63}, {70,  0,  7}, {} },
+	},
+};
+
+static const struct cpr3_fuse_param
+sdm660_kbss_target_quot_param[2][SDM660_KBSS_FUSE_CORNERS][3] = {
+	[CPRH_KBSS_POWER_CLUSTER_ID] = {
+		{{68, 12, 23}, {} },
+		{{68,  0, 11}, {} },
+		{{71,  9, 20}, {} },
+		{{67, 52, 63}, {} },
+		{{67, 40, 51}, {} },
+	},
+	[CPRH_KBSS_PERFORMANCE_CLUSTER_ID] = {
+		{{69, 53, 63}, {70,  0,  0}, {} },
+		{{70,  1, 12}, {} },
+		{{69, 41, 52}, {} },
+		{{69, 29, 40}, {} },
+		{{70, 48, 59}, {} },
+	},
+};
+
+static const struct cpr3_fuse_param
+sdm630_kbss_target_quot_param[2][SDM630_PERF_KBSS_FUSE_CORNERS][3] = {
+	[CPRH_KBSS_POWER_CLUSTER_ID] = {
+		{{68, 12, 23}, {} },
+		{{71,  9, 20}, {} },
+		{{67, 40, 51}, {} },
+	},
+	[CPRH_KBSS_PERFORMANCE_CLUSTER_ID] = {
+		{{69, 53, 63}, {70,  0,  0}, {} },
+		{{70,  1, 12}, {} },
+		{{69, 41, 52}, {} },
+		{{69, 29, 40}, {} },
+		{{70, 48, 59}, {} },
+	},
+};
+
+static const struct cpr3_fuse_param
+msm8998_kbss_quot_offset_param[2][MSM8998_KBSS_FUSE_CORNERS][3] = {
+	[CPRH_KBSS_POWER_CLUSTER_ID] = {
+		{{} },
+		{{68, 63, 63}, {69, 0, 5}, {} },
+		{{68, 56, 62}, {} },
+		{{68, 49, 55}, {} },
+	},
+	[CPRH_KBSS_PERFORMANCE_CLUSTER_ID] = {
+		{{} },
+		{{71, 13, 15}, {71, 21, 24}, {} },
+		{{71,  6, 12}, {} },
+		{{70, 63, 63}, {71,  0,  5}, {} },
+	},
+};
+
+static const struct cpr3_fuse_param
+sdm660_kbss_quot_offset_param[2][SDM660_KBSS_FUSE_CORNERS][3] = {
+	[CPRH_KBSS_POWER_CLUSTER_ID] = {
+		{{} },
+		{{68, 38, 44}, {} },
+		{{71, 21, 27}, {} },
+		{{68, 31, 37}, {} },
+		{{68, 24, 30}, {} },
+	},
+	[CPRH_KBSS_PERFORMANCE_CLUSTER_ID] = {
+		{{} },
+		{{70, 27, 33}, {} },
+		{{70, 20, 26}, {} },
+		{{70, 13, 19}, {} },
+		{{70, 60, 63}, {71,  0,  2}, {} },
+	},
+};
+
+static const struct cpr3_fuse_param
+sdm630_kbss_quot_offset_param[2][SDM630_PERF_KBSS_FUSE_CORNERS][3] = {
+	[CPRH_KBSS_POWER_CLUSTER_ID] = {
+		{{} },
+		{{71, 21, 27}, {} },
+		{{68, 24, 30}, {} },
+	},
+	[CPRH_KBSS_PERFORMANCE_CLUSTER_ID] = {
+		{{} },
+		{{70, 27, 33}, {} },
+		{{70, 20, 26}, {} },
+		{{70, 13, 19}, {} },
+		{{70, 60, 63}, {71,  0,  2}, {} },
+	},
+};
+
+static const struct cpr3_fuse_param msm8998_cpr_fusing_rev_param[] = {
+	{39, 51, 53},
+	{},
+};
+
+static const struct cpr3_fuse_param sdm660_cpr_fusing_rev_param[] = {
+	{71, 28, 30},
+	{},
+};
+
+static const struct cpr3_fuse_param sdm630_cpr_fusing_rev_param[] = {
+	{71, 28, 30},
+	{},
+};
+
+static const struct cpr3_fuse_param kbss_speed_bin_param[] = {
+	{38, 29, 31},
+	{},
+};
+
+static const struct cpr3_fuse_param
+msm8998_cpr_force_highest_corner_param[] = {
+	{100, 45, 45},
+	{},
+};
+
+static const struct cpr3_fuse_param
+msm8998_kbss_aging_init_quot_diff_param[2][2] = {
+	[CPRH_KBSS_POWER_CLUSTER_ID] = {
+		{69, 6, 13},
+		{},
+	},
+	[CPRH_KBSS_PERFORMANCE_CLUSTER_ID] = {
+		{71, 25, 32},
+		{},
+	},
+};
+
+static const struct cpr3_fuse_param
+sdm660_kbss_aging_init_quot_diff_param[2][2] = {
+	[CPRH_KBSS_POWER_CLUSTER_ID] = {
+		{68, 45, 52},
+		{},
+	},
+	[CPRH_KBSS_PERFORMANCE_CLUSTER_ID] = {
+		{70, 34, 41},
+		{},
+	},
+};
+
+static const struct cpr3_fuse_param
+sdm630_kbss_aging_init_quot_diff_param[2][2] = {
+	[CPRH_KBSS_POWER_CLUSTER_ID] = {
+		{68, 45, 52},
+		{},
+	},
+	[CPRH_KBSS_PERFORMANCE_CLUSTER_ID] = {
+		{70, 34, 41},
+		{},
+	},
+};
+
+/*
+ * Open loop voltage fuse reference voltages in microvolts for MSM8998 v1
+ */
+static const int
+msm8998_v1_kbss_fuse_ref_volt[MSM8998_KBSS_FUSE_CORNERS] = {
+	696000,
+	768000,
+	896000,
+	1112000,
+};
+
+/*
+ * Open loop voltage fuse reference voltages in microvolts for MSM8998 v2
+ */
+static const int
+msm8998_v2_kbss_fuse_ref_volt[2][MSM8998_KBSS_FUSE_CORNERS] = {
+	[CPRH_KBSS_POWER_CLUSTER_ID] = {
+		688000,
+		756000,
+		828000,
+		1056000,
+	},
+	[CPRH_KBSS_PERFORMANCE_CLUSTER_ID] = {
+		756000,
+		756000,
+		828000,
+		1056000,
+	},
+};
+
+/*
+ * Open loop voltage fuse reference voltages in microvolts for SDM660
+ */
+static const int
+sdm660_kbss_fuse_ref_volt[2][SDM660_KBSS_FUSE_CORNERS] = {
+	[CPRH_KBSS_POWER_CLUSTER_ID] = {
+		644000,
+		724000,
+		788000,
+		868000,
+		1068000,
+	},
+	[CPRH_KBSS_PERFORMANCE_CLUSTER_ID] = {
+		724000,
+		788000,
+		868000,
+		988000,
+		1068000,
+	},
+};
+
+/*
+ * Open loop voltage fuse reference voltages in microvolts for SDM630
+ */
+static const int
+sdm630_kbss_fuse_ref_volt[2][SDM630_PERF_KBSS_FUSE_CORNERS] = {
+	[CPRH_KBSS_POWER_CLUSTER_ID] = {
+		644000,
+		788000,
+		1068000,
+	},
+	[CPRH_KBSS_PERFORMANCE_CLUSTER_ID] = {
+		644000,
+		788000,
+		868000,
+		988000,
+		1068000,
+	},
+};
+
+static const int
+sdm630_perf_kbss_speed_bin_2_fuse_ref_volt[SDM630_PERF_KBSS_FUSE_CORNERS] = {
+	644000,
+	788000,
+	868000,
+	988000,
+	1140000,
+};
+
+#define CPRH_KBSS_FUSE_STEP_VOLT		10000
+#define CPRH_KBSS_VOLTAGE_FUSE_SIZE		6
+#define CPRH_KBSS_QUOT_OFFSET_SCALE		5
+#define CPRH_KBSS_AGING_INIT_QUOT_DIFF_SIZE	8
+#define CPRH_KBSS_AGING_INIT_QUOT_DIFF_SCALE	1
+
+#define CPRH_KBSS_CPR_CLOCK_RATE		19200000
+
+#define CPRH_KBSS_MAX_CORNER_BAND_COUNT		4
+#define CPRH_KBSS_MAX_CORNER_COUNT		40
+
+#define CPRH_KBSS_CPR_SDELTA_CORE_COUNT		4
+
+#define CPRH_KBSS_MAX_TEMP_POINTS		3
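+
+/*
+ * Worked examples (values illustrative): a fused quotient offset of 10
+ * corresponds to 10 * CPRH_KBSS_QUOT_OFFSET_SCALE = 50 quotient units, and
+ * each step of an initial voltage fuse moves the open-loop voltage by
+ * CPRH_KBSS_FUSE_STEP_VOLT = 10 mV from the fuse reference voltage (sign
+ * handling is left to cpr3_convert_open_loop_voltage_fuse()).
+ */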
+
+/*
+ * msm8998 configuration
+ */
+#define MSM8998_KBSS_POWER_CPR_SENSOR_COUNT		6
+#define MSM8998_KBSS_PERFORMANCE_CPR_SENSOR_COUNT	9
+
+#define MSM8998_KBSS_POWER_TEMP_SENSOR_ID_START		1
+#define MSM8998_KBSS_POWER_TEMP_SENSOR_ID_END		5
+#define MSM8998_KBSS_PERFORMANCE_TEMP_SENSOR_ID_START	6
+#define MSM8998_KBSS_PERFORMANCE_TEMP_SENSOR_ID_END	10
+
+#define MSM8998_KBSS_POWER_AGING_SENSOR_ID		0
+#define MSM8998_KBSS_POWER_AGING_BYPASS_MASK0		0
+
+#define MSM8998_KBSS_PERFORMANCE_AGING_SENSOR_ID	0
+#define MSM8998_KBSS_PERFORMANCE_AGING_BYPASS_MASK0	0
+
+/*
+ * sdm660 configuration
+ */
+#define SDM660_KBSS_POWER_CPR_SENSOR_COUNT		6
+#define SDM660_KBSS_PERFORMANCE_CPR_SENSOR_COUNT	9
+
+#define SDM660_KBSS_POWER_TEMP_SENSOR_ID_START		10
+#define SDM660_KBSS_POWER_TEMP_SENSOR_ID_END		11
+#define SDM660_KBSS_PERFORMANCE_TEMP_SENSOR_ID_START	4
+#define SDM660_KBSS_PERFORMANCE_TEMP_SENSOR_ID_END	9
+
+#define SDM660_KBSS_POWER_AGING_SENSOR_ID		0
+#define SDM660_KBSS_POWER_AGING_BYPASS_MASK0		0
+
+#define SDM660_KBSS_PERFORMANCE_AGING_SENSOR_ID		0
+#define SDM660_KBSS_PERFORMANCE_AGING_BYPASS_MASK0	0
+
+/*
+ * sdm630 configuration
+ */
+#define SDM630_KBSS_POWER_CPR_SENSOR_COUNT		6
+#define SDM630_KBSS_PERFORMANCE_CPR_SENSOR_COUNT	6
+
+/*
+ * SOC IDs
+ */
+enum soc_id {
+	MSM8998_V1_SOC_ID = 1,
+	MSM8998_V2_SOC_ID = 2,
+	SDM660_SOC_ID     = 3,
+	SDM630_SOC_ID     = 4,
+};
+
+/**
+ * cprh_msm8998_kbss_read_fuse_data() - load msm8998 KBSS specific fuse
+ *		parameter values
+ * @vreg:		Pointer to the CPR3 regulator
+ * @fuse:		KBSS specific fuse data
+ *
+ * This function fills the cprh_kbss_fuses struct with values read from
+ * hardware fuses.
+ *
+ * Return: 0 on success, errno on failure
+ */
+static int cprh_msm8998_kbss_read_fuse_data(struct cpr3_regulator *vreg,
+		struct cprh_kbss_fuses *fuse)
+{
+	void __iomem *base = vreg->thread->ctrl->fuse_base;
+	int i, id, rc;
+
+	rc = cpr3_read_fuse_param(base, msm8998_cpr_fusing_rev_param,
+				&fuse->cpr_fusing_rev);
+	if (rc) {
+		cpr3_err(vreg, "Unable to read CPR fusing revision fuse, rc=%d\n",
+			rc);
+		return rc;
+	}
+	cpr3_info(vreg, "CPR fusing revision = %llu\n", fuse->cpr_fusing_rev);
+
+	id = vreg->thread->ctrl->ctrl_id;
+	for (i = 0; i < MSM8998_KBSS_FUSE_CORNERS; i++) {
+		rc = cpr3_read_fuse_param(base,
+				msm8998_kbss_init_voltage_param[id][i],
+				&fuse->init_voltage[i]);
+		if (rc) {
+			cpr3_err(vreg, "Unable to read fuse-corner %d initial voltage fuse, rc=%d\n",
+				i, rc);
+			return rc;
+		}
+
+		rc = cpr3_read_fuse_param(base,
+				msm8998_kbss_target_quot_param[id][i],
+				&fuse->target_quot[i]);
+		if (rc) {
+			cpr3_err(vreg, "Unable to read fuse-corner %d target quotient fuse, rc=%d\n",
+				i, rc);
+			return rc;
+		}
+
+		rc = cpr3_read_fuse_param(base,
+				msm8998_kbss_ro_sel_param[id][i],
+				&fuse->ro_sel[i]);
+		if (rc) {
+			cpr3_err(vreg, "Unable to read fuse-corner %d RO select fuse, rc=%d\n",
+				i, rc);
+			return rc;
+		}
+
+		rc = cpr3_read_fuse_param(base,
+				msm8998_kbss_quot_offset_param[id][i],
+				&fuse->quot_offset[i]);
+		if (rc) {
+			cpr3_err(vreg, "Unable to read fuse-corner %d quotient offset fuse, rc=%d\n",
+				i, rc);
+			return rc;
+		}
+	}
+
+	rc = cpr3_read_fuse_param(base,
+				msm8998_kbss_aging_init_quot_diff_param[id],
+				&fuse->aging_init_quot_diff);
+	if (rc) {
+		cpr3_err(vreg, "Unable to read aging initial quotient difference fuse, rc=%d\n",
+			rc);
+		return rc;
+	}
+
+	rc = cpr3_read_fuse_param(base,
+			  msm8998_cpr_force_highest_corner_param,
+			  &fuse->force_highest_corner);
+	if (rc) {
+		cpr3_err(vreg, "Unable to read CPR force highest corner fuse, rc=%d\n",
+			rc);
+		return rc;
+	}
+
+	if (fuse->force_highest_corner)
+		cpr3_info(vreg, "Fusing requires all operation at the highest corner\n");
+
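+	/*
+	 * Each (speed bin, fusing revision) pair selects one fuse combo:
+	 * e.g. fusing revision 2 with speed bin 1 yields combo
+	 * 2 + 8 * 1 = 10.
+	 */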
+	vreg->fuse_combo = fuse->cpr_fusing_rev + 8 * fuse->speed_bin;
+	if (vreg->fuse_combo >= CPRH_MSM8998_KBSS_FUSE_COMBO_COUNT) {
+		cpr3_err(vreg, "invalid CPR fuse combo = %d found\n",
+			vreg->fuse_combo);
+		return -EINVAL;
+	}
+
+	return rc;
+}
+
+/**
+ * cprh_sdm660_kbss_read_fuse_data() - load SDM660 KBSS specific fuse parameter
+ *		values
+ * @vreg:		Pointer to the CPR3 regulator
+ * @fuse:		KBSS specific fuse data
+ *
+ * This function fills the cprh_kbss_fuses struct with values read from
+ * hardware fuses.
+ *
+ * Return: 0 on success, errno on failure
+ */
+static int cprh_sdm660_kbss_read_fuse_data(struct cpr3_regulator *vreg,
+		struct cprh_kbss_fuses *fuse)
+{
+	void __iomem *base = vreg->thread->ctrl->fuse_base;
+	int i, id, rc;
+
+	rc = cpr3_read_fuse_param(base, sdm660_cpr_fusing_rev_param,
+				&fuse->cpr_fusing_rev);
+	if (rc) {
+		cpr3_err(vreg, "Unable to read CPR fusing revision fuse, rc=%d\n",
+			rc);
+		return rc;
+	}
+	cpr3_info(vreg, "CPR fusing revision = %llu\n", fuse->cpr_fusing_rev);
+
+	id = vreg->thread->ctrl->ctrl_id;
+	for (i = 0; i < SDM660_KBSS_FUSE_CORNERS; i++) {
+		rc = cpr3_read_fuse_param(base,
+				sdm660_kbss_init_voltage_param[id][i],
+				&fuse->init_voltage[i]);
+		if (rc) {
+			cpr3_err(vreg, "Unable to read fuse-corner %d initial voltage fuse, rc=%d\n",
+				i, rc);
+			return rc;
+		}
+
+		rc = cpr3_read_fuse_param(base,
+				sdm660_kbss_target_quot_param[id][i],
+				&fuse->target_quot[i]);
+		if (rc) {
+			cpr3_err(vreg, "Unable to read fuse-corner %d target quotient fuse, rc=%d\n",
+				i, rc);
+			return rc;
+		}
+
+		rc = cpr3_read_fuse_param(base,
+				sdm660_kbss_ro_sel_param[id][i],
+				&fuse->ro_sel[i]);
+		if (rc) {
+			cpr3_err(vreg, "Unable to read fuse-corner %d RO select fuse, rc=%d\n",
+				i, rc);
+			return rc;
+		}
+
+		rc = cpr3_read_fuse_param(base,
+				sdm660_kbss_quot_offset_param[id][i],
+				&fuse->quot_offset[i]);
+		if (rc) {
+			cpr3_err(vreg, "Unable to read fuse-corner %d quotient offset fuse, rc=%d\n",
+				i, rc);
+			return rc;
+		}
+	}
+
+	rc = cpr3_read_fuse_param(base,
+				sdm660_kbss_aging_init_quot_diff_param[id],
+				&fuse->aging_init_quot_diff);
+	if (rc) {
+		cpr3_err(vreg, "Unable to read aging initial quotient difference fuse, rc=%d\n",
+			rc);
+		return rc;
+	}
+
+	vreg->fuse_combo = fuse->cpr_fusing_rev + 8 * fuse->speed_bin;
+	if (vreg->fuse_combo >= CPRH_SDM660_KBSS_FUSE_COMBO_COUNT) {
+		cpr3_err(vreg, "invalid CPR fuse combo = %d found\n",
+			vreg->fuse_combo);
+		return -EINVAL;
+	}
+
+	return rc;
+}
+
+/**
+ * cprh_sdm630_kbss_read_fuse_data() - load SDM630 KBSS specific fuse parameter
+ *		values
+ * @vreg:		Pointer to the CPR3 regulator
+ * @fuse:		KBSS specific fuse data
+ *
+ * This function fills the cprh_kbss_fuses struct with values read from
+ * hardware fuses.
+ *
+ * Return: 0 on success, errno on failure
+ */
+static int cprh_sdm630_kbss_read_fuse_data(struct cpr3_regulator *vreg,
+		struct cprh_kbss_fuses *fuse)
+{
+	void __iomem *base = vreg->thread->ctrl->fuse_base;
+	int i, id, rc, fuse_corners;
+
+	rc = cpr3_read_fuse_param(base, sdm630_cpr_fusing_rev_param,
+				&fuse->cpr_fusing_rev);
+	if (rc) {
+		cpr3_err(vreg, "Unable to read CPR fusing revision fuse, rc=%d\n",
+			rc);
+		return rc;
+	}
+	cpr3_info(vreg, "CPR fusing revision = %llu\n", fuse->cpr_fusing_rev);
+
+	id = vreg->thread->ctrl->ctrl_id;
+	if (id == CPRH_KBSS_POWER_CLUSTER_ID)
+		fuse_corners = SDM630_POWER_KBSS_FUSE_CORNERS;
+	else
+		fuse_corners = SDM630_PERF_KBSS_FUSE_CORNERS;
+
+	for (i = 0; i < fuse_corners; i++) {
+		rc = cpr3_read_fuse_param(base,
+				sdm630_kbss_init_voltage_param[id][i],
+				&fuse->init_voltage[i]);
+		if (rc) {
+			cpr3_err(vreg, "Unable to read fuse-corner %d initial voltage fuse, rc=%d\n",
+				i, rc);
+			return rc;
+		}
+
+		rc = cpr3_read_fuse_param(base,
+				sdm630_kbss_target_quot_param[id][i],
+				&fuse->target_quot[i]);
+		if (rc) {
+			cpr3_err(vreg, "Unable to read fuse-corner %d target quotient fuse, rc=%d\n",
+				i, rc);
+			return rc;
+		}
+
+		rc = cpr3_read_fuse_param(base,
+				sdm630_kbss_ro_sel_param[id][i],
+				&fuse->ro_sel[i]);
+		if (rc) {
+			cpr3_err(vreg, "Unable to read fuse-corner %d RO select fuse, rc=%d\n",
+				i, rc);
+			return rc;
+		}
+
+		rc = cpr3_read_fuse_param(base,
+				sdm630_kbss_quot_offset_param[id][i],
+				&fuse->quot_offset[i]);
+		if (rc) {
+			cpr3_err(vreg, "Unable to read fuse-corner %d quotient offset fuse, rc=%d\n",
+				i, rc);
+			return rc;
+		}
+	}
+
+	rc = cpr3_read_fuse_param(base,
+				sdm630_kbss_aging_init_quot_diff_param[id],
+				&fuse->aging_init_quot_diff);
+	if (rc) {
+		cpr3_err(vreg, "Unable to read aging initial quotient difference fuse, rc=%d\n",
+			rc);
+		return rc;
+	}
+
+	vreg->fuse_combo = fuse->cpr_fusing_rev + 8 * fuse->speed_bin;
+	if (vreg->fuse_combo >= CPRH_SDM630_KBSS_FUSE_COMBO_COUNT) {
+		cpr3_err(vreg, "invalid CPR fuse combo = %d found\n",
+			vreg->fuse_combo);
+		return -EINVAL;
+	}
+
+	return rc;
+}
+
+/**
+ * cprh_kbss_read_fuse_data() - load KBSS specific fuse parameter values
+ * @vreg:		Pointer to the CPR3 regulator
+ *
+ * This function allocates a cprh_kbss_fuses struct, fills it with values
+ * read out of hardware fuses, and finally copies common fuse values
+ * into the CPR3 regulator struct.
+ *
+ * Return: 0 on success, errno on failure
+ */
+static int cprh_kbss_read_fuse_data(struct cpr3_regulator *vreg)
+{
+	void __iomem *base = vreg->thread->ctrl->fuse_base;
+	struct cprh_kbss_fuses *fuse;
+	int rc, fuse_corners;
+	enum soc_id soc_revision;
+
+	fuse = devm_kzalloc(vreg->thread->ctrl->dev, sizeof(*fuse), GFP_KERNEL);
+	if (!fuse)
+		return -ENOMEM;
+
+	soc_revision = vreg->thread->ctrl->soc_revision;
+	switch (soc_revision) {
+	case SDM660_SOC_ID:
+		fuse_corners = SDM660_KBSS_FUSE_CORNERS;
+		break;
+	case SDM630_SOC_ID:
+		if (vreg->thread->ctrl->ctrl_id == CPRH_KBSS_POWER_CLUSTER_ID)
+			fuse_corners = SDM630_POWER_KBSS_FUSE_CORNERS;
+		else
+			fuse_corners = SDM630_PERF_KBSS_FUSE_CORNERS;
+		break;
+	case MSM8998_V1_SOC_ID:
+	case MSM8998_V2_SOC_ID:
+		fuse_corners = MSM8998_KBSS_FUSE_CORNERS;
+		break;
+	default:
+		cpr3_err(vreg, "unsupported soc id = %d\n", soc_revision);
+		return -EINVAL;
+	}
+
+	fuse->ro_sel = devm_kcalloc(vreg->thread->ctrl->dev, fuse_corners,
+			sizeof(*fuse->ro_sel), GFP_KERNEL);
+	fuse->init_voltage = devm_kcalloc(vreg->thread->ctrl->dev, fuse_corners,
+			sizeof(*fuse->init_voltage), GFP_KERNEL);
+	fuse->target_quot = devm_kcalloc(vreg->thread->ctrl->dev, fuse_corners,
+			sizeof(*fuse->target_quot), GFP_KERNEL);
+	fuse->quot_offset = devm_kcalloc(vreg->thread->ctrl->dev, fuse_corners,
+			sizeof(*fuse->quot_offset), GFP_KERNEL);
+
+	if (!fuse->ro_sel || !fuse->init_voltage || !fuse->target_quot
+			|| !fuse->quot_offset)
+		return -ENOMEM;
+
+	rc = cpr3_read_fuse_param(base, kbss_speed_bin_param, &fuse->speed_bin);
+	if (rc) {
+		cpr3_err(vreg, "Unable to read speed bin fuse, rc=%d\n", rc);
+		return rc;
+	}
+	cpr3_info(vreg, "speed bin = %llu\n", fuse->speed_bin);
+
+	switch (soc_revision) {
+	case SDM660_SOC_ID:
+		rc = cprh_sdm660_kbss_read_fuse_data(vreg, fuse);
+		if (rc) {
+			cpr3_err(vreg, "sdm660 kbss fuse data read failed, rc=%d\n",
+				rc);
+			return rc;
+		}
+		break;
+	case SDM630_SOC_ID:
+		rc = cprh_sdm630_kbss_read_fuse_data(vreg, fuse);
+		if (rc) {
+			cpr3_err(vreg, "sdm630 kbss fuse data read failed, rc=%d\n",
+				rc);
+			return rc;
+		}
+		break;
+	case MSM8998_V1_SOC_ID:
+	case MSM8998_V2_SOC_ID:
+		rc = cprh_msm8998_kbss_read_fuse_data(vreg, fuse);
+		if (rc) {
+			cpr3_err(vreg, "msm8998 kbss fuse data read failed, rc=%d\n",
+				rc);
+			return rc;
+		}
+		break;
+	default:
+		cpr3_err(vreg, "unsupported soc id = %d\n", soc_revision);
+		return -EINVAL;
+	}
+
+	vreg->speed_bin_fuse	= fuse->speed_bin;
+	vreg->cpr_rev_fuse	= fuse->cpr_fusing_rev;
+	vreg->fuse_corner_count	= fuse_corners;
+	vreg->platform_fuses	= fuse;
+
+	return 0;
+}
+
+/**
+ * cprh_kbss_parse_corner_data() - parse KBSS corner data from device tree
+ *		properties of the CPR3 regulator's device node
+ * @vreg:		Pointer to the CPR3 regulator
+ *
+ * Return: 0 on success, errno on failure
+ */
+static int cprh_kbss_parse_corner_data(struct cpr3_regulator *vreg)
+{
+	int rc;
+
+	rc = cpr3_parse_common_corner_data(vreg);
+	if (rc) {
+		cpr3_err(vreg, "error reading corner data, rc=%d\n", rc);
+		return rc;
+	}
+
+	/*
+	 * At most CPRH_KBSS_MAX_CORNER_COUNT - 1 corners may be specified
+	 * in device tree, since an additional corner must be reserved for
+	 * the APM crossover voltage.
+	 */
+	if (vreg->corner_count > CPRH_KBSS_MAX_CORNER_COUNT - 1) {
+		cpr3_err(vreg, "corner count %d exceeds supported maximum %d\n",
+			vreg->corner_count, CPRH_KBSS_MAX_CORNER_COUNT - 1);
+		return -EINVAL;
+	}
+
+	return rc;
+}
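+
+/*
+ * Note: with CPRH_KBSS_MAX_CORNER_COUNT = 40, at most 39 corners can come
+ * from device tree; the remaining slot is consumed by the crossover corner
+ * appended later (see cprh_kbss_apm_crossover_as_corner()).
+ */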
+
+/**
+ * cprh_kbss_calculate_open_loop_voltages() - calculate the open-loop
+ *		voltage for each corner of a CPR3 regulator
+ * @vreg:		Pointer to the CPR3 regulator
+ *
+ * If open-loop voltage interpolation is allowed in device tree, then this
+ * function calculates the open-loop voltage for a given corner using linear
+ * interpolation.  This interpolation is performed using the processor
+ * frequencies of the lower and higher Fmax corners along with their fused
+ * open-loop voltages.
+ *
+ * If open-loop voltage interpolation is not allowed, then this function uses
+ * the Fmax fused open-loop voltage for all of the corners associated with a
+ * given fuse corner.
+ *
+ * Return: 0 on success, errno on failure
+ */
+static int cprh_kbss_calculate_open_loop_voltages(struct cpr3_regulator *vreg)
+{
+	struct device_node *node = vreg->of_node;
+	struct cprh_kbss_fuses *fuse = vreg->platform_fuses;
+	int i, j, id, rc = 0;
+	bool allow_interpolation;
+	u64 freq_low, volt_low, freq_high, volt_high;
+	const int *ref_volt;
+	int *fuse_volt;
+	int *fmax_corner;
+	const char * const *corner_name;
+	enum soc_id soc_revision;
+
+	fuse_volt = kcalloc(vreg->fuse_corner_count, sizeof(*fuse_volt),
+				GFP_KERNEL);
+	fmax_corner = kcalloc(vreg->fuse_corner_count, sizeof(*fmax_corner),
+				GFP_KERNEL);
+	if (!fuse_volt || !fmax_corner) {
+		rc = -ENOMEM;
+		goto done;
+	}
+
+	id = vreg->thread->ctrl->ctrl_id;
+	soc_revision = vreg->thread->ctrl->soc_revision;
+
+	switch (soc_revision) {
+	case SDM660_SOC_ID:
+		ref_volt = sdm660_kbss_fuse_ref_volt[id];
+		if (id == CPRH_KBSS_POWER_CLUSTER_ID)
+			corner_name = cprh_sdm660_power_kbss_fuse_corner_name;
+		else
+			corner_name = cprh_sdm660_perf_kbss_fuse_corner_name;
+		break;
+	case SDM630_SOC_ID:
+		ref_volt = sdm630_kbss_fuse_ref_volt[id];
+		if (id == CPRH_KBSS_PERFORMANCE_CLUSTER_ID
+			&& vreg->speed_bin_fuse == 2)
+			ref_volt = sdm630_perf_kbss_speed_bin_2_fuse_ref_volt;
+
+		if (id == CPRH_KBSS_POWER_CLUSTER_ID)
+			corner_name = cprh_sdm630_power_kbss_fuse_corner_name;
+		else
+			corner_name = cprh_sdm630_perf_kbss_fuse_corner_name;
+		break;
+	case MSM8998_V1_SOC_ID:
+		ref_volt = msm8998_v1_kbss_fuse_ref_volt;
+		corner_name = cprh_msm8998_kbss_fuse_corner_name;
+		break;
+	case MSM8998_V2_SOC_ID:
+		ref_volt = msm8998_v2_kbss_fuse_ref_volt[id];
+		corner_name = cprh_msm8998_kbss_fuse_corner_name;
+		break;
+	default:
+		cpr3_err(vreg, "unsupported soc id = %d\n", soc_revision);
+		rc = -EINVAL;
+		goto done;
+	}
+
+	for (i = 0; i < vreg->fuse_corner_count; i++) {
+		fuse_volt[i] = cpr3_convert_open_loop_voltage_fuse(ref_volt[i],
+			CPRH_KBSS_FUSE_STEP_VOLT, fuse->init_voltage[i],
+			CPRH_KBSS_VOLTAGE_FUSE_SIZE);
+
+		/* SDM660 speed bin #3 does not have a TURBO_L2 fuse */
+		if (soc_revision == SDM660_SOC_ID && vreg->speed_bin_fuse == 3
+		    && (id == CPRH_KBSS_PERFORMANCE_CLUSTER_ID)
+		    && (i == CPRH_SDM660_PERF_KBSS_FUSE_CORNER_TURBO_L2))
+			continue;
+
+		/* Log fused open-loop voltage values for debugging purposes. */
+		cpr3_info(vreg, "fused %8s: open-loop=%7d uV\n", corner_name[i],
+			  fuse_volt[i]);
+	}
+
+	rc = cpr3_adjust_fused_open_loop_voltages(vreg, fuse_volt);
+	if (rc) {
+		cpr3_err(vreg, "fused open-loop voltage adjustment failed, rc=%d\n",
+			rc);
+		goto done;
+	}
+
+	allow_interpolation = of_property_read_bool(node,
+				"qcom,allow-voltage-interpolation");
+
+	for (i = 1; i < vreg->fuse_corner_count; i++) {
+		if (fuse_volt[i] < fuse_volt[i - 1]) {
+			cpr3_info(vreg, "fuse corner %d voltage=%d uV < fuse corner %d voltage=%d uV; overriding: fuse corner %d voltage=%d\n",
+				i, fuse_volt[i], i - 1, fuse_volt[i - 1],
+				i, fuse_volt[i - 1]);
+			fuse_volt[i] = fuse_volt[i - 1];
+		}
+	}
+
+	if (!allow_interpolation) {
+		/* Use fused open-loop voltage for lower frequencies. */
+		for (i = 0; i < vreg->corner_count; i++)
+			vreg->corner[i].open_loop_volt
+				= fuse_volt[vreg->corner[i].cpr_fuse_corner];
+		goto done;
+	}
+
+	/* Determine highest corner mapped to each fuse corner */
+	j = vreg->fuse_corner_count - 1;
+	for (i = vreg->corner_count - 1; i >= 0; i--) {
+		if (vreg->corner[i].cpr_fuse_corner == j) {
+			fmax_corner[j] = i;
+			j--;
+		}
+	}
+	if (j >= 0) {
+		cpr3_err(vreg, "invalid fuse corner mapping\n");
+		rc = -EINVAL;
+		goto done;
+	}
+
+	/*
+	 * Interpolation is not possible for corners mapped to the lowest fuse
+	 * corner so use the fuse corner value directly.
+	 */
+	for (i = 0; i <= fmax_corner[0]; i++)
+		vreg->corner[i].open_loop_volt = fuse_volt[0];
+
+	/* Interpolate voltages for the higher fuse corners. */
+	for (i = 1; i < vreg->fuse_corner_count; i++) {
+		freq_low = vreg->corner[fmax_corner[i - 1]].proc_freq;
+		volt_low = fuse_volt[i - 1];
+		freq_high = vreg->corner[fmax_corner[i]].proc_freq;
+		volt_high = fuse_volt[i];
+
+		for (j = fmax_corner[i - 1] + 1; j <= fmax_corner[i]; j++)
+			vreg->corner[j].open_loop_volt = cpr3_interpolate(
+				freq_low, volt_low, freq_high, volt_high,
+				vreg->corner[j].proc_freq);
+	}
+
+done:
+	if (rc == 0) {
+		cpr3_debug(vreg, "unadjusted per-corner open-loop voltages:\n");
+		for (i = 0; i < vreg->corner_count; i++)
+			cpr3_debug(vreg, "open-loop[%2d] = %d uV\n", i,
+				vreg->corner[i].open_loop_volt);
+
+		rc = cpr3_adjust_open_loop_voltages(vreg);
+		if (rc)
+			cpr3_err(vreg, "open-loop voltage adjustment failed, rc=%d\n",
+				rc);
+	}
+
+	kfree(fuse_volt);
+	kfree(fmax_corner);
+	return rc;
+}
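+
+/*
+ * Interpolation example (illustrative values): with a lower Fmax corner at
+ * 300000000 Hz fused to 756000 uV and a higher Fmax corner at 600000000 Hz
+ * fused to 828000 uV, an intermediate corner at 450000000 Hz receives
+ * 756000 + (828000 - 756000) * (450000000 - 300000000)
+ * / (600000000 - 300000000) = 792000 uV from cpr3_interpolate().
+ */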
+
+/**
+ * cprh_msm8998_partial_binning_override() - override the voltage and quotient
+ *		settings for low corners based upon special partial binning
+ *		fuse values
+ * @vreg:		Pointer to the CPR3 regulator
+ *
+ * Some parts are not able to operate at low voltages.  The force highest
+ * corner fuse specifies if a given part must operate with voltages
+ * corresponding to the highest corner.
+ *
+ * Return: 0 on success, errno on failure
+ */
+static int cprh_msm8998_partial_binning_override(struct cpr3_regulator *vreg)
+{
+	struct cprh_kbss_fuses *fuse = vreg->platform_fuses;
+	struct cpr3_corner *corner;
+	struct cpr4_sdelta *sdelta;
+	int i;
+	u32 proc_freq;
+
+	if (fuse->force_highest_corner) {
+		cpr3_info(vreg, "overriding CPR parameters for corners 0 to %d with quotients and voltages of corner %d\n",
+			  vreg->corner_count - 2, vreg->corner_count - 1);
+		corner = &vreg->corner[vreg->corner_count - 1];
+		for (i = 0; i < vreg->corner_count - 1; i++) {
+			proc_freq = vreg->corner[i].proc_freq;
+			sdelta = vreg->corner[i].sdelta;
+			if (sdelta) {
+				if (sdelta->table)
+					devm_kfree(vreg->thread->ctrl->dev,
+						   sdelta->table);
+				if (sdelta->boost_table)
+					devm_kfree(vreg->thread->ctrl->dev,
+						   sdelta->boost_table);
+				devm_kfree(vreg->thread->ctrl->dev,
+					   sdelta);
+			}
+			vreg->corner[i] = *corner;
+			vreg->corner[i].proc_freq = proc_freq;
+		}
+	}
+
+	return 0;
+}
+
+/**
+ * cprh_kbss_parse_core_count_temp_adj_properties() - load device tree
+ *		properties associated with per-corner-band and temperature
+ *		voltage adjustments.
+ * @vreg:	Pointer to the CPR3 regulator
+ *
+ * Return: 0 on success, errno on failure
+ */
+static int cprh_kbss_parse_core_count_temp_adj_properties(
+		struct cpr3_regulator *vreg)
+{
+	struct cpr3_controller *ctrl = vreg->thread->ctrl;
+	struct device_node *node = vreg->of_node;
+	u32 *temp, *combo_corner_bands, *speed_bin_corner_bands;
+	int rc, i, len, temp_point_count;
+
+	vreg->allow_core_count_adj = of_find_property(node,
+					"qcom,corner-band-allow-core-count-adjustment",
+					NULL);
+	vreg->allow_temp_adj = of_find_property(node,
+					"qcom,corner-band-allow-temp-adjustment",
+					NULL);
+
+	if (!vreg->allow_core_count_adj && !vreg->allow_temp_adj)
+		return 0;
+
+	combo_corner_bands = kcalloc(vreg->fuse_combos_supported,
+				     sizeof(*combo_corner_bands),
+				     GFP_KERNEL);
+	if (!combo_corner_bands)
+		return -ENOMEM;
+
+	rc = of_property_read_u32_array(node, "qcom,cpr-corner-bands",
+					combo_corner_bands,
+					vreg->fuse_combos_supported);
+	if (rc == -EOVERFLOW) {
+		/* Single value case */
+		rc = of_property_read_u32(node, "qcom,cpr-corner-bands",
+					  combo_corner_bands);
+		for (i = 1; i < vreg->fuse_combos_supported; i++)
+			combo_corner_bands[i] = combo_corner_bands[0];
+	}
+	if (rc) {
+		cpr3_err(vreg, "error reading property qcom,cpr-corner-bands, rc=%d\n",
+			rc);
+		kfree(combo_corner_bands);
+		return rc;
+	}
+
+	vreg->fuse_combo_corner_band_offset = 0;
+	vreg->fuse_combo_corner_band_sum = 0;
+	for (i = 0; i < vreg->fuse_combos_supported; i++) {
+		vreg->fuse_combo_corner_band_sum += combo_corner_bands[i];
+		if (i < vreg->fuse_combo)
+			vreg->fuse_combo_corner_band_offset +=
+				combo_corner_bands[i];
+	}
+
+	vreg->corner_band_count = combo_corner_bands[vreg->fuse_combo];
+
+	kfree(combo_corner_bands);
+
+	if (vreg->corner_band_count <= 0 ||
+	    vreg->corner_band_count > CPRH_KBSS_MAX_CORNER_BAND_COUNT ||
+	    vreg->corner_band_count > vreg->corner_count) {
+		cpr3_err(vreg, "invalid corner band count %d > %d (max) for %d corners\n",
+			 vreg->corner_band_count,
+			 CPRH_KBSS_MAX_CORNER_BAND_COUNT,
+			 vreg->corner_count);
+		return -EINVAL;
+	}
+
+	vreg->speed_bin_corner_band_offset = 0;
+	vreg->speed_bin_corner_band_sum = 0;
+	if (vreg->speed_bins_supported > 0) {
+		speed_bin_corner_bands = kcalloc(vreg->speed_bins_supported,
+					sizeof(*speed_bin_corner_bands),
+					GFP_KERNEL);
+		if (!speed_bin_corner_bands)
+			return -ENOMEM;
+
+		rc = of_property_read_u32_array(node,
+						"qcom,cpr-speed-bin-corner-bands",
+						speed_bin_corner_bands,
+						vreg->speed_bins_supported);
+		if (rc) {
+			cpr3_err(vreg, "error reading property qcom,cpr-speed-bin-corner-bands, rc=%d\n",
+				 rc);
+			kfree(speed_bin_corner_bands);
+			return rc;
+		}
+
+		for (i = 0; i < vreg->speed_bins_supported; i++) {
+			vreg->speed_bin_corner_band_sum +=
+				speed_bin_corner_bands[i];
+			if (i < vreg->speed_bin_fuse)
+				vreg->speed_bin_corner_band_offset +=
+					speed_bin_corner_bands[i];
+		}
+
+		if (speed_bin_corner_bands[vreg->speed_bin_fuse]
+		    != vreg->corner_band_count) {
+			cpr3_err(vreg, "qcom,cpr-corner-bands and qcom,cpr-speed-bin-corner-bands conflict on number of corners bands: %d vs %u\n",
+				vreg->corner_band_count,
+				speed_bin_corner_bands[vreg->speed_bin_fuse]);
+			kfree(speed_bin_corner_bands);
+			return -EINVAL;
+		}
+
+		kfree(speed_bin_corner_bands);
+	}
+
+	vreg->corner_band = devm_kcalloc(ctrl->dev,
+					 vreg->corner_band_count,
+					 sizeof(*vreg->corner_band),
+					 GFP_KERNEL);
+
+	temp = kcalloc(vreg->corner_band_count, sizeof(*temp), GFP_KERNEL);
+
+	if (!vreg->corner_band || !temp) {
+		rc = -ENOMEM;
+		goto free_temp;
+	}
+
+	rc = cpr3_parse_corner_band_array_property(vreg,
+						   "qcom,cpr-corner-band-map",
+						   1, temp);
+	if (rc) {
+		cpr3_err(vreg, "could not load corner band map, rc=%d\n",
+			 rc);
+		goto free_temp;
+	}
+
+	for (i = 1; i < vreg->corner_band_count; i++) {
+		if (temp[i - 1] > temp[i]) {
+			cpr3_err(vreg, "invalid corner band mapping: band %d corner %d, band %d corner %d\n",
+				 i - 1, temp[i - 1],
+				 i, temp[i]);
+			rc = -EINVAL;
+			goto free_temp;
+		}
+	}
+
+	for (i = 0; i < vreg->corner_band_count; i++)
+		vreg->corner_band[i].corner = temp[i] - CPR3_CORNER_OFFSET;
+
+	if (!of_find_property(ctrl->dev->of_node,
+			      "qcom,cpr-temp-point-map", &len)) {
+		/*
+		 * Temperature-based adjustments are not defined. A single
+		 * temperature band is still valid for per-online-core
+		 * adjustments.
+		 */
+		ctrl->temp_band_count = 1;
+		rc = 0;
+		goto free_temp;
+	}
+
+	if (!vreg->allow_temp_adj) {
+		rc = 0;
+		goto free_temp;
+	}
+
+	temp_point_count = len / sizeof(u32);
+	if (temp_point_count <= 0 || temp_point_count >
+	    CPRH_KBSS_MAX_TEMP_POINTS) {
+		cpr3_err(ctrl, "invalid number of temperature points %d > %d (max)\n",
+			 temp_point_count, CPRH_KBSS_MAX_TEMP_POINTS);
+		rc = -EINVAL;
+		goto free_temp;
+	}
+
+	ctrl->temp_points = devm_kcalloc(ctrl->dev, temp_point_count,
+					sizeof(*ctrl->temp_points), GFP_KERNEL);
+	if (!ctrl->temp_points) {
+		rc = -ENOMEM;
+		goto free_temp;
+	}
+	rc = of_property_read_u32_array(ctrl->dev->of_node,
+					"qcom,cpr-temp-point-map",
+					ctrl->temp_points, temp_point_count);
+	if (rc) {
+		cpr3_err(ctrl, "error reading property qcom,cpr-temp-point-map, rc=%d\n",
+			 rc);
+		goto free_temp;
+	}
+
+	for (i = 0; i < temp_point_count; i++)
+		cpr3_debug(ctrl, "Temperature Point %d=%d\n", i,
+				   ctrl->temp_points[i]);
+
+	/*
+	 * If t1, t2, and t3 are the temperature points, then the temperature
+	 * bands are: (-inf, t1], (t1, t2], (t2, t3], and (t3, inf).
+	 */
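+	/*
+	 * For example, three temperature points t1 < t2 < t3 (hypothetical
+	 * values from qcom,cpr-temp-point-map) produce four temperature
+	 * bands, i.e. temp_band_count = 4.
+	 */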
+	ctrl->temp_band_count = temp_point_count + 1;
+	cpr3_debug(ctrl, "Number of temp bands=%d\n",
+		   ctrl->temp_band_count);
+
+	rc = of_property_read_u32(ctrl->dev->of_node,
+				  "qcom,cpr-initial-temp-band",
+				  &ctrl->initial_temp_band);
+	if (rc) {
+		cpr3_err(ctrl, "error reading qcom,cpr-initial-temp-band, rc=%d\n",
+			rc);
+		goto free_temp;
+	}
+
+	if (ctrl->initial_temp_band >= ctrl->temp_band_count) {
+		cpr3_err(ctrl, "Initial temperature band value %d should be in range [0 - %d]\n",
+			ctrl->initial_temp_band, ctrl->temp_band_count - 1);
+		rc = -EINVAL;
+		goto free_temp;
+	}
+
+	switch (ctrl->soc_revision) {
+	case SDM660_SOC_ID:
+		ctrl->temp_sensor_id_start = ctrl->ctrl_id ==
+			CPRH_KBSS_POWER_CLUSTER_ID
+			? SDM660_KBSS_POWER_TEMP_SENSOR_ID_START :
+			SDM660_KBSS_PERFORMANCE_TEMP_SENSOR_ID_START;
+		ctrl->temp_sensor_id_end = ctrl->ctrl_id ==
+			CPRH_KBSS_POWER_CLUSTER_ID
+			? SDM660_KBSS_POWER_TEMP_SENSOR_ID_END :
+			SDM660_KBSS_PERFORMANCE_TEMP_SENSOR_ID_END;
+		break;
+	case MSM8998_V1_SOC_ID:
+	case MSM8998_V2_SOC_ID:
+		ctrl->temp_sensor_id_start = ctrl->ctrl_id ==
+			CPRH_KBSS_POWER_CLUSTER_ID
+			? MSM8998_KBSS_POWER_TEMP_SENSOR_ID_START :
+			MSM8998_KBSS_PERFORMANCE_TEMP_SENSOR_ID_START;
+		ctrl->temp_sensor_id_end = ctrl->ctrl_id ==
+			CPRH_KBSS_POWER_CLUSTER_ID
+			? MSM8998_KBSS_POWER_TEMP_SENSOR_ID_END :
+			MSM8998_KBSS_PERFORMANCE_TEMP_SENSOR_ID_END;
+		break;
+	default:
+		cpr3_err(ctrl, "unsupported soc id = %d\n", ctrl->soc_revision);
+		rc = -EINVAL;
+		goto free_temp;
+	}
+	ctrl->allow_temp_adj = true;
+
+free_temp:
+	kfree(temp);
+
+	return rc;
+}
+
+/**
+ * cprh_kbss_apm_crossover_as_corner() - introduce a corner whose floor,
+ *		open-loop, and ceiling voltages correspond to the APM
+ *		crossover voltage.
+ * @vreg:		Pointer to the CPR3 regulator
+ *
+ * The APM corner is utilized as a crossover corner by OSM and CPRh
+ * hardware to set the VDD supply voltage during the APM switch
+ * routine.
+ *
+ * Return: 0 on success, errno on failure
+ */
+static int cprh_kbss_apm_crossover_as_corner(struct cpr3_regulator *vreg)
+{
+	struct cpr3_controller *ctrl = vreg->thread->ctrl;
+	struct cpr3_corner *corner;
+
+	if (!ctrl->apm_crossover_volt) {
+		/* APM voltage crossover corner not required. */
+		return 0;
+	}
+
+	corner = &vreg->corner[vreg->corner_count];
+	/*
+	 * 0 MHz indicates that this corner is not to be
+	 * used as an active DCVS set point.
+	 */
+	corner->proc_freq = 0;
+	corner->floor_volt = ctrl->apm_crossover_volt;
+	corner->ceiling_volt = ctrl->apm_crossover_volt;
+	corner->open_loop_volt = ctrl->apm_crossover_volt;
+	corner->abs_ceiling_volt = ctrl->apm_crossover_volt;
+	corner->use_open_loop = true;
+	vreg->corner_count++;
+
+	return 0;
+}
+
+/**
+ * cprh_kbss_mem_acc_crossover_as_corner() - introduce a corner whose floor,
+ *		open-loop, and ceiling voltages correspond to the MEM ACC
+ *		crossover voltage.
+ * @vreg:		Pointer to the CPR3 regulator
+ *
+ * The MEM ACC corner is utilized as a crossover corner by OSM and CPRh
+ * hardware to set the VDD supply voltage during the MEM ACC switch
+ * routine.
+ *
+ * Return: 0 on success, errno on failure
+ */
+static int cprh_kbss_mem_acc_crossover_as_corner(struct cpr3_regulator *vreg)
+{
+	struct cpr3_controller *ctrl = vreg->thread->ctrl;
+	struct cpr3_corner *corner;
+
+	if (!ctrl->mem_acc_crossover_volt) {
+		/* MEM ACC voltage crossover corner not required. */
+		return 0;
+	}
+
+	corner = &vreg->corner[vreg->corner_count];
+	/*
+	 * 0 MHz indicates that this corner is not to be
+	 * used as an active DCVS set point.
+	 */
+	corner->proc_freq = 0;
+	corner->floor_volt = ctrl->mem_acc_crossover_volt;
+	corner->ceiling_volt = ctrl->mem_acc_crossover_volt;
+	corner->open_loop_volt = ctrl->mem_acc_crossover_volt;
+	corner->abs_ceiling_volt = ctrl->mem_acc_crossover_volt;
+	corner->use_open_loop = true;
+	vreg->corner_count++;
+
+	return 0;
+}
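+
+/*
+ * Both crossover corners above are appended with proc_freq = 0, so they
+ * never act as DCVS set points and cprh_kbss_populate_opp_table() skips
+ * them when building the OPP table.
+ */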
+
+/**
+ * cprh_kbss_set_no_interpolation_quotients() - use the fused target quotient
+ *		values for lower frequencies.
+ * @vreg:		Pointer to the CPR3 regulator
+ * @volt_adjust:	Pointer to array of per-corner closed-loop adjustment
+ *			voltages
+ * @volt_adjust_fuse:	Pointer to array of per-fuse-corner closed-loop
+ *			adjustment voltages
+ * @ro_scale:		Pointer to array of per-fuse-corner RO scaling factor
+ *			values with units of QUOT/V
+ *
+ * Return: 0 on success, errno on failure
+ */
+static int cprh_kbss_set_no_interpolation_quotients(struct cpr3_regulator *vreg,
+			int *volt_adjust, int *volt_adjust_fuse, int *ro_scale)
+{
+	struct cprh_kbss_fuses *fuse = vreg->platform_fuses;
+	u32 quot, ro;
+	int quot_adjust;
+	int i, fuse_corner;
+
+	for (i = 0; i < vreg->corner_count; i++) {
+		fuse_corner = vreg->corner[i].cpr_fuse_corner;
+		quot = fuse->target_quot[fuse_corner];
+		quot_adjust = cpr3_quot_adjustment(ro_scale[fuse_corner],
+					   volt_adjust_fuse[fuse_corner] +
+					   volt_adjust[i]);
+		ro = fuse->ro_sel[fuse_corner];
+		vreg->corner[i].target_quot[ro] = quot + quot_adjust;
+		cpr3_debug(vreg, "corner=%d RO=%u target quot=%u\n",
+			  i, ro, quot);
+
+		if (quot_adjust)
+			cpr3_debug(vreg, "adjusted corner %d RO%u target quot: %u --> %u (%d uV)\n",
+				  i, ro, quot, vreg->corner[i].target_quot[ro],
+				  volt_adjust_fuse[fuse_corner] +
+				  volt_adjust[i]);
+	}
+
+	return 0;
+}
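+
+/*
+ * Quotient adjustment example (illustrative values): with an RO scaling
+ * factor of 2500 QUOT/V and a closed-loop adjustment of -10000 uV,
+ * cpr3_quot_adjustment() yields roughly 2500 * (-10000) / 1000000 = -25
+ * quotient units.
+ */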
+
+/**
+ * cprh_kbss_calculate_target_quotients() - calculate the CPR target
+ *		quotient for each corner of a CPR3 regulator
+ * @vreg:		Pointer to the CPR3 regulator
+ *
+ * If target quotient interpolation is allowed in device tree, then this
+ * function calculates the target quotient for a given corner using linear
+ * interpolation.  This interpolation is performed using the processor
+ * frequencies of the lower and higher Fmax corners along with the fused
+ * target quotient and quotient offset of the higher Fmax corner.
+ *
+ * If target quotient interpolation is not allowed, then this function uses
+ * the Fmax fused target quotient for all of the corners associated with a
+ * given fuse corner.
+ *
+ * Return: 0 on success, errno on failure
+ */
+static int cprh_kbss_calculate_target_quotients(struct cpr3_regulator *vreg)
+{
+	struct cprh_kbss_fuses *fuse = vreg->platform_fuses;
+	int rc;
+	bool allow_interpolation;
+	u64 freq_low, freq_high, prev_quot;
+	u64 *quot_low;
+	u64 *quot_high;
+	u32 quot, ro;
+	int i, j, fuse_corner, quot_adjust;
+	int *fmax_corner;
+	int *volt_adjust, *volt_adjust_fuse, *ro_scale;
+	int lowest_fuse_corner, highest_fuse_corner;
+	const char * const *corner_name;
+
+	switch (vreg->thread->ctrl->soc_revision) {
+	case SDM660_SOC_ID:
+		if (vreg->thread->ctrl->ctrl_id == CPRH_KBSS_POWER_CLUSTER_ID) {
+			corner_name = cprh_sdm660_power_kbss_fuse_corner_name;
+			lowest_fuse_corner =
+				CPRH_SDM660_POWER_KBSS_FUSE_CORNER_LOWSVS;
+			highest_fuse_corner =
+				CPRH_SDM660_POWER_KBSS_FUSE_CORNER_TURBO_L1;
+		} else {
+			corner_name = cprh_sdm660_perf_kbss_fuse_corner_name;
+			lowest_fuse_corner =
+				CPRH_SDM660_PERF_KBSS_FUSE_CORNER_SVS;
+			highest_fuse_corner =
+				CPRH_SDM660_PERF_KBSS_FUSE_CORNER_TURBO_L2;
+
+			/* speed-bin 3 does not have Turbo_L2 fuse */
+			if (vreg->speed_bin_fuse == 3)
+				highest_fuse_corner =
+					CPRH_SDM660_PERF_KBSS_FUSE_CORNER_TURBO;
+		}
+		break;
+	case SDM630_SOC_ID:
+		if (vreg->thread->ctrl->ctrl_id == CPRH_KBSS_POWER_CLUSTER_ID) {
+			corner_name = cprh_sdm630_power_kbss_fuse_corner_name;
+			lowest_fuse_corner =
+				CPRH_SDM630_POWER_KBSS_FUSE_CORNER_LOWSVS;
+			highest_fuse_corner =
+				CPRH_SDM630_POWER_KBSS_FUSE_CORNER_TURBO_L1;
+		} else {
+			corner_name = cprh_sdm630_perf_kbss_fuse_corner_name;
+			lowest_fuse_corner =
+				CPRH_SDM630_PERF_KBSS_FUSE_CORNER_LOWSVS;
+			highest_fuse_corner =
+				CPRH_SDM630_PERF_KBSS_FUSE_CORNER_TURBO_L2;
+		}
+		break;
+	case MSM8998_V1_SOC_ID:
+	case MSM8998_V2_SOC_ID:
+		corner_name = cprh_msm8998_kbss_fuse_corner_name;
+		lowest_fuse_corner =
+			CPRH_MSM8998_KBSS_FUSE_CORNER_LOWSVS;
+		highest_fuse_corner =
+			CPRH_MSM8998_KBSS_FUSE_CORNER_TURBO_L1;
+		break;
+	default:
+		cpr3_err(vreg, "unsupported soc id = %d\n",
+				vreg->thread->ctrl->soc_revision);
+		return -EINVAL;
+	}
+
+	/* Log fused quotient values for debugging purposes. */
+	cpr3_info(vreg, "fused %8s: quot[%2llu]=%4llu\n",
+		corner_name[lowest_fuse_corner],
+		fuse->ro_sel[lowest_fuse_corner],
+		fuse->target_quot[lowest_fuse_corner]);
+	for (i = lowest_fuse_corner + 1; i <= highest_fuse_corner; i++)
+		cpr3_info(vreg, "fused %8s: quot[%2llu]=%4llu, quot_offset[%2llu]=%4llu\n",
+			corner_name[i], fuse->ro_sel[i], fuse->target_quot[i],
+			fuse->ro_sel[i], fuse->quot_offset[i] *
+			CPRH_KBSS_QUOT_OFFSET_SCALE);
+
+	allow_interpolation = of_property_read_bool(vreg->of_node,
+					"qcom,allow-quotient-interpolation");
+
+	volt_adjust = kcalloc(vreg->corner_count, sizeof(*volt_adjust),
+					GFP_KERNEL);
+	volt_adjust_fuse = kcalloc(vreg->fuse_corner_count,
+					sizeof(*volt_adjust_fuse), GFP_KERNEL);
+	ro_scale = kcalloc(vreg->fuse_corner_count, sizeof(*ro_scale),
+					GFP_KERNEL);
+	fmax_corner = kcalloc(vreg->fuse_corner_count, sizeof(*fmax_corner),
+					GFP_KERNEL);
+	quot_low = kcalloc(vreg->fuse_corner_count, sizeof(*quot_low),
+					GFP_KERNEL);
+	quot_high = kcalloc(vreg->fuse_corner_count, sizeof(*quot_high),
+					GFP_KERNEL);
+	if (!volt_adjust || !volt_adjust_fuse || !ro_scale ||
+	    !fmax_corner || !quot_low || !quot_high) {
+		rc = -ENOMEM;
+		goto done;
+	}
+
+	rc = cpr3_parse_closed_loop_voltage_adjustments(vreg, &fuse->ro_sel[0],
+				volt_adjust, volt_adjust_fuse, ro_scale);
+	if (rc) {
+		cpr3_err(vreg, "could not load closed-loop voltage adjustments, rc=%d\n",
+			rc);
+		goto done;
+	}
+
+	if (!allow_interpolation) {
+		/* Use fused target quotients for lower frequencies. */
+		rc = cprh_kbss_set_no_interpolation_quotients(vreg,
+				volt_adjust, volt_adjust_fuse, ro_scale);
+		goto done;
+	}
+
+	/* Determine highest corner mapped to each fuse corner */
+	j = vreg->fuse_corner_count - 1;
+	for (i = vreg->corner_count - 1; i >= 0; i--) {
+		if (vreg->corner[i].cpr_fuse_corner == j) {
+			fmax_corner[j] = i;
+			j--;
+		}
+	}
+	if (j >= 0) {
+		cpr3_err(vreg, "invalid fuse corner mapping\n");
+		rc = -EINVAL;
+		goto done;
+	}
+
+	/*
+	 * Interpolation is not possible for corners mapped to the lowest fuse
+	 * corner so use the fuse corner value directly.
+	 */
+	i = lowest_fuse_corner;
+	quot_adjust = cpr3_quot_adjustment(ro_scale[i], volt_adjust_fuse[i]);
+	quot = fuse->target_quot[i] + quot_adjust;
+	quot_high[i] = quot_low[i] = quot;
+	ro = fuse->ro_sel[i];
+	if (quot_adjust)
+		cpr3_debug(vreg, "adjusted fuse corner %d RO%u target quot: %llu --> %u (%d uV)\n",
+			i, ro, fuse->target_quot[i], quot, volt_adjust_fuse[i]);
+
+	for (i = 0; i <= fmax_corner[lowest_fuse_corner]; i++)
+		vreg->corner[i].target_quot[ro] = quot;
+
+	for (i = lowest_fuse_corner + 1; i < vreg->fuse_corner_count; i++) {
+		quot_high[i] = fuse->target_quot[i];
+		if (fuse->ro_sel[i] == fuse->ro_sel[i - 1])
+			quot_low[i] = quot_high[i - 1];
+		else
+			quot_low[i] = quot_high[i]
+					- fuse->quot_offset[i]
+					  * CPRH_KBSS_QUOT_OFFSET_SCALE;
+		if (quot_high[i] < quot_low[i]) {
+			cpr3_debug(vreg, "quot_high[%d]=%llu < quot_low[%d]=%llu; overriding: quot_high[%d]=%llu\n",
+				i, quot_high[i], i, quot_low[i],
+				i, quot_low[i]);
+			quot_high[i] = quot_low[i];
+		}
+	}
+
+	/* Perform per-fuse-corner target quotient adjustment */
+	for (i = 1; i < vreg->fuse_corner_count; i++) {
+		quot_adjust = cpr3_quot_adjustment(ro_scale[i],
+						   volt_adjust_fuse[i]);
+		if (quot_adjust) {
+			prev_quot = quot_high[i];
+			quot_high[i] += quot_adjust;
+			cpr3_debug(vreg, "adjusted fuse corner %d RO%llu target quot: %llu --> %llu (%d uV)\n",
+				i, fuse->ro_sel[i], prev_quot, quot_high[i],
+				volt_adjust_fuse[i]);
+		}
+
+		if (fuse->ro_sel[i] == fuse->ro_sel[i - 1])
+			quot_low[i] = quot_high[i - 1];
+		else
+			quot_low[i] += cpr3_quot_adjustment(ro_scale[i],
+						    volt_adjust_fuse[i - 1]);
+
+		if (quot_high[i] < quot_low[i]) {
+			cpr3_debug(vreg, "quot_high[%d]=%llu < quot_low[%d]=%llu after adjustment; overriding: quot_high[%d]=%llu\n",
+				i, quot_high[i], i, quot_low[i],
+				i, quot_low[i]);
+			quot_high[i] = quot_low[i];
+		}
+	}
+
+	/* Interpolate voltages for the higher fuse corners. */
+	for (i = 1; i < vreg->fuse_corner_count; i++) {
+		freq_low = vreg->corner[fmax_corner[i - 1]].proc_freq;
+		freq_high = vreg->corner[fmax_corner[i]].proc_freq;
+
+		ro = fuse->ro_sel[i];
+		for (j = fmax_corner[i - 1] + 1; j <= fmax_corner[i]; j++)
+			vreg->corner[j].target_quot[ro] = cpr3_interpolate(
+				freq_low, quot_low[i], freq_high, quot_high[i],
+				vreg->corner[j].proc_freq);
+	}
+
+	/* Perform per-corner target quotient adjustment */
+	for (i = 0; i < vreg->corner_count; i++) {
+		fuse_corner = vreg->corner[i].cpr_fuse_corner;
+		ro = fuse->ro_sel[fuse_corner];
+		quot_adjust = cpr3_quot_adjustment(ro_scale[fuse_corner],
+						   volt_adjust[i]);
+		if (quot_adjust) {
+			prev_quot = vreg->corner[i].target_quot[ro];
+			vreg->corner[i].target_quot[ro] += quot_adjust;
+			cpr3_debug(vreg, "adjusted corner %d RO%u target quot: %llu --> %u (%d uV)\n",
+				i, ro, prev_quot,
+				vreg->corner[i].target_quot[ro],
+				volt_adjust[i]);
+		}
+	}
+
+	/* Ensure that target quotients increase monotonically */
+	for (i = 1; i < vreg->corner_count; i++) {
+		ro = fuse->ro_sel[vreg->corner[i].cpr_fuse_corner];
+		if (fuse->ro_sel[vreg->corner[i - 1].cpr_fuse_corner] == ro
+		    && vreg->corner[i].target_quot[ro]
+				< vreg->corner[i - 1].target_quot[ro]) {
+			cpr3_debug(vreg, "adjusted corner %d RO%u target quot=%u < adjusted corner %d RO%u target quot=%u; overriding: corner %d RO%u target quot=%u\n",
+				i, ro, vreg->corner[i].target_quot[ro],
+				i - 1, ro, vreg->corner[i - 1].target_quot[ro],
+				i, ro, vreg->corner[i - 1].target_quot[ro]);
+			vreg->corner[i].target_quot[ro]
+				= vreg->corner[i - 1].target_quot[ro];
+		}
+	}
+
+done:
+	kfree(volt_adjust);
+	kfree(volt_adjust_fuse);
+	kfree(ro_scale);
+	kfree(fmax_corner);
+	kfree(quot_low);
+	kfree(quot_high);
+	return rc;
+}
+
+/**
+ * cprh_kbss_print_settings() - print KBSS CPR configuration settings to
+ *		the kernel log for debugging purposes
+ * @vreg:		Pointer to the CPR3 regulator
+ */
+static void cprh_kbss_print_settings(struct cpr3_regulator *vreg)
+{
+	struct cpr3_corner *corner;
+	int i;
+
+	cpr3_debug(vreg, "Corner: Frequency (Hz), Fuse Corner, Floor (uV), Open-Loop (uV), Ceiling (uV)\n");
+	for (i = 0; i < vreg->corner_count; i++) {
+		corner = &vreg->corner[i];
+		cpr3_debug(vreg, "%3d: %10u, %2d, %7d, %7d, %7d\n",
+			i, corner->proc_freq, corner->cpr_fuse_corner,
+			corner->floor_volt, corner->open_loop_volt,
+			corner->ceiling_volt);
+	}
+}
+
+/**
+ * cprh_kbss_init_thread() - perform steps necessary to initialize the
+ *		configuration data for a CPR3 thread
+ * @thread:		Pointer to the CPR3 thread
+ *
+ * Return: 0 on success, errno on failure
+ */
+static int cprh_kbss_init_thread(struct cpr3_thread *thread)
+{
+	int rc;
+
+	rc = cpr3_parse_common_thread_data(thread);
+	if (rc) {
+		cpr3_err(thread->ctrl, "thread %u unable to read CPR thread data from device tree, rc=%d\n",
+			thread->thread_id, rc);
+		return rc;
+	}
+
+	return 0;
+}
+
+/**
+ * cprh_kbss_init_regulator() - perform all steps necessary to initialize the
+ *		configuration data for a CPR3 regulator
+ * @vreg:		Pointer to the CPR3 regulator
+ *
+ * Return: 0 on success, errno on failure
+ */
+static int cprh_kbss_init_regulator(struct cpr3_regulator *vreg)
+{
+	struct cprh_kbss_fuses *fuse;
+	int rc;
+
+	rc = cprh_kbss_read_fuse_data(vreg);
+	if (rc) {
+		cpr3_err(vreg, "unable to read CPR fuse data, rc=%d\n", rc);
+		return rc;
+	}
+
+	fuse = vreg->platform_fuses;
+
+	rc = cprh_kbss_parse_corner_data(vreg);
+	if (rc) {
+		cpr3_err(vreg, "unable to read CPR corner data from device tree, rc=%d\n",
+			rc);
+		return rc;
+	}
+
+	rc = cprh_kbss_calculate_open_loop_voltages(vreg);
+	if (rc) {
+		cpr3_err(vreg, "unable to calculate open-loop voltages, rc=%d\n",
+			rc);
+		return rc;
+	}
+
+	rc = cpr3_limit_open_loop_voltages(vreg);
+	if (rc) {
+		cpr3_err(vreg, "unable to limit open-loop voltages, rc=%d\n",
+			rc);
+		return rc;
+	}
+
+	cprh_adjust_voltages_for_apm(vreg);
+	cprh_adjust_voltages_for_mem_acc(vreg);
+
+	cpr3_open_loop_voltage_as_ceiling(vreg);
+
+	rc = cpr3_limit_floor_voltages(vreg);
+	if (rc) {
+		cpr3_err(vreg, "unable to limit floor voltages, rc=%d\n", rc);
+		return rc;
+	}
+
+	rc = cprh_kbss_calculate_target_quotients(vreg);
+	if (rc) {
+		cpr3_err(vreg, "unable to calculate target quotients, rc=%d\n",
+			rc);
+		return rc;
+	}
+
+	rc = cprh_kbss_parse_core_count_temp_adj_properties(vreg);
+	if (rc) {
+		cpr3_err(vreg, "unable to parse core count and temperature adjustment properties, rc=%d\n",
+			 rc);
+		return rc;
+	}
+
+	rc = cpr4_parse_core_count_temp_voltage_adj(vreg, true);
+	if (rc) {
+		cpr3_err(vreg, "unable to parse temperature and core count voltage adjustments, rc=%d\n",
+			 rc);
+		return rc;
+	}
+
+	if (vreg->allow_core_count_adj && (vreg->max_core_count <= 0
+				   || vreg->max_core_count >
+				   CPRH_KBSS_CPR_SDELTA_CORE_COUNT)) {
+		cpr3_err(vreg, "qcom,max-core-count has invalid value = %d\n",
+			 vreg->max_core_count);
+		return -EINVAL;
+	}
+
+	rc = cprh_msm8998_partial_binning_override(vreg);
+	if (rc) {
+		cpr3_err(vreg, "unable to override CPR parameters based on partial binning fuse values, rc=%d\n",
+			 rc);
+		return rc;
+	}
+
+	rc = cprh_kbss_apm_crossover_as_corner(vreg);
+	if (rc) {
+		cpr3_err(vreg, "unable to introduce APM voltage crossover corner, rc=%d\n",
+			rc);
+		return rc;
+	}
+
+	rc = cprh_kbss_mem_acc_crossover_as_corner(vreg);
+	if (rc) {
+		cpr3_err(vreg, "unable to introduce MEM ACC voltage crossover corner, rc=%d\n",
+			rc);
+		return rc;
+	}
+
+	cprh_kbss_print_settings(vreg);
+
+	return 0;
+}
+
+/**
+ * cprh_kbss_init_aging() - perform KBSS CPRh controller specific aging
+ *		initializations
+ * @ctrl:		Pointer to the CPR3 controller
+ *
+ * Return: 0 on success, errno on failure
+ */
+static int cprh_kbss_init_aging(struct cpr3_controller *ctrl)
+{
+	struct cprh_kbss_fuses *fuse = NULL;
+	struct cpr3_regulator *vreg = NULL;
+	u32 aging_ro_scale;
+	int i, j, rc = 0;
+
+	for (i = 0; i < ctrl->thread_count; i++) {
+		for (j = 0; j < ctrl->thread[i].vreg_count; j++) {
+			if (ctrl->thread[i].vreg[j].aging_allowed) {
+				ctrl->aging_required = true;
+				vreg = &ctrl->thread[i].vreg[j];
+				fuse = vreg->platform_fuses;
+				break;
+			}
+		}
+	}
+
+	if (!ctrl->aging_required || !fuse || !vreg)
+		return 0;
+
+	rc = cpr3_parse_array_property(vreg, "qcom,cpr-aging-ro-scaling-factor",
+					1, &aging_ro_scale);
+	if (rc)
+		return rc;
+
+	if (aging_ro_scale == 0) {
+		cpr3_err(ctrl, "aging RO scaling factor is invalid: %u\n",
+			aging_ro_scale);
+		return -EINVAL;
+	}
+
+	ctrl->aging_vdd_mode = REGULATOR_MODE_NORMAL;
+	ctrl->aging_complete_vdd_mode = REGULATOR_MODE_IDLE;
+
+	ctrl->aging_sensor_count = 1;
+	ctrl->aging_sensor = devm_kzalloc(ctrl->dev,
+					sizeof(*ctrl->aging_sensor),
+					GFP_KERNEL);
+	if (!ctrl->aging_sensor)
+		return -ENOMEM;
+
+	switch (ctrl->soc_revision) {
+	case SDM660_SOC_ID:
+		if (ctrl->ctrl_id == CPRH_KBSS_POWER_CLUSTER_ID) {
+			ctrl->aging_sensor->sensor_id
+				= SDM660_KBSS_POWER_AGING_SENSOR_ID;
+			ctrl->aging_sensor->bypass_mask[0]
+				= SDM660_KBSS_POWER_AGING_BYPASS_MASK0;
+		} else {
+			ctrl->aging_sensor->sensor_id
+				= SDM660_KBSS_PERFORMANCE_AGING_SENSOR_ID;
+			ctrl->aging_sensor->bypass_mask[0]
+				= SDM660_KBSS_PERFORMANCE_AGING_BYPASS_MASK0;
+		}
+		break;
+	case MSM8998_V1_SOC_ID:
+	case MSM8998_V2_SOC_ID:
+		if (ctrl->ctrl_id == CPRH_KBSS_POWER_CLUSTER_ID) {
+			ctrl->aging_sensor->sensor_id
+				= MSM8998_KBSS_POWER_AGING_SENSOR_ID;
+			ctrl->aging_sensor->bypass_mask[0]
+				= MSM8998_KBSS_POWER_AGING_BYPASS_MASK0;
+		} else {
+			ctrl->aging_sensor->sensor_id
+				= MSM8998_KBSS_PERFORMANCE_AGING_SENSOR_ID;
+			ctrl->aging_sensor->bypass_mask[0]
+				= MSM8998_KBSS_PERFORMANCE_AGING_BYPASS_MASK0;
+		}
+		break;
+	default:
+		cpr3_err(ctrl, "unsupported soc id = %d\n", ctrl->soc_revision);
+		return -EINVAL;
+	}
+	ctrl->aging_sensor->ro_scale = aging_ro_scale;
+
+	ctrl->aging_sensor->init_quot_diff
+		= cpr3_convert_open_loop_voltage_fuse(0,
+			CPRH_KBSS_AGING_INIT_QUOT_DIFF_SCALE,
+			fuse->aging_init_quot_diff,
+			CPRH_KBSS_AGING_INIT_QUOT_DIFF_SIZE);
+
+	cpr3_debug(ctrl, "sensor %u aging init quotient diff = %d, aging RO scale = %u QUOT/V\n",
+		ctrl->aging_sensor->sensor_id,
+		ctrl->aging_sensor->init_quot_diff,
+		ctrl->aging_sensor->ro_scale);
+
+	return 0;
+}
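+
+/*
+ * Note: the aging conversion above reuses
+ * cpr3_convert_open_loop_voltage_fuse() with a reference value of 0 and a
+ * scale of 1, so the 8-bit fused value maps directly to the initial
+ * quotient difference, including its sign as interpreted by that helper.
+ */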
+
+/**
+ * cprh_kbss_init_controller() - perform KBSS CPRh controller specific
+ *		initializations
+ * @ctrl:		Pointer to the CPR3 controller
+ *
+ * Return: 0 on success, errno on failure
+ */
+static int cprh_kbss_init_controller(struct cpr3_controller *ctrl)
+{
+	int rc;
+
+	ctrl->ctrl_type = CPR_CTRL_TYPE_CPRH;
+	rc = cpr3_parse_common_ctrl_data(ctrl);
+	if (rc) {
+		if (rc != -EPROBE_DEFER)
+			cpr3_err(ctrl, "unable to parse common controller data, rc=%d\n",
+				rc);
+		return rc;
+	}
+
+	rc = of_property_read_u32(ctrl->dev->of_node, "qcom,cpr-controller-id",
+				  &ctrl->ctrl_id);
+	if (rc) {
+		cpr3_err(ctrl, "could not read DT property qcom,cpr-controller-id, rc=%d\n",
+			rc);
+		return rc;
+	}
+
+	if (ctrl->ctrl_id < CPRH_KBSS_MIN_CONTROLLER_ID ||
+	    ctrl->ctrl_id > CPRH_KBSS_MAX_CONTROLLER_ID) {
+		cpr3_err(ctrl, "invalid qcom,cpr-controller-id specified\n");
+		return -EINVAL;
+	}
+
+	rc = of_property_read_u32(ctrl->dev->of_node,
+				  "qcom,cpr-down-error-step-limit",
+				  &ctrl->down_error_step_limit);
+	if (rc) {
+		cpr3_err(ctrl, "error reading qcom,cpr-down-error-step-limit, rc=%d\n",
+			rc);
+		return rc;
+	}
+
+	rc = of_property_read_u32(ctrl->dev->of_node,
+				  "qcom,cpr-up-error-step-limit",
+				  &ctrl->up_error_step_limit);
+	if (rc) {
+		cpr3_err(ctrl, "error reading qcom,cpr-up-error-step-limit, rc=%d\n",
+			rc);
+		return rc;
+	}
+
+	rc = of_property_read_u32(ctrl->dev->of_node,
+				  "qcom,voltage-base",
+				  &ctrl->base_volt);
+	if (rc) {
+		cpr3_err(ctrl, "error reading property qcom,voltage-base, rc=%d\n",
+			rc);
+		return rc;
+	}
+
+	rc = of_property_read_u32(ctrl->dev->of_node,
+			"qcom,cpr-up-down-delay-time",
+			&ctrl->up_down_delay_time);
+	if (rc) {
+		cpr3_err(ctrl, "error reading property qcom,cpr-up-down-delay-time, rc=%d\n",
+			rc);
+		return rc;
+	}
+
+	rc = of_property_read_u32(ctrl->dev->of_node,
+				  "qcom,apm-threshold-voltage",
+				  &ctrl->apm_threshold_volt);
+	if (rc) {
+		cpr3_debug(ctrl, "qcom,apm-threshold-voltage not specified\n");
+	} else {
+		rc = of_property_read_u32(ctrl->dev->of_node,
+					  "qcom,apm-crossover-voltage",
+					  &ctrl->apm_crossover_volt);
+		if (rc) {
+			cpr3_err(ctrl, "error reading property qcom,apm-crossover-voltage, rc=%d\n",
+				 rc);
+			return rc;
+		}
+	}
+
+	of_property_read_u32(ctrl->dev->of_node, "qcom,apm-hysteresis-voltage",
+				&ctrl->apm_adj_volt);
+	ctrl->apm_adj_volt = CPR3_ROUND(ctrl->apm_adj_volt, ctrl->step_volt);
+
+	ctrl->saw_use_unit_mV = of_property_read_bool(ctrl->dev->of_node,
+					"qcom,cpr-saw-use-unit-mV");
+
+	rc = of_property_read_u32(ctrl->dev->of_node,
+				  "qcom,mem-acc-threshold-voltage",
+				  &ctrl->mem_acc_threshold_volt);
+	if (!rc) {
+		ctrl->mem_acc_threshold_volt
+		    = CPR3_ROUND(ctrl->mem_acc_threshold_volt, ctrl->step_volt);
+
+		rc = of_property_read_u32(ctrl->dev->of_node,
+					  "qcom,mem-acc-crossover-voltage",
+					  &ctrl->mem_acc_crossover_volt);
+		if (rc) {
+			cpr3_err(ctrl, "error reading property qcom,mem-acc-crossover-voltage, rc=%d\n",
+				 rc);
+			return rc;
+		}
+		ctrl->mem_acc_crossover_volt
+		    = CPR3_ROUND(ctrl->mem_acc_crossover_volt, ctrl->step_volt);
+	}
+
+	/*
+	 * Use the fixed step quotient if specified; otherwise, use the
+	 * dynamically calculated per-RO step quotient.
+	 */
+	of_property_read_u32(ctrl->dev->of_node, "qcom,cpr-step-quot-fixed",
+			&ctrl->step_quot_fixed);
+	ctrl->use_dynamic_step_quot = !ctrl->step_quot_fixed;
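+
+	/*
+	 * Example (hypothetical value): qcom,cpr-step-quot-fixed = <20>;
+	 * forces a step quotient of 20 for every ring oscillator instead
+	 * of the per-RO values calculated at runtime.
+	 */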
+
+	of_property_read_u32(ctrl->dev->of_node,
+			"qcom,cpr-voltage-settling-time",
+			&ctrl->voltage_settling_time);
+
+	of_property_read_u32(ctrl->dev->of_node,
+			     "qcom,cpr-corner-switch-delay-time",
+			     &ctrl->corner_switch_delay_time);
+
+	switch (ctrl->soc_revision) {
+	case SDM660_SOC_ID:
+		if (ctrl->ctrl_id == CPRH_KBSS_POWER_CLUSTER_ID)
+			ctrl->sensor_count =
+				SDM660_KBSS_POWER_CPR_SENSOR_COUNT;
+		else
+			ctrl->sensor_count =
+				SDM660_KBSS_PERFORMANCE_CPR_SENSOR_COUNT;
+		break;
+	case SDM630_SOC_ID:
+		if (ctrl->ctrl_id == CPRH_KBSS_POWER_CLUSTER_ID)
+			ctrl->sensor_count =
+				SDM630_KBSS_POWER_CPR_SENSOR_COUNT;
+		else
+			ctrl->sensor_count =
+				SDM630_KBSS_PERFORMANCE_CPR_SENSOR_COUNT;
+		break;
+	case MSM8998_V1_SOC_ID:
+	case MSM8998_V2_SOC_ID:
+		if (ctrl->ctrl_id == CPRH_KBSS_POWER_CLUSTER_ID)
+			ctrl->sensor_count =
+				MSM8998_KBSS_POWER_CPR_SENSOR_COUNT;
+		else
+			ctrl->sensor_count =
+				MSM8998_KBSS_PERFORMANCE_CPR_SENSOR_COUNT;
+		break;
+	default:
+		cpr3_err(ctrl, "unsupported soc id = %d\n", ctrl->soc_revision);
+		return -EINVAL;
+	}
+
+	/*
+	 * KBSS only has one thread (0) per controller so the zeroed
+	 * array does not need further modification.
+	 */
+	ctrl->sensor_owner = devm_kcalloc(ctrl->dev, ctrl->sensor_count,
+		sizeof(*ctrl->sensor_owner), GFP_KERNEL);
+	if (!ctrl->sensor_owner)
+		return -ENOMEM;
+
+	ctrl->cpr_clock_rate = CPRH_KBSS_CPR_CLOCK_RATE;
+	ctrl->supports_hw_closed_loop = true;
+	ctrl->use_hw_closed_loop = of_property_read_bool(ctrl->dev->of_node,
+						 "qcom,cpr-hw-closed-loop");
+
+	return 0;
+}
+
+/**
+ * cprh_kbss_populate_opp_table() - populate an Operating Performance Point
+ *		table with the frequencies associated with each corner.
+ *		This table may be used to resolve corner to frequency to
+ *		open-loop voltage mappings.
+ * @ctrl:		Pointer to the CPR3 controller
+ *
+ * Return: 0 on success, errno on failure
+ */
+static int cprh_kbss_populate_opp_table(struct cpr3_controller *ctrl)
+{
+	struct device *dev = ctrl->dev;
+	struct cpr3_regulator *vreg = &ctrl->thread[0].vreg[0];
+	struct cpr3_corner *corner;
+	int rc, i;
+
+	for (i = 0; i < vreg->corner_count; i++) {
+		corner = &vreg->corner[i];
+		if (!corner->proc_freq) {
+			/*
+			 * 0 MHz indicates that this corner is not to be
+			 * used as an active DCVS set point, so don't add
+			 * it to the OPP table.
+			 */
+			continue;
+		}
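+		/*
+		 * The corner number (i + 1) is passed in the microvolt
+		 * argument slot of dev_pm_opp_add(); it is intended as a
+		 * lookup key for corner-to-frequency mapping rather than
+		 * as a real supply voltage.
+		 */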
+		rc = dev_pm_opp_add(dev, corner->proc_freq, i + 1);
+		if (rc) {
+			cpr3_err(ctrl, "could not add OPP for corner %d with frequency %u MHz, rc=%d\n",
+				 i + 1, corner->proc_freq, rc);
+			return rc;
+		}
+	}
+
+	return 0;
+}
+
+static int cprh_kbss_regulator_suspend(struct platform_device *pdev,
+				pm_message_t state)
+{
+	struct cpr3_controller *ctrl = platform_get_drvdata(pdev);
+
+	return cpr3_regulator_suspend(ctrl);
+}
+
+static int cprh_kbss_regulator_resume(struct platform_device *pdev)
+{
+	struct cpr3_controller *ctrl = platform_get_drvdata(pdev);
+
+	return cpr3_regulator_resume(ctrl);
+}
+
+/* Data corresponds to the SoC revision */
+static const struct of_device_id cprh_regulator_match_table[] = {
+	{
+		.compatible =  "qcom,cprh-msm8998-v1-kbss-regulator",
+		.data = (void *)(uintptr_t)MSM8998_V1_SOC_ID,
+	},
+	{
+		.compatible = "qcom,cprh-msm8998-v2-kbss-regulator",
+		.data = (void *)(uintptr_t)MSM8998_V2_SOC_ID,
+	},
+	{
+		.compatible = "qcom,cprh-msm8998-kbss-regulator",
+		.data = (void *)(uintptr_t)MSM8998_V2_SOC_ID,
+	},
+	{
+		.compatible = "qcom,cprh-sdm660-kbss-regulator",
+		.data = (void *)(uintptr_t)SDM660_SOC_ID,
+	},
+	{
+		.compatible = "qcom,cprh-sdm630-kbss-regulator",
+		.data = (void *)(uintptr_t)SDM630_SOC_ID,
+	},
+	{}
+};
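+
+/*
+ * Illustrative device tree node (unit address and values are hypothetical;
+ * only properties parsed by this driver are shown):
+ *
+ *	apc0_cpr: cprh-ctrl@179c8000 {
+ *		compatible = "qcom,cprh-sdm660-kbss-regulator";
+ *		qcom,cpr-ctrl-name = "apc0";
+ *		qcom,cpr-controller-id = <0>;
+ *		qcom,voltage-base = <400000>;
+ *	};
+ */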
+
+static int cprh_kbss_regulator_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	const struct of_device_id *match;
+	struct cpr3_controller *ctrl;
+	int rc;
+
+	if (!dev->of_node) {
+		dev_err(dev, "Device tree node is missing\n");
+		return -EINVAL;
+	}
+
+	ctrl = devm_kzalloc(dev, sizeof(*ctrl), GFP_KERNEL);
+	if (!ctrl)
+		return -ENOMEM;
+
+	ctrl->dev = dev;
+	ctrl->cpr_allowed_hw = true;
+
+	rc = of_property_read_string(dev->of_node, "qcom,cpr-ctrl-name",
+					&ctrl->name);
+	if (rc) {
+		cpr3_err(ctrl, "unable to read qcom,cpr-ctrl-name, rc=%d\n",
+			rc);
+		return rc;
+	}
+
+	match = of_match_node(cprh_regulator_match_table, dev->of_node);
+	if (match)
+		ctrl->soc_revision = (uintptr_t)match->data;
+	else
+		cpr3_err(ctrl, "could not find compatible string match\n");
+
+	rc = cpr3_map_fuse_base(ctrl, pdev);
+	if (rc) {
+		cpr3_err(ctrl, "could not map fuse base address\n");
+		return rc;
+	}
+
+	rc = cpr3_allocate_threads(ctrl, 0, 0);
+	if (rc) {
+		cpr3_err(ctrl, "failed to allocate CPR thread array, rc=%d\n",
+			rc);
+		return rc;
+	}
+
+	if (ctrl->thread_count != 1) {
+		cpr3_err(ctrl, "expected 1 thread but found %d\n",
+			ctrl->thread_count);
+		return -EINVAL;
+	} else if (ctrl->thread[0].vreg_count != 1) {
+		cpr3_err(ctrl, "expected 1 regulator but found %d\n",
+			ctrl->thread[0].vreg_count);
+		return -EINVAL;
+	}
+
+	rc = cprh_kbss_init_controller(ctrl);
+	if (rc) {
+		if (rc != -EPROBE_DEFER)
+			cpr3_err(ctrl, "failed to initialize CPR controller parameters, rc=%d\n",
+				rc);
+		return rc;
+	}
+
+	rc = cprh_kbss_init_thread(&ctrl->thread[0]);
+	if (rc) {
+		cpr3_err(ctrl, "thread initialization failed, rc=%d\n", rc);
+		return rc;
+	}
+
+	rc = cprh_kbss_init_regulator(&ctrl->thread[0].vreg[0]);
+	if (rc) {
+		cpr3_err(&ctrl->thread[0].vreg[0], "regulator initialization failed, rc=%d\n",
+			 rc);
+		return rc;
+	}
+
+	rc = cprh_kbss_init_aging(ctrl);
+	if (rc) {
+		cpr3_err(ctrl, "failed to initialize aging configurations, rc=%d\n",
+			rc);
+		return rc;
+	}
+
+	platform_set_drvdata(pdev, ctrl);
+
+	rc = cprh_kbss_populate_opp_table(ctrl);
+	if (rc)
+		panic("cprh-kbss-regulator OPP table initialization failed\n");
+
+	return cpr3_regulator_register(pdev, ctrl);
+}
+
+static int cprh_kbss_regulator_remove(struct platform_device *pdev)
+{
+	struct cpr3_controller *ctrl = platform_get_drvdata(pdev);
+
+	return cpr3_regulator_unregister(ctrl);
+}
+
+static struct platform_driver cprh_kbss_regulator_driver = {
+	.driver		= {
+		.name		= "qcom,cprh-kbss-regulator",
+		.of_match_table	= cprh_regulator_match_table,
+		.owner		= THIS_MODULE,
+	},
+	.probe		= cprh_kbss_regulator_probe,
+	.remove		= cprh_kbss_regulator_remove,
+	.suspend	= cprh_kbss_regulator_suspend,
+	.resume		= cprh_kbss_regulator_resume,
+};
+
+static int __init cpr_regulator_init(void)
+{
+	return platform_driver_register(&cprh_kbss_regulator_driver);
+}
+
+static void __exit cpr_regulator_exit(void)
+{
+	platform_driver_unregister(&cprh_kbss_regulator_driver);
+}
+
+MODULE_DESCRIPTION("CPRh KBSS regulator driver");
+MODULE_LICENSE("GPL v2");
+
+arch_initcall(cpr_regulator_init);
+module_exit(cpr_regulator_exit);
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/regulator/mem-acc-regulator.c	2019-01-22 16:16:26.271271472 +0100
@@ -0,0 +1,1387 @@
+/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#define pr_fmt(fmt)	"ACC: %s: " fmt, __func__
+
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+#include <linux/regulator/of_regulator.h>
+#include <linux/string.h>
+#include <soc/qcom/scm.h>
+
+#define MEM_ACC_DEFAULT_SEL_SIZE	2
+
+#define BYTES_PER_FUSE_ROW		8
+
+/* mem-acc config flags */
+enum {
+	MEM_ACC_USE_CORNER_ACC_MAP	= BIT(0),
+	MEM_ACC_USE_ADDR_VAL_MAP	= BIT(1),
+};
+
+#define FUSE_MAP_NO_MATCH		(-1)	/* no fuse-version-map tuple matched */
+#define FUSE_PARAM_MATCH_ANY		(-1)	/* wildcard fuse value in a map tuple */
+#define PARAM_MATCH_ANY			(-1)	/* wildcard/end-of-list marker in reg configs */
+
+enum {
+	MEMORY_L1,
+	MEMORY_L2,
+	MEMORY_MAX,
+};
+
+#define MEM_ACC_TYPE_MAX		6
+
+/**
+ * struct acc_reg_value - ACC register configuration structure
+ * @addr_index:	An index into phys_reg_addr_list and remap_reg_addr_list
+ *		to get the ACC register physical address and remapped address.
+ * @reg_val:	Value to program into the register mapped by addr_index.
+ */
+struct acc_reg_value {
+	u32		addr_index;
+	u32		reg_val;
+};
+
+struct corner_acc_reg_config {
+	struct acc_reg_value	*reg_config_list;
+	int			max_reg_config_len;
+};
+
+struct mem_acc_regulator {
+	struct device		*dev;
+	struct regulator_desc	rdesc;
+	struct regulator_dev	*rdev;
+
+	int			corner;
+	bool			mem_acc_supported[MEMORY_MAX];
+	bool			mem_acc_custom_supported[MEMORY_MAX];
+
+	u32			*acc_sel_mask[MEMORY_MAX];
+	u32			*acc_sel_bit_pos[MEMORY_MAX];
+	u32			acc_sel_bit_size[MEMORY_MAX];
+	u32			num_acc_sel[MEMORY_MAX];
+	u32			*acc_en_bit_pos;
+	u32			num_acc_en;
+	u32			*corner_acc_map;
+	u32			num_corners;
+	u32			override_fuse_value;
+	int			override_map_match;
+	int			override_map_count;
+
+	void __iomem		*acc_sel_base[MEMORY_MAX];
+	void __iomem		*acc_en_base;
+	phys_addr_t		acc_sel_addr[MEMORY_MAX];
+	phys_addr_t		acc_en_addr;
+	u32			flags;
+
+	void __iomem		*acc_custom_addr[MEMORY_MAX];
+	u32			*acc_custom_data[MEMORY_MAX];
+
+	phys_addr_t		mem_acc_type_addr[MEM_ACC_TYPE_MAX];
+	u32			*mem_acc_type_data;
+
+	/* eFuse parameters */
+	phys_addr_t		efuse_addr;
+	void __iomem		*efuse_base;
+
+	u32			num_acc_reg;
+	u32			*phys_reg_addr_list;
+	void __iomem		**remap_reg_addr_list;
+	struct corner_acc_reg_config	*corner_acc_reg_config;
+};
+
+static DEFINE_MUTEX(mem_acc_memory_mutex);
+
+static u64 mem_acc_read_efuse_row(struct mem_acc_regulator *mem_acc_vreg,
+					u32 row_num, bool use_tz_api)
+{
+	int rc;
+	u64 efuse_bits;
+	struct scm_desc desc = {0};
+	struct mem_acc_read_req {
+		u32 row_address;
+		int addr_type;
+	} req;
+
+	struct mem_acc_read_rsp {
+		u32 row_data[2];
+		u32 status;
+	} rsp;
+
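+	/*
+	 * When TZ mediation is not required, read the fuse row directly
+	 * from the ioremapped eFuse region; otherwise proxy the read
+	 * through an SCM call, using the legacy or ARMv8 SMC calling
+	 * convention as detected at runtime.
+	 */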
+	if (!use_tz_api) {
+		efuse_bits = readq_relaxed(mem_acc_vreg->efuse_base
+			+ row_num * BYTES_PER_FUSE_ROW);
+		return efuse_bits;
+	}
+
+	desc.args[0] = req.row_address = mem_acc_vreg->efuse_addr +
+					row_num * BYTES_PER_FUSE_ROW;
+	desc.args[1] = req.addr_type = 0;
+	desc.arginfo = SCM_ARGS(2);
+	efuse_bits = 0;
+
+	if (!is_scm_armv8()) {
+		rc = scm_call(SCM_SVC_FUSE, SCM_FUSE_READ,
+			&req, sizeof(req), &rsp, sizeof(rsp));
+	} else {
+		rc = scm_call2(SCM_SIP_FNID(SCM_SVC_FUSE, SCM_FUSE_READ),
+				&desc);
+		rsp.row_data[0] = desc.ret[0];
+		rsp.row_data[1] = desc.ret[1];
+		rsp.status = desc.ret[2];
+	}
+
+	if (rc) {
+		pr_err("read row %d failed, err code = %d\n", row_num, rc);
+	} else {
+		efuse_bits = ((u64)(rsp.row_data[1]) << 32) +
+				(u64)rsp.row_data[0];
+	}
+
+	return efuse_bits;
+}
+
+static inline u32 apc_to_acc_corner(struct mem_acc_regulator *mem_acc_vreg,
+								int corner)
+{
+	/*
+	 * corner_acc_map is indexed from 0, while APC corner values
+	 * start at 1.
+	 */
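+	/*
+	 * Example (hypothetical map): with qcom,corner-acc-map = <0 1 2>,
+	 * APC corner 2 resolves to ACC setting corner_acc_map[1] = 1.
+	 */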
+	return mem_acc_vreg->corner_acc_map[corner - 1];
+}
+
+static void __update_acc_sel(struct mem_acc_regulator *mem_acc_vreg,
+						int corner, int mem_type)
+{
+	u32 acc_data, acc_data_old, i, bit, acc_corner;
+
+	acc_data = readl_relaxed(mem_acc_vreg->acc_sel_base[mem_type]);
+	acc_data_old = acc_data;
+	for (i = 0; i < mem_acc_vreg->num_acc_sel[mem_type]; i++) {
+		bit = mem_acc_vreg->acc_sel_bit_pos[mem_type][i];
+		acc_data &= ~mem_acc_vreg->acc_sel_mask[mem_type][i];
+		acc_corner = apc_to_acc_corner(mem_acc_vreg, corner);
+		acc_data |= (acc_corner << bit) &
+			mem_acc_vreg->acc_sel_mask[mem_type][i];
+	}
+	pr_debug("corner=%d old_acc_sel=0x%02x new_acc_sel=0x%02x mem_type=%d\n",
+			corner, acc_data_old, acc_data, mem_type);
+	writel_relaxed(acc_data, mem_acc_vreg->acc_sel_base[mem_type]);
+}
+
+static void __update_acc_type(struct mem_acc_regulator *mem_acc_vreg,
+				int corner)
+{
+	int i, rc;
+
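+	/*
+	 * mem_acc_type_data stores num_corners values per ACC type; the
+	 * writes go through scm_io_write(), presumably because these
+	 * registers live in a secure address range.
+	 */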
+	for (i = 0; i < MEM_ACC_TYPE_MAX; i++) {
+		if (mem_acc_vreg->mem_acc_type_addr[i]) {
+			rc = scm_io_write(mem_acc_vreg->mem_acc_type_addr[i],
+				mem_acc_vreg->mem_acc_type_data[corner - 1 + i *
+				mem_acc_vreg->num_corners]);
+			if (rc)
+				pr_err("scm_io_write: %pa failure rc:%d\n",
+					&(mem_acc_vreg->mem_acc_type_addr[i]),
+					rc);
+		}
+	}
+}
+
+static void __update_acc_custom(struct mem_acc_regulator *mem_acc_vreg,
+						int corner, int mem_type)
+{
+	writel_relaxed(
+		mem_acc_vreg->acc_custom_data[mem_type][corner-1],
+		mem_acc_vreg->acc_custom_addr[mem_type]);
+	pr_debug("corner=%d mem_type=%d custom_data=0x%2x\n", corner,
+		mem_type, mem_acc_vreg->acc_custom_data[mem_type][corner-1]);
+}
+
+static void update_acc_sel(struct mem_acc_regulator *mem_acc_vreg, int corner)
+{
+	int i;
+
+	for (i = 0; i < MEMORY_MAX; i++) {
+		if (mem_acc_vreg->mem_acc_supported[i])
+			__update_acc_sel(mem_acc_vreg, corner, i);
+		if (mem_acc_vreg->mem_acc_custom_supported[i])
+			__update_acc_custom(mem_acc_vreg, corner, i);
+	}
+
+	if (mem_acc_vreg->mem_acc_type_data)
+		__update_acc_type(mem_acc_vreg, corner);
+}
+
+static void update_acc_reg(struct mem_acc_regulator *mem_acc_vreg, int corner)
+{
+	struct corner_acc_reg_config *corner_acc_reg_config;
+	struct acc_reg_value *reg_config_list;
+	int i, index;
+	u32 addr_index, reg_val;
+
+	corner_acc_reg_config =
+		&mem_acc_vreg->corner_acc_reg_config[mem_acc_vreg->corner];
+	reg_config_list = corner_acc_reg_config->reg_config_list;
+	for (i = 0; i < corner_acc_reg_config->max_reg_config_len; i++) {
+		/*
+		 * Use (corner - 1) in the equation below since
+		 * reg_config_list[] stores values starting from index 0,
+		 * whereas the minimum corner value allowed by the
+		 * regulator framework is 1.
+		 */
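+		/*
+		 * E.g. with max_reg_config_len = 2, corner 3 reads
+		 * reg_config_list entries at indices 4 and 5.
+		 */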
+		index = (corner - 1) * corner_acc_reg_config->max_reg_config_len
+			+ i;
+		addr_index = reg_config_list[index].addr_index;
+		reg_val = reg_config_list[index].reg_val;
+
+		if (addr_index == PARAM_MATCH_ANY)
+			break;
+
+		writel_relaxed(reg_val,
+				mem_acc_vreg->remap_reg_addr_list[addr_index]);
+		/* make sure write complete */
+		mb();
+
+		pr_debug("corner=%d register:0x%x value:0x%x\n", corner,
+			mem_acc_vreg->phys_reg_addr_list[addr_index], reg_val);
+	}
+}
+
+static int mem_acc_regulator_set_voltage(struct regulator_dev *rdev,
+		int corner, int corner_max, unsigned int *selector)
+{
+	struct mem_acc_regulator *mem_acc_vreg = rdev_get_drvdata(rdev);
+	int i;
+
+	if (corner > mem_acc_vreg->num_corners) {
+		pr_err("Invalid corner=%d requested\n", corner);
+		return -EINVAL;
+	}
+
+	pr_debug("old corner=%d, new corner=%d\n",
+			mem_acc_vreg->corner, corner);
+
+	if (corner == mem_acc_vreg->corner)
+		return 0;
+
+	mutex_lock(&mem_acc_memory_mutex);
+
+	if (mem_acc_vreg->flags & MEM_ACC_USE_ADDR_VAL_MAP) {
+		update_acc_reg(mem_acc_vreg, corner);
+	} else if (mem_acc_vreg->flags & MEM_ACC_USE_CORNER_ACC_MAP) {
+		/* Step up or down one corner at a time. */
+		if (corner > mem_acc_vreg->corner) {
+			for (i = mem_acc_vreg->corner + 1; i <= corner; i++) {
+				pr_debug("UP: to corner %d\n", i);
+				update_acc_sel(mem_acc_vreg, i);
+			}
+		} else {
+			for (i = mem_acc_vreg->corner - 1; i >= corner; i--) {
+				pr_debug("DOWN: to corner %d\n", i);
+				update_acc_sel(mem_acc_vreg, i);
+			}
+		}
+	}
+
+	mutex_unlock(&mem_acc_memory_mutex);
+
+	pr_debug("new voltage corner set %d\n", corner);
+
+	mem_acc_vreg->corner = corner;
+
+	return 0;
+}
+
+static int mem_acc_regulator_get_voltage(struct regulator_dev *rdev)
+{
+	struct mem_acc_regulator *mem_acc_vreg = rdev_get_drvdata(rdev);
+
+	return mem_acc_vreg->corner;
+}
+
+static struct regulator_ops mem_acc_corner_ops = {
+	.set_voltage		= mem_acc_regulator_set_voltage,
+	.get_voltage		= mem_acc_regulator_get_voltage,
+};
+
+static int __mem_acc_sel_init(struct mem_acc_regulator *mem_acc_vreg,
+							int mem_type)
+{
+	int i;
+	u32 bit, mask;
+
+	mem_acc_vreg->acc_sel_mask[mem_type] = devm_kzalloc(mem_acc_vreg->dev,
+		mem_acc_vreg->num_acc_sel[mem_type] * sizeof(u32), GFP_KERNEL);
+	if (!mem_acc_vreg->acc_sel_mask[mem_type])
+		return -ENOMEM;
+
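+	/*
+	 * Build one select mask per instance: a field of acc_sel_bit_size
+	 * bits shifted up to its bit position; e.g. a 2-bit field at bit 4
+	 * gives a mask of 0x30.
+	 */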
+	for (i = 0; i < mem_acc_vreg->num_acc_sel[mem_type]; i++) {
+		bit = mem_acc_vreg->acc_sel_bit_pos[mem_type][i];
+		mask = BIT(mem_acc_vreg->acc_sel_bit_size[mem_type]) - 1;
+		mem_acc_vreg->acc_sel_mask[mem_type][i] = mask << bit;
+	}
+
+	return 0;
+}
+
+static int mem_acc_sel_init(struct mem_acc_regulator *mem_acc_vreg)
+{
+	int i, rc;
+
+	for (i = 0; i < MEMORY_MAX; i++) {
+		if (mem_acc_vreg->mem_acc_supported[i]) {
+			rc = __mem_acc_sel_init(mem_acc_vreg, i);
+			if (rc) {
+				pr_err("Unable to initialize mem_type=%d rc=%d\n",
+					i, rc);
+				return rc;
+			}
+		}
+	}
+
+	return 0;
+}
+
+static void mem_acc_en_init(struct mem_acc_regulator *mem_acc_vreg)
+{
+	int i, bit;
+	u32 acc_data;
+
+	acc_data = readl_relaxed(mem_acc_vreg->acc_en_base);
+	pr_debug("init: acc_en_register=%x\n", acc_data);
+	for (i = 0; i < mem_acc_vreg->num_acc_en; i++) {
+		bit = mem_acc_vreg->acc_en_bit_pos[i];
+		acc_data |= BIT(bit);
+	}
+	pr_debug("final: acc_en_register=%x\n", acc_data);
+	writel_relaxed(acc_data, mem_acc_vreg->acc_en_base);
+}
+
+static int populate_acc_data(struct mem_acc_regulator *mem_acc_vreg,
+			const char *prop_name, u32 **value, u32 *len)
+{
+	int rc;
+
+	if (!of_get_property(mem_acc_vreg->dev->of_node, prop_name, len)) {
+		pr_err("Unable to find %s property\n", prop_name);
+		return -EINVAL;
+	}
+	*len /= sizeof(u32);
+	if (!(*len)) {
+		pr_err("Incorrect entries in %s\n", prop_name);
+		return -EINVAL;
+	}
+
+	*value = devm_kzalloc(mem_acc_vreg->dev, (*len) * sizeof(u32),
+							GFP_KERNEL);
+	if (!(*value)) {
+		pr_err("Unable to allocate memory for %s\n", prop_name);
+		return -ENOMEM;
+	}
+
+	pr_debug("Found %s, data-length = %d\n", prop_name, *len);
+
+	rc = of_property_read_u32_array(mem_acc_vreg->dev->of_node,
+					prop_name, *value, *len);
+	if (rc) {
+		pr_err("Unable to populate %s rc=%d\n", prop_name, rc);
+		return rc;
+	}
+
+	return 0;
+}
+
+static int mem_acc_sel_setup(struct mem_acc_regulator *mem_acc_vreg,
+			struct resource *res, int mem_type)
+{
+	int len, rc;
+	char *mem_select_str;
+	char *mem_select_size_str;
+
+	mem_acc_vreg->acc_sel_addr[mem_type] = res->start;
+	len = res->end - res->start + 1;
+	pr_debug("'acc_sel_addr' = %pa mem_type=%d (len=%d)\n",
+					&res->start, mem_type, len);
+
+	mem_acc_vreg->acc_sel_base[mem_type] = devm_ioremap(mem_acc_vreg->dev,
+			mem_acc_vreg->acc_sel_addr[mem_type], len);
+	if (!mem_acc_vreg->acc_sel_base[mem_type]) {
+		pr_err("Unable to map 'acc_sel_addr' %pa for mem_type=%d\n",
+			&mem_acc_vreg->acc_sel_addr[mem_type], mem_type);
+		return -EINVAL;
+	}
+
+	switch (mem_type) {
+	case MEMORY_L1:
+		mem_select_str = "qcom,acc-sel-l1-bit-pos";
+		mem_select_size_str = "qcom,acc-sel-l1-bit-size";
+		break;
+	case MEMORY_L2:
+		mem_select_str = "qcom,acc-sel-l2-bit-pos";
+		mem_select_size_str = "qcom,acc-sel-l2-bit-size";
+		break;
+	default:
+		pr_err("Invalid memory type: %d\n", mem_type);
+		return -EINVAL;
+	}
+
+	mem_acc_vreg->acc_sel_bit_size[mem_type] = MEM_ACC_DEFAULT_SEL_SIZE;
+	of_property_read_u32(mem_acc_vreg->dev->of_node, mem_select_size_str,
+			&mem_acc_vreg->acc_sel_bit_size[mem_type]);
+
+	rc = populate_acc_data(mem_acc_vreg, mem_select_str,
+			&mem_acc_vreg->acc_sel_bit_pos[mem_type],
+			&mem_acc_vreg->num_acc_sel[mem_type]);
+	if (rc)
+		pr_err("Unable to populate '%s' rc=%d\n", mem_select_str, rc);
+
+	return rc;
+}
+
+static int mem_acc_efuse_init(struct platform_device *pdev,
+				 struct mem_acc_regulator *mem_acc_vreg)
+{
+	struct resource *res;
+	int len;
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "efuse_addr");
+	if (!res || !res->start) {
+		mem_acc_vreg->efuse_base = NULL;
+		pr_debug("'efuse_addr' resource missing or not used.\n");
+		return 0;
+	}
+
+	mem_acc_vreg->efuse_addr = res->start;
+	len = res->end - res->start + 1;
+
+	pr_info("efuse_addr = %pa (len=0x%x)\n", &res->start, len);
+
+	mem_acc_vreg->efuse_base = devm_ioremap(&pdev->dev,
+						mem_acc_vreg->efuse_addr, len);
+	if (!mem_acc_vreg->efuse_base) {
+		pr_err("Unable to map efuse_addr %pa\n",
+				&mem_acc_vreg->efuse_addr);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int mem_acc_custom_data_init(struct platform_device *pdev,
+				 struct mem_acc_regulator *mem_acc_vreg,
+				 int mem_type)
+{
+	struct resource *res;
+	char *custom_apc_addr_str, *custom_apc_data_str;
+	int len, rc = 0;
+
+	switch (mem_type) {
+	case MEMORY_L1:
+		custom_apc_addr_str = "acc-l1-custom";
+		custom_apc_data_str = "qcom,l1-acc-custom-data";
+		break;
+	case MEMORY_L2:
+		custom_apc_addr_str = "acc-l2-custom";
+		custom_apc_data_str = "qcom,l2-acc-custom-data";
+		break;
+	default:
+		pr_err("Invalid memory type: %d\n", mem_type);
+		return -EINVAL;
+	}
+
+	if (!of_find_property(mem_acc_vreg->dev->of_node,
+				custom_apc_data_str, NULL)) {
+		pr_debug("%s custom_data not specified\n", custom_apc_data_str);
+		return 0;
+	}
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+						custom_apc_addr_str);
+	if (!res || !res->start) {
+		pr_debug("%s resource missing\n", custom_apc_addr_str);
+		return -EINVAL;
+	}
+
+	len = res->end - res->start + 1;
+	mem_acc_vreg->acc_custom_addr[mem_type] =
+		devm_ioremap(mem_acc_vreg->dev, res->start, len);
+	if (!mem_acc_vreg->acc_custom_addr[mem_type]) {
+		pr_err("Unable to map %s %pa\n",
+			custom_apc_addr_str, &res->start);
+		return -EINVAL;
+	}
+
+	rc = populate_acc_data(mem_acc_vreg, custom_apc_data_str,
+				&mem_acc_vreg->acc_custom_data[mem_type], &len);
+	if (rc) {
+		pr_err("Unable to find %s rc=%d\n", custom_apc_data_str, rc);
+		return rc;
+	}
+
+	if (mem_acc_vreg->num_corners != len) {
+		pr_err("Custom data is not present for all the corners\n");
+		return -EINVAL;
+	}
+
+	mem_acc_vreg->mem_acc_custom_supported[mem_type] = true;
+
+	return 0;
+}
+
+static int override_mem_acc_custom_data(struct platform_device *pdev,
+				 struct mem_acc_regulator *mem_acc_vreg,
+				 int mem_type)
+{
+	char *custom_apc_data_str;
+	int len, rc = 0, i;
+	int tuple_count, tuple_match;
+	u32 index = 0, value = 0;
+
+	switch (mem_type) {
+	case MEMORY_L1:
+		custom_apc_data_str = "qcom,override-l1-acc-custom-data";
+		break;
+	case MEMORY_L2:
+		custom_apc_data_str = "qcom,override-l2-acc-custom-data";
+		break;
+	default:
+		pr_err("Invalid memory type: %d\n", mem_type);
+		return -EINVAL;
+	}
+
+	if (!of_find_property(mem_acc_vreg->dev->of_node,
+				custom_apc_data_str, &len)) {
+		pr_debug("%s not specified\n", custom_apc_data_str);
+		return 0;
+	}
+
+	if (mem_acc_vreg->override_map_count) {
+		if (mem_acc_vreg->override_map_match == FUSE_MAP_NO_MATCH)
+			return 0;
+		tuple_count = mem_acc_vreg->override_map_count;
+		tuple_match = mem_acc_vreg->override_map_match;
+	} else {
+		tuple_count = 1;
+		tuple_match = 0;
+	}
+
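+	/*
+	 * Override properties carry one num_corners-sized tuple per
+	 * qcom,override-fuse-version-map entry; tuple_match selects the
+	 * tuple matching this part's fuse value.
+	 */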
+	if (len != mem_acc_vreg->num_corners * tuple_count * sizeof(u32)) {
+		pr_err("%s length=%d is invalid\n", custom_apc_data_str, len);
+		return -EINVAL;
+	}
+
+	for (i = 0; i < mem_acc_vreg->num_corners; i++) {
+		index = (tuple_match * mem_acc_vreg->num_corners) + i;
+		rc = of_property_read_u32_index(mem_acc_vreg->dev->of_node,
+					custom_apc_data_str, index, &value);
+		if (rc) {
+			pr_err("Unable to read %s index %u, rc=%d\n",
+					custom_apc_data_str, index, rc);
+			return rc;
+		}
+		mem_acc_vreg->acc_custom_data[mem_type][i] = value;
+	}
+
+	return 0;
+}
+
+static int mem_acc_override_corner_map(struct mem_acc_regulator *mem_acc_vreg)
+{
+	int len = 0, i, rc;
+	int tuple_count, tuple_match;
+	u32 index = 0, value = 0;
+	char *prop_str = "qcom,override-corner-acc-map";
+
+	if (!of_find_property(mem_acc_vreg->dev->of_node, prop_str, &len))
+		return 0;
+
+	if (mem_acc_vreg->override_map_count) {
+		if (mem_acc_vreg->override_map_match ==	FUSE_MAP_NO_MATCH)
+			return 0;
+		tuple_count = mem_acc_vreg->override_map_count;
+		tuple_match = mem_acc_vreg->override_map_match;
+	} else {
+		tuple_count = 1;
+		tuple_match = 0;
+	}
+
+	if (len != mem_acc_vreg->num_corners * tuple_count * sizeof(u32)) {
+		pr_err("%s length=%d is invalid\n", prop_str, len);
+		return -EINVAL;
+	}
+
+	for (i = 0; i < mem_acc_vreg->num_corners; i++) {
+		index = (tuple_match * mem_acc_vreg->num_corners) + i;
+		rc = of_property_read_u32_index(mem_acc_vreg->dev->of_node,
+						prop_str, index, &value);
+		if (rc) {
+			pr_err("Unable to read %s index %u, rc=%d\n",
+						prop_str, index, rc);
+			return rc;
+		}
+		mem_acc_vreg->corner_acc_map[i] = value;
+	}
+
+	return 0;
+}
+
+static int mem_acc_find_override_map_match(struct platform_device *pdev,
+				 struct mem_acc_regulator *mem_acc_vreg)
+{
+	struct device_node *of_node = pdev->dev.of_node;
+	int i, rc, tuple_size;
+	int len = 0;
+	u32 *tmp;
+	char *prop_str = "qcom,override-fuse-version-map";
+
+	/* Specify default no match case. */
+	mem_acc_vreg->override_map_match = FUSE_MAP_NO_MATCH;
+	mem_acc_vreg->override_map_count = 0;
+
+	if (!of_find_property(of_node, prop_str, &len)) {
+		/* No mapping present. */
+		return 0;
+	}
+
+	tuple_size = 1;
+	mem_acc_vreg->override_map_count = len / (sizeof(u32) * tuple_size);
+
+	if (len == 0 || len % (sizeof(u32) * tuple_size)) {
+		pr_err("%s length=%d is invalid\n", prop_str, len);
+		return -EINVAL;
+	}
+
+	tmp = kzalloc(len, GFP_KERNEL);
+	if (!tmp)
+		return -ENOMEM;
+
+	rc = of_property_read_u32_array(of_node, prop_str, tmp,
+			mem_acc_vreg->override_map_count * tuple_size);
+	if (rc) {
+		pr_err("could not read %s rc=%d\n", prop_str, rc);
+		goto done;
+	}
+
+	for (i = 0; i < mem_acc_vreg->override_map_count; i++) {
+		if (tmp[i * tuple_size] != mem_acc_vreg->override_fuse_value
+		    && tmp[i * tuple_size] != FUSE_PARAM_MATCH_ANY) {
+			continue;
+		} else {
+			mem_acc_vreg->override_map_match = i;
+			break;
+		}
+	}
+
+	if (mem_acc_vreg->override_map_match != FUSE_MAP_NO_MATCH)
+		pr_debug("%s tuple match found: %d\n", prop_str,
+				mem_acc_vreg->override_map_match);
+	else
+		pr_err("%s tuple match not found\n", prop_str);
+
+done:
+	kfree(tmp);
+	return rc;
+}
+
+#define MAX_CHARS_PER_INT	20
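+/* Worst-case printed width of one integer; used to size log buffers below. */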
+
+static int mem_acc_reg_addr_val_dump(struct mem_acc_regulator *mem_acc_vreg,
+			struct corner_acc_reg_config *corner_acc_reg_config,
+			u32 corner)
+{
+	int i, k, index, pos = 0;
+	u32 addr_index;
+	size_t buflen;
+	char *buf;
+	struct acc_reg_value *reg_config_list =
+					corner_acc_reg_config->reg_config_list;
+	int max_reg_config_len = corner_acc_reg_config->max_reg_config_len;
+	int num_corners = mem_acc_vreg->num_corners;
+
+	/*
+	 * Log register and value mapping since they are useful for
+	 * baseline MEM ACC logging.
+	 */
+	buflen = max_reg_config_len * (MAX_CHARS_PER_INT + 6) * sizeof(*buf);
+	buf = kzalloc(buflen, GFP_KERNEL);
+	if (buf == NULL) {
+		pr_err("Could not allocate memory for acc register and value logging\n");
+		return -ENOMEM;
+	}
+
+	for (i = 0; i < num_corners; i++) {
+		if (corner == i + 1)
+			continue;
+
+		pr_debug("Corner: %d --> %d:\n", corner, i + 1);
+		pos = 0;
+		for (k = 0; k < max_reg_config_len; k++) {
+			index = i * max_reg_config_len + k;
+			addr_index = reg_config_list[index].addr_index;
+			if (addr_index == PARAM_MATCH_ANY)
+				break;
+
+			pos += scnprintf(buf + pos, buflen - pos,
+				"<0x%x 0x%x> ",
+				mem_acc_vreg->phys_reg_addr_list[addr_index],
+				reg_config_list[index].reg_val);
+		}
+		buf[pos] = '\0';
+		pr_debug("%s\n", buf);
+	}
+
+	kfree(buf);
+	return 0;
+}
+
+static int mem_acc_get_reg_addr_val(struct device_node *of_node,
+		const char *prop_str, struct acc_reg_value *reg_config_list,
+		int list_offset, int list_size, u32 max_reg_index)
+{
+	int i, index, rc = 0;
+
+	for (i = 0; i < list_size / 2; i++) {
+		index = (list_offset * list_size) + i * 2;
+		rc = of_property_read_u32_index(of_node, prop_str, index,
+					&reg_config_list[i].addr_index);
+		rc |= of_property_read_u32_index(of_node, prop_str, index + 1,
+					&reg_config_list[i].reg_val);
+		if (rc) {
+			pr_err("could not read %s at tuple %u: rc=%d\n",
+				prop_str, index, rc);
+			return rc;
+		}
+
+		if (reg_config_list[i].addr_index == PARAM_MATCH_ANY)
+			continue;
+
+		if ((!reg_config_list[i].addr_index) ||
+			reg_config_list[i].addr_index > max_reg_index) {
+			pr_err("Invalid register index %u in %s at tuple %u\n",
+				reg_config_list[i].addr_index, prop_str, index);
+			return -EINVAL;
+		}
+	}
+
+	return rc;
+}
+
+static int mem_acc_init_reg_config(struct mem_acc_regulator *mem_acc_vreg)
+{
+	struct device_node *of_node = mem_acc_vreg->dev->of_node;
+	int i, size, len = 0, rc = 0;
+	u32 addr_index, reg_val, index;
+	char *prop_str = "qcom,acc-init-reg-config";
+
+	if (!of_find_property(of_node, prop_str, &len)) {
+		/* Initial acc register configuration not specified */
+		return rc;
+	}
+
+	size = len / sizeof(u32);
+	if ((!size) || (size % 2)) {
+		pr_err("%s specified with invalid length: %d\n",
+			prop_str, size);
+		return -EINVAL;
+	}
+
+	for (i = 0; i < size / 2; i++) {
+		index = i * 2;
+		rc = of_property_read_u32_index(of_node, prop_str, index,
+						&addr_index);
+		rc |= of_property_read_u32_index(of_node, prop_str, index + 1,
+						&reg_val);
+		if (rc) {
+			pr_err("could not read %s at tuple %u: rc=%d\n",
+				prop_str, index, rc);
+			return rc;
+		}
+
+		if ((!addr_index) || addr_index > mem_acc_vreg->num_acc_reg) {
+			pr_err("Invalid register index %u in %s at tuple %u\n",
+				addr_index, prop_str, index);
+			return -EINVAL;
+		}
+
+		writel_relaxed(reg_val,
+				mem_acc_vreg->remap_reg_addr_list[addr_index]);
+		/* make sure write complete */
+		mb();
+
+		pr_debug("acc initial config: register:0x%x value:0x%x\n",
+			mem_acc_vreg->phys_reg_addr_list[addr_index], reg_val);
+	}
+
+	return rc;
+}
+
+static int mem_acc_get_reg_addr(struct mem_acc_regulator *mem_acc_vreg)
+{
+	struct device_node *of_node = mem_acc_vreg->dev->of_node;
+	void __iomem **remap_reg_addr_list;
+	u32 *phys_reg_addr_list;
+	int i, num_acc_reg, len = 0, rc = 0;
+
+	if (!of_find_property(of_node, "qcom,acc-reg-addr-list", &len)) {
+		/* acc register address list not specified */
+		return rc;
+	}
+
+	num_acc_reg = len / sizeof(u32);
+	if (!num_acc_reg) {
+		pr_err("qcom,acc-reg-addr-list has invalid len = %d\n", len);
+		return -EINVAL;
+	}
+
+	phys_reg_addr_list = devm_kcalloc(mem_acc_vreg->dev, num_acc_reg + 1,
+				sizeof(*phys_reg_addr_list), GFP_KERNEL);
+	if (!phys_reg_addr_list)
+		return -ENOMEM;
+
+	remap_reg_addr_list = devm_kcalloc(mem_acc_vreg->dev, num_acc_reg + 1,
+				sizeof(*remap_reg_addr_list), GFP_KERNEL);
+	if (!remap_reg_addr_list)
+		return -ENOMEM;
+
+	rc = of_property_read_u32_array(of_node, "qcom,acc-reg-addr-list",
+					&phys_reg_addr_list[1], num_acc_reg);
+	if (rc) {
+		pr_err("Reading qcom,acc-reg-addr-list failed: rc=%d\n", rc);
+		return rc;
+	}
+
+	for (i = 1; i <= num_acc_reg; i++) {
+		remap_reg_addr_list[i] = devm_ioremap(mem_acc_vreg->dev,
+						phys_reg_addr_list[i], 0x4);
+		if (!remap_reg_addr_list[i]) {
+			pr_err("Unable to map register address 0x%x\n",
+					phys_reg_addr_list[i]);
+			return -EINVAL;
+		}
+	}
+
+	mem_acc_vreg->num_acc_reg = num_acc_reg;
+	mem_acc_vreg->phys_reg_addr_list = phys_reg_addr_list;
+	mem_acc_vreg->remap_reg_addr_list = remap_reg_addr_list;
+
+	return rc;
+}
+
+static int mem_acc_reg_config_init(struct mem_acc_regulator *mem_acc_vreg)
+{
+	struct device_node *of_node = mem_acc_vreg->dev->of_node;
+	struct acc_reg_value *reg_config_list;
+	int len = 0, size, rc, i, num_corners;
+	struct property *prop;
+	char prop_str[30];
+	struct corner_acc_reg_config *corner_acc_reg_config;
+
+	rc = of_property_read_u32(of_node, "qcom,num-acc-corners",
+				&num_corners);
+	if (rc) {
+		pr_err("could not read qcom,num-acc-corners: rc=%d\n", rc);
+		return rc;
+	}
+
+	mem_acc_vreg->num_corners = num_corners;
+
+	rc = of_property_read_u32(of_node, "qcom,boot-acc-corner",
+				&mem_acc_vreg->corner);
+	if (rc) {
+		pr_err("could not read qcom,boot-acc-corner: rc=%d\n", rc);
+		return rc;
+	}
+	pr_debug("boot acc corner = %d\n", mem_acc_vreg->corner);
+
+	corner_acc_reg_config = devm_kcalloc(mem_acc_vreg->dev, num_corners + 1,
+						sizeof(*corner_acc_reg_config),
+						GFP_KERNEL);
+	if (!corner_acc_reg_config)
+		return -ENOMEM;
+
+	for (i = 1; i <= num_corners; i++) {
+		snprintf(prop_str, sizeof(prop_str),
+				"qcom,corner%d-reg-config", i);
+		prop = of_find_property(of_node, prop_str, &len);
+		size = len / sizeof(u32);
+		if ((!prop) || (!size) || size < (num_corners * 2)) {
+			pr_err("%s property is missing or has invalid length: len=%d\n",
+				prop_str, len);
+			return -EINVAL;
+		}
+
+		reg_config_list = devm_kcalloc(mem_acc_vreg->dev, size / 2,
+					sizeof(*reg_config_list), GFP_KERNEL);
+		if (!reg_config_list)
+			return -ENOMEM;
+
+		rc = mem_acc_get_reg_addr_val(of_node, prop_str,
+						reg_config_list, 0, size,
+						mem_acc_vreg->num_acc_reg);
+		if (rc) {
+			pr_err("Failed to read %s property: rc=%d\n",
+				prop_str, rc);
+			return rc;
+		}
+
+		corner_acc_reg_config[i].max_reg_config_len =
+						size / (num_corners * 2);
+		corner_acc_reg_config[i].reg_config_list = reg_config_list;
+
+		rc = mem_acc_reg_addr_val_dump(mem_acc_vreg,
+						&corner_acc_reg_config[i], i);
+		if (rc) {
+			pr_err("could not dump acc address-value map for corner=%d: rc=%d\n",
+				i, rc);
+			return rc;
+		}
+	}
+
+	mem_acc_vreg->corner_acc_reg_config = corner_acc_reg_config;
+	mem_acc_vreg->flags |= MEM_ACC_USE_ADDR_VAL_MAP;
+	return rc;
+}
+
+static int mem_acc_override_reg_addr_val_init(
+			struct mem_acc_regulator *mem_acc_vreg)
+{
+	struct device_node *of_node = mem_acc_vreg->dev->of_node;
+	struct corner_acc_reg_config *corner_acc_reg_config;
+	struct acc_reg_value *override_reg_config_list;
+	int i, tuple_count, tuple_match, len = 0, rc = 0;
+	u32 list_size, override_max_reg_config_len;
+	char prop_str[40];
+	struct property *prop;
+	int num_corners = mem_acc_vreg->num_corners;
+
+	if (!mem_acc_vreg->corner_acc_reg_config)
+		return 0;
+
+	if (mem_acc_vreg->override_map_count) {
+		if (mem_acc_vreg->override_map_match ==	FUSE_MAP_NO_MATCH)
+			return 0;
+		tuple_count = mem_acc_vreg->override_map_count;
+		tuple_match = mem_acc_vreg->override_map_match;
+	} else {
+		tuple_count = 1;
+		tuple_match = 0;
+	}
+
+	corner_acc_reg_config = mem_acc_vreg->corner_acc_reg_config;
+	for (i = 1; i <= num_corners; i++) {
+		snprintf(prop_str, sizeof(prop_str),
+				"qcom,override-corner%d-addr-val-map", i);
+		prop = of_find_property(of_node, prop_str, &len);
+		list_size = len / (tuple_count * sizeof(u32));
+		if (!prop) {
+			pr_debug("%s property not specified\n", prop_str);
+			continue;
+		}
+
+		if ((!list_size) || list_size < (num_corners * 2)) {
+			pr_err("%s property has an invalid length: len=%d\n",
+				prop_str, len);
+			return -EINVAL;
+		}
+
+		override_max_reg_config_len = list_size / (num_corners * 2);
+		override_reg_config_list =
+				corner_acc_reg_config[i].reg_config_list;
+
+		if (corner_acc_reg_config[i].max_reg_config_len
+					!= override_max_reg_config_len) {
+			/* Free the previously allocated list */
+			devm_kfree(mem_acc_vreg->dev, override_reg_config_list);
+
+			/* Allocate memory for the new list size */
+			override_reg_config_list =
+				devm_kcalloc(mem_acc_vreg->dev,
+				override_max_reg_config_len * num_corners,
+				sizeof(*override_reg_config_list), GFP_KERNEL);
+			if (!override_reg_config_list)
+				return -ENOMEM;
+
+			corner_acc_reg_config[i].max_reg_config_len =
+						override_max_reg_config_len;
+			corner_acc_reg_config[i].reg_config_list =
+						override_reg_config_list;
+		}
+
+		rc = mem_acc_get_reg_addr_val(of_node, prop_str,
+					override_reg_config_list, tuple_match,
+					list_size, mem_acc_vreg->num_acc_reg);
+		if (rc) {
+			pr_err("Failed to read %s property: rc=%d\n",
+				prop_str, rc);
+			return rc;
+		}
+
+		rc = mem_acc_reg_addr_val_dump(mem_acc_vreg,
+						&corner_acc_reg_config[i], i);
+		if (rc) {
+			pr_err("could not dump acc address-value map for corner=%d: rc=%d\n",
+				i, rc);
+			return rc;
+		}
+	}
+
+	return rc;
+}
+
+#define MEM_TYPE_STRING_LEN	20
+static int mem_acc_init(struct platform_device *pdev,
+		struct mem_acc_regulator *mem_acc_vreg)
+{
+	struct device_node *of_node = pdev->dev.of_node;
+	struct resource *res;
+	int len, rc, i, j;
+	u32 fuse_sel[4];
+	u64 fuse_bits;
+	bool acc_type_present = false;
+	char tmps[MEM_TYPE_STRING_LEN];
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "acc-en");
+	if (!res || !res->start) {
+		pr_debug("'acc-en' resource missing or not used.\n");
+	} else {
+		mem_acc_vreg->acc_en_addr = res->start;
+		len = res->end - res->start + 1;
+		pr_debug("'acc_en_addr' = %pa (len=0x%x)\n", &res->start, len);
+
+		mem_acc_vreg->acc_en_base = devm_ioremap(mem_acc_vreg->dev,
+				mem_acc_vreg->acc_en_addr, len);
+		if (!mem_acc_vreg->acc_en_base) {
+			pr_err("Unable to map 'acc_en_addr' %pa\n",
+					&mem_acc_vreg->acc_en_addr);
+			return -EINVAL;
+		}
+
+		rc = populate_acc_data(mem_acc_vreg, "qcom,acc-en-bit-pos",
+				&mem_acc_vreg->acc_en_bit_pos,
+				&mem_acc_vreg->num_acc_en);
+		if (rc) {
+			pr_err("Unable to populate 'qcom,acc-en-bit-pos' rc=%d\n",
+					rc);
+			return rc;
+		}
+	}
+
+	rc = mem_acc_efuse_init(pdev, mem_acc_vreg);
+	if (rc) {
+		pr_err("Wrong eFuse address specified: rc=%d\n", rc);
+		return rc;
+	}
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "acc-sel-l1");
+	if (!res || !res->start) {
+		pr_debug("'acc-sel-l1' resource missing or not used.\n");
+	} else {
+		rc = mem_acc_sel_setup(mem_acc_vreg, res, MEMORY_L1);
+		if (rc) {
+			pr_err("Unable to setup mem-acc for mem_type=%d rc=%d\n",
+					MEMORY_L1, rc);
+			return rc;
+		}
+		mem_acc_vreg->mem_acc_supported[MEMORY_L1] = true;
+	}
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "acc-sel-l2");
+	if (!res || !res->start) {
+		pr_debug("'acc-sel-l2' resource missing or not used.\n");
+	} else {
+		rc = mem_acc_sel_setup(mem_acc_vreg, res, MEMORY_L2);
+		if (rc) {
+			pr_err("Unable to setup mem-acc for mem_type=%d rc=%d\n",
+					MEMORY_L2, rc);
+			return rc;
+		}
+		mem_acc_vreg->mem_acc_supported[MEMORY_L2] = true;
+	}
+
+	for (i = 0; i < MEM_ACC_TYPE_MAX; i++) {
+		snprintf(tmps, MEM_TYPE_STRING_LEN, "mem-acc-type%d", i + 1);
+		res = platform_get_resource_byname(pdev, IORESOURCE_MEM, tmps);
+
+		if (!res || !res->start) {
+			pr_debug("'%s' resource missing or not used.\n", tmps);
+		} else {
+			mem_acc_vreg->mem_acc_type_addr[i] = res->start;
+			acc_type_present = true;
+		}
+	}
+
+	rc = mem_acc_get_reg_addr(mem_acc_vreg);
+	if (rc) {
+		pr_err("Unable to get acc register addresses: rc=%d\n", rc);
+		return rc;
+	}
+
+	if (mem_acc_vreg->phys_reg_addr_list) {
+		rc = mem_acc_reg_config_init(mem_acc_vreg);
+		if (rc) {
+			pr_err("acc register address-value map failed: rc=%d\n",
+				rc);
+			return rc;
+		}
+	}
+
+	if (of_find_property(of_node, "qcom,corner-acc-map", NULL)) {
+		rc = populate_acc_data(mem_acc_vreg, "qcom,corner-acc-map",
+			&mem_acc_vreg->corner_acc_map,
+			&mem_acc_vreg->num_corners);
+		if (rc) {
+			pr_err("Unable to populate 'qcom,corner-acc-map' rc=%d\n",
+				rc);
+			return rc;
+		}
+
+		/* Check if at least one valid mem-acc config. is specified */
+		for (i = 0; i < MEMORY_MAX; i++) {
+			if (mem_acc_vreg->mem_acc_supported[i])
+				break;
+		}
+		if (i == MEMORY_MAX && !acc_type_present) {
+			pr_err("No mem-acc configuration specified\n");
+			return -EINVAL;
+		}
+
+		mem_acc_vreg->flags |= MEM_ACC_USE_CORNER_ACC_MAP;
+	}
+
+	if ((mem_acc_vreg->flags & MEM_ACC_USE_CORNER_ACC_MAP) &&
+		(mem_acc_vreg->flags & MEM_ACC_USE_ADDR_VAL_MAP)) {
+		pr_err("Invalid configuration, both qcom,corner-acc-map and qcom,cornerX-addr-val-map specified\n");
+		return -EINVAL;
+	}
+
+	pr_debug("num_corners = %d\n", mem_acc_vreg->num_corners);
+
+	if (mem_acc_vreg->num_acc_en)
+		mem_acc_en_init(mem_acc_vreg);
+
+	if (mem_acc_vreg->phys_reg_addr_list) {
+		rc = mem_acc_init_reg_config(mem_acc_vreg);
+		if (rc) {
+			pr_err("acc initial register configuration failed: rc=%d\n",
+				rc);
+			return rc;
+		}
+	}
+
+	rc = mem_acc_sel_init(mem_acc_vreg);
+	if (rc) {
+		pr_err("Unable to initialize mem_acc_sel reg rc=%d\n", rc);
+		return rc;
+	}
+
+	for (i = 0; i < MEMORY_MAX; i++) {
+		rc = mem_acc_custom_data_init(pdev, mem_acc_vreg, i);
+		if (rc) {
+			pr_err("Unable to initialize custom data for mem_type=%d rc=%d\n",
+					i, rc);
+			return rc;
+		}
+	}
+
+	if (of_find_property(mem_acc_vreg->dev->of_node,
+				"qcom,override-acc-fuse-sel", NULL)) {
+		rc = of_property_read_u32_array(mem_acc_vreg->dev->of_node,
+			"qcom,override-acc-fuse-sel", fuse_sel, 4);
+		if (rc < 0) {
+			pr_err("Read failed - qcom,override-acc-fuse-sel rc=%d\n",
+					rc);
+			return rc;
+		}
+
+		fuse_bits = mem_acc_read_efuse_row(mem_acc_vreg, fuse_sel[0],
+								fuse_sel[3]);
+		/*
+		 * fuse_sel[1] = LSB position in row (shift)
+		 * fuse_sel[2] = num of bits (mask)
+		 */
+		mem_acc_vreg->override_fuse_value = (fuse_bits >> fuse_sel[1]) &
+						((1 << fuse_sel[2]) - 1);
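+		/*
+		 * Example (hypothetical values): fuse_sel = <32 2 3 1>
+		 * reads row 32 through the TZ API and extracts bits [4:2]
+		 * as the override fuse value.
+		 */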
+
+		rc = mem_acc_find_override_map_match(pdev, mem_acc_vreg);
+		if (rc) {
+			pr_err("Unable to find fuse map match rc=%d\n", rc);
+			return rc;
+		}
+
+		pr_debug("override_fuse_val=%d override_map_match=%d\n",
+					mem_acc_vreg->override_fuse_value,
+					mem_acc_vreg->override_map_match);
+
+		rc = mem_acc_override_corner_map(mem_acc_vreg);
+		if (rc) {
+			pr_err("Unable to override corner map rc=%d\n", rc);
+			return rc;
+		}
+
+		rc = mem_acc_override_reg_addr_val_init(mem_acc_vreg);
+		if (rc) {
+			pr_err("Unable to override reg_config_list init rc=%d\n",
+				rc);
+			return rc;
+		}
+
+		for (i = 0; i < MEMORY_MAX; i++) {
+			rc = override_mem_acc_custom_data(pdev,
+							mem_acc_vreg, i);
+			if (rc) {
+				pr_err("Unable to override custom data for mem_type=%d rc=%d\n",
+					i, rc);
+				return rc;
+			}
+		}
+	}
+
+	if (acc_type_present) {
+		mem_acc_vreg->mem_acc_type_data = devm_kzalloc(
+			mem_acc_vreg->dev, mem_acc_vreg->num_corners *
+			MEM_ACC_TYPE_MAX * sizeof(u32), GFP_KERNEL);
+
+		if (!mem_acc_vreg->mem_acc_type_data) {
+			pr_err("Unable to allocate memory for mem_acc_type\n");
+			return -ENOMEM;
+		}
+
+		for (i = 0; i < MEM_ACC_TYPE_MAX; i++) {
+			if (mem_acc_vreg->mem_acc_type_addr[i]) {
+				snprintf(tmps, MEM_TYPE_STRING_LEN,
+					"qcom,mem-acc-type%d", i + 1);
+
+				j = i * mem_acc_vreg->num_corners;
+				rc = of_property_read_u32_array(
+					mem_acc_vreg->dev->of_node,
+					tmps,
+					&mem_acc_vreg->mem_acc_type_data[j],
+					mem_acc_vreg->num_corners);
+				if (rc) {
+					pr_err("Unable to get property %s rc=%d\n",
+						tmps, rc);
+					return rc;
+				}
+			}
+		}
+	}
+
+	return 0;
+}
+
+static int mem_acc_regulator_probe(struct platform_device *pdev)
+{
+	struct regulator_config reg_config = {};
+	struct mem_acc_regulator *mem_acc_vreg;
+	struct regulator_desc *rdesc;
+	struct regulator_init_data *init_data;
+	int rc;
+
+	if (!pdev->dev.of_node) {
+		pr_err("Device tree node is missing\n");
+		return -EINVAL;
+	}
+
+	init_data = of_get_regulator_init_data(&pdev->dev, pdev->dev.of_node,
+					NULL);
+	if (!init_data) {
+		pr_err("regulator init data is missing\n");
+		return -EINVAL;
+	}
+
+	init_data->constraints.input_uV = init_data->constraints.max_uV;
+	init_data->constraints.valid_ops_mask |= REGULATOR_CHANGE_VOLTAGE;
+
+	mem_acc_vreg = devm_kzalloc(&pdev->dev, sizeof(*mem_acc_vreg),
+			GFP_KERNEL);
+	if (!mem_acc_vreg)
+		return -ENOMEM;
+
+	mem_acc_vreg->dev = &pdev->dev;
+
+	rc = mem_acc_init(pdev, mem_acc_vreg);
+	if (rc) {
+		pr_err("Unable to initialize mem_acc configuration rc=%d\n",
+				rc);
+		return rc;
+	}
+
+	rdesc			= &mem_acc_vreg->rdesc;
+	rdesc->owner		= THIS_MODULE;
+	rdesc->type		= REGULATOR_VOLTAGE;
+	rdesc->ops		= &mem_acc_corner_ops;
+	rdesc->name		= init_data->constraints.name;
+
+	reg_config.dev = &pdev->dev;
+	reg_config.init_data = init_data;
+	reg_config.driver_data = mem_acc_vreg;
+	reg_config.of_node = pdev->dev.of_node;
+	mem_acc_vreg->rdev = regulator_register(rdesc, &reg_config);
+	if (IS_ERR(mem_acc_vreg->rdev)) {
+		rc = PTR_ERR(mem_acc_vreg->rdev);
+		if (rc != -EPROBE_DEFER)
+			pr_err("regulator_register failed: rc=%d\n", rc);
+		return rc;
+	}
+
+	platform_set_drvdata(pdev, mem_acc_vreg);
+
+	return 0;
+}
+
+static int mem_acc_regulator_remove(struct platform_device *pdev)
+{
+	struct mem_acc_regulator *mem_acc_vreg = platform_get_drvdata(pdev);
+
+	regulator_unregister(mem_acc_vreg->rdev);
+
+	return 0;
+}
+
+static const struct of_device_id mem_acc_regulator_match_table[] = {
+	{ .compatible = "qcom,mem-acc-regulator", },
+	{}
+};
+
+static struct platform_driver mem_acc_regulator_driver = {
+	.probe		= mem_acc_regulator_probe,
+	.remove		= mem_acc_regulator_remove,
+	.driver		= {
+		.name		= "qcom,mem-acc-regulator",
+		.of_match_table = mem_acc_regulator_match_table,
+		.owner		= THIS_MODULE,
+	},
+};
+
+int __init mem_acc_regulator_init(void)
+{
+	return platform_driver_register(&mem_acc_regulator_driver);
+}
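+/*
+ * Register at postcore_initcall so the ACC corner regulator is available
+ * before its CPR/CPUfreq consumers probe.
+ */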
+postcore_initcall(mem_acc_regulator_init);
+
+static void __exit mem_acc_regulator_exit(void)
+{
+	platform_driver_unregister(&mem_acc_regulator_driver);
+}
+module_exit(mem_acc_regulator_exit);
+
+MODULE_DESCRIPTION("MEM-ACC-SEL regulator driver");
+MODULE_LICENSE("GPL v2");
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/regulator/proxy-consumer.c	2019-01-22 16:16:26.275271509 +0100
@@ -0,0 +1,231 @@
+/*
+ * Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/bitops.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/slab.h>
+#include <linux/regulator/consumer.h>
+#include <linux/regulator/proxy-consumer.h>
+
+struct proxy_consumer {
+	struct list_head	list;
+	struct regulator	*reg;
+	bool			enable;
+	int			min_uV;
+	int			max_uV;
+	u32			current_uA;
+};
+
+static DEFINE_MUTEX(proxy_consumer_list_mutex);
+static LIST_HEAD(proxy_consumer_list);
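+/*
+ * Set once the boot-time proxy votes have been removed; registration is
+ * refused afterwards since nothing would remove the new votes.
+ */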
+static bool proxy_consumers_removed;
+
+/**
+ * regulator_proxy_consumer_register() - conditionally register a proxy consumer
+ *		 for the specified regulator and set its boot time parameters
+ * @reg_dev:		Device pointer of the regulator
+ * @reg_node:		Device node pointer of the regulator
+ *
+ * Returns a struct proxy_consumer pointer corresponding to the regulator on
+ * success, ERR_PTR() if an error occurred, or NULL if no proxy consumer is
+ * needed for the regulator.  This function calls
+ * regulator_get(reg_dev, "proxy") after first checking if any proxy consumer
+ * properties are present in the reg_node device node.  After that, the voltage,
+ * minimum current, and/or the enable state will be set based upon the device
+ * node property values.
+ */
+struct proxy_consumer *regulator_proxy_consumer_register(struct device *reg_dev,
+			struct device_node *reg_node)
+{
+	struct proxy_consumer *consumer = NULL;
+	const char *reg_name = "";
+	u32 voltage[2] = {0};
+	int rc;
+
+	/* Return immediately if no proxy consumer properties are specified. */
+	if (!of_find_property(reg_node, "qcom,proxy-consumer-enable", NULL)
+	    && !of_find_property(reg_node, "qcom,proxy-consumer-voltage", NULL)
+	    && !of_find_property(reg_node, "qcom,proxy-consumer-current", NULL))
+		return NULL;
+
+	mutex_lock(&proxy_consumer_list_mutex);
+
+	/* Do not register new consumers if they cannot be removed later. */
+	if (proxy_consumers_removed) {
+		rc = -EPERM;
+		goto unlock;
+	}
+
+	if (dev_name(reg_dev))
+		reg_name = dev_name(reg_dev);
+
+	consumer = kzalloc(sizeof(*consumer), GFP_KERNEL);
+	if (!consumer) {
+		pr_err("kzalloc failed\n");
+		rc = -ENOMEM;
+		goto unlock;
+	}
+
+	consumer->enable = of_property_read_bool(reg_node,
+					"qcom,proxy-consumer-enable");
+	of_property_read_u32(reg_node, "qcom,proxy-consumer-current",
+				&consumer->current_uA);
+	rc = of_property_read_u32_array(reg_node, "qcom,proxy-consumer-voltage",
+					voltage, 2);
+	if (!rc) {
+		consumer->min_uV = voltage[0];
+		consumer->max_uV = voltage[1];
+	}
+
+	dev_dbg(reg_dev, "proxy consumer request: enable=%d, voltage_range=[%d, %d] uV, min_current=%d uA\n",
+		consumer->enable, consumer->min_uV, consumer->max_uV,
+		consumer->current_uA);
+
+	consumer->reg = regulator_get(reg_dev, "proxy");
+	if (IS_ERR_OR_NULL(consumer->reg)) {
+		rc = consumer->reg ? PTR_ERR(consumer->reg) : -EINVAL;
+		pr_err("regulator_get() failed for %s, rc=%d\n", reg_name, rc);
+		goto unlock;
+	}
+
+	if (consumer->max_uV > 0 && consumer->min_uV <= consumer->max_uV) {
+		rc = regulator_set_voltage(consumer->reg, consumer->min_uV,
+						consumer->max_uV);
+		if (rc) {
+			pr_err("regulator_set_voltage %s failed, rc=%d\n",
+				reg_name, rc);
+			goto free_regulator;
+		}
+	}
+
+	if (consumer->current_uA > 0) {
+		rc = regulator_set_load(consumer->reg, consumer->current_uA);
+		if (rc < 0) {
+			pr_err("regulator_set_load %s failed, rc=%d\n",
+				reg_name, rc);
+			goto remove_voltage;
+		}
+	}
+
+	if (consumer->enable) {
+		rc = regulator_enable(consumer->reg);
+		if (rc) {
+			pr_err("regulator_enable %s failed, rc=%d\n", reg_name,
+				rc);
+			goto remove_current;
+		}
+	}
+
+	list_add(&consumer->list, &proxy_consumer_list);
+	mutex_unlock(&proxy_consumer_list_mutex);
+
+	return consumer;
+
+remove_current:
+	regulator_set_load(consumer->reg, 0);
+remove_voltage:
+	regulator_set_voltage(consumer->reg, 0, INT_MAX);
+free_regulator:
+	regulator_put(consumer->reg);
+unlock:
+	kfree(consumer);
+	mutex_unlock(&proxy_consumer_list_mutex);
+	return ERR_PTR(rc);
+}
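+
+/*
+ * Illustrative (hypothetical values) regulator node using the proxy
+ * consumer properties parsed above:
+ *	qcom,proxy-consumer-enable;
+ *	qcom,proxy-consumer-voltage = <1800000 1800000>;
+ *	qcom,proxy-consumer-current = <10000>;
+ */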
+
+/* proxy_consumer_list_mutex must be held by caller. */
+static int regulator_proxy_consumer_remove(struct proxy_consumer *consumer)
+{
+	int rc = 0;
+
+	if (consumer->enable) {
+		rc = regulator_disable(consumer->reg);
+		if (rc)
+			pr_err("regulator_disable failed, rc=%d\n", rc);
+	}
+
+	if (consumer->current_uA > 0) {
+		rc = regulator_set_load(consumer->reg, 0);
+		if (rc < 0)
+			pr_err("regulator_set_load failed, rc=%d\n",
+				rc);
+	}
+
+	if (consumer->max_uV > 0 && consumer->min_uV <= consumer->max_uV) {
+		rc = regulator_set_voltage(consumer->reg, 0, INT_MAX);
+		if (rc)
+			pr_err("regulator_set_voltage failed, rc=%d\n", rc);
+	}
+
+	regulator_put(consumer->reg);
+	list_del(&consumer->list);
+	kfree(consumer);
+
+	return rc;
+}
+
+/**
+ * regulator_proxy_consumer_unregister() - unregister a proxy consumer and
+ *					   remove its boot time requests
+ * @consumer:		Pointer to proxy_consumer to be removed
+ *
+ * Returns 0 on success or errno on failure.  This function removes all requests
+ * made by the proxy consumer in regulator_proxy_consumer_register() and then
+ * frees the consumer's resources.
+ */
+int regulator_proxy_consumer_unregister(struct proxy_consumer *consumer)
+{
+	int rc = 0;
+
+	if (IS_ERR_OR_NULL(consumer))
+		return 0;
+
+	mutex_lock(&proxy_consumer_list_mutex);
+	if (!proxy_consumers_removed)
+		rc = regulator_proxy_consumer_remove(consumer);
+	mutex_unlock(&proxy_consumer_list_mutex);
+
+	return rc;
+}
+
+/*
+ * Remove all proxy requests at late_initcall_sync.  The assumption is that all
+ * devices have probed at this point and made their own regulator requests.
+ */
+static int __init regulator_proxy_consumer_remove_all(void)
+{
+	struct proxy_consumer *consumer;
+	struct proxy_consumer *temp;
+
+	mutex_lock(&proxy_consumer_list_mutex);
+	proxy_consumers_removed = true;
+
+	if (!list_empty(&proxy_consumer_list))
+		pr_info("removing regulator proxy consumer requests\n");
+
+	list_for_each_entry_safe(consumer, temp, &proxy_consumer_list, list)
+		regulator_proxy_consumer_remove(consumer);
+	mutex_unlock(&proxy_consumer_list_mutex);
+
+	return 0;
+}
+late_initcall_sync(regulator_proxy_consumer_remove_all);
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/regulator/qpnp-labibb-regulator.c	2019-10-29 09:26:24.649213023 +0100
@@ -0,0 +1,4244 @@
+/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt)	"%s: " fmt, __func__
+
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/ktime.h>
+#include <linux/regmap.h>
+#include <linux/module.h>
+#include <linux/notifier.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_irq.h>
+#include <linux/spmi.h>
+#include <linux/platform_device.h>
+#include <linux/string.h>
+#include <linux/workqueue.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+#include <linux/regulator/of_regulator.h>
+#include <linux/qpnp/qpnp-revid.h>
+#include <linux/regulator/qpnp-labibb-regulator.h>
+
+#define QPNP_LABIBB_REGULATOR_DRIVER_NAME	"qcom,qpnp-labibb-regulator"
+
+#define REG_REVISION_2			0x01
+#define REG_PERPH_TYPE			0x04
+#define REG_INT_RT_STS			0x10
+
+#define QPNP_LAB_TYPE			0x24
+#define QPNP_IBB_TYPE			0x20
+
+/* Common register value for LAB/IBB */
+#define REG_LAB_IBB_LCD_MODE		0x0
+#define REG_LAB_IBB_AMOLED_MODE		BIT(7)
+#define REG_LAB_IBB_SEC_ACCESS		0xD0
+#define REG_LAB_IBB_SEC_UNLOCK_CODE	0xA5
+
+/* LAB register offset definitions */
+#define REG_LAB_STATUS1			0x08
+#define REG_LAB_SWIRE_PGM_CTL		0x40
+#define REG_LAB_VOLTAGE			0x41
+#define REG_LAB_RING_SUPPRESSION_CTL	0x42
+#define REG_LAB_LCD_AMOLED_SEL		0x44
+#define REG_LAB_MODULE_RDY		0x45
+#define REG_LAB_ENABLE_CTL		0x46
+#define REG_LAB_PD_CTL			0x47
+#define REG_LAB_CLK_DIV			0x48
+#define REG_LAB_IBB_EN_RDY		0x49
+#define REG_LAB_CURRENT_LIMIT		0x4B
+#define REG_LAB_CURRENT_SENSE		0x4C
+#define REG_LAB_PS_CTL			0x50
+#define REG_LAB_RDSON_MNGMNT		0x53
+#define REG_LAB_PRECHARGE_CTL		0x5E
+#define REG_LAB_SOFT_START_CTL		0x5F
+#define REG_LAB_SPARE_CTL		0x60
+#define REG_LAB_PFM_CTL			0x62
+
+/* LAB registers for PM660A */
+#define REG_LAB_VOUT_DEFAULT		0x44
+#define REG_LAB_SW_HIGH_PSRR_CTL	0x70
+#define REG_LAB_LDO_PD_CTL		0x78
+#define REG_LAB_VPH_ENVELOP_CTL		0x7E
+
+/* LAB register bits definitions */
+
+/* REG_LAB_STATUS1 */
+#define LAB_STATUS1_VREG_OK_BIT		BIT(7)
+#define LAB_STATUS1_SC_DETECT_BIT	BIT(6)
+
+/* REG_LAB_SWIRE_PGM_CTL */
+#define LAB_EN_SWIRE_PGM_VOUT		BIT(7)
+#define LAB_EN_SWIRE_PGM_PD		BIT(6)
+
+/* REG_LAB_VOLTAGE */
+#define LAB_VOLTAGE_OVERRIDE_EN		BIT(7)
+#define LAB_VOLTAGE_SET_MASK		GENMASK(3, 0)
+
+/* REG_LAB_RING_SUPPRESSION_CTL */
+#define LAB_RING_SUPPRESSION_CTL_EN	BIT(7)
+
+/* REG_LAB_MODULE_RDY */
+#define LAB_MODULE_RDY_EN		BIT(7)
+
+/* REG_LAB_ENABLE_CTL */
+#define LAB_ENABLE_CTL_EN		BIT(7)
+
+/* REG_LAB_PD_CTL */
+#define LAB_PD_CTL_STRONG_PULL		BIT(0)
+#define LAB_PD_CTL_STRENGTH_MASK	BIT(0)
+#define LAB_PD_CTL_DISABLE_PD		BIT(1)
+#define LAB_PD_CTL_EN_MASK		BIT(1)
+
+/* REG_LAB_IBB_EN_RDY */
+#define LAB_IBB_EN_RDY_EN		BIT(7)
+
+/* REG_LAB_CURRENT_LIMIT */
+#define LAB_CURRENT_LIMIT_MASK		GENMASK(2, 0)
+#define LAB_CURRENT_LIMIT_EN_BIT	BIT(7)
+#define LAB_OVERRIDE_CURRENT_MAX_BIT	BIT(3)
+
+/* REG_LAB_CURRENT_SENSE */
+#define LAB_CURRENT_SENSE_GAIN_MASK	GENMASK(1, 0)
+
+/* REG_LAB_PS_CTL */
+#define LAB_PS_THRESH_MASK		GENMASK(1, 0)
+#define LAB_PS_CTL_EN			BIT(7)
+
+/* REG_LAB_RDSON_MNGMNT */
+#define LAB_RDSON_MNGMNT_NFET_SLEW_EN	BIT(5)
+#define LAB_RDSON_MNGMNT_PFET_SLEW_EN	BIT(4)
+#define LAB_RDSON_MNGMNT_NFET_MASK	GENMASK(3, 2)
+#define LAB_RDSON_MNGMNT_NFET_SHIFT	2
+#define LAB_RDSON_MNGMNT_PFET_MASK	GENMASK(1, 0)
+#define LAB_RDSON_NFET_SW_SIZE_QUARTER	0x0
+#define LAB_RDSON_PFET_SW_SIZE_QUARTER	0x0
+
+/* REG_LAB_PRECHARGE_CTL */
+#define LAB_FAST_PRECHARGE_CTL_EN	BIT(2)
+#define LAB_MAX_PRECHARGE_TIME_MASK	GENMASK(1, 0)
+
+/* REG_LAB_SOFT_START_CTL */
+#define LAB_SOFT_START_CTL_MASK		GENMASK(1, 0)
+
+/* REG_LAB_SPARE_CTL */
+#define LAB_SPARE_TOUCH_WAKE_BIT	BIT(3)
+#define LAB_SPARE_DISABLE_SCP_BIT	BIT(0)
+
+/* REG_LAB_PFM_CTL */
+#define LAB_PFM_EN_BIT			BIT(7)
+
+/* REG_LAB_SW_HIGH_PSRR_CTL */
+#define LAB_EN_SW_HIGH_PSRR_MODE	BIT(7)
+#define LAB_SW_HIGH_PSRR_REQ		BIT(0)
+
+/* REG_LAB_VPH_ENVELOP_CTL */
+#define LAB_VREF_HIGH_PSRR_SEL_MASK	GENMASK(7, 6)
+#define LAB_SEL_HW_HIGH_PSRR_SRC_MASK	GENMASK(1, 0)
+#define LAB_SEL_HW_HIGH_PSRR_SRC_SHIFT	6
+
+/* IBB register offset definitions */
+#define REG_IBB_REVISION4		0x03
+#define REG_IBB_STATUS1			0x08
+#define REG_IBB_VOLTAGE			0x41
+#define REG_IBB_RING_SUPPRESSION_CTL	0x42
+#define REG_IBB_LCD_AMOLED_SEL		0x44
+#define REG_IBB_MODULE_RDY		0x45
+#define REG_IBB_ENABLE_CTL		0x46
+#define REG_IBB_PD_CTL			0x47
+#define REG_IBB_CLK_DIV			0x48
+#define REG_IBB_CURRENT_LIMIT		0x4B
+#define REG_IBB_PS_CTL			0x50
+#define REG_IBB_RDSON_MNGMNT		0x53
+#define REG_IBB_NONOVERLAP_TIME_1	0x56
+#define REG_IBB_NONOVERLAP_TIME_2	0x57
+#define REG_IBB_PWRUP_PWRDN_CTL_1	0x58
+#define REG_IBB_PWRUP_PWRDN_CTL_2	0x59
+#define REG_IBB_SOFT_START_CTL		0x5F
+#define REG_IBB_SWIRE_CTL		0x5A
+#define REG_IBB_OUTPUT_SLEW_CTL		0x5D
+#define REG_IBB_SPARE_CTL		0x60
+#define REG_IBB_NLIMIT_DAC		0x61
+
+/* IBB registers for PM660A */
+#define REG_IBB_DEFAULT_VOLTAGE		0x40
+#define REG_IBB_FLOAT_CTL		0x43
+#define REG_IBB_VREG_OK_CTL		0x55
+#define REG_IBB_VOUT_MIN_MAGNITUDE	0x5C
+#define REG_IBB_PFM_CTL			0x62
+#define REG_IBB_SMART_PS_CTL		0x65
+#define REG_IBB_ADAPT_DEAD_TIME		0x67
+
+/* IBB register bits definition */
+
+/* REG_IBB_STATUS1 */
+#define IBB_STATUS1_VREG_OK_BIT		BIT(7)
+#define IBB_STATUS1_SC_DETECT_BIT	BIT(6)
+
+/* REG_IBB_VOLTAGE */
+#define IBB_VOLTAGE_OVERRIDE_EN		BIT(7)
+#define IBB_VOLTAGE_SET_MASK		GENMASK(5, 0)
+
+/* REG_IBB_CLK_DIV */
+#define IBB_CLK_DIV_OVERRIDE_EN		BIT(7)
+#define IBB_CLK_DIV_MASK		GENMASK(3, 0)
+
+/* REG_IBB_RING_SUPPRESSION_CTL */
+#define IBB_RING_SUPPRESSION_CTL_EN	BIT(7)
+
+/* REG_IBB_FLOAT_CTL */
+#define IBB_FLOAT_EN			BIT(0)
+#define IBB_SMART_FLOAT_EN		BIT(7)
+
+/* REG_IBB_MIN_MAGNITUDE */
+#define IBB_MIN_VOLTAGE_0P8_V		BIT(3)
+
+/* REG_IBB_MODULE_RDY */
+#define IBB_MODULE_RDY_EN		BIT(7)
+
+/* REG_IBB_ENABLE_CTL */
+#define IBB_ENABLE_CTL_MASK		(BIT(7) | BIT(6))
+#define IBB_ENABLE_CTL_SWIRE_RDY	BIT(6)
+#define IBB_ENABLE_CTL_MODULE_EN	BIT(7)
+
+/* REG_IBB_PD_CTL */
+#define IBB_PD_CTL_HALF_STRENGTH	BIT(0)
+#define IBB_PD_CTL_STRENGTH_MASK	BIT(0)
+#define IBB_PD_CTL_EN			BIT(7)
+#define IBB_SWIRE_PD_UPD		BIT(1)
+#define IBB_PD_CTL_EN_MASK		BIT(7)
+
+/* REG_IBB_CURRENT_LIMIT */
+#define IBB_CURRENT_LIMIT_MASK		GENMASK(4, 0)
+#define IBB_CURRENT_LIMIT_DEBOUNCE_SHIFT	5
+#define IBB_CURRENT_LIMIT_DEBOUNCE_MASK	GENMASK(6, 5)
+#define IBB_CURRENT_LIMIT_EN		BIT(7)
+#define IBB_ILIMIT_COUNT_CYC8		0
+#define IBB_CURRENT_MAX_500MA		0xA
+
+/* REG_IBB_PS_CTL */
+#define IBB_PS_CTL_EN			0x85
+
+/* REG_IBB_SMART_PS_CTL */
+#define IBB_SMART_PS_CTL_EN			BIT(7)
+#define IBB_NUM_SWIRE_PULSE_WAIT		0x5
+
+/* REG_IBB_OUTPUT_SLEW_CTL */
+#define IBB_SLEW_CTL_EN				BIT(7)
+#define IBB_SLEW_RATE_SPEED_FAST_EN		BIT(6)
+#define IBB_SLEW_RATE_TRANS_TIME_FAST_SHIFT	3
+#define IBB_SLEW_RATE_TRANS_TIME_FAST_MASK	GENMASK(5, 3)
+#define IBB_SLEW_RATE_TRANS_TIME_SLOW_MASK	GENMASK(2, 0)
+
+/* REG_IBB_VREG_OK_CTL */
+#define IBB_VREG_OK_EN_OVERLOAD_BLANK		BIT(7)
+#define IBB_VREG_OK_OVERLOAD_DEB_SHIFT		5
+#define IBB_VREG_OK_OVERLOAD_DEB_MASK		GENMASK(6, 5)
+
+/* REG_IBB_RDSON_MNGMNT */
+#define IBB_NFET_SLEW_EN		BIT(7)
+#define IBB_PFET_SLEW_EN		BIT(6)
+#define IBB_OVERRIDE_NFET_SW_SIZE	BIT(5)
+#define IBB_OVERRIDE_PFET_SW_SIZE	BIT(2)
+#define IBB_NFET_SW_SIZE_MASK		GENMASK(3, 2)
+#define IBB_PFET_SW_SIZE_MASK		GENMASK(1, 0)
+
+/* REG_IBB_NONOVERLAP_TIME_1 */
+#define IBB_OVERRIDE_NONOVERLAP		BIT(6)
+#define IBB_NONOVERLAP_NFET_MASK	GENMASK(2, 0)
+#define IBB_NFET_GATE_DELAY_2		0x3
+
+/* REG_IBB_NONOVERLAP_TIME_2 */
+#define IBB_N2P_MUX_SEL		BIT(0)
+
+/* REG_IBB_SOFT_START_CTL */
+#define IBB_SOFT_START_CHARGING_RESISTOR_16K	0x3
+
+/* REG_IBB_SPARE_CTL */
+#define IBB_BYPASS_PWRDN_DLY2_BIT	BIT(5)
+#define IBB_POFF_CTL_MASK		BIT(4)
+#define IBB_FASTER_PFET_OFF		BIT(4)
+#define IBB_FAST_STARTUP		BIT(3)
+
+/* REG_IBB_SWIRE_CTL */
+#define IBB_SWIRE_VOUT_UPD_EN		BIT(6)
+#define IBB_OUTPUT_VOLTAGE_AT_ONE_PULSE_MASK	GENMASK(5, 0)
+#define MAX_OUTPUT_EDGE_VOLTAGE_MV	6300
+#define MAX_OUTPUT_PULSE_VOLTAGE_MV	7700
+#define MIN_OUTPUT_PULSE_VOLTAGE_MV	1400
+#define OUTPUT_VOLTAGE_STEP_MV		100
+
+/* REG_IBB_NLIMIT_DAC */
+#define IBB_DEFAULT_NLIMIT_DAC		0x5
+
+/* REG_IBB_PFM_CTL */
+#define IBB_PFM_ENABLE			BIT(7)
+#define IBB_PFM_PEAK_CURRENT_BIT_SHIFT	1
+#define IBB_PFM_PEAK_CURRENT_MASK	GENMASK(3, 1)
+#define IBB_PFM_HYSTERESIS_BIT_SHIFT	4
+#define IBB_PFM_HYSTERESIS_MASK		GENMASK(5, 4)
+
+/* REG_IBB_PWRUP_PWRDN_CTL_1 */
+#define IBB_PWRUP_PWRDN_CTL_1_DLY1_BITS	2
+#define IBB_PWRUP_PWRDN_CTL_1_DLY1_MASK	GENMASK(5, 4)
+#define IBB_PWRUP_PWRDN_CTL_1_DLY1_SHIFT	4
+#define IBB_PWRUP_PWRDN_CTL_1_EN_DLY2	BIT(3)
+#define IBB_PWRUP_PWRDN_CTL_1_DLY2_MASK	GENMASK(1, 0)
+#define IBB_PWRUP_PWRDN_CTL_1_LAB_VREG_OK	BIT(7)
+#define IBB_PWRUP_PWRDN_CTL_1_EN_DLY1	BIT(6)
+#define PWRUP_PWRDN_CTL_1_DISCHARGE_EN	BIT(2)
+
+/* REG_IBB_PWRUP_PWRDN_CTL_2 */
+#define IBB_DIS_DLY_MASK		GENMASK(1, 0)
+#define IBB_WAIT_MBG_OK			BIT(2)
+
+/* Constants */
+#define SWIRE_DEFAULT_2ND_CMD_DLY_MS		20
+#define SWIRE_DEFAULT_IBB_PS_ENABLE_DLY_MS	200
+#define IBB_HW_DEFAULT_SLEW_RATE		12000
+
+/**
+ * enum qpnp_labibb_mode - working mode of LAB/IBB regulators
+ * %QPNP_LABIBB_LCD_MODE:		configure LAB and IBB regulators
+ * together to provide power supply for LCD
+ * %QPNP_LABIBB_AMOLED_MODE:		configure LAB and IBB regulators
+ * together to provide power supply for AMOLED
+ * %QPNP_LABIBB_MAX_MODE:		max number of configurable modes
+ * supported by qpnp_labibb_regulator
+ */
+enum qpnp_labibb_mode {
+	QPNP_LABIBB_LCD_MODE,
+	QPNP_LABIBB_AMOLED_MODE,
+	QPNP_LABIBB_MAX_MODE,
+};
+
+/**
+ * IBB_SW_CONTROL_EN: Specifies IBB is enabled through software.
+ * IBB_SW_CONTROL_DIS: Specifies IBB is disabled through software.
+ * IBB_HW_CONTROL: Specifies IBB is controlled through SWIRE (hardware).
+ * IBB_HW_SW_CONTROL: Specifies IBB is controlled through both software
+ * and SWIRE (hardware).
+ */
+enum ibb_mode {
+	IBB_SW_CONTROL_EN,
+	IBB_SW_CONTROL_DIS,
+	IBB_HW_CONTROL,
+	IBB_HW_SW_CONTROL,
+};
+
+static const int ibb_dischg_res_table[] = {
+	300,
+	64,
+	32,
+	16,
+};
+
+static const int ibb_pwrup_dly_table[] = {
+	1000,
+	2000,
+	4000,
+	8000,
+};
+
+static const int ibb_pwrdn_dly_table[] = {
+	1000,
+	2000,
+	4000,
+	8000,
+};
+
+static const int lab_clk_div_table[] = {
+	3200,
+	2740,
+	2400,
+	2130,
+	1920,
+	1750,
+	1600,
+	1480,
+	1370,
+	1280,
+	1200,
+	1130,
+	1070,
+	1010,
+	960,
+	910,
+};
+
+static const int ibb_clk_div_table[] = {
+	3200,
+	2740,
+	2400,
+	2130,
+	1920,
+	1750,
+	1600,
+	1480,
+	1370,
+	1280,
+	1200,
+	1130,
+	1070,
+	1010,
+	960,
+	910,
+};
+
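+/* LAB/IBB current-limit thresholds, in mA */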
+static const int lab_current_limit_table[] = {
+	200,
+	400,
+	600,
+	800,
+	1000,
+	1200,
+	1400,
+	1600,
+};
+
+static const char * const lab_current_sense_table[] = {
+	"0.5x",
+	"1x",
+	"1.5x",
+	"2x"
+};
+
+static const int ibb_current_limit_table[] = {
+	0,
+	50,
+	100,
+	150,
+	200,
+	250,
+	300,
+	350,
+	400,
+	450,
+	500,
+	550,
+	600,
+	650,
+	700,
+	750,
+	800,
+	850,
+	900,
+	950,
+	1000,
+	1050,
+	1100,
+	1150,
+	1200,
+	1250,
+	1300,
+	1350,
+	1400,
+	1450,
+	1500,
+	1550,
+};
+
+static const int ibb_output_slew_ctl_table[] = {
+	100,
+	200,
+	500,
+	1000,
+	2000,
+	10000,
+	12000,
+	15000
+};
+
+static const int ibb_debounce_table[] = {
+	8,
+	16,
+	32,
+	64,
+};
+
+static const int ibb_overload_debounce_table[] = {
+	1,
+	2,
+	4,
+	8
+};
+
+static const int ibb_vreg_ok_deb_table[] = {
+	4,
+	8,
+	16,
+	32
+};
+
+static const int lab_ps_thresh_table_v1[] = {
+	20,
+	30,
+	40,
+	50,
+};
+
+static const int lab_ps_thresh_table_v2[] = {
+	50,
+	60,
+	70,
+	80,
+};
+
+static const int lab_soft_start_table[] = {
+	200,
+	400,
+	600,
+	800,
+};
+
+static const int lab_rdson_nfet_table[] = {
+	25,
+	50,
+	75,
+	100,
+};
+
+static const int lab_rdson_pfet_table[] = {
+	25,
+	50,
+	75,
+	100,
+};
+
+static const int lab_max_precharge_table[] = {
+	200,
+	300,
+	400,
+	500,
+};
+
+static const int ibb_pfm_peak_curr_table[] = {
+	150,
+	200,
+	250,
+	300,
+	350,
+	400,
+	450,
+	500
+};
+
+static const int ibb_pfm_hysteresis_table[] = {
+	0,
+	25,
+	50,
+	0
+};
+
+static const int lab_vref_high_psrr_table[] = {
+	350,
+	400,
+	450,
+	500
+};
+
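+/* Per-rail state: LAB is the positive (boost) rail, IBB the negative one */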
+struct lab_regulator {
+	struct regulator_desc		rdesc;
+	struct regulator_dev		*rdev;
+	struct mutex			lab_mutex;
+
+	int				lab_vreg_ok_irq;
+	int				lab_sc_irq;
+
+	int				curr_volt;
+	int				min_volt;
+
+	int				step_size;
+	int				slew_rate;
+	int				soft_start;
+	int				sc_wait_time_ms;
+
+	int				vreg_enabled;
+};
+
+struct ibb_regulator {
+	struct regulator_desc		rdesc;
+	struct regulator_dev		*rdev;
+	struct mutex			ibb_mutex;
+
+	int				ibb_sc_irq;
+
+	int				curr_volt;
+	int				min_volt;
+
+	int				step_size;
+	int				slew_rate;
+	int				soft_start;
+
+	u32				pwrup_dly;
+	u32				pwrdn_dly;
+
+	int				vreg_enabled;
+	int				num_swire_trans;
+};
+
+struct qpnp_labibb {
+	struct device			*dev;
+	struct platform_device		*pdev;
+	struct regmap			*regmap;
+	struct pmic_revid_data		*pmic_rev_id;
+	u16				lab_base;
+	u16				ibb_base;
+	u8				lab_dig_major;
+	u8				ibb_dig_major;
+	struct lab_regulator		lab_vreg;
+	struct ibb_regulator		ibb_vreg;
+	const struct ibb_ver_ops	*ibb_ver_ops;
+	const struct lab_ver_ops	*lab_ver_ops;
+	struct mutex			bus_mutex;
+	enum qpnp_labibb_mode		mode;
+	struct work_struct		lab_vreg_ok_work;
+	struct delayed_work		sc_err_recovery_work;
+	struct hrtimer			sc_err_check_timer;
+	int				sc_err_count;
+	bool				standalone;
+	bool				ttw_en;
+	bool				in_ttw_mode;
+	bool				ibb_settings_saved;
+	bool				swire_control;
+	bool				pbs_control;
+	bool				ttw_force_lab_on;
+	bool				skip_2nd_swire_cmd;
+	bool				pfm_enable;
+	bool				notify_lab_vreg_ok_sts;
+	bool				detect_lab_sc;
+	bool				sc_detected;
+	u32				swire_2nd_cmd_delay;
+	u32				swire_ibb_ps_enable_delay;
+};
+
+static RAW_NOTIFIER_HEAD(labibb_notifier);
+
+struct ibb_ver_ops {
+	int (*set_default_voltage)(struct qpnp_labibb *labibb,
+			bool use_default);
+	int (*set_voltage)(struct qpnp_labibb *labibb, int min_uV, int max_uV);
+	int (*sel_mode)(struct qpnp_labibb *labibb, bool is_ibb);
+	int (*get_mode)(struct qpnp_labibb *labibb);
+	int (*set_clk_div)(struct qpnp_labibb *labibb, u8 val);
+	int (*smart_ps_config)(struct qpnp_labibb *labibb, bool enable,
+				int num_swire_trans, int neg_curr_limit);
+	int (*soft_start_ctl)(struct qpnp_labibb *labibb,
+				 struct device_node *of_node);
+	int (*voltage_at_one_pulse)(struct qpnp_labibb *labibb, u32 volt);
+};
+
+struct lab_ver_ops {
+	const char *ver_str;
+	int (*set_default_voltage)(struct qpnp_labibb *labibb,
+					bool default_pres);
+	int (*ps_ctl)(struct qpnp_labibb *labibb,
+				u32 thresh, bool enable);
+};
+
+enum ibb_settings_index {
+	IBB_PD_CTL = 0,
+	IBB_CURRENT_LIMIT,
+	IBB_RDSON_MNGMNT,
+	IBB_PWRUP_PWRDN_CTL_1,
+	IBB_PWRUP_PWRDN_CTL_2,
+	IBB_NLIMIT_DAC,
+	IBB_PS_CTL,
+	IBB_SOFT_START_CTL,
+	IBB_SETTINGS_MAX,
+};
+
+enum lab_settings_index {
+	LAB_SOFT_START_CTL = 0,
+	LAB_PS_CTL,
+	LAB_RDSON_MNGMNT,
+	LAB_SETTINGS_MAX,
+};
+
+struct settings {
+	u16	address;
+	u8	value;
+	bool	sec_access;
+};
+
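+/*
+ * Build a settings-table entry from the matching REG_<name> offset;
+ * .value is filled in at runtime by qpnp_labibb_save_settings().
+ */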
+#define SETTING(_id, _sec_access)		\
+	[_id] = {				\
+		.address = REG_##_id,		\
+		.sec_access = _sec_access,	\
+	}
+
+static struct settings ibb_settings[IBB_SETTINGS_MAX] = {
+	SETTING(IBB_PD_CTL, false),
+	SETTING(IBB_CURRENT_LIMIT, true),
+	SETTING(IBB_RDSON_MNGMNT, false),
+	SETTING(IBB_PWRUP_PWRDN_CTL_1, true),
+	SETTING(IBB_PWRUP_PWRDN_CTL_2, true),
+	SETTING(IBB_NLIMIT_DAC, false),
+	SETTING(IBB_PS_CTL, false),
+	SETTING(IBB_SOFT_START_CTL, false),
+};
+
+static struct settings lab_settings[LAB_SETTINGS_MAX] = {
+	SETTING(LAB_SOFT_START_CTL, false),
+	SETTING(LAB_PS_CTL, false),
+	SETTING(LAB_RDSON_MNGMNT, false),
+};
+
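+/*
+ * Register access helpers: all regmap accesses are serialized with
+ * bus_mutex; the "sec" variants write the unlock code to SEC_ACCESS
+ * before touching a secured register.
+ */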
+static int
+qpnp_labibb_read(struct qpnp_labibb *labibb, u16 address,
+			u8 *val, int count)
+{
+	int rc = 0;
+	struct platform_device *pdev = labibb->pdev;
+
+	mutex_lock(&(labibb->bus_mutex));
+	rc = regmap_bulk_read(labibb->regmap, address, val, count);
+	if (rc < 0)
+		pr_err("SPMI read failed address=0x%02x sid=0x%02x rc=%d\n",
+			address, to_spmi_device(pdev->dev.parent)->usid, rc);
+
+	mutex_unlock(&(labibb->bus_mutex));
+	return rc;
+}
+
+static int
+qpnp_labibb_write(struct qpnp_labibb *labibb, u16 address,
+			u8 *val, int count)
+{
+	int rc = 0;
+	struct platform_device *pdev = labibb->pdev;
+
+	mutex_lock(&(labibb->bus_mutex));
+	if (address == 0) {
+		pr_err("address cannot be zero address=0x%02x sid=0x%02x rc=%d\n",
+			address, to_spmi_device(pdev->dev.parent)->usid, rc);
+		rc = -EINVAL;
+		goto error;
+	}
+
+	rc = regmap_bulk_write(labibb->regmap, address, val, count);
+	if (rc < 0)
+		pr_err("write failed address=0x%02x sid=0x%02x rc=%d\n",
+			address, to_spmi_device(pdev->dev.parent)->usid, rc);
+
+error:
+	mutex_unlock(&(labibb->bus_mutex));
+	return rc;
+}
+
+static int
+qpnp_labibb_masked_write(struct qpnp_labibb *labibb, u16 address,
+						u8 mask, u8 val)
+{
+	int rc = 0;
+	struct platform_device *pdev = labibb->pdev;
+
+	mutex_lock(&(labibb->bus_mutex));
+	if (address == 0) {
+		pr_err("address cannot be zero address=0x%02x sid=0x%02x\n",
+			address, to_spmi_device(pdev->dev.parent)->usid);
+		rc = -EINVAL;
+		goto error;
+	}
+
+	rc = regmap_update_bits(labibb->regmap, address, mask, val);
+	if (rc < 0)
+		pr_err("spmi write failed: addr=%03X, rc=%d\n", address, rc);
+
+error:
+	mutex_unlock(&(labibb->bus_mutex));
+	return rc;
+}
+
+static int qpnp_labibb_sec_write(struct qpnp_labibb *labibb, u16 base,
+					u8 offset, u8 val)
+{
+	int rc = 0;
+	u8 sec_val = REG_LAB_IBB_SEC_UNLOCK_CODE;
+	struct platform_device *pdev = labibb->pdev;
+
+	mutex_lock(&(labibb->bus_mutex));
+	if (base == 0) {
+		pr_err("base cannot be zero base=0x%02x sid=0x%02x\n",
+			base, to_spmi_device(pdev->dev.parent)->usid);
+		rc = -EINVAL;
+		goto error;
+	}
+
+	rc = regmap_write(labibb->regmap, base + REG_LAB_IBB_SEC_ACCESS,
+				sec_val);
+	if (rc < 0) {
+		pr_err("register %x failed rc = %d\n",
+			base + REG_LAB_IBB_SEC_ACCESS, rc);
+		goto error;
+	}
+
+	rc = regmap_write(labibb->regmap, base + offset, val);
+	if (rc < 0)
+		pr_err("failed: addr=%03X, rc=%d\n",
+			base + offset, rc);
+
+error:
+	mutex_unlock(&(labibb->bus_mutex));
+	return rc;
+}
+
+static int qpnp_labibb_sec_masked_write(struct qpnp_labibb *labibb, u16 base,
+					u8 offset, u8 mask, u8 val)
+{
+	int rc = 0;
+	u8 sec_val = REG_LAB_IBB_SEC_UNLOCK_CODE;
+	struct platform_device *pdev = labibb->pdev;
+
+	mutex_lock(&(labibb->bus_mutex));
+	if (base == 0) {
+		pr_err("base cannot be zero base=0x%02x sid=0x%02x\n",
+			base, to_spmi_device(pdev->dev.parent)->usid);
+		rc = -EINVAL;
+		goto error;
+	}
+
+	rc = regmap_write(labibb->regmap, base + REG_LAB_IBB_SEC_ACCESS,
+				sec_val);
+	if (rc < 0) {
+		pr_err("register %x failed rc = %d\n",
+			base + REG_LAB_IBB_SEC_ACCESS, rc);
+		goto error;
+	}
+
+	rc = regmap_update_bits(labibb->regmap, base + offset, mask, val);
+	if (rc < 0)
+		pr_err("spmi write failed: addr=%03X, rc=%d\n", base, rc);
+
+error:
+	mutex_unlock(&(labibb->bus_mutex));
+	return rc;
+}
+
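+/* V1 PMICs have no smart pulse-skip control, so this is a no-op. */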
+static int qpnp_ibb_smart_ps_config_v1(struct qpnp_labibb *labibb, bool enable,
+					int num_swire_trans, int neg_curr_limit)
+{
+	return 0;
+}
+
+static int qpnp_ibb_smart_ps_config_v2(struct qpnp_labibb *labibb, bool enable,
+					int num_swire_trans, int neg_curr_limit)
+{
+	u8 val;
+	int rc = 0;
+
+	if (enable) {
+		val = IBB_NUM_SWIRE_PULSE_WAIT;
+		rc = qpnp_labibb_write(labibb,
+			labibb->ibb_base + REG_IBB_PS_CTL, &val, 1);
+		if (rc < 0) {
+			pr_err("write register %x failed rc = %d\n",
+						REG_IBB_PS_CTL, rc);
+			return rc;
+		}
+	}
+
+	val = enable ? IBB_SMART_PS_CTL_EN : IBB_NUM_SWIRE_PULSE_WAIT;
+	if (num_swire_trans)
+		val |= num_swire_trans;
+	else
+		val |= IBB_NUM_SWIRE_PULSE_WAIT;
+
+	rc = qpnp_labibb_write(labibb,
+		labibb->ibb_base + REG_IBB_SMART_PS_CTL, &val, 1);
+	if (rc < 0) {
+		pr_err("write register %x failed rc = %d\n",
+					REG_IBB_SMART_PS_CTL, rc);
+		return rc;
+	}
+
+	val = enable ? (neg_curr_limit ? neg_curr_limit :
+		IBB_DEFAULT_NLIMIT_DAC) : IBB_DEFAULT_NLIMIT_DAC;
+
+	rc = qpnp_labibb_write(labibb,
+		labibb->ibb_base + REG_IBB_NLIMIT_DAC, &val, 1);
+	if (rc < 0)
+		pr_err("write register %x failed rc = %d\n",
+					REG_IBB_NLIMIT_DAC, rc);
+
+	return rc;
+}
+
+static int qpnp_labibb_sel_mode_v1(struct qpnp_labibb *labibb, bool is_ibb)
+{
+	int rc = 0;
+	u8 val;
+	u16 base;
+
+	val = (labibb->mode == QPNP_LABIBB_LCD_MODE) ? REG_LAB_IBB_LCD_MODE :
+				 REG_LAB_IBB_AMOLED_MODE;
+
+	base = is_ibb ? labibb->ibb_base : labibb->lab_base;
+
+	rc = qpnp_labibb_sec_write(labibb, base, REG_LAB_LCD_AMOLED_SEL,
+					val);
+	if (rc < 0)
+		pr_err("register %x failed rc = %d\n",
+			REG_LAB_LCD_AMOLED_SEL, rc);
+
+	return rc;
+}
+
+static int qpnp_labibb_sel_mode_v2(struct qpnp_labibb *labibb, bool is_ibb)
+{
+	return 0;
+}
+
+static int qpnp_ibb_get_mode_v1(struct qpnp_labibb *labibb)
+{
+	int rc = 0;
+	u8 val;
+
+	rc = qpnp_labibb_read(labibb, labibb->ibb_base + REG_IBB_LCD_AMOLED_SEL,
+				&val, 1);
+	if (rc < 0)
+		return rc;
+
+	if (val == REG_LAB_IBB_AMOLED_MODE)
+		labibb->mode = QPNP_LABIBB_AMOLED_MODE;
+	else
+		labibb->mode = QPNP_LABIBB_LCD_MODE;
+
+	return 0;
+}
+
+static int qpnp_ibb_get_mode_v2(struct qpnp_labibb *labibb)
+{
+	labibb->mode = QPNP_LABIBB_AMOLED_MODE;
+
+	return 0;
+}
+
+static int qpnp_ibb_set_clk_div_v1(struct qpnp_labibb *labibb, u8 val)
+{
+	int rc = 0;
+
+	rc = qpnp_labibb_write(labibb, labibb->ibb_base + REG_IBB_CLK_DIV,
+				&val, 1);
+
+	return rc;
+}
+
+static int qpnp_ibb_set_clk_div_v2(struct qpnp_labibb *labibb, u8 val)
+{
+	int rc = 0;
+
+	val |= IBB_CLK_DIV_OVERRIDE_EN;
+	rc = qpnp_labibb_masked_write(labibb, labibb->ibb_base +
+				REG_IBB_CLK_DIV, IBB_CLK_DIV_MASK |
+				IBB_CLK_DIV_OVERRIDE_EN, val);
+
+	return rc;
+}
+
+static int qpnp_ibb_soft_start_ctl_v1(struct qpnp_labibb *labibb,
+					struct device_node *of_node)
+{
+	int rc = 0;
+	u8 val;
+	u32 tmp;
+
+	rc = of_property_read_u32(of_node, "qcom,qpnp-ibb-soft-start",
+					&(labibb->ibb_vreg.soft_start));
+	if (rc < 0) {
+		pr_err("qcom,qpnp-ibb-soft-start is missing, rc = %d\n",
+			rc);
+		return rc;
+	}
+
+	rc = of_property_read_u32(of_node, "qcom,qpnp-ibb-discharge-resistor",
+			&tmp);
+	if (!rc) {
+		for (val = 0; val < ARRAY_SIZE(ibb_dischg_res_table); val++) {
+			if (ibb_dischg_res_table[val] == tmp)
+				break;
+		}
+
+		if (val == ARRAY_SIZE(ibb_dischg_res_table)) {
+			pr_err("Invalid value in qcom,qpnp-ibb-discharge-resistor\n");
+			return -EINVAL;
+		}
+
+		rc = qpnp_labibb_write(labibb, labibb->ibb_base +
+				REG_IBB_SOFT_START_CTL, &val, 1);
+		if (rc < 0) {
+			pr_err("write to register %x failed rc = %d\n",
+				REG_IBB_SOFT_START_CTL, rc);
+			return rc;
+		}
+	}
+
+	return 0;
+}
+
+static int qpnp_ibb_soft_start_ctl_v2(struct qpnp_labibb *labibb,
+			 struct device_node *of_node)
+{
+	return 0;
+}
+
+static int qpnp_ibb_vreg_ok_ctl(struct qpnp_labibb *labibb,
+			struct device_node *of_node)
+{
+	u8 val = 0;
+	int rc = 0, i = 0;
+	u32 tmp;
+
+	if (labibb->pmic_rev_id->pmic_subtype != PM660L_SUBTYPE)
+		return rc;
+
+	val |= IBB_VREG_OK_EN_OVERLOAD_BLANK;
+
+	rc = of_property_read_u32(of_node,
+				"qcom,qpnp-ibb-overload-debounce", &tmp);
+	if (rc < 0) {
+		pr_err("failed to read qcom,qpnp-ibb-overload-debounce rc=%d\n",
+								rc);
+		return rc;
+	}
+
+	for (i = 0; i < ARRAY_SIZE(ibb_overload_debounce_table); i++)
+		if (ibb_overload_debounce_table[i] == tmp)
+			break;
+
+	if (i == ARRAY_SIZE(ibb_overload_debounce_table)) {
+		pr_err("Invalid value in qcom,qpnp-ibb-overload-debounce\n");
+		return -EINVAL;
+	}
+	val |= i << IBB_VREG_OK_OVERLOAD_DEB_SHIFT;
+
+	rc = of_property_read_u32(of_node,
+				"qcom,qpnp-ibb-vreg-ok-debounce", &tmp);
+	if (rc < 0) {
+		pr_err("failed to read qcom,qpnp-ibb-vreg-ok-debounce rc=%d\n",
+								rc);
+		return rc;
+	}
+
+	for (i = 0; i < ARRAY_SIZE(ibb_vreg_ok_deb_table); i++)
+		if (ibb_vreg_ok_deb_table[i] == tmp)
+			break;
+
+	if (i == ARRAY_SIZE(ibb_vreg_ok_deb_table)) {
+		pr_err("Invalid value in qcom,qpnp-ibb-vreg-ok-debounce\n");
+		return -EINVAL;
+	}
+	val |= i;
+
+	rc = qpnp_labibb_write(labibb, labibb->ibb_base +
+				REG_IBB_VREG_OK_CTL,
+				&val, 1);
+	if (rc < 0)
+		pr_err("write to register %x failed rc = %d\n",
+			REG_IBB_VREG_OK_CTL, rc);
+
+	return rc;
+}
+
+static int qpnp_ibb_set_default_voltage_v1(struct qpnp_labibb *labibb,
+						 bool use_default)
+{
+	u8 val;
+	int rc = 0;
+
+	if (!use_default) {
+		if (labibb->ibb_vreg.curr_volt < labibb->ibb_vreg.min_volt) {
+			pr_err("qcom,qpnp-ibb-init-voltage %d is less than the the minimum voltage %d",
+			 labibb->ibb_vreg.curr_volt, labibb->ibb_vreg.min_volt);
+				return -EINVAL;
+		}
+
+		val = DIV_ROUND_UP(labibb->ibb_vreg.curr_volt -
+				labibb->ibb_vreg.min_volt,
+				labibb->ibb_vreg.step_size);
+		if (val > IBB_VOLTAGE_SET_MASK) {
+			pr_err("qcom,qpnp-lab-init-voltage %d is larger than the max supported voltage %ld",
+				labibb->ibb_vreg.curr_volt,
+				labibb->ibb_vreg.min_volt +
+				labibb->ibb_vreg.step_size *
+				IBB_VOLTAGE_SET_MASK);
+			return -EINVAL;
+		}
+
+		labibb->ibb_vreg.curr_volt = val * labibb->ibb_vreg.step_size +
+				labibb->ibb_vreg.min_volt;
+		val |= IBB_VOLTAGE_OVERRIDE_EN;
+	} else {
+		val = 0;
+	}
+
+	rc = qpnp_labibb_masked_write(labibb, labibb->ibb_base +
+			REG_IBB_VOLTAGE, IBB_VOLTAGE_SET_MASK |
+			IBB_VOLTAGE_OVERRIDE_EN, val);
+	if (rc < 0)
+		pr_err("write to register %x failed rc = %d\n", REG_IBB_VOLTAGE,
+			rc);
+
+	return rc;
+}
+
+static int qpnp_ibb_set_default_voltage_v2(struct qpnp_labibb *labibb,
+						bool use_default)
+{
+	int rc = 0;
+	u8 val;
+
+	val = DIV_ROUND_UP(labibb->ibb_vreg.curr_volt,
+			labibb->ibb_vreg.step_size);
+	if (val > IBB_VOLTAGE_SET_MASK) {
+		pr_err("Invalid qcom,qpnp-ibb-init-voltage property %d",
+			labibb->ibb_vreg.curr_volt);
+		return -EINVAL;
+	}
+
+	labibb->ibb_vreg.curr_volt = val * labibb->ibb_vreg.step_size;
+
+	rc = qpnp_labibb_write(labibb, labibb->ibb_base +
+				REG_IBB_DEFAULT_VOLTAGE, &val, 1);
+	if (rc < 0)
+		pr_err("write to register %x failed rc = %d\n",
+			 REG_IBB_DEFAULT_VOLTAGE, rc);
+
+	return rc;
+}
+
+static int qpnp_ibb_set_voltage_v1(struct qpnp_labibb *labibb,
+				 int min_uV, int max_uV)
+{
+	int rc, new_uV;
+	u8 val;
+
+	if (min_uV < labibb->ibb_vreg.min_volt) {
+		pr_err("min_uV %d is less than min_volt %d", min_uV,
+			labibb->ibb_vreg.min_volt);
+		return -EINVAL;
+	}
+
+	val = DIV_ROUND_UP(min_uV - labibb->ibb_vreg.min_volt,
+				labibb->ibb_vreg.step_size);
+	new_uV = val * labibb->ibb_vreg.step_size + labibb->ibb_vreg.min_volt;
+
+	if (new_uV > max_uV) {
+		pr_err("unable to set voltage %d (min:%d max:%d)\n", new_uV,
+			min_uV, max_uV);
+		return -EINVAL;
+	}
+
+	rc = qpnp_labibb_masked_write(labibb, labibb->ibb_base +
+				REG_IBB_VOLTAGE,
+				IBB_VOLTAGE_SET_MASK |
+				IBB_VOLTAGE_OVERRIDE_EN,
+				val | IBB_VOLTAGE_OVERRIDE_EN);
+
+	if (rc < 0) {
+		pr_err("write to register %x failed rc = %d\n", REG_IBB_VOLTAGE,
+			rc);
+		return rc;
+	}
+
+	if (new_uV > labibb->ibb_vreg.curr_volt) {
+		val = DIV_ROUND_UP(new_uV - labibb->ibb_vreg.curr_volt,
+				labibb->ibb_vreg.step_size);
+		udelay(val * labibb->ibb_vreg.slew_rate);
+	}
+	labibb->ibb_vreg.curr_volt = new_uV;
+
+	return 0;
+}
+
+static int qpnp_ibb_set_voltage_v2(struct qpnp_labibb *labibb,
+				int min_uV, int max_uV)
+{
+	int rc, new_uV;
+	u8 val;
+
+	val = DIV_ROUND_UP(min_uV, labibb->ibb_vreg.step_size);
+	new_uV = val * labibb->ibb_vreg.step_size;
+
+	if (new_uV > max_uV) {
+		pr_err("unable to set voltage %d (min:%d max:%d)\n", new_uV,
+			min_uV, max_uV);
+		return -EINVAL;
+	}
+
+	rc = qpnp_labibb_write(labibb, labibb->ibb_base +
+				REG_IBB_VOLTAGE, &val, 1);
+	if (rc < 0) {
+		pr_err("write to register %x failed rc = %d\n", REG_IBB_VOLTAGE,
+			rc);
+		return rc;
+	}
+
+	if (new_uV > labibb->ibb_vreg.curr_volt) {
+		val = DIV_ROUND_UP(new_uV - labibb->ibb_vreg.curr_volt,
+				labibb->ibb_vreg.step_size);
+		udelay(val * labibb->ibb_vreg.slew_rate);
+	}
+	labibb->ibb_vreg.curr_volt = new_uV;
+
+	return 0;
+}
+
+static int qpnp_ibb_output_voltage_at_one_pulse_v1(struct qpnp_labibb *labibb,
+						u32 volt)
+{
+	int rc = 0;
+	u8 val;
+
+	/*
+	 * Set the output voltage 100mV lower as the IBB HW module
+	 * counts one pulse less in SWIRE mode.
+	 */
+	val = DIV_ROUND_UP((volt - MIN_OUTPUT_PULSE_VOLTAGE_MV),
+				OUTPUT_VOLTAGE_STEP_MV) - 1;
+	rc = qpnp_labibb_masked_write(labibb, labibb->ibb_base +
+			REG_IBB_SWIRE_CTL,
+			IBB_OUTPUT_VOLTAGE_AT_ONE_PULSE_MASK,
+			val);
+	if (rc < 0)
+		pr_err("write register %x failed rc = %d\n",
+			REG_IBB_SWIRE_CTL, rc);
+
+	return rc;
+}
+
+static int qpnp_ibb_output_voltage_at_one_pulse_v2(struct qpnp_labibb *labibb,
+						u32 volt)
+{
+	int rc = 0;
+	u8 val;
+
+	val = DIV_ROUND_UP(volt, OUTPUT_VOLTAGE_STEP_MV);
+
+	rc = qpnp_labibb_masked_write(labibb, labibb->ibb_base +
+			REG_IBB_SWIRE_CTL,
+			IBB_OUTPUT_VOLTAGE_AT_ONE_PULSE_MASK,
+			val);
+	if (rc < 0)
+		pr_err("qpnp_labiibb_write register %x failed rc = %d\n",
+			REG_IBB_SWIRE_CTL, rc);
+
+	return rc;
+}
+
+/* For PMI8998 and earlier PMICs */
+static const struct ibb_ver_ops ibb_ops_v1 = {
+	.set_default_voltage	= qpnp_ibb_set_default_voltage_v1,
+	.set_voltage		= qpnp_ibb_set_voltage_v1,
+	.sel_mode		= qpnp_labibb_sel_mode_v1,
+	.get_mode		= qpnp_ibb_get_mode_v1,
+	.set_clk_div		= qpnp_ibb_set_clk_div_v1,
+	.smart_ps_config	= qpnp_ibb_smart_ps_config_v1,
+	.soft_start_ctl		= qpnp_ibb_soft_start_ctl_v1,
+	.voltage_at_one_pulse	= qpnp_ibb_output_voltage_at_one_pulse_v1,
+};
+
+/* For PM660A and later PMICs */
+static const struct ibb_ver_ops ibb_ops_v2 = {
+	.set_default_voltage	= qpnp_ibb_set_default_voltage_v2,
+	.set_voltage		= qpnp_ibb_set_voltage_v2,
+	.sel_mode		= qpnp_labibb_sel_mode_v2,
+	.get_mode		= qpnp_ibb_get_mode_v2,
+	.set_clk_div		= qpnp_ibb_set_clk_div_v2,
+	.smart_ps_config	= qpnp_ibb_smart_ps_config_v2,
+	.soft_start_ctl		= qpnp_ibb_soft_start_ctl_v2,
+	.voltage_at_one_pulse	= qpnp_ibb_output_voltage_at_one_pulse_v2,
+};
+
+static int qpnp_lab_set_default_voltage_v1(struct qpnp_labibb *labibb,
+						 bool default_pres)
+{
+	u8 val;
+	int rc = 0;
+
+	if (!default_pres) {
+		if (labibb->lab_vreg.curr_volt < labibb->lab_vreg.min_volt) {
+			pr_err("qcom,qpnp-lab-init-voltage %d is less than the the minimum voltage %d",
+				labibb->lab_vreg.curr_volt,
+				labibb->lab_vreg.min_volt);
+			return -EINVAL;
+		}
+
+		val = DIV_ROUND_UP(labibb->lab_vreg.curr_volt -
+				labibb->lab_vreg.min_volt,
+				labibb->lab_vreg.step_size);
+		if (val > LAB_VOLTAGE_SET_MASK) {
+			pr_err("qcom,qpnp-lab-init-voltage %d is larger than the max supported voltage %ld",
+				labibb->lab_vreg.curr_volt,
+				labibb->lab_vreg.min_volt +
+				labibb->lab_vreg.step_size *
+				LAB_VOLTAGE_SET_MASK);
+			return -EINVAL;
+		}
+
+		labibb->lab_vreg.curr_volt = val * labibb->lab_vreg.step_size +
+				labibb->lab_vreg.min_volt;
+		val |= LAB_VOLTAGE_OVERRIDE_EN;
+
+	} else {
+		val = 0;
+	}
+
+	rc = qpnp_labibb_masked_write(labibb, labibb->lab_base +
+				REG_LAB_VOLTAGE, LAB_VOLTAGE_SET_MASK |
+				LAB_VOLTAGE_OVERRIDE_EN, val);
+
+	if (rc < 0)
+		pr_err("write to register %x failed rc = %d\n", REG_LAB_VOLTAGE,
+			rc);
+
+	return rc;
+}
+
+static int qpnp_lab_set_default_voltage_v2(struct qpnp_labibb *labibb,
+						 bool default_pres)
+{
+	int rc = 0;
+	u8 val;
+
+	val = DIV_ROUND_UP((labibb->lab_vreg.curr_volt
+		 - labibb->lab_vreg.min_volt), labibb->lab_vreg.step_size);
+
+	rc = qpnp_labibb_write(labibb, labibb->lab_base +
+				REG_LAB_VOUT_DEFAULT, &val, 1);
+	if (rc < 0)
+		pr_err("write to register %x failed rc = %d\n",
+			 REG_LAB_VOUT_DEFAULT, rc);
+
+	return rc;
+}
+
+static int qpnp_lab_ps_ctl_v1(struct qpnp_labibb *labibb,
+					u32 thresh, bool enable)
+{
+	int rc = 0;
+	u8 val;
+
+	if (enable) {
+		for (val = 0; val < ARRAY_SIZE(lab_ps_thresh_table_v1); val++)
+			if (lab_ps_thresh_table_v1[val] == thresh)
+				break;
+
+		if (val == ARRAY_SIZE(lab_ps_thresh_table_v1)) {
+			pr_err("Invalid value in qcom,qpnp-lab-ps-threshold\n");
+			return -EINVAL;
+		}
+
+		val |= LAB_PS_CTL_EN;
+	} else {
+		val = 0;
+	}
+
+	rc = qpnp_labibb_write(labibb, labibb->lab_base +
+			 REG_LAB_PS_CTL, &val, 1);
+
+	if (rc < 0)
+		pr_err("write register %x failed rc = %d\n",
+				REG_LAB_PS_CTL, rc);
+
+	return rc;
+}
+
+static int qpnp_lab_ps_ctl_v2(struct qpnp_labibb *labibb,
+				u32 thresh, bool enable)
+{
+	int rc = 0;
+	u8 val, mask;
+
+	mask = LAB_PS_CTL_EN;
+	if (enable) {
+		for (val = 0; val < ARRAY_SIZE(lab_ps_thresh_table_v2); val++)
+			if (lab_ps_thresh_table_v2[val] == thresh)
+				break;
+
+		if (val == ARRAY_SIZE(lab_ps_thresh_table_v2)) {
+			pr_err("Invalid value in qcom,qpnp-lab-ps-threshold\n");
+			return -EINVAL;
+		}
+
+		val |= LAB_PS_CTL_EN;
+		mask |= LAB_PS_THRESH_MASK;
+	} else {
+		val = 0;
+	}
+
+	rc = qpnp_labibb_masked_write(labibb, labibb->lab_base +
+			 REG_LAB_PS_CTL, mask, val);
+	if (rc < 0)
+		pr_err("write register %x failed rc = %d\n",
+				REG_LAB_PS_CTL, rc);
+
+	return rc;
+}
+
+/* For PMI8996 and earlier PMICs */
+static const struct lab_ver_ops lab_ops_v1 = {
+	.set_default_voltage	= qpnp_lab_set_default_voltage_v1,
+	.ps_ctl			= qpnp_lab_ps_ctl_v1,
+};
+
+static const struct lab_ver_ops pmi8998_lab_ops = {
+	.set_default_voltage	= qpnp_lab_set_default_voltage_v1,
+	.ps_ctl			= qpnp_lab_ps_ctl_v2,
+};
+
+static const struct lab_ver_ops pm660_lab_ops = {
+	.set_default_voltage	= qpnp_lab_set_default_voltage_v2,
+	.ps_ctl			= qpnp_lab_ps_ctl_v2,
+};
+
+static int qpnp_labibb_get_matching_idx(const char *val)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(lab_current_sense_table); i++)
+		if (!strcmp(lab_current_sense_table[i], val))
+			return i;
+
+	return -EINVAL;
+}
+
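+/*
+ * Map the requested ibb_mode onto the MODULE_EN (software) and SWIRE_RDY
+ * (hardware) bits of IBB_ENABLE_CTL.
+ */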
+static int qpnp_ibb_set_mode(struct qpnp_labibb *labibb, enum ibb_mode mode)
+{
+	int rc;
+	u8 val;
+
+	if (mode == IBB_SW_CONTROL_EN)
+		val = IBB_ENABLE_CTL_MODULE_EN;
+	else if (mode == IBB_HW_CONTROL)
+		val = IBB_ENABLE_CTL_SWIRE_RDY;
+	else if (mode == IBB_HW_SW_CONTROL)
+		val = IBB_ENABLE_CTL_MODULE_EN | IBB_ENABLE_CTL_SWIRE_RDY;
+	else if (mode == IBB_SW_CONTROL_DIS)
+		val = 0;
+	else
+		return -EINVAL;
+
+	rc = qpnp_labibb_masked_write(labibb,
+		labibb->ibb_base + REG_IBB_ENABLE_CTL,
+		IBB_ENABLE_CTL_MASK, val);
+	if (rc < 0)
+		pr_err("Unable to configure IBB_ENABLE_CTL rc=%d\n", rc);
+
+	return rc;
+}
+
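+/* Toggle IBB pulse-skip (PS) mode and program the matching NLIMIT DAC. */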
+static int qpnp_ibb_ps_config(struct qpnp_labibb *labibb, bool enable)
+{
+	u8 val;
+	int rc;
+
+	val = enable ? IBB_PS_CTL_EN : IBB_NUM_SWIRE_PULSE_WAIT;
+	rc = qpnp_labibb_write(labibb, labibb->ibb_base + REG_IBB_PS_CTL,
+							&val, 1);
+	if (rc < 0) {
+		pr_err("write register %x failed rc = %d\n",
+					REG_IBB_PS_CTL, rc);
+		return rc;
+	}
+
+	val = enable ? 0 : IBB_DEFAULT_NLIMIT_DAC;
+	rc = qpnp_labibb_write(labibb, labibb->ibb_base + REG_IBB_NLIMIT_DAC,
+							&val, 1);
+	if (rc < 0)
+		pr_err("write register %x failed rc = %d\n",
+					REG_IBB_NLIMIT_DAC, rc);
+	return rc;
+}
+
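+/* Parse the LAB device-tree properties and program the matching registers. */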
+static int qpnp_lab_dt_init(struct qpnp_labibb *labibb,
+				struct device_node *of_node)
+{
+	int rc = 0;
+	u8 i, val, mask;
+	u32 tmp;
+
+	/*
+	 * Do not configure LCD_AMOLED_SEL for pmi8998 as it will be done by
+	 * GPIO selector.
+	 */
+	if (labibb->pmic_rev_id->pmic_subtype != PMI8998_SUBTYPE) {
+		rc = labibb->ibb_ver_ops->sel_mode(labibb, 0);
+		if (rc < 0)
+			return rc;
+	}
+
+	val = 0;
+	if (of_property_read_bool(of_node, "qcom,qpnp-lab-full-pull-down"))
+		val |= LAB_PD_CTL_STRONG_PULL;
+
+	if (!of_property_read_bool(of_node, "qcom,qpnp-lab-pull-down-enable"))
+		val |= LAB_PD_CTL_DISABLE_PD;
+
+	mask = LAB_PD_CTL_EN_MASK | LAB_PD_CTL_STRENGTH_MASK;
+	rc = qpnp_labibb_masked_write(labibb, labibb->lab_base + REG_LAB_PD_CTL,
+					mask, val);
+
+	if (rc < 0) {
+		pr_err("write to register %x failed rc = %d\n",
+				REG_LAB_PD_CTL, rc);
+		return rc;
+	}
+
+	rc = of_property_read_u32(of_node,
+		"qcom,qpnp-lab-switching-clock-frequency", &tmp);
+	if (!rc) {
+		for (val = 0; val < ARRAY_SIZE(lab_clk_div_table); val++)
+			if (lab_clk_div_table[val] == tmp)
+				break;
+
+		if (val == ARRAY_SIZE(lab_clk_div_table)) {
+			pr_err("Invalid value in qpnp-lab-switching-clock-frequency\n");
+			return -EINVAL;
+		}
+
+		rc = qpnp_labibb_write(labibb, labibb->lab_base +
+				REG_LAB_CLK_DIV, &val, 1);
+		if (rc < 0) {
+			pr_err("write to register %x failed rc = %d\n",
+				REG_LAB_CLK_DIV, rc);
+			return rc;
+		}
+	}
+
+	if (of_property_read_bool(of_node,
+		"qcom,qpnp-lab-limit-max-current-enable")) {
+		val = LAB_CURRENT_LIMIT_EN_BIT;
+
+		rc = of_property_read_u32(of_node,
+			"qcom,qpnp-lab-limit-maximum-current", &tmp);
+
+		if (rc < 0) {
+			pr_err("get qcom,qpnp-lab-limit-maximum-current failed rc = %d\n",
+				rc);
+			return rc;
+		}
+
+		for (i = 0; i < ARRAY_SIZE(lab_current_limit_table); i++)
+			if (lab_current_limit_table[i] == tmp)
+				break;
+
+		if (i == ARRAY_SIZE(lab_current_limit_table)) {
+			pr_err("Invalid value in qcom,qpnp-lab-limit-maximum-current\n");
+			return -EINVAL;
+		}
+
+		val |= i;
+		rc = qpnp_labibb_write(labibb, labibb->lab_base +
+					REG_LAB_CURRENT_LIMIT, &val, 1);
+		if (rc < 0) {
+			pr_err("write to register %x failed rc = %d\n",
+					REG_LAB_CURRENT_LIMIT, rc);
+			return rc;
+		}
+	}
+
+	if (of_property_read_bool(of_node,
+		"qcom,qpnp-lab-ring-suppression-enable")) {
+		val = LAB_RING_SUPPRESSION_CTL_EN;
+		rc = qpnp_labibb_write(labibb, labibb->lab_base +
+					REG_LAB_RING_SUPPRESSION_CTL, &val, 1);
+		if (rc < 0) {
+			pr_err("write to register %x failed rc = %d\n",
+				REG_LAB_RING_SUPPRESSION_CTL, rc);
+			return rc;
+		}
+	}
+
+	if (of_property_read_bool(of_node, "qcom,qpnp-lab-ps-enable")) {
+
+		rc = of_property_read_u32(of_node,
+				 "qcom,qpnp-lab-ps-threshold", &tmp);
+
+		if (rc < 0) {
+			pr_err("get qcom,qpnp-lab-ps-threshold failed rc = %d\n",
+				rc);
+			return rc;
+		}
+		rc = labibb->lab_ver_ops->ps_ctl(labibb, tmp, true);
+		if (rc < 0)
+			return rc;
+	} else {
+		rc = labibb->lab_ver_ops->ps_ctl(labibb, tmp, false);
+		if (rc < 0)
+			return rc;
+	}
+
+	val = 0;
+	mask = 0;
+	rc = of_property_read_u32(of_node, "qcom,qpnp-lab-pfet-size", &tmp);
+	if (!rc) {
+		for (val = 0; val < ARRAY_SIZE(lab_rdson_pfet_table); val++)
+			if (tmp == lab_rdson_pfet_table[val])
+				break;
+
+		if (val == ARRAY_SIZE(lab_rdson_pfet_table)) {
+			pr_err("Invalid value in qcom,qpnp-lab-pfet-size\n");
+			return -EINVAL;
+		}
+		val |= LAB_RDSON_MNGMNT_PFET_SLEW_EN;
+		mask |= LAB_RDSON_MNGMNT_PFET_MASK |
+				LAB_RDSON_MNGMNT_PFET_SLEW_EN;
+	}
+
+	rc = of_property_read_u32(of_node, "qcom,qpnp-lab-nfet-size",
+				 &tmp);
+	if (!rc) {
+		for (i = 0; i < ARRAY_SIZE(lab_rdson_nfet_table); i++)
+			if (tmp == lab_rdson_nfet_table[i])
+				break;
+
+		if (i == ARRAY_SIZE(lab_rdson_nfet_table)) {
+			pr_err("Invalid value in qcom,qpnp-lab-nfet-size\n");
+			return -EINVAL;
+		}
+
+		val |= i << LAB_RDSON_MNGMNT_NFET_SHIFT;
+		val |= LAB_RDSON_MNGMNT_NFET_SLEW_EN;
+		mask |= LAB_RDSON_MNGMNT_NFET_MASK |
+				LAB_RDSON_MNGMNT_NFET_SLEW_EN;
+	}
+
+	rc = qpnp_labibb_masked_write(labibb, labibb->lab_base +
+				REG_LAB_RDSON_MNGMNT, mask, val);
+	if (rc < 0) {
+		pr_err("write to register %x failed rc = %d\n",
+			REG_LAB_RDSON_MNGMNT, rc);
+		return rc;
+	}
+
+	rc = of_property_read_u32(of_node, "qcom,qpnp-lab-init-voltage",
+				&(labibb->lab_vreg.curr_volt));
+	if (rc < 0) {
+		pr_err("get qcom,qpnp-lab-init-voltage failed, rc = %d\n",
+				 rc);
+		return rc;
+	}
+
+	if (of_property_read_bool(of_node,
+			"qcom,qpnp-lab-use-default-voltage"))
+		rc = labibb->lab_ver_ops->set_default_voltage(labibb, true);
+	else
+		rc = labibb->lab_ver_ops->set_default_voltage(labibb, false);
+
+	if (rc < 0)
+		return rc;
+
+	if (of_property_read_bool(of_node,
+		"qcom,qpnp-lab-enable-sw-high-psrr")) {
+		val = LAB_EN_SW_HIGH_PSRR_MODE;
+
+		rc = qpnp_labibb_write(labibb, labibb->lab_base +
+					REG_LAB_SW_HIGH_PSRR_CTL, &val, 1);
+		if (rc < 0) {
+			pr_err("write to register %x failed rc = %d\n",
+				REG_LAB_SW_HIGH_PSRR_CTL, rc);
+			return rc;
+		}
+	}
+
+	rc = of_property_read_u32(of_node,
+		"qcom,qpnp-lab-ldo-pulldown-enable", (u32 *)&val);
+	if (!rc) {
+		rc = qpnp_labibb_write(labibb, labibb->lab_base +
+			REG_LAB_LDO_PD_CTL, &val, 1);
+		if (rc < 0) {
+			pr_err("write to register %x failed rc = %d\n",
+				REG_LAB_LDO_PD_CTL, rc);
+			return rc;
+		}
+	}
+
+	rc = of_property_read_u32(of_node,
+		"qcom,qpnp-lab-high-psrr-src-select", &tmp);
+	if (!rc) {
+		val = tmp;
+
+		rc = of_property_read_u32(of_node,
+			"qcom,qpnp-lab-vref-high-psrr-select", &tmp);
+		if (rc < 0) {
+			pr_err("get qcom,qpnp-lab-vref-high-psrr-select failed rc = %d\n",
+				rc);
+			return rc;
+		}
+
+		for (i = 0; i < ARRAY_SIZE(lab_vref_high_psrr_table); i++)
+			if (lab_vref_high_psrr_table[i] == tmp)
+				break;
+
+		if (i == ARRAY_SIZE(lab_vref_high_psrr_table)) {
+			pr_err("Invalid value in qpnp-lab-vref-high-psrr-selct\n");
+			return -EINVAL;
+		}
+		val |= (i << LAB_SEL_HW_HIGH_PSRR_SRC_SHIFT);
+
+		rc = qpnp_labibb_masked_write(labibb, labibb->lab_base +
+				REG_LAB_VPH_ENVELOP_CTL,
+				LAB_VREF_HIGH_PSRR_SEL_MASK |
+				LAB_SEL_HW_HIGH_PSRR_SRC_MASK,
+				val);
+
+		if (rc < 0) {
+			pr_err("write to register %x failed rc = %d\n",
+				REG_LAB_VPH_ENVELOP_CTL, rc);
+			return rc;
+		}
+	}
+
+	if (labibb->swire_control) {
+		rc = qpnp_ibb_set_mode(labibb, IBB_HW_CONTROL);
+		if (rc < 0) {
+			pr_err("Unable to set SWIRE_RDY rc=%d\n", rc);
+			return rc;
+		}
+	}
+
+	return 0;
+}
+
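+/*
+ * PFM mode handling (PMI8998, LCD mode): drop the LAB current limit to
+ * 400 mA before enabling PFM and restore 1600 mA after disabling it.
+ */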
+#define LAB_CURRENT_MAX_1600MA	0x7
+#define LAB_CURRENT_MAX_400MA	0x1
+static int qpnp_lab_pfm_disable(struct qpnp_labibb *labibb)
+{
+	int rc = 0;
+	u8 val, mask;
+
+	mutex_lock(&(labibb->lab_vreg.lab_mutex));
+	if (!labibb->pfm_enable) {
+		pr_debug("PFM already disabled\n");
+		goto out;
+	}
+
+	val = 0;
+	mask = LAB_PFM_EN_BIT;
+	rc = qpnp_labibb_masked_write(labibb, labibb->lab_base +
+				REG_LAB_PFM_CTL, mask, val);
+	if (rc < 0) {
+		pr_err("Write register %x failed rc = %d\n",
+			REG_LAB_PFM_CTL, rc);
+		goto out;
+	}
+
+	val = LAB_CURRENT_MAX_1600MA;
+	mask = LAB_OVERRIDE_CURRENT_MAX_BIT | LAB_CURRENT_LIMIT_MASK;
+	rc = qpnp_labibb_masked_write(labibb, labibb->lab_base +
+				REG_LAB_CURRENT_LIMIT, mask, val);
+	if (rc < 0) {
+		pr_err("Write register %x failed rc = %d\n",
+			REG_LAB_CURRENT_LIMIT, rc);
+		goto out;
+	}
+
+	labibb->pfm_enable = false;
+out:
+	mutex_unlock(&(labibb->lab_vreg.lab_mutex));
+	return rc;
+}
+
+static int qpnp_lab_pfm_enable(struct qpnp_labibb *labibb)
+{
+	int rc = 0;
+	u8 val, mask;
+
+	mutex_lock(&(labibb->lab_vreg.lab_mutex));
+	if (labibb->pfm_enable) {
+		pr_debug("PFM already enabled\n");
+		goto out;
+	}
+
+	/* Wait for ~100uS */
+	usleep_range(100, 105);
+
+	val = LAB_OVERRIDE_CURRENT_MAX_BIT | LAB_CURRENT_MAX_400MA;
+	mask = LAB_OVERRIDE_CURRENT_MAX_BIT | LAB_CURRENT_LIMIT_MASK;
+	rc = qpnp_labibb_masked_write(labibb, labibb->lab_base +
+				REG_LAB_CURRENT_LIMIT, mask, val);
+	if (rc < 0) {
+		pr_err("Write register %x failed rc = %d\n",
+			REG_LAB_CURRENT_LIMIT, rc);
+		goto out;
+	}
+
+	/* Wait for ~100uS */
+	usleep_range(100, 105);
+
+	val = LAB_PFM_EN_BIT;
+	mask = LAB_PFM_EN_BIT;
+	rc = qpnp_labibb_masked_write(labibb, labibb->lab_base +
+				REG_LAB_PFM_CTL, mask, val);
+	if (rc < 0) {
+		pr_err("Write register %x failed rc = %d\n",
+			REG_LAB_PFM_CTL, rc);
+		goto out;
+	}
+
+	labibb->pfm_enable = true;
+out:
+	mutex_unlock(&(labibb->lab_vreg.lab_mutex));
+	return rc;
+}
+
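+/* Write back the register values captured by qpnp_labibb_save_settings(). */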
+static int qpnp_labibb_restore_settings(struct qpnp_labibb *labibb)
+{
+	int rc, i;
+
+	for (i = 0; i < ARRAY_SIZE(ibb_settings); i++) {
+		if (ibb_settings[i].sec_access)
+			rc = qpnp_labibb_sec_write(labibb, labibb->ibb_base,
+					ibb_settings[i].address,
+					ibb_settings[i].value);
+		else
+			rc = qpnp_labibb_write(labibb, labibb->ibb_base +
+					ibb_settings[i].address,
+					&ibb_settings[i].value, 1);
+
+		if (rc < 0) {
+			pr_err("write to register %x failed rc = %d\n",
+				ibb_settings[i].address, rc);
+			return rc;
+		}
+	}
+
+	for (i = 0; i < ARRAY_SIZE(lab_settings); i++) {
+		if (lab_settings[i].sec_access)
+			rc = qpnp_labibb_sec_write(labibb, labibb->lab_base,
+					lab_settings[i].address,
+					lab_settings[i].value);
+		else
+			rc = qpnp_labibb_write(labibb, labibb->lab_base +
+					lab_settings[i].address,
+					&lab_settings[i].value, 1);
+
+		if (rc < 0) {
+			pr_err("write to register %x failed rc = %d\n",
+				lab_settings[i].address, rc);
+			return rc;
+		}
+	}
+
+	return 0;
+}
+
+static int qpnp_labibb_save_settings(struct qpnp_labibb *labibb)
+{
+	int rc, i;
+
+	for (i = 0; i < ARRAY_SIZE(ibb_settings); i++) {
+		rc = qpnp_labibb_read(labibb, labibb->ibb_base +
+			 ibb_settings[i].address, &ibb_settings[i].value, 1);
+		if (rc < 0) {
+			pr_err("read register %x failed rc = %d\n",
+				ibb_settings[i].address, rc);
+			return rc;
+		}
+	}
+
+	for (i = 0; i < ARRAY_SIZE(lab_settings); i++) {
+		rc = qpnp_labibb_read(labibb, labibb->lab_base +
+			lab_settings[i].address, &lab_settings[i].value, 1);
+		if (rc < 0) {
+			pr_err("read register %x failed rc = %d\n",
+				lab_settings[i].address, rc);
+			return rc;
+		}
+	}
+
+	return 0;
+}
+
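+/*
+ * Touch-to-wake (TTW) support: the helpers below reconfigure LAB/IBB for
+ * low-power SWIRE control while the panel sleeps; the settings saved above
+ * are restored again on TTW exit.
+ */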
+static int qpnp_labibb_ttw_enter_ibb_common(struct qpnp_labibb *labibb)
+{
+	int rc = 0;
+	u8 val;
+
+	val = 0;
+	rc = qpnp_labibb_write(labibb, labibb->ibb_base + REG_IBB_PD_CTL,
+				&val, 1);
+	if (rc < 0) {
+		pr_err("read register %x failed rc = %d\n",
+			REG_IBB_PD_CTL, rc);
+		return rc;
+	}
+
+	val = 0;
+	rc = qpnp_labibb_sec_write(labibb, labibb->ibb_base,
+				REG_IBB_PWRUP_PWRDN_CTL_1, val);
+	if (rc < 0) {
+		pr_err("write to register %x failed rc = %d\n",
+			REG_IBB_PWRUP_PWRDN_CTL_1, rc);
+		return rc;
+	}
+
+	val = IBB_WAIT_MBG_OK;
+	rc = qpnp_labibb_sec_masked_write(labibb, labibb->ibb_base,
+				REG_IBB_PWRUP_PWRDN_CTL_2,
+				IBB_DIS_DLY_MASK | IBB_WAIT_MBG_OK, val);
+	if (rc < 0) {
+		pr_err("write to register %x failed rc = %d\n",
+			REG_IBB_PWRUP_PWRDN_CTL_2, rc);
+		return rc;
+	}
+
+	val = IBB_NFET_SLEW_EN | IBB_PFET_SLEW_EN | IBB_OVERRIDE_NFET_SW_SIZE |
+		IBB_OVERRIDE_PFET_SW_SIZE;
+	rc = qpnp_labibb_masked_write(labibb, labibb->ibb_base +
+				REG_IBB_RDSON_MNGMNT, 0xFF, val);
+	if (rc < 0) {
+		pr_err("write to register %x failed rc = %d\n",
+			REG_IBB_RDSON_MNGMNT, rc);
+		return rc;
+	}
+
+	val = IBB_CURRENT_LIMIT_EN | IBB_CURRENT_MAX_500MA |
+		(IBB_ILIMIT_COUNT_CYC8 << IBB_CURRENT_LIMIT_DEBOUNCE_SHIFT);
+	rc = qpnp_labibb_sec_write(labibb, labibb->ibb_base,
+				REG_IBB_CURRENT_LIMIT, val);
+	if (rc < 0)
+		pr_err("write to register %x failed rc = %d\n",
+			REG_IBB_CURRENT_LIMIT, rc);
+
+	return rc;
+}
+
+static int qpnp_labibb_ttw_enter_ibb_pmi8996(struct qpnp_labibb *labibb)
+{
+	int rc;
+	u8 val;
+
+	val = IBB_BYPASS_PWRDN_DLY2_BIT | IBB_FAST_STARTUP;
+	rc = qpnp_labibb_write(labibb, labibb->ibb_base + REG_IBB_SPARE_CTL,
+				&val, 1);
+	if (rc < 0)
+		pr_err("write to register %x failed rc = %d\n",
+			REG_IBB_SPARE_CTL, rc);
+
+	return rc;
+}
+
+static int qpnp_labibb_ttw_enter_ibb_pmi8950(struct qpnp_labibb *labibb)
+{
+	int rc;
+	u8 val;
+
+	rc = qpnp_ibb_ps_config(labibb, true);
+	if (rc < 0) {
+		pr_err("Failed to enable ibb_ps_config rc=%d\n", rc);
+		return rc;
+	}
+
+	val = IBB_SOFT_START_CHARGING_RESISTOR_16K;
+	rc = qpnp_labibb_write(labibb, labibb->ibb_base +
+				REG_IBB_SOFT_START_CTL, &val, 1);
+	if (rc < 0) {
+		pr_err("write to register %x failed rc = %d\n",
+			REG_IBB_SOFT_START_CTL, rc);
+		return rc;
+	}
+
+	val = IBB_MODULE_RDY_EN;
+	rc = qpnp_labibb_write(labibb, labibb->lab_base +
+				REG_IBB_MODULE_RDY, &val, 1);
+	if (rc < 0)
+		pr_err("write to register %x failed rc = %d\n",
+				REG_IBB_MODULE_RDY, rc);
+
+	return rc;
+}
+
+static int qpnp_labibb_regulator_ttw_mode_enter(struct qpnp_labibb *labibb)
+{
+	int rc = 0;
+	u8 val;
+
+	/* Save the IBB settings before they get modified for TTW mode */
+	if (!labibb->ibb_settings_saved) {
+		rc = qpnp_labibb_save_settings(labibb);
+		if (rc) {
+			pr_err("Error in storing IBB setttings, rc=%d\n", rc);
+			return rc;
+		}
+		labibb->ibb_settings_saved = true;
+	}
+
+	if (labibb->ttw_force_lab_on) {
+		val = LAB_MODULE_RDY_EN;
+		rc = qpnp_labibb_write(labibb, labibb->lab_base +
+					REG_LAB_MODULE_RDY, &val, 1);
+		if (rc < 0) {
+			pr_err("write to register %x failed rc = %d\n",
+				REG_LAB_MODULE_RDY, rc);
+			return rc;
+		}
+
+		/* Prevent LAB from being turned off by IBB */
+		val = LAB_ENABLE_CTL_EN;
+		rc = qpnp_labibb_write(labibb, labibb->lab_base +
+					REG_LAB_ENABLE_CTL, &val, 1);
+		if (rc < 0) {
+			pr_err("write to register %x failed rc = %d\n",
+				REG_LAB_ENABLE_CTL, rc);
+			return rc;
+		}
+
+		val = LAB_RDSON_MNGMNT_NFET_SLEW_EN |
+			LAB_RDSON_MNGMNT_PFET_SLEW_EN |
+			LAB_RDSON_NFET_SW_SIZE_QUARTER |
+			LAB_RDSON_PFET_SW_SIZE_QUARTER;
+		rc = qpnp_labibb_write(labibb, labibb->lab_base +
+				REG_LAB_RDSON_MNGMNT, &val, 1);
+		if (rc < 0) {
+			pr_err("write to register %x failed rc = %d\n",
+				REG_LAB_RDSON_MNGMNT, rc);
+			return rc;
+		}
+
+		rc = qpnp_labibb_masked_write(labibb, labibb->lab_base +
+				REG_LAB_PS_CTL, LAB_PS_CTL_EN, LAB_PS_CTL_EN);
+		if (rc < 0) {
+			pr_err("qpnp_labibb_write register %x failed rc = %d\n",
+				REG_LAB_PS_CTL, rc);
+			return rc;
+		}
+	} else {
+		val = LAB_PD_CTL_DISABLE_PD;
+		rc = qpnp_labibb_write(labibb, labibb->lab_base +
+				REG_LAB_PD_CTL, &val, 1);
+		if (rc < 0) {
+			pr_err("qpnp_labibb_write register %x failed rc = %d\n",
+				REG_LAB_PD_CTL, rc);
+			return rc;
+		}
+
+		val = LAB_SPARE_DISABLE_SCP_BIT;
+		if (labibb->pmic_rev_id->pmic_subtype != PMI8950_SUBTYPE)
+			val |= LAB_SPARE_TOUCH_WAKE_BIT;
+		rc = qpnp_labibb_write(labibb, labibb->lab_base +
+				REG_LAB_SPARE_CTL, &val, 1);
+		if (rc < 0) {
+			pr_err("qpnp_labibb_write register %x failed rc = %d\n",
+				REG_LAB_SPARE_CTL, rc);
+			return rc;
+		}
+
+		val = 0;
+		rc = qpnp_labibb_write(labibb, labibb->lab_base +
+				REG_LAB_SOFT_START_CTL, &val, 1);
+		if (rc < 0) {
+			pr_err("qpnp_labibb_write register %x failed rc = %d\n",
+				REG_LAB_SOFT_START_CTL, rc);
+			return rc;
+		}
+	}
+
+	rc = qpnp_labibb_ttw_enter_ibb_common(labibb);
+	if (rc) {
+		pr_err("Failed to apply TTW ibb common settings rc=%d\n", rc);
+		return rc;
+	}
+
+	switch (labibb->pmic_rev_id->pmic_subtype) {
+	case PMI8996_SUBTYPE:
+		rc = qpnp_labibb_ttw_enter_ibb_pmi8996(labibb);
+		break;
+	case PMI8950_SUBTYPE:
+		rc = qpnp_labibb_ttw_enter_ibb_pmi8950(labibb);
+		break;
+	}
+	if (rc < 0) {
+		pr_err("Failed to configure TTW-enter for IBB rc=%d\n", rc);
+		return rc;
+	}
+
+	rc = qpnp_ibb_set_mode(labibb, IBB_HW_CONTROL);
+	if (rc < 0) {
+		pr_err("Unable to set SWIRE_RDY rc = %d\n", rc);
+		return rc;
+	}
+	labibb->in_ttw_mode = true;
+	return 0;
+}
+
+static int qpnp_labibb_ttw_exit_ibb_common(struct qpnp_labibb *labibb)
+{
+	int rc;
+	u8 val;
+
+	val = IBB_FASTER_PFET_OFF;
+	rc = qpnp_labibb_write(labibb, labibb->ibb_base + REG_IBB_SPARE_CTL,
+			&val, 1);
+	if (rc < 0)
+		pr_err("qpnp_labibb_write register %x failed rc = %d\n",
+			REG_IBB_SPARE_CTL, rc);
+
+	return rc;
+}
+
+static int qpnp_labibb_regulator_ttw_mode_exit(struct qpnp_labibb *labibb)
+{
+	int rc = 0;
+	u8 val;
+
+	if (!labibb->ibb_settings_saved) {
+		pr_err("IBB settings are not saved!\n");
+		return -EINVAL;
+	}
+
+	/* Restore the IBB settings back to switch back to normal mode */
+	rc = qpnp_labibb_restore_settings(labibb);
+	if (rc < 0) {
+		pr_err("Error in restoring IBB setttings, rc=%d\n", rc);
+		return rc;
+	}
+
+	if (labibb->ttw_force_lab_on) {
+		val = 0;
+		rc = qpnp_labibb_write(labibb, labibb->lab_base +
+					REG_LAB_ENABLE_CTL, &val, 1);
+		if (rc < 0) {
+			pr_err("qpnp_labibb_write register %x failed rc = %d\n",
+				REG_LAB_ENABLE_CTL, rc);
+			return rc;
+		}
+	} else {
+		val = LAB_PD_CTL_STRONG_PULL;
+		rc = qpnp_labibb_write(labibb, labibb->lab_base +
+					REG_LAB_PD_CTL,	&val, 1);
+		if (rc < 0) {
+			pr_err("qpnp_labibb_write register %x failed rc = %d\n",
+						REG_LAB_PD_CTL, rc);
+			return rc;
+		}
+
+		val = 0;
+		rc = qpnp_labibb_write(labibb, labibb->lab_base +
+					REG_LAB_SPARE_CTL, &val, 1);
+		if (rc < 0) {
+			pr_err("qpnp_labibb_write register %x failed rc = %d\n",
+					REG_LAB_SPARE_CTL, rc);
+			return rc;
+		}
+	}
+
+	switch (labibb->pmic_rev_id->pmic_subtype) {
+	case PMI8996_SUBTYPE:
+	case PMI8994_SUBTYPE:
+	case PMI8950_SUBTYPE:
+		rc = qpnp_labibb_ttw_exit_ibb_common(labibb);
+		break;
+	}
+	if (rc < 0) {
+		pr_err("Failed to configure TTW-exit for IBB rc=%d\n", rc);
+		return rc;
+	}
+
+	labibb->in_ttw_mode = false;
+	return rc;
+}
+
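+/*
+ * Poll LAB_STATUS1 for VREG_OK and notify listeners; if it never asserts
+ * and short-circuit detection is enabled, treat it as a LAB short and
+ * shut the module down.
+ */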
+static void qpnp_lab_vreg_notifier_work(struct work_struct *work)
+{
+	int rc = 0;
+	u16 retries = 1000, dly = 5000;
+	u8 val;
+	struct qpnp_labibb *labibb  = container_of(work, struct qpnp_labibb,
+							lab_vreg_ok_work);
+	if (labibb->lab_vreg.sc_wait_time_ms != -EINVAL)
+		retries = labibb->lab_vreg.sc_wait_time_ms / 5;
+
+	while (retries) {
+		rc = qpnp_labibb_read(labibb, labibb->lab_base +
+					REG_LAB_STATUS1, &val, 1);
+		if (rc < 0) {
+			pr_err("read register %x failed rc = %d\n",
+				REG_LAB_STATUS1, rc);
+			return;
+		}
+
+		if (val & LAB_STATUS1_VREG_OK_BIT) {
+			raw_notifier_call_chain(&labibb_notifier,
+						LAB_VREG_OK, NULL);
+			break;
+		}
+
+		usleep_range(dly, dly + 100);
+		retries--;
+	}
+
+	if (!retries) {
+		if (labibb->detect_lab_sc) {
+			pr_crit("short circuit detected on LAB rail.. disabling the LAB/IBB/OLEDB modules\n");
+			/* Disable LAB module */
+			val = 0;
+			rc = qpnp_labibb_write(labibb, labibb->lab_base +
+					REG_LAB_MODULE_RDY, &val, 1);
+			if (rc < 0) {
+				pr_err("write register %x failed rc = %d\n",
+					REG_LAB_MODULE_RDY, rc);
+				return;
+			}
+			raw_notifier_call_chain(&labibb_notifier,
+						LAB_VREG_NOT_OK, NULL);
+			labibb->sc_detected = true;
+			labibb->lab_vreg.vreg_enabled = 0;
+			labibb->ibb_vreg.vreg_enabled = 0;
+		} else {
+			pr_err("LAB_VREG_OK not set, failed to notify\n");
+		}
+	}
+}
+
+static int qpnp_lab_enable_standalone(struct qpnp_labibb *labibb)
+{
+	int rc;
+	u8 val;
+
+	val = LAB_ENABLE_CTL_EN;
+	rc = qpnp_labibb_write(labibb,
+		labibb->lab_base + REG_LAB_ENABLE_CTL, &val, 1);
+	if (rc < 0) {
+		pr_err("Write register %x failed rc = %d\n",
+					REG_LAB_ENABLE_CTL, rc);
+		return rc;
+	}
+
+	udelay(labibb->lab_vreg.soft_start);
+
+	rc = qpnp_labibb_read(labibb, labibb->lab_base +
+				REG_LAB_STATUS1, &val, 1);
+	if (rc < 0) {
+		pr_err("Read register %x failed rc = %d\n",
+					REG_LAB_STATUS1, rc);
+		return rc;
+	}
+
+	if (!(val & LAB_STATUS1_VREG_OK_BIT)) {
+		pr_err("Can't enable LAB standalone\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int qpnp_ibb_enable_standalone(struct qpnp_labibb *labibb)
+{
+	int rc, delay, retries = 10;
+	u8 val;
+
+	rc = qpnp_ibb_set_mode(labibb, IBB_SW_CONTROL_EN);
+	if (rc < 0) {
+		pr_err("Unable to set IBB_MODULE_EN rc = %d\n", rc);
+		return rc;
+	}
+
+	delay = labibb->ibb_vreg.soft_start;
+	while (retries--) {
+		/* Wait for a small period before reading IBB_STATUS1 */
+		usleep_range(delay, delay + 100);
+
+		rc = qpnp_labibb_read(labibb, labibb->ibb_base +
+				REG_IBB_STATUS1, &val, 1);
+		if (rc < 0) {
+			pr_err("Read register %x failed rc = %d\n",
+				REG_IBB_STATUS1, rc);
+			return rc;
+		}
+
+		if (val & IBB_STATUS1_VREG_OK_BIT)
+			break;
+	}
+
+	if (!(val & IBB_STATUS1_VREG_OK_BIT)) {
+		pr_err("Can't enable IBB standalone\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int qpnp_labibb_regulator_enable(struct qpnp_labibb *labibb)
+{
+	int rc;
+	u8 val;
+	int dly;
+	int retries;
+	bool enabled = false;
+
+	if (labibb->ttw_en && !labibb->ibb_vreg.vreg_enabled &&
+		labibb->in_ttw_mode) {
+		rc = qpnp_labibb_regulator_ttw_mode_exit(labibb);
+		if (rc) {
+			pr_err("Error in exiting TTW mode rc = %d\n", rc);
+			return rc;
+		}
+	}
+
+	rc = qpnp_ibb_set_mode(labibb, IBB_SW_CONTROL_EN);
+	if (rc) {
+		pr_err("Unable to set IBB_MODULE_EN rc = %d\n", rc);
+		return rc;
+	}
+
+	/* total delay time */
+	dly = labibb->lab_vreg.soft_start + labibb->ibb_vreg.soft_start
+				+ labibb->ibb_vreg.pwrup_dly;
+	usleep_range(dly, dly + 100);
+
+	/* after this delay, lab should be enabled */
+	rc = qpnp_labibb_read(labibb, labibb->lab_base + REG_LAB_STATUS1,
+			&val, 1);
+	if (rc < 0) {
+		pr_err("read register %x failed rc = %d\n",
+			REG_LAB_STATUS1, rc);
+		goto err_out;
+	}
+
+	pr_debug("soft=%d %d up=%d dly=%d\n",
+		labibb->lab_vreg.soft_start, labibb->ibb_vreg.soft_start,
+				labibb->ibb_vreg.pwrup_dly, dly);
+
+	if (!(val & LAB_STATUS1_VREG_OK_BIT)) {
+		pr_err("failed for LAB %x\n", val);
+		goto err_out;
+	}
+
+	/* poll IBB_STATUS to make sure ibb had been enabled */
+	dly = labibb->ibb_vreg.soft_start + labibb->ibb_vreg.pwrup_dly;
+	retries = 10;
+	while (retries--) {
+		rc = qpnp_labibb_read(labibb, labibb->ibb_base +
+					REG_IBB_STATUS1, &val, 1);
+		if (rc < 0) {
+			pr_err("read register %x failed rc = %d\n",
+				REG_IBB_STATUS1, rc);
+			goto err_out;
+		}
+
+		if (val & IBB_STATUS1_VREG_OK_BIT) {
+			enabled = true;
+			break;
+		}
+		usleep_range(dly, dly + 100);
+	}
+
+	if (!enabled) {
+		pr_err("failed for IBB %x\n", val);
+		goto err_out;
+	}
+
+	labibb->lab_vreg.vreg_enabled = 1;
+	labibb->ibb_vreg.vreg_enabled = 1;
+
+	return 0;
+err_out:
+	rc = qpnp_ibb_set_mode(labibb, IBB_SW_CONTROL_DIS);
+	if (rc < 0) {
+		pr_err("Unable to set IBB_MODULE_EN rc = %d\n", rc);
+		return rc;
+	}
+	return -EINVAL;
+}
+
+static int qpnp_labibb_regulator_disable(struct qpnp_labibb *labibb)
+{
+	int rc;
+	u8 val;
+	int dly;
+	int retries;
+	bool disabled = false;
+
+	/*
+	 * When TTW mode is enabled and LABIBB regulators are disabled, it is
+	 * recommended not to disable IBB through IBB_ENABLE_CTL when switching
+	 * to SWIRE control on entering TTW mode. Hence, just enter TTW mode
+	 * and mark the regulators disabled. When we exit TTW mode, the normal
+	 * mode settings will be restored anyway and the regulators will be
+	 * enabled as before.
+	 */
+	if (labibb->ttw_en && !labibb->in_ttw_mode) {
+		rc = qpnp_labibb_regulator_ttw_mode_enter(labibb);
+		if (rc < 0) {
+			pr_err("Error in entering TTW mode rc = %d\n", rc);
+			return rc;
+		}
+		labibb->lab_vreg.vreg_enabled = 0;
+		labibb->ibb_vreg.vreg_enabled = 0;
+		return 0;
+	}
+
+	rc = qpnp_ibb_set_mode(labibb, IBB_SW_CONTROL_DIS);
+	if (rc < 0) {
+		pr_err("Unable to set IBB_MODULE_EN rc = %d\n", rc);
+		return rc;
+	}
+
+	/* poll IBB_STATUS to make sure ibb had been disabled */
+	dly = labibb->ibb_vreg.pwrdn_dly;
+	retries = 2;
+	while (retries--) {
+		usleep_range(dly, dly + 100);
+		rc = qpnp_labibb_read(labibb, labibb->ibb_base +
+				REG_IBB_STATUS1, &val, 1);
+		if (rc < 0) {
+			pr_err("read register %x failed rc = %d\n",
+				REG_IBB_STATUS1, rc);
+			return rc;
+		}
+
+		if (!(val & IBB_STATUS1_VREG_OK_BIT)) {
+			disabled = true;
+			break;
+		}
+	}
+
+	if (!disabled) {
+		pr_err("failed for IBB %x\n", val);
+		return -EINVAL;
+	}
+
+	if (labibb->pmic_rev_id->pmic_subtype == PMI8998_SUBTYPE &&
+		labibb->mode == QPNP_LABIBB_LCD_MODE) {
+		rc = qpnp_lab_pfm_disable(labibb);
+		if (rc < 0) {
+			pr_err("Error in disabling PFM, rc=%d\n", rc);
+			return rc;
+		}
+	}
+
+	labibb->lab_vreg.vreg_enabled = 0;
+	labibb->ibb_vreg.vreg_enabled = 0;
+
+	return 0;
+}
+
+static int qpnp_lab_regulator_enable(struct regulator_dev *rdev)
+{
+	int rc;
+	struct qpnp_labibb *labibb  = rdev_get_drvdata(rdev);
+
+	if (labibb->sc_detected) {
+		pr_info("Short circuit detected: disabled LAB/IBB rails\n");
+		return 0;
+	}
+
+	if (labibb->skip_2nd_swire_cmd) {
+		rc = qpnp_ibb_ps_config(labibb, false);
+		if (rc < 0) {
+			pr_err("Failed to disable IBB PS rc=%d\n", rc);
+			return rc;
+		}
+	}
+
+	if (!labibb->lab_vreg.vreg_enabled && !labibb->swire_control) {
+		if (!labibb->standalone)
+			return qpnp_labibb_regulator_enable(labibb);
+
+		rc = qpnp_lab_enable_standalone(labibb);
+		if (rc) {
+			pr_err("enable lab standalone failed, rc=%d\n", rc);
+			return rc;
+		}
+		labibb->lab_vreg.vreg_enabled = 1;
+	}
+
+	if (labibb->notify_lab_vreg_ok_sts || labibb->detect_lab_sc)
+		schedule_work(&labibb->lab_vreg_ok_work);
+
+	return 0;
+}
+
+static int qpnp_lab_regulator_disable(struct regulator_dev *rdev)
+{
+	int rc;
+	u8 val;
+	struct qpnp_labibb *labibb  = rdev_get_drvdata(rdev);
+
+	if (labibb->lab_vreg.vreg_enabled && !labibb->swire_control) {
+
+		if (!labibb->standalone)
+			return qpnp_labibb_regulator_disable(labibb);
+
+		val = 0;
+		rc = qpnp_labibb_write(labibb,
+			labibb->lab_base + REG_LAB_ENABLE_CTL, &val, 1);
+		if (rc < 0) {
+			pr_err("qpnp_lab_regulator_enable write register %x failed rc = %d\n",
+				REG_LAB_ENABLE_CTL, rc);
+			return rc;
+		}
+
+		labibb->lab_vreg.vreg_enabled = 0;
+	}
+	return 0;
+}
+
+static int qpnp_lab_regulator_is_enabled(struct regulator_dev *rdev)
+{
+	struct qpnp_labibb *labibb  = rdev_get_drvdata(rdev);
+
+	if (labibb->swire_control)
+		return 0;
+
+	return labibb->lab_vreg.vreg_enabled;
+}
+
+static int qpnp_labibb_force_enable(struct qpnp_labibb *labibb)
+{
+	int rc;
+
+	if (labibb->skip_2nd_swire_cmd) {
+		rc = qpnp_ibb_ps_config(labibb, false);
+		if (rc < 0) {
+			pr_err("Failed to disable IBB PS rc=%d\n", rc);
+			return rc;
+		}
+	}
+
+	if (!labibb->swire_control) {
+		if (!labibb->standalone)
+			return qpnp_labibb_regulator_enable(labibb);
+
+		rc = qpnp_ibb_enable_standalone(labibb);
+		if (rc < 0) {
+			pr_err("enable ibb standalone failed, rc=%d\n", rc);
+			return rc;
+		}
+		labibb->ibb_vreg.vreg_enabled = 1;
+
+		rc = qpnp_lab_enable_standalone(labibb);
+		if (rc < 0) {
+			pr_err("enable lab standalone failed, rc=%d\n", rc);
+			return rc;
+		}
+		labibb->lab_vreg.vreg_enabled = 1;
+	}
+
+	return 0;
+}
+
+#define SC_ERR_RECOVERY_DELAY_MS	250
+#define SC_ERR_COUNT_INTERVAL_SEC	1
+#define POLLING_SCP_DONE_COUNT		2
+#define POLLING_SCP_DONE_INTERVAL_MS	5
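+/*
+ * Short-circuit fault handler shared by the LAB and IBB SC IRQs: count
+ * faults within a one-second window and schedule a delayed recovery
+ * attempt through labibb_sc_err_recovery_work().
+ */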
+static irqreturn_t labibb_sc_err_handler(int irq, void *_labibb)
+{
+	int rc;
+	u16 reg;
+	u8 sc_err_mask, val;
+	char *str;
+	struct qpnp_labibb *labibb = (struct qpnp_labibb *)_labibb;
+	bool in_sc_err, lab_en, ibb_en, scp_done = false;
+	int count;
+
+	if (irq == labibb->lab_vreg.lab_sc_irq) {
+		reg = labibb->lab_base + REG_LAB_STATUS1;
+		sc_err_mask = LAB_STATUS1_SC_DETECT_BIT;
+		str = "LAB";
+	} else if (irq == labibb->ibb_vreg.ibb_sc_irq) {
+		reg = labibb->ibb_base + REG_IBB_STATUS1;
+		sc_err_mask = IBB_STATUS1_SC_DETECT_BIT;
+		str = "IBB";
+	} else {
+		return IRQ_HANDLED;
+	}
+
+	rc = qpnp_labibb_read(labibb, reg, &val, 1);
+	if (rc < 0) {
+		pr_err("Read 0x%x failed, rc=%d\n", reg, rc);
+		return IRQ_HANDLED;
+	}
+	pr_debug("%s SC error triggered! %s_STATUS1 = %d\n", str, str, val);
+
+	in_sc_err = !!(val & sc_err_mask);
+
+	/*
+	 * The SC fault would trigger PBS to disable regulators
+	 * for protection. This would cause the SC_DETECT status being
+	 * cleared so that it's not able to get the SC fault status.
+	 * Check if LAB/IBB regulators are enabled in the driver but
+	 * disabled in hardware, this means a SC fault had happened
+	 * and SCP handling is completed by PBS.
+	 */
+	if (!in_sc_err) {
+		count = POLLING_SCP_DONE_COUNT;
+		do {
+			reg = labibb->lab_base + REG_LAB_ENABLE_CTL;
+			rc = qpnp_labibb_read(labibb, reg, &val, 1);
+			if (rc < 0) {
+				pr_err("Read 0x%x failed, rc=%d\n", reg, rc);
+				return IRQ_HANDLED;
+			}
+			lab_en = !!(val & LAB_ENABLE_CTL_EN);
+
+			reg = labibb->ibb_base + REG_IBB_ENABLE_CTL;
+			rc = qpnp_labibb_read(labibb, reg, &val, 1);
+			if (rc < 0) {
+				pr_err("Read 0x%x failed, rc=%d\n", reg, rc);
+				return IRQ_HANDLED;
+			}
+			ibb_en = !!(val & IBB_ENABLE_CTL_MODULE_EN);
+			if (lab_en || ibb_en)
+				msleep(POLLING_SCP_DONE_INTERVAL_MS);
+			else
+				break;
+		} while ((lab_en || ibb_en) && count--);
+
+		if (labibb->lab_vreg.vreg_enabled
+				&& labibb->ibb_vreg.vreg_enabled
+				&& !lab_en && !ibb_en) {
+			pr_debug("LAB/IBB has been disabled by SCP\n");
+			scp_done = true;
+		}
+	}
+
+	if (in_sc_err || scp_done) {
+		if (hrtimer_active(&labibb->sc_err_check_timer) ||
+			hrtimer_callback_running(&labibb->sc_err_check_timer)) {
+			labibb->sc_err_count++;
+		} else {
+			labibb->sc_err_count = 1;
+			hrtimer_start(&labibb->sc_err_check_timer,
+					ktime_set(SC_ERR_COUNT_INTERVAL_SEC, 0),
+					HRTIMER_MODE_REL);
+		}
+		schedule_delayed_work(&labibb->sc_err_recovery_work,
+				msecs_to_jiffies(SC_ERR_RECOVERY_DELAY_MS));
+	}
+
+	return IRQ_HANDLED;
+}
+
+#define SC_FAULT_COUNT_MAX		4
+static enum hrtimer_restart labibb_check_sc_err_count(struct hrtimer *timer)
+{
+	struct qpnp_labibb *labibb = container_of(timer,
+			struct qpnp_labibb, sc_err_check_timer);
+	/*
+	 * If the SC fault triggers more than 4 times in 1 second,
+	 * disable the IRQs and leave them disabled.
+	 */
+	if (labibb->sc_err_count > SC_FAULT_COUNT_MAX) {
+		disable_irq(labibb->lab_vreg.lab_sc_irq);
+		disable_irq(labibb->ibb_vreg.ibb_sc_irq);
+	}
+
+	return HRTIMER_NORESTART;
+}
+
+static void labibb_sc_err_recovery_work(struct work_struct *work)
+{
+	struct qpnp_labibb *labibb = container_of(work, struct qpnp_labibb,
+					sc_err_recovery_work.work);
+	int rc;
+
+	labibb->ibb_vreg.vreg_enabled = 0;
+	labibb->lab_vreg.vreg_enabled = 0;
+	rc = qpnp_labibb_force_enable(labibb);
+	if (rc < 0)
+		pr_err("force enable labibb failed, rc=%d\n", rc);
+
+}
+
+static int qpnp_lab_regulator_set_voltage(struct regulator_dev *rdev,
+				int min_uV, int max_uV, unsigned int *selector)
+{
+	int rc, new_uV;
+	u8 val;
+	struct qpnp_labibb *labibb = rdev_get_drvdata(rdev);
+
+	if (labibb->swire_control)
+		return 0;
+
+	if (min_uV < labibb->lab_vreg.min_volt) {
+		pr_err("min_uV %d is less than min_volt %d", min_uV,
+			labibb->lab_vreg.min_volt);
+		return -EINVAL;
+	}
+
+	val = DIV_ROUND_UP(min_uV - labibb->lab_vreg.min_volt,
+				labibb->lab_vreg.step_size);
+	new_uV = val * labibb->lab_vreg.step_size + labibb->lab_vreg.min_volt;
+
+	if (new_uV > max_uV) {
+		pr_err("unable to set voltage %d (min:%d max:%d)\n", new_uV,
+			min_uV, max_uV);
+		return -EINVAL;
+	}
+
+	rc = qpnp_labibb_masked_write(labibb, labibb->lab_base +
+				REG_LAB_VOLTAGE,
+				LAB_VOLTAGE_SET_MASK |
+				LAB_VOLTAGE_OVERRIDE_EN,
+				val | LAB_VOLTAGE_OVERRIDE_EN);
+
+	if (rc < 0) {
+		pr_err("write to register %x failed rc = %d\n", REG_LAB_VOLTAGE,
+			rc);
+		return rc;
+	}
+
+	if (new_uV > labibb->lab_vreg.curr_volt) {
+		val = DIV_ROUND_UP(new_uV - labibb->lab_vreg.curr_volt,
+				labibb->lab_vreg.step_size);
+		udelay(val * labibb->lab_vreg.slew_rate);
+	}
+	labibb->lab_vreg.curr_volt = new_uV;
+
+	return 0;
+}
+
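+/*
+ * Workaround for the spurious second SWIRE command on older parts: wait
+ * for IBB VREG_OK, briefly take IBB under SW control so the extra SWIRE
+ * pulse is ignored, then hand control back to SWIRE and enable PS mode.
+ */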
+static int qpnp_skip_swire_command(struct qpnp_labibb *labibb)
+{
+	int rc = 0, retry = 50, dly;
+	u8 reg;
+
+	do {
+		/* poll for ibb vreg_ok */
+		rc = qpnp_labibb_read(labibb, labibb->ibb_base +
+					REG_IBB_STATUS1, &reg, 1);
+		if (rc < 0) {
+			pr_err("Failed to read ibb_status1 reg rc=%d\n", rc);
+			return rc;
+		}
+		if (reg & IBB_STATUS1_VREG_OK_BIT)
+			break;
+
+		/* poll delay */
+		usleep_range(500, 600);
+
+	} while (--retry);
+
+	if (!retry) {
+		pr_err("ibb vreg_ok failed to turn-on\n");
+		return -EBUSY;
+	}
+
+	/* move to SW control */
+	rc = qpnp_ibb_set_mode(labibb, IBB_SW_CONTROL_EN);
+	if (rc < 0) {
+		pr_err("Failed switch to IBB_SW_CONTROL rc=%d\n", rc);
+		return rc;
+	}
+
+	/* delay to skip the second swire command */
+	dly = labibb->swire_2nd_cmd_delay * 1000;
+	while (dly >= 20000) {
+		usleep_range(20000, 20010);
+		dly -= 20000;
+	}
+	if (dly)
+		usleep_range(dly, dly + 10);
+
+	rc = qpnp_ibb_set_mode(labibb, IBB_HW_SW_CONTROL);
+	if (rc < 0) {
+		pr_err("Failed switch to IBB_HW_SW_CONTROL rc=%d\n", rc);
+		return rc;
+	}
+
+	/* delay for SPMI to SWIRE transition */
+	usleep_range(1000, 1100);
+
+	/* Move back to SWIRE control */
+	rc = qpnp_ibb_set_mode(labibb, IBB_HW_CONTROL);
+	if (rc < 0)
+		pr_err("Failed switch to IBB_HW_CONTROL rc=%d\n", rc);
+
+	/* delay before enabling the PS mode */
+	msleep(labibb->swire_ibb_ps_enable_delay);
+	rc = qpnp_ibb_ps_config(labibb, true);
+	if (rc < 0)
+		pr_err("Unable to enable IBB PS rc=%d\n", rc);
+
+	return rc;
+}
+
+static irqreturn_t lab_vreg_ok_handler(int irq, void *_labibb)
+{
+	struct qpnp_labibb *labibb = _labibb;
+	int rc;
+
+	if (labibb->skip_2nd_swire_cmd && labibb->lab_dig_major < 2) {
+		rc = qpnp_skip_swire_command(labibb);
+		if (rc < 0)
+			pr_err("Failed in 'qpnp_skip_swire_command' rc=%d\n",
+				rc);
+	} else if (labibb->pmic_rev_id->pmic_subtype == PMI8998_SUBTYPE &&
+		labibb->mode == QPNP_LABIBB_LCD_MODE) {
+		rc = qpnp_lab_pfm_enable(labibb);
+		if (rc < 0)
+			pr_err("Failed to config PFM, rc=%d\n", rc);
+	}
+
+	return IRQ_HANDLED;
+}
+
+static int qpnp_lab_regulator_get_voltage(struct regulator_dev *rdev)
+{
+	struct qpnp_labibb *labibb  = rdev_get_drvdata(rdev);
+
+	if (labibb->swire_control)
+		return 0;
+
+	return labibb->lab_vreg.curr_volt;
+}
+
+static bool is_lab_vreg_ok_irq_available(struct qpnp_labibb *labibb)
+{
+	/*
+	 * LAB VREG_OK interrupt is used only to skip 2nd SWIRE command in
+	 * dig_major < 2 targets. For pmi8998, it is used to enable PFM in
+	 * LCD mode.
+	 */
+	if (labibb->skip_2nd_swire_cmd && labibb->lab_dig_major < 2)
+		return true;
+
+	if (labibb->pmic_rev_id->pmic_subtype == PMI8998_SUBTYPE &&
+		labibb->mode == QPNP_LABIBB_LCD_MODE)
+		return true;
+
+	return false;
+}
+
+static struct regulator_ops qpnp_lab_ops = {
+	.enable			= qpnp_lab_regulator_enable,
+	.disable		= qpnp_lab_regulator_disable,
+	.is_enabled		= qpnp_lab_regulator_is_enabled,
+	.set_voltage		= qpnp_lab_regulator_set_voltage,
+	.get_voltage		= qpnp_lab_regulator_get_voltage,
+};
+
+static int register_qpnp_lab_regulator(struct qpnp_labibb *labibb,
+					struct device_node *of_node)
+{
+	int rc = 0;
+	struct regulator_init_data *init_data;
+	struct regulator_desc *rdesc = &labibb->lab_vreg.rdesc;
+	struct regulator_config cfg = {};
+	u8 val, mask;
+	const char *current_sense_str;
+	bool config_current_sense = false;
+	u32 tmp;
+
+	if (!of_node) {
+		dev_err(labibb->dev, "qpnp lab regulator device tree node is missing\n");
+		return -EINVAL;
+	}
+
+	init_data = of_get_regulator_init_data(labibb->dev, of_node, rdesc);
+	if (!init_data) {
+		pr_err("unable to get regulator init data for qpnp lab regulator\n");
+		return -ENOMEM;
+	}
+
+	rc = of_property_read_u32(of_node, "qcom,qpnp-lab-min-voltage",
+					&(labibb->lab_vreg.min_volt));
+	if (rc < 0) {
+		pr_err("qcom,qpnp-lab-min-voltage is missing, rc = %d\n",
+			rc);
+		return rc;
+	}
+
+	rc = of_property_read_u32(of_node, "qcom,qpnp-lab-step-size",
+					&(labibb->lab_vreg.step_size));
+	if (rc < 0) {
+		pr_err("qcom,qpnp-lab-step-size is missing, rc = %d\n", rc);
+		return rc;
+	}
+
+	rc = of_property_read_u32(of_node, "qcom,qpnp-lab-slew-rate",
+					&(labibb->lab_vreg.slew_rate));
+	if (rc < 0) {
+		pr_err("qcom,qpnp-lab-slew-rate is missing, rc = %d\n",
+			rc);
+		return rc;
+	}
+
+	labibb->notify_lab_vreg_ok_sts = of_property_read_bool(of_node,
+					"qcom,notify-lab-vreg-ok-sts");
+
+	labibb->lab_vreg.sc_wait_time_ms = -EINVAL;
+	if (labibb->pmic_rev_id->pmic_subtype == PM660L_SUBTYPE &&
+					labibb->detect_lab_sc)
+		of_property_read_u32(of_node, "qcom,qpnp-lab-sc-wait-time-ms",
+					&labibb->lab_vreg.sc_wait_time_ms);
+
+	rc = of_property_read_u32(of_node, "qcom,qpnp-lab-soft-start",
+					&(labibb->lab_vreg.soft_start));
+	if (!rc) {
+		for (val = 0; val < ARRAY_SIZE(lab_soft_start_table); val++)
+			if (lab_soft_start_table[val] ==
+					labibb->lab_vreg.soft_start)
+				break;
+
+		if (val == ARRAY_SIZE(lab_soft_start_table))
+			val = ARRAY_SIZE(lab_soft_start_table) - 1;
+
+		rc = qpnp_labibb_write(labibb, labibb->lab_base +
+				REG_LAB_SOFT_START_CTL, &val, 1);
+		if (rc < 0) {
+			pr_err("qpnp_labibb_write register %x failed rc = %d\n",
+				REG_LAB_SOFT_START_CTL, rc);
+			return rc;
+		}
+
+		labibb->lab_vreg.soft_start = lab_soft_start_table
+				[val & LAB_SOFT_START_CTL_MASK];
+	}
+
+	val = 0;
+	mask = 0;
+	rc = of_property_read_u32(of_node,
+		"qcom,qpnp-lab-max-precharge-time", &tmp);
+	if (!rc) {
+		for (val = 0; val < ARRAY_SIZE(lab_max_precharge_table); val++)
+			if (lab_max_precharge_table[val] == tmp)
+				break;
+
+		if (val == ARRAY_SIZE(lab_max_precharge_table)) {
+			pr_err("Invalid value in qcom,qpnp-lab-max-precharge-time\n");
+			return -EINVAL;
+		}
+
+		mask = LAB_MAX_PRECHARGE_TIME_MASK;
+	}
+
+	if (of_property_read_bool(of_node,
+			"qcom,qpnp-lab-max-precharge-enable")) {
+		val |= LAB_FAST_PRECHARGE_CTL_EN;
+		mask |= LAB_FAST_PRECHARGE_CTL_EN;
+	}
+
+	rc = qpnp_labibb_masked_write(labibb, labibb->lab_base +
+				REG_LAB_PRECHARGE_CTL, mask, val);
+	if (rc < 0) {
+		pr_err("qpnp_lab_dt_init write register %x failed rc = %d\n",
+			REG_LAB_PRECHARGE_CTL, rc);
+		return rc;
+	}
+
+	if (labibb->mode == QPNP_LABIBB_AMOLED_MODE &&
+		labibb->pmic_rev_id->pmic_subtype != PM660L_SUBTYPE) {
+		/*
+		 * default to 1.5 times current gain if
+		 * user doesn't specify the current-sense
+		 * dt parameter
+		 */
+		current_sense_str = "1.5x";
+		val = qpnp_labibb_get_matching_idx(current_sense_str);
+		config_current_sense = true;
+	}
+
+	if (of_find_property(of_node,
+		"qcom,qpnp-lab-current-sense", NULL)) {
+		config_current_sense = true;
+		rc = of_property_read_string(of_node,
+			"qcom,qpnp-lab-current-sense",
+			&current_sense_str);
+		if (!rc) {
+			val = qpnp_labibb_get_matching_idx(
+					current_sense_str);
+		} else {
+			pr_err("qcom,qpnp-lab-current-sense configured incorrectly rc = %d\n",
+				rc);
+			return rc;
+		}
+	}
+
+	if (config_current_sense) {
+		rc = qpnp_labibb_masked_write(labibb, labibb->lab_base +
+			REG_LAB_CURRENT_SENSE,
+			LAB_CURRENT_SENSE_GAIN_MASK,
+			val);
+		if (rc < 0) {
+			pr_err("qpnp_labibb_write register %x failed rc = %d\n",
+				REG_LAB_CURRENT_SENSE, rc);
+			return rc;
+		}
+	}
+
+	val = (labibb->standalone) ? 0 : LAB_IBB_EN_RDY_EN;
+	rc = qpnp_labibb_sec_write(labibb, labibb->lab_base,
+				REG_LAB_IBB_EN_RDY, val);
+
+	if (rc < 0) {
+		pr_err("qpnp_lab_sec_write register %x failed rc = %d\n",
+			REG_LAB_IBB_EN_RDY, rc);
+		return rc;
+	}
+
+	rc = qpnp_labibb_read(labibb, labibb->ibb_base + REG_IBB_ENABLE_CTL,
+				&val, 1);
+	if (rc < 0) {
+		pr_err("qpnp_labibb_read register %x failed rc = %d\n",
+			REG_IBB_ENABLE_CTL, rc);
+		return rc;
+	}
+
+	if (!(val & (IBB_ENABLE_CTL_SWIRE_RDY | IBB_ENABLE_CTL_MODULE_EN))) {
+		/* SWIRE_RDY and IBB_MODULE_EN not enabled */
+		rc = qpnp_lab_dt_init(labibb, of_node);
+		if (rc < 0) {
+			pr_err("qpnp-lab: wrong DT parameter specified: rc = %d\n",
+				rc);
+			return rc;
+		}
+	} else {
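+		/*
+		 * Module already enabled (e.g. by the bootloader with the
+		 * display on): read back the live configuration instead of
+		 * reprogramming it.
+		 */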
+		rc = labibb->ibb_ver_ops->get_mode(labibb);
+		if (rc < 0) {
+			pr_err("qpnp_labibb get_mode failed rc = %d\n", rc);
+			return rc;
+		}
+
+		rc = qpnp_labibb_read(labibb, labibb->lab_base +
+					REG_LAB_VOLTAGE, &val, 1);
+		if (rc < 0) {
+			pr_err("qpnp_lab_read read register %x failed rc = %d\n",
+				REG_LAB_VOLTAGE, rc);
+			return rc;
+		}
+
+		labibb->lab_vreg.curr_volt =
+			(val & LAB_VOLTAGE_SET_MASK) *
+			labibb->lab_vreg.step_size +
+			labibb->lab_vreg.min_volt;
+		if (labibb->mode == QPNP_LABIBB_LCD_MODE) {
+			rc = of_property_read_u32(of_node,
+				"qcom,qpnp-lab-init-lcd-voltage",
+				&(labibb->lab_vreg.curr_volt));
+			if (rc < 0) {
+				pr_err("get qcom,qpnp-lab-init-lcd-voltage failed, rc = %d\n",
+					rc);
+				return rc;
+			}
+		} else if (!(val & LAB_VOLTAGE_OVERRIDE_EN)) {
+			rc = of_property_read_u32(of_node,
+				"qcom,qpnp-lab-init-amoled-voltage",
+				&(labibb->lab_vreg.curr_volt));
+			if (rc < 0) {
+				pr_err("get qcom,qpnp-lab-init-amoled-voltage failed, rc = %d\n",
+					rc);
+				return rc;
+			}
+		}
+
+		labibb->lab_vreg.vreg_enabled = 1;
+	}
+
+	if (is_lab_vreg_ok_irq_available(labibb)) {
+		rc = devm_request_threaded_irq(labibb->dev,
+				labibb->lab_vreg.lab_vreg_ok_irq, NULL,
+				lab_vreg_ok_handler,
+				IRQF_ONESHOT | IRQF_TRIGGER_RISING,
+				"lab-vreg-ok", labibb);
+		if (rc) {
+			pr_err("Failed to register 'lab-vreg-ok' irq rc=%d\n",
+						rc);
+			return rc;
+		}
+	}
+
+	if (labibb->lab_vreg.lab_sc_irq != -EINVAL) {
+		rc = devm_request_threaded_irq(labibb->dev,
+				labibb->lab_vreg.lab_sc_irq, NULL,
+				labibb_sc_err_handler,
+				IRQF_ONESHOT | IRQF_TRIGGER_RISING,
+				"lab-sc-err", labibb);
+		if (rc) {
+			pr_err("Failed to register 'lab-sc-err' irq rc=%d\n",
+						rc);
+			return rc;
+		}
+	}
+	rc = qpnp_labibb_read(labibb, labibb->lab_base + REG_LAB_MODULE_RDY,
+				&val, 1);
+	if (rc < 0) {
+		pr_err("qpnp_lab_read read register %x failed rc = %d\n",
+			REG_LAB_MODULE_RDY, rc);
+		return rc;
+	}
+
+	if (!(val & LAB_MODULE_RDY_EN)) {
+		val = LAB_MODULE_RDY_EN;
+
+		rc = qpnp_labibb_write(labibb, labibb->lab_base +
+			REG_LAB_MODULE_RDY, &val, 1);
+
+		if (rc < 0) {
+			pr_err("qpnp_lab_dt_init write register %x failed rc = %d\n",
+				REG_LAB_MODULE_RDY, rc);
+			return rc;
+		}
+	}
+
+	if (init_data->constraints.name) {
+		rdesc->owner		= THIS_MODULE;
+		rdesc->type		= REGULATOR_VOLTAGE;
+		rdesc->ops		= &qpnp_lab_ops;
+		rdesc->name		= init_data->constraints.name;
+
+		cfg.dev = labibb->dev;
+		cfg.init_data = init_data;
+		cfg.driver_data = labibb;
+		cfg.of_node = of_node;
+
+		if (of_get_property(labibb->dev->of_node, "parent-supply",
+				NULL))
+			init_data->supply_regulator = "parent";
+
+		init_data->constraints.valid_ops_mask
+				|= REGULATOR_CHANGE_VOLTAGE |
+					REGULATOR_CHANGE_STATUS;
+
+		labibb->lab_vreg.rdev = regulator_register(rdesc, &cfg);
+		if (IS_ERR(labibb->lab_vreg.rdev)) {
+			rc = PTR_ERR(labibb->lab_vreg.rdev);
+			labibb->lab_vreg.rdev = NULL;
+			pr_err("unable to register qpnp lab regulator, rc = %d\n",
+				rc);
+
+			return rc;
+		}
+	} else {
+		dev_err(labibb->dev, "qpnp lab regulator name missing\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int qpnp_ibb_pfm_mode_enable(struct qpnp_labibb *labibb,
+			struct device_node *of_node)
+{
+	int rc = 0;
+	u32 i, tmp = 0;
+	u8 val = IBB_PFM_ENABLE;
+
+	rc = of_property_read_u32(of_node, "qcom,qpnp-ibb-pfm-peak-curr",
+				&tmp);
+	if (rc < 0) {
+		pr_err("qcom,qpnp-ibb-pfm-peak-curr is missing, rc = %d\n",
+			rc);
+		return rc;
+	}
+	for (i = 0; i < ARRAY_SIZE(ibb_pfm_peak_curr_table); i++)
+		if (ibb_pfm_peak_curr_table[i] == tmp)
+			break;
+
+	if (i == ARRAY_SIZE(ibb_pfm_peak_curr_table)) {
+		pr_err("Invalid value in qcom,qpnp-ibb-pfm-peak-curr\n");
+		return -EINVAL;
+	}
+
+	val |= (i << IBB_PFM_PEAK_CURRENT_BIT_SHIFT);
+
+	rc = of_property_read_u32(of_node, "qcom,qpnp-ibb-pfm-hysteresis",
+				&tmp);
+	if (rc < 0) {
+		pr_err("qcom,qpnp-ibb-pfm-hysteresis is missing, rc = %d\n",
+			rc);
+		return rc;
+	}
+
+	for (i = 0; i < ARRAY_SIZE(ibb_pfm_hysteresis_table); i++)
+		if (ibb_pfm_hysteresis_table[i] == tmp)
+			break;
+
+	if (i == ARRAY_SIZE(ibb_pfm_hysteresis_table)) {
+		pr_err("Invalid value in qcom,qpnp-ibb-pfm-hysteresis\n");
+		return -EINVAL;
+	}
+
+	val |= (i << IBB_PFM_HYSTERESIS_BIT_SHIFT);
+
+	rc = qpnp_labibb_write(labibb, labibb->ibb_base +
+				REG_IBB_PFM_CTL, &val, 1);
+	if (rc < 0)
+		pr_err("qpnp_ibb_pfm_ctl write register %x failed rc = %d\n",
+					REG_IBB_PFM_CTL, rc);
+
+	return rc;
+}
+
+static int qpnp_labibb_pbs_mode_enable(struct qpnp_labibb *labibb,
+			struct device_node *of_node)
+{
+	int rc = 0;
+
+	rc = qpnp_labibb_masked_write(labibb, labibb->ibb_base +
+				REG_IBB_SWIRE_CTL,
+				IBB_SWIRE_VOUT_UPD_EN, 0);
+	if (rc < 0) {
+		pr_err("qpnp_ibb_swire_ctl write register %x failed rc = %d\n",
+					REG_IBB_SWIRE_CTL, rc);
+		return rc;
+	}
+
+	rc = qpnp_labibb_masked_write(labibb, labibb->ibb_base +
+				REG_IBB_PD_CTL, IBB_SWIRE_PD_UPD, 0);
+	if (rc < 0) {
+		pr_err("qpnp_ibb_pd_ctl write register %x failed rc = %d\n",
+					REG_IBB_PD_CTL, rc);
+		return rc;
+	}
+
+	rc = qpnp_labibb_masked_write(labibb, labibb->lab_base +
+				REG_LAB_SWIRE_PGM_CTL, LAB_EN_SWIRE_PGM_VOUT |
+				LAB_EN_SWIRE_PGM_PD, 0);
+	if (rc < 0)
+		pr_err("qpnp_lab_swire_pgm_ctl write register %x failed rc = %d\n",
+					REG_LAB_SWIRE_PGM_CTL, rc);
+
+	return rc;
+}
+
+static int qpnp_ibb_slew_rate_config(struct qpnp_labibb *labibb,
+			struct device_node *of_node)
+{
+	int rc = 0;
+	u32 i, tmp = 0;
+	u8 val = 0, mask = 0;
+
+	rc = of_property_read_u32(of_node, "qcom,qpnp-ibb-fast-slew-rate",
+						&tmp);
+	if (!rc) {
+		for (i = 0; i < ARRAY_SIZE(ibb_output_slew_ctl_table); i++)
+			if (ibb_output_slew_ctl_table[i] == tmp)
+				break;
+
+		if (i == ARRAY_SIZE(ibb_output_slew_ctl_table)) {
+			pr_err("Invalid value in qcom,qpnp-ibb-fast-slew-rate\n");
+			return -EINVAL;
+		}
+
+		labibb->ibb_vreg.slew_rate = tmp;
+		val |= (i << IBB_SLEW_RATE_TRANS_TIME_FAST_SHIFT) |
+				IBB_SLEW_RATE_SPEED_FAST_EN | IBB_SLEW_CTL_EN;
+
+		mask = IBB_SLEW_RATE_SPEED_FAST_EN |
+			IBB_SLEW_RATE_TRANS_TIME_FAST_MASK | IBB_SLEW_CTL_EN;
+	}
+
+	rc = of_property_read_u32(of_node, "qcom,qpnp-ibb-slow-slew-rate",
+				&tmp);
+	if (!rc) {
+		for (i = 0; i < ARRAY_SIZE(ibb_output_slew_ctl_table); i++)
+			if (ibb_output_slew_ctl_table[i] == tmp)
+				break;
+
+		if (i == ARRAY_SIZE(ibb_output_slew_ctl_table)) {
+			pr_err("Invalid value in qcom,qpnp-ibb-slow-slew-rate\n");
+			return -EINVAL;
+		}
+
+		labibb->ibb_vreg.slew_rate = tmp;
+		val |= (i | IBB_SLEW_CTL_EN);
+
+		mask |= IBB_SLEW_RATE_SPEED_FAST_EN |
+			IBB_SLEW_RATE_TRANS_TIME_SLOW_MASK | IBB_SLEW_CTL_EN;
+	}
+
+	rc = qpnp_labibb_masked_write(labibb, labibb->ibb_base +
+				REG_IBB_OUTPUT_SLEW_CTL,
+				mask, val);
+	if (rc < 0)
+		pr_err("qpnp_labibb_write register %x failed rc = %d\n",
+			REG_IBB_OUTPUT_SLEW_CTL, rc);
+
+	return rc;
+}
+
+static bool qpnp_ibb_poff_ctl_required(struct qpnp_labibb *labibb)
+{
+	if (labibb->pmic_rev_id->pmic_subtype == PM660L_SUBTYPE)
+		return false;
+
+	return true;
+}
+
+static int qpnp_ibb_dt_init(struct qpnp_labibb *labibb,
+				struct device_node *of_node)
+{
+	int rc = 0;
+	u32 i = 0, tmp = 0;
+	u8 val, mask;
+
+	/*
+	 * Do not configure LCD_AMOLED_SEL for pmi8998 as it will be done by
+	 * GPIO selector. Override the labibb->mode with what was configured
+	 * by the bootloader.
+	 */
+	if (labibb->pmic_rev_id->pmic_subtype == PMI8998_SUBTYPE) {
+		rc = qpnp_labibb_read(labibb, labibb->ibb_base +
+				REG_IBB_LCD_AMOLED_SEL, &val, 1);
+		if (rc) {
+			pr_err("qpnp_labibb_read register %x failed rc = %d\n",
+						REG_IBB_LCD_AMOLED_SEL, rc);
+			return rc;
+		}
+		if (val == REG_LAB_IBB_AMOLED_MODE)
+			labibb->mode = QPNP_LABIBB_AMOLED_MODE;
+		else
+			labibb->mode = QPNP_LABIBB_LCD_MODE;
+	} else {
+		rc = labibb->ibb_ver_ops->sel_mode(labibb, 1);
+		if (rc < 0) {
+			pr_err("qpnp_labibb_sec_write register %x failed rc = %d\n",
+				REG_IBB_LCD_AMOLED_SEL, rc);
+			return rc;
+		}
+	}
+
+	val = 0;
+	mask = 0;
+	rc = of_property_read_u32(of_node,
+		"qcom,qpnp-ibb-lab-pwrdn-delay", &tmp);
+	if (!rc) {
+		if (tmp > 0) {
+			for (i = 0; i < ARRAY_SIZE(ibb_pwrdn_dly_table); i++) {
+				if (ibb_pwrdn_dly_table[i] == tmp)
+					break;
+			}
+
+			if (i == ARRAY_SIZE(ibb_pwrdn_dly_table)) {
+				pr_err("Invalid value in qcom,qpnp-ibb-lab-pwrdn-delay\n");
+				return -EINVAL;
+			}
+		}
+
+		labibb->ibb_vreg.pwrdn_dly = tmp;
+
+		if (tmp > 0)
+			val = i | IBB_PWRUP_PWRDN_CTL_1_EN_DLY2;
+
+		mask |= IBB_PWRUP_PWRDN_CTL_1_EN_DLY2;
+	}
+
+	rc = of_property_read_u32(of_node,
+			"qcom,qpnp-ibb-lab-pwrup-delay", &tmp);
+	if (!rc) {
+		if (tmp > 0) {
+			for (i = 0; i < ARRAY_SIZE(ibb_pwrup_dly_table); i++) {
+				if (ibb_pwrup_dly_table[i] == tmp)
+					break;
+			}
+
+			if (i == ARRAY_SIZE(ibb_pwrup_dly_table)) {
+				pr_err("Invalid value in qcom,qpnp-ibb-lab-pwrup-delay\n");
+				return -EINVAL;
+			}
+		}
+
+		labibb->ibb_vreg.pwrup_dly = tmp;
+
+		if (tmp > 0)
+			val |= IBB_PWRUP_PWRDN_CTL_1_EN_DLY1;
+
+		val |= (i << IBB_PWRUP_PWRDN_CTL_1_DLY1_SHIFT);
+		val |= IBB_PWRUP_PWRDN_CTL_1_LAB_VREG_OK;
+		mask |= (IBB_PWRUP_PWRDN_CTL_1_EN_DLY1 |
+			IBB_PWRUP_PWRDN_CTL_1_DLY1_MASK |
+			IBB_PWRUP_PWRDN_CTL_1_LAB_VREG_OK);
+	}
+
+	if (of_property_read_bool(of_node,
+				"qcom,qpnp-ibb-en-discharge")) {
+		val |= PWRUP_PWRDN_CTL_1_DISCHARGE_EN;
+		mask |= PWRUP_PWRDN_CTL_1_DISCHARGE_EN;
+	}
+
+	rc = qpnp_labibb_sec_masked_write(labibb, labibb->ibb_base,
+				REG_IBB_PWRUP_PWRDN_CTL_1, mask, val);
+	if (rc < 0) {
+		pr_err("qpnp_labibb_sec_write register %x failed rc = %d\n",
+			REG_IBB_PWRUP_PWRDN_CTL_1, rc);
+		return rc;
+	}
+
+	if (of_property_read_bool(of_node, "qcom,qpnp-ibb-slew-rate-config")) {
+		rc = qpnp_ibb_slew_rate_config(labibb, of_node);
+		if (rc < 0)
+			return rc;
+	}
+
+	val = 0;
+	if (!of_property_read_bool(of_node, "qcom,qpnp-ibb-full-pull-down"))
+		val = IBB_PD_CTL_HALF_STRENGTH;
+
+	if (of_property_read_bool(of_node, "qcom,qpnp-ibb-pull-down-enable"))
+		val |= IBB_PD_CTL_EN;
+
+	mask = IBB_PD_CTL_STRENGTH_MASK | IBB_PD_CTL_EN;
+	rc = qpnp_labibb_masked_write(labibb,
+			labibb->ibb_base + REG_IBB_PD_CTL, mask, val);
+
+	if (rc < 0) {
+		pr_err("qpnp_ibb_dt_init write register %x failed rc = %d\n",
+				REG_IBB_PD_CTL, rc);
+		return rc;
+	}
+
+	rc = of_property_read_u32(of_node,
+		"qcom,qpnp-ibb-switching-clock-frequency", &tmp);
+	if (!rc) {
+		for (val = 0; val < ARRAY_SIZE(ibb_clk_div_table); val++)
+			if (ibb_clk_div_table[val] == tmp)
+				break;
+
+		if (val == ARRAY_SIZE(ibb_clk_div_table)) {
+			pr_err("Invalid value in qcom,qpnp-ibb-switching-clock-frequency\n");
+			return -EINVAL;
+		}
+		rc = labibb->ibb_ver_ops->set_clk_div(labibb, val);
+		if (rc < 0) {
+			pr_err("qpnp_ibb_dt_init write register %x failed rc = %d\n",
+				REG_IBB_CLK_DIV, rc);
+			return rc;
+		}
+	}
+
+	val = 0;
+	mask = 0;
+	rc = of_property_read_u32(of_node,
+		"qcom,qpnp-ibb-limit-maximum-current", &tmp);
+	if (!rc) {
+		for (val = 0; val < ARRAY_SIZE(ibb_current_limit_table); val++)
+			if (ibb_current_limit_table[val] == tmp)
+				break;
+
+		if (val == ARRAY_SIZE(ibb_current_limit_table)) {
+			pr_err("Invalid value in qcom,qpnp-ibb-limit-maximum-current\n");
+			return -EINVAL;
+		}
+
+		mask = IBB_CURRENT_LIMIT_MASK;
+	}
+
+	rc = of_property_read_u32(of_node,
+		"qcom,qpnp-ibb-debounce-cycle", &tmp);
+	if (!rc) {
+		for (i = 0; i < ARRAY_SIZE(ibb_debounce_table); i++)
+			if (ibb_debounce_table[i] == tmp)
+				break;
+
+		if (i == ARRAY_SIZE(ibb_debounce_table)) {
+			pr_err("Invalid value in qcom,qpnp-ibb-debounce-cycle\n");
+			return -EINVAL;
+		}
+
+		val |= (i << IBB_CURRENT_LIMIT_DEBOUNCE_SHIFT);
+		mask |= IBB_CURRENT_LIMIT_DEBOUNCE_MASK;
+	}
+
+	if (of_property_read_bool(of_node,
+		"qcom,qpnp-ibb-limit-max-current-enable")) {
+		val |= IBB_CURRENT_LIMIT_EN;
+		mask |= IBB_CURRENT_LIMIT_EN;
+	}
+
+	rc = qpnp_labibb_sec_masked_write(labibb, labibb->ibb_base,
+				REG_IBB_CURRENT_LIMIT, mask, val);
+	if (rc < 0) {
+		pr_err("qpnp_labibb_sec_write register %x failed rc = %d\n",
+				REG_IBB_CURRENT_LIMIT, rc);
+		return rc;
+	}
+
+	if (of_property_read_bool(of_node,
+		"qcom,qpnp-ibb-ring-suppression-enable")) {
+		val = IBB_RING_SUPPRESSION_CTL_EN;
+		rc = qpnp_labibb_write(labibb, labibb->ibb_base +
+					REG_IBB_RING_SUPPRESSION_CTL,
+					&val,
+					1);
+		if (rc < 0) {
+			pr_err("qpnp_ibb_dt_init write register %x failed rc = %d\n",
+				REG_IBB_RING_SUPPRESSION_CTL, rc);
+			return rc;
+		}
+	}
+
+	if (of_property_read_bool(of_node, "qcom,qpnp-ibb-ps-enable")) {
+		rc = qpnp_ibb_ps_config(labibb, true);
+		if (rc < 0) {
+			pr_err("qpnp_ibb_dt_init PS enable failed rc=%d\n", rc);
+			return rc;
+		}
+	} else {
+		rc = qpnp_ibb_ps_config(labibb, false);
+		if (rc < 0) {
+			pr_err("qpnp_ibb_dt_init PS disable failed rc=%d\n",
+									rc);
+			return rc;
+		}
+	}
+
+	if (of_property_read_bool(of_node,
+				 "qcom,qpnp-ibb-smart-ps-enable")) {
+		of_property_read_u32(of_node, "qcom,qpnp-ibb-num-swire-trans",
+					&labibb->ibb_vreg.num_swire_trans);
+
+		of_property_read_u32(of_node,
+				"qcom,qpnp-ibb-neg-curr-limit", &tmp);
+
+		rc = labibb->ibb_ver_ops->smart_ps_config(labibb, true,
+					labibb->ibb_vreg.num_swire_trans, tmp);
+		if (rc < 0) {
+			pr_err("qpnp_ibb_dt_init smart PS enable failed rc=%d\n",
+					 rc);
+			return rc;
+		}
+	}
+
+	rc = of_property_read_u32(of_node, "qcom,qpnp-ibb-init-voltage",
+					&(labibb->ibb_vreg.curr_volt));
+	if (rc < 0) {
+		pr_err("get qcom,qpnp-ibb-init-voltage failed, rc = %d\n", rc);
+		return rc;
+	}
+
+	if (of_property_read_bool(of_node,
+			"qcom,qpnp-ibb-use-default-voltage"))
+		rc = labibb->ibb_ver_ops->set_default_voltage(labibb, true);
+	else
+		rc = labibb->ibb_ver_ops->set_default_voltage(labibb, false);
+
+	if (rc < 0)
+		return rc;
+
+	if (of_property_read_bool(of_node, "qcom,qpnp-ibb-overload-blank")) {
+		rc = qpnp_ibb_vreg_ok_ctl(labibb, of_node);
+		if (rc < 0)
+			return rc;
+	}
+
+	return 0;
+}
+
+static int qpnp_ibb_regulator_enable(struct regulator_dev *rdev)
+{
+	int rc = 0;
+	struct qpnp_labibb *labibb  = rdev_get_drvdata(rdev);
+
+	if (labibb->sc_detected) {
+		pr_info("Short circuit detected: disabled LAB/IBB rails\n");
+		return 0;
+	}
+
+	if (!labibb->ibb_vreg.vreg_enabled && !labibb->swire_control) {
+		if (!labibb->standalone)
+			return qpnp_labibb_regulator_enable(labibb);
+
+		rc = qpnp_ibb_enable_standalone(labibb);
+		if (rc < 0) {
+			pr_err("enable ibb standalone failed, rc=%d\n", rc);
+			return rc;
+		}
+		labibb->ibb_vreg.vreg_enabled = 1;
+	}
+
+	return 0;
+}
+
+static int qpnp_ibb_regulator_disable(struct regulator_dev *rdev)
+{
+	int rc;
+	struct qpnp_labibb *labibb  = rdev_get_drvdata(rdev);
+
+	if (labibb->ibb_vreg.vreg_enabled && !labibb->swire_control) {
+		if (!labibb->standalone)
+			return qpnp_labibb_regulator_disable(labibb);
+
+		rc = qpnp_ibb_set_mode(labibb, IBB_SW_CONTROL_DIS);
+		if (rc < 0) {
+			pr_err("Unable to set IBB_MODULE_EN rc = %d\n", rc);
+			return rc;
+		}
+
+		labibb->ibb_vreg.vreg_enabled = 0;
+	}
+	return 0;
+}
+
+static int qpnp_ibb_regulator_is_enabled(struct regulator_dev *rdev)
+{
+	struct qpnp_labibb *labibb  = rdev_get_drvdata(rdev);
+
+	if (labibb->swire_control)
+		return 0;
+
+	return labibb->ibb_vreg.vreg_enabled;
+}
+
+static int qpnp_ibb_regulator_set_voltage(struct regulator_dev *rdev,
+				int min_uV, int max_uV, unsigned int *selector)
+{
+	int rc = 0;
+	struct qpnp_labibb *labibb = rdev_get_drvdata(rdev);
+
+	if (labibb->swire_control)
+		return 0;
+
+	rc = labibb->ibb_ver_ops->set_voltage(labibb, min_uV, max_uV);
+	return rc;
+}
+
+static int qpnp_ibb_regulator_get_voltage(struct regulator_dev *rdev)
+{
+	struct qpnp_labibb *labibb  = rdev_get_drvdata(rdev);
+
+	if (labibb->swire_control)
+		return 0;
+
+	return labibb->ibb_vreg.curr_volt;
+}
+
+static struct regulator_ops qpnp_ibb_ops = {
+	.enable			= qpnp_ibb_regulator_enable,
+	.disable		= qpnp_ibb_regulator_disable,
+	.is_enabled		= qpnp_ibb_regulator_is_enabled,
+	.set_voltage		= qpnp_ibb_regulator_set_voltage,
+	.get_voltage		= qpnp_ibb_regulator_get_voltage,
+};
+
+static int register_qpnp_ibb_regulator(struct qpnp_labibb *labibb,
+					struct device_node *of_node)
+{
+	int rc = 0;
+	struct regulator_init_data *init_data;
+	struct regulator_desc *rdesc = &labibb->ibb_vreg.rdesc;
+	struct regulator_config cfg = {};
+	u8 val, ibb_enable_ctl, index;
+	u32 tmp;
+
+	if (!of_node) {
+		dev_err(labibb->dev, "qpnp ibb regulator device tree node is missing\n");
+		return -EINVAL;
+	}
+
+	init_data = of_get_regulator_init_data(labibb->dev, of_node, rdesc);
+	if (!init_data) {
+		pr_err("unable to get regulator init data for qpnp ibb regulator\n");
+		return -ENOMEM;
+	}
+
+	rc = of_property_read_u32(of_node, "qcom,qpnp-ibb-min-voltage",
+					&(labibb->ibb_vreg.min_volt));
+	if (rc < 0) {
+		pr_err("qcom,qpnp-ibb-min-voltage is missing, rc = %d\n",
+			rc);
+		return rc;
+	}
+
+	rc = of_property_read_u32(of_node, "qcom,qpnp-ibb-step-size",
+					&(labibb->ibb_vreg.step_size));
+	if (rc < 0) {
+		pr_err("qcom,qpnp-ibb-step-size is missing, rc = %d\n", rc);
+		return rc;
+	}
+
+	rc = of_property_read_u32(of_node, "qcom,qpnp-ibb-slew-rate",
+					&(labibb->ibb_vreg.slew_rate));
+	if (rc < 0)
+		labibb->ibb_vreg.slew_rate = IBB_HW_DEFAULT_SLEW_RATE;
+
+	rc = labibb->ibb_ver_ops->soft_start_ctl(labibb, of_node);
+	if (rc < 0) {
+		pr_err("qpnp_labibb_write register %x failed rc = %d\n",
+			REG_IBB_SOFT_START_CTL, rc);
+		return rc;
+	}
+
+	if (of_find_property(of_node, "qcom,output-voltage-one-pulse", NULL)) {
+		if (!labibb->swire_control) {
+			pr_err("output-voltage-one-pulse valid for SWIRE only\n");
+			return -EINVAL;
+		}
+		rc = of_property_read_u32(of_node,
+				"qcom,output-voltage-one-pulse", &tmp);
+		if (rc < 0) {
+			pr_err("failed to read qcom,output-voltage-one-pulse rc=%d\n",
+									rc);
+			return rc;
+		}
+		if (tmp > MAX_OUTPUT_PULSE_VOLTAGE_MV ||
+				tmp < MIN_OUTPUT_PULSE_VOLTAGE_MV) {
+			pr_err("Invalid one-pulse voltage range %d\n", tmp);
+			return -EINVAL;
+		}
+		rc = labibb->ibb_ver_ops->voltage_at_one_pulse(labibb, tmp);
+		if (rc < 0)
+			return rc;
+	}
+
+	rc = qpnp_labibb_read(labibb, labibb->ibb_base + REG_IBB_ENABLE_CTL,
+				&ibb_enable_ctl, 1);
+	if (rc < 0) {
+		pr_err("qpnp_ibb_read register %x failed rc = %d\n",
+			REG_IBB_ENABLE_CTL, rc);
+		return rc;
+	}
+
+	/*
+	 * For pmi8998, override swire_control with what was configured
+	 * before by the bootloader.
+	 */
+	if (labibb->pmic_rev_id->pmic_subtype == PMI8998_SUBTYPE)
+		labibb->swire_control = ibb_enable_ctl &
+						IBB_ENABLE_CTL_SWIRE_RDY;
+
+	if (ibb_enable_ctl &
+		(IBB_ENABLE_CTL_SWIRE_RDY | IBB_ENABLE_CTL_MODULE_EN)) {
+		rc = labibb->ibb_ver_ops->get_mode(labibb);
+		if (rc < 0) {
+			pr_err("qpnp_labibb_read register %x failed rc = %d\n",
+				REG_IBB_LCD_AMOLED_SEL, rc);
+			return rc;
+		}
+		rc = qpnp_labibb_read(labibb, labibb->ibb_base +
+					REG_IBB_VOLTAGE, &val, 1);
+		if (rc < 0) {
+			pr_err("qpnp_labibb_read read register %x failed rc = %d\n",
+				REG_IBB_VOLTAGE, rc);
+			return rc;
+		}
+
+		labibb->ibb_vreg.curr_volt =
+			(val & IBB_VOLTAGE_SET_MASK) *
+			labibb->ibb_vreg.step_size +
+			labibb->ibb_vreg.min_volt;
+
+		if (labibb->mode == QPNP_LABIBB_LCD_MODE) {
+			rc = of_property_read_u32(of_node,
+				"qcom,qpnp-ibb-init-lcd-voltage",
+				&(labibb->ibb_vreg.curr_volt));
+			if (rc < 0) {
+				pr_err("get qcom,qpnp-ibb-init-lcd-voltage failed, rc = %d\n",
+					rc);
+				return rc;
+			}
+		} else if (!(val & IBB_VOLTAGE_OVERRIDE_EN)) {
+			rc = of_property_read_u32(of_node,
+				"qcom,qpnp-ibb-init-amoled-voltage",
+				&(labibb->ibb_vreg.curr_volt));
+			if (rc < 0) {
+				pr_err("get qcom,qpnp-ibb-init-amoled-voltage failed, rc = %d\n",
+					rc);
+				return rc;
+			}
+		}
+
+		rc = qpnp_labibb_read(labibb, labibb->ibb_base +
+				REG_IBB_PWRUP_PWRDN_CTL_1, &val, 1);
+		if (rc < 0) {
+			pr_err("qpnp_labibb_config_init read register %x failed rc = %d\n",
+				REG_IBB_PWRUP_PWRDN_CTL_1, rc);
+			return rc;
+		}
+
+		index = (val & IBB_PWRUP_PWRDN_CTL_1_DLY1_MASK) >>
+				IBB_PWRUP_PWRDN_CTL_1_DLY1_SHIFT;
+		labibb->ibb_vreg.pwrup_dly = ibb_pwrup_dly_table[index];
+		index = val & IBB_PWRUP_PWRDN_CTL_1_DLY2_MASK;
+		labibb->ibb_vreg.pwrdn_dly = ibb_pwrdn_dly_table[index];
+
+		labibb->ibb_vreg.vreg_enabled = 1;
+	} else {
+		/* SWIRE_RDY and IBB_MODULE_EN not enabled */
+		rc = qpnp_ibb_dt_init(labibb, of_node);
+		if (rc < 0) {
+			pr_err("qpnp-ibb: wrong DT parameter specified: rc = %d\n",
+				rc);
+			return rc;
+		}
+	}
+
+	if (labibb->mode == QPNP_LABIBB_AMOLED_MODE &&
+			qpnp_ibb_poff_ctl_required(labibb)) {
+		val = IBB_OVERRIDE_NONOVERLAP | IBB_NFET_GATE_DELAY_2;
+		rc = qpnp_labibb_sec_masked_write(labibb, labibb->ibb_base,
+			REG_IBB_NONOVERLAP_TIME_1,
+			IBB_OVERRIDE_NONOVERLAP | IBB_NONOVERLAP_NFET_MASK,
+			val);
+
+		if (rc < 0) {
+			pr_err("qpnp_labibb_sec_masked_write register %x failed rc = %d\n",
+				REG_IBB_NONOVERLAP_TIME_1, rc);
+			return rc;
+		}
+
+		val = IBB_N2P_MUX_SEL;
+		rc = qpnp_labibb_sec_write(labibb, labibb->ibb_base,
+			REG_IBB_NONOVERLAP_TIME_2, val);
+
+		if (rc < 0) {
+			pr_err("qpnp_labibb_sec_write register %x failed rc = %d\n",
+				REG_IBB_NONOVERLAP_TIME_2, rc);
+			return rc;
+		}
+
+		val = IBB_FASTER_PFET_OFF;
+		rc = qpnp_labibb_masked_write(labibb,
+				labibb->ibb_base + REG_IBB_SPARE_CTL,
+				IBB_POFF_CTL_MASK, val);
+		if (rc < 0) {
+			pr_err("write to register %x failed rc = %d\n",
+				 REG_IBB_SPARE_CTL, rc);
+			return rc;
+		}
+	}
+
+	if (labibb->standalone) {
+		val = 0;
+		rc = qpnp_labibb_sec_write(labibb, labibb->ibb_base,
+				REG_IBB_PWRUP_PWRDN_CTL_1, val);
+		if (rc < 0) {
+			pr_err("qpnp_labibb_sec_write register %x failed rc = %d\n",
+				REG_IBB_PWRUP_PWRDN_CTL_1, rc);
+			return rc;
+		}
+		labibb->ibb_vreg.pwrup_dly = 0;
+		labibb->ibb_vreg.pwrdn_dly = 0;
+	}
+
+	if (labibb->ibb_vreg.ibb_sc_irq != -EINVAL) {
+		rc = devm_request_threaded_irq(labibb->dev,
+				labibb->ibb_vreg.ibb_sc_irq, NULL,
+				labibb_sc_err_handler,
+				IRQF_ONESHOT | IRQF_TRIGGER_RISING,
+				"ibb-sc-err", labibb);
+		if (rc) {
+			pr_err("Failed to register 'ibb-sc-err' irq rc=%d\n",
+						rc);
+			return rc;
+		}
+	}
+
+	rc = qpnp_labibb_read(labibb, labibb->ibb_base + REG_IBB_MODULE_RDY,
+				&val, 1);
+	if (rc < 0) {
+		pr_err("qpnp_ibb_read read register %x failed rc = %d\n",
+			REG_IBB_MODULE_RDY, rc);
+		return rc;
+	}
+
+	if (!(val & IBB_MODULE_RDY_EN)) {
+		val = IBB_MODULE_RDY_EN;
+
+		rc = qpnp_labibb_write(labibb, labibb->ibb_base +
+			REG_IBB_MODULE_RDY, &val, 1);
+
+		if (rc < 0) {
+			pr_err("qpnp_ibb_dt_init write register %x failed rc = %d\n",
+				REG_IBB_MODULE_RDY, rc);
+			return rc;
+		}
+	}
+
+	if (of_property_read_bool(of_node,
+			"qcom,qpnp-ibb-enable-pfm-mode")) {
+		rc = qpnp_ibb_pfm_mode_enable(labibb, of_node);
+		if (rc < 0)
+			return rc;
+	}
+
+	if (labibb->pbs_control) {
+		rc = qpnp_labibb_pbs_mode_enable(labibb, of_node);
+		if (rc < 0)
+			return rc;
+	}
+
+	if (init_data->constraints.name) {
+		rdesc->owner		= THIS_MODULE;
+		rdesc->type		= REGULATOR_VOLTAGE;
+		rdesc->ops		= &qpnp_ibb_ops;
+		rdesc->name		= init_data->constraints.name;
+
+		cfg.dev = labibb->dev;
+		cfg.init_data = init_data;
+		cfg.driver_data = labibb;
+		cfg.of_node = of_node;
+
+		if (of_get_property(labibb->dev->of_node, "parent-supply",
+				 NULL))
+			init_data->supply_regulator = "parent";
+
+		init_data->constraints.valid_ops_mask
+				|= REGULATOR_CHANGE_VOLTAGE |
+					REGULATOR_CHANGE_STATUS;
+
+		labibb->ibb_vreg.rdev = regulator_register(rdesc, &cfg);
+		if (IS_ERR(labibb->ibb_vreg.rdev)) {
+			rc = PTR_ERR(labibb->ibb_vreg.rdev);
+			labibb->ibb_vreg.rdev = NULL;
+			pr_err("unable to register qpnp ibb regulator, rc = %d\n",
+				rc);
+
+			return rc;
+		}
+	} else {
+		dev_err(labibb->dev, "qpnp ibb regulator name missing\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int qpnp_lab_register_irq(struct device_node *child,
+				struct qpnp_labibb *labibb)
+{
+	int rc = 0;
+
+	if (is_lab_vreg_ok_irq_available(labibb)) {
+		rc = of_irq_get_byname(child, "lab-vreg-ok");
+		if (rc < 0) {
+			pr_err("Invalid lab-vreg-ok irq\n");
+			return rc;
+		}
+		labibb->lab_vreg.lab_vreg_ok_irq = rc;
+	}
+
+	labibb->lab_vreg.lab_sc_irq = -EINVAL;
+	rc = of_irq_get_byname(child, "lab-sc-err");
+	if (rc < 0)
+		pr_debug("Unable to get lab-sc-err, rc = %d\n", rc);
+	else
+		labibb->lab_vreg.lab_sc_irq = rc;
+
+	return 0;
+}
+
+static int qpnp_ibb_register_irq(struct device_node *child,
+				struct qpnp_labibb *labibb)
+{
+	int rc;
+
+	labibb->ibb_vreg.ibb_sc_irq = -EINVAL;
+	rc = of_irq_get_byname(child, "ibb-sc-err");
+	if (rc < 0)
+		pr_debug("Unable to get ibb-sc-err, rc = %d\n", rc);
+	else
+		labibb->ibb_vreg.ibb_sc_irq = rc;
+
+	return 0;
+}
+
+static int qpnp_labibb_check_ttw_supported(struct qpnp_labibb *labibb)
+{
+	int rc = 0;
+	u8 val;
+
+	switch (labibb->pmic_rev_id->pmic_subtype) {
+	case PMI8996_SUBTYPE:
+		rc = qpnp_labibb_read(labibb, labibb->ibb_base +
+					REG_IBB_REVISION4, &val, 1);
+		if (rc < 0) {
+			pr_err("qpnp_labibb_read register %x failed rc = %d\n",
+				REG_IBB_REVISION4, rc);
+			return rc;
+		}
+
+		/* PMI8996 has revision 1 */
+		if (val < 1) {
+			pr_err("TTW feature cannot be enabled for revision %d\n",
+									val);
+			labibb->ttw_en = false;
+		}
+		/* FORCE_LAB_ON in TTW is not required for PMI8996 */
+		labibb->ttw_force_lab_on = false;
+		break;
+	case PMI8950_SUBTYPE:
+		/* TTW supported for all revisions */
+		break;
+	default:
+		pr_info("TTW mode not supported for PMIC-subtype = %d\n",
+					labibb->pmic_rev_id->pmic_subtype);
+		labibb->ttw_en = false;
+		break;
+	}
+	return rc;
+}
+
+static int qpnp_labibb_regulator_probe(struct platform_device *pdev)
+{
+	struct qpnp_labibb *labibb;
+	unsigned int base;
+	struct device_node *child, *revid_dev_node;
+	const char *mode_name;
+	u8 type, revision;
+	int rc = 0;
+
+	labibb = devm_kzalloc(&pdev->dev, sizeof(*labibb), GFP_KERNEL);
+	if (labibb == NULL)
+		return -ENOMEM;
+
+	labibb->regmap = dev_get_regmap(pdev->dev.parent, NULL);
+	if (!labibb->regmap) {
+		dev_err(&pdev->dev, "Couldn't get parent's regmap\n");
+		return -EINVAL;
+	}
+
+	labibb->dev = &(pdev->dev);
+	labibb->pdev = pdev;
+
+	mutex_init(&(labibb->lab_vreg.lab_mutex));
+	mutex_init(&(labibb->ibb_vreg.ibb_mutex));
+	mutex_init(&(labibb->bus_mutex));
+
+	revid_dev_node = of_parse_phandle(labibb->dev->of_node,
+					"qcom,pmic-revid", 0);
+	if (!revid_dev_node) {
+		pr_err("Missing qcom,pmic-revid property - driver failed\n");
+		return -EINVAL;
+	}
+
+	labibb->pmic_rev_id = get_revid_data(revid_dev_node);
+	if (IS_ERR(labibb->pmic_rev_id)) {
+		pr_debug("Unable to get revid data\n");
+		return -EPROBE_DEFER;
+	}
+
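+	/* select version-specific LAB/IBB register ops per PMIC subtype */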
+	if (labibb->pmic_rev_id->pmic_subtype == PM660L_SUBTYPE) {
+		labibb->ibb_ver_ops = &ibb_ops_v2;
+		labibb->lab_ver_ops = &pm660_lab_ops;
+	} else if (labibb->pmic_rev_id->pmic_subtype == PMI8998_SUBTYPE) {
+		labibb->ibb_ver_ops = &ibb_ops_v1;
+		labibb->lab_ver_ops = &pmi8998_lab_ops;
+	} else {
+		labibb->ibb_ver_ops = &ibb_ops_v1;
+		labibb->lab_ver_ops = &lab_ops_v1;
+	}
+
+	if (labibb->pmic_rev_id->pmic_subtype == PM660L_SUBTYPE) {
+		labibb->mode = QPNP_LABIBB_AMOLED_MODE;
+		/* Enable polling for LAB short circuit detection for PM660A */
+		labibb->detect_lab_sc = true;
+	} else {
+		rc = of_property_read_string(labibb->dev->of_node,
+				"qcom,qpnp-labibb-mode", &mode_name);
+		if (!rc) {
+			if (strcmp("lcd", mode_name) == 0) {
+				labibb->mode = QPNP_LABIBB_LCD_MODE;
+			} else if (strcmp("amoled", mode_name) == 0) {
+				labibb->mode = QPNP_LABIBB_AMOLED_MODE;
+			} else {
+				pr_err("Invalid device property in qcom,qpnp-labibb-mode: %s\n",
+					mode_name);
+				return -EINVAL;
+			}
+		} else {
+			pr_err("qpnp_labibb: qcom,qpnp-labibb-mode is missing.\n");
+			return rc;
+		}
+	}
+
+	labibb->standalone = of_property_read_bool(labibb->dev->of_node,
+				"qcom,labibb-standalone");
+
+	labibb->ttw_en = of_property_read_bool(labibb->dev->of_node,
+				"qcom,labibb-touch-to-wake-en");
+	if (labibb->ttw_en && labibb->mode != QPNP_LABIBB_LCD_MODE) {
+		pr_err("Invalid mode for TTW\n");
+		return -EINVAL;
+	}
+
+	labibb->ttw_force_lab_on = of_property_read_bool(
+		labibb->dev->of_node, "qcom,labibb-ttw-force-lab-on");
+
+	labibb->swire_control = of_property_read_bool(labibb->dev->of_node,
+							"qcom,swire-control");
+
+	labibb->pbs_control = of_property_read_bool(labibb->dev->of_node,
+							"qcom,pbs-control");
+	if (labibb->swire_control && labibb->mode != QPNP_LABIBB_AMOLED_MODE) {
+		pr_err("Invalid mode for SWIRE control\n");
+		return -EINVAL;
+	}
+
+	if (labibb->swire_control) {
+		labibb->skip_2nd_swire_cmd =
+				of_property_read_bool(labibb->dev->of_node,
+				"qcom,skip-2nd-swire-cmd");
+
+		rc = of_property_read_u32(labibb->dev->of_node,
+				"qcom,swire-2nd-cmd-delay",
+				&labibb->swire_2nd_cmd_delay);
+		if (rc < 0)
+			labibb->swire_2nd_cmd_delay =
+					SWIRE_DEFAULT_2ND_CMD_DLY_MS;
+
+		rc = of_property_read_u32(labibb->dev->of_node,
+				"qcom,swire-ibb-ps-enable-delay",
+				&labibb->swire_ibb_ps_enable_delay);
+		if (rc < 0)
+			labibb->swire_ibb_ps_enable_delay =
+					SWIRE_DEFAULT_IBB_PS_ENABLE_DLY_MS;
+	}
+
+	if (of_get_available_child_count(pdev->dev.of_node) == 0) {
+		pr_err("no child nodes\n");
+		return -ENXIO;
+	}
+
+	for_each_available_child_of_node(pdev->dev.of_node, child) {
+		rc = of_property_read_u32(child, "reg", &base);
+		if (rc < 0) {
+			dev_err(&pdev->dev,
+				"Couldn't find reg in node = %s rc = %d\n",
+				child->full_name, rc);
+			return rc;
+		}
+
+		rc = qpnp_labibb_read(labibb, base + REG_REVISION_2,
+					 &revision, 1);
+		if (rc < 0) {
+			pr_err("Reading REVISION_2 failed rc=%d\n", rc);
+			goto fail_registration;
+		}
+
+		rc = qpnp_labibb_read(labibb, base + REG_PERPH_TYPE,
+					&type, 1);
+		if (rc < 0) {
+			pr_err("Peripheral type read failed rc=%d\n", rc);
+			goto fail_registration;
+		}
+
+		switch (type) {
+		case QPNP_LAB_TYPE:
+			labibb->lab_base = base;
+			labibb->lab_dig_major = revision;
+			rc = qpnp_lab_register_irq(child, labibb);
+			if (rc) {
+				pr_err("Failed to register LAB IRQ rc=%d\n",
+							rc);
+				goto fail_registration;
+			}
+			rc = register_qpnp_lab_regulator(labibb, child);
+			if (rc < 0)
+				goto fail_registration;
+			break;
+
+		case QPNP_IBB_TYPE:
+			labibb->ibb_base = base;
+			labibb->ibb_dig_major = revision;
+			qpnp_ibb_register_irq(child, labibb);
+			rc = register_qpnp_ibb_regulator(labibb, child);
+			if (rc < 0)
+				goto fail_registration;
+			break;
+
+		default:
+			pr_err("qpnp_labibb: unknown peripheral type %x\n",
+				type);
+			rc = -EINVAL;
+			goto fail_registration;
+		}
+	}
+
+	if (labibb->ttw_en) {
+		rc = qpnp_labibb_check_ttw_supported(labibb);
+		if (rc < 0) {
+			pr_err("pmic revision check failed for TTW rc=%d\n",
+									rc);
+			goto fail_registration;
+		}
+	}
+
+	INIT_WORK(&labibb->lab_vreg_ok_work, qpnp_lab_vreg_notifier_work);
+	INIT_DELAYED_WORK(&labibb->sc_err_recovery_work,
+			labibb_sc_err_recovery_work);
+	hrtimer_init(&labibb->sc_err_check_timer,
+			CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+	labibb->sc_err_check_timer.function = labibb_check_sc_err_count;
+	dev_set_drvdata(&pdev->dev, labibb);
+	pr_info("LAB/IBB registered successfully, lab_vreg enable=%d ibb_vreg enable=%d swire_control=%d\n",
+						labibb->lab_vreg.vreg_enabled,
+						labibb->ibb_vreg.vreg_enabled,
+						labibb->swire_control);
+
+	return 0;
+
+fail_registration:
+	if (labibb->lab_vreg.rdev)
+		regulator_unregister(labibb->lab_vreg.rdev);
+	if (labibb->ibb_vreg.rdev)
+		regulator_unregister(labibb->ibb_vreg.rdev);
+
+	return rc;
+}
+
+int qpnp_labibb_notifier_register(struct notifier_block *nb)
+{
+	return raw_notifier_chain_register(&labibb_notifier, nb);
+}
+EXPORT_SYMBOL(qpnp_labibb_notifier_register);
+
+int qpnp_labibb_notifier_unregister(struct notifier_block *nb)
+{
+	return raw_notifier_chain_unregister(&labibb_notifier, nb);
+}
+EXPORT_SYMBOL(qpnp_labibb_notifier_unregister);
+
+static int qpnp_labibb_regulator_remove(struct platform_device *pdev)
+{
+	struct qpnp_labibb *labibb = dev_get_drvdata(&pdev->dev);
+
+	if (labibb) {
+		if (labibb->lab_vreg.rdev)
+			regulator_unregister(labibb->lab_vreg.rdev);
+		if (labibb->ibb_vreg.rdev)
+			regulator_unregister(labibb->ibb_vreg.rdev);
+
+		cancel_work_sync(&labibb->lab_vreg_ok_work);
+	}
+	return 0;
+}
+
+static const struct of_device_id spmi_match_table[] = {
+	{ .compatible = QPNP_LABIBB_REGULATOR_DRIVER_NAME, },
+	{ },
+};
+
+static struct platform_driver qpnp_labibb_regulator_driver = {
+	.driver		= {
+		.name		= QPNP_LABIBB_REGULATOR_DRIVER_NAME,
+		.of_match_table	= spmi_match_table,
+	},
+	.probe		= qpnp_labibb_regulator_probe,
+	.remove		= qpnp_labibb_regulator_remove,
+};
+
+static int __init qpnp_labibb_regulator_init(void)
+{
+	return platform_driver_register(&qpnp_labibb_regulator_driver);
+}
+arch_initcall(qpnp_labibb_regulator_init);
+
+static void __exit qpnp_labibb_regulator_exit(void)
+{
+	platform_driver_unregister(&qpnp_labibb_regulator_driver);
+}
+module_exit(qpnp_labibb_regulator_exit);
+
+MODULE_DESCRIPTION("QPNP labibb driver");
+MODULE_LICENSE("GPL v2");
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/regulator/qpnp-lcdb-regulator.c	2019-01-22 16:16:26.275271509 +0100
@@ -0,0 +1,1952 @@
+/*
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt)	"LCDB: %s: " fmt, __func__
+
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/ktime.h>
+#include <linux/module.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/of_regulator.h>
+#include <linux/regulator/machine.h>
+#include <linux/qpnp/qpnp-revid.h>
+
+#define QPNP_LCDB_REGULATOR_DRIVER_NAME		"qcom,qpnp-lcdb-regulator"
+
+/* LCDB */
+#define LCDB_STS1_REG			0x08
+
+#define INT_RT_STATUS_REG		0x10
+#define VREG_OK_RT_STS_BIT		BIT(0)
+#define SC_ERROR_RT_STS_BIT		BIT(1)
+
+#define LCDB_STS3_REG			0x0A
+#define LDO_VREG_OK_BIT			BIT(7)
+
+#define LCDB_STS4_REG			0x0B
+#define NCP_VREG_OK_BIT			BIT(7)
+
+#define LCDB_AUTO_TOUCH_WAKE_CTL_REG	0x40
+#define EN_AUTO_TOUCH_WAKE_BIT		BIT(7)
+#define ATTW_TOFF_TIME_MASK		GENMASK(3, 2)
+#define ATTW_TON_TIME_MASK		GENMASK(1, 0)
+#define ATTW_TOFF_TIME_SHIFT		2
+#define ATTW_MIN_MS			4
+#define ATTW_MAX_MS			32
+
+#define LCDB_BST_OUTPUT_VOLTAGE_REG	0x41
+
+#define LCDB_MODULE_RDY_REG		0x45
+#define MODULE_RDY_BIT			BIT(7)
+
+#define LCDB_ENABLE_CTL1_REG		0x46
+#define MODULE_EN_BIT			BIT(7)
+#define HWEN_RDY_BIT			BIT(6)
+
+/* BST */
+#define LCDB_BST_PD_CTL_REG		0x47
+#define BOOST_DIS_PULLDOWN_BIT		BIT(1)
+#define BOOST_PD_STRENGTH_BIT		BIT(0)
+
+#define LCDB_BST_ILIM_CTL_REG		0x4B
+#define EN_BST_ILIM_BIT			BIT(7)
+#define SET_BST_ILIM_MASK		GENMASK(2, 0)
+#define MIN_BST_ILIM_MA			200
+#define MAX_BST_ILIM_MA			1600
+
+#define LCDB_PS_CTL_REG			0x50
+#define EN_PS_BIT			BIT(7)
+#define PS_THRESHOLD_MASK		GENMASK(1, 0)
+#define MIN_BST_PS_MA			50
+#define MAX_BST_PS_MA			80
+
+#define LCDB_RDSON_MGMNT_REG		0x53
+#define NFET_SW_SIZE_MASK		GENMASK(3, 2)
+#define NFET_SW_SIZE_SHIFT		2
+#define PFET_SW_SIZE_MASK		GENMASK(1, 0)
+
+#define LCDB_BST_VREG_OK_CTL_REG	0x55
+#define BST_VREG_OK_DEB_MASK		GENMASK(1, 0)
+
+#define LCDB_SOFT_START_CTL_REG		0x5F
+
+#define LCDB_MISC_CTL_REG		0x60
+#define AUTO_GM_EN_BIT			BIT(4)
+#define EN_TOUCH_WAKE_BIT		BIT(3)
+#define DIS_SCP_BIT			BIT(0)
+
+#define LCDB_PFM_CTL_REG		0x62
+#define EN_PFM_BIT			BIT(7)
+#define BYP_BST_SOFT_START_COMP_BIT	BIT(0)
+#define PFM_HYSTERESIS_SHIFT		4
+#define PFM_CURRENT_SHIFT		2
+
+#define LCDB_PWRUP_PWRDN_CTL_REG	0x66
+
+/* LDO */
+#define LCDB_LDO_OUTPUT_VOLTAGE_REG	0x71
+#define SET_OUTPUT_VOLTAGE_MASK		GENMASK(4, 0)
+
+#define LCDB_LDO_VREG_OK_CTL_REG	0x75
+#define VREG_OK_DEB_MASK		GENMASK(1, 0)
+
+#define LCDB_LDO_PD_CTL_REG		0x77
+#define LDO_DIS_PULLDOWN_BIT		BIT(1)
+#define LDO_PD_STRENGTH_BIT		BIT(0)
+
+#define LCDB_LDO_ILIM_CTL1_REG		0x7B
+#define EN_LDO_ILIM_BIT			BIT(7)
+#define SET_LDO_ILIM_MASK		GENMASK(2, 0)
+#define MIN_LDO_ILIM_MA			110
+#define MAX_LDO_ILIM_MA			460
+#define LDO_ILIM_STEP_MA		50
+
+#define LCDB_LDO_ILIM_CTL2_REG		0x7C
+
+#define LCDB_LDO_SOFT_START_CTL_REG	0x7F
+#define SOFT_START_MASK			GENMASK(1, 0)
+
+/* NCP */
+#define LCDB_NCP_OUTPUT_VOLTAGE_REG	0x81
+
+#define LCDB_NCP_VREG_OK_CTL_REG	0x85
+
+#define LCDB_NCP_PD_CTL_REG		0x87
+#define NCP_DIS_PULLDOWN_BIT		BIT(1)
+#define NCP_PD_STRENGTH_BIT		BIT(0)
+
+#define LCDB_NCP_ILIM_CTL1_REG		0x8B
+#define EN_NCP_ILIM_BIT			BIT(7)
+#define SET_NCP_ILIM_MASK		GENMASK(1, 0)
+#define MIN_NCP_ILIM_MA			260
+#define MAX_NCP_ILIM_MA			810
+
+#define LCDB_NCP_ILIM_CTL2_REG		0x8C
+
+#define LCDB_NCP_SOFT_START_CTL_REG	0x8F
+
+/* common for BST/NCP/LDO */
+#define MIN_DBC_US			2
+#define MAX_DBC_US			32
+
+#define MIN_SOFT_START_US		0
+#define MAX_SOFT_START_US		2000
+
+#define BST_HEADROOM_DEFAULT_MV		200
+
+struct ldo_regulator {
+	struct regulator_desc		rdesc;
+	struct regulator_dev		*rdev;
+	struct device_node		*node;
+
+	/* LDO DT params */
+	int				pd;
+	int				pd_strength;
+	int				ilim_ma;
+	int				soft_start_us;
+	int				vreg_ok_dbc_us;
+	int				voltage_mv;
+};
+
+struct ncp_regulator {
+	struct regulator_desc		rdesc;
+	struct regulator_dev		*rdev;
+	struct device_node		*node;
+
+	/* NCP DT params */
+	int				pd;
+	int				pd_strength;
+	int				ilim_ma;
+	int				soft_start_us;
+	int				vreg_ok_dbc_us;
+	int				voltage_mv;
+};
+
+struct bst_params {
+	struct device_node		*node;
+
+	/* BST DT params */
+	int				pd;
+	int				pd_strength;
+	int				ilim_ma;
+	int				ps;
+	int				ps_threshold;
+	int				soft_start_us;
+	int				vreg_ok_dbc_us;
+	int				voltage_mv;
+	u16				headroom_mv;
+};
+
+struct qpnp_lcdb {
+	struct device			*dev;
+	struct platform_device		*pdev;
+	struct regmap			*regmap;
+	struct pmic_revid_data		*pmic_rev_id;
+	u32				base;
+	u32				wa_flags;
+	int				sc_irq;
+
+	/* TTW params */
+	bool				ttw_enable;
+	bool				ttw_mode_sw;
+
+	/* status parameters */
+	bool				lcdb_enabled;
+	bool				settings_saved;
+	bool				lcdb_sc_disable;
+	int				sc_count;
+	ktime_t				sc_module_enable_time;
+
+	struct mutex			lcdb_mutex;
+	struct mutex			read_write_mutex;
+	struct bst_params		bst;
+	struct ldo_regulator		ldo;
+	struct ncp_regulator		ncp;
+};
+
+struct settings {
+	u16	address;
+	u8	value;
+	bool	sec_access;
+};
+
+enum lcdb_module {
+	LDO,
+	NCP,
+	BST,
+};
+
+enum pfm_hysteresis {
+	PFM_HYST_15MV,
+	PFM_HYST_25MV,
+	PFM_HYST_35MV,
+	PFM_HYST_45MV,
+};
+
+enum pfm_peak_current {
+	PFM_PEAK_CURRENT_300MA,
+	PFM_PEAK_CURRENT_400MA,
+	PFM_PEAK_CURRENT_500MA,
+	PFM_PEAK_CURRENT_600MA,
+};
+
+enum rdson_fet_size {
+	RDSON_QUARTER,
+	RDSON_HALF,
+	RDSON_THREE_FOURTH,
+	RDSON_FULLSIZE,
+};
+
+enum lcdb_settings_index {
+	LCDB_BST_PD_CTL = 0,
+	LCDB_RDSON_MGMNT,
+	LCDB_MISC_CTL,
+	LCDB_SOFT_START_CTL,
+	LCDB_PFM_CTL,
+	LCDB_PWRUP_PWRDN_CTL,
+	LCDB_LDO_PD_CTL,
+	LCDB_LDO_SOFT_START_CTL,
+	LCDB_NCP_PD_CTL,
+	LCDB_NCP_SOFT_START_CTL,
+	LCDB_SETTING_MAX,
+};
+
+enum lcdb_wa_flags {
+	NCP_SCP_DISABLE_WA = BIT(0),
+};
+
+static u32 soft_start_us[] = {
+	0,
+	500,
+	1000,
+	2000,
+};
+
+static u32 dbc_us[] = {
+	2,
+	4,
+	16,
+	32,
+};
+
+static u32 ncp_ilim_ma[] = {
+	260,
+	460,
+	640,
+	810,
+};
+
+#define SETTING(_id, _sec_access)		\
+	[_id] = {				\
+		.address = _id##_REG,		\
+		.sec_access = _sec_access,	\
+	}
+
+static bool is_between(int value, int min, int max)
+{
+	if (value < min || value > max)
+		return false;
+	return true;
+}
+
+static int qpnp_lcdb_read(struct qpnp_lcdb *lcdb,
+			u16 addr, u8 *value, u8 count)
+{
+	int rc = 0;
+
+	mutex_lock(&lcdb->read_write_mutex);
+	rc = regmap_bulk_read(lcdb->regmap, addr, value, count);
+	if (rc < 0)
+		pr_err("Failed to read from addr=0x%02x rc=%d\n", addr, rc);
+	mutex_unlock(&lcdb->read_write_mutex);
+
+	return rc;
+}
+
+static int qpnp_lcdb_write(struct qpnp_lcdb *lcdb,
+			u16 addr, u8 *value, u8 count)
+{
+	int rc;
+
+	mutex_lock(&lcdb->read_write_mutex);
+	rc = regmap_bulk_write(lcdb->regmap, addr, value, count);
+	if (rc < 0)
+		pr_err("Failed to write to addr=0x%02x rc=%d\n", addr, rc);
+	mutex_unlock(&lcdb->read_write_mutex);
+
+	return rc;
+}
+
+#define SEC_ADDRESS_REG			0xD0
+#define SECURE_UNLOCK_VALUE		0xA5
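+/*
+ * Secure registers accept a write only after SECURE_UNLOCK_VALUE is
+ * written to SEC_ADDRESS_REG; the mutex keeps the unlock/write pair
+ * atomic.
+ */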
+static int qpnp_lcdb_secure_write(struct qpnp_lcdb *lcdb,
+					u16 addr, u8 value)
+{
+	int rc;
+	u8 val = SECURE_UNLOCK_VALUE;
+
+	mutex_lock(&lcdb->read_write_mutex);
+	rc = regmap_write(lcdb->regmap, lcdb->base + SEC_ADDRESS_REG, val);
+	if (rc < 0) {
+		pr_err("Failed to unlock register rc=%d\n", rc);
+		goto fail_write;
+	}
+	rc = regmap_write(lcdb->regmap, addr, value);
+	if (rc < 0)
+		pr_err("Failed to write to addr=0x%02x rc=%d\n", addr, rc);
+
+fail_write:
+	mutex_unlock(&lcdb->read_write_mutex);
+	return rc;
+}
+
+static int qpnp_lcdb_masked_write(struct qpnp_lcdb *lcdb,
+				u16 addr, u8 mask, u8 value)
+{
+	int rc = 0;
+
+	mutex_lock(&lcdb->read_write_mutex);
+	rc = regmap_update_bits(lcdb->regmap, addr, mask, value);
+	if (rc < 0)
+		pr_err("Failed to write addr=0x%02x value=0x%02x rc=%d\n",
+			addr, value, rc);
+	mutex_unlock(&lcdb->read_write_mutex);
+
+	return rc;
+}
+
+static bool is_lcdb_enabled(struct qpnp_lcdb *lcdb)
+{
+	int rc;
+	u8 val = 0;
+
+	rc = qpnp_lcdb_read(lcdb, lcdb->base + LCDB_ENABLE_CTL1_REG, &val, 1);
+	if (rc < 0)
+		pr_err("Failed to read ENABLE_CTL1 rc=%d\n", rc);
+
+	return rc ? false : !!(val & MODULE_EN_BIT);
+}
+
+static int dump_status_registers(struct qpnp_lcdb *lcdb)
+{
+	int rc = 0;
+	u8 sts[6] = {0};
+
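+	/* the STS registers latch only after being written with any value */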
+	rc = qpnp_lcdb_write(lcdb, lcdb->base + LCDB_STS1_REG, &sts[0], 6);
+	if (rc < 0) {
+		pr_err("Failed to write to STS registers rc=%d\n", rc);
+	} else {
+		rc = qpnp_lcdb_read(lcdb, lcdb->base + LCDB_STS1_REG, sts, 6);
+		if (rc < 0)
+			pr_err("Failed to read lcdb status rc=%d\n", rc);
+		else
+			pr_err("STS1=0x%02x STS2=0x%02x STS3=0x%02x STS4=0x%02x STS5=0x%02x, STS6=0x%02x\n",
+				sts[0], sts[1], sts[2], sts[3], sts[4], sts[5]);
+	}
+
+	return rc;
+}
+
+static struct settings lcdb_settings[] = {
+	SETTING(LCDB_BST_PD_CTL, false),
+	SETTING(LCDB_RDSON_MGMNT, false),
+	SETTING(LCDB_MISC_CTL, false),
+	SETTING(LCDB_SOFT_START_CTL, false),
+	SETTING(LCDB_PFM_CTL, false),
+	SETTING(LCDB_PWRUP_PWRDN_CTL, true),
+	SETTING(LCDB_LDO_PD_CTL, false),
+	SETTING(LCDB_LDO_SOFT_START_CTL, false),
+	SETTING(LCDB_NCP_PD_CTL, false),
+	SETTING(LCDB_NCP_SOFT_START_CTL, false),
+};
+
+static int qpnp_lcdb_save_settings(struct qpnp_lcdb *lcdb)
+{
+	int i, rc = 0;
+
+	for (i = 0; i < ARRAY_SIZE(lcdb_settings); i++) {
+		rc = qpnp_lcdb_read(lcdb, lcdb->base +
+				lcdb_settings[i].address,
+				&lcdb_settings[i].value, 1);
+		if (rc < 0) {
+			pr_err("Failed to read lcdb register address=%x\n",
+						lcdb_settings[i].address);
+			return rc;
+		}
+	}
+
+	return rc;
+}
+
+static int qpnp_lcdb_restore_settings(struct qpnp_lcdb *lcdb)
+{
+	int i, rc = 0;
+
+	for (i = 0; i < ARRAY_SIZE(lcdb_settings); i++) {
+		if (lcdb_settings[i].sec_access)
+			rc = qpnp_lcdb_secure_write(lcdb, lcdb->base +
+					lcdb_settings[i].address,
+					lcdb_settings[i].value);
+		else
+			rc = qpnp_lcdb_write(lcdb, lcdb->base +
+					lcdb_settings[i].address,
+					&lcdb_settings[i].value, 1);
+		if (rc < 0) {
+			pr_err("Failed to write register address=%x\n",
+						lcdb_settings[i].address);
+			return rc;
+		}
+	}
+
+	return rc;
+}
+
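+/*
+ * Touch-to-wake: save the active-mode register settings once, reprogram
+ * the module for low-power operation, and let qpnp_lcdb_ttw_exit()
+ * restore the saved values on the next enable.
+ */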
+static int qpnp_lcdb_ttw_enter(struct qpnp_lcdb *lcdb)
+{
+	int rc;
+	u8 val;
+
+	if (!lcdb->settings_saved) {
+		rc = qpnp_lcdb_save_settings(lcdb);
+		if (rc < 0) {
+			pr_err("Failed to save LCDB settings rc=%d\n", rc);
+			return rc;
+		}
+		lcdb->settings_saved = true;
+	}
+
+	val = BOOST_DIS_PULLDOWN_BIT;
+	rc = qpnp_lcdb_write(lcdb, lcdb->base + LCDB_BST_PD_CTL_REG,
+							&val, 1);
+	if (rc < 0) {
+		pr_err("Failed to set BST PD rc=%d\n", rc);
+		return rc;
+	}
+
+	val = (RDSON_HALF << NFET_SW_SIZE_SHIFT) | RDSON_HALF;
+	rc = qpnp_lcdb_write(lcdb, lcdb->base + LCDB_RDSON_MGMNT_REG,
+							&val, 1);
+	if (rc < 0) {
+		pr_err("Failed to set RDSON MGMT rc=%d\n", rc);
+		return rc;
+	}
+
+	val = AUTO_GM_EN_BIT | EN_TOUCH_WAKE_BIT | DIS_SCP_BIT;
+	rc = qpnp_lcdb_write(lcdb, lcdb->base + LCDB_MISC_CTL_REG,
+							&val, 1);
+	if (rc < 0) {
+		pr_err("Failed to set MISC CTL rc=%d\n", rc);
+		return rc;
+	}
+
+	val = 0;
+	rc = qpnp_lcdb_write(lcdb, lcdb->base + LCDB_SOFT_START_CTL_REG,
+							&val, 1);
+	if (rc < 0) {
+		pr_err("Failed to set LCDB_SOFT_START rc=%d\n", rc);
+		return rc;
+	}
+
+	val = EN_PFM_BIT | (PFM_HYST_25MV << PFM_HYSTERESIS_SHIFT) |
+		     (PFM_PEAK_CURRENT_400MA << PFM_CURRENT_SHIFT) |
+				BYP_BST_SOFT_START_COMP_BIT;
+	rc = qpnp_lcdb_write(lcdb, lcdb->base + LCDB_PFM_CTL_REG,
+							&val, 1);
+	if (rc < 0) {
+		pr_err("Failed to set PFM_CTL rc=%d\n", rc);
+		return rc;
+	}
+
+	val = 0;
+	rc = qpnp_lcdb_secure_write(lcdb, lcdb->base + LCDB_PWRUP_PWRDN_CTL_REG,
+							val);
+	if (rc < 0) {
+		pr_err("Failed to set PWRUP_PWRDN_CTL rc=%d\n", rc);
+		return rc;
+	}
+
+	val = LDO_DIS_PULLDOWN_BIT;
+	rc = qpnp_lcdb_write(lcdb, lcdb->base + LCDB_LDO_PD_CTL_REG,
+							&val, 1);
+	if (rc < 0) {
+		pr_err("Failed to set LDO_PD_CTL rc=%d\n", rc);
+		return rc;
+	}
+
+	val = 0;
+	rc = qpnp_lcdb_write(lcdb, lcdb->base + LCDB_LDO_SOFT_START_CTL_REG,
+							&val, 1);
+	if (rc < 0) {
+		pr_err("Failed to set LDO_SOFT_START rc=%d\n", rc);
+		return rc;
+	}
+
+	val = NCP_DIS_PULLDOWN_BIT;
+	rc = qpnp_lcdb_write(lcdb, lcdb->base + LCDB_NCP_PD_CTL_REG,
+							&val, 1);
+	if (rc < 0) {
+		pr_err("Failed to set NCP_PD_CTL rc=%d\n", rc);
+		return rc;
+	}
+
+	val = 0;
+	rc = qpnp_lcdb_write(lcdb, lcdb->base + LCDB_NCP_SOFT_START_CTL_REG,
+							&val, 1);
+	if (rc < 0) {
+		pr_err("Failed to set NCP_SOFT_START rc=%d\n", rc);
+		return rc;
+	}
+
+	if (lcdb->ttw_mode_sw) {
+		rc = qpnp_lcdb_masked_write(lcdb, lcdb->base +
+				LCDB_AUTO_TOUCH_WAKE_CTL_REG,
+				EN_AUTO_TOUCH_WAKE_BIT,
+				EN_AUTO_TOUCH_WAKE_BIT);
+		if (rc < 0)
+			pr_err("Failed to enable auto(sw) TTW rc=%d\n", rc);
+	} else {
+		val = HWEN_RDY_BIT;
+		rc = qpnp_lcdb_write(lcdb, lcdb->base + LCDB_ENABLE_CTL1_REG,
+							&val, 1);
+		if (rc < 0)
+			pr_err("Failed to hw_enable lcdb rc=%d\n", rc);
+	}
+
+	return rc;
+}
+
+static int qpnp_lcdb_ttw_exit(struct qpnp_lcdb *lcdb)
+{
+	int rc;
+
+	if (lcdb->settings_saved) {
+		rc = qpnp_lcdb_restore_settings(lcdb);
+		if (rc < 0) {
+			pr_err("Failed to restore lcdb settings rc=%d\n", rc);
+			return rc;
+		}
+		lcdb->settings_saved = false;
+	}
+
+	return 0;
+}
+
+static int qpnp_lcdb_enable_wa(struct qpnp_lcdb *lcdb)
+{
+	int rc;
+	u8 val = 0;
+
+	/* required only for PM660L */
+	if (lcdb->pmic_rev_id->pmic_subtype != PM660L_SUBTYPE)
+		return 0;
+
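+	/* toggle MODULE_EN once (enable, then disable) before the real enable */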
+	val = MODULE_EN_BIT;
+	rc = qpnp_lcdb_write(lcdb, lcdb->base + LCDB_ENABLE_CTL1_REG,
+						&val, 1);
+	if (rc < 0) {
+		pr_err("Failed to enable lcdb rc=%d\n", rc);
+		return rc;
+	}
+
+	val = 0;
+	rc = qpnp_lcdb_write(lcdb, lcdb->base + LCDB_ENABLE_CTL1_REG,
+							&val, 1);
+	if (rc < 0) {
+		pr_err("Failed to disable lcdb rc=%d\n", rc);
+		return rc;
+	}
+
+	if (lcdb->wa_flags & NCP_SCP_DISABLE_WA) {
+		/*
+		 * delay to make sure that the MID pin - i.e. the
+		 * output of the LCDB boost - returns to 0V
+		 * after the module is disabled
+		 */
+		usleep_range(10000, 10100);
+
+		rc = qpnp_lcdb_masked_write(lcdb,
+				lcdb->base + LCDB_MISC_CTL_REG,
+				DIS_SCP_BIT, DIS_SCP_BIT);
+		if (rc < 0) {
+			pr_err("Failed to disable SC rc=%d\n", rc);
+			return rc;
+		}
+		/* delay for SC-disable to take effect */
+		usleep_range(1000, 1100);
+
+		rc = qpnp_lcdb_masked_write(lcdb,
+				lcdb->base + LCDB_MISC_CTL_REG,
+				DIS_SCP_BIT, 0);
+		if (rc < 0) {
+			pr_err("Failed to enable SC rc=%d\n", rc);
+			return rc;
+		}
+		/* delay for SC-enable to take effect */
+		usleep_range(1000, 1100);
+	}
+
+	return 0;
+}
+
+static int qpnp_lcdb_enable(struct qpnp_lcdb *lcdb)
+{
+	int rc = 0, timeout, delay;
+	u8 val = 0;
+
+	if (lcdb->lcdb_enabled || lcdb->lcdb_sc_disable) {
+		pr_debug("lcdb_enabled=%d lcdb_sc_disable=%d\n",
+			lcdb->lcdb_enabled, lcdb->lcdb_sc_disable);
+		return 0;
+	}
+
+	if (lcdb->ttw_enable) {
+		rc = qpnp_lcdb_ttw_exit(lcdb);
+		if (rc < 0) {
+			pr_err("Failed to exit TTW mode rc=%d\n", rc);
+			return rc;
+		}
+	}
+
+	rc = qpnp_lcdb_enable_wa(lcdb);
+	if (rc < 0) {
+		pr_err("Failed to execute enable_wa rc=%d\n", rc);
+		return rc;
+	}
+
+	val = MODULE_EN_BIT;
+	rc = qpnp_lcdb_write(lcdb, lcdb->base + LCDB_ENABLE_CTL1_REG,
+							&val, 1);
+	if (rc < 0) {
+		pr_err("Failed to enable lcdb rc=%d\n", rc);
+		goto fail_enable;
+	}
+
+	/* poll for vreg_ok */
+	timeout = 10;
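+	/* worst-case settle time: sum of all soft-start and vreg-ok debounce times */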
+	delay = lcdb->bst.soft_start_us + lcdb->ldo.soft_start_us +
+					lcdb->ncp.soft_start_us;
+	delay += lcdb->bst.vreg_ok_dbc_us + lcdb->ldo.vreg_ok_dbc_us +
+					lcdb->ncp.vreg_ok_dbc_us;
+	while (timeout--) {
+		rc = qpnp_lcdb_read(lcdb, lcdb->base + INT_RT_STATUS_REG,
+								&val, 1);
+		if (rc < 0) {
+			pr_err("Failed to poll for vreg-ok status rc=%d\n", rc);
+			break;
+		}
+		if (val & VREG_OK_RT_STS_BIT)
+			break;
+
+		usleep_range(delay, delay + 100);
+	}
+
+	if (rc || timeout < 0) {
+		if (timeout < 0) {
+			pr_err("lcdb-vreg-ok status failed to change\n");
+			rc = -ETIMEDOUT;
+		}
+		goto fail_enable;
+	}
+
+	lcdb->lcdb_enabled = true;
+	pr_debug("lcdb enabled successfully!\n");
+
+	return 0;
+
+fail_enable:
+	dump_status_registers(lcdb);
+	pr_err("Failed to enable lcdb rc=%d\n", rc);
+	return rc;
+}
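+
+/*
+ * Note on the poll budget above: each iteration sleeps for the sum of the
+ * three soft-start times plus the three vreg-ok debounce times read back
+ * at init.  Purely as an illustration, assuming 800 us soft-start and 8 us
+ * debounce per rail (the real values come from the soft_start_us and
+ * dbc_us tables defined earlier in this file), one iteration sleeps about
+ * 3 * (800 + 8) = 2424 us, so the 10 retries give a worst case of roughly
+ * 24 ms before -ETIMEDOUT.
+ */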
+
+static int qpnp_lcdb_disable(struct qpnp_lcdb *lcdb)
+{
+	int rc = 0;
+	u8 val;
+
+	if (!lcdb->lcdb_enabled)
+		return 0;
+
+	if (lcdb->ttw_enable) {
+		rc = qpnp_lcdb_ttw_enter(lcdb);
+		if (rc < 0) {
+			pr_err("Failed to enable TTW mode rc=%d\n", rc);
+			return rc;
+		}
+		lcdb->lcdb_enabled = false;
+
+		return 0;
+	}
+
+	val = 0;
+	rc = qpnp_lcdb_write(lcdb, lcdb->base + LCDB_ENABLE_CTL1_REG,
+							&val, 1);
+	if (rc < 0)
+		pr_err("Failed to disable lcdb rc= %d\n", rc);
+	else
+		lcdb->lcdb_enabled = false;
+
+	return rc;
+}
+
+#define LCDB_SC_RESET_CNT_DLY_US	1000000
+#define LCDB_SC_CNT_MAX			10
+static int qpnp_lcdb_handle_sc_event(struct qpnp_lcdb *lcdb)
+{
+	int rc = 0;
+	s64 elapsed_time_us;
+
+	mutex_lock(&lcdb->lcdb_mutex);
+	rc = qpnp_lcdb_disable(lcdb);
+	if (rc < 0) {
+		pr_err("Failed to disable lcdb rc=%d\n", rc);
+		goto unlock_mutex;
+	}
+
+	/* Check if the SC re-occurred immediately */
+	elapsed_time_us = ktime_us_delta(ktime_get(),
+			lcdb->sc_module_enable_time);
+	if (elapsed_time_us > LCDB_SC_RESET_CNT_DLY_US) {
+		lcdb->sc_count = 0;
+	} else if (lcdb->sc_count > LCDB_SC_CNT_MAX) {
+		pr_err("SC trigged %d times, disabling LCDB forever!\n",
+						lcdb->sc_count);
+		lcdb->lcdb_sc_disable = true;
+		goto unlock_mutex;
+	}
+	lcdb->sc_count++;
+	lcdb->sc_module_enable_time = ktime_get();
+
+	/* delay for SC to clear */
+	usleep_range(10000, 10100);
+
+	rc = qpnp_lcdb_enable(lcdb);
+	if (rc < 0)
+		pr_err("Failed to enable lcdb rc=%d\n", rc);
+
+unlock_mutex:
+	mutex_unlock(&lcdb->lcdb_mutex);
+	return rc;
+}
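+
+/*
+ * Recovery policy implemented above: on a short-circuit event the module
+ * is disabled, given 10 ms for the SC condition to clear, then re-enabled.
+ * sc_count only accumulates when events arrive less than
+ * LCDB_SC_RESET_CNT_DLY_US (1 s) apart; after more than LCDB_SC_CNT_MAX
+ * such back-to-back events, lcdb_sc_disable is latched and the module is
+ * left permanently disabled.
+ */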
+
+static irqreturn_t qpnp_lcdb_sc_irq_handler(int irq, void *data)
+{
+	struct qpnp_lcdb *lcdb = data;
+	int rc;
+	u8 val, val2[2] = {0};
+
+	rc = qpnp_lcdb_read(lcdb, lcdb->base + INT_RT_STATUS_REG, &val, 1);
+	if (rc < 0)
+		goto irq_handled;
+
+	if (val & SC_ERROR_RT_STS_BIT) {
+		rc = qpnp_lcdb_read(lcdb,
+			lcdb->base + LCDB_MISC_CTL_REG, &val, 1);
+		if (rc < 0)
+			goto irq_handled;
+
+		if (val & EN_TOUCH_WAKE_BIT) {
+			/* blanking time */
+			usleep_range(300, 310);
+			/*
+			 * The status registers need to be written with any
+			 * value before reading them
+			 */
+			rc = qpnp_lcdb_write(lcdb,
+				lcdb->base + LCDB_STS3_REG, val2, 2);
+			if (rc < 0)
+				goto irq_handled;
+
+			rc = qpnp_lcdb_read(lcdb,
+				lcdb->base + LCDB_STS3_REG, val2, 2);
+			if (rc < 0)
+				goto irq_handled;
+
+			if (!(val2[0] & LDO_VREG_OK_BIT) ||
+					!(val2[1] & NCP_VREG_OK_BIT)) {
+				rc = qpnp_lcdb_handle_sc_event(lcdb);
+				if (rc < 0) {
+					pr_err("Failed to handle SC rc=%d\n",
+								rc);
+					goto irq_handled;
+				}
+			}
+		} else {
+			/* blanking time */
+			usleep_range(2000, 2100);
+			/* Read the SC status again to confirm true SC */
+			rc = qpnp_lcdb_read(lcdb,
+				lcdb->base + INT_RT_STATUS_REG, &val, 1);
+			if (rc < 0)
+				goto irq_handled;
+
+			if (val & SC_ERROR_RT_STS_BIT) {
+				rc = qpnp_lcdb_handle_sc_event(lcdb);
+				if (rc < 0) {
+					pr_err("Failed to handle SC rc=%d\n",
+								rc);
+					goto irq_handled;
+				}
+			}
+		}
+	}
+irq_handled:
+	return IRQ_HANDLED;
+}
+
+#define MIN_BST_VOLTAGE_MV			4700
+#define MAX_BST_VOLTAGE_MV			6250
+#define MIN_VOLTAGE_MV				4000
+#define MAX_VOLTAGE_MV				6000
+#define VOLTAGE_MIN_STEP_100_MV			4000
+#define VOLTAGE_MIN_STEP_50_MV			4950
+#define VOLTAGE_STEP_100_MV			100
+#define VOLTAGE_STEP_50_MV			50
+#define VOLTAGE_STEP_50MV_OFFSET		0xA
+static int qpnp_lcdb_set_bst_voltage(struct qpnp_lcdb *lcdb,
+					int voltage_mv, u8 type)
+{
+	int rc = 0;
+	u8 val = 0;
+	int bst_voltage_mv;
+	struct ldo_regulator *ldo = &lcdb->ldo;
+	struct ncp_regulator *ncp = &lcdb->ncp;
+	struct bst_params *bst = &lcdb->bst;
+
+	/* Vout_Boost = headroom_mv + max(Vout_LDO, abs(Vout_NCP)) */
+	bst_voltage_mv = max(voltage_mv, max(ldo->voltage_mv, ncp->voltage_mv));
+	bst_voltage_mv += bst->headroom_mv;
+
+	if (bst_voltage_mv < MIN_BST_VOLTAGE_MV)
+		bst_voltage_mv = MIN_BST_VOLTAGE_MV;
+	else if (bst_voltage_mv > MAX_BST_VOLTAGE_MV)
+		bst_voltage_mv = MAX_BST_VOLTAGE_MV;
+
+	if (bst_voltage_mv != bst->voltage_mv) {
+		val = DIV_ROUND_UP(bst_voltage_mv - MIN_BST_VOLTAGE_MV,
+						VOLTAGE_STEP_50_MV);
+
+		rc = qpnp_lcdb_masked_write(lcdb, lcdb->base +
+					LCDB_BST_OUTPUT_VOLTAGE_REG,
+					SET_OUTPUT_VOLTAGE_MASK, val);
+		if (rc < 0) {
+			pr_err("Failed to set boost voltage %d mv rc=%d\n",
+				bst_voltage_mv, rc);
+		} else {
+			pr_debug("Boost voltage set = %d mv (0x%02x = 0x%02x)\n",
+			      bst_voltage_mv, LCDB_BST_OUTPUT_VOLTAGE_REG, val);
+			bst->voltage_mv = bst_voltage_mv;
+		}
+	}
+
+	return rc;
+}
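+
+/*
+ * Worked example for the boost encoding above, assuming the default
+ * headroom is 200 mV (BST_HEADROOM_DEFAULT_MV is defined earlier in this
+ * file): with LDO and NCP targets of 5500 mV the boost target becomes
+ * 5700 mV, and the register code is DIV_ROUND_UP(5700 - 4700, 50) = 20
+ * (0x14).
+ */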
+
+static int qpnp_lcdb_get_bst_voltage(struct qpnp_lcdb *lcdb,
+					u32 *voltage_mv)
+{
+	int rc;
+	u8 val = 0;
+
+	rc = qpnp_lcdb_read(lcdb, lcdb->base + LCDB_BST_OUTPUT_VOLTAGE_REG,
+						&val, 1);
+	if (rc < 0) {
+		pr_err("Failed to reat BST voltage rc=%d\n", rc);
+		return rc;
+	}
+
+	val &= SET_OUTPUT_VOLTAGE_MASK;
+	*voltage_mv = (val * VOLTAGE_STEP_50_MV) + MIN_BST_VOLTAGE_MV;
+
+	return 0;
+}
+
+static int qpnp_lcdb_set_voltage(struct qpnp_lcdb *lcdb,
+					int voltage_mv, u8 type)
+{
+	int rc = 0;
+	u16 offset = LCDB_LDO_OUTPUT_VOLTAGE_REG;
+	u8 val = 0;
+
+	if (!is_between(voltage_mv, MIN_VOLTAGE_MV, MAX_VOLTAGE_MV)) {
+		pr_err("Invalid voltage %dmv (min=%d max=%d)\n",
+			voltage_mv, MIN_VOLTAGE_MV, MAX_VOLTAGE_MV);
+		return -EINVAL;
+	}
+
+	rc = qpnp_lcdb_set_bst_voltage(lcdb, voltage_mv, type);
+	if (rc < 0) {
+		pr_err("Failed to set boost voltage rc=%d\n", rc);
+		return rc;
+	}
+
+	/* The logic below applies only to the LDO and NCP types */
+	if (voltage_mv < VOLTAGE_MIN_STEP_50_MV) {
+		val = DIV_ROUND_UP(voltage_mv - VOLTAGE_MIN_STEP_100_MV,
+						VOLTAGE_STEP_100_MV);
+	} else {
+		val = DIV_ROUND_UP(voltage_mv - VOLTAGE_MIN_STEP_50_MV,
+						VOLTAGE_STEP_50_MV);
+		val += VOLTAGE_STEP_50MV_OFFSET;
+	}
+
+	if (type == NCP)
+		offset = LCDB_NCP_OUTPUT_VOLTAGE_REG;
+
+	rc = qpnp_lcdb_masked_write(lcdb, lcdb->base + offset,
+				SET_OUTPUT_VOLTAGE_MASK, val);
+	if (rc < 0)
+		pr_err("Failed to set output voltage %d mv for %s rc=%d\n",
+			voltage_mv, (type == LDO) ? "LDO" : "NCP", rc);
+	else
+		pr_debug("%s voltage set = %d mv (0x%02x = 0x%02x)\n",
+			(type == LDO) ? "LDO" : "NCP", voltage_mv, offset, val);
+
+	return rc;
+}
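+
+/*
+ * Worked examples for the LDO/NCP encoding above: 4600 mV lies below
+ * VOLTAGE_MIN_STEP_50_MV and uses the 100 mV range, giving
+ * DIV_ROUND_UP(4600 - 4000, 100) = 0x06; 5000 mV uses the 50 mV range,
+ * giving DIV_ROUND_UP(5000 - 4950, 50) + 0xA = 0x0B.
+ * qpnp_lcdb_get_voltage() below applies the inverse mapping.
+ */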
+
+static int qpnp_lcdb_get_voltage(struct qpnp_lcdb *lcdb,
+					u32 *voltage_mv, u8 type)
+{
+	int rc = 0;
+	u16 offset = LCDB_LDO_OUTPUT_VOLTAGE_REG;
+	u8 val = 0;
+
+	if (type == BST)
+		return qpnp_lcdb_get_bst_voltage(lcdb, voltage_mv);
+
+	if (type == NCP)
+		offset = LCDB_NCP_OUTPUT_VOLTAGE_REG;
+
+	rc = qpnp_lcdb_read(lcdb, lcdb->base + offset, &val, 1);
+	if (rc < 0) {
+		pr_err("Failed to read %s volatge rc=%d\n",
+			(type == LDO) ? "LDO" : "NCP", rc);
+		return rc;
+	}
+
+	if (val < VOLTAGE_STEP_50MV_OFFSET) {
+		*voltage_mv = VOLTAGE_MIN_STEP_100_MV +
+				(val * VOLTAGE_STEP_100_MV);
+	} else {
+		*voltage_mv = VOLTAGE_MIN_STEP_50_MV +
+			((val - VOLTAGE_STEP_50MV_OFFSET) * VOLTAGE_STEP_50_MV);
+	}
+
+	pr_debug("%s voltage read-back = %d mv (0x%02x = 0x%02x)\n",
+				(type == LDO) ? "LDO" : "NCP",
+				*voltage_mv, offset, val);
+
+	return 0;
+}
+
+static int qpnp_lcdb_set_soft_start(struct qpnp_lcdb *lcdb,
+					u32 ss_us, u8 type)
+{
+	int rc = 0, i = 0;
+	u16 offset = LCDB_LDO_SOFT_START_CTL_REG;
+	u8 val = 0;
+
+	if (type == NCP)
+		offset = LCDB_NCP_SOFT_START_CTL_REG;
+
+	if (!is_between(ss_us, MIN_SOFT_START_US, MAX_SOFT_START_US)) {
+		pr_err("Invalid soft_start_us %d (min=%d max=%d)\n",
+			ss_us, MIN_SOFT_START_US, MAX_SOFT_START_US);
+		return -EINVAL;
+	}
+
+	i = 0;
+	while (ss_us > soft_start_us[i])
+		i++;
+	val = ((i == 0) ? 0 : i - 1) & SOFT_START_MASK;
+
+	rc = qpnp_lcdb_masked_write(lcdb,
+			lcdb->base + offset, SOFT_START_MASK, val);
+	if (rc < 0)
+		pr_err("Failed to write %s soft-start time %d rc=%d",
+			(type == LDO) ? "LDO" : "NCP", soft_start_us[i], rc);
+
+	return rc;
+}
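+
+/*
+ * Illustration of the index selection above, assuming a soft_start_us
+ * table of {0, 200, 400, 800} (the real table is defined earlier in this
+ * file): a request of 300 us stops the loop at the first entry >= the
+ * request (400 us, i = 2) and programs i - 1 = 1, i.e. the next lower
+ * table entry.
+ */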
+
+static int qpnp_lcdb_ldo_regulator_enable(struct regulator_dev *rdev)
+{
+	int rc = 0;
+	struct qpnp_lcdb *lcdb  = rdev_get_drvdata(rdev);
+
+	mutex_lock(&lcdb->lcdb_mutex);
+	rc = qpnp_lcdb_enable(lcdb);
+	if (rc < 0)
+		pr_err("Failed to enable lcdb rc=%d\n", rc);
+	mutex_unlock(&lcdb->lcdb_mutex);
+
+	return rc;
+}
+
+static int qpnp_lcdb_ldo_regulator_disable(struct regulator_dev *rdev)
+{
+	int rc = 0;
+	struct qpnp_lcdb *lcdb  = rdev_get_drvdata(rdev);
+
+	mutex_lock(&lcdb->lcdb_mutex);
+	rc = qpnp_lcdb_disable(lcdb);
+	if (rc < 0)
+		pr_err("Failed to disable lcdb rc=%d\n", rc);
+	mutex_unlock(&lcdb->lcdb_mutex);
+
+	return rc;
+}
+
+static int qpnp_lcdb_ldo_regulator_is_enabled(struct regulator_dev *rdev)
+{
+	struct qpnp_lcdb *lcdb  = rdev_get_drvdata(rdev);
+
+	return lcdb->lcdb_enabled;
+}
+
+static int qpnp_lcdb_ldo_regulator_set_voltage(struct regulator_dev *rdev,
+				int min_uV, int max_uV, unsigned int *selector)
+{
+	int rc = 0;
+	struct qpnp_lcdb *lcdb  = rdev_get_drvdata(rdev);
+
+	rc = qpnp_lcdb_set_voltage(lcdb, min_uV / 1000, LDO);
+	if (rc < 0)
+		pr_err("Failed to set LDO voltage rc=%c\n", rc);
+	else
+		lcdb->ldo.voltage_mv = min_uV / 1000;
+
+	return rc;
+}
+
+static int qpnp_lcdb_ldo_regulator_get_voltage(struct regulator_dev *rdev)
+{
+	int rc = 0;
+	u32 voltage_mv = 0;
+	struct qpnp_lcdb *lcdb  = rdev_get_drvdata(rdev);
+
+	rc = qpnp_lcdb_get_voltage(lcdb, &voltage_mv, LDO);
+	if (rc < 0) {
+		pr_err("Failed to get ldo voltage rc=%d\n", rc);
+		return rc;
+	}
+
+	return voltage_mv * 1000;
+}
+
+static struct regulator_ops qpnp_lcdb_ldo_ops = {
+	.enable			= qpnp_lcdb_ldo_regulator_enable,
+	.disable		= qpnp_lcdb_ldo_regulator_disable,
+	.is_enabled		= qpnp_lcdb_ldo_regulator_is_enabled,
+	.set_voltage		= qpnp_lcdb_ldo_regulator_set_voltage,
+	.get_voltage		= qpnp_lcdb_ldo_regulator_get_voltage,
+};
+
+static int qpnp_lcdb_ncp_regulator_enable(struct regulator_dev *rdev)
+{
+	int rc = 0;
+	struct qpnp_lcdb *lcdb  = rdev_get_drvdata(rdev);
+
+	mutex_lock(&lcdb->lcdb_mutex);
+	rc = qpnp_lcdb_enable(lcdb);
+	if (rc < 0)
+		pr_err("Failed to enable lcdb rc=%d\n", rc);
+	mutex_unlock(&lcdb->lcdb_mutex);
+
+	return rc;
+}
+
+static int qpnp_lcdb_ncp_regulator_disable(struct regulator_dev *rdev)
+{
+	int rc = 0;
+	struct qpnp_lcdb *lcdb  = rdev_get_drvdata(rdev);
+
+	mutex_lock(&lcdb->lcdb_mutex);
+	rc = qpnp_lcdb_disable(lcdb);
+	if (rc < 0)
+		pr_err("Failed to disable lcdb rc=%d\n", rc);
+	mutex_unlock(&lcdb->lcdb_mutex);
+
+	return rc;
+}
+
+static int qpnp_lcdb_ncp_regulator_is_enabled(struct regulator_dev *rdev)
+{
+	struct qpnp_lcdb *lcdb  = rdev_get_drvdata(rdev);
+
+	return lcdb->lcdb_enabled;
+}
+
+static int qpnp_lcdb_ncp_regulator_set_voltage(struct regulator_dev *rdev,
+				int min_uV, int max_uV, unsigned int *selector)
+{
+	int rc = 0;
+	struct qpnp_lcdb *lcdb  = rdev_get_drvdata(rdev);
+
+	rc = qpnp_lcdb_set_voltage(lcdb, min_uV / 1000, NCP);
+	if (rc < 0)
+		pr_err("Failed to set LDO voltage rc=%c\n", rc);
+	else
+		lcdb->ncp.voltage_mv = min_uV / 1000;
+
+	return rc;
+}
+
+static int qpnp_lcdb_ncp_regulator_get_voltage(struct regulator_dev *rdev)
+{
+	int rc;
+	u32 voltage_mv = 0;
+	struct qpnp_lcdb *lcdb  = rdev_get_drvdata(rdev);
+
+	rc = qpnp_lcdb_get_voltage(lcdb, &voltage_mv, NCP);
+	if (rc < 0) {
+		pr_err("Failed to get ncp voltage rc=%d\n", rc);
+		return rc;
+	}
+
+	return voltage_mv * 1000;
+}
+
+static struct regulator_ops qpnp_lcdb_ncp_ops = {
+	.enable			= qpnp_lcdb_ncp_regulator_enable,
+	.disable		= qpnp_lcdb_ncp_regulator_disable,
+	.is_enabled		= qpnp_lcdb_ncp_regulator_is_enabled,
+	.set_voltage		= qpnp_lcdb_ncp_regulator_set_voltage,
+	.get_voltage		= qpnp_lcdb_ncp_regulator_get_voltage,
+};
+
+static int qpnp_lcdb_regulator_register(struct qpnp_lcdb *lcdb, u8 type)
+{
+	int rc = 0;
+	struct regulator_init_data *init_data;
+	struct regulator_config cfg = {};
+	struct regulator_desc *rdesc;
+	struct regulator_dev *rdev;
+	struct device_node *node;
+
+	if (type == LDO) {
+		node			= lcdb->ldo.node;
+		rdesc			= &lcdb->ldo.rdesc;
+		rdesc->ops		= &qpnp_lcdb_ldo_ops;
+		rdev			= lcdb->ldo.rdev;
+	} else if (type == NCP) {
+		node			= lcdb->ncp.node;
+		rdesc			= &lcdb->ncp.rdesc;
+		rdesc->ops		= &qpnp_lcdb_ncp_ops;
+		rdev			= lcdb->ncp.rdev;
+	} else {
+		pr_err("Invalid regulator type %d\n", type);
+		return -EINVAL;
+	}
+
+	init_data = of_get_regulator_init_data(lcdb->dev, node, rdesc);
+	if (!init_data) {
+		pr_err("Failed to get regulator_init_data for %s\n",
+					(type == LDO) ? "LDO" : "NCP");
+		return -ENOMEM;
+	}
+
+	if (init_data->constraints.name) {
+		rdesc->owner		= THIS_MODULE;
+		rdesc->type		= REGULATOR_VOLTAGE;
+		rdesc->name		= init_data->constraints.name;
+
+		cfg.dev = lcdb->dev;
+		cfg.init_data = init_data;
+		cfg.driver_data = lcdb;
+		cfg.of_node = node;
+
+		if (of_get_property(lcdb->dev->of_node, "parent-supply", NULL))
+			init_data->supply_regulator = "parent";
+
+		init_data->constraints.valid_ops_mask
+				|= REGULATOR_CHANGE_VOLTAGE
+				| REGULATOR_CHANGE_STATUS;
+
+		rdev = devm_regulator_register(lcdb->dev, rdesc, &cfg);
+		if (IS_ERR(rdev)) {
+			rc = PTR_ERR(rdev);
+			rdev = NULL;
+			pr_err("Failed to register lcdb_%s regulator rc = %d\n",
+				(type == LDO) ? "LDO" : "NCP", rc);
+			return rc;
+		}
+	} else {
+		pr_err("%s_regulator name missing\n",
+				(type == LDO) ? "LDO" : "NCP");
+		return -EINVAL;
+	}
+
+	return rc;
+}
+
+static int qpnp_lcdb_parse_ttw(struct qpnp_lcdb *lcdb)
+{
+	int rc = 0;
+	u32 temp;
+	u8 val = 0;
+	struct device_node *node = lcdb->dev->of_node;
+
+	if (of_property_read_bool(node, "qcom,ttw-mode-sw")) {
+		lcdb->ttw_mode_sw = true;
+		rc = of_property_read_u32(node, "qcom,attw-toff-ms", &temp);
+		if (!rc) {
+			if (!is_between(temp, ATTW_MIN_MS, ATTW_MAX_MS)) {
+				pr_err("Invalid TOFF val %d (min=%d max=%d)\n",
+					temp, ATTW_MIN_MS, ATTW_MAX_MS);
+				return -EINVAL;
+			}
+			val = ilog2(temp / 4) << ATTW_TOFF_TIME_SHIFT;
+		} else {
+			pr_err("qcom,attw-toff-ms not specified for TTW SW mode\n");
+			return rc;
+		}
+
+		rc = of_property_read_u32(node, "qcom,attw-ton-ms", &temp);
+		if (!rc) {
+			if (!is_between(temp, ATTW_MIN_MS, ATTW_MAX_MS)) {
+				pr_err("Invalid TON value %d (min=%d max=%d)\n",
+					temp, ATTW_MIN_MS, ATTW_MAX_MS);
+				return -EINVAL;
+			}
+			val |= ilog2(temp / 4);
+		} else {
+			pr_err("qcom,attw-ton-ms not specified for TTW SW mode\n");
+			return rc;
+		}
+		rc = qpnp_lcdb_masked_write(lcdb, lcdb->base +
+				LCDB_AUTO_TOUCH_WAKE_CTL_REG,
+				ATTW_TON_TIME_MASK | ATTW_TOFF_TIME_MASK, val);
+		if (rc < 0) {
+			pr_err("Failed to write ATTW ON/OFF rc=%d\n", rc);
+			return rc;
+		}
+	}
+
+	return 0;
+}
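+
+/*
+ * Encoding example for the ATTW fields above: the hardware field holds
+ * log2 of the time expressed in 4 ms units, so qcom,attw-toff-ms = <32>
+ * programs ilog2(32 / 4) = 3 into the TOFF field; in effect only powers
+ * of two times 4 ms are representable exactly.
+ */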
+
+static int qpnp_lcdb_ldo_dt_init(struct qpnp_lcdb *lcdb)
+{
+	int rc = 0;
+	struct device_node *node = lcdb->ldo.node;
+
+	/* LDO output voltage */
+	lcdb->ldo.voltage_mv = -EINVAL;
+	rc = of_property_read_u32(node, "qcom,ldo-voltage-mv",
+					&lcdb->ldo.voltage_mv);
+	if (!rc && !is_between(lcdb->ldo.voltage_mv, MIN_VOLTAGE_MV,
+						MAX_VOLTAGE_MV)) {
+		pr_err("Invalid LDO voltage %dmv (min=%d max=%d)\n",
+			lcdb->ldo.voltage_mv, MIN_VOLTAGE_MV, MAX_VOLTAGE_MV);
+		return -EINVAL;
+	}
+
+	/* LDO PD configuration */
+	lcdb->ldo.pd = -EINVAL;
+	of_property_read_u32(node, "qcom,ldo-pd", &lcdb->ldo.pd);
+
+	lcdb->ldo.pd_strength = -EINVAL;
+	of_property_read_u32(node, "qcom,ldo-pd-strength",
+					&lcdb->ldo.pd_strength);
+
+	/* LDO ILIM configuration */
+	lcdb->ldo.ilim_ma = -EINVAL;
+	rc = of_property_read_u32(node, "qcom,ldo-ilim-ma", &lcdb->ldo.ilim_ma);
+	if (!rc && !is_between(lcdb->ldo.ilim_ma, MIN_LDO_ILIM_MA,
+						MAX_LDO_ILIM_MA)) {
+		pr_err("Invalid ilim_ma %d (min=%d, max=%d)\n",
+			lcdb->ldo.ilim_ma, MIN_LDO_ILIM_MA,
+					MAX_LDO_ILIM_MA);
+		return -EINVAL;
+	}
+
+	/* LDO soft-start (SS) configuration */
+	lcdb->ldo.soft_start_us = -EINVAL;
+	of_property_read_u32(node, "qcom,ldo-soft-start-us",
+					&lcdb->ldo.soft_start_us);
+
+	return 0;
+}
+
+static int qpnp_lcdb_ncp_dt_init(struct qpnp_lcdb *lcdb)
+{
+	int rc = 0;
+	struct device_node *node = lcdb->ncp.node;
+
+	/* NCP output voltage */
+	lcdb->ncp.voltage_mv = -EINVAL;
+	rc = of_property_read_u32(node, "qcom,ncp-voltage-mv",
+					&lcdb->ncp.voltage_mv);
+	if (!rc && !is_between(lcdb->ncp.voltage_mv, MIN_VOLTAGE_MV,
+						MAX_VOLTAGE_MV)) {
+		pr_err("Invalid NCP voltage %dmv (min=%d max=%d)\n",
+			lcdb->ncp.voltage_mv, MIN_VOLTAGE_MV, MAX_VOLTAGE_MV);
+		return -EINVAL;
+	}
+
+	/* NCP PD configuration */
+	lcdb->ncp.pd = -EINVAL;
+	of_property_read_u32(node, "qcom,ncp-pd", &lcdb->ncp.pd);
+
+	lcdb->ncp.pd_strength = -EINVAL;
+	of_property_read_u32(node, "qcom,ncp-pd-strength",
+					&lcdb->ncp.pd_strength);
+
+	/* NCP ILIM configuration */
+	lcdb->ncp.ilim_ma = -EINVAL;
+	rc = of_property_read_u32(node, "qcom,ncp-ilim-ma", &lcdb->ncp.ilim_ma);
+	if (!rc && !is_between(lcdb->ncp.ilim_ma, MIN_NCP_ILIM_MA,
+						MAX_NCP_ILIM_MA)) {
+		pr_err("Invalid ilim_ma %d (min=%d, max=%d)\n",
+			lcdb->ncp.ilim_ma, MIN_NCP_ILIM_MA, MAX_NCP_ILIM_MA);
+		return -EINVAL;
+	}
+
+	/* NCP soft-start (SS) configuration */
+	lcdb->ncp.soft_start_us = -EINVAL;
+	of_property_read_u32(node, "qcom,ncp-soft-start-us",
+					&lcdb->ncp.soft_start_us);
+
+	return 0;
+}
+
+static int qpnp_lcdb_bst_dt_init(struct qpnp_lcdb *lcdb)
+{
+	int rc = 0;
+	struct device_node *node = lcdb->bst.node;
+
+	/* Boost PD  configuration */
+	lcdb->bst.pd = -EINVAL;
+	of_property_read_u32(node, "qcom,bst-pd", &lcdb->bst.pd);
+
+	lcdb->bst.pd_strength = -EINVAL;
+	of_property_read_u32(node, "qcom,bst-pd-strength",
+					&lcdb->bst.pd_strength);
+
+	/* Boost ILIM */
+	lcdb->bst.ilim_ma = -EINVAL;
+	rc = of_property_read_u32(node, "qcom,bst-ilim-ma", &lcdb->bst.ilim_ma);
+	if (!rc && !is_between(lcdb->bst.ilim_ma, MIN_BST_ILIM_MA,
+						MAX_BST_ILIM_MA)) {
+		pr_err("Invalid ilim_ma %d (min=%d, max=%d)\n",
+			lcdb->bst.ilim_ma, MIN_BST_ILIM_MA, MAX_BST_ILIM_MA);
+		return -EINVAL;
+	}
+
+	/* Boost PS configuration */
+	lcdb->bst.ps = -EINVAL;
+	of_property_read_u32(node, "qcom,bst-ps", &lcdb->bst.ps);
+
+	lcdb->bst.ps_threshold = -EINVAL;
+	rc = of_property_read_u32(node, "qcom,bst-ps-threshold-ma",
+					&lcdb->bst.ps_threshold);
+	if (!rc && !is_between(lcdb->bst.ps_threshold,
+				MIN_BST_PS_MA, MAX_BST_PS_MA)) {
+		pr_err("Invalid bst ps_threshold %d (min=%d, max=%d)\n",
+			lcdb->bst.ps_threshold, MIN_BST_PS_MA, MAX_BST_PS_MA);
+		return -EINVAL;
+	}
+
+	/* Boost head room configuration */
+	of_property_read_u16(node, "qcom,bst-headroom-mv",
+					&lcdb->bst.headroom_mv);
+	if (lcdb->bst.headroom_mv < BST_HEADROOM_DEFAULT_MV)
+		lcdb->bst.headroom_mv = BST_HEADROOM_DEFAULT_MV;
+
+	return 0;
+}
+
+static int qpnp_lcdb_init_ldo(struct qpnp_lcdb *lcdb)
+{
+	int rc = 0, ilim_ma;
+	u8 val = 0;
+
+	/* configure parameters only if LCDB is disabled */
+	if (!is_lcdb_enabled(lcdb)) {
+		if (lcdb->ldo.voltage_mv != -EINVAL) {
+			rc = qpnp_lcdb_set_voltage(lcdb,
+					lcdb->ldo.voltage_mv, LDO);
+			if (rc < 0) {
+				pr_err("Failed to set voltage rc=%d\n", rc);
+				return rc;
+			}
+		}
+
+		if (lcdb->ldo.pd != -EINVAL) {
+			rc = qpnp_lcdb_masked_write(lcdb, lcdb->base +
+				LCDB_LDO_PD_CTL_REG, LDO_DIS_PULLDOWN_BIT,
+				lcdb->ldo.pd ? 0 : LDO_DIS_PULLDOWN_BIT);
+			if (rc < 0) {
+				pr_err("Failed to configure LDO PD rc=%d\n",
+								rc);
+				return rc;
+			}
+		}
+
+		if (lcdb->ldo.pd_strength != -EINVAL) {
+			rc = qpnp_lcdb_masked_write(lcdb, lcdb->base +
+				LCDB_LDO_PD_CTL_REG, LDO_PD_STRENGTH_BIT,
+				lcdb->ldo.pd_strength ?
+				LDO_PD_STRENGTH_BIT : 0);
+			if (rc < 0) {
+				pr_err("Failed to configure LDO PD strength %s rc=%d",
+						lcdb->ldo.pd_strength ?
+						"(strong)" : "(weak)", rc);
+				return rc;
+			}
+		}
+
+		if (lcdb->ldo.ilim_ma != -EINVAL) {
+			ilim_ma = lcdb->ldo.ilim_ma - MIN_LDO_ILIM_MA;
+			ilim_ma /= LDO_ILIM_STEP_MA;
+			val = (ilim_ma & SET_LDO_ILIM_MASK) | EN_LDO_ILIM_BIT;
+			rc = qpnp_lcdb_masked_write(lcdb, lcdb->base +
+					LCDB_LDO_ILIM_CTL1_REG,
+					SET_LDO_ILIM_MASK | EN_LDO_ILIM_BIT,
+					val);
+			if (rc < 0) {
+				pr_err("Failed to configure LDO ilim_ma (CTL1=%d) rc=%d",
+							val, rc);
+				return rc;
+			}
+
+			val = ilim_ma & SET_LDO_ILIM_MASK;
+			rc = qpnp_lcdb_masked_write(lcdb,
+					lcdb->base + LCDB_LDO_ILIM_CTL2_REG,
+					SET_LDO_ILIM_MASK, val);
+			if (rc < 0) {
+				pr_err("Failed to configure LDO ilim_ma (CTL2=%d) rc=%d",
+							val, rc);
+				return rc;
+			}
+		}
+
+		if (lcdb->ldo.soft_start_us != -EINVAL) {
+			rc = qpnp_lcdb_set_soft_start(lcdb,
+					lcdb->ldo.soft_start_us, LDO);
+			if (rc < 0) {
+				pr_err("Failed to set LDO soft_start rc=%d\n",
+									rc);
+				return rc;
+			}
+		}
+	}
+
+	rc = qpnp_lcdb_get_voltage(lcdb, &lcdb->ldo.voltage_mv, LDO);
+	if (rc < 0) {
+		pr_err("Failed to get LDO volatge rc=%d\n", rc);
+		return rc;
+	}
+
+	rc = qpnp_lcdb_read(lcdb, lcdb->base +
+			LCDB_LDO_VREG_OK_CTL_REG, &val, 1);
+	if (rc < 0) {
+		pr_err("Failed to read ldo_vreg_ok rc=%d\n", rc);
+		return rc;
+	}
+	lcdb->ldo.vreg_ok_dbc_us = dbc_us[val & VREG_OK_DEB_MASK];
+
+	rc = qpnp_lcdb_read(lcdb, lcdb->base +
+			LCDB_LDO_SOFT_START_CTL_REG, &val, 1);
+	if (rc < 0) {
+		pr_err("Failed to read ldo_soft_start_ctl rc=%d\n", rc);
+		return rc;
+	}
+	lcdb->ldo.soft_start_us = soft_start_us[val & SOFT_START_MASK];
+
+	rc = qpnp_lcdb_regulator_register(lcdb, LDO);
+	if (rc < 0)
+		pr_err("Failed to register ldo rc=%d\n", rc);
+
+	return rc;
+}
+
+static int qpnp_lcdb_init_ncp(struct qpnp_lcdb *lcdb)
+{
+	int rc = 0, i = 0;
+	u8 val = 0;
+
+	/* configure parameters only if LCDB is disabled */
+	if (!is_lcdb_enabled(lcdb)) {
+		if (lcdb->ncp.voltage_mv != -EINVAL) {
+			rc = qpnp_lcdb_set_voltage(lcdb,
+					lcdb->ncp.voltage_mv, NCP);
+			if (rc < 0) {
+				pr_err("Failed to set voltage rc=%d\n", rc);
+				return rc;
+			}
+		}
+
+		if (lcdb->ncp.pd != -EINVAL) {
+			rc = qpnp_lcdb_masked_write(lcdb, lcdb->base +
+				LCDB_NCP_PD_CTL_REG, NCP_DIS_PULLDOWN_BIT,
+				lcdb->ncp.pd ? 0 : NCP_DIS_PULLDOWN_BIT);
+			if (rc < 0) {
+				pr_err("Failed to configure NCP PD rc=%d\n",
+									rc);
+				return rc;
+			}
+		}
+
+		if (lcdb->ncp.pd_strength != -EINVAL) {
+			rc = qpnp_lcdb_masked_write(lcdb, lcdb->base +
+				LCDB_NCP_PD_CTL_REG, NCP_PD_STRENGTH_BIT,
+				lcdb->ncp.pd_strength ?
+				NCP_PD_STRENGTH_BIT : 0);
+			if (rc < 0) {
+				pr_err("Failed to configure NCP PD strength %s rc=%d",
+					lcdb->ncp.pd_strength ?
+					"(strong)" : "(weak)", rc);
+				return rc;
+			}
+		}
+
+		if (lcdb->ncp.ilim_ma != -EINVAL) {
+			while (lcdb->ncp.ilim_ma > ncp_ilim_ma[i])
+				i++;
+			i = (i == 0) ? 0 : i - 1;
+			val = (i & SET_NCP_ILIM_MASK) | EN_NCP_ILIM_BIT;
+			rc = qpnp_lcdb_masked_write(lcdb, lcdb->base +
+						LCDB_NCP_ILIM_CTL1_REG,
+				SET_NCP_ILIM_MASK | EN_NCP_ILIM_BIT, val);
+			if (rc < 0) {
+				pr_err("Failed to configure NCP ilim_ma (CTL1=%d) rc=%d",
+								val, rc);
+				return rc;
+			}
+			val = i & SET_NCP_ILIM_MASK;
+			rc = qpnp_lcdb_masked_write(lcdb,
+					lcdb->base + LCDB_NCP_ILIM_CTL2_REG,
+					SET_NCP_ILIM_MASK, val);
+			if (rc < 0) {
+				pr_err("Failed to configure NCP ilim_ma (CTL2=%d) rc=%d",
+							val, rc);
+				return rc;
+			}
+		}
+
+		if (lcdb->ncp.soft_start_us != -EINVAL) {
+			rc = qpnp_lcdb_set_soft_start(lcdb,
+				lcdb->ncp.soft_start_us, NCP);
+			if (rc < 0) {
+				pr_err("Failed to set NCP soft_start rc=%d\n",
+								rc);
+				return rc;
+			}
+		}
+	}
+
+	rc = qpnp_lcdb_get_voltage(lcdb, &lcdb->ncp.voltage_mv, NCP);
+	if (rc < 0) {
+		pr_err("Failed to get NCP volatge rc=%d\n", rc);
+		return rc;
+	}
+
+	rc = qpnp_lcdb_read(lcdb, lcdb->base +
+			LCDB_NCP_VREG_OK_CTL_REG, &val, 1);
+	if (rc < 0) {
+		pr_err("Failed to read ncp_vreg_ok rc=%d\n", rc);
+		return rc;
+	}
+	lcdb->ncp.vreg_ok_dbc_us = dbc_us[val & VREG_OK_DEB_MASK];
+
+	rc = qpnp_lcdb_read(lcdb, lcdb->base +
+			LCDB_NCP_SOFT_START_CTL_REG, &val, 1);
+	if (rc < 0) {
+		pr_err("Failed to read ncp_soft_start_ctl rc=%d\n", rc);
+		return rc;
+	}
+	lcdb->ncp.soft_start_us = soft_start_us[val & SOFT_START_MASK];
+
+	rc = qpnp_lcdb_regulator_register(lcdb, NCP);
+	if (rc < 0)
+		pr_err("Failed to register NCP rc=%d\n", rc);
+
+	return rc;
+}
+
+static int qpnp_lcdb_init_bst(struct qpnp_lcdb *lcdb)
+{
+	int rc = 0;
+	u8 val = 0;
+
+	/* configure parameters only if LCDB is disabled */
+	if (!is_lcdb_enabled(lcdb)) {
+		if (lcdb->bst.pd != -EINVAL) {
+			rc = qpnp_lcdb_masked_write(lcdb, lcdb->base +
+				LCDB_BST_PD_CTL_REG, BOOST_DIS_PULLDOWN_BIT,
+				lcdb->bst.pd ? 0 : BOOST_DIS_PULLDOWN_BIT);
+			if (rc < 0) {
+				pr_err("Failed to configure BST PD rc=%d\n",
+									rc);
+				return rc;
+			}
+		}
+
+		if (lcdb->bst.pd_strength != -EINVAL) {
+			rc = qpnp_lcdb_masked_write(lcdb, lcdb->base +
+				LCDB_BST_PD_CTL_REG, BOOST_PD_STRENGTH_BIT,
+				lcdb->bst.pd_strength ?
+				BOOST_PD_STRENGTH_BIT : 0);
+			if (rc < 0) {
+				pr_err("Failed to configure NCP PD strength %s rc=%d",
+					lcdb->bst.pd_strength ?
+					"(strong)" : "(weak)", rc);
+				return rc;
+			}
+		}
+
+		if (lcdb->bst.ilim_ma != -EINVAL) {
+			val = (lcdb->bst.ilim_ma / MIN_BST_ILIM_MA) - 1;
+			val = (val & SET_BST_ILIM_MASK) | EN_BST_ILIM_BIT;
+			rc = qpnp_lcdb_masked_write(lcdb, lcdb->base +
+				LCDB_BST_ILIM_CTL_REG,
+				SET_BST_ILIM_MASK | EN_BST_ILIM_BIT, val);
+			if (rc < 0) {
+				pr_err("Failed to configure BST ilim_ma rc=%d",
+									rc);
+				return rc;
+			}
+		}
+
+		if (lcdb->bst.ps != -EINVAL) {
+			rc = qpnp_lcdb_masked_write(lcdb, lcdb->base +
+					LCDB_PS_CTL_REG, EN_PS_BIT,
+					lcdb->bst.ps ? EN_PS_BIT : 0);
+			if (rc < 0) {
+				pr_err("Failed to configure BST PS rc=%d", rc);
+				return rc;
+			}
+		}
+
+		if (lcdb->bst.ps_threshold != -EINVAL) {
+			val = (lcdb->bst.ps_threshold - MIN_BST_PS_MA) / 10;
+			val = (val & PS_THRESHOLD_MASK) | EN_PS_BIT;
+			rc = qpnp_lcdb_masked_write(lcdb, lcdb->base +
+						LCDB_PS_CTL_REG,
+						PS_THRESHOLD_MASK | EN_PS_BIT,
+						val);
+			if (rc < 0) {
+				pr_err("Failed to configure BST PS threshold rc=%d",
+								rc);
+				return rc;
+			}
+		}
+	}
+
+	rc = qpnp_lcdb_get_voltage(lcdb, &lcdb->bst.voltage_mv, BST);
+	if (rc < 0) {
+		pr_err("Failed to get BST volatge rc=%d\n", rc);
+		return rc;
+	}
+
+	rc = qpnp_lcdb_read(lcdb, lcdb->base +
+			LCDB_BST_VREG_OK_CTL_REG, &val, 1);
+	if (rc < 0) {
+		pr_err("Failed to read bst_vreg_ok rc=%d\n", rc);
+		return rc;
+	}
+	lcdb->bst.vreg_ok_dbc_us = dbc_us[val & VREG_OK_DEB_MASK];
+
+	rc = qpnp_lcdb_read(lcdb, lcdb->base +
+			LCDB_SOFT_START_CTL_REG, &val, 1);
+	if (rc < 0) {
+		pr_err("Failed to read ncp_soft_start_ctl rc=%d\n", rc);
+		return rc;
+	}
+	lcdb->bst.soft_start_us = (val & SOFT_START_MASK) * 200 + 200;
+
+	if (!lcdb->bst.headroom_mv)
+		lcdb->bst.headroom_mv = BST_HEADROOM_DEFAULT_MV;
+
+	return 0;
+}
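+
+/*
+ * Decode note for the boost soft-start field above: unlike the LDO and
+ * NCP rails, the boost soft-start time is linear in the register value,
+ * 200 us per step with a 200 us floor; e.g. a field value of 3 decodes to
+ * 3 * 200 + 200 = 800 us.
+ */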
+
+static void qpnp_lcdb_pmic_config(struct qpnp_lcdb *lcdb)
+{
+	switch (lcdb->pmic_rev_id->pmic_subtype) {
+	case PM660L_SUBTYPE:
+		if (lcdb->pmic_rev_id->rev4 < PM660L_V2P0_REV4)
+			lcdb->wa_flags |= NCP_SCP_DISABLE_WA;
+		break;
+	default:
+		break;
+	}
+
+	pr_debug("LCDB wa_flags = 0x%2x\n", lcdb->wa_flags);
+}
+
+static int qpnp_lcdb_hw_init(struct qpnp_lcdb *lcdb)
+{
+	int rc = 0;
+	u8 val = 0;
+
+	qpnp_lcdb_pmic_config(lcdb);
+
+	rc = qpnp_lcdb_init_bst(lcdb);
+	if (rc < 0) {
+		pr_err("Failed to initialize BOOST rc=%d\n", rc);
+		return rc;
+	}
+
+	rc = qpnp_lcdb_init_ldo(lcdb);
+	if (rc < 0) {
+		pr_err("Failed to initialize LDO rc=%d\n", rc);
+		return rc;
+	}
+
+	rc = qpnp_lcdb_init_ncp(lcdb);
+	if (rc < 0) {
+		pr_err("Failed to initialize NCP rc=%d\n", rc);
+		return rc;
+	}
+
+	if (lcdb->sc_irq >= 0 && !(lcdb->wa_flags & NCP_SCP_DISABLE_WA)) {
+		lcdb->sc_count = 0;
+		rc = devm_request_threaded_irq(lcdb->dev, lcdb->sc_irq,
+				NULL, qpnp_lcdb_sc_irq_handler, IRQF_ONESHOT,
+				"qpnp_lcdb_sc_irq", lcdb);
+		if (rc < 0) {
+			pr_err("Unable to request sc(%d) irq rc=%d\n",
+						lcdb->sc_irq, rc);
+			return rc;
+		}
+	}
+
+	if (!is_lcdb_enabled(lcdb)) {
+		rc = qpnp_lcdb_read(lcdb, lcdb->base +
+				LCDB_MODULE_RDY_REG, &val, 1);
+		if (rc < 0) {
+			pr_err("Failed to read MODULE_RDY rc=%d\n", rc);
+			return rc;
+		}
+		if (!(val & MODULE_RDY_BIT)) {
+			rc = qpnp_lcdb_masked_write(lcdb, lcdb->base +
+				LCDB_MODULE_RDY_REG, MODULE_RDY_BIT,
+						MODULE_RDY_BIT);
+			if (rc < 0) {
+				pr_err("Failed to set MODULE RDY rc=%d\n", rc);
+				return rc;
+			}
+		}
+	} else {
+		/* module already enabled */
+		lcdb->lcdb_enabled = true;
+	}
+
+	return 0;
+}
+
+static int qpnp_lcdb_parse_dt(struct qpnp_lcdb *lcdb)
+{
+	int rc = 0;
+	const char *label;
+	struct device_node *revid_dev_node, *temp, *node = lcdb->dev->of_node;
+
+	revid_dev_node = of_parse_phandle(node, "qcom,pmic-revid", 0);
+	if (!revid_dev_node) {
+		pr_err("Missing qcom,pmic-revid property - fail driver\n");
+		return -EINVAL;
+	}
+
+	lcdb->pmic_rev_id = get_revid_data(revid_dev_node);
+	if (IS_ERR(lcdb->pmic_rev_id)) {
+		pr_debug("Unable to get revid data\n");
+		/*
+		 * revid should be defined; return -EPROBE_DEFER
+		 * until the revid module registers.
+		 */
+		return -EPROBE_DEFER;
+	}
+
+	for_each_available_child_of_node(node, temp) {
+		rc = of_property_read_string(temp, "label", &label);
+		if (rc < 0) {
+			pr_err("Failed to read label rc=%d\n", rc);
+			return rc;
+		}
+
+		if (!strcmp(label, "ldo")) {
+			lcdb->ldo.node = temp;
+			rc = qpnp_lcdb_ldo_dt_init(lcdb);
+		} else if (!strcmp(label, "ncp")) {
+			lcdb->ncp.node = temp;
+			rc = qpnp_lcdb_ncp_dt_init(lcdb);
+		} else if (!strcmp(label, "bst")) {
+			lcdb->bst.node = temp;
+			rc = qpnp_lcdb_bst_dt_init(lcdb);
+		} else {
+			pr_err("Failed to identify label %s\n", label);
+			return -EINVAL;
+		}
+		if (rc < 0) {
+			pr_err("Failed to register %s module\n", label);
+			return rc;
+		}
+	}
+
+	if (of_property_read_bool(node, "qcom,ttw-enable")) {
+		rc = qpnp_lcdb_parse_ttw(lcdb);
+		if (rc < 0) {
+			pr_err("Failed to parse ttw-params rc=%d\n", rc);
+			return rc;
+		}
+		lcdb->ttw_enable = true;
+	}
+
+	lcdb->sc_irq = platform_get_irq_byname(lcdb->pdev, "sc-irq");
+	if (lcdb->sc_irq < 0)
+		pr_debug("sc irq is not defined\n");
+
+	return rc;
+}
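+
+/*
+ * A minimal sketch of the device tree layout this parser expects.  Node
+ * names and property values below are illustrative only, not taken from a
+ * real board file:
+ *
+ *	lcdb@ec00 {
+ *		reg = <0xec00>;
+ *		interrupt-names = "sc-irq";
+ *		qcom,pmic-revid = <&pm660l_revid>;
+ *
+ *		ldo {
+ *			label = "ldo";
+ *			regulator-name = "lcdb_ldo";
+ *			qcom,ldo-voltage-mv = <5500>;
+ *		};
+ *
+ *		ncp {
+ *			label = "ncp";
+ *			regulator-name = "lcdb_ncp";
+ *			qcom,ncp-voltage-mv = <5500>;
+ *		};
+ *
+ *		bst {
+ *			label = "bst";
+ *			qcom,bst-headroom-mv = /bits/ 16 <200>;
+ *		};
+ *	};
+ */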
+
+static int qpnp_lcdb_regulator_probe(struct platform_device *pdev)
+{
+	int rc;
+	struct device_node *node;
+	struct qpnp_lcdb *lcdb;
+
+	node = pdev->dev.of_node;
+	if (!node) {
+		pr_err("No nodes defined\n");
+		return -ENODEV;
+	}
+
+	lcdb = devm_kzalloc(&pdev->dev, sizeof(*lcdb), GFP_KERNEL);
+	if (!lcdb)
+		return -ENOMEM;
+
+	rc = of_property_read_u32(node, "reg", &lcdb->base);
+	if (rc < 0) {
+		pr_err("Failed to find reg node rc=%d\n", rc);
+		return rc;
+	}
+
+	lcdb->regmap = dev_get_regmap(pdev->dev.parent, NULL);
+	if (!lcdb->regmap) {
+		pr_err("Failed to get the regmap handle rc=%d\n", rc);
+		return -EINVAL;
+	}
+
+	lcdb->dev = &pdev->dev;
+	lcdb->pdev = pdev;
+	mutex_init(&lcdb->lcdb_mutex);
+	mutex_init(&lcdb->read_write_mutex);
+
+	rc = qpnp_lcdb_parse_dt(lcdb);
+	if (rc < 0) {
+		pr_err("Failed to parse dt rc=%d\n", rc);
+		return rc;
+	}
+
+	rc = qpnp_lcdb_hw_init(lcdb);
+	if (rc < 0)
+		pr_err("Failed to initialize LCDB module rc=%d\n", rc);
+	else
+		pr_info("LCDB module successfully registered! lcdb_en=%d ldo_voltage=%dmV ncp_voltage=%dmV bst_voltage=%dmV\n",
+			lcdb->lcdb_enabled, lcdb->ldo.voltage_mv,
+			lcdb->ncp.voltage_mv, lcdb->bst.voltage_mv);
+
+	return rc;
+}
+
+static int qpnp_lcdb_regulator_remove(struct platform_device *pdev)
+{
+	struct qpnp_lcdb *lcdb = dev_get_drvdata(&pdev->dev);
+
+	mutex_destroy(&lcdb->lcdb_mutex);
+	mutex_destroy(&lcdb->read_write_mutex);
+
+	return 0;
+}
+
+static const struct of_device_id lcdb_match_table[] = {
+	{ .compatible = QPNP_LCDB_REGULATOR_DRIVER_NAME, },
+	{ },
+};
+
+static struct platform_driver qpnp_lcdb_regulator_driver = {
+	.driver		= {
+		.name		= QPNP_LCDB_REGULATOR_DRIVER_NAME,
+		.of_match_table	= lcdb_match_table,
+	},
+	.probe		= qpnp_lcdb_regulator_probe,
+	.remove		= qpnp_lcdb_regulator_remove,
+};
+
+static int __init qpnp_lcdb_regulator_init(void)
+{
+	return platform_driver_register(&qpnp_lcdb_regulator_driver);
+}
+arch_initcall(qpnp_lcdb_regulator_init);
+
+static void __exit qpnp_lcdb_regulator_exit(void)
+{
+	platform_driver_unregister(&qpnp_lcdb_regulator_driver);
+}
+module_exit(qpnp_lcdb_regulator_exit);
+
+MODULE_DESCRIPTION("QPNP LCDB regulator driver");
+MODULE_LICENSE("GPL v2");
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/regulator/qpnp-regulator.c	2019-01-22 16:16:26.275271509 +0100
@@ -0,0 +1,2535 @@
+/*
+ * Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/string.h>
+#include <linux/kernel.h>
+#include <linux/regmap.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/bitops.h>
+#include <linux/slab.h>
+#include <linux/spmi.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/ktime.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/of_regulator.h>
+#include <linux/regulator/qpnp-regulator.h>
+
+/* Debug Flag Definitions */
+enum {
+	QPNP_VREG_DEBUG_REQUEST		= BIT(0), /* Show requests */
+	QPNP_VREG_DEBUG_DUPLICATE	= BIT(1), /* Show duplicate requests */
+	QPNP_VREG_DEBUG_INIT		= BIT(2), /* Show state after probe */
+	QPNP_VREG_DEBUG_WRITES		= BIT(3), /* Show SPMI writes */
+	QPNP_VREG_DEBUG_READS		= BIT(4), /* Show SPMI reads */
+	QPNP_VREG_DEBUG_OCP		= BIT(5), /* Show VS OCP IRQ events */
+};
+
+static int qpnp_vreg_debug_mask;
+module_param_named(
+	debug_mask, qpnp_vreg_debug_mask, int, 0600
+);
+
+#define vreg_err(vreg, fmt, ...) \
+	pr_err("%s: " fmt, vreg->rdesc.name, ##__VA_ARGS__)
+
+/* These types correspond to unique register layouts. */
+enum qpnp_regulator_logical_type {
+	QPNP_REGULATOR_LOGICAL_TYPE_SMPS,
+	QPNP_REGULATOR_LOGICAL_TYPE_LDO,
+	QPNP_REGULATOR_LOGICAL_TYPE_VS,
+	QPNP_REGULATOR_LOGICAL_TYPE_BOOST,
+	QPNP_REGULATOR_LOGICAL_TYPE_FTSMPS,
+	QPNP_REGULATOR_LOGICAL_TYPE_BOOST_BYP,
+	QPNP_REGULATOR_LOGICAL_TYPE_LN_LDO,
+	QPNP_REGULATOR_LOGICAL_TYPE_ULT_LO_SMPS,
+	QPNP_REGULATOR_LOGICAL_TYPE_ULT_HO_SMPS,
+	QPNP_REGULATOR_LOGICAL_TYPE_ULT_LDO,
+	QPNP_REGULATOR_LOGICAL_TYPE_FTSMPS2,
+};
+
+enum qpnp_regulator_type {
+	QPNP_REGULATOR_TYPE_BUCK		= 0x03,
+	QPNP_REGULATOR_TYPE_LDO			= 0x04,
+	QPNP_REGULATOR_TYPE_VS			= 0x05,
+	QPNP_REGULATOR_TYPE_BOOST		= 0x1B,
+	QPNP_REGULATOR_TYPE_FTS			= 0x1C,
+	QPNP_REGULATOR_TYPE_BOOST_BYP		= 0x1F,
+	QPNP_REGULATOR_TYPE_ULT_LDO		= 0x21,
+	QPNP_REGULATOR_TYPE_ULT_BUCK		= 0x22,
+};
+
+enum qpnp_regulator_subtype {
+	QPNP_REGULATOR_SUBTYPE_GP_CTL		= 0x08,
+	QPNP_REGULATOR_SUBTYPE_RF_CTL		= 0x09,
+	QPNP_REGULATOR_SUBTYPE_N50		= 0x01,
+	QPNP_REGULATOR_SUBTYPE_N150		= 0x02,
+	QPNP_REGULATOR_SUBTYPE_N300		= 0x03,
+	QPNP_REGULATOR_SUBTYPE_N600		= 0x04,
+	QPNP_REGULATOR_SUBTYPE_N1200		= 0x05,
+	QPNP_REGULATOR_SUBTYPE_N600_ST		= 0x06,
+	QPNP_REGULATOR_SUBTYPE_N1200_ST		= 0x07,
+	QPNP_REGULATOR_SUBTYPE_N300_ST		= 0x15,
+	QPNP_REGULATOR_SUBTYPE_P50		= 0x08,
+	QPNP_REGULATOR_SUBTYPE_P150		= 0x09,
+	QPNP_REGULATOR_SUBTYPE_P300		= 0x0A,
+	QPNP_REGULATOR_SUBTYPE_P600		= 0x0B,
+	QPNP_REGULATOR_SUBTYPE_P1200		= 0x0C,
+	QPNP_REGULATOR_SUBTYPE_LN		= 0x10,
+	QPNP_REGULATOR_SUBTYPE_LV_P50		= 0x28,
+	QPNP_REGULATOR_SUBTYPE_LV_P150		= 0x29,
+	QPNP_REGULATOR_SUBTYPE_LV_P300		= 0x2A,
+	QPNP_REGULATOR_SUBTYPE_LV_P600		= 0x2B,
+	QPNP_REGULATOR_SUBTYPE_LV_P1200		= 0x2C,
+	QPNP_REGULATOR_SUBTYPE_LV100		= 0x01,
+	QPNP_REGULATOR_SUBTYPE_LV300		= 0x02,
+	QPNP_REGULATOR_SUBTYPE_MV300		= 0x08,
+	QPNP_REGULATOR_SUBTYPE_MV500		= 0x09,
+	QPNP_REGULATOR_SUBTYPE_HDMI		= 0x10,
+	QPNP_REGULATOR_SUBTYPE_OTG		= 0x11,
+	QPNP_REGULATOR_SUBTYPE_5V_BOOST		= 0x01,
+	QPNP_REGULATOR_SUBTYPE_FTS_CTL		= 0x08,
+	QPNP_REGULATOR_SUBTYPE_FTS2p5_CTL	= 0x09,
+	QPNP_REGULATOR_SUBTYPE_FTS426		= 0x0A,
+	QPNP_REGULATOR_SUBTYPE_BB_2A		= 0x01,
+	QPNP_REGULATOR_SUBTYPE_ULT_HF_CTL1	= 0x0D,
+	QPNP_REGULATOR_SUBTYPE_ULT_HF_CTL2	= 0x0E,
+	QPNP_REGULATOR_SUBTYPE_ULT_HF_CTL3	= 0x0F,
+	QPNP_REGULATOR_SUBTYPE_ULT_HF_CTL4	= 0x10,
+};
+
+/* First common register layout used by older devices */
+enum qpnp_common_regulator_registers {
+	QPNP_COMMON_REG_DIG_MAJOR_REV		= 0x01,
+	QPNP_COMMON_REG_TYPE			= 0x04,
+	QPNP_COMMON_REG_SUBTYPE			= 0x05,
+	QPNP_COMMON_REG_VOLTAGE_RANGE		= 0x40,
+	QPNP_COMMON_REG_VOLTAGE_SET		= 0x41,
+	QPNP_COMMON_REG_MODE			= 0x45,
+	QPNP_COMMON_REG_ENABLE			= 0x46,
+	QPNP_COMMON_REG_PULL_DOWN		= 0x48,
+	QPNP_COMMON_REG_STEP_CTRL		= 0x61,
+	QPNP_COMMON_REG_UL_LL_CTRL		= 0x68,
+	QPNP_COMMON_REG_VOLTAGE_ULS_VALID	= 0x6A,
+	QPNP_COMMON_REG_VOLTAGE_LLS_VALID	= 0x6C,
+};
+
+/*
+ * Second common register layout used by newer devices
+ * Note that some of the registers from the first common layout remain
+ * unchanged and their definition is not duplicated.
+ */
+enum qpnp_common2_regulator_registers {
+	QPNP_COMMON2_REG_VOLTAGE_LSB		= 0x40,
+	QPNP_COMMON2_REG_VOLTAGE_MSB		= 0x41,
+	QPNP_COMMON2_REG_MODE			= 0x45,
+	QPNP_COMMON2_REG_STEP_CTRL		= 0x61,
+	QPNP_COMMON2_REG_VOLTAGE_ULS_LSB	= 0x68,
+	QPNP_COMMON2_REG_VOLTAGE_ULS_MSB	= 0x69,
+};
+
+enum qpnp_ldo_registers {
+	QPNP_LDO_REG_SOFT_START			= 0x4C,
+};
+
+enum qpnp_vs_registers {
+	QPNP_VS_REG_OCP				= 0x4A,
+	QPNP_VS_REG_SOFT_START			= 0x4C,
+};
+
+enum qpnp_boost_registers {
+	QPNP_BOOST_REG_CURRENT_LIMIT		= 0x4A,
+};
+
+enum qpnp_boost_byp_registers {
+	QPNP_BOOST_BYP_REG_CURRENT_LIMIT	= 0x4B,
+};
+
+/* Used for indexing into ctrl_reg.  These are offsets from 0x40 */
+enum qpnp_common_control_register_index {
+	QPNP_COMMON_IDX_VOLTAGE_RANGE		= 0,
+	QPNP_COMMON_IDX_VOLTAGE_SET		= 1,
+	QPNP_COMMON_IDX_MODE			= 5,
+	QPNP_COMMON_IDX_ENABLE			= 6,
+};
+
+enum qpnp_common2_control_register_index {
+	QPNP_COMMON2_IDX_VOLTAGE_LSB		= 0,
+	QPNP_COMMON2_IDX_VOLTAGE_MSB		= 1,
+	QPNP_COMMON2_IDX_MODE			= 5,
+};
+
+/* Common regulator control register layout */
+#define QPNP_COMMON_ENABLE_MASK			0x80
+#define QPNP_COMMON_ENABLE			0x80
+#define QPNP_COMMON_DISABLE			0x00
+#define QPNP_COMMON_ENABLE_FOLLOW_HW_EN3_MASK	0x08
+#define QPNP_COMMON_ENABLE_FOLLOW_HW_EN2_MASK	0x04
+#define QPNP_COMMON_ENABLE_FOLLOW_HW_EN1_MASK	0x02
+#define QPNP_COMMON_ENABLE_FOLLOW_HW_EN0_MASK	0x01
+#define QPNP_COMMON_ENABLE_FOLLOW_ALL_MASK	0x0F
+
+/* First common regulator mode register layout */
+#define QPNP_COMMON_MODE_HPM_MASK		0x80
+#define QPNP_COMMON_MODE_AUTO_MASK		0x40
+#define QPNP_COMMON_MODE_BYPASS_MASK		0x20
+#define QPNP_COMMON_MODE_FOLLOW_AWAKE_MASK	0x10
+#define QPNP_COMMON_MODE_FOLLOW_HW_EN3_MASK	0x08
+#define QPNP_COMMON_MODE_FOLLOW_HW_EN2_MASK	0x04
+#define QPNP_COMMON_MODE_FOLLOW_HW_EN1_MASK	0x02
+#define QPNP_COMMON_MODE_FOLLOW_HW_EN0_MASK	0x01
+#define QPNP_COMMON_MODE_FOLLOW_ALL_MASK	0x1F
+
+/* Second common regulator mode register values */
+#define QPNP_COMMON2_MODE_BYPASS		3
+#define QPNP_COMMON2_MODE_RETENTION		4
+#define QPNP_COMMON2_MODE_LPM			5
+#define QPNP_COMMON2_MODE_AUTO			6
+#define QPNP_COMMON2_MODE_HPM			7
+
+#define QPNP_COMMON2_MODE_MASK			0x07
+
+/* Common regulator pull down control register layout */
+#define QPNP_COMMON_PULL_DOWN_ENABLE_MASK	0x80
+
+/* Common regulator UL & LL limits control register layout */
+#define QPNP_COMMON_UL_EN_MASK			0x80
+#define QPNP_COMMON_LL_EN_MASK			0x40
+
+/* LDO regulator current limit control register layout */
+#define QPNP_LDO_CURRENT_LIMIT_ENABLE_MASK	0x80
+
+/* LDO regulator soft start control register layout */
+#define QPNP_LDO_SOFT_START_ENABLE_MASK		0x80
+
+/* VS regulator over current protection control register layout */
+#define QPNP_VS_OCP_OVERRIDE			0x01
+#define QPNP_VS_OCP_NO_OVERRIDE			0x00
+
+/* VS regulator soft start control register layout */
+#define QPNP_VS_SOFT_START_ENABLE_MASK		0x80
+#define QPNP_VS_SOFT_START_SEL_MASK		0x03
+
+/* Boost regulator current limit control register layout */
+#define QPNP_BOOST_CURRENT_LIMIT_ENABLE_MASK	0x80
+#define QPNP_BOOST_CURRENT_LIMIT_MASK		0x07
+
+#define QPNP_VS_OCP_DEFAULT_MAX_RETRIES		10
+#define QPNP_VS_OCP_DEFAULT_RETRY_DELAY_MS	30
+#define QPNP_VS_OCP_FALL_DELAY_US		90
+#define QPNP_VS_OCP_FAULT_DELAY_US		20000
+
+#define QPNP_FTSMPS_STEP_CTRL_STEP_MASK		0x18
+#define QPNP_FTSMPS_STEP_CTRL_STEP_SHIFT	3
+#define QPNP_FTSMPS_STEP_CTRL_DELAY_MASK	0x07
+#define QPNP_FTSMPS_STEP_CTRL_DELAY_SHIFT	0
+
+/* Clock rate in kHz of the FTSMPS regulator reference clock. */
+#define QPNP_FTSMPS_CLOCK_RATE		19200
+
+/* Minimum voltage stepper delay for each step. */
+#define QPNP_FTSMPS_STEP_DELAY		8
+
+/*
+ * The ratio QPNP_FTSMPS_STEP_MARGIN_NUM/QPNP_FTSMPS_STEP_MARGIN_DEN is used to
+ * adjust the step rate in order to account for oscillator variance.
+ */
+#define QPNP_FTSMPS_STEP_MARGIN_NUM	4
+#define QPNP_FTSMPS_STEP_MARGIN_DEN	5
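+
+/*
+ * For illustration: the nominal per-step time is QPNP_FTSMPS_STEP_DELAY /
+ * QPNP_FTSMPS_CLOCK_RATE = 8 / 19200 kHz ~= 0.42 us; derating the step
+ * rate by 4/5 budgets roughly 0.52 us per step when the voltage slewing
+ * delay is computed.
+ */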
+
+#define QPNP_FTSMPS2_STEP_CTRL_DELAY_MASK	0x03
+#define QPNP_FTSMPS2_STEP_CTRL_DELAY_SHIFT	0
+
+/* Clock rate in kHz of the FTSMPS2 regulator reference clock. */
+#define QPNP_FTSMPS2_CLOCK_RATE		4800
+
+/* Minimum voltage stepper delay for each step. */
+#define QPNP_FTSMPS2_STEP_DELAY		2
+
+/*
+ * The ratio QPNP_FTSMPS2_STEP_MARGIN_NUM/QPNP_FTSMPS2_STEP_MARGIN_DEN is used
+ * to adjust the step rate in order to account for oscillator variance.
+ */
+#define QPNP_FTSMPS2_STEP_MARGIN_NUM	10
+#define QPNP_FTSMPS2_STEP_MARGIN_DEN	11
+
+/*
+ * This voltage in uV is returned by get_voltage functions when there is no way
+ * to determine the current voltage level.  It is needed because the regulator
+ * framework treats a 0 uV voltage as an error.
+ */
+#define VOLTAGE_UNKNOWN 1
+
+/* VSET value to decide the range of ULT SMPS */
+#define ULT_SMPS_RANGE_SPLIT 0x60
+
+/**
+ * struct qpnp_voltage_range - regulator set point voltage mapping description
+ * @min_uV:		Minimum programmable output voltage resulting from
+ *			set point register value 0x00
+ * @max_uV:		Maximum programmable output voltage
+ * @step_uV:		Output voltage increase resulting from the set point
+ *			register value increasing by 1
+ * @set_point_min_uV:	Minimum allowed voltage
+ * @set_point_max_uV:	Maximum allowed voltage.  This may be tweaked in order
+ *			to pick which range should be used in the case of
+ *			overlapping set points.
+ * @n_voltages:		Number of preferred voltage set points present in this
+ *			range
+ * @range_sel:		Voltage range register value corresponding to this range
+ *
+ * The following relationships must be true for the values used in this struct:
+ * (max_uV - min_uV) % step_uV == 0
+ * (set_point_min_uV - min_uV) % step_uV == 0*
+ * (set_point_max_uV - min_uV) % step_uV == 0*
+ * n_voltages = (set_point_max_uV - set_point_min_uV) / step_uV + 1
+ *
+ * *Note, set_point_min_uV == set_point_max_uV == 0 is allowed in order to
+ * specify that the voltage range has meaning, but is not preferred.
+ */
+struct qpnp_voltage_range {
+	int					min_uV;
+	int					max_uV;
+	int					step_uV;
+	int					set_point_min_uV;
+	int					set_point_max_uV;
+	unsigned int				n_voltages;
+	u8					range_sel;
+};
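+
+/*
+ * Checking the relationships above against the first pldo range defined
+ * below: min_uV = set_point_min_uV = 750000, set_point_max_uV = max_uV =
+ * 1537500 and step_uV = 12500, so n_voltages = (1537500 - 750000) / 12500
+ * + 1 = 64.  The VOLTAGE_RANGE() initializers below leave n_voltages
+ * unset; it is presumably filled in at init time from the same formula.
+ */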
+
+/*
+ * The ranges specified in the qpnp_voltage_set_points struct must be listed
+ * so that range[i].set_point_max_uV < range[i+1].set_point_min_uV.
+ */
+struct qpnp_voltage_set_points {
+	struct qpnp_voltage_range		*range;
+	int					count;
+	unsigned int				n_voltages;
+};
+
+struct qpnp_regulator_mapping {
+	enum qpnp_regulator_type		type;
+	enum qpnp_regulator_subtype		subtype;
+	enum qpnp_regulator_logical_type	logical_type;
+	u32					revision_min;
+	u32					revision_max;
+	struct regulator_ops			*ops;
+	struct qpnp_voltage_set_points		*set_points;
+	int					hpm_min_load;
+};
+
+struct qpnp_regulator {
+	struct regulator_desc			rdesc;
+	struct delayed_work			ocp_work;
+	struct platform_device			*pdev;
+	struct regmap				*regmap;
+	struct regulator_dev			*rdev;
+	struct qpnp_voltage_set_points		*set_points;
+	enum qpnp_regulator_logical_type	logical_type;
+	int					enable_time;
+	int					ocp_enable;
+	int					ocp_irq;
+	int					ocp_count;
+	int					ocp_max_retries;
+	int					ocp_retry_delay_ms;
+	int					system_load;
+	int					hpm_min_load;
+	int					slew_rate;
+	u32					write_count;
+	u32					prev_write_count;
+	ktime_t					vs_enable_time;
+	u16					base_addr;
+	/* ctrl_reg provides a shadow copy of register values 0x40 to 0x47. */
+	u8					ctrl_reg[8];
+	u8					init_mode;
+};
+
+#define QPNP_VREG_MAP(_type, _subtype, _dig_major_min, _dig_major_max, \
+		      _logical_type, _ops_val, _set_points_val, _hpm_min_load) \
+	{ \
+		.type		= QPNP_REGULATOR_TYPE_##_type, \
+		.subtype	= QPNP_REGULATOR_SUBTYPE_##_subtype, \
+		.revision_min	= _dig_major_min, \
+		.revision_max	= _dig_major_max, \
+		.logical_type	= QPNP_REGULATOR_LOGICAL_TYPE_##_logical_type, \
+		.ops		= &qpnp_##_ops_val##_ops, \
+		.set_points	= &_set_points_val##_set_points, \
+		.hpm_min_load	= _hpm_min_load, \
+	}
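+
+/*
+ * A hypothetical entry, to show how the macro expands (the real mapping
+ * table lives further down in this file; the INF sentinel for an
+ * open-ended maximum revision is assumed here):
+ *
+ *	QPNP_VREG_MAP(LDO, N300, 0, INF, LDO, ldo, nldo1, 10000),
+ *
+ * matches an LDO peripheral of subtype N300 for any digital major
+ * revision, binds it to the qpnp_ldo_ops callbacks and the nldo1 set
+ * points, and declares a 10 mA HPM minimum load.
+ */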
+
+#define VOLTAGE_RANGE(_range_sel, _min_uV, _set_point_min_uV, \
+			_set_point_max_uV, _max_uV, _step_uV) \
+	{ \
+		.min_uV			= _min_uV, \
+		.max_uV			= _max_uV, \
+		.set_point_min_uV	= _set_point_min_uV, \
+		.set_point_max_uV	= _set_point_max_uV, \
+		.step_uV		= _step_uV, \
+		.range_sel		= _range_sel, \
+	}
+
+#define SET_POINTS(_ranges) \
+{ \
+	.range	= _ranges, \
+	.count	= ARRAY_SIZE(_ranges), \
+}
+
+/*
+ * These tables contain the physically available PMIC regulator voltage setpoint
+ * ranges.  Where two ranges overlap in hardware, one of the ranges is trimmed
+ * to ensure that the setpoints available to software are monotonically
+ * increasing and unique.  The set_voltage callback functions expect these
+ * properties to hold.
+ */
+static struct qpnp_voltage_range pldo_ranges[] = {
+	VOLTAGE_RANGE(2,  750000,  750000, 1537500, 1537500, 12500),
+	VOLTAGE_RANGE(3, 1500000, 1550000, 3075000, 3075000, 25000),
+	VOLTAGE_RANGE(4, 1750000, 3100000, 4900000, 4900000, 50000),
+};
+
+static struct qpnp_voltage_range nldo1_ranges[] = {
+	VOLTAGE_RANGE(2,  750000,  750000, 1537500, 1537500, 12500),
+};
+
+static struct qpnp_voltage_range nldo2_ranges[] = {
+	VOLTAGE_RANGE(0,  375000,       0,       0, 1537500, 12500),
+	VOLTAGE_RANGE(1,  375000,  375000,  768750,  768750,  6250),
+	VOLTAGE_RANGE(2,  750000,  775000, 1537500, 1537500, 12500),
+};
+
+static struct qpnp_voltage_range nldo3_ranges[] = {
+	VOLTAGE_RANGE(0,  375000,  375000, 1537500, 1537500, 12500),
+	VOLTAGE_RANGE(1,  375000,       0,       0, 1537500, 12500),
+	VOLTAGE_RANGE(2,  750000,       0,       0, 1537500, 12500),
+};
+
+static struct qpnp_voltage_range ln_ldo_ranges[] = {
+	VOLTAGE_RANGE(1,  690000,  690000, 1110000, 1110000, 60000),
+	VOLTAGE_RANGE(0, 1380000, 1380000, 2220000, 2220000, 120000),
+};
+
+static struct qpnp_voltage_range smps_ranges[] = {
+	VOLTAGE_RANGE(0,  375000,  375000, 1562500, 1562500, 12500),
+	VOLTAGE_RANGE(1, 1550000, 1575000, 3125000, 3125000, 25000),
+};
+
+static struct qpnp_voltage_range ftsmps_ranges[] = {
+	VOLTAGE_RANGE(0,       0,  350000, 1275000, 1275000,  5000),
+	VOLTAGE_RANGE(1,       0, 1280000, 2040000, 2040000, 10000),
+};
+
+static struct qpnp_voltage_range ftsmps2p5_ranges[] = {
+	VOLTAGE_RANGE(0,   80000,  350000, 1355000, 1355000,  5000),
+	VOLTAGE_RANGE(1,  160000, 1360000, 2200000, 2200000, 10000),
+};
+
+static struct qpnp_voltage_range boost_ranges[] = {
+	VOLTAGE_RANGE(0, 4000000, 4000000, 5550000, 5550000, 50000),
+};
+
+static struct qpnp_voltage_range boost_byp_ranges[] = {
+	VOLTAGE_RANGE(0, 2500000, 2500000, 5200000, 5650000, 50000),
+};
+
+static struct qpnp_voltage_range ult_lo_smps_ranges[] = {
+	VOLTAGE_RANGE(0,  375000,  375000, 1562500, 1562500, 12500),
+	VOLTAGE_RANGE(1,  750000,       0,       0, 1525000, 25000),
+};
+
+static struct qpnp_voltage_range ult_ho_smps_ranges[] = {
+	VOLTAGE_RANGE(0, 1550000, 1550000, 2325000, 2325000, 25000),
+};
+
+static struct qpnp_voltage_range ult_nldo_ranges[] = {
+	VOLTAGE_RANGE(0,  375000,  375000, 1537500, 1537500, 12500),
+};
+
+static struct qpnp_voltage_range ult_pldo_ranges[] = {
+	VOLTAGE_RANGE(0, 1750000, 1750000, 3337500, 3337500, 12500),
+};
+
+static struct qpnp_voltage_range ftsmps426_ranges[] = {
+	VOLTAGE_RANGE(0,       0,  320000, 1352000, 1352000,  4000),
+};
+
+static struct qpnp_voltage_set_points pldo_set_points = SET_POINTS(pldo_ranges);
+static struct qpnp_voltage_set_points nldo1_set_points
+					= SET_POINTS(nldo1_ranges);
+static struct qpnp_voltage_set_points nldo2_set_points
+					= SET_POINTS(nldo2_ranges);
+static struct qpnp_voltage_set_points nldo3_set_points
+					= SET_POINTS(nldo3_ranges);
+static struct qpnp_voltage_set_points ln_ldo_set_points
+					= SET_POINTS(ln_ldo_ranges);
+static struct qpnp_voltage_set_points smps_set_points = SET_POINTS(smps_ranges);
+static struct qpnp_voltage_set_points ftsmps_set_points
+					= SET_POINTS(ftsmps_ranges);
+static struct qpnp_voltage_set_points ftsmps2p5_set_points
+					= SET_POINTS(ftsmps2p5_ranges);
+static struct qpnp_voltage_set_points boost_set_points
+					= SET_POINTS(boost_ranges);
+static struct qpnp_voltage_set_points boost_byp_set_points
+					= SET_POINTS(boost_byp_ranges);
+static struct qpnp_voltage_set_points ult_lo_smps_set_points
+					= SET_POINTS(ult_lo_smps_ranges);
+static struct qpnp_voltage_set_points ult_ho_smps_set_points
+					= SET_POINTS(ult_ho_smps_ranges);
+static struct qpnp_voltage_set_points ult_nldo_set_points
+					= SET_POINTS(ult_nldo_ranges);
+static struct qpnp_voltage_set_points ult_pldo_set_points
+					= SET_POINTS(ult_pldo_ranges);
+static struct qpnp_voltage_set_points ftsmps426_set_points
+					= SET_POINTS(ftsmps426_ranges);
+static struct qpnp_voltage_set_points none_set_points;
+
+static struct qpnp_voltage_set_points *all_set_points[] = {
+	&pldo_set_points,
+	&nldo1_set_points,
+	&nldo2_set_points,
+	&nldo3_set_points,
+	&ln_ldo_set_points,
+	&smps_set_points,
+	&ftsmps_set_points,
+	&ftsmps2p5_set_points,
+	&boost_set_points,
+	&boost_byp_set_points,
+	&ult_lo_smps_set_points,
+	&ult_ho_smps_set_points,
+	&ult_nldo_set_points,
+	&ult_pldo_set_points,
+	&ftsmps426_set_points,
+};
+
+/* Determines which label to add to a debug print statement. */
+enum qpnp_regulator_action {
+	QPNP_REGULATOR_ACTION_INIT,
+	QPNP_REGULATOR_ACTION_ENABLE,
+	QPNP_REGULATOR_ACTION_DISABLE,
+	QPNP_REGULATOR_ACTION_VOLTAGE,
+	QPNP_REGULATOR_ACTION_MODE,
+};
+
+static void qpnp_vreg_show_state(struct regulator_dev *rdev,
+				   enum qpnp_regulator_action action);
+
+#define DEBUG_PRINT_BUFFER_SIZE 64
+static void fill_string(char *str, size_t str_len, u8 *buf, int buf_len)
+{
+	int pos = 0;
+	int i;
+
+	for (i = 0; i < buf_len; i++) {
+		pos += scnprintf(str + pos, str_len - pos, "0x%02X", buf[i]);
+		if (i < buf_len - 1)
+			pos += scnprintf(str + pos, str_len - pos, ", ");
+	}
+}
+
+static inline int qpnp_vreg_read(struct qpnp_regulator *vreg, u16 addr, u8 *buf,
+				 int len)
+{
+	char str[DEBUG_PRINT_BUFFER_SIZE];
+	int rc = 0;
+
+	rc = regmap_bulk_read(vreg->regmap, vreg->base_addr + addr, buf, len);
+
+	if (!rc && (qpnp_vreg_debug_mask & QPNP_VREG_DEBUG_READS)) {
+		str[0] = '\0';
+		fill_string(str, DEBUG_PRINT_BUFFER_SIZE, buf, len);
+		pr_info(" %-11s:  read(0x%04X), sid=%d, len=%d; %s\n",
+			vreg->rdesc.name, vreg->base_addr + addr,
+			to_spmi_device(vreg->pdev->dev.parent)->usid, len,
+			str);
+	}
+
+	return rc;
+}
+
+static inline int qpnp_vreg_write(struct qpnp_regulator *vreg, u16 addr,
+				u8 *buf, int len)
+{
+	char str[DEBUG_PRINT_BUFFER_SIZE];
+	int rc = 0;
+
+	if (qpnp_vreg_debug_mask & QPNP_VREG_DEBUG_WRITES) {
+		str[0] = '\0';
+		fill_string(str, DEBUG_PRINT_BUFFER_SIZE, buf, len);
+		pr_info("%-11s: write(0x%04X), sid=%d, len=%d; %s\n",
+			vreg->rdesc.name, vreg->base_addr + addr,
+			to_spmi_device(vreg->pdev->dev.parent)->usid, len,
+			str);
+	}
+
+	rc = regmap_bulk_write(vreg->regmap, vreg->base_addr + addr, buf, len);
+	if (!rc)
+		vreg->write_count += len;
+
+	return rc;
+}
+
+/*
+ * qpnp_vreg_write_optimized - write the minimum sized contiguous subset of buf
+ * @vreg:	qpnp_regulator pointer for this regulator
+ * @addr:	local SPMI address offset from this peripheral's base address
+ * @buf:	new data to write into the SPMI registers
+ * @buf_save:	old data in the registers
+ * @len:	number of bytes to write
+ *
+ * This function checks for unchanged register values between buf and buf_save
+ * starting at both ends of buf.  Only the contiguous subset in the middle of
+ * buf starting and ending with new values is sent.
+ *
+ * Consider the following example:
+ * buf offset: 0 1 2 3 4 5 6 7
+ * reg state:  U U C C U C U U
+ * (U = unchanged, C = changed)
+ * In this example registers 2 through 5 will be written with a single
+ * transaction.
+ */
+static inline int qpnp_vreg_write_optimized(struct qpnp_regulator *vreg,
+		u16 addr, u8 *buf, u8 *buf_save, int len)
+{
+	int i, rc, start, end;
+
+	for (i = 0; i < len; i++)
+		if (buf[i] != buf_save[i])
+			break;
+	start = i;
+
+	for (i = len - 1; i >= 0; i--)
+		if (buf[i] != buf_save[i])
+			break;
+	end = i;
+
+	if (start > end) {
+		/* No modified register values present. */
+		return 0;
+	}
+
+	rc = qpnp_vreg_write(vreg, addr + start, &buf[start], end - start + 1);
+	if (!rc)
+		for (i = start; i <= end; i++)
+			buf_save[i] = buf[i];
+
+	return rc;
+}
+
+/*
+ * Perform a masked write to a PMIC register only if the new value differs
+ * from the last value written to the register.  This removes redundant
+ * register writing.
+ */
+static int qpnp_vreg_masked_write(struct qpnp_regulator *vreg, u16 addr, u8 val,
+		u8 mask, u8 *reg_save)
+{
+	int rc = 0;
+	u8 reg;
+
+	reg = (*reg_save & ~mask) | (val & mask);
+	if (reg != *reg_save) {
+		rc = qpnp_vreg_write(vreg, addr, &reg, 1);
+
+		if (rc) {
+			vreg_err(vreg, "write failed; addr=0x%03X, rc=%d\n",
+				addr, rc);
+		} else {
+			*reg_save = reg;
+		}
+	}
+
+	return rc;
+}
+
+/*
+ * Perform a masked read-modify-write to a PMIC register only if the new value
+ * differs from the value currently in the register.  This removes redundant
+ * register writing.
+ */
+static int qpnp_vreg_masked_read_write(struct qpnp_regulator *vreg, u16 addr,
+		u8 val, u8 mask)
+{
+	int rc;
+	u8 reg;
+
+	rc = qpnp_vreg_read(vreg, addr, &reg, 1);
+	if (rc) {
+		vreg_err(vreg, "read failed; addr=0x%03X, rc=%d\n", addr, rc);
+		return rc;
+	}
+
+	return qpnp_vreg_masked_write(vreg, addr, val, mask, &reg);
+}
+
+static int qpnp_regulator_common_is_enabled(struct regulator_dev *rdev)
+{
+	struct qpnp_regulator *vreg = rdev_get_drvdata(rdev);
+
+	return (vreg->ctrl_reg[QPNP_COMMON_IDX_ENABLE]
+		& QPNP_COMMON_ENABLE_MASK)
+			== QPNP_COMMON_ENABLE;
+}
+
+static int qpnp_regulator_common_enable(struct regulator_dev *rdev)
+{
+	struct qpnp_regulator *vreg = rdev_get_drvdata(rdev);
+	int rc;
+
+	rc = qpnp_vreg_masked_write(vreg, QPNP_COMMON_REG_ENABLE,
+		QPNP_COMMON_ENABLE, QPNP_COMMON_ENABLE_MASK,
+		&vreg->ctrl_reg[QPNP_COMMON_IDX_ENABLE]);
+
+	if (rc)
+		vreg_err(vreg, "qpnp_vreg_masked_write failed, rc=%d\n", rc);
+	else
+		qpnp_vreg_show_state(rdev, QPNP_REGULATOR_ACTION_ENABLE);
+
+	return rc;
+}
+
+static int qpnp_regulator_vs_enable(struct regulator_dev *rdev)
+{
+	struct qpnp_regulator *vreg = rdev_get_drvdata(rdev);
+
+	if (vreg->ocp_irq) {
+		vreg->ocp_count = 0;
+		vreg->vs_enable_time = ktime_get();
+	}
+
+	return qpnp_regulator_common_enable(rdev);
+}
+
+static int qpnp_regulator_common_disable(struct regulator_dev *rdev)
+{
+	struct qpnp_regulator *vreg = rdev_get_drvdata(rdev);
+	int rc;
+
+	rc = qpnp_vreg_masked_write(vreg, QPNP_COMMON_REG_ENABLE,
+		QPNP_COMMON_DISABLE, QPNP_COMMON_ENABLE_MASK,
+		&vreg->ctrl_reg[QPNP_COMMON_IDX_ENABLE]);
+
+	if (rc)
+		vreg_err(vreg, "qpnp_vreg_masked_write failed, rc=%d\n", rc);
+	else
+		qpnp_vreg_show_state(rdev, QPNP_REGULATOR_ACTION_DISABLE);
+
+	return rc;
+}
+
+/*
+ * Returns 1 if the voltage can be set in the current range or 0 if the
+ * voltage cannot be set in the current range.
+ */
+static int qpnp_regulator_select_voltage_same_range(struct qpnp_regulator *vreg,
+		int min_uV, int max_uV, int *range_sel, int *voltage_sel,
+		unsigned int *selector)
+{
+	struct qpnp_voltage_range *range = NULL;
+	int uV = min_uV;
+	int i;
+
+	*range_sel = vreg->ctrl_reg[QPNP_COMMON_IDX_VOLTAGE_RANGE];
+
+	for (i = 0; i < vreg->set_points->count; i++) {
+		if (vreg->set_points->range[i].range_sel == *range_sel) {
+			range = &vreg->set_points->range[i];
+			break;
+		}
+	}
+
+	if (!range) {
+		/* Unknown range */
+		return 0;
+	}
+
+	if (uV < range->min_uV && max_uV >= range->min_uV)
+		uV = range->min_uV;
+
+	if (uV < range->min_uV || uV > range->max_uV) {
+		/* Current range doesn't support the requested voltage. */
+		return 0;
+	}
+
+	/*
+	 * Force uV to be an allowed set point by applying a ceiling function to
+	 * the uV value.
+	 */
+	*voltage_sel = DIV_ROUND_UP(uV - range->min_uV, range->step_uV);
+	uV = *voltage_sel * range->step_uV + range->min_uV;
+
+	if (uV > max_uV) {
+		/*
+		 * No set point in the current voltage range is within the
+		 * requested min_uV to max_uV range.
+		 */
+		return 0;
+	}
+
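+	/* Translate uV into a flat selector index spanning all ranges. */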
+	*selector = 0;
+	for (i = 0; i < vreg->set_points->count; i++) {
+		if (uV >= vreg->set_points->range[i].set_point_min_uV
+		    && uV <= vreg->set_points->range[i].set_point_max_uV) {
+			*selector +=
+			    (uV - vreg->set_points->range[i].set_point_min_uV)
+				/ vreg->set_points->range[i].step_uV;
+			break;
+		}
+
+		*selector += vreg->set_points->range[i].n_voltages;
+	}
+
+	if (*selector >= vreg->set_points->n_voltages)
+		return 0;
+
+	return 1;
+}
+
+static int qpnp_regulator_select_voltage(struct qpnp_regulator *vreg,
+		int min_uV, int max_uV, int *range_sel, int *voltage_sel,
+		unsigned int *selector)
+{
+	struct qpnp_voltage_range *range;
+	int uV = min_uV;
+	int lim_min_uV, lim_max_uV, i, range_id, range_max_uV;
+
+	/* Check if request voltage is outside of physically settable range. */
+	lim_min_uV = vreg->set_points->range[0].set_point_min_uV;
+	lim_max_uV =
+	  vreg->set_points->range[vreg->set_points->count - 1].set_point_max_uV;
+
+	if (uV < lim_min_uV && max_uV >= lim_min_uV)
+		uV = lim_min_uV;
+
+	if (uV < lim_min_uV || uV > lim_max_uV) {
+		vreg_err(vreg,
+			"request v=[%d, %d] is outside possible v=[%d, %d]\n",
+			 min_uV, max_uV, lim_min_uV, lim_max_uV);
+		return -EINVAL;
+	}
+
+	/* Find the voltage range that uV falls within. */
+	for (i = vreg->set_points->count - 1; i > 0; i--) {
+		range_max_uV = vreg->set_points->range[i - 1].set_point_max_uV;
+		if (uV > range_max_uV && range_max_uV > 0)
+			break;
+	}
+
+	range_id = i;
+	range = &vreg->set_points->range[range_id];
+	*range_sel = range->range_sel;
+
+	/*
+	 * Force uV to be an allowed set point by applying a ceiling function to
+	 * the uV value.
+	 */
+	*voltage_sel = (uV - range->min_uV + range->step_uV - 1)
+			/ range->step_uV;
+	uV = *voltage_sel * range->step_uV + range->min_uV;
+
+	if (uV > max_uV) {
+		vreg_err(vreg,
+			"request v=[%d, %d] cannot be met by any set point; "
+			"next set point: %d\n",
+			min_uV, max_uV, uV);
+		return -EINVAL;
+	}
+
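+	/*
+	 * The selector is a flat index: the sum of n_voltages of all lower
+	 * ranges plus the offset of uV within the chosen range.
+	 */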
+	*selector = 0;
+	for (i = 0; i < range_id; i++)
+		*selector += vreg->set_points->range[i].n_voltages;
+	*selector += (uV - range->set_point_min_uV) / range->step_uV;
+
+	return 0;
+}
+
+static int qpnp_regulator_delay_for_slewing(struct qpnp_regulator *vreg,
+		int prev_voltage)
+{
+	int current_voltage;
+
+	/* Delay for voltage slewing if a step rate is specified. */
+	if (vreg->slew_rate && vreg->rdesc.ops->get_voltage) {
+		current_voltage = vreg->rdesc.ops->get_voltage(vreg->rdev);
+		if (current_voltage < 0) {
+			vreg_err(vreg, "could not get new voltage, rc=%d\n",
+				current_voltage);
+			return current_voltage;
+		}
+
+		udelay(DIV_ROUND_UP(abs(current_voltage - prev_voltage),
+					vreg->slew_rate));
+	}
+
+	return 0;
+}
+
+static int qpnp_regulator_common_set_voltage(struct regulator_dev *rdev,
+		int min_uV, int max_uV, unsigned int *selector)
+{
+	struct qpnp_regulator *vreg = rdev_get_drvdata(rdev);
+	int rc, range_sel, voltage_sel, voltage_old = 0;
+	u8 buf[2];
+
+	if (vreg->slew_rate && vreg->rdesc.ops->get_voltage) {
+		voltage_old = vreg->rdesc.ops->get_voltage(rdev);
+		if (voltage_old < 0) {
+			vreg_err(vreg, "could not get current voltage, rc=%d\n",
+				voltage_old);
+			return voltage_old;
+		}
+	}
+
+	/*
+	 * Favor staying in the current voltage range if possible.  This avoids
+	 * voltage spikes that occur when changing the voltage range.
+	 */
+	rc = qpnp_regulator_select_voltage_same_range(vreg, min_uV, max_uV,
+		&range_sel, &voltage_sel, selector);
+	if (rc == 0)
+		rc = qpnp_regulator_select_voltage(vreg, min_uV, max_uV,
+			&range_sel, &voltage_sel, selector);
+	if (rc < 0) {
+		vreg_err(vreg, "could not set voltage, rc=%d\n", rc);
+		return rc;
+	}
+
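+	/*
+	 * The range and voltage selectors occupy adjacent registers, so they
+	 * can be written together as a two byte buffer.
+	 */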
+	buf[0] = range_sel;
+	buf[1] = voltage_sel;
+	if ((vreg->ctrl_reg[QPNP_COMMON_IDX_VOLTAGE_RANGE] != range_sel)
+	    && (vreg->ctrl_reg[QPNP_COMMON_IDX_VOLTAGE_SET] == voltage_sel)) {
+		/* Handle latched range change. */
+		rc = qpnp_vreg_write(vreg, QPNP_COMMON_REG_VOLTAGE_RANGE,
+				buf, 2);
+		if (!rc) {
+			vreg->ctrl_reg[QPNP_COMMON_IDX_VOLTAGE_RANGE] = buf[0];
+			vreg->ctrl_reg[QPNP_COMMON_IDX_VOLTAGE_SET] = buf[1];
+		}
+	} else {
+		/* Either write can be optimized away safely. */
+		rc = qpnp_vreg_write_optimized(vreg,
+			QPNP_COMMON_REG_VOLTAGE_RANGE, buf,
+			&vreg->ctrl_reg[QPNP_COMMON_IDX_VOLTAGE_RANGE], 2);
+	}
+
+	if (rc) {
+		vreg_err(vreg, "SPMI write failed, rc=%d\n", rc);
+	} else {
+		rc = qpnp_regulator_delay_for_slewing(vreg, voltage_old);
+		if (rc)
+			return rc;
+
+		qpnp_vreg_show_state(rdev, QPNP_REGULATOR_ACTION_VOLTAGE);
+	}
+
+	return rc;
+}
+
+static int qpnp_regulator_common_get_voltage(struct regulator_dev *rdev)
+{
+	struct qpnp_regulator *vreg = rdev_get_drvdata(rdev);
+	struct qpnp_voltage_range *range = NULL;
+	int range_sel, voltage_sel, i;
+
+	range_sel = vreg->ctrl_reg[QPNP_COMMON_IDX_VOLTAGE_RANGE];
+	voltage_sel = vreg->ctrl_reg[QPNP_COMMON_IDX_VOLTAGE_SET];
+
+	for (i = 0; i < vreg->set_points->count; i++) {
+		if (vreg->set_points->range[i].range_sel == range_sel) {
+			range = &vreg->set_points->range[i];
+			break;
+		}
+	}
+
+	if (!range) {
+		vreg_err(vreg, "voltage unknown, range %d is invalid\n",
+			range_sel);
+		return VOLTAGE_UNKNOWN;
+	}
+
+	return range->step_uV * voltage_sel + range->min_uV;
+}
+
+static int qpnp_regulator_single_range_set_voltage(struct regulator_dev *rdev,
+		int min_uV, int max_uV, unsigned int *selector)
+{
+	struct qpnp_regulator *vreg = rdev_get_drvdata(rdev);
+	int rc, range_sel, voltage_sel;
+
+	rc = qpnp_regulator_select_voltage(vreg, min_uV, max_uV, &range_sel,
+		&voltage_sel, selector);
+	if (rc) {
+		vreg_err(vreg, "could not set voltage, rc=%d\n", rc);
+		return rc;
+	}
+
+	/*
+	 * Certain types of regulators do not have a range select register, so
+	 * only the voltage set register needs to be written.
+	 */
+	rc = qpnp_vreg_masked_write(vreg, QPNP_COMMON_REG_VOLTAGE_SET,
+	       voltage_sel, 0xFF, &vreg->ctrl_reg[QPNP_COMMON_IDX_VOLTAGE_SET]);
+
+	if (rc)
+		vreg_err(vreg, "SPMI write failed, rc=%d\n", rc);
+	else
+		qpnp_vreg_show_state(rdev, QPNP_REGULATOR_ACTION_VOLTAGE);
+
+	return rc;
+}
+
+static int qpnp_regulator_single_range_get_voltage(struct regulator_dev *rdev)
+{
+	struct qpnp_regulator *vreg = rdev_get_drvdata(rdev);
+	struct qpnp_voltage_range *range = &vreg->set_points->range[0];
+	int voltage_sel = vreg->ctrl_reg[QPNP_COMMON_IDX_VOLTAGE_SET];
+
+	return range->step_uV * voltage_sel + range->min_uV;
+}
+
+static int qpnp_regulator_ult_lo_smps_set_voltage(struct regulator_dev *rdev,
+		int min_uV, int max_uV, unsigned int *selector)
+{
+	struct qpnp_regulator *vreg = rdev_get_drvdata(rdev);
+	int rc, range_sel, voltage_sel;
+
+	/*
+	 * Favor staying in the current voltage range if possible. This avoids
+	 * voltage spikes that occur when changing the voltage range.
+	 */
+	rc = qpnp_regulator_select_voltage_same_range(vreg, min_uV, max_uV,
+		&range_sel, &voltage_sel, selector);
+	if (rc == 0)
+		rc = qpnp_regulator_select_voltage(vreg, min_uV, max_uV,
+			&range_sel, &voltage_sel, selector);
+	if (rc < 0) {
+		vreg_err(vreg, "could not set voltage, rc=%d\n", rc);
+		return rc;
+	}
+
+	/*
+	 * Calculate VSET based on the range:
+	 * In case of range 0: voltage_sel is a 7-bit value and can be written
+	 *			without any modification.
+	 * In case of range 1: voltage_sel is a 5-bit value, with bits [7:5]
+	 *			set to [011].
+	 */
+	if (range_sel == 1)
+		voltage_sel |= ULT_SMPS_RANGE_SPLIT;
+
+	rc = qpnp_vreg_masked_write(vreg, QPNP_COMMON_REG_VOLTAGE_SET,
+	       voltage_sel, 0xFF, &vreg->ctrl_reg[QPNP_COMMON_IDX_VOLTAGE_SET]);
+	if (rc) {
+		vreg_err(vreg, "SPMI write failed, rc=%d\n", rc);
+	} else {
+		vreg->ctrl_reg[QPNP_COMMON_IDX_VOLTAGE_RANGE] = range_sel;
+		qpnp_vreg_show_state(rdev, QPNP_REGULATOR_ACTION_VOLTAGE);
+	}
+
+	return rc;
+}
+
+static int qpnp_regulator_ult_lo_smps_get_voltage(struct regulator_dev *rdev)
+{
+	struct qpnp_regulator *vreg = rdev_get_drvdata(rdev);
+	struct qpnp_voltage_range *range = NULL;
+	int range_sel, voltage_sel, i;
+
+	range_sel = vreg->ctrl_reg[QPNP_COMMON_IDX_VOLTAGE_RANGE];
+	voltage_sel = vreg->ctrl_reg[QPNP_COMMON_IDX_VOLTAGE_SET];
+
+	for (i = 0; i < vreg->set_points->count; i++) {
+		if (vreg->set_points->range[i].range_sel == range_sel) {
+			range = &vreg->set_points->range[i];
+			break;
+		}
+	}
+
+	if (!range) {
+		vreg_err(vreg, "voltage unknown, range %d is invalid\n",
+			range_sel);
+		return VOLTAGE_UNKNOWN;
+	}
+
+	if (range_sel == 1)
+		voltage_sel &= ~ULT_SMPS_RANGE_SPLIT;
+
+	return range->step_uV * voltage_sel + range->min_uV;
+}
+
+static int qpnp_regulator_common_list_voltage(struct regulator_dev *rdev,
+			unsigned int selector)
+{
+	struct qpnp_regulator *vreg = rdev_get_drvdata(rdev);
+	int uV = 0;
+	int i;
+
+	if (selector >= vreg->set_points->n_voltages)
+		return 0;
+
+	for (i = 0; i < vreg->set_points->count; i++) {
+		if (selector < vreg->set_points->range[i].n_voltages) {
+			uV = selector * vreg->set_points->range[i].step_uV
+				+ vreg->set_points->range[i].set_point_min_uV;
+			break;
+		}
+
+		selector -= vreg->set_points->range[i].n_voltages;
+	}
+
+	return uV;
+}
+
+static int qpnp_regulator_common2_set_voltage(struct regulator_dev *rdev,
+		int min_uV, int max_uV, unsigned int *selector)
+{
+	struct qpnp_regulator *vreg = rdev_get_drvdata(rdev);
+	int rc, range_sel, voltage_sel, voltage_old = 0;
+	int voltage_uV, voltage_mV;
+	u8 buf[2];
+
+	if (vreg->slew_rate && vreg->rdesc.ops->get_voltage) {
+		voltage_old = vreg->rdesc.ops->get_voltage(rdev);
+		if (voltage_old < 0) {
+			vreg_err(vreg, "could not get current voltage, rc=%d\n",
+				voltage_old);
+			return voltage_old;
+		}
+	}
+
+	rc = qpnp_regulator_select_voltage(vreg, min_uV, max_uV, &range_sel,
+					   &voltage_sel, selector);
+	if (rc < 0) {
+		vreg_err(vreg, "could not set voltage, rc=%d\n", rc);
+		return rc;
+	}
+
+	voltage_uV = qpnp_regulator_common_list_voltage(rdev, *selector);
+	voltage_mV = voltage_uV / 1000;
+	buf[0] = voltage_mV & 0xFF;
+	buf[1] = (voltage_mV >> 8) & 0xFF;
+
+	if (vreg->ctrl_reg[QPNP_COMMON2_IDX_VOLTAGE_LSB] != buf[0]
+	    || vreg->ctrl_reg[QPNP_COMMON2_IDX_VOLTAGE_MSB] != buf[1]) {
+		/* MSB must always be written even if it is unchanged. */
+		rc = qpnp_vreg_write(vreg, QPNP_COMMON2_REG_VOLTAGE_LSB,
+				     buf, 2);
+		if (rc) {
+			vreg_err(vreg, "SPMI write failed, rc=%d\n", rc);
+			return rc;
+		}
+
+		vreg->ctrl_reg[QPNP_COMMON2_IDX_VOLTAGE_LSB] = buf[0];
+		vreg->ctrl_reg[QPNP_COMMON2_IDX_VOLTAGE_MSB] = buf[1];
+
+		rc = qpnp_regulator_delay_for_slewing(vreg, voltage_old);
+		if (rc)
+			return rc;
+
+		qpnp_vreg_show_state(rdev, QPNP_REGULATOR_ACTION_VOLTAGE);
+	}
+
+	return rc;
+}
+
+static int qpnp_regulator_common2_get_voltage(struct regulator_dev *rdev)
+{
+	struct qpnp_regulator *vreg = rdev_get_drvdata(rdev);
+
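+	/*
+	 * The cached set point is a 16-bit millivolt value split across the
+	 * LSB/MSB control registers; convert it back to microvolts.
+	 */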
+	return (((int)vreg->ctrl_reg[QPNP_COMMON2_IDX_VOLTAGE_MSB] << 8)
+		| (int)vreg->ctrl_reg[QPNP_COMMON2_IDX_VOLTAGE_LSB]) * 1000;
+}
+
+static unsigned int qpnp_regulator_common_get_mode(struct regulator_dev *rdev)
+{
+	struct qpnp_regulator *vreg = rdev_get_drvdata(rdev);
+
+	return (vreg->ctrl_reg[QPNP_COMMON_IDX_MODE]
+		& QPNP_COMMON_MODE_HPM_MASK)
+			? REGULATOR_MODE_NORMAL : REGULATOR_MODE_IDLE;
+}
+
+static int qpnp_regulator_common_set_mode(struct regulator_dev *rdev,
+					unsigned int mode)
+{
+	struct qpnp_regulator *vreg = rdev_get_drvdata(rdev);
+	int rc = 0;
+	u8 val;
+
+	if (mode != REGULATOR_MODE_NORMAL && mode != REGULATOR_MODE_IDLE) {
+		vreg_err(vreg, "invalid mode: %u\n", mode);
+		return -EINVAL;
+	}
+
+	val = (mode == REGULATOR_MODE_NORMAL ? QPNP_COMMON_MODE_HPM_MASK : 0);
+
+	rc = qpnp_vreg_masked_write(vreg, QPNP_COMMON_REG_MODE, val,
+		QPNP_COMMON_MODE_HPM_MASK,
+		&vreg->ctrl_reg[QPNP_COMMON_IDX_MODE]);
+
+	if (rc)
+		vreg_err(vreg, "SPMI write failed, rc=%d\n", rc);
+	else
+		qpnp_vreg_show_state(rdev, QPNP_REGULATOR_ACTION_MODE);
+
+	return rc;
+}
+
+static unsigned int qpnp_regulator_common_get_optimum_mode(
+		struct regulator_dev *rdev, int input_uV, int output_uV,
+		int load_uA)
+{
+	struct qpnp_regulator *vreg = rdev_get_drvdata(rdev);
+	unsigned int mode;
+
+	if (load_uA + vreg->system_load >= vreg->hpm_min_load)
+		mode = REGULATOR_MODE_NORMAL;
+	else
+		mode = REGULATOR_MODE_IDLE;
+
+	return mode;
+}
+
+static unsigned int qpnp_regulator_common2_get_mode(struct regulator_dev *rdev)
+{
+	struct qpnp_regulator *vreg = rdev_get_drvdata(rdev);
+
+	return vreg->ctrl_reg[QPNP_COMMON2_IDX_MODE] == QPNP_COMMON2_MODE_HPM
+		? REGULATOR_MODE_NORMAL : REGULATOR_MODE_IDLE;
+}
+
+static int qpnp_regulator_common2_set_mode(struct regulator_dev *rdev,
+					unsigned int mode)
+{
+	struct qpnp_regulator *vreg = rdev_get_drvdata(rdev);
+	int rc = 0;
+	u8 val = QPNP_COMMON2_MODE_HPM;
+
+	if (mode != REGULATOR_MODE_NORMAL && mode != REGULATOR_MODE_IDLE) {
+		vreg_err(vreg, "invalid mode: %u\n", mode);
+		return -EINVAL;
+	}
+
+	/*
+	 * Use init_mode as the low power mode unless it is equal to HPM.  This
+	 * ensures that AUTO mode is re-asserted after switching away from
+	 * forced HPM if it was configured initially.
+	 */
+	if (mode == REGULATOR_MODE_NORMAL)
+		val = QPNP_COMMON2_MODE_HPM;
+	else if (vreg->init_mode == QPNP_COMMON2_MODE_HPM)
+		val = QPNP_COMMON2_MODE_LPM;
+	else
+		val = vreg->init_mode;
+
+	rc = qpnp_vreg_write_optimized(vreg, QPNP_COMMON2_REG_MODE, &val,
+				&vreg->ctrl_reg[QPNP_COMMON2_IDX_MODE], 1);
+	if (rc)
+		vreg_err(vreg, "SPMI write failed, rc=%d\n", rc);
+	else
+		qpnp_vreg_show_state(rdev, QPNP_REGULATOR_ACTION_MODE);
+
+	return rc;
+}
+
+static int qpnp_regulator_common_enable_time(struct regulator_dev *rdev)
+{
+	struct qpnp_regulator *vreg = rdev_get_drvdata(rdev);
+
+	return vreg->enable_time;
+}
+
+static int qpnp_regulator_vs_clear_ocp(struct qpnp_regulator *vreg)
+{
+	int rc;
+
+	rc = qpnp_vreg_masked_write(vreg, QPNP_COMMON_REG_ENABLE,
+		QPNP_COMMON_DISABLE, QPNP_COMMON_ENABLE_MASK,
+		&vreg->ctrl_reg[QPNP_COMMON_IDX_ENABLE]);
+	if (rc)
+		vreg_err(vreg, "qpnp_vreg_masked_write failed, rc=%d\n", rc);
+
+	vreg->vs_enable_time = ktime_get();
+
+	rc = qpnp_vreg_masked_write(vreg, QPNP_COMMON_REG_ENABLE,
+		QPNP_COMMON_ENABLE, QPNP_COMMON_ENABLE_MASK,
+		&vreg->ctrl_reg[QPNP_COMMON_IDX_ENABLE]);
+	if (rc)
+		vreg_err(vreg, "qpnp_vreg_masked_write failed, rc=%d\n", rc);
+
+	if (qpnp_vreg_debug_mask & QPNP_VREG_DEBUG_OCP) {
+		pr_info("%s: switch state toggled after OCP event\n",
+			vreg->rdesc.name);
+	}
+
+	return rc;
+}
+
+static void qpnp_regulator_vs_ocp_work(struct work_struct *work)
+{
+	struct delayed_work *dwork
+		= container_of(work, struct delayed_work, work);
+	struct qpnp_regulator *vreg
+		= container_of(dwork, struct qpnp_regulator, ocp_work);
+
+	qpnp_regulator_vs_clear_ocp(vreg);
+}
+
+static irqreturn_t qpnp_regulator_vs_ocp_isr(int irq, void *data)
+{
+	struct qpnp_regulator *vreg = data;
+	ktime_t ocp_irq_time;
+	s64 ocp_trigger_delay_us;
+
+	ocp_irq_time = ktime_get();
+	ocp_trigger_delay_us = ktime_us_delta(ocp_irq_time,
+						vreg->vs_enable_time);
+
+	/*
+	 * Reset the OCP count if there is a large delay between switch enable
+	 * and when OCP triggers.  This is indicative of a hotplug event as
+	 * opposed to a fault.
+	 */
+	if (ocp_trigger_delay_us > QPNP_VS_OCP_FAULT_DELAY_US)
+		vreg->ocp_count = 0;
+
+	/* Wait for switch output to settle back to 0 V after OCP triggered. */
+	udelay(QPNP_VS_OCP_FALL_DELAY_US);
+
+	vreg->ocp_count++;
+
+	if (qpnp_vreg_debug_mask & QPNP_VREG_DEBUG_OCP) {
+		pr_info("%s: VS OCP triggered, count = %d, delay = %lld us\n",
+			vreg->rdesc.name, vreg->ocp_count,
+			ocp_trigger_delay_us);
+	}
+
+	if (vreg->ocp_count == 1) {
+		/* Immediately clear the over current condition. */
+		qpnp_regulator_vs_clear_ocp(vreg);
+	} else if (vreg->ocp_count <= vreg->ocp_max_retries) {
+		/* Schedule the over current clear task to run later. */
+		schedule_delayed_work(&vreg->ocp_work,
+			msecs_to_jiffies(vreg->ocp_retry_delay_ms) + 1);
+	} else {
+		vreg_err(vreg, "OCP triggered %d times; no further retries\n",
+			vreg->ocp_count);
+	}
+
+	return IRQ_HANDLED;
+}
+
+static const char * const qpnp_print_actions[] = {
+	[QPNP_REGULATOR_ACTION_INIT]	= "initial    ",
+	[QPNP_REGULATOR_ACTION_ENABLE]	= "enable     ",
+	[QPNP_REGULATOR_ACTION_DISABLE]	= "disable    ",
+	[QPNP_REGULATOR_ACTION_VOLTAGE]	= "set voltage",
+	[QPNP_REGULATOR_ACTION_MODE]	= "set mode   ",
+};
+
+static const char * const qpnp_common2_mode_label[] = {
+	[0]				= "RSV",
+	[1]				= "RSV",
+	[2]				= "RSV",
+	[QPNP_COMMON2_MODE_BYPASS]	= "BYP",
+	[QPNP_COMMON2_MODE_RETENTION]	= "RET",
+	[QPNP_COMMON2_MODE_LPM]		= "LPM",
+	[QPNP_COMMON2_MODE_AUTO]	= "AUTO",
+	[QPNP_COMMON2_MODE_HPM]		= "HPM",
+};
+
+static void qpnp_vreg_show_state(struct regulator_dev *rdev,
+				   enum qpnp_regulator_action action)
+{
+	struct qpnp_regulator *vreg = rdev_get_drvdata(rdev);
+	const char *action_label = qpnp_print_actions[action];
+	unsigned int mode = 0;
+	int uV = 0;
+	const char *mode_label = "";
+	enum qpnp_regulator_logical_type type;
+	const char *enable_label = "";
+	char pc_enable_label[5] = {'\0'};
+	char pc_mode_label[8] = {'\0'};
+	bool show_req, show_dupe, show_init, has_changed;
+	u8 en_reg, mode_reg;
+
+	/* Do not print unless appropriate flags are set. */
+	show_req = qpnp_vreg_debug_mask & QPNP_VREG_DEBUG_REQUEST;
+	show_dupe = qpnp_vreg_debug_mask & QPNP_VREG_DEBUG_DUPLICATE;
+	show_init = qpnp_vreg_debug_mask & QPNP_VREG_DEBUG_INIT;
+	has_changed = vreg->write_count != vreg->prev_write_count;
+	if (!((show_init && action == QPNP_REGULATOR_ACTION_INIT)
+	      || (show_req && (has_changed || show_dupe)))) {
+		return;
+	}
+
+	vreg->prev_write_count = vreg->write_count;
+
+	type = vreg->logical_type;
+
+	if (vreg->rdesc.ops->is_enabled)
+		enable_label = vreg->rdesc.ops->is_enabled(rdev)
+				? "on " : "off";
+
+	if (vreg->rdesc.ops->get_voltage)
+		uV = vreg->rdesc.ops->get_voltage(rdev);
+
+	if (vreg->rdesc.ops->get_mode) {
+		mode = vreg->rdesc.ops->get_mode(rdev);
+		mode_label = mode == REGULATOR_MODE_NORMAL ? "HPM" : "LPM";
+	}
+
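+	/*
+	 * Build a label showing which of the hardware enable pins EN3..EN0
+	 * the regulator enable state is set to follow.
+	 */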
+	if (type == QPNP_REGULATOR_LOGICAL_TYPE_SMPS
+	    || type == QPNP_REGULATOR_LOGICAL_TYPE_LDO
+	    || type == QPNP_REGULATOR_LOGICAL_TYPE_VS) {
+		en_reg = vreg->ctrl_reg[QPNP_COMMON_IDX_ENABLE];
+		pc_enable_label[0] =
+		     en_reg & QPNP_COMMON_ENABLE_FOLLOW_HW_EN3_MASK ? '3' : '_';
+		pc_enable_label[1] =
+		     en_reg & QPNP_COMMON_ENABLE_FOLLOW_HW_EN2_MASK ? '2' : '_';
+		pc_enable_label[2] =
+		     en_reg & QPNP_COMMON_ENABLE_FOLLOW_HW_EN1_MASK ? '1' : '_';
+		pc_enable_label[3] =
+		     en_reg & QPNP_COMMON_ENABLE_FOLLOW_HW_EN0_MASK ? '0' : '_';
+	}
+
+	switch (type) {
+	case QPNP_REGULATOR_LOGICAL_TYPE_SMPS:
+		mode_reg = vreg->ctrl_reg[QPNP_COMMON_IDX_MODE];
+		pc_mode_label[0] =
+		     mode_reg & QPNP_COMMON_MODE_AUTO_MASK          ? 'A' : '_';
+		pc_mode_label[1] =
+		     mode_reg & QPNP_COMMON_MODE_FOLLOW_AWAKE_MASK  ? 'W' : '_';
+		pc_mode_label[2] =
+		     mode_reg & QPNP_COMMON_MODE_FOLLOW_HW_EN3_MASK ? '3' : '_';
+		pc_mode_label[3] =
+		     mode_reg & QPNP_COMMON_MODE_FOLLOW_HW_EN2_MASK ? '2' : '_';
+		pc_mode_label[4] =
+		     mode_reg & QPNP_COMMON_MODE_FOLLOW_HW_EN1_MASK ? '1' : '_';
+		pc_mode_label[5] =
+		     mode_reg & QPNP_COMMON_MODE_FOLLOW_HW_EN0_MASK ? '0' : '_';
+
+		pr_info("%s %-11s: %s, v=%7d uV, mode=%s, pc_en=%s, alt_mode=%s\n",
+			action_label, vreg->rdesc.name, enable_label, uV,
+			mode_label, pc_enable_label, pc_mode_label);
+		break;
+	case QPNP_REGULATOR_LOGICAL_TYPE_LDO:
+		mode_reg = vreg->ctrl_reg[QPNP_COMMON_IDX_MODE];
+		pc_mode_label[0] =
+		     mode_reg & QPNP_COMMON_MODE_AUTO_MASK          ? 'A' : '_';
+		pc_mode_label[1] =
+		     mode_reg & QPNP_COMMON_MODE_BYPASS_MASK        ? 'B' : '_';
+		pc_mode_label[2] =
+		     mode_reg & QPNP_COMMON_MODE_FOLLOW_AWAKE_MASK  ? 'W' : '_';
+		pc_mode_label[3] =
+		     mode_reg & QPNP_COMMON_MODE_FOLLOW_HW_EN3_MASK ? '3' : '_';
+		pc_mode_label[4] =
+		     mode_reg & QPNP_COMMON_MODE_FOLLOW_HW_EN2_MASK ? '2' : '_';
+		pc_mode_label[5] =
+		     mode_reg & QPNP_COMMON_MODE_FOLLOW_HW_EN1_MASK ? '1' : '_';
+		pc_mode_label[6] =
+		     mode_reg & QPNP_COMMON_MODE_FOLLOW_HW_EN0_MASK ? '0' : '_';
+
+		pr_info("%s %-11s: %s, v=%7d uV, mode=%s, pc_en=%s, alt_mode=%s\n",
+			action_label, vreg->rdesc.name, enable_label, uV,
+			mode_label, pc_enable_label, pc_mode_label);
+		break;
+	case QPNP_REGULATOR_LOGICAL_TYPE_LN_LDO:
+		mode_reg = vreg->ctrl_reg[QPNP_COMMON_IDX_MODE];
+		pc_mode_label[0] =
+		     mode_reg & QPNP_COMMON_MODE_BYPASS_MASK ? 'B' : '_';
+
+		pr_info("%s %-11s: %s, v=%7d uV, alt_mode=%s\n",
+			action_label, vreg->rdesc.name, enable_label, uV,
+			pc_mode_label);
+		break;
+	case QPNP_REGULATOR_LOGICAL_TYPE_VS:
+		mode_reg = vreg->ctrl_reg[QPNP_COMMON_IDX_MODE];
+		pc_mode_label[0] =
+		     mode_reg & QPNP_COMMON_MODE_AUTO_MASK          ? 'A' : '_';
+		pc_mode_label[1] =
+		     mode_reg & QPNP_COMMON_MODE_FOLLOW_AWAKE_MASK  ? 'W' : '_';
+
+		pr_info("%s %-11s: %s, mode=%s, pc_en=%s, alt_mode=%s\n",
+			action_label, vreg->rdesc.name, enable_label,
+			mode_label, pc_enable_label, pc_mode_label);
+		break;
+	case QPNP_REGULATOR_LOGICAL_TYPE_BOOST:
+		pr_info("%s %-11s: %s, v=%7d uV\n",
+			action_label, vreg->rdesc.name, enable_label, uV);
+		break;
+	case QPNP_REGULATOR_LOGICAL_TYPE_BOOST_BYP:
+		pr_info("%s %-11s: %s, v=%7d uV\n",
+			action_label, vreg->rdesc.name, enable_label, uV);
+		break;
+	case QPNP_REGULATOR_LOGICAL_TYPE_FTSMPS:
+		mode_reg = vreg->ctrl_reg[QPNP_COMMON_IDX_MODE];
+		pc_mode_label[0] =
+		     mode_reg & QPNP_COMMON_MODE_AUTO_MASK          ? 'A' : '_';
+
+		pr_info("%s %-11s: %s, v=%7d uV, mode=%s, alt_mode=%s\n",
+			action_label, vreg->rdesc.name, enable_label, uV,
+			mode_label, pc_mode_label);
+		break;
+	case QPNP_REGULATOR_LOGICAL_TYPE_ULT_LO_SMPS:
+	case QPNP_REGULATOR_LOGICAL_TYPE_ULT_HO_SMPS:
+		mode_reg = vreg->ctrl_reg[QPNP_COMMON_IDX_MODE];
+		pc_mode_label[0] =
+		     mode_reg & QPNP_COMMON_MODE_FOLLOW_AWAKE_MASK  ? 'W' : '_';
+		pr_info("%s %-11s: %s, v=%7d uV, mode=%s, alt_mode=%s\n",
+			action_label, vreg->rdesc.name, enable_label, uV,
+			mode_label, pc_mode_label);
+		break;
+	case QPNP_REGULATOR_LOGICAL_TYPE_ULT_LDO:
+		mode_reg = vreg->ctrl_reg[QPNP_COMMON_IDX_MODE];
+		pc_mode_label[0] =
+		     mode_reg & QPNP_COMMON_MODE_BYPASS_MASK        ? 'B' : '_';
+		pc_mode_label[1] =
+		     mode_reg & QPNP_COMMON_MODE_FOLLOW_AWAKE_MASK  ? 'W' : '_';
+		pr_info("%s %-11s: %s, v=%7d uV, mode=%s, alt_mode=%s\n",
+			action_label, vreg->rdesc.name, enable_label, uV,
+			mode_label, pc_mode_label);
+		break;
+	case QPNP_REGULATOR_LOGICAL_TYPE_FTSMPS2:
+		mode_reg = vreg->ctrl_reg[QPNP_COMMON_IDX_MODE];
+		mode_label = qpnp_common2_mode_label[mode_reg
+						     & QPNP_COMMON2_MODE_MASK];
+		pr_info("%s %-11s: %s, v=%7d uV, mode=%s\n",
+			action_label, vreg->rdesc.name, enable_label, uV,
+			mode_label);
+		break;
+	default:
+		break;
+	}
+}
+
+static struct regulator_ops qpnp_smps_ops = {
+	.enable			= qpnp_regulator_common_enable,
+	.disable		= qpnp_regulator_common_disable,
+	.is_enabled		= qpnp_regulator_common_is_enabled,
+	.set_voltage		= qpnp_regulator_common_set_voltage,
+	.get_voltage		= qpnp_regulator_common_get_voltage,
+	.list_voltage		= qpnp_regulator_common_list_voltage,
+	.set_mode		= qpnp_regulator_common_set_mode,
+	.get_mode		= qpnp_regulator_common_get_mode,
+	.get_optimum_mode	= qpnp_regulator_common_get_optimum_mode,
+	.enable_time		= qpnp_regulator_common_enable_time,
+};
+
+static struct regulator_ops qpnp_ldo_ops = {
+	.enable			= qpnp_regulator_common_enable,
+	.disable		= qpnp_regulator_common_disable,
+	.is_enabled		= qpnp_regulator_common_is_enabled,
+	.set_voltage		= qpnp_regulator_common_set_voltage,
+	.get_voltage		= qpnp_regulator_common_get_voltage,
+	.list_voltage		= qpnp_regulator_common_list_voltage,
+	.set_mode		= qpnp_regulator_common_set_mode,
+	.get_mode		= qpnp_regulator_common_get_mode,
+	.get_optimum_mode	= qpnp_regulator_common_get_optimum_mode,
+	.enable_time		= qpnp_regulator_common_enable_time,
+};
+
+static struct regulator_ops qpnp_ln_ldo_ops = {
+	.enable			= qpnp_regulator_common_enable,
+	.disable		= qpnp_regulator_common_disable,
+	.is_enabled		= qpnp_regulator_common_is_enabled,
+	.set_voltage		= qpnp_regulator_common_set_voltage,
+	.get_voltage		= qpnp_regulator_common_get_voltage,
+	.list_voltage		= qpnp_regulator_common_list_voltage,
+	.enable_time		= qpnp_regulator_common_enable_time,
+};
+
+static struct regulator_ops qpnp_vs_ops = {
+	.enable			= qpnp_regulator_vs_enable,
+	.disable		= qpnp_regulator_common_disable,
+	.is_enabled		= qpnp_regulator_common_is_enabled,
+	.enable_time		= qpnp_regulator_common_enable_time,
+};
+
+static struct regulator_ops qpnp_boost_ops = {
+	.enable			= qpnp_regulator_common_enable,
+	.disable		= qpnp_regulator_common_disable,
+	.is_enabled		= qpnp_regulator_common_is_enabled,
+	.set_voltage		= qpnp_regulator_single_range_set_voltage,
+	.get_voltage		= qpnp_regulator_single_range_get_voltage,
+	.list_voltage		= qpnp_regulator_common_list_voltage,
+	.enable_time		= qpnp_regulator_common_enable_time,
+};
+
+static struct regulator_ops qpnp_ftsmps_ops = {
+	.enable			= qpnp_regulator_common_enable,
+	.disable		= qpnp_regulator_common_disable,
+	.is_enabled		= qpnp_regulator_common_is_enabled,
+	.set_voltage		= qpnp_regulator_common_set_voltage,
+	.get_voltage		= qpnp_regulator_common_get_voltage,
+	.list_voltage		= qpnp_regulator_common_list_voltage,
+	.set_mode		= qpnp_regulator_common_set_mode,
+	.get_mode		= qpnp_regulator_common_get_mode,
+	.get_optimum_mode	= qpnp_regulator_common_get_optimum_mode,
+	.enable_time		= qpnp_regulator_common_enable_time,
+};
+
+static struct regulator_ops qpnp_ult_lo_smps_ops = {
+	.enable			= qpnp_regulator_common_enable,
+	.disable		= qpnp_regulator_common_disable,
+	.is_enabled		= qpnp_regulator_common_is_enabled,
+	.set_voltage		= qpnp_regulator_ult_lo_smps_set_voltage,
+	.get_voltage		= qpnp_regulator_ult_lo_smps_get_voltage,
+	.list_voltage		= qpnp_regulator_common_list_voltage,
+	.set_mode		= qpnp_regulator_common_set_mode,
+	.get_mode		= qpnp_regulator_common_get_mode,
+	.get_optimum_mode	= qpnp_regulator_common_get_optimum_mode,
+	.enable_time		= qpnp_regulator_common_enable_time,
+};
+
+static struct regulator_ops qpnp_ult_ho_smps_ops = {
+	.enable			= qpnp_regulator_common_enable,
+	.disable		= qpnp_regulator_common_disable,
+	.is_enabled		= qpnp_regulator_common_is_enabled,
+	.set_voltage		= qpnp_regulator_single_range_set_voltage,
+	.get_voltage		= qpnp_regulator_single_range_get_voltage,
+	.list_voltage		= qpnp_regulator_common_list_voltage,
+	.set_mode		= qpnp_regulator_common_set_mode,
+	.get_mode		= qpnp_regulator_common_get_mode,
+	.get_optimum_mode	= qpnp_regulator_common_get_optimum_mode,
+	.enable_time		= qpnp_regulator_common_enable_time,
+};
+
+static struct regulator_ops qpnp_ult_ldo_ops = {
+	.enable			= qpnp_regulator_common_enable,
+	.disable		= qpnp_regulator_common_disable,
+	.is_enabled		= qpnp_regulator_common_is_enabled,
+	.set_voltage		= qpnp_regulator_single_range_set_voltage,
+	.get_voltage		= qpnp_regulator_single_range_get_voltage,
+	.list_voltage		= qpnp_regulator_common_list_voltage,
+	.set_mode		= qpnp_regulator_common_set_mode,
+	.get_mode		= qpnp_regulator_common_get_mode,
+	.get_optimum_mode	= qpnp_regulator_common_get_optimum_mode,
+	.enable_time		= qpnp_regulator_common_enable_time,
+};
+
+static struct regulator_ops qpnp_ftsmps426_ops = {
+	.enable			= qpnp_regulator_common_enable,
+	.disable		= qpnp_regulator_common_disable,
+	.is_enabled		= qpnp_regulator_common_is_enabled,
+	.set_voltage		= qpnp_regulator_common2_set_voltage,
+	.get_voltage		= qpnp_regulator_common2_get_voltage,
+	.list_voltage		= qpnp_regulator_common_list_voltage,
+	.set_mode		= qpnp_regulator_common2_set_mode,
+	.get_mode		= qpnp_regulator_common2_get_mode,
+	.get_optimum_mode	= qpnp_regulator_common_get_optimum_mode,
+	.enable_time		= qpnp_regulator_common_enable_time,
+};
+
+/* Maximum possible digital major revision value */
+#define INF 0xFF
+
+static const struct qpnp_regulator_mapping supported_regulators[] = {
+	/*           type subtype dig_min dig_max ltype ops setpoints hpm_min */
+	QPNP_VREG_MAP(BUCK,  GP_CTL,   0, INF, SMPS,   smps,   smps,   100000),
+	QPNP_VREG_MAP(LDO,   N300,     0, INF, LDO,    ldo,    nldo1,   10000),
+	QPNP_VREG_MAP(LDO,   N600,     0,   0, LDO,    ldo,    nldo2,   10000),
+	QPNP_VREG_MAP(LDO,   N1200,    0,   0, LDO,    ldo,    nldo2,   10000),
+	QPNP_VREG_MAP(LDO,   N600,     1, INF, LDO,    ldo,    nldo3,   10000),
+	QPNP_VREG_MAP(LDO,   N1200,    1, INF, LDO,    ldo,    nldo3,   10000),
+	QPNP_VREG_MAP(LDO,   N600_ST,  0,   0, LDO,    ldo,    nldo2,   10000),
+	QPNP_VREG_MAP(LDO,   N1200_ST, 0,   0, LDO,    ldo,    nldo2,   10000),
+	QPNP_VREG_MAP(LDO,   N600_ST,  1, INF, LDO,    ldo,    nldo3,   10000),
+	QPNP_VREG_MAP(LDO,   N1200_ST, 1, INF, LDO,    ldo,    nldo3,   10000),
+	QPNP_VREG_MAP(LDO,   P50,      0, INF, LDO,    ldo,    pldo,     5000),
+	QPNP_VREG_MAP(LDO,   P150,     0, INF, LDO,    ldo,    pldo,    10000),
+	QPNP_VREG_MAP(LDO,   P300,     0, INF, LDO,    ldo,    pldo,    10000),
+	QPNP_VREG_MAP(LDO,   P600,     0, INF, LDO,    ldo,    pldo,    10000),
+	QPNP_VREG_MAP(LDO,   P1200,    0, INF, LDO,    ldo,    pldo,    10000),
+	QPNP_VREG_MAP(LDO,   LN,       0, INF, LN_LDO, ln_ldo, ln_ldo,      0),
+	QPNP_VREG_MAP(LDO,   LV_P50,   0, INF, LDO,    ldo,    pldo,     5000),
+	QPNP_VREG_MAP(LDO,   LV_P150,  0, INF, LDO,    ldo,    pldo,    10000),
+	QPNP_VREG_MAP(LDO,   LV_P300,  0, INF, LDO,    ldo,    pldo,    10000),
+	QPNP_VREG_MAP(LDO,   LV_P600,  0, INF, LDO,    ldo,    pldo,    10000),
+	QPNP_VREG_MAP(LDO,   LV_P1200, 0, INF, LDO,    ldo,    pldo,    10000),
+	QPNP_VREG_MAP(VS,    LV100,    0, INF, VS,     vs,     none,        0),
+	QPNP_VREG_MAP(VS,    LV300,    0, INF, VS,     vs,     none,        0),
+	QPNP_VREG_MAP(VS,    MV300,    0, INF, VS,     vs,     none,        0),
+	QPNP_VREG_MAP(VS,    MV500,    0, INF, VS,     vs,     none,        0),
+	QPNP_VREG_MAP(VS,    HDMI,     0, INF, VS,     vs,     none,        0),
+	QPNP_VREG_MAP(VS,    OTG,      0, INF, VS,     vs,     none,        0),
+	QPNP_VREG_MAP(BOOST, 5V_BOOST, 0, INF, BOOST,  boost,  boost,       0),
+	QPNP_VREG_MAP(FTS,   FTS_CTL,  0, INF, FTSMPS, ftsmps, ftsmps, 100000),
+	QPNP_VREG_MAP(FTS, FTS2p5_CTL, 0, INF, FTSMPS, ftsmps, ftsmps2p5,
+								       100000),
+	QPNP_VREG_MAP(BOOST_BYP, BB_2A, 0, INF, BOOST_BYP, boost, boost_byp, 0),
+	QPNP_VREG_MAP(ULT_BUCK, ULT_HF_CTL1, 0, INF, ULT_LO_SMPS, ult_lo_smps,
+							ult_lo_smps,   100000),
+	QPNP_VREG_MAP(ULT_BUCK, ULT_HF_CTL2, 0, INF, ULT_LO_SMPS, ult_lo_smps,
+							ult_lo_smps,   100000),
+	QPNP_VREG_MAP(ULT_BUCK, ULT_HF_CTL3, 0, INF, ULT_LO_SMPS, ult_lo_smps,
+							ult_lo_smps,   100000),
+	QPNP_VREG_MAP(ULT_BUCK, ULT_HF_CTL4, 0, INF, ULT_HO_SMPS, ult_ho_smps,
+							ult_ho_smps,   100000),
+	QPNP_VREG_MAP(ULT_LDO, N300_ST, 0, INF, ULT_LDO, ult_ldo, ult_nldo,
+									10000),
+	QPNP_VREG_MAP(ULT_LDO, N600_ST, 0, INF, ULT_LDO, ult_ldo, ult_nldo,
+									10000),
+	QPNP_VREG_MAP(ULT_LDO, N1200_ST, 0, INF, ULT_LDO, ult_ldo, ult_nldo,
+									10000),
+	QPNP_VREG_MAP(ULT_LDO, LV_P150,  0, INF, ULT_LDO, ult_ldo, ult_pldo,
+									10000),
+	QPNP_VREG_MAP(ULT_LDO, LV_P300,  0, INF, ULT_LDO, ult_ldo, ult_pldo,
+									10000),
+	QPNP_VREG_MAP(ULT_LDO, P600,     0, INF, ULT_LDO, ult_ldo, ult_pldo,
+									10000),
+	QPNP_VREG_MAP(ULT_LDO, P150,     0, INF, ULT_LDO, ult_ldo, ult_pldo,
+									10000),
+	QPNP_VREG_MAP(ULT_LDO, P50,      0, INF, ULT_LDO, ult_ldo, ult_pldo,
+									 5000),
+	QPNP_VREG_MAP(FTS,     FTS426,  0, INF, FTSMPS2, ftsmps426, ftsmps426,
+								       100000),
+};
+
+static int qpnp_regulator_match(struct qpnp_regulator *vreg)
+{
+	const struct qpnp_regulator_mapping *mapping;
+	struct device_node *node = vreg->pdev->dev.of_node;
+	int rc, i;
+	u32 type_reg[2], dig_major_rev;
+	u8 version[QPNP_COMMON_REG_SUBTYPE - QPNP_COMMON_REG_DIG_MAJOR_REV + 1];
+	u8 type, subtype;
+
+	rc = qpnp_vreg_read(vreg, QPNP_COMMON_REG_DIG_MAJOR_REV, version,
+		ARRAY_SIZE(version));
+	if (rc) {
+		vreg_err(vreg, "could not read version registers, rc=%d\n", rc);
+		return rc;
+	}
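+	/* version[] holds DIG_MAJOR_REV, TYPE and SUBTYPE read in one burst. */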
+	dig_major_rev	= version[QPNP_COMMON_REG_DIG_MAJOR_REV
+					- QPNP_COMMON_REG_DIG_MAJOR_REV];
+	type		= version[QPNP_COMMON_REG_TYPE
+					- QPNP_COMMON_REG_DIG_MAJOR_REV];
+	subtype		= version[QPNP_COMMON_REG_SUBTYPE
+					- QPNP_COMMON_REG_DIG_MAJOR_REV];
+
+	/*
+	 * Override type and subtype register values if qcom,force-type is
+	 * present in the device tree node.
+	 */
+	rc = of_property_read_u32_array(node, "qcom,force-type", type_reg, 2);
+	if (!rc) {
+		type = type_reg[0];
+		subtype = type_reg[1];
+	}
+
+	rc = -ENODEV;
+	for (i = 0; i < ARRAY_SIZE(supported_regulators); i++) {
+		mapping = &supported_regulators[i];
+		if (mapping->type == type && mapping->subtype == subtype
+		    && mapping->revision_min <= dig_major_rev
+		    && mapping->revision_max >= dig_major_rev) {
+			vreg->logical_type	= mapping->logical_type;
+			vreg->set_points	= mapping->set_points;
+			vreg->hpm_min_load	= mapping->hpm_min_load;
+			vreg->rdesc.ops		= mapping->ops;
+			vreg->rdesc.n_voltages
+				= mapping->set_points->n_voltages;
+			rc = 0;
+			break;
+		}
+	}
+
+	if (rc)
+		vreg_err(vreg, "unsupported regulator: type=0x%02X, subtype=0x%02X, dig major rev=0x%02X\n",
+			type, subtype, dig_major_rev);
+
+	return rc;
+}
+
+static int qpnp_regulator_check_constraints(struct qpnp_regulator *vreg,
+				struct qpnp_regulator_platform_data *pdata)
+{
+	struct qpnp_voltage_range *range = NULL;
+	int i, rc = 0, limit_min_uV, limit_max_uV, max_uV;
+	u8 reg[2];
+
+	limit_min_uV = 0;
+	limit_max_uV = INT_MAX;
+
+	if (vreg->logical_type == QPNP_REGULATOR_LOGICAL_TYPE_FTSMPS) {
+		max_uV = pdata->init_data.constraints.max_uV;
+		/* Find the voltage range that max_uV falls within. */
+		for (i = vreg->set_points->count - 1; i >= 0; i--) {
+			range = &vreg->set_points->range[i];
+			if (range->set_point_max_uV > 0
+				&& max_uV >= range->set_point_min_uV
+				&& max_uV <= range->set_point_max_uV)
+				break;
+		}
+
+		if (i < 0 || range == NULL) {
+			vreg_err(vreg, "max_uV doesn't fit in any voltage range\n");
+			return -EINVAL;
+		}
+
+		rc = qpnp_vreg_read(vreg, QPNP_COMMON_REG_UL_LL_CTRL,
+					&reg[0], 1);
+		if (rc) {
+			vreg_err(vreg, "UL_LL register read failed, rc=%d\n",
+				rc);
+			return rc;
+		}
+
+		if (reg[0] & QPNP_COMMON_UL_EN_MASK) {
+			rc = qpnp_vreg_read(vreg,
+					QPNP_COMMON_REG_VOLTAGE_ULS_VALID,
+					&reg[1], 1);
+			if (rc) {
+				vreg_err(vreg, "ULS_VALID register read failed, rc=%d\n",
+					rc);
+				return rc;
+			}
+
+			limit_max_uV =  range->step_uV * reg[1] + range->min_uV;
+		}
+
+		if (reg[0] & QPNP_COMMON_LL_EN_MASK) {
+			rc = qpnp_vreg_read(vreg,
+					QPNP_COMMON_REG_VOLTAGE_LLS_VALID,
+					&reg[1], 1);
+			if (rc) {
+				vreg_err(vreg, "LLS_VALID register read failed, rc=%d\n",
+					rc);
+				return rc;
+			}
+
+			limit_min_uV =  range->step_uV * reg[1] + range->min_uV;
+		}
+	} else if (vreg->logical_type == QPNP_REGULATOR_LOGICAL_TYPE_FTSMPS2) {
+		rc = qpnp_vreg_read(vreg, QPNP_COMMON2_REG_VOLTAGE_ULS_LSB,
+					reg, 2);
+		if (rc) {
+			vreg_err(vreg, "ULS registers read failed, rc=%d\n",
+				rc);
+			return rc;
+		}
+
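+		/* The ULS LSB/MSB register pair holds the upper limit in mV. */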
+		limit_max_uV = (((int)reg[1] << 8) | (int)reg[0]) * 1000;
+	}
+
+	if (pdata->init_data.constraints.min_uV < limit_min_uV
+	    || pdata->init_data.constraints.max_uV >  limit_max_uV) {
+		vreg_err(vreg, "regulator min/max(%d/%d) constraints do not fit within HW configured min/max(%d/%d) constraints\n",
+			pdata->init_data.constraints.min_uV,
+			pdata->init_data.constraints.max_uV,
+			limit_min_uV, limit_max_uV);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int qpnp_regulator_ftsmps_init_slew_rate(struct qpnp_regulator *vreg)
+{
+	int rc;
+	u8 reg = 0;
+	int step = 0, delay, i, range_sel;
+	struct qpnp_voltage_range *range = NULL;
+
+	rc = qpnp_vreg_read(vreg, QPNP_COMMON_REG_STEP_CTRL, &reg, 1);
+	if (rc) {
+		vreg_err(vreg, "spmi read failed, rc=%d\n", rc);
+		return rc;
+	}
+
+	range_sel = vreg->ctrl_reg[QPNP_COMMON_IDX_VOLTAGE_RANGE];
+
+	for (i = 0; i < vreg->set_points->count; i++) {
+		if (vreg->set_points->range[i].range_sel == range_sel) {
+			range = &vreg->set_points->range[i];
+			break;
+		}
+	}
+
+	if (!range) {
+		vreg_err(vreg, "range %d is invalid\n", range_sel);
+		return -EINVAL;
+	}
+
+	step = (reg & QPNP_FTSMPS_STEP_CTRL_STEP_MASK)
+		>> QPNP_FTSMPS_STEP_CTRL_STEP_SHIFT;
+
+	delay = (reg & QPNP_FTSMPS_STEP_CTRL_DELAY_MASK)
+		>> QPNP_FTSMPS_STEP_CTRL_DELAY_SHIFT;
+
+	/* slew_rate has units of uV/us. */
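+	/*
+	 * rate = CLOCK_RATE * step_uV * 2^step / (1000 * (STEP_DELAY << delay))
+	 * then derated by STEP_MARGIN_NUM/STEP_MARGIN_DEN for margin.  The
+	 * divide by 1000 yields uV/us, assuming QPNP_FTSMPS_CLOCK_RATE is in
+	 * kHz.
+	 */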
+	vreg->slew_rate = QPNP_FTSMPS_CLOCK_RATE * range->step_uV * (1 << step);
+
+	vreg->slew_rate /= 1000 * (QPNP_FTSMPS_STEP_DELAY << delay);
+
+	vreg->slew_rate = vreg->slew_rate * QPNP_FTSMPS_STEP_MARGIN_NUM
+				/ QPNP_FTSMPS_STEP_MARGIN_DEN;
+
+	/* Ensure that the slew rate is greater than 0. */
+	vreg->slew_rate = max(vreg->slew_rate, 1);
+
+	return rc;
+}
+
+static int qpnp_regulator_ftsmps2_init_slew_rate(struct qpnp_regulator *vreg)
+{
+	struct qpnp_voltage_range *range = NULL;
+	int i, rc, delay;
+	u8 reg = 0;
+
+	rc = qpnp_vreg_read(vreg, QPNP_COMMON2_REG_STEP_CTRL, &reg, 1);
+	if (rc) {
+		vreg_err(vreg, "spmi read failed, rc=%d\n", rc);
+		return rc;
+	}
+
+	/*
+	 * Regulators using the common #2 register layout do not have a voltage
+	 * range select register.  Choose the lowest possible step size to be
+	 * conservative in the slew rate calculation.
+	 */
+	for (i = 0; i < vreg->set_points->count; i++) {
+		if (!range || vreg->set_points->range[i].step_uV
+				< range->step_uV)
+			range = &vreg->set_points->range[i];
+	}
+
+	if (!range) {
+		vreg_err(vreg, "range is invalid\n");
+		return -EINVAL;
+	}
+
+	delay = (reg & QPNP_FTSMPS2_STEP_CTRL_DELAY_MASK)
+		>> QPNP_FTSMPS2_STEP_CTRL_DELAY_SHIFT;
+
+	/* slew_rate has units of uV/us. */
+	vreg->slew_rate = QPNP_FTSMPS2_CLOCK_RATE * range->step_uV;
+	vreg->slew_rate /= 1000 * (QPNP_FTSMPS2_STEP_DELAY << delay);
+	vreg->slew_rate = vreg->slew_rate * QPNP_FTSMPS2_STEP_MARGIN_NUM
+				/ QPNP_FTSMPS2_STEP_MARGIN_DEN;
+
+	/* Ensure that the slew rate is greater than 0. */
+	vreg->slew_rate = max(vreg->slew_rate, 1);
+
+	return rc;
+}
+
+static int qpnp_regulator_init_registers(struct qpnp_regulator *vreg,
+				struct qpnp_regulator_platform_data *pdata)
+{
+	int rc, i;
+	enum qpnp_regulator_logical_type type;
+	u8 ctrl_reg[8], reg, mask;
+
+	type = vreg->logical_type;
+
+	rc = qpnp_vreg_read(vreg, QPNP_COMMON_REG_VOLTAGE_RANGE,
+			    vreg->ctrl_reg, 8);
+	if (rc) {
+		vreg_err(vreg, "spmi read failed, rc=%d\n", rc);
+		return rc;
+	}
+
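+	/*
+	 * Work on a local copy so that only the modified bytes are written
+	 * back at the end via qpnp_vreg_write_optimized().
+	 */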
+	for (i = 0; i < ARRAY_SIZE(ctrl_reg); i++)
+		ctrl_reg[i] = vreg->ctrl_reg[i];
+
+	/* Set up enable pin control. */
+	if ((type == QPNP_REGULATOR_LOGICAL_TYPE_SMPS
+	     || type == QPNP_REGULATOR_LOGICAL_TYPE_LDO
+	     || type == QPNP_REGULATOR_LOGICAL_TYPE_VS)
+	    && !(pdata->pin_ctrl_enable
+			& QPNP_REGULATOR_PIN_CTRL_ENABLE_HW_DEFAULT)) {
+		ctrl_reg[QPNP_COMMON_IDX_ENABLE] &=
+			~QPNP_COMMON_ENABLE_FOLLOW_ALL_MASK;
+		ctrl_reg[QPNP_COMMON_IDX_ENABLE] |=
+		    pdata->pin_ctrl_enable & QPNP_COMMON_ENABLE_FOLLOW_ALL_MASK;
+	}
+
+	/* Set up HPM control. */
+	if ((type == QPNP_REGULATOR_LOGICAL_TYPE_SMPS
+	     || type == QPNP_REGULATOR_LOGICAL_TYPE_ULT_LO_SMPS
+	     || type == QPNP_REGULATOR_LOGICAL_TYPE_ULT_HO_SMPS
+	     || type == QPNP_REGULATOR_LOGICAL_TYPE_ULT_LDO
+	     || type == QPNP_REGULATOR_LOGICAL_TYPE_LDO
+	     || type == QPNP_REGULATOR_LOGICAL_TYPE_VS
+	     || type == QPNP_REGULATOR_LOGICAL_TYPE_FTSMPS)
+	    && (pdata->hpm_enable != QPNP_REGULATOR_USE_HW_DEFAULT)) {
+		ctrl_reg[QPNP_COMMON_IDX_MODE] &= ~QPNP_COMMON_MODE_HPM_MASK;
+		ctrl_reg[QPNP_COMMON_IDX_MODE] |=
+		     (pdata->hpm_enable ? QPNP_COMMON_MODE_HPM_MASK : 0);
+	}
+
+	/* Set up auto mode control. */
+	if ((type == QPNP_REGULATOR_LOGICAL_TYPE_SMPS
+	     || type == QPNP_REGULATOR_LOGICAL_TYPE_LDO
+	     || type == QPNP_REGULATOR_LOGICAL_TYPE_VS
+	     || type == QPNP_REGULATOR_LOGICAL_TYPE_FTSMPS)
+	    && (pdata->auto_mode_enable != QPNP_REGULATOR_USE_HW_DEFAULT)) {
+		ctrl_reg[QPNP_COMMON_IDX_MODE] &=
+			~QPNP_COMMON_MODE_AUTO_MASK;
+		ctrl_reg[QPNP_COMMON_IDX_MODE] |=
+		     (pdata->auto_mode_enable ? QPNP_COMMON_MODE_AUTO_MASK : 0);
+	}
+
+	if (type == QPNP_REGULATOR_LOGICAL_TYPE_FTSMPS2) {
+		if (pdata->hpm_enable == QPNP_REGULATOR_ENABLE)
+			ctrl_reg[QPNP_COMMON2_IDX_MODE]
+				= QPNP_COMMON2_MODE_HPM;
+		else if (pdata->auto_mode_enable == QPNP_REGULATOR_ENABLE)
+			ctrl_reg[QPNP_COMMON2_IDX_MODE]
+				= QPNP_COMMON2_MODE_AUTO;
+		else if (pdata->hpm_enable == QPNP_REGULATOR_DISABLE
+			 && ctrl_reg[QPNP_COMMON2_IDX_MODE]
+					== QPNP_COMMON2_MODE_HPM)
+			ctrl_reg[QPNP_COMMON2_IDX_MODE]
+				= QPNP_COMMON2_MODE_LPM;
+		else if (pdata->auto_mode_enable == QPNP_REGULATOR_DISABLE
+			 && ctrl_reg[QPNP_COMMON2_IDX_MODE]
+					== QPNP_COMMON2_MODE_AUTO)
+			ctrl_reg[QPNP_COMMON2_IDX_MODE]
+				= QPNP_COMMON2_MODE_LPM;
+	}
+
+	/* Set up mode pin control. */
+	if ((type == QPNP_REGULATOR_LOGICAL_TYPE_SMPS
+	    || type == QPNP_REGULATOR_LOGICAL_TYPE_LDO)
+		&& !(pdata->pin_ctrl_hpm
+			& QPNP_REGULATOR_PIN_CTRL_HPM_HW_DEFAULT)) {
+		ctrl_reg[QPNP_COMMON_IDX_MODE] &=
+			~QPNP_COMMON_MODE_FOLLOW_ALL_MASK;
+		ctrl_reg[QPNP_COMMON_IDX_MODE] |=
+			pdata->pin_ctrl_hpm & QPNP_COMMON_MODE_FOLLOW_ALL_MASK;
+	}
+
+	if (type == QPNP_REGULATOR_LOGICAL_TYPE_VS
+	   && !(pdata->pin_ctrl_hpm & QPNP_REGULATOR_PIN_CTRL_HPM_HW_DEFAULT)) {
+		ctrl_reg[QPNP_COMMON_IDX_MODE] &=
+			~QPNP_COMMON_MODE_FOLLOW_AWAKE_MASK;
+		ctrl_reg[QPNP_COMMON_IDX_MODE] |=
+		       pdata->pin_ctrl_hpm & QPNP_COMMON_MODE_FOLLOW_AWAKE_MASK;
+	}
+
+	if ((type == QPNP_REGULATOR_LOGICAL_TYPE_ULT_LO_SMPS
+		|| type == QPNP_REGULATOR_LOGICAL_TYPE_ULT_HO_SMPS
+		|| type == QPNP_REGULATOR_LOGICAL_TYPE_ULT_LDO)
+		&& !(pdata->pin_ctrl_hpm
+			& QPNP_REGULATOR_PIN_CTRL_HPM_HW_DEFAULT)) {
+		ctrl_reg[QPNP_COMMON_IDX_MODE] &=
+			~QPNP_COMMON_MODE_FOLLOW_AWAKE_MASK;
+		ctrl_reg[QPNP_COMMON_IDX_MODE] |=
+		       pdata->pin_ctrl_hpm & QPNP_COMMON_MODE_FOLLOW_AWAKE_MASK;
+	}
+
+	if ((type == QPNP_REGULATOR_LOGICAL_TYPE_LDO
+	    || type == QPNP_REGULATOR_LOGICAL_TYPE_LN_LDO
+	    || type == QPNP_REGULATOR_LOGICAL_TYPE_ULT_LDO)
+	      && pdata->bypass_mode_enable != QPNP_REGULATOR_USE_HW_DEFAULT) {
+		ctrl_reg[QPNP_COMMON_IDX_MODE] &=
+			~QPNP_COMMON_MODE_BYPASS_MASK;
+		ctrl_reg[QPNP_COMMON_IDX_MODE] |=
+			(pdata->bypass_mode_enable
+				? QPNP_COMMON_MODE_BYPASS_MASK : 0);
+	}
+
+	/* Set boost current limit. */
+	if ((type == QPNP_REGULATOR_LOGICAL_TYPE_BOOST
+	    || type == QPNP_REGULATOR_LOGICAL_TYPE_BOOST_BYP)
+		&& pdata->boost_current_limit
+			!= QPNP_BOOST_CURRENT_LIMIT_HW_DEFAULT) {
+		reg = pdata->boost_current_limit;
+		mask = QPNP_BOOST_CURRENT_LIMIT_MASK;
+		rc = qpnp_vreg_masked_read_write(vreg,
+			(type == QPNP_REGULATOR_LOGICAL_TYPE_BOOST
+				? QPNP_BOOST_REG_CURRENT_LIMIT
+				: QPNP_BOOST_BYP_REG_CURRENT_LIMIT),
+			reg, mask);
+		if (rc) {
+			vreg_err(vreg, "spmi write failed, rc=%d\n", rc);
+			return rc;
+		}
+	}
+
+	/* Write back any control register values that were modified. */
+	rc = qpnp_vreg_write_optimized(vreg, QPNP_COMMON_REG_VOLTAGE_RANGE,
+		ctrl_reg, vreg->ctrl_reg, 8);
+	if (rc) {
+		vreg_err(vreg, "spmi write failed, rc=%d\n", rc);
+		return rc;
+	}
+
+	/* Set up the initial range for ULT_LO_SMPS. */
+	if (type == QPNP_REGULATOR_LOGICAL_TYPE_ULT_LO_SMPS) {
+		ctrl_reg[QPNP_COMMON_IDX_VOLTAGE_RANGE] =
+			(ctrl_reg[QPNP_COMMON_IDX_VOLTAGE_SET]
+			 < ULT_SMPS_RANGE_SPLIT) ? 0 : 1;
+	}
+
+	/* Set pull down. */
+	if ((type == QPNP_REGULATOR_LOGICAL_TYPE_SMPS
+	    || type == QPNP_REGULATOR_LOGICAL_TYPE_ULT_LO_SMPS
+	    || type == QPNP_REGULATOR_LOGICAL_TYPE_ULT_HO_SMPS
+	    || type == QPNP_REGULATOR_LOGICAL_TYPE_ULT_LDO
+	    || type == QPNP_REGULATOR_LOGICAL_TYPE_LDO
+	    || type == QPNP_REGULATOR_LOGICAL_TYPE_VS)
+	    && pdata->pull_down_enable != QPNP_REGULATOR_USE_HW_DEFAULT) {
+		reg = pdata->pull_down_enable
+			? QPNP_COMMON_PULL_DOWN_ENABLE_MASK : 0;
+		rc = qpnp_vreg_write(vreg, QPNP_COMMON_REG_PULL_DOWN, &reg, 1);
+		if (rc) {
+			vreg_err(vreg, "spmi write failed, rc=%d\n", rc);
+			return rc;
+		}
+	}
+
+	if ((type == QPNP_REGULATOR_LOGICAL_TYPE_FTSMPS
+	    || type == QPNP_REGULATOR_LOGICAL_TYPE_FTSMPS2)
+	    && pdata->pull_down_enable != QPNP_REGULATOR_USE_HW_DEFAULT) {
+		/* FTSMPS has other bits in the pull down control register. */
+		reg = pdata->pull_down_enable
+			? QPNP_COMMON_PULL_DOWN_ENABLE_MASK : 0;
+		rc = qpnp_vreg_masked_read_write(vreg,
+			QPNP_COMMON_REG_PULL_DOWN, reg,
+			QPNP_COMMON_PULL_DOWN_ENABLE_MASK);
+		if (rc) {
+			vreg_err(vreg, "spmi write failed, rc=%d\n", rc);
+			return rc;
+		}
+	}
+
+	/* Set soft start for LDO. */
+	if ((type == QPNP_REGULATOR_LOGICAL_TYPE_LDO
+	    || type == QPNP_REGULATOR_LOGICAL_TYPE_ULT_LDO)
+	    && pdata->soft_start_enable != QPNP_REGULATOR_USE_HW_DEFAULT) {
+		reg = pdata->soft_start_enable
+			? QPNP_LDO_SOFT_START_ENABLE_MASK : 0;
+		rc = qpnp_vreg_write(vreg, QPNP_LDO_REG_SOFT_START, &reg, 1);
+		if (rc) {
+			vreg_err(vreg, "spmi write failed, rc=%d\n", rc);
+			return rc;
+		}
+	}
+
+	/* Set soft start strength and over current protection for VS. */
+	if (type == QPNP_REGULATOR_LOGICAL_TYPE_VS) {
+		reg = 0;
+		mask = 0;
+		if (pdata->soft_start_enable != QPNP_REGULATOR_USE_HW_DEFAULT) {
+			reg |= pdata->soft_start_enable
+				? QPNP_VS_SOFT_START_ENABLE_MASK : 0;
+			mask |= QPNP_VS_SOFT_START_ENABLE_MASK;
+		}
+		if (pdata->vs_soft_start_strength
+				!= QPNP_VS_SOFT_START_STR_HW_DEFAULT) {
+			reg |= pdata->vs_soft_start_strength
+				& QPNP_VS_SOFT_START_SEL_MASK;
+			mask |= QPNP_VS_SOFT_START_SEL_MASK;
+		}
+		rc = qpnp_vreg_masked_read_write(vreg, QPNP_VS_REG_SOFT_START,
+						 reg, mask);
+		if (rc) {
+			vreg_err(vreg, "spmi write failed, rc=%d\n", rc);
+			return rc;
+		}
+
+		if (pdata->ocp_enable != QPNP_REGULATOR_USE_HW_DEFAULT) {
+			reg = pdata->ocp_enable ? QPNP_VS_OCP_NO_OVERRIDE
+						: QPNP_VS_OCP_OVERRIDE;
+			rc = qpnp_vreg_write(vreg, QPNP_VS_REG_OCP, &reg, 1);
+			if (rc) {
+				vreg_err(vreg, "spmi write failed, rc=%d\n",
+					rc);
+				return rc;
+			}
+		}
+	}
+
+	/* Calculate the slew rate for FTSMPS regulators. */
+	if (type == QPNP_REGULATOR_LOGICAL_TYPE_FTSMPS) {
+		rc = qpnp_regulator_ftsmps_init_slew_rate(vreg);
+		if (rc) {
+			vreg_err(vreg, "failed to initialize step rate, rc=%d\n",
+				 rc);
+			return rc;
+		}
+	}
+
+	/* Calculate the slew rate for FTSMPS2 regulators. */
+	if (type == QPNP_REGULATOR_LOGICAL_TYPE_FTSMPS2) {
+		rc = qpnp_regulator_ftsmps2_init_slew_rate(vreg);
+		if (rc) {
+			vreg_err(vreg, "failed to initialize step rate, rc=%d\n",
+				 rc);
+			return rc;
+		}
+	}
+
+	vreg->init_mode = vreg->ctrl_reg[QPNP_COMMON_IDX_MODE];
+
+	return rc;
+}
+
+/* Fill in pdata elements based on values found in device tree. */
+static int qpnp_regulator_get_dt_config(struct platform_device *pdev,
+				struct qpnp_regulator_platform_data *pdata)
+{
+	unsigned int base;
+	struct device_node *node = pdev->dev.of_node;
+	int rc = 0;
+
+	pdata->init_data.constraints.input_uV
+		= pdata->init_data.constraints.max_uV;
+
+	rc = of_property_read_u32(pdev->dev.of_node, "reg", &base);
+	if (rc < 0) {
+		dev_err(&pdev->dev,
+			"Couldn't find reg in node = %s rc = %d\n",
+			pdev->dev.of_node->full_name, rc);
+		return rc;
+	}
+	pdata->base_addr = base;
+
+	/* The OCP IRQ is optional, so ignore lookup errors. */
+	pdata->ocp_irq = platform_get_irq_byname(pdev, "ocp");
+	if (pdata->ocp_irq < 0)
+		pdata->ocp_irq = 0;
+
+	/*
+	 * Initialize configuration parameters to use hardware default in case
+	 * no value is specified via device tree.
+	 */
+	pdata->auto_mode_enable		= QPNP_REGULATOR_USE_HW_DEFAULT;
+	pdata->bypass_mode_enable	= QPNP_REGULATOR_USE_HW_DEFAULT;
+	pdata->ocp_enable		= QPNP_REGULATOR_USE_HW_DEFAULT;
+	pdata->pull_down_enable		= QPNP_REGULATOR_USE_HW_DEFAULT;
+	pdata->soft_start_enable	= QPNP_REGULATOR_USE_HW_DEFAULT;
+	pdata->boost_current_limit	= QPNP_BOOST_CURRENT_LIMIT_HW_DEFAULT;
+	pdata->pin_ctrl_enable	    = QPNP_REGULATOR_PIN_CTRL_ENABLE_HW_DEFAULT;
+	pdata->pin_ctrl_hpm	    = QPNP_REGULATOR_PIN_CTRL_HPM_HW_DEFAULT;
+	pdata->vs_soft_start_strength	= QPNP_VS_SOFT_START_STR_HW_DEFAULT;
+	pdata->hpm_enable		= QPNP_REGULATOR_USE_HW_DEFAULT;
+
+	/* These bindings are optional, so it is okay if they are not found. */
+	of_property_read_u32(node, "qcom,auto-mode-enable",
+		&pdata->auto_mode_enable);
+	of_property_read_u32(node, "qcom,bypass-mode-enable",
+		&pdata->bypass_mode_enable);
+	of_property_read_u32(node, "qcom,ocp-enable", &pdata->ocp_enable);
+	of_property_read_u32(node, "qcom,ocp-max-retries",
+		&pdata->ocp_max_retries);
+	of_property_read_u32(node, "qcom,ocp-retry-delay",
+		&pdata->ocp_retry_delay_ms);
+	of_property_read_u32(node, "qcom,pull-down-enable",
+		&pdata->pull_down_enable);
+	of_property_read_u32(node, "qcom,soft-start-enable",
+		&pdata->soft_start_enable);
+	of_property_read_u32(node, "qcom,boost-current-limit",
+		&pdata->boost_current_limit);
+	of_property_read_u32(node, "qcom,pin-ctrl-enable",
+		&pdata->pin_ctrl_enable);
+	of_property_read_u32(node, "qcom,pin-ctrl-hpm", &pdata->pin_ctrl_hpm);
+	of_property_read_u32(node, "qcom,hpm-enable", &pdata->hpm_enable);
+	of_property_read_u32(node, "qcom,vs-soft-start-strength",
+		&pdata->vs_soft_start_strength);
+	of_property_read_u32(node, "qcom,system-load", &pdata->system_load);
+	of_property_read_u32(node, "qcom,enable-time", &pdata->enable_time);
+
+	return rc;
+}
+
+static const struct of_device_id spmi_match_table[];
+
+#define MAX_NAME_LEN	127
+
+static int qpnp_regulator_probe(struct platform_device *pdev)
+{
+	struct regulator_config reg_config = {};
+	struct qpnp_regulator_platform_data *pdata;
+	struct qpnp_regulator *vreg;
+	struct regulator_desc *rdesc;
+	struct qpnp_regulator_platform_data of_pdata;
+	struct regulator_init_data *init_data;
+	char *reg_name;
+	int rc;
+	bool is_dt;
+
+	vreg = kzalloc(sizeof(struct qpnp_regulator), GFP_KERNEL);
+	if (!vreg)
+		return -ENOMEM;
+
+	vreg->regmap = dev_get_regmap(pdev->dev.parent, NULL);
+	if (!vreg->regmap) {
+		dev_err(&pdev->dev, "Couldn't get parent's regmap\n");
+		return -EINVAL;
+	}
+
+	is_dt = of_match_device(spmi_match_table, &pdev->dev);
+
+	/* Check if device tree is in use. */
+	if (is_dt) {
+		init_data = of_get_regulator_init_data(&pdev->dev,
+						       pdev->dev.of_node,
+						       &vreg->rdesc);
+		if (!init_data) {
+			dev_err(&pdev->dev, "%s: unable to allocate memory\n",
+					__func__);
+			kfree(vreg);
+			return -ENOMEM;
+		}
+		memset(&of_pdata, 0,
+			sizeof(struct qpnp_regulator_platform_data));
+		memcpy(&of_pdata.init_data, init_data,
+			sizeof(struct regulator_init_data));
+
+		if (of_get_property(pdev->dev.of_node, "parent-supply", NULL))
+			of_pdata.init_data.supply_regulator = "parent";
+
+		rc = qpnp_regulator_get_dt_config(pdev, &of_pdata);
+		if (rc) {
+			dev_err(&pdev->dev, "%s: DT parsing failed, rc=%d\n",
+					__func__, rc);
+			kfree(vreg);
+			return -ENOMEM;
+		}
+
+		pdata = &of_pdata;
+	} else {
+		pdata = pdev->dev.platform_data;
+	}
+
+	if (pdata == NULL) {
+		dev_err(&pdev->dev, "%s: no platform data specified\n",
+			__func__);
+		kfree(vreg);
+		return -EINVAL;
+	}
+
+	vreg->pdev		= pdev;
+	vreg->prev_write_count	= -1;
+	vreg->write_count	= 0;
+	vreg->base_addr		= pdata->base_addr;
+	vreg->enable_time	= pdata->enable_time;
+	vreg->system_load	= pdata->system_load;
+	vreg->ocp_enable	= pdata->ocp_enable;
+	vreg->ocp_irq		= pdata->ocp_irq;
+	vreg->ocp_max_retries	= pdata->ocp_max_retries;
+	vreg->ocp_retry_delay_ms = pdata->ocp_retry_delay_ms;
+
+	if (vreg->ocp_max_retries == 0)
+		vreg->ocp_max_retries = QPNP_VS_OCP_DEFAULT_MAX_RETRIES;
+	if (vreg->ocp_retry_delay_ms == 0)
+		vreg->ocp_retry_delay_ms = QPNP_VS_OCP_DEFAULT_RETRY_DELAY_MS;
+
+	rdesc			= &vreg->rdesc;
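+	/* Use the parent SPMI controller number as the regulator ID. */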
+	rdesc->id		= to_spmi_device(pdev->dev.parent)->ctrl->nr;
+	rdesc->owner		= THIS_MODULE;
+	rdesc->type		= REGULATOR_VOLTAGE;
+
+	reg_name = kzalloc(strnlen(pdata->init_data.constraints.name,
+				MAX_NAME_LEN) + 1, GFP_KERNEL);
+	if (!reg_name) {
+		kfree(vreg);
+		return -ENOMEM;
+	}
+	strlcpy(reg_name, pdata->init_data.constraints.name,
+		strnlen(pdata->init_data.constraints.name, MAX_NAME_LEN) + 1);
+	rdesc->name = reg_name;
+
+	dev_set_drvdata(&pdev->dev, vreg);
+
+	rc = qpnp_regulator_match(vreg);
+	if (rc)
+		goto bail;
+
+	if (is_dt && rdesc->ops) {
+		/* Fill in ops and mode masks when using device tree. */
+		if (rdesc->ops->enable)
+			pdata->init_data.constraints.valid_ops_mask
+				|= REGULATOR_CHANGE_STATUS;
+		if (rdesc->ops->get_voltage)
+			pdata->init_data.constraints.valid_ops_mask
+				|= REGULATOR_CHANGE_VOLTAGE;
+		if (rdesc->ops->get_mode) {
+			pdata->init_data.constraints.valid_ops_mask
+				|= REGULATOR_CHANGE_MODE
+				| REGULATOR_CHANGE_DRMS;
+			pdata->init_data.constraints.valid_modes_mask
+				= REGULATOR_MODE_NORMAL | REGULATOR_MODE_IDLE;
+		}
+	}
+
+	rc = qpnp_regulator_check_constraints(vreg, pdata);
+	if (rc) {
+		vreg_err(vreg, "regulator constraints check failed, rc=%d\n",
+			rc);
+		goto bail;
+	}
+
+	rc = qpnp_regulator_init_registers(vreg, pdata);
+	if (rc) {
+		vreg_err(vreg, "common initialization failed, rc=%d\n", rc);
+		goto bail;
+	}
+
+	if (vreg->logical_type != QPNP_REGULATOR_LOGICAL_TYPE_VS)
+		vreg->ocp_irq = 0;
+
+	if (vreg->ocp_irq) {
+		rc = devm_request_irq(&pdev->dev, vreg->ocp_irq,
+			qpnp_regulator_vs_ocp_isr, IRQF_TRIGGER_RISING, "ocp",
+			vreg);
+		if (rc < 0) {
+			vreg_err(vreg, "failed to request irq %d, rc=%d\n",
+				vreg->ocp_irq, rc);
+			goto bail;
+		}
+
+		INIT_DELAYED_WORK(&vreg->ocp_work, qpnp_regulator_vs_ocp_work);
+	}
+
+	reg_config.dev = &pdev->dev;
+	reg_config.init_data = &pdata->init_data;
+	reg_config.driver_data = vreg;
+	reg_config.of_node = pdev->dev.of_node;
+	vreg->rdev = regulator_register(rdesc, &reg_config);
+	if (IS_ERR(vreg->rdev)) {
+		rc = PTR_ERR(vreg->rdev);
+		if (rc != -EPROBE_DEFER)
+			vreg_err(vreg, "regulator_register failed, rc=%d\n",
+				rc);
+		goto cancel_ocp_work;
+	}
+
+	if (qpnp_vreg_debug_mask & QPNP_VREG_DEBUG_INIT && vreg->slew_rate)
+		pr_info("%-11s: step rate=%d uV/us\n", vreg->rdesc.name,
+			vreg->slew_rate);
+
+	qpnp_vreg_show_state(vreg->rdev, QPNP_REGULATOR_ACTION_INIT);
+
+	return 0;
+
+cancel_ocp_work:
+	if (vreg->ocp_irq)
+		cancel_delayed_work_sync(&vreg->ocp_work);
+bail:
+	if (rc && rc != -EPROBE_DEFER)
+		vreg_err(vreg, "probe failed, rc=%d\n", rc);
+
+	kfree(vreg->rdesc.name);
+	kfree(vreg);
+
+	return rc;
+}
+
+static int qpnp_regulator_remove(struct platform_device *pdev)
+{
+	struct qpnp_regulator *vreg;
+
+	vreg = dev_get_drvdata(&pdev->dev);
+	dev_set_drvdata(&pdev->dev, NULL);
+
+	if (vreg) {
+		regulator_unregister(vreg->rdev);
+		if (vreg->ocp_irq)
+			cancel_delayed_work_sync(&vreg->ocp_work);
+		kfree(vreg->rdesc.name);
+		kfree(vreg);
+	}
+
+	return 0;
+}
+
+static const struct of_device_id spmi_match_table[] = {
+	{ .compatible = QPNP_REGULATOR_DRIVER_NAME, },
+	{}
+};
+
+static const struct platform_device_id qpnp_regulator_id[] = {
+	{ QPNP_REGULATOR_DRIVER_NAME, 0 },
+	{ }
+};
+MODULE_DEVICE_TABLE(spmi, qpnp_regulator_id);
+
+static struct platform_driver qpnp_regulator_driver = {
+	.driver		= {
+		.name		= QPNP_REGULATOR_DRIVER_NAME,
+		.of_match_table	= spmi_match_table,
+		.owner		= THIS_MODULE,
+	},
+	.probe		= qpnp_regulator_probe,
+	.remove		= qpnp_regulator_remove,
+	.id_table	= qpnp_regulator_id,
+};
+
+/*
+ * Pre-compute the number of set points available for each regulator type
+ * to avoid recalculating them at runtime.
+ */
+static void qpnp_regulator_set_point_init(void)
+{
+	struct qpnp_voltage_set_points *points;
+	int i, j, temp;
+
+	for (i = 0; i < ARRAY_SIZE(all_set_points); i++) {
+		points = all_set_points[i];
+		temp = 0;
+		for (j = 0; j < points->count; j++) {
+			points->range[j].n_voltages
+				= (points->range[j].set_point_max_uV
+				   - points->range[j].set_point_min_uV)
+				  / points->range[j].step_uV + 1;
+			if (points->range[j].set_point_max_uV == 0)
+				points->range[j].n_voltages = 0;
+			temp += points->range[j].n_voltages;
+		}
+		points->n_voltages = temp;
+	}
+}
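+
+/*
+ * Worked example of the pre-computation above (range values illustrative,
+ * not taken from a specific set point table): a range with
+ * set_point_min_uV = 1400000, set_point_max_uV = 3300000 and
+ * step_uV = 50000 yields
+ *
+ *	n_voltages = (3300000 - 1400000) / 50000 + 1 = 39
+ *
+ * selectable set points; a range whose set_point_max_uV is 0 contributes
+ * no set points at all.
+ */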
+
+/**
+ * qpnp_regulator_init() - register the platform driver for qpnp-regulator
+ *
+ * This initialization function should be called in systems in which driver
+ * registration ordering must be controlled precisely.
+ */
+int __init qpnp_regulator_init(void)
+{
+	static bool has_registered;
+
+	if (has_registered)
+		return 0;
+	has_registered = true;
+
+	qpnp_regulator_set_point_init();
+
+	return platform_driver_register(&qpnp_regulator_driver);
+}
+EXPORT_SYMBOL(qpnp_regulator_init);
+
+static void __exit qpnp_regulator_exit(void)
+{
+	platform_driver_unregister(&qpnp_regulator_driver);
+}
+
+MODULE_DESCRIPTION("QPNP PMIC regulator driver");
+MODULE_LICENSE("GPL v2");
+
+arch_initcall(qpnp_regulator_init);
+module_exit(qpnp_regulator_exit);
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/regulator/rpm-smd-regulator.c	2019-01-22 16:16:26.279271545 +0100
@@ -0,0 +1,2019 @@
+/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/module.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+#include <linux/regulator/of_regulator.h>
+#include <linux/regulator/rpm-smd-regulator.h>
+#include <soc/qcom/rpm-smd.h>
+
+/* Debug Definitions */
+
+enum {
+	RPM_VREG_DEBUG_REQUEST		= BIT(0),
+	RPM_VREG_DEBUG_FULL_REQUEST	= BIT(1),
+	RPM_VREG_DEBUG_DUPLICATE	= BIT(2),
+};
+
+static int rpm_vreg_debug_mask;
+module_param_named(
+	debug_mask, rpm_vreg_debug_mask, int, S_IRUSR | S_IWUSR
+);
+
+#define vreg_err(req, fmt, ...) \
+	pr_err("%s: " fmt, req->rdesc.name, ##__VA_ARGS__)
+
+/* RPM regulator request types */
+enum rpm_regulator_type {
+	RPM_REGULATOR_TYPE_LDO,
+	RPM_REGULATOR_TYPE_SMPS,
+	RPM_REGULATOR_TYPE_VS,
+	RPM_REGULATOR_TYPE_NCP,
+	RPM_REGULATOR_TYPE_BOB,
+	RPM_REGULATOR_TYPE_MAX,
+};
+
+/* RPM resource parameters */
+enum rpm_regulator_param_index {
+	RPM_REGULATOR_PARAM_ENABLE,
+	RPM_REGULATOR_PARAM_VOLTAGE,
+	RPM_REGULATOR_PARAM_CURRENT,
+	RPM_REGULATOR_PARAM_MODE_LDO,
+	RPM_REGULATOR_PARAM_MODE_SMPS,
+	RPM_REGULATOR_PARAM_PIN_CTRL_ENABLE,
+	RPM_REGULATOR_PARAM_PIN_CTRL_MODE,
+	RPM_REGULATOR_PARAM_FREQUENCY,
+	RPM_REGULATOR_PARAM_HEAD_ROOM,
+	RPM_REGULATOR_PARAM_QUIET_MODE,
+	RPM_REGULATOR_PARAM_FREQ_REASON,
+	RPM_REGULATOR_PARAM_CORNER,
+	RPM_REGULATOR_PARAM_BYPASS,
+	RPM_REGULATOR_PARAM_FLOOR_CORNER,
+	RPM_REGULATOR_PARAM_LEVEL,
+	RPM_REGULATOR_PARAM_FLOOR_LEVEL,
+	RPM_REGULATOR_PARAM_MODE_BOB,
+	RPM_REGULATOR_PARAM_PIN_CTRL_VOLTAGE1,
+	RPM_REGULATOR_PARAM_PIN_CTRL_VOLTAGE2,
+	RPM_REGULATOR_PARAM_PIN_CTRL_VOLTAGE3,
+	RPM_REGULATOR_PARAM_MAX,
+};
+
+enum rpm_regulator_smps_mode {
+	RPM_REGULATOR_SMPS_MODE_AUTO	= 0,
+	RPM_REGULATOR_SMPS_MODE_IPEAK	= 1,
+	RPM_REGULATOR_SMPS_MODE_PWM	= 2,
+};
+
+enum rpm_regulator_ldo_mode {
+	RPM_REGULATOR_LDO_MODE_IPEAK	= 0,
+	RPM_REGULATOR_LDO_MODE_HPM	= 1,
+};
+
+enum rpm_regulator_bob_mode {
+	RPM_REGULATOR_BOB_MODE_PASS	= 0,
+	RPM_REGULATOR_BOB_MODE_PFM	= 1,
+	RPM_REGULATOR_BOB_MODE_AUTO	= 2,
+	RPM_REGULATOR_BOB_MODE_PWM	= 3,
+};
+
+#define RPM_SET_CONFIG_ACTIVE			BIT(0)
+#define RPM_SET_CONFIG_SLEEP			BIT(1)
+#define RPM_SET_CONFIG_BOTH			(RPM_SET_CONFIG_ACTIVE \
+						 | RPM_SET_CONFIG_SLEEP)
+struct rpm_regulator_param {
+	char	*name;
+	char	*property_name;
+	u32	key;
+	u32	min;
+	u32	max;
+	u32	supported_regulator_types;
+};
+
+#define PARAM(_idx, _support_ldo, _support_smps, _support_vs, _support_ncp, \
+		_support_bob, _name, _min, _max, _property_name)	\
+	[RPM_REGULATOR_PARAM_##_idx] = { \
+		.name = _name, \
+		.property_name = _property_name, \
+		.min = _min, \
+		.max = _max, \
+		.supported_regulator_types = \
+			_support_ldo << RPM_REGULATOR_TYPE_LDO | \
+			_support_smps << RPM_REGULATOR_TYPE_SMPS | \
+			_support_vs << RPM_REGULATOR_TYPE_VS | \
+			_support_ncp << RPM_REGULATOR_TYPE_NCP | \
+			_support_bob << RPM_REGULATOR_TYPE_BOB, \
+	}
+
+static struct rpm_regulator_param params[RPM_REGULATOR_PARAM_MAX] = {
+	/*    ID               LDO SMPS VS  NCP BOB  name  min max          property-name */
+	PARAM(ENABLE,            1,  1,  1,  1,  1, "swen", 0, 1,          "qcom,init-enable"),
+	PARAM(VOLTAGE,           1,  1,  0,  1,  1, "uv",   0, 0x7FFFFFF,  "qcom,init-voltage"),
+	PARAM(CURRENT,           1,  1,  0,  0,  0, "ma",   0, 0x1FFF,     "qcom,init-current"),
+	PARAM(MODE_LDO,          1,  0,  0,  0,  0, "lsmd", 0, 1,          "qcom,init-ldo-mode"),
+	PARAM(MODE_SMPS,         0,  1,  0,  0,  0, "ssmd", 0, 2,          "qcom,init-smps-mode"),
+	PARAM(PIN_CTRL_ENABLE,   1,  1,  1,  0,  0, "pcen", 0, 0xF,        "qcom,init-pin-ctrl-enable"),
+	PARAM(PIN_CTRL_MODE,     1,  1,  1,  0,  0, "pcmd", 0, 0x1F,       "qcom,init-pin-ctrl-mode"),
+	PARAM(FREQUENCY,         0,  1,  0,  1,  0, "freq", 0, 31,         "qcom,init-frequency"),
+	PARAM(HEAD_ROOM,         1,  0,  0,  1,  0, "hr",   0, 0x7FFFFFFF, "qcom,init-head-room"),
+	PARAM(QUIET_MODE,        0,  1,  0,  0,  0, "qm",   0, 2,          "qcom,init-quiet-mode"),
+	PARAM(FREQ_REASON,       0,  1,  0,  1,  0, "resn", 0, 8,          "qcom,init-freq-reason"),
+	PARAM(CORNER,            1,  1,  0,  0,  0, "corn", 0, 6,          "qcom,init-voltage-corner"),
+	PARAM(BYPASS,            1,  0,  0,  0,  0, "bypa", 0, 1,          "qcom,init-disallow-bypass"),
+	PARAM(FLOOR_CORNER,      1,  1,  0,  0,  0, "vfc",  0, 6,          "qcom,init-voltage-floor-corner"),
+	PARAM(LEVEL,             1,  1,  0,  0,  0, "vlvl", 0, 0xFFFF,     "qcom,init-voltage-level"),
+	PARAM(FLOOR_LEVEL,       1,  1,  0,  0,  0, "vfl",  0, 0xFFFF,     "qcom,init-voltage-floor-level"),
+	PARAM(MODE_BOB,          0,  0,  0,  0,  1, "bobm", 0, 3,          "qcom,init-bob-mode"),
+	PARAM(PIN_CTRL_VOLTAGE1, 0,  0,  0,  0,  1, "pcv1", 0, 0x7FFFFFF,  "qcom,init-pin-ctrl-voltage1"),
+	PARAM(PIN_CTRL_VOLTAGE2, 0,  0,  0,  0,  1, "pcv2", 0, 0x7FFFFFF,  "qcom,init-pin-ctrl-voltage2"),
+	PARAM(PIN_CTRL_VOLTAGE3, 0,  0,  0,  0,  1, "pcv3", 0, 0x7FFFFFF,  "qcom,init-pin-ctrl-voltage3"),
+};
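+
+/*
+ * Example of how this table is consumed (the device tree value below is
+ * hypothetical): a child node containing
+ *
+ *	qcom,init-ldo-mode = <1>;
+ *
+ * causes the device probe to cache the value 1 under the "lsmd" key for an
+ * LDO resource, after which the value is sent to the RPM as a key-value
+ * pair during the next request aggregation.
+ */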
+
+struct rpm_regulator_mode_map {
+	int			ldo_mode;
+	int			smps_mode;
+};
+
+static struct rpm_regulator_mode_map mode_mapping[] = {
+	[RPM_REGULATOR_MODE_AUTO]
+		= {-1,				 RPM_REGULATOR_SMPS_MODE_AUTO},
+	[RPM_REGULATOR_MODE_IPEAK]
+		= {RPM_REGULATOR_LDO_MODE_IPEAK, RPM_REGULATOR_SMPS_MODE_IPEAK},
+	[RPM_REGULATOR_MODE_HPM]
+		= {RPM_REGULATOR_LDO_MODE_HPM,   RPM_REGULATOR_SMPS_MODE_PWM},
+};
+
+/* Indices for use with pin control enable via enable/disable feature. */
+#define RPM_VREG_PIN_CTRL_STATE_DISABLE	0
+#define RPM_VREG_PIN_CTRL_STATE_ENABLE	1
+#define RPM_VREG_PIN_CTRL_STATE_COUNT	2
+
+struct rpm_vreg_request {
+	u32			param[RPM_REGULATOR_PARAM_MAX];
+	u32			valid;
+	u32			modified;
+};
+
+struct rpm_vreg {
+	struct rpm_vreg_request	aggr_req_active;
+	struct rpm_vreg_request	aggr_req_sleep;
+	struct list_head	reg_list;
+	const char		*resource_name;
+	u32			resource_id;
+	bool			allow_atomic;
+	int			regulator_type;
+	int			enable_time;
+	spinlock_t		slock;
+	struct mutex		mlock;
+	unsigned long		flags;
+	bool			sleep_request_sent;
+	bool			wait_for_ack_active;
+	bool			wait_for_ack_sleep;
+	bool			always_wait_for_ack;
+	bool			apps_only;
+	struct msm_rpm_request	*handle_active;
+	struct msm_rpm_request	*handle_sleep;
+};
+
+struct rpm_regulator {
+	struct regulator_desc	rdesc;
+	struct regulator_dev	*rdev;
+	struct rpm_vreg		*rpm_vreg;
+	struct list_head	list;
+	bool			set_active;
+	bool			set_sleep;
+	bool			always_send_voltage;
+	bool			always_send_current;
+	bool			use_pin_ctrl_for_enable;
+	struct rpm_vreg_request	req;
+	int			system_load;
+	int			hpm_threshold_current;
+	int			min_uV;
+	int			max_uV;
+	u32			pin_ctrl_mask[RPM_VREG_PIN_CTRL_STATE_COUNT];
+	enum rpm_regulator_param_index voltage_index;
+	int			voltage_offset;
+};
+
+/*
+ * This voltage in uV is returned by get_voltage functions when there is no way
+ * to determine the current voltage level.  It is needed because the regulator
+ * framework treats a 0 uV voltage as an error.
+ */
+#define VOLTAGE_UNKNOWN 1
+
+/*
+ * Regulator requests sent in the active set take effect immediately.  Requests
+ * sent in the sleep set take effect when the Apps processor transitions into
+ * RPM assisted power collapse.  For any given regulator, if an active set
+ * request is present, but not a sleep set request, then the active set request
+ * is used at all times, even when the Apps processor is power collapsed.
+ *
+ * The rpm-regulator-smd takes advantage of this default usage of the active set
+ * request by only sending a sleep set request if it differs from the
+ * corresponding active set request.
+ */
+#define RPM_SET_ACTIVE	MSM_RPM_CTX_ACTIVE_SET
+#define RPM_SET_SLEEP	MSM_RPM_CTX_SLEEP_SET
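+
+/*
+ * Concrete example of the rule above: a regulator voted on only in the
+ * active set keeps that configuration in force even while the Apps
+ * processor is power collapsed; only when a consumer additionally votes a
+ * differing sleep set value (e.g. a lower sleep voltage) does the RPM
+ * apply different settings during power collapse.
+ */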
+
+static u32 rpm_vreg_string_to_int(const u8 *str)
+{
+	int i, len;
+	u32 output = 0;
+
+	len = strnlen(str, sizeof(u32));
+	for (i = 0; i < len; i++)
+		output |= str[i] << (i * 8);
+
+	return output;
+}
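+
+/*
+ * Example: the resource name "ldoa" packs little-endian into the key
+ * 0x616f646c ('l' | 'd' << 8 | 'o' << 16 | 'a' << 24), matching the
+ * four-character resource type identifiers expected by the RPM.
+ */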
+
+static inline void rpm_vreg_lock(struct rpm_vreg *rpm_vreg)
+{
+	if (rpm_vreg->allow_atomic)
+		spin_lock_irqsave(&rpm_vreg->slock, rpm_vreg->flags);
+	else
+		mutex_lock(&rpm_vreg->mlock);
+}
+
+static inline void rpm_vreg_unlock(struct rpm_vreg *rpm_vreg)
+{
+	if (rpm_vreg->allow_atomic)
+		spin_unlock_irqrestore(&rpm_vreg->slock, rpm_vreg->flags);
+	else
+		mutex_unlock(&rpm_vreg->mlock);
+}
+
+static inline bool rpm_vreg_active_or_sleep_enabled(struct rpm_vreg *rpm_vreg)
+{
+	return (rpm_vreg->aggr_req_active.param[RPM_REGULATOR_PARAM_ENABLE]
+			&& (rpm_vreg->aggr_req_active.valid
+				& BIT(RPM_REGULATOR_PARAM_ENABLE)))
+	    || ((rpm_vreg->aggr_req_sleep.param[RPM_REGULATOR_PARAM_ENABLE])
+				&& (rpm_vreg->aggr_req_sleep.valid
+					& BIT(RPM_REGULATOR_PARAM_ENABLE)));
+}
+
+static inline bool rpm_vreg_shared_active_or_sleep_enabled_valid
+						(struct rpm_vreg *rpm_vreg)
+{
+	return !rpm_vreg->apps_only &&
+		((rpm_vreg->aggr_req_active.valid
+					& BIT(RPM_REGULATOR_PARAM_ENABLE))
+		 || (rpm_vreg->aggr_req_sleep.valid
+					& BIT(RPM_REGULATOR_PARAM_ENABLE)));
+}
+
+static const u32 power_level_params =
+	BIT(RPM_REGULATOR_PARAM_ENABLE) |
+	BIT(RPM_REGULATOR_PARAM_VOLTAGE) |
+	BIT(RPM_REGULATOR_PARAM_CURRENT) |
+	BIT(RPM_REGULATOR_PARAM_CORNER) |
+	BIT(RPM_REGULATOR_PARAM_BYPASS) |
+	BIT(RPM_REGULATOR_PARAM_FLOOR_CORNER) |
+	BIT(RPM_REGULATOR_PARAM_LEVEL) |
+	BIT(RPM_REGULATOR_PARAM_FLOOR_LEVEL);
+
+static bool rpm_vreg_ack_required(struct rpm_vreg *rpm_vreg, u32 set,
+				const u32 *prev_param, const u32 *param,
+				u32 prev_valid, u32 modified)
+{
+	u32 mask;
+	int i;
+
+	if (rpm_vreg->always_wait_for_ack
+	    || (set == RPM_SET_ACTIVE && rpm_vreg->wait_for_ack_active)
+	    || (set == RPM_SET_SLEEP && rpm_vreg->wait_for_ack_sleep))
+		return true;
+
+	for (i = 0; i < RPM_REGULATOR_PARAM_MAX; i++) {
+		mask = BIT(i);
+		if (modified & mask) {
+			if ((prev_valid & mask) && (power_level_params & mask)
+			    && (param[i] <= prev_param[i]))
+				continue;
+			else
+				return true;
+		}
+	}
+
+	return false;
+}
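+
+/*
+ * Illustration of the policy above: lowering an already-valid power-level
+ * parameter (e.g. reducing a voltage or load request) may complete without
+ * waiting for the RPM to acknowledge, whereas raising one, or sending a
+ * parameter for the first time, must block until the ack arrives.
+ */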
+
+static void rpm_vreg_check_param_max(struct rpm_regulator *regulator, int index,
+					u32 new_max)
+{
+	struct rpm_vreg *rpm_vreg = regulator->rpm_vreg;
+
+	if (regulator->set_active
+	    && (rpm_vreg->aggr_req_active.valid & BIT(index))
+	    && rpm_vreg->aggr_req_active.param[index] > new_max)
+		rpm_vreg->wait_for_ack_active = true;
+
+	if (regulator->set_sleep
+	    && (rpm_vreg->aggr_req_sleep.valid & BIT(index))
+	    && rpm_vreg->aggr_req_sleep.param[index] > new_max)
+		rpm_vreg->wait_for_ack_sleep = true;
+}
+
+#define MICRO_TO_MILLI(uV)	((uV) / 1000)
+#define MILLI_TO_MICRO(uV)	((uV) * 1000)
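+
+/*
+ * Note: MICRO_TO_MILLI() truncates toward zero, e.g. MICRO_TO_MILLI(999)
+ * == 0, so load requests below 1000 uA aggregate as 0 mA.
+ */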
+
+#define DEBUG_PRINT_BUFFER_SIZE 512
+#define REQ_SENT	0
+#define REQ_PREV	1
+#define REQ_CACHED	2
+#define REQ_TYPES	3
+
+static void rpm_regulator_req(struct rpm_regulator *regulator, int set,
+				bool sent)
+{
+	char buf[DEBUG_PRINT_BUFFER_SIZE];
+	size_t buflen = DEBUG_PRINT_BUFFER_SIZE;
+	struct rpm_vreg *rpm_vreg = regulator->rpm_vreg;
+	struct rpm_vreg_request *aggr;
+	bool first;
+	u32 mask[REQ_TYPES] = {0, 0, 0};
+	const char *req_names[REQ_TYPES] = {"sent", "prev", "cached"};
+	int pos = 0;
+	int i, j;
+
+	aggr = (set == RPM_SET_ACTIVE)
+		? &rpm_vreg->aggr_req_active : &rpm_vreg->aggr_req_sleep;
+
+	if (rpm_vreg_debug_mask & RPM_VREG_DEBUG_DUPLICATE) {
+		mask[REQ_SENT] = aggr->modified;
+		mask[REQ_PREV] = aggr->valid & ~aggr->modified;
+	} else if (sent
+		   && (rpm_vreg_debug_mask & RPM_VREG_DEBUG_FULL_REQUEST)) {
+		mask[REQ_SENT] = aggr->modified;
+		mask[REQ_PREV] = aggr->valid & ~aggr->modified;
+	} else if (sent && (rpm_vreg_debug_mask & RPM_VREG_DEBUG_REQUEST)) {
+		mask[REQ_SENT] = aggr->modified;
+	}
+
+	if (!(mask[REQ_SENT] | mask[REQ_PREV]))
+		return;
+
+	if (set == RPM_SET_SLEEP && !rpm_vreg->sleep_request_sent) {
+		mask[REQ_CACHED] = mask[REQ_SENT] | mask[REQ_PREV];
+		mask[REQ_SENT] = 0;
+		mask[REQ_PREV] = 0;
+	}
+
+	pos += scnprintf(buf + pos, buflen - pos, "%s%s: ",
+			KERN_INFO, __func__);
+
+	pos += scnprintf(buf + pos, buflen - pos, "%s %u (%s): s=%s",
+			rpm_vreg->resource_name, rpm_vreg->resource_id,
+			regulator->rdesc.name,
+			(set == RPM_SET_ACTIVE ? "act" : "slp"));
+
+	for (i = 0; i < REQ_TYPES; i++) {
+		if (mask[i])
+			pos += scnprintf(buf + pos, buflen - pos, "; %s: ",
+					req_names[i]);
+
+		first = true;
+		for (j = 0; j < RPM_REGULATOR_PARAM_MAX; j++) {
+			if (mask[i] & BIT(j)) {
+				pos += scnprintf(buf + pos, buflen - pos,
+					"%s%s=%u", (first ? "" : ", "),
+					params[j].name, aggr->param[j]);
+				first = false;
+			}
+		}
+	}
+
+	pos += scnprintf(buf + pos, buflen - pos, "\n");
+	printk("%s", buf);
+}
+
+#define RPM_VREG_SET_PARAM(_regulator, _param, _val) \
+{ \
+	(_regulator)->req.param[RPM_REGULATOR_PARAM_##_param] = _val; \
+	(_regulator)->req.modified |= BIT(RPM_REGULATOR_PARAM_##_param); \
+}
+
+static int rpm_vreg_add_kvp_to_request(struct rpm_vreg *rpm_vreg,
+				       const u32 *param, int idx, u32 set)
+{
+	struct msm_rpm_request *handle;
+
+	handle = (set == RPM_SET_ACTIVE	? rpm_vreg->handle_active
+					: rpm_vreg->handle_sleep);
+
+	if (rpm_vreg->allow_atomic)
+		return msm_rpm_add_kvp_data_noirq(handle, params[idx].key,
+						  (u8 *)&param[idx], 4);
+	else
+		return msm_rpm_add_kvp_data(handle, params[idx].key,
+					    (u8 *)&param[idx], 4);
+}
+
+static void rpm_vreg_check_modified_requests(const u32 *prev_param,
+		const u32 *param, u32 prev_valid, u32 *modified)
+{
+	u32 value_changed = 0;
+	int i;
+
+	for (i = 0; i < RPM_REGULATOR_PARAM_MAX; i++) {
+		if (param[i] != prev_param[i])
+			value_changed |= BIT(i);
+	}
+
+	/*
+	 * Only keep bits that are for changed parameters or previously
+	 * invalid parameters.
+	 */
+	*modified &= value_changed | ~prev_valid;
+}
+
+static int rpm_vreg_add_modified_requests(struct rpm_regulator *regulator,
+		u32 set, const u32 *param, u32 modified)
+{
+	struct rpm_vreg *rpm_vreg = regulator->rpm_vreg;
+	int rc = 0;
+	int i;
+
+	for (i = 0; i < RPM_REGULATOR_PARAM_MAX; i++) {
+		/* Only send requests for modified parameters. */
+		if (modified & BIT(i)) {
+			rc = rpm_vreg_add_kvp_to_request(rpm_vreg, param, i,
+							set);
+			if (rc) {
+				vreg_err(regulator,
+					"add KVP failed: %s %u; %s, rc=%d\n",
+					rpm_vreg->resource_name,
+					rpm_vreg->resource_id, params[i].name,
+					rc);
+				return rc;
+			}
+		}
+	}
+
+	return rc;
+}
+
+static int rpm_vreg_send_request(struct rpm_regulator *regulator, u32 set,
+				bool wait_for_ack)
+{
+	struct rpm_vreg *rpm_vreg = regulator->rpm_vreg;
+	struct msm_rpm_request *handle
+		= (set == RPM_SET_ACTIVE ? rpm_vreg->handle_active
+					: rpm_vreg->handle_sleep);
+	int rc = 0;
+	void *temp;
+
+	if (unlikely(rpm_vreg->allow_atomic)) {
+		rc = msm_rpm_wait_for_ack_noirq(msm_rpm_send_request_noirq(
+						  handle));
+	} else if (wait_for_ack) {
+		rc = msm_rpm_wait_for_ack(msm_rpm_send_request(handle));
+	} else {
+		temp = msm_rpm_send_request_noack(handle);
+		if (IS_ERR(temp))
+			rc = PTR_ERR(temp);
+	}
+
+	if (rc)
+		vreg_err(regulator,
+			"msm rpm send failed: %s %u; set=%s, rc=%d\n",
+			rpm_vreg->resource_name,
+			rpm_vreg->resource_id,
+			(set == RPM_SET_ACTIVE ? "act" : "slp"), rc);
+
+	return rc;
+}
+
+#define RPM_VREG_AGGR_MIN(_idx, _param_aggr, _param_reg) \
+{ \
+	_param_aggr[RPM_REGULATOR_PARAM_##_idx] \
+	 = min(_param_aggr[RPM_REGULATOR_PARAM_##_idx], \
+		_param_reg[RPM_REGULATOR_PARAM_##_idx]); \
+}
+
+#define RPM_VREG_AGGR_MAX(_idx, _param_aggr, _param_reg) \
+{ \
+	_param_aggr[RPM_REGULATOR_PARAM_##_idx] \
+	 = max(_param_aggr[RPM_REGULATOR_PARAM_##_idx], \
+		_param_reg[RPM_REGULATOR_PARAM_##_idx]); \
+}
+
+#define RPM_VREG_AGGR_SUM(_idx, _param_aggr, _param_reg) \
+{ \
+	_param_aggr[RPM_REGULATOR_PARAM_##_idx] \
+		 += _param_reg[RPM_REGULATOR_PARAM_##_idx]; \
+}
+
+#define RPM_VREG_AGGR_OR(_idx, _param_aggr, _param_reg) \
+{ \
+	_param_aggr[RPM_REGULATOR_PARAM_##_idx] \
+		|= _param_reg[RPM_REGULATOR_PARAM_##_idx]; \
+}
+
+/*
+ * Aggregation is performed on each parameter based on the way that the RPM
+ * aggregates that type internally between RPM masters.
+ */
+static void rpm_vreg_aggregate_params(u32 *param_aggr, const u32 *param_reg)
+{
+	RPM_VREG_AGGR_MAX(ENABLE, param_aggr, param_reg);
+	RPM_VREG_AGGR_MAX(VOLTAGE, param_aggr, param_reg);
+	RPM_VREG_AGGR_SUM(CURRENT, param_aggr, param_reg);
+	RPM_VREG_AGGR_MAX(MODE_LDO, param_aggr, param_reg);
+	RPM_VREG_AGGR_MAX(MODE_SMPS, param_aggr, param_reg);
+	RPM_VREG_AGGR_OR(PIN_CTRL_ENABLE, param_aggr, param_reg);
+	RPM_VREG_AGGR_OR(PIN_CTRL_MODE, param_aggr, param_reg);
+	RPM_VREG_AGGR_MIN(FREQUENCY, param_aggr, param_reg);
+	RPM_VREG_AGGR_MAX(HEAD_ROOM, param_aggr, param_reg);
+	RPM_VREG_AGGR_MAX(QUIET_MODE, param_aggr, param_reg);
+	RPM_VREG_AGGR_MAX(FREQ_REASON, param_aggr, param_reg);
+	RPM_VREG_AGGR_MAX(CORNER, param_aggr, param_reg);
+	RPM_VREG_AGGR_MAX(BYPASS, param_aggr, param_reg);
+	RPM_VREG_AGGR_MAX(FLOOR_CORNER, param_aggr, param_reg);
+	RPM_VREG_AGGR_MAX(LEVEL, param_aggr, param_reg);
+	RPM_VREG_AGGR_MAX(FLOOR_LEVEL, param_aggr, param_reg);
+	RPM_VREG_AGGR_MAX(MODE_BOB, param_aggr, param_reg);
+	RPM_VREG_AGGR_MAX(PIN_CTRL_VOLTAGE1, param_aggr, param_reg);
+	RPM_VREG_AGGR_MAX(PIN_CTRL_VOLTAGE2, param_aggr, param_reg);
+	RPM_VREG_AGGR_MAX(PIN_CTRL_VOLTAGE3, param_aggr, param_reg);
+}
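+
+/*
+ * Worked example (consumer values illustrative): if one consumer votes
+ * swen=1, uv=1200000, ma=50 and a second votes swen=0, uv=1300000, ma=100
+ * on the same resource, the aggregate sent to the RPM is swen=1 (max),
+ * uv=1300000 (max) and ma=150 (sum).
+ */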
+
+static int rpm_vreg_aggregate_requests(struct rpm_regulator *regulator)
+{
+	struct rpm_vreg *rpm_vreg = regulator->rpm_vreg;
+	u32 param_active[RPM_REGULATOR_PARAM_MAX];
+	u32 param_sleep[RPM_REGULATOR_PARAM_MAX];
+	u32 modified_active, modified_sleep;
+	struct rpm_regulator *reg;
+	bool sleep_set_differs = false;
+	bool send_active = false;
+	bool send_sleep = false;
+	bool wait_for_ack;
+	int rc = 0;
+	int i;
+
+	memset(param_active, 0, sizeof(param_active));
+	memset(param_sleep, 0, sizeof(param_sleep));
+	modified_active = rpm_vreg->aggr_req_active.modified;
+	modified_sleep = rpm_vreg->aggr_req_sleep.modified;
+
+	/*
+	 * Aggregate all of the requests for this regulator in both active
+	 * and sleep sets.
+	 */
+	list_for_each_entry(reg, &rpm_vreg->reg_list, list) {
+		if (reg->set_active) {
+			rpm_vreg_aggregate_params(param_active, reg->req.param);
+			modified_active |= reg->req.modified;
+		}
+		if (reg->set_sleep) {
+			rpm_vreg_aggregate_params(param_sleep, reg->req.param);
+			modified_sleep |= reg->req.modified;
+		}
+	}
+
+	/*
+	 * Check if the aggregated sleep set parameter values differ from the
+	 * aggregated active set parameter values.
+	 */
+	if (!rpm_vreg->sleep_request_sent) {
+		for (i = 0; i < RPM_REGULATOR_PARAM_MAX; i++) {
+			if ((param_active[i] != param_sleep[i])
+			    && (modified_sleep & BIT(i))) {
+				sleep_set_differs = true;
+				break;
+			}
+		}
+	}
+
+	/* Add KVPs to the active set RPM request if they have new values. */
+	rpm_vreg_check_modified_requests(rpm_vreg->aggr_req_active.param,
+		param_active, rpm_vreg->aggr_req_active.valid,
+		&modified_active);
+	rc = rpm_vreg_add_modified_requests(regulator, RPM_SET_ACTIVE,
+		param_active, modified_active);
+	if (rc)
+		return rc;
+	send_active = modified_active;
+
+	/*
+	 * Sleep set configurations are only sent if they differ from the
+	 * active set values.  This is because the active set values will take
+	 * effect during rpm assisted power collapse in the absence of sleep set
+	 * values.
+	 *
+	 * However, once a sleep set request is sent for a given regulator,
+	 * additional sleep set requests must be sent in the future even if they
+	 * match the corresponding active set requests.
+	 */
+	if (rpm_vreg->sleep_request_sent || sleep_set_differs) {
+		/* Add KVPs to the sleep set RPM request if they are new. */
+		rpm_vreg_check_modified_requests(rpm_vreg->aggr_req_sleep.param,
+			param_sleep, rpm_vreg->aggr_req_sleep.valid,
+			&modified_sleep);
+		rc = rpm_vreg_add_modified_requests(regulator, RPM_SET_SLEEP,
+			param_sleep, modified_sleep);
+		if (rc)
+			return rc;
+		send_sleep = modified_sleep;
+	}
+
+	/* Send active set request to the RPM if it contains new KVPs. */
+	if (send_active) {
+		wait_for_ack = rpm_vreg_ack_required(rpm_vreg, RPM_SET_ACTIVE,
+					rpm_vreg->aggr_req_active.param,
+					param_active,
+					rpm_vreg->aggr_req_active.valid,
+					modified_active);
+		rc = rpm_vreg_send_request(regulator, RPM_SET_ACTIVE,
+						wait_for_ack);
+		if (rc)
+			return rc;
+		rpm_vreg->aggr_req_active.valid |= modified_active;
+		rpm_vreg->wait_for_ack_active = false;
+	}
+	/* Store the results of the aggregation. */
+	rpm_vreg->aggr_req_active.modified = modified_active;
+	memcpy(rpm_vreg->aggr_req_active.param, param_active,
+		sizeof(param_active));
+
+	/* Handle debug printing of the active set request. */
+	rpm_regulator_req(regulator, RPM_SET_ACTIVE, send_active);
+	if (send_active)
+		rpm_vreg->aggr_req_active.modified = 0;
+
+	/* Send sleep set request to the RPM if it contains new KVPs. */
+	if (send_sleep) {
+		wait_for_ack = rpm_vreg_ack_required(rpm_vreg, RPM_SET_SLEEP,
+					rpm_vreg->aggr_req_sleep.param,
+					param_sleep,
+					rpm_vreg->aggr_req_sleep.valid,
+					modified_sleep);
+		rc = rpm_vreg_send_request(regulator, RPM_SET_SLEEP,
+						wait_for_ack);
+		if (rc)
+			return rc;
+		else
+			rpm_vreg->sleep_request_sent = true;
+		rpm_vreg->aggr_req_sleep.valid |= modified_sleep;
+		rpm_vreg->wait_for_ack_sleep = false;
+	}
+	/* Store the results of the aggregation. */
+	rpm_vreg->aggr_req_sleep.modified = modified_sleep;
+	memcpy(rpm_vreg->aggr_req_sleep.param, param_sleep,
+		sizeof(param_sleep));
+
+	/* Handle debug printing of the sleep set request. */
+	rpm_regulator_req(regulator, RPM_SET_SLEEP, send_sleep);
+	if (send_sleep)
+		rpm_vreg->aggr_req_sleep.modified = 0;
+
+	/*
+	 * Loop over all requests for this regulator to update the valid and
+	 * modified values for use in future aggregation.
+	 */
+	list_for_each_entry(reg, &rpm_vreg->reg_list, list) {
+		reg->req.valid |= reg->req.modified;
+		reg->req.modified = 0;
+	}
+
+	return rc;
+}
+
+static int rpm_vreg_is_enabled(struct regulator_dev *rdev)
+{
+	struct rpm_regulator *reg = rdev_get_drvdata(rdev);
+
+	if (likely(!reg->use_pin_ctrl_for_enable))
+		return reg->req.param[RPM_REGULATOR_PARAM_ENABLE];
+	else
+		return reg->req.param[RPM_REGULATOR_PARAM_PIN_CTRL_ENABLE]
+			== reg->pin_ctrl_mask[RPM_VREG_PIN_CTRL_STATE_ENABLE];
+}
+
+static int rpm_vreg_enable(struct regulator_dev *rdev)
+{
+	struct rpm_regulator *reg = rdev_get_drvdata(rdev);
+	int rc;
+	u32 prev_enable;
+
+	rpm_vreg_lock(reg->rpm_vreg);
+
+	if (likely(!reg->use_pin_ctrl_for_enable)) {
+		/* Enable using swen KVP. */
+		prev_enable = reg->req.param[RPM_REGULATOR_PARAM_ENABLE];
+		RPM_VREG_SET_PARAM(reg, ENABLE, 1);
+		rc = rpm_vreg_aggregate_requests(reg);
+		if (rc) {
+			vreg_err(reg, "enable failed, rc=%d", rc);
+			RPM_VREG_SET_PARAM(reg, ENABLE, prev_enable);
+		}
+	} else {
+		/* Enable using pcen KVP. */
+		prev_enable
+			= reg->req.param[RPM_REGULATOR_PARAM_PIN_CTRL_ENABLE];
+		RPM_VREG_SET_PARAM(reg, PIN_CTRL_ENABLE,
+			reg->pin_ctrl_mask[RPM_VREG_PIN_CTRL_STATE_ENABLE]);
+		rc = rpm_vreg_aggregate_requests(reg);
+		if (rc) {
+			vreg_err(reg, "enable failed, rc=%d", rc);
+			RPM_VREG_SET_PARAM(reg, PIN_CTRL_ENABLE, prev_enable);
+		}
+	}
+
+	rpm_vreg_unlock(reg->rpm_vreg);
+
+	return rc;
+}
+
+static int rpm_vreg_disable(struct regulator_dev *rdev)
+{
+	struct rpm_regulator *reg = rdev_get_drvdata(rdev);
+	int rc;
+	u32 prev_enable;
+
+	rpm_vreg_lock(reg->rpm_vreg);
+
+	if (likely(!reg->use_pin_ctrl_for_enable)) {
+		/* Disable using swen KVP. */
+		prev_enable = reg->req.param[RPM_REGULATOR_PARAM_ENABLE];
+		RPM_VREG_SET_PARAM(reg, ENABLE, 0);
+		rc = rpm_vreg_aggregate_requests(reg);
+		if (rc) {
+			vreg_err(reg, "disable failed, rc=%d", rc);
+			RPM_VREG_SET_PARAM(reg, ENABLE, prev_enable);
+		}
+	} else {
+		/* Disable using pcen KVP. */
+		prev_enable
+			= reg->req.param[RPM_REGULATOR_PARAM_PIN_CTRL_ENABLE];
+		RPM_VREG_SET_PARAM(reg, PIN_CTRL_ENABLE,
+			reg->pin_ctrl_mask[RPM_VREG_PIN_CTRL_STATE_DISABLE]);
+		rc = rpm_vreg_aggregate_requests(reg);
+		if (rc) {
+			vreg_err(reg, "disable failed, rc=%d", rc);
+			RPM_VREG_SET_PARAM(reg, PIN_CTRL_ENABLE, prev_enable);
+		}
+	}
+
+	rpm_vreg_unlock(reg->rpm_vreg);
+
+	return rc;
+}
+
+#define RPM_VREG_SET_VOLTAGE(_regulator, _val) \
+{ \
+	(_regulator)->req.param[(_regulator)->voltage_index] = _val; \
+	(_regulator)->req.modified |= BIT((_regulator)->voltage_index); \
+}
+
+static int rpm_vreg_set_voltage(struct regulator_dev *rdev, int min_uV,
+				int max_uV, unsigned *selector)
+{
+	struct rpm_regulator *reg = rdev_get_drvdata(rdev);
+	int rc = 0;
+	int voltage;
+	u32 prev_voltage;
+
+	voltage = min_uV - reg->voltage_offset;
+
+	if (voltage < params[reg->voltage_index].min
+	    || voltage > params[reg->voltage_index].max) {
+		vreg_err(reg, "voltage=%d for key=%s is not within allowed range: [%u, %u]\n",
+			voltage, params[reg->voltage_index].name,
+			params[reg->voltage_index].min,
+			params[reg->voltage_index].max);
+		return -EINVAL;
+	}
+
+	rpm_vreg_lock(reg->rpm_vreg);
+
+	prev_voltage = reg->req.param[reg->voltage_index];
+	RPM_VREG_SET_VOLTAGE(reg, voltage);
+
+	rpm_vreg_check_param_max(reg, reg->voltage_index,
+				max_uV - reg->voltage_offset);
+
+	/*
+	 * Only send a new voltage if the regulator is currently enabled or
+	 * if the regulator has been configured to always send voltage updates.
+	 */
+	if (reg->always_send_voltage
+	    || rpm_vreg_active_or_sleep_enabled(reg->rpm_vreg)
+	    || rpm_vreg_shared_active_or_sleep_enabled_valid(reg->rpm_vreg))
+		rc = rpm_vreg_aggregate_requests(reg);
+
+	if (rc) {
+		vreg_err(reg, "set voltage for key=%s failed, rc=%d",
+			params[reg->voltage_index].name, rc);
+		RPM_VREG_SET_VOLTAGE(reg, prev_voltage);
+	}
+
+	rpm_vreg_unlock(reg->rpm_vreg);
+
+	return rc;
+}
+
+static int rpm_vreg_get_voltage(struct regulator_dev *rdev)
+{
+	struct rpm_regulator *reg = rdev_get_drvdata(rdev);
+	int uV;
+
+	uV = reg->req.param[reg->voltage_index] + reg->voltage_offset;
+	if (uV == 0)
+		uV = VOLTAGE_UNKNOWN;
+
+	return uV;
+}
+
+static const struct rpm_regulator_mode_map vreg_mode_mapping[] = {
+	[REGULATOR_MODE_NORMAL]	= {
+		RPM_REGULATOR_LDO_MODE_HPM,
+		RPM_REGULATOR_SMPS_MODE_PWM,
+	},
+	[REGULATOR_MODE_IDLE]	= {
+		RPM_REGULATOR_LDO_MODE_IPEAK,
+		RPM_REGULATOR_SMPS_MODE_AUTO,
+	},
+};
+
+static int rpm_vreg_set_mode(struct regulator_dev *rdev, unsigned int mode)
+{
+	struct rpm_regulator *reg = rdev_get_drvdata(rdev);
+	int index = 0;
+	u32 rpm_mode, prev_mode;
+	int rc;
+
+	switch (reg->rpm_vreg->regulator_type) {
+	case RPM_REGULATOR_TYPE_SMPS:
+		index = RPM_REGULATOR_PARAM_MODE_SMPS;
+		rpm_mode = vreg_mode_mapping[mode].smps_mode;
+		break;
+	case RPM_REGULATOR_TYPE_LDO:
+		index = RPM_REGULATOR_PARAM_MODE_LDO;
+		rpm_mode = vreg_mode_mapping[mode].ldo_mode;
+		break;
+	default:
+		vreg_err(reg, "unsupported regulator type: %d\n",
+			reg->rpm_vreg->regulator_type);
+		return -EINVAL;
+	}
+
+	rpm_vreg_lock(reg->rpm_vreg);
+
+	prev_mode = reg->req.param[index];
+	reg->req.param[index] = rpm_mode;
+	reg->req.modified |= BIT(index);
+
+	rc = rpm_vreg_aggregate_requests(reg);
+	if (rc) {
+		vreg_err(reg, "set mode failed, rc=%d", rc);
+		reg->req.param[index] = prev_mode;
+	}
+
+	rpm_vreg_unlock(reg->rpm_vreg);
+
+	return rc;
+}
+
+static unsigned int rpm_vreg_get_mode(struct regulator_dev *rdev)
+{
+	struct rpm_regulator *reg = rdev_get_drvdata(rdev);
+	unsigned int rpm_mode;
+
+	switch (reg->rpm_vreg->regulator_type) {
+	case RPM_REGULATOR_TYPE_SMPS:
+		rpm_mode = reg->req.param[RPM_REGULATOR_PARAM_MODE_SMPS];
+		if (rpm_mode ==
+			     vreg_mode_mapping[REGULATOR_MODE_IDLE].smps_mode)
+			return REGULATOR_MODE_IDLE;
+		else
+			return REGULATOR_MODE_NORMAL;
+	case RPM_REGULATOR_TYPE_LDO:
+		rpm_mode = reg->req.param[RPM_REGULATOR_PARAM_MODE_LDO];
+		if (rpm_mode ==
+			      vreg_mode_mapping[REGULATOR_MODE_IDLE].ldo_mode)
+			return REGULATOR_MODE_IDLE;
+		else
+			return REGULATOR_MODE_NORMAL;
+	default:
+		vreg_err(reg, "unsupported regulator type %d\n",
+			 reg->rpm_vreg->regulator_type);
+		break;
+	}
+
+	return REGULATOR_MODE_NORMAL;
+}
+
+static int rpm_vreg_set_load(struct regulator_dev *rdev, int load_uA)
+{
+	struct rpm_regulator *reg = rdev_get_drvdata(rdev);
+	int rc = 0;
+	u32 load_mA;
+	u32 prev_load_mA;
+
+	rpm_vreg_lock(reg->rpm_vreg);
+
+	load_mA = MICRO_TO_MILLI(load_uA + reg->system_load);
+	if (load_mA > params[RPM_REGULATOR_PARAM_CURRENT].max)
+		load_mA = params[RPM_REGULATOR_PARAM_CURRENT].max;
+
+	prev_load_mA = reg->req.param[RPM_REGULATOR_PARAM_CURRENT];
+	RPM_VREG_SET_PARAM(reg, CURRENT, load_mA);
+
+	/*
+	 * Only send a new load current value if the regulator is currently
+	 * enabled or if the regulator has been configured to always send
+	 * current updates.
+	 */
+	if (reg->always_send_current
+	    || rpm_vreg_active_or_sleep_enabled(reg->rpm_vreg)
+	    || rpm_vreg_shared_active_or_sleep_enabled_valid(reg->rpm_vreg))
+		rc = rpm_vreg_aggregate_requests(reg);
+
+	if (rc) {
+		vreg_err(reg, "set mode failed, rc=%d\n", rc);
+		RPM_VREG_SET_PARAM(reg, CURRENT, prev_load_mA);
+	}
+
+	rpm_vreg_unlock(reg->rpm_vreg);
+
+	return rc;
+}
+
+static int rpm_vreg_set_bob_mode(struct regulator_dev *rdev, unsigned int mode)
+{
+	struct rpm_regulator *reg = rdev_get_drvdata(rdev);
+	int rc;
+	u32 prev_mode;
+
+	rpm_vreg_lock(reg->rpm_vreg);
+
+	prev_mode = reg->req.param[RPM_REGULATOR_PARAM_MODE_BOB];
+
+	switch (mode) {
+	case REGULATOR_MODE_FAST:
+		RPM_VREG_SET_PARAM(reg, MODE_BOB, RPM_REGULATOR_BOB_MODE_PWM);
+		break;
+	case REGULATOR_MODE_NORMAL:
+		RPM_VREG_SET_PARAM(reg, MODE_BOB, RPM_REGULATOR_BOB_MODE_AUTO);
+		break;
+	case REGULATOR_MODE_IDLE:
+		RPM_VREG_SET_PARAM(reg, MODE_BOB, RPM_REGULATOR_BOB_MODE_PFM);
+		break;
+	case REGULATOR_MODE_STANDBY:
+		RPM_VREG_SET_PARAM(reg, MODE_BOB, RPM_REGULATOR_BOB_MODE_PASS);
+		break;
+	default:
+		vreg_err(reg, "invalid mode: %u\n", mode);
+		rpm_vreg_unlock(reg->rpm_vreg);
+		return -EINVAL;
+	}
+
+	rc = rpm_vreg_aggregate_requests(reg);
+	if (rc) {
+		vreg_err(reg, "set BoB mode failed, rc=%d\n", rc);
+		RPM_VREG_SET_PARAM(reg, MODE_BOB, prev_mode);
+	}
+
+	rpm_vreg_unlock(reg->rpm_vreg);
+
+	return rc;
+}
+
+static unsigned int rpm_vreg_get_bob_mode(struct regulator_dev *rdev)
+{
+	struct rpm_regulator *reg = rdev_get_drvdata(rdev);
+	unsigned int mode;
+
+	switch (reg->req.param[RPM_REGULATOR_PARAM_MODE_BOB]) {
+	case RPM_REGULATOR_BOB_MODE_PWM:
+		mode = REGULATOR_MODE_FAST;
+		break;
+	case RPM_REGULATOR_BOB_MODE_AUTO:
+		mode = REGULATOR_MODE_NORMAL;
+		break;
+	case RPM_REGULATOR_BOB_MODE_PFM:
+		mode = REGULATOR_MODE_IDLE;
+		break;
+	case RPM_REGULATOR_BOB_MODE_PASS:
+		mode = REGULATOR_MODE_STANDBY;
+		break;
+	default:
+		vreg_err(reg, "BoB mode unknown\n");
+		mode = REGULATOR_MODE_NORMAL;
+	}
+
+	return mode;
+}
+
+static unsigned int rpm_vreg_get_optimum_mode(struct regulator_dev *rdev,
+				int input_uV, int output_uV, int load_uA)
+{
+	struct rpm_regulator *reg = rdev_get_drvdata(rdev);
+	u32 mode = REGULATOR_MODE_NORMAL;
+
+	if (reg->hpm_threshold_current > 0) {
+		if (load_uA >= reg->hpm_threshold_current) {
+			/* PWM mode */
+			mode = (reg->rpm_vreg->regulator_type
+				== RPM_REGULATOR_TYPE_BOB)
+					? REGULATOR_MODE_FAST
+					: REGULATOR_MODE_NORMAL;
+		} else {
+			/* AUTO mode */
+			mode = (reg->rpm_vreg->regulator_type
+				== RPM_REGULATOR_TYPE_BOB)
+					? REGULATOR_MODE_NORMAL
+					: REGULATOR_MODE_IDLE;
+		}
+	} else {
+		/* Default to the current mode if no threshold is present. */
+		mode = reg->rdesc.ops->get_mode(rdev);
+	}
+
+	return mode;
+}
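+
+/*
+ * Example (threshold value hypothetical): with qcom,pwm-threshold-current
+ * set to 600000 (600 mA), a 700000 uA load on an SMPS returns
+ * REGULATOR_MODE_NORMAL (PWM) and a 100000 uA load returns
+ * REGULATOR_MODE_IDLE (AUTO); on a BoB the same loads map to
+ * REGULATOR_MODE_FAST and REGULATOR_MODE_NORMAL respectively.
+ */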
+
+static int rpm_vreg_enable_time(struct regulator_dev *rdev)
+{
+	struct rpm_regulator *reg = rdev_get_drvdata(rdev);
+
+	return reg->rpm_vreg->enable_time;
+}
+
+static int rpm_vreg_send_defaults(struct rpm_regulator *reg)
+{
+	int rc;
+
+	rpm_vreg_lock(reg->rpm_vreg);
+	rc = rpm_vreg_aggregate_requests(reg);
+	if (rc)
+		vreg_err(reg, "RPM request failed, rc=%d", rc);
+	rpm_vreg_unlock(reg->rpm_vreg);
+
+	return rc;
+}
+
+static int rpm_vreg_configure_pin_control_enable(struct rpm_regulator *reg,
+		struct device_node *node)
+{
+	struct rpm_regulator_param *pcen_param =
+			&params[RPM_REGULATOR_PARAM_PIN_CTRL_ENABLE];
+	int rc, i;
+
+	if (!of_find_property(node, "qcom,enable-with-pin-ctrl", NULL))
+		return 0;
+
+	if (pcen_param->supported_regulator_types
+			& BIT(reg->rpm_vreg->regulator_type)) {
+		rc = of_property_read_u32_array(node,
+			"qcom,enable-with-pin-ctrl", reg->pin_ctrl_mask,
+			RPM_VREG_PIN_CTRL_STATE_COUNT);
+		if (rc) {
+			vreg_err(reg, "could not read qcom,enable-with-pin-ctrl, rc=%d\n",
+				rc);
+			return rc;
+		}
+
+		/* Verify that the mask values are valid. */
+		for (i = 0; i < RPM_VREG_PIN_CTRL_STATE_COUNT; i++) {
+			if (reg->pin_ctrl_mask[i] < pcen_param->min
+			    || reg->pin_ctrl_mask[i] > pcen_param->max) {
+				vreg_err(reg, "device tree property: qcom,enable-with-pin-ctrl[%d]=%u is outside allowed range [%u, %u]\n",
+					i, reg->pin_ctrl_mask[i],
+					pcen_param->min, pcen_param->max);
+				return -EINVAL;
+			}
+		}
+
+		reg->use_pin_ctrl_for_enable = true;
+	} else {
+		pr_warn("%s: regulator type=%d does not support device tree property: qcom,enable-with-pin-ctrl\n",
+			reg->rdesc.name, reg->rpm_vreg->regulator_type);
+	}
+
+	return 0;
+}
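+
+/*
+ * Example device tree usage for the property parsed above (mask values
+ * hypothetical):
+ *
+ *	qcom,enable-with-pin-ctrl = <0 8>;
+ *
+ * sets the "pcen" KVP to 0 to disable the regulator and to 8 (bit 3 of the
+ * pin control mask) to enable it, in place of the usual "swen" KVP.
+ */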
+
+/**
+ * rpm_regulator_get() - lookup and obtain a handle to an RPM regulator
+ * @dev: device for regulator consumer
+ * @supply: supply name
+ *
+ * Returns a struct rpm_regulator corresponding to the regulator producer,
+ * or ERR_PTR() containing errno.
+ *
+ * This function may only be called from nonatomic context.
+ */
+struct rpm_regulator *rpm_regulator_get(struct device *dev, const char *supply)
+{
+	struct rpm_regulator *framework_reg;
+	struct rpm_regulator *priv_reg = NULL;
+	struct regulator *regulator;
+	struct rpm_vreg *rpm_vreg;
+
+	regulator = regulator_get(dev, supply);
+	if (IS_ERR(regulator)) {
+		pr_err("could not find regulator for: dev=%s, supply=%s, rc=%ld\n",
+			(dev ? dev_name(dev) : ""), (supply ? supply : ""),
+			PTR_ERR(regulator));
+		return ERR_CAST(regulator);
+	}
+
+	framework_reg = regulator_get_drvdata(regulator);
+	if (framework_reg == NULL) {
+		pr_err("regulator structure not found.\n");
+		regulator_put(regulator);
+		return ERR_PTR(-ENODEV);
+	}
+	regulator_put(regulator);
+
+	rpm_vreg = framework_reg->rpm_vreg;
+
+	priv_reg = kzalloc(sizeof(struct rpm_regulator), GFP_KERNEL);
+	if (priv_reg == NULL) {
+		vreg_err(framework_reg,
+			"could not allocate memory for regulator\n");
+		return ERR_PTR(-ENOMEM);
+	}
+
+	/*
+	 * Allocate a regulator_dev struct so that framework callback functions
+	 * can be called from the private API functions.
+	 */
+	priv_reg->rdev = kzalloc(sizeof(struct regulator_dev), GFP_KERNEL);
+	if (priv_reg->rdev == NULL) {
+		vreg_err(framework_reg,
+			"could not allocate memory for regulator_dev\n");
+		kfree(priv_reg);
+		return ERR_PTR(-ENOMEM);
+	}
+	priv_reg->rdev->reg_data	= priv_reg;
+	priv_reg->rpm_vreg		= rpm_vreg;
+	priv_reg->rdesc.name		= framework_reg->rdesc.name;
+	priv_reg->rdesc.ops		= framework_reg->rdesc.ops;
+	priv_reg->set_active		= framework_reg->set_active;
+	priv_reg->set_sleep		= framework_reg->set_sleep;
+	priv_reg->min_uV		= framework_reg->min_uV;
+	priv_reg->max_uV		= framework_reg->max_uV;
+	priv_reg->system_load		= framework_reg->system_load;
+
+	might_sleep_if(!rpm_vreg->allow_atomic);
+	rpm_vreg_lock(rpm_vreg);
+	list_add(&priv_reg->list, &rpm_vreg->reg_list);
+	rpm_vreg_unlock(rpm_vreg);
+
+	return priv_reg;
+}
+EXPORT_SYMBOL(rpm_regulator_get);
+
+static int rpm_regulator_check_input(struct rpm_regulator *regulator)
+{
+	if (IS_ERR_OR_NULL(regulator) || regulator->rpm_vreg == NULL) {
+		pr_err("invalid rpm_regulator pointer\n");
+		return -EINVAL;
+	}
+
+	might_sleep_if(!regulator->rpm_vreg->allow_atomic);
+
+	return 0;
+}
+
+/**
+ * rpm_regulator_put() - free the RPM regulator handle
+ * @regulator: RPM regulator handle
+ *
+ * Parameter reaggregation does not take place when rpm_regulator_put is called.
+ * Therefore, regulator enable state and voltage must be configured
+ * appropriately before calling rpm_regulator_put.
+ *
+ * This function may be called from either atomic or nonatomic context.  If this
+ * function is called from atomic context, then the regulator being operated on
+ * must be configured via device tree with qcom,allow-atomic == 1.
+ */
+void rpm_regulator_put(struct rpm_regulator *regulator)
+{
+	struct rpm_vreg *rpm_vreg;
+	int rc = rpm_regulator_check_input(regulator);
+
+	if (rc)
+		return;
+
+	rpm_vreg = regulator->rpm_vreg;
+
+	might_sleep_if(!rpm_vreg->allow_atomic);
+	rpm_vreg_lock(rpm_vreg);
+	list_del(&regulator->list);
+	rpm_vreg_unlock(rpm_vreg);
+
+	kfree(regulator->rdev);
+	kfree(regulator);
+}
+EXPORT_SYMBOL(rpm_regulator_put);
+
+/**
+ * rpm_regulator_enable() - enable regulator output
+ * @regulator: RPM regulator handle
+ *
+ * Returns 0 on success or errno on failure.
+ *
+ * This function may be called from either atomic or nonatomic context.  If this
+ * function is called from atomic context, then the regulator being operated on
+ * must be configured via device tree with qcom,allow-atomic == 1.
+ */
+int rpm_regulator_enable(struct rpm_regulator *regulator)
+{
+	int rc = rpm_regulator_check_input(regulator);
+
+	if (rc)
+		return rc;
+
+	return rpm_vreg_enable(regulator->rdev);
+}
+EXPORT_SYMBOL(rpm_regulator_enable);
+
+/**
+ * rpm_regulator_disable() - disable regulator output
+ * @regulator: RPM regulator handle
+ *
+ * Returns 0 on success or errno on failure.
+ *
+ * The enable state of the regulator is determined by aggregating the requests
+ * of all consumers.  Therefore, it is possible that the regulator will remain
+ * enabled even after rpm_regulator_disable is called.
+ *
+ * This function may be called from either atomic or nonatomic context.  If this
+ * function is called from atomic context, then the regulator being operated on
+ * must be configured via device tree with qcom,allow-atomic == 1.
+ */
+int rpm_regulator_disable(struct rpm_regulator *regulator)
+{
+	int rc = rpm_regulator_check_input(regulator);
+
+	if (rc)
+		return rc;
+
+	return rpm_vreg_disable(regulator->rdev);
+}
+EXPORT_SYMBOL(rpm_regulator_disable);
+
+/**
+ * rpm_regulator_set_voltage() - set regulator output voltage
+ * @regulator: RPM regulator handle
+ * @min_uV: minimum required voltage in uV
+ * @max_uV: maximum acceptable voltage in uV
+ *
+ * Sets a voltage regulator to the desired output voltage. This can be set
+ * while the regulator is disabled or enabled.  If the regulator is enabled then
+ * the voltage will change to the new value immediately; otherwise, if the
+ * regulator is disabled, then the regulator will output at the new voltage when
+ * enabled.
+ *
+ * The min_uV to max_uV voltage range requested must intersect with the
+ * voltage constraint range configured for the regulator.
+ *
+ * Returns 0 on success or errno on failure.
+ *
+ * The final voltage value that is sent to the RPM is aggregated based upon the
+ * values requested by all consumers of the regulator.  This corresponds to the
+ * maximum min_uV value.
+ *
+ * This function may be called from either atomic or nonatomic context.  If this
+ * function is called from atomic context, then the regulator being operated on
+ * must be configured via device tree with qcom,allow-atomic == 1.
+ */
+int rpm_regulator_set_voltage(struct rpm_regulator *regulator, int min_uV,
+			      int max_uV)
+{
+	int rc = rpm_regulator_check_input(regulator);
+	int uV = min_uV;
+
+	if (rc)
+		return rc;
+
+	if (regulator->rpm_vreg->regulator_type == RPM_REGULATOR_TYPE_VS) {
+		vreg_err(regulator, "unsupported regulator type: %d\n",
+			regulator->rpm_vreg->regulator_type);
+		return -EINVAL;
+	}
+
+	if (min_uV > max_uV) {
+		vreg_err(regulator, "min_uV=%d must be less than max_uV=%d\n",
+			min_uV, max_uV);
+		return -EINVAL;
+	}
+
+	if (uV < regulator->min_uV && max_uV >= regulator->min_uV)
+		uV = regulator->min_uV;
+
+	if (uV < regulator->min_uV || uV > regulator->max_uV) {
+		vreg_err(regulator,
+			"request v=[%d, %d] is outside allowed v=[%d, %d]\n",
+			min_uV, max_uV, regulator->min_uV, regulator->max_uV);
+		return -EINVAL;
+	}
+
+	return regulator->rdesc.ops->set_voltage(regulator->rdev, uV, uV, NULL);
+}
+EXPORT_SYMBOL(rpm_regulator_set_voltage);
+
+/**
+ * rpm_regulator_set_mode() - set regulator operating mode
+ * @regulator: RPM regulator handle
+ * @mode: operating mode requested for the regulator
+ *
+ * Requests that the mode of the regulator be set to the mode specified.  This
+ * parameter is aggregated using a max function such that AUTO < IPEAK < HPM.
+ *
+ * Returns 0 on success or errno on failure.
+ */
+int rpm_regulator_set_mode(struct rpm_regulator *regulator,
+				enum rpm_regulator_mode mode)
+{
+	int index = 0;
+	u32 new_mode, prev_mode;
+	int rc;
+
+	rc = rpm_regulator_check_input(regulator);
+	if (rc)
+		return rc;
+
+	if (mode < 0 || mode >= ARRAY_SIZE(mode_mapping)) {
+		vreg_err(regulator, "invalid mode requested: %d\n", mode);
+		return -EINVAL;
+	}
+
+	switch (regulator->rpm_vreg->regulator_type) {
+	case RPM_REGULATOR_TYPE_SMPS:
+		index = RPM_REGULATOR_PARAM_MODE_SMPS;
+		new_mode = mode_mapping[mode].smps_mode;
+		break;
+	case RPM_REGULATOR_TYPE_LDO:
+		index = RPM_REGULATOR_PARAM_MODE_LDO;
+		new_mode = mode_mapping[mode].ldo_mode;
+		break;
+	default:
+		vreg_err(regulator, "unsupported regulator type: %d\n",
+			regulator->rpm_vreg->regulator_type);
+		return -EINVAL;
+	}
+
+	if (new_mode < params[index].min || new_mode > params[index].max) {
+		vreg_err(regulator, "invalid mode requested: %d for type: %d\n",
+			mode, regulator->rpm_vreg->regulator_type);
+		return -EINVAL;
+	}
+
+	rpm_vreg_lock(regulator->rpm_vreg);
+
+	prev_mode = regulator->req.param[index];
+	regulator->req.param[index] = new_mode;
+	regulator->req.modified |= BIT(index);
+
+	rc = rpm_vreg_aggregate_requests(regulator);
+	if (rc) {
+		vreg_err(regulator, "set mode failed, rc=%d", rc);
+		regulator->req.param[index] = prev_mode;
+	}
+
+	rpm_vreg_unlock(regulator->rpm_vreg);
+
+	return rc;
+}
+EXPORT_SYMBOL(rpm_regulator_set_mode);
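+
+/*
+ * Sketch of typical consumer usage of the private API above ("my-supply"
+ * and the voltage value are placeholders, not names defined by this
+ * driver):
+ *
+ *	struct rpm_regulator *reg = rpm_regulator_get(dev, "my-supply");
+ *
+ *	if (!IS_ERR(reg)) {
+ *		rpm_regulator_set_voltage(reg, 1800000, 1800000);
+ *		rpm_regulator_enable(reg);
+ *		...
+ *		rpm_regulator_disable(reg);
+ *		rpm_regulator_put(reg);
+ *	}
+ */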
+
+static struct regulator_ops ldo_ops = {
+	.enable			= rpm_vreg_enable,
+	.disable		= rpm_vreg_disable,
+	.is_enabled		= rpm_vreg_is_enabled,
+	.set_voltage		= rpm_vreg_set_voltage,
+	.get_voltage		= rpm_vreg_get_voltage,
+	.set_mode		= rpm_vreg_set_mode,
+	.get_mode		= rpm_vreg_get_mode,
+	.set_load		= rpm_vreg_set_load,
+	.enable_time		= rpm_vreg_enable_time,
+};
+
+static struct regulator_ops smps_ops = {
+	.enable			= rpm_vreg_enable,
+	.disable		= rpm_vreg_disable,
+	.is_enabled		= rpm_vreg_is_enabled,
+	.set_voltage		= rpm_vreg_set_voltage,
+	.get_voltage		= rpm_vreg_get_voltage,
+	.set_mode		= rpm_vreg_set_mode,
+	.get_mode		= rpm_vreg_get_mode,
+	.set_load		= rpm_vreg_set_load,
+	.enable_time		= rpm_vreg_enable_time,
+};
+
+static struct regulator_ops smps_optimum_mode_ops = {
+	.enable			= rpm_vreg_enable,
+	.disable		= rpm_vreg_disable,
+	.is_enabled		= rpm_vreg_is_enabled,
+	.set_voltage		= rpm_vreg_set_voltage,
+	.get_voltage		= rpm_vreg_get_voltage,
+	.set_mode		= rpm_vreg_set_mode,
+	.get_mode		= rpm_vreg_get_mode,
+	.get_optimum_mode	= rpm_vreg_get_optimum_mode,
+	.enable_time		= rpm_vreg_enable_time,
+};
+
+static struct regulator_ops switch_ops = {
+	.enable			= rpm_vreg_enable,
+	.disable		= rpm_vreg_disable,
+	.is_enabled		= rpm_vreg_is_enabled,
+	.enable_time		= rpm_vreg_enable_time,
+};
+
+static struct regulator_ops ncp_ops = {
+	.enable			= rpm_vreg_enable,
+	.disable		= rpm_vreg_disable,
+	.is_enabled		= rpm_vreg_is_enabled,
+	.set_voltage		= rpm_vreg_set_voltage,
+	.get_voltage		= rpm_vreg_get_voltage,
+	.enable_time		= rpm_vreg_enable_time,
+};
+
+static struct regulator_ops bob_ops = {
+	.enable			= rpm_vreg_enable,
+	.disable		= rpm_vreg_disable,
+	.is_enabled		= rpm_vreg_is_enabled,
+	.set_voltage		= rpm_vreg_set_voltage,
+	.get_voltage		= rpm_vreg_get_voltage,
+	.set_mode		= rpm_vreg_set_bob_mode,
+	.get_mode		= rpm_vreg_get_bob_mode,
+	.get_optimum_mode	= rpm_vreg_get_optimum_mode,
+	.enable_time		= rpm_vreg_enable_time,
+};
+
+static struct regulator_ops *vreg_ops[] = {
+	[RPM_REGULATOR_TYPE_LDO]	= &ldo_ops,
+	[RPM_REGULATOR_TYPE_SMPS]	= &smps_ops,
+	[RPM_REGULATOR_TYPE_VS]		= &switch_ops,
+	[RPM_REGULATOR_TYPE_NCP]	= &ncp_ops,
+	[RPM_REGULATOR_TYPE_BOB]	= &bob_ops,
+};
+
+static int rpm_vreg_device_remove(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct rpm_regulator *reg;
+	struct rpm_vreg *rpm_vreg;
+
+	reg = platform_get_drvdata(pdev);
+	if (reg) {
+		rpm_vreg = reg->rpm_vreg;
+		rpm_vreg_lock(rpm_vreg);
+		regulator_unregister(reg->rdev);
+		list_del(&reg->list);
+		kfree(reg);
+		rpm_vreg_unlock(rpm_vreg);
+	} else {
+		dev_err(dev, "%s: drvdata missing\n", __func__);
+		return -EINVAL;
+	}
+
+	platform_set_drvdata(pdev, NULL);
+
+	return 0;
+}
+
+static int rpm_vreg_resource_remove(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct rpm_regulator *reg, *reg_temp;
+	struct rpm_vreg *rpm_vreg;
+
+	rpm_vreg = platform_get_drvdata(pdev);
+	if (rpm_vreg) {
+		rpm_vreg_lock(rpm_vreg);
+		list_for_each_entry_safe(reg, reg_temp, &rpm_vreg->reg_list,
+				list) {
+			/* Only touch data for private consumers. */
+			if (reg->rdev->desc == NULL) {
+				list_del(&reg->list);
+				kfree(reg->rdev);
+				kfree(reg);
+			} else {
+				dev_err(dev, "%s: not all child devices have been removed\n",
+					__func__);
+			}
+		}
+		rpm_vreg_unlock(rpm_vreg);
+
+		msm_rpm_free_request(rpm_vreg->handle_active);
+		msm_rpm_free_request(rpm_vreg->handle_sleep);
+
+		kfree(rpm_vreg);
+	} else {
+		dev_err(dev, "%s: drvdata missing\n", __func__);
+		return -EINVAL;
+	}
+
+	platform_set_drvdata(pdev, NULL);
+
+	return 0;
+}
+
+static int rpm_vreg_set_smps_ldo_voltage_index(struct device *dev,
+					struct rpm_regulator *reg)
+{
+	struct device_node *node = dev->of_node;
+	int chosen = 0;
+
+	if (of_property_read_bool(node, "qcom,use-voltage-corner")) {
+		reg->voltage_index = RPM_REGULATOR_PARAM_CORNER;
+		reg->voltage_offset = RPM_REGULATOR_CORNER_NONE;
+		chosen++;
+	}
+
+	if (of_property_read_bool(node, "qcom,use-voltage-floor-corner")) {
+		reg->voltage_index = RPM_REGULATOR_PARAM_FLOOR_CORNER;
+		reg->voltage_offset = RPM_REGULATOR_CORNER_NONE;
+		chosen++;
+	}
+
+	if (of_property_read_bool(node, "qcom,use-voltage-level")) {
+		reg->voltage_index = RPM_REGULATOR_PARAM_LEVEL;
+		chosen++;
+	}
+
+	if (of_property_read_bool(node, "qcom,use-voltage-floor-level")) {
+		reg->voltage_index = RPM_REGULATOR_PARAM_FLOOR_LEVEL;
+		chosen++;
+	}
+
+	if (chosen > 1) {
+		dev_err(dev, "only one qcom,use-voltage-* may be specified\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
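+
+/*
+ * Example: an SMPS or LDO child node containing the boolean property
+ *
+ *	qcom,use-voltage-corner;
+ *
+ * makes the set_voltage/get_voltage callbacks operate on the "corn" KVP
+ * (with values offset by RPM_REGULATOR_CORNER_NONE) instead of a raw
+ * microvolt value.
+ */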
+
+static int rpm_vreg_set_bob_voltage_index(struct device *dev,
+					struct rpm_regulator *reg)
+{
+	struct device_node *node = dev->of_node;
+	int chosen = 0;
+
+	if (of_property_read_bool(node, "qcom,use-pin-ctrl-voltage1")) {
+		reg->voltage_index = RPM_REGULATOR_PARAM_PIN_CTRL_VOLTAGE1;
+		chosen++;
+	}
+
+	if (of_property_read_bool(node, "qcom,use-pin-ctrl-voltage2")) {
+		reg->voltage_index = RPM_REGULATOR_PARAM_PIN_CTRL_VOLTAGE2;
+		chosen++;
+	}
+
+	if (of_property_read_bool(node, "qcom,use-pin-ctrl-voltage3")) {
+		reg->voltage_index = RPM_REGULATOR_PARAM_PIN_CTRL_VOLTAGE3;
+		chosen++;
+	}
+
+	if (chosen > 1) {
+		dev_err(dev, "only one qcom,use-pin-ctrl-voltage* may be specified\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int rpm_vreg_device_set_voltage_index(struct device *dev,
+					struct rpm_regulator *reg, int type)
+{
+	int rc = 0;
+
+	reg->voltage_index = RPM_REGULATOR_PARAM_VOLTAGE;
+
+	switch (type) {
+	case RPM_REGULATOR_TYPE_SMPS:
+	case RPM_REGULATOR_TYPE_LDO:
+		rc = rpm_vreg_set_smps_ldo_voltage_index(dev, reg);
+		break;
+	case RPM_REGULATOR_TYPE_BOB:
+		rc = rpm_vreg_set_bob_voltage_index(dev, reg);
+		break;
+	}
+
+	return rc;
+}
+
+/*
+ * This probe is called for child rpm-regulator devices which have
+ * properties which are required to configure individual regulator
+ * framework regulators for a given RPM regulator resource.
+ */
+static int rpm_vreg_device_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct device_node *node = dev->of_node;
+	struct regulator_init_data *init_data;
+	struct rpm_vreg *rpm_vreg;
+	struct rpm_regulator *reg;
+	struct regulator_config reg_config = {};
+	int rc = 0;
+	int i, regulator_type;
+	u32 val;
+
+	if (!dev->of_node) {
+		dev_err(dev, "%s: device tree information missing\n", __func__);
+		return -ENODEV;
+	}
+
+	if (pdev->dev.parent == NULL) {
+		dev_err(dev, "%s: parent device missing\n", __func__);
+		return -ENODEV;
+	}
+
+	rpm_vreg = dev_get_drvdata(pdev->dev.parent);
+	if (rpm_vreg == NULL) {
+		dev_err(dev, "%s: rpm_vreg not found in parent device\n",
+			__func__);
+		return -ENODEV;
+	}
+
+	reg = kzalloc(sizeof(struct rpm_regulator), GFP_KERNEL);
+	if (reg == NULL) {
+		dev_err(dev, "%s: could not allocate memory for reg\n",
+			__func__);
+		return -ENOMEM;
+	}
+
+	regulator_type		= rpm_vreg->regulator_type;
+	reg->rpm_vreg		= rpm_vreg;
+	reg->rdesc.owner	= THIS_MODULE;
+	reg->rdesc.type		= REGULATOR_VOLTAGE;
+	reg->rdesc.ops		= vreg_ops[regulator_type];
+
+	rc = rpm_vreg_device_set_voltage_index(dev, reg, regulator_type);
+	if (rc)
+		goto fail_free_reg;
+
+	reg->always_send_voltage
+		= of_property_read_bool(node, "qcom,always-send-voltage");
+	reg->always_send_current
+		= of_property_read_bool(node, "qcom,always-send-current");
+
+	if (regulator_type == RPM_REGULATOR_TYPE_VS)
+		reg->rdesc.n_voltages = 0;
+	else
+		reg->rdesc.n_voltages = 2;
+
+	rc = of_property_read_u32(node, "qcom,set", &val);
+	if (rc) {
+		dev_err(dev, "%s: sleep set and/or active set must be configured via qcom,set property, rc=%d\n",
+			__func__, rc);
+		goto fail_free_reg;
+	} else if (!(val & RPM_SET_CONFIG_BOTH)) {
+		dev_err(dev, "%s: qcom,set=%u property is invalid\n", __func__,
+			val);
+		rc = -EINVAL;
+		goto fail_free_reg;
+	}
+
+	reg->set_active = !!(val & RPM_SET_CONFIG_ACTIVE);
+	reg->set_sleep = !!(val & RPM_SET_CONFIG_SLEEP);
+
+	init_data = of_get_regulator_init_data(dev, node, &reg->rdesc);
+	if (init_data == NULL) {
+		dev_err(dev, "%s: unable to allocate memory\n", __func__);
+		rc = -ENOMEM;
+		goto fail_free_reg;
+	}
+	if (init_data->constraints.name == NULL) {
+		dev_err(dev, "%s: regulator name not specified\n", __func__);
+		rc = -EINVAL;
+		goto fail_free_reg;
+	}
+
+	init_data->constraints.input_uV	= init_data->constraints.max_uV;
+
+	if (of_get_property(node, "parent-supply", NULL))
+		init_data->supply_regulator = "parent";
+
+	of_property_read_u32(node, "qcom,pwm-threshold-current",
+					&reg->hpm_threshold_current);
+	if (reg->hpm_threshold_current > 0
+	    && regulator_type == RPM_REGULATOR_TYPE_SMPS)
+		reg->rdesc.ops = &smps_optimum_mode_ops;
+
+	/*
+	 * Fill in ops and mode masks based on callbacks specified for
+	 * this type of regulator.
+	 */
+	if (reg->rdesc.ops->enable)
+		init_data->constraints.valid_ops_mask
+			|= REGULATOR_CHANGE_STATUS;
+	if (reg->rdesc.ops->get_voltage)
+		init_data->constraints.valid_ops_mask
+			|= REGULATOR_CHANGE_VOLTAGE;
+	if (reg->rdesc.ops->get_mode) {
+		init_data->constraints.valid_ops_mask
+			|= REGULATOR_CHANGE_MODE | REGULATOR_CHANGE_DRMS;
+
+		if (regulator_type == RPM_REGULATOR_TYPE_BOB)
+			init_data->constraints.valid_modes_mask
+				= REGULATOR_MODE_FAST | REGULATOR_MODE_NORMAL;
+		else
+			init_data->constraints.valid_modes_mask
+				|= REGULATOR_MODE_NORMAL | REGULATOR_MODE_IDLE;
+	}
+
+	reg->rdesc.name		= init_data->constraints.name;
+	reg->min_uV		= init_data->constraints.min_uV;
+	reg->max_uV		= init_data->constraints.max_uV;
+
+	/* Initialize the param array based on optional properties. */
+	for (i = 0; i < RPM_REGULATOR_PARAM_MAX; i++) {
+		rc = of_property_read_u32(node, params[i].property_name, &val);
+		if (rc == 0) {
+			if (params[i].supported_regulator_types
+					& BIT(regulator_type)) {
+				if (val < params[i].min
+						|| val > params[i].max) {
+					pr_warn("%s: device tree property: %s=%u is outsided allowed range [%u, %u]\n",
+						reg->rdesc.name,
+						params[i].property_name, val,
+						params[i].min, params[i].max);
+					continue;
+				}
+				reg->req.param[i] = val;
+				reg->req.modified |= BIT(i);
+			} else {
+				pr_warn("%s: regulator type=%d does not support device tree property: %s\n",
+					reg->rdesc.name, regulator_type,
+					params[i].property_name);
+			}
+		}
+	}
+
+	of_property_read_u32(node, "qcom,system-load", &reg->system_load);
+
+	rc = rpm_vreg_configure_pin_control_enable(reg, node);
+	if (rc) {
+		vreg_err(reg, "could not configure pin control enable, rc=%d\n",
+			rc);
+		goto fail_free_reg;
+	}
+
+	rpm_vreg_lock(rpm_vreg);
+	list_add(&reg->list, &rpm_vreg->reg_list);
+	rpm_vreg_unlock(rpm_vreg);
+
+	if (of_property_read_bool(node, "qcom,send-defaults")) {
+		rc = rpm_vreg_send_defaults(reg);
+		if (rc) {
+			vreg_err(reg, "could not send defaults, rc=%d\n", rc);
+			goto fail_remove_from_list;
+		}
+	}
+
+	reg_config.dev = dev;
+	reg_config.init_data = init_data;
+	reg_config.of_node = node;
+	reg_config.driver_data = reg;
+	reg->rdev = regulator_register(&reg->rdesc, &reg_config);
+	if (IS_ERR(reg->rdev)) {
+		rc = PTR_ERR(reg->rdev);
+		reg->rdev = NULL;
+		pr_err("regulator_register failed: %s, rc=%d\n",
+			reg->rdesc.name, rc);
+		goto fail_remove_from_list;
+	}
+
+	platform_set_drvdata(pdev, reg);
+
+	pr_debug("successfully probed: %s\n", reg->rdesc.name);
+
+	return 0;
+
+fail_remove_from_list:
+	rpm_vreg_lock(rpm_vreg);
+	list_del(&reg->list);
+	rpm_vreg_unlock(rpm_vreg);
+
+fail_free_reg:
+	kfree(reg);
+	return rc;
+}
+
+/*
+ * This probe is called for parent rpm-regulator devices which have
+ * properties which are required to identify a given RPM resource.
+ */
+static int rpm_vreg_resource_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct device_node *node = dev->of_node;
+	struct rpm_vreg *rpm_vreg;
+	int val = 0;
+	u32 resource_type;
+	int rc;
+
+	if (!dev->of_node) {
+		dev_err(dev, "%s: device tree information missing\n", __func__);
+		return -ENODEV;
+	}
+
+	/* Create new rpm_vreg entry. */
+	rpm_vreg = kzalloc(sizeof(struct rpm_vreg), GFP_KERNEL);
+	if (rpm_vreg == NULL) {
+		dev_err(dev, "%s: could not allocate memory for vreg\n",
+			__func__);
+		return -ENOMEM;
+	}
+
+	/* Required device tree properties: */
+	rc = of_property_read_string(node, "qcom,resource-name",
+			&rpm_vreg->resource_name);
+	if (rc) {
+		dev_err(dev, "%s: qcom,resource-name missing in DT node\n",
+			__func__);
+		goto fail_free_vreg;
+	}
+	resource_type = rpm_vreg_string_to_int(rpm_vreg->resource_name);
+
+	rc = of_property_read_u32(node, "qcom,resource-id",
+			&rpm_vreg->resource_id);
+	if (rc) {
+		dev_err(dev, "%s: qcom,resource-id missing in DT node\n",
+			__func__);
+		goto fail_free_vreg;
+	}
+
+	rc = of_property_read_u32(node, "qcom,regulator-type",
+			&rpm_vreg->regulator_type);
+	if (rc) {
+		dev_err(dev, "%s: qcom,regulator-type missing in DT node\n",
+			__func__);
+		goto fail_free_vreg;
+	}
+
+	if ((rpm_vreg->regulator_type < 0)
+	    || (rpm_vreg->regulator_type >= RPM_REGULATOR_TYPE_MAX)) {
+		dev_err(dev, "%s: invalid regulator type: %d\n", __func__,
+			rpm_vreg->regulator_type);
+		rc = -EINVAL;
+		goto fail_free_vreg;
+	}
+
+	/* Optional device tree properties: */
+	of_property_read_u32(node, "qcom,allow-atomic", &val);
+	rpm_vreg->allow_atomic = !!val;
+	of_property_read_u32(node, "qcom,enable-time", &rpm_vreg->enable_time);
+	rpm_vreg->apps_only = of_property_read_bool(node, "qcom,apps-only");
+	rpm_vreg->always_wait_for_ack
+		= of_property_read_bool(node, "qcom,always-wait-for-ack");
+
+	rpm_vreg->handle_active = msm_rpm_create_request(RPM_SET_ACTIVE,
+		resource_type, rpm_vreg->resource_id, RPM_REGULATOR_PARAM_MAX);
+	if (rpm_vreg->handle_active == NULL
+	    || IS_ERR(rpm_vreg->handle_active)) {
+		rc = rpm_vreg->handle_active ?
+			PTR_ERR(rpm_vreg->handle_active) : -ENOMEM;
+		if (rc != -EPROBE_DEFER)
+			dev_err(dev, "%s: failed to create active RPM handle, rc=%d\n",
+				__func__, rc);
+		goto fail_free_vreg;
+	}
+
+	rpm_vreg->handle_sleep = msm_rpm_create_request(RPM_SET_SLEEP,
+		resource_type, rpm_vreg->resource_id, RPM_REGULATOR_PARAM_MAX);
+	if (rpm_vreg->handle_sleep == NULL || IS_ERR(rpm_vreg->handle_sleep)) {
+		rc = rpm_vreg->handle_sleep ?
+			PTR_ERR(rpm_vreg->handle_sleep) : -ENOMEM;
+		if (rc != -EPROBE_DEFER)
+			dev_err(dev, "%s: failed to create sleep RPM handle, rc=%d\n",
+				__func__, rc);
+		goto fail_free_handle_active;
+	}
+
+	INIT_LIST_HEAD(&rpm_vreg->reg_list);
+
+	if (rpm_vreg->allow_atomic)
+		spin_lock_init(&rpm_vreg->slock);
+	else
+		mutex_init(&rpm_vreg->mlock);
+
+	platform_set_drvdata(pdev, rpm_vreg);
+
+	rc = of_platform_populate(node, NULL, NULL, dev);
+	if (rc) {
+		dev_err(dev, "%s: failed to add child nodes, rc=%d\n", __func__,
+			rc);
+		goto fail_unset_drvdata;
+	}
+
+	pr_debug("successfully probed: %s (%08X) %u\n", rpm_vreg->resource_name,
+		resource_type, rpm_vreg->resource_id);
+
+	return rc;
+
+fail_unset_drvdata:
+	platform_set_drvdata(pdev, NULL);
+	msm_rpm_free_request(rpm_vreg->handle_sleep);
+
+fail_free_handle_active:
+	msm_rpm_free_request(rpm_vreg->handle_active);
+
+fail_free_vreg:
+	kfree(rpm_vreg);
+
+	return rc;
+}
+
+static const struct of_device_id rpm_vreg_match_table_device[] = {
+	{ .compatible = "qcom,rpm-smd-regulator", },
+	{}
+};
+
+static const struct of_device_id rpm_vreg_match_table_resource[] = {
+	{ .compatible = "qcom,rpm-smd-regulator-resource", },
+	{}
+};
+
+static struct platform_driver rpm_vreg_device_driver = {
+	.probe = rpm_vreg_device_probe,
+	.remove = rpm_vreg_device_remove,
+	.driver = {
+		.name = "qcom,rpm-smd-regulator",
+		.owner = THIS_MODULE,
+		.of_match_table = rpm_vreg_match_table_device,
+	},
+};
+
+static struct platform_driver rpm_vreg_resource_driver = {
+	.probe = rpm_vreg_resource_probe,
+	.remove = rpm_vreg_resource_remove,
+	.driver = {
+		.name = "qcom,rpm-smd-regulator-resource",
+		.owner = THIS_MODULE,
+		.of_match_table = rpm_vreg_match_table_resource,
+	},
+};
+
+/**
+ * rpm_smd_regulator_driver_init() - initialize the RPM SMD regulator drivers
+ *
+ * This function registers the RPM SMD regulator platform drivers.
+ *
+ * Returns 0 on success or errno on failure.
+ */
+int __init rpm_smd_regulator_driver_init(void)
+{
+	static bool initialized;
+	int i, rc;
+
+	if (initialized)
+		return 0;
+	else
+		initialized = true;
+
+	/* Store parameter string names as integers */
+	for (i = 0; i < RPM_REGULATOR_PARAM_MAX; i++)
+		params[i].key = rpm_vreg_string_to_int(params[i].name);
+
+	rc = platform_driver_register(&rpm_vreg_device_driver);
+	if (rc)
+		return rc;
+
+	return platform_driver_register(&rpm_vreg_resource_driver);
+}
+EXPORT_SYMBOL(rpm_smd_regulator_driver_init);
+
+static void __exit rpm_vreg_exit(void)
+{
+	platform_driver_unregister(&rpm_vreg_device_driver);
+	platform_driver_unregister(&rpm_vreg_resource_driver);
+}
+
+arch_initcall(rpm_smd_regulator_driver_init);
+module_exit(rpm_vreg_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("MSM RPM SMD regulator driver");
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/regulator/spm-regulator.c	2019-01-22 16:16:26.279271545 +0100
@@ -0,0 +1,1329 @@
+/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/regmap.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/slab.h>
+#include <linux/spmi.h>
+#include <linux/platform_device.h>
+#include <linux/string.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+#include <linux/regulator/of_regulator.h>
+#include <linux/regulator/spm-regulator.h>
+#include <soc/qcom/spm.h>
+#include <linux/arm-smccc.h>
+
+#if !defined(CONFIG_ARM64) && !(defined(CONFIG_ARM) && defined(CONFIG_ARM_PSCI))
+#define __invoke_psci_fn_smc(a, b, c, d) 0
+#endif
+
+#define SPM_REGULATOR_DRIVER_NAME "qcom,spm-regulator"
+
+struct voltage_range {
+	int min_uV;
+	int set_point_min_uV;
+	int max_uV;
+	int step_uV;
+};
+
+enum qpnp_regulator_uniq_type {
+	QPNP_TYPE_HF,
+	QPNP_TYPE_FTS2,
+	QPNP_TYPE_FTS2p5,
+	QPNP_TYPE_FTS426,
+	QPNP_TYPE_ULT_HF,
+};
+
+enum qpnp_regulator_type {
+	QPNP_HF_TYPE		= 0x03,
+	QPNP_FTS2_TYPE		= 0x1C,
+	QPNP_FTS2p5_TYPE	= 0x1C,
+	QPNP_FTS426_TYPE	= 0x1C,
+	QPNP_ULT_HF_TYPE	= 0x22,
+};
+
+enum qpnp_regulator_subtype {
+	QPNP_FTS2_SUBTYPE	= 0x08,
+	QPNP_HF_SUBTYPE		= 0x08,
+	QPNP_FTS2p5_SUBTYPE	= 0x09,
+	QPNP_FTS426_SUBTYPE	= 0x0A,
+	QPNP_ULT_HF_SUBTYPE	= 0x0D,
+};
+
+enum qpnp_logical_mode {
+	QPNP_LOGICAL_MODE_AUTO,
+	QPNP_LOGICAL_MODE_PWM,
+};
+
+static const struct voltage_range fts2_range0 = {0, 350000, 1275000,  5000};
+static const struct voltage_range fts2_range1 = {0, 700000, 2040000, 10000};
+static const struct voltage_range fts2p5_range0
+					 = { 80000, 350000, 1355000,  5000};
+static const struct voltage_range fts2p5_range1
+					 = {160000, 700000, 2200000, 10000};
+static const struct voltage_range fts426_range = {0, 320000, 1352000, 4000};
+static const struct voltage_range ult_hf_range0 = {375000, 375000, 1562500,
+								12500};
+static const struct voltage_range ult_hf_range1 = {750000, 750000, 1525000,
+								25000};
+static const struct voltage_range hf_range0 = {375000, 375000, 1562500, 12500};
+static const struct voltage_range hf_range1 = {1550000, 1550000, 3125000,
+								25000};
+
+#define QPNP_SMPS_REG_TYPE		0x04
+#define QPNP_SMPS_REG_SUBTYPE		0x05
+#define QPNP_SMPS_REG_VOLTAGE_RANGE	0x40
+#define QPNP_SMPS_REG_VOLTAGE_SETPOINT	0x41
+#define QPNP_SMPS_REG_MODE		0x45
+#define QPNP_SMPS_REG_STEP_CTRL		0x61
+#define QPNP_SMPS_REG_UL_LL_CTRL	0x68
+
+/* FTS426 voltage control registers */
+#define QPNP_FTS426_REG_VOLTAGE_LB		0x40
+#define QPNP_FTS426_REG_VOLTAGE_UB		0x41
+#define QPNP_FTS426_REG_VOLTAGE_VALID_LB	0x42
+#define QPNP_FTS426_REG_VOLTAGE_VALID_UB	0x43
+
+/* HF voltage limit registers */
+#define QPNP_HF_REG_VOLTAGE_ULS		0x69
+#define QPNP_HF_REG_VOLTAGE_LLS		0x6B
+
+/* FTS voltage limit registers */
+#define QPNP_FTS_REG_VOLTAGE_ULS_VALID	0x6A
+#define QPNP_FTS_REG_VOLTAGE_LLS_VALID	0x6C
+
+/* FTS426 voltage limit registers */
+#define QPNP_FTS426_REG_VOLTAGE_ULS_LB	0x68
+#define QPNP_FTS426_REG_VOLTAGE_ULS_UB	0x69
+
+/* Common regulator UL & LL limits control register layout */
+#define QPNP_COMMON_UL_EN_MASK		0x80
+#define QPNP_COMMON_LL_EN_MASK		0x40
+
+#define QPNP_SMPS_MODE_PWM		0x80
+#define QPNP_SMPS_MODE_AUTO		0x40
+#define QPNP_FTS426_MODE_PWM		0x07
+#define QPNP_FTS426_MODE_AUTO		0x06
+
+#define QPNP_SMPS_STEP_CTRL_STEP_MASK	0x18
+#define QPNP_SMPS_STEP_CTRL_STEP_SHIFT	3
+#define QPNP_SMPS_STEP_CTRL_DELAY_MASK	0x07
+#define QPNP_SMPS_STEP_CTRL_DELAY_SHIFT	0
+#define QPNP_FTS426_STEP_CTRL_DELAY_MASK	0x03
+#define QPNP_FTS426_STEP_CTRL_DELAY_SHIFT	0
+
+/* Clock rate in kHz of the FTS2 regulator reference clock. */
+#define QPNP_SMPS_CLOCK_RATE		19200
+#define QPNP_FTS426_CLOCK_RATE		4800
+
+/* Time to delay in us to ensure that a mode change has completed. */
+#define QPNP_FTS2_MODE_CHANGE_DELAY	50
+
+/* Minimum time in us that it takes to complete a single SPMI write. */
+#define QPNP_SPMI_WRITE_MIN_DELAY	8
+
+/* Minimum voltage stepper delay for each step. */
+#define QPNP_FTS2_STEP_DELAY		8
+#define QPNP_HF_STEP_DELAY		20
+#define QPNP_FTS426_STEP_DELAY		2
+
+/* Arbitrarily large max step size used to avoid possible numerical overflow */
+#define SPM_REGULATOR_MAX_STEP_UV	10000000
+
+/*
+ * The ratios QPNP_*_STEP_MARGIN_NUM/QPNP_*_STEP_MARGIN_DEN are used to
+ * derate the calculated step rate in order to account for oscillator
+ * variance.
+ */
+#define QPNP_FTS2_STEP_MARGIN_NUM	4
+#define QPNP_FTS2_STEP_MARGIN_DEN	5
+#define QPNP_FTS426_STEP_MARGIN_NUM	10
+#define QPNP_FTS426_STEP_MARGIN_DEN	11
+
+/*
+ * Settling delay for FTS2.5
+ * Warm-up=20uS, 0-10% & 90-100% non-linear V-ramp delay = 50uS
+ */
+#define FTS2P5_SETTLING_DELAY_US	70
+
+/* VSET value to decide the range of ULT SMPS */
+#define ULT_SMPS_RANGE_SPLIT 0x60
+
+struct spm_vreg {
+	struct regulator_desc		rdesc;
+	struct regulator_dev		*rdev;
+	struct platform_device		*pdev;
+	struct regmap			*regmap;
+	const struct voltage_range	*range;
+	int				uV;
+	int				last_set_uV;
+	unsigned			vlevel;
+	unsigned			last_set_vlevel;
+	u32				max_step_uV;
+	bool				online;
+	u16				spmi_base_addr;
+	enum qpnp_logical_mode		init_mode;
+	enum qpnp_logical_mode		mode;
+	int				step_rate;
+	enum qpnp_regulator_uniq_type	regulator_type;
+	u32				cpu_num;
+	bool				bypass_spm;
+	struct regulator_desc		avs_rdesc;
+	struct regulator_dev		*avs_rdev;
+	int				avs_min_uV;
+	int				avs_max_uV;
+	bool				avs_enabled;
+	u32				recal_cluster_mask;
+};
+
+static inline bool spm_regulator_using_avs(struct spm_vreg *vreg)
+{
+	return vreg->avs_rdev && !vreg->bypass_spm;
+}
+
+static int spm_regulator_uv_to_vlevel(struct spm_vreg *vreg, int uV)
+{
+	int vlevel;
+
+	if (vreg->regulator_type == QPNP_TYPE_FTS426)
+		return roundup(uV, vreg->range->step_uV) / 1000;
+
+	vlevel = DIV_ROUND_UP(uV - vreg->range->min_uV, vreg->range->step_uV);
+
+	/* Fix VSET for ULT HF Buck */
+	if (vreg->regulator_type == QPNP_TYPE_ULT_HF
+	    && vreg->range == &ult_hf_range1) {
+		vlevel &= 0x1F;
+		vlevel |= ULT_SMPS_RANGE_SPLIT;
+	}
+
+	return vlevel;
+}
+
+static int spm_regulator_vlevel_to_uv(struct spm_vreg *vreg, int vlevel)
+{
+	if (vreg->regulator_type == QPNP_TYPE_FTS426)
+		return vlevel * 1000;
+	/*
+	 * Calculate ULT HF buck VSET based on range:
+	 * In case of range 0: VSET is a 7 bit value.
+	 * In case of range 1: VSET is a 5 bit value.
+	 */
+	if (vreg->regulator_type == QPNP_TYPE_ULT_HF
+	    && vreg->range == &ult_hf_range1)
+		vlevel &= ~ULT_SMPS_RANGE_SPLIT;
+
+	return vlevel * vreg->range->step_uV + vreg->range->min_uV;
+}
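+
+/*
+ * Illustrative worked example (added for clarity; values assumed, not part
+ * of the original source): on fts2_range0 (min_uV = 0, step_uV = 5000) a
+ * request of 1100000 uV maps to vlevel = DIV_ROUND_UP(1100000 - 0, 5000)
+ * = 220, and decoding gives 220 * 5000 + 0 = 1100000 uV back.  On
+ * ult_hf_range1 (min_uV = 750000, step_uV = 25000), 1000000 uV gives
+ * vlevel = 10, encoded as (10 & 0x1F) | ULT_SMPS_RANGE_SPLIT = 0x6A; the
+ * decode path masks the range-split bit off again.  FTS426 skips all of
+ * this and simply encodes millivolts.
+ */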
+
+static unsigned spm_regulator_vlevel_to_selector(struct spm_vreg *vreg,
+						 unsigned vlevel)
+{
+	/* Fix VSET for ULT HF Buck */
+	if (vreg->regulator_type == QPNP_TYPE_ULT_HF
+	    && vreg->range == &ult_hf_range1)
+		vlevel &= ~ULT_SMPS_RANGE_SPLIT;
+
+	return vlevel - (vreg->range->set_point_min_uV - vreg->range->min_uV)
+				/ vreg->range->step_uV;
+}
+
+static int qpnp_smps_read_voltage(struct spm_vreg *vreg)
+{
+	int rc;
+	u8 val[2] = {0};
+
+	if (vreg->regulator_type == QPNP_TYPE_FTS426) {
+		rc = regmap_bulk_read(vreg->regmap,
+			vreg->spmi_base_addr + QPNP_FTS426_REG_VOLTAGE_VALID_LB,
+				 val, 2);
+		if (rc) {
+			dev_err(&vreg->pdev->dev, "%s: could not read voltage setpoint registers, rc=%d\n",
+				__func__, rc);
+			return rc;
+		}
+
+		vreg->last_set_vlevel = ((unsigned)val[1] << 8) | val[0];
+	} else {
+		rc = regmap_bulk_read(vreg->regmap,
+			vreg->spmi_base_addr + QPNP_SMPS_REG_VOLTAGE_SETPOINT,
+				val, 1);
+		if (rc) {
+			dev_err(&vreg->pdev->dev, "%s: could not read voltage setpoint register, rc=%d\n",
+				__func__, rc);
+			return rc;
+		}
+		vreg->last_set_vlevel = val[0];
+	}
+
+	vreg->last_set_uV = spm_regulator_vlevel_to_uv(vreg,
+						vreg->last_set_vlevel);
+	return rc;
+}
+
+static int qpnp_smps_write_voltage(struct spm_vreg *vreg, unsigned vlevel)
+{
+	int rc = 0;
+	u8 reg[2];
+
+	/* Set voltage control registers via SPMI. */
+	reg[0] = vlevel & 0xFF;
+	reg[1] = (vlevel >> 8) & 0xFF;
+
+	if (vreg->regulator_type == QPNP_TYPE_FTS426) {
+		rc = regmap_bulk_write(vreg->regmap,
+			  vreg->spmi_base_addr + QPNP_FTS426_REG_VOLTAGE_LB,
+			  reg, 2);
+	} else {
+		rc = regmap_write(vreg->regmap,
+			  vreg->spmi_base_addr + QPNP_SMPS_REG_VOLTAGE_SETPOINT,
+			  reg[0]);
+	}
+
+	if (rc)
+		pr_err("%s: regmap_write failed, rc=%d\n",
+			vreg->rdesc.name, rc);
+
+	return rc;
+}
+
+static inline enum qpnp_logical_mode qpnp_regval_to_mode(struct spm_vreg *vreg,
+							u8 regval)
+{
+	if (vreg->regulator_type == QPNP_TYPE_FTS426)
+		return (regval == QPNP_FTS426_MODE_PWM)
+			? QPNP_LOGICAL_MODE_PWM : QPNP_LOGICAL_MODE_AUTO;
+	else
+		return (regval & QPNP_SMPS_MODE_PWM)
+			? QPNP_LOGICAL_MODE_PWM : QPNP_LOGICAL_MODE_AUTO;
+}
+
+static inline u8 qpnp_mode_to_regval(struct spm_vreg *vreg,
+					enum qpnp_logical_mode mode)
+{
+	if (vreg->regulator_type == QPNP_TYPE_FTS426)
+		return (mode == QPNP_LOGICAL_MODE_PWM)
+			? QPNP_FTS426_MODE_PWM : QPNP_FTS426_MODE_AUTO;
+	else
+		return (mode == QPNP_LOGICAL_MODE_PWM)
+			? QPNP_SMPS_MODE_PWM : QPNP_SMPS_MODE_AUTO;
+}
+
+static int qpnp_smps_set_mode(struct spm_vreg *vreg, u8 mode)
+{
+	int rc;
+
+	rc = regmap_write(vreg->regmap,
+			  vreg->spmi_base_addr + QPNP_SMPS_REG_MODE,
+			  qpnp_mode_to_regval(vreg, mode));
+	if (rc)
+		dev_err(&vreg->pdev->dev,
+			"%s: could not write to mode register, rc=%d\n",
+			__func__, rc);
+
+	return rc;
+}
+
+static int spm_regulator_get_voltage(struct regulator_dev *rdev)
+{
+	struct spm_vreg *vreg = rdev_get_drvdata(rdev);
+	int vlevel, rc;
+
+	if (spm_regulator_using_avs(vreg)) {
+		vlevel = msm_spm_get_vdd(vreg->cpu_num);
+
+		if (IS_ERR_VALUE(vlevel)) {
+			pr_debug("%s: msm_spm_get_vdd failed, rc=%d; falling back on SPMI read\n",
+				vreg->rdesc.name, vlevel);
+
+			rc = qpnp_smps_read_voltage(vreg);
+			if (rc) {
+				pr_err("%s: voltage read failed, rc=%d\n",
+				       vreg->rdesc.name, rc);
+				return rc;
+			}
+
+			return vreg->last_set_uV;
+		}
+
+		vreg->last_set_vlevel = vlevel;
+		vreg->last_set_uV = spm_regulator_vlevel_to_uv(vreg, vlevel);
+
+		return vreg->last_set_uV;
+	} else {
+		return vreg->uV;
+	}
+}
+
+static int spm_regulator_write_voltage(struct spm_vreg *vreg, int uV)
+{
+	unsigned vlevel = spm_regulator_uv_to_vlevel(vreg, uV);
+	bool spm_failed = false;
+	int rc = 0;
+	u32 slew_delay;
+
+	if (likely(!vreg->bypass_spm)) {
+		/* Set voltage control register via SPM. */
+		rc = msm_spm_set_vdd(vreg->cpu_num, vlevel);
+		if (rc) {
+			pr_debug("%s: msm_spm_set_vdd failed, rc=%d; falling back on SPMI write\n",
+				vreg->rdesc.name, rc);
+			spm_failed = true;
+		}
+	}
+
+	if (unlikely(vreg->bypass_spm || spm_failed)) {
+		rc = qpnp_smps_write_voltage(vreg, vlevel);
+		if (rc) {
+			pr_err("%s: voltage write failed, rc=%d\n",
+				vreg->rdesc.name, rc);
+			return rc;
+		}
+	}
+
+	if (uV > vreg->last_set_uV) {
+		/* Wait for voltage stepping to complete. */
+		slew_delay = DIV_ROUND_UP(uV - vreg->last_set_uV,
+					vreg->step_rate);
+		if (vreg->regulator_type == QPNP_TYPE_FTS2p5)
+			slew_delay += FTS2P5_SETTLING_DELAY_US;
+		udelay(slew_delay);
+	} else if (vreg->regulator_type == QPNP_TYPE_FTS2p5) {
+		/* add the ramp-down delay */
+		slew_delay = DIV_ROUND_UP(vreg->last_set_uV - uV,
+				vreg->step_rate) + FTS2P5_SETTLING_DELAY_US;
+		udelay(slew_delay);
+	}
+
+	vreg->last_set_uV = uV;
+	vreg->last_set_vlevel = vlevel;
+
+	return rc;
+}
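+
+/*
+ * Illustrative delay example (added for clarity; numbers assumed): raising
+ * the output from 945000 uV to 1050000 uV with step_rate = 9600 uV/us
+ * busy-waits DIV_ROUND_UP(105000, 9600) = 11 us, plus the fixed
+ * FTS2P5_SETTLING_DELAY_US on FTS2.5 parts.
+ */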
+
+static int spm_regulator_recalibrate(struct spm_vreg *vreg)
+{
+	int rc;
+
+	if (!vreg->recal_cluster_mask)
+		return 0;
+
+	rc = __invoke_psci_fn_smc(0xC4000020, vreg->recal_cluster_mask,
+				  2, 0);
+	if (rc)
+		pr_err("%s: recalibration failed, rc=%d\n", vreg->rdesc.name,
+			rc);
+
+	return rc;
+}
+
+static int _spm_regulator_set_voltage(struct regulator_dev *rdev)
+{
+	struct spm_vreg *vreg = rdev_get_drvdata(rdev);
+	bool pwm_required;
+	int rc = 0;
+	int uV;
+
+	rc = spm_regulator_get_voltage(rdev);
+	if (IS_ERR_VALUE(rc))
+		return rc;
+
+	if (vreg->vlevel == vreg->last_set_vlevel)
+		return 0;
+
+	pwm_required = (vreg->regulator_type == QPNP_TYPE_FTS2)
+			&& (vreg->init_mode != QPNP_LOGICAL_MODE_PWM)
+			&& vreg->uV > vreg->last_set_uV;
+
+	if (pwm_required) {
+		/* Switch to PWM mode so that voltage ramping is fast. */
+		rc = qpnp_smps_set_mode(vreg, QPNP_LOGICAL_MODE_PWM);
+		if (rc)
+			return rc;
+	}
+
+	do {
+		uV = vreg->uV > vreg->last_set_uV
+		    ? min(vreg->uV, vreg->last_set_uV + (int)vreg->max_step_uV)
+		    : max(vreg->uV, vreg->last_set_uV - (int)vreg->max_step_uV);
+
+		rc = spm_regulator_write_voltage(vreg, uV);
+		if (rc)
+			return rc;
+	} while (vreg->last_set_uV != vreg->uV);
+
+	if (pwm_required) {
+		/* Wait for mode transition to complete. */
+		udelay(QPNP_FTS2_MODE_CHANGE_DELAY - QPNP_SPMI_WRITE_MIN_DELAY);
+		/* Switch to AUTO mode so that power consumption is lowered. */
+		rc = qpnp_smps_set_mode(vreg, QPNP_LOGICAL_MODE_AUTO);
+		if (rc)
+			return rc;
+	}
+
+	rc = spm_regulator_recalibrate(vreg);
+
+	return rc;
+}
+
+static int spm_regulator_set_voltage(struct regulator_dev *rdev, int min_uV,
+					int max_uV, unsigned *selector)
+{
+	struct spm_vreg *vreg = rdev_get_drvdata(rdev);
+	const struct voltage_range *range = vreg->range;
+	int uV = min_uV;
+	unsigned vlevel;
+
+	if (uV < range->set_point_min_uV && max_uV >= range->set_point_min_uV)
+		uV = range->set_point_min_uV;
+
+	if (uV < range->set_point_min_uV || uV > range->max_uV) {
+		pr_err("%s: request v=[%d, %d] is outside possible v=[%d, %d]\n",
+			vreg->rdesc.name, min_uV, max_uV,
+			range->set_point_min_uV, range->max_uV);
+		return -EINVAL;
+	}
+
+	vlevel = spm_regulator_uv_to_vlevel(vreg, uV);
+	uV = spm_regulator_vlevel_to_uv(vreg, vlevel);
+
+	if (uV > max_uV) {
+		pr_err("%s: request v=[%d, %d] cannot be met by any set point\n",
+			vreg->rdesc.name, min_uV, max_uV);
+		return -EINVAL;
+	}
+
+	*selector = spm_regulator_vlevel_to_selector(vreg, vlevel);
+	vreg->vlevel = vlevel;
+	vreg->uV = uV;
+
+	if (!vreg->online)
+		return 0;
+
+	return _spm_regulator_set_voltage(rdev);
+}
+
+static int spm_regulator_list_voltage(struct regulator_dev *rdev,
+					unsigned selector)
+{
+	struct spm_vreg *vreg = rdev_get_drvdata(rdev);
+
+	if (selector >= vreg->rdesc.n_voltages)
+		return 0;
+
+	return selector * vreg->range->step_uV + vreg->range->set_point_min_uV;
+}
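+
+/*
+ * Illustrative note (added for clarity): with fts2_range0
+ * (set_point_min_uV = 350000, max_uV = 1275000, step_uV = 5000) the
+ * descriptor exposes (1275000 - 350000) / 5000 + 1 = 186 selectors, so
+ * selector 0 lists 350000 uV and selector 185 lists 1275000 uV.
+ */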
+
+static int spm_regulator_enable(struct regulator_dev *rdev)
+{
+	struct spm_vreg *vreg = rdev_get_drvdata(rdev);
+	int rc;
+
+	rc = _spm_regulator_set_voltage(rdev);
+
+	if (!rc)
+		vreg->online = true;
+
+	return rc;
+}
+
+static int spm_regulator_disable(struct regulator_dev *rdev)
+{
+	struct spm_vreg *vreg = rdev_get_drvdata(rdev);
+
+	vreg->online = false;
+
+	return 0;
+}
+
+static int spm_regulator_is_enabled(struct regulator_dev *rdev)
+{
+	struct spm_vreg *vreg = rdev_get_drvdata(rdev);
+
+	return vreg->online;
+}
+
+static unsigned int spm_regulator_get_mode(struct regulator_dev *rdev)
+{
+	struct spm_vreg *vreg = rdev_get_drvdata(rdev);
+
+	return vreg->mode == QPNP_LOGICAL_MODE_PWM
+			? REGULATOR_MODE_NORMAL : REGULATOR_MODE_IDLE;
+}
+
+static int spm_regulator_set_mode(struct regulator_dev *rdev, unsigned int mode)
+{
+	struct spm_vreg *vreg = rdev_get_drvdata(rdev);
+
+	/*
+	 * Map REGULATOR_MODE_NORMAL to PWM mode and REGULATOR_MODE_IDLE to
+	 * init_mode.  This ensures that the regulator always stays in PWM mode
+	 * in the case that qcom,mode has been specified as "pwm" in device
+	 * tree.
+	 */
+	vreg->mode = (mode == REGULATOR_MODE_NORMAL) ? QPNP_LOGICAL_MODE_PWM
+						     : vreg->init_mode;
+
+	return qpnp_smps_set_mode(vreg, vreg->mode);
+}
+
+static struct regulator_ops spm_regulator_ops = {
+	.get_voltage	= spm_regulator_get_voltage,
+	.set_voltage	= spm_regulator_set_voltage,
+	.list_voltage	= spm_regulator_list_voltage,
+	.get_mode	= spm_regulator_get_mode,
+	.set_mode	= spm_regulator_set_mode,
+	.enable		= spm_regulator_enable,
+	.disable	= spm_regulator_disable,
+	.is_enabled	= spm_regulator_is_enabled,
+};
+
+static int spm_regulator_avs_set_voltage(struct regulator_dev *rdev, int min_uV,
+					int max_uV, unsigned *selector)
+{
+	struct spm_vreg *vreg = rdev_get_drvdata(rdev);
+	const struct voltage_range *range = vreg->range;
+	unsigned vlevel_min, vlevel_max;
+	int uV, avs_min_uV, avs_max_uV, rc;
+
+	uV = min_uV;
+
+	if (uV < range->set_point_min_uV && max_uV >= range->set_point_min_uV)
+		uV = range->set_point_min_uV;
+
+	if (uV < range->set_point_min_uV || uV > range->max_uV) {
+		pr_err("%s: request v=[%d, %d] is outside possible v=[%d, %d]\n",
+			vreg->avs_rdesc.name, min_uV, max_uV,
+			range->set_point_min_uV, range->max_uV);
+		return -EINVAL;
+	}
+
+	vlevel_min = spm_regulator_uv_to_vlevel(vreg, uV);
+	avs_min_uV = spm_regulator_vlevel_to_uv(vreg, vlevel_min);
+
+	if (avs_min_uV > max_uV) {
+		pr_err("%s: request v=[%d, %d] cannot be met by any set point\n",
+			vreg->avs_rdesc.name, min_uV, max_uV);
+		return -EINVAL;
+	}
+
+	uV = max_uV;
+
+	if (uV > range->max_uV && min_uV <= range->max_uV)
+		uV = range->max_uV;
+
+	if (uV < range->set_point_min_uV || uV > range->max_uV) {
+		pr_err("%s: request v=[%d, %d] is outside possible v=[%d, %d]\n",
+			vreg->avs_rdesc.name, min_uV, max_uV,
+			range->set_point_min_uV, range->max_uV);
+		return -EINVAL;
+	}
+
+	vlevel_max = spm_regulator_uv_to_vlevel(vreg, uV);
+	avs_max_uV = spm_regulator_vlevel_to_uv(vreg, vlevel_max);
+
+	if (avs_max_uV < min_uV) {
+		pr_err("%s: request v=[%d, %d] cannot be met by any set point\n",
+			vreg->avs_rdesc.name, min_uV, max_uV);
+		return -EINVAL;
+	}
+
+	if (likely(!vreg->bypass_spm)) {
+		rc = msm_spm_avs_set_limit(vreg->cpu_num, vlevel_min,
+						vlevel_max);
+		if (rc) {
+			pr_err("%s: AVS limit setting failed, rc=%d\n",
+				vreg->avs_rdesc.name, rc);
+			return rc;
+		}
+	}
+
+	*selector = spm_regulator_vlevel_to_selector(vreg, vlevel_min);
+	vreg->avs_min_uV = avs_min_uV;
+	vreg->avs_max_uV = avs_max_uV;
+
+	return 0;
+}
+
+static int spm_regulator_avs_get_voltage(struct regulator_dev *rdev)
+{
+	struct spm_vreg *vreg = rdev_get_drvdata(rdev);
+
+	return vreg->avs_min_uV;
+}
+
+static int spm_regulator_avs_enable(struct regulator_dev *rdev)
+{
+	struct spm_vreg *vreg = rdev_get_drvdata(rdev);
+	int rc;
+
+	if (likely(!vreg->bypass_spm)) {
+		rc = msm_spm_avs_enable(vreg->cpu_num);
+		if (rc) {
+			pr_err("%s: AVS enable failed, rc=%d\n",
+				vreg->avs_rdesc.name, rc);
+			return rc;
+		}
+	}
+
+	vreg->avs_enabled = true;
+
+	return 0;
+}
+
+static int spm_regulator_avs_disable(struct regulator_dev *rdev)
+{
+	struct spm_vreg *vreg = rdev_get_drvdata(rdev);
+	int rc;
+
+	if (likely(!vreg->bypass_spm)) {
+		rc = msm_spm_avs_disable(vreg->cpu_num);
+		if (rc) {
+			pr_err("%s: AVS disable failed, rc=%d\n",
+				vreg->avs_rdesc.name, rc);
+			return rc;
+		}
+	}
+
+	vreg->avs_enabled = false;
+
+	return 0;
+}
+
+static int spm_regulator_avs_is_enabled(struct regulator_dev *rdev)
+{
+	struct spm_vreg *vreg = rdev_get_drvdata(rdev);
+
+	return vreg->avs_enabled;
+}
+
+static struct regulator_ops spm_regulator_avs_ops = {
+	.get_voltage	= spm_regulator_avs_get_voltage,
+	.set_voltage	= spm_regulator_avs_set_voltage,
+	.list_voltage	= spm_regulator_list_voltage,
+	.enable		= spm_regulator_avs_enable,
+	.disable	= spm_regulator_avs_disable,
+	.is_enabled	= spm_regulator_avs_is_enabled,
+};
+
+static int qpnp_smps_check_type(struct spm_vreg *vreg)
+{
+	int rc;
+	u8 type[2];
+
+	rc = regmap_bulk_read(vreg->regmap,
+			      vreg->spmi_base_addr + QPNP_SMPS_REG_TYPE,
+			      type,
+			      2);
+	if (rc) {
+		dev_err(&vreg->pdev->dev,
+			"%s: could not read type register, rc=%d\n",
+			__func__, rc);
+		return rc;
+	}
+
+	if (type[0] == QPNP_FTS2_TYPE && type[1] == QPNP_FTS2_SUBTYPE) {
+		vreg->regulator_type = QPNP_TYPE_FTS2;
+	} else if (type[0] == QPNP_FTS2p5_TYPE
+					&& type[1] == QPNP_FTS2p5_SUBTYPE) {
+		vreg->regulator_type = QPNP_TYPE_FTS2p5;
+	} else if (type[0] == QPNP_FTS426_TYPE
+					&& type[1] == QPNP_FTS426_SUBTYPE) {
+		vreg->regulator_type = QPNP_TYPE_FTS426;
+	} else if (type[0] == QPNP_ULT_HF_TYPE
+					&& type[1] == QPNP_ULT_HF_SUBTYPE) {
+		vreg->regulator_type = QPNP_TYPE_ULT_HF;
+	} else if (type[0] == QPNP_HF_TYPE
+					&& type[1] == QPNP_HF_SUBTYPE) {
+		vreg->regulator_type = QPNP_TYPE_HF;
+	} else {
+		dev_err(&vreg->pdev->dev,
+			"%s: invalid type=0x%02X, subtype=0x%02X register pair\n",
+			 __func__, type[0], type[1]);
+		return -ENODEV;
+	}
+
+	return rc;
+}
+
+static int qpnp_smps_init_range(struct spm_vreg *vreg,
+	const struct voltage_range *range0, const struct voltage_range *range1)
+{
+	int rc;
+	u8 reg = 0;
+	uint val;
+
+	rc = regmap_read(vreg->regmap,
+			 vreg->spmi_base_addr + QPNP_SMPS_REG_VOLTAGE_RANGE,
+			 &val);
+	if (rc) {
+		dev_err(&vreg->pdev->dev,
+			"%s: could not read voltage range register, rc=%d\n",
+			__func__, rc);
+		return rc;
+	}
+	reg = (u8)val;
+
+	if (reg == 0x00) {
+		vreg->range = range0;
+	} else if (reg == 0x01) {
+		vreg->range = range1;
+	} else {
+		dev_err(&vreg->pdev->dev, "%s: voltage range=%d is invalid\n",
+			__func__, reg);
+		rc = -EINVAL;
+	}
+
+	return rc;
+}
+
+static int qpnp_ult_hf_init_range(struct spm_vreg *vreg)
+{
+	int rc;
+	u8 reg = 0;
+	uint val;
+
+	rc = regmap_read(vreg->regmap,
+			 vreg->spmi_base_addr + QPNP_SMPS_REG_VOLTAGE_SETPOINT,
+			 &val);
+	if (rc) {
+		dev_err(&vreg->pdev->dev,
+			"%s: could not read voltage range register, rc=%d\n",
+			__func__, rc);
+		return rc;
+	}
+	reg = (u8)val;
+
+	vreg->range = (reg < ULT_SMPS_RANGE_SPLIT) ? &ult_hf_range0 :
+							&ult_hf_range1;
+	return rc;
+}
+
+static int qpnp_smps_init_voltage(struct spm_vreg *vreg)
+{
+	int rc;
+
+	rc = qpnp_smps_read_voltage(vreg);
+	if (rc) {
+		pr_err("%s: voltage read failed, rc=%d\n", vreg->rdesc.name,
+			rc);
+		return rc;
+	}
+
+	vreg->vlevel = vreg->last_set_vlevel;
+	vreg->uV = vreg->last_set_uV;
+
+	/* Initialize SAW voltage control register */
+	if (!vreg->bypass_spm) {
+		rc = msm_spm_set_vdd(vreg->cpu_num, vreg->vlevel);
+		if (rc)
+			pr_err("%s: msm_spm_set_vdd failed, rc=%d\n",
+			       vreg->rdesc.name, rc);
+	}
+
+	return 0;
+}
+
+static int qpnp_smps_init_mode(struct spm_vreg *vreg)
+{
+	const char *mode_name;
+	int rc;
+	uint val;
+
+	rc = of_property_read_string(vreg->pdev->dev.of_node, "qcom,mode",
+					&mode_name);
+	if (!rc) {
+		if (strcmp("pwm", mode_name) == 0) {
+			vreg->init_mode = QPNP_LOGICAL_MODE_PWM;
+		} else if ((strcmp("auto", mode_name) == 0) &&
+				(vreg->regulator_type != QPNP_TYPE_ULT_HF)) {
+			vreg->init_mode = QPNP_LOGICAL_MODE_AUTO;
+		} else {
+			dev_err(&vreg->pdev->dev,
+				"%s: unknown regulator mode: %s\n",
+				__func__, mode_name);
+			return -EINVAL;
+		}
+
+		rc = qpnp_smps_set_mode(vreg, vreg->init_mode);
+		if (rc)
+			return rc;
+	} else {
+		rc = regmap_read(vreg->regmap,
+				 vreg->spmi_base_addr + QPNP_SMPS_REG_MODE,
+				 &val);
+		if (rc) {
+			dev_err(&vreg->pdev->dev,
+				"%s: could not read mode register, rc=%d\n",
+				__func__, rc);
+			return rc;
+		}
+		vreg->init_mode = qpnp_regval_to_mode(vreg, val);
+	}
+
+	vreg->mode = vreg->init_mode;
+
+	return rc;
+}
+
+static int qpnp_smps_init_step_rate(struct spm_vreg *vreg)
+{
+	int rc;
+	u8 reg = 0;
+	int step = 0, delay;
+	uint val;
+
+	rc = regmap_read(vreg->regmap,
+			 vreg->spmi_base_addr + QPNP_SMPS_REG_STEP_CTRL, &val);
+	if (rc) {
+		dev_err(&vreg->pdev->dev,
+			"%s: could not read stepping control register, rc=%d\n",
+			__func__, rc);
+		return rc;
+	}
+	reg = (u8)val;
+
+	/* ULT and FTS426 bucks do not support steps */
+	if (vreg->regulator_type != QPNP_TYPE_ULT_HF && vreg->regulator_type !=
+			QPNP_TYPE_FTS426)
+		step = (reg & QPNP_SMPS_STEP_CTRL_STEP_MASK)
+			>> QPNP_SMPS_STEP_CTRL_STEP_SHIFT;
+
+	if (vreg->regulator_type == QPNP_TYPE_FTS426) {
+		delay = (reg & QPNP_FTS426_STEP_CTRL_DELAY_MASK)
+			>> QPNP_FTS426_STEP_CTRL_DELAY_SHIFT;
+
+		/* step_rate has units of uV/us. */
+		vreg->step_rate = QPNP_FTS426_CLOCK_RATE * vreg->range->step_uV;
+	} else {
+		delay = (reg & QPNP_SMPS_STEP_CTRL_DELAY_MASK)
+			>> QPNP_SMPS_STEP_CTRL_DELAY_SHIFT;
+
+		/* step_rate has units of uV/us. */
+		vreg->step_rate = QPNP_SMPS_CLOCK_RATE * vreg->range->step_uV
+					* (1 << step);
+	}
+
+	if ((vreg->regulator_type == QPNP_TYPE_ULT_HF)
+			|| (vreg->regulator_type == QPNP_TYPE_HF))
+		vreg->step_rate /= 1000 * (QPNP_HF_STEP_DELAY << delay);
+	else if (vreg->regulator_type == QPNP_TYPE_FTS426)
+		vreg->step_rate /= 1000 * (QPNP_FTS426_STEP_DELAY << delay);
+	else
+		vreg->step_rate /= 1000 * (QPNP_FTS2_STEP_DELAY << delay);
+
+	if (vreg->regulator_type == QPNP_TYPE_FTS426)
+		vreg->step_rate = vreg->step_rate * QPNP_FTS426_STEP_MARGIN_NUM
+					/ QPNP_FTS426_STEP_MARGIN_DEN;
+	else
+		vreg->step_rate = vreg->step_rate * QPNP_FTS2_STEP_MARGIN_NUM
+					/ QPNP_FTS2_STEP_MARGIN_DEN;
+
+	/* Ensure that the stepping rate is greater than 0. */
+	vreg->step_rate = max(vreg->step_rate, 1);
+
+	return rc;
+}
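+
+/*
+ * Illustrative worked example (added for clarity; register values assumed):
+ * an FTS2 buck on fts2_range0 with step field 0 and delay field 0 gives
+ * 19200 kHz * 5000 uV * (1 << 0) = 96000000, divided by
+ * 1000 * (QPNP_FTS2_STEP_DELAY << 0) = 8000 for 12000 uV/us, which the
+ * 4/5 oscillator margin then derates to 9600 uV/us.
+ */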
+
+static int qpnp_smps_check_constraints(struct spm_vreg *vreg,
+					struct regulator_init_data *init_data)
+{
+	int rc = 0, limit_min_uV, limit_max_uV;
+	u16 ul_reg, ll_reg;
+	u8 reg[2];
+
+	limit_min_uV = 0;
+	limit_max_uV = INT_MAX;
+
+	ul_reg = QPNP_FTS_REG_VOLTAGE_ULS_VALID;
+	ll_reg = QPNP_FTS_REG_VOLTAGE_LLS_VALID;
+
+	switch (vreg->regulator_type) {
+	case QPNP_TYPE_HF:
+		ul_reg = QPNP_HF_REG_VOLTAGE_ULS;
+		ll_reg = QPNP_HF_REG_VOLTAGE_LLS;
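+		/* fall through - HF shares the FTS UL/LL read-out below */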
+	case QPNP_TYPE_FTS2:
+	case QPNP_TYPE_FTS2p5:
+		rc = regmap_bulk_read(vreg->regmap, vreg->spmi_base_addr
+					+ QPNP_SMPS_REG_UL_LL_CTRL, reg, 1);
+		if (rc) {
+			dev_err(&vreg->pdev->dev, "%s: UL_LL register read failed, rc=%d\n",
+				__func__, rc);
+			return rc;
+		}
+
+		if (reg[0] & QPNP_COMMON_UL_EN_MASK) {
+			rc = regmap_bulk_read(vreg->regmap, vreg->spmi_base_addr
+						+ ul_reg, &reg[1], 1);
+			if (rc) {
+				dev_err(&vreg->pdev->dev, "%s: ULS register read failed, rc=%d\n",
+					__func__, rc);
+				return rc;
+			}
+
+			limit_max_uV = spm_regulator_vlevel_to_uv(vreg, reg[1]);
+		}
+
+		if (reg[0] & QPNP_COMMON_LL_EN_MASK) {
+			rc = regmap_bulk_read(vreg->regmap, vreg->spmi_base_addr
+						+ ll_reg, &reg[1], 1);
+			if (rc) {
+				dev_err(&vreg->pdev->dev, "%s: LLS register read failed, rc=%d\n",
+					__func__, rc);
+				return rc;
+			}
+
+			limit_min_uV = spm_regulator_vlevel_to_uv(vreg, reg[1]);
+		}
+
+		break;
+	case QPNP_TYPE_FTS426:
+		rc = regmap_bulk_read(vreg->regmap, vreg->spmi_base_addr
+					+ QPNP_FTS426_REG_VOLTAGE_ULS_LB,
+					reg, 2);
+		if (rc) {
+			dev_err(&vreg->pdev->dev, "%s: could not read voltage limit registers, rc=%d\n",
+				__func__, rc);
+			return rc;
+		}
+
+		limit_max_uV = spm_regulator_vlevel_to_uv(vreg,
+					((unsigned)reg[1] << 8) | reg[0]);
+		break;
+	case QPNP_TYPE_ULT_HF:
+		/* no HW voltage limit configuration */
+		break;
+	}
+
+	if (init_data->constraints.min_uV < limit_min_uV
+	    || init_data->constraints.max_uV > limit_max_uV) {
+		dev_err(&vreg->pdev->dev, "regulator min/max(%d/%d) constraints do not fit within HW configured min/max(%d/%d) constraints\n",
+			init_data->constraints.min_uV,
+			init_data->constraints.max_uV, limit_min_uV,
+			limit_max_uV);
+		return -EINVAL;
+	}
+
+	return rc;
+}
+
+static bool spm_regulator_using_range0(struct spm_vreg *vreg)
+{
+	return vreg->range == &fts2_range0 || vreg->range == &fts2p5_range0
+		|| vreg->range == &ult_hf_range0 || vreg->range == &hf_range0
+		|| vreg->range == &fts426_range;
+}
+
+/* Register a regulator to enable/disable AVS and set AVS min/max limits. */
+static int spm_regulator_avs_register(struct spm_vreg *vreg,
+				struct device *dev, struct device_node *node)
+{
+	struct regulator_config reg_config = {};
+	struct device_node *avs_node = NULL;
+	struct device_node *child_node;
+	struct regulator_init_data *init_data;
+	int rc;
+
+	/*
+	 * Find the first available child node (if any).  It corresponds to an
+	 * AVS limits regulator.
+	 */
+	for_each_available_child_of_node(node, child_node) {
+		avs_node = child_node;
+		break;
+	}
+
+	if (!avs_node)
+		return 0;
+
+	init_data = of_get_regulator_init_data(dev, avs_node, &vreg->avs_rdesc);
+	if (!init_data) {
+		dev_err(dev, "%s: unable to allocate memory\n", __func__);
+		return -ENOMEM;
+	}
+	init_data->constraints.input_uV = init_data->constraints.max_uV;
+	init_data->constraints.valid_ops_mask |= REGULATOR_CHANGE_STATUS
+						| REGULATOR_CHANGE_VOLTAGE;
+
+	if (!init_data->constraints.name) {
+		dev_err(dev, "%s: AVS node is missing regulator name\n",
+			__func__);
+		return -EINVAL;
+	}
+
+	vreg->avs_rdesc.name	= init_data->constraints.name;
+	vreg->avs_rdesc.type	= REGULATOR_VOLTAGE;
+	vreg->avs_rdesc.owner	= THIS_MODULE;
+	vreg->avs_rdesc.ops	= &spm_regulator_avs_ops;
+	vreg->avs_rdesc.n_voltages
+		= (vreg->range->max_uV - vreg->range->set_point_min_uV)
+			/ vreg->range->step_uV + 1;
+
+	reg_config.dev = dev;
+	reg_config.init_data = init_data;
+	reg_config.driver_data = vreg;
+	reg_config.of_node = avs_node;
+
+	vreg->avs_rdev = regulator_register(&vreg->avs_rdesc, &reg_config);
+	if (IS_ERR(vreg->avs_rdev)) {
+		rc = PTR_ERR(vreg->avs_rdev);
+		dev_err(dev, "%s: AVS regulator_register failed, rc=%d\n",
+			__func__, rc);
+		return rc;
+	}
+
+	if (vreg->bypass_spm)
+		pr_debug("%s: SPM bypassed so AVS regulator calls are no-ops\n",
+			vreg->avs_rdesc.name);
+
+	return 0;
+}
+
+static int spm_regulator_probe(struct platform_device *pdev)
+{
+	struct regulator_config reg_config = {};
+	struct device_node *node = pdev->dev.of_node;
+	struct regulator_init_data *init_data;
+	struct spm_vreg *vreg;
+	unsigned int base;
+	bool bypass_spm;
+	int rc;
+
+	if (!node) {
+		dev_err(&pdev->dev, "%s: device node missing\n", __func__);
+		return -ENODEV;
+	}
+
+	bypass_spm = of_property_read_bool(node, "qcom,bypass-spm");
+	if (!bypass_spm) {
+		rc = msm_spm_probe_done();
+		if (rc) {
+			if (rc != -EPROBE_DEFER)
+				dev_err(&pdev->dev,
+					"%s: spm unavailable, rc=%d\n",
+					__func__, rc);
+			return rc;
+		}
+	}
+
+	vreg = devm_kzalloc(&pdev->dev, sizeof(*vreg), GFP_KERNEL);
+	if (!vreg) {
+		pr_err("allocation failed.\n");
+		return -ENOMEM;
+	}
+	vreg->regmap = dev_get_regmap(pdev->dev.parent, NULL);
+	if (!vreg->regmap) {
+		dev_err(&pdev->dev, "Couldn't get parent's regmap\n");
+		return -EINVAL;
+	}
+	vreg->pdev = pdev;
+	vreg->bypass_spm = bypass_spm;
+
+	rc = of_property_read_u32(pdev->dev.of_node, "reg", &base);
+	if (rc < 0) {
+		dev_err(&pdev->dev,
+			"Couldn't find reg in node = %s rc = %d\n",
+			pdev->dev.of_node->full_name, rc);
+		return rc;
+	}
+	vreg->spmi_base_addr = base;
+
+	rc = qpnp_smps_check_type(vreg);
+	if (rc)
+		return rc;
+
+	/* Specify CPU 0 as default in order to handle shared regulator case. */
+	vreg->cpu_num = 0;
+	of_property_read_u32(vreg->pdev->dev.of_node, "qcom,cpu-num",
+						&vreg->cpu_num);
+
+	of_property_read_u32(vreg->pdev->dev.of_node, "qcom,recal-mask",
+						&vreg->recal_cluster_mask);
+
+	/*
+	 * The regulator must be initialized to range 0 or range 1 during
+	 * PMIC power on sequence.  Once it is set, it cannot be changed
+	 * dynamically.
+	 */
+	if (vreg->regulator_type == QPNP_TYPE_FTS2)
+		rc = qpnp_smps_init_range(vreg, &fts2_range0, &fts2_range1);
+	else if (vreg->regulator_type == QPNP_TYPE_FTS2p5)
+		rc = qpnp_smps_init_range(vreg, &fts2p5_range0, &fts2p5_range1);
+	else if (vreg->regulator_type == QPNP_TYPE_FTS426)
+		vreg->range = &fts426_range;
+	else if (vreg->regulator_type == QPNP_TYPE_HF)
+		rc = qpnp_smps_init_range(vreg, &hf_range0, &hf_range1);
+	else if (vreg->regulator_type == QPNP_TYPE_ULT_HF)
+		rc = qpnp_ult_hf_init_range(vreg);
+	if (rc)
+		return rc;
+
+	rc = qpnp_smps_init_voltage(vreg);
+	if (rc)
+		return rc;
+
+	rc = qpnp_smps_init_mode(vreg);
+	if (rc)
+		return rc;
+
+	rc = qpnp_smps_init_step_rate(vreg);
+	if (rc)
+		return rc;
+
+	init_data = of_get_regulator_init_data(&pdev->dev, node, &vreg->rdesc);
+	if (!init_data) {
+		dev_err(&pdev->dev, "%s: unable to allocate memory\n",
+				__func__);
+		return -ENOMEM;
+	}
+	init_data->constraints.input_uV = init_data->constraints.max_uV;
+	init_data->constraints.valid_ops_mask |= REGULATOR_CHANGE_STATUS
+			| REGULATOR_CHANGE_VOLTAGE | REGULATOR_CHANGE_MODE;
+	init_data->constraints.valid_modes_mask
+				= REGULATOR_MODE_NORMAL | REGULATOR_MODE_IDLE;
+
+	if (!init_data->constraints.name) {
+		dev_err(&pdev->dev, "%s: node is missing regulator name\n",
+			__func__);
+		return -EINVAL;
+	}
+
+	rc = qpnp_smps_check_constraints(vreg, init_data);
+	if (rc) {
+		dev_err(&pdev->dev, "%s: regulator constraints check failed, rc=%d\n",
+			__func__, rc);
+		return rc;
+	}
+
+	vreg->rdesc.name	= init_data->constraints.name;
+	vreg->rdesc.type	= REGULATOR_VOLTAGE;
+	vreg->rdesc.owner	= THIS_MODULE;
+	vreg->rdesc.ops		= &spm_regulator_ops;
+	vreg->rdesc.n_voltages
+		= (vreg->range->max_uV - vreg->range->set_point_min_uV)
+			/ vreg->range->step_uV + 1;
+
+	vreg->max_step_uV = SPM_REGULATOR_MAX_STEP_UV;
+	of_property_read_u32(vreg->pdev->dev.of_node,
+				"qcom,max-voltage-step", &vreg->max_step_uV);
+
+	if (vreg->max_step_uV > SPM_REGULATOR_MAX_STEP_UV)
+		vreg->max_step_uV = SPM_REGULATOR_MAX_STEP_UV;
+
+	vreg->max_step_uV = rounddown(vreg->max_step_uV, vreg->range->step_uV);
+	pr_debug("%s: max single voltage step size=%u uV\n",
+		vreg->rdesc.name, vreg->max_step_uV);
+
+	reg_config.dev = &pdev->dev;
+	reg_config.init_data = init_data;
+	reg_config.driver_data = vreg;
+	reg_config.of_node = node;
+	vreg->rdev = regulator_register(&vreg->rdesc, &reg_config);
+
+	if (IS_ERR(vreg->rdev)) {
+		rc = PTR_ERR(vreg->rdev);
+		dev_err(&pdev->dev, "%s: regulator_register failed, rc=%d\n",
+			__func__, rc);
+		return rc;
+	}
+
+	rc = spm_regulator_avs_register(vreg, &pdev->dev, node);
+	if (rc) {
+		regulator_unregister(vreg->rdev);
+		return rc;
+	}
+
+	dev_set_drvdata(&pdev->dev, vreg);
+
+	pr_info("name=%s, range=%s, voltage=%d uV, mode=%s, step rate=%d uV/us\n",
+		vreg->rdesc.name,
+		spm_regulator_using_range0(vreg) ? "LV" : "MV",
+		vreg->uV,
+		vreg->init_mode == QPNP_LOGICAL_MODE_PWM ? "PWM" :
+		   (vreg->init_mode == QPNP_LOGICAL_MODE_AUTO ? "AUTO" : "PFM"),
+		vreg->step_rate);
+
+	return rc;
+}
+
+static int spm_regulator_remove(struct platform_device *pdev)
+{
+	struct spm_vreg *vreg = dev_get_drvdata(&pdev->dev);
+
+	if (vreg->avs_rdev)
+		regulator_unregister(vreg->avs_rdev);
+	regulator_unregister(vreg->rdev);
+
+	return 0;
+}
+
+static const struct of_device_id spm_regulator_match_table[] = {
+	{ .compatible = SPM_REGULATOR_DRIVER_NAME, },
+	{}
+};
+
+static const struct platform_device_id spm_regulator_id[] = {
+	{ SPM_REGULATOR_DRIVER_NAME, 0 },
+	{}
+};
+MODULE_DEVICE_TABLE(platform, spm_regulator_id);
+
+static struct platform_driver spm_regulator_driver = {
+	.driver = {
+		.name		= SPM_REGULATOR_DRIVER_NAME,
+		.of_match_table = spm_regulator_match_table,
+		.owner		= THIS_MODULE,
+	},
+	.probe		= spm_regulator_probe,
+	.remove		= spm_regulator_remove,
+	.id_table	= spm_regulator_id,
+};
+
+/**
+ * spm_regulator_init() - register spmi driver for spm-regulator
+ *
+ * This initialization function should be called in systems in which driver
+ * registration ordering must be controlled precisely.
+ *
+ * Returns 0 on success or errno on failure.
+ */
+int __init spm_regulator_init(void)
+{
+	static bool has_registered;
+
+	if (has_registered)
+		return 0;
+	else
+		has_registered = true;
+
+	return platform_driver_register(&spm_regulator_driver);
+}
+EXPORT_SYMBOL(spm_regulator_init);
+
+static void __exit spm_regulator_exit(void)
+{
+	platform_driver_unregister(&spm_regulator_driver);
+}
+
+arch_initcall(spm_regulator_init);
+module_exit(spm_regulator_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("SPM regulator driver");
+MODULE_ALIAS("platform:spm-regulator");
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/regulator/stub-regulator.c	2019-01-22 16:16:26.279271545 +0100
@@ -0,0 +1,304 @@
+/*
+ * Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/module.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/of_regulator.h>
+#include <linux/regulator/stub-regulator.h>
+
+#define STUB_REGULATOR_MAX_NAME 40
+
+struct regulator_stub {
+	struct regulator_desc	rdesc;
+	struct regulator_dev	*rdev;
+	int			voltage;
+	bool			enabled;
+	int			mode;
+	int			hpm_min_load;
+	int			system_uA;
+	char			name[STUB_REGULATOR_MAX_NAME];
+};
+
+static int regulator_stub_set_voltage(struct regulator_dev *rdev, int min_uV,
+				  int max_uV, unsigned *selector)
+{
+	struct regulator_stub *vreg_priv = rdev_get_drvdata(rdev);
+	vreg_priv->voltage = min_uV;
+	return 0;
+}
+
+static int regulator_stub_get_voltage(struct regulator_dev *rdev)
+{
+	struct regulator_stub *vreg_priv = rdev_get_drvdata(rdev);
+	return vreg_priv->voltage;
+}
+
+static int regulator_stub_list_voltage(struct regulator_dev *rdev,
+				    unsigned selector)
+{
+	struct regulation_constraints *constraints = rdev->constraints;
+
+	if (selector >= 2)
+		return -EINVAL;
+	else if (selector == 0)
+		return constraints->min_uV;
+	else
+		return constraints->max_uV;
+}
+
+static unsigned int regulator_stub_get_mode(struct regulator_dev *rdev)
+{
+	struct regulator_stub *vreg_priv = rdev_get_drvdata(rdev);
+	return vreg_priv->mode;
+}
+
+static int regulator_stub_set_mode(struct regulator_dev *rdev,
+				   unsigned int mode)
+{
+	struct regulator_stub *vreg_priv = rdev_get_drvdata(rdev);
+
+	if (mode != REGULATOR_MODE_NORMAL && mode != REGULATOR_MODE_IDLE) {
+		dev_err(&rdev->dev, "%s: invalid mode requested %u\n",
+							__func__, mode);
+		return -EINVAL;
+	}
+	vreg_priv->mode = mode;
+	return 0;
+}
+
+static unsigned int regulator_stub_get_optimum_mode(struct regulator_dev *rdev,
+		int input_uV, int output_uV, int load_uA)
+{
+	struct regulator_stub *vreg_priv = rdev_get_drvdata(rdev);
+	unsigned int mode;
+
+	if (load_uA + vreg_priv->system_uA >= vreg_priv->hpm_min_load)
+		mode = REGULATOR_MODE_NORMAL;
+	else
+		mode = REGULATOR_MODE_IDLE;
+
+	return mode;
+}
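+
+/*
+ * Illustrative example (added for clarity; loads assumed): with
+ * hpm_min_load = 10000 uA and system_uA = 2000, a consumer load of
+ * 9000 uA totals 11000 uA >= 10000 uA and selects REGULATOR_MODE_NORMAL,
+ * while 5000 uA totals only 7000 uA and selects REGULATOR_MODE_IDLE.
+ */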
+
+static int regulator_stub_enable(struct regulator_dev *rdev)
+{
+	struct regulator_stub *vreg_priv = rdev_get_drvdata(rdev);
+	vreg_priv->enabled = true;
+	return 0;
+}
+
+static int regulator_stub_disable(struct regulator_dev *rdev)
+{
+	struct regulator_stub *vreg_priv = rdev_get_drvdata(rdev);
+	vreg_priv->enabled = false;
+	return 0;
+}
+
+static int regulator_stub_is_enabled(struct regulator_dev *rdev)
+{
+	struct regulator_stub *vreg_priv = rdev_get_drvdata(rdev);
+	return vreg_priv->enabled;
+}
+
+/* Real regulator operations. */
+static struct regulator_ops regulator_stub_ops = {
+	.enable			= regulator_stub_enable,
+	.disable		= regulator_stub_disable,
+	.is_enabled		= regulator_stub_is_enabled,
+	.set_voltage		= regulator_stub_set_voltage,
+	.get_voltage		= regulator_stub_get_voltage,
+	.list_voltage		= regulator_stub_list_voltage,
+	.set_mode		= regulator_stub_set_mode,
+	.get_mode		= regulator_stub_get_mode,
+	.get_optimum_mode	= regulator_stub_get_optimum_mode,
+};
+
+static void regulator_stub_cleanup(struct regulator_stub *vreg_priv)
+{
+	if (vreg_priv && vreg_priv->rdev)
+		regulator_unregister(vreg_priv->rdev);
+	kfree(vreg_priv);
+}
+
+static int regulator_stub_probe(struct platform_device *pdev)
+{
+	struct regulator_config reg_config = {};
+	struct regulator_init_data *init_data = NULL;
+	struct device *dev = &pdev->dev;
+	struct stub_regulator_pdata *vreg_pdata;
+	struct regulator_desc *rdesc;
+	struct regulator_stub *vreg_priv;
+	int rc;
+
+	vreg_priv = kzalloc(sizeof(*vreg_priv), GFP_KERNEL);
+	if (!vreg_priv) {
+		dev_err(dev, "%s: Unable to allocate memory\n",
+				__func__);
+		return -ENOMEM;
+	}
+
+	if (dev->of_node) {
+		/* Use device tree. */
+		init_data = of_get_regulator_init_data(dev, dev->of_node,
+							&vreg_priv->rdesc);
+		if (!init_data) {
+			dev_err(dev, "%s: unable to allocate memory\n",
+					__func__);
+			rc = -ENOMEM;
+			goto err_probe;
+		}
+
+		if (init_data->constraints.name == NULL) {
+			dev_err(dev, "%s: regulator name not specified\n",
+				__func__);
+			rc = -EINVAL;
+			goto err_probe;
+		}
+
+		if (of_get_property(dev->of_node, "parent-supply", NULL))
+			init_data->supply_regulator = "parent";
+
+		of_property_read_u32(dev->of_node, "qcom,system-load",
+					&vreg_priv->system_uA);
+		of_property_read_u32(dev->of_node, "qcom,hpm-min-load",
+					&vreg_priv->hpm_min_load);
+
+		init_data->constraints.input_uV	= init_data->constraints.max_uV;
+
+		init_data->constraints.valid_ops_mask
+			|= REGULATOR_CHANGE_STATUS;
+		init_data->constraints.valid_ops_mask
+			|= REGULATOR_CHANGE_VOLTAGE;
+		init_data->constraints.valid_ops_mask
+			|= REGULATOR_CHANGE_MODE | REGULATOR_CHANGE_DRMS;
+		init_data->constraints.valid_modes_mask
+			= REGULATOR_MODE_NORMAL | REGULATOR_MODE_IDLE;
+	} else {
+		/* Use platform data. */
+		vreg_pdata = dev->platform_data;
+		if (!vreg_pdata) {
+			dev_err(dev, "%s: no platform data\n", __func__);
+			rc = -EINVAL;
+			goto err_probe;
+		}
+		init_data = &vreg_pdata->init_data;
+
+		vreg_priv->system_uA = vreg_pdata->system_uA;
+		vreg_priv->hpm_min_load = vreg_pdata->hpm_min_load;
+	}
+
+	dev_set_drvdata(dev, vreg_priv);
+
+	rdesc = &vreg_priv->rdesc;
+	strlcpy(vreg_priv->name, init_data->constraints.name,
+						   STUB_REGULATOR_MAX_NAME);
+	rdesc->name = vreg_priv->name;
+	rdesc->ops = &regulator_stub_ops;
+
+	/*
+	 * Ensure that voltage set points are handled correctly for regulators
+	 * which have a specified voltage constraint range, as well as those
+	 * that do not.
+	 */
+	if (init_data->constraints.min_uV == 0 &&
+	    init_data->constraints.max_uV == 0)
+		rdesc->n_voltages = 0;
+	else
+		rdesc->n_voltages = 2;
+
+	rdesc->id    = pdev->id;
+	rdesc->owner = THIS_MODULE;
+	rdesc->type  = REGULATOR_VOLTAGE;
+	vreg_priv->voltage = init_data->constraints.min_uV;
+	if (vreg_priv->system_uA >= vreg_priv->hpm_min_load)
+		vreg_priv->mode = REGULATOR_MODE_NORMAL;
+	else
+		vreg_priv->mode = REGULATOR_MODE_IDLE;
+
+	reg_config.dev = dev;
+	reg_config.init_data = init_data;
+	reg_config.driver_data = vreg_priv;
+	reg_config.of_node = dev->of_node;
+	vreg_priv->rdev = regulator_register(rdesc, &reg_config);
+
+	if (IS_ERR(vreg_priv->rdev)) {
+		rc = PTR_ERR(vreg_priv->rdev);
+		vreg_priv->rdev = NULL;
+		if (rc != -EPROBE_DEFER)
+			dev_err(dev, "%s: regulator_register failed\n",
+				__func__);
+		goto err_probe;
+	}
+
+	return 0;
+
+err_probe:
+	regulator_stub_cleanup(vreg_priv);
+	return rc;
+}
+
+static int regulator_stub_remove(struct platform_device *pdev)
+{
+	struct regulator_stub *vreg_priv = dev_get_drvdata(&pdev->dev);
+
+	regulator_stub_cleanup(vreg_priv);
+	return 0;
+}
+
+static const struct of_device_id regulator_stub_match_table[] = {
+	{ .compatible = "qcom," STUB_REGULATOR_DRIVER_NAME, },
+	{}
+};
+
+static struct platform_driver regulator_stub_driver = {
+	.probe	= regulator_stub_probe,
+	.remove	= regulator_stub_remove,
+	.driver	= {
+		.name	= STUB_REGULATOR_DRIVER_NAME,
+		.owner	= THIS_MODULE,
+		.of_match_table = regulator_stub_match_table,
+	},
+};
+
+int __init regulator_stub_init(void)
+{
+	static int registered;
+
+	if (registered)
+		return 0;
+	else
+		registered = 1;
+	return platform_driver_register(&regulator_stub_driver);
+}
+postcore_initcall(regulator_stub_init);
+EXPORT_SYMBOL(regulator_stub_init);
+
+static void __exit regulator_stub_exit(void)
+{
+	platform_driver_unregister(&regulator_stub_driver);
+}
+module_exit(regulator_stub_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("stub regulator driver");
+MODULE_VERSION("1.0");
+MODULE_ALIAS("platform: " STUB_REGULATOR_DRIVER_NAME);
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/rtc/qpnp-rtc.c	2019-10-29 09:26:24.653213062 +0100
@@ -0,0 +1,717 @@
+/* Copyright (c) 2012-2015, 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/regmap.h>
+#include <linux/init.h>
+#include <linux/rtc.h>
+#include <linux/pm.h>
+#include <linux/slab.h>
+#include <linux/idr.h>
+#include <linux/of_device.h>
+#include <linux/of_irq.h>
+#include <linux/spmi.h>
+#include <linux/platform_device.h>
+#include <linux/spinlock.h>
+#include <linux/alarmtimer.h>
+
+/* RTC/ALARM Register offsets */
+#define REG_OFFSET_ALARM_RW	0x40
+#define REG_OFFSET_ALARM_CTRL1	0x46
+#define REG_OFFSET_ALARM_CTRL2	0x48
+#define REG_OFFSET_RTC_WRITE	0x40
+#define REG_OFFSET_RTC_CTRL	0x46
+#define REG_OFFSET_RTC_READ	0x48
+#define REG_OFFSET_PERP_SUBTYPE	0x05
+
+/* RTC_CTRL register bit fields */
+#define BIT_RTC_ENABLE		BIT(7)
+#define BIT_RTC_ALARM_ENABLE	BIT(7)
+#define BIT_RTC_ABORT_ENABLE	BIT(0)
+#define BIT_RTC_ALARM_CLEAR	BIT(0)
+
+/* RTC/ALARM peripheral subtype values */
+#define RTC_PERPH_SUBTYPE       0x1
+#define ALARM_PERPH_SUBTYPE     0x3
+
+#define NUM_8_BIT_RTC_REGS	0x4
+
+#define TO_SECS(arr)		(arr[0] | (arr[1] << 8) | (arr[2] << 16) | \
+							(arr[3] << 24))
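+
+/*
+ * Illustrative example (added for clarity): value[] = { 0x78, 0x56, 0x34,
+ * 0x12 } decodes as TO_SECS(value) == 0x12345678, i.e. the RTC holds the
+ * 32-bit seconds counter little-endian with the LSB in value[0].
+ */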
+
+/* Module parameter to control power-on-alarm */
+bool poweron_alarm;
+EXPORT_SYMBOL(poweron_alarm);
+module_param(poweron_alarm, bool, 0644);
+MODULE_PARM_DESC(poweron_alarm, "Enable/Disable power-on alarm");
+
+/* rtc driver internal structure */
+struct qpnp_rtc {
+	u8			rtc_ctrl_reg;
+	u8			alarm_ctrl_reg1;
+	u16			rtc_base;
+	u16			alarm_base;
+	u32			rtc_write_enable;
+	u32			rtc_alarm_powerup;
+	int			rtc_alarm_irq;
+	struct device		*rtc_dev;
+	struct rtc_device	*rtc;
+	struct platform_device	*pdev;
+	struct regmap		*regmap;
+	spinlock_t		alarm_ctrl_lock;
+};
+
+static int qpnp_read_wrapper(struct qpnp_rtc *rtc_dd, u8 *rtc_val,
+			u16 base, int count)
+{
+	int rc;
+
+	rc = regmap_bulk_read(rtc_dd->regmap, base, rtc_val, count);
+	if (rc) {
+		dev_err(rtc_dd->rtc_dev, "SPMI read failed\n");
+		return rc;
+	}
+	return 0;
+}
+
+static int qpnp_write_wrapper(struct qpnp_rtc *rtc_dd, u8 *rtc_val,
+			u16 base, int count)
+{
+	int rc;
+
+	rc = regmap_bulk_write(rtc_dd->regmap, base, rtc_val, count);
+	if (rc) {
+		dev_err(rtc_dd->rtc_dev, "SPMI write failed\n");
+		return rc;
+	}
+
+	return 0;
+}
+
+static int
+qpnp_rtc_set_time(struct device *dev, struct rtc_time *tm)
+{
+	int rc;
+	unsigned long secs, irq_flags;
+	u8 value[4], reg = 0, alarm_enabled = 0, ctrl_reg;
+	u8 rtc_disabled = 0, rtc_ctrl_reg;
+	struct qpnp_rtc *rtc_dd = dev_get_drvdata(dev);
+
+	rtc_tm_to_time(tm, &secs);
+
+	value[0] = secs & 0xFF;
+	value[1] = (secs >> 8) & 0xFF;
+	value[2] = (secs >> 16) & 0xFF;
+	value[3] = (secs >> 24) & 0xFF;
+
+	dev_dbg(dev, "Seconds value to be written to RTC = %lu\n", secs);
+
+	spin_lock_irqsave(&rtc_dd->alarm_ctrl_lock, irq_flags);
+	ctrl_reg = rtc_dd->alarm_ctrl_reg1;
+
+	if (ctrl_reg & BIT_RTC_ALARM_ENABLE) {
+		alarm_enabled = 1;
+		ctrl_reg &= ~BIT_RTC_ALARM_ENABLE;
+		rc = qpnp_write_wrapper(rtc_dd, &ctrl_reg,
+			rtc_dd->alarm_base + REG_OFFSET_ALARM_CTRL1, 1);
+		if (rc) {
+			dev_err(dev, "Write to ALARM ctrl reg failed\n");
+			goto rtc_rw_fail;
+		}
+	} else {
+		spin_unlock_irqrestore(&rtc_dd->alarm_ctrl_lock, irq_flags);
+	}
+
+	/*
+	 * The 32-bit seconds value is converted to four 8-bit values
+	 *	|<------  32 bit time value in seconds  ------>|
+	 *      <- 8 bit ->|<- 8 bit ->|<- 8 bit ->|<- 8 bit ->|
+	 *       ----------------------------------------------
+	 *      | BYTE[3]  |  BYTE[2]  |  BYTE[1]  |  BYTE[0]  |
+	 *       ----------------------------------------------
+	 *
+	 * RTC has four 8 bit registers for writing time in seconds:
+	 *             WDATA[3], WDATA[2], WDATA[1], WDATA[0]
+	 *
+	 * Writes to the RTC registers should be done in the following order:
+	 * Clear WDATA[0] register
+	 *
+	 * Write BYTE[1], BYTE[2] and BYTE[3] of time to
+	 * RTC WDATA[3], WDATA[2], WDATA[1] registers
+	 *
+	 * Write BYTE[0] of time to RTC WDATA[0] register
+	 *
+	 * Clearing BYTE[0] and writing in the end will prevent any
+	 * unintentional overflow from WDATA[0] to higher bytes during the
+	 * write operation
+	 */
+
+	/* Disable the RTC hardware before writing the time registers */
+	rtc_ctrl_reg = rtc_dd->rtc_ctrl_reg;
+	if (rtc_ctrl_reg & BIT_RTC_ENABLE) {
+		rtc_disabled = 1;
+		rtc_ctrl_reg &= ~BIT_RTC_ENABLE;
+		rc = qpnp_write_wrapper(rtc_dd, &rtc_ctrl_reg,
+				rtc_dd->rtc_base + REG_OFFSET_RTC_CTRL, 1);
+		if (rc) {
+			dev_err(dev, "Disabling of RTC control reg failed with error:%d\n",
+				rc);
+			goto rtc_rw_fail;
+		}
+		rtc_dd->rtc_ctrl_reg = rtc_ctrl_reg;
+	}
+
+	/* Clear WDATA[0] */
+	reg = 0x0;
+	rc = qpnp_write_wrapper(rtc_dd, &reg,
+				rtc_dd->rtc_base + REG_OFFSET_RTC_WRITE, 1);
+	if (rc) {
+		dev_err(dev, "Write to RTC reg failed\n");
+		goto rtc_rw_fail;
+	}
+
+	/* Write to WDATA[3], WDATA[2] and WDATA[1] */
+	rc = qpnp_write_wrapper(rtc_dd, &value[1],
+			rtc_dd->rtc_base + REG_OFFSET_RTC_WRITE + 1, 3);
+	if (rc) {
+		dev_err(dev, "Write to RTC reg failed\n");
+		goto rtc_rw_fail;
+	}
+
+	/* Write to WDATA[0] */
+	rc = qpnp_write_wrapper(rtc_dd, value,
+				rtc_dd->rtc_base + REG_OFFSET_RTC_WRITE, 1);
+	if (rc) {
+		dev_err(dev, "Write to RTC reg failed\n");
+		goto rtc_rw_fail;
+	}
+
+	/* Re-enable the RTC hardware after writing the time registers */
+	if (rtc_disabled) {
+		rtc_ctrl_reg |= BIT_RTC_ENABLE;
+		rc = qpnp_write_wrapper(rtc_dd, &rtc_ctrl_reg,
+				rtc_dd->rtc_base + REG_OFFSET_RTC_CTRL, 1);
+		if (rc) {
+			dev_err(dev, "Enabling of RTC control reg failed with error:%d\n",
+				rc);
+			goto rtc_rw_fail;
+		}
+		rtc_dd->rtc_ctrl_reg = rtc_ctrl_reg;
+	}
+
+	if (alarm_enabled) {
+		ctrl_reg |= BIT_RTC_ALARM_ENABLE;
+		rc = qpnp_write_wrapper(rtc_dd, &ctrl_reg,
+			rtc_dd->alarm_base + REG_OFFSET_ALARM_CTRL1, 1);
+		if (rc) {
+			dev_err(dev, "Write to ALARM ctrl reg failed\n");
+			goto rtc_rw_fail;
+		}
+	}
+
+	rtc_dd->alarm_ctrl_reg1 = ctrl_reg;
+
+rtc_rw_fail:
+	if (alarm_enabled)
+		spin_unlock_irqrestore(&rtc_dd->alarm_ctrl_lock, irq_flags);
+
+	return rc;
+}
+
+static int
+qpnp_rtc_read_time(struct device *dev, struct rtc_time *tm)
+{
+	int rc;
+	u8 value[4], reg;
+	unsigned long secs;
+	struct qpnp_rtc *rtc_dd = dev_get_drvdata(dev);
+
+	rc = qpnp_read_wrapper(rtc_dd, value,
+				rtc_dd->rtc_base + REG_OFFSET_RTC_READ,
+				NUM_8_BIT_RTC_REGS);
+	if (rc) {
+		dev_err(dev, "Read from RTC reg failed\n");
+		return rc;
+	}
+
+	/*
+	 * Read the LSB again and check if there has been a carry over
+	 * If there is, redo the read operation
+	 */
+	rc = qpnp_read_wrapper(rtc_dd, &reg,
+				rtc_dd->rtc_base + REG_OFFSET_RTC_READ, 1);
+	if (rc) {
+		dev_err(dev, "Read from RTC reg failed\n");
+		return rc;
+	}
+
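+	/*
+	 * A smaller LSB on the second read means the counter wrapped past
+	 * 0xFF between the two reads, so the higher bytes of the first read
+	 * may be stale.
+	 */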
+	if (reg < value[0]) {
+		rc = qpnp_read_wrapper(rtc_dd, value,
+				rtc_dd->rtc_base + REG_OFFSET_RTC_READ,
+				NUM_8_BIT_RTC_REGS);
+		if (rc) {
+			dev_err(dev, "Read from RTC reg failed\n");
+			return rc;
+		}
+	}
+
+	secs = TO_SECS(value);
+
+	rtc_time_to_tm(secs, tm);
+
+	rc = rtc_valid_tm(tm);
+	if (rc) {
+		dev_err(dev, "Invalid time read from RTC\n");
+		return rc;
+	}
+
+	dev_dbg(dev, "secs = %lu, h:m:s == %d:%d:%d, d/m/y = %d/%d/%d\n",
+			secs, tm->tm_hour, tm->tm_min, tm->tm_sec,
+			tm->tm_mday, tm->tm_mon, tm->tm_year);
+
+	return 0;
+}
+
+static int
+qpnp_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alarm)
+{
+	int rc;
+	u8 value[4], ctrl_reg;
+	unsigned long secs, secs_rtc, irq_flags;
+	struct qpnp_rtc *rtc_dd = dev_get_drvdata(dev);
+	struct rtc_time rtc_tm;
+
+	rtc_tm_to_time(&alarm->time, &secs);
+
+	/*
+	 * Read the current RTC time and verify if the alarm time is in the
+	 * past. If yes, return invalid
+	 */
+	rc = qpnp_rtc_read_time(dev, &rtc_tm);
+	if (rc) {
+		dev_err(dev, "Unable to read RTC time\n");
+		return -EINVAL;
+	}
+
+	rtc_tm_to_time(&rtc_tm, &secs_rtc);
+	if (secs < secs_rtc) {
+		dev_err(dev, "Trying to set alarm in the past\n");
+		return -EINVAL;
+	}
+
+	value[0] = secs & 0xFF;
+	value[1] = (secs >> 8) & 0xFF;
+	value[2] = (secs >> 16) & 0xFF;
+	value[3] = (secs >> 24) & 0xFF;
+
+	spin_lock_irqsave(&rtc_dd->alarm_ctrl_lock, irq_flags);
+
+	rc = qpnp_write_wrapper(rtc_dd, value,
+				rtc_dd->alarm_base + REG_OFFSET_ALARM_RW,
+				NUM_8_BIT_RTC_REGS);
+	if (rc) {
+		dev_err(dev, "Write to ALARM reg failed\n");
+		goto rtc_rw_fail;
+	}
+
+	ctrl_reg = (alarm->enabled) ?
+			(rtc_dd->alarm_ctrl_reg1 | BIT_RTC_ALARM_ENABLE) :
+			(rtc_dd->alarm_ctrl_reg1 & ~BIT_RTC_ALARM_ENABLE);
+
+	rc = qpnp_write_wrapper(rtc_dd, &ctrl_reg,
+			rtc_dd->alarm_base + REG_OFFSET_ALARM_CTRL1, 1);
+	if (rc) {
+		dev_err(dev, "Write to ALARM cntrol reg failed\n");
+		goto rtc_rw_fail;
+	}
+
+	rtc_dd->alarm_ctrl_reg1 = ctrl_reg;
+
+	dev_dbg(dev, "Alarm Set for h:r:s=%d:%d:%d, d/m/y=%d/%d/%d\n",
+			alarm->time.tm_hour, alarm->time.tm_min,
+			alarm->time.tm_sec, alarm->time.tm_mday,
+			alarm->time.tm_mon, alarm->time.tm_year);
+rtc_rw_fail:
+	spin_unlock_irqrestore(&rtc_dd->alarm_ctrl_lock, irq_flags);
+	return rc;
+}
+
+static int
+qpnp_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alarm)
+{
+	int rc;
+	u8 value[4];
+	unsigned long secs;
+	struct qpnp_rtc *rtc_dd = dev_get_drvdata(dev);
+
+	rc = qpnp_read_wrapper(rtc_dd, value,
+				rtc_dd->alarm_base + REG_OFFSET_ALARM_RW,
+				NUM_8_BIT_RTC_REGS);
+	if (rc) {
+		dev_err(dev, "Read from ALARM reg failed\n");
+		return rc;
+	}
+
+	secs = TO_SECS(value);
+	rtc_time_to_tm(secs, &alarm->time);
+
+	rc = rtc_valid_tm(&alarm->time);
+	if (rc) {
+		dev_err(dev, "Invalid time read from RTC\n");
+		return rc;
+	}
+
+	dev_dbg(dev, "Alarm set for - h:r:s=%d:%d:%d, d/m/y=%d/%d/%d\n",
+		alarm->time.tm_hour, alarm->time.tm_min,
+				alarm->time.tm_sec, alarm->time.tm_mday,
+				alarm->time.tm_mon, alarm->time.tm_year);
+
+	return 0;
+}
+
+
+static int
+qpnp_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
+{
+	int rc;
+	unsigned long irq_flags;
+	struct qpnp_rtc *rtc_dd = dev_get_drvdata(dev);
+	u8 ctrl_reg;
+	u8 value[4] = {0};
+
+	spin_lock_irqsave(&rtc_dd->alarm_ctrl_lock, irq_flags);
+	ctrl_reg = rtc_dd->alarm_ctrl_reg1;
+	ctrl_reg = enabled ? (ctrl_reg | BIT_RTC_ALARM_ENABLE) :
+				(ctrl_reg & ~BIT_RTC_ALARM_ENABLE);
+
+	rc = qpnp_write_wrapper(rtc_dd, &ctrl_reg,
+			rtc_dd->alarm_base + REG_OFFSET_ALARM_CTRL1, 1);
+	if (rc) {
+		dev_err(dev, "Write to ALARM control reg failed\n");
+		goto rtc_rw_fail;
+	}
+
+	rtc_dd->alarm_ctrl_reg1 = ctrl_reg;
+
+	/* Clear Alarm register */
+	if (!enabled) {
+		rc = qpnp_write_wrapper(rtc_dd, value,
+			rtc_dd->alarm_base + REG_OFFSET_ALARM_RW,
+			NUM_8_BIT_RTC_REGS);
+		if (rc)
+			dev_err(dev, "Clear ALARM value reg failed\n");
+	}
+
+rtc_rw_fail:
+	spin_unlock_irqrestore(&rtc_dd->alarm_ctrl_lock, irq_flags);
+	return rc;
+}
+
+static const struct rtc_class_ops qpnp_rtc_ro_ops = {
+	.read_time = qpnp_rtc_read_time,
+	.set_alarm = qpnp_rtc_set_alarm,
+	.read_alarm = qpnp_rtc_read_alarm,
+	.alarm_irq_enable = qpnp_rtc_alarm_irq_enable,
+};
+
+static const struct rtc_class_ops qpnp_rtc_rw_ops = {
+	.read_time = qpnp_rtc_read_time,
+	.set_alarm = qpnp_rtc_set_alarm,
+	.read_alarm = qpnp_rtc_read_alarm,
+	.alarm_irq_enable = qpnp_rtc_alarm_irq_enable,
+	.set_time = qpnp_rtc_set_time,
+};
+
+static irqreturn_t qpnp_alarm_trigger(int irq, void *dev_id)
+{
+	struct qpnp_rtc *rtc_dd = dev_id;
+	u8 ctrl_reg;
+	int rc;
+	unsigned long irq_flags;
+
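+	/* notify the RTC core so anyone waiting on the alarm is woken up */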
+	rtc_update_irq(rtc_dd->rtc, 1, RTC_IRQF | RTC_AF);
+
+	spin_lock_irqsave(&rtc_dd->alarm_ctrl_lock, irq_flags);
+
+	/* Clear the alarm enable bit */
+	ctrl_reg = rtc_dd->alarm_ctrl_reg1;
+	ctrl_reg &= ~BIT_RTC_ALARM_ENABLE;
+
+	rc = qpnp_write_wrapper(rtc_dd, &ctrl_reg,
+			rtc_dd->alarm_base + REG_OFFSET_ALARM_CTRL1, 1);
+	if (rc) {
+		spin_unlock_irqrestore(&rtc_dd->alarm_ctrl_lock, irq_flags);
+		dev_err(rtc_dd->rtc_dev,
+				"Write to ALARM control reg failed\n");
+		goto rtc_alarm_handled;
+	}
+
+	rtc_dd->alarm_ctrl_reg1 = ctrl_reg;
+	spin_unlock_irqrestore(&rtc_dd->alarm_ctrl_lock, irq_flags);
+
+	/* Set ALARM_CLR bit */
+	ctrl_reg = 0x1;
+	rc = qpnp_write_wrapper(rtc_dd, &ctrl_reg,
+			rtc_dd->alarm_base + REG_OFFSET_ALARM_CTRL2, 1);
+	if (rc)
+		dev_err(rtc_dd->rtc_dev,
+				"Write to ALARM control reg failed\n");
+
+rtc_alarm_handled:
+	return IRQ_HANDLED;
+}
+
+static int qpnp_rtc_probe(struct platform_device *pdev)
+{
+	const struct rtc_class_ops *rtc_ops = &qpnp_rtc_ro_ops;
+	int rc;
+	u8 subtype;
+	struct qpnp_rtc *rtc_dd;
+	unsigned int base;
+	struct device_node *child;
+
+	rtc_dd = devm_kzalloc(&pdev->dev, sizeof(*rtc_dd), GFP_KERNEL);
+	if (rtc_dd == NULL)
+		return -ENOMEM;
+
+	rtc_dd->regmap = dev_get_regmap(pdev->dev.parent, NULL);
+	if (!rtc_dd->regmap) {
+		dev_err(&pdev->dev, "Couldn't get parent's regmap\n");
+		return -EINVAL;
+	}
+
+	/* Get the rtc write property */
+	rc = of_property_read_u32(pdev->dev.of_node, "qcom,qpnp-rtc-write",
+						&rtc_dd->rtc_write_enable);
+	if (rc && rc != -EINVAL) {
+		dev_err(&pdev->dev,
+			"Error reading rtc_write_enable property %d\n", rc);
+		return rc;
+	}
+
+	rc = of_property_read_u32(pdev->dev.of_node,
+						"qcom,qpnp-rtc-alarm-pwrup",
+						&rtc_dd->rtc_alarm_powerup);
+	if (rc && rc != -EINVAL) {
+		dev_err(&pdev->dev,
+			"Error reading rtc_alarm_powerup property %d\n", rc);
+		return rc;
+	}
+
+	/* Initialise spinlock to protect RTC control register */
+	spin_lock_init(&rtc_dd->alarm_ctrl_lock);
+
+	rtc_dd->rtc_dev = &(pdev->dev);
+	rtc_dd->pdev = pdev;
+
+	if (of_get_available_child_count(pdev->dev.of_node) == 0) {
+		pr_err("no child nodes\n");
+		rc = -ENXIO;
+		goto fail_rtc_enable;
+	}
+
+	/* Get RTC/ALARM resources */
+	for_each_available_child_of_node(pdev->dev.of_node, child) {
+		rc = of_property_read_u32(child, "reg", &base);
+		if (rc < 0) {
+			dev_err(&pdev->dev,
+				"Couldn't find reg in node = %s rc = %d\n",
+				child->full_name, rc);
+			goto fail_rtc_enable;
+		}
+
+		rc = qpnp_read_wrapper(rtc_dd, &subtype,
+				base + REG_OFFSET_PERP_SUBTYPE, 1);
+		if (rc) {
+			dev_err(&pdev->dev,
+				"Peripheral subtype read failed\n");
+			goto fail_rtc_enable;
+		}
+
+		switch (subtype) {
+		case RTC_PERPH_SUBTYPE:
+			rtc_dd->rtc_base = base;
+			break;
+		case ALARM_PERPH_SUBTYPE:
+			rtc_dd->alarm_base = base;
+			rtc_dd->rtc_alarm_irq = of_irq_get(child, 0);
+			if (rtc_dd->rtc_alarm_irq < 0) {
+				dev_err(&pdev->dev, "ALARM IRQ absent\n");
+				rc = -ENXIO;
+				goto fail_rtc_enable;
+			}
+			break;
+		default:
+			dev_err(&pdev->dev, "Invalid peripheral subtype\n");
+			rc = -EINVAL;
+			goto fail_rtc_enable;
+		}
+	}
+
+	rc = qpnp_read_wrapper(rtc_dd, &rtc_dd->rtc_ctrl_reg,
+				rtc_dd->rtc_base + REG_OFFSET_RTC_CTRL, 1);
+	if (rc) {
+		dev_err(&pdev->dev, "Read from RTC control reg failed\n");
+		goto fail_rtc_enable;
+	}
+
+	if (!(rtc_dd->rtc_ctrl_reg & BIT_RTC_ENABLE)) {
+		dev_err(&pdev->dev, "RTC h/w disabled, rtc not registered\n");
+		rc = -ENODEV;
+		goto fail_rtc_enable;
+	}
+
+	rc = qpnp_read_wrapper(rtc_dd, &rtc_dd->alarm_ctrl_reg1,
+				rtc_dd->alarm_base + REG_OFFSET_ALARM_CTRL1, 1);
+	if (rc) {
+		dev_err(&pdev->dev, "Read from  Alarm control reg failed\n");
+		goto fail_rtc_enable;
+	}
+	/* Enable abort enable feature */
+	rtc_dd->alarm_ctrl_reg1 |= BIT_RTC_ABORT_ENABLE;
+	rc = qpnp_write_wrapper(rtc_dd, &rtc_dd->alarm_ctrl_reg1,
+			rtc_dd->alarm_base + REG_OFFSET_ALARM_CTRL1, 1);
+	if (rc) {
+		dev_err(&pdev->dev, "SPMI write failed!\n");
+		goto fail_rtc_enable;
+	}
+
+	if (rtc_dd->rtc_write_enable)
+		rtc_ops = &qpnp_rtc_rw_ops;
+
+	dev_set_drvdata(&pdev->dev, rtc_dd);
+
+	/* Register the RTC device */
+	rtc_dd->rtc = rtc_device_register("qpnp_rtc", &pdev->dev,
+					  rtc_ops, THIS_MODULE);
+	if (IS_ERR(rtc_dd->rtc)) {
+		dev_err(&pdev->dev, "%s: RTC registration failed (%ld)\n",
+					__func__, PTR_ERR(rtc_dd->rtc));
+		rc = PTR_ERR(rtc_dd->rtc);
+		goto fail_rtc_enable;
+	}
+
+	/* Init power_on_alarm after adding rtc device */
+	power_on_alarm_init();
+
+	/* Request the alarm IRQ */
+	rc = request_any_context_irq(rtc_dd->rtc_alarm_irq,
+				 qpnp_alarm_trigger, IRQF_TRIGGER_RISING,
+				 "qpnp_rtc_alarm", rtc_dd);
+	if (rc) {
+		dev_err(&pdev->dev, "Request IRQ failed (%d)\n", rc);
+		goto fail_req_irq;
+	}
+
+	device_init_wakeup(&pdev->dev, 1);
+	enable_irq_wake(rtc_dd->rtc_alarm_irq);
+
+	dev_dbg(&pdev->dev, "Probe success !!\n");
+
+	return 0;
+
+fail_req_irq:
+	rtc_device_unregister(rtc_dd->rtc);
+fail_rtc_enable:
+	dev_set_drvdata(&pdev->dev, NULL);
+
+	return rc;
+}
+
+static int qpnp_rtc_remove(struct platform_device *pdev)
+{
+	struct qpnp_rtc *rtc_dd = dev_get_drvdata(&pdev->dev);
+
+	device_init_wakeup(&pdev->dev, 0);
+	free_irq(rtc_dd->rtc_alarm_irq, rtc_dd);
+	rtc_device_unregister(rtc_dd->rtc);
+	dev_set_drvdata(&pdev->dev, NULL);
+
+	return 0;
+}
+
+static void qpnp_rtc_shutdown(struct platform_device *pdev)
+{
+	u8 value[4] = {0};
+	u8 reg;
+	int rc;
+	unsigned long irq_flags;
+	struct qpnp_rtc *rtc_dd;
+	bool rtc_alarm_powerup;
+
+	if (!pdev) {
+		pr_err("qpnp-rtc: spmi device not found\n");
+		return;
+	}
+	rtc_dd = dev_get_drvdata(&pdev->dev);
+	if (!rtc_dd) {
+		pr_err("qpnp-rtc: rtc driver data not found\n");
+		return;
+	}
+	rtc_alarm_powerup = rtc_dd->rtc_alarm_powerup;
+	if (!rtc_alarm_powerup && !poweron_alarm) {
+		spin_lock_irqsave(&rtc_dd->alarm_ctrl_lock, irq_flags);
+		dev_dbg(&pdev->dev, "Disabling alarm interrupts\n");
+
+		/* Disable RTC alarms */
+		reg = rtc_dd->alarm_ctrl_reg1;
+		reg &= ~BIT_RTC_ALARM_ENABLE;
+		rc = qpnp_write_wrapper(rtc_dd, &reg,
+			rtc_dd->alarm_base + REG_OFFSET_ALARM_CTRL1, 1);
+		if (rc) {
+			dev_err(rtc_dd->rtc_dev, "SPMI write failed\n");
+			goto fail_alarm_disable;
+		}
+
+		/* Clear Alarm register */
+		rc = qpnp_write_wrapper(rtc_dd, value,
+				rtc_dd->alarm_base + REG_OFFSET_ALARM_RW,
+				NUM_8_BIT_RTC_REGS);
+		if (rc)
+			dev_err(rtc_dd->rtc_dev, "SPMI write failed\n");
+
+fail_alarm_disable:
+		spin_unlock_irqrestore(&rtc_dd->alarm_ctrl_lock, irq_flags);
+	}
+}
+
+static const struct of_device_id spmi_match_table[] = {
+	{
+		.compatible = "qcom,qpnp-rtc",
+	},
+	{}
+};
+
+static struct platform_driver qpnp_rtc_driver = {
+	.probe		= qpnp_rtc_probe,
+	.remove		= qpnp_rtc_remove,
+	.shutdown	= qpnp_rtc_shutdown,
+	.driver		= {
+		.name		= "qcom,qpnp-rtc",
+		.owner		= THIS_MODULE,
+		.of_match_table	= spmi_match_table,
+	},
+};
+
+static int __init qpnp_rtc_init(void)
+{
+	return platform_driver_register(&qpnp_rtc_driver);
+}
+module_init(qpnp_rtc_init);
+
+static void __exit qpnp_rtc_exit(void)
+{
+	platform_driver_unregister(&qpnp_rtc_driver);
+}
+module_exit(qpnp_rtc_exit);
+
+MODULE_DESCRIPTION("SMPI PMIC RTC driver");
+MODULE_LICENSE("GPL v2");
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/scsi/ufs/ufs-debugfs.c	2019-01-22 16:16:26.631274732 +0100
@@ -0,0 +1,1671 @@
+/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * UFS debugfs - add debugfs interface to the ufshcd.
+ * This is currently used for statistics collection and exporting from the
+ * UFS driver.
+ * This infrastructure can be used for debugging or direct tweaking
+ * of the driver from userspace.
+ *
+ */
+
+#include <linux/random.h>
+#include "ufs-debugfs.h"
+#include "unipro.h"
+#include "ufshci.h"
+
+enum field_width {
+	BYTE	= 1,
+	WORD	= 2,
+};
+
+struct desc_field_offset {
+	char *name;
+	int offset;
+	enum field_width width_byte;
+};
+
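+/* NOTE: relies on an err_stats[] array being in scope at each call site */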
+#define UFS_ERR_STATS_PRINT(file, error_index, string, error_seen)	\
+	do {								\
+		if (err_stats[error_index]) {				\
+			seq_printf(file, string,			\
+					err_stats[error_index]);	\
+			error_seen = true;				\
+		}							\
+	} while (0)
+
+#define DOORBELL_CLR_TOUT_US	(1000 * 1000) /* 1 sec */
+
+#ifdef CONFIG_UFS_FAULT_INJECTION
+
+#define INJECT_COMMAND_HANG (0x0)
+
+static DECLARE_FAULT_ATTR(fail_default_attr);
+static char *fail_request;
+module_param(fail_request, charp, 0);
+
+/**
+ * struct ufsdbg_err_scenario - error scenario use case
+ * @name: the name of the error scenario
+ * @err_code_arr: error codes array for this error scenario
+ * @num_err_codes: number of error codes in err_code_arr
+ */
+struct ufsdbg_err_scenario {
+	const char *name;
+	const int *err_code_arr;
+	u32 num_err_codes;
+	u32 num_err_injected;
+};
+
+/*
+ * The following static arrays aggregate the possible error codes
+ * that might occur during the relevant error scenario.
+ */
+static const int err_inject_intr_err_codes[] = {
+	CONTROLLER_FATAL_ERROR,
+	SYSTEM_BUS_FATAL_ERROR,
+	INJECT_COMMAND_HANG,
+};
+
+static const int err_inject_pwr_change_err_codes[] = {
+	-EIO,
+	-ETIMEDOUT,
+	-1,
+	PWR_REMOTE,
+	PWR_BUSY,
+	PWR_ERROR_CAP,
+	PWR_FATAL_ERROR,
+};
+
+static const int err_inject_uic_err_codes[] = {
+	-EIO,
+	-ETIMEDOUT,
+};
+
+static const int err_inject_dme_attr_err_codes[] = {
+	/* an invalid DME attribute for host and device */
+	0x1600,
+};
+
+static const int err_inject_query_err_codes[] = {
+	/* an invalid idn for flag/attribute/descriptor query request */
+	0xFF,
+};
+
+static struct ufsdbg_err_scenario err_scen_arr[] = {
+	{
+		"ERR_INJECT_INTR",
+		err_inject_intr_err_codes,
+		ARRAY_SIZE(err_inject_intr_err_codes),
+	},
+	{
+		"ERR_INJECT_PWR_CHANGE",
+		err_inject_pwr_change_err_codes,
+		ARRAY_SIZE(err_inject_pwr_change_err_codes),
+	},
+	{
+		"ERR_INJECT_UIC",
+		err_inject_uic_err_codes,
+		ARRAY_SIZE(err_inject_uic_err_codes),
+	},
+	{
+		"ERR_INJECT_DME_ATTR",
+		err_inject_dme_attr_err_codes,
+		ARRAY_SIZE(err_inject_dme_attr_err_codes),
+	},
+	{
+		"ERR_INJECT_QUERY",
+		err_inject_query_err_codes,
+		ARRAY_SIZE(err_inject_query_err_codes),
+	},
+};
+
+static bool inject_fatal_err_tr(struct ufs_hba *hba, u8 ocs_err)
+{
+	int tag;
+
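+	/*
+	 * Pick the first outstanding transfer request, clear its doorbell
+	 * bit and overwrite its OCS field so that completion handling sees
+	 * a fatal error.
+	 */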
+	tag = find_first_bit(&hba->outstanding_reqs, hba->nutrs);
+	if (tag == hba->nutrs)
+		return false;
+
+	ufshcd_writel(hba, ~(1 << tag), REG_UTP_TRANSFER_REQ_LIST_CLEAR);
+	(&hba->lrb[tag])->utr_descriptor_ptr->header.dword_2 =
+							cpu_to_be32(ocs_err);
+
+	/* fatal error injected */
+	return true;
+}
+
+static bool inject_fatal_err_tm(struct ufs_hba *hba, u8 ocs_err)
+{
+	int tag;
+
+	tag = find_first_bit(&hba->outstanding_tasks, hba->nutmrs);
+	if (tag == hba->nutmrs)
+		return false;
+
+	ufshcd_writel(hba, ~(1 << tag), REG_UTP_TASK_REQ_LIST_CLEAR);
+	(&hba->utmrdl_base_addr[tag])->header.dword_2 =
+						cpu_to_be32(ocs_err);
+
+	/* fatal error injected */
+	return true;
+}
+
+static bool inject_cmd_hang_tr(struct ufs_hba *hba)
+{
+	int tag;
+
+	tag = find_first_bit(&hba->outstanding_reqs, hba->nutrs);
+	if (tag == hba->nutrs)
+		return false;
+
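+	/*
+	 * Drop the request from the driver's bookkeeping without completing
+	 * it, so from the upper layers' point of view the command hangs.
+	 */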
+	__clear_bit(tag, &hba->outstanding_reqs);
+	hba->lrb[tag].cmd = NULL;
+	__clear_bit(tag, &hba->lrb_in_use);
+
+	/* command hang injected */
+	return true;
+}
+
+static int inject_cmd_hang_tm(struct ufs_hba *hba)
+{
+	int tag;
+
+	tag = find_first_bit(&hba->outstanding_tasks, hba->nutmrs);
+	if (tag == hba->nutmrs)
+		return 0;
+
+	__clear_bit(tag, &hba->outstanding_tasks);
+	__clear_bit(tag, &hba->tm_slots_in_use);
+
+	/* command hang injected */
+	return 1;
+}
+
+static void
+ufsdbg_intr_fail_request(struct ufs_hba *hba, u32 *intr_status)
+{
+	u8 ocs_err;
+
+	dev_info(hba->dev, "%s: fault-inject error: 0x%x\n",
+			__func__, *intr_status);
+
+	switch (*intr_status) {
+	case CONTROLLER_FATAL_ERROR:
+		ocs_err = OCS_FATAL_ERROR;
+		goto set_ocs;
+	case SYSTEM_BUS_FATAL_ERROR:
+		ocs_err = OCS_INVALID_CMD_TABLE_ATTR;
+set_ocs:
+		if (!inject_fatal_err_tr(hba, ocs_err))
+			if (!inject_fatal_err_tm(hba, ocs_err))
+				goto out;
+		break;
+	case INJECT_COMMAND_HANG:
+		if (!inject_cmd_hang_tr(hba))
+			inject_cmd_hang_tm(hba);
+		break;
+	default:
+		BUG();
+		/* some configurations ignore panics caused by BUG() */
+		break;
+	}
+out:
+	return;
+}
+
+static bool
+ufsdbg_find_err_code(enum ufsdbg_err_inject_scenario usecase,
+		     int *ret, u32 *index)
+{
+	struct ufsdbg_err_scenario *err_scen = &err_scen_arr[usecase];
+	u32 err_code_index;
+
+	if (!err_scen->num_err_codes)
+		return false;
+
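+	/* pick one of this scenario's error codes pseudo-randomly */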
+	err_code_index = prandom_u32() % err_scen->num_err_codes;
+
+	*index = err_code_index;
+	*ret = err_scen->err_code_arr[err_code_index];
+	return true;
+}
+
+void ufsdbg_error_inject_dispatcher(struct ufs_hba *hba,
+			enum ufsdbg_err_inject_scenario usecase,
+			int success_value, int *ret_value)
+{
+	int opt_ret = 0;
+	u32 err_code_index = 0;
+
+	/* sanity check and verify error scenario bit */
+	if ((unlikely(!hba || !ret_value)) ||
+	    (likely(!(hba->debugfs_files.err_inj_scenario_mask &
+						BIT(usecase)))))
+		goto out;
+
+	if (usecase < 0 || usecase >= ERR_INJECT_MAX_ERR_SCENARIOS) {
+		dev_err(hba->dev, "%s: invalid usecase value (%d)\n",
+			__func__, usecase);
+		goto out;
+	}
+
+	if (!ufsdbg_find_err_code(usecase, &opt_ret, &err_code_index))
+		goto out;
+
+	if (!should_fail(&hba->debugfs_files.fail_attr, 1))
+		goto out;
+
+	/* if an error already occurred/injected */
+	if (*ret_value != success_value)
+		goto out;
+
+	switch (usecase) {
+	case ERR_INJECT_INTR:
+		/* an error already occurred */
+		if (*ret_value & UFSHCD_ERROR_MASK)
+			goto out;
+
+		ufsdbg_intr_fail_request(hba, (u32 *)&opt_ret);
+		/* fall through */
+	case ERR_INJECT_PWR_CHANGE:
+	case ERR_INJECT_UIC:
+	case ERR_INJECT_DME_ATTR:
+	case ERR_INJECT_QUERY:
+		goto should_fail;
+	default:
+		dev_err(hba->dev, "%s: unsupported error scenario\n",
+				__func__);
+		goto out;
+	}
+
+should_fail:
+	*ret_value = opt_ret;
+	err_scen_arr[usecase].num_err_injected++;
+	pr_debug("%s: error code index [%d], error code %d (0x%x) is injected for scenario \"%s\"\n",
+		 __func__, err_code_index, *ret_value, *ret_value,
+		 err_scen_arr[usecase].name);
+out:
+	/*
+	 * At this point ret_value is guaranteed to be correct: it was either
+	 * assigned a new (injected) value or kept its original incoming
+	 * value.
+	 */
+	return;
+}
+
+static int ufsdbg_err_inj_scenario_read(struct seq_file *file, void *data)
+{
+	struct ufs_hba *hba = (struct ufs_hba *)file->private;
+	enum ufsdbg_err_inject_scenario err_case;
+
+	if (!hba)
+		return -EINVAL;
+
+	seq_printf(file, "%-40s %-17s %-15s\n",
+		   "Error Scenario:", "Bit[#]", "STATUS");
+
+	for (err_case = ERR_INJECT_INTR;
+		err_case < ERR_INJECT_MAX_ERR_SCENARIOS; err_case++) {
+		seq_printf(file, "%-40s 0x%-15lx %-15s\n",
+			   err_scen_arr[err_case].name,
+			   UFS_BIT(err_case),
+			   hba->debugfs_files.err_inj_scenario_mask &
+				UFS_BIT(err_case) ? "ENABLE" : "DISABLE");
+	}
+
+	seq_printf(file, "bitwise of error scenario is 0x%x\n\n",
+		   hba->debugfs_files.err_inj_scenario_mask);
+
+	seq_puts(file, "usage example:\n");
+	seq_puts(file, "echo 0x4 > /sys/kernel/debug/.../err_inj_scenario\n");
+	seq_puts(file, "in order to enable ERR_INJECT_INTR\n");
+
+	return 0;
+}
+
+static
+int ufsdbg_err_inj_scenario_open(struct inode *inode, struct file *file)
+{
+	return single_open(file,
+			ufsdbg_err_inj_scenario_read, inode->i_private);
+}
+
+static ssize_t ufsdbg_err_inj_scenario_write(struct file *file,
+				     const char __user *ubuf, size_t cnt,
+				     loff_t *ppos)
+{
+	struct ufs_hba *hba = file->f_mapping->host->i_private;
+	int ret;
+	int err_scen = 0;
+
+	if (!hba)
+		return -EINVAL;
+
+	ret = kstrtoint_from_user(ubuf, cnt, 0, &err_scen);
+	if (ret) {
+		dev_err(hba->dev, "%s: Invalid argument\n", __func__);
+		return ret;
+	}
+
+	hba->debugfs_files.err_inj_scenario_mask = err_scen;
+
+	return cnt;
+}
+
+static const struct file_operations ufsdbg_err_inj_scenario_ops = {
+	.open		= ufsdbg_err_inj_scenario_open,
+	.read		= seq_read,
+	.write		= ufsdbg_err_inj_scenario_write,
+};
+
+static int ufsdbg_err_inj_stats_read(struct seq_file *file, void *data)
+{
+	enum ufsdbg_err_inject_scenario err;
+
+	seq_printf(file, "%-40s %-20s\n",
+		   "Error Scenario:", "Num of Errors Injected");
+
+	for (err = 0; err < ERR_INJECT_MAX_ERR_SCENARIOS; err++) {
+		seq_printf(file, "%-40s %-20d\n",
+			err_scen_arr[err].name,
+			err_scen_arr[err].num_err_injected);
+	}
+
+	return 0;
+}
+
+static
+int ufsdbg_err_inj_stats_open(struct inode *inode, struct file *file)
+{
+	return single_open(file,
+			ufsdbg_err_inj_stats_read, inode->i_private);
+}
+
+static ssize_t ufsdbg_err_inj_stats_write(struct file *file,
+				     const char __user *ubuf, size_t cnt,
+				     loff_t *ppos)
+{
+	enum ufsdbg_err_inject_scenario err;
+
+	for (err = 0; err < ERR_INJECT_MAX_ERR_SCENARIOS; err++)
+		err_scen_arr[err].num_err_injected = 0;
+
+	return cnt;
+}
+
+static const struct file_operations ufsdbg_err_inj_stats_ops = {
+	.open		= ufsdbg_err_inj_stats_open,
+	.read		= seq_read,
+	.write		= ufsdbg_err_inj_stats_write,
+};
+
+static void ufsdbg_setup_fault_injection(struct ufs_hba *hba)
+{
+	struct dentry *fault_dir;
+
+	hba->debugfs_files.fail_attr = fail_default_attr;
+
+	if (fail_request)
+		setup_fault_attr(&hba->debugfs_files.fail_attr, fail_request);
+
+	/* suppress dump stack every time failure is injected */
+	hba->debugfs_files.fail_attr.verbose = 0;
+
+	fault_dir = fault_create_debugfs_attr("inject_fault",
+					hba->debugfs_files.debugfs_root,
+					&hba->debugfs_files.fail_attr);
+
+	if (IS_ERR(fault_dir)) {
+		dev_err(hba->dev, "%s: failed to create debugfs entry for fault injection\n",
+			__func__);
+		return;
+	}
+
+	hba->debugfs_files.err_inj_scenario =
+		debugfs_create_file("err_inj_scenario",
+				   S_IRUGO | S_IWUGO,
+				   hba->debugfs_files.debugfs_root, hba,
+				   &ufsdbg_err_inj_scenario_ops);
+
+	if (!hba->debugfs_files.err_inj_scenario) {
+		dev_err(hba->dev,
+			"%s: Could not create debugfs entry for err_scenario",
+				__func__);
+		goto fail_err_inj_scenario;
+	}
+
+	hba->debugfs_files.err_inj_stats =
+		debugfs_create_file("err_inj_stats", S_IRUSR | S_IWUSR,
+				    hba->debugfs_files.debugfs_root, hba,
+				    &ufsdbg_err_inj_stats_ops);
+	if (!hba->debugfs_files.err_inj_stats) {
+		dev_err(hba->dev,
+			"%s:  failed create err_inj_stats debugfs entry\n",
+			__func__);
+		goto fail_err_inj_stats;
+	}
+
+	return;
+
+fail_err_inj_stats:
+	debugfs_remove(hba->debugfs_files.err_inj_scenario);
+fail_err_inj_scenario:
+	debugfs_remove_recursive(fault_dir);
+}
+#else
+static void ufsdbg_setup_fault_injection(struct ufs_hba *hba)
+{
+}
+#endif /* CONFIG_UFS_FAULT_INJECTION */
+
+#define BUFF_LINE_SIZE 16 /* Must be a multiple of sizeof(u32) */
+#define TAB_CHARS 8
+
+static int ufsdbg_tag_stats_show(struct seq_file *file, void *data)
+{
+	struct ufs_hba *hba = (struct ufs_hba *)file->private;
+	struct ufs_stats *ufs_stats;
+	int i, j;
+	int max_depth;
+	bool is_tag_empty = true;
+	unsigned long flags;
+	char *sep = " | * | ";
+
+	if (!hba)
+		goto exit;
+
+	ufs_stats = &hba->ufs_stats;
+
+	if (!ufs_stats->enabled) {
+		pr_debug("%s: ufs statistics are disabled\n", __func__);
+		seq_puts(file, "ufs statistics are disabled");
+		goto exit;
+	}
+
+	max_depth = hba->nutrs;
+
+	spin_lock_irqsave(hba->host->host_lock, flags);
+	/* Header */
+	seq_printf(file, " Tag Stat\t\t%s Number of pending reqs upon issue (Q fullness)\n",
+		sep);
+	for (i = 0; i < TAB_CHARS * (TS_NUM_STATS + 4); i++) {
+		seq_puts(file, "-");
+		if (i == (TAB_CHARS * 3 - 1))
+			seq_puts(file, sep);
+	}
+	seq_printf(file,
+		"\n #\tnum uses\t%s\t #\tAll\tRead\tWrite\tUrg.R\tUrg.W\tFlush\n",
+		sep);
+
+	/* values */
+	for (i = 0; i < max_depth; i++) {
+		if (ufs_stats->tag_stats[i][TS_TAG] <= 0 &&
+				ufs_stats->tag_stats[i][TS_READ] <= 0 &&
+				ufs_stats->tag_stats[i][TS_WRITE] <= 0 &&
+				ufs_stats->tag_stats[i][TS_URGENT_READ] <= 0 &&
+				ufs_stats->tag_stats[i][TS_URGENT_WRITE] <= 0 &&
+				ufs_stats->tag_stats[i][TS_FLUSH] <= 0)
+			continue;
+
+		is_tag_empty = false;
+		seq_printf(file, " %d\t ", i);
+		for (j = 0; j < TS_NUM_STATS; j++) {
+			seq_printf(file, "%llu\t", ufs_stats->tag_stats[i][j]);
+			if (j != 0)
+				continue;
+			seq_printf(file, "\t%s\t %d\t%llu\t", sep, i,
+				ufs_stats->tag_stats[i][TS_READ] +
+				ufs_stats->tag_stats[i][TS_WRITE] +
+				ufs_stats->tag_stats[i][TS_URGENT_READ] +
+				ufs_stats->tag_stats[i][TS_URGENT_WRITE] +
+				ufs_stats->tag_stats[i][TS_FLUSH]);
+		}
+		seq_puts(file, "\n");
+	}
+	spin_unlock_irqrestore(hba->host->host_lock, flags);
+
+	if (is_tag_empty)
+		pr_debug("%s: All tags statistics are empty", __func__);
+
+exit:
+	return 0;
+}
+
+static int ufsdbg_tag_stats_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, ufsdbg_tag_stats_show, inode->i_private);
+}
+
+static ssize_t ufsdbg_tag_stats_write(struct file *filp,
+				      const char __user *ubuf, size_t cnt,
+				       loff_t *ppos)
+{
+	struct ufs_hba *hba = filp->f_mapping->host->i_private;
+	struct ufs_stats *ufs_stats;
+	int val = 0;
+	int ret, bit = 0;
+	unsigned long flags;
+
+	ret = kstrtoint_from_user(ubuf, cnt, 0, &val);
+	if (ret) {
+		dev_err(hba->dev, "%s: Invalid argument\n", __func__);
+		return ret;
+	}
+
+	ufs_stats = &hba->ufs_stats;
+	spin_lock_irqsave(hba->host->host_lock, flags);
+
+	if (!val) {
+		ufs_stats->enabled = false;
+		pr_debug("%s: Disabling UFS tag statistics", __func__);
+	} else {
+		ufs_stats->enabled = true;
+		pr_debug("%s: Enabling & Resetting UFS tag statistics",
+			 __func__);
+		memset(hba->ufs_stats.tag_stats[0], 0,
+			sizeof(**hba->ufs_stats.tag_stats) *
+			TS_NUM_STATS * hba->nutrs);
+
+		/* initialize current queue depth */
+		ufs_stats->q_depth = 0;
+		for_each_set_bit_from(bit, &hba->outstanding_reqs, hba->nutrs)
+			ufs_stats->q_depth++;
+		pr_debug("%s: Enabled UFS tag statistics", __func__);
+	}
+
+	spin_unlock_irqrestore(hba->host->host_lock, flags);
+	return cnt;
+}
+
+static const struct file_operations ufsdbg_tag_stats_fops = {
+	.open		= ufsdbg_tag_stats_open,
+	.read		= seq_read,
+	.write		= ufsdbg_tag_stats_write,
+};
+
+static int ufsdbg_query_stats_show(struct seq_file *file, void *data)
+{
+	struct ufs_hba *hba = (struct ufs_hba *)file->private;
+	struct ufs_stats *ufs_stats = &hba->ufs_stats;
+	int i, j;
+	static const char *opcode_name[UPIU_QUERY_OPCODE_MAX] = {
+		"QUERY_OPCODE_NOP:",
+		"QUERY_OPCODE_READ_DESC:",
+		"QUERY_OPCODE_WRITE_DESC:",
+		"QUERY_OPCODE_READ_ATTR:",
+		"QUERY_OPCODE_WRITE_ATTR:",
+		"QUERY_OPCODE_READ_FLAG:",
+		"QUERY_OPCODE_SET_FLAG:",
+		"QUERY_OPCODE_CLEAR_FLAG:",
+		"QUERY_OPCODE_TOGGLE_FLAG:",
+	};
+
+	seq_puts(file, "\n");
+	seq_puts(file, "The following table shows how many TIMES each IDN was sent to device for each QUERY OPCODE:\n");
+	seq_puts(file, "\n");
+
+	for (i = 0; i < UPIU_QUERY_OPCODE_MAX; i++) {
+		seq_printf(file, "%-30s", opcode_name[i]);
+
+		for (j = 0; j < MAX_QUERY_IDN; j++) {
+			/*
+			 * Print only the non-zero entries, i.e. the number
+			 * of times each IDN was actually sent to the device
+			 * for this opcode. The "table structure" of the
+			 * output is not significant.
+			 */
+			if (ufs_stats->query_stats_arr[i][j])
+				seq_printf(file, "IDN 0x%02X: %d,\t", j,
+					   ufs_stats->query_stats_arr[i][j]);
+		}
+		seq_puts(file, "\n");
+	}
+
+	return 0;
+}
+
+static int ufsdbg_query_stats_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, ufsdbg_query_stats_show, inode->i_private);
+}
+
+static ssize_t ufsdbg_query_stats_write(struct file *filp,
+				      const char __user *ubuf, size_t cnt,
+				       loff_t *ppos)
+{
+	struct ufs_hba *hba = filp->f_mapping->host->i_private;
+	struct ufs_stats *ufs_stats = &hba->ufs_stats;
+	int i, j;
+
+	mutex_lock(&hba->dev_cmd.lock);
+
+	for (i = 0; i < UPIU_QUERY_OPCODE_MAX; i++)
+		for (j = 0; j < MAX_QUERY_IDN; j++)
+			ufs_stats->query_stats_arr[i][j] = 0;
+
+	mutex_unlock(&hba->dev_cmd.lock);
+
+	return cnt;
+}
+
+static const struct file_operations ufsdbg_query_stats_fops = {
+	.open		= ufsdbg_query_stats_open,
+	.read		= seq_read,
+	.write		= ufsdbg_query_stats_write,
+};
+
+static int ufsdbg_err_stats_show(struct seq_file *file, void *data)
+{
+	struct ufs_hba *hba = (struct ufs_hba *)file->private;
+	int *err_stats;
+	unsigned long flags;
+	bool error_seen = false;
+
+	if (!hba)
+		goto exit;
+
+	err_stats = hba->ufs_stats.err_stats;
+
+	spin_lock_irqsave(hba->host->host_lock, flags);
+
+	seq_puts(file, "\n==UFS errors that caused controller reset==\n");
+
+	UFS_ERR_STATS_PRINT(file, UFS_ERR_HIBERN8_EXIT,
+			"controller reset due to hibern8 exit error:\t %d\n",
+			error_seen);
+
+	UFS_ERR_STATS_PRINT(file, UFS_ERR_VOPS_SUSPEND,
+			"controller reset due to vops suspend error:\t\t %d\n",
+			error_seen);
+
+	UFS_ERR_STATS_PRINT(file, UFS_ERR_EH,
+			"controller reset due to error handling:\t\t %d\n",
+			error_seen);
+
+	UFS_ERR_STATS_PRINT(file, UFS_ERR_CLEAR_PEND_XFER_TM,
+			"controller reset due to clear xfer/tm regs:\t\t %d\n",
+			error_seen);
+
+	UFS_ERR_STATS_PRINT(file, UFS_ERR_INT_FATAL_ERRORS,
+			"controller reset due to fatal interrupt:\t %d\n",
+			error_seen);
+
+	UFS_ERR_STATS_PRINT(file, UFS_ERR_INT_UIC_ERROR,
+			"controller reset due to uic interrupt error:\t %d\n",
+			error_seen);
+
+	if (error_seen)
+		error_seen = false;
+	else
+		seq_puts(file,
+			"so far, no errors that caused controller reset\n\n");
+
+	seq_puts(file, "\n\n==UFS other errors==\n");
+
+	UFS_ERR_STATS_PRINT(file, UFS_ERR_HIBERN8_ENTER,
+			"hibern8 enter:\t\t %d\n", error_seen);
+
+	UFS_ERR_STATS_PRINT(file, UFS_ERR_RESUME,
+			"resume error:\t\t %d\n", error_seen);
+
+	UFS_ERR_STATS_PRINT(file, UFS_ERR_SUSPEND,
+			"suspend error:\t\t %d\n", error_seen);
+
+	UFS_ERR_STATS_PRINT(file, UFS_ERR_LINKSTARTUP,
+			"linkstartup error:\t\t %d\n", error_seen);
+
+	UFS_ERR_STATS_PRINT(file, UFS_ERR_POWER_MODE_CHANGE,
+			"power change error:\t %d\n", error_seen);
+
+	UFS_ERR_STATS_PRINT(file, UFS_ERR_TASK_ABORT,
+			"abort callback:\t\t %d\n\n", error_seen);
+
+	if (!error_seen)
+		seq_puts(file,
+		"so far, no other UFS related errors\n\n");
+
+	spin_unlock_irqrestore(hba->host->host_lock, flags);
+exit:
+	return 0;
+}
+
+static int ufsdbg_err_stats_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, ufsdbg_err_stats_show, inode->i_private);
+}
+
+static ssize_t ufsdbg_err_stats_write(struct file *filp,
+				      const char __user *ubuf, size_t cnt,
+				       loff_t *ppos)
+{
+	struct ufs_hba *hba = filp->f_mapping->host->i_private;
+	struct ufs_stats *ufs_stats;
+	unsigned long flags;
+
+	ufs_stats = &hba->ufs_stats;
+	spin_lock_irqsave(hba->host->host_lock, flags);
+
+	pr_debug("%s: Resetting UFS error statistics", __func__);
+	memset(ufs_stats->err_stats, 0, sizeof(hba->ufs_stats.err_stats));
+
+	spin_unlock_irqrestore(hba->host->host_lock, flags);
+	return cnt;
+}
+
+static const struct file_operations ufsdbg_err_stats_fops = {
+	.open		= ufsdbg_err_stats_open,
+	.read		= seq_read,
+	.write		= ufsdbg_err_stats_write,
+};
+
+static int ufshcd_init_statistics(struct ufs_hba *hba)
+{
+	struct ufs_stats *stats = &hba->ufs_stats;
+	int ret = 0;
+	int i;
+
+	stats->enabled = false;
+	stats->tag_stats = kzalloc(sizeof(*stats->tag_stats) * hba->nutrs,
+			GFP_KERNEL);
+	if (!stats->tag_stats)
+		goto no_mem;
+
+	stats->tag_stats[0] = kzalloc(sizeof(**stats->tag_stats) *
+			TS_NUM_STATS * hba->nutrs, GFP_KERNEL);
+	if (!stats->tag_stats[0])
+		goto no_mem;
+
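+	/* point each per-tag row at its slice of the flat block above */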
+	for (i = 1; i < hba->nutrs; i++)
+		stats->tag_stats[i] = &stats->tag_stats[0][i * TS_NUM_STATS];
+
+	memset(stats->err_stats, 0, sizeof(hba->ufs_stats.err_stats));
+
+	goto exit;
+
+no_mem:
+	dev_err(hba->dev, "%s: Unable to allocate UFS tag_stats", __func__);
+	ret = -ENOMEM;
+exit:
+	return ret;
+}
+
+void ufsdbg_pr_buf_to_std(struct ufs_hba *hba, int offset, int num_regs,
+				char *str, void *priv)
+{
+	int i;
+	char linebuf[38];
+	int size = num_regs * sizeof(u32);
+	int lines = size / BUFF_LINE_SIZE +
+			(size % BUFF_LINE_SIZE ? 1 : 0);
+	struct seq_file *file = priv;
+
+	if (!hba || !file) {
+		pr_err("%s called with NULL pointer\n", __func__);
+		return;
+	}
+
+	for (i = 0; i < lines; i++) {
+		hex_dump_to_buffer(hba->mmio_base + offset + i * BUFF_LINE_SIZE,
+				min(BUFF_LINE_SIZE, size), BUFF_LINE_SIZE, 4,
+				linebuf, sizeof(linebuf), false);
+		seq_printf(file, "%s [%x]: %s\n", str, i * BUFF_LINE_SIZE,
+				linebuf);
+		/* size is in bytes, so each line consumes BUFF_LINE_SIZE */
+		size -= BUFF_LINE_SIZE;
+	}
+}
+
+static int ufsdbg_host_regs_show(struct seq_file *file, void *data)
+{
+	struct ufs_hba *hba = (struct ufs_hba *)file->private;
+
+	pm_runtime_get_sync(hba->dev);
+	ufshcd_hold(hba, false);
+	ufsdbg_pr_buf_to_std(hba, 0, UFSHCI_REG_SPACE_SIZE / sizeof(u32),
+				"host regs", file);
+	ufshcd_release(hba, false);
+	pm_runtime_put_sync(hba->dev);
+	return 0;
+}
+
+static int ufsdbg_host_regs_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, ufsdbg_host_regs_show, inode->i_private);
+}
+
+static const struct file_operations ufsdbg_host_regs_fops = {
+	.open		= ufsdbg_host_regs_open,
+	.read		= seq_read,
+};
+
+static int ufsdbg_dump_device_desc_show(struct seq_file *file, void *data)
+{
+	int err = 0;
+	int buff_len = QUERY_DESC_DEVICE_MAX_SIZE;
+	u8 desc_buf[QUERY_DESC_DEVICE_MAX_SIZE];
+	struct ufs_hba *hba = (struct ufs_hba *)file->private;
+
+	struct desc_field_offset device_desc_field_name[] = {
+		{"bLength",		0x00, BYTE},
+		{"bDescriptorType",	0x01, BYTE},
+		{"bDevice",		0x02, BYTE},
+		{"bDeviceClass",	0x03, BYTE},
+		{"bDeviceSubClass",	0x04, BYTE},
+		{"bProtocol",		0x05, BYTE},
+		{"bNumberLU",		0x06, BYTE},
+		{"bNumberWLU",		0x07, BYTE},
+		{"bBootEnable",		0x08, BYTE},
+		{"bDescrAccessEn",	0x09, BYTE},
+		{"bInitPowerMode",	0x0A, BYTE},
+		{"bHighPriorityLUN",	0x0B, BYTE},
+		{"bSecureRemovalType",	0x0C, BYTE},
+		{"bSecurityLU",		0x0D, BYTE},
+		{"Reserved",		0x0E, BYTE},
+		{"bInitActiveICCLevel",	0x0F, BYTE},
+		{"wSpecVersion",	0x10, WORD},
+		{"wManufactureDate",	0x12, WORD},
+		{"iManufactureName",	0x14, BYTE},
+		{"iProductName",	0x15, BYTE},
+		{"iSerialNumber",	0x16, BYTE},
+		{"iOemID",		0x17, BYTE},
+		{"wManufactureID",	0x18, WORD},
+		{"bUD0BaseOffset",	0x1A, BYTE},
+		{"bUDConfigPLength",	0x1B, BYTE},
+		{"bDeviceRTTCap",	0x1C, BYTE},
+		{"wPeriodicRTCUpdate",	0x1D, WORD}
+	};
+
+	pm_runtime_get_sync(hba->dev);
+	err = ufshcd_read_device_desc(hba, desc_buf, buff_len);
+	pm_runtime_put_sync(hba->dev);
+
+	if (!err) {
+		int i;
+		struct desc_field_offset *tmp;
+		for (i = 0; i < ARRAY_SIZE(device_desc_field_name); ++i) {
+			tmp = &device_desc_field_name[i];
+
+			if (tmp->width_byte == BYTE) {
+				seq_printf(file,
+					   "Device Descriptor[Byte offset 0x%x]: %s = 0x%x\n",
+					   tmp->offset,
+					   tmp->name,
+					   (u8)desc_buf[tmp->offset]);
+			} else if (tmp->width_byte == WORD) {
+				seq_printf(file,
+					   "Device Descriptor[Byte offset 0x%x]: %s = 0x%x\n",
+					   tmp->offset,
+					   tmp->name,
+					   *(u16 *)&desc_buf[tmp->offset]);
+			} else {
+				seq_printf(file,
+				"Device Descriptor[offset 0x%x]: %s. Wrong Width = %d",
+				tmp->offset, tmp->name, tmp->width_byte);
+			}
+		}
+	} else {
+		seq_printf(file, "Reading Device Descriptor failed. err = %d\n",
+			   err);
+	}
+
+	return err;
+}
+
+static int ufsdbg_show_hba_show(struct seq_file *file, void *data)
+{
+	struct ufs_hba *hba = (struct ufs_hba *)file->private;
+
+	seq_printf(file, "hba->outstanding_tasks = 0x%x\n",
+			(u32)hba->outstanding_tasks);
+	seq_printf(file, "hba->outstanding_reqs = 0x%x\n",
+			(u32)hba->outstanding_reqs);
+
+	seq_printf(file, "hba->capabilities = 0x%x\n", hba->capabilities);
+	seq_printf(file, "hba->nutrs = %d\n", hba->nutrs);
+	seq_printf(file, "hba->nutmrs = %d\n", hba->nutmrs);
+	seq_printf(file, "hba->ufs_version = 0x%x\n", hba->ufs_version);
+	seq_printf(file, "hba->irq = 0x%x\n", hba->irq);
+	seq_printf(file, "hba->auto_bkops_enabled = %d\n",
+			hba->auto_bkops_enabled);
+
+	seq_printf(file, "hba->ufshcd_state = 0x%x\n", hba->ufshcd_state);
+	seq_printf(file, "hba->clk_gating.state = 0x%x\n",
+			hba->clk_gating.state);
+	seq_printf(file, "hba->eh_flags = 0x%x\n", hba->eh_flags);
+	seq_printf(file, "hba->intr_mask = 0x%x\n", hba->intr_mask);
+	seq_printf(file, "hba->ee_ctrl_mask = 0x%x\n", hba->ee_ctrl_mask);
+
+	/* HBA Errors */
+	seq_printf(file, "hba->errors = 0x%x\n", hba->errors);
+	seq_printf(file, "hba->uic_error = 0x%x\n", hba->uic_error);
+	seq_printf(file, "hba->saved_err = 0x%x\n", hba->saved_err);
+	seq_printf(file, "hba->saved_uic_err = 0x%x\n", hba->saved_uic_err);
+
+	seq_printf(file, "power_mode_change_cnt = %d\n",
+			hba->ufs_stats.power_mode_change_cnt);
+	seq_printf(file, "hibern8_exit_cnt = %d\n",
+			hba->ufs_stats.hibern8_exit_cnt);
+	return 0;
+}
+
+static int ufsdbg_show_hba_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, ufsdbg_show_hba_show, inode->i_private);
+}
+
+static const struct file_operations ufsdbg_show_hba_fops = {
+	.open		= ufsdbg_show_hba_open,
+	.read		= seq_read,
+};
+
+static int ufsdbg_dump_device_desc_open(struct inode *inode, struct file *file)
+{
+	return single_open(file,
+			   ufsdbg_dump_device_desc_show, inode->i_private);
+}
+
+static const struct file_operations ufsdbg_dump_device_desc = {
+	.open		= ufsdbg_dump_device_desc_open,
+	.read		= seq_read,
+};
+
+static int ufsdbg_power_mode_show(struct seq_file *file, void *data)
+{
+	struct ufs_hba *hba = (struct ufs_hba *)file->private;
+	char *names[] = {
+		"INVALID MODE",
+		"FAST MODE",
+		"SLOW MODE",
+		"INVALID MODE",
+		"FASTAUTO MODE",
+		"SLOWAUTO MODE",
+		"INVALID MODE",
+	};
+
+	/* Print current status */
+	seq_puts(file, "UFS current power mode [RX, TX]:");
+	seq_printf(file, "gear=[%d,%d], lane=[%d,%d], pwr=[%s,%s], rate = %c",
+		 hba->pwr_info.gear_rx, hba->pwr_info.gear_tx,
+		 hba->pwr_info.lane_rx, hba->pwr_info.lane_tx,
+		 names[hba->pwr_info.pwr_rx],
+		 names[hba->pwr_info.pwr_tx],
+		 hba->pwr_info.hs_rate == PA_HS_MODE_B ? 'B' : 'A');
+	seq_puts(file, "\n\n");
+
+	/* Print usage */
+	seq_puts(file,
+		"To change power mode write 'GGLLMM' where:\n"
+		"G - selected gear\n"
+		"L - number of lanes\n"
+		"M - power mode:\n"
+		"\t1 = fast mode\n"
+		"\t2 = slow mode\n"
+		"\t4 = fast-auto mode\n"
+		"\t5 = slow-auto mode\n"
+		"first letter is for RX, second letter is for TX.\n\n");
+
+	return 0;
+}
+
+static bool ufsdbg_power_mode_validate(struct ufs_pa_layer_attr *pwr_mode)
+{
+	if (pwr_mode->gear_rx < UFS_PWM_G1 || pwr_mode->gear_rx > UFS_PWM_G7 ||
+	    pwr_mode->gear_tx < UFS_PWM_G1 || pwr_mode->gear_tx > UFS_PWM_G7 ||
+	    pwr_mode->lane_rx < 1 || pwr_mode->lane_rx > 2 ||
+	    pwr_mode->lane_tx < 1 || pwr_mode->lane_tx > 2 ||
+	    (pwr_mode->pwr_rx != FAST_MODE && pwr_mode->pwr_rx != SLOW_MODE &&
+	     pwr_mode->pwr_rx != FASTAUTO_MODE &&
+	     pwr_mode->pwr_rx != SLOWAUTO_MODE) ||
+	    (pwr_mode->pwr_tx != FAST_MODE && pwr_mode->pwr_tx != SLOW_MODE &&
+	     pwr_mode->pwr_tx != FASTAUTO_MODE &&
+	     pwr_mode->pwr_tx != SLOWAUTO_MODE)) {
+		pr_err("%s: power parameters are not valid\n", __func__);
+		return false;
+	}
+
+	return true;
+}
+
+static int ufsdbg_cfg_pwr_param(struct ufs_hba *hba,
+				struct ufs_pa_layer_attr *new_pwr,
+				struct ufs_pa_layer_attr *final_pwr)
+{
+	int ret = 0;
+	bool is_dev_sup_hs = false;
+	bool is_new_pwr_hs = false;
+	int dev_pwm_max_rx_gear;
+	int dev_pwm_max_tx_gear;
+
+	if (!hba->max_pwr_info.is_valid) {
+		dev_err(hba->dev, "%s: device max power is not valid. can't configure power\n",
+			__func__);
+		return -EINVAL;
+	}
+
+	if (hba->max_pwr_info.info.pwr_rx == FAST_MODE)
+		is_dev_sup_hs = true;
+
+	if (new_pwr->pwr_rx == FAST_MODE || new_pwr->pwr_rx == FASTAUTO_MODE)
+		is_new_pwr_hs = true;
+
+	final_pwr->lane_rx = hba->max_pwr_info.info.lane_rx;
+	final_pwr->lane_tx = hba->max_pwr_info.info.lane_tx;
+
+	/* device doesn't support HS but requested power is HS */
+	if (!is_dev_sup_hs && is_new_pwr_hs) {
+		pr_err("%s: device doesn't support HS. requested power is HS\n",
+			__func__);
+		return -ENOTSUPP;
+	} else if ((is_dev_sup_hs && is_new_pwr_hs) ||
+		   (!is_dev_sup_hs && !is_new_pwr_hs)) {
+		/*
+		 * If device and requested power mode are both HS or both PWM
+		 * then dev_max->gear_xx are the gears to be assigned to
+		 * final_pwr->gear_xx.
+		 */
+		final_pwr->gear_rx = hba->max_pwr_info.info.gear_rx;
+		final_pwr->gear_tx = hba->max_pwr_info.info.gear_tx;
+	} else if (is_dev_sup_hs && !is_new_pwr_hs) {
+		/*
+		 * If device supports HS but requested power is PWM, then we
+		 * need to find out what is the max gear in PWM the device
+		 * supports
+		 */
+
+		ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR),
+			       &dev_pwm_max_rx_gear);
+
+		if (!dev_pwm_max_rx_gear) {
+			pr_err("%s: couldn't get device max pwm rx gear\n",
+				__func__);
+			ret = -EINVAL;
+			goto out;
+		}
+
+		ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR),
+				    &dev_pwm_max_tx_gear);
+
+		if (!dev_pwm_max_tx_gear) {
+			pr_err("%s: couldn't get device max pwm tx gear\n",
+				__func__);
+			ret = -EINVAL;
+			goto out;
+		}
+
+		final_pwr->gear_rx = dev_pwm_max_rx_gear;
+		final_pwr->gear_tx = dev_pwm_max_tx_gear;
+	}
+
+	if ((new_pwr->gear_rx > final_pwr->gear_rx) ||
+	    (new_pwr->gear_tx > final_pwr->gear_tx) ||
+	    (new_pwr->lane_rx > final_pwr->lane_rx) ||
+	    (new_pwr->lane_tx > final_pwr->lane_tx)) {
+		pr_err("%s: (RX,TX) GG,LL: in PWM/HS new pwr [%d%d,%d%d] exceeds device limitation [%d%d,%d%d]\n",
+			__func__,
+			new_pwr->gear_rx, new_pwr->gear_tx,
+			new_pwr->lane_rx, new_pwr->lane_tx,
+			final_pwr->gear_rx, final_pwr->gear_tx,
+			final_pwr->lane_rx, final_pwr->lane_tx);
+		return -ENOTSUPP;
+	}
+
+	final_pwr->gear_rx = new_pwr->gear_rx;
+	final_pwr->gear_tx = new_pwr->gear_tx;
+	final_pwr->lane_rx = new_pwr->lane_rx;
+	final_pwr->lane_tx = new_pwr->lane_tx;
+	final_pwr->pwr_rx = new_pwr->pwr_rx;
+	final_pwr->pwr_tx = new_pwr->pwr_tx;
+	final_pwr->hs_rate = new_pwr->hs_rate;
+
+out:
+	return ret;
+}
+
+static int ufsdbg_config_pwr_mode(struct ufs_hba *hba,
+		struct ufs_pa_layer_attr *desired_pwr_mode)
+{
+	int ret;
+
+	pm_runtime_get_sync(hba->dev);
+	ufshcd_scsi_block_requests(hba);
+	ret = ufshcd_wait_for_doorbell_clr(hba, DOORBELL_CLR_TOUT_US);
+	if (!ret)
+		ret = ufshcd_change_power_mode(hba, desired_pwr_mode);
+	ufshcd_scsi_unblock_requests(hba);
+	pm_runtime_put_sync(hba->dev);
+
+	return ret;
+}
+
+static ssize_t ufsdbg_power_mode_write(struct file *file,
+				const char __user *ubuf, size_t cnt,
+				loff_t *ppos)
+{
+	struct ufs_hba *hba = file->f_mapping->host->i_private;
+	struct ufs_pa_layer_attr pwr_mode;
+	struct ufs_pa_layer_attr final_pwr_mode;
+	char pwr_mode_str[BUFF_LINE_SIZE] = {0};
+	loff_t buff_pos = 0;
+	int ret;
+	int idx = 0;
+
+	ret = simple_write_to_buffer(pwr_mode_str, BUFF_LINE_SIZE,
+		&buff_pos, ubuf, cnt);
+
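+	/* parse the 'GGLLMM' string: one ASCII digit per field, RX before TX */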
+	pwr_mode.gear_rx = pwr_mode_str[idx++] - '0';
+	pwr_mode.gear_tx = pwr_mode_str[idx++] - '0';
+	pwr_mode.lane_rx = pwr_mode_str[idx++] - '0';
+	pwr_mode.lane_tx = pwr_mode_str[idx++] - '0';
+	pwr_mode.pwr_rx = pwr_mode_str[idx++] - '0';
+	pwr_mode.pwr_tx = pwr_mode_str[idx++] - '0';
+
+	/*
+	 * Switching between rates is not currently supported so use the
+	 * current rate.
+	 * TODO: add rate switching if and when it is supported in the future
+	 */
+	pwr_mode.hs_rate = hba->pwr_info.hs_rate;
+
+	/* Validate user input */
+	if (!ufsdbg_power_mode_validate(&pwr_mode))
+		return -EINVAL;
+
+	pr_debug("%s: new power mode requested [RX,TX]: Gear=[%d,%d], Lane=[%d,%d], Mode=[%d,%d]\n",
+		__func__,
+		pwr_mode.gear_rx, pwr_mode.gear_tx, pwr_mode.lane_rx,
+		pwr_mode.lane_tx, pwr_mode.pwr_rx, pwr_mode.pwr_tx);
+
+	ret = ufsdbg_cfg_pwr_param(hba, &pwr_mode, &final_pwr_mode);
+	if (ret) {
+		dev_err(hba->dev,
+			"%s: failed to configure new power parameters, ret = %d\n",
+			__func__, ret);
+		return cnt;
+	}
+
+	ret = ufsdbg_config_pwr_mode(hba, &final_pwr_mode);
+	if (ret == -EBUSY)
+		dev_err(hba->dev,
+			"%s: ufshcd_config_pwr_mode failed: system is busy, try again\n",
+			__func__);
+	else if (ret)
+		dev_err(hba->dev,
+			"%s: ufshcd_config_pwr_mode failed, ret=%d\n",
+			__func__, ret);
+
+	return cnt;
+}
+
+static int ufsdbg_power_mode_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, ufsdbg_power_mode_show, inode->i_private);
+}
+
+static const struct file_operations ufsdbg_power_mode_desc = {
+	.open		= ufsdbg_power_mode_open,
+	.read		= seq_read,
+	.write		= ufsdbg_power_mode_write,
+};
+
+static int ufsdbg_dme_read(void *data, u64 *attr_val, bool peer)
+{
+	int ret;
+	struct ufs_hba *hba = data;
+	u32 attr_id, read_val = 0;
+	int (*read_func)(struct ufs_hba *, u32, u32 *);
+	u32 attr_sel;
+
+	if (!hba)
+		return -EINVAL;
+
+	read_func = peer ? ufshcd_dme_peer_get : ufshcd_dme_get;
+	attr_id = peer ? hba->debugfs_files.dme_peer_attr_id :
+			 hba->debugfs_files.dme_local_attr_id;
+	pm_runtime_get_sync(hba->dev);
+	ufshcd_scsi_block_requests(hba);
+	ret = ufshcd_wait_for_doorbell_clr(hba, DOORBELL_CLR_TOUT_US);
+	if (!ret) {
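+		/* M-PHY RX attributes are per-lane and need a GEN selector */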
+		if ((attr_id >= MPHY_RX_ATTR_ADDR_START)
+		    && (attr_id <= MPHY_RX_ATTR_ADDR_END))
+			attr_sel = UIC_ARG_MIB_SEL(attr_id,
+					UIC_ARG_MPHY_RX_GEN_SEL_INDEX(0));
+		else
+			attr_sel = UIC_ARG_MIB(attr_id);
+
+		ret = read_func(hba, attr_sel, &read_val);
+	}
+	ufshcd_scsi_unblock_requests(hba);
+	pm_runtime_put_sync(hba->dev);
+
+	if (!ret)
+		*attr_val = (u64)read_val;
+
+	return ret;
+}
+
+static int ufsdbg_dme_local_set_attr_id(void *data, u64 attr_id)
+{
+	struct ufs_hba *hba = data;
+
+	if (!hba)
+		return -EINVAL;
+
+	hba->debugfs_files.dme_local_attr_id = (u32)attr_id;
+
+	return 0;
+}
+
+static int ufsdbg_dme_local_read(void *data, u64 *attr_val)
+{
+	return ufsdbg_dme_read(data, attr_val, false);
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(ufsdbg_dme_local_read_ops,
+			ufsdbg_dme_local_read,
+			ufsdbg_dme_local_set_attr_id,
+			"%llu\n");
+
+static int ufsdbg_dme_peer_read(void *data, u64 *attr_val)
+{
+	struct ufs_hba *hba = data;
+
+	if (!hba)
+		return -EINVAL;
+	else
+		return ufsdbg_dme_read(data, attr_val, true);
+}
+
+static int ufsdbg_dme_peer_set_attr_id(void *data, u64 attr_id)
+{
+	struct ufs_hba *hba = data;
+
+	if (!hba)
+		return -EINVAL;
+
+	hba->debugfs_files.dme_peer_attr_id = (u32)attr_id;
+
+	return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(ufsdbg_dme_peer_read_ops,
+			ufsdbg_dme_peer_read,
+			ufsdbg_dme_peer_set_attr_id,
+			"%llu\n");
+
+static int ufsdbg_dbg_print_en_read(void *data, u64 *attr_val)
+{
+	struct ufs_hba *hba = data;
+
+	if (!hba)
+		return -EINVAL;
+
+	*attr_val = (u64)hba->ufshcd_dbg_print;
+	return 0;
+}
+
+static int ufsdbg_dbg_print_en_set(void *data, u64 attr_id)
+{
+	struct ufs_hba *hba = data;
+
+	if (!hba)
+		return -EINVAL;
+
+	if (attr_id & ~UFSHCD_DBG_PRINT_ALL)
+		return -EINVAL;
+
+	hba->ufshcd_dbg_print = (u32)attr_id;
+	return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(ufsdbg_dbg_print_en_ops,
+			ufsdbg_dbg_print_en_read,
+			ufsdbg_dbg_print_en_set,
+			"%llu\n");
+
+static ssize_t ufsdbg_req_stats_write(struct file *filp,
+		const char __user *ubuf, size_t cnt, loff_t *ppos)
+{
+	struct ufs_hba *hba = filp->f_mapping->host->i_private;
+	int val;
+	int ret;
+	unsigned long flags;
+
+	ret = kstrtoint_from_user(ubuf, cnt, 0, &val);
+	if (ret) {
+		dev_err(hba->dev, "%s: Invalid argument\n", __func__);
+		return ret;
+	}
+
+	spin_lock_irqsave(hba->host->host_lock, flags);
+	ufshcd_init_req_stats(hba);
+	spin_unlock_irqrestore(hba->host->host_lock, flags);
+
+	return cnt;
+}
+
+static int ufsdbg_req_stats_show(struct seq_file *file, void *data)
+{
+	struct ufs_hba *hba = (struct ufs_hba *)file->private;
+	int i;
+	unsigned long flags;
+
+	/* Header */
+	seq_printf(file, "\t%-10s %-10s %-10s %-10s %-10s %-10s",
+		"All", "Write", "Read", "Read(urg)", "Write(urg)", "Flush");
+
+	spin_lock_irqsave(hba->host->host_lock, flags);
+
+	seq_printf(file, "\n%s:\t", "Min");
+	for (i = 0; i < TS_NUM_STATS; i++)
+		seq_printf(file, "%-10llu ", hba->ufs_stats.req_stats[i].min);
+	seq_printf(file, "\n%s:\t", "Max");
+	for (i = 0; i < TS_NUM_STATS; i++)
+		seq_printf(file, "%-10llu ", hba->ufs_stats.req_stats[i].max);
+	seq_printf(file, "\n%s:\t", "Avg.");
+	for (i = 0; i < TS_NUM_STATS; i++)
+		seq_printf(file, "%-10llu ",
+			div64_u64(hba->ufs_stats.req_stats[i].sum,
+				hba->ufs_stats.req_stats[i].count));
+	seq_printf(file, "\n%s:\t", "Count");
+	for (i = 0; i < TS_NUM_STATS; i++)
+		seq_printf(file, "%-10llu ", hba->ufs_stats.req_stats[i].count);
+	seq_puts(file, "\n");
+	spin_unlock_irqrestore(hba->host->host_lock, flags);
+
+	return 0;
+}
+
+static int ufsdbg_req_stats_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, ufsdbg_req_stats_show, inode->i_private);
+}
+
+static const struct file_operations ufsdbg_req_stats_desc = {
+	.open		= ufsdbg_req_stats_open,
+	.read		= seq_read,
+	.write		= ufsdbg_req_stats_write,
+};
+
+
+static int ufsdbg_reset_controller_show(struct seq_file *file, void *data)
+{
+	seq_puts(file, "echo 1 > /sys/kernel/debug/.../reset_controller\n");
+	seq_puts(file, "resets the UFS controller and restores its operational state\n\n");
+
+	return 0;
+}
+
+static int ufsdbg_reset_controller_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, ufsdbg_reset_controller_show,
+						inode->i_private);
+}
+
+static ssize_t ufsdbg_reset_controller_write(struct file *filp,
+		const char __user *ubuf, size_t cnt, loff_t *ppos)
+{
+	struct ufs_hba *hba = filp->f_mapping->host->i_private;
+	unsigned long flags;
+
+	pm_runtime_get_sync(hba->dev);
+	ufshcd_hold(hba, false);
+
+	spin_lock_irqsave(hba->host->host_lock, flags);
+	/*
+	 * simulating a dummy error in order to "convince"
+	 * eh_work to actually reset the controller
+	 */
+	hba->saved_err |= INT_FATAL_ERRORS;
+	hba->silence_err_logs = true;
+	schedule_work(&hba->eh_work);
+	spin_unlock_irqrestore(hba->host->host_lock, flags);
+
+	flush_work(&hba->eh_work);
+
+	ufshcd_release(hba, false);
+	pm_runtime_put_sync(hba->dev);
+
+	return cnt;
+}
+
+static const struct file_operations ufsdbg_reset_controller = {
+	.open		= ufsdbg_reset_controller_open,
+	.read		= seq_read,
+	.write		= ufsdbg_reset_controller_write,
+};
+
+static int ufsdbg_clear_err_state(void *data, u64 val)
+{
+	struct ufs_hba *hba = data;
+
+	if (!hba)
+		return -EINVAL;
+
+	/* clear the error state on any write attempt */
+	hba->debugfs_files.err_occurred = false;
+
+	return 0;
+}
+
+static int ufsdbg_read_err_state(void *data, u64 *val)
+{
+	struct ufs_hba *hba = data;
+
+	if (!hba)
+		return -EINVAL;
+
+	*val = hba->debugfs_files.err_occurred ? 1 : 0;
+
+	return 0;
+}
+
+void ufsdbg_set_err_state(struct ufs_hba *hba)
+{
+	hba->debugfs_files.err_occurred = true;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(ufsdbg_err_state,
+			ufsdbg_read_err_state,
+			ufsdbg_clear_err_state,
+			"%llu\n");
+
+void ufsdbg_add_debugfs(struct ufs_hba *hba)
+{
+	char root_name[sizeof("ufshcd00")];
+
+	if (!hba) {
+		pr_err("%s: NULL hba, exiting", __func__);
+		return;
+	}
+
+	snprintf(root_name, ARRAY_SIZE(root_name), "%s%d", UFSHCD,
+		hba->host->host_no);
+
+	hba->debugfs_files.debugfs_root = debugfs_create_dir(root_name, NULL);
+	if (IS_ERR(hba->debugfs_files.debugfs_root))
+		/* Don't complain -- debugfs just isn't enabled */
+		goto err_no_root;
+	if (!hba->debugfs_files.debugfs_root) {
+		/*
+		 * Complain -- debugfs is enabled, but it failed to
+		 * create the directory
+		 */
+		dev_err(hba->dev,
+			"%s: NULL debugfs root directory, exiting", __func__);
+		goto err_no_root;
+	}
+
+	hba->debugfs_files.stats_folder = debugfs_create_dir("stats",
+					hba->debugfs_files.debugfs_root);
+	if (!hba->debugfs_files.stats_folder) {
+		dev_err(hba->dev,
+			"%s: NULL stats_folder, exiting", __func__);
+		goto err;
+	}
+
+	hba->debugfs_files.tag_stats =
+		debugfs_create_file("tag_stats", S_IRUSR | S_IWUSR,
+					   hba->debugfs_files.stats_folder, hba,
+					   &ufsdbg_tag_stats_fops);
+	if (!hba->debugfs_files.tag_stats) {
+		dev_err(hba->dev, "%s:  NULL tag_stats file, exiting",
+			__func__);
+		goto err;
+	}
+
+	hba->debugfs_files.query_stats =
+		debugfs_create_file("query_stats", S_IRUSR | S_IWUSR,
+					   hba->debugfs_files.stats_folder, hba,
+					   &ufsdbg_query_stats_fops);
+	if (!hba->debugfs_files.query_stats) {
+		dev_err(hba->dev, "%s:  NULL query_stats file, exiting",
+			__func__);
+		goto err;
+	}
+
+	hba->debugfs_files.err_stats =
+		debugfs_create_file("err_stats", S_IRUSR | S_IWUSR,
+					   hba->debugfs_files.stats_folder, hba,
+					   &ufsdbg_err_stats_fops);
+	if (!hba->debugfs_files.err_stats) {
+		dev_err(hba->dev, "%s:  NULL err_stats file, exiting",
+			__func__);
+		goto err;
+	}
+
+	if (ufshcd_init_statistics(hba)) {
+		dev_err(hba->dev, "%s: Error initializing statistics",
+			__func__);
+		goto err;
+	}
+
+	hba->debugfs_files.host_regs = debugfs_create_file("host_regs", S_IRUSR,
+				hba->debugfs_files.debugfs_root, hba,
+				&ufsdbg_host_regs_fops);
+	if (!hba->debugfs_files.host_regs) {
+		dev_err(hba->dev, "%s:  NULL hcd regs file, exiting", __func__);
+		goto err;
+	}
+
+	hba->debugfs_files.show_hba = debugfs_create_file("show_hba", S_IRUSR,
+				hba->debugfs_files.debugfs_root, hba,
+				&ufsdbg_show_hba_fops);
+	if (!hba->debugfs_files.show_hba) {
+		dev_err(hba->dev, "%s:  NULL hba file, exiting", __func__);
+		goto err;
+	}
+
+	hba->debugfs_files.dump_dev_desc =
+		debugfs_create_file("dump_device_desc", S_IRUSR,
+				    hba->debugfs_files.debugfs_root, hba,
+				    &ufsdbg_dump_device_desc);
+	if (!hba->debugfs_files.dump_dev_desc) {
+		dev_err(hba->dev,
+			"%s:  NULL dump_device_desc file, exiting", __func__);
+		goto err;
+	}
+
+	hba->debugfs_files.power_mode =
+		debugfs_create_file("power_mode", S_IRUSR | S_IWUSR,
+				    hba->debugfs_files.debugfs_root, hba,
+				    &ufsdbg_power_mode_desc);
+	if (!hba->debugfs_files.power_mode) {
+		dev_err(hba->dev,
+			"%s:  NULL power_mode_desc file, exiting", __func__);
+		goto err;
+	}
+
+	hba->debugfs_files.dme_local_read =
+		debugfs_create_file("dme_local_read", S_IRUSR | S_IWUSR,
+				    hba->debugfs_files.debugfs_root, hba,
+				    &ufsdbg_dme_local_read_ops);
+	if (!hba->debugfs_files.dme_local_read) {
+		dev_err(hba->dev,
+			"%s:  failed create dme_local_read debugfs entry\n",
+			__func__);
+		goto err;
+	}
+
+	hba->debugfs_files.dme_peer_read =
+		debugfs_create_file("dme_peer_read", S_IRUSR | S_IWUSR,
+				    hba->debugfs_files.debugfs_root, hba,
+				    &ufsdbg_dme_peer_read_ops);
+	if (!hba->debugfs_files.dme_peer_read) {
+		dev_err(hba->dev,
+			"%s:  failed create dme_peer_read debugfs entry\n",
+			__func__);
+		goto err;
+	}
+
+	hba->debugfs_files.dbg_print_en =
+		debugfs_create_file("dbg_print_en", S_IRUSR | S_IWUSR,
+				    hba->debugfs_files.debugfs_root, hba,
+				    &ufsdbg_dbg_print_en_ops);
+	if (!hba->debugfs_files.dbg_print_en) {
+		dev_err(hba->dev,
+			"%s:  failed create dbg_print_en debugfs entry\n",
+			__func__);
+		goto err;
+	}
+
+	hba->debugfs_files.req_stats =
+		debugfs_create_file("req_stats", S_IRUSR | S_IWUSR,
+			hba->debugfs_files.stats_folder, hba,
+			&ufsdbg_req_stats_desc);
+	if (!hba->debugfs_files.req_stats) {
+		dev_err(hba->dev,
+			"%s: failed to create req_stats debugfs entry\n",
+			__func__);
+		goto err;
+	}
+
+	hba->debugfs_files.reset_controller =
+		debugfs_create_file("reset_controller", S_IRUSR | S_IWUSR,
+			hba->debugfs_files.debugfs_root, hba,
+			&ufsdbg_reset_controller);
+	if (!hba->debugfs_files.reset_controller) {
+		dev_err(hba->dev,
+			"%s: failed to create reset_controller debugfs entry",
+				__func__);
+		goto err;
+	}
+
+	hba->debugfs_files.err_state =
+		debugfs_create_file("err_state", S_IRUSR | S_IWUSR,
+			hba->debugfs_files.debugfs_root, hba,
+			&ufsdbg_err_state);
+	if (!hba->debugfs_files.err_state) {
+		dev_err(hba->dev,
+		     "%s: failed to create err_state debugfs entry", __func__);
+		goto err;
+	}
+
+	ufsdbg_setup_fault_injection(hba);
+
+	ufshcd_vops_add_debugfs(hba, hba->debugfs_files.debugfs_root);
+
+	return;
+
+err:
+	debugfs_remove_recursive(hba->debugfs_files.debugfs_root);
+	hba->debugfs_files.debugfs_root = NULL;
+err_no_root:
+	dev_err(hba->dev, "%s: failed to initialize debugfs\n", __func__);
+}
+
+void ufsdbg_remove_debugfs(struct ufs_hba *hba)
+{
+	ufshcd_vops_remove_debugfs(hba);
+	debugfs_remove_recursive(hba->debugfs_files.debugfs_root);
+	kfree(hba->ufs_stats.tag_stats);
+}
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/scsi/ufs/ufs-debugfs.h	2019-01-22 16:16:26.631274732 +0100
@@ -0,0 +1,69 @@
+/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * UFS debugfs - adds a debugfs interface to the ufshcd.
+ * It is currently used to collect and export statistics from the
+ * UFS driver.
+ * This infrastructure can also be used for debugging or for tweaking
+ * the driver directly from userspace.
+ *
+ */
+
+#ifndef _UFS_DEBUGFS_H
+#define _UFS_DEBUGFS_H
+
+#include <linux/debugfs.h>
+#include "ufshcd.h"
+
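+/* Fault-injection points consumed by ufsdbg_error_inject_dispatcher() */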
+enum ufsdbg_err_inject_scenario {
+	ERR_INJECT_INTR,
+	ERR_INJECT_PWR_CHANGE,
+	ERR_INJECT_UIC,
+	ERR_INJECT_DME_ATTR,
+	ERR_INJECT_QUERY,
+	ERR_INJECT_MAX_ERR_SCENARIOS,
+};
+
+#ifdef CONFIG_DEBUG_FS
+void ufsdbg_add_debugfs(struct ufs_hba *hba);
+void ufsdbg_remove_debugfs(struct ufs_hba *hba);
+void ufsdbg_pr_buf_to_std(struct ufs_hba *hba, int offset, int num_regs,
+				char *str, void *priv);
+void ufsdbg_set_err_state(struct ufs_hba *hba);
+#else
+static inline void ufsdbg_add_debugfs(struct ufs_hba *hba)
+{
+}
+static inline void ufsdbg_remove_debugfs(struct ufs_hba *hba)
+{
+}
+static inline void ufsdbg_pr_buf_to_std(struct ufs_hba *hba, int offset,
+	int num_regs, char *str, void *priv)
+{
+}
+static inline void ufsdbg_set_err_state(struct ufs_hba *hba)
+{
+}
+#endif
+
+#ifdef CONFIG_UFS_FAULT_INJECTION
+void ufsdbg_error_inject_dispatcher(struct ufs_hba *hba,
+			enum ufsdbg_err_inject_scenario err_scenario,
+			int success_value, int *ret_value);
+#else
+static inline void ufsdbg_error_inject_dispatcher(struct ufs_hba *hba,
+			enum ufsdbg_err_inject_scenario err_scenario,
+			int success_value, int *ret_value)
+{
+}
+#endif
+
+#endif /* End of Header */
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/scsi/ufs/ufs-qcom-debugfs.c	2019-01-22 16:16:26.631274732 +0100
@@ -0,0 +1,389 @@
+/*
+ * Copyright (c) 2015,2017, Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/debugfs.h>
+#include "ufs-qcom.h"
+#include "ufs-qcom-debugfs.h"
+#include "ufs-debugfs.h"
+
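+/*
+ * Large enough for one "MAJ, MIN" pair such as "0xAB, 0xCD"
+ * (sizeof() includes the terminating NUL).
+ */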
+#define TESTBUS_CFG_BUFF_LINE_SIZE	sizeof("0xXY, 0xXY")
+
+static void ufs_qcom_dbg_remove_debugfs(struct ufs_qcom_host *host);
+
+static int ufs_qcom_dbg_print_en_read(void *data, u64 *attr_val)
+{
+	struct ufs_qcom_host *host = data;
+
+	if (!host)
+		return -EINVAL;
+
+	*attr_val = (u64)host->dbg_print_en;
+	return 0;
+}
+
+static int ufs_qcom_dbg_print_en_set(void *data, u64 attr_id)
+{
+	struct ufs_qcom_host *host = data;
+
+	if (!host)
+		return -EINVAL;
+
+	if (attr_id & ~UFS_QCOM_DBG_PRINT_ALL)
+		return -EINVAL;
+
+	host->dbg_print_en = (u32)attr_id;
+	return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(ufs_qcom_dbg_print_en_ops,
+			ufs_qcom_dbg_print_en_read,
+			ufs_qcom_dbg_print_en_set,
+			"%llu\n");
+
+static int ufs_qcom_dbg_testbus_en_read(void *data, u64 *attr_val)
+{
+	struct ufs_qcom_host *host = data;
+	bool enabled;
+
+	if (!host)
+		return -EINVAL;
+
+	enabled = !!(host->dbg_print_en & UFS_QCOM_DBG_PRINT_TEST_BUS_EN);
+	*attr_val = (u64)enabled;
+	return 0;
+}
+
+static int ufs_qcom_dbg_testbus_en_set(void *data, u64 attr_id)
+{
+	struct ufs_qcom_host *host = data;
+	int ret = 0;
+
+	if (!host)
+		return -EINVAL;
+
+	if (!!attr_id)
+		host->dbg_print_en |= UFS_QCOM_DBG_PRINT_TEST_BUS_EN;
+	else
+		host->dbg_print_en &= ~UFS_QCOM_DBG_PRINT_TEST_BUS_EN;
+
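+	/* Keep the controller powered and its clocks held while reprogramming */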
+	pm_runtime_get_sync(host->hba->dev);
+	ufshcd_hold(host->hba, false);
+	ret = ufs_qcom_testbus_config(host);
+	ufshcd_release(host->hba, false);
+	pm_runtime_put_sync(host->hba->dev);
+
+	return ret;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(ufs_qcom_dbg_testbus_en_ops,
+			ufs_qcom_dbg_testbus_en_read,
+			ufs_qcom_dbg_testbus_en_set,
+			"%llu\n");
+
+static int ufs_qcom_dbg_testbus_cfg_show(struct seq_file *file, void *data)
+{
+	struct ufs_qcom_host *host = (struct ufs_qcom_host *)file->private;
+
+	seq_printf(file, "Current configuration: major=%d, minor=%d\n\n",
+			host->testbus.select_major, host->testbus.select_minor);
+
+	/* Print usage */
+	seq_puts(file,
+		"To change the test-bus configuration, write 'MAJ,MIN' where:\n"
+		"MAJ - major select\n"
+		"MIN - minor select\n\n");
+	return 0;
+}
+
+static ssize_t ufs_qcom_dbg_testbus_cfg_write(struct file *file,
+				const char __user *ubuf, size_t cnt,
+				loff_t *ppos)
+{
+	struct ufs_qcom_host *host = file->f_mapping->host->i_private;
+	char configuration[TESTBUS_CFG_BUFF_LINE_SIZE] = {'\0'};
+	loff_t buff_pos = 0;
+	char *comma;
+	int ret = 0;
+	int major;
+	int minor;
+	unsigned long flags;
+	struct ufs_hba *hba = host->hba;
+
+	ret = simple_write_to_buffer(configuration,
+		TESTBUS_CFG_BUFF_LINE_SIZE - 1,
+		&buff_pos, ubuf, cnt);
+	if (ret < 0) {
+		dev_err(host->hba->dev, "%s: failed to read user data\n",
+			__func__);
+		goto out;
+	}
+	configuration[ret] = '\0';
+
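+	/* Expect "MAJ,MIN": require a comma with at least one char before it */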
+	comma = strnchr(configuration, TESTBUS_CFG_BUFF_LINE_SIZE, ',');
+	if (!comma || comma == configuration) {
+		dev_err(host->hba->dev,
+			"%s: error in configuration of testbus\n", __func__);
+		ret = -EINVAL;
+		goto out;
+	}
+
+	if (sscanf(configuration, "%i,%i", &major, &minor) != 2) {
+		dev_err(host->hba->dev,
+			"%s: couldn't parse input to 2 numeric values\n",
+			__func__);
+		ret = -EINVAL;
+		goto out;
+	}
+
+	if (!ufs_qcom_testbus_cfg_is_ok(host, major, minor)) {
+		ret = -EPERM;
+		goto out;
+	}
+
+	spin_lock_irqsave(hba->host->host_lock, flags);
+	host->testbus.select_major = (u8)major;
+	host->testbus.select_minor = (u8)minor;
+	spin_unlock_irqrestore(hba->host->host_lock, flags);
+
+	/*
+	 * Sanity check of the {major, minor} tuple is done in the
+	 * config function
+	 */
+	pm_runtime_get_sync(host->hba->dev);
+	ufshcd_hold(host->hba, false);
+	ret = ufs_qcom_testbus_config(host);
+	ufshcd_release(host->hba, false);
+	pm_runtime_put_sync(host->hba->dev);
+	if (!ret)
+		dev_dbg(host->hba->dev,
+				"%s: New configuration: major=%d, minor=%d\n",
+				__func__, host->testbus.select_major,
+				host->testbus.select_minor);
+
+out:
+	return ret ? ret : cnt;
+}
+
+static int ufs_qcom_dbg_testbus_cfg_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, ufs_qcom_dbg_testbus_cfg_show,
+				inode->i_private);
+}
+
+static const struct file_operations ufs_qcom_dbg_testbus_cfg_desc = {
+	.open		= ufs_qcom_dbg_testbus_cfg_open,
+	.read		= seq_read,
+	.write		= ufs_qcom_dbg_testbus_cfg_write,
+	.release	= single_release,	/* free the seq_file from single_open() */
+};
+
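+/* Snapshot the raw UFS_TEST_BUS register while the controller is powered up */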
+static int ufs_qcom_dbg_testbus_bus_read(void *data, u64 *attr_val)
+{
+	struct ufs_qcom_host *host = data;
+
+	if (!host)
+		return -EINVAL;
+
+	pm_runtime_get_sync(host->hba->dev);
+	ufshcd_hold(host->hba, false);
+	*attr_val = (u64)ufshcd_readl(host->hba, UFS_TEST_BUS);
+	ufshcd_release(host->hba, false);
+	pm_runtime_put_sync(host->hba->dev);
+	return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(ufs_qcom_dbg_testbus_bus_ops,
+			ufs_qcom_dbg_testbus_bus_read,
+			NULL,
+			"%llu\n");
+
+static int ufs_qcom_dbg_dbg_regs_show(struct seq_file *file, void *data)
+{
+	struct ufs_qcom_host *host = (struct ufs_qcom_host *)file->private;
+	bool dbg_print_reg = !!(host->dbg_print_en &
+				UFS_QCOM_DBG_PRINT_REGS_EN);
+
+	pm_runtime_get_sync(host->hba->dev);
+	ufshcd_hold(host->hba, false);
+
+	/* Temporarily override the debug print enable */
+	host->dbg_print_en |= UFS_QCOM_DBG_PRINT_REGS_EN;
+	ufs_qcom_print_hw_debug_reg_all(host->hba, file, ufsdbg_pr_buf_to_std);
+	/* Restore previous debug print enable value */
+	if (!dbg_print_reg)
+		host->dbg_print_en &= ~UFS_QCOM_DBG_PRINT_REGS_EN;
+
+	ufshcd_release(host->hba, false);
+	pm_runtime_put_sync(host->hba->dev);
+
+	return 0;
+}
+
+static int ufs_qcom_dbg_dbg_regs_open(struct inode *inode,
+					      struct file *file)
+{
+	return single_open(file, ufs_qcom_dbg_dbg_regs_show,
+				inode->i_private);
+}
+
+static const struct file_operations ufs_qcom_dbg_dbg_regs_desc = {
+	.open		= ufs_qcom_dbg_dbg_regs_open,
+	.read		= seq_read,
+	.release	= single_release,	/* free the seq_file from single_open() */
+};
+
+static int ufs_qcom_dbg_pm_qos_show(struct seq_file *file, void *data)
+{
+	struct ufs_qcom_host *host = (struct ufs_qcom_host *)file->private;
+	unsigned long flags;
+	int i;
+
+	spin_lock_irqsave(host->hba->host->host_lock, flags);
+
+	seq_printf(file, "enabled: %d\n", host->pm_qos.is_enabled);
+	for (i = 0; i < host->pm_qos.num_groups && host->pm_qos.groups; i++)
+		seq_printf(file,
+			"CPU Group #%d(mask=0x%lx): active_reqs=%d, state=%d, latency=%d\n",
+			i, host->pm_qos.groups[i].mask.bits[0],
+			host->pm_qos.groups[i].active_reqs,
+			host->pm_qos.groups[i].state,
+			host->pm_qos.groups[i].latency_us);
+
+	spin_unlock_irqrestore(host->hba->host->host_lock, flags);
+
+	return 0;
+}
+
+static int ufs_qcom_dbg_pm_qos_open(struct inode *inode,
+					      struct file *file)
+{
+	return single_open(file, ufs_qcom_dbg_pm_qos_show, inode->i_private);
+}
+
+static const struct file_operations ufs_qcom_dbg_pm_qos_desc = {
+	.open		= ufs_qcom_dbg_pm_qos_open,
+	.read		= seq_read,
+	.release	= single_release,	/* free the seq_file from single_open() */
+};
+
+void ufs_qcom_dbg_add_debugfs(struct ufs_hba *hba, struct dentry *root)
+{
+	struct ufs_qcom_host *host;
+
+	if (!hba || !hba->priv) {
+		pr_err("%s: NULL host, exiting\n", __func__);
+		return;
+	}
+
+	host = hba->priv;
+	host->debugfs_files.debugfs_root = debugfs_create_dir("qcom", root);
+	if (IS_ERR(host->debugfs_files.debugfs_root))
+		/* Don't complain -- debugfs just isn't enabled */
+		goto err_no_root;
+	if (!host->debugfs_files.debugfs_root) {
+		/*
+		 * Complain -- debugfs is enabled, but it failed to
+		 * create the directory
+		 */
+		dev_err(host->hba->dev,
+			"%s: NULL debugfs root directory, exiting", __func__);
+		goto err_no_root;
+	}
+
+	host->debugfs_files.dbg_print_en =
+		debugfs_create_file("dbg_print_en", S_IRUSR | S_IWUSR,
+				    host->debugfs_files.debugfs_root, host,
+				    &ufs_qcom_dbg_print_en_ops);
+	if (!host->debugfs_files.dbg_print_en) {
+		dev_err(host->hba->dev,
+			"%s: failed to create dbg_print_en debugfs entry\n",
+			__func__);
+		goto err;
+	}
+
+	host->debugfs_files.testbus = debugfs_create_dir("testbus",
+					host->debugfs_files.debugfs_root);
+	if (!host->debugfs_files.testbus) {
+		dev_err(host->hba->dev,
+			"%s: failed to create testbus directory\n",
+			__func__);
+		goto err;
+	}
+
+	host->debugfs_files.testbus_en =
+		debugfs_create_file("enable", S_IRUSR | S_IWUSR,
+				    host->debugfs_files.testbus, host,
+				    &ufs_qcom_dbg_testbus_en_ops);
+	if (!host->debugfs_files.testbus_en) {
+		dev_err(host->hba->dev,
+			"%s: failed to create testbus_en debugfs entry\n",
+			__func__);
+		goto err;
+	}
+
+	host->debugfs_files.testbus_cfg =
+		debugfs_create_file("configuration", S_IRUSR | S_IWUSR,
+				    host->debugfs_files.testbus, host,
+				    &ufs_qcom_dbg_testbus_cfg_desc);
+	if (!host->debugfs_files.testbus_cfg) {
+		dev_err(host->hba->dev,
+			"%s: failed to create testbus_cfg debugfs entry\n",
+			__func__);
+		goto err;
+	}
+
+	host->debugfs_files.testbus_bus =
+		debugfs_create_file("TEST_BUS", S_IRUSR,
+				    host->debugfs_files.testbus, host,
+				    &ufs_qcom_dbg_testbus_bus_ops);
+	if (!host->debugfs_files.testbus_bus) {
+		dev_err(host->hba->dev,
+			"%s: failed to create testbus_bus debugfs entry\n",
+			__func__);
+		goto err;
+	}
+
+	host->debugfs_files.dbg_regs =
+		debugfs_create_file("debug-regs", S_IRUSR,
+				    host->debugfs_files.debugfs_root, host,
+				    &ufs_qcom_dbg_dbg_regs_desc);
+	if (!host->debugfs_files.dbg_regs) {
+		dev_err(host->hba->dev,
+			"%s: failed to create dbg_regs debugfs entry\n",
+			__func__);
+		goto err;
+	}
+
+	host->debugfs_files.pm_qos =
+		debugfs_create_file("pm_qos", S_IRUSR,
+				host->debugfs_files.debugfs_root, host,
+				&ufs_qcom_dbg_pm_qos_desc);
+	if (!host->debugfs_files.pm_qos) {
+		dev_err(host->hba->dev,
+			"%s: failed to create pm_qos debugfs entry\n",
+			__func__);
+		goto err;
+	}
+
+	return;
+
+err:
+	ufs_qcom_dbg_remove_debugfs(host);
+err_no_root:
+	dev_err(host->hba->dev, "%s: failed to initialize debugfs\n", __func__);
+}
+
+static void ufs_qcom_dbg_remove_debugfs(struct ufs_qcom_host *host)
+{
+	debugfs_remove_recursive(host->debugfs_files.debugfs_root);
+	host->debugfs_files.debugfs_root = NULL;
+}
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/scsi/ufs/ufs-qcom-debugfs.h	2019-01-22 16:16:26.631274732 +0100
@@ -0,0 +1,24 @@
+/*
+ * Copyright (c) 2015, Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef QCOM_DEBUGFS_H_
+#define QCOM_DEBUGFS_H_
+
+#include "ufshcd.h"
+
+#ifdef CONFIG_DEBUG_FS
+void ufs_qcom_dbg_add_debugfs(struct ufs_hba *hba, struct dentry *root);
+#endif
+
+#endif /* End of Header */
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/scsi/ufs/ufs-qcom-ice.h	2019-01-22 16:16:26.631274732 +0100
@@ -0,0 +1,132 @@
+/*
+ * Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _UFS_QCOM_ICE_H_
+#define _UFS_QCOM_ICE_H_
+
+#include <scsi/scsi_cmnd.h>
+
+#include "ufs-qcom.h"
+
+/*
+ * UFS host controller ICE registers. There are 32 instances (n = 0..31)
+ * of each of the CTRL_INFO_*_n registers.
+ */
+enum {
+	REG_UFS_QCOM_ICE_CFG		         = 0x2200,
+	REG_UFS_QCOM_ICE_CTRL_INFO_1_n           = 0x2204,
+	REG_UFS_QCOM_ICE_CTRL_INFO_2_n           = 0x2208,
+	REG_UFS_QCOM_ICE_CTRL_INFO_3_n           = 0x220C,
+};
+#define NUM_QCOM_ICE_CTRL_INFO_n_REGS		32
+
+/* UFS QCOM ICE CTRL Info register offset */
+enum {
+	OFFSET_UFS_QCOM_ICE_CTRL_INFO_BYPASS     = 0,
+	OFFSET_UFS_QCOM_ICE_CTRL_INFO_KEY_INDEX  = 0x1,
+	OFFSET_UFS_QCOM_ICE_CTRL_INFO_CDU        = 0x6,
+};
+
+/* UFS QCOM ICE CTRL Info register masks */
+enum {
+	MASK_UFS_QCOM_ICE_CTRL_INFO_BYPASS     = 0x1,
+	MASK_UFS_QCOM_ICE_CTRL_INFO_KEY_INDEX  = 0x1F,
+	MASK_UFS_QCOM_ICE_CTRL_INFO_CDU        = 0x8,
+};
+
+/* UFS QCOM ICE encryption/decryption bypass state */
+enum {
+	UFS_QCOM_ICE_DISABLE_BYPASS  = 0,
+	UFS_QCOM_ICE_ENABLE_BYPASS = 1,
+};
+
+/* UFS QCOM ICE Crypto Data Unit of target DUN of Transfer Request */
+enum {
+	UFS_QCOM_ICE_TR_DATA_UNIT_512_B          = 0,
+	UFS_QCOM_ICE_TR_DATA_UNIT_1_KB           = 1,
+	UFS_QCOM_ICE_TR_DATA_UNIT_2_KB           = 2,
+	UFS_QCOM_ICE_TR_DATA_UNIT_4_KB           = 3,
+	UFS_QCOM_ICE_TR_DATA_UNIT_8_KB           = 4,
+	UFS_QCOM_ICE_TR_DATA_UNIT_16_KB          = 5,
+	UFS_QCOM_ICE_TR_DATA_UNIT_32_KB          = 6,
+};
+
+/* UFS QCOM ICE internal state */
+enum {
+	UFS_QCOM_ICE_STATE_DISABLED   = 0,
+	UFS_QCOM_ICE_STATE_ACTIVE     = 1,
+	UFS_QCOM_ICE_STATE_SUSPENDED  = 2,
+};
+
+#ifdef CONFIG_SCSI_UFS_QCOM_ICE
+int ufs_qcom_ice_get_dev(struct ufs_qcom_host *qcom_host);
+int ufs_qcom_ice_init(struct ufs_qcom_host *qcom_host);
+int ufs_qcom_ice_req_setup(struct ufs_qcom_host *qcom_host,
+			   struct scsi_cmnd *cmd, u8 *cc_index, bool *enable);
+int ufs_qcom_ice_cfg_start(struct ufs_qcom_host *qcom_host,
+		struct scsi_cmnd *cmd);
+int ufs_qcom_ice_cfg_end(struct ufs_qcom_host *qcom_host,
+		struct request *req);
+int ufs_qcom_ice_reset(struct ufs_qcom_host *qcom_host);
+int ufs_qcom_ice_resume(struct ufs_qcom_host *qcom_host);
+int ufs_qcom_ice_suspend(struct ufs_qcom_host *qcom_host);
+int ufs_qcom_ice_get_status(struct ufs_qcom_host *qcom_host, int *ice_status);
+void ufs_qcom_ice_print_regs(struct ufs_qcom_host *qcom_host);
+#else
+static inline int ufs_qcom_ice_get_dev(struct ufs_qcom_host *qcom_host)
+{
+	if (qcom_host) {
+		qcom_host->ice.pdev = NULL;
+		qcom_host->ice.vops = NULL;
+	}
+	return -ENODEV;
+}
+static inline int ufs_qcom_ice_init(struct ufs_qcom_host *qcom_host)
+{
+	return 0;
+}
+static inline int ufs_qcom_ice_cfg_start(struct ufs_qcom_host *qcom_host,
+					struct scsi_cmnd *cmd)
+{
+	return 0;
+}
+static inline int ufs_qcom_ice_cfg_end(struct ufs_qcom_host *qcom_host,
+					struct request *req)
+{
+	return 0;
+}
+static inline int ufs_qcom_ice_reset(struct ufs_qcom_host *qcom_host)
+{
+	return 0;
+}
+static inline int ufs_qcom_ice_resume(struct ufs_qcom_host *qcom_host)
+{
+	return 0;
+}
+static inline int ufs_qcom_ice_suspend(struct ufs_qcom_host *qcom_host)
+{
+	return 0;
+}
+static inline int ufs_qcom_ice_get_status(struct ufs_qcom_host *qcom_host,
+				   int *ice_status)
+{
+	return 0;
+}
+static inline void ufs_qcom_ice_print_regs(struct ufs_qcom_host *qcom_host)
+{
+}
+#endif /* CONFIG_SCSI_UFS_QCOM_ICE */
+
+#endif /* _UFS_QCOM_ICE_H_ */
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/scsi/ufs/ufs_quirks.c	2019-01-22 16:16:26.631274732 +0100
@@ -0,0 +1,120 @@
+/*
+ * Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include "ufshcd.h"
+#include "ufs_quirks.h"
+
+
+static struct ufs_card_fix ufs_fixups[] = {
+	/* UFS cards deviations table */
+	UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL, UFS_DEVICE_NO_VCCQ),
+	UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
+		UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS),
+	UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
+		UFS_DEVICE_NO_FASTAUTO),
+	UFS_FIX(UFS_VENDOR_TOSHIBA, "THGLF2G9C8KBADG",
+		UFS_DEVICE_QUIRK_PA_TACTIVATE),
+	UFS_FIX(UFS_VENDOR_TOSHIBA, "THGLF2G9D8KBADG",
+		UFS_DEVICE_QUIRK_PA_TACTIVATE),
+	UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
+		UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE),
+	UFS_FIX(UFS_VENDOR_HYNIX, UFS_ANY_MODEL,
+		UFS_DEVICE_QUIRK_HOST_PA_SAVECONFIGTIME),
+	UFS_FIX(UFS_VENDOR_HYNIX, "hB8aL1",
+		UFS_DEVICE_QUIRK_HS_G1_TO_HS_G3_SWITCH),
+	UFS_FIX(UFS_VENDOR_HYNIX, "hC8aL1",
+		UFS_DEVICE_QUIRK_HS_G1_TO_HS_G3_SWITCH),
+	UFS_FIX(UFS_VENDOR_HYNIX, "hD8aL1",
+		UFS_DEVICE_QUIRK_HS_G1_TO_HS_G3_SWITCH),
+	UFS_FIX(UFS_VENDOR_HYNIX, "hC8aM1",
+		UFS_DEVICE_QUIRK_HS_G1_TO_HS_G3_SWITCH),
+	UFS_FIX(UFS_VENDOR_HYNIX, "h08aM1",
+		UFS_DEVICE_QUIRK_HS_G1_TO_HS_G3_SWITCH),
+	UFS_FIX(UFS_VENDOR_HYNIX, "hC8GL1",
+		UFS_DEVICE_QUIRK_HS_G1_TO_HS_G3_SWITCH),
+	UFS_FIX(UFS_VENDOR_HYNIX, "hC8HL1",
+		UFS_DEVICE_QUIRK_HS_G1_TO_HS_G3_SWITCH),
+
+	END_FIX
+};
+
+static int ufs_get_device_info(struct ufs_hba *hba,
+				struct ufs_card_info *card_data)
+{
+	int err;
+	u8 model_index;
+	u8 str_desc_buf[QUERY_DESC_STRING_MAX_SIZE + 1];
+	u8 desc_buf[QUERY_DESC_DEVICE_MAX_SIZE];
+
+	err = ufshcd_read_device_desc(hba, desc_buf,
+					QUERY_DESC_DEVICE_MAX_SIZE);
+	if (err)
+		goto out;
+
+	/*
+	 * Get the vendor (manufacturerID) and Bank Index, which are stored
+	 * in big-endian format.
+	 */
+	card_data->wmanufacturerid = desc_buf[DEVICE_DESC_PARAM_MANF_ID] << 8 |
+				     desc_buf[DEVICE_DESC_PARAM_MANF_ID + 1];
+
+	model_index = desc_buf[DEVICE_DESC_PARAM_PRDCT_NAME];
+
+	memset(str_desc_buf, 0, QUERY_DESC_STRING_MAX_SIZE);
+	err = ufshcd_read_string_desc(hba, model_index, str_desc_buf,
+					QUERY_DESC_STRING_MAX_SIZE, ASCII_STD);
+	if (err)
+		goto out;
+
+	str_desc_buf[QUERY_DESC_STRING_MAX_SIZE] = '\0';
+	strlcpy(card_data->model, (str_desc_buf + QUERY_DESC_HDR_SIZE),
+		min_t(u8, str_desc_buf[QUERY_DESC_LENGTH_OFFSET],
+		      MAX_MODEL_LEN));
+	/* Null terminate the model string */
+	card_data->model[MAX_MODEL_LEN] = '\0';
+
+out:
+	return err;
+}
+
+void ufs_advertise_fixup_device(struct ufs_hba *hba)
+{
+	int err;
+	struct ufs_card_fix *f;
+	struct ufs_card_info card_data;
+
+	card_data.wmanufacturerid = 0;
+	card_data.model = kmalloc(MAX_MODEL_LEN + 1, GFP_KERNEL);
+	if (!card_data.model)
+		goto out;
+
+	/* get device data */
+	err = ufs_get_device_info(hba, &card_data);
+	if (err) {
+		dev_err(hba->dev, "%s: Failed getting device info\n", __func__);
+		goto out;
+	}
+
+	for (f = ufs_fixups; f->quirk; f++) {
+		/* if same wmanufacturerid */
+		if (((f->card.wmanufacturerid == card_data.wmanufacturerid) ||
+		     (f->card.wmanufacturerid == UFS_ANY_VENDOR)) &&
+		    /* and same model */
+		    (STR_PRFX_EQUAL(f->card.model, card_data.model) ||
+		     !strcmp(f->card.model, UFS_ANY_MODEL)))
+			/* update quirks */
+			hba->dev_quirks |= f->quirk;
+	}
+out:
+	kfree(card_data.model);
+}
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/scsi/ufs/ufs_quirks.h	2019-01-22 16:16:26.631274732 +0100
@@ -0,0 +1,152 @@
+/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _UFS_QUIRKS_H_
+#define _UFS_QUIRKS_H_
+
+/* return true if s1 is a prefix of s2 */
+#define STR_PRFX_EQUAL(s1, s2) !strncmp(s1, s2, strlen(s1))
+
+#define UFS_ANY_VENDOR -1
+#define UFS_ANY_MODEL  "ANY_MODEL"
+
+#define MAX_MODEL_LEN 16
+
+#define UFS_VENDOR_TOSHIBA     0x198
+#define UFS_VENDOR_SAMSUNG     0x1CE
+#define UFS_VENDOR_HYNIX       0x1AD
+
+/* UFS TOSHIBA MODELS */
+#define UFS_MODEL_TOSHIBA_32GB "THGLF2G8D4KBADR"
+#define UFS_MODEL_TOSHIBA_64GB "THGLF2G9D8KBADG"
+
+/**
+ * ufs_card_info - ufs device details
+ * @wmanufacturerid: card details
+ * @model: card model
+ */
+struct ufs_card_info {
+	u16 wmanufacturerid;
+	char *model;
+};
+
+/**
+ * ufs_card_fix - ufs device quirk info
+ * @card: ufs card details
+ * @quirk: device quirk
+ */
+struct ufs_card_fix {
+	struct ufs_card_info card;
+	unsigned int quirk;
+};
+
+#define END_FIX { { 0 } , 0 }
+
+/* add specific device quirk */
+#define UFS_FIX(_vendor, _model, _quirk) \
+		{						  \
+				.card.wmanufacturerid = (_vendor),\
+				.card.model = (_model),		  \
+				.quirk = (_quirk),		  \
+		}
+
+/*
+ * If the UFS device has issues processing LCC (Line Control
+ * Command) coming from the UFS host controller, then enable this quirk.
+ * When this quirk is enabled, the host controller driver should disable
+ * LCC transmission on the UFS host controller (by clearing the
+ * TX_LCC_ENABLE attribute of the host to 0).
+ */
+#define UFS_DEVICE_QUIRK_BROKEN_LCC (1 << 0)
+
+/*
+ * Some UFS devices don't need VCCQ rail for device operations. Enabling this
+ * quirk for such devices will make sure that VCCQ rail is not voted.
+ */
+#define UFS_DEVICE_NO_VCCQ (1 << 1)
+
+/*
+ * Some vendors' UFS devices send back-to-back NACs for the DL data frames,
+ * causing the host controller to raise the DFES error status. Sometimes
+ * such UFS devices send back-to-back NACs without waiting for a new
+ * retransmitted DL frame from the host, and in such cases the host UniPro
+ * may go into a bad state without raising the DFES error interrupt. If this
+ * happens, all the pending commands would time out only after the respective
+ * SW command timeout (which is generally too long).
+ *
+ * We can workaround such device behaviour like this:
+ * - As soon as SW sees the DL NAC error, it should schedule the error handler
+ * - Error handler would sleep for 50ms to see if there are any fatal errors
+ *   raised by UFS controller.
+ *    - If there are fatal errors then SW does normal error recovery.
+ *    - If there are no fatal errors then SW sends the NOP command to device
+ *      to check if link is alive.
+ *        - If NOP command times out, SW does normal error recovery
+ *        - If NOP command succeeds, skip the error handling.
+ *
+ * If DL NAC error is seen multiple times with some vendor's UFS devices then
+ * enable this quirk to initiate quick error recovery and also silence related
+ * error logs to reduce spamming of kernel logs.
+ */
+#define UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS (1 << 2)
+
+/*
+ * Some UFS devices may not work properly after resume if the link was kept
+ * in off state during suspend. Enabling this quirk will not allow the
+ * link to be kept in off state during suspend.
+ */
+#define UFS_DEVICE_QUIRK_NO_LINK_OFF	(1 << 3)
+
+/*
+ * A few Toshiba UFS device models advertise RX_MIN_ACTIVATETIME_CAPABILITY as
+ * 600us, which may not be enough for a reliable hibern8 exit hardware
+ * sequence on the UFS device.
+ * To work around this issue, the host should set its PA_TACTIVATE time to 1ms
+ * even if the device advertises an RX_MIN_ACTIVATETIME_CAPABILITY of less
+ * than 1ms.
+ */
+#define UFS_DEVICE_QUIRK_PA_TACTIVATE	(1 << 4)
+
+/*
+ * Some UFS memory devices may have really low read/write throughput in
+ * FAST AUTO mode, enable this quirk to make sure that FAST AUTO mode is
+ * never enabled for such devices.
+ */
+#define UFS_DEVICE_NO_FASTAUTO		(1 << 5)
+
+/*
+ * Some UFS devices require host PA_TACTIVATE to be lower than device
+ * PA_TACTIVATE, enabling this quirk ensure this.
+ */
+#define UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE	(1 << 6)
+
+/*
+ * The max. value of PA_SaveConfigTime is 250 (10us), but this is not enough
+ * for some vendors: a gear switch from PWM to HS may fail even with this
+ * maximum PA_SaveConfigTime. A gear switch can be issued by the host
+ * controller as part of error recovery, where a software delay will not
+ * help, so we need to increase PA_SaveConfigTime to >32us as per the
+ * vendor recommendation.
+ */
+#define UFS_DEVICE_QUIRK_HOST_PA_SAVECONFIGTIME	(1 << 7)
+
+/*
+ * Some UFS devices may stop responding after switching from HS-G1 to HS-G3.
+ * These devices are found to work fine if we switch in two steps:
+ * HS-G1 to HS-G2 followed by HS-G2 to HS-G3. Enabling this quirk for such
+ * devices applies this two-step gear switch workaround.
+ */
+#define UFS_DEVICE_QUIRK_HS_G1_TO_HS_G3_SWITCH (1 << 8)
+
+struct ufs_hba;
+void ufs_advertise_fixup_device(struct ufs_hba *hba);
+#endif /* _UFS_QUIRKS_H_ */
diff -Nruw linux-4.4.115-fbx/drivers/sensors./Kconfig linux-4.4.115-fbx/drivers/sensors/Kconfig
--- linux-4.4.115-fbx/drivers/sensors./Kconfig	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/sensors/Kconfig	2019-01-22 16:16:26.639274805 +0100
@@ -0,0 +1,6 @@
+config SENSORS_SSC
+	bool "Enable Sensors Driver Support for SSC"
+	help
+	  Add support for the sensors SSC driver.
+	  This driver is used for exercising the sensors use case and
+	  for time syncing with the ADSP clock.
diff -Nruw linux-4.4.115-fbx/drivers/sensors./Makefile linux-4.4.115-fbx/drivers/sensors/Makefile
--- linux-4.4.115-fbx/drivers/sensors./Makefile	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/sensors/Makefile	2019-01-22 16:16:26.639274805 +0100
@@ -0,0 +1 @@
+obj-$(CONFIG_SENSORS_SSC)	+= sensors_ssc.o
diff -Nruw linux-4.4.115-fbx/drivers/sensors./sensors_ssc.c linux-4.4.115-fbx/drivers/sensors/sensors_ssc.c
--- linux-4.4.115-fbx/drivers/sensors./sensors_ssc.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/sensors/sensors_ssc.c	2019-01-22 16:16:26.639274805 +0100
@@ -0,0 +1,416 @@
+/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/types.h>
+#include <linux/msm_dsps.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+#include <linux/cdev.h>
+#include <linux/fs.h>
+#include <linux/of_device.h>
+#include <asm/arch_timer.h>
+#include <linux/uaccess.h>
+
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/delay.h>
+#include <linux/sysfs.h>
+#include <linux/workqueue.h>
+
+#include <soc/qcom/subsystem_restart.h>
+
+#define IMAGE_LOAD_CMD 1
+#define IMAGE_UNLOAD_CMD 0
+#define CLASS_NAME	"ssc"
+#define DRV_NAME	"sensors"
+#define DRV_VERSION	"2.00"
+#ifdef CONFIG_COMPAT
+#define DSPS_IOCTL_READ_SLOW_TIMER32 _IOR(DSPS_IOCTL_MAGIC, 3, compat_uint_t)
+#endif
+
+struct sns_ssc_control_s {
+	struct class *dev_class;
+	dev_t dev_num;
+	struct device *dev;
+	struct cdev *cdev;
+};
+static struct sns_ssc_control_s sns_ctl;
+
+static ssize_t slpi_boot_store(struct kobject *kobj,
+	struct kobj_attribute *attr,
+	const char *buf, size_t count);
+
+struct slpi_loader_private {
+	void *pil_h;
+	struct kobject *boot_slpi_obj;
+	struct attribute_group *attr_group;
+};
+
+static struct kobj_attribute slpi_boot_attribute =
+	__ATTR(boot, 0220, NULL, slpi_boot_store);
+
+static struct attribute *attrs[] = {
+	&slpi_boot_attribute.attr,
+	NULL,
+};
+
+static struct platform_device *slpi_private;
+static struct work_struct slpi_ldr_work;
+
+static void slpi_load_fw(struct work_struct *slpi_ldr_work)
+{
+	struct platform_device *pdev = slpi_private;
+	struct slpi_loader_private *priv = NULL;
+	int ret;
+	const char *firmware_name = NULL;
+
+	if (!pdev) {
+		/* pdev is NULL here, so dev_err(&pdev->dev, ...) would crash */
+		pr_err("%s: platform device is NULL\n", __func__);
+		return;
+	}
+
+	if (!pdev->dev.of_node) {
+		dev_err(&pdev->dev,
+			"%s: Device tree information missing\n", __func__);
+		goto fail;
+	}
+
+	ret = of_property_read_string(pdev->dev.of_node,
+		"qcom,firmware-name", &firmware_name);
+	if (ret < 0) {
+		pr_err("can't get fw name.\n");
+		goto fail;
+	}
+
+	priv = platform_get_drvdata(pdev);
+	if (!priv) {
+		dev_err(&pdev->dev,
+			"%s: Private data get failed\n", __func__);
+		goto fail;
+	}
+
+	priv->pil_h = subsystem_get_with_fwname("slpi", firmware_name);
+	if (IS_ERR(priv->pil_h)) {
+		dev_err(&pdev->dev, "%s: pil get failed\n", __func__);
+		/* don't leave an ERR_PTR behind for slpi_loader_unload() */
+		priv->pil_h = NULL;
+		goto fail;
+	}
+
+	dev_info(&pdev->dev, "%s: SLPI image is loaded\n", __func__);
+	return;
+
+fail:
+	dev_err(&pdev->dev, "%s: SLPI image loading failed\n", __func__);
+}
+
+static void slpi_loader_do(struct platform_device *pdev)
+{
+	dev_info(&pdev->dev, "%s: scheduling work to load SLPI fw\n", __func__);
+	schedule_work(&slpi_ldr_work);
+}
+
+static void slpi_loader_unload(struct platform_device *pdev)
+{
+	struct slpi_loader_private *priv = NULL;
+
+	priv = platform_get_drvdata(pdev);
+
+	if (!priv)
+		return;
+
+	if (priv->pil_h) {
+		dev_dbg(&pdev->dev, "%s: calling subsystem put\n", __func__);
+		subsystem_put(priv->pil_h);
+		priv->pil_h = NULL;
+	}
+}
+
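+/*
+ * sysfs "boot" store: writing IMAGE_LOAD_CMD (1) schedules SLPI firmware
+ * loading, writing IMAGE_UNLOAD_CMD (0) releases the PIL handle.
+ */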
+static ssize_t slpi_boot_store(struct kobject *kobj,
+	struct kobj_attribute *attr,
+	const char *buf,
+	size_t count)
+{
+	int boot = 0;
+
+	if (sscanf(buf, "%d", &boot) != 1)
+		return -EINVAL;
+
+	if (boot == IMAGE_LOAD_CMD) {
+		pr_debug("%s: going to call slpi_loader_do\n", __func__);
+		slpi_loader_do(slpi_private);
+	} else if (boot == IMAGE_UNLOAD_CMD) {
+		pr_debug("%s: going to call slpi_unloader\n", __func__);
+		slpi_loader_unload(slpi_private);
+	}
+	return count;
+}
+
+static int slpi_loader_init_sysfs(struct platform_device *pdev)
+{
+	int ret = -EINVAL;
+	struct slpi_loader_private *priv = NULL;
+
+	slpi_private = NULL;
+
+	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+	if (!priv) {
+		ret = -ENOMEM;
+		return ret;
+	}
+
+	platform_set_drvdata(pdev, priv);
+
+	priv->pil_h = NULL;
+	priv->boot_slpi_obj = NULL;
+	priv->attr_group = devm_kzalloc(&pdev->dev,
+				sizeof(*(priv->attr_group)),
+				GFP_KERNEL);
+	if (!priv->attr_group) {
+		dev_err(&pdev->dev, "%s: malloc attr_group failed\n",
+						__func__);
+		ret = -ENOMEM;
+		goto error_return;
+	}
+
+	priv->attr_group->attrs = attrs;
+
+	priv->boot_slpi_obj = kobject_create_and_add("boot_slpi", kernel_kobj);
+	if (!priv->boot_slpi_obj) {
+		dev_err(&pdev->dev, "%s: sysfs create and add failed\n",
+						__func__);
+		ret = -ENOMEM;
+		goto error_return;
+	}
+
+	ret = sysfs_create_group(priv->boot_slpi_obj, priv->attr_group);
+	if (ret) {
+		dev_err(&pdev->dev, "%s: sysfs create group failed %d\n",
+							__func__, ret);
+		goto error_return;
+	}
+
+	slpi_private = pdev;
+
+	return 0;
+
+error_return:
+
+	if (priv->boot_slpi_obj) {
+		kobject_del(priv->boot_slpi_obj);
+		priv->boot_slpi_obj = NULL;
+	}
+
+	return ret;
+}
+
+static int slpi_loader_remove(struct platform_device *pdev)
+{
+	struct slpi_loader_private *priv = NULL;
+
+	priv = platform_get_drvdata(pdev);
+
+	if (!priv)
+		return 0;
+
+	if (priv->pil_h) {
+		subsystem_put(priv->pil_h);
+		priv->pil_h = NULL;
+	}
+
+	if (priv->boot_slpi_obj) {
+		sysfs_remove_group(priv->boot_slpi_obj, priv->attr_group);
+		kobject_del(priv->boot_slpi_obj);
+		priv->boot_slpi_obj = NULL;
+	}
+
+	return 0;
+}
+
+/*
+ * Read virtual QTimer clock ticks and scale down to 32KHz clock as used
+ * in DSPS
+ */
+static u32 sns_read_qtimer(void)
+{
+	u64 val;
+
+	val = arch_counter_get_cntvct();
+	/*
+	 * To convert ticks from 19.2 Mhz clock to 32768 Hz clock:
+	 * x = (value * 32768) / 19200000
+	 * This is the same as first left-shifting the value by 4 bits, i.e.
+	 * multiplying by 16, and then dividing by 9375. The latter is
+	 * preferable since the QTimer tick (value) is 56-bit, so
+	 * (value * 32768) could overflow while (value * 16) never will.
+	 */
+	val <<= 4;
+	do_div(val, 9375);
+
+	return (u32)val;
+}
+
+static int sensors_ssc_open(struct inode *ip, struct file *fp)
+{
+	return 0;
+}
+
+static int sensors_ssc_release(struct inode *inode, struct file *file)
+{
+	return 0;
+}
+
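+/*
+ * Both the native ioctl and (under CONFIG_COMPAT) its 32-bit variant return
+ * the current QTimer count scaled down to the 32768 Hz slow clock.
+ */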
+static long sensors_ssc_ioctl(struct file *file,
+			unsigned int cmd, unsigned long arg)
+{
+	int ret = 0;
+	u32 val = 0;
+
+	switch (cmd) {
+	case DSPS_IOCTL_READ_SLOW_TIMER:
+#ifdef CONFIG_COMPAT
+	case DSPS_IOCTL_READ_SLOW_TIMER32:
+#endif
+		val = sns_read_qtimer();
+		ret = put_user(val, (u32 __user *) arg);
+		break;
+
+	default:
+		ret = -EINVAL;
+		break;
+	}
+
+	return ret;
+}
+
+const struct file_operations sensors_ssc_fops = {
+	.owner = THIS_MODULE,
+	.open = sensors_ssc_open,
+	.release = sensors_ssc_release,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl = sensors_ssc_ioctl,
+#endif
+	.unlocked_ioctl = sensors_ssc_ioctl
+};
+
+static int sensors_ssc_probe(struct platform_device *pdev)
+{
+	int ret = slpi_loader_init_sysfs(pdev);
+
+	if (ret != 0) {
+		dev_err(&pdev->dev, "%s: Error in initing sysfs\n", __func__);
+		return ret;
+	}
+
+	sns_ctl.dev_class = class_create(THIS_MODULE, CLASS_NAME);
+	if (IS_ERR(sns_ctl.dev_class)) {
+		pr_err("%s: class_create fail.\n", __func__);
+		goto res_err;
+	}
+
+	ret = alloc_chrdev_region(&sns_ctl.dev_num, 0, 1, DRV_NAME);
+	if (ret) {
+		pr_err("%s: alloc_chrdev_region fail.\n", __func__);
+		goto alloc_chrdev_region_err;
+	}
+
+	sns_ctl.dev = device_create(sns_ctl.dev_class, NULL,
+				     sns_ctl.dev_num,
+				     &sns_ctl, DRV_NAME);
+	if (IS_ERR(sns_ctl.dev)) {
+		pr_err("%s: device_create fail.\n", __func__);
+		goto device_create_err;
+	}
+
+	sns_ctl.cdev = cdev_alloc();
+	if (sns_ctl.cdev == NULL) {
+		pr_err("%s: cdev_alloc fail.\n", __func__);
+		goto cdev_alloc_err;
+	}
+	cdev_init(sns_ctl.cdev, &sensors_ssc_fops);
+	sns_ctl.cdev->owner = THIS_MODULE;
+
+	ret = cdev_add(sns_ctl.cdev, sns_ctl.dev_num, 1);
+	if (ret) {
+		pr_err("%s: cdev_add fail.\n", __func__);
+		goto cdev_add_err;
+	}
+
+	INIT_WORK(&slpi_ldr_work, slpi_load_fw);
+
+	return 0;
+
+cdev_add_err:
+	kfree(sns_ctl.cdev);
+cdev_alloc_err:
+	device_destroy(sns_ctl.dev_class, sns_ctl.dev_num);
+device_create_err:
+	unregister_chrdev_region(sns_ctl.dev_num, 1);
+alloc_chrdev_region_err:
+	class_destroy(sns_ctl.dev_class);
+res_err:
+	return -ENODEV;
+}
+
+static int sensors_ssc_remove(struct platform_device *pdev)
+{
+	slpi_loader_remove(pdev);
+	cdev_del(sns_ctl.cdev);
+	kfree(sns_ctl.cdev);
+	sns_ctl.cdev = NULL;
+	device_destroy(sns_ctl.dev_class, sns_ctl.dev_num);
+	unregister_chrdev_region(sns_ctl.dev_num, 1);
+	class_destroy(sns_ctl.dev_class);
+
+	return 0;
+}
+
+static const struct of_device_id msm_ssc_sensors_dt_match[] = {
+	{.compatible = "qcom,msm-ssc-sensors"},
+	{},
+};
+MODULE_DEVICE_TABLE(of, msm_ssc_sensors_dt_match);
+
+static struct platform_driver sensors_ssc_driver = {
+	.driver = {
+		.name = "sensors-ssc",
+		.owner = THIS_MODULE,
+		.of_match_table = msm_ssc_sensors_dt_match,
+	},
+	.probe = sensors_ssc_probe,
+	.remove = sensors_ssc_remove,
+};
+
+static int __init sensors_ssc_init(void)
+{
+	int rc;
+
+	pr_debug("%s driver version %s.\n", DRV_NAME, DRV_VERSION);
+	rc = platform_driver_register(&sensors_ssc_driver);
+	if (rc) {
+		pr_err("%s: Failed to register sensors ssc driver\n",
+			__func__);
+		return rc;
+	}
+
+	return 0;
+}
+
+static void __exit sensors_ssc_exit(void)
+{
+	platform_driver_unregister(&sensors_ssc_driver);
+}
+
+module_init(sensors_ssc_init);
+module_exit(sensors_ssc_exit);
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Sensors SSC driver");
diff -Nruw linux-4.4.115-fbx/drivers/slimbus./Kconfig linux-4.4.115-fbx/drivers/slimbus/Kconfig
--- linux-4.4.115-fbx/drivers/slimbus./Kconfig	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/slimbus/Kconfig	2019-01-22 16:16:26.643274841 +0100
@@ -0,0 +1,28 @@
+#
+# SLIMBUS driver configuration
+#
+menuconfig SLIMBUS
+	bool "Slimbus support"
+	depends on HAS_IOMEM
+	help
+	  Slimbus is a standard interface between baseband and
+	  application processors and peripheral components in mobile
+	  terminals.
+
+if SLIMBUS
+config SLIMBUS_MSM_CTRL
+	tristate "Qualcomm Slimbus Master Component"
+	default n
+	help
+	  Select driver for Qualcomm's Slimbus Master Component.
+
+config SLIMBUS_MSM_NGD
+	tristate "Qualcomm Slimbus Satellite Component"
+	help
+	  Select driver for Qualcomm's Slimbus Satellite Component.
+	  This is a light-weight slimbus controller driver responsible for
+	  communicating with slave HW directly over the bus using the
+	  messaging interface, and for communicating with the master
+	  component residing on the ADSP for bandwidth and data-channel
+	  management.
+
+endif
diff -Nruw linux-4.4.115-fbx/drivers/slimbus./Makefile linux-4.4.115-fbx/drivers/slimbus/Makefile
--- linux-4.4.115-fbx/drivers/slimbus./Makefile	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/slimbus/Makefile	2019-01-22 16:16:26.643274841 +0100
@@ -0,0 +1,6 @@
+#
+# Makefile for kernel slimbus framework.
+#
+obj-$(CONFIG_SLIMBUS)			+= slimbus.o
+obj-$(CONFIG_SLIMBUS_MSM_CTRL)		+= slim-msm.o slim-msm-ctrl.o
+obj-$(CONFIG_SLIMBUS_MSM_NGD)		+= slim-msm.o slim-msm-ngd.o
diff -Nruw linux-4.4.115-fbx/drivers/slimbus./slimbus.c linux-4.4.115-fbx/drivers/slimbus/slimbus.c
--- linux-4.4.115-fbx/drivers/slimbus./slimbus.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/slimbus/slimbus.c	2019-10-29 09:26:24.805214550 +0100
@@ -0,0 +1,3362 @@
+/* Copyright (c) 2011-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/completion.h>
+#include <linux/idr.h>
+#include <linux/pm_runtime.h>
+#include <linux/slimbus/slimbus.h>
+
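+/*
+ * Handle encoding: bits 31..24 carry the logical address, bits 23..16 the
+ * flow, and the low bits the port (or channel) index.
+ */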
+#define SLIM_PORT_HDL(la, f, p) ((la)<<24 | (f) << 16 | (p))
+
+#define SLIM_HDL_TO_LA(hdl)	((u32)((hdl) & 0xFF000000) >> 24)
+#define SLIM_HDL_TO_FLOW(hdl)	(((u32)(hdl) & 0xFF0000) >> 16)
+#define SLIM_HDL_TO_PORT(hdl)	((u32)(hdl) & 0xFF)
+
+#define SLIM_HDL_TO_CHIDX(hdl)	((u16)(hdl) & 0xFF)
+#define SLIM_GRP_TO_NCHAN(hdl)	((u16)(hdl >> 8) & 0xFF)
+
+#define SLIM_SLAVE_PORT(p, la)	(((la)<<16) | (p))
+#define SLIM_MGR_PORT(p)	((0xFF << 16) | (p))
+#define SLIM_LA_MANAGER		0xFF
+
+#define SLIM_START_GRP		(1 << 8)
+#define SLIM_END_GRP		(1 << 9)
+
+#define SLIM_MAX_INTR_COEFF_3	(SLIM_SL_PER_SUPERFRAME/3)
+#define SLIM_MAX_INTR_COEFF_1	SLIM_SL_PER_SUPERFRAME
+
+static DEFINE_MUTEX(slim_lock);
+static DEFINE_IDR(ctrl_idr);
+static struct device_type slim_dev_type;
+static struct device_type slim_ctrl_type;
+
+#define DEFINE_SLIM_LDEST_TXN(name, mc, len, rl, rbuf, wbuf, la) \
+	struct slim_msg_txn name = { rl, 0, mc, SLIM_MSG_DEST_LOGICALADDR, 0,\
+					len, 0, la, false, rbuf, wbuf, NULL, }
+
+#define DEFINE_SLIM_BCAST_TXN(name, mc, len, rl, rbuf, wbuf, la) \
+	struct slim_msg_txn name = { rl, 0, mc, SLIM_MSG_DEST_BROADCAST, 0,\
+					len, 0, la, false, rbuf, wbuf, NULL, }
+
+static const struct slim_device_id *slim_match(const struct slim_device_id *id,
+					const struct slim_device *slim_dev)
+{
+	while (id->name[0]) {
+		if (strncmp(slim_dev->name, id->name, SLIMBUS_NAME_SIZE) == 0)
+			return id;
+		id++;
+	}
+	return NULL;
+}
+
+const struct slim_device_id *slim_get_device_id(const struct slim_device *sdev)
+{
+	const struct slim_driver *sdrv = to_slim_driver(sdev->dev.driver);
+
+	return slim_match(sdrv->id_table, sdev);
+}
+EXPORT_SYMBOL(slim_get_device_id);
+
+static int slim_device_match(struct device *dev, struct device_driver *driver)
+{
+	struct slim_device *slim_dev;
+	struct slim_driver *drv = to_slim_driver(driver);
+
+	if (dev->type == &slim_dev_type)
+		slim_dev = to_slim_device(dev);
+	else
+		return 0;
+	if (drv->id_table)
+		return slim_match(drv->id_table, slim_dev) != NULL;
+
+	if (driver->name)
+		return strncmp(slim_dev->name, driver->name, SLIMBUS_NAME_SIZE)
+			== 0;
+	return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int slim_legacy_suspend(struct device *dev, pm_message_t mesg)
+{
+	struct slim_device *slim_dev = NULL;
+	struct slim_driver *driver;
+	if (dev->type == &slim_dev_type)
+		slim_dev = to_slim_device(dev);
+
+	if (!slim_dev || !dev->driver)
+		return 0;
+
+	driver = to_slim_driver(dev->driver);
+	if (!driver->suspend)
+		return 0;
+
+	return driver->suspend(slim_dev, mesg);
+}
+
+static int slim_legacy_resume(struct device *dev)
+{
+	struct slim_device *slim_dev = NULL;
+	struct slim_driver *driver;
+	if (dev->type == &slim_dev_type)
+		slim_dev = to_slim_device(dev);
+
+	if (!slim_dev || !dev->driver)
+		return 0;
+
+	driver = to_slim_driver(dev->driver);
+	if (!driver->resume)
+		return 0;
+
+	return driver->resume(slim_dev);
+}
+
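+/* Use the driver's dev_pm_ops when provided; else fall back to legacy hooks */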
+static int slim_pm_suspend(struct device *dev)
+{
+	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+
+	if (pm)
+		return pm_generic_suspend(dev);
+	else
+		return slim_legacy_suspend(dev, PMSG_SUSPEND);
+}
+
+static int slim_pm_resume(struct device *dev)
+{
+	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+
+	if (pm)
+		return pm_generic_resume(dev);
+	else
+		return slim_legacy_resume(dev);
+}
+
+#else
+#define slim_pm_suspend		NULL
+#define slim_pm_resume		NULL
+#endif
+
+static const struct dev_pm_ops slimbus_pm = {
+	.suspend = slim_pm_suspend,
+	.resume = slim_pm_resume,
+	SET_RUNTIME_PM_OPS(
+		pm_generic_suspend,
+		pm_generic_resume,
+		NULL
+		)
+};
+struct bus_type slimbus_type = {
+	.name		= "slimbus",
+	.match		= slim_device_match,
+	.pm		= &slimbus_pm,
+};
+EXPORT_SYMBOL_GPL(slimbus_type);
+
+struct device slimbus_dev = {
+	.init_name = "slimbus",
+};
+
+static void __exit slimbus_exit(void)
+{
+	device_unregister(&slimbus_dev);
+	bus_unregister(&slimbus_type);
+}
+
+static int __init slimbus_init(void)
+{
+	int retval;
+
+	retval = bus_register(&slimbus_type);
+	if (!retval)
+		retval = device_register(&slimbus_dev);
+
+	if (retval)
+		bus_unregister(&slimbus_type);
+
+	return retval;
+}
+postcore_initcall(slimbus_init);
+module_exit(slimbus_exit);
+
+static int slim_drv_probe(struct device *dev)
+{
+	const struct slim_driver *sdrv = to_slim_driver(dev->driver);
+	struct slim_device *sbdev = to_slim_device(dev);
+	struct slim_controller *ctrl = sbdev->ctrl;
+
+	if (sdrv->probe) {
+		int ret;
+		ret = sdrv->probe(sbdev);
+		if (ret)
+			return ret;
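+		/* deliver the device_up notification via the ctrl workqueue */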
+		if (sdrv->device_up)
+			queue_work(ctrl->wq, &sbdev->wd);
+		return 0;
+	}
+	return -ENODEV;
+}
+
+static int slim_drv_remove(struct device *dev)
+{
+	const struct slim_driver *sdrv = to_slim_driver(dev->driver);
+	struct slim_device *sbdev = to_slim_device(dev);
+
+	sbdev->notified = false;
+	if (sdrv->remove)
+		return sdrv->remove(to_slim_device(dev));
+	return -ENODEV;
+}
+
+static void slim_drv_shutdown(struct device *dev)
+{
+	const struct slim_driver *sdrv = to_slim_driver(dev->driver);
+
+	if (sdrv->shutdown)
+		sdrv->shutdown(to_slim_device(dev));
+}
+
+/*
+ * slim_driver_register: Client driver registration with slimbus
+ * @drv: Client driver to be associated with client-device.
+ * This API will register the client driver with the slimbus framework.
+ * It is called from the driver's module-init function.
+ */
+int slim_driver_register(struct slim_driver *drv)
+{
+	drv->driver.bus = &slimbus_type;
+	if (drv->probe)
+		drv->driver.probe = slim_drv_probe;
+
+	if (drv->remove)
+		drv->driver.remove = slim_drv_remove;
+
+	if (drv->shutdown)
+		drv->driver.shutdown = slim_drv_shutdown;
+
+	return driver_register(&drv->driver);
+}
+EXPORT_SYMBOL_GPL(slim_driver_register);
+
+/*
+ * slim_driver_unregister: Undo effects of slim_driver_register
+ * @drv: Client driver to be unregistered
+ */
+void slim_driver_unregister(struct slim_driver *drv)
+{
+	if (drv)
+		driver_unregister(&drv->driver);
+}
+EXPORT_SYMBOL_GPL(slim_driver_unregister);
+
+#define slim_ctrl_attr_gr NULL
+
+static void slim_ctrl_release(struct device *dev)
+{
+	struct slim_controller *ctrl = to_slim_controller(dev);
+
+	complete(&ctrl->dev_released);
+}
+
+static struct device_type slim_ctrl_type = {
+	.groups		= slim_ctrl_attr_gr,
+	.release	= slim_ctrl_release,
+};
+
+static struct slim_controller *slim_ctrl_get(struct slim_controller *ctrl)
+{
+	if (!ctrl || !get_device(&ctrl->dev))
+		return NULL;
+
+	return ctrl;
+}
+
+static void slim_ctrl_put(struct slim_controller *ctrl)
+{
+	if (ctrl)
+		put_device(&ctrl->dev);
+}
+
+#define slim_device_attr_gr NULL
+#define slim_device_uevent NULL
+static void slim_dev_release(struct device *dev)
+{
+	struct slim_device *sbdev = to_slim_device(dev);
+	slim_ctrl_put(sbdev->ctrl);
+}
+
+static struct device_type slim_dev_type = {
+	.groups		= slim_device_attr_gr,
+	.uevent		= slim_device_uevent,
+	.release	= slim_dev_release,
+};
+
+static void slim_report(struct work_struct *work)
+{
+	struct slim_driver *sbdrv;
+	struct slim_device *sbdev =
+			container_of(work, struct slim_device, wd);
+	if (!sbdev->dev.driver)
+		return;
+	/* check if device-up or down needs to be called */
+	if ((!sbdev->reported && !sbdev->notified) ||
+			(sbdev->reported && sbdev->notified))
+		return;
+
+	sbdrv = to_slim_driver(sbdev->dev.driver);
+	/*
+	 * address no longer valid, means device reported absent, whereas
+	 * address valid, means device reported present
+	 */
+	if (sbdev->notified && !sbdev->reported) {
+		sbdev->notified = false;
+		if (sbdrv->device_down)
+			sbdrv->device_down(sbdev);
+	} else if (!sbdev->notified && sbdev->reported) {
+		sbdev->notified = true;
+		if (sbdrv->device_up)
+			sbdrv->device_up(sbdev);
+	}
+}
+
+/*
+ * slim_add_device: Add a new device without registering board info.
+ * @ctrl: Controller to which this device is to be added.
+ * Called when device doesn't have an explicit client-driver to be probed, or
+ * the client-driver is a module installed dynamically.
+ */
+int slim_add_device(struct slim_controller *ctrl, struct slim_device *sbdev)
+{
+	sbdev->dev.bus = &slimbus_type;
+	sbdev->dev.parent = ctrl->dev.parent;
+	sbdev->dev.type = &slim_dev_type;
+	sbdev->dev.driver = NULL;
+	sbdev->ctrl = ctrl;
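+	/* take a controller reference; dropped in slim_dev_release() */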
+	slim_ctrl_get(ctrl);
+	dev_set_name(&sbdev->dev, "%s", sbdev->name);
+	mutex_init(&sbdev->sldev_reconf);
+	INIT_LIST_HEAD(&sbdev->mark_define);
+	INIT_LIST_HEAD(&sbdev->mark_suspend);
+	INIT_LIST_HEAD(&sbdev->mark_removal);
+	INIT_WORK(&sbdev->wd, slim_report);
+	mutex_lock(&ctrl->m_ctrl);
+	list_add_tail(&sbdev->dev_list, &ctrl->devs);
+	mutex_unlock(&ctrl->m_ctrl);
+	/* probe slave on this controller */
+	return device_register(&sbdev->dev);
+}
+EXPORT_SYMBOL_GPL(slim_add_device);
+
+struct sbi_boardinfo {
+	struct list_head	list;
+	struct slim_boardinfo	board_info;
+};
+
+static LIST_HEAD(board_list);
+static LIST_HEAD(slim_ctrl_list);
+static DEFINE_MUTEX(board_lock);
+
+/* If controller is not present, only add to boards list */
+static void slim_match_ctrl_to_boardinfo(struct slim_controller *ctrl,
+				struct slim_boardinfo *bi)
+{
+	int ret;
+	if (ctrl->nr != bi->bus_num)
+		return;
+
+	ret = slim_add_device(ctrl, bi->slim_slave);
+	if (ret != 0)
+		dev_err(ctrl->dev.parent, "can't create new device for %s\n",
+			bi->slim_slave->name);
+}
+
+/*
+ * slim_register_board_info: Board-initialization routine.
+ * @info: List of all devices on all controllers present on the board.
+ * @n: number of entries.
+ * This API enumerates the respective devices on the corresponding controller.
+ * Called from board-init function.
+ */
+int slim_register_board_info(struct slim_boardinfo const *info, unsigned n)
+{
+	struct sbi_boardinfo *bi;
+	int i;
+
+	bi = kzalloc(n * sizeof(*bi), GFP_KERNEL);
+	if (!bi)
+		return -ENOMEM;
+
+	for (i = 0; i < n; i++, bi++, info++) {
+		struct slim_controller *ctrl;
+
+		memcpy(&bi->board_info, info, sizeof(*info));
+		mutex_lock(&board_lock);
+		list_add_tail(&bi->list, &board_list);
+		list_for_each_entry(ctrl, &slim_ctrl_list, list)
+			slim_match_ctrl_to_boardinfo(ctrl, &bi->board_info);
+		mutex_unlock(&board_lock);
+	}
+	return 0;
+}
+EXPORT_SYMBOL_GPL(slim_register_board_info);
+
+/*
+ * slim_ctrl_add_boarddevs: Add devices registered by board-info
+ * @ctrl: Controller to which these devices are to be added.
+ * This API is called by the controller when it is up and running.
+ * If devices on a controller were registered before the controller,
+ * this will make sure that they get probed when the controller is up.
+ */
+void slim_ctrl_add_boarddevs(struct slim_controller *ctrl)
+{
+	struct sbi_boardinfo *bi;
+	mutex_lock(&board_lock);
+	list_add_tail(&ctrl->list, &slim_ctrl_list);
+	list_for_each_entry(bi, &board_list, list)
+		slim_match_ctrl_to_boardinfo(ctrl, &bi->board_info);
+	mutex_unlock(&board_lock);
+}
+EXPORT_SYMBOL_GPL(slim_ctrl_add_boarddevs);
+
+/*
+ * slim_busnum_to_ctrl: Map bus number to controller
+ * @bus_num: Bus number
+ * Returns controller representing this bus number
+ */
+struct slim_controller *slim_busnum_to_ctrl(u32 bus_num)
+{
+	struct slim_controller *ctrl;
+	mutex_lock(&board_lock);
+	list_for_each_entry(ctrl, &slim_ctrl_list, list)
+		if (bus_num == ctrl->nr) {
+			mutex_unlock(&board_lock);
+			return ctrl;
+		}
+	mutex_unlock(&board_lock);
+	return NULL;
+}
+EXPORT_SYMBOL_GPL(slim_busnum_to_ctrl);
+
+static int slim_register_controller(struct slim_controller *ctrl)
+{
+	int ret = 0;
+
+	/* Can't register until after driver model init */
+	if (WARN_ON(!slimbus_type.p)) {
+		ret = -EPROBE_DEFER;
+		goto out_list;
+	}
+
+	dev_set_name(&ctrl->dev, "sb-%d", ctrl->nr);
+	ctrl->dev.bus = &slimbus_type;
+	ctrl->dev.type = &slim_ctrl_type;
+	ctrl->num_dev = 0;
+	if (!ctrl->min_cg)
+		ctrl->min_cg = SLIM_MIN_CLK_GEAR;
+	if (!ctrl->max_cg)
+		ctrl->max_cg = SLIM_MAX_CLK_GEAR;
+	spin_lock_init(&ctrl->txn_lock);
+	mutex_init(&ctrl->m_ctrl);
+	mutex_init(&ctrl->sched.m_reconf);
+	ret = device_register(&ctrl->dev);
+	if (ret)
+		goto out_list;
+
+	dev_dbg(&ctrl->dev, "Bus [%s] registered:dev:%p\n", ctrl->name,
+							&ctrl->dev);
+
+	if (ctrl->nports) {
+		ctrl->ports = kzalloc(ctrl->nports * sizeof(struct slim_port),
+					GFP_KERNEL);
+		if (!ctrl->ports) {
+			ret = -ENOMEM;
+			goto err_port_failed;
+		}
+	}
+	if (ctrl->nchans) {
+		ctrl->chans = kzalloc(ctrl->nchans * sizeof(struct slim_ich),
+					GFP_KERNEL);
+		if (!ctrl->chans) {
+			ret = -ENOMEM;
+			goto err_chan_failed;
+		}
+
+		ctrl->sched.chc1 =
+			kzalloc(ctrl->nchans * sizeof(struct slim_ich *),
+			GFP_KERNEL);
+		if (!ctrl->sched.chc1) {
+			kfree(ctrl->chans);
+			ret = -ENOMEM;
+			goto err_chan_failed;
+		}
+		ctrl->sched.chc3 =
+			kzalloc(ctrl->nchans * sizeof(struct slim_ich *),
+			GFP_KERNEL);
+		if (!ctrl->sched.chc3) {
+			kfree(ctrl->sched.chc1);
+			kfree(ctrl->chans);
+			ret = -ENOMEM;
+			goto err_chan_failed;
+		}
+	}
+#ifdef DEBUG
+	ctrl->sched.slots = kzalloc(SLIM_SL_PER_SUPERFRAME, GFP_KERNEL);
+#endif
+	init_completion(&ctrl->pause_comp);
+
+	INIT_LIST_HEAD(&ctrl->devs);
+	ctrl->wq = create_singlethread_workqueue(dev_name(&ctrl->dev));
+	if (!ctrl->wq) {
+		/* ret would otherwise still be 0; don't report success */
+		ret = -ENOMEM;
+		goto err_workq_failed;
+	}
+
+	return 0;
+
+err_workq_failed:
+	kfree(ctrl->sched.chc3);
+	kfree(ctrl->sched.chc1);
+	kfree(ctrl->chans);
+err_chan_failed:
+	kfree(ctrl->ports);
+err_port_failed:
+	device_unregister(&ctrl->dev);
+out_list:
+	mutex_lock(&slim_lock);
+	idr_remove(&ctrl_idr, ctrl->nr);
+	mutex_unlock(&slim_lock);
+	return ret;
+}
+
+/* slim_remove_device: Remove the effect of slim_add_device() */
+void slim_remove_device(struct slim_device *sbdev)
+{
+	struct slim_controller *ctrl = sbdev->ctrl;
+	mutex_lock(&ctrl->m_ctrl);
+	list_del_init(&sbdev->dev_list);
+	mutex_unlock(&ctrl->m_ctrl);
+	device_unregister(&sbdev->dev);
+}
+EXPORT_SYMBOL_GPL(slim_remove_device);
+
+static void slim_ctrl_remove_device(struct slim_controller *ctrl,
+				struct slim_boardinfo *bi)
+{
+	if (ctrl->nr == bi->bus_num)
+		slim_remove_device(bi->slim_slave);
+}
+
+/*
+ * slim_del_controller: Controller tear-down.
+ * A controller added with the above API is torn down using this API.
+ */
+int slim_del_controller(struct slim_controller *ctrl)
+{
+	struct slim_controller *found;
+	struct sbi_boardinfo *bi;
+
+	/* First make sure that this bus was added */
+	mutex_lock(&slim_lock);
+	found = idr_find(&ctrl_idr, ctrl->nr);
+	mutex_unlock(&slim_lock);
+	if (found != ctrl)
+		return -EINVAL;
+
+	/* Remove all clients */
+	mutex_lock(&board_lock);
+	list_for_each_entry(bi, &board_list, list)
+		slim_ctrl_remove_device(ctrl, &bi->board_info);
+	mutex_unlock(&board_lock);
+
+	init_completion(&ctrl->dev_released);
+	device_unregister(&ctrl->dev);
+
+	wait_for_completion(&ctrl->dev_released);
+	list_del(&ctrl->list);
+	destroy_workqueue(ctrl->wq);
+	/* free bus id */
+	mutex_lock(&slim_lock);
+	idr_remove(&ctrl_idr, ctrl->nr);
+	mutex_unlock(&slim_lock);
+
+	kfree(ctrl->sched.chc1);
+	kfree(ctrl->sched.chc3);
+#ifdef DEBUG
+	kfree(ctrl->sched.slots);
+#endif
+	kfree(ctrl->chans);
+	kfree(ctrl->ports);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(slim_del_controller);
+
+/*
+ * slim_add_numbered_controller: Controller bring-up.
+ * @ctrl: Controller to be registered.
+ * A controller is registered with the framework using this API. ctrl->nr is
+ * the desired bus number for the controller. The function returns an error
+ * if that number is already in use.
+ */
+int slim_add_numbered_controller(struct slim_controller *ctrl)
+{
+	int	id;
+
+	mutex_lock(&slim_lock);
+	id = idr_alloc(&ctrl_idr, ctrl, ctrl->nr, ctrl->nr + 1, GFP_KERNEL);
+	mutex_unlock(&slim_lock);
+
+	if (id < 0)
+		return id;
+
+	ctrl->nr = id;
+	return slim_register_controller(ctrl);
+}
+EXPORT_SYMBOL_GPL(slim_add_numbered_controller);
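+
+/*
+ * Illustrative sketch (editor's note, not part of this patch): a bus driver
+ * would typically fill in its callbacks and resource counts before
+ * registering. All "my_*" names are hypothetical.
+ *
+ *	static struct slim_controller my_ctrl;
+ *
+ *	my_ctrl.nr = 0;
+ *	my_ctrl.nports = 8;
+ *	my_ctrl.nchans = 32;
+ *	my_ctrl.set_laddr = my_set_laddr;
+ *	my_ctrl.xfer_msg = my_xfer_msg;
+ *	ret = slim_add_numbered_controller(&my_ctrl);
+ */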
+
+/*
+ * slim_report_absent: Controller calls this function when a device reports
+ *	absent, or when the device cannot be communicated with
+ * @sbdev: Device that cannot be reached, or that sent report-absent
+ */
+void slim_report_absent(struct slim_device *sbdev)
+{
+	struct slim_controller *ctrl;
+	int i;
+	if (!sbdev)
+		return;
+	ctrl = sbdev->ctrl;
+	if (!ctrl)
+		return;
+	/* invalidate logical addresses */
+	mutex_lock(&ctrl->m_ctrl);
+	for (i = 0; i < ctrl->num_dev; i++) {
+		if (sbdev->laddr == ctrl->addrt[i].laddr)
+			ctrl->addrt[i].valid = false;
+	}
+	mutex_unlock(&ctrl->m_ctrl);
+	sbdev->reported = false;
+	queue_work(ctrl->wq, &sbdev->wd);
+}
+EXPORT_SYMBOL(slim_report_absent);
+
+static int slim_remove_ch(struct slim_controller *ctrl, struct slim_ich *slc);
+/*
+ * slim_framer_booted: This function is called by the controller after the
+ * active framer has booted (using the Bus Reset sequence, or after it has
+ * shut down and come back up). Components and devices on the bus may be in
+ * an undefined state; this function triggers their drivers to bring them
+ * back to the Reset state so that they can acquire sync, report present,
+ * and become operational again.
+ */
+void slim_framer_booted(struct slim_controller *ctrl)
+{
+	struct slim_device *sbdev;
+	struct list_head *pos, *next;
+	int i;
+
+	if (!ctrl)
+		return;
+
+	/* Since framer has rebooted, reset all data channels */
+	mutex_lock(&ctrl->sched.m_reconf);
+	for (i = 0; i < ctrl->nchans; i++) {
+		struct slim_ich *slc = &ctrl->chans[i];
+
+		if (slc->state > SLIM_CH_DEFINED)
+			slim_remove_ch(ctrl, slc);
+	}
+	mutex_unlock(&ctrl->sched.m_reconf);
+	mutex_lock(&ctrl->m_ctrl);
+	list_for_each_safe(pos, next, &ctrl->devs) {
+		struct slim_driver *sbdrv;
+		sbdev = list_entry(pos, struct slim_device, dev_list);
+		mutex_unlock(&ctrl->m_ctrl);
+		if (sbdev && sbdev->dev.driver) {
+			sbdrv = to_slim_driver(sbdev->dev.driver);
+			if (sbdrv->reset_device)
+				sbdrv->reset_device(sbdev);
+		}
+		mutex_lock(&ctrl->m_ctrl);
+	}
+	mutex_unlock(&ctrl->m_ctrl);
+}
+EXPORT_SYMBOL(slim_framer_booted);
+
+/*
+ * slim_msg_response: Deliver Message response received from a device to the
+ *	framework.
+ * @ctrl: Controller handle
+ * @reply: Reply received from the device
+ * @len: Length of the reply
+ * @tid: Transaction ID with which the framework can associate the reply.
+ * Called by the controller to inform the framework about a received response.
+ * This keeps the API asynchronous: the controller driver does not need to
+ * manage a second table beyond the TID-to-buffer mapping already maintained
+ * by the framework.
+ */
+void slim_msg_response(struct slim_controller *ctrl, u8 *reply, u8 tid, u8 len)
+{
+	int i;
+	unsigned long flags;
+	bool async;
+	struct slim_msg_txn *txn;
+
+	spin_lock_irqsave(&ctrl->txn_lock, flags);
+	txn = ctrl->txnt[tid];
+	if (txn == NULL || txn->rbuf == NULL) {
+		spin_unlock_irqrestore(&ctrl->txn_lock, flags);
+		if (txn == NULL)
+			dev_err(&ctrl->dev, "Got response to invalid TID:%d, len:%d",
+				tid, len);
+		else
+			dev_err(&ctrl->dev, "Invalid client buffer passed\n");
+		return;
+	}
+	async = txn->async;
+	for (i = 0; i < len; i++)
+		txn->rbuf[i] = reply[i];
+	if (txn->comp)
+		complete(txn->comp);
+	ctrl->txnt[tid] = NULL;
+	spin_unlock_irqrestore(&ctrl->txn_lock, flags);
+	if (async)
+		kfree(txn);
+}
+EXPORT_SYMBOL_GPL(slim_msg_response);
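+
+/*
+ * Illustrative sketch (editor's note, not part of this patch): in a
+ * controller's RX interrupt handler, a reply would be handed back to the
+ * framework roughly as below. The buffer layout is controller-specific and
+ * purely hypothetical here.
+ *
+ *	u8 tid = rx_buf[3];
+ *	u8 len = rx_len - 4;
+ *
+ *	slim_msg_response(ctrl, &rx_buf[4], tid, len);
+ */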
+
+static int slim_processtxn(struct slim_controller *ctrl,
+				struct slim_msg_txn *txn, bool need_tid)
+{
+	u8 i = 0;
+	int ret = 0;
+	unsigned long flags;
+
+	if (need_tid) {
+		spin_lock_irqsave(&ctrl->txn_lock, flags);
+		for (i = 0; i < ctrl->last_tid; i++) {
+			if (ctrl->txnt[i] == NULL)
+				break;
+		}
+		if (i >= ctrl->last_tid) {
+			if (ctrl->last_tid == 255) {
+				spin_unlock_irqrestore(&ctrl->txn_lock, flags);
+				return -ENOMEM;
+			}
+			ctrl->last_tid++;
+		}
+		ctrl->txnt[i] = txn;
+		txn->tid = i;
+		spin_unlock_irqrestore(&ctrl->txn_lock, flags);
+	}
+
+	ret = ctrl->xfer_msg(ctrl, txn);
+	return ret;
+}
+
+static int ctrl_getlogical_addr(struct slim_controller *ctrl, const u8 *eaddr,
+				u8 e_len, u8 *entry)
+{
+	u8 i;
+	for (i = 0; i < ctrl->num_dev; i++) {
+		if (ctrl->addrt[i].valid &&
+			memcmp(ctrl->addrt[i].eaddr, eaddr, e_len) == 0) {
+			*entry = i;
+			return 0;
+		}
+	}
+	return -ENXIO;
+}
+
+/*
+ * slim_assign_laddr: Assign logical address to a device enumerated.
+ * @ctrl: Controller with which device is enumerated.
+ * @e_addr: 6-byte enumeration address of the device.
+ * @e_len: buffer length for e_addr
+ * @laddr: Return logical address (if valid flag is false)
+ * @valid: true if laddr holds a valid address that the controller wants to
+ *	set for this enumeration address. Otherwise the framework sets the
+ *	index into the address table as the logical address.
+ * Called by controller in response to REPORT_PRESENT. Framework will assign
+ * a logical address to this enumeration address.
+ * Function returns -EXFULL to indicate that all logical addresses are already
+ * taken.
+ */
+int slim_assign_laddr(struct slim_controller *ctrl, const u8 *e_addr,
+				u8 e_len, u8 *laddr, bool valid)
+{
+	int ret;
+	u8 i = 0;
+	bool exists = false;
+	struct slim_device *sbdev;
+	struct list_head *pos, *next;
+
+	mutex_lock(&ctrl->m_ctrl);
+	/* already assigned */
+	if (ctrl_getlogical_addr(ctrl, e_addr, e_len, &i) == 0) {
+		*laddr = ctrl->addrt[i].laddr;
+		exists = true;
+	} else {
+		if (ctrl->num_dev >= 254) {
+			ret = -EXFULL;
+			goto ret_assigned_laddr;
+		}
+		for (i = 0; i < ctrl->num_dev; i++) {
+			if (ctrl->addrt[i].valid == false)
+				break;
+		}
+		if (i == ctrl->num_dev) {
+			struct slim_addrt *temp;
+
+			temp = krealloc(ctrl->addrt,
+					(ctrl->num_dev + 1) *
+					sizeof(struct slim_addrt),
+					GFP_KERNEL);
+			if (!temp) {
+				ret = -ENOMEM;
+				goto ret_assigned_laddr;
+			}
+			ctrl->addrt = temp;
+			ctrl->num_dev++;
+		}
+		memcpy(ctrl->addrt[i].eaddr, e_addr, e_len);
+		ctrl->addrt[i].valid = true;
+		/* Preferred address is index into table */
+		if (!valid)
+			*laddr = i;
+	}
+
+	ret = ctrl->set_laddr(ctrl, (const u8 *)&ctrl->addrt[i].eaddr, 6,
+				*laddr);
+	if (ret) {
+		ctrl->addrt[i].valid = false;
+		goto ret_assigned_laddr;
+	}
+	ctrl->addrt[i].laddr = *laddr;
+
+	dev_dbg(&ctrl->dev, "setting slimbus l-addr:%x\n", *laddr);
+ret_assigned_laddr:
+	mutex_unlock(&ctrl->m_ctrl);
+	if (exists || ret)
+		return ret;
+
+	pr_info("slimbus:%d laddr:0x%x, EAPC:0x%x:0x%x", ctrl->nr, *laddr,
+				e_addr[1], e_addr[2]);
+	mutex_lock(&ctrl->m_ctrl);
+	list_for_each_safe(pos, next, &ctrl->devs) {
+		sbdev = list_entry(pos, struct slim_device, dev_list);
+		if (memcmp(sbdev->e_addr, e_addr, 6) == 0) {
+			struct slim_driver *sbdrv;
+			sbdev->laddr = *laddr;
+			sbdev->reported = true;
+			if (sbdev->dev.driver) {
+				sbdrv = to_slim_driver(sbdev->dev.driver);
+				if (sbdrv->device_up)
+					queue_work(ctrl->wq, &sbdev->wd);
+			}
+			break;
+		}
+	}
+	mutex_unlock(&ctrl->m_ctrl);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(slim_assign_laddr);
+
+/*
+ * slim_get_logical_addr: Return the logical address of a slimbus device.
+ * @sb: client handle requesting the address.
+ * @e_addr: Enumeration address of the device.
+ * @e_len: Length of e_addr
+ * @laddr: output buffer to store the address
+ * context: can sleep
+ * -EINVAL is returned in case of invalid parameters, and -ENXIO is returned
+ *  if the device with this enumeration address is not found.
+ */
+int slim_get_logical_addr(struct slim_device *sb, const u8 *e_addr,
+				u8 e_len, u8 *laddr)
+{
+	int ret = 0;
+	u8 entry;
+	struct slim_controller *ctrl = sb->ctrl;
+	if (!ctrl || !laddr || !e_addr || e_len != 6)
+		return -EINVAL;
+	mutex_lock(&ctrl->m_ctrl);
+	ret = ctrl_getlogical_addr(ctrl, e_addr, e_len, &entry);
+	if (!ret)
+		*laddr = ctrl->addrt[entry].laddr;
+	mutex_unlock(&ctrl->m_ctrl);
+	if (ret == -ENXIO && ctrl->get_laddr) {
+		ret = ctrl->get_laddr(ctrl, e_addr, e_len, laddr);
+		if (!ret)
+			ret = slim_assign_laddr(ctrl, e_addr, e_len, laddr,
+						true);
+	}
+	return ret;
+}
+EXPORT_SYMBOL_GPL(slim_get_logical_addr);
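+
+/*
+ * Illustrative sketch (editor's note, not part of this patch): a client
+ * driver usually resolves its logical address from its 6-byte enumeration
+ * address before messaging. The address bytes are hypothetical.
+ *
+ *	const u8 e_addr[6] = {0x00, 0x01, 0x60, 0x00, 0x17, 0x02};
+ *	u8 laddr;
+ *	int ret;
+ *
+ *	ret = slim_get_logical_addr(sb, e_addr, 6, &laddr);
+ *	if (ret)
+ *		return ret;
+ */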
+
+static int slim_ele_access_sanity(struct slim_ele_access *msg, int oper,
+				u8 *rbuf, const u8 *wbuf, u8 len)
+{
+	if (!msg || msg->num_bytes > 16 || msg->start_offset + len > 0xC00)
+		return -EINVAL;
+	switch (oper) {
+	case SLIM_MSG_MC_REQUEST_VALUE:
+	case SLIM_MSG_MC_REQUEST_INFORMATION:
+		if (rbuf == NULL)
+			return -EINVAL;
+		return 0;
+	case SLIM_MSG_MC_CHANGE_VALUE:
+	case SLIM_MSG_MC_CLEAR_INFORMATION:
+		if (wbuf == NULL)
+			return -EINVAL;
+		return 0;
+	case SLIM_MSG_MC_REQUEST_CHANGE_VALUE:
+	case SLIM_MSG_MC_REQUEST_CLEAR_INFORMATION:
+		if (rbuf == NULL || wbuf == NULL)
+			return -EINVAL;
+		return 0;
+	default:
+		return -EINVAL;
+	}
+}
+
+static u16 slim_slicecodefromsize(u32 req)
+{
+	static const u8 codetosize[8] = {1, 2, 3, 4, 6, 8, 12, 16};
+
+	if (req >= 8)
+		return 0;
+	else
+		return codetosize[req];
+}
+
+static u16 slim_slicesize(u32 code)
+{
+	static const u8 sizetocode[16] = {
+		0, 1, 2, 3, 3, 4, 4, 5, 5, 5, 5, 6, 6, 6, 6, 7
+	};
+
+	if (code == 0)
+		code = 1;
+	if (code > 16)
+		code = 16;
+	return sizetocode[code - 1];
+}
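+
+/*
+ * Editor's note: the two helpers above translate between slice codes and
+ * slice sizes used in element access messages. Worked examples, tracing the
+ * lookup tables:
+ *
+ *	slim_slicesize(6) == 4		(a 6-byte access uses slice code 4)
+ *	slim_slicecodefromsize(4) == 6	(slice code 4 spans 6 bytes)
+ *	slim_slicesize(16) == 7		(the largest slice, 16 bytes)
+ */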
+
+
+/* Messaging APIs: unicast messaging APIs used by slimbus slave drivers */
+
+/*
+ * Message API access routines.
+ * @sb: client handle requesting element reads/writes.
+ * @msg: Input structure for start-offset, number of bytes to read.
+ * @rbuf: data buffer to be filled with values read.
+ * @len: data buffer size
+ * @wbuf: data buffer containing value/information to be written
+ * context: can sleep
+ * Returns:
+ * -EINVAL: Invalid parameters
+ * -ETIMEDOUT: If the controller could not complete the request. This may
+ *  happen if the bus lines are not clocked, the controller is not powered
+ *  on, or the slave with the given address is not enumerated/responding.
+ */
+int slim_request_val_element(struct slim_device *sb,
+				struct slim_ele_access *msg, u8 *buf, u8 len)
+{
+	struct slim_controller *ctrl = sb->ctrl;
+	if (!ctrl)
+		return -EINVAL;
+	return slim_xfer_msg(ctrl, sb, msg, SLIM_MSG_MC_REQUEST_VALUE, buf,
+			NULL, len);
+}
+EXPORT_SYMBOL_GPL(slim_request_val_element);
+
+int slim_request_inf_element(struct slim_device *sb,
+				struct slim_ele_access *msg, u8 *buf, u8 len)
+{
+	struct slim_controller *ctrl = sb->ctrl;
+	if (!ctrl)
+		return -EINVAL;
+	return slim_xfer_msg(ctrl, sb, msg, SLIM_MSG_MC_REQUEST_INFORMATION,
+			buf, NULL, len);
+}
+EXPORT_SYMBOL_GPL(slim_request_inf_element);
+
+int slim_change_val_element(struct slim_device *sb, struct slim_ele_access *msg,
+				const u8 *buf, u8 len)
+{
+	struct slim_controller *ctrl = sb->ctrl;
+	if (!ctrl)
+		return -EINVAL;
+	return slim_xfer_msg(ctrl, sb, msg, SLIM_MSG_MC_CHANGE_VALUE, NULL, buf,
+					len);
+}
+EXPORT_SYMBOL_GPL(slim_change_val_element);
+
+int slim_clear_inf_element(struct slim_device *sb, struct slim_ele_access *msg,
+				u8 *buf, u8 len)
+{
+	struct slim_controller *ctrl = sb->ctrl;
+	if (!ctrl)
+		return -EINVAL;
+	return slim_xfer_msg(ctrl, sb, msg, SLIM_MSG_MC_CLEAR_INFORMATION, NULL,
+					buf, len);
+}
+EXPORT_SYMBOL_GPL(slim_clear_inf_element);
+
+int slim_request_change_val_element(struct slim_device *sb,
+					struct slim_ele_access *msg, u8 *rbuf,
+					const u8 *wbuf, u8 len)
+{
+	struct slim_controller *ctrl = sb->ctrl;
+	if (!ctrl)
+		return -EINVAL;
+	return slim_xfer_msg(ctrl, sb, msg, SLIM_MSG_MC_REQUEST_CHANGE_VALUE,
+					rbuf, wbuf, len);
+}
+EXPORT_SYMBOL_GPL(slim_request_change_val_element);
+
+int slim_request_clear_inf_element(struct slim_device *sb,
+					struct slim_ele_access *msg, u8 *rbuf,
+					const u8 *wbuf, u8 len)
+{
+	struct slim_controller *ctrl = sb->ctrl;
+	if (!ctrl)
+		return -EINVAL;
+	return slim_xfer_msg(ctrl, sb, msg,
+					SLIM_MSG_MC_REQUEST_CLEAR_INFORMATION,
+					rbuf, wbuf, len);
+}
+EXPORT_SYMBOL_GPL(slim_request_clear_inf_element);
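+
+/*
+ * Illustrative sketch (editor's note, not part of this patch): a synchronous
+ * read-modify-write of a 4-byte value element using the wrappers above.
+ * The start offset and payload are hypothetical.
+ *
+ *	struct slim_ele_access msg;
+ *	u8 rbuf[4];
+ *	int ret;
+ *
+ *	msg.start_offset = 0x400;
+ *	msg.num_bytes = 4;
+ *	msg.comp = NULL;
+ *	ret = slim_request_val_element(sb, &msg, rbuf, 4);
+ *	if (!ret) {
+ *		rbuf[0] |= 0x1;
+ *		ret = slim_change_val_element(sb, &msg, rbuf, 4);
+ *	}
+ */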
+
+/*
+ * Broadcast message API:
+ * call this API directly with sbdev = NULL.
+ * For broadcast reads, make sure the buffers are big enough to hold
+ * replies from all logical addresses.
+ * Not all controllers support broadcast.
+ */
+int slim_xfer_msg(struct slim_controller *ctrl, struct slim_device *sbdev,
+			struct slim_ele_access *msg, u16 mc, u8 *rbuf,
+			const u8 *wbuf, u8 len)
+{
+	DECLARE_COMPLETION_ONSTACK(complete);
+	DEFINE_SLIM_LDEST_TXN(txn_stack, mc, len, 6, rbuf, wbuf, sbdev->laddr);
+	struct slim_msg_txn *txn;
+	int ret;
+	u16 sl, cur;
+	if (msg->comp && rbuf) {
+		txn = kmalloc(sizeof(*txn), GFP_KERNEL);
+		if (!txn)
+			return -ENOMEM;
+		*txn = txn_stack;
+		txn->async = true;
+		txn->comp = msg->comp;
+	} else {
+		txn = &txn_stack;
+		if (rbuf)
+			txn->comp = &complete;
+	}
+
+	ret = slim_ele_access_sanity(msg, mc, rbuf, wbuf, len);
+	if (ret)
+		goto xfer_err;
+
+	sl = slim_slicesize(len);
+	dev_dbg(&ctrl->dev, "SB xfer msg:os:%x, len:%d, MC:%x, sl:%x\n",
+				msg->start_offset, len, mc, sl);
+
+	cur = slim_slicecodefromsize(sl);
+	txn->ec = ((sl | (1 << 3)) | ((msg->start_offset & 0xFFF) << 4));
+
+	if (wbuf)
+		txn->rl += len;
+	if (rbuf) {
+		unsigned long flags;
+
+		txn->rl++;
+		ret = slim_processtxn(ctrl, txn, true);
+
+		/* sync read */
+		if (!ret && !msg->comp) {
+			ret = wait_for_completion_timeout(&complete, HZ);
+			if (!ret) {
+				dev_err(&ctrl->dev, "slimbus Read timed out");
+				spin_lock_irqsave(&ctrl->txn_lock, flags);
+				/* Invalidate the transaction */
+				ctrl->txnt[txn->tid] = NULL;
+				spin_unlock_irqrestore(&ctrl->txn_lock, flags);
+				ret = -ETIMEDOUT;
+			} else
+				ret = 0;
+		} else if (ret < 0 && !msg->comp) {
+			dev_err(&ctrl->dev, "slimbus Read error");
+			spin_lock_irqsave(&ctrl->txn_lock, flags);
+			/* Invalidate the transaction */
+			ctrl->txnt[txn->tid] = NULL;
+			spin_unlock_irqrestore(&ctrl->txn_lock, flags);
+		}
+
+	} else
+		ret = slim_processtxn(ctrl, txn, false);
+xfer_err:
+	return ret;
+}
+EXPORT_SYMBOL_GPL(slim_xfer_msg);
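+
+/*
+ * Illustrative sketch (editor's note, not part of this patch): for an
+ * asynchronous read, the client supplies its own completion in msg->comp;
+ * slim_xfer_msg() then returns right after queuing the transaction and the
+ * client waits on its own.
+ *
+ *	DECLARE_COMPLETION_ONSTACK(done);
+ *	struct slim_ele_access msg = {
+ *		.start_offset = 0x400,
+ *		.num_bytes = 4,
+ *		.comp = &done,
+ *	};
+ *	int ret;
+ *
+ *	ret = slim_request_val_element(sb, &msg, rbuf, 4);
+ *	if (!ret && !wait_for_completion_timeout(&done, HZ))
+ *		ret = -ETIMEDOUT;
+ */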
+
+/*
+ * User message:
+ * slim_user_msg: Send user message that is interpreted by destination device
+ * @sb: Client handle sending the message
+ * @la: Destination device for this user message
+ * @mt: Message Type (Source-referred, or Destination-referred)
+ * @mc: Message Code
+ * @msg: Message structure (start offset, number of bytes) to be sent
+ * @buf: data buffer to be sent
+ * @len: data buffer size in bytes
+ */
+int slim_user_msg(struct slim_device *sb, u8 la, u8 mt, u8 mc,
+				struct slim_ele_access *msg, u8 *buf, u8 len)
+{
+	if (!sb || !sb->ctrl || !msg || mt == SLIM_MSG_MT_CORE)
+		return -EINVAL;
+	if (!sb->ctrl->xfer_user_msg)
+		return -EPROTONOSUPPORT;
+	return sb->ctrl->xfer_user_msg(sb->ctrl, la, mt, mc, msg, buf, len);
+}
+EXPORT_SYMBOL(slim_user_msg);
+
+/*
+ * Queue bulk of message writes:
+ * slim_bulk_msg_write: Write bulk of messages (e.g. downloading FW)
+ * @sb: Client handle sending these messages
+ * @la: Destination device for these messages
+ * @mt: Message Type
+ * @mc: Message Code
+ * @msgs: List of messages to be written in bulk
+ * @n: Number of messages in the list
+ * @cb: Callback if client needs this to be non-blocking
+ * @ctx: Context for this callback
+ * If supported by controller, this message list will be sent in bulk to the HW
+ * If the client specifies this to be non-blocking, the callback will be
+ * called from atomic context.
+ */
+int slim_bulk_msg_write(struct slim_device *sb, u8 mt, u8 mc,
+			struct slim_val_inf msgs[], int n,
+			int (*comp_cb)(void *ctx, int err), void *ctx)
+{
+	int i, ret;
+
+	if (!sb || !sb->ctrl || !msgs)
+		return -EINVAL;
+	if (!sb->ctrl->xfer_bulk_wr) {
+		pr_warn("controller does not support bulk WR, serializing");
+		for (i = 0; i < n; i++) {
+			struct slim_ele_access ele;
+
+			ele.comp = NULL;
+			ele.start_offset = msgs[i].start_offset;
+			ele.num_bytes = msgs[i].num_bytes;
+			ret = slim_xfer_msg(sb->ctrl, sb, &ele, mc,
+					msgs[i].rbuf, msgs[i].wbuf,
+					ele.num_bytes);
+			if (ret)
+				return ret;
+		}
+		return 0;
+	}
+	return sb->ctrl->xfer_bulk_wr(sb->ctrl, sb->laddr, mt, mc, msgs, n,
+					comp_cb, ctx);
+}
+EXPORT_SYMBOL(slim_bulk_msg_write);
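+
+/*
+ * Illustrative sketch (editor's note, not part of this patch): queuing two
+ * 4-byte value-element writes as one bulk transfer. Offsets and the chunk
+ * buffers are hypothetical.
+ *
+ *	struct slim_val_inf msgs[2] = {
+ *		{ .start_offset = 0x400, .num_bytes = 4, .wbuf = chunk0 },
+ *		{ .start_offset = 0x404, .num_bytes = 4, .wbuf = chunk1 },
+ *	};
+ *	int ret;
+ *
+ *	ret = slim_bulk_msg_write(sb, SLIM_MSG_MT_CORE,
+ *			SLIM_MSG_MC_CHANGE_VALUE, msgs, 2, NULL, NULL);
+ */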
+
+/*
+ * slim_alloc_mgrports: Allocate port on manager side.
+ * @sb: device/client handle.
+ * @req: Port request type.
+ * @nports: Number of ports requested
+ * @rh: output buffer to store the port handles
+ * @hsz: size of buffer storing handles
+ * context: can sleep
+ * This port will be typically used by SW. e.g. client driver wants to receive
+ * some data from audio codec HW using a data channel.
+ * Port allocated using this API will be used to receive the data.
+ * If half-duplex ports are requested, two adjacent ports are allocated for
+ * 1 half-duplex port. So the handle-buffer size should be twice the number
+ * of half-duplex ports to be allocated.
+ * -EDQUOT is returned if all ports are in use.
+ */
+int slim_alloc_mgrports(struct slim_device *sb, enum slim_port_req req,
+				int nports, u32 *rh, int hsz)
+{
+	int i, j;
+	int ret = -EINVAL;
+	int nphysp = nports;
+	struct slim_controller *ctrl = sb->ctrl;
+
+	if (!rh || !ctrl)
+		return -EINVAL;
+	if (req == SLIM_REQ_HALF_DUP)
+		nphysp *= 2;
+	if (hsz/sizeof(u32) < nphysp)
+		return -EINVAL;
+	mutex_lock(&ctrl->m_ctrl);
+
+	for (i = 0; i < ctrl->nports; i++) {
+		bool multiok = true;
+		if (ctrl->ports[i].state != SLIM_P_FREE)
+			continue;
+		/* Start half duplex channel at even port */
+		if (req == SLIM_REQ_HALF_DUP && (i % 2))
+			continue;
+		/* Allocate ports contiguously for multi-ch */
+		if (ctrl->nports < (i + nphysp)) {
+			i = ctrl->nports;
+			break;
+		}
+		if (req == SLIM_REQ_MULTI_CH) {
+			multiok = true;
+			for (j = i; j < i + nphysp; j++) {
+				if (ctrl->ports[j].state != SLIM_P_FREE) {
+					multiok = false;
+					break;
+				}
+			}
+			if (!multiok)
+				continue;
+		}
+		break;
+	}
+	if (i >= ctrl->nports) {
+		ret = -EDQUOT;
+		goto alloc_err;
+	}
+	ret = 0;
+	for (j = i; j < i + nphysp; j++) {
+		ctrl->ports[j].state = SLIM_P_UNCFG;
+		ctrl->ports[j].req = req;
+		if (req == SLIM_REQ_HALF_DUP && (j % 2))
+			ctrl->ports[j].flow = SLIM_SINK;
+		else
+			ctrl->ports[j].flow = SLIM_SRC;
+		if (ctrl->alloc_port)
+			ret = ctrl->alloc_port(ctrl, j);
+		if (ret) {
+			for (; j >= i; j--)
+				ctrl->ports[j].state = SLIM_P_FREE;
+			goto alloc_err;
+		}
+		*rh++ = SLIM_PORT_HDL(SLIM_LA_MANAGER, 0, j);
+	}
+alloc_err:
+	mutex_unlock(&ctrl->m_ctrl);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(slim_alloc_mgrports);
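+
+/*
+ * Illustrative sketch (editor's note, not part of this patch): allocating
+ * one half-duplex manager port. Half-duplex consumes two adjacent physical
+ * ports, so the handle buffer holds two handles.
+ *
+ *	u32 ph[2];
+ *	int ret;
+ *
+ *	ret = slim_alloc_mgrports(sb, SLIM_REQ_HALF_DUP, 1, ph, sizeof(ph));
+ */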
+
+/* Deallocate the port(s) allocated using the API above */
+int slim_dealloc_mgrports(struct slim_device *sb, u32 *hdl, int nports)
+{
+	int i;
+	struct slim_controller *ctrl = sb->ctrl;
+
+	if (!ctrl || !hdl)
+		return -EINVAL;
+
+	mutex_lock(&ctrl->m_ctrl);
+
+	for (i = 0; i < nports; i++) {
+		u8 pn;
+		pn = SLIM_HDL_TO_PORT(hdl[i]);
+
+		if (pn >= ctrl->nports || ctrl->ports[pn].state == SLIM_P_CFG) {
+			int j, ret;
+			if (pn >= ctrl->nports) {
+				dev_err(&ctrl->dev, "invalid port number");
+				ret = -EINVAL;
+			} else {
+				dev_err(&ctrl->dev,
+					"Can't dealloc connected port:%d", pn);
+				ret = -EISCONN;
+			}
+			for (j = i - 1; j >= 0; j--) {
+				pn = SLIM_HDL_TO_PORT(hdl[j]);
+				ctrl->ports[pn].state = SLIM_P_UNCFG;
+			}
+			mutex_unlock(&ctrl->m_ctrl);
+			return ret;
+		}
+		if (ctrl->dealloc_port)
+			ctrl->dealloc_port(ctrl, pn);
+		ctrl->ports[pn].state = SLIM_P_FREE;
+	}
+	mutex_unlock(&ctrl->m_ctrl);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(slim_dealloc_mgrports);
+
+/*
+ * slim_config_mgrports: Configure manager side ports
+ * @sb: device/client handle.
+ * @ph: array of port handles for which this configuration is valid
+ * @nports: Number of ports in ph
+ * @cfg: configuration requested for port(s)
+ * Configure port settings if they are different than the default ones.
+ * Returns success if the config could be applied. Returns -EISCONN if the
+ * port is in use
+ */
+int slim_config_mgrports(struct slim_device *sb, u32 *ph, int nports,
+				struct slim_port_cfg *cfg)
+{
+	int i;
+	struct slim_controller *ctrl;
+
+	if (!sb || !ph || !nports || !sb->ctrl || !cfg)
+		return -EINVAL;
+
+	ctrl = sb->ctrl;
+	mutex_lock(&ctrl->sched.m_reconf);
+	for (i = 0; i < nports; i++) {
+		u8 pn = SLIM_HDL_TO_PORT(ph[i]);
+
+		if (ctrl->ports[pn].state == SLIM_P_CFG) {
+			mutex_unlock(&ctrl->sched.m_reconf);
+			return -EISCONN;
+		}
+		ctrl->ports[pn].cfg = *cfg;
+	}
+	mutex_unlock(&ctrl->sched.m_reconf);
+	return 0;
+}
+EXPORT_SYMBOL(slim_config_mgrports);
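+
+/*
+ * Illustrative sketch (editor's note, not part of this patch): raising the
+ * watermark on a freshly allocated manager port; the value is hypothetical
+ * and controller-specific.
+ *
+ *	struct slim_port_cfg cfg = { .watermark = 2, .port_opts = 0 };
+ *	int ret;
+ *
+ *	ret = slim_config_mgrports(sb, ph, 1, &cfg);
+ */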
+
+/*
+ * slim_get_slaveport: Get slave port handle
+ * @la: slave device logical address.
+ * @idx: port index at slave
+ * @rh: return handle
+ * @flw: Flow type (source or destination)
+ * This API only returns a slave port's representation as expected by slimbus
+ * driver. This port is not managed by the slimbus driver. Caller is expected
+ * to have visibility of this port since it's a device-port.
+ */
+int slim_get_slaveport(u8 la, int idx, u32 *rh, enum slim_port_flow flw)
+{
+	if (rh == NULL)
+		return -EINVAL;
+	*rh = SLIM_PORT_HDL(la, flw, idx);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(slim_get_slaveport);
+
+static int connect_port_ch(struct slim_controller *ctrl, u8 ch, u32 ph,
+				enum slim_port_flow flow)
+{
+	int ret;
+	u8 buf[2];
+	u32 la = SLIM_HDL_TO_LA(ph);
+	u8 pn = (u8)SLIM_HDL_TO_PORT(ph);
+	DEFINE_SLIM_LDEST_TXN(txn, 0, 2, 6, NULL, buf, la);
+
+	if (flow == SLIM_SRC)
+		txn.mc = SLIM_MSG_MC_CONNECT_SOURCE;
+	else
+		txn.mc = SLIM_MSG_MC_CONNECT_SINK;
+	buf[0] = pn;
+	buf[1] = ctrl->chans[ch].chan;
+	if (la == SLIM_LA_MANAGER)
+		ctrl->ports[pn].flow = flow;
+	ret = slim_processtxn(ctrl, &txn, false);
+	if (!ret && la == SLIM_LA_MANAGER)
+		ctrl->ports[pn].state = SLIM_P_CFG;
+	return ret;
+}
+
+static int disconnect_port_ch(struct slim_controller *ctrl, u32 ph)
+{
+	int ret;
+	u32 la = SLIM_HDL_TO_LA(ph);
+	u8 pn = (u8)SLIM_HDL_TO_PORT(ph);
+	DEFINE_SLIM_LDEST_TXN(txn, 0, 1, 5, NULL, &pn, la);
+
+	txn.mc = SLIM_MSG_MC_DISCONNECT_PORT;
+	ret = slim_processtxn(ctrl, &txn, false);
+	if (ret)
+		return ret;
+	if (la == SLIM_LA_MANAGER) {
+		ctrl->ports[pn].state = SLIM_P_UNCFG;
+		ctrl->ports[pn].cfg.watermark = 0;
+		ctrl->ports[pn].cfg.port_opts = 0;
+		ctrl->ports[pn].ch = NULL;
+	}
+	return 0;
+}
+
+/*
+ * slim_connect_src: Connect source port to channel.
+ * @sb: client handle
+ * @srch: source handle to be connected to this channel
+ * @chanh: Channel with which the port needs to be associated.
+ * Per slimbus specification, a channel may have 1 source port.
+ * Channel specified in chanh needs to be allocated first.
+ * Returns -EALREADY if source is already configured for this channel.
+ * Returns -ENOTCONN if channel is not allocated
+ * Returns -EINVAL if invalid direction is specified for non-manager port,
+ * or if the manager side port number is out of bounds, or in incorrect state
+ */
+int slim_connect_src(struct slim_device *sb, u32 srch, u16 chanh)
+{
+	struct slim_controller *ctrl = sb->ctrl;
+	int ret;
+	u8 chan = SLIM_HDL_TO_CHIDX(chanh);
+	struct slim_ich *slc = &ctrl->chans[chan];
+	enum slim_port_flow flow = SLIM_HDL_TO_FLOW(srch);
+	u8 la = SLIM_HDL_TO_LA(srch);
+	u8 pn = SLIM_HDL_TO_PORT(srch);
+
+	/* manager ports don't have direction when they are allocated */
+	if (la != SLIM_LA_MANAGER && flow != SLIM_SRC)
+		return -EINVAL;
+
+	mutex_lock(&ctrl->sched.m_reconf);
+
+	if (la == SLIM_LA_MANAGER) {
+		if (pn >= ctrl->nports ||
+			ctrl->ports[pn].state != SLIM_P_UNCFG) {
+			ret = -EINVAL;
+			goto connect_src_err;
+		}
+	}
+
+	if (slc->state == SLIM_CH_FREE) {
+		ret = -ENOTCONN;
+		goto connect_src_err;
+	}
+	/*
+	 * Once a channel is removed, its ports can be considered disconnected,
+	 * so they can be reassigned. The source port is zeroed
+	 * when the channel is deallocated.
+	 */
+	if (slc->srch) {
+		ret = -EALREADY;
+		goto connect_src_err;
+	}
+	if (la == SLIM_LA_MANAGER)
+		ctrl->ports[pn].ch = &slc->prop;
+	ret = connect_port_ch(ctrl, chan, srch, SLIM_SRC);
+
+	if (!ret)
+		slc->srch = srch;
+
+connect_src_err:
+	mutex_unlock(&ctrl->sched.m_reconf);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(slim_connect_src);
+
+/*
+ * slim_connect_sink: Connect sink port(s) to channel.
+ * @sb: client handle
+ * @sinkh: sink handle(s) to be connected to this channel
+ * @nsink: number of sinks
+ * @chanh: Channel with which the ports need to be associated.
+ * Per slimbus specification, a channel may have multiple sink-ports.
+ * Channel specified in chanh needs to be allocated first.
+ * Returns -EALREADY if sink is already configured for this channel.
+ * Returns -ENOTCONN if channel is not allocated
+ * Returns -EINVAL if invalid parameters are passed, or invalid direction is
+ * specified for non-manager port, or if the manager side port number is out of
+ * bounds, or in incorrect state
+ */
+int slim_connect_sink(struct slim_device *sb, u32 *sinkh, int nsink, u16 chanh)
+{
+	struct slim_controller *ctrl = sb->ctrl;
+	int j;
+	u32 *sinks;
+	int ret = 0;
+	u8 chan = SLIM_HDL_TO_CHIDX(chanh);
+	struct slim_ich *slc = &ctrl->chans[chan];
+
+	if (!sinkh || !nsink)
+		return -EINVAL;
+
+	mutex_lock(&ctrl->sched.m_reconf);
+
+	/*
+	 * Once a channel is removed, its ports can be considered disconnected,
+	 * so they can be reassigned. Sink ports are freed when the channel
+	 * is deallocated.
+	 */
+	if (slc->state == SLIM_CH_FREE) {
+		ret = -ENOTCONN;
+		goto connect_sink_err;
+	}
+
+	for (j = 0; j < nsink; j++) {
+		enum slim_port_flow flow = SLIM_HDL_TO_FLOW(sinkh[j]);
+		u8 la = SLIM_HDL_TO_LA(sinkh[j]);
+		u8 pn = SLIM_HDL_TO_PORT(sinkh[j]);
+		if (la != SLIM_LA_MANAGER && flow != SLIM_SINK)
+			ret = -EINVAL;
+		else if (la == SLIM_LA_MANAGER &&
+				(pn >= ctrl->nports ||
+				ctrl->ports[pn].state != SLIM_P_UNCFG))
+				ret = -EINVAL;
+		else {
+			if (la == SLIM_LA_MANAGER)
+				ctrl->ports[pn].ch = &slc->prop;
+			ret = connect_port_ch(ctrl, chan, sinkh[j], SLIM_SINK);
+		}
+		if (ret) {
+			for (j = j - 1; j >= 0; j--)
+				disconnect_port_ch(ctrl, sinkh[j]);
+			goto connect_sink_err;
+		}
+	}
+
+	sinks = krealloc(slc->sinkh, sizeof(u32) * (slc->nsink + nsink),
+				GFP_KERNEL);
+	if (!sinks) {
+		ret = -ENOMEM;
+		for (j = 0; j < nsink; j++)
+			disconnect_port_ch(ctrl, sinkh[j]);
+		goto connect_sink_err;
+	}
+	slc->sinkh = sinks;
+
+	memcpy(slc->sinkh + slc->nsink, sinkh, (sizeof(u32) * nsink));
+	slc->nsink += nsink;
+
+connect_sink_err:
+	mutex_unlock(&ctrl->sched.m_reconf);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(slim_connect_sink);
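+
+/*
+ * Illustrative sketch (editor's note, not part of this patch): wiring a data
+ * channel from a manager source port (srcph, from slim_alloc_mgrports()) to
+ * a slave sink port. Error handling is omitted and the slave port index is
+ * hypothetical.
+ *
+ *	u16 chanh;
+ *	u32 sinkh;
+ *	int ret;
+ *
+ *	ret = slim_alloc_ch(sb, &chanh);
+ *	ret = slim_get_slaveport(sb->laddr, 0, &sinkh, SLIM_SINK);
+ *	ret = slim_connect_src(sb, srcph, chanh);
+ *	ret = slim_connect_sink(sb, &sinkh, 1, chanh);
+ */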
+
+/*
+ * slim_disconnect_ports: Disconnect port(s) from channel
+ * @sb: client handle
+ * @ph: ports to be disconnected
+ * @nph: number of ports.
+ * Disconnects ports from a channel.
+ */
+int slim_disconnect_ports(struct slim_device *sb, u32 *ph, int nph)
+{
+	struct slim_controller *ctrl = sb->ctrl;
+	int i;
+
+	mutex_lock(&ctrl->sched.m_reconf);
+
+	for (i = 0; i < nph; i++)
+		disconnect_port_ch(ctrl, ph[i]);
+	mutex_unlock(&ctrl->sched.m_reconf);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(slim_disconnect_ports);
+
+/*
+ * slim_port_xfer: Schedule buffer to be transferred/received using port-handle.
+ * @sb: client handle
+ * @ph: port-handle
+ * @iobuf: buffer to be transferred or populated
+ * @len: buffer size.
+ * @comp: completion signal to indicate transfer done or error.
+ * context: can sleep
+ * Returns number of bytes transferred/received if used synchronously.
+ * Will return 0 if used asynchronously.
+ * Client will call slim_port_get_xfer_status to get error and/or number of
+ * bytes transferred if used asynchronously.
+ */
+int slim_port_xfer(struct slim_device *sb, u32 ph, phys_addr_t iobuf, u32 len,
+				struct completion *comp)
+{
+	struct slim_controller *ctrl = sb->ctrl;
+	u8 pn = SLIM_HDL_TO_PORT(ph);
+
+	if (!ctrl->port_xfer)
+		return -EPROTONOSUPPORT;
+	dev_dbg(&ctrl->dev, "port xfer: num:%d", pn);
+	return ctrl->port_xfer(ctrl, pn, iobuf, len, comp);
+}
+EXPORT_SYMBOL_GPL(slim_port_xfer);
+
+/*
+ * slim_port_get_xfer_status: Poll for port transfers, or get transfer status
+ *	after completion is done.
+ * @sb: client handle
+ * @ph: port-handle
+ * @done_buf: return pointer (iobuf from slim_port_xfer) which is processed.
+ * @done_len: Number of bytes transferred.
+ * This can be called when the port_xfer completion is signalled.
+ * The API will return port transfer error (underflow/overflow/disconnect)
+ * and/or done_len will reflect number of bytes transferred. Note that
+ * done_len may be valid even if port error (overflow/underflow) has happened.
+ * e.g. If the transfer was scheduled with a few bytes to be transferred and
+ * client has not supplied more data to be transferred, done_len will indicate
+ * number of bytes transferred with underflow error. To avoid frequent underflow
+ * errors, multiple transfers can be queued (e.g. ping-pong buffers) so that
+ * channel has data to be transferred even if client is not ready to transfer
+ * data all the time. done_buf will indicate address of the last buffer
+ * processed from the multiple transfers.
+ */
+enum slim_port_err slim_port_get_xfer_status(struct slim_device *sb, u32 ph,
+			phys_addr_t *done_buf, u32 *done_len)
+{
+	struct slim_controller *ctrl = sb->ctrl;
+	u8 pn = SLIM_HDL_TO_PORT(ph);
+	u32 la = SLIM_HDL_TO_LA(ph);
+	enum slim_port_err err;
+	dev_dbg(&ctrl->dev, "get status port num:%d", pn);
+	/*
+	 * The framework only has insight into ports managed by the manager's
+	 * ported device, not into ports owned by a slave
+	 */
+	if (la != SLIM_LA_MANAGER) {
+		if (done_buf)
+			*done_buf = 0;
+		if (done_len)
+			*done_len = 0;
+		return SLIM_P_NOT_OWNED;
+	}
+	err = ctrl->port_xfer_status(ctrl, pn, done_buf, done_len);
+	if (err == SLIM_P_INPROGRESS)
+		err = ctrl->ports[pn].err;
+	return err;
+}
+EXPORT_SYMBOL_GPL(slim_port_get_xfer_status);
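+
+/*
+ * Illustrative sketch (editor's note, not part of this patch): queue a
+ * buffer on a manager port, wait for its completion, then collect status.
+ * For ping-pong operation a second buffer would be queued before waiting.
+ *
+ *	phys_addr_t done_buf;
+ *	u32 done_len;
+ *	enum slim_port_err perr;
+ *	int ret;
+ *	DECLARE_COMPLETION_ONSTACK(xfer_done);
+ *
+ *	ret = slim_port_xfer(sb, ph[0], buf_phys, 128, &xfer_done);
+ *	wait_for_completion(&xfer_done);
+ *	perr = slim_port_get_xfer_status(sb, ph[0], &done_buf, &done_len);
+ */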
+
+static void slim_add_ch(struct slim_controller *ctrl, struct slim_ich *slc)
+{
+	struct slim_ich **arr;
+	int i, j;
+	int *len;
+	int sl = slc->seglen << slc->rootexp;
+	/* Channel is already active and other end is transmitting data */
+	if (slc->state >= SLIM_CH_ACTIVE)
+		return;
+	if (slc->coeff == SLIM_COEFF_1) {
+		arr = ctrl->sched.chc1;
+		len = &ctrl->sched.num_cc1;
+	} else {
+		arr = ctrl->sched.chc3;
+		len = &ctrl->sched.num_cc3;
+		sl *= 3;
+	}
+
+	*len += 1;
+
+	/* Insert the channel based on rootexp and seglen */
+	for (i = 0; i < *len - 1; i++) {
+		/*
+		 * Primary key: exp low to high.
+		 * Secondary key: seglen: high to low
+		 */
+		if ((slc->rootexp > arr[i]->rootexp) ||
+			((slc->rootexp == arr[i]->rootexp) &&
+			(slc->seglen < arr[i]->seglen)))
+			continue;
+		else
+			break;
+	}
+	for (j = *len - 1; j > i; j--)
+		arr[j] = arr[j - 1];
+	arr[i] = slc;
+	if (!ctrl->allocbw)
+		ctrl->sched.usedslots += sl;
+}
+
+static int slim_remove_ch(struct slim_controller *ctrl, struct slim_ich *slc)
+{
+	struct slim_ich **arr;
+	int i;
+	u32 la, ph;
+	int *len;
+	if (slc->coeff == SLIM_COEFF_1) {
+		arr = ctrl->sched.chc1;
+		len = &ctrl->sched.num_cc1;
+	} else {
+		arr = ctrl->sched.chc3;
+		len = &ctrl->sched.num_cc3;
+	}
+
+	for (i = 0; i < *len; i++) {
+		if (arr[i] == slc)
+			break;
+	}
+	if (i >= *len)
+		return -EXFULL;
+	for (; i < *len - 1; i++)
+		arr[i] = arr[i + 1];
+	*len -= 1;
+	arr[*len] = NULL;
+
+	slc->state = SLIM_CH_ALLOCATED;
+	slc->def = 0;
+	slc->newintr = 0;
+	slc->newoff = 0;
+	for (i = 0; i < slc->nsink; i++) {
+		ph = slc->sinkh[i];
+		la = SLIM_HDL_TO_LA(ph);
+		/*
+		 * For ports managed by manager's ported device, no need to send
+		 * disconnect. It is client's responsibility to call disconnect
+		 * on ports owned by the slave device
+		 */
+		if (la == SLIM_LA_MANAGER) {
+			ctrl->ports[SLIM_HDL_TO_PORT(ph)].state = SLIM_P_UNCFG;
+			ctrl->ports[SLIM_HDL_TO_PORT(ph)].ch = NULL;
+		}
+	}
+
+	ph = slc->srch;
+	la = SLIM_HDL_TO_LA(ph);
+	if (la == SLIM_LA_MANAGER) {
+		u8 pn = SLIM_HDL_TO_PORT(ph);
+
+		ctrl->ports[pn].state = SLIM_P_UNCFG;
+		ctrl->ports[pn].cfg.watermark = 0;
+		ctrl->ports[pn].cfg.port_opts = 0;
+	}
+
+	kfree(slc->sinkh);
+	slc->sinkh = NULL;
+	slc->srch = 0;
+	slc->nsink = 0;
+	return 0;
+}
+
+static u32 slim_calc_prrate(struct slim_controller *ctrl, struct slim_ch *prop)
+{
+	u32 rate = 0, rate4k = 0, rate11k = 0;
+	u32 exp = 0;
+	u32 pr = 0;
+	bool exact = true;
+	bool done = false;
+	enum slim_ch_rate ratefam;
+
+	if (prop->prot >= SLIM_ASYNC_SMPLX)
+		return 0;
+	if (prop->baser == SLIM_RATE_1HZ) {
+		rate = prop->ratem / 4000;
+		rate4k = rate;
+		if (rate * 4000 == prop->ratem)
+			ratefam = SLIM_RATE_4000HZ;
+		else {
+			rate = prop->ratem / 11025;
+			rate11k = rate;
+			if (rate * 11025 == prop->ratem)
+				ratefam = SLIM_RATE_11025HZ;
+			else
+				ratefam = SLIM_RATE_1HZ;
+		}
+	} else {
+		ratefam = prop->baser;
+		rate = prop->ratem;
+	}
+	if (ratefam == SLIM_RATE_1HZ) {
+		exact = false;
+		if ((rate4k + 1) * 4000 < (rate11k + 1) * 11025) {
+			rate = rate4k + 1;
+			ratefam = SLIM_RATE_4000HZ;
+		} else {
+			rate = rate11k + 1;
+			ratefam = SLIM_RATE_11025HZ;
+		}
+	}
+	/* convert rate to coeff-exp */
+	while (!done) {
+		while ((rate & 0x1) != 0x1) {
+			rate >>= 1;
+			exp++;
+		}
+		if (rate > 3) {
+			/* roundup if not exact */
+			rate++;
+			exact = false;
+		} else
+			done = true;
+	}
+	if (ratefam == SLIM_RATE_4000HZ) {
+		if (rate == 1)
+			pr = 0x10;
+		else {
+			pr = 0;
+			exp++;
+		}
+	} else {
+		pr = 8;
+		exp++;
+	}
+	if (exp <= 7) {
+		pr |= exp;
+		if (exact)
+			pr |= 0x80;
+	} else
+		pr = 0;
+	return pr;
+}
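+
+/*
+ * Editor's note: worked example for the presence-rate computation above.
+ * For a 48 kHz channel (baser == SLIM_RATE_4000HZ, ratem == 12):
+ * 12 = 3 << 2, so two right-shifts leave rate = 3 with exp = 2. Since
+ * rate != 1, the 4 kHz branch sets pr = 0 and bumps exp to 3; the rate is
+ * exact, so the result is pr = 0x80 | 0x03 = 0x83.
+ */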
+
+static int slim_nextdefine_ch(struct slim_device *sb, u8 chan)
+{
+	struct slim_controller *ctrl = sb->ctrl;
+	u32 chrate = 0;
+	u32 exp = 0;
+	u32 coeff = 0;
+	bool exact = true;
+	bool done = false;
+	int ret = 0;
+	struct slim_ich *slc = &ctrl->chans[chan];
+	struct slim_ch *prop = &slc->prop;
+
+	slc->prrate = slim_calc_prrate(ctrl, prop);
+	dev_dbg(&ctrl->dev, "ch:%d, chan PR rate:%x\n", chan, slc->prrate);
+	if (prop->baser == SLIM_RATE_4000HZ)
+		chrate = 4000 * prop->ratem;
+	else if (prop->baser == SLIM_RATE_11025HZ)
+		chrate = 11025 * prop->ratem;
+	else
+		chrate = prop->ratem;
+	/* max allowed sample freq = 768 seg/frame */
+	if (chrate > 3600000)
+		return -EDQUOT;
+	if (prop->baser == SLIM_RATE_4000HZ &&
+			ctrl->a_framer->superfreq == 4000)
+		coeff = prop->ratem;
+	else if (prop->baser == SLIM_RATE_11025HZ &&
+			ctrl->a_framer->superfreq == 3675)
+		coeff = 3 * prop->ratem;
+	else {
+		u32 tempr = 0;
+		tempr = chrate * SLIM_CL_PER_SUPERFRAME_DIV8;
+		coeff = tempr / ctrl->a_framer->rootfreq;
+		if (coeff * ctrl->a_framer->rootfreq != tempr) {
+			coeff++;
+			exact = false;
+		}
+	}
+
+	/* convert coeff to coeff-exponent */
+	exp = 0;
+	while (!done) {
+		while ((coeff & 0x1) != 0x1) {
+			coeff >>= 1;
+			exp++;
+		}
+		if (coeff > 3) {
+			coeff++;
+			exact = false;
+		} else
+			done = true;
+	}
+	if (prop->prot == SLIM_HARD_ISO && !exact)
+		return -EPROTONOSUPPORT;
+	else if (prop->prot == SLIM_AUTO_ISO) {
+		if (exact)
+			prop->prot = SLIM_HARD_ISO;
+		else
+			prop->prot = SLIM_PUSH;
+	}
+	slc->rootexp = exp;
+	slc->seglen = prop->sampleszbits/SLIM_CL_PER_SL;
+	if (prop->prot != SLIM_HARD_ISO)
+		slc->seglen++;
+	if (prop->prot >= SLIM_EXT_SMPLX)
+		slc->seglen++;
+	/* convert coeff to enum */
+	if (coeff == 1) {
+		if (exp > 9)
+			ret = -EIO;
+		coeff = SLIM_COEFF_1;
+	} else {
+		if (exp > 8)
+			ret = -EIO;
+		coeff = SLIM_COEFF_3;
+	}
+	slc->coeff = coeff;
+
+	return ret;
+}
+
+/*
+ * slim_alloc_ch: Allocate a slimbus channel and return its handle.
+ * @sb: client handle.
+ * @chanh: return channel handle
+ * Slimbus channels are limited to 256 per specification.
+ * -EXFULL is returned if all channels are in use.
+ * Although slimbus specification supports 256 channels, a controller may not
+ * support that many channels.
+ */
+int slim_alloc_ch(struct slim_device *sb, u16 *chanh)
+{
+	struct slim_controller *ctrl = sb->ctrl;
+	u16 i;
+
+	if (!ctrl)
+		return -EINVAL;
+	mutex_lock(&ctrl->sched.m_reconf);
+	for (i = 0; i < ctrl->nchans; i++) {
+		if (ctrl->chans[i].state == SLIM_CH_FREE)
+			break;
+	}
+	if (i >= ctrl->nchans) {
+		mutex_unlock(&ctrl->sched.m_reconf);
+		return -EXFULL;
+	}
+	*chanh = i;
+	ctrl->chans[i].nextgrp = 0;
+	ctrl->chans[i].state = SLIM_CH_ALLOCATED;
+	ctrl->chans[i].chan = (u8)(ctrl->reserved + i);
+
+	mutex_unlock(&ctrl->sched.m_reconf);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(slim_alloc_ch);
+
+/*
+ * slim_query_ch: Get a reference-counted handle for a channel number. Every
+ * channel is reference-counted (at most one user as producer, the others as
+ * consumers).
+ * @sb: client handle
+ * @chan: slimbus channel number
+ * @chanh: return channel handle
+ * If the requested channel number is not in use, it is allocated and its
+ * reference count is set to one. If the channel was already allocated, this
+ * API returns a handle to that channel and increments the reference count.
+ * -EXFULL is returned if all channels are in use
+ */
+int slim_query_ch(struct slim_device *sb, u8 ch, u16 *chanh)
+{
+	struct slim_controller *ctrl = sb->ctrl;
+	u16 i, j;
+	int ret = 0;
+	if (!ctrl || !chanh)
+		return -EINVAL;
+	mutex_lock(&ctrl->sched.m_reconf);
+	/* start with modulo number */
+	i = ch % ctrl->nchans;
+
+	for (j = 0; j < ctrl->nchans; j++) {
+		if (ctrl->chans[i].chan == ch) {
+			*chanh = i;
+			ctrl->chans[i].ref++;
+			if (ctrl->chans[i].state == SLIM_CH_FREE)
+				ctrl->chans[i].state = SLIM_CH_ALLOCATED;
+			goto query_out;
+		}
+		i = (i + 1) % ctrl->nchans;
+	}
+
+	/* Channel not in table yet */
+	ret = -EXFULL;
+	for (j = 0; j < ctrl->nchans; j++) {
+		if (ctrl->chans[i].state == SLIM_CH_FREE) {
+			ctrl->chans[i].state =
+				SLIM_CH_ALLOCATED;
+			*chanh = i;
+			ctrl->chans[i].ref++;
+			ctrl->chans[i].chan = ch;
+			ctrl->chans[i].nextgrp = 0;
+			ret = 0;
+			break;
+		}
+		i = (i + 1) % ctrl->nchans;
+	}
+query_out:
+	mutex_unlock(&ctrl->sched.m_reconf);
+	dev_dbg(&ctrl->dev, "query ch:%d,hdl:%d,ref:%d,ret:%d",
+				ch, i, ctrl->chans[i].ref, ret);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(slim_query_ch);
+
+/*
+ * slim_dealloc_ch: Deallocate channel allocated using the API above
+ * -EISCONN is returned on an attempt to deallocate a channel without
+ *  removing it first.
+ * -ENOTCONN is returned if deallocation is attempted on a channel that is
+ *  not allocated.
+ */
+int slim_dealloc_ch(struct slim_device *sb, u16 chanh)
+{
+	struct slim_controller *ctrl = sb->ctrl;
+	u8 chan = SLIM_HDL_TO_CHIDX(chanh);
+	struct slim_ich *slc;
+
+	if (!ctrl)
+		return -EINVAL;
+	slc = &ctrl->chans[chan];
+
+	mutex_lock(&ctrl->sched.m_reconf);
+	if (slc->state == SLIM_CH_FREE) {
+		mutex_unlock(&ctrl->sched.m_reconf);
+		return -ENOTCONN;
+	}
+	if (slc->ref > 1) {
+		slc->ref--;
+		mutex_unlock(&ctrl->sched.m_reconf);
+		dev_dbg(&ctrl->dev, "remove chan:%d,hdl:%d,ref:%d",
+					slc->chan, chanh, slc->ref);
+		return 0;
+	}
+	if (slc->state >= SLIM_CH_PENDING_ACTIVE) {
+		dev_err(&ctrl->dev, "Channel:%d should be removed first", chan);
+		mutex_unlock(&ctrl->sched.m_reconf);
+		return -EISCONN;
+	}
+	slc->ref--;
+	slc->state = SLIM_CH_FREE;
+	mutex_unlock(&ctrl->sched.m_reconf);
+	dev_dbg(&ctrl->dev, "remove chan:%d,hdl:%d,ref:%d",
+				slc->chan, chanh, slc->ref);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(slim_dealloc_ch);
+
+/*
+ * slim_get_ch_state: Channel state.
+ * This API returns the channel's state (active, suspended, inactive etc)
+ */
+enum slim_ch_state slim_get_ch_state(struct slim_device *sb, u16 chanh)
+{
+	u8 chan = SLIM_HDL_TO_CHIDX(chanh);
+	struct slim_ich *slc = &sb->ctrl->chans[chan];
+	return slc->state;
+}
+EXPORT_SYMBOL_GPL(slim_get_ch_state);
+
+/*
+ * slim_define_ch: Define a channel. This API defines channel parameters for
+ *	the given channel(s).
+ * @sb: client handle.
+ * @prop: slim_ch structure with channel parameters desired to be used.
+ * @chanh: list of channels to be defined.
+ * @nchan: number of channels in a group (1 if grp is false)
+ * @grp: Are the channels grouped
+ * @grph: return group handle if grouping of channels is desired.
+ * Channels can be grouped if multiple channels use the same parameters
+ * (e.g. 5.1 audio has 6 channels with identical parameters; they can all be
+ * grouped under one handle, avoiding repeated calls to this API).
+ * -EISCONN is returned if channel is already used with different parameters.
+ * -ENXIO is returned if the channel is not yet allocated.
+ */
+int slim_define_ch(struct slim_device *sb, struct slim_ch *prop, u16 *chanh,
+			u8 nchan, bool grp, u16 *grph)
+{
+	struct slim_controller *ctrl = sb->ctrl;
+	int i, ret = 0;
+
+	if (!ctrl || !chanh || !prop || !nchan)
+		return -EINVAL;
+	mutex_lock(&ctrl->sched.m_reconf);
+	for (i = 0; i < nchan; i++) {
+		u8 chan = SLIM_HDL_TO_CHIDX(chanh[i]);
+		struct slim_ich *slc = &ctrl->chans[chan];
+		dev_dbg(&ctrl->dev, "define_ch: ch:%d, state:%d", chan,
+				(int)ctrl->chans[chan].state);
+		if (slc->state < SLIM_CH_ALLOCATED) {
+			ret = -ENXIO;
+			goto err_define_ch;
+		}
+		if (slc->state >= SLIM_CH_DEFINED && slc->ref >= 2) {
+			if (prop->ratem != slc->prop.ratem ||
+			prop->sampleszbits != slc->prop.sampleszbits ||
+			prop->baser != slc->prop.baser) {
+				ret = -EISCONN;
+				goto err_define_ch;
+			}
+		} else if (slc->state > SLIM_CH_DEFINED) {
+			ret = -EISCONN;
+			goto err_define_ch;
+		} else {
+			ctrl->chans[chan].prop = *prop;
+			ret = slim_nextdefine_ch(sb, chan);
+			if (ret)
+				goto err_define_ch;
+		}
+		if (i < (nchan - 1))
+			ctrl->chans[chan].nextgrp = chanh[i + 1];
+		if (i == 0)
+			ctrl->chans[chan].nextgrp |= SLIM_START_GRP;
+		if (i == (nchan - 1))
+			ctrl->chans[chan].nextgrp |= SLIM_END_GRP;
+	}
+
+	if (grp)
+		*grph = ((nchan << 8) | SLIM_HDL_TO_CHIDX(chanh[0]));
+	for (i = 0; i < nchan; i++) {
+		u8 chan = SLIM_HDL_TO_CHIDX(chanh[i]);
+		struct slim_ich *slc = &ctrl->chans[chan];
+		if (slc->state == SLIM_CH_ALLOCATED)
+			slc->state = SLIM_CH_DEFINED;
+	}
+err_define_ch:
+	dev_dbg(&ctrl->dev, "define_ch: ch:%d, ret:%d", *chanh, ret);
+	mutex_unlock(&ctrl->sched.m_reconf);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(slim_define_ch);
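+
+/*
+ * Illustrative sketch (editor's note, not part of this patch): defining two
+ * 48 kHz, 16-bit channels (allocated earlier with slim_alloc_ch()) as one
+ * stereo group. The parameter values are hypothetical.
+ *
+ *	struct slim_ch prop = {
+ *		.prot = SLIM_AUTO_ISO,
+ *		.baser = SLIM_RATE_4000HZ,
+ *		.ratem = 12,
+ *		.sampleszbits = 16,
+ *	};
+ *	u16 chans[2], grph;
+ *	int ret;
+ *
+ *	ret = slim_define_ch(sb, &prop, chans, 2, true, &grph);
+ */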
+
+static u32 getsubfrmcoding(u32 *ctrlw, u32 *subfrml, u32 *msgsl)
+{
+	u32 code = 0;
+	if (*ctrlw == *subfrml) {
+		*ctrlw = 8;
+		*subfrml = 8;
+		*msgsl = SLIM_SL_PER_SUPERFRAME - SLIM_FRM_SLOTS_PER_SUPERFRAME
+				- SLIM_GDE_SLOTS_PER_SUPERFRAME;
+		return 0;
+	}
+	if (*subfrml == 6) {
+		code = 0;
+		*msgsl = 256;
+	} else if (*subfrml == 8) {
+		code = 1;
+		*msgsl = 192;
+	} else if (*subfrml == 24) {
+		code = 2;
+		*msgsl = 64;
+	} else { /* 32 */
+		code = 3;
+		*msgsl = 48;
+	}
+
+	if (*ctrlw < 8) {
+		if (*ctrlw >= 6) {
+			*ctrlw = 6;
+			code |= 0x14;
+		} else {
+			if (*ctrlw == 5)
+				*ctrlw = 4;
+			code |= (*ctrlw << 2);
+		}
+	} else {
+		code -= 2;
+		if (*ctrlw >= 24) {
+			*ctrlw = 24;
+			code |= 0x1e;
+		} else if (*ctrlw >= 16) {
+			*ctrlw = 16;
+			code |= 0x1c;
+		} else if (*ctrlw >= 12) {
+			*ctrlw = 12;
+			code |= 0x1a;
+		} else {
+			*ctrlw = 8;
+			code |= 0x18;
+		}
+	}
+
+	*msgsl = (*msgsl * *ctrlw) - SLIM_FRM_SLOTS_PER_SUPERFRAME -
+				SLIM_GDE_SLOTS_PER_SUPERFRAME;
+	return code;
+}
+
+static void shiftsegoffsets(struct slim_controller *ctrl, struct slim_ich **ach,
+				int sz, u32 shft)
+{
+	int i;
+	for (i = 0; i < sz; i++) {
+		struct slim_ich *slc;
+		if (ach[i] == NULL)
+			continue;
+		slc = ach[i];
+		if (slc->state == SLIM_CH_PENDING_REMOVAL)
+			continue;
+		slc->newoff += shft;
+		/* seg. offset must be <= interval */
+		if (slc->newoff >= slc->newintr)
+			slc->newoff -= slc->newintr;
+	}
+}
+
+static int slim_sched_chans(struct slim_device *sb, u32 clkgear,
+			u32 *ctrlw, u32 *subfrml)
+{
+	int coeff1, coeff3;
+	enum slim_ch_coeff bias;
+	struct slim_controller *ctrl = sb->ctrl;
+	int last1 = ctrl->sched.num_cc1 - 1;
+	int last3 = ctrl->sched.num_cc3 - 1;
+
+	/*
+	 * Find first channels with coeff 1 & 3 as starting points for
+	 * scheduling
+	 */
+	for (coeff3 = 0; coeff3 < ctrl->sched.num_cc3; coeff3++) {
+		struct slim_ich *slc = ctrl->sched.chc3[coeff3];
+		if (slc->state == SLIM_CH_PENDING_REMOVAL)
+			continue;
+		else
+			break;
+	}
+	for (coeff1 = 0; coeff1 < ctrl->sched.num_cc1; coeff1++) {
+		struct slim_ich *slc = ctrl->sched.chc1[coeff1];
+		if (slc->state == SLIM_CH_PENDING_REMOVAL)
+			continue;
+		else
+			break;
+	}
+	if (coeff3 == ctrl->sched.num_cc3 && coeff1 == ctrl->sched.num_cc1) {
+		*ctrlw = 8;
+		*subfrml = 8;
+		return 0;
+	} else if (coeff3 == ctrl->sched.num_cc3)
+		bias = SLIM_COEFF_1;
+	else
+		bias = SLIM_COEFF_3;
+
+	/*
+	 * Find the last channel in the coeff1 and coeff3 lists; it is used to
+	 * know when scheduling of all the channels is done
+	 */
+	while (last1 >= 0) {
+		if (ctrl->sched.chc1[last1] != NULL &&
+			(ctrl->sched.chc1[last1])->state !=
+			SLIM_CH_PENDING_REMOVAL)
+			break;
+		last1--;
+	}
+	while (last3 >= 0) {
+		if (ctrl->sched.chc3[last3] != NULL &&
+			(ctrl->sched.chc3[last3])->state !=
+			SLIM_CH_PENDING_REMOVAL)
+			break;
+		last3--;
+	}
+
+	if (bias == SLIM_COEFF_1) {
+		struct slim_ich *slc1 = ctrl->sched.chc1[coeff1];
+		u32 expshft = SLIM_MAX_CLK_GEAR - clkgear;
+		int curexp, finalexp;
+		u32 curintr, curmaxsl;
+		int opensl1[2];
+		int maxctrlw1;
+
+		finalexp = (ctrl->sched.chc1[last1])->rootexp;
+		curexp = (int)expshft - 1;
+
+		curintr = (SLIM_MAX_INTR_COEFF_1 * 2) >> (curexp + 1);
+		curmaxsl = curintr >> 1;
+		opensl1[0] = opensl1[1] = curmaxsl;
+
+		while ((coeff1 < ctrl->sched.num_cc1) || (curintr > 24)) {
+			curintr >>= 1;
+			curmaxsl >>= 1;
+
+			/* update 4K family open slot records */
+			if (opensl1[1] < opensl1[0])
+				opensl1[1] -= curmaxsl;
+			else
+				opensl1[1] = opensl1[0] - curmaxsl;
+			opensl1[0] = curmaxsl;
+			if (opensl1[1] < 0) {
+				opensl1[0] += opensl1[1];
+				opensl1[1] = 0;
+			}
+			if (opensl1[0] <= 0) {
+				dev_dbg(&ctrl->dev, "reconfig failed:%d\n",
+						__LINE__);
+				return -EXFULL;
+			}
+			curexp++;
+			/* schedule 4k family channels */
+
+			while ((coeff1 < ctrl->sched.num_cc1) && (curexp ==
+					(int)(slc1->rootexp + expshft))) {
+				if (slc1->state == SLIM_CH_PENDING_REMOVAL) {
+					coeff1++;
+					slc1 = ctrl->sched.chc1[coeff1];
+					continue;
+				}
+				if (opensl1[1] >= opensl1[0] ||
+					(finalexp == (int)slc1->rootexp &&
+					 curintr <= 24 &&
+					 opensl1[0] == curmaxsl)) {
+					opensl1[1] -= slc1->seglen;
+					slc1->newoff = curmaxsl + opensl1[1];
+					if (opensl1[1] < 0 &&
+						opensl1[0] == curmaxsl) {
+						opensl1[0] += opensl1[1];
+						opensl1[1] = 0;
+						if (opensl1[0] < 0) {
+							dev_dbg(&ctrl->dev,
+							"reconfig failed:%d\n",
+							__LINE__);
+							return -EXFULL;
+						}
+					}
+				} else {
+					if (slc1->seglen > opensl1[0]) {
+						dev_dbg(&ctrl->dev,
+						"reconfig failed:%d\n",
+						__LINE__);
+						return -EXFULL;
+					}
+					slc1->newoff = opensl1[0] -
+							slc1->seglen;
+					opensl1[0] = slc1->newoff;
+				}
+				slc1->newintr = curintr;
+				coeff1++;
+				slc1 = ctrl->sched.chc1[coeff1];
+			}
+		}
+		/* Leave some slots for messaging space */
+		if (opensl1[1] <= 0 && opensl1[0] <= 0)
+			return -EXFULL;
+		if (opensl1[1] > opensl1[0]) {
+			int temp = opensl1[0];
+			opensl1[0] = opensl1[1];
+			opensl1[1] = temp;
+			shiftsegoffsets(ctrl, ctrl->sched.chc1,
+					ctrl->sched.num_cc1, curmaxsl);
+		}
+		/* choose subframe mode to maximize bw */
+		maxctrlw1 = opensl1[0];
+		if (opensl1[0] == curmaxsl)
+			maxctrlw1 += opensl1[1];
+		if (curintr >= 24) {
+			*subfrml = 24;
+			*ctrlw = maxctrlw1;
+		} else if (curintr == 12) {
+			if (maxctrlw1 > opensl1[1] * 4) {
+				*subfrml = 24;
+				*ctrlw = maxctrlw1;
+			} else {
+				*subfrml = 6;
+				*ctrlw = opensl1[1];
+			}
+		} else {
+			*subfrml = 6;
+			*ctrlw = maxctrlw1;
+		}
+	} else {
+		struct slim_ich *slc1 = NULL;
+		struct slim_ich *slc3 = ctrl->sched.chc3[coeff3];
+		u32 expshft = SLIM_MAX_CLK_GEAR - clkgear;
+		int curexp, finalexp, exp1;
+		u32 curintr, curmaxsl;
+		int opensl3[2];
+		int opensl1[6];
+		bool opensl1valid = false;
+		int maxctrlw1, maxctrlw3, i;
+		finalexp = (ctrl->sched.chc3[last3])->rootexp;
+		if (last1 >= 0) {
+			slc1 = ctrl->sched.chc1[coeff1];
+			exp1 = (ctrl->sched.chc1[last1])->rootexp;
+			if (exp1 > finalexp)
+				finalexp = exp1;
+		}
+		curexp = (int)expshft - 1;
+
+		curintr = (SLIM_MAX_INTR_COEFF_3 * 2) >> (curexp + 1);
+		curmaxsl = curintr >> 1;
+		opensl3[0] = opensl3[1] = curmaxsl;
+
+		while (coeff1 < ctrl->sched.num_cc1 ||
+			coeff3 < ctrl->sched.num_cc3 ||
+			curintr > 32) {
+			curintr >>= 1;
+			curmaxsl >>= 1;
+
+			/* update 12k family open slot records */
+			if (opensl3[1] < opensl3[0])
+				opensl3[1] -= curmaxsl;
+			else
+				opensl3[1] = opensl3[0] - curmaxsl;
+			opensl3[0] = curmaxsl;
+			if (opensl3[1] < 0) {
+				opensl3[0] += opensl3[1];
+				opensl3[1] = 0;
+			}
+			if (opensl3[0] <= 0) {
+				dev_dbg(&ctrl->dev, "reconfig failed:%d\n",
+						__LINE__);
+				return -EXFULL;
+			}
+			curexp++;
+
+			/* schedule 12k family channels */
+			while (coeff3 < ctrl->sched.num_cc3 &&
+				curexp == (int)slc3->rootexp + expshft) {
+				if (slc3->state == SLIM_CH_PENDING_REMOVAL) {
+					coeff3++;
+					slc3 = ctrl->sched.chc3[coeff3];
+					continue;
+				}
+				opensl1valid = false;
+				if (opensl3[1] >= opensl3[0] ||
+					(finalexp == (int)slc3->rootexp &&
+					 curintr <= 32 &&
+					 opensl3[0] == curmaxsl &&
+					 last1 < 0)) {
+					opensl3[1] -= slc3->seglen;
+					slc3->newoff = curmaxsl + opensl3[1];
+					if (opensl3[1] < 0 &&
+						opensl3[0] == curmaxsl) {
+						opensl3[0] += opensl3[1];
+						opensl3[1] = 0;
+					}
+					if (opensl3[0] < 0) {
+						dev_dbg(&ctrl->dev,
+						"reconfig failed:%d\n",
+						__LINE__);
+						return -EXFULL;
+					}
+				} else {
+					if (slc3->seglen > opensl3[0]) {
+						dev_dbg(&ctrl->dev,
+						"reconfig failed:%d\n",
+						__LINE__);
+						return -EXFULL;
+					}
+					slc3->newoff = opensl3[0] -
+							slc3->seglen;
+					opensl3[0] = slc3->newoff;
+				}
+				slc3->newintr = curintr;
+				coeff3++;
+				slc3 = ctrl->sched.chc3[coeff3];
+			}
+			/* update 4k openslot records */
+			if (opensl1valid == false) {
+				for (i = 0; i < 3; i++) {
+					opensl1[i * 2] = opensl3[0];
+					opensl1[(i * 2) + 1] = opensl3[1];
+				}
+			} else {
+				int opensl1p[6];
+				memcpy(opensl1p, opensl1, sizeof(opensl1));
+				for (i = 0; i < 3; i++) {
+					if (opensl1p[i] < opensl1p[i + 3])
+						opensl1[(i * 2) + 1] =
+							opensl1p[i];
+					else
+						opensl1[(i * 2) + 1] =
+							opensl1p[i + 3];
+				}
+				for (i = 0; i < 3; i++) {
+					opensl1[(i * 2) + 1] -= curmaxsl;
+					opensl1[i * 2] = curmaxsl;
+					if (opensl1[(i * 2) + 1] < 0) {
+						opensl1[i * 2] +=
+							opensl1[(i * 2) + 1];
+						opensl1[(i * 2) + 1] = 0;
+					}
+					if (opensl1[i * 2] < 0) {
+						dev_dbg(&ctrl->dev,
+						"reconfig failed:%d\n",
+						__LINE__);
+						return -EXFULL;
+					}
+				}
+			}
+			/* schedule 4k family channels */
+			while (coeff1 < ctrl->sched.num_cc1 &&
+				curexp == (int)slc1->rootexp + expshft) {
+				/* searchorder effective when opensl valid */
+				static const int srcho[] = { 5, 2, 4, 1, 3, 0 };
+				int maxopensl = 0;
+				int maxi = 0;
+				if (slc1->state == SLIM_CH_PENDING_REMOVAL) {
+					coeff1++;
+					slc1 = ctrl->sched.chc1[coeff1];
+					continue;
+				}
+				opensl1valid = true;
+				for (i = 0; i < 6; i++) {
+					if (opensl1[srcho[i]] > maxopensl) {
+						maxopensl = opensl1[srcho[i]];
+						maxi = srcho[i];
+					}
+				}
+				opensl1[maxi] -= slc1->seglen;
+				slc1->newoff = (curmaxsl * maxi) +
+						opensl1[maxi];
+				if (opensl1[maxi] < 0) {
+					if (((maxi & 1) == 1) &&
+					(opensl1[maxi - 1] == curmaxsl)) {
+						opensl1[maxi - 1] +=
+							opensl1[maxi];
+						if (opensl3[0] >
+							opensl1[maxi - 1])
+							opensl3[0] =
+							opensl1[maxi - 1];
+						opensl3[1] = 0;
+						opensl1[maxi] = 0;
+						if (opensl1[maxi - 1] < 0) {
+							dev_dbg(&ctrl->dev,
+							"reconfig failed:%d\n",
+							__LINE__);
+							return -EXFULL;
+						}
+					} else {
+						dev_dbg(&ctrl->dev,
+						"reconfig failed:%d\n",
+						__LINE__);
+						return -EXFULL;
+					}
+				} else {
+					if (opensl3[maxi & 1] > opensl1[maxi])
+						opensl3[maxi & 1] =
+							opensl1[maxi];
+				}
+				slc1->newintr = curintr * 3;
+				coeff1++;
+				slc1 = ctrl->sched.chc1[coeff1];
+			}
+		}
+		/* Leave some slots for messaging space */
+		if (opensl3[1] <= 0 && opensl3[0] <= 0)
+			return -EXFULL;
+		/* swap 1st and 2nd bucket if 2nd bucket has more open slots */
+		if (opensl3[1] > opensl3[0]) {
+			int temp = opensl3[0];
+			opensl3[0] = opensl3[1];
+			opensl3[1] = temp;
+			temp = opensl1[5];
+			opensl1[5] = opensl1[4];
+			opensl1[4] = opensl1[3];
+			opensl1[3] = opensl1[2];
+			opensl1[2] = opensl1[1];
+			opensl1[1] = opensl1[0];
+			opensl1[0] = temp;
+			shiftsegoffsets(ctrl, ctrl->sched.chc1,
+					ctrl->sched.num_cc1, curmaxsl);
+			shiftsegoffsets(ctrl, ctrl->sched.chc3,
+					ctrl->sched.num_cc3, curmaxsl);
+		}
+		/* subframe mode to maximize BW */
+		maxctrlw3 = opensl3[0];
+		maxctrlw1 = opensl1[0];
+		if (opensl3[0] == curmaxsl)
+			maxctrlw3 += opensl3[1];
+		for (i = 0; i < 5 && opensl1[i] == curmaxsl; i++)
+			maxctrlw1 += opensl1[i + 1];
+		if (curintr >= 32) {
+			*subfrml = 32;
+			*ctrlw = maxctrlw3;
+		} else if (curintr == 16) {
+			if (maxctrlw3 > (opensl3[1] * 4)) {
+				*subfrml = 32;
+				*ctrlw = maxctrlw3;
+			} else {
+				*subfrml = 8;
+				*ctrlw = opensl3[1];
+			}
+		} else {
+			if ((maxctrlw1 * 8) >= (maxctrlw3 * 24)) {
+				*subfrml = 24;
+				*ctrlw = maxctrlw1;
+			} else {
+				*subfrml = 8;
+				*ctrlw = maxctrlw3;
+			}
+		}
+	}
+	return 0;
+}
+
+#ifdef DEBUG
+static int slim_verifychansched(struct slim_controller *ctrl, u32 ctrlw,
+				u32 subfrml, u32 clkgear)
+{
+	int sl, i;
+	int cc1 = 0;
+	int cc3 = 0;
+	struct slim_ich *slc = NULL;
+	if (!ctrl->sched.slots)
+		return 0;
+	memset(ctrl->sched.slots, 0, SLIM_SL_PER_SUPERFRAME);
+	dev_dbg(&ctrl->dev, "Clock gear is:%d\n", clkgear);
+	for (sl = 0; sl < SLIM_SL_PER_SUPERFRAME; sl += subfrml) {
+		for (i = 0; i < ctrlw; i++)
+			ctrl->sched.slots[sl + i] = 33;
+	}
+	while (cc1 < ctrl->sched.num_cc1) {
+		slc = ctrl->sched.chc1[cc1];
+		if (slc == NULL) {
+			dev_err(&ctrl->dev, "SLC1 null in verify: chan%d\n",
+				cc1);
+			return -EIO;
+		}
+		dev_dbg(&ctrl->dev, "chan:%d, offset:%d, intr:%d, seglen:%d\n",
+				(slc - ctrl->chans), slc->newoff,
+				slc->newintr, slc->seglen);
+
+		if (slc->state != SLIM_CH_PENDING_REMOVAL) {
+			for (sl = slc->newoff;
+				sl < SLIM_SL_PER_SUPERFRAME;
+				sl += slc->newintr) {
+				for (i = 0; i < slc->seglen; i++) {
+					if (ctrl->sched.slots[sl + i])
+						return -EXFULL;
+					ctrl->sched.slots[sl + i] = cc1 + 1;
+				}
+			}
+		}
+		cc1++;
+	}
+	while (cc3 < ctrl->sched.num_cc3) {
+		slc = ctrl->sched.chc3[cc3];
+		if (slc == NULL) {
+			dev_err(&ctrl->dev, "SLC3 null in verify: chan%d\n",
+				cc3);
+			return -EIO;
+		}
+		dev_dbg(&ctrl->dev, "chan:%d, offset:%d, intr:%d, seglen:%d\n",
+				(slc - ctrl->chans), slc->newoff,
+				slc->newintr, slc->seglen);
+		if (slc->state != SLIM_CH_PENDING_REMOVAL) {
+			for (sl = slc->newoff;
+				sl < SLIM_SL_PER_SUPERFRAME;
+				sl += slc->newintr) {
+				for (i = 0; i < slc->seglen; i++) {
+					if (ctrl->sched.slots[sl + i])
+						return -EXFULL;
+					ctrl->sched.slots[sl + i] = cc3 + 1;
+				}
+			}
+		}
+		cc3++;
+	}
+
+	return 0;
+}
+#else
+static int slim_verifychansched(struct slim_controller *ctrl, u32 ctrlw,
+				u32 subfrml, u32 clkgear)
+{
+	return 0;
+}
+#endif
+
+static void slim_sort_chan_grp(struct slim_controller *ctrl,
+				struct slim_ich *slc)
+{
+	u8 last = (u8)-1;
+	u8 second = 0;
+
+	for (; last > 0; last--) {
+		struct slim_ich *slc1 = slc;
+		struct slim_ich *slc2;
+		u8 next = SLIM_HDL_TO_CHIDX(slc1->nextgrp);
+		slc2 = &ctrl->chans[next];
+		for (second = 1; second <= last && slc2 &&
+			(slc2->state == SLIM_CH_ACTIVE ||
+			 slc2->state == SLIM_CH_PENDING_ACTIVE); second++) {
+			if (slc1->newoff > slc2->newoff) {
+				u32 temp = slc2->newoff;
+				slc2->newoff = slc1->newoff;
+				slc1->newoff = temp;
+			}
+			if (slc2->nextgrp & SLIM_END_GRP) {
+				last = second;
+				break;
+			}
+			slc1 = slc2;
+			next = SLIM_HDL_TO_CHIDX(slc1->nextgrp);
+			slc2 = &ctrl->chans[next];
+		}
+		if (slc2 == NULL)
+			last = second - 1;
+	}
+}
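+
+/*
+ * In effect, slim_sort_chan_grp() above is a bubble sort over the group's
+ * nextgrp chain: newoff values of adjacent active channels are swapped
+ * until the grouped channels carry non-decreasing offsets.
+ */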
+
+static int slim_allocbw(struct slim_device *sb, int *subfrmc, int *clkgear)
+{
+	u32 msgsl = 0;
+	u32 ctrlw = 0;
+	u32 subfrml = 0;
+	int ret = -EIO;
+	struct slim_controller *ctrl = sb->ctrl;
+	u32 usedsl = ctrl->sched.usedslots + ctrl->sched.pending_msgsl;
+	u32 availsl = SLIM_SL_PER_SUPERFRAME - SLIM_FRM_SLOTS_PER_SUPERFRAME -
+			SLIM_GDE_SLOTS_PER_SUPERFRAME;
+	*clkgear = SLIM_MAX_CLK_GEAR;
+
+	dev_dbg(&ctrl->dev, "used sl:%u, availlable sl:%u\n", usedsl, availsl);
+	dev_dbg(&ctrl->dev, "pending:chan sl:%u, :msg sl:%u, clkgear:%u\n",
+				ctrl->sched.usedslots,
+				ctrl->sched.pending_msgsl, *clkgear);
+	/*
+	 * If the number of used slots is 0, all channels are inactive.
+	 * It is very likely that the manager will call clock pause very soon.
+	 * By making sure that the bus is in MAX_GEAR, the clock pause
+	 * sequence will take the minimum amount of time.
+	 */
+	if (ctrl->sched.usedslots != 0) {
+		while ((usedsl * 2 <= availsl) && (*clkgear > ctrl->min_cg)) {
+			*clkgear -= 1;
+			usedsl *= 2;
+		}
+	}
+
+	/*
+	 * Try scheduling data channels at the current clock gear; if not all
+	 * channels can be scheduled, or the reserved message BW can't be
+	 * satisfied, increase the clock gear and try again
+	 */
+	for (; *clkgear <= ctrl->max_cg; (*clkgear)++) {
+		ret = slim_sched_chans(sb, *clkgear, &ctrlw, &subfrml);
+
+		if (ret == 0) {
+			*subfrmc = getsubfrmcoding(&ctrlw, &subfrml, &msgsl);
+			if ((msgsl >> (ctrl->max_cg - *clkgear) <
+				ctrl->sched.pending_msgsl) &&
+				(*clkgear < ctrl->max_cg))
+				continue;
+			else
+				break;
+		}
+	}
+	if (ret == 0) {
+		int i;
+		/* Sort channel-groups */
+		for (i = 0; i < ctrl->sched.num_cc1; i++) {
+			struct slim_ich *slc = ctrl->sched.chc1[i];
+			if (slc->state == SLIM_CH_PENDING_REMOVAL)
+				continue;
+			if ((slc->nextgrp & SLIM_START_GRP) &&
+				!(slc->nextgrp & SLIM_END_GRP)) {
+				slim_sort_chan_grp(ctrl, slc);
+			}
+		}
+		for (i = 0; i < ctrl->sched.num_cc3; i++) {
+			struct slim_ich *slc = ctrl->sched.chc3[i];
+			if (slc->state == SLIM_CH_PENDING_REMOVAL)
+				continue;
+			if ((slc->nextgrp & SLIM_START_GRP) &&
+				!(slc->nextgrp & SLIM_END_GRP)) {
+				slim_sort_chan_grp(ctrl, slc);
+			}
+		}
+
+		ret = slim_verifychansched(ctrl, ctrlw, subfrml, *clkgear);
+	}
+
+	return ret;
+}
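+
+/*
+ * Gear-selection example for the loop above (numbers purely illustrative):
+ * each gear drop halves the bus bandwidth, so usedsl doubles per drop. If
+ * availsl works out to ~1518 slots and usedsl = 180 at SLIM_MAX_CLK_GEAR
+ * (10), the loop walks 180 -> 360 -> 720 -> 1440 and settles on gear 7,
+ * since gear 6 would need 2880 slots.
+ */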
+
+static void slim_change_existing_chans(struct slim_controller *ctrl, int coeff)
+{
+	struct slim_ich **arr;
+	int len, i;
+	if (coeff == SLIM_COEFF_1) {
+		arr = ctrl->sched.chc1;
+		len = ctrl->sched.num_cc1;
+	} else {
+		arr = ctrl->sched.chc3;
+		len = ctrl->sched.num_cc3;
+	}
+	for (i = 0; i < len; i++) {
+		struct slim_ich *slc = arr[i];
+		if (slc->state == SLIM_CH_ACTIVE ||
+			slc->state == SLIM_CH_SUSPENDED) {
+			slc->offset = slc->newoff;
+			slc->interval = slc->newintr;
+		}
+	}
+}
+
+static void slim_chan_changes(struct slim_device *sb, bool revert)
+{
+	struct slim_controller *ctrl = sb->ctrl;
+	while (!list_empty(&sb->mark_define)) {
+		struct slim_ich *slc;
+		struct slim_pending_ch *pch =
+				list_entry(sb->mark_define.next,
+					struct slim_pending_ch, pending);
+		slc = &ctrl->chans[pch->chan];
+		if (revert) {
+			if (slc->state == SLIM_CH_PENDING_ACTIVE) {
+				u32 sl = slc->seglen << slc->rootexp;
+				if (slc->coeff == SLIM_COEFF_3)
+					sl *= 3;
+				if (!ctrl->allocbw)
+					ctrl->sched.usedslots -= sl;
+				slim_remove_ch(ctrl, slc);
+				slc->state = SLIM_CH_DEFINED;
+			}
+		} else {
+			slc->state = SLIM_CH_ACTIVE;
+			slc->def++;
+		}
+		list_del_init(&pch->pending);
+		kfree(pch);
+	}
+
+	while (!list_empty(&sb->mark_removal)) {
+		struct slim_pending_ch *pch =
+				list_entry(sb->mark_removal.next,
+					struct slim_pending_ch, pending);
+		struct slim_ich *slc = &ctrl->chans[pch->chan];
+		u32 sl = slc->seglen << slc->rootexp;
+		if (revert || slc->def > 0) {
+			if (slc->coeff == SLIM_COEFF_3)
+				sl *= 3;
+			if (!ctrl->allocbw)
+				ctrl->sched.usedslots += sl;
+			if (revert)
+				slc->def++;
+			slc->state = SLIM_CH_ACTIVE;
+		} else
+			slim_remove_ch(ctrl, slc);
+		list_del_init(&pch->pending);
+		kfree(pch);
+	}
+
+	while (!list_empty(&sb->mark_suspend)) {
+		struct slim_pending_ch *pch =
+				list_entry(sb->mark_suspend.next,
+					struct slim_pending_ch, pending);
+		struct slim_ich *slc = &ctrl->chans[pch->chan];
+		if (revert)
+			slc->state = SLIM_CH_ACTIVE;
+		list_del_init(&pch->pending);
+		kfree(pch);
+	}
+	/* Change already active channel if reconfig succeeded */
+	if (!revert) {
+		slim_change_existing_chans(ctrl, SLIM_COEFF_1);
+		slim_change_existing_chans(ctrl, SLIM_COEFF_3);
+	}
+}
+
+/*
+ * slim_reconfigure_now: Request reconfiguration now.
+ * @sb: client handle
+ * This API does what the commit flag in other scheduling APIs does.
+ * -EXFULL is returned if there is no space in TDM to reserve the
+ * bandwidth. -EBUSY is returned if a reconfiguration request is already in
+ * progress.
+ */
+int slim_reconfigure_now(struct slim_device *sb)
+{
+	u8 i;
+	u8 wbuf[4];
+	u32 clkgear, subframe;
+	u32 curexp;
+	int ret;
+	struct slim_controller *ctrl = sb->ctrl;
+	u32 expshft;
+	u32 segdist;
+	struct slim_pending_ch *pch;
+	DEFINE_SLIM_BCAST_TXN(txn, SLIM_MSG_MC_BEGIN_RECONFIGURATION, 0, 3,
+				NULL, NULL, sb->laddr);
+
+	mutex_lock(&ctrl->sched.m_reconf);
+	/*
+	 * If there are no pending changes from this client, avoid sending
+	 * the reconfiguration sequence
+	 */
+	if (sb->pending_msgsl == sb->cur_msgsl &&
+		list_empty(&sb->mark_define) &&
+		list_empty(&sb->mark_suspend)) {
+		struct list_head *pos, *next;
+		list_for_each_safe(pos, next, &sb->mark_removal) {
+			struct slim_ich *slc;
+			pch = list_entry(pos, struct slim_pending_ch, pending);
+			slc = &ctrl->chans[pch->chan];
+			if (slc->def > 0)
+				slc->def--;
+			/* Disconnect source port to free it up */
+			if (SLIM_HDL_TO_LA(slc->srch) == sb->laddr)
+				slc->srch = 0;
+			/*
+			 * If controller overrides BW allocation,
+			 * delete this in remove channel itself
+			 */
+			if (slc->def != 0 && !ctrl->allocbw) {
+				list_del(&pch->pending);
+				kfree(pch);
+			}
+		}
+		if (list_empty(&sb->mark_removal)) {
+			mutex_unlock(&ctrl->sched.m_reconf);
+			pr_info("SLIM_CL: skip reconfig sequence");
+			return 0;
+		}
+	}
+
+	ctrl->sched.pending_msgsl += sb->pending_msgsl - sb->cur_msgsl;
+	list_for_each_entry(pch, &sb->mark_define, pending) {
+		struct slim_ich *slc = &ctrl->chans[pch->chan];
+		slim_add_ch(ctrl, slc);
+		if (slc->state < SLIM_CH_ACTIVE)
+			slc->state = SLIM_CH_PENDING_ACTIVE;
+	}
+
+	list_for_each_entry(pch, &sb->mark_removal, pending) {
+		struct slim_ich *slc = &ctrl->chans[pch->chan];
+		u32 sl = slc->seglen << slc->rootexp;
+		if (slc->coeff == SLIM_COEFF_3)
+			sl *= 3;
+		if (!ctrl->allocbw)
+			ctrl->sched.usedslots -= sl;
+		slc->state = SLIM_CH_PENDING_REMOVAL;
+	}
+	list_for_each_entry(pch, &sb->mark_suspend, pending) {
+		struct slim_ich *slc = &ctrl->chans[pch->chan];
+		slc->state = SLIM_CH_SUSPENDED;
+	}
+
+	/*
+	 * Controller can override default channel scheduling algorithm.
+	 * (e.g. if controller needs to use fixed channel scheduling based
+	 * on number of channels)
+	 */
+	if (ctrl->allocbw)
+		ret = ctrl->allocbw(sb, &subframe, &clkgear);
+	else
+		ret = slim_allocbw(sb, &subframe, &clkgear);
+
+	if (!ret) {
+		ret = slim_processtxn(ctrl, &txn, false);
+		dev_dbg(&ctrl->dev, "sending begin_reconfig:ret:%d\n", ret);
+	}
+
+	if (!ret && subframe != ctrl->sched.subfrmcode) {
+		wbuf[0] = (u8)(subframe & 0xFF);
+		txn.mc = SLIM_MSG_MC_NEXT_SUBFRAME_MODE;
+		txn.len = 1;
+		txn.rl = 4;
+		txn.wbuf = wbuf;
+		ret = slim_processtxn(ctrl, &txn, false);
+		dev_dbg(&ctrl->dev, "sending subframe:%d,ret:%d\n",
+				(int)wbuf[0], ret);
+	}
+	if (!ret && clkgear != ctrl->clkgear) {
+		wbuf[0] = (u8)(clkgear & 0xFF);
+		txn.mc = SLIM_MSG_MC_NEXT_CLOCK_GEAR;
+		txn.len = 1;
+		txn.rl = 4;
+		txn.wbuf = wbuf;
+		ret = slim_processtxn(ctrl, &txn, false);
+		dev_dbg(&ctrl->dev, "sending clkgear:%d,ret:%d\n",
+				(int)wbuf[0], ret);
+	}
+	if (ret)
+		goto revert_reconfig;
+
+	expshft = SLIM_MAX_CLK_GEAR - clkgear;
+	/* activate/remove channel */
+	list_for_each_entry(pch, &sb->mark_define, pending) {
+		struct slim_ich *slc = &ctrl->chans[pch->chan];
+		/* Define content */
+		wbuf[0] = slc->chan;
+		wbuf[1] = slc->prrate;
+		wbuf[2] = slc->prop.dataf | (slc->prop.auxf << 4);
+		wbuf[3] = slc->prop.sampleszbits / SLIM_CL_PER_SL;
+		txn.mc = SLIM_MSG_MC_NEXT_DEFINE_CONTENT;
+		txn.len = 4;
+		txn.rl = 7;
+		txn.wbuf = wbuf;
+		dev_dbg(&ctrl->dev, "define content, activate:%x, %x, %x, %x\n",
+				wbuf[0], wbuf[1], wbuf[2], wbuf[3]);
+		/* Right now, channel link bit is not supported */
+		ret = slim_processtxn(ctrl, &txn, false);
+		if (ret)
+			goto revert_reconfig;
+
+		txn.mc = SLIM_MSG_MC_NEXT_ACTIVATE_CHANNEL;
+		txn.len = 1;
+		txn.rl = 4;
+		ret = slim_processtxn(ctrl, &txn, false);
+		if (ret)
+			goto revert_reconfig;
+	}
+
+	list_for_each_entry(pch, &sb->mark_removal, pending) {
+		struct slim_ich *slc = &ctrl->chans[pch->chan];
+		dev_dbg(&ctrl->dev, "remove chan:%x\n", pch->chan);
+		wbuf[0] = slc->chan;
+		txn.mc = SLIM_MSG_MC_NEXT_REMOVE_CHANNEL;
+		txn.len = 1;
+		txn.rl = 4;
+		txn.wbuf = wbuf;
+		ret = slim_processtxn(ctrl, &txn, false);
+		if (ret)
+			goto revert_reconfig;
+	}
+	list_for_each_entry(pch, &sb->mark_suspend, pending) {
+		struct slim_ich *slc = &ctrl->chans[pch->chan];
+		dev_dbg(&ctrl->dev, "suspend chan:%x\n", pch->chan);
+		wbuf[0] = slc->chan;
+		txn.mc = SLIM_MSG_MC_NEXT_DEACTIVATE_CHANNEL;
+		txn.len = 1;
+		txn.rl = 4;
+		txn.wbuf = wbuf;
+		ret = slim_processtxn(ctrl, &txn, false);
+		if (ret)
+			goto revert_reconfig;
+	}
+
+	/* Define CC1 channel */
+	for (i = 0; i < ctrl->sched.num_cc1; i++) {
+		struct slim_ich *slc = ctrl->sched.chc1[i];
+		if (slc->state == SLIM_CH_PENDING_REMOVAL)
+			continue;
+		curexp = slc->rootexp + expshft;
+		segdist = (slc->newoff << curexp) & 0x1FF;
+		expshft = SLIM_MAX_CLK_GEAR - clkgear;
+		dev_dbg(&ctrl->dev, "new-intr:%d, old-intr:%d, dist:%d\n",
+				slc->newintr, slc->interval, segdist);
+		dev_dbg(&ctrl->dev, "new-off:%d, old-off:%d\n",
+				slc->newoff, slc->offset);
+
+		if (slc->state < SLIM_CH_ACTIVE || slc->def < slc->ref ||
+			slc->newintr != slc->interval ||
+			slc->newoff != slc->offset) {
+			segdist |= 0x200;
+			segdist >>= curexp;
+			segdist |= (slc->newoff << (curexp + 1)) & 0xC00;
+			wbuf[0] = slc->chan;
+			wbuf[1] = (u8)(segdist & 0xFF);
+			wbuf[2] = (u8)((segdist & 0xF00) >> 8) |
+					(slc->prop.prot << 4);
+			wbuf[3] = slc->seglen;
+			txn.mc = SLIM_MSG_MC_NEXT_DEFINE_CHANNEL;
+			txn.len = 4;
+			txn.rl = 7;
+			txn.wbuf = wbuf;
+			ret = slim_processtxn(ctrl, &txn, false);
+			if (ret)
+				goto revert_reconfig;
+		}
+	}
+
+	/* Define CC3 channels */
+	for (i = 0; i < ctrl->sched.num_cc3; i++) {
+		struct slim_ich *slc = ctrl->sched.chc3[i];
+		if (slc->state == SLIM_CH_PENDING_REMOVAL)
+			continue;
+		curexp = slc->rootexp + expshft;
+		segdist = (slc->newoff << curexp) & 0x1FF;
+		expshft = SLIM_MAX_CLK_GEAR - clkgear;
+		dev_dbg(&ctrl->dev, "new-intr:%d, old-intr:%d, dist:%d\n",
+				slc->newintr, slc->interval, segdist);
+		dev_dbg(&ctrl->dev, "new-off:%d, old-off:%d\n",
+				slc->newoff, slc->offset);
+
+		if (slc->state < SLIM_CH_ACTIVE || slc->def < slc->ref ||
+			slc->newintr != slc->interval ||
+			slc->newoff != slc->offset) {
+			segdist |= 0x200;
+			segdist >>= curexp;
+			segdist |= 0xC00;
+			wbuf[0] = slc->chan;
+			wbuf[1] = (u8)(segdist & 0xFF);
+			wbuf[2] = (u8)((segdist & 0xF00) >> 8) |
+					(slc->prop.prot << 4);
+			wbuf[3] = (u8)(slc->seglen);
+			txn.mc = SLIM_MSG_MC_NEXT_DEFINE_CHANNEL;
+			txn.len = 4;
+			txn.rl = 7;
+			txn.wbuf = wbuf;
+			ret = slim_processtxn(ctrl, &txn, false);
+			if (ret)
+				goto revert_reconfig;
+		}
+	}
+	txn.mc = SLIM_MSG_MC_RECONFIGURE_NOW;
+	txn.len = 0;
+	txn.rl = 3;
+	txn.wbuf = NULL;
+	ret = slim_processtxn(ctrl, &txn, false);
+	dev_dbg(&ctrl->dev, "reconfig now:ret:%d\n", ret);
+	if (!ret) {
+		ctrl->sched.subfrmcode = subframe;
+		ctrl->clkgear = clkgear;
+		ctrl->sched.msgsl = ctrl->sched.pending_msgsl;
+		sb->cur_msgsl = sb->pending_msgsl;
+		slim_chan_changes(sb, false);
+		mutex_unlock(&ctrl->sched.m_reconf);
+		return 0;
+	}
+
+revert_reconfig:
+	/* Revert channel changes */
+	slim_chan_changes(sb, true);
+	mutex_unlock(&ctrl->sched.m_reconf);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(slim_reconfigure_now);
+
+static int add_pending_ch(struct list_head *listh, u8 chan)
+{
+	struct slim_pending_ch *pch;
+	pch = kmalloc(sizeof(struct slim_pending_ch), GFP_KERNEL);
+	if (!pch)
+		return -ENOMEM;
+	pch->chan = chan;
+	list_add_tail(&pch->pending, listh);
+	return 0;
+}
+
+/*
+ * slim_control_ch: Channel control API.
+ * @sb: client handle
+ * @chanh: group or channel handle to be controlled
+ * @chctrl: Control command (activate/suspend/remove)
+ * @commit: flag to indicate whether the control should take effect right away.
+ * This API activates, removes or suspends a channel (or group of channels).
+ * chanh indicates the channel or group handle (returned by the define_ch API).
+ * Reconfiguration may be time-consuming since it can change all other active
+ * channel allocations on the bus, the clock gear used by the slimbus, and the
+ * control space width used for messaging.
+ * commit allows multiple channels to be activated/deactivated before the
+ * reconfiguration is started.
+ * -EXFULL is returned if there is no space in TDM to reserve the bandwidth.
+ * -EISCONN/-ENOTCONN is returned if the channel is already connected or not
+ * yet defined.
+ * -EINVAL is returned if individual control of a grouped channel is attempted.
+ */
+int slim_control_ch(struct slim_device *sb, u16 chanh,
+			enum slim_ch_control chctrl, bool commit)
+{
+	struct slim_controller *ctrl = sb->ctrl;
+	int ret = 0;
+	/* Get rid of the group flag in MSB if any */
+	u8 chan = SLIM_HDL_TO_CHIDX(chanh);
+	u8 nchan = 0;
+	struct slim_ich *slc = &ctrl->chans[chan];
+	if (!(slc->nextgrp & SLIM_START_GRP))
+		return -EINVAL;
+
+	mutex_lock(&sb->sldev_reconf);
+	mutex_lock(&ctrl->sched.m_reconf);
+	do {
+		struct slim_pending_ch *pch;
+		bool add_mark_removal = true;
+
+		slc = &ctrl->chans[chan];
+		dev_dbg(&ctrl->dev, "chan:%d,ctrl:%d,def:%d", chan, chctrl,
+					slc->def);
+		if (slc->state < SLIM_CH_DEFINED) {
+			ret = -ENOTCONN;
+			break;
+		}
+		if (chctrl == SLIM_CH_SUSPEND) {
+			ret = add_pending_ch(&sb->mark_suspend, chan);
+			if (ret)
+				break;
+		} else if (chctrl == SLIM_CH_ACTIVATE) {
+			if (slc->state > SLIM_CH_ACTIVE) {
+				ret = -EISCONN;
+				break;
+			}
+			ret = add_pending_ch(&sb->mark_define, chan);
+			if (ret)
+				break;
+		} else {
+			if (slc->state < SLIM_CH_ACTIVE) {
+				ret = -ENOTCONN;
+				break;
+			}
+			/* If channel removal request comes when pending
+			 * in the mark_define, remove it from the define
+			 * list instead of adding it to removal list
+			 */
+			if (!list_empty(&sb->mark_define)) {
+				struct list_head *pos, *next;
+				list_for_each_safe(pos, next,
+						  &sb->mark_define) {
+					pch = list_entry(pos,
+						struct slim_pending_ch,
+						pending);
+					if (pch->chan == chan) {
+						list_del(&pch->pending);
+						kfree(pch);
+						add_mark_removal = false;
+						break;
+					}
+				}
+			}
+			if (add_mark_removal) {
+				ret = add_pending_ch(&sb->mark_removal, chan);
+				if (ret)
+					break;
+			}
+		}
+
+		nchan++;
+		if (nchan < SLIM_GRP_TO_NCHAN(chanh))
+			chan = SLIM_HDL_TO_CHIDX(slc->nextgrp);
+	} while (nchan < SLIM_GRP_TO_NCHAN(chanh));
+	mutex_unlock(&ctrl->sched.m_reconf);
+	if (!ret && commit)
+		ret = slim_reconfigure_now(sb);
+	mutex_unlock(&sb->sldev_reconf);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(slim_control_ch);
+
+/*
+ * slim_reservemsg_bw: Request to reserve bandwidth for messages.
+ * @sb: client handle
+ * @bw_bps: message bandwidth in bits per second to be requested
+ * @commit: indicates whether the reconfiguration needs to be acted upon.
+ * This API call can be grouped with slim_control_ch API call with only one of
+ * the APIs specifying the commit flag to avoid reconfiguration being called too
+ * frequently. -EXFULL is returned if there is no space in TDM to reserve the
+ * bandwidth. -EBUSY is returned if reconfiguration is requested, but a request
+ * is already in progress.
+ */
+int slim_reservemsg_bw(struct slim_device *sb, u32 bw_bps, bool commit)
+{
+	struct slim_controller *ctrl = sb->ctrl;
+	int ret = 0;
+	int sl;
+	mutex_lock(&sb->sldev_reconf);
+	if ((bw_bps >> 3) >= ctrl->a_framer->rootfreq)
+		sl = SLIM_SL_PER_SUPERFRAME;
+	else {
+		sl = (bw_bps * (SLIM_CL_PER_SUPERFRAME_DIV8/SLIM_CL_PER_SL/2) +
+			(ctrl->a_framer->rootfreq/2 - 1)) /
+			(ctrl->a_framer->rootfreq/2);
+	}
+	dev_dbg(&ctrl->dev, "request:bw:%d, slots:%d, current:%d\n", bw_bps, sl,
+						sb->cur_msgsl);
+	sb->pending_msgsl = sl;
+	if (commit)
+		ret = slim_reconfigure_now(sb);
+	mutex_unlock(&sb->sldev_reconf);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(slim_reservemsg_bw);
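+
+/*
+ * Worked example for the slot math above (illustrative only, assuming the
+ * common 24.576 MHz root clock for which this driver family programs
+ * a_framer->rootfreq = 3072000): the constant factor works out to
+ * SLIM_CL_PER_SUPERFRAME_DIV8 / SLIM_CL_PER_SL / 2 = 768 / 4 / 2 = 96, so
+ * sl = DIV_ROUND_UP(bw_bps * 96, 1536000) = DIV_ROUND_UP(bw_bps, 16000),
+ * i.e. one slot per 16 kbps (4 cells per superframe at 4000 superframes/s).
+ * Requesting 400000 bps therefore reserves 25 message slots.
+ *
+ * Hypothetical client usage, pairing this call with slim_control_ch() so
+ * that only the last call commits and a single reconfiguration runs:
+ *
+ *	slim_control_ch(sb, grph, SLIM_CH_ACTIVATE, false);
+ *	slim_reservemsg_bw(sb, 400000, true);
+ */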
+
+/*
+ * slim_ctrl_clk_pause: Called by slimbus controller to request the clock to
+ *	be paused, or to be woken up out of clock pause
+ * @ctrl: controller requesting bus to be paused or woken up
+ * @wakeup: Wake this controller up from clock pause.
+ * @restart: Restart time value per spec used for clock pause. This value
+ *	isn't used when the controller is to be woken up.
+ * This API executes the clock pause reconfiguration sequence if wakeup is
+ * false; after that sequence the Slimbus clock is idle and can be disabled
+ * by the controller. If wakeup is true, the controller's wakeup callback is
+ * invoked instead.
+ */
+int slim_ctrl_clk_pause(struct slim_controller *ctrl, bool wakeup, u8 restart)
+{
+	int ret = 0;
+	int i;
+	DEFINE_SLIM_BCAST_TXN(txn, SLIM_MSG_CLK_PAUSE_SEQ_FLG |
+				SLIM_MSG_MC_BEGIN_RECONFIGURATION, 0, 3,
+				NULL, NULL, 0);
+
+	if (!wakeup && restart > SLIM_CLK_UNSPECIFIED)
+		return -EINVAL;
+	mutex_lock(&ctrl->m_ctrl);
+	if (wakeup) {
+		if (ctrl->clk_state == SLIM_CLK_ACTIVE) {
+			mutex_unlock(&ctrl->m_ctrl);
+			return 0;
+		}
+		wait_for_completion(&ctrl->pause_comp);
+		/*
+		 * Slimbus framework will call controller wakeup
+		 * Controller should make sure that it sets active framer
+		 * out of clock pause by doing appropriate setting
+		 */
+		if (ctrl->clk_state == SLIM_CLK_PAUSED && ctrl->wakeup)
+			ret = ctrl->wakeup(ctrl);
+		/*
+		 * If wakeup fails, make sure that next attempt can succeed.
+		 * Since we already consumed pause_comp, complete it so
+		 * that next wakeup isn't blocked forever
+		 */
+		if (!ret)
+			ctrl->clk_state = SLIM_CLK_ACTIVE;
+		else
+			complete(&ctrl->pause_comp);
+		mutex_unlock(&ctrl->m_ctrl);
+		return ret;
+	} else {
+		switch (ctrl->clk_state) {
+		case SLIM_CLK_ENTERING_PAUSE:
+		case SLIM_CLK_PAUSE_FAILED:
+			/*
+			 * If controller is already trying to enter clock pause,
+			 * let it finish.
+			 * In case of error, retry
+			 * In both cases, previous clock pause has signalled
+			 * completion.
+			 */
+			wait_for_completion(&ctrl->pause_comp);
+			/* retry upon failure */
+			if (ctrl->clk_state == SLIM_CLK_PAUSE_FAILED) {
+				ctrl->clk_state = SLIM_CLK_ACTIVE;
+				break;
+			} else {
+				mutex_unlock(&ctrl->m_ctrl);
+				/*
+				 * Signal completion so that wakeup can wait on
+				 * it.
+				 */
+				complete(&ctrl->pause_comp);
+				return 0;
+			}
+			break;
+		case SLIM_CLK_PAUSED:
+			/* already paused */
+			mutex_unlock(&ctrl->m_ctrl);
+			return 0;
+		case SLIM_CLK_ACTIVE:
+		default:
+			break;
+		}
+	}
+	/* Pending response for a message */
+	for (i = 0; i < ctrl->last_tid; i++) {
+		if (ctrl->txnt[i]) {
+			pr_info("slim_clk_pause: txn-rsp for %d pending", i);
+			mutex_unlock(&ctrl->m_ctrl);
+			return -EBUSY;
+		}
+	}
+	ctrl->clk_state = SLIM_CLK_ENTERING_PAUSE;
+	mutex_unlock(&ctrl->m_ctrl);
+
+	mutex_lock(&ctrl->sched.m_reconf);
+	/* Data channels active */
+	if (ctrl->sched.usedslots) {
+		pr_info("slim_clk_pause: data channel active");
+		ret = -EBUSY;
+		goto clk_pause_ret;
+	}
+
+	ret = slim_processtxn(ctrl, &txn, false);
+	if (ret)
+		goto clk_pause_ret;
+
+	txn.mc = SLIM_MSG_CLK_PAUSE_SEQ_FLG | SLIM_MSG_MC_NEXT_PAUSE_CLOCK;
+	txn.len = 1;
+	txn.rl = 4;
+	txn.wbuf = &restart;
+	ret = slim_processtxn(ctrl, &txn, false);
+	if (ret)
+		goto clk_pause_ret;
+
+	txn.mc = SLIM_MSG_CLK_PAUSE_SEQ_FLG | SLIM_MSG_MC_RECONFIGURE_NOW;
+	txn.len = 0;
+	txn.rl = 3;
+	txn.wbuf = NULL;
+	ret = slim_processtxn(ctrl, &txn, false);
+
+clk_pause_ret:
+	if (ret)
+		ctrl->clk_state = SLIM_CLK_PAUSE_FAILED;
+	else
+		ctrl->clk_state = SLIM_CLK_PAUSED;
+	complete(&ctrl->pause_comp);
+	mutex_unlock(&ctrl->sched.m_reconf);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(slim_ctrl_clk_pause);
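+
+/*
+ * For reference, the pause path above broadcasts, in order (each message
+ * tagged with SLIM_MSG_CLK_PAUSE_SEQ_FLG):
+ *	BEGIN_RECONFIGURATION -> NEXT_PAUSE_CLOCK(restart) -> RECONFIGURE_NOW
+ * On failure clk_state becomes SLIM_CLK_PAUSE_FAILED so the next attempt
+ * retries, and pause_comp is completed either way so that a subsequent
+ * wakeup (or retry) never blocks on an already-consumed completion.
+ */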
+
+MODULE_LICENSE("GPL v2");
+MODULE_VERSION("0.1");
+MODULE_DESCRIPTION("Slimbus module");
+MODULE_ALIAS("platform:slimbus");
diff -Nruw linux-4.4.115/drivers/slimbus/slim-msm.c linux-4.4.115-fbx/drivers/slimbus/slim-msm.c
--- linux-4.4.115/drivers/slimbus/slim-msm.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/slimbus/slim-msm.c	2019-01-22 16:16:26.643274841 +0100
@@ -0,0 +1,1618 @@
+/* Copyright (c) 2011-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#include <linux/pm_runtime.h>
+#include <linux/dma-mapping.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/slimbus/slimbus.h>
+#include <linux/msm-sps.h>
+#include <linux/gcd.h>
+#include "slim-msm.h"
+
+int msm_slim_rx_enqueue(struct msm_slim_ctrl *dev, u32 *buf, u8 len)
+{
+	spin_lock(&dev->rx_lock);
+	if ((dev->tail + 1) % MSM_CONCUR_MSG == dev->head) {
+		spin_unlock(&dev->rx_lock);
+		dev_err(dev->dev, "RX QUEUE full!");
+		return -EXFULL;
+	}
+	memcpy((u8 *)dev->rx_msgs[dev->tail], (u8 *)buf, len);
+	dev->tail = (dev->tail + 1) % MSM_CONCUR_MSG;
+	spin_unlock(&dev->rx_lock);
+	return 0;
+}
+
+int msm_slim_rx_dequeue(struct msm_slim_ctrl *dev, u8 *buf)
+{
+	unsigned long flags;
+	spin_lock_irqsave(&dev->rx_lock, flags);
+	if (dev->tail == dev->head) {
+		spin_unlock_irqrestore(&dev->rx_lock, flags);
+		return -ENODATA;
+	}
+	memcpy(buf, (u8 *)dev->rx_msgs[dev->head], 40);
+	dev->head = (dev->head + 1) % MSM_CONCUR_MSG;
+	spin_unlock_irqrestore(&dev->rx_lock, flags);
+	return 0;
+}
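+
+/*
+ * The Rx queue above is a single-producer/single-consumer ring: full when
+ * (tail + 1) % MSM_CONCUR_MSG == head, empty when tail == head, so it holds
+ * at most MSM_CONCUR_MSG - 1 messages. Enqueue takes the plain spinlock
+ * (presumably interrupt context), while dequeue uses the irqsave variant so
+ * it is safe from process context as well.
+ */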
+
+int msm_slim_get_ctrl(struct msm_slim_ctrl *dev)
+{
+#ifdef CONFIG_PM
+	int ref = 0;
+	int ret = pm_runtime_get_sync(dev->dev);
+	if (ret >= 0) {
+		ref = atomic_read(&dev->dev->power.usage_count);
+		if (ref <= 0) {
+			SLIM_WARN(dev, "reference count -ve:%d", ref);
+			ret = -ENODEV;
+		}
+	}
+	return ret;
+#else
+	return -ENODEV;
+#endif
+}
+void msm_slim_put_ctrl(struct msm_slim_ctrl *dev)
+{
+#ifdef CONFIG_PM
+	int ref;
+	pm_runtime_mark_last_busy(dev->dev);
+	ref = atomic_read(&dev->dev->power.usage_count);
+	if (ref <= 0)
+		SLIM_WARN(dev, "reference count mismatch:%d", ref);
+	else
+		pm_runtime_put_sync(dev->dev);
+#endif
+}
+
+irqreturn_t msm_slim_port_irq_handler(struct msm_slim_ctrl *dev, u32 pstat)
+{
+	int i;
+	u32 int_en = readl_relaxed(PGD_THIS_EE(PGD_PORT_INT_EN_EEn,
+							dev->ver));
+	/*
+	 * A different port interrupt than what we enabled; ignore it.
+	 * This may happen if overflow/underflow is reported, but the
+	 * interrupt was disabled due to unavailability of buffers
+	 * provided by the client.
+	 */
+	if ((pstat & int_en) == 0)
+		return IRQ_HANDLED;
+	for (i = 0; i < dev->port_nums; i++) {
+		struct msm_slim_endp *endpoint = &dev->pipes[i];
+		if (pstat & (1 << endpoint->port_b)) {
+			u32 val = readl_relaxed(PGD_PORT(PGD_PORT_STATn,
+					endpoint->port_b, dev->ver));
+			if (val & MSM_PORT_OVERFLOW) {
+				dev->ctrl.ports[i].err =
+						SLIM_P_OVERFLOW;
+			} else if (val & MSM_PORT_UNDERFLOW) {
+				dev->ctrl.ports[i].err =
+					SLIM_P_UNDERFLOW;
+			}
+		}
+	}
+	/*
+	 * Disable port interrupt here. Re-enable when more
+	 * buffers are provided for this port.
+	 */
+	writel_relaxed((int_en & (~pstat)),
+			PGD_THIS_EE(PGD_PORT_INT_EN_EEn,
+					dev->ver));
+	/* clear port interrupts */
+	writel_relaxed(pstat, PGD_THIS_EE(PGD_PORT_INT_CL_EEn,
+							dev->ver));
+	SLIM_INFO(dev, "disabled overflow/underflow for port 0x%x", pstat);
+
+	/*
+	 * Guarantee that port interrupt bit(s) clearing writes go
+	 * through before exiting ISR
+	 */
+	mb();
+	return IRQ_HANDLED;
+}
+
+int msm_slim_init_endpoint(struct msm_slim_ctrl *dev, struct msm_slim_endp *ep)
+{
+	int ret;
+	struct sps_pipe *endpoint;
+	struct sps_connect *config = &ep->config;
+
+	/* Allocate the endpoint */
+	endpoint = sps_alloc_endpoint();
+	if (!endpoint) {
+		dev_err(dev->dev, "sps_alloc_endpoint failed\n");
+		return -ENOMEM;
+	}
+
+	/* Get default connection configuration for an endpoint */
+	ret = sps_get_config(endpoint, config);
+	if (ret) {
+		dev_err(dev->dev, "sps_get_config failed 0x%x\n", ret);
+		goto sps_config_failed;
+	}
+
+	ep->sps = endpoint;
+	return 0;
+
+sps_config_failed:
+	sps_free_endpoint(endpoint);
+	return ret;
+}
+
+void msm_slim_free_endpoint(struct msm_slim_endp *ep)
+{
+	sps_free_endpoint(ep->sps);
+	ep->sps = NULL;
+}
+
+int msm_slim_sps_mem_alloc(
+		struct msm_slim_ctrl *dev, struct sps_mem_buffer *mem, u32 len)
+{
+	dma_addr_t phys;
+
+	mem->size = len;
+	mem->min_size = 0;
+	mem->base = dma_alloc_coherent(dev->dev, mem->size, &phys, GFP_KERNEL);
+
+	if (!mem->base) {
+		dev_err(dev->dev, "dma_alloc_coherent(%d) failed\n", len);
+		return -ENOMEM;
+	}
+
+	mem->phys_base = phys;
+	memset(mem->base, 0x00, mem->size);
+	return 0;
+}
+
+void
+msm_slim_sps_mem_free(struct msm_slim_ctrl *dev, struct sps_mem_buffer *mem)
+{
+	if (mem->base && mem->phys_base)
+		dma_free_coherent(dev->dev, mem->size, mem->base,
+							mem->phys_base);
+	else
+		dev_err(dev->dev, "can't dma free; base or phys_base is NULL\n");
+	mem->size = 0;
+	mem->base = NULL;
+	mem->phys_base = 0;
+}
+
+void msm_hw_set_port(struct msm_slim_ctrl *dev, u8 pipenum, u8 portnum)
+{
+	struct slim_controller *ctrl;
+	struct slim_ch *chan;
+	struct msm_slim_pshpull_parm *parm;
+	u32 set_cfg = 0;
+	struct slim_port_cfg cfg;
+
+	if (!dev) {
+		pr_err("%s:Dev node is null\n", __func__);
+		return;
+	}
+	if (portnum >= dev->port_nums) {
+		pr_err("%s:Invalid port\n", __func__);
+		return;
+	}
+	/* read the port config only after dev and portnum are validated */
+	cfg = dev->ctrl.ports[portnum].cfg;
+	ctrl = &dev->ctrl;
+	chan = ctrl->ports[portnum].ch;
+	parm = &dev->pipes[portnum].psh_pull;
+
+	if (cfg.watermark)
+		set_cfg = (cfg.watermark << 1);
+	else
+		set_cfg = DEF_WATERMARK;
+
+	if (cfg.port_opts & SLIM_OPT_NO_PACK)
+		set_cfg |= DEF_NO_PACK;
+	else
+		set_cfg |= DEF_PACK;
+
+	if (cfg.port_opts & SLIM_OPT_ALIGN_MSB)
+		set_cfg |= DEF_ALIGN_MSB;
+	else
+		set_cfg |= DEF_ALIGN_LSB;
+
+	set_cfg |= ENABLE_PORT;
+
+	writel_relaxed(set_cfg, PGD_PORT(PGD_PORT_CFGn, pipenum, dev->ver));
+	writel_relaxed(DEF_BLKSZ, PGD_PORT(PGD_PORT_BLKn, pipenum, dev->ver));
+	writel_relaxed(DEF_TRANSZ, PGD_PORT(PGD_PORT_TRANn, pipenum, dev->ver));
+
+	if (chan->prot == SLIM_PUSH || chan->prot == SLIM_PULL) {
+		set_cfg = 0;
+		set_cfg |= ((0xFFFF & parm->num_samples)<<16);
+		set_cfg |= (0xFFFF & parm->rpt_period);
+		writel_relaxed(set_cfg, PGD_PORT(PGD_PORT_PSHPLLn,
+							pipenum, dev->ver));
+	}
+	/* Make sure that port registers are updated before returning */
+	mb();
+}
+
+static void msm_slim_disconn_pipe_port(struct msm_slim_ctrl *dev, u8 pn)
+{
+	struct msm_slim_endp *endpoint = &dev->pipes[pn];
+	struct sps_register_event sps_event;
+	u32 int_port = readl_relaxed(PGD_THIS_EE(PGD_PORT_INT_EN_EEn,
+					dev->ver));
+	writel_relaxed(0, PGD_PORT(PGD_PORT_CFGn, (endpoint->port_b),
+					dev->ver));
+	writel_relaxed((int_port & ~(1 << endpoint->port_b)),
+		PGD_THIS_EE(PGD_PORT_INT_EN_EEn, dev->ver));
+	/* Make sure port register is updated */
+	mb();
+	memset(&sps_event, 0, sizeof(sps_event));
+	sps_register_event(endpoint->sps, &sps_event);
+	sps_disconnect(endpoint->sps);
+	dev->pipes[pn].connected = false;
+}
+
+static void msm_slim_calc_pshpull_parm(struct msm_slim_ctrl *dev,
+					u8 pn, struct slim_ch *prop)
+{
+	struct msm_slim_endp *endpoint = &dev->pipes[pn];
+	struct msm_slim_pshpull_parm *parm = &endpoint->psh_pull;
+	int	chan_freq, round_off, divisor, super_freq;
+
+	super_freq = dev->ctrl.a_framer->superfreq;
+
+	if (prop->baser == SLIM_RATE_4000HZ)
+		chan_freq = 4000 * prop->ratem;
+	else if (prop->baser == SLIM_RATE_11025HZ)
+		chan_freq = 11025 * prop->ratem;
+	else
+		chan_freq = prop->baser * prop->ratem;
+
+	/*
+	 * If channel frequency is multiple of super frame frequency
+	 * ISO protocol is suggested
+	 */
+	if (!(chan_freq % super_freq)) {
+		prop->prot = SLIM_HARD_ISO;
+		return;
+	}
+	round_off = DIV_ROUND_UP(chan_freq, super_freq);
+	divisor = gcd(round_off * super_freq, chan_freq);
+	parm->num_samples = chan_freq/divisor;
+	parm->rpt_period = (round_off * super_freq)/divisor;
+}
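+
+/*
+ * Push/pull example for the math above (assuming the typical superfreq of
+ * 4000 superframes/s): a 44.1 kHz channel (11025 * 4) is not a multiple of
+ * 4000, so round_off = 12, divisor = gcd(48000, 44100) = 300, giving
+ * num_samples = 147 and rpt_period = 160 -- i.e. 147 of every 160 segment
+ * opportunities carry a sample (44100/48000).
+ */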
+
+int msm_slim_connect_pipe_port(struct msm_slim_ctrl *dev, u8 pn)
+{
+	struct msm_slim_endp *endpoint;
+	struct sps_connect *cfg;
+	struct slim_ch *prop;
+	u32 stat;
+	int ret;
+
+	if (!dev || pn >= dev->port_nums)
+		return -ENODEV;
+	endpoint = &dev->pipes[pn];
+	cfg = &endpoint->config;
+	prop = dev->ctrl.ports[pn].ch;
+
+	ret = sps_get_config(dev->pipes[pn].sps, cfg);
+	if (ret) {
+		dev_err(dev->dev, "sps pipe-port get config error%x\n", ret);
+		return ret;
+	}
+	cfg->options = SPS_O_DESC_DONE | SPS_O_ERROR |
+				SPS_O_ACK_TRANSFERS | SPS_O_AUTO_ENABLE;
+
+	if (prop->prot == SLIM_PUSH || prop->prot ==  SLIM_PULL)
+		msm_slim_calc_pshpull_parm(dev, pn, prop);
+
+	if (dev->pipes[pn].connected &&
+			dev->ctrl.ports[pn].state == SLIM_P_CFG) {
+		return -EISCONN;
+	} else if (dev->pipes[pn].connected) {
+		writel_relaxed(0, PGD_PORT(PGD_PORT_CFGn,
+			(endpoint->port_b), dev->ver));
+		/* Make sure port disabling goes through */
+		mb();
+		/* Is pipe already connected in desired direction */
+		if ((dev->ctrl.ports[pn].flow == SLIM_SRC &&
+			cfg->mode == SPS_MODE_DEST) ||
+			(dev->ctrl.ports[pn].flow == SLIM_SINK &&
+			 cfg->mode == SPS_MODE_SRC)) {
+			msm_hw_set_port(dev, endpoint->port_b, pn);
+			return 0;
+		}
+		msm_slim_disconn_pipe_port(dev, pn);
+	}
+
+	stat = readl_relaxed(PGD_PORT(PGD_PORT_STATn, endpoint->port_b,
+					dev->ver));
+	if (dev->ctrl.ports[pn].flow == SLIM_SRC) {
+		cfg->destination = dev->bam.hdl;
+		cfg->source = SPS_DEV_HANDLE_MEM;
+		cfg->dest_pipe_index = ((stat & (0xFF << 4)) >> 4);
+		cfg->src_pipe_index = 0;
+		dev_dbg(dev->dev, "flow src:pipe num:%d",
+					cfg->dest_pipe_index);
+		cfg->mode = SPS_MODE_DEST;
+	} else {
+		cfg->source = dev->bam.hdl;
+		cfg->destination = SPS_DEV_HANDLE_MEM;
+		cfg->src_pipe_index = ((stat & (0xFF << 4)) >> 4);
+		cfg->dest_pipe_index = 0;
+		dev_dbg(dev->dev, "flow dest:pipe num:%d",
+					cfg->src_pipe_index);
+		cfg->mode = SPS_MODE_SRC;
+	}
+	/* Space for descriptor FIFOs */
+	ret = msm_slim_sps_mem_alloc(dev, &cfg->desc,
+				MSM_SLIM_DESC_NUM * sizeof(struct sps_iovec));
+	if (ret)
+		pr_err("mem alloc for descr failed:%d", ret);
+	else
+		ret = sps_connect(dev->pipes[pn].sps, cfg);
+
+	if (!ret) {
+		dev->pipes[pn].connected = true;
+		msm_hw_set_port(dev, endpoint->port_b, pn);
+	}
+	return ret;
+}
+
+int msm_alloc_port(struct slim_controller *ctrl, u8 pn)
+{
+	struct msm_slim_ctrl *dev = slim_get_ctrldata(ctrl);
+	struct msm_slim_endp *endpoint;
+	int ret = 0;
+	if (ctrl->ports[pn].req == SLIM_REQ_HALF_DUP ||
+		ctrl->ports[pn].req == SLIM_REQ_MULTI_CH)
+		return -EPROTONOSUPPORT;
+	if (pn >= dev->port_nums)
+		return -ENODEV;
+
+	endpoint = &dev->pipes[pn];
+	ret = msm_slim_init_endpoint(dev, endpoint);
+	dev_dbg(dev->dev, "sps register bam error code:%x\n", ret);
+	return ret;
+}
+
+void msm_dealloc_port(struct slim_controller *ctrl, u8 pn)
+{
+	struct msm_slim_ctrl *dev = slim_get_ctrldata(ctrl);
+	struct msm_slim_endp *endpoint;
+	if (pn >= dev->port_nums)
+		return;
+	endpoint = &dev->pipes[pn];
+	if (dev->pipes[pn].connected) {
+		struct sps_connect *config = &endpoint->config;
+		msm_slim_disconn_pipe_port(dev, pn);
+		msm_slim_sps_mem_free(dev, &config->desc);
+	}
+	if (endpoint->sps)
+		msm_slim_free_endpoint(endpoint);
+}
+
+enum slim_port_err msm_slim_port_xfer_status(struct slim_controller *ctr,
+				u8 pn, phys_addr_t *done_buf, u32 *done_len)
+{
+	struct msm_slim_ctrl *dev = slim_get_ctrldata(ctr);
+	struct sps_iovec sio;
+	int ret;
+	if (done_len)
+		*done_len = 0;
+	if (done_buf)
+		*done_buf = 0;
+	if (!dev->pipes[pn].connected)
+		return SLIM_P_DISCONNECT;
+	ret = sps_get_iovec(dev->pipes[pn].sps, &sio);
+	if (!ret) {
+		if (done_len)
+			*done_len = sio.size;
+		if (done_buf)
+			*done_buf = (phys_addr_t)sio.addr;
+	}
+	dev_dbg(dev->dev, "get iovec returned %d\n", ret);
+	return SLIM_P_INPROGRESS;
+}
+
+static void msm_slim_port_cb(struct sps_event_notify *ev)
+{
+	struct completion *comp = ev->data.transfer.user;
+	struct sps_iovec *iovec = &ev->data.transfer.iovec;
+
+	if (ev->event_id == SPS_EVENT_DESC_DONE) {
+		pr_debug("desc done iovec = (0x%x 0x%x 0x%x)\n",
+			iovec->addr, iovec->size, iovec->flags);
+	} else {
+		pr_err("%s: ERR event %d\n",
+					__func__, ev->event_id);
+	}
+	if (comp)
+		complete(comp);
+}
+
+int msm_slim_port_xfer(struct slim_controller *ctrl, u8 pn, phys_addr_t iobuf,
+			u32 len, struct completion *comp)
+{
+	struct sps_register_event sreg;
+	int ret;
+	struct msm_slim_ctrl *dev = slim_get_ctrldata(ctrl);
+	if (pn >= dev->port_nums)
+		return -ENODEV;
+
+	if (!dev->pipes[pn].connected)
+		return -ENOTCONN;
+
+	sreg.options = (SPS_EVENT_DESC_DONE|SPS_EVENT_ERROR);
+	sreg.mode = SPS_TRIGGER_WAIT;
+	sreg.xfer_done = NULL;
+	sreg.callback = msm_slim_port_cb;
+	sreg.user = NULL;
+	ret = sps_register_event(dev->pipes[pn].sps, &sreg);
+	if (ret) {
+		dev_dbg(dev->dev, "sps register event error:%x\n", ret);
+		return ret;
+	}
+	ret = sps_transfer_one(dev->pipes[pn].sps, iobuf, len, comp,
+				SPS_IOVEC_FLAG_INT);
+	dev_dbg(dev->dev, "sps submit xfer error code:%x\n", ret);
+	if (!ret) {
+		/* Enable port interrupts */
+		u32 int_port = readl_relaxed(PGD_THIS_EE(PGD_PORT_INT_EN_EEn,
+						dev->ver));
+		if (!(int_port & (1 << (dev->pipes[pn].port_b))))
+			writel_relaxed((int_port |
+				(1 << dev->pipes[pn].port_b)),
+				PGD_THIS_EE(PGD_PORT_INT_EN_EEn, dev->ver));
+		/* Make sure that port registers are updated before returning */
+		mb();
+	}
+
+	return ret;
+}
+
+/* Queue up Tx message buffer */
+static int msm_slim_post_tx_msgq(struct msm_slim_ctrl *dev, u8 *buf, int len)
+{
+	int ret;
+	struct msm_slim_endp *endpoint = &dev->tx_msgq;
+	struct sps_mem_buffer *mem = &endpoint->buf;
+	struct sps_pipe *pipe = endpoint->sps;
+	int ix = (buf - (u8 *)mem->base);
+
+	phys_addr_t phys_addr = mem->phys_base + ix;
+
+	for (ret = 0; ret < ((len + 3) >> 2); ret++)
+		pr_debug("BAM TX buf[%d]:0x%x", ret, ((u32 *)buf)[ret]);
+
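+	/*
+	 * (len + 3) & 0xFC rounds len up to the next 4-byte multiple; this is
+	 * safe here since SLIMbus messages are far below the 252-byte limit
+	 * of the 8-bit mask.
+	 */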
+	ret = sps_transfer_one(pipe, phys_addr, ((len + 3) & 0xFC), NULL,
+				SPS_IOVEC_FLAG_EOT);
+	if (ret)
+		dev_err(dev->dev, "transfer_one() failed 0x%x, %d\n", ret, ix);
+
+	return ret;
+}
+
+void msm_slim_tx_msg_return(struct msm_slim_ctrl *dev, int err)
+{
+	struct msm_slim_endp *endpoint = &dev->tx_msgq;
+	struct sps_mem_buffer *mem = &endpoint->buf;
+	struct sps_pipe *pipe = endpoint->sps;
+	struct sps_iovec iovec;
+	int idx, ret = 0;
+	phys_addr_t addr;
+	if (dev->use_tx_msgqs != MSM_MSGQ_ENABLED) {
+		/* use 1 buffer, non-blocking writes are not possible */
+		if (dev->wr_comp[0]) {
+			struct completion *comp = dev->wr_comp[0];
+			dev->wr_comp[0] = NULL;
+			complete(comp);
+		}
+		return;
+	}
+	while (!ret) {
+		ret = sps_get_iovec(pipe, &iovec);
+		addr = DESC_FULL_ADDR(iovec.flags, iovec.addr);
+		if (ret || addr == 0) {
+			if (ret)
+				pr_err("SLIM TX get IOVEC failed:%d", ret);
+			return;
+		}
+		if (addr == dev->bulk.wr_dma) {
+			dma_unmap_single(dev->dev, dev->bulk.wr_dma,
+					 dev->bulk.size, DMA_TO_DEVICE);
+			if (!dev->bulk.cb)
+				SLIM_WARN(dev, "no callback for bulk WR?");
+			else
+				dev->bulk.cb(dev->bulk.ctx, err);
+			dev->bulk.in_progress = false;
+			pm_runtime_mark_last_busy(dev->dev);
+			return;
+		} else if (addr < mem->phys_base ||
+			   (addr > (mem->phys_base +
+				    (MSM_TX_BUFS * SLIM_MSGQ_BUF_LEN)))) {
+			SLIM_WARN(dev, "BUF out of bounds:base:0x%pa, io:0x%pa",
+					&mem->phys_base, &addr);
+			continue;
+		}
+		idx = (int) ((addr - mem->phys_base)
+			/ SLIM_MSGQ_BUF_LEN);
+		if (dev->wr_comp[idx]) {
+			struct completion *comp = dev->wr_comp[idx];
+			dev->wr_comp[idx] = NULL;
+			complete(comp);
+		}
+		if (err) {
+			int i;
+			u32 *addr = (u32 *)mem->base +
+					(idx * (SLIM_MSGQ_BUF_LEN >> 2));
+			/* print the descriptor that resulted in error */
+			for (i = 0; i < (SLIM_MSGQ_BUF_LEN >> 2); i++)
+				SLIM_WARN(dev, "err desc[%d]:0x%x", i, addr[i]);
+		}
+		/* reclaim all packets that were delivered out of order */
+		if (idx != dev->tx_head)
+			SLIM_WARN(dev, "SLIM OUT OF ORDER TX:idx:%d, head:%d",
+				idx, dev->tx_head);
+		dev->tx_head = (dev->tx_head + 1) % MSM_TX_BUFS;
+	}
+}
+
+static u32 *msm_slim_modify_tx_buf(struct msm_slim_ctrl *dev,
+					struct completion *comp)
+{
+	struct msm_slim_endp *endpoint = &dev->tx_msgq;
+	struct sps_mem_buffer *mem = &endpoint->buf;
+	u32 *retbuf = NULL;
+	if ((dev->tx_tail + 1) % MSM_TX_BUFS == dev->tx_head)
+		return NULL;
+
+	retbuf = (u32 *)((u8 *)mem->base +
+				(dev->tx_tail * SLIM_MSGQ_BUF_LEN));
+	dev->wr_comp[dev->tx_tail] = comp;
+	dev->tx_tail = (dev->tx_tail + 1) % MSM_TX_BUFS;
+	return retbuf;
+}
+
+u32 *msm_slim_manage_tx_msgq(struct msm_slim_ctrl *dev, bool getbuf,
+					struct completion *comp, int err)
+{
+	int ret = 0;
+	int retries = 0;
+	u32 *retbuf = NULL;
+	unsigned long flags;
+
+	spin_lock_irqsave(&dev->tx_buf_lock, flags);
+	if (!getbuf) {
+		msm_slim_tx_msg_return(dev, err);
+		spin_unlock_irqrestore(&dev->tx_buf_lock, flags);
+		return NULL;
+	}
+
+	retbuf = msm_slim_modify_tx_buf(dev, comp);
+	if (retbuf) {
+		spin_unlock_irqrestore(&dev->tx_buf_lock, flags);
+		return retbuf;
+	}
+
+	do {
+		msm_slim_tx_msg_return(dev, err);
+		retbuf = msm_slim_modify_tx_buf(dev, comp);
+		if (!retbuf)
+			ret = -EAGAIN;
+		else {
+			if (retries > 0)
+				SLIM_INFO(dev, "SLIM TX retrieved:%d retries",
+							retries);
+			spin_unlock_irqrestore(&dev->tx_buf_lock, flags);
+			return retbuf;
+		}
+
+		/*
+		 * superframe size will vary based on clock gear
+		 * 1 superframe will consume at least 1 message
+		 * if HW is in good condition. With MX_RETRIES,
+		 * make sure we wait for ~2 superframes
+		 * before deciding HW couldn't process descriptors
+		 */
+		udelay(50);
+		retries++;
+	} while (ret && (retries < INIT_MX_RETRIES));
+
+	spin_unlock_irqrestore(&dev->tx_buf_lock, flags);
+	return NULL;
+}
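+
+/*
+ * Back-of-the-envelope for the retry loop above: at 4000 superframes/s one
+ * superframe lasts 250us, so 50us per retry with INIT_MX_RETRIES on the
+ * order of 10 yields the ~2 superframes of waiting the in-line comment
+ * describes (the exact budget depends on INIT_MX_RETRIES in slim-msm.h).
+ */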
+
+int msm_send_msg_buf(struct msm_slim_ctrl *dev, u32 *buf, u8 len, u32 tx_reg)
+{
+	if (dev->use_tx_msgqs != MSM_MSGQ_ENABLED) {
+		int i;
+		for (i = 0; i < (len + 3) >> 2; i++) {
+			dev_dbg(dev->dev, "AHB TX data:0x%x\n", buf[i]);
+			writel_relaxed(buf[i], dev->base + tx_reg + (i * 4));
+		}
+		/* Guarantee that message is sent before returning */
+		mb();
+		return 0;
+	}
+	return msm_slim_post_tx_msgq(dev, (u8 *)buf, len);
+}
+
+u32 *msm_get_msg_buf(struct msm_slim_ctrl *dev, int len,
+			struct completion *comp)
+{
+	/*
+	 * Currently we block a transaction until the current one completes.
+	 * In case we need multiple transactions, use message Q
+	 */
+	if (dev->use_tx_msgqs != MSM_MSGQ_ENABLED) {
+		dev->wr_comp[0] = comp;
+		return dev->tx_buf;
+	}
+
+	return msm_slim_manage_tx_msgq(dev, true, comp, 0);
+}
+
+static void
+msm_slim_rx_msgq_event(struct msm_slim_ctrl *dev, struct sps_event_notify *ev)
+{
+	if (ev->event_id == SPS_EVENT_DESC_DONE)
+		complete(&dev->rx_msgq_notify);
+	else
+		dev_err(dev->dev, "%s: unknown event %d\n",
+					__func__, ev->event_id);
+}
+
+static void
+msm_slim_handle_rx(struct msm_slim_ctrl *dev, struct sps_event_notify *ev)
+{
+	int ret = 0;
+	u32 mc = 0;
+	u32 mt = 0;
+	u8 msg_len = 0;
+
+	if (ev->event_id != SPS_EVENT_EOT) {
+		dev_err(dev->dev, "%s: unknown event %d\n",
+					__func__, ev->event_id);
+		return;
+	}
+
+	do {
+		ret = msm_slim_rx_msgq_get(dev, dev->current_rx_buf,
+					   dev->current_count);
+		if (ret == -ENODATA) {
+			return;
+		} else if (ret) {
+			SLIM_ERR(dev, "rx_msgq_get() failed 0x%x\n",
+								ret);
+			return;
+		}
+
+		/* Traverse first byte of message for message length */
+		if (dev->current_count++ == 0) {
+			msg_len = *(dev->current_rx_buf) & 0x1F;
+			mt = (*(dev->current_rx_buf) >> 5) & 0x7;
+			mc = (*(dev->current_rx_buf) >> 8) & 0xff;
+			dev_dbg(dev->dev, "MC: %x, MT: %x\n", mc, mt);
+		}
+
+		msg_len = (msg_len < 4) ? 0 : (msg_len - 4);
+
+		if (!msg_len) {
+			dev->rx_slim(dev, (u8 *)dev->current_rx_buf);
+			dev->current_count = 0;
+		}
+
+	} while (1);
+}
+
+static void msm_slim_rx_msgq_cb(struct sps_event_notify *notify)
+{
+	struct msm_slim_ctrl *dev = (struct msm_slim_ctrl *)notify->user;
+	/* is this manager controller or NGD controller? */
+	if (dev->ctrl.wakeup)
+		msm_slim_rx_msgq_event(dev, notify);
+	else
+		msm_slim_handle_rx(dev, notify);
+}
+
+/* Queue up Rx message buffer */
+static int msm_slim_post_rx_msgq(struct msm_slim_ctrl *dev, int ix)
+{
+	int ret;
+	struct msm_slim_endp *endpoint = &dev->rx_msgq;
+	struct sps_mem_buffer *mem = &endpoint->buf;
+	struct sps_pipe *pipe = endpoint->sps;
+
+	/* Rx message queue buffers are 4 bytes in length */
+	u8 *virt_addr = mem->base + (4 * ix);
+	phys_addr_t phys_addr = mem->phys_base + (4 * ix);
+
+	ret = sps_transfer_one(pipe, phys_addr, 4, virt_addr, 0);
+	if (ret)
+		dev_err(dev->dev, "transfer_one() failed 0x%x, %d\n", ret, ix);
+
+	return ret;
+}
+
+int msm_slim_rx_msgq_get(struct msm_slim_ctrl *dev, u32 *data, int offset)
+{
+	struct msm_slim_endp *endpoint = &dev->rx_msgq;
+	struct sps_mem_buffer *mem = &endpoint->buf;
+	struct sps_pipe *pipe = endpoint->sps;
+	struct sps_iovec iovec;
+	phys_addr_t addr;
+	int index;
+	int ret;
+
+	ret = sps_get_iovec(pipe, &iovec);
+	if (ret) {
+		dev_err(dev->dev, "sps_get_iovec() failed 0x%x\n", ret);
+		goto err_exit;
+	}
+
+	addr = DESC_FULL_ADDR(iovec.flags, iovec.addr);
+	pr_debug("iovec = (0x%x 0x%x 0x%x)\n",
+		iovec.addr, iovec.size, iovec.flags);
+
+	/* no more descriptors */
+	if (!ret && (iovec.addr == 0) && (iovec.size == 0)) {
+		ret = -ENODATA;
+		goto err_exit;
+	}
+
+	/* Calculate buffer index */
+	index = (addr - mem->phys_base) / 4;
+	*(data + offset) = *((u32 *)mem->base + index);
+
+	pr_debug("buf = 0x%p, data = 0x%x\n", (u32 *)mem->base + index, *data);
+
+	/* Add buffer back to the queue */
+	(void)msm_slim_post_rx_msgq(dev, index);
+
+err_exit:
+	return ret;
+}
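+
+/*
+ * Each Rx descriptor covers one 4-byte buffer, so index recovery is simply
+ * (addr - phys_base) / 4. Re-posting the buffer right away keeps the ring
+ * of MSM_SLIM_DESC_NUM buffers full while the caller assembles multi-word
+ * messages one 4-byte chunk at a time via the offset argument.
+ */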
+
+int msm_slim_connect_endp(struct msm_slim_ctrl *dev,
+				struct msm_slim_endp *endpoint)
+{
+	int i, ret;
+	struct sps_register_event sps_error_event; /* SPS_ERROR */
+	struct sps_register_event sps_descr_event; /* DESCR_DONE */
+	struct sps_connect *config = &endpoint->config;
+	unsigned long flags;
+
+	ret = sps_connect(endpoint->sps, config);
+	if (ret) {
+		dev_err(dev->dev, "sps_connect failed 0x%x\n", ret);
+		return ret;
+	}
+
+	memset(&sps_descr_event, 0x00, sizeof(sps_descr_event));
+
+	if (endpoint == &dev->rx_msgq) {
+		sps_descr_event.mode = SPS_TRIGGER_CALLBACK;
+		sps_descr_event.options = SPS_O_EOT;
+		sps_descr_event.user = (void *)dev;
+		sps_descr_event.callback = msm_slim_rx_msgq_cb;
+		sps_descr_event.xfer_done = NULL;
+
+		ret = sps_register_event(endpoint->sps, &sps_descr_event);
+		if (ret) {
+			dev_err(dev->dev, "sps_connect() failed 0x%x\n", ret);
+			goto sps_reg_event_failed;
+		}
+	}
+
+	/* Register callback for errors */
+	memset(&sps_error_event, 0x00, sizeof(sps_error_event));
+	sps_error_event.mode = SPS_TRIGGER_CALLBACK;
+	sps_error_event.options = SPS_O_ERROR;
+	sps_error_event.user = (void *)dev;
+	sps_error_event.callback = msm_slim_rx_msgq_cb;
+
+	ret = sps_register_event(endpoint->sps, &sps_error_event);
+	if (ret) {
+		dev_err(dev->dev, "sps_connect() failed 0x%x\n", ret);
+		goto sps_reg_event_failed;
+	}
+
+	/*
+	 * Call transfer_one for each 4-byte buffer
+	 * Use (buf->size/4) - 1 for the number of buffer to post
+	 */
+
+	if (endpoint == &dev->rx_msgq) {
+		/* Setup the transfer */
+		for (i = 0; i < (MSM_SLIM_DESC_NUM - 1); i++) {
+			ret = msm_slim_post_rx_msgq(dev, i);
+			if (ret) {
+				dev_err(dev->dev,
+					"post_rx_msgq() failed 0x%x\n", ret);
+				goto sps_transfer_failed;
+			}
+		}
+		dev->use_rx_msgqs = MSM_MSGQ_ENABLED;
+	} else {
+		spin_lock_irqsave(&dev->tx_buf_lock, flags);
+		dev->tx_tail = 0;
+		dev->tx_head = 0;
+		for (i = 0; i < MSM_TX_BUFS; i++)
+			dev->wr_comp[i] = NULL;
+		spin_unlock_irqrestore(&dev->tx_buf_lock, flags);
+		dev->use_tx_msgqs = MSM_MSGQ_ENABLED;
+	}
+
+	return 0;
+sps_transfer_failed:
+	memset(&sps_error_event, 0x00, sizeof(sps_error_event));
+	sps_register_event(endpoint->sps, &sps_error_event);
+sps_reg_event_failed:
+	sps_disconnect(endpoint->sps);
+	return ret;
+}
+
+static int msm_slim_init_rx_msgq(struct msm_slim_ctrl *dev, u32 pipe_reg)
+{
+	int ret;
+	u32 pipe_offset;
+	struct msm_slim_endp *endpoint = &dev->rx_msgq;
+	struct sps_connect *config = &endpoint->config;
+	struct sps_mem_buffer *descr = &config->desc;
+	struct sps_mem_buffer *mem = &endpoint->buf;
+
+	if (dev->use_rx_msgqs == MSM_MSGQ_DISABLED)
+		return 0;
+
+	/* Allocate the endpoint */
+	ret = msm_slim_init_endpoint(dev, endpoint);
+	if (ret) {
+		dev_err(dev->dev, "init_endpoint failed 0x%x\n", ret);
+		goto sps_init_endpoint_failed;
+	}
+
+	/* Get the pipe indices for the message queues */
+	pipe_offset = (readl_relaxed(dev->base + pipe_reg) & 0xfc) >> 2;
+	dev_dbg(dev->dev, "Message queue pipe offset %d\n", pipe_offset);
+
+	config->mode = SPS_MODE_SRC;
+	config->source = dev->bam.hdl;
+	config->destination = SPS_DEV_HANDLE_MEM;
+	config->src_pipe_index = pipe_offset;
+	config->options = SPS_O_EOT | SPS_O_ERROR |
+				SPS_O_ACK_TRANSFERS | SPS_O_AUTO_ENABLE;
+
+	/* Allocate memory for the FIFO descriptors */
+	ret = msm_slim_sps_mem_alloc(dev, descr,
+				MSM_SLIM_DESC_NUM * sizeof(struct sps_iovec));
+	if (ret) {
+		dev_err(dev->dev, "unable to allocate SPS descriptors\n");
+		goto alloc_descr_failed;
+	}
+
+	/* Allocate memory for the message buffer(s), N descrs, 4-byte mesg */
+	ret = msm_slim_sps_mem_alloc(dev, mem, MSM_SLIM_DESC_NUM * 4);
+	if (ret) {
+		dev_err(dev->dev, "dma_alloc_coherent failed\n");
+		goto alloc_buffer_failed;
+	}
+
+	ret = msm_slim_connect_endp(dev, endpoint);
+
+	if (!ret)
+		return 0;
+
+	msm_slim_sps_mem_free(dev, mem);
+alloc_buffer_failed:
+	msm_slim_sps_mem_free(dev, descr);
+alloc_descr_failed:
+	msm_slim_free_endpoint(endpoint);
+sps_init_endpoint_failed:
+	dev->use_rx_msgqs = MSM_MSGQ_DISABLED;
+	return ret;
+}
+
+static int msm_slim_init_tx_msgq(struct msm_slim_ctrl *dev, u32 pipe_reg)
+{
+	int ret;
+	u32 pipe_offset;
+	struct msm_slim_endp *endpoint = &dev->tx_msgq;
+	struct sps_connect *config = &endpoint->config;
+	struct sps_mem_buffer *descr = &config->desc;
+	struct sps_mem_buffer *mem = &endpoint->buf;
+
+	if (dev->use_tx_msgqs == MSM_MSGQ_DISABLED)
+		return 0;
+
+	/* Allocate the endpoint */
+	ret = msm_slim_init_endpoint(dev, endpoint);
+	if (ret) {
+		dev_err(dev->dev, "init_endpoint failed 0x%x\n", ret);
+		goto sps_init_endpoint_failed;
+	}
+
+	/* Get the pipe indices for the message queues */
+	pipe_offset = (readl_relaxed(dev->base + pipe_reg) & 0xfc) >> 2;
+	pipe_offset += 1;
+	dev_dbg(dev->dev, "TX Message queue pipe offset %d\n", pipe_offset);
+
+	config->mode = SPS_MODE_DEST;
+	config->source = SPS_DEV_HANDLE_MEM;
+	config->destination = dev->bam.hdl;
+	config->dest_pipe_index = pipe_offset;
+	config->src_pipe_index = 0;
+	config->options = SPS_O_ERROR | SPS_O_NO_Q |
+				SPS_O_ACK_TRANSFERS | SPS_O_AUTO_ENABLE;
+
+	/* Desc and TX buf are circular queues */
+	/* Allocate memory for the FIFO descriptors */
+	ret = msm_slim_sps_mem_alloc(dev, descr,
+				(MSM_TX_BUFS + 1) * sizeof(struct sps_iovec));
+	if (ret) {
+		dev_err(dev->dev, "unable to allocate SPS descriptors\n");
+		goto alloc_descr_failed;
+	}
+
+	/* Allocate TX buffer from which descriptors are created */
+	ret = msm_slim_sps_mem_alloc(dev, mem, ((MSM_TX_BUFS + 1) *
+					SLIM_MSGQ_BUF_LEN));
+	if (ret) {
+		dev_err(dev->dev, "dma_alloc_coherent failed\n");
+		goto alloc_buffer_failed;
+	}
+	ret = msm_slim_connect_endp(dev, endpoint);
+
+	if (!ret)
+		return 0;
+
+	msm_slim_sps_mem_free(dev, mem);
+alloc_buffer_failed:
+	msm_slim_sps_mem_free(dev, descr);
+alloc_descr_failed:
+	msm_slim_free_endpoint(endpoint);
+sps_init_endpoint_failed:
+	dev->use_tx_msgqs = MSM_MSGQ_DISABLED;
+	return ret;
+}
+
+static int msm_slim_data_port_assign(struct msm_slim_ctrl *dev)
+{
+	int i, data_ports = 0;
+	/* First 7 bits are for message Qs */
+	for (i = 7; i < 32; i++) {
+		/* Check what pipes are owned by Apps. */
+		if ((dev->pdata.apps_pipes >> i) & 0x1) {
+			if (dev->pipes)
+				dev->pipes[data_ports].port_b = i - 7;
+			data_ports++;
+		}
+	}
+	return data_ports;
+}
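+
+/*
+ * Example with the Krait Apps mask used below (0x3F000007): bits 0-2 fall
+ * in the message-queue range and are skipped; bits 24-29 become data ports
+ * with port_b 17-22, so this function returns 6.
+ */
+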
+/* Registers BAM h/w resource with SPS driver and initializes msgq endpoints */
+int msm_slim_sps_init(struct msm_slim_ctrl *dev, struct resource *bam_mem,
+			u32 pipe_reg, bool remote)
+{
+	int ret;
+	unsigned long bam_handle;
+	struct sps_bam_props bam_props = {0};
+
+	static struct sps_bam_sec_config_props sec_props = {
+		.ees = {
+			[0] = {		/* LPASS */
+				.vmid = 0,
+				.pipe_mask = 0xFFFF98,
+			},
+			[1] = {		/* Krait Apps */
+				.vmid = 1,
+				.pipe_mask = 0x3F000007,
+			},
+			[2] = {		/* Modem */
+				.vmid = 2,
+				.pipe_mask = 0x00000060,
+			},
+		},
+	};
+
+	if (dev->bam.hdl) {
+		bam_handle = dev->bam.hdl;
+		goto init_pipes;
+	}
+	bam_props.ee = dev->ee;
+	bam_props.virt_addr = dev->bam.base;
+	bam_props.phys_addr = bam_mem->start;
+	bam_props.irq = dev->bam.irq;
+	if (!remote) {
+		bam_props.manage = SPS_BAM_MGR_LOCAL;
+		bam_props.sec_config = SPS_BAM_SEC_DO_CONFIG;
+	} else {
+		bam_props.manage = SPS_BAM_MGR_DEVICE_REMOTE |
+					SPS_BAM_MGR_MULTI_EE;
+		bam_props.sec_config = SPS_BAM_SEC_DO_NOT_CONFIG;
+	}
+	bam_props.summing_threshold = MSM_SLIM_PERF_SUMM_THRESHOLD;
+
+	bam_props.p_sec_config_props = &sec_props;
+
+	bam_props.options = SPS_O_DESC_DONE | SPS_O_ERROR |
+				SPS_O_ACK_TRANSFERS | SPS_O_AUTO_ENABLE;
+
+	/* override apps channel pipes if specified in platform-data or DT */
+	if (dev->pdata.apps_pipes)
+		sec_props.ees[dev->ee].pipe_mask = dev->pdata.apps_pipes;
+
+	/* Register the BAM device with the SPS driver */
+	ret = sps_register_bam_device(&bam_props, &bam_handle);
+	if (ret) {
+		dev_err(dev->dev, "disabling BAM: reg-bam failed 0x%x\n", ret);
+		dev->use_rx_msgqs = MSM_MSGQ_DISABLED;
+		dev->use_tx_msgqs = MSM_MSGQ_DISABLED;
+		return ret;
+	}
+	dev->bam.hdl = bam_handle;
+	dev_dbg(dev->dev, "SLIM BAM registered, handle = 0x%lx\n", bam_handle);
+
+init_pipes:
+	if (dev->port_nums)
+		goto init_msgq;
+
+	/* get the # of ports first */
+	dev->port_nums = msm_slim_data_port_assign(dev);
+	if (dev->port_nums && !dev->pipes) {
+		dev->pipes = kzalloc(sizeof(struct msm_slim_endp) *
+					dev->port_nums,
+					GFP_KERNEL);
+		if (!dev->pipes) {
+			/* kzalloc returns NULL (not ERR_PTR) on failure */
+			dev_err(dev->dev, "no memory for data ports");
+			sps_deregister_bam_device(bam_handle);
+			return -ENOMEM;
+		}
+		/* assign the ports now */
+		msm_slim_data_port_assign(dev);
+	}
+
+init_msgq:
+	ret = msm_slim_init_rx_msgq(dev, pipe_reg);
+	if (ret)
+		dev_err(dev->dev, "msm_slim_init_rx_msgq failed 0x%x\n", ret);
+	if (ret && bam_handle)
+		dev->use_rx_msgqs = MSM_MSGQ_DISABLED;
+
+	ret = msm_slim_init_tx_msgq(dev, pipe_reg);
+	if (ret)
+		dev_err(dev->dev, "msm_slim_init_tx_msgq failed 0x%x\n", ret);
+	if (ret && bam_handle)
+		dev->use_tx_msgqs = MSM_MSGQ_DISABLED;
+
+	/*
+	 * If command interface for BAM fails, register interface is used for
+	 * commands.
+	 * It is possible that other BAM usecases (e.g. apps channels) will
+	 * still need BAM. Since BAM is successfully initialized, we can
+	 * continue using it for non-command use cases.
+	 */
+
+	return 0;
+}
+
+void msm_slim_disconnect_endp(struct msm_slim_ctrl *dev,
+					struct msm_slim_endp *endpoint,
+					enum msm_slim_msgq *msgq_flag)
+{
+	if (*msgq_flag >= MSM_MSGQ_ENABLED) {
+		sps_disconnect(endpoint->sps);
+		*msgq_flag = MSM_MSGQ_RESET;
+	}
+}
+
+static int msm_slim_discard_rx_data(struct msm_slim_ctrl *dev,
+					struct msm_slim_endp *endpoint)
+{
+	struct sps_iovec sio;
+	int desc_num = 0, ret = 0;
+
+	ret = sps_get_unused_desc_num(endpoint->sps, &desc_num);
+	if (ret) {
+		dev_err(dev->dev, "sps_get_iovec() failed 0x%x\n", ret);
+		return ret;
+	}
+	while (desc_num--)
+		sps_get_iovec(endpoint->sps, &sio);
+	return ret;
+}
+
+static void msm_slim_remove_ep(struct msm_slim_ctrl *dev,
+					struct msm_slim_endp *endpoint,
+					enum msm_slim_msgq *msgq_flag)
+{
+	struct sps_connect *config = &endpoint->config;
+	struct sps_mem_buffer *descr = &config->desc;
+	struct sps_mem_buffer *mem = &endpoint->buf;
+
+	msm_slim_sps_mem_free(dev, mem);
+	msm_slim_sps_mem_free(dev, descr);
+	msm_slim_free_endpoint(endpoint);
+}
+
+void msm_slim_deinit_ep(struct msm_slim_ctrl *dev,
+				struct msm_slim_endp *endpoint,
+				enum msm_slim_msgq *msgq_flag)
+{
+	int ret = 0;
+	struct sps_connect *config = &endpoint->config;
+
+	if (*msgq_flag == MSM_MSGQ_ENABLED) {
+		if (config->mode == SPS_MODE_SRC) {
+			ret = msm_slim_discard_rx_data(dev, endpoint);
+			if (ret)
+				SLIM_WARN(dev, "discarding Rx data failed\n");
+		}
+		msm_slim_disconnect_endp(dev, endpoint, msgq_flag);
+		msm_slim_remove_ep(dev, endpoint, msgq_flag);
+	}
+}
+
+static void msm_slim_sps_unreg_event(struct sps_pipe *sps)
+{
+	struct sps_register_event sps_event;
+	memset(&sps_event, 0x00, sizeof(sps_event));
+	/* Disable interrupt and signal notification for Rx/Tx pipe */
+	sps_register_event(sps, &sps_event);
+}
+
+void msm_slim_sps_exit(struct msm_slim_ctrl *dev, bool dereg)
+{
+	int i;
+
+	if (dev->use_rx_msgqs >= MSM_MSGQ_ENABLED)
+		msm_slim_sps_unreg_event(dev->rx_msgq.sps);
+	if (dev->use_tx_msgqs >= MSM_MSGQ_ENABLED)
+		msm_slim_sps_unreg_event(dev->tx_msgq.sps);
+
+	for (i = 0; i < dev->port_nums; i++) {
+		if (dev->pipes[i].connected)
+			msm_slim_disconn_pipe_port(dev, i);
+	}
+	if (dereg) {
+		for (i = 0; i < dev->port_nums; i++) {
+			if (dev->pipes[i].connected)
+				msm_dealloc_port(&dev->ctrl, i);
+		}
+		sps_deregister_bam_device(dev->bam.hdl);
+		dev->bam.hdl = 0L;
+		kfree(dev->pipes);
+		dev->pipes = NULL;
+	}
+	dev->port_nums = 0;
+}
+
+/* Slimbus QMI Messaging */
+#define SLIMBUS_QMI_SELECT_INSTANCE_REQ_V01 0x0020
+#define SLIMBUS_QMI_SELECT_INSTANCE_RESP_V01 0x0020
+#define SLIMBUS_QMI_POWER_REQ_V01 0x0021
+#define SLIMBUS_QMI_POWER_RESP_V01 0x0021
+#define SLIMBUS_QMI_CHECK_FRAMER_STATUS_REQ 0x0022
+#define SLIMBUS_QMI_CHECK_FRAMER_STATUS_RESP 0x0022
+
+#define SLIMBUS_QMI_POWER_REQ_MAX_MSG_LEN 7
+#define SLIMBUS_QMI_POWER_RESP_MAX_MSG_LEN 7
+#define SLIMBUS_QMI_SELECT_INSTANCE_REQ_MAX_MSG_LEN 14
+#define SLIMBUS_QMI_SELECT_INSTANCE_RESP_MAX_MSG_LEN 7
+#define SLIMBUS_QMI_CHECK_FRAMER_STAT_RESP_MAX_MSG_LEN 7
+
+enum slimbus_mode_enum_type_v01 {
+	/* To force a 32 bit signed enum. Do not change or use */
+	SLIMBUS_MODE_ENUM_TYPE_MIN_ENUM_VAL_V01 = INT_MIN,
+	SLIMBUS_MODE_SATELLITE_V01 = 1,
+	SLIMBUS_MODE_MASTER_V01 = 2,
+	SLIMBUS_MODE_ENUM_TYPE_MAX_ENUM_VAL_V01 = INT_MAX,
+};
+
+enum slimbus_pm_enum_type_v01 {
+	/* To force a 32 bit signed enum. Do not change or use */
+	SLIMBUS_PM_ENUM_TYPE_MIN_ENUM_VAL_V01 = INT_MIN,
+	SLIMBUS_PM_INACTIVE_V01 = 1,
+	SLIMBUS_PM_ACTIVE_V01 = 2,
+	SLIMBUS_PM_ENUM_TYPE_MAX_ENUM_VAL_V01 = INT_MAX,
+};
+
+struct slimbus_select_inst_req_msg_v01 {
+	/* Mandatory */
+	/* Hardware Instance Selection */
+	uint32_t instance;
+
+	/* Optional */
+	/* Optional Mode Request Operation */
+	/* Must be set to true if mode is being passed */
+	uint8_t mode_valid;
+	enum slimbus_mode_enum_type_v01 mode;
+};
+
+struct slimbus_select_inst_resp_msg_v01 {
+	/* Mandatory */
+	/* Result Code */
+	struct qmi_response_type_v01 resp;
+};
+
+struct slimbus_power_req_msg_v01 {
+	/* Mandatory */
+	/* Power Request Operation */
+	enum slimbus_pm_enum_type_v01 pm_req;
+};
+
+struct slimbus_power_resp_msg_v01 {
+	/* Mandatory */
+	/* Result Code */
+	struct qmi_response_type_v01 resp;
+};
+
+struct slimbus_chkfrm_resp_msg {
+	/* Mandatory */
+	/* Result Code */
+	struct qmi_response_type_v01 resp;
+};
+
+static struct elem_info slimbus_select_inst_req_msg_v01_ei[] = {
+	{
+		.data_type = QMI_UNSIGNED_4_BYTE,
+		.elem_len  = 1,
+		.elem_size = sizeof(uint32_t),
+		.is_array  = NO_ARRAY,
+		.tlv_type  = 0x01,
+		.offset    = offsetof(struct slimbus_select_inst_req_msg_v01,
+				      instance),
+		.ei_array  = NULL,
+	},
+	{
+		.data_type = QMI_OPT_FLAG,
+		.elem_len  = 1,
+		.elem_size = sizeof(uint8_t),
+		.is_array  = NO_ARRAY,
+		.tlv_type  = 0x10,
+		.offset    = offsetof(struct slimbus_select_inst_req_msg_v01,
+				      mode_valid),
+		.ei_array  = NULL,
+	},
+	{
+		.data_type = QMI_UNSIGNED_4_BYTE,
+		.elem_len  = 1,
+		.elem_size = sizeof(enum slimbus_mode_enum_type_v01),
+		.is_array  = NO_ARRAY,
+		.tlv_type  = 0x10,
+		.offset    = offsetof(struct slimbus_select_inst_req_msg_v01,
+				      mode),
+		.ei_array  = NULL,
+	},
+	{
+		.data_type = QMI_EOTI,
+		.elem_len  = 0,
+		.elem_size = 0,
+		.is_array  = NO_ARRAY,
+		.tlv_type  = 0x00,
+		.offset    = 0,
+		.ei_array  = NULL,
+	},
+};
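+/*
+ * Illustration only (assuming the standard QMI IDL wire format of a
+ * 1-byte TLV type followed by a 2-byte little-endian length and the
+ * value): a select-instance request with instance = 0 and
+ * mode = SLIMBUS_MODE_SATELLITE_V01 (1) would encode as
+ *   01 04 00 00 00 00 00   (TLV 0x01: instance)
+ *   10 04 00 01 00 00 00   (TLV 0x10: mode, emitted since mode_valid is set)
+ * i.e. 14 bytes, matching SLIMBUS_QMI_SELECT_INSTANCE_REQ_MAX_MSG_LEN.
+ * The QMI_OPT_FLAG element occupies no wire bytes; it only controls
+ * whether the optional mode TLV is emitted.
+ */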
+
+static struct elem_info slimbus_select_inst_resp_msg_v01_ei[] = {
+	{
+		.data_type = QMI_STRUCT,
+		.elem_len  = 1,
+		.elem_size = sizeof(struct qmi_response_type_v01),
+		.is_array  = NO_ARRAY,
+		.tlv_type  = 0x02,
+		.offset    = offsetof(struct slimbus_select_inst_resp_msg_v01,
+				      resp),
+		.ei_array  = get_qmi_response_type_v01_ei(),
+	},
+	{
+		.data_type = QMI_EOTI,
+		.elem_len  = 0,
+		.elem_size = 0,
+		.is_array  = NO_ARRAY,
+		.tlv_type  = 0x00,
+		.offset    = 0,
+		.ei_array  = NULL,
+	},
+};
+
+static struct elem_info slimbus_power_req_msg_v01_ei[] = {
+	{
+		.data_type = QMI_UNSIGNED_4_BYTE,
+		.elem_len  = 1,
+		.elem_size = sizeof(enum slimbus_pm_enum_type_v01),
+		.is_array  = NO_ARRAY,
+		.tlv_type  = 0x01,
+		.offset    = offsetof(struct slimbus_power_req_msg_v01, pm_req),
+		.ei_array  = NULL,
+	},
+	{
+		.data_type = QMI_EOTI,
+		.elem_len  = 0,
+		.elem_size = 0,
+		.is_array  = NO_ARRAY,
+		.tlv_type  = 0x00,
+		.offset    = 0,
+		.ei_array  = NULL,
+	},
+};
+
+static struct elem_info slimbus_power_resp_msg_v01_ei[] = {
+	{
+		.data_type = QMI_STRUCT,
+		.elem_len  = 1,
+		.elem_size = sizeof(struct qmi_response_type_v01),
+		.is_array  = NO_ARRAY,
+		.tlv_type  = 0x02,
+		.offset    = offsetof(struct slimbus_power_resp_msg_v01, resp),
+		.ei_array  = get_qmi_response_type_v01_ei(),
+	},
+	{
+		.data_type = QMI_EOTI,
+		.elem_len  = 0,
+		.elem_size = 0,
+		.is_array  = NO_ARRAY,
+		.tlv_type  = 0x00,
+		.offset    = 0,
+		.ei_array  = NULL,
+	},
+};
+
+static struct elem_info slimbus_chkfrm_resp_msg_v01_ei[] = {
+	{
+		.data_type = QMI_STRUCT,
+		.elem_len  = 1,
+		.elem_size = sizeof(struct qmi_response_type_v01),
+		.is_array  = NO_ARRAY,
+		.tlv_type  = 0x02,
+		.offset    = offsetof(struct slimbus_chkfrm_resp_msg, resp),
+		.ei_array  = get_qmi_response_type_v01_ei(),
+	},
+	{
+		.data_type = QMI_EOTI,
+		.elem_len  = 0,
+		.elem_size = 0,
+		.is_array  = NO_ARRAY,
+		.tlv_type  = 0x00,
+		.offset    = 0,
+		.ei_array  = NULL,
+	},
+};
+
+static void msm_slim_qmi_recv_msg(struct kthread_work *work)
+{
+	int rc;
+	struct msm_slim_qmi *qmi =
+			container_of(work, struct msm_slim_qmi, kwork);
+
+	/* Drain all packets received */
+	do {
+		rc = qmi_recv_msg(qmi->handle);
+	} while (rc == 0);
+	if (rc != -ENOMSG)
+		pr_err("%s: Error receiving QMI message:%d\n", __func__, rc);
+}
+
+static void msm_slim_qmi_notify(struct qmi_handle *handle,
+				enum qmi_event_type event, void *notify_priv)
+{
+	struct msm_slim_ctrl *dev = notify_priv;
+	struct msm_slim_qmi *qmi = &dev->qmi;
+
+	switch (event) {
+	case QMI_RECV_MSG:
+		queue_kthread_work(&qmi->kworker, &qmi->kwork);
+		break;
+	default:
+		break;
+	}
+}
+
+static const char *get_qmi_error(struct qmi_response_type_v01 *r)
+{
+	if (r->result == QMI_RESULT_SUCCESS_V01 || r->error == QMI_ERR_NONE_V01)
+		return "No Error";
+	else if (r->error == QMI_ERR_NO_MEMORY_V01)
+		return "Out of Memory";
+	else if (r->error == QMI_ERR_INTERNAL_V01)
+		return "Unexpected error occurred";
+	else if (r->error == QMI_ERR_INCOMPATIBLE_STATE_V01)
+		return "Slimbus s/w already configured to a different mode";
+	else if (r->error == QMI_ERR_INVALID_ID_V01)
+		return "Slimbus hardware instance is not valid";
+	else
+		return "Unknown error";
+}
+
+static int msm_slim_qmi_send_select_inst_req(struct msm_slim_ctrl *dev,
+				struct slimbus_select_inst_req_msg_v01 *req)
+{
+	struct slimbus_select_inst_resp_msg_v01 resp = { { 0, 0 } };
+	struct msg_desc req_desc, resp_desc;
+	int rc;
+
+	req_desc.msg_id = SLIMBUS_QMI_SELECT_INSTANCE_REQ_V01;
+	req_desc.max_msg_len = SLIMBUS_QMI_SELECT_INSTANCE_REQ_MAX_MSG_LEN;
+	req_desc.ei_array = slimbus_select_inst_req_msg_v01_ei;
+
+	resp_desc.msg_id = SLIMBUS_QMI_SELECT_INSTANCE_RESP_V01;
+	resp_desc.max_msg_len = SLIMBUS_QMI_SELECT_INSTANCE_RESP_MAX_MSG_LEN;
+	resp_desc.ei_array = slimbus_select_inst_resp_msg_v01_ei;
+
+	rc = qmi_send_req_wait(dev->qmi.handle, &req_desc, req, sizeof(*req),
+			&resp_desc, &resp, sizeof(resp), SLIM_QMI_RESP_TOUT);
+	if (rc < 0) {
+		SLIM_ERR(dev, "%s: QMI send req failed %d\n", __func__, rc);
+		return rc;
+	}
+
+	/* Check the response */
+	if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
+		SLIM_ERR(dev, "%s: QMI request failed 0x%x (%s)\n", __func__,
+				resp.resp.result, get_qmi_error(&resp.resp));
+		return -EREMOTEIO;
+	}
+
+	return 0;
+}
+
+static int msm_slim_qmi_send_power_request(struct msm_slim_ctrl *dev,
+				struct slimbus_power_req_msg_v01 *req)
+{
+	struct slimbus_power_resp_msg_v01 resp = { { 0, 0 } };
+	struct msg_desc req_desc, resp_desc;
+	int rc;
+
+	req_desc.msg_id = SLIMBUS_QMI_POWER_REQ_V01;
+	req_desc.max_msg_len = SLIMBUS_QMI_POWER_REQ_MAX_MSG_LEN;
+	req_desc.ei_array = slimbus_power_req_msg_v01_ei;
+
+	resp_desc.msg_id = SLIMBUS_QMI_POWER_RESP_V01;
+	resp_desc.max_msg_len = SLIMBUS_QMI_POWER_RESP_MAX_MSG_LEN;
+	resp_desc.ei_array = slimbus_power_resp_msg_v01_ei;
+
+	rc = qmi_send_req_wait(dev->qmi.handle, &req_desc, req, sizeof(*req),
+			&resp_desc, &resp, sizeof(resp), SLIM_QMI_RESP_TOUT);
+	if (rc < 0) {
+		SLIM_ERR(dev, "%s: QMI send req failed %d\n", __func__, rc);
+		return rc;
+	}
+
+	/* Check the response */
+	if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
+		SLIM_ERR(dev, "%s: QMI request failed 0x%x (%s)\n", __func__,
+				resp.resp.result, get_qmi_error(&resp.resp));
+		return -EREMOTEIO;
+	}
+
+	return 0;
+}
+
+int msm_slim_qmi_init(struct msm_slim_ctrl *dev, bool apps_is_master)
+{
+	int rc = 0;
+	struct qmi_handle *handle;
+	struct slimbus_select_inst_req_msg_v01 req;
+
+	init_kthread_worker(&dev->qmi.kworker);
+
+	dev->qmi.task = kthread_run(kthread_worker_fn,
+			&dev->qmi.kworker, "msm_slim_qmi_clnt%d", dev->ctrl.nr);
+
+	if (IS_ERR(dev->qmi.task)) {
+		pr_err("%s: Failed to create QMI client kthread\n", __func__);
+		return -ENOMEM;
+	}
+
+	init_kthread_work(&dev->qmi.kwork, msm_slim_qmi_recv_msg);
+
+	handle = qmi_handle_create(msm_slim_qmi_notify, dev);
+	if (!handle) {
+		rc = -ENOMEM;
+		pr_err("%s: QMI client handle alloc failed\n", __func__);
+		goto qmi_handle_create_failed;
+	}
+
+	rc = qmi_connect_to_service(handle, SLIMBUS_QMI_SVC_ID,
+						SLIMBUS_QMI_SVC_V1,
+						SLIMBUS_QMI_INS_ID);
+	if (rc < 0) {
+		SLIM_ERR(dev, "%s: QMI server not found\n", __func__);
+		goto qmi_connect_to_service_failed;
+	}
+
+	/* Instance is 0 based */
+	req.instance = (dev->ctrl.nr >> 1);
+	req.mode_valid = 1;
+
+	/* Mode requests the ADSP role: satellite when apps is the bus master */
+	if (apps_is_master)
+		req.mode = SLIMBUS_MODE_SATELLITE_V01;
+	else
+		req.mode = SLIMBUS_MODE_MASTER_V01;
+
+	dev->qmi.handle = handle;
+
+	rc = msm_slim_qmi_send_select_inst_req(dev, &req);
+	if (rc) {
+		pr_err("%s: failed to select h/w instance\n", __func__);
+		goto qmi_select_instance_failed;
+	}
+
+	return 0;
+
+qmi_select_instance_failed:
+	dev->qmi.handle = NULL;
+qmi_connect_to_service_failed:
+	qmi_handle_destroy(handle);
+qmi_handle_create_failed:
+	flush_kthread_worker(&dev->qmi.kworker);
+	kthread_stop(dev->qmi.task);
+	dev->qmi.task = NULL;
+	return rc;
+}
+
+void msm_slim_qmi_exit(struct msm_slim_ctrl *dev)
+{
+	if (!dev->qmi.handle || !dev->qmi.task)
+		return;
+	qmi_handle_destroy(dev->qmi.handle);
+	flush_kthread_worker(&dev->qmi.kworker);
+	kthread_stop(dev->qmi.task);
+	dev->qmi.task = NULL;
+	dev->qmi.handle = NULL;
+}
+
+int msm_slim_qmi_power_request(struct msm_slim_ctrl *dev, bool active)
+{
+	struct slimbus_power_req_msg_v01 req;
+
+	if (active)
+		req.pm_req = SLIMBUS_PM_ACTIVE_V01;
+	else
+		req.pm_req = SLIMBUS_PM_INACTIVE_V01;
+
+	return msm_slim_qmi_send_power_request(dev, &req);
+}
+
+int msm_slim_qmi_check_framer_request(struct msm_slim_ctrl *dev)
+{
+	struct slimbus_chkfrm_resp_msg resp = { { 0, 0 } };
+	struct msg_desc req_desc, resp_desc;
+	int rc;
+
+	req_desc.msg_id = SLIMBUS_QMI_CHECK_FRAMER_STATUS_REQ;
+	req_desc.max_msg_len = 0;
+	req_desc.ei_array = NULL;
+
+	resp_desc.msg_id = SLIMBUS_QMI_CHECK_FRAMER_STATUS_RESP;
+	resp_desc.max_msg_len = SLIMBUS_QMI_CHECK_FRAMER_STAT_RESP_MAX_MSG_LEN;
+	resp_desc.ei_array = slimbus_chkfrm_resp_msg_v01_ei;
+
+	rc = qmi_send_req_wait(dev->qmi.handle, &req_desc, NULL, 0,
+		&resp_desc, &resp, sizeof(resp), SLIM_QMI_RESP_TOUT);
+	if (rc < 0) {
+		SLIM_ERR(dev, "%s: QMI send req failed %d\n", __func__, rc);
+		return rc;
+	}
+	/* Check the response */
+	if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
+		SLIM_ERR(dev, "%s: QMI request failed 0x%x (%s)\n",
+			__func__, resp.resp.result, get_qmi_error(&resp.resp));
+		return -EREMOTEIO;
+	}
+	return 0;
+}
diff -Nruw linux-4.4.115-fbx/drivers/slimbus./slim-msm.h linux-4.4.115-fbx/drivers/slimbus/slim-msm.h
--- linux-4.4.115-fbx/drivers/slimbus./slim-msm.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/slimbus/slim-msm.h	2019-01-22 16:16:26.643274841 +0100
@@ -0,0 +1,440 @@
+/* Copyright (c) 2011-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _SLIM_MSM_H
+#define _SLIM_MSM_H
+
+#include <linux/irq.h>
+#include <linux/kthread.h>
+#include <soc/qcom/msm_qmi_interface.h>
+#include <linux/ipc_logging.h>
+
+/* Per spec, max 40 bytes per received message */
+#define SLIM_MSGQ_BUF_LEN	40
+
+#define MSM_TX_BUFS		32
+
+#define SLIM_USR_MC_GENERIC_ACK		0x25
+#define SLIM_USR_MC_MASTER_CAPABILITY	0x0
+#define SLIM_USR_MC_REPORT_SATELLITE	0x1
+#define SLIM_USR_MC_ADDR_QUERY		0xD
+#define SLIM_USR_MC_ADDR_REPLY		0xE
+#define SLIM_USR_MC_DEFINE_CHAN		0x20
+#define SLIM_USR_MC_DEF_ACT_CHAN	0x21
+#define SLIM_USR_MC_CHAN_CTRL		0x23
+#define SLIM_USR_MC_RECONFIG_NOW	0x24
+#define SLIM_USR_MC_REQ_BW		0x28
+#define SLIM_USR_MC_CONNECT_SRC		0x2C
+#define SLIM_USR_MC_CONNECT_SINK	0x2D
+#define SLIM_USR_MC_DISCONNECT_PORT	0x2E
+
+#define SLIM_USR_MC_REPEAT_CHANGE_VALUE	0x0
+#define MSM_SLIM_VE_MAX_MAP_ADDR	0xFFF
+#define SLIM_MAX_VE_SLC_BYTES		16
+
+#define MSM_SLIM_AUTOSUSPEND		MSEC_PER_SEC
+
+#define SLIM_RX_MSGQ_TIMEOUT_VAL	0x10000
+/*
+ * Messages that can be received simultaneously:
+ * Client reads, LPASS master responses, announcement messages.
+ * The descriptor count below allows up to 32 messages in flight.
+ */
+#define MSM_SLIM_DESC_NUM		32
+
+/* MSM Slimbus peripheral settings */
+#define MSM_SLIM_PERF_SUMM_THRESHOLD	0x8000
+#define MSM_SLIM_NPORTS			24
+#define MSM_SLIM_NCHANS			32
+
+#define QC_MFGID_LSB	0x2
+#define QC_MFGID_MSB	0x17
+#define QC_CHIPID_SL	0x10
+#define QC_DEVID_SAT1	0x3
+#define QC_DEVID_SAT2	0x4
+#define QC_DEVID_PGD	0x5
+
+#define SLIM_MSG_ASM_FIRST_WORD(l, mt, mc, dt, ad) \
+		((l) | ((mt) << 5) | ((mc) << 8) | ((dt) << 15) | ((ad) << 16))
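+/*
+ * Worked example (illustration only): SLIM_MSG_ASM_FIRST_WORD(8, 1, 0x20,
+ * 0, 0xF) = 8 | (1 << 5) | (0x20 << 8) | (0 << 15) | (0xF << 16)
+ * = 0x000F2028: RL in bits 0-4, message type in bits 5-7, message code
+ * from bit 8, destination type in bit 15, destination address from bit 16.
+ */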
+
+#define INIT_MX_RETRIES 3
+#define DEF_RETRY_MS	10
+#define MSM_CONCUR_MSG	8
+#define SAT_CONCUR_MSG	8
+
+#define DEF_WATERMARK	(8 << 1)
+#define DEF_ALIGN_LSB	0
+#define DEF_ALIGN_MSB	(1 << 7)
+#define DEF_PACK	(1 << 6)
+#define DEF_NO_PACK	0
+#define ENABLE_PORT	1
+
+#define DEF_BLKSZ	0
+#define DEF_TRANSZ	0
+
+#define SAT_MAGIC_LSB	0xD9
+#define SAT_MAGIC_MSB	0xC5
+#define SAT_MSG_VER	0x1
+#define SAT_MSG_PROT	0x1
+#define MSM_SAT_SUCCSS	0x20
+#define MSM_MAX_NSATS	2
+#define MSM_MAX_SATCH	32
+
+/* Slimbus QMI service */
+#define SLIMBUS_QMI_SVC_ID 0x0301
+#define SLIMBUS_QMI_SVC_V1 1
+#define SLIMBUS_QMI_INS_ID 0
+
+/* QMI response timeout of 1000 ms */
+#define SLIM_QMI_RESP_TOUT 1000
+
+#define PGD_THIS_EE(r, v) ((v) ? PGD_THIS_EE_V2(r) : PGD_THIS_EE_V1(r))
+#define PGD_PORT(r, p, v) ((v) ? PGD_PORT_V2(r, p) : PGD_PORT_V1(r, p))
+#define CFG_PORT(r, v) ((v) ? CFG_PORT_V2(r) : CFG_PORT_V1(r))
+
+#define PGD_THIS_EE_V2(r) (dev->base + (r ## _V2) + (dev->ee * 0x1000))
+#define PGD_PORT_V2(r, p) (dev->base + (r ## _V2) + ((p) * 0x1000))
+#define CFG_PORT_V2(r) ((r ## _V2))
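+/*
+ * Worked example (illustration only): on a v2 controller,
+ * PGD_PORT(PGD_PORT_CFGn, 3, 1) expands to PGD_PORT_V2(PGD_PORT_CFGn, 3),
+ * i.e. dev->base + PGD_PORT_CFGn_V2 (0x14000) + 3 * 0x1000.
+ */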
+/* Component registers */
+enum comp_reg_v2 {
+	COMP_CFG_V2		= 4,
+	COMP_TRUST_CFG_V2	= 0x3000,
+};
+
+/* Manager PGD registers */
+enum pgd_reg_v2 {
+	PGD_CFG_V2		= 0x800,
+	PGD_STAT_V2		= 0x804,
+	PGD_INT_EN_V2		= 0x810,
+	PGD_INT_STAT_V2		= 0x814,
+	PGD_INT_CLR_V2		= 0x818,
+	PGD_OWN_EEn_V2		= 0x300C,
+	PGD_PORT_INT_EN_EEn_V2	= 0x5000,
+	PGD_PORT_INT_ST_EEn_V2	= 0x5004,
+	PGD_PORT_INT_CL_EEn_V2	= 0x5008,
+	PGD_PORT_CFGn_V2	= 0x14000,
+	PGD_PORT_STATn_V2	= 0x14004,
+	PGD_PORT_PARAMn_V2	= 0x14008,
+	PGD_PORT_BLKn_V2	= 0x1400C,
+	PGD_PORT_TRANn_V2	= 0x14010,
+	PGD_PORT_MCHANn_V2	= 0x14014,
+	PGD_PORT_PSHPLLn_V2	= 0x14018,
+	PGD_PORT_PC_CFGn_V2	= 0x8000,
+	PGD_PORT_PC_VALn_V2	= 0x8004,
+	PGD_PORT_PC_VFR_TSn_V2	= 0x8008,
+	PGD_PORT_PC_VFR_STn_V2	= 0x800C,
+	PGD_PORT_PC_VFR_CLn_V2	= 0x8010,
+	PGD_IE_STAT_V2		= 0x820,
+	PGD_VE_STAT_V2		= 0x830,
+};
+
+#define PGD_THIS_EE_V1(r) (dev->base + (r ## _V1) + (dev->ee * 16))
+#define PGD_PORT_V1(r, p) (dev->base + (r ## _V1) + ((p) * 32))
+#define CFG_PORT_V1(r) ((r ## _V1))
+/* Component registers */
+enum comp_reg_v1 {
+	COMP_CFG_V1		= 0,
+	COMP_TRUST_CFG_V1	= 0x14,
+};
+
+/* Manager PGD registers */
+enum pgd_reg_v1 {
+	PGD_CFG_V1		= 0x1000,
+	PGD_STAT_V1		= 0x1004,
+	PGD_INT_EN_V1		= 0x1010,
+	PGD_INT_STAT_V1		= 0x1014,
+	PGD_INT_CLR_V1		= 0x1018,
+	PGD_OWN_EEn_V1		= 0x1020,
+	PGD_PORT_INT_EN_EEn_V1	= 0x1030,
+	PGD_PORT_INT_ST_EEn_V1	= 0x1034,
+	PGD_PORT_INT_CL_EEn_V1	= 0x1038,
+	PGD_PORT_CFGn_V1	= 0x1080,
+	PGD_PORT_STATn_V1	= 0x1084,
+	PGD_PORT_PARAMn_V1	= 0x1088,
+	PGD_PORT_BLKn_V1	= 0x108C,
+	PGD_PORT_TRANn_V1	= 0x1090,
+	PGD_PORT_MCHANn_V1	= 0x1094,
+	PGD_PORT_PSHPLLn_V1	= 0x1098,
+	PGD_PORT_PC_CFGn_V1	= 0x1600,
+	PGD_PORT_PC_VALn_V1	= 0x1604,
+	PGD_PORT_PC_VFR_TSn_V1	= 0x1608,
+	PGD_PORT_PC_VFR_STn_V1	= 0x160C,
+	PGD_PORT_PC_VFR_CLn_V1	= 0x1610,
+	PGD_IE_STAT_V1		= 0x1700,
+	PGD_VE_STAT_V1		= 0x1710,
+};
+
+enum msm_slim_port_status {
+	MSM_PORT_OVERFLOW	= 1 << 2,
+	MSM_PORT_UNDERFLOW	= 1 << 3,
+	MSM_PORT_DISCONNECT	= 1 << 19,
+};
+
+enum msm_ctrl_state {
+	MSM_CTRL_AWAKE,
+	MSM_CTRL_IDLE,
+	MSM_CTRL_ASLEEP,
+	MSM_CTRL_DOWN,
+};
+
+enum msm_slim_msgq {
+	MSM_MSGQ_DISABLED,
+	MSM_MSGQ_RESET,
+	MSM_MSGQ_ENABLED,
+	MSM_MSGQ_DOWN,
+};
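+/*
+ * Message-queue lifecycle, as used in this patch: DISABLED - the queue is
+ * not used at all; RESET - allocated/initialized but not connected
+ * (msm_slim_disconnect_endp() moves ENABLED back to RESET); ENABLED - the
+ * SPS pipe is connected and in use. DOWN appears intended to mark the
+ * queue unusable, e.g. across a subsystem restart.
+ */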
+
+struct msm_slim_sps_bam {
+	unsigned long		hdl;
+	void __iomem		*base;
+	int			irq;
+};
+
+/*
+ * struct msm_slim_pshpull_parm: Structure to store push-pull protocol
+ * parameters
+ * @num_samples: Number of samples in a period
+ * @rpt_period: Repeat period value
+ */
+struct msm_slim_pshpull_parm {
+	int		num_samples;
+	int		rpt_period;
+};
+
+struct msm_slim_endp {
+	struct sps_pipe			*sps;
+	struct sps_connect		config;
+	struct sps_register_event	event;
+	struct sps_mem_buffer		buf;
+	bool				connected;
+	int				port_b;
+	struct msm_slim_pshpull_parm	psh_pull;
+};
+
+struct msm_slim_qmi {
+	struct qmi_handle		*handle;
+	struct task_struct		*task;
+	struct task_struct		*slave_thread;
+	struct completion		slave_notify;
+	struct kthread_work		kwork;
+	struct kthread_worker		kworker;
+	struct completion		qmi_comp;
+	struct notifier_block		nb;
+};
+
+enum msm_slim_dom {
+	MSM_SLIM_DOM_NONE,
+	MSM_SLIM_DOM_PD,
+	MSM_SLIM_DOM_SS,
+};
+
+struct msm_slim_ss {
+	struct notifier_block nb;
+	void *domr;
+	enum msm_ctrl_state state;
+	struct work_struct dom_up;
+	enum msm_slim_dom dom_t;
+};
+
+struct msm_slim_pdata {
+	u32 apps_pipes;
+	u32 eapc;
+};
+
+struct msm_slim_bulk_wr {
+	dma_addr_t	wr_dma;
+	void		*base;
+	int		size;
+	int		buf_sz;
+	int		(*cb)(void *ctx, int err);
+	void		*ctx;
+	bool		in_progress;
+};
+
+struct msm_slim_ctrl {
+	struct slim_controller  ctrl;
+	struct slim_framer	framer;
+	struct device		*dev;
+	void __iomem		*base;
+	struct resource		*slew_mem;
+	struct resource		*bam_mem;
+	u32			curr_bw;
+	u8			msg_cnt;
+	u32			tx_buf[10];
+	u8			rx_msgs[MSM_CONCUR_MSG][SLIM_MSGQ_BUF_LEN];
+	int			tx_tail;
+	int			tx_head;
+	spinlock_t		rx_lock;
+	int			head;
+	int			tail;
+	int			irq;
+	int			err;
+	int			ee;
+	struct completion	**wr_comp;
+	struct msm_slim_sat	*satd[MSM_MAX_NSATS];
+	struct msm_slim_endp	*pipes;
+	struct msm_slim_sps_bam	bam;
+	struct msm_slim_endp	tx_msgq;
+	struct msm_slim_endp	rx_msgq;
+	struct completion	rx_msgq_notify;
+	struct task_struct	*rx_msgq_thread;
+	struct clk		*rclk;
+	struct clk		*hclk;
+	struct mutex		tx_lock;
+	struct mutex		ssr_lock;
+	spinlock_t		tx_buf_lock;
+	u8			pgdla;
+	enum msm_slim_msgq	use_rx_msgqs;
+	enum msm_slim_msgq	use_tx_msgqs;
+	int			port_nums;
+	struct completion	reconf;
+	bool			reconf_busy;
+	bool			chan_active;
+	enum msm_ctrl_state	state;
+	struct completion	ctrl_up;
+	int			nsats;
+	u32			ver;
+	struct msm_slim_qmi	qmi;
+	struct msm_slim_pdata	pdata;
+	struct msm_slim_ss	ext_mdm;
+	struct msm_slim_ss	dsp;
+	struct msm_slim_bulk_wr	bulk;
+	int			default_ipc_log_mask;
+	int			ipc_log_mask;
+	bool			sysfs_created;
+	void			*ipc_slimbus_log;
+	void (*rx_slim)(struct msm_slim_ctrl *dev, u8 *buf);
+	u32			current_rx_buf[10];
+	int			current_count;
+	atomic_t		ssr_in_progress;
+};
+
+struct msm_sat_chan {
+	u8 chan;
+	u16 chanh;
+	int req_rem;
+	int req_def;
+	bool reconf;
+};
+
+struct msm_slim_sat {
+	struct slim_device	satcl;
+	struct msm_slim_ctrl	*dev;
+	struct workqueue_struct *wq;
+	struct work_struct	wd;
+	u8			sat_msgs[SAT_CONCUR_MSG][40];
+	struct msm_sat_chan	*satch;
+	u8			nsatch;
+	bool			sent_capability;
+	bool			pending_reconf;
+	bool			pending_capability;
+	int			shead;
+	int			stail;
+	spinlock_t lock;
+};
+
+enum rsc_grp {
+	EE_MGR_RSC_GRP	= 1 << 10,
+	EE_NGD_2	= 2 << 6,
+	EE_NGD_1	= 0,
+};
+
+/* IPC logging stuff */
+#define IPC_SLIMBUS_LOG_PAGES 5
+
+/* Log levels */
+enum {
+	FATAL_LEV = 0U,
+	ERR_LEV = 1U,
+	WARN_LEV = 2U,
+	INFO_LEV = 3U,
+	DBG_LEV = 4U,
+};
+
+/* Default IPC log level INFO */
+#define SLIM_DBG(dev, x...) do { \
+	pr_debug(x); \
+	if (dev->ipc_slimbus_log && dev->ipc_log_mask >= DBG_LEV) { \
+		ipc_log_string(dev->ipc_slimbus_log, x); \
+	} \
+} while (0)
+
+#define SLIM_INFO(dev, x...) do { \
+	pr_debug(x); \
+	if (dev->ipc_slimbus_log && dev->ipc_log_mask >= INFO_LEV) {\
+		ipc_log_string(dev->ipc_slimbus_log, x); \
+	} \
+} while (0)
+
+/* Warnings and errors always show up on the console */
+#define SLIM_WARN(dev, x...) do { \
+	pr_warn(x); \
+	if (dev->ipc_slimbus_log && dev->ipc_log_mask >= WARN_LEV) \
+		ipc_log_string(dev->ipc_slimbus_log, x); \
+} while (0)
+
+/* An ERROR condition in the driver drops ipc_log_mask to FATAL_LEV
+ * (saving the old level in default_ipc_log_mask; SLIM_RST_LOGLVL restores
+ * it), preserving the IPC log past the first error. Further errors
+ * continue to log on the console
+ */
+#define SLIM_ERR(dev, x...) do { \
+	pr_err(x); \
+	if (dev->ipc_slimbus_log && dev->ipc_log_mask >= ERR_LEV) { \
+		ipc_log_string(dev->ipc_slimbus_log, x); \
+		dev->default_ipc_log_mask = dev->ipc_log_mask; \
+		dev->ipc_log_mask = FATAL_LEV; \
+	} \
+} while (0)
+
+#define SLIM_RST_LOGLVL(dev) { \
+	dev->ipc_log_mask = dev->default_ipc_log_mask; \
+}
+
+int msm_slim_rx_enqueue(struct msm_slim_ctrl *dev, u32 *buf, u8 len);
+int msm_slim_rx_dequeue(struct msm_slim_ctrl *dev, u8 *buf);
+int msm_slim_get_ctrl(struct msm_slim_ctrl *dev);
+void msm_slim_put_ctrl(struct msm_slim_ctrl *dev);
+irqreturn_t msm_slim_port_irq_handler(struct msm_slim_ctrl *dev, u32 pstat);
+int msm_slim_init_endpoint(struct msm_slim_ctrl *dev, struct msm_slim_endp *ep);
+void msm_slim_free_endpoint(struct msm_slim_endp *ep);
+void msm_hw_set_port(struct msm_slim_ctrl *dev, u8 pipenum, u8 portnum);
+int msm_alloc_port(struct slim_controller *ctrl, u8 pn);
+void msm_dealloc_port(struct slim_controller *ctrl, u8 pn);
+int msm_slim_connect_pipe_port(struct msm_slim_ctrl *dev, u8 pn);
+enum slim_port_err msm_slim_port_xfer_status(struct slim_controller *ctr,
+				u8 pn, phys_addr_t *done_buf, u32 *done_len);
+int msm_slim_port_xfer(struct slim_controller *ctrl, u8 pn, phys_addr_t iobuf,
+			u32 len, struct completion *comp);
+int msm_send_msg_buf(struct msm_slim_ctrl *dev, u32 *buf, u8 len, u32 tx_reg);
+u32 *msm_get_msg_buf(struct msm_slim_ctrl *dev, int len,
+			struct completion *comp);
+u32 *msm_slim_manage_tx_msgq(struct msm_slim_ctrl *dev, bool getbuf,
+			struct completion *comp, int err);
+int msm_slim_rx_msgq_get(struct msm_slim_ctrl *dev, u32 *data, int offset);
+int msm_slim_sps_init(struct msm_slim_ctrl *dev, struct resource *bam_mem,
+			u32 pipe_reg, bool remote);
+void msm_slim_sps_exit(struct msm_slim_ctrl *dev, bool dereg);
+
+int msm_slim_connect_endp(struct msm_slim_ctrl *dev,
+				struct msm_slim_endp *endpoint);
+void msm_slim_disconnect_endp(struct msm_slim_ctrl *dev,
+					struct msm_slim_endp *endpoint,
+					enum msm_slim_msgq *msgq_flag);
+void msm_slim_deinit_ep(struct msm_slim_ctrl *dev,
+				struct msm_slim_endp *endpoint,
+				enum msm_slim_msgq *msgq_flag);
+
+void msm_slim_qmi_exit(struct msm_slim_ctrl *dev);
+int msm_slim_qmi_init(struct msm_slim_ctrl *dev, bool apps_is_master);
+int msm_slim_qmi_power_request(struct msm_slim_ctrl *dev, bool active);
+int msm_slim_qmi_check_framer_request(struct msm_slim_ctrl *dev);
+#endif
diff -Nruw linux-4.4.115-fbx/drivers/slimbus./slim-msm-ngd.c linux-4.4.115-fbx/drivers/slimbus/slim-msm-ngd.c
--- linux-4.4.115-fbx/drivers/slimbus./slim-msm-ngd.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/slimbus/slim-msm-ngd.c	2019-01-22 16:16:26.643274841 +0100
@@ -0,0 +1,2089 @@
+/* Copyright (c) 2011-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#include <linux/irq.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/slimbus/slimbus.h>
+#include <linux/delay.h>
+#include <linux/kthread.h>
+#include <linux/clk.h>
+#include <linux/pm_runtime.h>
+#include <linux/of.h>
+#include <linux/of_slimbus.h>
+#include <linux/timer.h>
+#include <linux/msm-sps.h>
+#include <soc/qcom/service-locator.h>
+#include <soc/qcom/service-notifier.h>
+#include <soc/qcom/subsystem_notif.h>
+#include "slim-msm.h"
+
+#define NGD_SLIM_NAME	"ngd_msm_ctrl"
+#define SLIM_LA_MGR	0xFF
+#define SLIM_ROOT_FREQ	24576000
+#define LADDR_RETRY	5
+
+#define NGD_BASE_V1(r)	(((r) % 2) ? 0x800 : 0xA00)
+#define NGD_BASE_V2(r)	(((r) % 2) ? 0x1000 : 0x2000)
+#define NGD_BASE(r, v) ((v) ? NGD_BASE_V2(r) : NGD_BASE_V1(r))
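+/*
+ * Worked example (illustration only): for controller nr = 0 on a v2
+ * controller, NGD_BASE(0, 1) = NGD_BASE_V2(0) = 0x2000, while for nr = 1
+ * it is 0x1000; odd controller instances map to the lower register block.
+ */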
+/* NGD (Non-ported Generic Device) registers */
+enum ngd_reg {
+	NGD_CFG		= 0x0,
+	NGD_STATUS	= 0x4,
+	NGD_RX_MSGQ_CFG	= 0x8,
+	NGD_INT_EN	= 0x10,
+	NGD_INT_STAT	= 0x14,
+	NGD_INT_CLR	= 0x18,
+	NGD_TX_MSG	= 0x30,
+	NGD_RX_MSG	= 0x70,
+	NGD_IE_STAT	= 0xF0,
+	NGD_VE_STAT	= 0x100,
+};
+
+enum ngd_msg_cfg {
+	NGD_CFG_ENABLE		= 1,
+	NGD_CFG_RX_MSGQ_EN	= 1 << 1,
+	NGD_CFG_TX_MSGQ_EN	= 1 << 2,
+};
+
+enum ngd_intr {
+	NGD_INT_RECFG_DONE	= 1 << 24,
+	NGD_INT_TX_NACKED_2	= 1 << 25,
+	NGD_INT_MSG_BUF_CONTE	= 1 << 26,
+	NGD_INT_MSG_TX_INVAL	= 1 << 27,
+	NGD_INT_IE_VE_CHG	= 1 << 28,
+	NGD_INT_DEV_ERR		= 1 << 29,
+	NGD_INT_RX_MSG_RCVD	= 1 << 30,
+	NGD_INT_TX_MSG_SENT	= 1 << 31,
+};
+
+enum ngd_offsets {
+	NGD_NACKED_MC		= 0x7F00000,
+	NGD_ACKED_MC		= 0xFE000,
+	NGD_ERROR		= 0x1800,
+	NGD_MSGQ_SUPPORT	= 0x400,
+	NGD_RX_MSGQ_TIME_OUT	= 0x16,
+	NGD_ENUMERATED		= 0x1,
+	NGD_TX_BUSY		= 0x0,
+};
+
+enum ngd_status {
+	NGD_LADDR		= 1 << 1,
+};
+
+static void ngd_slim_rx(struct msm_slim_ctrl *dev, u8 *buf);
+static int ngd_slim_runtime_resume(struct device *device);
+static int ngd_slim_power_up(struct msm_slim_ctrl *dev, bool mdm_restart);
+static void ngd_dom_down(struct msm_slim_ctrl *dev);
+static int dsp_domr_notify_cb(struct notifier_block *n, unsigned long code,
+				void *_cmd);
+
+static irqreturn_t ngd_slim_interrupt(int irq, void *d)
+{
+	struct msm_slim_ctrl *dev = (struct msm_slim_ctrl *)d;
+	void __iomem *ngd = dev->base + NGD_BASE(dev->ctrl.nr, dev->ver);
+	u32 stat = readl_relaxed(ngd + NGD_INT_STAT);
+	u32 pstat;
+
+	if ((stat & NGD_INT_MSG_BUF_CONTE) ||
+		(stat & NGD_INT_MSG_TX_INVAL) || (stat & NGD_INT_DEV_ERR) ||
+		(stat & NGD_INT_TX_NACKED_2)) {
+		writel_relaxed(stat, ngd + NGD_INT_CLR);
+		if (stat & NGD_INT_MSG_TX_INVAL)
+			dev->err = -EINVAL;
+		else
+			dev->err = -EIO;
+
+		SLIM_WARN(dev, "NGD interrupt error:0x%x, err:%d\n", stat,
+								dev->err);
+		/* Guarantee that error interrupts are cleared */
+		mb();
+		msm_slim_manage_tx_msgq(dev, false, NULL, dev->err);
+
+	} else if (stat & NGD_INT_TX_MSG_SENT) {
+		writel_relaxed(NGD_INT_TX_MSG_SENT, ngd + NGD_INT_CLR);
+		/* Make sure interrupt is cleared */
+		mb();
+		msm_slim_manage_tx_msgq(dev, false, NULL, 0);
+	}
+	if (stat & NGD_INT_RX_MSG_RCVD) {
+		u32 rx_buf[10];
+		u8 len, i;
+		rx_buf[0] = readl_relaxed(ngd + NGD_RX_MSG);
+		len = rx_buf[0] & 0x1F;
+		for (i = 1; i < ((len + 3) >> 2); i++) {
+			rx_buf[i] = readl_relaxed(ngd + NGD_RX_MSG +
+						(4 * i));
+			SLIM_DBG(dev, "REG-RX data: %x\n", rx_buf[i]);
+		}
+		writel_relaxed(NGD_INT_RX_MSG_RCVD,
+				ngd + NGD_INT_CLR);
+		/*
+		 * Guarantee that CLR bit write goes through before
+		 * queuing work
+		 */
+		mb();
+		ngd_slim_rx(dev, (u8 *)rx_buf);
+	}
+	if (stat & NGD_INT_RECFG_DONE) {
+		writel_relaxed(NGD_INT_RECFG_DONE, ngd + NGD_INT_CLR);
+		/* Guarantee RECONFIG DONE interrupt is cleared */
+		mb();
+		/* In satellite mode, just log the reconfig done IRQ */
+		SLIM_DBG(dev, "reconfig done IRQ for NGD\n");
+	}
+	if (stat & NGD_INT_IE_VE_CHG) {
+		writel_relaxed(NGD_INT_IE_VE_CHG, ngd + NGD_INT_CLR);
+		/* Guarantee IE VE change interrupt is cleared */
+		mb();
+		SLIM_DBG(dev, "NGD IE VE change\n");
+	}
+
+	pstat = readl_relaxed(PGD_THIS_EE(PGD_PORT_INT_ST_EEn, dev->ver));
+	if (pstat != 0)
+		return msm_slim_port_irq_handler(dev, pstat);
+	return IRQ_HANDLED;
+}
+
+static int ngd_qmi_available(struct notifier_block *n, unsigned long code,
+				void *_cmd)
+{
+	struct msm_slim_qmi *qmi = container_of(n, struct msm_slim_qmi, nb);
+	struct msm_slim_ctrl *dev =
+		container_of(qmi, struct msm_slim_ctrl, qmi);
+	SLIM_INFO(dev, "Slimbus QMI NGD CB received event:%ld\n", code);
+	switch (code) {
+	case QMI_SERVER_ARRIVE:
+		atomic_set(&dev->ssr_in_progress, 0);
+		schedule_work(&dev->dsp.dom_up);
+		break;
+	default:
+		break;
+	}
+	return 0;
+}
+
+static void ngd_reg_ssr(struct msm_slim_ctrl *dev)
+{
+	int ret;
+	const char *subsys_name = NULL;
+
+	dev->dsp.dom_t = MSM_SLIM_DOM_NONE;
+	ret = of_property_read_string(dev->dev->of_node,
+				"qcom,subsys-name", &subsys_name);
+	if (ret)
+		subsys_name = "adsp";
+
+	dev->dsp.nb.notifier_call = dsp_domr_notify_cb;
+	dev->dsp.domr = subsys_notif_register_notifier(subsys_name,
+							&dev->dsp.nb);
+	if (IS_ERR_OR_NULL(dev->dsp.domr)) {
+		dev_err(dev->dev,
+			"subsys_notif_register_notifier failed %ld",
+			PTR_ERR(dev->dsp.domr));
+		return;
+	}
+	dev->dsp.dom_t = MSM_SLIM_DOM_SS;
+	SLIM_INFO(dev, "reg-SSR with:%s, PDR not available\n",
+			subsys_name);
+}
+
+static int dsp_domr_notify_cb(struct notifier_block *n, unsigned long code,
+				void *_cmd)
+{
+	int cur = -1;
+	struct msm_slim_ss *dsp = container_of(n, struct msm_slim_ss, nb);
+	struct msm_slim_ctrl *dev = container_of(dsp, struct msm_slim_ctrl,
+						dsp);
+	struct pd_qmi_client_data *reg;
+
+	SLIM_INFO(dev, "SLIM DSP SSR/PDR notify cb:0x%lx, type:%d\n",
+			code, dsp->dom_t);
+	switch (code) {
+	case SUBSYS_BEFORE_SHUTDOWN:
+	case SERVREG_NOTIF_SERVICE_STATE_DOWN_V01:
+		SLIM_INFO(dev, "SLIM DSP SSR notify cb:%lu\n", code);
+		atomic_set(&dev->ssr_in_progress, 1);
+		/* wait for current transaction */
+		mutex_lock(&dev->tx_lock);
+		/* make sure autosuspend is not called until ADSP comes up */
+		pm_runtime_get_noresume(dev->dev);
+		dev->state = MSM_CTRL_DOWN;
+		msm_slim_sps_exit(dev, false);
+		ngd_dom_down(dev);
+		mutex_unlock(&dev->tx_lock);
+		break;
+	case LOCATOR_UP:
+		reg = _cmd;
+		if (!reg || reg->total_domains != 1) {
+			SLIM_WARN(dev, "error locating audio-PD\n");
+			if (reg)
+				SLIM_WARN(dev, "audio-PDs matched:%d\n",
+						reg->total_domains);
+
+			/* Fall back to SSR */
+			ngd_reg_ssr(dev);
+			return NOTIFY_DONE;
+		}
+		dev->dsp.domr = service_notif_register_notifier(
+				reg->domain_list->name,
+				reg->domain_list->instance_id,
+				&dev->dsp.nb,
+				&cur);
+		SLIM_INFO(dev, "reg-PD client:%s with service:%s\n",
+				reg->client_name, reg->service_name);
+		SLIM_INFO(dev, "reg-PD dom:%s instance:%d, cur:%d\n",
+				reg->domain_list->name,
+				reg->domain_list->instance_id, cur);
+		if (IS_ERR_OR_NULL(dev->dsp.domr))
+			ngd_reg_ssr(dev);
+		else
+			dev->dsp.dom_t = MSM_SLIM_DOM_PD;
+		break;
+	case LOCATOR_DOWN:
+		ngd_reg_ssr(dev);
+	default:
+		break;
+	}
+	return NOTIFY_DONE;
+}
+
+static void ngd_dom_init(struct msm_slim_ctrl *dev)
+{
+	struct pd_qmi_client_data reg;
+	int ret;
+
+	memset(&reg, 0, sizeof(struct pd_qmi_client_data));
+	dev->dsp.nb.priority = 4;
+	dev->dsp.nb.notifier_call = dsp_domr_notify_cb;
+	scnprintf(reg.client_name, QMI_SERVREG_LOC_NAME_LENGTH_V01, "appsngd%d",
+		 dev->ctrl.nr);
+	scnprintf(reg.service_name, QMI_SERVREG_LOC_NAME_LENGTH_V01,
+		 "avs/audio");
+	ret = get_service_location(reg.client_name, reg.service_name,
+				   &dev->dsp.nb);
+	if (ret)
+		ngd_reg_ssr(dev);
+}
+
+static int mdm_ssr_notify_cb(struct notifier_block *n, unsigned long code,
+				void *_cmd)
+{
+	void __iomem *ngd;
+	struct msm_slim_ss *ext_mdm = container_of(n, struct msm_slim_ss, nb);
+	struct msm_slim_ctrl *dev = container_of(ext_mdm, struct msm_slim_ctrl,
+						ext_mdm);
+	struct slim_controller *ctrl = &dev->ctrl;
+	u32 laddr;
+	struct slim_device *sbdev;
+
+	switch (code) {
+	case SUBSYS_BEFORE_SHUTDOWN:
+		SLIM_INFO(dev, "SLIM %lu external_modem SSR notify cb\n", code);
+		/* vote for runtime-pm so that ADSP doesn't go down */
+		msm_slim_get_ctrl(dev);
+		/*
+		 * checking framer here will wake-up ADSP and may avoid framer
+		 * handover later
+		 */
+		msm_slim_qmi_check_framer_request(dev);
+		dev->ext_mdm.state = MSM_CTRL_DOWN;
+		msm_slim_put_ctrl(dev);
+		break;
+	case SUBSYS_AFTER_POWERUP:
+		if (dev->ext_mdm.state != MSM_CTRL_DOWN)
+			return NOTIFY_DONE;
+		SLIM_INFO(dev,
+			"SLIM %lu external_modem SSR notify cb\n", code);
+		/* vote for runtime-pm so that ADSP doesn't go down */
+		msm_slim_get_ctrl(dev);
+		msm_slim_qmi_check_framer_request(dev);
+		/* If NGD enumeration is lost, power the controller up again */
+		ngd = dev->base + NGD_BASE(dev->ctrl.nr, dev->ver);
+		laddr = readl_relaxed(ngd + NGD_STATUS);
+		if (!(laddr & NGD_LADDR)) {
+			mutex_lock(&dev->tx_lock);
+			/* runtime-pm state should be consistent with HW */
+			pm_runtime_disable(dev->dev);
+			pm_runtime_set_suspended(dev->dev);
+			dev->state = MSM_CTRL_DOWN;
+			mutex_unlock(&dev->tx_lock);
+			SLIM_INFO(dev,
+				"SLIM MDM SSR (active framer on MDM) dev-down\n");
+			list_for_each_entry(sbdev, &ctrl->devs, dev_list)
+				slim_report_absent(sbdev);
+			ngd_slim_runtime_resume(dev->dev);
+			pm_runtime_set_active(dev->dev);
+			pm_runtime_enable(dev->dev);
+		}
+		dev->ext_mdm.state = MSM_CTRL_AWAKE;
+		msm_slim_put_ctrl(dev);
+		break;
+	default:
+		break;
+	}
+	return NOTIFY_DONE;
+}
+
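+/*
+ * Editorial note: ngd_get_tid() below hands out transaction IDs. Until
+ * 256 TIDs have been issued it simply uses ctrl->last_tid++; after that
+ * it scans the txnt[] table (mod 256) for a free slot, so at most 256
+ * transactions can be outstanding at any time.
+ */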
+static int ngd_get_tid(struct slim_controller *ctrl, struct slim_msg_txn *txn,
+				u8 *tid, struct completion *done)
+{
+	struct msm_slim_ctrl *dev = slim_get_ctrldata(ctrl);
+	unsigned long flags;
+
+	spin_lock_irqsave(&ctrl->txn_lock, flags);
+	if (ctrl->last_tid <= 255) {
+		dev->msg_cnt = ctrl->last_tid;
+		ctrl->last_tid++;
+	} else {
+		int i;
+		for (i = 0; i < 256; i++) {
+			dev->msg_cnt = ((dev->msg_cnt + 1) & 0xFF);
+			if (ctrl->txnt[dev->msg_cnt] == NULL)
+				break;
+		}
+		if (i >= 256) {
+			dev_err(&ctrl->dev, "out of TID");
+			spin_unlock_irqrestore(&ctrl->txn_lock, flags);
+			return -ENOMEM;
+		}
+	}
+	ctrl->txnt[dev->msg_cnt] = txn;
+	txn->tid = dev->msg_cnt;
+	txn->comp = done;
+	*tid = dev->msg_cnt;
+	spin_unlock_irqrestore(&ctrl->txn_lock, flags);
+	return 0;
+}
+
+static void slim_reinit_tx_msgq(struct msm_slim_ctrl *dev)
+{
+	/*
+	 * Disconnect/reconnect the pipe so that subsequent
+	 * transactions don't time out due to unavailable
+	 * descriptors
+	 */
+	if (dev->state != MSM_CTRL_DOWN) {
+		msm_slim_disconnect_endp(dev, &dev->tx_msgq,
+					&dev->use_tx_msgqs);
+		msm_slim_connect_endp(dev, &dev->tx_msgq);
+	}
+}
+
+static int ngd_check_hw_status(struct msm_slim_ctrl *dev)
+{
+	void __iomem *ngd = dev->base + NGD_BASE(dev->ctrl.nr, dev->ver);
+	u32 laddr = readl_relaxed(ngd + NGD_STATUS);
+	int ret = 0;
+
+	/* Lost logical addr due to noise */
+	if (!(laddr & NGD_LADDR)) {
+		SLIM_WARN(dev, "NGD lost LADDR: status:0x%x\n", laddr);
+		ret = ngd_slim_power_up(dev, false);
+
+		if (ret) {
+			SLIM_WARN(dev, "slim resume ret:%d, state:%d\n",
+					ret, dev->state);
+			ret = -EREMOTEIO;
+		}
+	}
+	return ret;
+}
+
+static int ngd_xfer_msg(struct slim_controller *ctrl, struct slim_msg_txn *txn)
+{
+	DECLARE_COMPLETION_ONSTACK(done);
+	DECLARE_COMPLETION_ONSTACK(tx_sent);
+
+	struct msm_slim_ctrl *dev = slim_get_ctrldata(ctrl);
+	u32 *pbuf;
+	u8 *puc;
+	int ret = 0;
+	u8 la = txn->la;
+	u8 txn_mt;
+	u16 txn_mc = txn->mc;
+	u8 wbuf[SLIM_MSGQ_BUF_LEN];
+	bool report_sat = false;
+	bool sync_wr = true;
+
+	if (txn->mc & SLIM_MSG_CLK_PAUSE_SEQ_FLG)
+		return -EPROTONOSUPPORT;
+
+	if (txn->mt == SLIM_MSG_MT_CORE &&
+		(txn->mc >= SLIM_MSG_MC_BEGIN_RECONFIGURATION &&
+		 txn->mc <= SLIM_MSG_MC_RECONFIGURE_NOW))
+		return 0;
+
+	if (txn->mc == SLIM_USR_MC_REPORT_SATELLITE &&
+		txn->mt == SLIM_MSG_MT_SRC_REFERRED_USER)
+		report_sat = true;
+	else
+		mutex_lock(&dev->tx_lock);
+
+	if (!report_sat && !pm_runtime_enabled(dev->dev) &&
+			dev->state == MSM_CTRL_ASLEEP) {
+		/*
+		 * Counterpart of system-suspend when runtime-pm is not enabled.
+		 * This way, resume can be left empty and the device is put in
+		 * active mode only if a client requests anything on the bus.
+		 * If the state was DOWN, the SSR UP notification will take
+		 * care of putting the device into the active state.
+		 */
+		mutex_unlock(&dev->tx_lock);
+		ret = ngd_slim_runtime_resume(dev->dev);
+
+		if (ret) {
+			SLIM_ERR(dev, "slim resume failed ret:%d, state:%d",
+					ret, dev->state);
+			return -EREMOTEIO;
+		}
+		mutex_lock(&dev->tx_lock);
+	}
+
+	/* If txn is tried when controller is down, wait for ADSP to boot */
+	if (!report_sat) {
+		if (dev->state == MSM_CTRL_DOWN) {
+			u8 mc = (u8)txn->mc;
+			int timeout;
+			mutex_unlock(&dev->tx_lock);
+			SLIM_INFO(dev, "ADSP slimbus not up yet\n");
+			/*
+			 * Messages related to data channel management can't
+			 * wait, since they are holding the reconfiguration
+			 * lock. clk_pause in resume (which can change state
+			 * back to MSM_CTRL_AWAKE) will need that lock.
+			 * Port disconnection and channel removal calls should
+			 * pass through, since there is no activity on the bus
+			 * and those calls are triggered by clients due to the
+			 * device_down callback in that situation.
+			 * Returning 0 on the disconnections and removals keeps
+			 * the state of channels and ports consistent with the
+			 * HW.
+			 * Remote requests to remove a channel/port will be
+			 * returned from the path where they wait on
+			 * acknowledgement from the ADSP.
+			 */
+			if ((txn->mt == SLIM_MSG_MT_DEST_REFERRED_USER) &&
+				((mc == SLIM_USR_MC_CHAN_CTRL ||
+				mc == SLIM_USR_MC_DISCONNECT_PORT ||
+				mc == SLIM_USR_MC_RECONFIG_NOW)))
+				return -EREMOTEIO;
+			if ((txn->mt == SLIM_MSG_MT_CORE) &&
+				((mc == SLIM_MSG_MC_DISCONNECT_PORT ||
+				mc == SLIM_MSG_MC_NEXT_REMOVE_CHANNEL ||
+				mc == SLIM_USR_MC_RECONFIG_NOW)))
+				return 0;
+			if ((txn->mt == SLIM_MSG_MT_CORE) &&
+				((mc >= SLIM_MSG_MC_CONNECT_SOURCE &&
+				mc <= SLIM_MSG_MC_CHANGE_CONTENT) ||
+				(mc >= SLIM_MSG_MC_BEGIN_RECONFIGURATION &&
+				mc <= SLIM_MSG_MC_RECONFIGURE_NOW)))
+				return -EREMOTEIO;
+			if ((txn->mt == SLIM_MSG_MT_DEST_REFERRED_USER) &&
+				((mc >= SLIM_USR_MC_DEFINE_CHAN &&
+				mc < SLIM_USR_MC_DISCONNECT_PORT)))
+				return -EREMOTEIO;
+			timeout = wait_for_completion_timeout(&dev->ctrl_up,
+							HZ);
+			if (!timeout)
+				return -ETIMEDOUT;
+			mutex_lock(&dev->tx_lock);
+		}
+
+		mutex_unlock(&dev->tx_lock);
+		ret = msm_slim_get_ctrl(dev);
+		mutex_lock(&dev->tx_lock);
+		/*
+		 * Runtime-pm's callbacks are not called until runtime-pm's
+		 * error status is cleared.
+		 * Setting the runtime status to suspended clears the error.
+		 * It also makes the HW status consistent with what SW assumes
+		 * here.
+		 */
+		if ((pm_runtime_enabled(dev->dev) && ret < 0) ||
+				dev->state >= MSM_CTRL_ASLEEP) {
+			SLIM_ERR(dev, "slim ctrl vote failed ret:%d, state:%d",
+					ret, dev->state);
+			pm_runtime_set_suspended(dev->dev);
+			mutex_unlock(&dev->tx_lock);
+			msm_slim_put_ctrl(dev);
+			return -EREMOTEIO;
+		}
+		ret = ngd_check_hw_status(dev);
+		if (ret) {
+			mutex_unlock(&dev->tx_lock);
+			msm_slim_put_ctrl(dev);
+			return ret;
+		}
+	}
+
+	if (txn->mt == SLIM_MSG_MT_CORE &&
+		(txn->mc == SLIM_MSG_MC_CONNECT_SOURCE ||
+		txn->mc == SLIM_MSG_MC_CONNECT_SINK ||
+		txn->mc == SLIM_MSG_MC_DISCONNECT_PORT)) {
+		int i = 0;
+		if (txn->mc != SLIM_MSG_MC_DISCONNECT_PORT)
+			SLIM_INFO(dev,
+				"Connect port: laddr 0x%x  port_num %d chan_num %d\n",
+					txn->la, txn->wbuf[0], txn->wbuf[1]);
+		else
+			SLIM_INFO(dev,
+				"Disconnect port: laddr 0x%x  port_num %d\n",
+					txn->la, txn->wbuf[0]);
+		txn->mt = SLIM_MSG_MT_DEST_REFERRED_USER;
+		if (txn->mc == SLIM_MSG_MC_CONNECT_SOURCE)
+			txn->mc = SLIM_USR_MC_CONNECT_SRC;
+		else if (txn->mc == SLIM_MSG_MC_CONNECT_SINK)
+			txn->mc = SLIM_USR_MC_CONNECT_SINK;
+		else if (txn->mc == SLIM_MSG_MC_DISCONNECT_PORT)
+			txn->mc = SLIM_USR_MC_DISCONNECT_PORT;
+		if (txn->la == SLIM_LA_MGR) {
+			if (dev->pgdla == SLIM_LA_MGR) {
+				u8 ea[] = {0, QC_DEVID_PGD, 0, 0, QC_MFGID_MSB,
+						QC_MFGID_LSB};
+				ea[2] = (u8)(dev->pdata.eapc & 0xFF);
+				ea[3] = (u8)((dev->pdata.eapc & 0xFF00) >> 8);
+				mutex_unlock(&dev->tx_lock);
+				ret = dev->ctrl.get_laddr(&dev->ctrl, ea, 6,
+						&dev->pgdla);
+				SLIM_DBG(dev, "SLIM PGD LA:0x%x, ret:%d\n",
+					dev->pgdla, ret);
+				if (ret) {
+					SLIM_ERR(dev,
+						"Incorrect SLIM-PGD EAPC:0x%x\n",
+							dev->pdata.eapc);
+					return ret;
+				}
+				mutex_lock(&dev->tx_lock);
+			}
+			txn->la = dev->pgdla;
+		}
+		wbuf[i++] = txn->la;
+		la = SLIM_LA_MGR;
+		wbuf[i++] = txn->wbuf[0];
+		if (txn->mc != SLIM_USR_MC_DISCONNECT_PORT)
+			wbuf[i++] = txn->wbuf[1];
+		ret = ngd_get_tid(ctrl, txn, &wbuf[i++], &done);
+		if (ret) {
+			SLIM_ERR(dev, "TID for connect/disconnect fail:%d\n",
+					ret);
+			goto ngd_xfer_err;
+		}
+		txn->len = i;
+		txn->wbuf = wbuf;
+		txn->rl = txn->len + 4;
+	}
+	txn->rl--;
+
+	if (txn->len > SLIM_MSGQ_BUF_LEN || txn->rl > SLIM_MSGQ_BUF_LEN) {
+		SLIM_WARN(dev, "msg exeeds HW lim:%d, rl:%d, mc:0x%x, mt:0x%x",
+					txn->len, txn->rl, txn->mc, txn->mt);
+		ret = -EDQUOT;
+		goto ngd_xfer_err;
+	}
+
+	if (txn->mt == SLIM_MSG_MT_CORE && txn->comp &&
+		dev->use_tx_msgqs == MSM_MSGQ_ENABLED &&
+		(txn_mc != SLIM_MSG_MC_REQUEST_INFORMATION &&
+		 txn_mc != SLIM_MSG_MC_REQUEST_VALUE &&
+		 txn_mc != SLIM_MSG_MC_REQUEST_CHANGE_VALUE &&
+		 txn_mc != SLIM_MSG_MC_REQUEST_CLEAR_INFORMATION)) {
+		sync_wr = false;
+		pbuf = msm_get_msg_buf(dev, txn->rl, txn->comp);
+	} else if (txn->mt == SLIM_MSG_MT_DEST_REFERRED_USER &&
+			dev->use_tx_msgqs == MSM_MSGQ_ENABLED &&
+			txn->mc == SLIM_USR_MC_REPEAT_CHANGE_VALUE &&
+			txn->comp) {
+		sync_wr = false;
+		pbuf = msm_get_msg_buf(dev, txn->rl, txn->comp);
+	} else {
+		pbuf = msm_get_msg_buf(dev, txn->rl, &tx_sent);
+	}
+
+	if (!pbuf) {
+		SLIM_ERR(dev, "Message buffer unavailable\n");
+		ret = -ENOMEM;
+		goto ngd_xfer_err;
+	}
+	dev->err = 0;
+
+	if (txn->dt == SLIM_MSG_DEST_ENUMADDR) {
+		ret = -EPROTONOSUPPORT;
+		goto ngd_xfer_err;
+	}
+	if (txn->dt == SLIM_MSG_DEST_LOGICALADDR)
+		*pbuf = SLIM_MSG_ASM_FIRST_WORD(txn->rl, txn->mt, txn->mc, 0,
+				la);
+	else
+		*pbuf = SLIM_MSG_ASM_FIRST_WORD(txn->rl, txn->mt, txn->mc, 1,
+				la);
+	if (txn->dt == SLIM_MSG_DEST_LOGICALADDR)
+		puc = ((u8 *)pbuf) + 3;
+	else
+		puc = ((u8 *)pbuf) + 2;
+	if (txn->rbuf)
+		*(puc++) = txn->tid;
+	if (((txn->mt == SLIM_MSG_MT_CORE) &&
+		((txn->mc >= SLIM_MSG_MC_REQUEST_INFORMATION &&
+		txn->mc <= SLIM_MSG_MC_REPORT_INFORMATION) ||
+		(txn->mc >= SLIM_MSG_MC_REQUEST_VALUE &&
+		 txn->mc <= SLIM_MSG_MC_CHANGE_VALUE))) ||
+		(txn->mc == SLIM_USR_MC_REPEAT_CHANGE_VALUE &&
+		txn->mt == SLIM_MSG_MT_DEST_REFERRED_USER)) {
+		*(puc++) = (txn->ec & 0xFF);
+		*(puc++) = (txn->ec >> 8)&0xFF;
+	}
+	if (txn->wbuf)
+		memcpy(puc, txn->wbuf, txn->len);
+	if (txn->mt == SLIM_MSG_MT_DEST_REFERRED_USER &&
+		(txn->mc == SLIM_USR_MC_CONNECT_SRC ||
+		 txn->mc == SLIM_USR_MC_CONNECT_SINK ||
+		 txn->mc == SLIM_USR_MC_DISCONNECT_PORT) && txn->wbuf &&
+		wbuf[0] == dev->pgdla) {
+		if (txn->mc != SLIM_USR_MC_DISCONNECT_PORT)
+			dev->err = msm_slim_connect_pipe_port(dev, wbuf[1]);
+		else
+			writel_relaxed(0, PGD_PORT(PGD_PORT_CFGn,
+					(dev->pipes[wbuf[1]].port_b),
+						dev->ver));
+		if (dev->err) {
+			SLIM_ERR(dev, "pipe-port connect err:%d\n", dev->err);
+			goto ngd_xfer_err;
+		}
+		/* Add port-base to port number if this is manager side port */
+		puc[1] = (u8)dev->pipes[wbuf[1]].port_b;
+	}
+	dev->err = 0;
+	/*
+	 * If it's a read txn, it may be freed if a response is received by
+	 * the receive thread before the end of this function is reached.
+	 * mc and mt may have changed to convert a standard slimbus code/type
+	 * to a satellite user-defined message, so cache them again.
+	 */
+	txn_mc = txn->mc;
+	txn_mt = txn->mt;
+	ret = msm_send_msg_buf(dev, pbuf, txn->rl,
+			NGD_BASE(dev->ctrl.nr, dev->ver) + NGD_TX_MSG);
+	if (!ret && sync_wr) {
+		int i;
+		int timeout = wait_for_completion_timeout(&tx_sent, HZ);
+		if (!timeout && dev->use_tx_msgqs == MSM_MSGQ_ENABLED) {
+			struct msm_slim_endp *endpoint = &dev->tx_msgq;
+			struct sps_mem_buffer *mem = &endpoint->buf;
+			u32 idx = (u32) (((u8 *)pbuf - (u8 *)mem->base) /
+						SLIM_MSGQ_BUF_LEN);
+			phys_addr_t addr = mem->phys_base +
+						(idx * SLIM_MSGQ_BUF_LEN);
+			ret = -ETIMEDOUT;
+			SLIM_WARN(dev, "timeout, BAM desc_idx:%d, phys:%llx",
+					idx, (u64)addr);
+			for (i = 0; i < (SLIM_MSGQ_BUF_LEN >> 2) ; i++)
+				SLIM_WARN(dev, "timeout:bam-desc[%d]:0x%x",
+							i, *(pbuf + i));
+			if (idx < MSM_TX_BUFS)
+				dev->wr_comp[idx] = NULL;
+			slim_reinit_tx_msgq(dev);
+		} else if (!timeout) {
+			ret = -ETIMEDOUT;
+			SLIM_WARN(dev, "timeout non-BAM TX,len:%d", txn->rl);
+			for (i = 0; i < (SLIM_MSGQ_BUF_LEN >> 2) ; i++)
+				SLIM_WARN(dev, "timeout:txbuf[%d]:0x%x", i,
+						dev->tx_buf[i]);
+		} else {
+			ret = dev->err;
+		}
+	}
+	if (ret) {
+		u32 conf, stat, rx_msgq, int_stat, int_en, int_clr;
+		void __iomem *ngd = dev->base + NGD_BASE(dev->ctrl.nr,
+							dev->ver);
+		SLIM_WARN(dev, "TX failed :MC:0x%x,mt:0x%x, ret:%d, ver:%d\n",
+				txn_mc, txn_mt, ret, dev->ver);
+		conf = readl_relaxed(ngd);
+		stat = readl_relaxed(ngd + NGD_STATUS);
+		rx_msgq = readl_relaxed(ngd + NGD_RX_MSGQ_CFG);
+		int_stat = readl_relaxed(ngd + NGD_INT_STAT);
+		int_en = readl_relaxed(ngd + NGD_INT_EN);
+		int_clr = readl_relaxed(ngd + NGD_INT_CLR);
+
+		SLIM_WARN(dev, "conf:0x%x,stat:0x%x,rxmsgq:0x%x\n",
+				conf, stat, rx_msgq);
+		SLIM_ERR(dev, "int_stat:0x%x,int_en:0x%x,int_cll:0x%x\n",
+				int_stat, int_en, int_clr);
+	}
+
+	if (txn_mt == SLIM_MSG_MT_DEST_REFERRED_USER &&
+		(txn_mc == SLIM_USR_MC_CONNECT_SRC ||
+		 txn_mc == SLIM_USR_MC_CONNECT_SINK ||
+		 txn_mc == SLIM_USR_MC_DISCONNECT_PORT)) {
+		int timeout;
+		unsigned long flags;
+
+		mutex_unlock(&dev->tx_lock);
+		msm_slim_put_ctrl(dev);
+		if (!ret) {
+			timeout = wait_for_completion_timeout(txn->comp, HZ);
+			/* remote side did not acknowledge */
+			if (!timeout)
+				ret = -EREMOTEIO;
+			else
+				ret = txn->ec;
+		}
+		if (ret) {
+			SLIM_ERR(dev,
+				"connect/disconnect:0x%x,tid:%d err:%d\n",
+					txn->mc, txn->tid, ret);
+			spin_lock_irqsave(&ctrl->txn_lock, flags);
+			ctrl->txnt[txn->tid] = NULL;
+			spin_unlock_irqrestore(&ctrl->txn_lock, flags);
+		}
+		return ret ? ret : dev->err;
+	}
+ngd_xfer_err:
+	if (!report_sat) {
+		mutex_unlock(&dev->tx_lock);
+		msm_slim_put_ctrl(dev);
+	}
+	return ret ? ret : dev->err;
+}
+
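+/*
+ * ngd_get_ec() builds the 16-bit element code for a value-element access.
+ * Worked example (illustration only): for start_offset = 0x123 and
+ * len = 4, *ec = (4 - 1) | 0x8 | ((0x123 & 0xF) << 4) |
+ * ((0x123 & 0xFF0) << 4) = 0x3 | 0x8 | 0x30 | 0x1200 = 0x123B.
+ */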
+static int ngd_get_ec(u16 start_offset, u8 len, u16 *ec)
+{
+	if (len > SLIM_MAX_VE_SLC_BYTES ||
+		start_offset > MSM_SLIM_VE_MAX_MAP_ADDR)
+		return -EINVAL;
+	if (len <= 4) {
+		*ec = len - 1;
+	} else if (len <= 8) {
+		if (len & 0x1)
+			return -EINVAL;
+		*ec = ((len >> 1) + 1);
+	} else {
+		if (len & 0x3)
+			return -EINVAL;
+		*ec = ((len >> 2) + 3);
+	}
+	*ec |= (0x8 | ((start_offset & 0xF) << 4));
+	*ec |= ((start_offset & 0xFF0) << 4);
+	return 0;
+}
+
+static int ngd_user_msg(struct slim_controller *ctrl, u8 la, u8 mt, u8 mc,
+				struct slim_ele_access *msg, u8 *buf, u8 len)
+{
+	int ret;
+	struct slim_msg_txn txn;
+
+	if (mt != SLIM_MSG_MT_DEST_REFERRED_USER ||
+		mc != SLIM_USR_MC_REPEAT_CHANGE_VALUE) {
+		return -EPROTONOSUPPORT;
+	}
+
+	ret = ngd_get_ec(msg->start_offset, len, &txn.ec);
+	if (ret)
+		return ret;
+	txn.la = la;
+	txn.mt = mt;
+	txn.mc = mc;
+	txn.dt = SLIM_MSG_DEST_LOGICALADDR;
+	txn.len = len;
+	txn.rl = len + 6;
+	txn.wbuf = buf;
+	txn.rbuf = NULL;
+	txn.comp = msg->comp;
+	return ngd_xfer_msg(ctrl, &txn);
+}
+
+static int ngd_bulk_cb(void *ctx, int err)
+{
+	if (ctx)
+		complete(ctx);
+	return err;
+}
+
+static int ngd_bulk_wr(struct slim_controller *ctrl, u8 la, u8 mt, u8 mc,
+			struct slim_val_inf msgs[], int n,
+			int (*comp_cb)(void *ctx, int err), void *ctx)
+{
+	struct msm_slim_ctrl *dev = slim_get_ctrldata(ctrl);
+	int i, ret;
+	struct msm_slim_endp *endpoint = &dev->tx_msgq;
+	u32 *header;
+	DECLARE_COMPLETION_ONSTACK(done);
+
+	ret = msm_slim_get_ctrl(dev);
+	mutex_lock(&dev->tx_lock);
+
+	if ((pm_runtime_enabled(dev->dev) && ret < 0) ||
+			dev->state >= MSM_CTRL_ASLEEP) {
+		SLIM_WARN(dev, "vote failed/SSR in-progress ret:%d, state:%d",
+				ret, dev->state);
+		pm_runtime_set_suspended(dev->dev);
+		mutex_unlock(&dev->tx_lock);
+		msm_slim_put_ctrl(dev);
+		return -EREMOTEIO;
+	}
+	if (!pm_runtime_enabled(dev->dev) && dev->state == MSM_CTRL_ASLEEP) {
+		mutex_unlock(&dev->tx_lock);
+		ret = ngd_slim_runtime_resume(dev->dev);
+
+		if (ret) {
+			SLIM_ERR(dev, "slim resume failed ret:%d, state:%d",
+					ret, dev->state);
+			return -EREMOTEIO;
+		}
+		mutex_lock(&dev->tx_lock);
+	}
+
+	ret = ngd_check_hw_status(dev);
+	if (ret) {
+		mutex_unlock(&dev->tx_lock);
+		msm_slim_put_ctrl(dev);
+		return ret;
+	}
+
+	if (dev->use_tx_msgqs != MSM_MSGQ_ENABLED) {
+		SLIM_WARN(dev, "bulk wr not supported");
+		ret = -EPROTONOSUPPORT;
+		goto retpath;
+	}
+	if (dev->bulk.in_progress) {
+		SLIM_WARN(dev, "bulk wr in progress:");
+		ret = -EAGAIN;
+		goto retpath;
+	}
+	dev->bulk.in_progress = true;
+	/* every txn has 5 bytes of overhead: la, mc, mt, ec, len */
+	dev->bulk.size = n * 5;
+	for (i = 0; i < n; i++) {
+		dev->bulk.size += msgs[i].num_bytes;
+		dev->bulk.size += (4 - ((msgs[i].num_bytes + 1) & 0x3));
+	}
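+	/*
+	 * Worked example (illustration only): a single message with
+	 * num_bytes = 6 accounts for 5 header bytes + 6 payload bytes +
+	 * (4 - ((6 + 1) & 0x3)) = 1 byte of padding, i.e. 12 bytes total,
+	 * which is rl = 11 rounded up to whole 32-bit words.
+	 */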
+
+	if (dev->bulk.size > 0xffff) {
+		SLIM_WARN(dev, "len exceeds limit, split bulk and retry");
+		ret = -EDQUOT;
+		goto retpath;
+	}
+	if (dev->bulk.size > dev->bulk.buf_sz) {
+		void *temp = krealloc(dev->bulk.base, dev->bulk.size,
+				      GFP_KERNEL | GFP_DMA);
+		if (!temp) {
+			ret = -ENOMEM;
+			goto retpath;
+		}
+		dev->bulk.base = temp;
+		dev->bulk.buf_sz = dev->bulk.size;
+	}
+
+	header = dev->bulk.base;
+	for (i = 0; i < n; i++) {
+		u8 *buf = (u8 *)header;
+		int rl = msgs[i].num_bytes + 5;
+		u16 ec;
+
+		*header = SLIM_MSG_ASM_FIRST_WORD(rl, mt, mc, 0, la);
+		buf += 3;
+		ret = ngd_get_ec(msgs[i].start_offset, msgs[i].num_bytes, &ec);
+		if (ret)
+			goto retpath;
+		*(buf++) = (ec & 0xFF);
+		*(buf++) = (ec >> 8) & 0xFF;
+		memcpy(buf, msgs[i].wbuf, msgs[i].num_bytes);
+		buf += msgs[i].num_bytes;
+		header += (rl >> 2);
+		if (rl & 3) {
+			header++;
+			memset(buf, 0, ((u8 *)header - buf));
+		}
+	}
+	header = dev->bulk.base;
+	if (comp_cb) {
+		dev->bulk.cb = comp_cb;
+		dev->bulk.ctx = ctx;
+	} else {
+		dev->bulk.cb = ngd_bulk_cb;
+		dev->bulk.ctx = &done;
+	}
+	dev->bulk.wr_dma = dma_map_single(dev->dev, dev->bulk.base,
+					  dev->bulk.size, DMA_TO_DEVICE);
+	if (dma_mapping_error(dev->dev, dev->bulk.wr_dma)) {
+		ret = -ENOMEM;
+		goto retpath;
+	}
+
+	ret = sps_transfer_one(endpoint->sps, dev->bulk.wr_dma, dev->bulk.size,
+						NULL, SPS_IOVEC_FLAG_EOT);
+	if (ret) {
+		SLIM_WARN(dev, "sps transfer one returned error:%d", ret);
+		goto retpath;
+	}
+	if (dev->bulk.cb == ngd_bulk_cb) {
+		int timeout = wait_for_completion_timeout(&done, HZ);
+
+		if (!timeout) {
+			SLIM_WARN(dev, "timeout for bulk wr");
+			dma_unmap_single(dev->dev, dev->bulk.wr_dma,
+					 dev->bulk.size, DMA_TO_DEVICE);
+			ret = -ETIMEDOUT;
+		}
+	}
+retpath:
+	if (ret) {
+		dev->bulk.in_progress = false;
+		dev->bulk.ctx = NULL;
+		dev->bulk.wr_dma = 0;
+		slim_reinit_tx_msgq(dev);
+	}
+	mutex_unlock(&dev->tx_lock);
+	msm_slim_put_ctrl(dev);
+	return ret;
+}
+
+static int ngd_xferandwait_ack(struct slim_controller *ctrl,
+				struct slim_msg_txn *txn)
+{
+	struct msm_slim_ctrl *dev = slim_get_ctrldata(ctrl);
+	unsigned long flags;
+	int ret;
+
+	if (dev->state == MSM_CTRL_DOWN) {
+		/*
+		 * No need to send anything to the bus due to SSR;
+		 * transactions related to channel removal are marked
+		 * successful since the HW is down.
+		 */
+		if ((txn->mt == SLIM_MSG_MT_DEST_REFERRED_USER) &&
+			((txn->mc >= SLIM_USR_MC_CHAN_CTRL &&
+			  txn->mc <= SLIM_USR_MC_REQ_BW) ||
+			txn->mc == SLIM_USR_MC_DISCONNECT_PORT)) {
+			spin_lock_irqsave(&ctrl->txn_lock, flags);
+			ctrl->txnt[txn->tid] = NULL;
+			spin_unlock_irqrestore(&ctrl->txn_lock, flags);
+			return 0;
+		}
+	}
+
+	ret = ngd_xfer_msg(ctrl, txn);
+	if (!ret) {
+		int timeout;
+		timeout = wait_for_completion_timeout(txn->comp, HZ);
+		if (!timeout)
+			ret = -ETIMEDOUT;
+		else
+			ret = txn->ec;
+	}
+
+	if (ret) {
+		if (ret != -EREMOTEIO || txn->mc != SLIM_USR_MC_CHAN_CTRL)
+			SLIM_ERR(dev, "master msg:0x%x,tid:%d ret:%d\n",
+				txn->mc, txn->tid, ret);
+		spin_lock_irqsave(&ctrl->txn_lock, flags);
+		ctrl->txnt[txn->tid] = NULL;
+		spin_unlock_irqrestore(&ctrl->txn_lock, flags);
+	}
+
+	return ret;
+}
+
+static int ngd_allocbw(struct slim_device *sb, int *subfrmc, int *clkgear)
+{
+	int ret = 0, num_chan = 0;
+	struct slim_pending_ch *pch;
+	struct slim_msg_txn txn;
+	struct slim_controller *ctrl = sb->ctrl;
+	DECLARE_COMPLETION_ONSTACK(done);
+	u8 wbuf[SLIM_MSGQ_BUF_LEN];
+	struct msm_slim_ctrl *dev = slim_get_ctrldata(ctrl);
+
+	*clkgear = ctrl->clkgear;
+	*subfrmc = 0;
+	txn.mt = SLIM_MSG_MT_DEST_REFERRED_USER;
+	txn.dt = SLIM_MSG_DEST_LOGICALADDR;
+	txn.la = SLIM_LA_MGR;
+	txn.len = 0;
+	txn.ec = 0;
+	txn.wbuf = wbuf;
+	txn.rbuf = NULL;
+
+	if (ctrl->sched.msgsl != ctrl->sched.pending_msgsl) {
+		SLIM_DBG(dev, "slim reserve BW for messaging: req: %d\n",
+				ctrl->sched.pending_msgsl);
+		txn.mc = SLIM_USR_MC_REQ_BW;
+		wbuf[txn.len++] = ((sb->laddr & 0x1f) |
+				((u8)(ctrl->sched.pending_msgsl & 0x7) << 5));
+		wbuf[txn.len++] = (u8)(ctrl->sched.pending_msgsl >> 3);
+		ret = ngd_get_tid(ctrl, &txn, &wbuf[txn.len++], &done);
+		if (ret)
+			return ret;
+		txn.rl = txn.len + 4;
+		ret = ngd_xferandwait_ack(ctrl, &txn);
+		if (ret)
+			return ret;
+
+		txn.mc = SLIM_USR_MC_RECONFIG_NOW;
+		txn.len = 2;
+		wbuf[1] = sb->laddr;
+		txn.rl = txn.len + 4;
+		ret = ngd_get_tid(ctrl, &txn, &wbuf[0], &done);
+		if (ret)
+			return ret;
+		ret = ngd_xferandwait_ack(ctrl, &txn);
+		if (ret)
+			return ret;
+
+		txn.len = 0;
+	}
+	list_for_each_entry(pch, &sb->mark_define, pending) {
+		struct slim_ich *slc;
+		slc = &ctrl->chans[pch->chan];
+		if (!slc) {
+			SLIM_WARN(dev, "no channel in define?\n");
+			return -ENXIO;
+		}
+		if (txn.len == 0) {
+			/* Per protocol, only last 5 bits for client no. */
+			wbuf[txn.len++] = (u8) (slc->prop.dataf << 5) |
+					(sb->laddr & 0x1f);
+			wbuf[txn.len] = slc->prop.sampleszbits >> 2;
+			if (slc->srch && slc->prop.prot == SLIM_PUSH)
+				slc->prop.prot = SLIM_PULL;
+			if (slc->coeff == SLIM_COEFF_3)
+				wbuf[txn.len] |= 1 << 5;
+			wbuf[txn.len++] |= slc->prop.auxf << 6;
+			wbuf[txn.len++] = slc->rootexp << 4 | slc->prop.prot;
+			wbuf[txn.len++] = slc->prrate;
+			ret = ngd_get_tid(ctrl, &txn, &wbuf[txn.len++], &done);
+			if (ret) {
+				SLIM_WARN(dev, "no tid for channel define?\n");
+				return -ENXIO;
+			}
+		}
+		num_chan++;
+		wbuf[txn.len++] = slc->chan;
+		SLIM_INFO(dev, "slim activate chan:%d, laddr: 0x%x\n",
+				slc->chan, sb->laddr);
+	}
+	if (txn.len) {
+		txn.mc = SLIM_USR_MC_DEF_ACT_CHAN;
+		txn.rl = txn.len + 4;
+		ret = ngd_xferandwait_ack(ctrl, &txn);
+		if (ret)
+			return ret;
+
+		txn.mc = SLIM_USR_MC_RECONFIG_NOW;
+		txn.len = 2;
+		wbuf[1] = sb->laddr;
+		txn.rl = txn.len + 4;
+		ret = ngd_get_tid(ctrl, &txn, &wbuf[0], &done);
+		if (ret)
+			return ret;
+		ret = ngd_xferandwait_ack(ctrl, &txn);
+		if (ret)
+			return ret;
+	}
+	txn.len = 0;
+	list_for_each_entry(pch, &sb->mark_removal, pending) {
+		struct slim_ich *slc;
+		slc = &ctrl->chans[pch->chan];
+		if (!slc) {
+			SLIM_WARN(dev, "no channel in removal?\n");
+			return -ENXIO;
+		}
+		if (txn.len == 0) {
+			/* Per protocol, only last 5 bits for client no. */
+			wbuf[txn.len++] = (u8) (SLIM_CH_REMOVE << 6) |
+					(sb->laddr & 0x1f);
+			ret = ngd_get_tid(ctrl, &txn, &wbuf[txn.len++], &done);
+			if (ret) {
+				SLIM_WARN(dev, "no tid for channel define?\n");
+				return -ENXIO;
+			}
+		}
+		wbuf[txn.len++] = slc->chan;
+		SLIM_INFO(dev, "slim remove chan:%d, laddr: 0x%x\n",
+			   slc->chan, sb->laddr);
+	}
+	if (txn.len) {
+		txn.mc = SLIM_USR_MC_CHAN_CTRL;
+		txn.rl = txn.len + 4;
+		ret = ngd_xferandwait_ack(ctrl, &txn);
+		/* HW restarting, channel removal should succeed */
+		if (ret == -EREMOTEIO)
+			return 0;
+		else if (ret)
+			return ret;
+
+		txn.mc = SLIM_USR_MC_RECONFIG_NOW;
+		txn.len = 2;
+		wbuf[1] = sb->laddr;
+		txn.rl = txn.len + 4;
+		ret = ngd_get_tid(ctrl, &txn, &wbuf[0], &done);
+		if (ret)
+			return ret;
+		ret = ngd_xferandwait_ack(ctrl, &txn);
+		if (ret)
+			return ret;
+		txn.len = 0;
+	}
+	return 0;
+}
+
+static int ngd_set_laddr(struct slim_controller *ctrl, const u8 *ea,
+				u8 elen, u8 laddr)
+{
+	return 0;
+}
+
+static int ngd_get_laddr(struct slim_controller *ctrl, const u8 *ea,
+				u8 elen, u8 *laddr)
+{
+	int ret;
+	u8 wbuf[10];
+	struct slim_msg_txn txn;
+	DECLARE_COMPLETION_ONSTACK(done);
+	txn.mt = SLIM_MSG_MT_DEST_REFERRED_USER;
+	txn.dt = SLIM_MSG_DEST_LOGICALADDR;
+	txn.la = SLIM_LA_MGR;
+	txn.ec = 0;
+	ret = ngd_get_tid(ctrl, &txn, &wbuf[0], &done);
+	if (ret)
+		return ret;
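+	/*
+	 * Payload: wbuf[0] carries the tid, followed by the enumeration
+	 * address; rl (11) is the 7-byte payload plus the 4-byte header.
+	 */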
+	memcpy(&wbuf[1], ea, elen);
+	txn.mc = SLIM_USR_MC_ADDR_QUERY;
+	txn.rl = 11;
+	txn.len = 7;
+	txn.wbuf = wbuf;
+	txn.rbuf = NULL;
+	ret = ngd_xferandwait_ack(ctrl, &txn);
+	if (!ret && txn.la == 0xFF)
+		ret = -ENXIO;
+	else if (!ret)
+		*laddr = txn.la;
+	return ret;
+}
+
+static void ngd_slim_setup(struct msm_slim_ctrl *dev)
+{
+	u32 new_cfg = NGD_CFG_ENABLE;
+	u32 cfg = readl_relaxed(dev->base +
+				 NGD_BASE(dev->ctrl.nr, dev->ver));
+	if (dev->state == MSM_CTRL_DOWN) {
+		/* if called after SSR, cleanup and re-assign */
+		if (dev->use_tx_msgqs != MSM_MSGQ_RESET)
+			msm_slim_deinit_ep(dev, &dev->tx_msgq,
+					   &dev->use_tx_msgqs);
+
+		if (dev->use_rx_msgqs != MSM_MSGQ_RESET)
+			msm_slim_deinit_ep(dev, &dev->rx_msgq,
+					   &dev->use_rx_msgqs);
+
+		msm_slim_sps_init(dev, dev->bam_mem,
+			NGD_BASE(dev->ctrl.nr,
+			dev->ver) + NGD_STATUS, true);
+	} else {
+		if (dev->use_rx_msgqs == MSM_MSGQ_DISABLED)
+			goto setup_tx_msg_path;
+
+		if ((dev->use_rx_msgqs == MSM_MSGQ_ENABLED) &&
+			(cfg & NGD_CFG_RX_MSGQ_EN))
+			goto setup_tx_msg_path;
+
+		if (dev->use_rx_msgqs == MSM_MSGQ_ENABLED)
+			msm_slim_disconnect_endp(dev, &dev->rx_msgq,
+						 &dev->use_rx_msgqs);
+		msm_slim_connect_endp(dev, &dev->rx_msgq);
+
+setup_tx_msg_path:
+		if (dev->use_tx_msgqs == MSM_MSGQ_DISABLED)
+			goto ngd_enable;
+		if (dev->use_tx_msgqs == MSM_MSGQ_ENABLED &&
+			cfg & NGD_CFG_TX_MSGQ_EN)
+			goto ngd_enable;
+
+		if (dev->use_tx_msgqs == MSM_MSGQ_ENABLED)
+			msm_slim_disconnect_endp(dev, &dev->tx_msgq,
+						 &dev->use_tx_msgqs);
+		msm_slim_connect_endp(dev, &dev->tx_msgq);
+	}
+ngd_enable:
+
+	if (dev->use_rx_msgqs == MSM_MSGQ_ENABLED)
+		new_cfg |= NGD_CFG_RX_MSGQ_EN;
+	if (dev->use_tx_msgqs == MSM_MSGQ_ENABLED)
+		new_cfg |= NGD_CFG_TX_MSGQ_EN;
+
+	/* Enable NGD, and program MSGQs if not already */
+	if (cfg == new_cfg)
+		return;
+
+	writel_relaxed(new_cfg, dev->base + NGD_BASE(dev->ctrl.nr, dev->ver));
+	/* make sure NGD MSG-Q config goes through */
+	mb();
+}
+
+static void ngd_slim_rx(struct msm_slim_ctrl *dev, u8 *buf)
+{
+	unsigned long flags;
+	u8 mc, mt, len;
+
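+	/* First RX byte: 5 LSBs carry the message length, 3 MSBs the type */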
+	len = buf[0] & 0x1F;
+	mt = (buf[0] >> 5) & 0x7;
+	mc = buf[1];
+	if (mc == SLIM_USR_MC_MASTER_CAPABILITY &&
+		mt == SLIM_MSG_MT_SRC_REFERRED_USER)
+		complete(&dev->rx_msgq_notify);
+
+	if (mc == SLIM_MSG_MC_REPLY_INFORMATION ||
+			mc == SLIM_MSG_MC_REPLY_VALUE) {
+		u8 tid = buf[3];
+		dev_dbg(dev->dev, "tid:%d, len:%d\n", tid, len);
+		slim_msg_response(&dev->ctrl, &buf[4], tid,
+					len - 4);
+		pm_runtime_mark_last_busy(dev->dev);
+	}
+	if (mc == SLIM_USR_MC_ADDR_REPLY &&
+		mt == SLIM_MSG_MT_SRC_REFERRED_USER) {
+		struct slim_msg_txn *txn;
+		u8 failed_ea[6] = {0, 0, 0, 0, 0, 0};
+
+		spin_lock_irqsave(&dev->ctrl.txn_lock, flags);
+		txn = dev->ctrl.txnt[buf[3]];
+		if (!txn) {
+			spin_unlock_irqrestore(&dev->ctrl.txn_lock, flags);
+			SLIM_WARN(dev,
+				"LADDR response after timeout, tid:0x%x\n",
+					buf[3]);
+			return;
+		}
+		if (memcmp(&buf[4], failed_ea, 6))
+			txn->la = buf[10];
+		dev->ctrl.txnt[buf[3]] = NULL;
+		complete(txn->comp);
+		spin_unlock_irqrestore(&dev->ctrl.txn_lock, flags);
+	}
+	if (mc == SLIM_USR_MC_GENERIC_ACK &&
+		mt == SLIM_MSG_MT_SRC_REFERRED_USER) {
+		struct slim_msg_txn *txn;
+
+		spin_lock_irqsave(&dev->ctrl.txn_lock, flags);
+		txn = dev->ctrl.txnt[buf[3]];
+		if (!txn) {
+			spin_unlock_irqrestore(&dev->ctrl.txn_lock, flags);
+			SLIM_WARN(dev, "ACK received after timeout, tid:0x%x\n",
+				buf[3]);
+			return;
+		}
+		dev_dbg(dev->dev, "got response:tid:%d, response:0x%x\n",
+				(int)buf[3], buf[4]);
+		if (!(buf[4] & MSM_SAT_SUCCSS)) {
+			SLIM_WARN(dev, "TID:%d, NACK code:0x%x\n", (int)buf[3],
+						buf[4]);
+			txn->ec = -EIO;
+		}
+		dev->ctrl.txnt[buf[3]] = NULL;
+		complete(txn->comp);
+		spin_unlock_irqrestore(&dev->ctrl.txn_lock, flags);
+	}
+}
+
+static int ngd_slim_power_up(struct msm_slim_ctrl *dev, bool mdm_restart)
+{
+	void __iomem *ngd;
+	int timeout, retries = 0, ret = 0;
+	enum msm_ctrl_state cur_state = dev->state;
+	u32 laddr;
+	u32 rx_msgq;
+	u32 ngd_int = (NGD_INT_TX_NACKED_2 |
+			NGD_INT_MSG_BUF_CONTE | NGD_INT_MSG_TX_INVAL |
+			NGD_INT_IE_VE_CHG | NGD_INT_DEV_ERR |
+			NGD_INT_TX_MSG_SENT);
+
+	if (!mdm_restart && cur_state == MSM_CTRL_DOWN) {
+		int timeout = wait_for_completion_timeout(&dev->qmi.qmi_comp,
+						HZ);
+		if (!timeout) {
+			SLIM_ERR(dev, "slimbus QMI init timed out\n");
+			return -EREMOTEIO;
+		}
+	}
+
+hw_init_retry:
+	/* No need to vote if the controller is not in low-power mode */
+	if (!mdm_restart &&
+		(cur_state == MSM_CTRL_DOWN || cur_state == MSM_CTRL_ASLEEP)) {
+		ret = msm_slim_qmi_power_request(dev, true);
+		if (ret) {
+			SLIM_WARN(dev, "SLIM power req failed:%d, retry:%d\n",
+					ret, retries);
+			if (!atomic_read(&dev->ssr_in_progress))
+				msm_slim_qmi_power_request(dev, false);
+			if (retries < INIT_MX_RETRIES &&
+				!atomic_read(&dev->ssr_in_progress)) {
+				retries++;
+				goto hw_init_retry;
+			}
+			return ret;
+		}
+	}
+	retries = 0;
+
+	if (!dev->ver) {
+		dev->ver = readl_relaxed(dev->base);
+		/* Version info in 16 MSbits */
+		dev->ver >>= 16;
+	}
+	ngd = dev->base + NGD_BASE(dev->ctrl.nr, dev->ver);
+	laddr = readl_relaxed(ngd + NGD_STATUS);
+	if (laddr & NGD_LADDR) {
+		u32 int_en = readl_relaxed(ngd + NGD_INT_EN);
+
+		/*
+		 * External MDM restart case where the ADSP itself was the
+		 * active framer, e.g. the modem restarted while playback
+		 * was active.
+		 */
+		if (cur_state == MSM_CTRL_AWAKE) {
+			SLIM_INFO(dev, "Subsys restart: ADSP active framer\n");
+			return 0;
+		}
+		/*
+		 * ADSP power collapse case, where HW wasn't reset.
+		 */
+		if (int_en != 0)
+			return 0;
+
+		/* Retention */
+		if (dev->use_rx_msgqs == MSM_MSGQ_ENABLED)
+			msm_slim_disconnect_endp(dev, &dev->rx_msgq,
+						 &dev->use_rx_msgqs);
+		if (dev->use_tx_msgqs == MSM_MSGQ_ENABLED)
+			msm_slim_disconnect_endp(dev, &dev->tx_msgq,
+						 &dev->use_tx_msgqs);
+
+		writel_relaxed(ngd_int, (dev->base + NGD_INT_EN +
+					NGD_BASE(dev->ctrl.nr, dev->ver)));
+
+		rx_msgq = readl_relaxed(ngd + NGD_RX_MSGQ_CFG);
+		/*
+		 * Program the minimum timeout value so that the signal is
+		 * triggered immediately after a message is received
+		 */
+		writel_relaxed((rx_msgq | SLIM_RX_MSGQ_TIMEOUT_VAL),
+						(ngd + NGD_RX_MSGQ_CFG));
+		/* reconnect BAM pipes if needed and enable NGD */
+		ngd_slim_setup(dev);
+		return 0;
+	}
+
+	if (mdm_restart) {
+		/*
+		 * External MDM SSR while the MDM is the active framer: the
+		 * ADSP will reset the slimbus HW, so disconnect the BAM pipes;
+		 * they are reconnected once the capability message is
+		 * received. Mark the device DOWN to stay in sync with the HW.
+		 */
+		cur_state = MSM_CTRL_DOWN;
+		SLIM_INFO(dev,
+			"SLIM MDM restart: MDM active framer: reinit HW\n");
+		/* disconnect BAM pipes */
+		msm_slim_sps_exit(dev, false);
+		dev->state = MSM_CTRL_DOWN;
+	}
+
+capability_retry:
+	/*
+	 * ADSP power collapse (or SSR) case where the HW was reset;
+	 * BAM programming happens once the capability message is received
+	 */
+	writel_relaxed(ngd_int, dev->base + NGD_INT_EN +
+				NGD_BASE(dev->ctrl.nr, dev->ver));
+
+	rx_msgq = readl_relaxed(ngd + NGD_RX_MSGQ_CFG);
+	/*
+	 * Program the minimum timeout value so that the signal is
+	 * triggered immediately after a message is received
+	 */
+	writel_relaxed(rx_msgq|SLIM_RX_MSGQ_TIMEOUT_VAL,
+					ngd + NGD_RX_MSGQ_CFG);
+	/* make sure register got updated */
+	mb();
+
+	/* reconnect BAM pipes if needed and enable NGD */
+	ngd_slim_setup(dev);
+
+	timeout = wait_for_completion_timeout(&dev->reconf, HZ);
+	if (!timeout) {
+		u32 cfg = readl_relaxed(dev->base +
+					 NGD_BASE(dev->ctrl.nr, dev->ver));
+		laddr = readl_relaxed(ngd + NGD_STATUS);
+		SLIM_WARN(dev,
+			  "slim capability time-out:%d, stat:0x%x,cfg:0x%x\n",
+				retries, laddr, cfg);
+		if ((retries < INIT_MX_RETRIES) &&
+				!atomic_read(&dev->ssr_in_progress)) {
+			retries++;
+			goto capability_retry;
+		}
+		return -ETIMEDOUT;
+	}
+	/* multiple transactions may be waiting for slimbus to power up */
+	if (cur_state == MSM_CTRL_DOWN)
+		complete_all(&dev->ctrl_up);
+	/* Resetting the log level */
+	SLIM_RST_LOGLVL(dev);
+	return 0;
+}
+
+static int ngd_slim_enable(struct msm_slim_ctrl *dev, bool enable)
+{
+	int ret = 0;
+	if (enable) {
+		ret = msm_slim_qmi_init(dev, false);
+		/* controller state should be in sync with framework state */
+		if (!ret) {
+			complete(&dev->qmi.qmi_comp);
+			if (!pm_runtime_enabled(dev->dev) ||
+					!pm_runtime_suspended(dev->dev))
+				ngd_slim_runtime_resume(dev->dev);
+			else
+				pm_runtime_resume(dev->dev);
+			pm_runtime_mark_last_busy(dev->dev);
+			pm_runtime_put(dev->dev);
+		} else
+			SLIM_ERR(dev, "qmi init fail, ret:%d, state:%d\n",
+					ret, dev->state);
+	} else {
+		msm_slim_qmi_exit(dev);
+	}
+
+	return ret;
+}
+
+#ifdef CONFIG_PM
+static int ngd_slim_power_down(struct msm_slim_ctrl *dev)
+{
+	unsigned long flags;
+	int i;
+	struct slim_controller *ctrl = &dev->ctrl;
+
+	spin_lock_irqsave(&ctrl->txn_lock, flags);
+	/* Refuse to power down while a response is pending for any message */
+	for (i = 0; i < ctrl->last_tid; i++) {
+		if (ctrl->txnt[i]) {
+			spin_unlock_irqrestore(&ctrl->txn_lock, flags);
+			SLIM_INFO(dev, "NGD down: txn-rsp for %d pending\n", i);
+			return -EBUSY;
+		}
+	}
+	spin_unlock_irqrestore(&ctrl->txn_lock, flags);
+	return msm_slim_qmi_power_request(dev, false);
+}
+#endif
+
+static int ngd_slim_rx_msgq_thread(void *data)
+{
+	struct msm_slim_ctrl *dev = (struct msm_slim_ctrl *)data;
+	struct completion *notify = &dev->rx_msgq_notify;
+	int ret = 0;
+
+	while (!kthread_should_stop()) {
+		struct slim_msg_txn txn;
+		int retries = 0;
+		u8 wbuf[8];
+
+		wait_for_completion_interruptible(notify);
+
+		txn.dt = SLIM_MSG_DEST_LOGICALADDR;
+		txn.ec = 0;
+		txn.rbuf = NULL;
+		txn.mc = SLIM_USR_MC_REPORT_SATELLITE;
+		txn.mt = SLIM_MSG_MT_SRC_REFERRED_USER;
+		txn.la = SLIM_LA_MGR;
+		wbuf[0] = SAT_MAGIC_LSB;
+		wbuf[1] = SAT_MAGIC_MSB;
+		wbuf[2] = SAT_MSG_VER;
+		wbuf[3] = SAT_MSG_PROT;
+		txn.wbuf = wbuf;
+		txn.len = 4;
+		SLIM_INFO(dev, "SLIM SAT: Rcvd master capability\n");
+capability_retry:
+		txn.rl = 8;
+		ret = ngd_xfer_msg(&dev->ctrl, &txn);
+		if (!ret) {
+			enum msm_ctrl_state prev_state = dev->state;
+
+			SLIM_INFO(dev,
+				"SLIM SAT: capability exchange successful\n");
+			if (prev_state < MSM_CTRL_ASLEEP)
+				SLIM_WARN(dev,
+					"capability due to noise, state:%d\n",
+						prev_state);
+			complete(&dev->reconf);
+			/* ADSP SSR, send device_up notifications */
+			if (prev_state == MSM_CTRL_DOWN)
+				complete(&dev->qmi.slave_notify);
+		} else if (ret == -EIO) {
+			SLIM_WARN(dev, "capability message NACKed, retrying\n");
+			if (retries < INIT_MX_RETRIES) {
+				msleep(DEF_RETRY_MS);
+				retries++;
+				goto capability_retry;
+			}
+		} else {
+			SLIM_WARN(dev, "SLIM: capability TX failed:%d\n", ret);
+		}
+	}
+	return 0;
+}
+
+static int ngd_notify_slaves(void *data)
+{
+	struct msm_slim_ctrl *dev = (struct msm_slim_ctrl *)data;
+	struct slim_controller *ctrl = &dev->ctrl;
+	struct slim_device *sbdev;
+	struct list_head *pos, *next;
+	int ret, i = 0;
+	ret = qmi_svc_event_notifier_register(SLIMBUS_QMI_SVC_ID,
+				SLIMBUS_QMI_SVC_V1,
+				SLIMBUS_QMI_INS_ID, &dev->qmi.nb);
+	if (ret) {
+		pr_err("Slimbus QMI service registration failed:%d", ret);
+		return ret;
+	}
+
+	while (!kthread_should_stop()) {
+		wait_for_completion_interruptible(&dev->qmi.slave_notify);
+		/* Probe devices for first notification */
+		if (!i) {
+			i++;
+			dev->err = 0;
+			if (dev->dev->of_node)
+				of_register_slim_devices(&dev->ctrl);
+
+			/*
+			 * Add devices registered with board-info now that
+			 * controller is up
+			 */
+			slim_ctrl_add_boarddevs(&dev->ctrl);
+			ngd_dom_init(dev);
+		} else {
+			slim_framer_booted(ctrl);
+		}
+		mutex_lock(&ctrl->m_ctrl);
+		list_for_each_safe(pos, next, &ctrl->devs) {
+			int j;
+			sbdev = list_entry(pos, struct slim_device, dev_list);
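+			/*
+			 * Drop the list mutex across the LA query: the query
+			 * transacts on the bus and may sleep.
+			 */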
+			mutex_unlock(&ctrl->m_ctrl);
+			for (j = 0; j < LADDR_RETRY; j++) {
+				ret = slim_get_logical_addr(sbdev,
+						sbdev->e_addr,
+						6, &sbdev->laddr);
+				if (!ret)
+					break;
+				else /* time for ADSP to assign LA */
+					msleep(20);
+			}
+			mutex_lock(&ctrl->m_ctrl);
+		}
+		mutex_unlock(&ctrl->m_ctrl);
+	}
+	return 0;
+}
+
+static void ngd_dom_down(struct msm_slim_ctrl *dev)
+{
+	struct slim_controller *ctrl = &dev->ctrl;
+	struct slim_device *sbdev;
+
+	mutex_lock(&dev->ssr_lock);
+	ngd_slim_enable(dev, false);
+	/* device up should be called again after SSR */
+	list_for_each_entry(sbdev, &ctrl->devs, dev_list)
+		slim_report_absent(sbdev);
+	SLIM_INFO(dev, "SLIM ADSP SSR (DOWN) done\n");
+	mutex_unlock(&dev->ssr_lock);
+}
+
+static void ngd_dom_up(struct work_struct *work)
+{
+	struct msm_slim_ss *dsp =
+		container_of(work, struct msm_slim_ss, dom_up);
+	struct msm_slim_ctrl *dev =
+		container_of(dsp, struct msm_slim_ctrl, dsp);
+	mutex_lock(&dev->ssr_lock);
+	ngd_slim_enable(dev, true);
+	mutex_unlock(&dev->ssr_lock);
+}
+
+static ssize_t show_mask(struct device *device, struct device_attribute *attr,
+			char *buf)
+{
+	struct platform_device *pdev = to_platform_device(device);
+	struct msm_slim_ctrl *dev = platform_get_drvdata(pdev);
+	return snprintf(buf, PAGE_SIZE, "%u\n", dev->ipc_log_mask);
+}
+
+static ssize_t set_mask(struct device *device, struct device_attribute *attr,
+			const char *buf, size_t count)
+{
+	struct platform_device *pdev = to_platform_device(device);
+	struct msm_slim_ctrl *dev = platform_get_drvdata(pdev);
+
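+	/* Only the first character is parsed; values above DBG_LEV are clamped */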
+	dev->ipc_log_mask = buf[0] - '0';
+	if (dev->ipc_log_mask > DBG_LEV)
+		dev->ipc_log_mask = DBG_LEV;
+	return count;
+}
+
+static DEVICE_ATTR(debug_mask, S_IRUGO | S_IWUSR, show_mask, set_mask);
+
+static int ngd_slim_probe(struct platform_device *pdev)
+{
+	struct msm_slim_ctrl *dev;
+	int ret;
+	struct resource		*bam_mem;
+	struct resource		*slim_mem;
+	struct resource		*irq, *bam_irq;
+	bool			rxreg_access = false;
+	bool			slim_mdm = false;
+	const char		*ext_modem_id = NULL;
+
+	slim_mem = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+						"slimbus_physical");
+	if (!slim_mem) {
+		dev_err(&pdev->dev, "no slimbus physical memory resource\n");
+		return -ENODEV;
+	}
+	bam_mem = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+						"slimbus_bam_physical");
+	if (!bam_mem) {
+		dev_err(&pdev->dev, "no slimbus BAM memory resource\n");
+		return -ENODEV;
+	}
+	irq = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
+						"slimbus_irq");
+	if (!irq) {
+		dev_err(&pdev->dev, "no slimbus IRQ resource\n");
+		return -ENODEV;
+	}
+	bam_irq = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
+						"slimbus_bam_irq");
+	if (!bam_irq) {
+		dev_err(&pdev->dev, "no slimbus BAM IRQ resource\n");
+		return -ENODEV;
+	}
+
+	dev = kzalloc(sizeof(struct msm_slim_ctrl), GFP_KERNEL);
+	if (!dev)
+		return -ENOMEM;
+	dev->wr_comp = kzalloc(sizeof(struct completion *) * MSM_TX_BUFS,
+				GFP_KERNEL);
+	if (!dev->wr_comp) {
+		ret = -ENOMEM;
+		goto err_nobulk;
+	}
+
+	/* typical txn numbers and size used in bulk operation */
+	dev->bulk.buf_sz = SLIM_MAX_TXNS * 8;
+	dev->bulk.base = kzalloc(dev->bulk.buf_sz, GFP_KERNEL | GFP_DMA);
+	if (!dev->bulk.base) {
+		ret = -ENOMEM;
+		goto err_nobulk;
+	}
+
+	dev->dev = &pdev->dev;
+	platform_set_drvdata(pdev, dev);
+	slim_set_ctrldata(&dev->ctrl, dev);
+
+	/* Create IPC log context */
+	dev->ipc_slimbus_log = ipc_log_context_create(IPC_SLIMBUS_LOG_PAGES,
+						dev_name(dev->dev), 0);
+	if (!dev->ipc_slimbus_log)
+		dev_err(&pdev->dev, "error creating ipc_logging context\n");
+	else {
+		/* Initialize the log mask */
+		dev->ipc_log_mask = INFO_LEV;
+		dev->default_ipc_log_mask = INFO_LEV;
+		SLIM_INFO(dev, "start logging for slim dev %s\n",
+				dev_name(dev->dev));
+	}
+	ret = sysfs_create_file(&dev->dev->kobj, &dev_attr_debug_mask.attr);
+	if (ret) {
+		dev_err(&pdev->dev, "Failed to create dev. attr\n");
+		dev->sysfs_created = false;
+	} else
+		dev->sysfs_created = true;
+
+	dev->base = ioremap(slim_mem->start, resource_size(slim_mem));
+	if (!dev->base) {
+		dev_err(&pdev->dev, "IOremap failed\n");
+		ret = -ENOMEM;
+		goto err_ioremap_failed;
+	}
+	dev->bam.base = ioremap(bam_mem->start, resource_size(bam_mem));
+	if (!dev->bam.base) {
+		dev_err(&pdev->dev, "BAM IOremap failed\n");
+		ret = -ENOMEM;
+		goto err_ioremap_bam_failed;
+	}
+	if (pdev->dev.of_node) {
+
+		ret = of_property_read_u32(pdev->dev.of_node, "cell-index",
+					&dev->ctrl.nr);
+		if (ret) {
+			dev_err(&pdev->dev, "Cell index not specified:%d", ret);
+			goto err_ctrl_failed;
+		}
+		rxreg_access = of_property_read_bool(pdev->dev.of_node,
+					"qcom,rxreg-access");
+		of_property_read_u32(pdev->dev.of_node, "qcom,apps-ch-pipes",
+					&dev->pdata.apps_pipes);
+		of_property_read_u32(pdev->dev.of_node, "qcom,ea-pc",
+					&dev->pdata.eapc);
+		ret = of_property_read_string(pdev->dev.of_node,
+					"qcom,slim-mdm", &ext_modem_id);
+		if (!ret)
+			slim_mdm = true;
+	} else {
+		dev->ctrl.nr = pdev->id;
+	}
+	/*
+	 * Keep PGD's logical address as manager's. Query it when first data
+	 * channel request comes in
+	 */
+	dev->pgdla = SLIM_LA_MGR;
+	dev->ctrl.nchans = MSM_SLIM_NCHANS;
+	dev->ctrl.nports = MSM_SLIM_NPORTS;
+	dev->framer.rootfreq = SLIM_ROOT_FREQ >> 3;
+	dev->framer.superfreq =
+		dev->framer.rootfreq / SLIM_CL_PER_SUPERFRAME_DIV8;
+	dev->ctrl.a_framer = &dev->framer;
+	dev->ctrl.clkgear = SLIM_MAX_CLK_GEAR;
+	dev->ctrl.set_laddr = ngd_set_laddr;
+	dev->ctrl.get_laddr = ngd_get_laddr;
+	dev->ctrl.allocbw = ngd_allocbw;
+	dev->ctrl.xfer_msg = ngd_xfer_msg;
+	dev->ctrl.xfer_user_msg = ngd_user_msg;
+	dev->ctrl.xfer_bulk_wr = ngd_bulk_wr;
+	dev->ctrl.wakeup = NULL;
+	dev->ctrl.alloc_port = msm_alloc_port;
+	dev->ctrl.dealloc_port = msm_dealloc_port;
+	dev->ctrl.port_xfer = msm_slim_port_xfer;
+	dev->ctrl.port_xfer_status = msm_slim_port_xfer_status;
+	dev->bam_mem = bam_mem;
+	dev->rx_slim = ngd_slim_rx;
+
+	init_completion(&dev->reconf);
+	init_completion(&dev->ctrl_up);
+	mutex_init(&dev->tx_lock);
+	mutex_init(&dev->ssr_lock);
+	spin_lock_init(&dev->tx_buf_lock);
+	spin_lock_init(&dev->rx_lock);
+	dev->ee = 1;
+	dev->irq = irq->start;
+	dev->bam.irq = bam_irq->start;
+	atomic_set(&dev->ssr_in_progress, 0);
+
+	if (rxreg_access)
+		dev->use_rx_msgqs = MSM_MSGQ_DISABLED;
+	else
+		dev->use_rx_msgqs = MSM_MSGQ_RESET;
+
+	/* Enable TX message queues by default as recommended by HW */
+	dev->use_tx_msgqs = MSM_MSGQ_RESET;
+
+	init_completion(&dev->rx_msgq_notify);
+	init_completion(&dev->qmi.slave_notify);
+
+	/* Register with framework */
+	ret = slim_add_numbered_controller(&dev->ctrl);
+	if (ret) {
+		dev_err(dev->dev, "error adding controller\n");
+		goto err_ctrl_failed;
+	}
+
+	dev->ctrl.dev.parent = &pdev->dev;
+	dev->ctrl.dev.of_node = pdev->dev.of_node;
+	dev->state = MSM_CTRL_DOWN;
+
+	/*
+	 * The interrupt handler performs no expensive operations, so it
+	 * can run in interrupt context; this avoids context switches and
+	 * improves performance.
+	 */
+	ret = request_irq(dev->irq,
+			ngd_slim_interrupt,
+			IRQF_TRIGGER_HIGH,
+			"ngd_slim_irq", dev);
+
+	if (ret) {
+		dev_err(&pdev->dev, "request IRQ failed\n");
+		goto err_request_irq_failed;
+	}
+
+	init_completion(&dev->qmi.qmi_comp);
+	dev->err = -EPROBE_DEFER;
+	pm_runtime_use_autosuspend(dev->dev);
+	pm_runtime_set_autosuspend_delay(dev->dev, MSM_SLIM_AUTOSUSPEND);
+	pm_runtime_set_suspended(dev->dev);
+	pm_runtime_enable(dev->dev);
+
+	if (slim_mdm) {
+		dev->ext_mdm.nb.notifier_call = mdm_ssr_notify_cb;
+		dev->ext_mdm.domr = subsys_notif_register_notifier(ext_modem_id,
+							&dev->ext_mdm.nb);
+		if (IS_ERR_OR_NULL(dev->ext_mdm.domr))
+			dev_err(dev->dev,
+				"subsys_notif_register_notifier failed %p",
+				dev->ext_mdm.domr);
+	}
+
+	INIT_WORK(&dev->dsp.dom_up, ngd_dom_up);
+	dev->qmi.nb.notifier_call = ngd_qmi_available;
+	pm_runtime_get_noresume(dev->dev);
+
+	/* Fire up the Rx message queue thread */
+	dev->rx_msgq_thread = kthread_run(ngd_slim_rx_msgq_thread, dev,
+					"ngd_rx_thread%d", dev->ctrl.nr);
+	if (IS_ERR(dev->rx_msgq_thread)) {
+		ret = PTR_ERR(dev->rx_msgq_thread);
+		dev_err(dev->dev, "Failed to start Rx thread:%d\n", ret);
+		goto err_rx_thread_create_failed;
+	}
+
+	/* Start thread to probe, and notify slaves */
+	dev->qmi.slave_thread = kthread_run(ngd_notify_slaves, dev,
+					"ngd_notify_sl%d", dev->ctrl.nr);
+	if (IS_ERR(dev->qmi.slave_thread)) {
+		ret = PTR_ERR(dev->qmi.slave_thread);
+		dev_err(dev->dev, "Failed to start notifier thread:%d\n", ret);
+		goto err_notify_thread_create_failed;
+	}
+	SLIM_INFO(dev, "NGD SB controller is up!\n");
+	return 0;
+
+err_notify_thread_create_failed:
+	kthread_stop(dev->rx_msgq_thread);
+err_rx_thread_create_failed:
+	free_irq(dev->irq, dev);
+err_request_irq_failed:
+err_ctrl_failed:
+	iounmap(dev->bam.base);
+err_ioremap_bam_failed:
+	iounmap(dev->base);
+err_ioremap_failed:
+	if (dev->sysfs_created)
+		sysfs_remove_file(&dev->dev->kobj,
+				&dev_attr_debug_mask.attr);
+	kfree(dev->bulk.base);
+err_nobulk:
+	kfree(dev->wr_comp);
+	kfree(dev);
+	return ret;
+}
+
+static int ngd_slim_remove(struct platform_device *pdev)
+{
+	struct msm_slim_ctrl *dev = platform_get_drvdata(pdev);
+	ngd_slim_enable(dev, false);
+	if (dev->sysfs_created)
+		sysfs_remove_file(&dev->dev->kobj,
+				&dev_attr_debug_mask.attr);
+	qmi_svc_event_notifier_unregister(SLIMBUS_QMI_SVC_ID,
+				SLIMBUS_QMI_SVC_V1,
+				SLIMBUS_QMI_INS_ID, &dev->qmi.nb);
+	pm_runtime_disable(&pdev->dev);
+	if (dev->dsp.dom_t == MSM_SLIM_DOM_SS)
+		subsys_notif_unregister_notifier(dev->dsp.domr,
+						&dev->dsp.nb);
+	if (dev->dsp.dom_t == MSM_SLIM_DOM_PD)
+		service_notif_unregister_notifier(dev->dsp.domr,
+						&dev->dsp.nb);
+	if (!IS_ERR_OR_NULL(dev->ext_mdm.domr))
+		subsys_notif_unregister_notifier(dev->ext_mdm.domr,
+						&dev->ext_mdm.nb);
+	kfree(dev->bulk.base);
+	free_irq(dev->irq, dev);
+	slim_del_controller(&dev->ctrl);
+	kthread_stop(dev->rx_msgq_thread);
+	iounmap(dev->bam.base);
+	iounmap(dev->base);
+	kfree(dev->wr_comp);
+	kfree(dev);
+	return 0;
+}
+
+#ifdef CONFIG_PM
+static int ngd_slim_runtime_idle(struct device *device)
+{
+	struct platform_device *pdev = to_platform_device(device);
+	struct msm_slim_ctrl *dev = platform_get_drvdata(pdev);
+	mutex_lock(&dev->tx_lock);
+	if (dev->state == MSM_CTRL_AWAKE)
+		dev->state = MSM_CTRL_IDLE;
+	mutex_unlock(&dev->tx_lock);
+	dev_dbg(device, "pm_runtime: idle...\n");
+	pm_request_autosuspend(device);
+	return -EAGAIN;
+}
+#endif
+
+/*
+ * If CONFIG_PM_RUNTIME is not defined, these two functions serve as
+ * helpers called from system suspend/resume, so they are not guarded
+ * by #ifdef CONFIG_PM_RUNTIME.
+ */
+static int ngd_slim_runtime_resume(struct device *device)
+{
+	struct platform_device *pdev = to_platform_device(device);
+	struct msm_slim_ctrl *dev = platform_get_drvdata(pdev);
+	int ret = 0;
+
+	mutex_lock(&dev->tx_lock);
+	if (dev->state >= MSM_CTRL_ASLEEP)
+		ret = ngd_slim_power_up(dev, false);
+	if (ret) {
+		/* Did SSR cause this power-up failure? */
+		if (dev->state != MSM_CTRL_DOWN)
+			dev->state = MSM_CTRL_ASLEEP;
+		else
+			SLIM_WARN(dev, "HW wakeup attempt during SSR\n");
+	} else {
+		dev->state = MSM_CTRL_AWAKE;
+	}
+	mutex_unlock(&dev->tx_lock);
+	SLIM_INFO(dev, "Slim runtime resume: ret %d\n", ret);
+	return ret;
+}
+
+#ifdef CONFIG_PM
+static int ngd_slim_runtime_suspend(struct device *device)
+{
+	struct platform_device *pdev = to_platform_device(device);
+	struct msm_slim_ctrl *dev = platform_get_drvdata(pdev);
+	int ret = 0;
+	mutex_lock(&dev->tx_lock);
+	ret = ngd_slim_power_down(dev);
+	if (ret && ret != -EBUSY)
+		SLIM_INFO(dev, "slim resource not idle:%d\n", ret);
+	if (!ret || ret == -ETIMEDOUT)
+		dev->state = MSM_CTRL_ASLEEP;
+	mutex_unlock(&dev->tx_lock);
+	SLIM_INFO(dev, "Slim runtime suspend: ret %d\n", ret);
+	return ret;
+}
+#endif
+
+#ifdef CONFIG_PM_SLEEP
+static int ngd_slim_suspend(struct device *dev)
+{
+	int ret = -EBUSY;
+	struct platform_device *pdev = to_platform_device(dev);
+	struct msm_slim_ctrl *cdev = platform_get_drvdata(pdev);
+	if (!pm_runtime_enabled(dev) ||
+		(!pm_runtime_suspended(dev) &&
+			cdev->state == MSM_CTRL_IDLE)) {
+		ret = ngd_slim_runtime_suspend(dev);
+		/*
+		 * If runtime-PM still thinks it's active, make sure its
+		 * status is in sync with the HW status.
+		 * Since this suspend calls the QMI API, it ends up holding
+		 * a wakelock, which makes the first suspend attempt fail.
+		 * Subsequent suspends must not trigger the low-power
+		 * transition again since the HW is already suspended.
+		 */
+		if (!ret) {
+			pm_runtime_disable(dev);
+			pm_runtime_set_suspended(dev);
+			pm_runtime_enable(dev);
+		}
+	}
+	if (ret == -EBUSY) {
+		/*
+		 * An audio stream may still be active during suspend. Don't
+		 * return a suspend failure in that case, so that the display
+		 * and other components can still suspend.
+		 * Any other error should be passed on to system-level
+		 * suspend.
+		 */
+		ret = 0;
+	}
+	SLIM_INFO(cdev, "system suspend\n");
+	return ret;
+}
+
+static int ngd_slim_resume(struct device *dev)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct msm_slim_ctrl *cdev = platform_get_drvdata(pdev);
+	/*
+	 * Rely on runtime-PM to call resume in case it is enabled.
+	 * Even if it's not enabled, rely on 1st client transaction to do
+	 * clock/power on
+	 */
+	SLIM_INFO(cdev, "system resume\n");
+	return 0;
+}
+#endif /* CONFIG_PM_SLEEP */
+
+static const struct dev_pm_ops ngd_slim_dev_pm_ops = {
+	SET_SYSTEM_SLEEP_PM_OPS(
+		ngd_slim_suspend,
+		ngd_slim_resume
+	)
+	SET_RUNTIME_PM_OPS(
+		ngd_slim_runtime_suspend,
+		ngd_slim_runtime_resume,
+		ngd_slim_runtime_idle
+	)
+};
+
+static const struct of_device_id ngd_slim_dt_match[] = {
+	{
+		.compatible = "qcom,slim-ngd",
+	},
+	{}
+};
+
+static struct platform_driver ngd_slim_driver = {
+	.probe = ngd_slim_probe,
+	.remove = ngd_slim_remove,
+	.driver	= {
+		.name = NGD_SLIM_NAME,
+		.owner = THIS_MODULE,
+		.pm = &ngd_slim_dev_pm_ops,
+		.of_match_table = ngd_slim_dt_match,
+	},
+};
+
+static int ngd_slim_init(void)
+{
+	return platform_driver_register(&ngd_slim_driver);
+}
+late_initcall(ngd_slim_init);
+
+static void ngd_slim_exit(void)
+{
+	platform_driver_unregister(&ngd_slim_driver);
+}
+module_exit(ngd_slim_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("MSM Slimbus controller");
+MODULE_ALIAS("platform:msm-slim-ngd");
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/soc/qcom/avtimer.c	2019-10-29 09:26:24.805214550 +0100
@@ -0,0 +1,538 @@
+/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/fs.h>
+#include <linux/cdev.h>
+#include <linux/uaccess.h>
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <linux/avtimer.h>
+#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/wait.h>
+#include <linux/sched.h>
+#include <linux/qdsp6v2/apr.h>
+#include <sound/q6core.h>
+
+#define DEVICE_NAME "avtimer"
+#define TIMEOUT_MS 1000
+#define CORE_CLIENT 1
+#define TEMP_PORT ((CORE_CLIENT << 8) | 0x0001)
+#define SSR_WAKETIME 1000
+#define Q6_READY_RETRY 250
+#define Q6_READY_MAX_RETRIES 40
+
+#define AVCS_CMD_REMOTE_AVTIMER_VOTE_REQUEST 0x00012914
+#define AVCS_CMD_RSP_REMOTE_AVTIMER_VOTE_REQUEST 0x00012915
+#define AVCS_CMD_REMOTE_AVTIMER_RELEASE_REQUEST 0x00012916
+#define AVTIMER_REG_CNT 2
+
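+/*
+ * APR payload: the vote request carries a client name, while the release
+ * request carries the timer handle returned by the ADSP, hence the union.
+ */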
+struct adsp_avt_timer {
+	struct apr_hdr hdr;
+	union {
+		char client_name[8];
+		u32 avtimer_handle;
+	};
+} __packed;
+
+static int major;
+
+struct avtimer_t {
+	struct apr_svc *core_handle_q;
+	struct cdev myc;
+	struct class *avtimer_class;
+	struct mutex avtimer_lock;
+	int avtimer_open_cnt;
+	struct delayed_work ssr_dwork;
+	wait_queue_head_t adsp_resp_wait;
+	int enable_timer_resp_received;
+	int timer_handle;
+	void __iomem *p_avtimer_msw;
+	void __iomem *p_avtimer_lsw;
+	uint32_t clk_div;
+	atomic_t adsp_ready;
+	int num_retries;
+};
+
+static struct avtimer_t avtimer;
+
+static int32_t aprv2_core_fn_q(struct apr_client_data *data, void *priv)
+{
+	uint32_t *payload1;
+
+	if (!data) {
+		pr_err("%s: Invalid params\n", __func__);
+		return -EINVAL;
+	}
+	pr_debug("%s: core msg: payload len = %u, apr resp opcode = 0x%X\n",
+		__func__, data->payload_size, data->opcode);
+
+	switch (data->opcode) {
+
+	case APR_BASIC_RSP_RESULT:{
+
+		if (!data->payload_size) {
+			pr_err("%s: APR_BASIC_RSP_RESULT No Payload ",
+					__func__);
+			return 0;
+		}
+
+		payload1 = data->payload;
+		switch (payload1[0]) {
+		case AVCS_CMD_REMOTE_AVTIMER_RELEASE_REQUEST:
+			pr_debug("%s: Cmd = TIMER RELEASE status[0x%x]\n",
+			__func__, payload1[1]);
+			break;
+		default:
+			pr_err("Invalid cmd rsp[0x%x][0x%x]\n",
+					payload1[0], payload1[1]);
+			break;
+		}
+		break;
+	}
+
+	case RESET_EVENTS:{
+		pr_debug("%s: Reset event received in AV timer\n", __func__);
+		apr_reset(avtimer.core_handle_q);
+		avtimer.core_handle_q = NULL;
+		avtimer.avtimer_open_cnt = 0;
+		atomic_set(&avtimer.adsp_ready, 0);
+		schedule_delayed_work(&avtimer.ssr_dwork,
+				  msecs_to_jiffies(SSR_WAKETIME));
+		break;
+	}
+
+	case AVCS_CMD_RSP_REMOTE_AVTIMER_VOTE_REQUEST:
+		payload1 = data->payload;
+		pr_debug("%s: RSP_REMOTE_AVTIMER_VOTE_REQUEST handle %x\n",
+			__func__, payload1[0]);
+		avtimer.timer_handle = payload1[0];
+		avtimer.enable_timer_resp_received = 1;
+		wake_up(&avtimer.adsp_resp_wait);
+		break;
+	default:
+		pr_err("%s: unhandled adsp core svc opcode: %d\n",
+				__func__, data->opcode);
+		break;
+	}
+
+	return 0;
+}
+
+int avcs_core_open(void)
+{
+	if (!avtimer.core_handle_q)
+		avtimer.core_handle_q = apr_register("ADSP", "CORE",
+					aprv2_core_fn_q, TEMP_PORT, NULL);
+	pr_debug("%s: Open_q %p\n", __func__, avtimer.core_handle_q);
+	if (!avtimer.core_handle_q) {
+		pr_err("%s: Unable to register CORE\n", __func__);
+		return -EINVAL;
+	}
+	return 0;
+}
+EXPORT_SYMBOL(avcs_core_open);
+
+static int avcs_core_disable_avtimer(int timerhandle)
+{
+	int rc = -EINVAL;
+	struct adsp_avt_timer payload;
+
+	if (!timerhandle) {
+		pr_err("%s: Invalid timer handle\n", __func__);
+		return -EINVAL;
+	}
+	memset(&payload, 0, sizeof(payload));
+	rc = avcs_core_open();
+	if (!rc && avtimer.core_handle_q) {
+		payload.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+			APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
+		payload.hdr.pkt_size =
+			sizeof(struct adsp_avt_timer);
+		payload.hdr.src_svc = avtimer.core_handle_q->id;
+		payload.hdr.src_domain = APR_DOMAIN_APPS;
+		payload.hdr.dest_domain = APR_DOMAIN_ADSP;
+		payload.hdr.dest_svc = APR_SVC_ADSP_CORE;
+		payload.hdr.src_port = TEMP_PORT;
+		payload.hdr.dest_port = TEMP_PORT;
+		payload.hdr.token = CORE_CLIENT;
+		payload.hdr.opcode = AVCS_CMD_REMOTE_AVTIMER_RELEASE_REQUEST;
+		payload.avtimer_handle = timerhandle;
+		pr_debug("%s: disable avtimer opcode %x handle %x\n",
+			__func__, payload.hdr.opcode, payload.avtimer_handle);
+		rc = apr_send_pkt(avtimer.core_handle_q,
+						(uint32_t *)&payload);
+		if (rc < 0)
+			pr_err("%s: Disable AVtimer failed op[0x%x]rc[%d]\n",
+				__func__, payload.hdr.opcode, rc);
+		else
+			rc = 0;
+	}
+	return rc;
+}
+
+static int avcs_core_enable_avtimer(char *client_name)
+{
+	int rc = -EINVAL, ret = -EINVAL;
+	struct adsp_avt_timer payload;
+
+	if (!client_name) {
+		pr_err("%s: Invalid params\n", __func__);
+		return -EINVAL;
+	}
+	memset(&payload, 0, sizeof(payload));
+	rc = avcs_core_open();
+	if (!rc && avtimer.core_handle_q) {
+		avtimer.enable_timer_resp_received = 0;
+		payload.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_EVENT,
+			APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
+		payload.hdr.pkt_size =
+			sizeof(struct adsp_avt_timer);
+		payload.hdr.src_svc = avtimer.core_handle_q->id;
+		payload.hdr.src_domain = APR_DOMAIN_APPS;
+		payload.hdr.dest_domain = APR_DOMAIN_ADSP;
+		payload.hdr.dest_svc = APR_SVC_ADSP_CORE;
+		payload.hdr.src_port = TEMP_PORT;
+		payload.hdr.dest_port = TEMP_PORT;
+		payload.hdr.token = CORE_CLIENT;
+		payload.hdr.opcode = AVCS_CMD_REMOTE_AVTIMER_VOTE_REQUEST;
+		strlcpy(payload.client_name, client_name,
+			   sizeof(payload.client_name));
+		pr_debug("%s: enable avtimer opcode %x client name %s\n",
+			__func__, payload.hdr.opcode, payload.client_name);
+		rc = apr_send_pkt(avtimer.core_handle_q,
+						(uint32_t *)&payload);
+		if (rc < 0) {
+			pr_err("%s: Enable AVtimer failed op[0x%x]rc[%d]\n",
+				__func__, payload.hdr.opcode, rc);
+			goto bail;
+		} else
+			rc = 0;
+		ret = wait_event_timeout(avtimer.adsp_resp_wait,
+			(avtimer.enable_timer_resp_received == 1),
+			msecs_to_jiffies(TIMEOUT_MS));
+		if (!ret) {
+			pr_err("%s: wait_event timeout for Enable timer\n",
+					__func__);
+			rc = -ETIMEDOUT;
+		}
+		if (rc)
+			avtimer.timer_handle = 0;
+	}
+bail:
+	return rc;
+}
+
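+/*
+ * Reference-counted vote: the first enable requests the avtimer from the
+ * ADSP, the last disable releases it.
+ */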
+int avcs_core_disable_power_collapse(int enable)
+{
+	int rc = 0;
+	mutex_lock(&avtimer.avtimer_lock);
+	if (enable) {
+		if (avtimer.avtimer_open_cnt) {
+			avtimer.avtimer_open_cnt++;
+			pr_debug("%s: opened avtimer open count=%d\n",
+				__func__, avtimer.avtimer_open_cnt);
+			rc = 0;
+			goto done;
+		}
+		rc = avcs_core_enable_avtimer("timer");
+		if (!rc) {
+			avtimer.avtimer_open_cnt++;
+			atomic_set(&avtimer.adsp_ready, 1);
+		}
+	} else {
+		if (avtimer.avtimer_open_cnt > 0) {
+			avtimer.avtimer_open_cnt--;
+			if (!avtimer.avtimer_open_cnt) {
+				rc = avcs_core_disable_avtimer(
+				avtimer.timer_handle);
+				avtimer.timer_handle = 0;
+			}
+		}
+	}
+done:
+	mutex_unlock(&avtimer.avtimer_lock);
+	return rc;
+}
+EXPORT_SYMBOL(avcs_core_disable_power_collapse);
+
+static void reset_work(struct work_struct *work)
+{
+	if (q6core_is_adsp_ready()) {
+		avcs_core_disable_power_collapse(1);
+		avtimer.num_retries = Q6_READY_MAX_RETRIES;
+		return;
+	}
+	pr_debug("%s: Q6 not ready, retrying after a delay\n", __func__);
+	if (--avtimer.num_retries > 0) {
+		schedule_delayed_work(&avtimer.ssr_dwork,
+			  msecs_to_jiffies(Q6_READY_RETRY));
+	} else {
+		pr_err("%s: Q6 failed responding after multiple retries\n",
+							__func__);
+		avtimer.num_retries = Q6_READY_MAX_RETRIES;
+	}
+}
+
+int avcs_core_query_timer(uint64_t *avtimer_tick)
+{
+	uint32_t avtimer_msw = 0, avtimer_lsw = 0;
+	uint32_t res = 0;
+	uint64_t avtimer_tick_temp;
+
+	if (!atomic_read(&avtimer.adsp_ready)) {
+		pr_debug("%s:In SSR, return\n", __func__);
+		return -ENETRESET;
+	}
+	avtimer_lsw = ioread32(avtimer.p_avtimer_lsw);
+	avtimer_msw = ioread32(avtimer.p_avtimer_msw);
+
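+	/* Combine the two 32-bit counter halves, then scale by clk_div */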
+	avtimer_tick_temp =
+		(uint64_t)((uint64_t)avtimer_msw << 32)
+			| avtimer_lsw;
+	res = do_div(avtimer_tick_temp, avtimer.clk_div);
+	*avtimer_tick = avtimer_tick_temp;
+	pr_debug_ratelimited("%s:Avtimer: msw: %u, lsw: %u, tick: %llu\n",
+			__func__,
+			avtimer_msw, avtimer_lsw, *avtimer_tick);
+	return 0;
+}
+EXPORT_SYMBOL(avcs_core_query_timer);
+
+static int avtimer_open(struct inode *inode, struct file *file)
+{
+	return avcs_core_disable_power_collapse(1);
+}
+
+static int avtimer_release(struct inode *inode, struct file *file)
+{
+	return avcs_core_disable_power_collapse(0);
+}
+
+/*
+ * ioctl: IOCTL_GET_AVTIMER_TICK returns the current AV timer tick
+ */
+static long avtimer_ioctl(struct file *file, unsigned int ioctl_num,
+				unsigned long ioctl_param)
+{
+	switch (ioctl_num) {
+	case IOCTL_GET_AVTIMER_TICK:
+	{
+		uint64_t avtimer_tick = 0;
+		int rc;
+
+		rc = avcs_core_query_timer(&avtimer_tick);
+
+		if (rc) {
+			pr_err("%s: Error: Invalid AV Timer tick, rc = %d\n",
+				__func__, rc);
+			return rc;
+		}
+
+		pr_debug_ratelimited("%s: AV Timer tick: time %llx\n",
+		__func__, avtimer_tick);
+		if (copy_to_user((void __user *)ioctl_param, &avtimer_tick,
+				 sizeof(avtimer_tick))) {
+			pr_err("copy_to_user failed\n");
+			return -EFAULT;
+		}
+		break;
+	}
+
+	default:
+		pr_err("%s: invalid cmd\n", __func__);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static const struct file_operations avtimer_fops = {
+	.unlocked_ioctl = avtimer_ioctl,
+	.compat_ioctl = avtimer_ioctl,
+	.open = avtimer_open,
+	.release = avtimer_release
+};
+
+static int dev_avtimer_probe(struct platform_device *pdev)
+{
+	int result = 0;
+	dev_t dev = MKDEV(major, 0);
+	struct device *device_handle;
+	struct resource *reg_lsb = NULL, *reg_msb = NULL;
+	uint32_t clk_div_val;
+
+	if (!pdev) {
+		pr_err("%s: Invalid params\n", __func__);
+		return -EINVAL;
+	}
+	reg_lsb = platform_get_resource_byname(pdev,
+		IORESOURCE_MEM, "avtimer_lsb_addr");
+	if (!reg_lsb) {
+		dev_err(&pdev->dev, "%s: Looking up %s property failed\n",
+			__func__, "avtimer_lsb_addr");
+		return -EINVAL;
+	}
+	reg_msb = platform_get_resource_byname(pdev,
+		IORESOURCE_MEM, "avtimer_msb_addr");
+	if (!reg_msb) {
+		dev_err(&pdev->dev, "%s: Looking up %s property failed\n",
+			__func__, "avtimer_msb_addr");
+		return -EINVAL;
+	}
+	INIT_DELAYED_WORK(&avtimer.ssr_dwork, reset_work);
+
+	avtimer.p_avtimer_lsw = devm_ioremap_nocache(&pdev->dev,
+				reg_lsb->start, resource_size(reg_lsb));
+	if (!avtimer.p_avtimer_lsw) {
+		dev_err(&pdev->dev, "%s: ioremap failed for lsb avtimer register",
+			__func__);
+		return -ENOMEM;
+	}
+
+	avtimer.p_avtimer_msw = devm_ioremap_nocache(&pdev->dev,
+				reg_msb->start, resource_size(reg_msb));
+	if (!avtimer.p_avtimer_msw) {
+		dev_err(&pdev->dev, "%s: ioremap failed for msb avtimer register",
+			__func__);
+		goto unmap;
+	}
+	avtimer.num_retries = Q6_READY_MAX_RETRIES;
+	/* get the device number */
+	if (major)
+		result = register_chrdev_region(dev, 1, DEVICE_NAME);
+	else {
+		result = alloc_chrdev_region(&dev, 0, 1, DEVICE_NAME);
+		major = MAJOR(dev);
+	}
+
+	if (result < 0) {
+		dev_err(&pdev->dev, "%s: Registering avtimer device failed\n",
+			__func__);
+		goto unmap;
+	}
+
+	avtimer.avtimer_class = class_create(THIS_MODULE, "avtimer");
+	if (IS_ERR(avtimer.avtimer_class)) {
+		result = PTR_ERR(avtimer.avtimer_class);
+		dev_err(&pdev->dev, "%s: Error creating avtimer class: %d\n",
+			__func__, result);
+		goto unregister_chrdev_region;
+	}
+
+	cdev_init(&avtimer.myc, &avtimer_fops);
+	result = cdev_add(&avtimer.myc, dev, 1);
+
+	if (result < 0) {
+		dev_err(&pdev->dev, "%s: Registering file operations failed\n",
+			__func__);
+		goto class_destroy;
+	}
+
+	device_handle = device_create(avtimer.avtimer_class,
+			NULL, avtimer.myc.dev, NULL, "avtimer");
+	if (IS_ERR(device_handle)) {
+		result = PTR_ERR(device_handle);
+		pr_err("%s: device_create failed: %d\n", __func__, result);
+		goto class_destroy;
+	}
+	init_waitqueue_head(&avtimer.adsp_resp_wait);
+	mutex_init(&avtimer.avtimer_lock);
+	avtimer.avtimer_open_cnt = 0;
+
+	pr_debug("%s: Device create done for avtimer major=%d\n",
+			__func__, major);
+
+	if (of_property_read_u32(pdev->dev.of_node,
+			"qcom,clk-div", &clk_div_val))
+		avtimer.clk_div = 1;
+	else
+		avtimer.clk_div = clk_div_val;
+
+	pr_debug("avtimer.clk_div = %d\n", avtimer.clk_div);
+	return 0;
+
+class_destroy:
+	class_destroy(avtimer.avtimer_class);
+unregister_chrdev_region:
+	unregister_chrdev_region(MKDEV(major, 0), 1);
+unmap:
+	if (avtimer.p_avtimer_lsw)
+		devm_iounmap(&pdev->dev, avtimer.p_avtimer_lsw);
+	if (avtimer.p_avtimer_msw)
+		devm_iounmap(&pdev->dev, avtimer.p_avtimer_msw);
+	avtimer.p_avtimer_lsw = NULL;
+	avtimer.p_avtimer_msw = NULL;
+	return result;
+
+}
+
+static int dev_avtimer_remove(struct platform_device *pdev)
+{
+	pr_debug("%s: dev_avtimer_remove\n", __func__);
+
+	if (avtimer.p_avtimer_lsw)
+		devm_iounmap(&pdev->dev, avtimer.p_avtimer_lsw);
+	if (avtimer.p_avtimer_msw)
+		devm_iounmap(&pdev->dev, avtimer.p_avtimer_msw);
+	device_destroy(avtimer.avtimer_class, avtimer.myc.dev);
+	cdev_del(&avtimer.myc);
+	class_destroy(avtimer.avtimer_class);
+	unregister_chrdev_region(MKDEV(major, 0), 1);
+
+	return 0;
+}
+
+static const struct of_device_id avtimer_machine_of_match[]  = {
+	{ .compatible = "qcom,avtimer", },
+	{},
+};
+static struct platform_driver dev_avtimer_driver = {
+	.probe = dev_avtimer_probe,
+	.remove = dev_avtimer_remove,
+	.driver = {
+		.name = "dev_avtimer",
+		.of_match_table = avtimer_machine_of_match,
+	},
+};
+
+static int  __init avtimer_init(void)
+{
+	int rc;
+
+	rc = platform_driver_register(&dev_avtimer_driver);
+	if (rc) {
+		pr_err("%s: platform_driver_register failed\n", __func__);
+		goto error_platform_driver;
+	}
+	pr_debug("%s: dev_avtimer_init : done\n", __func__);
+
+	return 0;
+error_platform_driver:
+
+	pr_err("%s: encountered error\n", __func__);
+	return rc;
+}
+
+static void __exit avtimer_exit(void)
+{
+	pr_debug("%s: avtimer_exit\n", __func__);
+	platform_driver_unregister(&dev_avtimer_driver);
+}
+
+module_init(avtimer_init);
+module_exit(avtimer_exit);
+
+MODULE_DESCRIPTION("avtimer driver");
+MODULE_LICENSE("GPL v2");
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/soc/qcom/boot_stats.c	2019-10-29 09:26:24.805214550 +0100
@@ -0,0 +1,144 @@
+/* Copyright (c) 2013-2014,2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/cpu.h>
+#include <linux/sched.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/export.h>
+#include <linux/types.h>
+#include <soc/qcom/boot_stats.h>
+
+static void __iomem *mpm_counter_base;
+static uint32_t mpm_counter_freq;
+struct boot_stats __iomem *boot_stats;
+
+static int mpm_parse_dt(void)
+{
+	struct device_node *np;
+	u32 freq;
+
+	np = of_find_compatible_node(NULL, NULL, "qcom,msm-imem-boot_stats");
+	if (!np) {
+		pr_err("can't find qcom,msm-imem node\n");
+		return -ENODEV;
+	}
+	boot_stats = of_iomap(np, 0);
+	if (!boot_stats) {
+		pr_err("boot_stats: Can't map imem\n");
+		return -ENODEV;
+	}
+
+	np = of_find_compatible_node(NULL, NULL, "qcom,mpm2-sleep-counter");
+	if (!np) {
+		pr_err("mpm_counter: can't find DT node\n");
+		return -ENODEV;
+	}
+
+	if (!of_property_read_u32(np, "clock-frequency", &freq))
+		mpm_counter_freq = freq;
+	else
+		return -ENODEV;
+
+	if (of_get_address(np, 0, NULL, NULL)) {
+		mpm_counter_base = of_iomap(np, 0);
+		if (!mpm_counter_base) {
+			pr_err("mpm_counter: can't map counter base\n");
+			return -ENODEV;
+		}
+	}
+
+	return 0;
+}
+
+static void print_boot_stats(void)
+{
+	pr_info("KPI: Bootloader start count = %u\n",
+		readl_relaxed(&boot_stats->bootloader_start));
+	pr_info("KPI: Bootloader end count = %u\n",
+		readl_relaxed(&boot_stats->bootloader_end));
+	pr_info("KPI: Bootloader display count = %u\n",
+		readl_relaxed(&boot_stats->bootloader_display));
+	pr_info("KPI: Bootloader load kernel count = %u\n",
+		readl_relaxed(&boot_stats->bootloader_load_kernel));
+	pr_info("KPI: Kernel MPM timestamp = %u\n",
+		readl_relaxed(mpm_counter_base));
+	pr_info("KPI: Kernel MPM Clock frequency = %u\n",
+		mpm_counter_freq);
+}
+
+unsigned long long int msm_timer_get_sclk_ticks(void)
+{
+	unsigned long long int t1, t2;
+	int loop_count = 10;
+	int loop_zero_count = 3;
+	int tmp = USEC_PER_SEC;
+	void __iomem *sclk_tick;
+
+	do_div(tmp, TIMER_KHZ);
+	tmp /= (loop_zero_count-1);
+	sclk_tick = mpm_counter_base;
+	if (!sclk_tick)
+		return -EINVAL;
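+	/*
+	 * Sample until two consecutive reads match (counter stable); if the
+	 * counter reads zero, delay and retry up to loop_zero_count times.
+	 */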
+	while (loop_zero_count--) {
+		t1 = __raw_readl_no_log(sclk_tick);
+		do {
+			udelay(1);
+			t2 = t1;
+			t1 = __raw_readl_no_log(sclk_tick);
+		} while ((t2 != t1) && --loop_count);
+		if (!loop_count) {
+			pr_err("boot_stats: SCLK did not stabilize\n");
+			return 0;
+		}
+		if (t1)
+			break;
+
+		udelay(tmp);
+	}
+	if (!loop_zero_count) {
+		pr_err("boot_stats: SCLK reads zero\n");
+		return 0;
+	}
+	return t1;
+}
+
+int boot_stats_init(void)
+{
+	int ret;
+
+	ret = mpm_parse_dt();
+	if (ret < 0)
+		return -ENODEV;
+
+	print_boot_stats();
+
+	if (!(boot_marker_enabled()))
+		boot_stats_exit();
+	return 0;
+}
+
+int boot_stats_exit(void)
+{
+	iounmap(boot_stats);
+	iounmap(mpm_counter_base);
+	return 0;
+}
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/soc/qcom/core_hang_detect.c	2019-01-22 16:16:26.647274877 +0100
@@ -0,0 +1,355 @@
+/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <linux/cpu.h>
+#include <linux/sysfs.h>
+#include <linux/kobject.h>
+#include <soc/qcom/scm.h>
+#include <linux/platform_device.h>
+
+/* pmu event min and max value */
+#define PMU_EVENT_MIN			0
+#define PMU_EVENT_MAX			0x1F
+
+#define PMU_MUX_OFFSET			4
+#define PMU_MUX_MASK_BITS		0xF
+#define ENABLE_OFFSET			1
+#define ENABLE_MASK_BITS		0x1
+
+#define _VAL(z)			(z##_MASK_BITS << z##_OFFSET)
+#define _VALUE(_val, z)		(_val<<(z##_OFFSET))
+#define _WRITE(x, y, z)		(((~(_VAL(z))) & y) | _VALUE(x, z))
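+/*
+ * _WRITE(val, reg, FIELD): read-modify-write helper that clears FIELD's
+ * bits in reg and ORs in val shifted to FIELD's offset.
+ */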
+
+#define MODULE_NAME	"msm_hang_detect"
+#define MAX_SYSFS_LEN 12
+
+struct hang_detect {
+	phys_addr_t threshold[NR_CPUS];
+	phys_addr_t config[NR_CPUS];
+	uint32_t enabled;
+	uint32_t pmu_event_sel;
+	uint32_t threshold_val;
+	struct kobject kobj;
+};
+
+/* interface for exporting attributes */
+struct core_hang_attribute {
+	struct attribute        attr;
+	ssize_t (*show)(struct kobject *kobj, struct attribute *attr,
+			char *buf);
+	ssize_t (*store)(struct kobject *kobj, struct attribute *attr,
+			const char *buf, size_t count);
+};
+
+#define CORE_HANG_ATTR(_name, _mode, _show, _store)	\
+	struct core_hang_attribute hang_attr_##_name =	\
+			__ATTR(_name, _mode, _show, _store)
+
+#define to_core_hang_dev(kobj) \
+	container_of(kobj, struct hang_detect, kobj)
+
+#define to_core_attr(_attr) \
+	container_of(_attr, struct core_hang_attribute, attr)
+
+/*
+ * On the kernel command line specify core_hang_detect.enable=1
+ * to enable the core hang detect module.
+ * By default core hang detect is turned on
+ */
+static int enable = 1;
+module_param(enable, int, 0444);
+
+static ssize_t attr_show(struct kobject *kobj, struct attribute *attr,
+				char *buf)
+{
+	struct core_hang_attribute *core_attr = to_core_attr(attr);
+	ssize_t ret = -EIO;
+
+	if (core_attr->show)
+		ret = core_attr->show(kobj, attr, buf);
+
+	return ret;
+}
+
+static ssize_t attr_store(struct kobject *kobj, struct attribute *attr,
+				const char *buf, size_t count)
+{
+	struct core_hang_attribute *core_attr = to_core_attr(attr);
+	ssize_t ret = -EIO;
+
+	if (core_attr->store)
+		ret = core_attr->store(kobj, attr, buf, count);
+
+	return ret;
+}
+
+static const struct sysfs_ops core_sysfs_ops = {
+	.show	= attr_show,
+	.store	= attr_store,
+};
+
+static struct kobj_type core_ktype = {
+	.sysfs_ops	= &core_sysfs_ops,
+};
+
+static ssize_t show_threshold(struct kobject *kobj, struct attribute *attr,
+				char *buf)
+{
+	struct hang_detect *device =  to_core_hang_dev(kobj);
+
+	return snprintf(buf, MAX_SYSFS_LEN, "0x%x\n", device->threshold_val);
+}
+
+static ssize_t store_threshold(struct kobject *kobj, struct attribute *attr,
+				const char *buf, size_t count)
+{
+	struct hang_detect *hang_dev = to_core_hang_dev(kobj);
+	uint32_t threshold_val;
+	int ret, cpu;
+
+	ret = kstrtouint(buf, 0, &threshold_val);
+	if (ret < 0)
+		return ret;
+
+	if (!threshold_val)
+		return -EINVAL;
+
+	for_each_possible_cpu(cpu) {
+		if (!hang_dev->threshold[cpu])
+			continue;
+
+		if (scm_io_write(hang_dev->threshold[cpu], threshold_val)) {
+			pr_err("%s: Failed to set threshold for core%d\n",
+					__func__, cpu);
+			return -EIO;
+		}
+	}
+
+	hang_dev->threshold_val = threshold_val;
+	return count;
+}
+CORE_HANG_ATTR(threshold, 0644, show_threshold, store_threshold);
+
+static ssize_t show_pmu_event_sel(struct kobject *kobj, struct attribute *attr,
+			char *buf)
+{
+	struct hang_detect *hang_device = to_core_hang_dev(kobj);
+
+	return snprintf(buf, MAX_SYSFS_LEN, "0x%x\n",
+			hang_device->pmu_event_sel);
+}
+
+static ssize_t store_pmu_event_sel(struct kobject *kobj, struct attribute *attr,
+			const char *buf, size_t count)
+{
+	int  cpu, ret;
+	uint32_t pmu_event_sel, reg_value;
+	struct hang_detect *hang_dev = to_core_hang_dev(kobj);
+
+	ret = kstrtouint(buf, 0, &pmu_event_sel);
+	if (ret < 0)
+		return ret;
+
+	if (pmu_event_sel < PMU_EVENT_MIN || pmu_event_sel > PMU_EVENT_MAX)
+		return -EINVAL;
+
+	for_each_possible_cpu(cpu) {
+		if (!hang_dev->config[cpu])
+			continue;
+
+		reg_value = scm_io_read(hang_dev->config[cpu]);
+		if (scm_io_write(hang_dev->config[cpu],
+			_WRITE(pmu_event_sel, reg_value, PMU_MUX))) {
+			pr_err("%s: Failed to set pmu event for core%d\n",
+					__func__, cpu);
+			return -EIO;
+		}
+	}
+
+	hang_dev->pmu_event_sel = pmu_event_sel;
+	return count;
+}
+CORE_HANG_ATTR(pmu_event_sel, 0644, show_pmu_event_sel, store_pmu_event_sel);
+
+static ssize_t show_enable(struct kobject *kobj, struct attribute *attr,
+				char *buf)
+{
+	struct hang_detect *hang_device = to_core_hang_dev(kobj);
+
+	return snprintf(buf, MAX_SYSFS_LEN, "%u\n", hang_device->enabled);
+}
+
+static ssize_t store_enable(struct kobject *kobj, struct attribute *attr,
+				const char *buf, size_t count)
+{
+	struct hang_detect *hang_dev = to_core_hang_dev(kobj);
+	uint32_t enabled, reg_value;
+	int cpu, ret;
+
+	ret = kstrtouint(buf, 0, &enabled);
+	if (ret < 0)
+		return -EINVAL;
+
+	if (!(enabled == 0 || enabled == 1))
+		return -EINVAL;
+
+	for_each_possible_cpu(cpu) {
+		if (!hang_dev->config[cpu])
+			continue;
+
+		reg_value = scm_io_read(hang_dev->config[cpu]);
+		if (scm_io_write(hang_dev->config[cpu],
+			_WRITE(enabled, reg_value, ENABLE))) {
+			pr_err("%s: Failed to set enable for core%d\n",
+					__func__, cpu);
+			return -EIO;
+		}
+	}
+
+	hang_dev->enabled = enabled;
+	return count;
+}
+CORE_HANG_ATTR(enable, 0644, show_enable, store_enable);
+
+static struct attribute *hang_attrs[] = {
+	&hang_attr_threshold.attr,
+	&hang_attr_pmu_event_sel.attr,
+	&hang_attr_enable.attr,
+	NULL
+};
+
+static struct attribute_group hang_attr_group = {
+	.attrs = hang_attrs,
+};
+
+static const struct of_device_id msm_hang_detect_table[] = {
+	{ .compatible = "qcom,core-hang-detect" },
+	{}
+};
+
+static int msm_hang_detect_probe(struct platform_device *pdev)
+{
+	struct device_node *cpu_node;
+	struct device_node *node = pdev->dev.of_node;
+	struct hang_detect *hang_det = NULL;
+	int cpu, ret, cpu_count = 0;
+	const char *name;
+	u32 treg[NR_CPUS] = {0}, creg[NR_CPUS] = {0};
+	int num_reg = 0;	/* signed: of_property_count_u32_elems() may return -errno */
+
+	if (!pdev->dev.of_node || !enable)
+		return -ENODEV;
+
+	hang_det = devm_kzalloc(&pdev->dev,
+			sizeof(struct hang_detect), GFP_KERNEL);
+
+	if (!hang_det) {
+		pr_err("Can't allocate hang_detect memory\n");
+		return -ENOMEM;
+	}
+
+	name = of_get_property(node, "label", NULL);
+	if (!name) {
+		pr_err("Can't get label property\n");
+		return -EINVAL;
+	}
+
+	num_reg = of_property_count_u32_elems(node,
+			"qcom,threshold-arr");
+	if (num_reg < 0) {
+		pr_err("Can't get threshold-arr property\n");
+		return -EINVAL;
+	}
+
+	ret = of_property_read_u32_array(node, "qcom,threshold-arr",
+				treg, num_reg);
+	if (ret) {
+		pr_err("Can't get threshold-arr property\n");
+		return -EINVAL;
+	}
+
+	ret = of_property_read_u32_array(node, "qcom,config-arr",
+				creg, num_reg);
+	if (ret) {
+		pr_err("Can't get config-arr property\n");
+		return -EINVAL;
+	}
+
+	for_each_possible_cpu(cpu) {
+		cpu_node = of_get_cpu_node(cpu, NULL);
+		if (!cpu_node)
+			continue;
+		hang_det->threshold[cpu] = treg[cpu];
+		hang_det->config[cpu] = creg[cpu];
+		cpu_count++;
+	}
+
+	if (cpu_count == 0) {
+		pr_err("%s: no CPU nodes found\n", __func__);
+		return -EINVAL;
+	}
+
+	ret = kobject_init_and_add(&hang_det->kobj, &core_ktype,
+			&cpu_subsys.dev_root->kobj, "%s_%s",
+			"hang_detect", name);
+	if (ret) {
+		pr_err("%s:Error in creation kobject_add\n", __func__);
+		goto out_put_kobj;
+	}
+
+	ret = sysfs_create_group(&hang_det->kobj, &hang_attr_group);
+	if (ret) {
+		pr_err("%s:Error in creation sysfs_create_group\n", __func__);
+		goto out_del_kobj;
+	}
+
+	platform_set_drvdata(pdev, hang_det);
+	return 0;
+
+out_del_kobj:
+	kobject_del(&hang_det->kobj);
+out_put_kobj:
+	kobject_put(&hang_det->kobj);
+
+	return ret;
+}
+
+static int msm_hang_detect_remove(struct platform_device *pdev)
+{
+	struct hang_detect *hang_det = platform_get_drvdata(pdev);
+
+	platform_set_drvdata(pdev, NULL);
+	sysfs_remove_group(&hang_det->kobj, &hang_attr_group);
+	kobject_del(&hang_det->kobj);
+	kobject_put(&hang_det->kobj);
+	return 0;
+}
+
+static struct platform_driver msm_hang_detect_driver = {
+	.probe = msm_hang_detect_probe,
+	.remove = msm_hang_detect_remove,
+	.driver = {
+		.name = MODULE_NAME,
+		.owner = THIS_MODULE,
+		.of_match_table = msm_hang_detect_table,
+	},
+};
+
+module_platform_driver(msm_hang_detect_driver);
+
+MODULE_DESCRIPTION("MSM Core Hang Detect Driver");
+MODULE_LICENSE("GPL v2");
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/soc/qcom/dcc.c	2019-01-22 16:16:26.647274877 +0100
@@ -0,0 +1,1362 @@
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#include <linux/module.h>
+#include <linux/bitops.h>
+#include <linux/cdev.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/fs.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <soc/qcom/memory_dump.h>
+#include <soc/qcom/rpm-smd.h>
+#include <soc/qcom/scm.h>
+
+#define RPM_MISC_REQ_TYPE	0x6373696d
+#define RPM_MISC_DDR_DCC_ENABLE 0x32726464
+
+#define TIMEOUT_US		(100)
+
+#define BM(lsb, msb)		((BIT(msb) - BIT(lsb)) + BIT(msb))
+#define BMVAL(val, lsb, msb)	((val & BM(lsb, msb)) >> lsb)
+#define BVAL(val, n)		((val & BIT(n)) >> n)
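+/* BM(lsb, msb): mask of bits lsb..msb inclusive; BMVAL/BVAL extract fields */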
+
+#define dcc_writel(drvdata, val, off)					\
+	__raw_writel((val), drvdata->base + off)
+#define dcc_readl(drvdata, off)						\
+	__raw_readl(drvdata->base + off)
+
+#define dcc_sram_writel(drvdata, val, off)				\
+	__raw_writel((val), drvdata->ram_base + off)
+#define dcc_sram_readl(drvdata, off)					\
+	__raw_readl(drvdata->ram_base + off)
+
+/* DCC registers */
+#define DCC_HW_VERSION		(0x00)
+#define DCC_HW_INFO		(0x04)
+#define DCC_CGC_CFG		(0x10)
+#define DCC_LL			(0x14)
+#define DCC_RAM_CFG		(0x18)
+#define DCC_CFG			(0x1C)
+#define DCC_SW_CTL		(0x20)
+#define DCC_STATUS		(0x24)
+#define DCC_FETCH_ADDR		(0x28)
+#define DCC_SRAM_ADDR		(0x2C)
+#define DCC_INT_ENABLE		(0x30)
+#define DCC_INT_STATUS		(0x34)
+#define DCC_QSB_CFG		(0x38)
+
+#define DCC_REG_DUMP_MAGIC_V2		(0x42445953)
+#define DCC_REG_DUMP_VER		(1)
+
+#define MAX_DCC_OFFSET		(0xFF * 4)
+#define MAX_DCC_LEN		0x7F
+
+#define SCM_SVC_DISABLE_XPU	0x23
+
+enum dcc_func_type {
+	DCC_FUNC_TYPE_CAPTURE,
+	DCC_FUNC_TYPE_CRC,
+};
+
+static const char * const str_dcc_func_type[] = {
+	[DCC_FUNC_TYPE_CAPTURE]		= "cap",
+	[DCC_FUNC_TYPE_CRC]		= "crc",
+};
+
+enum dcc_data_sink {
+	DCC_DATA_SINK_ATB,
+	DCC_DATA_SINK_SRAM
+};
+
+static const char * const str_dcc_data_sink[] = {
+	[DCC_DATA_SINK_ATB]		= "atb",
+	[DCC_DATA_SINK_SRAM]		= "sram",
+};
+
+struct rpm_trig_req {
+	uint32_t    enable;
+	uint32_t    reserved;
+};
+
+struct dcc_config_entry {
+	uint32_t		base;
+	uint32_t		offset;
+	uint32_t		len;
+	uint32_t		index;
+	struct list_head	list;
+};
+
+struct dcc_drvdata {
+	void __iomem		*base;
+	uint32_t		reg_size;
+	struct device		*dev;
+	struct mutex		mutex;
+	void __iomem		*ram_base;
+	uint32_t		ram_size;
+	struct clk		*clk;
+	enum dcc_data_sink	data_sink;
+	enum dcc_func_type	func_type;
+	uint32_t		ram_cfg;
+	bool			enable;
+	bool			interrupt_disable;
+	char			*sram_node;
+	struct cdev		sram_dev;
+	struct class		*sram_class;
+	struct list_head	config_head;
+	uint32_t		nr_config;
+	void			*reg_buf;
+	struct msm_dump_data	reg_data;
+	bool			save_reg;
+	void			*sram_buf;
+	struct msm_dump_data	sram_data;
+	struct rpm_trig_req	rpm_trig_req;
+	struct msm_rpm_kvp	rpm_kvp;
+	bool			xpu_scm_avail;
+	uint64_t		xpu_addr;
+	uint32_t		xpu_unlock_count;
+};
+
+static int dcc_cfg_xpu(struct dcc_drvdata *drvdata, bool enable)
+{
+	struct scm_desc desc = {0};
+
+	desc.args[0] = drvdata->xpu_addr;
+	desc.args[1] = enable;
+	desc.arginfo = SCM_ARGS(2, SCM_VAL, SCM_VAL);
+
+	return scm_call2(SCM_SIP_FNID(SCM_SVC_MP, SCM_SVC_DISABLE_XPU), &desc);
+}
+
+static int dcc_xpu_lock(struct dcc_drvdata *drvdata)
+{
+	int ret = 0;
+
+	mutex_lock(&drvdata->mutex);
+	if (!drvdata->xpu_scm_avail)
+		goto err;
+
+	if (drvdata->xpu_unlock_count == 0)
+		goto err;
+
+	if (drvdata->xpu_unlock_count == 1) {
+		ret = clk_prepare_enable(drvdata->clk);
+		if (ret)
+			goto err;
+
+		/* make sure all access to DCC are completed */
+		mb();
+
+		ret = dcc_cfg_xpu(drvdata, 1);
+		if (ret)
+			dev_err(drvdata->dev, "Falied to lock DCC XPU.\n");
+
+		clk_disable_unprepare(drvdata->clk);
+	}
+
+	if (!ret)
+		drvdata->xpu_unlock_count--;
+err:
+	mutex_unlock(&drvdata->mutex);
+	return ret;
+}
+
+static int dcc_xpu_unlock(struct dcc_drvdata *drvdata)
+{
+	int ret = 0;
+
+	mutex_lock(&drvdata->mutex);
+	if (!drvdata->xpu_scm_avail)
+		goto err;
+
+	if (drvdata->xpu_unlock_count == 0) {
+		ret = clk_prepare_enable(drvdata->clk);
+		if (ret)
+			goto err;
+
+		ret = dcc_cfg_xpu(drvdata, 0);
+		if (ret)
+			dev_err(drvdata->dev, "Falied to unlock DCC XPU.\n");
+
+		clk_disable_unprepare(drvdata->clk);
+	}
+
+	if (!ret)
+		drvdata->xpu_unlock_count++;
+err:
+	mutex_unlock(&drvdata->mutex);
+	return ret;
+}
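+/*
+ * Editor's note: the two helpers above implement a recursive unlock
+ * count rather than a plain lock.  An illustrative pairing:
+ *
+ *	dcc_xpu_unlock();	count 0 -> 1, SCM call drops XPU protection
+ *	dcc_xpu_unlock();	count 1 -> 2, no SCM call
+ *	dcc_xpu_lock();		count 2 -> 1, no SCM call
+ *	dcc_xpu_lock();		count 1 -> 0, SCM call restores XPU protection
+ */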
+
+static bool dcc_ready(struct dcc_drvdata *drvdata)
+{
+	uint32_t val;
+
+	/* poll until DCC ready */
+	if (!readl_poll_timeout((drvdata->base + DCC_STATUS), val,
+				(BVAL(val, 4) == 1), 1, TIMEOUT_US))
+		return true;
+
+	return false;
+}
+
+static int dcc_sw_trigger(struct dcc_drvdata *drvdata)
+{
+	int ret;
+
+	ret = 0;
+	mutex_lock(&drvdata->mutex);
+
+	if (!drvdata->enable) {
+		dev_err(drvdata->dev,
+			"DCC is disabled. Can't send sw trigger.\n");
+		ret = -EINVAL;
+		goto err;
+	}
+
+	if (!dcc_ready(drvdata)) {
+		dev_err(drvdata->dev, "DCC is not ready!\n");
+		ret = -EBUSY;
+		goto err;
+	}
+
+	dcc_writel(drvdata, 1, DCC_SW_CTL);
+
+	if (!dcc_ready(drvdata)) {
+		dev_err(drvdata->dev,
+			"DCC is busy after receiving sw tigger.\n");
+		ret = -EBUSY;
+		goto err;
+	}
+err:
+	mutex_unlock(&drvdata->mutex);
+	return ret;
+}
+
+static int __dcc_ll_cfg(struct dcc_drvdata *drvdata)
+{
+	int ret = 0;
+	uint32_t sram_offset = 0;
+	uint32_t prev_addr, addr;
+	uint32_t prev_off = 0, off;
+	uint32_t link;
+	uint32_t pos, total_len = 0;
+	struct dcc_config_entry *entry;
+
+	if (list_empty(&drvdata->config_head)) {
+		dev_err(drvdata->dev,
+			"No configuration is available to program in DCC SRAM!\n");
+		return -EINVAL;
+	}
+
+	memset_io(drvdata->ram_base, 0, drvdata->ram_size);
+
+	prev_addr = 0;
+	link = 0;
+
+	list_for_each_entry(entry, &drvdata->config_head, list) {
+		/* Address type */
+		addr = (entry->base >> 4) & BM(0, 27);
+		addr |= BIT(31);
+		off = entry->offset/4;
+		total_len += entry->len * 4;
+
+		if (!prev_addr || prev_addr != addr || prev_off > off) {
+			/* Check if we need to write link of prev entry */
+			if (link) {
+				dcc_sram_writel(drvdata, link, sram_offset);
+				sram_offset += 4;
+			}
+
+			/* Write address */
+			dcc_sram_writel(drvdata, addr, sram_offset);
+			sram_offset += 4;
+
+			/* Reset link and prev_off */
+			link = 0;
+			prev_off = 0;
+		}
+
+		if ((off - prev_off) > 0xFF || entry->len > MAX_DCC_LEN) {
+			dev_err(drvdata->dev,
+				"DCC: Progamming error! Base: 0x%x, offset 0x%x.\n",
+				entry->base, entry->offset);
+			ret = -EINVAL;
+			goto err;
+		}
+
+		if (link) {
+			/*
+			 * link already has one offset-length so new
+			 * offset-length needs to be placed at bits [31:16]
+			 */
+			pos = 16;
+
+			/* Clear bits [31:16] */
+			link &= BM(0, 15);
+
+		} else {
+			/*
+			 * link is empty, so new offset-length needs to be
+			 * placed at bits [15:0]
+			 */
+			pos = 0;
+			link = 1 << 16;
+		}
+
+		/* write new offset-length pair to correct position */
+		link |= (((off-prev_off) & BM(0, 7)) |
+			 ((entry->len << 8) & BM(8, 14))) << pos;
+
+		if (pos) {
+			dcc_sram_writel(drvdata, link, sram_offset);
+			sram_offset += 4;
+			link = 0;
+		}
+
+		prev_off  = off;
+		prev_addr = addr;
+	}
+
+	if (link) {
+		dcc_sram_writel(drvdata, link, sram_offset);
+		sram_offset += 4;
+	}
+
+	/* Setting zero to indicate end of the list */
+	dcc_sram_writel(drvdata, 0, sram_offset);
+	sram_offset += 4;
+
+	/* check if the data will overstep */
+	if (drvdata->data_sink == DCC_DATA_SINK_SRAM
+	    && drvdata->func_type == DCC_FUNC_TYPE_CAPTURE) {
+		if (sram_offset + total_len > drvdata->ram_size) {
+			sram_offset += total_len;
+			goto overstep;
+		}
+	} else {
+		if (sram_offset > drvdata->ram_size)
+			goto overstep;
+	}
+
+	drvdata->ram_cfg = (sram_offset / 4);
+	return 0;
+overstep:
+	ret = -EINVAL;
+	memset_io(drvdata->ram_base, 0, drvdata->ram_size);
+	dev_err(drvdata->dev, "DCC SRAM oversteps, 0x%x (0x%x)\n",
+		sram_offset, drvdata->ram_size);
+err:
+	return ret;
+}
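+/*
+ * Editor's illustration of the list words __dcc_ll_cfg() emits: each
+ * 32-bit descriptor holds up to two (offset-delta, length) pairs, the
+ * delta in bits [7:0] and the length in bits [14:8] of each half, with
+ * bit 16 marking a half-filled word.  For two entries under one base,
+ * delta 0 len 2 followed by delta 3 len 1:
+ *
+ *	pair0 = (0 & 0xff) | ((2 << 8) & 0x7f00)  = 0x0200
+ *	pair1 = (3 & 0xff) | ((1 << 8) & 0x7f00)  = 0x0103
+ *	word  = pair0 | (pair1 << 16)             = 0x01030200
+ */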
+
+static void __dcc_reg_dump(struct dcc_drvdata *drvdata)
+{
+	uint32_t *reg_buf;
+
+	if (!drvdata->reg_buf)
+		return;
+
+	drvdata->reg_data.version = DCC_REG_DUMP_VER;
+
+	reg_buf = drvdata->reg_buf;
+
+	reg_buf[0] = dcc_readl(drvdata, DCC_HW_VERSION);
+	reg_buf[1] = dcc_readl(drvdata, DCC_HW_INFO);
+	reg_buf[2] = dcc_readl(drvdata, DCC_CGC_CFG);
+	reg_buf[3] = dcc_readl(drvdata, DCC_LL);
+	reg_buf[4] = dcc_readl(drvdata, DCC_RAM_CFG);
+	reg_buf[5] = dcc_readl(drvdata, DCC_CFG);
+	reg_buf[6] = dcc_readl(drvdata, DCC_SW_CTL);
+	reg_buf[7] = dcc_readl(drvdata, DCC_STATUS);
+	reg_buf[8] = dcc_readl(drvdata, DCC_FETCH_ADDR);
+	reg_buf[9] = dcc_readl(drvdata, DCC_SRAM_ADDR);
+	reg_buf[10] = dcc_readl(drvdata, DCC_INT_ENABLE);
+	reg_buf[11] = dcc_readl(drvdata, DCC_INT_STATUS);
+	reg_buf[12] = dcc_readl(drvdata, DCC_QSB_CFG);
+
+	drvdata->reg_data.magic = DCC_REG_DUMP_MAGIC_V2;
+}
+
+static void __dcc_first_crc(struct dcc_drvdata *drvdata)
+{
+	int i;
+
+	/*
+	 * We need to send two triggers to the DCC: the first sets the CRC
+	 * error status bit, so a second one is needed to reset it.
+	 */
+	for (i = 0; i < 2; i++) {
+		if (!dcc_ready(drvdata))
+			dev_err(drvdata->dev, "DCC is not ready!\n");
+
+		dcc_writel(drvdata, 1, DCC_SW_CTL);
+	}
+
+	/* Clear CRC error interrupt */
+	dcc_writel(drvdata, BIT(0), DCC_INT_STATUS);
+}
+
+static int dcc_enable(struct dcc_drvdata *drvdata)
+{
+	int ret = 0;
+
+	mutex_lock(&drvdata->mutex);
+
+	if (drvdata->enable) {
+		dev_err(drvdata->dev, "DCC is already enabled!\n");
+		mutex_unlock(&drvdata->mutex);
+		return 0;
+	}
+
+	/* 1. Prepare and enable DCC clock */
+	ret = clk_prepare_enable(drvdata->clk);
+	if (ret)
+		goto err;
+
+	dcc_writel(drvdata, 0, DCC_LL);
+
+	/* 2. Program linked-list in the SRAM */
+	ret = __dcc_ll_cfg(drvdata);
+	if (ret)
+		goto err_prog_ll;
+
+	/* 3. If in capture mode program DCC_RAM_CFG reg */
+	if (drvdata->func_type == DCC_FUNC_TYPE_CAPTURE)
+		dcc_writel(drvdata, drvdata->ram_cfg, DCC_RAM_CFG);
+
+	/* 4. Configure data sink and function type */
+	dcc_writel(drvdata, ((drvdata->data_sink << 4) | (drvdata->func_type)),
+		   DCC_CFG);
+
+	/* 5. Clear the interrupt status register */
+	dcc_writel(drvdata, 0, DCC_INT_ENABLE);
+	dcc_writel(drvdata, (BIT(4) | BIT(0)), DCC_INT_STATUS);
+
+	/* Make sure all config is written to SRAM */
+	mb();
+
+	/* 6. Set LL bit */
+	dcc_writel(drvdata, 1, DCC_LL);
+	drvdata->enable = 1;
+
+	if (drvdata->func_type == DCC_FUNC_TYPE_CRC) {
+		__dcc_first_crc(drvdata);
+
+		/* Enable CRC error interrupt */
+		if (!drvdata->interrupt_disable)
+			dcc_writel(drvdata, BIT(0), DCC_INT_ENABLE);
+	}
+
+	/* Save DCC registers */
+	if (drvdata->save_reg)
+		__dcc_reg_dump(drvdata);
+
+err_prog_ll:
+	if (!drvdata->enable)
+		clk_disable_unprepare(drvdata->clk);
+err:
+	mutex_unlock(&drvdata->mutex);
+	return ret;
+}
+
+static int __dcc_rpm_sw_trigger(struct dcc_drvdata *drvdata, bool enable)
+{
+	int ret = 0;
+	struct msm_rpm_kvp *rpm_kvp = &drvdata->rpm_kvp;
+
+	if (enable == drvdata->rpm_trig_req.enable)
+		return 0;
+
+	if (enable && (!drvdata->enable || drvdata->func_type !=
+		       DCC_FUNC_TYPE_CRC)) {
+		dev_err(drvdata->dev,
+			"DCC: invalid state! Can't send sw trigger req to rpm\n");
+		return -EINVAL;
+	}
+
+	drvdata->rpm_trig_req.enable = enable;
+	rpm_kvp->key = RPM_MISC_DDR_DCC_ENABLE;
+	rpm_kvp->length = sizeof(struct rpm_trig_req);
+	rpm_kvp->data = (void *)(&drvdata->rpm_trig_req);
+
+	ret = msm_rpm_send_message(MSM_RPM_CTX_ACTIVE_SET,
+				   RPM_MISC_REQ_TYPE, 0, rpm_kvp, 1);
+	if (ret) {
+		dev_err(drvdata->dev,
+			"DCC: SW trigger %s req to rpm failed %d\n",
+			(enable ? "enable" : "disable"), ret);
+		drvdata->rpm_trig_req.enable = !enable;
+	}
+
+	return ret;
+}
+
+static void dcc_disable(struct dcc_drvdata *drvdata)
+{
+	mutex_lock(&drvdata->mutex);
+	if (!drvdata->enable) {
+		mutex_unlock(&drvdata->mutex);
+		return;
+	}
+
+	/* Send request to RPM to disable DCC SW trigger */
+
+	if (__dcc_rpm_sw_trigger(drvdata, 0))
+		dev_err(drvdata->dev,
+			"DCC: Request to RPM to disable SW trigger failed.\n");
+
+	if (!dcc_ready(drvdata))
+		dev_err(drvdata->dev, "DCC is not ready! Disabling DCC...\n");
+
+	dcc_writel(drvdata, 0, DCC_LL);
+	drvdata->enable = 0;
+
+	/* Save DCC registers */
+	if (drvdata->save_reg)
+		__dcc_reg_dump(drvdata);
+
+	clk_disable_unprepare(drvdata->clk);
+
+	mutex_unlock(&drvdata->mutex);
+}
+
+static ssize_t dcc_show_func_type(struct device *dev,
+				  struct device_attribute *attr, char *buf)
+{
+	struct dcc_drvdata *drvdata = dev_get_drvdata(dev);
+
+	return scnprintf(buf, PAGE_SIZE, "%s\n",
+			 str_dcc_func_type[drvdata->func_type]);
+}
+
+static ssize_t dcc_store_func_type(struct device *dev,
+				   struct device_attribute *attr,
+				   const char *buf, size_t size)
+{
+	struct dcc_drvdata *drvdata = dev_get_drvdata(dev);
+	char str[10] = "";
+	int ret;
+
+	if (strlen(buf) >= 10)
+		return -EINVAL;
+	if (sscanf(buf, "%s", str) != 1)
+		return -EINVAL;
+
+	mutex_lock(&drvdata->mutex);
+	if (drvdata->enable) {
+		ret = -EBUSY;
+		goto out;
+	}
+
+	if (!strcmp(str, str_dcc_func_type[DCC_FUNC_TYPE_CAPTURE]))
+		drvdata->func_type = DCC_FUNC_TYPE_CAPTURE;
+	else if (!strcmp(str, str_dcc_func_type[DCC_FUNC_TYPE_CRC]))
+		drvdata->func_type = DCC_FUNC_TYPE_CRC;
+	else {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	ret = size;
+out:
+	mutex_unlock(&drvdata->mutex);
+	return ret;
+}
+static DEVICE_ATTR(func_type, S_IRUGO | S_IWUSR,
+		   dcc_show_func_type, dcc_store_func_type);
+
+static ssize_t dcc_show_data_sink(struct device *dev,
+				  struct device_attribute *attr, char *buf)
+{
+	struct dcc_drvdata *drvdata = dev_get_drvdata(dev);
+
+	return scnprintf(buf, PAGE_SIZE, "%s\n",
+			 str_dcc_data_sink[drvdata->data_sink]);
+}
+
+static ssize_t dcc_store_data_sink(struct device *dev,
+				   struct device_attribute *attr,
+				   const char *buf, size_t size)
+{
+	struct dcc_drvdata *drvdata = dev_get_drvdata(dev);
+	char str[10] = "";
+	int ret;
+
+	if (strlen(buf) >= 10)
+		return -EINVAL;
+	if (sscanf(buf, "%s", str) != 1)
+		return -EINVAL;
+
+	mutex_lock(&drvdata->mutex);
+	if (drvdata->enable) {
+		ret = -EBUSY;
+		goto out;
+	}
+
+	if (!strcmp(str, str_dcc_data_sink[DCC_DATA_SINK_SRAM]))
+		drvdata->data_sink = DCC_DATA_SINK_SRAM;
+	else if (!strcmp(str, str_dcc_data_sink[DCC_DATA_SINK_ATB]))
+		drvdata->data_sink = DCC_DATA_SINK_ATB;
+	else {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	ret = size;
+out:
+	mutex_unlock(&drvdata->mutex);
+	return ret;
+}
+static DEVICE_ATTR(data_sink, S_IRUGO | S_IWUSR,
+		   dcc_show_data_sink, dcc_store_data_sink);
+
+static ssize_t dcc_store_trigger(struct device *dev,
+				 struct device_attribute *attr,
+				 const char *buf, size_t size)
+{
+	int ret = 0;
+	unsigned long val;
+	struct dcc_drvdata *drvdata = dev_get_drvdata(dev);
+
+	if (kstrtoul(buf, 16, &val))
+		return -EINVAL;
+	if (val != 1)
+		return -EINVAL;
+
+	ret = dcc_xpu_unlock(drvdata);
+	if (ret)
+		return ret;
+
+	ret = dcc_sw_trigger(drvdata);
+	if (!ret)
+		ret = size;
+
+	dcc_xpu_lock(drvdata);
+	return ret;
+}
+static DEVICE_ATTR(trigger, S_IWUSR, NULL, dcc_store_trigger);
+
+static ssize_t dcc_show_enable(struct device *dev,
+			       struct device_attribute *attr, char *buf)
+{
+	struct dcc_drvdata *drvdata = dev_get_drvdata(dev);
+
+	return scnprintf(buf, PAGE_SIZE, "%u\n",
+			 (unsigned)drvdata->enable);
+}
+
+static ssize_t dcc_store_enable(struct device *dev,
+				struct device_attribute *attr,
+				const char *buf, size_t size)
+{
+	int ret = 0;
+	unsigned long val;
+	struct dcc_drvdata *drvdata = dev_get_drvdata(dev);
+
+	if (kstrtoul(buf, 16, &val))
+		return -EINVAL;
+
+	ret = dcc_xpu_unlock(drvdata);
+	if (ret)
+		return ret;
+
+	if (val)
+		ret = dcc_enable(drvdata);
+	else
+		dcc_disable(drvdata);
+
+	if (!ret)
+		ret = size;
+
+	dcc_xpu_lock(drvdata);
+	return ret;
+
+}
+static DEVICE_ATTR(enable, S_IRUGO | S_IWUSR, dcc_show_enable,
+		   dcc_store_enable);
+
+static ssize_t dcc_show_config(struct device *dev,
+			       struct device_attribute *attr, char *buf)
+{
+	struct dcc_drvdata *drvdata = dev_get_drvdata(dev);
+	struct dcc_config_entry *entry;
+	char local_buf[64];
+	int len = 0, count = 0;
+
+	buf[0] = '\0';
+
+	mutex_lock(&drvdata->mutex);
+	list_for_each_entry(entry, &drvdata->config_head, list) {
+		len = snprintf(local_buf, 64,
+			       "Index: 0x%x, Base: 0x%x, Offset: 0x%x, len: 0x%x\n",
+			       entry->index, entry->base,
+			       entry->offset, entry->len);
+
+		if ((count + len) > PAGE_SIZE) {
+			dev_err(dev, "DCC: Couldn't write complete config!\n");
+			break;
+		}
+
+		strlcat(buf, local_buf, PAGE_SIZE);
+		count += len;
+	}
+
+	mutex_unlock(&drvdata->mutex);
+
+	return count;
+}
+
+static int dcc_config_add(struct dcc_drvdata *drvdata, unsigned addr,
+			  unsigned len)
+{
+	int ret;
+	struct dcc_config_entry *entry, *pentry;
+	unsigned base, offset;
+
+	mutex_lock(&drvdata->mutex);
+
+	if (!len) {
+		dev_err(drvdata->dev, "DCC: Invalid length!\n");
+		ret = -EINVAL;
+		goto err;
+	}
+
+	base = addr & BM(4, 31);
+
+	if (!list_empty(&drvdata->config_head)) {
+		pentry = list_last_entry(&drvdata->config_head,
+					 struct dcc_config_entry, list);
+
+		if (addr >= (pentry->base + pentry->offset) &&
+		    addr <= (pentry->base + pentry->offset + MAX_DCC_OFFSET)) {
+
+			/* Re-use base address from last entry */
+			base =  pentry->base;
+
+			/*
+			 * Check if new address is contiguous to last entry's
+			 * addresses. If yes then we can re-use last entry and
+			 * just need to update its length.
+			 */
+			if ((pentry->len * 4 + pentry->base + pentry->offset)
+			    == addr) {
+				len += pentry->len;
+
+				/*
+				 * Check if last entry can hold additional new
+				 * length. If yes then we don't need to create
+				 * a new entry else we need to add a new entry
+				 * with same base but updated offset.
+				 */
+				if (len > MAX_DCC_LEN)
+					pentry->len = MAX_DCC_LEN;
+				else
+					pentry->len = len;
+
+				/*
+				 * Update start addr and len for remaining
+				 * addresses, which will be part of new
+				 * entry.
+				 */
+				addr = pentry->base + pentry->offset +
+					pentry->len * 4;
+				len -= pentry->len;
+			}
+		}
+	}
+
+	offset = addr - base;
+
+	while (len) {
+		entry = devm_kzalloc(drvdata->dev, sizeof(*entry), GFP_KERNEL);
+		if (!entry) {
+			ret = -ENOMEM;
+			goto err;
+		}
+
+		entry->base = base;
+		entry->offset = offset;
+		entry->len = min_t(uint32_t, len, MAX_DCC_LEN);
+		entry->index = drvdata->nr_config++;
+		INIT_LIST_HEAD(&entry->list);
+		list_add_tail(&entry->list, &drvdata->config_head);
+
+		len -= entry->len;
+		offset += MAX_DCC_LEN * 4;
+	}
+
+	mutex_unlock(&drvdata->mutex);
+	return 0;
+err:
+	mutex_unlock(&drvdata->mutex);
+	return ret;
+}
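+/*
+ * Editor's illustration: a request longer than MAX_DCC_LEN words is
+ * split into entries that share one base, each offset advancing by
+ * MAX_DCC_LEN * 4 bytes.  E.g. addr 0x1000, len 0x100 words yields
+ *
+ *	entry 0: base 0x1000, offset 0x000, len 0x7f
+ *	entry 1: base 0x1000, offset 0x1fc, len 0x7f
+ *	entry 2: base 0x1000, offset 0x3f8, len 0x02
+ *
+ * and a request contiguous with the previous entry first grows that
+ * entry up to MAX_DCC_LEN before new entries are added.
+ */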
+
+static ssize_t dcc_store_config(struct device *dev,
+				struct device_attribute *attr,
+				const char *buf, size_t size)
+{
+	int ret;
+	unsigned base, len;
+	struct dcc_drvdata *drvdata = dev_get_drvdata(dev);
+	int nval;
+
+	nval = sscanf(buf, "%x %i", &base, &len);
+	if (nval <= 0 || nval > 2)
+		return -EINVAL;
+
+	if (nval == 1)
+		len = 1;
+
+	ret = dcc_config_add(drvdata, base, len);
+	if (ret)
+		return ret;
+
+	return size;
+
+}
+static DEVICE_ATTR(config, S_IRUGO | S_IWUSR, dcc_show_config,
+		   dcc_store_config);
+
+static void dcc_config_reset(struct dcc_drvdata *drvdata)
+{
+	struct dcc_config_entry *entry, *temp;
+
+	mutex_lock(&drvdata->mutex);
+
+	list_for_each_entry_safe(entry, temp, &drvdata->config_head, list) {
+		list_del(&entry->list);
+		devm_kfree(drvdata->dev, entry);
+		drvdata->nr_config--;
+	}
+
+	mutex_unlock(&drvdata->mutex);
+}
+
+static ssize_t dcc_store_config_reset(struct device *dev,
+				      struct device_attribute *attr,
+				      const char *buf, size_t size)
+{
+	unsigned long val;
+	struct dcc_drvdata *drvdata = dev_get_drvdata(dev);
+
+	if (kstrtoul(buf, 16, &val))
+		return -EINVAL;
+
+	if (val)
+		dcc_config_reset(drvdata);
+
+	return size;
+}
+static DEVICE_ATTR(config_reset, S_IWUSR, NULL, dcc_store_config_reset);
+
+static ssize_t dcc_show_crc_error(struct device *dev,
+				  struct device_attribute *attr, char *buf)
+{
+	int ret;
+	struct dcc_drvdata *drvdata = dev_get_drvdata(dev);
+
+	ret = dcc_xpu_unlock(drvdata);
+	if (ret)
+		return ret;
+
+	mutex_lock(&drvdata->mutex);
+	if (!drvdata->enable) {
+		ret = -EINVAL;
+		goto err;
+	}
+
+	ret = scnprintf(buf, PAGE_SIZE, "%u\n",
+			(unsigned)BVAL(dcc_readl(drvdata, DCC_STATUS), 0));
+err:
+	mutex_unlock(&drvdata->mutex);
+	dcc_xpu_lock(drvdata);
+	return ret;
+}
+static DEVICE_ATTR(crc_error, S_IRUGO, dcc_show_crc_error, NULL);
+
+static ssize_t dcc_show_ready(struct device *dev,
+			      struct device_attribute *attr, char *buf)
+{
+	int ret;
+	struct dcc_drvdata *drvdata = dev_get_drvdata(dev);
+
+	ret = dcc_xpu_unlock(drvdata);
+	if (ret)
+		return ret;
+
+	mutex_lock(&drvdata->mutex);
+	if (!drvdata->enable) {
+		ret = -EINVAL;
+		goto err;
+	}
+
+	ret = scnprintf(buf, PAGE_SIZE, "%u\n",
+			(unsigned)BVAL(dcc_readl(drvdata, DCC_STATUS), 4));
+err:
+	mutex_unlock(&drvdata->mutex);
+	dcc_xpu_lock(drvdata);
+	return ret;
+}
+static DEVICE_ATTR(ready, S_IRUGO, dcc_show_ready, NULL);
+
+static ssize_t dcc_show_interrupt_disable(struct device *dev,
+					  struct device_attribute *attr,
+					  char *buf)
+{
+	struct dcc_drvdata *drvdata = dev_get_drvdata(dev);
+
+	return scnprintf(buf, PAGE_SIZE, "%u\n",
+			 (unsigned)drvdata->interrupt_disable);
+}
+
+static ssize_t dcc_store_interrupt_disable(struct device *dev,
+					   struct device_attribute *attr,
+					   const char *buf, size_t size)
+{
+	unsigned long val;
+	struct dcc_drvdata *drvdata = dev_get_drvdata(dev);
+
+	if (kstrtoul(buf, 16, &val))
+		return -EINVAL;
+
+	mutex_lock(&drvdata->mutex);
+	drvdata->interrupt_disable = (val ? 1 : 0);
+	mutex_unlock(&drvdata->mutex);
+	return size;
+}
+static DEVICE_ATTR(interrupt_disable, S_IRUGO | S_IWUSR,
+		   dcc_show_interrupt_disable, dcc_store_interrupt_disable);
+
+static ssize_t dcc_show_rpm_sw_trigger_on(struct device *dev,
+					  struct device_attribute *attr,
+					  char *buf)
+{
+	struct dcc_drvdata *drvdata = dev_get_drvdata(dev);
+
+	return scnprintf(buf, PAGE_SIZE, "%u\n",
+			 (unsigned)drvdata->rpm_trig_req.enable);
+}
+
+static ssize_t dcc_store_rpm_sw_trigger_on(struct device *dev,
+					   struct device_attribute *attr,
+					   const char *buf, size_t size)
+{
+	unsigned long val;
+	struct dcc_drvdata *drvdata = dev_get_drvdata(dev);
+
+	if (kstrtoul(buf, 16, &val))
+		return -EINVAL;
+
+	mutex_lock(&drvdata->mutex);
+	__dcc_rpm_sw_trigger(drvdata, !!val);
+	mutex_unlock(&drvdata->mutex);
+	return size;
+}
+static DEVICE_ATTR(rpm_sw_trigger_on, S_IRUGO | S_IWUSR,
+		   dcc_show_rpm_sw_trigger_on, dcc_store_rpm_sw_trigger_on);
+
+static ssize_t dcc_store_xpu_unlock(struct device *dev,
+				    struct device_attribute *attr,
+				    const char *buf, size_t size)
+{
+	int ret;
+	unsigned long val;
+	struct dcc_drvdata *drvdata = dev_get_drvdata(dev);
+
+	if (kstrtoul(buf, 10, &val))
+		return -EINVAL;
+
+	ret = val ? dcc_xpu_unlock(drvdata) : dcc_xpu_lock(drvdata);
+	if (!ret)
+		ret = size;
+
+	return ret;
+}
+static DEVICE_ATTR(xpu_unlock, S_IWUSR, NULL, dcc_store_xpu_unlock);
+
+static const struct device_attribute *dcc_attrs[] = {
+	&dev_attr_func_type,
+	&dev_attr_data_sink,
+	&dev_attr_trigger,
+	&dev_attr_enable,
+	&dev_attr_config,
+	&dev_attr_config_reset,
+	&dev_attr_ready,
+	&dev_attr_crc_error,
+	&dev_attr_interrupt_disable,
+	&dev_attr_rpm_sw_trigger_on,
+	&dev_attr_xpu_unlock,
+	NULL,
+};
+
+static int dcc_create_files(struct device *dev,
+			    const struct device_attribute **attrs)
+{
+	int ret = 0, i;
+
+	for (i = 0; attrs[i] != NULL; i++) {
+		ret = device_create_file(dev, attrs[i]);
+		if (ret) {
+			dev_err(dev, "DCC: Couldn't create sysfs attribute: %s!\n",
+				attrs[i]->attr.name);
+			break;
+		}
+	}
+	return ret;
+}
+
+static int dcc_sram_open(struct inode *inode, struct file *file)
+{
+	struct dcc_drvdata *drvdata = container_of(inode->i_cdev,
+						   struct dcc_drvdata,
+						   sram_dev);
+	file->private_data = drvdata;
+
+	return  dcc_xpu_unlock(drvdata);
+}
+
+static ssize_t dcc_sram_read(struct file *file, char __user *data,
+			     size_t len, loff_t *ppos)
+{
+	int ret;
+	unsigned char *buf;
+	struct dcc_drvdata *drvdata = file->private_data;
+
+	/* EOF check */
+	if (drvdata->ram_size <= *ppos)
+		return 0;
+
+	if ((*ppos + len) > drvdata->ram_size)
+		len = (drvdata->ram_size - *ppos);
+
+	buf = kzalloc(len, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	ret = clk_prepare_enable(drvdata->clk);
+	if (ret) {
+		kfree(buf);
+		return ret;
+	}
+
+	memcpy_fromio(buf, (drvdata->ram_base + *ppos), len);
+
+	clk_disable_unprepare(drvdata->clk);
+
+	if (copy_to_user(data, buf, len)) {
+		dev_err(drvdata->dev,
+			"DCC: Couldn't copy all data to user!\n");
+		kfree(buf);
+		return -EFAULT;
+	}
+
+	*ppos += len;
+
+	kfree(buf);
+
+	return len;
+}
+
+static int dcc_sram_release(struct inode *inode, struct file *file)
+{
+	struct dcc_drvdata *drvdata = file->private_data;
+
+	return dcc_xpu_lock(drvdata);
+}
+
+static const struct file_operations dcc_sram_fops = {
+	.owner		= THIS_MODULE,
+	.open		= dcc_sram_open,
+	.read		= dcc_sram_read,
+	.release	= dcc_sram_release,
+	.llseek		= no_llseek,
+};
+
+static int dcc_sram_dev_register(struct dcc_drvdata *drvdata)
+{
+	int ret;
+	struct device *device;
+	dev_t dev;
+
+	ret = alloc_chrdev_region(&dev, 0, 1, drvdata->sram_node);
+	if (ret)
+		goto err_alloc;
+
+	cdev_init(&drvdata->sram_dev, &dcc_sram_fops);
+
+	drvdata->sram_dev.owner = THIS_MODULE;
+	ret = cdev_add(&drvdata->sram_dev, dev, 1);
+	if (ret)
+		goto err_cdev_add;
+
+	drvdata->sram_class = class_create(THIS_MODULE,
+					   drvdata->sram_node);
+	if (IS_ERR(drvdata->sram_class)) {
+		ret = PTR_ERR(drvdata->sram_class);
+		goto err_class_create;
+	}
+
+	device = device_create(drvdata->sram_class, NULL,
+			       drvdata->sram_dev.dev, drvdata,
+			       drvdata->sram_node);
+	if (IS_ERR(device)) {
+		ret = PTR_ERR(device);
+		goto err_dev_create;
+	}
+
+	return 0;
+err_dev_create:
+	class_destroy(drvdata->sram_class);
+err_class_create:
+	cdev_del(&drvdata->sram_dev);
+err_cdev_add:
+	unregister_chrdev_region(drvdata->sram_dev.dev, 1);
+err_alloc:
+	return ret;
+}
+
+static void dcc_sram_dev_deregister(struct dcc_drvdata *drvdata)
+{
+	device_destroy(drvdata->sram_class, drvdata->sram_dev.dev);
+	class_destroy(drvdata->sram_class);
+	cdev_del(&drvdata->sram_dev);
+	unregister_chrdev_region(drvdata->sram_dev.dev, 1);
+}
+
+static int dcc_sram_dev_init(struct dcc_drvdata *drvdata)
+{
+	int ret = 0;
+	size_t node_size;
+	char *node_name = "dcc_sram";
+	struct device *dev = drvdata->dev;
+
+	node_size = strlen(node_name) + 1;
+
+	drvdata->sram_node = devm_kzalloc(dev, node_size, GFP_KERNEL);
+	if (!drvdata->sram_node)
+		return -ENOMEM;
+
+	strlcpy(drvdata->sram_node, node_name, node_size);
+	ret = dcc_sram_dev_register(drvdata);
+	if (ret)
+		dev_err(drvdata->dev, "DCC: sram node not registered.\n");
+
+	return ret;
+}
+
+static void dcc_sram_dev_exit(struct dcc_drvdata *drvdata)
+{
+	dcc_sram_dev_deregister(drvdata);
+}
+
+static void dcc_allocate_dump_mem(struct dcc_drvdata *drvdata)
+{
+	int ret;
+	struct device *dev = drvdata->dev;
+	struct msm_dump_entry reg_dump_entry, sram_dump_entry;
+
+	/* Allocate memory for dcc reg dump */
+	drvdata->reg_buf = devm_kzalloc(dev, drvdata->reg_size, GFP_KERNEL);
+	if (drvdata->reg_buf) {
+		strlcpy(drvdata->reg_data.name, "KDCC_REG",
+				 sizeof(drvdata->reg_data.name));
+		drvdata->reg_data.addr = virt_to_phys(drvdata->reg_buf);
+		drvdata->reg_data.len = drvdata->reg_size;
+		reg_dump_entry.id = MSM_DUMP_DATA_DCC_REG;
+		reg_dump_entry.addr = virt_to_phys(&drvdata->reg_data);
+		ret = msm_dump_data_register(MSM_DUMP_TABLE_APPS,
+					     &reg_dump_entry);
+		if (ret) {
+			dev_err(dev, "DCC REG dump setup failed\n");
+			devm_kfree(dev, drvdata->reg_buf);
+		}
+	} else {
+		dev_err(dev, "DCC REG dump allocation failed\n");
+	}
+
+	/* Allocate memory for dcc sram dump */
+	drvdata->sram_buf = devm_kzalloc(dev, drvdata->ram_size, GFP_KERNEL);
+	if (drvdata->sram_buf) {
+		strlcpy(drvdata->sram_data.name, "KDCC_SRAM",
+				 sizeof(drvdata->sram_data.name));
+		drvdata->sram_data.addr = virt_to_phys(drvdata->sram_buf);
+		drvdata->sram_data.len = drvdata->ram_size;
+		sram_dump_entry.id = MSM_DUMP_DATA_DCC_SRAM;
+		sram_dump_entry.addr = virt_to_phys(&drvdata->sram_data);
+		ret = msm_dump_data_register(MSM_DUMP_TABLE_APPS,
+					     &sram_dump_entry);
+		if (ret) {
+			dev_err(dev, "DCC SRAM dump setup failed\n");
+			devm_kfree(dev, drvdata->sram_buf);
+		}
+	} else {
+		dev_err(dev, "DCC SRAM dump allocation failed\n");
+	}
+}
+
+static int dcc_probe(struct platform_device *pdev)
+{
+	int ret, i;
+	struct device *dev = &pdev->dev;
+	struct dcc_drvdata *drvdata;
+	struct resource *res;
+	const char *data_sink;
+
+	drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
+	if (!drvdata)
+		return -ENOMEM;
+
+	drvdata->dev = &pdev->dev;
+	platform_set_drvdata(pdev, drvdata);
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dcc-base");
+	if (!res)
+		return -EINVAL;
+
+	drvdata->reg_size = resource_size(res);
+	drvdata->base = devm_ioremap(dev, res->start, resource_size(res));
+	if (!drvdata->base)
+		return -ENOMEM;
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+					   "dcc-ram-base");
+	if (!res)
+		return -EINVAL;
+
+	drvdata->ram_size = resource_size(res);
+	drvdata->ram_base = devm_ioremap(dev, res->start, resource_size(res));
+	if (!drvdata->ram_base)
+		return -ENOMEM;
+
+	drvdata->clk = devm_clk_get(dev, "dcc_clk");
+	if (IS_ERR(drvdata->clk)) {
+		ret = PTR_ERR(drvdata->clk);
+		goto err;
+	}
+
+	drvdata->save_reg = of_property_read_bool(pdev->dev.of_node,
+						  "qcom,save-reg");
+
+	mutex_init(&drvdata->mutex);
+
+	INIT_LIST_HEAD(&drvdata->config_head);
+	drvdata->nr_config = 0;
+	drvdata->xpu_scm_avail = 0;
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+					   "dcc-xpu-base");
+	if (res) {
+		if (scm_is_call_available(SCM_SVC_MP,
+					  SCM_SVC_DISABLE_XPU) > 0) {
+			drvdata->xpu_scm_avail = 1;
+			drvdata->xpu_addr = res->start;
+		} else {
+			dev_err(dev, "scm call is not available\n");
+			return -EINVAL;
+		}
+	} else {
+		dev_info(dev, "DCC XPU is not specified\n");
+	}
+
+	ret = dcc_xpu_unlock(drvdata);
+	if (ret)
+		goto err;
+
+	ret = clk_prepare_enable(drvdata->clk);
+	if (ret) {
+		dcc_xpu_lock(drvdata);
+		goto err;
+	}
+
+	memset_io(drvdata->ram_base, 0, drvdata->ram_size);
+
+	dcc_xpu_lock(drvdata);
+
+	clk_disable_unprepare(drvdata->clk);
+
+	drvdata->data_sink = DCC_DATA_SINK_SRAM;
+	ret = of_property_read_string(pdev->dev.of_node, "qcom,data-sink",
+				      &data_sink);
+	if (!ret) {
+		for (i = 0; i < ARRAY_SIZE(str_dcc_data_sink); i++)
+			if (!strcmp(data_sink, str_dcc_data_sink[i])) {
+				drvdata->data_sink = i;
+				break;
+			}
+
+		if (i == ARRAY_SIZE(str_dcc_data_sink)) {
+			dev_err(dev, "Unknown sink type for DCC! Using '%s' as data sink\n",
+				str_dcc_data_sink[drvdata->data_sink]);
+		}
+	}
+
+	ret = dcc_sram_dev_init(drvdata);
+	if (ret)
+		goto err;
+
+	ret = dcc_create_files(dev, dcc_attrs);
+	if (ret)
+		goto err;
+
+	dcc_allocate_dump_mem(drvdata);
+
+	return 0;
+err:
+	return ret;
+}
+
+static int dcc_remove(struct platform_device *pdev)
+{
+	struct dcc_drvdata *drvdata = platform_get_drvdata(pdev);
+
+	dcc_sram_dev_exit(drvdata);
+
+	dcc_config_reset(drvdata);
+
+	return 0;
+}
+
+static const struct of_device_id msm_dcc_match[] = {
+	{ .compatible = "qcom,dcc"},
+	{}
+};
+
+static struct platform_driver dcc_driver = {
+	.probe          = dcc_probe,
+	.remove         = dcc_remove,
+	.driver         = {
+		.name   = "msm-dcc",
+		.owner	= THIS_MODULE,
+		.of_match_table	= msm_dcc_match,
+	},
+};
+
+static int __init dcc_init(void)
+{
+	return platform_driver_register(&dcc_driver);
+}
+module_init(dcc_init);
+
+static void __exit dcc_exit(void)
+{
+	platform_driver_unregister(&dcc_driver);
+}
+module_exit(dcc_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("MSM data capture and compare engine");
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/soc/qcom/early_random.c	2019-01-22 16:16:26.647274877 +0100
@@ -0,0 +1,63 @@
+/* Copyright (c) 2013-2014, 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/random.h>
+
+#include <soc/qcom/scm.h>
+
+#include <asm/io.h>
+#include <asm/cacheflush.h>
+
+#define TZ_SVC_CRYPTO	10
+#define PRNG_CMD_ID	0x01
+
+struct tz_prng_data {
+	uint8_t		*out_buf;
+	uint32_t	out_buf_sz;
+} __packed;
+
+DEFINE_SCM_BUFFER(common_scm_buf)
+#define RANDOM_BUFFER_SIZE	PAGE_SIZE
+char random_buffer[RANDOM_BUFFER_SIZE] __aligned(PAGE_SIZE);
+
+void __init init_random_pool(void)
+{
+	struct tz_prng_data data;
+	int ret;
+	u32 resp;
+	struct scm_desc desc;
+
+	data.out_buf = (uint8_t *) virt_to_phys(random_buffer);
+	desc.args[0] = (unsigned long) data.out_buf;
+	desc.args[1] = data.out_buf_sz = SZ_512;
+	desc.arginfo = SCM_ARGS(2, SCM_RW, SCM_VAL);
+
+	dmac_flush_range(random_buffer, random_buffer + RANDOM_BUFFER_SIZE);
+
+	if (!is_scm_armv8())
+		ret = scm_call_noalloc(TZ_SVC_CRYPTO, PRNG_CMD_ID, &data,
+				sizeof(data), &resp, sizeof(resp),
+				common_scm_buf,
+				SCM_BUFFER_SIZE(common_scm_buf));
+	else
+		ret = scm_call2(SCM_SIP_FNID(TZ_SVC_CRYPTO, PRNG_CMD_ID),
+					&desc);
+
+	if (!ret) {
+		dmac_inv_range(random_buffer, random_buffer +
+						RANDOM_BUFFER_SIZE);
+		add_device_randomness(random_buffer, SZ_512);
+	}
+}
+
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/soc/qcom/event_timer.c	2019-01-22 16:16:26.647274877 +0100
@@ -0,0 +1,505 @@
+/* Copyright (c) 2012, 2014-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/module.h>
+#include <linux/clocksource.h>
+#include <linux/clockchips.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+#include <linux/cpu.h>
+#include <soc/qcom/event_timer.h>
+
+/**
+ * struct event_timer_info - basic event timer structure
+ * @node: timerqueue node to track time ordered data structure
+ *        of event timers
+ * @notify: irq affinity notifier.
+ * @timer: hrtimer created for this event.
+ * @function: callback function for event timer.
+ * @data: callback data for event timer.
+ * @irq: irq number for which event timer is created.
+ * @cpu: event timer associated cpu.
+ */
+struct event_timer_info {
+	struct timerqueue_node node;
+	struct irq_affinity_notify notify;
+	void (*function)(void *);
+	void *data;
+	int irq;
+	int cpu;
+};
+
+struct hrtimer_info {
+	struct hrtimer event_hrtimer;
+	bool timer_initialized;
+};
+
+static DEFINE_PER_CPU(struct hrtimer_info, per_cpu_hrtimer);
+
+static DEFINE_PER_CPU(struct timerqueue_head, timer_head) = {
+	.head = RB_ROOT,
+	.next = NULL,
+};
+
+static DEFINE_SPINLOCK(event_timer_lock);
+static DEFINE_SPINLOCK(event_setup_lock);
+
+static void create_timer_smp(void *data);
+static void setup_event_hrtimer(struct event_timer_info *event);
+static enum hrtimer_restart event_hrtimer_cb(struct hrtimer *hrtimer);
+static void irq_affinity_change_notifier(struct irq_affinity_notify *notify,
+						const cpumask_t *new_cpu_mask);
+static void irq_affinity_release(struct kref *ref);
+
+static int msm_event_debug_mask;
+module_param_named(
+	debug_mask, msm_event_debug_mask, int, S_IRUGO | S_IWUSR | S_IWGRP
+);
+
+enum {
+	MSM_EVENT_TIMER_DEBUG = 1U << 0,
+};
+
+/**
+ * add_event_timer() : Add a wakeup event. Intended to be called
+ *                     by clients once. Returns a handle to be used
+ *                     for future transactions.
+ * @irq: event associated irq number.
+ * @function: the callback function to be called when the event
+ *            timer expires.
+ * @data: callback data provided by client.
+ */
+struct event_timer_info *add_event_timer(uint32_t irq,
+				void (*function)(void *), void *data)
+{
+	struct event_timer_info *event_info =
+			kzalloc(sizeof(struct event_timer_info), GFP_KERNEL);
+
+	if (!event_info)
+		return NULL;
+
+	event_info->function = function;
+	event_info->data = data;
+
+	if (irq) {
+		struct irq_desc *desc = irq_to_desc(irq);
+		struct cpumask *mask = desc->irq_common_data.affinity;
+
+		get_online_cpus();
+		event_info->cpu = cpumask_any_and(mask, cpu_online_mask);
+		if (event_info->cpu >= nr_cpu_ids)
+			event_info->cpu = cpumask_first(cpu_online_mask);
+
+		event_info->notify.notify = irq_affinity_change_notifier;
+		event_info->notify.release = irq_affinity_release;
+		irq_set_affinity_notifier(irq, &event_info->notify);
+		put_online_cpus();
+	}
+
+	/* Init rb node and hr timer */
+	timerqueue_init(&event_info->node);
+	pr_debug("New Event Added. Event %p(on cpu%d). irq %d.\n",
+					event_info, event_info->cpu, irq);
+
+	return event_info;
+}
+EXPORT_SYMBOL(add_event_timer);
+
+/**
+ * is_event_next(): Helper function to check if the event is the next
+ *                  expiring event
+ * @event : handle to the event to be checked.
+ */
+static bool is_event_next(struct event_timer_info *event)
+{
+	struct event_timer_info *next_event;
+	struct timerqueue_node *next;
+	bool ret = false;
+
+	next = timerqueue_getnext(&per_cpu(timer_head, event->cpu));
+	if (!next)
+		goto exit_is_next_event;
+
+	next_event = container_of(next, struct event_timer_info, node);
+	if (!next_event)
+		goto exit_is_next_event;
+
+	if (next_event == event)
+		ret = true;
+
+exit_is_next_event:
+	return ret;
+}
+
+/**
+ * is_event_active(): Helper function to check if the timer for a given event
+ *                    has been started.
+ * @event : handle to the event to be checked.
+ */
+static bool is_event_active(struct event_timer_info *event)
+{
+	struct timerqueue_node *next;
+	struct event_timer_info *next_event;
+	bool ret = false;
+
+	for (next = timerqueue_getnext(&per_cpu(timer_head, event->cpu)); next;
+			next = timerqueue_iterate_next(next)) {
+		next_event = container_of(next, struct event_timer_info, node);
+
+		if (event == next_event) {
+			ret = true;
+			break;
+		}
+	}
+	return ret;
+}
+
+/**
+ * create_hrtimer(): Helper function to setup hrtimer.
+ */
+static void create_hrtimer(struct event_timer_info *event)
+
+{
+	bool timer_initialized = per_cpu(per_cpu_hrtimer.timer_initialized,
+								event->cpu);
+	struct hrtimer *event_hrtimer = &per_cpu(per_cpu_hrtimer.event_hrtimer,
+								event->cpu);
+
+	if (!timer_initialized) {
+		hrtimer_init(event_hrtimer, CLOCK_MONOTONIC,
+						HRTIMER_MODE_ABS_PINNED);
+		per_cpu(per_cpu_hrtimer.timer_initialized, event->cpu) = true;
+	}
+
+	event_hrtimer->function = event_hrtimer_cb;
+	hrtimer_start(event_hrtimer, event->node.expires,
+					HRTIMER_MODE_ABS_PINNED);
+}
+
+/**
+ * event_hrtimer_cb() : Callback function for hr timer.
+ *                      Make the client CB from here and remove the event
+ *                      from the time ordered queue.
+ */
+static enum hrtimer_restart event_hrtimer_cb(struct hrtimer *hrtimer)
+{
+	struct event_timer_info *event;
+	struct timerqueue_node *next;
+	unsigned long flags;
+	int cpu;
+
+	spin_lock_irqsave(&event_timer_lock, flags);
+	cpu = smp_processor_id();
+	next = timerqueue_getnext(&per_cpu(timer_head, cpu));
+
+	while (next && (ktime_to_ns(next->expires)
+		<= ktime_to_ns(hrtimer->node.expires))) {
+		event = container_of(next, struct event_timer_info, node);
+		if (!event)
+			goto hrtimer_cb_exit;
+
+		WARN_ON_ONCE(event->cpu != cpu);
+
+		if (msm_event_debug_mask & MSM_EVENT_TIMER_DEBUG)
+			pr_debug("Deleting event %p @ %lu(on cpu%d)\n", event,
+				(unsigned long)ktime_to_ns(next->expires), cpu);
+
+		timerqueue_del(&per_cpu(timer_head, cpu), &event->node);
+
+		if (event->function)
+			event->function(event->data);
+
+		next = timerqueue_getnext(&per_cpu(timer_head, cpu));
+	}
+
+	if (next) {
+		event = container_of(next, struct event_timer_info, node);
+		create_hrtimer(event);
+	}
+hrtimer_cb_exit:
+	spin_unlock_irqrestore(&event_timer_lock, flags);
+	return HRTIMER_NORESTART;
+}
+
+/**
+ * create_timer_smp(): Helper function used for setting up the timer on a CPU.
+ */
+static void create_timer_smp(void *data)
+{
+	unsigned long flags;
+	struct event_timer_info *event =
+		(struct event_timer_info *)data;
+	struct timerqueue_node *next;
+
+	spin_lock_irqsave(&event_timer_lock, flags);
+
+	if (is_event_active(event))
+		timerqueue_del(&per_cpu(timer_head, event->cpu), &event->node);
+
+	next = timerqueue_getnext(&per_cpu(timer_head, event->cpu));
+	timerqueue_add(&per_cpu(timer_head, event->cpu), &event->node);
+
+	if (msm_event_debug_mask & MSM_EVENT_TIMER_DEBUG)
+		pr_debug("Adding Event %p(on cpu%d) for %lu\n", event,
+		event->cpu,
+		(unsigned long)ktime_to_ns(event->node.expires));
+
+	if (!next || ktime_to_ns(event->node.expires) <
+				ktime_to_ns(next->expires)) {
+		if (msm_event_debug_mask & MSM_EVENT_TIMER_DEBUG)
+			pr_debug("Setting timer for %lu(on cpu%d)\n",
+			(unsigned long)ktime_to_ns(event->node.expires),
+			event->cpu);
+
+		create_hrtimer(event);
+	}
+	spin_unlock_irqrestore(&event_timer_lock, flags);
+}
+
+/**
+ *  setup_event_hrtimer() : Helper function to set up the timer on the
+ *                          event's CPU via smp_call_function_single().
+ *  @event: event handle causing the wakeup.
+ */
+static void setup_event_hrtimer(struct event_timer_info *event)
+{
+	smp_call_function_single(event->cpu, create_timer_smp, event, 1);
+}
+
+static void irq_affinity_release(struct kref *ref)
+{
+	struct event_timer_info *event;
+	struct irq_affinity_notify *notify =
+			container_of(ref, struct irq_affinity_notify, kref);
+
+	event = container_of(notify, struct event_timer_info, notify);
+	pr_debug("event = %p\n", event);
+}
+
+static void irq_affinity_change_notifier(struct irq_affinity_notify *notify,
+						const cpumask_t *mask_val)
+{
+	struct event_timer_info *event;
+	unsigned long flags;
+	unsigned int irq;
+	int old_cpu = -EINVAL, new_cpu = -EINVAL;
+	bool next_event = false;
+
+	event = container_of(notify, struct event_timer_info, notify);
+	irq = notify->irq;
+
+	if (!event)
+		return;
+
+	/*
+	 * This logic is in line with irq-gic.c for finding
+	 * the next affinity CPU.
+	 */
+	new_cpu = cpumask_any_and(mask_val, cpu_online_mask);
+	if (new_cpu >= nr_cpu_ids)
+		return;
+
+	old_cpu = event->cpu;
+
+	if (msm_event_debug_mask & MSM_EVENT_TIMER_DEBUG)
+		pr_debug("irq %d, event %p, old_cpu(%d)->new_cpu(%d).\n",
+						irq, event, old_cpu, new_cpu);
+
+	/* No change in IRQ affinity */
+	if (old_cpu == new_cpu)
+		return;
+
+	spin_lock_irqsave(&event_timer_lock, flags);
+
+	/*
+	 * If the event is not active, or if it is the next event and its
+	 * hrtimer callback is already running, just update event->cpu and
+	 * return.
+	 */
+	if (!is_event_active(event) ||
+		(is_event_next(event) &&
+		(hrtimer_try_to_cancel(&per_cpu(per_cpu_hrtimer.
+				event_hrtimer, old_cpu)) < 0))) {
+		event->cpu = new_cpu;
+		spin_unlock_irqrestore(&event_timer_lock, flags);
+		if (msm_event_debug_mask & MSM_EVENT_TIMER_DEBUG)
+			pr_debug("Event:%p is not active or in callback\n",
+					event);
+		return;
+	}
+
+	/* Note whether the event was the next one to expire */
+	if (is_event_next(event))
+		next_event = true;
+
+	event->cpu = new_cpu;
+
+	/*
+	 * We are here either because the hrtimer was active or because the
+	 * event is not next; delete the event from the timer queue anyway.
+	 */
+	timerqueue_del(&per_cpu(timer_head, old_cpu), &event->node);
+
+	if (msm_event_debug_mask & MSM_EVENT_TIMER_DEBUG)
+		pr_debug("Event:%p is in the list\n", event);
+
+	spin_unlock_irqrestore(&event_timer_lock, flags);
+
+	/*
+	 * Migrating the event timer to the new CPU is taken care of
+	 * automatically, since we have already updated event->cpu with
+	 * the new CPU.
+	 *
+	 * Typical cases are
+	 *
+	 * 1)
+	 *		C0			C1
+	 *		|			^
+	 *	-----------------		|
+	 *	|	|	|		|
+	 *	E1	E2	E3		|
+	 *		|(migrating)		|
+	 *		-------------------------
+	 *
+	 * 2)
+	 *		C0			C1
+	 *		|			^
+	 *	----------------		|
+	 *	|	|	|		|
+	 *	E1	E2	E3		|
+	 *	|(migrating)			|
+	 *	---------------------------------
+	 *
+	 * Here, after moving E1 to C1, we need to start
+	 * E2 on C0.
+	 */
+	spin_lock(&event_setup_lock);
+	/* Set up the event timer on the new cpu */
+	setup_event_hrtimer(event);
+
+	/* Set up the event on the old cpu */
+	if (next_event) {
+		struct timerqueue_node *next;
+
+		next = timerqueue_getnext(&per_cpu(timer_head, old_cpu));
+		if (next) {
+			event = container_of(next,
+					struct event_timer_info, node);
+			setup_event_hrtimer(event);
+		}
+	}
+	spin_unlock(&event_setup_lock);
+}
+
+/**
+ * activate_event_timer() : Set the expiration time for an event in absolute
+ *                           ktime. This is a one-shot event timer; clients
+ *                           should call this again to set another expiration.
+ *  @event : event handle.
+ *  @event_time : event time in absolute ktime.
+ */
+void activate_event_timer(struct event_timer_info *event, ktime_t event_time)
+{
+	if (!event)
+		return;
+
+	if (msm_event_debug_mask & MSM_EVENT_TIMER_DEBUG)
+		pr_debug("Adding event %p timer @ %lu(on cpu%d)\n", event,
+				(unsigned long)ktime_to_us(event_time),
+				event->cpu);
+
+	spin_lock(&event_setup_lock);
+	event->node.expires = event_time;
+	/* Start hrtimer and add event to rb tree */
+	setup_event_hrtimer(event);
+	spin_unlock(&event_setup_lock);
+}
+EXPORT_SYMBOL(activate_event_timer);
+
+/**
+ * deactivate_event_timer() : Deactivate an event timer; this removes the
+ *                            event from the time-ordered queue of event timers.
+ * @event: event handle.
+ */
+void deactivate_event_timer(struct event_timer_info *event)
+{
+	unsigned long flags;
+
+	if (msm_event_debug_mask & MSM_EVENT_TIMER_DEBUG)
+		pr_debug("Deactivate timer\n");
+
+	spin_lock_irqsave(&event_timer_lock, flags);
+	if (is_event_active(event)) {
+		if (is_event_next(event))
+			hrtimer_try_to_cancel(&per_cpu(
+				per_cpu_hrtimer.event_hrtimer, event->cpu));
+
+		timerqueue_del(&per_cpu(timer_head, event->cpu), &event->node);
+	}
+	spin_unlock_irqrestore(&event_timer_lock, flags);
+}
+
+/**
+ * destroy_event_timer() : Free the event info data structure allocated during
+ *                         add_event_timer().
+ * @event: event handle.
+ */
+void destroy_event_timer(struct event_timer_info *event)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&event_timer_lock, flags);
+	if (is_event_active(event)) {
+		if (is_event_next(event))
+			hrtimer_try_to_cancel(&per_cpu(
+				per_cpu_hrtimer.event_hrtimer, event->cpu));
+
+		timerqueue_del(&per_cpu(timer_head, event->cpu), &event->node);
+	}
+	spin_unlock_irqrestore(&event_timer_lock, flags);
+	kfree(event);
+}
+EXPORT_SYMBOL(destroy_event_timer);
+
+/**
+ * get_next_event_time() - Get the next wakeup event. Returns
+ *                         a ktime value of the next expiring event.
+ */
+ktime_t get_next_event_time(int cpu)
+{
+	unsigned long flags;
+	struct timerqueue_node *next;
+	struct event_timer_info *event;
+	ktime_t next_event = ns_to_ktime(0);
+
+	spin_lock_irqsave(&event_timer_lock, flags);
+	next = timerqueue_getnext(&per_cpu(timer_head, cpu));
+	event = container_of(next, struct event_timer_info, node);
+	spin_unlock_irqrestore(&event_timer_lock, flags);
+
+	if (!next || event->cpu != cpu)
+		return next_event;
+
+	next_event = hrtimer_get_remaining(
+				&per_cpu(per_cpu_hrtimer.event_hrtimer, cpu));
+
+	if (msm_event_debug_mask & MSM_EVENT_TIMER_DEBUG)
+		pr_debug("Next Event %lu(on cpu%d)\n",
+			(unsigned long)ktime_to_us(next_event), cpu);
+
+	return next_event;
+}
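
[Editor's note] A minimal sketch of the intended calling sequence for
the event-timer API above (client-side, illustrative names):

	#include <linux/errno.h>
	#include <linux/ktime.h>
	#include <soc/qcom/event_timer.h>

	static struct event_timer_info *ev;

	static void my_wakeup_cb(void *data)
	{
		/* runs from the per-cpu hrtimer callback */
	}

	static int my_client_init(unsigned int irq)
	{
		/* called once; returns the handle for later calls */
		ev = add_event_timer(irq, my_wakeup_cb, NULL);
		if (!ev)
			return -ENOMEM;

		/* one-shot timer: re-arm after each expiry */
		activate_event_timer(ev, ktime_add_us(ktime_get(), 5000));
		return 0;
	}

	static void my_client_exit(void)
	{
		deactivate_event_timer(ev);
		destroy_event_timer(ev);
	}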
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/soc/qcom/gladiator_erp_v2.c	2019-01-22 16:16:26.647274877 +0100
@@ -0,0 +1,859 @@
+/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/cpu_pm.h>
+#include <linux/platform_device.h>
+#include <soc/qcom/scm.h>
+#include <linux/of.h>
+#include <linux/clk.h>
+
+#define MODULE_NAME "gladiator-v2_error_reporting"
+#ifdef CONFIG_MSM_GLADIATOR_ERROR_V2_MAIN_LOGGER_ONLY
+#define OBSERVER_ERROR_ENABLE	0
+#else
+#define OBSERVER_ERROR_ENABLE	1
+#endif
+
+/* Register Offsets */
+#define GLADIATOR_ID_COREID	0x0
+#define GLADIATOR_ID_REVISIONID	0x4
+#define GLADIATOR_FAULTEN	0x1010
+#define GLADIATOR_ERRVLD	0x1014
+#define GLADIATOR_ERRCLR	0x1018
+#define GLADIATOR_ERRLOG0	0x101C
+#define GLADIATOR_ERRLOG1	0x1020
+#define GLADIATOR_ERRLOG2	0x1024
+#define GLADIATOR_ERRLOG3	0x1028
+#define GLADIATOR_ERRLOG4	0x102C
+#define GLADIATOR_ERRLOG5	0x1030
+#define GLADIATOR_ERRLOG6	0x1034
+#define GLADIATOR_ERRLOG7	0x1038
+#define GLADIATOR_ERRLOG8	0x103C
+#define OBSERVER_0_ID_COREID	0x8000
+#define OBSERVER_0_ID_REVISIONID	0x8004
+#define OBSERVER_0_FAULTEN	0x8008
+#define OBSERVER_0_ERRVLD	0x800C
+#define OBSERVER_0_ERRCLR	0x8010
+#define OBSERVER_0_ERRLOG0	0x8014
+#define OBSERVER_0_ERRLOG1	0x8018
+#define OBSERVER_0_ERRLOG2	0x801C
+#define OBSERVER_0_ERRLOG3	0x8020
+#define OBSERVER_0_ERRLOG4	0x8024
+#define OBSERVER_0_ERRLOG5	0x8028
+#define OBSERVER_0_ERRLOG6	0x802C
+#define OBSERVER_0_ERRLOG7	0x8030
+#define OBSERVER_0_ERRLOG8	0x8034
+#define OBSERVER_0_STALLEN	0x8038
+
+#define GLD_TRANS_OPCODE_MASK			0xE
+#define GLD_TRANS_OPCODE_SHIFT			1
+#define GLD_ERROR_TYPE_MASK				0x700
+#define GLD_ERROR_TYPE_SHIFT			8
+#define GLD_LEN1_MASK					0xFFF0000
+#define GLD_LEN1_SHIFT					16
+#define	GLD_TRANS_SOURCEID_MASK			0x7
+#define	GLD_TRANS_SOURCEID_SHIFT		0
+#define	GLD_TRANS_TARGETID_MASK			0x7
+#define	GLD_TRANS_TARGETID_SHIFT		0
+#define	GLD_ERRLOG_ERROR				0x7
+#define GLD_ERRLOG5_ERROR_TYPE_MASK		0xFF000000
+#define GLD_ERRLOG5_ERROR_TYPE_SHIFT	24
+#define GLD_ACE_PORT_PARITY_MASK		0xc000
+#define GLD_ACE_PORT_PARITY_SHIFT		14
+#define GLD_ACE_PORT_DISCONNECT_MASK	0xf0000
+#define GLD_ACE_PORT_DISCONNECT_SHIFT	16
+#define GLD_ACE_PORT_DIRECTORY_MASK		0xf00000
+#define GLD_ACE_PORT_DIRECTORY_SHIFT	20
+#define GLD_INDEX_PARITY_MASK			0x1FFF
+#define GLD_INDEX_PARITY_SHIFT			0
+#define OBS_TRANS_OPCODE_MASK			0x1E
+#define OBS_TRANS_OPCODE_SHIFT			1
+#define OBS_ERROR_TYPE_MASK				0x700
+#define OBS_ERROR_TYPE_SHIFT			8
+#define OBS_LEN1_MASK					0x7F0000
+#define OBS_LEN1_SHIFT					16
+
+struct msm_gladiator_data {
+	void __iomem *gladiator_virt_base;
+	int erp_irq;
+	struct notifier_block pm_notifier_block;
+	struct clk *qdss_clk;
+};
+
+static int enable_panic_on_error;
+module_param(enable_panic_on_error, int, 0);
+
+enum gld_trans_opcode {
+	GLD_RD,
+	GLD_RDX,
+	GLD_RDL,
+	GLD_RESERVED,
+	GLD_WR,
+	GLD_WRC,
+	GLD_PRE,
+};
+
+enum obs_trans_opcode {
+	OBS_RD,
+	OBS_RDW,
+	OBS_RDL,
+	OBS_RDX,
+	OBS_WR,
+	OBS_WRW,
+	OBS_WRC,
+	OBS_RESERVED,
+	OBS_PRE,
+	OBS_URG,
+};
+
+enum obs_err_code {
+	OBS_SLV,
+	OBS_DEC,
+	OBS_UNS,
+	OBS_DISC,
+	OBS_SEC,
+	OBS_HIDE,
+	OBS_TMO,
+	OBS_RSV,
+};
+
+enum err_log {
+	ID_COREID,
+	ID_REVISIONID,
+	FAULTEN,
+	ERRVLD,
+	ERRCLR,
+	ERR_LOG0,
+	ERR_LOG1,
+	ERR_LOG2,
+	ERR_LOG3,
+	ERR_LOG4,
+	ERR_LOG5,
+	ERR_LOG6,
+	ERR_LOG7,
+	ERR_LOG8,
+	STALLEN,
+	MAX_NUM,
+};
+
+enum type_logger_error {
+	DATA_TRANSFER_ERROR,
+	DVM_ERROR,
+	TX_ERROR,
+	TXR_ERROR,
+	DISCONNECT_ERROR,
+	DIRECTORY_ERROR,
+	PARITY_ERROR,
+};
+
+static void clear_gladiator_error(void __iomem *gladiator_virt_base)
+{
+	writel_relaxed(1, gladiator_virt_base + GLADIATOR_ERRCLR);
+	writel_relaxed(1, gladiator_virt_base + OBSERVER_0_ERRCLR);
+}
+
+static inline void print_gld_transaction(unsigned int opc)
+{
+	switch (opc) {
+	case GLD_RD:
+		pr_alert("Transaction type: READ\n");
+		break;
+	case GLD_RDX:
+		pr_alert("Transaction type: EXCLUSIVE READ\n");
+		break;
+	case GLD_RDL:
+		pr_alert("Transaction type: LINKED READ\n");
+		break;
+	case GLD_WR:
+		pr_alert("Transaction type: WRITE\n");
+		break;
+	case GLD_WRC:
+		pr_alert("Transaction type: CONDITIONAL WRITE\n");
+		break;
+	case GLD_PRE:
+		pr_alert("Transaction: Preamble packet of linked sequence\n");
+		break;
+	default:
+		pr_alert("Transaction type: Unknown; value:%u\n", opc);
+	}
+}
+
+static inline void print_gld_errtype(unsigned int errtype)
+{
+	if (errtype == 0)
+		pr_alert("Error type: Snoop data transfer\n");
+	else if (errtype == 1)
+		pr_alert("Error type: DVM error\n");
+	else if (errtype == 3)
+		pr_alert("Error type: Disconnect, directory, or parity error\n");
+	else
+		pr_alert("Error type: Unknown; value:%u\n", errtype);
+}
+
+static void decode_gld_errlog0(u32 err_reg)
+{
+	unsigned int opc, errtype, len1;
+
+	opc = (err_reg & GLD_TRANS_OPCODE_MASK) >> GLD_TRANS_OPCODE_SHIFT;
+	errtype = (err_reg & GLD_ERROR_TYPE_MASK) >> GLD_ERROR_TYPE_SHIFT;
+	len1 = (err_reg & GLD_LEN1_MASK) >> GLD_LEN1_SHIFT;
+
+	print_gld_transaction(opc);
+	print_gld_errtype(errtype);
+	pr_alert("number of payload bytes: %d\n", len1 + 1);
+}
+
+static void decode_gld_errlog1(u32 err_reg)
+{
+	if ((err_reg & GLD_ERRLOG_ERROR) == GLD_ERRLOG_ERROR)
+		pr_alert("Transaction issued on IO target generic interface\n");
+	else
+		pr_alert("Transaction source ID: %d\n",
+				(err_reg & GLD_TRANS_SOURCEID_MASK)
+				>> GLD_TRANS_SOURCEID_SHIFT);
+}
+
+static void decode_gld_errlog2(u32 err_reg)
+{
+	if ((err_reg & GLD_ERRLOG_ERROR) == GLD_ERRLOG_ERROR)
+		pr_alert("Error response coming from: external DVM network\n");
+	else
+		pr_alert("Error response coming from: Target ID: %d\n",
+				(err_reg & GLD_TRANS_TARGETID_MASK)
+				>> GLD_TRANS_TARGETID_SHIFT);
+}
+
+static void decode_ace_port_index(u32 type, u32 error)
+{
+	unsigned port;
+
+	switch (type) {
+	case DISCONNECT_ERROR:
+		port = (error & GLD_ACE_PORT_DISCONNECT_MASK)
+			>> GLD_ACE_PORT_DISCONNECT_SHIFT;
+		pr_alert("ACE port index: %d\n", port);
+		break;
+	case DIRECTORY_ERROR:
+		port = (error & GLD_ACE_PORT_DIRECTORY_MASK)
+			>> GLD_ACE_PORT_DIRECTORY_SHIFT;
+		pr_alert("ACE port index: %d\n", port);
+		break;
+	case PARITY_ERROR:
+		port = (error & GLD_ACE_PORT_PARITY_MASK)
+			>> GLD_ACE_PORT_PARITY_SHIFT;
+		pr_alert("ACE port index: %d\n", port);
+	}
+}
+
+static void decode_index_parity(u32 error)
+{
+	pr_alert("Index: %d\n",
+			(error & GLD_INDEX_PARITY_MASK)
+			>> GLD_INDEX_PARITY_SHIFT);
+}
+
+static void decode_gld_logged_error(u32 err_reg5)
+{
+	unsigned int log_err_type, i, value;
+
+	log_err_type = (err_reg5 & GLD_ERRLOG5_ERROR_TYPE_MASK)
+		>> GLD_ERRLOG5_ERROR_TYPE_SHIFT;
+	for (i = 0 ; i <= 6 ; i++) {
+		value = log_err_type & 0x1;
+		switch (i) {
+		case DATA_TRANSFER_ERROR:
+			if (value == 0)
+				continue;
+			pr_alert("Error type: Data transfer error\n");
+			break;
+		case DVM_ERROR:
+			if (value == 0)
+				continue;
+			pr_alert("Error type: DVM error\n");
+			break;
+		case TX_ERROR:
+			if (value == 0)
+				continue;
+			pr_alert("Error type: Tx error\n");
+			break;
+		case TXR_ERROR:
+			if (value == 0)
+				continue;
+			pr_alert("Error type: TxR error\n");
+			break;
+		case DISCONNECT_ERROR:
+			if (value == 0)
+				continue;
+			pr_alert("Error type: Disconnect error\n");
+			decode_ace_port_index(
+					DISCONNECT_ERROR,
+					err_reg5);
+			break;
+		case DIRECTORY_ERROR:
+			if (value == 0)
+				continue;
+			pr_alert("Error type: Directory error\n");
+			decode_ace_port_index(
+					DIRECTORY_ERROR,
+					err_reg5);
+			break;
+		case PARITY_ERROR:
+			if (value == 0)
+				continue;
+			pr_alert("Error type: Parity error\n");
+			decode_ace_port_index(PARITY_ERROR, err_reg5);
+			decode_index_parity(err_reg5);
+			break;
+		}
+		log_err_type = log_err_type >> 1;
+	}
+}
+
+static void decode_gld_errlog(u32 err_reg, unsigned int err_log)
+{
+	switch (err_log) {
+	case ERR_LOG0:
+		decode_gld_errlog0(err_reg);
+		break;
+	case ERR_LOG1:
+		decode_gld_errlog1(err_reg);
+		break;
+	case ERR_LOG2:
+		decode_gld_errlog2(err_reg);
+		break;
+	case ERR_LOG3:
+		pr_alert("Lower 32-bits of error address: %08x\n", err_reg);
+		break;
+	case ERR_LOG4:
+		pr_alert("Upper 32-bits of error address: %08x\n", err_reg);
+		break;
+	case ERR_LOG5:
+		pr_alert("Lower 32-bits of user: %08x\n", err_reg);
+		break;
+	case ERR_LOG6:
+		pr_alert("Mid 32-bits(63-32) of user: %08x\n", err_reg);
+		break;
+	case ERR_LOG7:
+		break;
+	case ERR_LOG8:
+		pr_alert("Upper 32-bits(95-64) of user: %08x\n", err_reg);
+		break;
+	default:
+		pr_alert("Invalid error register; reg num:%u\n", err_log);
+	}
+}
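+/*
+ * Editor's note: ERR_LOG3/ERR_LOG4 carry the lower and upper halves of
+ * the faulting address, so a consumer reassembles it as
+ *
+ *	addr = ((u64)errlog4 << 32) | errlog3;
+ */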
+
+static inline void print_obs_transaction(unsigned int opc)
+{
+	switch (opc) {
+	case OBS_RD:
+		pr_alert("Transaction type: READ\n");
+		break;
+	case OBS_RDW:
+		pr_alert("Transaction type: WRAPPED READ\n");
+		break;
+	case OBS_RDL:
+		pr_alert("Transaction type: LINKED READ\n");
+		break;
+	case OBS_RDX:
+		pr_alert("Transaction type: EXCLUSIVE READ\n");
+		break;
+	case OBS_WR:
+		pr_alert("Transaction type: WRITE\n");
+		break;
+	case OBS_WRW:
+		pr_alert("Transaction type: WRAPPED WRITE\n");
+		break;
+	case OBS_WRC:
+		pr_alert("Transaction type: CONDITIONAL WRITE\n");
+		break;
+	case OBS_PRE:
+		pr_alert("Transaction: Preamble packet of linked sequence\n");
+		break;
+	case OBS_URG:
+		pr_alert("Transaction type: Urgency Packet\n");
+		break;
+	default:
+		pr_alert("Transaction type: Unknown; value:%u\n", opc);
+	}
+}
+
+static inline void print_obs_errcode(unsigned int errcode)
+{
+	switch (errcode) {
+	case OBS_SLV:
+		pr_alert("Error code: Target error detected by slave\n");
+		pr_alert("Source: Target\n");
+		break;
+	case OBS_DEC:
+		pr_alert("Error code: Address decode error\n");
+		pr_alert("Source: Initiator NIU\n");
+		break;
+	case OBS_UNS:
+		pr_alert("Error code: Unsupported request\n");
+		pr_alert("Source: Target NIU\n");
+		break;
+	case OBS_DISC:
+		pr_alert("Error code: Disconnected target or domain\n");
+		pr_alert("Source: Power Disconnect\n");
+		break;
+	case OBS_SEC:
+		pr_alert("Error code: Security violation\n");
+		pr_alert("Source: Initiator NIU or Firewall\n");
+		break;
+	case OBS_HIDE:
+		pr_alert("Error code: Hidden security violation, reported as OK\n");
+		pr_alert("Source: Firewall\n");
+		break;
+	case OBS_TMO:
+		pr_alert("Error code: Time-out\n");
+		pr_alert("Source: Target NIU\n");
+		break;
+	default:
+		pr_alert("Error code: Unknown; code:%u\n", errcode);
+	}
+}
+
+static void decode_obs_errlog0(u32 err_reg)
+{
+	unsigned int opc, errcode, len1;
+
+	opc = (err_reg & OBS_TRANS_OPCODE_MASK) >> OBS_TRANS_OPCODE_SHIFT;
+	errcode = (err_reg & OBS_ERROR_TYPE_MASK) >> OBS_ERROR_TYPE_SHIFT;
+	len1 = (err_reg & OBS_LEN1_MASK) >> OBS_LEN1_SHIFT;
+
+	print_obs_transaction(opc);
+	print_obs_errcode(errcode);
+	pr_alert("Number of payload bytes: %d\n", len1 + 1);
+}
+
+static void decode_obs_errlog(u32 err_reg, unsigned int err_log)
+{
+	switch (err_log) {
+	case ERR_LOG0:
+		decode_obs_errlog0(err_reg);
+		break;
+	case ERR_LOG1:
+		pr_alert("RouteId of the error: %08x\n", err_reg);
+		break;
+	case ERR_LOG2:
+		/* reserved error log register */
+		break;
+	case ERR_LOG3:
+		pr_alert("Lower 32-bits of error address: %08x\n", err_reg);
+		break;
+	case ERR_LOG4:
+		pr_alert("Upper 12-bits of error address: %08x\n", err_reg);
+		break;
+	case ERR_LOG5:
+		pr_alert("Lower 13-bits of user: %08x\n", err_reg);
+		break;
+	case ERR_LOG6:
+		/* reserved error log register */
+		break;
+	case ERR_LOG7:
+		pr_alert("Security field of the logged error: %08x\n", err_reg);
+		break;
+	case ERR_LOG8:
+		/* reserved error log register */
+		break;
+	case STALLEN:
+		pr_alert("stall mode of the error logger: %08x\n",
+				err_reg & 0x1);
+		break;
+	default:
+		pr_alert("Invalid error register; reg num:%u\n", err_log);
+	}
+}
+
+static u32 get_gld_offset(unsigned int err_log)
+{
+	u32 offset = 0;
+
+	switch (err_log) {
+	case FAULTEN:
+		offset = GLADIATOR_FAULTEN;
+		break;
+	case ERRVLD:
+		offset = GLADIATOR_ERRVLD;
+		break;
+	case ERRCLR:
+		offset = GLADIATOR_ERRCLR;
+		break;
+	case ERR_LOG0:
+		offset = GLADIATOR_ERRLOG0;
+		break;
+	case ERR_LOG1:
+		offset = GLADIATOR_ERRLOG1;
+		break;
+	case ERR_LOG2:
+		offset = GLADIATOR_ERRLOG2;
+		break;
+	case ERR_LOG3:
+		offset = GLADIATOR_ERRLOG3;
+		break;
+	case ERR_LOG4:
+		offset = GLADIATOR_ERRLOG4;
+		break;
+	case ERR_LOG5:
+		offset = GLADIATOR_ERRLOG5;
+		break;
+	case ERR_LOG6:
+		offset = GLADIATOR_ERRLOG6;
+		break;
+	case ERR_LOG7:
+		offset = GLADIATOR_ERRLOG7;
+		break;
+	case ERR_LOG8:
+		offset = GLADIATOR_ERRLOG8;
+		break;
+	default:
+		pr_alert("Invalid gladiator error register; reg num:%u\n",
+				err_log);
+	}
+	return offset;
+}
+
+static u32 get_obs_offset(unsigned int err_log)
+{
+	u32 offset = 0;
+
+	switch (err_log) {
+	case ID_COREID:
+		offset = OBSERVER_0_ID_COREID;
+		break;
+	case ID_REVISIONID:
+		offset = OBSERVER_0_ID_REVISIONID;
+		break;
+	case FAULTEN:
+		offset = OBSERVER_0_FAULTEN;
+		break;
+	case ERRVLD:
+		offset = OBSERVER_0_ERRVLD;
+		break;
+	case ERRCLR:
+		offset = OBSERVER_0_ERRCLR;
+		break;
+	case ERR_LOG0:
+		offset = OBSERVER_0_ERRLOG0;
+		break;
+	case ERR_LOG1:
+		offset = OBSERVER_0_ERRLOG1;
+		break;
+	case ERR_LOG2:
+		offset = OBSERVER_0_ERRLOG2;
+		break;
+	case ERR_LOG3:
+		offset = OBSERVER_0_ERRLOG3;
+		break;
+	case ERR_LOG4:
+		offset = OBSERVER_0_ERRLOG4;
+		break;
+	case ERR_LOG5:
+		offset = OBSERVER_0_ERRLOG5;
+		break;
+	case ERR_LOG6:
+		offset = OBSERVER_0_ERRLOG6;
+		break;
+	case ERR_LOG7:
+		offset = OBSERVER_0_ERRLOG7;
+		break;
+	case ERR_LOG8:
+		offset = OBSERVER_0_ERRLOG8;
+		break;
+	case STALLEN:
+		offset = OBSERVER_0_STALLEN;
+		break;
+	default:
+		pr_alert("Invalid observer error register; reg num:%u\n",
+				err_log);
+	}
+	return offset;
+}
+
+static void decode_gld_errlog5(struct msm_gladiator_data *msm_gld_data)
+{
+	unsigned int errtype;
+	u32 err_reg0, err_reg5;
+
+	err_reg0 = readl_relaxed(msm_gld_data->gladiator_virt_base +
+			get_gld_offset(ERR_LOG0));
+	err_reg5 = readl_relaxed(msm_gld_data->gladiator_virt_base +
+			get_gld_offset(ERR_LOG5));
+
+	errtype = (err_reg0 & GLD_ERROR_TYPE_MASK) >> GLD_ERROR_TYPE_SHIFT;
+	if (errtype == 3)
+		decode_gld_logged_error(err_reg5);
+	else if (errtype == 0 || errtype == 1)
+		pr_alert("Lower 32-bits of user: %08x\n", err_reg5);
+	else
+		pr_alert("Error type: Unknown; value:%u\n", errtype);
+}
+
+static irqreturn_t msm_gladiator_isr(int irq, void *dev_id)
+{
+	u32 err_reg;
+	unsigned int err_log, err_buf[MAX_NUM];
+
+	struct msm_gladiator_data *msm_gld_data = dev_id;
+
+	/* Check validity */
+	bool gld_err_valid = readl_relaxed(msm_gld_data->gladiator_virt_base +
+			GLADIATOR_ERRVLD);
+
+	bool obsrv_err_valid = readl_relaxed(
+			msm_gld_data->gladiator_virt_base + OBSERVER_0_ERRVLD);
+
+	if (!gld_err_valid && !obsrv_err_valid) {
+		pr_err("%s: Invalid Gladiator error reported, clearing it\n",
+				__func__);
+		/* Clear IRQ */
+		clear_gladiator_error(msm_gld_data->gladiator_virt_base);
+		return IRQ_HANDLED;
+	}
+	pr_alert("Gladiator Error Detected:\n");
+	if (gld_err_valid) {
+		for (err_log = FAULTEN; err_log <= ERR_LOG8; err_log++) {
+			err_buf[err_log] = readl_relaxed(
+					msm_gld_data->gladiator_virt_base +
+					get_gld_offset(err_log));
+		}
+		pr_alert("Main log register data:\n%08x %08x %08x %08x\n"
+				"%08x %08x %08x %08x\n%08x %08x %08x %08x\n",
+			err_buf[2], err_buf[3], err_buf[4], err_buf[5],
+			err_buf[6], err_buf[7], err_buf[8], err_buf[9],
+			err_buf[10], err_buf[11], err_buf[12], err_buf[13]);
+	}
+
+	if (obsrv_err_valid) {
+		for (err_log = ID_COREID; err_log <= STALLEN; err_log++) {
+			err_buf[err_log] = readl_relaxed(
+					msm_gld_data->gladiator_virt_base +
+					get_obs_offset(err_log));
+		}
+		pr_alert("Observer log register data:\n%08x %08x %08x %08x\n"
+				"%08x %08x %08x %08x\n%08x %08x %08x %08x\n"
+				"%08x\n",
+			err_buf[0], err_buf[1], err_buf[2], err_buf[3],
+			err_buf[4], err_buf[5], err_buf[6], err_buf[7],
+			err_buf[8], err_buf[9], err_buf[10], err_buf[11],
+			err_buf[12]);
+	}
+
+	if (gld_err_valid) {
+		pr_alert("Main error log register data:\n");
+		for (err_log = ERR_LOG0; err_log <= ERR_LOG8; err_log++) {
+			/* skip log register 7 as it is reserved */
+			if (err_log == ERR_LOG7)
+				continue;
+			if (err_log == ERR_LOG5) {
+				decode_gld_errlog5(msm_gld_data);
+				continue;
+			}
+			err_reg = readl_relaxed(
+					msm_gld_data->gladiator_virt_base +
+					get_gld_offset(err_log));
+			decode_gld_errlog(err_reg, err_log);
+		}
+	}
+	if (obsrv_err_valid) {
+		pr_alert("Observer error log register data:\n");
+		for (err_log = ERR_LOG0; err_log <= STALLEN; err_log++)	{
+			/* skip log register 2, 6 and 8 as they are reserved */
+			if ((err_log == ERR_LOG2) || (err_log == ERR_LOG6)
+					|| (err_log == ERR_LOG8))
+				continue;
+			err_reg = readl_relaxed(
+					msm_gld_data->gladiator_virt_base +
+					get_obs_offset(err_log));
+			decode_obs_errlog(err_reg, err_log);
+		}
+	}
+	/* Clear IRQ */
+	clear_gladiator_error(msm_gld_data->gladiator_virt_base);
+	if (enable_panic_on_error)
+		panic("Gladiator Cache Interconnect Error Detected!\n");
+	else
+		WARN(1, "Gladiator Cache Interconnect Error Detected\n");
+
+	return IRQ_HANDLED;
+}
+
+static const struct of_device_id gladiator_erp_v2_match_table[] = {
+	{ .compatible = "qcom,msm-gladiator-v2" },
+	{},
+};
+
+static int parse_dt_node(struct platform_device *pdev,
+		struct msm_gladiator_data *msm_gld_data)
+{
+	int ret = 0;
+	struct resource *res;
+
+	res = platform_get_resource_byname(pdev,
+			IORESOURCE_MEM, "gladiator_base");
+	if (!res)
+		return -ENODEV;
+	if (!devm_request_mem_region(&pdev->dev, res->start,
+				resource_size(res),
+				"msm-gladiator-erp")) {
+
+		dev_err(&pdev->dev, "%s cannot reserve gladiator erp region\n",
+				__func__);
+		return -ENXIO;
+	}
+	msm_gld_data->gladiator_virt_base  = devm_ioremap(&pdev->dev,
+			res->start, resource_size(res));
+	if (!msm_gld_data->gladiator_virt_base) {
+		dev_err(&pdev->dev, "%s cannot map gladiator register space\n",
+				__func__);
+		return -ENXIO;
+	}
+	msm_gld_data->erp_irq = platform_get_irq(pdev, 0);
+	if (msm_gld_data->erp_irq < 0)
+		return -ENODEV;
+
+	/* clear existing errors before enabling the interrupt */
+	clear_gladiator_error(msm_gld_data->gladiator_virt_base);
+	ret = devm_request_irq(&pdev->dev, msm_gld_data->erp_irq,
+			msm_gladiator_isr, IRQF_TRIGGER_HIGH,
+			"gladiator-error", msm_gld_data);
+	if (ret)
+		dev_err(&pdev->dev, "Failed to register irq handler\n");
+
+	return ret;
+}
+
+static inline void gladiator_irq_init(void __iomem *gladiator_virt_base)
+{
+	writel_relaxed(1, gladiator_virt_base + GLADIATOR_FAULTEN);
+	writel_relaxed(OBSERVER_ERROR_ENABLE,
+			gladiator_virt_base + OBSERVER_0_FAULTEN);
+}
+
+#define CCI_LEVEL 2
+static int gladiator_erp_pm_callback(struct notifier_block *nb,
+		unsigned long val, void *data)
+{
+	unsigned int level = (unsigned long) data;
+	struct msm_gladiator_data *msm_gld_data = container_of(nb,
+			struct msm_gladiator_data, pm_notifier_block);
+
+	if (level != CCI_LEVEL)
+		return NOTIFY_DONE;
+
+	switch (val) {
+	case CPU_CLUSTER_PM_EXIT:
+		gladiator_irq_init(msm_gld_data->gladiator_virt_base);
+		break;
+	default:
+		return NOTIFY_DONE;
+	}
+
+	return NOTIFY_OK;
+}
+
+static int gladiator_erp_v2_probe(struct platform_device *pdev)
+{
+	int ret = -EINVAL;
+	struct msm_gladiator_data *msm_gld_data;
+
+	msm_gld_data = devm_kzalloc(&pdev->dev,
+			sizeof(struct msm_gladiator_data), GFP_KERNEL);
+	if (!msm_gld_data) {
+		ret = -ENOMEM;
+		goto bail;
+	}
+
+	if (of_property_match_string(pdev->dev.of_node,
+				"clock-names", "atb_clk") >= 0) {
+		msm_gld_data->qdss_clk = devm_clk_get(&pdev->dev, "atb_clk");
+		if (IS_ERR(msm_gld_data->qdss_clk)) {
+			ret = PTR_ERR(msm_gld_data->qdss_clk);
+			dev_err(&pdev->dev, "Failed to get QDSS ATB clock\n");
+			goto bail;
+		}
+	} else {
+		dev_err(&pdev->dev, "No matching string of QDSS ATB clock\n");
+		goto bail;
+	}
+
+	ret = clk_prepare_enable(msm_gld_data->qdss_clk);
+	if (ret)
+		goto bail;
+
+	ret = parse_dt_node(pdev, msm_gld_data);
+	if (ret)
+		goto err_atb_clk;
+	msm_gld_data->pm_notifier_block.notifier_call =
+		gladiator_erp_pm_callback;
+
+	gladiator_irq_init(msm_gld_data->gladiator_virt_base);
+	platform_set_drvdata(pdev, msm_gld_data);
+	cpu_pm_register_notifier(&msm_gld_data->pm_notifier_block);
+#ifdef CONFIG_PANIC_ON_GLADIATOR_ERROR_V2
+	enable_panic_on_error = 1;
+#endif
+	dev_info(&pdev->dev, "MSM Gladiator Error Reporting V2 Initialized\n");
+	return ret;
+
+err_atb_clk:
+	clk_disable_unprepare(msm_gld_data->qdss_clk);
+
+bail:
+	dev_err(&pdev->dev, "Probe failed bailing out\n");
+	return ret;
+}
+
+static int gladiator_erp_v2_remove(struct platform_device *pdev)
+{
+	struct msm_gladiator_data *msm_gld_data = platform_get_drvdata(pdev);
+
+	platform_set_drvdata(pdev, NULL);
+	cpu_pm_unregister_notifier(&msm_gld_data->pm_notifier_block);
+	clk_disable_unprepare(msm_gld_data->qdss_clk);
+	return 0;
+}
+
+static struct platform_driver gladiator_erp_driver = {
+	.probe = gladiator_erp_v2_probe,
+	.remove = gladiator_erp_v2_remove,
+	.driver = {
+		.name = MODULE_NAME,
+		.owner = THIS_MODULE,
+		.of_match_table = gladiator_erp_v2_match_table,
+	},
+};
+
+static int __init init_gladiator_erp_v2(void)
+{
+	int ret;
+
+	ret = scm_is_secure_device();
+	if (ret == 0) {
+		pr_info("Gladiator Error Reporting not available\n");
+		return -ENODEV;
+	}
+
+	return platform_driver_register(&gladiator_erp_driver);
+}
+module_init(init_gladiator_erp_v2);
+
+static void __exit exit_gladiator_erp_v2(void)
+{
+	platform_driver_unregister(&gladiator_erp_driver);
+}
+module_exit(exit_gladiator_erp_v2);
+
+MODULE_DESCRIPTION("Gladiator Error Reporting V2");
+MODULE_LICENSE("GPL v2");
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/soc/qcom/gladiator_hang_detect.c	2019-01-22 16:16:26.647274877 +0100
@@ -0,0 +1,561 @@
+/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <linux/cpu.h>
+#include <linux/sysfs.h>
+#include <linux/kobject.h>
+#include <linux/stat.h>
+#include <soc/qcom/scm.h>
+#include <linux/platform_device.h>
+
+#define ACE_OFFSET	0
+#define IO_OFFSET	2
+#define M1_OFFSET	3
+#define M2_OFFSET	4
+#define PCIO_OFFSET	5
+#define ENABLE_MASK_BITS	0x1
+
+#define _VAL(z)			(ENABLE_MASK_BITS << z##_OFFSET)
+#define _VALUE(_val, z)		((_val) << (z##_OFFSET))
+#define _WRITE(x, y, z)		((~(_VAL(z)) & (y)) | _VALUE(x, z))
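+
+/*
+ * _WRITE(x, y, z) is a read-modify-write of a single port-enable bit: it
+ * clears bit z##_OFFSET in the current register value y, then ORs in the
+ * new value x at that offset. For example, _WRITE(1, reg, M1) expands to
+ * ((~(0x1 << 3) & (reg)) | (1 << 3)), i.e. it sets bit 3 (the M1 port)
+ * while preserving every other bit of reg.
+ */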
+
+#define NR_GLA_REG 6
+#define MODULE_NAME	"gladiator_hang_detect"
+#define MAX_THRES	0xFFFFFFFF
+#define MAX_LEN_SYSFS 12
+
+struct hang_detect {
+	phys_addr_t threshold[NR_GLA_REG];
+	phys_addr_t config;
+	int ACE_enable, IO_enable, M1_enable, M2_enable, PCIO_enable;
+	uint32_t ACE_threshold, IO_threshold, M1_threshold, M2_threshold,
+			 PCIO_threshold;
+	struct kobject kobj;
+	struct mutex lock;
+};
+
+/* interface for exporting attributes */
+struct gladiator_hang_attr {
+	struct attribute        attr;
+	ssize_t (*show)(struct kobject *kobj, struct attribute *attr,
+			char *buf);
+	ssize_t (*store)(struct kobject *kobj, struct attribute *attr,
+			const char *buf, size_t count);
+};
+
+#define GLADIATOR_HANG_ATTR(_name, _mode, _show, _store)	\
+	struct gladiator_hang_attr hang_attr_##_name =	\
+			__ATTR(_name, _mode, _show, _store)
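+
+/*
+ * For example, GLADIATOR_HANG_ATTR(ace_enable, S_IRUGO|S_IWUSR,
+ * show_ace_enable, store_ace_enable) declares hang_attr_ace_enable, a
+ * sysfs attribute named "ace_enable" with 0644 permissions whose
+ * accesses are dispatched through attr_show()/attr_store() below.
+ */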
+
+#define to_gladiator_hang_dev(kobj) \
+	container_of(kobj, struct hang_detect, kobj)
+
+#define to_gladiator_attr(_attr) \
+	container_of(_attr, struct gladiator_hang_attr, attr)
+
+static void set_threshold(int offset, struct hang_detect *hang_dev,
+		int32_t threshold_val)
+{
+	switch (offset) {
+	case ACE_OFFSET:
+		hang_dev->ACE_threshold = threshold_val;
+		break;
+	case IO_OFFSET:
+		hang_dev->IO_threshold = threshold_val;
+		break;
+	case M1_OFFSET:
+		hang_dev->M1_threshold = threshold_val;
+		break;
+	case M2_OFFSET:
+		hang_dev->M2_threshold = threshold_val;
+		break;
+	case PCIO_OFFSET:
+		hang_dev->PCIO_threshold = threshold_val;
+		break;
+	}
+}
+
+static void get_threshold(int offset, struct hang_detect *hang_dev,
+		uint32_t *reg_value)
+{
+	switch (offset) {
+	case ACE_OFFSET:
+		*reg_value = hang_dev->ACE_threshold;
+		break;
+	case IO_OFFSET:
+		*reg_value = hang_dev->IO_threshold;
+		break;
+	case M1_OFFSET:
+		*reg_value = hang_dev->M1_threshold;
+		break;
+	case M2_OFFSET:
+		*reg_value = hang_dev->M2_threshold;
+		break;
+	case PCIO_OFFSET:
+		*reg_value = hang_dev->PCIO_threshold;
+		break;
+	}
+}
+
+static void set_enable(int offset, struct hang_detect *hang_dev,
+		int enabled)
+{
+	switch (offset) {
+	case ACE_OFFSET:
+		hang_dev->ACE_enable = enabled;
+		break;
+	case IO_OFFSET:
+		hang_dev->IO_enable = enabled;
+		break;
+	case M1_OFFSET:
+		hang_dev->M1_enable = enabled;
+		break;
+	case M2_OFFSET:
+		hang_dev->M2_enable = enabled;
+		break;
+	case PCIO_OFFSET:
+		hang_dev->PCIO_enable = enabled;
+		break;
+	}
+}
+
+static void get_enable(int offset, struct hang_detect *hang_dev,
+		uint32_t *reg_value)
+{
+	switch (offset) {
+	case ACE_OFFSET:
+		*reg_value = hang_dev->ACE_enable;
+		break;
+	case IO_OFFSET:
+		*reg_value = hang_dev->IO_enable;
+		break;
+	case M1_OFFSET:
+		*reg_value = hang_dev->M1_enable;
+		break;
+	case M2_OFFSET:
+		*reg_value = hang_dev->M2_enable;
+		break;
+	case PCIO_OFFSET:
+		*reg_value = hang_dev->PCIO_enable;
+		break;
+	}
+}
+
+static void scm_enable_write(int offset, struct hang_detect *hang_dev,
+		int enabled, uint32_t reg_value, int *ret)
+{
+	switch (offset) {
+	case ACE_OFFSET:
+		*ret = scm_io_write(hang_dev->config,
+			_WRITE(enabled, reg_value, ACE));
+		break;
+	case IO_OFFSET:
+		*ret = scm_io_write(hang_dev->config,
+				_WRITE(enabled, reg_value, IO));
+		break;
+	case M1_OFFSET:
+		*ret = scm_io_write(hang_dev->config,
+				_WRITE(enabled, reg_value, M1));
+		break;
+	case M2_OFFSET:
+		*ret = scm_io_write(hang_dev->config,
+				_WRITE(enabled, reg_value, M2));
+		break;
+	case PCIO_OFFSET:
+		*ret = scm_io_write(hang_dev->config,
+				_WRITE(enabled, reg_value, PCIO));
+		break;
+	}
+}
+
+static int enable_check(const char *buf, int *enabled_pt)
+{
+	int ret;
+
+	ret = kstrtoint(buf, 0, enabled_pt);
+	if (ret < 0)
+		return ret;
+	if (!(*enabled_pt == 0 || *enabled_pt == 1))
+		return -EINVAL;
+	return ret;
+}
+
+
+static inline ssize_t generic_enable_show(struct kobject *kobj,
+		struct attribute *attr, char *buf, int offset)
+{
+	struct hang_detect *hang_dev = to_gladiator_hang_dev(kobj);
+	uint32_t reg_value;
+
+	get_enable(offset, hang_dev, &reg_value);
+	return snprintf(buf, MAX_LEN_SYSFS, "%u\n", reg_value);
+}
+
+static inline ssize_t generic_threshold_show(struct kobject *kobj,
+		struct attribute *attr, char *buf, int offset)
+{
+	struct hang_detect *hang_dev = to_gladiator_hang_dev(kobj);
+	uint32_t reg_value;
+
+	get_threshold(offset, hang_dev, &reg_value);
+	return snprintf(buf, MAX_LEN_SYSFS, "0x%x\n", reg_value);
+}
+
+static inline ssize_t generic_threshold_store(struct kobject *kobj,
+		struct attribute *attr, const char *buf, size_t count,
+		int offset)
+{
+	struct hang_detect *hang_dev = to_gladiator_hang_dev(kobj);
+	uint32_t threshold_val;
+	int ret;
+
+	ret = kstrtouint(buf, 0, &threshold_val);
+	if (ret < 0)
+		return ret;
+	if (threshold_val == 0)
+		return -EINVAL;
+	if (scm_io_write(hang_dev->threshold[offset],
+				threshold_val)) {
+		pr_err("%s: Failed to set threshold for gladiator port\n",
+				__func__);
+		return -EIO;
+	}
+	set_threshold(offset, hang_dev, threshold_val);
+	return count;
+}
+
+static inline ssize_t generic_enable_store(struct kobject *kobj,
+		struct attribute *attr, const char *buf, size_t count,
+		int offset)
+{
+	int ret, enabled;
+	uint32_t reg_value;
+	struct hang_detect *hang_dev = to_gladiator_hang_dev(kobj);
+
+	ret = enable_check(buf, &enabled);
+	if (ret < 0)
+		return ret;
+	get_threshold(offset, hang_dev, &reg_value);
+	if (reg_value == 0)
+		return -EPERM;
+	mutex_lock(&hang_dev->lock);
+	reg_value = scm_io_read(hang_dev->config);
+
+	scm_enable_write(offset, hang_dev, enabled, reg_value, &ret);
+
+	if (ret) {
+		pr_err("%s: Gladiator failed to set enable for port offset %d\n",
+				__func__, offset);
+		mutex_unlock(&hang_dev->lock);
+		return -EIO;
+	}
+	mutex_unlock(&hang_dev->lock);
+	set_enable(offset, hang_dev, enabled);
+	return count;
+}
+
+
+static ssize_t attr_show(struct kobject *kobj, struct attribute *attr,
+				char *buf)
+{
+	struct gladiator_hang_attr *gladiator_attr = to_gladiator_attr(attr);
+	ssize_t ret = -EIO;
+
+	if (gladiator_attr->show)
+		ret = gladiator_attr->show(kobj, attr, buf);
+
+	return ret;
+}
+
+static ssize_t attr_store(struct kobject *kobj, struct attribute *attr,
+				const char *buf, size_t count)
+{
+	struct gladiator_hang_attr *gladiator_attr = to_gladiator_attr(attr);
+	ssize_t ret = -EIO;
+
+	if (gladiator_attr->store)
+		ret = gladiator_attr->store(kobj, attr, buf, count);
+
+	return ret;
+}
+
+static const struct sysfs_ops gladiator_sysfs_ops = {
+	.show	= attr_show,
+	.store	= attr_store,
+};
+
+static struct kobj_type gladiator_ktype = {
+	.sysfs_ops	= &gladiator_sysfs_ops,
+};
+
+static ssize_t show_ace_threshold(struct kobject *kobj, struct attribute *attr,
+				char *buf)
+{
+	return generic_threshold_show(kobj, attr, buf, ACE_OFFSET);
+}
+
+static ssize_t store_ace_threshold(struct kobject *kobj, struct attribute *attr,
+				const char *buf, size_t count)
+{
+	return generic_threshold_store(kobj, attr, buf, count, ACE_OFFSET);
+}
+GLADIATOR_HANG_ATTR(ace_threshold, S_IRUGO|S_IWUSR, show_ace_threshold,
+					store_ace_threshold);
+
+static ssize_t show_io_threshold(struct kobject *kobj, struct attribute *attr,
+				char *buf)
+{
+	return generic_threshold_show(kobj, attr, buf, IO_OFFSET);
+}
+
+static ssize_t store_io_threshold(struct kobject *kobj, struct attribute *attr,
+				const char *buf, size_t count)
+{
+	return generic_threshold_store(kobj, attr, buf, count, IO_OFFSET);
+}
+GLADIATOR_HANG_ATTR(io_threshold, S_IRUGO|S_IWUSR, show_io_threshold,
+					store_io_threshold);
+
+static ssize_t show_m1_threshold(struct kobject *kobj, struct attribute *attr,
+				char *buf)
+{
+	return generic_threshold_show(kobj, attr, buf, M1_OFFSET);
+}
+
+static ssize_t store_m1_threshold(struct kobject *kobj, struct attribute *attr,
+				const char *buf, size_t count)
+{
+	return generic_threshold_store(kobj, attr, buf, count, M1_OFFSET);
+}
+GLADIATOR_HANG_ATTR(m1_threshold, S_IRUGO|S_IWUSR, show_m1_threshold,
+					store_m1_threshold);
+
+static ssize_t show_m2_threshold(struct kobject *kobj, struct attribute *attr,
+				char *buf)
+{
+	return generic_threshold_show(kobj, attr, buf, M2_OFFSET);
+}
+
+static ssize_t store_m2_threshold(struct kobject *kobj, struct attribute *attr,
+				const char *buf, size_t count)
+{
+	return generic_threshold_store(kobj, attr, buf, count, M2_OFFSET);
+}
+GLADIATOR_HANG_ATTR(m2_threshold, S_IRUGO|S_IWUSR, show_m2_threshold,
+					store_m2_threshold);
+
+static ssize_t show_pcio_threshold(struct kobject *kobj, struct attribute *attr,
+				char *buf)
+{
+	return generic_threshold_show(kobj, attr, buf, PCIO_OFFSET);
+}
+
+static ssize_t store_pcio_threshold(struct kobject *kobj, struct attribute *attr,
+				const char *buf, size_t count)
+{
+	return generic_threshold_store(kobj, attr, buf, count, PCIO_OFFSET);
+}
+GLADIATOR_HANG_ATTR(pcio_threshold, S_IRUGO|S_IWUSR, show_pcio_threshold,
+					store_pcio_threshold);
+
+static ssize_t show_ace_enable(struct kobject *kobj,
+			struct attribute *attr, char *buf)
+{
+	return generic_enable_show(kobj, attr, buf, ACE_OFFSET);
+}
+
+static ssize_t store_ace_enable(struct kobject *kobj,
+			struct attribute *attr, const char *buf, size_t count)
+{
+	return generic_enable_store(kobj, attr, buf, count, ACE_OFFSET);
+}
+GLADIATOR_HANG_ATTR(ace_enable, S_IRUGO|S_IWUSR, show_ace_enable,
+		store_ace_enable);
+
+static ssize_t show_io_enable(struct kobject *kobj,
+			struct attribute *attr, char *buf)
+{
+	return generic_enable_show(kobj, attr, buf, IO_OFFSET);
+}
+
+static ssize_t store_io_enable(struct kobject *kobj,
+			struct attribute *attr, const char *buf, size_t count)
+{
+	return generic_enable_store(kobj, attr, buf, count, IO_OFFSET);
+}
+GLADIATOR_HANG_ATTR(io_enable, S_IRUGO|S_IWUSR,
+		show_io_enable, store_io_enable);
+
+
+static ssize_t show_m1_enable(struct kobject *kobj,
+			struct attribute *attr, char *buf)
+{
+	return generic_enable_show(kobj, attr, buf, M1_OFFSET);
+}
+
+static ssize_t store_m1_enable(struct kobject *kobj,
+			struct attribute *attr, const char *buf, size_t count)
+{
+	return generic_enable_store(kobj, attr, buf, count, M1_OFFSET);
+}
+GLADIATOR_HANG_ATTR(m1_enable, S_IRUGO|S_IWUSR,
+		show_m1_enable, store_m1_enable);
+
+static ssize_t show_m2_enable(struct kobject *kobj,
+			struct attribute *attr, char *buf)
+{
+	return generic_enable_show(kobj, attr, buf, M2_OFFSET);
+}
+
+static ssize_t store_m2_enable(struct kobject *kobj,
+			struct attribute *attr, const char *buf, size_t count)
+{
+	return generic_enable_store(kobj, attr, buf, count, M2_OFFSET);
+}
+GLADIATOR_HANG_ATTR(m2_enable, S_IRUGO|S_IWUSR,
+		show_m2_enable, store_m2_enable);
+
+static ssize_t show_pcio_enable(struct kobject *kobj,
+			struct attribute *attr, char *buf)
+{
+	return generic_enable_show(kobj, attr, buf, PCIO_OFFSET);
+}
+
+static ssize_t store_pcio_enable(struct kobject *kobj,
+			struct attribute *attr, const char *buf, size_t count)
+{
+	return generic_enable_store(kobj, attr, buf, count, PCIO_OFFSET);
+}
+GLADIATOR_HANG_ATTR(pcio_enable, S_IRUGO|S_IWUSR,
+		show_pcio_enable, store_pcio_enable);
+
+static struct attribute *hang_attrs[] = {
+	&hang_attr_ace_threshold.attr,
+	&hang_attr_io_threshold.attr,
+	&hang_attr_m1_threshold.attr,
+	&hang_attr_m2_threshold.attr,
+	&hang_attr_pcio_threshold.attr,
+	&hang_attr_ace_enable.attr,
+	&hang_attr_io_enable.attr,
+	&hang_attr_m1_enable.attr,
+	&hang_attr_m2_enable.attr,
+	&hang_attr_pcio_enable.attr,
+	NULL
+};
+
+static struct attribute_group hang_attr_group = {
+	.attrs = hang_attrs,
+};
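+
+/*
+ * The attribute group is registered under the CPU subsystem kobject as
+ * "gladiator_hang_detect", so a typical userspace sequence (a sketch;
+ * the exact sysfs mount point may vary) is:
+ *
+ *   echo 0xffffffff > /sys/devices/system/cpu/gladiator_hang_detect/ace_threshold
+ *   echo 1 > /sys/devices/system/cpu/gladiator_hang_detect/ace_enable
+ *
+ * Writing the threshold first matters: generic_enable_store() returns
+ * -EPERM while the port's threshold is still zero.
+ */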
+
+static const struct of_device_id msm_gladiator_hang_detect_table[] = {
+	{ .compatible = "qcom,gladiator-hang-detect" },
+	{}
+};
+
+static int msm_gladiator_hang_detect_probe(struct platform_device *pdev)
+{
+	struct device_node *node = pdev->dev.of_node;
+	struct hang_detect *hang_det = NULL;
+	int i = 0, ret;
+	u32 treg[NR_GLA_REG], creg;
+
+	if (!pdev->dev.of_node)
+		return -ENODEV;
+
+	hang_det = devm_kzalloc(&pdev->dev,
+			sizeof(struct hang_detect), GFP_KERNEL);
+
+	if (!hang_det) {
+		pr_err("Can't allocate hang_detect memory\n");
+		return -ENOMEM;
+	}
+
+	ret = of_property_read_u32_array(node, "qcom,threshold-arr",
+			treg, NR_GLA_REG);
+	if (ret) {
+		pr_err("Can't get threshold-arr property\n");
+		return -EINVAL;
+	}
+
+	ret = of_property_read_u32(node, "qcom,config-reg", &creg);
+	if (ret) {
+		pr_err("Can't get config-reg property\n");
+		return -EINVAL;
+	}
+
+	for (i = 0; i < NR_GLA_REG; i++)
+		hang_det->threshold[i] = treg[i];
+
+	hang_det->config = creg;
+
+	ret = kobject_init_and_add(&hang_det->kobj, &gladiator_ktype,
+		&cpu_subsys.dev_root->kobj, "%s", "gladiator_hang_detect");
+	if (ret) {
+		pr_err("%s: Error initializing and adding kobject\n", __func__);
+		goto out_put_kobj;
+	}
+
+	ret = sysfs_create_group(&hang_det->kobj, &hang_attr_group);
+	if (ret) {
+		pr_err("%s: Error creating sysfs attribute group\n", __func__);
+		goto out_del_kobj;
+	}
+	mutex_init(&hang_det->lock);
+	platform_set_drvdata(pdev, hang_det);
+	return 0;
+
+out_del_kobj:
+	kobject_del(&hang_det->kobj);
+out_put_kobj:
+	kobject_put(&hang_det->kobj);
+
+	return ret;
+}
+
+static int msm_gladiator_hang_detect_remove(struct platform_device *pdev)
+{
+	struct hang_detect *hang_det = platform_get_drvdata(pdev);
+
+	platform_set_drvdata(pdev, NULL);
+	sysfs_remove_group(&hang_det->kobj, &hang_attr_group);
+	kobject_del(&hang_det->kobj);
+	kobject_put(&hang_det->kobj);
+	mutex_destroy(&hang_det->lock);
+	return 0;
+}
+
+static struct platform_driver msm_gladiator_hang_detect_driver = {
+	.probe = msm_gladiator_hang_detect_probe,
+	.remove = msm_gladiator_hang_detect_remove,
+	.driver = {
+		.name = MODULE_NAME,
+		.owner = THIS_MODULE,
+		.of_match_table = msm_gladiator_hang_detect_table,
+	},
+};
+
+static int __init init_gladiator_hang_detect(void)
+{
+	return platform_driver_register(&msm_gladiator_hang_detect_driver);
+}
+module_init(init_gladiator_hang_detect);
+
+static void __exit exit_gladiator_hang_detect(void)
+{
+	platform_driver_unregister(&msm_gladiator_hang_detect_driver);
+}
+module_exit(exit_gladiator_hang_detect);
+
+MODULE_DESCRIPTION("MSM Gladiator Hang Detect Driver");
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/soc/qcom/glink.c	2019-10-29 09:26:24.809214589 +0100
@@ -0,0 +1,6325 @@
+/* Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#include <asm/arch_timer.h>
+#include <linux/err.h>
+#include <linux/ipc_logging.h>
+#include <linux/kthread.h>
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/workqueue.h>
+#include <linux/rwsem.h>
+#include <linux/pm_qos.h>
+#include <soc/qcom/glink.h>
+#include <soc/qcom/tracer_pkt.h>
+#include "glink_core_if.h"
+#include "glink_private.h"
+#include "glink_xprt_if.h"
+
+/* Number of internal IPC Logging log pages */
+#define NUM_LOG_PAGES	10
+#define GLINK_PM_QOS_HOLDOFF_MS		10
+#define GLINK_QOS_DEF_NUM_TOKENS	10
+#define GLINK_QOS_DEF_NUM_PRIORITY	1
+#define GLINK_QOS_DEF_MTU		2048
+
+#define GLINK_KTHREAD_PRIO 1
+
+/**
+ * struct glink_qos_priority_bin - Packet Scheduler's priority bucket
+ * @max_rate_kBps:	Maximum rate supported by the priority bucket.
+ * @power_state:	Transport power state for this priority bin.
+ * @tx_ready:		List of channels ready for tx in the priority bucket.
+ * @active_ch_cnt:	Active channels of this priority.
+ */
+struct glink_qos_priority_bin {
+	unsigned long max_rate_kBps;
+	uint32_t power_state;
+	struct list_head tx_ready;
+	uint32_t active_ch_cnt;
+};
+
+/**
+ * struct glink_core_xprt_ctx - transport representation structure
+ * @xprt_state_lhb0:		controls read/write access to transport state
+ * @list_node:			used to chain this transport in a global
+ *				transport list
+ * @name:			name of this transport
+ * @edge:			what this transport connects to
+ * @id:				the id to use for channel migration
+ * @versions:			array of transport versions this implementation
+ *				supports
+ * @versions_entries:		number of entries in @versions
+ * @local_version_idx:		local version index into @versions this
+ *				transport is currently running
+ * @remote_version_idx:		remote version index into @versions this
+ *				transport is currently running
+ * @l_features:			Features negotiated by the local side
+ * @capabilities:		Capabilities of underlying transport
+ * @ops:			transport defined implementation of common
+ *				operations
+ * @local_state:		value from local_channel_state_e representing
+ *				the local state of this transport
+ * @remote_neg_completed:	is the version negotiation with the remote end
+ *				completed
+ * @xprt_ctx_lock_lhb1:		lock to protect @next_lcid and @channels
+ * @next_lcid:			logical channel identifier to assign to the next
+ *				created channel
+ * @free_lcid_list:		list of freed logical channel identifiers
+ *				available for reuse
+ * @max_cid:			maximum number of channel identifiers supported
+ * @max_iid:			maximum number of intent identifiers supported
+ * @tx_kwork:			work item to process @tx_ready
+ * @tx_wq:			kthread worker that runs @tx_kwork
+ * @tx_task:		handle to the running kthread
+ * @channels:			list of all existing channels on this transport
+ * @dummy_in_use:		True when channels are being migrated to dummy.
+ * @notified:			list holds channels during dummy xprt cleanup.
+ * @mtu:			MTU supported by this transport.
+ * @token_count:		Number of tokens to be assigned per assignment.
+ * @curr_qos_rate_kBps:		Aggregate of currently supported QoS requests.
+ * @threshold_rate_kBps:	Maximum Rate allocated for QoS traffic.
+ * @num_priority:		Number of priority buckets in the transport.
+ * @tx_ready_lock_lhb3:	lock to protect @tx_ready
+ * @active_high_prio:		Highest priority of active channels.
+ * @prio_bin:			Pointer to priority buckets.
+ * @pm_qos_req:			power management QoS request for TX path
+ * @qos_req_active:		a vote is active with the PM QoS system
+ * @tx_path_activity:		transmit activity has occurred
+ * @pm_qos_work:		removes PM QoS vote due to inactivity
+ * @xprt_dbgfs_lock_lhb4:	debugfs channel structure lock
+ * @log_ctx:			IPC logging context for this transport.
+ */
+struct glink_core_xprt_ctx {
+	struct rwref_lock xprt_state_lhb0;
+	struct list_head list_node;
+	char name[GLINK_NAME_SIZE];
+	char edge[GLINK_NAME_SIZE];
+	uint16_t id;
+	const struct glink_core_version *versions;
+	size_t versions_entries;
+	uint32_t local_version_idx;
+	uint32_t remote_version_idx;
+	uint32_t l_features;
+	uint32_t capabilities;
+	struct glink_transport_if *ops;
+	enum transport_state_e local_state;
+	bool remote_neg_completed;
+
+	spinlock_t xprt_ctx_lock_lhb1;
+	struct list_head channels;
+	uint32_t next_lcid;
+	struct list_head free_lcid_list;
+	struct list_head notified;
+	bool dummy_in_use;
+
+	uint32_t max_cid;
+	uint32_t max_iid;
+	struct kthread_work tx_kwork;
+	struct kthread_worker tx_wq;
+	struct task_struct *tx_task;
+
+	size_t mtu;
+	uint32_t token_count;
+	unsigned long curr_qos_rate_kBps;
+	unsigned long threshold_rate_kBps;
+	uint32_t num_priority;
+	spinlock_t tx_ready_lock_lhb3;
+	uint32_t active_high_prio;
+	struct glink_qos_priority_bin *prio_bin;
+
+	struct pm_qos_request pm_qos_req;
+	bool qos_req_active;
+	bool tx_path_activity;
+	struct delayed_work pm_qos_work;
+	struct glink_core_edge_ctx *edge_ctx;
+
+	struct mutex xprt_dbgfs_lock_lhb4;
+	void *log_ctx;
+};
+
+/**
+ * struct glink_core_edge_ctx - edge context
+ * @list_node:			node used to chain this edge in the global
+ *				edge list
+ * @name:			name of the edge
+ * @edge_migration_lock_lhd2:	mutex lock for migration over this edge
+ * @edge_ref_lock_lhd1:		lock for the edge reference count
+ */
+struct glink_core_edge_ctx {
+	struct list_head list_node;
+	char name[GLINK_NAME_SIZE];
+	struct mutex edge_migration_lock_lhd2;
+	struct rwref_lock edge_ref_lock_lhd1;
+};
+
+static LIST_HEAD(edge_list);
+static DEFINE_MUTEX(edge_list_lock_lhd0);
+
+/**
+ * struct channel_ctx - channel context
+ * @ch_state_lhb2:	controls read/write access to channel state
+ * @port_list_node:	channel list node used by transport "channels" list
+ * @tx_ready_list_node:	channels that have data ready to transmit
+ * @name:		name of the channel
+ *
+ * @user_priv:		user opaque data type passed into glink_open()
+ * @notify_rx:		RX notification function
+ * @notify_tx_done:	TX-done notification function (remote side is done)
+ * @notify_state:	Channel state (connected / disconnected) notifications
+ * @notify_rx_intent_req: Request from remote side for an intent
+ * @notify_rxv:		RX notification function (for io buffer chain)
+ * @notify_rx_sigs:	RX signal change notification
+ * @notify_rx_abort:	Channel close RX Intent aborted
+ * @notify_tx_abort:	Channel close TX aborted
+ * @notify_rx_tracer_pkt:	Receive notification for tracer packet
+ * @notify_remote_rx_intent:	Receive notification for remote-queued RX intent
+ *
+ * @transport_ptr:		Transport this channel uses
+ * @lcid:			Local channel ID
+ * @rcid:			Remote channel ID
+ * @local_open_state:		Local channel state
+ * @remote_opened:		Remote channel state (opened or closed)
+ * @int_req_ack:		Remote side intent request ACK state
+ * @int_req_ack_complete:	Intent tracking completion - received remote ACK
+ * @int_req_complete:		Intent tracking completion - received intent
+ * @rx_intent_req_timeout_jiffies:	Timeout for requesting an RX intent, in
+ *			jiffies; if set to 0, timeout is infinite
+ *
+ * @local_rx_intent_lst_lock_lhc1:	RX intent list lock
+ * @local_rx_intent_list:		Active RX Intents queued by client
+ * @local_rx_intent_ntfy_list:		Client notified, waiting for rx_done()
+ * @local_rx_intent_free_list:		Available intent container structure
+ *
+ * @rmt_rx_intent_lst_lock_lhc2:	Remote RX intent list lock
+ * @rmt_rx_intent_list:			Remote RX intent list
+ *
+ * @max_used_liid:			Maximum Local Intent ID used
+ * @dummy_riid:				Dummy remote intent ID
+ *
+ * @tx_lists_lock_lhc3:		TX list lock
+ * @tx_active:				Ready to transmit
+ *
+ * @tx_pending_rmt_done_lock_lhc4:	Remote-done list lock
+ * @tx_pending_remote_done:		Transmitted, waiting for remote done
+ * @lsigs:				Local signals
+ * @rsigs:				Remote signals
+ * @pending_delete:			waiting for channel to be deleted
+ * @no_migrate:				The local client does not want to
+ *					migrate transports
+ * @local_xprt_req:			The transport the local side requested
+ * @local_xprt_resp:			The response to @local_xprt_req
+ * @remote_xprt_req:			The transport the remote side requested
+ * @remote_xprt_resp:			The response to @remote_xprt_req
+ * @curr_priority:			Channel's current priority.
+ * @initial_priority:			Channel's initial priority.
+ * @token_count:			Tokens for consumption by packet.
+ * @txd_len:				Transmitted data size in the current
+ *					token assignment cycle.
+ * @token_start_time:			Time at which tokens are assigned.
+ * @req_rate_kBps:			Current QoS request by the channel.
+ * @tx_intent_cnt:			Intent count to transmit soon in future.
+ * @tx_cnt:				Packets to be picked by tx scheduler.
+ * @rt_vote_on:				Number of times RT vote on is called.
+ * @rt_vote_off:			Number of times RT vote off is called.
+ */
+struct channel_ctx {
+	struct rwref_lock ch_state_lhb2;
+	struct list_head port_list_node;
+	struct list_head tx_ready_list_node;
+	char name[GLINK_NAME_SIZE];
+
+	/* user info */
+	void *user_priv;
+	void (*notify_rx)(void *handle, const void *priv, const void *pkt_priv,
+			const void *ptr, size_t size);
+	void (*notify_tx_done)(void *handle, const void *priv,
+			const void *pkt_priv, const void *ptr);
+	void (*notify_state)(void *handle, const void *priv, unsigned event);
+	bool (*notify_rx_intent_req)(void *handle, const void *priv,
+			size_t req_size);
+	void (*notify_rxv)(void *handle, const void *priv, const void *pkt_priv,
+			   void *iovec, size_t size,
+			   void * (*vbuf_provider)(void *iovec, size_t offset,
+						  size_t *size),
+			   void * (*pbuf_provider)(void *iovec, size_t offset,
+						  size_t *size));
+	void (*notify_rx_sigs)(void *handle, const void *priv,
+			uint32_t old_sigs, uint32_t new_sigs);
+	void (*notify_rx_abort)(void *handle, const void *priv,
+				const void *pkt_priv);
+	void (*notify_tx_abort)(void *handle, const void *priv,
+				const void *pkt_priv);
+	void (*notify_rx_tracer_pkt)(void *handle, const void *priv,
+			const void *pkt_priv, const void *ptr, size_t size);
+	void (*notify_remote_rx_intent)(void *handle, const void *priv,
+					size_t size);
+
+	/* internal port state */
+	struct glink_core_xprt_ctx *transport_ptr;
+	uint32_t lcid;
+	uint32_t rcid;
+	enum local_channel_state_e local_open_state;
+	bool remote_opened;
+	bool int_req_ack;
+	struct completion int_req_ack_complete;
+	struct completion int_req_complete;
+	unsigned long rx_intent_req_timeout_jiffies;
+
+	spinlock_t local_rx_intent_lst_lock_lhc1;
+	struct list_head local_rx_intent_list;
+	struct list_head local_rx_intent_ntfy_list;
+	struct list_head local_rx_intent_free_list;
+
+	spinlock_t rmt_rx_intent_lst_lock_lhc2;
+	struct list_head rmt_rx_intent_list;
+
+	uint32_t max_used_liid;
+	uint32_t dummy_riid;
+
+	spinlock_t tx_lists_lock_lhc3;
+	struct list_head tx_active;
+
+	spinlock_t tx_pending_rmt_done_lock_lhc4;
+	struct list_head tx_pending_remote_done;
+
+	uint32_t lsigs;
+	uint32_t rsigs;
+	bool pending_delete;
+
+	bool no_migrate;
+	uint16_t local_xprt_req;
+	uint16_t local_xprt_resp;
+	uint16_t remote_xprt_req;
+	uint16_t remote_xprt_resp;
+
+	uint32_t curr_priority;
+	uint32_t initial_priority;
+	uint32_t token_count;
+	size_t txd_len;
+	unsigned long token_start_time;
+	unsigned long req_rate_kBps;
+	uint32_t tx_intent_cnt;
+	uint32_t tx_cnt;
+
+	uint32_t rt_vote_on;
+	uint32_t rt_vote_off;
+};
+
+static struct glink_core_if core_impl;
+static void *log_ctx;
+static unsigned glink_debug_mask = QCOM_GLINK_INFO;
+module_param_named(debug_mask, glink_debug_mask,
+		   uint, S_IRUGO | S_IWUSR | S_IWGRP);
+
+static unsigned glink_pm_qos;
+module_param_named(pm_qos_enable, glink_pm_qos,
+		   uint, S_IRUGO | S_IWUSR | S_IWGRP);
+
+
+static LIST_HEAD(transport_list);
+
+/*
+ * Used while notifying the clients about link state events. Since the clients
+ * need to store the callback information temporarily and since all the
+ * existing accesses to transport list are in non-IRQ context, defining the
+ * transport_list_lock as a mutex.
+ */
+static DEFINE_MUTEX(transport_list_lock_lha0);
+
+struct link_state_notifier_info {
+	struct list_head list;
+	char transport[GLINK_NAME_SIZE];
+	char edge[GLINK_NAME_SIZE];
+	void (*glink_link_state_notif_cb)(
+			struct glink_link_state_cb_info *cb_info, void *priv);
+	void *priv;
+};
+static LIST_HEAD(link_state_notifier_list);
+static DEFINE_MUTEX(link_state_notifier_lock_lha1);
+
+static struct glink_core_xprt_ctx *find_open_transport(const char *edge,
+						       const char *name,
+						       bool initial_xprt,
+						       uint16_t *best_id);
+
+static bool xprt_is_fully_opened(struct glink_core_xprt_ctx *xprt);
+
+static struct channel_ctx *xprt_lcid_to_ch_ctx_get(
+					struct glink_core_xprt_ctx *xprt_ctx,
+					uint32_t lcid);
+
+static struct channel_ctx *xprt_rcid_to_ch_ctx_get(
+					struct glink_core_xprt_ctx *xprt_ctx,
+					uint32_t rcid);
+
+static void xprt_schedule_tx(struct glink_core_xprt_ctx *xprt_ptr,
+			     struct channel_ctx *ch_ptr,
+			     struct glink_core_tx_pkt *tx_info);
+
+static int xprt_single_threaded_tx(struct glink_core_xprt_ctx *xprt_ptr,
+			     struct channel_ctx *ch_ptr,
+			     struct glink_core_tx_pkt *tx_info);
+
+static void tx_func(struct kthread_work *work);
+
+static struct channel_ctx *ch_name_to_ch_ctx_create(
+					struct glink_core_xprt_ctx *xprt_ctx,
+					const char *name);
+
+static void ch_push_remote_rx_intent(struct channel_ctx *ctx, size_t size,
+					uint32_t riid, void *cookie);
+
+static int ch_pop_remote_rx_intent(struct channel_ctx *ctx, size_t size,
+			uint32_t *riid_ptr, size_t *intent_size, void **cookie);
+
+static struct glink_core_rx_intent *ch_push_local_rx_intent(
+		struct channel_ctx *ctx, const void *pkt_priv, size_t size);
+
+static void ch_remove_local_rx_intent(struct channel_ctx *ctx, uint32_t liid);
+
+static struct glink_core_rx_intent *ch_get_local_rx_intent(
+		struct channel_ctx *ctx, uint32_t liid);
+
+static void ch_set_local_rx_intent_notified(struct channel_ctx *ctx,
+				struct glink_core_rx_intent *intent_ptr);
+
+static struct glink_core_rx_intent *ch_get_local_rx_intent_notified(
+		struct channel_ctx *ctx, const void *ptr);
+
+static void ch_remove_local_rx_intent_notified(struct channel_ctx *ctx,
+			struct glink_core_rx_intent *liid_ptr, bool reuse);
+
+static struct glink_core_rx_intent *ch_get_free_local_rx_intent(
+		struct channel_ctx *ctx);
+
+static void ch_purge_intent_lists(struct channel_ctx *ctx);
+
+static void ch_add_rcid(struct glink_core_xprt_ctx *xprt_ctx,
+			struct channel_ctx *ctx,
+			uint32_t rcid);
+
+static bool ch_is_fully_opened(struct channel_ctx *ctx);
+static bool ch_is_fully_closed(struct channel_ctx *ctx);
+
+struct glink_core_tx_pkt *ch_get_tx_pending_remote_done(struct channel_ctx *ctx,
+							uint32_t riid);
+
+static void ch_remove_tx_pending_remote_done(struct channel_ctx *ctx,
+					struct glink_core_tx_pkt *tx_pkt);
+
+static void glink_core_rx_cmd_rx_intent_req_ack(struct glink_transport_if
+					*if_ptr, uint32_t rcid, bool granted);
+
+static bool glink_core_remote_close_common(struct channel_ctx *ctx, bool safe);
+
+static void check_link_notifier_and_notify(struct glink_core_xprt_ctx *xprt_ptr,
+					   enum glink_link_state link_state);
+
+static void glink_core_channel_cleanup(struct glink_core_xprt_ctx *xprt_ptr);
+static void glink_pm_qos_vote(struct glink_core_xprt_ctx *xprt_ptr);
+static void glink_pm_qos_unvote(struct glink_core_xprt_ctx *xprt_ptr);
+static void glink_pm_qos_cancel_worker(struct work_struct *work);
+static bool ch_update_local_state(struct channel_ctx *ctx,
+			enum local_channel_state_e lstate);
+static bool ch_update_rmt_state(struct channel_ctx *ctx, bool rstate);
+static void glink_core_deinit_xprt_qos_cfg(
+			struct glink_core_xprt_ctx *xprt_ptr);
+
+#define glink_prio_to_power_state(xprt_ctx, priority) \
+		((xprt_ctx)->prio_bin[priority].power_state)
+
+#define GLINK_GET_CH_TX_STATE(ctx) \
+		((ctx)->tx_intent_cnt || (ctx)->tx_cnt)
+
+static int glink_get_ch_ctx(struct channel_ctx *ctx)
+{
+	if (!ctx)
+		return -EINVAL;
+	rwref_get(&ctx->ch_state_lhb2);
+	return 0;
+}
+
+static void glink_put_ch_ctx(struct channel_ctx *ctx)
+{
+	rwref_put(&ctx->ch_state_lhb2);
+}
+
+
+/**
+ * glink_subsys_up() - Inform transport about remote subsystem up.
+ * @subsystem:	The name of the subsystem
+ *
+ * Call into the transport using the subsys_up(if_ptr) function to allow it to
+ * initialize any necessary structures.
+ *
+ * Return: Standard error codes.
+ */
+int glink_subsys_up(const char *subsystem)
+{
+	int ret = 0;
+	bool transport_found = false;
+	struct glink_core_xprt_ctx *xprt_ctx = NULL;
+
+	mutex_lock(&transport_list_lock_lha0);
+	list_for_each_entry(xprt_ctx, &transport_list, list_node) {
+		if (!strcmp(subsystem, xprt_ctx->edge) &&
+				!xprt_is_fully_opened(xprt_ctx)) {
+			GLINK_INFO_XPRT(xprt_ctx, "%s: %s Subsystem up\n",
+							__func__, subsystem);
+			if (xprt_ctx->ops->subsys_up)
+				xprt_ctx->ops->subsys_up(xprt_ctx->ops);
+			transport_found = true;
+		}
+	}
+	mutex_unlock(&transport_list_lock_lha0);
+
+	if (!transport_found)
+		ret = -ENODEV;
+
+	return ret;
+}
+EXPORT_SYMBOL(glink_subsys_up);
+
+/**
+ * glink_ssr() - Clean up locally for SSR by simulating remote close
+ * @subsystem:	The name of the subsystem being restarted
+ *
+ * Call into the transport using the ssr(if_ptr) function to allow it to
+ * clean up any necessary structures, then simulate a remote close from
+ * subsystem for all channels on that edge.
+ *
+ * Return: Standard error codes.
+ */
+int glink_ssr(const char *subsystem)
+{
+	int ret = 0;
+	bool transport_found = false;
+	struct glink_core_xprt_ctx *xprt_ctx = NULL;
+	struct channel_ctx *ch_ctx, *temp_ch_ctx;
+	uint32_t i;
+	unsigned long flags;
+
+	mutex_lock(&transport_list_lock_lha0);
+	list_for_each_entry(xprt_ctx, &transport_list, list_node) {
+		if (!strcmp(subsystem, xprt_ctx->edge) &&
+				xprt_is_fully_opened(xprt_ctx)) {
+			GLINK_INFO_XPRT(xprt_ctx, "%s: SSR\n", __func__);
+			spin_lock_irqsave(&xprt_ctx->tx_ready_lock_lhb3,
+					  flags);
+			for (i = 0; i < xprt_ctx->num_priority; i++)
+				list_for_each_entry_safe(ch_ctx, temp_ch_ctx,
+						&xprt_ctx->prio_bin[i].tx_ready,
+						tx_ready_list_node)
+					list_del_init(
+						&ch_ctx->tx_ready_list_node);
+			spin_unlock_irqrestore(&xprt_ctx->tx_ready_lock_lhb3,
+						flags);
+
+			xprt_ctx->ops->ssr(xprt_ctx->ops);
+			transport_found = true;
+		}
+	}
+	mutex_unlock(&transport_list_lock_lha0);
+
+	if (!transport_found)
+		ret = -ENODEV;
+
+	return ret;
+}
+EXPORT_SYMBOL(glink_ssr);
+
+/**
+ * glink_core_ch_close_ack_common() - handles the common operations during
+ *                                    close ack.
+ * @ctx:	Pointer to channel instance.
+ * @is_safe:	Is function called while holding ctx lock
+ *
+ * Return: True if the channel is fully closed after the state change,
+ *	false otherwise.
+ */
+static bool glink_core_ch_close_ack_common(struct channel_ctx *ctx, bool safe)
+{
+	bool is_fully_closed;
+
+	if (ctx == NULL)
+		return false;
+
+	if (safe) {
+		ctx->local_open_state = GLINK_CHANNEL_CLOSED;
+		is_fully_closed = ch_is_fully_closed(ctx);
+	} else {
+		is_fully_closed = ch_update_local_state(ctx,
+							GLINK_CHANNEL_CLOSED);
+	}
+
+	GLINK_INFO_PERF_CH(ctx,
+		"%s: local:GLINK_CHANNEL_CLOSING->GLINK_CHANNEL_CLOSED\n",
+		__func__);
+
+	if (ctx->notify_state) {
+		ctx->notify_state(ctx, ctx->user_priv,
+			GLINK_LOCAL_DISCONNECTED);
+		ch_purge_intent_lists(ctx);
+		GLINK_INFO_PERF_CH(ctx,
+		"%s: notify state: GLINK_LOCAL_DISCONNECTED\n",
+		__func__);
+	}
+
+	return is_fully_closed;
+}
+
+/**
+ * glink_core_remote_close_common() - Handles the common operations during
+ *                                    a remote close.
+ * @ctx:	Pointer to channel instance.
+ * @safe:       Is function called with ctx rwref lock already acquired.
+ * Return: True if the channel is fully closed after the state change,
+ *	false otherwise.
+ */
+static bool glink_core_remote_close_common(struct channel_ctx *ctx, bool safe)
+{
+	bool is_fully_closed;
+
+	if (ctx == NULL)
+		return false;
+
+	if (safe) {
+		ctx->remote_opened = false;
+		is_fully_closed = ch_is_fully_closed(ctx);
+	} else {
+		is_fully_closed = ch_update_rmt_state(ctx, false);
+	}
+	ctx->rcid = 0;
+
+	ctx->int_req_ack = false;
+	complete_all(&ctx->int_req_ack_complete);
+	complete_all(&ctx->int_req_complete);
+	if (ctx->local_open_state != GLINK_CHANNEL_CLOSED &&
+		ctx->local_open_state != GLINK_CHANNEL_CLOSING) {
+		if (ctx->notify_state)
+			ctx->notify_state(ctx, ctx->user_priv,
+				GLINK_REMOTE_DISCONNECTED);
+		GLINK_INFO_CH(ctx,
+				"%s: %s: GLINK_REMOTE_DISCONNECTED\n",
+				__func__, "notify state");
+	}
+
+	if (ctx->local_open_state == GLINK_CHANNEL_CLOSED)
+		GLINK_INFO_CH(ctx,
+			"%s: %s, %s\n", __func__,
+			"Did not send GLINK_REMOTE_DISCONNECTED",
+			"local state is already CLOSED");
+
+	ch_purge_intent_lists(ctx);
+
+	return is_fully_closed;
+}
+
+/**
+ * glink_qos_calc_rate_kBps() - Calculate the transmit rate in kBps
+ * @pkt_size:		Worst case packet size per transmission.
+ * @interval_us:	Packet transmit interval in us.
+ *
+ * This function is used to calculate the rate of transmission rate of
+ * a channel in kBps.
+ *
+ * Return: Transmission rate in kBps.
+ */
+static unsigned long glink_qos_calc_rate_kBps(size_t pkt_size,
+				       unsigned long interval_us)
+{
+	unsigned long rate_kBps, rem;
+
+	rate_kBps = pkt_size * USEC_PER_SEC;
+	rem = do_div(rate_kBps, (interval_us * 1024));
+	return rate_kBps;
+}
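+
+/*
+ * For example, a worst-case packet of 2048 bytes every 1000 us gives
+ * 2048 * 1000000 / (1000 * 1024) = 2000 kBps.
+ */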
+
+/**
+ * glink_qos_check_feasibility() - Feasibility test on a QoS Request
+ * @xprt_ctx:		Transport in which the QoS request is made.
+ * @req_rate_kBps:	QoS Request.
+ *
+ * This function is used to perform the schedulability test on a QoS request
+ * over a specific transport.
+ *
+ * Return: 0 on success, standard Linux error codes on failure.
+ */
+static int glink_qos_check_feasibility(struct glink_core_xprt_ctx *xprt_ctx,
+				       unsigned long req_rate_kBps)
+{
+	unsigned long new_rate_kBps;
+
+	if (xprt_ctx->num_priority == GLINK_QOS_DEF_NUM_PRIORITY)
+		return -EOPNOTSUPP;
+
+	new_rate_kBps = xprt_ctx->curr_qos_rate_kBps + req_rate_kBps;
+	if (new_rate_kBps > xprt_ctx->threshold_rate_kBps) {
+		GLINK_ERR_XPRT(xprt_ctx,
+			"New_rate(%lu + %lu) > threshold_rate(%lu)\n",
+			xprt_ctx->curr_qos_rate_kBps, req_rate_kBps,
+			xprt_ctx->threshold_rate_kBps);
+		return -EBUSY;
+	}
+	return 0;
+}
+
+/**
+ * glink_qos_update_ch_prio() - Update the channel priority
+ * @ctx:		Channel context whose priority is updated.
+ * @new_priority:	New priority of the channel.
+ *
+ * This function is called to update the channel priority during QoS request,
+ * QoS Cancel or Priority evaluation by packet scheduler. This function must
+ * be called with transport's tx_ready_lock_lhb3 lock and channel's
+ * tx_lists_lock_lhc3 locked.
+ */
+static void glink_qos_update_ch_prio(struct channel_ctx *ctx,
+				     uint32_t new_priority)
+{
+	uint32_t old_priority;
+
+	if (unlikely(!ctx))
+		return;
+
+	old_priority = ctx->curr_priority;
+	if (!list_empty(&ctx->tx_ready_list_node)) {
+		ctx->transport_ptr->prio_bin[old_priority].active_ch_cnt--;
+		list_move(&ctx->tx_ready_list_node,
+			  &ctx->transport_ptr->prio_bin[new_priority].tx_ready);
+		ctx->transport_ptr->prio_bin[new_priority].active_ch_cnt++;
+	}
+	ctx->curr_priority = new_priority;
+}
+
+/**
+ * glink_qos_assign_priority() - Assign priority to a channel
+ * @ctx:		Channel for which the priority has to be assigned.
+ * @req_rate_kBps:	QoS request by the channel.
+ *
+ * This function is used to assign a priority to the channel depending on its
+ * QoS Request.
+ *
+ * Return: 0 on success, standard Linux error codes on failure.
+ */
+static int glink_qos_assign_priority(struct channel_ctx *ctx,
+				     unsigned long req_rate_kBps)
+{
+	int ret;
+	uint32_t i;
+	unsigned long flags;
+
+	spin_lock_irqsave(&ctx->transport_ptr->tx_ready_lock_lhb3, flags);
+	if (ctx->req_rate_kBps) {
+		spin_unlock_irqrestore(&ctx->transport_ptr->tx_ready_lock_lhb3,
+					flags);
+		GLINK_ERR_CH(ctx, "%s: QoS Request already exists\n", __func__);
+		return -EINVAL;
+	}
+
+	ret = glink_qos_check_feasibility(ctx->transport_ptr, req_rate_kBps);
+	if (ret < 0) {
+		spin_unlock_irqrestore(&ctx->transport_ptr->tx_ready_lock_lhb3,
+					flags);
+		return ret;
+	}
+
+	spin_lock(&ctx->tx_lists_lock_lhc3);
+	i = ctx->transport_ptr->num_priority - 1;
+	while (i > 0 &&
+	       ctx->transport_ptr->prio_bin[i-1].max_rate_kBps >= req_rate_kBps)
+		i--;
+
+	ctx->initial_priority = i;
+	glink_qos_update_ch_prio(ctx, i);
+	ctx->req_rate_kBps = req_rate_kBps;
+	if (i > 0) {
+		ctx->transport_ptr->curr_qos_rate_kBps += req_rate_kBps;
+		ctx->token_count = ctx->transport_ptr->token_count;
+		ctx->txd_len = 0;
+		ctx->token_start_time = arch_counter_get_cntvct();
+	}
+	spin_unlock(&ctx->tx_lists_lock_lhc3);
+	spin_unlock_irqrestore(&ctx->transport_ptr->tx_ready_lock_lhb3, flags);
+	return 0;
+}
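+
+/*
+ * Example (hypothetical bucket configuration): with three priority
+ * buckets whose max_rate_kBps are {0, 100, 500}, a request of 50 kBps is
+ * assigned priority 1 (bucket 0's limit of 0 cannot cover it), while a
+ * request of 150 kBps is assigned priority 2.
+ */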
+
+/**
+ * glink_qos_reset_priority() - Reset the channel priority
+ * @ctx:	Channel for which the priority is reset.
+ *
+ * This function is used to reset the channel priority when the QoS request
+ * is cancelled by the channel.
+ *
+ * Return: 0 on success, standard Linux error codes on failure.
+ */
+static int glink_qos_reset_priority(struct channel_ctx *ctx)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&ctx->transport_ptr->tx_ready_lock_lhb3, flags);
+	spin_lock(&ctx->tx_lists_lock_lhc3);
+	if (ctx->initial_priority > 0) {
+		ctx->initial_priority = 0;
+		glink_qos_update_ch_prio(ctx, 0);
+		ctx->transport_ptr->curr_qos_rate_kBps -= ctx->req_rate_kBps;
+		ctx->txd_len = 0;
+		ctx->req_rate_kBps = 0;
+	}
+	spin_unlock(&ctx->tx_lists_lock_lhc3);
+	spin_unlock_irqrestore(&ctx->transport_ptr->tx_ready_lock_lhb3, flags);
+	return 0;
+}
+
+/**
+ * glink_qos_ch_vote_xprt() - Vote the transport that channel is active
+ * @ctx:	Channel context which is active.
+ *
+ * This function is called to vote for the transport either when the channel
+ * is transmitting or when it shows an intention to transmit sooner. This
+ * function must be called with transport's tx_ready_lock_lhb3 lock and
+ * channel's tx_lists_lock_lhc3 locked.
+ *
+ * Return: 0 on success, standard Linux error codes on failure.
+ */
+static int glink_qos_ch_vote_xprt(struct channel_ctx *ctx)
+{
+	uint32_t prio;
+
+	if (unlikely(!ctx || !ctx->transport_ptr))
+		return -EINVAL;
+
+	prio = ctx->curr_priority;
+	ctx->transport_ptr->prio_bin[prio].active_ch_cnt++;
+
+	if (ctx->transport_ptr->prio_bin[prio].active_ch_cnt == 1 &&
+	    ctx->transport_ptr->active_high_prio < prio) {
+		/*
+		 * One active channel in this priority and this is the
+		 * highest active priority bucket
+		 */
+		ctx->transport_ptr->active_high_prio = prio;
+		return ctx->transport_ptr->ops->power_vote(
+				ctx->transport_ptr->ops,
+				glink_prio_to_power_state(ctx->transport_ptr,
+							  prio));
+	}
+	return 0;
+}
+
+/**
+ * glink_qos_ch_unvote_xprt() - Unvote the transport when channel is inactive
+ * @ctx:	Channel context which is inactive.
+ *
+ * This function is called to unvote for the transport either when all the
+ * packets queued by the channel are transmitted by the scheduler. This
+ * function must be called with transport's tx_ready_lock_lhb3 lock and
+ * channel's tx_lists_lock_lhc3 locked.
+ *
+ * Return: 0 on success, standard Linux error codes on failure.
+ */
+static int glink_qos_ch_unvote_xprt(struct channel_ctx *ctx)
+{
+	uint32_t prio;
+
+	if (unlikely(!ctx || !ctx->transport_ptr))
+		return -EINVAL;
+
+	prio = ctx->curr_priority;
+	ctx->transport_ptr->prio_bin[prio].active_ch_cnt--;
+
+	if (ctx->transport_ptr->prio_bin[prio].active_ch_cnt ||
+	    ctx->transport_ptr->active_high_prio > prio)
+		return 0;
+
+	/*
+	 * No active channel in this priority and this is the
+	 * highest active priority bucket
+	 */
+	while (prio > 0) {
+		prio--;
+		if (!ctx->transport_ptr->prio_bin[prio].active_ch_cnt)
+			continue;
+
+		ctx->transport_ptr->active_high_prio = prio;
+		return ctx->transport_ptr->ops->power_vote(
+				ctx->transport_ptr->ops,
+				glink_prio_to_power_state(ctx->transport_ptr,
+							  prio));
+	}
+	return ctx->transport_ptr->ops->power_unvote(ctx->transport_ptr->ops);
+}
+
+/**
+ * glink_qos_add_ch_tx_intent() - Add the channel's intention to transmit soon
+ * @ctx:	Channel context which is going to be active.
+ *
+ * This function is called to update the channel state when it intends to
+ * transmit soon. This function must be called with transport's
+ * tx_ready_lock_lhb3 lock and channel's tx_lists_lock_lhc3 locked.
+ *
+ * Return: 0 on success, standard Linux error codes on failure.
+ */
+static int glink_qos_add_ch_tx_intent(struct channel_ctx *ctx)
+{
+	bool active_tx;
+
+	if (unlikely(!ctx))
+		return -EINVAL;
+
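+	/* Sample the tx state first: vote only on the idle -> active change */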
+	active_tx = GLINK_GET_CH_TX_STATE(ctx);
+	ctx->tx_intent_cnt++;
+	if (!active_tx)
+		glink_qos_ch_vote_xprt(ctx);
+	return 0;
+}
+
+/**
+ * glink_qos_do_ch_tx() - Update the channel's state that it is transmitting
+ * @ctx:	Channel context which is transmitting.
+ *
+ * This function is called to update the channel state when it is queueing a
+ * packet to transmit. This function must be called with transport's
+ * tx_ready_lock_lhb3 lock and channel's tx_lists_lock_lhc3 locked.
+ *
+ * Return: 0 on success, standard Linux error codes on failure.
+ */
+static int glink_qos_do_ch_tx(struct channel_ctx *ctx)
+{
+	bool active_tx;
+
+	if (unlikely(!ctx))
+		return -EINVAL;
+
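+	/* Account for the queued packet and vote if the channel was idle */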
+	active_tx = GLINK_GET_CH_TX_STATE(ctx);
+	ctx->tx_cnt++;
+	if (ctx->tx_intent_cnt)
+		ctx->tx_intent_cnt--;
+	if (!active_tx)
+		glink_qos_ch_vote_xprt(ctx);
+	return 0;
+}
+
+/**
+ * glink_qos_done_ch_tx() - Update the channel's state when transmission is done
+ * @ctx:	Channel context for which all packets are transmitted.
+ *
+ * This function is called to update the channel state when all packets in its
+ * transmit queue are successfully transmitted. This function must be called
+ * with transport's tx_ready_lock_lhb3 lock and channel's tx_lists_lock_lhc3
+ * locked.
+ *
+ * Return: 0 on success, standard Linux error codes on failure.
+ */
+static int glink_qos_done_ch_tx(struct channel_ctx *ctx)
+{
+	bool active_tx;
+
+	if (unlikely(!ctx))
+		return -EINVAL;
+
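+	/* A done event while no packets are outstanding is a logic error */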
+	WARN_ON(ctx->tx_cnt == 0);
+	ctx->tx_cnt = 0;
+	active_tx = GLINK_GET_CH_TX_STATE(ctx);
+	if (!active_tx)
+		glink_qos_ch_unvote_xprt(ctx);
+	return 0;
+}
+
+/**
+ * tx_linear_vbuf_provider() - Virtual Buffer Provider for linear buffers
+ * @iovec:	Pointer to the beginning of the linear buffer.
+ * @offset:	Offset into the buffer whose address is needed.
+ * @size:	Pointer to hold the length of the contiguous buffer space.
+ *
+ * This function is used when a linear buffer is transmitted.
+ *
+ * Return: Address of the buffer which is at offset "offset" from the beginning
+ *         of the buffer.
+ */
+static void *tx_linear_vbuf_provider(void *iovec, size_t offset, size_t *size)
+{
+	struct glink_core_tx_pkt *tx_info = (struct glink_core_tx_pkt *)iovec;
+
+	if (unlikely(!iovec || !size))
+		return NULL;
+
+	if (offset >= tx_info->size)
+		return NULL;
+
+	if (unlikely(OVERFLOW_ADD_UNSIGNED(void *, tx_info->data, offset)))
+		return NULL;
+
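+	/* Report the contiguous space left between @offset and buffer end */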
+	*size = tx_info->size - offset;
+
+	return (void *)tx_info->data + offset;
+}
+
+/**
+ * linearize_vector() - Linearize the vector buffer
+ * @iovec:	Pointer to the vector buffer.
+ * @size:	Size of data in the vector buffer.
+ * @vbuf_provider:	Virtual address-space Buffer Provider for the vector.
+ * @pbuf_provider:	Physical address-space Buffer Provider for the vector.
+ *
+ * This function is used to linearize the vector buffer provided by the
+ * transport when the client has registered to receive only a linear
+ * buffer.
+ *
+ * Return: Address of the linear buffer on success, NULL or ERR_PTR() on
+ *	failure.
+ */
+static void *linearize_vector(void *iovec, size_t size,
+	void * (*vbuf_provider)(void *iovec, size_t offset, size_t *buf_size),
+	void * (*pbuf_provider)(void *iovec, size_t offset, size_t *buf_size))
+{
+	void *bounce_buf;
+	void *pdata;
+	void *vdata;
+	size_t data_size;
+	size_t offset = 0;
+
+	bounce_buf = kmalloc(size, GFP_KERNEL);
+	if (!bounce_buf)
+		return ERR_PTR(-ENOMEM);
+
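+	/*
+	 * Walk the vector one contiguous chunk at a time, copying each chunk
+	 * into the bounce buffer until all @size bytes are linearized.
+	 */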
+	do {
+		if (vbuf_provider) {
+			vdata = vbuf_provider(iovec, offset, &data_size);
+		} else {
+			pdata = pbuf_provider(iovec, offset, &data_size);
+			vdata = phys_to_virt((unsigned long)pdata);
+		}
+
+		if (!vdata)
+			break;
+
+		if (OVERFLOW_ADD_UNSIGNED(size_t, data_size, offset)) {
+			GLINK_ERR("%s: overflow data_size %zu + offset %zu\n",
+				  __func__, data_size, offset);
+			goto err;
+		}
+
+		memcpy(bounce_buf + offset, vdata, data_size);
+		offset += data_size;
+	} while (offset < size);
+
+	if (offset != size) {
+		GLINK_ERR("%s: Error size_copied %zu != total_size %zu\n",
+			  __func__, offset, size);
+		goto err;
+	}
+	return bounce_buf;
+
+err:
+	kfree(bounce_buf);
+	return NULL;
+}
+
+/**
+ * glink_core_migration_edge_lock() - take a reference on the edge and
+ *					acquire the migration mutex
+ * @xprt_ctx:	transport of the edge
+ */
+static void glink_core_migration_edge_lock(struct glink_core_xprt_ctx *xprt_ctx)
+{
+	struct glink_core_edge_ctx *edge_ctx = xprt_ctx->edge_ctx;
+
+	rwref_get(&edge_ctx->edge_ref_lock_lhd1);
+	mutex_lock(&edge_ctx->edge_migration_lock_lhd2);
+}
+
+/**
+ * glink_core_migration_edge_unlock() - release the migration mutex and
+ *					drop the edge reference
+ * @xprt_ctx:	transport of the edge
+ */
+static void glink_core_migration_edge_unlock(
+					struct glink_core_xprt_ctx *xprt_ctx)
+{
+	struct glink_core_edge_ctx *edge_ctx = xprt_ctx->edge_ctx;
+
+	mutex_unlock(&edge_ctx->edge_migration_lock_lhd2);
+	rwref_put(&edge_ctx->edge_ref_lock_lhd1);
+}
+
+/**
+ * glink_edge_ctx_release - Free the edge context
+ * @ch_st_lock:	handle to the rwref_lock associated with the edge
+ *
+ * This should only be called when the reference count associated with the
+ * edge goes to zero.
+ */
+static void glink_edge_ctx_release(struct rwref_lock *ch_st_lock)
+{
+	struct glink_core_edge_ctx *ctx = container_of(ch_st_lock,
+					struct glink_core_edge_ctx,
+						edge_ref_lock_lhd1);
+
+	mutex_lock(&edge_list_lock_lhd0);
+	list_del(&ctx->list_node);
+	mutex_unlock(&edge_list_lock_lhd0);
+	kfree(ctx);
+}
+
+
+/**
+ * edge_name_to_ctx_create() - look up an edge by name, creating the edge ctx
+ *                              if it is not found.
+ * @xprt_ctx:	Transport to search for a matching edge.
+ *
+ * Return: The edge ctx corresponding to edge of @xprt_ctx or
+ *	NULL if memory allocation fails.
+ */
+static struct glink_core_edge_ctx *edge_name_to_ctx_create(
+				struct glink_core_xprt_ctx *xprt_ctx)
+{
+	struct glink_core_edge_ctx *edge_ctx;
+
+	mutex_lock(&edge_list_lock_lhd0);
+	list_for_each_entry(edge_ctx, &edge_list, list_node) {
+		if (!strcmp(edge_ctx->name, xprt_ctx->edge)) {
+			rwref_get(&edge_ctx->edge_ref_lock_lhd1);
+			mutex_unlock(&edge_list_lock_lhd0);
+			return edge_ctx;
+		}
+	}
+	edge_ctx = kzalloc(sizeof(struct glink_core_edge_ctx), GFP_KERNEL);
+	if (!edge_ctx) {
+		mutex_unlock(&edge_list_lock_lhd0);
+		return NULL;
+	}
+	strlcpy(edge_ctx->name, xprt_ctx->edge, GLINK_NAME_SIZE);
+	rwref_lock_init(&edge_ctx->edge_ref_lock_lhd1, glink_edge_ctx_release);
+	mutex_init(&edge_ctx->edge_migration_lock_lhd2);
+	INIT_LIST_HEAD(&edge_ctx->list_node);
+	list_add_tail(&edge_ctx->list_node, &edge_list);
+	mutex_unlock(&edge_list_lock_lhd0);
+	return edge_ctx;
+}
+
+/**
+ * xprt_lcid_to_ch_ctx_get() - lookup a channel by local id
+ * @xprt_ctx:	Transport to search for a matching channel.
+ * @lcid:	Local channel identifier corresponding to the desired channel.
+ *
+ * If the channel is found, the reference count is incremented to ensure the
+ * lifetime of the channel context.  The caller must call rwref_put() when done.
+ *
+ * Return: The channel corresponding to @lcid or NULL if a matching channel
+ *	is not found.
+ */
+static struct channel_ctx *xprt_lcid_to_ch_ctx_get(
+					struct glink_core_xprt_ctx *xprt_ctx,
+					uint32_t lcid)
+{
+	struct channel_ctx *entry;
+	unsigned long flags;
+
+	spin_lock_irqsave(&xprt_ctx->xprt_ctx_lock_lhb1, flags);
+	list_for_each_entry(entry, &xprt_ctx->channels, port_list_node)
+		if (entry->lcid == lcid) {
+			rwref_get(&entry->ch_state_lhb2);
+			spin_unlock_irqrestore(&xprt_ctx->xprt_ctx_lock_lhb1,
+					flags);
+			return entry;
+		}
+	spin_unlock_irqrestore(&xprt_ctx->xprt_ctx_lock_lhb1, flags);
+
+	return NULL;
+}
+
+/**
+ * xprt_rcid_to_ch_ctx_get() - lookup a channel by remote id
+ * @xprt_ctx:	Transport to search for a matching channel.
+ * @rcid:	Remote channel identifier corresponding to the desired channel.
+ *
+ * If the channel is found, the reference count is incremented to ensure the
+ * lifetime of the channel context.  The caller must call rwref_put() when done.
+ *
+ * Return: The channel corresponding to @rcid or NULL if a matching channel
+ *	is not found.
+ */
+static struct channel_ctx *xprt_rcid_to_ch_ctx_get(
+					struct glink_core_xprt_ctx *xprt_ctx,
+					uint32_t rcid)
+{
+	struct channel_ctx *entry;
+	unsigned long flags;
+
+	spin_lock_irqsave(&xprt_ctx->xprt_ctx_lock_lhb1, flags);
+	list_for_each_entry(entry, &xprt_ctx->channels, port_list_node)
+		if (entry->rcid == rcid) {
+			rwref_get(&entry->ch_state_lhb2);
+			spin_unlock_irqrestore(&xprt_ctx->xprt_ctx_lock_lhb1,
+					flags);
+			return entry;
+		}
+	spin_unlock_irqrestore(&xprt_ctx->xprt_ctx_lock_lhb1, flags);
+
+	return NULL;
+}
+
+/**
+ * ch_check_duplicate_riid() - Checks for duplicate riid
+ * @ctx:	Local channel context
+ * @riid:	Remote intent ID
+ *
+ * This function checks whether @riid is already present in the remote RX
+ * intent list.
+ *
+ * Return: True if a duplicate riid is found, false otherwise.
+ */
+bool ch_check_duplicate_riid(struct channel_ctx *ctx, int riid)
+{
+	struct glink_core_rx_intent *intent;
+	unsigned long flags;
+
+	spin_lock_irqsave(&ctx->rmt_rx_intent_lst_lock_lhc2, flags);
+	list_for_each_entry(intent, &ctx->rmt_rx_intent_list, list) {
+		if (riid == intent->id) {
+			spin_unlock_irqrestore(
+				&ctx->rmt_rx_intent_lst_lock_lhc2, flags);
+			return true;
+		}
+	}
+	spin_unlock_irqrestore(&ctx->rmt_rx_intent_lst_lock_lhc2, flags);
+	return false;
+}
+
+/**
+ * ch_pop_remote_rx_intent() - Finds a matching RX intent
+ * @ctx:	Local channel context
+ * @size:	Size of Intent
+ * @riid_ptr:	Pointer to return value of remote intent ID
+ * @intent_size: Pointer to return the size of the matched intent
+ * @cookie:	Transport-specific cookie to return
+ *
+ * This function searches for a remote RX intent that is at least as large as
+ * the requested size.
+ *
+ * Return: 0 on success, -EINVAL on invalid input, -EAGAIN if no suitable
+ *	intent is currently available.
+ */
+int ch_pop_remote_rx_intent(struct channel_ctx *ctx, size_t size,
+	uint32_t *riid_ptr, size_t *intent_size, void **cookie)
+{
+	struct glink_core_rx_intent *intent;
+	struct glink_core_rx_intent *intent_tmp;
+	struct glink_core_rx_intent *best_intent = NULL;
+	unsigned long flags;
+
+	if (GLINK_MAX_PKT_SIZE < size) {
+		GLINK_ERR_CH(ctx, "%s: R[]:%zu Invalid size.\n", __func__,
+				size);
+		return -EINVAL;
+	}
+
+	if (riid_ptr == NULL)
+		return -EINVAL;
+
+	*riid_ptr = 0;
+	spin_lock_irqsave(&ctx->rmt_rx_intent_lst_lock_lhc2, flags);
+	if (ctx->transport_ptr->capabilities & GCAP_INTENTLESS) {
+		*riid_ptr = ++ctx->dummy_riid;
+		spin_unlock_irqrestore(&ctx->rmt_rx_intent_lst_lock_lhc2,
+					flags);
+		return 0;
+	}
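+	/* Best-fit search: pick the smallest intent that still fits @size */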
+	list_for_each_entry_safe(intent, intent_tmp, &ctx->rmt_rx_intent_list,
+			list) {
+		if (intent->intent_size >= size) {
+			if (!best_intent)
+				best_intent = intent;
+			else if (best_intent->intent_size > intent->intent_size)
+				best_intent = intent;
+			if (best_intent->intent_size == size)
+				break;
+		}
+	}
+	if (best_intent) {
+		list_del(&best_intent->list);
+		GLINK_DBG_CH(ctx,
+				"%s: R[%u]:%zu Removed remote intent\n",
+				__func__,
+				best_intent->id,
+				best_intent->intent_size);
+		*riid_ptr = best_intent->id;
+		*intent_size = best_intent->intent_size;
+		*cookie = best_intent->cookie;
+		kfree(best_intent);
+		spin_unlock_irqrestore(
+			&ctx->rmt_rx_intent_lst_lock_lhc2, flags);
+		return 0;
+	}
+	spin_unlock_irqrestore(&ctx->rmt_rx_intent_lst_lock_lhc2, flags);
+	return -EAGAIN;
+}
+
+/**
+ * ch_push_remote_rx_intent() - Registers a remote RX intent
+ * @ctx:	Local channel context
+ * @size:	Size of Intent
+ * @riid:	Remote intent ID
+ * @cookie:	Transport-specific cookie to cache
+ *
+ * This function adds a remote RX intent to the remote RX intent list.
+ */
+void ch_push_remote_rx_intent(struct channel_ctx *ctx, size_t size,
+		uint32_t riid, void *cookie)
+{
+	struct glink_core_rx_intent *intent;
+	unsigned long flags;
+	gfp_t gfp_flag;
+
+	if (GLINK_MAX_PKT_SIZE < size) {
+		GLINK_ERR_CH(ctx, "%s: R[%u]:%zu Invalid size.\n", __func__,
+				riid, size);
+		return;
+	}
+
+	if (ch_check_duplicate_riid(ctx, riid)) {
+		GLINK_ERR_CH(ctx, "%s: R[%d]:%zu Duplicate RIID found\n",
+				__func__, riid, size);
+		return;
+	}
+
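+	/*
+	 * Transports that auto-queue RX intents call this path from atomic
+	 * context, so the allocation must not sleep in that case.
+	 */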
+	gfp_flag = (ctx->transport_ptr->capabilities & GCAP_AUTO_QUEUE_RX_INT) ?
+							GFP_ATOMIC : GFP_KERNEL;
+	intent = kzalloc(sizeof(struct glink_core_rx_intent), gfp_flag);
+	if (!intent) {
+		GLINK_ERR_CH(ctx,
+			"%s: R[%u]:%zu Memory allocation for intent failed\n",
+			__func__, riid, size);
+		return;
+	}
+	intent->id = riid;
+	intent->intent_size = size;
+	intent->cookie = cookie;
+
+	spin_lock_irqsave(&ctx->rmt_rx_intent_lst_lock_lhc2, flags);
+	list_add_tail(&intent->list, &ctx->rmt_rx_intent_list);
+
+	complete_all(&ctx->int_req_complete);
+	if (ctx->notify_remote_rx_intent)
+		ctx->notify_remote_rx_intent(ctx, ctx->user_priv, size);
+	spin_unlock_irqrestore(&ctx->rmt_rx_intent_lst_lock_lhc2, flags);
+
+	GLINK_DBG_CH(ctx, "%s: R[%u]:%zu Pushed remote intent\n", __func__,
+			intent->id,
+			intent->intent_size);
+}
+
+/**
+ * ch_push_local_rx_intent() - Create an rx_intent
+ * @ctx:	Local channel context
+ * @pkt_priv:	Opaque private pointer provided by client to be returned later
+ * @size:	Size of intent
+ *
+ * This function creates a local intent and adds it to the local
+ * intent list.
+ */
+struct glink_core_rx_intent *ch_push_local_rx_intent(struct channel_ctx *ctx,
+		const void *pkt_priv, size_t size)
+{
+	struct glink_core_rx_intent *intent;
+	unsigned long flags;
+	int ret;
+
+	if (GLINK_MAX_PKT_SIZE < size) {
+		GLINK_ERR_CH(ctx,
+			"%s: L[]:%zu Invalid size\n", __func__, size);
+		return NULL;
+	}
+
+	intent = ch_get_free_local_rx_intent(ctx);
+	if (!intent) {
+		if (ctx->max_used_liid >= ctx->transport_ptr->max_iid) {
+			GLINK_ERR_CH(ctx,
+				"%s: All intents are in USE max_iid[%d]",
+				__func__, ctx->transport_ptr->max_iid);
+			return NULL;
+		}
+
+		intent = kzalloc(sizeof(struct glink_core_rx_intent),
+								GFP_KERNEL);
+		if (!intent) {
+			GLINK_ERR_CH(ctx,
+			"%s: Memory Allocation for local rx_intent failed",
+				__func__);
+			return NULL;
+		}
+		intent->id = ++ctx->max_used_liid;
+	}
+
+	/* transport is responsible for allocating/reserving for the intent */
+	ret = ctx->transport_ptr->ops->allocate_rx_intent(
+					ctx->transport_ptr->ops, size, intent);
+	if (ret < 0) {
+		/* intent data allocation failure */
+		GLINK_ERR_CH(ctx, "%s: unable to allocate intent sz[%zu] %d",
+			__func__, size, ret);
+		spin_lock_irqsave(&ctx->local_rx_intent_lst_lock_lhc1, flags);
+		list_add_tail(&intent->list,
+				&ctx->local_rx_intent_free_list);
+		spin_unlock_irqrestore(&ctx->local_rx_intent_lst_lock_lhc1,
+				flags);
+		return NULL;
+	}
+
+	intent->pkt_priv = pkt_priv;
+	intent->intent_size = size;
+	intent->write_offset = 0;
+	intent->pkt_size = 0;
+	intent->bounce_buf = NULL;
+
+	spin_lock_irqsave(&ctx->local_rx_intent_lst_lock_lhc1, flags);
+	list_add_tail(&intent->list, &ctx->local_rx_intent_list);
+	spin_unlock_irqrestore(&ctx->local_rx_intent_lst_lock_lhc1, flags);
+	GLINK_DBG_CH(ctx, "%s: L[%u]:%zu Pushed intent\n", __func__,
+			intent->id,
+			intent->intent_size);
+	return intent;
+}
+
+/**
+ * ch_remove_local_rx_intent() - Find and remove RX Intent from list
+ * @ctx:	Local channel context
+ * @liid:	Local channel Intent ID
+ *
+ * This function parses the local intent list for a specific channel
+ * and checks for the intent using the intent ID. If found, the intent
+ * is deleted from the list.
+ */
+void ch_remove_local_rx_intent(struct channel_ctx *ctx, uint32_t liid)
+{
+	struct glink_core_rx_intent *intent, *tmp_intent;
+	unsigned long flags;
+
+	if (ctx->transport_ptr->max_iid < liid) {
+		GLINK_ERR_CH(ctx, "%s: L[%u] Invalid ID.\n", __func__,
+				liid);
+		return;
+	}
+
+	spin_lock_irqsave(&ctx->local_rx_intent_lst_lock_lhc1, flags);
+	list_for_each_entry_safe(intent, tmp_intent, &ctx->local_rx_intent_list,
+									list) {
+		if (liid == intent->id) {
+			list_del(&intent->list);
+			list_add_tail(&intent->list,
+					&ctx->local_rx_intent_free_list);
+			spin_unlock_irqrestore(
+					&ctx->local_rx_intent_lst_lock_lhc1,
+					flags);
+			GLINK_DBG_CH(ctx,
+			"%s: L[%u]:%zu moved intent to Free/unused list\n",
+				__func__,
+				intent->id,
+				intent->intent_size);
+			return;
+		}
+	}
+	spin_unlock_irqrestore(&ctx->local_rx_intent_lst_lock_lhc1, flags);
+	GLINK_ERR_CH(ctx, "%s: L[%u] Intent not found.\n", __func__,
+			liid);
+}
+
+/**
+ * ch_get_dummy_rx_intent() - Get a dummy rx_intent
+ * @ctx:	Local channel context
+ * @liid:	Local channel Intent ID
+ *
+ * This function parses the local intent list for a specific channel and
+ * returns either a matching intent or allocates a dummy one if no matching
+ * intents can be found.
+ *
+ * Return: Pointer to the intent if intent is found else NULL
+ */
+struct glink_core_rx_intent *ch_get_dummy_rx_intent(struct channel_ctx *ctx,
+		uint32_t liid)
+{
+	struct glink_core_rx_intent *intent;
+	unsigned long flags;
+
+	spin_lock_irqsave(&ctx->local_rx_intent_lst_lock_lhc1, flags);
+	if (!list_empty(&ctx->local_rx_intent_list)) {
+		intent = list_first_entry(&ctx->local_rx_intent_list,
+					  struct glink_core_rx_intent, list);
+		spin_unlock_irqrestore(&ctx->local_rx_intent_lst_lock_lhc1,
+					flags);
+		return intent;
+	}
+	spin_unlock_irqrestore(&ctx->local_rx_intent_lst_lock_lhc1, flags);
+
+	intent = ch_get_free_local_rx_intent(ctx);
+	if (!intent) {
+		intent = kzalloc(sizeof(struct glink_core_rx_intent),
+								GFP_ATOMIC);
+		if (!intent) {
+			GLINK_ERR_CH(ctx,
+			"%s: Memory Allocation for local rx_intent failed",
+				__func__);
+			return NULL;
+		}
+		intent->id = ++ctx->max_used_liid;
+	}
+	intent->intent_size = 0;
+	intent->write_offset = 0;
+	intent->pkt_size = 0;
+	intent->bounce_buf = NULL;
+	intent->pkt_priv = NULL;
+
+	spin_lock_irqsave(&ctx->local_rx_intent_lst_lock_lhc1, flags);
+	list_add_tail(&intent->list, &ctx->local_rx_intent_list);
+	spin_unlock_irqrestore(&ctx->local_rx_intent_lst_lock_lhc1, flags);
+	GLINK_DBG_CH(ctx, "%s: L[%u]:%zu Pushed intent\n", __func__,
+			intent->id,
+			intent->intent_size);
+	return intent;
+}
+
+/**
+ * ch_get_local_rx_intent() - Search for an rx_intent
+ * @ctx:	Local channel context
+ * @liid:	Local channel Intent ID
+ *
+ * This function parses the local intent list for a specific channel
+ * and checks for the intent using the intent ID. If found, pointer to
+ * the intent is returned.
+ *
+ * Return: Pointer to the intent if intent is found else NULL
+ */
+struct glink_core_rx_intent *ch_get_local_rx_intent(struct channel_ctx *ctx,
+		uint32_t liid)
+{
+	struct glink_core_rx_intent *intent;
+	unsigned long flags;
+
+	if (ctx->transport_ptr->max_iid < liid) {
+		GLINK_ERR_CH(ctx, "%s: L[%u] Invalid ID.\n", __func__,
+				liid);
+		return NULL;
+	}
+
+	if (ctx->transport_ptr->capabilities & GCAP_INTENTLESS)
+		return ch_get_dummy_rx_intent(ctx, liid);
+
+	spin_lock_irqsave(&ctx->local_rx_intent_lst_lock_lhc1, flags);
+	list_for_each_entry(intent, &ctx->local_rx_intent_list, list) {
+		if (liid == intent->id) {
+			spin_unlock_irqrestore(
+				&ctx->local_rx_intent_lst_lock_lhc1, flags);
+			return intent;
+		}
+	}
+	spin_unlock_irqrestore(&ctx->local_rx_intent_lst_lock_lhc1, flags);
+	GLINK_ERR_CH(ctx, "%s: L[%u] Intent not found.\n", __func__,
+			liid);
+	return NULL;
+}
+
+/**
+ * ch_set_local_rx_intent_notified() - Add an rx intent to the local intent
+ *					notified list
+ * @ctx:	Local channel context
+ * @intent_ptr:	Pointer to the local intent
+ *
+ * This function parses the local intent list for a specific channel
+ * and checks for the intent. If found, the function deletes the intent
+ * from local_rx_intent list and adds it to local_rx_intent_notified list.
+ */
+void ch_set_local_rx_intent_notified(struct channel_ctx *ctx,
+		struct glink_core_rx_intent *intent_ptr)
+{
+	struct glink_core_rx_intent *tmp_intent, *intent;
+	unsigned long flags;
+
+	spin_lock_irqsave(&ctx->local_rx_intent_lst_lock_lhc1, flags);
+	list_for_each_entry_safe(intent, tmp_intent, &ctx->local_rx_intent_list,
+									list) {
+		if (intent == intent_ptr) {
+			list_del(&intent->list);
+			list_add_tail(&intent->list,
+				&ctx->local_rx_intent_ntfy_list);
+			GLINK_DBG_CH(ctx,
+				"%s: L[%u]:%zu Moved intent %s",
+				__func__,
+				intent_ptr->id,
+				intent_ptr->intent_size,
+				"from local to notify list\n");
+			spin_unlock_irqrestore(
+					&ctx->local_rx_intent_lst_lock_lhc1,
+					flags);
+			return;
+		}
+	}
+	spin_unlock_irqrestore(&ctx->local_rx_intent_lst_lock_lhc1, flags);
+	GLINK_ERR_CH(ctx, "%s: L[%u] Intent not found.\n", __func__,
+			intent_ptr->id);
+}
+
+/**
+ * ch_get_local_rx_intent_notified() - Find rx intent in local notified list
+ * @ctx:	Local channel context
+ * @ptr:	Pointer to the rx intent
+ *
+ * This function parses the local intent notify list for a specific channel
+ * and checks for the intent.
+ *
+ * Return: Pointer to the intent if intent is found else NULL.
+ */
+struct glink_core_rx_intent *ch_get_local_rx_intent_notified(
+	struct channel_ctx *ctx, const void *ptr)
+{
+	struct glink_core_rx_intent *ptr_intent;
+	unsigned long flags;
+
+	spin_lock_irqsave(&ctx->local_rx_intent_lst_lock_lhc1, flags);
+	list_for_each_entry(ptr_intent, &ctx->local_rx_intent_ntfy_list,
+								list) {
+		if (ptr_intent->data == ptr || ptr_intent->iovec == ptr ||
+		    ptr_intent->bounce_buf == ptr) {
+			spin_unlock_irqrestore(
+					&ctx->local_rx_intent_lst_lock_lhc1,
+					flags);
+			return ptr_intent;
+		}
+	}
+	spin_unlock_irqrestore(&ctx->local_rx_intent_lst_lock_lhc1, flags);
+	GLINK_ERR_CH(ctx, "%s: Local intent not found\n", __func__);
+	return NULL;
+}
+
+/**
+ * ch_remove_local_rx_intent_notified() - Remove an rx intent from the local
+ *					intent notified list
+ * @ctx:	Local channel context
+ * @liid_ptr:	Pointer to the rx intent
+ * @reuse:	Reuse the rx intent
+ *
+ * This function parses the local intent notify list for a specific channel
+ * and checks for the intent. If found, the function deletes the intent
+ * from local_rx_intent_notified list and adds it to local_rx_intent_free list.
+ */
+void ch_remove_local_rx_intent_notified(struct channel_ctx *ctx,
+	struct glink_core_rx_intent *liid_ptr, bool reuse)
+{
+	struct glink_core_rx_intent *ptr_intent, *tmp_intent;
+	unsigned long flags;
+
+	spin_lock_irqsave(&ctx->local_rx_intent_lst_lock_lhc1, flags);
+	list_for_each_entry_safe(ptr_intent, tmp_intent,
+				&ctx->local_rx_intent_ntfy_list, list) {
+		if (ptr_intent == liid_ptr) {
+			list_del(&ptr_intent->list);
+			GLINK_DBG_CH(ctx,
+				"%s: L[%u]:%zu Removed intent from notify list\n",
+				__func__,
+				ptr_intent->id,
+				ptr_intent->intent_size);
+			kfree(ptr_intent->bounce_buf);
+			ptr_intent->bounce_buf = NULL;
+			ptr_intent->write_offset = 0;
+			ptr_intent->pkt_size = 0;
+			if (reuse)
+				list_add_tail(&ptr_intent->list,
+					&ctx->local_rx_intent_list);
+			else
+				list_add_tail(&ptr_intent->list,
+					&ctx->local_rx_intent_free_list);
+			spin_unlock_irqrestore(
+					&ctx->local_rx_intent_lst_lock_lhc1,
+					flags);
+			return;
+		}
+	}
+	spin_unlock_irqrestore(&ctx->local_rx_intent_lst_lock_lhc1, flags);
+	GLINK_ERR_CH(ctx, "%s: L[%u] Intent not found.\n", __func__,
+			liid_ptr->id);
+}
+
+/**
+ * ch_get_free_local_rx_intent() - Return an rx intent from the local intent
+ *					free list
+ * @ctx:	Local channel context
+ *
+ * This function parses the local_rx_intent_free list for a specific channel
+ * and checks for a free unused intent. If found, the function returns the
+ * free intent pointer, else NULL.
+ */
+struct glink_core_rx_intent *ch_get_free_local_rx_intent(
+	struct channel_ctx *ctx)
+{
+	struct glink_core_rx_intent *ptr_intent = NULL;
+	unsigned long flags;
+
+	spin_lock_irqsave(&ctx->local_rx_intent_lst_lock_lhc1, flags);
+	if (!list_empty(&ctx->local_rx_intent_free_list)) {
+		ptr_intent = list_first_entry(&ctx->local_rx_intent_free_list,
+				struct glink_core_rx_intent,
+				list);
+		list_del(&ptr_intent->list);
+	}
+	spin_unlock_irqrestore(&ctx->local_rx_intent_lst_lock_lhc1, flags);
+	return ptr_intent;
+}
+
+/**
+ * ch_purge_intent_lists() - Remove all intents for a channel
+ *
+ * @ctx:	Local channel context
+ *
+ * This function parses the local intent lists for a specific channel and
+ * removes and frees all intents.
+ */
+void ch_purge_intent_lists(struct channel_ctx *ctx)
+{
+	struct glink_core_rx_intent *ptr_intent, *tmp_intent;
+	struct glink_core_tx_pkt *tx_info, *tx_info_temp;
+	unsigned long flags;
+
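+	/* Abort all in-flight tx packets before freeing the intent lists */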
+	spin_lock_irqsave(&ctx->tx_lists_lock_lhc3, flags);
+	list_for_each_entry_safe(tx_info, tx_info_temp, &ctx->tx_active,
+			list_node) {
+		ctx->notify_tx_abort(ctx, ctx->user_priv,
+				tx_info->pkt_priv);
+		rwref_put(&tx_info->pkt_ref);
+	}
+	spin_unlock_irqrestore(&ctx->tx_lists_lock_lhc3, flags);
+
+	spin_lock_irqsave(&ctx->tx_pending_rmt_done_lock_lhc4, flags);
+	list_for_each_entry_safe(tx_info, tx_info_temp,
+				 &ctx->tx_pending_remote_done, list_done) {
+		ctx->notify_tx_abort(ctx, ctx->user_priv, tx_info->pkt_priv);
+		rwref_put(&tx_info->pkt_ref);
+	}
+	spin_unlock_irqrestore(&ctx->tx_pending_rmt_done_lock_lhc4, flags);
+
+	spin_lock_irqsave(&ctx->local_rx_intent_lst_lock_lhc1, flags);
+	list_for_each_entry_safe(ptr_intent, tmp_intent,
+				&ctx->local_rx_intent_list, list) {
+		ctx->notify_rx_abort(ctx, ctx->user_priv,
+				ptr_intent->pkt_priv);
+		ctx->transport_ptr->ops->deallocate_rx_intent(
+					ctx->transport_ptr->ops, ptr_intent);
+		list_del(&ptr_intent->list);
+		kfree(ptr_intent);
+	}
+
+	if (!list_empty(&ctx->local_rx_intent_ntfy_list))
+		/*
+		 * The client is still processing an rx_notify() call and has
+		 * not yet called glink_rx_done() to return the pointer to us.
+		 * glink_rx_done() will do the appropriate cleanup when this
+		 * call occurs, but log a message here just for internal state
+		 * tracking.
+		 */
+		GLINK_INFO_CH(ctx, "%s: waiting on glink_rx_done()\n",
+				__func__);
+
+	list_for_each_entry_safe(ptr_intent, tmp_intent,
+				&ctx->local_rx_intent_free_list, list) {
+		list_del(&ptr_intent->list);
+		kfree(ptr_intent);
+	}
+	ctx->max_used_liid = 0;
+	spin_unlock_irqrestore(&ctx->local_rx_intent_lst_lock_lhc1, flags);
+
+	spin_lock_irqsave(&ctx->rmt_rx_intent_lst_lock_lhc2, flags);
+	list_for_each_entry_safe(ptr_intent, tmp_intent,
+			&ctx->rmt_rx_intent_list, list) {
+		list_del(&ptr_intent->list);
+		kfree(ptr_intent);
+	}
+	spin_unlock_irqrestore(&ctx->rmt_rx_intent_lst_lock_lhc2, flags);
+}
+
+/**
+ * ch_get_tx_pending_remote_done() - Look up a packet that is waiting for
+ *                                   the remote-done notification.
+ * @ctx:	Pointer to the channel context
+ * @riid:	riid of transmit packet
+ *
+ * This function searches the tx_pending_remote_done list for a packet whose
+ * riid matches @riid.
+ *
+ * The tx_lists_lock_lhc3 lock needs to be held while calling this function.
+ *
+ * Return: Pointer to the tx packet, or NULL if it is not found
+ */
+struct glink_core_tx_pkt *ch_get_tx_pending_remote_done(
+	struct channel_ctx *ctx, uint32_t riid)
+{
+	struct glink_core_tx_pkt *tx_pkt;
+	unsigned long flags;
+
+	if (!ctx) {
+		GLINK_ERR("%s: Invalid context pointer", __func__);
+		return NULL;
+	}
+
+	spin_lock_irqsave(&ctx->tx_pending_rmt_done_lock_lhc4, flags);
+	list_for_each_entry(tx_pkt, &ctx->tx_pending_remote_done, list_done) {
+		if (tx_pkt->riid == riid) {
+			if (tx_pkt->size_remaining) {
+				GLINK_ERR_CH(ctx, "%s: R[%u] TX not complete",
+						__func__, riid);
+				tx_pkt = NULL;
+			}
+			spin_unlock_irqrestore(
+				&ctx->tx_pending_rmt_done_lock_lhc4, flags);
+			return tx_pkt;
+		}
+	}
+	spin_unlock_irqrestore(&ctx->tx_pending_rmt_done_lock_lhc4, flags);
+
+	GLINK_ERR_CH(ctx, "%s: R[%u] Tx packet for intent not found.\n",
+			__func__, riid);
+	return NULL;
+}
+
+/**
+ * ch_remove_tx_pending_remote_done() - Removes a packet transmit context for a
+ *                     packet that is waiting for the remote-done notification
+ * @ctx:	Pointer to the channel context
+ * @tx_pkt:	Pointer to the transmit packet
+ *
+ * This function parses through tx_pending_remote_done and removes a
+ * packet that matches @tx_pkt.
+ */
+void ch_remove_tx_pending_remote_done(struct channel_ctx *ctx,
+	struct glink_core_tx_pkt *tx_pkt)
+{
+	struct glink_core_tx_pkt *local_tx_pkt, *tmp_tx_pkt;
+	unsigned long flags;
+
+	if (!ctx || !tx_pkt) {
+		GLINK_ERR("%s: Invalid input", __func__);
+		return;
+	}
+
+	spin_lock_irqsave(&ctx->tx_pending_rmt_done_lock_lhc4, flags);
+	list_for_each_entry_safe(local_tx_pkt, tmp_tx_pkt,
+			&ctx->tx_pending_remote_done, list_done) {
+		if (tx_pkt == local_tx_pkt) {
+			list_del_init(&tx_pkt->list_done);
+			GLINK_DBG_CH(ctx,
+				"%s: R[%u] Removed Tx packet for intent\n",
+				__func__,
+				tx_pkt->riid);
+			rwref_put(&tx_pkt->pkt_ref);
+			spin_unlock_irqrestore(
+				&ctx->tx_pending_rmt_done_lock_lhc4, flags);
+			return;
+		}
+	}
+	spin_unlock_irqrestore(&ctx->tx_pending_rmt_done_lock_lhc4, flags);
+
+	GLINK_ERR_CH(ctx, "%s: R[%u] Tx packet for intent not found", __func__,
+			tx_pkt->riid);
+}
+
+/**
+ * glink_add_free_lcid_list() - add the lcid of a channel that is about to be
+ *				deleted to the free lcid list
+ * @ctx:	Pointer to channel context.
+ */
+static void glink_add_free_lcid_list(struct channel_ctx *ctx)
+{
+	struct channel_lcid *free_lcid;
+	unsigned long flags;
+
+	free_lcid = kzalloc(sizeof(*free_lcid), GFP_KERNEL);
+	if (!free_lcid) {
+		GLINK_ERR(
+			"%s: allocation failed on xprt:edge [%s:%s] for lcid [%d]\n",
+			__func__, ctx->transport_ptr->name,
+			ctx->transport_ptr->edge, ctx->lcid);
+		return;
+	}
+	free_lcid->lcid = ctx->lcid;
+	spin_lock_irqsave(&ctx->transport_ptr->xprt_ctx_lock_lhb1, flags);
+	list_add_tail(&free_lcid->list_node,
+			&ctx->transport_ptr->free_lcid_list);
+	spin_unlock_irqrestore(&ctx->transport_ptr->xprt_ctx_lock_lhb1,
+					flags);
+}
+
+/**
+ * glink_ch_ctx_release - Free the channel context
+ * @ch_st_lock:	handle to the rwref_lock associated with the channel
+ *
+ * This should only be called when the reference count associated with the
+ * channel goes to zero.
+ */
+static void glink_ch_ctx_release(struct rwref_lock *ch_st_lock)
+{
+	struct channel_ctx *ctx = container_of(ch_st_lock, struct channel_ctx,
+						ch_state_lhb2);
+	ctx->transport_ptr = NULL;
+	kfree(ctx);
+	GLINK_INFO("%s: freed the channel ctx in pid [%d]\n", __func__,
+			current->pid);
+	ctx = NULL;
+}
+
+/**
+ * ch_name_to_ch_ctx_create() - lookup a channel by name, create the channel if
+ *                              it is not found and get reference of context.
+ * @xprt_ctx:	Transport to search for a matching channel.
+ * @name:	Name of the desired channel.
+ *
+ * Return: The channel corresponding to @name, NULL if a matching channel was
+ *         not found AND a new channel could not be created.
+ */
+static struct channel_ctx *ch_name_to_ch_ctx_create(
+					struct glink_core_xprt_ctx *xprt_ctx,
+					const char *name)
+{
+	struct channel_ctx *entry;
+	struct channel_ctx *ctx;
+	struct channel_ctx *temp;
+	unsigned long flags;
+	struct channel_lcid *flcid;
+
+	ctx = kzalloc(sizeof(struct channel_ctx), GFP_KERNEL);
+	if (!ctx) {
+		GLINK_ERR_XPRT(xprt_ctx,
+			"%s: Failed to allocate ctx, checking for an existing one\n",
+			__func__);
+		goto check_ctx;
+	}
+
+	ctx->local_open_state = GLINK_CHANNEL_CLOSED;
+	strlcpy(ctx->name, name, GLINK_NAME_SIZE);
+	rwref_lock_init(&ctx->ch_state_lhb2, glink_ch_ctx_release);
+	INIT_LIST_HEAD(&ctx->tx_ready_list_node);
+	init_completion(&ctx->int_req_ack_complete);
+	init_completion(&ctx->int_req_complete);
+	INIT_LIST_HEAD(&ctx->local_rx_intent_list);
+	INIT_LIST_HEAD(&ctx->local_rx_intent_ntfy_list);
+	INIT_LIST_HEAD(&ctx->local_rx_intent_free_list);
+	spin_lock_init(&ctx->local_rx_intent_lst_lock_lhc1);
+	INIT_LIST_HEAD(&ctx->rmt_rx_intent_list);
+	spin_lock_init(&ctx->rmt_rx_intent_lst_lock_lhc2);
+	INIT_LIST_HEAD(&ctx->tx_active);
+	spin_lock_init(&ctx->tx_pending_rmt_done_lock_lhc4);
+	INIT_LIST_HEAD(&ctx->tx_pending_remote_done);
+	spin_lock_init(&ctx->tx_lists_lock_lhc3);
+
+check_ctx:
+	rwref_write_get(&xprt_ctx->xprt_state_lhb0);
+	if (xprt_ctx->local_state != GLINK_XPRT_OPENED) {
+		kfree(ctx);
+		rwref_write_put(&xprt_ctx->xprt_state_lhb0);
+		return NULL;
+	}
+	spin_lock_irqsave(&xprt_ctx->xprt_ctx_lock_lhb1, flags);
+	list_for_each_entry_safe(entry, temp, &xprt_ctx->channels,
+		    port_list_node)
+		if (!strcmp(entry->name, name) && !entry->pending_delete) {
+			spin_unlock_irqrestore(&xprt_ctx->xprt_ctx_lock_lhb1,
+					flags);
+			kfree(ctx);
+			rwref_get(&entry->ch_state_lhb2);
+			rwref_write_put(&xprt_ctx->xprt_state_lhb0);
+			return entry;
+		}
+
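+	/*
+	 * For a newly created ctx, assign an lcid: reuse a recycled id from
+	 * the free list if one is available, otherwise take the next unused
+	 * id.
+	 */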
+	if (ctx) {
+		if (list_empty(&xprt_ctx->free_lcid_list)) {
+			if (xprt_ctx->next_lcid > xprt_ctx->max_cid) {
+				/* no more channels available */
+				GLINK_ERR_XPRT(xprt_ctx,
+					"%s: unable to exceed %u channels\n",
+					__func__, xprt_ctx->max_cid);
+				spin_unlock_irqrestore(
+						&xprt_ctx->xprt_ctx_lock_lhb1,
+						flags);
+				kfree(ctx);
+				rwref_write_put(&xprt_ctx->xprt_state_lhb0);
+				return NULL;
+			} else {
+				ctx->lcid = xprt_ctx->next_lcid++;
+			}
+		} else {
+			flcid = list_first_entry(&xprt_ctx->free_lcid_list,
+						struct channel_lcid, list_node);
+			ctx->lcid = flcid->lcid;
+			list_del(&flcid->list_node);
+			kfree(flcid);
+		}
+
+		ctx->transport_ptr = xprt_ctx;
+		rwref_get(&ctx->ch_state_lhb2);
+		list_add_tail(&ctx->port_list_node, &xprt_ctx->channels);
+
+		GLINK_INFO_PERF_CH_XPRT(ctx, xprt_ctx,
+			"%s: local:GLINK_CHANNEL_CLOSED\n",
+			__func__);
+	}
+	spin_unlock_irqrestore(&xprt_ctx->xprt_ctx_lock_lhb1, flags);
+	rwref_write_put(&xprt_ctx->xprt_state_lhb0);
+	mutex_lock(&xprt_ctx->xprt_dbgfs_lock_lhb4);
+	if (ctx != NULL)
+		glink_debugfs_add_channel(ctx, xprt_ctx);
+	mutex_unlock(&xprt_ctx->xprt_dbgfs_lock_lhb4);
+	return ctx;
+}
+
+/**
+ * ch_add_rcid() - add a remote channel identifier to an existing channel
+ * @xprt_ctx:	Transport the channel resides on.
+ * @ctx:	Channel receiving the identifier.
+ * @rcid:	The remote channel identifier.
+ */
+static void ch_add_rcid(struct glink_core_xprt_ctx *xprt_ctx,
+			struct channel_ctx *ctx,
+			uint32_t rcid)
+{
+	ctx->rcid = rcid;
+}
+
+/**
+ * ch_update_local_state() - Update the local channel state
+ * @ctx:	Pointer to channel context.
+ * @lstate:	Local channel state.
+ *
+ * Return: True if the channel is fully closed as a result of this update,
+ *	false otherwise.
+ */
+static bool ch_update_local_state(struct channel_ctx *ctx,
+					enum local_channel_state_e lstate)
+{
+	bool is_fully_closed;
+
+	rwref_write_get(&ctx->ch_state_lhb2);
+	ctx->local_open_state = lstate;
+	is_fully_closed = ch_is_fully_closed(ctx);
+	rwref_write_put(&ctx->ch_state_lhb2);
+
+	return is_fully_closed;
+}
+
+/**
+ * ch_update_rmt_state() - Update the remote channel state
+ * @ctx:	Pointer to channel context.
+ * @rstate:	Remote Channel state.
+ *
+ * Return: True if the channel is fully closed as result of this update,
+ *	false otherwise.
+ */
+static bool ch_update_rmt_state(struct channel_ctx *ctx, bool rstate)
+{
+	bool is_fully_closed;
+
+	rwref_write_get(&ctx->ch_state_lhb2);
+	ctx->remote_opened = rstate;
+	is_fully_closed = ch_is_fully_closed(ctx);
+	rwref_write_put(&ctx->ch_state_lhb2);
+
+	return is_fully_closed;
+}
+
+/**
+ * ch_is_fully_opened() - Verify if a channel is fully opened
+ * @ctx:	Pointer to channel context
+ *
+ * Return: True if open, else false
+ */
+static bool ch_is_fully_opened(struct channel_ctx *ctx)
+{
+	if (ctx->remote_opened && ctx->local_open_state == GLINK_CHANNEL_OPENED)
+		return true;
+
+	return false;
+}
+
+/**
+ * ch_is_fully_closed() - Verify if a channel is closed on both sides
+ * @ctx:	Pointer to channel context
+ *
+ * Return: True if closed on both sides, else false
+ */
+static bool ch_is_fully_closed(struct channel_ctx *ctx)
+{
+	if (!ctx->remote_opened &&
+			ctx->local_open_state == GLINK_CHANNEL_CLOSED)
+		return true;
+
+	return false;
+}
+
+/**
+ * find_open_transport() - find a specific open transport
+ * @edge:		Edge the transport is on.
+ * @name:		Name of the transport (or NULL if no preference)
+ * @initial_xprt:	The specified transport is the start for migration
+ * @best_id:		The best transport found for this connection
+ *
+ * Find an open transport corresponding to the specified @name and @edge.  @edge
+ * is expected to be valid.  @name is expected to be NULL (unspecified) or
+ * valid.  If @name is not specified, then the best transport found on the
+ * specified edge will be returned.
+ *
+ * Return: Transport with the specified name on the specified edge, if open.
+ *	NULL if the transport exists, but is not fully open.  ERR_PTR(-ENODEV)
+ *	if no such transport exists.
+ */
+static struct glink_core_xprt_ctx *find_open_transport(const char *edge,
+						       const char *name,
+						       bool initial_xprt,
+						       uint16_t *best_id)
+{
+	struct glink_core_xprt_ctx *xprt;
+	struct glink_core_xprt_ctx *best_xprt = NULL;
+	struct glink_core_xprt_ctx *ret;
+	bool first = true;
+
+	ret = (struct glink_core_xprt_ctx *)ERR_PTR(-ENODEV);
+	*best_id = USHRT_MAX;
+
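+	/* Scan all transports on this edge; the lowest id is most preferred */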
+	mutex_lock(&transport_list_lock_lha0);
+	list_for_each_entry(xprt, &transport_list, list_node) {
+		if (strcmp(edge, xprt->edge))
+			continue;
+		if (first) {
+			first = false;
+			ret = NULL;
+		}
+		if (!xprt_is_fully_opened(xprt))
+			continue;
+
+		if (xprt->id < *best_id) {
+			*best_id = xprt->id;
+			best_xprt = xprt;
+		}
+
+		/*
+		 * Braces are required in this instance because the else will
+		 * attach to the wrong if otherwise.
+		 */
+		if (name) {
+			if (!strcmp(name, xprt->name))
+				ret = xprt;
+		} else {
+			ret = best_xprt;
+		}
+	}
+
+	mutex_unlock(&transport_list_lock_lha0);
+
+	if (IS_ERR_OR_NULL(ret))
+		return ret;
+	if (!initial_xprt)
+		*best_id = ret->id;
+
+	return ret;
+}
+
+/**
+ * xprt_is_fully_opened() - check the open status of a transport
+ * @xprt:	Transport being checked.
+ *
+ * Return: True if the transport is fully opened, false otherwise.
+ */
+static bool xprt_is_fully_opened(struct glink_core_xprt_ctx *xprt)
+{
+	if (xprt->remote_neg_completed &&
+					xprt->local_state == GLINK_XPRT_OPENED)
+		return true;
+
+	return false;
+}
+
+/**
+ * glink_dummy_notify_rx_intent_req() - Dummy RX Request
+ *
+ * @handle:	Channel handle (ignored)
+ * @priv:	Private data pointer (ignored)
+ * @req_size:	Requested size (ignored)
+ *
+ * Dummy RX intent request if client does not implement the optional callback
+ * function.
+ *
+ * Return:  False
+ */
+static bool glink_dummy_notify_rx_intent_req(void *handle, const void *priv,
+	size_t req_size)
+{
+	return false;
+}
+
+/**
+ * glink_dummy_notify_rx_sigs() - Dummy signal callback
+ *
+ * @handle:	Channel handle (ignored)
+ * @priv:	Private data pointer (ignored)
+ * @old_sigs:	Previous signal values (ignored)
+ * @new_sigs:	New signal values (ignored)
+ *
+ * Dummy signal callback if client does not implement the optional callback
+ * function.
+ */
+static void glink_dummy_notify_rx_sigs(void *handle, const void *priv,
+				uint32_t old_sigs, uint32_t new_sigs)
+{
+	/* intentionally left blank */
+}
+
+/**
+ * glink_dummy_notify_rx_abort() - Dummy rx abort callback
+ *
+ * @handle:	Channel handle (ignored)
+ * @priv:	Private data pointer (ignored)
+ * @pkt_priv:	Private intent data pointer (ignored)
+ *
+ * Dummy rx abort callback if client does not implement the optional callback
+ * function.
+ */
+static void glink_dummy_notify_rx_abort(void *handle, const void *priv,
+				const void *pkt_priv)
+{
+	/* intentionally left blank */
+}
+
+/**
+ * glink_dummy_notify_tx_abort() - Dummy tx abort callback
+ *
+ * @handle:	Channel handle (ignored)
+ * @priv:	Private data pointer (ignored)
+ * @pkt_priv:	Private intent data pointer (ignored)
+ *
+ * Dummy tx abort callback if client does not implement the optional callback
+ * function.
+ */
+static void glink_dummy_notify_tx_abort(void *handle, const void *priv,
+				const void *pkt_priv)
+{
+	/* intentionally left blank */
+}
+
+/**
+ * dummy_poll() - a dummy poll() for transports that don't define one
+ * @if_ptr:	The transport interface handle for this transport.
+ * @lcid:	The channel to poll.
+ *
+ * Return: An error to indicate that this operation is unsupported.
+ */
+static int dummy_poll(struct glink_transport_if *if_ptr, uint32_t lcid)
+{
+	return -EOPNOTSUPP;
+}
+
+/**
+ * dummy_reuse_rx_intent() - a dummy reuse_rx_intent() for transports that
+ *			     don't define one
+ * @if_ptr:	The transport interface handle for this transport.
+ * @intent:	The intent to reuse.
+ *
+ * Return: Success.
+ */
+static int dummy_reuse_rx_intent(struct glink_transport_if *if_ptr,
+				 struct glink_core_rx_intent *intent)
+{
+	return 0;
+}
+
+/**
+ * dummy_mask_rx_irq() - a dummy mask_rx_irq() for transports that don't define
+ *			 one
+ * @if_ptr:	The transport interface handle for this transport.
+ * @lcid:	The local channel id for this channel.
+ * @mask:	True to mask the irq, false to unmask.
+ * @pstruct:	Platform defined structure with data necessary for masking.
+ *
+ * Return: An error to indicate that this operation is unsupported.
+ */
+static int dummy_mask_rx_irq(struct glink_transport_if *if_ptr, uint32_t lcid,
+			     bool mask, void *pstruct)
+{
+	return -EOPNOTSUPP;
+}
+
+/**
+ * dummy_wait_link_down() - a dummy wait_link_down() for transports that don't
+ *			define one
+ * @if_ptr:	The transport interface handle for this transport.
+ *
+ * Return: An error to indicate that this operation is unsupported.
+ */
+static int dummy_wait_link_down(struct glink_transport_if *if_ptr)
+{
+	return -EOPNOTSUPP;
+}
+
+/**
+ * dummy_allocate_rx_intent() - a dummy RX intent allocation function that does
+ *				not allocate anything
+ * @if_ptr:	The transport the intent is associated with.
+ * @size:	Size of intent.
+ * @intent:	Pointer to the intent structure.
+ *
+ * Return:	Success.
+ */
+static int dummy_allocate_rx_intent(struct glink_transport_if *if_ptr,
+			size_t size, struct glink_core_rx_intent *intent)
+{
+	return 0;
+}
+
+/**
+ * dummy_tx_cmd_tracer_pkt() - a dummy tracer packet tx cmd for transports
+ *                             that don't define one
+ * @if_ptr:	The transport interface handle for this transport.
+ * @lcid:	The channel in which the tracer packet is transmitted.
+ * @pctx:	Context of the packet to be transmitted.
+ *
+ * Return: 0.
+ */
+static int dummy_tx_cmd_tracer_pkt(struct glink_transport_if *if_ptr,
+		uint32_t lcid, struct glink_core_tx_pkt *pctx)
+{
+	pctx->size_remaining = 0;
+	return 0;
+}
+
+/**
+ * dummy_deallocate_rx_intent() - a dummy rx intent deallocation function that
+ *				does not deallocate anything
+ * @if_ptr:	The transport the intent is associated with.
+ * @intent:	Pointer to the intent structure.
+ *
+ * Return:	Success.
+ */
+static int dummy_deallocate_rx_intent(struct glink_transport_if *if_ptr,
+				struct glink_core_rx_intent *intent)
+{
+	return 0;
+}
+
+/**
+ * dummy_tx_cmd_local_rx_intent() - dummy local rx intent request
+ * @if_ptr:	The transport to transmit on.
+ * @lcid:	The local channel id to encode.
+ * @size:	The intent size to encode.
+ * @liid:	The local intent id to encode.
+ *
+ * Return:	Success.
+ */
+static int dummy_tx_cmd_local_rx_intent(struct glink_transport_if *if_ptr,
+				uint32_t lcid, size_t size, uint32_t liid)
+{
+	return 0;
+}
+
+/**
+ * dummy_tx_cmd_local_rx_done() - dummy rx done command
+ * @if_ptr:	The transport to transmit on.
+ * @lcid:	The local channel id to encode.
+ * @liid:	The local intent id to encode.
+ * @reuse:	Reuse the consumed intent.
+ */
+static void dummy_tx_cmd_local_rx_done(struct glink_transport_if *if_ptr,
+				uint32_t lcid, uint32_t liid, bool reuse)
+{
+	/* intentionally left blank */
+}
+
+/**
+ * dummy_tx() - dummy tx() that does not send anything
+ * @if_ptr:	The transport to transmit on.
+ * @lcid:	The local channel id to encode.
+ * @pctx:	The data to encode.
+ *
+ * Return: Number of bytes written i.e. zero.
+ */
+static int dummy_tx(struct glink_transport_if *if_ptr, uint32_t lcid,
+				struct glink_core_tx_pkt *pctx)
+{
+	return 0;
+}
+
+/**
+ * dummy_tx_cmd_rx_intent_req() - dummy rx intent request function
+ * @if_ptr:	The transport to transmit on.
+ * @lcid:	The local channel id to encode.
+ * @size:	The requested intent size to encode.
+ *
+ * Return:	Success.
+ */
+static int dummy_tx_cmd_rx_intent_req(struct glink_transport_if *if_ptr,
+				uint32_t lcid, size_t size)
+{
+	return 0;
+}
+
+/**
+ * dummy_tx_cmd_remote_rx_intent_req_ack() - dummy rx intent request ack
+ * @if_ptr:	The transport to transmit on.
+ * @lcid:	The local channel id to encode.
+ * @granted:	The request response to encode.
+ *
+ * Return:	Success.
+ */
+static int dummy_tx_cmd_remote_rx_intent_req_ack(
+					struct glink_transport_if *if_ptr,
+					uint32_t lcid, bool granted)
+{
+	return 0;
+}
+
+/**
+ * dummy_tx_cmd_set_sigs() - dummy signals ack transmit function
+ * @if_ptr:	The transport to transmit on.
+ * @lcid:	The local channel id to encode.
+ * @sigs:	The signals to encode.
+ *
+ * Return:	Success.
+ */
+static int dummy_tx_cmd_set_sigs(struct glink_transport_if *if_ptr,
+				uint32_t lcid, uint32_t sigs)
+{
+	return 0;
+}
+
+/**
+ * dummy_tx_cmd_ch_close() - dummy channel close transmit function
+ * @if_ptr:	The transport to transmit on.
+ * @lcid:	The local channel id to encode.
+ *
+ * Return:	Success.
+ */
+static int dummy_tx_cmd_ch_close(struct glink_transport_if *if_ptr,
+				uint32_t lcid)
+{
+	return 0;
+}
+
+/**
+ * dummy_tx_cmd_ch_remote_close_ack() - dummy channel close ack sending function
+ * @if_ptr:	The transport to transmit on.
+ * @rcid:	The remote channel id to encode.
+ */
+static void dummy_tx_cmd_ch_remote_close_ack(struct glink_transport_if *if_ptr,
+				       uint32_t rcid)
+{
+	/* intentionally left blank */
+}
+
+/**
+ * dummy_tx_cmd_ch_open() - dummy channel open cmd sending function
+ * @if_ptr:	The transport to transmit on.
+ * @lcid:	The local channel id to encode.
+ * @name:	The channel name to encode.
+ * @req_xprt:	The transport the core would like to migrate this channel to.
+ *
+ * Return: An error to indicate that this operation is unsupported.
+ */
+static int dummy_tx_cmd_ch_open(struct glink_transport_if *if_ptr,
+			uint32_t lcid, const char *name,
+			uint16_t req_xprt)
+{
+	return -EOPNOTSUPP;
+}
+
+/**
+ * dummy_tx_cmd_ch_remote_open_ack() - convert a channel open ack cmd to wire
+ *				format and transmit
+ * @if_ptr:	The transport to transmit on.
+ * @rcid:	The remote channel id to encode.
+ * @xprt_resp:	The response to a transport migration request.
+ */
+static void dummy_tx_cmd_ch_remote_open_ack(struct glink_transport_if *if_ptr,
+					uint32_t rcid, uint16_t xprt_resp)
+{
+	/* intentionally left blank */
+}
+
+/**
+ * dummy_get_power_vote_ramp_time() - Dummy Power vote ramp time
+ * @if_ptr:	The transport to transmit on.
+ * @state:	The power state being requested from the transport.
+ */
+static unsigned long dummy_get_power_vote_ramp_time(
+		struct glink_transport_if *if_ptr, uint32_t state)
+{
+	return (unsigned long)-EOPNOTSUPP;
+}
+
+/**
+ * dummy_power_vote() - Dummy Power vote operation
+ * @if_ptr:	The transport to transmit on.
+ * @state:	The power state being requested from the transport.
+ */
+static int dummy_power_vote(struct glink_transport_if *if_ptr,
+			    uint32_t state)
+{
+	return -EOPNOTSUPP;
+}
+
+/**
+ * dummy_power_unvote() - Dummy Power unvote operation
+ * @if_ptr:	The transport to transmit on.
+ */
+static int dummy_power_unvote(struct glink_transport_if *if_ptr)
+{
+	return -EOPNOTSUPP;
+}
+
+/**
+ * dummy_rx_rt_vote() - Dummy RX Realtime thread vote
+ * @if_ptr:	The transport to transmit on.
+ */
+static int dummy_rx_rt_vote(struct glink_transport_if *if_ptr)
+{
+	return -EOPNOTSUPP;
+}
+
+/**
+ * dummy_rx_rt_unvote() - Dummy RX Realtime thread unvote
+ * @if_ptr:	The transport to transmit on.
+ */
+static int dummy_rx_rt_unvote(struct glink_transport_if *if_ptr)
+{
+	return -EOPNOTSUPP;
+}
+
+/**
+ * notif_if_up_all_xprts() - Check and notify existing transport state if up
+ * @notif_info:	Data structure containing transport information to be notified.
+ *
+ * This function is called when the client registers a notifier to know about
+ * the state of a transport. This function matches the existing transports with
+ * the transport in the "notif_info" parameter. When a matching transport is
+ * found, the callback function in the "notif_info" parameter is called with
+ * the state of the matching transport.
+ *
+ * If an edge or transport is not defined, then all edges and/or transports
+ * will be matched and will receive up notifications.
+ */
+static void notif_if_up_all_xprts(
+		struct link_state_notifier_info *notif_info)
+{
+	struct glink_core_xprt_ctx *xprt_ptr;
+	struct glink_link_state_cb_info cb_info;
+
+	cb_info.link_state = GLINK_LINK_STATE_UP;
+	mutex_lock(&transport_list_lock_lha0);
+	list_for_each_entry(xprt_ptr, &transport_list, list_node) {
+		if (strlen(notif_info->edge) &&
+		    strcmp(notif_info->edge, xprt_ptr->edge))
+			continue;
+
+		if (strlen(notif_info->transport) &&
+		    strcmp(notif_info->transport, xprt_ptr->name))
+			continue;
+
+		if (!xprt_is_fully_opened(xprt_ptr))
+			continue;
+
+		cb_info.transport = xprt_ptr->name;
+		cb_info.edge = xprt_ptr->edge;
+		notif_info->glink_link_state_notif_cb(&cb_info,
+						notif_info->priv);
+	}
+	mutex_unlock(&transport_list_lock_lha0);
+}
+
+/**
+ * check_link_notifier_and_notify() - Check and notify clients about link state
+ * @xprt_ptr:	Transport whose state to be notified.
+ * @link_state:	State of the transport to be notified.
+ *
+ * This function is called when the state of the transport changes. This
+ * function matches the transport with the clients that have registered to
+ * be notified about the state changes. When a matching client notifier is
+ * found, the callback function in the client notifier is called with the
+ * new state of the transport.
+ */
+static void check_link_notifier_and_notify(struct glink_core_xprt_ctx *xprt_ptr,
+					   enum glink_link_state link_state)
+{
+	struct link_state_notifier_info *notif_info;
+	struct glink_link_state_cb_info cb_info;
+
+	cb_info.link_state = link_state;
+	mutex_lock(&link_state_notifier_lock_lha1);
+	list_for_each_entry(notif_info, &link_state_notifier_list, list) {
+		if (strlen(notif_info->edge) &&
+		    strcmp(notif_info->edge, xprt_ptr->edge))
+			continue;
+
+		if (strlen(notif_info->transport) &&
+		    strcmp(notif_info->transport, xprt_ptr->name))
+			continue;
+
+		cb_info.transport = xprt_ptr->name;
+		cb_info.edge = xprt_ptr->edge;
+		notif_info->glink_link_state_notif_cb(&cb_info,
+						notif_info->priv);
+	}
+	mutex_unlock(&link_state_notifier_lock_lha1);
+}
+
+/**
+ * glink_open() - Open a GLINK channel
+ *
+ * @cfg:	Open configuration structure (the structure is copied before
+ *		glink_open returns).  All unused fields should be zero-filled.
+ *
+ * Clients must not call this from link state callback context; invoke it
+ * from a client-owned thread instead.
+ *
+ * Return:  Pointer to channel on success, PTR_ERR() with standard Linux
+ * error code on failure.
+ */
+void *glink_open(const struct glink_open_config *cfg)
+{
+	struct channel_ctx *ctx = NULL;
+	struct glink_core_xprt_ctx *transport_ptr;
+	size_t len;
+	int ret;
+	uint16_t best_id;
+
+	if (!cfg->edge || !cfg->name) {
+		GLINK_ERR("%s: !cfg->edge || !cfg->name\n", __func__);
+		return ERR_PTR(-EINVAL);
+	}
+
+	len = strlen(cfg->edge);
+	if (len == 0 || len >= GLINK_NAME_SIZE) {
+		GLINK_ERR("%s: [EDGE] len == 0 || len >= GLINK_NAME_SIZE\n",
+				__func__);
+		return ERR_PTR(-EINVAL);
+	}
+
+	len = strlen(cfg->name);
+	if (len == 0 || len >= GLINK_NAME_SIZE) {
+		GLINK_ERR("%s: [NAME] len == 0 || len >= GLINK_NAME_SIZE\n",
+				__func__);
+		return ERR_PTR(-EINVAL);
+	}
+
+	if (cfg->transport) {
+		len = strlen(cfg->transport);
+		if (len == 0 || len >= GLINK_NAME_SIZE) {
+			GLINK_ERR("%s: [TRANSPORT] len == 0 || %s\n",
+				__func__,
+				"len >= GLINK_NAME_SIZE");
+			return ERR_PTR(-EINVAL);
+		}
+	}
+
+	/* confirm required notification parameters */
+	if (!(cfg->notify_rx || cfg->notify_rxv) || !cfg->notify_tx_done
+		|| !cfg->notify_state
+		|| ((cfg->options & GLINK_OPT_RX_INTENT_NOTIF)
+			&& !cfg->notify_remote_rx_intent)) {
+		GLINK_ERR("%s: Incorrect notification parameters\n", __func__);
+		return ERR_PTR(-EINVAL);
+	}
+
+	/* find transport */
+	transport_ptr = find_open_transport(cfg->edge, cfg->transport,
+					cfg->options & GLINK_OPT_INITIAL_XPORT,
+					&best_id);
+	if (IS_ERR_OR_NULL(transport_ptr)) {
+		GLINK_ERR("%s:%s %s: Error %d - unable to find transport\n",
+				cfg->transport, cfg->edge, __func__,
+				(unsigned)PTR_ERR(transport_ptr));
+		return ERR_PTR(-ENODEV);
+	}
+
+	/*
+	 * look for an existing port structure which can occur in
+	 * reopen and remote-open-first cases
+	 */
+	ctx = ch_name_to_ch_ctx_create(transport_ptr, cfg->name);
+	if (ctx == NULL) {
+		GLINK_ERR("%s:%s %s: Error - unable to allocate new channel\n",
+				cfg->transport, cfg->edge, __func__);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	/* port already exists */
+	if (ctx->local_open_state != GLINK_CHANNEL_CLOSED) {
+		/* not ready to be re-opened */
+		GLINK_INFO_CH_XPRT(ctx, transport_ptr,
+		"%s: Channel not ready to be re-opened. State: %u\n",
+		__func__, ctx->local_open_state);
+		rwref_put(&ctx->ch_state_lhb2);
+		return ERR_PTR(-EBUSY);
+	}
+
+	/* initialize port structure */
+	ctx->user_priv = cfg->priv;
+	ctx->rx_intent_req_timeout_jiffies =
+		msecs_to_jiffies(cfg->rx_intent_req_timeout_ms);
+	ctx->notify_rx = cfg->notify_rx;
+	ctx->notify_tx_done = cfg->notify_tx_done;
+	ctx->notify_state = cfg->notify_state;
+	ctx->notify_rx_intent_req = cfg->notify_rx_intent_req;
+	ctx->notify_rxv = cfg->notify_rxv;
+	ctx->notify_rx_sigs = cfg->notify_rx_sigs;
+	ctx->notify_rx_abort = cfg->notify_rx_abort;
+	ctx->notify_tx_abort = cfg->notify_tx_abort;
+	ctx->notify_rx_tracer_pkt = cfg->notify_rx_tracer_pkt;
+	ctx->notify_remote_rx_intent = cfg->notify_remote_rx_intent;
+
+	if (!ctx->notify_rx_intent_req)
+		ctx->notify_rx_intent_req = glink_dummy_notify_rx_intent_req;
+	if (!ctx->notify_rx_sigs)
+		ctx->notify_rx_sigs = glink_dummy_notify_rx_sigs;
+	if (!ctx->notify_rx_abort)
+		ctx->notify_rx_abort = glink_dummy_notify_rx_abort;
+	if (!ctx->notify_tx_abort)
+		ctx->notify_tx_abort = glink_dummy_notify_tx_abort;
+
+	if (!ctx->rx_intent_req_timeout_jiffies)
+		ctx->rx_intent_req_timeout_jiffies = MAX_SCHEDULE_TIMEOUT;
+
+	ctx->local_xprt_req = best_id;
+	ctx->no_migrate = cfg->transport &&
+				!(cfg->options & GLINK_OPT_INITIAL_XPORT);
+	ctx->local_open_state = GLINK_CHANNEL_OPENING;
+	GLINK_INFO_PERF_CH(ctx,
+		"%s: local:GLINK_CHANNEL_CLOSED->GLINK_CHANNEL_OPENING\n",
+		__func__);
+
+	/* start local-open sequence */
+	ret = ctx->transport_ptr->ops->tx_cmd_ch_open(ctx->transport_ptr->ops,
+		ctx->lcid, cfg->name, best_id);
+	if (ret) {
+		/* failure to send open command (transport failure) */
+		ctx->local_open_state = GLINK_CHANNEL_CLOSED;
+		GLINK_ERR_CH(ctx, "%s: Unable to send open command %d\n",
+			__func__, ret);
+		rwref_put(&ctx->ch_state_lhb2);
+		return ERR_PTR(ret);
+	}
+
+	GLINK_INFO_CH(ctx, "%s: Created channel, sent OPEN command. ctx %p\n",
+			__func__, ctx);
+	rwref_put(&ctx->ch_state_lhb2);
+	return ctx;
+}
+EXPORT_SYMBOL(glink_open);
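+
+/*
+ * Minimal glink_open() usage sketch (illustrative only: the edge/name values
+ * and the client_* callback implementations are hypothetical client code,
+ * not part of this driver):
+ *
+ *	struct glink_open_config cfg = {
+ *		.edge = "mpss",
+ *		.name = "loopback",
+ *		.notify_rx = client_notify_rx,
+ *		.notify_tx_done = client_notify_tx_done,
+ *		.notify_state = client_notify_state,
+ *	};
+ *	void *handle = glink_open(&cfg);
+ *
+ *	if (IS_ERR(handle))
+ *		return PTR_ERR(handle);
+ */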
+
+/**
+ * glink_get_channel_id_for_handle() - Get logical channel ID
+ *
+ * @handle:	handle of channel
+ *
+ * Used internally by G-Link debugfs.
+ *
+ * Return:  Logical Channel ID or standard Linux error code
+ */
+int glink_get_channel_id_for_handle(void *handle)
+{
+	struct channel_ctx *ctx = (struct channel_ctx *)handle;
+	if (ctx == NULL)
+		return -EINVAL;
+
+	return ctx->lcid;
+}
+EXPORT_SYMBOL(glink_get_channel_id_for_handle);
+
+/**
+ * glink_get_channel_name_for_handle() - return channel name
+ *
+ * @handle:	handle of channel
+ *
+ * Used internally by G-Link debugfs.
+ *
+ * Return:  Channel name or NULL
+ */
+char *glink_get_channel_name_for_handle(void *handle)
+{
+	struct channel_ctx *ctx = (struct channel_ctx *)handle;
+	if (ctx == NULL)
+		return NULL;
+
+	return ctx->name;
+}
+EXPORT_SYMBOL(glink_get_channel_name_for_handle);
+
+/**
+ * glink_delete_ch_from_list() -  delete the channel from the list
+ * @ctx:	Pointer to channel context.
+ * @add_flcid:	Boolean value to decide whether the lcid should be added to
+ *		the free lcid list.
+ *
+ * This function deletes the channel from the list along with the debugfs
+ * information associated with it. It also adds the channel lcid to the free
+ * lcid list, except when the channel is deleted in the SSR/unregister case.
+ * It can only be called when the channel is fully closed.
+ *
+ * Return: true when both transport_ptr->channels and transport_ptr->notified
+ * are empty.
+ */
+static bool glink_delete_ch_from_list(struct channel_ctx *ctx, bool add_flcid)
+{
+	unsigned long flags;
+	bool ret = false;
+
+	spin_lock_irqsave(&ctx->transport_ptr->xprt_ctx_lock_lhb1,
+				flags);
+	if (!list_empty(&ctx->port_list_node))
+		list_del_init(&ctx->port_list_node);
+	if (list_empty(&ctx->transport_ptr->channels) &&
+			list_empty(&ctx->transport_ptr->notified))
+		ret = true;
+	spin_unlock_irqrestore(
+			&ctx->transport_ptr->xprt_ctx_lock_lhb1,
+			flags);
+	if (add_flcid)
+		glink_add_free_lcid_list(ctx);
+	mutex_lock(&ctx->transport_ptr->xprt_dbgfs_lock_lhb4);
+	glink_debugfs_remove_channel(ctx, ctx->transport_ptr);
+	mutex_unlock(&ctx->transport_ptr->xprt_dbgfs_lock_lhb4);
+	rwref_put(&ctx->ch_state_lhb2);
+	return ret;
+}
+
+/**
+ * glink_close() - Close a previously opened channel.
+ *
+ * @handle:	handle to close
+ *
+ * Once the closing process has been completed, the GLINK_LOCAL_DISCONNECTED
+ * state event will be sent and the channel can be reopened.
+ *
+ * Return:  0 on success; -EINVAL for invalid handle; -EBUSY if close is
+ * already in progress; standard Linux error code otherwise.
+ */
+int glink_close(void *handle)
+{
+	struct glink_core_xprt_ctx *xprt_ctx = NULL;
+	struct channel_ctx *ctx = (struct channel_ctx *)handle;
+	int ret = 0;
+	unsigned long flags;
+	bool is_empty = false;
+
+	ret = glink_get_ch_ctx(ctx);
+	if (ret)
+		return ret;
+
+	GLINK_INFO_CH(ctx, "%s: Closing channel, ctx: %p\n", __func__, ctx);
+	if (ctx->local_open_state == GLINK_CHANNEL_CLOSED) {
+		glink_put_ch_ctx(ctx);
+		return 0;
+	}
+
+	if (ctx->local_open_state == GLINK_CHANNEL_CLOSING) {
+		/* close already pending */
+		glink_put_ch_ctx(ctx);
+		return -EBUSY;
+	}
+
+	rwref_get(&ctx->ch_state_lhb2);
+relock:
+	xprt_ctx = ctx->transport_ptr;
+	rwref_read_get(&xprt_ctx->xprt_state_lhb0);
+	rwref_write_get(&ctx->ch_state_lhb2);
+	if (xprt_ctx != ctx->transport_ptr) {
+		rwref_write_put(&ctx->ch_state_lhb2);
+		rwref_read_put(&xprt_ctx->xprt_state_lhb0);
+		goto relock;
+	}
+
+	/* Set the channel state before removing it from xprt's list(s) */
+	GLINK_INFO_PERF_CH(ctx,
+		"%s: local:%u->GLINK_CHANNEL_CLOSING\n",
+		__func__, ctx->local_open_state);
+	ctx->local_open_state = GLINK_CHANNEL_CLOSING;
+
+	ctx->pending_delete = true;
+	ctx->int_req_ack = false;
+
+	spin_lock_irqsave(&xprt_ctx->tx_ready_lock_lhb3, flags);
+	if (!list_empty(&ctx->tx_ready_list_node))
+		list_del_init(&ctx->tx_ready_list_node);
+	spin_unlock_irqrestore(&xprt_ctx->tx_ready_lock_lhb3, flags);
+
+	if (xprt_ctx->local_state != GLINK_XPRT_DOWN) {
+		glink_qos_reset_priority(ctx);
+		ret = xprt_ctx->ops->tx_cmd_ch_close(xprt_ctx->ops, ctx->lcid);
+		rwref_write_put(&ctx->ch_state_lhb2);
+	} else if (!strcmp(xprt_ctx->name, "dummy")) {
+		/*
+		 * This check will avoid any race condition when clients call
+		 * glink_close before the dummy xprt swapping happens in link
+		 * down scenario.
+		 */
+		ret = 0;
+		rwref_write_put(&ctx->ch_state_lhb2);
+		glink_core_ch_close_ack_common(ctx, false);
+		if (ch_is_fully_closed(ctx)) {
+			is_empty = glink_delete_ch_from_list(ctx, false);
+			rwref_put(&xprt_ctx->xprt_state_lhb0);
+			if (is_empty && !xprt_ctx->dummy_in_use)
+				/* For the xprt reference */
+				rwref_put(&xprt_ctx->xprt_state_lhb0);
+		} else {
+			GLINK_ERR_CH(ctx,
+			"channel Not closed yet local state [%d] remote_state [%d]\n",
+			ctx->local_open_state, ctx->remote_opened);
+		}
+	} else {
+		/*
+		 * This case handles the scenario where glink_core_link_down
+		 * changes the local_state to GLINK_XPRT_DOWN but glink_close
+		 * gets the channel write lock before glink_core_channel_cleanup
+		 */
+		rwref_write_put(&ctx->ch_state_lhb2);
+	}
+	complete_all(&ctx->int_req_ack_complete);
+	complete_all(&ctx->int_req_complete);
+
+	rwref_put(&ctx->ch_state_lhb2);
+	rwref_read_put(&xprt_ctx->xprt_state_lhb0);
+	glink_put_ch_ctx(ctx);
+	return ret;
+}
+EXPORT_SYMBOL(glink_close);
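+
+/*
+ * Illustrative client-side sketch (not part of this driver): closing a
+ * channel and waiting for the disconnect event. It assumes the client's
+ * notify_state callback completes a client-owned completion when it
+ * receives GLINK_LOCAL_DISCONNECTED:
+ *
+ *	ret = glink_close(handle);
+ *	if (ret == -EBUSY)
+ *		return ret;
+ *	if (!ret)
+ *		wait_for_completion(&client->disconnected);
+ *
+ * Only after GLINK_LOCAL_DISCONNECTED has been delivered may the channel be
+ * reopened with glink_open().
+ */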
+
+/**
+ * glink_tx_pkt_release() - Release a packet's transmit information
+ * @tx_pkt_ref:	Packet information which needs to be released.
+ *
+ * This function is called when all the references to a packet's transmit
+ * information are dropped.
+ */
+static void glink_tx_pkt_release(struct rwref_lock *tx_pkt_ref)
+{
+	struct glink_core_tx_pkt *tx_info = container_of(tx_pkt_ref,
+						struct glink_core_tx_pkt,
+						pkt_ref);
+	if (!list_empty(&tx_info->list_done))
+		list_del_init(&tx_info->list_done);
+	if (!list_empty(&tx_info->list_node))
+		list_del_init(&tx_info->list_node);
+	kfree(tx_info);
+}
+
+/**
+ * glink_tx_common() - Common TX implementation
+ *
+ * @handle:	handle returned by glink_open()
+ * @pkt_priv:	opaque data value that will be returned to client with
+ *		notify_tx_done notification
+ * @data:	pointer to the data
+ * @size:	size of data
+ * @vbuf_provider: Virtual Address-space Buffer Provider for the tx buffer.
+ * @pbuf_provider: Physical Address-space Buffer Provider for the tx buffer.
+ * @tx_flags:	Flags to indicate transmit options
+ *
+ * Return:	-EINVAL for invalid handle; -EBUSY if channel isn't ready for
+ *		transmit operation (not fully opened); -EAGAIN if remote side
+ *		has not provided a receive intent that is big enough.
+ */
+static int glink_tx_common(void *handle, void *pkt_priv,
+	void *data, void *iovec, size_t size,
+	void * (*vbuf_provider)(void *iovec, size_t offset, size_t *size),
+	void * (*pbuf_provider)(void *iovec, size_t offset, size_t *size),
+	uint32_t tx_flags)
+{
+	struct channel_ctx *ctx = (struct channel_ctx *)handle;
+	uint32_t riid;
+	int ret = 0;
+	struct glink_core_tx_pkt *tx_info = NULL;
+	size_t intent_size;
+	bool is_atomic =
+		tx_flags & (GLINK_TX_SINGLE_THREADED | GLINK_TX_ATOMIC);
+	unsigned long flags;
+	void *cookie = NULL;
+
+	if (!size)
+		return -EINVAL;
+
+	ret = glink_get_ch_ctx(ctx);
+	if (ret)
+		return ret;
+
+	rwref_read_get_atomic(&ctx->ch_state_lhb2, is_atomic);
+	tx_info = kzalloc(sizeof(struct glink_core_tx_pkt),
+				is_atomic ? GFP_ATOMIC : GFP_KERNEL);
+	if (!tx_info) {
+		GLINK_ERR_CH(ctx, "%s: No memory for allocation\n", __func__);
+		ret = -ENOMEM;
+		goto glink_tx_common_err;
+	}
+	if (!(vbuf_provider || pbuf_provider)) {
+		ret = -EINVAL;
+		goto glink_tx_common_err;
+	}
+
+	if (!ch_is_fully_opened(ctx)) {
+		ret = -EBUSY;
+		goto glink_tx_common_err;
+	}
+
+	if (size > GLINK_MAX_PKT_SIZE) {
+		ret = -EINVAL;
+		goto glink_tx_common_err;
+	}
+
+	if (unlikely(tx_flags & GLINK_TX_TRACER_PKT)) {
+		if (!(ctx->transport_ptr->capabilities & GCAP_TRACER_PKT)) {
+			ret = -EOPNOTSUPP;
+			goto glink_tx_common_err;
+		}
+		tracer_pkt_log_event(data, GLINK_CORE_TX);
+	}
+
+	/* find matching rx intent (first-fit algorithm for now) */
+	if (ch_pop_remote_rx_intent(ctx, size, &riid, &intent_size, &cookie)) {
+		if (!(tx_flags & GLINK_TX_REQ_INTENT)) {
+			/* no rx intent available */
+			GLINK_ERR_CH(ctx,
+				"%s: R[%u]:%zu Intent not present for lcid\n",
+				__func__, riid, size);
+			ret = -EAGAIN;
+			goto glink_tx_common_err;
+		}
+		if (is_atomic && !(ctx->transport_ptr->capabilities &
+					  GCAP_AUTO_QUEUE_RX_INT)) {
+			GLINK_ERR_CH(ctx,
+				"%s: Cannot request intent in atomic context\n",
+				__func__);
+			ret = -EINVAL;
+			goto glink_tx_common_err;
+		}
+
+		/* request intent of correct size */
+		reinit_completion(&ctx->int_req_ack_complete);
+		ret = ctx->transport_ptr->ops->tx_cmd_rx_intent_req(
+				ctx->transport_ptr->ops, ctx->lcid, size);
+		if (ret) {
+			GLINK_ERR_CH(ctx, "%s: Request intent failed %d\n",
+					__func__, ret);
+			goto glink_tx_common_err;
+		}
+
+		while (ch_pop_remote_rx_intent(ctx, size, &riid,
+						&intent_size, &cookie)) {
+			rwref_read_put(&ctx->ch_state_lhb2);
+			if (is_atomic) {
+				GLINK_ERR_CH(ctx,
+				    "%s Intent of size %zu not ready\n",
+				    __func__, size);
+				ret = -EAGAIN;
+				goto glink_tx_common_err_2;
+			}
+
+			if (ctx->transport_ptr->local_state == GLINK_XPRT_DOWN
+			    || !ch_is_fully_opened(ctx)) {
+				GLINK_ERR_CH(ctx,
+					"%s: Channel closed while waiting for intent\n",
+					__func__);
+				ret = -EBUSY;
+				goto glink_tx_common_err_2;
+			}
+
+			/* wait for the remote intent req ack */
+			if (!wait_for_completion_timeout(
+					&ctx->int_req_ack_complete,
+					ctx->rx_intent_req_timeout_jiffies)) {
+				GLINK_ERR(
+					"%s: Intent request ack with size: %zu not granted for lcid\n",
+					__func__, size);
+				ret = -ETIMEDOUT;
+				goto glink_tx_common_err_2;
+			}
+
+			if (!ctx->int_req_ack) {
+				GLINK_ERR_CH(ctx,
+				    "%s: Intent Request with size: %zu %s",
+				    __func__, size,
+				    "not granted for lcid\n");
+				ret = -EAGAIN;
+				goto glink_tx_common_err_2;
+			}
+
+			/* wait for the rx_intent from remote side */
+			if (!wait_for_completion_timeout(
+					&ctx->int_req_complete,
+					ctx->rx_intent_req_timeout_jiffies)) {
+				GLINK_ERR(
+					"%s: Intent request with size: %zu not granted for lcid\n",
+					__func__, size);
+				ret = -ETIMEDOUT;
+				goto glink_tx_common_err_2;
+			}
+
+			reinit_completion(&ctx->int_req_complete);
+			rwref_read_get(&ctx->ch_state_lhb2);
+		}
+	}
+
+	if (!is_atomic) {
+		spin_lock_irqsave(&ctx->transport_ptr->tx_ready_lock_lhb3,
+				  flags);
+		glink_pm_qos_vote(ctx->transport_ptr);
+		spin_unlock_irqrestore(&ctx->transport_ptr->tx_ready_lock_lhb3,
+					flags);
+	}
+
+	GLINK_INFO_PERF_CH(ctx, "%s: R[%u]:%zu data[%p], size[%zu]. TID %u\n",
+			__func__, riid, intent_size,
+			data ? data : iovec, size, current->pid);
+
+	rwref_lock_init(&tx_info->pkt_ref, glink_tx_pkt_release);
+	INIT_LIST_HEAD(&tx_info->list_done);
+	INIT_LIST_HEAD(&tx_info->list_node);
+	tx_info->pkt_priv = pkt_priv;
+	tx_info->data = data;
+	tx_info->riid = riid;
+	tx_info->rcid = ctx->rcid;
+	tx_info->size = size;
+	tx_info->size_remaining = size;
+	tx_info->tracer_pkt = tx_flags & GLINK_TX_TRACER_PKT ? true : false;
+	tx_info->iovec = iovec ? iovec : (void *)tx_info;
+	tx_info->vprovider = vbuf_provider;
+	tx_info->pprovider = pbuf_provider;
+	tx_info->intent_size = intent_size;
+	tx_info->cookie = cookie;
+
+	/* schedule packet for transmit */
+	if ((tx_flags & GLINK_TX_SINGLE_THREADED) &&
+	    (ctx->transport_ptr->capabilities & GCAP_INTENTLESS))
+		ret = xprt_single_threaded_tx(ctx->transport_ptr,
+					       ctx, tx_info);
+	else
+		xprt_schedule_tx(ctx->transport_ptr, ctx, tx_info);
+
+	rwref_read_put(&ctx->ch_state_lhb2);
+	glink_put_ch_ctx(ctx);
+	return ret;
+
+glink_tx_common_err:
+	rwref_read_put(&ctx->ch_state_lhb2);
+glink_tx_common_err_2:
+	glink_put_ch_ctx(ctx);
+	kfree(tx_info);
+	return ret;
+}
+
+/**
+ * glink_tx() - Transmit packet.
+ *
+ * @handle:	handle returned by glink_open()
+ * @pkt_priv:	opaque data value that will be returned to client with
+ *		notify_tx_done notification
+ * @data:	pointer to the data
+ * @size:	size of data
+ * @tx_flags:	Flags to specify transmit specific options
+ *
+ * Return:	-EINVAL for invalid handle; -EBUSY if channel isn't ready for
+ *		transmit operation (not fully opened); -EAGAIN if remote side
+ *		has not provided a receive intent that is big enough.
+ */
+int glink_tx(void *handle, void *pkt_priv, void *data, size_t size,
+							uint32_t tx_flags)
+{
+	return glink_tx_common(handle, pkt_priv, data, NULL, size,
+			       tx_linear_vbuf_provider, NULL, tx_flags);
+}
+EXPORT_SYMBOL(glink_tx);
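+
+/*
+ * Illustrative client-side sketch (not part of this driver): transmitting a
+ * linear buffer once the channel is fully opened. GLINK_TX_REQ_INTENT lets
+ * the call request an rx intent from the remote side and wait (up to
+ * rx_intent_req_timeout_ms from the open config) if none large enough is
+ * queued:
+ *
+ *	static char msg[] = "ping";
+ *	int ret;
+ *
+ *	ret = glink_tx(handle, client_priv, msg, sizeof(msg),
+ *		       GLINK_TX_REQ_INTENT);
+ *	if (ret)
+ *		pr_err("tx failed: %d\n", ret);
+ *
+ * The buffer must remain valid until notify_tx_done() is called with
+ * client_priv as the pkt_priv argument.
+ */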
+
+/**
+ * glink_queue_rx_intent() - Register an intent to receive data.
+ *
+ * @handle:	handle returned by glink_open()
+ * @pkt_priv:	opaque data value that is returned when a packet is received
+ * @size:	maximum size of data to receive
+ *
+ * Return: 0 for success; standard Linux error code for failure case
+ */
+int glink_queue_rx_intent(void *handle, const void *pkt_priv, size_t size)
+{
+	struct channel_ctx *ctx = (struct channel_ctx *)handle;
+	struct glink_core_rx_intent *intent_ptr;
+	int ret = 0;
+
+	ret = glink_get_ch_ctx(ctx);
+	if (ret)
+		return ret;
+
+	if (!ch_is_fully_opened(ctx)) {
+		/* Can only queue rx intents if channel is fully opened */
+		GLINK_ERR_CH(ctx, "%s: Channel is not fully opened\n",
+			__func__);
+		glink_put_ch_ctx(ctx);
+		return -EBUSY;
+	}
+
+	intent_ptr = ch_push_local_rx_intent(ctx, pkt_priv, size);
+	if (!intent_ptr) {
+		GLINK_ERR_CH(ctx,
+			"%s: Intent pointer allocation failed size[%zu]\n",
+			__func__, size);
+		glink_put_ch_ctx(ctx);
+		return -ENOMEM;
+	}
+	GLINK_DBG_CH(ctx, "%s: L[%u]:%zu\n", __func__, intent_ptr->id,
+			intent_ptr->intent_size);
+
+	if (ctx->transport_ptr->capabilities & GCAP_INTENTLESS) {
+		glink_put_ch_ctx(ctx);
+		return ret;
+	}
+
+	/* notify remote side of rx intent */
+	ret = ctx->transport_ptr->ops->tx_cmd_local_rx_intent(
+		ctx->transport_ptr->ops, ctx->lcid, size, intent_ptr->id);
+	if (ret)
+		/* unable to transmit, dequeue intent */
+		ch_remove_local_rx_intent(ctx, intent_ptr->id);
+	glink_put_ch_ctx(ctx);
+	return ret;
+}
+EXPORT_SYMBOL(glink_queue_rx_intent);
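+
+/*
+ * Illustrative client-side sketch (not part of this driver): a client
+ * typically queues a few rx intents from its notify_state callback once the
+ * channel is fully opened, so the remote side has buffers to transmit into.
+ * This assumes GLINK_CONNECTED is the fully-opened event and that the
+ * notify_state prototype is (handle, priv, event); sizes and count are
+ * arbitrary:
+ *
+ *	static void cb_notify_state(void *handle, const void *priv,
+ *				    unsigned event)
+ *	{
+ *		int i;
+ *
+ *		if (event != GLINK_CONNECTED)
+ *			return;
+ *		for (i = 0; i < 4; i++)
+ *			if (glink_queue_rx_intent(handle, priv, SZ_4K))
+ *				break;
+ *	}
+ *
+ * The queued intent sizes bound the largest packet the remote side can send
+ * without issuing an intent request.
+ */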
+
+/**
+ * glink_rx_intent_exists() - Check if an intent exists.
+ *
+ * @handle:	handle returned by glink_open()
+ * @size:	size of an intent to check or 0 for any intent
+ *
+ * Return:	TRUE if an intent exists with a size greater than or equal to
+ *		the requested size, else FALSE
+ */
+bool glink_rx_intent_exists(void *handle, size_t size)
+{
+	struct channel_ctx *ctx = (struct channel_ctx *)handle;
+	struct glink_core_rx_intent *intent;
+	unsigned long flags;
+	int ret;
+
+	if (!ctx || !ch_is_fully_opened(ctx))
+		return false;
+
+	ret = glink_get_ch_ctx(ctx);
+	if (ret)
+		return false;
+	spin_lock_irqsave(&ctx->local_rx_intent_lst_lock_lhc1, flags);
+	list_for_each_entry(intent, &ctx->local_rx_intent_list, list) {
+		if (size <= intent->intent_size) {
+			spin_unlock_irqrestore(
+				&ctx->local_rx_intent_lst_lock_lhc1, flags);
+			glink_put_ch_ctx(ctx);
+			return true;
+		}
+	}
+	spin_unlock_irqrestore(&ctx->local_rx_intent_lst_lock_lhc1, flags);
+	glink_put_ch_ctx(ctx);
+	return false;
+}
+EXPORT_SYMBOL(glink_rx_intent_exists);
+
+/**
+ * glink_rx_done() - Return receive buffer to remote side.
+ *
+ * @handle:	handle returned by glink_open()
+ * @ptr:	data pointer provided in the notify_rx() call
+ * @reuse:	if true, receive intent is re-used
+ *
+ * Return: 0 for success; standard Linux error code for failure case
+ */
+int glink_rx_done(void *handle, const void *ptr, bool reuse)
+{
+	struct channel_ctx *ctx = (struct channel_ctx *)handle;
+	struct glink_core_rx_intent *liid_ptr;
+	uint32_t id;
+	int ret = 0;
+
+	ret = glink_get_ch_ctx(ctx);
+	if (ret)
+		return ret;
+	liid_ptr = ch_get_local_rx_intent_notified(ctx, ptr);
+
+	if (IS_ERR_OR_NULL(liid_ptr)) {
+		/* invalid pointer */
+		GLINK_ERR_CH(ctx, "%s: Invalid pointer %p\n", __func__, ptr);
+		glink_put_ch_ctx(ctx);
+		return -EINVAL;
+	}
+
+	GLINK_INFO_PERF_CH(ctx, "%s: L[%u]: data[%p]. TID %u\n",
+			__func__, liid_ptr->id, ptr, current->pid);
+	id = liid_ptr->id;
+	if (reuse) {
+		ret = ctx->transport_ptr->ops->reuse_rx_intent(
+					ctx->transport_ptr->ops, liid_ptr);
+		if (ret) {
+			GLINK_ERR_CH(ctx, "%s: Intent reuse err %d for %p\n",
+					__func__, ret, ptr);
+			ret = -ENOBUFS;
+			reuse = false;
+			ctx->transport_ptr->ops->deallocate_rx_intent(
+					ctx->transport_ptr->ops, liid_ptr);
+		}
+	} else {
+		ctx->transport_ptr->ops->deallocate_rx_intent(
+					ctx->transport_ptr->ops, liid_ptr);
+	}
+	ch_remove_local_rx_intent_notified(ctx, liid_ptr, reuse);
+	/* send rx done */
+	ctx->transport_ptr->ops->tx_cmd_local_rx_done(ctx->transport_ptr->ops,
+			ctx->lcid, id, reuse);
+	glink_put_ch_ctx(ctx);
+	return ret;
+}
+EXPORT_SYMBOL(glink_rx_done);
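+
+/*
+ * Illustrative client-side sketch (not part of this driver): a notify_rx
+ * callback that consumes the packet in place and returns the intent for
+ * reuse, so no new intent has to be queued for the next packet. The
+ * notify_rx prototype (handle, priv, pkt_priv, ptr, size) and
+ * process_packet() are client-side placeholders:
+ *
+ *	static void cb_notify_rx(void *handle, const void *priv,
+ *				 const void *pkt_priv, const void *ptr,
+ *				 size_t size)
+ *	{
+ *		process_packet(ptr, size);
+ *		glink_rx_done(handle, ptr, true);
+ *	}
+ *
+ * Passing reuse = false would instead deallocate the intent after the rx
+ * done command is sent to the remote side.
+ */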
+
+/**
+ * glink_txv() - Transmit a packet in vector form.
+ *
+ * @handle:	handle returned by glink_open()
+ * @pkt_priv:	opaque data value that will be returned to client with
+ *		notify_tx_done notification
+ * @iovec:	pointer to the vector (must remain valid until notify_tx_done
+ *		notification)
+ * @size:	size of data/vector
+ * @vbuf_provider: Client provided helper function to iterate the vector
+ *		in virtual address space
+ * @pbuf_provider: Client provided helper function to iterate the vector
+ *		in physical address space
+ * @tx_flags:	Flags to specify transmit specific options
+ *
+ * Return: -EINVAL for invalid handle; -EBUSY if channel isn't ready for
+ *           transmit operation (not fully opened); -EAGAIN if remote side has
+ *           not provided a receive intent that is big enough.
+ */
+int glink_txv(void *handle, void *pkt_priv,
+	void *iovec, size_t size,
+	void * (*vbuf_provider)(void *iovec, size_t offset, size_t *size),
+	void * (*pbuf_provider)(void *iovec, size_t offset, size_t *size),
+	uint32_t tx_flags)
+{
+	return glink_tx_common(handle, pkt_priv, NULL, iovec, size,
+			vbuf_provider, pbuf_provider, tx_flags);
+}
+EXPORT_SYMBOL(glink_txv);
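+
+/*
+ * Illustrative client-side sketch (not part of this driver): a trivial
+ * vbuf_provider over a client-defined two-segment vector. The core calls
+ * the provider repeatedly with increasing offsets; it returns the address
+ * within the segment containing "offset" and stores the contiguous bytes
+ * available there. Struct and function names are placeholders:
+ *
+ *	struct two_seg_vec {
+ *		void *seg[2];
+ *		size_t len[2];
+ *	};
+ *
+ *	static void *two_seg_vbuf_provider(void *iovec, size_t offset,
+ *					   size_t *size)
+ *	{
+ *		struct two_seg_vec *v = iovec;
+ *
+ *		if (offset < v->len[0]) {
+ *			*size = v->len[0] - offset;
+ *			return v->seg[0] + offset;
+ *		}
+ *		offset -= v->len[0];
+ *		if (offset < v->len[1]) {
+ *			*size = v->len[1] - offset;
+ *			return v->seg[1] + offset;
+ *		}
+ *		return NULL;
+ *	}
+ *
+ * The vector is then transmitted with:
+ *	glink_txv(handle, priv, &vec, vec.len[0] + vec.len[1],
+ *		  two_seg_vbuf_provider, NULL, 0);
+ */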
+
+/**
+ * glink_sigs_set() - Set the local signals for the GLINK channel
+ *
+ * @handle:	handle returned by glink_open()
+ * @sigs:	modified signal value
+ *
+ * Return: 0 for success; standard Linux error code for failure case
+ */
+int glink_sigs_set(void *handle, uint32_t sigs)
+{
+	struct channel_ctx *ctx = (struct channel_ctx *)handle;
+	int ret;
+
+	ret = glink_get_ch_ctx(ctx);
+	if (ret)
+		return ret;
+	if (!ch_is_fully_opened(ctx)) {
+		GLINK_ERR_CH(ctx, "%s: Channel is not fully opened\n",
+			__func__);
+		glink_put_ch_ctx(ctx);
+		return -EBUSY;
+	}
+
+	ctx->lsigs = sigs;
+
+	ret = ctx->transport_ptr->ops->tx_cmd_set_sigs(ctx->transport_ptr->ops,
+			ctx->lcid, ctx->lsigs);
+	GLINK_INFO_CH(ctx, "%s: Sent SIGNAL SET command\n", __func__);
+
+	glink_put_ch_ctx(ctx);
+	return ret;
+}
+EXPORT_SYMBOL(glink_sigs_set);
+
+/**
+ * glink_sigs_local_get() - Get the local signals for the GLINK channel
+ *
+ * @handle:	handle returned by glink_open()
+ * @sigs:	Pointer to hold the signals
+ *
+ * Return: 0 for success; standard Linux error code for failure case
+ */
+int glink_sigs_local_get(void *handle, uint32_t *sigs)
+{
+	struct channel_ctx *ctx = (struct channel_ctx *)handle;
+	int ret;
+
+	if (!sigs)
+		return -EINVAL;
+	ret = glink_get_ch_ctx(ctx);
+	if (ret)
+		return ret;
+	if (!ch_is_fully_opened(ctx)) {
+		GLINK_ERR_CH(ctx, "%s: Channel is not fully opened\n",
+			__func__);
+		glink_put_ch_ctx(ctx);
+		return -EBUSY;
+	}
+
+	*sigs = ctx->lsigs;
+	glink_put_ch_ctx(ctx);
+	return 0;
+}
+EXPORT_SYMBOL(glink_sigs_local_get);
+
+/**
+ * glink_sigs_remote_get() - Get the Remote signals for the GLINK channel
+ *
+ * @handle:	handle returned by glink_open()
+ * @sigs:	Pointer to hold the signals
+ *
+ * Return: 0 for success; standard Linux error code for failure case
+ */
+int glink_sigs_remote_get(void *handle, uint32_t *sigs)
+{
+	struct channel_ctx *ctx = (struct channel_ctx *)handle;
+	int ret;
+
+	if (!sigs)
+		return -EINVAL;
+
+	ret = glink_get_ch_ctx(ctx);
+	if (ret)
+		return ret;
+	if (!ch_is_fully_opened(ctx)) {
+		GLINK_ERR_CH(ctx, "%s: Channel is not fully opened\n",
+			__func__);
+		glink_put_ch_ctx(ctx);
+		return -EBUSY;
+	}
+
+	*sigs = ctx->rsigs;
+	glink_put_ch_ctx(ctx);
+	return 0;
+}
+EXPORT_SYMBOL(glink_sigs_remote_get);
+
+/**
+ * glink_register_link_state_cb() - Register for link state notification
+ * @link_info:	Data structure containing the link identification and callback.
+ * @priv:	Private information to be passed with the callback.
+ *
+ * This function is used to register a notifier to receive updates about a
+ * link's/transport's state. This notifier must be registered before
+ * attempting to open a channel.
+ *
+ * Return: a reference to the notifier handle.
+ */
+void *glink_register_link_state_cb(struct glink_link_info *link_info,
+				   void *priv)
+{
+	struct link_state_notifier_info *notif_info;
+
+	if (!link_info || !link_info->glink_link_state_notif_cb)
+		return ERR_PTR(-EINVAL);
+
+	notif_info = kzalloc(sizeof(*notif_info), GFP_KERNEL);
+	if (!notif_info) {
+		GLINK_ERR("%s: Error allocating link state notifier info\n",
+			  __func__);
+		return ERR_PTR(-ENOMEM);
+	}
+	if (link_info->transport)
+		strlcpy(notif_info->transport, link_info->transport,
+			GLINK_NAME_SIZE);
+
+	if (link_info->edge)
+		strlcpy(notif_info->edge, link_info->edge, GLINK_NAME_SIZE);
+	notif_info->priv = priv;
+	notif_info->glink_link_state_notif_cb =
+				link_info->glink_link_state_notif_cb;
+
+	mutex_lock(&link_state_notifier_lock_lha1);
+	list_add_tail(&notif_info->list, &link_state_notifier_list);
+	mutex_unlock(&link_state_notifier_lock_lha1);
+
+	notif_if_up_all_xprts(notif_info);
+	return notif_info;
+}
+EXPORT_SYMBOL(glink_register_link_state_cb);
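+
+/*
+ * Illustrative client-side sketch (not part of this driver): registering a
+ * link state notifier before any glink_open() attempt. Leaving .transport
+ * unset keeps the notifier's transport name empty (only non-NULL names are
+ * copied above), which this implementation appears to treat as matching any
+ * transport on the edge; cb_link_state is a client placeholder:
+ *
+ *	struct glink_link_info link_info = {
+ *		.edge = "local",
+ *		.glink_link_state_notif_cb = cb_link_state,
+ *	};
+ *	void *notif_handle;
+ *
+ *	notif_handle = glink_register_link_state_cb(&link_info, priv);
+ *	if (IS_ERR(notif_handle))
+ *		return PTR_ERR(notif_handle);
+ *
+ * The client would then defer glink_open() until the callback reports
+ * GLINK_LINK_STATE_UP.
+ */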
+
+/**
+ * glink_unregister_link_state_cb() - Unregister the link state notification
+ * @notif_handle:	Handle to be unregistered.
+ *
+ * This function is used to unregister a notifier to stop receiving updates
+ * about a link's/transport's state.
+ */
+void glink_unregister_link_state_cb(void *notif_handle)
+{
+	struct link_state_notifier_info *notif_info, *tmp_notif_info;
+
+	if (IS_ERR_OR_NULL(notif_handle))
+		return;
+
+	mutex_lock(&link_state_notifier_lock_lha1);
+	list_for_each_entry_safe(notif_info, tmp_notif_info,
+				 &link_state_notifier_list, list) {
+		if (notif_info == notif_handle) {
+			list_del(&notif_info->list);
+			mutex_unlock(&link_state_notifier_lock_lha1);
+			kfree(notif_info);
+			return;
+		}
+	}
+	mutex_unlock(&link_state_notifier_lock_lha1);
+}
+EXPORT_SYMBOL(glink_unregister_link_state_cb);
+
+/**
+ * glink_qos_latency() - Register the latency QoS requirement
+ * @handle:	Channel handle in which the latency is required.
+ * @latency_us:	Latency requirement in units of micro-seconds.
+ * @pkt_size:	Worst case packet size for which the latency is required.
+ *
+ * This function is used to register the latency requirement for a channel
+ * and ensures that the latency requirement for this channel is met without
+ * impacting the existing latency requirements of other channels.
+ *
+ * Return: 0 if QoS request is achievable, standard Linux error codes on error
+ */
+int glink_qos_latency(void *handle, unsigned long latency_us, size_t pkt_size)
+{
+	struct channel_ctx *ctx = (struct channel_ctx *)handle;
+	int ret;
+	unsigned long req_rate_kBps;
+
+	if (!latency_us || !pkt_size)
+		return -EINVAL;
+
+	ret = glink_get_ch_ctx(ctx);
+	if (ret)
+		return ret;
+	if (!ch_is_fully_opened(ctx)) {
+		GLINK_ERR_CH(ctx, "%s: Channel is not fully opened\n",
+			__func__);
+		glink_put_ch_ctx(ctx);
+		return -EBUSY;
+	}
+
+	req_rate_kBps = glink_qos_calc_rate_kBps(pkt_size, latency_us);
+
+	ret = glink_qos_assign_priority(ctx, req_rate_kBps);
+	if (ret < 0)
+		GLINK_ERR_CH(ctx, "%s: QoS %lu:%zu cannot be met\n",
+			     __func__, latency_us, pkt_size);
+	glink_put_ch_ctx(ctx);
+	return ret;
+}
+EXPORT_SYMBOL(glink_qos_latency);
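+
+/*
+ * Worked example (illustrative): assuming glink_qos_calc_rate_kBps()
+ * computes the rate needed to move pkt_size bytes within latency_us, a
+ * worst-case 1024-byte packet with a 500 us latency budget requires roughly
+ * 1024 B / 500 us = 2.048 B/us, i.e. about 2048 kB/s, and
+ * glink_qos_assign_priority() presumably maps that rate onto one of the
+ * transport's priority bins:
+ *
+ *	ret = glink_qos_latency(handle, 500, SZ_1K);
+ *	if (ret < 0)
+ *		pr_warn("QoS 500us/1KB not achievable: %d\n", ret);
+ */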
+
+/**
+ * glink_qos_cancel() - Cancel or unregister the QoS request
+ * @handle:	Channel handle for which the QoS request is cancelled.
+ *
+ * This function is used to cancel/unregister the QoS requests for a channel.
+ *
+ * Return: 0 on success, standard Linux error codes on failure
+ */
+int glink_qos_cancel(void *handle)
+{
+	struct channel_ctx *ctx = (struct channel_ctx *)handle;
+	int ret;
+
+	ret = glink_get_ch_ctx(ctx);
+	if (ret)
+		return ret;
+	if (!ch_is_fully_opened(ctx)) {
+		GLINK_ERR_CH(ctx, "%s: Channel is not fully opened\n",
+			__func__);
+		glink_put_ch_ctx(ctx);
+		return -EBUSY;
+	}
+
+	ret = glink_qos_reset_priority(ctx);
+	glink_put_ch_ctx(ctx);
+	return ret;
+}
+EXPORT_SYMBOL(glink_qos_cancel);
+
+/**
+ * glink_qos_start() - Start of the transmission requiring QoS
+ * @handle:	Channel handle in which the transmit activity is performed.
+ *
+ * This function is called by clients to notify G-Link of the start of a
+ * transmission which requires a certain QoS. The clients must account for
+ * the QoS ramp time to ensure meeting the QoS.
+ *
+ * Return: 0 on success, standard Linux error codes on failure
+ */
+int glink_qos_start(void *handle)
+{
+	struct channel_ctx *ctx = (struct channel_ctx *)handle;
+	int ret;
+	unsigned long flags;
+
+	ret = glink_get_ch_ctx(ctx);
+	if (ret)
+		return ret;
+	if (!ch_is_fully_opened(ctx)) {
+		GLINK_ERR_CH(ctx, "%s: Channel is not fully opened\n",
+			__func__);
+		glink_put_ch_ctx(ctx);
+		return -EBUSY;
+	}
+
+	spin_lock_irqsave(&ctx->transport_ptr->tx_ready_lock_lhb3, flags);
+	spin_lock(&ctx->tx_lists_lock_lhc3);
+	ret = glink_qos_add_ch_tx_intent(ctx);
+	spin_unlock(&ctx->tx_lists_lock_lhc3);
+	spin_unlock_irqrestore(&ctx->transport_ptr->tx_ready_lock_lhb3, flags);
+	glink_put_ch_ctx(ctx);
+	return ret;
+}
+EXPORT_SYMBOL(glink_qos_start);
+
+/**
+ * glink_qos_get_ramp_time() - Get the QoS ramp time
+ * @handle:	Channel handle for which the QoS ramp time is required.
+ * @pkt_size:	Worst case packet size.
+ *
+ * This function is called by the clients to obtain the ramp time required
+ * to meet the QoS requirements.
+ *
+ * Return: QoS ramp time is returned in units of micro-seconds on success,
+ *	   standard Linux error codes cast to unsigned long on error.
+ */
+unsigned long glink_qos_get_ramp_time(void *handle, size_t pkt_size)
+{
+	struct channel_ctx *ctx = (struct channel_ctx *)handle;
+	unsigned long ramp_time;
+	int ret;
+
+	ret = glink_get_ch_ctx(ctx);
+	if (ret)
+		return (unsigned long)ret;
+
+	if (!ch_is_fully_opened(ctx)) {
+		GLINK_ERR_CH(ctx, "%s: Channel is not fully opened\n",
+			__func__);
+		glink_put_ch_ctx(ctx);
+		return (unsigned long)-EBUSY;
+	}
+
+	ramp_time = ctx->transport_ptr->ops->get_power_vote_ramp_time(
+			ctx->transport_ptr->ops,
+			glink_prio_to_power_state(ctx->transport_ptr,
+						ctx->initial_priority));
+	glink_put_ch_ctx(ctx);
+	return ramp_time;
+}
+EXPORT_SYMBOL(glink_qos_get_ramp_time);
+
+
+/**
+ * glink_start_rx_rt() - Vote for RT thread priority on RX.
+ * @handle:	Channel handle for which transactions are occurring.
+ *
+ * Return: 0 on success, standard Linux error codes on failure
+ */
+int glink_start_rx_rt(void *handle)
+{
+	struct channel_ctx *ctx = (struct channel_ctx *)handle;
+	int ret;
+
+	ret = glink_get_ch_ctx(ctx);
+	if (ret)
+		return ret;
+	if (!ch_is_fully_opened(ctx)) {
+		GLINK_ERR_CH(ctx, "%s: Channel is not fully opened\n",
+			__func__);
+		glink_put_ch_ctx(ctx);
+		return -EBUSY;
+	}
+	ret = ctx->transport_ptr->ops->rx_rt_vote(ctx->transport_ptr->ops);
+	ctx->rt_vote_on++;
+	GLINK_INFO_CH(ctx, "%s: Voting RX Realtime Thread %d\n", __func__, ret);
+	glink_put_ch_ctx(ctx);
+	return ret;
+}
+
+/**
+ * glink_end_rx_rt() - Remove vote for RT thread priority on RX.
+ * @handle:	Channel handle for which transactions are occurring.
+ *
+ * Return: 0 on success, standard Linux error codes on failure
+ */
+int glink_end_rx_rt(void *handle)
+{
+	struct channel_ctx *ctx = (struct channel_ctx *)handle;
+	int ret;
+
+	ret = glink_get_ch_ctx(ctx);
+	if (ret)
+		return ret;
+	if (!ch_is_fully_opened(ctx)) {
+		GLINK_ERR_CH(ctx, "%s: Channel is not fully opened\n",
+			__func__);
+		glink_put_ch_ctx(ctx);
+		return -EBUSY;
+	}
+	ret = ctx->transport_ptr->ops->rx_rt_unvote(ctx->transport_ptr->ops);
+	ctx->rt_vote_off++;
+	GLINK_INFO_CH(ctx, "%s: Unvoting RX Realtime Thread %d\n",
+			__func__, ret);
+	glink_put_ch_ctx(ctx);
+	return ret;
+}
+
+/**
+ * glink_rpm_rx_poll() - Poll and receive any available events
+ * @handle:	Channel handle in which this operation is performed.
+ *
+ * This function is used to poll and receive events and packets while the
+ * receive interrupt from RPM is disabled.
+ *
+ * Note that even if a return value > 0 indicates that some events were
+ * processed, clients should only use the notification functions passed into
+ * glink_open() to determine whether an entire packet has been received,
+ * since some events may be internal details that are not visible to clients.
+ *
+ * Return: 0 for no packets available; > 0 for events available; standard
+ * Linux error codes on failure.
+ */
+int glink_rpm_rx_poll(void *handle)
+{
+	struct channel_ctx *ctx = (struct channel_ctx *)handle;
+
+	if (!ctx)
+		return -EINVAL;
+
+	if (!ch_is_fully_opened(ctx))
+		return -EBUSY;
+
+	if (!ctx->transport_ptr ||
+	    !(ctx->transport_ptr->capabilities & GCAP_INTENTLESS))
+		return -EOPNOTSUPP;
+
+	return ctx->transport_ptr->ops->poll(ctx->transport_ptr->ops,
+					     ctx->lcid);
+}
+EXPORT_SYMBOL(glink_rpm_rx_poll);
+
+/**
+ * glink_rpm_mask_rx_interrupt() - Mask or unmask the RPM receive interrupt
+ * @handle:	Channel handle in which this operation is performed.
+ * @mask:	Flag to mask or unmask the interrupt.
+ * @pstruct:	Pointer to any platform specific data.
+ *
+ * This function is used to mask or unmask the receive interrupt from RPM.
+ * Setting "mask" to true masks the interrupt; setting it to false unmasks it.
+ *
+ * Return: 0 on success, standard Linux error codes on failure.
+ */
+int glink_rpm_mask_rx_interrupt(void *handle, bool mask, void *pstruct)
+{
+	struct channel_ctx *ctx = (struct channel_ctx *)handle;
+
+	if (!ctx)
+		return -EINVAL;
+
+	if (!ch_is_fully_opened(ctx))
+		return -EBUSY;
+
+	if (!ctx->transport_ptr ||
+	    !(ctx->transport_ptr->capabilities & GCAP_INTENTLESS))
+		return -EOPNOTSUPP;
+
+	return ctx->transport_ptr->ops->mask_rx_irq(ctx->transport_ptr->ops,
+						    ctx->lcid, mask, pstruct);
+}
+EXPORT_SYMBOL(glink_rpm_mask_rx_interrupt);
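+
+/*
+ * Illustrative client-side sketch (not part of this driver): polled RX with
+ * the RPM receive interrupt masked, e.g. from a context where interrupt
+ * delivery is undesirable. Events are drained until the poll reports
+ * nothing pending:
+ *
+ *	int ret;
+ *
+ *	ret = glink_rpm_mask_rx_interrupt(handle, true, NULL);
+ *	if (ret)
+ *		return ret;
+ *	do {
+ *		ret = glink_rpm_rx_poll(handle);
+ *	} while (ret > 0);
+ *	glink_rpm_mask_rx_interrupt(handle, false, NULL);
+ *
+ * Packet completion must still be detected via the notify_rx callback, as
+ * noted above.
+ */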
+
+/**
+ * glink_wait_link_down() - Get status of link
+ * @handle:	Channel handle in which this operation is performed
+ *
+ * This function will query the transport for its status, to allow clients to
+ * proceed in cleanup operations.
+ */
+int glink_wait_link_down(void *handle)
+{
+	struct channel_ctx *ctx = (struct channel_ctx *)handle;
+	int ret;
+
+	ret = glink_get_ch_ctx(ctx);
+	if (ret)
+		return ret;
+	if (!ctx->transport_ptr) {
+		glink_put_ch_ctx(ctx);
+		return -EOPNOTSUPP;
+	}
+	ret = ctx->transport_ptr->ops->wait_link_down(ctx->transport_ptr->ops);
+	glink_put_ch_ctx(ctx);
+	return ret;
+}
+EXPORT_SYMBOL(glink_wait_link_down);
+
+/**
+ * glink_xprt_ctx_release - Free the transport context
+ * @xprt_st_lock:	handle to the rwref_lock associated with the transport
+ *
+ * This should only be called when the reference count associated with the
+ * transport goes to zero.
+ */
+void glink_xprt_ctx_release(struct rwref_lock *xprt_st_lock)
+{
+	struct glink_dbgfs xprt_rm_dbgfs;
+	struct glink_core_xprt_ctx *xprt_ctx = container_of(xprt_st_lock,
+				struct glink_core_xprt_ctx, xprt_state_lhb0);
+	GLINK_INFO("%s: freeing transport [%s->%s] context\n", __func__,
+				xprt_ctx->name,
+				xprt_ctx->edge);
+	xprt_rm_dbgfs.curr_name = xprt_ctx->name;
+	xprt_rm_dbgfs.par_name = "xprt";
+	glink_debugfs_remove_recur(&xprt_rm_dbgfs);
+	GLINK_INFO("%s: xprt debugfs removed\n", __func__);
+	rwref_put(&xprt_ctx->edge_ctx->edge_ref_lock_lhd1);
+	kthread_stop(xprt_ctx->tx_task);
+	xprt_ctx->tx_task = NULL;
+	glink_core_deinit_xprt_qos_cfg(xprt_ctx);
+	kfree(xprt_ctx);
+	xprt_ctx = NULL;
+}
+
+/**
+ * glink_dummy_xprt_ctx_release - free the dummy transport context
+ * @xprt_st_lock:	Handle to the rwref_lock associated with the transport.
+ *
+ * The release function is called when all the channels on this dummy
+ * transport are closed and the reference count goes to zero.
+ */
+static void glink_dummy_xprt_ctx_release(struct rwref_lock *xprt_st_lock)
+{
+	struct glink_core_xprt_ctx *xprt_ctx = container_of(xprt_st_lock,
+				struct glink_core_xprt_ctx, xprt_state_lhb0);
+	GLINK_INFO("%s: freeing transport [%s->%s] context\n", __func__,
+				xprt_ctx->name,
+				xprt_ctx->edge);
+	kfree(xprt_ctx->ops);
+	xprt_ctx->ops = NULL;
+	kfree(xprt_ctx);
+}
+
+/**
+ * glink_xprt_name_to_id() - convert transport name to id
+ * @name:	Name of the transport.
+ * @id:		Assigned id.
+ *
+ * Return: 0 on success or standard Linux error code.
+ */
+int glink_xprt_name_to_id(const char *name, uint16_t *id)
+{
+	if (!strcmp(name, "smem")) {
+		*id = SMEM_XPRT_ID;
+		return 0;
+	}
+	if (!strcmp(name, "mailbox")) {
+		*id = SMEM_XPRT_ID;
+		return 0;
+	}
+	if (!strcmp(name, "spi")) {
+		*id = SPIV2_XPRT_ID;
+		return 0;
+	}
+	if (!strcmp(name, "smd_trans")) {
+		*id = SMD_TRANS_XPRT_ID;
+		return 0;
+	}
+	if (!strcmp(name, "lloop")) {
+		*id = LLOOP_XPRT_ID;
+		return 0;
+	}
+	if (!strcmp(name, "mock")) {
+		*id = MOCK_XPRT_ID;
+		return 0;
+	}
+	if (!strcmp(name, "mock_low")) {
+		*id = MOCK_XPRT_LOW_ID;
+		return 0;
+	}
+	if (!strcmp(name, "mock_high")) {
+		*id = MOCK_XPRT_HIGH_ID;
+		return 0;
+	}
+	return -ENODEV;
+}
+EXPORT_SYMBOL(glink_xprt_name_to_id);
+
+/**
+ * of_get_glink_core_qos_cfg() - Parse the qos related dt entries
+ * @phandle:	The handle to the qos related node in DT.
+ * @cfg:	The transport configuration to be filled.
+ *
+ * Return: 0 on Success, standard Linux error otherwise.
+ */
+int of_get_glink_core_qos_cfg(struct device_node *phandle,
+				struct glink_core_transport_cfg *cfg)
+{
+	int rc, i;
+	char *key;
+	uint32_t num_flows;
+	uint32_t *arr32;
+
+	if (!phandle) {
+		GLINK_ERR("%s: phandle is NULL\n", __func__);
+		return -EINVAL;
+	}
+
+	key = "qcom,mtu-size";
+	rc = of_property_read_u32(phandle, key, (uint32_t *)&cfg->mtu);
+	if (rc) {
+		GLINK_ERR("%s: missing key %s\n", __func__, key);
+		return -ENODEV;
+	}
+
+	key = "qcom,tput-stats-cycle";
+	rc = of_property_read_u32(phandle, key, &cfg->token_count);
+	if (rc) {
+		GLINK_ERR("%s: missing key %s\n", __func__, key);
+		rc = -ENODEV;
+		goto error;
+	}
+
+	key = "qcom,flow-info";
+	if (!of_find_property(phandle, key, &num_flows)) {
+		GLINK_ERR("%s: missing key %s\n", __func__, key);
+		rc = -ENODEV;
+		goto error;
+	}
+
+	num_flows /= sizeof(uint32_t);
+	if (num_flows % 2) {
+		GLINK_ERR("%s: Invalid flow info length\n", __func__);
+		rc = -EINVAL;
+		goto error;
+	}
+
+	num_flows /= 2;
+	cfg->num_flows = num_flows;
+
+	cfg->flow_info = kmalloc_array(num_flows, sizeof(*(cfg->flow_info)),
+					GFP_KERNEL);
+	if (!cfg->flow_info) {
+		GLINK_ERR("%s: Memory allocation for flow info failed\n",
+				__func__);
+		rc = -ENOMEM;
+		goto error;
+	}
+	arr32 = kmalloc_array(num_flows * 2, sizeof(uint32_t), GFP_KERNEL);
+	if (!arr32) {
+		GLINK_ERR("%s: Memory allocation for temporary array failed\n",
+				__func__);
+		rc = -ENOMEM;
+		goto temp_mem_alloc_fail;
+	}
+
+	of_property_read_u32_array(phandle, key, arr32, num_flows * 2);
+
+	for (i = 0; i < num_flows; i++) {
+		cfg->flow_info[i].mtu_tx_time_us = arr32[2 * i];
+		cfg->flow_info[i].power_state = arr32[2 * i + 1];
+	}
+
+	kfree(arr32);
+	of_node_put(phandle);
+	return 0;
+
+temp_mem_alloc_fail:
+	kfree(cfg->flow_info);
+error:
+	cfg->mtu = 0;
+	cfg->token_count = 0;
+	cfg->num_flows = 0;
+	cfg->flow_info = NULL;
+	return rc;
+}
+EXPORT_SYMBOL(of_get_glink_core_qos_cfg);
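+
+/*
+ * Illustrative DT fragment (hypothetical node name and values) matching the
+ * properties parsed above; qcom,flow-info holds one
+ * <mtu_tx_time_us power_state> pair per flow:
+ *
+ *	qcom,glink-qos-config {
+ *		qcom,mtu-size = <0x800>;
+ *		qcom,tput-stats-cycle = <10>;
+ *		qcom,flow-info = <0x0 0x0>,
+ *				 <0x200 0x1>;
+ *	};
+ */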
+
+/**
+ * glink_core_init_xprt_qos_cfg() - Initialize a transport's QoS configuration
+ * @xprt_ptr:	Transport to be initialized with QoS configuration.
+ * @cfg:	Data structure containing QoS configuration.
+ *
+ * This function is used during the transport registration to initialize it
+ * with QoS configuration.
+ *
+ * Return: 0 on success, standard Linux error codes on failure.
+ */
+static int glink_core_init_xprt_qos_cfg(struct glink_core_xprt_ctx *xprt_ptr,
+					 struct glink_core_transport_cfg *cfg)
+{
+	int i;
+	struct sched_param param = { .sched_priority = GLINK_KTHREAD_PRIO };
+
+	xprt_ptr->mtu = cfg->mtu ? cfg->mtu : GLINK_QOS_DEF_MTU;
+	xprt_ptr->num_priority = cfg->num_flows ? cfg->num_flows :
+					GLINK_QOS_DEF_NUM_PRIORITY;
+	xprt_ptr->token_count = cfg->token_count ? cfg->token_count :
+					GLINK_QOS_DEF_NUM_TOKENS;
+
+	xprt_ptr->prio_bin = kzalloc(xprt_ptr->num_priority *
+				sizeof(struct glink_qos_priority_bin),
+				GFP_KERNEL);
+	if (!xprt_ptr->prio_bin) {
+		GLINK_ERR("%s: unable to allocate priority bins\n", __func__);
+		return -ENOMEM;
+	}
+	if (xprt_ptr->num_priority > 1)
+		sched_setscheduler(xprt_ptr->tx_task, SCHED_FIFO, &param);
+	for (i = 1; i < xprt_ptr->num_priority; i++) {
+		xprt_ptr->prio_bin[i].max_rate_kBps =
+			glink_qos_calc_rate_kBps(xprt_ptr->mtu,
+				cfg->flow_info[i].mtu_tx_time_us);
+		xprt_ptr->prio_bin[i].power_state =
+				cfg->flow_info[i].power_state;
+		INIT_LIST_HEAD(&xprt_ptr->prio_bin[i].tx_ready);
+	}
+	xprt_ptr->prio_bin[0].max_rate_kBps = 0;
+	if (cfg->flow_info)
+		xprt_ptr->prio_bin[0].power_state =
+						cfg->flow_info[0].power_state;
+	INIT_LIST_HEAD(&xprt_ptr->prio_bin[0].tx_ready);
+	xprt_ptr->threshold_rate_kBps =
+		xprt_ptr->prio_bin[xprt_ptr->num_priority - 1].max_rate_kBps;
+
+	return 0;
+}
+
+/**
+ * glink_core_deinit_xprt_qos_cfg() - Reset a transport's QoS configuration
+ * @xprt_ptr: Transport to be deinitialized.
+ *
+ * This function is used during the time of transport unregistration to
+ * de-initialize the QoS configuration from a transport.
+ */
+static void glink_core_deinit_xprt_qos_cfg(struct glink_core_xprt_ctx *xprt_ptr)
+{
+	kfree(xprt_ptr->prio_bin);
+	xprt_ptr->prio_bin = NULL;
+	xprt_ptr->mtu = 0;
+	xprt_ptr->num_priority = 0;
+	xprt_ptr->token_count = 0;
+	xprt_ptr->threshold_rate_kBps = 0;
+}
+
+/**
+ * glink_core_register_transport() - register a new transport
+ * @if_ptr:	The interface to the transport.
+ * @cfg:	Description and configuration of the transport.
+ *
+ * Return: 0 on success, -EINVAL for invalid input.
+ */
+int glink_core_register_transport(struct glink_transport_if *if_ptr,
+				  struct glink_core_transport_cfg *cfg)
+{
+	struct glink_core_xprt_ctx *xprt_ptr;
+	size_t len;
+	uint16_t id;
+	int ret;
+	char log_name[GLINK_NAME_SIZE*2+2] = {0};
+
+	if (!if_ptr || !cfg || !cfg->name || !cfg->edge)
+		return -EINVAL;
+
+	len = strlen(cfg->name);
+	if (len == 0 || len >= GLINK_NAME_SIZE)
+		return -EINVAL;
+
+	len = strlen(cfg->edge);
+	if (len == 0 || len >= GLINK_NAME_SIZE)
+		return -EINVAL;
+
+	if (cfg->versions_entries < 1)
+		return -EINVAL;
+
+	ret = glink_xprt_name_to_id(cfg->name, &id);
+	if (ret)
+		return ret;
+
+	xprt_ptr = kzalloc(sizeof(struct glink_core_xprt_ctx), GFP_KERNEL);
+	if (xprt_ptr == NULL)
+		return -ENOMEM;
+
+	xprt_ptr->id = id;
+	rwref_lock_init(&xprt_ptr->xprt_state_lhb0,
+			glink_xprt_ctx_release);
+	strlcpy(xprt_ptr->name, cfg->name, GLINK_NAME_SIZE);
+	strlcpy(xprt_ptr->edge, cfg->edge, GLINK_NAME_SIZE);
+	xprt_ptr->versions = cfg->versions;
+	xprt_ptr->versions_entries = cfg->versions_entries;
+	xprt_ptr->local_version_idx = cfg->versions_entries - 1;
+	xprt_ptr->remote_version_idx = cfg->versions_entries - 1;
+	xprt_ptr->edge_ctx = edge_name_to_ctx_create(xprt_ptr);
+	if (!xprt_ptr->edge_ctx) {
+		kfree(xprt_ptr);
+		return -ENOMEM;
+	}
+	xprt_ptr->l_features =
+			cfg->versions[cfg->versions_entries - 1].features;
+	if (!if_ptr->poll)
+		if_ptr->poll = dummy_poll;
+	if (!if_ptr->mask_rx_irq)
+		if_ptr->mask_rx_irq = dummy_mask_rx_irq;
+	if (!if_ptr->reuse_rx_intent)
+		if_ptr->reuse_rx_intent = dummy_reuse_rx_intent;
+	if (!if_ptr->wait_link_down)
+		if_ptr->wait_link_down = dummy_wait_link_down;
+	if (!if_ptr->tx_cmd_tracer_pkt)
+		if_ptr->tx_cmd_tracer_pkt = dummy_tx_cmd_tracer_pkt;
+	if (!if_ptr->get_power_vote_ramp_time)
+		if_ptr->get_power_vote_ramp_time =
+					dummy_get_power_vote_ramp_time;
+	if (!if_ptr->power_vote)
+		if_ptr->power_vote = dummy_power_vote;
+	if (!if_ptr->power_unvote)
+		if_ptr->power_unvote = dummy_power_unvote;
+	if (!if_ptr->rx_rt_vote)
+		if_ptr->rx_rt_vote = dummy_rx_rt_vote;
+	if (!if_ptr->rx_rt_unvote)
+		if_ptr->rx_rt_unvote = dummy_rx_rt_unvote;
+	xprt_ptr->capabilities = 0;
+	xprt_ptr->ops = if_ptr;
+	spin_lock_init(&xprt_ptr->xprt_ctx_lock_lhb1);
+	xprt_ptr->next_lcid = 1; /* 0 reserved for default unconfigured */
+	INIT_LIST_HEAD(&xprt_ptr->free_lcid_list);
+	xprt_ptr->max_cid = cfg->max_cid;
+	xprt_ptr->max_iid = cfg->max_iid;
+	xprt_ptr->local_state = GLINK_XPRT_DOWN;
+	xprt_ptr->remote_neg_completed = false;
+	INIT_LIST_HEAD(&xprt_ptr->channels);
+	INIT_LIST_HEAD(&xprt_ptr->notified);
+	spin_lock_init(&xprt_ptr->tx_ready_lock_lhb3);
+	mutex_init(&xprt_ptr->xprt_dbgfs_lock_lhb4);
+	init_kthread_work(&xprt_ptr->tx_kwork, tx_func);
+	init_kthread_worker(&xprt_ptr->tx_wq);
+	xprt_ptr->tx_task = kthread_run(kthread_worker_fn,
+			&xprt_ptr->tx_wq, "%s_%s_glink_tx",
+			xprt_ptr->edge, xprt_ptr->name);
+	if (IS_ERR_OR_NULL(xprt_ptr->tx_task)) {
+		GLINK_ERR("%s: unable to run thread\n", __func__);
+		kfree(xprt_ptr);
+		return -ENOMEM;
+	}
+	ret = glink_core_init_xprt_qos_cfg(xprt_ptr, cfg);
+	if (ret < 0) {
+		kfree(xprt_ptr);
+		return ret;
+	}
+	INIT_DELAYED_WORK(&xprt_ptr->pm_qos_work, glink_pm_qos_cancel_worker);
+	pm_qos_add_request(&xprt_ptr->pm_qos_req, PM_QOS_CPU_DMA_LATENCY,
+			PM_QOS_DEFAULT_VALUE);
+
+	if_ptr->glink_core_priv = xprt_ptr;
+	if_ptr->glink_core_if_ptr = &core_impl;
+
+	mutex_lock(&transport_list_lock_lha0);
+	list_add_tail(&xprt_ptr->list_node, &transport_list);
+	mutex_unlock(&transport_list_lock_lha0);
+	glink_debugfs_add_xprt(xprt_ptr);
+	snprintf(log_name, sizeof(log_name), "%s_%s",
+			xprt_ptr->edge, xprt_ptr->name);
+	xprt_ptr->log_ctx = ipc_log_context_create(NUM_LOG_PAGES, log_name, 0);
+	if (!xprt_ptr->log_ctx)
+		GLINK_ERR("%s: unable to create log context for [%s:%s]\n",
+				__func__, xprt_ptr->edge, xprt_ptr->name);
+
+	return 0;
+}
+EXPORT_SYMBOL(glink_core_register_transport);
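+
+/*
+ * Illustrative transport-side sketch (not part of this driver): the minimal
+ * registration a transport implementation would perform, assuming it has
+ * filled a glink_transport_if (xprt_if) with its command callbacks and
+ * provides one version entry with a negotiate_features handler. The edge
+ * name and negotiate_features_v1 are placeholders:
+ *
+ *	static struct glink_core_version versions[] = {
+ *		{ .version = 1, .features = 0,
+ *		  .negotiate_features = negotiate_features_v1 },
+ *	};
+ *
+ *	struct glink_core_transport_cfg cfg = {
+ *		.name = "smem",
+ *		.edge = "remote_proc",
+ *		.versions = versions,
+ *		.versions_entries = ARRAY_SIZE(versions),
+ *		.max_cid = 0xffff,
+ *		.max_iid = 0xffffffff,
+ *	};
+ *
+ *	ret = glink_core_register_transport(&xprt_if, &cfg);
+ *
+ * After a successful return, the core owns version negotiation; the
+ * transport signals link readiness through the core interface it was handed
+ * (see glink_core_link_up() below).
+ */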
+
+/**
+ * glink_core_unregister_transport() - unregister a transport
+ *
+ * @if_ptr:	The interface to the transport.
+ */
+void glink_core_unregister_transport(struct glink_transport_if *if_ptr)
+{
+	struct glink_core_xprt_ctx *xprt_ptr = if_ptr->glink_core_priv;
+
+	GLINK_DBG_XPRT(xprt_ptr, "%s: destroying transport\n", __func__);
+	if (xprt_ptr->local_state != GLINK_XPRT_DOWN) {
+		GLINK_ERR_XPRT(xprt_ptr,
+		"%s: link_down should have been called before this\n",
+		__func__);
+		return;
+	}
+
+	mutex_lock(&transport_list_lock_lha0);
+	list_del(&xprt_ptr->list_node);
+	mutex_unlock(&transport_list_lock_lha0);
+	flush_delayed_work(&xprt_ptr->pm_qos_work);
+	pm_qos_remove_request(&xprt_ptr->pm_qos_req);
+	ipc_log_context_destroy(xprt_ptr->log_ctx);
+	xprt_ptr->log_ctx = NULL;
+	rwref_put(&xprt_ptr->xprt_state_lhb0);
+}
+EXPORT_SYMBOL(glink_core_unregister_transport);
+
+/**
+ * glink_core_link_up() - transport link-up notification
+ *
+ * @if_ptr:	pointer to transport interface
+ */
+static void glink_core_link_up(struct glink_transport_if *if_ptr)
+{
+	struct glink_core_xprt_ctx *xprt_ptr = if_ptr->glink_core_priv;
+
+	/* start local negotiation */
+	xprt_ptr->local_state = GLINK_XPRT_NEGOTIATING;
+	xprt_ptr->local_version_idx = xprt_ptr->versions_entries - 1;
+	xprt_ptr->l_features =
+		xprt_ptr->versions[xprt_ptr->local_version_idx].features;
+	if_ptr->tx_cmd_version(if_ptr,
+		    xprt_ptr->versions[xprt_ptr->local_version_idx].version,
+		    xprt_ptr->versions[xprt_ptr->local_version_idx].features);
+
+}
+
+/**
+ * glink_core_link_down() - transport link-down notification
+ *
+ * @if_ptr:	pointer to transport interface
+ */
+static void glink_core_link_down(struct glink_transport_if *if_ptr)
+{
+	struct glink_core_xprt_ctx *xprt_ptr = if_ptr->glink_core_priv;
+
+	rwref_write_get(&xprt_ptr->xprt_state_lhb0);
+	xprt_ptr->next_lcid = 1;
+	xprt_ptr->local_state = GLINK_XPRT_DOWN;
+	xprt_ptr->curr_qos_rate_kBps = 0;
+	xprt_ptr->local_version_idx = xprt_ptr->versions_entries - 1;
+	xprt_ptr->remote_version_idx = xprt_ptr->versions_entries - 1;
+	xprt_ptr->l_features =
+		xprt_ptr->versions[xprt_ptr->local_version_idx].features;
+	xprt_ptr->remote_neg_completed = false;
+	rwref_write_put(&xprt_ptr->xprt_state_lhb0);
+	GLINK_DBG_XPRT(xprt_ptr,
+		"%s: Flushing work from tx_wq. Thread: %u\n", __func__,
+		current->pid);
+	flush_kthread_worker(&xprt_ptr->tx_wq);
+	glink_core_channel_cleanup(xprt_ptr);
+	check_link_notifier_and_notify(xprt_ptr, GLINK_LINK_STATE_DOWN);
+}
+
+/**
+ * glink_create_dummy_xprt_ctx() - create a dummy transport that replaces all
+ *				the transport interface functions with a dummy
+ * @orig_xprt_ctx:	Pointer to the original transport context.
+ *
+ * The dummy transport is used only when it is swapped with the actual
+ * transport pointer in the SSR/unregister case.
+ *
+ * Return:	Pointer to dummy transport context.
+ */
+static struct glink_core_xprt_ctx *glink_create_dummy_xprt_ctx(
+				struct glink_core_xprt_ctx *orig_xprt_ctx)
+{
+	struct glink_core_xprt_ctx *xprt_ptr;
+	struct glink_transport_if *if_ptr;
+
+	xprt_ptr = kzalloc(sizeof(*xprt_ptr), GFP_KERNEL);
+	if (!xprt_ptr)
+		return ERR_PTR(-ENOMEM);
+	if_ptr = kmalloc(sizeof(*if_ptr), GFP_KERNEL);
+	if (!if_ptr) {
+		kfree(xprt_ptr);
+		return ERR_PTR(-ENOMEM);
+	}
+	rwref_lock_init(&xprt_ptr->xprt_state_lhb0,
+			glink_dummy_xprt_ctx_release);
+
+	strlcpy(xprt_ptr->name, "dummy", GLINK_NAME_SIZE);
+	strlcpy(xprt_ptr->edge, orig_xprt_ctx->edge, GLINK_NAME_SIZE);
+	if_ptr->poll = dummy_poll;
+	if_ptr->mask_rx_irq = dummy_mask_rx_irq;
+	if_ptr->reuse_rx_intent = dummy_reuse_rx_intent;
+	if_ptr->wait_link_down = dummy_wait_link_down;
+	if_ptr->allocate_rx_intent = dummy_allocate_rx_intent;
+	if_ptr->deallocate_rx_intent = dummy_deallocate_rx_intent;
+	if_ptr->tx_cmd_local_rx_intent = dummy_tx_cmd_local_rx_intent;
+	if_ptr->tx_cmd_local_rx_done = dummy_tx_cmd_local_rx_done;
+	if_ptr->tx = dummy_tx;
+	if_ptr->tx_cmd_rx_intent_req = dummy_tx_cmd_rx_intent_req;
+	if_ptr->tx_cmd_remote_rx_intent_req_ack =
+				dummy_tx_cmd_remote_rx_intent_req_ack;
+	if_ptr->tx_cmd_set_sigs = dummy_tx_cmd_set_sigs;
+	if_ptr->tx_cmd_ch_open = dummy_tx_cmd_ch_open;
+	if_ptr->tx_cmd_ch_remote_open_ack = dummy_tx_cmd_ch_remote_open_ack;
+	if_ptr->tx_cmd_ch_close = dummy_tx_cmd_ch_close;
+	if_ptr->tx_cmd_ch_remote_close_ack = dummy_tx_cmd_ch_remote_close_ack;
+	if_ptr->tx_cmd_tracer_pkt = dummy_tx_cmd_tracer_pkt;
+	if_ptr->get_power_vote_ramp_time = dummy_get_power_vote_ramp_time;
+	if_ptr->power_vote = dummy_power_vote;
+	if_ptr->power_unvote = dummy_power_unvote;
+
+	xprt_ptr->ops = if_ptr;
+	xprt_ptr->log_ctx = log_ctx;
+	spin_lock_init(&xprt_ptr->xprt_ctx_lock_lhb1);
+	INIT_LIST_HEAD(&xprt_ptr->free_lcid_list);
+	xprt_ptr->local_state = GLINK_XPRT_DOWN;
+	xprt_ptr->remote_neg_completed = false;
+	INIT_LIST_HEAD(&xprt_ptr->channels);
+	xprt_ptr->dummy_in_use = true;
+	INIT_LIST_HEAD(&xprt_ptr->notified);
+	spin_lock_init(&xprt_ptr->tx_ready_lock_lhb3);
+	mutex_init(&xprt_ptr->xprt_dbgfs_lock_lhb4);
+	return xprt_ptr;
+}
+
+static struct channel_ctx *get_first_ch_ctx(
+	struct glink_core_xprt_ctx *xprt_ctx)
+{
+	unsigned long flags;
+	struct channel_ctx *ctx;
+
+	spin_lock_irqsave(&xprt_ctx->xprt_ctx_lock_lhb1, flags);
+	if (!list_empty(&xprt_ctx->channels)) {
+		ctx = list_first_entry(&xprt_ctx->channels,
+					struct channel_ctx, port_list_node);
+		rwref_get(&ctx->ch_state_lhb2);
+	} else {
+		ctx = NULL;
+	}
+	spin_unlock_irqrestore(&xprt_ctx->xprt_ctx_lock_lhb1, flags);
+	return ctx;
+}
+
+static void glink_core_move_ch_node(struct glink_core_xprt_ctx *xprt_ptr,
+	struct glink_core_xprt_ctx *dummy_xprt_ctx, struct channel_ctx *ctx)
+{
+	unsigned long flags, d_flags;
+
+	spin_lock_irqsave(&dummy_xprt_ctx->xprt_ctx_lock_lhb1, d_flags);
+	spin_lock_irqsave(&xprt_ptr->xprt_ctx_lock_lhb1, flags);
+	rwref_get(&dummy_xprt_ctx->xprt_state_lhb0);
+	list_move_tail(&ctx->port_list_node, &dummy_xprt_ctx->channels);
+	spin_unlock_irqrestore(&xprt_ptr->xprt_ctx_lock_lhb1, flags);
+	spin_unlock_irqrestore(&dummy_xprt_ctx->xprt_ctx_lock_lhb1, d_flags);
+}
+
+/**
+ * glink_core_channel_cleanup() - cleanup all channels for the transport
+ *
+ * @xprt_ptr:	pointer to transport context
+ *
+ * This function should be called either from link_down or ssr
+ */
+static void glink_core_channel_cleanup(struct glink_core_xprt_ctx *xprt_ptr)
+{
+	unsigned long flags, d_flags;
+	struct channel_ctx *ctx;
+	struct channel_lcid *temp_lcid, *temp_lcid1;
+	struct glink_core_xprt_ctx *dummy_xprt_ctx;
+
+	dummy_xprt_ctx = glink_create_dummy_xprt_ctx(xprt_ptr);
+	if (IS_ERR_OR_NULL(dummy_xprt_ctx)) {
+		GLINK_ERR("%s: Dummy Transport creation failed\n", __func__);
+		return;
+	}
+	rwref_read_get(&dummy_xprt_ctx->xprt_state_lhb0);
+	rwref_read_get(&xprt_ptr->xprt_state_lhb0);
+	ctx = get_first_ch_ctx(xprt_ptr);
+	while (ctx) {
+		spin_lock_irqsave(&xprt_ptr->tx_ready_lock_lhb3, flags);
+		spin_lock(&ctx->tx_lists_lock_lhc3);
+		if (!list_empty(&ctx->tx_active))
+			glink_qos_done_ch_tx(ctx);
+		spin_unlock(&ctx->tx_lists_lock_lhc3);
+		spin_unlock_irqrestore(&xprt_ptr->tx_ready_lock_lhb3, flags);
+		rwref_write_get_atomic(&ctx->ch_state_lhb2, true);
+		if (ctx->local_open_state == GLINK_CHANNEL_OPENED ||
+			ctx->local_open_state == GLINK_CHANNEL_OPENING) {
+			ctx->transport_ptr = dummy_xprt_ctx;
+			glink_core_move_ch_node(xprt_ptr, dummy_xprt_ctx, ctx);
+		} else {
+			/* local state is in either CLOSED or CLOSING */
+			glink_core_remote_close_common(ctx, true);
+			if (ctx->local_open_state == GLINK_CHANNEL_CLOSING)
+				glink_core_ch_close_ack_common(ctx, true);
+			/* Channel should be fully closed now. Delete here */
+			if (ch_is_fully_closed(ctx))
+				glink_delete_ch_from_list(ctx, false);
+		}
+		rwref_put(&ctx->ch_state_lhb2);
+		rwref_write_put(&ctx->ch_state_lhb2);
+		ctx = get_first_ch_ctx(xprt_ptr);
+	}
+	spin_lock_irqsave(&xprt_ptr->xprt_ctx_lock_lhb1, flags);
+	list_for_each_entry_safe(temp_lcid, temp_lcid1,
+			&xprt_ptr->free_lcid_list, list_node) {
+		list_del(&temp_lcid->list_node);
+		kfree(temp_lcid);
+	}
+	spin_unlock_irqrestore(&xprt_ptr->xprt_ctx_lock_lhb1, flags);
+	rwref_read_put(&xprt_ptr->xprt_state_lhb0);
+
+	spin_lock_irqsave(&dummy_xprt_ctx->xprt_ctx_lock_lhb1, d_flags);
+	dummy_xprt_ctx->dummy_in_use = false;
+	while (!list_empty(&dummy_xprt_ctx->channels)) {
+		ctx = list_first_entry(&dummy_xprt_ctx->channels,
+					struct channel_ctx, port_list_node);
+		list_move_tail(&ctx->port_list_node,
+					&dummy_xprt_ctx->notified);
+
+		rwref_get(&ctx->ch_state_lhb2);
+		spin_unlock_irqrestore(&dummy_xprt_ctx->xprt_ctx_lock_lhb1,
+				d_flags);
+		glink_core_remote_close_common(ctx, false);
+		spin_lock_irqsave(&dummy_xprt_ctx->xprt_ctx_lock_lhb1,
+				d_flags);
+		rwref_put(&ctx->ch_state_lhb2);
+	}
+	spin_unlock_irqrestore(&dummy_xprt_ctx->xprt_ctx_lock_lhb1, d_flags);
+	rwref_read_put(&dummy_xprt_ctx->xprt_state_lhb0);
+}
+
+/**
+ * glink_core_rx_cmd_version() - receive version/features from remote system
+ *
+ * @if_ptr:	pointer to transport interface
+ * @r_version:	remote version
+ * @r_features:	remote features
+ *
+ * This function is called in response to a remote-initiated version/feature
+ * negotiation sequence.
+ */
+static void glink_core_rx_cmd_version(struct glink_transport_if *if_ptr,
+	uint32_t r_version, uint32_t r_features)
+{
+	struct glink_core_xprt_ctx *xprt_ptr = if_ptr->glink_core_priv;
+	const struct glink_core_version *versions = xprt_ptr->versions;
+	bool neg_complete = false;
+	uint32_t l_version;
+
+	if (xprt_is_fully_opened(xprt_ptr)) {
+		GLINK_ERR_XPRT(xprt_ptr,
+			"%s: Negotiation already complete\n", __func__);
+		return;
+	}
+
+	l_version = versions[xprt_ptr->remote_version_idx].version;
+
+	GLINK_INFO_XPRT(xprt_ptr,
+		"%s: [local]%x:%08x [remote]%x:%08x\n", __func__,
+		l_version, xprt_ptr->l_features, r_version, r_features);
+
+	if (l_version > r_version) {
+		/* Find matching version */
+		while (true) {
+			uint32_t rver_idx;
+
+			if (xprt_ptr->remote_version_idx == 0) {
+				/* version negotiation failed */
+				GLINK_ERR_XPRT(xprt_ptr,
+					"%s: Transport negotiation failed\n",
+					__func__);
+				l_version = 0;
+				xprt_ptr->l_features = 0;
+				break;
+			}
+			--xprt_ptr->remote_version_idx;
+			rver_idx = xprt_ptr->remote_version_idx;
+
+			if (versions[rver_idx].version <= r_version) {
+				/* found a potential match */
+				l_version = versions[rver_idx].version;
+				xprt_ptr->l_features =
+					versions[rver_idx].features;
+				break;
+			}
+		}
+	}
+
+	if (l_version == r_version) {
+		GLINK_INFO_XPRT(xprt_ptr,
+			"%s: Remote and local version are matched %x:%08x\n",
+			__func__, r_version, r_features);
+		if (xprt_ptr->l_features != r_features) {
+			uint32_t rver_idx = xprt_ptr->remote_version_idx;
+
+			xprt_ptr->l_features = versions[rver_idx]
+						.negotiate_features(if_ptr,
+					&xprt_ptr->versions[rver_idx],
+					r_features);
+			GLINK_INFO_XPRT(xprt_ptr,
+				"%s: negotiate features %x:%08x\n",
+				__func__, l_version, xprt_ptr->l_features);
+		}
+		neg_complete = true;
+	}
+	if_ptr->tx_cmd_version_ack(if_ptr, l_version, xprt_ptr->l_features);
+
+	if (neg_complete) {
+		GLINK_INFO_XPRT(xprt_ptr,
+			"%s: Remote negotiation complete %x:%08x\n", __func__,
+			l_version, xprt_ptr->l_features);
+
+		if (xprt_ptr->local_state == GLINK_XPRT_OPENED) {
+			xprt_ptr->capabilities = if_ptr->set_version(if_ptr,
+							l_version,
+							xprt_ptr->l_features);
+		}
+		if_ptr->glink_core_priv->remote_neg_completed = true;
+		if (xprt_is_fully_opened(xprt_ptr))
+			check_link_notifier_and_notify(xprt_ptr,
+						       GLINK_LINK_STATE_UP);
+	}
+}
+
+/**
+ * glink_core_rx_cmd_version_ack() - receive negotiation ack from remote system
+ *
+ * @if_ptr:	pointer to transport interface
+ * @r_version:	remote version response
+ * @r_features:	remote features response
+ *
+ * This function is called in response to a local-initiated version/feature
+ * negotiation sequence and is the counter-offer from the remote side based
+ * upon the initial version and feature set requested.
+ */
+static void glink_core_rx_cmd_version_ack(struct glink_transport_if *if_ptr,
+	uint32_t r_version, uint32_t r_features)
+{
+	struct glink_core_xprt_ctx *xprt_ptr = if_ptr->glink_core_priv;
+	const struct glink_core_version *versions = xprt_ptr->versions;
+	uint32_t l_version;
+	bool neg_complete = false;
+
+	if (xprt_is_fully_opened(xprt_ptr)) {
+		GLINK_ERR_XPRT(xprt_ptr,
+			"%s: Negotiation already complete\n", __func__);
+		return;
+	}
+
+	l_version = versions[xprt_ptr->local_version_idx].version;
+
+	GLINK_INFO_XPRT(xprt_ptr,
+		"%s: [local]%x:%08x [remote]%x:%08x\n", __func__,
+		 l_version, xprt_ptr->l_features, r_version, r_features);
+
+	if (l_version > r_version) {
+		/* find matching version */
+		while (true) {
+			uint32_t lver_idx = xprt_ptr->local_version_idx;
+
+			if (xprt_ptr->local_version_idx == 0) {
+				/* version negotiation failed */
+				xprt_ptr->local_state = GLINK_XPRT_FAILED;
+				GLINK_ERR_XPRT(xprt_ptr,
+					"%s: Transport negotiation failed\n",
+					__func__);
+				l_version = 0;
+				xprt_ptr->l_features = 0;
+				break;
+			}
+			--xprt_ptr->local_version_idx;
+			lver_idx = xprt_ptr->local_version_idx;
+
+			if (versions[lver_idx].version <= r_version) {
+				/* found a potential match */
+				l_version = versions[lver_idx].version;
+				xprt_ptr->l_features =
+					versions[lver_idx].features;
+				break;
+			}
+		}
+	} else if (l_version == r_version) {
+		if (xprt_ptr->l_features != r_features) {
+			/* version matches, negotiate features */
+			uint32_t lver_idx = xprt_ptr->local_version_idx;
+
+			xprt_ptr->l_features = versions[lver_idx]
+						.negotiate_features(if_ptr,
+							&versions[lver_idx],
+							r_features);
+			GLINK_INFO_XPRT(xprt_ptr,
+				"%s: negotiation features %x:%08x\n",
+				__func__, l_version, xprt_ptr->l_features);
+		} else {
+			neg_complete = true;
+		}
+	} else {
+		/*
+		 * r_version > l_version
+		 *
+		 * Remote responded with a version greater than what we
+		 * requested which is invalid and is treated as failure of the
+		 * negotiation algorithm.
+		 */
+		GLINK_ERR_XPRT(xprt_ptr,
+			"%s: [local]%x:%08x [remote]%x:%08x neg failure\n",
+			__func__, l_version, xprt_ptr->l_features, r_version,
+			r_features);
+		xprt_ptr->local_state = GLINK_XPRT_FAILED;
+		l_version = 0;
+		xprt_ptr->l_features = 0;
+	}
+
+	if (neg_complete) {
+		/* negotiation complete */
+		GLINK_INFO_XPRT(xprt_ptr,
+			"%s: Local negotiation complete %x:%08x\n",
+			__func__, l_version, xprt_ptr->l_features);
+
+		if (xprt_ptr->remote_neg_completed) {
+			xprt_ptr->capabilities = if_ptr->set_version(if_ptr,
+							l_version,
+							xprt_ptr->l_features);
+		}
+
+		xprt_ptr->local_state = GLINK_XPRT_OPENED;
+		if (xprt_is_fully_opened(xprt_ptr))
+			check_link_notifier_and_notify(xprt_ptr,
+						       GLINK_LINK_STATE_UP);
+	} else {
+		if_ptr->tx_cmd_version(if_ptr, l_version, xprt_ptr->l_features);
+	}
+}
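+
+/*
+ * Worked negotiation example (illustrative): suppose the local side
+ * supports versions {1, 2, 3} and starts by transmitting v3 with its
+ * features, while the remote side only supports up to v2 and ACKs with v2.
+ * The handler above sees l_version (3) > r_version (2), walks
+ * local_version_idx down until versions[idx].version <= 2, and re-transmits
+ * tx_cmd_version with v2. Once both sides observe matching version and
+ * features, neg_complete is set, set_version() fixes the transport
+ * capabilities, and GLINK_LINK_STATE_UP is delivered to registered link
+ * state notifiers once the transport is fully opened.
+ */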
+
+/**
+ * find_l_ctx_get() - find a local channel context based on a remote one
+ * @r_ctx:	The remote channel to use as a lookup key.
+ *
+ * If the channel is found, the reference count is incremented to ensure the
+ * lifetime of the channel context.  The caller must call rwref_put() when done.
+ *
+ * Return: The corresponding local ctx, or NULL if not found.
+ */
+static struct channel_ctx *find_l_ctx_get(struct channel_ctx *r_ctx)
+{
+	struct glink_core_xprt_ctx *xprt;
+	struct channel_ctx *ctx;
+	unsigned long flags;
+	struct channel_ctx *l_ctx = NULL;
+
+	mutex_lock(&transport_list_lock_lha0);
+	list_for_each_entry(xprt, &transport_list, list_node)
+		if (!strcmp(r_ctx->transport_ptr->edge, xprt->edge)) {
+			rwref_write_get(&xprt->xprt_state_lhb0);
+			if (xprt->local_state != GLINK_XPRT_OPENED) {
+				rwref_write_put(&xprt->xprt_state_lhb0);
+				continue;
+			}
+			spin_lock_irqsave(&xprt->xprt_ctx_lock_lhb1, flags);
+			list_for_each_entry(ctx, &xprt->channels,
+							port_list_node)
+				if (!strcmp(ctx->name, r_ctx->name) &&
+							ctx->local_xprt_req &&
+							ctx->local_xprt_resp) {
+					l_ctx = ctx;
+					rwref_get(&l_ctx->ch_state_lhb2);
+				}
+			spin_unlock_irqrestore(&xprt->xprt_ctx_lock_lhb1,
+									flags);
+			rwref_write_put(&xprt->xprt_state_lhb0);
+		}
+	mutex_unlock(&transport_list_lock_lha0);
+
+	return l_ctx;
+}
+
+/**
+ * find_r_ctx_get() - find a remote channel context based on a local one
+ * @l_ctx:	The local channel to use as a lookup key.
+ *
+ * If the channel is found, the reference count is incremented to ensure the
+ * lifetime of the channel context.  The caller must call rwref_put() when done.
+ *
+ * Return: The corresponding remote ctx, or NULL if not found.
+ */
+static struct channel_ctx *find_r_ctx_get(struct channel_ctx *l_ctx)
+{
+	struct glink_core_xprt_ctx *xprt;
+	struct channel_ctx *ctx;
+	unsigned long flags;
+	struct channel_ctx *r_ctx = NULL;
+
+	mutex_lock(&transport_list_lock_lha0);
+	list_for_each_entry(xprt, &transport_list, list_node)
+		if (!strcmp(l_ctx->transport_ptr->edge, xprt->edge)) {
+			rwref_write_get(&xprt->xprt_state_lhb0);
+			if (xprt->local_state != GLINK_XPRT_OPENED) {
+				rwref_write_put(&xprt->xprt_state_lhb0);
+				continue;
+			}
+			spin_lock_irqsave(&xprt->xprt_ctx_lock_lhb1, flags);
+			list_for_each_entry(ctx, &xprt->channels,
+							port_list_node)
+				if (!strcmp(ctx->name, l_ctx->name) &&
+							ctx->remote_xprt_req &&
+							ctx->remote_xprt_resp) {
+					r_ctx = ctx;
+					rwref_get(&r_ctx->ch_state_lhb2);
+				}
+			spin_unlock_irqrestore(&xprt->xprt_ctx_lock_lhb1,
+									flags);
+			rwref_write_put(&xprt->xprt_state_lhb0);
+		}
+	mutex_unlock(&transport_list_lock_lha0);
+
+	return r_ctx;
+}
+
+/**
+ * will_migrate() - will a channel migrate to a different transport
+ * @l_ctx:	The local channel to migrate.
+ * @r_ctx:	The remote channel to migrate.
+ *
+ * One of the channel contexts can be NULL if not known, but at least one ctx
+ * must be provided.
+ *
+ * Return: Bool indicating if migration will occur.
+ */
+static bool will_migrate(struct channel_ctx *l_ctx, struct channel_ctx *r_ctx)
+{
+	uint16_t new_xprt;
+	bool migrate = false;
+
+	if (!r_ctx)
+		r_ctx = find_r_ctx_get(l_ctx);
+	else
+		rwref_get(&r_ctx->ch_state_lhb2);
+	if (!r_ctx)
+		return migrate;
+
+	if (!l_ctx)
+		l_ctx = find_l_ctx_get(r_ctx);
+	else
+		rwref_get(&l_ctx->ch_state_lhb2);
+	if (!l_ctx)
+		goto exit;
+
+	if (l_ctx->local_xprt_req == r_ctx->remote_xprt_req &&
+			l_ctx->local_xprt_req == l_ctx->transport_ptr->id)
+		goto exit;
+	if (l_ctx->no_migrate)
+		goto exit;
+
+	if (l_ctx->local_xprt_req > r_ctx->transport_ptr->id)
+		l_ctx->local_xprt_req = r_ctx->transport_ptr->id;
+
+	if (ch_is_fully_opened(l_ctx) &&
+		(l_ctx->transport_ptr->id == l_ctx->local_xprt_req))
+		goto exit;
+
+	new_xprt = max(l_ctx->local_xprt_req, r_ctx->remote_xprt_req);
+
+	if (new_xprt == l_ctx->transport_ptr->id)
+		goto exit;
+
+	migrate = true;
+exit:
+	if (l_ctx)
+		rwref_put(&l_ctx->ch_state_lhb2);
+	if (r_ctx)
+		rwref_put(&r_ctx->ch_state_lhb2);
+
+	return migrate;
+}
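+
+/*
+ * Illustrative example of the decision above (transport ids are
+ * hypothetical): a channel currently on transport id 1 with
+ * local_xprt_req = 1 and remote_xprt_req = 3 yields
+ * new_xprt = max(1, 3) = 3, which differs from the current id, so
+ * will_migrate() returns true.  If both sides request the transport the
+ * channel already lives on, it returns false.
+ */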
+
+/**
+ * ch_migrate() - migrate a channel to a different transport
+ * @l_ctx:	The local channel to migrate.
+ * @r_ctx:	The remote channel to migrate.
+ *
+ * One of the channel contexts can be NULL if not known, but at least one ctx
+ * must be provided.
+ *
+ * Return: Bool indicating if migration occurred.
+ */
+static bool ch_migrate(struct channel_ctx *l_ctx, struct channel_ctx *r_ctx)
+{
+	uint16_t new_xprt;
+	struct glink_core_xprt_ctx *xprt;
+	unsigned long flags;
+	struct channel_lcid *flcid;
+	uint16_t best_xprt = USHRT_MAX;
+	struct channel_ctx *ctx_clone;
+	bool migrated = false;
+
+	if (!r_ctx)
+		r_ctx = find_r_ctx_get(l_ctx);
+	else
+		rwref_get(&r_ctx->ch_state_lhb2);
+	if (!r_ctx)
+		return migrated;
+
+	if (!l_ctx)
+		l_ctx = find_l_ctx_get(r_ctx);
+	else
+		rwref_get(&l_ctx->ch_state_lhb2);
+	if (!l_ctx) {
+		rwref_put(&r_ctx->ch_state_lhb2);
+		return migrated;
+	}
+	if (ch_is_fully_opened(l_ctx) &&
+		(l_ctx->transport_ptr->id == l_ctx->local_xprt_req)) {
+		rwref_put(&l_ctx->ch_state_lhb2);
+		rwref_put(&r_ctx->ch_state_lhb2);
+		return migrated;
+	}
+
+	if (l_ctx->local_xprt_req == r_ctx->remote_xprt_req &&
+			l_ctx->local_xprt_req == l_ctx->transport_ptr->id)
+		goto exit;
+	if (l_ctx->no_migrate)
+		goto exit;
+
+	if (l_ctx->local_xprt_req > r_ctx->transport_ptr->id)
+		l_ctx->local_xprt_req = r_ctx->transport_ptr->id;
+
+	new_xprt = max(l_ctx->local_xprt_req, r_ctx->remote_xprt_req);
+
+	if (new_xprt == l_ctx->transport_ptr->id)
+		goto exit;
+
+	ctx_clone = kmalloc(sizeof(*ctx_clone), GFP_KERNEL);
+	if (!ctx_clone)
+		goto exit;
+
+	mutex_lock(&transport_list_lock_lha0);
+	list_for_each_entry(xprt, &transport_list, list_node)
+		if (!strcmp(l_ctx->transport_ptr->edge, xprt->edge))
+			if (xprt->id == new_xprt)
+				break;
+	mutex_unlock(&transport_list_lock_lha0);
+
+	spin_lock_irqsave(&l_ctx->transport_ptr->xprt_ctx_lock_lhb1, flags);
+	list_del_init(&l_ctx->port_list_node);
+	spin_unlock_irqrestore(&l_ctx->transport_ptr->xprt_ctx_lock_lhb1,
+									flags);
+	mutex_lock(&l_ctx->transport_ptr->xprt_dbgfs_lock_lhb4);
+	glink_debugfs_remove_channel(l_ctx, l_ctx->transport_ptr);
+	mutex_unlock(&l_ctx->transport_ptr->xprt_dbgfs_lock_lhb4);
+
+	memcpy(ctx_clone, l_ctx, sizeof(*ctx_clone));
+	ctx_clone->local_xprt_req = 0;
+	ctx_clone->local_xprt_resp = 0;
+	ctx_clone->remote_xprt_req = 0;
+	ctx_clone->remote_xprt_resp = 0;
+	ctx_clone->notify_state = NULL;
+	ctx_clone->local_open_state = GLINK_CHANNEL_CLOSING;
+	rwref_lock_init(&ctx_clone->ch_state_lhb2, glink_ch_ctx_release);
+	init_completion(&ctx_clone->int_req_ack_complete);
+	init_completion(&ctx_clone->int_req_complete);
+	spin_lock_init(&ctx_clone->local_rx_intent_lst_lock_lhc1);
+	spin_lock_init(&ctx_clone->rmt_rx_intent_lst_lock_lhc2);
+	INIT_LIST_HEAD(&ctx_clone->tx_ready_list_node);
+	INIT_LIST_HEAD(&ctx_clone->local_rx_intent_list);
+	INIT_LIST_HEAD(&ctx_clone->local_rx_intent_ntfy_list);
+	INIT_LIST_HEAD(&ctx_clone->local_rx_intent_free_list);
+	INIT_LIST_HEAD(&ctx_clone->rmt_rx_intent_list);
+	INIT_LIST_HEAD(&ctx_clone->tx_active);
+	spin_lock_init(&ctx_clone->tx_pending_rmt_done_lock_lhc4);
+	INIT_LIST_HEAD(&ctx_clone->tx_pending_remote_done);
+	spin_lock_init(&ctx_clone->tx_lists_lock_lhc3);
+	spin_lock_irqsave(&l_ctx->transport_ptr->xprt_ctx_lock_lhb1, flags);
+	list_add_tail(&ctx_clone->port_list_node,
+					&l_ctx->transport_ptr->channels);
+	spin_unlock_irqrestore(&l_ctx->transport_ptr->xprt_ctx_lock_lhb1,
+									flags);
+
+	l_ctx->transport_ptr->ops->tx_cmd_ch_close(l_ctx->transport_ptr->ops,
+								l_ctx->lcid);
+
+	l_ctx->transport_ptr = xprt;
+	l_ctx->local_xprt_req = 0;
+	l_ctx->local_xprt_resp = 0;
+	if (new_xprt != r_ctx->transport_ptr->id || l_ctx == r_ctx) {
+		if (new_xprt != r_ctx->transport_ptr->id) {
+			r_ctx->local_xprt_req = 0;
+			r_ctx->local_xprt_resp = 0;
+			r_ctx->remote_xprt_req = 0;
+			r_ctx->remote_xprt_resp = 0;
+		}
+
+		l_ctx->remote_xprt_req = 0;
+		l_ctx->remote_xprt_resp = 0;
+		l_ctx->remote_opened = false;
+
+		rwref_write_get(&xprt->xprt_state_lhb0);
+		spin_lock_irqsave(&xprt->xprt_ctx_lock_lhb1, flags);
+		if (list_empty(&xprt->free_lcid_list)) {
+			l_ctx->lcid = xprt->next_lcid++;
+		} else {
+			flcid = list_first_entry(&xprt->free_lcid_list,
+						struct channel_lcid, list_node);
+			l_ctx->lcid = flcid->lcid;
+			list_del(&flcid->list_node);
+			kfree(flcid);
+		}
+		list_add_tail(&l_ctx->port_list_node, &xprt->channels);
+		spin_unlock_irqrestore(&xprt->xprt_ctx_lock_lhb1, flags);
+		rwref_write_put(&xprt->xprt_state_lhb0);
+	} else {
+		l_ctx->lcid = r_ctx->lcid;
+		l_ctx->rcid = r_ctx->rcid;
+		l_ctx->remote_opened = r_ctx->remote_opened;
+		l_ctx->remote_xprt_req = r_ctx->remote_xprt_req;
+		l_ctx->remote_xprt_resp = r_ctx->remote_xprt_resp;
+		glink_delete_ch_from_list(r_ctx, false);
+
+		spin_lock_irqsave(&xprt->xprt_ctx_lock_lhb1, flags);
+		list_add_tail(&l_ctx->port_list_node, &xprt->channels);
+		spin_unlock_irqrestore(&xprt->xprt_ctx_lock_lhb1, flags);
+	}
+
+	mutex_lock(&xprt->xprt_dbgfs_lock_lhb4);
+	glink_debugfs_add_channel(l_ctx, xprt);
+	mutex_unlock(&xprt->xprt_dbgfs_lock_lhb4);
+
+	mutex_lock(&transport_list_lock_lha0);
+	list_for_each_entry(xprt, &transport_list, list_node)
+		if (!strcmp(l_ctx->transport_ptr->edge, xprt->edge))
+			if (xprt->id < best_xprt)
+				best_xprt = xprt->id;
+	mutex_unlock(&transport_list_lock_lha0);
+	l_ctx->local_open_state = GLINK_CHANNEL_OPENING;
+	l_ctx->local_xprt_req = best_xprt;
+	l_ctx->transport_ptr->ops->tx_cmd_ch_open(l_ctx->transport_ptr->ops,
+					l_ctx->lcid, l_ctx->name, best_xprt);
+
+	migrated = true;
+exit:
+	rwref_put(&l_ctx->ch_state_lhb2);
+	rwref_put(&r_ctx->ch_state_lhb2);
+
+	return migrated;
+}
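+
+/*
+ * Note on the two rebinding cases above: when the destination transport
+ * differs from the remote context's transport (or l_ctx == r_ctx), the
+ * local context gets a fresh lcid on the new transport - an entry from
+ * free_lcid_list if one exists, otherwise next_lcid++.  Otherwise the
+ * local context adopts the remote context's lcid/rcid and remote state,
+ * and the stale remote context is deleted from the channel list.
+ */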
+
+/**
+ * calculate_xprt_resp() - calculate the response to a remote xprt request
+ * @r_ctx:	The channel the remote xprt request is for.
+ *
+ * Return: The calculated response.
+ */
+static uint16_t calculate_xprt_resp(struct channel_ctx *r_ctx)
+{
+	struct channel_ctx *l_ctx;
+
+	l_ctx = find_l_ctx_get(r_ctx);
+	if (!l_ctx) {
+		r_ctx->remote_xprt_resp = r_ctx->transport_ptr->id;
+	} else if (r_ctx->remote_xprt_req == r_ctx->transport_ptr->id) {
+		r_ctx->remote_xprt_resp = r_ctx->remote_xprt_req;
+	} else {
+		if (!l_ctx->local_xprt_req)
+			r_ctx->remote_xprt_resp = r_ctx->remote_xprt_req;
+		else if (l_ctx->no_migrate)
+			r_ctx->remote_xprt_resp = l_ctx->local_xprt_req;
+		else
+			r_ctx->remote_xprt_resp = max(l_ctx->local_xprt_req,
+							r_ctx->remote_xprt_req);
+	}
+
+	if (l_ctx)
+		rwref_put(&l_ctx->ch_state_lhb2);
+
+	return r_ctx->remote_xprt_resp;
+}
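+
+/*
+ * Illustrative cases for the calculation above (ids are hypothetical):
+ * with no matching local channel, the response echoes the transport the
+ * request arrived on.  With a local channel that requested transport 1
+ * while the remote requested transport 3, the response is max(1, 3) = 3,
+ * unless the local channel is marked no_migrate, in which case the local
+ * request (1) wins.
+ */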
+
+/**
+ * glink_core_rx_cmd_ch_remote_open() - Remote-initiated open command
+ *
+ * @if_ptr:	Pointer to transport instance
+ * @rcid:	Remote Channel ID
+ * @name:	Channel name
+ * @req_xprt:	Requested transport to migrate to
+ */
+static void glink_core_rx_cmd_ch_remote_open(struct glink_transport_if *if_ptr,
+	uint32_t rcid, const char *name, uint16_t req_xprt)
+{
+	struct channel_ctx *ctx;
+	uint16_t xprt_resp;
+	bool do_migrate;
+
+	glink_core_migration_edge_lock(if_ptr->glink_core_priv);
+	ctx = ch_name_to_ch_ctx_create(if_ptr->glink_core_priv, name);
+	if (ctx == NULL) {
+		GLINK_ERR_XPRT(if_ptr->glink_core_priv,
+		       "%s: invalid rcid %u received, name '%s'\n",
+		       __func__, rcid, name);
+		glink_core_migration_edge_unlock(if_ptr->glink_core_priv);
+		return;
+	}
+
+	/* port already exists */
+	if (ctx->remote_opened) {
+		GLINK_ERR_CH(ctx,
+		       "%s: Duplicate remote open for rcid %u, name '%s'\n",
+		       __func__, rcid, name);
+		rwref_put(&ctx->ch_state_lhb2);
+		glink_core_migration_edge_unlock(if_ptr->glink_core_priv);
+		return;
+	}
+
+	ctx->remote_opened = true;
+	ch_add_rcid(if_ptr->glink_core_priv, ctx, rcid);
+	ctx->transport_ptr = if_ptr->glink_core_priv;
+
+	ctx->remote_xprt_req = req_xprt;
+	xprt_resp = calculate_xprt_resp(ctx);
+
+	do_migrate = will_migrate(NULL, ctx);
+	GLINK_INFO_CH(ctx, "%s: remote: CLOSED->OPENED ; xprt req:resp %u:%u\n",
+			__func__, req_xprt, xprt_resp);
+
+	if_ptr->tx_cmd_ch_remote_open_ack(if_ptr, rcid, xprt_resp);
+	if (!do_migrate && ch_is_fully_opened(ctx))
+		ctx->notify_state(ctx, ctx->user_priv, GLINK_CONNECTED);
+
+	if (do_migrate)
+		ch_migrate(NULL, ctx);
+	rwref_put(&ctx->ch_state_lhb2);
+	glink_core_migration_edge_unlock(if_ptr->glink_core_priv);
+}
+
+/**
+ * glink_core_rx_cmd_ch_open_ack() - Receive ack to previously sent open request
+ *
+ * @if_ptr:	Pointer to transport instance
+ * @lcid:	Local Channel ID
+ * @xprt_resp:	Response to the transport migration request
+ */
+static void glink_core_rx_cmd_ch_open_ack(struct glink_transport_if *if_ptr,
+	uint32_t lcid, uint16_t xprt_resp)
+{
+	struct channel_ctx *ctx;
+	glink_core_migration_edge_lock(if_ptr->glink_core_priv);
+	ctx = xprt_lcid_to_ch_ctx_get(if_ptr->glink_core_priv, lcid);
+	if (!ctx) {
+		/* unknown LCID received - this shouldn't happen */
+		GLINK_ERR_XPRT(if_ptr->glink_core_priv,
+				"%s: invalid lcid %u received\n", __func__,
+				(unsigned)lcid);
+		glink_core_migration_edge_unlock(if_ptr->glink_core_priv);
+		return;
+	}
+
+	if (ctx->local_open_state != GLINK_CHANNEL_OPENING) {
+		GLINK_ERR_CH(ctx,
+			"%s: unexpected open ack receive for lcid. Current state: %u. Thread: %u\n",
+				__func__, ctx->local_open_state, current->pid);
+		rwref_put(&ctx->ch_state_lhb2);
+		glink_core_migration_edge_unlock(if_ptr->glink_core_priv);
+		return;
+	}
+
+	ctx->local_xprt_resp = xprt_resp;
+	if (!ch_migrate(ctx, NULL)) {
+		ctx->local_open_state = GLINK_CHANNEL_OPENED;
+		GLINK_INFO_PERF_CH(ctx,
+			"%s: local:GLINK_CHANNEL_OPENING_WAIT->GLINK_CHANNEL_OPENED\n",
+			__func__);
+
+		if (ch_is_fully_opened(ctx)) {
+			ctx->notify_state(ctx, ctx->user_priv, GLINK_CONNECTED);
+			GLINK_INFO_PERF_CH(ctx,
+					"%s: notify state: GLINK_CONNECTED\n",
+					__func__);
+		}
+	}
+	rwref_put(&ctx->ch_state_lhb2);
+	glink_core_migration_edge_unlock(if_ptr->glink_core_priv);
+}
+
+/**
+ * glink_core_rx_cmd_ch_remote_close() - Receive remote close command
+ *
+ * @if_ptr:	Pointer to transport instance
+ * @rcid:	Remote Channel ID
+ */
+static void glink_core_rx_cmd_ch_remote_close(
+		struct glink_transport_if *if_ptr, uint32_t rcid)
+{
+	struct channel_ctx *ctx;
+	bool is_ch_fully_closed;
+	struct glink_core_xprt_ctx *xprt_ptr = if_ptr->glink_core_priv;
+
+	ctx = xprt_rcid_to_ch_ctx_get(if_ptr->glink_core_priv, rcid);
+	if (!ctx) {
+		/* unknown RCID received - this shouldn't happen */
+		GLINK_ERR_XPRT(if_ptr->glink_core_priv,
+				"%s: invalid rcid %u received\n", __func__,
+				(unsigned)rcid);
+		return;
+	}
+
+	if (!ctx->remote_opened) {
+		GLINK_ERR_CH(ctx,
+			"%s: unexpected remote close receive for rcid %u\n",
+			__func__, (unsigned)rcid);
+		rwref_put(&ctx->ch_state_lhb2);
+		return;
+	}
+	GLINK_INFO_CH(ctx, "%s: remote: OPENED->CLOSED\n", __func__);
+
+	is_ch_fully_closed = glink_core_remote_close_common(ctx, false);
+
+	ctx->pending_delete = true;
+	if_ptr->tx_cmd_ch_remote_close_ack(if_ptr, rcid);
+
+	if (is_ch_fully_closed) {
+		glink_delete_ch_from_list(ctx, true);
+		flush_kthread_worker(&xprt_ptr->tx_wq);
+	}
+	rwref_put(&ctx->ch_state_lhb2);
+}
+
+/**
+ * glink_core_rx_cmd_ch_close_ack() - Receive ack for a locally-requested close
+ *
+ * @if_ptr:	Pointer to transport instance
+ * @lcid:	Local Channel ID
+ */
+static void glink_core_rx_cmd_ch_close_ack(struct glink_transport_if *if_ptr,
+	uint32_t lcid)
+{
+	struct channel_ctx *ctx;
+	bool is_ch_fully_closed;
+	struct glink_core_xprt_ctx *xprt_ptr = if_ptr->glink_core_priv;
+
+	ctx = xprt_lcid_to_ch_ctx_get(if_ptr->glink_core_priv, lcid);
+	if (!ctx) {
+		/* unknown LCID received - this shouldn't happen */
+		GLINK_ERR_XPRT(if_ptr->glink_core_priv,
+				"%s: invalid lcid %u received\n", __func__,
+				(unsigned)lcid);
+		return;
+	}
+
+	if (ctx->local_open_state != GLINK_CHANNEL_CLOSING) {
+		GLINK_ERR_CH(ctx,
+			"%s: unexpected close ack receive for lcid %u\n",
+			__func__, (unsigned)lcid);
+		rwref_put(&ctx->ch_state_lhb2);
+		return;
+	}
+
+	is_ch_fully_closed = glink_core_ch_close_ack_common(ctx, false);
+	if (is_ch_fully_closed) {
+		glink_delete_ch_from_list(ctx, true);
+		flush_kthread_worker(&xprt_ptr->tx_wq);
+	}
+	rwref_put(&ctx->ch_state_lhb2);
+}
+
+/**
+ * glink_core_remote_rx_intent_put() - Receive remote RX intent
+ *
+ * @if_ptr:	Pointer to transport instance
+ * @rcid:	Remote Channel ID
+ * @riid:	Remote Intent ID
+ * @size:	Size of the remote intent
+ */
+static void glink_core_remote_rx_intent_put(struct glink_transport_if *if_ptr,
+		uint32_t rcid, uint32_t riid, size_t size)
+{
+	struct channel_ctx *ctx;
+
+	ctx = xprt_rcid_to_ch_ctx_get(if_ptr->glink_core_priv, rcid);
+	if (!ctx) {
+		/* unknown rcid received - this shouldn't happen */
+		GLINK_ERR_XPRT(if_ptr->glink_core_priv,
+				"%s: invalid rcid received %u\n", __func__,
+				(unsigned)rcid);
+		return;
+	}
+
+	ch_push_remote_rx_intent(ctx, size, riid, NULL);
+	rwref_put(&ctx->ch_state_lhb2);
+}
+
+/**
+ * glink_core_remote_rx_intent_put_cookie() - Receive remote RX intent
+ *
+ * @if_ptr:    Pointer to transport instance
+ * @rcid:      Remote Channel ID
+ * @riid:      Remote Intent ID
+ * @size:      Size of the remote intent
+ * @cookie:    Transport-specific cookie to cache
+ */
+static void glink_core_remote_rx_intent_put_cookie(
+		struct glink_transport_if *if_ptr,
+		uint32_t rcid, uint32_t riid, size_t size, void *cookie)
+{
+	struct channel_ctx *ctx;
+
+	ctx = xprt_rcid_to_ch_ctx_get(if_ptr->glink_core_priv, rcid);
+	if (!ctx) {
+		/* unknown rcid received - this shouldn't happen */
+		GLINK_ERR_XPRT(if_ptr->glink_core_priv,
+				"%s: invalid rcid received %u\n", __func__,
+				(unsigned)rcid);
+		return;
+	}
+
+	ch_push_remote_rx_intent(ctx, size, riid, cookie);
+	rwref_put(&ctx->ch_state_lhb2);
+}
+
+/**
+ * glink_core_rx_cmd_remote_rx_intent_req() - Receive a request for rx_intent
+ *                                            from remote side
+ * @if_ptr:	Pointer to the transport interface
+ * @rcid:	Remote channel ID
+ * @size:	size of the intent
+ *
+ * This function looks up the local channel to which the rx_intent request
+ * is addressed and forwards the request to it through the
+ * notify_rx_intent_req callback registered by the local channel.
+ */
+static void glink_core_rx_cmd_remote_rx_intent_req(
+	struct glink_transport_if *if_ptr, uint32_t rcid, size_t size)
+{
+	struct channel_ctx *ctx;
+	bool cb_ret;
+
+	ctx = xprt_rcid_to_ch_ctx_get(if_ptr->glink_core_priv, rcid);
+	if (!ctx) {
+		GLINK_ERR_XPRT(if_ptr->glink_core_priv,
+				"%s: invalid rcid received %u\n", __func__,
+				(unsigned)rcid);
+		return;
+	}
+	if (!ctx->notify_rx_intent_req) {
+		GLINK_ERR_CH(ctx,
+			"%s: Notify function not defined for local channel",
+			__func__);
+		rwref_put(&ctx->ch_state_lhb2);
+		return;
+	}
+
+	cb_ret = ctx->notify_rx_intent_req(ctx, ctx->user_priv, size);
+	if_ptr->tx_cmd_remote_rx_intent_req_ack(if_ptr, ctx->lcid, cb_ret);
+	rwref_put(&ctx->ch_state_lhb2);
+}
+
+/**
+ * glink_core_rx_cmd_rx_intent_req_ack() - Receive ack from remote side
+ *					for a local rx_intent request
+ * @if_ptr:	Pointer to the transport interface
+ * @rcid:	Remote channel ID
+ * @granted:	True if the rx_intent request was granted
+ *
+ * This function handles the ack from the remote side for a local rx_intent
+ * request.
+ */
+static void glink_core_rx_cmd_rx_intent_req_ack(struct glink_transport_if
+					*if_ptr, uint32_t rcid, bool granted)
+{
+	struct channel_ctx *ctx;
+
+	ctx = xprt_rcid_to_ch_ctx_get(if_ptr->glink_core_priv, rcid);
+	if (!ctx) {
+		GLINK_ERR_XPRT(if_ptr->glink_core_priv,
+				"%s: Invalid rcid received %u\n", __func__,
+				(unsigned)rcid);
+		return;
+	}
+	ctx->int_req_ack = granted;
+	complete_all(&ctx->int_req_ack_complete);
+	rwref_put(&ctx->ch_state_lhb2);
+}
+
+/**
+ * glink_core_rx_get_pkt_ctx() - lookup RX intent structure
+ *
+ * @if_ptr:	Pointer to the transport interface
+ * @rcid:	Remote channel ID
+ * @liid:	Local RX Intent ID
+ *
+ * Note that this function is designed to always be followed by a call to
+ * glink_core_rx_put_pkt_ctx() to complete an RX operation by the transport.
+ *
+ * Return: Pointer to RX intent structure (or NULL if none found)
+ */
+static struct glink_core_rx_intent *glink_core_rx_get_pkt_ctx(
+		struct glink_transport_if *if_ptr, uint32_t rcid, uint32_t liid)
+{
+	struct channel_ctx *ctx;
+	struct glink_core_rx_intent *intent_ptr;
+
+	ctx = xprt_rcid_to_ch_ctx_get(if_ptr->glink_core_priv, rcid);
+	if (!ctx) {
+		/* unknown RCID received - this shouldn't happen */
+		GLINK_ERR_XPRT(if_ptr->glink_core_priv,
+				"%s: invalid rcid received %u\n", __func__,
+				(unsigned)rcid);
+		return NULL;
+	}
+
+	/* match pending intent */
+	intent_ptr = ch_get_local_rx_intent(ctx, liid);
+	if (intent_ptr == NULL) {
+		GLINK_ERR_CH(ctx,
+			"%s: L[%u]: No matching rx intent\n",
+			__func__, liid);
+		rwref_put(&ctx->ch_state_lhb2);
+		return NULL;
+	}
+
+	rwref_put(&ctx->ch_state_lhb2);
+	return intent_ptr;
+}
+
+/**
+ * glink_core_rx_put_pkt_ctx() - complete an RX operation on an intent
+ *
+ * @if_ptr:	Pointer to the transport interface
+ * @rcid:	Remote channel ID
+ * @intent_ptr:	Pointer to the RX intent
+ * @complete:	Packet has been completely received
+ *
+ * Note that this function should always be preceded by a call to
+ * glink_core_rx_get_pkt_ctx().
+ */
+void glink_core_rx_put_pkt_ctx(struct glink_transport_if *if_ptr,
+	uint32_t rcid, struct glink_core_rx_intent *intent_ptr, bool complete)
+{
+	struct channel_ctx *ctx;
+
+	if (!complete) {
+		GLINK_DBG_XPRT(if_ptr->glink_core_priv,
+			"%s: rcid[%u] liid[%u] pkt_size[%zu] write_offset[%zu] Fragment received\n",
+				__func__, rcid, intent_ptr->id,
+				intent_ptr->pkt_size,
+				intent_ptr->write_offset);
+		return;
+	}
+
+	/* packet complete */
+	ctx = xprt_rcid_to_ch_ctx_get(if_ptr->glink_core_priv, rcid);
+	if (!ctx) {
+		/* unknown RCID received - this shouldn't happen */
+		GLINK_ERR_XPRT(if_ptr->glink_core_priv,
+			       "%s: invalid rcid received %u\n", __func__,
+			       (unsigned)rcid);
+		return;
+	}
+
+	if (unlikely(intent_ptr->tracer_pkt)) {
+		tracer_pkt_log_event(intent_ptr->data, GLINK_CORE_RX);
+		ch_set_local_rx_intent_notified(ctx, intent_ptr);
+		if (ctx->notify_rx_tracer_pkt)
+			ctx->notify_rx_tracer_pkt(ctx, ctx->user_priv,
+				intent_ptr->pkt_priv, intent_ptr->data,
+				intent_ptr->pkt_size);
+		rwref_put(&ctx->ch_state_lhb2);
+		return;
+	}
+
+	GLINK_PERF_CH(ctx, "%s: L[%u]: data[%p] size[%zu]\n",
+		__func__, intent_ptr->id,
+		intent_ptr->data ? intent_ptr->data : intent_ptr->iovec,
+		intent_ptr->write_offset);
+	if (!intent_ptr->data && !ctx->notify_rxv) {
+		/* Received a vector, but client can't handle a vector */
+		intent_ptr->bounce_buf = linearize_vector(intent_ptr->iovec,
+						intent_ptr->pkt_size,
+						intent_ptr->vprovider,
+						intent_ptr->pprovider);
+		if (IS_ERR_OR_NULL(intent_ptr->bounce_buf)) {
+			GLINK_ERR_XPRT(if_ptr->glink_core_priv,
+				"%s: Error %ld linearizing vector\n", __func__,
+				PTR_ERR(intent_ptr->bounce_buf));
+			BUG();
+			rwref_put(&ctx->ch_state_lhb2);
+			return;
+		}
+	}
+
+	ch_set_local_rx_intent_notified(ctx, intent_ptr);
+	if (ctx->notify_rx && (intent_ptr->data || intent_ptr->bounce_buf)) {
+		ctx->notify_rx(ctx, ctx->user_priv, intent_ptr->pkt_priv,
+			       intent_ptr->data ?
+				intent_ptr->data : intent_ptr->bounce_buf,
+			       intent_ptr->pkt_size);
+	} else if (ctx->notify_rxv) {
+		ctx->notify_rxv(ctx, ctx->user_priv, intent_ptr->pkt_priv,
+				intent_ptr->iovec, intent_ptr->pkt_size,
+				intent_ptr->vprovider, intent_ptr->pprovider);
+	} else {
+		GLINK_ERR_XPRT(if_ptr->glink_core_priv,
+				"%s: Unable to process rx data\n", __func__);
+		BUG();
+	}
+	rwref_put(&ctx->ch_state_lhb2);
+}
+
+/**
+ * glink_core_rx_cmd_tx_done() - Receive Transmit Done Command
+ * @if_ptr:	Pointer to transport interface
+ * @rcid:	Remote channel ID
+ * @riid:	Remote intent ID
+ * @reuse:	Reuse the consumed intent
+ */
+void glink_core_rx_cmd_tx_done(struct glink_transport_if *if_ptr,
+			       uint32_t rcid, uint32_t riid, bool reuse)
+{
+	struct channel_ctx *ctx;
+	struct glink_core_tx_pkt *tx_pkt;
+	unsigned long flags;
+	size_t intent_size;
+	void *cookie;
+
+	ctx = xprt_rcid_to_ch_ctx_get(if_ptr->glink_core_priv, rcid);
+	if (!ctx) {
+		/* unknown RCID received - this shouldn't happen */
+		GLINK_ERR_XPRT(if_ptr->glink_core_priv,
+				"%s: invalid rcid %u received\n", __func__,
+				rcid);
+		return;
+	}
+
+	spin_lock_irqsave(&ctx->tx_lists_lock_lhc3, flags);
+	tx_pkt = ch_get_tx_pending_remote_done(ctx, riid);
+	if (IS_ERR_OR_NULL(tx_pkt)) {
+		/*
+		 * FUTURE - in the case of a zero-copy transport, this is a
+		 * fatal protocol failure since memory corruption could occur
+		 * in this case.  Prevent this by adding code in glink_close()
+		 * to recall any buffers in flight / wait for them to be
+		 * returned.
+		 */
+		GLINK_ERR_CH(ctx, "%s: R[%u]: No matching tx\n",
+				__func__,
+				(unsigned)riid);
+		spin_unlock_irqrestore(&ctx->tx_lists_lock_lhc3, flags);
+		rwref_put(&ctx->ch_state_lhb2);
+		return;
+	}
+
+	/* notify client */
+	ctx->notify_tx_done(ctx, ctx->user_priv, tx_pkt->pkt_priv,
+			    tx_pkt->data ? tx_pkt->data : tx_pkt->iovec);
+	intent_size = tx_pkt->intent_size;
+	cookie = tx_pkt->cookie;
+	ch_remove_tx_pending_remote_done(ctx, tx_pkt);
+	spin_unlock_irqrestore(&ctx->tx_lists_lock_lhc3, flags);
+
+	if (reuse)
+		ch_push_remote_rx_intent(ctx, intent_size, riid, cookie);
+	rwref_put(&ctx->ch_state_lhb2);
+}
+
+/**
+ * xprt_schedule_tx() - Schedules packet for transmit.
+ * @xprt_ptr:	Transport to send packet on.
+ * @ch_ptr:	Channel to send packet on.
+ * @tx_info:	Packet to transmit.
+ */
+static void xprt_schedule_tx(struct glink_core_xprt_ctx *xprt_ptr,
+			     struct channel_ctx *ch_ptr,
+			     struct glink_core_tx_pkt *tx_info)
+{
+	unsigned long flags;
+
+	if (unlikely(xprt_ptr->local_state == GLINK_XPRT_DOWN)) {
+		GLINK_ERR_CH(ch_ptr, "%s: Error XPRT is down\n", __func__);
+		kfree(tx_info);
+		return;
+	}
+
+	spin_lock_irqsave(&xprt_ptr->tx_ready_lock_lhb3, flags);
+	if (unlikely(!ch_is_fully_opened(ch_ptr))) {
+		spin_unlock_irqrestore(&xprt_ptr->tx_ready_lock_lhb3, flags);
+		GLINK_ERR_CH(ch_ptr, "%s: Channel closed before tx\n",
+			     __func__);
+		kfree(tx_info);
+		return;
+	}
+	if (list_empty(&ch_ptr->tx_ready_list_node))
+		list_add_tail(&ch_ptr->tx_ready_list_node,
+			&xprt_ptr->prio_bin[ch_ptr->curr_priority].tx_ready);
+
+	spin_lock(&ch_ptr->tx_lists_lock_lhc3);
+	list_add_tail(&tx_info->list_node, &ch_ptr->tx_active);
+	glink_qos_do_ch_tx(ch_ptr);
+	if (unlikely(tx_info->tracer_pkt))
+		tracer_pkt_log_event((void *)(tx_info->data),
+				     GLINK_QUEUE_TO_SCHEDULER);
+
+	spin_unlock(&ch_ptr->tx_lists_lock_lhc3);
+	spin_unlock_irqrestore(&xprt_ptr->tx_ready_lock_lhb3, flags);
+
+	queue_kthread_work(&xprt_ptr->tx_wq, &xprt_ptr->tx_kwork);
+}
+
+/**
+ * xprt_single_threaded_tx() - Transmit in the context of sender.
+ * @xprt_ptr:	Transport to send packet on.
+ * @ch_ptr:	Channel to send packet on.
+ * @tx_info:	Packet to transmit.
+ */
+static int xprt_single_threaded_tx(struct glink_core_xprt_ctx *xprt_ptr,
+			     struct channel_ctx *ch_ptr,
+			     struct glink_core_tx_pkt *tx_info)
+{
+	int ret;
+	unsigned long flags;
+
+	spin_lock_irqsave(&ch_ptr->tx_pending_rmt_done_lock_lhc4, flags);
+	do {
+		ret = xprt_ptr->ops->tx(ch_ptr->transport_ptr->ops,
+					ch_ptr->lcid, tx_info);
+	} while (ret == -EAGAIN);
+	if (ret < 0 || tx_info->size_remaining) {
+		GLINK_ERR_CH(ch_ptr, "%s: Error %d writing data\n",
+			     __func__, ret);
+		kfree(tx_info);
+	} else {
+		list_add_tail(&tx_info->list_done,
+			      &ch_ptr->tx_pending_remote_done);
+		ret = 0;
+	}
+	spin_unlock_irqrestore(&ch_ptr->tx_pending_rmt_done_lock_lhc4, flags);
+	return ret;
+}
+
+/**
+ * glink_scheduler_eval_prio() - Evaluate the channel priority
+ * @ctx:	Channel whose priority is evaluated.
+ * @xprt_ctx:	Transport in which the channel is part of.
+ *
+ * This function is called by the packet scheduler to measure the traffic
+ * rate observed in the channel and compare it against the traffic rate
+ * requested by the channel. The comparison result is used to evaluate the
+ * priority of the channel.
+ */
+static void glink_scheduler_eval_prio(struct channel_ctx *ctx,
+			struct glink_core_xprt_ctx *xprt_ctx)
+{
+	unsigned long token_end_time;
+	unsigned long token_consume_time, rem;
+	unsigned long obs_rate_kBps;
+
+	if (ctx->initial_priority == 0)
+		return;
+
+	if (ctx->token_count)
+		return;
+
+	token_end_time = arch_counter_get_cntvct();
+
+	token_consume_time = NSEC_PER_SEC;
+	rem = do_div(token_consume_time, arch_timer_get_rate());
+	token_consume_time = (token_end_time - ctx->token_start_time) *
+				token_consume_time;
+	rem = do_div(token_consume_time, 1000);
+	obs_rate_kBps = glink_qos_calc_rate_kBps(ctx->txd_len,
+				token_consume_time);
+	if (obs_rate_kBps > ctx->req_rate_kBps) {
+		GLINK_INFO_CH(ctx, "%s: Obs. Rate (%lu) > Req. Rate (%lu)\n",
+			__func__, obs_rate_kBps, ctx->req_rate_kBps);
+		glink_qos_update_ch_prio(ctx, 0);
+	} else {
+		glink_qos_update_ch_prio(ctx, ctx->initial_priority);
+	}
+
+	ctx->token_count = xprt_ctx->token_count;
+	ctx->txd_len = 0;
+	ctx->token_start_time = arch_counter_get_cntvct();
+}
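+
+/*
+ * Worked example of the arithmetic above, assuming a hypothetical 19.2 MHz
+ * arch timer: NSEC_PER_SEC / 19200000 is roughly 52 ns per tick.  A token
+ * window of 192000 ticks then converts to 192000 * 52 ns, i.e. about 10 ms,
+ * or ~10000 us after the final do_div by 1000.  The observed rate derived
+ * from txd_len over that interval is then compared against req_rate_kBps
+ * to pick the new priority.
+ */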
+
+/**
+ * glink_scheduler_tx() - Transmit operation by the scheduler
+ * @ctx:	Channel which is scheduled for transmission.
+ * @xprt_ctx:	Transport context in which the transmission is performed.
+ *
+ * This function is called by the scheduler after scheduling a channel for
+ * transmission over the transport.
+ *
+ * Return: return value as returned by the transport on success,
+ *         standard Linux error codes on failure.
+ */
+static int glink_scheduler_tx(struct channel_ctx *ctx,
+			struct glink_core_xprt_ctx *xprt_ctx)
+{
+	unsigned long flags;
+	struct glink_core_tx_pkt *tx_info, *temp_tx_info;
+	size_t txd_len = 0;
+	size_t tx_len = 0;
+	uint32_t num_pkts = 0;
+	int ret = 0;
+
+	spin_lock_irqsave(&ctx->tx_lists_lock_lhc3, flags);
+	while (txd_len < xprt_ctx->mtu &&
+		!list_empty(&ctx->tx_active)) {
+		tx_info = list_first_entry(&ctx->tx_active,
+				struct glink_core_tx_pkt, list_node);
+		rwref_get(&tx_info->pkt_ref);
+
+		spin_lock(&ctx->tx_pending_rmt_done_lock_lhc4);
+		if (list_empty(&tx_info->list_done))
+			list_add(&tx_info->list_done,
+				 &ctx->tx_pending_remote_done);
+		spin_unlock(&ctx->tx_pending_rmt_done_lock_lhc4);
+		spin_unlock_irqrestore(&ctx->tx_lists_lock_lhc3, flags);
+
+		if (unlikely(tx_info->tracer_pkt)) {
+			tracer_pkt_log_event((void *)(tx_info->data),
+					      GLINK_SCHEDULER_TX);
+			ret = xprt_ctx->ops->tx_cmd_tracer_pkt(xprt_ctx->ops,
+						ctx->lcid, tx_info);
+		} else {
+			tx_len = tx_info->size_remaining <
+				 (xprt_ctx->mtu - txd_len) ?
+				 tx_info->size_remaining :
+				 (xprt_ctx->mtu - txd_len);
+			tx_info->tx_len = tx_len;
+			ret = xprt_ctx->ops->tx(xprt_ctx->ops,
+						ctx->lcid, tx_info);
+		}
+		spin_lock_irqsave(&ctx->tx_lists_lock_lhc3, flags);
+		if (!list_empty(&ctx->tx_active)) {
+			/*
+			 * Verify that the same tx_info still exists in the
+			 * tx_active list and was not removed during the tx
+			 * operation.  Removal can happen if SSR and tx done
+			 * both occur before tx_lists_lock_lhc3 is taken.
+			 */
+			temp_tx_info = list_first_entry(&ctx->tx_active,
+					struct glink_core_tx_pkt, list_node);
+			if (temp_tx_info != tx_info)
+				continue;
+		} else {
+			break;
+		}
+		if (ret == -EAGAIN) {
+			/*
+			 * transport unable to send at the moment and will call
+			 * tx_resume() when it can send again.
+			 */
+			rwref_put(&tx_info->pkt_ref);
+			break;
+		} else if (ret < 0) {
+			/*
+			 * General failure code that indicates that the
+			 * transport is unable to recover.  In this case, the
+			 * communication failure will be detected at a higher
+			 * level and a subsystem restart of the affected system
+			 * will be triggered.
+			 */
+			GLINK_ERR_XPRT(xprt_ctx,
+					"%s: unrecoverable xprt failure %d\n",
+					__func__, ret);
+			rwref_put(&tx_info->pkt_ref);
+			break;
+		} else if (!ret && tx_info->size_remaining) {
+			/*
+			 * Transport unable to send any data on this channel.
+			 * Break out of the loop so that the scheduler can
+			 * continue with the next channel.
+			 */
+			rwref_put(&tx_info->pkt_ref);
+			break;
+		} else {
+			txd_len += tx_len;
+		}
+
+		if (!tx_info->size_remaining) {
+			num_pkts++;
+			list_del_init(&tx_info->list_node);
+		}
+		rwref_put(&tx_info->pkt_ref);
+	}
+
+	ctx->txd_len += txd_len;
+	if (txd_len) {
+		if (num_pkts >= ctx->token_count)
+			ctx->token_count = 0;
+		else if (num_pkts)
+			ctx->token_count -= num_pkts;
+		else
+			ctx->token_count--;
+	}
+	spin_unlock_irqrestore(&ctx->tx_lists_lock_lhc3, flags);
+
+	return ret;
+}
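+
+/*
+ * Illustrative example of the MTU-bounded loop above (sizes are
+ * hypothetical): with an 8 KiB transport MTU and a 12 KiB packet at the
+ * head of tx_active, the first pass sends tx_len = 8 KiB (capped at
+ * mtu - txd_len), leaving 4 KiB for a later scheduler pass.  Only once
+ * size_remaining reaches zero is the packet removed from tx_active and
+ * counted against the channel's token budget.
+ */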
+
+/**
+ * tx_func() - Transmit kthread worker
+ * @work:	Linux kthread work structure
+ */
+static void tx_func(struct kthread_work *work)
+{
+	struct channel_ctx *ch_ptr;
+	uint32_t prio;
+	uint32_t tx_ready_head_prio = 0;
+	int ret;
+	struct channel_ctx *tx_ready_head = NULL;
+	bool transmitted_successfully = true;
+	unsigned long flags;
+	struct glink_core_xprt_ctx *xprt_ptr = container_of(work,
+			struct glink_core_xprt_ctx, tx_kwork);
+
+	GLINK_PERF("%s: worker starting\n", __func__);
+
+	while (1) {
+		prio = xprt_ptr->num_priority - 1;
+		spin_lock_irqsave(&xprt_ptr->tx_ready_lock_lhb3, flags);
+		while (list_empty(&xprt_ptr->prio_bin[prio].tx_ready)) {
+			if (prio == 0) {
+				spin_unlock_irqrestore(
+					&xprt_ptr->tx_ready_lock_lhb3, flags);
+				return;
+			}
+			prio--;
+		}
+		glink_pm_qos_vote(xprt_ptr);
+		ch_ptr = list_first_entry(&xprt_ptr->prio_bin[prio].tx_ready,
+				struct channel_ctx, tx_ready_list_node);
+		rwref_get(&ch_ptr->ch_state_lhb2);
+		spin_unlock_irqrestore(&xprt_ptr->tx_ready_lock_lhb3, flags);
+
+		if (tx_ready_head == NULL || tx_ready_head_prio < prio) {
+			tx_ready_head = ch_ptr;
+			tx_ready_head_prio = prio;
+		}
+
+		if (ch_ptr == tx_ready_head && !transmitted_successfully) {
+			GLINK_ERR_XPRT(xprt_ptr,
+				"%s: Unable to send data on this transport.\n",
+				__func__);
+			rwref_put(&ch_ptr->ch_state_lhb2);
+			break;
+		}
+		transmitted_successfully = false;
+
+		ret = glink_scheduler_tx(ch_ptr, xprt_ptr);
+		if (ret == -EAGAIN) {
+			/*
+			 * transport unable to send at the moment and will call
+			 * tx_resume() when it can send again.
+			 */
+			rwref_put(&ch_ptr->ch_state_lhb2);
+			break;
+		} else if (ret < 0) {
+			/*
+			 * General failure code that indicates that the
+			 * transport is unable to recover.  In this case, the
+			 * communication failure will be detected at a higher
+			 * level and a subsystem restart of the affected system
+			 * will be triggered.
+			 */
+			GLINK_ERR_XPRT(xprt_ptr,
+					"%s: unrecoverable xprt failure %d\n",
+					__func__, ret);
+			rwref_put(&ch_ptr->ch_state_lhb2);
+			break;
+		} else if (!ret) {
+			/*
+			 * Transport unable to send any data on this channel,
+			 * but didn't return an error. Move to the next channel
+			 * and continue.
+			 */
+			spin_lock_irqsave(&xprt_ptr->tx_ready_lock_lhb3, flags);
+			list_rotate_left(&xprt_ptr->prio_bin[prio].tx_ready);
+			spin_unlock_irqrestore(&xprt_ptr->tx_ready_lock_lhb3,
+						flags);
+			rwref_put(&ch_ptr->ch_state_lhb2);
+			continue;
+		}
+
+		spin_lock_irqsave(&xprt_ptr->tx_ready_lock_lhb3, flags);
+		spin_lock(&ch_ptr->tx_lists_lock_lhc3);
+
+		glink_scheduler_eval_prio(ch_ptr, xprt_ptr);
+		if (list_empty(&ch_ptr->tx_active)) {
+			list_del_init(&ch_ptr->tx_ready_list_node);
+			glink_qos_done_ch_tx(ch_ptr);
+		}
+
+		spin_unlock(&ch_ptr->tx_lists_lock_lhc3);
+		spin_unlock_irqrestore(&xprt_ptr->tx_ready_lock_lhb3, flags);
+
+		tx_ready_head = NULL;
+		transmitted_successfully = true;
+		rwref_put(&ch_ptr->ch_state_lhb2);
+	}
+	glink_pm_qos_unvote(xprt_ptr);
+	GLINK_PERF("%s: worker exiting\n", __func__);
+}
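+
+/*
+ * Scheduling note (illustrative): the worker restarts each scan at the
+ * highest-numbered priority bin, so with bins 0..2 a ready channel in
+ * bin 2 is always served before one in bin 0.  tx_ready_head remembers
+ * the first channel picked at the current best priority; seeing it again
+ * without a successful transmit in between means no channel can make
+ * progress, so the worker exits rather than spin.
+ */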
+
+static void glink_core_tx_resume(struct glink_transport_if *if_ptr)
+{
+	struct glink_core_xprt_ctx *xprt_ptr = if_ptr->glink_core_priv;
+
+	queue_kthread_work(&xprt_ptr->tx_wq, &xprt_ptr->tx_kwork);
+}
+
+/**
+ * glink_pm_qos_vote() - Add Power Management QoS Vote
+ * @xprt_ptr:	Transport for power vote
+ *
+ * Note - must be called with tx_ready_lock_lhb3 locked.
+ */
+static void glink_pm_qos_vote(struct glink_core_xprt_ctx *xprt_ptr)
+{
+	if (glink_pm_qos && !xprt_ptr->qos_req_active) {
+		GLINK_PERF("%s: qos vote %u us\n", __func__, glink_pm_qos);
+		pm_qos_update_request(&xprt_ptr->pm_qos_req, glink_pm_qos);
+		xprt_ptr->qos_req_active = true;
+	}
+	xprt_ptr->tx_path_activity = true;
+}
+
+/**
+ * glink_pm_qos_unvote() - Schedule Power Management QoS Vote Removal
+ * @xprt_ptr:	Transport for power vote removal
+ *
+ * Note - must be called with tx_ready_lock_lhb3 locked.
+ */
+static void glink_pm_qos_unvote(struct glink_core_xprt_ctx *xprt_ptr)
+{
+	xprt_ptr->tx_path_activity = false;
+	if (xprt_ptr->qos_req_active) {
+		GLINK_PERF("%s: qos unvote\n", __func__);
+		schedule_delayed_work(&xprt_ptr->pm_qos_work,
+				msecs_to_jiffies(GLINK_PM_QOS_HOLDOFF_MS));
+	}
+}
+
+/**
+ * glink_pm_qos_cancel_worker() - Remove Power Management QoS Vote
+ * @work:	Delayed work structure
+ *
+ * Removes PM QoS vote if no additional transmit activity has occurred between
+ * the unvote and when this worker runs.
+ */
+static void glink_pm_qos_cancel_worker(struct work_struct *work)
+{
+	struct glink_core_xprt_ctx *xprt_ptr;
+	unsigned long flags;
+
+	xprt_ptr = container_of(to_delayed_work(work),
+			struct glink_core_xprt_ctx, pm_qos_work);
+
+	spin_lock_irqsave(&xprt_ptr->tx_ready_lock_lhb3, flags);
+	if (!xprt_ptr->tx_path_activity) {
+		/* no more tx activity */
+		GLINK_PERF("%s: qos off\n", __func__);
+		pm_qos_update_request(&xprt_ptr->pm_qos_req,
+				PM_QOS_DEFAULT_VALUE);
+		xprt_ptr->qos_req_active = false;
+	}
+	xprt_ptr->tx_path_activity = false;
+	spin_unlock_irqrestore(&xprt_ptr->tx_ready_lock_lhb3, flags);
+}
+
+/**
+ * glink_core_rx_cmd_remote_sigs() - Receive remote channel signal command
+ *
+ * @if_ptr:	Pointer to transport instance
+ * @rcid:	Remote Channel ID
+ * @sigs:	Remote signal state
+ */
+static void glink_core_rx_cmd_remote_sigs(struct glink_transport_if *if_ptr,
+					uint32_t rcid, uint32_t sigs)
+{
+	struct channel_ctx *ctx;
+	uint32_t old_sigs;
+
+	ctx = xprt_rcid_to_ch_ctx_get(if_ptr->glink_core_priv, rcid);
+	if (!ctx) {
+		/* unknown LCID received - this shouldn't happen */
+		GLINK_ERR_XPRT(if_ptr->glink_core_priv,
+				"%s: invalid rcid %u received\n", __func__,
+				(unsigned)rcid);
+		return;
+	}
+
+	if (!ch_is_fully_opened(ctx)) {
+		GLINK_ERR_CH(ctx, "%s: Channel is not fully opened\n",
+			__func__);
+		rwref_put(&ctx->ch_state_lhb2);
+		return;
+	}
+
+	old_sigs = ctx->rsigs;
+	ctx->rsigs = sigs;
+	if (ctx->notify_rx_sigs) {
+		ctx->notify_rx_sigs(ctx, ctx->user_priv, old_sigs, ctx->rsigs);
+		GLINK_INFO_CH(ctx, "%s: notify rx sigs old:0x%x new:0x%x\n",
+				__func__, old_sigs, ctx->rsigs);
+	}
+	rwref_put(&ctx->ch_state_lhb2);
+}
+
+static struct glink_core_if core_impl = {
+	.link_up = glink_core_link_up,
+	.link_down = glink_core_link_down,
+	.rx_cmd_version = glink_core_rx_cmd_version,
+	.rx_cmd_version_ack = glink_core_rx_cmd_version_ack,
+	.rx_cmd_ch_remote_open = glink_core_rx_cmd_ch_remote_open,
+	.rx_cmd_ch_open_ack = glink_core_rx_cmd_ch_open_ack,
+	.rx_cmd_ch_remote_close = glink_core_rx_cmd_ch_remote_close,
+	.rx_cmd_ch_close_ack = glink_core_rx_cmd_ch_close_ack,
+	.rx_get_pkt_ctx = glink_core_rx_get_pkt_ctx,
+	.rx_put_pkt_ctx = glink_core_rx_put_pkt_ctx,
+	.rx_cmd_remote_rx_intent_put = glink_core_remote_rx_intent_put,
+	.rx_cmd_remote_rx_intent_put_cookie =
+					glink_core_remote_rx_intent_put_cookie,
+	.rx_cmd_remote_rx_intent_req = glink_core_rx_cmd_remote_rx_intent_req,
+	.rx_cmd_rx_intent_req_ack = glink_core_rx_cmd_rx_intent_req_ack,
+	.rx_cmd_tx_done = glink_core_rx_cmd_tx_done,
+	.tx_resume = glink_core_tx_resume,
+	.rx_cmd_remote_sigs = glink_core_rx_cmd_remote_sigs,
+};
+
+/**
+ * glink_xprt_ctx_iterator_init() - Initializes the transport context list iterator
+ * @xprt_i:	pointer to the transport context iterator.
+ *
+ * This function acquires the transport context lock which must then be
+ * released by glink_xprt_ctx_iterator_end()
+ */
+void glink_xprt_ctx_iterator_init(struct xprt_ctx_iterator *xprt_i)
+{
+	if (xprt_i == NULL)
+		return;
+
+	mutex_lock(&transport_list_lock_lha0);
+	xprt_i->xprt_list = &transport_list;
+	xprt_i->i_curr = list_entry(&transport_list,
+			struct glink_core_xprt_ctx, list_node);
+}
+EXPORT_SYMBOL(glink_xprt_ctx_iterator_init);
+
+/**
+ * glink_xprt_ctx_iterator_end() - Ends the transport context list iteration
+ * @xprt_i:	pointer to the transport context iterator.
+ */
+void glink_xprt_ctx_iterator_end(struct xprt_ctx_iterator *xprt_i)
+{
+	if (xprt_i == NULL)
+		return;
+
+	xprt_i->xprt_list = NULL;
+	xprt_i->i_curr = NULL;
+	mutex_unlock(&transport_list_lock_lha0);
+}
+EXPORT_SYMBOL(glink_xprt_ctx_iterator_end);
+
+/**
+ * glink_xprt_ctx_iterator_next() - return the next element in the transport context list
+ * @xprt_i:	pointer to the transport context iterator.
+ *
+ * Return: pointer to the next transport context, or NULL at the end of the list
+ */
+struct glink_core_xprt_ctx *glink_xprt_ctx_iterator_next(
+			struct xprt_ctx_iterator *xprt_i)
+{
+	struct glink_core_xprt_ctx *xprt_ctx = NULL;
+
+	if (xprt_i == NULL)
+		return xprt_ctx;
+
+	if (list_empty(xprt_i->xprt_list))
+		return xprt_ctx;
+
+	list_for_each_entry_continue(xprt_i->i_curr,
+			xprt_i->xprt_list, list_node) {
+		xprt_ctx = xprt_i->i_curr;
+		break;
+	}
+	return xprt_ctx;
+}
+EXPORT_SYMBOL(glink_xprt_ctx_iterator_next);
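+
+/*
+ * Example usage of the iterator API above (illustrative only):
+ *
+ *	struct xprt_ctx_iterator it;
+ *	struct glink_core_xprt_ctx *xprt;
+ *
+ *	glink_xprt_ctx_iterator_init(&it);
+ *	while ((xprt = glink_xprt_ctx_iterator_next(&it)) != NULL)
+ *		pr_info("xprt %s edge %s\n", glink_get_xprt_name(xprt),
+ *			glink_get_xprt_edge_name(xprt));
+ *	glink_xprt_ctx_iterator_end(&it);
+ *
+ * The transport list mutex is held from init until end, so the loop body
+ * must not call back into anything that takes it again.
+ */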
+
+/**
+ * glink_get_xprt_name() - get the transport name
+ * @xprt_ctx:	pointer to the transport context.
+ *
+ * Return: name of the transport
+ */
+char *glink_get_xprt_name(struct glink_core_xprt_ctx *xprt_ctx)
+{
+	if (xprt_ctx == NULL)
+		return NULL;
+
+	return xprt_ctx->name;
+}
+EXPORT_SYMBOL(glink_get_xprt_name);
+
+/**
+ * glink_get_xprt_edge_name() - get the name of the remote processor/edge
+ *				of the transport
+ * @xprt_ctx:	pointer to the transport context.
+ *
+ * Return: Name of the remote processor/edge
+ */
+char *glink_get_xprt_edge_name(struct glink_core_xprt_ctx *xprt_ctx)
+{
+	if (xprt_ctx == NULL)
+		return NULL;
+	return xprt_ctx->edge;
+}
+EXPORT_SYMBOL(glink_get_xprt_edge_name);
+
+/**
+ * glink_get_xprt_state() - get the state of the transport
+ * @xprt_ctx:	pointer to the transport context.
+ *
+ * Return: Name of the transport state, NULL in case of invalid input
+ */
+const char *glink_get_xprt_state(struct glink_core_xprt_ctx *xprt_ctx)
+{
+	if (xprt_ctx == NULL)
+		return NULL;
+
+	return glink_get_xprt_state_string(xprt_ctx->local_state);
+}
+EXPORT_SYMBOL(glink_get_xprt_state);
+
+/**
+ * glink_get_xprt_version_features() - get the version and feature set
+ *					of local transport in glink
+ * @xprt_ctx:	pointer to the transport context.
+ *
+ * Return: pointer to the glink_core_version
+ */
+const struct glink_core_version *glink_get_xprt_version_features(
+		struct glink_core_xprt_ctx *xprt_ctx)
+{
+	const struct glink_core_version *ver = NULL;
+
+	if (xprt_ctx == NULL)
+		return ver;
+
+	ver = &xprt_ctx->versions[xprt_ctx->local_version_idx];
+	return ver;
+}
+EXPORT_SYMBOL(glink_get_xprt_version_features);
+
+/**
+ * glink_ch_ctx_iterator_init() - Initializes the channel context list iterator
+ * @ch_iter:	pointer to the channel context iterator.
+ * @xprt:	pointer to the transport context that holds the channel list
+ *
+ * This function acquires the channel context lock which must then be
+ * released by glink_ch_ctx_iterator_end()
+ */
+void glink_ch_ctx_iterator_init(struct ch_ctx_iterator *ch_iter,
+		struct glink_core_xprt_ctx *xprt)
+{
+	unsigned long flags;
+
+	if (ch_iter == NULL || xprt == NULL)
+		return;
+
+	spin_lock_irqsave(&xprt->xprt_ctx_lock_lhb1, flags);
+	ch_iter->ch_list = &(xprt->channels);
+	ch_iter->i_curr = list_entry(&(xprt->channels),
+				struct channel_ctx, port_list_node);
+	ch_iter->ch_list_flags = flags;
+}
+EXPORT_SYMBOL(glink_ch_ctx_iterator_init);
+
+/**
+ * glink_ch_ctx_iterator_end() - Ends the channel context list iteration
+ * @ch_iter:	pointer to the channel context iterator.
+ */
+void glink_ch_ctx_iterator_end(struct ch_ctx_iterator *ch_iter,
+				struct glink_core_xprt_ctx *xprt)
+{
+	if (ch_iter == NULL || xprt == NULL)
+		return;
+
+	spin_unlock_irqrestore(&xprt->xprt_ctx_lock_lhb1,
+			ch_iter->ch_list_flags);
+	ch_iter->ch_list = NULL;
+	ch_iter->i_curr = NULL;
+}
+EXPORT_SYMBOL(glink_ch_ctx_iterator_end);
+
+/**
+ * glink_ch_ctx_iterator_next() - return the next element in the channel context list
+ * @c_i:	pointer to the channel context iterator.
+ *
+ * Return: pointer to the next channel context, or NULL at the end of the list
+ */
+struct channel_ctx *glink_ch_ctx_iterator_next(struct ch_ctx_iterator *c_i)
+{
+	struct channel_ctx *ch_ctx = NULL;
+
+	if (c_i == NULL)
+		return ch_ctx;
+
+	if (list_empty(c_i->ch_list))
+		return ch_ctx;
+
+	list_for_each_entry_continue(c_i->i_curr,
+			c_i->ch_list, port_list_node) {
+		ch_ctx = c_i->i_curr;
+		break;
+	}
+	return ch_ctx;
+}
+EXPORT_SYMBOL(glink_ch_ctx_iterator_next);
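+
+/*
+ * Example usage (illustrative only), mirroring the transport iterator but
+ * with the per-transport channel spinlock held from init until end:
+ *
+ *	struct ch_ctx_iterator ci;
+ *	struct channel_ctx *ch;
+ *
+ *	glink_ch_ctx_iterator_init(&ci, xprt);
+ *	while ((ch = glink_ch_ctx_iterator_next(&ci)) != NULL)
+ *		pr_info("ch %s lcid %d\n", glink_get_ch_name(ch),
+ *			glink_get_ch_lcid(ch));
+ *	glink_ch_ctx_iterator_end(&ci, xprt);
+ */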
+
+/**
+ * glink_get_ch_name() - get the channel name
+ * @ch_ctx:	pointer to the channel context.
+ *
+ * Return: name of the channel, NULL in case of invalid input
+ */
+char *glink_get_ch_name(struct channel_ctx *ch_ctx)
+{
+	if (ch_ctx == NULL)
+		return NULL;
+
+	return ch_ctx->name;
+}
+EXPORT_SYMBOL(glink_get_ch_name);
+
+/**
+ * glink_get_ch_edge_name() - get the edge on which the channel is created
+ * @ch_ctx:	pointer to the channel context.
+ *
+ * Return: name of the edge, NULL in case of invalid input
+ */
+char *glink_get_ch_edge_name(struct channel_ctx *ch_ctx)
+{
+	if (ch_ctx == NULL)
+		return NULL;
+
+	return ch_ctx->transport_ptr->edge;
+}
+EXPORT_SYMBOL(glink_get_ch_edge_name);
+
+/**
+ * glink_get_ch_lcid() - get the local channel ID
+ * @ch_ctx:	pointer to the channel context.
+ *
+ * Return: local channel id, -EINVAL in case of invalid input
+ */
+int glink_get_ch_lcid(struct channel_ctx *ch_ctx)
+{
+	if (ch_ctx == NULL)
+		return -EINVAL;
+
+	return ch_ctx->lcid;
+}
+EXPORT_SYMBOL(glink_get_ch_lcid);
+
+/**
+ * glink_get_ch_rcid() - get the remote channel ID
+ * @ch_ctx:	pointer to the channel context.
+ *
+ * Return: remote channel id, -EINVAL in case of invalid input
+ */
+int glink_get_ch_rcid(struct channel_ctx *ch_ctx)
+{
+	if (ch_ctx == NULL)
+		return -EINVAL;
+
+	return ch_ctx->rcid;
+}
+EXPORT_SYMBOL(glink_get_ch_rcid);
+
+/**
+ * glink_get_ch_lstate() - get the local channel state
+ * @ch_ctx:	pointer to the channel context.
+ *
+ * Return: Name of the local channel state, NULL in case of invalid input
+ */
+const char *glink_get_ch_lstate(struct channel_ctx *ch_ctx)
+{
+	if (ch_ctx == NULL)
+		return NULL;
+
+	return glink_get_ch_state_string(ch_ctx->local_open_state);
+}
+EXPORT_SYMBOL(glink_get_ch_lstate);
+
+/**
+ * glink_get_ch_rstate() - get the remote channel state
+ * @ch_ctx:	pointer to the channel context.
+ *
+ * Return: true if the remote side is opened, false otherwise
+ */
+bool glink_get_ch_rstate(struct channel_ctx *ch_ctx)
+{
+	if (ch_ctx == NULL)
+		return false;
+
+	return ch_ctx->remote_opened;
+}
+EXPORT_SYMBOL(glink_get_ch_rstate);
+
+/**
+ * glink_get_ch_xprt_name() - get the name of the transport to which
+ *				the channel belongs
+ * @ch_ctx:	pointer to the channel context.
+ *
+ * Return: name of the transport, NULL in case of invalid input
+ */
+char *glink_get_ch_xprt_name(struct channel_ctx *ch_ctx)
+{
+	if (ch_ctx == NULL)
+		return NULL;
+
+	return ch_ctx->transport_ptr->name;
+}
+EXPORT_SYMBOL(glink_get_ch_xprt_name);
+
+/**
+ * glink_get_ch_tx_pkt_count() - get the total number of packets sent
+ *				through this channel
+ * @ch_ctx:	pointer to the channel context.
+ *
+ * Return: number of packets transmitted, -EINVAL in case of invalid input
+ */
+int glink_get_ch_tx_pkt_count(struct channel_ctx *ch_ctx)
+{
+	if (ch_ctx == NULL)
+		return -EINVAL;
+
+	/* FUTURE: packet stats not yet implemented */
+
+	return -ENOSYS;
+}
+EXPORT_SYMBOL(glink_get_ch_tx_pkt_count);
+
+/**
+ * glink_get_ch_rx_pkt_count() - get the total number of packets
+ *				received at this channel
+ * @ch_ctx:	pointer to the channel context.
+ *
+ * Return: number of packets received, -EINVAL in case of invalid input
+ */
+int glink_get_ch_rx_pkt_count(struct channel_ctx *ch_ctx)
+{
+	if (ch_ctx == NULL)
+		return -EINVAL;
+
+	/* FUTURE: packet stats not yet implemented */
+
+	return -ENOSYS;
+}
+EXPORT_SYMBOL(glink_get_ch_rx_pkt_count);
+
+/**
+ * glink_get_ch_lintents_queued() - get the total number of intents queued
+ *				at local side
+ * @ch_ctx:	pointer to the channel context.
+ *
+ * Return: number of intents queued, -EINVAL in case of invalid input
+ */
+int glink_get_ch_lintents_queued(struct channel_ctx *ch_ctx)
+{
+	struct glink_core_rx_intent *intent;
+	int ilrx_count = 0;
+
+	if (ch_ctx == NULL)
+		return -EINVAL;
+
+	list_for_each_entry(intent, &ch_ctx->local_rx_intent_list, list)
+		ilrx_count++;
+
+	return ilrx_count;
+}
+EXPORT_SYMBOL(glink_get_ch_lintents_queued);
+
+/**
+ * glink_get_ch_rintents_queued() - get the total number of intents queued
+ *				from remote side
+ * @ch_ctx:	pointer to the channel context.
+ *
+ * Return: number of intents queued, -EINVAL in case of invalid input
+ */
+int glink_get_ch_rintents_queued(struct channel_ctx *ch_ctx)
+{
+	struct glink_core_rx_intent *intent;
+	int irrx_count = 0;
+
+	if (ch_ctx == NULL)
+		return -EINVAL;
+
+	list_for_each_entry(intent, &ch_ctx->rmt_rx_intent_list, list)
+		irrx_count++;
+
+	return irrx_count;
+}
+EXPORT_SYMBOL(glink_get_ch_rintents_queued);
+
+/**
+ * glink_get_ch_intent_info() - get the intent details of a channel
+ * @ch_ctx:	pointer to the channel context.
+ * @ch_ctx_i:	pointer to a structure that will contain intent details
+ *
+ * This function is used to get all the channel intent details including locks.
+ */
+void glink_get_ch_intent_info(struct channel_ctx *ch_ctx,
+			struct glink_ch_intent_info *ch_ctx_i)
+{
+	if (ch_ctx == NULL || ch_ctx_i == NULL)
+		return;
+
+	ch_ctx_i->li_lst_lock = &ch_ctx->local_rx_intent_lst_lock_lhc1;
+	ch_ctx_i->li_avail_list = &ch_ctx->local_rx_intent_list;
+	ch_ctx_i->li_used_list = &ch_ctx->local_rx_intent_ntfy_list;
+	ch_ctx_i->ri_lst_lock = &ch_ctx->rmt_rx_intent_lst_lock_lhc2;
+	ch_ctx_i->ri_list = &ch_ctx->rmt_rx_intent_list;
+}
+EXPORT_SYMBOL(glink_get_ch_intent_info);
+
+/**
+ * glink_get_debug_mask() - Return debug mask attribute
+ *
+ * Return: debug mask attribute
+ */
+unsigned glink_get_debug_mask(void)
+{
+	return glink_debug_mask;
+}
+EXPORT_SYMBOL(glink_get_debug_mask);
+
+/**
+ * glink_get_log_ctx() - Return log context for other GLINK modules.
+ *
+ * Return: Log context or NULL if none.
+ */
+void *glink_get_log_ctx(void)
+{
+	return log_ctx;
+}
+EXPORT_SYMBOL(glink_get_log_ctx);
+
+/**
+ * glink_get_xprt_log_ctx() - Return log context for GLINK xprts.
+ *
+ * Return: Log context or NULL if none.
+ */
+void *glink_get_xprt_log_ctx(struct glink_core_xprt_ctx *xprt)
+{
+	if (xprt)
+		return xprt->log_ctx;
+	else
+		return NULL;
+}
+EXPORT_SYMBOL(glink_get_xprt_log_ctx);
+
+static int glink_init(void)
+{
+	log_ctx = ipc_log_context_create(NUM_LOG_PAGES, "glink", 0);
+	if (!log_ctx)
+		GLINK_ERR("%s: unable to create log context\n", __func__);
+	glink_debugfs_init();
+
+	return 0;
+}
+arch_initcall(glink_init);
+
+MODULE_DESCRIPTION("MSM Generic Link (G-Link) Transport");
+MODULE_LICENSE("GPL v2");
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/soc/qcom/glink_core_if.h	2019-10-29 09:26:24.809214589 +0100
@@ -0,0 +1,218 @@
+/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#ifndef _SOC_QCOM_GLINK_CORE_IF_H_
+#define _SOC_QCOM_GLINK_CORE_IF_H_
+
+#include <linux/of.h>
+#include <linux/types.h>
+#include "glink_private.h"
+
+/* Local Channel state */
+enum local_channel_state_e {
+	GLINK_CHANNEL_CLOSED = 0,
+	GLINK_CHANNEL_OPENING,
+	GLINK_CHANNEL_OPENED,
+	GLINK_CHANNEL_CLOSING,
+};
+
+/* Transport Negotiation State */
+enum transport_state_e {
+	GLINK_XPRT_DOWN,
+	GLINK_XPRT_NEGOTIATING,
+	GLINK_XPRT_OPENED,
+	GLINK_XPRT_FAILED,
+};
+
+struct channel_ctx;
+struct glink_core_xprt_ctx;
+struct glink_transport_if;
+struct glink_core_version;
+
+/**
+ * struct glink_core_version - Individual version element
+ *
+ * @version:	supported version
+ * @features:	all supported features for version
+ * @negotiate_features:	callback used to compute the feature set to offer
+ *			locally, given the features requested by the remote
+ *			side
+ */
+struct glink_core_version {
+	uint32_t version;
+	uint32_t features;
+
+	uint32_t (*negotiate_features)(struct glink_transport_if *if_ptr,
+			const struct glink_core_version *version_ptr,
+			uint32_t features);
+};
+
+/**
+ * struct glink_core_rx_intent - RX intent
+ *
+ * @data:	pointer to the data (may be NULL for zero-copy)
+ * @id:		remote or local intent ID
+ * @pkt_size:	total size of packet
+ * @write_offset: next write offset (initially 0)
+ * @intent_size: size of the original intent (do not modify)
+ * @tracer_pkt:	Flag to indicate if the data is a tracer packet
+ * @iovec:	Pointer to vector buffer if the transport passes a vector buffer
+ * @vprovider:	Virtual address-space buffer provider for a vector buffer
+ * @pprovider:	Physical address-space buffer provider for a vector buffer
+ * @cookie:	Private transport-specific cookie
+ * @pkt_priv:	G-Link core owned packet-private data
+ * @list:	G-Link core owned list node
+ * @bounce_buf:	Pointer to the temporary/internal bounce buffer
+ */
+struct glink_core_rx_intent {
+	void *data;
+	uint32_t id;
+	size_t pkt_size;
+	size_t write_offset;
+	size_t intent_size;
+	bool tracer_pkt;
+	void *iovec;
+	void * (*vprovider)(void *iovec, size_t offset, size_t *size);
+	void * (*pprovider)(void *iovec, size_t offset, size_t *size);
+	void *cookie;
+
+	/* G-Link-Core-owned elements - please ignore */
+	struct list_head list;
+	const void *pkt_priv;
+	void *bounce_buf;
+};
+
+/**
+ * struct glink_core_flow_info - Flow specific Information
+ * @mtu_tx_time_us:	Time to transmit an MTU in microseconds.
+ * @power_state:	Power state associated with the traffic flow.
+ */
+struct glink_core_flow_info {
+	unsigned long mtu_tx_time_us;
+	uint32_t power_state;
+};
+
+/**
+ * struct glink_core_transport_cfg - configuration of a new transport
+ * @name:		Name of the transport.
+ * @edge:		Subsystem the transport connects to.
+ * @versions:		Array of transport versions supported.
+ * @versions_entries:	Number of entries in @versions.
+ * @max_cid:		Maximum number of channel identifiers supported.
+ * @max_iid:		Maximum number of intent identifiers supported.
+ * @mtu:		MTU supported by this transport.
+ * @num_flows:		Number of traffic flows/priority buckets.
+ * @flow_info:		Information about each flow/priority.
+ * @token_count:	Number of tokens per assignment.
+ */
+struct glink_core_transport_cfg {
+	const char *name;
+	const char *edge;
+	const struct glink_core_version *versions;
+	size_t versions_entries;
+	uint32_t max_cid;
+	uint32_t max_iid;
+
+	size_t mtu;
+	uint32_t num_flows;
+	struct glink_core_flow_info *flow_info;
+	uint32_t token_count;
+};
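+
+/*
+ * A minimal sketch (hypothetical names and values) of how a transport
+ * driver might fill this in before calling
+ * glink_core_register_transport(), declared below:
+ *
+ *	static const struct glink_core_version my_versions[] = {
+ *		{ .version = 1, .features = 0,
+ *		  .negotiate_features = my_negotiate_features },
+ *	};
+ *
+ *	struct glink_core_transport_cfg cfg = {
+ *		.name = "smem",
+ *		.edge = "mpss",
+ *		.versions = my_versions,
+ *		.versions_entries = ARRAY_SIZE(my_versions),
+ *		.max_cid = 65536,
+ *		.max_iid = 65536,
+ *		.mtu = 8192,
+ *	};
+ */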
+
+struct glink_core_if {
+	/* Negotiation */
+	void (*link_up)(struct glink_transport_if *if_ptr);
+	void (*link_down)(struct glink_transport_if *if_ptr);
+	void (*rx_cmd_version)(struct glink_transport_if *if_ptr,
+			uint32_t version,
+			uint32_t features);
+	void (*rx_cmd_version_ack)(struct glink_transport_if *if_ptr,
+			uint32_t version,
+			uint32_t features);
+
+	/* channel management */
+	void (*rx_cmd_ch_remote_open)(struct glink_transport_if *if_ptr,
+			uint32_t rcid, const char *name, uint16_t req_xprt);
+	void (*rx_cmd_ch_open_ack)(struct glink_transport_if *if_ptr,
+			uint32_t lcid, uint16_t xprt_resp);
+	void (*rx_cmd_ch_remote_close)(struct glink_transport_if *if_ptr,
+			uint32_t rcid);
+	void (*rx_cmd_ch_close_ack)(struct glink_transport_if *if_ptr,
+			uint32_t lcid);
+
+	/* channel data */
+	struct glink_core_rx_intent * (*rx_get_pkt_ctx)(
+			struct glink_transport_if *if_ptr,
+			uint32_t rcid, uint32_t liid);
+	void (*rx_put_pkt_ctx)(struct glink_transport_if *if_ptr, uint32_t rcid,
+			struct glink_core_rx_intent *intent_ptr, bool complete);
+	void (*rx_cmd_remote_rx_intent_put)(struct glink_transport_if *if_ptr,
+			uint32_t rcid, uint32_t riid, size_t size);
+	void (*rx_cmd_remote_rx_intent_put_cookie)(
+			struct glink_transport_if *if_ptr, uint32_t rcid,
+			uint32_t riid, size_t size, void *cookie);
+	void (*rx_cmd_tx_done)(struct glink_transport_if *if_ptr, uint32_t rcid,
+			uint32_t riid, bool reuse);
+	void (*rx_cmd_remote_rx_intent_req)(struct glink_transport_if *if_ptr,
+			uint32_t rcid, size_t size);
+	void (*rx_cmd_rx_intent_req_ack)(struct glink_transport_if *if_ptr,
+			uint32_t rcid, bool granted);
+	void (*rx_cmd_remote_sigs)(struct glink_transport_if *if_ptr,
+			uint32_t rcid, uint32_t sigs);
+
+	/* channel scheduling */
+	void (*tx_resume)(struct glink_transport_if *if_ptr);
+};
+
+int glink_core_register_transport(struct glink_transport_if *if_ptr,
+		struct glink_core_transport_cfg *cfg);
+
+void glink_core_unregister_transport(struct glink_transport_if *if_ptr);
+
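+/*
+ * Example registration (an illustrative sketch only, not mandated by this
+ * header): a transport driver fills in a configuration and registers itself
+ * with the core.  The my_versions table and my_xprt_if interface below are
+ * assumed to be defined elsewhere by the transport:
+ *
+ *	static struct glink_core_transport_cfg my_cfg = {
+ *		.name = "smem",
+ *		.edge = "mpss",
+ *		.versions = my_versions,
+ *		.versions_entries = ARRAY_SIZE(my_versions),
+ *		.max_cid = SZ_64K,
+ *		.max_iid = SZ_64K,
+ *	};
+ *
+ *	err = glink_core_register_transport(&my_xprt_if, &my_cfg);
+ */
+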
+/**
+ * of_get_glink_core_qos_cfg() - Parse the qos related dt entries
+ * @phandle:	The handle to the qos related node in DT.
+ * @cfg:	The transport configuration to be filled.
+ *
+ * Return: 0 on success, standard Linux error code otherwise.
+ */
+int of_get_glink_core_qos_cfg(struct device_node *phandle,
+				struct glink_core_transport_cfg *cfg);
+
+/**
+ * rx_linear_vbuf_provider() - Virtual Buffer Provider for linear buffers
+ * @iovec:	Pointer to the beginning of the linear buffer.
+ * @offset:	Offset into the buffer whose address is needed.
+ * @size:	Pointer to hold the length of the contiguous buffer space.
+ *
+ * This function is used when a linear buffer is received while the client has
+ * registered to receive vector buffers.
+ *
+ * Return: Address of the buffer at offset @offset from the beginning of the
+ *	   buffer, or NULL on invalid input.
+ */
+static inline void *rx_linear_vbuf_provider(void *iovec, size_t offset,
+					    size_t *size)
+{
+	struct glink_core_rx_intent *rx_info =
+		(struct glink_core_rx_intent *)iovec;
+
+	if (unlikely(!iovec || !size))
+		return NULL;
+
+	if (unlikely(offset >= rx_info->pkt_size))
+		return NULL;
+
+	if (unlikely(OVERFLOW_ADD_UNSIGNED(void *, rx_info->data, offset)))
+		return NULL;
+
+	*size = rx_info->pkt_size - offset;
+	return rx_info->data + offset;
+}
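+
+/*
+ * Illustrative sketch (not part of this header): a client that registered
+ * for vector buffers can walk any received buffer with the provider
+ * callback; for a linear buffer the loop below runs exactly once.  Here
+ * "intent" is the received rx intent and consume() is a placeholder for
+ * client processing:
+ *
+ *	size_t offset = 0, chunk;
+ *	void *buf;
+ *
+ *	while ((buf = rx_linear_vbuf_provider(intent, offset, &chunk))) {
+ *		consume(buf, chunk);
+ *		offset += chunk;
+ *	}
+ */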
+
+#endif /* _SOC_QCOM_GLINK_CORE_IF_H_ */
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/soc/qcom/glink_debugfs.c	2019-01-22 16:16:26.651274914 +0100
@@ -0,0 +1,783 @@
+/* Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#include <linux/debugfs.h>
+#include <linux/err.h>
+#include <linux/ipc_logging.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <soc/qcom/glink.h>
+#include "glink_private.h"
+#include "glink_core_if.h"
+
+
+static const char * const ss_string[] = {
+	[GLINK_DBGFS_MPSS] = "mpss",
+	[GLINK_DBGFS_APSS] = "apss",
+	[GLINK_DBGFS_LPASS] = "lpass",
+	[GLINK_DBGFS_DSPS] = "dsps",
+	[GLINK_DBGFS_RPM] = "rpm",
+	[GLINK_DBGFS_WCNSS] = "wcnss",
+	[GLINK_DBGFS_LLOOP] = "lloop",
+	[GLINK_DBGFS_MOCK] = "mock"
+};
+
+static const char * const xprt_string[] = {
+	[GLINK_DBGFS_SMEM] = "smem",
+	[GLINK_DBGFS_SMD] = "smd",
+	[GLINK_DBGFS_XLLOOP] = "lloop",
+	[GLINK_DBGFS_XMOCK] = "mock",
+	[GLINK_DBGFS_XMOCK_LOW] = "mock_low",
+	[GLINK_DBGFS_XMOCK_HIGH] = "mock_high"
+};
+
+static const char * const ch_st_string[] = {
+	[GLINK_CHANNEL_CLOSED] = "CLOSED",
+	[GLINK_CHANNEL_OPENING] = "OPENING",
+	[GLINK_CHANNEL_OPENED] = "OPENED",
+	[GLINK_CHANNEL_CLOSING] = "CLOSING",
+};
+
+static const char * const xprt_st_string[] = {
+	[GLINK_XPRT_DOWN] = "DOWN",
+	[GLINK_XPRT_NEGOTIATING] = "NEGOT",
+	[GLINK_XPRT_OPENED] = "OPENED",
+	[GLINK_XPRT_FAILED] = "FAILED"
+};
+
+#if defined(CONFIG_DEBUG_FS)
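+/* Room for "<edge>_<xprt>" names: two GLINK_NAME_SIZE strings + separator */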
+#define GLINK_DBGFS_NAME_SIZE (2 * GLINK_NAME_SIZE + 1)
+
+struct glink_dbgfs_dent {
+	struct list_head list_node;
+	char par_name[GLINK_DBGFS_NAME_SIZE];
+	char self_name[GLINK_DBGFS_NAME_SIZE];
+	struct dentry *parent;
+	struct dentry *self;
+	spinlock_t file_list_lock_lhb0;
+	struct list_head file_list;
+};
+
+static struct dentry *dent;
+static LIST_HEAD(dent_list);
+static DEFINE_MUTEX(dent_list_lock_lha0);
+
+static int debugfs_show(struct seq_file *s, void *data)
+{
+	struct glink_dbgfs_data *dfs_d = s->private;
+
+	dfs_d->o_func(s);
+	return 0;
+}
+
+static int debug_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, debugfs_show, inode->i_private);
+}
+
+static const struct file_operations debug_ops = {
+	.open = debug_open,
+	.release = single_release,
+	.read = seq_read,
+	.llseek = seq_lseek,
+};
+#endif
+
+/**
+ * glink_get_ss_enum_string() - get the name of the subsystem based on enum value
+ * @enum_id:	enum id of a specific subsystem.
+ *
+ * Return: name of the subsystem, NULL in case of invalid input
+ */
+const char *glink_get_ss_enum_string(unsigned int enum_id)
+{
+	if (enum_id >= ARRAY_SIZE(ss_string))
+		return NULL;
+
+	return ss_string[enum_id];
+}
+EXPORT_SYMBOL(glink_get_ss_enum_string);
+
+/**
+ * glink_get_xprt_enum_string() - get the name of the transport based on enum value
+ * @enum_id:	enum id of a specific transport.
+ *
+ * Return: name of the transport, NULL in case of invalid input
+ */
+const char *glink_get_xprt_enum_string(unsigned int enum_id)
+{
+	if (enum_id >= ARRAY_SIZE(xprt_string))
+		return NULL;
+	return xprt_string[enum_id];
+}
+EXPORT_SYMBOL(glink_get_xprt_enum_string);
+
+/**
+ * glink_get_xprt_state_string() - get the name of the transport state based on enum value
+ * @enum_id:	enum id of the state of the transport.
+ *
+ * Return: name of the transport state, NULL in case of invalid input
+ */
+const char *glink_get_xprt_state_string(
+				enum transport_state_e enum_id)
+{
+	if (enum_id >= ARRAY_SIZE(xprt_st_string))
+		return NULL;
+
+	return xprt_st_string[enum_id];
+}
+EXPORT_SYMBOL(glink_get_xprt_state_string);
+
+/**
+ * glink_get_ch_state_string() - get the name of the channel state based on enum value
+ * @enum_id:	enum id of a specific state of the channel.
+ *
+ * Return: name of the channel state, NULL in case of invalid input
+ */
+const char *glink_get_ch_state_string(
+				enum local_channel_state_e enum_id)
+{
+	if (enum_id >= ARRAY_SIZE(ch_st_string))
+		return NULL;
+
+	return ch_st_string[enum_id];
+}
+EXPORT_SYMBOL(glink_get_ch_state_string);
+
+#if defined(CONFIG_DEBUG_FS)
+/**
+ * glink_dfs_create_file() - create the debugfs file
+ * @name:	debugfs file name
+ * @parent:	pointer to the parent dentry structure
+ * @show:	pointer to the actual function which will be invoked upon
+ *		opening this file.
+ * @dbgfs_data:	pointer to any private data to be associated with the file
+ * @b_free_req:	true if @dbgfs_data must be freed when the file is deleted
+ *
+ * Return:	pointer to the allocated glink_dbgfs_data structure or
+ *		NULL in case of an error.
+ *
+ * This function actually creates a debugfs file under the parent directory
+ */
+static struct glink_dbgfs_data *glink_dfs_create_file(const char *name,
+		struct dentry *parent, void (*show)(struct seq_file *s),
+		void *dbgfs_data, bool b_free_req)
+{
+	struct dentry *file;
+	struct glink_dbgfs_data *dfs_d;
+
+	dfs_d = kzalloc(sizeof(struct glink_dbgfs_data), GFP_KERNEL);
+	if (dfs_d == NULL)
+		return NULL;
+
+	dfs_d->o_func = show;
+	if (dbgfs_data != NULL) {
+		dfs_d->priv_data = dbgfs_data;
+		dfs_d->b_priv_free_req = b_free_req;
+	}
+	file = debugfs_create_file(name, 0400, parent, dfs_d, &debug_ops);
+	if (!file)
+		GLINK_DBG("%s: unable to create file '%s'\n", __func__,
+				name);
+	dfs_d->dent = file;
+	return dfs_d;
+}
+
+/**
+ * write_ch_intent() - write channel intent details
+ * @s:		pointer to the sequential file
+ * @intent:	pointer to the glink core intent structure
+ * @i_type:	type of intent
+ * @count:	serial number of the intent.
+ *
+ * This function is a helper function of glink_dfs_update_ch_intent()
+ * that prints out details of any specific intent.
+ */
+static void write_ch_intent(struct seq_file *s,
+			struct glink_core_rx_intent *intent,
+			char *i_type, unsigned int count)
+{
+	char *intent_type;
+	/*
+	 * formatted, human readable intent output, i.e.:
+	 * TYPE       |SN  |ID |PKT_SIZE|W_OFFSET|INT_SIZE|
+	 * --------------------------------------------------------------
+	 * LOCAL_LIST|#2  |1   |0       |0       |8       |
+	 */
+	if (count == 1) {
+		intent_type = i_type;
+		seq_puts(s,
+		"\n--------------------------------------------------------\n");
+	} else {
+		intent_type = "";
+	}
+	seq_printf(s, "%-20s|#%-5d|%-6u|%-10zu|%-10zu|%-10zu|\n",
+			intent_type,
+			count,
+			intent->id,
+			intent->pkt_size,
+			intent->write_offset,
+			intent->intent_size);
+}
+
+/**
+ * glink_dfs_update_ch_intent() - writes the intent details of a specific
+ *				  channel to the corresponding debugfs file
+ * @s:		pointer to the sequential file
+ *
+ * This function extracts the intent details of a channel & prints them to the
+ * corresponding debugfs file of that channel.
+ */
+static void glink_dfs_update_ch_intent(struct seq_file *s)
+{
+	struct glink_dbgfs_data *dfs_d;
+	struct channel_ctx *ch_ctx;
+	struct glink_core_rx_intent *intent;
+	struct glink_core_rx_intent *intent_temp;
+	struct glink_ch_intent_info ch_intent_info;
+	unsigned long flags;
+	unsigned int count = 0;
+
+	dfs_d = s->private;
+	ch_ctx = dfs_d->priv_data;
+	if (ch_ctx != NULL) {
+		glink_get_ch_intent_info(ch_ctx, &ch_intent_info);
+		seq_puts(s,
+		"---------------------------------------------------------------\n");
+		seq_printf(s, "%-20s|%-6s|%-6s|%-10s|%-10s|%-10s|\n",
+					"INTENT TYPE",
+					"SN",
+					"ID",
+					"PKT_SIZE",
+					"W_OFFSET",
+					"INT_SIZE");
+		seq_puts(s,
+		"---------------------------------------------------------------\n");
+		spin_lock_irqsave(ch_intent_info.li_lst_lock, flags);
+		list_for_each_entry_safe(intent, intent_temp,
+				ch_intent_info.li_avail_list, list) {
+			count++;
+			write_ch_intent(s, intent, "LOCAL_AVAIL_LIST", count);
+		}
+
+		count = 0;
+		list_for_each_entry_safe(intent, intent_temp,
+				ch_intent_info.li_used_list, list) {
+			count++;
+			write_ch_intent(s, intent, "LOCAL_USED_LIST", count);
+		}
+		spin_unlock_irqrestore(ch_intent_info.li_lst_lock, flags);
+
+		count = 0;
+		spin_lock_irqsave(ch_intent_info.ri_lst_lock, flags);
+		list_for_each_entry_safe(intent, intent_temp,
+				ch_intent_info.ri_list, list) {
+			count++;
+			write_ch_intent(s, intent, "REMOTE_LIST", count);
+		}
+		spin_unlock_irqrestore(ch_intent_info.ri_lst_lock,
+					flags);
+		seq_puts(s,
+		"---------------------------------------------------------------\n");
+	}
+}
+
+/**
+ * glink_dfs_update_ch_stats() - writes statistics of a specific
+ *				 channel to the corresponding debugfs file
+ * @s:		pointer to the sequential file
+ *
+ * This function extracts the other statistics of a channel & prints them to
+ * the corresponding debugfs file of that channel
+ */
+static void glink_dfs_update_ch_stats(struct seq_file *s)
+{
+	/* FUTURE:  add channel statistics */
+	seq_puts(s, "not yet implemented\n");
+}
+
+/**
+ * glink_debugfs_remove_channel() - remove all channel specific files & folders
+ *				 in debugfs when the channel is fully closed
+ * @ch_ctx:		pointer to the channel_context
+ * @xprt_ctx:		pointer to the transport_context
+ *
+ * This function is invoked when any channel is fully closed. It removes the
+ * folders & other files in debugfs for that channel.
+ */
+void glink_debugfs_remove_channel(struct channel_ctx *ch_ctx,
+			struct glink_core_xprt_ctx *xprt_ctx)
+{
+	struct glink_dbgfs ch_rm_dbgfs;
+	char *edge_name;
+	char curr_dir_name[GLINK_DBGFS_NAME_SIZE];
+	char *xprt_name;
+
+	ch_rm_dbgfs.curr_name = glink_get_ch_name(ch_ctx);
+	edge_name = glink_get_xprt_edge_name(xprt_ctx);
+	xprt_name = glink_get_xprt_name(xprt_ctx);
+	if (!xprt_name || !edge_name) {
+		GLINK_ERR("%s: Invalid xprt_name  or edge_name for ch '%s'\n",
+				__func__, ch_rm_dbgfs.curr_name);
+		return;
+	}
+	snprintf(curr_dir_name, sizeof(curr_dir_name), "%s_%s",
+					edge_name, xprt_name);
+	ch_rm_dbgfs.par_name = curr_dir_name;
+	glink_debugfs_remove_recur(&ch_rm_dbgfs);
+}
+EXPORT_SYMBOL(glink_debugfs_remove_channel);
+
+/**
+ * glink_debugfs_add_channel() - create channel specific files & folders in
+ *				 debugfs when a channel is added
+ * @ch_ctx:		pointer to the channel_context
+ * @xprt_ctx:		pointer to the transport_context
+ *
+ * This function is invoked when a new channel is created. It creates the
+ * folders & other files in debugfs for that channel
+ */
+void glink_debugfs_add_channel(struct channel_ctx *ch_ctx,
+		struct glink_core_xprt_ctx *xprt_ctx)
+{
+	struct glink_dbgfs ch_dbgfs;
+	char *ch_name;
+	char *edge_name;
+	char *xprt_name;
+	char curr_dir_name[GLINK_DBGFS_NAME_SIZE];
+
+	if (ch_ctx == NULL) {
+		GLINK_ERR("%s: Channel Context is NULL\n", __func__);
+		return;
+	}
+
+	ch_name = glink_get_ch_name(ch_ctx);
+	edge_name =  glink_get_xprt_edge_name(xprt_ctx);
+	xprt_name =  glink_get_xprt_name(xprt_ctx);
+	if (!xprt_name || !edge_name) {
+		GLINK_ERR("%s: Invalid xprt_name  or edge_name for ch '%s'\n",
+				__func__, ch_name);
+		return;
+	}
+	snprintf(curr_dir_name, sizeof(curr_dir_name), "%s_%s",
+					edge_name, xprt_name);
+
+	ch_dbgfs.curr_name = curr_dir_name;
+	ch_dbgfs.par_name = "channel";
+	ch_dbgfs.b_dir_create = true;
+	glink_debugfs_create(ch_name, NULL, &ch_dbgfs, NULL, false);
+
+	ch_dbgfs.par_name = ch_dbgfs.curr_name;
+	ch_dbgfs.curr_name = ch_name;
+	ch_dbgfs.b_dir_create = false;
+	glink_debugfs_create("stats", glink_dfs_update_ch_stats,
+				&ch_dbgfs, (void *)ch_ctx, false);
+	glink_debugfs_create("intents", glink_dfs_update_ch_intent,
+			&ch_dbgfs, (void *)ch_ctx, false);
+}
+EXPORT_SYMBOL(glink_debugfs_add_channel);
+
+/**
+ * glink_debugfs_add_xprt() - create transport specific files & folders in
+ *			      debugfs when a new transport is registered
+ * @xprt_ctx:		pointer to the transport_context
+ *
+ * This function is invoked when a new transport is registered. It creates the
+ * folders & other files in debugfs for that transport
+ */
+void glink_debugfs_add_xprt(struct glink_core_xprt_ctx *xprt_ctx)
+{
+	struct glink_dbgfs xprt_dbgfs;
+	char *xprt_name;
+	char *edge_name;
+	char curr_dir_name[GLINK_DBGFS_NAME_SIZE];
+
+	if (xprt_ctx == NULL) {
+		GLINK_ERR("%s: Transport Context is NULL\n", __func__);
+		return;
+	}
+
+	xprt_name = glink_get_xprt_name(xprt_ctx);
+	edge_name = glink_get_xprt_edge_name(xprt_ctx);
+	if (!xprt_name || !edge_name) {
+		GLINK_ERR("%s: xprt name or edge name is NULL\n", __func__);
+		return;
+	}
+	snprintf(curr_dir_name, sizeof(curr_dir_name), "%s_%s",
+					edge_name, xprt_name);
+	xprt_dbgfs.par_name = "glink";
+	xprt_dbgfs.curr_name = "xprt";
+	xprt_dbgfs.b_dir_create = true;
+	glink_debugfs_create(curr_dir_name, NULL, &xprt_dbgfs, NULL, false);
+	xprt_dbgfs.curr_name = "channel";
+	glink_debugfs_create(curr_dir_name, NULL, &xprt_dbgfs, NULL, false);
+}
+EXPORT_SYMBOL(glink_debugfs_add_xprt);
+
+/**
+ * glink_dfs_create_channel_list() - create & update the channel details
+ * @s:	pointer to seq_file
+ *
+ * This function updates channel details in debugfs
+ * file present in /glink/channel/channels
+ */
+static void glink_dfs_create_channel_list(struct seq_file *s)
+{
+	struct xprt_ctx_iterator xprt_iter;
+	struct ch_ctx_iterator ch_iter;
+
+	struct glink_core_xprt_ctx *xprt_ctx;
+	struct channel_ctx *ch_ctx;
+	int count = 0;
+	/*
+	 * formatted, human readable channel state output, i.e.:
+	 * NAME               |LCID|RCID|XPRT|EDGE|LSTATE |RSTATE|LINT-Q|RINT-Q|
+	 * --------------------------------------------------------------------
+	 * LOCAL_LOOPBACK_CLNT|2   |1  |lloop|local|OPENED|OPENED|5     |6    |
+	 * N.B. Number of TX & RX Packets not implemented yet. -ENOSYS is printed
+	 */
+	seq_printf(s, "%-20s|%-4s|%-4s|%-10s|%-6s|%-7s|%-7s|%-5s|%-5s|\n",
+								"NAME",
+								"LCID",
+								"RCID",
+								"XPRT",
+								"EDGE",
+								"LSTATE",
+								"RSTATE",
+								"LINTQ",
+								"RINTQ");
+	seq_puts(s,
+		"-------------------------------------------------------------------------------\n");
+	glink_xprt_ctx_iterator_init(&xprt_iter);
+	xprt_ctx = glink_xprt_ctx_iterator_next(&xprt_iter);
+
+	while (xprt_ctx != NULL) {
+		glink_ch_ctx_iterator_init(&ch_iter, xprt_ctx);
+		ch_ctx = glink_ch_ctx_iterator_next(&ch_iter);
+		while (ch_ctx != NULL) {
+			count++;
+			seq_printf(s, "%-20s|%-4i|%-4i|%-10s|%-6s|%-7s|",
+					glink_get_ch_name(ch_ctx),
+					glink_get_ch_lcid(ch_ctx),
+					glink_get_ch_rcid(ch_ctx),
+					glink_get_ch_xprt_name(ch_ctx),
+					glink_get_ch_edge_name(ch_ctx),
+					glink_get_ch_lstate(ch_ctx));
+			seq_printf(s, "%-7s|%-5i|%-5i|\n",
+			(glink_get_ch_rstate(ch_ctx) ? "OPENED" : "CLOSED"),
+			glink_get_ch_lintents_queued(ch_ctx),
+			glink_get_ch_rintents_queued(ch_ctx));
+
+			ch_ctx = glink_ch_ctx_iterator_next(&ch_iter);
+		}
+		glink_ch_ctx_iterator_end(&ch_iter, xprt_ctx);
+		xprt_ctx = glink_xprt_ctx_iterator_next(&xprt_iter);
+	}
+
+	glink_xprt_ctx_iterator_end(&xprt_iter);
+}
+
+/**
+ * glink_dfs_create_xprt_list() - create & update the transport details
+ * @s:	pointer to seq_file
+ *
+ * This function updates the transport details in the debugfs file present
+ * in /glink/xprt/xprts
+ */
+static void glink_dfs_create_xprt_list(struct seq_file *s)
+{
+	struct xprt_ctx_iterator xprt_iter;
+	struct glink_core_xprt_ctx *xprt_ctx;
+	const struct glink_core_version  *gver;
+	uint32_t version;
+	uint32_t features;
+	int count = 0;
+	/*
+	 * formatted, human readable transport state output, i.e.:
+	 * XPRT_NAME|REMOTE    |STATE|VERSION |FEATURES|
+	 * ---------------------------------------------
+	 * smd_trans|lpass     |2    |0       |1       |
+	 * smem     |mpss      |0    |0       |0       |
+	 */
+	seq_printf(s, "%-20s|%-20s|%-6s|%-8s|%-8s|\n",
+							"XPRT_NAME",
+							"REMOTE",
+							"STATE",
+							"VERSION",
+							"FEATURES");
+	seq_puts(s,
+		"-------------------------------------------------------------------------------\n");
+	glink_xprt_ctx_iterator_init(&xprt_iter);
+	xprt_ctx = glink_xprt_ctx_iterator_next(&xprt_iter);
+
+	while (xprt_ctx != NULL) {
+		count++;
+		seq_printf(s, "%-20s|%-20s|",
+					glink_get_xprt_name(xprt_ctx),
+					glink_get_xprt_edge_name(xprt_ctx));
+		gver = glink_get_xprt_version_features(xprt_ctx);
+		if (gver != NULL) {
+			version = gver->version;
+			features = gver->features;
+			seq_printf(s, "%-6s|%-8i|%-8i|\n",
+					glink_get_xprt_state(xprt_ctx),
+					version,
+					features);
+		} else {
+			seq_printf(s, "%-6s|%-8i|%-8i|\n",
+					glink_get_xprt_state(xprt_ctx),
+					-ENODATA,
+					-ENODATA);
+		}
+		xprt_ctx = glink_xprt_ctx_iterator_next(&xprt_iter);
+
+	}
+
+	glink_xprt_ctx_iterator_end(&xprt_iter);
+}
+
+/**
+ * glink_dfs_update_list() - update the internally maintained dentry linked list
+ * @curr_dent:	pointer to the current dentry object
+ * @parent:	pointer to the parent dentry object
+ * @curr:	current directory name
+ * @par_dir:	parent directory name
+ */
+void glink_dfs_update_list(struct dentry *curr_dent, struct dentry *parent,
+			const char *curr, const char *par_dir)
+{
+	struct glink_dbgfs_dent *dbgfs_dent_s;
+
+	if (curr_dent != NULL) {
+		dbgfs_dent_s = kzalloc(sizeof(struct glink_dbgfs_dent),
+				GFP_KERNEL);
+		if (dbgfs_dent_s != NULL) {
+			INIT_LIST_HEAD(&dbgfs_dent_s->file_list);
+			spin_lock_init(&dbgfs_dent_s->file_list_lock_lhb0);
+			dbgfs_dent_s->parent = parent;
+			dbgfs_dent_s->self = curr_dent;
+			strlcpy(dbgfs_dent_s->self_name,
+				curr, strlen(curr) + 1);
+			strlcpy(dbgfs_dent_s->par_name, par_dir,
+					strlen(par_dir) + 1);
+			mutex_lock(&dent_list_lock_lha0);
+			list_add_tail(&dbgfs_dent_s->list_node, &dent_list);
+			mutex_unlock(&dent_list_lock_lha0);
+		}
+	} else {
+		GLINK_DBG("%s:create directory failed for par:curr [%s:%s]\n",
+				__func__, par_dir, curr);
+	}
+}
+
+/**
+ * glink_remove_dfs_entry() - remove an entry from dent_list
+ * @entry:	pointer to the glink_dbgfs_dent structure
+ *
+ * This function removes an entry from the internally maintained linked list
+ * of dentries. It also deletes the file list and frees the associated memory
+ * if present.
+ */
+void glink_remove_dfs_entry(struct glink_dbgfs_dent *entry)
+{
+	struct glink_dbgfs_data *fentry, *fentry_temp;
+	unsigned long flags;
+
+	if (entry == NULL)
+		return;
+	if (!list_empty(&entry->file_list)) {
+		spin_lock_irqsave(&entry->file_list_lock_lhb0, flags);
+		list_for_each_entry_safe(fentry, fentry_temp,
+				&entry->file_list, flist) {
+			if (fentry->b_priv_free_req)
+				kfree(fentry->priv_data);
+			list_del(&fentry->flist);
+			kfree(fentry);
+			fentry = NULL;
+		}
+		spin_unlock_irqrestore(&entry->file_list_lock_lhb0, flags);
+	}
+	list_del(&entry->list_node);
+	kfree(entry);
+	entry = NULL;
+}
+
+/**
+ * glink_debugfs_remove_recur() - remove the directory & files recursively
+ * @rm_dfs:	pointer to the structure glink_dbgfs
+ *
+ * This function removes the files & directories below the given directory.
+ * This also takes care of freeing any memory associated with the debugfs file.
+ */
+void glink_debugfs_remove_recur(struct glink_dbgfs *rm_dfs)
+{
+	const char *c_dir_name;
+	const char *p_dir_name;
+	struct glink_dbgfs_dent *entry, *entry_temp;
+	struct dentry *par_dent = NULL;
+
+	if (rm_dfs == NULL)
+		return;
+
+	c_dir_name = rm_dfs->curr_name;
+	p_dir_name = rm_dfs->par_name;
+
+	mutex_lock(&dent_list_lock_lha0);
+	list_for_each_entry_safe(entry, entry_temp, &dent_list, list_node) {
+		if (!strcmp(entry->par_name, c_dir_name)) {
+			glink_remove_dfs_entry(entry);
+		} else if (!strcmp(entry->self_name, c_dir_name)
+				&& !strcmp(entry->par_name, p_dir_name)) {
+			par_dent = entry->self;
+			glink_remove_dfs_entry(entry);
+		}
+	}
+	mutex_unlock(&dent_list_lock_lha0);
+	if (par_dent != NULL)
+		debugfs_remove_recursive(par_dent);
+}
+EXPORT_SYMBOL(glink_debugfs_remove_recur);
+
+/**
+ * glink_debugfs_create() - create the debugfs file
+ * @name:	debugfs file name
+ * @show:	pointer to the actual function which will be invoked upon
+ *		opening this file.
+ * @dir:	pointer to the glink_dbgfs structure describing the parent
+ * @dbgfs_data:	pointer to any private data to be associated with the file
+ * @b_free_req:	boolean value deciding whether to free the memory associated
+ *		with @dbgfs_data during deletion of the file
+ *
+ * Return:	pointer to the file/directory created, NULL in case of error
+ *
+ * This function checks which directory will be used to create the debugfs
+ * file and calls glink_dfs_create_file(). Callers that allocate memory for
+ * @dbgfs_data and require it to be freed on deletion need to set @b_free_req
+ * to true. Otherwise, there will be a memory leak.
+ */
+struct dentry *glink_debugfs_create(const char *name,
+		void (*show)(struct seq_file *),
+		struct glink_dbgfs *dir, void *dbgfs_data, bool b_free_req)
+{
+	struct dentry *parent =  NULL;
+	struct dentry *dent = NULL;
+	struct glink_dbgfs_dent *entry;
+	struct glink_dbgfs_data *file_data;
+	const char *c_dir_name;
+	const char *p_dir_name;
+	unsigned long flags;
+
+	if (dir == NULL) {
+		GLINK_ERR("%s: debugfs_dir strucutre is null\n", __func__);
+		return NULL;
+	}
+	c_dir_name = dir->curr_name;
+	p_dir_name = dir->par_name;
+
+	mutex_lock(&dent_list_lock_lha0);
+	list_for_each_entry(entry, &dent_list, list_node)
+		if (!strcmp(entry->par_name, p_dir_name)
+				&& !strcmp(entry->self_name, c_dir_name)) {
+			parent = entry->self;
+			break;
+		}
+	mutex_unlock(&dent_list_lock_lha0);
+	p_dir_name = c_dir_name;
+	c_dir_name = name;
+	if (parent != NULL) {
+		if (dir->b_dir_create) {
+			dent = debugfs_create_dir(name, parent);
+			if (dent != NULL)
+				glink_dfs_update_list(dent, parent,
+							c_dir_name, p_dir_name);
+		} else {
+			file_data = glink_dfs_create_file(name, parent, show,
+							dbgfs_data, b_free_req);
+			spin_lock_irqsave(&entry->file_list_lock_lhb0, flags);
+			if (file_data != NULL)
+				list_add_tail(&file_data->flist,
+						&entry->file_list);
+			spin_unlock_irqrestore(&entry->file_list_lock_lhb0,
+						flags);
+		}
+	} else {
+		GLINK_DBG("%s: parent dentry is null for [%s]\n",
+				__func__, name);
+	}
+	return dent;
+}
+EXPORT_SYMBOL(glink_debugfs_create);
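+
+/*
+ * Typical usage (see glink_debugfs_init() below): create a directory by
+ * passing a descriptor with b_dir_create set, then create the files under
+ * it with b_dir_create cleared and the new directory given as curr_name.
+ */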
+
+/**
+ * glink_debugfs_init() - initialize the glink debugfs directory structure
+ *
+ * Return:	0 on success, appropriate error code otherwise
+ *
+ * This function initializes the debugfs directory for glink
+ */
+int glink_debugfs_init(void)
+{
+	struct glink_dbgfs dbgfs;
+
+	/* fake parent name */
+	dent = debugfs_create_dir("glink", NULL);
+	if (IS_ERR_OR_NULL(dent))
+		return PTR_ERR(dent);
+
+	glink_dfs_update_list(dent, NULL, "glink", "root");
+
+	dbgfs.b_dir_create = true;
+	dbgfs.curr_name = "glink";
+	dbgfs.par_name = "root";
+	glink_debugfs_create("xprt", NULL, &dbgfs, NULL, false);
+	glink_debugfs_create("channel", NULL, &dbgfs, NULL, false);
+
+	dbgfs.curr_name = "channel";
+	dbgfs.par_name = "glink";
+	dbgfs.b_dir_create = false;
+	glink_debugfs_create("channels", glink_dfs_create_channel_list,
+				&dbgfs, NULL, false);
+	dbgfs.curr_name = "xprt";
+	glink_debugfs_create("xprts", glink_dfs_create_xprt_list,
+				&dbgfs, NULL, false);
+
+	return 0;
+}
+EXPORT_SYMBOL(glink_debugfs_init);
+
+/**
+ * glink_debugfs_exit() - removes the glink debugfs directory
+ *
+ * This function recursively removes all the debugfs directories
+ * starting from dent
+ */
+void glink_debugfs_exit(void)
+{
+	if (dent != NULL)
+		debugfs_remove_recursive(dent);
+}
+EXPORT_SYMBOL(glink_debugfs_exit);
+#else
+void glink_debugfs_remove_recur(struct glink_dbgfs *dfs) { }
+EXPORT_SYMBOL(glink_debugfs_remove_recur);
+
+void glink_debugfs_remove_channel(struct channel_ctx *ch_ctx,
+			struct glink_core_xprt_ctx *xprt_ctx) { }
+EXPORT_SYMBOL(glink_debugfs_remove_channel);
+
+void glink_debugfs_add_channel(struct channel_ctx *ch_ctx,
+		struct glink_core_xprt_ctx *xprt_ctx) { }
+EXPORT_SYMBOL(glink_debugfs_add_channel);
+
+void glink_debugfs_add_xprt(struct glink_core_xprt_ctx *xprt_ctx) { }
+EXPORT_SYMBOL(glink_debugfs_add_xprt);
+
+int glink_debugfs_init(void) { return 0; }
+EXPORT_SYMBOL(glink_debugfs_init);
+
+void glink_debugfs_exit(void) { }
+EXPORT_SYMBOL(glink_debugfs_exit);
+#endif /* CONFIG_DEBUG_FS */
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/soc/qcom/glink_loopback_commands.h	2019-01-22 16:16:26.651274914 +0100
@@ -0,0 +1,104 @@
+/* Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#ifndef _GLINK_LOOPBACK_COMMANDS_H_
+#define _GLINK_LOOPBACK_COMMANDS_H_
+
+#define MAX_NAME_LEN 32
+
+enum request_type {
+	OPEN = 1,
+	CLOSE,
+	QUEUE_RX_INTENT_CONFIG,
+	TX_CONFIG,
+	RX_DONE_CONFIG,
+};
+
+struct req_hdr {
+	uint32_t req_id;
+	uint32_t req_type;
+	uint32_t req_size;
+};
+
+struct open_req {
+	uint32_t delay_ms;
+	uint32_t name_len;
+	char ch_name[MAX_NAME_LEN];
+};
+
+struct close_req {
+	uint32_t delay_ms;
+	uint32_t name_len;
+	char ch_name[MAX_NAME_LEN];
+};
+
+struct queue_rx_intent_config_req {
+	uint32_t num_intents;
+	uint32_t intent_size;
+	uint32_t random_delay;
+	uint32_t delay_ms;
+	uint32_t name_len;
+	char ch_name[MAX_NAME_LEN];
+};
+
+enum transform_type {
+	NO_TRANSFORM = 0,
+	PACKET_COUNT,
+	CHECKSUM,
+};
+
+struct tx_config_req {
+	uint32_t random_delay;
+	uint32_t delay_ms;
+	uint32_t echo_count;
+	uint32_t transform_type;
+	uint32_t name_len;
+	char ch_name[MAX_NAME_LEN];
+};
+
+struct rx_done_config_req {
+	uint32_t random_delay;
+	uint32_t delay_ms;
+	uint32_t name_len;
+	char ch_name[MAX_NAME_LEN];
+};
+
+union req_payload {
+	struct open_req open;
+	struct close_req close;
+	struct queue_rx_intent_config_req q_rx_int_conf;
+	struct tx_config_req tx_conf;
+	struct rx_done_config_req rx_done_conf;
+};
+
+struct req {
+	struct req_hdr hdr;
+	union req_payload payload;
+};
+
+struct resp {
+	uint32_t req_id;
+	uint32_t req_type;
+	uint32_t response;
+};
+
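+/*
+ * Illustrative request (a sketch, not part of the protocol definition):
+ * a client opening a loopback data channel could send the following over
+ * the control channel, then wait for a struct resp carrying the same
+ * req_id.  The channel name is made up for the example, and req_size is
+ * assumed to carry the size of the payload in use:
+ *
+ *	struct req r = {
+ *		.hdr = {
+ *			.req_id = 1,
+ *			.req_type = OPEN,
+ *			.req_size = sizeof(struct open_req),
+ *		},
+ *	};
+ *
+ *	r.payload.open.name_len = strlen("LOOPBACK_DATA_CLNT");
+ *	strlcpy(r.payload.open.ch_name, "LOOPBACK_DATA_CLNT", MAX_NAME_LEN);
+ */
+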
+/*
+ * Tracer Packet Event IDs for Loopback Client/Server.
+ * This being a client of G-Link, the tracer packet events start
+ * from 256.
+ */
+enum loopback_tracer_pkt_events {
+	LOOPBACK_SRV_TX = 256,
+	LOOPBACK_SRV_RX = 257,
+	LOOPBACK_CLNT_TX = 258,
+	LOOPBACK_CLNT_RX = 259,
+};
+#endif
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/soc/qcom/glink_loopback_server.c	2019-10-29 09:26:24.809214589 +0100
@@ -0,0 +1,1296 @@
+/* Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#include <linux/err.h>
+#include <linux/ipc_logging.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/random.h>
+#include <linux/uio.h>
+#include <soc/qcom/glink.h>
+#include <soc/qcom/tracer_pkt.h>
+#include "glink_loopback_commands.h"
+
+
+/* Number of internal IPC Logging log pages */
+#define GLINK_LBSRV_NUM_LOG_PAGES	3
+
+static void *glink_lbsrv_log_ctx;
+
+#define GLINK_LBSRV_IPC_LOG_STR(x...) do { \
+	if (glink_lbsrv_log_ctx) \
+		ipc_log_string(glink_lbsrv_log_ctx, x); \
+} while (0)
+
+#define LBSRV_INFO(x...) GLINK_LBSRV_IPC_LOG_STR("<LBSRV> " x)
+
+#define LBSRV_ERR(x...) do {                              \
+	pr_err("<LBSRV> " x); \
+	GLINK_LBSRV_IPC_LOG_STR("<LBSRV> " x);  \
+} while (0)
+
+enum ch_type {
+	CTL,
+	DATA,
+};
+
+enum buf_type {
+	LINEAR,
+	VECTOR,
+};
+
+struct tx_config_info {
+	uint32_t random_delay;
+	uint32_t delay_ms;
+	uint32_t echo_count;
+	uint32_t transform_type;
+};
+
+struct rx_done_config_info {
+	uint32_t random_delay;
+	uint32_t delay_ms;
+};
+
+struct rmt_rx_intent_req_work_info {
+	size_t req_intent_size;
+	struct delayed_work work;
+	struct ch_info *work_ch_info;
+};
+
+struct queue_rx_intent_work_info {
+	uint32_t req_id;
+	bool deferred;
+	struct ch_info *req_ch_info;
+	uint32_t num_intents;
+	uint32_t intent_size;
+	uint32_t random_delay;
+	uint32_t delay_ms;
+	struct delayed_work work;
+	struct ch_info *work_ch_info;
+};
+
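+/**
+ * struct lbsrv_vec - vector data copied from a received packet
+ * @num_bufs:	number of entries in @vec
+ * @vec:	flexible array of buffers, allocated together with the struct
+ */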
+struct lbsrv_vec {
+	uint32_t num_bufs;
+	struct kvec vec[0];
+};
+
+struct tx_work_info {
+	struct tx_config_info tx_config;
+	struct delayed_work work;
+	struct ch_info *tx_ch_info;
+	void *data;
+	bool tracer_pkt;
+	uint32_t buf_type;
+	size_t size;
+	void * (*vbuf_provider)(void *iovec, size_t offset, size_t *size);
+	void * (*pbuf_provider)(void *iovec, size_t offset, size_t *size);
+};
+
+struct rx_done_work_info {
+	struct delayed_work work;
+	struct ch_info *rx_done_ch_info;
+	void *ptr;
+};
+
+struct rx_work_info {
+	struct ch_info *rx_ch_info;
+	void *pkt_priv;
+	void *ptr;
+	bool tracer_pkt;
+	uint32_t buf_type;
+	size_t size;
+	void * (*vbuf_provider)(void *iovec, size_t offset, size_t *size);
+	void * (*pbuf_provider)(void *iovec, size_t offset, size_t *size);
+	struct delayed_work work;
+};
+
+struct ch_info {
+	struct list_head list;
+	struct mutex ch_info_lock;
+	char name[MAX_NAME_LEN];
+	char edge[GLINK_NAME_SIZE];
+	char transport[GLINK_NAME_SIZE];
+	void *handle;
+	bool fully_opened;
+	uint32_t type;
+	struct delayed_work open_work;
+	struct delayed_work close_work;
+	struct tx_config_info tx_config;
+	struct rx_done_config_info rx_done_config;
+	struct queue_rx_intent_work_info *queue_rx_intent_work_info;
+};
+
+struct ctl_ch_info {
+	char name[MAX_NAME_LEN];
+	char edge[GLINK_NAME_SIZE];
+	char transport[GLINK_NAME_SIZE];
+};
+
+static struct ctl_ch_info ctl_ch_tbl[] = {
+	{"LOCAL_LOOPBACK_SRV", "local", "lloop"},
+	{"LOOPBACK_CTL_APSS", "mpss", "smem"},
+	{"LOOPBACK_CTL_APSS", "lpass", "smem"},
+	{"LOOPBACK_CTL_APSS", "dsps", "smem"},
+	{"LOOPBACK_CTL_APSS", "spss", "mailbox"},
+};
+
+static DEFINE_MUTEX(ctl_ch_list_lock);
+static LIST_HEAD(ctl_ch_list);
+static DEFINE_MUTEX(data_ch_list_lock);
+static LIST_HEAD(data_ch_list);
+
+struct workqueue_struct *glink_lbsrv_wq;
+
+/**
+ * struct link_state_work_info - Information about work handling link state
+ *				 updates
+ * @edge:	Remote subsystem name in the link.
+ * @transport:	Name of the transport/link.
+ * @link_state:	State of the transport/link.
+ * @work:	Reference to the work item.
+ */
+struct link_state_work_info {
+	char edge[GLINK_NAME_SIZE];
+	char transport[GLINK_NAME_SIZE];
+	enum glink_link_state link_state;
+	struct delayed_work work;
+};
+
+static void glink_lbsrv_link_state_cb(struct glink_link_state_cb_info *cb_info,
+				      void *priv);
+static struct glink_link_info glink_lbsrv_link_info = {
+			NULL, NULL, glink_lbsrv_link_state_cb};
+static void *glink_lbsrv_link_state_notif_handle;
+
+static void glink_lbsrv_open_worker(struct work_struct *work);
+static void glink_lbsrv_close_worker(struct work_struct *work);
+static void glink_lbsrv_rmt_rx_intent_req_worker(struct work_struct *work);
+static void glink_lbsrv_queue_rx_intent_worker(struct work_struct *work);
+static void glink_lbsrv_rx_worker(struct work_struct *work);
+static void glink_lbsrv_rx_done_worker(struct work_struct *work);
+static void glink_lbsrv_tx_worker(struct work_struct *work);
+
+int glink_lbsrv_send_response(void *handle, uint32_t req_id, uint32_t req_type,
+		uint32_t response)
+{
+	struct resp *resp_pkt = kzalloc(sizeof(struct resp), GFP_KERNEL);
+
+	if (!resp_pkt) {
+		LBSRV_ERR("%s: Error allocating response packet\n", __func__);
+		return -ENOMEM;
+	}
+
+	resp_pkt->req_id = req_id;
+	resp_pkt->req_type = req_type;
+	resp_pkt->response = response;
+
+	return glink_tx(handle, (void *)LINEAR, (void *)resp_pkt,
+			sizeof(struct resp), 0);
+}
+
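+/**
+ * calc_delay_ms() - compute the delay for a scheduled work item
+ * @random_delay:	non-zero to randomize the delay
+ * @delay_ms:		fixed delay, or the upper bound when randomized
+ *
+ * Return: a pseudo-random delay below @delay_ms when both arguments are
+ *	   non-zero, an unbounded pseudo-random delay when only @random_delay
+ *	   is non-zero, and @delay_ms itself otherwise.
+ */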
+static uint32_t calc_delay_ms(uint32_t random_delay, uint32_t delay_ms)
+{
+	uint32_t tmp_delay_ms;
+
+	if (random_delay && delay_ms)
+		tmp_delay_ms = prandom_u32() % delay_ms;
+	else if (random_delay)
+		tmp_delay_ms = prandom_u32();
+	else
+		tmp_delay_ms = delay_ms;
+
+	return tmp_delay_ms;
+}
+
+static int create_ch_info(char *name, char *edge, char *transport,
+			  uint32_t type, struct ch_info **ret_ch_info)
+{
+	struct ch_info *tmp_ch_info;
+
+	tmp_ch_info = kzalloc(sizeof(struct ch_info), GFP_KERNEL);
+	if (!tmp_ch_info) {
+		LBSRV_ERR("%s: Error allocation ch_info\n", __func__);
+		return -ENOMEM;
+	}
+
+	INIT_LIST_HEAD(&tmp_ch_info->list);
+	mutex_init(&tmp_ch_info->ch_info_lock);
+	strlcpy(tmp_ch_info->name, name, MAX_NAME_LEN);
+	strlcpy(tmp_ch_info->edge, edge, GLINK_NAME_SIZE);
+	strlcpy(tmp_ch_info->transport, transport, GLINK_NAME_SIZE);
+	tmp_ch_info->type = type;
+	INIT_DELAYED_WORK(&tmp_ch_info->open_work,
+			  glink_lbsrv_open_worker);
+	INIT_DELAYED_WORK(&tmp_ch_info->close_work,
+			  glink_lbsrv_close_worker);
+	tmp_ch_info->tx_config.echo_count = 1;
+
+	if (type == CTL) {
+		mutex_lock(&ctl_ch_list_lock);
+		list_add_tail(&tmp_ch_info->list, &ctl_ch_list);
+		mutex_unlock(&ctl_ch_list_lock);
+	} else if (type == DATA) {
+		mutex_lock(&data_ch_list_lock);
+		list_add_tail(&tmp_ch_info->list, &data_ch_list);
+		mutex_unlock(&data_ch_list_lock);
+	} else {
+		LBSRV_ERR("%s:%s:%s %s: Invalid ch type %d\n", transport,
+				edge, name, __func__, type);
+		kfree(tmp_ch_info);
+		return -EINVAL;
+	}
+	*ret_ch_info = tmp_ch_info;
+	return 0;
+}
+
+struct ch_info *lookup_ch_list(char *name, char *edge, char *transport,
+			       uint32_t type)
+{
+	struct list_head *ch_list;
+	struct mutex *lock;
+	struct ch_info *tmp_ch_info;
+
+	if (type == DATA) {
+		ch_list = &data_ch_list;
+		lock = &data_ch_list_lock;
+	} else if (type == CTL) {
+		ch_list = &ctl_ch_list;
+		lock = &ctl_ch_list_lock;
+	} else {
+		LBSRV_ERR("%s:%s:%s %s: Invalid ch type %d\n", transport,
+			    edge, name, __func__, type);
+		return NULL;
+	}
+
+	mutex_lock(lock);
+	list_for_each_entry(tmp_ch_info, ch_list, list) {
+		if (!strcmp(name, tmp_ch_info->name) &&
+		    !strcmp(edge, tmp_ch_info->edge) &&
+		    !strcmp(transport, tmp_ch_info->transport)) {
+			mutex_unlock(lock);
+			return tmp_ch_info;
+		}
+	}
+	mutex_unlock(lock);
+	return NULL;
+}
+
+int glink_lbsrv_handle_open_req(struct ch_info *rx_ch_info,
+				struct open_req req)
+{
+	struct ch_info *tmp_ch_info;
+	int ret;
+	char name[MAX_NAME_LEN];
+	char *temp;
+
+	strlcpy(name, req.ch_name, MAX_NAME_LEN);
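+	/*
+	 * On the local loopback transport the client side opens "<name>_CLNT"
+	 * channels; the matching server-side channel is "<name>_SRV".
+	 */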
+	if (!strcmp(rx_ch_info->transport, "lloop")) {
+		temp = strnstr(name, "_CLNT", MAX_NAME_LEN);
+		if (temp)
+			*temp = '\0';
+		strlcat(name, "_SRV", MAX_NAME_LEN);
+	}
+	LBSRV_INFO("%s:%s:%s %s: delay_ms[%d]\n",
+		   rx_ch_info->transport, rx_ch_info->edge,
+		   name, __func__, req.delay_ms);
+	tmp_ch_info = lookup_ch_list(name, rx_ch_info->edge,
+				     rx_ch_info->transport, DATA);
+	if (tmp_ch_info)
+		goto queue_open_work;
+
+	ret = create_ch_info(name, rx_ch_info->edge, rx_ch_info->transport,
+			     DATA, &tmp_ch_info);
+	if (ret)
+		return ret;
+queue_open_work:
+	queue_delayed_work(glink_lbsrv_wq, &tmp_ch_info->open_work,
+			   msecs_to_jiffies(req.delay_ms));
+	return 0;
+}
+
+int glink_lbsrv_handle_close_req(struct ch_info *rx_ch_info,
+				 struct close_req req)
+{
+	struct ch_info *tmp_ch_info;
+	char name[MAX_NAME_LEN];
+	char *temp;
+
+	strlcpy(name, req.ch_name, MAX_NAME_LEN);
+	if (!strcmp(rx_ch_info->transport, "lloop")) {
+		temp = strnstr(name, "_CLNT", MAX_NAME_LEN);
+		if (temp)
+			*temp = '\0';
+		strlcat(name, "_SRV", MAX_NAME_LEN);
+	}
+	LBSRV_INFO("%s:%s:%s %s: delay_ms[%d]\n",
+		    rx_ch_info->transport, rx_ch_info->edge,
+		    name, __func__, req.delay_ms);
+	tmp_ch_info = lookup_ch_list(name, rx_ch_info->edge,
+				     rx_ch_info->transport, DATA);
+	if (tmp_ch_info)
+		queue_delayed_work(glink_lbsrv_wq, &tmp_ch_info->close_work,
+				   msecs_to_jiffies(req.delay_ms));
+	return 0;
+}
+
+int glink_lbsrv_handle_queue_rx_intent_config_req(struct ch_info *rx_ch_info,
+			struct queue_rx_intent_config_req req, uint32_t req_id)
+{
+	struct ch_info *tmp_ch_info;
+	struct queue_rx_intent_work_info *tmp_work_info;
+	char name[MAX_NAME_LEN];
+	char *temp;
+	uint32_t delay_ms;
+
+	strlcpy(name, req.ch_name, MAX_NAME_LEN);
+	if (!strcmp(rx_ch_info->transport, "lloop")) {
+		temp = strnstr(name, "_CLNT", MAX_NAME_LEN);
+		if (temp)
+			*temp = '\0';
+		strlcat(name, "_SRV", MAX_NAME_LEN);
+	}
+	LBSRV_INFO("%s:%s:%s %s: num_intents[%d] size[%d]\n",
+		   rx_ch_info->transport, rx_ch_info->edge, name, __func__,
+		   req.num_intents, req.intent_size);
+	tmp_ch_info = lookup_ch_list(name, rx_ch_info->edge,
+				     rx_ch_info->transport, DATA);
+	if (!tmp_ch_info) {
+		LBSRV_ERR("%s:%s:%s %s: Channel info not found\n",
+				rx_ch_info->transport, rx_ch_info->edge,
+				name, __func__);
+		return -EINVAL;
+	}
+
+	tmp_work_info = kzalloc(sizeof(struct queue_rx_intent_work_info),
+				GFP_KERNEL);
+	if (!tmp_work_info) {
+		LBSRV_ERR("%s: Error allocating work_info\n", __func__);
+		return -ENOMEM;
+	}
+
+	tmp_work_info->req_id = req_id;
+	tmp_work_info->req_ch_info = rx_ch_info;
+	tmp_work_info->num_intents = req.num_intents;
+	tmp_work_info->intent_size = req.intent_size;
+	tmp_work_info->random_delay =  req.random_delay;
+	tmp_work_info->delay_ms = req.delay_ms;
+	INIT_DELAYED_WORK(&tmp_work_info->work,
+			  glink_lbsrv_queue_rx_intent_worker);
+	tmp_work_info->work_ch_info = tmp_ch_info;
+
+	mutex_lock(&tmp_ch_info->ch_info_lock);
+	if (tmp_ch_info->fully_opened) {
+		mutex_unlock(&tmp_ch_info->ch_info_lock);
+		delay_ms = calc_delay_ms(tmp_work_info->random_delay,
+					 tmp_work_info->delay_ms);
+		queue_delayed_work(glink_lbsrv_wq, &tmp_work_info->work,
+				   msecs_to_jiffies(delay_ms));
+
+		if (tmp_work_info->random_delay || tmp_work_info->delay_ms)
+			glink_lbsrv_send_response(rx_ch_info->handle, req_id,
+					QUEUE_RX_INTENT_CONFIG, 0);
+	} else {
+		tmp_work_info->deferred = true;
+		tmp_ch_info->queue_rx_intent_work_info = tmp_work_info;
+		mutex_unlock(&tmp_ch_info->ch_info_lock);
+
+		glink_lbsrv_send_response(rx_ch_info->handle, req_id,
+				QUEUE_RX_INTENT_CONFIG, 0);
+	}
+
+	return 0;
+}
+
+int glink_lbsrv_handle_tx_config_req(struct ch_info *rx_ch_info,
+				     struct tx_config_req req)
+{
+	struct ch_info *tmp_ch_info;
+	char name[MAX_NAME_LEN];
+	char *temp;
+
+	strlcpy(name, req.ch_name, MAX_NAME_LEN);
+	if (!strcmp(rx_ch_info->transport, "lloop")) {
+		temp = strnstr(name, "_CLNT", MAX_NAME_LEN);
+		if (temp)
+			*temp = '\0';
+		strlcat(name, "_SRV", MAX_NAME_LEN);
+	}
+	LBSRV_INFO("%s:%s:%s %s: echo_count[%d] transform[%d]\n",
+		   rx_ch_info->transport, rx_ch_info->edge, name, __func__,
+		   req.echo_count, req.transform_type);
+	tmp_ch_info = lookup_ch_list(name, rx_ch_info->edge,
+				     rx_ch_info->transport, DATA);
+	if (!tmp_ch_info) {
+		LBSRV_ERR("%s:%s:%s %s: Channel info not found\n",
+				rx_ch_info->transport, rx_ch_info->edge,
+				name, __func__);
+		return -EINVAL;
+	}
+
+	mutex_lock(&tmp_ch_info->ch_info_lock);
+	tmp_ch_info->tx_config.random_delay = req.random_delay;
+	tmp_ch_info->tx_config.delay_ms = req.delay_ms;
+	tmp_ch_info->tx_config.echo_count = req.echo_count;
+	tmp_ch_info->tx_config.transform_type = req.transform_type;
+	mutex_unlock(&tmp_ch_info->ch_info_lock);
+	return 0;
+}
+
+int glink_lbsrv_handle_rx_done_config_req(struct ch_info *rx_ch_info,
+					  struct rx_done_config_req req)
+{
+	struct ch_info *tmp_ch_info;
+	char name[MAX_NAME_LEN];
+	char *temp;
+
+	strlcpy(name, req.ch_name, MAX_NAME_LEN);
+	if (!strcmp(rx_ch_info->transport, "lloop")) {
+		temp = strnstr(name, "_CLNT", MAX_NAME_LEN);
+		if (temp)
+			*temp = '\0';
+		strlcat(name, "_SRV", MAX_NAME_LEN);
+	}
+	LBSRV_INFO("%s:%s:%s %s: delay_ms[%d] random_delay[%d]\n",
+		   rx_ch_info->transport, rx_ch_info->edge, name,
+		   __func__, req.delay_ms, req.random_delay);
+	tmp_ch_info = lookup_ch_list(name, rx_ch_info->edge,
+				     rx_ch_info->transport, DATA);
+	if (!tmp_ch_info) {
+		LBSRV_ERR("%s:%s:%s %s: Channel info not found\n",
+				rx_ch_info->transport, rx_ch_info->edge,
+				name, __func__);
+		return -EINVAL;
+	}
+
+	mutex_lock(&tmp_ch_info->ch_info_lock);
+	tmp_ch_info->rx_done_config.random_delay = req.random_delay;
+	tmp_ch_info->rx_done_config.delay_ms = req.delay_ms;
+	mutex_unlock(&tmp_ch_info->ch_info_lock);
+	return 0;
+}
+
+/**
+ * glink_lbsrv_handle_req() - handle the request commands received from clients
+ * @rx_ch_info:	Channel info on which the request is received
+ * @pkt:	Request structure received from the client
+ *
+ * This function handles all supported request types received from a client
+ * and sends the response back to the client
+ */
+void glink_lbsrv_handle_req(struct ch_info *rx_ch_info, struct req pkt)
+{
+	int ret;
+
+	LBSRV_INFO("%s:%s:%s %s: Request packet type[%d]:id[%d]\n",
+			rx_ch_info->transport, rx_ch_info->edge,
+			rx_ch_info->name, __func__, pkt.hdr.req_type,
+			pkt.hdr.req_id);
+	switch (pkt.hdr.req_type) {
+	case OPEN:
+		ret = glink_lbsrv_handle_open_req(rx_ch_info,
+						  pkt.payload.open);
+		break;
+	case CLOSE:
+		ret = glink_lbsrv_handle_close_req(rx_ch_info,
+						   pkt.payload.close);
+		break;
+	case QUEUE_RX_INTENT_CONFIG:
+		ret = glink_lbsrv_handle_queue_rx_intent_config_req(
+			rx_ch_info, pkt.payload.q_rx_int_conf, pkt.hdr.req_id);
+		break;
+	case TX_CONFIG:
+		ret = glink_lbsrv_handle_tx_config_req(rx_ch_info,
+						       pkt.payload.tx_conf);
+		break;
+	case RX_DONE_CONFIG:
+		ret = glink_lbsrv_handle_rx_done_config_req(rx_ch_info,
+						pkt.payload.rx_done_conf);
+		break;
+	default:
+		LBSRV_ERR("%s:%s:%s %s: Invalid Request type [%d]\n",
+				rx_ch_info->transport, rx_ch_info->edge,
+				rx_ch_info->name, __func__, pkt.hdr.req_type);
+		ret = -1;
+		break;
+	}
+
+	if (pkt.hdr.req_type != QUEUE_RX_INTENT_CONFIG)
+		glink_lbsrv_send_response(rx_ch_info->handle, pkt.hdr.req_id,
+				pkt.hdr.req_type, ret);
+}
+
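+/**
+ * glink_lbsrv_vbuf_provider() - map an offset in a vector to a buffer address
+ * @iovec:	pointer to the struct lbsrv_vec describing the vector
+ * @offset:	offset into the logical packet
+ * @buf_size:	out parameter, contiguous bytes available at the returned
+ *		address
+ *
+ * Return: address of the data at @offset, or NULL if @offset is past the
+ *	   end of the vector
+ */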
+static void *glink_lbsrv_vbuf_provider(void *iovec, size_t offset,
+				       size_t *buf_size)
+{
+	struct lbsrv_vec *tmp_vec_info = (struct lbsrv_vec *)iovec;
+	uint32_t i;
+	size_t temp_size = 0;
+
+	for (i = 0; i < tmp_vec_info->num_bufs; i++) {
+		temp_size += tmp_vec_info->vec[i].iov_len;
+		if (offset >= temp_size)
+			continue;
+		*buf_size = temp_size - offset;
+		return (void *)tmp_vec_info->vec[i].iov_base +
+			tmp_vec_info->vec[i].iov_len - *buf_size;
+	}
+	*buf_size = 0;
+	return NULL;
+}
+
+static void glink_lbsrv_free_data(void *data, uint32_t buf_type)
+{
+	struct lbsrv_vec *tmp_vec_info;
+	uint32_t i;
+
+	if (buf_type == LINEAR) {
+		kfree(data);
+	} else {
+		tmp_vec_info = (struct lbsrv_vec *)data;
+		for (i = 0; i < tmp_vec_info->num_bufs; i++) {
+			kfree(tmp_vec_info->vec[i].iov_base);
+			tmp_vec_info->vec[i].iov_base = NULL;
+		}
+		kfree(tmp_vec_info);
+	}
+}
+
+static void *copy_linear_data(struct rx_work_info *tmp_rx_work_info)
+{
+	char *data;
+	struct ch_info *rx_ch_info = tmp_rx_work_info->rx_ch_info;
+
+	data = kmalloc(tmp_rx_work_info->size, GFP_KERNEL);
+	if (data)
+		memcpy(data, tmp_rx_work_info->ptr, tmp_rx_work_info->size);
+	else
+		LBSRV_ERR("%s:%s:%s %s: Error allocating the data\n",
+				rx_ch_info->transport, rx_ch_info->edge,
+				rx_ch_info->name, __func__);
+	return data;
+}
+
+static void *copy_vector_data(struct rx_work_info *tmp_rx_work_info)
+{
+	uint32_t num_bufs = 0;
+	struct ch_info *rx_ch_info = tmp_rx_work_info->rx_ch_info;
+	struct lbsrv_vec *tmp_vec_info;
+	void *buf, *pbuf, *dest_buf;
+	size_t offset = 0;
+	size_t buf_size;
+	uint32_t i;
+
+	do {
+		if (tmp_rx_work_info->vbuf_provider)
+			buf = tmp_rx_work_info->vbuf_provider(
+				tmp_rx_work_info->ptr, offset, &buf_size);
+		else
+			buf = tmp_rx_work_info->pbuf_provider(
+				tmp_rx_work_info->ptr, offset, &buf_size);
+		if (!buf)
+			break;
+		offset += buf_size;
+		num_bufs++;
+	} while (buf);
+
+	tmp_vec_info = kzalloc(sizeof(*tmp_vec_info) +
+			       num_bufs * sizeof(struct kvec), GFP_KERNEL);
+	if (!tmp_vec_info) {
+		LBSRV_ERR("%s:%s:%s %s: Error allocating vector info\n",
+			  rx_ch_info->transport, rx_ch_info->edge,
+			  rx_ch_info->name, __func__);
+		return NULL;
+	}
+	tmp_vec_info->num_bufs = num_bufs;
+
+	offset = 0;
+	for (i = 0; i < num_bufs; i++) {
+		if (tmp_rx_work_info->vbuf_provider) {
+			buf = tmp_rx_work_info->vbuf_provider(
+				tmp_rx_work_info->ptr, offset, &buf_size);
+		} else {
+			pbuf = tmp_rx_work_info->pbuf_provider(
+				tmp_rx_work_info->ptr, offset, &buf_size);
+			buf = phys_to_virt((unsigned long)pbuf);
+		}
+		dest_buf = kmalloc(buf_size, GFP_KERNEL);
+		if (!dest_buf) {
+			LBSRV_ERR("%s:%s:%s %s: Error allocating data\n",
+				  rx_ch_info->transport, rx_ch_info->edge,
+				  rx_ch_info->name, __func__);
+			goto out_copy_vector_data;
+		}
+		memcpy(dest_buf, buf, buf_size);
+		tmp_vec_info->vec[i].iov_base = dest_buf;
+		tmp_vec_info->vec[i].iov_len = buf_size;
+		offset += buf_size;
+	}
+	return tmp_vec_info;
+out_copy_vector_data:
+	glink_lbsrv_free_data((void *)tmp_vec_info, VECTOR);
+	return NULL;
+}
+
+static void *glink_lbsrv_copy_data(struct rx_work_info *tmp_rx_work_info)
+{
+	if (tmp_rx_work_info->buf_type == LINEAR)
+		return copy_linear_data(tmp_rx_work_info);
+	else
+		return copy_vector_data(tmp_rx_work_info);
+}
+
+static int glink_lbsrv_handle_data(struct rx_work_info *tmp_rx_work_info)
+{
+	void *data;
+	int ret;
+	struct ch_info *rx_ch_info = tmp_rx_work_info->rx_ch_info;
+	struct tx_work_info *tmp_tx_work_info;
+	struct rx_done_work_info *tmp_rx_done_work_info;
+	uint32_t delay_ms;
+
+	data = glink_lbsrv_copy_data(tmp_rx_work_info);
+	if (!data) {
+		ret = -ENOMEM;
+		goto out_handle_data;
+	}
+
+	tmp_rx_done_work_info = kmalloc(sizeof(struct rx_done_work_info),
+					GFP_KERNEL);
+	if (!tmp_rx_done_work_info) {
+		LBSRV_ERR("%s:%s:%s %s: Error allocating rx_done_work_info\n",
+			  rx_ch_info->transport, rx_ch_info->edge,
+			  rx_ch_info->name, __func__);
+		glink_lbsrv_free_data(data, tmp_rx_work_info->buf_type);
+		ret = -ENOMEM;
+		goto out_handle_data;
+	}
+	INIT_DELAYED_WORK(&tmp_rx_done_work_info->work,
+			  glink_lbsrv_rx_done_worker);
+	tmp_rx_done_work_info->rx_done_ch_info = rx_ch_info;
+	tmp_rx_done_work_info->ptr = tmp_rx_work_info->ptr;
+	delay_ms = calc_delay_ms(rx_ch_info->rx_done_config.random_delay,
+				 rx_ch_info->rx_done_config.delay_ms);
+	queue_delayed_work(glink_lbsrv_wq, &tmp_rx_done_work_info->work,
+			   msecs_to_jiffies(delay_ms));
+
+	tmp_tx_work_info = kmalloc(sizeof(struct tx_work_info), GFP_KERNEL);
+	if (!tmp_tx_work_info) {
+		LBSRV_ERR("%s:%s:%s %s: Error allocating tx_work_info\n",
+				rx_ch_info->transport, rx_ch_info->edge,
+				rx_ch_info->name, __func__);
+		glink_lbsrv_free_data(data, tmp_rx_work_info->buf_type);
+		return -ENOMEM;
+	}
+	mutex_lock(&rx_ch_info->ch_info_lock);
+	tmp_tx_work_info->tx_config.random_delay =
+					rx_ch_info->tx_config.random_delay;
+	tmp_tx_work_info->tx_config.delay_ms = rx_ch_info->tx_config.delay_ms;
+	tmp_tx_work_info->tx_config.echo_count =
+					rx_ch_info->tx_config.echo_count;
+	tmp_tx_work_info->tx_config.transform_type =
+					rx_ch_info->tx_config.transform_type;
+	mutex_unlock(&rx_ch_info->ch_info_lock);
+	INIT_DELAYED_WORK(&tmp_tx_work_info->work, glink_lbsrv_tx_worker);
+	tmp_tx_work_info->tx_ch_info = rx_ch_info;
+	tmp_tx_work_info->data = data;
+	tmp_tx_work_info->tracer_pkt = tmp_rx_work_info->tracer_pkt;
+	tmp_tx_work_info->buf_type = tmp_rx_work_info->buf_type;
+	tmp_tx_work_info->size = tmp_rx_work_info->size;
+	if (tmp_tx_work_info->buf_type == VECTOR)
+		tmp_tx_work_info->vbuf_provider = glink_lbsrv_vbuf_provider;
+	else
+		tmp_tx_work_info->vbuf_provider = NULL;
+	tmp_tx_work_info->pbuf_provider = NULL;
+	delay_ms = calc_delay_ms(tmp_tx_work_info->tx_config.random_delay,
+				 tmp_tx_work_info->tx_config.delay_ms);
+	queue_delayed_work(glink_lbsrv_wq, &tmp_tx_work_info->work,
+			   msecs_to_jiffies(delay_ms));
+	return 0;
+out_handle_data:
+	glink_rx_done(rx_ch_info->handle, tmp_rx_work_info->ptr, false);
+	return ret;
+}
+
+void glink_lpbsrv_notify_rx(void *handle, const void *priv,
+			    const void *pkt_priv, const void *ptr, size_t size)
+{
+	struct rx_work_info *tmp_work_info;
+	struct ch_info *rx_ch_info = (struct ch_info *)priv;
+
+	LBSRV_INFO(
+		"%s:%s:%s %s: end (Success) RX priv[%p] data[%p] size[%zu]\n",
+		rx_ch_info->transport, rx_ch_info->edge, rx_ch_info->name,
+		__func__, pkt_priv, (char *)ptr, size);
+	tmp_work_info = kzalloc(sizeof(struct rx_work_info), GFP_ATOMIC);
+	if (!tmp_work_info) {
+		LBSRV_ERR("%s:%s:%s %s: Error allocating rx_work\n",
+				rx_ch_info->transport, rx_ch_info->edge,
+				rx_ch_info->name, __func__);
+		return;
+	}
+
+	tmp_work_info->rx_ch_info = rx_ch_info;
+	tmp_work_info->pkt_priv = (void *)pkt_priv;
+	tmp_work_info->ptr = (void *)ptr;
+	tmp_work_info->buf_type = LINEAR;
+	tmp_work_info->size = size;
+	INIT_DELAYED_WORK(&tmp_work_info->work, glink_lbsrv_rx_worker);
+	queue_delayed_work(glink_lbsrv_wq, &tmp_work_info->work, 0);
+}
+
+void glink_lpbsrv_notify_rxv(void *handle, const void *priv,
+	const void *pkt_priv, void *ptr, size_t size,
+	void * (*vbuf_provider)(void *iovec, size_t offset, size_t *size),
+	void * (*pbuf_provider)(void *iovec, size_t offset, size_t *size))
+{
+	struct rx_work_info *tmp_work_info;
+	struct ch_info *rx_ch_info = (struct ch_info *)priv;
+
+	LBSRV_INFO("%s:%s:%s %s: priv[%p] data[%p] size[%zu]\n",
+		   rx_ch_info->transport, rx_ch_info->edge, rx_ch_info->name,
+		   __func__, pkt_priv, (char *)ptr, size);
+	tmp_work_info = kzalloc(sizeof(struct rx_work_info), GFP_ATOMIC);
+	if (!tmp_work_info) {
+		LBSRV_ERR("%s:%s:%s %s: Error allocating rx_work\n",
+				rx_ch_info->transport, rx_ch_info->edge,
+				rx_ch_info->name, __func__);
+		return;
+	}
+
+	tmp_work_info->rx_ch_info = rx_ch_info;
+	tmp_work_info->pkt_priv = (void *)pkt_priv;
+	tmp_work_info->ptr = (void *)ptr;
+	tmp_work_info->buf_type = VECTOR;
+	tmp_work_info->size = size;
+	tmp_work_info->vbuf_provider = vbuf_provider;
+	tmp_work_info->pbuf_provider = pbuf_provider;
+	INIT_DELAYED_WORK(&tmp_work_info->work, glink_lbsrv_rx_worker);
+	queue_delayed_work(glink_lbsrv_wq, &tmp_work_info->work, 0);
+}
+
+void glink_lpbsrv_notify_rx_tp(void *handle, const void *priv,
+			    const void *pkt_priv, const void *ptr, size_t size)
+{
+	struct rx_work_info *tmp_work_info;
+	struct ch_info *rx_ch_info = (struct ch_info *)priv;
+
+	LBSRV_INFO(
+		"%s:%s:%s %s: end (Success) RX priv[%p] data[%p] size[%zu]\n",
+		rx_ch_info->transport, rx_ch_info->edge, rx_ch_info->name,
+		__func__, pkt_priv, (char *)ptr, size);
+	tracer_pkt_log_event((void *)ptr, LOOPBACK_SRV_RX);
+	tmp_work_info = kmalloc(sizeof(struct rx_work_info), GFP_ATOMIC);
+	if (!tmp_work_info) {
+		LBSRV_ERR("%s:%s:%s %s: Error allocating rx_work\n",
+				rx_ch_info->transport, rx_ch_info->edge,
+				rx_ch_info->name, __func__);
+		return;
+	}
+
+	tmp_work_info->rx_ch_info = rx_ch_info;
+	tmp_work_info->pkt_priv = (void *)pkt_priv;
+	tmp_work_info->ptr = (void *)ptr;
+	tmp_work_info->tracer_pkt = true;
+	tmp_work_info->buf_type = LINEAR;
+	tmp_work_info->size = size;
+	INIT_DELAYED_WORK(&tmp_work_info->work, glink_lbsrv_rx_worker);
+	queue_delayed_work(glink_lbsrv_wq, &tmp_work_info->work, 0);
+}
+
+void glink_lpbsrv_notify_tx_done(void *handle, const void *priv,
+				 const void *pkt_priv, const void *ptr)
+{
+	struct ch_info *tx_done_ch_info = (struct ch_info *)priv;
+
+	LBSRV_INFO("%s:%s:%s %s: end (Success) TX_DONE ptr[%p]\n",
+			tx_done_ch_info->transport, tx_done_ch_info->edge,
+			tx_done_ch_info->name, __func__, ptr);
+
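+	/*
+	 * pkt_priv carries the buffer type of the transmitted packet (see
+	 * glink_lbsrv_send_response()); the sentinel value 0xFFFFFFFF marks
+	 * buffers that must not be freed here.
+	 */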
+	if (pkt_priv != (const void *)0xFFFFFFFF)
+		glink_lbsrv_free_data((void *)ptr,
+				(uint32_t)(uintptr_t)pkt_priv);
+}
+
+void glink_lpbsrv_notify_state(void *handle, const void *priv, unsigned event)
+{
+	int ret;
+	uint32_t delay_ms;
+	struct ch_info *tmp_ch_info = (struct ch_info *)priv;
+	struct queue_rx_intent_work_info *tmp_work_info = NULL;
+
+	LBSRV_INFO("%s:%s:%s %s: event[%d]\n",
+			tmp_ch_info->transport, tmp_ch_info->edge,
+			tmp_ch_info->name, __func__, event);
+	if (tmp_ch_info->type == CTL) {
+		if (event == GLINK_CONNECTED) {
+			ret = glink_queue_rx_intent(handle,
+					priv, sizeof(struct req));
+			LBSRV_INFO(
+				"%s:%s:%s %s: QUEUE RX INTENT size[%zu] ret[%d]\n",
+				tmp_ch_info->transport,
+				tmp_ch_info->edge,
+				tmp_ch_info->name,
+				__func__, sizeof(struct req), ret);
+		} else if (event == GLINK_LOCAL_DISCONNECTED) {
+			queue_delayed_work(glink_lbsrv_wq,
+					&tmp_ch_info->open_work,
+					msecs_to_jiffies(0));
+		} else if (event == GLINK_REMOTE_DISCONNECTED)
+			if (!IS_ERR_OR_NULL(tmp_ch_info->handle))
+				queue_delayed_work(glink_lbsrv_wq,
+					&tmp_ch_info->close_work, 0);
+	} else if (tmp_ch_info->type == DATA) {
+		if (event == GLINK_CONNECTED) {
+			mutex_lock(&tmp_ch_info->ch_info_lock);
+			tmp_ch_info->fully_opened = true;
+			tmp_work_info = tmp_ch_info->queue_rx_intent_work_info;
+			tmp_ch_info->queue_rx_intent_work_info = NULL;
+			mutex_unlock(&tmp_ch_info->ch_info_lock);
+
+			if (tmp_work_info) {
+				delay_ms = calc_delay_ms(
+						tmp_work_info->random_delay,
+						tmp_work_info->delay_ms);
+				queue_delayed_work(glink_lbsrv_wq,
+						&tmp_work_info->work,
+						msecs_to_jiffies(delay_ms));
+			}
+		} else if (event == GLINK_LOCAL_DISCONNECTED ||
+			event == GLINK_REMOTE_DISCONNECTED) {
+			mutex_lock(&tmp_ch_info->ch_info_lock);
+			tmp_ch_info->fully_opened = false;
+			/*
+			 * If the state has changed to LOCAL_DISCONNECTED,
+			 * the channel has been fully closed and can now be
+			 * re-opened. If the handle value is -EBUSY, an earlier
+			 * open request failed because the channel was in the
+			 * process of closing. Requeue the work from the open
+			 * request.
+			 */
+			if (event == GLINK_LOCAL_DISCONNECTED &&
+				tmp_ch_info->handle == ERR_PTR(-EBUSY)) {
+				queue_delayed_work(glink_lbsrv_wq,
+				&tmp_ch_info->open_work,
+				msecs_to_jiffies(0));
+			}
+			if (event == GLINK_REMOTE_DISCONNECTED)
+				if (!IS_ERR_OR_NULL(tmp_ch_info->handle))
+					queue_delayed_work(
+					glink_lbsrv_wq,
+					&tmp_ch_info->close_work, 0);
+			mutex_unlock(&tmp_ch_info->ch_info_lock);
+		}
+	}
+}
+
+bool glink_lpbsrv_rmt_rx_intent_req_cb(void *handle, const void *priv,
+				       size_t sz)
+{
+	struct rmt_rx_intent_req_work_info *tmp_work_info;
+	struct ch_info *tmp_ch_info = (struct ch_info *)priv;
+
+	LBSRV_INFO("%s:%s:%s %s: QUEUE RX INTENT to receive size[%zu]\n",
+		   tmp_ch_info->transport, tmp_ch_info->edge, tmp_ch_info->name,
+		   __func__, sz);
+
+	tmp_work_info = kmalloc(sizeof(struct rmt_rx_intent_req_work_info),
+				GFP_ATOMIC);
+	if (!tmp_work_info) {
+		LBSRV_ERR("%s:%s:%s %s: Error allocating rx_work\n",
+				tmp_ch_info->transport, tmp_ch_info->edge,
+				tmp_ch_info->name, __func__);
+		return false;
+	}
+	tmp_work_info->req_intent_size = sz;
+	tmp_work_info->work_ch_info = tmp_ch_info;
+
+	INIT_DELAYED_WORK(&tmp_work_info->work,
+			  glink_lbsrv_rmt_rx_intent_req_worker);
+	queue_delayed_work(glink_lbsrv_wq, &tmp_work_info->work, 0);
+	return true;
+}
+
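+/**
+ * glink_lpbsrv_notify_rx_sigs() - Callback to handle remote signal changes
+ * @handle:	Handle of the channel whose signals changed.
+ * @priv:	Private data (the channel's ch_info structure).
+ * @old_sigs:	Signal values before the change.
+ * @new_sigs:	Signal values after the change.
+ *
+ * The new signal values are looped back to the remote side.
+ */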
+void glink_lpbsrv_notify_rx_sigs(void *handle, const void *priv,
+			uint32_t old_sigs, uint32_t new_sigs)
+{
+	LBSRV_INFO("%s: old_sigs[0x%x] new_sigs[0x%x]\n",
+				__func__, old_sigs, new_sigs);
+	glink_sigs_set(handle, new_sigs);
+}
+
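+/**
+ * glink_lbsrv_rx_worker() - Worker to process received packets
+ * @work:	Pointer to the work item in the rx_work_info.
+ *
+ * On a control channel, the received request is copied out, the RX buffer is
+ * returned, a fresh intent is queued and the request is handled. On a data
+ * channel, the received data is handed to glink_lbsrv_handle_data().
+ */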
+static void glink_lbsrv_rx_worker(struct work_struct *work)
+{
+	struct delayed_work *rx_work = to_delayed_work(work);
+	struct rx_work_info *tmp_rx_work_info =
+		container_of(rx_work, struct rx_work_info, work);
+	struct ch_info *rx_ch_info = tmp_rx_work_info->rx_ch_info;
+	struct req request_pkt;
+	int ret;
+
+	if (rx_ch_info->type == CTL) {
+		request_pkt = *((struct req *)tmp_rx_work_info->ptr);
+		glink_rx_done(rx_ch_info->handle, tmp_rx_work_info->ptr, false);
+		ret = glink_queue_rx_intent(rx_ch_info->handle, rx_ch_info,
+					    sizeof(struct req));
+		LBSRV_INFO("%s:%s:%s %s: QUEUE RX INTENT size[%zu] ret[%d]\n",
+				rx_ch_info->transport, rx_ch_info->edge,
+				rx_ch_info->name, __func__,
+				sizeof(struct req), ret);
+		glink_lbsrv_handle_req(rx_ch_info, request_pkt);
+	} else {
+		ret = glink_lbsrv_handle_data(tmp_rx_work_info);
+	}
+	kfree(tmp_rx_work_info);
+}
+
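+/**
+ * glink_lbsrv_open_worker() - Worker to open a loopback channel
+ * @work:	Pointer to the open work item in the ch_info.
+ *
+ * Opens the channel with the loopback server callbacks unless a valid
+ * handle already exists.
+ */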
+static void glink_lbsrv_open_worker(struct work_struct *work)
+{
+	struct delayed_work *open_work = to_delayed_work(work);
+	struct ch_info *tmp_ch_info =
+		container_of(open_work, struct ch_info, open_work);
+	struct glink_open_config open_cfg;
+
+	LBSRV_INFO("%s: start\n", __func__);
+	mutex_lock(&tmp_ch_info->ch_info_lock);
+	if (!IS_ERR_OR_NULL(tmp_ch_info->handle)) {
+		mutex_unlock(&tmp_ch_info->ch_info_lock);
+		return;
+	}
+
+	memset(&open_cfg, 0, sizeof(struct glink_open_config));
+	open_cfg.transport = tmp_ch_info->transport;
+	open_cfg.edge = tmp_ch_info->edge;
+	open_cfg.name = tmp_ch_info->name;
+
+	open_cfg.notify_rx = glink_lpbsrv_notify_rx;
+	if (tmp_ch_info->type == DATA)
+		open_cfg.notify_rxv = glink_lpbsrv_notify_rxv;
+	open_cfg.notify_tx_done = glink_lpbsrv_notify_tx_done;
+	open_cfg.notify_state = glink_lpbsrv_notify_state;
+	open_cfg.notify_rx_intent_req = glink_lpbsrv_rmt_rx_intent_req_cb;
+	open_cfg.notify_rx_sigs = glink_lpbsrv_notify_rx_sigs;
+	open_cfg.notify_rx_abort = NULL;
+	open_cfg.notify_tx_abort = NULL;
+	open_cfg.notify_rx_tracer_pkt = glink_lpbsrv_notify_rx_tp;
+	open_cfg.priv = tmp_ch_info;
+
+	tmp_ch_info->handle = glink_open(&open_cfg);
+	if (IS_ERR_OR_NULL(tmp_ch_info->handle)) {
+		LBSRV_ERR("%s:%s:%s %s: unable to open channel\n",
+			  open_cfg.transport, open_cfg.edge, open_cfg.name,
+			  __func__);
+		mutex_unlock(&tmp_ch_info->ch_info_lock);
+		return;
+	}
+	mutex_unlock(&tmp_ch_info->ch_info_lock);
+	LBSRV_INFO("%s:%s:%s %s: Open complete\n", open_cfg.transport,
+			open_cfg.edge, open_cfg.name, __func__);
+}
+
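+/**
+ * glink_lbsrv_close_worker() - Worker to close a loopback channel
+ * @work:	Pointer to the close work item in the ch_info.
+ */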
+static void glink_lbsrv_close_worker(struct work_struct *work)
+{
+	struct delayed_work *close_work = to_delayed_work(work);
+	struct ch_info *tmp_ch_info =
+		container_of(close_work, struct ch_info, close_work);
+
+	mutex_lock(&tmp_ch_info->ch_info_lock);
+	if (!IS_ERR_OR_NULL(tmp_ch_info->handle)) {
+		glink_close(tmp_ch_info->handle);
+		tmp_ch_info->handle = NULL;
+	}
+	mutex_unlock(&tmp_ch_info->ch_info_lock);
+	LBSRV_INFO("%s:%s:%s %s: Close complete\n", tmp_ch_info->transport,
+			tmp_ch_info->edge, tmp_ch_info->name, __func__);
+}
+
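+/**
+ * glink_lbsrv_rmt_rx_intent_req_worker() - Worker to queue a remotely
+ *					    requested RX intent
+ * @work:	Pointer to the work item in the rmt_rx_intent_req_work_info.
+ */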
+static void glink_lbsrv_rmt_rx_intent_req_worker(struct work_struct *work)
+{
+	struct delayed_work *rmt_rx_intent_req_work = to_delayed_work(work);
+	struct rmt_rx_intent_req_work_info *tmp_work_info =
+		container_of(rmt_rx_intent_req_work,
+			struct rmt_rx_intent_req_work_info, work);
+	struct ch_info *tmp_ch_info = tmp_work_info->work_ch_info;
+	int ret;
+
+	mutex_lock(&tmp_ch_info->ch_info_lock);
+	if (IS_ERR_OR_NULL(tmp_ch_info->handle)) {
+		mutex_unlock(&tmp_ch_info->ch_info_lock);
+		LBSRV_ERR("%s:%s:%s %s: Invalid CH handle\n",
+				  tmp_ch_info->transport,
+				  tmp_ch_info->edge,
+				  tmp_ch_info->name, __func__);
+		kfree(tmp_work_info);
+		return;
+	}
+	ret = glink_queue_rx_intent(tmp_ch_info->handle,
+			(void *)tmp_ch_info, tmp_work_info->req_intent_size);
+	mutex_unlock(&tmp_ch_info->ch_info_lock);
+	LBSRV_INFO("%s:%s:%s %s: QUEUE RX INTENT size[%zu] ret[%d]\n",
+		   tmp_ch_info->transport, tmp_ch_info->edge,
+		   tmp_ch_info->name, __func__, tmp_work_info->req_intent_size,
+		   ret);
+	if (ret < 0) {
+		LBSRV_ERR("%s:%s:%s %s: Err %d q'ing intent size %zu\n",
+			  tmp_ch_info->transport, tmp_ch_info->edge,
+			  tmp_ch_info->name, __func__, ret,
+			  tmp_work_info->req_intent_size);
+	}
+	kfree(tmp_work_info);
+}
+
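+/**
+ * glink_lbsrv_queue_rx_intent_worker() - Worker to queue a batch of RX
+ *					  intents
+ * @work:	Pointer to the work item in the queue_rx_intent_work_info.
+ *
+ * Queues the configured number of intents, re-scheduling itself between
+ * intents when a delay is configured. Once all intents are queued and the
+ * response was neither deferred nor delayed, the response is sent to the
+ * client.
+ */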
+static void glink_lbsrv_queue_rx_intent_worker(struct work_struct *work)
+{
+	struct delayed_work *queue_rx_intent_work = to_delayed_work(work);
+	struct queue_rx_intent_work_info *tmp_work_info =
+		container_of(queue_rx_intent_work,
+			struct queue_rx_intent_work_info, work);
+	struct ch_info *tmp_ch_info = tmp_work_info->work_ch_info;
+	int ret;
+	uint32_t delay_ms;
+
+	while (1) {
+		mutex_lock(&tmp_ch_info->ch_info_lock);
+		if (IS_ERR_OR_NULL(tmp_ch_info->handle)) {
+			mutex_unlock(&tmp_ch_info->ch_info_lock);
+			return;
+		}
+
+		ret = glink_queue_rx_intent(tmp_ch_info->handle,
+			(void *)tmp_ch_info, tmp_work_info->intent_size);
+		mutex_unlock(&tmp_ch_info->ch_info_lock);
+		if (ret < 0) {
+			LBSRV_ERR("%s:%s:%s %s: Err %d q'ing intent size %d\n",
+				  tmp_ch_info->transport, tmp_ch_info->edge,
+				  tmp_ch_info->name, __func__, ret,
+				  tmp_work_info->intent_size);
+			kfree(tmp_work_info);
+			return;
+		}
+		LBSRV_INFO("%s:%s:%s %s: Queued rx intent of size %d\n",
+			   tmp_ch_info->transport, tmp_ch_info->edge,
+			   tmp_ch_info->name, __func__,
+			   tmp_work_info->intent_size);
+		tmp_work_info->num_intents--;
+		if (!tmp_work_info->num_intents)
+			break;
+
+		delay_ms = calc_delay_ms(tmp_work_info->random_delay,
+					 tmp_work_info->delay_ms);
+		if (delay_ms) {
+			queue_delayed_work(glink_lbsrv_wq, &tmp_work_info->work,
+					   msecs_to_jiffies(delay_ms));
+			return;
+		}
+	}
+	LBSRV_INFO("%s:%s:%s %s: Queued all intents. size:%d\n",
+		   tmp_ch_info->transport, tmp_ch_info->edge, tmp_ch_info->name,
+		   __func__, tmp_work_info->intent_size);
+
+	if (!tmp_work_info->deferred && !tmp_work_info->random_delay &&
+			!tmp_work_info->delay_ms)
+		glink_lbsrv_send_response(tmp_work_info->req_ch_info->handle,
+				tmp_work_info->req_id, QUEUE_RX_INTENT_CONFIG,
+				0);
+	kfree(tmp_work_info);
+}
+
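+/**
+ * glink_lbsrv_rx_done_worker() - Worker to return a received buffer
+ * @work:	Pointer to the work item in the rx_done_work_info.
+ */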
+static void glink_lbsrv_rx_done_worker(struct work_struct *work)
+{
+	struct delayed_work *rx_done_work = to_delayed_work(work);
+	struct rx_done_work_info *tmp_work_info =
+		container_of(rx_done_work, struct rx_done_work_info, work);
+	struct ch_info *tmp_ch_info = tmp_work_info->rx_done_ch_info;
+
+	mutex_lock(&tmp_ch_info->ch_info_lock);
+	if (!IS_ERR_OR_NULL(tmp_ch_info->handle))
+		glink_rx_done(tmp_ch_info->handle, tmp_work_info->ptr, false);
+	mutex_unlock(&tmp_ch_info->ch_info_lock);
+	kfree(tmp_work_info);
+}
+
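+/**
+ * glink_lbsrv_tx_worker() - Worker to transmit (echo) data to the client
+ * @work:	Pointer to the work item in the tx_work_info.
+ *
+ * Transmits the data echo_count times, using glink_tx() for linear buffers
+ * and glink_txv() for vector buffers. On -EAGAIN the transmit is retried
+ * without consuming an echo; when a delay is configured, the worker
+ * re-schedules itself between echoes.
+ */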
+static void glink_lbsrv_tx_worker(struct work_struct *work)
+{
+	struct delayed_work *tx_work = to_delayed_work(work);
+	struct tx_work_info *tmp_work_info =
+		container_of(tx_work, struct tx_work_info, work);
+	struct ch_info *tmp_ch_info = tmp_work_info->tx_ch_info;
+	int ret;
+	uint32_t delay_ms;
+	uint32_t flags;
+
+	LBSRV_INFO("%s:%s:%s %s: start TX data[%p] size[%zu]\n",
+		   tmp_ch_info->transport, tmp_ch_info->edge, tmp_ch_info->name,
+		   __func__, tmp_work_info->data, tmp_work_info->size);
+	while (1) {
+		mutex_lock(&tmp_ch_info->ch_info_lock);
+		if (IS_ERR_OR_NULL(tmp_ch_info->handle)) {
+			mutex_unlock(&tmp_ch_info->ch_info_lock);
+			return;
+		}
+
+		flags = 0;
+		if (tmp_work_info->tracer_pkt) {
+			flags |= GLINK_TX_TRACER_PKT;
+			tracer_pkt_log_event(tmp_work_info->data,
+					     LOOPBACK_SRV_TX);
+		}
+		if (tmp_work_info->buf_type == LINEAR)
+			ret = glink_tx(tmp_ch_info->handle,
+			       (tmp_work_info->tx_config.echo_count > 1 ?
+					(void *)0xFFFFFFFF :
+					(void *)(uintptr_t)
+						tmp_work_info->buf_type),
+			       (void *)tmp_work_info->data,
+			       tmp_work_info->size, flags);
+		else
+			ret = glink_txv(tmp_ch_info->handle,
+				(tmp_work_info->tx_config.echo_count > 1 ?
+					(void *)0xFFFFFFFF :
+					(void *)(uintptr_t)
+						tmp_work_info->buf_type),
+				(void *)tmp_work_info->data,
+				tmp_work_info->size,
+				tmp_work_info->vbuf_provider,
+				tmp_work_info->pbuf_provider,
+				flags);
+		mutex_unlock(&tmp_ch_info->ch_info_lock);
+		if (ret < 0 && ret != -EAGAIN) {
+			LBSRV_ERR("%s:%s:%s %s: TX Error %d\n",
+					tmp_ch_info->transport,
+					tmp_ch_info->edge,
+					tmp_ch_info->name, __func__, ret);
+			glink_lbsrv_free_data(tmp_work_info->data,
+					      tmp_work_info->buf_type);
+			kfree(tmp_work_info);
+			return;
+		}
+		if (ret != -EAGAIN)
+			tmp_work_info->tx_config.echo_count--;
+		if (!tmp_work_info->tx_config.echo_count)
+			break;
+
+		delay_ms = calc_delay_ms(tmp_work_info->tx_config.random_delay,
+					 tmp_work_info->tx_config.delay_ms);
+		if (delay_ms) {
+			queue_delayed_work(glink_lbsrv_wq, &tmp_work_info->work,
+					   msecs_to_jiffies(delay_ms));
+			return;
+		}
+	}
+	kfree(tmp_work_info);
+}
+
+/**
+ * glink_lbsrv_link_state_worker() - Function to handle link state updates
+ * @work:	Pointer to the work item in the link_state_work_info.
+ *
+ * This worker function is scheduled when there is a link state update. Since
+ * the loopback server registers for all transports, it receives link state
+ * updates for every transport that gets registered in the system.
+ */
+static void glink_lbsrv_link_state_worker(struct work_struct *work)
+{
+	struct delayed_work *ls_work = to_delayed_work(work);
+	struct link_state_work_info *ls_info =
+		container_of(ls_work, struct link_state_work_info, work);
+	struct ch_info *tmp_ch_info;
+
+	if (ls_info->link_state == GLINK_LINK_STATE_UP) {
+		LBSRV_INFO("%s: LINK_STATE_UP %s:%s\n",
+			  __func__, ls_info->edge, ls_info->transport);
+		mutex_lock(&ctl_ch_list_lock);
+		list_for_each_entry(tmp_ch_info, &ctl_ch_list, list) {
+			if (strcmp(tmp_ch_info->edge, ls_info->edge) ||
+			    strcmp(tmp_ch_info->transport, ls_info->transport))
+				continue;
+			queue_delayed_work(glink_lbsrv_wq,
+					   &tmp_ch_info->open_work, 0);
+		}
+		mutex_unlock(&ctl_ch_list_lock);
+	} else if (ls_info->link_state == GLINK_LINK_STATE_DOWN) {
+		LBSRV_INFO("%s: LINK_STATE_DOWN %s:%s\n",
+			  __func__, ls_info->edge, ls_info->transport);
+	}
+	kfree(ls_info);
+}
+
+/**
+ * glink_lbsrv_link_state_cb() - Callback to receive link state updates
+ * @cb_info:	Information containing the link & its state.
+ * @priv:	Private data passed during the link state registration.
+ *
+ * This function is called by the GLINK core to notify the loopback server
+ * about link state updates. It is registered with the GLINK core by the
+ * loopback server via glink_register_link_state_cb().
+ */
+static void glink_lbsrv_link_state_cb(struct glink_link_state_cb_info *cb_info,
+				      void *priv)
+{
+	struct link_state_work_info *ls_info;
+
+	if (!cb_info)
+		return;
+
+	LBSRV_INFO("%s: %s:%s\n", __func__, cb_info->edge, cb_info->transport);
+	ls_info = kmalloc(sizeof(*ls_info), GFP_KERNEL);
+	if (!ls_info) {
+		LBSRV_ERR("%s: Error allocating link state info\n", __func__);
+		return;
+	}
+
+	strlcpy(ls_info->edge, cb_info->edge, GLINK_NAME_SIZE);
+	strlcpy(ls_info->transport, cb_info->transport, GLINK_NAME_SIZE);
+	ls_info->link_state = cb_info->link_state;
+	INIT_DELAYED_WORK(&ls_info->work, glink_lbsrv_link_state_worker);
+	queue_delayed_work(glink_lbsrv_wq, &ls_info->work, 0);
+}
+
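+/**
+ * glink_loopback_server_init() - Initialize the loopback server module
+ *
+ * Creates the logging context and the worker queue, creates a control
+ * channel info structure for each entry in ctl_ch_tbl and registers for
+ * link state updates.
+ *
+ * Return: 0 on success, standard error codes otherwise.
+ */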
+static int glink_loopback_server_init(void)
+{
+	int i;
+	int ret;
+	struct ch_info *tmp_ch_info;
+
+	glink_lbsrv_log_ctx = ipc_log_context_create(GLINK_LBSRV_NUM_LOG_PAGES,
+							"glink_lbsrv", 0);
+	if (!glink_lbsrv_log_ctx)
+		pr_err("%s: unable to create log context\n", __func__);
+
+	glink_lbsrv_wq = create_singlethread_workqueue("glink_lbsrv");
+	if (!glink_lbsrv_wq) {
+		LBSRV_ERR("%s: Error creating glink_lbsrv_wq\n", __func__);
+		return -EFAULT;
+	}
+
+	for (i = 0; i < ARRAY_SIZE(ctl_ch_tbl); i++) {
+		ret = create_ch_info(ctl_ch_tbl[i].name, ctl_ch_tbl[i].edge,
+				     ctl_ch_tbl[i].transport, CTL,
+				     &tmp_ch_info);
+		if (ret < 0) {
+			LBSRV_ERR("%s: Error creating ctl ch index %d\n",
+				__func__, i);
+			continue;
+		}
+	}
+	glink_lbsrv_link_state_notif_handle = glink_register_link_state_cb(
+						&glink_lbsrv_link_info, NULL);
+	return 0;
+}
+
+module_init(glink_loopback_server_init);
+
+MODULE_DESCRIPTION("MSM Generic Link (G-Link) Loopback Server");
+MODULE_LICENSE("GPL v2");
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/soc/qcom/glink_private.h	2019-01-22 16:16:26.651274914 +0100
@@ -0,0 +1,1085 @@
+/* Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#ifndef _SOC_QCOM_GLINK_PRIVATE_H_
+#define _SOC_QCOM_GLINK_PRIVATE_H_
+
+#include <linux/bug.h>
+#include <linux/completion.h>
+#include <linux/dcache.h>
+#include <linux/ipc_logging.h>
+#include <linux/kernel.h>
+#include <linux/kref.h>
+#include <linux/ratelimit.h>
+#include <linux/sched.h>
+#include <linux/seq_file.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+#include <soc/qcom/glink.h>
+
+struct glink_core_xprt_ctx;
+struct channel_ctx;
+enum transport_state_e;
+enum local_channel_state_e;
+
+/* Logging Macros */
+enum {
+	QCOM_GLINK_INFO = 1U << 0,
+	QCOM_GLINK_DEBUG = 1U << 1,
+	QCOM_GLINK_GPIO = 1U << 2,
+	QCOM_GLINK_PERF = 1U << 3,
+};
+
+enum glink_dbgfs_ss {
+	GLINK_DBGFS_MPSS,
+	GLINK_DBGFS_APSS,
+	GLINK_DBGFS_LPASS,
+	GLINK_DBGFS_DSPS,
+	GLINK_DBGFS_RPM,
+	GLINK_DBGFS_WCNSS,
+	GLINK_DBGFS_LLOOP,
+	GLINK_DBGFS_MOCK,
+	GLINK_DBGFS_MAX_NUM_SUBS
+};
+
+enum glink_dbgfs_xprt {
+	GLINK_DBGFS_SMEM,
+	GLINK_DBGFS_SMD,
+	GLINK_DBGFS_XLLOOP,
+	GLINK_DBGFS_XMOCK,
+	GLINK_DBGFS_XMOCK_LOW,
+	GLINK_DBGFS_XMOCK_HIGH,
+	GLINK_DBGFS_MAX_NUM_XPRTS
+};
+
+struct glink_dbgfs {
+	const char *curr_name;
+	const char *par_name;
+	bool b_dir_create;
+};
+
+struct glink_dbgfs_data {
+	struct list_head flist;
+	struct dentry *dent;
+	void (*o_func)(struct seq_file *s);
+	void *priv_data;
+	bool b_priv_free_req;
+};
+
+struct xprt_ctx_iterator {
+	struct list_head *xprt_list;
+	struct glink_core_xprt_ctx *i_curr;
+	unsigned long xprt_list_flags;
+};
+
+struct ch_ctx_iterator {
+	struct list_head *ch_list;
+	struct channel_ctx *i_curr;
+	unsigned long ch_list_flags;
+};
+
+struct glink_ch_intent_info {
+	spinlock_t *li_lst_lock;
+	struct list_head *li_avail_list;
+	struct list_head *li_used_list;
+	spinlock_t *ri_lst_lock;
+	struct list_head *ri_list;
+};
+
+/* Tracer Packet Event IDs for G-Link */
+enum glink_tracer_pkt_events {
+	GLINK_CORE_TX = 1,
+	GLINK_QUEUE_TO_SCHEDULER = 2,
+	GLINK_SCHEDULER_TX = 3,
+	GLINK_XPRT_TX = 4,
+	GLINK_XPRT_RX = 5,
+	GLINK_CORE_RX = 6,
+};
+
+/**
+ * glink_get_ss_enum_string() - get the name of the subsystem based on enum value
+ * @enum_id:	enum id of a specific subsystem.
+ *
+ * Return: name of the subsystem, NULL in case of invalid input
+ */
+const char *glink_get_ss_enum_string(unsigned int enum_id);
+
+/**
+ * glink_get_xprt_enum_string() - get the name of the transport based on enum value
+ * @enum_id:	enum id of a specific transport.
+ *
+ * Return: name of the transport, NULL in case of invalid input
+ */
+const char *glink_get_xprt_enum_string(unsigned int enum_id);
+
+/**
+ * glink_get_xprt_state_string() - get the name of the transport state based on enum value
+ * @enum_id:	enum id of the state of the transport.
+ *
+ * Return: name of the transport state, NULL in case of invalid input
+ */
+const char *glink_get_xprt_state_string(enum transport_state_e enum_id);
+
+/**
+ * glink_get_ch_state_string() - get the name of the channel state based on enum value
+ * @enum_id:	enum id of a specific state of the channel.
+ *
+ * Return: name of the channel state, NULL in case of invalid input
+ */
+const char *glink_get_ch_state_string(enum local_channel_state_e enum_id);
+
+#define GLINK_IPC_LOG_STR(x...) do { \
+	if (glink_get_log_ctx()) \
+		ipc_log_string(glink_get_log_ctx(), x); \
+} while (0)
+
+#define GLINK_DBG(x...) do {                              \
+	if (glink_get_debug_mask() & QCOM_GLINK_DEBUG) \
+			GLINK_IPC_LOG_STR(x);  \
+} while (0)
+
+#define GLINK_INFO(x...) do {                              \
+	if (glink_get_debug_mask() & QCOM_GLINK_INFO) \
+			GLINK_IPC_LOG_STR(x);  \
+} while (0)
+
+#define GLINK_INFO_PERF(x...) do {                              \
+	if (glink_get_debug_mask() & (QCOM_GLINK_INFO | QCOM_GLINK_PERF)) \
+			GLINK_IPC_LOG_STR(x);  \
+} while (0)
+
+#define GLINK_PERF(x...) do {                              \
+	if (glink_get_debug_mask() & QCOM_GLINK_PERF) \
+			GLINK_IPC_LOG_STR("<PERF> " x);  \
+} while (0)
+
+#define GLINK_UT_ERR(x...) do {                              \
+	if (!(glink_get_debug_mask() & QCOM_GLINK_PERF)) \
+		pr_err("<UT> " x); \
+	GLINK_IPC_LOG_STR("<UT> " x);  \
+} while (0)
+
+#define GLINK_UT_DBG(x...) do {                              \
+	if (glink_get_debug_mask() & QCOM_GLINK_DEBUG) \
+			GLINK_IPC_LOG_STR("<UT> " x);  \
+} while (0)
+
+#define GLINK_UT_INFO(x...) do {                              \
+	if (glink_get_debug_mask() & QCOM_GLINK_INFO) \
+			GLINK_IPC_LOG_STR("<UT> " x);  \
+} while (0)
+
+#define GLINK_UT_INFO_PERF(x...) do {                              \
+	if (glink_get_debug_mask() & (QCOM_GLINK_INFO | QCOM_GLINK_PERF)) \
+			GLINK_IPC_LOG_STR("<UT> " x);  \
+} while (0)
+
+#define GLINK_UT_PERF(x...) do {                              \
+	if (glink_get_debug_mask() & QCOM_GLINK_PERF) \
+			GLINK_IPC_LOG_STR("<PERF> " x);  \
+} while (0)
+
+#define GLINK_XPRT_IPC_LOG_STR(xprt, x...) do { \
+	if (glink_get_xprt_log_ctx(xprt)) \
+		ipc_log_string(glink_get_xprt_log_ctx(xprt), x); \
+} while (0)
+
+#define GLINK_XPRT_IF_INFO(xprt_if, x...) do { \
+	if (glink_get_debug_mask() & QCOM_GLINK_INFO) \
+		GLINK_XPRT_IPC_LOG_STR(xprt_if.glink_core_priv, "<XPRT> " x); \
+} while (0)
+
+#define GLINK_XPRT_IF_DBG(xprt_if, x...) do { \
+	if (glink_get_debug_mask() & QCOM_GLINK_DEBUG) \
+		GLINK_XPRT_IPC_LOG_STR(xprt_if.glink_core_priv, "<XPRT> " x); \
+} while (0)
+
+#define GLINK_XPRT_IF_ERR(xprt_if, x...) do { \
+	pr_err("<XPRT> " x); \
+	GLINK_XPRT_IPC_LOG_STR(xprt_if.glink_core_priv, "<XPRT> " x); \
+} while (0)
+
+#define GLINK_PERF_XPRT(xprt, fmt, args...) do { \
+	if (glink_get_debug_mask() & QCOM_GLINK_PERF) \
+			GLINK_XPRT_IPC_LOG_STR(xprt, "<PERF> %s:%s " fmt, \
+					xprt->name, xprt->edge, args);  \
+} while (0)
+
+#define GLINK_PERF_CH(ctx, fmt, args...) do { \
+	if (glink_get_debug_mask() & QCOM_GLINK_PERF) \
+			GLINK_XPRT_IPC_LOG_STR(ctx->transport_ptr, \
+					"<PERF> %s:%s:%s[%u:%u] " fmt, \
+					ctx->transport_ptr->name, \
+					ctx->transport_ptr->edge, \
+					ctx->name, \
+					ctx->lcid, \
+					ctx->rcid, args);  \
+} while (0)
+
+#define GLINK_PERF_CH_XPRT(ctx, xprt, fmt, args...) do { \
+	if (glink_get_debug_mask() & QCOM_GLINK_PERF) \
+			GLINK_XPRT_IPC_LOG_STR(xprt, \
+					"<PERF> %s:%s:%s[%u:%u] " fmt, \
+					xprt->name, \
+					xprt->edge, \
+					ctx->name, \
+					ctx->lcid, \
+					ctx->rcid, args);  \
+} while (0)
+
+#define GLINK_INFO_PERF_XPRT(xprt, fmt, args...) do { \
+	if (glink_get_debug_mask() & (QCOM_GLINK_INFO | QCOM_GLINK_PERF)) \
+			GLINK_XPRT_IPC_LOG_STR(xprt, "<CORE> %s:%s " fmt, \
+					xprt->name, xprt->edge, args);  \
+} while (0)
+
+#define GLINK_INFO_PERF_CH(ctx, fmt, args...) do { \
+	if (glink_get_debug_mask() & (QCOM_GLINK_INFO | QCOM_GLINK_PERF)) \
+			GLINK_XPRT_IPC_LOG_STR(ctx->transport_ptr, \
+					"<CORE> %s:%s:%s[%u:%u] " fmt, \
+					ctx->transport_ptr->name, \
+					ctx->transport_ptr->edge, \
+					ctx->name, \
+					ctx->lcid, \
+					ctx->rcid, args);  \
+} while (0)
+
+#define GLINK_INFO_PERF_CH_XPRT(ctx, xprt, fmt, args...) do { \
+	if (glink_get_debug_mask() & (QCOM_GLINK_INFO | QCOM_GLINK_PERF)) \
+			GLINK_XPRT_IPC_LOG_STR(xprt,\
+					"<CORE> %s:%s:%s[%u:%u] " fmt, \
+					xprt->name, \
+					xprt->edge, \
+					ctx->name, \
+					ctx->lcid, \
+					ctx->rcid, args);  \
+} while (0)
+
+#define GLINK_INFO_XPRT(xprt, fmt, args...) do { \
+	if (glink_get_debug_mask() & QCOM_GLINK_INFO) \
+			GLINK_XPRT_IPC_LOG_STR(xprt, "<CORE> %s:%s " fmt, \
+					xprt->name, xprt->edge, args);  \
+} while (0)
+
+#define GLINK_INFO_CH(ctx, fmt, args...) do { \
+	if (glink_get_debug_mask() & QCOM_GLINK_INFO) \
+			GLINK_XPRT_IPC_LOG_STR(ctx->transport_ptr, \
+					"<CORE> %s:%s:%s[%u:%u] " fmt, \
+					ctx->transport_ptr->name, \
+					ctx->transport_ptr->edge, \
+					ctx->name, \
+					ctx->lcid, \
+					ctx->rcid, args);  \
+} while (0)
+
+#define GLINK_INFO_CH_XPRT(ctx, xprt, fmt, args...) do { \
+	if (glink_get_debug_mask() & QCOM_GLINK_INFO) \
+			GLINK_XPRT_IPC_LOG_STR(xprt, \
+					"<CORE> %s:%s:%s[%u:%u] " fmt, \
+					xprt->name, \
+					xprt->edge, \
+					ctx->name, \
+					ctx->lcid, \
+					ctx->rcid, args);  \
+} while (0)
+
+#define GLINK_DBG_XPRT(xprt, fmt, args...) do { \
+	if (glink_get_debug_mask() & QCOM_GLINK_DEBUG) \
+			GLINK_XPRT_IPC_LOG_STR(xprt, "<CORE> %s:%s " fmt, \
+					xprt->name, xprt->edge, args);  \
+} while (0)
+
+#define GLINK_DBG_CH(ctx, fmt, args...) do { \
+	if (glink_get_debug_mask() & QCOM_GLINK_DEBUG) \
+			GLINK_XPRT_IPC_LOG_STR(ctx->transport_ptr, \
+					"<CORE> %s:%s:%s[%u:%u] " fmt, \
+					ctx->transport_ptr->name, \
+					ctx->transport_ptr->edge, \
+					ctx->name, \
+					ctx->lcid, \
+					ctx->rcid, args);  \
+} while (0)
+
+#define GLINK_DBG_CH_XPRT(ctx, xprt, fmt, args...) do { \
+	if (glink_get_debug_mask() & QCOM_GLINK_DEBUG) \
+			GLINK_XPRT_IPC_LOG_STR(xprt, \
+					"<CORE> %s:%s:%s[%u:%u] " fmt, \
+					xprt->name, \
+					xprt->edge, \
+					ctx->name, \
+					ctx->lcid, \
+					ctx->rcid, args);  \
+} while (0)
+
+#define GLINK_ERR(x...) do {                              \
+	pr_err_ratelimited("<CORE> " x); \
+	GLINK_IPC_LOG_STR("<CORE> " x);  \
+} while (0)
+
+#define GLINK_ERR_XPRT(xprt, fmt, args...) do { \
+	pr_err_ratelimited("<CORE> %s:%s " fmt, \
+		xprt->name, xprt->edge, args);  \
+	GLINK_INFO_XPRT(xprt, fmt, args); \
+} while (0)
+
+#define GLINK_ERR_CH(ctx, fmt, args...) do { \
+	pr_err_ratelimited("<CORE> %s:%s:%s[%u:%u] " fmt, \
+		ctx->transport_ptr->name, \
+		ctx->transport_ptr->edge, \
+		ctx->name, \
+		ctx->lcid, \
+		ctx->rcid, args);  \
+	GLINK_INFO_CH(ctx, fmt, args); \
+} while (0)
+
+#define GLINK_ERR_CH_XPRT(ctx, xprt, fmt, args...) do { \
+	pr_err_ratelimited("<CORE> %s:%s:%s[%u:%u] " fmt, \
+		xprt->name, \
+		xprt->edge, \
+		ctx->name, \
+		ctx->lcid, \
+		ctx->rcid, args);  \
+	GLINK_INFO_CH_XPRT(ctx, xprt, fmt, args); \
+} while (0)
+
+/**
+ * OVERFLOW_ADD_UNSIGNED() - check for unsigned overflow
+ *
+ * type:	type to check for overflow
+ * a:	left value to use
+ * b:	right value to use
+ * returns:	true if a + b will result in overflow; false otherwise
+ */
+#define OVERFLOW_ADD_UNSIGNED(type, a, b) \
+	(((type)~0 - (a)) < (b) ? true : false)
+
+/**
+ * glink_get_debug_mask() - Return debug mask attribute
+ *
+ * Return: debug mask attribute
+ */
+unsigned glink_get_debug_mask(void);
+
+/**
+ * glink_get_log_ctx() - Return log context for other GLINK modules.
+ *
+ * Return: Log context or NULL if none.
+ */
+void *glink_get_log_ctx(void);
+
+/**
+ * glink_get_xprt_log_ctx() - Return log context for the given transport.
+ * @xprt:	pointer to the transport context.
+ *
+ * Return: Log context or NULL if none.
+ */
+void *glink_get_xprt_log_ctx(struct glink_core_xprt_ctx *xprt);
+
+/**
+ * glink_get_channel_id_for_handle() - Get logical channel ID
+ *
+ * @handle:	handle of channel
+ *
+ * Used internally by G-Link debugfs.
+ *
+ * Return:  Logical Channel ID or standard Linux error code
+ */
+int glink_get_channel_id_for_handle(void *handle);
+
+/**
+ * glink_get_channel_name_for_handle() - return channel name
+ *
+ * @handle:	handle of channel
+ *
+ * Used internally by G-Link debugfs.
+ *
+ * Return:  Channel name or NULL
+ */
+char *glink_get_channel_name_for_handle(void *handle);
+
+/**
+ * glink_debugfs_init() - initialize glink debugfs directory
+ *
+ * Return: error code or success.
+ */
+int glink_debugfs_init(void);
+
+/**
+ * glink_debugfs_exit() - removes glink debugfs directory
+ */
+void glink_debugfs_exit(void);
+
+/**
+ * glink_debugfs_create() - create the debugfs file
+ * @name:	debugfs file name
+ * @show:	pointer to the actual function which will be invoked upon
+ *		opening this file.
+ * @dir:	pointer to a structure debugfs_dir
+ * @dbgfs_data: pointer to any private data need to be associated with debugfs
+ * @b_free_req: boolean value to decide to free the memory associated with
+ *		@dbgfs_data during deletion of the file
+ *
+ * Return:	pointer to the file/directory created, NULL in case of error
+ *
+ * This function checks which directory will be used to create the debugfs file
+ * and calls glink_dfs_create_file. Anyone who allocates memory for dbgfs_data
+ * and needs it freed when the file is deleted must set b_free_req to true;
+ * otherwise, the memory will leak.
+ */
+struct dentry *glink_debugfs_create(const char *name,
+		void (*show)(struct seq_file *),
+		struct glink_dbgfs *dir, void *dbgfs_data, bool b_free_req);
+
+/**
+ * glink_debugfs_remove_recur() - remove the directory & files recursively
+ * @dfs:	pointer to the structure glink_dbgfs
+ *
+ * This function removes the files & directories. This also takes care of
+ * freeing any memory associated with the debugfs file.
+ */
+void glink_debugfs_remove_recur(struct glink_dbgfs *dfs);
+
+/**
+ * glink_debugfs_remove_channel() - remove all channel-specific files & folders
+ *				 in debugfs when channel is fully closed
+ * @ch_ctx:		pointer to the channel_context
+ * @xprt_ctx:		pointer to the transport_context
+ *
+ * This function is invoked when any channel is fully closed. It removes the
+ * folders & other files in debugfs for that channel.
+ */
+void glink_debugfs_remove_channel(struct channel_ctx *ch_ctx,
+			struct glink_core_xprt_ctx *xprt_ctx);
+
+/**
+ * glink_debugfs_add_channel() - create channel-specific files & folders in
+ *				 debugfs when channel is added
+ * @ch_ctx:		pointer to the channel_context
+ * @xprt_ctx:		pointer to the transport_context
+ *
+ * This function is invoked when a new channel is created. It creates the
+ * folders & other files in debugfs for that channel
+ */
+void glink_debugfs_add_channel(struct channel_ctx *ch_ctx,
+		struct glink_core_xprt_ctx *xprt_ctx);
+
+/**
+ * glink_debugfs_add_xprt() - create transport-specific files & folders in
+ *			      debugfs when a new transport is registered
+ * @xprt_ctx:		pointer to the transport_context
+ *
+ * This function is invoked when a new transport is registered. It creates the
+ * folders & other files in debugfs for that transport
+ */
+void glink_debugfs_add_xprt(struct glink_core_xprt_ctx *xprt_ctx);
+
+/**
+ * glink_xprt_ctx_iterator_init() - Initializes the transport context list iterator
+ * @xprt_i:	pointer to the transport context iterator.
+ *
+ * Return: None
+ *
+ * This function acquires the transport context lock which must then be
+ * released by glink_xprt_ctx_iterator_end()
+ */
+void glink_xprt_ctx_iterator_init(struct xprt_ctx_iterator *xprt_i);
+
+/**
+ * glink_xprt_ctx_iterator_end() - Ends the transport context list iteration
+ * @xprt_i:	pointer to the transport context iterator.
+ *
+ * Return: None
+ */
+void glink_xprt_ctx_iterator_end(struct xprt_ctx_iterator *xprt_i);
+
+/**
+ * glink_xprt_ctx_iterator_next() - get the next element in the transport context list
+ * @xprt_i:	pointer to the transport context iterator.
+ *
+ * Return: pointer to the transport context structure
+ */
+struct glink_core_xprt_ctx *glink_xprt_ctx_iterator_next(
+			struct xprt_ctx_iterator *xprt_i);
+
+/**
+ * glink_get_xprt_name() - get the transport name
+ * @xprt_ctx:	pointer to the transport context.
+ *
+ * Return: name of the transport
+ */
+char *glink_get_xprt_name(struct glink_core_xprt_ctx *xprt_ctx);
+
+/**
+ * glink_get_xprt_edge_name() - get the name of the remote processor/edge
+ *				of the transport
+ * @xprt_ctx:	pointer to the transport context.
+ *
+ * Return: name of the remote processor/edge
+ */
+char *glink_get_xprt_edge_name(struct glink_core_xprt_ctx *xprt_ctx);
+
+/**
+ * glink_get_xprt_state() - get the state of the transport
+ * @xprt_ctx:	pointer to the transport context.
+ *
+ * Return: name of the transport state, NULL in case of invalid input
+ */
+const char *glink_get_xprt_state(struct glink_core_xprt_ctx *xprt_ctx);
+
+/**
+ * glink_get_xprt_version_features() - get the version and feature set
+ *					of local transport in glink
+ * @xprt_ctx:	pointer to the transport context.
+ *
+ * Return: pointer to the glink_core_version
+ */
+const struct glink_core_version *glink_get_xprt_version_features(
+			struct glink_core_xprt_ctx *xprt_ctx);
+
+/**
+ * glink_ch_ctx_iterator_init() - Initializes the channel context list iterator
+ * @ch_iter:	pointer to the channel context iterator.
+ * @xprt:       pointer to the transport context that holds the channel list
+ *
+ * This function acquires the channel context lock which must then be
+ * released by glink_ch_ctx_iterator_end()
+ */
+void glink_ch_ctx_iterator_init(struct ch_ctx_iterator *ch_iter,
+			struct glink_core_xprt_ctx *xprt);
+
+/**
+ * glink_ch_ctx_iterator_end() - Ends the channel context list iteration
+ * @ch_iter:	pointer to the channel context iterator.
+ * @xprt:	pointer to the transport context that holds the channel list
+ *
+ * This function releases the channel context lock acquired by
+ * glink_ch_ctx_iterator_init().
+ */
+void glink_ch_ctx_iterator_end(struct ch_ctx_iterator *ch_iter,
+				struct glink_core_xprt_ctx *xprt);
+
+/**
+ * glink_ch_ctx_iterator_next() - get the next element in the channel context list
+ * @ch_iter:	pointer to the channel context iterator.
+ *
+ * Return: pointer to the channel context structure
+ */
+struct channel_ctx *glink_ch_ctx_iterator_next(struct ch_ctx_iterator *ch_iter);
+
+/**
+ * glink_get_ch_name() - get the channel name
+ * @ch_ctx:	pointer to the channel context.
+ *
+ * Return: name of the channel, NULL in case of invalid input
+ */
+char *glink_get_ch_name(struct channel_ctx *ch_ctx);
+
+/**
+ * glink_get_ch_edge_name() - get the name of the remote processor/edge
+ *				of the channel
+ * @xprt_ctx:	pointer to the channel context.
+ *
+ * Return: name of the remote processor/edge
+ */
+char *glink_get_ch_edge_name(struct channel_ctx *ch_ctx);
+
+/**
+ * glink_get_ch_lcid() - get the local channel ID
+ * @ch_ctx:	pointer to the channel context.
+ *
+ * Return: local channel id, -EINVAL in case of invalid input
+ */
+int glink_get_ch_lcid(struct channel_ctx *ch_ctx);
+
+/**
+ * glink_get_ch_rcid() - get the remote channel ID
+ * @ch_ctx:	pointer to the channel context.
+ *
+ * Return: remote channel id, -EINVAL in case of invalid input
+ */
+int glink_get_ch_rcid(struct channel_ctx *ch_ctx);
+
+/**
+ * glink_get_ch_lstate() - get the local channel state
+ * @ch_ctx:	pointer to the channel context.
+ *
+ * Return: name of the local channel state, NULL in case of invalid input
+ */
+const char *glink_get_ch_lstate(struct channel_ctx *ch_ctx);
+
+/**
+ * glink_get_ch_rstate() - get the remote channel state
+ * @ch_ctx:	pointer to the channel context.
+ *
+ * Return: true if the remote side is opened, false otherwise
+ */
+bool glink_get_ch_rstate(struct channel_ctx *ch_ctx);
+
+/**
+ * glink_get_ch_xprt_name() - get the name of the transport to which
+ *				the channel belongs
+ * @ch_ctx:	pointer to the channel context.
+ *
+ * Return: name of the transport, NULL in case of invalid input
+ */
+char *glink_get_ch_xprt_name(struct channel_ctx *ch_ctx);
+
+/**
+ * glink_get_ch_tx_pkt_count() - get the total number of packets sent
+ *				through this channel
+ * @ch_ctx:	pointer to the channel context.
+ *
+ * Return: number of packets transmitted, -EINVAL in case of invalid input
+ */
+int glink_get_ch_tx_pkt_count(struct channel_ctx *ch_ctx);
+
+/**
+ * glink_get_ch_rx_pkt_count() - get the total number of packets
+ *				received at this channel
+ * @ch_ctx:	pointer to the channel context.
+ *
+ * Return: number of packets received, -EINVAL in case of invalid input
+ */
+int glink_get_ch_rx_pkt_count(struct channel_ctx *ch_ctx);
+
+/**
+ * glink_get_ch_lintents_queued() - get the total number of intents queued
+ *				at local side
+ * @ch_ctx:	pointer to the channel context.
+ *
+ * Return: number of intents queued, -EINVAL in case of invalid input
+ */
+int glink_get_ch_lintents_queued(struct channel_ctx *ch_ctx);
+
+/**
+ * glink_get_ch_rintents_queued() - get the total number of intents queued
+ *				from remote side
+ * @ch_ctx:	pointer to the channel context.
+ *
+ * Return: number of intents queued
+ */
+int glink_get_ch_rintents_queued(struct channel_ctx *ch_ctx);
+
+/**
+ * glink_get_ch_intent_info() - get the intent details of a channel
+ * @ch_ctx:	pointer to the channel context.
+ * @ch_ctx_i:   pointer to a structure that will contain intent details
+ *
+ * This function is used to get all the channel intent details, including locks.
+ */
+void glink_get_ch_intent_info(struct channel_ctx *ch_ctx,
+			struct glink_ch_intent_info *ch_ctx_i);
+
+/**
+ * enum ssr_command - G-Link SSR protocol commands
+ */
+enum ssr_command {
+	GLINK_SSR_DO_CLEANUP,
+	GLINK_SSR_CLEANUP_DONE,
+};
+
+/**
+ * struct ssr_notify_data - Contains private data used for client notifications
+ *                          from G-Link.
+ * tx_done:		Indicates whether or not the tx_done notification has
+ *			been received.
+ * event:		The state notification event received.
+ * responded:		Indicates whether or not a cleanup_done response was
+ *			received.
+ * edge:		The G-Link edge name for the channel associated with
+ *			this callback data
+ * cb_kref:		Kref object to maintain cb_data reference.
+ */
+struct ssr_notify_data {
+	bool tx_done;
+	unsigned event;
+	bool responded;
+	const char *edge;
+	struct kref cb_kref;
+};
+
+/**
+ * struct subsys_info - Subsystem info structure
+ * ssr_name:		name of the subsystem recognized by the SSR framework
+ * edge:		name of the G-Link edge
+ * xprt:		name of the G-Link transport
+ * handle:		glink_ssr channel used for this subsystem
+ * link_state_handle:	link state handle for this edge, used to unregister
+ *			from receiving link state callbacks
+ * link_info:		Transport info used in link state callback registration
+ * cb_data:		Private callback data structure for notification
+ *			functions
+ * subsystem_list_node:	used to chain this structure in a list of subsystem
+ *			info structures
+ * notify_list:		list of subsys_info_leaf structures, containing the
+ *			subsystems to notify if this subsystem undergoes SSR
+ * notify_list_len:	length of notify_list
+ * link_up:		Flag indicating whether transport is up or not
+ * link_up_lock:	Lock for protecting the link_up flag
+ */
+struct subsys_info {
+	const char *ssr_name;
+	const char *edge;
+	const char *xprt;
+	void *handle;
+	void *link_state_handle;
+	struct glink_link_info *link_info;
+	struct ssr_notify_data *cb_data;
+	struct list_head subsystem_list_node;
+	struct list_head notify_list;
+	int notify_list_len;
+	bool link_up;
+	spinlock_t link_up_lock;
+	spinlock_t cb_lock;
+};
+
+/**
+ * struct subsys_info_leaf - Subsystem info leaf structure (a subsystem on the
+ *                           notify list of a subsys_info structure)
+ * ssr_name:	Name of the subsystem recognized by the SSR framework
+ * edge:	Name of the G-Link edge
+ * xprt:	Name of the G-Link transport
+ * cb_data:	Private callback data structure for notification functions
+ * notify_list_node:	used to chain this structure in the notify list
+ */
+struct subsys_info_leaf {
+	const char *ssr_name;
+	const char *edge;
+	const char *xprt;
+	struct ssr_notify_data *cb_data;
+	struct list_head notify_list_node;
+};
+
+/**
+ * struct do_cleanup_msg - The data structure for an SSR do_cleanup message
+ * version:	The G-Link SSR protocol version
+ * command:	The G-Link SSR command - do_cleanup
+ * seq_num:	Sequence number
+ * name_len:	Length of the name of the subsystem being restarted
+ * name:	G-Link edge name of the subsystem being restarted
+ */
+struct do_cleanup_msg {
+	uint32_t version;
+	uint32_t command;
+	uint32_t seq_num;
+	uint32_t name_len;
+	char name[32];
+};
+
+/**
+ * struct cleanup_done_msg - The data structure for an SSR cleanup_done message
+ * version:	The G-Link SSR protocol version
+ * response:	The G-Link SSR response to a do_cleanup command, cleanup_done
+ * seq_num:	Sequence number
+ */
+struct cleanup_done_msg {
+	uint32_t version;
+	uint32_t response;
+	uint32_t seq_num;
+};
+
+/**
+ * get_info_for_subsystem() - Retrieve information about a subsystem from the
+ *                            global subsystem_info_list
+ * @subsystem:	The name of the subsystem recognized by the SSR
+ *		framework
+ *
+ * Return: subsys_info structure containing info for the requested subsystem;
+ *         NULL if no structure can be found for the requested subsystem
+ */
+struct subsys_info *get_info_for_subsystem(const char *subsystem);
+
+/**
+ * get_info_for_edge() - Retrieve information about a subsystem from the
+ *                       global subsystem_info_list
+ * @edge:	The name of the edge recognized by G-Link
+ *
+ * Return: subsys_info structure containing info for the requested subsystem;
+ *         NULL if no structure can be found for the requested subsystem
+ */
+struct subsys_info *get_info_for_edge(const char *edge);
+
+/**
+ * glink_ssr_get_seq_num() - Get the current SSR sequence number
+ *
+ * Return: The current SSR sequence number
+ */
+uint32_t glink_ssr_get_seq_num(void);
+
+/**
+ * glink_ssr() - SSR cleanup function.
+ * @subsystem:	Name of the subsystem recognized by the SSR framework.
+ *
+ * Return: Standard error code.
+ */
+int glink_ssr(const char *subsystem);
+
+/**
+ * glink_subsys_up() - SSR subsystem up function.
+ * @subsystem:	Constant string for name of remote subsystem.
+ *
+ * Return: Standard error code.
+ */
+int glink_subsys_up(const char *subsystem);
+
+/**
+ * notify_for_subsystem() - Notify other subsystems that a subsystem is being
+ *                          restarted
+ * @ss_info:	Subsystem info structure for the subsystem being restarted
+ *
+ * This function sends notifications to affected subsystems that the subsystem
+ * in ss_info is being restarted, and waits for the cleanup done response from
+ * all of those subsystems. It also initiates any local cleanup that is
+ * necessary.
+ *
+ * Return: 0 on success, standard error codes otherwise
+ */
+int notify_for_subsystem(struct subsys_info *ss_info);
+
+/**
+ * glink_ssr_wait_cleanup_done() - Wait for the cleanup_done response from
+ *                                 all processors.
+ * @ssr_timeout_multiplier: timeout multiplier for waiting on all processors
+ *
+ * Return: True if cleanup_done received from all processors, false otherwise
+ */
+bool glink_ssr_wait_cleanup_done(unsigned ssr_timeout_multiplier);
+
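+/**
+ * struct channel_lcid - Tracks a logical channel ID in a list
+ * @list_node:	Used to chain this structure in a list of channel IDs.
+ * @lcid:	The logical channel ID.
+ */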
+struct channel_lcid {
+	struct list_head list_node;
+	uint32_t lcid;
+};
+
+/**
+ * struct rwref_lock - Read/Write Reference Lock
+ *
+ * kref:	reference count
+ * read_count:	number of readers that own the lock
+ * write_count:	number of writers (max 1) that own the lock
+ * count_zero:	used for internal signaling for non-atomic locks
+ *
+ * A Read/Write Reference Lock is a combination of a read/write spinlock and a
+ * reference count.  The main difference is that no locks are held in the
+ * critical section and the lifetime of the object is guaranteed.
+ *
+ * Read Locking
+ * Multiple readers may access the lock at any given time and a read lock will
+ * also ensure that the object exists for the life of the lock.
+ *
+ * rwref_read_get()
+ *     use resource in "critical section" (no locks are held)
+ * rwref_read_put()
+ *
+ * Write Locking
+ * A single writer may access the lock at any given time and a write lock will
+ * also ensure that the object exists for the life of the lock.
+ *
+ * rwref_write_get()
+ *     use resource in "critical section" (no locks are held)
+ * rwref_write_put()
+ *
+ * Reference Lock
+ * To ensure the lifetime of the lock (and not affect the read or write lock),
+ * a simple reference can be done.  By default, rwref_lock_init() will set the
+ * reference count to 1.
+ *
+ * rwref_lock_init()  Reference count is 1
+ * rwref_get()        Reference count is 2
+ * rwref_put()        Reference count is 1
+ * rwref_put()        Reference count goes to 0 and object is destroyed
+ */
+struct rwref_lock {
+	struct kref kref;
+	unsigned read_count;
+	unsigned write_count;
+	spinlock_t lock;
+	wait_queue_head_t count_zero;
+
+	void (*release)(struct rwref_lock *);
+};
+
+/**
+ * rwref_lock_release() - Release function called when the reference count
+ *			  reaches zero
+ * kref_ptr:	pointer to the kref embedded in the lock structure
+ */
+static inline void rwref_lock_release(struct kref *kref_ptr)
+{
+	struct rwref_lock *lock_ptr;
+
+	BUG_ON(kref_ptr == NULL);
+
+	lock_ptr = container_of(kref_ptr, struct rwref_lock, kref);
+	if (lock_ptr->release)
+		lock_ptr->release(lock_ptr);
+}
+
+/**
+ * rwref_lock_init() - Initialize rwref_lock
+ * lock_ptr:	pointer to lock structure
+ * release:	release function called when reference count goes to 0
+ */
+static inline void rwref_lock_init(struct rwref_lock *lock_ptr,
+		void (*release)(struct rwref_lock *))
+{
+	BUG_ON(lock_ptr == NULL);
+
+	kref_init(&lock_ptr->kref);
+	lock_ptr->read_count = 0;
+	lock_ptr->write_count = 0;
+	spin_lock_init(&lock_ptr->lock);
+	init_waitqueue_head(&lock_ptr->count_zero);
+	lock_ptr->release = release;
+}
+
+/**
+ * rwref_get() - gains a reference count for the object
+ * lock_ptr:	pointer to lock structure
+ */
+static inline void rwref_get(struct rwref_lock *lock_ptr)
+{
+	BUG_ON(lock_ptr == NULL);
+
+	kref_get(&lock_ptr->kref);
+}
+
+/**
+ * rwref_put() - puts a reference count for the object
+ * lock_ptr:	pointer to lock structure
+ *
+ * If the reference count goes to zero, the release function is called.
+ */
+static inline void rwref_put(struct rwref_lock *lock_ptr)
+{
+	BUG_ON(lock_ptr == NULL);
+
+	kref_put(&lock_ptr->kref, rwref_lock_release);
+}
+
+/**
+ * rwref_read_get_atomic() - gains a reference count for a read operation
+ * lock_ptr:	pointer to lock structure
+ * is_atomic:   if True, do not wait when acquiring lock
+ *
+ * Multiple readers may acquire the lock as long as the write count is zero.
+ */
+static inline void rwref_read_get_atomic(struct rwref_lock *lock_ptr,
+			bool is_atomic)
+{
+	unsigned long flags;
+
+	BUG_ON(lock_ptr == NULL);
+
+	kref_get(&lock_ptr->kref);
+	while (1) {
+		spin_lock_irqsave(&lock_ptr->lock, flags);
+		if (lock_ptr->write_count == 0) {
+			lock_ptr->read_count++;
+			spin_unlock_irqrestore(&lock_ptr->lock, flags);
+			break;
+		}
+		spin_unlock_irqrestore(&lock_ptr->lock, flags);
+		if (!is_atomic) {
+			wait_event(lock_ptr->count_zero,
+					lock_ptr->write_count == 0);
+		}
+	}
+}
+
+/**
+ * rwref_read_get() - gains a reference count for a read operation
+ * lock_ptr:	pointer to lock structure
+ *
+ * Multiple readers may acquire the lock as long as the write count is zero.
+ */
+static inline void rwref_read_get(struct rwref_lock *lock_ptr)
+{
+	rwref_read_get_atomic(lock_ptr, false);
+}
+
+/**
+ * rwref_read_put() - returns a reference count for a read operation
+ * lock_ptr:	pointer to lock structure
+ *
+ * Must be preceded by a call to rwref_read_get().
+ */
+static inline void rwref_read_put(struct rwref_lock *lock_ptr)
+{
+	unsigned long flags;
+
+	BUG_ON(lock_ptr == NULL);
+
+	spin_lock_irqsave(&lock_ptr->lock, flags);
+	BUG_ON(lock_ptr->read_count == 0);
+	if (--lock_ptr->read_count == 0)
+		wake_up(&lock_ptr->count_zero);
+	spin_unlock_irqrestore(&lock_ptr->lock, flags);
+	kref_put(&lock_ptr->kref, rwref_lock_release);
+}
+
+/**
+ * rwref_write_get_atomic() - gains a reference count for a write operation
+ * lock_ptr:	pointer to lock structure
+ * is_atomic:   if True, do not wait when acquiring lock
+ *
+ * Only one writer may acquire the lock as long as the reader count is zero.
+ */
+static inline void rwref_write_get_atomic(struct rwref_lock *lock_ptr,
+			bool is_atomic)
+{
+	unsigned long flags;
+
+	BUG_ON(lock_ptr == NULL);
+
+	kref_get(&lock_ptr->kref);
+	while (1) {
+		spin_lock_irqsave(&lock_ptr->lock, flags);
+		if (lock_ptr->read_count == 0 && lock_ptr->write_count == 0) {
+			lock_ptr->write_count++;
+			spin_unlock_irqrestore(&lock_ptr->lock, flags);
+			break;
+		}
+		spin_unlock_irqrestore(&lock_ptr->lock, flags);
+		if (!is_atomic) {
+			wait_event(lock_ptr->count_zero,
+					(lock_ptr->read_count == 0 &&
+					lock_ptr->write_count == 0));
+		}
+	}
+}
+
+/**
+ * rwref_write_get() - gains a reference count for a write operation
+ * lock_ptr:	pointer to lock structure
+ *
+ * Only one writer may acquire the lock as long as the reader count is zero.
+ */
+static inline void rwref_write_get(struct rwref_lock *lock_ptr)
+{
+	rwref_write_get_atomic(lock_ptr, false);
+}
+
+/**
+ * rwref_write_put() - returns a reference count for a write operation
+ * lock_ptr:	pointer to lock structure
+ *
+ * Must be preceded by a call to rwref_write_get().
+ */
+static inline void rwref_write_put(struct rwref_lock *lock_ptr)
+{
+	unsigned long flags;
+
+	BUG_ON(lock_ptr == NULL);
+
+	spin_lock_irqsave(&lock_ptr->lock, flags);
+	BUG_ON(lock_ptr->write_count != 1);
+	if (--lock_ptr->write_count == 0)
+		wake_up(&lock_ptr->count_zero);
+	spin_unlock_irqrestore(&lock_ptr->lock, flags);
+	kref_put(&lock_ptr->kref, rwref_lock_release);
+}
+
+#endif /* _SOC_QCOM_GLINK_PRIVATE_H_ */
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/soc/qcom/glink_smd_xprt.c	2019-01-22 16:16:26.651274914 +0100
@@ -0,0 +1,2110 @@
+/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/platform_device.h>
+#include <linux/sizes.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/srcu.h>
+#include <linux/termios.h>
+#include <linux/workqueue.h>
+#include <linux/completion.h>
+#include <soc/qcom/smd.h>
+#include <soc/qcom/glink.h>
+#include "glink_core_if.h"
+#include "glink_private.h"
+#include "glink_xprt_if.h"
+
+#define NUM_EDGES 5
+#define XPRT_NAME "smd_trans"
+#define SMD_DTR_SIG BIT(31)
+#define SMD_CTS_SIG BIT(30)
+#define SMD_CD_SIG BIT(29)
+#define SMD_RI_SIG BIT(28)
+
+/**
+ * enum command_types - commands sent to/received from the remote system
+ * @CMD_OPEN:		Channel open request
+ * @CMD_OPEN_ACK:	Response to @CMD_OPEN
+ * @CMD_CLOSE:		Channel close request
+ * @CMD_CLOSE_ACK:	Response to @CMD_CLOSE
+ */
+enum command_types {
+	CMD_OPEN,
+	CMD_OPEN_ACK,
+	CMD_CLOSE,
+	CMD_CLOSE_ACK,
+};
+
+/*
+ * Max of 64 channels, the 128 offset puts the rcid out of the
+ * range the remote might use
+ */
+#define LEGACY_RCID_CHANNEL_OFFSET	128
+
+#define SMDXPRT_ERR(einfo, x...) GLINK_XPRT_IF_ERR(einfo->xprt_if, x)
+#define SMDXPRT_INFO(einfo, x...) GLINK_XPRT_IF_INFO(einfo->xprt_if, x)
+#define SMDXPRT_DBG(einfo, x...) GLINK_XPRT_IF_DBG(einfo->xprt_if, x)
+
+/**
+ * struct edge_info - local information for managing an edge
+ * @xprt_if:		The transport interface registered with the glink code
+ *			associated with this edge.
+ * @xprt_cfg:		The transport configuration for the glink core
+ *			associated with this edge.
+ * @smd_edge:		The smd edge value corresponding to this edge.
+ * @channels:		A list of all the channels that currently exist on this
+ *			edge.
+ * @channels_lock:	Protects @channels "reads" from "writes".
+ * @intentless:		Flag indicating this edge is intentless.
+ * @irq_disabled:	Flag indicating whether interrupt is enabled or
+ *			disabled.
+ * @ssr_sync:		Synchronizes SSR with any ongoing activity that might
+ *			conflict.
+ * @in_ssr:		Prevents new activity that might conflict with an active
+ *			SSR.
+ * @ssr_work:		Ends SSR processing after giving SMD a chance to wrap up
+ *			SSR.
+ * @smd_ch:		Private SMD channel for channel migration.
+ * @smd_lock:		Serializes write access to @smd_ch.
+ * @in_ssr_lock:	Lock to protect the @in_ssr.
+ * @smd_ctl_ch_open:	Indicates that @smd_ch is fully open.
+ * @work:		Work item for processing migration data.
+ * @rx_cmd_lock:	Lock used to notify the transport interface about
+ *			received commands in a sequential manner.
+ *
+ * Each transport registered with the core is represented by a single instance
+ * of this structure which allows for complete management of the transport.
+ */
+struct edge_info {
+	struct glink_transport_if xprt_if;
+	struct glink_core_transport_cfg xprt_cfg;
+	uint32_t smd_edge;
+	struct list_head channels;
+	spinlock_t channels_lock;
+	bool intentless;
+	bool irq_disabled;
+	struct srcu_struct ssr_sync;
+	bool in_ssr;
+	struct delayed_work ssr_work;
+	smd_channel_t *smd_ch;
+	struct mutex smd_lock;
+	struct mutex in_ssr_lock;
+	bool smd_ctl_ch_open;
+	struct work_struct work;
+	struct mutex rx_cmd_lock;
+};
+
+/**
+ * struct channel - local information for managing a channel
+ * @node:		For chaining this channel on list for its edge.
+ * @name:		The name of this channel.
+ * @lcid:		The local channel id the core uses for this channel.
+ * @rcid:		The true remote channel id for this channel.
+ * @ch_probe_lock:	Lock to protect channel probe status.
+ * @wait_for_probe:	This channel is waiting for a probe from SMD.
+ * @had_probed:		This channel probed in the past and may skip probe.
+ * @edge:		Handle to the edge_info this channel is associated with.
+ * @smd_ch:		Handle to the underlying smd channel.
+ * @intents:		List of active intents on this channel.
+ * @used_intents:	List of consumed intents on this channel.
+ * @intents_lock:	Lock to protect @intents and @used_intents.
+ * @next_intent_id:	The next id to use for generated intents.
+ * @wq:			Handle for running tasks.
+ * @data_tasklet:	Tasklet to process received data.
+ * @cur_intent:		The current intent for received data.
+ * @intent_req:		Flag indicating if an intent has been requested for rx.
+ * @is_closing:		Flag indicating this channel is currently in the closing
+ *			state.
+ * @local_legacy:	The local side of the channel is in legacy mode.
+ * @remote_legacy:	The remote side of the channel is in legacy mode.
+ * @rx_data_lock:	Used to serialize RX data processing.
+ * @streaming_ch:	Indicates the underlying SMD channel is streaming type.
+ * @tx_resume_needed:	Indicates whether a tx_resume call should be triggered.
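+ * @ch_tasklet_lock:	Lock to protect the data tasklet state.
+ * @intent_req_size:	Size of the requested RX intent.
+ * @is_tasklet_enabled:	Indicates whether the data tasklet is enabled.
+ * @open_notifier:	Completion signaled once the open ack is processed.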
+ */
+struct channel {
+	struct list_head node;
+	char name[GLINK_NAME_SIZE];
+	uint32_t lcid;
+	uint32_t rcid;
+	struct mutex ch_probe_lock;
+	struct mutex ch_tasklet_lock;
+	bool wait_for_probe;
+	bool had_probed;
+	struct edge_info *edge;
+	smd_channel_t *smd_ch;
+	struct list_head intents;
+	struct list_head used_intents;
+	spinlock_t intents_lock;
+	uint32_t next_intent_id;
+	struct workqueue_struct *wq;
+	struct tasklet_struct data_tasklet;
+	struct intent_info *cur_intent;
+	bool intent_req;
+	bool is_closing;
+	bool local_legacy;
+	bool remote_legacy;
+	size_t intent_req_size;
+	spinlock_t rx_data_lock;
+	bool streaming_ch;
+	bool tx_resume_needed;
+	bool is_tasklet_enabled;
+	struct completion open_notifier;
+};
+
+/**
+ * struct intent_info - information for managing an intent
+ * @node:	Used for putting this intent in a list for its channel.
+ * @liid:	The local intent id the core uses to identify this intent.
+ * @size:	The size of the intent in bytes.
+ */
+struct intent_info {
+	struct list_head node;
+	uint32_t liid;
+	size_t size;
+};
+
+/**
+ * struct channel_work - a task to be processed for a specific channel
+ * @ch:		The channel associated with this task.
+ * @iid:	Intent id associated with this task, may not always be valid.
+ * @work:	The task to be processed.
+ */
+struct channel_work {
+	struct channel *ch;
+	uint32_t iid;
+	struct work_struct work;
+};
+
+/**
+ * struct pdrvs - Tracks a platform driver and its use among channels
+ * @node:	For tracking in the pdrv_list.
+ * @pdrv:	The platform driver to track.
+ */
+struct pdrvs {
+	struct list_head node;
+	struct platform_driver pdrv;
+};
+
+static uint32_t negotiate_features_v1(struct glink_transport_if *if_ptr,
+				      const struct glink_core_version *version,
+				      uint32_t features);
+
+static struct edge_info edge_infos[NUM_EDGES] = {
+	{
+		.xprt_cfg.edge = "dsps",
+		.smd_edge = SMD_APPS_DSPS,
+	},
+	{
+		.xprt_cfg.edge = "lpass",
+		.smd_edge = SMD_APPS_QDSP,
+	},
+	{
+		.xprt_cfg.edge = "mpss",
+		.smd_edge = SMD_APPS_MODEM,
+	},
+	{
+		.xprt_cfg.edge = "wcnss",
+		.smd_edge = SMD_APPS_WCNSS,
+	},
+	{
+		.xprt_cfg.edge = "rpm",
+		.smd_edge = SMD_APPS_RPM,
+		.intentless = true,
+	},
+};
+
+static struct glink_core_version versions[] = {
+	{1, 0x00, negotiate_features_v1},
+};
+
+static LIST_HEAD(pdrv_list);
+static DEFINE_MUTEX(pdrv_list_mutex);
+
+static void process_data_event(unsigned long param);
+static int add_platform_driver(struct channel *ch);
+static void smd_data_ch_close(struct channel *ch);
+
+/**
+ * check_write_avail() - Check if there is space to write on the smd channel,
+ *			 and enable the read interrupt if there is not.
+ * @check_fn:	The function to use to check if there is space to write
+ * @ch:		The channel to check
+ *
+ * Return: 0 on success or standard Linux error codes.
+ */
+static int check_write_avail(int (*check_fn)(smd_channel_t *),
+			     struct channel *ch)
+{
+	int rc = check_fn(ch->smd_ch);
+
+	if (rc == 0) {
+		ch->tx_resume_needed = true;
+		smd_enable_read_intr(ch->smd_ch);
+		rc = check_fn(ch->smd_ch);
+		if (rc > 0) {
+			ch->tx_resume_needed = false;
+			smd_disable_read_intr(ch->smd_ch);
+		}
+	}
+
+	return rc;
+}
+
+/**
+ * process_ctl_event() - process a control channel event task
+ * @work:	The migration task to process.
+ */
+static void process_ctl_event(struct work_struct *work)
+{
+	struct command {
+		uint32_t cmd;
+		uint32_t id;
+		uint32_t priority;
+	};
+	struct command cmd;
+	struct edge_info *einfo;
+	struct channel *ch;
+	struct channel *temp_ch;
+	int pkt_size;
+	int read_avail;
+	char name[GLINK_NAME_SIZE];
+	bool found;
+	unsigned long flags;
+
+	einfo = container_of(work, struct edge_info, work);
+
+	mutex_lock(&einfo->in_ssr_lock);
+	if (einfo->in_ssr) {
+		einfo->in_ssr = false;
+		einfo->xprt_if.glink_core_if_ptr->link_up(&einfo->xprt_if);
+	}
+	mutex_unlock(&einfo->in_ssr_lock);
+
+	while (smd_read_avail(einfo->smd_ch)) {
+		found = false;
+		pkt_size = smd_cur_packet_size(einfo->smd_ch);
+		read_avail = smd_read_avail(einfo->smd_ch);
+
+		if (pkt_size != read_avail)
+			continue;
+
+		smd_read(einfo->smd_ch, &cmd, sizeof(cmd));
+		if (cmd.cmd == CMD_OPEN) {
+			smd_read(einfo->smd_ch, name, GLINK_NAME_SIZE);
+			SMDXPRT_INFO(einfo, "%s RX OPEN '%s'\n",
+					__func__, name);
+
+			spin_lock_irqsave(&einfo->channels_lock, flags);
+			list_for_each_entry(ch, &einfo->channels, node) {
+				if (!strcmp(name, ch->name)) {
+					found = true;
+					break;
+				}
+			}
+			spin_unlock_irqrestore(&einfo->channels_lock, flags);
+
+			if (!found) {
+				ch = kzalloc(sizeof(*ch), GFP_KERNEL);
+				if (!ch) {
+					SMDXPRT_ERR(einfo,
+						"%s: ch alloc failed\n",
+						__func__);
+					continue;
+				}
+				strlcpy(ch->name, name, GLINK_NAME_SIZE);
+				ch->edge = einfo;
+				mutex_init(&ch->ch_probe_lock);
+				mutex_init(&ch->ch_tasklet_lock);
+				init_completion(&ch->open_notifier);
+				INIT_LIST_HEAD(&ch->intents);
+				INIT_LIST_HEAD(&ch->used_intents);
+				spin_lock_init(&ch->intents_lock);
+				spin_lock_init(&ch->rx_data_lock);
+				mutex_lock(&ch->ch_tasklet_lock);
+				tasklet_init(&ch->data_tasklet,
+					     process_data_event,
+					     (unsigned long)ch);
+				tasklet_disable(&ch->data_tasklet);
+				ch->is_tasklet_enabled = false;
+				mutex_unlock(&ch->ch_tasklet_lock);
+				ch->wq = create_singlethread_workqueue(
+								ch->name);
+				if (!ch->wq) {
+					SMDXPRT_ERR(einfo,
+						"%s: ch wq create failed\n",
+						__func__);
+					kfree(ch);
+					continue;
+				}
+
+				/*
+				 * Channel could have been added to the list
+				 * by someone else, so scan again.  Channel
+				 * creation is non-atomic, so an unlock and
+				 * recheck is necessary.
+				 */
+				temp_ch = ch;
+				spin_lock_irqsave(&einfo->channels_lock, flags);
+				list_for_each_entry(ch, &einfo->channels, node)
+					if (!strcmp(name, ch->name)) {
+						found = true;
+						break;
+					}
+
+				if (!found) {
+					ch = temp_ch;
+					list_add_tail(&ch->node,
+							&einfo->channels);
+					spin_unlock_irqrestore(
+						&einfo->channels_lock, flags);
+				} else {
+					spin_unlock_irqrestore(
+						&einfo->channels_lock, flags);
+					tasklet_kill(&temp_ch->data_tasklet);
+					destroy_workqueue(temp_ch->wq);
+					kfree(temp_ch);
+				}
+			}
+
+			if (ch->remote_legacy) {
+				SMDXPRT_DBG(einfo, "%s SMD Remote Open '%s'\n",
+						__func__, name);
+				cmd.cmd = CMD_OPEN_ACK;
+				cmd.priority = SMD_TRANS_XPRT_ID;
+				mutex_lock(&einfo->smd_lock);
+				while (smd_write_avail(einfo->smd_ch) <
+								sizeof(cmd))
+					msleep(20);
+				smd_write(einfo->smd_ch, &cmd, sizeof(cmd));
+				mutex_unlock(&einfo->smd_lock);
+				continue;
+			} else {
+				SMDXPRT_DBG(einfo,
+						"%s G-Link Remote Open '%s'\n",
+						__func__, name);
+			}
+
+			ch->rcid = cmd.id;
+			mutex_lock(&einfo->rx_cmd_lock);
+			einfo->xprt_if.glink_core_if_ptr->rx_cmd_ch_remote_open(
+								&einfo->xprt_if,
+								cmd.id,
+								name,
+								cmd.priority);
+			mutex_unlock(&einfo->rx_cmd_lock);
+		} else if (cmd.cmd == CMD_OPEN_ACK) {
+			SMDXPRT_INFO(einfo,
+				"%s RX OPEN ACK lcid %u; xprt_req %u\n",
+				__func__, cmd.id, cmd.priority);
+
+			spin_lock_irqsave(&einfo->channels_lock, flags);
+			list_for_each_entry(ch, &einfo->channels, node)
+				if (cmd.id == ch->lcid) {
+					found = true;
+					break;
+				}
+			spin_unlock_irqrestore(&einfo->channels_lock, flags);
+			if (!found) {
+				SMDXPRT_ERR(einfo, "%s No channel match %u\n",
+						__func__, cmd.id);
+				continue;
+			}
+			reinit_completion(&ch->open_notifier);
+			add_platform_driver(ch);
+			mutex_lock(&einfo->rx_cmd_lock);
+			einfo->xprt_if.glink_core_if_ptr->rx_cmd_ch_open_ack(
+								&einfo->xprt_if,
+								cmd.id,
+								cmd.priority);
+			mutex_unlock(&einfo->rx_cmd_lock);
+			complete_all(&ch->open_notifier);
+		} else if (cmd.cmd == CMD_CLOSE) {
+			SMDXPRT_INFO(einfo, "%s RX REMOTE CLOSE rcid %u\n",
+					__func__, cmd.id);
+			spin_lock_irqsave(&einfo->channels_lock, flags);
+			list_for_each_entry(ch, &einfo->channels, node)
+				if (cmd.id == ch->rcid) {
+					found = true;
+					break;
+				}
+			spin_unlock_irqrestore(&einfo->channels_lock, flags);
+
+			if (!found)
+				SMDXPRT_ERR(einfo, "%s no matching rcid %u\n",
+						__func__, cmd.id);
+
+			if (found && !ch->remote_legacy) {
+				mutex_lock(&einfo->rx_cmd_lock);
+				einfo->xprt_if.glink_core_if_ptr->
+							rx_cmd_ch_remote_close(
+								&einfo->xprt_if,
+								cmd.id);
+				mutex_unlock(&einfo->rx_cmd_lock);
+			} else {
+				/* not found or a legacy channel */
+				SMDXPRT_INFO(einfo,
+						"%s Sim RX CLOSE ACK lcid %u\n",
+						__func__, cmd.id);
+				cmd.cmd = CMD_CLOSE_ACK;
+				mutex_lock(&einfo->smd_lock);
+				while (smd_write_avail(einfo->smd_ch) <
+								sizeof(cmd))
+					msleep(20);
+				smd_write(einfo->smd_ch, &cmd, sizeof(cmd));
+				mutex_unlock(&einfo->smd_lock);
+			}
+		} else if (cmd.cmd == CMD_CLOSE_ACK) {
+			int rcu_id;
+
+			SMDXPRT_INFO(einfo, "%s RX CLOSE ACK lcid %u\n",
+					__func__, cmd.id);
+
+			spin_lock_irqsave(&einfo->channels_lock, flags);
+			list_for_each_entry(ch, &einfo->channels, node) {
+				if (cmd.id == ch->lcid) {
+					found = true;
+					break;
+				}
+			}
+			spin_unlock_irqrestore(&einfo->channels_lock, flags);
+			if (!found) {
+				SMDXPRT_ERR(einfo, "%s LCID not found %u\n",
+						__func__, cmd.id);
+				continue;
+			}
+
+			rcu_id = srcu_read_lock(&einfo->ssr_sync);
+			smd_data_ch_close(ch);
+			srcu_read_unlock(&einfo->ssr_sync, rcu_id);
+			mutex_lock(&einfo->rx_cmd_lock);
+			einfo->xprt_if.glink_core_if_ptr->rx_cmd_ch_close_ack(
+								&einfo->xprt_if,
+								cmd.id);
+			mutex_unlock(&einfo->rx_cmd_lock);
+		}
+	}
+}
+
+/**
+ * ctl_ch_notify() - process an event from the smd channel for ch migration
+ * @priv:	The edge the event occurred on.
+ * @event:	The event to process.
+ */
+static void ctl_ch_notify(void *priv, unsigned event)
+{
+	struct edge_info *einfo = priv;
+
+	switch (event) {
+	case SMD_EVENT_DATA:
+		schedule_work(&einfo->work);
+		break;
+	case SMD_EVENT_OPEN:
+		einfo->smd_ctl_ch_open = true;
+		break;
+	case SMD_EVENT_CLOSE:
+		einfo->smd_ctl_ch_open = false;
+		break;
+	}
+}
+
+static int ctl_ch_probe(struct platform_device *pdev)
+{
+	int i;
+	struct edge_info *einfo;
+	int ret = 0;
+
+	for (i = 0; i < NUM_EDGES; ++i)
+		if (pdev->id == edge_infos[i].smd_edge)
+			break;
+
+	/* Guard against an unknown edge id indexing past the array. */
+	if (i == NUM_EDGES)
+		return -ENODEV;
+
+	einfo = &edge_infos[i];
+	ret = smd_named_open_on_edge("GLINK_CTRL", einfo->smd_edge,
+			&einfo->smd_ch, einfo, ctl_ch_notify);
+	if (ret != 0)
+		SMDXPRT_ERR(einfo,
+			"%s Opening failed %d for %d:'GLINK_CTRL'\n",
+			__func__, ret, einfo->smd_edge);
+	return ret;
+}
+
+/**
+ * ssr_work_func() - process the end of ssr
+ * @work:	The ssr task to finish.
+ */
+static void ssr_work_func(struct work_struct *work)
+{
+	struct delayed_work *w;
+	struct edge_info *einfo;
+
+	w = container_of(work, struct delayed_work, work);
+	einfo = container_of(w, struct edge_info, ssr_work);
+
+	mutex_lock(&einfo->in_ssr_lock);
+	if (einfo->in_ssr) {
+		einfo->in_ssr = false;
+		einfo->xprt_if.glink_core_if_ptr->link_up(&einfo->xprt_if);
+	}
+	mutex_unlock(&einfo->in_ssr_lock);
+}
+
+/**
+ * deferred_close_ack() - Generate a deferred channel close ack
+ * @work:	The channel close ack work to generate.
+ */
+static void deferred_close_ack(struct work_struct *work)
+{
+	struct channel_work *ch_work;
+	struct channel *ch;
+
+	ch_work = container_of(work, struct channel_work, work);
+	ch = ch_work->ch;
+	mutex_lock(&ch->edge->rx_cmd_lock);
+	ch->edge->xprt_if.glink_core_if_ptr->rx_cmd_ch_close_ack(
+				&ch->edge->xprt_if, ch->lcid);
+	mutex_unlock(&ch->edge->rx_cmd_lock);
+	kfree(ch_work);
+}
+
+/**
+ * process_tx_done() - process a tx done task
+ * @work:	The tx done task to process.
+ */
+static void process_tx_done(struct work_struct *work)
+{
+	struct channel_work *ch_work;
+	struct channel *ch;
+	struct edge_info *einfo;
+	uint32_t riid;
+
+	ch_work = container_of(work, struct channel_work, work);
+	ch = ch_work->ch;
+	riid = ch_work->iid;
+	einfo = ch->edge;
+	kfree(ch_work);
+	einfo->xprt_if.glink_core_if_ptr->rx_cmd_tx_done(&einfo->xprt_if,
+								ch->rcid,
+								riid,
+								false);
+}
+
+/**
+ * process_open_event() - process an open event task
+ * @work:	The open task to process.
+ */
+static void process_open_event(struct work_struct *work)
+{
+	struct channel_work *ch_work;
+	struct channel *ch;
+	struct edge_info *einfo;
+	int ret;
+
+	ch_work = container_of(work, struct channel_work, work);
+	ch = ch_work->ch;
+	einfo = ch->edge;
+	/*
+	 * The SMD client is supposed to already know its channel type, but we
+	 * are just a translation layer, so we need to dynamically detect the
+	 * channel type.
+	 */
+	ret = smd_write_segment_avail(ch->smd_ch);
+	if (ret == -ENODEV)
+		ch->streaming_ch = true;
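+	/*
+	 * A legacy SMD remote never sends a G-Link OPEN, so no rcid exists
+	 * yet; synthesize one from the lcid so the G-Link core can track the
+	 * channel.
+	 */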
+	if (ch->remote_legacy || !ch->rcid) {
+		ch->remote_legacy = true;
+		ch->rcid = ch->lcid + LEGACY_RCID_CHANNEL_OFFSET;
+		mutex_lock(&einfo->rx_cmd_lock);
+		einfo->xprt_if.glink_core_if_ptr->rx_cmd_ch_remote_open(
+							&einfo->xprt_if,
+							ch->rcid,
+							ch->name,
+							SMD_TRANS_XPRT_ID);
+		mutex_unlock(&einfo->rx_cmd_lock);
+	}
+	mutex_lock(&ch->ch_tasklet_lock);
+	if (!ch->is_tasklet_enabled) {
+		tasklet_enable(&ch->data_tasklet);
+		ch->is_tasklet_enabled = true;
+	}
+	mutex_unlock(&ch->ch_tasklet_lock);
+	wait_for_completion(&ch->open_notifier);
+	kfree(ch_work);
+}
+
+/**
+ * process_close_event() - process a close event task
+ * @work:	The close task to process.
+ */
+static void process_close_event(struct work_struct *work)
+{
+	struct channel_work *ch_work;
+	struct channel *ch;
+	struct edge_info *einfo;
+
+	ch_work = container_of(work, struct channel_work, work);
+	ch = ch_work->ch;
+	einfo = ch->edge;
+	kfree(ch_work);
+	if (ch->remote_legacy) {
+		mutex_lock(&einfo->rx_cmd_lock);
+		einfo->xprt_if.glink_core_if_ptr->rx_cmd_ch_remote_close(
+								&einfo->xprt_if,
+								ch->rcid);
+		mutex_unlock(&einfo->rx_cmd_lock);
+	}
+	mutex_lock(&ch->ch_tasklet_lock);
+	if (ch->is_tasklet_enabled) {
+		tasklet_disable(&ch->data_tasklet);
+		ch->is_tasklet_enabled = false;
+	}
+	mutex_unlock(&ch->ch_tasklet_lock);
+	ch->rcid = 0;
+}
+
+/**
+ * process_status_event() - process a status event task
+ * @work:	The status task to process.
+ */
+static void process_status_event(struct work_struct *work)
+{
+	struct channel_work *ch_work;
+	struct channel *ch;
+	struct edge_info *einfo;
+	uint32_t sigs = 0;
+	int set;
+
+	ch_work = container_of(work, struct channel_work, work);
+	ch = ch_work->ch;
+	einfo = ch->edge;
+	kfree(ch_work);
+
+	set = smd_tiocmget(ch->smd_ch);
+	if (set < 0)
+		return;
+
+	if (set & TIOCM_DTR)
+		sigs |= SMD_DTR_SIG;
+	if (set & TIOCM_RTS)
+		sigs |= SMD_CTS_SIG;
+	if (set & TIOCM_CD)
+		sigs |= SMD_CD_SIG;
+	if (set & TIOCM_RI)
+		sigs |= SMD_RI_SIG;
+
+	einfo->xprt_if.glink_core_if_ptr->rx_cmd_remote_sigs(&einfo->xprt_if,
+								ch->rcid,
+								sigs);
+}
+
+/**
+ * process_reopen_event() - process a reopen ready event task
+ * @work:	The reopen ready task to process.
+ */
+static void process_reopen_event(struct work_struct *work)
+{
+	struct channel_work *ch_work;
+	struct channel *ch;
+	struct edge_info *einfo;
+
+	ch_work = container_of(work, struct channel_work, work);
+	ch = ch_work->ch;
+	einfo = ch->edge;
+	kfree(ch_work);
+	if (ch->remote_legacy) {
+		mutex_lock(&einfo->rx_cmd_lock);
+		einfo->xprt_if.glink_core_if_ptr->rx_cmd_ch_remote_close(
+								&einfo->xprt_if,
+								ch->rcid);
+		mutex_unlock(&einfo->rx_cmd_lock);
+	}
+	if (ch->local_legacy) {
+		ch->local_legacy = false;
+		mutex_lock(&einfo->rx_cmd_lock);
+		einfo->xprt_if.glink_core_if_ptr->rx_cmd_ch_close_ack(
+								&einfo->xprt_if,
+								ch->lcid);
+		mutex_unlock(&einfo->rx_cmd_lock);
+	}
+}
+
+/**
+ * process_data_event() - process a data event task
+ * @param:	The channel to process, cast to an unsigned long.
+ */
+static void process_data_event(unsigned long param)
+{
+	struct channel *ch;
+	struct edge_info *einfo;
+	struct glink_core_rx_intent *intent;
+	int pkt_remaining;
+	int read_avail;
+	struct intent_info *i;
+	uint32_t liid;
+	unsigned long intents_flags;
+	unsigned long rx_data_flags;
+
+	ch = (struct channel *)param;
+	einfo = ch->edge;
+
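+	/*
+	 * check_write_avail() left the read interrupt enabled so that this
+	 * tasklet runs when space frees up; space is now available, so
+	 * disable the interrupt again and ask the core to resume tx.
+	 */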
+	if (ch->tx_resume_needed && smd_write_avail(ch->smd_ch) > 0) {
+		ch->tx_resume_needed = false;
+		smd_disable_read_intr(ch->smd_ch);
+		einfo->xprt_if.glink_core_if_ptr->tx_resume(&einfo->xprt_if);
+	}
+
+	spin_lock_irqsave(&ch->rx_data_lock, rx_data_flags);
+	while (!ch->is_closing && smd_read_avail(ch->smd_ch)) {
+		if (!ch->streaming_ch)
+			pkt_remaining = smd_cur_packet_size(ch->smd_ch);
+		else
+			pkt_remaining = smd_read_avail(ch->smd_ch);
+		SMDXPRT_DBG(einfo, "%s Reading packet chunk %u '%s' %u:%u\n",
+				__func__, pkt_remaining, ch->name, ch->lcid,
+				ch->rcid);
+		if (!ch->cur_intent && !einfo->intentless) {
+			spin_lock_irqsave(&ch->intents_lock, intents_flags);
+			ch->intent_req = true;
+			ch->intent_req_size = pkt_remaining;
+			list_for_each_entry(i, &ch->intents, node) {
+				if (i->size >= pkt_remaining) {
+					list_del(&i->node);
+					ch->cur_intent = i;
+					ch->intent_req = false;
+					break;
+				}
+			}
+			spin_unlock_irqrestore(&ch->intents_lock,
+								intents_flags);
+			if (!ch->cur_intent) {
+				spin_unlock_irqrestore(&ch->rx_data_lock,
+								rx_data_flags);
+				SMDXPRT_DBG(einfo,
+					"%s Requesting intent '%s' %u:%u\n",
+					__func__, ch->name,
+					ch->lcid, ch->rcid);
+				einfo->xprt_if.glink_core_if_ptr->
+						rx_cmd_remote_rx_intent_req(
+								&einfo->xprt_if,
+								ch->rcid,
+								pkt_remaining);
+				return;
+			}
+		}
+
+		liid = einfo->intentless ? 0 : ch->cur_intent->liid;
+		read_avail = smd_read_avail(ch->smd_ch);
+		if (ch->streaming_ch && read_avail > pkt_remaining)
+			read_avail = pkt_remaining;
+		intent = einfo->xprt_if.glink_core_if_ptr->rx_get_pkt_ctx(
+							&einfo->xprt_if,
+							ch->rcid,
+							liid);
+		if (!intent->data && einfo->intentless) {
+			intent->data = kmalloc(pkt_remaining, GFP_ATOMIC);
+			if (!intent->data) {
+				SMDXPRT_DBG(einfo,
+					"%s kmalloc failed '%s' %u:%u\n",
+					__func__, ch->name,
+					ch->lcid, ch->rcid);
+				continue;
+			}
+		}
+		smd_read(ch->smd_ch, intent->data + intent->write_offset,
+								read_avail);
+		spin_unlock_irqrestore(&ch->rx_data_lock, rx_data_flags);
+		intent->write_offset += read_avail;
+		intent->pkt_size += read_avail;
+		if (read_avail == pkt_remaining && !einfo->intentless) {
+			spin_lock_irqsave(&ch->intents_lock, intents_flags);
+			list_add_tail(&ch->cur_intent->node, &ch->used_intents);
+			spin_unlock_irqrestore(&ch->intents_lock,
+								intents_flags);
+			ch->cur_intent = NULL;
+		}
+		einfo->xprt_if.glink_core_if_ptr->rx_put_pkt_ctx(
+						&einfo->xprt_if,
+						ch->rcid,
+						intent,
+						read_avail == pkt_remaining);
+		spin_lock_irqsave(&ch->rx_data_lock, rx_data_flags);
+	}
+	spin_unlock_irqrestore(&ch->rx_data_lock, rx_data_flags);
+}
+
+/**
+ * smd_data_ch_notify() - process an event from the smd channel
+ * @priv:	The channel the event occurred on.
+ * @event:	The event to process.
+ */
+static void smd_data_ch_notify(void *priv, unsigned event)
+{
+	struct channel *ch = priv;
+	struct channel_work *work;
+
+	switch (event) {
+	case SMD_EVENT_DATA:
+		tasklet_hi_schedule(&ch->data_tasklet);
+		break;
+	case SMD_EVENT_OPEN:
+		work = kmalloc(sizeof(*work), GFP_ATOMIC);
+		if (!work) {
+			SMDXPRT_ERR(ch->edge,
+					"%s: unable to process event %d\n",
+					__func__, SMD_EVENT_OPEN);
+			return;
+		}
+		work->ch = ch;
+		INIT_WORK(&work->work, process_open_event);
+		queue_work(ch->wq, &work->work);
+		break;
+	case SMD_EVENT_CLOSE:
+		work = kmalloc(sizeof(*work), GFP_ATOMIC);
+		if (!work) {
+			SMDXPRT_ERR(ch->edge,
+					"%s: unable to process event %d\n",
+					__func__, SMD_EVENT_CLOSE);
+			return;
+		}
+		work->ch = ch;
+		INIT_WORK(&work->work, process_close_event);
+		queue_work(ch->wq, &work->work);
+		break;
+	case SMD_EVENT_STATUS:
+		SMDXPRT_DBG(ch->edge,
+				"%s Processing STATUS for '%s' %u:%u\n",
+				__func__, ch->name, ch->lcid, ch->rcid);
+
+		work = kmalloc(sizeof(*work), GFP_ATOMIC);
+		if (!work) {
+			SMDXPRT_ERR(ch->edge,
+					"%s: unable to process event %d\n",
+					__func__, SMD_EVENT_STATUS);
+			return;
+		}
+		work->ch = ch;
+		INIT_WORK(&work->work, process_status_event);
+		queue_work(ch->wq, &work->work);
+		break;
+	case SMD_EVENT_REOPEN_READY:
+		work = kmalloc(sizeof(*work), GFP_ATOMIC);
+		if (!work) {
+			SMDXPRT_ERR(ch->edge,
+					"%s: unable to process event %d\n",
+					__func__, SMD_EVENT_REOPEN_READY);
+			return;
+		}
+		work->ch = ch;
+		INIT_WORK(&work->work, process_reopen_event);
+		queue_work(ch->wq, &work->work);
+		break;
+	}
+}
+
+/**
+ * smd_data_ch_close() - close and cleanup SMD data channel
+ * @ch:	Channel to cleanup
+ *
+ * Must be called with einfo->ssr_sync SRCU locked.
+ */
+static void smd_data_ch_close(struct channel *ch)
+{
+	struct intent_info *intent;
+	unsigned long flags;
+	struct channel_work *ch_work;
+
+	SMDXPRT_INFO(ch->edge, "%s Closing SMD channel lcid %u\n",
+			__func__, ch->lcid);
+
+	ch->is_closing = true;
+	ch->tx_resume_needed = false;
+	mutex_lock(&ch->ch_tasklet_lock);
+	if (ch->is_tasklet_enabled) {
+		tasklet_disable(&ch->data_tasklet);
+		ch->is_tasklet_enabled = false;
+	}
+	mutex_unlock(&ch->ch_tasklet_lock);
+	flush_workqueue(ch->wq);
+
+	mutex_lock(&ch->ch_probe_lock);
+	ch->wait_for_probe = false;
+	if (ch->smd_ch) {
+		smd_close(ch->smd_ch);
+		ch->smd_ch = NULL;
+	} else if (ch->local_legacy) {
+		ch_work = kzalloc(sizeof(*ch_work), GFP_KERNEL);
+		ch->local_legacy = false;
+		if (ch_work) {
+			ch_work->ch = ch;
+			INIT_WORK(&ch_work->work, deferred_close_ack);
+			queue_work(ch->wq, &ch_work->work);
+		}
+	}
+	mutex_unlock(&ch->ch_probe_lock);
+
+	spin_lock_irqsave(&ch->intents_lock, flags);
+	while (!list_empty(&ch->intents)) {
+		intent = list_first_entry(&ch->intents,
+				struct intent_info, node);
+		list_del(&intent->node);
+		kfree(intent);
+	}
+	while (!list_empty(&ch->used_intents)) {
+		intent = list_first_entry(&ch->used_intents,
+				struct intent_info, node);
+		list_del(&intent->node);
+		kfree(intent);
+	}
+	spin_unlock_irqrestore(&ch->intents_lock, flags);
+	ch->is_closing = false;
+}
+
+static void data_ch_probe_body(struct channel *ch)
+{
+	struct edge_info *einfo;
+	int ret;
+
+	einfo = ch->edge;
+	SMDXPRT_DBG(einfo, "%s Opening SMD channel %d:'%s'\n", __func__,
+			einfo->smd_edge, ch->name);
+
+	ret = smd_named_open_on_edge(ch->name, einfo->smd_edge, &ch->smd_ch, ch,
+			smd_data_ch_notify);
+	if (ret != 0) {
+		SMDXPRT_ERR(einfo, "%s Opening failed %d for %d:'%s'\n",
+				__func__, ret, einfo->smd_edge, ch->name);
+		return;
+	}
+	smd_disable_read_intr(ch->smd_ch);
+}
+
+static int channel_probe(struct platform_device *pdev)
+{
+	struct channel *ch;
+	struct edge_info *einfo;
+	int i;
+	bool found = false;
+	unsigned long flags;
+
+	for (i = 0; i < NUM_EDGES; ++i) {
+		if (edge_infos[i].smd_edge == pdev->id) {
+			found = true;
+			break;
+		}
+	}
+
+	if (!found)
+		return -EPROBE_DEFER;
+
+	einfo = &edge_infos[i];
+
+	found = false;
+	spin_lock_irqsave(&einfo->channels_lock, flags);
+	list_for_each_entry(ch, &einfo->channels, node) {
+		if (!strcmp(pdev->name, ch->name)) {
+			found = true;
+			break;
+		}
+	}
+	spin_unlock_irqrestore(&einfo->channels_lock, flags);
+
+	if (!found)
+		return -EPROBE_DEFER;
+
+	mutex_lock(&ch->ch_probe_lock);
+	if (!ch->wait_for_probe) {
+		mutex_unlock(&ch->ch_probe_lock);
+		return -EPROBE_DEFER;
+	}
+
+	ch->wait_for_probe = false;
+	ch->had_probed = true;
+
+	data_ch_probe_body(ch);
+	mutex_unlock(&ch->ch_probe_lock);
+
+	return 0;
+}
+
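+/*
+ * Successfully binding a device to a driver makes the driver core re-run
+ * the deferred probe list.  The dummy driver/device pair below exists
+ * solely to force such a bind and thereby kick deferred probes when a
+ * channel's platform driver is already registered; see
+ * add_platform_driver().
+ */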
+static int dummy_probe(struct platform_device *pdev)
+{
+	return 0;
+}
+
+static struct platform_driver dummy_driver = {
+	.probe = dummy_probe,
+	.driver = {
+		.name = "dummydriver12345",
+		.owner = THIS_MODULE,
+	},
+};
+
+static struct platform_device dummy_device = {
+	.name = "dummydriver12345",
+};
+
+/**
+ * add_platform_driver() - register the needed platform driver for a channel
+ * @ch:	The channel that needs a platform driver registered.
+ *
+ * SMD channels are unique by name/edge tuples, but the platform driver can
+ * only specify the name of the channel, so multiple unique SMD channels can
+ * be covered under one platform driver.  Therefore we need to carefully
+ * manage how channels are muxed onto platform drivers.
+ *
+ * Return: 0 on success or standard Linux error code.
+ */
+static int add_platform_driver(struct channel *ch)
+{
+	struct pdrvs *pdrv;
+	bool found = false;
+	int ret = 0;
+	static bool first = true;
+
+	mutex_lock(&pdrv_list_mutex);
+	mutex_lock(&ch->ch_probe_lock);
+	ch->wait_for_probe = true;
+	list_for_each_entry(pdrv, &pdrv_list, node) {
+		if (!strcmp(ch->name, pdrv->pdrv.driver.name)) {
+			found = true;
+			break;
+		}
+	}
+
+	if (!found) {
+		mutex_unlock(&ch->ch_probe_lock);
+		pdrv = kzalloc(sizeof(*pdrv), GFP_KERNEL);
+		if (!pdrv) {
+			ret = -ENOMEM;
+			mutex_lock(&ch->ch_probe_lock);
+			ch->wait_for_probe = false;
+			mutex_unlock(&ch->ch_probe_lock);
+			goto out;
+		}
+		pdrv->pdrv.driver.name = ch->name;
+		pdrv->pdrv.driver.owner = THIS_MODULE;
+		pdrv->pdrv.probe = channel_probe;
+		list_add_tail(&pdrv->node, &pdrv_list);
+		ret = platform_driver_register(&pdrv->pdrv);
+		if (ret) {
+			list_del(&pdrv->node);
+			kfree(pdrv);
+			mutex_lock(&ch->ch_probe_lock);
+			ch->wait_for_probe = false;
+			mutex_unlock(&ch->ch_probe_lock);
+		}
+	} else {
+		if (ch->had_probed)
+			data_ch_probe_body(ch);
+		mutex_unlock(&ch->ch_probe_lock);
+		/*
+		 * channel_probe might have seen the device we want, but
+		 * returned EPROBE_DEFER, so we need to kick the deferred
+		 * probe list.
+		 */
+		platform_driver_register(&dummy_driver);
+		if (first) {
+			platform_device_register(&dummy_device);
+			first = false;
+		}
+		platform_driver_unregister(&dummy_driver);
+	}
+
+out:
+	mutex_unlock(&pdrv_list_mutex);
+	return ret;
+}
+
+/**
+ * tx_cmd_version() - convert a version cmd to wire format and transmit
+ * @if_ptr:	The transport to transmit on.
+ * @version:	The version number to encode.
+ * @features:	The features information to encode.
+ *
+ * The remote side doesn't speak G-Link, so we fake the version negotiation.
+ */
+static void tx_cmd_version(struct glink_transport_if *if_ptr, uint32_t version,
+			   uint32_t features)
+{
+	struct edge_info *einfo;
+
+	einfo = container_of(if_ptr, struct edge_info, xprt_if);
+	einfo->xprt_if.glink_core_if_ptr->rx_cmd_version_ack(&einfo->xprt_if,
+								version,
+								features);
+	einfo->xprt_if.glink_core_if_ptr->rx_cmd_version(&einfo->xprt_if,
+								version,
+								features);
+}
+
+/**
+ * tx_cmd_version_ack() - convert a version ack cmd to wire format and transmit
+ * @if_ptr:	The transport to transmit on.
+ * @version:	The version number to encode.
+ * @features:	The features information to encode.
+ *
+ * The remote side doesn't speak G-Link.  The core is acking a version command
+ * we faked.  Do nothing.
+ */
+static void tx_cmd_version_ack(struct glink_transport_if *if_ptr,
+			       uint32_t version,
+			       uint32_t features)
+{
+}
+
+/**
+ * set_version() - activate a negotiated version and feature set
+ * @if_ptr:	The transport to configure.
+ * @version:	The version to use.
+ * @features:	The features to use.
+ *
+ * Return: The supported capabilities of the transport.
+ */
+static uint32_t set_version(struct glink_transport_if *if_ptr, uint32_t version,
+			uint32_t features)
+{
+	struct edge_info *einfo;
+	uint32_t capabilities = GCAP_SIGNALS | GCAP_AUTO_QUEUE_RX_INT;
+
+	einfo = container_of(if_ptr, struct edge_info, xprt_if);
+
+	return einfo->intentless ?
+				GCAP_INTENTLESS | capabilities : capabilities;
+}
+
+/**
+ * tx_cmd_ch_open() - convert a channel open cmd to wire format and transmit
+ * @if_ptr:	The transport to transmit on.
+ * @lcid:	The local channel id to encode.
+ * @name:	The channel name to encode.
+ * @req_xprt:	The transport the core would like to migrate this channel to.
+ *
+ * Return: 0 on success or standard Linux error code.
+ */
+static int tx_cmd_ch_open(struct glink_transport_if *if_ptr, uint32_t lcid,
+			  const char *name, uint16_t req_xprt)
+{
+	struct command {
+		uint32_t cmd;
+		uint32_t id;
+		uint32_t priority;
+	};
+	struct command cmd;
+	struct edge_info *einfo;
+	struct channel *ch;
+	struct channel *temp_ch;
+	bool found = false;
+	int rcu_id;
+	int ret = 0;
+	int len;
+	unsigned long flags;
+
+	einfo = container_of(if_ptr, struct edge_info, xprt_if);
+
+	rcu_id = srcu_read_lock(&einfo->ssr_sync);
+	if (einfo->in_ssr) {
+		srcu_read_unlock(&einfo->ssr_sync, rcu_id);
+		return -EFAULT;
+	}
+
+	spin_lock_irqsave(&einfo->channels_lock, flags);
+	list_for_each_entry(ch, &einfo->channels, node) {
+		if (!strcmp(name, ch->name)) {
+			found = true;
+			break;
+		}
+	}
+	spin_unlock_irqrestore(&einfo->channels_lock, flags);
+
+	if (!found) {
+		ch = kzalloc(sizeof(*ch), GFP_KERNEL);
+		if (!ch) {
+			SMDXPRT_ERR(einfo,
+				"%s: channel struct allocation failed\n",
+				__func__);
+			srcu_read_unlock(&einfo->ssr_sync, rcu_id);
+			return -ENOMEM;
+		}
+		strlcpy(ch->name, name, GLINK_NAME_SIZE);
+		ch->edge = einfo;
+		mutex_init(&ch->ch_probe_lock);
+		mutex_init(&ch->ch_tasklet_lock);
+		init_completion(&ch->open_notifier);
+		INIT_LIST_HEAD(&ch->intents);
+		INIT_LIST_HEAD(&ch->used_intents);
+		spin_lock_init(&ch->intents_lock);
+		spin_lock_init(&ch->rx_data_lock);
+		mutex_lock(&ch->ch_tasklet_lock);
+		tasklet_init(&ch->data_tasklet, process_data_event,
+				(unsigned long)ch);
+		tasklet_disable(&ch->data_tasklet);
+		ch->is_tasklet_enabled = false;
+		mutex_unlock(&ch->ch_tasklet_lock);
+		ch->wq = create_singlethread_workqueue(ch->name);
+		if (!ch->wq) {
+			SMDXPRT_ERR(einfo,
+					"%s: channel workqueue create failed\n",
+					__func__);
+			kfree(ch);
+			srcu_read_unlock(&einfo->ssr_sync, rcu_id);
+			return -ENOMEM;
+		}
+
+		/*
+		 * Channel could have been added to the list by someone else,
+		 * so scan again.  Channel creation is non-atomic, so an
+		 * unlock and recheck is necessary.
+		 */
+		temp_ch = ch;
+		spin_lock_irqsave(&einfo->channels_lock, flags);
+		list_for_each_entry(ch, &einfo->channels, node)
+			if (!strcmp(name, ch->name)) {
+				found = true;
+				break;
+			}
+
+		if (!found) {
+			ch = temp_ch;
+			list_add_tail(&ch->node, &einfo->channels);
+			spin_unlock_irqrestore(&einfo->channels_lock, flags);
+		} else {
+			spin_unlock_irqrestore(&einfo->channels_lock, flags);
+			tasklet_kill(&temp_ch->data_tasklet);
+			destroy_workqueue(temp_ch->wq);
+			kfree(temp_ch);
+		}
+	}
+
+	ch->tx_resume_needed = false;
+	ch->lcid = lcid;
+
+	if (einfo->smd_ctl_ch_open) {
+		SMDXPRT_INFO(einfo, "%s TX OPEN '%s' lcid %u reqxprt %u\n",
+				__func__, name, lcid, req_xprt);
+		cmd.cmd = CMD_OPEN;
+		cmd.id = lcid;
+		cmd.priority = req_xprt;
+		len = strlen(name) + 1;
+		len += sizeof(cmd);
+		mutex_lock(&einfo->smd_lock);
+		while (smd_write_avail(einfo->smd_ch) < len)
+			msleep(20);
+		smd_write_start(einfo->smd_ch, len);
+		smd_write_segment(einfo->smd_ch, &cmd, sizeof(cmd));
+		smd_write_segment(einfo->smd_ch, name, strlen(name) + 1);
+		smd_write_end(einfo->smd_ch);
+		mutex_unlock(&einfo->smd_lock);
+	} else {
+		SMDXPRT_INFO(einfo, "%s Legacy Open '%s' lcid %u reqxprt %u\n",
+				__func__, name, lcid, req_xprt);
+		ch->rcid = lcid + LEGACY_RCID_CHANNEL_OFFSET;
+		ch->local_legacy = true;
+		ch->remote_legacy = true;
+		reinit_completion(&ch->open_notifier);
+		ret = add_platform_driver(ch);
+		if (!ret) {
+			mutex_lock(&einfo->rx_cmd_lock);
+			einfo->xprt_if.glink_core_if_ptr->rx_cmd_ch_open_ack(
+						&einfo->xprt_if,
+						ch->lcid, SMD_TRANS_XPRT_ID);
+			mutex_unlock(&einfo->rx_cmd_lock);
+		}
+		complete_all(&ch->open_notifier);
+	}
+
+	srcu_read_unlock(&einfo->ssr_sync, rcu_id);
+	return ret;
+}
+
+/**
+ * tx_cmd_ch_close() - convert a channel close cmd to wire format and transmit
+ * @if_ptr:	The transport to transmit on.
+ * @lcid:	The local channel id to encode.
+ *
+ * Return: 0 on success or standard Linux error code.
+ */
+static int tx_cmd_ch_close(struct glink_transport_if *if_ptr, uint32_t lcid)
+{
+	struct command {
+		uint32_t cmd;
+		uint32_t id;
+		uint32_t reserved;
+	};
+	struct command cmd;
+	struct edge_info *einfo;
+	struct channel *ch;
+	int rcu_id;
+	bool found = false;
+	unsigned long flags;
+
+	einfo = container_of(if_ptr, struct edge_info, xprt_if);
+
+	rcu_id = srcu_read_lock(&einfo->ssr_sync);
+	if (einfo->in_ssr) {
+		srcu_read_unlock(&einfo->ssr_sync, rcu_id);
+		return -EFAULT;
+	}
+
+	spin_lock_irqsave(&einfo->channels_lock, flags);
+	list_for_each_entry(ch, &einfo->channels, node)
+		if (lcid == ch->lcid) {
+			found = true;
+			break;
+		}
+	spin_unlock_irqrestore(&einfo->channels_lock, flags);
+
+	if (!found) {
+		SMDXPRT_ERR(einfo, "%s LCID not found %u\n",
+				__func__, lcid);
+		srcu_read_unlock(&einfo->ssr_sync, rcu_id);
+		return -ENODEV;
+	}
+
+	if (!ch->local_legacy) {
+		SMDXPRT_INFO(einfo, "%s TX CLOSE lcid %u\n", __func__, lcid);
+		cmd.cmd = CMD_CLOSE;
+		cmd.id = lcid;
+		cmd.reserved = 0;
+		mutex_lock(&einfo->smd_lock);
+		while (smd_write_avail(einfo->smd_ch) < sizeof(cmd))
+			msleep(20);
+		smd_write(einfo->smd_ch, &cmd, sizeof(cmd));
+		mutex_unlock(&einfo->smd_lock);
+	} else {
+		smd_data_ch_close(ch);
+	}
+	srcu_read_unlock(&einfo->ssr_sync, rcu_id);
+	return 0;
+}
+
+/**
+ * tx_cmd_ch_remote_open_ack() - convert a channel open ack cmd to wire format
+ *				 and transmit
+ * @if_ptr:	The transport to transmit on.
+ * @rcid:	The remote channel id to encode.
+ * @xprt_resp:	The response to a transport migration request.
+ */
+static void tx_cmd_ch_remote_open_ack(struct glink_transport_if *if_ptr,
+				      uint32_t rcid, uint16_t xprt_resp)
+{
+	struct command {
+		uint32_t cmd;
+		uint32_t id;
+		uint32_t priority;
+	};
+	struct command cmd;
+	struct edge_info *einfo;
+	struct channel *ch;
+	bool found = false;
+	unsigned long flags;
+
+	einfo = container_of(if_ptr, struct edge_info, xprt_if);
+
+	if (!einfo->smd_ctl_ch_open)
+		return;
+
+	spin_lock_irqsave(&einfo->channels_lock, flags);
+	list_for_each_entry(ch, &einfo->channels, node)
+		if (ch->rcid == rcid) {
+			found = true;
+			break;
+		}
+	spin_unlock_irqrestore(&einfo->channels_lock, flags);
+
+	if (!found) {
+		SMDXPRT_ERR(einfo, "%s No matching SMD channel for rcid %u\n",
+				__func__, rcid);
+		return;
+	}
+
+	if (ch->remote_legacy) {
+		SMDXPRT_INFO(einfo, "%s Legacy ch rcid %u xprt_resp %u\n",
+				__func__, rcid, xprt_resp);
+		return;
+	}
+
+	SMDXPRT_INFO(einfo, "%s TX OPEN ACK rcid %u xprt_resp %u\n",
+			__func__, rcid, xprt_resp);
+
+	cmd.cmd = CMD_OPEN_ACK;
+	cmd.id = ch->rcid;
+	cmd.priority = xprt_resp;
+
+	mutex_lock(&einfo->smd_lock);
+	while (smd_write_avail(einfo->smd_ch) < sizeof(cmd))
+		msleep(20);
+
+	smd_write(einfo->smd_ch, &cmd, sizeof(cmd));
+	mutex_unlock(&einfo->smd_lock);
+}
+
+/**
+ * tx_cmd_ch_remote_close_ack() - convert a channel close ack cmd to wire format
+ *				  and transmit
+ * @if_ptr:	The transport to transmit on.
+ * @rcid:	The remote channel id to encode.
+ */
+static void tx_cmd_ch_remote_close_ack(struct glink_transport_if *if_ptr,
+				       uint32_t rcid)
+{
+	struct command {
+		uint32_t cmd;
+		uint32_t id;
+		uint32_t reserved;
+	};
+	struct command cmd;
+	struct edge_info *einfo;
+	struct channel *ch;
+	bool found = false;
+	unsigned long flags;
+
+	einfo = container_of(if_ptr, struct edge_info, xprt_if);
+
+	spin_lock_irqsave(&einfo->channels_lock, flags);
+	list_for_each_entry(ch, &einfo->channels, node)
+		if (rcid == ch->rcid) {
+			found = true;
+			break;
+		}
+	spin_unlock_irqrestore(&einfo->channels_lock, flags);
+
+	if (!found) {
+		SMDXPRT_ERR(einfo,
+			"%s No matching SMD channel for rcid %u\n",
+			__func__, rcid);
+		return;
+	}
+
+	if (!ch->remote_legacy) {
+		SMDXPRT_INFO(einfo, "%s TX CLOSE ACK rcid %u\n",
+				__func__, rcid);
+		cmd.cmd = CMD_CLOSE_ACK;
+		cmd.id = rcid;
+		cmd.reserved = 0;
+		mutex_lock(&einfo->smd_lock);
+		while (smd_write_avail(einfo->smd_ch) < sizeof(cmd))
+			msleep(20);
+		smd_write(einfo->smd_ch, &cmd, sizeof(cmd));
+		mutex_unlock(&einfo->smd_lock);
+	}
+	ch->remote_legacy = false;
+	ch->rcid = 0;
+}
+
+/**
+ * ssr() - process a subsystem restart notification of a transport
+ * @if_ptr:	The transport to restart.
+ *
+ * Return: 0 on success or standard Linux error code.
+ */
+static int ssr(struct glink_transport_if *if_ptr)
+{
+	struct edge_info *einfo;
+	struct channel *ch;
+	struct intent_info *intent;
+	unsigned long flags;
+
+	einfo = container_of(if_ptr, struct edge_info, xprt_if);
+
+	einfo->in_ssr = true;
+	synchronize_srcu(&einfo->ssr_sync);
+
+	einfo->smd_ctl_ch_open = false;
+
+	spin_lock_irqsave(&einfo->channels_lock, flags);
+	list_for_each_entry(ch, &einfo->channels, node) {
+		spin_unlock_irqrestore(&einfo->channels_lock, flags);
+		ch->is_closing = true;
+		mutex_lock(&ch->ch_tasklet_lock);
+		if (ch->is_tasklet_enabled) {
+			tasklet_disable(&ch->data_tasklet);
+			ch->is_tasklet_enabled = false;
+		}
+		mutex_unlock(&ch->ch_tasklet_lock);
+		flush_workqueue(ch->wq);
+		mutex_lock(&ch->ch_probe_lock);
+		ch->wait_for_probe = false;
+		if (ch->smd_ch) {
+			smd_close(ch->smd_ch);
+			ch->smd_ch = NULL;
+		}
+		mutex_unlock(&ch->ch_probe_lock);
+		ch->local_legacy = false;
+		ch->remote_legacy = false;
+		ch->rcid = 0;
+		ch->tx_resume_needed = false;
+
+		spin_lock_irqsave(&ch->intents_lock, flags);
+		while (!list_empty(&ch->intents)) {
+			intent = list_first_entry(&ch->intents,
+							struct intent_info,
+							node);
+			list_del(&intent->node);
+			kfree(intent);
+		}
+		while (!list_empty(&ch->used_intents)) {
+			intent = list_first_entry(&ch->used_intents,
+							struct intent_info,
+							node);
+			list_del(&intent->node);
+			kfree(intent);
+		}
+		kfree(ch->cur_intent);
+		ch->cur_intent = NULL;
+		spin_unlock_irqrestore(&ch->intents_lock, flags);
+		ch->is_closing = false;
+		spin_lock_irqsave(&einfo->channels_lock, flags);
+	}
+	spin_unlock_irqrestore(&einfo->channels_lock, flags);
+
+	einfo->xprt_if.glink_core_if_ptr->link_down(&einfo->xprt_if);
+	schedule_delayed_work(&einfo->ssr_work, 5 * HZ);
+	return 0;
+}
+
+/**
+ * allocate_rx_intent() - allocate/reserve space for RX Intent
+ * @if_ptr:	The transport the intent is associated with.
+ * @size:	Size of the intent.
+ * @intent:	Pointer to the intent structure.
+ *
+ * Assign "data" to the created buffer, since the transport creates a
+ * linear buffer, and assign "iovec" to the "intent" itself, so that the
+ * data can be passed to a client that only receives vector buffers.
+ * Note that returning NULL for the pointer is valid (it means that space has
+ * been reserved, but the actual pointer will be provided later).
+ *
+ * Return: 0 on success or standard Linux error code.
+ */
+static int allocate_rx_intent(struct glink_transport_if *if_ptr, size_t size,
+			      struct glink_core_rx_intent *intent)
+{
+	void *t;
+
+	t = kmalloc(size, GFP_KERNEL);
+	if (!t)
+		return -ENOMEM;
+
+	intent->data = t;
+	intent->iovec = (void *)intent;
+	intent->vprovider = rx_linear_vbuf_provider;
+	intent->pprovider = NULL;
+	return 0;
+}
+
+/**
+ * deallocate_rx_intent() - Deallocate space created for RX Intent
+ * @if_ptr:	The transport the intent is associated with.
+ * @intent:	Pointer to the intent structure.
+ *
+ * Return: 0 on success or standard Linux error code.
+ */
+static int deallocate_rx_intent(struct glink_transport_if *if_ptr,
+				struct glink_core_rx_intent *intent)
+{
+	if (!intent || !intent->data)
+		return -EINVAL;
+
+	kfree(intent->data);
+	intent->data = NULL;
+	intent->iovec = NULL;
+	intent->vprovider = NULL;
+	return 0;
+}
+
+/**
+ * check_and_resume_rx() - Check the RX state and resume it
+ * @ch:		Channel which needs to be checked.
+ * @intent_size:	Intent size being queued.
+ *
+ * This function checks if a receive intent is requested in the
+ * channel and resumes the RX if the queued receive intent satisifes
+ * the requested receive intent. This function must be called with
+ * ch->intents_lock locked.
+ */
+static void check_and_resume_rx(struct channel *ch, size_t intent_size)
+{
+	if (ch->intent_req && ch->intent_req_size <= intent_size) {
+		ch->intent_req = false;
+		tasklet_hi_schedule(&ch->data_tasklet);
+	}
+}
+
+/**
+ * tx_cmd_local_rx_intent() - convert an rx intent cmd to wire format and
+ *			      transmit
+ * @if_ptr:	The transport to transmit on.
+ * @lcid:	The local channel id to encode.
+ * @size:	The intent size to encode.
+ * @liid:	The local intent id to encode.
+ *
+ * Return: 0 on success or standard Linux error code.
+ */
+static int tx_cmd_local_rx_intent(struct glink_transport_if *if_ptr,
+				  uint32_t lcid, size_t size, uint32_t liid)
+{
+	struct edge_info *einfo;
+	struct channel *ch;
+	struct intent_info *intent;
+	int rcu_id;
+	unsigned long flags;
+
+	einfo = container_of(if_ptr, struct edge_info, xprt_if);
+
+	rcu_id = srcu_read_lock(&einfo->ssr_sync);
+	if (einfo->in_ssr) {
+		srcu_read_unlock(&einfo->ssr_sync, rcu_id);
+		return -EFAULT;
+	}
+
+	spin_lock_irqsave(&einfo->channels_lock, flags);
+	list_for_each_entry(ch, &einfo->channels, node) {
+		if (lcid == ch->lcid)
+			break;
+	}
+	spin_unlock_irqrestore(&einfo->channels_lock, flags);
+
+	intent = kmalloc(sizeof(*intent), GFP_KERNEL);
+	if (!intent) {
+		SMDXPRT_ERR(einfo, "%s: no memory for intent\n", __func__);
+		srcu_read_unlock(&einfo->ssr_sync, rcu_id);
+		return -ENOMEM;
+	}
+
+	intent->liid = liid;
+	intent->size = size;
+	spin_lock_irqsave(&ch->intents_lock, flags);
+	list_add_tail(&intent->node, &ch->intents);
+	check_and_resume_rx(ch, size);
+	spin_unlock_irqrestore(&ch->intents_lock, flags);
+
+	srcu_read_unlock(&einfo->ssr_sync, rcu_id);
+	return 0;
+}
+
+/**
+ * tx_cmd_local_rx_done() - convert an rx done cmd to wire format and transmit
+ * @if_ptr:	The transport to transmit on.
+ * @lcid:	The local channel id to encode.
+ * @liid:	The local intent id to encode.
+ * @reuse:	Reuse the consumed intent.
+ */
+static void tx_cmd_local_rx_done(struct glink_transport_if *if_ptr,
+				 uint32_t lcid, uint32_t liid, bool reuse)
+{
+	struct edge_info *einfo;
+	struct channel *ch;
+	struct intent_info *i;
+	unsigned long flags;
+
+	einfo = container_of(if_ptr, struct edge_info, xprt_if);
+	spin_lock_irqsave(&einfo->channels_lock, flags);
+	list_for_each_entry(ch, &einfo->channels, node) {
+		if (lcid == ch->lcid)
+			break;
+	}
+	spin_unlock_irqrestore(&einfo->channels_lock, flags);
+	spin_lock_irqsave(&ch->intents_lock, flags);
+	list_for_each_entry(i, &ch->used_intents, node) {
+		if (i->liid == liid) {
+			list_del(&i->node);
+			if (reuse) {
+				list_add_tail(&i->node, &ch->intents);
+				check_and_resume_rx(ch, i->size);
+			} else {
+				kfree(i);
+			}
+			break;
+		}
+	}
+	spin_unlock_irqrestore(&ch->intents_lock, flags);
+}
+
+/**
+ * tx() - convert a data transmit cmd to wire format and transmit
+ * @if_ptr:	The transport to transmit on.
+ * @lcid:	The local channel id to encode.
+ * @pctx:	The data to encode.
+ *
+ * Return: Number of bytes written or standard Linux error code.
+ */
+static int tx(struct glink_transport_if *if_ptr, uint32_t lcid,
+	      struct glink_core_tx_pkt *pctx)
+{
+	struct edge_info *einfo;
+	struct channel *ch;
+	int rc;
+	struct channel_work *tx_done;
+	const void *data_start;
+	size_t tx_size = 0;
+	int rcu_id;
+	unsigned long flags;
+
+	einfo = container_of(if_ptr, struct edge_info, xprt_if);
+
+	rcu_id = srcu_read_lock(&einfo->ssr_sync);
+	if (einfo->in_ssr) {
+		srcu_read_unlock(&einfo->ssr_sync, rcu_id);
+		return -EFAULT;
+	}
+
+	spin_lock_irqsave(&einfo->channels_lock, flags);
+	list_for_each_entry(ch, &einfo->channels, node) {
+		if (lcid == ch->lcid)
+			break;
+	}
+	spin_unlock_irqrestore(&einfo->channels_lock, flags);
+
+	data_start = get_tx_vaddr(pctx, pctx->size - pctx->size_remaining,
+				  &tx_size);
+	if (!data_start) {
+		srcu_read_unlock(&einfo->ssr_sync, rcu_id);
+		return -EINVAL;
+	}
+
+	if (!ch->streaming_ch) {
+		if (pctx->size == pctx->size_remaining) {
+			rc = check_write_avail(smd_write_avail, ch);
+			if (rc <= 0) {
+				srcu_read_unlock(&einfo->ssr_sync, rcu_id);
+				return rc;
+			}
+			rc = smd_write_start(ch->smd_ch, pctx->size);
+			if (rc) {
+				srcu_read_unlock(&einfo->ssr_sync, rcu_id);
+				return rc;
+			}
+		}
+
+		rc = check_write_avail(smd_write_segment_avail, ch);
+		if (rc <= 0) {
+			srcu_read_unlock(&einfo->ssr_sync, rcu_id);
+			return rc;
+		}
+		if (rc > tx_size)
+			rc = tx_size;
+		rc = smd_write_segment(ch->smd_ch, data_start, rc);
+		if (rc < 0) {
+			SMDXPRT_ERR(einfo, "%s: write segment failed %d\n",
+					__func__, rc);
+			srcu_read_unlock(&einfo->ssr_sync, rcu_id);
+			return rc;
+		}
+	} else {
+		rc = check_write_avail(smd_write_avail, ch);
+		if (rc <= 0) {
+			srcu_read_unlock(&einfo->ssr_sync, rcu_id);
+			return rc;
+		}
+		if (rc > tx_size)
+			rc = tx_size;
+		rc = smd_write(ch->smd_ch, data_start, rc);
+		if (rc < 0) {
+			SMDXPRT_ERR(einfo, "%s: write failed %d\n",
+					__func__, rc);
+			srcu_read_unlock(&einfo->ssr_sync, rcu_id);
+			return rc;
+		}
+	}
+
+	pctx->size_remaining -= rc;
+	if (!pctx->size_remaining) {
+		if (!ch->streaming_ch)
+			smd_write_end(ch->smd_ch);
+		tx_done = kmalloc(sizeof(*tx_done), GFP_ATOMIC);
+		if (!tx_done) {
+			SMDXPRT_ERR(einfo, "%s: failed allocation of tx_done\n",
+					__func__);
+			srcu_read_unlock(&einfo->ssr_sync, rcu_id);
+			return -ENOMEM;
+		}
+		tx_done->ch = ch;
+		tx_done->iid = pctx->riid;
+		INIT_WORK(&tx_done->work, process_tx_done);
+		queue_work(ch->wq, &tx_done->work);
+	}
+
+	srcu_read_unlock(&einfo->ssr_sync, rcu_id);
+	return rc;
+}
+
+/**
+ * tx_cmd_rx_intent_req() - convert an rx intent request cmd to wire format and
+ *			    transmit
+ * @if_ptr:	The transport to transmit on.
+ * @lcid:	The local channel id to encode.
+ * @size:	The requested intent size to encode.
+ *
+ * Return: 0 on success or standard Linux error code.
+ */
+static int tx_cmd_rx_intent_req(struct glink_transport_if *if_ptr,
+				uint32_t lcid, size_t size)
+{
+	struct edge_info *einfo;
+	struct channel *ch;
+	unsigned long flags;
+	int rcu_id;
+
+	einfo = container_of(if_ptr, struct edge_info, xprt_if);
+	rcu_id = srcu_read_lock(&einfo->ssr_sync);
+	if (einfo->in_ssr) {
+		srcu_read_unlock(&einfo->ssr_sync, rcu_id);
+		return -EFAULT;
+	}
+	spin_lock_irqsave(&einfo->channels_lock, flags);
+	list_for_each_entry(ch, &einfo->channels, node) {
+		if (lcid == ch->lcid)
+			break;
+	}
+	spin_unlock_irqrestore(&einfo->channels_lock, flags);
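+	/*
+	 * The remote side doesn't speak G-Link, so fake a granted ack and
+	 * queue a matching remote intent on its behalf.
+	 */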
+	einfo->xprt_if.glink_core_if_ptr->rx_cmd_rx_intent_req_ack(
+								&einfo->xprt_if,
+								ch->rcid,
+								true);
+	einfo->xprt_if.glink_core_if_ptr->rx_cmd_remote_rx_intent_put(
+							&einfo->xprt_if,
+							ch->rcid,
+							ch->next_intent_id++,
+							size);
+	srcu_read_unlock(&einfo->ssr_sync, rcu_id);
+	return 0;
+}
+
+/**
+ * tx_cmd_remote_rx_intent_req_ack() - convert an rx intent request ack cmd
+ *				to wire format and transmit
+ * @if_ptr:	The transport to transmit on.
+ * @lcid:	The local channel id to encode.
+ * @granted:	The request response to encode.
+ *
+ * The remote side doesn't speak G-Link.  The core is just acking a request we
+ * faked.  Do nothing.
+ *
+ * Return: 0 on success or standard Linux error code.
+ */
+static int tx_cmd_remote_rx_intent_req_ack(struct glink_transport_if *if_ptr,
+					   uint32_t lcid, bool granted)
+{
+	return 0;
+}
+
+/**
+ * tx_cmd_set_sigs() - convert a signal cmd to wire format and transmit
+ * @if_ptr:	The transport to transmit on.
+ * @lcid:	The local channel id to encode.
+ * @sigs:	The signals to encode.
+ *
+ * Return: 0 on success or standard Linux error code.
+ */
+static int tx_cmd_set_sigs(struct glink_transport_if *if_ptr, uint32_t lcid,
+			   uint32_t sigs)
+{
+	struct edge_info *einfo;
+	struct channel *ch;
+	uint32_t set = 0;
+	uint32_t clear = 0;
+	unsigned long flags;
+
+	einfo = container_of(if_ptr, struct edge_info, xprt_if);
+	spin_lock_irqsave(&einfo->channels_lock, flags);
+	list_for_each_entry(ch, &einfo->channels, node) {
+		if (lcid == ch->lcid)
+			break;
+	}
+	spin_unlock_irqrestore(&einfo->channels_lock, flags);
+
+	if (sigs & SMD_DTR_SIG)
+		set |= TIOCM_DTR;
+	else
+		clear |= TIOCM_DTR;
+
+	if (sigs & SMD_CTS_SIG)
+		set |= TIOCM_RTS;
+	else
+		clear |= TIOCM_RTS;
+
+	if (sigs & SMD_CD_SIG)
+		set |= TIOCM_CD;
+	else
+		clear |= TIOCM_CD;
+
+	if (sigs & SMD_RI_SIG)
+		set |= TIOCM_RI;
+	else
+		clear |= TIOCM_RI;
+
+	return smd_tiocmset(ch->smd_ch, set, clear);
+}
+
+/**
+ * poll() - poll for data on a channel
+ * @if_ptr:	The transport to transmit on.
+ * @lcid:	The local channel id for the channel.
+ *
+ * Return: 0 if no data available, 1 if data available, or standard Linux error
+ * code.
+ */
+static int poll(struct glink_transport_if *if_ptr, uint32_t lcid)
+{
+	struct edge_info *einfo;
+	struct channel *ch;
+	int rc;
+	unsigned long flags;
+
+	einfo = container_of(if_ptr, struct edge_info, xprt_if);
+	spin_lock_irqsave(&einfo->channels_lock, flags);
+	list_for_each_entry(ch, &einfo->channels, node) {
+		if (lcid == ch->lcid)
+			break;
+	}
+	spin_unlock_irqrestore(&einfo->channels_lock, flags);
+	rc = smd_is_pkt_avail(ch->smd_ch);
+	if (rc == 1)
+		process_data_event((unsigned long)ch);
+	return rc;
+}
+
+/**
+ * mask_rx_irq() - mask the receive irq
+ * @if_ptr:	The transport to transmit on.
+ * @lcid:	The local channel id for the channel.
+ * @mask:	True to mask the irq, false to unmask.
+ * @pstruct:	Platform defined structure for handling the masking.
+ *
+ * Return: 0 on success or standard Linux error code.
+ */
+static int mask_rx_irq(struct glink_transport_if *if_ptr, uint32_t lcid,
+		       bool mask, void *pstruct)
+{
+	struct edge_info *einfo;
+	struct channel *ch;
+	int ret = 0;
+	unsigned long flags;
+
+	einfo = container_of(if_ptr, struct edge_info, xprt_if);
+	spin_lock_irqsave(&einfo->channels_lock, flags);
+	list_for_each_entry(ch, &einfo->channels, node) {
+		if (lcid == ch->lcid)
+			break;
+	}
+	spin_unlock_irqrestore(&einfo->channels_lock, flags);
+	ret = smd_mask_receive_interrupt(ch->smd_ch, mask, pstruct);
+
+	if (ret == 0)
+		einfo->irq_disabled = mask;
+
+	return ret;
+}
+
+/**
+ * negotiate_features_v1() - determine what features of a version can be used
+ * @if_ptr:	The transport for which features are being negotiated.
+ * @version:	The version negotiated.
+ * @features:	The set of requested features.
+ *
+ * Return: What set of the requested features can be supported.
+ */
+static uint32_t negotiate_features_v1(struct glink_transport_if *if_ptr,
+				      const struct glink_core_version *version,
+				      uint32_t features)
+{
+	return features & version->features;
+}
+
+/**
+ * init_xprt_if() - initialize the xprt_if for an edge
+ * @einfo:	The edge to initialize.
+ */
+static void init_xprt_if(struct edge_info *einfo)
+{
+	einfo->xprt_if.tx_cmd_version = tx_cmd_version;
+	einfo->xprt_if.tx_cmd_version_ack = tx_cmd_version_ack;
+	einfo->xprt_if.set_version = set_version;
+	einfo->xprt_if.tx_cmd_ch_open = tx_cmd_ch_open;
+	einfo->xprt_if.tx_cmd_ch_close = tx_cmd_ch_close;
+	einfo->xprt_if.tx_cmd_ch_remote_open_ack = tx_cmd_ch_remote_open_ack;
+	einfo->xprt_if.tx_cmd_ch_remote_close_ack = tx_cmd_ch_remote_close_ack;
+	einfo->xprt_if.ssr = ssr;
+	einfo->xprt_if.allocate_rx_intent = allocate_rx_intent;
+	einfo->xprt_if.deallocate_rx_intent = deallocate_rx_intent;
+	einfo->xprt_if.tx_cmd_local_rx_intent = tx_cmd_local_rx_intent;
+	einfo->xprt_if.tx_cmd_local_rx_done = tx_cmd_local_rx_done;
+	einfo->xprt_if.tx = tx;
+	einfo->xprt_if.tx_cmd_rx_intent_req = tx_cmd_rx_intent_req;
+	einfo->xprt_if.tx_cmd_remote_rx_intent_req_ack =
+						tx_cmd_remote_rx_intent_req_ack;
+	einfo->xprt_if.tx_cmd_set_sigs = tx_cmd_set_sigs;
+	einfo->xprt_if.poll = poll;
+	einfo->xprt_if.mask_rx_irq = mask_rx_irq;
+}
+
+/**
+ * init_xprt_cfg() - initialize the xprt_cfg for an edge
+ * @einfo:	The edge to initialize.
+ */
+static void init_xprt_cfg(struct edge_info *einfo)
+{
+	einfo->xprt_cfg.name = XPRT_NAME;
+	einfo->xprt_cfg.versions = versions;
+	einfo->xprt_cfg.versions_entries = ARRAY_SIZE(versions);
+	einfo->xprt_cfg.max_cid = SZ_64;
+	einfo->xprt_cfg.max_iid = SZ_128;
+}
+
+static struct platform_driver migration_driver = {
+	.probe		= ctl_ch_probe,
+	.driver		= {
+		.name	= "GLINK_CTRL",
+		.owner	= THIS_MODULE,
+	},
+};
+
+static int __init glink_smd_xprt_init(void)
+{
+	int i;
+	int rc;
+	struct edge_info *einfo;
+
+	for (i = 0; i < NUM_EDGES; ++i) {
+		einfo = &edge_infos[i];
+		init_xprt_cfg(einfo);
+		init_xprt_if(einfo);
+		INIT_LIST_HEAD(&einfo->channels);
+		spin_lock_init(&einfo->channels_lock);
+		init_srcu_struct(&einfo->ssr_sync);
+		mutex_init(&einfo->smd_lock);
+		mutex_init(&einfo->in_ssr_lock);
+		mutex_init(&einfo->rx_cmd_lock);
+		INIT_DELAYED_WORK(&einfo->ssr_work, ssr_work_func);
+		INIT_WORK(&einfo->work, process_ctl_event);
+		rc = glink_core_register_transport(&einfo->xprt_if,
+							&einfo->xprt_cfg);
+		if (rc)
+			SMDXPRT_ERR(einfo,
+				"%s: %s glink register xprt failed %d\n",
+				__func__, einfo->xprt_cfg.edge, rc);
+		else
+			einfo->xprt_if.glink_core_if_ptr->link_up(
+							&einfo->xprt_if);
+	}
+
+	platform_driver_register(&migration_driver);
+
+	return 0;
+}
+arch_initcall(glink_smd_xprt_init);
+
+MODULE_DESCRIPTION("MSM G-Link SMD Transport");
+MODULE_LICENSE("GPL v2");
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/soc/qcom/glink_smem_native_xprt.c	2019-10-29 09:26:24.809214589 +0100
@@ -0,0 +1,3140 @@
+/* Copyright (c) 2014-2019, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#include <linux/debugfs.h>
+#include <linux/err.h>
+#include <linux/fs.h>
+#include <linux/gfp.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/ipc_logging.h>
+#include <linux/irq.h>
+#include <linux/kernel.h>
+#include <linux/kthread.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
+#include <linux/printk.h>
+#include <linux/sched.h>
+#include <linux/seq_file.h>
+#include <linux/sizes.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/srcu.h>
+#include <linux/wait.h>
+#include <soc/qcom/smem.h>
+#include <soc/qcom/tracer_pkt.h>
+#include "glink_core_if.h"
+#include "glink_private.h"
+#include "glink_xprt_if.h"
+
+#define XPRT_NAME "smem"
+#define FIFO_FULL_RESERVE 8
+#define FIFO_ALIGNMENT 8
+#define TX_BLOCKED_CMD_RESERVE 8 /* size of struct read_notif_request */
+#define SMEM_CH_DESC_SIZE 32
+#define RPM_TOC_ID 0x67727430
+#define RPM_TX_FIFO_ID 0x61703272
+#define RPM_RX_FIFO_ID 0x72326170
+#define RPM_TOC_SIZE 256
+#define RPM_MAX_TOC_ENTRIES 20
+#define RPM_FIFO_ADDR_ALIGN_BYTES 3
+#define TRACER_PKT_FEATURE BIT(2)
+#define DEFERRED_CMDS_THRESHOLD 25
+
+/**
+ * enum command_types - definition of the types of commands sent/received
+ * @VERSION_CMD:		Version and feature set supported
+ * @VERSION_ACK_CMD:		Response for @VERSION_CMD
+ * @OPEN_CMD:			Open a channel
+ * @CLOSE_CMD:			Close a channel
+ * @OPEN_ACK_CMD:		Response to @OPEN_CMD
+ * @RX_INTENT_CMD:		RX intent for a channel was queued
+ * @RX_DONE_CMD:		Use of RX intent for a channel is complete
+ * @RX_INTENT_REQ_CMD:		Request to have RX intent queued
+ * @RX_INTENT_REQ_ACK_CMD:	Response for @RX_INTENT_REQ_CMD
+ * @TX_DATA_CMD:		Start of a data transfer
+ * @ZERO_COPY_TX_DATA_CMD:	Start of a data transfer with zero copy
+ * @CLOSE_ACK_CMD:		Response for @CLOSE_CMD
+ * @TX_DATA_CONT_CMD:		Continuation or end of a data transfer
+ * @READ_NOTIF_CMD:		Request for a notification when this cmd is read
+ * @RX_DONE_W_REUSE_CMD:	Same as @RX_DONE_CMD but also reuse the used intent
+ * @SIGNALS_CMD:		Sideband signals
+ * @TRACER_PKT_CMD:		Start of a Tracer Packet Command
+ * @TRACER_PKT_CONT_CMD:	Continuation or end of a Tracer Packet Command
+ */
+enum command_types {
+	VERSION_CMD,
+	VERSION_ACK_CMD,
+	OPEN_CMD,
+	CLOSE_CMD,
+	OPEN_ACK_CMD,
+	RX_INTENT_CMD,
+	RX_DONE_CMD,
+	RX_INTENT_REQ_CMD,
+	RX_INTENT_REQ_ACK_CMD,
+	TX_DATA_CMD,
+	ZERO_COPY_TX_DATA_CMD,
+	CLOSE_ACK_CMD,
+	TX_DATA_CONT_CMD,
+	READ_NOTIF_CMD,
+	RX_DONE_W_REUSE_CMD,
+	SIGNALS_CMD,
+	TRACER_PKT_CMD,
+	TRACER_PKT_CONT_CMD,
+};
+
+/**
+ * struct channel_desc - description of a channel fifo with a remote entity
+ * @read_index:		The read index for the fifo where data should be
+ *			consumed from.
+ * @write_index:	The write index for the fifo where data should be
+ *			produced to.
+ *
+ * This structure resides in SMEM and contains the control information for the
+ * fifo data pipes of the channel.  There is one physical channel between us
+ * and a remote entity.
+ */
+struct channel_desc {
+	uint32_t read_index;
+	uint32_t write_index;
+};
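+
+/*
+ * Illustrative index arithmetic for such a descriptor (a sketch only; the
+ * real fifo helpers also account for wrap-around and FIFO_FULL_RESERVE):
+ *
+ *	used  = (write_index - read_index + fifo_size) % fifo_size;
+ *	avail = fifo_size - used - FIFO_FULL_RESERVE;
+ */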
+
+/**
+ * struct mailbox_config_info - description of a mailbox transport channel
+ * @tx_read_index:	Offset into the tx fifo where data should be read from.
+ * @tx_write_index:	Offset into the tx fifo where new data will be placed.
+ * @tx_size:		Size of the transmit fifo in bytes.
+ * @rx_read_index:	Offset into the rx fifo where data should be read from.
+ * @rx_write_index:	Offset into the rx fifo where new data will be placed.
+ * @rx_size:		Size of the receive fifo in bytes.
+ * @fifo:		The fifos for the channel.
+ */
+struct mailbox_config_info {
+	uint32_t tx_read_index;
+	uint32_t tx_write_index;
+	uint32_t tx_size;
+	uint32_t rx_read_index;
+	uint32_t rx_write_index;
+	uint32_t rx_size;
+	char fifo[]; /* tx fifo, then rx fifo */
+};
+
+/**
+ * struct edge_info - local information for managing a single complete edge
+ * @xprt_if:			The transport interface registered with the
+ *				glink core associated with this edge.
+ * @xprt_cfg:			The transport configuration for the glink core
+ *				associated with this edge.
+ * @intentless:			True if this edge runs in intentless mode.
+ * @irq_disabled:		Flag indicating whether the interrupt is enabled
+ *				or disabled.
+ * @remote_proc_id:		The SMEM processor id for the remote side.
+ * @rx_reset_reg:		Reference to the register to reset the rx irq
+ *				line, if applicable.
+ * @out_irq_reg:		Reference to the register to send an irq to the
+ *				remote side.
+ * @out_irq_mask:		Mask written to @out_irq_reg to trigger the
+ *				correct irq.
+ * @irq_line:			The incoming interrupt line.
+ * @tx_irq_count:		Number of interrupts triggered.
+ * @rx_irq_count:		Number of interrupts received.
+ * @tx_ch_desc:			Reference to the channel description structure
+ *				for tx in SMEM for this edge.
+ * @rx_ch_desc:			Reference to the channel description structure
+ *				for rx in SMEM for this edge.
+ * @tx_fifo:			Reference to the transmit fifo in SMEM.
+ * @rx_fifo:			Reference to the receive fifo in SMEM.
+ * @tx_fifo_size:		Total size of @tx_fifo.
+ * @rx_fifo_size:		Total size of @rx_fifo.
+ * @read_from_fifo:		Memcpy for this edge.
+ * @write_to_fifo:		Memcpy for this edge.
+ * @write_lock:			Lock to serialize access to @tx_fifo.
+ * @tx_blocked_queue:		Queue of entities waiting for the remote side to
+ *				signal @tx_fifo has flushed and is now empty.
+ * @tx_resume_needed:		A tx resume signal needs to be sent to the glink
+ *				core once the remote side indicates @tx_fifo has
+ *				flushed.
+ * @tx_blocked_signal_sent:	Flag to indicate the flush signal has already
+ *				been sent, and a response is pending from the
+ *				remote side.  Protected by @write_lock.
+ * @kwork:			Work to be executed when an irq is received.
+ * @kworker:			Handle to the entity that processes
+ *				deferred commands.
+ * @task:			Handle to the task context used to run @kworker.
+ * @use_ref:			Active uses of this transport use this to grab
+ *				a reference.  Used for ssr synchronization.
+ * @in_ssr:			Signals if this transport is in ssr.
+ * @rx_lock:			Used to serialize concurrent instances of rx
+ *				processing.
+ * @deferred_cmds:		List of deferred commands that need to be
+ *				processed in process context.
+ * @deferred_cmds_cnt:		Number of deferred commands in queue.
+ * @rt_vote_lock:		Serialize access to RT rx votes
+ * @rt_votes:			Vote count for RT rx thread priority
+ * @num_pw_states:		Size of @ramp_time_us.
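+ * @readback:			Dummy readback of the tx write index, used to
+ *				ensure descriptor writes have posted before an
+ *				interrupt is sent.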
+ * @ramp_time_us:		Array of ramp times in microseconds where array
+ *				index position represents a power state.
+ * @mailbox:			Mailbox transport channel description reference.
+ */
+struct edge_info {
+	struct glink_transport_if xprt_if;
+	struct glink_core_transport_cfg xprt_cfg;
+	bool intentless;
+	bool irq_disabled;
+	uint32_t remote_proc_id;
+	void __iomem *rx_reset_reg;
+	void __iomem *out_irq_reg;
+	uint32_t out_irq_mask;
+	uint32_t irq_line;
+	uint32_t tx_irq_count;
+	uint32_t rx_irq_count;
+	struct channel_desc *tx_ch_desc;
+	struct channel_desc *rx_ch_desc;
+	void __iomem *tx_fifo;
+	void __iomem *rx_fifo;
+	uint32_t tx_fifo_size;
+	uint32_t rx_fifo_size;
+	void * (*read_from_fifo)(void *dest, const void *src, size_t num_bytes);
+	void * (*write_to_fifo)(void *dest, const void *src, size_t num_bytes);
+	spinlock_t write_lock;
+	wait_queue_head_t tx_blocked_queue;
+	bool tx_resume_needed;
+	bool tx_blocked_signal_sent;
+	struct kthread_work kwork;
+	struct kthread_worker kworker;
+	struct task_struct *task;
+	struct srcu_struct use_ref;
+	bool in_ssr;
+	spinlock_t rx_lock;
+	struct list_head deferred_cmds;
+	uint32_t deferred_cmds_cnt;
+	spinlock_t rt_vote_lock;
+	uint32_t rt_votes;
+	uint32_t num_pw_states;
+	uint32_t readback;
+	unsigned long *ramp_time_us;
+	struct mailbox_config_info *mailbox;
+};
+
+/**
+ * struct deferred_cmd - description of a command to be processed later
+ * @list_node:	Used to put this command on a list in the edge.
+ * @id:		ID of the command.
+ * @param1:	Parameter one of the command.
+ * @param2:	Parameter two of the command.
+ * @data:	Extra data associated with the command, if applicable.
+ *
+ * This structure stores the relevant information of a command that was removed
+ * from the fifo but needs to be processed at a later time.
+ */
+struct deferred_cmd {
+	struct list_head list_node;
+	uint16_t id;
+	uint16_t param1;
+	uint32_t param2;
+	void *data;
+};
+
+static uint32_t negotiate_features_v1(struct glink_transport_if *if_ptr,
+				      const struct glink_core_version *version,
+				      uint32_t features);
+static void register_debugfs_info(struct edge_info *einfo);
+
+static struct edge_info *edge_infos[NUM_SMEM_SUBSYSTEMS];
+static DEFINE_MUTEX(probe_lock);
+static struct glink_core_version versions[] = {
+	{1, TRACER_PKT_FEATURE, negotiate_features_v1},
+};
+
+/**
+ * send_irq() - send an irq to a remote entity as an event signal
+ * @einfo:	Which remote entity that should receive the irq.
+ */
+static void send_irq(struct edge_info *einfo)
+{
+	/*
+	 * Any data associated with this event must be visible to the remote
+	 * side before the interrupt is triggered.
+	 */
+	einfo->readback = einfo->tx_ch_desc->write_index;
+	wmb();
+	writel_relaxed(einfo->out_irq_mask, einfo->out_irq_reg);
+	einfo->tx_irq_count++;
+}
+
+/**
+ * read_from_fifo() - memcpy from fifo memory
+ * @dest:	Destination address.
+ * @src:	Source address.
+ * @num_bytes:	Number of bytes to copy.
+ *
+ * Return: Destination address.
+ */
+static void *read_from_fifo(void *dest, const void *src, size_t num_bytes)
+{
+	memcpy_fromio(dest, src, num_bytes);
+	return dest;
+}
+
+/**
+ * write_to_fifo() - memcpy to fifo memory
+ * @dest:	Destination address.
+ * @src:	Source address.
+ * @num_bytes:	Number of bytes to copy.
+ *
+ * Return: Destination address.
+ */
+static void *write_to_fifo(void *dest, const void *src, size_t num_bytes)
+{
+	memcpy_toio(dest, src, num_bytes);
+	return dest;
+}
+
+/**
+ * memcpy32_toio() - memcpy to word access only memory
+ * @dest:	Destination address.
+ * @src:	Source address.
+ * @num_bytes:	Number of bytes to copy.
+ *
+ * Return: Destination address.
+ */
+static void *memcpy32_toio(void *dest, const void *src, size_t num_bytes)
+{
+	uint32_t *dest_local = (uint32_t *)dest;
+	uint32_t *src_local = (uint32_t *)src;
+
+	BUG_ON(num_bytes & RPM_FIFO_ADDR_ALIGN_BYTES);
+	BUG_ON(!dest_local ||
+			((uintptr_t)dest_local & RPM_FIFO_ADDR_ALIGN_BYTES));
+	BUG_ON(!src_local ||
+			((uintptr_t)src_local & RPM_FIFO_ADDR_ALIGN_BYTES));
+	num_bytes /= sizeof(uint32_t);
+
+	while (num_bytes--)
+		__raw_writel_no_log(*src_local++, dest_local++);
+
+	return dest;
+}
+
+/**
+ * memcpy32_fromio() - memcpy from word access only memory
+ * @dest:	Destination address.
+ * @src:	Source address.
+ * @num_bytes:	Number of bytes to copy.
+ *
+ * Return: Destination address.
+ */
+static void *memcpy32_fromio(void *dest, const void *src, size_t num_bytes)
+{
+	uint32_t *dest_local = (uint32_t *)dest;
+	uint32_t *src_local = (uint32_t *)src;
+
+	BUG_ON(num_bytes & RPM_FIFO_ADDR_ALIGN_BYTES);
+	BUG_ON(!dest_local ||
+			((uintptr_t)dest_local & RPM_FIFO_ADDR_ALIGN_BYTES));
+	BUG_ON(!src_local ||
+			((uintptr_t)src_local & RPM_FIFO_ADDR_ALIGN_BYTES));
+	num_bytes /= sizeof(uint32_t);
+
+	while (num_bytes--)
+		*dest_local++ = __raw_readl_no_log(src_local++);
+
+	return dest;
+}
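+
+/*
+ * Note on the BUG_ON checks above: RPM_FIFO_ADDR_ALIGN_BYTES is used as a
+ * mask, so if its value were 0x3 (32-bit words), an address like 0x1004
+ * would pass (0x1004 & 0x3 == 0) while 0x1006 would trip the check; the
+ * same mask applied to num_bytes rejects partial-word copies.
+ */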
+
+/**
+ * fifo_read_avail() - how many bytes are available to be read from an edge
+ * @einfo:	The concerned edge to query.
+ *
+ * Return: The number of bytes available to be read from edge.
+ */
+static uint32_t fifo_read_avail(struct edge_info *einfo)
+{
+	uint32_t read_index = einfo->rx_ch_desc->read_index;
+	uint32_t write_index = einfo->rx_ch_desc->write_index;
+	uint32_t fifo_size = einfo->rx_fifo_size;
+	uint32_t bytes_avail;
+
+	bytes_avail = write_index - read_index;
+	if (write_index < read_index)
+		/*
+		 * Case:  W < R - Write has wrapped
+		 * --------------------------------
+		 * In this case, the write operation has wrapped past the end
+		 * of the FIFO which means that now calculating the amount of
+		 * data in the FIFO results in a negative number.  This can be
+		 * easily fixed by adding the fifo_size to the value.  Even
+		 * though the values are unsigned, subtraction is always done
+		 * using 2's complement which means that the result will still
+		 * be correct once the FIFO size has been added to the negative
+		 * result.
+		 *
+		 * Example:
+		 *     '-' = data in fifo
+		 *     '.' = empty
+		 *
+		 *      0         1
+		 *      0123456789012345
+		 *     |-----w.....r----|
+		 *      0               N
+		 *
+		 *     write = 5 = 101b
+		 *     read = 11 = 1011b
+		 *     Data in FIFO
+		 *       (write - read) + fifo_size = (101b - 1011b) + 10000b
+		 *                          = 11111010b + 10000b = 1010b = 10
+		 */
+		bytes_avail += fifo_size;
+
+	return bytes_avail;
+}
+
+/**
+ * fifo_write_avail() - how many bytes can be written to the edge
+ * @einfo:	The concerned edge to query.
+ *
+ * Calculates the number of bytes that can be transmitted at this time.
+ * Automatically reserves some space to maintain alignment when the fifo is
+ * completely full, and reserves space so that the flush command can always be
+ * transmitted when needed.
+ *
+ * Return: The number of bytes that can be written to the edge.
+ */
+static uint32_t fifo_write_avail(struct edge_info *einfo)
+{
+	uint32_t read_index = einfo->tx_ch_desc->read_index;
+	uint32_t write_index = einfo->tx_ch_desc->write_index;
+	uint32_t fifo_size = einfo->tx_fifo_size;
+	uint32_t bytes_avail = read_index - write_index;
+
+	if (read_index <= write_index)
+		bytes_avail += fifo_size;
+	if (bytes_avail < FIFO_FULL_RESERVE + TX_BLOCKED_CMD_RESERVE)
+		bytes_avail = 0;
+	else
+		bytes_avail -= FIFO_FULL_RESERVE + TX_BLOCKED_CMD_RESERVE;
+
+	return bytes_avail;
+}
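+
+/*
+ * Example (reserve sizes assumed for illustration):
+ *     fifo_size = 1024, read_index = 100, write_index = 200
+ *     raw space = (100 - 200) + 1024 = 924 bytes
+ * If FIFO_FULL_RESERVE and TX_BLOCKED_CMD_RESERVE were 8 bytes each, the
+ * function would report 924 - 16 = 908 bytes, so the fifo can never be
+ * filled completely and the read-notification flush command always has room.
+ */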
+
+/**
+ * fifo_read() - read data from an edge
+ * @einfo:	The concerned edge to read from.
+ * @_data:	Buffer to copy the read data into.
+ * @len:	The amount of data to read in bytes.
+ *
+ * Return: The number of bytes read.
+ */
+static int fifo_read(struct edge_info *einfo, void *_data, int len)
+{
+	void *ptr;
+	void *data = _data;
+	int orig_len = len;
+	uint32_t read_index = einfo->rx_ch_desc->read_index;
+	uint32_t write_index = einfo->rx_ch_desc->write_index;
+	uint32_t fifo_size = einfo->rx_fifo_size;
+	uint32_t n;
+
+	if (read_index >= fifo_size || write_index >= fifo_size)
+		return 0;
+
+	while (len) {
+		ptr = einfo->rx_fifo + read_index;
+		if (read_index <= write_index)
+			n = write_index - read_index;
+		else
+			n = fifo_size - read_index;
+
+		if (n == 0)
+			break;
+		if (n > len)
+			n = len;
+
+		einfo->read_from_fifo(data, ptr, n);
+
+		data += n;
+		len -= n;
+		read_index += n;
+		if (read_index >= fifo_size)
+			read_index -= fifo_size;
+	}
+	einfo->rx_ch_desc->read_index = read_index;
+
+	return orig_len - len;
+}
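+
+/*
+ * Example of a wrapped read:
+ *     fifo_size = 16, read_index = 12, write_index = 4, len = 8
+ * The first loop iteration copies n = 16 - 12 = 4 bytes and wraps
+ * read_index to 0; the second copies the remaining 4 bytes from the start
+ * of the fifo, leaving read_index == write_index == 4, i.e. empty.
+ */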
+
+/**
+ * fifo_write_body() - Copy transmit data into an edge
+ * @einfo:		The concerned edge to copy into.
+ * @_data:		Buffer of data to copy from.
+ * @len:		Size of data to copy in bytes.
+ * @write_index:	Index into the channel where the data should be copied.
+ *
+ * Return: Number of bytes remaining to be copied into the edge.
+ */
+static uint32_t fifo_write_body(struct edge_info *einfo, const void *_data,
+				int len, uint32_t *write_index)
+{
+	void *ptr;
+	const void *data = _data;
+	uint32_t read_index = einfo->tx_ch_desc->read_index;
+	uint32_t fifo_size = einfo->tx_fifo_size;
+	uint32_t n;
+
+	if (read_index >= fifo_size || *write_index >= fifo_size)
+		return 0;
+
+	while (len) {
+		ptr = einfo->tx_fifo + *write_index;
+		if (*write_index < read_index) {
+			n = read_index - *write_index - FIFO_FULL_RESERVE;
+		} else {
+			if (read_index < FIFO_FULL_RESERVE)
+				n = fifo_size + read_index - *write_index -
+							FIFO_FULL_RESERVE;
+			else
+				n = fifo_size - *write_index;
+		}
+
+		if (n == 0)
+			break;
+		if (n > len)
+			n = len;
+
+		einfo->write_to_fifo(ptr, data, n);
+
+		data += n;
+		len -= n;
+		*write_index += n;
+		if (*write_index >= fifo_size)
+			*write_index -= fifo_size;
+	}
+	return len;
+}
+
+/**
+ * fifo_write() - Write data into an edge
+ * @einfo:	The concerned edge to write to.
+ * @data:	Buffer of data to write.
+ * @len:	Length of data to write, in bytes.
+ *
+ * Wrapper around fifo_write_body() to manage additional details that are
+ * necessary for a complete write event.  Does not manage concurrency.  Clients
+ * should use fifo_write_avail() to check if there is sufficient space before
+ * calling fifo_write().
+ *
+ * Return: Number of bytes written to the edge.
+ */
+static int fifo_write(struct edge_info *einfo, const void *data, int len)
+{
+	int orig_len = len;
+	uint32_t write_index = einfo->tx_ch_desc->write_index;
+
+	len = fifo_write_body(einfo, data, len, &write_index);
+	einfo->tx_ch_desc->write_index = write_index;
+	send_irq(einfo);
+
+	return orig_len - len;
+}
+
+/**
+ * fifo_write_complex() - writes a transaction of multiple buffers to an edge
+ * @einfo:	The concerned edge to write to.
+ * @data1:	The first buffer of data to write.
+ * @len1:	The length of the first buffer in bytes.
+ * @data2:	The second buffer of data to write.
+ * @len2:	The length of the second buffer in bytes.
+ * @data3:	The third buffer of data to write.
+ * @len3:	The length of the third buffer in bytes.
+ *
+ * A variant of fifo_write() which optimizes the usecase found in tx().  The
+ * remote side expects all or none of the transmitted data to be available.
+ * This prevents the tx() usecase from calling fifo_write() multiple times.  The
+ * alternative would be an allocation and additional memcpy to create a buffer
+ * to copy all the data segments into one location before calling fifo_write().
+ *
+ * Return: Number of bytes written to the edge.
+ */
+static int fifo_write_complex(struct edge_info *einfo,
+			      const void *data1, int len1,
+			      const void *data2, int len2,
+			      const void *data3, int len3)
+{
+	int orig_len = len1 + len2 + len3;
+	uint32_t write_index = einfo->tx_ch_desc->write_index;
+
+	len1 = fifo_write_body(einfo, data1, len1, &write_index);
+	len2 = fifo_write_body(einfo, data2, len2, &write_index);
+	len3 = fifo_write_body(einfo, data3, len3, &write_index);
+	einfo->tx_ch_desc->write_index = write_index;
+	send_irq(einfo);
+
+	return orig_len - len1 - len2 - len3;
+}
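+
+/*
+ * tx_data() below commits a framed packet with a single call (and a single
+ * outgoing irq):
+ *
+ *     fifo_write_complex(einfo, &cmd, sizeof(cmd),  <- command header
+ *                        data_start, size,          <- payload fragment
+ *                        zeros, zeros_size);        <- alignment padding
+ *
+ * The write index is published once, after all three segments are copied,
+ * so the remote side observes either the whole frame or nothing.
+ */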
+
+/**
+ * send_tx_blocked_signal() - send the flush command as we are blocked from tx
+ * @einfo:	The concerned edge which is blocked.
+ *
+ * Used to send a signal to the remote side that we have no more space to
+ * transmit data and therefore need the remote side to signal us when they have
+ * cleared some space by reading some data.  This function relies upon the
+ * assumption that fifo_write_avail() will reserve some space so that the flush
+ * signal command can always be put into the transmit fifo, even when "everyone"
+ * else thinks that the transmit fifo is truly full.  This function assumes
+ * that it is called with the write_lock already locked.
+ */
+static void send_tx_blocked_signal(struct edge_info *einfo)
+{
+	struct read_notif_request {
+		uint16_t cmd;
+		uint16_t reserved;
+		uint32_t reserved2;
+	};
+	struct read_notif_request read_notif_req;
+
+	read_notif_req.cmd = READ_NOTIF_CMD;
+	read_notif_req.reserved = 0;
+	read_notif_req.reserved2 = 0;
+
+	if (!einfo->tx_blocked_signal_sent) {
+		einfo->tx_blocked_signal_sent = true;
+		fifo_write(einfo, &read_notif_req, sizeof(read_notif_req));
+	}
+}
+
+/**
+ * fifo_tx() - transmit data on an edge
+ * @einfo:	The concerned edge to transmit on.
+ * @data:	Buffer of data to transmit.
+ * @len:	Length of data to transmit in bytes.
+ *
+ * This helper function is the preferred interface to fifo_write() and should
+ * be used in the normal case for transmitting entities.  fifo_tx() will block
+ * until there is sufficient room to transmit the requested amount of data.
+ * fifo_tx() will manage any concurrency between multiple transmitters on a
+ * channel.
+ *
+ * Return: Number of bytes transmitted.
+ */
+static int fifo_tx(struct edge_info *einfo, const void *data, int len)
+{
+	unsigned long flags;
+	int ret;
+
+	DEFINE_WAIT(wait);
+
+	spin_lock_irqsave(&einfo->write_lock, flags);
+	while (fifo_write_avail(einfo) < len) {
+		send_tx_blocked_signal(einfo);
+		spin_unlock_irqrestore(&einfo->write_lock, flags);
+		prepare_to_wait(&einfo->tx_blocked_queue, &wait,
+							TASK_UNINTERRUPTIBLE);
+		if (fifo_write_avail(einfo) < len && !einfo->in_ssr)
+			schedule();
+		finish_wait(&einfo->tx_blocked_queue, &wait);
+		spin_lock_irqsave(&einfo->write_lock, flags);
+		if (einfo->in_ssr) {
+			spin_unlock_irqrestore(&einfo->write_lock, flags);
+			return -EFAULT;
+		}
+	}
+	ret = fifo_write(einfo, data, len);
+	spin_unlock_irqrestore(&einfo->write_lock, flags);
+
+	return ret;
+}
+
+/**
+ * process_rx_data() - process received data from an edge
+ * @einfo:	The edge the data was received on.
+ * @cmd_id:	ID to specify the type of data.
+ * @rcid:	The remote channel id associated with the data.
+ * @intent_id:	The intent the data should be put in.
+ */
+static void process_rx_data(struct edge_info *einfo, uint16_t cmd_id,
+			    uint32_t rcid, uint32_t intent_id)
+{
+	struct command {
+		uint32_t frag_size;
+		uint32_t size_remaining;
+	};
+	struct command cmd;
+	struct glink_core_rx_intent *intent;
+	char trash[FIFO_ALIGNMENT];
+	int alignment;
+	bool err = false;
+
+	fifo_read(einfo, &cmd, sizeof(cmd));
+
+	intent = einfo->xprt_if.glink_core_if_ptr->rx_get_pkt_ctx(
+					&einfo->xprt_if, rcid, intent_id);
+	if (intent == NULL) {
+		GLINK_ERR("%s: no intent for ch %d liid %d\n", __func__, rcid,
+								intent_id);
+		err = true;
+	} else if (intent->data == NULL) {
+		if (einfo->intentless) {
+			intent->data = kmalloc(cmd.frag_size,
+						__GFP_ATOMIC | __GFP_HIGH);
+			if (!intent->data) {
+				err = true;
+				GLINK_ERR(
+				"%s: atomic alloc fail ch %d liid %d size %d\n",
+						__func__, rcid, intent_id,
+						cmd.frag_size);
+			} else {
+				intent->intent_size = cmd.frag_size;
+			}
+		} else {
+			GLINK_ERR(
+				"%s: intent for ch %d liid %d has no data buff\n",
+						__func__, rcid, intent_id);
+			err = true;
+		}
+	}
+
+	if (!err &&
+	    (intent->intent_size - intent->write_offset < cmd.frag_size ||
+	    intent->write_offset + cmd.size_remaining > intent->intent_size)) {
+		GLINK_ERR("%s: rx data size:%d and remaining:%d %s %d %s:%d\n",
+							__func__,
+							cmd.frag_size,
+							cmd.size_remaining,
+							"will overflow ch",
+							rcid,
+							"intent",
+							intent_id);
+		err = true;
+	}
+
+	if (err) {
+		alignment = ALIGN(cmd.frag_size, FIFO_ALIGNMENT);
+		alignment -= cmd.frag_size;
+		while (cmd.frag_size) {
+			if (cmd.frag_size > FIFO_ALIGNMENT) {
+				fifo_read(einfo, trash, FIFO_ALIGNMENT);
+				cmd.frag_size -= FIFO_ALIGNMENT;
+			} else {
+				fifo_read(einfo, trash, cmd.frag_size);
+				cmd.frag_size = 0;
+			}
+		}
+		if (alignment)
+			fifo_read(einfo, trash, alignment);
+		return;
+	}
+	fifo_read(einfo, intent->data + intent->write_offset, cmd.frag_size);
+	intent->write_offset += cmd.frag_size;
+	intent->pkt_size += cmd.frag_size;
+
+	alignment = ALIGN(cmd.frag_size, FIFO_ALIGNMENT);
+	alignment -= cmd.frag_size;
+	if (alignment)
+		fifo_read(einfo, trash, alignment);
+
+	if (unlikely((cmd_id == TRACER_PKT_CMD ||
+		      cmd_id == TRACER_PKT_CONT_CMD) && !cmd.size_remaining)) {
+		tracer_pkt_log_event(intent->data, GLINK_XPRT_RX);
+		intent->tracer_pkt = true;
+	}
+
+	einfo->xprt_if.glink_core_if_ptr->rx_put_pkt_ctx(&einfo->xprt_if,
+							rcid,
+							intent,
+							cmd.size_remaining ?
+								false : true);
+}
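+
+/*
+ * On-wire layout of the fragment consumed above, after __rx_worker() has
+ * already pulled the leading 8 byte command (id/param1/param2):
+ *
+ *     | frag_size | size_remaining | frag_size bytes of payload | padding |
+ *
+ * The padding rounds the payload up to FIFO_ALIGNMENT.  On error the
+ * payload and padding are still drained into trash[] so the fifo stays
+ * aligned on a command boundary for the next command.
+ */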
+
+/**
+ * queue_cmd() - queue a deferred command for later processing
+ * @einfo:	Edge to queue commands on.
+ * @cmd:	Command to queue.
+ * @data:	Command specific data to queue with the command.
+ *
+ * Return: True if queuing was successful, false otherwise.
+ */
+static bool queue_cmd(struct edge_info *einfo, void *cmd, void *data)
+{
+	struct command {
+		uint16_t id;
+		uint16_t param1;
+		uint32_t param2;
+	};
+	struct command *_cmd = cmd;
+	struct deferred_cmd *d_cmd;
+
+	d_cmd = kmalloc(sizeof(*d_cmd), GFP_ATOMIC);
+	if (!d_cmd) {
+		GLINK_ERR("%s: Discarding cmd %d\n", __func__, _cmd->id);
+		return false;
+	}
+	d_cmd->id = _cmd->id;
+	d_cmd->param1 = _cmd->param1;
+	d_cmd->param2 = _cmd->param2;
+	d_cmd->data = data;
+	list_add_tail(&d_cmd->list_node, &einfo->deferred_cmds);
+	einfo->deferred_cmds_cnt++;
+	queue_kthread_work(&einfo->kworker, &einfo->kwork);
+	return true;
+}
+
+/**
+ * get_rx_fifo() - Find the rx fifo for an edge
+ * @einfo:	Edge to find the fifo for.
+ *
+ * Return: True if fifo was found, false otherwise.
+ */
+static bool get_rx_fifo(struct edge_info *einfo)
+{
+	if (einfo->mailbox) {
+		einfo->rx_fifo = &einfo->mailbox->fifo[einfo->mailbox->tx_size];
+		einfo->rx_fifo_size = einfo->mailbox->rx_size;
+	} else {
+		einfo->rx_fifo = smem_get_entry(SMEM_GLINK_NATIVE_XPRT_FIFO_1,
+							&einfo->rx_fifo_size,
+							einfo->remote_proc_id,
+							SMEM_ITEM_CACHED_FLAG);
+		if (!einfo->rx_fifo)
+			einfo->rx_fifo = smem_get_entry(
+						SMEM_GLINK_NATIVE_XPRT_FIFO_1,
+							&einfo->rx_fifo_size,
+							einfo->remote_proc_id,
+							0);
+		if (!einfo->rx_fifo)
+			return false;
+	}
+
+	return true;
+}
+
+/**
+ * tx_wakeup_worker() - wake up threads blocked waiting to transmit
+ * @einfo:	Edge on which transmitters may be waiting.
+ */
+static void tx_wakeup_worker(struct edge_info *einfo)
+{
+	bool trigger_wakeup = false;
+	unsigned long flags;
+
+	if (einfo->in_ssr)
+		return;
+	if (einfo->tx_resume_needed && fifo_write_avail(einfo)) {
+		einfo->tx_resume_needed = false;
+		einfo->xprt_if.glink_core_if_ptr->tx_resume(
+						&einfo->xprt_if);
+	}
+	spin_lock_irqsave(&einfo->write_lock, flags);
+	if (waitqueue_active(&einfo->tx_blocked_queue)) { /* tx waiting? */
+		einfo->tx_blocked_signal_sent = false;
+		trigger_wakeup = true;
+	}
+	spin_unlock_irqrestore(&einfo->write_lock, flags);
+	if (trigger_wakeup)
+		wake_up_all(&einfo->tx_blocked_queue);
+}
+
+/**
+ * __rx_worker() - process received commands on a specific edge
+ * @einfo:	Edge to process commands on.
+ * @atomic_ctx:	Indicates if the caller is in atomic context and requires any
+ *		non-atomic operations to be deferred.
+ */
+static void __rx_worker(struct edge_info *einfo, bool atomic_ctx)
+{
+	struct command {
+		uint16_t id;
+		uint16_t param1;
+		uint32_t param2;
+	};
+	struct intent_desc {
+		uint32_t size;
+		uint32_t id;
+	};
+	struct command cmd;
+	struct intent_desc intent;
+	struct intent_desc *intents;
+	int i;
+	bool granted;
+	unsigned long flags;
+	int rcu_id;
+	uint16_t rcid;
+	uint32_t name_len;
+	uint32_t len;
+	char *name;
+	char trash[FIFO_ALIGNMENT];
+	struct deferred_cmd *d_cmd;
+	void *cmd_data;
+
+	rcu_id = srcu_read_lock(&einfo->use_ref);
+
+	if (einfo->in_ssr) {
+		srcu_read_unlock(&einfo->use_ref, rcu_id);
+		return;
+	}
+
+	if ((atomic_ctx) && ((einfo->tx_resume_needed) ||
+		(waitqueue_active(&einfo->tx_blocked_queue)))) /* tx waiting? */
+		tx_wakeup_worker(einfo);
+
+	/*
+	 * Access to the fifo needs to be synchronized, however only the calls
+	 * into the core from process_rx_data() are compatible with an atomic
+	 * processing context.  For everything else, we need to do all the fifo
+	 * processing, then unlock the lock for the call into the core.  Data
+	 * in the fifo is allowed to be processed immediately instead of being
+	 * ordered with the commands because the channel open process prevents
+	 * intents from being queued (which prevents data from being sent) until
+	 * all the channel open commands are processed by the core, thus
+	 * eliminating a race.
+	 */
+	spin_lock_irqsave(&einfo->rx_lock, flags);
+	while (fifo_read_avail(einfo) ||
+			(!atomic_ctx && !list_empty(&einfo->deferred_cmds))) {
+		if (einfo->in_ssr)
+			break;
+
+		if (atomic_ctx && !einfo->intentless &&
+		    einfo->deferred_cmds_cnt >= DEFERRED_CMDS_THRESHOLD)
+			break;
+
+		if (!atomic_ctx && !list_empty(&einfo->deferred_cmds)) {
+			d_cmd = list_first_entry(&einfo->deferred_cmds,
+						struct deferred_cmd, list_node);
+			list_del(&d_cmd->list_node);
+			einfo->deferred_cmds_cnt--;
+			cmd.id = d_cmd->id;
+			cmd.param1 = d_cmd->param1;
+			cmd.param2 = d_cmd->param2;
+			cmd_data = d_cmd->data;
+			kfree(d_cmd);
+		} else {
+			fifo_read(einfo, &cmd, sizeof(cmd));
+			cmd_data = NULL;
+		}
+
+		switch (cmd.id) {
+		case VERSION_CMD:
+			if (atomic_ctx) {
+				queue_cmd(einfo, &cmd, NULL);
+				break;
+			}
+			spin_unlock_irqrestore(&einfo->rx_lock, flags);
+			einfo->xprt_if.glink_core_if_ptr->rx_cmd_version(
+								&einfo->xprt_if,
+								cmd.param1,
+								cmd.param2);
+			spin_lock_irqsave(&einfo->rx_lock, flags);
+			break;
+		case VERSION_ACK_CMD:
+			if (atomic_ctx) {
+				queue_cmd(einfo, &cmd, NULL);
+				break;
+			}
+			spin_unlock_irqrestore(&einfo->rx_lock, flags);
+			einfo->xprt_if.glink_core_if_ptr->rx_cmd_version_ack(
+								&einfo->xprt_if,
+								cmd.param1,
+								cmd.param2);
+			spin_lock_irqsave(&einfo->rx_lock, flags);
+			break;
+		case OPEN_CMD:
+			rcid = cmd.param1;
+			name_len = cmd.param2;
+
+			if (cmd_data) {
+				name = cmd_data;
+			} else {
+				len = ALIGN(name_len, FIFO_ALIGNMENT);
+				name = kmalloc(len, GFP_ATOMIC);
+				if (!name) {
+					pr_err("No memory available to rx ch open cmd name.  Discarding cmd.\n");
+					while (len) {
+						fifo_read(einfo, trash,
+								FIFO_ALIGNMENT);
+						len -= FIFO_ALIGNMENT;
+					}
+					break;
+				}
+				fifo_read(einfo, name, len);
+			}
+			if (atomic_ctx) {
+				if (!queue_cmd(einfo, &cmd, name))
+					kfree(name);
+				break;
+			}
+
+			spin_unlock_irqrestore(&einfo->rx_lock, flags);
+			einfo->xprt_if.glink_core_if_ptr->rx_cmd_ch_remote_open(
+								&einfo->xprt_if,
+								rcid,
+								name,
+								SMEM_XPRT_ID);
+			kfree(name);
+			spin_lock_irqsave(&einfo->rx_lock, flags);
+			break;
+		case CLOSE_CMD:
+			if (atomic_ctx) {
+				queue_cmd(einfo, &cmd, NULL);
+				break;
+			}
+			spin_unlock_irqrestore(&einfo->rx_lock, flags);
+			einfo->xprt_if.glink_core_if_ptr->
+							rx_cmd_ch_remote_close(
+								&einfo->xprt_if,
+								cmd.param1);
+			spin_lock_irqsave(&einfo->rx_lock, flags);
+			break;
+		case OPEN_ACK_CMD:
+			if (atomic_ctx) {
+				queue_cmd(einfo, &cmd, NULL);
+				break;
+			}
+			spin_unlock_irqrestore(&einfo->rx_lock, flags);
+			einfo->xprt_if.glink_core_if_ptr->rx_cmd_ch_open_ack(
+								&einfo->xprt_if,
+								cmd.param1,
+								SMEM_XPRT_ID);
+			spin_lock_irqsave(&einfo->rx_lock, flags);
+			break;
+		case RX_INTENT_CMD:
+			/*
+			 * One intent listed with this command.  This is the
+			 * expected case and can be optimized over the general
+			 * case of an array of intents.
+			 */
+			if (cmd.param2 == 1) {
+				if (cmd_data) {
+					intent.id = ((struct intent_desc *)
+								cmd_data)->id;
+					intent.size = ((struct intent_desc *)
+								cmd_data)->size;
+					kfree(cmd_data);
+				} else {
+					fifo_read(einfo, &intent,
+								sizeof(intent));
+				}
+				if (atomic_ctx) {
+					cmd_data = kmalloc(sizeof(intent),
+								GFP_ATOMIC);
+					if (!cmd_data) {
+						pr_err("%s: dropping cmd %d\n",
+								__func__,
+								cmd.id);
+						break;
+					}
+					((struct intent_desc *)cmd_data)->id =
+								intent.id;
+					((struct intent_desc *)cmd_data)->size =
+								intent.size;
+					if (!queue_cmd(einfo, &cmd, cmd_data))
+						kfree(cmd_data);
+					break;
+				}
+				spin_unlock_irqrestore(&einfo->rx_lock, flags);
+				einfo->xprt_if.glink_core_if_ptr->
+						rx_cmd_remote_rx_intent_put(
+								&einfo->xprt_if,
+								cmd.param1,
+								intent.id,
+								intent.size);
+				spin_lock_irqsave(&einfo->rx_lock, flags);
+				break;
+			}
+
+			/* Array of intents to process */
+			if (cmd_data) {
+				intents = cmd_data;
+			} else {
+				intents = kmalloc(sizeof(*intents) * cmd.param2,
+								GFP_ATOMIC);
+				if (!intents) {
+					for (i = 0; i < cmd.param2; ++i)
+						fifo_read(einfo, &intent,
+								sizeof(intent));
+					break;
+				}
+				fifo_read(einfo, intents,
+					sizeof(*intents) * cmd.param2);
+			}
+			if (atomic_ctx) {
+				if (!queue_cmd(einfo, &cmd, intents))
+					kfree(intents);
+				break;
+			}
+			spin_unlock_irqrestore(&einfo->rx_lock, flags);
+			for (i = 0; i < cmd.param2; ++i) {
+				einfo->xprt_if.glink_core_if_ptr->
+					rx_cmd_remote_rx_intent_put(
+							&einfo->xprt_if,
+							cmd.param1,
+							intents[i].id,
+							intents[i].size);
+			}
+			kfree(intents);
+			spin_lock_irqsave(&einfo->rx_lock, flags);
+			break;
+		case RX_DONE_CMD:
+			if (atomic_ctx) {
+				queue_cmd(einfo, &cmd, NULL);
+				break;
+			}
+			spin_unlock_irqrestore(&einfo->rx_lock, flags);
+			einfo->xprt_if.glink_core_if_ptr->rx_cmd_tx_done(
+								&einfo->xprt_if,
+								cmd.param1,
+								cmd.param2,
+								false);
+			spin_lock_irqsave(&einfo->rx_lock, flags);
+			break;
+		case RX_INTENT_REQ_CMD:
+			if (atomic_ctx) {
+				queue_cmd(einfo, &cmd, NULL);
+				break;
+			}
+			spin_unlock_irqrestore(&einfo->rx_lock, flags);
+			einfo->xprt_if.glink_core_if_ptr->
+						rx_cmd_remote_rx_intent_req(
+								&einfo->xprt_if,
+								cmd.param1,
+								cmd.param2);
+			spin_lock_irqsave(&einfo->rx_lock, flags);
+			break;
+		case RX_INTENT_REQ_ACK_CMD:
+			if (atomic_ctx) {
+				queue_cmd(einfo, &cmd, NULL);
+				break;
+			}
+			spin_unlock_irqrestore(&einfo->rx_lock, flags);
+			granted = false;
+			if (cmd.param2 == 1)
+				granted = true;
+			einfo->xprt_if.glink_core_if_ptr->
+						rx_cmd_rx_intent_req_ack(
+								&einfo->xprt_if,
+								cmd.param1,
+								granted);
+			spin_lock_irqsave(&einfo->rx_lock, flags);
+			break;
+		case TX_DATA_CMD:
+		case TX_DATA_CONT_CMD:
+		case TRACER_PKT_CMD:
+		case TRACER_PKT_CONT_CMD:
+			process_rx_data(einfo, cmd.id, cmd.param1, cmd.param2);
+			break;
+		case CLOSE_ACK_CMD:
+			if (atomic_ctx) {
+				queue_cmd(einfo, &cmd, NULL);
+				break;
+			}
+			spin_unlock_irqrestore(&einfo->rx_lock, flags);
+			einfo->xprt_if.glink_core_if_ptr->rx_cmd_ch_close_ack(
+								&einfo->xprt_if,
+								cmd.param1);
+			spin_lock_irqsave(&einfo->rx_lock, flags);
+			break;
+		case READ_NOTIF_CMD:
+			send_irq(einfo);
+			break;
+		case SIGNALS_CMD:
+			if (atomic_ctx) {
+				queue_cmd(einfo, &cmd, NULL);
+				break;
+			}
+			spin_unlock_irqrestore(&einfo->rx_lock, flags);
+			einfo->xprt_if.glink_core_if_ptr->rx_cmd_remote_sigs(
+								&einfo->xprt_if,
+								cmd.param1,
+								cmd.param2);
+			spin_lock_irqsave(&einfo->rx_lock, flags);
+			break;
+		case RX_DONE_W_REUSE_CMD:
+			if (atomic_ctx) {
+				queue_cmd(einfo, &cmd, NULL);
+				break;
+			}
+			spin_unlock_irqrestore(&einfo->rx_lock, flags);
+			einfo->xprt_if.glink_core_if_ptr->rx_cmd_tx_done(
+								&einfo->xprt_if,
+								cmd.param1,
+								cmd.param2,
+								true);
+			spin_lock_irqsave(&einfo->rx_lock, flags);
+			break;
+		default:
+			pr_err("Unrecognized command: %d\n", cmd.id);
+			break;
+		}
+	}
+	spin_unlock_irqrestore(&einfo->rx_lock, flags);
+	srcu_read_unlock(&einfo->use_ref, rcu_id);
+}
+
+/**
+ * rx_worker() - worker function to process received commands
+ * @work:	kwork associated with the edge to process commands on.
+ */
+static void rx_worker(struct kthread_work *work)
+{
+	struct edge_info *einfo;
+
+	einfo = container_of(work, struct edge_info, kwork);
+	__rx_worker(einfo, false);
+}
+
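+/**
+ * irq_handler() - handle an incoming interrupt from the remote side
+ * @irq:	The irq line that fired.
+ * @priv:	The edge_info passed at registration time.
+ *
+ * Return: IRQ_HANDLED.
+ */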
+irqreturn_t irq_handler(int irq, void *priv)
+{
+	struct edge_info *einfo = (struct edge_info *)priv;
+
+	if (einfo->rx_reset_reg)
+		writel_relaxed(einfo->out_irq_mask, einfo->rx_reset_reg);
+
+	__rx_worker(einfo, true);
+	einfo->rx_irq_count++;
+
+	return IRQ_HANDLED;
+}
+
+/**
+ * tx_cmd_version() - convert a version cmd to wire format and transmit
+ * @if_ptr:	The transport to transmit on.
+ * @version:	The version number to encode.
+ * @features:	The features information to encode.
+ */
+static void tx_cmd_version(struct glink_transport_if *if_ptr, uint32_t version,
+			   uint32_t features)
+{
+	struct command {
+		uint16_t id;
+		uint16_t version;
+		uint32_t features;
+	};
+	struct command cmd;
+	struct edge_info *einfo;
+	int rcu_id;
+
+	einfo = container_of(if_ptr, struct edge_info, xprt_if);
+
+	rcu_id = srcu_read_lock(&einfo->use_ref);
+	if (einfo->in_ssr) {
+		srcu_read_unlock(&einfo->use_ref, rcu_id);
+		return;
+	}
+
+	cmd.id = VERSION_CMD;
+	cmd.version = version;
+	cmd.features = features;
+
+	fifo_tx(einfo, &cmd, sizeof(cmd));
+	srcu_read_unlock(&einfo->use_ref, rcu_id);
+}
+
+/**
+ * tx_cmd_version_ack() - convert a version ack cmd to wire format and transmit
+ * @if_ptr:	The transport to transmit on.
+ * @version:	The version number to encode.
+ * @features:	The features information to encode.
+ */
+static void tx_cmd_version_ack(struct glink_transport_if *if_ptr,
+			       uint32_t version,
+			       uint32_t features)
+{
+	struct command {
+		uint16_t id;
+		uint16_t version;
+		uint32_t features;
+	};
+	struct command cmd;
+	struct edge_info *einfo;
+	int rcu_id;
+
+	einfo = container_of(if_ptr, struct edge_info, xprt_if);
+
+	rcu_id = srcu_read_lock(&einfo->use_ref);
+	if (einfo->in_ssr) {
+		srcu_read_unlock(&einfo->use_ref, rcu_id);
+		return;
+	}
+
+	cmd.id = VERSION_ACK_CMD;
+	cmd.version = version;
+	cmd.features = features;
+
+	fifo_tx(einfo, &cmd, sizeof(cmd));
+	srcu_read_unlock(&einfo->use_ref, rcu_id);
+}
+
+/**
+ * set_version() - activate a negotiated version and feature set
+ * @if_ptr:	The transport to configure.
+ * @version:	The version to use.
+ * @features:	The features to use.
+ *
+ * Return: The supported capabilities of the transport.
+ */
+static uint32_t set_version(struct glink_transport_if *if_ptr, uint32_t version,
+			uint32_t features)
+{
+	struct edge_info *einfo;
+	uint32_t ret;
+	int rcu_id;
+
+	einfo = container_of(if_ptr, struct edge_info, xprt_if);
+
+	rcu_id = srcu_read_lock(&einfo->use_ref);
+	if (einfo->in_ssr) {
+		srcu_read_unlock(&einfo->use_ref, rcu_id);
+		return 0;
+	}
+
+	ret = einfo->intentless ?
+				GCAP_INTENTLESS | GCAP_SIGNALS : GCAP_SIGNALS;
+
+	if (features & TRACER_PKT_FEATURE)
+		ret |= GCAP_TRACER_PKT;
+
+	srcu_read_unlock(&einfo->use_ref, rcu_id);
+	return ret;
+}
+
+/**
+ * tx_cmd_ch_open() - convert a channel open cmd to wire format and transmit
+ * @if_ptr:	The transport to transmit on.
+ * @lcid:	The local channel id to encode.
+ * @name:	The channel name to encode.
+ * @req_xprt:	The transport the core would like to migrate this channel to.
+ *
+ * Return: 0 on success or standard Linux error code.
+ */
+static int tx_cmd_ch_open(struct glink_transport_if *if_ptr, uint32_t lcid,
+			  const char *name, uint16_t req_xprt)
+{
+	struct command {
+		uint16_t id;
+		uint16_t lcid;
+		uint32_t length;
+	};
+	struct command cmd;
+	struct edge_info *einfo;
+	uint32_t buf_size;
+	void *buf;
+	int rcu_id;
+
+	einfo = container_of(if_ptr, struct edge_info, xprt_if);
+
+	rcu_id = srcu_read_lock(&einfo->use_ref);
+	if (einfo->in_ssr) {
+		srcu_read_unlock(&einfo->use_ref, rcu_id);
+		return -EFAULT;
+	}
+
+	cmd.id = OPEN_CMD;
+	cmd.lcid = lcid;
+	cmd.length = strlen(name) + 1;
+
+	buf_size = ALIGN(sizeof(cmd) + cmd.length, FIFO_ALIGNMENT);
+
+	buf = kzalloc(buf_size, GFP_KERNEL);
+	if (!buf) {
+		pr_err("%s: malloc fail for %d size buf\n", __func__, buf_size);
+		srcu_read_unlock(&einfo->use_ref, rcu_id);
+		return -ENOMEM;
+	}
+
+	memcpy(buf, &cmd, sizeof(cmd));
+	memcpy(buf + sizeof(cmd), name, cmd.length);
+
+	fifo_tx(einfo, buf, buf_size);
+
+	kfree(buf);
+
+	srcu_read_unlock(&einfo->use_ref, rcu_id);
+	return 0;
+}
+
+/**
+ * tx_cmd_ch_close() - convert a channel close cmd to wire format and transmit
+ * @if_ptr:	The transport to transmit on.
+ * @lcid:	The local channel id to encode.
+ *
+ * Return: 0 on success or standard Linux error code.
+ */
+static int tx_cmd_ch_close(struct glink_transport_if *if_ptr, uint32_t lcid)
+{
+	struct command {
+		uint16_t id;
+		uint16_t lcid;
+		uint32_t reserved;
+	};
+	struct command cmd;
+	struct edge_info *einfo;
+	int rcu_id;
+
+	einfo = container_of(if_ptr, struct edge_info, xprt_if);
+
+	rcu_id = srcu_read_lock(&einfo->use_ref);
+	if (einfo->in_ssr) {
+		srcu_read_unlock(&einfo->use_ref, rcu_id);
+		return -EFAULT;
+	}
+
+	cmd.id = CLOSE_CMD;
+	cmd.lcid = lcid;
+	cmd.reserved = 0;
+
+	fifo_tx(einfo, &cmd, sizeof(cmd));
+
+	srcu_read_unlock(&einfo->use_ref, rcu_id);
+	return 0;
+}
+
+/**
+ * tx_cmd_ch_remote_open_ack() - convert a channel open ack cmd to wire format
+ *				 and transmit
+ * @if_ptr:	The transport to transmit on.
+ * @rcid:	The remote channel id to encode.
+ * @xprt_resp:	The response to a transport migration request.
+ */
+static void tx_cmd_ch_remote_open_ack(struct glink_transport_if *if_ptr,
+				     uint32_t rcid, uint16_t xprt_resp)
+{
+	struct command {
+		uint16_t id;
+		uint16_t rcid;
+		uint32_t reserved;
+	};
+	struct command cmd;
+	struct edge_info *einfo;
+	int rcu_id;
+
+	einfo = container_of(if_ptr, struct edge_info, xprt_if);
+
+	rcu_id = srcu_read_lock(&einfo->use_ref);
+	if (einfo->in_ssr) {
+		srcu_read_unlock(&einfo->use_ref, rcu_id);
+		return;
+	}
+
+	cmd.id = OPEN_ACK_CMD;
+	cmd.rcid = rcid;
+	cmd.reserved = 0;
+
+	fifo_tx(einfo, &cmd, sizeof(cmd));
+	srcu_read_unlock(&einfo->use_ref, rcu_id);
+}
+
+/**
+ * tx_cmd_ch_remote_close_ack() - convert a channel close ack cmd to wire format
+ *				  and transmit
+ * @if_ptr:	The transport to transmit on.
+ * @rcid:	The remote channel id to encode.
+ */
+static void tx_cmd_ch_remote_close_ack(struct glink_transport_if *if_ptr,
+				       uint32_t rcid)
+{
+	struct command {
+		uint16_t id;
+		uint16_t rcid;
+		uint32_t reserved;
+	};
+	struct command cmd;
+	struct edge_info *einfo;
+	int rcu_id;
+
+	einfo = container_of(if_ptr, struct edge_info, xprt_if);
+
+	rcu_id = srcu_read_lock(&einfo->use_ref);
+	if (einfo->in_ssr) {
+		srcu_read_unlock(&einfo->use_ref, rcu_id);
+		return;
+	}
+
+	cmd.id = CLOSE_ACK_CMD;
+	cmd.rcid = rcid;
+	cmd.reserved = 0;
+
+	fifo_tx(einfo, &cmd, sizeof(cmd));
+	srcu_read_unlock(&einfo->use_ref, rcu_id);
+}
+
+/**
+ * subsys_up() - process a subsystem up notification
+ * @if_ptr:	The transport which is up.
+ */
+static void subsys_up(struct glink_transport_if *if_ptr)
+{
+	struct edge_info *einfo;
+
+	einfo = container_of(if_ptr, struct edge_info, xprt_if);
+	if (!einfo->rx_fifo) {
+		if (!get_rx_fifo(einfo))
+			return;
+		einfo->in_ssr = false;
+		einfo->xprt_if.glink_core_if_ptr->link_up(&einfo->xprt_if);
+	}
+}
+
+/**
+ * ssr() - process a subsystem restart notification of a transport
+ * @if_ptr:	The transport to restart
+ *
+ * Return: 0 on success or standard Linux error code.
+ */
+static int ssr(struct glink_transport_if *if_ptr)
+{
+	struct edge_info *einfo;
+	struct deferred_cmd *cmd;
+
+	einfo = container_of(if_ptr, struct edge_info, xprt_if);
+
+	BUG_ON(einfo->remote_proc_id == SMEM_RPM);
+
+	einfo->in_ssr = true;
+	wake_up_all(&einfo->tx_blocked_queue);
+
+	synchronize_srcu(&einfo->use_ref);
+
+	while (!list_empty(&einfo->deferred_cmds)) {
+		cmd = list_first_entry(&einfo->deferred_cmds,
+						struct deferred_cmd, list_node);
+		list_del(&cmd->list_node);
+		kfree(cmd->data);
+		kfree(cmd);
+	}
+
+	einfo->tx_resume_needed = false;
+	einfo->tx_blocked_signal_sent = false;
+	einfo->rx_fifo = NULL;
+	einfo->rx_fifo_size = 0;
+	einfo->tx_ch_desc->write_index = 0;
+	einfo->rx_ch_desc->read_index = 0;
+	einfo->xprt_if.glink_core_if_ptr->link_down(&einfo->xprt_if);
+
+	return 0;
+}
+
+/**
+ * wait_link_down() - Check status of read/write indices
+ * @if_ptr:	The transport to check
+ *
+ * Return: 1 if indices are all zero, 0 otherwise
+ */
+int wait_link_down(struct glink_transport_if *if_ptr)
+{
+	struct edge_info *einfo;
+
+	einfo = container_of(if_ptr, struct edge_info, xprt_if);
+
+	if (einfo->tx_ch_desc->write_index == 0 &&
+		einfo->tx_ch_desc->read_index == 0 &&
+		einfo->rx_ch_desc->write_index == 0 &&
+		einfo->rx_ch_desc->read_index == 0)
+		return 1;
+	else
+		return 0;
+}
+
+/**
+ * allocate_rx_intent() - allocate/reserve space for RX Intent
+ * @if_ptr:	The transport the intent is associated with.
+ * @size:	size of intent.
+ * @intent:	Pointer to the intent structure.
+ *
+ * Assigns "data" to the allocated buffer, since the transport creates a
+ * linear buffer, and "iovec" to the "intent" itself, so that the data can
+ * also be handed to clients that only accept vector buffers.
+ * Note that returning NULL for the pointer is valid (it means that space has
+ * been reserved, but the actual pointer will be provided later).
+ *
+ * Return: 0 on success or standard Linux error code.
+ */
+static int allocate_rx_intent(struct glink_transport_if *if_ptr, size_t size,
+			      struct glink_core_rx_intent *intent)
+{
+	void *t;
+
+	t = kmalloc(size, GFP_KERNEL);
+	if (!t)
+		return -ENOMEM;
+
+	intent->data = t;
+	intent->iovec = (void *)intent;
+	intent->vprovider = rx_linear_vbuf_provider;
+	intent->pprovider = NULL;
+	return 0;
+}
+
+/**
+ * deallocate_rx_intent() - Deallocate space created for RX Intent
+ * @if_ptr:	The transport the intent is associated with.
+ * @intent:	Pointer to the intent structure.
+ *
+ * Return: 0 on success or standard Linux error code.
+ */
+static int deallocate_rx_intent(struct glink_transport_if *if_ptr,
+				struct glink_core_rx_intent *intent)
+{
+	if (!intent || !intent->data)
+		return -EINVAL;
+
+	kfree(intent->data);
+	intent->data = NULL;
+	intent->iovec = NULL;
+	intent->vprovider = NULL;
+	return 0;
+}
+
+/**
+ * tx_cmd_local_rx_intent() - convert an rx intent cmd to wire format and
+ *			      transmit
+ * @if_ptr:	The transport to transmit on.
+ * @lcid:	The local channel id to encode.
+ * @size:	The intent size to encode.
+ * @liid:	The local intent id to encode.
+ *
+ * Return: 0 on success or standard Linux error code.
+ */
+static int tx_cmd_local_rx_intent(struct glink_transport_if *if_ptr,
+				  uint32_t lcid, size_t size, uint32_t liid)
+{
+	struct command {
+		uint16_t id;
+		uint16_t lcid;
+		uint32_t count;
+		uint32_t size;
+		uint32_t liid;
+	};
+	struct command cmd;
+	struct edge_info *einfo;
+	int rcu_id;
+
+	if (size > UINT_MAX) {
+		pr_err("%s: size %zu is too large to encode\n", __func__, size);
+		return -EMSGSIZE;
+	}
+
+	einfo = container_of(if_ptr, struct edge_info, xprt_if);
+
+	if (einfo->intentless)
+		return -EOPNOTSUPP;
+
+	rcu_id = srcu_read_lock(&einfo->use_ref);
+	if (einfo->in_ssr) {
+		srcu_read_unlock(&einfo->use_ref, rcu_id);
+		return -EFAULT;
+	}
+
+	cmd.id = RX_INTENT_CMD;
+	cmd.lcid = lcid;
+	cmd.count = 1;
+	cmd.size = size;
+	cmd.liid = liid;
+
+	fifo_tx(einfo, &cmd, sizeof(cmd));
+
+	srcu_read_unlock(&einfo->use_ref, rcu_id);
+	return 0;
+}
+
+/**
+ * tx_cmd_local_rx_done() - convert an rx done cmd to wire format and transmit
+ * @if_ptr:	The transport to transmit on.
+ * @lcid:	The local channel id to encode.
+ * @liid:	The local intent id to encode.
+ * @reuse:	Reuse the consumed intent.
+ */
+static void tx_cmd_local_rx_done(struct glink_transport_if *if_ptr,
+				 uint32_t lcid, uint32_t liid, bool reuse)
+{
+	struct command {
+		uint16_t id;
+		uint16_t lcid;
+		uint32_t liid;
+	};
+	struct command cmd;
+	struct edge_info *einfo;
+	int rcu_id;
+
+	einfo = container_of(if_ptr, struct edge_info, xprt_if);
+
+	if (einfo->intentless)
+		return;
+
+	rcu_id = srcu_read_lock(&einfo->use_ref);
+	if (einfo->in_ssr) {
+		srcu_read_unlock(&einfo->use_ref, rcu_id);
+		return;
+	}
+
+	cmd.id = reuse ? RX_DONE_W_REUSE_CMD : RX_DONE_CMD;
+	cmd.lcid = lcid;
+	cmd.liid = liid;
+
+	fifo_tx(einfo, &cmd, sizeof(cmd));
+	srcu_read_unlock(&einfo->use_ref, rcu_id);
+}
+
+/**
+ * tx_cmd_rx_intent_req() - convert an rx intent request cmd to wire format and
+ *			    transmit
+ * @if_ptr:	The transport to transmit on.
+ * @lcid:	The local channel id to encode.
+ * @size:	The requested intent size to encode.
+ *
+ * Return: 0 on success or standard Linux error code.
+ */
+static int tx_cmd_rx_intent_req(struct glink_transport_if *if_ptr,
+				uint32_t lcid, size_t size)
+{
+	struct command {
+		uint16_t id;
+		uint16_t lcid;
+		uint32_t size;
+	};
+	struct command cmd;
+	struct edge_info *einfo;
+	int rcu_id;
+
+	if (size > UINT_MAX) {
+		pr_err("%s: size %zu is too large to encode\n", __func__, size);
+		return -EMSGSIZE;
+	}
+
+	einfo = container_of(if_ptr, struct edge_info, xprt_if);
+
+	if (einfo->intentless)
+		return -EOPNOTSUPP;
+
+	rcu_id = srcu_read_lock(&einfo->use_ref);
+	if (einfo->in_ssr) {
+		srcu_read_unlock(&einfo->use_ref, rcu_id);
+		return -EFAULT;
+	}
+
+	cmd.id = RX_INTENT_REQ_CMD;
+	cmd.lcid = lcid;
+	cmd.size = size;
+
+	fifo_tx(einfo, &cmd, sizeof(cmd));
+
+	srcu_read_unlock(&einfo->use_ref, rcu_id);
+	return 0;
+}
+
+/**
+ * tx_cmd_remote_rx_intent_req_ack() - convert an rx intent request ack cmd
+ *				       to wire format and transmit
+ * @if_ptr:	The transport to transmit on.
+ * @lcid:	The local channel id to encode.
+ * @granted:	The request response to encode.
+ *
+ * Return: 0 on success or standard Linux error code.
+ */
+static int tx_cmd_remote_rx_intent_req_ack(struct glink_transport_if *if_ptr,
+					   uint32_t lcid, bool granted)
+{
+	struct command {
+		uint16_t id;
+		uint16_t lcid;
+		uint32_t response;
+	};
+	struct command cmd;
+	struct edge_info *einfo;
+	int rcu_id;
+
+	einfo = container_of(if_ptr, struct edge_info, xprt_if);
+
+	if (einfo->intentless)
+		return -EOPNOTSUPP;
+
+	rcu_id = srcu_read_lock(&einfo->use_ref);
+	if (einfo->in_ssr) {
+		srcu_read_unlock(&einfo->use_ref, rcu_id);
+		return -EFAULT;
+	}
+
+	cmd.id = RX_INTENT_REQ_ACK_CMD;
+	cmd.lcid = lcid;
+	if (granted)
+		cmd.response = 1;
+	else
+		cmd.response = 0;
+
+	fifo_tx(einfo, &cmd, sizeof(cmd));
+
+	srcu_read_unlock(&einfo->use_ref, rcu_id);
+	return 0;
+}
+
+/**
+ * tx_cmd_set_sigs() - convert a signals cmd to wire format and transmit
+ * @if_ptr:	The transport to transmit on.
+ * @lcid:	The local channel id to encode.
+ * @sigs:	The signals to encode.
+ *
+ * Return: 0 on success or standard Linux error code.
+ */
+static int tx_cmd_set_sigs(struct glink_transport_if *if_ptr, uint32_t lcid,
+			   uint32_t sigs)
+{
+	struct command {
+		uint16_t id;
+		uint16_t lcid;
+		uint32_t sigs;
+	};
+	struct command cmd;
+	struct edge_info *einfo;
+	int rcu_id;
+
+	einfo = container_of(if_ptr, struct edge_info, xprt_if);
+
+	rcu_id = srcu_read_lock(&einfo->use_ref);
+	if (einfo->in_ssr) {
+		srcu_read_unlock(&einfo->use_ref, rcu_id);
+		return -EFAULT;
+	}
+
+	cmd.id = SIGNALS_CMD;
+	cmd.lcid = lcid;
+	cmd.sigs = sigs;
+
+	fifo_tx(einfo, &cmd, sizeof(cmd));
+
+	srcu_read_unlock(&einfo->use_ref, rcu_id);
+	return 0;
+}
+
+/**
+ * poll() - poll for data on a channel
+ * @if_ptr:	The transport the channel exists on.
+ * @lcid:	The local channel id.
+ *
+ * Return: 0 if no data available, 1 if data available.
+ */
+static int poll(struct glink_transport_if *if_ptr, uint32_t lcid)
+{
+	struct edge_info *einfo;
+	int rcu_id;
+
+	einfo = container_of(if_ptr, struct edge_info, xprt_if);
+
+	rcu_id = srcu_read_lock(&einfo->use_ref);
+	if (einfo->in_ssr) {
+		srcu_read_unlock(&einfo->use_ref, rcu_id);
+		return -EFAULT;
+	}
+
+	if (fifo_read_avail(einfo)) {
+		__rx_worker(einfo, true);
+		srcu_read_unlock(&einfo->use_ref, rcu_id);
+		return 1;
+	}
+
+	srcu_read_unlock(&einfo->use_ref, rcu_id);
+	return 0;
+}
+
+/**
+ * mask_rx_irq() - mask the receive irq for a channel
+ * @if_ptr:	The transport the channel exists on.
+ * @lcid:	The local channel id for the channel.
+ * @mask:	True to mask the irq, false to unmask.
+ * @pstruct:	Platform defined structure for handling the masking.
+ *
+ * Return: 0 on success or standard Linux error code.
+ */
+static int mask_rx_irq(struct glink_transport_if *if_ptr, uint32_t lcid,
+		       bool mask, void *pstruct)
+{
+	struct edge_info *einfo;
+	struct irq_chip *irq_chip;
+	struct irq_data *irq_data;
+	int rcu_id;
+
+	einfo = container_of(if_ptr, struct edge_info, xprt_if);
+
+	rcu_id = srcu_read_lock(&einfo->use_ref);
+	if (einfo->in_ssr) {
+		srcu_read_unlock(&einfo->use_ref, rcu_id);
+		return -EFAULT;
+	}
+
+	irq_chip = irq_get_chip(einfo->irq_line);
+	if (!irq_chip) {
+		srcu_read_unlock(&einfo->use_ref, rcu_id);
+		return -ENODEV;
+	}
+
+	irq_data = irq_get_irq_data(einfo->irq_line);
+	if (!irq_data) {
+		srcu_read_unlock(&einfo->use_ref, rcu_id);
+		return -ENODEV;
+	}
+
+	if (mask) {
+		irq_chip->irq_mask(irq_data);
+		einfo->irq_disabled = true;
+		if (pstruct)
+			irq_set_affinity(einfo->irq_line, pstruct);
+	} else {
+		irq_chip->irq_unmask(irq_data);
+		einfo->irq_disabled = false;
+	}
+
+	srcu_read_unlock(&einfo->use_ref, rcu_id);
+	return 0;
+}
+
+/**
+ * tx_data() - convert a data/tracer_pkt to wire format and transmit
+ * @if_ptr:	The transport to transmit on.
+ * @cmd_id:	The command ID to transmit.
+ * @lcid:	The local channel id to encode.
+ * @pctx:	The data to encode.
+ *
+ * Return: Number of bytes written or standard Linux error code.
+ */
+static int tx_data(struct glink_transport_if *if_ptr, uint16_t cmd_id,
+		   uint32_t lcid, struct glink_core_tx_pkt *pctx)
+{
+	struct command {
+		uint16_t id;
+		uint16_t lcid;
+		uint32_t riid;
+		uint32_t size;
+		uint32_t size_left;
+	};
+	struct command cmd;
+	struct edge_info *einfo;
+	uint32_t size;
+	uint32_t zeros_size;
+	const void *data_start;
+	char zeros[FIFO_ALIGNMENT] = { 0 };
+	unsigned long flags;
+	size_t tx_size = 0;
+	int rcu_id;
+
+	if (pctx->size < pctx->size_remaining) {
+		GLINK_ERR("%s: size remaining exceeds size.  Resetting.\n",
+								__func__);
+		pctx->size_remaining = pctx->size;
+	}
+	if (!pctx->size_remaining)
+		return 0;
+
+	einfo = container_of(if_ptr, struct edge_info, xprt_if);
+
+	rcu_id = srcu_read_lock(&einfo->use_ref);
+	if (einfo->in_ssr) {
+		srcu_read_unlock(&einfo->use_ref, rcu_id);
+		return -EFAULT;
+	}
+
+	if (einfo->intentless &&
+	    (pctx->size_remaining != pctx->size || cmd_id == TRACER_PKT_CMD)) {
+		srcu_read_unlock(&einfo->use_ref, rcu_id);
+		return -EINVAL;
+	}
+
+	if (cmd_id == TX_DATA_CMD) {
+		if (pctx->size_remaining == pctx->size)
+			cmd.id = TX_DATA_CMD;
+		else
+			cmd.id = TX_DATA_CONT_CMD;
+	} else {
+		if (pctx->size_remaining == pctx->size)
+			cmd.id = TRACER_PKT_CMD;
+		else
+			cmd.id = TRACER_PKT_CONT_CMD;
+	}
+	cmd.lcid = lcid;
+	cmd.riid = pctx->riid;
+	data_start = get_tx_vaddr(pctx, pctx->size - pctx->size_remaining,
+				  &tx_size);
+	if (!data_start) {
+		GLINK_ERR("%s: invalid data_start\n", __func__);
+		srcu_read_unlock(&einfo->use_ref, rcu_id);
+		return -EINVAL;
+	}
+
+	spin_lock_irqsave(&einfo->write_lock, flags);
+	size = fifo_write_avail(einfo);
+
+	/* Intentless clients expect a complete commit or instant failure */
+	if (einfo->intentless && size < sizeof(cmd) + pctx->size) {
+		spin_unlock_irqrestore(&einfo->write_lock, flags);
+		srcu_read_unlock(&einfo->use_ref, rcu_id);
+		return -ENOSPC;
+	}
+
+	/* Need enough space to write the command and some data */
+	if (size <= sizeof(cmd)) {
+		einfo->tx_resume_needed = true;
+		send_tx_blocked_signal(einfo);
+		spin_unlock_irqrestore(&einfo->write_lock, flags);
+		srcu_read_unlock(&einfo->use_ref, rcu_id);
+		return -EAGAIN;
+	}
+	size -= sizeof(cmd);
+	if (size > tx_size)
+		size = tx_size;
+
+	cmd.size = size;
+	pctx->size_remaining -= size;
+	cmd.size_left = pctx->size_remaining;
+	zeros_size = ALIGN(size, FIFO_ALIGNMENT) - cmd.size;
+	if (cmd.id == TRACER_PKT_CMD)
+		tracer_pkt_log_event((void *)(pctx->data), GLINK_XPRT_TX);
+
+	fifo_write_complex(einfo, &cmd, sizeof(cmd), data_start, size, zeros,
+								zeros_size);
+	GLINK_DBG("%s %s: lcid[%u] riid[%u] cmd[%d], size[%d], size_left[%d]\n",
+		"<SMEM>", __func__, cmd.lcid, cmd.riid, cmd.id, cmd.size,
+		cmd.size_left);
+	spin_unlock_irqrestore(&einfo->write_lock, flags);
+
+	/* Fake tx_done for intentless since it's not supported over the wire */
+	if (einfo->intentless) {
+		spin_lock_irqsave(&einfo->rx_lock, flags);
+		cmd.id = RX_DONE_CMD;
+		cmd.lcid = pctx->rcid;
+		queue_cmd(einfo, &cmd, NULL);
+		spin_unlock_irqrestore(&einfo->rx_lock, flags);
+	}
+
+	srcu_read_unlock(&einfo->use_ref, rcu_id);
+	return cmd.size;
+}
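+
+/*
+ * Fragmentation example: for a 100 byte packet when only 60 payload bytes
+ * fit after the header, the first call sends TX_DATA_CMD with size = 60 and
+ * size_left = 40; once the core retransmits, a second call sends
+ * TX_DATA_CONT_CMD with size = 40 and size_left = 0, at which point
+ * process_rx_data() on the remote hands up the reassembled packet.
+ */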
+
+/**
+ * tx() - convert a data transmit cmd to wire format and transmit
+ * @if_ptr:	The transport to transmit on.
+ * @lcid:	The local channel id to encode.
+ * @pctx:	The data to encode.
+ *
+ * Return: Number of bytes written or standard Linux error code.
+ */
+static int tx(struct glink_transport_if *if_ptr, uint32_t lcid,
+	      struct glink_core_tx_pkt *pctx)
+{
+	return tx_data(if_ptr, TX_DATA_CMD, lcid, pctx);
+}
+
+/**
+ * tx_cmd_tracer_pkt() - convert a tracer packet cmd to wire format and transmit
+ * @if_ptr:	The transport to transmit on.
+ * @lcid:	The local channel id to encode.
+ * @pctx:	The data to encode.
+ *
+ * Return: Number of bytes written or standard Linux error code.
+ */
+static int tx_cmd_tracer_pkt(struct glink_transport_if *if_ptr, uint32_t lcid,
+	      struct glink_core_tx_pkt *pctx)
+{
+	return tx_data(if_ptr, TRACER_PKT_CMD, lcid, pctx);
+}
+
+/**
+ * get_power_vote_ramp_time() - Get the ramp time required for the power
+ *				votes to be applied
+ * @if_ptr:	The transport interface on which power voting is requested.
+ * @state:	The power state for which ramp time is required.
+ *
+ * Return: The ramp time specific to the power state, standard error otherwise.
+ */
+static unsigned long get_power_vote_ramp_time(
+				struct glink_transport_if *if_ptr,
+				uint32_t state)
+{
+	struct edge_info *einfo;
+
+	einfo = container_of(if_ptr, struct edge_info, xprt_if);
+
+	if (state >= einfo->num_pw_states || !(einfo->ramp_time_us))
+		return (unsigned long)ERR_PTR(-EINVAL);
+
+	return einfo->ramp_time_us[state];
+}
+
+/**
+ * power_vote() - Update the power votes to meet qos requirement
+ * @if_ptr:	The transport interface on which power voting is requested.
+ * @state:	The power state for which the voting should be done.
+ *
+ * Return: 0 on Success, standard error otherwise.
+ */
+static int power_vote(struct glink_transport_if *if_ptr, uint32_t state)
+{
+	return 0;
+}
+
+/**
+ * power_unvote() - Remove all the power votes
+ * @if_ptr:	The transport interface on which power voting is requested.
+ *
+ * Return: 0 on Success, standard error otherwise.
+ */
+static int power_unvote(struct glink_transport_if *if_ptr)
+{
+	return 0;
+}
+
+/**
+ * rx_rt_vote() - Add an RX thread RT vote
+ * @if_ptr:	The transport interface on which the RT vote is requested.
+ *
+ * Return: 0 on Success, standard error otherwise.
+ */
+static int rx_rt_vote(struct glink_transport_if *if_ptr)
+{
+	struct edge_info *einfo;
+	struct sched_param param = { .sched_priority = 1 };
+	int ret = 0;
+	unsigned long flags;
+
+	einfo = container_of(if_ptr, struct edge_info, xprt_if);
+	spin_lock_irqsave(&einfo->rt_vote_lock, flags);
+	if (!einfo->rt_votes)
+		ret = sched_setscheduler_nocheck(einfo->task, SCHED_FIFO,
+							&param);
+	einfo->rt_votes++;
+	spin_unlock_irqrestore(&einfo->rt_vote_lock, flags);
+	return ret;
+}
+
+/**
+ * rx_rt_unvote() - Remove an RX thread RT vote
+ * @if_ptr:	The transport interface on which the RT vote is removed.
+ *
+ * Return: 0 on Success, standard error otherwise.
+ */
+static int rx_rt_unvote(struct glink_transport_if *if_ptr)
+{
+	struct edge_info *einfo;
+	struct sched_param param = { .sched_priority = 0 };
+	int ret = 0;
+	unsigned long flags;
+
+	einfo = container_of(if_ptr, struct edge_info, xprt_if);
+	spin_lock_irqsave(&einfo->rt_vote_lock, flags);
+	einfo->rt_votes--;
+	if (!einfo->rt_votes)
+		ret = sched_setscheduler_nocheck(einfo->task, SCHED_NORMAL,
+							&param);
+	spin_unlock_irqrestore(&einfo->rt_vote_lock, flags);
+	return ret;
+}
+
+/**
+ * negotiate_features_v1() - determine what features of a version can be used
+ * @if_ptr:	The transport for which features are negotiated.
+ * @version:	The version negotiated.
+ * @features:	The set of requested features.
+ *
+ * Return: What set of the requested features can be supported.
+ */
+static uint32_t negotiate_features_v1(struct glink_transport_if *if_ptr,
+				      const struct glink_core_version *version,
+				      uint32_t features)
+{
+	return features & version->features;
+}
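+
+/*
+ * Example: versions[] above advertises only TRACER_PKT_FEATURE, so if the
+ * remote requests TRACER_PKT_FEATURE plus a bit this transport does not
+ * implement, the mask grants TRACER_PKT_FEATURE alone and the unknown bit
+ * is silently dropped.
+ */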
+
+/**
+ * init_xprt_if() - initialize the xprt_if for an edge
+ * @einfo:	The edge to initialize.
+ */
+static void init_xprt_if(struct edge_info *einfo)
+{
+	einfo->xprt_if.tx_cmd_version = tx_cmd_version;
+	einfo->xprt_if.tx_cmd_version_ack = tx_cmd_version_ack;
+	einfo->xprt_if.set_version = set_version;
+	einfo->xprt_if.tx_cmd_ch_open = tx_cmd_ch_open;
+	einfo->xprt_if.tx_cmd_ch_close = tx_cmd_ch_close;
+	einfo->xprt_if.tx_cmd_ch_remote_open_ack = tx_cmd_ch_remote_open_ack;
+	einfo->xprt_if.tx_cmd_ch_remote_close_ack = tx_cmd_ch_remote_close_ack;
+	einfo->xprt_if.ssr = ssr;
+	einfo->xprt_if.subsys_up = subsys_up;
+	einfo->xprt_if.allocate_rx_intent = allocate_rx_intent;
+	einfo->xprt_if.deallocate_rx_intent = deallocate_rx_intent;
+	einfo->xprt_if.tx_cmd_local_rx_intent = tx_cmd_local_rx_intent;
+	einfo->xprt_if.tx_cmd_local_rx_done = tx_cmd_local_rx_done;
+	einfo->xprt_if.tx = tx;
+	einfo->xprt_if.tx_cmd_rx_intent_req = tx_cmd_rx_intent_req;
+	einfo->xprt_if.tx_cmd_remote_rx_intent_req_ack =
+						tx_cmd_remote_rx_intent_req_ack;
+	einfo->xprt_if.tx_cmd_set_sigs = tx_cmd_set_sigs;
+	einfo->xprt_if.poll = poll;
+	einfo->xprt_if.mask_rx_irq = mask_rx_irq;
+	einfo->xprt_if.wait_link_down = wait_link_down;
+	einfo->xprt_if.tx_cmd_tracer_pkt = tx_cmd_tracer_pkt;
+	einfo->xprt_if.get_power_vote_ramp_time = get_power_vote_ramp_time;
+	einfo->xprt_if.power_vote = power_vote;
+	einfo->xprt_if.power_unvote = power_unvote;
+	einfo->xprt_if.rx_rt_vote = rx_rt_vote;
+	einfo->xprt_if.rx_rt_unvote = rx_rt_unvote;
+}
+
+/**
+ * init_xprt_cfg() - initialize the xprt_cfg for an edge
+ * @einfo:	The edge to initialize.
+ * @name:	The name of the remote side this edge communicates to.
+ */
+static void init_xprt_cfg(struct edge_info *einfo, const char *name)
+{
+	einfo->xprt_cfg.name = XPRT_NAME;
+	einfo->xprt_cfg.edge = name;
+	einfo->xprt_cfg.versions = versions;
+	einfo->xprt_cfg.versions_entries = ARRAY_SIZE(versions);
+	einfo->xprt_cfg.max_cid = SZ_64K;
+	einfo->xprt_cfg.max_iid = SZ_2G;
+}
+
+/**
+ * parse_qos_dt_params() - Parse the power states from DT
+ * @node:	Reference to the devicetree node for a specific edge.
+ * @einfo:	Edge information for the edge whose probe function is called.
+ *
+ * Return: 0 on success, standard error code otherwise.
+ */
+static int parse_qos_dt_params(struct device_node *node,
+				struct edge_info *einfo)
+{
+	int rc;
+	int i;
+	char *key;
+	uint32_t *arr32;
+	uint32_t num_states;
+
+	key = "qcom,ramp-time";
+	if (!of_find_property(node, key, &num_states))
+		return -ENODEV;
+
+	num_states /= sizeof(uint32_t);
+
+	einfo->num_pw_states = num_states;
+
+	arr32 = kmalloc_array(num_states, sizeof(uint32_t), GFP_KERNEL);
+	if (!arr32)
+		return -ENOMEM;
+
+	einfo->ramp_time_us = kmalloc_array(num_states, sizeof(unsigned long),
+					GFP_KERNEL);
+	if (!einfo->ramp_time_us) {
+		rc = -ENOMEM;
+		goto mem_alloc_fail;
+	}
+
+	rc = of_property_read_u32_array(node, key, arr32, num_states);
+	if (rc) {
+		rc = -ENODEV;
+		goto invalid_key;
+	}
+	for (i = 0; i < num_states; i++)
+		einfo->ramp_time_us[i] = arr32[i];
+
+	rc = 0;
+	kfree(arr32);
+	return rc;
+
+invalid_key:
+	kfree(einfo->ramp_time_us);
+mem_alloc_fail:
+	kfree(arr32);
+	return rc;
+}
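+
+/*
+ * A matching devicetree fragment would look something like this (the values
+ * are illustrative only):
+ *
+ *     qcom,ramp-time = <10 50 100>;
+ *
+ * which yields num_pw_states = 3 and ramp_time_us = { 10, 50, 100 }, i.e.
+ * power state 2 requires a 100 us ramp.
+ */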
+
+/**
+ * subsys_name_to_id() - translate a subsystem name to a processor id
+ * @name:	The subsystem name to look up.
+ *
+ * Return: The processor id corresponding to @name or standard Linux error code.
+ */
+static int subsys_name_to_id(const char *name)
+{
+	if (!name)
+		return -ENODEV;
+
+	if (!strcmp(name, "apss"))
+		return SMEM_APPS;
+	if (!strcmp(name, "dsps"))
+		return SMEM_DSPS;
+	if (!strcmp(name, "lpass"))
+		return SMEM_Q6;
+	if (!strcmp(name, "mpss"))
+		return SMEM_MODEM;
+	if (!strcmp(name, "rpm"))
+		return SMEM_RPM;
+	if (!strcmp(name, "wcnss"))
+		return SMEM_WCNSS;
+	if (!strcmp(name, "spss"))
+		return SMEM_SPSS;
+	if (!strcmp(name, "cdsp"))
+		return SMEM_CDSP;
+	return -ENODEV;
+}
+
+static int glink_smem_native_probe(struct platform_device *pdev)
+{
+	struct device_node *node;
+	struct device_node *phandle_node;
+	struct edge_info *einfo;
+	int rc;
+	char *key;
+	const char *subsys_name;
+	uint32_t irq_line;
+	uint32_t irq_mask;
+	struct resource *r;
+
+	node = pdev->dev.of_node;
+
+	einfo = kzalloc(sizeof(*einfo), GFP_KERNEL);
+	if (!einfo) {
+		pr_err("%s: edge_info allocation failed\n", __func__);
+		rc = -ENOMEM;
+		goto edge_info_alloc_fail;
+	}
+
+	key = "label";
+	subsys_name = of_get_property(node, key, NULL);
+	if (!subsys_name) {
+		pr_err("%s: missing key %s\n", __func__, key);
+		rc = -ENODEV;
+		goto missing_key;
+	}
+
+	key = "interrupts";
+	irq_line = irq_of_parse_and_map(node, 0);
+	if (!irq_line) {
+		pr_err("%s: missing key %s\n", __func__, key);
+		rc = -ENODEV;
+		goto missing_key;
+	}
+
+	key = "qcom,irq-mask";
+	rc = of_property_read_u32(node, key, &irq_mask);
+	if (rc) {
+		pr_err("%s: missing key %s\n", __func__, key);
+		rc = -ENODEV;
+		goto missing_key;
+	}
+
+	key = "irq-reg-base";
+	r = platform_get_resource_byname(pdev, IORESOURCE_MEM, key);
+	if (!r) {
+		pr_err("%s: missing key %s\n", __func__, key);
+		rc = -ENODEV;
+		goto missing_key;
+	}
+
+	if (subsys_name_to_id(subsys_name) == -ENODEV) {
+		pr_err("%s: unknown subsystem: %s\n", __func__, subsys_name);
+		rc = -ENODEV;
+		goto invalid_key;
+	}
+	einfo->remote_proc_id = subsys_name_to_id(subsys_name);
+
+	init_xprt_cfg(einfo, subsys_name);
+	init_xprt_if(einfo);
+	spin_lock_init(&einfo->write_lock);
+	init_waitqueue_head(&einfo->tx_blocked_queue);
+	init_kthread_work(&einfo->kwork, rx_worker);
+	init_kthread_worker(&einfo->kworker);
+	einfo->read_from_fifo = read_from_fifo;
+	einfo->write_to_fifo = write_to_fifo;
+	init_srcu_struct(&einfo->use_ref);
+	spin_lock_init(&einfo->rx_lock);
+	INIT_LIST_HEAD(&einfo->deferred_cmds);
+	spin_lock_init(&einfo->rt_vote_lock);
+	einfo->rt_votes = 0;
+
+	mutex_lock(&probe_lock);
+	if (edge_infos[einfo->remote_proc_id]) {
+		pr_err("%s: duplicate subsys %s is not valid\n", __func__,
+								subsys_name);
+		rc = -ENODEV;
+		mutex_unlock(&probe_lock);
+		goto invalid_key;
+	}
+	edge_infos[einfo->remote_proc_id] = einfo;
+	mutex_unlock(&probe_lock);
+
+	einfo->out_irq_mask = irq_mask;
+	einfo->out_irq_reg = ioremap_nocache(r->start, resource_size(r));
+	if (!einfo->out_irq_reg) {
+		pr_err("%s: unable to map irq reg\n", __func__);
+		rc = -ENOMEM;
+		goto ioremap_fail;
+	}
+
+	einfo->task = kthread_run(kthread_worker_fn, &einfo->kworker,
+						"smem_native_%s", subsys_name);
+	if (IS_ERR(einfo->task)) {
+		rc = PTR_ERR(einfo->task);
+		pr_err("%s: kthread_run failed %d\n", __func__, rc);
+		goto kthread_fail;
+	}
+
+	einfo->tx_ch_desc = smem_alloc(SMEM_GLINK_NATIVE_XPRT_DESCRIPTOR,
+							SMEM_CH_DESC_SIZE,
+							einfo->remote_proc_id,
+							0);
+	if (PTR_ERR(einfo->tx_ch_desc) == -EPROBE_DEFER) {
+		rc = -EPROBE_DEFER;
+		goto smem_alloc_fail;
+	}
+	if (!einfo->tx_ch_desc) {
+		pr_err("%s: smem alloc of ch descriptor failed\n", __func__);
+		rc = -ENOMEM;
+		goto smem_alloc_fail;
+	}
+	einfo->rx_ch_desc = einfo->tx_ch_desc + 1;
+
+	einfo->tx_fifo_size = SZ_16K;
+	einfo->tx_fifo = smem_alloc(SMEM_GLINK_NATIVE_XPRT_FIFO_0,
+							einfo->tx_fifo_size,
+							einfo->remote_proc_id,
+							0);
+	if (!einfo->tx_fifo) {
+		pr_err("%s: smem alloc of tx fifo failed\n", __func__);
+		rc = -ENOMEM;
+		goto smem_alloc_fail;
+	}
+
+	key = "qcom,qos-config";
+	phandle_node = of_parse_phandle(node, key, 0);
+	if (phandle_node && !(of_get_glink_core_qos_cfg(phandle_node,
+							&einfo->xprt_cfg)))
+		parse_qos_dt_params(node, einfo);
+
+	rc = glink_core_register_transport(&einfo->xprt_if, &einfo->xprt_cfg);
+	if (rc == -EPROBE_DEFER)
+		goto reg_xprt_fail;
+	if (rc) {
+		pr_err("%s: glink core register transport failed: %d\n",
+								__func__, rc);
+		goto reg_xprt_fail;
+	}
+
+	einfo->irq_line = irq_line;
+	rc = request_irq(irq_line, irq_handler,
+			IRQF_TRIGGER_RISING | IRQF_NO_SUSPEND | IRQF_SHARED,
+			node->name, einfo);
+	if (rc < 0) {
+		pr_err("%s: request_irq on %d failed: %d\n", __func__, irq_line,
+									rc);
+		goto request_irq_fail;
+	}
+	einfo->in_ssr = true;
+	rc = enable_irq_wake(irq_line);
+	if (rc < 0)
+		pr_err("%s: enable_irq_wake() failed on %d\n", __func__,
+								irq_line);
+
+	register_debugfs_info(einfo);
+	/* fake an interrupt on this edge to see if the remote side is up */
+	irq_handler(0, einfo);
+	return 0;
+
+request_irq_fail:
+	glink_core_unregister_transport(&einfo->xprt_if);
+reg_xprt_fail:
+smem_alloc_fail:
+	flush_kthread_worker(&einfo->kworker);
+	kthread_stop(einfo->task);
+	einfo->task = NULL;
+kthread_fail:
+	iounmap(einfo->out_irq_reg);
+ioremap_fail:
+	mutex_lock(&probe_lock);
+	edge_infos[einfo->remote_proc_id] = NULL;
+	mutex_unlock(&probe_lock);
+invalid_key:
+missing_key:
+	kfree(einfo);
+edge_info_alloc_fail:
+	return rc;
+}
+
+static int glink_rpm_native_probe(struct platform_device *pdev)
+{
+	struct device_node *node;
+	struct edge_info *einfo;
+	int rc;
+	char *key;
+	const char *subsys_name;
+	uint32_t irq_line;
+	uint32_t irq_mask;
+	struct resource *irq_r;
+	struct resource *msgram_r;
+	void __iomem *msgram;
+	char toc[RPM_TOC_SIZE];
+	uint32_t *tocp;
+	uint32_t num_toc_entries;
+
+	node = pdev->dev.of_node;
+
+	einfo = kzalloc(sizeof(*einfo), GFP_KERNEL);
+	if (!einfo) {
+		pr_err("%s: edge_info allocation failed\n", __func__);
+		rc = -ENOMEM;
+		goto edge_info_alloc_fail;
+	}
+
+	subsys_name = "rpm";
+
+	key = "interrupts";
+	irq_line = irq_of_parse_and_map(node, 0);
+	if (!irq_line) {
+		pr_err("%s: missing key %s\n", __func__, key);
+		rc = -ENODEV;
+		goto missing_key;
+	}
+
+	key = "qcom,irq-mask";
+	rc = of_property_read_u32(node, key, &irq_mask);
+	if (rc) {
+		pr_err("%s: missing key %s\n", __func__, key);
+		rc = -ENODEV;
+		goto missing_key;
+	}
+
+	key = "irq-reg-base";
+	irq_r = platform_get_resource_byname(pdev, IORESOURCE_MEM, key);
+	if (!irq_r) {
+		pr_err("%s: missing key %s\n", __func__, key);
+		rc = -ENODEV;
+		goto missing_key;
+	}
+
+	key = "msgram";
+	msgram_r = platform_get_resource_byname(pdev, IORESOURCE_MEM, key);
+	if (!msgram_r) {
+		pr_err("%s: missing key %s\n", __func__, key);
+		rc = -ENODEV;
+		goto missing_key;
+	}
+
+	if (subsys_name_to_id(subsys_name) == -ENODEV) {
+		pr_err("%s: unknown subsystem: %s\n", __func__, subsys_name);
+		rc = -ENODEV;
+		goto invalid_key;
+	}
+	einfo->remote_proc_id = subsys_name_to_id(subsys_name);
+
+	init_xprt_cfg(einfo, subsys_name);
+	init_xprt_if(einfo);
+	spin_lock_init(&einfo->write_lock);
+	init_waitqueue_head(&einfo->tx_blocked_queue);
+	init_kthread_work(&einfo->kwork, rx_worker);
+	init_kthread_worker(&einfo->kworker);
+	einfo->intentless = true;
+	einfo->read_from_fifo = memcpy32_fromio;
+	einfo->write_to_fifo = memcpy32_toio;
+	init_srcu_struct(&einfo->use_ref);
+	spin_lock_init(&einfo->rx_lock);
+	INIT_LIST_HEAD(&einfo->deferred_cmds);
+
+	mutex_lock(&probe_lock);
+	if (edge_infos[einfo->remote_proc_id]) {
+		pr_err("%s: duplicate subsys %s is not valid\n", __func__,
+								subsys_name);
+		rc = -ENODEV;
+		mutex_unlock(&probe_lock);
+		goto invalid_key;
+	}
+	edge_infos[einfo->remote_proc_id] = einfo;
+	mutex_unlock(&probe_lock);
+
+	einfo->out_irq_mask = irq_mask;
+	einfo->out_irq_reg = ioremap_nocache(irq_r->start,
+							resource_size(irq_r));
+	if (!einfo->out_irq_reg) {
+		pr_err("%s: unable to map irq reg\n", __func__);
+		rc = -ENOMEM;
+		goto irq_ioremap_fail;
+	}
+
+	msgram = ioremap_nocache(msgram_r->start, resource_size(msgram_r));
+	if (!msgram) {
+		pr_err("%s: unable to map msgram\n", __func__);
+		rc = -ENOMEM;
+		goto msgram_ioremap_fail;
+	}
+
+	einfo->task = kthread_run(kthread_worker_fn, &einfo->kworker,
+						"smem_native_%s", subsys_name);
+	if (IS_ERR(einfo->task)) {
+		rc = PTR_ERR(einfo->task);
+		pr_err("%s: kthread_run failed %d\n", __func__, rc);
+		goto kthread_fail;
+	}
+
+	memcpy32_fromio(toc, msgram + resource_size(msgram_r) - RPM_TOC_SIZE,
+								RPM_TOC_SIZE);
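+
+	/*
+	 * The TOC copied from the end of msgram is assumed to be laid out
+	 * as 32-bit words (inferred from the parsing below, not from a
+	 * documented format):
+	 *
+	 *   word 0   : RPM_TOC_ID magic
+	 *   word 1   : number of TOC entries
+	 *   word 2.. : one triplet per entry: { fifo id, offset of the
+	 *              channel descriptor within msgram, fifo size }
+	 *
+	 * The two scans below walk these triplets looking for the TX and
+	 * RX fifo entries respectively.
+	 */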
+	tocp = (uint32_t *)toc;
+	if (*tocp != RPM_TOC_ID) {
+		rc = -ENODEV;
+		pr_err("%s: TOC id %d is not valid\n", __func__, *tocp);
+		goto toc_init_fail;
+	}
+	++tocp;
+	num_toc_entries = *tocp;
+	if (num_toc_entries > RPM_MAX_TOC_ENTRIES) {
+		rc = -ENODEV;
+		pr_err("%s: %d is too many toc entries\n", __func__,
+							num_toc_entries);
+		goto toc_init_fail;
+	}
+	++tocp;
+
+	for (rc = 0; rc < num_toc_entries; ++rc) {
+		if (*tocp != RPM_TX_FIFO_ID) {
+			tocp += 3;
+			continue;
+		}
+		++tocp;
+		einfo->tx_ch_desc = msgram + *tocp;
+		einfo->tx_fifo = einfo->tx_ch_desc + 1;
+		if ((uintptr_t)einfo->tx_fifo >
+				(uintptr_t)(msgram + resource_size(msgram_r))) {
+			pr_err("%s: invalid tx fifo address\n", __func__);
+			einfo->tx_fifo = NULL;
+			break;
+		}
+		++tocp;
+		einfo->tx_fifo_size = *tocp;
+		if (einfo->tx_fifo_size > resource_size(msgram_r) ||
+			(uintptr_t)(einfo->tx_fifo + einfo->tx_fifo_size) >
+				(uintptr_t)(msgram + resource_size(msgram_r))) {
+			pr_err("%s: invalid tx fifo size\n", __func__);
+			einfo->tx_fifo = NULL;
+			break;
+		}
+		break;
+	}
+	if (!einfo->tx_fifo) {
+		rc = -ENODEV;
+		pr_err("%s: tx fifo not found\n", __func__);
+		goto toc_init_fail;
+	}
+
+	tocp = (uint32_t *)toc;
+	tocp += 2;
+	for (rc = 0; rc < num_toc_entries; ++rc) {
+		if (*tocp != RPM_RX_FIFO_ID) {
+			tocp += 3;
+			continue;
+		}
+		++tocp;
+		einfo->rx_ch_desc = msgram + *tocp;
+		einfo->rx_fifo = einfo->rx_ch_desc + 1;
+		if ((uintptr_t)einfo->rx_fifo >
+				(uintptr_t)(msgram + resource_size(msgram_r))) {
+			pr_err("%s: invalid rx fifo address\n", __func__);
+			einfo->rx_fifo = NULL;
+			break;
+		}
+		++tocp;
+		einfo->rx_fifo_size = *tocp;
+		if (einfo->rx_fifo_size > resource_size(msgram_r) ||
+			(uintptr_t)(einfo->rx_fifo + einfo->rx_fifo_size) >
+				(uintptr_t)(msgram + resource_size(msgram_r))) {
+			pr_err("%s: invalid rx fifo size\n", __func__);
+			einfo->rx_fifo = NULL;
+			break;
+		}
+		break;
+	}
+	if (!einfo->rx_fifo) {
+		rc = -ENODEV;
+		pr_err("%s: rx fifo not found\n", __func__);
+		goto toc_init_fail;
+	}
+
+	einfo->tx_ch_desc->write_index = 0;
+	einfo->rx_ch_desc->read_index = 0;
+
+	rc = glink_core_register_transport(&einfo->xprt_if, &einfo->xprt_cfg);
+	if (rc == -EPROBE_DEFER)
+		goto reg_xprt_fail;
+	if (rc) {
+		pr_err("%s: glink core register transport failed: %d\n",
+								__func__, rc);
+		goto reg_xprt_fail;
+	}
+
+	einfo->irq_line = irq_line;
+	rc = request_irq(irq_line, irq_handler,
+			IRQF_TRIGGER_RISING | IRQF_NO_SUSPEND | IRQF_SHARED,
+			node->name, einfo);
+	if (rc < 0) {
+		pr_err("%s: request_irq on %d failed: %d\n", __func__, irq_line,
+									rc);
+		goto request_irq_fail;
+	}
+	rc = enable_irq_wake(irq_line);
+	if (rc < 0)
+		pr_err("%s: enable_irq_wake() failed on %d\n", __func__,
+								irq_line);
+
+	register_debugfs_info(einfo);
+	einfo->xprt_if.glink_core_if_ptr->link_up(&einfo->xprt_if);
+	return 0;
+
+request_irq_fail:
+	glink_core_unregister_transport(&einfo->xprt_if);
+reg_xprt_fail:
+toc_init_fail:
+	flush_kthread_worker(&einfo->kworker);
+	kthread_stop(einfo->task);
+	einfo->task = NULL;
+kthread_fail:
+	iounmap(msgram);
+msgram_ioremap_fail:
+	iounmap(einfo->out_irq_reg);
+irq_ioremap_fail:
+	mutex_lock(&probe_lock);
+	edge_infos[einfo->remote_proc_id] = NULL;
+	mutex_unlock(&probe_lock);
+invalid_key:
+missing_key:
+	kfree(einfo);
+edge_info_alloc_fail:
+	return rc;
+}
+
+static int glink_mailbox_probe(struct platform_device *pdev)
+{
+	struct device_node *node;
+	struct edge_info *einfo;
+	int rc;
+	char *key;
+	const char *subsys_name;
+	uint32_t irq_line;
+	uint32_t irq_mask;
+	struct resource *irq_r;
+	struct resource *mbox_loc_r;
+	struct resource *mbox_size_r;
+	struct resource *rx_reset_r;
+	void *mbox_loc;
+	void *mbox_size;
+	struct mailbox_config_info *mbox_cfg;
+	uint32_t mbox_cfg_size;
+	phys_addr_t cfg_p_addr;
+
+	node = pdev->dev.of_node;
+
+	einfo = kzalloc(sizeof(*einfo), GFP_KERNEL);
+	if (!einfo) {
+		rc = -ENOMEM;
+		goto edge_info_alloc_fail;
+	}
+
+	key = "label";
+	subsys_name = of_get_property(node, key, NULL);
+	if (!subsys_name) {
+		pr_err("%s: missing key %s\n", __func__, key);
+		rc = -ENODEV;
+		goto missing_key;
+	}
+
+	key = "interrupts";
+	irq_line = irq_of_parse_and_map(node, 0);
+	if (!irq_line) {
+		pr_err("%s: missing key %s\n", __func__, key);
+		rc = -ENODEV;
+		goto missing_key;
+	}
+
+	key = "qcom,irq-mask";
+	rc = of_property_read_u32(node, key, &irq_mask);
+	if (rc) {
+		pr_err("%s: missing key %s\n", __func__, key);
+		rc = -ENODEV;
+		goto missing_key;
+	}
+
+	key = "irq-reg-base";
+	irq_r = platform_get_resource_byname(pdev, IORESOURCE_MEM, key);
+	if (!irq_r) {
+		pr_err("%s: missing key %s\n", __func__, key);
+		rc = -ENODEV;
+		goto missing_key;
+	}
+
+	key = "mbox-loc-addr";
+	mbox_loc_r = platform_get_resource_byname(pdev, IORESOURCE_MEM, key);
+	if (!mbox_loc_r) {
+		pr_err("%s: missing key %s\n", __func__, key);
+		rc = -ENODEV;
+		goto missing_key;
+	}
+
+	key = "mbox-loc-size";
+	mbox_size_r = platform_get_resource_byname(pdev, IORESOURCE_MEM, key);
+	if (!mbox_size_r) {
+		pr_err("%s: missing key %s\n", __func__, key);
+		rc = -ENODEV;
+		goto missing_key;
+	}
+
+	key = "irq-rx-reset";
+	rx_reset_r = platform_get_resource_byname(pdev, IORESOURCE_MEM, key);
+	if (!rx_reset_r) {
+		pr_err("%s: missing key %s\n", __func__, key);
+		rc = -ENODEV;
+		goto missing_key;
+	}
+
+	key = "qcom,tx-ring-size";
+	rc = of_property_read_u32(node, key, &einfo->tx_fifo_size);
+	if (rc) {
+		pr_err("%s: missing key %s\n", __func__, key);
+		rc = -ENODEV;
+		goto missing_key;
+	}
+
+	key = "qcom,rx-ring-size";
+	rc = of_property_read_u32(node, key, &einfo->rx_fifo_size);
+	if (rc) {
+		pr_err("%s: missing key %s\n", __func__, key);
+		rc = -ENODEV;
+		goto missing_key;
+	}
+
+	if (subsys_name_to_id(subsys_name) == -ENODEV) {
+		pr_err("%s: unknown subsystem: %s\n", __func__, subsys_name);
+		rc = -ENODEV;
+		goto invalid_key;
+	}
+	einfo->remote_proc_id = subsys_name_to_id(subsys_name);
+
+	init_xprt_cfg(einfo, subsys_name);
+	einfo->xprt_cfg.name = "mailbox";
+	init_xprt_if(einfo);
+	spin_lock_init(&einfo->write_lock);
+	init_waitqueue_head(&einfo->tx_blocked_queue);
+	init_kthread_work(&einfo->kwork, rx_worker);
+	init_kthread_worker(&einfo->kworker);
+	einfo->read_from_fifo = read_from_fifo;
+	einfo->write_to_fifo = write_to_fifo;
+	init_srcu_struct(&einfo->use_ref);
+	spin_lock_init(&einfo->rx_lock);
+	INIT_LIST_HEAD(&einfo->deferred_cmds);
+
+	mutex_lock(&probe_lock);
+	if (edge_infos[einfo->remote_proc_id]) {
+		pr_err("%s: duplicate subsys %s is not valid\n", __func__,
+								subsys_name);
+		rc = -ENODEV;
+		mutex_unlock(&probe_lock);
+		goto invalid_key;
+	}
+	edge_infos[einfo->remote_proc_id] = einfo;
+	mutex_unlock(&probe_lock);
+
+	einfo->out_irq_mask = irq_mask;
+	einfo->out_irq_reg = ioremap_nocache(irq_r->start,
+							resource_size(irq_r));
+	if (!einfo->out_irq_reg) {
+		pr_err("%s: unable to map irq reg\n", __func__);
+		rc = -ENOMEM;
+		goto irq_ioremap_fail;
+	}
+
+	mbox_loc = ioremap_nocache(mbox_loc_r->start,
+						resource_size(mbox_loc_r));
+	if (!mbox_loc) {
+		pr_err("%s: unable to map mailbox location reg\n", __func__);
+		rc = -ENOMEM;
+		goto mbox_loc_ioremap_fail;
+	}
+
+	mbox_size = ioremap_nocache(mbox_size_r->start,
+						resource_size(mbox_size_r));
+	if (!mbox_size) {
+		pr_err("%s: unable to map mailbox size reg\n", __func__);
+		rc = -ENOMEM;
+		goto mbox_size_ioremap_fail;
+	}
+
+	einfo->rx_reset_reg = ioremap_nocache(rx_reset_r->start,
+						resource_size(rx_reset_r));
+	if (!einfo->rx_reset_reg) {
+		pr_err("%s: unable to map rx reset reg\n", __func__);
+		rc = -ENOMEM;
+		goto rx_reset_ioremap_fail;
+	}
+
+	einfo->task = kthread_run(kthread_worker_fn, &einfo->kworker,
+						"smem_native_%s", subsys_name);
+	if (IS_ERR(einfo->task)) {
+		rc = PTR_ERR(einfo->task);
+		pr_err("%s: kthread_run failed %d\n", __func__, rc);
+		goto kthread_fail;
+	}
+
+	mbox_cfg_size = sizeof(*mbox_cfg) + einfo->tx_fifo_size +
+							einfo->rx_fifo_size;
+	mbox_cfg = smem_alloc(SMEM_GLINK_NATIVE_XPRT_DESCRIPTOR,
+							mbox_cfg_size,
+							einfo->remote_proc_id,
+							0);
+	if (PTR_ERR(mbox_cfg) == -EPROBE_DEFER) {
+		rc = -EPROBE_DEFER;
+		goto smem_alloc_fail;
+	}
+	if (!mbox_cfg) {
+		pr_err("%s: smem alloc of mailbox struct failed\n", __func__);
+		rc = -ENOMEM;
+		goto smem_alloc_fail;
+	}
+	einfo->mailbox = mbox_cfg;
+	einfo->tx_ch_desc = (struct channel_desc *)(&mbox_cfg->tx_read_index);
+	einfo->rx_ch_desc = (struct channel_desc *)(&mbox_cfg->rx_read_index);
+	mbox_cfg->tx_size = einfo->tx_fifo_size;
+	mbox_cfg->rx_size = einfo->rx_fifo_size;
+	einfo->tx_fifo = &mbox_cfg->fifo[0];
+
+	rc = glink_core_register_transport(&einfo->xprt_if, &einfo->xprt_cfg);
+	if (rc == -EPROBE_DEFER)
+		goto reg_xprt_fail;
+	if (rc) {
+		pr_err("%s: glink core register transport failed: %d\n",
+								__func__, rc);
+		goto reg_xprt_fail;
+	}
+
+	einfo->irq_line = irq_line;
+	rc = request_irq(irq_line, irq_handler,
+			IRQF_TRIGGER_HIGH | IRQF_NO_SUSPEND | IRQF_SHARED,
+			node->name, einfo);
+	if (rc < 0) {
+		pr_err("%s: request_irq on %d failed: %d\n", __func__, irq_line,
+									rc);
+		goto request_irq_fail;
+	}
+	rc = enable_irq_wake(irq_line);
+	if (rc < 0)
+		pr_err("%s: enable_irq_wake() failed on %d\n", __func__,
+								irq_line);
+
+	register_debugfs_info(einfo);
+
+	writel_relaxed(mbox_cfg_size, mbox_size);
+	cfg_p_addr = smem_virt_to_phys(mbox_cfg);
+	writel_relaxed(lower_32_bits(cfg_p_addr), mbox_loc);
+	writel_relaxed(upper_32_bits(cfg_p_addr), mbox_loc + 4);
+	einfo->in_ssr = true;
+	send_irq(einfo);
+	iounmap(mbox_size);
+	iounmap(mbox_loc);
+	return 0;
+
+request_irq_fail:
+	glink_core_unregister_transport(&einfo->xprt_if);
+reg_xprt_fail:
+smem_alloc_fail:
+	flush_kthread_worker(&einfo->kworker);
+	kthread_stop(einfo->task);
+	einfo->task = NULL;
+kthread_fail:
+	iounmap(einfo->rx_reset_reg);
+rx_reset_ioremap_fail:
+	iounmap(mbox_size);
+mbox_size_ioremap_fail:
+	iounmap(mbox_loc);
+mbox_loc_ioremap_fail:
+	iounmap(einfo->out_irq_reg);
+irq_ioremap_fail:
+	mutex_lock(&probe_lock);
+	edge_infos[einfo->remote_proc_id] = NULL;
+	mutex_unlock(&probe_lock);
+invalid_key:
+missing_key:
+	kfree(einfo);
+edge_info_alloc_fail:
+	return rc;
+}
+
+#if defined(CONFIG_DEBUG_FS)
+/**
+ * debug_edge() - generates formatted text output displaying current edge state
+ * @s:	File to send the output to.
+ */
+static void debug_edge(struct seq_file *s)
+{
+	struct edge_info *einfo;
+	struct glink_dbgfs_data *dfs_d;
+
+	dfs_d = s->private;
+	einfo = dfs_d->priv_data;
+
+/*
+ * formatted, human readable edge state output, e.g.:
+ *
+ * TX/RX fifo information:
+ * ID|EDGE      |TX READ   |TX WRITE  |TX SIZE   |RX READ   |RX WRITE  |RX SIZE
+ * -------------------------------------------------------------------------------
+ * 01|mpss      |0x00000128|0x00000128|0x00000800|0x00000256|0x00000256|0x00001000
+ *
+ * Interrupt information:
+ * EDGE      |TX INT    |RX INT
+ * --------------------------------
+ * mpss      |0x00000006|0x00000008
+ */
+	seq_puts(s, "TX/RX fifo information:\n");
+	seq_printf(s, "%2s|%-10s|%-10s|%-10s|%-10s|%-10s|%-10s|%-10s\n",
+								"ID",
+								"EDGE",
+								"TX READ",
+								"TX WRITE",
+								"TX SIZE",
+								"RX READ",
+								"RX WRITE",
+								"RX SIZE");
+	seq_puts(s,
+		"-------------------------------------------------------------------------------\n");
+	if (!einfo)
+		return;
+
+	seq_printf(s, "%02i|%-10s|", einfo->remote_proc_id,
+					einfo->xprt_cfg.edge);
+	if (!einfo->rx_fifo)
+		seq_puts(s, "Link Not Up\n");
+	else
+		seq_printf(s, "0x%08X|0x%08X|0x%08X|0x%08X|0x%08X|0x%08X\n",
+						einfo->tx_ch_desc->read_index,
+						einfo->tx_ch_desc->write_index,
+						einfo->tx_fifo_size,
+						einfo->rx_ch_desc->read_index,
+						einfo->rx_ch_desc->write_index,
+						einfo->rx_fifo_size);
+
+	seq_puts(s, "\nInterrupt information:\n");
+	seq_printf(s, "%-10s|%-10s|%-10s\n", "EDGE", "TX INT", "RX INT");
+	seq_puts(s, "--------------------------------\n");
+	seq_printf(s, "%-10s|0x%08X|0x%08X\n", einfo->xprt_cfg.edge,
+						einfo->tx_irq_count,
+						einfo->rx_irq_count);
+}
+
+/**
+ * register_debugfs_info() - initialize debugfs device entries
+ * @einfo:	Pointer to specific edge_info for which register is called.
+ */
+static void register_debugfs_info(struct edge_info *einfo)
+{
+	struct glink_dbgfs dfs;
+	char *curr_dir_name;
+	int dir_name_len;
+
+	dir_name_len = strlen(einfo->xprt_cfg.edge) +
+				strlen(einfo->xprt_cfg.name) + 2;
+	curr_dir_name = kmalloc(dir_name_len, GFP_KERNEL);
+	if (!curr_dir_name) {
+		GLINK_ERR("%s: Memory allocation failed\n", __func__);
+		return;
+	}
+
+	snprintf(curr_dir_name, dir_name_len, "%s_%s",
+				einfo->xprt_cfg.edge, einfo->xprt_cfg.name);
+	dfs.curr_name = curr_dir_name;
+	dfs.par_name = "xprt";
+	dfs.b_dir_create = false;
+	glink_debugfs_create("XPRT_INFO", debug_edge,
+					&dfs, einfo, false);
+	kfree(curr_dir_name);
+}
+
+#else
+static void register_debugfs_info(struct edge_info *einfo)
+{
+}
+#endif /* CONFIG_DEBUG_FS */
+
+static struct of_device_id smem_match_table[] = {
+	{ .compatible = "qcom,glink-smem-native-xprt" },
+	{},
+};
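+
+/*
+ * Sketch of a matching DT node (values are placeholders; only the property
+ * names and the compatible string are taken from the probe function above):
+ *
+ *	glink-smem-xprt-lpass {
+ *		compatible = "qcom,glink-smem-native-xprt";
+ *		reg = <0xfa006008 0x4>;
+ *		reg-names = "irq-reg-base";
+ *		qcom,irq-mask = <0x1000>;
+ *		interrupts = <0 25 1>;
+ *		label = "lpass";
+ *	};
+ */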
+
+static struct platform_driver glink_smem_native_driver = {
+	.probe = glink_smem_native_probe,
+	.driver = {
+		.name = "msm_glink_smem_native_xprt",
+		.owner = THIS_MODULE,
+		.of_match_table = smem_match_table,
+	},
+};
+
+static struct of_device_id rpm_match_table[] = {
+	{ .compatible = "qcom,glink-rpm-native-xprt" },
+	{},
+};
+
+static struct platform_driver glink_rpm_native_driver = {
+	.probe = glink_rpm_native_probe,
+	.driver = {
+		.name = "msm_glink_rpm_native_xprt",
+		.owner = THIS_MODULE,
+		.of_match_table = rpm_match_table,
+	},
+};
+
+static struct of_device_id mailbox_match_table[] = {
+	{ .compatible = "qcom,glink-mailbox-xprt" },
+	{},
+};
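+
+/*
+ * Sketch of a matching DT node for the mailbox variant (values are
+ * placeholders; the property names mirror those parsed in
+ * glink_mailbox_probe() above):
+ *
+ *	glink-mailbox-xprt-spss {
+ *		compatible = "qcom,glink-mailbox-xprt";
+ *		reg = <...>, <...>, <...>, <...>;
+ *		reg-names = "irq-reg-base", "mbox-loc-addr",
+ *			    "mbox-loc-size", "irq-rx-reset";
+ *		qcom,irq-mask = <0x1>;
+ *		interrupts = <0 25 4>;
+ *		label = "spss";
+ *		qcom,tx-ring-size = <0x400>;
+ *		qcom,rx-ring-size = <0x400>;
+ *	};
+ */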
+
+static struct platform_driver glink_mailbox_driver = {
+	.probe = glink_mailbox_probe,
+	.driver = {
+		.name = "msm_glink_mailbox_xprt",
+		.owner = THIS_MODULE,
+		.of_match_table = mailbox_match_table,
+	},
+};
+
+static int __init glink_smem_native_xprt_init(void)
+{
+	int rc;
+
+	rc = platform_driver_register(&glink_smem_native_driver);
+	if (rc) {
+		pr_err("%s: glink_smem_native_driver register failed %d\n",
+								__func__, rc);
+		return rc;
+	}
+
+	rc = platform_driver_register(&glink_rpm_native_driver);
+	if (rc) {
+		pr_err("%s: glink_rpm_native_driver register failed %d\n",
+								__func__, rc);
+		return rc;
+	}
+
+	rc = platform_driver_register(&glink_mailbox_driver);
+	if (rc) {
+		pr_err("%s: glink_mailbox_driver register failed %d\n",
+								__func__, rc);
+		return rc;
+	}
+
+	return 0;
+}
+arch_initcall(glink_smem_native_xprt_init);
+
+MODULE_DESCRIPTION("MSM G-Link SMEM Native Transport");
+MODULE_LICENSE("GPL v2");
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/soc/qcom/glink_spi_xprt.c	2019-10-29 09:26:24.809214589 +0100
@@ -0,0 +1,2198 @@
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/gfp.h>
+#include <linux/kernel.h>
+#include <linux/kthread.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/printk.h>
+#include <linux/sched.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <linux/spi/spi.h>
+#include <linux/spinlock.h>
+#include <linux/srcu.h>
+#include <linux/wait.h>
+#include <linux/component.h>
+#include <soc/qcom/tracer_pkt.h>
+#include <sound/wcd-dsp-mgr.h>
+#include <sound/wcd-spi.h>
+#include "glink_core_if.h"
+#include "glink_private.h"
+#include "glink_xprt_if.h"
+
+#define XPRT_NAME "spi"
+#define FIFO_ALIGNMENT 16
+#define FIFO_FULL_RESERVE 8
+#define TX_BLOCKED_CMD_RESERVE 16
+#define TRACER_PKT_FEATURE BIT(2)
+#define DEFAULT_FIFO_SIZE 1024
+#define SHORT_PKT_SIZE 16
+#define XPRT_ALIGNMENT 4
+
+#define MAX_INACTIVE_CYCLES 50
+#define POLL_INTERVAL_US 500
+
+#define ACTIVE_TX BIT(0)
+#define ACTIVE_RX BIT(1)
+
+#define ID_MASK 0xFFFFFF
+/**
+ * enum command_types - definition of the types of commands sent/received
+ * @VERSION_CMD:		Version and feature set supported
+ * @VERSION_ACK_CMD:		Response for @VERSION_CMD
+ * @OPEN_CMD:			Open a channel
+ * @CLOSE_CMD:			Close a channel
+ * @OPEN_ACK_CMD:		Response to @OPEN_CMD
+ * @CLOSE_ACK_CMD:		Response for @CLOSE_CMD
+ * @RX_INTENT_CMD:		RX intent for a channel is queued
+ * @RX_DONE_CMD:		Use of RX intent for a channel is complete
+ * @RX_DONE_W_REUSE_CMD:	Same as @RX_DONE but also reuse the used intent
+ * @RX_INTENT_REQ_CMD:		Request to have RX intent queued
+ * @RX_INTENT_REQ_ACK_CMD:	Response for @RX_INTENT_REQ_CMD
+ * @TX_DATA_CMD:		Start of a data transfer
+ * @TX_DATA_CONT_CMD:		Continuation or end of a data transfer
+ * @READ_NOTIF_CMD:		Request for a notification when this cmd is read
+ * @SIGNALS_CMD:		Sideband signals
+ * @TRACER_PKT_CMD:		Start of a Tracer Packet Command
+ * @TRACER_PKT_CONT_CMD:	Continuation or end of a Tracer Packet Command
+ * @TX_SHORT_DATA_CMD:		Transmit short packets
+ */
+enum command_types {
+	VERSION_CMD,
+	VERSION_ACK_CMD,
+	OPEN_CMD,
+	CLOSE_CMD,
+	OPEN_ACK_CMD,
+	CLOSE_ACK_CMD,
+	RX_INTENT_CMD,
+	RX_DONE_CMD,
+	RX_DONE_W_REUSE_CMD,
+	RX_INTENT_REQ_CMD,
+	RX_INTENT_REQ_ACK_CMD,
+	TX_DATA_CMD,
+	TX_DATA_CONT_CMD,
+	READ_NOTIF_CMD,
+	SIGNALS_CMD,
+	TRACER_PKT_CMD,
+	TRACER_PKT_CONT_CMD,
+	TX_SHORT_DATA_CMD,
+};
+
+/**
+ * struct glink_cmpnt - Component to cache WDSP component and its operations
+ * @master_dev:	Device structure corresponding to WDSP device.
+ * @master_ops:	Operations supported by the WDSP device.
+ */
+struct glink_cmpnt {
+	struct device *master_dev;
+	struct wdsp_mgr_ops *master_ops;
+};
+
+/**
+ * struct edge_info - local information for managing a single complete edge
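+ * @list:			List node for the global edge_infos list.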
+ * @xprt_if:			The transport interface registered with the
+ *				glink core associated with this edge.
+ * @xprt_cfg:			The transport configuration for the glink core
+ *				associated with this edge.
+ * @subsys_name:		Name of the remote subsystem in the edge.
+ * @spi_dev:			Pointer to the connecting SPI device.
+ * @fifo_size:			Size of the FIFO at the remote end.
+ * @tx_fifo_start:		Base Address of the TX FIFO.
+ * @tx_fifo_end:		End Address of the TX FIFO.
+ * @rx_fifo_start:		Base Address of the RX FIFO.
+ * @rx_fifo_end:		End Address of the RX FIFO.
+ * @tx_fifo_read_reg_addr:	Address of the TX FIFO Read Index Register.
+ * @tx_fifo_write_reg_addr:	Address of the TX FIFO Write Index Register.
+ * @rx_fifo_read_reg_addr:	Address of the RX FIFO Read Index Register.
+ * @rx_fifo_write_reg_addr:	Address of the RX FIFO Write Index Register.
+ * @tx_fifo_write:		Internal write index for TX FIFO.
+ * @rx_fifo_read:		Internal read index for RX FIFO.
+ * @kwork:			Work to be executed when receiving data.
+ * @kworker:			Handle to the entity processing @kwork.
+ * @task:			Handle to the task context that runs @kworker.
+ * @use_ref:			Active users of this transport grab a
+ *				reference. Used for SSR synchronization.
+ * @in_ssr:			Signals if this transport is in ssr.
+ * @write_lock:			Lock to serialize write/tx operation.
+ * @tx_blocked_queue:		Queue of entities waiting for the remote side to
+ *				signal the resumption of TX.
+ * @tx_resume_needed:		A tx resume signal needs to be sent to the glink
+ *				core.
+ * @tx_blocked_signal_sent:	Flag to indicate the flush signal has already
+ *				been sent, and a response is pending from the
+ *				remote side.  Protected by @write_lock.
+ * @num_pw_states:		Size of @ramp_time_us.
+ * @ramp_time_us:		Array of ramp times in microseconds where array
+ *				index position represents a power state.
+ * @activity_flag:		Flag indicating active TX and RX.
+ * @activity_lock:		Lock to synchronize access to activity flag.
+ * @cmpnt:			Component to interface with the remote device.
+ */
+struct edge_info {
+	struct list_head list;
+	struct glink_transport_if xprt_if;
+	struct glink_core_transport_cfg xprt_cfg;
+	char subsys_name[GLINK_NAME_SIZE];
+	struct spi_device *spi_dev;
+
+	uint32_t fifo_size;
+	uint32_t tx_fifo_start;
+	uint32_t tx_fifo_end;
+	uint32_t rx_fifo_start;
+	uint32_t rx_fifo_end;
+	unsigned int tx_fifo_read_reg_addr;
+	unsigned int tx_fifo_write_reg_addr;
+	unsigned int rx_fifo_read_reg_addr;
+	unsigned int rx_fifo_write_reg_addr;
+	uint32_t tx_fifo_write;
+	uint32_t rx_fifo_read;
+
+	struct kthread_work kwork;
+	struct kthread_worker kworker;
+	struct task_struct *task;
+	struct srcu_struct use_ref;
+	bool in_ssr;
+	struct mutex write_lock;
+	wait_queue_head_t tx_blocked_queue;
+	bool tx_resume_needed;
+	bool tx_blocked_signal_sent;
+
+	uint32_t num_pw_states;
+	unsigned long *ramp_time_us;
+
+	uint32_t activity_flag;
+	spinlock_t activity_lock;
+
+	struct glink_cmpnt cmpnt;
+};
+
+static uint32_t negotiate_features_v1(struct glink_transport_if *if_ptr,
+				      const struct glink_core_version *version,
+				      uint32_t features);
+static DEFINE_SPINLOCK(edge_infos_lock);
+static LIST_HEAD(edge_infos);
+static struct glink_core_version versions[] = {
+	{1, TRACER_PKT_FEATURE, negotiate_features_v1},
+};
+
+/**
+ * negotiate_features_v1() - determine what features of a version can be used
+ * @if_ptr:	The transport for which features are negotiated.
+ * @version:	The version negotiated.
+ * @features:	The set of requested features.
+ *
+ * Return: What set of the requested features can be supported.
+ */
+static uint32_t negotiate_features_v1(struct glink_transport_if *if_ptr,
+				      const struct glink_core_version *version,
+				      uint32_t features)
+{
+	return features & version->features;
+}
+
+/**
+ * wdsp_suspend() - Vote for the WDSP device suspend
+ * @cmpnt:	Component to identify the WDSP device.
+ *
+ * Return: 0 on success, standard Linux error codes on failure.
+ */
+static int wdsp_suspend(struct glink_cmpnt *cmpnt)
+{
+	int rc = 0;
+
+	if (cmpnt && cmpnt->master_dev &&
+	    cmpnt->master_ops && cmpnt->master_ops->suspend)
+		rc = cmpnt->master_ops->suspend(cmpnt->master_dev);
+	return rc;
+}
+
+/**
+ * wdsp_resume() - Vote for the WDSP device resume
+ * @cmpnt:	Component to identify the WDSP device.
+ *
+ * Return: 0 on success, standard Linux error codes on failure.
+ */
+static int wdsp_resume(struct glink_cmpnt *cmpnt)
+{
+	int rc = 0;
+
+	if (cmpnt && cmpnt->master_dev &&
+	    cmpnt->master_ops && cmpnt->master_ops->resume)
+		rc = cmpnt->master_ops->resume(cmpnt->master_dev);
+	return rc;
+}
+
+/**
+ * glink_spi_xprt_set_poll_mode() - Set the transport to polling mode
+ * @einfo:	Edge information corresponding to the transport.
+ *
+ * This helper function indicates the start of RX polling. It prevents
+ * the system from suspending and keeps polling for RX for a pre-defined
+ * duration.
+ */
+static void glink_spi_xprt_set_poll_mode(struct edge_info *einfo)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&einfo->activity_lock, flags);
+	einfo->activity_flag |= ACTIVE_RX;
+	spin_unlock_irqrestore(&einfo->activity_lock, flags);
+	if (!strcmp(einfo->xprt_cfg.edge, "wdsp"))
+		wdsp_resume(&einfo->cmpnt);
+}
+
+/**
+ * glink_spi_xprt_set_irq_mode() - Set the transport to IRQ mode
+ * @einfo:	Edge information corresponding to the transport.
+ *
+ * This helper function indicates the end of RX polling. It allows the
+ * system to suspend; new RX data is then handled only through an IRQ.
+ */
+static void glink_spi_xprt_set_irq_mode(struct edge_info *einfo)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&einfo->activity_lock, flags);
+	einfo->activity_flag &= ~ACTIVE_RX;
+	spin_unlock_irqrestore(&einfo->activity_lock, flags);
+}
+
+/**
+ * glink_spi_xprt_rx_data() - Receive data over SPI bus
+ * @einfo:	Edge from which the data has to be received.
+ * @src:	Source Address of the RX data.
+ * @dst:	Address of the destination RX buffer.
+ * @size:	Size of the RX data.
+ *
+ * This function is used to receive data or commands as a byte stream from
+ * the remote subsystem over the SPI bus.
+ *
+ * Return: 0 on success, standard Linux error codes on failure.
+ */
+static int glink_spi_xprt_rx_data(struct edge_info *einfo, void *src,
+				  void *dst, uint32_t size)
+{
+	struct wcd_spi_msg spi_msg;
+
+	memset(&spi_msg, 0, sizeof(spi_msg));
+	spi_msg.data = dst;
+	spi_msg.remote_addr = (uint32_t)(size_t)src;
+	spi_msg.len = (size_t)size;
+	return wcd_spi_data_read(einfo->spi_dev, &spi_msg);
+}
+
+/**
+ * glink_spi_xprt_tx_data() - Transmit data over SPI bus
+ * @einfo:	Edge to which the data has to be transmitted.
+ * @src:	Address of the TX buffer.
+ * @dst:	Destination Address of the TX data.
+ * @size:	Size of the TX data.
+ *
+ * This function is used to transmit data or commands as a byte stream to
+ * the remote subsystem over the SPI bus.
+ *
+ * Return: 0 on success, standard Linux error codes on failure.
+ */
+static int glink_spi_xprt_tx_data(struct edge_info *einfo, void *src,
+				  void *dst, uint32_t size)
+{
+	struct wcd_spi_msg spi_msg;
+
+	memset(&spi_msg, 0, sizeof(spi_msg));
+	spi_msg.data = src;
+	spi_msg.remote_addr = (uint32_t)(size_t)dst;
+	spi_msg.len = (size_t)size;
+	return wcd_spi_data_write(einfo->spi_dev, &spi_msg);
+}
+
+/**
+ * glink_spi_xprt_reg_read() - Read the TX/RX FIFO Read/Write Index registers
+ * @einfo:	Edge from which the registers have to be read.
+ * @reg_addr:	Address of the register to be read.
+ * @data:	Buffer into which the register data has to be read.
+ *
+ * Return: 0 on success, standard Linux error codes on failure.
+ */
+static int glink_spi_xprt_reg_read(struct edge_info *einfo, u32 reg_addr,
+				   uint32_t *data)
+{
+	int rc;
+
+	rc = glink_spi_xprt_rx_data(einfo, (void *)(unsigned long)reg_addr,
+				    data, sizeof(*data));
+	if (!rc)
+		*data = *data & ID_MASK;
+	return rc;
+}
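+
+/*
+ * Note: ID_MASK keeps only the low 24 bits of the value read above; the
+ * upper byte is assumed to carry status bits that are not part of the
+ * FIFO index (an inference from the mask, not a documented register map).
+ */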
+
+/**
+ * glink_spi_xprt_reg_write() - Write the TX/RX FIFO Read/Write Index registers
+ * @einfo:	Edge to which the registers have to be written.
+ * @reg_addr:	Address of the registers to be written.
+ * @data:	Data to be written to the registers.
+ *
+ * Return: 0 on success, standard Linux error codes on failure.
+ */
+static int glink_spi_xprt_reg_write(struct edge_info *einfo, u32 reg_addr,
+					uint32_t data)
+{
+	return glink_spi_xprt_tx_data(einfo, &data,
+				(void *)(unsigned long)reg_addr, sizeof(data));
+}
+
+/**
+ * glink_spi_xprt_write_avail() - Available Write Space in the remote side
+ * @einfo:	Edge information corresponding to the remote side.
+ *
+ * This function reads the TX FIFO Read & Write Index registers from the
+ * remote subsystem and calculates the available write space.
+ *
+ * Return: 0 on error, available write space on success.
+ */
+static int glink_spi_xprt_write_avail(struct edge_info *einfo)
+{
+	uint32_t read_id;
+	uint32_t write_id;
+	int write_avail;
+	int ret;
+
+	if (unlikely(!einfo->tx_fifo_start)) {
+		ret = glink_spi_xprt_reg_read(einfo,
+			einfo->tx_fifo_write_reg_addr, &einfo->tx_fifo_write);
+		if (ret < 0) {
+			pr_err("%s: Error %d reading %s tx_fifo_write_reg_addr %d\n",
+				__func__, ret, einfo->xprt_cfg.edge,
+				einfo->tx_fifo_write_reg_addr);
+			return 0;
+		}
+		einfo->tx_fifo_start = einfo->tx_fifo_write;
+	}
+	write_id = einfo->tx_fifo_write;
+
+	ret = glink_spi_xprt_reg_read(einfo, einfo->tx_fifo_read_reg_addr,
+				   &read_id);
+	if (ret < 0) {
+		pr_err("%s: Error %d reading %s tx_fifo_read_reg_addr %d\n",
+			__func__, ret, einfo->xprt_cfg.edge,
+			einfo->tx_fifo_read_reg_addr);
+		return 0;
+	}
+
+	if (!read_id || !write_id)
+		return 0;
+
+	if (read_id > write_id)
+		write_avail = read_id - write_id;
+	else
+		write_avail = einfo->fifo_size - (write_id - read_id);
+
+	if (write_avail < FIFO_FULL_RESERVE + TX_BLOCKED_CMD_RESERVE)
+		write_avail = 0;
+	else
+		write_avail -= FIFO_FULL_RESERVE + TX_BLOCKED_CMD_RESERVE;
+
+	return write_avail;
+}
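+
+/*
+ * Worked example for the arithmetic above (numbers are hypothetical):
+ * with fifo_size = 1024, read_id = 0x100 and write_id = 0x180, read_id
+ * is not greater than write_id, so write_avail = 1024 - (0x180 - 0x100)
+ * = 896 bytes; subtracting FIFO_FULL_RESERVE (8) and
+ * TX_BLOCKED_CMD_RESERVE (16) leaves 872 bytes offered to the caller.
+ */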
+
+/**
+ * glink_spi_xprt_read_avail() - Available Read Data from the remote side
+ * @einfo:	Edge information corresponding to the remote side.
+ *
+ * This function reads the RX FIFO Read & Write Index registers from the
+ * remote subsystem and calculates the available read data size.
+ *
+ * Return: 0 on error, available read data on success.
+ */
+static int glink_spi_xprt_read_avail(struct edge_info *einfo)
+{
+	uint32_t read_id;
+	uint32_t write_id;
+	int read_avail;
+	int ret;
+
+	if (unlikely(!einfo->rx_fifo_start)) {
+		ret = glink_spi_xprt_reg_read(einfo,
+			einfo->rx_fifo_read_reg_addr, &einfo->rx_fifo_read);
+		if (ret < 0) {
+			pr_err("%s: Error %d reading %s rx_fifo_read_reg_addr %d\n",
+				__func__, ret, einfo->xprt_cfg.edge,
+				einfo->rx_fifo_read_reg_addr);
+			return 0;
+		}
+		einfo->rx_fifo_start = einfo->rx_fifo_read;
+	}
+	read_id = einfo->rx_fifo_read;
+
+	ret = glink_spi_xprt_reg_read(einfo, einfo->rx_fifo_write_reg_addr,
+				&write_id);
+	if (ret < 0) {
+		pr_err("%s: Error %d reading %s rx_fifo_write_reg_addr %d\n",
+			__func__, ret, einfo->xprt_cfg.edge,
+			einfo->rx_fifo_write_reg_addr);
+		return 0;
+	}
+
+	if (!read_id || !write_id)
+		return 0;
+
+	if (read_id <= write_id)
+		read_avail = write_id - read_id;
+	else
+		read_avail = einfo->fifo_size - (read_id - write_id);
+	return read_avail;
+}
+
+/**
+ * glink_spi_xprt_rx_cmd() - Receive G-Link commands
+ * @einfo:	Edge information corresponding to the remote side.
+ * @dst:	Destination buffer into which the commands are read.
+ * @size:	Size of the data to be read.
+ *
+ * This function is used to receive the commands from the RX FIFO. This
+ * function updates the RX FIFO Read Index after reading the data.
+ *
+ * Return: 0 on success, standard Linux error codes on error.
+ */
+static int glink_spi_xprt_rx_cmd(struct edge_info *einfo, void *dst,
+				 uint32_t size)
+{
+	uint32_t read_id;
+	uint32_t size_to_read = size;
+	uint32_t offset = 0;
+	int ret;
+
+	read_id = einfo->rx_fifo_read;
+	do {
+		if ((read_id + size_to_read) >=
+		    (einfo->rx_fifo_start + einfo->fifo_size))
+			size_to_read = einfo->rx_fifo_start + einfo->fifo_size
+					- read_id;
+		ret = glink_spi_xprt_rx_data(einfo, (void *)(size_t)read_id,
+					     dst + offset, size_to_read);
+		if (ret < 0) {
+			pr_err("%s: Error %d reading data\n", __func__, ret);
+			return ret;
+		}
+		read_id += size_to_read;
+		offset += size_to_read;
+		if (read_id >= (einfo->rx_fifo_start + einfo->fifo_size))
+			read_id = einfo->rx_fifo_start;
+		size_to_read = size - offset;
+	} while (size_to_read);
+
+	ret = glink_spi_xprt_reg_write(einfo, einfo->rx_fifo_read_reg_addr,
+				read_id);
+	if (ret < 0)
+		pr_err("%s: Error %d writing %s rx_fifo_read_reg_addr %d\n",
+			__func__, ret, einfo->xprt_cfg.edge,
+			einfo->rx_fifo_read_reg_addr);
+	else
+		einfo->rx_fifo_read = read_id;
+
+	return ret;
+}
+
+/**
+ * glink_spi_xprt_tx_cmd_safe() - Transmit G-Link commands
+ * @einfo:	Edge information corresponding to the remote subsystem.
+ * @src:	Source buffer containing the G-Link command.
+ * @size:	Size of the command to transmit.
+ *
+ * This function is used to transmit the G-Link commands. This function
+ * must be called with einfo->write_lock locked.
+ *
+ * Return: 0 on success, standard Linux error codes on error.
+ */
+static int glink_spi_xprt_tx_cmd_safe(struct edge_info *einfo, void *src,
+				      uint32_t size)
+{
+	uint32_t write_id;
+	uint32_t size_to_write = size;
+	uint32_t offset = 0;
+	int ret;
+
+	write_id = einfo->tx_fifo_write;
+	do {
+		if ((write_id + size_to_write) >=
+		    (einfo->tx_fifo_start + einfo->fifo_size))
+			size_to_write = einfo->tx_fifo_start + einfo->fifo_size
+					- write_id;
+		ret = glink_spi_xprt_tx_data(einfo, src + offset,
+				(void *)(size_t)write_id, size_to_write);
+		if (ret < 0) {
+			pr_err("%s: Error %d writing data\n", __func__, ret);
+			return ret;
+		}
+		write_id += size_to_write;
+		offset += size_to_write;
+		if (write_id >= (einfo->tx_fifo_start + einfo->fifo_size))
+			write_id = einfo->tx_fifo_start;
+		size_to_write = size - offset;
+	} while (size_to_write);
+
+	ret = glink_spi_xprt_reg_write(einfo, einfo->tx_fifo_write_reg_addr,
+				write_id);
+	if (ret < 0)
+		pr_err("%s: Error %d writing %s tx_fifo_write_reg_addr %d\n",
+			__func__, ret, einfo->xprt_cfg.edge,
+			einfo->tx_fifo_write_reg_addr);
+	else
+		einfo->tx_fifo_write = write_id;
+
+	return ret;
+}
+
+/**
+ * send_tx_blocked_signal() - Send flow control request message
+ * @einfo:	Edge information corresponding to the remote subsystem.
+ *
+ * This function is used to send a message to the remote subsystem indicating
+ * that the local subsystem is waiting for write space. On receiving this
+ * message, the remote subsystem will send a resume tx message.
+ */
+static void send_tx_blocked_signal(struct edge_info *einfo)
+{
+	struct read_notif_request {
+		uint16_t cmd;
+		uint16_t reserved;
+		uint32_t reserved2;
+		uint64_t reserved3;
+	};
+	struct read_notif_request read_notif_req = {0};
+
+	read_notif_req.cmd = READ_NOTIF_CMD;
+
+	if (!einfo->tx_blocked_signal_sent) {
+		einfo->tx_blocked_signal_sent = true;
+		glink_spi_xprt_tx_cmd_safe(einfo, &read_notif_req,
+					    sizeof(read_notif_req));
+	}
+}
+
+/**
+ * glink_spi_xprt_tx_cmd() - Transmit G-Link commands
+ * @einfo:	Edge information corresponding to the remote subsystem.
+ * @src:	Source buffer containing the G-Link command.
+ * @size:	Size of the command to transmit.
+ *
+ * This function is used to transmit the G-Link commands. This function
+ * might sleep if the space is not available to transmit the command.
+ *
+ * Return: 0 on success, standard Linux error codes on error.
+ */
+static int glink_spi_xprt_tx_cmd(struct edge_info *einfo, void *src,
+				 uint32_t size)
+{
+	int ret;
+	DEFINE_WAIT(wait);
+
+	mutex_lock(&einfo->write_lock);
+	while (glink_spi_xprt_write_avail(einfo) < size) {
+		send_tx_blocked_signal(einfo);
+		prepare_to_wait(&einfo->tx_blocked_queue, &wait,
+				TASK_UNINTERRUPTIBLE);
+		if (glink_spi_xprt_write_avail(einfo) < size &&
+		    !einfo->in_ssr) {
+			mutex_unlock(&einfo->write_lock);
+			schedule();
+			mutex_lock(&einfo->write_lock);
+		}
+		finish_wait(&einfo->tx_blocked_queue, &wait);
+		if (einfo->in_ssr) {
+			mutex_unlock(&einfo->write_lock);
+			return -EFAULT;
+		}
+	}
+	ret = glink_spi_xprt_tx_cmd_safe(einfo, src, size);
+	mutex_unlock(&einfo->write_lock);
+	return ret;
+}
+
+/**
+ * process_rx_data() - process received data from an edge
+ * @einfo:		The edge the data is received on.
+ * @cmd_id:		ID to specify the type of data.
+ * @rcid:		The remote channel id associated with the data.
+ * @intent_id:		The intent the data should be put in.
+ * @src:		Address of the source buffer from which the data
+ *			is read.
+ * @frag_size:		Size of the data fragment to read.
+ * @size_remaining:	Size of data left to be read in this packet.
+ */
+static void process_rx_data(struct edge_info *einfo, uint16_t cmd_id,
+			    uint32_t rcid, uint32_t intent_id, void *src,
+			    uint32_t frag_size, uint32_t size_remaining)
+{
+	struct glink_core_rx_intent *intent;
+	int rc = 0;
+
+	intent = einfo->xprt_if.glink_core_if_ptr->rx_get_pkt_ctx(
+				&einfo->xprt_if, rcid, intent_id);
+	if (intent == NULL) {
+		GLINK_ERR("%s: no intent for ch %d liid %d\n", __func__, rcid,
+			  intent_id);
+		return;
+	} else if (intent->data == NULL) {
+		GLINK_ERR("%s: intent for ch %d liid %d has no data buff\n",
+			  __func__, rcid, intent_id);
+		return;
+	} else if (intent->intent_size - intent->write_offset < frag_size ||
+		 intent->write_offset + size_remaining > intent->intent_size) {
+		GLINK_ERR("%s: rx data size:%d and remaining:%d %s %d %s:%d\n",
+			  __func__, frag_size, size_remaining,
+			  "will overflow ch", rcid, "intent", intent_id);
+		return;
+	}
+
+	if (cmd_id == TX_SHORT_DATA_CMD)
+		memcpy(intent->data + intent->write_offset, src, frag_size);
+	else
+		rc = glink_spi_xprt_rx_data(einfo, src,
+				intent->data + intent->write_offset, frag_size);
+	if (rc < 0) {
+		GLINK_ERR("%s: Error %d receiving data %d:%d:%d:%d\n",
+			  __func__, rc, rcid, intent_id, frag_size,
+			  size_remaining);
+		size_remaining += frag_size;
+	} else {
+		intent->write_offset += frag_size;
+		intent->pkt_size += frag_size;
+
+		if (unlikely((cmd_id == TRACER_PKT_CMD ||
+			cmd_id == TRACER_PKT_CONT_CMD) && !size_remaining)) {
+			tracer_pkt_log_event(intent->data, GLINK_XPRT_RX);
+			intent->tracer_pkt = true;
+		}
+	}
+	einfo->xprt_if.glink_core_if_ptr->rx_put_pkt_ctx(&einfo->xprt_if,
+				rcid, intent, size_remaining ? false : true);
+}
+
+/**
+ * process_rx_cmd() - Process incoming G-Link commands
+ * @einfo:	Edge information corresponding to the remote subsystem.
+ * @rx_data:	Buffer which contains the G-Link commands to be processed.
+ * @rx_size:	Size of the buffer containing the series of G-Link commands.
+ *
+ * This function is used to parse and process a series of G-Link commands
+ * received in a buffer.
+ */
+static void process_rx_cmd(struct edge_info *einfo,
+			   void *rx_data, int rx_size)
+{
+	struct command {
+		uint16_t id;
+		uint16_t param1;
+		uint32_t param2;
+		uint32_t param3;
+		uint32_t param4;
+	};
+	struct intent_desc {
+		uint32_t size;
+		uint32_t id;
+		uint64_t addr;
+	};
+	struct rx_desc {
+		uint32_t size;
+		uint32_t size_left;
+		uint64_t addr;
+	};
+	struct rx_short_data_desc {
+		unsigned char data[SHORT_PKT_SIZE];
+	};
+	struct command *cmd;
+	struct intent_desc *intents;
+	struct rx_desc *rx_descp;
+	struct rx_short_data_desc *rx_sd_descp;
+	int offset = 0;
+	int rcu_id;
+	uint16_t rcid;
+	uint16_t name_len;
+	uint16_t prio;
+	char *name;
+	bool granted;
+	int i;
+
+	rcu_id = srcu_read_lock(&einfo->use_ref);
+	if (einfo->in_ssr) {
+		srcu_read_unlock(&einfo->use_ref, rcu_id);
+		return;
+	}
+
+	while (offset < rx_size) {
+		cmd = (struct command *)(rx_data + offset);
+		offset += sizeof(*cmd);
+		switch (cmd->id) {
+		case VERSION_CMD:
+			if (cmd->param3)
+				einfo->fifo_size = cmd->param3;
+			einfo->xprt_if.glink_core_if_ptr->rx_cmd_version(
+				&einfo->xprt_if, cmd->param1, cmd->param2);
+			break;
+
+		case VERSION_ACK_CMD:
+			einfo->xprt_if.glink_core_if_ptr->rx_cmd_version_ack(
+				&einfo->xprt_if, cmd->param1, cmd->param2);
+			break;
+
+		case OPEN_CMD:
+			rcid = cmd->param1;
+			name_len = (uint16_t)(cmd->param2 & 0xFFFF);
+			prio = (uint16_t)((cmd->param2 & 0xFFFF0000) >> 16);
+			name = (char *)(rx_data + offset);
+			offset += ALIGN(name_len, FIFO_ALIGNMENT);
+			einfo->xprt_if.glink_core_if_ptr->rx_cmd_ch_remote_open(
+				&einfo->xprt_if, rcid, name, prio);
+			break;
+
+		case CLOSE_CMD:
+			einfo->xprt_if.glink_core_if_ptr->
+					rx_cmd_ch_remote_close(
+						&einfo->xprt_if, cmd->param1);
+			break;
+
+		case OPEN_ACK_CMD:
+			prio = (uint16_t)(cmd->param2 & 0xFFFF);
+			einfo->xprt_if.glink_core_if_ptr->rx_cmd_ch_open_ack(
+				&einfo->xprt_if, cmd->param1, prio);
+			break;
+
+		case CLOSE_ACK_CMD:
+			einfo->xprt_if.glink_core_if_ptr->rx_cmd_ch_close_ack(
+					&einfo->xprt_if, cmd->param1);
+			break;
+
+		case RX_INTENT_CMD:
+			for (i = 0; i < cmd->param2; i++) {
+				intents = (struct intent_desc *)
+						(rx_data + offset);
+				offset += sizeof(*intents);
+				einfo->xprt_if.glink_core_if_ptr->
+					rx_cmd_remote_rx_intent_put_cookie(
+					&einfo->xprt_if, cmd->param1,
+					intents->id, intents->size,
+					(void *)(uintptr_t)(intents->addr));
+			}
+			break;
+
+		case RX_DONE_CMD:
+			einfo->xprt_if.glink_core_if_ptr->rx_cmd_tx_done(
+				&einfo->xprt_if, cmd->param1, cmd->param2,
+				false);
+			break;
+
+		case RX_INTENT_REQ_CMD:
+			einfo->xprt_if.glink_core_if_ptr->
+				rx_cmd_remote_rx_intent_req(
+					&einfo->xprt_if, cmd->param1,
+					cmd->param2);
+			break;
+
+		case RX_INTENT_REQ_ACK_CMD:
+			granted = cmd->param2 == 1 ? true : false;
+			einfo->xprt_if.glink_core_if_ptr->
+				rx_cmd_rx_intent_req_ack(&einfo->xprt_if,
+						cmd->param1, granted);
+			break;
+
+		case TX_DATA_CMD:
+		case TX_DATA_CONT_CMD:
+		case TRACER_PKT_CMD:
+		case TRACER_PKT_CONT_CMD:
+			rx_descp = (struct rx_desc *)(rx_data + offset);
+			offset += sizeof(*rx_descp);
+			process_rx_data(einfo, cmd->id, cmd->param1,
+					cmd->param2,
+					(void *)(uintptr_t)(rx_descp->addr),
+					rx_descp->size, rx_descp->size_left);
+			break;
+
+		case TX_SHORT_DATA_CMD:
+			rx_sd_descp = (struct rx_short_data_desc *)
+							(rx_data + offset);
+			offset += sizeof(*rx_sd_descp);
+			process_rx_data(einfo, cmd->id, cmd->param1,
+					cmd->param2, (void *)rx_sd_descp->data,
+					cmd->param3, cmd->param4);
+			break;
+
+		case READ_NOTIF_CMD:
+			break;
+
+		case SIGNALS_CMD:
+			einfo->xprt_if.glink_core_if_ptr->rx_cmd_remote_sigs(
+				&einfo->xprt_if, cmd->param1, cmd->param2);
+			break;
+
+		case RX_DONE_W_REUSE_CMD:
+			einfo->xprt_if.glink_core_if_ptr->rx_cmd_tx_done(
+				&einfo->xprt_if, cmd->param1,
+				cmd->param2, true);
+			break;
+
+		default:
+			pr_err("Unrecognized command: %d\n", cmd->id);
+			break;
+		}
+	}
+	srcu_read_unlock(&einfo->use_ref, rcu_id);
+}
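+
+/*
+ * Illustrative walk through the parser above (the byte stream is
+ * hypothetical): each command starts with a fixed 16-byte struct command
+ * header { id, param1, param2, param3, param4 }. An OPEN_CMD header is
+ * followed by the channel name, skipped with ALIGN(name_len,
+ * FIFO_ALIGNMENT); a TX_DATA_CMD header is followed by a struct rx_desc;
+ * a TX_SHORT_DATA_CMD carries its payload inline in a SHORT_PKT_SIZE byte
+ * trailer. The loop keeps consuming header plus per-command trailer until
+ * offset reaches rx_size.
+ */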
+
+/**
+ * __rx_worker() - Receive commands on a specific edge
+ * @einfo:      Edge to process commands on.
+ *
+ * This function checks the size of data to be received, allocates the
+ * buffer for that data and reads the data from the remote subsystem
+ * into that buffer. This function then calls the process_rx_cmd() to
+ * parse the received G-Link command sequence. This function will also
+ * poll for the data for a predefined duration for performance reasons.
+ */
+static void __rx_worker(struct edge_info *einfo)
+{
+	uint32_t inactive_cycles = 0;
+	int rx_avail, rc;
+	void *rx_data;
+	int rcu_id;
+
+	rcu_id = srcu_read_lock(&einfo->use_ref);
+	if (einfo->in_ssr) {
+		srcu_read_unlock(&einfo->use_ref, rcu_id);
+		return;
+	}
+
+	if (unlikely(!einfo->rx_fifo_start)) {
+		rx_avail = glink_spi_xprt_read_avail(einfo);
+		if (!rx_avail) {
+			srcu_read_unlock(&einfo->use_ref, rcu_id);
+			return;
+		}
+		einfo->xprt_if.glink_core_if_ptr->link_up(&einfo->xprt_if);
+	}
+
+	glink_spi_xprt_set_poll_mode(einfo);
+	do {
+		if (einfo->tx_resume_needed &&
+		    glink_spi_xprt_write_avail(einfo)) {
+			einfo->tx_resume_needed = false;
+			einfo->xprt_if.glink_core_if_ptr->tx_resume(
+							&einfo->xprt_if);
+		}
+		mutex_lock(&einfo->write_lock);
+		if (einfo->tx_blocked_signal_sent) {
+			wake_up_all(&einfo->tx_blocked_queue);
+			einfo->tx_blocked_signal_sent = false;
+		}
+		mutex_unlock(&einfo->write_lock);
+
+		rx_avail = glink_spi_xprt_read_avail(einfo);
+		if (!rx_avail) {
+			usleep_range(POLL_INTERVAL_US, POLL_INTERVAL_US + 50);
+			inactive_cycles++;
+			continue;
+		}
+		inactive_cycles = 0;
+
+		rx_data = kzalloc(rx_avail, GFP_KERNEL);
+		if (!rx_data)
+			break;
+
+		rc = glink_spi_xprt_rx_cmd(einfo, rx_data, rx_avail);
+		if (rc < 0) {
+			GLINK_ERR("%s: Error %d receiving data\n",
+				  __func__, rc);
+			kfree(rx_data);
+			break;
+		}
+		process_rx_cmd(einfo, rx_data, rx_avail);
+		kfree(rx_data);
+	} while (inactive_cycles < MAX_INACTIVE_CYCLES && !einfo->in_ssr);
+	glink_spi_xprt_set_irq_mode(einfo);
+	srcu_read_unlock(&einfo->use_ref, rcu_id);
+}
+
+/**
+ * rx_worker() - Worker function to process received commands
+ * @work:       kwork associated with the edge to process commands on.
+ */
+static void rx_worker(struct kthread_work *work)
+{
+	struct edge_info *einfo;
+
+	einfo = container_of(work, struct edge_info, kwork);
+	__rx_worker(einfo);
+}
+
+/**
+ * tx_cmd_version() - Convert a version cmd to wire format and transmit
+ * @if_ptr:     The transport to transmit on.
+ * @version:    The version number to encode.
+ * @features:   The features information to encode.
+ */
+static void tx_cmd_version(struct glink_transport_if *if_ptr, uint32_t version,
+			   uint32_t features)
+{
+	struct command {
+		uint16_t id;
+		uint16_t version;
+		uint32_t features;
+		uint32_t fifo_size;
+		uint32_t reserved;
+	};
+	struct command cmd;
+	struct edge_info *einfo;
+	int rcu_id;
+
+	memset(&cmd, 0, sizeof(cmd));
+	einfo = container_of(if_ptr, struct edge_info, xprt_if);
+
+	rcu_id = srcu_read_lock(&einfo->use_ref);
+	if (einfo->in_ssr) {
+		srcu_read_unlock(&einfo->use_ref, rcu_id);
+		return;
+	}
+
+	cmd.id = VERSION_CMD;
+	cmd.version = version;
+	cmd.features = features;
+
+	glink_spi_xprt_tx_cmd(einfo, &cmd, sizeof(cmd));
+	srcu_read_unlock(&einfo->use_ref, rcu_id);
+}
+
+/**
+ * tx_cmd_version_ack() - Convert a version ack cmd to wire format and transmit
+ * @if_ptr:	The transport to transmit on.
+ * @version:	The version number to encode.
+ * @features:	The features information to encode.
+ */
+static void tx_cmd_version_ack(struct glink_transport_if *if_ptr,
+			       uint32_t version,
+			       uint32_t features)
+{
+	struct command {
+		uint16_t id;
+		uint16_t version;
+		uint32_t features;
+		uint32_t fifo_size;
+		uint32_t reserved;
+	};
+	struct command cmd;
+	struct edge_info *einfo;
+	int rcu_id;
+
+	memset(&cmd, 0, sizeof(cmd));
+	einfo = container_of(if_ptr, struct edge_info, xprt_if);
+
+	rcu_id = srcu_read_lock(&einfo->use_ref);
+	if (einfo->in_ssr) {
+		srcu_read_unlock(&einfo->use_ref, rcu_id);
+		return;
+	}
+
+	cmd.id = VERSION_ACK_CMD;
+	cmd.version = version;
+	cmd.features = features;
+
+	glink_spi_xprt_tx_cmd(einfo, &cmd, sizeof(cmd));
+	srcu_read_unlock(&einfo->use_ref, rcu_id);
+}
+
+/**
+ * set_version() - Activate a negotiated version and feature set
+ * @if_ptr:	The transport to configure.
+ * @version:	The version to use.
+ * @features:	The features to use.
+ *
+ * Return: The supported capabilities of the transport.
+ */
+static uint32_t set_version(struct glink_transport_if *if_ptr, uint32_t version,
+			uint32_t features)
+{
+	struct edge_info *einfo;
+	uint32_t ret;
+	int rcu_id;
+
+	einfo = container_of(if_ptr, struct edge_info, xprt_if);
+
+	rcu_id = srcu_read_lock(&einfo->use_ref);
+	if (einfo->in_ssr) {
+		srcu_read_unlock(&einfo->use_ref, rcu_id);
+		return 0;
+	}
+
+	ret = GCAP_SIGNALS;
+	if (features & TRACER_PKT_FEATURE)
+		ret |= GCAP_TRACER_PKT;
+
+	srcu_read_unlock(&einfo->use_ref, rcu_id);
+	return ret;
+}
+
+/**
+ * tx_cmd_ch_open() - Convert a channel open cmd to wire format and transmit
+ * @if_ptr:	The transport to transmit on.
+ * @lcid:	The local channel id to encode.
+ * @name:	The channel name to encode.
+ * @req_xprt:	The transport the core would like to migrate this channel to.
+ *
+ * Return: 0 on success or standard Linux error code.
+ */
+static int tx_cmd_ch_open(struct glink_transport_if *if_ptr, uint32_t lcid,
+			  const char *name, uint16_t req_xprt)
+{
+	struct command {
+		uint16_t id;
+		uint16_t lcid;
+		uint16_t length;
+		uint16_t req_xprt;
+		uint64_t reserved;
+	};
+	struct command cmd;
+	struct edge_info *einfo;
+	uint32_t buf_size;
+	void *buf;
+	int rcu_id;
+
+	memset(&cmd, 0, sizeof(cmd));
+	einfo = container_of(if_ptr, struct edge_info, xprt_if);
+
+	rcu_id = srcu_read_lock(&einfo->use_ref);
+	if (einfo->in_ssr) {
+		srcu_read_unlock(&einfo->use_ref, rcu_id);
+		return -EFAULT;
+	}
+
+	cmd.id = OPEN_CMD;
+	cmd.lcid = lcid;
+	cmd.length = (uint16_t)(strlen(name) + 1);
+	cmd.req_xprt = req_xprt;
+
+	buf_size = ALIGN(sizeof(cmd) + cmd.length, FIFO_ALIGNMENT);
+
+	buf = kzalloc(buf_size, GFP_KERNEL);
+	if (!buf) {
+		srcu_read_unlock(&einfo->use_ref, rcu_id);
+		return -ENOMEM;
+	}
+
+	memcpy(buf, &cmd, sizeof(cmd));
+	memcpy(buf + sizeof(cmd), name, cmd.length);
+
+	glink_spi_xprt_tx_cmd(einfo, buf, buf_size);
+
+	kfree(buf);
+	srcu_read_unlock(&einfo->use_ref, rcu_id);
+	return 0;
+}
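+
+/*
+ * Example of the buffer built above for a hypothetical channel named
+ * "loopback" (9 bytes including the NUL): a 16-byte struct command header
+ * followed by the name, with buf_size = ALIGN(16 + 9, FIFO_ALIGNMENT) = 32
+ * bytes; the trailing 7 bytes remain zero from kzalloc().
+ */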
+
+/**
+ * tx_cmd_ch_close() - Convert a channel close cmd to wire format and transmit
+ * @if_ptr:	The transport to transmit on.
+ * @lcid:	The local channel id to encode.
+ *
+ * Return: 0 on success or standard Linux error code.
+ */
+static int tx_cmd_ch_close(struct glink_transport_if *if_ptr, uint32_t lcid)
+{
+	struct command {
+		uint16_t id;
+		uint16_t lcid;
+		uint32_t reserved1;
+		uint64_t reserved2;
+	};
+	struct command cmd;
+	struct edge_info *einfo;
+	int rcu_id;
+
+	memset(&cmd, 0, sizeof(cmd));
+	einfo = container_of(if_ptr, struct edge_info, xprt_if);
+
+	rcu_id = srcu_read_lock(&einfo->use_ref);
+	if (einfo->in_ssr) {
+		srcu_read_unlock(&einfo->use_ref, rcu_id);
+		return -EFAULT;
+	}
+
+	cmd.id = CLOSE_CMD;
+	cmd.lcid = lcid;
+
+	glink_spi_xprt_tx_cmd(einfo, &cmd, sizeof(cmd));
+
+	srcu_read_unlock(&einfo->use_ref, rcu_id);
+	return 0;
+}
+
+/**
+ * tx_cmd_ch_remote_open_ack() - Convert a channel open ack cmd to wire format
+ *				 and transmit
+ * @if_ptr:	The transport to transmit on.
+ * @rcid:	The remote channel id to encode.
+ * @xprt_resp:	The response to a transport migration request.
+ */
+static void tx_cmd_ch_remote_open_ack(struct glink_transport_if *if_ptr,
+				     uint32_t rcid, uint16_t xprt_resp)
+{
+	struct command {
+		uint16_t id;
+		uint16_t rcid;
+		uint16_t reserved1;
+		uint16_t xprt_resp;
+		uint64_t reserved2;
+	};
+	struct command cmd;
+	struct edge_info *einfo;
+	int rcu_id;
+
+	memset(&cmd, 0, sizeof(cmd));
+	einfo = container_of(if_ptr, struct edge_info, xprt_if);
+
+	rcu_id = srcu_read_lock(&einfo->use_ref);
+	if (einfo->in_ssr) {
+		srcu_read_unlock(&einfo->use_ref, rcu_id);
+		return;
+	}
+
+	cmd.id = OPEN_ACK_CMD;
+	cmd.rcid = rcid;
+	cmd.xprt_resp = xprt_resp;
+
+	glink_spi_xprt_tx_cmd(einfo, &cmd, sizeof(cmd));
+	srcu_read_unlock(&einfo->use_ref, rcu_id);
+}
+
+/**
+ * tx_cmd_ch_remote_close_ack() - Convert a channel close ack cmd to wire format
+ *				  and transmit
+ * @if_ptr:	The transport to transmit on.
+ * @rcid:	The remote channel id to encode.
+ */
+static void tx_cmd_ch_remote_close_ack(struct glink_transport_if *if_ptr,
+				       uint32_t rcid)
+{
+	struct command {
+		uint16_t id;
+		uint16_t rcid;
+		uint32_t reserved1;
+		uint64_t reserved2;
+	};
+	struct command cmd;
+	struct edge_info *einfo;
+	int rcu_id;
+
+	memset(&cmd, 0, sizeof(cmd));
+	einfo = container_of(if_ptr, struct edge_info, xprt_if);
+
+	rcu_id = srcu_read_lock(&einfo->use_ref);
+	if (einfo->in_ssr) {
+		srcu_read_unlock(&einfo->use_ref, rcu_id);
+		return;
+	}
+
+	cmd.id = CLOSE_ACK_CMD;
+	cmd.rcid = rcid;
+
+	glink_spi_xprt_tx_cmd(einfo, &cmd, sizeof(cmd));
+	srcu_read_unlock(&einfo->use_ref, rcu_id);
+}
+
+/**
+ * ssr() - Process a subsystem restart notification of a transport
+ * @if_ptr:	The transport to restart
+ *
+ * Return: 0 on success or standard Linux error code.
+ */
+static int ssr(struct glink_transport_if *if_ptr)
+{
+	struct edge_info *einfo;
+
+	einfo = container_of(if_ptr, struct edge_info, xprt_if);
+
+	einfo->in_ssr = true;
+	wake_up_all(&einfo->tx_blocked_queue);
+
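+	/*
+	 * Wait for all SRCU readers (in-flight tx operations) to observe
+	 * in_ssr and exit before resetting the transport bookkeeping.
+	 */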
+	synchronize_srcu(&einfo->use_ref);
+	einfo->tx_resume_needed = false;
+	einfo->tx_blocked_signal_sent = false;
+	einfo->tx_fifo_start = 0;
+	einfo->rx_fifo_start = 0;
+	einfo->tx_fifo_write = 0;
+	einfo->rx_fifo_read = 0;
+	einfo->fifo_size = DEFAULT_FIFO_SIZE;
+	einfo->xprt_if.glink_core_if_ptr->link_down(&einfo->xprt_if);
+
+	return 0;
+}
+
+/**
+ * allocate_rx_intent() - Allocate/reserve space for RX Intent
+ * @if_ptr:	The transport the intent is associated with.
+ * @size:	size of intent.
+ * @intent:	Pointer to the intent structure.
+ *
+ * Assign "data" to the newly allocated linear buffer and "iovec" to the
+ * "intent" itself, so that the data can also be handed to clients that
+ * only accept vector buffers.
+ * Note that returning NULL for the pointer is valid (it means that space
+ * has been reserved, but the actual pointer will be provided later).
+ *
+ * Return: 0 on success or standard Linux error code.
+ */
+static int allocate_rx_intent(struct glink_transport_if *if_ptr, size_t size,
+			      struct glink_core_rx_intent *intent)
+{
+	void *t;
+
+	t = kzalloc(size, GFP_KERNEL);
+	if (!t)
+		return -ENOMEM;
+
+	intent->data = t;
+	intent->iovec = (void *)intent;
+	intent->vprovider = rx_linear_vbuf_provider;
+	intent->pprovider = NULL;
+	return 0;
+}
+
+/**
+ * deallocate_rx_intent() - Deallocate space created for RX Intent
+ * @if_ptr:	The transport the intent is associated with.
+ * @intent:	Pointer to the intent structure.
+ *
+ * Return: 0 on success or standard Linux error code.
+ */
+static int deallocate_rx_intent(struct glink_transport_if *if_ptr,
+				struct glink_core_rx_intent *intent)
+{
+	if (!intent || !intent->data)
+		return -EINVAL;
+
+	kfree(intent->data);
+	intent->data = NULL;
+	intent->iovec = NULL;
+	intent->vprovider = NULL;
+	return 0;
+}
+
+/**
+ * tx_cmd_local_rx_intent() - Convert an rx intent cmd to wire format and
+ *			      transmit
+ * @if_ptr:	The transport to transmit on.
+ * @lcid:	The local channel id to encode.
+ * @size:	The intent size to encode.
+ * @liid:	The local intent id to encode.
+ *
+ * Return: 0 on success or standard Linux error code.
+ */
+static int tx_cmd_local_rx_intent(struct glink_transport_if *if_ptr,
+				  uint32_t lcid, size_t size, uint32_t liid)
+{
+	struct command {
+		uint16_t id;
+		uint16_t lcid;
+		uint32_t count;
+		uint64_t reserved;
+		uint32_t size;
+		uint32_t liid;
+		uint64_t addr;
+	};
+	struct command cmd;
+	struct edge_info *einfo;
+	int rcu_id;
+
+	if (size > UINT_MAX) {
+		pr_err("%s: size %zu is too large to encode\n", __func__, size);
+		return -EMSGSIZE;
+	}
+
+	memset(&cmd, 0, sizeof(cmd));
+	einfo = container_of(if_ptr, struct edge_info, xprt_if);
+
+	rcu_id = srcu_read_lock(&einfo->use_ref);
+	if (einfo->in_ssr) {
+		srcu_read_unlock(&einfo->use_ref, rcu_id);
+		return -EFAULT;
+	}
+
+	cmd.id = RX_INTENT_CMD;
+	cmd.lcid = lcid;
+	cmd.count = 1;
+	cmd.size = size;
+	cmd.liid = liid;
+
+	glink_spi_xprt_tx_cmd(einfo, &cmd, sizeof(cmd));
+
+	srcu_read_unlock(&einfo->use_ref, rcu_id);
+	return 0;
+}
+
+/**
+ * tx_cmd_local_rx_done() - Convert an rx done cmd to wire format and transmit
+ * @if_ptr:	The transport to transmit on.
+ * @lcid:	The local channel id to encode.
+ * @liid:	The local intent id to encode.
+ * @reuse:	Reuse the consumed intent.
+ */
+static void tx_cmd_local_rx_done(struct glink_transport_if *if_ptr,
+				 uint32_t lcid, uint32_t liid, bool reuse)
+{
+	struct command {
+		uint16_t id;
+		uint16_t lcid;
+		uint32_t liid;
+		uint64_t reserved;
+	};
+	struct command cmd;
+	struct edge_info *einfo;
+	int rcu_id;
+
+	memset(&cmd, 0, sizeof(cmd));
+	einfo = container_of(if_ptr, struct edge_info, xprt_if);
+
+	rcu_id = srcu_read_lock(&einfo->use_ref);
+	if (einfo->in_ssr) {
+		srcu_read_unlock(&einfo->use_ref, rcu_id);
+		return;
+	}
+
+	cmd.id = reuse ? RX_DONE_W_REUSE_CMD : RX_DONE_CMD;
+	cmd.lcid = lcid;
+	cmd.liid = liid;
+
+	glink_spi_xprt_tx_cmd(einfo, &cmd, sizeof(cmd));
+	srcu_read_unlock(&einfo->use_ref, rcu_id);
+}
+
+/**
+ * tx_cmd_rx_intent_req() - Convert an rx intent request cmd to wire format and
+ *			    transmit
+ * @if_ptr:	The transport to transmit on.
+ * @lcid:	The local channel id to encode.
+ * @size:	The requested intent size to encode.
+ *
+ * Return: 0 on success or standard Linux error code.
+ */
+static int tx_cmd_rx_intent_req(struct glink_transport_if *if_ptr,
+				uint32_t lcid, size_t size)
+{
+	struct command {
+		uint16_t id;
+		uint16_t lcid;
+		uint32_t size;
+		uint64_t reserved;
+	};
+	struct command cmd;
+	struct edge_info *einfo;
+	int rcu_id;
+
+	if (size > UINT_MAX) {
+		pr_err("%s: size %zu is too large to encode\n", __func__, size);
+		return -EMSGSIZE;
+	}
+
+	memset(&cmd, 0, sizeof(cmd));
+	einfo = container_of(if_ptr, struct edge_info, xprt_if);
+
+	rcu_id = srcu_read_lock(&einfo->use_ref);
+	if (einfo->in_ssr) {
+		srcu_read_unlock(&einfo->use_ref, rcu_id);
+		return -EFAULT;
+	}
+
+	cmd.id = RX_INTENT_REQ_CMD;
+	cmd.lcid = lcid;
+	cmd.size = size;
+
+	glink_spi_xprt_tx_cmd(einfo, &cmd, sizeof(cmd));
+
+	srcu_read_unlock(&einfo->use_ref, rcu_id);
+	return 0;
+}
+
+/**
+ * tx_cmd_remote_rx_intent_req_ack() - Convert an rx intent request ack cmd
+ *				to wire format and transmit
+ * @if_ptr:	The transport to transmit on.
+ * @lcid:	The local channel id to encode.
+ * @granted:	The request response to encode.
+ *
+ * Return: 0 on success or standard Linux error code.
+ */
+static int tx_cmd_remote_rx_intent_req_ack(struct glink_transport_if *if_ptr,
+					   uint32_t lcid, bool granted)
+{
+	struct command {
+		uint16_t id;
+		uint16_t lcid;
+		uint32_t response;
+		uint64_t reserved;
+	};
+	struct command cmd;
+	struct edge_info *einfo;
+	int rcu_id;
+
+	memset(&cmd, 0, sizeof(cmd));
+	einfo = container_of(if_ptr, struct edge_info, xprt_if);
+
+	rcu_id = srcu_read_lock(&einfo->use_ref);
+	if (einfo->in_ssr) {
+		srcu_read_unlock(&einfo->use_ref, rcu_id);
+		return -EFAULT;
+	}
+
+	cmd.id = RX_INTENT_REQ_ACK_CMD;
+	cmd.lcid = lcid;
+	if (granted)
+		cmd.response = 1;
+	else
+		cmd.response = 0;
+
+	glink_spi_xprt_tx_cmd(einfo, &cmd, sizeof(cmd));
+
+	srcu_read_unlock(&einfo->use_ref, rcu_id);
+	return 0;
+}
+
+/**
+ * tx_cmd_set_sigs() - Convert a signals cmd to wire format and transmit
+ * @if_ptr:	The transport to transmit on.
+ * @lcid:	The local channel id to encode.
+ * @sigs:	The signals to encode.
+ *
+ * Return: 0 on success or standard Linux error code.
+ */
+static int tx_cmd_set_sigs(struct glink_transport_if *if_ptr, uint32_t lcid,
+			   uint32_t sigs)
+{
+	struct command {
+		uint16_t id;
+		uint16_t lcid;
+		uint32_t sigs;
+		uint64_t reserved;
+	};
+	struct command cmd;
+	struct edge_info *einfo;
+	int rcu_id;
+
+	memset(&cmd, 0, sizeof(cmd));
+	einfo = container_of(if_ptr, struct edge_info, xprt_if);
+
+	rcu_id = srcu_read_lock(&einfo->use_ref);
+	if (einfo->in_ssr) {
+		srcu_read_unlock(&einfo->use_ref, rcu_id);
+		return -EFAULT;
+	}
+
+	cmd.id = SIGNALS_CMD;
+	cmd.lcid = lcid;
+	cmd.sigs = sigs;
+
+	glink_spi_xprt_tx_cmd(einfo, &cmd, sizeof(cmd));
+
+	srcu_read_unlock(&einfo->use_ref, rcu_id);
+	return 0;
+}
+
+/**
+ * tx_data() - convert a data/tracer_pkt to wire format and transmit
+ * @if_ptr:     The transport to transmit on.
+ * @cmd_id:     The command ID to transmit.
+ * @lcid:       The local channel id to encode.
+ * @pctx:       The data to encode.
+ *
+ * Return: Number of bytes written or standard Linux error code.
+ */
+static int tx_data(struct glink_transport_if *if_ptr, uint16_t cmd_id,
+		   uint32_t lcid, struct glink_core_tx_pkt *pctx)
+{
+	struct command {
+		uint16_t id;
+		uint16_t lcid;
+		uint32_t riid;
+		uint64_t reserved;
+		uint32_t size;
+		uint32_t size_left;
+		uint64_t addr;
+	};
+	struct command cmd;
+	struct edge_info *einfo;
+	uint32_t size;
+	void *data_start, *dst = NULL;
+	size_t tx_size = 0;
+	int rcu_id;
+
+	if (pctx->size < pctx->size_remaining) {
+		GLINK_ERR("%s: size remaining exceeds size.  Resetting.\n",
+			  __func__);
+		pctx->size_remaining = pctx->size;
+	}
+	if (!pctx->size_remaining)
+		return 0;
+
+	memset(&cmd, 0, sizeof(cmd));
+	einfo = container_of(if_ptr, struct edge_info, xprt_if);
+
+	rcu_id = srcu_read_lock(&einfo->use_ref);
+	if (einfo->in_ssr) {
+		srcu_read_unlock(&einfo->use_ref, rcu_id);
+		return -EFAULT;
+	}
+
+	if (cmd_id == TX_DATA_CMD) {
+		if (pctx->size_remaining == pctx->size)
+			cmd.id = TX_DATA_CMD;
+		else
+			cmd.id = TX_DATA_CONT_CMD;
+	} else {
+		if (pctx->size_remaining == pctx->size)
+			cmd.id = TRACER_PKT_CMD;
+		else
+			cmd.id = TRACER_PKT_CONT_CMD;
+	}
+	cmd.lcid = lcid;
+	cmd.riid = pctx->riid;
+	data_start = get_tx_vaddr(pctx, pctx->size - pctx->size_remaining,
+				  &tx_size);
+	if (unlikely(!data_start)) {
+		GLINK_ERR("%s: invalid data_start\n", __func__);
+		srcu_read_unlock(&einfo->use_ref, rcu_id);
+		return -EINVAL;
+	}
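+	/*
+	 * Keep the out-of-band transfer aligned: if the chunk is not a
+	 * multiple of XPRT_ALIGNMENT, trim it down; the remainder stays in
+	 * size_remaining and goes out in a later call (a tail smaller than
+	 * SHORT_PKT_SIZE is typically sent in band by tx_short_data()).
+	 */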
+	if (tx_size & (XPRT_ALIGNMENT - 1))
+		tx_size = ALIGN(tx_size - SHORT_PKT_SIZE, XPRT_ALIGNMENT);
+	if (likely(pctx->cookie))
+		dst = pctx->cookie + (pctx->size - pctx->size_remaining);
+
+	mutex_lock(&einfo->write_lock);
+	size = glink_spi_xprt_write_avail(einfo);
+	/* Need enough space to write the command */
+	if (size <= sizeof(cmd)) {
+		einfo->tx_resume_needed = true;
+		mutex_unlock(&einfo->write_lock);
+		srcu_read_unlock(&einfo->use_ref, rcu_id);
+		return -EAGAIN;
+	}
+	cmd.addr = 0;
+	cmd.size = tx_size;
+	pctx->size_remaining -= tx_size;
+	cmd.size_left = pctx->size_remaining;
+	if (cmd.id == TRACER_PKT_CMD)
+		tracer_pkt_log_event((void *)(pctx->data), GLINK_XPRT_TX);
+
+	if (!strcmp(einfo->xprt_cfg.edge, "wdsp"))
+		wdsp_resume(&einfo->cmpnt);
+	glink_spi_xprt_tx_data(einfo, data_start, dst, tx_size);
+	glink_spi_xprt_tx_cmd_safe(einfo, &cmd, sizeof(cmd));
+	GLINK_DBG("%s %s: lcid[%u] riid[%u] cmd[%d], size[%d], size_left[%d]\n",
+		  "<SPI>", __func__, cmd.lcid, cmd.riid, cmd.id, cmd.size,
+		  cmd.size_left);
+	mutex_unlock(&einfo->write_lock);
+	srcu_read_unlock(&einfo->use_ref, rcu_id);
+	return cmd.size;
+}
+
+/**
+ * tx_short_data() - Transmit a short packet in band along with the command
+ * @if_ptr:     The transport to transmit on.
+ * @lcid:       The local channel id to encode.
+ * @pctx:       The data to encode.
+ *
+ * Return: Number of bytes written or standard Linux error code.
+ */
+static int tx_short_data(struct glink_transport_if *if_ptr,
+			 uint32_t lcid, struct glink_core_tx_pkt *pctx)
+{
+	struct command {
+		uint16_t id;
+		uint16_t lcid;
+		uint32_t riid;
+		uint32_t size;
+		uint32_t size_left;
+		unsigned char data[SHORT_PKT_SIZE];
+	};
+	struct command cmd;
+	struct edge_info *einfo;
+	uint32_t size;
+	void *data_start;
+	size_t tx_size = 0;
+	int rcu_id;
+
+	if (pctx->size < pctx->size_remaining) {
+		GLINK_ERR("%s: size remaining exceeds size.  Resetting.\n",
+			  __func__);
+		pctx->size_remaining = pctx->size;
+	}
+	if (!pctx->size_remaining)
+		return 0;
+
+	memset(&cmd, 0, sizeof(cmd));
+	einfo = container_of(if_ptr, struct edge_info, xprt_if);
+
+	rcu_id = srcu_read_lock(&einfo->use_ref);
+	if (einfo->in_ssr) {
+		srcu_read_unlock(&einfo->use_ref, rcu_id);
+		return -EFAULT;
+	}
+
+	cmd.id = TX_SHORT_DATA_CMD;
+	cmd.lcid = lcid;
+	cmd.riid = pctx->riid;
+	data_start = get_tx_vaddr(pctx, pctx->size - pctx->size_remaining,
+				  &tx_size);
+	if (unlikely(!data_start || tx_size > SHORT_PKT_SIZE)) {
+		GLINK_ERR("%s: invalid data_start %p or tx_size %zu\n",
+			  __func__, data_start, tx_size);
+		srcu_read_unlock(&einfo->use_ref, rcu_id);
+		return -EINVAL;
+	}
+
+	mutex_lock(&einfo->write_lock);
+	size = glink_spi_xprt_write_avail(einfo);
+	/* Need enough space to write the command */
+	if (size <= sizeof(cmd)) {
+		einfo->tx_resume_needed = true;
+		mutex_unlock(&einfo->write_lock);
+		srcu_read_unlock(&einfo->use_ref, rcu_id);
+		return -EAGAIN;
+	}
+	cmd.size = tx_size;
+	pctx->size_remaining -= tx_size;
+	cmd.size_left = pctx->size_remaining;
+	memcpy(cmd.data, data_start, tx_size);
+	if (!strcmp(einfo->xprt_cfg.edge, "wdsp"))
+		wdsp_resume(&einfo->cmpnt);
+	glink_spi_xprt_tx_cmd_safe(einfo, &cmd, sizeof(cmd));
+	GLINK_DBG("%s %s: lcid[%u] riid[%u] cmd[%d], size[%d], size_left[%d]\n",
+		  "<SPI>", __func__, cmd.lcid, cmd.riid, cmd.id, cmd.size,
+		  cmd.size_left);
+	mutex_unlock(&einfo->write_lock);
+	srcu_read_unlock(&einfo->use_ref, rcu_id);
+	return cmd.size;
+}
+
+/**
+ * tx() - convert a data transmit cmd to wire format and transmit
+ * @if_ptr:     The transport to transmit on.
+ * @lcid:       The local channel id to encode.
+ * @pctx:       The data to encode.
+ *
+ * Return: Number of bytes written or standard Linux error code.
+ */
+static int tx(struct glink_transport_if *if_ptr, uint32_t lcid,
+	      struct glink_core_tx_pkt *pctx)
+{
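+	/*
+	 * Small payloads are sent in band with the command itself;
+	 * anything larger goes through the out-of-band data path.
+	 */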
+	if (pctx->size_remaining <= SHORT_PKT_SIZE)
+		return tx_short_data(if_ptr, lcid, pctx);
+	return tx_data(if_ptr, TX_DATA_CMD, lcid, pctx);
+}
+
+/**
+ * tx_cmd_tracer_pkt() - convert a tracer packet cmd to wire format and transmit
+ * @if_ptr:     The transport to transmit on.
+ * @lcid:       The local channel id to encode.
+ * @pctx:       The data to encode.
+ *
+ * Return: Number of bytes written or standard Linux error code.
+ */
+static int tx_cmd_tracer_pkt(struct glink_transport_if *if_ptr, uint32_t lcid,
+			     struct glink_core_tx_pkt *pctx)
+{
+	return tx_data(if_ptr, TRACER_PKT_CMD, lcid, pctx);
+}
+
+/**
+ * wait_link_down() - Check status of read/write indices
+ * @if_ptr:     The transport to check
+ *
+ * Return: 1 if indices are all zero, 0 otherwise; this transport always
+ *         reports 0.
+ */
+static int wait_link_down(struct glink_transport_if *if_ptr)
+{
+	return 0;
+}
+
+/**
+ * get_power_vote_ramp_time() - Get the ramp time required for the power
+ *                              votes to be applied
+ * @if_ptr:     The transport interface on which power voting is requested.
+ * @state:      The power state for which ramp time is required.
+ *
+ * Return: The ramp time specific to the power state, standard error otherwise.
+ */
+static unsigned long get_power_vote_ramp_time(
+		struct glink_transport_if *if_ptr, uint32_t state)
+{
+	return 0;
+}
+
+/**
+ * power_vote() - Update the power votes to meet qos requirement
+ * @if_ptr:     The transport interface on which power voting is requested.
+ * @state:      The power state for which the voting should be done.
+ *
+ * Return: 0 on Success, standard error otherwise.
+ */
+static int power_vote(struct glink_transport_if *if_ptr, uint32_t state)
+{
+	unsigned long flags;
+	struct edge_info *einfo;
+
+	einfo = container_of(if_ptr, struct edge_info, xprt_if);
+	spin_lock_irqsave(&einfo->activity_lock, flags);
+	einfo->activity_flag |= ACTIVE_TX;
+	spin_unlock_irqrestore(&einfo->activity_lock, flags);
+	return 0;
+}
+
+/**
+ * power_unvote() - Remove all the power votes
+ * @if_ptr:     The transport interface on which power voting is requested.
+ *
+ * Return: 0 on Success, standard error otherwise.
+ */
+static int power_unvote(struct glink_transport_if *if_ptr)
+{
+	unsigned long flags;
+	struct edge_info *einfo;
+
+	einfo = container_of(if_ptr, struct edge_info, xprt_if);
+	spin_lock_irqsave(&einfo->activity_lock, flags);
+	einfo->activity_flag &= ~ACTIVE_TX;
+	spin_unlock_irqrestore(&einfo->activity_lock, flags);
+	return 0;
+}
+
+static int glink_wdsp_cmpnt_init(struct device *dev, void *priv_data)
+{
+	return 0;
+}
+
+static int glink_wdsp_cmpnt_deinit(struct device *dev, void *priv_data)
+{
+	return 0;
+}
+
+static int glink_wdsp_cmpnt_event_handler(struct device *dev,
+		void *priv_data, enum wdsp_event_type event, void *data)
+{
+	struct edge_info *einfo = dev_get_drvdata(dev);
+	struct glink_cmpnt *cmpnt = &einfo->cmpnt;
+	struct device *sdev;
+	struct spi_device *spi_dev;
+
+	switch (event) {
+	case WDSP_EVENT_PRE_BOOTUP:
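+		/*
+		 * Fetch the SPI device from the WDSP master component so
+		 * the transport can drive the bus once the DSP boots.
+		 */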
+		if (cmpnt && cmpnt->master_dev &&
+		    cmpnt->master_ops &&
+		    cmpnt->master_ops->get_dev_for_cmpnt)
+			sdev = cmpnt->master_ops->get_dev_for_cmpnt(
+				cmpnt->master_dev, WDSP_CMPNT_TRANSPORT);
+		else
+			sdev = NULL;
+
+		if (!sdev) {
+			dev_err(dev, "%s: Failed to get transport device\n",
+				__func__);
+			break;
+		}
+
+		spi_dev = to_spi_device(sdev);
+		einfo->spi_dev = spi_dev;
+		break;
+	case WDSP_EVENT_POST_BOOTUP:
+		einfo->in_ssr = false;
+		synchronize_srcu(&einfo->use_ref);
+		/* No break here to trigger fake rx_worker */
+	case WDSP_EVENT_IPC1_INTR:
+		queue_kthread_work(&einfo->kworker, &einfo->kwork);
+		break;
+	case WDSP_EVENT_PRE_SHUTDOWN:
+		ssr(&einfo->xprt_if);
+		break;
+	default:
+		pr_debug("%s: unhandled event %d\n", __func__, event);
+		break;
+	}
+
+	return 0;
+}
+
+/* glink_wdsp_cmpnt_ops - Callback operations registered with WDSP framework */
+static struct wdsp_cmpnt_ops glink_wdsp_cmpnt_ops = {
+	.init = glink_wdsp_cmpnt_init,
+	.deinit = glink_wdsp_cmpnt_deinit,
+	.event_handler = glink_wdsp_cmpnt_event_handler,
+};
+
+static int glink_component_bind(struct device *dev, struct device *master,
+				void *data)
+{
+	struct edge_info *einfo = dev_get_drvdata(dev);
+	struct glink_cmpnt *cmpnt = &einfo->cmpnt;
+	int ret = 0;
+
+	cmpnt->master_dev = master;
+	cmpnt->master_ops = data;
+
+	if (cmpnt->master_ops && cmpnt->master_ops->register_cmpnt_ops)
+		ret = cmpnt->master_ops->register_cmpnt_ops(master, dev, einfo,
+							&glink_wdsp_cmpnt_ops);
+	else
+		ret = -EINVAL;
+
+	if (ret)
+		dev_err(dev, "%s: register_cmpnt_ops failed, err = %d\n",
+			__func__, ret);
+	return ret;
+}
+
+static void glink_component_unbind(struct device *dev, struct device *master,
+				   void *data)
+{
+	struct edge_info *einfo = dev_get_drvdata(dev);
+	struct glink_cmpnt *cmpnt = &einfo->cmpnt;
+
+	cmpnt->master_dev = NULL;
+	cmpnt->master_ops = NULL;
+}
+
+static const struct component_ops glink_component_ops = {
+	.bind = glink_component_bind,
+	.unbind = glink_component_unbind,
+};
+
+/**
+ * init_xprt_if() - Initialize the xprt_if for an edge
+ * @einfo:	The edge to initialize.
+ */
+static void init_xprt_if(struct edge_info *einfo)
+{
+	einfo->xprt_if.tx_cmd_version = tx_cmd_version;
+	einfo->xprt_if.tx_cmd_version_ack = tx_cmd_version_ack;
+	einfo->xprt_if.set_version = set_version;
+	einfo->xprt_if.tx_cmd_ch_open = tx_cmd_ch_open;
+	einfo->xprt_if.tx_cmd_ch_close = tx_cmd_ch_close;
+	einfo->xprt_if.tx_cmd_ch_remote_open_ack = tx_cmd_ch_remote_open_ack;
+	einfo->xprt_if.tx_cmd_ch_remote_close_ack = tx_cmd_ch_remote_close_ack;
+	einfo->xprt_if.ssr = ssr;
+	einfo->xprt_if.allocate_rx_intent = allocate_rx_intent;
+	einfo->xprt_if.deallocate_rx_intent = deallocate_rx_intent;
+	einfo->xprt_if.tx_cmd_local_rx_intent = tx_cmd_local_rx_intent;
+	einfo->xprt_if.tx_cmd_local_rx_done = tx_cmd_local_rx_done;
+	einfo->xprt_if.tx = tx;
+	einfo->xprt_if.tx_cmd_rx_intent_req = tx_cmd_rx_intent_req;
+	einfo->xprt_if.tx_cmd_remote_rx_intent_req_ack =
+						tx_cmd_remote_rx_intent_req_ack;
+	einfo->xprt_if.tx_cmd_set_sigs = tx_cmd_set_sigs;
+	einfo->xprt_if.wait_link_down = wait_link_down;
+	einfo->xprt_if.tx_cmd_tracer_pkt = tx_cmd_tracer_pkt;
+	einfo->xprt_if.get_power_vote_ramp_time = get_power_vote_ramp_time;
+	einfo->xprt_if.power_vote = power_vote;
+	einfo->xprt_if.power_unvote = power_unvote;
+}
+
+/**
+ * init_xprt_cfg() - Initialize the xprt_cfg for an edge
+ * @einfo:	The edge to initialize.
+ * @name:	The name of the remote side this edge communicates to.
+ */
+static void init_xprt_cfg(struct edge_info *einfo, const char *name)
+{
+	einfo->xprt_cfg.name = XPRT_NAME;
+	einfo->xprt_cfg.edge = name;
+	einfo->xprt_cfg.versions = versions;
+	einfo->xprt_cfg.versions_entries = ARRAY_SIZE(versions);
+	einfo->xprt_cfg.max_cid = SZ_64K;
+	einfo->xprt_cfg.max_iid = SZ_2G;
+}
+
+/**
+ * parse_qos_dt_params() - Parse the power states from DT
+ * @node:	Reference to the device tree node for a specific edge.
+ * @einfo:	Edge information for the edge being probed.
+ *
+ * Return: 0 on success, standard error code otherwise.
+ */
+static int parse_qos_dt_params(struct device_node *node,
+				struct edge_info *einfo)
+{
+	int rc;
+	int i;
+	char *key;
+	uint32_t *arr32;
+	uint32_t num_states;
+
+	key = "qcom,ramp-time";
+	if (!of_find_property(node, key, &num_states))
+		return -ENODEV;
+
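+	/*
+	 * of_find_property() returned the property length in bytes;
+	 * convert it to a count of u32 entries.
+	 */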
+	num_states /= sizeof(uint32_t);
+
+	einfo->num_pw_states = num_states;
+
+	arr32 = kmalloc_array(num_states, sizeof(uint32_t), GFP_KERNEL);
+	if (!arr32)
+		return -ENOMEM;
+
+	einfo->ramp_time_us = kmalloc_array(num_states, sizeof(unsigned long),
+					GFP_KERNEL);
+	if (!einfo->ramp_time_us) {
+		rc = -ENOMEM;
+		goto mem_alloc_fail;
+	}
+
+	rc = of_property_read_u32_array(node, key, arr32, num_states);
+	if (rc) {
+		rc = -ENODEV;
+		goto invalid_key;
+	}
+	for (i = 0; i < num_states; i++)
+		einfo->ramp_time_us[i] = arr32[i];
+
+	kfree(arr32);
+	return 0;
+
+invalid_key:
+	kfree(einfo->ramp_time_us);
+mem_alloc_fail:
+	kfree(arr32);
+	return rc;
+}
+
+/**
+ * parse_remote_fifo_cfg() - Parse any remote FIFO configuration
+ * @node:	Reference to the device tree node describing the remote FIFO.
+ * @einfo:	Edge information for the edge being probed.
+ *
+ * Return: 0 on success, standard error code otherwise.
+ */
+static int parse_remote_fifo_cfg(struct device_node *node,
+				 struct edge_info *einfo)
+{
+	int rc;
+	char *key;
+
+	key = "qcom,out-read-idx-reg";
+	rc = of_property_read_u32(node, key, &einfo->tx_fifo_read_reg_addr);
+	if (rc)
+		goto key_error;
+
+	key = "qcom,out-write-idx-reg";
+	rc = of_property_read_u32(node, key, &einfo->tx_fifo_write_reg_addr);
+	if (rc)
+		goto key_error;
+
+	key = "qcom,in-read-idx-reg";
+	rc = of_property_read_u32(node, key, &einfo->rx_fifo_read_reg_addr);
+	if (rc)
+		goto key_error;
+
+	key = "qcom,in-write-idx-reg";
+	rc = of_property_read_u32(node, key, &einfo->rx_fifo_write_reg_addr);
+	if (rc)
+		goto key_error;
+	return 0;
+
+key_error:
+	pr_err("%s: Error %d parsing key %s\n", __func__, rc, key);
+	return rc;
+}
+
+static int glink_spi_probe(struct platform_device *pdev)
+{
+	struct device_node *node;
+	struct device_node *phandle_node;
+	struct edge_info *einfo;
+	int rc;
+	char *key;
+	const char *subsys_name;
+	unsigned long flags;
+
+	node = pdev->dev.of_node;
+
+	einfo = kzalloc(sizeof(*einfo), GFP_KERNEL);
+	if (!einfo) {
+		rc = -ENOMEM;
+		goto edge_info_alloc_fail;
+	}
+
+	key = "label";
+	subsys_name = of_get_property(node, key, NULL);
+	if (!subsys_name) {
+		pr_err("%s: missing key %s\n", __func__, key);
+		rc = -ENODEV;
+		goto missing_key;
+	}
+	strlcpy(einfo->subsys_name, subsys_name, sizeof(einfo->subsys_name));
+
+	init_xprt_cfg(einfo, subsys_name);
+	init_xprt_if(einfo);
+
+	einfo->fifo_size = DEFAULT_FIFO_SIZE;
+	init_kthread_work(&einfo->kwork, rx_worker);
+	init_kthread_worker(&einfo->kworker);
+	init_srcu_struct(&einfo->use_ref);
+	mutex_init(&einfo->write_lock);
+	init_waitqueue_head(&einfo->tx_blocked_queue);
+	spin_lock_init(&einfo->activity_lock);
+
+	spin_lock_irqsave(&edge_infos_lock, flags);
+	list_add_tail(&einfo->list, &edge_infos);
+	spin_unlock_irqrestore(&edge_infos_lock, flags);
+
+	einfo->task = kthread_run(kthread_worker_fn, &einfo->kworker,
+				  "spi_%s", subsys_name);
+	if (IS_ERR(einfo->task)) {
+		rc = PTR_ERR(einfo->task);
+		pr_err("%s: kthread run failed %d\n", __func__, rc);
+		goto kthread_fail;
+	}
+
+	key = "qcom,remote-fifo-config";
+	phandle_node = of_parse_phandle(node, key, 0);
+	if (phandle_node)
+		parse_remote_fifo_cfg(phandle_node, einfo);
+
+	key = "qcom,qos-config";
+	phandle_node = of_parse_phandle(node, key, 0);
+	if (phandle_node && !(of_get_glink_core_qos_cfg(phandle_node,
+							&einfo->xprt_cfg)))
+		parse_qos_dt_params(node, einfo);
+
+	rc = glink_core_register_transport(&einfo->xprt_if, &einfo->xprt_cfg);
+	if (rc == -EPROBE_DEFER)
+		goto reg_xprt_fail;
+	if (rc) {
+		pr_err("%s: glink core register transport failed: %d\n",
+			__func__, rc);
+		goto reg_xprt_fail;
+	}
+
+	dev_set_drvdata(&pdev->dev, einfo);
+	if (!strcmp(einfo->xprt_cfg.edge, "wdsp")) {
+		rc = component_add(&pdev->dev, &glink_component_ops);
+		if (rc) {
+			pr_err("%s: component_add failed, err = %d\n",
+				__func__, rc);
+			rc = -ENODEV;
+			goto reg_cmpnt_fail;
+		}
+	}
+	return 0;
+
+reg_cmpnt_fail:
+	dev_set_drvdata(&pdev->dev, NULL);
+	glink_core_unregister_transport(&einfo->xprt_if);
+reg_xprt_fail:
+	flush_kthread_worker(&einfo->kworker);
+	kthread_stop(einfo->task);
+	einfo->task = NULL;
+kthread_fail:
+	spin_lock_irqsave(&edge_infos_lock, flags);
+	list_del(&einfo->list);
+	spin_unlock_irqrestore(&edge_infos_lock, flags);
+missing_key:
+	kfree(einfo);
+edge_info_alloc_fail:
+	return rc;
+}
+
+static int glink_spi_remove(struct platform_device *pdev)
+{
+	struct edge_info *einfo;
+	unsigned long flags;
+
+	einfo = (struct edge_info *)dev_get_drvdata(&pdev->dev);
+	glink_core_unregister_transport(&einfo->xprt_if);
+	flush_kthread_worker(&einfo->kworker);
+	kthread_stop(einfo->task);
+	einfo->task = NULL;
+	spin_lock_irqsave(&edge_infos_lock, flags);
+	list_del(&einfo->list);
+	spin_unlock_irqrestore(&edge_infos_lock, flags);
+	kfree(einfo);
+	return 0;
+}
+
+static int glink_spi_resume(struct platform_device *pdev)
+{
+	return 0;
+}
+
+static int glink_spi_suspend(struct platform_device *pdev,
+				   pm_message_t state)
+{
+	unsigned long flags;
+	struct edge_info *einfo;
+	bool suspend;
+	int rc = -EBUSY;
+
+	einfo = (struct edge_info *)dev_get_drvdata(&pdev->dev);
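+	/* Only the wdsp edge gates suspend on transport activity. */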
+	if (strcmp(einfo->xprt_cfg.edge, "wdsp"))
+		return 0;
+
+	spin_lock_irqsave(&einfo->activity_lock, flags);
+	suspend = !(einfo->activity_flag);
+	spin_unlock_irqrestore(&einfo->activity_lock, flags);
+	if (suspend)
+		rc = wdsp_suspend(&einfo->cmpnt);
+	if (rc < 0)
+		pr_err("%s: Could not suspend activity_flag %d, rc %d\n",
+			__func__, einfo->activity_flag, rc);
+	return rc;
+}
+
+static const struct of_device_id spi_match_table[] = {
+	{ .compatible = "qcom,glink-spi-xprt" },
+	{},
+};
+
+static struct platform_driver glink_spi_driver = {
+	.probe = glink_spi_probe,
+	.remove = glink_spi_remove,
+	.resume = glink_spi_resume,
+	.suspend = glink_spi_suspend,
+	.driver = {
+		.name = "msm_glink_spi_xprt",
+		.owner = THIS_MODULE,
+		.of_match_table = spi_match_table,
+	},
+};
+
+static int __init glink_spi_xprt_init(void)
+{
+	int rc;
+
+	rc = platform_driver_register(&glink_spi_driver);
+	if (rc)
+		pr_err("%s: glink_spi register failed %d\n", __func__, rc);
+
+	return rc;
+}
+module_init(glink_spi_xprt_init);
+
+static void __exit glink_spi_xprt_exit(void)
+{
+	platform_driver_unregister(&glink_spi_driver);
+}
+module_exit(glink_spi_xprt_exit);
+
+MODULE_DESCRIPTION("MSM G-Link SPI Transport");
+MODULE_LICENSE("GPL v2");
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/soc/qcom/glink_ssr.c	2019-10-29 09:26:24.809214589 +0100
@@ -0,0 +1,1079 @@
+/* Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#include <linux/err.h>
+#include <linux/ipc_logging.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/notifier.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/suspend.h>
+#include <linux/random.h>
+#include <soc/qcom/glink.h>
+#include <soc/qcom/subsystem_notif.h>
+#include "glink_private.h"
+
+#define GLINK_SSR_REPLY_TIMEOUT	HZ
+#define GLINK_SSR_INTENT_REQ_TIMEOUT_MS 500
+#define GLINK_SSR_EVENT_INIT ~0
+#define NUM_LOG_PAGES 3
+
+#define GLINK_SSR_LOG(x...) do { \
+	if (glink_ssr_log_ctx) \
+		ipc_log_string(glink_ssr_log_ctx, x); \
+} while (0)
+
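+/* Like GLINK_SSR_LOG(), but also print the message to the kernel log. */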
+#define GLINK_SSR_ERR(x...) do { \
+	pr_err(x); \
+	GLINK_SSR_LOG(x); \
+} while (0)
+
+static void *glink_ssr_log_ctx;
+
+/* Global restart counter */
+static uint32_t sequence_number;
+
+/* Flag indicating if responses were received for all SSR notifications */
+static bool notifications_successful;
+
+/* Completion for setting notifications_successful */
+struct completion notifications_successful_complete;
+
+/**
+ * struct restart_notifier_block - restart notifier wrapper structure
+ * subsystem:	the name of the subsystem as recognized by the SSR framework
+ * nb:		notifier block structure used by the SSR framework
+ */
+struct restart_notifier_block {
+	const char *subsystem;
+	struct notifier_block nb;
+};
+
+/**
+ * struct configure_and_open_ch_work - Work structure used for opening
+ *				glink_ssr channels
+ * edge:	The G-Link edge obtained from the link state callback
+ * transport:	The G-Link transport obtained from the link state callback
+ * link_state:	The link state obtained from the link state callback
+ * ss_info:	Subsystem information structure containing the info for this
+ *		callback
+ * work:	Work structure
+ */
+struct configure_and_open_ch_work {
+	char edge[GLINK_NAME_SIZE];
+	char transport[GLINK_NAME_SIZE];
+	enum glink_link_state link_state;
+	struct subsys_info *ss_info;
+	struct work_struct work;
+};
+
+/**
+ * struct rx_done_ch_work - Work structure used for sending rx_done on
+ *				glink_ssr channels
+ * handle:	G-Link channel handle to be used for sending rx_done
+ * ptr:		Intent pointer data provided in notify rx function
+ * work:	Work structure
+ */
+struct rx_done_ch_work {
+	void *handle;
+	const void *ptr;
+	struct work_struct work;
+};
+
+/**
+ * struct close_ch_work - Work structure used for closing glink_ssr channels
+ * edge:	The G-Link edge name for the channel being closed
+ * handle:	G-Link channel handle to be closed
+ * work:	Work structure
+ */
+struct close_ch_work {
+	char edge[GLINK_NAME_SIZE];
+	void *handle;
+	struct work_struct work;
+};
+
+static int glink_ssr_restart_notifier_cb(struct notifier_block *this,
+				  unsigned long code,
+				  void *data);
+static void delete_ss_info_notify_list(struct subsys_info *ss_info);
+static int configure_and_open_channel(struct subsys_info *ss_info);
+static struct workqueue_struct *glink_ssr_wq;
+
+static LIST_HEAD(subsystem_list);
+static atomic_t responses_remaining = ATOMIC_INIT(0);
+static wait_queue_head_t waitqueue;
+
+/**
+ * cb_data_release() - Free cb_data and set to NULL
+ * @kref_ptr:	pointer to kref.
+ *
+ * This function releases cb_data.
+ */
+static inline void cb_data_release(struct kref *kref_ptr)
+{
+	struct ssr_notify_data *cb_data;
+
+	cb_data = container_of(kref_ptr, struct ssr_notify_data, cb_kref);
+	kfree(cb_data);
+}
+
+/**
+ * check_and_get_cb_data() - Try to get reference to kref of cb_data
+ * @ss_info:	pointer to subsystem info structure.
+ *
+ * Return: NULL if cb_data is NULL, pointer to cb_data otherwise
+ */
+static struct ssr_notify_data *check_and_get_cb_data(
+					struct subsys_info *ss_info)
+{
+	struct ssr_notify_data *cb_data;
+	unsigned long flags;
+
+	spin_lock_irqsave(&ss_info->cb_lock, flags);
+	if (ss_info->cb_data == NULL) {
+		GLINK_SSR_LOG("<SSR> %s: cb_data is NULL\n", __func__);
+		spin_unlock_irqrestore(&ss_info->cb_lock, flags);
+		return NULL;
+	}
+	kref_get(&ss_info->cb_data->cb_kref);
+	cb_data = ss_info->cb_data;
+	spin_unlock_irqrestore(&ss_info->cb_lock, flags);
+	return cb_data;
+}
+
+static void rx_done_cb_worker(struct work_struct *work)
+{
+	struct rx_done_ch_work *rx_done_work =
+		container_of(work, struct rx_done_ch_work, work);
+
+	glink_rx_done(rx_done_work->handle, rx_done_work->ptr, false);
+	kfree(rx_done_work);
+}
+
+static void link_state_cb_worker(struct work_struct *work)
+{
+	unsigned long flags;
+	struct configure_and_open_ch_work *ch_open_work =
+		container_of(work, struct configure_and_open_ch_work, work);
+	struct subsys_info *ss_info = ch_open_work->ss_info;
+
+	GLINK_SSR_LOG("<SSR> %s: LINK STATE[%d] %s:%s\n", __func__,
+			ch_open_work->link_state, ch_open_work->edge,
+			ch_open_work->transport);
+
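+	/*
+	 * On the first link-up, open the glink_ssr channel; once the open
+	 * succeeds the link state callback is no longer needed and is
+	 * unregistered.  On link-down, just mark the edge as down.
+	 */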
+	if (ss_info && ch_open_work->link_state == GLINK_LINK_STATE_UP) {
+		spin_lock_irqsave(&ss_info->link_up_lock, flags);
+		if (!ss_info->link_up) {
+			ss_info->link_up = true;
+			spin_unlock_irqrestore(&ss_info->link_up_lock, flags);
+			if (!configure_and_open_channel(ss_info)) {
+				glink_unregister_link_state_cb(
+						ss_info->link_state_handle);
+				ss_info->link_state_handle = NULL;
+			}
+			kfree(ch_open_work);
+			return;
+		}
+		spin_unlock_irqrestore(&ss_info->link_up_lock, flags);
+	} else {
+		if (ss_info) {
+			spin_lock_irqsave(&ss_info->link_up_lock, flags);
+			ss_info->link_up = false;
+			spin_unlock_irqrestore(&ss_info->link_up_lock, flags);
+			ss_info->handle = NULL;
+		} else {
+			GLINK_SSR_ERR("<SSR> %s: ss_info is NULL\n", __func__);
+		}
+	}
+
+	kfree(ch_open_work);
+}
+
+/**
+ * glink_ssr_link_state_cb() - Callback to receive link state updates
+ * @cb_info:	Information containing link & its state.
+ * @priv:	Private data passed during the link state registration.
+ *
+ * This function is called by the G-Link core to notify the glink_ssr module
+ * of link state updates. It is registered with the G-Link core by glink_ssr
+ * via glink_register_link_state_cb().
+ */
+static void glink_ssr_link_state_cb(struct glink_link_state_cb_info *cb_info,
+				      void *priv)
+{
+	struct subsys_info *ss_info;
+	struct configure_and_open_ch_work *open_ch_work;
+
+	if (!cb_info) {
+		GLINK_SSR_ERR("<SSR> %s: Missing cb_data\n", __func__);
+		return;
+	}
+
+	ss_info = get_info_for_edge(cb_info->edge);
+
+	open_ch_work = kmalloc(sizeof(*open_ch_work), GFP_KERNEL);
+	if (!open_ch_work) {
+		GLINK_SSR_ERR("<SSR> %s: Could not allocate open_ch_work\n",
+				__func__);
+		return;
+	}
+
+	strlcpy(open_ch_work->edge, cb_info->edge, GLINK_NAME_SIZE);
+	strlcpy(open_ch_work->transport, cb_info->transport, GLINK_NAME_SIZE);
+	open_ch_work->link_state = cb_info->link_state;
+	open_ch_work->ss_info = ss_info;
+
+	INIT_WORK(&open_ch_work->work, link_state_cb_worker);
+	queue_work(glink_ssr_wq, &open_ch_work->work);
+}
+
+/**
+ * glink_ssr_notify_rx() - RX Notification callback
+ * @handle:	G-Link channel handle
+ * @priv:	Private callback data
+ * @pkt_priv:	Private packet data
+ * @ptr:	Pointer to the data received
+ * @size:	Size of the data received
+ *
+ * This function is a notification callback from the G-Link core that data
+ * has been received from the remote side. The data is validated to make
+ * sure it is a cleanup_done message, and is processed accordingly if it is.
+ */
+void glink_ssr_notify_rx(void *handle, const void *priv, const void *pkt_priv,
+		const void *ptr, size_t size)
+{
+	struct do_cleanup_msg *do_cleanup_data =
+				(struct do_cleanup_msg *)pkt_priv;
+	struct ssr_notify_data *cb_data = (struct ssr_notify_data *)priv;
+	struct cleanup_done_msg *resp = (struct cleanup_done_msg *)ptr;
+	struct rx_done_ch_work *rx_done_work;
+
+	rx_done_work = kmalloc(sizeof(*rx_done_work), GFP_ATOMIC);
+	if (!rx_done_work) {
+		GLINK_SSR_ERR("<SSR> %s: Could not allocate rx_done_work\n",
+				__func__);
+		return;
+	}
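+	/*
+	 * Verify this really is the cleanup_done reply to the do_cleanup
+	 * request we sent: version and sequence number must match the
+	 * request that was passed as pkt_priv.
+	 */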
+	if (unlikely(!do_cleanup_data))
+		goto missing_do_cleanup_data;
+	if (unlikely(!cb_data))
+		goto missing_cb_data;
+	if (unlikely(!resp))
+		goto missing_response;
+	if (unlikely(resp->version != do_cleanup_data->version))
+		goto version_mismatch;
+	if (unlikely(resp->seq_num != do_cleanup_data->seq_num))
+		goto invalid_seq_number;
+	if (unlikely(resp->response != GLINK_SSR_CLEANUP_DONE))
+		goto wrong_response;
+
+	cb_data->responded = true;
+	atomic_dec(&responses_remaining);
+
+	GLINK_SSR_LOG(
+		"<SSR> %s: Response from %s resp[%d] version[%d] seq_num[%d] restarted[%s]\n",
+			__func__, cb_data->edge, resp->response,
+			resp->version, resp->seq_num,
+			do_cleanup_data->name);
+
+	kfree(do_cleanup_data);
+	rx_done_work->ptr = ptr;
+	rx_done_work->handle = handle;
+	INIT_WORK(&rx_done_work->work, rx_done_cb_worker);
+	queue_work(glink_ssr_wq, &rx_done_work->work);
+	wake_up(&waitqueue);
+	return;
+
+missing_cb_data:
+	panic("%s: Missing cb_data!\n", __func__);
+	return;
+missing_do_cleanup_data:
+	panic("%s: Missing do_cleanup data!\n", __func__);
+	return;
+missing_response:
+	GLINK_SSR_ERR("<SSR> %s: Missing response data\n", __func__);
+	kfree(rx_done_work);
+	return;
+version_mismatch:
+	GLINK_SSR_ERR("<SSR> %s: Version mismatch. %s[%d], %s[%d]\n", __func__,
+			"do_cleanup version", do_cleanup_data->version,
+			"cleanup_done version", resp->version);
+	kfree(rx_done_work);
+	return;
+invalid_seq_number:
+	GLINK_SSR_ERR("<SSR> %s: Invalid seq. number. %s[%d], %s[%d]\n",
+			__func__, "do_cleanup seq num",
+			do_cleanup_data->seq_num,
+			"cleanup_done seq_num", resp->seq_num);
+	kfree(rx_done_work);
+	return;
+wrong_response:
+	GLINK_SSR_ERR("<SSR> %s: Not a cleanup_done message. %s[%d]\n", __func__,
+			"cleanup_done response", resp->response);
+	kfree(rx_done_work);
+	return;
+}
+
+/**
+ * glink_ssr_notify_tx_done() - Transmit finished notification callback
+ * @handle:	G-Link channel handle
+ * @priv:	Private callback data
+ * @pkt_priv:	Private packet data
+ * @ptr:	Pointer to the data that finished transmitting
+ *
+ * This function is a notification callback from the G-Link core that data
+ * we sent has finished transmitting.
+ */
+void glink_ssr_notify_tx_done(void *handle, const void *priv,
+		const void *pkt_priv, const void *ptr)
+{
+	struct ssr_notify_data *cb_data = (struct ssr_notify_data *)priv;
+
+	if (unlikely(!cb_data)) {
+		panic("%s: cb_data is NULL!\n", __func__);
+		return;
+	}
+
+	GLINK_SSR_LOG("<SSR> %s: Notified %s of restart\n",
+		__func__, cb_data->edge);
+
+	cb_data->tx_done = true;
+}
+
+void close_ch_worker(struct work_struct *work)
+{
+	unsigned long flags;
+	void *link_state_handle;
+	struct subsys_info *ss_info;
+	struct close_ch_work *close_work =
+		container_of(work, struct close_ch_work, work);
+
+	glink_close(close_work->handle);
+
+	ss_info = get_info_for_edge(close_work->edge);
+	BUG_ON(!ss_info);
+
+	spin_lock_irqsave(&ss_info->link_up_lock, flags);
+	ss_info->link_up = false;
+	spin_unlock_irqrestore(&ss_info->link_up_lock, flags);
+
+	BUG_ON(ss_info->link_state_handle != NULL);
+	link_state_handle = glink_register_link_state_cb(ss_info->link_info,
+			NULL);
+
+	if (IS_ERR_OR_NULL(link_state_handle))
+		GLINK_SSR_ERR("<SSR> %s: %s, ret[%d]\n", __func__,
+				"Couldn't register link state cb",
+				(int)PTR_ERR(link_state_handle));
+	else
+		ss_info->link_state_handle = link_state_handle;
+
+	BUG_ON(!ss_info->cb_data);
+	spin_lock_irqsave(&ss_info->cb_lock, flags);
+	kref_put(&ss_info->cb_data->cb_kref, cb_data_release);
+	ss_info->cb_data = NULL;
+	spin_unlock_irqrestore(&ss_info->cb_lock, flags);
+	kfree(close_work);
+}
+
+/**
+ * glink_ssr_notify_state() - Channel state notification callback
+ * @handle:	G-Link channel handle
+ * @priv:	Private callback data
+ * @event:	The state that has been transitioned to
+ *
+ * This function is a notification callback from the G-Link core that the
+ * channel state has changed.
+ */
+void glink_ssr_notify_state(void *handle, const void *priv, unsigned event)
+{
+	struct ssr_notify_data *cb_data = (struct ssr_notify_data *)priv;
+	struct close_ch_work *close_work;
+
+	if (!cb_data) {
+		GLINK_SSR_ERR("<SSR> %s: cb_data is NULL\n", __func__);
+	} else {
+		GLINK_SSR_LOG("<SSR> %s: event[%d]\n",
+				__func__, event);
+		cb_data->event = event;
+		if (event == GLINK_REMOTE_DISCONNECTED) {
+			close_work =
+				kmalloc(sizeof(struct close_ch_work),
+						GFP_KERNEL);
+			if (!close_work) {
+				GLINK_SSR_ERR(
+					"<SSR> %s: Could not allocate %s\n",
+						__func__, "close work");
+				return;
+			}
+
+			strlcpy(close_work->edge, cb_data->edge,
+					sizeof(close_work->edge));
+			close_work->handle = handle;
+			INIT_WORK(&close_work->work, close_ch_worker);
+			queue_work(glink_ssr_wq, &close_work->work);
+		}
+	}
+}
+
+/**
+ * glink_ssr_notify_rx_intent_req() - RX intent request notification callback
+ * @handle:	G-Link channel handle
+ * @priv:	Private callback data
+ * @req_size:	The size of the requested intent
+ *
+ * This function is a notification callback from the G-Link core of the remote
+ * side's request for an RX intent to be queued.
+ *
+ * Return: Boolean indicating whether or not the request was successfully
+ *         received
+ */
+bool glink_ssr_notify_rx_intent_req(void *handle, const void *priv,
+		size_t req_size)
+{
+	struct ssr_notify_data *cb_data = (struct ssr_notify_data *)priv;
+
+	if (!cb_data) {
+		GLINK_SSR_ERR("<SSR> %s: cb_data is NULL\n", __func__);
+		return false;
+	} else {
+		GLINK_SSR_LOG("<SSR> %s: rx_intent_req of size %zu\n",
+				__func__, req_size);
+		return true;
+	}
+}
+
+/**
+ * glink_ssr_restart_notifier_cb() - SSR restart notifier callback function
+ * @this:	Notifier block used by the SSR framework
+ * @code:	The SSR code for which stage of restart is occurring
+ * @data:	Structure containing private data - not used here.
+ *
+ * This function is a callback for the SSR framework. From here we initiate
+ * our handling of SSR.
+ *
+ * Return: Status of SSR handling
+ */
+static int glink_ssr_restart_notifier_cb(struct notifier_block *this,
+				  unsigned long code,
+				  void *data)
+{
+	int ret = 0;
+	struct subsys_info *ss_info = NULL;
+	struct restart_notifier_block *notifier =
+		container_of(this, struct restart_notifier_block, nb);
+
+	if (code == SUBSYS_AFTER_SHUTDOWN) {
+		GLINK_SSR_LOG("<SSR> %s: %s: subsystem restart for %s\n",
+				__func__, "SUBSYS_AFTER_SHUTDOWN",
+				notifier->subsystem);
+		ss_info = get_info_for_subsystem(notifier->subsystem);
+		if (ss_info == NULL) {
+			GLINK_SSR_ERR("<SSR> %s: ss_info is NULL\n", __func__);
+			return -EINVAL;
+		}
+
+		glink_ssr(ss_info->edge);
+		ret = notify_for_subsystem(ss_info);
+
+		if (ret) {
+			GLINK_SSR_ERR("<SSR>: %s: %s, ret[%d]\n", __func__,
+					"Subsystem notification failed", ret);
+			return ret;
+		}
+	} else if (code == SUBSYS_AFTER_POWERUP) {
+		GLINK_SSR_LOG("<SSR> %s: %s: subsystem restart for %s\n",
+				__func__, "SUBSYS_AFTER_POWERUP",
+				notifier->subsystem);
+		ss_info = get_info_for_subsystem(notifier->subsystem);
+		if (ss_info == NULL) {
+			GLINK_SSR_ERR("<SSR> %s: ss_info is NULL\n", __func__);
+			return -EINVAL;
+		}
+
+		glink_subsys_up(ss_info->edge);
+	}
+	return NOTIFY_DONE;
+}
+
+/**
+ * notify_for_subsystem() - Notify other subsystems that a subsystem is being
+ *                          restarted
+ * @ss_info:	Subsystem info structure for the subsystem being restarted
+ *
+ * This function sends notifications to affected subsystems that the subsystem
+ * in ss_info is being restarted, and waits for the cleanup done response from
+ * all of those subsystems. It also initiates any local cleanup that is
+ * necessary.
+ *
+ * Return: 0 on success, standard error codes otherwise
+ */
+int notify_for_subsystem(struct subsys_info *ss_info)
+{
+	struct subsys_info *ss_info_channel;
+	struct subsys_info_leaf *ss_leaf_entry;
+	struct do_cleanup_msg *do_cleanup_data;
+	void *handle;
+	int wait_ret;
+	int ret;
+	unsigned long flags;
+
+	if (!ss_info) {
+		GLINK_SSR_ERR("<SSR> %s: ss_info structure invalid\n",
+				__func__);
+		return -EINVAL;
+	}
+
+	/*
+	 * No locking is needed here because ss_info->notify_list_len is
+	 * only modified during setup.
+	 */
+	atomic_set(&responses_remaining, ss_info->notify_list_len);
+	init_waitqueue_head(&waitqueue);
+	notifications_successful = true;
+
+	list_for_each_entry(ss_leaf_entry, &ss_info->notify_list,
+			notify_list_node) {
+		GLINK_SSR_LOG(
+			"<SSR> %s: Notifying: %s:%s of %s restart, seq_num[%d]\n",
+				__func__, ss_leaf_entry->edge,
+				ss_leaf_entry->xprt, ss_info->edge,
+				sequence_number);
+
+		ss_info_channel =
+			get_info_for_subsystem(ss_leaf_entry->ssr_name);
+		if (ss_info_channel == NULL) {
+			GLINK_SSR_ERR(
+				"<SSR> %s: unable to find subsystem name\n",
+					__func__);
+			return -ENODEV;
+		}
+		handle = ss_info_channel->handle;
+		ss_leaf_entry->cb_data = check_and_get_cb_data(
+							ss_info_channel);
+		if (!ss_leaf_entry->cb_data) {
+			GLINK_SSR_LOG("<SSR> %s: CB data is NULL\n", __func__);
+			atomic_dec(&responses_remaining);
+			continue;
+		}
+
+		spin_lock_irqsave(&ss_info->link_up_lock, flags);
+		if (IS_ERR_OR_NULL(ss_info_channel->handle) ||
+				!ss_info_channel->link_up ||
+				ss_leaf_entry->cb_data->event
+						!= GLINK_CONNECTED) {
+
+			GLINK_SSR_LOG(
+				"<SSR> %s: %s:%s %s[%d], %s[%p], %s[%d]\n",
+				__func__, ss_leaf_entry->edge, "Not connected",
+				"resp. remaining",
+				atomic_read(&responses_remaining), "handle",
+				ss_info_channel->handle, "link_up",
+				ss_info_channel->link_up);
+
+			spin_unlock_irqrestore(&ss_info->link_up_lock, flags);
+			atomic_dec(&responses_remaining);
+			kref_put(&ss_leaf_entry->cb_data->cb_kref,
+							cb_data_release);
+			continue;
+		}
+		spin_unlock_irqrestore(&ss_info->link_up_lock, flags);
+
+		do_cleanup_data = kmalloc(sizeof(struct do_cleanup_msg),
+				GFP_KERNEL);
+		if (!do_cleanup_data) {
+			GLINK_SSR_ERR(
+				"%s %s: Could not allocate do_cleanup_msg\n",
+				"<SSR>", __func__);
+			kref_put(&ss_leaf_entry->cb_data->cb_kref,
+							cb_data_release);
+			return -ENOMEM;
+		}
+
+		do_cleanup_data->version = 0;
+		do_cleanup_data->command = GLINK_SSR_DO_CLEANUP;
+		do_cleanup_data->seq_num = sequence_number;
+		do_cleanup_data->name_len = strlen(ss_info->edge);
+		strlcpy(do_cleanup_data->name, ss_info->edge,
+				do_cleanup_data->name_len + 1);
+
+		ret = glink_queue_rx_intent(handle, do_cleanup_data,
+				sizeof(struct cleanup_done_msg));
+		if (ret) {
+			GLINK_SSR_ERR(
+				"%s %s: %s, ret[%d], resp. remaining[%d]\n",
+				"<SSR>", __func__,
+				"queue_rx_intent failed", ret,
+				atomic_read(&responses_remaining));
+			kfree(do_cleanup_data);
+
+			if (!strcmp(ss_leaf_entry->ssr_name, "rpm"))
+				panic("%s: Could not queue intent for RPM!\n",
+						__func__);
+			atomic_dec(&responses_remaining);
+			kref_put(&ss_leaf_entry->cb_data->cb_kref,
+							cb_data_release);
+			continue;
+		}
+
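+		/*
+		 * do_cleanup_data doubles as the tx payload and pkt_priv so
+		 * glink_ssr_notify_rx() can match the cleanup_done response
+		 * to this request; it is freed there on success.
+		 */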
+		if (strcmp(ss_leaf_entry->ssr_name, "rpm"))
+			ret = glink_tx(handle, do_cleanup_data,
+					do_cleanup_data,
+					sizeof(*do_cleanup_data),
+					GLINK_TX_REQ_INTENT);
+		else
+			ret = glink_tx(handle, do_cleanup_data,
+					do_cleanup_data,
+					sizeof(*do_cleanup_data),
+					GLINK_TX_SINGLE_THREADED);
+
+		if (ret) {
+			GLINK_SSR_ERR("<SSR> %s: tx failed, ret[%d], %s[%d]\n",
+					__func__, ret, "resp. remaining",
+					atomic_read(&responses_remaining));
+			kfree(do_cleanup_data);
+
+			if (!strcmp(ss_leaf_entry->ssr_name, "rpm"))
+				panic("%s: glink_tx() to RPM failed!\n",
+						__func__);
+			atomic_dec(&responses_remaining);
+			kref_put(&ss_leaf_entry->cb_data->cb_kref,
+							cb_data_release);
+			continue;
+		}
+		sequence_number++;
+		kref_put(&ss_leaf_entry->cb_data->cb_kref, cb_data_release);
+	}
+
+	wait_ret = wait_event_timeout(waitqueue,
+			atomic_read(&responses_remaining) == 0,
+			GLINK_SSR_REPLY_TIMEOUT);
+
+	list_for_each_entry(ss_leaf_entry, &ss_info->notify_list,
+			notify_list_node) {
+		ss_info_channel =
+			get_info_for_subsystem(ss_leaf_entry->ssr_name);
+		if (ss_info_channel == NULL) {
+			GLINK_SSR_ERR(
+				"<SSR> %s: unable to find subsystem name\n",
+					__func__);
+			continue;
+		}
+
+		ss_leaf_entry->cb_data = check_and_get_cb_data(
+							ss_info_channel);
+		if (!ss_leaf_entry->cb_data) {
+			GLINK_SSR_LOG("<SSR> %s: CB data is NULL\n", __func__);
+			continue;
+		}
+		if (!wait_ret && !IS_ERR_OR_NULL(ss_leaf_entry->cb_data)
+				&& !ss_leaf_entry->cb_data->responded) {
+			GLINK_SSR_ERR("%s %s: Subsystem %s %s\n",
+				"<SSR>", __func__, ss_leaf_entry->edge,
+				"failed to respond. Restarting.");
+
+			notifications_successful = false;
+
+			/* Check for RPM, as it can't be restarted */
+			if (!strcmp(ss_leaf_entry->ssr_name, "rpm"))
+				panic("%s: RPM failed to respond!\n", __func__);
+		}
+		if (!IS_ERR_OR_NULL(ss_leaf_entry->cb_data))
+			ss_leaf_entry->cb_data->responded = false;
+		kref_put(&ss_leaf_entry->cb_data->cb_kref, cb_data_release);
+	}
+	complete(&notifications_successful_complete);
+	return 0;
+}
+EXPORT_SYMBOL(notify_for_subsystem);
+
+/**
+ * configure_and_open_channel() - configure and open a G-Link channel for
+ *                                the given subsystem
+ * @ss_info:	The subsys_info structure where the channel will be stored
+ *
+ * Return: 0 on success, standard error codes otherwise
+ */
+static int configure_and_open_channel(struct subsys_info *ss_info)
+{
+	struct glink_open_config open_cfg;
+	struct ssr_notify_data *cb_data = NULL;
+	void *handle = NULL;
+	unsigned long flags;
+
+	if (!ss_info) {
+		GLINK_SSR_ERR("<SSR> %s: ss_info structure invalid\n",
+				__func__);
+		return -EINVAL;
+	}
+
+	cb_data = kmalloc(sizeof(struct ssr_notify_data), GFP_KERNEL);
+	if (!cb_data) {
+		GLINK_SSR_ERR("<SSR> %s: Could not allocate cb_data\n",
+				__func__);
+		return -ENOMEM;
+	}
+	cb_data->responded = false;
+	cb_data->event = GLINK_SSR_EVENT_INIT;
+	cb_data->edge = ss_info->edge;
+	spin_lock_irqsave(&ss_info->cb_lock, flags);
+	ss_info->cb_data = cb_data;
+	kref_init(&cb_data->cb_kref);
+	spin_unlock_irqrestore(&ss_info->cb_lock, flags);
+
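+	/*
+	 * cb_data is handed to G-Link as the private pointer so every
+	 * notification callback below can recover its edge context.
+	 */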
+	memset(&open_cfg, 0, sizeof(struct glink_open_config));
+
+	if (ss_info->xprt) {
+		open_cfg.transport = ss_info->xprt;
+	} else {
+		open_cfg.transport = NULL;
+		open_cfg.options = GLINK_OPT_INITIAL_XPORT;
+	}
+	open_cfg.edge = ss_info->edge;
+	open_cfg.name = "glink_ssr";
+	open_cfg.notify_rx = glink_ssr_notify_rx;
+	open_cfg.notify_tx_done = glink_ssr_notify_tx_done;
+	open_cfg.notify_state = glink_ssr_notify_state;
+	open_cfg.notify_rx_intent_req = glink_ssr_notify_rx_intent_req;
+	open_cfg.priv = ss_info->cb_data;
+	open_cfg.rx_intent_req_timeout_ms = GLINK_SSR_INTENT_REQ_TIMEOUT_MS;
+
+	handle = glink_open(&open_cfg);
+	if (IS_ERR_OR_NULL(handle)) {
+		GLINK_SSR_ERR(
+			"<SSR> %s:%s %s: unable to open channel, ret[%d]\n",
+				 open_cfg.edge, open_cfg.name, __func__,
+				 (int)PTR_ERR(handle));
+		kfree(cb_data);
+		cb_data = NULL;
+		ss_info->cb_data = NULL;
+		return PTR_ERR(handle);
+	}
+	ss_info->handle = handle;
+	return 0;
+}
+
+/**
+ * get_info_for_subsystem() - Retrieve information about a subsystem from the
+ *                            global subsystem_info_list
+ * @subsystem:	The name of the subsystem recognized by the SSR
+ *		framework
+ *
+ * Return: subsys_info structure containing info for the requested subsystem;
+ *         NULL if no structure can be found for the requested subsystem
+ */
+struct subsys_info *get_info_for_subsystem(const char *subsystem)
+{
+	struct subsys_info *ss_info_entry;
+
+	list_for_each_entry(ss_info_entry, &subsystem_list,
+			subsystem_list_node) {
+		if (!strcmp(subsystem, ss_info_entry->ssr_name))
+			return ss_info_entry;
+	}
+
+	return NULL;
+}
+EXPORT_SYMBOL(get_info_for_subsystem);
+
+/**
+ * get_info_for_edge() - Retrieve information about a subsystem from the
+ *                       global subsystem_info_list
+ * @edge:	The name of the edge recognized by G-Link
+ *
+ * Return: subsys_info structure containing info for the requested subsystem;
+ *         NULL if no structure can be found for the requested subsystem
+ */
+struct subsys_info *get_info_for_edge(const char *edge)
+{
+	struct subsys_info *ss_info_entry;
+
+	list_for_each_entry(ss_info_entry, &subsystem_list,
+			subsystem_list_node) {
+		if (!strcmp(edge, ss_info_entry->edge))
+			return ss_info_entry;
+	}
+
+	return NULL;
+}
+EXPORT_SYMBOL(get_info_for_edge);
+
+/**
+ * glink_ssr_get_seq_num() - Get the current SSR sequence number
+ *
+ * Return: The current SSR sequence number
+ */
+uint32_t glink_ssr_get_seq_num(void)
+{
+	return sequence_number;
+}
+EXPORT_SYMBOL(glink_ssr_get_seq_num);
+
+/**
+ * delete_ss_info_notify_list() - Delete the notify list for a subsystem
+ * @ss_info:	The subsystem info structure
+ */
+static void delete_ss_info_notify_list(struct subsys_info *ss_info)
+{
+	struct subsys_info_leaf *leaf, *temp;
+
+	list_for_each_entry_safe(leaf, temp, &ss_info->notify_list,
+			notify_list_node) {
+		list_del(&leaf->notify_list_node);
+		kfree(leaf);
+	}
+}
+
+/**
+ * glink_ssr_wait_cleanup_done() - Get the value of the
+ *                                 notifications_successful flag.
+ * @ssr_timeout_multiplier: timeout multiplier for waiting on all processors
+ *
+ * Return: True if cleanup_done received from all processors, false otherwise
+ */
+bool glink_ssr_wait_cleanup_done(unsigned ssr_timeout_multiplier)
+{
+	int wait_ret =
+		wait_for_completion_timeout(&notifications_successful_complete,
+			ssr_timeout_multiplier * GLINK_SSR_REPLY_TIMEOUT);
+	reinit_completion(&notifications_successful_complete);
+
+	if (!notifications_successful || !wait_ret)
+		return false;
+	else
+		return true;
+}
+EXPORT_SYMBOL(glink_ssr_wait_cleanup_done);
+
+/**
+ * glink_ssr_probe() - G-Link SSR platform device probe function
+ * @pdev:	Pointer to the platform device structure
+ *
+ * This function parses DT for information on which subsystems should be
+ * notified when each subsystem undergoes SSR. The global subsystem information
+ * list is built from this information. In addition, SSR notifier callback
+ * functions are registered here for the necessary subsystems.
+ *
+ * Return: 0 on success, standard error codes otherwise
+ */
+static int glink_ssr_probe(struct platform_device *pdev)
+{
+	struct device_node *node;
+	struct device_node *phandle_node;
+	struct restart_notifier_block *nb;
+	struct subsys_info *ss_info;
+	struct subsys_info_leaf *ss_info_leaf = NULL;
+	struct glink_link_info *link_info;
+	char *key;
+	const char *edge;
+	const char *subsys_name;
+	const char *xprt;
+	void *handle;
+	void *link_state_handle;
+	int phandle_index = 0;
+	int ret = 0;
+
+	if (!pdev) {
+		GLINK_SSR_ERR("<SSR> %s: pdev is NULL\n", __func__);
+		ret = -EINVAL;
+		goto pdev_null_or_ss_info_alloc_failed;
+	}
+
+	node = pdev->dev.of_node;
+
+	ss_info = kmalloc(sizeof(*ss_info), GFP_KERNEL);
+	if (!ss_info) {
+		GLINK_SSR_ERR("<SSR> %s: %s\n", __func__,
+			"Could not allocate subsystem info structure");
+		ret = -ENOMEM;
+		goto pdev_null_or_ss_info_alloc_failed;
+	}
+	INIT_LIST_HEAD(&ss_info->notify_list);
+
+	link_info = kmalloc(sizeof(struct glink_link_info),
+			GFP_KERNEL);
+	if (!link_info) {
+		GLINK_SSR_ERR("<SSR> %s: %s\n", __func__,
+			"Could not allocate link info structure\n");
+		ret = -ENOMEM;
+		goto link_info_alloc_failed;
+	}
+	ss_info->link_info = link_info;
+
+	key = "label";
+	subsys_name = of_get_property(node, key, NULL);
+	if (!subsys_name) {
+		GLINK_SSR_ERR("<SSR> %s: missing key %s\n", __func__, key);
+		ret = -ENODEV;
+		goto label_or_edge_missing;
+	}
+
+	key = "qcom,edge";
+	edge = of_get_property(node, key, NULL);
+	if (!edge) {
+		GLINK_SSR_ERR("<SSR> %s: missing key %s\n", __func__, key);
+		ret = -ENODEV;
+		goto label_or_edge_missing;
+	}
+
+	key = "qcom,xprt";
+	xprt = of_get_property(node, key, NULL);
+	if (!xprt)
+		GLINK_SSR_LOG(
+			"%s %s: no transport present for subsys/edge %s/%s\n",
+			"<SSR>", __func__, subsys_name, edge);
+
+	ss_info->ssr_name = subsys_name;
+	ss_info->edge = edge;
+	ss_info->xprt = xprt;
+	ss_info->notify_list_len = 0;
+	ss_info->link_info->transport = xprt;
+	ss_info->link_info->edge = edge;
+	ss_info->link_info->glink_link_state_notif_cb = glink_ssr_link_state_cb;
+	ss_info->link_up = false;
+	ss_info->handle = NULL;
+	ss_info->link_state_handle = NULL;
+	ss_info->cb_data = NULL;
+	spin_lock_init(&ss_info->link_up_lock);
+	spin_lock_init(&ss_info->cb_lock);
+
+	nb = kmalloc(sizeof(struct restart_notifier_block), GFP_KERNEL);
+	if (!nb) {
+		GLINK_SSR_ERR("<SSR> %s: Could not allocate notifier block\n",
+				__func__);
+		ret = -ENOMEM;
+		goto label_or_edge_missing;
+	}
+
+	nb->subsystem = subsys_name;
+	nb->nb.notifier_call = glink_ssr_restart_notifier_cb;
+
+	handle = subsys_notif_register_notifier(nb->subsystem, &nb->nb);
+	if (IS_ERR_OR_NULL(handle)) {
+		GLINK_SSR_ERR("<SSR> %s: Could not register SSR notifier cb\n",
+				__func__);
+		ret = -EINVAL;
+		goto nb_registration_fail;
+	}
+
+	key = "qcom,notify-edges";
+	while (true) {
+		phandle_node = of_parse_phandle(node, key, phandle_index++);
+		/* first lookup failed: the property is absent or empty */
+		if (!phandle_node && phandle_index == 1) {
+			GLINK_SSR_ERR(
+				"<SSR> %s: qcom,notify-edges is not present\n",
+				__func__);
+			ret = -ENODEV;
+			goto notify_edges_not_present;
+		}
+
+		if (!phandle_node)
+			break;
+
+		ss_info_leaf = kmalloc(sizeof(struct subsys_info_leaf),
+				GFP_KERNEL);
+		if (!ss_info_leaf) {
+			GLINK_SSR_ERR(
+				"<SSR> %s: Could not allocate subsys_info_leaf\n",
+				__func__);
+			ret = -ENOMEM;
+			goto notify_edges_not_present;
+		}
+
+		subsys_name = of_get_property(phandle_node, "label", NULL);
+		edge = of_get_property(phandle_node, "qcom,edge", NULL);
+		xprt = of_get_property(phandle_node, "qcom,xprt", NULL);
+
+		of_node_put(phandle_node);
+
+		if (!subsys_name || !edge) {
+			GLINK_SSR_ERR(
+				"%s, %s: Found DT node with invalid data!\n",
+				"<SSR>", __func__);
+			ret = -EINVAL;
+			goto invalid_dt_node;
+		}
+
+		ss_info_leaf->ssr_name = subsys_name;
+		ss_info_leaf->edge = edge;
+		ss_info_leaf->xprt = xprt;
+		list_add_tail(&ss_info_leaf->notify_list_node,
+				&ss_info->notify_list);
+		ss_info->notify_list_len++;
+		/* now owned by notify_list; avoid double free in error path */
+		ss_info_leaf = NULL;
+	}
+
+	list_add_tail(&ss_info->subsystem_list_node, &subsystem_list);
+
+	link_state_handle = glink_register_link_state_cb(ss_info->link_info,
+			NULL);
+	if (IS_ERR_OR_NULL(link_state_handle)) {
+		GLINK_SSR_ERR("<SSR> %s: Could not register link state cb\n",
+				__func__);
+		ret = link_state_handle ? PTR_ERR(link_state_handle) : -EINVAL;
+		goto link_state_register_fail;
+	}
+	}
+	ss_info->link_state_handle = link_state_handle;
+
+	return 0;
+
+link_state_register_fail:
+	list_del(&ss_info->subsystem_list_node);
+invalid_dt_node:
+	kfree(ss_info_leaf);
+notify_edges_not_present:
+	subsys_notif_unregister_notifier(handle, &nb->nb);
+	delete_ss_info_notify_list(ss_info);
+nb_registration_fail:
+	kfree(nb);
+label_or_edge_missing:
+	kfree(link_info);
+link_info_alloc_failed:
+	kfree(ss_info);
+pdev_null_or_ss_info_alloc_failed:
+	return ret;
+}
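+
+/*
+ * Example of the device tree node this probe consumes (node names and
+ * phandles are illustrative, not taken from a real board file):
+ *
+ *	glink_modem_ssr: qcom,glink-ssr-modem {
+ *		compatible = "qcom,glink_ssr";
+ *		label = "modem";
+ *		qcom,edge = "mpss";
+ *		qcom,xprt = "smem";
+ *		qcom,notify-edges = <&glink_adsp_ssr>, <&glink_rpm_ssr>;
+ *	};
+ *
+ * Each qcom,notify-edges entry points at another qcom,glink_ssr node and
+ * becomes one subsys_info_leaf on this subsystem's notify_list.
+ */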
+
+static const struct of_device_id match_table[] = {
+	{ .compatible = "qcom,glink_ssr" },
+	{},
+};
+
+static struct platform_driver glink_ssr_driver = {
+	.probe = glink_ssr_probe,
+	.driver = {
+		.name = "msm_glink_ssr",
+		.owner = THIS_MODULE,
+		.of_match_table = match_table,
+	},
+};
+
+static int glink_ssr_init(void)
+{
+	int ret;
+
+	glink_ssr_log_ctx =
+		ipc_log_context_create(NUM_LOG_PAGES, "glink_ssr", 0);
+	glink_ssr_wq = create_singlethread_workqueue("glink_ssr_wq");
+	if (!glink_ssr_wq)
+		return -ENOMEM;
+
+	/* initialize shared state before the driver can start probing */
+	notifications_successful = false;
+	init_completion(&notifications_successful_complete);
+
+	ret = platform_driver_register(&glink_ssr_driver);
+	if (ret) {
+		GLINK_SSR_ERR("<SSR> %s: %s ret: %d\n", __func__,
+				"glink_ssr driver registration failed", ret);
+		destroy_workqueue(glink_ssr_wq);
+	}
+
+	return ret;
+}
+
+module_init(glink_ssr_init);
+
+MODULE_DESCRIPTION("MSM Generic Link (G-Link) SSR Module");
+MODULE_LICENSE("GPL v2");
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/soc/qcom/glink_xprt_if.h	2019-01-22 16:16:26.651274914 +0100
@@ -0,0 +1,207 @@
+/* Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#ifndef _SOC_QCOM_GLINK_XPRT_IF_H_
+#define _SOC_QCOM_GLINK_XPRT_IF_H_
+
+#include <linux/bitops.h>
+#include <linux/list.h>
+#include <linux/types.h>
+
+struct glink_core_xprt_ctx;
+struct glink_core_if;
+struct channel_ctx;
+struct glink_core_rx_intent;
+
+enum buf_type {
+	LINEAR = 0,
+	VECTOR,
+};
+
+enum xprt_ids {
+	SMEM_XPRT_ID = 100,
+	SPIV2_XPRT_ID = SMEM_XPRT_ID,
+	SMD_TRANS_XPRT_ID = 200,
+	LLOOP_XPRT_ID = 300,
+	MOCK_XPRT_HIGH_ID = 390,
+	MOCK_XPRT_ID = 400,
+	MOCK_XPRT_LOW_ID = 410,
+};
+
+#define GCAP_SIGNALS		BIT(0)
+#define GCAP_INTENTLESS		BIT(1)
+#define GCAP_TRACER_PKT		BIT(2)
+#define GCAP_AUTO_QUEUE_RX_INT	BIT(3)
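+
+/*
+ * The capability words exchanged during version negotiation are bitmasks of
+ * the GCAP_* flags above; e.g. a transport advertising signal and tracer
+ * packet support (illustrative only):
+ *
+ *	uint32_t features = GCAP_SIGNALS | GCAP_TRACER_PKT;
+ */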
+
+/**
+ * struct glink_core_tx_pkt - Transmit Packet information
+ * @list_node:		Node in the channel's transmit queue.
+ * @list_done:		Node in the channel's acknowledgment queue.
+ * @pkt_priv:		Private information specific to the packet.
+ * @data:		Pointer to the buffer containing the data.
+ * @riid:		Remote receive intent used to transmit the packet.
+ * @rcid:		Remote channel receiving the packet.
+ * @size:		Total size of the data in the packet.
+ * @tx_len:		Data length to transmit in the current transmit slot.
+ * @size_remaining:	Remaining size of the data in the packet.
+ * @intent_size:	Receive intent size queued by the remote side.
+ * @tracer_pkt:		Flag to indicate if the packet is a tracer packet.
+ * @iovec:		Pointer to the vector buffer packet.
+ * @vprovider:		Packet-specific virtual buffer provider function.
+ * @pprovider:		Packet-specific physical buffer provider function.
+ * @cookie:		Transport-specific cookie
+ * @pkt_ref:		Active references to the packet.
+ */
+struct glink_core_tx_pkt {
+	struct list_head list_node;
+	struct list_head list_done;
+	const void *pkt_priv;
+	const void *data;
+	uint32_t riid;
+	uint32_t rcid;
+	uint32_t size;
+	uint32_t tx_len;
+	uint32_t size_remaining;
+	size_t intent_size;
+	bool tracer_pkt;
+	void *iovec;
+	void * (*vprovider)(void *iovec, size_t offset, size_t *size);
+	void * (*pprovider)(void *iovec, size_t offset, size_t *size);
+	void *cookie;
+	struct rwref_lock pkt_ref;
+};
+
+/**
+ * struct glink_transport_if - transport interface registered with the
+ *                             G-Link core
+ *
+ * Note: each call to register the interface must pass a unique
+ * instance of this data.
+ */
+struct glink_transport_if {
+	/* Negotiation */
+	void (*tx_cmd_version)(struct glink_transport_if *if_ptr,
+			uint32_t version,
+			uint32_t features);
+	void (*tx_cmd_version_ack)(struct glink_transport_if *if_ptr,
+			uint32_t version,
+			uint32_t features);
+	uint32_t (*set_version)(struct glink_transport_if *if_ptr,
+			uint32_t version,
+			uint32_t features);
+
+	/* channel state */
+	int (*tx_cmd_ch_open)(struct glink_transport_if *if_ptr, uint32_t lcid,
+			const char *name, uint16_t req_xprt);
+	int (*tx_cmd_ch_close)(struct glink_transport_if *if_ptr,
+			uint32_t lcid);
+	void (*tx_cmd_ch_remote_open_ack)(struct glink_transport_if *if_ptr,
+			uint32_t rcid, uint16_t xprt_resp);
+	void (*tx_cmd_ch_remote_close_ack)(struct glink_transport_if *if_ptr,
+			uint32_t rcid);
+	int (*ssr)(struct glink_transport_if *if_ptr);
+	void (*subsys_up)(struct glink_transport_if *if_ptr);
+
+	/* channel data */
+	int (*allocate_rx_intent)(struct glink_transport_if *if_ptr,
+				  size_t size,
+				  struct glink_core_rx_intent *intent);
+	int (*deallocate_rx_intent)(struct glink_transport_if *if_ptr,
+				    struct glink_core_rx_intent *intent);
+	/* Optional */
+	int (*reuse_rx_intent)(struct glink_transport_if *if_ptr,
+			       struct glink_core_rx_intent *intent);
+
+	int (*tx_cmd_local_rx_intent)(struct glink_transport_if *if_ptr,
+			uint32_t lcid, size_t size, uint32_t liid);
+	void (*tx_cmd_local_rx_done)(struct glink_transport_if *if_ptr,
+			uint32_t lcid, uint32_t liid, bool reuse);
+	int (*tx)(struct glink_transport_if *if_ptr, uint32_t lcid,
+			struct glink_core_tx_pkt *pctx);
+	int (*tx_cmd_rx_intent_req)(struct glink_transport_if *if_ptr,
+			uint32_t lcid, size_t size);
+	int (*tx_cmd_remote_rx_intent_req_ack)(
+			struct glink_transport_if *if_ptr,
+			uint32_t lcid, bool granted);
+	int (*tx_cmd_set_sigs)(struct glink_transport_if *if_ptr,
+			uint32_t lcid, uint32_t sigs);
+
+	/* Optional.  If NULL at xprt registration, dummies will be used */
+	int (*poll)(struct glink_transport_if *if_ptr, uint32_t lcid);
+	int (*mask_rx_irq)(struct glink_transport_if *if_ptr, uint32_t lcid,
+			bool mask, void *pstruct);
+	int (*wait_link_down)(struct glink_transport_if *if_ptr);
+	int (*tx_cmd_tracer_pkt)(struct glink_transport_if *if_ptr,
+			uint32_t lcid, struct glink_core_tx_pkt *pctx);
+	unsigned long (*get_power_vote_ramp_time)(
+			struct glink_transport_if *if_ptr, uint32_t state);
+	int (*power_vote)(struct glink_transport_if *if_ptr, uint32_t state);
+	int (*power_unvote)(struct glink_transport_if *if_ptr);
+	int (*rx_rt_vote)(struct glink_transport_if *if_ptr);
+	int (*rx_rt_unvote)(struct glink_transport_if *if_ptr);
+	/*
+	 * Keep data pointers at the end of the structure after all function
+	 * pointers to allow for in-place initialization.
+	 */
+
+	/* private pointer for core */
+	struct glink_core_xprt_ctx *glink_core_priv;
+
+	/* core pointer (set during transport registration) */
+	struct glink_core_if *glink_core_if_ptr;
+};
+
+#ifdef CONFIG_MSM_GLINK
+
+/**
+ * get_tx_vaddr() - Get the virtual address from which the tx has to be done
+ * @pctx:	transmit packet context.
+ * @offset:	offset into the packet.
+ * @tx_size:	pointer to hold the length of the contiguous buffer
+ *              space.
+ *
+ * Return:	Address from which the tx has to be done.
+ */
+static inline void *get_tx_vaddr(struct glink_core_tx_pkt *pctx, size_t offset,
+				 size_t *tx_size)
+{
+	void *pdata;
+
+	if (pctx->vprovider) {
+		return pctx->vprovider((void *)pctx->iovec, offset, tx_size);
+	} else if (pctx->pprovider) {
+		pdata = pctx->pprovider((void *)pctx->iovec, offset, tx_size);
+		return phys_to_virt((unsigned long)pdata);
+	}
+	return NULL;
+}
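+
+/*
+ * A minimal sketch of a vprovider for a single contiguous buffer, to show
+ * the contract get_tx_vaddr() relies on (the struct and names here are
+ * hypothetical; real transports usually walk an iovec):
+ *
+ *	struct linear_buf { void *vaddr; size_t len; };
+ *
+ *	static void *linear_vprovider(void *iovec, size_t offset,
+ *				      size_t *size)
+ *	{
+ *		struct linear_buf *buf = iovec;
+ *
+ *		if (offset >= buf->len)
+ *			return NULL;
+ *		*size = buf->len - offset;
+ *		return buf->vaddr + offset;
+ *	}
+ */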
+
+/**
+ * glink_xprt_name_to_id() - convert transport name to id
+ * @name:	Name of the transport.
+ * @id:		Assigned id.
+ *
+ * Return: 0 on success or standard Linux error code.
+ */
+int glink_xprt_name_to_id(const char *name, uint16_t *id);
+
+
+#else /* CONFIG_MSM_GLINK */
+static inline void *get_tx_vaddr(struct glink_core_tx_pkt *pctx, size_t offset,
+				 size_t *tx_size)
+{
+	return NULL;
+}
+
+static inline int glink_xprt_name_to_id(const char *name, uint16_t *id)
+{
+	return -ENODEV;
+}
+
+#endif /* CONFIG_MSM_GLINK */
+#endif /* _SOC_QCOM_GLINK_XPRT_IF_H_ */
diff -Nruw linux-4.4.115-fbx/drivers/soc/qcom/hab./Kconfig linux-4.4.115-fbx/drivers/soc/qcom/hab/Kconfig
--- linux-4.4.115-fbx/drivers/soc/qcom/hab./Kconfig	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/soc/qcom/hab/Kconfig	2019-10-29 09:26:24.809214589 +0100
@@ -0,0 +1,7 @@
+config MSM_HAB
+	bool "Enable Multimedia driver Hypervisor Abstraction Layer"
+	help
+	  Multimedia driver hypervisor abstraction layer.
+	  Required for drivers to use the HAB API to communicate with the host
+	  OS.
+
diff -Nruw linux-4.4.115-fbx/drivers/soc/qcom/hab./Makefile linux-4.4.115-fbx/drivers/soc/qcom/hab/Makefile
--- linux-4.4.115-fbx/drivers/soc/qcom/hab./Makefile	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/soc/qcom/hab/Makefile	2019-10-29 09:26:24.809214589 +0100
@@ -0,0 +1,15 @@
+msm_hab-objs = \
+	khab.o \
+	hab.o \
+	hab_msg.o \
+	hab_vchan.o \
+	hab_pchan.o \
+	hab_open.o \
+	hab_mimex.o \
+	hab_mem_linux.o \
+	hab_pipe.o \
+	qvm_comm.o \
+	hab_qvm.o \
+	hab_parser.o
+
+obj-$(CONFIG_MSM_HAB) += msm_hab.o
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/soc/qcom/icnss.c	2019-10-29 09:26:24.813214628 +0100
@@ -0,0 +1,4796 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "icnss: " fmt
+
+#include <asm/dma-iommu.h>
+#include <linux/of_address.h>
+#include <linux/clk.h>
+#include <linux/iommu.h>
+#include <linux/export.h>
+#include <linux/err.h>
+#include <linux/of.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+#include <linux/interrupt.h>
+#include <linux/sched.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/qmi_encdec.h>
+#include <linux/ipc_logging.h>
+#include <linux/thread_info.h>
+#include <linux/uaccess.h>
+#include <linux/qpnp/qpnp-adc.h>
+#include <linux/etherdevice.h>
+#include <soc/qcom/memory_dump.h>
+#include <soc/qcom/icnss.h>
+#include <soc/qcom/msm_qmi_interface.h>
+#include <soc/qcom/secure_buffer.h>
+#include <soc/qcom/subsystem_notif.h>
+#include <soc/qcom/subsystem_restart.h>
+#include <soc/qcom/service-locator.h>
+#include <soc/qcom/service-notifier.h>
+#include <soc/qcom/socinfo.h>
+#include <soc/qcom/ramdump.h>
+
+#include "wlan_firmware_service_v01.h"
+
+#ifdef CONFIG_ICNSS_DEBUG
+unsigned long qmi_timeout = 10000;
+module_param(qmi_timeout, ulong, 0600);
+
+#define WLFW_TIMEOUT_MS			qmi_timeout
+#else
+#define WLFW_TIMEOUT_MS			10000
+#endif
+#define WLFW_SERVICE_INS_ID_V01		0
+#define WLFW_CLIENT_ID			0x4b4e454c
+#define MAX_PROP_SIZE			32
+#define NUM_LOG_PAGES			10
+#define NUM_LOG_LONG_PAGES		4
+#define ICNSS_MAGIC			0x5abc5abc
+
+#define ICNSS_SERVICE_LOCATION_CLIENT_NAME			"ICNSS-WLAN"
+#define ICNSS_WLAN_SERVICE_NAME					"wlan/fw"
+
+#define ICNSS_THRESHOLD_HIGH		3600000
+#define ICNSS_THRESHOLD_LOW		3450000
+#define ICNSS_THRESHOLD_GUARD		20000
+
+#define ICNSS_MAX_PROBE_CNT		2
+
+#define icnss_ipc_log_string(_x...) do {				\
+	if (icnss_ipc_log_context)					\
+		ipc_log_string(icnss_ipc_log_context, _x);		\
+	} while (0)
+
+#define icnss_ipc_log_long_string(_x...) do {				\
+	if (icnss_ipc_log_long_context)					\
+		ipc_log_string(icnss_ipc_log_long_context, _x);		\
+	} while (0)
+
+#define icnss_pr_err(_fmt, ...) do {					\
+	printk("%s" pr_fmt(_fmt), KERN_ERR, ##__VA_ARGS__);		\
+	icnss_ipc_log_string("%s" pr_fmt(_fmt), "",			\
+			     ##__VA_ARGS__);				\
+	} while (0)
+
+#define icnss_pr_warn(_fmt, ...) do {					\
+	printk("%s" pr_fmt(_fmt), KERN_WARNING, ##__VA_ARGS__);		\
+	icnss_ipc_log_string("%s" pr_fmt(_fmt), "",			\
+			     ##__VA_ARGS__);				\
+	} while (0)
+
+#define icnss_pr_info(_fmt, ...) do {					\
+	printk("%s" pr_fmt(_fmt), KERN_INFO, ##__VA_ARGS__);		\
+	icnss_ipc_log_string("%s" pr_fmt(_fmt), "",			\
+			     ##__VA_ARGS__);				\
+	} while (0)
+
+#if defined(CONFIG_DYNAMIC_DEBUG)
+#define icnss_pr_dbg(_fmt, ...) do {					\
+	pr_debug(_fmt, ##__VA_ARGS__);					\
+	icnss_ipc_log_string(pr_fmt(_fmt), ##__VA_ARGS__);		\
+	} while (0)
+
+#define icnss_pr_vdbg(_fmt, ...) do {					\
+	pr_debug(_fmt, ##__VA_ARGS__);					\
+	icnss_ipc_log_long_string(pr_fmt(_fmt), ##__VA_ARGS__);		\
+	} while (0)
+#elif defined(DEBUG)
+#define icnss_pr_dbg(_fmt, ...) do {					\
+	printk("%s" pr_fmt(_fmt), KERN_DEBUG, ##__VA_ARGS__);		\
+	icnss_ipc_log_string("%s" pr_fmt(_fmt), "",			\
+			     ##__VA_ARGS__);				\
+	} while (0)
+
+#define icnss_pr_vdbg(_fmt, ...) do {					\
+	printk("%s" pr_fmt(_fmt), KERN_DEBUG, ##__VA_ARGS__);		\
+	icnss_ipc_log_long_string("%s" pr_fmt(_fmt), "",		\
+				  ##__VA_ARGS__);			\
+	} while (0)
+#else
+#define icnss_pr_dbg(_fmt, ...) do {					\
+	no_printk("%s" pr_fmt(_fmt), KERN_DEBUG, ##__VA_ARGS__);	\
+	icnss_ipc_log_string("%s" pr_fmt(_fmt), "",			\
+		     ##__VA_ARGS__);					\
+	} while (0)
+
+#define icnss_pr_vdbg(_fmt, ...) do {					\
+	no_printk("%s" pr_fmt(_fmt), KERN_DEBUG, ##__VA_ARGS__);	\
+	icnss_ipc_log_long_string("%s" pr_fmt(_fmt), "",		\
+				  ##__VA_ARGS__);			\
+	} while (0)
+#endif
+
+#ifdef CONFIG_ICNSS_DEBUG
+#define ICNSS_ASSERT(_condition) do {					\
+		if (!(_condition)) {					\
+			icnss_pr_err("ASSERT at line %d\n", __LINE__);	\
+			BUG_ON(1);					\
+		}							\
+	} while (0)
+
+bool ignore_qmi_timeout;
+#define ICNSS_QMI_ASSERT() ICNSS_ASSERT(ignore_qmi_timeout)
+#else
+#define ICNSS_ASSERT(_condition) do { } while (0)
+#define ICNSS_QMI_ASSERT() do { } while (0)
+#endif
+
+#define QMI_ERR_PLAT_CCPM_CLK_INIT_FAILED 0x77
+
+enum icnss_debug_quirks {
+	HW_ALWAYS_ON,
+	HW_DEBUG_ENABLE,
+	SKIP_QMI,
+	HW_ONLY_TOP_LEVEL_RESET,
+	RECOVERY_DISABLE,
+	SSR_ONLY,
+	PDR_ONLY,
+	VBATT_DISABLE,
+	FW_REJUVENATE_ENABLE,
+};
+
+#define ICNSS_QUIRKS_DEFAULT		(BIT(VBATT_DISABLE) | \
+					 BIT(FW_REJUVENATE_ENABLE))
+
+unsigned long quirks = ICNSS_QUIRKS_DEFAULT;
+module_param(quirks, ulong, 0600);
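+
+/*
+ * With the enum above, the default mask is BIT(VBATT_DISABLE) |
+ * BIT(FW_REJUVENATE_ENABLE) = 0x180.  A hypothetical boot that also sets
+ * RECOVERY_DISABLE (bit 4) would pass, assuming the module is named icnss:
+ *
+ *	icnss.quirks=0x190
+ */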
+
+uint64_t dynamic_feature_mask = QMI_WLFW_FW_REJUVENATE_V01;
+module_param(dynamic_feature_mask, ullong, 0600);
+
+void *icnss_ipc_log_context;
+void *icnss_ipc_log_long_context;
+
+#define ICNSS_EVENT_PENDING			2989
+
+#define ICNSS_EVENT_SYNC			BIT(0)
+#define ICNSS_EVENT_UNINTERRUPTIBLE		BIT(1)
+#define ICNSS_EVENT_SYNC_UNINTERRUPTIBLE	(ICNSS_EVENT_UNINTERRUPTIBLE | \
+						 ICNSS_EVENT_SYNC)
+
+enum icnss_driver_event_type {
+	ICNSS_DRIVER_EVENT_SERVER_ARRIVE,
+	ICNSS_DRIVER_EVENT_SERVER_EXIT,
+	ICNSS_DRIVER_EVENT_FW_READY_IND,
+	ICNSS_DRIVER_EVENT_REGISTER_DRIVER,
+	ICNSS_DRIVER_EVENT_UNREGISTER_DRIVER,
+	ICNSS_DRIVER_EVENT_PD_SERVICE_DOWN,
+	ICNSS_DRIVER_EVENT_MAX,
+};
+
+enum icnss_msa_perm {
+	ICNSS_MSA_PERM_HLOS_ALL = 0,
+	ICNSS_MSA_PERM_WLAN_HW_RW = 1,
+	ICNSS_MSA_PERM_DUMP_COLLECT = 2,
+	ICNSS_MSA_PERM_MAX,
+};
+
+#define ICNSS_MAX_VMIDS     4
+
+struct icnss_mem_region_info {
+	uint64_t reg_addr;
+	uint32_t size;
+	uint8_t secure_flag;
+	enum icnss_msa_perm perm;
+};
+
+struct icnss_msa_perm_list_t {
+	int vmids[ICNSS_MAX_VMIDS];
+	int perms[ICNSS_MAX_VMIDS];
+	int nelems;
+};
+
+struct icnss_msa_perm_list_t msa_perm_secure_list[ICNSS_MSA_PERM_MAX] = {
+	[ICNSS_MSA_PERM_HLOS_ALL] = {
+		.vmids = {VMID_HLOS},
+		.perms = {PERM_READ | PERM_WRITE | PERM_EXEC},
+		.nelems = 1,
+	},
+
+	[ICNSS_MSA_PERM_WLAN_HW_RW] = {
+		.vmids = {VMID_MSS_MSA, VMID_WLAN},
+		.perms = {PERM_READ | PERM_WRITE,
+			PERM_READ | PERM_WRITE},
+		.nelems = 2,
+	},
+
+	[ICNSS_MSA_PERM_DUMP_COLLECT] = {
+		.vmids = {VMID_MSS_MSA, VMID_WLAN, VMID_HLOS},
+		.perms = {PERM_READ | PERM_WRITE,
+			PERM_READ | PERM_WRITE,
+			PERM_READ},
+		.nelems = 3,
+	},
+};
+
+struct icnss_msa_perm_list_t msa_perm_list[ICNSS_MSA_PERM_MAX] = {
+	[ICNSS_MSA_PERM_HLOS_ALL] = {
+		.vmids = {VMID_HLOS},
+		.perms = {PERM_READ | PERM_WRITE | PERM_EXEC},
+		.nelems = 1,
+	},
+
+	[ICNSS_MSA_PERM_WLAN_HW_RW] = {
+		.vmids = {VMID_MSS_MSA, VMID_WLAN, VMID_WLAN_CE},
+		.perms = {PERM_READ | PERM_WRITE,
+			PERM_READ | PERM_WRITE,
+			PERM_READ | PERM_WRITE},
+		.nelems = 3,
+	},
+
+	[ICNSS_MSA_PERM_DUMP_COLLECT] = {
+		.vmids = {VMID_MSS_MSA, VMID_WLAN, VMID_WLAN_CE, VMID_HLOS},
+		.perms = {PERM_READ | PERM_WRITE,
+			PERM_READ | PERM_WRITE,
+			PERM_READ | PERM_WRITE,
+			PERM_READ},
+		.nelems = 4,
+	},
+};
+
+struct icnss_event_pd_service_down_data {
+	bool crashed;
+	bool fw_rejuvenate;
+};
+
+struct icnss_driver_event {
+	struct list_head list;
+	enum icnss_driver_event_type type;
+	bool sync;
+	struct completion complete;
+	int ret;
+	void *data;
+};
+
+enum icnss_driver_state {
+	ICNSS_WLFW_QMI_CONNECTED,
+	ICNSS_POWER_ON,
+	ICNSS_FW_READY,
+	ICNSS_DRIVER_PROBED,
+	ICNSS_FW_TEST_MODE,
+	ICNSS_PM_SUSPEND,
+	ICNSS_PM_SUSPEND_NOIRQ,
+	ICNSS_SSR_REGISTERED,
+	ICNSS_PDR_REGISTERED,
+	ICNSS_PD_RESTART,
+	ICNSS_MSA0_ASSIGNED,
+	ICNSS_WLFW_EXISTS,
+	ICNSS_SHUTDOWN_DONE,
+	ICNSS_HOST_TRIGGERED_PDR,
+	ICNSS_FW_DOWN,
+	ICNSS_DRIVER_UNLOADING,
+};
+
+struct ce_irq_list {
+	int irq;
+	irqreturn_t (*handler)(int, void *);
+};
+
+struct icnss_vreg_info {
+	struct regulator *reg;
+	const char *name;
+	u32 min_v;
+	u32 max_v;
+	u32 load_ua;
+	unsigned long settle_delay;
+	bool required;
+};
+
+struct icnss_clk_info {
+	struct clk *handle;
+	const char *name;
+	u32 freq;
+	bool required;
+};
+
+static struct icnss_vreg_info icnss_vreg_info[] = {
+	{NULL, "vdd-0.8-cx-mx", 800000, 800000, 0, 0, false},
+	{NULL, "vdd-1.8-xo", 1800000, 1800000, 0, 0, false},
+	{NULL, "vdd-1.3-rfa", 1304000, 1304000, 0, 0, false},
+	{NULL, "vdd-3.3-ch0", 3312000, 3312000, 0, 0, false},
+};
+
+#define ICNSS_VREG_INFO_SIZE		ARRAY_SIZE(icnss_vreg_info)
+
+static struct icnss_clk_info icnss_clk_info[] = {
+	{NULL, "cxo_ref_clk_pin", 0, false},
+};
+
+#define ICNSS_CLK_INFO_SIZE		ARRAY_SIZE(icnss_clk_info)
+
+struct icnss_stats {
+	struct {
+		uint32_t posted;
+		uint32_t processed;
+	} events[ICNSS_DRIVER_EVENT_MAX];
+
+	struct {
+		uint32_t request;
+		uint32_t free;
+		uint32_t enable;
+		uint32_t disable;
+	} ce_irqs[ICNSS_MAX_IRQ_REGISTRATIONS];
+
+	struct {
+		uint32_t pdr_fw_crash;
+		uint32_t pdr_host_error;
+		uint32_t root_pd_crash;
+		uint32_t root_pd_shutdown;
+	} recovery;
+
+	uint32_t pm_suspend;
+	uint32_t pm_suspend_err;
+	uint32_t pm_resume;
+	uint32_t pm_resume_err;
+	uint32_t pm_suspend_noirq;
+	uint32_t pm_suspend_noirq_err;
+	uint32_t pm_resume_noirq;
+	uint32_t pm_resume_noirq_err;
+	uint32_t pm_stay_awake;
+	uint32_t pm_relax;
+
+	uint32_t ind_register_req;
+	uint32_t ind_register_resp;
+	uint32_t ind_register_err;
+	uint32_t msa_info_req;
+	uint32_t msa_info_resp;
+	uint32_t msa_info_err;
+	uint32_t msa_ready_req;
+	uint32_t msa_ready_resp;
+	uint32_t msa_ready_err;
+	uint32_t msa_ready_ind;
+	uint32_t cap_req;
+	uint32_t cap_resp;
+	uint32_t cap_err;
+	uint32_t pin_connect_result;
+	uint32_t cfg_req;
+	uint32_t cfg_resp;
+	uint32_t cfg_req_err;
+	uint32_t mode_req;
+	uint32_t mode_resp;
+	uint32_t mode_req_err;
+	uint32_t ini_req;
+	uint32_t ini_resp;
+	uint32_t ini_req_err;
+	uint32_t vbatt_req;
+	uint32_t vbatt_resp;
+	uint32_t vbatt_req_err;
+	u32 rejuvenate_ind;
+	uint32_t rejuvenate_ack_req;
+	uint32_t rejuvenate_ack_resp;
+	uint32_t rejuvenate_ack_err;
+};
+
+#define MAX_NO_OF_MAC_ADDR 4
+struct icnss_wlan_mac_addr {
+	u8 mac_addr[MAX_NO_OF_MAC_ADDR][ETH_ALEN];
+	uint32_t no_of_mac_addr_set;
+};
+
+enum icnss_pdr_cause_index {
+	ICNSS_FW_CRASH,
+	ICNSS_ROOT_PD_CRASH,
+	ICNSS_ROOT_PD_SHUTDOWN,
+	ICNSS_HOST_ERROR,
+};
+
+static const char * const icnss_pdr_cause[] = {
+	[ICNSS_FW_CRASH] = "FW crash",
+	[ICNSS_ROOT_PD_CRASH] = "Root PD crashed",
+	[ICNSS_ROOT_PD_SHUTDOWN] = "Root PD shutdown",
+	[ICNSS_HOST_ERROR] = "Host error",
+};
+
+struct service_notifier_context {
+	void *handle;
+	uint32_t instance_id;
+	char name[QMI_SERVREG_LOC_NAME_LENGTH_V01 + 1];
+};
+
+static struct icnss_priv {
+	uint32_t magic;
+	struct platform_device *pdev;
+	struct icnss_driver_ops *ops;
+	struct ce_irq_list ce_irq_list[ICNSS_MAX_IRQ_REGISTRATIONS];
+	struct icnss_vreg_info vreg_info[ICNSS_VREG_INFO_SIZE];
+	struct icnss_clk_info clk_info[ICNSS_CLK_INFO_SIZE];
+	u32 ce_irqs[ICNSS_MAX_IRQ_REGISTRATIONS];
+	phys_addr_t mem_base_pa;
+	void __iomem *mem_base_va;
+	struct dma_iommu_mapping *smmu_mapping;
+	dma_addr_t smmu_iova_start;
+	size_t smmu_iova_len;
+	dma_addr_t smmu_iova_ipa_start;
+	size_t smmu_iova_ipa_len;
+	struct qmi_handle *wlfw_clnt;
+	struct list_head event_list;
+	spinlock_t event_lock;
+	struct work_struct event_work;
+	struct work_struct qmi_recv_msg_work;
+	struct workqueue_struct *event_wq;
+	phys_addr_t msa_pa;
+	uint32_t msa_mem_size;
+	void *msa_va;
+	unsigned long state;
+	struct wlfw_rf_chip_info_s_v01 chip_info;
+	struct wlfw_rf_board_info_s_v01 board_info;
+	struct wlfw_soc_info_s_v01 soc_info;
+	struct wlfw_fw_version_info_s_v01 fw_version_info;
+	char fw_build_id[QMI_WLFW_MAX_BUILD_ID_LEN_V01 + 1];
+	u32 pwr_pin_result;
+	u32 phy_io_pin_result;
+	u32 rf_pin_result;
+	uint32_t nr_mem_region;
+	struct icnss_mem_region_info
+		mem_region[QMI_WLFW_MAX_NUM_MEMORY_REGIONS_V01];
+	struct dentry *root_dentry;
+	spinlock_t on_off_lock;
+	struct icnss_stats stats;
+	struct work_struct service_notifier_work;
+	struct service_notifier_context *service_notifier;
+	struct notifier_block service_notifier_nb;
+	int total_domains;
+	struct notifier_block get_service_nb;
+	void *modem_notify_handler;
+	struct notifier_block modem_ssr_nb;
+	uint32_t diag_reg_read_addr;
+	uint32_t diag_reg_read_mem_type;
+	uint32_t diag_reg_read_len;
+	uint8_t *diag_reg_read_buf;
+	struct qpnp_adc_tm_btm_param vph_monitor_params;
+	struct qpnp_adc_tm_chip *adc_tm_dev;
+	struct qpnp_vadc_chip *vadc_dev;
+	uint64_t vph_pwr;
+	atomic_t pm_count;
+	struct ramdump_device *msa0_dump_dev;
+	bool is_wlan_mac_set;
+	struct icnss_wlan_mac_addr wlan_mac_addr;
+	bool bypass_s1_smmu;
+	struct mutex dev_lock;
+	u8 cause_for_rejuvenation;
+	u8 requesting_sub_system;
+	u16 line_number;
+	char function_name[QMI_WLFW_FUNCTION_NAME_LEN_V01 + 1];
+} *penv;
+
+#ifdef CONFIG_ICNSS_DEBUG
+static void icnss_ignore_qmi_timeout(bool ignore)
+{
+	ignore_qmi_timeout = ignore;
+}
+#else
+static void icnss_ignore_qmi_timeout(bool ignore) { }
+#endif
+
+static int icnss_assign_msa_perm(struct icnss_mem_region_info
+				 *mem_region, enum icnss_msa_perm new_perm)
+{
+	int ret = 0;
+	phys_addr_t addr;
+	u32 size;
+	u32 i = 0;
+	u32 source_vmids[ICNSS_MAX_VMIDS] = {0};
+	u32 source_nelems;
+	u32 dest_vmids[ICNSS_MAX_VMIDS] = {0};
+	u32 dest_perms[ICNSS_MAX_VMIDS] = {0};
+	u32 dest_nelems;
+	enum icnss_msa_perm cur_perm = mem_region->perm;
+	struct icnss_msa_perm_list_t *new_perm_list, *old_perm_list;
+
+	addr = mem_region->reg_addr;
+	size = mem_region->size;
+
+	if (mem_region->secure_flag) {
+		new_perm_list = &msa_perm_secure_list[new_perm];
+		old_perm_list = &msa_perm_secure_list[cur_perm];
+	} else {
+		new_perm_list = &msa_perm_list[new_perm];
+		old_perm_list = &msa_perm_list[cur_perm];
+	}
+
+	source_nelems = old_perm_list->nelems;
+	dest_nelems = new_perm_list->nelems;
+
+	for (i = 0; i < source_nelems; ++i)
+		source_vmids[i] = old_perm_list->vmids[i];
+
+	for (i = 0; i < dest_nelems; ++i) {
+		dest_vmids[i] = new_perm_list->vmids[i];
+		dest_perms[i] = new_perm_list->perms[i];
+	}
+
+	ret = hyp_assign_phys(addr, size, source_vmids, source_nelems,
+			      dest_vmids, dest_perms, dest_nelems);
+	if (ret) {
+		icnss_pr_err("Hyperviser map failed for PA=%pa size=%u err=%d\n",
+			     &addr, size, ret);
+		goto out;
+	}
+
+	icnss_pr_dbg("Hypervisor map for source_nelems=%d, source[0]=%x, source[1]=%x, source[2]=%x,"
+		     "source[3]=%x, dest_nelems=%d, dest[0]=%x, dest[1]=%x, dest[2]=%x, dest[3]=%x\n",
+		     source_nelems, source_vmids[0], source_vmids[1],
+		     source_vmids[2], source_vmids[3], dest_nelems,
+		     dest_vmids[0], dest_vmids[1], dest_vmids[2],
+		     dest_vmids[3]);
+out:
+	return ret;
+}
+
+static int icnss_assign_msa_perm_all(struct icnss_priv *priv,
+				     enum icnss_msa_perm new_perm)
+{
+	int ret;
+	int i;
+	enum icnss_msa_perm old_perm;
+
+	if (priv->nr_mem_region > QMI_WLFW_MAX_NUM_MEMORY_REGIONS_V01) {
+		icnss_pr_err("Invalid memory region len %d\n",
+			     priv->nr_mem_region);
+		return -EINVAL;
+	}
+
+	for (i = 0; i < priv->nr_mem_region; i++) {
+		old_perm = priv->mem_region[i].perm;
+		ret = icnss_assign_msa_perm(&priv->mem_region[i], new_perm);
+		if (ret)
+			goto err_unmap;
+		priv->mem_region[i].perm = new_perm;
+	}
+	return 0;
+
+err_unmap:
+	for (i--; i >= 0; i--)
+		icnss_assign_msa_perm(&priv->mem_region[i], old_perm);
+	return ret;
+}
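+
+/*
+ * Illustrative call sequence (assumed from the permission tables above):
+ * MSA regions are handed to the WLAN hardware VMIDs for normal operation
+ * and temporarily opened up to HLOS for dump collection on recovery:
+ *
+ *	ret = icnss_assign_msa_perm_all(priv, ICNSS_MSA_PERM_WLAN_HW_RW);
+ *	...
+ *	ret = icnss_assign_msa_perm_all(priv, ICNSS_MSA_PERM_DUMP_COLLECT);
+ */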
+
+static void icnss_pm_stay_awake(struct icnss_priv *priv)
+{
+	if (atomic_inc_return(&priv->pm_count) != 1)
+		return;
+
+	icnss_pr_vdbg("PM stay awake, state: 0x%lx, count: %d\n", priv->state,
+		     atomic_read(&priv->pm_count));
+
+	pm_stay_awake(&priv->pdev->dev);
+
+	priv->stats.pm_stay_awake++;
+}
+
+static void icnss_pm_relax(struct icnss_priv *priv)
+{
+	int r = atomic_dec_return(&priv->pm_count);
+
+	WARN_ON(r < 0);
+
+	if (r != 0)
+		return;
+
+	icnss_pr_vdbg("PM relax, state: 0x%lx, count: %d\n", priv->state,
+		     atomic_read(&priv->pm_count));
+
+	pm_relax(&priv->pdev->dev);
+	priv->stats.pm_relax++;
+}
+
+static char *icnss_driver_event_to_str(enum icnss_driver_event_type type)
+{
+	switch (type) {
+	case ICNSS_DRIVER_EVENT_SERVER_ARRIVE:
+		return "SERVER_ARRIVE";
+	case ICNSS_DRIVER_EVENT_SERVER_EXIT:
+		return "SERVER_EXIT";
+	case ICNSS_DRIVER_EVENT_FW_READY_IND:
+		return "FW_READY";
+	case ICNSS_DRIVER_EVENT_REGISTER_DRIVER:
+		return "REGISTER_DRIVER";
+	case ICNSS_DRIVER_EVENT_UNREGISTER_DRIVER:
+		return "UNREGISTER_DRIVER";
+	case ICNSS_DRIVER_EVENT_PD_SERVICE_DOWN:
+		return "PD_SERVICE_DOWN";
+	case ICNSS_DRIVER_EVENT_MAX:
+		return "EVENT_MAX";
+	}
+
+	return "UNKNOWN";
+}
+
+static int icnss_driver_event_post(enum icnss_driver_event_type type,
+				   u32 flags, void *data)
+{
+	struct icnss_driver_event *event;
+	unsigned long irq_flags;
+	gfp_t gfp = GFP_KERNEL;
+	int ret = 0;
+
+	icnss_pr_dbg("Posting event: %s(%d), %s, flags: 0x%x, state: 0x%lx\n",
+		     icnss_driver_event_to_str(type), type, current->comm,
+		     flags, penv->state);
+
+	if (type >= ICNSS_DRIVER_EVENT_MAX) {
+		icnss_pr_err("Invalid Event type: %d, can't post", type);
+		return -EINVAL;
+	}
+
+	if (in_interrupt() || irqs_disabled())
+		gfp = GFP_ATOMIC;
+
+	event = kzalloc(sizeof(*event), gfp);
+	if (event == NULL)
+		return -ENOMEM;
+
+	icnss_pm_stay_awake(penv);
+
+	event->type = type;
+	event->data = data;
+	init_completion(&event->complete);
+	event->ret = ICNSS_EVENT_PENDING;
+	event->sync = !!(flags & ICNSS_EVENT_SYNC);
+
+	spin_lock_irqsave(&penv->event_lock, irq_flags);
+	list_add_tail(&event->list, &penv->event_list);
+	spin_unlock_irqrestore(&penv->event_lock, irq_flags);
+
+	penv->stats.events[type].posted++;
+	queue_work(penv->event_wq, &penv->event_work);
+
+	if (!(flags & ICNSS_EVENT_SYNC))
+		goto out;
+
+	if (flags & ICNSS_EVENT_UNINTERRUPTIBLE)
+		wait_for_completion(&event->complete);
+	else
+		ret = wait_for_completion_interruptible(&event->complete);
+
+	icnss_pr_dbg("Completed event: %s(%d), state: 0x%lx, ret: %d/%d\n",
+		     icnss_driver_event_to_str(type), type, penv->state, ret,
+		     event->ret);
+
+	spin_lock_irqsave(&penv->event_lock, irq_flags);
+	if (ret == -ERESTARTSYS && event->ret == ICNSS_EVENT_PENDING) {
+		event->sync = false;
+		spin_unlock_irqrestore(&penv->event_lock, irq_flags);
+		ret = -EINTR;
+		goto out;
+	}
+	spin_unlock_irqrestore(&penv->event_lock, irq_flags);
+
+	ret = event->ret;
+	kfree(event);
+
+out:
+	icnss_pm_relax(penv);
+	return ret;
+}
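+
+/*
+ * Typical posting patterns (illustrative): QMI service-arrive notifications
+ * are posted asynchronously from callback context, while unregister from a
+ * sleeping driver context can block for the result:
+ *
+ *	icnss_driver_event_post(ICNSS_DRIVER_EVENT_SERVER_ARRIVE, 0, NULL);
+ *
+ *	ret = icnss_driver_event_post(ICNSS_DRIVER_EVENT_UNREGISTER_DRIVER,
+ *				      ICNSS_EVENT_SYNC_UNINTERRUPTIBLE, NULL);
+ */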
+
+static int wlfw_vbatt_send_sync_msg(struct icnss_priv *priv,
+				    uint64_t voltage_uv)
+{
+	int ret;
+	struct wlfw_vbatt_req_msg_v01 req;
+	struct wlfw_vbatt_resp_msg_v01 resp;
+	struct msg_desc req_desc, resp_desc;
+
+	if (!priv->wlfw_clnt) {
+		ret = -ENODEV;
+		goto out;
+	}
+
+	icnss_pr_dbg("Sending Vbatt message, state: 0x%lx\n",
+		     penv->state);
+
+	memset(&req, 0, sizeof(req));
+	memset(&resp, 0, sizeof(resp));
+
+	req.voltage_uv = voltage_uv;
+
+	req_desc.max_msg_len = WLFW_VBATT_REQ_MSG_V01_MAX_MSG_LEN;
+	req_desc.msg_id = QMI_WLFW_VBATT_REQ_V01;
+	req_desc.ei_array = wlfw_vbatt_req_msg_v01_ei;
+
+	resp_desc.max_msg_len = WLFW_VBATT_RESP_MSG_V01_MAX_MSG_LEN;
+	resp_desc.msg_id = QMI_WLFW_VBATT_RESP_V01;
+	resp_desc.ei_array = wlfw_vbatt_resp_msg_v01_ei;
+
+	priv->stats.vbatt_req++;
+
+	ret = qmi_send_req_wait(priv->wlfw_clnt, &req_desc, &req, sizeof(req),
+			&resp_desc, &resp, sizeof(resp), WLFW_TIMEOUT_MS);
+	if (ret < 0) {
+		icnss_pr_err("Send vbatt req failed %d\n", ret);
+		goto out;
+	}
+
+	if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
+		icnss_pr_err("QMI vbatt request rejected, result:%d error:%d\n",
+			resp.resp.result, resp.resp.error);
+		ret = -resp.resp.result;
+		goto out;
+	}
+	priv->stats.vbatt_resp++;
+
+	return 0;
+
+out:
+	priv->stats.vbatt_req_err++;
+	return ret;
+}
+
+static int icnss_get_phone_power(struct icnss_priv *priv, uint64_t *result_uv)
+{
+	int ret = 0;
+	struct qpnp_vadc_result adc_result;
+
+	if (!priv->vadc_dev) {
+		icnss_pr_err("VADC dev doesn't exists\n");
+		ret = -EINVAL;
+		goto out;
+	}
+
+	ret = qpnp_vadc_read(penv->vadc_dev, VADC_VPH_PWR, &adc_result);
+	if (ret) {
+		icnss_pr_err("Error reading ADC channel %d, ret = %d\n",
+			     VADC_VPH_PWR, ret);
+		goto out;
+	}
+
+	icnss_pr_dbg("Phone power read phy=%lld meas=0x%llx\n",
+		       adc_result.physical, adc_result.measurement);
+
+	*result_uv = adc_result.physical;
+out:
+	return ret;
+}
+
+static void icnss_vph_notify(enum qpnp_tm_state state, void *ctx)
+{
+	struct icnss_priv *priv = ctx;
+	uint64_t vph_pwr = 0;
+	uint64_t vph_pwr_prev;
+	int ret = 0;
+	bool update = true;
+
+	if (!priv) {
+		icnss_pr_err("Priv pointer is NULL\n");
+		return;
+	}
+
+	vph_pwr_prev = priv->vph_pwr;
+
+	ret = icnss_get_phone_power(priv, &vph_pwr);
+	if (ret)
+		return;
+
+	if (vph_pwr < ICNSS_THRESHOLD_LOW) {
+		if (vph_pwr_prev < ICNSS_THRESHOLD_LOW)
+			update = false;
+		priv->vph_monitor_params.state_request =
+			ADC_TM_HIGH_THR_ENABLE;
+		priv->vph_monitor_params.high_thr = ICNSS_THRESHOLD_LOW +
+			ICNSS_THRESHOLD_GUARD;
+		priv->vph_monitor_params.low_thr = 0;
+	} else if (vph_pwr > ICNSS_THRESHOLD_HIGH) {
+		if (vph_pwr_prev > ICNSS_THRESHOLD_HIGH)
+			update = false;
+		priv->vph_monitor_params.state_request =
+			ADC_TM_LOW_THR_ENABLE;
+		priv->vph_monitor_params.low_thr = ICNSS_THRESHOLD_HIGH -
+			ICNSS_THRESHOLD_GUARD;
+		priv->vph_monitor_params.high_thr = 0;
+	} else {
+		if (vph_pwr_prev > ICNSS_THRESHOLD_LOW &&
+		    vph_pwr_prev < ICNSS_THRESHOLD_HIGH)
+			update = false;
+		priv->vph_monitor_params.state_request =
+			ADC_TM_HIGH_LOW_THR_ENABLE;
+		priv->vph_monitor_params.low_thr = ICNSS_THRESHOLD_LOW;
+		priv->vph_monitor_params.high_thr = ICNSS_THRESHOLD_HIGH;
+	}
+
+	priv->vph_pwr = vph_pwr;
+
+	if (update)
+		wlfw_vbatt_send_sync_msg(priv, vph_pwr);
+
+	icnss_pr_dbg("set low threshold to %d, high threshold to %d\n",
+		       priv->vph_monitor_params.low_thr,
+		       priv->vph_monitor_params.high_thr);
+	ret = qpnp_adc_tm_channel_measure(priv->adc_tm_dev,
+					  &priv->vph_monitor_params);
+	if (ret)
+		icnss_pr_err("TM channel setup failed %d\n", ret);
+}
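+
+/*
+ * Worked example of the windowing above, in uV: a reading of 3400000
+ * (< ICNSS_THRESHOLD_LOW, 3450000) arms only the high threshold at
+ * 3450000 + 20000 = 3470000, so the monitor fires once the rail recovers;
+ * a reading of 3500000 falls inside the window and re-arms both thresholds
+ * at 3450000 and 3600000.
+ */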
+
+static int icnss_setup_vph_monitor(struct icnss_priv *priv)
+{
+	int ret = 0;
+
+	if (!priv->adc_tm_dev) {
+		icnss_pr_err("ADC TM handler is NULL\n");
+		ret = -EINVAL;
+		goto out;
+	}
+
+	priv->vph_monitor_params.low_thr = ICNSS_THRESHOLD_LOW;
+	priv->vph_monitor_params.high_thr = ICNSS_THRESHOLD_HIGH;
+	priv->vph_monitor_params.state_request = ADC_TM_HIGH_LOW_THR_ENABLE;
+	priv->vph_monitor_params.channel = VADC_VPH_PWR;
+	priv->vph_monitor_params.btm_ctx = priv;
+	priv->vph_monitor_params.timer_interval = ADC_MEAS1_INTERVAL_1S;
+	priv->vph_monitor_params.threshold_notification = &icnss_vph_notify;
+	icnss_pr_dbg("Set low threshold to %d, high threshold to %d\n",
+		       priv->vph_monitor_params.low_thr,
+		       priv->vph_monitor_params.high_thr);
+
+	ret = qpnp_adc_tm_channel_measure(priv->adc_tm_dev,
+					  &priv->vph_monitor_params);
+	if (ret)
+		icnss_pr_err("TM channel setup failed %d\n", ret);
+out:
+	return ret;
+}
+
+static int icnss_init_vph_monitor(struct icnss_priv *priv)
+{
+	int ret = 0;
+
+	if (test_bit(VBATT_DISABLE, &quirks))
+		goto out;
+
+	ret = icnss_get_phone_power(priv, &priv->vph_pwr);
+	if (ret)
+		goto out;
+
+	wlfw_vbatt_send_sync_msg(priv, priv->vph_pwr);
+
+	ret = icnss_setup_vph_monitor(priv);
+	if (ret)
+		goto out;
+out:
+	return ret;
+}
+
+
+static int icnss_qmi_pin_connect_result_ind(void *msg, unsigned int msg_len)
+{
+	struct msg_desc ind_desc;
+	struct wlfw_pin_connect_result_ind_msg_v01 ind_msg;
+	int ret = 0;
+
+	if (!penv || !penv->wlfw_clnt) {
+		ret = -ENODEV;
+		goto out;
+	}
+
+	memset(&ind_msg, 0, sizeof(ind_msg));
+
+	ind_desc.msg_id = QMI_WLFW_PIN_CONNECT_RESULT_IND_V01;
+	ind_desc.max_msg_len = WLFW_PIN_CONNECT_RESULT_IND_MSG_V01_MAX_MSG_LEN;
+	ind_desc.ei_array = wlfw_pin_connect_result_ind_msg_v01_ei;
+
+	ret = qmi_kernel_decode(&ind_desc, &ind_msg, msg, msg_len);
+	if (ret < 0) {
+		icnss_pr_err("Failed to decode message: %d, msg_len: %u\n",
+			     ret, msg_len);
+		goto out;
+	}
+
+	/* store pin result locally */
+	if (ind_msg.pwr_pin_result_valid)
+		penv->pwr_pin_result = ind_msg.pwr_pin_result;
+	if (ind_msg.phy_io_pin_result_valid)
+		penv->phy_io_pin_result = ind_msg.phy_io_pin_result;
+	if (ind_msg.rf_pin_result_valid)
+		penv->rf_pin_result = ind_msg.rf_pin_result;
+
+	icnss_pr_dbg("Pin connect Result: pwr_pin: 0x%x phy_io_pin: 0x%x rf_io_pin: 0x%x\n",
+		     ind_msg.pwr_pin_result, ind_msg.phy_io_pin_result,
+		     ind_msg.rf_pin_result);
+
+	penv->stats.pin_connect_result++;
+out:
+	return ret;
+}
+
+static int icnss_vreg_on(struct icnss_priv *priv)
+{
+	int ret = 0;
+	struct icnss_vreg_info *vreg_info;
+	int i;
+
+	for (i = 0; i < ICNSS_VREG_INFO_SIZE; i++) {
+		vreg_info = &priv->vreg_info[i];
+
+		if (!vreg_info->reg)
+			continue;
+
+		icnss_pr_vdbg("Regulator %s being enabled\n", vreg_info->name);
+
+		ret = regulator_set_voltage(vreg_info->reg, vreg_info->min_v,
+					    vreg_info->max_v);
+		if (ret) {
+			icnss_pr_err("Regulator %s, can't set voltage: min_v: %u, max_v: %u, ret: %d\n",
+				     vreg_info->name, vreg_info->min_v,
+				     vreg_info->max_v, ret);
+			break;
+		}
+
+		if (vreg_info->load_ua) {
+			ret = regulator_set_load(vreg_info->reg,
+						 vreg_info->load_ua);
+			if (ret < 0) {
+				icnss_pr_err("Regulator %s, can't set load: %u, ret: %d\n",
+					     vreg_info->name,
+					     vreg_info->load_ua, ret);
+				break;
+			}
+		}
+
+		ret = regulator_enable(vreg_info->reg);
+		if (ret) {
+			icnss_pr_err("Regulator %s, can't enable: %d\n",
+				     vreg_info->name, ret);
+			break;
+		}
+
+		if (vreg_info->settle_delay)
+			udelay(vreg_info->settle_delay);
+	}
+
+	if (!ret)
+		return 0;
+
+	/* unwind only the regulators that were actually enabled */
+	for (i--; i >= 0; i--) {
+		vreg_info = &priv->vreg_info[i];
+
+		if (!vreg_info->reg)
+			continue;
+
+		regulator_disable(vreg_info->reg);
+		regulator_set_load(vreg_info->reg, 0);
+		regulator_set_voltage(vreg_info->reg, 0, vreg_info->max_v);
+	}
+
+	return ret;
+}
+
+static int icnss_vreg_off(struct icnss_priv *priv)
+{
+	int ret = 0;
+	struct icnss_vreg_info *vreg_info;
+	int i;
+
+	for (i = ICNSS_VREG_INFO_SIZE - 1; i >= 0; i--) {
+		vreg_info = &priv->vreg_info[i];
+
+		if (!vreg_info->reg)
+			continue;
+
+		icnss_pr_vdbg("Regulator %s being disabled\n", vreg_info->name);
+
+		ret = regulator_disable(vreg_info->reg);
+		if (ret)
+			icnss_pr_err("Regulator %s, can't disable: %d\n",
+				     vreg_info->name, ret);
+
+		ret = regulator_set_load(vreg_info->reg, 0);
+		if (ret < 0)
+			icnss_pr_err("Regulator %s, can't set load: %d\n",
+				     vreg_info->name, ret);
+
+		ret = regulator_set_voltage(vreg_info->reg, 0,
+					    vreg_info->max_v);
+		if (ret)
+			icnss_pr_err("Regulator %s, can't set voltage: %d\n",
+				     vreg_info->name, ret);
+	}
+
+	return ret;
+}
+
+static int icnss_clk_init(struct icnss_priv *priv)
+{
+	struct icnss_clk_info *clk_info;
+	int i;
+	int ret = 0;
+
+	for (i = 0; i < ICNSS_CLK_INFO_SIZE; i++) {
+		clk_info = &priv->clk_info[i];
+
+		if (!clk_info->handle)
+			continue;
+
+		icnss_pr_vdbg("Clock %s being enabled\n", clk_info->name);
+
+		if (clk_info->freq) {
+			ret = clk_set_rate(clk_info->handle, clk_info->freq);
+
+			if (ret) {
+				icnss_pr_err("Clock %s, can't set frequency: %u, ret: %d\n",
+					     clk_info->name, clk_info->freq,
+					     ret);
+				break;
+			}
+		}
+
+		ret = clk_prepare_enable(clk_info->handle);
+		if (ret) {
+			icnss_pr_err("Clock %s, can't enable: %d\n",
+				     clk_info->name, ret);
+			break;
+		}
+	}
+
+	if (ret == 0)
+		return 0;
+
+	/* unwind only the clocks that were actually enabled */
+	for (i--; i >= 0; i--) {
+		clk_info = &priv->clk_info[i];
+
+		if (!clk_info->handle)
+			continue;
+
+		clk_disable_unprepare(clk_info->handle);
+	}
+
+	return ret;
+}
+
+static int icnss_clk_deinit(struct icnss_priv *priv)
+{
+	struct icnss_clk_info *clk_info;
+	int i;
+
+	for (i = 0; i < ICNSS_CLK_INFO_SIZE; i++) {
+		clk_info = &priv->clk_info[i];
+
+		if (!clk_info->handle)
+			continue;
+
+		icnss_pr_vdbg("Clock %s being disabled\n", clk_info->name);
+
+		clk_disable_unprepare(clk_info->handle);
+	}
+
+	return 0;
+}
+
+static int icnss_hw_power_on(struct icnss_priv *priv)
+{
+	int ret = 0;
+
+	icnss_pr_dbg("HW Power on: state: 0x%lx\n", priv->state);
+
+	spin_lock(&priv->on_off_lock);
+	if (test_bit(ICNSS_POWER_ON, &priv->state)) {
+		spin_unlock(&priv->on_off_lock);
+		return ret;
+	}
+	set_bit(ICNSS_POWER_ON, &priv->state);
+	spin_unlock(&priv->on_off_lock);
+
+	ret = icnss_vreg_on(priv);
+	if (ret)
+		goto out;
+
+	ret = icnss_clk_init(priv);
+	if (ret)
+		goto vreg_off;
+
+	return ret;
+
+vreg_off:
+	icnss_vreg_off(priv);
+out:
+	clear_bit(ICNSS_POWER_ON, &priv->state);
+	return ret;
+}
+
+static int icnss_hw_power_off(struct icnss_priv *priv)
+{
+	int ret = 0;
+
+	if (test_bit(HW_ALWAYS_ON, &quirks))
+		return 0;
+
+	if (test_bit(ICNSS_FW_DOWN, &priv->state))
+		return 0;
+
+	icnss_pr_dbg("HW Power off: 0x%lx\n", priv->state);
+
+	spin_lock(&priv->on_off_lock);
+	if (!test_bit(ICNSS_POWER_ON, &priv->state)) {
+		spin_unlock(&priv->on_off_lock);
+		return ret;
+	}
+	clear_bit(ICNSS_POWER_ON, &priv->state);
+	spin_unlock(&priv->on_off_lock);
+
+	icnss_clk_deinit(priv);
+
+	ret = icnss_vreg_off(priv);
+
+	return ret;
+}
+
+int icnss_power_on(struct device *dev)
+{
+	struct icnss_priv *priv = dev_get_drvdata(dev);
+
+	if (!priv) {
+		icnss_pr_err("Invalid drvdata: dev %p, data %p\n",
+			     dev, priv);
+		return -EINVAL;
+	}
+
+	icnss_pr_dbg("Power On: 0x%lx\n", priv->state);
+
+	return icnss_hw_power_on(priv);
+}
+EXPORT_SYMBOL(icnss_power_on);
+
+bool icnss_is_fw_ready(void)
+{
+	if (!penv)
+		return false;
+	else
+		return test_bit(ICNSS_FW_READY, &penv->state);
+}
+EXPORT_SYMBOL(icnss_is_fw_ready);
+
+bool icnss_is_fw_down(void)
+{
+	if (!penv)
+		return false;
+	else
+		return test_bit(ICNSS_FW_DOWN, &penv->state);
+}
+EXPORT_SYMBOL(icnss_is_fw_down);
+
+
+int icnss_power_off(struct device *dev)
+{
+	struct icnss_priv *priv = dev_get_drvdata(dev);
+
+	if (!priv) {
+		icnss_pr_err("Invalid drvdata: dev %p, data %p\n",
+			     dev, priv);
+		return -EINVAL;
+	}
+
+	icnss_pr_dbg("Power Off: 0x%lx\n", priv->state);
+
+	return icnss_hw_power_off(priv);
+}
+EXPORT_SYMBOL(icnss_power_off);
+
+static int wlfw_msa_mem_info_send_sync_msg(void)
+{
+	int ret;
+	int i;
+	struct wlfw_msa_info_req_msg_v01 req;
+	struct wlfw_msa_info_resp_msg_v01 resp;
+	struct msg_desc req_desc, resp_desc;
+
+	if (!penv || !penv->wlfw_clnt)
+		return -ENODEV;
+
+	icnss_pr_dbg("Sending MSA mem info, state: 0x%lx\n", penv->state);
+
+	memset(&req, 0, sizeof(req));
+	memset(&resp, 0, sizeof(resp));
+
+	req.msa_addr = penv->msa_pa;
+	req.size = penv->msa_mem_size;
+
+	req_desc.max_msg_len = WLFW_MSA_INFO_REQ_MSG_V01_MAX_MSG_LEN;
+	req_desc.msg_id = QMI_WLFW_MSA_INFO_REQ_V01;
+	req_desc.ei_array = wlfw_msa_info_req_msg_v01_ei;
+
+	resp_desc.max_msg_len = WLFW_MSA_INFO_RESP_MSG_V01_MAX_MSG_LEN;
+	resp_desc.msg_id = QMI_WLFW_MSA_INFO_RESP_V01;
+	resp_desc.ei_array = wlfw_msa_info_resp_msg_v01_ei;
+
+	penv->stats.msa_info_req++;
+
+	ret = qmi_send_req_wait(penv->wlfw_clnt, &req_desc, &req, sizeof(req),
+			&resp_desc, &resp, sizeof(resp), WLFW_TIMEOUT_MS);
+	if (ret < 0) {
+		icnss_pr_err("Send MSA Mem info req failed %d\n", ret);
+		goto out;
+	}
+
+	if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
+		icnss_pr_err("QMI MSA Mem info request rejected, result:%d error:%d\n",
+			resp.resp.result, resp.resp.error);
+		ret = -resp.resp.result;
+		goto out;
+	}
+
+	icnss_pr_dbg("Receive mem_region_info_len: %d\n",
+		     resp.mem_region_info_len);
+
+	if (resp.mem_region_info_len > QMI_WLFW_MAX_NUM_MEMORY_REGIONS_V01) {
+		icnss_pr_err("Invalid memory region length received: %d\n",
+			     resp.mem_region_info_len);
+		ret = -EINVAL;
+		goto out;
+	}
+
+	penv->stats.msa_info_resp++;
+	penv->nr_mem_region = resp.mem_region_info_len;
+	for (i = 0; i < resp.mem_region_info_len; i++) {
+		penv->mem_region[i].reg_addr =
+			resp.mem_region_info[i].region_addr;
+		penv->mem_region[i].size =
+			resp.mem_region_info[i].size;
+		penv->mem_region[i].secure_flag =
+			resp.mem_region_info[i].secure_flag;
+		icnss_pr_dbg("Memory Region: %d Addr: 0x%llx Size: 0x%x Flag: 0x%08x\n",
+			     i, penv->mem_region[i].reg_addr,
+			     penv->mem_region[i].size,
+			     penv->mem_region[i].secure_flag);
+	}
+
+	return 0;
+
+out:
+	penv->stats.msa_info_err++;
+	ICNSS_QMI_ASSERT();
+	return ret;
+}
+
+static int wlfw_msa_ready_send_sync_msg(void)
+{
+	int ret;
+	struct wlfw_msa_ready_req_msg_v01 req;
+	struct wlfw_msa_ready_resp_msg_v01 resp;
+	struct msg_desc req_desc, resp_desc;
+
+	if (!penv || !penv->wlfw_clnt)
+		return -ENODEV;
+
+	icnss_pr_dbg("Sending MSA ready request message, state: 0x%lx\n",
+		     penv->state);
+
+	memset(&req, 0, sizeof(req));
+	memset(&resp, 0, sizeof(resp));
+
+	req_desc.max_msg_len = WLFW_MSA_READY_REQ_MSG_V01_MAX_MSG_LEN;
+	req_desc.msg_id = QMI_WLFW_MSA_READY_REQ_V01;
+	req_desc.ei_array = wlfw_msa_ready_req_msg_v01_ei;
+
+	resp_desc.max_msg_len = WLFW_MSA_READY_RESP_MSG_V01_MAX_MSG_LEN;
+	resp_desc.msg_id = QMI_WLFW_MSA_READY_RESP_V01;
+	resp_desc.ei_array = wlfw_msa_ready_resp_msg_v01_ei;
+
+	penv->stats.msa_ready_req++;
+	ret = qmi_send_req_wait(penv->wlfw_clnt, &req_desc, &req, sizeof(req),
+			&resp_desc, &resp, sizeof(resp), WLFW_TIMEOUT_MS);
+	if (ret < 0) {
+		icnss_pr_err("Send MSA ready req failed %d\n", ret);
+		goto out;
+	}
+
+	if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
+		icnss_pr_err("QMI MSA ready request rejected: result:%d error:%d\n",
+			resp.resp.result, resp.resp.error);
+		ret = -resp.resp.result;
+		goto out;
+	}
+	penv->stats.msa_ready_resp++;
+
+	return 0;
+
+out:
+	penv->stats.msa_ready_err++;
+	ICNSS_QMI_ASSERT();
+	return ret;
+}
+
+static int wlfw_ind_register_send_sync_msg(void)
+{
+	int ret;
+	struct wlfw_ind_register_req_msg_v01 req;
+	struct wlfw_ind_register_resp_msg_v01 resp;
+	struct msg_desc req_desc, resp_desc;
+
+	if (!penv || !penv->wlfw_clnt)
+		return -ENODEV;
+
+	icnss_pr_dbg("Sending indication register message, state: 0x%lx\n",
+		     penv->state);
+
+	memset(&req, 0, sizeof(req));
+	memset(&resp, 0, sizeof(resp));
+
+	req.client_id_valid = 1;
+	req.client_id = WLFW_CLIENT_ID;
+	req.fw_ready_enable_valid = 1;
+	req.fw_ready_enable = 1;
+	req.msa_ready_enable_valid = 1;
+	req.msa_ready_enable = 1;
+	req.pin_connect_result_enable_valid = 1;
+	req.pin_connect_result_enable = 1;
+	if (test_bit(FW_REJUVENATE_ENABLE, &quirks)) {
+		req.rejuvenate_enable_valid = 1;
+		req.rejuvenate_enable = 1;
+	}
+
+	req_desc.max_msg_len = WLFW_IND_REGISTER_REQ_MSG_V01_MAX_MSG_LEN;
+	req_desc.msg_id = QMI_WLFW_IND_REGISTER_REQ_V01;
+	req_desc.ei_array = wlfw_ind_register_req_msg_v01_ei;
+
+	resp_desc.max_msg_len = WLFW_IND_REGISTER_RESP_MSG_V01_MAX_MSG_LEN;
+	resp_desc.msg_id = QMI_WLFW_IND_REGISTER_RESP_V01;
+	resp_desc.ei_array = wlfw_ind_register_resp_msg_v01_ei;
+
+	penv->stats.ind_register_req++;
+
+	ret = qmi_send_req_wait(penv->wlfw_clnt, &req_desc, &req, sizeof(req),
+				&resp_desc, &resp, sizeof(resp),
+				WLFW_TIMEOUT_MS);
+	if (ret < 0) {
+		icnss_pr_err("Send indication register req failed %d\n", ret);
+		goto out;
+	}
+
+	if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
+		icnss_pr_err("QMI indication register request rejected, resut:%d error:%d\n",
+		       resp.resp.result, resp.resp.error);
+		ret = -resp.resp.result;
+		goto out;
+	}
+	penv->stats.ind_register_resp++;
+
+	return 0;
+
+out:
+	penv->stats.ind_register_err++;
+	ICNSS_QMI_ASSERT();
+	return ret;
+}
+
+static int wlfw_cap_send_sync_msg(void)
+{
+	int ret;
+	struct wlfw_cap_req_msg_v01 req;
+	struct wlfw_cap_resp_msg_v01 resp;
+	struct msg_desc req_desc, resp_desc;
+
+	if (!penv || !penv->wlfw_clnt)
+		return -ENODEV;
+
+	icnss_pr_dbg("Sending capability message, state: 0x%lx\n", penv->state);
+
+	memset(&req, 0, sizeof(req));
+	memset(&resp, 0, sizeof(resp));
+
+	req_desc.max_msg_len = WLFW_CAP_REQ_MSG_V01_MAX_MSG_LEN;
+	req_desc.msg_id = QMI_WLFW_CAP_REQ_V01;
+	req_desc.ei_array = wlfw_cap_req_msg_v01_ei;
+
+	resp_desc.max_msg_len = WLFW_CAP_RESP_MSG_V01_MAX_MSG_LEN;
+	resp_desc.msg_id = QMI_WLFW_CAP_RESP_V01;
+	resp_desc.ei_array = wlfw_cap_resp_msg_v01_ei;
+
+	penv->stats.cap_req++;
+	ret = qmi_send_req_wait(penv->wlfw_clnt, &req_desc, &req, sizeof(req),
+				&resp_desc, &resp, sizeof(resp),
+				WLFW_TIMEOUT_MS);
+	if (ret < 0) {
+		icnss_pr_err("Send capability req failed %d\n", ret);
+		goto out;
+	}
+
+	if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
+		icnss_pr_err("QMI capability request rejected, result:%d error:%d\n",
+		       resp.resp.result, resp.resp.error);
+		ret = -resp.resp.result;
+		if (resp.resp.error == QMI_ERR_PLAT_CCPM_CLK_INIT_FAILED)
+			icnss_pr_err("RF card Not present");
+		goto out;
+	}
+
+	penv->stats.cap_resp++;
+	/* store cap locally */
+	if (resp.chip_info_valid)
+		penv->chip_info = resp.chip_info;
+	if (resp.board_info_valid)
+		penv->board_info = resp.board_info;
+	else
+		penv->board_info.board_id = 0xFF;
+	if (resp.soc_info_valid)
+		penv->soc_info = resp.soc_info;
+	if (resp.fw_version_info_valid)
+		penv->fw_version_info = resp.fw_version_info;
+	if (resp.fw_build_id_valid)
+		strlcpy(penv->fw_build_id, resp.fw_build_id,
+			QMI_WLFW_MAX_BUILD_ID_LEN_V01 + 1);
+
+	icnss_pr_dbg("Capability, chip_id: 0x%x, chip_family: 0x%x, board_id: 0x%x, soc_id: 0x%x, fw_version: 0x%x, fw_build_timestamp: %s, fw_build_id: %s",
+		     penv->chip_info.chip_id, penv->chip_info.chip_family,
+		     penv->board_info.board_id, penv->soc_info.soc_id,
+		     penv->fw_version_info.fw_version,
+		     penv->fw_version_info.fw_build_timestamp,
+		     penv->fw_build_id);
+
+	return 0;
+
+out:
+	penv->stats.cap_err++;
+	ICNSS_QMI_ASSERT();
+	return ret;
+}
+
+static int wlfw_wlan_mode_send_sync_msg(enum wlfw_driver_mode_enum_v01 mode)
+{
+	int ret;
+	struct wlfw_wlan_mode_req_msg_v01 req;
+	struct wlfw_wlan_mode_resp_msg_v01 resp;
+	struct msg_desc req_desc, resp_desc;
+
+	if (!penv || !penv->wlfw_clnt)
+		return -ENODEV;
+
+	/* During recovery, do not send a mode request for WLAN OFF, as the
+	 * FW is not able to process it.
+	 */
+	if (test_bit(ICNSS_PD_RESTART, &penv->state) &&
+	    mode == QMI_WLFW_OFF_V01)
+		return 0;
+
+	icnss_pr_dbg("Sending Mode request, state: 0x%lx, mode: %d\n",
+		     penv->state, mode);
+
+	memset(&req, 0, sizeof(req));
+	memset(&resp, 0, sizeof(resp));
+
+	req.mode = mode;
+	req.hw_debug_valid = 1;
+	req.hw_debug = !!test_bit(HW_DEBUG_ENABLE, &quirks);
+
+	req_desc.max_msg_len = WLFW_WLAN_MODE_REQ_MSG_V01_MAX_MSG_LEN;
+	req_desc.msg_id = QMI_WLFW_WLAN_MODE_REQ_V01;
+	req_desc.ei_array = wlfw_wlan_mode_req_msg_v01_ei;
+
+	resp_desc.max_msg_len = WLFW_WLAN_MODE_RESP_MSG_V01_MAX_MSG_LEN;
+	resp_desc.msg_id = QMI_WLFW_WLAN_MODE_RESP_V01;
+	resp_desc.ei_array = wlfw_wlan_mode_resp_msg_v01_ei;
+
+	penv->stats.mode_req++;
+	ret = qmi_send_req_wait(penv->wlfw_clnt, &req_desc, &req, sizeof(req),
+				&resp_desc, &resp, sizeof(resp),
+				WLFW_TIMEOUT_MS);
+	if (ret < 0) {
+		icnss_pr_err("Send mode req failed, mode: %d ret: %d\n",
+			     mode, ret);
+		goto out;
+	}
+
+	if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
+		icnss_pr_err("QMI mode request rejected, mode:%d result:%d error:%d\n",
+			     mode, resp.resp.result, resp.resp.error);
+		ret = -resp.resp.result;
+		goto out;
+	}
+	penv->stats.mode_resp++;
+
+	return 0;
+
+out:
+	penv->stats.mode_req_err++;
+	ICNSS_QMI_ASSERT();
+	return ret;
+}
+
+static int wlfw_wlan_cfg_send_sync_msg(struct wlfw_wlan_cfg_req_msg_v01 *data)
+{
+	int ret;
+	struct wlfw_wlan_cfg_req_msg_v01 req;
+	struct wlfw_wlan_cfg_resp_msg_v01 resp;
+	struct msg_desc req_desc, resp_desc;
+
+	if (!penv || !penv->wlfw_clnt)
+		return -ENODEV;
+
+	icnss_pr_dbg("Sending config request, state: 0x%lx\n", penv->state);
+
+	memset(&req, 0, sizeof(req));
+	memset(&resp, 0, sizeof(resp));
+
+	memcpy(&req, data, sizeof(req));
+
+	req_desc.max_msg_len = WLFW_WLAN_CFG_REQ_MSG_V01_MAX_MSG_LEN;
+	req_desc.msg_id = QMI_WLFW_WLAN_CFG_REQ_V01;
+	req_desc.ei_array = wlfw_wlan_cfg_req_msg_v01_ei;
+
+	resp_desc.max_msg_len = WLFW_WLAN_CFG_RESP_MSG_V01_MAX_MSG_LEN;
+	resp_desc.msg_id = QMI_WLFW_WLAN_CFG_RESP_V01;
+	resp_desc.ei_array = wlfw_wlan_cfg_resp_msg_v01_ei;
+
+	penv->stats.cfg_req++;
+	ret = qmi_send_req_wait(penv->wlfw_clnt, &req_desc, &req, sizeof(req),
+				&resp_desc, &resp, sizeof(resp),
+				WLFW_TIMEOUT_MS);
+	if (ret < 0) {
+		icnss_pr_err("Send config req failed %d\n", ret);
+		goto out;
+	}
+
+	if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
+		icnss_pr_err("QMI config request rejected, result:%d error:%d\n",
+		       resp.resp.result, resp.resp.error);
+		ret = -resp.resp.result;
+		goto out;
+	}
+	penv->stats.cfg_resp++;
+
+	return 0;
+
+out:
+	penv->stats.cfg_req_err++;
+	ICNSS_QMI_ASSERT();
+	return ret;
+}
+
+static int wlfw_ini_send_sync_msg(uint8_t fw_log_mode)
+{
+	int ret;
+	struct wlfw_ini_req_msg_v01 req;
+	struct wlfw_ini_resp_msg_v01 resp;
+	struct msg_desc req_desc, resp_desc;
+
+	if (!penv || !penv->wlfw_clnt)
+		return -ENODEV;
+
+	icnss_pr_dbg("Sending ini sync request, state: 0x%lx, fw_log_mode: %d\n",
+		     penv->state, fw_log_mode);
+
+	memset(&req, 0, sizeof(req));
+	memset(&resp, 0, sizeof(resp));
+
+	req.enablefwlog_valid = 1;
+	req.enablefwlog = fw_log_mode;
+
+	req_desc.max_msg_len = WLFW_INI_REQ_MSG_V01_MAX_MSG_LEN;
+	req_desc.msg_id = QMI_WLFW_INI_REQ_V01;
+	req_desc.ei_array = wlfw_ini_req_msg_v01_ei;
+
+	resp_desc.max_msg_len = WLFW_INI_RESP_MSG_V01_MAX_MSG_LEN;
+	resp_desc.msg_id = QMI_WLFW_INI_RESP_V01;
+	resp_desc.ei_array = wlfw_ini_resp_msg_v01_ei;
+
+	penv->stats.ini_req++;
+
+	ret = qmi_send_req_wait(penv->wlfw_clnt, &req_desc, &req, sizeof(req),
+			&resp_desc, &resp, sizeof(resp), WLFW_TIMEOUT_MS);
+	if (ret < 0) {
+		icnss_pr_err("Send INI req failed fw_log_mode: %d, ret: %d\n",
+			     fw_log_mode, ret);
+		goto out;
+	}
+
+	if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
+		icnss_pr_err("QMI INI request rejected, fw_log_mode:%d result:%d error:%d\n",
+			     fw_log_mode, resp.resp.result, resp.resp.error);
+		ret = -resp.resp.result;
+		goto out;
+	}
+	penv->stats.ini_resp++;
+
+	return 0;
+
+out:
+	penv->stats.ini_req_err++;
+	ICNSS_QMI_ASSERT();
+	return ret;
+}
+
+static int wlfw_athdiag_read_send_sync_msg(struct icnss_priv *priv,
+					   uint32_t offset, uint32_t mem_type,
+					   uint32_t data_len, uint8_t *data)
+{
+	int ret;
+	struct wlfw_athdiag_read_req_msg_v01 req;
+	struct wlfw_athdiag_read_resp_msg_v01 *resp = NULL;
+	struct msg_desc req_desc, resp_desc;
+
+	if (!priv->wlfw_clnt) {
+		ret = -ENODEV;
+		goto out;
+	}
+
+	icnss_pr_dbg("Diag read: state 0x%lx, offset %x, mem_type %x, data_len %u\n",
+		     priv->state, offset, mem_type, data_len);
+
+	resp = kzalloc(sizeof(*resp), GFP_KERNEL);
+	if (!resp) {
+		ret = -ENOMEM;
+		goto out;
+	}
+	memset(&req, 0, sizeof(req));
+
+	req.offset = offset;
+	req.mem_type = mem_type;
+	req.data_len = data_len;
+
+	req_desc.max_msg_len = WLFW_ATHDIAG_READ_REQ_MSG_V01_MAX_MSG_LEN;
+	req_desc.msg_id = QMI_WLFW_ATHDIAG_READ_REQ_V01;
+	req_desc.ei_array = wlfw_athdiag_read_req_msg_v01_ei;
+
+	resp_desc.max_msg_len = WLFW_ATHDIAG_READ_RESP_MSG_V01_MAX_MSG_LEN;
+	resp_desc.msg_id = QMI_WLFW_ATHDIAG_READ_RESP_V01;
+	resp_desc.ei_array = wlfw_athdiag_read_resp_msg_v01_ei;
+
+	ret = qmi_send_req_wait(priv->wlfw_clnt, &req_desc, &req, sizeof(req),
+				&resp_desc, resp, sizeof(*resp),
+				WLFW_TIMEOUT_MS);
+	if (ret < 0) {
+		icnss_pr_err("send athdiag read req failed %d\n", ret);
+		goto out;
+	}
+
+	if (resp->resp.result != QMI_RESULT_SUCCESS_V01) {
+		icnss_pr_err("QMI athdiag read request rejected, result:%d error:%d\n",
+			     resp->resp.result, resp->resp.error);
+		ret = -resp->resp.result;
+		goto out;
+	}
+
+	if (!resp->data_valid || resp->data_len < data_len) {
+		icnss_pr_err("Athdiag read data is invalid, data_valid = %u, data_len = %u\n",
+			     resp->data_valid, resp->data_len);
+		ret = -EINVAL;
+		goto out;
+	}
+
+	/* Copy only what the caller asked for; resp->data_len may be larger */
+	memcpy(data, resp->data, data_len);
+
+out:
+	kfree(resp);
+	return ret;
+}
+
+static int wlfw_athdiag_write_send_sync_msg(struct icnss_priv *priv,
+					    uint32_t offset, uint32_t mem_type,
+					    uint32_t data_len, uint8_t *data)
+{
+	int ret;
+	struct wlfw_athdiag_write_req_msg_v01 *req = NULL;
+	struct wlfw_athdiag_write_resp_msg_v01 resp;
+	struct msg_desc req_desc, resp_desc;
+
+	if (!priv->wlfw_clnt) {
+		ret = -ENODEV;
+		goto out;
+	}
+
+	icnss_pr_dbg("Diag write: state 0x%lx, offset %x, mem_type %x, data_len %u, data %p\n",
+		     priv->state, offset, mem_type, data_len, data);
+
+	req = kzalloc(sizeof(*req), GFP_KERNEL);
+	if (!req) {
+		ret = -ENOMEM;
+		goto out;
+	}
+	memset(&resp, 0, sizeof(resp));
+
+	req->offset = offset;
+	req->mem_type = mem_type;
+	req->data_len = data_len;
+	memcpy(req->data, data, data_len);
+
+	req_desc.max_msg_len = WLFW_ATHDIAG_WRITE_REQ_MSG_V01_MAX_MSG_LEN;
+	req_desc.msg_id = QMI_WLFW_ATHDIAG_WRITE_REQ_V01;
+	req_desc.ei_array = wlfw_athdiag_write_req_msg_v01_ei;
+
+	resp_desc.max_msg_len = WLFW_ATHDIAG_WRITE_RESP_MSG_V01_MAX_MSG_LEN;
+	resp_desc.msg_id = QMI_WLFW_ATHDIAG_WRITE_RESP_V01;
+	resp_desc.ei_array = wlfw_athdiag_write_resp_msg_v01_ei;
+
+	ret = qmi_send_req_wait(priv->wlfw_clnt, &req_desc, req, sizeof(*req),
+				&resp_desc, &resp, sizeof(resp),
+				WLFW_TIMEOUT_MS);
+	if (ret < 0) {
+		icnss_pr_err("send athdiag write req failed %d\n", ret);
+		goto out;
+	}
+
+	if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
+		icnss_pr_err("QMI athdiag write request rejected, result:%d error:%d\n",
+			     resp.resp.result, resp.resp.error);
+		ret = -resp.resp.result;
+		goto out;
+	}
+out:
+	kfree(req);
+	return ret;
+}
+
+static int icnss_decode_rejuvenate_ind(void *msg, unsigned int msg_len)
+{
+	struct msg_desc ind_desc;
+	struct wlfw_rejuvenate_ind_msg_v01 ind_msg;
+	int ret = 0;
+
+	if (!penv || !penv->wlfw_clnt) {
+		ret = -ENODEV;
+		goto out;
+	}
+
+	memset(&ind_msg, 0, sizeof(ind_msg));
+
+	ind_desc.msg_id = QMI_WLFW_REJUVENATE_IND_V01;
+	ind_desc.max_msg_len = WLFW_REJUVENATE_IND_MSG_V01_MAX_MSG_LEN;
+	ind_desc.ei_array = wlfw_rejuvenate_ind_msg_v01_ei;
+
+	ret = qmi_kernel_decode(&ind_desc, &ind_msg, msg, msg_len);
+	if (ret < 0) {
+		icnss_pr_err("Failed to decode rejuvenate ind message: ret %d, msg_len %u\n",
+			     ret, msg_len);
+		goto out;
+	}
+
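+	/*
+	 * All rejuvenate TLVs are optional; fall back to zero so stale
+	 * values from a previous indication are not reported.
+	 */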
+	if (ind_msg.cause_for_rejuvenation_valid)
+		penv->cause_for_rejuvenation = ind_msg.cause_for_rejuvenation;
+	else
+		penv->cause_for_rejuvenation = 0;
+	if (ind_msg.requesting_sub_system_valid)
+		penv->requesting_sub_system = ind_msg.requesting_sub_system;
+	else
+		penv->requesting_sub_system = 0;
+	if (ind_msg.line_number_valid)
+		penv->line_number = ind_msg.line_number;
+	else
+		penv->line_number = 0;
+	if (ind_msg.function_name_valid)
+		memcpy(penv->function_name, ind_msg.function_name,
+		       QMI_WLFW_FUNCTION_NAME_LEN_V01 + 1);
+	else
+		memset(penv->function_name, 0,
+		       QMI_WLFW_FUNCTION_NAME_LEN_V01 + 1);
+
+	icnss_pr_info("Cause for rejuvenation: 0x%x, requesting sub-system: 0x%x, line number: %u, function name: %s\n",
+		      penv->cause_for_rejuvenation,
+		      penv->requesting_sub_system,
+		      penv->line_number,
+		      penv->function_name);
+
+	penv->stats.rejuvenate_ind++;
+out:
+	return ret;
+}
+
+static int wlfw_rejuvenate_ack_send_sync_msg(struct icnss_priv *priv)
+{
+	int ret;
+	struct wlfw_rejuvenate_ack_req_msg_v01 req;
+	struct wlfw_rejuvenate_ack_resp_msg_v01 resp;
+	struct msg_desc req_desc, resp_desc;
+
+	icnss_pr_dbg("Sending rejuvenate ack request, state: 0x%lx\n",
+		     priv->state);
+
+	memset(&req, 0, sizeof(req));
+	memset(&resp, 0, sizeof(resp));
+
+	req_desc.max_msg_len = WLFW_REJUVENATE_ACK_REQ_MSG_V01_MAX_MSG_LEN;
+	req_desc.msg_id = QMI_WLFW_REJUVENATE_ACK_REQ_V01;
+	req_desc.ei_array = wlfw_rejuvenate_ack_req_msg_v01_ei;
+
+	resp_desc.max_msg_len = WLFW_REJUVENATE_ACK_RESP_MSG_V01_MAX_MSG_LEN;
+	resp_desc.msg_id = QMI_WLFW_REJUVENATE_ACK_RESP_V01;
+	resp_desc.ei_array = wlfw_rejuvenate_ack_resp_msg_v01_ei;
+
+	priv->stats.rejuvenate_ack_req++;
+	ret = qmi_send_req_wait(priv->wlfw_clnt, &req_desc, &req, sizeof(req),
+				&resp_desc, &resp, sizeof(resp),
+				WLFW_TIMEOUT_MS);
+	if (ret < 0) {
+		icnss_pr_err("Send rejuvenate ack req failed %d\n", ret);
+		goto out;
+	}
+
+	if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
+		icnss_pr_err("QMI rejuvenate ack request rejected, result:%d error %d\n",
+			     resp.resp.result, resp.resp.error);
+		ret = -resp.resp.result;
+		goto out;
+	}
+	priv->stats.rejuvenate_ack_resp++;
+	return 0;
+
+out:
+	priv->stats.rejuvenate_ack_err++;
+	ICNSS_QMI_ASSERT();
+	return ret;
+}
+
+static int wlfw_dynamic_feature_mask_send_sync_msg(struct icnss_priv *priv,
+					   uint64_t dynamic_feature_mask)
+{
+	int ret;
+	struct wlfw_dynamic_feature_mask_req_msg_v01 req;
+	struct wlfw_dynamic_feature_mask_resp_msg_v01 resp;
+	struct msg_desc req_desc, resp_desc;
+
+	if (!test_bit(ICNSS_WLFW_QMI_CONNECTED, &priv->state)) {
+		icnss_pr_err("Invalid state for dynamic feature: 0x%lx\n",
+			     priv->state);
+		return -EINVAL;
+	}
+
+	if (!test_bit(FW_REJUVENATE_ENABLE, &quirks)) {
+		icnss_pr_dbg("FW rejuvenate is disabled from quirks\n");
+		return 0;
+	}
+
+	icnss_pr_dbg("Sending dynamic feature mask request, val 0x%llx, state: 0x%lx\n",
+		     dynamic_feature_mask, priv->state);
+
+	memset(&req, 0, sizeof(req));
+	memset(&resp, 0, sizeof(resp));
+
+	req.mask_valid = 1;
+	req.mask = dynamic_feature_mask;
+
+	req_desc.max_msg_len =
+		WLFW_DYNAMIC_FEATURE_MASK_REQ_MSG_V01_MAX_MSG_LEN;
+	req_desc.msg_id = QMI_WLFW_DYNAMIC_FEATURE_MASK_REQ_V01;
+	req_desc.ei_array = wlfw_dynamic_feature_mask_req_msg_v01_ei;
+
+	resp_desc.max_msg_len =
+		WLFW_DYNAMIC_FEATURE_MASK_RESP_MSG_V01_MAX_MSG_LEN;
+	resp_desc.msg_id = QMI_WLFW_DYNAMIC_FEATURE_MASK_RESP_V01;
+	resp_desc.ei_array = wlfw_dynamic_feature_mask_resp_msg_v01_ei;
+
+	ret = qmi_send_req_wait(priv->wlfw_clnt, &req_desc, &req, sizeof(req),
+				&resp_desc, &resp, sizeof(resp),
+				WLFW_TIMEOUT_MS);
+	if (ret < 0) {
+		icnss_pr_err("Send dynamic feature mask req failed %d\n", ret);
+		goto out;
+	}
+
+	if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
+		icnss_pr_err("QMI dynamic feature mask request rejected, result:%d error %d\n",
+			     resp.resp.result, resp.resp.error);
+		ret = -resp.resp.result;
+		goto out;
+	}
+
+	icnss_pr_dbg("prev_mask_valid %u, prev_mask 0x%llx, curr_maks_valid %u, curr_mask 0x%llx\n",
+		     resp.prev_mask_valid, resp.prev_mask,
+		     resp.curr_mask_valid, resp.curr_mask);
+
+	return 0;
+
+out:
+	return ret;
+}
+
+static void icnss_qmi_wlfw_clnt_notify_work(struct work_struct *work)
+{
+	int ret;
+
+	if (!penv || !penv->wlfw_clnt)
+		return;
+
+	icnss_pr_vdbg("Receiving Event in work queue context\n");
+
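+	/*
+	 * Drain all queued QMI messages; qmi_recv_msg() returns -ENOMSG
+	 * once the receive queue is empty.
+	 */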
+	do {
+	} while ((ret = qmi_recv_msg(penv->wlfw_clnt)) == 0);
+
+	if (ret != -ENOMSG)
+		icnss_pr_err("Error receiving message: %d\n", ret);
+
+	icnss_pr_vdbg("Receiving Event completed\n");
+}
+
+static void icnss_qmi_wlfw_clnt_notify(struct qmi_handle *handle,
+			     enum qmi_event_type event, void *notify_priv)
+{
+	icnss_pr_vdbg("QMI client notify: %d\n", event);
+
+	if (!penv || !penv->wlfw_clnt)
+		return;
+
+	switch (event) {
+	case QMI_RECV_MSG:
+		schedule_work(&penv->qmi_recv_msg_work);
+		break;
+	default:
+		icnss_pr_dbg("Unknown Event:  %d\n", event);
+		break;
+	}
+}
+
+static int icnss_call_driver_uevent(struct icnss_priv *priv,
+				    enum icnss_uevent uevent, void *data)
+{
+	struct icnss_uevent_data uevent_data;
+
+	if (!priv->ops || !priv->ops->uevent)
+		return 0;
+
+	icnss_pr_dbg("Calling driver uevent state: 0x%lx, uevent: %d\n",
+		     priv->state, uevent);
+
+	uevent_data.uevent = uevent;
+	uevent_data.data = data;
+
+	return priv->ops->uevent(&priv->pdev->dev, &uevent_data);
+}
+
+static void icnss_qmi_wlfw_clnt_ind(struct qmi_handle *handle,
+			  unsigned int msg_id, void *msg,
+			  unsigned int msg_len, void *ind_cb_priv)
+{
+	struct icnss_event_pd_service_down_data *event_data;
+	struct icnss_uevent_fw_down_data fw_down_data;
+
+	if (!penv)
+		return;
+
+	icnss_pr_dbg("Received Ind 0x%x, msg_len: %d\n", msg_id, msg_len);
+
+	if (test_bit(ICNSS_FW_DOWN, &penv->state)) {
+		icnss_pr_dbg("FW down, ignoring 0x%x, state: 0x%lx\n",
+				msg_id, penv->state);
+		return;
+	}
+
+	switch (msg_id) {
+	case QMI_WLFW_FW_READY_IND_V01:
+		icnss_driver_event_post(ICNSS_DRIVER_EVENT_FW_READY_IND,
+					0, NULL);
+		break;
+	case QMI_WLFW_MSA_READY_IND_V01:
+		icnss_pr_dbg("Received MSA Ready Indication msg_id 0x%x\n",
+			     msg_id);
+		penv->stats.msa_ready_ind++;
+		break;
+	case QMI_WLFW_PIN_CONNECT_RESULT_IND_V01:
+		icnss_pr_dbg("Received Pin Connect Test Result msg_id 0x%x\n",
+			     msg_id);
+		icnss_qmi_pin_connect_result_ind(msg, msg_len);
+		break;
+	case QMI_WLFW_REJUVENATE_IND_V01:
+		icnss_pr_dbg("Received Rejuvenate Indication msg_id 0x%x, state: 0x%lx\n",
+			     msg_id, penv->state);
+
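+		/*
+		 * Rejuvenate means the WLAN FW restarted on its own;
+		 * treat it like a crash so the PD-down path runs.
+		 */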
+		icnss_ignore_qmi_timeout(true);
+		icnss_decode_rejuvenate_ind(msg, msg_len);
+		event_data = kzalloc(sizeof(*event_data), GFP_KERNEL);
+		if (event_data == NULL)
+			return;
+		event_data->crashed = true;
+		event_data->fw_rejuvenate = true;
+		fw_down_data.crashed = true;
+		icnss_call_driver_uevent(penv, ICNSS_UEVENT_FW_DOWN,
+					 &fw_down_data);
+		icnss_driver_event_post(ICNSS_DRIVER_EVENT_PD_SERVICE_DOWN,
+					0, event_data);
+		break;
+	default:
+		icnss_pr_err("Invalid msg_id 0x%x\n", msg_id);
+		break;
+	}
+}
+
+static int icnss_driver_event_server_arrive(void *data)
+{
+	int ret = 0;
+
+	if (!penv)
+		return -ENODEV;
+
+	set_bit(ICNSS_WLFW_EXISTS, &penv->state);
+	clear_bit(ICNSS_FW_DOWN, &penv->state);
+
+	penv->wlfw_clnt = qmi_handle_create(icnss_qmi_wlfw_clnt_notify, penv);
+	if (!penv->wlfw_clnt) {
+		icnss_pr_err("QMI client handle create failed\n");
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	ret = qmi_connect_to_service(penv->wlfw_clnt, WLFW_SERVICE_ID_V01,
+				     WLFW_SERVICE_VERS_V01,
+				     WLFW_SERVICE_INS_ID_V01);
+	if (ret < 0) {
+		icnss_pr_err("QMI WLAN Service not found : %d\n", ret);
+		goto fail;
+	}
+
+	ret = qmi_register_ind_cb(penv->wlfw_clnt,
+				  icnss_qmi_wlfw_clnt_ind, penv);
+	if (ret < 0) {
+		icnss_pr_err("Failed to register indication callback: %d\n",
+			     ret);
+		goto fail;
+	}
+
+	set_bit(ICNSS_WLFW_QMI_CONNECTED, &penv->state);
+
+	icnss_pr_info("QMI Server Connected: state: 0x%lx\n", penv->state);
+
+	ret = icnss_hw_power_on(penv);
+	if (ret)
+		goto fail;
+
+	ret = wlfw_ind_register_send_sync_msg();
+	if (ret < 0)
+		goto err_power_on;
+
+	if (!penv->msa_va) {
+		icnss_pr_err("Invalid MSA address\n");
+		ret = -EINVAL;
+		goto err_power_on;
+	}
+
+	ret = wlfw_msa_mem_info_send_sync_msg();
+	if (ret < 0)
+		goto err_power_on;
+
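+	/*
+	 * Hand the MSA0 region over to the WLAN hardware once; it is
+	 * only reclaimed on error or for ramdump collection.
+	 */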
+	if (!test_bit(ICNSS_MSA0_ASSIGNED, &penv->state)) {
+		ret = icnss_assign_msa_perm_all(penv, ICNSS_MSA_PERM_WLAN_HW_RW);
+		if (ret < 0)
+			goto err_power_on;
+		set_bit(ICNSS_MSA0_ASSIGNED, &penv->state);
+	}
+
+	ret = wlfw_msa_ready_send_sync_msg();
+	if (ret < 0)
+		goto err_setup_msa;
+
+	ret = wlfw_cap_send_sync_msg();
+	if (ret < 0)
+		goto err_setup_msa;
+
+	wlfw_dynamic_feature_mask_send_sync_msg(penv,
+						dynamic_feature_mask);
+
+	icnss_init_vph_monitor(penv);
+
+	return ret;
+
+err_setup_msa:
+	icnss_assign_msa_perm_all(penv, ICNSS_MSA_PERM_HLOS_ALL);
+err_power_on:
+	icnss_hw_power_off(penv);
+fail:
+	qmi_handle_destroy(penv->wlfw_clnt);
+	penv->wlfw_clnt = NULL;
+out:
+	ICNSS_ASSERT(0);
+	return ret;
+}
+
+static int icnss_driver_event_server_exit(void *data)
+{
+	if (!penv || !penv->wlfw_clnt)
+		return -ENODEV;
+
+	icnss_pr_info("QMI Service Disconnected: 0x%lx\n", penv->state);
+
+	if (!test_bit(VBATT_DISABLE, &quirks) && penv->adc_tm_dev)
+		qpnp_adc_tm_disable_chan_meas(penv->adc_tm_dev,
+					      &penv->vph_monitor_params);
+
+	qmi_handle_destroy(penv->wlfw_clnt);
+
+	clear_bit(ICNSS_WLFW_QMI_CONNECTED, &penv->state);
+	penv->wlfw_clnt = NULL;
+
+	return 0;
+}
+
+static int icnss_call_driver_probe(struct icnss_priv *priv)
+{
+	int ret = 0;
+	int probe_cnt = 0;
+
+	if (!priv->ops || !priv->ops->probe)
+		return 0;
+
+	if (test_bit(ICNSS_DRIVER_PROBED, &priv->state))
+		return -EINVAL;
+
+	icnss_pr_dbg("Calling driver probe state: 0x%lx\n", priv->state);
+
+	icnss_hw_power_on(priv);
+
+	while (probe_cnt < ICNSS_MAX_PROBE_CNT) {
+		ret = priv->ops->probe(&priv->pdev->dev);
+		probe_cnt++;
+		if (ret != -EPROBE_DEFER)
+			break;
+	}
+	if (ret < 0) {
+		icnss_pr_err("Driver probe failed: %d, state: 0x%lx, probe_cnt: %d\n",
+			     ret, priv->state, probe_cnt);
+		goto out;
+	}
+
+	set_bit(ICNSS_DRIVER_PROBED, &priv->state);
+
+	return 0;
+
+out:
+	icnss_hw_power_off(priv);
+	return ret;
+}
+
+static int icnss_call_driver_shutdown(struct icnss_priv *priv)
+{
+	if (!test_bit(ICNSS_DRIVER_PROBED, &penv->state))
+		goto out;
+
+	if (!priv->ops || !priv->ops->shutdown)
+		goto out;
+
+	if (test_bit(ICNSS_SHUTDOWN_DONE, &penv->state))
+		goto out;
+
+	icnss_pr_dbg("Calling driver shutdown state: 0x%lx\n", priv->state);
+
+	priv->ops->shutdown(&priv->pdev->dev);
+	set_bit(ICNSS_SHUTDOWN_DONE, &penv->state);
+
+out:
+	return 0;
+}
+
+static int icnss_pd_restart_complete(struct icnss_priv *priv)
+{
+	int ret;
+
+	icnss_pm_relax(priv);
+
+	icnss_call_driver_shutdown(priv);
+
+	clear_bit(ICNSS_PD_RESTART, &priv->state);
+
+	if (!priv->ops || !priv->ops->reinit)
+		goto out;
+
+	if (test_bit(ICNSS_FW_DOWN, &priv->state)) {
+		icnss_pr_err("FW is in bad state, state: 0x%lx\n",
+			     priv->state);
+		goto out;
+	}
+
+	if (!test_bit(ICNSS_DRIVER_PROBED, &priv->state))
+		goto call_probe;
+
+	icnss_pr_dbg("Calling driver reinit state: 0x%lx\n", priv->state);
+
+	icnss_hw_power_on(priv);
+
+	ret = priv->ops->reinit(&priv->pdev->dev);
+	if (ret < 0) {
+		icnss_pr_err("Driver reinit failed: %d, state: 0x%lx\n",
+			     ret, priv->state);
+		ICNSS_ASSERT(false);
+		goto out_power_off;
+	}
+
+out:
+	clear_bit(ICNSS_SHUTDOWN_DONE, &penv->state);
+	return 0;
+
+call_probe:
+	return icnss_call_driver_probe(priv);
+
+out_power_off:
+	icnss_hw_power_off(priv);
+
+	return ret;
+}
+
+static int icnss_driver_event_fw_ready_ind(void *data)
+{
+	int ret = 0;
+
+	if (!penv)
+		return -ENODEV;
+
+	set_bit(ICNSS_FW_READY, &penv->state);
+
+	icnss_call_driver_uevent(penv, ICNSS_UEVENT_FW_READY, NULL);
+
+	icnss_pr_info("WLAN FW is ready: 0x%lx\n", penv->state);
+
+	icnss_hw_power_off(penv);
+
+	if (!penv->pdev) {
+		icnss_pr_err("Device is not ready\n");
+		ret = -ENODEV;
+		goto out;
+	}
+
+	if (test_bit(ICNSS_PD_RESTART, &penv->state))
+		ret = icnss_pd_restart_complete(penv);
+	else
+		ret = icnss_call_driver_probe(penv);
+
+out:
+	return ret;
+}
+
+static int icnss_driver_event_register_driver(void *data)
+{
+	int ret = 0;
+	int probe_cnt = 0;
+
+	if (penv->ops)
+		return -EEXIST;
+
+	penv->ops = data;
+
+	if (test_bit(SKIP_QMI, &quirks))
+		set_bit(ICNSS_FW_READY, &penv->state);
+
+	if (test_bit(ICNSS_FW_DOWN, &penv->state)) {
+		icnss_pr_err("FW is in bad state, state: 0x%lx\n",
+			     penv->state);
+		return -ENODEV;
+	}
+
+	if (!test_bit(ICNSS_FW_READY, &penv->state)) {
+		icnss_pr_dbg("FW is not ready yet, state: 0x%lx\n",
+			     penv->state);
+		goto out;
+	}
+
+	ret = icnss_hw_power_on(penv);
+	if (ret)
+		goto out;
+
+	while (probe_cnt < ICNSS_MAX_PROBE_CNT) {
+		ret = penv->ops->probe(&penv->pdev->dev);
+		probe_cnt++;
+		if (ret != -EPROBE_DEFER)
+			break;
+	}
+	if (ret) {
+		icnss_pr_err("Driver probe failed: %d, state: 0x%lx, probe_cnt: %d\n",
+			     ret, penv->state, probe_cnt);
+		goto power_off;
+	}
+
+	set_bit(ICNSS_DRIVER_PROBED, &penv->state);
+
+	return 0;
+
+power_off:
+	icnss_hw_power_off(penv);
+out:
+	return ret;
+}
+
+static int icnss_driver_event_unregister_driver(void *data)
+{
+	if (!test_bit(ICNSS_DRIVER_PROBED, &penv->state)) {
+		penv->ops = NULL;
+		goto out;
+	}
+
+	set_bit(ICNSS_DRIVER_UNLOADING, &penv->state);
+	if (penv->ops)
+		penv->ops->remove(&penv->pdev->dev);
+
+	clear_bit(ICNSS_DRIVER_UNLOADING, &penv->state);
+	clear_bit(ICNSS_DRIVER_PROBED, &penv->state);
+
+	penv->ops = NULL;
+
+	icnss_hw_power_off(penv);
+
+out:
+	return 0;
+}
+
+static int icnss_fw_crashed(struct icnss_priv *priv,
+			    struct icnss_event_pd_service_down_data *event_data)
+{
+	icnss_pr_dbg("FW crashed, state: 0x%lx\n", priv->state);
+
+	set_bit(ICNSS_PD_RESTART, &priv->state);
+	clear_bit(ICNSS_FW_READY, &priv->state);
+
+	icnss_pm_stay_awake(priv);
+
+	if (test_bit(ICNSS_DRIVER_PROBED, &priv->state))
+		icnss_call_driver_uevent(priv, ICNSS_UEVENT_FW_CRASHED, NULL);
+
+	if (event_data->fw_rejuvenate)
+		wlfw_rejuvenate_ack_send_sync_msg(priv);
+
+	return 0;
+}
+
+static int icnss_driver_event_pd_service_down(struct icnss_priv *priv,
+					      void *data)
+{
+	int ret = 0;
+	struct icnss_event_pd_service_down_data *event_data = data;
+
+	if (!test_bit(ICNSS_WLFW_EXISTS, &priv->state))
+		goto out;
+
+	if (test_bit(ICNSS_PD_RESTART, &priv->state) && event_data->crashed) {
+		icnss_pr_err("PD Down while recovery inprogress, crashed: %d, state: 0x%lx\n",
+			     event_data->crashed, priv->state);
+		ICNSS_ASSERT(0);
+		goto out;
+	}
+
+	icnss_fw_crashed(priv, event_data);
+
+out:
+	kfree(data);
+
+	icnss_ignore_qmi_timeout(false);
+
+	return ret;
+}
+
+static void icnss_driver_event_work(struct work_struct *work)
+{
+	struct icnss_driver_event *event;
+	unsigned long flags;
+	int ret;
+
+	icnss_pm_stay_awake(penv);
+
+	spin_lock_irqsave(&penv->event_lock, flags);
+
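+	/*
+	 * Pop one event at a time, dropping the lock while a handler
+	 * runs so handlers can post new events.
+	 */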
+	while (!list_empty(&penv->event_list)) {
+		event = list_first_entry(&penv->event_list,
+					 struct icnss_driver_event, list);
+		list_del(&event->list);
+		spin_unlock_irqrestore(&penv->event_lock, flags);
+
+		icnss_pr_dbg("Processing event: %s%s(%d), state: 0x%lx\n",
+			     icnss_driver_event_to_str(event->type),
+			     event->sync ? "-sync" : "", event->type,
+			     penv->state);
+
+		switch (event->type) {
+		case ICNSS_DRIVER_EVENT_SERVER_ARRIVE:
+			ret = icnss_driver_event_server_arrive(event->data);
+			break;
+		case ICNSS_DRIVER_EVENT_SERVER_EXIT:
+			ret = icnss_driver_event_server_exit(event->data);
+			break;
+		case ICNSS_DRIVER_EVENT_FW_READY_IND:
+			ret = icnss_driver_event_fw_ready_ind(event->data);
+			break;
+		case ICNSS_DRIVER_EVENT_REGISTER_DRIVER:
+			ret = icnss_driver_event_register_driver(event->data);
+			break;
+		case ICNSS_DRIVER_EVENT_UNREGISTER_DRIVER:
+			ret = icnss_driver_event_unregister_driver(event->data);
+			break;
+		case ICNSS_DRIVER_EVENT_PD_SERVICE_DOWN:
+			ret = icnss_driver_event_pd_service_down(penv,
+								 event->data);
+			break;
+		default:
+			icnss_pr_err("Invalid Event type: %d", event->type);
+			kfree(event);
+			continue;
+		}
+
+		penv->stats.events[event->type].processed++;
+
+		icnss_pr_dbg("Event Processed: %s%s(%d), ret: %d, state: 0x%lx\n",
+			     icnss_driver_event_to_str(event->type),
+			     event->sync ? "-sync" : "", event->type, ret,
+			     penv->state);
+
+		spin_lock_irqsave(&penv->event_lock, flags);
+		if (event->sync) {
+			event->ret = ret;
+			complete(&event->complete);
+			continue;
+		}
+		spin_unlock_irqrestore(&penv->event_lock, flags);
+
+		kfree(event);
+
+		spin_lock_irqsave(&penv->event_lock, flags);
+	}
+	spin_unlock_irqrestore(&penv->event_lock, flags);
+
+	icnss_pm_relax(penv);
+}
+
+static int icnss_qmi_wlfw_clnt_svc_event_notify(struct notifier_block *this,
+					       unsigned long code,
+					       void *_cmd)
+{
+	int ret = 0;
+
+	if (!penv)
+		return -ENODEV;
+
+	icnss_pr_dbg("Event Notify: code: %ld", code);
+
+	switch (code) {
+	case QMI_SERVER_ARRIVE:
+		ret = icnss_driver_event_post(ICNSS_DRIVER_EVENT_SERVER_ARRIVE,
+					      0, NULL);
+		break;
+
+	case QMI_SERVER_EXIT:
+		ret = icnss_driver_event_post(ICNSS_DRIVER_EVENT_SERVER_EXIT,
+					      0, NULL);
+		break;
+	default:
+		icnss_pr_dbg("Invalid code: %ld", code);
+		break;
+	}
+	return ret;
+}
+
+static int icnss_msa0_ramdump(struct icnss_priv *priv)
+{
+	struct ramdump_segment segment;
+
+	memset(&segment, 0, sizeof(segment));
+	segment.v_address = priv->msa_va;
+	segment.size = priv->msa_mem_size;
+	return do_ramdump(priv->msa0_dump_dev, &segment, 1);
+}
+
+static struct notifier_block wlfw_clnt_nb = {
+	.notifier_call = icnss_qmi_wlfw_clnt_svc_event_notify,
+};
+
+static int icnss_modem_notifier_nb(struct notifier_block *nb,
+				  unsigned long code,
+				  void *data)
+{
+	struct icnss_event_pd_service_down_data *event_data;
+	struct notif_data *notif = data;
+	struct icnss_priv *priv = container_of(nb, struct icnss_priv,
+					       modem_ssr_nb);
+	struct icnss_uevent_fw_down_data fw_down_data;
+	int ret = 0;
+
+	icnss_pr_vdbg("Modem-Notify: event %lu\n", code);
+
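+	/*
+	 * After a fatal modem crash, take MSA0 back briefly so the apps
+	 * processor can dump it, then return it to the WLAN hardware.
+	 */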
+	if (code == SUBSYS_AFTER_SHUTDOWN &&
+	    notif->crashed == CRASH_STATUS_ERR_FATAL) {
+		ret = icnss_assign_msa_perm_all(priv,
+						ICNSS_MSA_PERM_DUMP_COLLECT);
+		if (!ret) {
+			icnss_pr_info("Collecting msa0 segment dump\n");
+			icnss_msa0_ramdump(priv);
+			icnss_assign_msa_perm_all(priv,
+						  ICNSS_MSA_PERM_WLAN_HW_RW);
+		} else {
+			icnss_pr_err("Not able to Collect msa0 segment dump"
+				     "Apps permissions not assigned %d\n", ret);
+		}
+		return NOTIFY_OK;
+	}
+
+	if (code != SUBSYS_BEFORE_SHUTDOWN)
+		return NOTIFY_OK;
+
+	if (test_bit(ICNSS_PDR_REGISTERED, &priv->state)) {
+		set_bit(ICNSS_FW_DOWN, &priv->state);
+		icnss_ignore_qmi_timeout(true);
+
+		fw_down_data.crashed = !!notif->crashed;
+		if (test_bit(ICNSS_FW_READY, &priv->state) &&
+		    !test_bit(ICNSS_DRIVER_UNLOADING, &priv->state))
+			icnss_call_driver_uevent(priv,
+						 ICNSS_UEVENT_FW_DOWN,
+						 &fw_down_data);
+		return NOTIFY_OK;
+	}
+
+	icnss_pr_info("Modem went down, state: 0x%lx, crashed: %d\n",
+		      priv->state, notif->crashed);
+
+	set_bit(ICNSS_FW_DOWN, &priv->state);
+
+	if (notif->crashed)
+		priv->stats.recovery.root_pd_crash++;
+	else
+		priv->stats.recovery.root_pd_shutdown++;
+
+	icnss_ignore_qmi_timeout(true);
+
+	event_data = kzalloc(sizeof(*event_data), GFP_KERNEL);
+
+	if (event_data == NULL)
+		return notifier_from_errno(-ENOMEM);
+
+	event_data->crashed = notif->crashed;
+
+	fw_down_data.crashed = !!notif->crashed;
+	icnss_call_driver_uevent(priv, ICNSS_UEVENT_FW_DOWN, &fw_down_data);
+
+	icnss_driver_event_post(ICNSS_DRIVER_EVENT_PD_SERVICE_DOWN,
+				ICNSS_EVENT_SYNC, event_data);
+
+	return NOTIFY_OK;
+}
+
+static int icnss_modem_ssr_register_notifier(struct icnss_priv *priv)
+{
+	int ret = 0;
+
+	priv->modem_ssr_nb.notifier_call = icnss_modem_notifier_nb;
+
+	priv->modem_notify_handler =
+		subsys_notif_register_notifier("modem", &priv->modem_ssr_nb);
+
+	if (IS_ERR(priv->modem_notify_handler)) {
+		ret = PTR_ERR(priv->modem_notify_handler);
+		icnss_pr_err("Modem register notifier failed: %d\n", ret);
+		priv->modem_notify_handler = NULL;
+		return ret;
+	}
+
+	set_bit(ICNSS_SSR_REGISTERED, &priv->state);
+
+	return ret;
+}
+
+static int icnss_modem_ssr_unregister_notifier(struct icnss_priv *priv)
+{
+	if (!test_and_clear_bit(ICNSS_SSR_REGISTERED, &priv->state))
+		return 0;
+
+	subsys_notif_unregister_notifier(priv->modem_notify_handler,
+					 &priv->modem_ssr_nb);
+	priv->modem_notify_handler = NULL;
+
+	return 0;
+}
+
+static int icnss_pdr_unregister_notifier(struct icnss_priv *priv)
+{
+	int i;
+
+	if (!test_and_clear_bit(ICNSS_PDR_REGISTERED, &priv->state))
+		return 0;
+
+	for (i = 0; i < priv->total_domains; i++)
+		service_notif_unregister_notifier(
+				priv->service_notifier[i].handle,
+				&priv->service_notifier_nb);
+
+	kfree(priv->service_notifier);
+
+	priv->service_notifier = NULL;
+
+	return 0;
+}
+
+static int icnss_service_notifier_notify(struct notifier_block *nb,
+					 unsigned long notification, void *data)
+{
+	struct icnss_priv *priv = container_of(nb, struct icnss_priv,
+					       service_notifier_nb);
+	enum pd_subsys_state *state = data;
+	struct icnss_event_pd_service_down_data *event_data;
+	struct icnss_uevent_fw_down_data fw_down_data;
+	enum icnss_pdr_cause_index cause = ICNSS_ROOT_PD_CRASH;
+
+	icnss_pr_dbg("PD service notification: 0x%lx state: 0x%lx\n",
+		     notification, priv->state);
+
+	if (notification != SERVREG_NOTIF_SERVICE_STATE_DOWN_V01)
+		goto done;
+
+	event_data = kzalloc(sizeof(*event_data), GFP_KERNEL);
+
+	if (event_data == NULL)
+		return notifier_from_errno(-ENOMEM);
+
+	event_data->crashed = true;
+
+	if (state == NULL) {
+		priv->stats.recovery.root_pd_crash++;
+		goto event_post;
+	}
+
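+	/* Map the PD state reported by the notifier onto a recovery cause */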
+	switch (*state) {
+	case ROOT_PD_WDOG_BITE:
+		priv->stats.recovery.root_pd_crash++;
+		break;
+	case ROOT_PD_SHUTDOWN:
+		cause = ICNSS_ROOT_PD_SHUTDOWN;
+		priv->stats.recovery.root_pd_shutdown++;
+		event_data->crashed = false;
+		break;
+	case USER_PD_STATE_CHANGE:
+		if (test_bit(ICNSS_HOST_TRIGGERED_PDR, &priv->state)) {
+			cause = ICNSS_HOST_ERROR;
+			priv->stats.recovery.pdr_host_error++;
+		} else {
+			cause = ICNSS_FW_CRASH;
+			priv->stats.recovery.pdr_fw_crash++;
+		}
+		break;
+	default:
+		priv->stats.recovery.root_pd_crash++;
+		break;
+	}
+
+	icnss_pr_info("PD service down, pd_state: %d, state: 0x%lx: cause: %s\n",
+		      *state, priv->state, icnss_pdr_cause[cause]);
+event_post:
+	if (!test_bit(ICNSS_FW_DOWN, &priv->state)) {
+		set_bit(ICNSS_FW_DOWN, &priv->state);
+		icnss_ignore_qmi_timeout(true);
+
+		fw_down_data.crashed = event_data->crashed;
+		if (test_bit(ICNSS_FW_READY, &priv->state) &&
+		    !test_bit(ICNSS_DRIVER_UNLOADING, &priv->state))
+			icnss_call_driver_uevent(priv,
+						 ICNSS_UEVENT_FW_DOWN,
+						 &fw_down_data);
+	}
+
+	clear_bit(ICNSS_HOST_TRIGGERED_PDR, &priv->state);
+	icnss_driver_event_post(ICNSS_DRIVER_EVENT_PD_SERVICE_DOWN,
+				ICNSS_EVENT_SYNC, event_data);
+done:
+	if (notification == SERVREG_NOTIF_SERVICE_STATE_UP_V01)
+		clear_bit(ICNSS_FW_DOWN, &priv->state);
+	return NOTIFY_OK;
+}
+
+static int icnss_get_service_location_notify(struct notifier_block *nb,
+					     unsigned long opcode, void *data)
+{
+	struct icnss_priv *priv = container_of(nb, struct icnss_priv,
+					       get_service_nb);
+	struct pd_qmi_client_data *pd = data;
+	int curr_state;
+	int ret;
+	int i;
+	struct service_notifier_context *notifier;
+
+	icnss_pr_dbg("Get service notify opcode: %lu, state: 0x%lx\n", opcode,
+		     priv->state);
+
+	if (opcode != LOCATOR_UP)
+		return NOTIFY_DONE;
+
+	if (pd->total_domains == 0) {
+		icnss_pr_err("Did not find any domains\n");
+		ret = -ENOENT;
+		goto out;
+	}
+
+	notifier = kcalloc(pd->total_domains,
+				sizeof(struct service_notifier_context),
+				GFP_KERNEL);
+	if (!notifier) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	priv->service_notifier_nb.notifier_call = icnss_service_notifier_notify;
+
+	for (i = 0; i < pd->total_domains; i++) {
+		icnss_pr_dbg("%d: domain_name: %s, instance_id: %d\n", i,
+			     pd->domain_list[i].name,
+			     pd->domain_list[i].instance_id);
+
+		notifier[i].handle =
+			service_notif_register_notifier(pd->domain_list[i].name,
+				pd->domain_list[i].instance_id,
+				&priv->service_notifier_nb, &curr_state);
+		notifier[i].instance_id = pd->domain_list[i].instance_id;
+		strlcpy(notifier[i].name, pd->domain_list[i].name,
+			QMI_SERVREG_LOC_NAME_LENGTH_V01 + 1);
+
+		if (IS_ERR(notifier[i].handle)) {
+			icnss_pr_err("%d: Unable to register notifier for %s(0x%x)\n",
+				     i, pd->domain_list->name,
+				     pd->domain_list->instance_id);
+			ret = PTR_ERR(notifier[i].handle);
+			goto free_handle;
+		}
+	}
+
+	priv->service_notifier = notifier;
+	priv->total_domains = pd->total_domains;
+
+	set_bit(ICNSS_PDR_REGISTERED, &priv->state);
+
+	icnss_pr_dbg("PD notification registration happened, state: 0x%lx\n",
+		     priv->state);
+
+	return NOTIFY_OK;
+
+free_handle:
+	for (i = 0; i < pd->total_domains; i++) {
+		if (!IS_ERR_OR_NULL(notifier[i].handle))
+			service_notif_unregister_notifier(notifier[i].handle,
+					&priv->service_notifier_nb);
+	}
+	kfree(notifier);
+
+out:
+	icnss_pr_err("PD restart not enabled: %d, state: 0x%lx\n", ret,
+		     priv->state);
+
+	return NOTIFY_OK;
+}
+
+static int icnss_pd_restart_enable(struct icnss_priv *priv)
+{
+	int ret;
+
+	if (test_bit(SSR_ONLY, &quirks)) {
+		icnss_pr_dbg("PDR disabled through module parameter\n");
+		return 0;
+	}
+
+	icnss_pr_dbg("Get service location, state: 0x%lx\n", priv->state);
+
+	priv->get_service_nb.notifier_call = icnss_get_service_location_notify;
+	ret = get_service_location(ICNSS_SERVICE_LOCATION_CLIENT_NAME,
+				   ICNSS_WLAN_SERVICE_NAME,
+				   &priv->get_service_nb);
+	if (ret) {
+		icnss_pr_err("Get service location failed: %d\n", ret);
+		goto out;
+	}
+
+	return 0;
+out:
+	icnss_pr_err("Failed to enable PD restart: %d\n", ret);
+	return ret;
+}
+
+static int icnss_enable_recovery(struct icnss_priv *priv)
+{
+	int ret;
+
+	if (test_bit(RECOVERY_DISABLE, &quirks)) {
+		icnss_pr_dbg("Recovery disabled through module parameter\n");
+		return 0;
+	}
+
+	if (test_bit(PDR_ONLY, &quirks)) {
+		icnss_pr_dbg("SSR disabled through module parameter\n");
+		goto enable_pdr;
+	}
+
+	priv->msa0_dump_dev = create_ramdump_device("wcss_msa0",
+						    &priv->pdev->dev);
+	if (!priv->msa0_dump_dev)
+		return -ENOMEM;
+
+	icnss_modem_ssr_register_notifier(priv);
+	if (test_bit(SSR_ONLY, &quirks)) {
+		icnss_pr_dbg("PDR disabled through module parameter\n");
+		return 0;
+	}
+
+enable_pdr:
+	ret = icnss_pd_restart_enable(priv);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+int __icnss_register_driver(struct icnss_driver_ops *ops,
+			    struct module *owner, const char *mod_name)
+{
+	int ret = 0;
+
+	if (!penv || !penv->pdev) {
+		ret = -ENODEV;
+		goto out;
+	}
+
+	icnss_pr_dbg("Registering driver, state: 0x%lx\n", penv->state);
+
+	if (penv->ops) {
+		icnss_pr_err("Driver already registered\n");
+		ret = -EEXIST;
+		goto out;
+	}
+
+	if (!ops->probe || !ops->remove) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	ret = icnss_driver_event_post(ICNSS_DRIVER_EVENT_REGISTER_DRIVER,
+				      0, ops);
+
+	if (ret == -EINTR)
+		ret = 0;
+
+out:
+	return ret;
+}
+EXPORT_SYMBOL(__icnss_register_driver);
+
+int icnss_unregister_driver(struct icnss_driver_ops *ops)
+{
+	int ret;
+
+	if (!penv || !penv->pdev) {
+		ret = -ENODEV;
+		goto out;
+	}
+
+	icnss_pr_dbg("Unregistering driver, state: 0x%lx\n", penv->state);
+
+	if (!penv->ops) {
+		icnss_pr_err("Driver not registered\n");
+		ret = -ENOENT;
+		goto out;
+	}
+
+	ret = icnss_driver_event_post(ICNSS_DRIVER_EVENT_UNREGISTER_DRIVER,
+				      ICNSS_EVENT_SYNC_UNINTERRUPTIBLE, NULL);
+out:
+	return ret;
+}
+EXPORT_SYMBOL(icnss_unregister_driver);
+
+int icnss_ce_request_irq(struct device *dev, unsigned int ce_id,
+	irqreturn_t (*handler)(int, void *),
+		unsigned long flags, const char *name, void *ctx)
+{
+	int ret = 0;
+	unsigned int irq;
+	struct ce_irq_list *irq_entry;
+
+	if (!penv || !penv->pdev || !dev) {
+		ret = -ENODEV;
+		goto out;
+	}
+
+	icnss_pr_vdbg("CE request IRQ: %d, state: 0x%lx\n", ce_id, penv->state);
+
+	if (ce_id >= ICNSS_MAX_IRQ_REGISTRATIONS) {
+		icnss_pr_err("Invalid CE ID, ce_id: %d\n", ce_id);
+		ret = -EINVAL;
+		goto out;
+	}
+	irq = penv->ce_irqs[ce_id];
+	irq_entry = &penv->ce_irq_list[ce_id];
+
+	if (irq_entry->handler || irq_entry->irq) {
+		icnss_pr_err("IRQ already requested: %d, ce_id: %d\n",
+			     irq, ce_id);
+		ret = -EEXIST;
+		goto out;
+	}
+
+	ret = request_irq(irq, handler, flags, name, ctx);
+	if (ret) {
+		icnss_pr_err("IRQ request failed: %d, ce_id: %d, ret: %d\n",
+			     irq, ce_id, ret);
+		goto out;
+	}
+	irq_entry->irq = irq;
+	irq_entry->handler = handler;
+
+	icnss_pr_vdbg("IRQ requested: %d, ce_id: %d\n", irq, ce_id);
+
+	penv->stats.ce_irqs[ce_id].request++;
+out:
+	return ret;
+}
+EXPORT_SYMBOL(icnss_ce_request_irq);
+
+int icnss_ce_free_irq(struct device *dev, unsigned int ce_id, void *ctx)
+{
+	int ret = 0;
+	unsigned int irq;
+	struct ce_irq_list *irq_entry;
+
+	if (!penv || !penv->pdev || !dev) {
+		ret = -ENODEV;
+		goto out;
+	}
+
+	icnss_pr_vdbg("CE free IRQ: %d, state: 0x%lx\n", ce_id, penv->state);
+
+	if (ce_id >= ICNSS_MAX_IRQ_REGISTRATIONS) {
+		icnss_pr_err("Invalid CE ID to free, ce_id: %d\n", ce_id);
+		ret = -EINVAL;
+		goto out;
+	}
+
+	irq = penv->ce_irqs[ce_id];
+	irq_entry = &penv->ce_irq_list[ce_id];
+	if (!irq_entry->handler || !irq_entry->irq) {
+		icnss_pr_err("IRQ not requested: %d, ce_id: %d\n", irq, ce_id);
+		ret = -EEXIST;
+		goto out;
+	}
+	free_irq(irq, ctx);
+	irq_entry->irq = 0;
+	irq_entry->handler = NULL;
+
+	penv->stats.ce_irqs[ce_id].free++;
+out:
+	return ret;
+}
+EXPORT_SYMBOL(icnss_ce_free_irq);
+
+void icnss_enable_irq(struct device *dev, unsigned int ce_id)
+{
+	unsigned int irq;
+
+	if (!penv || !penv->pdev || !dev) {
+		icnss_pr_err("Platform driver not initialized\n");
+		return;
+	}
+
+	icnss_pr_vdbg("Enable IRQ: ce_id: %d, state: 0x%lx\n", ce_id,
+		     penv->state);
+
+	if (ce_id >= ICNSS_MAX_IRQ_REGISTRATIONS) {
+		icnss_pr_err("Invalid CE ID to enable IRQ, ce_id: %d\n", ce_id);
+		return;
+	}
+
+	penv->stats.ce_irqs[ce_id].enable++;
+
+	irq = penv->ce_irqs[ce_id];
+	enable_irq(irq);
+}
+EXPORT_SYMBOL(icnss_enable_irq);
+
+void icnss_disable_irq(struct device *dev, unsigned int ce_id)
+{
+	unsigned int irq;
+
+	if (!penv || !penv->pdev || !dev) {
+		icnss_pr_err("Platform driver not initialized\n");
+		return;
+	}
+
+	icnss_pr_vdbg("Disable IRQ: ce_id: %d, state: 0x%lx\n", ce_id,
+		     penv->state);
+
+	if (ce_id >= ICNSS_MAX_IRQ_REGISTRATIONS) {
+		icnss_pr_err("Invalid CE ID to disable IRQ, ce_id: %d\n",
+			     ce_id);
+		return;
+	}
+
+	irq = penv->ce_irqs[ce_id];
+	disable_irq(irq);
+
+	penv->stats.ce_irqs[ce_id].disable++;
+}
+EXPORT_SYMBOL(icnss_disable_irq);
+
+int icnss_get_soc_info(struct device *dev, struct icnss_soc_info *info)
+{
+	if (!penv || !dev) {
+		icnss_pr_err("Platform driver not initialized\n");
+		return -EINVAL;
+	}
+
+	info->v_addr = penv->mem_base_va;
+	info->p_addr = penv->mem_base_pa;
+	info->chip_id = penv->chip_info.chip_id;
+	info->chip_family = penv->chip_info.chip_family;
+	info->board_id = penv->board_info.board_id;
+	info->soc_id = penv->soc_info.soc_id;
+	info->fw_version = penv->fw_version_info.fw_version;
+	strlcpy(info->fw_build_timestamp,
+		penv->fw_version_info.fw_build_timestamp,
+		QMI_WLFW_MAX_TIMESTAMP_LEN_V01 + 1);
+
+	return 0;
+}
+EXPORT_SYMBOL(icnss_get_soc_info);
+
+int icnss_set_fw_log_mode(struct device *dev, uint8_t fw_log_mode)
+{
+	int ret;
+
+	if (!dev)
+		return -ENODEV;
+
+	if (test_bit(ICNSS_FW_DOWN, &penv->state)) {
+		icnss_pr_err("FW down, ignoring fw_log_mode state: 0x%lx\n",
+			     penv->state);
+		return -EINVAL;
+	}
+
+	icnss_pr_dbg("FW log mode: %u\n", fw_log_mode);
+
+	ret = wlfw_ini_send_sync_msg(fw_log_mode);
+	if (ret)
+		icnss_pr_err("Fail to send ini, ret = %d, fw_log_mode: %u\n",
+			     ret, fw_log_mode);
+	return ret;
+}
+EXPORT_SYMBOL(icnss_set_fw_log_mode);
+
+int icnss_athdiag_read(struct device *dev, uint32_t offset,
+		       uint32_t mem_type, uint32_t data_len,
+		       uint8_t *output)
+{
+	int ret = 0;
+	struct icnss_priv *priv = dev_get_drvdata(dev);
+
+	if (priv->magic != ICNSS_MAGIC) {
+		icnss_pr_err("Invalid drvdata for diag read: dev %p, data %p, magic 0x%x\n",
+			     dev, priv, priv->magic);
+		return -EINVAL;
+	}
+
+	if (!output || data_len == 0 ||
+	    data_len > QMI_WLFW_MAX_DATA_SIZE_V01) {
+		icnss_pr_err("Invalid parameters for diag read: output %p, data_len %u\n",
+			     output, data_len);
+		ret = -EINVAL;
+		goto out;
+	}
+
+	if (!test_bit(ICNSS_FW_READY, &priv->state) ||
+	    !test_bit(ICNSS_POWER_ON, &priv->state)) {
+		icnss_pr_err("Invalid state for diag read: 0x%lx\n",
+			     priv->state);
+		ret = -EINVAL;
+		goto out;
+	}
+
+	ret = wlfw_athdiag_read_send_sync_msg(priv, offset, mem_type,
+					      data_len, output);
+out:
+	return ret;
+}
+EXPORT_SYMBOL(icnss_athdiag_read);
+
+int icnss_athdiag_write(struct device *dev, uint32_t offset,
+			uint32_t mem_type, uint32_t data_len,
+			uint8_t *input)
+{
+	int ret = 0;
+	struct icnss_priv *priv = dev_get_drvdata(dev);
+
+	if (priv->magic != ICNSS_MAGIC) {
+		icnss_pr_err("Invalid drvdata for diag write: dev %p, data %p, magic 0x%x\n",
+			     dev, priv, priv->magic);
+		return -EINVAL;
+	}
+
+	if (!input || data_len == 0 ||
+	    data_len > QMI_WLFW_MAX_DATA_SIZE_V01) {
+		icnss_pr_err("Invalid parameters for diag write: input %p, data_len %u\n",
+			     input, data_len);
+		ret = -EINVAL;
+		goto out;
+	}
+
+	if (!test_bit(ICNSS_FW_READY, &priv->state) ||
+	    !test_bit(ICNSS_POWER_ON, &priv->state)) {
+		icnss_pr_err("Invalid state for diag write: 0x%lx\n",
+			     priv->state);
+		ret = -EINVAL;
+		goto out;
+	}
+
+	ret = wlfw_athdiag_write_send_sync_msg(priv, offset, mem_type,
+					       data_len, input);
+out:
+	return ret;
+}
+EXPORT_SYMBOL(icnss_athdiag_write);
+
+int icnss_wlan_enable(struct device *dev, struct icnss_wlan_enable_cfg *config,
+		      enum icnss_driver_mode mode,
+		      const char *host_version)
+{
+	struct wlfw_wlan_cfg_req_msg_v01 req;
+	u32 i;
+	int ret;
+
+	if (!dev)
+		return -ENODEV;
+
+	if (test_bit(ICNSS_FW_DOWN, &penv->state)) {
+		icnss_pr_err("FW down, ignoring wlan_enable state: 0x%lx\n",
+			     penv->state);
+		return -EINVAL;
+	}
+
+	icnss_pr_dbg("Mode: %d, config: %p, host_version: %s\n",
+		     mode, config, host_version);
+
+	memset(&req, 0, sizeof(req));
+
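+	/* WALTEST and CCPM skip the config and go straight to mode switch */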
+	if (mode == ICNSS_WALTEST || mode == ICNSS_CCPM)
+		goto skip;
+
+	if (!config || !host_version) {
+		icnss_pr_err("Invalid cfg pointer, config: %p, host_version: %p\n",
+			     config, host_version);
+		ret = -EINVAL;
+		goto out;
+	}
+
+	req.host_version_valid = 1;
+	strlcpy(req.host_version, host_version,
+		QMI_WLFW_MAX_STR_LEN_V01 + 1);
+
+	req.tgt_cfg_valid = 1;
+	if (config->num_ce_tgt_cfg > QMI_WLFW_MAX_NUM_CE_V01)
+		req.tgt_cfg_len = QMI_WLFW_MAX_NUM_CE_V01;
+	else
+		req.tgt_cfg_len = config->num_ce_tgt_cfg;
+	for (i = 0; i < req.tgt_cfg_len; i++) {
+		req.tgt_cfg[i].pipe_num = config->ce_tgt_cfg[i].pipe_num;
+		req.tgt_cfg[i].pipe_dir = config->ce_tgt_cfg[i].pipe_dir;
+		req.tgt_cfg[i].nentries = config->ce_tgt_cfg[i].nentries;
+		req.tgt_cfg[i].nbytes_max = config->ce_tgt_cfg[i].nbytes_max;
+		req.tgt_cfg[i].flags = config->ce_tgt_cfg[i].flags;
+	}
+
+	req.svc_cfg_valid = 1;
+	if (config->num_ce_svc_pipe_cfg > QMI_WLFW_MAX_NUM_SVC_V01)
+		req.svc_cfg_len = QMI_WLFW_MAX_NUM_SVC_V01;
+	else
+		req.svc_cfg_len = config->num_ce_svc_pipe_cfg;
+	for (i = 0; i < req.svc_cfg_len; i++) {
+		req.svc_cfg[i].service_id = config->ce_svc_cfg[i].service_id;
+		req.svc_cfg[i].pipe_dir = config->ce_svc_cfg[i].pipe_dir;
+		req.svc_cfg[i].pipe_num = config->ce_svc_cfg[i].pipe_num;
+	}
+
+	req.shadow_reg_valid = 1;
+	if (config->num_shadow_reg_cfg >
+	    QMI_WLFW_MAX_NUM_SHADOW_REG_V01)
+		req.shadow_reg_len = QMI_WLFW_MAX_NUM_SHADOW_REG_V01;
+	else
+		req.shadow_reg_len = config->num_shadow_reg_cfg;
+
+	memcpy(req.shadow_reg, config->shadow_reg_cfg,
+	       sizeof(struct wlfw_shadow_reg_cfg_s_v01) * req.shadow_reg_len);
+
+	ret = wlfw_wlan_cfg_send_sync_msg(&req);
+	if (ret)
+		goto out;
+skip:
+	ret = wlfw_wlan_mode_send_sync_msg(mode);
+out:
+	if (test_bit(SKIP_QMI, &quirks))
+		ret = 0;
+
+	return ret;
+}
+EXPORT_SYMBOL(icnss_wlan_enable);
+
+int icnss_wlan_disable(struct device *dev, enum icnss_driver_mode mode)
+{
+	if (!dev)
+		return -ENODEV;
+
+	return wlfw_wlan_mode_send_sync_msg(QMI_WLFW_OFF_V01);
+}
+EXPORT_SYMBOL(icnss_wlan_disable);
+
+bool icnss_is_qmi_disable(struct device *dev)
+{
+	return test_bit(SKIP_QMI, &quirks);
+}
+EXPORT_SYMBOL(icnss_is_qmi_disable);
+
+int icnss_get_ce_id(struct device *dev, int irq)
+{
+	int i;
+
+	if (!penv || !penv->pdev || !dev)
+		return -ENODEV;
+
+	for (i = 0; i < ICNSS_MAX_IRQ_REGISTRATIONS; i++) {
+		if (penv->ce_irqs[i] == irq)
+			return i;
+	}
+
+	icnss_pr_err("No matching CE id for irq %d\n", irq);
+
+	return -EINVAL;
+}
+EXPORT_SYMBOL(icnss_get_ce_id);
+
+int icnss_get_irq(struct device *dev, int ce_id)
+{
+	int irq;
+
+	if (!penv || !penv->pdev || !dev)
+		return -ENODEV;
+
+	if (ce_id >= ICNSS_MAX_IRQ_REGISTRATIONS)
+		return -EINVAL;
+
+	irq = penv->ce_irqs[ce_id];
+
+	return irq;
+}
+EXPORT_SYMBOL(icnss_get_irq);
+
+struct dma_iommu_mapping *icnss_smmu_get_mapping(struct device *dev)
+{
+	struct icnss_priv *priv = dev_get_drvdata(dev);
+
+	if (!priv) {
+		icnss_pr_err("Invalid drvdata: dev %p, data %p\n",
+			     dev, priv);
+		return NULL;
+	}
+
+	return priv->smmu_mapping;
+}
+EXPORT_SYMBOL(icnss_smmu_get_mapping);
+
+int icnss_smmu_map(struct device *dev,
+		   phys_addr_t paddr, uint32_t *iova_addr, size_t size)
+{
+	struct icnss_priv *priv = dev_get_drvdata(dev);
+	unsigned long iova;
+	size_t len;
+	int ret = 0;
+
+	if (!priv) {
+		icnss_pr_err("Invalid drvdata: dev %p, data %p\n",
+			     dev, priv);
+		return -EINVAL;
+	}
+
+	if (!iova_addr) {
+		icnss_pr_err("iova_addr is NULL, paddr %pa, size %zu\n",
+			     &paddr, size);
+		return -EINVAL;
+	}
+
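+	/*
+	 * Page-align the mapping: extend len to cover the sub-page
+	 * offset of paddr and take the IOVA from the IPA range cursor.
+	 */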
+	len = roundup(size + paddr - rounddown(paddr, PAGE_SIZE), PAGE_SIZE);
+	iova = roundup(priv->smmu_iova_ipa_start, PAGE_SIZE);
+
+	if (iova >= priv->smmu_iova_ipa_start + priv->smmu_iova_ipa_len) {
+		icnss_pr_err("No IOVA space to map, iova %lx, smmu_iova_ipa_start %pad, smmu_iova_ipa_len %zu\n",
+			     iova,
+			     &priv->smmu_iova_ipa_start,
+			     priv->smmu_iova_ipa_len);
+		return -ENOMEM;
+	}
+
+	ret = iommu_map(priv->smmu_mapping->domain, iova,
+			rounddown(paddr, PAGE_SIZE), len,
+			IOMMU_READ | IOMMU_WRITE);
+	if (ret) {
+		icnss_pr_err("PA to IOVA mapping failed, ret %d\n", ret);
+		return ret;
+	}
+
+	priv->smmu_iova_ipa_start = iova + len;
+	*iova_addr = (uint32_t)(iova + paddr - rounddown(paddr, PAGE_SIZE));
+
+	return 0;
+}
+EXPORT_SYMBOL(icnss_smmu_map);
+
+unsigned int icnss_socinfo_get_serial_number(struct device *dev)
+{
+	return socinfo_get_serial_number();
+}
+EXPORT_SYMBOL(icnss_socinfo_get_serial_number);
+
+int icnss_set_wlan_mac_address(const u8 *in, const uint32_t len)
+{
+	struct icnss_priv *priv = penv;
+	uint32_t no_of_mac_addr;
+	struct icnss_wlan_mac_addr *addr = NULL;
+	int iter;
+	u8 *temp = NULL;
+
+	if (!priv) {
+		icnss_pr_err("Priv data is NULL\n");
+		return -EINVAL;
+	}
+
+	if (priv->is_wlan_mac_set) {
+		icnss_pr_dbg("WLAN MAC address is already set\n");
+		return 0;
+	}
+
+	if (len == 0 || (len % ETH_ALEN) != 0) {
+		icnss_pr_err("Invalid length %d\n", len);
+		return -EINVAL;
+	}
+
+	no_of_mac_addr = len / ETH_ALEN;
+	if (no_of_mac_addr > MAX_NO_OF_MAC_ADDR) {
+		icnss_pr_err("Exceed maxinum supported MAC address %u %u\n",
+			     MAX_NO_OF_MAC_ADDR, no_of_mac_addr);
+		return -EINVAL;
+	}
+
+	priv->is_wlan_mac_set = true;
+	addr = &priv->wlan_mac_addr;
+	addr->no_of_mac_addr_set = no_of_mac_addr;
+	temp = &addr->mac_addr[0][0];
+
+	for (iter = 0; iter < no_of_mac_addr;
+	     ++iter, temp += ETH_ALEN, in += ETH_ALEN) {
+		ether_addr_copy(temp, in);
+		icnss_pr_dbg("MAC_ADDR:%02x:%02x:%02x:%02x:%02x:%02x\n",
+			     temp[0], temp[1], temp[2],
+			     temp[3], temp[4], temp[5]);
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(icnss_set_wlan_mac_address);
+
+u8 *icnss_get_wlan_mac_address(struct device *dev, uint32_t *num)
+{
+	struct icnss_priv *priv = dev_get_drvdata(dev);
+	struct icnss_wlan_mac_addr *addr = NULL;
+
+	if (priv->magic != ICNSS_MAGIC) {
+		icnss_pr_err("Invalid drvdata: dev %p, data %p, magic 0x%x\n",
+			     dev, priv, priv->magic);
+		goto out;
+	}
+
+	if (!priv->is_wlan_mac_set) {
+		icnss_pr_dbg("WLAN MAC address is not set\n");
+		goto out;
+	}
+
+	addr = &priv->wlan_mac_addr;
+	*num = addr->no_of_mac_addr_set;
+	return &addr->mac_addr[0][0];
+out:
+	*num = 0;
+	return NULL;
+}
+EXPORT_SYMBOL(icnss_get_wlan_mac_address);
+
+int icnss_trigger_recovery(struct device *dev)
+{
+	int ret = 0;
+	struct icnss_priv *priv = dev_get_drvdata(dev);
+
+	if (priv->magic != ICNSS_MAGIC) {
+		icnss_pr_err("Invalid drvdata: magic 0x%x\n", priv->magic);
+		ret = -EINVAL;
+		goto out;
+	}
+
+	if (test_bit(ICNSS_PD_RESTART, &priv->state)) {
+		icnss_pr_err("PD recovery already in progress: state: 0x%lx\n",
+			     priv->state);
+		ret = -EPERM;
+		goto out;
+	}
+
+	if (!test_bit(ICNSS_PDR_REGISTERED, &priv->state)) {
+		icnss_pr_err("PD restart not enabled to trigger recovery: state: 0x%lx\n",
+			     priv->state);
+		ret = -EOPNOTSUPP;
+		goto out;
+	}
+
+	if (!priv->service_notifier || !priv->service_notifier[0].handle) {
+		icnss_pr_err("Invalid handle during recovery, state: 0x%lx\n",
+			     priv->state);
+		ret = -EINVAL;
+		goto out;
+	}
+
+	WARN_ON(1);
+	icnss_pr_warn("Initiate PD restart at WLAN FW, state: 0x%lx\n",
+		      priv->state);
+
+	/*
+	 * Initiate PDR, required only for the first instance
+	 */
+	ret = service_notif_pd_restart(priv->service_notifier[0].name,
+		priv->service_notifier[0].instance_id);
+
+	if (!ret)
+		set_bit(ICNSS_HOST_TRIGGERED_PDR, &priv->state);
+
+out:
+	return ret;
+}
+EXPORT_SYMBOL(icnss_trigger_recovery);
+
+static int icnss_smmu_init(struct icnss_priv *priv)
+{
+	struct dma_iommu_mapping *mapping;
+	int atomic_ctx = 1;
+	int s1_bypass = 1;
+	int ret = 0;
+
+	icnss_pr_dbg("Initializing SMMU\n");
+
+	mapping = arm_iommu_create_mapping(&platform_bus_type,
+					   priv->smmu_iova_start,
+					   priv->smmu_iova_len);
+	if (IS_ERR(mapping)) {
+		ret = PTR_ERR(mapping);
+		icnss_pr_err("Create mapping failed, err = %d\n", ret);
+		goto map_fail;
+	}
+
+	if (!priv->bypass_s1_smmu) {
+		ret = iommu_domain_set_attr(mapping->domain,
+					    DOMAIN_ATTR_ATOMIC,
+					    &atomic_ctx);
+		if (ret < 0) {
+			icnss_pr_err("Set atomic_ctx attribute failed, err = %d\n",
+				     ret);
+			goto set_attr_fail;
+		}
+	}
+
+	ret = iommu_domain_set_attr(mapping->domain,
+				    DOMAIN_ATTR_S1_BYPASS,
+				    &s1_bypass);
+	if (ret < 0) {
+		icnss_pr_err("Set s1_bypass attribute failed, err = %d\n", ret);
+		goto set_attr_fail;
+	}
+
+	ret = arm_iommu_attach_device(&priv->pdev->dev, mapping);
+	if (ret < 0) {
+		icnss_pr_err("Attach device failed, err = %d\n", ret);
+		goto attach_fail;
+	}
+
+	priv->smmu_mapping = mapping;
+
+	return ret;
+
+attach_fail:
+set_attr_fail:
+	arm_iommu_release_mapping(mapping);
+map_fail:
+	return ret;
+}
+
+static void icnss_smmu_deinit(struct icnss_priv *priv)
+{
+	if (!priv->smmu_mapping)
+		return;
+
+	arm_iommu_detach_device(&priv->pdev->dev);
+	arm_iommu_release_mapping(priv->smmu_mapping);
+
+	priv->smmu_mapping = NULL;
+}
+
+static int icnss_get_vreg_info(struct device *dev,
+			       struct icnss_vreg_info *vreg_info)
+{
+	int ret = 0;
+	char prop_name[MAX_PROP_SIZE];
+	struct regulator *reg;
+	const __be32 *prop;
+	int len = 0;
+	int i;
+
+	reg = devm_regulator_get_optional(dev, vreg_info->name);
+	if (PTR_ERR(reg) == -EPROBE_DEFER) {
+		icnss_pr_err("EPROBE_DEFER for regulator: %s\n",
+			     vreg_info->name);
+		ret = PTR_ERR(reg);
+		goto out;
+	}
+
+	if (IS_ERR(reg)) {
+		ret = PTR_ERR(reg);
+
+		if (vreg_info->required) {
+			icnss_pr_err("Regulator %s doesn't exist: %d\n",
+				     vreg_info->name, ret);
+			goto out;
+		} else {
+			icnss_pr_dbg("Optional regulator %s doesn't exist: %d\n",
+				     vreg_info->name, ret);
+			goto done;
+		}
+	}
+
+	vreg_info->reg = reg;
+
+	snprintf(prop_name, MAX_PROP_SIZE,
+		 "qcom,%s-config", vreg_info->name);
+
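+	/*
+	 * Optional DT property "qcom,<name>-config": up to four u32
+	 * cells - min voltage, max voltage, load and settle delay.
+	 */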
+	prop = of_get_property(dev->of_node, prop_name, &len);
+
+	icnss_pr_dbg("Got regulator config, prop: %s, len: %d\n",
+		     prop_name, len);
+
+	if (!prop || len < (2 * sizeof(__be32))) {
+		icnss_pr_dbg("Property %s %s\n", prop_name,
+			     prop ? "invalid format" : "doesn't exist");
+		goto done;
+	}
+
+	for (i = 0; (i * sizeof(__be32)) < len; i++) {
+		switch (i) {
+		case 0:
+			vreg_info->min_v = be32_to_cpup(&prop[0]);
+			break;
+		case 1:
+			vreg_info->max_v = be32_to_cpup(&prop[1]);
+			break;
+		case 2:
+			vreg_info->load_ua = be32_to_cpup(&prop[2]);
+			break;
+		case 3:
+			vreg_info->settle_delay = be32_to_cpup(&prop[3]);
+			break;
+		default:
+			icnss_pr_dbg("Property %s, ignoring value at %d\n",
+				     prop_name, i);
+			break;
+		}
+	}
+
+done:
+	icnss_pr_dbg("Regulator: %s, min_v: %u, max_v: %u, load: %u, delay: %lu\n",
+		     vreg_info->name, vreg_info->min_v, vreg_info->max_v,
+		     vreg_info->load_ua, vreg_info->settle_delay);
+
+	return 0;
+
+out:
+	return ret;
+}
+
+static int icnss_get_clk_info(struct device *dev,
+			      struct icnss_clk_info *clk_info)
+{
+	struct clk *handle;
+	int ret = 0;
+
+	handle = devm_clk_get(dev, clk_info->name);
+	if (IS_ERR(handle)) {
+		ret = PTR_ERR(handle);
+		if (clk_info->required) {
+			icnss_pr_err("Clock %s isn't available: %d\n",
+				     clk_info->name, ret);
+			goto out;
+		} else {
+			icnss_pr_dbg("Ignoring clock %s: %d\n", clk_info->name,
+				     ret);
+			ret = 0;
+			goto out;
+		}
+	}
+
+	icnss_pr_dbg("Clock: %s, freq: %u\n", clk_info->name, clk_info->freq);
+
+	clk_info->handle = handle;
+out:
+	return ret;
+}
+
+static int icnss_fw_debug_show(struct seq_file *s, void *data)
+{
+	struct icnss_priv *priv = s->private;
+
+	seq_puts(s, "\nUsage: echo <CMD> <VAL> > <DEBUGFS>/icnss/fw_debug\n");
+
+	seq_puts(s, "\nCMD: test_mode\n");
+	seq_puts(s, "  VAL: 0 (Test mode disable)\n");
+	seq_puts(s, "  VAL: 1 (WLAN FW test)\n");
+	seq_puts(s, "  VAL: 2 (CCPM test)\n");
+	seq_puts(s, "  VAL: 3 (Trigger Recovery)\n");
+
+	seq_puts(s, "\nCMD: dynamic_feature_mask\n");
+	seq_puts(s, "  VAL: (64 bit feature mask)\n");
+
+	if (!test_bit(ICNSS_FW_READY, &priv->state)) {
+		seq_puts(s, "Firmware is not ready yet, can't run test_mode!\n");
+		goto out;
+	}
+
+	if (test_bit(ICNSS_DRIVER_PROBED, &priv->state)) {
+		seq_puts(s, "Machine mode is running, can't run test_mode!\n");
+		goto out;
+	}
+
+	if (test_bit(ICNSS_FW_TEST_MODE, &priv->state)) {
+		seq_puts(s, "test_mode is running, can't run test_mode!\n");
+		goto out;
+	}
+
+out:
+	seq_puts(s, "\n");
+	return 0;
+}
+
+static int icnss_test_mode_fw_test_off(struct icnss_priv *priv)
+{
+	int ret;
+
+	if (!test_bit(ICNSS_FW_READY, &priv->state)) {
+		icnss_pr_err("Firmware is not ready yet!, wait for FW READY: state: 0x%lx\n",
+			     priv->state);
+		ret = -ENODEV;
+		goto out;
+	}
+
+	if (test_bit(ICNSS_DRIVER_PROBED, &priv->state)) {
+		icnss_pr_err("Machine mode is running, can't run test mode: state: 0x%lx\n",
+			     priv->state);
+		ret = -EINVAL;
+		goto out;
+	}
+
+	if (!test_bit(ICNSS_FW_TEST_MODE, &priv->state)) {
+		icnss_pr_err("Test mode not started, state: 0x%lx\n",
+			     priv->state);
+		ret = -EINVAL;
+		goto out;
+	}
+
+	icnss_wlan_disable(&priv->pdev->dev, ICNSS_OFF);
+
+	ret = icnss_hw_power_off(priv);
+
+	clear_bit(ICNSS_FW_TEST_MODE, &priv->state);
+
+out:
+	return ret;
+}
+
+static int icnss_test_mode_fw_test(struct icnss_priv *priv,
+				   enum icnss_driver_mode mode)
+{
+	int ret;
+
+	if (!test_bit(ICNSS_FW_READY, &priv->state)) {
+		icnss_pr_err("Firmware is not ready yet!, wait for FW READY, state: 0x%lx\n",
+			     priv->state);
+		ret = -ENODEV;
+		goto out;
+	}
+
+	if (test_bit(ICNSS_DRIVER_PROBED, &priv->state)) {
+		icnss_pr_err("Machine mode is running, can't run test mode, state: 0x%lx\n",
+			     priv->state);
+		ret = -EINVAL;
+		goto out;
+	}
+
+	if (test_bit(ICNSS_FW_TEST_MODE, &priv->state)) {
+		icnss_pr_err("Test mode already started, state: 0x%lx\n",
+			     priv->state);
+		ret = -EBUSY;
+		goto out;
+	}
+
+	ret = icnss_hw_power_on(priv);
+	if (ret)
+		goto out;
+
+	set_bit(ICNSS_FW_TEST_MODE, &priv->state);
+
+	ret = icnss_wlan_enable(&priv->pdev->dev, NULL, mode, NULL);
+	if (ret)
+		goto power_off;
+
+	return 0;
+
+power_off:
+	icnss_hw_power_off(priv);
+	clear_bit(ICNSS_FW_TEST_MODE, &priv->state);
+
+out:
+	return ret;
+}
+
+static ssize_t icnss_fw_debug_write(struct file *fp,
+				    const char __user *user_buf,
+				    size_t count, loff_t *off)
+{
+	struct icnss_priv *priv =
+		((struct seq_file *)fp->private_data)->private;
+	char buf[64];
+	char *sptr, *token;
+	unsigned int len = 0;
+	char *cmd;
+	uint64_t val;
+	const char *delim = " ";
+	int ret = 0;
+
+	len = min(count, sizeof(buf) - 1);
+	if (copy_from_user(buf, user_buf, len))
+		return -EFAULT;
+
+	buf[len] = '\0';
+	sptr = buf;
+
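+	/* Parse "<cmd> <val>", e.g. "test_mode 1" */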
+	token = strsep(&sptr, delim);
+	if (!token)
+		return -EINVAL;
+	if (!sptr)
+		return -EINVAL;
+	cmd = token;
+
+	token = strsep(&sptr, delim);
+	if (!token)
+		return -EINVAL;
+	if (kstrtou64(token, 0, &val))
+		return -EINVAL;
+
+	if (strcmp(cmd, "test_mode") == 0) {
+		switch (val) {
+		case 0:
+			ret = icnss_test_mode_fw_test_off(priv);
+			break;
+		case 1:
+			ret = icnss_test_mode_fw_test(priv, ICNSS_WALTEST);
+			break;
+		case 2:
+			ret = icnss_test_mode_fw_test(priv, ICNSS_CCPM);
+			break;
+		case 3:
+			ret = icnss_trigger_recovery(&priv->pdev->dev);
+			break;
+		default:
+			return -EINVAL;
+		}
+	} else if (strcmp(cmd, "dynamic_feature_mask") == 0) {
+		ret = wlfw_dynamic_feature_mask_send_sync_msg(priv, val);
+	} else {
+		return -EINVAL;
+	}
+
+	if (ret)
+		return ret;
+
+	return count;
+}
+
+static int icnss_fw_debug_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, icnss_fw_debug_show, inode->i_private);
+}
+
+static const struct file_operations icnss_fw_debug_fops = {
+	.read		= seq_read,
+	.write		= icnss_fw_debug_write,
+	.release	= single_release,
+	.open		= icnss_fw_debug_open,
+	.owner		= THIS_MODULE,
+	.llseek		= seq_lseek,
+};
+
+static ssize_t icnss_stats_write(struct file *fp, const char __user *buf,
+				    size_t count, loff_t *off)
+{
+	struct icnss_priv *priv =
+		((struct seq_file *)fp->private_data)->private;
+	int ret;
+	u32 val;
+
+	ret = kstrtou32_from_user(buf, count, 0, &val);
+	if (ret)
+		return ret;
+
+	memset(&priv->stats, 0, sizeof(priv->stats));
+
+	return count;
+}
+
+static int icnss_stats_show_state(struct seq_file *s, struct icnss_priv *priv)
+{
+	enum icnss_driver_state i;
+	int skip = 0;
+	unsigned long state;
+
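+	/* Decode the state bitmask into human-readable flag names */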
+	seq_printf(s, "\nState: 0x%lx(", priv->state);
+	for (i = 0, state = priv->state; state != 0; state >>= 1, i++) {
+		if (!(state & 0x1))
+			continue;
+
+		if (skip++)
+			seq_puts(s, " | ");
+
+		switch (i) {
+		case ICNSS_WLFW_QMI_CONNECTED:
+			seq_puts(s, "QMI CONN");
+			continue;
+		case ICNSS_POWER_ON:
+			seq_puts(s, "POWER ON");
+			continue;
+		case ICNSS_FW_READY:
+			seq_puts(s, "FW READY");
+			continue;
+		case ICNSS_DRIVER_PROBED:
+			seq_puts(s, "DRIVER PROBED");
+			continue;
+		case ICNSS_FW_TEST_MODE:
+			seq_puts(s, "FW TEST MODE");
+			continue;
+		case ICNSS_PM_SUSPEND:
+			seq_puts(s, "PM SUSPEND");
+			continue;
+		case ICNSS_PM_SUSPEND_NOIRQ:
+			seq_puts(s, "PM SUSPEND NOIRQ");
+			continue;
+		case ICNSS_SSR_REGISTERED:
+			seq_puts(s, "SSR REGISTERED");
+			continue;
+		case ICNSS_PDR_REGISTERED:
+			seq_puts(s, "PDR REGISTERED");
+			continue;
+		case ICNSS_PD_RESTART:
+			seq_puts(s, "PD RESTART");
+			continue;
+		case ICNSS_MSA0_ASSIGNED:
+			seq_puts(s, "MSA0 ASSIGNED");
+			continue;
+		case ICNSS_WLFW_EXISTS:
+			seq_puts(s, "WLAN FW EXISTS");
+			continue;
+		case ICNSS_SHUTDOWN_DONE:
+			seq_puts(s, "SHUTDOWN DONE");
+			continue;
+		case ICNSS_HOST_TRIGGERED_PDR:
+			seq_puts(s, "HOST TRIGGERED PDR");
+			continue;
+		case ICNSS_FW_DOWN:
+			seq_puts(s, "FW DOWN");
+			continue;
+		case ICNSS_DRIVER_UNLOADING:
+			seq_puts(s, "DRIVER UNLOADING");
+			continue;
+		}
+
+		seq_printf(s, "UNKNOWN-%d", i);
+	}
+	seq_puts(s, ")\n");
+
+	return 0;
+}
+
+static int icnss_stats_show_capability(struct seq_file *s,
+				       struct icnss_priv *priv)
+{
+	if (test_bit(ICNSS_FW_READY, &priv->state)) {
+		seq_puts(s, "\n<---------------- FW Capability ----------------->\n");
+		seq_printf(s, "Chip ID: 0x%x\n", priv->chip_info.chip_id);
+		seq_printf(s, "Chip family: 0x%x\n",
+			  priv->chip_info.chip_family);
+		seq_printf(s, "Board ID: 0x%x\n", priv->board_info.board_id);
+		seq_printf(s, "SOC Info: 0x%x\n", priv->soc_info.soc_id);
+		seq_printf(s, "Firmware Version: 0x%x\n",
+			   priv->fw_version_info.fw_version);
+		seq_printf(s, "Firmware Build Timestamp: %s\n",
+			   priv->fw_version_info.fw_build_timestamp);
+		seq_printf(s, "Firmware Build ID: %s\n",
+			   priv->fw_build_id);
+	}
+
+	return 0;
+}
+
+static int icnss_stats_show_rejuvenate_info(struct seq_file *s,
+					    struct icnss_priv *priv)
+{
+	if (priv->stats.rejuvenate_ind)  {
+		seq_puts(s, "\n<---------------- Rejuvenate Info ----------------->\n");
+		seq_printf(s, "Number of Rejuvenations: %u\n",
+			   priv->stats.rejuvenate_ind);
+		seq_printf(s, "Cause for Rejuvenation: 0x%x\n",
+			   priv->cause_for_rejuvenation);
+		seq_printf(s, "Requesting Sub-System: 0x%x\n",
+			   priv->requesting_sub_system);
+		seq_printf(s, "Line Number: %u\n",
+			   priv->line_number);
+		seq_printf(s, "Function Name: %s\n",
+			   priv->function_name);
+	}
+
+	return 0;
+}
+
+static int icnss_stats_show_events(struct seq_file *s, struct icnss_priv *priv)
+{
+	int i;
+
+	seq_puts(s, "\n<----------------- Events stats ------------------->\n");
+	seq_printf(s, "%24s %16s %16s\n", "Events", "Posted", "Processed");
+	for (i = 0; i < ICNSS_DRIVER_EVENT_MAX; i++)
+		seq_printf(s, "%24s %16u %16u\n",
+			   icnss_driver_event_to_str(i),
+			   priv->stats.events[i].posted,
+			   priv->stats.events[i].processed);
+
+	return 0;
+}
+
+static int icnss_stats_show_irqs(struct seq_file *s, struct icnss_priv *priv)
+{
+	int i;
+
+	seq_puts(s, "\n<------------------ IRQ stats ------------------->\n");
+	seq_printf(s, "%4s %4s %8s %8s %8s %8s\n", "CE_ID", "IRQ", "Request",
+		   "Free", "Enable", "Disable");
+	for (i = 0; i < ICNSS_MAX_IRQ_REGISTRATIONS; i++)
+		seq_printf(s, "%4d: %4u %8u %8u %8u %8u\n", i,
+			   priv->ce_irqs[i], priv->stats.ce_irqs[i].request,
+			   priv->stats.ce_irqs[i].free,
+			   priv->stats.ce_irqs[i].enable,
+			   priv->stats.ce_irqs[i].disable);
+
+	return 0;
+}
+
+static int icnss_stats_show(struct seq_file *s, void *data)
+{
+#define ICNSS_STATS_DUMP(_s, _priv, _x) \
+	seq_printf(_s, "%24s: %u\n", #_x, _priv->stats._x)
+
+	struct icnss_priv *priv = s->private;
+
+	ICNSS_STATS_DUMP(s, priv, ind_register_req);
+	ICNSS_STATS_DUMP(s, priv, ind_register_resp);
+	ICNSS_STATS_DUMP(s, priv, ind_register_err);
+	ICNSS_STATS_DUMP(s, priv, msa_info_req);
+	ICNSS_STATS_DUMP(s, priv, msa_info_resp);
+	ICNSS_STATS_DUMP(s, priv, msa_info_err);
+	ICNSS_STATS_DUMP(s, priv, msa_ready_req);
+	ICNSS_STATS_DUMP(s, priv, msa_ready_resp);
+	ICNSS_STATS_DUMP(s, priv, msa_ready_err);
+	ICNSS_STATS_DUMP(s, priv, msa_ready_ind);
+	ICNSS_STATS_DUMP(s, priv, cap_req);
+	ICNSS_STATS_DUMP(s, priv, cap_resp);
+	ICNSS_STATS_DUMP(s, priv, cap_err);
+	ICNSS_STATS_DUMP(s, priv, pin_connect_result);
+	ICNSS_STATS_DUMP(s, priv, cfg_req);
+	ICNSS_STATS_DUMP(s, priv, cfg_resp);
+	ICNSS_STATS_DUMP(s, priv, cfg_req_err);
+	ICNSS_STATS_DUMP(s, priv, mode_req);
+	ICNSS_STATS_DUMP(s, priv, mode_resp);
+	ICNSS_STATS_DUMP(s, priv, mode_req_err);
+	ICNSS_STATS_DUMP(s, priv, ini_req);
+	ICNSS_STATS_DUMP(s, priv, ini_resp);
+	ICNSS_STATS_DUMP(s, priv, ini_req_err);
+	ICNSS_STATS_DUMP(s, priv, vbatt_req);
+	ICNSS_STATS_DUMP(s, priv, vbatt_resp);
+	ICNSS_STATS_DUMP(s, priv, vbatt_req_err);
+	ICNSS_STATS_DUMP(s, priv, rejuvenate_ind);
+	ICNSS_STATS_DUMP(s, priv, rejuvenate_ack_req);
+	ICNSS_STATS_DUMP(s, priv, rejuvenate_ack_resp);
+	ICNSS_STATS_DUMP(s, priv, rejuvenate_ack_err);
+	ICNSS_STATS_DUMP(s, priv, recovery.pdr_fw_crash);
+	ICNSS_STATS_DUMP(s, priv, recovery.pdr_host_error);
+	ICNSS_STATS_DUMP(s, priv, recovery.root_pd_crash);
+	ICNSS_STATS_DUMP(s, priv, recovery.root_pd_shutdown);
+
+	seq_puts(s, "\n<------------------ PM stats ------------------->\n");
+	ICNSS_STATS_DUMP(s, priv, pm_suspend);
+	ICNSS_STATS_DUMP(s, priv, pm_suspend_err);
+	ICNSS_STATS_DUMP(s, priv, pm_resume);
+	ICNSS_STATS_DUMP(s, priv, pm_resume_err);
+	ICNSS_STATS_DUMP(s, priv, pm_suspend_noirq);
+	ICNSS_STATS_DUMP(s, priv, pm_suspend_noirq_err);
+	ICNSS_STATS_DUMP(s, priv, pm_resume_noirq);
+	ICNSS_STATS_DUMP(s, priv, pm_resume_noirq_err);
+	ICNSS_STATS_DUMP(s, priv, pm_stay_awake);
+	ICNSS_STATS_DUMP(s, priv, pm_relax);
+
+	icnss_stats_show_irqs(s, priv);
+
+	icnss_stats_show_capability(s, priv);
+
+	icnss_stats_show_rejuvenate_info(s, priv);
+
+	icnss_stats_show_events(s, priv);
+
+	icnss_stats_show_state(s, priv);
+
+	return 0;
+#undef ICNSS_STATS_DUMP
+}
+
+static int icnss_stats_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, icnss_stats_show, inode->i_private);
+}
+
+static const struct file_operations icnss_stats_fops = {
+	.read		= seq_read,
+	.write		= icnss_stats_write,
+	.release	= single_release,
+	.open		= icnss_stats_open,
+	.owner		= THIS_MODULE,
+	.llseek		= seq_lseek,
+};
+
+static int icnss_regwrite_show(struct seq_file *s, void *data)
+{
+	struct icnss_priv *priv = s->private;
+
+	seq_puts(s, "\nUsage: echo <mem_type> <offset> <reg_val> > <debugfs>/icnss/reg_write\n");
+
+	if (!test_bit(ICNSS_FW_READY, &priv->state))
+		seq_puts(s, "Firmware is not ready yet!, wait for FW READY\n");
+
+	return 0;
+}
+
+static ssize_t icnss_regwrite_write(struct file *fp,
+				    const char __user *user_buf,
+				    size_t count, loff_t *off)
+{
+	struct icnss_priv *priv =
+		((struct seq_file *)fp->private_data)->private;
+	char buf[64];
+	char *sptr, *token;
+	unsigned int len = 0;
+	uint32_t reg_offset, mem_type, reg_val;
+	const char *delim = " ";
+	int ret = 0;
+
+	if (!test_bit(ICNSS_FW_READY, &priv->state) ||
+	    !test_bit(ICNSS_POWER_ON, &priv->state))
+		return -EINVAL;
+
+	len = min(count, sizeof(buf) - 1);
+	if (copy_from_user(buf, user_buf, len))
+		return -EFAULT;
+
+	buf[len] = '\0';
+	sptr = buf;
+
+	token = strsep(&sptr, delim);
+	if (!token)
+		return -EINVAL;
+
+	if (!sptr)
+		return -EINVAL;
+
+	if (kstrtou32(token, 0, &mem_type))
+		return -EINVAL;
+
+	token = strsep(&sptr, delim);
+	if (!token)
+		return -EINVAL;
+
+	if (!sptr)
+		return -EINVAL;
+
+	if (kstrtou32(token, 0, &reg_offset))
+		return -EINVAL;
+
+	token = strsep(&sptr, delim);
+	if (!token)
+		return -EINVAL;
+
+	if (kstrtou32(token, 0, &reg_val))
+		return -EINVAL;
+
+	ret = wlfw_athdiag_write_send_sync_msg(priv, reg_offset, mem_type,
+					       sizeof(uint32_t),
+					       (uint8_t *)&reg_val);
+	if (ret)
+		return ret;
+
+	return count;
+}
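+
+/*
+ * Illustrative usage (example values; numbers accept any base that
+ * kstrtou32 understands): write 0xdeadbeef to offset 0x100 of
+ * mem_type 0:
+ *
+ *	echo "0 0x100 0xdeadbeef" > <debugfs>/icnss/reg_write
+ */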
+
+static int icnss_regwrite_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, icnss_regwrite_show, inode->i_private);
+}
+
+static const struct file_operations icnss_regwrite_fops = {
+	.read		= seq_read,
+	.write		= icnss_regwrite_write,
+	.release	= single_release,
+	.open		= icnss_regwrite_open,
+	.owner		= THIS_MODULE,
+	.llseek		= seq_lseek,
+};
+
+static int icnss_regread_show(struct seq_file *s, void *data)
+{
+	struct icnss_priv *priv = s->private;
+
+	mutex_lock(&priv->dev_lock);
+	if (!priv->diag_reg_read_buf) {
+		seq_puts(s, "Usage: echo <mem_type> <offset> <data_len> > <debugfs>/icnss/reg_read\n");
+
+		if (!test_bit(ICNSS_FW_READY, &priv->state))
+			seq_puts(s, "Firmware is not ready yet!, wait for FW READY\n");
+
+		mutex_unlock(&priv->dev_lock);
+		return 0;
+	}
+
+	seq_printf(s, "REGREAD: Addr 0x%x Type 0x%x Length 0x%x\n",
+		   priv->diag_reg_read_addr, priv->diag_reg_read_mem_type,
+		   priv->diag_reg_read_len);
+
+	seq_hex_dump(s, "", DUMP_PREFIX_OFFSET, 32, 4, priv->diag_reg_read_buf,
+		     priv->diag_reg_read_len, false);
+
+	priv->diag_reg_read_len = 0;
+	kfree(priv->diag_reg_read_buf);
+	priv->diag_reg_read_buf = NULL;
+	mutex_unlock(&priv->dev_lock);
+
+	return 0;
+}
+
+static ssize_t icnss_regread_write(struct file *fp, const char __user *user_buf,
+				size_t count, loff_t *off)
+{
+	struct icnss_priv *priv =
+		((struct seq_file *)fp->private_data)->private;
+	char buf[64];
+	char *sptr, *token;
+	unsigned int len = 0;
+	uint32_t reg_offset, mem_type;
+	uint32_t data_len = 0;
+	uint8_t *reg_buf = NULL;
+	const char *delim = " ";
+	int ret = 0;
+
+	if (!test_bit(ICNSS_FW_READY, &priv->state) ||
+	    !test_bit(ICNSS_POWER_ON, &priv->state))
+		return -EINVAL;
+
+	len = min(count, sizeof(buf) - 1);
+	if (copy_from_user(buf, user_buf, len))
+		return -EFAULT;
+
+	buf[len] = '\0';
+	sptr = buf;
+
+	token = strsep(&sptr, delim);
+	if (!token)
+		return -EINVAL;
+
+	if (!sptr)
+		return -EINVAL;
+
+	if (kstrtou32(token, 0, &mem_type))
+		return -EINVAL;
+
+	token = strsep(&sptr, delim);
+	if (!token)
+		return -EINVAL;
+
+	if (!sptr)
+		return -EINVAL;
+
+	if (kstrtou32(token, 0, &reg_offset))
+		return -EINVAL;
+
+	token = strsep(&sptr, delim);
+	if (!token)
+		return -EINVAL;
+
+	if (kstrtou32(token, 0, &data_len))
+		return -EINVAL;
+
+	if (data_len == 0 ||
+	    data_len > QMI_WLFW_MAX_DATA_SIZE_V01)
+		return -EINVAL;
+
+	mutex_lock(&priv->dev_lock);
+	kfree(priv->diag_reg_read_buf);
+	priv->diag_reg_read_buf = NULL;
+
+	reg_buf = kzalloc(data_len, GFP_KERNEL);
+	if (!reg_buf) {
+		mutex_unlock(&priv->dev_lock);
+		return -ENOMEM;
+	}
+
+	ret = wlfw_athdiag_read_send_sync_msg(priv, reg_offset,
+					      mem_type, data_len,
+					      reg_buf);
+	if (ret) {
+		kfree(reg_buf);
+		mutex_unlock(&priv->dev_lock);
+		return ret;
+	}
+
+	priv->diag_reg_read_addr = reg_offset;
+	priv->diag_reg_read_mem_type = mem_type;
+	priv->diag_reg_read_len = data_len;
+	priv->diag_reg_read_buf = reg_buf;
+	mutex_unlock(&priv->dev_lock);
+
+	return count;
+}
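+
+/*
+ * Illustrative usage (example values): read 16 bytes from offset 0x100
+ * of mem_type 0, then dump the result:
+ *
+ *	echo "0 0x100 16" > <debugfs>/icnss/reg_read
+ *	cat <debugfs>/icnss/reg_read
+ */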
+
+static int icnss_regread_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, icnss_regread_show, inode->i_private);
+}
+
+static const struct file_operations icnss_regread_fops = {
+	.read		= seq_read,
+	.write		= icnss_regread_write,
+	.release	= single_release,
+	.open		= icnss_regread_open,
+	.owner		= THIS_MODULE,
+	.llseek		= seq_lseek,
+};
+
+#ifdef CONFIG_ICNSS_DEBUG
+static int icnss_debugfs_create(struct icnss_priv *priv)
+{
+	int ret = 0;
+	struct dentry *root_dentry;
+
+	root_dentry = debugfs_create_dir("icnss", NULL);
+
+	if (IS_ERR(root_dentry)) {
+		ret = PTR_ERR(root_dentry);
+		icnss_pr_err("Unable to create debugfs %d\n", ret);
+		goto out;
+	}
+
+	priv->root_dentry = root_dentry;
+
+	debugfs_create_file("fw_debug", 0600, root_dentry, priv,
+			    &icnss_fw_debug_fops);
+
+	debugfs_create_file("stats", 0600, root_dentry, priv,
+			    &icnss_stats_fops);
+	debugfs_create_file("reg_read", 0600, root_dentry, priv,
+			    &icnss_regread_fops);
+	debugfs_create_file("reg_write", 0600, root_dentry, priv,
+			    &icnss_regwrite_fops);
+
+out:
+	return ret;
+}
+#else
+static int icnss_debugfs_create(struct icnss_priv *priv)
+{
+	int ret = 0;
+	struct dentry *root_dentry;
+
+	root_dentry = debugfs_create_dir("icnss", NULL);
+
+	if (IS_ERR(root_dentry)) {
+		ret = PTR_ERR(root_dentry);
+		icnss_pr_err("Unable to create debugfs %d\n", ret);
+		return ret;
+	}
+
+	priv->root_dentry = root_dentry;
+
+	debugfs_create_file("stats", 0600, root_dentry, priv,
+			    &icnss_stats_fops);
+	return 0;
+}
+#endif
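+
+/*
+ * Resulting layout: with CONFIG_ICNSS_DEBUG the driver exposes
+ * <debugfs>/icnss/{fw_debug,stats,reg_read,reg_write}; without it,
+ * only <debugfs>/icnss/stats is created.
+ */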
+
+static void icnss_debugfs_destroy(struct icnss_priv *priv)
+{
+	debugfs_remove_recursive(priv->root_dentry);
+}
+
+static int icnss_get_vbatt_info(struct icnss_priv *priv)
+{
+	struct qpnp_adc_tm_chip *adc_tm_dev = NULL;
+	struct qpnp_vadc_chip *vadc_dev = NULL;
+	int ret = 0;
+
+	if (test_bit(VBATT_DISABLE, &quirks)) {
+		icnss_pr_dbg("VBATT feature is disabled\n");
+		return ret;
+	}
+
+	adc_tm_dev = qpnp_get_adc_tm(&priv->pdev->dev, "icnss");
+	if (PTR_ERR(adc_tm_dev) == -EPROBE_DEFER) {
+		icnss_pr_err("adc_tm_dev probe defer\n");
+		return -EPROBE_DEFER;
+	}
+
+	if (IS_ERR(adc_tm_dev)) {
+		ret = PTR_ERR(adc_tm_dev);
+		icnss_pr_err("Not able to get ADC dev, VBATT monitoring is disabled: %d\n",
+			     ret);
+		return ret;
+	}
+
+	vadc_dev = qpnp_get_vadc(&priv->pdev->dev, "icnss");
+	if (PTR_ERR(vadc_dev) == -EPROBE_DEFER) {
+		icnss_pr_err("vadc_dev probe defer\n");
+		return -EPROBE_DEFER;
+	}
+
+	if (IS_ERR(vadc_dev)) {
+		ret = PTR_ERR(vadc_dev);
+		icnss_pr_err("Not able to get VADC dev, VBATT monitoring is disabled: %d\n",
+			     ret);
+		return ret;
+	}
+
+	priv->adc_tm_dev = adc_tm_dev;
+	priv->vadc_dev = vadc_dev;
+
+	return 0;
+}
+
+static int icnss_probe(struct platform_device *pdev)
+{
+	int ret = 0;
+	struct resource *res;
+	int i;
+	struct device *dev = &pdev->dev;
+	struct icnss_priv *priv;
+	const __be32 *addrp;
+	u64 prop_size = 0;
+	struct device_node *np;
+
+	if (penv) {
+		icnss_pr_err("Driver is already initialized\n");
+		return -EEXIST;
+	}
+
+	icnss_pr_dbg("Platform driver probe\n");
+
+	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+
+	priv->magic = ICNSS_MAGIC;
+	dev_set_drvdata(dev, priv);
+
+	priv->pdev = pdev;
+
+	ret = icnss_get_vbatt_info(priv);
+	if (ret == -EPROBE_DEFER)
+		goto out;
+
+	memcpy(priv->vreg_info, icnss_vreg_info, sizeof(icnss_vreg_info));
+	for (i = 0; i < ICNSS_VREG_INFO_SIZE; i++) {
+		ret = icnss_get_vreg_info(dev, &priv->vreg_info[i]);
+
+		if (ret)
+			goto out;
+	}
+
+	memcpy(priv->clk_info, icnss_clk_info, sizeof(icnss_clk_info));
+	for (i = 0; i < ICNSS_CLK_INFO_SIZE; i++) {
+		ret = icnss_get_clk_info(dev, &priv->clk_info[i]);
+		if (ret)
+			goto out;
+	}
+
+	if (of_property_read_bool(pdev->dev.of_node, "qcom,smmu-s1-bypass"))
+		priv->bypass_s1_smmu = true;
+
+	icnss_pr_dbg("SMMU S1 BYPASS = %d\n", priv->bypass_s1_smmu);
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "membase");
+	if (!res) {
+		icnss_pr_err("Memory base not found in DT\n");
+		ret = -EINVAL;
+		goto out;
+	}
+
+	priv->mem_base_pa = res->start;
+	priv->mem_base_va = devm_ioremap(dev, priv->mem_base_pa,
+					 resource_size(res));
+	if (!priv->mem_base_va) {
+		icnss_pr_err("Memory base ioremap failed: phy addr: %pa\n",
+			     &priv->mem_base_pa);
+		ret = -EINVAL;
+		goto out;
+	}
+	icnss_pr_dbg("MEM_BASE pa: %pa, va: 0x%p\n", &priv->mem_base_pa,
+		     priv->mem_base_va);
+
+	for (i = 0; i < ICNSS_MAX_IRQ_REGISTRATIONS; i++) {
+		res = platform_get_resource(priv->pdev, IORESOURCE_IRQ, i);
+		if (!res) {
+			icnss_pr_err("Fail to get IRQ-%d\n", i);
+			ret = -ENODEV;
+			goto out;
+		} else {
+			priv->ce_irqs[i] = res->start;
+		}
+	}
+
+	np = of_parse_phandle(dev->of_node,
+			      "qcom,wlan-msa-fixed-region", 0);
+	if (np) {
+		addrp = of_get_address(np, 0, &prop_size, NULL);
+		if (!addrp) {
+			icnss_pr_err("Failed to get assigned-addresses or property\n");
+			ret = -EINVAL;
+			goto out;
+		}
+
+		priv->msa_pa = of_translate_address(np, addrp);
+		if (priv->msa_pa == OF_BAD_ADDR) {
+			icnss_pr_err("Failed to translate MSA PA from device-tree\n");
+			ret = -EINVAL;
+			goto out;
+		}
+
+		priv->msa_va = memremap(priv->msa_pa,
+					(unsigned long)prop_size, MEMREMAP_WT);
+		if (!priv->msa_va) {
+			icnss_pr_err("MSA PA ioremap failed: phy addr: %pa\n",
+				     &priv->msa_pa);
+			ret = -EINVAL;
+			goto out;
+		}
+		priv->msa_mem_size = prop_size;
+	} else {
+		ret = of_property_read_u32(dev->of_node, "qcom,wlan-msa-memory",
+					   &priv->msa_mem_size);
+		if (ret || priv->msa_mem_size == 0) {
+			icnss_pr_err("Fail to get MSA Memory Size: %u ret: %d\n",
+				     priv->msa_mem_size, ret);
+			goto out;
+		}
+
+		priv->msa_va = dmam_alloc_coherent(&pdev->dev,
+				priv->msa_mem_size, &priv->msa_pa, GFP_KERNEL);
+
+		if (!priv->msa_va) {
+			icnss_pr_err("DMA alloc failed for MSA\n");
+			ret = -ENOMEM;
+			goto out;
+		}
+	}
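+
+	/*
+	 * Illustrative DT usage for the two paths above (the referenced
+	 * node label and the size are examples): either reference a
+	 * reserved region with
+	 *	qcom,wlan-msa-fixed-region = <&wlan_msa_mem>;
+	 * or size a runtime DMA allocation with
+	 *	qcom,wlan-msa-memory = <0x100000>;
+	 */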
+
+	icnss_pr_dbg("MSA pa: %pa, MSA va: 0x%p MSA Memory Size: 0x%x\n",
+		     &priv->msa_pa, (void *)priv->msa_va, priv->msa_mem_size);
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+					   "smmu_iova_base");
+	if (!res) {
+		icnss_pr_err("SMMU IOVA base not found\n");
+	} else {
+		priv->smmu_iova_start = res->start;
+		priv->smmu_iova_len = resource_size(res);
+		icnss_pr_dbg("SMMU IOVA start: %pa, len: %zu\n",
+			     &priv->smmu_iova_start, priv->smmu_iova_len);
+
+		res = platform_get_resource_byname(pdev,
+						   IORESOURCE_MEM,
+						   "smmu_iova_ipa");
+		if (!res) {
+			icnss_pr_err("SMMU IOVA IPA not found\n");
+		} else {
+			priv->smmu_iova_ipa_start = res->start;
+			priv->smmu_iova_ipa_len = resource_size(res);
+			icnss_pr_dbg("SMMU IOVA IPA start: %pa, len: %zu\n",
+				     &priv->smmu_iova_ipa_start,
+				     priv->smmu_iova_ipa_len);
+		}
+
+		ret = icnss_smmu_init(priv);
+		if (ret < 0) {
+			icnss_pr_err("SMMU init failed, err = %d, start: %pad, len: %zx\n",
+				     ret, &priv->smmu_iova_start,
+				     priv->smmu_iova_len);
+			goto out;
+		}
+	}
+
+	spin_lock_init(&priv->event_lock);
+	spin_lock_init(&priv->on_off_lock);
+	mutex_init(&priv->dev_lock);
+
+	priv->event_wq = alloc_workqueue("icnss_driver_event", WQ_UNBOUND, 1);
+	if (!priv->event_wq) {
+		icnss_pr_err("Workqueue creation failed\n");
+		ret = -EFAULT;
+		goto out_smmu_deinit;
+	}
+
+	INIT_WORK(&priv->event_work, icnss_driver_event_work);
+	INIT_WORK(&priv->qmi_recv_msg_work, icnss_qmi_wlfw_clnt_notify_work);
+	INIT_LIST_HEAD(&priv->event_list);
+
+	ret = qmi_svc_event_notifier_register(WLFW_SERVICE_ID_V01,
+					      WLFW_SERVICE_VERS_V01,
+					      WLFW_SERVICE_INS_ID_V01,
+					      &wlfw_clnt_nb);
+	if (ret < 0) {
+		icnss_pr_err("Notifier register failed: %d\n", ret);
+		goto out_destroy_wq;
+	}
+
+	icnss_enable_recovery(priv);
+
+	icnss_debugfs_create(priv);
+
+	ret = device_init_wakeup(&priv->pdev->dev, true);
+	if (ret)
+		icnss_pr_err("Failed to init platform device wakeup source, err = %d\n",
+			     ret);
+
+	penv = priv;
+
+	icnss_pr_info("Platform driver probed successfully\n");
+
+	return 0;
+
+out_destroy_wq:
+	destroy_workqueue(priv->event_wq);
+out_smmu_deinit:
+	icnss_smmu_deinit(priv);
+out:
+	dev_set_drvdata(dev, NULL);
+
+	return ret;
+}
+
+static int icnss_remove(struct platform_device *pdev)
+{
+	icnss_pr_info("Removing driver: state: 0x%lx\n", penv->state);
+
+	device_init_wakeup(&penv->pdev->dev, false);
+
+	icnss_debugfs_destroy(penv);
+
+	icnss_modem_ssr_unregister_notifier(penv);
+
+	destroy_ramdump_device(penv->msa0_dump_dev);
+
+	icnss_pdr_unregister_notifier(penv);
+
+	qmi_svc_event_notifier_unregister(WLFW_SERVICE_ID_V01,
+					  WLFW_SERVICE_VERS_V01,
+					  WLFW_SERVICE_INS_ID_V01,
+					  &wlfw_clnt_nb);
+	if (penv->event_wq)
+		destroy_workqueue(penv->event_wq);
+
+	icnss_hw_power_off(penv);
+
+	icnss_assign_msa_perm_all(penv, ICNSS_MSA_PERM_HLOS_ALL);
+	clear_bit(ICNSS_MSA0_ASSIGNED, &penv->state);
+
+	dev_set_drvdata(&pdev->dev, NULL);
+
+	return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int icnss_pm_suspend(struct device *dev)
+{
+	struct icnss_priv *priv = dev_get_drvdata(dev);
+	int ret = 0;
+
+	if (priv->magic != ICNSS_MAGIC) {
+		icnss_pr_err("Invalid drvdata for pm suspend: dev %p, data %p, magic 0x%x\n",
+			     dev, priv, priv->magic);
+		return -EINVAL;
+	}
+
+	icnss_pr_vdbg("PM Suspend, state: 0x%lx\n", priv->state);
+
+	if (!priv->ops || !priv->ops->pm_suspend ||
+	    !test_bit(ICNSS_DRIVER_PROBED, &priv->state))
+		goto out;
+
+	ret = priv->ops->pm_suspend(dev);
+
+out:
+	if (ret == 0) {
+		priv->stats.pm_suspend++;
+		set_bit(ICNSS_PM_SUSPEND, &priv->state);
+	} else {
+		priv->stats.pm_suspend_err++;
+	}
+	return ret;
+}
+
+static int icnss_pm_resume(struct device *dev)
+{
+	struct icnss_priv *priv = dev_get_drvdata(dev);
+	int ret = 0;
+
+	if (priv->magic != ICNSS_MAGIC) {
+		icnss_pr_err("Invalid drvdata for pm resume: dev %p, data %p, magic 0x%x\n",
+			     dev, priv, priv->magic);
+		return -EINVAL;
+	}
+
+	icnss_pr_vdbg("PM resume, state: 0x%lx\n", priv->state);
+
+	if (!priv->ops || !priv->ops->pm_resume ||
+	    !test_bit(ICNSS_DRIVER_PROBED, &priv->state))
+		goto out;
+
+	ret = priv->ops->pm_resume(dev);
+
+out:
+	if (ret == 0) {
+		priv->stats.pm_resume++;
+		clear_bit(ICNSS_PM_SUSPEND, &priv->state);
+	} else {
+		priv->stats.pm_resume_err++;
+	}
+	return ret;
+}
+
+static int icnss_pm_suspend_noirq(struct device *dev)
+{
+	struct icnss_priv *priv = dev_get_drvdata(dev);
+	int ret = 0;
+
+	if (priv->magic != ICNSS_MAGIC) {
+		icnss_pr_err("Invalid drvdata for pm suspend_noirq: dev %p, data %p, magic 0x%x\n",
+			     dev, priv, priv->magic);
+		return -EINVAL;
+	}
+
+	icnss_pr_vdbg("PM suspend_noirq, state: 0x%lx\n", priv->state);
+
+	if (!priv->ops || !priv->ops->suspend_noirq ||
+	    !test_bit(ICNSS_DRIVER_PROBED, &priv->state))
+		goto out;
+
+	ret = priv->ops->suspend_noirq(dev);
+
+out:
+	if (ret == 0) {
+		priv->stats.pm_suspend_noirq++;
+		set_bit(ICNSS_PM_SUSPEND_NOIRQ, &priv->state);
+	} else {
+		priv->stats.pm_suspend_noirq_err++;
+	}
+	return ret;
+}
+
+static int icnss_pm_resume_noirq(struct device *dev)
+{
+	struct icnss_priv *priv = dev_get_drvdata(dev);
+	int ret = 0;
+
+	if (priv->magic != ICNSS_MAGIC) {
+		icnss_pr_err("Invalid drvdata for pm resume_noirq: dev %p, data %p, magic 0x%x\n",
+			     dev, priv, priv->magic);
+		return -EINVAL;
+	}
+
+	icnss_pr_vdbg("PM resume_noirq, state: 0x%lx\n", priv->state);
+
+	if (!priv->ops || !priv->ops->resume_noirq ||
+	    !test_bit(ICNSS_DRIVER_PROBED, &priv->state))
+		goto out;
+
+	ret = priv->ops->resume_noirq(dev);
+
+out:
+	if (ret == 0) {
+		priv->stats.pm_resume_noirq++;
+		clear_bit(ICNSS_PM_SUSPEND_NOIRQ, &priv->state);
+	} else {
+		priv->stats.pm_resume_noirq_err++;
+	}
+	return ret;
+}
+#endif
+
+static const struct dev_pm_ops icnss_pm_ops = {
+	SET_SYSTEM_SLEEP_PM_OPS(icnss_pm_suspend,
+				icnss_pm_resume)
+	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(icnss_pm_suspend_noirq,
+				      icnss_pm_resume_noirq)
+};
+
+static const struct of_device_id icnss_dt_match[] = {
+	{.compatible = "qcom,icnss"},
+	{}
+};
+
+MODULE_DEVICE_TABLE(of, icnss_dt_match);
+
+static struct platform_driver icnss_driver = {
+	.probe  = icnss_probe,
+	.remove = icnss_remove,
+	.driver = {
+		.name = "icnss",
+		.pm = &icnss_pm_ops,
+		.owner = THIS_MODULE,
+		.of_match_table = icnss_dt_match,
+	},
+};
+
+static int __init icnss_initialize(void)
+{
+	icnss_ipc_log_context = ipc_log_context_create(NUM_LOG_PAGES,
+						       "icnss", 0);
+	if (!icnss_ipc_log_context)
+		icnss_pr_err("Unable to create log context\n");
+
+	icnss_ipc_log_long_context = ipc_log_context_create(NUM_LOG_LONG_PAGES,
+						       "icnss_long", 0);
+	if (!icnss_ipc_log_long_context)
+		icnss_pr_err("Unable to create log long context\n");
+
+	return platform_driver_register(&icnss_driver);
+}
+
+static void __exit icnss_exit(void)
+{
+	platform_driver_unregister(&icnss_driver);
+	ipc_log_context_destroy(icnss_ipc_log_context);
+	icnss_ipc_log_context = NULL;
+	ipc_log_context_destroy(icnss_ipc_log_long_context);
+	icnss_ipc_log_long_context = NULL;
+}
+
+module_init(icnss_initialize);
+module_exit(icnss_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION(DEVICE "iCNSS CORE platform driver");
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/soc/qcom/icnss_utils.c	2019-01-22 16:16:26.655274950 +0100
@@ -0,0 +1,154 @@
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <soc/qcom/icnss.h>
+
+#define ICNSS_MAX_CH_NUM 45
+
+static DEFINE_MUTEX(unsafe_channel_list_lock);
+static DEFINE_SPINLOCK(dfs_nol_info_lock);
+static int driver_load_cnt;
+static enum cnss_cc_src icnss_cc_source = CNSS_SOURCE_CORE;
+
+static struct icnss_unsafe_channel_list {
+	u16 unsafe_ch_count;
+	u16 unsafe_ch_list[ICNSS_MAX_CH_NUM];
+} unsafe_channel_list;
+
+static struct icnss_dfs_nol_info {
+	void *dfs_nol_info;
+	u16 dfs_nol_info_len;
+} dfs_nol_info;
+
+int icnss_set_wlan_unsafe_channel(u16 *unsafe_ch_list, u16 ch_count)
+{
+	mutex_lock(&unsafe_channel_list_lock);
+	if ((!unsafe_ch_list) || (ch_count > ICNSS_MAX_CH_NUM)) {
+		mutex_unlock(&unsafe_channel_list_lock);
+		return -EINVAL;
+	}
+
+	unsafe_channel_list.unsafe_ch_count = ch_count;
+
+	if (ch_count != 0)
+		memcpy(unsafe_channel_list.unsafe_ch_list, unsafe_ch_list,
+		       ch_count * sizeof(u16));
+	mutex_unlock(&unsafe_channel_list_lock);
+
+	return 0;
+}
+EXPORT_SYMBOL(icnss_set_wlan_unsafe_channel);
+
+int icnss_get_wlan_unsafe_channel(u16 *unsafe_ch_list,
+				  u16 *ch_count, u16 buf_len)
+{
+	mutex_lock(&unsafe_channel_list_lock);
+	if (!unsafe_ch_list || !ch_count) {
+		mutex_unlock(&unsafe_channel_list_lock);
+		return -EINVAL;
+	}
+
+	if (buf_len < (unsafe_channel_list.unsafe_ch_count * sizeof(u16))) {
+		mutex_unlock(&unsafe_channel_list_lock);
+		return -ENOMEM;
+	}
+
+	*ch_count = unsafe_channel_list.unsafe_ch_count;
+	memcpy(unsafe_ch_list, unsafe_channel_list.unsafe_ch_list,
+	       unsafe_channel_list.unsafe_ch_count * sizeof(u16));
+	mutex_unlock(&unsafe_channel_list_lock);
+
+	return 0;
+}
+EXPORT_SYMBOL(icnss_get_wlan_unsafe_channel);
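+
+/*
+ * Usage sketch (caller side, example values):
+ *
+ *	u16 ch[ICNSS_MAX_CH_NUM] = { 11, 36 };
+ *	u16 cnt;
+ *
+ *	icnss_set_wlan_unsafe_channel(ch, 2);
+ *	icnss_get_wlan_unsafe_channel(ch, &cnt, sizeof(ch));
+ */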
+
+int icnss_wlan_set_dfs_nol(const void *info, u16 info_len)
+{
+	void *temp;
+	void *old_nol_info;
+	struct icnss_dfs_nol_info *dfs_info;
+
+	if (!info || !info_len)
+		return -EINVAL;
+
+	temp = kmalloc(info_len, GFP_ATOMIC);
+	if (!temp)
+		return -ENOMEM;
+
+	memcpy(temp, info, info_len);
+	spin_lock_bh(&dfs_nol_info_lock);
+	dfs_info = &dfs_nol_info;
+	old_nol_info = dfs_info->dfs_nol_info;
+	dfs_info->dfs_nol_info = temp;
+	dfs_info->dfs_nol_info_len = info_len;
+	spin_unlock_bh(&dfs_nol_info_lock);
+	kfree(old_nol_info);
+
+	return 0;
+}
+EXPORT_SYMBOL(icnss_wlan_set_dfs_nol);
+
+int icnss_wlan_get_dfs_nol(void *info, u16 info_len)
+{
+	int len;
+	struct icnss_dfs_nol_info *dfs_info;
+
+	if (!info || !info_len)
+		return -EINVAL;
+
+	spin_lock_bh(&dfs_nol_info_lock);
+
+	dfs_info = &dfs_nol_info;
+	if (dfs_info->dfs_nol_info == NULL ||
+	    dfs_info->dfs_nol_info_len == 0) {
+		spin_unlock_bh(&dfs_nol_info_lock);
+		return -ENOENT;
+	}
+
+	len = min(info_len, dfs_info->dfs_nol_info_len);
+	memcpy(info, dfs_info->dfs_nol_info, len);
+	spin_unlock_bh(&dfs_nol_info_lock);
+
+	return len;
+}
+EXPORT_SYMBOL(icnss_wlan_get_dfs_nol);
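+
+/*
+ * Note: icnss_wlan_get_dfs_nol() returns the number of bytes copied
+ * (bounded by both info_len and the stored length), or -ENOENT when
+ * nothing has been stored via icnss_wlan_set_dfs_nol().
+ */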
+
+void icnss_increment_driver_load_cnt(void)
+{
+	++driver_load_cnt;
+}
+EXPORT_SYMBOL(icnss_increment_driver_load_cnt);
+
+int icnss_get_driver_load_cnt(void)
+{
+	return driver_load_cnt;
+}
+EXPORT_SYMBOL(icnss_get_driver_load_cnt);
+
+void icnss_set_cc_source(enum cnss_cc_src cc_source)
+{
+	icnss_cc_source = cc_source;
+}
+EXPORT_SYMBOL(icnss_set_cc_source);
+
+enum cnss_cc_src icnss_get_cc_source(void)
+{
+	return icnss_cc_source;
+}
+EXPORT_SYMBOL(icnss_get_cc_source);
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/soc/qcom/ipc_router_glink_xprt.c	2019-01-22 16:16:26.655274950 +0100
@@ -0,0 +1,934 @@
+/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/*
+ * IPC ROUTER GLINK XPRT module.
+ */
+#define DEBUG
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/types.h>
+#include <linux/of.h>
+#include <linux/ipc_router_xprt.h>
+#include <linux/skbuff.h>
+#include <linux/delay.h>
+#include <linux/sched.h>
+
+#include <soc/qcom/glink.h>
+#include <soc/qcom/subsystem_restart.h>
+
+static int ipc_router_glink_xprt_debug_mask;
+module_param_named(debug_mask, ipc_router_glink_xprt_debug_mask,
+		   int, S_IRUGO | S_IWUSR | S_IWGRP);
+
+#define IPCRTR_INTENT_REQ_TIMEOUT_MS 5000
+#if defined(DEBUG)
+#define D(x...) do { \
+if (ipc_router_glink_xprt_debug_mask) \
+	pr_info(x); \
+} while (0)
+#else
+#define D(x...) do { } while (0)
+#endif
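+
+/*
+ * D() tracing is compiled in (DEBUG is defined above) but off by
+ * default; it can be enabled at runtime via the module parameter, e.g.:
+ *
+ *	echo 1 > /sys/module/ipc_router_glink_xprt/parameters/debug_mask
+ */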
+
+#define MIN_FRAG_SZ (IPC_ROUTER_HDR_SIZE + sizeof(union rr_control_msg))
+#define IPC_RTR_XPRT_NAME_LEN (2 * GLINK_NAME_SIZE)
+#define PIL_SUBSYSTEM_NAME_LEN 32
+#define IPC_RTR_WS_NAME_LEN ((2 * GLINK_NAME_SIZE) + 4)
+
+#define MAX_NUM_LO_INTENTS 5
+#define MAX_NUM_MD_INTENTS 3
+#define MAX_NUM_HI_INTENTS 2
+#define LO_RX_INTENT_SIZE 2048
+#define MD_RX_INTENT_SIZE 8192
+#define HI_RX_INTENT_SIZE (17 * 1024)
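+
+/*
+ * Intent sizing example: requests up to LO_RX_INTENT_SIZE are served by
+ * the intents queued at channel open; up to MD_RX_INTENT_SIZE an 8 KB
+ * intent is queued (at most 3); up to HI_RX_INTENT_SIZE a 17 KB intent
+ * (at most 2); larger requests get an intent of the exact requested
+ * size (see glink_xprt_qrx_intent_worker()).
+ */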
+
+/**
+ * ipc_router_glink_xprt - IPC Router's GLINK XPRT structure
+ * @list: IPC router's GLINK XPRT list.
+ * @ch_name: GLink Channel Name.
+ * @edge: Edge between the local node and the remote node.
+ * @transport: Physical Transport Name as identified by Glink.
+ * @pil_edge: Edge name understood by PIL.
+ * @ipc_rtr_xprt_name: XPRT Name to be registered with IPC Router.
+ * @notify_rx_ws_name: Name of the wakeup source used in the notify rx path.
+ * @xprt: IPC Router XPRT structure to contain XPRT specific info.
+ * @ch_hndl: Opaque Channel handle returned by GLink.
+ * @xprt_wq: Workqueue to queue read & other XPRT related works.
+ * @notify_rxv_ws: Wakeup source held while queued rx work is pending.
+ * @ss_reset_rwlock: Read-Write lock to protect access to the ss_reset flag.
+ * @ss_reset: flag used to check SSR state.
+ * @pil: pil handle to the remote subsystem
+ * @sft_close_complete: Variable to indicate completion of SSR handling
+ *                      by IPC Router.
+ * @xprt_version: IPC Router header version supported by this XPRT.
+ * @xprt_option: XPRT specific options to be handled by IPC Router.
+ * @disable_pil_loading: Disable PIL Loading of the subsystem.
+ * @cur_lo_intents_cnt: Count of low-size rx intents currently queued.
+ * @cur_md_intents_cnt: Count of medium-size rx intents currently queued.
+ * @cur_hi_intents_cnt: Count of high-size rx intents currently queued.
+ * @dynamic_wakeup_source: Dynamic wakeup source for this subsystem.
+ */
+struct ipc_router_glink_xprt {
+	struct list_head list;
+	char ch_name[GLINK_NAME_SIZE];
+	char edge[GLINK_NAME_SIZE];
+	char transport[GLINK_NAME_SIZE];
+	char pil_edge[PIL_SUBSYSTEM_NAME_LEN];
+	char ipc_rtr_xprt_name[IPC_RTR_XPRT_NAME_LEN];
+	char notify_rx_ws_name[IPC_RTR_WS_NAME_LEN];
+	struct msm_ipc_router_xprt xprt;
+	void *ch_hndl;
+	struct workqueue_struct *xprt_wq;
+	struct wakeup_source notify_rxv_ws;
+	struct rw_semaphore ss_reset_rwlock;
+	int ss_reset;
+	void *pil;
+	struct completion sft_close_complete;
+	unsigned xprt_version;
+	unsigned xprt_option;
+	bool disable_pil_loading;
+	uint32_t cur_lo_intents_cnt;
+	uint32_t cur_md_intents_cnt;
+	uint32_t cur_hi_intents_cnt;
+	bool dynamic_wakeup_source;
+};
+
+struct ipc_router_glink_xprt_work {
+	struct ipc_router_glink_xprt *glink_xprtp;
+	struct work_struct work;
+};
+
+struct queue_rx_intent_work {
+	struct ipc_router_glink_xprt *glink_xprtp;
+	size_t intent_size;
+	struct work_struct work;
+};
+
+struct read_work {
+	struct ipc_router_glink_xprt *glink_xprtp;
+	void *iovec;
+	size_t iovec_size;
+	void * (*vbuf_provider)(void *iovec, size_t offset, size_t *size);
+	void * (*pbuf_provider)(void *iovec, size_t offset, size_t *size);
+	struct work_struct work;
+};
+
+static void glink_xprt_read_data(struct work_struct *work);
+static void glink_xprt_open_event(struct work_struct *work);
+static void glink_xprt_close_event(struct work_struct *work);
+
+/**
+ * ipc_router_glink_xprt_config - Configuration info for each GLINK XPRT
+ * @ch_name:		Name of the GLINK endpoint exported by GLINK driver.
+ * @edge:		Edge between the local node and remote node.
+ * @transport:		Physical Transport Name as identified by GLINK.
+ * @pil_edge:		Edge name understood by PIL.
+ * @ipc_rtr_xprt_name:	XPRT Name to be registered with IPC Router.
+ * @link_id:		Network cluster ID to which this XPRT belongs.
+ * @xprt_version:	IPC Router header version supported by this XPRT.
+ * @xprt_option:	XPRT specific options to be handled by IPC Router.
+ * @disable_pil_loading: Disable PIL Loading of the subsystem.
+ * @dynamic_wakeup_source: Dynamic wakeup source for this subsystem.
+ */
+struct ipc_router_glink_xprt_config {
+	char ch_name[GLINK_NAME_SIZE];
+	char edge[GLINK_NAME_SIZE];
+	char transport[GLINK_NAME_SIZE];
+	char ipc_rtr_xprt_name[IPC_RTR_XPRT_NAME_LEN];
+	char pil_edge[PIL_SUBSYSTEM_NAME_LEN];
+	uint32_t link_id;
+	unsigned xprt_version;
+	unsigned xprt_option;
+	bool disable_pil_loading;
+	bool dynamic_wakeup_source;
+};
+
+#define MODULE_NAME "ipc_router_glink_xprt"
+static DEFINE_MUTEX(glink_xprt_list_lock_lha1);
+static LIST_HEAD(glink_xprt_list);
+
+static struct workqueue_struct *glink_xprt_wq;
+
+static void glink_xprt_link_state_cb(struct glink_link_state_cb_info *cb_info,
+				     void *priv);
+static struct glink_link_info glink_xprt_link_info = {
+	NULL, NULL, glink_xprt_link_state_cb
+};
+static void *glink_xprt_link_state_notif_handle;
+
+struct xprt_state_work_info {
+	char edge[GLINK_NAME_SIZE];
+	char transport[GLINK_NAME_SIZE];
+	uint32_t link_state;
+	struct work_struct work;
+};
+
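+/* Evaluates true when (a) + (b) would wrap the given unsigned type. */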
+#define OVERFLOW_ADD_UNSIGNED(type, a, b) \
+	(((type)~0 - (a)) < (b) ? true : false)
+
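+/*
+ * vbuf provider handed to glink_txv(): maps a linear offset within an
+ * rr_packet onto the skb fragment that holds it, returning a pointer
+ * into that fragment and the bytes remaining, so the packet can be
+ * sent without flattening its fragment queue.
+ */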
+static void *glink_xprt_vbuf_provider(void *iovec, size_t offset,
+				      size_t *buf_size)
+{
+	struct rr_packet *pkt = (struct rr_packet *)iovec;
+	struct sk_buff *skb;
+	size_t temp_size = 0;
+
+	if (unlikely(!pkt || !buf_size))
+		return NULL;
+
+	*buf_size = 0;
+	skb_queue_walk(pkt->pkt_fragment_q, skb) {
+		if (unlikely(OVERFLOW_ADD_UNSIGNED(size_t, temp_size,
+						   skb->len)))
+			break;
+
+		temp_size += skb->len;
+		if (offset >= temp_size)
+			continue;
+
+		*buf_size = temp_size - offset;
+		return (void *)skb->data + skb->len - *buf_size;
+	}
+	return NULL;
+}
+
+/**
+ * ipc_router_glink_xprt_set_version() - Set the IPC Router version in transport
+ * @xprt:	Reference to the transport structure.
+ * @version:	The version to be set in transport.
+ */
+static void ipc_router_glink_xprt_set_version(
+	struct msm_ipc_router_xprt *xprt, unsigned version)
+{
+	struct ipc_router_glink_xprt *glink_xprtp;
+
+	if (!xprt)
+		return;
+	glink_xprtp = container_of(xprt, struct ipc_router_glink_xprt, xprt);
+	glink_xprtp->xprt_version = version;
+}
+
+static int ipc_router_glink_xprt_get_version(
+	struct msm_ipc_router_xprt *xprt)
+{
+	struct ipc_router_glink_xprt *glink_xprtp;
+	if (!xprt)
+		return -EINVAL;
+	glink_xprtp = container_of(xprt, struct ipc_router_glink_xprt, xprt);
+
+	return (int)glink_xprtp->xprt_version;
+}
+
+static int ipc_router_glink_xprt_get_option(
+	struct msm_ipc_router_xprt *xprt)
+{
+	struct ipc_router_glink_xprt *glink_xprtp;
+	if (!xprt)
+		return -EINVAL;
+	glink_xprtp = container_of(xprt, struct ipc_router_glink_xprt, xprt);
+
+	return (int)glink_xprtp->xprt_option;
+}
+
+static int ipc_router_glink_xprt_write(void *data, uint32_t len,
+				       struct msm_ipc_router_xprt *xprt)
+{
+	struct rr_packet *pkt = (struct rr_packet *)data;
+	struct rr_packet *temp_pkt;
+	int ret;
+	struct ipc_router_glink_xprt *glink_xprtp =
+		container_of(xprt, struct ipc_router_glink_xprt, xprt);
+
+	if (!pkt)
+		return -EINVAL;
+
+	if (!len || pkt->length != len)
+		return -EINVAL;
+
+	temp_pkt = clone_pkt(pkt);
+	if (!temp_pkt) {
+		IPC_RTR_ERR("%s: Error cloning packet while tx\n", __func__);
+		return -ENOMEM;
+	}
+
+	down_read(&glink_xprtp->ss_reset_rwlock);
+	if (glink_xprtp->ss_reset) {
+		release_pkt(temp_pkt);
+		IPC_RTR_ERR("%s: %s chnl reset\n", __func__, xprt->name);
+		ret = -ENETRESET;
+		goto out_write_data;
+	}
+
+	D("%s: Ready to write %d bytes\n", __func__, len);
+	ret = glink_txv(glink_xprtp->ch_hndl, (void *)glink_xprtp,
+			(void *)temp_pkt, len, glink_xprt_vbuf_provider,
+			NULL, true);
+	if (ret < 0) {
+		release_pkt(temp_pkt);
+		IPC_RTR_ERR("%s: Error %d while tx\n", __func__, ret);
+		goto out_write_data;
+	}
+	ret = len;
+	D("%s:%s: TX Complete for %d bytes @ %p\n", __func__,
+	  glink_xprtp->ipc_rtr_xprt_name, len, temp_pkt);
+
+out_write_data:
+	up_read(&glink_xprtp->ss_reset_rwlock);
+	return ret;
+}
+
+static int ipc_router_glink_xprt_close(struct msm_ipc_router_xprt *xprt)
+{
+	struct ipc_router_glink_xprt *glink_xprtp =
+		container_of(xprt, struct ipc_router_glink_xprt, xprt);
+
+	down_write(&glink_xprtp->ss_reset_rwlock);
+	glink_xprtp->ss_reset = 1;
+	up_write(&glink_xprtp->ss_reset_rwlock);
+	return glink_close(glink_xprtp->ch_hndl);
+}
+
+static void glink_xprt_sft_close_done(struct msm_ipc_router_xprt *xprt)
+{
+	struct ipc_router_glink_xprt *glink_xprtp =
+		container_of(xprt, struct ipc_router_glink_xprt, xprt);
+
+	complete_all(&glink_xprtp->sft_close_complete);
+}
+
+static bool ipc_router_glink_xprt_get_ws_info(struct msm_ipc_router_xprt *xprt)
+{
+	struct ipc_router_glink_xprt *glink_xprtp =
+		container_of(xprt, struct ipc_router_glink_xprt, xprt);
+
+	return glink_xprtp->dynamic_wakeup_source;
+}
+
+static struct rr_packet *glink_xprt_copy_data(struct read_work *rx_work)
+{
+	void *buf, *pbuf, *dest_buf;
+	size_t buf_size;
+	struct rr_packet *pkt;
+	struct sk_buff *skb;
+
+	pkt = create_pkt(NULL);
+	if (!pkt) {
+		IPC_RTR_ERR("%s: Couldn't alloc rr_packet\n", __func__);
+		return NULL;
+	}
+
+	do {
+		buf_size = 0;
+		if (rx_work->vbuf_provider) {
+			buf = rx_work->vbuf_provider(rx_work->iovec,
+						pkt->length, &buf_size);
+		} else {
+			pbuf = rx_work->pbuf_provider(rx_work->iovec,
+						pkt->length, &buf_size);
+			buf = phys_to_virt((unsigned long)pbuf);
+		}
+		if (!buf_size || !buf)
+			break;
+
+		skb = alloc_skb(buf_size, GFP_KERNEL);
+		if (!skb) {
+			IPC_RTR_ERR("%s: Couldn't alloc skb of size %zu\n",
+				    __func__, buf_size);
+			release_pkt(pkt);
+			return NULL;
+		}
+		dest_buf = skb_put(skb, buf_size);
+		memcpy(dest_buf, buf, buf_size);
+		skb_queue_tail(pkt->pkt_fragment_q, skb);
+		pkt->length += buf_size;
+	} while (buf && buf_size);
+	return pkt;
+}
+
+static void glink_xprt_read_data(struct work_struct *work)
+{
+	struct rr_packet *pkt;
+	struct read_work *rx_work =
+		container_of(work, struct read_work, work);
+	struct ipc_router_glink_xprt *glink_xprtp = rx_work->glink_xprtp;
+	bool reuse_intent = false;
+
+	down_read(&glink_xprtp->ss_reset_rwlock);
+	if (glink_xprtp->ss_reset) {
+		IPC_RTR_ERR("%s: %s channel reset\n",
+			__func__, glink_xprtp->xprt.name);
+		goto out_read_data;
+	}
+
+	D("%s %zu bytes @ %p\n", __func__, rx_work->iovec_size, rx_work->iovec);
+	if (rx_work->iovec_size <= HI_RX_INTENT_SIZE)
+		reuse_intent = true;
+
+	pkt = glink_xprt_copy_data(rx_work);
+	if (!pkt) {
+		IPC_RTR_ERR("%s: Error copying data\n", __func__);
+		goto out_read_data;
+	}
+
+	msm_ipc_router_xprt_notify(&glink_xprtp->xprt,
+				   IPC_ROUTER_XPRT_EVENT_DATA, pkt);
+	release_pkt(pkt);
+out_read_data:
+	glink_rx_done(glink_xprtp->ch_hndl, rx_work->iovec, reuse_intent);
+	kfree(rx_work);
+	up_read(&glink_xprtp->ss_reset_rwlock);
+	__pm_relax(&glink_xprtp->notify_rxv_ws);
+}
+
+static void glink_xprt_open_event(struct work_struct *work)
+{
+	struct ipc_router_glink_xprt_work *xprt_work =
+		container_of(work, struct ipc_router_glink_xprt_work, work);
+	struct ipc_router_glink_xprt *glink_xprtp = xprt_work->glink_xprtp;
+	int i;
+
+	msm_ipc_router_xprt_notify(&glink_xprtp->xprt,
+				IPC_ROUTER_XPRT_EVENT_OPEN, NULL);
+	D("%s: Notified IPC Router of %s OPEN\n",
+	  __func__, glink_xprtp->xprt.name);
+	glink_xprtp->cur_lo_intents_cnt = 0;
+	glink_xprtp->cur_md_intents_cnt = 0;
+	glink_xprtp->cur_hi_intents_cnt = 0;
+	for (i = 0; i < MAX_NUM_LO_INTENTS; i++) {
+		glink_queue_rx_intent(glink_xprtp->ch_hndl, (void *)glink_xprtp,
+				      LO_RX_INTENT_SIZE);
+		glink_xprtp->cur_lo_intents_cnt++;
+	}
+	kfree(xprt_work);
+}
+
+static void glink_xprt_close_event(struct work_struct *work)
+{
+	struct ipc_router_glink_xprt_work *xprt_work =
+		container_of(work, struct ipc_router_glink_xprt_work, work);
+	struct ipc_router_glink_xprt *glink_xprtp = xprt_work->glink_xprtp;
+
+	init_completion(&glink_xprtp->sft_close_complete);
+	msm_ipc_router_xprt_notify(&glink_xprtp->xprt,
+				IPC_ROUTER_XPRT_EVENT_CLOSE, NULL);
+	D("%s: Notified IPC Router of %s CLOSE\n",
+	   __func__, glink_xprtp->xprt.name);
+	wait_for_completion(&glink_xprtp->sft_close_complete);
+	kfree(xprt_work);
+}
+
+static void glink_xprt_qrx_intent_worker(struct work_struct *work)
+{
+	size_t sz;
+	struct queue_rx_intent_work *qrx_intent_work =
+		container_of(work, struct queue_rx_intent_work, work);
+	struct ipc_router_glink_xprt *glink_xprtp =
+					qrx_intent_work->glink_xprtp;
+	uint32_t *cnt = NULL;
+	int ret;
+
+	sz = qrx_intent_work->intent_size;
+	if (sz <= MD_RX_INTENT_SIZE) {
+		if (glink_xprtp->cur_md_intents_cnt >= MAX_NUM_MD_INTENTS)
+			goto qrx_intent_worker_out;
+		sz = MD_RX_INTENT_SIZE;
+		cnt = &glink_xprtp->cur_md_intents_cnt;
+	} else if (sz <= HI_RX_INTENT_SIZE) {
+		if (glink_xprtp->cur_hi_intents_cnt >= MAX_NUM_HI_INTENTS)
+			goto qrx_intent_worker_out;
+		sz = HI_RX_INTENT_SIZE;
+		cnt = &glink_xprtp->cur_hi_intents_cnt;
+	}
+
+	ret = glink_queue_rx_intent(glink_xprtp->ch_hndl, (void *)glink_xprtp,
+					sz);
+	if (!ret && cnt)
+		(*cnt)++;
+qrx_intent_worker_out:
+	kfree(qrx_intent_work);
+}
+
+static void msm_ipc_unload_subsystem(struct ipc_router_glink_xprt *glink_xprtp)
+{
+	if (glink_xprtp->pil) {
+		subsystem_put(glink_xprtp->pil);
+		glink_xprtp->pil = NULL;
+	}
+}
+
+static void *msm_ipc_load_subsystem(struct ipc_router_glink_xprt *glink_xprtp)
+{
+	void *pil = NULL;
+
+	if (!glink_xprtp->disable_pil_loading) {
+		pil = subsystem_get(glink_xprtp->pil_edge);
+		if (IS_ERR(pil)) {
+			pr_err("%s: Failed to load %s err = [0x%ld]\n",
+				__func__, glink_xprtp->pil_edge, PTR_ERR(pil));
+			pil = NULL;
+		}
+	}
+	return pil;
+}
+
+static void glink_xprt_notify_rxv(void *handle, const void *priv,
+	const void *pkt_priv, void *ptr, size_t size,
+	void * (*vbuf_provider)(void *iovec, size_t offset, size_t *size),
+	void * (*pbuf_provider)(void *iovec, size_t offset, size_t *size))
+{
+	struct ipc_router_glink_xprt *glink_xprtp =
+		(struct ipc_router_glink_xprt *)priv;
+	struct read_work *rx_work;
+
+	rx_work = kmalloc(sizeof(*rx_work), GFP_ATOMIC);
+	if (!rx_work) {
+		IPC_RTR_ERR("%s: couldn't allocate read_work\n", __func__);
+		glink_rx_done(glink_xprtp->ch_hndl, ptr, true);
+		return;
+	}
+
+	rx_work->glink_xprtp = glink_xprtp;
+	rx_work->iovec = ptr;
+	rx_work->iovec_size = size;
+	rx_work->vbuf_provider = vbuf_provider;
+	rx_work->pbuf_provider = pbuf_provider;
+	if (!glink_xprtp->dynamic_wakeup_source)
+		__pm_stay_awake(&glink_xprtp->notify_rxv_ws);
+	INIT_WORK(&rx_work->work, glink_xprt_read_data);
+	queue_work(glink_xprtp->xprt_wq, &rx_work->work);
+}
+
+static void glink_xprt_notify_tx_done(void *handle, const void *priv,
+				      const void *pkt_priv, const void *ptr)
+{
+	struct ipc_router_glink_xprt *glink_xprtp =
+		(struct ipc_router_glink_xprt *)priv;
+	struct rr_packet *temp_pkt = (struct rr_packet *)ptr;
+
+	D("%s:%s: @ %p\n", __func__, glink_xprtp->ipc_rtr_xprt_name, ptr);
+	release_pkt(temp_pkt);
+}
+
+static bool glink_xprt_notify_rx_intent_req(void *handle, const void *priv,
+					    size_t sz)
+{
+	struct queue_rx_intent_work *qrx_intent_work;
+	struct ipc_router_glink_xprt *glink_xprtp =
+		(struct ipc_router_glink_xprt *)priv;
+
+	if (sz <= LO_RX_INTENT_SIZE)
+		return true;
+
+	qrx_intent_work = kmalloc(sizeof(struct queue_rx_intent_work),
+				  GFP_ATOMIC);
+	if (!qrx_intent_work) {
+		IPC_RTR_ERR("%s: Couldn't queue rx_intent of %zu bytes\n",
+			    __func__, sz);
+		return false;
+	}
+	qrx_intent_work->glink_xprtp = glink_xprtp;
+	qrx_intent_work->intent_size = sz;
+	INIT_WORK(&qrx_intent_work->work, glink_xprt_qrx_intent_worker);
+	queue_work(glink_xprtp->xprt_wq, &qrx_intent_work->work);
+	return true;
+}
+
+static void glink_xprt_notify_state(void *handle, const void *priv,
+				    unsigned event)
+{
+	struct ipc_router_glink_xprt_work *xprt_work;
+	struct ipc_router_glink_xprt *glink_xprtp =
+		(struct ipc_router_glink_xprt *)priv;
+
+	D("%s: %s:%s - State %d\n",
+	  __func__, glink_xprtp->edge, glink_xprtp->transport, event);
+	switch (event) {
+	case GLINK_CONNECTED:
+		if (IS_ERR_OR_NULL(glink_xprtp->ch_hndl))
+			glink_xprtp->ch_hndl = handle;
+		down_write(&glink_xprtp->ss_reset_rwlock);
+		glink_xprtp->ss_reset = 0;
+		up_write(&glink_xprtp->ss_reset_rwlock);
+		xprt_work = kmalloc(sizeof(struct ipc_router_glink_xprt_work),
+				    GFP_ATOMIC);
+		if (!xprt_work) {
+			IPC_RTR_ERR(
+			"%s: Couldn't notify %d event to IPC Router\n",
+				__func__, event);
+			return;
+		}
+		xprt_work->glink_xprtp = glink_xprtp;
+		INIT_WORK(&xprt_work->work, glink_xprt_open_event);
+		queue_work(glink_xprtp->xprt_wq, &xprt_work->work);
+		break;
+
+	case GLINK_LOCAL_DISCONNECTED:
+	case GLINK_REMOTE_DISCONNECTED:
+		down_write(&glink_xprtp->ss_reset_rwlock);
+		if (glink_xprtp->ss_reset) {
+			up_write(&glink_xprtp->ss_reset_rwlock);
+			break;
+		}
+		glink_xprtp->ss_reset = 1;
+		up_write(&glink_xprtp->ss_reset_rwlock);
+		xprt_work = kmalloc(sizeof(struct ipc_router_glink_xprt_work),
+				    GFP_ATOMIC);
+		if (!xprt_work) {
+			IPC_RTR_ERR(
+			"%s: Couldn't notify %d event to IPC Router\n",
+				__func__, event);
+			return;
+		}
+		xprt_work->glink_xprtp = glink_xprtp;
+		INIT_WORK(&xprt_work->work, glink_xprt_close_event);
+		queue_work(glink_xprtp->xprt_wq, &xprt_work->work);
+		break;
+	}
+}
+
+static void glink_xprt_ch_open(struct ipc_router_glink_xprt *glink_xprtp)
+{
+	struct glink_open_config open_cfg = {0};
+
+	if (!IS_ERR_OR_NULL(glink_xprtp->ch_hndl))
+		return;
+
+	open_cfg.transport = glink_xprtp->transport;
+	open_cfg.options |= GLINK_OPT_INITIAL_XPORT;
+	open_cfg.edge = glink_xprtp->edge;
+	open_cfg.name = glink_xprtp->ch_name;
+	open_cfg.notify_rx = NULL;
+	open_cfg.notify_rxv = glink_xprt_notify_rxv;
+	open_cfg.notify_tx_done = glink_xprt_notify_tx_done;
+	open_cfg.notify_state = glink_xprt_notify_state;
+	open_cfg.notify_rx_intent_req = glink_xprt_notify_rx_intent_req;
+	open_cfg.priv = glink_xprtp;
+	open_cfg.rx_intent_req_timeout_ms = IPCRTR_INTENT_REQ_TIMEOUT_MS;
+
+	glink_xprtp->pil = msm_ipc_load_subsystem(glink_xprtp);
+	glink_xprtp->ch_hndl =  glink_open(&open_cfg);
+	if (IS_ERR_OR_NULL(glink_xprtp->ch_hndl)) {
+		IPC_RTR_ERR("%s:%s:%s %s: unable to open channel\n",
+			    open_cfg.transport, open_cfg.edge,
+			    open_cfg.name, __func__);
+			msm_ipc_unload_subsystem(glink_xprtp);
+	}
+}
+
+/**
+ * glink_xprt_link_state_worker() - Function to handle link state updates
+ * @work: Pointer to the work item in the link_state_work_info.
+ *
+ * This worker function is scheduled when there is a link state update. Since
+ * the loopback server registers for all transports, it receives all link state
+ * updates about all transports that get registered in the system.
+ */
+static void glink_xprt_link_state_worker(struct work_struct *work)
+{
+	struct xprt_state_work_info *xs_info =
+		container_of(work, struct xprt_state_work_info, work);
+	struct ipc_router_glink_xprt *glink_xprtp;
+
+	if (xs_info->link_state == GLINK_LINK_STATE_UP) {
+		D("%s: LINK_STATE_UP %s:%s\n",
+		  __func__, xs_info->edge, xs_info->transport);
+		mutex_lock(&glink_xprt_list_lock_lha1);
+		list_for_each_entry(glink_xprtp, &glink_xprt_list, list) {
+			if (strcmp(glink_xprtp->edge, xs_info->edge) ||
+			    strcmp(glink_xprtp->transport, xs_info->transport))
+				continue;
+			glink_xprt_ch_open(glink_xprtp);
+		}
+		mutex_unlock(&glink_xprt_list_lock_lha1);
+	} else if (xs_info->link_state == GLINK_LINK_STATE_DOWN) {
+		D("%s: LINK_STATE_DOWN %s:%s\n",
+		  __func__, xs_info->edge, xs_info->transport);
+		mutex_lock(&glink_xprt_list_lock_lha1);
+		list_for_each_entry(glink_xprtp, &glink_xprt_list, list) {
+			if (strcmp(glink_xprtp->edge, xs_info->edge) ||
+			    strcmp(glink_xprtp->transport, xs_info->transport)
+			    || IS_ERR_OR_NULL(glink_xprtp->ch_hndl))
+				continue;
+			glink_close(glink_xprtp->ch_hndl);
+			glink_xprtp->ch_hndl = NULL;
+			msm_ipc_unload_subsystem(glink_xprtp);
+		}
+		mutex_unlock(&glink_xprt_list_lock_lha1);
+
+	}
+	kfree(xs_info);
+}
+
+/**
+ * glink_xprt_link_state_cb() - Callback to receive link state updates
+ * @cb_info: Information containing link & its state.
+ * @priv: Private data passed during the link state registration.
+ *
+ * This function is called by the GLINK core to notify the IPC Router
+ * regarding the link state updates. This function is registered with the
+ * GLINK core by IPC Router during glink_register_link_state_cb().
+ */
+static void glink_xprt_link_state_cb(struct glink_link_state_cb_info *cb_info,
+				      void *priv)
+{
+	struct xprt_state_work_info *xs_info;
+
+	if (!cb_info)
+		return;
+
+	D("%s: %s:%s\n", __func__, cb_info->edge, cb_info->transport);
+	xs_info = kmalloc(sizeof(*xs_info), GFP_KERNEL);
+	if (!xs_info) {
+		IPC_RTR_ERR("%s: Error allocating xprt state info\n", __func__);
+		return;
+	}
+
+	strlcpy(xs_info->edge, cb_info->edge, GLINK_NAME_SIZE);
+	strlcpy(xs_info->transport, cb_info->transport, GLINK_NAME_SIZE);
+	xs_info->link_state = cb_info->link_state;
+	INIT_WORK(&xs_info->work, glink_xprt_link_state_worker);
+	queue_work(glink_xprt_wq, &xs_info->work);
+}
+
+/**
+ * ipc_router_glink_config_init() - init GLINK xprt configs
+ *
+ * @glink_xprt_config: pointer to GLINK Channel configurations.
+ *
+ * @return: 0 on success, standard Linux error codes on error.
+ *
+ * This function is called to initialize the GLINK XPRT pointer with
+ * the GLINK XPRT configurations either from device tree or static arrays.
+ */
+static int ipc_router_glink_config_init(
+		struct ipc_router_glink_xprt_config *glink_xprt_config)
+{
+	struct ipc_router_glink_xprt *glink_xprtp;
+	char xprt_wq_name[GLINK_NAME_SIZE];
+
+	glink_xprtp = kzalloc(sizeof(struct ipc_router_glink_xprt), GFP_KERNEL);
+	if (!glink_xprtp) {
+		IPC_RTR_ERR("%s:%s:%s:%s glink_xprtp alloc failed\n",
+			    __func__, glink_xprt_config->ch_name,
+			    glink_xprt_config->edge,
+			    glink_xprt_config->transport);
+		return -ENOMEM;
+	}
+
+	glink_xprtp->xprt.link_id = glink_xprt_config->link_id;
+	glink_xprtp->xprt_version = glink_xprt_config->xprt_version;
+	glink_xprtp->xprt_option = glink_xprt_config->xprt_option;
+	glink_xprtp->disable_pil_loading =
+				glink_xprt_config->disable_pil_loading;
+	glink_xprtp->dynamic_wakeup_source =
+				glink_xprt_config->dynamic_wakeup_source;
+
+	if (!glink_xprtp->disable_pil_loading)
+		strlcpy(glink_xprtp->pil_edge, glink_xprt_config->pil_edge,
+				PIL_SUBSYSTEM_NAME_LEN);
+	strlcpy(glink_xprtp->ch_name, glink_xprt_config->ch_name,
+		GLINK_NAME_SIZE);
+	strlcpy(glink_xprtp->edge, glink_xprt_config->edge, GLINK_NAME_SIZE);
+	strlcpy(glink_xprtp->transport,
+		glink_xprt_config->transport, GLINK_NAME_SIZE);
+	strlcpy(glink_xprtp->ipc_rtr_xprt_name,
+		glink_xprt_config->ipc_rtr_xprt_name, IPC_RTR_XPRT_NAME_LEN);
+	glink_xprtp->xprt.name = glink_xprtp->ipc_rtr_xprt_name;
+
+	glink_xprtp->xprt.get_version =	ipc_router_glink_xprt_get_version;
+	glink_xprtp->xprt.set_version =	ipc_router_glink_xprt_set_version;
+	glink_xprtp->xprt.get_option = ipc_router_glink_xprt_get_option;
+	glink_xprtp->xprt.read_avail = NULL;
+	glink_xprtp->xprt.read = NULL;
+	glink_xprtp->xprt.write_avail = NULL;
+	glink_xprtp->xprt.write = ipc_router_glink_xprt_write;
+	glink_xprtp->xprt.close = ipc_router_glink_xprt_close;
+	glink_xprtp->xprt.sft_close_done = glink_xprt_sft_close_done;
+	glink_xprtp->xprt.get_ws_info = ipc_router_glink_xprt_get_ws_info;
+	glink_xprtp->xprt.priv = NULL;
+
+	init_rwsem(&glink_xprtp->ss_reset_rwlock);
+	glink_xprtp->ss_reset = 0;
+
+	scnprintf(xprt_wq_name, GLINK_NAME_SIZE, "%s_%s_%s",
+			glink_xprtp->ch_name, glink_xprtp->edge,
+			glink_xprtp->transport);
+	glink_xprtp->xprt_wq = create_singlethread_workqueue(xprt_wq_name);
+	if (!glink_xprtp->xprt_wq) {
+		IPC_RTR_ERR("%s:%s:%s:%s wq alloc failed\n",
+			    __func__, glink_xprt_config->ch_name,
+			    glink_xprt_config->edge,
+			    glink_xprt_config->transport);
+		kfree(glink_xprtp);
+		return -EFAULT;
+	}
+	scnprintf(glink_xprtp->notify_rx_ws_name, IPC_RTR_WS_NAME_LEN,
+			"%s_%s_rx", glink_xprtp->ch_name, glink_xprtp->edge);
+	wakeup_source_init(&glink_xprtp->notify_rxv_ws,
+				glink_xprtp->notify_rx_ws_name);
+	mutex_lock(&glink_xprt_list_lock_lha1);
+	list_add(&glink_xprtp->list, &glink_xprt_list);
+	mutex_unlock(&glink_xprt_list_lock_lha1);
+
+	glink_xprt_link_info.edge = glink_xprt_config->edge;
+	glink_xprt_link_state_notif_handle = glink_register_link_state_cb(
+						&glink_xprt_link_info, NULL);
+	return 0;
+}
+
+/**
+ * parse_devicetree() - parse device tree binding
+ *
+ * @node: pointer to device tree node
+ * @glink_xprt_config: pointer to GLINK XPRT configurations
+ *
+ * @return: 0 on success, -ENODEV on failure.
+ */
+static int parse_devicetree(struct device_node *node,
+		struct ipc_router_glink_xprt_config *glink_xprt_config)
+{
+	int ret;
+	int link_id;
+	int version;
+	char *key;
+	const char *ch_name;
+	const char *edge;
+	const char *transport;
+	const char *pil_edge;
+
+	key = "qcom,ch-name";
+	ch_name = of_get_property(node, key, NULL);
+	if (!ch_name)
+		goto error;
+	strlcpy(glink_xprt_config->ch_name, ch_name, GLINK_NAME_SIZE);
+
+	key = "qcom,xprt-remote";
+	edge = of_get_property(node, key, NULL);
+	if (!edge)
+		goto error;
+	strlcpy(glink_xprt_config->edge, edge, GLINK_NAME_SIZE);
+
+	key = "qcom,glink-xprt";
+	transport = of_get_property(node, key, NULL);
+	if (!transport)
+		goto error;
+	strlcpy(glink_xprt_config->transport, transport,
+		GLINK_NAME_SIZE);
+
+	key = "qcom,xprt-linkid";
+	ret = of_property_read_u32(node, key, &link_id);
+	if (ret)
+		goto error;
+	glink_xprt_config->link_id = link_id;
+
+	key = "qcom,xprt-version";
+	ret = of_property_read_u32(node, key, &version);
+	if (ret)
+		goto error;
+	glink_xprt_config->xprt_version = version;
+
+	key = "qcom,fragmented-data";
+	glink_xprt_config->xprt_option = of_property_read_bool(node, key);
+
+	key = "qcom,pil-label";
+	pil_edge = of_get_property(node, key, NULL);
+	if (pil_edge) {
+		strlcpy(glink_xprt_config->pil_edge,
+				pil_edge, PIL_SUBSYSTEM_NAME_LEN);
+		glink_xprt_config->disable_pil_loading = false;
+	} else {
+		glink_xprt_config->disable_pil_loading = true;
+	}
+	scnprintf(glink_xprt_config->ipc_rtr_xprt_name, IPC_RTR_XPRT_NAME_LEN,
+		  "%s_%s", edge, ch_name);
+
+	key = "qcom,dynamic-wakeup-source";
+	glink_xprt_config->dynamic_wakeup_source =
+					of_property_read_bool(node, key);
+
+	return 0;
+
+error:
+	IPC_RTR_ERR("%s: missing key: %s\n", __func__, key);
+	return -ENODEV;
+}
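+
+/*
+ * Illustrative binding (node name and values are examples, not taken
+ * from a shipping dtsi):
+ *
+ *	qcom,ipc_router_glink_xprt_modem {
+ *		compatible = "qcom,ipc_router_glink_xprt";
+ *		qcom,ch-name = "IPCRTR";
+ *		qcom,xprt-remote = "mpss";
+ *		qcom,glink-xprt = "smem";
+ *		qcom,xprt-linkid = <1>;
+ *		qcom,xprt-version = <1>;
+ *		qcom,fragmented-data;
+ *		qcom,pil-label = "modem";
+ *	};
+ */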
+
+/**
+ * ipc_router_glink_xprt_probe() - Probe a GLINK xprt
+ *
+ * @pdev: Platform device corresponding to GLINK xprt.
+ *
+ * @return: 0 on success, standard Linux error codes on error.
+ *
+ * This function is called when the underlying device tree driver registers
+ * a platform device, mapped to a GLINK transport.
+ */
+static int ipc_router_glink_xprt_probe(struct platform_device *pdev)
+{
+	int ret;
+	struct ipc_router_glink_xprt_config glink_xprt_config;
+
+	if (pdev) {
+		if (pdev->dev.of_node) {
+			ret = parse_devicetree(pdev->dev.of_node,
+							&glink_xprt_config);
+			if (ret) {
+				IPC_RTR_ERR("%s: Failed to parse device tree\n",
+					    __func__);
+				return ret;
+			}
+
+			ret = ipc_router_glink_config_init(&glink_xprt_config);
+			if (ret) {
+				IPC_RTR_ERR("%s init failed\n", __func__);
+				return ret;
+			}
+		}
+	}
+	return 0;
+}
+
+static const struct of_device_id ipc_router_glink_xprt_match_table[] = {
+	{ .compatible = "qcom,ipc_router_glink_xprt" },
+	{},
+};
+
+static struct platform_driver ipc_router_glink_xprt_driver = {
+	.probe = ipc_router_glink_xprt_probe,
+	.driver = {
+		.name = MODULE_NAME,
+		.owner = THIS_MODULE,
+		.of_match_table = ipc_router_glink_xprt_match_table,
+	 },
+};
+
+static int __init ipc_router_glink_xprt_init(void)
+{
+	int rc;
+
+	glink_xprt_wq = create_singlethread_workqueue("glink_xprt_wq");
+	if (!glink_xprt_wq) {
+		pr_err("%s: create_singlethread_workqueue failed\n", __func__);
+		return -EFAULT;
+	}
+
+	rc = platform_driver_register(&ipc_router_glink_xprt_driver);
+	if (rc) {
+		IPC_RTR_ERR(
+		"%s: ipc_router_glink_xprt_driver register failed %d\n",
+		__func__, rc);
+		return rc;
+	}
+
+	return 0;
+}
+
+module_init(ipc_router_glink_xprt_init);
+MODULE_DESCRIPTION("IPC Router GLINK XPRT");
+MODULE_LICENSE("GPL v2");
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/soc/qcom/ipc_router_smd_xprt.c	2019-10-29 09:26:24.813214628 +0100
@@ -0,0 +1,864 @@
+/* Copyright (c) 2011-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/*
+ * IPC ROUTER SMD XPRT module.
+ */
+#define DEBUG
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/types.h>
+#include <linux/of.h>
+#include <linux/ipc_router_xprt.h>
+#include <linux/skbuff.h>
+#include <linux/delay.h>
+#include <linux/sched.h>
+
+#include <soc/qcom/smd.h>
+#include <soc/qcom/smsm.h>
+#include <soc/qcom/subsystem_restart.h>
+
+static int msm_ipc_router_smd_xprt_debug_mask;
+module_param_named(debug_mask, msm_ipc_router_smd_xprt_debug_mask,
+		   int, S_IRUGO | S_IWUSR | S_IWGRP);
+
+#if defined(DEBUG)
+#define D(x...) do { \
+if (msm_ipc_router_smd_xprt_debug_mask) \
+	pr_info(x); \
+} while (0)
+#else
+#define D(x...) do { } while (0)
+#endif
+
+#define MIN_FRAG_SZ (IPC_ROUTER_HDR_SIZE + sizeof(union rr_control_msg))
+
+#define NUM_SMD_XPRTS 4
+#define XPRT_NAME_LEN (SMD_MAX_CH_NAME_LEN + 12)
+
+/**
+ * msm_ipc_router_smd_xprt - IPC Router's SMD XPRT structure
+ * @list: IPC router's SMD XPRTs list.
+ * @ch_name: Name of the SMD channel exported by the SMD driver.
+ * @xprt_name: Name of the XPRT to be registered with IPC Router.
+ * @edge: SMD channel edge.
+ * @driver: Platform driver registered by this XPRT.
+ * @xprt: IPC Router XPRT structure to contain XPRT specific info.
+ * @channel: SMD channel specific info.
+ * @smd_xprt_wq: Workqueue to queue read & other XPRT related work.
+ * @write_avail_wait_q: Wait queue for the writer thread.
+ * @in_pkt: Pointer to any partially read packet.
+ * @is_partial_in_pkt: Flag indicating that a packet read is in progress.
+ * @read_work: Work item that performs the read operation from SMD.
+ * @ss_reset_lock: Lock to protect access to the ss_reset flag.
+ * @ss_reset: Flag to indicate that the remote subsystem has been reset.
+ * @pil: handle to the remote subsystem.
+ * @sft_close_complete: Variable to indicate completion of SSR handling
+ *                      by IPC Router.
+ * @xprt_version: IPC Router header version supported by this XPRT.
+ * @xprt_option: XPRT specific options to be handled by IPC Router.
+ * @disable_pil_loading: Disable PIL Loading of the subsystem.
+ */
+struct msm_ipc_router_smd_xprt {
+	struct list_head list;
+	char ch_name[SMD_MAX_CH_NAME_LEN];
+	char xprt_name[XPRT_NAME_LEN];
+	uint32_t edge;
+	struct platform_driver driver;
+	struct msm_ipc_router_xprt xprt;
+	smd_channel_t *channel;
+	struct workqueue_struct *smd_xprt_wq;
+	wait_queue_head_t write_avail_wait_q;
+	struct rr_packet *in_pkt;
+	int is_partial_in_pkt;
+	struct delayed_work read_work;
+	spinlock_t ss_reset_lock;	/* Subsystem reset lock */
+	int ss_reset;
+	void *pil;
+	struct completion sft_close_complete;
+	unsigned xprt_version;
+	unsigned xprt_option;
+	bool disable_pil_loading;
+};
+
+struct msm_ipc_router_smd_xprt_work {
+	struct msm_ipc_router_xprt *xprt;
+	struct work_struct work;
+};
+
+static void smd_xprt_read_data(struct work_struct *work);
+static void smd_xprt_open_event(struct work_struct *work);
+static void smd_xprt_close_event(struct work_struct *work);
+
+/**
+ * msm_ipc_router_smd_xprt_config - Configuration info of each SMD XPRT
+ * @ch_name: Name of the SMD endpoint exported by the SMD driver.
+ * @xprt_name: Name of the XPRT to be registered with IPC Router.
+ * @edge: ID to differentiate among multiple SMD endpoints.
+ * @link_id: Network cluster ID to which this XPRT belongs.
+ * @xprt_version: IPC Router header version supported by this XPRT.
+ * @xprt_option: XPRT specific options to be handled by IPC Router.
+ * @disable_pil_loading: Disable PIL loading of the subsystem.
+ */
+struct msm_ipc_router_smd_xprt_config {
+	char ch_name[SMD_MAX_CH_NAME_LEN];
+	char xprt_name[XPRT_NAME_LEN];
+	uint32_t edge;
+	uint32_t link_id;
+	unsigned xprt_version;
+	unsigned xprt_option;
+	bool disable_pil_loading;
+};
+
+struct msm_ipc_router_smd_xprt_config smd_xprt_cfg[] = {
+	{"RPCRPY_CNTL", "ipc_rtr_smd_rpcrpy_cntl", SMD_APPS_MODEM, 1, 1},
+	{"IPCRTR", "ipc_rtr_smd_ipcrtr", SMD_APPS_MODEM, 1, 1},
+	{"IPCRTR", "ipc_rtr_q6_ipcrtr", SMD_APPS_QDSP, 1, 1},
+	{"IPCRTR", "ipc_rtr_wcnss_ipcrtr", SMD_APPS_WCNSS, 1, 1},
+};
+
+#define MODULE_NAME "ipc_router_smd_xprt"
+#define IPC_ROUTER_SMD_XPRT_WAIT_TIMEOUT 3000
+static int ipc_router_smd_xprt_probe_done;
+static struct delayed_work ipc_router_smd_xprt_probe_work;
+static DEFINE_MUTEX(smd_remote_xprt_list_lock_lha1);
+static LIST_HEAD(smd_remote_xprt_list);
+
+static bool is_pil_loading_disabled(uint32_t edge);
+
+/**
+ * ipc_router_smd_set_xprt_version() - Set IPC Router header version
+ *                                          in the transport
+ * @xprt: Reference to the transport structure.
+ * @version: The version to be set in transport.
+ */
+static void ipc_router_smd_set_xprt_version(
+	struct msm_ipc_router_xprt *xprt, unsigned version)
+{
+	struct msm_ipc_router_smd_xprt *smd_xprtp;
+
+	if (!xprt)
+		return;
+	smd_xprtp = container_of(xprt, struct msm_ipc_router_smd_xprt, xprt);
+	smd_xprtp->xprt_version = version;
+}
+
+static int msm_ipc_router_smd_get_xprt_version(
+	struct msm_ipc_router_xprt *xprt)
+{
+	struct msm_ipc_router_smd_xprt *smd_xprtp;
+	if (!xprt)
+		return -EINVAL;
+	smd_xprtp = container_of(xprt, struct msm_ipc_router_smd_xprt, xprt);
+
+	return (int)smd_xprtp->xprt_version;
+}
+
+static int msm_ipc_router_smd_get_xprt_option(
+	struct msm_ipc_router_xprt *xprt)
+{
+	struct msm_ipc_router_smd_xprt *smd_xprtp;
+	if (!xprt)
+		return -EINVAL;
+	smd_xprtp = container_of(xprt, struct msm_ipc_router_smd_xprt, xprt);
+
+	return (int)smd_xprtp->xprt_option;
+}
+
+static int msm_ipc_router_smd_remote_write_avail(
+	struct msm_ipc_router_xprt *xprt)
+{
+	struct msm_ipc_router_smd_xprt *smd_xprtp =
+		container_of(xprt, struct msm_ipc_router_smd_xprt, xprt);
+
+	return smd_write_avail(smd_xprtp->channel);
+}
+
+static int msm_ipc_router_smd_remote_write(void *data,
+					   uint32_t len,
+					   struct msm_ipc_router_xprt *xprt)
+{
+	struct rr_packet *pkt = (struct rr_packet *)data;
+	struct sk_buff *ipc_rtr_pkt;
+	int offset, sz_written = 0;
+	int ret, num_retries = 0;
+	unsigned long flags;
+	struct msm_ipc_router_smd_xprt *smd_xprtp =
+		container_of(xprt, struct msm_ipc_router_smd_xprt, xprt);
+
+	if (!pkt)
+		return -EINVAL;
+
+	if (!len || pkt->length != len)
+		return -EINVAL;
+
+	do {
+		spin_lock_irqsave(&smd_xprtp->ss_reset_lock, flags);
+		if (smd_xprtp->ss_reset) {
+			spin_unlock_irqrestore(&smd_xprtp->ss_reset_lock,
+						flags);
+			IPC_RTR_ERR("%s: %s chnl reset\n",
+					__func__, xprt->name);
+			return -ENETRESET;
+		}
+		spin_unlock_irqrestore(&smd_xprtp->ss_reset_lock, flags);
+		ret = smd_write_start(smd_xprtp->channel, len);
+		if (ret < 0 && num_retries >= 5) {
+			IPC_RTR_ERR("%s: Error %d @smd_write_start for %s\n",
+				__func__, ret, xprt->name);
+			return ret;
+		} else if (ret < 0) {
+			msleep(50);
+			num_retries++;
+		}
+	} while (ret < 0);
+
+	D("%s: Ready to write %d bytes\n", __func__, len);
+	skb_queue_walk(pkt->pkt_fragment_q, ipc_rtr_pkt) {
+		offset = 0;
+		while (offset < ipc_rtr_pkt->len) {
+			if (!smd_write_segment_avail(smd_xprtp->channel))
+				smd_enable_read_intr(smd_xprtp->channel);
+
+			wait_event(smd_xprtp->write_avail_wait_q,
+				(smd_write_segment_avail(smd_xprtp->channel) ||
+				smd_xprtp->ss_reset));
+			smd_disable_read_intr(smd_xprtp->channel);
+			spin_lock_irqsave(&smd_xprtp->ss_reset_lock, flags);
+			if (smd_xprtp->ss_reset) {
+				spin_unlock_irqrestore(
+					&smd_xprtp->ss_reset_lock, flags);
+				IPC_RTR_ERR("%s: %s chnl reset\n",
+					__func__, xprt->name);
+				return -ENETRESET;
+			}
+			spin_unlock_irqrestore(&smd_xprtp->ss_reset_lock,
+						flags);
+
+			sz_written = smd_write_segment(smd_xprtp->channel,
+					ipc_rtr_pkt->data + offset,
+					(ipc_rtr_pkt->len - offset));
+			offset += sz_written;
+			sz_written = 0;
+		}
+		D("%s: Wrote %d bytes over %s\n",
+		  __func__, offset, xprt->name);
+	}
+
+	if (!smd_write_end(smd_xprtp->channel))
+		D("%s: Finished writing\n", __func__);
+	return len;
+}
+
+static int msm_ipc_router_smd_remote_close(struct msm_ipc_router_xprt *xprt)
+{
+	int rc;
+	struct msm_ipc_router_smd_xprt *smd_xprtp =
+		container_of(xprt, struct msm_ipc_router_smd_xprt, xprt);
+
+	rc = smd_close(smd_xprtp->channel);
+	if (smd_xprtp->pil) {
+		subsystem_put(smd_xprtp->pil);
+		smd_xprtp->pil = NULL;
+	}
+	return rc;
+}
+
+static void smd_xprt_sft_close_done(struct msm_ipc_router_xprt *xprt)
+{
+	struct msm_ipc_router_smd_xprt *smd_xprtp =
+		container_of(xprt, struct msm_ipc_router_smd_xprt, xprt);
+
+	complete_all(&smd_xprtp->sft_close_complete);
+}
+
+static void smd_xprt_read_data(struct work_struct *work)
+{
+	int pkt_size, sz_read, sz;
+	struct sk_buff *ipc_rtr_pkt;
+	void *data;
+	unsigned long flags;
+	struct delayed_work *rwork = to_delayed_work(work);
+	struct msm_ipc_router_smd_xprt *smd_xprtp =
+		container_of(rwork, struct msm_ipc_router_smd_xprt, read_work);
+
+	spin_lock_irqsave(&smd_xprtp->ss_reset_lock, flags);
+	if (smd_xprtp->ss_reset) {
+		spin_unlock_irqrestore(&smd_xprtp->ss_reset_lock, flags);
+		if (smd_xprtp->in_pkt)
+			release_pkt(smd_xprtp->in_pkt);
+		smd_xprtp->is_partial_in_pkt = 0;
+		IPC_RTR_ERR("%s: %s channel reset\n",
+			__func__, smd_xprtp->xprt.name);
+		return;
+	}
+	spin_unlock_irqrestore(&smd_xprtp->ss_reset_lock, flags);
+
+	D("%s pkt_size: %d, read_avail: %d\n", __func__,
+		smd_cur_packet_size(smd_xprtp->channel),
+		smd_read_avail(smd_xprtp->channel));
+	while ((pkt_size = smd_cur_packet_size(smd_xprtp->channel)) &&
+		smd_read_avail(smd_xprtp->channel)) {
+		if (!smd_xprtp->is_partial_in_pkt) {
+			smd_xprtp->in_pkt = create_pkt(NULL);
+			if (!smd_xprtp->in_pkt) {
+				IPC_RTR_ERR("%s: Couldn't alloc rr_packet\n",
+					__func__);
+				return;
+			}
+			smd_xprtp->is_partial_in_pkt = 1;
+			D("%s: Allocated rr_packet\n", __func__);
+		}
+
+		if (((pkt_size >= MIN_FRAG_SZ) &&
+		     (smd_read_avail(smd_xprtp->channel) < MIN_FRAG_SZ)) ||
+		    ((pkt_size < MIN_FRAG_SZ) &&
+		     (smd_read_avail(smd_xprtp->channel) < pkt_size)))
+			return;
+
+		sz = smd_read_avail(smd_xprtp->channel);
+		do {
+			ipc_rtr_pkt = alloc_skb(sz, GFP_KERNEL);
+			if (!ipc_rtr_pkt) {
+				if (sz <= (PAGE_SIZE/2)) {
+					queue_delayed_work(
+						smd_xprtp->smd_xprt_wq,
+						&smd_xprtp->read_work,
+						msecs_to_jiffies(100));
+					return;
+				}
+				sz = sz / 2;
+			}
+		} while (!ipc_rtr_pkt);
+
+		D("%s: Allocated the sk_buff of size %d\n", __func__, sz);
+		data = skb_put(ipc_rtr_pkt, sz);
+		sz_read = smd_read(smd_xprtp->channel, data, sz);
+		if (sz_read != sz) {
+			IPC_RTR_ERR("%s: Couldn't read %s completely\n",
+				__func__, smd_xprtp->xprt.name);
+			kfree_skb(ipc_rtr_pkt);
+			release_pkt(smd_xprtp->in_pkt);
+			smd_xprtp->is_partial_in_pkt = 0;
+			return;
+		}
+		skb_queue_tail(smd_xprtp->in_pkt->pkt_fragment_q, ipc_rtr_pkt);
+		smd_xprtp->in_pkt->length += sz_read;
+		if (sz_read != pkt_size)
+			smd_xprtp->is_partial_in_pkt = 1;
+		else
+			smd_xprtp->is_partial_in_pkt = 0;
+
+		if (!smd_xprtp->is_partial_in_pkt) {
+			D("%s: Packet size read %d\n",
+			  __func__, smd_xprtp->in_pkt->length);
+			msm_ipc_router_xprt_notify(&smd_xprtp->xprt,
+						IPC_ROUTER_XPRT_EVENT_DATA,
+						(void *)smd_xprtp->in_pkt);
+			release_pkt(smd_xprtp->in_pkt);
+			smd_xprtp->in_pkt = NULL;
+		}
+	}
+}
+
+static void smd_xprt_open_event(struct work_struct *work)
+{
+	struct msm_ipc_router_smd_xprt_work *xprt_work =
+		container_of(work, struct msm_ipc_router_smd_xprt_work, work);
+	struct msm_ipc_router_smd_xprt *smd_xprtp =
+		container_of(xprt_work->xprt,
+			     struct msm_ipc_router_smd_xprt, xprt);
+	unsigned long flags;
+
+	spin_lock_irqsave(&smd_xprtp->ss_reset_lock, flags);
+	smd_xprtp->ss_reset = 0;
+	spin_unlock_irqrestore(&smd_xprtp->ss_reset_lock, flags);
+	msm_ipc_router_xprt_notify(xprt_work->xprt,
+				IPC_ROUTER_XPRT_EVENT_OPEN, NULL);
+	D("%s: Notified IPC Router of %s OPEN\n",
+	   __func__, xprt_work->xprt->name);
+	kfree(xprt_work);
+}
+
+static void smd_xprt_close_event(struct work_struct *work)
+{
+	struct msm_ipc_router_smd_xprt_work *xprt_work =
+		container_of(work, struct msm_ipc_router_smd_xprt_work, work);
+	struct msm_ipc_router_smd_xprt *smd_xprtp =
+		container_of(xprt_work->xprt,
+			     struct msm_ipc_router_smd_xprt, xprt);
+
+	if (smd_xprtp->in_pkt) {
+		release_pkt(smd_xprtp->in_pkt);
+		smd_xprtp->in_pkt = NULL;
+	}
+	smd_xprtp->is_partial_in_pkt = 0;
+	init_completion(&smd_xprtp->sft_close_complete);
+	msm_ipc_router_xprt_notify(xprt_work->xprt,
+				IPC_ROUTER_XPRT_EVENT_CLOSE, NULL);
+	D("%s: Notified IPC Router of %s CLOSE\n",
+	   __func__, xprt_work->xprt->name);
+	wait_for_completion(&smd_xprtp->sft_close_complete);
+	kfree(xprt_work);
+}
+
+static void msm_ipc_router_smd_remote_notify(void *_dev, unsigned event)
+{
+	unsigned long flags;
+	struct msm_ipc_router_smd_xprt *smd_xprtp;
+	struct msm_ipc_router_smd_xprt_work *xprt_work;
+
+	smd_xprtp = (struct msm_ipc_router_smd_xprt *)_dev;
+	if (!smd_xprtp)
+		return;
+
+	switch (event) {
+	case SMD_EVENT_DATA:
+		if (smd_read_avail(smd_xprtp->channel))
+			queue_delayed_work(smd_xprtp->smd_xprt_wq,
+					   &smd_xprtp->read_work, 0);
+		if (smd_write_segment_avail(smd_xprtp->channel))
+			wake_up(&smd_xprtp->write_avail_wait_q);
+		break;
+
+	case SMD_EVENT_OPEN:
+		xprt_work = kmalloc(sizeof(struct msm_ipc_router_smd_xprt_work),
+				    GFP_ATOMIC);
+		if (!xprt_work) {
+			IPC_RTR_ERR(
+			"%s: Couldn't notify %d event to IPC Router\n",
+				__func__, event);
+			return;
+		}
+		xprt_work->xprt = &smd_xprtp->xprt;
+		INIT_WORK(&xprt_work->work, smd_xprt_open_event);
+		queue_work(smd_xprtp->smd_xprt_wq, &xprt_work->work);
+		break;
+
+	case SMD_EVENT_CLOSE:
+		spin_lock_irqsave(&smd_xprtp->ss_reset_lock, flags);
+		smd_xprtp->ss_reset = 1;
+		spin_unlock_irqrestore(&smd_xprtp->ss_reset_lock, flags);
+		wake_up(&smd_xprtp->write_avail_wait_q);
+		xprt_work = kmalloc(sizeof(struct msm_ipc_router_smd_xprt_work),
+				    GFP_ATOMIC);
+		if (!xprt_work) {
+			IPC_RTR_ERR(
+			"%s: Couldn't notify %d event to IPC Router\n",
+				__func__, event);
+			return;
+		}
+		xprt_work->xprt = &smd_xprtp->xprt;
+		INIT_WORK(&xprt_work->work, smd_xprt_close_event);
+		queue_work(smd_xprtp->smd_xprt_wq, &xprt_work->work);
+		break;
+	}
+}
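+
+/*
+ * Summary of the SSR close handshake above: SMD_EVENT_CLOSE sets
+ * ss_reset, wakes any blocked writer and queues smd_xprt_close_event(),
+ * which drops a partially read packet, notifies IPC Router of the CLOSE
+ * and blocks on sft_close_complete. IPC Router acknowledges through the
+ * sft_close_done() callback, wired to smd_xprt_sft_close_done(), which
+ * completes the wait.
+ */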
+
+static void *msm_ipc_load_subsystem(uint32_t edge)
+{
+	void *pil = NULL;
+	const char *peripheral;
+	bool loading_disabled;
+
+	loading_disabled = is_pil_loading_disabled(edge);
+	peripheral = smd_edge_to_pil_str(edge);
+	if (!IS_ERR_OR_NULL(peripheral) && !loading_disabled) {
+		pil = subsystem_get(peripheral);
+		if (IS_ERR(pil)) {
+			IPC_RTR_ERR("%s: Failed to load %s\n",
+				__func__, peripheral);
+			pil = NULL;
+		}
+	}
+	return pil;
+}
+
+/**
+ * find_smd_xprt_list() - Find the xprt item specific to an SMD endpoint
+ * @pdev: Platform device registered by the SMD driver.
+ *
+ * @return: pointer to msm_ipc_router_smd_xprt if a matching endpoint is
+ *		found, else NULL.
+ *
+ * This function is used to find a specific xprt item in the global xprt
+ * list.
+ */
+static struct msm_ipc_router_smd_xprt *
+		find_smd_xprt_list(struct platform_device *pdev)
+{
+	struct msm_ipc_router_smd_xprt *smd_xprtp;
+
+	mutex_lock(&smd_remote_xprt_list_lock_lha1);
+	list_for_each_entry(smd_xprtp, &smd_remote_xprt_list, list) {
+		if (!strcmp(pdev->name, smd_xprtp->ch_name)
+				&& (pdev->id == smd_xprtp->edge)) {
+			mutex_unlock(&smd_remote_xprt_list_lock_lha1);
+			return smd_xprtp;
+		}
+	}
+	mutex_unlock(&smd_remote_xprt_list_lock_lha1);
+	return NULL;
+}
+
+/**
+ * is_pil_loading_disabled() - Check if PIL loading of a subsystem is disabled
+ * @edge: Edge that points to the remote subsystem.
+ *
+ * @return: true if disabled or if the edge is unknown, false if enabled.
+ */
+static bool is_pil_loading_disabled(uint32_t edge)
+{
+	struct msm_ipc_router_smd_xprt *smd_xprtp;
+
+	mutex_lock(&smd_remote_xprt_list_lock_lha1);
+	list_for_each_entry(smd_xprtp, &smd_remote_xprt_list, list) {
+		if (smd_xprtp->edge == edge) {
+			mutex_unlock(&smd_remote_xprt_list_lock_lha1);
+			return smd_xprtp->disable_pil_loading;
+		}
+	}
+	mutex_unlock(&smd_remote_xprt_list_lock_lha1);
+	return true;
+}
+
+/**
+ * msm_ipc_router_smd_remote_probe() - Probe an SMD endpoint
+ *
+ * @pdev: Platform device corresponding to SMD endpoint.
+ *
+ * @return: 0 on success, standard Linux error codes on error.
+ *
+ * This function is called when the underlying SMD driver registers
+ * a platform device, mapped to an SMD endpoint.
+ */
+static int msm_ipc_router_smd_remote_probe(struct platform_device *pdev)
+{
+	int rc;
+	struct msm_ipc_router_smd_xprt *smd_xprtp;
+
+	smd_xprtp = find_smd_xprt_list(pdev);
+	if (!smd_xprtp) {
+		IPC_RTR_ERR("%s No device with name %s\n",
+					__func__, pdev->name);
+		return -EPROBE_DEFER;
+	}
+	if (strcmp(pdev->name, smd_xprtp->ch_name)
+			|| (pdev->id != smd_xprtp->edge)) {
+		IPC_RTR_ERR("%s wrong item name:%s edge:%d\n",
+				__func__, smd_xprtp->ch_name, smd_xprtp->edge);
+		return -ENODEV;
+	}
+	smd_xprtp->smd_xprt_wq =
+		create_singlethread_workqueue(pdev->name);
+	if (!smd_xprtp->smd_xprt_wq) {
+		IPC_RTR_ERR("%s: WQ creation failed for %s\n",
+			__func__, pdev->name);
+		return -ENOMEM;
+	}
+
+	smd_xprtp->pil = msm_ipc_load_subsystem(
+					smd_xprtp->edge);
+	rc = smd_named_open_on_edge(smd_xprtp->ch_name,
+				    smd_xprtp->edge,
+				    &smd_xprtp->channel,
+				    smd_xprtp,
+				    msm_ipc_router_smd_remote_notify);
+	if (rc < 0) {
+		IPC_RTR_ERR("%s: Channel open failed for %s\n",
+			__func__, smd_xprtp->ch_name);
+		if (smd_xprtp->pil) {
+			subsystem_put(smd_xprtp->pil);
+			smd_xprtp->pil = NULL;
+		}
+		destroy_workqueue(smd_xprtp->smd_xprt_wq);
+		return rc;
+	}
+
+	smd_disable_read_intr(smd_xprtp->channel);
+
+	smsm_change_state(SMSM_APPS_STATE, 0, SMSM_RPCINIT);
+
+	return 0;
+}
+
+/**
+ * msm_ipc_router_smd_driver_register() - register SMD XPRT drivers
+ *
+ * @smd_xprtp: Pointer to the IPC Router SMD XPRT structure.
+ *
+ * @return: 0 on success, standard Linux error codes on error.
+ *
+ * This function is called when a new XPRT is added, in order to register
+ * a platform driver for the new XPRT.
+ */
+static int msm_ipc_router_smd_driver_register(
+			struct msm_ipc_router_smd_xprt *smd_xprtp)
+{
+	int ret;
+	struct msm_ipc_router_smd_xprt *item;
+	unsigned already_registered = 0;
+
+	mutex_lock(&smd_remote_xprt_list_lock_lha1);
+	list_for_each_entry(item, &smd_remote_xprt_list, list) {
+		if (!strcmp(smd_xprtp->ch_name, item->ch_name))
+			already_registered = 1;
+	}
+	list_add(&smd_xprtp->list, &smd_remote_xprt_list);
+	mutex_unlock(&smd_remote_xprt_list_lock_lha1);
+
+	if (!already_registered) {
+		smd_xprtp->driver.driver.name = smd_xprtp->ch_name;
+		smd_xprtp->driver.driver.owner = THIS_MODULE;
+		smd_xprtp->driver.probe = msm_ipc_router_smd_remote_probe;
+
+		ret = platform_driver_register(&smd_xprtp->driver);
+		if (ret) {
+			IPC_RTR_ERR(
+			"%s: Failed to register platform driver [%s]\n",
+						__func__, smd_xprtp->ch_name);
+			return ret;
+		}
+	} else {
+		IPC_RTR_ERR("%s Already driver registered %s\n",
+					__func__, smd_xprtp->ch_name);
+	}
+	return 0;
+}
+
+/**
+ * msm_ipc_router_smd_config_init() - init SMD xprt configs
+ *
+ * @smd_xprt_config: pointer to SMD xprt configurations.
+ *
+ * @return: 0 on success, standard Linux error codes on error.
+ *
+ * This function is called to initialize the SMD XPRT pointer with
+ * the SMD XPRT configurations either from device tree or static arrays.
+ */
+static int msm_ipc_router_smd_config_init(
+		struct msm_ipc_router_smd_xprt_config *smd_xprt_config)
+{
+	struct msm_ipc_router_smd_xprt *smd_xprtp;
+
+	smd_xprtp = kzalloc(sizeof(struct msm_ipc_router_smd_xprt), GFP_KERNEL);
+	if (IS_ERR_OR_NULL(smd_xprtp)) {
+		IPC_RTR_ERR("%s: kzalloc() failed for smd_xprtp id:%s\n",
+				__func__, smd_xprt_config->ch_name);
+		return -ENOMEM;
+	}
+
+	smd_xprtp->xprt.link_id = smd_xprt_config->link_id;
+	smd_xprtp->xprt_version = smd_xprt_config->xprt_version;
+	smd_xprtp->edge = smd_xprt_config->edge;
+	smd_xprtp->xprt_option = smd_xprt_config->xprt_option;
+	smd_xprtp->disable_pil_loading = smd_xprt_config->disable_pil_loading;
+
+	strlcpy(smd_xprtp->ch_name, smd_xprt_config->ch_name,
+						SMD_MAX_CH_NAME_LEN);
+
+	strlcpy(smd_xprtp->xprt_name, smd_xprt_config->xprt_name,
+						XPRT_NAME_LEN);
+	smd_xprtp->xprt.name = smd_xprtp->xprt_name;
+
+	smd_xprtp->xprt.set_version =
+		ipc_router_smd_set_xprt_version;
+	smd_xprtp->xprt.get_version =
+		msm_ipc_router_smd_get_xprt_version;
+	smd_xprtp->xprt.get_option =
+		msm_ipc_router_smd_get_xprt_option;
+	smd_xprtp->xprt.read_avail = NULL;
+	smd_xprtp->xprt.read = NULL;
+	smd_xprtp->xprt.write_avail =
+		msm_ipc_router_smd_remote_write_avail;
+	smd_xprtp->xprt.write = msm_ipc_router_smd_remote_write;
+	smd_xprtp->xprt.close = msm_ipc_router_smd_remote_close;
+	smd_xprtp->xprt.sft_close_done = smd_xprt_sft_close_done;
+	smd_xprtp->xprt.priv = NULL;
+
+	init_waitqueue_head(&smd_xprtp->write_avail_wait_q);
+	smd_xprtp->in_pkt = NULL;
+	smd_xprtp->is_partial_in_pkt = 0;
+	INIT_DELAYED_WORK(&smd_xprtp->read_work, smd_xprt_read_data);
+	spin_lock_init(&smd_xprtp->ss_reset_lock);
+	smd_xprtp->ss_reset = 0;
+
+	msm_ipc_router_smd_driver_register(smd_xprtp);
+
+	return 0;
+}
+
+/**
+ * parse_devicetree() - parse device tree binding
+ *
+ * @node: pointer to device tree node
+ * @smd_xprt_config: pointer to SMD XPRT configurations
+ *
+ * @return: 0 on success, -ENODEV on failure.
+ */
+static int parse_devicetree(struct device_node *node,
+		struct msm_ipc_router_smd_xprt_config *smd_xprt_config)
+{
+	int ret;
+	int edge;
+	int link_id;
+	int version;
+	char *key;
+	const char *ch_name;
+	const char *remote_ss;
+
+	key = "qcom,ch-name";
+	ch_name = of_get_property(node, key, NULL);
+	if (!ch_name)
+		goto error;
+	strlcpy(smd_xprt_config->ch_name, ch_name, SMD_MAX_CH_NAME_LEN);
+
+	key = "qcom,xprt-remote";
+	remote_ss = of_get_property(node, key, NULL);
+	if (!remote_ss)
+		goto error;
+	edge = smd_remote_ss_to_edge(remote_ss);
+	if (edge < 0)
+		goto error;
+	smd_xprt_config->edge = edge;
+
+	key = "qcom,xprt-linkid";
+	ret = of_property_read_u32(node, key, &link_id);
+	if (ret)
+		goto error;
+	smd_xprt_config->link_id = link_id;
+
+	key = "qcom,xprt-version";
+	ret = of_property_read_u32(node, key, &version);
+	if (ret)
+		goto error;
+	smd_xprt_config->xprt_version = version;
+
+	key = "qcom,fragmented-data";
+	smd_xprt_config->xprt_option = of_property_read_bool(node, key);
+
+	key = "qcom,disable-pil-loading";
+	smd_xprt_config->disable_pil_loading = of_property_read_bool(node, key);
+
+	scnprintf(smd_xprt_config->xprt_name, XPRT_NAME_LEN, "%s_%s",
+			remote_ss, smd_xprt_config->ch_name);
+
+	return 0;
+
+error:
+	IPC_RTR_ERR("%s: missing key: %s\n", __func__, key);
+	return -ENODEV;
+}
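+
+/*
+ * Illustrative device tree node consuming the properties parsed above
+ * (a sketch with placeholder values, not a binding document):
+ *
+ *	ipc_router_smd_xprt_example {
+ *		compatible = "qcom,ipc_router_smd_xprt";
+ *		qcom,ch-name = "IPCRTR";
+ *		qcom,xprt-remote = "modem";
+ *		qcom,xprt-linkid = <1>;
+ *		qcom,xprt-version = <1>;
+ *		qcom,fragmented-data;
+ *		qcom,disable-pil-loading;
+ *	};
+ */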
+
+/**
+ * msm_ipc_router_smd_xprt_probe() - Probe an SMD xprt
+ *
+ * @pdev: Platform device corresponding to SMD xprt.
+ *
+ * @return: 0 on success, standard Linux error codes on error.
+ *
+ * This function is called when the underlying device tree driver registers
+ * a platform device, mapped to an SMD transport.
+ */
+static int msm_ipc_router_smd_xprt_probe(struct platform_device *pdev)
+{
+	int ret;
+	struct msm_ipc_router_smd_xprt_config smd_xprt_config;
+
+	if (pdev) {
+		if (pdev->dev.of_node) {
+			mutex_lock(&smd_remote_xprt_list_lock_lha1);
+			ipc_router_smd_xprt_probe_done = 1;
+			mutex_unlock(&smd_remote_xprt_list_lock_lha1);
+
+			ret = parse_devicetree(pdev->dev.of_node,
+							&smd_xprt_config);
+			if (ret) {
+				IPC_RTR_ERR("%s: Failed to parse device tree\n",
+								__func__);
+				return ret;
+			}
+
+			ret = msm_ipc_router_smd_config_init(&smd_xprt_config);
+			if (ret) {
+				IPC_RTR_ERR("%s init failed\n", __func__);
+				return ret;
+			}
+		}
+	}
+	return 0;
+}
+
+/**
+ * ipc_router_smd_xprt_probe_worker() - probe worker for non-DT configurations
+ *
+ * @work: work item to process
+ *
+ * This function is scheduled via schedule_delayed_work() and runs after
+ * 3 seconds to check whether the device tree probe has completed. If it
+ * has not, the default configurations are read from the static array.
+ */
+static void ipc_router_smd_xprt_probe_worker(struct work_struct *work)
+{
+	int i, ret;
+
+	BUG_ON(ARRAY_SIZE(smd_xprt_cfg) != NUM_SMD_XPRTS);
+
+	mutex_lock(&smd_remote_xprt_list_lock_lha1);
+	if (!ipc_router_smd_xprt_probe_done) {
+		mutex_unlock(&smd_remote_xprt_list_lock_lha1);
+		for (i = 0; i < ARRAY_SIZE(smd_xprt_cfg); i++) {
+			ret = msm_ipc_router_smd_config_init(&smd_xprt_cfg[i]);
+			if (ret)
+				IPC_RTR_ERR(" %s init failed config idx %d\n",
+							__func__, i);
+		}
+		mutex_lock(&smd_remote_xprt_list_lock_lha1);
+	}
+	mutex_unlock(&smd_remote_xprt_list_lock_lha1);
+}
+
+static struct of_device_id msm_ipc_router_smd_xprt_match_table[] = {
+	{ .compatible = "qcom,ipc_router_smd_xprt" },
+	{},
+};
+
+static struct platform_driver msm_ipc_router_smd_xprt_driver = {
+	.probe = msm_ipc_router_smd_xprt_probe,
+	.driver = {
+		.name = MODULE_NAME,
+		.owner = THIS_MODULE,
+		.of_match_table = msm_ipc_router_smd_xprt_match_table,
+	 },
+};
+
+static int __init msm_ipc_router_smd_xprt_init(void)
+{
+	int rc;
+
+	rc = platform_driver_register(&msm_ipc_router_smd_xprt_driver);
+	if (rc) {
+		IPC_RTR_ERR(
+		"%s: msm_ipc_router_smd_xprt_driver register failed %d\n",
+								__func__, rc);
+		return rc;
+	}
+
+	INIT_DELAYED_WORK(&ipc_router_smd_xprt_probe_work,
+					ipc_router_smd_xprt_probe_worker);
+	schedule_delayed_work(&ipc_router_smd_xprt_probe_work,
+			msecs_to_jiffies(IPC_ROUTER_SMD_XPRT_WAIT_TIMEOUT));
+	return 0;
+}
+
+module_init(msm_ipc_router_smd_xprt_init);
+MODULE_DESCRIPTION("IPC Router SMD XPRT");
+MODULE_LICENSE("GPL v2");
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/soc/qcom/irq-helper.c	2019-01-22 16:16:26.655274950 +0100
@@ -0,0 +1,184 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <linux/cpu.h>
+#include <linux/sysfs.h>
+#include <linux/kobject.h>
+#include <linux/spinlock.h>
+#include <linux/slab.h>
+#include <soc/qcom/irq-helper.h>
+
+struct irq_helper {
+	bool enable;
+	bool deploy;
+	uint32_t count;
+	struct kobject kobj;
+	/* spinlock to protect reference count variable 'count' */
+	spinlock_t lock;
+};
+
+struct irq_helper_attr {
+	struct attribute        attr;
+	ssize_t (*show)(struct kobject *kobj, struct attribute *attr,
+			char *buf);
+	ssize_t (*store)(struct kobject *kobj, struct attribute *attr,
+			const char *buf, size_t count);
+};
+
+#define IRQ_HELPER_ATTR(_name, _mode, _show, _store)    \
+	struct irq_helper_attr irq_helper_##_name =  \
+		__ATTR(_name, _mode, _show, _store)
+
+#define to_irq_helper(kobj) \
+	container_of(kobj, struct irq_helper, kobj)
+
+#define to_irq_helper_attr(_attr) \
+	container_of(_attr, struct irq_helper_attr, attr)
+
+static ssize_t attr_show(struct kobject *kobj, struct attribute *attr,
+		char *buf)
+{
+	struct irq_helper_attr *irq_attr = to_irq_helper_attr(attr);
+	ssize_t ret = -EIO;
+
+	if (irq_attr->show)
+		ret = irq_attr->show(kobj, attr, buf);
+
+	return ret;
+}
+
+static const struct sysfs_ops irq_helper_sysfs_ops = {
+	.show   = attr_show,
+};
+
+static struct kobj_type irq_helper_ktype = {
+	.sysfs_ops  = &irq_helper_sysfs_ops,
+};
+
+static ssize_t show_deploy(struct kobject *kobj, struct attribute *attr,
+		char *buf)
+{
+	struct irq_helper *irq = to_irq_helper(kobj);
+
+	return snprintf(buf, PAGE_SIZE, "%u\n", irq->deploy);
+}
+IRQ_HELPER_ATTR(irq_blacklist_on, 0444, show_deploy, NULL);
+
+static struct irq_helper *irq_h;
+
+/* Do not call this API in an atomic context */
+int irq_blacklist_on(void)
+{
+	bool flag = false;
+
+	might_sleep();
+	if (!irq_h) {
+		pr_err("%s: init function is not called", __func__);
+		return -EPERM;
+	}
+	if (!irq_h->enable) {
+		pr_err("%s: enable bit is not set up", __func__);
+		return -EPERM;
+	}
+	spin_lock(&irq_h->lock);
+	irq_h->count++;
+	if (!irq_h->deploy) {
+		irq_h->deploy = true;
+		flag = true;
+	}
+	spin_unlock(&irq_h->lock);
+	if (flag)
+		sysfs_notify(&irq_h->kobj, NULL, "irq_blacklist_on");
+	return 0;
+}
+EXPORT_SYMBOL(irq_blacklist_on);
+
+/* Do not call this API in an atomic context */
+int irq_blacklist_off(void)
+{
+	bool flag = false;
+
+	might_sleep();
+	if (!irq_h) {
+		pr_err("%s: init function is not called", __func__);
+		return -EPERM;
+	}
+	if (!irq_h->enable) {
+		pr_err("%s: enable bit is not set up", __func__);
+		return -EPERM;
+	}
+	spin_lock(&irq_h->lock);
+	if (irq_h->count == 0) {
+		pr_err("%s: ref-count is 0, cannot call irq blacklist off.",
+				__func__);
+		spin_unlock(&irq_h->lock);
+		return -EPERM;
+	}
+	irq_h->count--;
+	if (irq_h->count == 0) {
+		irq_h->deploy = false;
+		flag = true;
+	}
+	spin_unlock(&irq_h->lock);
+
+	if (flag)
+		sysfs_notify(&irq_h->kobj, NULL, "irq_blacklist_on");
+	return 0;
+}
+EXPORT_SYMBOL(irq_blacklist_off);
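+
+/*
+ * Illustrative usage (a sketch, not part of this driver): a hypothetical
+ * client brackets a latency-sensitive window with the two calls above.
+ * Calls nest through the internal reference count, and userspace can
+ * poll /sys/kernel/irq_helper/irq_blacklist_on to observe the combined
+ * state after each sysfs_notify().
+ */
+#if 0	/* example only, never compiled */
+static int example_latency_window(void)
+{
+	int ret;
+
+	ret = irq_blacklist_on();	/* may sleep; not for atomic context */
+	if (ret)
+		return ret;
+
+	/* ... latency-sensitive work runs while the blacklist is deployed ... */
+
+	return irq_blacklist_off();	/* drops the reference taken above */
+}
+#endif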
+
+static int __init irq_helper_init(void)
+{
+	int ret;
+
+	irq_h = kzalloc(sizeof(struct irq_helper), GFP_KERNEL);
+	if (!irq_h)
+		return -ENOMEM;
+
+	ret = kobject_init_and_add(&irq_h->kobj, &irq_helper_ktype,
+			kernel_kobj,  "%s", "irq_helper");
+	if (ret) {
+		pr_err("%s:Error in creation kobject_add\n", __func__);
+		goto out_free_irq;
+	}
+
+	ret = sysfs_create_file(&irq_h->kobj,
+			&irq_helper_irq_blacklist_on.attr);
+	if (ret) {
+		pr_err("%s:Error in sysfs_create_file\n", __func__);
+		goto out_put_kobj;
+	}
+
+	spin_lock_init(&irq_h->lock);
+	irq_h->count = 0;
+	irq_h->enable = true;
+	return 0;
+out_put_kobj:
+	kobject_put(&irq_h->kobj);
+out_free_irq:
+	kfree(irq_h);
+	return ret;
+}
+module_init(irq_helper_init);
+
+static void __exit irq_helper_exit(void)
+{
+	sysfs_remove_file(&irq_h->kobj, &irq_helper_irq_blacklist_on.attr);
+	kobject_del(&irq_h->kobj);
+	kobject_put(&irq_h->kobj);
+	kfree(irq_h);
+}
+module_exit(irq_helper_exit);
+MODULE_DESCRIPTION("IRQ Helper APIs");
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/soc/qcom/jtag-fuse.c	2019-01-22 16:16:26.655274950 +0100
@@ -0,0 +1,235 @@
+/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/export.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/of_device.h>
+#include <soc/qcom/jtag.h>
+
+#define fuse_writel(drvdata, val, off)	__raw_writel((val), drvdata->base + off)
+#define fuse_readl(drvdata, off)	__raw_readl(drvdata->base + off)
+
+#define OEM_CONFIG0			(0x000)
+#define OEM_CONFIG1			(0x004)
+#define OEM_CONFIG2			(0x008)
+
+/* JTAG FUSE V1 */
+#define ALL_DEBUG_DISABLE		BIT(21)
+#define APPS_DBGEN_DISABLE		BIT(0)
+#define APPS_NIDEN_DISABLE		BIT(1)
+#define APPS_SPIDEN_DISABLE		BIT(2)
+#define APPS_SPNIDEN_DISABLE		BIT(3)
+#define DAP_DEVICEEN_DISABLE		BIT(8)
+
+/* JTAG FUSE V2 */
+#define ALL_DEBUG_DISABLE_V2		BIT(0)
+#define APPS_DBGEN_DISABLE_V2		BIT(10)
+#define APPS_NIDEN_DISABLE_V2		BIT(11)
+#define APPS_SPIDEN_DISABLE_V2		BIT(12)
+#define APPS_SPNIDEN_DISABLE_V2		BIT(13)
+#define DAP_DEVICEEN_DISABLE_V2		BIT(18)
+
+/* JTAG FUSE V3 */
+#define ALL_DEBUG_DISABLE_V3		BIT(29)
+#define APPS_DBGEN_DISABLE_V3		BIT(8)
+#define APPS_NIDEN_DISABLE_V3		BIT(21)
+#define APPS_SPIDEN_DISABLE_V3		BIT(5)
+#define APPS_SPNIDEN_DISABLE_V3		BIT(31)
+#define DAP_DEVICEEN_DISABLE_V3		BIT(7)
+
+/* JTAG FUSE V4 */
+#define ALL_DEBUG_DISABLE_V4		BIT(29)
+#define APPS_DBGEN_DISABLE_V4		BIT(4)
+#define APPS_NIDEN_DISABLE_V4		BIT(15)
+#define APPS_SPIDEN_DISABLE_V4		BIT(28)
+#define APPS_SPNIDEN_DISABLE_V4		BIT(23)
+#define DAP_DEVICEEN_DISABLE_V4		BIT(3)
+
+#define JTAG_FUSE_VERSION_V1		"qcom,jtag-fuse"
+#define JTAG_FUSE_VERSION_V2		"qcom,jtag-fuse-v2"
+#define JTAG_FUSE_VERSION_V3		"qcom,jtag-fuse-v3"
+#define JTAG_FUSE_VERSION_V4		"qcom,jtag-fuse-v4"
+
+struct fuse_drvdata {
+	void __iomem		*base;
+	struct device		*dev;
+	bool			fuse_v2;
+	bool			fuse_v3;
+	bool			fuse_v4;
+};
+
+static struct fuse_drvdata *fusedrvdata;
+
+bool msm_jtag_fuse_apps_access_disabled(void)
+{
+	struct fuse_drvdata *drvdata = fusedrvdata;
+	uint32_t config0, config1, config2;
+	bool ret = false;
+
+	if (!drvdata)
+		return false;
+
+	config0 = fuse_readl(drvdata, OEM_CONFIG0);
+	config1 = fuse_readl(drvdata, OEM_CONFIG1);
+
+	dev_dbg(drvdata->dev, "apps config0: %lx\n", (unsigned long)config0);
+	dev_dbg(drvdata->dev, "apps config1: %lx\n", (unsigned long)config1);
+
+	if (drvdata->fuse_v3) {
+		config2 = fuse_readl(drvdata, OEM_CONFIG2);
+		dev_dbg(drvdata->dev, "apps config2: %lx\n",
+		       (unsigned long)config2);
+	}
+
+	if (drvdata->fuse_v4) {
+		if (config0 & ALL_DEBUG_DISABLE_V4)
+			ret = true;
+		else if (config1 & APPS_DBGEN_DISABLE_V4)
+			ret = true;
+		else if (config1 & APPS_NIDEN_DISABLE_V4)
+			ret = true;
+		else if (config1 & APPS_SPIDEN_DISABLE_V4)
+			ret = true;
+		else if (config1 & APPS_SPNIDEN_DISABLE_V4)
+			ret = true;
+		else if (config1 & DAP_DEVICEEN_DISABLE_V4)
+			ret = true;
+	} else if (drvdata->fuse_v3) {
+		if (config0 & ALL_DEBUG_DISABLE_V3)
+			ret = true;
+		else if (config1 & APPS_DBGEN_DISABLE_V3)
+			ret = true;
+		else if (config1 & APPS_NIDEN_DISABLE_V3)
+			ret = true;
+		else if (config2 & APPS_SPIDEN_DISABLE_V3)
+			ret = true;
+		else if (config1 & APPS_SPNIDEN_DISABLE_V3)
+			ret = true;
+		else if (config1 & DAP_DEVICEEN_DISABLE_V3)
+			ret = true;
+	} else if (drvdata->fuse_v2) {
+		if (config1 & ALL_DEBUG_DISABLE_V2)
+			ret = true;
+		else if (config1 & APPS_DBGEN_DISABLE_V2)
+			ret = true;
+		else if (config1 & APPS_NIDEN_DISABLE_V2)
+			ret = true;
+		else if (config1 & APPS_SPIDEN_DISABLE_V2)
+			ret = true;
+		else if (config1 & APPS_SPNIDEN_DISABLE_V2)
+			ret = true;
+		else if (config1 & DAP_DEVICEEN_DISABLE_V2)
+			ret = true;
+	} else {
+		if (config0 & ALL_DEBUG_DISABLE)
+			ret = true;
+		else if (config1 & APPS_DBGEN_DISABLE)
+			ret = true;
+		else if (config1 & APPS_NIDEN_DISABLE)
+			ret = true;
+		else if (config1 & APPS_SPIDEN_DISABLE)
+			ret = true;
+		else if (config1 & APPS_SPNIDEN_DISABLE)
+			ret = true;
+		else if (config1 & DAP_DEVICEEN_DISABLE)
+			ret = true;
+	}
+
+	if (ret)
+		dev_dbg(drvdata->dev, "apps fuse disabled\n");
+
+	return ret;
+}
+EXPORT_SYMBOL(msm_jtag_fuse_apps_access_disabled);
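+
+/*
+ * Illustrative usage (an assumption about callers, not code from this
+ * file): debug save/restore paths can gate themselves on the fuse state,
+ * for example:
+ *
+ *	if (msm_jtag_fuse_apps_access_disabled())
+ *		return;		(skip debug register save/restore)
+ */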
+
+static const struct of_device_id jtag_fuse_match[] = {
+	{.compatible = JTAG_FUSE_VERSION_V1 },
+	{.compatible = JTAG_FUSE_VERSION_V2 },
+	{.compatible = JTAG_FUSE_VERSION_V3 },
+	{.compatible = JTAG_FUSE_VERSION_V4 },
+	{}
+};
+
+static int jtag_fuse_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct fuse_drvdata *drvdata;
+	struct resource *res;
+	const struct of_device_id *match;
+
+	drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
+	if (!drvdata)
+		return -ENOMEM;
+	drvdata->dev = &pdev->dev;
+	platform_set_drvdata(pdev, drvdata);
+
+	match = of_match_device(jtag_fuse_match, dev);
+	if (!match)
+		return -EINVAL;
+
+	if (!strcmp(match->compatible, JTAG_FUSE_VERSION_V2))
+		drvdata->fuse_v2 = true;
+	else if (!strcmp(match->compatible, JTAG_FUSE_VERSION_V3))
+		drvdata->fuse_v3 = true;
+	else if (!strcmp(match->compatible, JTAG_FUSE_VERSION_V4))
+		drvdata->fuse_v4 = true;
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "fuse-base");
+	if (!res)
+		return -ENODEV;
+
+	drvdata->base = devm_ioremap(dev, res->start, resource_size(res));
+	if (!drvdata->base)
+		return -ENOMEM;
+
+	/* Store the driver data pointer for use in exported functions */
+	fusedrvdata = drvdata;
+	dev_info(dev, "JTag Fuse initialized\n");
+	return 0;
+}
+
+static int jtag_fuse_remove(struct platform_device *pdev)
+{
+	return 0;
+}
+
+static struct platform_driver jtag_fuse_driver = {
+	.probe          = jtag_fuse_probe,
+	.remove         = jtag_fuse_remove,
+	.driver         = {
+		.name   = "msm-jtag-fuse",
+		.owner	= THIS_MODULE,
+		.of_match_table = jtag_fuse_match,
+	},
+};
+
+static int __init jtag_fuse_init(void)
+{
+	return platform_driver_register(&jtag_fuse_driver);
+}
+arch_initcall(jtag_fuse_init);
+
+static void __exit jtag_fuse_exit(void)
+{
+	platform_driver_unregister(&jtag_fuse_driver);
+}
+module_exit(jtag_fuse_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("JTag Fuse driver");
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/soc/qcom/jtagv8.c	2019-01-22 16:16:26.659274986 +0100
@@ -0,0 +1,1016 @@
+/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/smp.h>
+#include <linux/cpu.h>
+#include <linux/cpu_pm.h>
+#include <linux/export.h>
+#include <linux/printk.h>
+#include <linux/ratelimit.h>
+#include <linux/coresight.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <linux/bitops.h>
+#include <soc/qcom/scm.h>
+#include <soc/qcom/jtag.h>
+#ifdef CONFIG_ARM64
+#include <asm/debugv8.h>
+#else
+#include <asm/hardware/debugv8.h>
+#endif
+
+#define TIMEOUT_US		(100)
+
+#define BM(lsb, msb)		((BIT(msb) - BIT(lsb)) + BIT(msb))
+#define BMVAL(val, lsb, msb)	((val & BM(lsb, msb)) >> lsb)
+#define BVAL(val, n)		((val & BIT(n)) >> n)
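+
+/*
+ * Worked example of the mask helpers above: BM(12, 15) evaluates to
+ * (BIT(15) - BIT(12)) + BIT(15) = 0x7000 + 0x8000 = 0xf000, so
+ * BMVAL(0x6016, 12, 15) = (0x6016 & 0xf000) >> 12 = 0x6. This is how
+ * dbg_init_arch_data() extracts the breakpoint/watchpoint counts from
+ * the debug ID register.
+ */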
+
+#ifdef CONFIG_ARM64
+#define ARM_DEBUG_ARCH_V8	(0x6)
+#endif
+
+#define MAX_DBG_REGS		(66)
+#define MAX_DBG_STATE_SIZE	(MAX_DBG_REGS * num_possible_cpus())
+
+#define OSLOCK_MAGIC		(0xC5ACCE55)
+#define TZ_DBG_ETM_FEAT_ID	(0x8)
+#define TZ_DBG_ETM_VER		(0x400000)
+
+uint32_t msm_jtag_save_cntr[NR_CPUS];
+uint32_t msm_jtag_restore_cntr[NR_CPUS];
+
+/* access debug registers using system instructions */
+struct dbg_cpu_ctx {
+	uint32_t		*state;
+};
+
+struct dbg_ctx {
+	uint8_t			arch;
+	bool			save_restore_enabled;
+	uint8_t			nr_wp;
+	uint8_t			nr_bp;
+	uint8_t			nr_ctx_cmp;
+#ifdef CONFIG_ARM64
+	uint64_t		*state;
+#else
+	uint32_t		*state;
+#endif
+};
+
+static struct dbg_ctx dbg;
+static struct notifier_block jtag_hotcpu_save_notifier;
+static struct notifier_block jtag_hotcpu_restore_notifier;
+static struct notifier_block jtag_cpu_pm_notifier;
+
+#ifdef CONFIG_ARM64
+static int dbg_read_arch64_bxr(uint64_t *state, int i, int j)
+{
+	switch (j) {
+	case 0:
+		state[i++] = dbg_readq(DBGBVR0_EL1);
+		state[i++] = (uint64_t)dbg_readl(DBGBCR0_EL1);
+		break;
+	case 1:
+		state[i++] = dbg_readq(DBGBVR1_EL1);
+		state[i++] = (uint64_t)dbg_readl(DBGBCR1_EL1);
+		break;
+	case 2:
+		state[i++] = dbg_readq(DBGBVR2_EL1);
+		state[i++] = (uint64_t)dbg_readl(DBGBCR2_EL1);
+		break;
+	case 3:
+		state[i++] = dbg_readq(DBGBVR3_EL1);
+		state[i++] = (uint64_t)dbg_readl(DBGBCR3_EL1);
+		break;
+	case 4:
+		state[i++] = dbg_readq(DBGBVR4_EL1);
+		state[i++] = (uint64_t)dbg_readl(DBGBCR4_EL1);
+		break;
+	case 5:
+		state[i++] = dbg_readq(DBGBVR5_EL1);
+		state[i++] = (uint64_t)dbg_readl(DBGBCR5_EL1);
+		break;
+	case 6:
+		state[i++] = dbg_readq(DBGBVR6_EL1);
+		state[i++] = (uint64_t)dbg_readl(DBGBCR6_EL1);
+		break;
+	case 7:
+		state[i++] = dbg_readq(DBGBVR7_EL1);
+		state[i++] = (uint64_t)dbg_readl(DBGBCR7_EL1);
+		break;
+	case 8:
+		state[i++] = dbg_readq(DBGBVR8_EL1);
+		state[i++] = (uint64_t)dbg_readl(DBGBCR8_EL1);
+		break;
+	case 9:
+		state[i++] = dbg_readq(DBGBVR9_EL1);
+		state[i++] = (uint64_t)dbg_readl(DBGBCR9_EL1);
+		break;
+	case 10:
+		state[i++] = dbg_readq(DBGBVR10_EL1);
+		state[i++] = (uint64_t)dbg_readl(DBGBCR10_EL1);
+		break;
+	case 11:
+		state[i++] = dbg_readq(DBGBVR11_EL1);
+		state[i++] = (uint64_t)dbg_readl(DBGBCR11_EL1);
+		break;
+	case 12:
+		state[i++] = dbg_readq(DBGBVR12_EL1);
+		state[i++] = (uint64_t)dbg_readl(DBGBCR12_EL1);
+		break;
+	case 13:
+		state[i++] = dbg_readq(DBGBVR13_EL1);
+		state[i++] = (uint64_t)dbg_readl(DBGBCR13_EL1);
+		break;
+	case 14:
+		state[i++] = dbg_readq(DBGBVR14_EL1);
+		state[i++] = (uint64_t)dbg_readl(DBGBCR14_EL1);
+		break;
+	case 15:
+		state[i++] = dbg_readq(DBGBVR15_EL1);
+		state[i++] = (uint64_t)dbg_readl(DBGBCR15_EL1);
+		break;
+	default:
+		pr_err_ratelimited("idx %d out of bounds in %s\n", j, __func__);
+	}
+	return i;
+}
+
+static int dbg_write_arch64_bxr(uint64_t *state, int i, int j)
+{
+	switch (j) {
+	case 0:
+		dbg_write(state[i++], DBGBVR0_EL1);
+		dbg_write(state[i++], DBGBCR0_EL1);
+		break;
+	case 1:
+		dbg_write(state[i++], DBGBVR1_EL1);
+		dbg_write(state[i++], DBGBCR1_EL1);
+		break;
+	case 2:
+		dbg_write(state[i++], DBGBVR2_EL1);
+		dbg_write(state[i++], DBGBCR2_EL1);
+		break;
+	case 3:
+		dbg_write(state[i++], DBGBVR3_EL1);
+		dbg_write(state[i++], DBGBCR3_EL1);
+		break;
+	case 4:
+		dbg_write(state[i++], DBGBVR4_EL1);
+		dbg_write(state[i++], DBGBCR4_EL1);
+		break;
+	case 5:
+		dbg_write(state[i++], DBGBVR5_EL1);
+		dbg_write(state[i++], DBGBCR5_EL1);
+		break;
+	case 6:
+		dbg_write(state[i++], DBGBVR6_EL1);
+		dbg_write(state[i++], DBGBCR6_EL1);
+		break;
+	case 7:
+		dbg_write(state[i++], DBGBVR7_EL1);
+		dbg_write(state[i++], DBGBCR7_EL1);
+		break;
+	case 8:
+		dbg_write(state[i++], DBGBVR8_EL1);
+		dbg_write(state[i++], DBGBCR8_EL1);
+		break;
+	case 9:
+		dbg_write(state[i++], DBGBVR9_EL1);
+		dbg_write(state[i++], DBGBCR9_EL1);
+		break;
+	case 10:
+		dbg_write(state[i++], DBGBVR10_EL1);
+		dbg_write(state[i++], DBGBCR10_EL1);
+		break;
+	case 11:
+		dbg_write(state[i++], DBGBVR11_EL1);
+		dbg_write(state[i++], DBGBCR11_EL1);
+		break;
+	case 12:
+		dbg_write(state[i++], DBGBVR12_EL1);
+		dbg_write(state[i++], DBGBCR12_EL1);
+		break;
+	case 13:
+		dbg_write(state[i++], DBGBVR13_EL1);
+		dbg_write(state[i++], DBGBCR13_EL1);
+		break;
+	case 14:
+		dbg_write(state[i++], DBGBVR14_EL1);
+		dbg_write(state[i++], DBGBCR14_EL1);
+		break;
+	case 15:
+		dbg_write(state[i++], DBGBVR15_EL1);
+		dbg_write(state[i++], DBGBCR15_EL1);
+		break;
+	default:
+		pr_err_ratelimited("idx %d out of bounds in %s\n", j, __func__);
+	}
+	return i;
+}
+
+static int dbg_read_arch64_wxr(uint64_t *state, int i, int j)
+{
+	switch (j) {
+	case 0:
+		state[i++] = dbg_readq(DBGWVR0_EL1);
+		state[i++] = (uint64_t)dbg_readl(DBGWCR0_EL1);
+		break;
+	case 1:
+		state[i++] = dbg_readq(DBGWVR1_EL1);
+		state[i++] = (uint64_t)dbg_readl(DBGWCR1_EL1);
+		break;
+	case 2:
+		state[i++] = dbg_readq(DBGWVR2_EL1);
+		state[i++] = (uint64_t)dbg_readl(DBGWCR2_EL1);
+		break;
+	case 3:
+		state[i++] = dbg_readq(DBGWVR3_EL1);
+		state[i++] = (uint64_t)dbg_readl(DBGWCR3_EL1);
+		break;
+	case 4:
+		state[i++] = dbg_readq(DBGWVR4_EL1);
+		state[i++] = (uint64_t)dbg_readl(DBGWCR4_EL1);
+		break;
+	case 5:
+		state[i++] = dbg_readq(DBGWVR5_EL1);
+		state[i++] = (uint64_t)dbg_readl(DBGWCR5_EL1);
+		break;
+	case 6:
+		state[i++] = dbg_readq(DBGWVR6_EL1);
+		state[i++] = (uint64_t)dbg_readl(DBGWCR6_EL1);
+		break;
+	case 7:
+		state[i++] = dbg_readq(DBGWVR7_EL1);
+		state[i++] = (uint64_t)dbg_readl(DBGWCR7_EL1);
+		break;
+	case 8:
+		state[i++] = dbg_readq(DBGWVR8_EL1);
+		state[i++] = (uint64_t)dbg_readl(DBGWCR8_EL1);
+		break;
+	case 9:
+		state[i++] = dbg_readq(DBGWVR9_EL1);
+		state[i++] = (uint64_t)dbg_readl(DBGWCR9_EL1);
+		break;
+	case 10:
+		state[i++] = dbg_readq(DBGWVR10_EL1);
+		state[i++] = (uint64_t)dbg_readl(DBGWCR10_EL1);
+		break;
+	case 11:
+		state[i++] = dbg_readq(DBGWVR11_EL1);
+		state[i++] = (uint64_t)dbg_readl(DBGWCR11_EL1);
+		break;
+	case 12:
+		state[i++] = dbg_readq(DBGWVR12_EL1);
+		state[i++] = (uint64_t)dbg_readl(DBGWCR12_EL1);
+		break;
+	case 13:
+		state[i++] = dbg_readq(DBGWVR13_EL1);
+		state[i++] = (uint64_t)dbg_readl(DBGWCR13_EL1);
+		break;
+	case 14:
+		state[i++] = dbg_readq(DBGWVR14_EL1);
+		state[i++] = (uint64_t)dbg_readl(DBGWCR14_EL1);
+		break;
+	case 15:
+		state[i++] = dbg_readq(DBGWVR15_EL1);
+		state[i++] = (uint64_t)dbg_readl(DBGWCR15_EL1);
+		break;
+	default:
+		pr_err_ratelimited("idx %d out of bounds in %s\n", j, __func__);
+	}
+	return i;
+}
+
+static int dbg_write_arch64_wxr(uint64_t *state, int i, int j)
+{
+	switch (j) {
+	case 0:
+		dbg_write(state[i++], DBGWVR0_EL1);
+		dbg_write(state[i++], DBGWCR0_EL1);
+		break;
+	case 1:
+		dbg_write(state[i++], DBGWVR1_EL1);
+		dbg_write(state[i++], DBGWCR1_EL1);
+		break;
+	case 2:
+		dbg_write(state[i++], DBGWVR2_EL1);
+		dbg_write(state[i++], DBGWCR2_EL1);
+		break;
+	case 3:
+		dbg_write(state[i++], DBGWVR3_EL1);
+		dbg_write(state[i++], DBGWCR3_EL1);
+		break;
+	case 4:
+		dbg_write(state[i++], DBGWVR4_EL1);
+		dbg_write(state[i++], DBGWCR4_EL1);
+		break;
+	case 5:
+		dbg_write(state[i++], DBGWVR5_EL1);
+		dbg_write(state[i++], DBGWCR5_EL1);
+		break;
+	case 6:
+		dbg_write(state[i++], DBGWVR6_EL1);
+		dbg_write(state[i++], DBGWCR6_EL1);
+		break;
+	case 7:
+		dbg_write(state[i++], DBGWVR7_EL1);
+		dbg_write(state[i++], DBGWCR7_EL1);
+		break;
+	case 8:
+		dbg_write(state[i++], DBGWVR8_EL1);
+		dbg_write(state[i++], DBGWCR8_EL1);
+		break;
+	case 9:
+		dbg_write(state[i++], DBGWVR9_EL1);
+		dbg_write(state[i++], DBGWCR9_EL1);
+		break;
+	case 10:
+		dbg_write(state[i++], DBGWVR10_EL1);
+		dbg_write(state[i++], DBGWCR10_EL1);
+		break;
+	case 11:
+		dbg_write(state[i++], DBGWVR11_EL1);
+		dbg_write(state[i++], DBGWCR11_EL1);
+		break;
+	case 12:
+		dbg_write(state[i++], DBGWVR12_EL1);
+		dbg_write(state[i++], DBGWCR12_EL1);
+		break;
+	case 13:
+		dbg_write(state[i++], DBGWVR13_EL1);
+		dbg_write(state[i++], DBGWCR13_EL1);
+		break;
+	case 14:
+		dbg_write(state[i++], DBGWVR14_EL1);
+		dbg_write(state[i++], DBGWCR14_EL1);
+		break;
+	case 15:
+		dbg_write(state[i++], DBGWVR15_EL1);
+		dbg_write(state[i++], DBGWCR15_EL1);
+		break;
+	default:
+		pr_err_ratelimited("idx %d out of bounds in %s\n", j, __func__);
+	}
+	return i;
+}
+
+static inline void dbg_save_state(int cpu)
+{
+	int i, j;
+
+	i = cpu * MAX_DBG_REGS;
+
+	switch (dbg.arch) {
+	case ARM_DEBUG_ARCH_V8:
+		/* Set OS Lock to inform the debugger that the OS is in the
+		 * process of saving debug registers. It prevents accidental
+		 * modification of the debug regs by the external debugger.
+		 */
+		dbg_write(0x1, OSLAR_EL1);
+		/* Ensure OS lock is set before proceeding */
+		isb();
+
+		dbg.state[i++] =  (uint32_t)dbg_readl(MDSCR_EL1);
+		for (j = 0; j < dbg.nr_bp; j++)
+			i = dbg_read_arch64_bxr((uint64_t *)dbg.state, i, j);
+		for (j = 0; j < dbg.nr_wp; j++)
+			i = dbg_read_arch64_wxr((uint64_t *)dbg.state, i, j);
+		dbg.state[i++] =  (uint32_t)dbg_readl(MDCCINT_EL1);
+		dbg.state[i++] =  (uint32_t)dbg_readl(DBGCLAIMCLR_EL1);
+		dbg.state[i++] =  (uint32_t)dbg_readl(OSECCR_EL1);
+		dbg.state[i++] =  (uint32_t)dbg_readl(OSDTRRX_EL1);
+		dbg.state[i++] =  (uint32_t)dbg_readl(OSDTRTX_EL1);
+
+		/* Set the OS double lock */
+		isb();
+		dbg_write(0x1, OSDLR_EL1);
+		isb();
+		break;
+	default:
+		pr_err_ratelimited("unsupported dbg arch %d in %s\n", dbg.arch,
+				   __func__);
+	}
+}
+
+static inline void dbg_restore_state(int cpu)
+{
+	int i, j;
+
+	i = cpu * MAX_DBG_REGS;
+
+	switch (dbg.arch) {
+	case ARM_DEBUG_ARCH_V8:
+		/* Clear the OS double lock */
+		isb();
+		dbg_write(0x0, OSDLR_EL1);
+		isb();
+
+		/* Set OS lock. Lock will already be set after power collapse
+		 * but this write is included to ensure it is set.
+		 */
+		dbg_write(0x1, OSLAR_EL1);
+		isb();
+
+		dbg_write(dbg.state[i++], MDSCR_EL1);
+		for (j = 0; j < dbg.nr_bp; j++)
+			i = dbg_write_arch64_bxr((uint64_t *)dbg.state, i, j);
+		for (j = 0; j < dbg.nr_wp; j++)
+			i = dbg_write_arch64_wxr((uint64_t *)dbg.state, i, j);
+		dbg_write(dbg.state[i++], MDCCINT_EL1);
+		dbg_write(dbg.state[i++], DBGCLAIMSET_EL1);
+		dbg_write(dbg.state[i++], OSECCR_EL1);
+		dbg_write(dbg.state[i++], OSDTRRX_EL1);
+		dbg_write(dbg.state[i++], OSDTRTX_EL1);
+
+		isb();
+		dbg_write(0x0, OSLAR_EL1);
+		isb();
+		break;
+	default:
+		pr_err_ratelimited("unsupported dbg arch %d in %s\n", dbg.arch,
+				   __func__);
+	}
+}
+
+static void dbg_init_arch_data(void)
+{
+	uint64_t dbgfr;
+
+	/* This will run on core0 so use it to populate parameters */
+	dbgfr = dbg_readq(ID_AA64DFR0_EL1);
+	dbg.arch = BMVAL(dbgfr, 0, 3);
+	dbg.nr_bp = BMVAL(dbgfr, 12, 15) + 1;
+	dbg.nr_wp = BMVAL(dbgfr, 20, 23) + 1;
+	dbg.nr_ctx_cmp = BMVAL(dbgfr, 28, 31) + 1;
+}
+#else
+
+static int dbg_read_arch32_bxr(uint32_t *state, int i, int j)
+{
+	switch (j) {
+	case 0:
+		state[i++] = dbg_read(DBGBVR0);
+		state[i++] = dbg_read(DBGBCR0);
+		break;
+	case 1:
+		state[i++] = dbg_read(DBGBVR1);
+		state[i++] = dbg_read(DBGBCR1);
+		break;
+	case 2:
+		state[i++] = dbg_read(DBGBVR2);
+		state[i++] = dbg_read(DBGBCR2);
+		break;
+	case 3:
+		state[i++] = dbg_read(DBGBVR3);
+		state[i++] = dbg_read(DBGBCR3);
+		break;
+	case 4:
+		state[i++] = dbg_read(DBGBVR4);
+		state[i++] = dbg_read(DBGBCR4);
+		break;
+	case 5:
+		state[i++] = dbg_read(DBGBVR5);
+		state[i++] = dbg_read(DBGBCR5);
+		break;
+	case 6:
+		state[i++] = dbg_read(DBGBVR6);
+		state[i++] = dbg_read(DBGBCR6);
+		break;
+	case 7:
+		state[i++] = dbg_read(DBGBVR7);
+		state[i++] = dbg_read(DBGBCR7);
+		break;
+	case 8:
+		state[i++] = dbg_read(DBGBVR8);
+		state[i++] = dbg_read(DBGBCR8);
+		break;
+	case 9:
+		state[i++] = dbg_read(DBGBVR9);
+		state[i++] = dbg_read(DBGBCR9);
+		break;
+	case 10:
+		state[i++] = dbg_read(DBGBVR10);
+		state[i++] = dbg_read(DBGBCR10);
+		break;
+	case 11:
+		state[i++] = dbg_read(DBGBVR11);
+		state[i++] = dbg_read(DBGBCR11);
+		break;
+	case 12:
+		state[i++] = dbg_read(DBGBVR12);
+		state[i++] = dbg_read(DBGBCR12);
+		break;
+	case 13:
+		state[i++] = dbg_read(DBGBVR13);
+		state[i++] = dbg_read(DBGBCR13);
+		break;
+	case 14:
+		state[i++] = dbg_read(DBGBVR14);
+		state[i++] = dbg_read(DBGBCR14);
+		break;
+	case 15:
+		state[i++] = dbg_read(DBGBVR15);
+		state[i++] = dbg_read(DBGBCR15);
+		break;
+	default:
+		pr_err_ratelimited("idx %d out of bounds in %s\n", j, __func__);
+	}
+	return i;
+}
+
+static int dbg_write_arch32_bxr(uint32_t *state, int i, int j)
+{
+	switch (j) {
+	case 0:
+		dbg_write(state[i++], DBGBVR0);
+		dbg_write(state[i++], DBGBCR0);
+		break;
+	case 1:
+		dbg_write(state[i++], DBGBVR1);
+		dbg_write(state[i++], DBGBCR1);
+		break;
+	case 2:
+		dbg_write(state[i++], DBGBVR2);
+		dbg_write(state[i++], DBGBCR2);
+		break;
+	case 3:
+		dbg_write(state[i++], DBGBVR3);
+		dbg_write(state[i++], DBGBCR3);
+		break;
+	case 4:
+		dbg_write(state[i++], DBGBVR4);
+		dbg_write(state[i++], DBGBCR4);
+		break;
+	case 5:
+		dbg_write(state[i++], DBGBVR5);
+		dbg_write(state[i++], DBGBCR5);
+		break;
+	case 6:
+		dbg_write(state[i++], DBGBVR6);
+		dbg_write(state[i++], DBGBCR6);
+		break;
+	case 7:
+		dbg_write(state[i++], DBGBVR7);
+		dbg_write(state[i++], DBGBCR7);
+		break;
+	case 8:
+		dbg_write(state[i++], DBGBVR8);
+		dbg_write(state[i++], DBGBCR8);
+		break;
+	case 9:
+		dbg_write(state[i++], DBGBVR9);
+		dbg_write(state[i++], DBGBCR9);
+		break;
+	case 10:
+		dbg_write(state[i++], DBGBVR10);
+		dbg_write(state[i++], DBGBCR10);
+		break;
+	case 11:
+		dbg_write(state[i++], DBGBVR11);
+		dbg_write(state[i++], DBGBCR11);
+		break;
+	case 12:
+		dbg_write(state[i++], DBGBVR12);
+		dbg_write(state[i++], DBGBCR12);
+		break;
+	case 13:
+		dbg_write(state[i++], DBGBVR13);
+		dbg_write(state[i++], DBGBCR13);
+		break;
+	case 14:
+		dbg_write(state[i++], DBGBVR14);
+		dbg_write(state[i++], DBGBCR14);
+		break;
+	case 15:
+		dbg_write(state[i++], DBGBVR15);
+		dbg_write(state[i++], DBGBCR15);
+		break;
+	default:
+		pr_err_ratelimited("idx %d out of bounds in %s\n", j, __func__);
+	}
+	return i;
+}
+
+static int dbg_read_arch32_wxr(uint32_t *state, int i, int j)
+{
+	switch (j) {
+	case 0:
+		state[i++] = dbg_read(DBGWVR0);
+		state[i++] = dbg_read(DBGWCR0);
+		break;
+	case 1:
+		state[i++] = dbg_read(DBGWVR1);
+		state[i++] = dbg_read(DBGWCR1);
+		break;
+	case 2:
+		state[i++] = dbg_read(DBGWVR2);
+		state[i++] = dbg_read(DBGWCR2);
+		break;
+	case 3:
+		state[i++] = dbg_read(DBGWVR3);
+		state[i++] = dbg_read(DBGWCR3);
+		break;
+	case 4:
+		state[i++] = dbg_read(DBGWVR4);
+		state[i++] = dbg_read(DBGWCR4);
+		break;
+	case 5:
+		state[i++] = dbg_read(DBGWVR5);
+		state[i++] = dbg_read(DBGWCR5);
+		break;
+	case 6:
+		state[i++] = dbg_read(DBGWVR6);
+		state[i++] = dbg_read(DBGWCR6);
+		break;
+	case 7:
+		state[i++] = dbg_read(DBGWVR7);
+		state[i++] = dbg_read(DBGWCR7);
+		break;
+	case 8:
+		state[i++] = dbg_read(DBGWVR8);
+		state[i++] = dbg_read(DBGWCR8);
+		break;
+	case 9:
+		state[i++] = dbg_read(DBGWVR9);
+		state[i++] = dbg_read(DBGWCR9);
+		break;
+	case 10:
+		state[i++] = dbg_read(DBGWVR10);
+		state[i++] = dbg_read(DBGWCR10);
+		break;
+	case 11:
+		state[i++] = dbg_read(DBGWVR11);
+		state[i++] = dbg_read(DBGWCR11);
+		break;
+	case 12:
+		state[i++] = dbg_read(DBGWVR12);
+		state[i++] = dbg_read(DBGWCR12);
+		break;
+	case 13:
+		state[i++] = dbg_read(DBGWVR13);
+		state[i++] = dbg_read(DBGWCR13);
+		break;
+	case 14:
+		state[i++] = dbg_read(DBGWVR14);
+		state[i++] = dbg_read(DBGWCR14);
+		break;
+	case 15:
+		state[i++] = dbg_read(DBGWVR15);
+		state[i++] = dbg_read(DBGWCR15);
+		break;
+	default:
+		pr_err_ratelimited("idx %d out of bounds in %s\n", j, __func__);
+	}
+	return i;
+}
+
+static int dbg_write_arch32_wxr(uint32_t *state, int i, int j)
+{
+	switch (j) {
+	case 0:
+		dbg_write(state[i++], DBGWVR0);
+		dbg_write(state[i++], DBGWCR0);
+		break;
+	case 1:
+		dbg_write(state[i++], DBGWVR1);
+		dbg_write(state[i++], DBGWCR1);
+		break;
+	case 2:
+		dbg_write(state[i++], DBGWVR2);
+		dbg_write(state[i++], DBGWCR2);
+		break;
+	case 3:
+		dbg_write(state[i++], DBGWVR3);
+		dbg_write(state[i++], DBGWCR3);
+		break;
+	case 4:
+		dbg_write(state[i++], DBGWVR4);
+		dbg_write(state[i++], DBGWCR4);
+		break;
+	case 5:
+		dbg_write(state[i++], DBGWVR5);
+		dbg_write(state[i++], DBGWCR5);
+		break;
+	case 6:
+		dbg_write(state[i++], DBGWVR6);
+		dbg_write(state[i++], DBGWCR6);
+		break;
+	case 7:
+		dbg_write(state[i++], DBGWVR7);
+		dbg_write(state[i++], DBGWCR7);
+		break;
+	case 8:
+		dbg_write(state[i++], DBGWVR8);
+		dbg_write(state[i++], DBGWCR8);
+		break;
+	case 9:
+		dbg_write(state[i++], DBGWVR9);
+		dbg_write(state[i++], DBGWCR9);
+		break;
+	case 10:
+		dbg_write(state[i++], DBGWVR10);
+		dbg_write(state[i++], DBGWCR10);
+		break;
+	case 11:
+		dbg_write(state[i++], DBGWVR11);
+		dbg_write(state[i++], DBGWCR11);
+		break;
+	case 12:
+		dbg_write(state[i++], DBGWVR12);
+		dbg_write(state[i++], DBGWCR12);
+		break;
+	case 13:
+		dbg_write(state[i++], DBGWVR13);
+		dbg_write(state[i++], DBGWCR13);
+		break;
+	case 14:
+		dbg_write(state[i++], DBGWVR14);
+		dbg_write(state[i++], DBGWCR14);
+		break;
+	case 15:
+		dbg_write(state[i++], DBGWVR15);
+		dbg_write(state[i++], DBGWCR15);
+		break;
+	default:
+		pr_err_ratelimited("idx %d out of bounds in %s\n", j, __func__);
+	}
+	return i;
+}
+
+static inline void dbg_save_state(int cpu)
+{
+	int i, j;
+
+	i = cpu * MAX_DBG_REGS;
+
+	switch (dbg.arch) {
+	case ARM_DEBUG_ARCH_V8:
+		/* Set OS Lock to inform the debugger that the OS is in the
+		 * process of saving debug registers. It prevents accidental
+		 * modification of the debug regs by the external debugger.
+		 */
+		dbg_write(OSLOCK_MAGIC, DBGOSLAR);
+		/* Ensure OS lock is set before proceeding */
+		isb();
+
+		dbg.state[i++] =  dbg_read(DBGDSCRext);
+		for (j = 0; j < dbg.nr_bp; j++)
+			i = dbg_read_arch32_bxr(dbg.state, i, j);
+		for (j = 0; j < dbg.nr_wp; j++)
+			i = dbg_read_arch32_wxr(dbg.state, i, j);
+		dbg.state[i++] =  dbg_read(DBGDCCINT);
+		dbg.state[i++] =  dbg_read(DBGCLAIMCLR);
+		dbg.state[i++] =  dbg_read(DBGOSECCR);
+		dbg.state[i++] =  dbg_read(DBGDTRRXext);
+		dbg.state[i++] =  dbg_read(DBGDTRTXext);
+
+		/* Set the OS double lock */
+		isb();
+		dbg_write(0x1, DBGOSDLR);
+		isb();
+		break;
+	default:
+		pr_err_ratelimited("unsupported dbg arch %d in %s\n", dbg.arch,
+				   __func__);
+	}
+}
+
+static inline void dbg_restore_state(int cpu)
+{
+	int i, j;
+
+	i = cpu * MAX_DBG_REGS;
+
+	switch (dbg.arch) {
+	case ARM_DEBUG_ARCH_V8:
+		/* Clear the OS double lock */
+		isb();
+		dbg_write(0x0, DBGOSDLR);
+		isb();
+
+		/* Set OS lock. Lock will already be set after power collapse
+		 * but this write is included to ensure it is set.
+		 */
+		dbg_write(OSLOCK_MAGIC, DBGOSLAR);
+		isb();
+
+		dbg_write(dbg.state[i++], DBGDSCRext);
+		for (j = 0; j < dbg.nr_bp; j++)
+			i = dbg_write_arch32_bxr((uint32_t *)dbg.state, i, j);
+		for (j = 0; j < dbg.nr_wp; j++)
+			i = dbg_write_arch32_wxr((uint32_t *)dbg.state, i, j);
+		dbg_write(dbg.state[i++], DBGDCCINT);
+		dbg_write(dbg.state[i++], DBGCLAIMSET);
+		dbg_write(dbg.state[i++], DBGOSECCR);
+		dbg_write(dbg.state[i++], DBGDTRRXext);
+		dbg_write(dbg.state[i++], DBGDTRTXext);
+
+		isb();
+		dbg_write(0x0, DBGOSLAR);
+		isb();
+		break;
+	default:
+		pr_err_ratelimited("unsupported dbg arch %d in %s\n", dbg.arch,
+				   __func__);
+	}
+}
+
+static void dbg_init_arch_data(void)
+{
+	uint32_t dbgdidr;
+
+	/* This will run on core0 so use it to populate parameters */
+	dbgdidr = dbg_read(DBGDIDR);
+	dbg.arch = BMVAL(dbgdidr, 16, 19);
+	dbg.nr_ctx_cmp = BMVAL(dbgdidr, 20, 23) + 1;
+	dbg.nr_bp = BMVAL(dbgdidr, 24, 27) + 1;
+	dbg.nr_wp = BMVAL(dbgdidr, 28, 31) + 1;
+}
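+
+/*
+ * Example (illustrative, with a hypothetical register value): given the
+ * field extraction above, a DBGDIDR of 0x35160000 would decode to
+ * nr_wp = 3 + 1 = 4, nr_bp = 5 + 1 = 6, nr_ctx_cmp = 1 + 1 = 2 and
+ * arch = 6.
+ */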
+#endif
+
+/*
+ * msm_jtag_save_state - save debug registers
+ *
+ * Debug registers are saved before power collapse if the debug
+ * architecture is supported and TZ does not handle the save and
+ * restore of the debug registers itself.
+ *
+ * CONTEXT:
+ * Called with preemption off and interrupts locked from:
+ * 1. per_cpu idle thread context for idle power collapses
+ * or
+ * 2. per_cpu idle thread context for hotplug/suspend power collapse
+ *    for nonboot cpus
+ * or
+ * 3. suspend thread context for suspend power collapse for core0
+ *
+ * In all cases we will run on the same cpu for the entire duration.
+ */
+void msm_jtag_save_state(void)
+{
+	int cpu;
+
+	cpu = raw_smp_processor_id();
+
+	msm_jtag_save_cntr[cpu]++;
+	/* ensure counter is updated before moving forward */
+	mb();
+
+	msm_jtag_etm_save_state();
+	if (dbg.save_restore_enabled)
+		dbg_save_state(cpu);
+}
+EXPORT_SYMBOL(msm_jtag_save_state);
+
+void msm_jtag_restore_state(void)
+{
+	int cpu;
+
+	cpu = raw_smp_processor_id();
+
+	/* Attempt restore only if save has been done. If power collapse
+	 * is disabled, hotplug off of non-boot core will result in WFI
+	 * and hence msm_jtag_save_state will not occur. Subsequently,
+	 * during hotplug on of non-boot core when msm_jtag_restore_state
+	 * is called via msm_platform_secondary_init, this check will help
+	 * bail us out without restoring.
+	 */
+	if (msm_jtag_save_cntr[cpu] == msm_jtag_restore_cntr[cpu])
+		return;
+	else if (msm_jtag_save_cntr[cpu] != msm_jtag_restore_cntr[cpu] + 1)
+		pr_err_ratelimited("jtag imbalance, save:%lu, restore:%lu\n",
+				   (unsigned long)msm_jtag_save_cntr[cpu],
+				   (unsigned long)msm_jtag_restore_cntr[cpu]);
+
+	msm_jtag_restore_cntr[cpu]++;
+	/* ensure counter is updated before moving forward */
+	mb();
+
+	if (dbg.save_restore_enabled)
+		dbg_restore_state(cpu);
+	msm_jtag_etm_restore_state();
+}
+EXPORT_SYMBOL(msm_jtag_restore_state);
+
+static inline bool dbg_arch_supported(uint8_t arch)
+{
+	switch (arch) {
+	case ARM_DEBUG_ARCH_V8:
+		break;
+	default:
+		return false;
+	}
+	return true;
+}
+
+static int jtag_hotcpu_save_callback(struct notifier_block *nfb,
+				unsigned long action, void *hcpu)
+{
+	switch (action & (~CPU_TASKS_FROZEN)) {
+	case CPU_DYING:
+		msm_jtag_save_state();
+		break;
+	}
+	return NOTIFY_OK;
+}
+
+static struct notifier_block jtag_hotcpu_save_notifier = {
+	.notifier_call = jtag_hotcpu_save_callback,
+};
+
+static int jtag_hotcpu_restore_callback(struct notifier_block *nfb,
+					unsigned long action, void *hcpu)
+{
+	switch (action & (~CPU_TASKS_FROZEN)) {
+	case CPU_STARTING:
+		msm_jtag_restore_state();
+		break;
+	}
+	return NOTIFY_OK;
+}
+
+static struct notifier_block jtag_hotcpu_restore_notifier = {
+	.notifier_call = jtag_hotcpu_restore_callback,
+	.priority = 1,
+};
+
+static int jtag_cpu_pm_callback(struct notifier_block *nfb,
+				unsigned long action, void *hcpu)
+{
+	switch (action) {
+	case CPU_PM_ENTER:
+		msm_jtag_save_state();
+		break;
+	case CPU_PM_ENTER_FAILED:
+	case CPU_PM_EXIT:
+		msm_jtag_restore_state();
+		break;
+	}
+	return NOTIFY_OK;
+}
+
+static struct notifier_block jtag_cpu_pm_notifier = {
+	.notifier_call = jtag_cpu_pm_callback,
+};
+
+static int __init msm_jtag_dbg_init(void)
+{
+	int ret;
+	u64 version = 0;
+
+	if (msm_jtag_fuse_apps_access_disabled())
+		return -EPERM;
+
+	/* This will run on core0 so use it to populate parameters */
+	dbg_init_arch_data();
+
+	if (dbg_arch_supported(dbg.arch)) {
+		if (!scm_get_feat_version(TZ_DBG_ETM_FEAT_ID, &version) &&
+			version < TZ_DBG_ETM_VER) {
+			dbg.save_restore_enabled = true;
+		} else {
+			pr_info("dbg save-restore supported by TZ\n");
+			goto dbg_out;
+		}
+	} else {
+		pr_info("dbg arch %u not supported\n", dbg.arch);
+		goto dbg_out;
+	}
+
+	/* Allocate dbg state save space */
+#ifdef CONFIG_ARM64
+	dbg.state = kcalloc(MAX_DBG_STATE_SIZE, sizeof(uint64_t), GFP_KERNEL);
+#else
+	dbg.state = kcalloc(MAX_DBG_STATE_SIZE, sizeof(uint32_t), GFP_KERNEL);
+#endif
+	if (!dbg.state) {
+		ret = -ENOMEM;
+		goto dbg_err;
+	}
+
+	register_hotcpu_notifier(&jtag_hotcpu_save_notifier);
+	register_hotcpu_notifier(&jtag_hotcpu_restore_notifier);
+	cpu_pm_register_notifier(&jtag_cpu_pm_notifier);
+dbg_out:
+	return 0;
+dbg_err:
+	return ret;
+}
+arch_initcall(msm_jtag_dbg_init);
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/soc/qcom/jtagv8-etm.c	2019-01-22 16:16:26.659274986 +0100
@@ -0,0 +1,1725 @@
+/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/smp.h>
+#include <linux/export.h>
+#include <linux/printk.h>
+#include <linux/ratelimit.h>
+#include <linux/coresight.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <linux/bitops.h>
+#include <linux/of.h>
+#include <linux/notifier.h>
+#include <linux/cpu.h>
+#include <linux/spinlock.h>
+#include <linux/mutex.h>
+#include <soc/qcom/scm.h>
+#include <soc/qcom/jtag.h>
+#include <asm/smp_plat.h>
+#include <asm/etmv4x.h>
+#include <soc/qcom/socinfo.h>
+
+#define CORESIGHT_LAR		(0xFB0)
+
+#define TIMEOUT_US		(100)
+
+#define BM(lsb, msb)		((BIT(msb) - BIT(lsb)) + BIT(msb))
+#define BMVAL(val, lsb, msb)	((val & BM(lsb, msb)) >> lsb)
+#define BVAL(val, n)		((val & BIT(n)) >> n)
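+
+/*
+ * Example: BM(lsb, msb) builds an inclusive bit mask and BMVAL extracts
+ * the corresponding field, e.g. BM(4, 7) == 0xF0,
+ * BMVAL(0x12345678, 4, 7) == 0x7 (bits [7:4]) and BVAL(0x5, 2) == 0x1.
+ */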
+
+/*
+ * ETMv4 registers:
+ * 0x000 - 0x2FC: Trace         registers
+ * 0x300 - 0x314: Management    registers
+ * 0x318 - 0xEFC: Trace         registers
+ * 0xF00: Management		registers
+ * 0xFA0 - 0xFA4: Trace		registers
+ * 0xFA8 - 0xFFC: Management	registers
+ */
+
+/* Trace registers (0x000-0x2FC) */
+/* Main control and configuration registers  */
+#define TRCPRGCTLR			(0x004)
+#define TRCPROCSELR			(0x008)
+#define TRCSTATR			(0x00C)
+#define TRCCONFIGR			(0x010)
+#define TRCAUXCTLR			(0x018)
+#define TRCEVENTCTL0R			(0x020)
+#define TRCEVENTCTL1R			(0x024)
+#define TRCSTALLCTLR			(0x02C)
+#define TRCTSCTLR			(0x030)
+#define TRCSYNCPR			(0x034)
+#define TRCCCCTLR			(0x038)
+#define TRCBBCTLR			(0x03C)
+#define TRCTRACEIDR			(0x040)
+#define TRCQCTLR			(0x044)
+/* Filtering control registers */
+#define TRCVICTLR			(0x080)
+#define TRCVIIECTLR			(0x084)
+#define TRCVISSCTLR			(0x088)
+#define TRCVIPCSSCTLR			(0x08C)
+#define TRCVDCTLR			(0x0A0)
+#define TRCVDSACCTLR			(0x0A4)
+#define TRCVDARCCTLR			(0x0A8)
+/* Derived resources registers */
+#define TRCSEQEVRn(n)			(0x100 + (n * 4))
+#define TRCSEQRSTEVR			(0x118)
+#define TRCSEQSTR			(0x11C)
+#define TRCEXTINSELR			(0x120)
+#define TRCCNTRLDVRn(n)			(0x140 + (n * 4))
+#define TRCCNTCTLRn(n)			(0x150 + (n * 4))
+#define TRCCNTVRn(n)			(0x160 + (n * 4))
+/* ID registers */
+#define TRCIDR8				(0x180)
+#define TRCIDR9				(0x184)
+#define TRCIDR10			(0x188)
+#define TRCIDR11			(0x18C)
+#define TRCIDR12			(0x190)
+#define TRCIDR13			(0x194)
+#define TRCIMSPEC0			(0x1C0)
+#define TRCIMSPECn(n)			(0x1C0 + (n * 4))
+#define TRCIDR0				(0x1E0)
+#define TRCIDR1				(0x1E4)
+#define TRCIDR2				(0x1E8)
+#define TRCIDR3				(0x1EC)
+#define TRCIDR4				(0x1F0)
+#define TRCIDR5				(0x1F4)
+#define TRCIDR6				(0x1F8)
+#define TRCIDR7				(0x1FC)
+/* Resource selection registers */
+#define TRCRSCTLRn(n)			(0x200 + (n * 4))
+/* Single-shot comparator registers */
+#define TRCSSCCRn(n)			(0x280 + (n * 4))
+#define TRCSSCSRn(n)			(0x2A0 + (n * 4))
+#define TRCSSPCICRn(n)			(0x2C0 + (n * 4))
+/* Management registers (0x300-0x314) */
+#define TRCOSLAR			(0x300)
+#define TRCOSLSR			(0x304)
+#define TRCPDCR				(0x310)
+#define TRCPDSR				(0x314)
+/* Trace registers (0x318-0xEFC) */
+/* Comparator registers */
+#define TRCACVRn(n)			(0x400 + (n * 8))
+#define TRCACATRn(n)			(0x480 + (n * 8))
+#define TRCDVCVRn(n)			(0x500 + (n * 16))
+#define TRCDVCMRn(n)			(0x580 + (n * 16))
+#define TRCCIDCVRn(n)			(0x600 + (n * 8))
+#define TRCVMIDCVRn(n)			(0x640 + (n * 8))
+#define TRCCIDCCTLR0			(0x680)
+#define TRCCIDCCTLR1			(0x684)
+#define TRCVMIDCCTLR0			(0x688)
+#define TRCVMIDCCTLR1			(0x68C)
+/* Management register (0xF00) */
+/* Integration control registers */
+#define TRCITCTRL			(0xF00)
+/* Trace registers (0xFA0-0xFA4) */
+/* Claim tag registers */
+#define TRCCLAIMSET			(0xFA0)
+#define TRCCLAIMCLR			(0xFA4)
+/* Management registers (0xFA8-0xFFC) */
+#define TRCDEVAFF0			(0xFA8)
+#define TRCDEVAFF1			(0xFAC)
+#define TRCLAR				(0xFB0)
+#define TRCLSR				(0xFB4)
+#define TRCAUTHSTATUS			(0xFB8)
+#define TRCDEVARCH			(0xFBC)
+#define TRCDEVID			(0xFC8)
+#define TRCDEVTYPE			(0xFCC)
+#define TRCPIDR4			(0xFD0)
+#define TRCPIDR5			(0xFD4)
+#define TRCPIDR6			(0xFD8)
+#define TRCPIDR7			(0xFDC)
+#define TRCPIDR0			(0xFE0)
+#define TRCPIDR1			(0xFE4)
+#define TRCPIDR2			(0xFE8)
+#define TRCPIDR3			(0xFEC)
+#define TRCCIDR0			(0xFF0)
+#define TRCCIDR1			(0xFF4)
+#define TRCCIDR2			(0xFF8)
+#define TRCCIDR3			(0xFFC)
+
+/* ETMv4 resources */
+#define ETM_MAX_NR_PE			(8)
+#define ETM_MAX_CNTR			(4)
+#define ETM_MAX_SEQ_STATES		(4)
+#define ETM_MAX_EXT_INP_SEL		(4)
+#define ETM_MAX_EXT_INP			(256)
+#define ETM_MAX_EXT_OUT			(4)
+#define ETM_MAX_SINGLE_ADDR_CMP		(16)
+#define ETM_MAX_ADDR_RANGE_CMP		(ETM_MAX_SINGLE_ADDR_CMP / 2)
+#define ETM_MAX_DATA_VAL_CMP		(8)
+#define ETM_MAX_CTXID_CMP		(8)
+#define ETM_MAX_VMID_CMP		(8)
+#define ETM_MAX_PE_CMP			(8)
+#define ETM_MAX_RES_SEL			(32)
+#define ETM_MAX_SS_CMP			(8)
+
+#define ETM_CPMR_CLKEN			(0x4)
+#define ETM_ARCH_V4			(0x40)
+
+#define MAX_ETM_STATE_SIZE	(165)
+
+#define TZ_DBG_ETM_FEAT_ID	(0x8)
+#define TZ_DBG_ETM_VER		(0x400000)
+#define HW_SOC_ID_M8953		(293)
+
+#define etm_writel(etm, val, off)	\
+		   __raw_writel(val, etm->base + off)
+#define etm_readl(etm, off)		\
+		  __raw_readl(etm->base + off)
+
+#define etm_writeq(etm, val, off)	\
+		   __raw_writeq(val, etm->base + off)
+#define etm_readq(etm, off)		\
+		  __raw_readq(etm->base + off)
+
+#define ETM_LOCK(base)							\
+do {									\
+	mb(); /* ensure configuration take effect before we lock it */	\
+	etm_writel(base, 0x0, CORESIGHT_LAR);				\
+} while (0)
+
+#define ETM_UNLOCK(base)						\
+do {									\
+	etm_writel(base, CORESIGHT_UNLOCK, CORESIGHT_LAR);		\
+	mb(); /* ensure unlock take effect before we configure */	\
+} while (0)
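+
+/*
+ * CORESIGHT_UNLOCK (defined in linux/coresight.h as 0xc5acce55) is the
+ * architected key for the CoreSight Lock Access Register; writing any
+ * other value, such as the 0x0 above, re-engages the software lock.
+ */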
+
+struct etm_ctx {
+	uint8_t			arch;
+	uint8_t			nr_pe;
+	uint8_t			nr_pe_cmp;
+	uint8_t			nr_addr_cmp;
+	uint8_t			nr_data_cmp;
+	uint8_t			nr_cntr;
+	uint8_t			nr_ext_inp;
+	uint8_t			nr_ext_inp_sel;
+	uint8_t			nr_ext_out;
+	uint8_t			nr_ctxid_cmp;
+	uint8_t			nr_vmid_cmp;
+	uint8_t			nr_seq_state;
+	uint8_t			nr_event;
+	uint8_t			nr_resource;
+	uint8_t			nr_ss_cmp;
+	bool			si_enable;
+	bool			save_restore_disabled;
+	bool			save_restore_enabled;
+	bool			os_lock_present;
+	bool			init;
+	bool			enable;
+	void __iomem		*base;
+	struct device		*dev;
+	uint64_t		*state;
+	spinlock_t		spinlock;
+	struct mutex		mutex;
+};
+
+static struct etm_ctx *etm[NR_CPUS];
+static int cnt;
+
+static struct clk *clock[NR_CPUS];
+
+ATOMIC_NOTIFIER_HEAD(etm_save_notifier_list);
+ATOMIC_NOTIFIER_HEAD(etm_restore_notifier_list);
+
+int msm_jtag_save_register(struct notifier_block *nb)
+{
+	return atomic_notifier_chain_register(&etm_save_notifier_list, nb);
+}
+EXPORT_SYMBOL(msm_jtag_save_register);
+
+int msm_jtag_save_unregister(struct notifier_block *nb)
+{
+	return atomic_notifier_chain_unregister(&etm_save_notifier_list, nb);
+}
+EXPORT_SYMBOL(msm_jtag_save_unregister);
+
+int msm_jtag_restore_register(struct notifier_block *nb)
+{
+	return atomic_notifier_chain_register(&etm_restore_notifier_list, nb);
+}
+EXPORT_SYMBOL(msm_jtag_restore_register);
+
+int msm_jtag_restore_unregister(struct notifier_block *nb)
+{
+	return atomic_notifier_chain_unregister(&etm_restore_notifier_list, nb);
+}
+EXPORT_SYMBOL(msm_jtag_restore_unregister);
+
+static void etm_os_lock(struct etm_ctx *etmdata)
+{
+	if (etmdata->os_lock_present) {
+		etm_writel(etmdata, 0x1, TRCOSLAR);
+		/* Ensure OS lock is set before proceeding */
+		mb();
+	}
+}
+
+static void etm_os_unlock(struct etm_ctx *etmdata)
+{
+	if (etmdata->os_lock_present) {
+		/* Ensure all writes are complete before clearing OS lock */
+		mb();
+		etm_writel(etmdata, 0x0, TRCOSLAR);
+	}
+}
+
+static inline void etm_mm_save_state(struct etm_ctx *etmdata)
+{
+	int i, j, count;
+
+	i = 0;
+	mb(); /* ensure all register writes complete before saving them */
+	isb();
+	ETM_UNLOCK(etmdata);
+
+	switch (etmdata->arch) {
+	case ETM_ARCH_V4:
+		etm_os_lock(etmdata);
+
+		/* poll until programmers' model becomes stable */
+		for (count = TIMEOUT_US; (BVAL(etm_readl(etmdata, TRCSTATR), 1)
+		     != 1) && count > 0; count--)
+			udelay(1);
+		if (count == 0)
+			pr_err_ratelimited("programmers model is not stable\n"
+					   );
+
+		/* main control and configuration registers */
+		etmdata->state[i++] = etm_readl(etmdata, TRCPROCSELR);
+		etmdata->state[i++] = etm_readl(etmdata, TRCCONFIGR);
+		etmdata->state[i++] = etm_readl(etmdata, TRCAUXCTLR);
+		etmdata->state[i++] = etm_readl(etmdata, TRCEVENTCTL0R);
+		etmdata->state[i++] = etm_readl(etmdata, TRCEVENTCTL1R);
+		etmdata->state[i++] = etm_readl(etmdata, TRCSTALLCTLR);
+		etmdata->state[i++] = etm_readl(etmdata, TRCTSCTLR);
+		etmdata->state[i++] = etm_readl(etmdata, TRCSYNCPR);
+		etmdata->state[i++] = etm_readl(etmdata, TRCCCCTLR);
+		etmdata->state[i++] = etm_readl(etmdata, TRCBBCTLR);
+		etmdata->state[i++] = etm_readl(etmdata, TRCTRACEIDR);
+		etmdata->state[i++] = etm_readl(etmdata, TRCQCTLR);
+		/* filtering control registers */
+		etmdata->state[i++] = etm_readl(etmdata, TRCVICTLR);
+		etmdata->state[i++] = etm_readl(etmdata, TRCVIIECTLR);
+		etmdata->state[i++] = etm_readl(etmdata, TRCVISSCTLR);
+		etmdata->state[i++] = etm_readl(etmdata, TRCVIPCSSCTLR);
+		etmdata->state[i++] = etm_readl(etmdata, TRCVDCTLR);
+		etmdata->state[i++] = etm_readl(etmdata, TRCVDSACCTLR);
+		etmdata->state[i++] = etm_readl(etmdata, TRCVDARCCTLR);
+		/* derived resource registers */
+		for (j = 0; j < etmdata->nr_seq_state - 1; j++)
+			etmdata->state[i++] = etm_readl(etmdata, TRCSEQEVRn(j));
+		etmdata->state[i++] = etm_readl(etmdata, TRCSEQRSTEVR);
+		etmdata->state[i++] = etm_readl(etmdata, TRCSEQSTR);
+		etmdata->state[i++] = etm_readl(etmdata, TRCEXTINSELR);
+		for (j = 0; j < etmdata->nr_cntr; j++) {
+			etmdata->state[i++] = etm_readl(etmdata,
+						       TRCCNTRLDVRn(j));
+			etmdata->state[i++] = etm_readl(etmdata,
+						       TRCCNTCTLRn(j));
+			etmdata->state[i++] = etm_readl(etmdata,
+						       TRCCNTVRn(j));
+		}
+		/* resource selection registers */
+		for (j = 0; j < etmdata->nr_resource; j++)
+			etmdata->state[i++] = etm_readl(etmdata, TRCRSCTLRn(j));
+		/* comparator registers */
+		for (j = 0; j < etmdata->nr_addr_cmp * 2; j++) {
+			etmdata->state[i++] = etm_readq(etmdata, TRCACVRn(j));
+			etmdata->state[i++] = etm_readq(etmdata, TRCACATRn(j));
+		}
+		for (j = 0; j < etmdata->nr_data_cmp; j++) {
+			etmdata->state[i++] = etm_readq(etmdata, TRCDVCVRn(j));
+			etmdata->state[i++] = etm_readq(etmdata, TRCDVCMRn(j));
+		}
+		for (j = 0; j < etmdata->nr_ctxid_cmp; j++)
+			etmdata->state[i++] = etm_readq(etmdata, TRCCIDCVRn(j));
+		etmdata->state[i++] = etm_readl(etmdata, TRCCIDCCTLR0);
+		etmdata->state[i++] = etm_readl(etmdata, TRCCIDCCTLR1);
+		for (j = 0; j < etmdata->nr_vmid_cmp; j++)
+			etmdata->state[i++] = etm_readq(etmdata,
+							TRCVMIDCVRn(j));
+		etmdata->state[i++] = etm_readl(etmdata, TRCVMIDCCTLR0);
+		etmdata->state[i++] = etm_readl(etmdata, TRCVMIDCCTLR1);
+		/* single-shot comparator registers */
+		for (j = 0; j < etmdata->nr_ss_cmp; j++) {
+			etmdata->state[i++] = etm_readl(etmdata, TRCSSCCRn(j));
+			etmdata->state[i++] = etm_readl(etmdata, TRCSSCSRn(j));
+			etmdata->state[i++] = etm_readl(etmdata,
+							TRCSSPCICRn(j));
+		}
+		/* claim tag registers */
+		etmdata->state[i++] = etm_readl(etmdata, TRCCLAIMCLR);
+		/* program ctrl register */
+		etmdata->state[i++] = etm_readl(etmdata, TRCPRGCTLR);
+
+		/* ensure trace unit is idle to be powered down */
+		for (count = TIMEOUT_US; (BVAL(etm_readl(etmdata, TRCSTATR), 0)
+		     != 1) && count > 0; count--)
+			udelay(1);
+		if (count == 0)
+			pr_err_ratelimited("timeout waiting for idle state\n");
+
+		atomic_notifier_call_chain(&etm_save_notifier_list, 0, NULL);
+
+		break;
+	default:
+		pr_err_ratelimited("unsupported etm arch %d in %s\n",
+				   etmdata->arch, __func__);
+	}
+
+	ETM_LOCK(etmdata);
+}
+
+static inline void etm_mm_restore_state(struct etm_ctx *etmdata)
+{
+	int i, j;
+
+	i = 0;
+	ETM_UNLOCK(etmdata);
+
+	switch (etmdata->arch) {
+	case ETM_ARCH_V4:
+		atomic_notifier_call_chain(&etm_restore_notifier_list, 0, NULL);
+
+		/* check OS lock is locked */
+		if (BVAL(etm_readl(etmdata, TRCOSLSR), 1) != 1) {
+			pr_err_ratelimited("OS lock is unlocked\n");
+			etm_os_lock(etmdata);
+		}
+
+		/* main control and configuration registers */
+		etm_writel(etmdata, etmdata->state[i++], TRCPROCSELR);
+		etm_writel(etmdata, etmdata->state[i++], TRCCONFIGR);
+		etm_writel(etmdata, etmdata->state[i++], TRCAUXCTLR);
+		etm_writel(etmdata, etmdata->state[i++], TRCEVENTCTL0R);
+		etm_writel(etmdata, etmdata->state[i++], TRCEVENTCTL1R);
+		etm_writel(etmdata, etmdata->state[i++], TRCSTALLCTLR);
+		etm_writel(etmdata, etmdata->state[i++], TRCTSCTLR);
+		etm_writel(etmdata, etmdata->state[i++], TRCSYNCPR);
+		etm_writel(etmdata, etmdata->state[i++], TRCCCCTLR);
+		etm_writel(etmdata, etmdata->state[i++], TRCBBCTLR);
+		etm_writel(etmdata, etmdata->state[i++], TRCTRACEIDR);
+		etm_writel(etmdata, etmdata->state[i++], TRCQCTLR);
+		/* filtering control registers */
+		etm_writel(etmdata, etmdata->state[i++], TRCVICTLR);
+		etm_writel(etmdata, etmdata->state[i++], TRCVIIECTLR);
+		etm_writel(etmdata, etmdata->state[i++], TRCVISSCTLR);
+		etm_writel(etmdata, etmdata->state[i++], TRCVIPCSSCTLR);
+		etm_writel(etmdata, etmdata->state[i++], TRCVDCTLR);
+		etm_writel(etmdata, etmdata->state[i++], TRCVDSACCTLR);
+		etm_writel(etmdata, etmdata->state[i++], TRCVDARCCTLR);
+		/* derived resources registers */
+		for (j = 0; j < etmdata->nr_seq_state - 1; j++)
+			etm_writel(etmdata, etmdata->state[i++], TRCSEQEVRn(j));
+		etm_writel(etmdata, etmdata->state[i++], TRCSEQRSTEVR);
+		etm_writel(etmdata, etmdata->state[i++], TRCSEQSTR);
+		etm_writel(etmdata, etmdata->state[i++], TRCEXTINSELR);
+		for (j = 0; j < etmdata->nr_cntr; j++) {
+			etm_writel(etmdata, etmdata->state[i++],
+				  TRCCNTRLDVRn(j));
+			etm_writel(etmdata, etmdata->state[i++],
+				  TRCCNTCTLRn(j));
+			etm_writel(etmdata, etmdata->state[i++], TRCCNTVRn(j));
+		}
+		/* resource selection registers */
+		for (j = 0; j < etmdata->nr_resource; j++)
+			etm_writel(etmdata, etmdata->state[i++], TRCRSCTLRn(j));
+		/* comparator registers */
+		for (j = 0; j < etmdata->nr_addr_cmp * 2; j++) {
+			etm_writeq(etmdata, etmdata->state[i++], TRCACVRn(j));
+			etm_writeq(etmdata, etmdata->state[i++], TRCACATRn(j));
+		}
+		for (j = 0; j < etmdata->nr_data_cmp; j++) {
+			etm_writeq(etmdata, etmdata->state[i++], TRCDVCVRn(j));
+			etm_writeq(etmdata, etmdata->state[i++], TRCDVCMRn(j));
+		}
+		for (j = 0; j < etmdata->nr_ctxid_cmp; j++)
+			etm_writeq(etmdata, etmdata->state[i++], TRCCIDCVRn(j));
+		etm_writel(etmdata, etmdata->state[i++], TRCCIDCCTLR0);
+		etm_writel(etmdata, etmdata->state[i++], TRCCIDCCTLR1);
+		for (j = 0; j < etmdata->nr_vmid_cmp; j++)
+			etm_writeq(etmdata, etmdata->state[i++],
+				   TRCVMIDCVRn(j));
+		etm_writel(etmdata, etmdata->state[i++], TRCVMIDCCTLR0);
+		etm_writel(etmdata, etmdata->state[i++], TRCVMIDCCTLR1);
+		/* single-shot comparator registers */
+		for (j = 0; j < etmdata->nr_ss_cmp; j++) {
+			etm_writel(etmdata, etmdata->state[i++], TRCSSCCRn(j));
+			etm_writel(etmdata, etmdata->state[i++], TRCSSCSRn(j));
+			etm_writel(etmdata, etmdata->state[i++],
+				   TRCSSPCICRn(j));
+		}
+		/* claim tag registers */
+		etm_writel(etmdata, etmdata->state[i++], TRCCLAIMSET);
+		/* program ctrl register */
+		etm_writel(etmdata, etmdata->state[i++], TRCPRGCTLR);
+
+		etm_os_unlock(etmdata);
+		break;
+	default:
+		pr_err_ratelimited("unsupported etm arch %d in %s\n",
+				   etmdata->arch, __func__);
+	}
+
+	ETM_LOCK(etmdata);
+}
+
+static inline void etm_clk_disable(void)
+{
+	uint32_t cpmr;
+
+	isb();
+	cpmr = trc_readl(CPMR_EL1);
+	cpmr &= ~ETM_CPMR_CLKEN;
+	trc_write(cpmr, CPMR_EL1);
+}
+
+static inline void etm_clk_enable(void)
+{
+	uint32_t cpmr;
+
+	cpmr = trc_readl(CPMR_EL1);
+	cpmr |= ETM_CPMR_CLKEN;
+	trc_write(cpmr, CPMR_EL1);
+	isb();
+}
+
+static int etm_read_ssxr(uint64_t *state, int i, int j)
+{
+	switch (j) {
+	case 0:
+		state[i++] = trc_readl(ETMSEQEVR0);
+		break;
+	case 1:
+		state[i++] = trc_readl(ETMSEQEVR1);
+		break;
+	case 2:
+		state[i++] = trc_readl(ETMSEQEVR2);
+		break;
+	default:
+		pr_err_ratelimited("idx %d out of bounds in %s\n", j, __func__);
+	}
+	return i;
+}
+
+static int etm_read_crxr(uint64_t *state, int i, int j)
+{
+	switch (j) {
+	case 0:
+		state[i++] = trc_readl(ETMCNTRLDVR0);
+		state[i++] = trc_readl(ETMCNTCTLR0);
+		state[i++] = trc_readl(ETMCNTVR0);
+		break;
+	case 1:
+		state[i++] = trc_readl(ETMCNTRLDVR1);
+		state[i++] = trc_readl(ETMCNTCTLR1);
+		state[i++] = trc_readl(ETMCNTVR1);
+		break;
+	case 2:
+		state[i++] = trc_readl(ETMCNTRLDVR2);
+		state[i++] = trc_readl(ETMCNTCTLR2);
+		state[i++] = trc_readl(ETMCNTVR2);
+		break;
+	case 3:
+		state[i++] = trc_readl(ETMCNTRLDVR3);
+		state[i++] = trc_readl(ETMCNTCTLR3);
+		state[i++] = trc_readl(ETMCNTVR3);
+		break;
+	default:
+		pr_err_ratelimited("idx %d out of bounds in %s\n", j, __func__);
+	}
+	return i;
+}
+
+static int etm_read_rsxr(uint64_t *state, int i, int j)
+{
+	switch (j) {
+	case 2:
+		state[i++] = trc_readl(ETMRSCTLR2);
+		break;
+	case 3:
+		state[i++] = trc_readl(ETMRSCTLR3);
+		break;
+	case 4:
+		state[i++] = trc_readl(ETMRSCTLR4);
+		break;
+	case 5:
+		state[i++] = trc_readl(ETMRSCTLR5);
+		break;
+	case 6:
+		state[i++] = trc_readl(ETMRSCTLR6);
+		break;
+	case 7:
+		state[i++] = trc_readl(ETMRSCTLR7);
+		break;
+	case 8:
+		state[i++] = trc_readl(ETMRSCTLR8);
+		break;
+	case 9:
+		state[i++] = trc_readl(ETMRSCTLR9);
+		break;
+	case 10:
+		state[i++] = trc_readl(ETMRSCTLR10);
+		break;
+	case 11:
+		state[i++] = trc_readl(ETMRSCTLR11);
+		break;
+	case 12:
+		state[i++] = trc_readl(ETMRSCTLR12);
+		break;
+	case 13:
+		state[i++] = trc_readl(ETMRSCTLR13);
+		break;
+	case 14:
+		state[i++] = trc_readl(ETMRSCTLR14);
+		break;
+	case 15:
+		state[i++] = trc_readl(ETMRSCTLR15);
+		break;
+	case 16:
+		state[i++] = trc_readl(ETMRSCTLR16);
+		break;
+	case 17:
+		state[i++] = trc_readl(ETMRSCTLR17);
+		break;
+	case 18:
+		state[i++] = trc_readl(ETMRSCTLR18);
+		break;
+	case 19:
+		state[i++] = trc_readl(ETMRSCTLR19);
+		break;
+	case 20:
+		state[i++] = trc_readl(ETMRSCTLR20);
+		break;
+	case 21:
+		state[i++] = trc_readl(ETMRSCTLR21);
+		break;
+	case 22:
+		state[i++] = trc_readl(ETMRSCTLR22);
+		break;
+	case 23:
+		state[i++] = trc_readl(ETMRSCTLR23);
+		break;
+	case 24:
+		state[i++] = trc_readl(ETMRSCTLR24);
+		break;
+	case 25:
+		state[i++] = trc_readl(ETMRSCTLR25);
+		break;
+	case 26:
+		state[i++] = trc_readl(ETMRSCTLR26);
+		break;
+	case 27:
+		state[i++] = trc_readl(ETMRSCTLR27);
+		break;
+	case 28:
+		state[i++] = trc_readl(ETMRSCTLR28);
+		break;
+	case 29:
+		state[i++] = trc_readl(ETMRSCTLR29);
+		break;
+	case 30:
+		state[i++] = trc_readl(ETMRSCTLR30);
+		break;
+	case 31:
+		state[i++] = trc_readl(ETMRSCTLR31);
+		break;
+	default:
+		pr_err_ratelimited("idx %d out of bounds in %s\n", j, __func__);
+	}
+	return i;
+}
+
+static int etm_read_acr(uint64_t *state, int i, int j)
+{
+	switch (j) {
+	case 0:
+		state[i++] = trc_readq(ETMACVR0);
+		state[i++] = trc_readq(ETMACATR0);
+		break;
+	case 1:
+		state[i++] = trc_readq(ETMACVR1);
+		state[i++] = trc_readq(ETMACATR1);
+		break;
+	case 2:
+		state[i++] = trc_readq(ETMACVR2);
+		state[i++] = trc_readq(ETMACATR2);
+		break;
+	case 3:
+		state[i++] = trc_readq(ETMACVR3);
+		state[i++] = trc_readq(ETMACATR3);
+		break;
+	case 4:
+		state[i++] = trc_readq(ETMACVR4);
+		state[i++] = trc_readq(ETMACATR4);
+		break;
+	case 5:
+		state[i++] = trc_readq(ETMACVR5);
+		state[i++] = trc_readq(ETMACATR5);
+		break;
+	case 6:
+		state[i++] = trc_readq(ETMACVR6);
+		state[i++] = trc_readq(ETMACATR6);
+		break;
+	case 7:
+		state[i++] = trc_readq(ETMACVR7);
+		state[i++] = trc_readq(ETMACATR7);
+		break;
+	case 8:
+		state[i++] = trc_readq(ETMACVR8);
+		state[i++] = trc_readq(ETMACATR8);
+		break;
+	case 9:
+		state[i++] = trc_readq(ETMACVR9);
+		state[i++] = trc_readq(ETMACATR9);
+		break;
+	case 10:
+		state[i++] = trc_readq(ETMACVR10);
+		state[i++] = trc_readq(ETMACATR10);
+		break;
+	case 11:
+		state[i++] = trc_readq(ETMACVR11);
+		state[i++] = trc_readq(ETMACATR11);
+		break;
+	case 12:
+		state[i++] = trc_readq(ETMACVR12);
+		state[i++] = trc_readq(ETMACATR12);
+		break;
+	case 13:
+		state[i++] = trc_readq(ETMACVR13);
+		state[i++] = trc_readq(ETMACATR13);
+		break;
+	case 14:
+		state[i++] = trc_readq(ETMACVR14);
+		state[i++] = trc_readq(ETMACATR14);
+		break;
+	case 15:
+		state[i++] = trc_readq(ETMACVR15);
+		state[i++] = trc_readq(ETMACATR15);
+		break;
+	default:
+		pr_err_ratelimited("idx %d out of bounds in %s\n", j, __func__);
+	}
+	return i;
+}
+
+static int etm_read_dvcr(uint64_t *state, int i, int j)
+{
+	switch (j) {
+	case 0:
+		state[i++] = trc_readq(ETMDVCVR0);
+		state[i++] = trc_readq(ETMDVCMR0);
+		break;
+	case 1:
+		state[i++] = trc_readq(ETMDVCVR1);
+		state[i++] = trc_readq(ETMDVCMR1);
+		break;
+	case 2:
+		state[i++] = trc_readq(ETMDVCVR2);
+		state[i++] = trc_readq(ETMDVCMR2);
+		break;
+	case 3:
+		state[i++] = trc_readq(ETMDVCVR3);
+		state[i++] = trc_readq(ETMDVCMR3);
+		break;
+	case 4:
+		state[i++] = trc_readq(ETMDVCVR4);
+		state[i++] = trc_readq(ETMDVCMR4);
+		break;
+	case 5:
+		state[i++] = trc_readq(ETMDVCVR5);
+		state[i++] = trc_readq(ETMDVCMR5);
+		break;
+	case 6:
+		state[i++] = trc_readq(ETMDVCVR6);
+		state[i++] = trc_readq(ETMDVCMR6);
+		break;
+	case 7:
+		state[i++] = trc_readq(ETMDVCVR7);
+		state[i++] = trc_readq(ETMDVCMR7);
+		break;
+	default:
+		pr_err_ratelimited("idx %d out of bounds in %s\n", j, __func__);
+	}
+	return i;
+}
+
+static int etm_read_ccvr(uint64_t *state, int i, int j)
+{
+	switch (j) {
+	case 0:
+		state[i++] = trc_readq(ETMCIDCVR0);
+		break;
+	case 1:
+		state[i++] = trc_readq(ETMCIDCVR1);
+		break;
+	case 2:
+		state[i++] = trc_readq(ETMCIDCVR2);
+		break;
+	case 3:
+		state[i++] = trc_readq(ETMCIDCVR3);
+		break;
+	case 4:
+		state[i++] = trc_readq(ETMCIDCVR4);
+		break;
+	case 5:
+		state[i++] = trc_readq(ETMCIDCVR5);
+		break;
+	case 6:
+		state[i++] = trc_readq(ETMCIDCVR6);
+		break;
+	case 7:
+		state[i++] = trc_readq(ETMCIDCVR7);
+		break;
+	default:
+		pr_err_ratelimited("idx %d out of bounds in %s\n", j, __func__);
+	}
+	return i;
+}
+
+static int etm_read_vcvr(uint64_t *state, int i, int j)
+{
+	switch (j) {
+	case 0:
+		state[i++] = trc_readq(ETMVMIDCVR0);
+		break;
+	case 1:
+		state[i++] = trc_readq(ETMVMIDCVR1);
+		break;
+	case 2:
+		state[i++] = trc_readq(ETMVMIDCVR2);
+		break;
+	case 3:
+		state[i++] = trc_readq(ETMVMIDCVR3);
+		break;
+	case 4:
+		state[i++] = trc_readq(ETMVMIDCVR4);
+		break;
+	case 5:
+		state[i++] = trc_readq(ETMVMIDCVR5);
+		break;
+	case 6:
+		state[i++] = trc_readq(ETMVMIDCVR6);
+		break;
+	case 7:
+		state[i++] = trc_readq(ETMVMIDCVR7);
+		break;
+	default:
+		pr_err_ratelimited("idx %d out of bounds in %s\n", j, __func__);
+	}
+	return i;
+}
+
+static int etm_read_sscr(uint64_t *state, int i, int j)
+{
+	switch (j) {
+	case 0:
+		state[i++] = trc_readl(ETMSSCCR0);
+		state[i++] = trc_readl(ETMSSCSR0);
+		state[i++] = trc_readl(ETMSSPCICR0);
+		break;
+	case 1:
+		state[i++] = trc_readl(ETMSSCCR1);
+		state[i++] = trc_readl(ETMSSCSR1);
+		state[i++] = trc_readl(ETMSSPCICR1);
+		break;
+	case 2:
+		state[i++] = trc_readl(ETMSSCCR2);
+		state[i++] = trc_readl(ETMSSCSR2);
+		state[i++] = trc_readl(ETMSSPCICR2);
+		break;
+	case 3:
+		state[i++] = trc_readl(ETMSSCCR3);
+		state[i++] = trc_readl(ETMSSCSR3);
+		state[i++] = trc_readl(ETMSSPCICR3);
+		break;
+	case 4:
+		state[i++] = trc_readl(ETMSSCCR4);
+		state[i++] = trc_readl(ETMSSCSR4);
+		state[i++] = trc_readl(ETMSSPCICR4);
+		break;
+	case 5:
+		state[i++] = trc_readl(ETMSSCCR5);
+		state[i++] = trc_readl(ETMSSCSR5);
+		state[i++] = trc_readl(ETMSSPCICR5);
+		break;
+	case 6:
+		state[i++] = trc_readl(ETMSSCCR6);
+		state[i++] = trc_readl(ETMSSCSR6);
+		state[i++] = trc_readl(ETMSSPCICR6);
+		break;
+	case 7:
+		state[i++] = trc_readl(ETMSSCCR7);
+		state[i++] = trc_readl(ETMSSCSR7);
+		state[i++] = trc_readl(ETMSSPCICR7);
+		break;
+	default:
+		pr_err_ratelimited("idx %d out of bounds in %s\n", j, __func__);
+	}
+	return i;
+}
+
+static inline void etm_si_save_state(struct etm_ctx *etmdata)
+{
+	int i, j, count;
+
+	i = 0;
+	/* Ensure all writes are complete before saving ETM registers */
+	mb();
+	isb();
+
+	/* Vote for ETM power/clock enable */
+	etm_clk_enable();
+
+	switch (etmdata->arch) {
+	case ETM_ARCH_V4:
+		trc_write(0x1, ETMOSLAR);
+		isb();
+
+		/* poll until programmers' model becomes stable */
+		for (count = TIMEOUT_US; (BVAL(trc_readl(ETMSTATR), 1)
+		     != 1) && count > 0; count--)
+			udelay(1);
+		if (count == 0)
+			pr_err_ratelimited("programmers model is not stable\n");
+
+		/* main control and configuration registers */
+		etmdata->state[i++] = trc_readl(ETMCONFIGR);
+		etmdata->state[i++] = trc_readl(ETMEVENTCTL0R);
+		etmdata->state[i++] = trc_readl(ETMEVENTCTL1R);
+		etmdata->state[i++] = trc_readl(ETMSTALLCTLR);
+		etmdata->state[i++] = trc_readl(ETMTSCTLR);
+		etmdata->state[i++] = trc_readl(ETMSYNCPR);
+		etmdata->state[i++] = trc_readl(ETMCCCTLR);
+		etmdata->state[i++] = trc_readl(ETMTRACEIDR);
+		/* filtering control registers */
+		etmdata->state[i++] = trc_readl(ETMVICTLR);
+		etmdata->state[i++] = trc_readl(ETMVIIECTLR);
+		etmdata->state[i++] = trc_readl(ETMVISSCTLR);
+		/* derived resource registers */
+		for (j = 0; j < etmdata->nr_seq_state - 1; j++)
+			i = etm_read_ssxr(etmdata->state, i, j);
+		etmdata->state[i++] = trc_readl(ETMSEQRSTEVR);
+		etmdata->state[i++] = trc_readl(ETMSEQSTR);
+		etmdata->state[i++] = trc_readl(ETMEXTINSELR);
+		for (j = 0; j < etmdata->nr_cntr; j++)
+			i = etm_read_crxr(etmdata->state, i, j);
+		/* resource selection registers */
+		for (j = 0; j < etmdata->nr_resource; j++)
+			i = etm_read_rsxr(etmdata->state, i, j + 2);
+		/* comparator registers */
+		for (j = 0; j < etmdata->nr_addr_cmp * 2; j++)
+			i = etm_read_acr(etmdata->state, i, j);
+		for (j = 0; j < etmdata->nr_data_cmp; j++)
+			i = etm_read_dvcr(etmdata->state, i, j);
+		for (j = 0; j < etmdata->nr_ctxid_cmp; j++)
+			i = etm_read_ccvr(etmdata->state, i, j);
+		etmdata->state[i++] = trc_readl(ETMCIDCCTLR0);
+		for (j = 0; j < etmdata->nr_vmid_cmp; j++)
+			i = etm_read_vcvr(etmdata->state, i, j);
+		/* single-shot comparator registers */
+		for (j = 0; j < etmdata->nr_ss_cmp; j++)
+			i = etm_read_sscr(etmdata->state, i, j);
+		/* program ctrl register */
+		etmdata->state[i++] = trc_readl(ETMPRGCTLR);
+
+		/* ensure trace unit is idle to be powered down */
+		for (count = TIMEOUT_US; (BVAL(trc_readl(ETMSTATR), 0)
+		     != 1) && count > 0; count--)
+			udelay(1);
+		if (count == 0)
+			pr_err_ratelimited("timeout waiting for idle state\n");
+
+		atomic_notifier_call_chain(&etm_save_notifier_list, 0, NULL);
+
+		break;
+	default:
+		pr_err_ratelimited("unsupported etm arch %d in %s\n",
+				   etmdata->arch, __func__);
+	}
+
+	/* Vote for ETM power/clock disable */
+	etm_clk_disable();
+}
+
+static int etm_write_ssxr(uint64_t *state, int i, int j)
+{
+	switch (j) {
+	case 0:
+		trc_write(state[i++], ETMSEQEVR0);
+		break;
+	case 1:
+		trc_write(state[i++], ETMSEQEVR1);
+		break;
+	case 2:
+		trc_write(state[i++], ETMSEQEVR2);
+		break;
+	default:
+		pr_err_ratelimited("idx %d out of bounds in %s\n", j, __func__);
+	}
+	return i;
+}
+
+static int etm_write_crxr(uint64_t *state, int i, int j)
+{
+	switch (j) {
+	case 0:
+		trc_write(state[i++], ETMCNTRLDVR0);
+		trc_write(state[i++], ETMCNTCTLR0);
+		trc_write(state[i++], ETMCNTVR0);
+		break;
+	case 1:
+		trc_write(state[i++], ETMCNTRLDVR1);
+		trc_write(state[i++], ETMCNTCTLR1);
+		trc_write(state[i++], ETMCNTVR1);
+		break;
+	case 2:
+		trc_write(state[i++], ETMCNTRLDVR2);
+		trc_write(state[i++], ETMCNTCTLR2);
+		trc_write(state[i++], ETMCNTVR2);
+		break;
+	case 3:
+		trc_write(state[i++], ETMCNTRLDVR3);
+		trc_write(state[i++], ETMCNTCTLR3);
+		trc_write(state[i++], ETMCNTVR3);
+		break;
+	default:
+		pr_err_ratelimited("idx %d out of bounds in %s\n", j, __func__);
+	}
+	return i;
+}
+
+static int etm_write_rsxr(uint64_t *state, int i, int j)
+{
+	switch (j) {
+	case 2:
+		trc_write(state[i++], ETMRSCTLR2);
+		break;
+	case 3:
+		trc_write(state[i++], ETMRSCTLR3);
+		break;
+	case 4:
+		trc_write(state[i++], ETMRSCTLR4);
+		break;
+	case 5:
+		trc_write(state[i++], ETMRSCTLR5);
+		break;
+	case 6:
+		trc_write(state[i++], ETMRSCTLR6);
+		break;
+	case 7:
+		trc_write(state[i++], ETMRSCTLR7);
+		break;
+	case 8:
+		trc_write(state[i++], ETMRSCTLR8);
+		break;
+	case 9:
+		trc_write(state[i++], ETMRSCTLR9);
+		break;
+	case 10:
+		trc_write(state[i++], ETMRSCTLR10);
+		break;
+	case 11:
+		trc_write(state[i++], ETMRSCTLR11);
+		break;
+	case 12:
+		trc_write(state[i++], ETMRSCTLR12);
+		break;
+	case 13:
+		trc_write(state[i++], ETMRSCTLR13);
+		break;
+	case 14:
+		trc_write(state[i++], ETMRSCTLR14);
+		break;
+	case 15:
+		trc_write(state[i++], ETMRSCTLR15);
+		break;
+	case 16:
+		trc_write(state[i++], ETMRSCTLR16);
+		break;
+	case 17:
+		trc_write(state[i++], ETMRSCTLR17);
+		break;
+	case 18:
+		trc_write(state[i++], ETMRSCTLR18);
+		break;
+	case 19:
+		trc_write(state[i++], ETMRSCTLR19);
+		break;
+	case 20:
+		trc_write(state[i++], ETMRSCTLR20);
+		break;
+	case 21:
+		trc_write(state[i++], ETMRSCTLR21);
+		break;
+	case 22:
+		trc_write(state[i++], ETMRSCTLR22);
+		break;
+	case 23:
+		trc_write(state[i++], ETMRSCTLR23);
+		break;
+	case 24:
+		trc_write(state[i++], ETMRSCTLR24);
+		break;
+	case 25:
+		trc_write(state[i++], ETMRSCTLR25);
+		break;
+	case 26:
+		trc_write(state[i++], ETMRSCTLR26);
+		break;
+	case 27:
+		trc_write(state[i++], ETMRSCTLR27);
+		break;
+	case 28:
+		trc_write(state[i++], ETMRSCTLR28);
+		break;
+	case 29:
+		trc_write(state[i++], ETMRSCTLR29);
+		break;
+	case 30:
+		trc_write(state[i++], ETMRSCTLR30);
+		break;
+	case 31:
+		trc_write(state[i++], ETMRSCTLR31);
+		break;
+	default:
+		pr_err_ratelimited("idx %d out of bounds in %s\n", j, __func__);
+	}
+	return i;
+}
+
+static int etm_write_acr(uint64_t *state, int i, int j)
+{
+	switch (j) {
+	case 0:
+		trc_write(state[i++], ETMACVR0);
+		trc_write(state[i++], ETMACATR0);
+		break;
+	case 1:
+		trc_write(state[i++], ETMACVR1);
+		trc_write(state[i++], ETMACATR1);
+		break;
+	case 2:
+		trc_write(state[i++], ETMACVR2);
+		trc_write(state[i++], ETMACATR2);
+		break;
+	case 3:
+		trc_write(state[i++], ETMACVR3);
+		trc_write(state[i++], ETMACATR3);
+		break;
+	case 4:
+		trc_write(state[i++], ETMACVR4);
+		trc_write(state[i++], ETMACATR4);
+		break;
+	case 5:
+		trc_write(state[i++], ETMACVR5);
+		trc_write(state[i++], ETMACATR5);
+		break;
+	case 6:
+		trc_write(state[i++], ETMACVR6);
+		trc_write(state[i++], ETMACATR6);
+		break;
+	case 7:
+		trc_write(state[i++], ETMACVR7);
+		trc_write(state[i++], ETMACATR7);
+		break;
+	case 8:
+		trc_write(state[i++], ETMACVR8);
+		trc_write(state[i++], ETMACATR8);
+		break;
+	case 9:
+		trc_write(state[i++], ETMACVR9);
+		trc_write(state[i++], ETMACATR9);
+		break;
+	case 10:
+		trc_write(state[i++], ETMACVR10);
+		trc_write(state[i++], ETMACATR10);
+		break;
+	case 11:
+		trc_write(state[i++], ETMACVR11);
+		trc_write(state[i++], ETMACATR11);
+		break;
+	case 12:
+		trc_write(state[i++], ETMACVR12);
+		trc_write(state[i++], ETMACATR12);
+		break;
+	case 13:
+		trc_write(state[i++], ETMACVR13);
+		trc_write(state[i++], ETMACATR13);
+		break;
+	case 14:
+		trc_write(state[i++], ETMACVR14);
+		trc_write(state[i++], ETMACATR14);
+		break;
+	case 15:
+		trc_write(state[i++], ETMACVR15);
+		trc_write(state[i++], ETMACATR15);
+		break;
+	default:
+		pr_err_ratelimited("idx %d out of bounds in %s\n", j, __func__);
+	}
+	return i;
+}
+
+static int etm_write_dvcr(uint64_t *state, int i, int j)
+{
+	switch (j) {
+	case 0:
+		trc_write(state[i++], ETMDVCVR0);
+		trc_write(state[i++], ETMDVCMR0);
+		break;
+	case 1:
+		trc_write(state[i++], ETMDVCVR1);
+		trc_write(state[i++], ETMDVCMR1);
+		break;
+	case 2:
+		trc_write(state[i++], ETMDVCVR2);
+		trc_write(state[i++], ETMDVCMR2);
+		break;
+	case 3:
+		trc_write(state[i++], ETMDVCVR3);
+		trc_write(state[i++], ETMDVCMR3);
+		break;
+	case 4:
+		trc_write(state[i++], ETMDVCVR4);
+		trc_write(state[i++], ETMDVCMR4);
+		break;
+	case 5:
+		trc_write(state[i++], ETMDVCVR5);
+		trc_write(state[i++], ETMDVCMR5);
+		break;
+	case 6:
+		trc_write(state[i++], ETMDVCVR6);
+		trc_write(state[i++], ETMDVCMR6);
+		break;
+	case 7:
+		trc_write(state[i++], ETMDVCVR7);
+		trc_write(state[i++], ETMDVCMR7);
+		break;
+	default:
+		pr_err_ratelimited("idx %d out of bounds in %s\n", j, __func__);
+	}
+	return i;
+}
+
+static int etm_write_ccvr(uint64_t *state, int i, int j)
+{
+	switch (j) {
+	case 0:
+		trc_write(state[i++], ETMCIDCVR0);
+		break;
+	case 1:
+		trc_write(state[i++], ETMCIDCVR1);
+		break;
+	case 2:
+		trc_write(state[i++], ETMCIDCVR2);
+		break;
+	case 3:
+		trc_write(state[i++], ETMCIDCVR3);
+		break;
+	case 4:
+		trc_write(state[i++], ETMCIDCVR4);
+		break;
+	case 5:
+		trc_write(state[i++], ETMCIDCVR5);
+		break;
+	case 6:
+		trc_write(state[i++], ETMCIDCVR6);
+		break;
+	case 7:
+		trc_write(state[i++], ETMCIDCVR7);
+		break;
+	default:
+		pr_err_ratelimited("idx %d out of bounds in %s\n", j, __func__);
+	}
+	return i;
+}
+
+static int etm_write_vcvr(uint64_t *state, int i, int j)
+{
+	switch (j) {
+	case 0:
+		trc_write(state[i++], ETMVMIDCVR0);
+		break;
+	case 1:
+		trc_write(state[i++], ETMVMIDCVR1);
+		break;
+	case 2:
+		trc_write(state[i++], ETMVMIDCVR2);
+		break;
+	case 3:
+		trc_write(state[i++], ETMVMIDCVR3);
+		break;
+	case 4:
+		trc_write(state[i++], ETMVMIDCVR4);
+		break;
+	case 5:
+		trc_write(state[i++], ETMVMIDCVR5);
+		break;
+	case 6:
+		trc_write(state[i++], ETMVMIDCVR6);
+		break;
+	case 7:
+		trc_write(state[i++], ETMVMIDCVR7);
+		break;
+	default:
+		pr_err_ratelimited("idx %d out of bounds in %s\n", j, __func__);
+	}
+	return i;
+}
+
+static int etm_write_sscr(uint64_t *state, int i, int j)
+{
+	switch (j) {
+	case 0:
+		trc_write(state[i++], ETMSSCCR0);
+		trc_write(state[i++], ETMSSCSR0);
+		trc_write(state[i++], ETMSSPCICR0);
+		break;
+	case 1:
+		trc_write(state[i++], ETMSSCCR1);
+		trc_write(state[i++], ETMSSCSR1);
+		trc_write(state[i++], ETMSSPCICR1);
+		break;
+	case 2:
+		trc_write(state[i++], ETMSSCCR2);
+		trc_write(state[i++], ETMSSCSR2);
+		trc_write(state[i++], ETMSSPCICR2);
+		break;
+	case 3:
+		trc_write(state[i++], ETMSSCCR3);
+		trc_write(state[i++], ETMSSCSR3);
+		trc_write(state[i++], ETMSSPCICR3);
+		break;
+	case 4:
+		trc_write(state[i++], ETMSSCCR4);
+		trc_write(state[i++], ETMSSCSR4);
+		trc_write(state[i++], ETMSSPCICR4);
+		break;
+	case 5:
+		trc_write(state[i++], ETMSSCCR5);
+		trc_write(state[i++], ETMSSCSR5);
+		trc_write(state[i++], ETMSSPCICR5);
+		break;
+	case 6:
+		trc_write(state[i++], ETMSSCCR6);
+		trc_write(state[i++], ETMSSCSR6);
+		trc_write(state[i++], ETMSSPCICR6);
+		break;
+	case 7:
+		trc_write(state[i++], ETMSSCCR7);
+		trc_write(state[i++], ETMSSCSR7);
+		trc_write(state[i++], ETMSSPCICR7);
+		break;
+	default:
+		pr_err_ratelimited("idx %d out of bounds in %s\n", j, __func__);
+	}
+	return i;
+}
+
+static inline void etm_si_restore_state(struct etm_ctx *etmdata)
+{
+	int i, j;
+
+	i = 0;
+
+	/* Vote for ETM power/clock enable */
+	etm_clk_enable();
+
+	switch (etmdata->arch) {
+	case ETM_ARCH_V4:
+		atomic_notifier_call_chain(&etm_restore_notifier_list, 0, NULL);
+
+		/* check OS lock is locked */
+		if (BVAL(trc_readl(ETMOSLSR), 1) != 1) {
+			pr_err_ratelimited("OS lock is unlocked\n");
+			trc_write(0x1, ETMOSLAR);
+			isb();
+		}
+
+		/* main control and configuration registers */
+		trc_write(etmdata->state[i++], ETMCONFIGR);
+		trc_write(etmdata->state[i++], ETMEVENTCTL0R);
+		trc_write(etmdata->state[i++], ETMEVENTCTL1R);
+		trc_write(etmdata->state[i++], ETMSTALLCTLR);
+		trc_write(etmdata->state[i++], ETMTSCTLR);
+		trc_write(etmdata->state[i++], ETMSYNCPR);
+		trc_write(etmdata->state[i++], ETMCCCTLR);
+		trc_write(etmdata->state[i++], ETMTRACEIDR);
+		/* filtering control registers */
+		trc_write(etmdata->state[i++], ETMVICTLR);
+		trc_write(etmdata->state[i++], ETMVIIECTLR);
+		trc_write(etmdata->state[i++], ETMVISSCTLR);
+		/* derived resources registers */
+		for (j = 0; j < etmdata->nr_seq_state - 1; j++)
+			i = etm_write_ssxr(etmdata->state, i, j);
+		trc_write(etmdata->state[i++], ETMSEQRSTEVR);
+		trc_write(etmdata->state[i++], ETMSEQSTR);
+		trc_write(etmdata->state[i++], ETMEXTINSELR);
+		for (j = 0; j < etmdata->nr_cntr; j++)
+			i = etm_write_crxr(etmdata->state, i, j);
+		/* resource selection registers */
+		for (j = 0; j < etmdata->nr_resource; j++)
+			i = etm_write_rsxr(etmdata->state, i, j + 2);
+		/* comparator registers */
+		for (j = 0; j < etmdata->nr_addr_cmp * 2; j++)
+			i = etm_write_acr(etmdata->state, i, j);
+		for (j = 0; j < etmdata->nr_data_cmp; j++)
+			i = etm_write_dvcr(etmdata->state, i, j);
+		for (j = 0; j < etmdata->nr_ctxid_cmp; j++)
+			i = etm_write_ccvr(etmdata->state, i, j);
+		trc_write(etmdata->state[i++], ETMCIDCCTLR0);
+		for (j = 0; j < etmdata->nr_vmid_cmp; j++)
+			i = etm_write_vcvr(etmdata->state, i, j);
+		/* single-shot comparator registers */
+		for (j = 0; j < etmdata->nr_ss_cmp; j++)
+			i = etm_write_sscr(etmdata->state, i, j);
+		/* program ctrl register */
+		trc_write(etmdata->state[i++], ETMPRGCTLR);
+
+		isb();
+		trc_write(0x0, ETMOSLAR);
+		break;
+	default:
+		pr_err_ratelimited("unsupported etm arch %d in %s\n",
+				   etmdata->arch, __func__);
+	}
+
+	/* Vote for ETM power/clock disable */
+	etm_clk_disable();
+}
+
+void msm_jtag_etm_save_state(void)
+{
+	int cpu;
+
+	cpu = raw_smp_processor_id();
+
+	if (!etm[cpu] || etm[cpu]->save_restore_disabled)
+		return;
+
+	if (etm[cpu]->save_restore_enabled) {
+		if (etm[cpu]->si_enable)
+			etm_si_save_state(etm[cpu]);
+		else
+			etm_mm_save_state(etm[cpu]);
+	}
+}
+EXPORT_SYMBOL(msm_jtag_etm_save_state);
+
+void msm_jtag_etm_restore_state(void)
+{
+	int cpu;
+
+	cpu = raw_smp_processor_id();
+
+	if (!etm[cpu] || etm[cpu]->save_restore_disabled)
+		return;
+
+	/*
+	 * The check ensuring that restore is only attempted after a
+	 * save has been done is handled by the callee functions.
+	 */
+	if (etm[cpu]->save_restore_enabled) {
+		if (etm[cpu]->si_enable)
+			etm_si_restore_state(etm[cpu]);
+		else
+			etm_mm_restore_state(etm[cpu]);
+	}
+}
+EXPORT_SYMBOL(msm_jtag_etm_restore_state);
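+
+/*
+ * Two access paths are implemented: when qcom,si-enable is set, ETMv4
+ * state is saved/restored via system instructions (trc_readl/trc_write
+ * on the ETM* register names); otherwise the memory-mapped interface is
+ * used (etm_readl/etm_writel on the TRC* offsets).
+ */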
+
+static inline bool etm_arch_supported(uint8_t arch)
+{
+	switch (arch) {
+	case ETM_ARCH_V4:
+		break;
+	default:
+		return false;
+	}
+	return true;
+}
+
+static void etm_os_lock_init(struct etm_ctx *etmdata)
+{
+	uint32_t etmoslsr;
+
+	etmoslsr = etm_readl(etmdata, TRCOSLSR);
+	if ((BVAL(etmoslsr, 0) == 0) && BVAL(etmoslsr, 3))
+		etmdata->os_lock_present = true;
+	else
+		etmdata->os_lock_present = false;
+}
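+
+/*
+ * TRCOSLSR.OSLM is split across bits [3] and [0]; the check above
+ * accepts OSLM == 0b10 (bit 3 set, bit 0 clear), which per the ETMv4
+ * architecture indicates that the OS Lock is implemented.
+ */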
+
+static void etm_init_arch_data(void *info)
+{
+	uint32_t val;
+	struct etm_ctx  *etmdata = info;
+
+	ETM_UNLOCK(etmdata);
+
+	etm_os_lock_init(etmdata);
+
+	val = etm_readl(etmdata, TRCIDR1);
+	etmdata->arch = BMVAL(val, 4, 11);
+
+	/* number of resources trace unit supports */
+	val = etm_readl(etmdata, TRCIDR4);
+	etmdata->nr_addr_cmp = BMVAL(val, 0, 3);
+	etmdata->nr_data_cmp = BMVAL(val, 4, 7);
+	etmdata->nr_resource = BMVAL(val, 16, 19);
+	etmdata->nr_ss_cmp = BMVAL(val, 20, 23);
+	etmdata->nr_ctxid_cmp = BMVAL(val, 24, 27);
+	etmdata->nr_vmid_cmp = BMVAL(val, 28, 31);
+
+	val = etm_readl(etmdata, TRCIDR5);
+	etmdata->nr_seq_state = BMVAL(val, 25, 27);
+	etmdata->nr_cntr = BMVAL(val, 28, 30);
+
+	ETM_LOCK(etmdata);
+}
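+
+/*
+ * Note: TRCIDR4 reports address comparators as pairs, which is why the
+ * save and restore paths iterate over nr_addr_cmp * 2 individual
+ * TRCACVRn/TRCACATRn registers.
+ */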
+
+static int jtag_mm_etm_callback(struct notifier_block *nfb,
+				unsigned long action,
+				void *hcpu)
+{
+	unsigned int cpu = (unsigned long)hcpu;
+	u64 version = 0;
+
+	if (!etm[cpu])
+		goto out;
+
+	switch (action & (~CPU_TASKS_FROZEN)) {
+	case CPU_STARTING:
+		spin_lock(&etm[cpu]->spinlock);
+		if (!etm[cpu]->init) {
+			etm_init_arch_data(etm[cpu]);
+			etm[cpu]->init = true;
+		}
+		spin_unlock(&etm[cpu]->spinlock);
+		break;
+
+	case CPU_ONLINE:
+		mutex_lock(&etm[cpu]->mutex);
+		if (etm[cpu]->enable) {
+			mutex_unlock(&etm[cpu]->mutex);
+			goto out;
+		}
+		if (etm_arch_supported(etm[cpu]->arch)) {
+			if (!scm_get_feat_version(TZ_DBG_ETM_FEAT_ID, &version)
+				&& version < TZ_DBG_ETM_VER)
+				etm[cpu]->save_restore_enabled = true;
+			else
+				pr_info("etm save-restore supported by TZ\n");
+		} else
+			pr_info("etm arch %u not supported\n", etm[cpu]->arch);
+		etm[cpu]->enable = true;
+		mutex_unlock(&etm[cpu]->mutex);
+		break;
+	default:
+		break;
+	}
+out:
+	return NOTIFY_OK;
+}
+
+static struct notifier_block jtag_mm_etm_notifier = {
+	.notifier_call = jtag_mm_etm_callback,
+};
+
+static bool skip_etm_save_restore(void)
+{
+	uint32_t id;
+	uint32_t version;
+
+	id = socinfo_get_id();
+	version = socinfo_get_version();
+
+	if (id == HW_SOC_ID_M8953 && SOCINFO_VERSION_MAJOR(version) == 1 &&
+	    SOCINFO_VERSION_MINOR(version) == 0)
+		return true;
+
+	return false;
+}
+
+static int jtag_mm_etm_probe(struct platform_device *pdev, uint32_t cpu)
+{
+	struct etm_ctx *etmdata;
+	struct resource *res;
+	struct device *dev = &pdev->dev;
+
+	/* Allocate memory per cpu */
+	etmdata = devm_kzalloc(dev, sizeof(struct etm_ctx), GFP_KERNEL);
+	if (!etmdata)
+		return -ENOMEM;
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "etm-base");
+	if (!res)
+		return -ENODEV;
+
+	etmdata->base = devm_ioremap(dev, res->start, resource_size(res));
+	if (!etmdata->base)
+		return -EINVAL;
+
+	etmdata->si_enable = of_property_read_bool(pdev->dev.of_node,
+						   "qcom,si-enable");
+	etmdata->save_restore_disabled = of_property_read_bool(
+					 pdev->dev.of_node,
+					 "qcom,save-restore-disable");
+
+	if (skip_etm_save_restore())
+		etmdata->save_restore_disabled = true;
+
+	/* Allocate etm state save space per core */
+	etmdata->state = devm_kzalloc(dev,
+				      MAX_ETM_STATE_SIZE * sizeof(uint64_t),
+				      GFP_KERNEL);
+	if (!etmdata->state)
+		return -ENOMEM;
+
+	spin_lock_init(&etmdata->spinlock);
+	mutex_init(&etmdata->mutex);
+
+	if (cnt++ == 0)
+		register_hotcpu_notifier(&jtag_mm_etm_notifier);
+
+	get_online_cpus();
+
+	if (!smp_call_function_single(cpu, etm_init_arch_data, etmdata,
+				      1))
+		etmdata->init = true;
+
+	etm[cpu] = etmdata;
+
+	put_online_cpus();
+
+	mutex_lock(&etmdata->mutex);
+	if (etmdata->init && !etmdata->enable) {
+		if (etm_arch_supported(etmdata->arch)) {
+			u64 version = 0;
+
+			if (!scm_get_feat_version(TZ_DBG_ETM_FEAT_ID, &version)
+				&& (version < TZ_DBG_ETM_VER))
+				etmdata->save_restore_enabled = true;
+			else
+				pr_info("etm save-restore supported by TZ\n");
+		} else
+			pr_info("etm arch %u not supported\n", etmdata->arch);
+		etmdata->enable = true;
+	}
+	mutex_unlock(&etmdata->mutex);
+	return 0;
+}
+
+static int jtag_mm_probe(struct platform_device *pdev)
+{
+	int ret, i, cpu = -1;
+	struct device *dev = &pdev->dev;
+	struct device_node *cpu_node;
+
+	if (msm_jtag_fuse_apps_access_disabled())
+		return -EPERM;
+
+	cpu_node = of_parse_phandle(pdev->dev.of_node,
+				    "qcom,coresight-jtagmm-cpu", 0);
+	if (!cpu_node) {
+		dev_err(dev, "Jtag-mm cpu handle not specified\n");
+		return -ENODEV;
+	}
+	for_each_possible_cpu(i) {
+		if (cpu_node == of_get_cpu_node(i, NULL)) {
+			cpu = i;
+			break;
+		}
+	}
+	if (cpu == -1) {
+		dev_err(dev, "invalid Jtag-mm cpu handle\n");
+		return -EINVAL;
+	}
+
+	clock[cpu] = devm_clk_get(dev, "core_clk");
+	if (IS_ERR(clock[cpu])) {
+		ret = PTR_ERR(clock[cpu]);
+		return ret;
+	}
+
+	ret = clk_set_rate(clock[cpu], CORESIGHT_CLK_RATE_TRACE);
+	if (ret)
+		return ret;
+
+	ret = clk_prepare_enable(clock[cpu]);
+	if (ret)
+		return ret;
+
+	platform_set_drvdata(pdev, clock[cpu]);
+
+	ret = jtag_mm_etm_probe(pdev, cpu);
+	if (ret)
+		clk_disable_unprepare(clock[cpu]);
+	return ret;
+}
+
+static void jtag_mm_etm_remove(void)
+{
+	unregister_hotcpu_notifier(&jtag_mm_etm_notifier);
+}
+
+static int jtag_mm_remove(struct platform_device *pdev)
+{
+	struct clk *clock = platform_get_drvdata(pdev);
+
+	if (--cnt == 0)
+		jtag_mm_etm_remove();
+	clk_disable_unprepare(clock);
+	return 0;
+}
+
+static const struct of_device_id msm_qdss_mm_match[] = {
+	{ .compatible = "qcom,jtagv8-mm"},
+	{}
+};
+
+static struct platform_driver jtag_mm_driver = {
+	.probe          = jtag_mm_probe,
+	.remove         = jtag_mm_remove,
+	.driver         = {
+		.name   = "msm-jtagv8-mm",
+		.owner	= THIS_MODULE,
+		.of_match_table	= msm_qdss_mm_match,
+		},
+};
+
+static int __init jtag_mm_init(void)
+{
+	return platform_driver_register(&jtag_mm_driver);
+}
+module_init(jtag_mm_init);
+
+static void __exit jtag_mm_exit(void)
+{
+	platform_driver_unregister(&jtag_mm_driver);
+}
+module_exit(jtag_mm_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("CoreSight DEBUGv8 and ETMv4 save-restore driver");
diff -Nruw linux-4.4.115-fbx/drivers/soc/qcom/memshare./heap_mem_ext_v01.c linux-4.4.115-fbx/drivers/soc/qcom/memshare/heap_mem_ext_v01.c
--- linux-4.4.115-fbx/drivers/soc/qcom/memshare./heap_mem_ext_v01.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/soc/qcom/memshare/heap_mem_ext_v01.c	2019-01-22 16:16:26.659274986 +0100
@@ -0,0 +1,472 @@
+/* Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/qmi_encdec.h>
+#include <soc/qcom/msm_qmi_interface.h>
+#include "heap_mem_ext_v01.h"
+
+struct elem_info mem_alloc_req_msg_data_v01_ei[] = {
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint32_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x01,
+		.offset         = offsetof(struct mem_alloc_req_msg_v01,
+					num_bytes),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(struct mem_alloc_req_msg_v01,
+					block_alignment_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint32_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(struct mem_alloc_req_msg_v01,
+					block_alignment),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
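+
+/*
+ * In these QMI element-info tables, tlv_type values below 0x10 mark
+ * mandatory TLVs and values from 0x10 upwards mark optional ones; each
+ * optional value is preceded by a QMI_OPT_FLAG entry with the same
+ * tlv_type (its *_valid flag), and QMI_EOTI terminates the table.
+ */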
+
+struct elem_info mem_alloc_resp_msg_data_v01_ei[] = {
+	{
+		.data_type      = QMI_SIGNED_2_BYTE_ENUM,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint16_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x01,
+		.offset         = offsetof(struct mem_alloc_resp_msg_v01,
+					resp),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(struct mem_alloc_resp_msg_v01,
+					handle_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_8_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint64_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(struct mem_alloc_resp_msg_v01,
+					handle),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x11,
+		.offset         = offsetof(struct mem_alloc_resp_msg_v01,
+					num_bytes_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint32_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x11,
+		.offset         = offsetof(struct mem_alloc_resp_msg_v01,
+					num_bytes),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info mem_free_req_msg_data_v01_ei[] = {
+	{
+		.data_type      = QMI_UNSIGNED_8_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint64_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x01,
+		.offset         = offsetof(struct mem_free_req_msg_v01,
+					handle),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info mem_free_resp_msg_data_v01_ei[] = {
+	{
+		.data_type      = QMI_SIGNED_2_BYTE_ENUM,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint16_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x01,
+		.offset         = offsetof(struct mem_free_resp_msg_v01,
+					resp),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info dhms_mem_alloc_addr_info_type_v01_ei[] = {
+	{
+		.data_type      = QMI_UNSIGNED_8_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint64_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+		.offset         = offsetof(struct
+					dhms_mem_alloc_addr_info_type_v01,
+					phy_addr),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint32_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+		.offset         = offsetof(struct
+					dhms_mem_alloc_addr_info_type_v01,
+					num_bytes),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info mem_alloc_generic_req_msg_data_v01_ei[] = {
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint32_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x01,
+		.offset         = offsetof(struct mem_alloc_generic_req_msg_v01,
+					num_bytes),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint32_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x02,
+		.offset         = offsetof(struct mem_alloc_generic_req_msg_v01,
+					client_id),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint32_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x03,
+		.offset         = offsetof(struct mem_alloc_generic_req_msg_v01,
+					proc_id),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint32_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x04,
+		.offset         = offsetof(struct mem_alloc_generic_req_msg_v01,
+					sequence_id),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(struct mem_alloc_generic_req_msg_v01,
+					alloc_contiguous_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(struct mem_alloc_generic_req_msg_v01,
+					alloc_contiguous),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x11,
+		.offset         = offsetof(struct mem_alloc_generic_req_msg_v01,
+					block_alignment_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint32_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x11,
+		.offset         = offsetof(struct mem_alloc_generic_req_msg_v01,
+					block_alignment),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info mem_alloc_generic_resp_msg_data_v01_ei[] = {
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = 1,
+		.elem_size      = sizeof(struct qmi_response_type_v01),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x02,
+		.offset         = offsetof(struct
+						mem_alloc_generic_resp_msg_v01,
+					resp),
+		.ei_array       = get_qmi_response_type_v01_ei(),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(struct
+						mem_alloc_generic_resp_msg_v01,
+					sequence_id_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint32_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(struct
+						mem_alloc_generic_resp_msg_v01,
+					sequence_id),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x11,
+		.offset         = offsetof(struct
+						mem_alloc_generic_resp_msg_v01,
+					dhms_mem_alloc_addr_info_valid),
+	},
+	{
+		.data_type      = QMI_DATA_LEN,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x11,
+		.offset         = offsetof(struct
+						mem_alloc_generic_resp_msg_v01,
+					dhms_mem_alloc_addr_info_len),
+	},
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = MAX_ARR_CNT_V01,
+		.elem_size      = sizeof(struct
+					dhms_mem_alloc_addr_info_type_v01),
+		.is_array       = VAR_LEN_ARRAY,
+		.tlv_type       = 0x11,
+		.offset         = offsetof(struct
+						mem_alloc_generic_resp_msg_v01,
+					dhms_mem_alloc_addr_info),
+		.ei_array       = dhms_mem_alloc_addr_info_type_v01_ei,
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info mem_free_generic_req_msg_data_v01_ei[] = {
+	{
+		.data_type      = QMI_DATA_LEN,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x01,
+		.offset         = offsetof(struct mem_free_generic_req_msg_v01,
+					dhms_mem_alloc_addr_info_len),
+	},
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = MAX_ARR_CNT_V01,
+		.elem_size      = sizeof(struct
+					dhms_mem_alloc_addr_info_type_v01),
+		.is_array       = VAR_LEN_ARRAY,
+		.tlv_type       = 0x01,
+		.offset         = offsetof(struct mem_free_generic_req_msg_v01,
+					dhms_mem_alloc_addr_info),
+		.ei_array       = dhms_mem_alloc_addr_info_type_v01_ei,
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(struct mem_free_generic_req_msg_v01,
+					client_id_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint32_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(struct mem_free_generic_req_msg_v01,
+					client_id),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x11,
+		.offset         = offsetof(struct mem_free_generic_req_msg_v01,
+					proc_id_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint32_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x11,
+		.offset         = offsetof(struct mem_free_generic_req_msg_v01,
+					proc_id),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info mem_free_generic_resp_msg_data_v01_ei[] = {
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = 1,
+		.elem_size      = sizeof(struct qmi_response_type_v01),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x02,
+		.offset         = offsetof(struct
+						mem_free_generic_resp_msg_v01,
+					resp),
+		.ei_array       = get_qmi_response_type_v01_ei(),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info mem_query_size_req_msg_data_v01_ei[] = {
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint32_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x01,
+		.offset         = offsetof(struct mem_query_size_req_msg_v01,
+					client_id),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(struct mem_query_size_req_msg_v01,
+					proc_id_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint32_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(struct mem_query_size_req_msg_v01,
+					proc_id),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info mem_query_size_resp_msg_data_v01_ei[] = {
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = 1,
+		.elem_size      = sizeof(struct qmi_response_type_v01),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x02,
+		.offset         = offsetof(struct
+						mem_query_size_rsp_msg_v01,
+					resp),
+		.ei_array       = get_qmi_response_type_v01_ei(),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(struct mem_query_size_rsp_msg_v01,
+					size_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint32_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(struct mem_query_size_rsp_msg_v01,
+					size),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
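The element-info tables above all follow one pattern: each elem_info entry describes a single TLV of the wire format, an optional message member contributes a QMI_OPT_FLAG entry for its *_valid byte followed by a payload entry sharing the same tlv_type, and a QMI_EOTI entry terminates the table. A minimal sketch of that pairing, assuming the msm_qmi_interface definitions this patch already uses (the example_resp_v01 message itself is hypothetical):

	struct example_resp_v01 {
		uint8_t size_valid;	/* must be set to true if size is passed */
		uint32_t size;
	};

	static struct elem_info example_resp_v01_ei[] = {
		{
			.data_type      = QMI_OPT_FLAG,
			.elem_len       = 1,
			.elem_size      = sizeof(uint8_t),
			.is_array       = NO_ARRAY,
			.tlv_type       = 0x10,
			.offset         = offsetof(struct example_resp_v01,
						size_valid),
		},
		{
			.data_type      = QMI_UNSIGNED_4_BYTE,
			.elem_len       = 1,
			.elem_size      = sizeof(uint32_t),
			.is_array       = NO_ARRAY,
			.tlv_type       = 0x10, /* same TLV id as its valid flag */
			.offset         = offsetof(struct example_resp_v01, size),
		},
		{
			/* terminator marking the end of the type information */
			.data_type      = QMI_EOTI,
			.is_array       = NO_ARRAY,
			.tlv_type       = QMI_COMMON_TLV_TYPE,
		},
	};

The decoder only touches the payload member when the flag TLV is present on the wire, which is why every optional member in the structures below is paired with a *_valid byte.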
diff -Nruw linux-4.4.115/drivers/soc/qcom/memshare/heap_mem_ext_v01.h linux-4.4.115-fbx/drivers/soc/qcom/memshare/heap_mem_ext_v01.h
--- linux-4.4.115/drivers/soc/qcom/memshare/heap_mem_ext_v01.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/soc/qcom/memshare/heap_mem_ext_v01.h	2019-01-22 16:16:26.659274986 +0100
@@ -0,0 +1,356 @@
+/* Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef HEAP_MEM_EXT_SERVICE_01_H
+#define HEAP_MEM_EXT_SERVICE_01_H
+
+#include <soc/qcom/msm_qmi_interface.h>
+
+#define MEM_ALLOC_REQ_MAX_MSG_LEN_V01 255
+#define MEM_FREE_REQ_MAX_MSG_LEN_V01 255
+#define MAX_ARR_CNT_V01 64
+
+struct dhms_mem_alloc_addr_info_type_v01 {
+	uint64_t phy_addr;
+	uint32_t num_bytes;
+};
+
+enum dhms_mem_proc_id_v01 {
+	/* To force a 32 bit signed enum.  Do not change or use */
+	DHMS_MEM_PROC_ID_MIN_ENUM_VAL_V01 = -2147483647,
+	/* Request from MPSS processor */
+	DHMS_MEM_PROC_MPSS_V01 = 0,
+	/* Request from ADSP processor */
+	DHMS_MEM_PROC_ADSP_V01 = 1,
+	/* Request from WCNSS processor */
+	DHMS_MEM_PROC_WCNSS_V01 = 2,
+	/* To force a 32 bit signed enum.  Do not change or use */
+	DHMS_MEM_PROC_ID_MAX_ENUM_VAL_V01 = 2147483647
+};
+
+enum dhms_mem_client_id_v01 {
+	/* To force a 32 bit signed enum.  Do not change or use */
+	DHMS_MEM_CLIENT_ID_MIN_ENUM_VAL_V01 = -2147483647,
+	/* Request from GPS Client */
+	DHMS_MEM_CLIENT_GPS_V01 = 0,
+	/* Invalid Client */
+	DHMS_MEM_CLIENT_INVALID = 1000,
+	/* To force a 32 bit signed enum.  Do not change or use */
+	DHMS_MEM_CLIENT_ID_MAX_ENUM_VAL_V01 = 2147483647
+};
+
+enum dhms_mem_block_align_enum_v01 {
+	/* To force a 32 bit signed enum.  Do not change or use */
+	DHMS_MEM_BLOCK_ALIGN_ENUM_MIN_ENUM_VAL_V01 = -2147483647,
+	/* Align allocated memory by 2 bytes */
+	DHMS_MEM_BLOCK_ALIGN_2_V01 = 0,
+	/* Align allocated memory by 4 bytes */
+	DHMS_MEM_BLOCK_ALIGN_4_V01 = 1,
+	/* Align allocated memory by 8 bytes */
+	DHMS_MEM_BLOCK_ALIGN_8_V01 = 2,
+	/* Align allocated memory by 16 bytes */
+	DHMS_MEM_BLOCK_ALIGN_16_V01 = 3,
+	/* Align allocated memory by 32 bytes */
+	DHMS_MEM_BLOCK_ALIGN_32_V01 = 4,
+	/* Align allocated memory by 64 bytes */
+	DHMS_MEM_BLOCK_ALIGN_64_V01 = 5,
+	/* Align allocated memory by 128 bytes */
+	DHMS_MEM_BLOCK_ALIGN_128_V01 = 6,
+	/* Align allocated memory by 256 bytes */
+	DHMS_MEM_BLOCK_ALIGN_256_V01 = 7,
+	/* Align allocated memory by 512 bytes */
+	DHMS_MEM_BLOCK_ALIGN_512_V01 = 8,
+	/* Align allocated memory by 1024 bytes */
+	DHMS_MEM_BLOCK_ALIGN_1K_V01 = 9,
+	/* Align allocated memory by 2048 bytes */
+	DHMS_MEM_BLOCK_ALIGN_2K_V01 = 10,
+	/* Align allocated memory by 4096 bytes */
+	DHMS_MEM_BLOCK_ALIGN_4K_V01 = 11,
+	/* To force a 32 bit signed enum.  Do not change or use */
+	DHMS_MEM_BLOCK_ALIGN_ENUM_MAX_ENUM_VAL_V01 = 2147483647
+};
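+
+/*
+ * Illustrative helper (a sketch, not used anywhere in this driver): the
+ * alignment enumerators encode powers of two, so the byte count can be
+ * recovered as 2 << value (DHMS_MEM_BLOCK_ALIGN_2_V01 = 0 -> 2 bytes,
+ * ..., DHMS_MEM_BLOCK_ALIGN_4K_V01 = 11 -> 4096 bytes).
+ */
+static inline unsigned long dhms_mem_block_align_to_bytes(
+		enum dhms_mem_block_align_enum_v01 align)
+{
+	return 2UL << align;
+}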
+
+/* Request Message; This command is used to request multiple
+ * physically contiguous memory blocks from the server memory
+ * subsystem
+ */
+struct mem_alloc_req_msg_v01 {
+
+	/* Mandatory */
+	/* Requested size */
+	uint32_t num_bytes;
+
+	/* Optional */
+	/* Must be set to true if block_alignment is being passed */
+	uint8_t block_alignment_valid;
+	/* The block alignment for the memory block to be allocated */
+	enum dhms_mem_block_align_enum_v01 block_alignment;
+};  /* Message */
+
+/* Response Message; Response to the request for multiple
+ * physically contiguous memory blocks from the server memory
+ * subsystem
+ */
+struct mem_alloc_resp_msg_v01 {
+
+	/* Mandatory */
+	/* Result Code */
+	/* The result of the requested memory operation */
+	enum qmi_result_type_v01 resp;
+
+	/* Optional */
+	/* Memory Block Handle */
+	/* Must be set to true if handle is being passed */
+	uint8_t handle_valid;
+	/* The physical address of the memory allocated on the HLOS */
+	uint64_t handle;
+
+	/* Optional */
+	/* Memory block size */
+	/* Must be set to true if num_bytes is being passed */
+	uint8_t num_bytes_valid;
+	/* The number of bytes actually allocated for the request.
+	 * This value can be smaller than the size requested in
+	 * QMI_DHMS_MEM_ALLOC_REQ_MSG.
+	 */
+	uint32_t num_bytes;
+};  /* Message */
+
+/* Request Message; This command is used to release multiple
+ * physically contiguous memory blocks back to the server memory
+ * subsystem
+ */
+struct mem_free_req_msg_v01 {
+
+	/* Mandatory */
+	/* Physical address of the memory to be freed */
+	uint64_t handle;
+};  /* Message */
+
+/* Response Message; Response to the request to release multiple
+ * physically contiguous memory blocks back to the server memory
+ * subsystem
+ */
+struct mem_free_resp_msg_v01 {
+
+	/* Mandatory */
+	/* Result of the requested memory operation. TODO: verify the
+	 * asynchronous operation for free.
+	 */
+	enum qmi_result_type_v01 resp;
+};  /* Message */
+
+/* Request Message; This command is used to request multiple
+ * physically contiguous memory blocks from the server memory
+ * subsystem
+ */
+struct mem_alloc_generic_req_msg_v01 {
+
+	/* Mandatory */
+	/* Requested size */
+	uint32_t num_bytes;
+
+	/* Mandatory */
+	/* client id */
+	enum dhms_mem_client_id_v01 client_id;
+
+	/* Mandatory */
+	/* Peripheral Id*/
+	enum dhms_mem_proc_id_v01 proc_id;
+
+	/* Mandatory */
+	/* Sequence id */
+	uint32_t sequence_id;
+
+	/* Optional */
+	/*  alloc_contiguous */
+	/* Must be set to true if alloc_contiguous is being passed */
+	uint8_t alloc_contiguous_valid;
+
+	/* Alloc_contiguous is used to identify that clients are requesting
+	 * for contiguous or non contiguous memory, default is contiguous
+	* 0 = non contiguous else contiguous
+	 */
+	uint8_t alloc_contiguous;
+
+	/* Optional */
+	/* Must be set to true if block_alignment is being passed */
+	uint8_t block_alignment_valid;
+
+	/* The block alignment for the memory block to be allocated */
+	enum dhms_mem_block_align_enum_v01 block_alignment;
+
+};  /* Message */
+
+/* Response Message; Response to the request for multiple
+ * physically contiguous memory blocks from the server memory
+ * subsystem
+ */
+struct mem_alloc_generic_resp_msg_v01 {
+
+	/* Mandatory */
+	/* Result Code */
+	/* The result of the requested memory operation */
+	struct qmi_response_type_v01 resp;
+
+	/* Optional */
+	/* Sequence ID */
+	/* Must be set to true if sequence_id is being passed */
+	uint8_t sequence_id_valid;
+
+	/* Optional */
+	/* Sequence id */
+	uint32_t sequence_id;
+
+	/* Optional */
+	/* Memory Block Handle */
+	/* Must be set to true if dhms_mem_alloc_addr_info is being passed */
+	uint8_t dhms_mem_alloc_addr_info_valid;
+
+	/* Optional */
+	/* Handle Size */
+	uint32_t dhms_mem_alloc_addr_info_len;
+
+	/* Optional */
+	/* The physical address of the memory allocated on the HLOS */
+	struct dhms_mem_alloc_addr_info_type_v01
+		dhms_mem_alloc_addr_info[MAX_ARR_CNT_V01];
+
+};  /* Message */
+
+/* Request Message; This command is used to release multiple
+ * physically contiguous memory blocks back to the server memory
+ * subsystem
+ */
+struct mem_free_generic_req_msg_v01 {
+
+	/* Mandatory */
+	/* Must be set to the number of elements in the array */
+	uint32_t dhms_mem_alloc_addr_info_len;
+
+	/* Mandatory */
+	/* Physical address and size of the memory allocated
+	 * on the HLOS to be freed.
+	 */
+	struct dhms_mem_alloc_addr_info_type_v01
+			dhms_mem_alloc_addr_info[MAX_ARR_CNT_V01];
+
+	/* Optional */
+	/* Client ID */
+	/* Must be set to true if client_id is being passed */
+	uint8_t client_id_valid;
+
+	/* Optional */
+	/* Client Id */
+	enum dhms_mem_client_id_v01 client_id;
+
+	/* Optional */
+	/* Proc ID */
+	/* Must be set to true if proc_id is being passed */
+	uint8_t proc_id_valid;
+
+	/* Optional */
+	/* Peripheral */
+	enum dhms_mem_proc_id_v01 proc_id;
+
+};  /* Message */
+
+/* Response Message; Response to the request to release multiple
+ * physically contiguous memory blocks back to the server memory
+ * subsystem
+ */
+struct mem_free_generic_resp_msg_v01 {
+
+	/*
+	 * Mandatory
+	 * Result of the requested memory operation. TODO: verify the
+	 * asynchronous operation for free.
+	 */
+	struct qmi_response_type_v01 resp;
+
+};  /* Message */
+
+struct mem_query_size_req_msg_v01 {
+
+	/* Mandatory */
+	enum dhms_mem_client_id_v01 client_id;
+
+	/*
+	 * Optional
+	 * Proc ID
+	 * proc_id_valid must be set to true if proc_id is being passed
+	 */
+	uint8_t proc_id_valid;
+
+	enum dhms_mem_proc_id_v01 proc_id;
+};  /* Message */
+
+struct mem_query_size_rsp_msg_v01 {
+
+	/*
+	 * Mandatory
+	 * Result Code
+	 */
+	struct qmi_response_type_v01 resp;
+
+	/*
+	 * Optional
+	 * size_valid must be set to true if size is being passed
+	 */
+	uint8_t size_valid;
+
+	uint32_t size;
+};  /* Message */
+
+extern struct elem_info mem_alloc_req_msg_data_v01_ei[];
+extern struct elem_info mem_alloc_resp_msg_data_v01_ei[];
+extern struct elem_info mem_free_req_msg_data_v01_ei[];
+extern struct elem_info mem_free_resp_msg_data_v01_ei[];
+extern struct elem_info mem_alloc_generic_req_msg_data_v01_ei[];
+extern struct elem_info mem_alloc_generic_resp_msg_data_v01_ei[];
+extern struct elem_info mem_free_generic_req_msg_data_v01_ei[];
+extern struct elem_info mem_free_generic_resp_msg_data_v01_ei[];
+extern struct elem_info mem_query_size_req_msg_data_v01_ei[];
+extern struct elem_info mem_query_size_resp_msg_data_v01_ei[];
+
+/*Service Message Definition*/
+#define MEM_ALLOC_REQ_MSG_V01 0x0020
+#define MEM_ALLOC_RESP_MSG_V01 0x0020
+#define MEM_FREE_REQ_MSG_V01 0x0021
+#define MEM_FREE_RESP_MSG_V01 0x0021
+#define MEM_ALLOC_GENERIC_REQ_MSG_V01 0x0022
+#define MEM_ALLOC_GENERIC_RESP_MSG_V01 0x0022
+#define MEM_FREE_GENERIC_REQ_MSG_V01 0x0023
+#define MEM_FREE_GENERIC_RESP_MSG_V01 0x0023
+#define MEM_QUERY_SIZE_REQ_MSG_V01	0x0024
+#define MEM_QUERY_SIZE_RESP_MSG_V01	0x0024
+
+#endif
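A usage sketch for the message types above (illustrative only; all values are hypothetical): a remote client requesting 64 KiB of contiguous, 4 KiB-aligned memory would fill in the generic request as below, encode it against mem_alloc_generic_req_msg_data_v01_ei and send it with message id MEM_ALLOC_GENERIC_REQ_MSG_V01:

	struct mem_alloc_generic_req_msg_v01 req = {
		.num_bytes              = 64 * 1024,
		.client_id              = DHMS_MEM_CLIENT_GPS_V01,
		.proc_id                = DHMS_MEM_PROC_MPSS_V01,
		.sequence_id            = 1,
		.alloc_contiguous_valid = 1,
		.alloc_contiguous       = 1,	/* non-zero = contiguous */
		.block_alignment_valid  = 1,
		.block_alignment        = DHMS_MEM_BLOCK_ALIGN_4K_V01,
	};

The matching response carries the result code plus, when dhms_mem_alloc_addr_info_valid is set, up to MAX_ARR_CNT_V01 physical address/size pairs describing the allocated blocks.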
diff -Nruw linux-4.4.115/drivers/soc/qcom/memshare/Kconfig linux-4.4.115-fbx/drivers/soc/qcom/memshare/Kconfig
--- linux-4.4.115/drivers/soc/qcom/memshare/Kconfig	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/soc/qcom/memshare/Kconfig	2019-01-22 16:16:26.659274986 +0100
@@ -0,0 +1,9 @@
+config MEM_SHARE_QMI_SERVICE
+       depends on MSM_QMI_INTERFACE
+       bool "Shared Heap for external processors"
+       help
+		Memory Share Kernel Qualcomm Messaging Interface Service
+		receives requests from the Modem Processor Subsystem for
+		heap alloc/free from the Application Processor Subsystem
+		and sends a response back to the client with the proper
+		handle/address.
diff -Nruw linux-4.4.115/drivers/soc/qcom/memshare/Makefile linux-4.4.115-fbx/drivers/soc/qcom/memshare/Makefile
--- linux-4.4.115/drivers/soc/qcom/memshare/Makefile	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/soc/qcom/memshare/Makefile	2019-01-22 16:16:26.659274986 +0100
@@ -0,0 +1 @@
+obj-$(CONFIG_MEM_SHARE_QMI_SERVICE) := heap_mem_ext_v01.o msm_memshare.o
\ No newline at end of file
diff -Nruw linux-4.4.115/drivers/soc/qcom/memshare/msm_memshare.c linux-4.4.115-fbx/drivers/soc/qcom/memshare/msm_memshare.c
--- linux-4.4.115/drivers/soc/qcom/memshare/msm_memshare.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/soc/qcom/memshare/msm_memshare.c	2019-10-29 09:26:24.813214628 +0100
@@ -0,0 +1,1107 @@
+/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/dma-mapping.h>
+#include <linux/mutex.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/notifier.h>
+#include <soc/qcom/subsystem_restart.h>
+#include <soc/qcom/subsystem_notif.h>
+#include <soc/qcom/msm_qmi_interface.h>
+#include <soc/qcom/scm.h>
+#include "msm_memshare.h"
+#include "heap_mem_ext_v01.h"
+
+#include <soc/qcom/secure_buffer.h>
+#include <soc/qcom/ramdump.h>
+
+/* Macros */
+#define MEMSHARE_DEV_NAME "memshare"
+#define MEMSHARE_CHILD_DEV_NAME "memshare_child"
+static DEFINE_DMA_ATTRS(attrs);
+
+static struct qmi_handle *mem_share_svc_handle;
+static void mem_share_svc_recv_msg(struct work_struct *work);
+static DECLARE_DELAYED_WORK(work_recv_msg, mem_share_svc_recv_msg);
+static struct workqueue_struct *mem_share_svc_workqueue;
+static uint64_t bootup_request;
+static bool ramdump_event;
+static void *memshare_ramdump_dev[MAX_CLIENTS];
+static struct device *memshare_dev[MAX_CLIENTS];
+
+/* Memshare Driver Structure */
+struct memshare_driver {
+	struct device *dev;
+	struct mutex mem_share;
+	struct mutex mem_free;
+	struct work_struct memshare_init_work;
+};
+
+struct memshare_child {
+	struct device *dev;
+};
+
+static struct memshare_driver *memsh_drv;
+static struct memshare_child *memsh_child;
+static struct mem_blocks memblock[MAX_CLIENTS];
+static uint32_t num_clients;
+static struct msg_desc mem_share_svc_alloc_req_desc = {
+	.max_msg_len = MEM_ALLOC_REQ_MAX_MSG_LEN_V01,
+	.msg_id = MEM_ALLOC_REQ_MSG_V01,
+	.ei_array = mem_alloc_req_msg_data_v01_ei,
+};
+
+static struct msg_desc mem_share_svc_alloc_resp_desc = {
+	.max_msg_len = MEM_ALLOC_REQ_MAX_MSG_LEN_V01,
+	.msg_id = MEM_ALLOC_RESP_MSG_V01,
+	.ei_array = mem_alloc_resp_msg_data_v01_ei,
+};
+
+static struct msg_desc mem_share_svc_free_req_desc = {
+	.max_msg_len = MEM_FREE_REQ_MAX_MSG_LEN_V01,
+	.msg_id = MEM_FREE_REQ_MSG_V01,
+	.ei_array = mem_free_req_msg_data_v01_ei,
+};
+
+static struct msg_desc mem_share_svc_free_resp_desc = {
+	.max_msg_len = MEM_FREE_REQ_MAX_MSG_LEN_V01,
+	.msg_id = MEM_FREE_RESP_MSG_V01,
+	.ei_array = mem_free_resp_msg_data_v01_ei,
+};
+
+static struct msg_desc mem_share_svc_alloc_generic_req_desc = {
+	.max_msg_len = MEM_ALLOC_REQ_MAX_MSG_LEN_V01,
+	.msg_id = MEM_ALLOC_GENERIC_REQ_MSG_V01,
+	.ei_array = mem_alloc_generic_req_msg_data_v01_ei,
+};
+
+static struct msg_desc mem_share_svc_alloc_generic_resp_desc = {
+	.max_msg_len = MEM_ALLOC_REQ_MAX_MSG_LEN_V01,
+	.msg_id = MEM_ALLOC_GENERIC_RESP_MSG_V01,
+	.ei_array = mem_alloc_generic_resp_msg_data_v01_ei,
+};
+
+static struct msg_desc mem_share_svc_free_generic_req_desc = {
+	.max_msg_len = MEM_FREE_REQ_MAX_MSG_LEN_V01,
+	.msg_id = MEM_FREE_GENERIC_REQ_MSG_V01,
+	.ei_array = mem_free_generic_req_msg_data_v01_ei,
+};
+
+static struct msg_desc mem_share_svc_free_generic_resp_desc = {
+	.max_msg_len = MEM_FREE_REQ_MAX_MSG_LEN_V01,
+	.msg_id = MEM_FREE_GENERIC_RESP_MSG_V01,
+	.ei_array = mem_free_generic_resp_msg_data_v01_ei,
+};
+
+static struct msg_desc mem_share_svc_size_query_req_desc = {
+	.max_msg_len = MEM_FREE_REQ_MAX_MSG_LEN_V01,
+	.msg_id = MEM_QUERY_SIZE_REQ_MSG_V01,
+	.ei_array = mem_query_size_req_msg_data_v01_ei,
+};
+
+static struct msg_desc mem_share_svc_size_query_resp_desc = {
+	.max_msg_len = MEM_FREE_REQ_MAX_MSG_LEN_V01,
+	.msg_id = MEM_QUERY_SIZE_RESP_MSG_V01,
+	.ei_array = mem_query_size_resp_msg_data_v01_ei,
+};
+
+/*
+ *  This API creates ramdump dev handlers
+ *  for each of the memshare clients.
+ *  These dev handlers will be used for
+ *  extracting the ramdump for loaned memory
+ *  segments.
+ */
+
+static int mem_share_configure_ramdump(void)
+{
+	char client_name[18] = "memshare_";
+	char *clnt = NULL;
+
+	switch (num_clients) {
+	case 0:
+		clnt = "GPS";
+		break;
+	case 1:
+		clnt = "FTM";
+		break;
+	case 2:
+		clnt = "DIAG";
+		break;
+	default:
+		pr_info("memshare: no memshare clients registered\n");
+		return -EINVAL;
+	}
+
+	snprintf(client_name, 18, "memshare_%s", clnt);
+	if (memshare_dev[num_clients]) {
+		memshare_ramdump_dev[num_clients] =
+			create_ramdump_device(client_name,
+				memshare_dev[num_clients]);
+	} else {
+		pr_err("memshare:%s: invalid memshare device\n", __func__);
+		return -ENODEV;
+	}
+	if (IS_ERR_OR_NULL(memshare_ramdump_dev[num_clients])) {
+		pr_err("memshare: %s: Unable to create memshare ramdump device.\n",
+				__func__);
+		memshare_ramdump_dev[num_clients] = NULL;
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+static int check_client(int client_id, int proc, int request)
+{
+	int i = 0, rc;
+	int found = DHMS_MEM_CLIENT_INVALID;
+
+	for (i = 0; i < MAX_CLIENTS; i++) {
+		if (memblock[i].client_id == client_id &&
+				memblock[i].peripheral == proc) {
+			found = i;
+			break;
+		}
+	}
+	if ((found == DHMS_MEM_CLIENT_INVALID) && !request) {
+		pr_debug("memshare: No registered client, adding a new client\n");
+		/* Add a new client */
+		for (i = 0; i < MAX_CLIENTS; i++) {
+			if (memblock[i].client_id == DHMS_MEM_CLIENT_INVALID) {
+				memblock[i].client_id = client_id;
+				memblock[i].alloted = 0;
+				memblock[i].guarantee = 0;
+				memblock[i].peripheral = proc;
+				found = i;
+
+				if (!memblock[i].file_created) {
+					rc = mem_share_configure_ramdump();
+					if (rc)
+						pr_err("In %s, Cannot create ramdump for client: %d\n",
+							__func__, client_id);
+					else
+						memblock[i].file_created = 1;
+				}
+
+				break;
+			}
+		}
+	}
+
+	return found;
+}
+
+void free_client(int id)
+{
+	memblock[id].phy_addr = 0;
+	memblock[id].virtual_addr = 0;
+	memblock[id].alloted = 0;
+	memblock[id].guarantee = 0;
+	memblock[id].sequence_id = -1;
+	memblock[id].memory_type = MEMORY_CMA;
+}
+
+void fill_alloc_response(struct mem_alloc_generic_resp_msg_v01 *resp,
+						int id, int *flag)
+{
+	resp->sequence_id_valid = 1;
+	resp->sequence_id = memblock[id].sequence_id;
+	resp->dhms_mem_alloc_addr_info_valid = 1;
+	resp->dhms_mem_alloc_addr_info_len = 1;
+	resp->dhms_mem_alloc_addr_info[0].phy_addr = memblock[id].phy_addr;
+	resp->dhms_mem_alloc_addr_info[0].num_bytes = memblock[id].size;
+	if (!*flag) {
+		resp->resp.result = QMI_RESULT_SUCCESS_V01;
+		resp->resp.error = QMI_ERR_NONE_V01;
+	} else {
+		resp->resp.result = QMI_RESULT_FAILURE_V01;
+		resp->resp.error = QMI_ERR_NO_MEMORY_V01;
+	}
+}
+
+void initialize_client(void)
+{
+	int i;
+
+	for (i = 0; i < MAX_CLIENTS; i++) {
+		memblock[i].alloted = 0;
+		memblock[i].size = 0;
+		memblock[i].guarantee = 0;
+		memblock[i].phy_addr = 0;
+		memblock[i].virtual_addr = 0;
+		memblock[i].client_id = DHMS_MEM_CLIENT_INVALID;
+		memblock[i].peripheral = -1;
+		memblock[i].sequence_id = -1;
+		memblock[i].memory_type = MEMORY_CMA;
+		memblock[i].free_memory = 0;
+		memblock[i].hyp_mapping = 0;
+		memblock[i].file_created = 0;
+	}
+	dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &attrs);
+}
+
+/*
+ *  This API initializes the ramdump segments with the physical
+ *  address and size of the memshare clients. Ramdump extraction is
+ *  skipped if the memshare client has no memory allotted. It calls
+ *  the ramdump API to extract the ramdump in ELF format.
+ */
+
+static int mem_share_do_ramdump(void)
+{
+	int i = 0, ret;
+	char *client_name = NULL;
+
+	for (i = 0; i < num_clients; i++) {
+
+		struct ramdump_segment *ramdump_segments_tmp = NULL;
+
+		switch (i) {
+		case 0:
+			client_name = "GPS";
+			break;
+		case 1:
+			client_name = "FTM";
+			break;
+		case 2:
+			client_name = "DIAG";
+			break;
+		default:
+			pr_info("memshare: no memshare clients registered\n");
+			break;
+		}
+
+		if (!memblock[i].alloted) {
+			pr_err("memshare:%s memblock is not alloted\n",
+			client_name);
+			continue;
+		}
+
+		ramdump_segments_tmp = kcalloc(1,
+			sizeof(struct ramdump_segment),
+			GFP_KERNEL);
+		if (!ramdump_segments_tmp)
+			return -ENOMEM;
+
+		ramdump_segments_tmp[0].size = memblock[i].size;
+		ramdump_segments_tmp[0].address = memblock[i].phy_addr;
+
+		pr_debug("memshare: %s:%s client:phy_address = %llx, size = %d\n",
+		__func__, client_name,
+		(unsigned long long) memblock[i].phy_addr, memblock[i].size);
+
+		ret = do_elf_ramdump(memshare_ramdump_dev[i],
+					ramdump_segments_tmp, 1);
+		if (ret < 0) {
+			pr_err("memshare: Unable to dump: %d\n", ret);
+			kfree(ramdump_segments_tmp);
+			return ret;
+		}
+		kfree(ramdump_segments_tmp);
+	}
+	return 0;
+}
+
+static int modem_notifier_cb(struct notifier_block *this, unsigned long code,
+					void *_cmd)
+{
+	int i;
+	int ret;
+	u32 source_vmlist[2] = {VMID_HLOS, VMID_MSS_MSA};
+	int dest_vmids[1] = {VMID_HLOS};
+	int dest_perms[1] = {PERM_READ|PERM_WRITE|PERM_EXEC};
+	struct notif_data *notifdata = NULL;
+
+	mutex_lock(&memsh_drv->mem_share);
+
+	switch (code) {
+
+	case SUBSYS_BEFORE_SHUTDOWN:
+		bootup_request++;
+		break;
+
+	case SUBSYS_RAMDUMP_NOTIFICATION:
+		ramdump_event = 1;
+		break;
+
+	case SUBSYS_BEFORE_POWERUP:
+		if (_cmd) {
+			notifdata = (struct notif_data *) _cmd;
+		} else {
+			ramdump_event = 0;
+			break;
+		}
+
+		if (notifdata->enable_ramdump && ramdump_event) {
+			pr_info("memshare: %s, Ramdump collection is enabled\n",
+					__func__);
+			ret = mem_share_do_ramdump();
+			if (ret)
+				pr_err("Ramdump collection failed\n");
+			ramdump_event = 0;
+		}
+		break;
+
+	case SUBSYS_AFTER_POWERUP:
+		pr_debug("memshare: Modem has booted up\n");
+		for (i = 0; i < MAX_CLIENTS; i++) {
+			if (memblock[i].free_memory > 0 &&
+					bootup_request >= 2) {
+				memblock[i].free_memory -= 1;
+				pr_debug("memshare: free_memory count: %d for clinet id: %d\n",
+					memblock[i].free_memory,
+					memblock[i].client_id);
+			}
+
+			if (memblock[i].free_memory == 0) {
+				if (memblock[i].peripheral ==
+					DHMS_MEM_PROC_MPSS_V01 &&
+					!memblock[i].guarantee &&
+					memblock[i].alloted) {
+					pr_debug("memshare: Freeing memory for client id: %d\n",
+						memblock[i].client_id);
+					ret = hyp_assign_phys(
+							memblock[i].phy_addr,
+							memblock[i].size,
+							source_vmlist,
+							2, dest_vmids,
+							dest_perms, 1);
+					if (ret &&
+						memblock[i].hyp_mapping == 1) {
+						/*
+						 * This is an error case as hyp
+						 * mapping was successful
+						 * earlier but during unmap
+						 * it led to failure.
+						 */
+						pr_err("memshare: %s, failed to unmap the region\n",
+							__func__);
+						memblock[i].hyp_mapping = 1;
+					} else {
+						memblock[i].hyp_mapping = 0;
+					}
+					dma_free_attrs(memsh_drv->dev,
+						memblock[i].size,
+						memblock[i].virtual_addr,
+						memblock[i].phy_addr,
+						&attrs);
+					free_client(i);
+				}
+			}
+		}
+		bootup_request++;
+		break;
+
+	default:
+		pr_debug("Memshare: code: %lu\n", code);
+		break;
+	}
+
+	mutex_unlock(&memsh_drv->mem_share);
+	return NOTIFY_DONE;
+}
+
+static struct notifier_block nb = {
+	.notifier_call = modem_notifier_cb,
+};
+
+static void shared_hyp_mapping(int client_id)
+{
+	int ret;
+	u32 source_vmlist[1] = {VMID_HLOS};
+	int dest_vmids[2] = {VMID_HLOS, VMID_MSS_MSA};
+	int dest_perms[2] = {PERM_READ|PERM_WRITE,
+				PERM_READ|PERM_WRITE};
+
+	if (client_id == DHMS_MEM_CLIENT_INVALID) {
+		pr_err("memshare: %s, Invalid Client\n", __func__);
+		return;
+	}
+
+	ret = hyp_assign_phys(memblock[client_id].phy_addr,
+			memblock[client_id].size,
+			source_vmlist, 1, dest_vmids,
+			dest_perms, 2);
+
+	if (ret != 0) {
+		pr_err("memshare: hyp_assign_phys failed size=%u err=%d\n",
+				memblock[client_id].size, ret);
+		return;
+	}
+	memblock[client_id].hyp_mapping = 1;
+}
+
+static int handle_alloc_req(void *req_h, void *req, void *conn_h)
+{
+	struct mem_alloc_req_msg_v01 *alloc_req;
+	struct mem_alloc_resp_msg_v01 alloc_resp;
+	int rc = 0;
+
+	alloc_req = (struct mem_alloc_req_msg_v01 *)req;
+	pr_debug("%s: Received Alloc Request\n", __func__);
+	pr_debug("%s: req->num_bytes = %d\n", __func__, alloc_req->num_bytes);
+	mutex_lock(&memsh_drv->mem_share);
+	if (!memblock[GPS].size) {
+		memset(&alloc_resp, 0, sizeof(struct mem_alloc_resp_msg_v01));
+		alloc_resp.resp = QMI_RESULT_FAILURE_V01;
+		rc = memshare_alloc(memsh_drv->dev, alloc_req->num_bytes,
+					&memblock[GPS]);
+	}
+	alloc_resp.num_bytes_valid = 1;
+	alloc_resp.num_bytes =  alloc_req->num_bytes;
+	alloc_resp.handle_valid = 1;
+	alloc_resp.handle = memblock[GPS].phy_addr;
+	if (rc) {
+		alloc_resp.resp = QMI_RESULT_FAILURE_V01;
+		memblock[GPS].size = 0;
+	} else {
+		alloc_resp.resp = QMI_RESULT_SUCCESS_V01;
+	}
+
+	mutex_unlock(&memsh_drv->mem_share);
+
+	pr_debug("alloc_resp.num_bytes :%d, alloc_resp.handle :%lx, alloc_resp.mem_req_result :%lx\n",
+			  alloc_resp.num_bytes,
+			  (unsigned long int)alloc_resp.handle,
+			  (unsigned long int)alloc_resp.resp);
+	rc = qmi_send_resp_from_cb(mem_share_svc_handle, conn_h, req_h,
+			&mem_share_svc_alloc_resp_desc, &alloc_resp,
+			sizeof(alloc_resp));
+	if (rc < 0)
+		pr_err("In %s, Error sending the alloc request: %d\n",
+					__func__, rc);
+
+	return rc;
+}
+
+static int handle_alloc_generic_req(void *req_h, void *req, void *conn_h)
+{
+	struct mem_alloc_generic_req_msg_v01 *alloc_req;
+	struct mem_alloc_generic_resp_msg_v01 *alloc_resp;
+	int rc, resp = 0;
+	int client_id;
+	uint32_t size = 0;
+
+	alloc_req = (struct mem_alloc_generic_req_msg_v01 *)req;
+	pr_debug("memshare: alloc request client id: %d proc _id: %d\n",
+			alloc_req->client_id, alloc_req->proc_id);
+	mutex_lock(&memsh_drv->mem_share);
+	alloc_resp = kzalloc(sizeof(struct mem_alloc_generic_resp_msg_v01),
+					GFP_KERNEL);
+	if (!alloc_resp) {
+		mutex_unlock(&memsh_drv->mem_share);
+		return -ENOMEM;
+	}
+	alloc_resp->resp.result = QMI_RESULT_FAILURE_V01;
+	alloc_resp->resp.error = QMI_ERR_NO_MEMORY_V01;
+	client_id = check_client(alloc_req->client_id, alloc_req->proc_id,
+								CHECK);
+
+	if (client_id >= MAX_CLIENTS) {
+		pr_err("memshare: %s client not found, requested client: %d, proc_id: %d\n",
+				__func__, alloc_req->client_id,
+				alloc_req->proc_id);
+		kfree(alloc_resp);
+		alloc_resp = NULL;
+		mutex_unlock(&memsh_drv->mem_share);
+		return -EINVAL;
+	}
+
+	if (!memblock[client_id].alloted) {
+		if (alloc_req->client_id == 1 && alloc_req->num_bytes > 0)
+			size = alloc_req->num_bytes + MEMSHARE_GUARD_BYTES;
+		else
+			size = alloc_req->num_bytes;
+		rc = memshare_alloc(memsh_drv->dev, size,
+					&memblock[client_id]);
+		if (rc) {
+			pr_err("In %s,Unable to allocate memory for requested client\n",
+							__func__);
+			resp = 1;
+		}
+		if (!resp) {
+			memblock[client_id].free_memory += 1;
+			memblock[client_id].alloted = 1;
+			memblock[client_id].size = alloc_req->num_bytes;
+			memblock[client_id].peripheral = alloc_req->proc_id;
+		}
+	}
+	pr_debug("memshare: In %s, free memory count for client id: %d = %d",
+		__func__, memblock[client_id].client_id,
+		memblock[client_id].free_memory);
+
+	memblock[client_id].sequence_id = alloc_req->sequence_id;
+
+	fill_alloc_response(alloc_resp, client_id, &resp);
+	/*
+	 * Perform the Hypervisor mapping in order to avoid XPU viloation
+	 * to the allocated region for Modem Clients
+	 */
+	if (!memblock[client_id].hyp_mapping &&
+		memblock[client_id].alloted)
+		shared_hyp_mapping(client_id);
+	mutex_unlock(&memsh_drv->mem_share);
+	pr_debug("memshare: alloc_resp.num_bytes :%d, alloc_resp.handle :%lx, alloc_resp.mem_req_result :%lx\n",
+			  alloc_resp->dhms_mem_alloc_addr_info[0].num_bytes,
+			  (unsigned long int)
+			  alloc_resp->dhms_mem_alloc_addr_info[0].phy_addr,
+			  (unsigned long int)alloc_resp->resp.result);
+	rc = qmi_send_resp_from_cb(mem_share_svc_handle, conn_h, req_h,
+			&mem_share_svc_alloc_generic_resp_desc, alloc_resp,
+			sizeof(*alloc_resp));
+
+	if (rc < 0)
+		pr_err("In %s, Error sending the alloc request: %d\n",
+							__func__, rc);
+
+	kfree(alloc_resp);
+	alloc_resp = NULL;
+	return rc;
+}
+
+static int handle_free_req(void *req_h, void *req, void *conn_h)
+{
+	struct mem_free_req_msg_v01 *free_req;
+	struct mem_free_resp_msg_v01 free_resp;
+	int rc;
+
+	mutex_lock(&memsh_drv->mem_free);
+	if (!memblock[GPS].guarantee) {
+		free_req = (struct mem_free_req_msg_v01 *)req;
+		pr_debug("%s: Received Free Request\n", __func__);
+		memset(&free_resp, 0, sizeof(struct mem_free_resp_msg_v01));
+		pr_debug("In %s: pblk->virtual_addr :%lx, pblk->phy_addr: %lx\n,size: %d",
+				__func__,
+			(unsigned long int)memblock[GPS].virtual_addr,
+			(unsigned long int)free_req->handle,
+			memblock[GPS].size);
+		dma_free_coherent(memsh_drv->dev, memblock[GPS].size,
+			memblock[GPS].virtual_addr,
+				free_req->handle);
+	}
+	free_resp.resp = QMI_RESULT_SUCCESS_V01;
+	mutex_unlock(&memsh_drv->mem_free);
+	rc = qmi_send_resp_from_cb(mem_share_svc_handle, conn_h, req_h,
+			&mem_share_svc_free_resp_desc, &free_resp,
+			sizeof(free_resp));
+	if (rc < 0)
+		pr_err("In %s, Error sending the free request: %d\n",
+					__func__, rc);
+
+	return rc;
+}
+
+static int handle_free_generic_req(void *req_h, void *req, void *conn_h)
+{
+	struct mem_free_generic_req_msg_v01 *free_req;
+	struct mem_free_generic_resp_msg_v01 free_resp;
+	int rc;
+	int flag = 0;
+	uint32_t client_id;
+
+	free_req = (struct mem_free_generic_req_msg_v01 *)req;
+	pr_debug("memshare: %s: Received Free Request\n", __func__);
+	mutex_lock(&memsh_drv->mem_free);
+	memset(&free_resp, 0, sizeof(struct mem_free_generic_resp_msg_v01));
+	free_resp.resp.error = QMI_ERR_INTERNAL_V01;
+	free_resp.resp.result = QMI_RESULT_FAILURE_V01;
+	pr_debug("memshare: Client id: %d proc id: %d\n", free_req->client_id,
+				free_req->proc_id);
+	client_id = check_client(free_req->client_id, free_req->proc_id, FREE);
+	if (client_id == DHMS_MEM_CLIENT_INVALID) {
+		pr_err("In %s, Invalid client request to free memory\n",
+					__func__);
+		flag = 1;
+	} else if (!memblock[client_id].guarantee &&
+					memblock[client_id].alloted) {
+		pr_debug("In %s: pblk->virtual_addr :%lx, pblk->phy_addr: %lx\n,size: %d",
+				__func__,
+				(unsigned long int)
+				memblock[client_id].virtual_addr,
+				(unsigned long int)memblock[client_id].phy_addr,
+				memblock[client_id].size);
+		dma_free_attrs(memsh_drv->dev, memblock[client_id].size,
+			memblock[client_id].virtual_addr,
+			memblock[client_id].phy_addr,
+			&attrs);
+		free_client(client_id);
+	} else {
+		pr_err("In %s, Request came for a guaranteed client cannot free up the memory\n",
+						__func__);
+	}
+
+	if (flag) {
+		free_resp.resp.result = QMI_RESULT_FAILURE_V01;
+		free_resp.resp.error = QMI_ERR_INVALID_ID_V01;
+	} else {
+		free_resp.resp.result = QMI_RESULT_SUCCESS_V01;
+		free_resp.resp.error = QMI_ERR_NONE_V01;
+	}
+
+	mutex_unlock(&memsh_drv->mem_free);
+	rc = qmi_send_resp_from_cb(mem_share_svc_handle, conn_h, req_h,
+		&mem_share_svc_free_generic_resp_desc, &free_resp,
+		sizeof(free_resp));
+
+	if (rc < 0)
+		pr_err("In %s, Error sending the free request: %d\n",
+					__func__, rc);
+
+	return rc;
+}
+
+static int handle_query_size_req(void *req_h, void *req, void *conn_h)
+{
+	int rc, client_id;
+	struct mem_query_size_req_msg_v01 *query_req;
+	struct mem_query_size_rsp_msg_v01 *query_resp;
+
+	query_req = (struct mem_query_size_req_msg_v01 *)req;
+	mutex_lock(&memsh_drv->mem_share);
+	query_resp = kzalloc(sizeof(struct mem_query_size_rsp_msg_v01),
+					GFP_KERNEL);
+	if (!query_resp) {
+		mutex_unlock(&memsh_drv->mem_share);
+		return -ENOMEM;
+	}
+	pr_debug("memshare: query request client id: %d proc _id: %d\n",
+		query_req->client_id, query_req->proc_id);
+	client_id = check_client(query_req->client_id, query_req->proc_id,
+								CHECK);
+
+	if (client_id >= MAX_CLIENTS) {
+		pr_err("memshare: %s client not found, requested client: %d, proc_id: %d\n",
+				__func__, query_req->client_id,
+				query_req->proc_id);
+		kfree(query_resp);
+		query_resp = NULL;
+		mutex_unlock(&memsh_drv->mem_share);
+		return -EINVAL;
+	}
+
+	query_resp->size_valid = 1;
+	query_resp->size = memblock[client_id].size;
+	query_resp->resp.result = QMI_RESULT_SUCCESS_V01;
+	query_resp->resp.error = QMI_ERR_NONE_V01;
+	mutex_unlock(&memsh_drv->mem_share);
+
+	pr_debug("memshare: query_resp.size :%d, alloc_resp.mem_req_result :%lx\n",
+			  query_resp->size,
+			  (unsigned long int)query_resp->resp.result);
+	rc = qmi_send_resp_from_cb(mem_share_svc_handle, conn_h, req_h,
+			&mem_share_svc_size_query_resp_desc, query_resp,
+			sizeof(*query_resp));
+
+	if (rc < 0)
+		pr_err("In %s, Error sending the query request: %d\n",
+							__func__, rc);
+
+	kfree(query_resp);
+	query_resp = NULL;
+	return rc;
+}
+
+static int mem_share_svc_connect_cb(struct qmi_handle *handle,
+			       void *conn_h)
+{
+	if (mem_share_svc_handle != handle || !conn_h)
+		return -EINVAL;
+
+	return 0;
+}
+
+static int mem_share_svc_disconnect_cb(struct qmi_handle *handle,
+				  void *conn_h)
+{
+	if (mem_share_svc_handle != handle || !conn_h)
+		return -EINVAL;
+
+	return 0;
+}
+
+static int mem_share_svc_req_desc_cb(unsigned int msg_id,
+				struct msg_desc **req_desc)
+{
+	int rc;
+
+	pr_debug("memshare: In %s\n", __func__);
+	switch (msg_id) {
+	case MEM_ALLOC_REQ_MSG_V01:
+		*req_desc = &mem_share_svc_alloc_req_desc;
+		rc = sizeof(struct mem_alloc_req_msg_v01);
+		break;
+
+	case MEM_FREE_REQ_MSG_V01:
+		*req_desc = &mem_share_svc_free_req_desc;
+		rc = sizeof(struct mem_free_req_msg_v01);
+		break;
+
+	case MEM_ALLOC_GENERIC_REQ_MSG_V01:
+		*req_desc = &mem_share_svc_alloc_generic_req_desc;
+		rc = sizeof(struct mem_alloc_generic_req_msg_v01);
+		break;
+
+	case MEM_FREE_GENERIC_REQ_MSG_V01:
+		*req_desc = &mem_share_svc_free_generic_req_desc;
+		rc = sizeof(struct mem_free_generic_req_msg_v01);
+		break;
+
+	case MEM_QUERY_SIZE_REQ_MSG_V01:
+		*req_desc = &mem_share_svc_size_query_req_desc;
+		rc = sizeof(struct mem_query_size_req_msg_v01);
+		break;
+
+	default:
+		rc = -ENOTSUPP;
+		break;
+	}
+	return rc;
+}
+
+static int mem_share_svc_req_cb(struct qmi_handle *handle, void *conn_h,
+			void *req_h, unsigned int msg_id, void *req)
+{
+	int rc;
+
+	pr_debug("memshare: In %s\n", __func__);
+	if (mem_share_svc_handle != handle || !conn_h)
+		return -EINVAL;
+
+	switch (msg_id) {
+	case MEM_ALLOC_REQ_MSG_V01:
+		rc = handle_alloc_req(req_h, req, conn_h);
+		break;
+
+	case MEM_FREE_REQ_MSG_V01:
+		rc = handle_free_req(req_h, req, conn_h);
+		break;
+
+	case MEM_ALLOC_GENERIC_REQ_MSG_V01:
+		rc = handle_alloc_generic_req(req_h, req, conn_h);
+		break;
+
+	case MEM_FREE_GENERIC_REQ_MSG_V01:
+		rc = handle_free_generic_req(req_h, req, conn_h);
+		break;
+
+	case MEM_QUERY_SIZE_REQ_MSG_V01:
+		rc = handle_query_size_req(req_h, req, conn_h);
+		break;
+
+	default:
+		rc = -ENOTSUPP;
+		break;
+	}
+	return rc;
+}
+
+static void mem_share_svc_recv_msg(struct work_struct *work)
+{
+	int rc;
+
+	pr_debug("memshare: In %s\n", __func__);
+	do {
+		pr_debug("%s: Notified about a Receive Event", __func__);
+	} while ((rc = qmi_recv_msg(mem_share_svc_handle)) == 0);
+
+	if (rc != -ENOMSG)
+		pr_err("%s: Error receiving message\n", __func__);
+}
+
+static void qmi_mem_share_svc_ntfy(struct qmi_handle *handle,
+		enum qmi_event_type event, void *priv)
+{
+	pr_debug("memshare: In %s\n", __func__);
+	switch (event) {
+	case QMI_RECV_MSG:
+		queue_delayed_work(mem_share_svc_workqueue,
+				   &work_recv_msg, 0);
+		break;
+	default:
+		break;
+	}
+}
+
+static struct qmi_svc_ops_options mem_share_svc_ops_options = {
+	.version = 1,
+	.service_id = MEM_SHARE_SERVICE_SVC_ID,
+	.service_vers = MEM_SHARE_SERVICE_VERS,
+	.service_ins = MEM_SHARE_SERVICE_INS_ID,
+	.connect_cb = mem_share_svc_connect_cb,
+	.disconnect_cb = mem_share_svc_disconnect_cb,
+	.req_desc_cb = mem_share_svc_req_desc_cb,
+	.req_cb = mem_share_svc_req_cb,
+};
+
+int memshare_alloc(struct device *dev,
+					unsigned int block_size,
+					struct mem_blocks *pblk)
+{
+
+	int ret;
+
+	pr_debug("%s: memshare_alloc called", __func__);
+	if (!pblk) {
+		pr_err("%s: Failed to alloc\n", __func__);
+		return -ENOMEM;
+	}
+
+	pblk->virtual_addr = dma_alloc_attrs(dev, block_size,
+						&pblk->phy_addr, GFP_KERNEL,
+						&attrs);
+	if (pblk->virtual_addr == NULL) {
+		pr_err("allocation failed, %d\n", block_size);
+		ret = -ENOMEM;
+		return ret;
+	}
+	pr_debug("pblk->phy_addr :%lx, pblk->virtual_addr %lx\n",
+		  (unsigned long int)pblk->phy_addr,
+		  (unsigned long int)pblk->virtual_addr);
+	return 0;
+}
+
+static void memshare_init_worker(struct work_struct *work)
+{
+	int rc;
+
+	mem_share_svc_workqueue =
+		create_singlethread_workqueue("mem_share_svc");
+	if (!mem_share_svc_workqueue)
+		return;
+
+	mem_share_svc_handle = qmi_handle_create(qmi_mem_share_svc_ntfy, NULL);
+	if (!mem_share_svc_handle) {
+		pr_err("%s: Creating mem_share_svc qmi handle failed\n",
+			__func__);
+		destroy_workqueue(mem_share_svc_workqueue);
+		return;
+	}
+	rc = qmi_svc_register(mem_share_svc_handle, &mem_share_svc_ops_options);
+	if (rc < 0) {
+		pr_err("%s: Registering mem share svc failed %d\n",
+			__func__, rc);
+		qmi_handle_destroy(mem_share_svc_handle);
+		destroy_workqueue(mem_share_svc_workqueue);
+		return;
+	}
+	pr_debug("memshare: memshare_init successful\n");
+}
+
+static int memshare_child_probe(struct platform_device *pdev)
+{
+	int rc;
+	uint32_t size, client_id;
+	const char *name;
+	struct memshare_child *drv;
+
+	drv = devm_kzalloc(&pdev->dev, sizeof(struct memshare_child),
+							GFP_KERNEL);
+
+	if (!drv) {
+		pr_err("Unable to allocate memory to driver\n");
+		return -ENOMEM;
+	}
+
+	drv->dev = &pdev->dev;
+	memsh_child = drv;
+	platform_set_drvdata(pdev, memsh_child);
+
+	rc = of_property_read_u32(pdev->dev.of_node, "qcom,peripheral-size",
+						&size);
+	if (rc) {
+		pr_err("In %s, Error reading size of clients, rc: %d\n",
+				__func__, rc);
+		return rc;
+	}
+
+	rc = of_property_read_u32(pdev->dev.of_node, "qcom,client-id",
+						&client_id);
+	if (rc) {
+		pr_err("In %s, Error reading client id, rc: %d\n",
+				__func__, rc);
+		return rc;
+	}
+
+	memblock[num_clients].guarantee = of_property_read_bool(
+							pdev->dev.of_node,
+							"qcom,allocate-boot-time");
+
+	rc = of_property_read_string(pdev->dev.of_node, "label",
+						&name);
+	if (rc) {
+		pr_err("In %s, Error reading peripheral info for client, rc: %d\n",
+					__func__, rc);
+		return rc;
+	}
+
+	if (strcmp(name, "modem") == 0)
+		memblock[num_clients].peripheral = DHMS_MEM_PROC_MPSS_V01;
+	else if (strcmp(name, "adsp") == 0)
+		memblock[num_clients].peripheral = DHMS_MEM_PROC_ADSP_V01;
+	else if (strcmp(name, "wcnss") == 0)
+		memblock[num_clients].peripheral = DHMS_MEM_PROC_WCNSS_V01;
+
+	memblock[num_clients].size = size;
+	memblock[num_clients].client_id = client_id;
+
+	/*
+	 * Memshare allocation for guaranteed clients
+	 */
+	if (memblock[num_clients].guarantee && size > 0) {
+		if (client_id == 1)
+			size += MEMSHARE_GUARD_BYTES;
+		rc = memshare_alloc(memsh_child->dev,
+				size,
+				&memblock[num_clients]);
+		if (rc) {
+			pr_err("In %s, Unable to allocate memory for guaranteed clients, rc: %d\n",
+							__func__, rc);
+			return rc;
+		}
+		memblock[num_clients].alloted = 1;
+		shared_hyp_mapping(num_clients);
+	}
+
+	/*
+	 *  call for creating ramdump dev handlers for
+	 *  memshare clients
+	 */
+
+	memshare_dev[num_clients] = &pdev->dev;
+
+	if (!memblock[num_clients].file_created) {
+		rc = mem_share_configure_ramdump();
+		if (rc)
+			pr_err("In %s, cannot collect dumps for client id: %d\n",
+					__func__,
+					memblock[num_clients].client_id);
+		else
+			memblock[num_clients].file_created = 1;
+	}
+
+	num_clients++;
+
+	return 0;
+}
+
+static int memshare_probe(struct platform_device *pdev)
+{
+	int rc;
+	struct memshare_driver *drv;
+
+	drv = devm_kzalloc(&pdev->dev, sizeof(struct memshare_driver),
+							GFP_KERNEL);
+
+	if (!drv) {
+		pr_err("Unable to allocate memory to driver\n");
+		return -ENOMEM;
+	}
+
+	/* Memory allocation has been done successfully */
+	mutex_init(&drv->mem_free);
+	mutex_init(&drv->mem_share);
+
+	INIT_WORK(&drv->memshare_init_work, memshare_init_worker);
+	schedule_work(&drv->memshare_init_work);
+
+	drv->dev = &pdev->dev;
+	memsh_drv = drv;
+	platform_set_drvdata(pdev, memsh_drv);
+	initialize_client();
+	num_clients = 0;
+
+	rc = of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev);
+
+	if (rc) {
+		pr_err("In %s, error populating the devices\n", __func__);
+		return rc;
+	}
+
+	subsys_notif_register_notifier("modem", &nb);
+	pr_info("In %s, Memshare probe success\n", __func__);
+
+	return 0;
+}
+
+static int memshare_remove(struct platform_device *pdev)
+{
+	if (!memsh_drv)
+		return 0;
+
+	qmi_svc_unregister(mem_share_svc_handle);
+	flush_workqueue(mem_share_svc_workqueue);
+	qmi_handle_destroy(mem_share_svc_handle);
+	destroy_workqueue(mem_share_svc_workqueue);
+
+	return 0;
+}
+
+static int memshare_child_remove(struct platform_device *pdev)
+{
+	if (!memsh_child)
+		return 0;
+
+	return 0;
+}
+
+static const struct of_device_id memshare_match_table[] = {
+	{
+		.compatible = "qcom,memshare",
+	},
+	{}
+};
+
+static const struct of_device_id memshare_match_table1[] = {
+	{
+		.compatible = "qcom,memshare-peripheral",
+	},
+	{}
+};
+
+
+static struct platform_driver memshare_pdriver = {
+	.probe          = memshare_probe,
+	.remove         = memshare_remove,
+	.driver = {
+		.name   = MEMSHARE_DEV_NAME,
+		.owner  = THIS_MODULE,
+		.of_match_table = memshare_match_table,
+	},
+};
+
+static struct platform_driver memshare_pchild = {
+	.probe          = memshare_child_probe,
+	.remove         = memshare_child_remove,
+	.driver = {
+		.name   = MEMSHARE_CHILD_DEV_NAME,
+		.owner  = THIS_MODULE,
+		.of_match_table = memshare_match_table1,
+	},
+};
+
+module_platform_driver(memshare_pdriver);
+module_platform_driver(memshare_pchild);
+
+MODULE_DESCRIPTION("Mem Share QMI Service Driver");
+MODULE_LICENSE("GPL v2");
diff -Nruw linux-4.4.115/drivers/soc/qcom/memshare/msm_memshare.h linux-4.4.115-fbx/drivers/soc/qcom/memshare/msm_memshare.h
--- linux-4.4.115/drivers/soc/qcom/memshare/msm_memshare.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/soc/qcom/memshare/msm_memshare.h	2019-10-29 09:26:24.813214628 +0100
@@ -0,0 +1,65 @@
+/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _LINUX_MEM_SHARE_H
+#define _LINUX_MEM_SHARE_H
+
+#define MEM_SHARE_SERVICE_SVC_ID 0x00000034
+#define MEM_SHARE_SERVICE_INS_ID 1
+#define MEM_SHARE_SERVICE_VERS 1
+
+#define MEMORY_CMA	1
+#define MEMORY_NON_CMA	0
+#define MAX_CLIENTS 10
+#define GPS	0
+#define CHECK	0
+#define FREE	1
+#define MEMSHARE_GUARD_BYTES	(4*1024)
+
+struct mem_blocks {
+	/* Client Id information */
+	uint32_t client_id;
+	/* Peripheral associated with client */
+	uint32_t peripheral;
+	/* Sequence Id */
+	uint32_t sequence_id;
+	/* CMA or Non-CMA region */
+	uint32_t memory_type;
+	/* Guaranteed Memory */
+	uint32_t guarantee;
+	/* Memory allotted or not */
+	uint32_t alloted;
+	/* Size required for client */
+	uint32_t size;
+	/*
+	 * start address of the memory block reserved by server memory
+	 * subsystem to client
+	 */
+	phys_addr_t phy_addr;
+	/* Virtual address for the physical address allocated */
+	void *virtual_addr;
+	/* Release memory only when XPU is released */
+	uint8_t free_memory;
+	/* Need hypervisor mapping */
+	uint8_t hyp_mapping;
+	/* Status flag which checks if the ramdump file is created */
+	int file_created;
+};
+
+int memshare_alloc(struct device *dev,
+					unsigned int block_size,
+					struct mem_blocks *pblk);
+void memshare_free(unsigned int block_size,
+					struct mem_blocks *pblk);
+#endif /* _LINUX_MEM_SHARE_H */
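A minimal caller sketch for the allocator exported above (illustrative; it assumes a valid struct device backed by the CMA region, and note that memshare_free() is declared here while the driver in this patch releases memory through dma_free_attrs() directly):

	struct mem_blocks blk = {0};

	if (memshare_alloc(dev, 1024 * 1024, &blk))	/* 1 MiB */
		return -ENOMEM;
	/* on success blk.phy_addr and blk.virtual_addr are filled in;
	 * keep the block around, both are needed again on the free path */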
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/soc/qcom/mpm-of.c	2019-01-22 16:16:26.659274986 +0100
@@ -0,0 +1,1024 @@
+/* Copyright (c) 2010-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/bitmap.h>
+#include <linux/bitops.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/list.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/power_supply.h>
+#include <linux/regulator/consumer.h>
+#include <linux/regulator/rpm-smd-regulator.h>
+#include <linux/workqueue.h>
+#include <linux/irqchip/arm-gic.h>
+#include <linux/clk/msm-clk.h>
+#include <linux/irqchip/msm-gpio-irq.h>
+#include <linux/irqchip/msm-mpm-irq.h>
+#include <linux/mutex.h>
+#include <asm/arch_timer.h>
+
+enum {
+	MSM_MPM_GIC_IRQ_DOMAIN,
+	MSM_MPM_GPIO_IRQ_DOMAIN,
+	MSM_MPM_NR_IRQ_DOMAINS,
+};
+
+enum {
+	MSM_MPM_SET_ENABLED,
+	MSM_MPM_SET_WAKEUP,
+	MSM_NR_IRQS_SET,
+};
+
+struct mpm_irqs_a2m {
+	struct irq_domain *domain;
+	struct device_node *parent;
+	irq_hw_number_t hwirq;
+	unsigned long pin;
+	struct hlist_node node;
+};
+#define MAX_DOMAIN_NAME 5
+
+struct mpm_irqs {
+	struct irq_domain *domain;
+	unsigned long *enabled_irqs;
+	unsigned long *wakeup_irqs;
+	unsigned long size;
+	char domain_name[MAX_DOMAIN_NAME];
+};
+
+#define MAX_MPM_PIN_PER_IRQ 2
+static struct mpm_irqs unlisted_irqs[MSM_MPM_NR_IRQ_DOMAINS];
+static int num_mpm_irqs = MSM_MPM_NR_MPM_IRQS;
+static struct hlist_head *irq_hash;
+static unsigned int *msm_mpm_irqs_m2a;
+#define MSM_MPM_REG_WIDTH  DIV_ROUND_UP(num_mpm_irqs, 32)
+
+#define MSM_MPM_IRQ_INDEX(irq)  (irq / 32)
+#define MSM_MPM_IRQ_MASK(irq)  BIT(irq % 32)
+#define MSM_MPM_IRQ_DOMAIN_MASK(domain) BIT(domain)
+
+#define hashfn(val) (val % num_mpm_irqs)
+#define SCLK_HZ (32768)
+#define ARCH_TIMER_HZ (19200000)
+#define MAX_IRQ 1024
+
+struct msm_mpm_device_data {
+	uint16_t *irqs_m2a;
+	unsigned int irqs_m2a_size;
+	uint16_t *bypassed_apps_irqs;
+	unsigned int bypassed_apps_irqs_size;
+	void __iomem *mpm_request_reg_base;
+	void __iomem *mpm_status_reg_base;
+	void __iomem *mpm_apps_ipc_reg;
+	unsigned int mpm_apps_ipc_val;
+	unsigned int mpm_ipc_irq;
+};
+static struct msm_mpm_device_data msm_mpm_dev_data;
+
+struct mpm_of {
+	char *pkey;
+	char *map;
+	char name[MAX_DOMAIN_NAME];
+	struct irq_chip *chip;
+	int (*get_max_irqs)(struct irq_domain *d);
+};
+
+static struct clk *xo_clk;
+static bool xo_enabled;
+static bool msm_mpm_in_suspend;
+static struct workqueue_struct *msm_mpm_wq;
+static struct work_struct msm_mpm_work;
+static struct completion wake_wq;
+
+enum mpm_reg_offsets {
+	MSM_MPM_REG_ENABLE,
+	MSM_MPM_REG_FALLING_EDGE,
+	MSM_MPM_REG_RISING_EDGE,
+	MSM_MPM_REG_POLARITY,
+	MSM_MPM_REG_STATUS,
+};
+
+static DEFINE_SPINLOCK(msm_mpm_lock);
+
+static uint32_t *msm_mpm_enabled_irq;
+static uint32_t *msm_mpm_wake_irq;
+static uint32_t *msm_mpm_falling_edge;
+static uint32_t *msm_mpm_rising_edge;
+static uint32_t *msm_mpm_polarity;
+
+enum mpm_state {
+	MSM_MPM_GIC_IRQ_MAPPING_DONE = BIT(0),
+	MSM_MPM_GPIO_IRQ_MAPPING_DONE = BIT(1),
+	MSM_MPM_DEVICE_PROBED = BIT(2),
+};
+
+static enum mpm_state msm_mpm_initialized;
+static int mpm_init_irq_domain(struct device_node *node, int irq_domain);
+
+static inline bool msm_mpm_is_initialized(void)
+{
+	return msm_mpm_initialized &
+		(MSM_MPM_GIC_IRQ_MAPPING_DONE | MSM_MPM_DEVICE_PROBED);
+}
+
+static inline uint32_t msm_mpm_read(
+	unsigned int reg, unsigned int subreg_index)
+{
+	unsigned int offset = reg * MSM_MPM_REG_WIDTH + subreg_index + 2;
+
+	return __raw_readl(msm_mpm_dev_data.mpm_request_reg_base + offset * 4);
+}
+
+static inline void msm_mpm_write(
+	unsigned int reg, unsigned int subreg_index, uint32_t value)
+{
+	/*
+	 * Add 2 to offset to account for the 64 bit timer in the vMPM
+	 * mapping
+	 */
+	unsigned int offset = reg * MSM_MPM_REG_WIDTH + subreg_index + 2;
+
+	__raw_writel(value, msm_mpm_dev_data.mpm_request_reg_base + offset * 4);
+}
+
+static inline void msm_mpm_send_interrupt(void)
+{
+	__raw_writel(msm_mpm_dev_data.mpm_apps_ipc_val,
+			msm_mpm_dev_data.mpm_apps_ipc_reg);
+	/* Ensure the write is complete before returning. */
+	wmb();
+}
+
+static irqreturn_t msm_mpm_irq(int irq, void *dev_id)
+{
+	/*
+	 * When the system resumes from deep sleep mode, the RPM hardware wakes
+	 * up the Apps processor by triggering this interrupt. This interrupt
+	 * has to be enabled and marked as a wakeup source so that it can
+	 * bring the SPM out of sleep. Handle the interrupt here to make
+	 * sure that it gets cleared.
+	 */
+	return IRQ_HANDLED;
+}
+
+static void msm_mpm_timer_write(uint32_t *expiry)
+{
+	__raw_writel(expiry[0], msm_mpm_dev_data.mpm_request_reg_base);
+	__raw_writel(expiry[1], msm_mpm_dev_data.mpm_request_reg_base + 0x4);
+}
+
+static void msm_mpm_set(cycle_t wakeup, bool wakeset)
+{
+	uint32_t *irqs;
+	unsigned int reg;
+	int i;
+
+	msm_mpm_timer_write((uint32_t *)&wakeup);
+
+	irqs = wakeset ? msm_mpm_wake_irq : msm_mpm_enabled_irq;
+	for (i = 0; i < MSM_MPM_REG_WIDTH; i++) {
+		reg = MSM_MPM_REG_ENABLE;
+		msm_mpm_write(reg, i, irqs[i]);
+
+		reg = MSM_MPM_REG_FALLING_EDGE;
+		msm_mpm_write(reg, i, msm_mpm_falling_edge[i]);
+
+		reg = MSM_MPM_REG_RISING_EDGE;
+		msm_mpm_write(reg, i, msm_mpm_rising_edge[i]);
+
+		reg = MSM_MPM_REG_POLARITY;
+		msm_mpm_write(reg, i, msm_mpm_polarity[i]);
+
+		reg = MSM_MPM_REG_STATUS;
+		msm_mpm_write(reg, i, 0);
+	}
+
+	/*
+	 * Ensure that the set operation is complete before sending the
+	 * interrupt
+	 */
+	wmb();
+	msm_mpm_send_interrupt();
+}
+
+static inline unsigned int msm_mpm_get_irq_m2a(unsigned int pin)
+{
+	BUG_ON(!msm_mpm_irqs_m2a);
+	return msm_mpm_irqs_m2a[pin];
+}
+
+static inline void msm_mpm_get_irq_a2m(struct irq_data *d, uint16_t *mpm_pins)
+{
+	struct mpm_irqs_a2m *node = NULL;
+	int count = 0;
+
+	hlist_for_each_entry(node, &irq_hash[hashfn(d->hwirq)], node) {
+		if ((node->hwirq == d->hwirq)
+				&& (d->domain == node->domain)) {
+			/*
+			 * Update the linux irq mapping. No update required for
+			 * bypass interrupts
+			 */
+			if (node->pin != 0xff)
+				msm_mpm_irqs_m2a[node->pin] = d->irq;
+			if (count >= MAX_MPM_PIN_PER_IRQ) {
+				count--;
+				__WARN();
+			}
+			mpm_pins[count] = node->pin;
+			count++;
+		}
+	}
+}
+
+static int msm_mpm_enable_irq_exclusive(
+	struct irq_data *d, bool enable, bool wakeset)
+{
+	uint16_t num = 0;
+	uint16_t mpm_pins[MAX_MPM_PIN_PER_IRQ] = {0};
+
+	WARN_ON(!d);
+
+	if (!d)
+		return 0;
+
+	msm_mpm_get_irq_a2m(d, mpm_pins);
+
+	for (num = 0; num < MAX_MPM_PIN_PER_IRQ; num++) {
+
+		if (mpm_pins[num] == 0xff)
+			break;
+
+		if (num && mpm_pins[num] == 0)
+			break;
+
+		if (mpm_pins[num]) {
+			uint32_t *mpm_irq_masks = wakeset ?
+				msm_mpm_wake_irq : msm_mpm_enabled_irq;
+			uint32_t index = MSM_MPM_IRQ_INDEX(mpm_pins[num]);
+			uint32_t mask = MSM_MPM_IRQ_MASK(mpm_pins[num]);
+
+			if (enable)
+				mpm_irq_masks[index] |= mask;
+			else
+				mpm_irq_masks[index] &= ~mask;
+		} else {
+			int i;
+			unsigned long *irq_apps;
+
+			for (i = 0; i < MSM_MPM_NR_IRQ_DOMAINS; i++) {
+				if (d->domain == unlisted_irqs[i].domain)
+					break;
+			}
+
+			if (i == MSM_MPM_NR_IRQ_DOMAINS)
+				return 0;
+
+			irq_apps = wakeset ? unlisted_irqs[i].wakeup_irqs :
+					unlisted_irqs[i].enabled_irqs;
+
+			if (enable)
+				__set_bit(d->hwirq, irq_apps);
+			else
+				__clear_bit(d->hwirq, irq_apps);
+
+			if ((msm_mpm_initialized & MSM_MPM_DEVICE_PROBED)
+				&& !wakeset && !msm_mpm_in_suspend)
+				complete(&wake_wq);
+		}
+	}
+
+	return 0;
+}
+
+static void msm_mpm_set_edge_ctl(int pin, unsigned int flow_type)
+{
+	uint32_t index;
+	uint32_t mask;
+
+	index = MSM_MPM_IRQ_INDEX(pin);
+	mask = MSM_MPM_IRQ_MASK(pin);
+
+	if (flow_type & IRQ_TYPE_EDGE_FALLING)
+		msm_mpm_falling_edge[index] |= mask;
+	else
+		msm_mpm_falling_edge[index] &= ~mask;
+
+	if (flow_type & IRQ_TYPE_EDGE_RISING)
+		msm_mpm_rising_edge[index] |= mask;
+	else
+		msm_mpm_rising_edge[index] &= ~mask;
+}
+
+static int msm_mpm_set_irq_type_exclusive(
+	struct irq_data *d, unsigned int flow_type)
+{
+	uint16_t num = 0;
+	uint16_t mpm_pins[MAX_MPM_PIN_PER_IRQ] = {0};
+
+	msm_mpm_get_irq_a2m(d, mpm_pins);
+
+	for (num = 0; num < MAX_MPM_PIN_PER_IRQ; num++) {
+
+		if (mpm_pins[num] == 0xff)
+			break;
+
+		if (mpm_pins[num]) {
+			uint32_t index = MSM_MPM_IRQ_INDEX(mpm_pins[num]);
+			uint32_t mask = MSM_MPM_IRQ_MASK(mpm_pins[num]);
+
+			if (index >= MSM_MPM_REG_WIDTH)
+				return -EFAULT;
+
+			msm_mpm_set_edge_ctl(mpm_pins[num], flow_type);
+
+			if (flow_type & IRQ_TYPE_LEVEL_HIGH)
+				msm_mpm_polarity[index] |= mask;
+			else
+				msm_mpm_polarity[index] &= ~mask;
+		}
+	}
+
+	return 0;
+}
+
+static int __msm_mpm_enable_irq(struct irq_data *d, bool enable)
+{
+	unsigned long flags;
+	int rc;
+
+	if (!msm_mpm_is_initialized())
+		return -EINVAL;
+
+	spin_lock_irqsave(&msm_mpm_lock, flags);
+
+	rc = msm_mpm_enable_irq_exclusive(d, enable, false);
+	spin_unlock_irqrestore(&msm_mpm_lock, flags);
+
+	return rc;
+}
+
+static void msm_mpm_enable_irq(struct irq_data *d)
+{
+	__msm_mpm_enable_irq(d, true);
+}
+
+static void msm_mpm_disable_irq(struct irq_data *d)
+{
+	__msm_mpm_enable_irq(d, false);
+}
+
+static int msm_mpm_set_irq_wake(struct irq_data *d, unsigned int on)
+{
+	unsigned long flags;
+	int rc;
+
+	if (!msm_mpm_is_initialized())
+		return -EINVAL;
+
+	spin_lock_irqsave(&msm_mpm_lock, flags);
+	rc = msm_mpm_enable_irq_exclusive(d, (bool)on, true);
+	spin_unlock_irqrestore(&msm_mpm_lock, flags);
+
+	return rc;
+}
+
+static int msm_mpm_set_irq_type(struct irq_data *d, unsigned int flow_type)
+{
+	unsigned long flags;
+	int rc;
+
+	if (!msm_mpm_is_initialized())
+		return -EINVAL;
+
+	spin_lock_irqsave(&msm_mpm_lock, flags);
+	rc = msm_mpm_set_irq_type_exclusive(d, flow_type);
+	spin_unlock_irqrestore(&msm_mpm_lock, flags);
+
+	return rc;
+}
+
+/******************************************************************************
+ * Public functions
+ *****************************************************************************/
+int msm_mpm_enable_pin(unsigned int pin, unsigned int enable)
+{
+	uint32_t index = MSM_MPM_IRQ_INDEX(pin);
+	uint32_t mask = MSM_MPM_IRQ_MASK(pin);
+	unsigned long flags;
+
+	if (!msm_mpm_is_initialized())
+		return -EINVAL;
+
+	if (pin >= MSM_MPM_NR_MPM_IRQS)
+		return -EINVAL;
+
+	spin_lock_irqsave(&msm_mpm_lock, flags);
+
+	if (enable)
+		msm_mpm_enabled_irq[index] |= mask;
+	else
+		msm_mpm_enabled_irq[index] &= ~mask;
+
+	spin_unlock_irqrestore(&msm_mpm_lock, flags);
+	return 0;
+}
+
+int msm_mpm_set_pin_wake(unsigned int pin, unsigned int on)
+{
+	uint32_t index = MSM_MPM_IRQ_INDEX(pin);
+	uint32_t mask = MSM_MPM_IRQ_MASK(pin);
+	unsigned long flags;
+
+	if (!msm_mpm_is_initialized())
+		return -EINVAL;
+
+	if (pin >= MSM_MPM_NR_MPM_IRQS)
+		return -EINVAL;
+
+	spin_lock_irqsave(&msm_mpm_lock, flags);
+
+	if (on)
+		msm_mpm_wake_irq[index] |= mask;
+	else
+		msm_mpm_wake_irq[index] &= ~mask;
+
+	spin_unlock_irqrestore(&msm_mpm_lock, flags);
+	return 0;
+}
+
+int msm_mpm_set_pin_type(unsigned int pin, unsigned int flow_type)
+{
+	uint32_t index = MSM_MPM_IRQ_INDEX(pin);
+	uint32_t mask = MSM_MPM_IRQ_MASK(pin);
+	unsigned long flags;
+
+	if (!msm_mpm_is_initialized())
+		return -EINVAL;
+
+	if (pin >= MSM_MPM_NR_MPM_IRQS)
+		return -EINVAL;
+
+	spin_lock_irqsave(&msm_mpm_lock, flags);
+
+	msm_mpm_set_edge_ctl(pin, flow_type);
+
+	if (flow_type & IRQ_TYPE_LEVEL_HIGH)
+		msm_mpm_polarity[index] |= mask;
+	else
+		msm_mpm_polarity[index] &= ~mask;
+
+	spin_unlock_irqrestore(&msm_mpm_lock, flags);
+	return 0;
+}
+
+static bool msm_mpm_interrupts_detectable(int d, bool from_idle)
+{
+	unsigned long *irq_bitmap;
+	bool ret = false;
+	struct mpm_irqs *unlisted = &unlisted_irqs[d];
+
+	if (!msm_mpm_is_initialized())
+		return false;
+
+	if (from_idle)
+		irq_bitmap = unlisted->enabled_irqs;
+	else
+		irq_bitmap = unlisted->wakeup_irqs;
+
+	ret = (bool) bitmap_empty(irq_bitmap, unlisted->size);
+
+	return ret;
+}
+
+bool msm_mpm_gpio_irqs_detectable(bool from_idle)
+{
+	return msm_mpm_interrupts_detectable(MSM_MPM_GPIO_IRQ_DOMAIN,
+			from_idle);
+}
+
+bool msm_mpm_irqs_detectable(bool from_idle)
+{
+	return msm_mpm_interrupts_detectable(MSM_MPM_GIC_IRQ_DOMAIN,
+			from_idle);
+}
+
+void msm_mpm_enter_sleep(uint64_t sclk_count, bool from_idle,
+		const struct cpumask *cpumask)
+{
+	cycle_t wakeup = (u64)sclk_count * ARCH_TIMER_HZ;
+
+	if (!msm_mpm_is_initialized()) {
+		pr_err("%s(): MPM not initialized\n", __func__);
+		return;
+	}
+
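+	/*
+	 * Scale the 32.768 kHz sleep-clock tick count up to 19.2 MHz arch
+	 * timer cycles: e.g. sclk_count == 32768 (one second) becomes
+	 * 19200000 cycles past the current counter value. A zero count
+	 * means no timed wakeup, hence the ~0ULL expiry.
+	 */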
+	if (sclk_count) {
+		do_div(wakeup, SCLK_HZ);
+		wakeup += arch_counter_get_cntvct();
+	} else {
+		wakeup = (~0ULL);
+	}
+
+	msm_mpm_gpio_irqs_detectable(from_idle);
+	msm_mpm_irqs_detectable(from_idle);
+	msm_mpm_set(wakeup, !from_idle);
+	if (cpumask)
+		irq_set_affinity(msm_mpm_dev_data.mpm_ipc_irq, cpumask);
+}
+
+void msm_mpm_exit_sleep(bool from_idle)
+{
+	unsigned long pending;
+	uint32_t *enabled_intr;
+	int i;
+	int k;
+
+	if (!msm_mpm_is_initialized()) {
+		pr_err("%s(): MPM not initialized\n", __func__);
+		return;
+	}
+
+	enabled_intr = from_idle ? msm_mpm_enabled_irq :
+						msm_mpm_wake_irq;
+
+	for (i = 0; i < MSM_MPM_REG_WIDTH; i++) {
+		pending = msm_mpm_read(MSM_MPM_REG_STATUS, i);
+		pending &= enabled_intr[i];
+
+		k = find_first_bit(&pending, 32);
+		while (k < 32) {
+			unsigned int mpm_irq = 32 * i + k;
+			unsigned int apps_irq = msm_mpm_get_irq_m2a(mpm_irq);
+			struct irq_desc *desc = apps_irq ?
+				irq_to_desc(apps_irq) : NULL;
+
+			if (desc && !irqd_is_level_type(&desc->irq_data))
+				irq_set_irqchip_state(apps_irq,
+						IRQCHIP_STATE_PENDING, true);
+
+			k = find_next_bit(&pending, 32, k + 1);
+		}
+	}
+}
+
+static void msm_mpm_sys_low_power_modes(bool allow)
+{
+	static DEFINE_MUTEX(enable_xo_mutex);
+
+	mutex_lock(&enable_xo_mutex);
+	if (allow) {
+		if (xo_enabled) {
+			clk_disable_unprepare(xo_clk);
+			xo_enabled = false;
+		}
+	} else {
+		if (!xo_enabled) {
+			/*
+			 * If we cannot enable the XO clock, BUG right away
+			 * rather than have to deal with being unable to wake
+			 * up from a non-monitorable interrupt.
+			 */
+			BUG_ON(clk_prepare_enable(xo_clk));
+			xo_enabled = true;
+		}
+	}
+	mutex_unlock(&enable_xo_mutex);
+}
+
+void msm_mpm_suspend_prepare(void)
+{
+	bool allow;
+	unsigned long flags;
+
+	spin_lock_irqsave(&msm_mpm_lock, flags);
+
+	allow = msm_mpm_irqs_detectable(false) &&
+		msm_mpm_gpio_irqs_detectable(false);
+	msm_mpm_in_suspend = true;
+
+	spin_unlock_irqrestore(&msm_mpm_lock, flags);
+	msm_mpm_sys_low_power_modes(allow);
+}
+EXPORT_SYMBOL(msm_mpm_suspend_prepare);
+
+void msm_mpm_suspend_wake(void)
+{
+	bool allow;
+	unsigned long flags;
+
+	spin_lock_irqsave(&msm_mpm_lock, flags);
+
+	allow = msm_mpm_irqs_detectable(true) &&
+		msm_mpm_gpio_irqs_detectable(true);
+
+	spin_unlock_irqrestore(&msm_mpm_lock, flags);
+	msm_mpm_sys_low_power_modes(allow);
+	msm_mpm_in_suspend = false;
+}
+EXPORT_SYMBOL(msm_mpm_suspend_wake);
+
+static void msm_mpm_work_fn(struct work_struct *work)
+{
+	unsigned long flags;
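+
+	/*
+	 * Re-evaluate the XO vote whenever an MPM-invisible interrupt is
+	 * enabled at runtime (the enable path completes wake_wq), so low
+	 * power modes stay blocked while such an interrupt could fire.
+	 */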
+	while (1) {
+		bool allow;
+		if (wait_for_completion_interruptible(&wake_wq))
+			continue;
+		spin_lock_irqsave(&msm_mpm_lock, flags);
+		allow = msm_mpm_irqs_detectable(true) &&
+				msm_mpm_gpio_irqs_detectable(true);
+		if (msm_mpm_in_suspend) {
+			spin_unlock_irqrestore(&msm_mpm_lock, flags);
+			continue;
+		}
+
+		spin_unlock_irqrestore(&msm_mpm_lock, flags);
+		msm_mpm_sys_low_power_modes(allow);
+	}
+}
+
+static int msm_mpm_dev_probe(struct platform_device *pdev)
+{
+	struct resource *res = NULL;
+	int offset, ret;
+	struct msm_mpm_device_data *dev = &msm_mpm_dev_data;
+	const char *clk_name;
+	char *key;
+
+	if (msm_mpm_initialized & MSM_MPM_DEVICE_PROBED) {
+		pr_warn("MPM device probed multiple times\n");
+		return 0;
+	}
+
+	key = "clock-names";
+	ret = of_property_read_string(pdev->dev.of_node, key, &clk_name);
+	if (ret) {
+		pr_err("%s(): Cannot read property %s\n", __func__, key);
+		return -EINVAL;
+	}
+
+	xo_clk = clk_get(&pdev->dev, clk_name);
+
+	if (IS_ERR(xo_clk)) {
+		pr_err("%s(): Cannot get clk resource for XO: %ld\n", __func__,
+				PTR_ERR(xo_clk));
+		return PTR_ERR(xo_clk);
+	}
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "vmpm");
+	if (!res) {
+		pr_err("%s(): Missing RPM memory resource\n", __func__);
+		return -EINVAL;
+	}
+
+	dev->mpm_request_reg_base = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(dev->mpm_request_reg_base)) {
+		pr_err("%s(): Unable to iomap\n", __func__);
+		return PTR_ERR(dev->mpm_request_reg_base);
+	}
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ipc");
+	if (!res) {
+		pr_err("%s(): Missing GCC memory resource\n", __func__);
+		return -EINVAL;
+	}
+
+	dev->mpm_apps_ipc_reg = devm_ioremap(&pdev->dev, res->start,
+					resource_size(res));
+	if (!dev->mpm_apps_ipc_reg) {
+		pr_err("%s(): Unable to iomap IPC register\n", __func__);
+		return -EADDRNOTAVAIL;
+	}
+
+	if (of_property_read_u32(pdev->dev.of_node,
+				"qcom,ipc-bit-offset", &offset)) {
+		pr_err("%s(): Cannot read ipc bit offset\n", __func__);
+		return -EINVAL;
+	}
+
+	dev->mpm_apps_ipc_val = (1 << offset);
+
+	dev->mpm_ipc_irq = platform_get_irq(pdev, 0);
+
+	if (dev->mpm_ipc_irq < 0) {
+		pr_err("%s(): Cannot find IRQ resource\n", __func__);
+		return dev->mpm_ipc_irq;
+	}
+	ret = devm_request_irq(&pdev->dev, dev->mpm_ipc_irq, msm_mpm_irq,
+			IRQF_TRIGGER_RISING | IRQF_NO_SUSPEND, pdev->name,
+			msm_mpm_irq);
+
+	if (ret) {
+		pr_err("%s(): request_irq failed errno: %d\n", __func__, ret);
+		return ret;
+	}
+	ret = irq_set_irq_wake(dev->mpm_ipc_irq, 1);
+
+	if (ret) {
+		pr_err("%s: failed to set wakeup irq %u: %d\n",
+			__func__, dev->mpm_ipc_irq, ret);
+		return ret;
+
+	}
+
+	init_completion(&wake_wq);
+
+	INIT_WORK(&msm_mpm_work, msm_mpm_work_fn);
+	msm_mpm_wq = create_singlethread_workqueue("mpm");
+
+	if (msm_mpm_wq) {
+		queue_work(msm_mpm_wq, &msm_mpm_work);
+	} else {
+		pr_warn("%s(): Failed to create wq, voting against XO off\n",
+				__func__);
+		/*
+		 * Throw a BUG. Otherwise it is possible that the system
+		 * allows XO shutdown while non-monitored interrupts are
+		 * pending, causing errors at a later point in time.
+		 */
+		BUG_ON(clk_prepare_enable(xo_clk));
+		xo_enabled = true;
+	}
+
+	msm_mpm_initialized |= MSM_MPM_DEVICE_PROBED;
+	return 0;
+}
+
+static inline int __init mpm_irq_domain_size(struct irq_domain *d)
+{
+	return d->revmap_size ?: MAX_IRQ;
+}
+
+static const struct mpm_of mpm_of_map[MSM_MPM_NR_IRQ_DOMAINS] = {
+	{
+		"qcom,gic-parent",
+		"qcom,gic-map",
+		"gic",
+		&gic_arch_extn,
+		mpm_irq_domain_size,
+	},
+	{
+		"qcom,gpio-parent",
+		"qcom,gpio-map",
+		"gpio",
+#if (defined(CONFIG_USE_PINCTRL_IRQ) && defined(CONFIG_PINCTRL_MSM_TLMM))
+		&mpm_tlmm_irq_extn,
+#elif defined(CONFIG_GPIO_MSM_V3)
+		&msm_gpio_irq_extn,
+#else
+		&mpm_pinctrl_extn,
+#endif
+		mpm_irq_domain_size,
+	},
+};
+
+static void freeup_memory(void)
+{
+	int i;
+
+	for (i = 0; i < MSM_MPM_NR_IRQ_DOMAINS; i++) {
+		if (mpm_of_map[i].chip) {
+			mpm_of_map[i].chip->irq_mask = NULL;
+			mpm_of_map[i].chip->irq_unmask = NULL;
+			mpm_of_map[i].chip->irq_disable = NULL;
+			mpm_of_map[i].chip->irq_set_type = NULL;
+			mpm_of_map[i].chip->irq_set_wake = NULL;
+		}
+		kfree(unlisted_irqs[i].enabled_irqs);
+		kfree(unlisted_irqs[i].wakeup_irqs);
+	}
+
+	kfree(irq_hash);
+	kfree(msm_mpm_irqs_m2a);
+	kfree(msm_mpm_enabled_irq);
+	kfree(msm_mpm_wake_irq);
+	kfree(msm_mpm_falling_edge);
+	kfree(msm_mpm_rising_edge);
+	kfree(msm_mpm_polarity);
+}
+
+static int mpm_init_irq_domain(struct device_node *node, int irq_domain)
+{
+	int i = irq_domain;
+	struct device_node *parent = NULL;
+	struct mpm_irqs_a2m *mpm_node = NULL;
+	struct irq_domain *domain = NULL;
+	int size;
+	const __be32 *list;
+
+	/* Check if mapping is already done for this irq domain */
+	if (msm_mpm_initialized & MSM_MPM_IRQ_DOMAIN_MASK(irq_domain))
+		return 0;
+
+	parent = of_parse_phandle(node, mpm_of_map[i].pkey, 0);
+
+	if (!parent) {
+		pr_warn("%s(): %s not found\n", __func__,
+				mpm_of_map[i].pkey);
+		return -ENODEV;
+	}
+
+	domain = irq_find_host(parent);
+
+	if (!domain) {
+		pr_warn("%s(): Cannot find irq controller for %s\n",
+				__func__, mpm_of_map[i].pkey);
+		return -EPROBE_DEFER;
+	}
+
+	size = mpm_of_map[i].get_max_irqs(domain);
+	unlisted_irqs[i].size = size;
+	memcpy(unlisted_irqs[i].domain_name, mpm_of_map[i].name,
+			MAX_DOMAIN_NAME);
+
+	unlisted_irqs[i].enabled_irqs =
+		kzalloc(BITS_TO_LONGS(size) * sizeof(unsigned long),
+				GFP_KERNEL);
+
+	if (!unlisted_irqs[i].enabled_irqs)
+		goto failed_malloc;
+
+	unlisted_irqs[i].wakeup_irqs =
+		kzalloc(BITS_TO_LONGS(size) * sizeof(unsigned long),
+				GFP_KERNEL);
+
+	if (!unlisted_irqs[i].wakeup_irqs)
+		goto failed_malloc;
+
+	unlisted_irqs[i].domain = domain;
+
+	list = of_get_property(node, mpm_of_map[i].map, &size);
+
+	if (!list || !size) {
+		__WARN();
+		return -ENODEV;
+	}
+
+	/*
+	 * Size is in bytes; convert it to the number of be32 cells.
+	 */
+	size /= sizeof(*list);
+
+	/*
+	 * The data is a list of <pin hwirq> tuples, each mapping an MPM
+	 * pin to a hwirq, so the number of mappings in the device tree
+	 * is size/2.
+	 */
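+	/*
+	 * Illustrative only (hypothetical values): a mapping such as
+	 *
+	 *	qcom,gic-map = <41 172>, <55 104>;
+	 *
+	 * routes MPM pin 41 to GIC hwirq 172 and pin 55 to hwirq 104,
+	 * while a pin value of 0xff marks a bypass interrupt.
+	 */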
+	mpm_node = kzalloc(sizeof(struct mpm_irqs_a2m) * size / 2,
+			GFP_KERNEL);
+	if (!mpm_node)
+		goto failed_malloc;
+
+	while (size) {
+		unsigned long pin = be32_to_cpup(list++);
+		irq_hw_number_t hwirq = be32_to_cpup(list++);
+
+		mpm_node->pin = pin;
+		mpm_node->hwirq = hwirq;
+		mpm_node->parent = parent;
+		mpm_node->domain = domain;
+		INIT_HLIST_NODE(&mpm_node->node);
+
+		hlist_add_head(&mpm_node->node,
+				&irq_hash[hashfn(mpm_node->hwirq)]);
+		size -= 2;
+		mpm_node++;
+	}
+
+	if (mpm_of_map[i].chip) {
+		mpm_of_map[i].chip->irq_mask = msm_mpm_disable_irq;
+		mpm_of_map[i].chip->irq_unmask = msm_mpm_enable_irq;
+		mpm_of_map[i].chip->irq_disable = msm_mpm_disable_irq;
+		mpm_of_map[i].chip->irq_set_type = msm_mpm_set_irq_type;
+		mpm_of_map[i].chip->irq_set_wake = msm_mpm_set_irq_wake;
+	}
+
+	msm_mpm_initialized |= MSM_MPM_IRQ_DOMAIN_MASK(irq_domain);
+
+	return 0;
+failed_malloc:
+
+	freeup_memory();
+	return -ENODEV;
+}
+
+static void __of_mpm_init(struct device_node *node)
+{
+	int i;
+
+	if (msm_mpm_initialized & (MSM_MPM_GIC_IRQ_MAPPING_DONE |
+				MSM_MPM_GPIO_IRQ_MAPPING_DONE)) {
+		pr_warn("%s(): MPM driver mapping exists\n", __func__);
+		return;
+	}
+
+	/*
+	 * Default to 64 MPM interrupts if the "qcom,num-mpm-irqs" DT
+	 * property is not defined. The MPM driver writes 32-bit words
+	 * when configuring MPM registers, so num_mpm_irqs must be a
+	 * multiple of 32.
+	 */
+	of_property_read_u32(node, "qcom,num-mpm-irqs", &num_mpm_irqs);
+
+	irq_hash = kzalloc(num_mpm_irqs * sizeof(*irq_hash), GFP_KERNEL);
+	if (!irq_hash)
+		goto failed_malloc;
+
+	msm_mpm_irqs_m2a = kzalloc(num_mpm_irqs * sizeof(*msm_mpm_irqs_m2a),
+				GFP_KERNEL);
+	if (!msm_mpm_irqs_m2a)
+		goto failed_malloc;
+
+	msm_mpm_enabled_irq = kzalloc(MSM_MPM_REG_WIDTH * sizeof(uint32_t),
+				GFP_KERNEL);
+	if (!msm_mpm_enabled_irq)
+		goto failed_malloc;
+	msm_mpm_wake_irq = kzalloc(MSM_MPM_REG_WIDTH * sizeof(uint32_t),
+				GFP_KERNEL);
+	if (!msm_mpm_wake_irq)
+		goto failed_malloc;
+
+	msm_mpm_falling_edge = kzalloc(MSM_MPM_REG_WIDTH * sizeof(uint32_t),
+				GFP_KERNEL);
+	if (!msm_mpm_falling_edge)
+		goto failed_malloc;
+
+	msm_mpm_rising_edge = kzalloc(MSM_MPM_REG_WIDTH * sizeof(uint32_t),
+				GFP_KERNEL);
+	if (!msm_mpm_rising_edge)
+		goto failed_malloc;
+
+	msm_mpm_polarity = kzalloc(MSM_MPM_REG_WIDTH * sizeof(uint32_t),
+				GFP_KERNEL);
+	if (!msm_mpm_polarity)
+		goto failed_malloc;
+
+	for (i = 0; i < num_mpm_irqs; i++)
+		INIT_HLIST_HEAD(&irq_hash[i]);
+
+	return;
+failed_malloc:
+	freeup_memory();
+}
+
+static const struct of_device_id msm_mpm_match_table[] = {
+	{.compatible = "qcom,mpm-v2"},
+	{},
+};
+
+static struct platform_driver msm_mpm_dev_driver = {
+	.probe = msm_mpm_dev_probe,
+	.driver = {
+		.name = "mpm-v2",
+		.owner = THIS_MODULE,
+		.of_match_table = msm_mpm_match_table,
+	},
+};
+
+int __init msm_mpm_device_init(void)
+{
+	return platform_driver_register(&msm_mpm_dev_driver);
+}
+arch_initcall(msm_mpm_device_init);
+
+void of_mpm_init(void)
+{
+	struct device_node *node;
+	int i;
+	int ret;
+
+	node = of_find_matching_node(NULL, msm_mpm_match_table);
+	WARN_ON(!node);
+	if (node) {
+		__of_mpm_init(node);
+		for (i = 0; i < MSM_MPM_NR_IRQ_DOMAINS; i++) {
+			ret = mpm_init_irq_domain(node, i);
+			if (ret)
+				pr_err("MPM irq domain %d mapping failed: %d\n",
+						i, ret);
+		}
+	}
+}
diff -Nruw linux-4.4.115-fbx/drivers/soc/qcom/msm_bus./Makefile linux-4.4.115-fbx/drivers/soc/qcom/msm_bus/Makefile
--- linux-4.4.115-fbx/drivers/soc/qcom/msm_bus./Makefile	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/soc/qcom/msm_bus/Makefile	2019-01-22 16:16:26.659274986 +0100
@@ -0,0 +1,13 @@
+#
+# Makefile for msm-bus driver specific files
+#
+obj-y += msm_bus_core.o msm_bus_client_api.o
+obj-$(CONFIG_OF) += msm_bus_of.o
+obj-$(CONFIG_MSM_RPM_SMD) += msm_bus_rpm_smd.o
+obj-y += msm_bus_fabric_adhoc.o msm_bus_arb_adhoc.o msm_bus_rules.o \
+	msm_bus_bimc_adhoc.o msm_bus_noc_adhoc.o
+obj-$(CONFIG_OF) += msm_bus_of_adhoc.o
+obj-$(CONFIG_DEBUG_BUS_VOTER) += msm_bus_dbg_voter.o
+obj-$(CONFIG_CORESIGHT) += msm_buspm_coresight_adhoc.o
+
+obj-$(CONFIG_DEBUG_FS) += msm_bus_dbg.o
diff -Nruw linux-4.4.115-fbx/drivers/soc/qcom/msm_bus./msm_bus_adhoc.h linux-4.4.115-fbx/drivers/soc/qcom/msm_bus/msm_bus_adhoc.h
--- linux-4.4.115-fbx/drivers/soc/qcom/msm_bus./msm_bus_adhoc.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/soc/qcom/msm_bus/msm_bus_adhoc.h	2019-01-22 16:16:26.659274986 +0100
@@ -0,0 +1,177 @@
+/* Copyright (c) 2014-2015, 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _ARCH_ARM_MACH_MSM_BUS_ADHOC_H
+#define _ARCH_ARM_MACH_MSM_BUS_ADHOC_H
+
+#include <linux/types.h>
+#include <linux/device.h>
+#include <linux/msm-bus-board.h>
+#include <linux/msm-bus.h>
+#include <linux/msm_bus_rules.h>
+#include "msm_bus_core.h"
+
+struct msm_bus_node_device_type;
+struct link_node {
+	uint64_t lnode_ib[NUM_CTX];
+	uint64_t lnode_ab[NUM_CTX];
+	int next;
+	struct device *next_dev;
+	struct list_head link;
+	uint32_t in_use;
+	const char *cl_name;
+};
+
+/* New types introduced for adhoc topology */
+struct msm_bus_noc_ops {
+	int (*qos_init)(struct msm_bus_node_device_type *dev,
+			void __iomem *qos_base, uint32_t qos_off,
+			uint32_t qos_delta, uint32_t qos_freq);
+	int (*set_bw)(struct msm_bus_node_device_type *dev,
+			void __iomem *qos_base, uint32_t qos_off,
+			uint32_t qos_delta, uint32_t qos_freq);
+	int (*limit_mport)(struct msm_bus_node_device_type *dev,
+			void __iomem *qos_base, uint32_t qos_off,
+			uint32_t qos_delta, uint32_t qos_freq, int enable_lim,
+			uint64_t lim_bw);
+	bool (*update_bw_reg)(int mode);
+};
+
+struct nodebw {
+	uint64_t sum_ab;
+	uint64_t last_sum_ab;
+	uint64_t max_ib;
+	uint64_t max_ab;
+	uint64_t cur_clk_hz;
+	uint32_t util_used;
+	uint32_t vrail_used;
+	const char *max_ab_cl_name;
+	const char *max_ib_cl_name;
+};
+
+struct msm_bus_fab_device_type {
+	void __iomem *qos_base;
+	phys_addr_t pqos_base;
+	size_t qos_range;
+	uint32_t base_offset;
+	uint32_t qos_freq;
+	uint32_t qos_off;
+	struct msm_bus_noc_ops noc_ops;
+	enum msm_bus_hw_sel bus_type;
+	bool bypass_qos_prg;
+};
+
+struct qos_params_type {
+	int mode;
+	unsigned int prio_lvl;
+	unsigned int prio_rd;
+	unsigned int prio_wr;
+	unsigned int prio1;
+	unsigned int prio0;
+	unsigned int reg_prio1;
+	unsigned int reg_prio0;
+	unsigned int gp;
+	unsigned int thmp;
+	unsigned int ws;
+	u64 bw_buffer;
+};
+
+struct node_util_levels_type {
+	uint64_t threshold;
+	uint32_t util_fact;
+};
+
+struct node_agg_params_type {
+	uint32_t agg_scheme;
+	uint32_t num_aggports;
+	unsigned int buswidth;
+	uint32_t vrail_comp;
+	uint32_t num_util_levels;
+	struct node_util_levels_type *util_levels;
+};
+
+struct msm_bus_node_info_type {
+	const char *name;
+	unsigned int id;
+	int mas_rpm_id;
+	int slv_rpm_id;
+	int num_ports;
+	int num_qports;
+	int *qport;
+	struct qos_params_type qos_params;
+	unsigned int num_connections;
+	unsigned int num_blist;
+	bool is_fab_dev;
+	bool virt_dev;
+	bool is_traversed;
+	unsigned int *connections;
+	unsigned int *black_listed_connections;
+	struct device **dev_connections;
+	struct device **black_connections;
+	unsigned int bus_device_id;
+	struct device *bus_device;
+	struct rule_update_path_info rule;
+	uint64_t lim_bw;
+	bool defer_qos;
+	struct node_agg_params_type agg_params;
+};
+
+struct msm_bus_node_device_type {
+	struct msm_bus_node_info_type *node_info;
+	struct msm_bus_fab_device_type *fabdev;
+	int num_lnodes;
+	struct link_node *lnode_list;
+	struct nodebw node_bw[NUM_CTX];
+	struct list_head link;
+	unsigned int ap_owned;
+	struct nodeclk clk[NUM_CTX];
+	struct nodeclk bus_qos_clk;
+	uint32_t num_node_qos_clks;
+	struct nodeclk *node_qos_clks;
+	struct device_node *of_node;
+	struct device dev;
+	bool dirty;
+	struct list_head dev_link;
+	struct list_head devlist;
+};
+
+static inline struct msm_bus_node_device_type *to_msm_bus_node(struct device *d)
+{
+	return container_of(d, struct msm_bus_node_device_type, dev);
+}
+
+
+int msm_bus_enable_limiter(struct msm_bus_node_device_type *nodedev,
+				int throttle_en, uint64_t lim_bw);
+int msm_bus_commit_data(struct list_head *clist);
+void *msm_bus_realloc_devmem(struct device *dev, void *p, size_t old_size,
+					size_t new_size, gfp_t flags);
+
+extern struct msm_bus_device_node_registration
+	*msm_bus_of_to_pdata(struct platform_device *pdev);
+extern void msm_bus_arb_setops_adhoc(struct msm_bus_arb_ops *arb_ops);
+extern int msm_bus_bimc_set_ops(struct msm_bus_node_device_type *bus_dev);
+extern int msm_bus_noc_set_ops(struct msm_bus_node_device_type *bus_dev);
+extern int msm_bus_of_get_static_rules(struct platform_device *pdev,
+					struct bus_rule_type **static_rule);
+extern int msm_rules_update_path(struct list_head *input_list,
+				struct list_head *output_list);
+extern void print_all_rules(void);
+#ifdef CONFIG_DEBUG_BUS_VOTER
+int msm_bus_floor_init(struct device *dev);
+#else
+static inline int msm_bus_floor_init(struct device *dev)
+{
+	return 0;
+}
+#endif /* CONFIG_DEBUG_BUS_VOTER */
+#endif /* _ARCH_ARM_MACH_MSM_BUS_ADHOC_H */
diff -Nruw linux-4.4.115-fbx/drivers/soc/qcom/msm_bus./msm_bus_arb_adhoc.c linux-4.4.115-fbx/drivers/soc/qcom/msm_bus/msm_bus_arb_adhoc.c
--- linux-4.4.115-fbx/drivers/soc/qcom/msm_bus./msm_bus_arb_adhoc.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/soc/qcom/msm_bus/msm_bus_arb_adhoc.c	2019-01-22 16:16:26.659274986 +0100
@@ -0,0 +1,1444 @@
+/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/rtmutex.h>
+#include <linux/clk.h>
+#include <linux/msm-bus.h>
+#include "msm_bus_core.h"
+#include "msm_bus_adhoc.h"
+#include <trace/events/trace_msm_bus.h>
+
+#define NUM_CL_HANDLES	50
+#define NUM_LNODES	3
+#define MAX_STR_CL	50
+
+struct bus_search_type {
+	struct list_head link;
+	struct list_head node_list;
+};
+
+struct handle_type {
+	int num_entries;
+	struct msm_bus_client **cl_list;
+};
+
+static struct handle_type handle_list;
+static LIST_HEAD(input_list);
+static LIST_HEAD(apply_list);
+static LIST_HEAD(commit_list);
+
+DEFINE_RT_MUTEX(msm_bus_adhoc_lock);
+
+static bool chk_bl_list(struct list_head *black_list, unsigned int id)
+{
+	struct msm_bus_node_device_type *bus_node = NULL;
+
+	list_for_each_entry(bus_node, black_list, link) {
+		if (bus_node->node_info->id == id)
+			return true;
+	}
+	return false;
+}
+
+static void copy_remaining_nodes(struct list_head *edge_list, struct list_head
+	*traverse_list, struct list_head *route_list)
+{
+	struct bus_search_type *search_node;
+
+	if (list_empty(edge_list) && list_empty(traverse_list))
+		return;
+
+	search_node = kzalloc(sizeof(struct bus_search_type), GFP_KERNEL);
+	if (!search_node)
+		return;
+	INIT_LIST_HEAD(&search_node->node_list);
+	list_splice_init(edge_list, traverse_list);
+	list_splice_init(traverse_list, &search_node->node_list);
+	list_add_tail(&search_node->link, route_list);
+}
+
+/*
+ * Duplicate instantiation from msm_bus_arb.c. TODO: there needs to be
+ * a "util" file for these common funcs/macros.
+ */
+uint64_t msm_bus_div64(unsigned int w, uint64_t bw)
+{
+	uint64_t *b = &bw;
+
+	if ((bw > 0) && (bw < w))
+		return 1;
+
+	switch (w) {
+	case 0:
+		WARN(1, "AXI: Divide by 0 attempted\n");
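+		/* fall through: treat a zero width as width 1 */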
+	case 1: return bw;
+	case 2: return (bw >> 1);
+	case 4: return (bw >> 2);
+	case 8: return (bw >> 3);
+	case 16: return (bw >> 4);
+	case 32: return (bw >> 5);
+	}
+
+	do_div(*b, w);
+	return *b;
+}
+
+int msm_bus_device_match_adhoc(struct device *dev, void *id)
+{
+	int ret = 0;
+	struct msm_bus_node_device_type *bnode = to_msm_bus_node(dev);
+
+	if (bnode)
+		ret = (bnode->node_info->id == *(unsigned int *)id);
+	else
+		ret = 0;
+
+	return ret;
+}
+
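+/*
+ * Find a free link_node slot on @dev, growing the per-device array in
+ * NUM_LNODES chunks when needed, and point it at @next_hop. Returns
+ * the slot index, or -1 on failure.
+ */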
+static int gen_lnode(struct device *dev,
+			int next_hop, int prev_idx, const char *cl_name)
+{
+	struct link_node *lnode;
+	struct msm_bus_node_device_type *cur_dev = NULL;
+	int lnode_idx = -1;
+
+	if (!dev)
+		goto exit_gen_lnode;
+
+	cur_dev = to_msm_bus_node(dev);
+	if (!cur_dev) {
+		MSM_BUS_ERR("%s: Null device ptr", __func__);
+		goto exit_gen_lnode;
+	}
+
+	if (!cur_dev->num_lnodes) {
+		cur_dev->lnode_list = devm_kzalloc(dev,
+				sizeof(struct link_node) * NUM_LNODES,
+								GFP_KERNEL);
+		if (!cur_dev->lnode_list)
+			goto exit_gen_lnode;
+
+		lnode = cur_dev->lnode_list;
+		cur_dev->num_lnodes = NUM_LNODES;
+		lnode_idx = 0;
+	} else {
+		int i;
+		for (i = 0; i < cur_dev->num_lnodes; i++) {
+			if (!cur_dev->lnode_list[i].in_use)
+				break;
+		}
+
+		if (i < cur_dev->num_lnodes) {
+			lnode = &cur_dev->lnode_list[i];
+			lnode_idx = i;
+		} else {
+			struct link_node *realloc_list;
+			size_t cur_size = sizeof(struct link_node) *
+					cur_dev->num_lnodes;
+
+			cur_dev->num_lnodes += NUM_LNODES;
+			realloc_list = msm_bus_realloc_devmem(
+					dev,
+					cur_dev->lnode_list,
+					cur_size,
+					sizeof(struct link_node) *
+					cur_dev->num_lnodes, GFP_KERNEL);
+
+			if (!realloc_list)
+				goto exit_gen_lnode;
+
+			cur_dev->lnode_list = realloc_list;
+			lnode = &cur_dev->lnode_list[i];
+			lnode_idx = i;
+		}
+	}
+
+	lnode->in_use = 1;
+	lnode->cl_name = cl_name;
+	if (next_hop == cur_dev->node_info->id) {
+		lnode->next = -1;
+		lnode->next_dev = NULL;
+	} else {
+		lnode->next = prev_idx;
+		lnode->next_dev = bus_find_device(&msm_bus_type, NULL,
+					(void *) &next_hop,
+					msm_bus_device_match_adhoc);
+	}
+
+	memset(lnode->lnode_ib, 0, sizeof(uint64_t) * NUM_CTX);
+	memset(lnode->lnode_ab, 0, sizeof(uint64_t) * NUM_CTX);
+
+exit_gen_lnode:
+	return lnode_idx;
+}
+
+static int remove_lnode(struct msm_bus_node_device_type *cur_dev,
+				int lnode_idx)
+{
+	int ret = 0;
+
+	if (!cur_dev) {
+		MSM_BUS_ERR("%s: Null device ptr", __func__);
+		ret = -ENODEV;
+		goto exit_remove_lnode;
+	}
+
+	if (lnode_idx != -1) {
+		if (!cur_dev->num_lnodes ||
+				(lnode_idx > (cur_dev->num_lnodes - 1))) {
+			MSM_BUS_ERR("%s: Invalid Idx %d, num_lnodes %d",
+				__func__, lnode_idx, cur_dev->num_lnodes);
+			ret = -ENODEV;
+			goto exit_remove_lnode;
+		}
+
+		cur_dev->lnode_list[lnode_idx].next = -1;
+		cur_dev->lnode_list[lnode_idx].next_dev = NULL;
+		cur_dev->lnode_list[lnode_idx].in_use = 0;
+		cur_dev->lnode_list[lnode_idx].cl_name = NULL;
+	}
+
+exit_remove_lnode:
+	return ret;
+}
+
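+/*
+ * Backtrack from @dest through the per-depth levels saved on
+ * route_list, generating an lnode for every hop of the discovered
+ * route, then clear the traversal marks and empty the black list.
+ * Returns the first lnode index, or -1 if no route was found.
+ */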
+static int prune_path(struct list_head *route_list, int dest, int src,
+				struct list_head *black_list, int found,
+				const char *cl_name)
+{
+	struct bus_search_type *search_node, *temp_search_node;
+	struct msm_bus_node_device_type *bus_node;
+	struct list_head *bl_list;
+	struct list_head *temp_bl_list;
+	int search_dev_id = dest;
+	struct device *dest_dev = bus_find_device(&msm_bus_type, NULL,
+					(void *) &dest,
+					msm_bus_device_match_adhoc);
+	int lnode_hop = -1;
+
+	if (!found)
+		goto reset_links;
+
+	if (!dest_dev) {
+		MSM_BUS_ERR("%s: Can't find dest dev %d", __func__, dest);
+		goto exit_prune_path;
+	}
+
+	lnode_hop = gen_lnode(dest_dev, search_dev_id, lnode_hop, cl_name);
+
+	list_for_each_entry_reverse(search_node, route_list, link) {
+		list_for_each_entry(bus_node, &search_node->node_list, link) {
+			unsigned int i;
+			for (i = 0; i < bus_node->node_info->num_connections;
+									i++) {
+				if (bus_node->node_info->connections[i] ==
+								search_dev_id) {
+					dest_dev = bus_find_device(
+						&msm_bus_type,
+						NULL,
+						(void *)
+						&bus_node->node_info->
+						id,
+						msm_bus_device_match_adhoc);
+
+					if (!dest_dev) {
+						lnode_hop = -1;
+						goto reset_links;
+					}
+
+					lnode_hop = gen_lnode(dest_dev,
+							search_dev_id,
+							lnode_hop, cl_name);
+					search_dev_id =
+						bus_node->node_info->id;
+					break;
+				}
+			}
+		}
+	}
+reset_links:
+	list_for_each_entry_safe(search_node, temp_search_node, route_list,
+									link) {
+			list_for_each_entry(bus_node, &search_node->node_list,
+									link)
+				bus_node->node_info->is_traversed = false;
+
+			list_del(&search_node->link);
+			kfree(search_node);
+	}
+
+	list_for_each_safe(bl_list, temp_bl_list, black_list)
+		list_del(bl_list);
+
+exit_prune_path:
+	return lnode_hop;
+}
+
+static void setup_bl_list(struct msm_bus_node_device_type *node,
+				struct list_head *black_list)
+{
+	unsigned int i;
+
+	for (i = 0; i < node->node_info->num_blist; i++) {
+		struct msm_bus_node_device_type *bdev;
+		bdev = to_msm_bus_node(node->node_info->black_connections[i]);
+		list_add_tail(&bdev->link, black_list);
+	}
+}
+
+static int getpath(struct device *src_dev, int dest, const char *cl_name)
+{
+	struct list_head traverse_list;
+	struct list_head edge_list;
+	struct list_head route_list;
+	struct list_head black_list;
+	struct msm_bus_node_device_type *src_node;
+	struct bus_search_type *search_node;
+	int found = 0;
+	int depth_index = 0;
+	int first_hop = -1;
+	int src;
+
+	INIT_LIST_HEAD(&traverse_list);
+	INIT_LIST_HEAD(&edge_list);
+	INIT_LIST_HEAD(&route_list);
+	INIT_LIST_HEAD(&black_list);
+
+	if (!src_dev) {
+		MSM_BUS_ERR("%s: Cannot locate src dev ", __func__);
+		goto exit_getpath;
+	}
+
+	src_node = to_msm_bus_node(src_dev);
+	if (!src_node) {
+		MSM_BUS_ERR("%s:Fatal, Source node not found", __func__);
+		goto exit_getpath;
+	}
+	src = src_node->node_info->id;
+	list_add_tail(&src_node->link, &traverse_list);
+
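+	/*
+	 * Breadth-first search over the bus topology: scan traverse_list
+	 * for @dest; if absent, expand the frontier into edge_list via
+	 * each node's connections (skipping black-listed ones), archive
+	 * the visited level on route_list and promote edge_list to be
+	 * the new traverse_list.
+	 */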
+	while ((!found && !list_empty(&traverse_list))) {
+		struct msm_bus_node_device_type *bus_node = NULL;
+		/* Locate dest_id in the traverse list */
+		list_for_each_entry(bus_node, &traverse_list, link) {
+			if (bus_node->node_info->id == dest) {
+				found = 1;
+				break;
+			}
+		}
+
+		if (!found) {
+			unsigned int i;
+			/* Setup the new edge list */
+			list_for_each_entry(bus_node, &traverse_list, link) {
+				/* Setup list of black-listed nodes */
+				setup_bl_list(bus_node, &black_list);
+
+				for (i = 0; i < bus_node->node_info->
+						num_connections; i++) {
+					bool skip;
+					struct msm_bus_node_device_type
+							*node_conn;
+					node_conn =
+					to_msm_bus_node(bus_node->node_info->
+						dev_connections[i]);
+					if (node_conn->node_info->
+							is_traversed) {
+						MSM_BUS_ERR("Circ Path %d\n",
+						node_conn->node_info->id);
+						goto reset_traversed;
+					}
+					skip = chk_bl_list(&black_list,
+							bus_node->node_info->
+							connections[i]);
+					if (!skip) {
+						list_add_tail(&node_conn->link,
+							&edge_list);
+						node_conn->node_info->
+							is_traversed = true;
+					}
+				}
+			}
+
+			/* Keep tabs of the previous search list */
+			search_node = kzalloc(sizeof(struct bus_search_type),
+					 GFP_KERNEL);
+			if (!search_node)
+				goto reset_traversed;
+			INIT_LIST_HEAD(&search_node->node_list);
+			list_splice_init(&traverse_list,
+					 &search_node->node_list);
+			/* Add the previous search list to a route list */
+			list_add_tail(&search_node->link, &route_list);
+			/* Advancing the list depth */
+			depth_index++;
+			list_splice_init(&edge_list, &traverse_list);
+		}
+	}
+reset_traversed:
+	copy_remaining_nodes(&edge_list, &traverse_list, &route_list);
+	first_hop = prune_path(&route_list, dest, src, &black_list, found,
+								cl_name);
+
+exit_getpath:
+	return first_hop;
+}
+
+static uint64_t scheme1_agg_scheme(struct msm_bus_node_device_type *bus_dev,
+			struct msm_bus_node_device_type *fab_dev, int ctx)
+{
+	uint64_t max_ib;
+	uint64_t sum_ab;
+	uint64_t bw_max_hz;
+	uint32_t util_fact = 0;
+	uint32_t vrail_comp = 0;
+	struct node_util_levels_type *utils;
+	int i;
+	int num_util_levels;
+
+	/*
+	 *  Account for Util factor and vrail comp.
+	 *  Util factor is picked according to the current sum(AB) for this
+	 *  node and for this context.
+	 *  Vrail comp is fixed for the entire performance range.
+	 *  They default to 100 if absent.
+	 *
+	 *  The aggregated clock is computed as:
+	 *  Freq_hz = max((sum(ab) * util_fact)/num_chan, max(ib)/vrail_comp)
+	 *				/ bus-width
+	 */
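+	/*
+	 * Worked example with illustrative numbers: sum(ab) = 800 and
+	 * util_fact = 150 give 1200, halved to 600 by num_aggports = 2;
+	 * max(ib) = 2000 and vrail_comp = 50 give 4000; with a bus width
+	 * of 8 the aggregated request is max(4000, 600) / 8 = 500.
+	 */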
+	if (bus_dev->node_info->agg_params.num_util_levels) {
+		utils = bus_dev->node_info->agg_params.util_levels;
+		num_util_levels =
+			bus_dev->node_info->agg_params.num_util_levels;
+	} else {
+		utils = fab_dev->node_info->agg_params.util_levels;
+		num_util_levels =
+			fab_dev->node_info->agg_params.num_util_levels;
+	}
+
+	sum_ab = bus_dev->node_bw[ctx].sum_ab;
+	max_ib = bus_dev->node_bw[ctx].max_ib;
+
+	for (i = 0; i < num_util_levels; i++) {
+		if (sum_ab < utils[i].threshold) {
+			util_fact = utils[i].util_fact;
+			break;
+		}
+	}
+	if (i == num_util_levels)
+		util_fact = utils[(num_util_levels - 1)].util_fact;
+
+	vrail_comp = bus_dev->node_info->agg_params.vrail_comp ?
+			bus_dev->node_info->agg_params.vrail_comp :
+			fab_dev->node_info->agg_params.vrail_comp;
+
+	bus_dev->node_bw[ctx].vrail_used = vrail_comp;
+	bus_dev->node_bw[ctx].util_used = util_fact;
+
+	if (util_fact && (util_fact != 100)) {
+		sum_ab *= util_fact;
+		sum_ab = msm_bus_div64(100, sum_ab);
+	}
+
+	if (vrail_comp && (vrail_comp != 100)) {
+		max_ib *= 100;
+		max_ib = msm_bus_div64(vrail_comp, max_ib);
+	}
+
+	/* Account for multiple channels if any */
+	if (bus_dev->node_info->agg_params.num_aggports > 1)
+		sum_ab = msm_bus_div64(
+				bus_dev->node_info->agg_params.num_aggports,
+					sum_ab);
+
+	if (!bus_dev->node_info->agg_params.buswidth) {
+		MSM_BUS_WARN("No bus width found for %d. Using default\n",
+					bus_dev->node_info->id);
+		bus_dev->node_info->agg_params.buswidth = 8;
+	}
+
+	bw_max_hz = max(max_ib, sum_ab);
+	bw_max_hz = msm_bus_div64(bus_dev->node_info->agg_params.buswidth,
+					bw_max_hz);
+
+	return bw_max_hz;
+}
+
+static uint64_t legacy_agg_scheme(struct msm_bus_node_device_type *bus_dev,
+			struct msm_bus_node_device_type *fab_dev, int ctx)
+{
+	uint64_t max_ib;
+	uint64_t sum_ab;
+	uint64_t bw_max_hz;
+	uint32_t util_fact = 0;
+	uint32_t vrail_comp = 0;
+
+	/*
+	 *  Util_fact and vrail comp are obtained from fabric/Node's dts
+	 *  properties and are fixed for the entire performance range.
+	 *  They default to 100 if absent.
+	 *
+	 *  The clock frequency is computed as:
+	 *  Freq_hz = max((sum(ab) * util_fact)/num_chan, max(ib)/vrail_comp)
+	 *				/ bus-width
+	 */
+	util_fact = fab_dev->node_info->agg_params.util_levels[0].util_fact;
+	vrail_comp = fab_dev->node_info->agg_params.vrail_comp;
+
+	if (bus_dev->node_info->agg_params.num_util_levels)
+		util_fact =
+		bus_dev->node_info->agg_params.util_levels[0].util_fact ?
+		bus_dev->node_info->agg_params.util_levels[0].util_fact :
+		util_fact;
+
+	vrail_comp = bus_dev->node_info->agg_params.vrail_comp ?
+			bus_dev->node_info->agg_params.vrail_comp :
+			vrail_comp;
+
+	bus_dev->node_bw[ctx].vrail_used = vrail_comp;
+	bus_dev->node_bw[ctx].util_used = util_fact;
+	sum_ab = bus_dev->node_bw[ctx].sum_ab;
+	max_ib = bus_dev->node_bw[ctx].max_ib;
+
+	if (util_fact && (util_fact != 100)) {
+		sum_ab *= util_fact;
+		sum_ab = msm_bus_div64(100, sum_ab);
+	}
+
+	if (vrail_comp && (vrail_comp != 100)) {
+		max_ib *= 100;
+		max_ib = msm_bus_div64(vrail_comp, max_ib);
+	}
+
+	/* Account for multiple channels if any */
+	if (bus_dev->node_info->agg_params.num_aggports > 1)
+		sum_ab = msm_bus_div64(
+				bus_dev->node_info->agg_params.num_aggports,
+					sum_ab);
+
+	if (!bus_dev->node_info->agg_params.buswidth) {
+		MSM_BUS_WARN("No bus width found for %d. Using default\n",
+					bus_dev->node_info->id);
+		bus_dev->node_info->agg_params.buswidth = 8;
+	}
+
+	bw_max_hz = max(max_ib, sum_ab);
+	bw_max_hz = msm_bus_div64(bus_dev->node_info->agg_params.buswidth,
+					bw_max_hz);
+
+	return bw_max_hz;
+}
+
+static uint64_t aggregate_bus_req(struct msm_bus_node_device_type *bus_dev,
+									int ctx)
+{
+	uint64_t bw_hz = 0;
+	int i;
+	struct msm_bus_node_device_type *fab_dev = NULL;
+	uint32_t agg_scheme;
+	uint64_t max_ib = 0;
+	uint64_t max_ab = 0;
+	uint64_t sum_ab = 0;
+
+	if (!bus_dev || !to_msm_bus_node(bus_dev->node_info->bus_device)) {
+		MSM_BUS_ERR("Bus node pointer is Invalid");
+		goto exit_agg_bus_req;
+	}
+
+	bus_dev->node_bw[ctx].max_ib_cl_name = NULL;
+	bus_dev->node_bw[ctx].max_ab_cl_name = NULL;
+	fab_dev = to_msm_bus_node(bus_dev->node_info->bus_device);
+	for (i = 0; i < bus_dev->num_lnodes; i++) {
+		if (bus_dev->lnode_list[i].lnode_ib[ctx] > max_ib)
+			bus_dev->node_bw[ctx].max_ib_cl_name =
+					bus_dev->lnode_list[i].cl_name;
+		max_ib = max(max_ib, bus_dev->lnode_list[i].lnode_ib[ctx]);
+		if (bus_dev->lnode_list[i].lnode_ab[ctx] > max_ab) {
+			max_ab = bus_dev->lnode_list[i].lnode_ab[ctx];
+			bus_dev->node_bw[ctx].max_ab_cl_name =
+					bus_dev->lnode_list[i].cl_name;
+		}
+		sum_ab += bus_dev->lnode_list[i].lnode_ab[ctx];
+	}
+
+	bus_dev->node_bw[ctx].sum_ab = sum_ab;
+	bus_dev->node_bw[ctx].max_ib = max_ib;
+	bus_dev->node_bw[ctx].max_ab = max_ab;
+
+	if (bus_dev->node_info->agg_params.agg_scheme != AGG_SCHEME_NONE)
+		agg_scheme = bus_dev->node_info->agg_params.agg_scheme;
+	else
+		agg_scheme = fab_dev->node_info->agg_params.agg_scheme;
+
+	switch (agg_scheme) {
+	case AGG_SCHEME_1:
+		bw_hz = scheme1_agg_scheme(bus_dev, fab_dev, ctx);
+		break;
+	case AGG_SCHEME_LEG:
+		bw_hz = legacy_agg_scheme(bus_dev, fab_dev, ctx);
+		break;
+	}
+
+exit_agg_bus_req:
+	return bw_hz;
+}
+
+
+static void del_inp_list(struct list_head *list)
+{
+	struct rule_update_path_info *rule_node;
+	struct rule_update_path_info *rule_node_tmp;
+
+	list_for_each_entry_safe(rule_node, rule_node_tmp, list, link) {
+		list_del(&rule_node->link);
+		rule_node->added = false;
+	}
+}
+
+static void del_op_list(struct list_head *list)
+{
+	struct rule_apply_rcm_info *rule;
+	struct rule_apply_rcm_info *rule_tmp;
+
+	list_for_each_entry_safe(rule, rule_tmp, list, link)
+		list_del(&rule->link);
+}
+
+static int msm_bus_apply_rules(struct list_head *list, bool after_clk_commit)
+{
+	struct rule_apply_rcm_info *rule;
+	struct device *dev = NULL;
+	struct msm_bus_node_device_type *dev_info = NULL;
+	int ret = 0;
+
+	list_for_each_entry(rule, list, link) {
+		if (rule->after_clk_commit != after_clk_commit)
+			continue;
+
+		dev = bus_find_device(&msm_bus_type, NULL,
+				(void *) &rule->id,
+				msm_bus_device_match_adhoc);
+
+		if (!dev) {
+			MSM_BUS_ERR("Can't find dev node for %d", rule->id);
+			continue;
+		}
+		dev_info = to_msm_bus_node(dev);
+
+		ret = msm_bus_enable_limiter(dev_info, rule->throttle,
+							rule->lim_bw);
+		if (ret)
+			MSM_BUS_ERR("Failed to set limiter for %d", rule->id);
+	}
+
+	return ret;
+}
+
+static void commit_data(void)
+{
+	bool rules_registered = msm_rule_are_rules_registered();
+
+	if (rules_registered) {
+		msm_rules_update_path(&input_list, &apply_list);
+		msm_bus_apply_rules(&apply_list, false);
+	}
+
+	msm_bus_commit_data(&commit_list);
+
+	if (rules_registered) {
+		msm_bus_apply_rules(&apply_list, true);
+		del_inp_list(&input_list);
+		del_op_list(&apply_list);
+	}
+	INIT_LIST_HEAD(&input_list);
+	INIT_LIST_HEAD(&apply_list);
+	INIT_LIST_HEAD(&commit_list);
+}
+
+static void add_node_to_clist(struct msm_bus_node_device_type *node)
+{
+	struct msm_bus_node_device_type *node_parent =
+			to_msm_bus_node(node->node_info->bus_device);
+
+	if (!node->dirty) {
+		list_add_tail(&node->link, &commit_list);
+		node->dirty = true;
+	}
+
+	if (!node_parent->dirty) {
+		list_add_tail(&node_parent->link, &commit_list);
+		node_parent->dirty = true;
+	}
+}
+
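+/*
+ * Walk the lnode chain installed by getpath() from @src_dev towards
+ * @dest, record the new ib/ab votes at every hop, re-aggregate each
+ * node's clock request and queue the touched nodes for commit.
+ */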
+static int update_path(struct device *src_dev, int dest, uint64_t act_req_ib,
+			uint64_t act_req_bw, uint64_t slp_req_ib,
+			uint64_t slp_req_bw, uint64_t cur_ib, uint64_t cur_bw,
+			int src_idx, int ctx)
+{
+	struct device *next_dev = NULL;
+	struct link_node *lnode = NULL;
+	struct msm_bus_node_device_type *dev_info = NULL;
+	int curr_idx;
+	int ret = 0;
+	struct rule_update_path_info *rule_node;
+	bool rules_registered = msm_rule_are_rules_registered();
+
+	if (IS_ERR_OR_NULL(src_dev)) {
+		MSM_BUS_ERR("%s: No source device", __func__);
+		ret = -ENODEV;
+		goto exit_update_path;
+	}
+
+	next_dev = src_dev;
+
+	if (src_idx < 0) {
+		MSM_BUS_ERR("%s: Invalid lnode idx %d", __func__, src_idx);
+		ret = -ENXIO;
+		goto exit_update_path;
+	}
+	curr_idx = src_idx;
+
+	while (next_dev) {
+		int i;
+		dev_info = to_msm_bus_node(next_dev);
+
+		if (curr_idx >= dev_info->num_lnodes) {
+			MSM_BUS_ERR("%s: Invalid lnode Idx %d num lnodes %d",
+			 __func__, curr_idx, dev_info->num_lnodes);
+			ret = -ENXIO;
+			goto exit_update_path;
+		}
+
+		lnode = &dev_info->lnode_list[curr_idx];
+		if (!lnode) {
+			MSM_BUS_ERR("%s: Invalid lnode ptr lnode %d",
+				 __func__, curr_idx);
+			ret = -ENXIO;
+			goto exit_update_path;
+		}
+		lnode->lnode_ib[ACTIVE_CTX] = act_req_ib;
+		lnode->lnode_ab[ACTIVE_CTX] = act_req_bw;
+		lnode->lnode_ib[DUAL_CTX] = slp_req_ib;
+		lnode->lnode_ab[DUAL_CTX] = slp_req_bw;
+
+		for (i = 0; i < NUM_CTX; i++)
+			dev_info->node_bw[i].cur_clk_hz =
+					aggregate_bus_req(dev_info, i);
+
+		add_node_to_clist(dev_info);
+
+		if (rules_registered) {
+			rule_node = &dev_info->node_info->rule;
+			rule_node->id = dev_info->node_info->id;
+			rule_node->ib = dev_info->node_bw[ACTIVE_CTX].max_ib;
+			rule_node->ab = dev_info->node_bw[ACTIVE_CTX].sum_ab;
+			rule_node->clk =
+				dev_info->node_bw[ACTIVE_CTX].cur_clk_hz;
+			if (!rule_node->added) {
+				list_add_tail(&rule_node->link, &input_list);
+				rule_node->added = true;
+			}
+		}
+
+		next_dev = lnode->next_dev;
+		curr_idx = lnode->next;
+	}
+
+exit_update_path:
+	return ret;
+}
+
+static int remove_path(struct device *src_dev, int dst, uint64_t cur_ib,
+			uint64_t cur_ab, int src_idx, int active_only)
+{
+	struct device *next_dev = NULL;
+	struct link_node *lnode = NULL;
+	struct msm_bus_node_device_type *dev_info = NULL;
+	int ret = 0;
+	int cur_idx = src_idx;
+	int next_idx;
+
+	/*
+	 * Update the current path to zero out all requests from this
+	 * client on all paths.
+	 */
+	if (!src_dev) {
+		MSM_BUS_ERR("%s: Can't find source device", __func__);
+		ret = -ENODEV;
+		goto exit_remove_path;
+	}
+
+	ret = update_path(src_dev, dst, 0, 0, 0, 0, cur_ib, cur_ab, src_idx,
+							active_only);
+	if (ret) {
+		MSM_BUS_ERR("%s: Error zeroing out path ctx %d",
+					__func__, ACTIVE_CTX);
+		goto exit_remove_path;
+	}
+
+	next_dev = src_dev;
+
+	while (next_dev) {
+		dev_info = to_msm_bus_node(next_dev);
+		lnode = &dev_info->lnode_list[cur_idx];
+		next_idx = lnode->next;
+		next_dev = lnode->next_dev;
+		remove_lnode(dev_info, cur_idx);
+		cur_idx = next_idx;
+	}
+
+exit_remove_path:
+	return ret;
+}
+
+static void getpath_debug(int src, int curr, int active_only)
+{
+	struct device *dev_node;
+	struct device *dev_it;
+	unsigned int hop = 1;
+	int idx;
+	struct msm_bus_node_device_type *devinfo;
+	int i;
+
+	dev_node = bus_find_device(&msm_bus_type, NULL,
+				(void *) &src,
+				msm_bus_device_match_adhoc);
+
+	if (!dev_node) {
+		MSM_BUS_ERR("SRC NOT FOUND %d", src);
+		return;
+	}
+
+	idx = curr;
+	devinfo = to_msm_bus_node(dev_node);
+	dev_it = dev_node;
+
+	MSM_BUS_ERR("Route list Src %d", src);
+	while (dev_it) {
+		struct msm_bus_node_device_type *busdev =
+			to_msm_bus_node(devinfo->node_info->bus_device);
+
+		MSM_BUS_ERR("Hop[%d] at Device %d ctx %d", hop,
+					devinfo->node_info->id, active_only);
+
+		for (i = 0; i < NUM_CTX; i++) {
+			MSM_BUS_ERR("dev info sel ib %llu",
+						devinfo->node_bw[i].cur_clk_hz);
+			MSM_BUS_ERR("dev info sel ab %llu",
+						devinfo->node_bw[i].sum_ab);
+		}
+
+		dev_it = devinfo->lnode_list[idx].next_dev;
+		idx = devinfo->lnode_list[idx].next;
+		if (dev_it)
+			devinfo = to_msm_bus_node(dev_it);
+
+		MSM_BUS_ERR("Bus Device %d", busdev->node_info->id);
+		MSM_BUS_ERR("Bus Clock %llu", busdev->clk[active_only].rate);
+
+		if (idx < 0)
+			break;
+		hop++;
+	}
+}
+
+static void unregister_client_adhoc(uint32_t cl)
+{
+	int i;
+	struct msm_bus_scale_pdata *pdata;
+	int lnode, src, curr, dest;
+	uint64_t  cur_clk, cur_bw;
+	struct msm_bus_client *client;
+	struct device *src_dev;
+
+	rt_mutex_lock(&msm_bus_adhoc_lock);
+	if (!cl) {
+		MSM_BUS_ERR("%s: Null cl handle passed to unregister\n",
+				__func__);
+		goto exit_unregister_client;
+	}
+	client = handle_list.cl_list[cl];
+	pdata = client->pdata;
+	if (!pdata) {
+		MSM_BUS_ERR("%s: Null pdata passed to unregister\n",
+				__func__);
+		goto exit_unregister_client;
+	}
+
+	curr = client->curr;
+	if ((curr < 0) || (curr >= pdata->num_usecases)) {
+		MSM_BUS_ERR("Invalid index; defaulting curr to 0");
+		curr = 0;
+	}
+
+	MSM_BUS_DBG("%s: Unregistering client %p", __func__, client);
+
+	for (i = 0; i < pdata->usecase->num_paths; i++) {
+		src = client->pdata->usecase[curr].vectors[i].src;
+		dest = client->pdata->usecase[curr].vectors[i].dst;
+
+		lnode = client->src_pnode[i];
+		src_dev = client->src_devs[i];
+		cur_clk = client->pdata->usecase[curr].vectors[i].ib;
+		cur_bw = client->pdata->usecase[curr].vectors[i].ab;
+		remove_path(src_dev, dest, cur_clk, cur_bw, lnode,
+						pdata->active_only);
+	}
+	commit_data();
+	msm_bus_dbg_client_data(client->pdata, MSM_BUS_DBG_UNREGISTER, cl);
+	kfree(client->src_pnode);
+	kfree(client->src_devs);
+	kfree(client);
+	handle_list.cl_list[cl] = NULL;
+exit_unregister_client:
+	rt_mutex_unlock(&msm_bus_adhoc_lock);
+	return;
+}
+
+static int alloc_handle_lst(int size)
+{
+	int ret = 0;
+	struct msm_bus_client **t_cl_list;
+
+	if (!handle_list.num_entries) {
+		t_cl_list = kzalloc(sizeof(struct msm_bus_client *)
+			* NUM_CL_HANDLES, GFP_KERNEL);
+		if (ZERO_OR_NULL_PTR(t_cl_list)) {
+			ret = -ENOMEM;
+			MSM_BUS_ERR("%s: Failed to allocate handles list",
+								__func__);
+			goto exit_alloc_handle_lst;
+		}
+		handle_list.cl_list = t_cl_list;
+		handle_list.num_entries += NUM_CL_HANDLES;
+	} else {
+		t_cl_list = krealloc(handle_list.cl_list,
+				sizeof(struct msm_bus_client *) *
+				(handle_list.num_entries + NUM_CL_HANDLES),
+				GFP_KERNEL);
+		if (ZERO_OR_NULL_PTR(t_cl_list)) {
+			ret = -ENOMEM;
+			MSM_BUS_ERR("%s: Failed to allocate handles list",
+								__func__);
+			goto exit_alloc_handle_lst;
+		}
+
+		handle_list.cl_list = t_cl_list;
+		memset(&handle_list.cl_list[handle_list.num_entries], 0,
+			NUM_CL_HANDLES * sizeof(struct msm_bus_client *));
+		handle_list.num_entries += NUM_CL_HANDLES;
+	}
+exit_alloc_handle_lst:
+	return ret;
+}
+
+static uint32_t gen_handle(struct msm_bus_client *client)
+{
+	uint32_t handle = 0;
+	int i;
+	int ret = 0;
+
+	for (i = 0; i < handle_list.num_entries; i++) {
+		if (i && !handle_list.cl_list[i]) {
+			handle = i;
+			break;
+		}
+	}
+
+	if (!handle) {
+		ret = alloc_handle_lst(NUM_CL_HANDLES);
+
+		if (ret) {
+			MSM_BUS_ERR("%s: Failed to allocate handle list",
+							__func__);
+			goto exit_gen_handle;
+		}
+		handle = i + 1;
+	}
+	handle_list.cl_list[handle] = client;
+exit_gen_handle:
+	return handle;
+}
+
+static uint32_t register_client_adhoc(struct msm_bus_scale_pdata *pdata)
+{
+	int src, dest;
+	int i;
+	struct msm_bus_client *client = NULL;
+	int *lnode;
+	struct device *dev;
+	uint32_t handle = 0;
+
+	rt_mutex_lock(&msm_bus_adhoc_lock);
+	client = kzalloc(sizeof(struct msm_bus_client), GFP_KERNEL);
+	if (!client) {
+		MSM_BUS_ERR("%s: Error allocating client data", __func__);
+		goto exit_register_client;
+	}
+	client->pdata = pdata;
+
+	lnode = kzalloc(pdata->usecase->num_paths * sizeof(int), GFP_KERNEL);
+	if (ZERO_OR_NULL_PTR(lnode)) {
+		MSM_BUS_ERR("%s: Error allocating pathnode ptr!", __func__);
+		goto exit_lnode_malloc_fail;
+	}
+	client->src_pnode = lnode;
+
+	client->src_devs = kzalloc(pdata->usecase->num_paths *
+					sizeof(struct device *), GFP_KERNEL);
+	if (IS_ERR_OR_NULL(client->src_devs)) {
+		MSM_BUS_ERR("%s: Error allocating pathnode ptr!", __func__);
+		goto exit_src_dev_malloc_fail;
+	}
+	client->curr = -1;
+
+	for (i = 0; i < pdata->usecase->num_paths; i++) {
+		src = pdata->usecase->vectors[i].src;
+		dest = pdata->usecase->vectors[i].dst;
+
+		if ((src < 0) || (dest < 0)) {
+			MSM_BUS_ERR("%s:Invalid src/dst.src %d dest %d",
+				__func__, src, dest);
+			goto exit_invalid_data;
+		}
+		dev = bus_find_device(&msm_bus_type, NULL,
+				(void *) &src,
+				msm_bus_device_match_adhoc);
+		if (IS_ERR_OR_NULL(dev)) {
+			MSM_BUS_ERR("%s:Failed to find path.src %d dest %d",
+				__func__, src, dest);
+			goto exit_invalid_data;
+		}
+		client->src_devs[i] = dev;
+
+		lnode[i] = getpath(dev, dest, client->pdata->name);
+		if (lnode[i] < 0) {
+			MSM_BUS_ERR("%s:Failed to find path.src %d dest %d",
+				__func__, src, dest);
+			goto exit_invalid_data;
+		}
+	}
+
+	handle = gen_handle(client);
+	msm_bus_dbg_client_data(client->pdata, MSM_BUS_DBG_REGISTER,
+					handle);
+	MSM_BUS_DBG("%s:Client handle %d %s", __func__, handle,
+						client->pdata->name);
+	rt_mutex_unlock(&msm_bus_adhoc_lock);
+	return handle;
+exit_invalid_data:
+	kfree(client->src_devs);
+exit_src_dev_malloc_fail:
+	kfree(lnode);
+exit_lnode_malloc_fail:
+	kfree(client);
+exit_register_client:
+	rt_mutex_unlock(&msm_bus_adhoc_lock);
+	return handle;
+}
+
+static int update_client_paths(struct msm_bus_client *client, bool log_trns,
+							unsigned int idx)
+{
+	int lnode, src, dest, cur_idx;
+	uint64_t req_clk, req_bw, curr_clk, curr_bw, slp_clk, slp_bw;
+	int i, ret = 0;
+	struct msm_bus_scale_pdata *pdata;
+	struct device *src_dev;
+
+	if (!client) {
+		MSM_BUS_ERR("Client handle Null");
+		ret = -ENXIO;
+		goto exit_update_client_paths;
+	}
+
+	pdata = client->pdata;
+	if (!pdata) {
+		MSM_BUS_ERR("Client pdata Null");
+		ret = -ENXIO;
+		goto exit_update_client_paths;
+	}
+
+	cur_idx = client->curr;
+	client->curr = idx;
+	for (i = 0; i < pdata->usecase->num_paths; i++) {
+		src = pdata->usecase[idx].vectors[i].src;
+		dest = pdata->usecase[idx].vectors[i].dst;
+
+		lnode = client->src_pnode[i];
+		src_dev = client->src_devs[i];
+		req_clk = client->pdata->usecase[idx].vectors[i].ib;
+		req_bw = client->pdata->usecase[idx].vectors[i].ab;
+		if (cur_idx < 0) {
+			curr_clk = 0;
+			curr_bw = 0;
+		} else {
+			curr_clk =
+				client->pdata->usecase[cur_idx].vectors[i].ib;
+			curr_bw = client->pdata->usecase[cur_idx].vectors[i].ab;
+			MSM_BUS_DBG("%s:ab: %llu ib: %llu\n", __func__,
+					curr_bw, curr_clk);
+		}
+
+		if (pdata->active_only) {
+			slp_clk = 0;
+			slp_bw = 0;
+		} else {
+			slp_clk = req_clk;
+			slp_bw = req_bw;
+		}
+
+		ret = update_path(src_dev, dest, req_clk, req_bw, slp_clk,
+			slp_bw, curr_clk, curr_bw, lnode, pdata->active_only);
+
+		if (ret) {
+			MSM_BUS_ERR("%s: Update path failed! %d ctx %d\n",
+					__func__, ret, pdata->active_only);
+			goto exit_update_client_paths;
+		}
+
+		if (log_trns)
+			getpath_debug(src, lnode, pdata->active_only);
+	}
+	commit_data();
+exit_update_client_paths:
+	return ret;
+}
+
+static int update_context(uint32_t cl, bool active_only,
+					unsigned int ctx_idx)
+{
+	int ret = 0;
+	struct msm_bus_scale_pdata *pdata;
+	struct msm_bus_client *client;
+
+	rt_mutex_lock(&msm_bus_adhoc_lock);
+	if (!cl) {
+		MSM_BUS_ERR("%s: Invalid client handle %d", __func__, cl);
+		ret = -ENXIO;
+		goto exit_update_context;
+	}
+
+	client = handle_list.cl_list[cl];
+	if (!client) {
+		ret = -ENXIO;
+		goto exit_update_context;
+	}
+
+	pdata = client->pdata;
+	if (!pdata) {
+		ret = -ENXIO;
+		goto exit_update_context;
+	}
+	if (pdata->active_only == active_only) {
+		MSM_BUS_ERR("No change in context(%d==%d), skip\n",
+					pdata->active_only, active_only);
+		ret = -ENXIO;
+		goto exit_update_context;
+	}
+
+	if (ctx_idx >= pdata->num_usecases) {
+		MSM_BUS_ERR("Client %u passed invalid index: %d\n",
+			cl, ctx_idx);
+		ret = -ENXIO;
+		goto exit_update_context;
+	}
+
+	pdata->active_only = active_only;
+
+	msm_bus_dbg_client_data(client->pdata, ctx_idx, cl);
+	ret = update_client_paths(client, false, ctx_idx);
+	if (ret) {
+		pr_err("%s: Err updating path\n", __func__);
+		goto exit_update_context;
+	}
+
+	trace_bus_update_request_end(pdata->name);
+
+exit_update_context:
+	rt_mutex_unlock(&msm_bus_adhoc_lock);
+	return ret;
+}
+
+static int update_request_adhoc(uint32_t cl, unsigned int index)
+{
+	int ret = 0;
+	struct msm_bus_scale_pdata *pdata;
+	struct msm_bus_client *client;
+	const char *test_cl = "Null";
+	bool log_transaction = false;
+
+	rt_mutex_lock(&msm_bus_adhoc_lock);
+
+	if (!cl) {
+		MSM_BUS_ERR("%s: Invalid client handle %d", __func__, cl);
+		ret = -ENXIO;
+		goto exit_update_request;
+	}
+
+	client = handle_list.cl_list[cl];
+	if (!client) {
+		MSM_BUS_ERR("%s: Invalid client pointer ", __func__);
+		ret = -ENXIO;
+		goto exit_update_request;
+	}
+
+	pdata = client->pdata;
+	if (!pdata) {
+		MSM_BUS_ERR("%s: Client data Null.[client didn't register]",
+				__func__);
+		ret = -ENXIO;
+		goto exit_update_request;
+	}
+
+	if (index >= pdata->num_usecases) {
+		MSM_BUS_ERR("Client %u passed invalid index: %d\n",
+			cl, index);
+		ret = -ENXIO;
+		goto exit_update_request;
+	}
+
+	if (client->curr == index) {
+		MSM_BUS_DBG("%s: Not updating client request idx %d unchanged",
+				__func__, index);
+		goto exit_update_request;
+	}
+
+	if (!strcmp(test_cl, pdata->name))
+		log_transaction = true;
+
+	MSM_BUS_DBG("%s: cl: %u index: %d curr: %d num_paths: %d\n", __func__,
+		cl, index, client->curr, client->pdata->usecase->num_paths);
+	msm_bus_dbg_client_data(client->pdata, index, cl);
+	ret = update_client_paths(client, log_transaction, index);
+	if (ret) {
+		pr_err("%s: Err updating path\n", __func__);
+		goto exit_update_request;
+	}
+
+	trace_bus_update_request_end(pdata->name);
+
+exit_update_request:
+	rt_mutex_unlock(&msm_bus_adhoc_lock);
+	return ret;
+}
+
+static void free_cl_mem(struct msm_bus_client_handle *cl)
+{
+	if (cl) {
+		kfree(cl->name);
+		kfree(cl);
+		cl = NULL;
+	}
+}
+
+static int update_bw_adhoc(struct msm_bus_client_handle *cl, u64 ab, u64 ib)
+{
+	int ret = 0;
+	char *test_cl = "test-client";
+	bool log_transaction = false;
+	u64 slp_ib, slp_ab;
+
+	rt_mutex_lock(&msm_bus_adhoc_lock);
+
+	if (!cl) {
+		MSM_BUS_ERR("%s: Invalid client handle %p", __func__, cl);
+		ret = -ENXIO;
+		goto exit_update_request;
+	}
+
+	if (!strcmp(test_cl, cl->name))
+		log_transaction = true;
+
+	msm_bus_dbg_rec_transaction(cl, ab, ib);
+
+	if ((cl->cur_act_ib == ib) && (cl->cur_act_ab == ab)) {
+		MSM_BUS_DBG("%s:no change in request", cl->name);
+		goto exit_update_request;
+	}
+
+	if (cl->active_only) {
+		slp_ib = 0;
+		slp_ab = 0;
+	} else {
+		slp_ib = ib;
+		slp_ab = ab;
+	}
+
+	ret = update_path(cl->mas_dev, cl->slv, ib, ab, slp_ib, slp_ab,
+		cl->cur_act_ib, cl->cur_act_ab, cl->first_hop, cl->active_only);
+
+	if (ret) {
+		MSM_BUS_ERR("%s: Update path failed! %d active_only %d\n",
+				__func__, ret, cl->active_only);
+		goto exit_update_request;
+	}
+
+	commit_data();
+	cl->cur_act_ib = ib;
+	cl->cur_act_ab = ab;
+	cl->cur_slp_ib = slp_ib;
+	cl->cur_slp_ab = slp_ab;
+
+	if (log_transaction)
+		getpath_debug(cl->mas, cl->first_hop, cl->active_only);
+	trace_bus_update_request_end(cl->name);
+exit_update_request:
+	rt_mutex_unlock(&msm_bus_adhoc_lock);
+
+	return ret;
+}
+
+static int update_bw_context(struct msm_bus_client_handle *cl, u64 act_ab,
+				u64 act_ib, u64 slp_ib, u64 slp_ab)
+{
+	int ret = 0;
+
+	rt_mutex_lock(&msm_bus_adhoc_lock);
+	if (!cl) {
+		MSM_BUS_ERR("Invalid client handle %p", cl);
+		ret = -ENXIO;
+		goto exit_change_context;
+	}
+
+	if ((cl->cur_act_ib == act_ib) &&
+		(cl->cur_act_ab == act_ab) &&
+		(cl->cur_slp_ib == slp_ib) &&
+		(cl->cur_slp_ab == slp_ab)) {
+		MSM_BUS_ERR("No change in vote");
+		goto exit_change_context;
+	}
+
+	if (!slp_ab && !slp_ib)
+		cl->active_only = true;
+	msm_bus_dbg_rec_transaction(cl, act_ab, act_ib);
+	ret = update_path(cl->mas_dev, cl->slv, act_ib, act_ab, slp_ib, slp_ab,
+				cl->cur_act_ib, cl->cur_act_ab, cl->first_hop,
+				cl->active_only);
+	if (ret) {
+		MSM_BUS_ERR("%s: Update path failed! %d active_only %d\n",
+				__func__, ret, cl->active_only);
+		goto exit_change_context;
+	}
+	commit_data();
+	cl->cur_act_ib = act_ib;
+	cl->cur_act_ab = act_ab;
+	cl->cur_slp_ib = slp_ib;
+	cl->cur_slp_ab = slp_ab;
+	trace_bus_update_request_end(cl->name);
+exit_change_context:
+	rt_mutex_unlock(&msm_bus_adhoc_lock);
+	return ret;
+}
+
+static void unregister_adhoc(struct msm_bus_client_handle *cl)
+{
+	rt_mutex_lock(&msm_bus_adhoc_lock);
+	if (!cl) {
+		MSM_BUS_ERR("%s: Null cl handle passed unregister\n",
+				__func__);
+		goto exit_unregister_client;
+	}
+
+	MSM_BUS_DBG("%s: Unregistering client %p", __func__, cl);
+
+	remove_path(cl->mas_dev, cl->slv, cl->cur_act_ib, cl->cur_act_ab,
+				cl->first_hop, cl->active_only);
+	commit_data();
+	msm_bus_dbg_remove_client(cl);
+	kfree(cl->name);
+	kfree(cl);
+exit_unregister_client:
+	rt_mutex_unlock(&msm_bus_adhoc_lock);
+	return;
+}
+
+static struct msm_bus_client_handle*
+register_adhoc(uint32_t mas, uint32_t slv, char *name, bool active_only)
+{
+	struct msm_bus_client_handle *client = NULL;
+	int len = 0;
+
+	rt_mutex_lock(&msm_bus_adhoc_lock);
+
+	if (!(mas && slv && name)) {
+		pr_err("%s: Error: src dst name num_paths are required",
+								 __func__);
+		goto exit_register;
+	}
+
+	client = kzalloc(sizeof(struct msm_bus_client_handle), GFP_KERNEL);
+	if (!client) {
+		MSM_BUS_ERR("%s: Error allocating client data", __func__);
+		goto exit_register;
+	}
+
+	len = strnlen(name, MAX_STR_CL);
+	client->name = kzalloc((len + 1), GFP_KERNEL);
+	if (!client->name) {
+		MSM_BUS_ERR("%s: Error allocating client name buf", __func__);
+		free_cl_mem(client);
+		goto exit_register;
+	}
+	strlcpy(client->name, name, MAX_STR_CL);
+	client->active_only = active_only;
+
+	client->mas = mas;
+	client->slv = slv;
+
+	client->mas_dev = bus_find_device(&msm_bus_type, NULL,
+					(void *) &mas,
+					msm_bus_device_match_adhoc);
+	if (IS_ERR_OR_NULL(client->mas_dev)) {
+		MSM_BUS_ERR("%s:Failed to find path.src %d dest %d",
+			__func__, client->mas, client->slv);
+		free_cl_mem(client);
+		goto exit_register;
+	}
+
+	client->first_hop = getpath(client->mas_dev, client->slv, client->name);
+	if (client->first_hop < 0) {
+		MSM_BUS_ERR("%s:Failed to find path.src %d dest %d",
+			__func__, client->mas, client->slv);
+		free_cl_mem(client);
+		goto exit_register;
+	}
+
+	MSM_BUS_DBG("%s:Client handle %p %s", __func__, client,
+						client->name);
+	msm_bus_dbg_add_client(client);
+exit_register:
+	rt_mutex_unlock(&msm_bus_adhoc_lock);
+	return client;
+}
+/**
+ * msm_bus_arb_setops_adhoc() - Set up the bus arbitration ops
+ * @arb_ops: pointer to the arb ops.
+ */
+void msm_bus_arb_setops_adhoc(struct msm_bus_arb_ops *arb_ops)
+{
+	arb_ops->register_client = register_client_adhoc;
+	arb_ops->update_request = update_request_adhoc;
+	arb_ops->unregister_client = unregister_client_adhoc;
+	arb_ops->update_context = update_context;
+
+	arb_ops->register_cl = register_adhoc;
+	arb_ops->unregister = unregister_adhoc;
+	arb_ops->update_bw = update_bw_adhoc;
+	arb_ops->update_bw_context = update_bw_context;
+}
diff -Nruw linux-4.4.115-fbx/drivers/soc/qcom/msm_bus./msm_bus_bimc_adhoc.c linux-4.4.115-fbx/drivers/soc/qcom/msm_bus/msm_bus_bimc_adhoc.c
--- linux-4.4.115-fbx/drivers/soc/qcom/msm_bus./msm_bus_bimc_adhoc.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/soc/qcom/msm_bus/msm_bus_bimc_adhoc.c	2019-01-22 16:16:26.659274986 +0100
@@ -0,0 +1,609 @@
+/* Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "AXI: BIMC: %s(): " fmt, __func__
+
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/msm-bus-board.h>
+#include "msm_bus_core.h"
+#include "msm_bus_bimc.h"
+#include "msm_bus_adhoc.h"
+#include <trace/events/trace_msm_bus.h>
+
+/* M_Generic */
+
+enum bke_sw {
+	BKE_OFF = 0,
+	BKE_ON = 1,
+};
+
+#define M_REG_BASE(b)		((b) + 0x00008000)
+
+#define M_MODE_ADDR(b, n) \
+		(M_REG_BASE(b) + (0x4000 * (n)) + 0x00000210)
+enum bimc_m_mode {
+	M_MODE_RMSK				= 0xf0000011,
+	M_MODE_WR_GATHER_BEATS_BMSK		= 0xf0000000,
+	M_MODE_WR_GATHER_BEATS_SHFT		= 0x1c,
+	M_MODE_NARROW_WR_BMSK			= 0x10,
+	M_MODE_NARROW_WR_SHFT			= 0x4,
+	M_MODE_ORDERING_MODEL_BMSK		= 0x1,
+	M_MODE_ORDERING_MODEL_SHFT		= 0x0,
+};
+
+#define M_PRIOLVL_OVERRIDE_ADDR(b, n) \
+	(M_REG_BASE(b) + (0x4000 * (n)) + 0x00000230)
+enum bimc_m_priolvl_override {
+	M_PRIOLVL_OVERRIDE_RMSK			= 0x301,
+	M_PRIOLVL_OVERRIDE_BMSK			= 0x300,
+	M_PRIOLVL_OVERRIDE_SHFT			= 0x8,
+	M_PRIOLVL_OVERRIDE_OVERRIDE_PRIOLVL_BMSK	= 0x1,
+	M_PRIOLVL_OVERRIDE_OVERRIDE_PRIOLVL_SHFT	= 0x0,
+};
+
+#define M_RD_CMD_OVERRIDE_ADDR(b, n) \
+	(M_REG_BASE(b) + (0x4000 * (n)) + 0x00000240)
+enum bimc_m_read_command_override {
+	M_RD_CMD_OVERRIDE_RMSK			= 0x3071f7f,
+	M_RD_CMD_OVERRIDE_AREQPRIO_BMSK		= 0x3000000,
+	M_RD_CMD_OVERRIDE_AREQPRIO_SHFT		= 0x18,
+	M_RD_CMD_OVERRIDE_AMEMTYPE_BMSK		= 0x70000,
+	M_RD_CMD_OVERRIDE_AMEMTYPE_SHFT		= 0x10,
+	M_RD_CMD_OVERRIDE_ATRANSIENT_BMSK		= 0x1000,
+	M_RD_CMD_OVERRIDE_ATRANSIENT_SHFT		= 0xc,
+	M_RD_CMD_OVERRIDE_ASHARED_BMSK		= 0x800,
+	M_RD_CMD_OVERRIDE_ASHARED_SHFT		= 0xb,
+	M_RD_CMD_OVERRIDE_AREDIRECT_BMSK		= 0x400,
+	M_RD_CMD_OVERRIDE_AREDIRECT_SHFT		= 0xa,
+	M_RD_CMD_OVERRIDE_AOOO_BMSK			= 0x200,
+	M_RD_CMD_OVERRIDE_AOOO_SHFT			= 0x9,
+	M_RD_CMD_OVERRIDE_AINNERSHARED_BMSK		= 0x100,
+	M_RD_CMD_OVERRIDE_AINNERSHARED_SHFT		= 0x8,
+	M_RD_CMD_OVERRIDE_OVERRIDE_AREQPRIO_BMSK	= 0x40,
+	M_RD_CMD_OVERRIDE_OVERRIDE_AREQPRIO_SHFT	= 0x6,
+	M_RD_CMD_OVERRIDE_OVERRIDE_ATRANSIENT_BMSK	= 0x20,
+	M_RD_CMD_OVERRIDE_OVERRIDE_ATRANSIENT_SHFT	= 0x5,
+	M_RD_CMD_OVERRIDE_OVERRIDE_AMEMTYPE_BMSK	= 0x10,
+	M_RD_CMD_OVERRIDE_OVERRIDE_AMEMTYPE_SHFT	= 0x4,
+	M_RD_CMD_OVERRIDE_OVERRIDE_ASHARED_BMSK	= 0x8,
+	M_RD_CMD_OVERRIDE_OVERRIDE_ASHARED_SHFT	= 0x3,
+	M_RD_CMD_OVERRIDE_OVERRIDE_AREDIRECT_BMSK	= 0x4,
+	M_RD_CMD_OVERRIDE_OVERRIDE_AREDIRECT_SHFT	= 0x2,
+	M_RD_CMD_OVERRIDE_OVERRIDE_AOOO_BMSK		= 0x2,
+	M_RD_CMD_OVERRIDE_OVERRIDE_AOOO_SHFT		= 0x1,
+	M_RD_CMD_OVERRIDE_OVERRIDE_AINNERSHARED_BMSK	= 0x1,
+	M_RD_CMD_OVERRIDE_OVERRIDE_AINNERSHARED_SHFT	= 0x0,
+};
+
+#define M_WR_CMD_OVERRIDE_ADDR(b, n) \
+	(M_REG_BASE(b) + (0x4000 * (n)) + 0x00000250)
+enum bimc_m_write_command_override {
+	M_WR_CMD_OVERRIDE_RMSK			= 0x3071f7f,
+	M_WR_CMD_OVERRIDE_AREQPRIO_BMSK		= 0x3000000,
+	M_WR_CMD_OVERRIDE_AREQPRIO_SHFT		= 0x18,
+	M_WR_CMD_OVERRIDE_AMEMTYPE_BMSK		= 0x70000,
+	M_WR_CMD_OVERRIDE_AMEMTYPE_SHFT		= 0x10,
+	M_WR_CMD_OVERRIDE_ATRANSIENT_BMSK	= 0x1000,
+	M_WR_CMD_OVERRIDE_ATRANSIENT_SHFT	= 0xc,
+	M_WR_CMD_OVERRIDE_ASHARED_BMSK		= 0x800,
+	M_WR_CMD_OVERRIDE_ASHARED_SHFT		= 0xb,
+	M_WR_CMD_OVERRIDE_AREDIRECT_BMSK		= 0x400,
+	M_WR_CMD_OVERRIDE_AREDIRECT_SHFT		= 0xa,
+	M_WR_CMD_OVERRIDE_AOOO_BMSK			= 0x200,
+	M_WR_CMD_OVERRIDE_AOOO_SHFT			= 0x9,
+	M_WR_CMD_OVERRIDE_AINNERSHARED_BMSK		= 0x100,
+	M_WR_CMD_OVERRIDE_AINNERSHARED_SHFT		= 0x8,
+	M_WR_CMD_OVERRIDE_OVERRIDE_AREQPRIO_BMSK	= 0x40,
+	M_WR_CMD_OVERRIDE_OVERRIDE_AREQPRIO_SHFT	= 0x6,
+	M_WR_CMD_OVERRIDE_OVERRIDE_ATRANSIENT_BMSK	= 0x20,
+	M_WR_CMD_OVERRIDE_OVERRIDE_ATRANSIENT_SHFT	= 0x5,
+	M_WR_CMD_OVERRIDE_OVERRIDE_AMEMTYPE_BMSK	= 0x10,
+	M_WR_CMD_OVERRIDE_OVERRIDE_AMEMTYPE_SHFT	= 0x4,
+	M_WR_CMD_OVERRIDE_OVERRIDE_ASHARED_BMSK	= 0x8,
+	M_WR_CMD_OVERRIDE_OVERRIDE_ASHARED_SHFT	= 0x3,
+	M_WR_CMD_OVERRIDE_OVERRIDE_AREDIRECT_BMSK	= 0x4,
+	M_WR_CMD_OVERRIDE_OVERRIDE_AREDIRECT_SHFT	= 0x2,
+	M_WR_CMD_OVERRIDE_OVERRIDE_AOOO_BMSK	= 0x2,
+	M_WR_CMD_OVERRIDE_OVERRIDE_AOOO_SHFT	= 0x1,
+	M_WR_CMD_OVERRIDE_OVERRIDE_AINNERSHARED_BMSK	= 0x1,
+	M_WR_CMD_OVERRIDE_OVERRIDE_AINNERSHARED_SHFT	= 0x0,
+};
+
+#define M_BKE_EN_ADDR(b, n) \
+	(M_REG_BASE(b) + (0x4000 * (n)) + 0x00000300)
+enum bimc_m_bke_en {
+	M_BKE_EN_RMSK			= 0x1,
+	M_BKE_EN_EN_BMSK		= 0x1,
+	M_BKE_EN_EN_SHFT		= 0x0,
+};
+
+/* Grant Period registers */
+#define M_BKE_GP_ADDR(b, n) \
+	(M_REG_BASE(b) + (0x4000 * (n)) + 0x00000304)
+enum bimc_m_bke_grant_period {
+	M_BKE_GP_RMSK		= 0x3ff,
+	M_BKE_GP_GP_BMSK	= 0x3ff,
+	M_BKE_GP_GP_SHFT	= 0x0,
+};
+
+/*
+ * Grant count register.
+ * The grant count register holds a signed 16-bit value,
+ * so only the range 0-0x7fff is usable.
+ */
+#define M_BKE_GC_ADDR(b, n) \
+	(M_REG_BASE(b) + (0x4000 * (n)) + 0x00000308)
+enum bimc_m_bke_grant_count {
+	M_BKE_GC_RMSK			= 0xffff,
+	M_BKE_GC_GC_BMSK		= 0xffff,
+	M_BKE_GC_GC_SHFT		= 0x0,
+};
+
+/* Threshold High Registers */
+#define M_BKE_THH_ADDR(b, n) \
+	(M_REG_BASE(b) + (0x4000 * (n)) + 0x00000320)
+enum bimc_m_bke_thresh_high {
+	M_BKE_THH_RMSK		= 0xffff,
+	M_BKE_THH_THRESH_BMSK	= 0xffff,
+	M_BKE_THH_THRESH_SHFT	= 0x0,
+};
+
+/* Threshold Medium Registers */
+#define M_BKE_THM_ADDR(b, n) \
+	(M_REG_BASE(b) + (0x4000 * (n)) + 0x00000324)
+enum bimc_m_bke_thresh_medium {
+	M_BKE_THM_RMSK		= 0xffff,
+	M_BKE_THM_THRESH_BMSK	= 0xffff,
+	M_BKE_THM_THRESH_SHFT	= 0x0,
+};
+
+/* Threshold Low Registers */
+#define M_BKE_THL_ADDR(b, n) \
+	(M_REG_BASE(b) + (0x4000 * (n)) + 0x00000328)
+enum bimc_m_bke_thresh_low {
+	M_BKE_THL_RMSK			= 0xffff,
+	M_BKE_THL_THRESH_BMSK		= 0xffff,
+	M_BKE_THL_THRESH_SHFT		= 0x0,
+};
+
+#define NUM_HEALTH_LEVEL	(4)
+#define M_BKE_HEALTH_0_CONFIG_ADDR(b, n) \
+	(M_REG_BASE(b) + (0x4000 * (n)) + 0x00000340)
+enum bimc_m_bke_health_0 {
+	M_BKE_HEALTH_0_CONFIG_RMSK			= 0x80000303,
+	M_BKE_HEALTH_0_CONFIG_LIMIT_CMDS_BMSK		= 0x80000000,
+	M_BKE_HEALTH_0_CONFIG_LIMIT_CMDS_SHFT		= 0x1f,
+	M_BKE_HEALTH_0_CONFIG_AREQPRIO_BMSK		= 0x300,
+	M_BKE_HEALTH_0_CONFIG_AREQPRIO_SHFT		= 0x8,
+	M_BKE_HEALTH_0_CONFIG_PRIOLVL_BMSK		= 0x3,
+	M_BKE_HEALTH_0_CONFIG_PRIOLVL_SHFT		= 0x0,
+};
+
+#define M_BKE_HEALTH_1_CONFIG_ADDR(b, n) \
+	(M_REG_BASE(b) + (0x4000 * (n)) + 0x00000344)
+enum bimc_m_bke_health_1 {
+	M_BKE_HEALTH_1_CONFIG_RMSK			= 0x80000303,
+	M_BKE_HEALTH_1_CONFIG_LIMIT_CMDS_BMSK		= 0x80000000,
+	M_BKE_HEALTH_1_CONFIG_LIMIT_CMDS_SHFT		= 0x1f,
+	M_BKE_HEALTH_1_CONFIG_AREQPRIO_BMSK		= 0x300,
+	M_BKE_HEALTH_1_CONFIG_AREQPRIO_SHFT		= 0x8,
+	M_BKE_HEALTH_1_CONFIG_PRIOLVL_BMSK		= 0x3,
+	M_BKE_HEALTH_1_CONFIG_PRIOLVL_SHFT		= 0x0,
+};
+
+#define M_BKE_HEALTH_2_CONFIG_ADDR(b, n) \
+	(M_REG_BASE(b) + (0x4000 * (n)) + 0x00000348)
+enum bimc_m_bke_health_2 {
+	M_BKE_HEALTH_2_CONFIG_RMSK			= 0x80000303,
+	M_BKE_HEALTH_2_CONFIG_LIMIT_CMDS_BMSK		= 0x80000000,
+	M_BKE_HEALTH_2_CONFIG_LIMIT_CMDS_SHFT		= 0x1f,
+	M_BKE_HEALTH_2_CONFIG_AREQPRIO_BMSK		= 0x300,
+	M_BKE_HEALTH_2_CONFIG_AREQPRIO_SHFT		= 0x8,
+	M_BKE_HEALTH_2_CONFIG_PRIOLVL_BMSK		= 0x3,
+	M_BKE_HEALTH_2_CONFIG_PRIOLVL_SHFT		= 0x0,
+};
+
+#define M_BKE_HEALTH_3_CONFIG_ADDR(b, n) \
+	(M_REG_BASE(b) + (0x4000 * (n)) + 0x0000034c)
+enum bimc_m_bke_health_3 {
+	M_BKE_HEALTH_3_CONFIG_RMSK			= 0x303,
+	M_BKE_HEALTH_3_CONFIG_AREQPRIO_BMSK	= 0x300,
+	M_BKE_HEALTH_3_CONFIG_AREQPRIO_SHFT	= 0x8,
+	M_BKE_HEALTH_3_CONFIG_PRIOLVL_BMSK		= 0x3,
+	M_BKE_HEALTH_3_CONFIG_PRIOLVL_SHFT		= 0x0,
+};
+
+#define BKE_HEALTH_MASK \
+	(M_BKE_HEALTH_0_CONFIG_LIMIT_CMDS_BMSK |\
+	M_BKE_HEALTH_0_CONFIG_AREQPRIO_BMSK |\
+	M_BKE_HEALTH_0_CONFIG_PRIOLVL_BMSK)
+
+#define BKE_HEALTH_VAL(limit, areq, plvl) \
+	((((limit) << M_BKE_HEALTH_0_CONFIG_LIMIT_CMDS_SHFT) & \
+	M_BKE_HEALTH_0_CONFIG_LIMIT_CMDS_BMSK) | \
+	(((areq) << M_BKE_HEALTH_0_CONFIG_AREQPRIO_SHFT) & \
+	M_BKE_HEALTH_0_CONFIG_AREQPRIO_BMSK) | \
+	(((plvl) << M_BKE_HEALTH_0_CONFIG_PRIOLVL_SHFT) & \
+	M_BKE_HEALTH_0_CONFIG_PRIOLVL_BMSK))
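+
+/*
+ * Worked example (illustration only): BKE_HEALTH_VAL(1, 2, 3) packs
+ * limit_commands = 1 into bit 31, areq_prio = 2 into bits [9:8] and
+ * prio_level = 3 into bits [1:0], i.e.
+ * (1 << 0x1f) | (2 << 0x8) | 3 == 0x80000203.
+ */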
+
+#define MAX_GRANT_PERIOD \
+	(M_BKE_GP_GP_BMSK >> \
+	M_BKE_GP_GP_SHFT)
+
+#define MAX_GC \
+	(M_BKE_GC_GC_BMSK >> \
+	(M_BKE_GC_GC_SHFT + 1))
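+
+/*
+ * Note: MAX_GC evaluates to 0xffff >> 1 == 0x7fff, which matches the
+ * comment on M_BKE_GC_ADDR above: the grant count register holds a
+ * signed 16-bit value, so only 0-0x7fff is usable.
+ */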
+
+static int bimc_div(int64_t *a, uint32_t b)
+{
+	if ((*a > 0) && (*a < b)) {
+		*a = 0;
+		return 1;
+	} else {
+		return do_div(*a, b);
+	}
+}
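+
+/*
+ * bimc_div() semantics, sketched: do_div() stores the quotient back in *a
+ * and returns the remainder, e.g. *a = 2500000, b = 1000000 leaves *a == 2
+ * and returns 500000. The special case above truncates any value strictly
+ * between 0 and b to zero, returning 1 as a nonzero-remainder indicator.
+ */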
+
+static void set_bke_en(void __iomem *addr, uint32_t index,
+		bool req)
+{
+	uint32_t old_val, new_val;
+
+	old_val = readl_relaxed(M_BKE_EN_ADDR(addr, index));
+	new_val = req << M_BKE_EN_EN_SHFT;
+	if ((old_val & M_BKE_EN_RMSK) == (new_val))
+		return;
+	writel_relaxed(((old_val & ~(M_BKE_EN_EN_BMSK)) | (new_val &
+				M_BKE_EN_EN_BMSK)), M_BKE_EN_ADDR(addr, index));
+	/* Ensure that the BKE enable register is programmed before returning */
+	wmb();
+}
+
+static void set_health_reg(void __iomem *addr, uint32_t rmsk,
+	uint8_t index, struct msm_bus_bimc_qos_mode *qmode)
+{
+	uint32_t reg_val, val0, val;
+
+	/* Note, addr is already passed with right mas_index */
+	reg_val = readl_relaxed(addr) & rmsk;
+	val0 = BKE_HEALTH_VAL(qmode->rl.qhealth[index].limit_commands,
+		qmode->rl.qhealth[index].areq_prio,
+		qmode->rl.qhealth[index].prio_level);
+	val = ((reg_val & (~(BKE_HEALTH_MASK))) | (val0 & BKE_HEALTH_MASK));
+	writel_relaxed(val, addr);
+	/*
+	 * Ensure that priorities for regulator/limiter modes are
+	 * set before returning
+	 */
+	wmb();
+}
+
+static void msm_bus_bimc_set_qos_prio(void __iomem *base,
+	uint32_t mas_index, uint8_t qmode_sel,
+	struct msm_bus_bimc_qos_mode *qmode)
+{
+
+	switch (qmode_sel) {
+	case BIMC_QOS_MODE_FIXED:
+	case BIMC_QOS_MODE_REGULATOR:
+	case BIMC_QOS_MODE_LIMITER:
+		set_health_reg(M_BKE_HEALTH_3_CONFIG_ADDR(base,
+			mas_index), M_BKE_HEALTH_3_CONFIG_RMSK, 3, qmode);
+		set_health_reg(M_BKE_HEALTH_2_CONFIG_ADDR(base,
+			mas_index), M_BKE_HEALTH_2_CONFIG_RMSK, 2, qmode);
+		set_health_reg(M_BKE_HEALTH_1_CONFIG_ADDR(base,
+			mas_index), M_BKE_HEALTH_1_CONFIG_RMSK, 1, qmode);
+		set_health_reg(M_BKE_HEALTH_0_CONFIG_ADDR(base,
+			mas_index), M_BKE_HEALTH_0_CONFIG_RMSK, 0, qmode);
+		set_bke_en(base, mas_index, true);
+		break;
+	case BIMC_QOS_MODE_BYPASS:
+		set_bke_en(base, mas_index, false);
+		break;
+	default:
+		break;
+	}
+}
+
+static void set_qos_bw_regs(void __iomem *baddr, uint32_t mas_index,
+	int32_t th, int32_t tm, int32_t tl, uint32_t gp,
+	uint32_t gc)
+{
+	int32_t reg_val, val;
+	int32_t bke_reg_val;
+	int16_t val2;
+
+	/* Disable BKE before writing to registers as per spec */
+	bke_reg_val = readl_relaxed(M_BKE_EN_ADDR(baddr, mas_index));
+	writel_relaxed((bke_reg_val & ~(M_BKE_EN_EN_BMSK)),
+		M_BKE_EN_ADDR(baddr, mas_index));
+
+	/* Write values of registers calculated */
+	reg_val = readl_relaxed(M_BKE_GP_ADDR(baddr, mas_index))
+		& M_BKE_GP_RMSK;
+	val =  gp << M_BKE_GP_GP_SHFT;
+	writel_relaxed(((reg_val & ~(M_BKE_GP_GP_BMSK)) | (val &
+		M_BKE_GP_GP_BMSK)), M_BKE_GP_ADDR(baddr, mas_index));
+
+	reg_val = readl_relaxed(M_BKE_GC_ADDR(baddr, mas_index)) &
+		M_BKE_GC_RMSK;
+	val =  gc << M_BKE_GC_GC_SHFT;
+	writel_relaxed(((reg_val & ~(M_BKE_GC_GC_BMSK)) | (val &
+		M_BKE_GC_GC_BMSK)), M_BKE_GC_ADDR(baddr, mas_index));
+
+	reg_val = readl_relaxed(M_BKE_THH_ADDR(baddr, mas_index)) &
+		M_BKE_THH_RMSK;
+	val =  th << M_BKE_THH_THRESH_SHFT;
+	writel_relaxed(((reg_val & ~(M_BKE_THH_THRESH_BMSK)) | (val &
+		M_BKE_THH_THRESH_BMSK)), M_BKE_THH_ADDR(baddr, mas_index));
+
+	reg_val = readl_relaxed(M_BKE_THM_ADDR(baddr, mas_index)) &
+		M_BKE_THM_RMSK;
+	val2 =	tm << M_BKE_THM_THRESH_SHFT;
+	writel_relaxed(((reg_val & ~(M_BKE_THM_THRESH_BMSK)) | (val2 &
+		M_BKE_THM_THRESH_BMSK)), M_BKE_THM_ADDR(baddr, mas_index));
+
+	reg_val = readl_relaxed(M_BKE_THL_ADDR(baddr, mas_index)) &
+		M_BKE_THL_RMSK;
+	val2 =	tl << M_BKE_THL_THRESH_SHFT;
+	writel_relaxed(((reg_val & ~(M_BKE_THL_THRESH_BMSK)) |
+		(val2 & M_BKE_THL_THRESH_BMSK)), M_BKE_THL_ADDR(baddr,
+		mas_index));
+
+	/* Ensure that all bandwidth register writes have completed
+	 * before returning
+	 */
+	wmb();
+}
+
+static void bimc_set_static_qos_bw(void __iomem *base, unsigned int qos_freq,
+	int mport, struct msm_bus_bimc_qos_bw *qbw)
+{
+	int32_t bw_mbps, thh = 0, thm, thl, gc;
+	int32_t gp;
+	u64 temp;
+
+	if (qos_freq == 0) {
+		MSM_BUS_DBG("No QoS Frequency.\n");
+		return;
+	}
+
+	if (!(qbw->bw && qbw->gp)) {
+		MSM_BUS_DBG("No QoS Bandwidth or Window size\n");
+		return;
+	}
+
+	/* Convert bandwidth to MBPS */
+	temp = qbw->bw;
+	bimc_div(&temp, 1000000);
+	bw_mbps = temp;
+
+	/*
+	 * Grant period in clock cycles: the grant period in the bandwidth
+	 * structure is in nanoseconds and the QoS frequency is in KHz, so
+	 * divide the product by 10^6 (1000 * NSEC_PER_USEC) to get cycles.
+	 */
+	gp = (qos_freq * qbw->gp) / (1000 * NSEC_PER_USEC);
+
+	/*
+	 * Grant count = BW in MBps * grant period in microseconds
+	 */
+	gc = bw_mbps * (qbw->gp / NSEC_PER_USEC);
+	gc = min(gc, MAX_GC);
+
+	/* Medium threshold = -((Medium Threshold percentage *
+	 * Grant count) / 100)
+	 */
+	thm = -((qbw->thmp * gc) / 100);
+	qbw->thm = thm;
+
+	/* Low threshold = -(Grant count) */
+	thl = -gc;
+	qbw->thl = thl;
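+
+	/*
+	 * Worked example (illustrative numbers, not from any board file):
+	 * qos_freq = 19200 KHz, gp = 5000 ns, bw = 400 MB/s, thmp = 50
+	 * gives gp = 19200 * 5000 / 10^6 = 96 cycles,
+	 * gc = 400 * (5000 / 1000) = 2000, thm = -1000 and thl = -2000.
+	 */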
+
+	MSM_BUS_DBG("%s: BKE parameters: gp %d, gc %d, thm %d thl %d thh %d",
+			__func__, gp, gc, thm, thl, thh);
+
+	trace_bus_bke_params(gc, gp, thl, thm, thh);
+	set_qos_bw_regs(base, mport, thh, thm, thl, gp, gc);
+}
+
+static int msm_bus_bimc_limit_mport(struct msm_bus_node_device_type *info,
+				void __iomem *qos_base, uint32_t qos_off,
+				uint32_t qos_delta, uint32_t qos_freq,
+				int enable_lim, u64 lim_bw)
+{
+	int mode;
+	int i;
+	struct msm_bus_bimc_qos_mode qmode = {0};
+
+	if (ZERO_OR_NULL_PTR(info->node_info->qport)) {
+		MSM_BUS_DBG("No QoS Ports to limit\n");
+		return 0;
+	}
+
+	if ((enable_lim == THROTTLE_ON) && lim_bw) {
+		mode =  BIMC_QOS_MODE_LIMITER;
+
+		qmode.rl.qhealth[0].limit_commands = 1;
+		qmode.rl.qhealth[1].limit_commands = 0;
+		qmode.rl.qhealth[2].limit_commands = 0;
+		qmode.rl.qhealth[3].limit_commands = 0;
+		for (i = 0; i < NUM_HEALTH_LEVEL; i++) {
+			qmode.rl.qhealth[i].prio_level =
+					info->node_info->qos_params.prio_lvl;
+			qmode.rl.qhealth[i].areq_prio =
+					info->node_info->qos_params.prio_rd;
+		}
+
+		for (i = 0; i < info->node_info->num_qports; i++) {
+			struct msm_bus_bimc_qos_bw qbw;
+			/* If not in fixed mode, update bandwidth */
+			if (info->node_info->lim_bw != lim_bw) {
+				qbw.ws = info->node_info->qos_params.ws;
+				qbw.bw = lim_bw;
+				qbw.gp = info->node_info->qos_params.gp;
+				qbw.thmp = info->node_info->qos_params.thmp;
+				bimc_set_static_qos_bw(qos_base, qos_freq,
+					info->node_info->qport[i], &qbw);
+			}
+		}
+		info->node_info->lim_bw = lim_bw;
+	} else {
+		mode = info->node_info->qos_params.mode;
+		if (mode != BIMC_QOS_MODE_BYPASS) {
+			for (i = 0; i < NUM_HEALTH_LEVEL; i++) {
+				qmode.rl.qhealth[i].prio_level =
+					info->node_info->qos_params.prio_lvl;
+				qmode.rl.qhealth[i].areq_prio =
+					info->node_info->qos_params.prio_rd;
+			}
+		}
+	}
+
+	for (i = 0; i < info->node_info->num_qports; i++)
+		msm_bus_bimc_set_qos_prio(qos_base, info->node_info->qport[i],
+				mode, &qmode);
+	return 0;
+}
+
+static bool msm_bus_bimc_update_bw_reg(int mode)
+{
+	bool ret = false;
+
+	if ((mode == BIMC_QOS_MODE_LIMITER)
+		|| (mode == BIMC_QOS_MODE_REGULATOR))
+		ret = true;
+
+	return ret;
+}
+
+static int msm_bus_bimc_qos_init(struct msm_bus_node_device_type *info,
+				void __iomem *qos_base,
+				uint32_t qos_off, uint32_t qos_delta,
+				uint32_t qos_freq)
+{
+	int i;
+	struct msm_bus_bimc_qos_mode qmode = {0};
+
+	if (ZERO_OR_NULL_PTR(info->node_info->qport)) {
+		MSM_BUS_DBG("No QoS Ports to init\n");
+		return 0;
+	}
+
+	switch (info->node_info->qos_params.mode) {
+		/* For now Fixed and regulator are handled the same way. */
+	case BIMC_QOS_MODE_FIXED:
+	case BIMC_QOS_MODE_REGULATOR:
+		for (i = 0; i < NUM_HEALTH_LEVEL; i++) {
+			qmode.rl.qhealth[i].prio_level =
+				info->node_info->qos_params.prio_lvl;
+			qmode.rl.qhealth[i].areq_prio =
+				info->node_info->qos_params.prio_rd;
+		}
+		break;
+	case BIMC_QOS_MODE_LIMITER:
+		qmode.rl.qhealth[0].limit_commands = 1;
+		qmode.rl.qhealth[1].limit_commands = 0;
+		qmode.rl.qhealth[2].limit_commands = 0;
+		qmode.rl.qhealth[3].limit_commands = 0;
+		for (i = 0; i < NUM_HEALTH_LEVEL; i++) {
+			qmode.rl.qhealth[i].prio_level =
+				info->node_info->qos_params.prio_lvl;
+			qmode.rl.qhealth[i].areq_prio =
+				info->node_info->qos_params.prio_rd;
+		}
+		break;
+	default:
+		break;
+	}
+
+
+	for (i = 0; i < info->node_info->num_qports; i++)
+		msm_bus_bimc_set_qos_prio(qos_base, info->node_info->qport[i],
+			info->node_info->qos_params.mode, &qmode);
+
+	return 0;
+}
+
+static int msm_bus_bimc_set_bw(struct msm_bus_node_device_type *dev,
+				void __iomem *qos_base, uint32_t qos_off,
+				uint32_t qos_delta, uint32_t qos_freq)
+{
+	struct msm_bus_bimc_qos_bw qbw;
+	struct msm_bus_bimc_qos_mode qmode = {0};
+	int i;
+	int64_t bw = 0;
+	int ret = 0;
+	struct msm_bus_node_info_type *info = dev->node_info;
+	int mode;
+
+	if (info && info->num_qports &&
+		((info->qos_params.mode == BIMC_QOS_MODE_LIMITER))) {
+		bw = msm_bus_div64(info->num_qports,
+				dev->node_bw[ACTIVE_CTX].sum_ab);
+
+		MSM_BUS_DBG("BIMC: Update mas_bw for ID: %d -> %llu\n",
+				info->id, bw);
+
+		if (!info->qport) {
+			MSM_BUS_DBG("No qos ports to update!\n");
+			goto exit_set_bw;
+		}
+
+		qbw.bw = bw + info->qos_params.bw_buffer;
+		trace_bus_bimc_config_limiter(info->id, bw);
+
+		/* Default to gp of 5us */
+		qbw.gp = (info->qos_params.gp ?
+				info->qos_params.gp : 5000);
+		/* Default to thmp of 50% */
+		qbw.thmp = (info->qos_params.thmp ?
+				info->qos_params.thmp : 50);
+		/*
+		 * If the BW vote is 0 then set the QoS mode to
+		 * Fixed/0/0.
+		 */
+		if (bw) {
+			qmode.rl.qhealth[0].limit_commands = 1;
+			qmode.rl.qhealth[1].limit_commands = 0;
+			qmode.rl.qhealth[2].limit_commands = 0;
+			qmode.rl.qhealth[3].limit_commands = 0;
+			mode = info->qos_params.mode;
+		} else {
+			mode =	BIMC_QOS_MODE_FIXED;
+		}
+
+		for (i = 0; i < info->num_qports; i++) {
+			msm_bus_bimc_set_qos_prio(qos_base,
+				info->qport[i], mode, &qmode);
+			if (bw)
+				bimc_set_static_qos_bw(qos_base, qos_freq,
+					info->qport[i], &qbw);
+		}
+	}
+exit_set_bw:
+	return ret;
+}
+
+int msm_bus_bimc_set_ops(struct msm_bus_node_device_type *bus_dev)
+{
+	if (!bus_dev)
+		return -ENODEV;
+	bus_dev->fabdev->noc_ops.qos_init = msm_bus_bimc_qos_init;
+	bus_dev->fabdev->noc_ops.set_bw = msm_bus_bimc_set_bw;
+	bus_dev->fabdev->noc_ops.limit_mport = msm_bus_bimc_limit_mport;
+	bus_dev->fabdev->noc_ops.update_bw_reg =
+					msm_bus_bimc_update_bw_reg;
+	return 0;
+}
+EXPORT_SYMBOL(msm_bus_bimc_set_ops);
diff -Nruw linux-4.4.115-fbx/drivers/soc/qcom/msm_bus./msm_bus_bimc.h linux-4.4.115-fbx/drivers/soc/qcom/msm_bus/msm_bus_bimc.h
--- linux-4.4.115-fbx/drivers/soc/qcom/msm_bus./msm_bus_bimc.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/soc/qcom/msm_bus/msm_bus_bimc.h	2019-01-22 16:16:26.659274986 +0100
@@ -0,0 +1,127 @@
+/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _ARCH_ARM_MACH_MSM_BUS_BIMC_H
+#define _ARCH_ARM_MACH_MSM_BUS_BIMC_H
+
+struct msm_bus_bimc_params {
+	uint32_t bus_id;
+	uint32_t addr_width;
+	uint32_t data_width;
+	uint32_t nmasters;
+	uint32_t nslaves;
+};
+
+struct msm_bus_bimc_commit {
+	struct msm_bus_node_hw_info *mas;
+	struct msm_bus_node_hw_info *slv;
+};
+
+struct msm_bus_bimc_info {
+	void __iomem *base;
+	uint32_t base_addr;
+	uint32_t qos_freq;
+	struct msm_bus_bimc_params params;
+	struct msm_bus_bimc_commit cdata[NUM_CTX];
+};
+
+struct msm_bus_bimc_node {
+	uint32_t conn_mask;
+	uint32_t data_width;
+	uint8_t slv_arb_mode;
+};
+
+enum msm_bus_bimc_arb_mode {
+	BIMC_ARB_MODE_RR = 0,
+	BIMC_ARB_MODE_PRIORITY_RR,
+	BIMC_ARB_MODE_TIERED_RR,
+};
+
+
+enum msm_bus_bimc_interleave {
+	BIMC_INTERLEAVE_NONE = 0,
+	BIMC_INTERLEAVE_ODD,
+	BIMC_INTERLEAVE_EVEN,
+};
+
+struct msm_bus_bimc_slave_seg {
+	bool enable;
+	uint64_t start_addr;
+	uint64_t seg_size;
+	uint8_t interleave;
+};
+
+enum msm_bus_bimc_qos_mode_type {
+	BIMC_QOS_MODE_FIXED = 0,
+	BIMC_QOS_MODE_LIMITER,
+	BIMC_QOS_MODE_BYPASS,
+	BIMC_QOS_MODE_REGULATOR,
+};
+
+struct msm_bus_bimc_qos_health {
+	bool limit_commands;
+	uint32_t areq_prio;
+	uint32_t prio_level;
+};
+
+struct msm_bus_bimc_mode_fixed {
+	uint32_t prio_level;
+	uint32_t areq_prio_rd;
+	uint32_t areq_prio_wr;
+};
+
+struct msm_bus_bimc_mode_rl {
+	uint8_t qhealthnum;
+	struct msm_bus_bimc_qos_health qhealth[4];
+};
+
+struct msm_bus_bimc_qos_mode {
+	uint8_t mode;
+	struct msm_bus_bimc_mode_fixed fixed;
+	struct msm_bus_bimc_mode_rl rl;
+};
+
+struct msm_bus_bimc_qos_bw {
+	uint64_t bw;	/* bw is in Bytes/sec */
+	uint32_t ws;	/* Window size in nanoseconds */
+	int64_t thh;	/* Threshold high, bytes per second */
+	int64_t thm;	/* Threshold medium, bytes per second */
+	int64_t thl;	/* Threshold low, bytes per second */
+	u32 gp;	/* Grant period in microseconds */
+	u32 thmp; /* Threshold medium in percentage */
+};
+
+struct msm_bus_bimc_clk_gate {
+	bool core_clk_gate_en;
+	bool arb_clk_gate_en;	/* For arbiter */
+	bool port_clk_gate_en;	/* For regs on BIMC core clock */
+};
+
+void msm_bus_bimc_set_slave_seg(struct msm_bus_bimc_info *binfo,
+	uint32_t slv_index, uint32_t seg_index,
+	struct msm_bus_bimc_slave_seg *bsseg);
+void msm_bus_bimc_set_slave_clk_gate(struct msm_bus_bimc_info *binfo,
+	uint32_t slv_index, struct msm_bus_bimc_clk_gate *bgate);
+void msm_bus_bimc_set_mas_clk_gate(struct msm_bus_bimc_info *binfo,
+	uint32_t mas_index, struct msm_bus_bimc_clk_gate *bgate);
+void msm_bus_bimc_arb_en(struct msm_bus_bimc_info *binfo,
+	uint32_t slv_index, bool en);
+void msm_bus_bimc_get_params(struct msm_bus_bimc_info *binfo,
+	struct msm_bus_bimc_params *params);
+void msm_bus_bimc_get_mas_params(struct msm_bus_bimc_info *binfo,
+	uint32_t mas_index, struct msm_bus_bimc_node *mparams);
+void msm_bus_bimc_get_slv_params(struct msm_bus_bimc_info *binfo,
+	uint32_t slv_index, struct msm_bus_bimc_node *sparams);
+bool msm_bus_bimc_get_arb_en(struct msm_bus_bimc_info *binfo,
+	uint32_t slv_index);
+
+#endif /*_ARCH_ARM_MACH_MSM_BUS_BIMC_H*/
diff -Nruw linux-4.4.115-fbx/drivers/soc/qcom/msm_bus./msm_bus_client_api.c linux-4.4.115-fbx/drivers/soc/qcom/msm_bus/msm_bus_client_api.c
--- linux-4.4.115-fbx/drivers/soc/qcom/msm_bus./msm_bus_client_api.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/soc/qcom/msm_bus/msm_bus_client_api.c	2019-01-22 16:16:26.659274986 +0100
@@ -0,0 +1,184 @@
+/* Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "AXI: %s(): " fmt, __func__
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/mutex.h>
+#include <linux/radix-tree.h>
+#include <linux/clk.h>
+#include <linux/msm-bus.h>
+#include "msm_bus_core.h"
+
+struct msm_bus_arb_ops arb_ops;
+
+/**
+ * msm_bus_scale_register_client() - Register a client with the msm bus
+ * driver
+ * @pdata: Platform data of the client, containing src, dest, ab, ib.
+ *
+ * Returns a non-zero client handle on success, 0 on failure.
+ *
+ * Client data contains the vectors specifying arbitrated bandwidth (ab)
+ * and instantaneous bandwidth (ib) requested between a particular
+ * src and dest.
+ */
+uint32_t msm_bus_scale_register_client(struct msm_bus_scale_pdata *pdata)
+{
+	if (arb_ops.register_client)
+		return arb_ops.register_client(pdata);
+	else {
+		pr_err("%s: Bus driver not ready.",
+				__func__);
+		return 0;
+	}
+}
+EXPORT_SYMBOL(msm_bus_scale_register_client);
+
+/**
+ * msm_bus_scale_client_update_request() - Update the bandwidth request
+ * for a particular client
+ * @cl: Handle to the client
+ * @index: Index of the usecase whose bandwidth and clock values should
+ * be applied
+ */
+int msm_bus_scale_client_update_request(uint32_t cl, unsigned int index)
+{
+	if (arb_ops.update_request)
+		return arb_ops.update_request(cl, index);
+	else {
+		pr_err("%s: Bus driver not ready.",
+				__func__);
+		return -EPROBE_DEFER;
+	}
+}
+EXPORT_SYMBOL(msm_bus_scale_client_update_request);
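+
+/*
+ * Minimal usage sketch of the legacy vector API, modeled on the shell
+ * client in msm_bus_dbg.c; MY_MASTER/MY_SLAVE and the bytes/s figures
+ * are placeholders:
+ *
+ *	static struct msm_bus_vectors vecs[] = {
+ *		{ .src = MY_MASTER, .dst = MY_SLAVE, .ab = 0, .ib = 0 },
+ *		{ .src = MY_MASTER, .dst = MY_SLAVE,
+ *		  .ab = 100000000ULL, .ib = 200000000ULL },
+ *	};
+ *	static struct msm_bus_paths usecases[] = {
+ *		{ .num_paths = 1, .vectors = &vecs[0] },
+ *		{ .num_paths = 1, .vectors = &vecs[1] },
+ *	};
+ *	static struct msm_bus_scale_pdata pdata = {
+ *		.usecase = usecases,
+ *		.num_usecases = ARRAY_SIZE(usecases),
+ *		.name = "my-client",
+ *	};
+ *
+ *	uint32_t cl = msm_bus_scale_register_client(&pdata);
+ *	if (cl) {
+ *		msm_bus_scale_client_update_request(cl, 1);
+ *		...
+ *		msm_bus_scale_unregister_client(cl);
+ *	}
+ */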
+
+/**
+ * msm_bus_scale_client_update_context() - Update the context for a client
+ * @cl: Handle to the client
+ * @active_only: Bool to indicate dual context or active-only context.
+ * @ctx_idx: Voting index to be used when switching contexts.
+ */
+int msm_bus_scale_client_update_context(uint32_t cl, bool active_only,
+							unsigned int ctx_idx)
+{
+	if (arb_ops.update_context)
+		return arb_ops.update_context(cl, active_only, ctx_idx);
+
+	return -EPROBE_DEFER;
+}
+EXPORT_SYMBOL(msm_bus_scale_client_update_context);
+
+/**
+ * msm_bus_scale_unregister_client() - Unregister the client from the bus driver
+ * @cl: Handle to the client
+ */
+void msm_bus_scale_unregister_client(uint32_t cl)
+{
+	if (arb_ops.unregister_client)
+		arb_ops.unregister_client(cl);
+	else {
+		pr_err("%s: Bus driver not ready.",
+				__func__);
+	}
+}
+EXPORT_SYMBOL(msm_bus_scale_unregister_client);
+
+/**
+ * msm_bus_scale_register() - Register a client with the msm bus
+ * driver
+ *
+ * @mas: Master ID
+ * @slv: Slave ID
+ * @name: descriptive name for this client
+ * @active_only: Whether or not this bandwidth vote should only be
+ *               effective while the application processor is active.
+ *
+ * Client data contains the vectors specifying arbitrated bandwidth (ab)
+ * and instantaneous bandwidth (ib) requested between a particular
+ * src and dest.
+ */
+struct msm_bus_client_handle*
+msm_bus_scale_register(uint32_t mas, uint32_t slv, char *name, bool active_only)
+{
+	if (arb_ops.register_cl)
+		return arb_ops.register_cl(mas, slv, name, active_only);
+	else {
+		pr_err("%s: Bus driver not ready.",
+				__func__);
+		return ERR_PTR(-EPROBE_DEFER);
+
+	}
+}
+EXPORT_SYMBOL(msm_bus_scale_register);
+
+/**
+ * msm_bus_scale_update_bw() - Update the bandwidth request
+ * for a particular client
+ *
+ * @cl: Handle to the client
+ * @ab: Arbitrated bandwidth being requested
+ * @ib: Instantaneous bandwidth being requested
+ */
+int msm_bus_scale_update_bw(struct msm_bus_client_handle *cl, u64 ab, u64 ib)
+{
+	if (arb_ops.update_bw)
+		return arb_ops.update_bw(cl, ab, ib);
+	else {
+		pr_err("%s: Bus driver not ready.", __func__);
+		return -EPROBE_DEFER;
+	}
+}
+EXPORT_SYMBOL(msm_bus_scale_update_bw);
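+
+/*
+ * Minimal usage sketch of the handle-based API (placeholder IDs and
+ * bandwidth values):
+ *
+ *	struct msm_bus_client_handle *h;
+ *
+ *	h = msm_bus_scale_register(MY_MASTER, MY_SLAVE, "my-client", false);
+ *	if (!IS_ERR_OR_NULL(h)) {
+ *		msm_bus_scale_update_bw(h, 100000000ULL, 200000000ULL);
+ *		...
+ *		msm_bus_scale_unregister(h);
+ *	}
+ */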
+
+/**
+ * msm_bus_scale_update_bw_context() - Update the active and dual context
+ * bandwidth votes for a particular client
+ * @cl: Handle to the client
+ * @act_ab: The average bandwidth (AB) in Bytes/s to be used in active
+ *          context.
+ * @act_ib: The instantaneous bandwidth (IB) in Bytes/s to be used in
+ *          active context.
+ * @slp_ib: The instantaneous bandwidth (IB) in Bytes/s to be used in
+ *          dual context.
+ * @slp_ab: The average bandwidth (AB) in Bytes/s to be used in dual
+ *          context.
+ */
+int
+msm_bus_scale_update_bw_context(struct msm_bus_client_handle *cl, u64 act_ab,
+				u64 act_ib, u64 slp_ib, u64 slp_ab)
+{
+	if (arb_ops.update_bw_context)
+		return arb_ops.update_bw_context(cl, act_ab, act_ib,
+							slp_ib, slp_ab);
+
+	return -EPROBE_DEFER;
+}
+EXPORT_SYMBOL(msm_bus_scale_update_bw_context);
+
+/**
+ * msm_bus_scale_unregister() - Unregister a client handle and drop its
+ * bandwidth vote
+ *
+ * @cl: Handle to the client
+ */
+void msm_bus_scale_unregister(struct msm_bus_client_handle *cl)
+{
+	if (arb_ops.unregister)
+		arb_ops.unregister(cl);
+	else
+		pr_err("%s: Bus driver not ready.",
+				__func__);
+}
+EXPORT_SYMBOL(msm_bus_scale_unregister);
diff -Nruw linux-4.4.115-fbx/drivers/soc/qcom/msm_bus./msm_bus_core.c linux-4.4.115-fbx/drivers/soc/qcom/msm_bus/msm_bus_core.c
--- linux-4.4.115-fbx/drivers/soc/qcom/msm_bus./msm_bus_core.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/soc/qcom/msm_bus/msm_bus_core.c	2019-01-22 16:16:26.659274986 +0100
@@ -0,0 +1,125 @@
+/* Copyright (c) 2010-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "AXI: %s(): " fmt, __func__
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/mutex.h>
+#include <linux/radix-tree.h>
+#include <linux/clk.h>
+#include <linux/msm-bus-board.h>
+#include <linux/msm-bus.h>
+#include "msm_bus_core.h"
+
+static atomic_t num_fab = ATOMIC_INIT(0);
+
+int msm_bus_get_num_fab(void)
+{
+	return atomic_read(&num_fab);
+}
+
+int msm_bus_device_match(struct device *dev, void *id)
+{
+	struct msm_bus_fabric_device *fabdev = to_msm_bus_fabric_device(dev);
+
+	if (!fabdev) {
+		MSM_BUS_WARN("Fabric %p returning 0\n", fabdev);
+		return 0;
+	}
+	return fabdev->id == *(int *)id;
+}
+
+static void msm_bus_release(struct device *device)
+{
+}
+
+struct bus_type msm_bus_type = {
+	.name      = "msm-bus-type",
+};
+EXPORT_SYMBOL(msm_bus_type);
+
+/**
+ * msm_bus_get_fabric_device() - Search the bus for a fabric device
+ * @fabid: Fabric id
+ *
+ * Returns: pointer to the fabric device, or NULL if not found.
+ */
+struct msm_bus_fabric_device *msm_bus_get_fabric_device(int fabid)
+{
+	struct device *dev;
+	struct msm_bus_fabric_device *fabric;
+	dev = bus_find_device(&msm_bus_type, NULL, (void *)&fabid,
+		msm_bus_device_match);
+	if (!dev)
+		return NULL;
+	fabric = to_msm_bus_fabric_device(dev);
+	return fabric;
+}
+
+/**
+ * msm_bus_fabric_device_register() - Registers a fabric on msm bus
+ * @fabdev: Fabric device to be registered
+ */
+int msm_bus_fabric_device_register(struct msm_bus_fabric_device *fabdev)
+{
+	int ret = 0;
+	fabdev->dev.bus = &msm_bus_type;
+	fabdev->dev.release = msm_bus_release;
+	ret = dev_set_name(&fabdev->dev, "%s", fabdev->name);
+	if (ret) {
+		MSM_BUS_ERR("error setting dev name\n");
+		goto err;
+	}
+
+	ret = device_register(&fabdev->dev);
+	if (ret < 0) {
+		MSM_BUS_ERR("error registering device%d %s\n",
+				ret, fabdev->name);
+		goto err;
+	}
+	atomic_inc(&num_fab);
+err:
+	return ret;
+}
+
+/**
+ * msm_bus_fabric_device_unregister() - Unregisters a fabric device
+ * from the msm bus
+ * @fabdev: Fabric device to be unregistered
+ */
+void msm_bus_fabric_device_unregister(struct msm_bus_fabric_device *fabdev)
+{
+	device_unregister(&fabdev->dev);
+	atomic_dec(&num_fab);
+}
+
+static void __exit msm_bus_exit(void)
+{
+	bus_unregister(&msm_bus_type);
+}
+
+static int __init msm_bus_init(void)
+{
+	int retval = 0;
+	retval = bus_register(&msm_bus_type);
+	if (retval)
+		MSM_BUS_ERR("bus_register error! %d\n",
+			retval);
+	return retval;
+}
+postcore_initcall(msm_bus_init);
+module_exit(msm_bus_exit);
+MODULE_LICENSE("GPL v2");
+MODULE_VERSION("0.2");
+MODULE_ALIAS("platform:msm_bus");
diff -Nruw linux-4.4.115-fbx/drivers/soc/qcom/msm_bus./msm_bus_core.h linux-4.4.115-fbx/drivers/soc/qcom/msm_bus/msm_bus_core.h
--- linux-4.4.115-fbx/drivers/soc/qcom/msm_bus./msm_bus_core.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/soc/qcom/msm_bus/msm_bus_core.h	2019-01-22 16:16:26.659274986 +0100
@@ -0,0 +1,417 @@
+/* Copyright (c) 2011-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _ARCH_ARM_MACH_MSM_BUS_CORE_H
+#define _ARCH_ARM_MACH_MSM_BUS_CORE_H
+
+#include <linux/types.h>
+#include <linux/device.h>
+#include <linux/radix-tree.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+#include <linux/msm-bus-board.h>
+#include <linux/msm-bus.h>
+
+#define MSM_BUS_DBG(msg, ...) \
+	pr_debug(msg, ## __VA_ARGS__)
+#define MSM_BUS_ERR(msg, ...) \
+	pr_err(msg, ## __VA_ARGS__)
+#define MSM_BUS_WARN(msg, ...) \
+	pr_warn(msg, ## __VA_ARGS__)
+#define MSM_FAB_ERR(msg, ...) \
+	dev_err(&fabric->fabdev.dev, msg, ## __VA_ARGS__)
+
+#define IS_MASTER_VALID(mas) \
+	(((mas >= MSM_BUS_MASTER_FIRST) && (mas <= MSM_BUS_MASTER_LAST)) \
+	 ? 1 : 0)
+#define IS_SLAVE_VALID(slv) \
+	(((slv >= MSM_BUS_SLAVE_FIRST) && (slv <= MSM_BUS_SLAVE_LAST)) ? 1 : 0)
+
+#define INTERLEAVED_BW(fab_pdata, bw, ports) \
+	((fab_pdata->il_flag) ? ((bw < 0) \
+	? -msm_bus_div64((ports), (-bw)) : msm_bus_div64((ports), (bw))) : (bw))
+#define INTERLEAVED_VAL(fab_pdata, n) \
+	((fab_pdata->il_flag) ? (n) : 1)
+#define KBTOB(a) (a * 1000ULL)
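+/*
+ * Example: KBTOB(1024) == 1024000ULL; presumably converts KB/s inputs
+ * into the bytes/s used internally.
+ */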
+#define MAX_REG_NAME	(50)
+
+enum msm_bus_dbg_op_type {
+	MSM_BUS_DBG_UNREGISTER = -2,
+	MSM_BUS_DBG_REGISTER,
+	MSM_BUS_DBG_OP = 1,
+};
+
+enum msm_bus_hw_sel {
+	MSM_BUS_RPM = 0,
+	MSM_BUS_NOC,
+	MSM_BUS_BIMC,
+};
+
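+/*
+ * Dispatch table for the arbitration backend: filled in at probe time by
+ * msm_bus_arb_setops_adhoc() (or the legacy setops) and invoked through
+ * the msm_bus_scale_* wrappers in msm_bus_client_api.c.
+ */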
+struct msm_bus_arb_ops {
+	uint32_t (*register_client)(struct msm_bus_scale_pdata *pdata);
+	int (*update_request)(uint32_t cl, unsigned int index);
+	int (*update_context)(uint32_t cl, bool active_only,
+						unsigned int ctx_idx);
+	void (*unregister_client)(uint32_t cl);
+	struct msm_bus_client_handle*
+		(*register_cl)(uint32_t mas, uint32_t slv, char *name,
+						bool active_only);
+	int (*update_bw)(struct msm_bus_client_handle *cl, u64 ab, u64 ib);
+	void (*unregister)(struct msm_bus_client_handle *cl);
+	int (*update_bw_context)(struct msm_bus_client_handle *cl, u64 act_ab,
+				u64 act_ib, u64 slp_ib, u64 slp_ab);
+};
+
+enum {
+	SLAVE_NODE,
+	MASTER_NODE,
+	CLK_NODE,
+	NR_LIM_NODE,
+};
+
+
+extern struct bus_type msm_bus_type;
+extern struct msm_bus_arb_ops arb_ops;
+extern void msm_bus_arb_setops_legacy(struct msm_bus_arb_ops *arb_ops);
+
+struct msm_bus_node_info {
+	unsigned int id;
+	unsigned int priv_id;
+	unsigned int mas_hw_id;
+	unsigned int slv_hw_id;
+	int gateway;
+	int *masterp;
+	int *qport;
+	int num_mports;
+	int *slavep;
+	int num_sports;
+	int *tier;
+	int num_tiers;
+	int ahb;
+	int hw_sel;
+	const char *slaveclk[NUM_CTX];
+	const char *memclk[NUM_CTX];
+	const char *iface_clk_node;
+	unsigned int buswidth;
+	unsigned int ws;
+	unsigned int mode;
+	unsigned int perm_mode;
+	unsigned int prio_lvl;
+	unsigned int prio_rd;
+	unsigned int prio_wr;
+	unsigned int prio1;
+	unsigned int prio0;
+	unsigned int num_thresh;
+	u64 *th;
+	u64 cur_lim_bw;
+	unsigned int mode_thresh;
+	bool dual_conf;
+	u64 *bimc_bw;
+	bool nr_lim;
+	u32 ff;
+	bool rt_mas;
+	u32 bimc_gp;
+	u32 bimc_thmp;
+	u64 floor_bw;
+	const char *name;
+};
+
+struct path_node {
+	uint64_t clk[NUM_CTX];
+	uint64_t bw[NUM_CTX];
+	uint64_t *sel_clk;
+	uint64_t *sel_bw;
+	int next;
+};
+
+struct msm_bus_link_info {
+	uint64_t clk[NUM_CTX];
+	uint64_t *sel_clk;
+	uint64_t memclk;
+	int64_t bw[NUM_CTX];
+	int64_t *sel_bw;
+	int *tier;
+	int num_tiers;
+};
+
+struct nodeclk {
+	struct clk *clk;
+	struct regulator *reg;
+	uint64_t rate;
+	bool dirty;
+	bool enable_only_clk;
+	bool setrate_only_clk;
+	bool enable;
+	char reg_name[MAX_REG_NAME];
+};
+
+struct msm_bus_inode_info {
+	struct msm_bus_node_info *node_info;
+	uint64_t max_bw;
+	uint64_t max_clk;
+	uint64_t cur_lim_bw;
+	uint64_t cur_prg_bw;
+	struct msm_bus_link_info link_info;
+	int num_pnodes;
+	struct path_node *pnode;
+	int commit_index;
+	struct nodeclk nodeclk[NUM_CTX];
+	struct nodeclk memclk[NUM_CTX];
+	struct nodeclk iface_clk;
+	void *hw_data;
+};
+
+struct msm_bus_node_hw_info {
+	bool dirty;
+	unsigned int hw_id;
+	uint64_t bw;
+};
+
+struct msm_bus_hw_algorithm {
+	int (*allocate_commit_data)(struct msm_bus_fabric_registration
+		*fab_pdata, void **cdata, int ctx);
+	void *(*allocate_hw_data)(struct platform_device *pdev,
+		struct msm_bus_fabric_registration *fab_pdata);
+	void (*node_init)(void *hw_data, struct msm_bus_inode_info *info);
+	void (*free_commit_data)(void *cdata);
+	void (*update_bw)(struct msm_bus_inode_info *hop,
+		struct msm_bus_inode_info *info,
+		struct msm_bus_fabric_registration *fab_pdata,
+		void *sel_cdata, int *master_tiers,
+		int64_t add_bw);
+	void (*fill_cdata_buffer)(int *curr, char *buf, const int max_size,
+		void *cdata, int nmasters, int nslaves, int ntslaves);
+	int (*commit)(struct msm_bus_fabric_registration
+		*fab_pdata, void *hw_data, void **cdata);
+	int (*port_unhalt)(uint32_t haltid, uint8_t mport);
+	int (*port_halt)(uint32_t haltid, uint8_t mport);
+	void (*config_master)(struct msm_bus_fabric_registration *fab_pdata,
+		struct msm_bus_inode_info *info,
+		uint64_t req_clk, uint64_t req_bw);
+	void (*config_limiter)(struct msm_bus_fabric_registration *fab_pdata,
+		struct msm_bus_inode_info *info);
+	bool (*update_bw_reg)(int mode);
+};
+
+struct msm_bus_fabric_device {
+	int id;
+	const char *name;
+	struct device dev;
+	const struct msm_bus_fab_algorithm *algo;
+	const struct msm_bus_board_algorithm *board_algo;
+	struct msm_bus_hw_algorithm hw_algo;
+	int visited;
+	int num_nr_lim;
+	u64 nr_lim_thresh;
+	u32 eff_fact;
+};
+#define to_msm_bus_fabric_device(d) container_of(d, \
+		struct msm_bus_fabric_device, d)
+
+struct msm_bus_fabric {
+	struct msm_bus_fabric_device fabdev;
+	int ahb;
+	void *cdata[NUM_CTX];
+	bool arb_dirty;
+	bool clk_dirty;
+	struct radix_tree_root fab_tree;
+	int num_nodes;
+	struct list_head gateways;
+	struct msm_bus_inode_info info;
+	struct msm_bus_fabric_registration *pdata;
+	void *hw_data;
+};
+#define to_msm_bus_fabric(d) container_of(d, \
+	struct msm_bus_fabric, d)
+
+
+struct msm_bus_fab_algorithm {
+	int (*update_clks)(struct msm_bus_fabric_device *fabdev,
+		struct msm_bus_inode_info *pme, int index,
+		uint64_t curr_clk, uint64_t req_clk,
+		uint64_t bwsum, int flag, int ctx,
+		unsigned int cl_active_flag);
+	int (*port_halt)(struct msm_bus_fabric_device *fabdev, int portid);
+	int (*port_unhalt)(struct msm_bus_fabric_device *fabdev, int portid);
+	int (*commit)(struct msm_bus_fabric_device *fabdev);
+	struct msm_bus_inode_info *(*find_node)(struct msm_bus_fabric_device
+		*fabdev, int id);
+	struct msm_bus_inode_info *(*find_gw_node)(struct msm_bus_fabric_device
+		*fabdev, int id);
+	struct list_head *(*get_gw_list)(struct msm_bus_fabric_device *fabdev);
+	void (*update_bw)(struct msm_bus_fabric_device *fabdev, struct
+		msm_bus_inode_info * hop, struct msm_bus_inode_info *info,
+		int64_t add_bw, int *master_tiers, int ctx);
+	void (*config_master)(struct msm_bus_fabric_device *fabdev,
+		struct msm_bus_inode_info *info, uint64_t req_clk,
+		uint64_t req_bw);
+	void (*config_limiter)(struct msm_bus_fabric_device *fabdev,
+		struct msm_bus_inode_info *info);
+};
+
+struct msm_bus_board_algorithm {
+	int board_nfab;
+	void (*assign_iids)(struct msm_bus_fabric_registration *fabreg,
+		int fabid);
+	int (*get_iid)(int id);
+};
+
+/*
+ * Used to store the list of fabrics and other info that must be
+ * maintained outside the fabric structure; used while calculating
+ * paths and to look up fabric pointers.
+ */
+struct msm_bus_fabnodeinfo {
+	struct list_head list;
+	struct msm_bus_inode_info *info;
+};
+
+struct msm_bus_client {
+	int id;
+	struct msm_bus_scale_pdata *pdata;
+	int *src_pnode;
+	int curr;
+	struct device **src_devs;
+};
+
+uint64_t msm_bus_div64(unsigned int width, uint64_t bw);
+int msm_bus_fabric_device_register(struct msm_bus_fabric_device *fabric);
+void msm_bus_fabric_device_unregister(struct msm_bus_fabric_device *fabric);
+struct msm_bus_fabric_device *msm_bus_get_fabric_device(int fabid);
+int msm_bus_get_num_fab(void);
+
+
+int msm_bus_hw_fab_init(struct msm_bus_fabric_registration *pdata,
+	struct msm_bus_hw_algorithm *hw_algo);
+void msm_bus_board_init(struct msm_bus_fabric_registration *pdata);
+#if defined(CONFIG_MSM_RPM_SMD)
+int msm_bus_rpm_hw_init(struct msm_bus_fabric_registration *pdata,
+	struct msm_bus_hw_algorithm *hw_algo);
+int msm_bus_remote_hw_commit(struct msm_bus_fabric_registration
+	*fab_pdata, void *hw_data, void **cdata);
+void msm_bus_rpm_fill_cdata_buffer(int *curr, char *buf, const int max_size,
+	void *cdata, int nmasters, int nslaves, int ntslaves);
+#else
+static inline int msm_bus_rpm_hw_init(struct msm_bus_fabric_registration *pdata,
+	struct msm_bus_hw_algorithm *hw_algo)
+{
+	return 0;
+}
+static inline int msm_bus_remote_hw_commit(struct msm_bus_fabric_registration
+	*fab_pdata, void *hw_data, void **cdata)
+{
+	return 0;
+}
+static inline void msm_bus_rpm_fill_cdata_buffer(int *curr, char *buf,
+	const int max_size, void *cdata, int nmasters, int nslaves,
+	int ntslaves)
+{
+}
+#endif
+
+int msm_bus_noc_hw_init(struct msm_bus_fabric_registration *pdata,
+	struct msm_bus_hw_algorithm *hw_algo);
+int msm_bus_bimc_hw_init(struct msm_bus_fabric_registration *pdata,
+	struct msm_bus_hw_algorithm *hw_algo);
+#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_QCOM_BUS_SCALING)
+void msm_bus_dbg_client_data(struct msm_bus_scale_pdata *pdata, int index,
+	uint32_t cl);
+void msm_bus_dbg_commit_data(const char *fabname, void *cdata,
+	int nmasters, int nslaves, int ntslaves, int op);
+int msm_bus_dbg_add_client(const struct msm_bus_client_handle *pdata);
+int msm_bus_dbg_rec_transaction(const struct msm_bus_client_handle *pdata,
+						u64 ab, u64 ib);
+void msm_bus_dbg_remove_client(const struct msm_bus_client_handle *pdata);
+
+#else
+static inline void msm_bus_dbg_client_data(struct msm_bus_scale_pdata *pdata,
+	int index, uint32_t cl)
+{
+}
+static inline void msm_bus_dbg_commit_data(const char *fabname,
+	void *cdata, int nmasters, int nslaves, int ntslaves,
+	int op)
+{
+}
+static inline void msm_bus_dbg_remove_client
+		(const struct msm_bus_client_handle *pdata)
+{
+}
+
+static inline int
+msm_bus_dbg_rec_transaction(const struct msm_bus_client_handle *pdata,
+						u64 ab, u64 ib)
+{
+	return 0;
+}
+
+static inline int
+msm_bus_dbg_add_client(const struct msm_bus_client_handle *pdata)
+{
+	return 0;
+}
+#endif
+
+#ifdef CONFIG_CORESIGHT
+int msmbus_coresight_init(struct platform_device *pdev);
+void msmbus_coresight_remove(struct platform_device *pdev);
+int msmbus_coresight_init_adhoc(struct platform_device *pdev,
+		struct device_node *of_node);
+void msmbus_coresight_remove_adhoc(struct platform_device *pdev);
+#else
+static inline int msmbus_coresight_init(struct platform_device *pdev)
+{
+	return 0;
+}
+
+static inline void msmbus_coresight_remove(struct platform_device *pdev)
+{
+}
+
+static inline int msmbus_coresight_init_adhoc(struct platform_device *pdev,
+		struct device_node *of_node)
+{
+	return 0;
+}
+
+static inline void msmbus_coresight_remove_adhoc(struct platform_device *pdev)
+{
+}
+#endif
+
+
+#ifdef CONFIG_OF
+void msm_bus_of_get_nfab(struct platform_device *pdev,
+		struct msm_bus_fabric_registration *pdata);
+struct msm_bus_fabric_registration
+	*msm_bus_of_get_fab_data(struct platform_device *pdev);
+static inline void msm_bus_board_set_nfab(struct msm_bus_fabric_registration
+		*pdata,	int nfab)
+{
+}
+#else
+void msm_bus_board_set_nfab(struct msm_bus_fabric_registration *pdata,
+	int nfab);
+static inline void msm_bus_of_get_nfab(struct platform_device *pdev,
+		struct msm_bus_fabric_registration *pdata)
+{
+	return;
+}
+
+static inline struct msm_bus_fabric_registration
+	*msm_bus_of_get_fab_data(struct platform_device *pdev)
+{
+	return NULL;
+}
+#endif
+
+#endif /*_ARCH_ARM_MACH_MSM_BUS_CORE_H*/
diff -Nruw linux-4.4.115-fbx/drivers/soc/qcom/msm_bus./msm_bus_dbg.c linux-4.4.115-fbx/drivers/soc/qcom/msm_bus/msm_bus_dbg.c
--- linux-4.4.115-fbx/drivers/soc/qcom/msm_bus./msm_bus_dbg.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/soc/qcom/msm_bus/msm_bus_dbg.c	2019-01-22 16:16:26.659274986 +0100
@@ -0,0 +1,998 @@
+/* Copyright (c) 2010-2012, 2014-2015, 2017 The Linux Foundation. All rights
+ * reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) "AXI: %s(): " fmt, __func__
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/seq_file.h>
+#include <linux/debugfs.h>
+#include <linux/slab.h>
+#include <linux/mutex.h>
+#include <linux/string.h>
+#include <linux/uaccess.h>
+#include <linux/hrtimer.h>
+#include <linux/msm-bus-board.h>
+#include <linux/msm-bus.h>
+#include <linux/msm_bus_rules.h>
+#include "msm_bus_core.h"
+#include "msm_bus_adhoc.h"
+
+#define CREATE_TRACE_POINTS
+#include <trace/events/trace_msm_bus.h>
+
+#define MAX_BUFF_SIZE 4096
+#define FILL_LIMIT 128
+
+static struct dentry *clients;
+static struct dentry *dir;
+static DEFINE_MUTEX(msm_bus_dbg_fablist_lock);
+static DEFINE_RT_MUTEX(msm_bus_dbg_cllist_lock);
+struct msm_bus_dbg_state {
+	uint32_t cl;
+	uint8_t enable;
+	uint8_t current_index;
+} clstate;
+
+struct msm_bus_cldata {
+	const struct msm_bus_scale_pdata *pdata;
+	const struct msm_bus_client_handle *handle;
+	int index;
+	uint32_t clid;
+	int size;
+	struct dentry *file;
+	struct list_head list;
+	char buffer[MAX_BUFF_SIZE];
+};
+
+struct msm_bus_fab_list {
+	const char *name;
+	int size;
+	struct dentry *file;
+	struct list_head list;
+	char buffer[MAX_BUFF_SIZE];
+};
+
+static char *rules_buf;
+
+LIST_HEAD(fabdata_list);
+LIST_HEAD(cl_list);
+
+/*
+ * The following structures and functions are used for
+ * the test-client which can be created at run-time.
+ */
+
+static struct msm_bus_vectors init_vectors[1];
+static struct msm_bus_vectors current_vectors[1];
+static struct msm_bus_vectors requested_vectors[1];
+
+static struct msm_bus_paths shell_client_usecases[] = {
+	{
+		.num_paths = ARRAY_SIZE(init_vectors),
+		.vectors = init_vectors,
+	},
+	{
+		.num_paths = ARRAY_SIZE(current_vectors),
+		.vectors = current_vectors,
+	},
+	{
+		.num_paths = ARRAY_SIZE(requested_vectors),
+		.vectors = requested_vectors,
+	},
+};
+
+static struct msm_bus_scale_pdata shell_client = {
+	.usecase = shell_client_usecases,
+	.num_usecases = ARRAY_SIZE(shell_client_usecases),
+	.name = "test-client",
+};
+
+static void msm_bus_dbg_init_vectors(void)
+{
+	init_vectors[0].src = -1;
+	init_vectors[0].dst = -1;
+	init_vectors[0].ab = 0;
+	init_vectors[0].ib = 0;
+	current_vectors[0].src = -1;
+	current_vectors[0].dst = -1;
+	current_vectors[0].ab = 0;
+	current_vectors[0].ib = 0;
+	requested_vectors[0].src = -1;
+	requested_vectors[0].dst = -1;
+	requested_vectors[0].ab = 0;
+	requested_vectors[0].ib = 0;
+	clstate.enable = 0;
+	clstate.current_index = 0;
+}
+
+static int msm_bus_dbg_update_cl_request(uint32_t cl)
+{
+	int ret = 0;
+
+	if (clstate.current_index < 2)
+		clstate.current_index = 2;
+	else {
+		clstate.current_index = 1;
+		current_vectors[0].ab = requested_vectors[0].ab;
+		current_vectors[0].ib = requested_vectors[0].ib;
+	}
+
+	if (clstate.enable) {
+		MSM_BUS_DBG("Updating request for shell client, index: %d\n",
+			clstate.current_index);
+		ret = msm_bus_scale_client_update_request(clstate.cl,
+			clstate.current_index);
+	} else
+		MSM_BUS_DBG("Enable bit not set. Skipping update request\n");
+
+	return ret;
+}
+
+static void msm_bus_dbg_unregister_client(uint32_t cl)
+{
+	MSM_BUS_DBG("Unregistering shell client\n");
+	msm_bus_scale_unregister_client(clstate.cl);
+	clstate.cl = 0;
+}
+
+static uint32_t msm_bus_dbg_register_client(void)
+{
+	int ret = 0;
+
+	if (init_vectors[0].src != requested_vectors[0].src) {
+		MSM_BUS_DBG("Shell client master changed. Unregistering\n");
+		msm_bus_dbg_unregister_client(clstate.cl);
+	}
+	if (init_vectors[0].dst != requested_vectors[0].dst) {
+		MSM_BUS_DBG("Shell client slave changed. Unregistering\n");
+		msm_bus_dbg_unregister_client(clstate.cl);
+	}
+
+	current_vectors[0].src = init_vectors[0].src;
+	requested_vectors[0].src = init_vectors[0].src;
+	current_vectors[0].dst = init_vectors[0].dst;
+	requested_vectors[0].dst = init_vectors[0].dst;
+
+	if (!clstate.enable) {
+		MSM_BUS_DBG("Enable bit not set, skipping registration: cl %d\n",
+			clstate.cl);
+		return 0;
+	}
+
+	if (clstate.cl) {
+		MSM_BUS_DBG("Client already registered, skipping registration\n");
+		return clstate.cl;
+	}
+
+	MSM_BUS_DBG("Registering shell client\n");
+	ret = msm_bus_scale_register_client(&shell_client);
+	return ret;
+}
+
+static int msm_bus_dbg_mas_get(void *data, u64 *val)
+{
+	*val = init_vectors[0].src;
+	MSM_BUS_DBG("Get master: %llu\n", *val);
+	return 0;
+}
+
+static int msm_bus_dbg_mas_set(void *data, u64 val)
+{
+	init_vectors[0].src = val;
+	MSM_BUS_DBG("Set master: %llu\n", val);
+	clstate.cl = msm_bus_dbg_register_client();
+	return 0;
+}
+DEFINE_SIMPLE_ATTRIBUTE(shell_client_mas_fops, msm_bus_dbg_mas_get,
+	msm_bus_dbg_mas_set, "%llu\n");
+
+static int msm_bus_dbg_slv_get(void *data, u64 *val)
+{
+	*val = init_vectors[0].dst;
+	MSM_BUS_DBG("Get slave: %llu\n", *val);
+	return 0;
+}
+
+static int msm_bus_dbg_slv_set(void *data, u64 val)
+{
+	init_vectors[0].dst = val;
+	MSM_BUS_DBG("Set slave: %llu\n", val);
+	clstate.cl = msm_bus_dbg_register_client();
+	return 0;
+}
+DEFINE_SIMPLE_ATTRIBUTE(shell_client_slv_fops, msm_bus_dbg_slv_get,
+	msm_bus_dbg_slv_set, "%llu\n");
+
+static int msm_bus_dbg_ab_get(void *data, u64 *val)
+{
+	*val = requested_vectors[0].ab;
+	MSM_BUS_DBG("Get ab: %llu\n", *val);
+	return 0;
+}
+
+static int msm_bus_dbg_ab_set(void *data, u64 val)
+{
+	requested_vectors[0].ab = val;
+	MSM_BUS_DBG("Set ab: %llu\n", val);
+	return 0;
+}
+DEFINE_SIMPLE_ATTRIBUTE(shell_client_ab_fops, msm_bus_dbg_ab_get,
+	msm_bus_dbg_ab_set, "%llu\n");
+
+static int msm_bus_dbg_ib_get(void *data, u64 *val)
+{
+	*val = requested_vectors[0].ib;
+	MSM_BUS_DBG("Get ib: %llu\n", *val);
+	return 0;
+}
+
+static int msm_bus_dbg_ib_set(void *data, u64 val)
+{
+	requested_vectors[0].ib = val;
+	MSM_BUS_DBG("Set ib: %llu\n", val);
+	return 0;
+}
+DEFINE_SIMPLE_ATTRIBUTE(shell_client_ib_fops, msm_bus_dbg_ib_get,
+	msm_bus_dbg_ib_set, "%llu\n");
+
+static int msm_bus_dbg_en_get(void *data, u64 *val)
+{
+	*val = clstate.enable;
+	MSM_BUS_DBG("Get enable: %llu\n", *val);
+	return 0;
+}
+
+static int msm_bus_dbg_en_set(void *data, u64 val)
+{
+	int ret = 0;
+
+	clstate.enable = val;
+	if (clstate.enable) {
+		if (!clstate.cl) {
+			MSM_BUS_DBG("client: %u\n", clstate.cl);
+			clstate.cl = msm_bus_dbg_register_client();
+			if (clstate.cl)
+				ret = msm_bus_dbg_update_cl_request(clstate.cl);
+		} else {
+			MSM_BUS_DBG("update request for cl: %u\n", clstate.cl);
+			ret = msm_bus_dbg_update_cl_request(clstate.cl);
+		}
+	}
+
+	MSM_BUS_DBG("Set enable: %llu\n", val);
+	return ret;
+}
+DEFINE_SIMPLE_ATTRIBUTE(shell_client_en_fops, msm_bus_dbg_en_get,
+	msm_bus_dbg_en_set, "%llu\n");
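+
+/*
+ * Usage sketch (illustrative): the shell client above is driven from
+ * userspace through the debugfs files created in msm_bus_debugfs_init().
+ * Assuming debugfs is mounted at /sys/kernel/debug, a vote can be placed
+ * with:
+ *
+ *   cd /sys/kernel/debug/msm-bus-dbg/shell-client
+ *   echo <master-port-id> > mas
+ *   echo <slave-port-id> > slv
+ *   echo <average-bw-bytes-per-sec> > ab
+ *   echo <instantaneous-bw-bytes-per-sec> > ib
+ *   echo 1 > update_request
+ *
+ * The port ids are platform-specific; all values are placeholders.
+ */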
+
+/*
+ * The following functions are used for viewing the client data
+ * and changing the client request at run-time
+ */
+
+static ssize_t client_data_read(struct file *file, char __user *buf,
+	size_t count, loff_t *ppos)
+{
+	int bsize = 0;
+	uint32_t cl = (uint32_t)(uintptr_t)file->private_data;
+	struct msm_bus_cldata *cldata = NULL;
+	const struct msm_bus_client_handle *handle = file->private_data;
+	int found = 0;
+	ssize_t ret;
+
+	rt_mutex_lock(&msm_bus_dbg_cllist_lock);
+	list_for_each_entry(cldata, &cl_list, list) {
+		if ((cldata->clid == cl) ||
+			(cldata->handle && (cldata->handle == handle))) {
+			found = 1;
+			break;
+		}
+	}
+
+	if (!found) {
+		rt_mutex_unlock(&msm_bus_dbg_cllist_lock);
+		return 0;
+	}
+
+	bsize = cldata->size;
+	ret = simple_read_from_buffer(buf, count, ppos,
+		cldata->buffer, bsize);
+	rt_mutex_unlock(&msm_bus_dbg_cllist_lock);
+
+	return ret;
+}
+
+static int client_data_open(struct inode *inode, struct file *file)
+{
+	file->private_data = inode->i_private;
+	return 0;
+}
+
+static const struct file_operations client_data_fops = {
+	.open		= client_data_open,
+	.read		= client_data_read,
+};
+
+struct dentry *msm_bus_dbg_create(const char *name, mode_t mode,
+	struct dentry *dent, uint32_t clid)
+{
+	if (dent == NULL) {
+		MSM_BUS_DBG("debugfs not ready yet\n");
+		return NULL;
+	}
+	return debugfs_create_file(name, mode, dent, (void *)(uintptr_t)clid,
+		&client_data_fops);
+}
+
+int msm_bus_dbg_add_client(const struct msm_bus_client_handle *pdata)
+{
+	struct msm_bus_cldata *cldata;
+
+	cldata = kzalloc(sizeof(struct msm_bus_cldata), GFP_KERNEL);
+	if (!cldata) {
+		MSM_BUS_DBG("Failed to allocate memory for client data\n");
+		return -ENOMEM;
+	}
+	cldata->handle = pdata;
+	rt_mutex_lock(&msm_bus_dbg_cllist_lock);
+	list_add_tail(&cldata->list, &cl_list);
+	rt_mutex_unlock(&msm_bus_dbg_cllist_lock);
+	return 0;
+}
+
+int msm_bus_dbg_rec_transaction(const struct msm_bus_client_handle *pdata,
+						u64 ab, u64 ib)
+{
+	struct msm_bus_cldata *cldata;
+	int i;
+	struct timespec ts;
+	bool found = false;
+	char *buf = NULL;
+
+	rt_mutex_lock(&msm_bus_dbg_cllist_lock);
+	list_for_each_entry(cldata, &cl_list, list) {
+		if (cldata->handle == pdata) {
+			found = true;
+			break;
+		}
+	}
+
+	if (!found) {
+		rt_mutex_unlock(&msm_bus_dbg_cllist_lock);
+		return -ENOENT;
+	}
+
+	if (cldata->file == NULL) {
+		if (pdata->name == NULL) {
+			MSM_BUS_DBG("Client doesn't have a name\n");
+			rt_mutex_unlock(&msm_bus_dbg_cllist_lock);
+			return -EINVAL;
+		}
+		cldata->file = debugfs_create_file(pdata->name, S_IRUGO,
+				clients, (void *)pdata, &client_data_fops);
+	}
+
+	if (cldata->size < (MAX_BUFF_SIZE - FILL_LIMIT))
+		i = cldata->size;
+	else {
+		i = 0;
+		cldata->size = 0;
+	}
+	buf = cldata->buffer;
+	ts = ktime_to_timespec(ktime_get());
+	i += scnprintf(buf + i, MAX_BUFF_SIZE - i, "\n%ld.%09lu\n",
+		ts.tv_sec, ts.tv_nsec);
+	i += scnprintf(buf + i, MAX_BUFF_SIZE - i, "master: ");
+
+	i += scnprintf(buf + i, MAX_BUFF_SIZE - i, "%d  ", pdata->mas);
+	i += scnprintf(buf + i, MAX_BUFF_SIZE - i, "\nslave : ");
+	i += scnprintf(buf + i, MAX_BUFF_SIZE - i, "%d  ", pdata->slv);
+	i += scnprintf(buf + i, MAX_BUFF_SIZE - i, "\nab     : ");
+	i += scnprintf(buf + i, MAX_BUFF_SIZE - i, "%llu  ", ab);
+
+	i += scnprintf(buf + i, MAX_BUFF_SIZE - i, "\nib     : ");
+	i += scnprintf(buf + i, MAX_BUFF_SIZE - i, "%llu  ", ib);
+	i += scnprintf(buf + i, MAX_BUFF_SIZE - i, "\n");
+	cldata->size = i;
+	rt_mutex_unlock(&msm_bus_dbg_cllist_lock);
+
+	trace_bus_update_request((int)ts.tv_sec, (int)ts.tv_nsec,
+		pdata->name, pdata->mas, pdata->slv, ab, ib);
+
+	return i;
+}
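+
+/*
+ * For reference, each record appended by msm_bus_dbg_rec_transaction()
+ * above renders in the client's debugfs file roughly as (values
+ * illustrative):
+ *
+ *   1234.567890123
+ *   master: 1
+ *   slave : 512
+ *   ab     : 100000000
+ *   ib     : 200000000
+ */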
+
+void msm_bus_dbg_remove_client(const struct msm_bus_client_handle *pdata)
+{
+	struct msm_bus_cldata *cldata = NULL;
+
+	rt_mutex_lock(&msm_bus_dbg_cllist_lock);
+	list_for_each_entry(cldata, &cl_list, list) {
+		if (cldata->handle == pdata) {
+			debugfs_remove(cldata->file);
+			list_del(&cldata->list);
+			kfree(cldata);
+			break;
+		}
+	}
+	rt_mutex_unlock(&msm_bus_dbg_cllist_lock);
+}
+
+static int msm_bus_dbg_record_client(const struct msm_bus_scale_pdata *pdata,
+	int index, uint32_t clid, struct dentry *file)
+{
+	struct msm_bus_cldata *cldata;
+
+	cldata = kmalloc(sizeof(struct msm_bus_cldata), GFP_KERNEL);
+	if (!cldata) {
+		MSM_BUS_DBG("Failed to allocate memory for client data\n");
+		return -ENOMEM;
+	}
+	cldata->pdata = pdata;
+	cldata->index = index;
+	cldata->clid = clid;
+	cldata->file = file;
+	cldata->size = 0;
+	rt_mutex_lock(&msm_bus_dbg_cllist_lock);
+	list_add_tail(&cldata->list, &cl_list);
+	rt_mutex_unlock(&msm_bus_dbg_cllist_lock);
+	return 0;
+}
+
+static void msm_bus_dbg_free_client(uint32_t clid)
+{
+	struct msm_bus_cldata *cldata = NULL;
+
+	rt_mutex_lock(&msm_bus_dbg_cllist_lock);
+	list_for_each_entry(cldata, &cl_list, list) {
+		if (cldata->clid == clid) {
+			debugfs_remove(cldata->file);
+			list_del(&cldata->list);
+			kfree(cldata);
+			break;
+		}
+	}
+	rt_mutex_unlock(&msm_bus_dbg_cllist_lock);
+}
+
+static int msm_bus_dbg_fill_cl_buffer(const struct msm_bus_scale_pdata *pdata,
+	int index, uint32_t clid)
+{
+	int i = 0, j;
+	char *buf = NULL;
+	struct msm_bus_cldata *cldata = NULL;
+	struct timespec ts;
+	int found = 0;
+
+	rt_mutex_lock(&msm_bus_dbg_cllist_lock);
+	list_for_each_entry(cldata, &cl_list, list) {
+		if (cldata->clid == clid) {
+			found = 1;
+			break;
+		}
+	}
+
+	if (!found) {
+		rt_mutex_unlock(&msm_bus_dbg_cllist_lock);
+		return -ENOENT;
+	}
+
+	if (cldata->file == NULL) {
+		if (pdata->name == NULL) {
+			rt_mutex_unlock(&msm_bus_dbg_cllist_lock);
+			MSM_BUS_DBG("Client doesn't have a name\n");
+			return -EINVAL;
+		}
+		cldata->file = msm_bus_dbg_create(pdata->name, S_IRUGO,
+			clients, clid);
+	}
+
+	if (cldata->size < (MAX_BUFF_SIZE - FILL_LIMIT))
+		i = cldata->size;
+	else {
+		i = 0;
+		cldata->size = 0;
+	}
+	buf = cldata->buffer;
+	ts = ktime_to_timespec(ktime_get());
+	i += scnprintf(buf + i, MAX_BUFF_SIZE - i, "\n%ld.%09lu\n",
+		ts.tv_sec, ts.tv_nsec);
+	i += scnprintf(buf + i, MAX_BUFF_SIZE - i, "curr   : %d\n", index);
+	i += scnprintf(buf + i, MAX_BUFF_SIZE - i, "masters: ");
+
+	for (j = 0; j < pdata->usecase->num_paths; j++)
+		i += scnprintf(buf + i, MAX_BUFF_SIZE - i, "%d  ",
+			pdata->usecase[index].vectors[j].src);
+	i += scnprintf(buf + i, MAX_BUFF_SIZE - i, "\nslaves : ");
+	for (j = 0; j < pdata->usecase->num_paths; j++)
+		i += scnprintf(buf + i, MAX_BUFF_SIZE - i, "%d  ",
+			pdata->usecase[index].vectors[j].dst);
+	i += scnprintf(buf + i, MAX_BUFF_SIZE - i, "\nab     : ");
+	for (j = 0; j < pdata->usecase->num_paths; j++)
+		i += scnprintf(buf + i, MAX_BUFF_SIZE - i, "%llu  ",
+			pdata->usecase[index].vectors[j].ab);
+	i += scnprintf(buf + i, MAX_BUFF_SIZE - i, "\nib     : ");
+	for (j = 0; j < pdata->usecase->num_paths; j++)
+		i += scnprintf(buf + i, MAX_BUFF_SIZE - i, "%llu  ",
+			pdata->usecase[index].vectors[j].ib);
+	i += scnprintf(buf + i, MAX_BUFF_SIZE - i, "\n");
+
+	for (j = 0; j < pdata->usecase->num_paths; j++)
+		trace_bus_update_request((int)ts.tv_sec, (int)ts.tv_nsec,
+		pdata->name,
+		pdata->usecase[index].vectors[j].src,
+		pdata->usecase[index].vectors[j].dst,
+		pdata->usecase[index].vectors[j].ab,
+		pdata->usecase[index].vectors[j].ib);
+
+	cldata->index = index;
+	cldata->size = i;
+	rt_mutex_unlock(&msm_bus_dbg_cllist_lock);
+
+	return i;
+}
+
+static ssize_t  msm_bus_dbg_update_request_write(struct file *file,
+	const char __user *ubuf, size_t cnt, loff_t *ppos)
+{
+	struct msm_bus_cldata *cldata;
+	unsigned long index = 0;
+	int ret = 0;
+	char *chid;
+	char *buf = kmalloc(cnt + 1, GFP_KERNEL);
+	int found = 0;
+	uint32_t clid;
+	ssize_t res = cnt;
+
+	if (!buf) {
+		MSM_BUS_ERR("Memory allocation for buffer failed\n");
+		return -ENOMEM;
+	}
+	if (cnt == 0) {
+		res = 0;
+		goto out;
+	}
+	if (copy_from_user(buf, ubuf, cnt)) {
+		res = -EFAULT;
+		goto out;
+	}
+	buf[cnt] = '\0';
+	chid = buf;
+	MSM_BUS_DBG("buffer: %s\n size: %zu\n", buf, cnt);
+
+	rt_mutex_lock(&msm_bus_dbg_cllist_lock);
+	list_for_each_entry(cldata, &cl_list, list) {
+		if (strnstr(chid, cldata->pdata->name, cnt)) {
+			found = 1;
+			strsep(&chid, " ");
+			if (chid) {
+				ret = kstrtoul(chid, 10, &index);
+				if (ret) {
+					MSM_BUS_DBG("Index conversion failed\n");
+					rt_mutex_unlock(
+						&msm_bus_dbg_cllist_lock);
+					res = -EFAULT;
+					goto out;
+				}
+			} else {
+				MSM_BUS_DBG("Error parsing input. Index not found\n");
+				found = 0;
+			}
+			if (index > cldata->pdata->num_usecases) {
+				MSM_BUS_DBG("Invalid index!\n");
+				rt_mutex_unlock(&msm_bus_dbg_cllist_lock);
+				res = -EINVAL;
+				goto out;
+			}
+			clid = cldata->clid;
+			break;
+		}
+	}
+	rt_mutex_unlock(&msm_bus_dbg_cllist_lock);
+
+	if (found)
+		msm_bus_scale_client_update_request(clid, index);
+
+out:
+	kfree(buf);
+	return res;
+}
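+
+/*
+ * The write handler above expects input of the form
+ * "<client-name> <usecase-index>". For example (name and index are
+ * placeholders):
+ *
+ *   echo "my-client 2" > \
+ *	/sys/kernel/debug/msm-bus-dbg/client-data/update-request
+ */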
+
+/*
+ * The following functions are used for viewing the commit data
+ * for each fabric
+ */
+static ssize_t fabric_data_read(struct file *file, char __user *buf,
+	size_t count, loff_t *ppos)
+{
+	struct msm_bus_fab_list *fablist = NULL;
+	int bsize = 0;
+	ssize_t ret;
+	const char *name = file->private_data;
+	int found = 0;
+
+	mutex_lock(&msm_bus_dbg_fablist_lock);
+	list_for_each_entry(fablist, &fabdata_list, list) {
+		if (strcmp(fablist->name, name) == 0) {
+			found = 1;
+			break;
+		}
+	}
+	if (!found) {
+		mutex_unlock(&msm_bus_dbg_fablist_lock);
+		return -ENOENT;
+	}
+	bsize = fablist->size;
+	ret = simple_read_from_buffer(buf, count, ppos,
+		fablist->buffer, bsize);
+	mutex_unlock(&msm_bus_dbg_fablist_lock);
+	return ret;
+}
+
+static const struct file_operations fabric_data_fops = {
+	.open		= client_data_open,
+	.read		= fabric_data_read,
+};
+
+static ssize_t rules_dbg_read(struct file *file, char __user *buf,
+	size_t count, loff_t *ppos)
+{
+	ssize_t ret;
+
+	memset(rules_buf, 0, MAX_BUFF_SIZE);
+	print_rules_buf(rules_buf, MAX_BUFF_SIZE);
+	ret = simple_read_from_buffer(buf, count, ppos,
+		rules_buf, MAX_BUFF_SIZE);
+	return ret;
+}
+
+static int rules_dbg_open(struct inode *inode, struct file *file)
+{
+	file->private_data = inode->i_private;
+	return 0;
+}
+
+static const struct file_operations rules_dbg_fops = {
+	.open		= rules_dbg_open,
+	.read		= rules_dbg_read,
+};
+
+static int msm_bus_dbg_record_fabric(const char *fabname, struct dentry *file)
+{
+	struct msm_bus_fab_list *fablist;
+	int ret = 0;
+
+	mutex_lock(&msm_bus_dbg_fablist_lock);
+	fablist = kmalloc(sizeof(struct msm_bus_fab_list), GFP_KERNEL);
+	if (!fablist) {
+		MSM_BUS_DBG("Failed to allocate memory for commit data\n");
+		ret = -ENOMEM;
+		goto err;
+	}
+
+	fablist->name = fabname;
+	fablist->size = 0;
+	list_add_tail(&fablist->list, &fabdata_list);
+err:
+	mutex_unlock(&msm_bus_dbg_fablist_lock);
+	return ret;
+}
+
+static void msm_bus_dbg_free_fabric(const char *fabname)
+{
+	struct msm_bus_fab_list *fablist = NULL;
+
+	mutex_lock(&msm_bus_dbg_fablist_lock);
+	list_for_each_entry(fablist, &fabdata_list, list) {
+		if (strcmp(fablist->name, fabname) == 0) {
+			debugfs_remove(fablist->file);
+			list_del(&fablist->list);
+			kfree(fablist);
+			break;
+		}
+	}
+	mutex_unlock(&msm_bus_dbg_fablist_lock);
+}
+
+static int msm_bus_dbg_fill_fab_buffer(const char *fabname,
+	void *cdata, int nmasters, int nslaves,
+	int ntslaves)
+{
+	int i;
+	char *buf = NULL;
+	struct msm_bus_fab_list *fablist = NULL;
+	struct timespec ts;
+	int found = 0;
+
+	mutex_lock(&msm_bus_dbg_fablist_lock);
+	list_for_each_entry(fablist, &fabdata_list, list) {
+		if (strcmp(fablist->name, fabname) == 0) {
+			found = 1;
+			break;
+		}
+	}
+	if (!found) {
+		mutex_unlock(&msm_bus_dbg_fablist_lock);
+		return -ENOENT;
+	}
+
+	if (fablist->file == NULL) {
+		MSM_BUS_DBG("Fabric dbg entry does not exist\n");
+		mutex_unlock(&msm_bus_dbg_fablist_lock);
+		return -EFAULT;
+	}
+
+	if (fablist->size < MAX_BUFF_SIZE - 256)
+		i = fablist->size;
+	else {
+		i = 0;
+		fablist->size = 0;
+	}
+	buf = fablist->buffer;
+	mutex_unlock(&msm_bus_dbg_fablist_lock);
+	ts = ktime_to_timespec(ktime_get());
+	i += scnprintf(buf + i, MAX_BUFF_SIZE - i, "\n%ld.%09lu\n",
+		ts.tv_sec, ts.tv_nsec);
+
+	msm_bus_rpm_fill_cdata_buffer(&i, buf, MAX_BUFF_SIZE, cdata,
+		nmasters, nslaves, ntslaves);
+	i += scnprintf(buf + i, MAX_BUFF_SIZE - i, "\n");
+	mutex_lock(&msm_bus_dbg_fablist_lock);
+	fablist->size = i;
+	mutex_unlock(&msm_bus_dbg_fablist_lock);
+	return 0;
+}
+
+static const struct file_operations msm_bus_dbg_update_request_fops = {
+	.open = client_data_open,
+	.write = msm_bus_dbg_update_request_write,
+};
+
+static int msm_bus_dbg_dump_clients_open(struct inode *inode, struct file *file)
+{
+	file->private_data = inode->i_private;
+	return 0;
+}
+
+static ssize_t msm_bus_dbg_dump_clients_read(struct file *file,
+	char __user *buf, size_t count, loff_t *ppos)
+{
+	int j, cnt;
+	char msg[50];
+	struct msm_bus_cldata *cldata = NULL;
+
+	cnt = scnprintf(msg, 50,
+		"\nDumping current client votes to trace log\n");
+	if (*ppos)
+		goto exit_dump_clients_read;
+
+	rt_mutex_lock(&msm_bus_dbg_cllist_lock);
+	list_for_each_entry(cldata, &cl_list, list) {
+		if (IS_ERR_OR_NULL(cldata->pdata))
+			continue;
+		for (j = 0; j < cldata->pdata->usecase->num_paths; j++) {
+			if (cldata->index == -1)
+				continue;
+			trace_bus_client_status(
+			cldata->pdata->name,
+			cldata->pdata->usecase[cldata->index].vectors[j].src,
+			cldata->pdata->usecase[cldata->index].vectors[j].dst,
+			cldata->pdata->usecase[cldata->index].vectors[j].ab,
+			cldata->pdata->usecase[cldata->index].vectors[j].ib,
+			cldata->pdata->active_only);
+		}
+	}
+	rt_mutex_unlock(&msm_bus_dbg_cllist_lock);
+exit_dump_clients_read:
+	return simple_read_from_buffer(buf, count, ppos, msg, cnt);
+}
+
+static const struct file_operations msm_bus_dbg_dump_clients_fops = {
+	.open		= msm_bus_dbg_dump_clients_open,
+	.read		= msm_bus_dbg_dump_clients_read,
+};
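+
+/*
+ * Reading the "dump_clients" file created in msm_bus_debugfs_init() emits
+ * every registered client's current vote to the trace log, e.g.:
+ *
+ *   cat /sys/kernel/debug/msm-bus-dbg/client-data/dump_clients
+ */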
+
+/**
+ * msm_bus_dbg_client_data() - Add debug data for clients
+ * @pdata: Platform data of the client
+ * @index: The current index or operation to be performed
+ * @clid: Client handle obtained during registration
+ */
+void msm_bus_dbg_client_data(struct msm_bus_scale_pdata *pdata, int index,
+	uint32_t clid)
+{
+	struct dentry *file = NULL;
+
+	if (index == MSM_BUS_DBG_REGISTER) {
+		msm_bus_dbg_record_client(pdata, index, clid, file);
+		if (!pdata->name) {
+			MSM_BUS_DBG("Cannot create debugfs entry. Null name\n");
+			return;
+		}
+	} else if (index == MSM_BUS_DBG_UNREGISTER) {
+		msm_bus_dbg_free_client(clid);
+		MSM_BUS_DBG("Client %d unregistered\n", clid);
+	} else
+		msm_bus_dbg_fill_cl_buffer(pdata, index, clid);
+}
+EXPORT_SYMBOL(msm_bus_dbg_client_data);
+
+/**
+ * msm_bus_dbg_commit_data() - Add commit data from fabrics
+ * @fabname: Fabric name specified in platform data
+ * @cdata: Commit Data
+ * @nmasters: Number of masters attached to fabric
+ * @nslaves: Number of slaves attached to fabric
+ * @ntslaves: Number of tiered slaves attached to fabric
+ * @op: Operation to be performed
+ */
+void msm_bus_dbg_commit_data(const char *fabname, void *cdata,
+	int nmasters, int nslaves, int ntslaves, int op)
+{
+	struct dentry *file = NULL;
+
+	if (op == MSM_BUS_DBG_REGISTER)
+		msm_bus_dbg_record_fabric(fabname, file);
+	else if (op == MSM_BUS_DBG_UNREGISTER)
+		msm_bus_dbg_free_fabric(fabname);
+	else
+		msm_bus_dbg_fill_fab_buffer(fabname, cdata, nmasters,
+			nslaves, ntslaves);
+}
+EXPORT_SYMBOL(msm_bus_dbg_commit_data);
+
+static int __init msm_bus_debugfs_init(void)
+{
+	struct dentry *commit, *shell_client, *rules_dbg;
+	struct msm_bus_fab_list *fablist;
+	struct msm_bus_cldata *cldata = NULL;
+	uint64_t val = 0;
+
+	dir = debugfs_create_dir("msm-bus-dbg", NULL);
+	if ((!dir) || IS_ERR(dir)) {
+		MSM_BUS_ERR("Couldn't create msm-bus-dbg\n");
+		goto err;
+	}
+
+	clients = debugfs_create_dir("client-data", dir);
+	if ((!clients) || IS_ERR(clients)) {
+		MSM_BUS_ERR("Couldn't create clients\n");
+		goto err;
+	}
+
+	shell_client = debugfs_create_dir("shell-client", dir);
+	if ((!shell_client) || IS_ERR(shell_client)) {
+		MSM_BUS_ERR("Couldn't create shell-client\n");
+		goto err;
+	}
+
+	commit = debugfs_create_dir("commit-data", dir);
+	if ((!commit) || IS_ERR(commit)) {
+		MSM_BUS_ERR("Couldn't create commit\n");
+		goto err;
+	}
+
+	rules_dbg = debugfs_create_dir("rules-dbg", dir);
+	if ((!rules_dbg) || IS_ERR(rules_dbg)) {
+		MSM_BUS_ERR("Couldn't create rules-dbg\n");
+		goto err;
+	}
+
+	if (debugfs_create_file("print_rules", S_IRUGO | S_IWUSR,
+		rules_dbg, &val, &rules_dbg_fops) == NULL)
+		goto err;
+
+	if (debugfs_create_file("update_request", S_IRUGO | S_IWUSR,
+		shell_client, &val, &shell_client_en_fops) == NULL)
+		goto err;
+	if (debugfs_create_file("ib", S_IRUGO | S_IWUSR, shell_client, &val,
+		&shell_client_ib_fops) == NULL)
+		goto err;
+	if (debugfs_create_file("ab", S_IRUGO | S_IWUSR, shell_client, &val,
+		&shell_client_ab_fops) == NULL)
+		goto err;
+	if (debugfs_create_file("slv", S_IRUGO | S_IWUSR, shell_client,
+		&val, &shell_client_slv_fops) == NULL)
+		goto err;
+	if (debugfs_create_file("mas", S_IRUGO | S_IWUSR, shell_client,
+		&val, &shell_client_mas_fops) == NULL)
+		goto err;
+	if (debugfs_create_file("update-request", S_IRUGO | S_IWUSR,
+		clients, NULL, &msm_bus_dbg_update_request_fops) == NULL)
+		goto err;
+
+	rules_buf = kzalloc(MAX_BUFF_SIZE, GFP_KERNEL);
+	if (!rules_buf) {
+		MSM_BUS_ERR("Failed to alloc rules_buf");
+		goto err;
+	}
+
+	rt_mutex_lock(&msm_bus_dbg_cllist_lock);
+	list_for_each_entry(cldata, &cl_list, list) {
+		if (cldata->pdata) {
+			if (cldata->pdata->name == NULL) {
+				MSM_BUS_DBG("Client name not found\n");
+				continue;
+			}
+			cldata->file = msm_bus_dbg_create(cldata->pdata->name,
+				S_IRUGO, clients, cldata->clid);
+		} else if (cldata->handle) {
+			if (cldata->handle->name == NULL) {
+				MSM_BUS_DBG("Client doesn't have a name\n");
+				continue;
+			}
+			cldata->file = debugfs_create_file(cldata->handle->name,
+							S_IRUGO, clients,
+							(void *)cldata->handle,
+							&client_data_fops);
+		}
+	}
+	rt_mutex_unlock(&msm_bus_dbg_cllist_lock);
+
+	if (debugfs_create_file("dump_clients", S_IRUGO | S_IWUSR,
+		clients, NULL, &msm_bus_dbg_dump_clients_fops) == NULL)
+		goto err;
+
+	mutex_lock(&msm_bus_dbg_fablist_lock);
+	list_for_each_entry(fablist, &fabdata_list, list) {
+		fablist->file = debugfs_create_file(fablist->name, S_IRUGO,
+			commit, (void *)fablist->name, &fabric_data_fops);
+		if (fablist->file == NULL) {
+			MSM_BUS_DBG("Cannot create files for commit data\n");
+			kfree(rules_buf);
+			mutex_unlock(&msm_bus_dbg_fablist_lock);
+			goto err;
+		}
+	}
+	mutex_unlock(&msm_bus_dbg_fablist_lock);
+
+	msm_bus_dbg_init_vectors();
+	return 0;
+err:
+	debugfs_remove_recursive(dir);
+	return -ENODEV;
+}
+late_initcall(msm_bus_debugfs_init);
+
+static void __exit msm_bus_dbg_teardown(void)
+{
+	struct msm_bus_fab_list *fablist = NULL, *fablist_temp;
+	struct msm_bus_cldata *cldata = NULL, *cldata_temp;
+
+	debugfs_remove_recursive(dir);
+
+	rt_mutex_lock(&msm_bus_dbg_cllist_lock);
+	list_for_each_entry_safe(cldata, cldata_temp, &cl_list, list) {
+		list_del(&cldata->list);
+		kfree(cldata);
+	}
+	rt_mutex_unlock(&msm_bus_dbg_cllist_lock);
+
+	mutex_lock(&msm_bus_dbg_fablist_lock);
+	list_for_each_entry_safe(fablist, fablist_temp, &fabdata_list, list) {
+		list_del(&fablist->list);
+		kfree(fablist);
+	}
+	kfree(rules_buf);
+	mutex_unlock(&msm_bus_dbg_fablist_lock);
+}
+module_exit(msm_bus_dbg_teardown);
+MODULE_DESCRIPTION("Debugfs for msm bus scaling client");
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Gagan Mac <gmac@codeaurora.org>");
diff -Nruw linux-4.4.115/drivers/soc/qcom/msm_bus/msm_bus_fabric_adhoc.c linux-4.4.115-fbx/drivers/soc/qcom/msm_bus/msm_bus_fabric_adhoc.c
--- linux-4.4.115/drivers/soc/qcom/msm_bus/msm_bus_fabric_adhoc.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/soc/qcom/msm_bus/msm_bus_fabric_adhoc.c	2019-01-22 16:16:26.659274986 +0100
@@ -0,0 +1,1405 @@
+/* Copyright (c) 2014-2017, Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <soc/qcom/rpm-smd.h>
+#include <trace/events/trace_msm_bus.h>
+#include "msm_bus_core.h"
+#include "msm_bus_adhoc.h"
+#include "msm_bus_noc.h"
+#include "msm_bus_bimc.h"
+
+static LIST_HEAD(fabdev_list);
+
+static int msm_bus_dev_init_qos(struct device *dev, void *data);
+
+ssize_t bw_show(struct device *dev, struct device_attribute *attr,
+			  char *buf)
+{
+	struct msm_bus_node_info_type *node_info = NULL;
+	struct msm_bus_node_device_type *bus_node = NULL;
+	int i;
+	int off = 0;
+
+	bus_node = to_msm_bus_node(dev);
+	if (!bus_node)
+		return -EINVAL;
+
+	node_info = bus_node->node_info;
+
+	for (i = 0; i < bus_node->num_lnodes; i++) {
+		if (!bus_node->lnode_list[i].in_use)
+			continue;
+		off += scnprintf((buf + off), PAGE_SIZE - off,
+		"[%d]:%s:Act_IB %llu Act_AB %llu Slp_IB %llu Slp_AB %llu\n",
+			i, bus_node->lnode_list[i].cl_name,
+			bus_node->lnode_list[i].lnode_ib[ACTIVE_CTX],
+			bus_node->lnode_list[i].lnode_ab[ACTIVE_CTX],
+			bus_node->lnode_list[i].lnode_ib[DUAL_CTX],
+			bus_node->lnode_list[i].lnode_ab[DUAL_CTX]);
+		trace_printk(
+		"[%d]:%s:Act_IB %llu Act_AB %llu Slp_IB %llu Slp_AB %llu\n",
+			i, bus_node->lnode_list[i].cl_name,
+			bus_node->lnode_list[i].lnode_ib[ACTIVE_CTX],
+			bus_node->lnode_list[i].lnode_ab[ACTIVE_CTX],
+			bus_node->lnode_list[i].lnode_ib[DUAL_CTX],
+			bus_node->lnode_list[i].lnode_ab[DUAL_CTX]);
+	}
+	off += scnprintf((buf + off), PAGE_SIZE - off,
+	"Max_Act_IB %llu Sum_Act_AB %llu Act_Util_fact %d Act_Vrail_comp %d\n",
+		bus_node->node_bw[ACTIVE_CTX].max_ib,
+		bus_node->node_bw[ACTIVE_CTX].sum_ab,
+		bus_node->node_bw[ACTIVE_CTX].util_used,
+		bus_node->node_bw[ACTIVE_CTX].vrail_used);
+	off += scnprintf((buf + off), PAGE_SIZE - off,
+	"Max_Slp_IB %llu Sum_Slp_AB %llu Slp_Util_fact %d Slp_Vrail_comp %d\n",
+		bus_node->node_bw[DUAL_CTX].max_ib,
+		bus_node->node_bw[DUAL_CTX].sum_ab,
+		bus_node->node_bw[DUAL_CTX].util_used,
+		bus_node->node_bw[DUAL_CTX].vrail_used);
+	trace_printk(
+	"Max_Act_IB %llu Sum_Act_AB %llu Act_Util_fact %d Act_Vrail_comp %d\n",
+		bus_node->node_bw[ACTIVE_CTX].max_ib,
+		bus_node->node_bw[ACTIVE_CTX].sum_ab,
+		bus_node->node_bw[ACTIVE_CTX].util_used,
+		bus_node->node_bw[ACTIVE_CTX].vrail_used);
+	trace_printk(
+	"Max_Slp_IB %llu Sum_Slp_AB %llu Slp_Util_fact %d Slp_Vrail_comp %d\n",
+		bus_node->node_bw[DUAL_CTX].max_ib,
+		bus_node->node_bw[DUAL_CTX].sum_ab,
+		bus_node->node_bw[DUAL_CTX].util_used,
+		bus_node->node_bw[DUAL_CTX].vrail_used);
+	return off;
+}
+
+ssize_t bw_store(struct device *dev, struct device_attribute *attr,
+			   const char *buf, size_t count)
+{
+	return count;
+}
+
+DEVICE_ATTR(bw, 0600, bw_show, bw_store);
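+
+/*
+ * The "bw" attribute is attached to every bus node device in
+ * msm_bus_device_init() below. Reading it dumps the per-client and
+ * aggregated votes for that node, e.g. (the exact sysfs path depends on
+ * the bus name and the platform's node names, shown here as placeholders):
+ *
+ *   cat /sys/bus/<msm-bus-name>/devices/<node-name>/bw
+ */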
+
+struct static_rules_type {
+	int num_rules;
+	struct bus_rule_type *rules;
+};
+
+static struct static_rules_type static_rules;
+
+static int bus_get_reg(struct nodeclk *nclk, struct device *dev)
+{
+	int ret = 0;
+	struct msm_bus_node_device_type *node_dev;
+
+	if (!(dev && nclk))
+		return -ENXIO;
+
+	node_dev = to_msm_bus_node(dev);
+	if (!strlen(nclk->reg_name)) {
+		dev_dbg(dev, "No regulator exist for node %d\n",
+						node_dev->node_info->id);
+		goto exit_of_get_reg;
+	} else {
+		if (!(IS_ERR_OR_NULL(nclk->reg)))
+			goto exit_of_get_reg;
+
+		nclk->reg = devm_regulator_get(dev, nclk->reg_name);
+		if (IS_ERR_OR_NULL(nclk->reg)) {
+			ret =
+			(IS_ERR(nclk->reg) ? PTR_ERR(nclk->reg) : -ENXIO);
+			dev_err(dev, "Error: Failed to get regulator %s:%d\n",
+							nclk->reg_name, ret);
+		} else {
+			dev_dbg(dev, "Successfully got regulator for %d\n",
+				node_dev->node_info->id);
+		}
+	}
+
+exit_of_get_reg:
+	return ret;
+}
+
+static int bus_enable_reg(struct nodeclk *nclk)
+{
+	int ret = 0;
+
+	if (!nclk) {
+		ret = -ENXIO;
+		goto exit_bus_enable_reg;
+	}
+
+	if ((IS_ERR_OR_NULL(nclk->reg))) {
+		ret = -ENXIO;
+		goto exit_bus_enable_reg;
+	}
+
+	ret = regulator_enable(nclk->reg);
+	if (ret) {
+		MSM_BUS_ERR("Failed to enable regulator for %s\n",
+							nclk->reg_name);
+		goto exit_bus_enable_reg;
+	}
+	pr_debug("%s: Enabled Reg\n", __func__);
+exit_bus_enable_reg:
+	return ret;
+}
+
+static int bus_disable_reg(struct nodeclk *nclk)
+{
+	int ret = 0;
+
+	if (!nclk) {
+		ret = -ENXIO;
+		goto exit_bus_disable_reg;
+	}
+
+	if ((IS_ERR_OR_NULL(nclk->reg))) {
+		ret = -ENXIO;
+		goto exit_bus_disable_reg;
+	}
+
+	regulator_disable(nclk->reg);
+	pr_debug("%s: Disabled Reg\n", __func__);
+exit_bus_disable_reg:
+	return ret;
+}
+
+static int enable_nodeclk(struct nodeclk *nclk, struct device *dev)
+{
+	int ret = 0;
+
+	if (!nclk->enable && !nclk->setrate_only_clk) {
+		if (dev && strlen(nclk->reg_name)) {
+			if (IS_ERR_OR_NULL(nclk->reg)) {
+				ret = bus_get_reg(nclk, dev);
+				if (ret) {
+					dev_dbg(dev,
+						"Failed to get reg. Err %d\n",
+						ret);
+					goto exit_enable_nodeclk;
+				}
+			}
+
+			ret = bus_enable_reg(nclk);
+			if (ret) {
+				dev_dbg(dev, "Failed to enable reg. Err %d\n",
+									ret);
+				goto exit_enable_nodeclk;
+			}
+		}
+		ret = clk_prepare_enable(nclk->clk);
+
+		if (ret) {
+			MSM_BUS_ERR("%s: failed to enable clk ", __func__);
+			nclk->enable = false;
+		} else
+			nclk->enable = true;
+	}
+exit_enable_nodeclk:
+	return ret;
+}
+
+static int disable_nodeclk(struct nodeclk *nclk)
+{
+	int ret = 0;
+
+	if (nclk->enable && !nclk->setrate_only_clk) {
+		clk_disable_unprepare(nclk->clk);
+		nclk->enable = false;
+		bus_disable_reg(nclk);
+	}
+	return ret;
+}
+
+static int setrate_nodeclk(struct nodeclk *nclk, long rate)
+{
+	int ret = 0;
+
+	if (!nclk->enable_only_clk)
+		ret = clk_set_rate(nclk->clk, rate);
+
+	if (ret)
+		MSM_BUS_ERR("%s: failed to setrate clk", __func__);
+	return ret;
+}
+
+static int send_rpm_msg(struct msm_bus_node_device_type *ndev, int ctx)
+{
+	int ret = 0;
+	int rsc_type;
+	struct msm_rpm_kvp rpm_kvp;
+	int rpm_ctx;
+
+	if (!ndev) {
+		MSM_BUS_ERR("%s: Error getting node info.", __func__);
+		ret = -ENODEV;
+		goto exit_send_rpm_msg;
+	}
+
+	rpm_kvp.length = sizeof(uint64_t);
+	rpm_kvp.key = RPM_MASTER_FIELD_BW;
+
+	if (ctx == DUAL_CTX)
+		rpm_ctx = MSM_RPM_CTX_SLEEP_SET;
+	else
+		rpm_ctx = MSM_RPM_CTX_ACTIVE_SET;
+
+	rpm_kvp.data = (uint8_t *)&ndev->node_bw[ctx].sum_ab;
+
+	if (ndev->node_info->mas_rpm_id != -1) {
+		rsc_type = RPM_BUS_MASTER_REQ;
+		ret = msm_rpm_send_message(rpm_ctx, rsc_type,
+			ndev->node_info->mas_rpm_id, &rpm_kvp, 1);
+		if (ret) {
+			MSM_BUS_ERR("%s: Failed to send RPM message:",
+					__func__);
+			MSM_BUS_ERR("%s:Node Id %d RPM id %d",
+			__func__, ndev->node_info->id,
+				 ndev->node_info->mas_rpm_id);
+			goto exit_send_rpm_msg;
+		}
+		trace_bus_agg_bw(ndev->node_info->id,
+			ndev->node_info->mas_rpm_id, rpm_ctx,
+			ndev->node_bw[ctx].sum_ab);
+	}
+
+	if (ndev->node_info->slv_rpm_id != -1) {
+		rsc_type = RPM_BUS_SLAVE_REQ;
+		ret = msm_rpm_send_message(rpm_ctx, rsc_type,
+			ndev->node_info->slv_rpm_id, &rpm_kvp, 1);
+		if (ret) {
+			MSM_BUS_ERR("%s: Failed to send RPM message:",
+						__func__);
+			MSM_BUS_ERR("%s: Node Id %d RPM id %d",
+			__func__, ndev->node_info->id,
+				ndev->node_info->slv_rpm_id);
+			goto exit_send_rpm_msg;
+		}
+		trace_bus_agg_bw(ndev->node_info->id,
+			ndev->node_info->slv_rpm_id, rpm_ctx,
+			ndev->node_bw[ctx].sum_ab);
+	}
+exit_send_rpm_msg:
+	return ret;
+}
+
+static int flush_bw_data(struct msm_bus_node_device_type *node_info, int ctx)
+{
+	int ret = 0;
+
+	if (!node_info) {
+		MSM_BUS_ERR("%s: Unable to find bus device for device",
+			__func__);
+		ret = -ENODEV;
+		goto exit_flush_bw_data;
+	}
+
+	if (node_info->node_bw[ctx].last_sum_ab !=
+				node_info->node_bw[ctx].sum_ab) {
+		if (node_info->ap_owned) {
+			struct msm_bus_node_device_type *bus_device =
+			to_msm_bus_node(node_info->node_info->bus_device);
+			struct msm_bus_fab_device_type *fabdev =
+							bus_device->fabdev;
+
+			/*
+			 * For AP owned ports, only care about the Active
+			 * context bandwidth.
+			 */
+			if (fabdev && (ctx == ACTIVE_CTX) &&
+				fabdev->noc_ops.update_bw_reg &&
+				fabdev->noc_ops.update_bw_reg
+					(node_info->node_info->qos_params.mode))
+				ret = fabdev->noc_ops.set_bw(node_info,
+							fabdev->qos_base,
+							fabdev->base_offset,
+							fabdev->qos_off,
+							fabdev->qos_freq);
+		} else {
+			ret = send_rpm_msg(node_info, ctx);
+
+			if (ret)
+				MSM_BUS_ERR("%s: Failed to send RPM msg for %d",
+				__func__, node_info->node_info->id);
+		}
+		node_info->node_bw[ctx].last_sum_ab =
+					node_info->node_bw[ctx].sum_ab;
+	}
+
+exit_flush_bw_data:
+	return ret;
+
+}
+
+static int flush_clk_data(struct msm_bus_node_device_type *node, int ctx)
+{
+	struct nodeclk *nodeclk = NULL;
+	int ret = 0;
+
+	if (!node) {
+		MSM_BUS_ERR("Unable to find bus device");
+		ret = -ENODEV;
+		goto exit_flush_clk_data;
+	}
+
+	nodeclk = &node->clk[ctx];
+
+	if (IS_ERR_OR_NULL(nodeclk) || IS_ERR_OR_NULL(nodeclk->clk))
+		goto exit_flush_clk_data;
+
+	if (nodeclk->rate != node->node_bw[ctx].cur_clk_hz) {
+		long rounded_rate;
+
+		nodeclk->rate = node->node_bw[ctx].cur_clk_hz;
+		nodeclk->dirty = true;
+
+		if (nodeclk->rate) {
+			rounded_rate = clk_round_rate(nodeclk->clk,
+							nodeclk->rate);
+			ret = setrate_nodeclk(nodeclk, rounded_rate);
+
+			if (ret) {
+				MSM_BUS_ERR("%s: Failed to set_rate %lu for %d",
+					__func__, rounded_rate,
+						node->node_info->id);
+				ret = -ENODEV;
+				goto exit_flush_clk_data;
+			}
+
+			ret = enable_nodeclk(nodeclk, &node->dev);
+
+			if ((node->node_info->is_fab_dev) &&
+				!IS_ERR_OR_NULL(node->bus_qos_clk.clk))
+					ret = enable_nodeclk(&node->bus_qos_clk,
+								&node->dev);
+		} else {
+			if ((node->node_info->is_fab_dev) &&
+				!IS_ERR_OR_NULL(node->bus_qos_clk.clk))
+					ret =
+					disable_nodeclk(&node->bus_qos_clk);
+
+			ret = disable_nodeclk(nodeclk);
+		}
+
+		if (ret) {
+			MSM_BUS_ERR("%s: Failed to enable for %d", __func__,
+						node->node_info->id);
+			ret = -ENODEV;
+			goto exit_flush_clk_data;
+		}
+		MSM_BUS_DBG("%s: Updated %d clk to %llu", __func__,
+				node->node_info->id, nodeclk->rate);
+	}
+exit_flush_clk_data:
+	/* Reset the aggregated clock rate for fab devices*/
+	if (node && node->node_info->is_fab_dev)
+		node->node_bw[ctx].cur_clk_hz = 0;
+
+	if (nodeclk)
+		nodeclk->dirty = 0;
+	return ret;
+}
+
+static int msm_bus_agg_fab_clks(struct msm_bus_node_device_type *bus_dev)
+{
+	int ret = 0;
+	struct msm_bus_node_device_type *node;
+	int ctx;
+
+	list_for_each_entry(node, &bus_dev->devlist, dev_link) {
+		for (ctx = 0; ctx < NUM_CTX; ctx++) {
+			if (node->node_bw[ctx].cur_clk_hz >=
+					bus_dev->node_bw[ctx].cur_clk_hz)
+				bus_dev->node_bw[ctx].cur_clk_hz =
+						node->node_bw[ctx].cur_clk_hz;
+		}
+	}
+	return ret;
+}
+
+static void msm_bus_log_fab_max_votes(struct msm_bus_node_device_type *bus_dev)
+{
+	int ctx;
+	struct timespec ts;
+	uint32_t vrail_comp = 0;
+	struct msm_bus_node_device_type *node;
+	uint64_t max_ib, max_ib_temp[NUM_CTX];
+
+	for (ctx = 0; ctx < NUM_CTX; ctx++) {
+		max_ib_temp[ctx] = 0;
+		bus_dev->node_bw[ctx].max_ib = 0;
+		bus_dev->node_bw[ctx].max_ab = 0;
+		bus_dev->node_bw[ctx].max_ib_cl_name = NULL;
+		bus_dev->node_bw[ctx].max_ab_cl_name = NULL;
+	}
+
+	list_for_each_entry(node, &bus_dev->devlist, dev_link) {
+		for (ctx = 0; ctx < NUM_CTX; ctx++) {
+			max_ib = node->node_bw[ctx].max_ib;
+			vrail_comp = node->node_bw[ctx].vrail_used;
+
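+			/*
+			 * vrail_used holds a vrail compensation percentage;
+			 * scale the IB vote by 100/vrail_comp so that votes
+			 * from nodes with different compensation factors
+			 * compare fairly when picking the maximum.
+			 */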
+			if (vrail_comp && (vrail_comp != 100)) {
+				max_ib *= 100;
+				max_ib = msm_bus_div64(vrail_comp, max_ib);
+			}
+
+			if (max_ib > max_ib_temp[ctx]) {
+				max_ib_temp[ctx] = max_ib;
+				bus_dev->node_bw[ctx].max_ib =
+					node->node_bw[ctx].max_ib;
+				bus_dev->node_bw[ctx].max_ib_cl_name =
+					node->node_bw[ctx].max_ib_cl_name;
+			}
+
+			if (node->node_bw[ctx].max_ab >
+					bus_dev->node_bw[ctx].max_ab) {
+				bus_dev->node_bw[ctx].max_ab =
+					node->node_bw[ctx].max_ab;
+				bus_dev->node_bw[ctx].max_ab_cl_name =
+					node->node_bw[ctx].max_ab_cl_name;
+			}
+		}
+	}
+
+	ts = ktime_to_timespec(ktime_get());
+	for (ctx = 0; ctx < NUM_CTX; ctx++) {
+		trace_bus_max_votes((int)ts.tv_sec, (int)ts.tv_nsec,
+				bus_dev->node_info->name,
+				((ctx == ACTIVE_CTX) ? "active" : "sleep"),
+				"ib", bus_dev->node_bw[ctx].max_ib,
+				bus_dev->node_bw[ctx].max_ib_cl_name);
+	}
+
+	for (ctx = 0; ctx < NUM_CTX; ctx++) {
+		trace_bus_max_votes((int)ts.tv_sec, (int)ts.tv_nsec,
+				bus_dev->node_info->name,
+				((ctx == ACTIVE_CTX) ? "active" : "sleep"),
+				"ab", bus_dev->node_bw[ctx].max_ab,
+				bus_dev->node_bw[ctx].max_ab_cl_name);
+	}
+}
+
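+/*
+ * msm_bus_commit_data() - commit the aggregated requests for all dirty
+ * nodes on @clist: the first pass folds child clock requests into each
+ * fabric device and logs the maximum votes, the second pass flushes clock
+ * and bandwidth data for every node and clears its dirty state.
+ */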
+int msm_bus_commit_data(struct list_head *clist)
+{
+	int ret = 0;
+	int ctx;
+	struct msm_bus_node_device_type *node;
+	struct msm_bus_node_device_type *node_tmp;
+
+	list_for_each_entry(node, clist, link) {
+		/* Aggregate the bus clocks */
+		if (node->node_info->is_fab_dev) {
+			msm_bus_agg_fab_clks(node);
+			msm_bus_log_fab_max_votes(node);
+		}
+	}
+
+	list_for_each_entry_safe(node, node_tmp, clist, link) {
+		if (unlikely(node->node_info->defer_qos))
+			msm_bus_dev_init_qos(&node->dev, NULL);
+
+		for (ctx = 0; ctx < NUM_CTX; ctx++) {
+			ret = flush_clk_data(node, ctx);
+			if (ret)
+				MSM_BUS_ERR("%s: Err flushing clk data for:%d",
+						__func__, node->node_info->id);
+			ret = flush_bw_data(node, ctx);
+			if (ret)
+				MSM_BUS_ERR("%s: Error flushing bw data for %d",
+					__func__, node->node_info->id);
+		}
+		node->dirty = false;
+		list_del_init(&node->link);
+	}
+	return ret;
+}
+
+void *msm_bus_realloc_devmem(struct device *dev, void *p, size_t old_size,
+					size_t new_size, gfp_t flags)
+{
+	void *ret;
+	size_t copy_size = old_size;
+
+	if (!new_size) {
+		devm_kfree(dev, p);
+		return ZERO_SIZE_PTR;
+	}
+
+	if (new_size < old_size)
+		copy_size = new_size;
+
+	ret = devm_kzalloc(dev, new_size, flags);
+	if (!ret) {
+		MSM_BUS_ERR("%s: Error Reallocating memory", __func__);
+		goto exit_realloc_devmem;
+	}
+
+	memcpy(ret, p, copy_size);
+	devm_kfree(dev, p);
+exit_realloc_devmem:
+	return ret;
+}
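+
+/*
+ * Example use (illustrative, assuming @dev owns the allocation): growing a
+ * devm-managed array of qport ids from old_n to new_n entries:
+ *
+ *   qports = msm_bus_realloc_devmem(dev, qports, old_n * sizeof(int),
+ *		new_n * sizeof(int), GFP_KERNEL);
+ */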
+
+static void msm_bus_fab_init_noc_ops(struct msm_bus_node_device_type *bus_dev)
+{
+	switch (bus_dev->fabdev->bus_type) {
+	case MSM_BUS_NOC:
+		msm_bus_noc_set_ops(bus_dev);
+		break;
+	case MSM_BUS_BIMC:
+		msm_bus_bimc_set_ops(bus_dev);
+		break;
+	default:
+		MSM_BUS_ERR("%s: Invalid Bus type", __func__);
+	}
+}
+
+static int msm_bus_disable_node_qos_clk(struct msm_bus_node_device_type *node)
+{
+	struct msm_bus_node_device_type *bus_node = NULL;
+	int i;
+	int ret = 0;
+
+	if (!node || (!to_msm_bus_node(node->node_info->bus_device))) {
+		ret = -ENXIO;
+		goto exit_disable_node_qos_clk;
+	}
+
+	for (i = node->num_node_qos_clks - 1; i >= 0; i--)
+		ret = disable_nodeclk(&node->node_qos_clks[i]);
+
+	bus_node = to_msm_bus_node(node->node_info->bus_device);
+
+	for (i = bus_node->num_node_qos_clks - 1; i >= 0; i--)
+		ret = disable_nodeclk(&bus_node->node_qos_clks[i]);
+
+exit_disable_node_qos_clk:
+	return ret;
+}
+
+static int msm_bus_enable_node_qos_clk(struct msm_bus_node_device_type *node)
+{
+	struct msm_bus_node_device_type *bus_node = NULL;
+	int i;
+	int ret = 0;
+	long rounded_rate;
+
+	if (!node || (!to_msm_bus_node(node->node_info->bus_device))) {
+		ret = -ENXIO;
+		goto exit_enable_node_qos_clk;
+	}
+	bus_node = to_msm_bus_node(node->node_info->bus_device);
+
+	for (i = 0; i < node->num_node_qos_clks; i++) {
+		if (!node->node_qos_clks[i].enable_only_clk) {
+			rounded_rate =
+				clk_round_rate(
+					node->node_qos_clks[i].clk, 1);
+			ret = setrate_nodeclk(&node->node_qos_clks[i],
+								rounded_rate);
+			if (ret)
+				MSM_BUS_DBG("%s: Failed set rate clk,node %d\n",
+					__func__, node->node_info->id);
+		}
+		ret = enable_nodeclk(&node->node_qos_clks[i],
+					node->node_info->bus_device);
+		if (ret) {
+			MSM_BUS_DBG("%s: Failed to set Qos Clks ret %d\n",
+				__func__, ret);
+			msm_bus_disable_node_qos_clk(node);
+			goto exit_enable_node_qos_clk;
+		}
+
+	}
+
+	for (i = 0; i < bus_node->num_node_qos_clks; i++) {
+		if (!bus_node->node_qos_clks[i].enable_only_clk) {
+			rounded_rate =
+				clk_round_rate(
+					bus_node->node_qos_clks[i].clk, 1);
+			ret = setrate_nodeclk(&bus_node->node_qos_clks[i],
+								rounded_rate);
+			if (ret)
+				MSM_BUS_DBG("%s: Failed set rate clk,node %d\n",
+					__func__, node->node_info->id);
+		}
+		ret = enable_nodeclk(&bus_node->node_qos_clks[i],
+					node->node_info->bus_device);
+		if (ret) {
+			MSM_BUS_DBG("%s: Failed to set Qos Clks ret %d\n",
+				__func__, ret);
+			msm_bus_disable_node_qos_clk(node);
+			goto exit_enable_node_qos_clk;
+		}
+
+	}
+exit_enable_node_qos_clk:
+	return ret;
+}
+
+int msm_bus_enable_limiter(struct msm_bus_node_device_type *node_dev,
+				int enable, uint64_t lim_bw)
+{
+	int ret = 0;
+	struct msm_bus_node_device_type *bus_node_dev;
+
+	if (!node_dev) {
+		MSM_BUS_ERR("No device specified");
+		ret = -ENXIO;
+		goto exit_enable_limiter;
+	}
+
+	if (!node_dev->ap_owned) {
+		MSM_BUS_ERR("Device is not AP owned %d",
+						node_dev->node_info->id);
+		ret = -ENXIO;
+		goto exit_enable_limiter;
+	}
+
+	bus_node_dev = to_msm_bus_node(node_dev->node_info->bus_device);
+	if (!bus_node_dev) {
+		MSM_BUS_ERR("Unable to get bus device info for %d",
+			node_dev->node_info->id);
+		ret = -ENXIO;
+		goto exit_enable_limiter;
+	}
+	if (bus_node_dev->fabdev &&
+		bus_node_dev->fabdev->noc_ops.limit_mport) {
+		bus_node_dev->fabdev->noc_ops.limit_mport(
+				node_dev,
+				bus_node_dev->fabdev->qos_base,
+				bus_node_dev->fabdev->base_offset,
+				bus_node_dev->fabdev->qos_off,
+				bus_node_dev->fabdev->qos_freq,
+				enable, lim_bw);
+	}
+
+exit_enable_limiter:
+	return ret;
+}
+
+static int msm_bus_dev_init_qos(struct device *dev, void *data)
+{
+	int ret = 0;
+	struct msm_bus_node_device_type *node_dev = NULL;
+
+	node_dev = to_msm_bus_node(dev);
+	if (!node_dev) {
+		MSM_BUS_ERR("%s: Unable to get node device info", __func__);
+		ret = -ENXIO;
+		goto exit_init_qos;
+	}
+
+	MSM_BUS_DBG("Device = %d", node_dev->node_info->id);
+
+	if (node_dev->ap_owned) {
+		struct msm_bus_node_device_type *bus_node_info;
+
+		bus_node_info =
+			to_msm_bus_node(node_dev->node_info->bus_device);
+
+		if (!bus_node_info) {
+			MSM_BUS_ERR("%s: Unable to get bus device info for %d",
+				__func__,
+				node_dev->node_info->id);
+			ret = -ENXIO;
+			goto exit_init_qos;
+		}
+
+		if (bus_node_info->fabdev &&
+			bus_node_info->fabdev->noc_ops.qos_init) {
+			int ret = 0;
+
+			if (node_dev->ap_owned &&
+				(node_dev->node_info->qos_params.mode) != -1) {
+
+				if (bus_node_info->fabdev->bypass_qos_prg)
+					goto exit_init_qos;
+
+				ret = msm_bus_enable_node_qos_clk(node_dev);
+				if (ret < 0) {
+					MSM_BUS_DBG("Can't Enable QoS clk %d\n",
+					node_dev->node_info->id);
+					node_dev->node_info->defer_qos = true;
+					goto exit_init_qos;
+				}
+
+				bus_node_info->fabdev->noc_ops.qos_init(
+					node_dev,
+					bus_node_info->fabdev->qos_base,
+					bus_node_info->fabdev->base_offset,
+					bus_node_info->fabdev->qos_off,
+					bus_node_info->fabdev->qos_freq);
+				ret = msm_bus_disable_node_qos_clk(node_dev);
+				node_dev->node_info->defer_qos = false;
+			}
+		} else
+			MSM_BUS_ERR("%s: Skipping QOS init for %d",
+				__func__, node_dev->node_info->id);
+	}
+exit_init_qos:
+	return ret;
+}
+
+static int msm_bus_fabric_init(struct device *dev,
+			struct msm_bus_node_device_type *pdata)
+{
+	struct msm_bus_fab_device_type *fabdev;
+	struct msm_bus_node_device_type *node_dev = NULL;
+	int ret = 0;
+
+	node_dev = to_msm_bus_node(dev);
+	if (!node_dev) {
+		MSM_BUS_ERR("%s: Unable to get bus device info", __func__);
+		ret = -ENXIO;
+		goto exit_fabric_init;
+	}
+
+	if (node_dev->node_info->virt_dev) {
+		MSM_BUS_ERR("%s: Skip Fab init for virtual device %d", __func__,
+						node_dev->node_info->id);
+		goto exit_fabric_init;
+	}
+
+	fabdev = devm_kzalloc(dev, sizeof(struct msm_bus_fab_device_type),
+								GFP_KERNEL);
+	if (!fabdev) {
+		MSM_BUS_ERR("Fabric alloc failed\n");
+		ret = -ENOMEM;
+		goto exit_fabric_init;
+	}
+
+	node_dev->fabdev = fabdev;
+	fabdev->pqos_base = pdata->fabdev->pqos_base;
+	fabdev->qos_range = pdata->fabdev->qos_range;
+	fabdev->base_offset = pdata->fabdev->base_offset;
+	fabdev->qos_off = pdata->fabdev->qos_off;
+	fabdev->qos_freq = pdata->fabdev->qos_freq;
+	fabdev->bus_type = pdata->fabdev->bus_type;
+	fabdev->bypass_qos_prg = pdata->fabdev->bypass_qos_prg;
+	msm_bus_fab_init_noc_ops(node_dev);
+
+	fabdev->qos_base = devm_ioremap(dev,
+				fabdev->pqos_base, fabdev->qos_range);
+	if (!fabdev->qos_base) {
+		MSM_BUS_ERR("%s: Error remapping address 0x%zx :bus device %d",
+			__func__,
+			 (size_t)fabdev->pqos_base, node_dev->node_info->id);
+		ret = -ENOMEM;
+		goto exit_fabric_init;
+	}
+
+exit_fabric_init:
+	return ret;
+}
+
+static int msm_bus_init_clk(struct device *bus_dev,
+				struct msm_bus_node_device_type *pdata)
+{
+	unsigned int ctx;
+	struct msm_bus_node_device_type *node_dev = to_msm_bus_node(bus_dev);
+	int i;
+
+	for (ctx = 0; ctx < NUM_CTX; ctx++) {
+		if (!IS_ERR_OR_NULL(pdata->clk[ctx].clk)) {
+			node_dev->clk[ctx].clk = pdata->clk[ctx].clk;
+			node_dev->clk[ctx].enable_only_clk =
+					pdata->clk[ctx].enable_only_clk;
+			node_dev->clk[ctx].setrate_only_clk =
+					pdata->clk[ctx].setrate_only_clk;
+			node_dev->clk[ctx].enable = false;
+			node_dev->clk[ctx].dirty = false;
+			strlcpy(node_dev->clk[ctx].reg_name,
+				pdata->clk[ctx].reg_name, MAX_REG_NAME);
+			node_dev->clk[ctx].reg = NULL;
+			bus_get_reg(&node_dev->clk[ctx], bus_dev);
+			MSM_BUS_DBG("%s: Valid node clk node %d ctx %d\n",
+				__func__, node_dev->node_info->id, ctx);
+		}
+	}
+
+	if (!IS_ERR_OR_NULL(pdata->bus_qos_clk.clk)) {
+		node_dev->bus_qos_clk.clk = pdata->bus_qos_clk.clk;
+		node_dev->bus_qos_clk.enable_only_clk =
+					pdata->bus_qos_clk.enable_only_clk;
+		node_dev->bus_qos_clk.setrate_only_clk =
+					pdata->bus_qos_clk.setrate_only_clk;
+		node_dev->bus_qos_clk.enable = false;
+		strlcpy(node_dev->bus_qos_clk.reg_name,
+			pdata->bus_qos_clk.reg_name, MAX_REG_NAME);
+		node_dev->bus_qos_clk.reg = NULL;
+		MSM_BUS_DBG("%s: Valid bus qos clk node %d\n", __func__,
+						node_dev->node_info->id);
+	}
+
+	if (pdata->num_node_qos_clks) {
+		node_dev->num_node_qos_clks = pdata->num_node_qos_clks;
+		node_dev->node_qos_clks = devm_kzalloc(bus_dev,
+			(node_dev->num_node_qos_clks * sizeof(struct nodeclk)),
+			GFP_KERNEL);
+		if (!node_dev->node_qos_clks) {
+			dev_err(bus_dev, "Failed to alloc memory for qos clk");
+			return -ENOMEM;
+		}
+
+		for (i = 0; i < pdata->num_node_qos_clks; i++) {
+			node_dev->node_qos_clks[i].clk =
+					pdata->node_qos_clks[i].clk;
+			node_dev->node_qos_clks[i].enable_only_clk =
+					pdata->node_qos_clks[i].enable_only_clk;
+			node_dev->node_qos_clks[i].setrate_only_clk =
+				pdata->node_qos_clks[i].setrate_only_clk;
+			node_dev->node_qos_clks[i].enable = false;
+			strlcpy(node_dev->node_qos_clks[i].reg_name,
+				pdata->node_qos_clks[i].reg_name, MAX_REG_NAME);
+			node_dev->node_qos_clks[i].reg = NULL;
+			MSM_BUS_DBG("%s: Valid qos clk[%d] node %d %d Reg %s\n",
+					__func__, i,
+					node_dev->node_info->id,
+					node_dev->num_node_qos_clks,
+					node_dev->node_qos_clks[i].reg_name);
+		}
+	}
+
+	return 0;
+}
+
+static int msm_bus_copy_node_info(struct msm_bus_node_device_type *pdata,
+				struct device *bus_dev)
+{
+	int ret = 0;
+	struct msm_bus_node_info_type *node_info = NULL;
+	struct msm_bus_node_info_type *pdata_node_info = NULL;
+	struct msm_bus_node_device_type *bus_node = NULL;
+
+	bus_node = to_msm_bus_node(bus_dev);
+
+	if (!bus_node || !pdata) {
+		ret = -ENXIO;
+		MSM_BUS_ERR("%s: Invalid pointers pdata %p, bus_node %p",
+			__func__, pdata, bus_node);
+		goto exit_copy_node_info;
+	}
+
+	node_info = bus_node->node_info;
+	pdata_node_info = pdata->node_info;
+
+	node_info->name = pdata_node_info->name;
+	node_info->id =  pdata_node_info->id;
+	node_info->bus_device_id = pdata_node_info->bus_device_id;
+	node_info->mas_rpm_id = pdata_node_info->mas_rpm_id;
+	node_info->slv_rpm_id = pdata_node_info->slv_rpm_id;
+	node_info->num_connections = pdata_node_info->num_connections;
+	node_info->num_blist = pdata_node_info->num_blist;
+	node_info->num_qports = pdata_node_info->num_qports;
+	node_info->virt_dev = pdata_node_info->virt_dev;
+	node_info->is_fab_dev = pdata_node_info->is_fab_dev;
+	node_info->qos_params.mode = pdata_node_info->qos_params.mode;
+	node_info->qos_params.prio1 = pdata_node_info->qos_params.prio1;
+	node_info->qos_params.prio0 = pdata_node_info->qos_params.prio0;
+	node_info->qos_params.reg_prio1 = pdata_node_info->qos_params.reg_prio1;
+	node_info->qos_params.reg_prio0 = pdata_node_info->qos_params.reg_prio0;
+	node_info->qos_params.prio_lvl = pdata_node_info->qos_params.prio_lvl;
+	node_info->qos_params.prio_rd = pdata_node_info->qos_params.prio_rd;
+	node_info->qos_params.prio_wr = pdata_node_info->qos_params.prio_wr;
+	node_info->qos_params.gp = pdata_node_info->qos_params.gp;
+	node_info->qos_params.thmp = pdata_node_info->qos_params.thmp;
+	node_info->qos_params.ws = pdata_node_info->qos_params.ws;
+	node_info->qos_params.bw_buffer = pdata_node_info->qos_params.bw_buffer;
+	node_info->agg_params.buswidth = pdata_node_info->agg_params.buswidth;
+	node_info->agg_params.agg_scheme =
+					pdata_node_info->agg_params.agg_scheme;
+	node_info->agg_params.vrail_comp =
+					pdata_node_info->agg_params.vrail_comp;
+	node_info->agg_params.num_aggports =
+				pdata_node_info->agg_params.num_aggports;
+	node_info->agg_params.num_util_levels =
+				pdata_node_info->agg_params.num_util_levels;
+	node_info->agg_params.util_levels = devm_kzalloc(bus_dev,
+			sizeof(struct node_util_levels_type) *
+			node_info->agg_params.num_util_levels,
+			GFP_KERNEL);
+	if (!node_info->agg_params.util_levels) {
+		MSM_BUS_ERR("%s: Agg util level alloc failed\n", __func__);
+		ret = -ENOMEM;
+		goto exit_copy_node_info;
+	}
+	memcpy(node_info->agg_params.util_levels,
+		pdata_node_info->agg_params.util_levels,
+		sizeof(struct node_util_levels_type) *
+			pdata_node_info->agg_params.num_util_levels);
+
+	node_info->dev_connections = devm_kzalloc(bus_dev,
+			sizeof(struct device *) *
+				pdata_node_info->num_connections,
+			GFP_KERNEL);
+	if (!node_info->dev_connections) {
+		MSM_BUS_ERR("%s:Bus dev connections alloc failed\n", __func__);
+		ret = -ENOMEM;
+		goto exit_copy_node_info;
+	}
+
+	node_info->connections = devm_kzalloc(bus_dev,
+			sizeof(int) * pdata_node_info->num_connections,
+			GFP_KERNEL);
+	if (!node_info->connections) {
+		MSM_BUS_ERR("%s:Bus connections alloc failed\n", __func__);
+		devm_kfree(bus_dev, node_info->dev_connections);
+		ret = -ENOMEM;
+		goto exit_copy_node_info;
+	}
+
+	memcpy(node_info->connections,
+		pdata_node_info->connections,
+		sizeof(int) * pdata_node_info->num_connections);
+
+	node_info->black_connections = devm_kzalloc(bus_dev,
+			sizeof(struct device *) *
+				pdata_node_info->num_blist,
+			GFP_KERNEL);
+	if (!node_info->black_connections) {
+		MSM_BUS_ERR("%s: Bus black connections alloc failed\n",
+			__func__);
+		devm_kfree(bus_dev, node_info->dev_connections);
+		devm_kfree(bus_dev, node_info->connections);
+		ret = -ENOMEM;
+		goto exit_copy_node_info;
+	}
+
+	node_info->black_listed_connections = devm_kzalloc(bus_dev,
+			pdata_node_info->num_blist * sizeof(int),
+			GFP_KERNEL);
+	if (!node_info->black_listed_connections) {
+		MSM_BUS_ERR("%s:Bus black list connections alloc failed\n",
+					__func__);
+		devm_kfree(bus_dev, node_info->black_connections);
+		devm_kfree(bus_dev, node_info->dev_connections);
+		devm_kfree(bus_dev, node_info->connections);
+		ret = -ENOMEM;
+		goto exit_copy_node_info;
+	}
+
+	memcpy(node_info->black_listed_connections,
+		pdata_node_info->black_listed_connections,
+		sizeof(int) * pdata_node_info->num_blist);
+
+	node_info->qport = devm_kzalloc(bus_dev,
+			sizeof(int) * pdata_node_info->num_qports,
+			GFP_KERNEL);
+	if (!node_info->qport) {
+		MSM_BUS_ERR("%s:Bus qport allocation failed\n", __func__);
+		devm_kfree(bus_dev, node_info->dev_connections);
+		devm_kfree(bus_dev, node_info->connections);
+		devm_kfree(bus_dev, node_info->black_listed_connections);
+		ret = -ENOMEM;
+		goto exit_copy_node_info;
+	}
+
+	memcpy(node_info->qport,
+		pdata_node_info->qport,
+		sizeof(int) * pdata_node_info->num_qports);
+
+exit_copy_node_info:
+	return ret;
+}
+
+static struct device *msm_bus_device_init(
+			struct msm_bus_node_device_type *pdata)
+{
+	struct device *bus_dev = NULL;
+	struct msm_bus_node_device_type *bus_node = NULL;
+	struct msm_bus_node_info_type *node_info = NULL;
+	int ret = 0;
+
+	/*
+	 * Init here so we can use devm calls
+	 */
+
+	bus_node = kzalloc(sizeof(struct msm_bus_node_device_type), GFP_KERNEL);
+	if (!bus_node) {
+		ret = -ENOMEM;
+		goto err_device_init;
+	}
+	bus_dev = &bus_node->dev;
+	device_initialize(bus_dev);
+
+	node_info = devm_kzalloc(bus_dev,
+			sizeof(struct msm_bus_node_info_type), GFP_KERNEL);
+	if (!node_info) {
+		ret = -ENOMEM;
+		goto err_put_device;
+	}
+
+	bus_node->node_info = node_info;
+	bus_node->ap_owned = pdata->ap_owned;
+	bus_dev->of_node = pdata->of_node;
+
+	ret = msm_bus_copy_node_info(pdata, bus_dev);
+	if (ret)
+		goto err_put_device;
+
+	bus_dev->bus = &msm_bus_type;
+	dev_set_name(bus_dev, "%s", bus_node->node_info->name);
+
+	ret = device_add(bus_dev);
+	if (ret) {
+		MSM_BUS_ERR("%s: Error registering device %d",
+				__func__, pdata->node_info->id);
+		goto err_put_device;
+	}
+	device_create_file(bus_dev, &dev_attr_bw);
+	INIT_LIST_HEAD(&bus_node->devlist);
+	return bus_dev;
+
+err_put_device:
+	put_device(bus_dev);
+	bus_dev = NULL;
+	kfree(bus_node);
+err_device_init:
+	return ERR_PTR(ret);
+}
+
+static int msm_bus_setup_dev_conn(struct device *bus_dev, void *data)
+{
+	struct msm_bus_node_device_type *bus_node = NULL;
+	int ret = 0;
+	int j;
+	struct msm_bus_node_device_type *fab;
+
+	bus_node = to_msm_bus_node(bus_dev);
+	if (!bus_node) {
+		MSM_BUS_ERR("%s: Can't get device info", __func__);
+		ret = -ENODEV;
+		goto exit_setup_dev_conn;
+	}
+
+	/* Setup parent bus device for this node */
+	if (!bus_node->node_info->is_fab_dev) {
+		struct device *bus_parent_device =
+			bus_find_device(&msm_bus_type, NULL,
+				(void *)&bus_node->node_info->bus_device_id,
+				msm_bus_device_match_adhoc);
+
+		if (!bus_parent_device) {
+			MSM_BUS_ERR("%s: Error finding parentdev %d parent %d",
+				__func__,
+				bus_node->node_info->id,
+				bus_node->node_info->bus_device_id);
+			ret = -ENXIO;
+			goto exit_setup_dev_conn;
+		}
+		bus_node->node_info->bus_device = bus_parent_device;
+		fab = to_msm_bus_node(bus_parent_device);
+		list_add_tail(&bus_node->dev_link, &fab->devlist);
+	}
+
+	bus_node->node_info->is_traversed = false;
+
+	for (j = 0; j < bus_node->node_info->num_connections; j++) {
+		bus_node->node_info->dev_connections[j] =
+			bus_find_device(&msm_bus_type, NULL,
+				(void *)&bus_node->node_info->connections[j],
+				msm_bus_device_match_adhoc);
+
+		if (!bus_node->node_info->dev_connections[j]) {
+			MSM_BUS_ERR("%s: Error finding conn %d for device %d",
+				__func__, bus_node->node_info->connections[j],
+				 bus_node->node_info->id);
+			ret = -ENODEV;
+			goto exit_setup_dev_conn;
+		}
+	}
+
+	for (j = 0; j < bus_node->node_info->num_blist; j++) {
+		bus_node->node_info->black_connections[j] =
+			bus_find_device(&msm_bus_type, NULL,
+				(void *)&bus_node->node_info->
+				black_listed_connections[j],
+				msm_bus_device_match_adhoc);
+
+		if (!bus_node->node_info->black_connections[j]) {
+			MSM_BUS_ERR("%s: Error finding conn %d for device %d\n",
+				__func__, bus_node->node_info->
+				black_listed_connections[j],
+				bus_node->node_info->id);
+			ret = -ENODEV;
+			goto exit_setup_dev_conn;
+		}
+	}
+
+exit_setup_dev_conn:
+	return ret;
+}
+
+static int msm_bus_node_debug(struct device *bus_dev, void *data)
+{
+	int j;
+	int ret = 0;
+	struct msm_bus_node_device_type *bus_node = NULL;
+
+	bus_node = to_msm_bus_node(bus_dev);
+	if (!bus_node) {
+		MSM_BUS_ERR("%s: Can't get device info", __func__);
+		ret = -ENODEV;
+		goto exit_node_debug;
+	}
+
+	MSM_BUS_DBG("Device = %d buswidth %u", bus_node->node_info->id,
+				bus_node->node_info->agg_params.buswidth);
+	for (j = 0; j < bus_node->node_info->num_connections; j++) {
+		struct msm_bus_node_device_type *bdev =
+		to_msm_bus_node(bus_node->node_info->dev_connections[j]);
+		MSM_BUS_DBG("\n\t Connection[%d] %d", j, bdev->node_info->id);
+	}
+
+	if (bus_node->node_info->is_fab_dev)
+		msm_bus_floor_init(bus_dev);
+
+exit_node_debug:
+	return ret;
+}
+
+static int msm_bus_free_dev(struct device *dev, void *data)
+{
+	struct msm_bus_node_device_type *bus_node = NULL;
+
+	bus_node = to_msm_bus_node(dev);
+
+	if (bus_node)
+		MSM_BUS_ERR("\n%s: Removing device %d", __func__,
+						bus_node->node_info->id);
+	device_unregister(dev);
+	kfree(bus_node);
+	return 0;
+}
+
+int msm_bus_device_remove(struct platform_device *pdev)
+{
+	bus_for_each_dev(&msm_bus_type, NULL, NULL, msm_bus_free_dev);
+	return 0;
+}
+
+/**
+ * msm_bus_panic_callback() - panic notification callback function
+ * @nfb:        Notifier block pointer
+ * @event:      Value passed unmodified to notifier function
+ * @data:       Pointer passed unmodified to notifier function
+ *
+ * Invoked when a kernel panic occurs; logs the maximum ib/ab votes and
+ * the clients that requested them for each fabric device.
+ *
+ * Return: NOTIFY_OK
+ */
+static int msm_bus_panic_callback(struct notifier_block *nfb,
+					unsigned long event, void *data)
+{
+	struct msm_bus_node_device_type *bus_node = NULL;
+	unsigned int ctx;
+
+	list_for_each_entry(bus_node, &fabdev_list, dev_link) {
+		for (ctx = 0; ctx < NUM_CTX; ctx++) {
+			if (bus_node->node_bw[ctx].max_ib_cl_name &&
+				bus_node->node_bw[ctx].max_ib) {
+				pr_debug("%s: %s: %s max_ib: %llu: client-name: %s\n",
+				__func__, bus_node->node_info->name,
+				((ctx == ACTIVE_CTX) ? "active" : "sleep"),
+				bus_node->node_bw[ctx].max_ib,
+				bus_node->node_bw[ctx].max_ib_cl_name);
+			}
+		}
+
+		for (ctx = 0; ctx < NUM_CTX; ctx++) {
+			if (bus_node->node_bw[ctx].max_ab_cl_name &&
+				bus_node->node_bw[ctx].max_ab) {
+				pr_debug("%s: %s: %s max_ab: %llu: client-name: %s\n",
+				__func__, bus_node->node_info->name,
+				((ctx == ACTIVE_CTX) ? "active" : "sleep"),
+				bus_node->node_bw[ctx].max_ab,
+				bus_node->node_bw[ctx].max_ab_cl_name);
+			}
+		}
+	}
+
+	return NOTIFY_OK;
+}
+
+static struct notifier_block msm_bus_panic_notifier = {
+	.notifier_call = msm_bus_panic_callback,
+	.priority = 1,
+};
+
+static int msm_bus_device_probe(struct platform_device *pdev)
+{
+	unsigned int i;
+	int ret;
+	struct msm_bus_device_node_registration *pdata;
+
+	/* If possible, get pdata from device-tree */
+	if (pdev->dev.of_node)
+		pdata = msm_bus_of_to_pdata(pdev);
+	else
+		pdata = pdev->dev.platform_data;
+
+	if (IS_ERR_OR_NULL(pdata)) {
+		MSM_BUS_ERR("No platform data found");
+		ret = -ENODATA;
+		goto exit_device_probe;
+	}
+
+	for (i = 0; i < pdata->num_devices; i++) {
+		struct device *node_dev = NULL;
+		struct msm_bus_node_device_type *bus_node = NULL;
+
+		node_dev = msm_bus_device_init(&pdata->info[i]);
+
+		if (IS_ERR(node_dev)) {
+			MSM_BUS_ERR("%s: Error during dev init for %d",
+				__func__, pdata->info[i].node_info->id);
+			ret = PTR_ERR(node_dev);
+			goto exit_device_probe;
+		}
+
+		ret = msm_bus_init_clk(node_dev, &pdata->info[i]);
+		if (ret) {
+			MSM_BUS_ERR("\n Failed to init bus clk. ret %d", ret);
+			msm_bus_device_remove(pdev);
+			goto exit_device_probe;
+		}
+		/* Is this a fabric device? */
+		if (pdata->info[i].node_info->is_fab_dev) {
+			MSM_BUS_DBG("%s: %d is a fab", __func__,
+						pdata->info[i].node_info->id);
+			ret = msm_bus_fabric_init(node_dev, &pdata->info[i]);
+			if (ret) {
+				MSM_BUS_ERR("%s: Error intializing fab %d",
+					__func__, pdata->info[i].node_info->id);
+				goto exit_device_probe;
+			}
+
+			bus_node = to_msm_bus_node(node_dev);
+			list_add_tail(&bus_node->dev_link, &fabdev_list);
+		}
+	}
+
+	ret = bus_for_each_dev(&msm_bus_type, NULL, NULL,
+						msm_bus_setup_dev_conn);
+	if (ret) {
+		MSM_BUS_ERR("%s: Error setting up dev connections", __func__);
+		goto exit_device_probe;
+	}
+
+	/*
+	 * Setup the QoS for the nodes, don't check the error codes as we
+	 * defer QoS programming to the first transaction in cases of failure
+	 * and we want to continue the probe.
+	 */
+	bus_for_each_dev(&msm_bus_type, NULL, NULL, msm_bus_dev_init_qos);
+
+	/* Register the arb layer ops */
+	msm_bus_arb_setops_adhoc(&arb_ops);
+	bus_for_each_dev(&msm_bus_type, NULL, NULL, msm_bus_node_debug);
+
+	atomic_notifier_chain_register(&panic_notifier_list,
+						&msm_bus_panic_notifier);
+
+	devm_kfree(&pdev->dev, pdata->info);
+	devm_kfree(&pdev->dev, pdata);
+exit_device_probe:
+	return ret;
+}
+
+static int msm_bus_device_rules_probe(struct platform_device *pdev)
+{
+	struct bus_rule_type *rule_data = NULL;
+	int num_rules = 0;
+
+	num_rules = msm_bus_of_get_static_rules(pdev, &rule_data);
+
+	if (!rule_data)
+		goto exit_rules_probe;
+
+	msm_rule_register(num_rules, rule_data, NULL);
+	static_rules.num_rules = num_rules;
+	static_rules.rules = rule_data;
+	pdev->dev.platform_data = &static_rules;
+
+exit_rules_probe:
+	return 0;
+}
+
+int msm_bus_device_rules_remove(struct platform_device *pdev)
+{
+	struct static_rules_type *static_rules = NULL;
+
+	static_rules = pdev->dev.platform_data;
+	if (static_rules)
+		msm_rule_unregister(static_rules->num_rules,
+					static_rules->rules, NULL);
+	return 0;
+}
+
+
+static const struct of_device_id rules_match[] = {
+	{.compatible = "qcom,msm-bus-static-bw-rules"},
+	{}
+};
+
+static struct platform_driver msm_bus_rules_driver = {
+	.probe = msm_bus_device_rules_probe,
+	.remove = msm_bus_device_rules_remove,
+	.driver = {
+		.name = "msm_bus_rules_device",
+		.owner = THIS_MODULE,
+		.of_match_table = rules_match,
+	},
+};
+
+static const struct of_device_id fabric_match[] = {
+	{.compatible = "qcom,msm-bus-device"},
+	{}
+};
+
+static struct platform_driver msm_bus_device_driver = {
+	.probe = msm_bus_device_probe,
+	.remove = msm_bus_device_remove,
+	.driver = {
+		.name = "msm_bus_device",
+		.owner = THIS_MODULE,
+		.of_match_table = fabric_match,
+	},
+};
+
+int __init msm_bus_device_init_driver(void)
+{
+	int rc;
+
+	MSM_BUS_ERR("msm_bus_fabric_init_driver\n");
+	rc =  platform_driver_register(&msm_bus_device_driver);
+
+	if (rc) {
+		MSM_BUS_ERR("Failed to register bus device driver");
+		return rc;
+	}
+	return platform_driver_register(&msm_bus_rules_driver);
+}
+subsys_initcall(msm_bus_device_init_driver);
diff -Nruw linux-4.4.115-fbx/drivers/soc/qcom/msm_bus./msm_bus_noc_adhoc.c linux-4.4.115-fbx/drivers/soc/qcom/msm_bus/msm_bus_noc_adhoc.c
--- linux-4.4.115-fbx/drivers/soc/qcom/msm_bus./msm_bus_noc_adhoc.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/soc/qcom/msm_bus/msm_bus_noc_adhoc.c	2019-01-22 16:16:26.659274986 +0100
@@ -0,0 +1,590 @@
+/* Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "AXI: NOC: %s(): " fmt, __func__
+
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/msm-bus-board.h>
+#include "msm_bus_core.h"
+#include "msm_bus_noc.h"
+#include "msm_bus_adhoc.h"
+
+/* NOC_QOS generic */
+#define __CLZ(x) ((8 * sizeof(uint32_t)) - 1 - __fls(x))
+#define SAT_SCALE 16	/* 16 bytes minimum for saturation */
+#define BW_SCALE  256	/* 1/256 byte per cycle unit */
+#define QOS_DEFAULT_BASEOFFSET		0x00003000
+#define QOS_DEFAULT_DELTA		0x80
+#define MAX_BW_FIELD (NOC_QOS_BWn_BW_BMSK >> NOC_QOS_BWn_BW_SHFT)
+#define MAX_SAT_FIELD (NOC_QOS_SATn_SAT_BMSK >> NOC_QOS_SATn_SAT_SHFT)
+#define MIN_SAT_FIELD	1
+#define MIN_BW_FIELD	1
+
+#define NOC_QOS_REG_BASE(b, o)		((b) + (o))
+
+#define NOC_QOS_PRIORITYn_ADDR(b, o, n, d)	\
+	(NOC_QOS_REG_BASE(b, o) + 0x8 + (d) * (n))
+enum noc_qos_id_priorityn {
+	NOC_QOS_PRIORITYn_RMSK		= 0x0000000f,
+	NOC_QOS_PRIORITYn_MAXn		= 32,
+	NOC_QOS_PRIORITYn_P1_BMSK	= 0xc,
+	NOC_QOS_PRIORITYn_P1_SHFT	= 0x2,
+	NOC_QOS_PRIORITYn_P0_BMSK	= 0x3,
+	NOC_QOS_PRIORITYn_P0_SHFT	= 0x0,
+};
+
+#define NOC_QOS_MODEn_ADDR(b, o, n, d) \
+	(NOC_QOS_REG_BASE(b, o) + 0xC + (d) * (n))
+enum noc_qos_id_moden_rmsk {
+	NOC_QOS_MODEn_RMSK		= 0x00000003,
+	NOC_QOS_MODEn_MAXn		= 32,
+	NOC_QOS_MODEn_MODE_BMSK		= 0x3,
+	NOC_QOS_MODEn_MODE_SHFT		= 0x0,
+};
+
+#define NOC_QOS_BWn_ADDR(b, o, n, d) \
+	(NOC_QOS_REG_BASE(b, o) + 0x10 + (d) * (n))
+enum noc_qos_id_bwn {
+	NOC_QOS_BWn_RMSK		= 0x0000ffff,
+	NOC_QOS_BWn_MAXn		= 32,
+	NOC_QOS_BWn_BW_BMSK		= 0xffff,
+	NOC_QOS_BWn_BW_SHFT		= 0x0,
+};
+
+/* QOS Saturation registers */
+#define NOC_QOS_SATn_ADDR(b, o, n, d) \
+	(NOC_QOS_REG_BASE(b, o) + 0x14 + (d) * (n))
+enum noc_qos_id_saturationn {
+	NOC_QOS_SATn_RMSK		= 0x000003ff,
+	NOC_QOS_SATn_MAXn		= 32,
+	NOC_QOS_SATn_SAT_BMSK		= 0x3ff,
+	NOC_QOS_SATn_SAT_SHFT		= 0x0,
+};
+
+static int noc_div(uint64_t *a, uint32_t b)
+{
+	if ((*a > 0) && (*a < b)) {
+		*a = 0;
+		return 1;
+	} else {
+		return do_div(*a, b);
+	}
+}
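+
+/*
+ * Illustrative use of noc_div(): it divides in place and returns the
+ * remainder, except when 0 < *a < b, where it zeroes *a and returns 1
+ * so callers can tell that a non-zero value was rounded down to zero:
+ *
+ *	uint64_t a = 700;
+ *	uint32_t rem = noc_div(&a, 256);	a == 2, rem == 188
+ *
+ *	a = 100;
+ *	rem = noc_div(&a, 256);			a == 0, rem == 1
+ */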
+
+/*
+ * Calculate the bandwidth the hardware is using from the register
+ * values. The returned bandwidth is in bytes/sec.
+ */
+static uint64_t noc_bw(uint32_t bw_field, uint32_t qos_freq)
+{
+	uint64_t res;
+	uint32_t rem, scale;
+
+	res = 2ULL * qos_freq * bw_field;
+	scale = BW_SCALE * 1000;
+	rem = noc_div(&res, scale);
+	MSM_BUS_DBG("NOC: Calculated bw: %llu\n", res * 1000000ULL);
+	return res * 1000000ULL;
+}
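+
+/*
+ * Worked example (values illustrative): with qos_freq = 19200 (KHz)
+ * and bw_field = 256, res = 2 * 19200 * 256 = 9830400; dividing by
+ * BW_SCALE * 1000 = 256000 gives 38, so noc_bw() returns
+ * 38000000 bytes/sec (~38 MB/s).
+ */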
+
+/*
+ * Calculate the max bandwidth in bytes/sec for a given time-base.
+ */
+static uint64_t noc_bw_ceil(long int bw_field, uint32_t qos_freq_khz)
+{
+	uint64_t bw_temp = 2ULL * qos_freq_khz * bw_field;
+	uint32_t scale = 1000 * BW_SCALE;
+
+	noc_div(&bw_temp, scale);
+	return bw_temp * 1000000;
+}
+#define MAX_BW(timebase) noc_bw_ceil(MAX_BW_FIELD, (timebase))
+
+/*
+ * Calculate the window size (ws) the hardware is using from the
+ * register values. The returned ws is in nanoseconds.
+ */
+static uint32_t noc_ws(uint64_t bw, uint32_t sat, uint32_t qos_freq)
+{
+	if (bw && qos_freq) {
+		uint32_t bwf = bw * qos_freq;
+		uint64_t scale = 1000000000000LL * BW_SCALE *
+			SAT_SCALE * sat;
+		noc_div(&scale, bwf);
+		MSM_BUS_DBG("NOC: Calculated ws: %llu\n", scale);
+		return scale;
+	}
+
+	return 0;
+}
+#define MAX_WS(bw, timebase) noc_ws((bw), MAX_SAT_FIELD, (timebase))
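+
+/*
+ * Worked example for noc_ws() (values illustrative): with
+ * bw = 100000 bytes/sec, sat = 1 and qos_freq = 19200 (KHz),
+ * bwf = 100000 * 19200 = 1.92e9 and
+ * scale = 10^12 * 256 * 16 * 1 = 4.096e15, so the returned window
+ * size is 4.096e15 / 1.92e9 ~= 2133333 ns. Note that bwf is a
+ * uint32_t, so bw * qos_freq is assumed to fit in 32 bits.
+ */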
+
+/* Calculate the bandwidth field value for a requested bandwidth */
+static uint32_t noc_bw_field(uint64_t bw_bps, uint32_t qos_freq_khz)
+{
+	uint32_t bw_field = 0;
+
+	if (bw_bps) {
+		uint32_t rem;
+		uint64_t bw_capped = min_t(uint64_t, bw_bps,
+						MAX_BW(qos_freq_khz));
+		uint64_t bwc = bw_capped * BW_SCALE;
+		uint64_t qf = 2 * qos_freq_khz * 1000;
+
+		rem = noc_div(&bwc, qf);
+		bw_field = (uint32_t)max_t(unsigned long, bwc, MIN_BW_FIELD);
+		bw_field = (uint32_t)min_t(unsigned long, bw_field,
+								MAX_BW_FIELD);
+	}
+
+	MSM_BUS_DBG("NOC: bw_field: %u\n", bw_field);
+	return bw_field;
+}
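+
+/*
+ * Worked example (values illustrative): requesting bw_bps = 38400000
+ * with qos_freq_khz = 19200 gives bwc = 38400000 * 256 = 9.8304e9 and
+ * qf = 2 * 19200 * 1000 = 38400000, so the programmed field is
+ * 9.8304e9 / 38400000 = 256, the inverse of the noc_bw() calculation
+ * above.
+ */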
+
+static uint32_t noc_sat_field(uint64_t bw, uint32_t ws, uint32_t qos_freq)
+{
+	uint32_t sat_field = 0;
+
+	if (bw) {
+		/* Limit to max bw and scale bw to 100 KB increments */
+		uint64_t tbw, tscale;
+		uint64_t bw_scaled = min_t(uint64_t, bw, MAX_BW(qos_freq));
+		uint32_t rem = noc_div(&bw_scaled, 100000);
+
+		/*
+		 * SATURATION =
+		 * (BW [MBps] * integration window [us] *
+		 *  time base frequency [MHz]) / (256 * 16)
+		 */
+		tbw = bw_scaled * ws * qos_freq;
+		tscale = BW_SCALE * SAT_SCALE * 1000000LL;
+		rem = noc_div(&tbw, tscale);
+		sat_field = (uint32_t)max_t(unsigned long, tbw, MIN_SAT_FIELD);
+		sat_field = (uint32_t)min_t(unsigned long, sat_field,
+							MAX_SAT_FIELD);
+	}
+
+	MSM_BUS_DBG("NOC: sat_field: %d\n", sat_field);
+	return sat_field;
+}
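+
+/*
+ * Worked example (values illustrative): for bw = 38400000 bytes/sec,
+ * ws = 10000 ns and qos_freq = 19200 (KHz), bw scales to 384 (100 KB
+ * units), so tbw = 384 * 10000 * 19200 = 7.3728e10 and
+ * tscale = 256 * 16 * 1e6 = 4.096e9, giving a saturation field of
+ * 7.3728e10 / 4.096e9 = 18.
+ */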
+
+static void noc_set_qos_mode(void __iomem *base, uint32_t qos_off,
+		uint32_t mport, uint32_t qos_delta, uint8_t mode,
+		uint8_t perm_mode)
+{
+	if (mode < NOC_QOS_MODE_MAX &&
+		((1 << mode) & perm_mode)) {
+		uint32_t reg_val;
+
+		reg_val = readl_relaxed(NOC_QOS_MODEn_ADDR(base, qos_off,
+			mport, qos_delta)) & NOC_QOS_MODEn_RMSK;
+		writel_relaxed(((reg_val & (~(NOC_QOS_MODEn_MODE_BMSK))) |
+			(mode & NOC_QOS_MODEn_MODE_BMSK)),
+			NOC_QOS_MODEn_ADDR(base, qos_off, mport, qos_delta));
+	}
+	/* Ensure qos mode is set before exiting */
+	wmb();
+}
+
+static void noc_set_qos_priority(void __iomem *base, uint32_t qos_off,
+		uint32_t mport, uint32_t qos_delta,
+		struct msm_bus_noc_qos_priority *priority)
+{
+	uint32_t reg_val, val;
+
+	reg_val = readl_relaxed(NOC_QOS_PRIORITYn_ADDR(base, qos_off, mport,
+		qos_delta)) & NOC_QOS_PRIORITYn_RMSK;
+	val = priority->p1 << NOC_QOS_PRIORITYn_P1_SHFT;
+	writel_relaxed(((reg_val & (~(NOC_QOS_PRIORITYn_P1_BMSK))) |
+		(val & NOC_QOS_PRIORITYn_P1_BMSK)),
+		NOC_QOS_PRIORITYn_ADDR(base, qos_off, mport, qos_delta));
+
+	reg_val = readl_relaxed(NOC_QOS_PRIORITYn_ADDR(base, qos_off, mport,
+								qos_delta))
+		& NOC_QOS_PRIORITYn_RMSK;
+	writel_relaxed(((reg_val & (~(NOC_QOS_PRIORITYn_P0_BMSK))) |
+		(priority->p0 & NOC_QOS_PRIORITYn_P0_BMSK)),
+		NOC_QOS_PRIORITYn_ADDR(base, qos_off, mport, qos_delta));
+	/* Ensure qos priority is set before exiting */
+	wmb();
+}
+
+static void msm_bus_noc_set_qos_bw(void __iomem *base, uint32_t qos_off,
+		uint32_t qos_freq, uint32_t mport, uint32_t qos_delta,
+		uint8_t perm_mode, struct msm_bus_noc_qos_bw *qbw)
+{
+	uint32_t reg_val, val, mode;
+
+	if (!qos_freq) {
+		MSM_BUS_DBG("Zero QoS Freq\n");
+		return;
+	}
+
+	/*
+	 * If Limiter or Regulator modes are not supported, bw is not
+	 * available.
+	 */
+	if (perm_mode & (NOC_QOS_PERM_MODE_LIMITER |
+		NOC_QOS_PERM_MODE_REGULATOR)) {
+		uint32_t bw_val = noc_bw_field(qbw->bw, qos_freq);
+		uint32_t sat_val = noc_sat_field(qbw->bw, qbw->ws,
+			qos_freq);
+
+		MSM_BUS_DBG("NOC: BW: perm_mode: %d bw_val: %d, sat_val: %d\n",
+			perm_mode, bw_val, sat_val);
+		/*
+		 * If in Limiter/Regulator mode, first go to fixed mode.
+		 * Clear QoS accumulator.
+		 */
+		mode = readl_relaxed(NOC_QOS_MODEn_ADDR(base, qos_off,
+			mport, qos_delta)) & NOC_QOS_MODEn_MODE_BMSK;
+		if (mode == NOC_QOS_MODE_REGULATOR || mode ==
+			NOC_QOS_MODE_LIMITER) {
+			reg_val = readl_relaxed(NOC_QOS_MODEn_ADDR(
+				base, qos_off, mport, qos_delta));
+			val = NOC_QOS_MODE_FIXED;
+			writel_relaxed((reg_val & (~(NOC_QOS_MODEn_MODE_BMSK)))
+				| (val & NOC_QOS_MODEn_MODE_BMSK),
+				NOC_QOS_MODEn_ADDR(base, qos_off, mport,
+								qos_delta));
+		}
+
+		reg_val = readl_relaxed(NOC_QOS_BWn_ADDR(base, qos_off, mport,
+								qos_delta));
+		val = bw_val << NOC_QOS_BWn_BW_SHFT;
+		writel_relaxed(((reg_val & (~(NOC_QOS_BWn_BW_BMSK))) |
+			(val & NOC_QOS_BWn_BW_BMSK)),
+			NOC_QOS_BWn_ADDR(base, qos_off, mport, qos_delta));
+
+		MSM_BUS_DBG("NOC: BW: Wrote value: 0x%x\n", ((reg_val &
+			(~NOC_QOS_BWn_BW_BMSK)) | (val &
+			NOC_QOS_BWn_BW_BMSK)));
+
+		reg_val = readl_relaxed(NOC_QOS_SATn_ADDR(base, qos_off,
+			mport, qos_delta));
+		val = sat_val << NOC_QOS_SATn_SAT_SHFT;
+		writel_relaxed(((reg_val & (~(NOC_QOS_SATn_SAT_BMSK))) |
+			(val & NOC_QOS_SATn_SAT_BMSK)),
+			NOC_QOS_SATn_ADDR(base, qos_off, mport, qos_delta));
+
+		MSM_BUS_DBG("NOC: SAT: Wrote value: 0x%x\n", ((reg_val &
+			(~NOC_QOS_SATn_SAT_BMSK)) | (val &
+			NOC_QOS_SATn_SAT_BMSK)));
+
+		/* Set mode back to what it was initially */
+		reg_val = readl_relaxed(NOC_QOS_MODEn_ADDR(base, qos_off,
+			mport, qos_delta));
+		writel_relaxed((reg_val & (~(NOC_QOS_MODEn_MODE_BMSK)))
+			| (mode & NOC_QOS_MODEn_MODE_BMSK),
+			NOC_QOS_MODEn_ADDR(base, qos_off, mport, qos_delta));
+		/* Ensure that all writes for bandwidth registers have
+		 * completed before returning
+		 */
+		wmb();
+	}
+}
+
+uint8_t msm_bus_noc_get_qos_mode(void __iomem *base, uint32_t qos_off,
+	uint32_t mport, uint32_t qos_delta, uint32_t mode, uint32_t perm_mode)
+{
+	if (perm_mode == NOC_QOS_MODES_ALL_PERM)
+		return readl_relaxed(NOC_QOS_MODEn_ADDR(base, qos_off,
+			mport, qos_delta)) & NOC_QOS_MODEn_MODE_BMSK;
+	else
+		return 31 - __CLZ(mode &
+			NOC_QOS_MODES_ALL_PERM);
+}
+
+void msm_bus_noc_get_qos_priority(void __iomem *base, uint32_t qos_off,
+	uint32_t mport, uint32_t qos_delta,
+	struct msm_bus_noc_qos_priority *priority)
+{
+	priority->p1 = (readl_relaxed(NOC_QOS_PRIORITYn_ADDR(base, qos_off,
+		mport, qos_delta)) & NOC_QOS_PRIORITYn_P1_BMSK) >>
+		NOC_QOS_PRIORITYn_P1_SHFT;
+
+	priority->p0 = (readl_relaxed(NOC_QOS_PRIORITYn_ADDR(base, qos_off,
+		mport, qos_delta)) & NOC_QOS_PRIORITYn_P0_BMSK) >>
+		NOC_QOS_PRIORITYn_P0_SHFT;
+}
+
+void msm_bus_noc_get_qos_bw(void __iomem *base, uint32_t qos_off,
+	uint32_t qos_freq,
+	uint32_t mport, uint32_t qos_delta, uint8_t perm_mode,
+	struct msm_bus_noc_qos_bw *qbw)
+{
+	if (perm_mode & (NOC_QOS_PERM_MODE_LIMITER |
+		NOC_QOS_PERM_MODE_REGULATOR)) {
+		uint32_t bw_val = readl_relaxed(NOC_QOS_BWn_ADDR(
+			base, qos_off, mport, qos_delta)) & NOC_QOS_BWn_BW_BMSK;
+		uint32_t sat = readl_relaxed(NOC_QOS_SATn_ADDR(
+			base, qos_off, mport, qos_delta))
+						& NOC_QOS_SATn_SAT_BMSK;
+
+		qbw->bw = noc_bw(bw_val, qos_freq);
+		qbw->ws = noc_ws(qbw->bw, sat, qos_freq);
+	} else {
+		qbw->bw = 0;
+		qbw->ws = 0;
+	}
+}
+
+static bool msm_bus_noc_update_bw_reg(int mode)
+{
+	bool ret = false;
+
+	if ((mode == NOC_QOS_MODE_LIMITER) ||
+			(mode == NOC_QOS_MODE_REGULATOR))
+		ret = true;
+
+	return ret;
+}
+
+static int msm_bus_noc_qos_init(struct msm_bus_node_device_type *info,
+				void __iomem *qos_base,
+				uint32_t qos_off, uint32_t qos_delta,
+				uint32_t qos_freq)
+{
+	struct msm_bus_noc_qos_priority prio;
+	int ret = 0;
+	int i;
+
+	prio.p1 = info->node_info->qos_params.prio1;
+	prio.p0 = info->node_info->qos_params.prio0;
+
+	if (!info->node_info->qport) {
+		MSM_BUS_DBG("No QoS Ports to init\n");
+		ret = 0;
+		goto err_qos_init;
+	}
+
+	for (i = 0; i < info->node_info->num_qports; i++) {
+		if (info->node_info->qos_params.mode != NOC_QOS_MODE_BYPASS) {
+			noc_set_qos_priority(qos_base, qos_off,
+					info->node_info->qport[i], qos_delta,
+					&prio);
+
+			if (info->node_info->qos_params.mode !=
+							NOC_QOS_MODE_FIXED) {
+				struct msm_bus_noc_qos_bw qbw;
+
+				qbw.ws = info->node_info->qos_params.ws;
+				qbw.bw = 0;
+				msm_bus_noc_set_qos_bw(qos_base, qos_off,
+					qos_freq,
+					info->node_info->qport[i],
+					qos_delta,
+					info->node_info->qos_params.mode,
+					&qbw);
+			}
+		}
+
+		noc_set_qos_mode(qos_base, qos_off, info->node_info->qport[i],
+				qos_delta, info->node_info->qos_params.mode,
+				(1 << info->node_info->qos_params.mode));
+	}
+err_qos_init:
+	return ret;
+}
+
+static int msm_bus_noc_set_bw(struct msm_bus_node_device_type *dev,
+				void __iomem *qos_base,
+				uint32_t qos_off, uint32_t qos_delta,
+				uint32_t qos_freq)
+{
+	int ret = 0;
+	uint64_t bw = 0;
+	int i;
+	struct msm_bus_node_info_type *info = dev->node_info;
+
+	if (info && info->num_qports &&
+		((info->qos_params.mode == NOC_QOS_MODE_REGULATOR) ||
+		(info->qos_params.mode ==
+			NOC_QOS_MODE_LIMITER))) {
+		struct msm_bus_noc_qos_bw qos_bw;
+
+		bw = msm_bus_div64(info->num_qports,
+				dev->node_bw[ACTIVE_CTX].sum_ab);
+
+		for (i = 0; i < info->num_qports; i++) {
+			if (!info->qport) {
+				MSM_BUS_DBG("No qos ports to update!\n");
+				break;
+			}
+
+			qos_bw.bw = bw;
+			qos_bw.ws = info->qos_params.ws;
+			msm_bus_noc_set_qos_bw(qos_base, qos_off, qos_freq,
+				info->qport[i], qos_delta,
+				(1 << info->qos_params.mode), &qos_bw);
+			MSM_BUS_DBG("NOC: QoS: Update mas_bw: ws: %u\n",
+				qos_bw.ws);
+		}
+	}
+	return ret;
+}
+
+static int msm_bus_noc_set_lim_mode(struct msm_bus_node_device_type *info,
+				void __iomem *qos_base, uint32_t qos_off,
+				uint32_t qos_delta, uint32_t qos_freq,
+				u64 lim_bw)
+{
+	int i;
+
+	if (info && info->node_info->num_qports) {
+		struct msm_bus_noc_qos_bw qos_bw;
+
+		if (lim_bw != info->node_info->lim_bw) {
+			for (i = 0; i < info->node_info->num_qports; i++) {
+				qos_bw.bw = lim_bw;
+				qos_bw.ws = info->node_info->qos_params.ws;
+				msm_bus_noc_set_qos_bw(qos_base,
+					qos_off, qos_freq,
+					info->node_info->qport[i], qos_delta,
+					(1 << NOC_QOS_MODE_LIMITER), &qos_bw);
+			}
+			info->node_info->lim_bw = lim_bw;
+		}
+
+		for (i = 0; i < info->node_info->num_qports; i++) {
+			noc_set_qos_mode(qos_base, qos_off,
+					info->node_info->qport[i],
+					qos_delta,
+					NOC_QOS_MODE_LIMITER,
+					(1 << NOC_QOS_MODE_LIMITER));
+		}
+	}
+
+	return 0;
+}
+
+static int msm_bus_noc_set_reg_mode(struct msm_bus_node_device_type *info,
+				void __iomem *qos_base, uint32_t qos_off,
+				uint32_t qos_delta, uint32_t qos_freq,
+				u64 lim_bw)
+{
+	int i;
+
+	if (info && info->node_info->num_qports) {
+		struct msm_bus_noc_qos_priority prio;
+		struct msm_bus_noc_qos_bw qos_bw;
+
+		for (i = 0; i < info->node_info->num_qports; i++) {
+			prio.p1 =
+				info->node_info->qos_params.reg_prio1;
+			prio.p0 =
+				info->node_info->qos_params.reg_prio0;
+			noc_set_qos_priority(qos_base, qos_off,
+					info->node_info->qport[i],
+					qos_delta,
+					&prio);
+		}
+
+		if (lim_bw != info->node_info->lim_bw) {
+			for (i = 0; i < info->node_info->num_qports; i++) {
+				qos_bw.bw = lim_bw;
+				qos_bw.ws = info->node_info->qos_params.ws;
+				msm_bus_noc_set_qos_bw(qos_base, qos_off,
+					qos_freq,
+					info->node_info->qport[i], qos_delta,
+					(1 << NOC_QOS_MODE_REGULATOR), &qos_bw);
+			}
+			info->node_info->lim_bw = lim_bw;
+		}
+
+		for (i = 0; i < info->node_info->num_qports; i++) {
+			noc_set_qos_mode(qos_base, qos_off,
+					info->node_info->qport[i],
+					qos_delta,
+					NOC_QOS_MODE_REGULATOR,
+					(1 << NOC_QOS_MODE_REGULATOR));
+		}
+	}
+	return 0;
+}
+
+static int msm_bus_noc_set_def_mode(struct msm_bus_node_device_type *info,
+				void __iomem *qos_base, uint32_t qos_off,
+				uint32_t qos_delta, uint32_t qos_freq,
+				u64 lim_bw)
+{
+	int i;
+
+	for (i = 0; i < info->node_info->num_qports; i++) {
+		if (info->node_info->qos_params.mode ==
+						NOC_QOS_MODE_FIXED) {
+			struct msm_bus_noc_qos_priority prio;
+
+			prio.p1 =
+				info->node_info->qos_params.prio1;
+			prio.p0 =
+				info->node_info->qos_params.prio0;
+			noc_set_qos_priority(qos_base, qos_off,
+					info->node_info->qport[i],
+					qos_delta, &prio);
+		}
+		noc_set_qos_mode(qos_base, qos_off,
+			info->node_info->qport[i],
+			qos_delta,
+			info->node_info->qos_params.mode,
+			(1 << info->node_info->qos_params.mode));
+	}
+	return 0;
+}
+
+static int msm_bus_noc_limit_mport(struct msm_bus_node_device_type *info,
+				void __iomem *qos_base, uint32_t qos_off,
+				uint32_t qos_delta, uint32_t qos_freq,
+				int enable_lim, u64 lim_bw)
+{
+	int ret = 0;
+
+	if (!(info && info->node_info->num_qports)) {
+		MSM_BUS_ERR("Invalid Node info or no Qports to program");
+		ret = -ENXIO;
+		goto exit_limit_mport;
+	}
+
+	if (lim_bw) {
+		switch (enable_lim) {
+		case THROTTLE_REG:
+			msm_bus_noc_set_reg_mode(info, qos_base, qos_off,
+						qos_delta, qos_freq, lim_bw);
+			break;
+		case THROTTLE_ON:
+			msm_bus_noc_set_lim_mode(info, qos_base, qos_off,
+						qos_delta, qos_freq, lim_bw);
+			break;
+		default:
+			msm_bus_noc_set_def_mode(info, qos_base, qos_off,
+						qos_delta, qos_freq, lim_bw);
+			break;
+		}
+	} else {
+		msm_bus_noc_set_def_mode(info, qos_base, qos_off,
+					qos_delta, qos_freq, lim_bw);
+	}
+
+exit_limit_mport:
+	return ret;
+}
+
+int msm_bus_noc_set_ops(struct msm_bus_node_device_type *bus_dev)
+{
+	if (!bus_dev)
+		return -ENODEV;
+
+	bus_dev->fabdev->noc_ops.qos_init = msm_bus_noc_qos_init;
+	bus_dev->fabdev->noc_ops.set_bw = msm_bus_noc_set_bw;
+	bus_dev->fabdev->noc_ops.limit_mport = msm_bus_noc_limit_mport;
+	bus_dev->fabdev->noc_ops.update_bw_reg = msm_bus_noc_update_bw_reg;
+
+	return 0;
+}
+EXPORT_SYMBOL(msm_bus_noc_set_ops);
diff -Nruw linux-4.4.115-fbx/drivers/soc/qcom/msm_bus./msm_bus_noc.h linux-4.4.115-fbx/drivers/soc/qcom/msm_bus/msm_bus_noc.h
--- linux-4.4.115-fbx/drivers/soc/qcom/msm_bus./msm_bus_noc.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/soc/qcom/msm_bus/msm_bus_noc.h	2019-01-22 16:16:26.659274986 +0100
@@ -0,0 +1,76 @@
+/* Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _ARCH_ARM_MACH_MSM_BUS_NOC_H
+#define _ARCH_ARM_MACH_MSM_BUS_NOC_H
+
+enum msm_bus_noc_qos_mode_type {
+	NOC_QOS_MODE_FIXED = 0,
+	NOC_QOS_MODE_LIMITER,
+	NOC_QOS_MODE_BYPASS,
+	NOC_QOS_MODE_REGULATOR,
+	NOC_QOS_MODE_MAX,
+};
+
+enum msm_bus_noc_qos_mode_perm {
+	NOC_QOS_PERM_MODE_FIXED = (1 << NOC_QOS_MODE_FIXED),
+	NOC_QOS_PERM_MODE_LIMITER = (1 << NOC_QOS_MODE_LIMITER),
+	NOC_QOS_PERM_MODE_BYPASS = (1 << NOC_QOS_MODE_BYPASS),
+	NOC_QOS_PERM_MODE_REGULATOR = (1 << NOC_QOS_MODE_REGULATOR),
+};
+
+#define NOC_QOS_MODES_ALL_PERM (NOC_QOS_PERM_MODE_FIXED | \
+	NOC_QOS_PERM_MODE_LIMITER | NOC_QOS_PERM_MODE_BYPASS | \
+	NOC_QOS_PERM_MODE_REGULATOR)
+
+struct msm_bus_noc_commit {
+	struct msm_bus_node_hw_info *mas;
+	struct msm_bus_node_hw_info *slv;
+};
+
+struct msm_bus_noc_info {
+	void __iomem *base;
+	uint32_t base_addr;
+	uint32_t nmasters;
+	uint32_t nqos_masters;
+	uint32_t nslaves;
+	uint32_t qos_freq; /* QOS Clock in KHz */
+	uint32_t qos_baseoffset;
+	uint32_t qos_delta;
+	uint32_t *mas_modes;
+	struct msm_bus_noc_commit cdata[NUM_CTX];
+};
+
+struct msm_bus_noc_qos_priority {
+	uint32_t high_prio;
+	uint32_t low_prio;
+	uint32_t read_prio;
+	uint32_t write_prio;
+	uint32_t p1;
+	uint32_t p0;
+};
+
+struct msm_bus_noc_qos_bw {
+	uint64_t bw; /* Bandwidth in bytes per second */
+	uint32_t ws; /* Window size in nano seconds */
+};
+
+void msm_bus_noc_init(struct msm_bus_noc_info *ninfo);
+uint8_t msm_bus_noc_get_qos_mode(void __iomem *base, uint32_t qos_off,
+	uint32_t mport, uint32_t qos_delta, uint32_t mode, uint32_t perm_mode);
+void msm_bus_noc_get_qos_priority(void __iomem *base, uint32_t qos_off,
+	uint32_t mport, uint32_t qos_delta,
+	struct msm_bus_noc_qos_priority *qprio);
+void msm_bus_noc_get_qos_bw(void __iomem *base, uint32_t qos_off,
+	uint32_t qos_freq, uint32_t mport, uint32_t qos_delta,
+	uint8_t perm_mode, struct msm_bus_noc_qos_bw *qbw);
+#endif /* _ARCH_ARM_MACH_MSM_BUS_NOC_H */
diff -Nruw linux-4.4.115-fbx/drivers/soc/qcom/msm_bus./msm_bus_of_adhoc.c linux-4.4.115-fbx/drivers/soc/qcom/msm_bus/msm_bus_of_adhoc.c
--- linux-4.4.115-fbx/drivers/soc/qcom/msm_bus./msm_bus_of_adhoc.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/soc/qcom/msm_bus/msm_bus_of_adhoc.c	2019-01-22 16:16:26.663275022 +0100
@@ -0,0 +1,908 @@
+/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "AXI: %s(): " fmt, __func__
+
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/msm-bus.h>
+#include <linux/msm-bus-board.h>
+#include <linux/msm_bus_rules.h>
+#include "msm_bus_core.h"
+#include "msm_bus_adhoc.h"
+
+#define DEFAULT_QOS_FREQ	19200
+#define DEFAULT_UTIL_FACT	100
+#define DEFAULT_VRAIL_COMP	100
+#define DEFAULT_AGG_SCHEME	AGG_SCHEME_LEG
+
+static int get_qos_mode(struct platform_device *pdev,
+			struct device_node *node, const char *qos_mode)
+{
+	const char *qos_names[] = {"fixed", "limiter", "bypass", "regulator"};
+	int i = 0;
+	int ret = -1;
+
+	if (!qos_mode)
+		goto exit_get_qos_mode;
+
+	for (i = 0; i < ARRAY_SIZE(qos_names); i++) {
+		if (!strcmp(qos_mode, qos_names[i]))
+			break;
+	}
+	if (i == ARRAY_SIZE(qos_names))
+		dev_err(&pdev->dev, "Cannot match mode qos %s using Bypass",
+				qos_mode);
+	else
+		ret = i;
+
+exit_get_qos_mode:
+	return ret;
+}
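+
+/*
+ * For example, get_qos_mode(pdev, node, "limiter") returns 1, which
+ * matches NOC_QOS_MODE_LIMITER in the NOC mode enumeration; an
+ * unrecognized string logs an error and returns -1.
+ */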
+
+static int *get_arr(struct platform_device *pdev,
+		struct device_node *node, const char *prop,
+		int *nports)
+{
+	int size = 0, ret;
+	int *arr = NULL;
+
+	if (of_get_property(node, prop, &size)) {
+		*nports = size / sizeof(int);
+	} else {
+		dev_dbg(&pdev->dev, "Property %s not available\n", prop);
+		*nports = 0;
+		return NULL;
+	}
+
+	arr = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
+	if ((size > 0) && ZERO_OR_NULL_PTR(arr)) {
+		dev_err(&pdev->dev, "Error: Failed to alloc mem for %s\n",
+				prop);
+		return NULL;
+	}
+
+	ret = of_property_read_u32_array(node, prop, (u32 *)arr, *nports);
+	if (ret) {
+		dev_err(&pdev->dev, "Error in reading property: %s\n", prop);
+		goto arr_err;
+	}
+
+	return arr;
+arr_err:
+	devm_kfree(&pdev->dev, arr);
+	return NULL;
+}
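+
+/*
+ * Illustrative device-tree usage (node name and values hypothetical):
+ *
+ *	mas_example: mas-example {
+ *		qcom,qport = <0 1>;
+ *	};
+ *
+ * get_arr(pdev, node, "qcom,qport", &nports) then returns a two-entry
+ * array {0, 1} and sets nports to 2.
+ */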
+
+static struct msm_bus_fab_device_type *get_fab_device_info(
+		struct device_node *dev_node,
+		struct platform_device *pdev)
+{
+	struct msm_bus_fab_device_type *fab_dev;
+	int ret;
+	struct resource *res;
+	const char *base_name;
+
+	fab_dev = devm_kzalloc(&pdev->dev,
+			sizeof(struct msm_bus_fab_device_type),
+			GFP_KERNEL);
+	if (!fab_dev) {
+		dev_err(&pdev->dev,
+			"Error: Unable to allocate memory for fab_dev\n");
+		return NULL;
+	}
+
+	ret = of_property_read_string(dev_node, "qcom,base-name", &base_name);
+	if (ret) {
+		dev_err(&pdev->dev, "Error: Unable to get base address name\n");
+		goto fab_dev_err;
+	}
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, base_name);
+	if (!res) {
+		dev_err(&pdev->dev, "Error getting qos base addr %s\n",
+								base_name);
+		goto fab_dev_err;
+	}
+	fab_dev->pqos_base = res->start;
+	fab_dev->qos_range = resource_size(res);
+	fab_dev->bypass_qos_prg = of_property_read_bool(dev_node,
+						"qcom,bypass-qos-prg");
+
+	ret = of_property_read_u32(dev_node, "qcom,base-offset",
+			&fab_dev->base_offset);
+	if (ret)
+		dev_dbg(&pdev->dev, "Bus base offset is missing\n");
+
+	ret = of_property_read_u32(dev_node, "qcom,qos-off",
+			&fab_dev->qos_off);
+	if (ret)
+		dev_dbg(&pdev->dev, "Bus qos off is missing\n");
+
+
+	ret = of_property_read_u32(dev_node, "qcom,bus-type",
+						&fab_dev->bus_type);
+	if (ret) {
+		dev_warn(&pdev->dev, "Bus type is missing\n");
+		goto fab_dev_err;
+	}
+
+	ret = of_property_read_u32(dev_node, "qcom,qos-freq",
+						&fab_dev->qos_freq);
+	if (ret) {
+		dev_dbg(&pdev->dev, "Bus qos freq is missing\n");
+		fab_dev->qos_freq = DEFAULT_QOS_FREQ;
+	}
+
+
+	return fab_dev;
+
+fab_dev_err:
+	devm_kfree(&pdev->dev, fab_dev);
+	return NULL;
+}
+
+static void get_qos_params(
+		struct device_node * const dev_node,
+		struct platform_device * const pdev,
+		struct msm_bus_node_info_type *node_info)
+{
+	const char *qos_mode = NULL;
+	int ret;
+	unsigned int temp;
+
+	ret = of_property_read_string(dev_node, "qcom,qos-mode", &qos_mode);
+
+	if (ret)
+		node_info->qos_params.mode = -1;
+	else
+		node_info->qos_params.mode = get_qos_mode(pdev, dev_node,
+								qos_mode);
+
+	of_property_read_u32(dev_node, "qcom,prio-lvl",
+					&node_info->qos_params.prio_lvl);
+
+	of_property_read_u32(dev_node, "qcom,prio1",
+						&node_info->qos_params.prio1);
+
+	of_property_read_u32(dev_node, "qcom,prio0",
+						&node_info->qos_params.prio0);
+
+	of_property_read_u32(dev_node, "qcom,reg-prio1",
+					&node_info->qos_params.reg_prio1);
+
+	of_property_read_u32(dev_node, "qcom,reg-prio0",
+					&node_info->qos_params.reg_prio0);
+
+	of_property_read_u32(dev_node, "qcom,prio-rd",
+					&node_info->qos_params.prio_rd);
+
+	of_property_read_u32(dev_node, "qcom,prio-wr",
+						&node_info->qos_params.prio_wr);
+
+	of_property_read_u32(dev_node, "qcom,gp",
+						&node_info->qos_params.gp);
+
+	of_property_read_u32(dev_node, "qcom,thmp",
+						&node_info->qos_params.thmp);
+
+	of_property_read_u32(dev_node, "qcom,ws",
+						&node_info->qos_params.ws);
+
+	ret = of_property_read_u32(dev_node, "qcom,bw_buffer", &temp);
+
+	if (ret)
+		node_info->qos_params.bw_buffer = 0;
+	else
+		node_info->qos_params.bw_buffer = KBTOB(temp);
+
+}
+
+static int msm_bus_of_parse_clk_array(struct device_node *dev_node,
+			struct device_node *gdsc_node,
+			struct platform_device *pdev, struct nodeclk **clk_arr,
+			int *num_clks, int id)
+{
+	int ret = 0;
+	int idx = 0;
+	struct property *prop;
+	const char *clk_name;
+	int clks = 0;
+
+	clks = of_property_count_strings(dev_node, "clock-names");
+	if (clks < 0) {
+		dev_err(&pdev->dev, "No qos clks node %d\n", id);
+		ret = clks;
+		goto exit_of_parse_clk_array;
+	}
+
+	*num_clks = clks;
+	*clk_arr = devm_kzalloc(&pdev->dev,
+			(clks * sizeof(struct nodeclk)), GFP_KERNEL);
+
+	if (!(*clk_arr)) {
+		dev_err(&pdev->dev, "Error allocating clk nodes for %d\n", id);
+		ret = -ENOMEM;
+		*num_clks = 0;
+		goto exit_of_parse_clk_array;
+	}
+
+	of_property_for_each_string(dev_node, "clock-names", prop, clk_name) {
+		char gdsc_string[MAX_REG_NAME];
+
+		(*clk_arr)[idx].clk = of_clk_get_by_name(dev_node, clk_name);
+
+		if (IS_ERR_OR_NULL((*clk_arr)[idx].clk)) {
+			dev_err(&pdev->dev,
+				"Failed to get clk %s for bus%d ", clk_name,
+									id);
+			continue;
+		}
+		if (strnstr(clk_name, "no-rate", strlen(clk_name)))
+			(*clk_arr)[idx].enable_only_clk = true;
+
+		scnprintf(gdsc_string, MAX_REG_NAME, "%s-supply", clk_name);
+
+		if (of_find_property(gdsc_node, gdsc_string, NULL))
+			scnprintf((*clk_arr)[idx].reg_name,
+				MAX_REG_NAME, "%s", clk_name);
+		else
+			scnprintf((*clk_arr)[idx].reg_name,
+					MAX_REG_NAME, "%c", '\0');
+
+		idx++;
+	}
+exit_of_parse_clk_array:
+	return ret;
+}
+
+static void get_agg_params(
+		struct device_node * const dev_node,
+		struct platform_device * const pdev,
+		struct msm_bus_node_info_type *node_info)
+{
+	int ret;
+
+
+	ret = of_property_read_u32(dev_node, "qcom,buswidth",
+					&node_info->agg_params.buswidth);
+	if (ret) {
+		dev_dbg(&pdev->dev, "Using default 8 bytes %d", node_info->id);
+		node_info->agg_params.buswidth = 8;
+	}
+
+	ret = of_property_read_u32(dev_node, "qcom,agg-ports",
+				   &node_info->agg_params.num_aggports);
+	if (ret)
+		node_info->agg_params.num_aggports = node_info->num_qports;
+
+	ret = of_property_read_u32(dev_node, "qcom,agg-scheme",
+					&node_info->agg_params.agg_scheme);
+	if (ret) {
+		if (node_info->is_fab_dev)
+			node_info->agg_params.agg_scheme = DEFAULT_AGG_SCHEME;
+		else
+			node_info->agg_params.agg_scheme = AGG_SCHEME_NONE;
+	}
+
+	ret = of_property_read_u32(dev_node, "qcom,vrail-comp",
+					&node_info->agg_params.vrail_comp);
+	if (ret) {
+		if (node_info->is_fab_dev)
+			node_info->agg_params.vrail_comp = DEFAULT_VRAIL_COMP;
+		else
+			node_info->agg_params.vrail_comp = 0;
+	}
+
+	if (node_info->agg_params.agg_scheme == AGG_SCHEME_1) {
+		uint32_t len = 0;
+		const uint32_t *util_levels;
+		int i, index = 0;
+
+		util_levels =
+			of_get_property(dev_node, "qcom,util-levels", &len);
+		if (!util_levels)
+			goto err_get_agg_params;
+
+		node_info->agg_params.num_util_levels =
+					len / (sizeof(uint32_t) * 2);
+		node_info->agg_params.util_levels = devm_kzalloc(&pdev->dev,
+			(node_info->agg_params.num_util_levels *
+			sizeof(struct node_util_levels_type)), GFP_KERNEL);
+
+		if (IS_ERR_OR_NULL(node_info->agg_params.util_levels))
+			goto err_get_agg_params;
+
+		for (i = 0; i < node_info->agg_params.num_util_levels; i++) {
+			node_info->agg_params.util_levels[i].threshold =
+				KBTOB(be32_to_cpu(util_levels[index++]));
+			node_info->agg_params.util_levels[i].util_fact =
+					be32_to_cpu(util_levels[index++]);
+			dev_dbg(&pdev->dev, "[%d]:Thresh:%llu util_fact:%d\n",
+				i,
+				node_info->agg_params.util_levels[i].threshold,
+				node_info->agg_params.util_levels[i].util_fact);
+		}
+	} else {
+		uint32_t util_fact;
+
+		ret = of_property_read_u32(dev_node, "qcom,util-fact",
+								&util_fact);
+		if (ret) {
+			if (node_info->is_fab_dev)
+				util_fact = DEFAULT_UTIL_FACT;
+			else
+				util_fact = 0;
+		}
+
+		if (util_fact) {
+			node_info->agg_params.num_util_levels = 1;
+			node_info->agg_params.util_levels =
+			devm_kzalloc(&pdev->dev,
+				(node_info->agg_params.num_util_levels *
+				sizeof(struct node_util_levels_type)),
+				GFP_KERNEL);
+			if (IS_ERR_OR_NULL(node_info->agg_params.util_levels))
+				goto err_get_agg_params;
+			node_info->agg_params.util_levels[0].util_fact =
+								util_fact;
+		}
+
+	}
+
+	return;
+err_get_agg_params:
+	node_info->agg_params.num_util_levels = 0;
+	node_info->agg_params.agg_scheme = DEFAULT_AGG_SCHEME;
+}
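+
+/*
+ * With agg-scheme AGG_SCHEME_1, "qcom,util-levels" is parsed as
+ * <threshold-KB util-fact> pairs. An illustrative (hypothetical)
+ * property:
+ *
+ *	qcom,util-levels = <200000 80>, <400000 100>;
+ *
+ * yields two levels with thresholds of 200000 KB/s and 400000 KB/s
+ * (stored in bytes via KBTOB()) and utilization factors of 80 and
+ * 100; how a level is selected is up to the aggregation code, which
+ * is not part of this file.
+ */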
+
+static struct msm_bus_node_info_type *get_node_info_data(
+		struct device_node * const dev_node,
+		struct platform_device * const pdev)
+{
+	struct msm_bus_node_info_type *node_info;
+	int ret;
+	int size;
+	int i;
+	struct device_node *con_node;
+	struct device_node *bus_dev;
+
+	node_info = devm_kzalloc(&pdev->dev,
+			sizeof(struct msm_bus_node_info_type),
+			GFP_KERNEL);
+	if (!node_info) {
+		dev_err(&pdev->dev,
+			"Error: Unable to allocate memory for node_info\n");
+		return NULL;
+	}
+
+	ret = of_property_read_u32(dev_node, "cell-id", &node_info->id);
+	if (ret) {
+		dev_warn(&pdev->dev, "Bus node is missing cell-id\n");
+		goto node_info_err;
+	}
+	ret = of_property_read_string(dev_node, "label", &node_info->name);
+	if (ret) {
+		dev_warn(&pdev->dev, "Bus node is missing name\n");
+		goto node_info_err;
+	}
+	node_info->qport = get_arr(pdev, dev_node, "qcom,qport",
+			&node_info->num_qports);
+
+	if (of_get_property(dev_node, "qcom,connections", &size)) {
+		node_info->num_connections = size / sizeof(int);
+		node_info->connections = devm_kzalloc(&pdev->dev, size,
+				GFP_KERNEL);
+	} else {
+		node_info->num_connections = 0;
+		node_info->connections = 0;
+	}
+
+	for (i = 0; i < node_info->num_connections; i++) {
+		con_node = of_parse_phandle(dev_node, "qcom,connections", i);
+		if (IS_ERR_OR_NULL(con_node))
+			goto node_info_err;
+
+		if (of_property_read_u32(con_node, "cell-id",
+				&node_info->connections[i]))
+			goto node_info_err;
+		of_node_put(con_node);
+	}
+
+	if (of_get_property(dev_node, "qcom,blacklist", &size)) {
+		node_info->num_blist = size/sizeof(u32);
+		node_info->black_listed_connections = devm_kzalloc(&pdev->dev,
+		size, GFP_KERNEL);
+	} else {
+		node_info->num_blist = 0;
+		node_info->black_listed_connections = 0;
+	}
+
+	for (i = 0; i < node_info->num_blist; i++) {
+		con_node = of_parse_phandle(dev_node, "qcom,blacklist", i);
+		if (IS_ERR_OR_NULL(con_node))
+			goto node_info_err;
+
+		if (of_property_read_u32(con_node, "cell-id",
+				&node_info->black_listed_connections[i]))
+			goto node_info_err;
+		of_node_put(con_node);
+	}
+
+	bus_dev = of_parse_phandle(dev_node, "qcom,bus-dev", 0);
+	if (!IS_ERR_OR_NULL(bus_dev)) {
+		if (of_property_read_u32(bus_dev, "cell-id",
+			&node_info->bus_device_id)) {
+			dev_err(&pdev->dev, "Can't find bus device. Node %d",
+					node_info->id);
+			goto node_info_err;
+		}
+
+		of_node_put(bus_dev);
+	} else {
+		dev_dbg(&pdev->dev, "Can't find bdev phandle for %d",
+					node_info->id);
+	}
+
+	node_info->is_fab_dev = of_property_read_bool(dev_node,
+							"qcom,fab-dev");
+	node_info->virt_dev = of_property_read_bool(dev_node, "qcom,virt-dev");
+
+
+	ret = of_property_read_u32(dev_node, "qcom,mas-rpm-id",
+						&node_info->mas_rpm_id);
+	if (ret) {
+		dev_dbg(&pdev->dev, "mas rpm id is missing\n");
+		node_info->mas_rpm_id = -1;
+	}
+
+	ret = of_property_read_u32(dev_node, "qcom,slv-rpm-id",
+						&node_info->slv_rpm_id);
+	if (ret) {
+		dev_dbg(&pdev->dev, "slv rpm id is missing\n");
+		node_info->slv_rpm_id = -1;
+	}
+
+	get_agg_params(dev_node, pdev, node_info);
+	get_qos_params(dev_node, pdev, node_info);
+
+	return node_info;
+
+node_info_err:
+	devm_kfree(&pdev->dev, node_info);
+	return NULL;
+}
+
+static int get_bus_node_device_data(
+		struct device_node * const dev_node,
+		struct platform_device * const pdev,
+		struct msm_bus_node_device_type * const node_device)
+{
+	bool enable_only;
+	bool setrate_only;
+	struct device_node *qos_clk_node;
+
+	node_device->node_info = get_node_info_data(dev_node, pdev);
+	if (IS_ERR_OR_NULL(node_device->node_info)) {
+		dev_err(&pdev->dev, "Error: Node info missing\n");
+		return -ENODATA;
+	}
+	node_device->ap_owned = of_property_read_bool(dev_node,
+							"qcom,ap-owned");
+
+	if (node_device->node_info->is_fab_dev) {
+		dev_dbg(&pdev->dev, "Dev %d\n", node_device->node_info->id);
+
+		if (!node_device->node_info->virt_dev) {
+			node_device->fabdev =
+				get_fab_device_info(dev_node, pdev);
+			if (IS_ERR_OR_NULL(node_device->fabdev)) {
+				dev_err(&pdev->dev,
+					"Error: Fabric device info missing\n");
+				devm_kfree(&pdev->dev, node_device->node_info);
+				return -ENODATA;
+			}
+		}
+
+		enable_only = of_property_read_bool(dev_node,
+							"qcom,enable-only-clk");
+		node_device->clk[DUAL_CTX].enable_only_clk = enable_only;
+		node_device->clk[ACTIVE_CTX].enable_only_clk = enable_only;
+
+		/*
+		 * Doesn't make sense to have a clk handle you can't enable or
+		 * set rate on.
+		 */
+		if (!enable_only) {
+			setrate_only = of_property_read_bool(dev_node,
+						"qcom,setrate-only-clk");
+			node_device->clk[DUAL_CTX].setrate_only_clk =
+								setrate_only;
+			node_device->clk[ACTIVE_CTX].setrate_only_clk =
+								setrate_only;
+		}
+
+		node_device->clk[DUAL_CTX].clk = of_clk_get_by_name(dev_node,
+							"bus_clk");
+
+		if (IS_ERR_OR_NULL(node_device->clk[DUAL_CTX].clk)) {
+			int ret;
+			dev_err(&pdev->dev,
+				"%s:Failed to get bus clk for bus%d ctx%d",
+				__func__, node_device->node_info->id,
+								DUAL_CTX);
+			ret = (IS_ERR(node_device->clk[DUAL_CTX].clk) ?
+			PTR_ERR(node_device->clk[DUAL_CTX].clk) : -ENXIO);
+			return ret;
+		}
+
+		if (of_find_property(dev_node, "bus-gdsc-supply", NULL))
+			scnprintf(node_device->clk[DUAL_CTX].reg_name,
+				MAX_REG_NAME, "%s", "bus-gdsc");
+		else
+			scnprintf(node_device->clk[DUAL_CTX].reg_name,
+				MAX_REG_NAME, "%c", '\0');
+
+		node_device->clk[ACTIVE_CTX].clk = of_clk_get_by_name(dev_node,
+							"bus_a_clk");
+		if (IS_ERR_OR_NULL(node_device->clk[ACTIVE_CTX].clk)) {
+			int ret;
+			dev_err(&pdev->dev,
+				"Failed to get bus clk for bus%d ctx%d",
+				 node_device->node_info->id, ACTIVE_CTX);
+			ret = (IS_ERR(node_device->clk[ACTIVE_CTX].clk) ?
+			PTR_ERR(node_device->clk[ACTIVE_CTX].clk) : -ENXIO);
+			return ret;
+		}
+
+		if (of_find_property(dev_node, "bus-a-gdsc-supply", NULL))
+			scnprintf(node_device->clk[ACTIVE_CTX].reg_name,
+				MAX_REG_NAME, "%s", "bus-a-gdsc");
+		else
+			scnprintf(node_device->clk[ACTIVE_CTX].reg_name,
+				MAX_REG_NAME, "%c", '\0');
+
+		node_device->bus_qos_clk.clk = of_clk_get_by_name(dev_node,
+							"bus_qos_clk");
+
+		if (IS_ERR_OR_NULL(node_device->bus_qos_clk.clk)) {
+			dev_dbg(&pdev->dev,
+				"%s:Failed to get bus qos clk for %d",
+				__func__, node_device->node_info->id);
+			scnprintf(node_device->bus_qos_clk.reg_name,
+					MAX_REG_NAME, "%c", '\0');
+		} else {
+			if (of_find_property(dev_node, "bus-qos-gdsc-supply",
+								NULL))
+				scnprintf(node_device->bus_qos_clk.reg_name,
+					MAX_REG_NAME, "%s", "bus-qos-gdsc");
+			else
+				scnprintf(node_device->bus_qos_clk.reg_name,
+					MAX_REG_NAME, "%c", '\0');
+		}
+
+		qos_clk_node = of_get_child_by_name(dev_node,
+						"qcom,node-qos-clks");
+
+		if (qos_clk_node) {
+			if (msm_bus_of_parse_clk_array(qos_clk_node, dev_node,
+						pdev,
+						&node_device->node_qos_clks,
+						&node_device->num_node_qos_clks,
+						node_device->node_info->id)) {
+				dev_info(&pdev->dev, "Bypass QoS programming");
+				if (node_device->fabdev)
+					node_device->fabdev->bypass_qos_prg =
+									true;
+			}
+			of_node_put(qos_clk_node);
+		}
+
+		if (msmbus_coresight_init_adhoc(pdev, dev_node))
+			dev_warn(&pdev->dev,
+				 "Coresight support absent for bus: %d\n",
+				  node_device->node_info->id);
+	} else {
+		node_device->bus_qos_clk.clk = of_clk_get_by_name(dev_node,
+							"bus_qos_clk");
+
+		if (IS_ERR_OR_NULL(node_device->bus_qos_clk.clk))
+			dev_dbg(&pdev->dev,
+				"%s:Failed to get bus qos clk for mas%d",
+				__func__, node_device->node_info->id);
+
+		if (of_find_property(dev_node, "bus-qos-gdsc-supply",
+									NULL))
+			scnprintf(node_device->bus_qos_clk.reg_name,
+				MAX_REG_NAME, "%s", "bus-qos-gdsc");
+		else
+			scnprintf(node_device->bus_qos_clk.reg_name,
+				MAX_REG_NAME, "%c", '\0');
+
+		enable_only = of_property_read_bool(dev_node,
+							"qcom,enable-only-clk");
+		node_device->clk[DUAL_CTX].enable_only_clk = enable_only;
+		node_device->bus_qos_clk.enable_only_clk = enable_only;
+
+		/*
+		 * Doesn't make sense to have a clk handle you can't enable or
+		 * set rate on.
+		 */
+		if (!enable_only) {
+			setrate_only = of_property_read_bool(dev_node,
+						"qcom,setrate-only-clk");
+			node_device->clk[DUAL_CTX].setrate_only_clk =
+								setrate_only;
+			node_device->clk[ACTIVE_CTX].setrate_only_clk =
+								setrate_only;
+		}
+
+		qos_clk_node = of_get_child_by_name(dev_node,
+						"qcom,node-qos-clks");
+
+		if (qos_clk_node) {
+			if (msm_bus_of_parse_clk_array(qos_clk_node, dev_node,
+						pdev,
+						&node_device->node_qos_clks,
+						&node_device->num_node_qos_clks,
+						node_device->node_info->id)) {
+				dev_info(&pdev->dev, "Bypass QoS programming");
+				if (node_device->fabdev)
+					node_device->fabdev->bypass_qos_prg =
+									true;
+			}
+			of_node_put(qos_clk_node);
+		}
+
+		node_device->clk[DUAL_CTX].clk = of_clk_get_by_name(dev_node,
+							"node_clk");
+
+		if (IS_ERR_OR_NULL(node_device->clk[DUAL_CTX].clk))
+			dev_dbg(&pdev->dev,
+				"%s:Failed to get bus clk for bus%d ctx%d",
+				__func__, node_device->node_info->id,
+								DUAL_CTX);
+
+		if (of_find_property(dev_node, "node-gdsc-supply", NULL))
+			scnprintf(node_device->clk[DUAL_CTX].reg_name,
+				MAX_REG_NAME, "%s", "node-gdsc");
+		else
+			scnprintf(node_device->clk[DUAL_CTX].reg_name,
+				MAX_REG_NAME, "%c", '\0');
+
+	}
+	return 0;
+}
+
+struct msm_bus_device_node_registration
+	*msm_bus_of_to_pdata(struct platform_device *pdev)
+{
+	struct device_node *of_node, *child_node;
+	struct msm_bus_device_node_registration *pdata;
+	unsigned int i = 0, j;
+	int ret;
+
+	if (!pdev) {
+		pr_err("Error: Null platform device\n");
+		return NULL;
+	}
+
+	of_node = pdev->dev.of_node;
+
+	pdata = devm_kzalloc(&pdev->dev,
+			sizeof(struct msm_bus_device_node_registration),
+			GFP_KERNEL);
+	if (!pdata) {
+		dev_err(&pdev->dev,
+				"Error: Memory allocation for pdata failed\n");
+		return NULL;
+	}
+
+	pdata->num_devices = of_get_child_count(of_node);
+
+	pdata->info = devm_kzalloc(&pdev->dev,
+			sizeof(struct msm_bus_node_device_type) *
+			pdata->num_devices, GFP_KERNEL);
+
+	if (!pdata->info) {
+		dev_err(&pdev->dev,
+			"Error: Memory allocation for pdata->info failed\n");
+		goto node_reg_err;
+	}
+
+	ret = 0;
+	for_each_child_of_node(of_node, child_node) {
+		ret = get_bus_node_device_data(child_node, pdev,
+				&pdata->info[i]);
+		if (ret) {
+			dev_err(&pdev->dev, "Error: unable to initialize bus nodes\n");
+			goto node_reg_err_1;
+		}
+		pdata->info[i].of_node = child_node;
+		i++;
+	}
+
+	dev_dbg(&pdev->dev, "bus topology:\n");
+	for (i = 0; i < pdata->num_devices; i++) {
+		dev_dbg(&pdev->dev, "id %d\nnum_qports %d\nnum_connections %d",
+				pdata->info[i].node_info->id,
+				pdata->info[i].node_info->num_qports,
+				pdata->info[i].node_info->num_connections);
+		dev_dbg(&pdev->dev, "\nbus_device_id %d\n buswidth %d\n",
+				pdata->info[i].node_info->bus_device_id,
+				pdata->info[i].node_info->agg_params.buswidth);
+		for (j = 0; j < pdata->info[i].node_info->num_connections;
+									j++) {
+			dev_dbg(&pdev->dev, "connection[%d]: %d\n", j,
+				pdata->info[i].node_info->connections[j]);
+		}
+		for (j = 0; j < pdata->info[i].node_info->num_blist;
+									 j++) {
+			dev_dbg(&pdev->dev, "black_listed_node[%d]: %d\n", j,
+				pdata->info[i].node_info->
+				black_listed_connections[j]);
+		}
+		if (pdata->info[i].fabdev)
+			dev_dbg(&pdev->dev, "base_addr %zu\nbus_type %d\n",
+					(size_t)pdata->info[i].
+						fabdev->pqos_base,
+					pdata->info[i].fabdev->bus_type);
+	}
+	return pdata;
+
+node_reg_err_1:
+	devm_kfree(&pdev->dev, pdata->info);
+node_reg_err:
+	devm_kfree(&pdev->dev, pdata);
+	pdata = NULL;
+	return NULL;
+}
+
+static int msm_bus_of_get_ids(struct platform_device *pdev,
+			struct device_node *dev_node, int **dev_ids,
+			int *num_ids, char *prop_name)
+{
+	int ret = 0;
+	int size, i;
+	struct device_node *rule_node;
+	int *ids = NULL;
+
+	if (of_get_property(dev_node, prop_name, &size)) {
+		*num_ids = size / sizeof(int);
+		ids = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
+		if (!ids) {
+			ret = -ENOMEM;
+			goto exit_get_ids;
+		}
+	} else {
+		dev_err(&pdev->dev, "No rule nodes, skipping node");
+		ret = -ENXIO;
+		goto exit_get_ids;
+	}
+
+	*dev_ids = ids;
+	for (i = 0; i < *num_ids; i++) {
+		rule_node = of_parse_phandle(dev_node, prop_name, i);
+		if (IS_ERR_OR_NULL(rule_node)) {
+			dev_err(&pdev->dev, "Can't get rule node id");
+			ret = -ENXIO;
+			goto err_get_ids;
+		}
+
+		if (of_property_read_u32(rule_node, "cell-id",
+				&ids[i])) {
+			dev_err(&pdev->dev, "Can't get rule node id");
+			ret = -ENXIO;
+			goto err_get_ids;
+		}
+		of_node_put(rule_node);
+	}
+exit_get_ids:
+	return ret;
+err_get_ids:
+	if (!IS_ERR_OR_NULL(rule_node))
+		of_node_put(rule_node);
+	devm_kfree(&pdev->dev, ids);
+	*dev_ids = NULL;
+	return ret;
+}
+
+int msm_bus_of_get_static_rules(struct platform_device *pdev,
+					struct bus_rule_type **static_rules)
+{
+	int ret = 0;
+	struct device_node *of_node, *child_node;
+	int num_rules = 0;
+	int rule_idx = 0;
+	int bw_fld = 0;
+	int i;
+	struct bus_rule_type *local_rule = NULL;
+
+	of_node = pdev->dev.of_node;
+	num_rules = of_get_child_count(of_node);
+	local_rule = devm_kzalloc(&pdev->dev,
+				sizeof(struct bus_rule_type) * num_rules,
+				GFP_KERNEL);
+
+	if (IS_ERR_OR_NULL(local_rule)) {
+		ret = -ENOMEM;
+		goto exit_static_rules;
+	}
+
+	*static_rules = local_rule;
+	for_each_child_of_node(of_node, child_node) {
+		ret = msm_bus_of_get_ids(pdev, child_node,
+			&local_rule[rule_idx].src_id,
+			&local_rule[rule_idx].num_src,
+			"qcom,src-nodes");
+
+		ret = msm_bus_of_get_ids(pdev, child_node,
+			&local_rule[rule_idx].dst_node,
+			&local_rule[rule_idx].num_dst,
+			"qcom,dest-node");
+
+		ret = of_property_read_u32(child_node, "qcom,src-field",
+				&local_rule[rule_idx].src_field);
+		if (ret) {
+			dev_err(&pdev->dev, "src-field missing");
+			ret = -ENXIO;
+			goto err_static_rules;
+		}
+
+		ret = of_property_read_u32(child_node, "qcom,src-op",
+				&local_rule[rule_idx].op);
+		if (ret) {
+			dev_err(&pdev->dev, "src-op missing");
+			ret = -ENXIO;
+			goto err_static_rules;
+		}
+
+		ret = of_property_read_u32(child_node, "qcom,mode",
+				&local_rule[rule_idx].mode);
+		if (ret) {
+			dev_err(&pdev->dev, "mode missing");
+			ret = -ENXIO;
+			goto err_static_rules;
+		}
+
+		ret = of_property_read_u32(child_node, "qcom,thresh", &bw_fld);
+		if (ret) {
+			dev_err(&pdev->dev, "thresh missing");
+			ret = -ENXIO;
+			goto err_static_rules;
+		}
+		local_rule[rule_idx].thresh = KBTOB(bw_fld);
+
+		ret = of_property_read_u32(child_node, "qcom,dest-bw",
+								&bw_fld);
+		if (ret)
+			local_rule[rule_idx].dst_bw = 0;
+		else
+			local_rule[rule_idx].dst_bw = KBTOB(bw_fld);
+
+		rule_idx++;
+	}
+	ret = rule_idx;
+exit_static_rules:
+	return ret;
+err_static_rules:
+	for (i = 0; i < num_rules; i++) {
+		if (!IS_ERR_OR_NULL(local_rule)) {
+			if (!IS_ERR_OR_NULL(local_rule[i].src_id))
+				devm_kfree(&pdev->dev,
+						local_rule[i].src_id);
+			if (!IS_ERR_OR_NULL(local_rule[i].dst_node))
+				devm_kfree(&pdev->dev,
+						local_rule[i].dst_node);
+			devm_kfree(&pdev->dev, local_rule);
+		}
+	}
+	*static_rules = NULL;
+	return ret;
+}
diff -Nruw linux-4.4.115-fbx/drivers/soc/qcom/msm_bus./msm_bus_of.c linux-4.4.115-fbx/drivers/soc/qcom/msm_bus/msm_bus_of.c
--- linux-4.4.115-fbx/drivers/soc/qcom/msm_bus./msm_bus_of.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/soc/qcom/msm_bus/msm_bus_of.c	2019-01-22 16:16:26.663275022 +0100
@@ -0,0 +1,703 @@
+/* Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "AXI: %s(): " fmt, __func__
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/msm-bus.h>
+#include <linux/msm-bus-board.h>
+#include "msm_bus_core.h"
+
+static const char * const hw_sel_name[] = {"RPM", "NoC", "BIMC", NULL};
+static const char * const mode_sel_name[] = {"Fixed", "Limiter", "Bypass",
+						"Regulator", NULL};
+
+static int get_num(const char *const str[], const char *name)
+{
+	int i = 0;
+
+	do {
+		if (!strcmp(name, str[i]))
+			return i;
+
+		i++;
+	} while (str[i] != NULL);
+
+	pr_err("Error: string %s not found\n", name);
+	return -EINVAL;
+}
+
+static struct msm_bus_scale_pdata *get_pdata(struct platform_device *pdev,
+	struct device_node *of_node)
+{
+	struct msm_bus_scale_pdata *pdata = NULL;
+	struct msm_bus_paths *usecase = NULL;
+	int i = 0, j, ret, num_usecases = 0, num_paths, len;
+	const uint32_t *vec_arr = NULL;
+	bool mem_err = false;
+
+	if (!pdev) {
+		pr_err("Error: Null Platform device\n");
+		return NULL;
+	}
+
+	pdata = devm_kzalloc(&pdev->dev, sizeof(struct msm_bus_scale_pdata),
+		GFP_KERNEL);
+	if (!pdata) {
+		pr_err("Error: Memory allocation for pdata failed\n");
+		mem_err = true;
+		goto err;
+	}
+
+	ret = of_property_read_string(of_node, "qcom,msm-bus,name",
+		&pdata->name);
+	if (ret) {
+		pr_err("Error: Client name not found\n");
+		goto err;
+	}
+
+	ret = of_property_read_u32(of_node, "qcom,msm-bus,num-cases",
+		&num_usecases);
+	if (ret) {
+		pr_err("Error: num-usecases not found\n");
+		goto err;
+	}
+
+	pdata->num_usecases = num_usecases;
+
+	if (of_property_read_bool(of_node, "qcom,msm-bus,active-only"))
+		pdata->active_only = 1;
+	else {
+		pr_debug("active_only flag absent.\n");
+		pr_debug("Using dual context by default\n");
+	}
+
+	usecase = devm_kzalloc(&pdev->dev, (sizeof(struct msm_bus_paths) *
+		pdata->num_usecases), GFP_KERNEL);
+	if (!usecase) {
+		pr_err("Error: Memory allocation for paths failed\n");
+		mem_err = true;
+		goto err;
+	}
+
+	ret = of_property_read_u32(of_node, "qcom,msm-bus,num-paths",
+		&num_paths);
+	if (ret) {
+		pr_err("Error: num_paths not found\n");
+		goto err;
+	}
+
+	vec_arr = of_get_property(of_node, "qcom,msm-bus,vectors-KBps", &len);
+	if (vec_arr == NULL) {
+		pr_err("Error: Vector array not found\n");
+		goto err;
+	}
+
+	if (len != num_usecases * num_paths * sizeof(uint32_t) * 4) {
+		pr_err("Error: Length-error on getting vectors\n");
+		goto err;
+	}
+
+	for (i = 0; i < num_usecases; i++) {
+		usecase[i].num_paths = num_paths;
+		usecase[i].vectors = devm_kzalloc(&pdev->dev, num_paths *
+			sizeof(struct msm_bus_vectors), GFP_KERNEL);
+		if (!usecase[i].vectors) {
+			mem_err = true;
+			pr_err("Error: Mem alloc failure in vectors\n");
+			goto err;
+		}
+
+		for (j = 0; j < num_paths; j++) {
+			int index = ((i * num_paths) + j) * 4;
+			usecase[i].vectors[j].src = be32_to_cpu(vec_arr[index]);
+			usecase[i].vectors[j].dst =
+				be32_to_cpu(vec_arr[index + 1]);
+			usecase[i].vectors[j].ab = (uint64_t)
+				KBTOB(be32_to_cpu(vec_arr[index + 2]));
+			usecase[i].vectors[j].ib = (uint64_t)
+				KBTOB(be32_to_cpu(vec_arr[index + 3]));
+		}
+	}
+
+	pdata->usecase = usecase;
+	return pdata;
+err:
+	if (mem_err) {
+		/* These allocations are devm-managed; match devm_kfree() */
+		for (; i > 0; i--)
+			devm_kfree(&pdev->dev, usecase[i-1].vectors);
+
+		if (usecase)
+			devm_kfree(&pdev->dev, usecase);
+		if (pdata)
+			devm_kfree(&pdev->dev, pdata);
+	}
+
+	return NULL;
+}
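The loop above decodes "qcom,msm-bus,vectors-KBps" as a flat array of four-cell tuples (src, dst, ab, ib), one tuple per (usecase, path) pair. A minimal userspace sketch of the same indexing, with hypothetical node IDs and bandwidth values; the kernel path additionally byte-swaps each cell with be32_to_cpu() and widens ab/ib through KBTOB(), both omitted here:

```c
#include <stdint.h>
#include <stdio.h>

/* Decode one (usecase, path) tuple from the flat vectors-KBps array. */
static void decode_vector(const uint32_t *cells, int num_paths,
			  int usecase, int path)
{
	int idx = ((usecase * num_paths) + path) * 4;

	printf("src=%u dst=%u ab=%u KB/s ib=%u KB/s\n",
	       cells[idx], cells[idx + 1], cells[idx + 2], cells[idx + 3]);
}

int main(void)
{
	/* Hypothetical two-usecase, one-path client vote table. */
	const uint32_t cells[] = {
		22, 512, 0,      0,		/* usecase 0: idle */
		22, 512, 200000, 400000,	/* usecase 1: active */
	};

	decode_vector(cells, 1, 1, 0);
	return 0;
}
```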
+
+/**
+ * msm_bus_cl_get_pdata() - Generate bus client data from the device tree
+ * node attached to the client's platform device.
+ *
+ * @pdev: Platform device whose of_node carries the bus scaling properties
+ *
+ * The function returns a valid pointer to the allocated bus-scale-pdata
+ * if the vectors were correctly read from the client's device node.
+ * Any error in reading or parsing the device node will return NULL
+ * to the caller.
+ */
+struct msm_bus_scale_pdata *msm_bus_cl_get_pdata(struct platform_device *pdev)
+{
+	struct device_node *of_node;
+	struct msm_bus_scale_pdata *pdata = NULL;
+
+	if (!pdev) {
+		pr_err("Error: Null Platform device\n");
+		return NULL;
+	}
+
+	of_node = pdev->dev.of_node;
+	pdata = get_pdata(pdev, of_node);
+	if (!pdata) {
+		pr_err("client has to provide missing entry for successful registration\n");
+		return NULL;
+	}
+
+	return pdata;
+}
+EXPORT_SYMBOL(msm_bus_cl_get_pdata);
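A hedged sketch of the intended call flow from a client driver's probe. msm_bus_scale_register_client() and msm_bus_scale_client_update_request() are the companion client APIs from <linux/msm-bus.h> in downstream MSM kernels and are assumed available here:

```c
#include <linux/msm-bus.h>
#include <linux/platform_device.h>

static int my_client_probe(struct platform_device *pdev)
{
	struct msm_bus_scale_pdata *pdata;
	uint32_t bus_hdl;

	pdata = msm_bus_cl_get_pdata(pdev);	/* parsed from DT as above */
	if (!pdata)
		return -EINVAL;

	bus_hdl = msm_bus_scale_register_client(pdata);
	if (!bus_hdl)
		return -EINVAL;

	/* Vote for usecase 1 of the vectors parsed from the DT node. */
	return msm_bus_scale_client_update_request(bus_hdl, 1);
}
```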
+
+/**
+ * msm_bus_pdata_from_node() - Generate bus client data from a device tree
+ * node provided by the client. This function should be used when a client
+ * driver needs to register multiple bus clients from a single device-tree
+ * node associated with the platform device.
+ *
+ * @pdev: Platform device associated with the device-tree node
+ *
+ * @of_node: The subnode containing the bus scaling data
+ *
+ * The function returns a valid pointer to the allocated bus-scale-pdata
+ * if the vectors were correctly read from the client's device node.
+ * Any error in reading or parsing the device node will return NULL
+ * to the caller.
+ */
+struct msm_bus_scale_pdata *msm_bus_pdata_from_node(
+		struct platform_device *pdev, struct device_node *of_node)
+{
+	struct msm_bus_scale_pdata *pdata = NULL;
+
+	if (!pdev) {
+		pr_err("Error: Null Platform device\n");
+		return NULL;
+	}
+
+	if (!of_node) {
+		pr_err("Error: Null of_node passed to bus driver\n");
+		return NULL;
+	}
+
+	pdata = get_pdata(pdev, of_node);
+	if (!pdata) {
+		pr_err("client has to provide missing entry for successful registration\n");
+		return NULL;
+	}
+
+	return pdata;
+}
+EXPORT_SYMBOL(msm_bus_pdata_from_node);
+
+/**
+ * msm_bus_cl_clear_pdata() - Clear pdata allocated from device-tree
+ * @pdata: Bus scaling pdata to free, as returned by msm_bus_cl_get_pdata()
+ */
+void msm_bus_cl_clear_pdata(struct msm_bus_scale_pdata *pdata)
+{
+	int i;
+
+	for (i = 0; i < pdata->num_usecases; i++)
+		kfree(pdata->usecase[i].vectors);
+
+	kfree(pdata->usecase);
+	kfree(pdata);
+}
+EXPORT_SYMBOL(msm_bus_cl_clear_pdata);
+
+static int *get_arr(struct platform_device *pdev,
+		const struct device_node *node, const char *prop,
+		int *nports)
+{
+	int size = 0, ret;
+	int *arr = NULL;
+
+	if (of_get_property(node, prop, &size)) {
+		*nports = size / sizeof(int);
+	} else {
+		pr_debug("Property %s not available\n", prop);
+		*nports = 0;
+		return NULL;
+	}
+
+	if (!size) {
+		*nports = 0;
+		return NULL;
+	}
+
+	arr = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
+	if (ZERO_OR_NULL_PTR(arr)) {
+		pr_err("Error: Failed to alloc mem for %s\n", prop);
+		return NULL;
+	}
+
+	ret = of_property_read_u32_array(node, prop, (u32 *)arr, *nports);
+	if (ret) {
+		pr_err("Error in reading property: %s\n", prop);
+		goto err;
+	}
+
+	return arr;
+err:
+	devm_kfree(&pdev->dev, arr);
+	return NULL;
+}
+
+static u64 *get_th_params(struct platform_device *pdev,
+		const struct device_node *node, const char *prop,
+		int *nports)
+{
+	int size = 0, ret;
+	u64 *ret_arr = NULL;
+	int *arr = NULL;
+	int i;
+
+	if (of_get_property(node, prop, &size)) {
+		*nports = size / sizeof(int);
+	} else {
+		pr_debug("Property %s not available\n", prop);
+		*nports = 0;
+		return NULL;
+	}
+
+	if (!size) {
+		*nports = 0;
+		return NULL;
+	}
+
+	ret_arr = devm_kzalloc(&pdev->dev, (*nports * sizeof(u64)),
+							GFP_KERNEL);
+	if (ZERO_OR_NULL_PTR(ret_arr)) {
+		pr_err("Error: Failed to alloc mem for ret arr %s\n", prop);
+		return NULL;
+	}
+
+	arr = kzalloc(size, GFP_KERNEL);
+	if (ZERO_OR_NULL_PTR(arr)) {
+		pr_err("Error: Failed to alloc temp mem for %s\n", prop);
+		return NULL;
+	}
+
+	ret = of_property_read_u32_array(node, prop, (u32 *)arr, *nports);
+	if (ret) {
+		pr_err("Error in reading property: %s\n", prop);
+		goto err;
+	}
+
+	for (i = 0; i < *nports; i++)
+		ret_arr[i] = (uint64_t)KBTOB(arr[i]);
+
+	MSM_BUS_DBG("%s: num entries %d prop %s", __func__, *nports, prop);
+
+	for (i = 0; i < *nports; i++)
+		MSM_BUS_DBG("Th %d val %llu", i, ret_arr[i]);
+
+	kfree(arr);
+	return ret_arr;
+err:
+	kfree(arr);
+	devm_kfree(&pdev->dev, ret_arr);
+	return NULL;
+}
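The temporary int array exists because of_property_read_u32_array() only fills 32-bit cells, while the thresholds are stored as 64-bit byte values. A standalone sketch of the widening step, assuming KBTOB() scales a KB/s cell into bytes per second, as elsewhere in this driver:

```c
#include <stdint.h>

/* Widen 32-bit KB/s device-tree cells into 64-bit byte/s thresholds. */
static void widen_kbps_cells(const uint32_t *cells, uint64_t *out, int n)
{
	int i;

	for (i = 0; i < n; i++)
		out[i] = (uint64_t)cells[i] * 1000ULL;	/* assumed KBTOB() */
}
```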
+
+static struct msm_bus_node_info *get_nodes(struct device_node *of_node,
+	struct platform_device *pdev,
+	struct msm_bus_fabric_registration *pdata)
+{
+	struct msm_bus_node_info *info;
+	struct device_node *child_node = NULL;
+	int i = 0, ret;
+	int num_bw = 0;
+	u32 temp;
+
+	for_each_child_of_node(of_node, child_node) {
+		i++;
+	}
+
+	pdata->len = i;
+	info = (struct msm_bus_node_info *)
+		devm_kzalloc(&pdev->dev, sizeof(struct msm_bus_node_info) *
+			pdata->len, GFP_KERNEL);
+	if (ZERO_OR_NULL_PTR(info)) {
+		pr_err("Failed to alloc memory for nodes: %d\n", pdata->len);
+		goto err;
+	}
+
+	i = 0;
+	child_node = NULL;
+	for_each_child_of_node(of_node, child_node) {
+		const char *sel_str;
+
+		ret = of_property_read_string(child_node, "label",
+			&info[i].name);
+		if (ret)
+			pr_err("Error reading node label\n");
+
+		ret = of_property_read_u32(child_node, "cell-id", &info[i].id);
+		if (ret) {
+			pr_err("Error reading node id\n");
+			goto err;
+		}
+
+		if (of_property_read_bool(child_node, "qcom,gateway"))
+			info[i].gateway = 1;
+
+		of_property_read_u32(child_node, "qcom,mas-hw-id",
+			&info[i].mas_hw_id);
+
+		of_property_read_u32(child_node, "qcom,slv-hw-id",
+			&info[i].slv_hw_id);
+		info[i].masterp = get_arr(pdev, child_node,
+					"qcom,masterp", &info[i].num_mports);
+		/* No need to store number of qports */
+		info[i].qport = get_arr(pdev, child_node,
+					"qcom,qport", &ret);
+		pdata->nmasters += info[i].num_mports;
+
+
+		info[i].slavep = get_arr(pdev, child_node,
+					"qcom,slavep", &info[i].num_sports);
+		pdata->nslaves += info[i].num_sports;
+
+
+		info[i].tier = get_arr(pdev, child_node,
+					"qcom,tier", &info[i].num_tiers);
+
+		if (of_property_read_bool(child_node, "qcom,ahb"))
+			info[i].ahb = 1;
+
+		ret = of_property_read_string(child_node, "qcom,hw-sel",
+			&sel_str);
+		if (ret)
+			info[i].hw_sel = 0;
+		else {
+			ret =  get_num(hw_sel_name, sel_str);
+			if (ret < 0) {
+				pr_err("Invalid hw-sel\n");
+				goto err;
+			}
+
+			info[i].hw_sel = ret;
+		}
+
+		of_property_read_u32(child_node, "qcom,buswidth",
+			&info[i].buswidth);
+		of_property_read_u32(child_node, "qcom,ws", &info[i].ws);
+
+		info[i].dual_conf =
+			of_property_read_bool(child_node, "qcom,dual-conf");
+
+
+		info[i].th = get_th_params(pdev, child_node, "qcom,thresh",
+						&info[i].num_thresh);
+
+		info[i].bimc_bw = get_th_params(pdev, child_node,
+						"qcom,bimc,bw", &num_bw);
+
+		if (num_bw != info[i].num_thresh) {
+			pr_err("%s:num_bw %d must equal num_thresh %d",
+				__func__, num_bw, info[i].num_thresh);
+			pr_err("%s:Err setting up dual conf for %s",
+				__func__, info[i].name);
+			goto err;
+		}
+
+		of_property_read_u32(child_node, "qcom,bimc,gp",
+			&info[i].bimc_gp);
+		of_property_read_u32(child_node, "qcom,bimc,thmp",
+			&info[i].bimc_thmp);
+
+		ret = of_property_read_string(child_node, "qcom,mode-thresh",
+			&sel_str);
+		if (ret)
+			info[i].mode_thresh = 0;
+		else {
+			ret = get_num(mode_sel_name, sel_str);
+			if (ret < 0) {
+				pr_err("Unknown mode :%s\n", sel_str);
+				goto err;
+			}
+
+			info[i].mode_thresh = ret;
+			MSM_BUS_DBG("AXI: THreshold mode set: %d\n",
+					info[i].mode_thresh);
+		}
+
+		ret = of_property_read_string(child_node, "qcom,mode",
+				&sel_str);
+
+		if (ret)
+			info[i].mode = 0;
+		else {
+			ret = get_num(mode_sel_name, sel_str);
+			if (ret < 0) {
+				pr_err("Unknown mode :%s\n", sel_str);
+				goto err;
+			}
+
+			info[i].mode = ret;
+		}
+
+		info[i].nr_lim =
+			of_property_read_bool(child_node, "qcom,nr-lim");
+
+		ret = of_property_read_u32(child_node, "qcom,ff",
+							&info[i].ff);
+		if (ret) {
+			pr_debug("fudge factor not present %d", info[i].id);
+			info[i].ff = 0;
+		}
+
+		ret = of_property_read_u32(child_node, "qcom,floor-bw",
+						&temp);
+		if (ret) {
+			pr_debug("fabdev floor bw not present %d", info[i].id);
+			info[i].floor_bw = 0;
+		} else {
+			info[i].floor_bw = KBTOB(temp);
+		}
+
+		info[i].rt_mas =
+			of_property_read_bool(child_node, "qcom,rt-mas");
+
+		ret = of_property_read_string(child_node, "qcom,perm-mode",
+			&sel_str);
+		if (ret)
+			info[i].perm_mode = 0;
+		else {
+			ret = get_num(mode_sel_name, sel_str);
+			if (ret < 0)
+				goto err;
+
+			info[i].perm_mode = 1 << ret;
+		}
+
+		of_property_read_u32(child_node, "qcom,prio-lvl",
+			&info[i].prio_lvl);
+		of_property_read_u32(child_node, "qcom,prio-rd",
+			&info[i].prio_rd);
+		of_property_read_u32(child_node, "qcom,prio-wr",
+			&info[i].prio_wr);
+		of_property_read_u32(child_node, "qcom,prio0", &info[i].prio0);
+		of_property_read_u32(child_node, "qcom,prio1", &info[i].prio1);
+		ret = of_property_read_string(child_node, "qcom,slaveclk-dual",
+			&info[i].slaveclk[DUAL_CTX]);
+		if (!ret)
+			pr_debug("Got slaveclk_dual: %s\n",
+				info[i].slaveclk[DUAL_CTX]);
+		else
+			info[i].slaveclk[DUAL_CTX] = NULL;
+
+		ret = of_property_read_string(child_node,
+			"qcom,slaveclk-active", &info[i].slaveclk[ACTIVE_CTX]);
+		if (!ret)
+			pr_debug("Got slaveclk_active\n");
+		else
+			info[i].slaveclk[ACTIVE_CTX] = NULL;
+
+		ret = of_property_read_string(child_node, "qcom,memclk-dual",
+			&info[i].memclk[DUAL_CTX]);
+		if (!ret)
+			pr_debug("Got memclk_dual\n");
+		else
+			info[i].memclk[DUAL_CTX] = NULL;
+
+		ret = of_property_read_string(child_node, "qcom,memclk-active",
+			&info[i].memclk[ACTIVE_CTX]);
+		if (!ret)
+			pr_debug("Got memclk_active\n");
+		else
+			info[i].memclk[ACTIVE_CTX] = NULL;
+
+		ret = of_property_read_string(child_node, "qcom,iface-clk-node",
+			&info[i].iface_clk_node);
+		if (!ret)
+			pr_debug("Got iface_clk_node\n");
+		else
+			info[i].iface_clk_node = NULL;
+
+		pr_debug("Node name: %s\n", info[i].name);
+		of_node_put(child_node);
+		i++;
+	}
+
+	pr_debug("Bus %d added: %d masters\n", pdata->id, pdata->nmasters);
+	pr_debug("Bus %d added: %d slaves\n", pdata->id, pdata->nslaves);
+	return info;
+err:
+	return NULL;
+}
+
+void msm_bus_of_get_nfab(struct platform_device *pdev,
+		struct msm_bus_fabric_registration *pdata)
+{
+	struct device_node *of_node;
+	int ret, nfab = 0;
+
+	if (!pdev) {
+		pr_err("Error: Null platform device\n");
+		return;
+	}
+
+	of_node = pdev->dev.of_node;
+	ret = of_property_read_u32(of_node, "qcom,nfab",
+		&nfab);
+	if (!ret)
+		pr_debug("Fab_of: Read number of buses: %u\n", nfab);
+
+	msm_bus_board_set_nfab(pdata, nfab);
+}
+
+struct msm_bus_fabric_registration
+	*msm_bus_of_get_fab_data(struct platform_device *pdev)
+{
+	struct device_node *of_node;
+	struct msm_bus_fabric_registration *pdata;
+	bool mem_err = false;
+	int ret = 0;
+	const char *sel_str;
+	u32 temp;
+
+	if (!pdev) {
+		pr_err("Error: Null platform device\n");
+		return NULL;
+	}
+
+	of_node = pdev->dev.of_node;
+	pdata = devm_kzalloc(&pdev->dev,
+			sizeof(struct msm_bus_fabric_registration), GFP_KERNEL);
+	if (!pdata) {
+		pr_err("Error: Memory allocation for pdata failed\n");
+		mem_err = true;
+		goto err;
+	}
+
+	ret = of_property_read_string(of_node, "label", &pdata->name);
+	if (ret) {
+		pr_err("Error: label not found\n");
+		goto err;
+	}
+	pr_debug("Fab_of: Read name: %s\n", pdata->name);
+
+	ret = of_property_read_u32(of_node, "cell-id",
+		&pdata->id);
+	if (ret) {
+		pr_err("Error: num-usecases not found\n");
+		goto err;
+	}
+	pr_debug("Fab_of: Read id: %u\n", pdata->id);
+
+	if (of_property_read_bool(of_node, "qcom,ahb"))
+		pdata->ahb = 1;
+
+	ret = of_property_read_string(of_node, "qcom,fabclk-dual",
+		&pdata->fabclk[DUAL_CTX]);
+	if (ret) {
+		pr_debug("fabclk_dual not available\n");
+		pdata->fabclk[DUAL_CTX] = NULL;
+	} else
+		pr_debug("Fab_of: Read clk dual ctx: %s\n",
+			pdata->fabclk[DUAL_CTX]);
+	ret = of_property_read_string(of_node, "qcom,fabclk-active",
+		&pdata->fabclk[ACTIVE_CTX]);
+	if (ret) {
+		pr_debug("Error: fabclk_active not available\n");
+		pdata->fabclk[ACTIVE_CTX] = NULL;
+	} else
+		pr_debug("Fab_of: Read clk act ctx: %s\n",
+			pdata->fabclk[ACTIVE_CTX]);
+
+	ret = of_property_read_u32(of_node, "qcom,ntieredslaves",
+		&pdata->ntieredslaves);
+	if (ret) {
+		pr_err("Error: ntieredslaves not found\n");
+		goto err;
+	}
+
+	ret = of_property_read_u32(of_node, "qcom,qos-freq", &pdata->qos_freq);
+	if (ret)
+		pr_debug("qos_freq not available\n");
+
+	ret = of_property_read_string(of_node, "qcom,hw-sel", &sel_str);
+	if (ret) {
+		pr_err("Error: hw_sel not found\n");
+		goto err;
+	} else {
+		ret = get_num(hw_sel_name, sel_str);
+		if (ret < 0)
+			goto err;
+
+		pdata->hw_sel = ret;
+	}
+
+	if (of_property_read_bool(of_node, "qcom,virt"))
+		pdata->virt = true;
+
+	ret = of_property_read_u32(of_node, "qcom,qos-baseoffset",
+						&pdata->qos_baseoffset);
+	if (ret)
+		pr_debug("%s:qos_baseoffset not available\n", __func__);
+
+	ret = of_property_read_u32(of_node, "qcom,qos-delta",
+						&pdata->qos_delta);
+	if (ret)
+		pr_debug("%s:qos_delta not available\n", __func__);
+
+	if (of_property_read_bool(of_node, "qcom,rpm-en"))
+		pdata->rpm_enabled = 1;
+
+	ret = of_property_read_u32(of_node, "qcom,nr-lim-thresh",
+						&temp);
+
+	if (ret) {
+		pr_err("nr-lim threshold not specified");
+		pdata->nr_lim_thresh = 0;
+	} else {
+		pdata->nr_lim_thresh = KBTOB(temp);
+	}
+
+	ret = of_property_read_u32(of_node, "qcom,eff-fact",
+						&pdata->eff_fact);
+	if (ret) {
+		pr_err("Fab eff-factor not present");
+		pdata->eff_fact = 0;
+	}
+
+	pdata->info = get_nodes(of_node, pdev, pdata);
+	return pdata;
+err:
+	return NULL;
+}
+EXPORT_SYMBOL(msm_bus_of_get_fab_data);
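A hedged sketch of a fabric probe consuming the two helpers above; the downstream fabric registration call is outside this file and is deliberately omitted:

```c
#include <linux/msm-bus-board.h>
#include <linux/platform_device.h>

static int fabric_probe_sketch(struct platform_device *pdev)
{
	struct msm_bus_fabric_registration *pdata;

	pdata = msm_bus_of_get_fab_data(pdev);
	if (!pdata)
		return -EINVAL;

	msm_bus_of_get_nfab(pdev, pdata);
	/* ... hand pdata to the fabric layer ... */
	return 0;
}
```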
diff -Nruw linux-4.4.115-fbx/drivers/soc/qcom/msm_bus./msm_bus_rpm_smd.c linux-4.4.115-fbx/drivers/soc/qcom/msm_bus/msm_bus_rpm_smd.c
--- linux-4.4.115-fbx/drivers/soc/qcom/msm_bus./msm_bus_rpm_smd.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/soc/qcom/msm_bus/msm_bus_rpm_smd.c	2019-01-22 16:16:26.663275022 +0100
@@ -0,0 +1,242 @@
+/* Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) "AXI: %s(): " fmt, __func__
+
+#include "msm_bus_core.h"
+#include <linux/msm-bus.h>
+#include <linux/msm-bus-board.h>
+#include <soc/qcom/rpm-smd.h>
+
+/* Stubs for backward compatibility */
+void msm_bus_rpm_set_mt_mask(void)
+{
+}
+
+bool msm_bus_rpm_is_mem_interleaved(void)
+{
+	return true;
+}
+
+struct commit_data {
+	struct msm_bus_node_hw_info *mas_arb;
+	struct msm_bus_node_hw_info *slv_arb;
+};
+
+#ifdef CONFIG_DEBUG_FS
+void msm_bus_rpm_fill_cdata_buffer(int *curr, char *buf, const int max_size,
+	void *cdata, int nmasters, int nslaves, int ntslaves)
+{
+	int c;
+	struct commit_data *cd = (struct commit_data *)cdata;
+
+	*curr += scnprintf(buf + *curr, max_size - *curr, "\nMas BW:\n");
+	for (c = 0; c < nmasters; c++)
+		*curr += scnprintf(buf + *curr, max_size - *curr,
+			"%d: %llu\t", cd->mas_arb[c].hw_id,
+			cd->mas_arb[c].bw);
+	*curr += scnprintf(buf + *curr, max_size - *curr, "\nSlave BW:\n");
+	for (c = 0; c < nslaves; c++) {
+		*curr += scnprintf(buf + *curr, max_size - *curr,
+		"%d: %llu\t", cd->slv_arb[c].hw_id,
+		cd->slv_arb[c].bw);
+	}
+}
+#endif
+
+static int msm_bus_rpm_compare_cdata(
+	struct msm_bus_fabric_registration *fab_pdata,
+	struct commit_data *cd1, struct commit_data *cd2)
+{
+	size_t n;
+	int ret;
+
+	n = sizeof(struct msm_bus_node_hw_info) * fab_pdata->nmasters * 2;
+	ret = memcmp(cd1->mas_arb, cd2->mas_arb, n);
+	if (ret) {
+		MSM_BUS_DBG("Master Arb Data not equal\n");
+		return ret;
+	}
+
+	n = sizeof(struct msm_bus_node_hw_info) * fab_pdata->nslaves * 2;
+	ret = memcmp(cd1->slv_arb, cd2->slv_arb, n);
+	if (ret) {
+		MSM_BUS_DBG("Master Arb Data not equal\n");
+		return ret;
+	}
+
+	return 0;
+}
+
+static int msm_bus_rpm_req(int ctx, uint32_t rsc_type, uint32_t key,
+	struct msm_bus_node_hw_info *hw_info, bool valid)
+{
+	struct msm_rpm_request *rpm_req;
+	int ret = 0, msg_id;
+
+	if (ctx == ACTIVE_CTX)
+		ctx = MSM_RPM_CTX_ACTIVE_SET;
+	else if (ctx == DUAL_CTX)
+		ctx = MSM_RPM_CTX_SLEEP_SET;
+
+	rpm_req = msm_rpm_create_request(ctx, rsc_type, hw_info->hw_id, 1);
+	if (rpm_req == NULL) {
+		MSM_BUS_WARN("RPM: Couldn't create RPM Request\n");
+		return -ENXIO;
+	}
+
+	if (valid) {
+		ret = msm_rpm_add_kvp_data(rpm_req, key, (const uint8_t *)
+			&hw_info->bw, (int)(sizeof(uint64_t)));
+		if (ret) {
+			MSM_BUS_WARN("RPM: Add KVP failed for RPM Req:%u\n",
+				rsc_type);
+			goto free_rpm_request;
+		}
+
+		MSM_BUS_DBG("Added Key: %d, Val: %llu, size: %zu\n", key,
+			hw_info->bw, sizeof(uint64_t));
+	} else {
+		/* Invalidate RPM requests */
+		ret = msm_rpm_add_kvp_data(rpm_req, 0, NULL, 0);
+		if (ret) {
+			MSM_BUS_WARN("RPM: Add KVP failed for RPM Req:%u\n",
+				rsc_type);
+			goto free_rpm_request;
+		}
+	}
+
+	msg_id = msm_rpm_send_request(rpm_req);
+	if (!msg_id) {
+		MSM_BUS_WARN("RPM: No message ID for req\n");
+		ret = -ENXIO;
+		goto free_rpm_request;
+	}
+
+	ret = msm_rpm_wait_for_ack(msg_id);
+	if (ret) {
+		MSM_BUS_WARN("RPM: Ack failed\n");
+		goto free_rpm_request;
+	}
+
+free_rpm_request:
+	msm_rpm_free_request(rpm_req);
+
+	return ret;
+}
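For reference, the same five-step rpm-smd pattern (create, add KVP, send, wait for ack, free) collapsed into a standalone helper. This is only a sketch built from the constants and calls already present in this file, not an additional driver entry point:

```c
/* Send a single valid master-bandwidth vote to the RPM active set. */
static int send_one_bw_vote(uint32_t master_hw_id, uint64_t bw)
{
	struct msm_rpm_request *req;
	int msg_id, ret;

	req = msm_rpm_create_request(MSM_RPM_CTX_ACTIVE_SET,
				     RPM_BUS_MASTER_REQ, master_hw_id, 1);
	if (!req)
		return -ENXIO;

	ret = msm_rpm_add_kvp_data(req, RPM_MASTER_FIELD_BW,
				   (const uint8_t *)&bw, sizeof(bw));
	if (!ret) {
		msg_id = msm_rpm_send_request(req);
		ret = msg_id ? msm_rpm_wait_for_ack(msg_id) : -ENXIO;
	}
	msm_rpm_free_request(req);
	return ret;
}
```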
+
+static int msm_bus_rpm_commit_arb(struct msm_bus_fabric_registration
+	*fab_pdata, int ctx, void *rpm_data,
+	struct commit_data *cd, bool valid)
+{
+	int i, status = 0, rsc_type, key;
+
+	MSM_BUS_DBG("Context: %d\n", ctx);
+	rsc_type = RPM_BUS_MASTER_REQ;
+	key = RPM_MASTER_FIELD_BW;
+	for (i = 0; i < fab_pdata->nmasters; i++) {
+		if (cd->mas_arb[i].dirty) {
+			MSM_BUS_DBG("MAS HWID: %d, BW: %llu DIRTY: %d\n",
+				cd->mas_arb[i].hw_id,
+				cd->mas_arb[i].bw,
+				cd->mas_arb[i].dirty);
+			status = msm_bus_rpm_req(ctx, rsc_type, key,
+				&cd->mas_arb[i], valid);
+			if (status) {
+				MSM_BUS_ERR("RPM: Req fail: mas:%d, bw:%llu\n",
+					cd->mas_arb[i].hw_id,
+					cd->mas_arb[i].bw);
+				break;
+			} else {
+				cd->mas_arb[i].dirty = false;
+			}
+		}
+	}
+
+	rsc_type = RPM_BUS_SLAVE_REQ;
+	key = RPM_SLAVE_FIELD_BW;
+	for (i = 0; i < fab_pdata->nslaves; i++) {
+		if (cd->slv_arb[i].dirty) {
+			MSM_BUS_DBG("SLV HWID: %d, BW: %llu DIRTY: %d\n",
+				cd->slv_arb[i].hw_id,
+				cd->slv_arb[i].bw,
+				cd->slv_arb[i].dirty);
+			status = msm_bus_rpm_req(ctx, rsc_type, key,
+				&cd->slv_arb[i], valid);
+			if (status) {
+				MSM_BUS_ERR("RPM: Req fail: slv:%d, bw:%llu\n",
+					cd->slv_arb[i].hw_id,
+					cd->slv_arb[i].bw);
+				break;
+			} else {
+				cd->slv_arb[i].dirty = false;
+			}
+		}
+	}
+
+	return status;
+}
+
+/**
+ * msm_bus_remote_hw_commit() - Commit the arbitration data to RPM
+ * @fab_pdata: Fabric for which the data should be committed
+ */
+int msm_bus_remote_hw_commit(struct msm_bus_fabric_registration
+	*fab_pdata, void *hw_data, void **cdata)
+{
+
+	int ret;
+	bool valid;
+	struct commit_data *dual_cd, *act_cd;
+	void *rpm_data = hw_data;
+
+	MSM_BUS_DBG("\nReached RPM Commit\n");
+	dual_cd = (struct commit_data *)cdata[DUAL_CTX];
+	act_cd = (struct commit_data *)cdata[ACTIVE_CTX];
+
+	/*
+	 * If the arb data for active set and sleep set is
+	 * different, commit both sets.
+	 * If the arb data for active set and sleep set is
+	 * the same, invalidate the sleep set.
+	 */
+	ret = msm_bus_rpm_compare_cdata(fab_pdata, act_cd, dual_cd);
+	if (!ret)
+		/* Invalidate sleep set.*/
+		valid = false;
+	else
+		valid = true;
+
+	ret = msm_bus_rpm_commit_arb(fab_pdata, DUAL_CTX, rpm_data,
+		dual_cd, valid);
+	if (ret)
+		MSM_BUS_ERR("Error comiting fabric:%d in %d ctx\n",
+			fab_pdata->id, DUAL_CTX);
+
+	valid = true;
+	ret = msm_bus_rpm_commit_arb(fab_pdata, ACTIVE_CTX, rpm_data, act_cd,
+		valid);
+	if (ret)
+		MSM_BUS_ERR("Error comiting fabric:%d in %d ctx\n",
+			fab_pdata->id, ACTIVE_CTX);
+
+	return ret;
+}
+
+int msm_bus_rpm_hw_init(struct msm_bus_fabric_registration *pdata,
+	struct msm_bus_hw_algorithm *hw_algo)
+{
+	if (!pdata->ahb)
+		pdata->rpm_enabled = 1;
+	return 0;
+}
diff -Nruw linux-4.4.115-fbx/drivers/soc/qcom/msm_bus./msm_bus_rules.c linux-4.4.115-fbx/drivers/soc/qcom/msm_bus/msm_bus_rules.c
--- linux-4.4.115-fbx/drivers/soc/qcom/msm_bus./msm_bus_rules.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/soc/qcom/msm_bus/msm_bus_rules.c	2019-01-22 16:16:26.663275022 +0100
@@ -0,0 +1,736 @@
+/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/list_sort.h>
+#include <linux/msm-bus-board.h>
+#include <linux/msm_bus_rules.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/msm-bus.h>
+#include <trace/events/trace_msm_bus.h>
+
+struct node_vote_info {
+	int id;
+	u64 ib;
+	u64 ab;
+	u64 clk;
+};
+
+struct rules_def {
+	int rule_id;
+	int num_src;
+	int state;
+	struct node_vote_info *src_info;
+	struct bus_rule_type rule_ops;
+	bool state_change;
+	struct list_head link;
+};
+
+struct rule_node_info {
+	int id;
+	void *data;
+	struct raw_notifier_head rule_notify_list;
+	struct rules_def *cur_rule;
+	int num_rules;
+	struct list_head node_rules;
+	struct list_head link;
+	struct rule_apply_rcm_info apply;
+};
+
+DEFINE_MUTEX(msm_bus_rules_lock);
+static LIST_HEAD(node_list);
+static struct rule_node_info *get_node(u32 id, void *data);
+static int node_rules_compare(void *priv, struct list_head *a,
+					struct list_head *b);
+
+#define LE(op1, op2)	(op1 <= op2)
+#define LT(op1, op2)	(op1 < op2)
+#define GE(op1, op2)	(op1 >= op2)
+#define GT(op1, op2)	(op1 > op2)
+#define NB_ID		(0x201)
+
+static struct rule_node_info *get_node(u32 id, void *data)
+{
+	struct rule_node_info *node_it = NULL;
+	struct rule_node_info *node_match = NULL;
+
+	list_for_each_entry(node_it, &node_list, link) {
+		if (node_it->id == id) {
+			if (id == NB_ID) {
+				if (node_it->data == data) {
+					node_match = node_it;
+					break;
+				}
+			} else {
+				node_match = node_it;
+				break;
+			}
+		}
+	}
+	return node_match;
+}
+
+static struct rule_node_info *gen_node(u32 id, void *data)
+{
+	struct rule_node_info *node_it = NULL;
+	struct rule_node_info *node_match = NULL;
+
+	list_for_each_entry(node_it, &node_list, link) {
+		if (node_it->id == id) {
+			node_match = node_it;
+			break;
+		}
+	}
+
+	if (!node_match) {
+		node_match = kzalloc(sizeof(struct rule_node_info), GFP_KERNEL);
+		if (!node_match) {
+			pr_err("%s: Cannot allocate memory", __func__);
+			goto exit_node_match;
+		}
+
+		node_match->id = id;
+		node_match->cur_rule = NULL;
+		node_match->num_rules = 0;
+		node_match->data = data;
+		list_add_tail(&node_match->link, &node_list);
+		INIT_LIST_HEAD(&node_match->node_rules);
+		RAW_INIT_NOTIFIER_HEAD(&node_match->rule_notify_list);
+		pr_debug("Added new node %d to list\n", id);
+	}
+exit_node_match:
+	return node_match;
+}
+
+static bool do_compare_op(u64 op1, u64 op2, int op)
+{
+	bool ret = false;
+
+	switch (op) {
+	case OP_LE:
+		ret = LE(op1, op2);
+		break;
+	case OP_LT:
+		ret = LT(op1, op2);
+		break;
+	case OP_GT:
+		ret = GT(op1, op2);
+		break;
+	case OP_GE:
+		ret = GE(op1, op2);
+		break;
+	case OP_NOOP:
+		ret = true;
+		break;
+	default:
+		pr_info("Invalid OP %d", op);
+		break;
+	}
+	return ret;
+}
+
+static void update_src_id_vote(struct rule_update_path_info *inp_node,
+				struct rule_node_info *rule_node)
+{
+	struct rules_def *rule;
+	int i;
+
+	list_for_each_entry(rule, &rule_node->node_rules, link) {
+		for (i = 0; i < rule->num_src; i++) {
+			if (rule->src_info[i].id == inp_node->id) {
+				rule->src_info[i].ib = inp_node->ib;
+				rule->src_info[i].ab = inp_node->ab;
+				rule->src_info[i].clk = inp_node->clk;
+			}
+		}
+	}
+}
+
+static u64 get_field(struct rules_def *rule, int src_id)
+{
+	u64 field = 0;
+	int i;
+
+	for (i = 0; i < rule->num_src; i++) {
+		switch (rule->rule_ops.src_field) {
+		case FLD_IB:
+			field += rule->src_info[i].ib;
+			break;
+		case FLD_AB:
+			field += rule->src_info[i].ab;
+			break;
+		case FLD_CLK:
+			field += rule->src_info[i].clk;
+			break;
+		}
+	}
+
+	return field;
+}
+
+static bool check_rule(struct rules_def *rule,
+			struct rule_update_path_info *inp)
+{
+	bool ret = false;
+
+	if (!rule)
+		return ret;
+
+	switch (rule->rule_ops.op) {
+	case OP_LE:
+	case OP_LT:
+	case OP_GT:
+	case OP_GE:
+	{
+		u64 src_field = get_field(rule, inp->id);
+		ret = do_compare_op(src_field, rule->rule_ops.thresh,
+							rule->rule_ops.op);
+		break;
+	}
+	default:
+		pr_err("Unsupported op %d", rule->rule_ops.op);
+		break;
+	}
+	return ret;
+}
+
+static void match_rule(struct rule_update_path_info *inp_node,
+			struct rule_node_info *node)
+{
+	struct rules_def *rule;
+	int i;
+
+	list_for_each_entry(rule, &node->node_rules, link) {
+		for (i = 0; i < rule->num_src; i++) {
+			if (rule->src_info[i].id == inp_node->id) {
+				if (check_rule(rule, inp_node)) {
+					trace_bus_rules_matches(
+					(node->cur_rule ?
+						node->cur_rule->rule_id : -1),
+					inp_node->id, inp_node->ab,
+					inp_node->ib, inp_node->clk);
+					if (rule->state ==
+						RULE_STATE_NOT_APPLIED)
+						rule->state_change = true;
+					rule->state = RULE_STATE_APPLIED;
+				} else {
+					if (rule->state ==
+						RULE_STATE_APPLIED)
+						rule->state_change = true;
+					rule->state = RULE_STATE_NOT_APPLIED;
+				}
+			}
+		}
+	}
+}
+
+static void apply_rule(struct rule_node_info *node,
+			struct list_head *output_list)
+{
+	struct rules_def *rule;
+	struct rules_def *last_rule;
+
+	last_rule = node->cur_rule;
+	node->cur_rule = NULL;
+	list_for_each_entry(rule, &node->node_rules, link) {
+		if ((rule->state == RULE_STATE_APPLIED) &&
+						!node->cur_rule)
+			node->cur_rule = rule;
+
+		if (node->id == NB_ID) {
+			if (rule->state_change) {
+				rule->state_change = false;
+				raw_notifier_call_chain(&node->rule_notify_list,
+					rule->state, (void *)&rule->rule_ops);
+			}
+		} else {
+			if ((rule->state == RULE_STATE_APPLIED) &&
+			     (node->cur_rule &&
+				(node->cur_rule->rule_id == rule->rule_id))) {
+				node->apply.id = rule->rule_ops.dst_node[0];
+				node->apply.throttle = rule->rule_ops.mode;
+				node->apply.lim_bw = rule->rule_ops.dst_bw;
+				node->apply.after_clk_commit = false;
+				if (last_rule != node->cur_rule)
+					list_add_tail(&node->apply.link,
+								output_list);
+				if (last_rule) {
+					if (node_rules_compare(NULL,
+						&last_rule->link,
+						&node->cur_rule->link) == -1)
+						node->apply.after_clk_commit =
+									true;
+				}
+			}
+			rule->state_change = false;
+		}
+	}
+
+}
+
+int msm_rules_update_path(struct list_head *input_list,
+			struct list_head *output_list)
+{
+	int ret = 0;
+	struct rule_update_path_info  *inp_node;
+	struct rule_node_info *node_it = NULL;
+
+	mutex_lock(&msm_bus_rules_lock);
+	list_for_each_entry(inp_node, input_list, link) {
+		list_for_each_entry(node_it, &node_list, link) {
+			update_src_id_vote(inp_node, node_it);
+			match_rule(inp_node, node_it);
+		}
+	}
+
+	list_for_each_entry(node_it, &node_list, link)
+		apply_rule(node_it, output_list);
+	mutex_unlock(&msm_bus_rules_lock);
+	return ret;
+}
+
+static bool ops_equal(int op1, int op2)
+{
+	bool ret = false;
+
+	switch (op1) {
+	case OP_GT:
+	case OP_GE:
+	case OP_LT:
+	case OP_LE:
+		if (abs(op1 - op2) <= 1)
+			ret = true;
+		break;
+	default:
+		ret = (op1 == op2);
+	}
+
+	return ret;
+}
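ops_equal() folds the strict and non-strict variants of each comparison into one family via enum adjacency. Assuming the ordering OP_NOOP, OP_LT, OP_LE, OP_GT, OP_GE in msm_bus_rules.h (the enum itself is not shown in this diff), |op1 - op2| <= 1 groups OP_LT with OP_LE and OP_GT with OP_GE, although it also accepts the adjacent boundary pair OP_LE/OP_GT:

```c
#include <stdio.h>
#include <stdlib.h>

/* Assumed operator ordering; the real enum lives in msm_bus_rules.h. */
enum { XOP_NOOP, XOP_LT, XOP_LE, XOP_GT, XOP_GE };

int main(void)
{
	printf("LT~LE: %d\n", abs(XOP_LT - XOP_LE) <= 1);	/* 1: same family */
	printf("GT~GE: %d\n", abs(XOP_GT - XOP_GE) <= 1);	/* 1: same family */
	printf("LE~GT: %d\n", abs(XOP_LE - XOP_GT) <= 1);	/* 1: boundary quirk */
	return 0;
}
```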
+
+static bool is_throttle_rule(int mode)
+{
+	bool ret = true;
+
+	if (mode == THROTTLE_OFF)
+		ret = false;
+
+	return ret;
+}
+
+static int node_rules_compare(void *priv, struct list_head *a,
+					struct list_head *b)
+{
+	struct rules_def *ra = container_of(a, struct rules_def, link);
+	struct rules_def *rb = container_of(b, struct rules_def, link);
+	int ret = -1;
+	int64_t th_diff = 0;
+
+
+	if (ra->rule_ops.mode == rb->rule_ops.mode) {
+		if (ops_equal(ra->rule_ops.op, rb->rule_ops.op)) {
+			if ((ra->rule_ops.op == OP_LT) ||
+				(ra->rule_ops.op == OP_LE)) {
+				th_diff = ra->rule_ops.thresh -
+						rb->rule_ops.thresh;
+				if (th_diff > 0)
+					ret = 1;
+				else
+					ret = -1;
+			} else if ((ra->rule_ops.op == OP_GT) ||
+					(ra->rule_ops.op == OP_GE)) {
+				th_diff = rb->rule_ops.thresh -
+							ra->rule_ops.thresh;
+				if (th_diff > 0)
+					ret = 1;
+				else
+					ret = -1;
+			}
+		} else
+			ret = ra->rule_ops.op - rb->rule_ops.op;
+	} else if (is_throttle_rule(ra->rule_ops.mode) &&
+				is_throttle_rule(rb->rule_ops.mode)) {
+		if (ra->rule_ops.mode == THROTTLE_ON)
+			ret = -1;
+		else
+			ret = 1;
+	} else if ((ra->rule_ops.mode == THROTTLE_OFF) &&
+		is_throttle_rule(rb->rule_ops.mode)) {
+		ret = 1;
+	} else if (is_throttle_rule(ra->rule_ops.mode) &&
+		(rb->rule_ops.mode == THROTTLE_OFF)) {
+		ret = -1;
+	}
+
+	return ret;
+}
+
+static void print_rules(struct rule_node_info *node_it)
+{
+	struct rules_def *node_rule = NULL;
+	int i;
+
+	if (!node_it) {
+		pr_err("%s: no node for found", __func__);
+		return;
+	}
+
+	pr_info("\n Now printing rules for Node %d  cur rule %d\n",
+			node_it->id,
+			(node_it->cur_rule ? node_it->cur_rule->rule_id : -1));
+	list_for_each_entry(node_rule, &node_it->node_rules, link) {
+		pr_info("\n num Rules %d  rule Id %d\n",
+				node_it->num_rules, node_rule->rule_id);
+		pr_info("Rule: src_field %d\n", node_rule->rule_ops.src_field);
+		for (i = 0; i < node_rule->rule_ops.num_src; i++)
+			pr_info("Rule: src %d\n",
+					node_rule->rule_ops.src_id[i]);
+		for (i = 0; i < node_rule->rule_ops.num_dst; i++)
+			pr_info("Rule: dst %d dst_bw %llu\n",
+						node_rule->rule_ops.dst_node[i],
+						node_rule->rule_ops.dst_bw);
+		pr_info("Rule: thresh %llu op %d mode %d State %d\n",
+					node_rule->rule_ops.thresh,
+					node_rule->rule_ops.op,
+					node_rule->rule_ops.mode,
+					node_rule->state);
+	}
+}
+
+void print_all_rules(void)
+{
+	struct rule_node_info *node_it = NULL;
+
+	mutex_lock(&msm_bus_rules_lock);
+	list_for_each_entry(node_it, &node_list, link)
+		print_rules(node_it);
+	mutex_unlock(&msm_bus_rules_lock);
+}
+
+void print_rules_buf(char *buf, int max_buf)
+{
+	struct rule_node_info *node_it = NULL;
+	struct rules_def *node_rule = NULL;
+	int i;
+	int cnt = 0;
+
+	mutex_lock(&msm_bus_rules_lock);
+	list_for_each_entry(node_it, &node_list, link) {
+		cnt += scnprintf(buf + cnt, max_buf - cnt,
+			"\n Now printing rules for Node %d cur_rule %d\n",
+			node_it->id,
+			(node_it->cur_rule ? node_it->cur_rule->rule_id : -1));
+		list_for_each_entry(node_rule, &node_it->node_rules, link) {
+			cnt += scnprintf(buf + cnt, max_buf - cnt,
+				"\nNum Rules:%d ruleId %d STATE:%d change:%d\n",
+				node_it->num_rules, node_rule->rule_id,
+				node_rule->state, node_rule->state_change);
+			cnt += scnprintf(buf + cnt, max_buf - cnt,
+				"Src_field %d\n",
+				node_rule->rule_ops.src_field);
+			for (i = 0; i < node_rule->rule_ops.num_src; i++)
+				cnt += scnprintf(buf + cnt, max_buf - cnt,
+					"Src %d Cur Ib %llu Ab %llu\n",
+					node_rule->rule_ops.src_id[i],
+					node_rule->src_info[i].ib,
+					node_rule->src_info[i].ab);
+			for (i = 0; i < node_rule->rule_ops.num_dst; i++)
+				cnt += scnprintf(buf + cnt, max_buf - cnt,
+					"Dst %d dst_bw %llu\n",
+					node_rule->rule_ops.dst_node[0],
+					node_rule->rule_ops.dst_bw);
+			cnt += scnprintf(buf + cnt, max_buf - cnt,
+					"Thresh %llu op %d mode %d\n",
+					node_rule->rule_ops.thresh,
+					node_rule->rule_ops.op,
+					node_rule->rule_ops.mode);
+		}
+	}
+	mutex_unlock(&msm_bus_rules_lock);
+}
+
+static int copy_rule(struct bus_rule_type *src, struct rules_def *node_rule,
+			struct notifier_block *nb)
+{
+	int i;
+	int ret = 0;
+
+	memcpy(&node_rule->rule_ops, src,
+				sizeof(struct bus_rule_type));
+	node_rule->rule_ops.src_id = kzalloc(
+			(sizeof(int) * node_rule->rule_ops.num_src),
+							GFP_KERNEL);
+	if (!node_rule->rule_ops.src_id) {
+		pr_err("%s:Failed to allocate for src_id",
+					__func__);
+		return -ENOMEM;
+	}
+	memcpy(node_rule->rule_ops.src_id, src->src_id,
+				sizeof(int) * src->num_src);
+
+
+	if (!nb) {
+		node_rule->rule_ops.dst_node = kzalloc(
+			(sizeof(int) * node_rule->rule_ops.num_dst),
+						GFP_KERNEL);
+		if (!node_rule->rule_ops.dst_node) {
+			pr_err("%s:Failed to allocate for src_id",
+							__func__);
+			return -ENOMEM;
+		}
+		memcpy(node_rule->rule_ops.dst_node, src->dst_node,
+						sizeof(int) * src->num_dst);
+	}
+
+	node_rule->num_src = src->num_src;
+	node_rule->src_info = kzalloc(
+		(sizeof(struct node_vote_info) * node_rule->rule_ops.num_src),
+							GFP_KERNEL);
+	if (!node_rule->src_info) {
+		pr_err("%s:Failed to allocate for src_id",
+						__func__);
+		return -ENOMEM;
+	}
+	for (i = 0; i < src->num_src; i++)
+		node_rule->src_info[i].id = src->src_id[i];
+
+	return ret;
+}
+
+static bool __rule_register(int num_rules, struct bus_rule_type *rule,
+					struct notifier_block *nb)
+{
+	struct rule_node_info *node = NULL;
+	int i, j;
+	struct rules_def *node_rule = NULL;
+	int num_dst = 0;
+	bool reg_success = true;
+
+	if (num_rules <= 0)
+		return false;
+
+	for (i = 0; i < num_rules; i++) {
+		if (nb)
+			num_dst = 1;
+		else
+			num_dst = rule[i].num_dst;
+
+		for (j = 0; j < num_dst; j++) {
+			int id = 0;
+
+			if (nb)
+				id = NB_ID;
+			else
+				id = rule[i].dst_node[j];
+
+			node = gen_node(id, nb);
+			if (!node) {
+				pr_info("Error getting rule");
+				reg_success = false;
+				goto exit_rule_register;
+			}
+			node_rule = kzalloc(sizeof(struct rules_def),
+						GFP_KERNEL);
+			if (!node_rule) {
+				pr_err("%s: Failed to allocate for rule",
+								__func__);
+				reg_success = false;
+				goto exit_rule_register;
+			}
+
+			if (copy_rule(&rule[i], node_rule, nb)) {
+				pr_err("Error copying rule");
+				reg_success = false;
+				goto exit_rule_register;
+			}
+
+			node_rule->rule_id = node->num_rules++;
+			if (nb)
+				node->data = nb;
+
+			list_add_tail(&node_rule->link, &node->node_rules);
+		}
+	}
+	list_sort(NULL, &node->node_rules, node_rules_compare);
+	if (nb && nb != node->rule_notify_list.head)
+		raw_notifier_chain_register(&node->rule_notify_list, nb);
+exit_rule_register:
+	return reg_success;
+}
+
+static int comp_rules(struct bus_rule_type *rulea, struct bus_rule_type *ruleb)
+{
+	int ret = 1;
+
+	if (rulea->num_src == ruleb->num_src)
+		ret = memcmp(rulea->src_id, ruleb->src_id,
+				(sizeof(int) * rulea->num_src));
+	if (!ret && (rulea->num_dst == ruleb->num_dst))
+		ret = memcmp(rulea->dst_node, ruleb->dst_node,
+				(sizeof(int) * rulea->num_dst));
+	if (ret || (rulea->dst_bw != ruleb->dst_bw) ||
+		(rulea->op != ruleb->op) || (rulea->thresh != ruleb->thresh))
+		ret = 1;
+	return ret;
+}
+
+void msm_rule_register(int num_rules, struct bus_rule_type *rule,
+					struct notifier_block *nb)
+{
+	if (!rule || num_rules <= 0)
+		return;
+
+	mutex_lock(&msm_bus_rules_lock);
+	__rule_register(num_rules, rule, nb);
+	mutex_unlock(&msm_bus_rules_lock);
+}
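A hedged sketch of registering one rule with the API above: throttle a destination node when the aggregate instantaneous-bandwidth (IB) vote of a source node exceeds a threshold. The node IDs and bandwidth figures are hypothetical; the field names follow struct bus_rule_type as it is used throughout this file:

```c
static int src_ids[] = { 22 };		/* hypothetical master node */
static int dst_nodes[] = { 512 };	/* hypothetical slave node */

static struct bus_rule_type sample_rule = {
	.num_src   = 1,
	.src_id    = src_ids,
	.src_field = FLD_IB,		/* sum of IB votes on the sources */
	.op        = OP_GT,
	.thresh    = 1000000000ULL,	/* 1 GB/s trip point */
	.num_dst   = 1,
	.dst_node  = dst_nodes,
	.dst_bw    = 400000000ULL,	/* limit applied while tripped */
	.mode      = THROTTLE_ON,
};

/* msm_rule_register(1, &sample_rule, NULL); */
```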
+
+static bool __rule_unregister(int num_rules, struct bus_rule_type *rule,
+					struct notifier_block *nb)
+{
+	int i = 0;
+	struct rule_node_info *node = NULL;
+	struct rule_node_info *node_tmp = NULL;
+	struct rules_def *node_rule;
+	struct rules_def *node_rule_tmp;
+	bool match_found = false;
+
+	if (num_rules <= 0)
+		return false;
+
+	if (nb) {
+		node = get_node(NB_ID, nb);
+		if (!node) {
+			pr_err("%s: Can't find node", __func__);
+			goto exit_unregister_rule;
+		}
+		match_found = true;
+		list_for_each_entry_safe(node_rule, node_rule_tmp,
+					&node->node_rules, link) {
+			if (comp_rules(&node_rule->rule_ops,
+					&rule[i]) == 0) {
+				list_del(&node_rule->link);
+				kfree(node_rule);
+				match_found = true;
+				node->num_rules--;
+				list_sort(NULL,
+					&node->node_rules,
+					node_rules_compare);
+				break;
+			}
+		}
+		if (!node->num_rules)
+			raw_notifier_chain_unregister(
+					&node->rule_notify_list, nb);
+	} else {
+		for (i = 0; i < num_rules; i++) {
+			match_found = false;
+
+			list_for_each_entry(node, &node_list, link) {
+				list_for_each_entry_safe(node_rule,
+				node_rule_tmp, &node->node_rules, link) {
+					if (comp_rules(&node_rule->rule_ops,
+						&rule[i]) == 0) {
+						list_del(&node_rule->link);
+						kfree(node_rule);
+						match_found = true;
+						node->num_rules--;
+						list_sort(NULL,
+							&node->node_rules,
+							node_rules_compare);
+						break;
+					}
+				}
+			}
+		}
+	}
+
+	list_for_each_entry_safe(node, node_tmp,
+					&node_list, link) {
+		if (!node->num_rules) {
+			pr_debug("Deleting Rule node %d", node->id);
+			list_del(&node->link);
+			kfree(node);
+		}
+	}
+exit_unregister_rule:
+	return match_found;
+}
+
+void msm_rule_unregister(int num_rules, struct bus_rule_type *rule,
+					struct notifier_block *nb)
+{
+	if (!rule || num_rules <= 0)
+		return;
+
+	mutex_lock(&msm_bus_rules_lock);
+	__rule_unregister(num_rules, rule, nb);
+	mutex_unlock(&msm_bus_rules_lock);
+}
+
+bool msm_rule_update(struct bus_rule_type *old_rule,
+			struct bus_rule_type *new_rule,
+			struct notifier_block *nb)
+{
+	bool rc = true;
+
+	if (!old_rule || !new_rule) {
+		pr_err("%s:msm_rule_update: void rules, error\n", __func__);
+		return false;
+	}
+	mutex_lock(&msm_bus_rules_lock);
+	if (!__rule_unregister(1, old_rule, nb)) {
+		pr_err("%s:msm_rule_update: failed to unregister old rule\n",
+				__func__);
+		rc = false;
+		goto exit_rule_update;
+	}
+
+	if (!__rule_register(1, new_rule, nb)) {
+		/*
+		 * Registering new rule has failed for some reason, attempt
+		 * to re-register the old rule and return error.
+		 */
+		pr_err("%s:msm_rule_update: failed to register new rule\n",
+				__func__);
+		__rule_register(1, old_rule, nb);
+		rc = false;
+	}
+exit_rule_update:
+	mutex_unlock(&msm_bus_rules_lock);
+	return rc;
+}
+
+void msm_rule_evaluate_rules(int node)
+{
+	struct msm_bus_client_handle *handle;
+
+	handle = msm_bus_scale_register(node, node, "tmp-rm", false);
+	if (!handle)
+		return;
+	msm_bus_scale_update_bw(handle, 0, 0);
+	msm_bus_scale_unregister(handle);
+}
+
+bool msm_rule_are_rules_registered(void)
+{
+	bool ret = false;
+
+	mutex_lock(&msm_bus_rules_lock);
+	if (list_empty(&node_list))
+		ret = false;
+	else
+		ret = true;
+	mutex_unlock(&msm_bus_rules_lock);
+	return ret;
+}
+
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/soc/qcom/msm_glink_pkt.c	2019-01-22 16:16:26.663275022 +0100
@@ -0,0 +1,1535 @@
+/* Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+/*
+ * G-link Packet Driver -- Provides a binary G-link non-muxed packet port
+ *                       interface.
+ */
+
+#include <linux/slab.h>
+#include <linux/cdev.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/device.h>
+#include <linux/sched.h>
+#include <linux/mutex.h>
+#include <linux/uaccess.h>
+#include <linux/workqueue.h>
+#include <linux/platform_device.h>
+#include <linux/poll.h>
+#include <asm/ioctls.h>
+#include <linux/mm.h>
+#include <linux/of.h>
+#include <linux/ipc_logging.h>
+#include <linux/termios.h>
+
+#include <soc/qcom/glink.h>
+/* This Limit ensures that auto queue will not exhaust memory on remote side */
+#define MAX_PENDING_GLINK_PKT 5
+#define MODULE_NAME "msm_glinkpkt"
+#define DEVICE_NAME "glinkpkt"
+#define WAKEUPSOURCE_TIMEOUT (2000) /* two seconds */
+#define CLOSE_WAIT_TIMEOUT 1000 /* one second */
+
+#define GLINK_PKT_IOCTL_MAGIC (0xC3)
+
+#define GLINK_PKT_IOCTL_QUEUE_RX_INTENT \
+	_IOW(GLINK_PKT_IOCTL_MAGIC, 0, unsigned int)
+
+#define SMD_DTR_SIG BIT(31)
+#define SMD_CTS_SIG BIT(30)
+#define SMD_CD_SIG BIT(29)
+#define SMD_RI_SIG BIT(28)
+
+#define map_to_smd_trans_signal(sigs) \
+	do { \
+		sigs &= 0x0fff; \
+		if (sigs & TIOCM_DTR) \
+			sigs |= SMD_DTR_SIG; \
+		if (sigs & TIOCM_RTS) \
+			sigs |= SMD_CTS_SIG; \
+		if (sigs & TIOCM_CD) \
+			sigs |= SMD_CD_SIG; \
+		if (sigs & TIOCM_RI) \
+			sigs |= SMD_RI_SIG; \
+	} while (0)
+
+#define map_from_smd_trans_signal(sigs) \
+	do { \
+		if (sigs & SMD_DTR_SIG) \
+			sigs |= TIOCM_DSR; \
+		if (sigs & SMD_CTS_SIG) \
+			sigs |= TIOCM_CTS; \
+		if (sigs & SMD_CD_SIG) \
+			sigs |= TIOCM_CD; \
+		if (sigs & SMD_RI_SIG) \
+			sigs |= TIOCM_RI; \
+		sigs &= 0x0fff; \
+	} while (0)
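A standalone demonstration of the forward mapping above: the low TIOCM bits survive the 0x0fff mask, and TIOCM_DTR (0x002 on Linux) gains the SMD DTR bit in position 31:

```c
#include <stdint.h>
#include <stdio.h>

#define SMD_DTR_SIG (1u << 31)

int main(void)
{
	uint32_t sigs = 0x002;			/* TIOCM_DTR on Linux */

	sigs &= 0x0fff;
	if (sigs & 0x002)			/* TIOCM_DTR set? */
		sigs |= SMD_DTR_SIG;
	printf("mapped sigs = 0x%08x\n", sigs);	/* prints 0x80000002 */
	return 0;
}
```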
+
+/**
+ * glink_pkt_dev - G-Link packet device structure
+ * dev_list:	G-Link packets device list.
+ * open_cfg:	Transport configuration used to open Logical channel.
+ * dev_name:	Device node name used by the clients.
+ * handle:	Opaque Channel handle returned by G-Link.
+ * ch_lock:	Per channel lock for synchronization.
+ * ch_state:	flag used to check the channel state.
+ * cdev:	structure to the internal character device.
+ * devicep:	Pointer to the G-Link pkt class device structure.
+ * i:		Index to this character device.
+ * ref_cnt:	number of references to this device.
+ * poll_mode:	flag to check polling mode.
+ * ch_read_wait_queue:	reader thread wait queue.
+ * ch_opened_wait_queue: open thread wait queue.
+ * ch_closed_wait_queue: close thread wait queue.
+ * pkt_list:	The pending Rx packets list.
+ * pkt_list_lock: Lock to protect @pkt_list.
+ * pa_ws:	Packet arrival Wakeup source.
+ * packet_arrival_work:	Hold the wakeup source worker info.
+ * pa_spinlock:	Packet arrival spinlock.
+ * ws_locked:	flag to check wakeup source state.
+ * sigs_updated: flag to check signal update.
+ * open_time_wait: wait time for channel to fully open.
+ * in_reset:	flag to check SSR state.
+ * link_info:	structure to hold link information.
+ * link_state_handle: handle to get link state events.
+ * link_up:	flag to check link is up or not.
+ */
+struct glink_pkt_dev {
+	struct list_head dev_list;
+	struct glink_open_config open_cfg;
+	const char *dev_name;
+	void *handle;
+	struct mutex ch_lock;
+	unsigned ch_state;
+
+	struct cdev cdev;
+	struct device *devicep;
+
+	int i;
+	int ref_cnt;
+	int poll_mode;
+
+	wait_queue_head_t ch_read_wait_queue;
+	wait_queue_head_t ch_opened_wait_queue;
+	wait_queue_head_t ch_closed_wait_queue;
+	struct list_head pkt_list;
+	spinlock_t pkt_list_lock;
+
+	struct wakeup_source pa_ws;	/* Packet Arrival Wakeup Source */
+	struct work_struct packet_arrival_work;
+	spinlock_t pa_spinlock;
+	int ws_locked;
+	int sigs_updated;
+	int open_time_wait;
+	int in_reset;
+
+	struct glink_link_info link_info;
+	void *link_state_handle;
+	bool link_up;
+	bool auto_intent_enabled;
+};
+
+/**
+ * glink_rx_pkt - Pointer to Rx packet
+ * list:	Chain the Rx packets into list.
+ * data:	pointer to the Rx data.
+ * pkt_priv:	private pointer to the Rx packet.
+ * size:	The size of received data.
+ */
+struct glink_rx_pkt {
+	struct list_head list;
+	const void *data;
+	const void *pkt_priv;
+	size_t size;
+};
+
+/**
+ * queue_rx_intent_work - Work item to Queue Rx intent.
+ * size:	The size of intent to be queued.
+ * devp:	Pointer to the device structure.
+ * work:	Hold the worker function information.
+ */
+struct queue_rx_intent_work {
+	size_t intent_size;
+	struct glink_pkt_dev *devp;
+	struct work_struct work;
+};
+
+/**
+ * notify_state_work - Work item to notify channel state.
+ * state:	Channel new state.
+ * devp:	Pointer to the device structure.
+ * work:	Hold the worker function information.
+ */
+struct notify_state_work {
+	unsigned state;
+	struct glink_pkt_dev *devp;
+	void *handle;
+	struct work_struct work;
+};
+
+static DEFINE_MUTEX(glink_pkt_dev_lock_lha1);
+static LIST_HEAD(glink_pkt_dev_list);
+static DEFINE_MUTEX(glink_pkt_driver_lock_lha1);
+static LIST_HEAD(glink_pkt_driver_list);
+
+struct class *glink_pkt_classp;
+static dev_t glink_pkt_number;
+struct workqueue_struct *glink_pkt_wq;
+
+static int num_glink_pkt_ports;
+
+#define GLINK_PKT_IPC_LOG_PAGE_CNT 2
+static void *glink_pkt_ilctxt;
+
+enum {
+	GLINK_PKT_STATUS = 1U << 0,
+};
+
+static int msm_glink_pkt_debug_mask;
+module_param_named(debug_mask, msm_glink_pkt_debug_mask,
+		int, S_IRUGO | S_IWUSR | S_IWGRP);
+
+static void glink_pkt_queue_rx_intent_worker(struct work_struct *work);
+static void glink_pkt_notify_state_worker(struct work_struct *work);
+static bool glink_pkt_read_avail(struct glink_pkt_dev *devp);
+
+#define DEBUG
+
+#ifdef DEBUG
+
+#define GLINK_PKT_LOG_STRING(x...) \
+do { \
+	if (glink_pkt_ilctxt) \
+		ipc_log_string(glink_pkt_ilctxt, "<GLINK_PKT>: "x); \
+} while (0)
+
+#define GLINK_PKT_INFO(x...) \
+do { \
+	if (msm_glink_pkt_debug_mask & GLINK_PKT_STATUS) \
+		pr_info("Status: "x); \
+	GLINK_PKT_LOG_STRING(x); \
+} while (0)
+
+#define GLINK_PKT_ERR(x...) \
+do { \
+	pr_err_ratelimited("<GLINK_PKT> err: "x); \
+	GLINK_PKT_LOG_STRING(x); \
+} while (0)
+
+#else
+#define GLINK_PKT_INFO(x...) do {} while (0)
+#define GLINK_PKT_ERR(x...) do {} while (0)
+#endif
+
+static ssize_t open_timeout_store(struct device *d,
+				  struct device_attribute *attr,
+				  const char *buf,
+				  size_t n)
+{
+	struct glink_pkt_dev *devp;
+	long tmp;
+
+	mutex_lock(&glink_pkt_dev_lock_lha1);
+	list_for_each_entry(devp, &glink_pkt_dev_list, dev_list) {
+		if (devp->devicep == d) {
+			if (!kstrtol(buf, 0, &tmp)) {
+				devp->open_time_wait = tmp;
+				mutex_unlock(&glink_pkt_dev_lock_lha1);
+				return n;
+			} else {
+				mutex_unlock(&glink_pkt_dev_lock_lha1);
+				pr_err("%s: unable to convert: %s to an int\n",
+						__func__, buf);
+				return -EINVAL;
+			}
+		}
+	}
+	mutex_unlock(&glink_pkt_dev_lock_lha1);
+	GLINK_PKT_ERR("%s: unable to match device to valid port\n", __func__);
+	return -EINVAL;
+}
+
+static ssize_t open_timeout_show(struct device *d,
+				 struct device_attribute *attr,
+				 char *buf)
+{
+	struct glink_pkt_dev *devp;
+
+	mutex_lock(&glink_pkt_dev_lock_lha1);
+	list_for_each_entry(devp, &glink_pkt_dev_list, dev_list) {
+		if (devp->devicep == d) {
+			mutex_unlock(&glink_pkt_dev_lock_lha1);
+			return snprintf(buf, PAGE_SIZE, "%d\n",
+					devp->open_time_wait);
+		}
+	}
+	mutex_unlock(&glink_pkt_dev_lock_lha1);
+	GLINK_PKT_ERR("%s: unable to match device to valid port\n", __func__);
+	return -EINVAL;
+
+}
+
+static DEVICE_ATTR(open_timeout, 0664, open_timeout_show, open_timeout_store);
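As a usage note: assuming the port device ends up registered under the glinkpkt class later in this driver, writing a decimal value such as `echo 20 > /sys/class/glinkpkt/<port>/open_timeout` would set the per-port open wait, and reading the same attribute returns the current value.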
+
+/**
+ * packet_arrival_worker() - wakeup source timeout worker fn
+ * work:	Work struct queued
+ *
+ * This function used to keep the system awake to allow
+ * userspace client to read the received packet.
+ */
+static void packet_arrival_worker(struct work_struct *work)
+{
+	struct glink_pkt_dev *devp;
+	unsigned long flags;
+
+	devp = container_of(work, struct glink_pkt_dev,
+				    packet_arrival_work);
+	mutex_lock(&devp->ch_lock);
+	spin_lock_irqsave(&devp->pa_spinlock, flags);
+	if (devp->ws_locked) {
+		GLINK_PKT_INFO("%s locking glink_pkt_dev id:%d wakeup source\n",
+			__func__, devp->i);
+		/*
+		 * Keep system awake long enough to allow userspace client
+		 * to process the packet.
+		 */
+		__pm_wakeup_event(&devp->pa_ws, WAKEUPSOURCE_TIMEOUT);
+	}
+	spin_unlock_irqrestore(&devp->pa_spinlock, flags);
+	mutex_unlock(&devp->ch_lock);
+}
+
+/**
+ * glink_pkt_link_state_cb() - Callback to receive link state updates
+ * @cb_info: Information containing link & its state.
+ * @priv: Private data passed during the link state registration.
+ *
+ * This function is called by the GLINK core to notify the Glink Pkt drivers
+ * regarding the link state updates. This function is registered with the
+ * GLINK core by Glink pkt drivers with glink_register_link_state_cb().
+ */
+static void glink_pkt_link_state_cb(struct glink_link_state_cb_info *cb_info,
+				      void *priv)
+{
+	struct glink_pkt_dev *devp = (struct glink_pkt_dev *)priv;
+
+	if (!cb_info)
+		return;
+	if (!devp)
+		return;
+
+	if (cb_info->link_state == GLINK_LINK_STATE_UP) {
+		devp->link_up = true;
+		wake_up_interruptible(&devp->ch_opened_wait_queue);
+	} else if (cb_info->link_state == GLINK_LINK_STATE_DOWN) {
+		devp->link_up = false;
+	}
+}
+
+/**
+ * glink_pkt_notify_rx() - Rx data Callback from G-Link core layer
+ * handle:	Opaque Channel handle returned by GLink.
+ * priv:	private pointer to the channel.
+ * pkt_priv:	private pointer to the packet.
+ * ptr:	Pointer to the Rx data.
+ * size:	Size of the Rx data.
+ *
+ * This callback function is notified on receiving the data from
+ * remote channel.
+ */
+void glink_pkt_notify_rx(void *handle, const void *priv,
+				const void *pkt_priv,
+				const void *ptr, size_t size)
+{
+	struct glink_rx_pkt *pkt = NULL;
+	struct glink_pkt_dev *devp = (struct glink_pkt_dev *)priv;
+	unsigned long flags;
+
+	GLINK_PKT_INFO("%s(): priv[%p] data[%p] size[%zu]\n",
+		   __func__, pkt_priv, (char *)ptr, size);
+
+	pkt = kzalloc(sizeof(*pkt), GFP_ATOMIC);
+	if (!pkt) {
+		GLINK_PKT_ERR("%s: memory allocation failed\n", __func__);
+		return;
+	}
+
+	pkt->data = ptr;
+	pkt->pkt_priv = pkt_priv;
+	pkt->size = size;
+	spin_lock_irqsave(&devp->pkt_list_lock, flags);
+	list_add_tail(&pkt->list, &devp->pkt_list);
+	spin_unlock_irqrestore(&devp->pkt_list_lock, flags);
+
+	spin_lock_irqsave(&devp->pa_spinlock, flags);
+	__pm_stay_awake(&devp->pa_ws);
+	devp->ws_locked = 1;
+	spin_unlock_irqrestore(&devp->pa_spinlock, flags);
+	wake_up(&devp->ch_read_wait_queue);
+	schedule_work(&devp->packet_arrival_work);
+}
+
+/**
+ * glink_pkt_notify_tx_done() - Tx done callback function
+ * handle:	Opaque Channel handle returned by GLink.
+ * priv:	private pointer to the channel.
+ * pkt_priv:	private pointer to the packet.
+ * ptr:	Pointer to the Tx data.
+ *
+ * This  callback function is notified when the remote core
+ * signals the Rx done to the local core.
+ */
+void glink_pkt_notify_tx_done(void *handle, const void *priv,
+				const void *pkt_priv, const void *ptr)
+{
+	GLINK_PKT_INFO("%s(): priv[%p] pkt_priv[%p] ptr[%p]\n",
+					__func__, priv, pkt_priv, ptr);
+	/* Free the Tx buffer allocated in glink_pkt_write() */
+	kfree(ptr);
+}
+
+/**
+ * glink_pkt_notify_state() - state notification callback function
+ * handle:	Opaque Channel handle returned by GLink.
+ * priv:	private pointer to the channel.
+ * event:	channel state
+ *
+ * This callback function is notified when the remote channel alters
+ * the channel state and send the event to local G-Link core.
+ */
+void glink_pkt_notify_state(void *handle, const void *priv, unsigned event)
+{
+	struct glink_pkt_dev *devp = (struct glink_pkt_dev *)priv;
+	struct notify_state_work *work_item;
+
+	if ((devp->handle != NULL) && (devp->handle != handle)) {
+		GLINK_PKT_ERR("%s() event[%d] on incorrect channel [%s]\n",
+				__func__, event, devp->open_cfg.name);
+		return;
+	}
+	GLINK_PKT_INFO("%s(): event[%d] on [%s]\n", __func__, event,
+						devp->open_cfg.name);
+
+	work_item = kzalloc(sizeof(*work_item), GFP_ATOMIC);
+	if (!work_item) {
+		GLINK_PKT_ERR("%s() failed allocate work_item\n", __func__);
+		return;
+	}
+
+	work_item->state = event;
+	work_item->devp = devp;
+	work_item->handle = handle;
+	INIT_WORK(&work_item->work, glink_pkt_notify_state_worker);
+	queue_work(glink_pkt_wq, &work_item->work);
+}
+
+/**
+ * glink_pkt_rmt_rx_intent_req_cb() - Remote Rx intent request callback
+ * handle:	Opaque Channel handle returned by GLink.
+ * priv:	private pointer to the channel.
+ * sz:	the size of the requested Rx intent
+ *
+ * This callback function is notified when remote client
+ * request the intent from local client.
+ */
+bool glink_pkt_rmt_rx_intent_req_cb(void *handle, const void *priv, size_t sz)
+{
+	struct queue_rx_intent_work *work_item;
+	int pending_pkt_count = 0;
+	struct glink_rx_pkt *pkt = NULL;
+	unsigned long flags;
+	struct glink_pkt_dev *devp = (struct glink_pkt_dev *)priv;
+
+	GLINK_PKT_INFO("%s(): QUEUE RX INTENT to receive size[%zu]\n",
+		   __func__, sz);
+	if (devp->auto_intent_enabled) {
+		spin_lock_irqsave(&devp->pkt_list_lock, flags);
+		list_for_each_entry(pkt, &devp->pkt_list, list)
+			pending_pkt_count++;
+		spin_unlock_irqrestore(&devp->pkt_list_lock, flags);
+		if (pending_pkt_count > MAX_PENDING_GLINK_PKT) {
+			GLINK_PKT_ERR("%s failed, max limit reached\n",
+					__func__);
+			return false;
+		}
+	} else {
+		return false;
+	}
+
+	work_item = kzalloc(sizeof(*work_item), GFP_ATOMIC);
+	if (!work_item) {
+		GLINK_PKT_ERR("%s failed allocate work_item\n", __func__);
+		return false;
+	}
+
+	work_item->intent_size = sz;
+	work_item->devp = devp;
+	INIT_WORK(&work_item->work, glink_pkt_queue_rx_intent_worker);
+	queue_work(glink_pkt_wq, &work_item->work);
+
+	return true;
+}
+
+/**
+ * glink_pkt_notify_rx_sigs() - signals callback
+ * handle:      Opaque Channel handle returned by GLink.
+ * priv:        private pointer to the channel.
+ * old_sigs:    signal before modification
+ * new_sigs:    signal after modification
+ *
+ * This callback function is notified when remote client
+ * updated the signal.
+ */
+void glink_pkt_notify_rx_sigs(void *handle, const void *priv,
+			uint32_t old_sigs, uint32_t new_sigs)
+{
+	struct glink_pkt_dev *devp = (struct glink_pkt_dev *)priv;
+	GLINK_PKT_INFO("%s(): sigs old[%x] new[%x]\n",
+				__func__, old_sigs, new_sigs);
+	mutex_lock(&devp->ch_lock);
+	devp->sigs_updated = true;
+	mutex_unlock(&devp->ch_lock);
+	wake_up(&devp->ch_read_wait_queue);
+}
+
+/**
+ * glink_pkt_queue_rx_intent_worker() - Queue Rx worker function
+ *
+ * work:	Pointer to the work struct
+ *
+ * This function is used to queue the RX intent which
+ * can sleep during allocation of larger buffers.
+ */
+static void glink_pkt_queue_rx_intent_worker(struct work_struct *work)
+{
+	int ret;
+	struct queue_rx_intent_work *work_item =
+				container_of(work,
+				struct queue_rx_intent_work, work);
+	struct glink_pkt_dev *devp = work_item->devp;
+
+	if (!devp) {
+		GLINK_PKT_ERR("%s: Invalid device\n", __func__);
+		kfree(work_item);
+		return;
+	}
+	mutex_lock(&devp->ch_lock);
+	if (!devp->handle) {
+		GLINK_PKT_ERR("%s: Invalid device Handle\n", __func__);
+		mutex_unlock(&devp->ch_lock);
+		kfree(work_item);
+		return;
+	}
+
+	ret = glink_queue_rx_intent(devp->handle, devp, work_item->intent_size);
+	mutex_unlock(&devp->ch_lock);
+	GLINK_PKT_INFO("%s: Triggered with size[%zu] ret[%d]\n",
+				__func__, work_item->intent_size, ret);
+	if (ret)
+		GLINK_PKT_ERR("%s queue_rx_intent failed\n", __func__);
+	kfree(work_item);
+}
+
+/**
+ * glink_pkt_notify_state_worker() - Notify state worker function
+ *
+ * work:	Pointer to the work struct
+ *
+ * This function is used to notify the channel state and update the
+ * internal data structure.
+ */
+static void glink_pkt_notify_state_worker(struct work_struct *work)
+{
+	struct notify_state_work *work_item =
+				container_of(work,
+				struct notify_state_work, work);
+	struct glink_pkt_dev *devp = work_item->devp;
+	unsigned event = work_item->state;
+	void *handle = work_item->handle;
+
+	if (!devp) {
+		GLINK_PKT_ERR("%s: Invalid device Handle\n", __func__);
+		kfree(work_item);
+		return;
+	}
+
+	GLINK_PKT_INFO("%s(): event[%d] on [%s]\n", __func__,
+				event, devp->open_cfg.name);
+	mutex_lock(&devp->ch_lock);
+	devp->ch_state = event;
+	if (event == GLINK_CONNECTED) {
+		if (!devp->handle) {
+			GLINK_PKT_ERR("%s: Invalid device handle\n", __func__);
+			goto exit;
+		}
+		devp->in_reset = 0;
+		wake_up_interruptible(&devp->ch_opened_wait_queue);
+	} else if (event == GLINK_REMOTE_DISCONNECTED) {
+		devp->in_reset = 1;
+		wake_up(&devp->ch_read_wait_queue);
+		wake_up_interruptible(&devp->ch_opened_wait_queue);
+	} else if (event == GLINK_LOCAL_DISCONNECTED) {
+		if (devp->handle == handle)
+			devp->handle = NULL;
+		wake_up_interruptible(&devp->ch_closed_wait_queue);
+	}
+exit:
+	mutex_unlock(&devp->ch_lock);
+	kfree(work_item);
+}
+
+/**
+ * glink_pkt_read_avail() - check for pending packets to read
+ * devp:	pointer to G-Link packet device.
+ *
+ * This function checks whether any pending data packets are
+ * available to read.
+ */
+static bool glink_pkt_read_avail(struct glink_pkt_dev *devp)
+{
+	bool list_is_empty;
+	unsigned long flags;
+
+	spin_lock_irqsave(&devp->pkt_list_lock, flags);
+	list_is_empty = list_empty(&devp->pkt_list);
+	spin_unlock_irqrestore(&devp->pkt_list_lock, flags);
+	return !list_is_empty;
+}
+
+/**
+ * glink_pkt_read() - read() syscall for the glink_pkt device
+ * file:	Pointer to the file structure.
+ * buf:	Pointer to the userspace buffer.
+ * count:	Number of bytes to read from the file.
+ * ppos:	Pointer to the position into the file.
+ *
+ * This function reads data from the glink pkt device when a
+ * userspace client performs a read() system call. All input arguments are
+ * validated by the virtual file system before calling this function.
+ */
+ssize_t glink_pkt_read(struct file *file,
+		       char __user *buf,
+		       size_t count,
+		       loff_t *ppos)
+{
+	int ret = 0;
+	struct glink_pkt_dev *devp;
+	struct glink_rx_pkt *pkt = NULL;
+	unsigned long flags;
+
+	devp = file->private_data;
+
+	if (!devp) {
+		GLINK_PKT_ERR("%s on NULL glink_pkt_dev\n", __func__);
+		return -EINVAL;
+	}
+	if (!devp->handle) {
+		GLINK_PKT_ERR("%s on a closed glink_pkt_dev id:%d\n",
+			__func__, devp->i);
+		return -EINVAL;
+	}
+	if (devp->in_reset) {
+		GLINK_PKT_ERR("%s: notifying reset for glink_pkt_dev id:%d\n",
+			__func__, devp->i);
+		return -ENETRESET;
+	}
+
+	mutex_lock(&devp->ch_lock);
+	if (!glink_pkt_read_avail(devp) &&
+				!glink_rx_intent_exists(devp->handle, count)) {
+		ret  = glink_queue_rx_intent(devp->handle, devp, count);
+		if (ret) {
+			GLINK_PKT_ERR("%s: failed to queue intent ret[%d]\n",
+					__func__, ret);
+			mutex_unlock(&devp->ch_lock);
+			return ret;
+		}
+	}
+	mutex_unlock(&devp->ch_lock);
+
+	GLINK_PKT_INFO("Begin %s on glink_pkt_dev id:%d buffer_size %zu\n",
+		__func__, devp->i, count);
+
+	ret = wait_event_interruptible(devp->ch_read_wait_queue,
+				     !devp->handle || devp->in_reset ||
+				     glink_pkt_read_avail(devp));
+	if (devp->in_reset) {
+		GLINK_PKT_ERR("%s: notifying reset for glink_pkt_dev id:%d\n",
+			__func__, devp->i);
+		return -ENETRESET;
+	}
+	if (!devp->handle) {
+		GLINK_PKT_ERR("%s on a closed glink_pkt_dev id:%d\n",
+			__func__, devp->i);
+		return -EINVAL;
+	}
+	if (ret < 0) {
+		/* qualify error message */
+		if (ret != -ERESTARTSYS) {
+			/* we get this anytime a signal comes in */
+			GLINK_PKT_ERR("%s: wait on dev id:%d ret %i\n",
+					__func__, devp->i, ret);
+		}
+		return ret;
+	}
+
+	spin_lock_irqsave(&devp->pkt_list_lock, flags);
+	pkt = list_first_entry(&devp->pkt_list, struct glink_rx_pkt, list);
+	if (pkt->size > count) {
+		GLINK_PKT_ERR("%s: Small Buff on dev Id:%d-[%zu > %zu]\n",
+				__func__, devp->i, pkt->size, count);
+		spin_unlock_irqrestore(&devp->pkt_list_lock, flags);
+		return -ETOOSMALL;
+	}
+	list_del(&pkt->list);
+	spin_unlock_irqrestore(&devp->pkt_list_lock, flags);
+
+	ret = copy_to_user(buf, pkt->data, pkt->size);
+	if (ret) {
+		GLINK_PKT_ERR(
+		"%s copy_to_user failed ret[%d] on dev id:%d size %zu\n",
+		 __func__, ret, devp->i, pkt->size);
+		spin_lock_irqsave(&devp->pkt_list_lock, flags);
+		list_add_tail(&pkt->list, &devp->pkt_list);
+		spin_unlock_irqrestore(&devp->pkt_list_lock, flags);
+		return -EFAULT;
+	}
+
+	ret = pkt->size;
+	glink_rx_done(devp->handle, pkt->data, false);
+	kfree(pkt);
+
+	mutex_lock(&devp->ch_lock);
+	spin_lock_irqsave(&devp->pa_spinlock, flags);
+	if (devp->poll_mode && !glink_pkt_read_avail(devp)) {
+		__pm_relax(&devp->pa_ws);
+		devp->ws_locked = 0;
+		devp->poll_mode = 0;
+		GLINK_PKT_INFO("%s unlocked pkt_dev id:%d wakeup_source\n",
+			__func__, devp->i);
+	}
+	spin_unlock_irqrestore(&devp->pa_spinlock, flags);
+	mutex_unlock(&devp->ch_lock);
+
+	GLINK_PKT_INFO("End %s on glink_pkt_dev id:%d ret[%d]\n",
+				__func__, devp->i, ret);
+	return ret;
+}
+
+/**
+ * glink_pkt_write() - write() syscall for the glink_pkt device
+ * file:	Pointer to the file structure.
+ * buf:	Pointer to the userspace buffer.
+ * count:	Number of bytes to write to the file.
+ * ppos:	Pointer to the position into the file.
+ *
+ * This function writes data to the glink pkt device when a
+ * userspace client performs a write() system call. All input arguments are
+ * validated by the virtual file system before calling this function.
+ */
+ssize_t glink_pkt_write(struct file *file,
+		       const char __user *buf,
+		       size_t count,
+		       loff_t *ppos)
+{
+	int ret = 0;
+	struct glink_pkt_dev *devp;
+	void *data;
+
+	devp = file->private_data;
+
+	if (!count) {
+		GLINK_PKT_ERR("%s: data count is zero\n", __func__);
+		return -EINVAL;
+	}
+
+	if (!devp) {
+		GLINK_PKT_ERR("%s on NULL glink_pkt_dev\n", __func__);
+		return -EINVAL;
+	}
+	if (!devp->handle) {
+		GLINK_PKT_ERR("%s on a closed glink_pkt_dev id:%d\n",
+			__func__, devp->i);
+		return -EINVAL;
+	}
+	if (devp->in_reset) {
+		GLINK_PKT_ERR("%s: notifying reset for glink_pkt_dev id:%d\n",
+			__func__, devp->i);
+		return -ENETRESET;
+	}
+
+	GLINK_PKT_INFO("Begin %s on glink_pkt_dev id:%d buffer_size %zu\n",
+		__func__, devp->i, count);
+	data = kzalloc(count, GFP_KERNEL);
+	if (!data) {
+		GLINK_PKT_ERR("%s buffer allocation failed\n", __func__);
+		return -ENOMEM;
+	}
+
+	ret = copy_from_user(data, buf, count);
+	if (ret) {
+		GLINK_PKT_ERR(
+		"%s copy_from_user failed ret[%d] on dev id:%d size %zu\n",
+		 __func__, ret, devp->i, count);
+		kfree(data);
+		return -EFAULT;
+	}
+
+	ret = glink_tx(devp->handle, data, data, count, GLINK_TX_REQ_INTENT);
+	if (ret) {
+		GLINK_PKT_ERR("%s glink_tx failed ret[%d]\n", __func__, ret);
+		kfree(data);
+		return ret;
+	}
+
+	GLINK_PKT_INFO("Finished %s on glink_pkt_dev id:%d buffer_size %zu\n",
+		__func__, devp->i, count);
+
+	return count;
+}
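+
+/*
+ * Illustrative userspace usage (a sketch, not part of the driver): each
+ * write() sends one packet and each read() receives one packet. The node
+ * name below is an example; the actual name comes from the
+ * qcom,glinkpkt-dev-name device tree property. read() fails with
+ * -ETOOSMALL if the buffer is smaller than the pending packet and with
+ * -ENETRESET after a remote disconnect.
+ *
+ *	#include <fcntl.h>
+ *	#include <unistd.h>
+ *
+ *	char buf[4096];
+ *	int fd = open("/dev/glink_pkt_example", O_RDWR);
+ *	if (fd >= 0) {
+ *		write(fd, "ping", 4);
+ *		ssize_t n = read(fd, buf, sizeof(buf));
+ *		close(fd);
+ *	}
+ */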
+
+/**
+ * glink_pkt_poll() - poll() syscall for the glink_pkt device
+ * file:	Pointer to the file structure.
+ * wait:	Pointer to the poll table.
+ *
+ * This function polls on the glink pkt device when a
+ * userspace client performs a poll() system call. All input arguments are
+ * validated by the virtual file system before calling this function.
+ */
+static unsigned int glink_pkt_poll(struct file *file, poll_table *wait)
+{
+	struct glink_pkt_dev *devp;
+	unsigned int mask = 0;
+
+	devp = file->private_data;
+	if (!devp || !devp->handle) {
+		GLINK_PKT_ERR("%s: Invalid device handle\n", __func__);
+		return POLLERR;
+	}
+	if (devp->in_reset)
+		return POLLHUP;
+
+	devp->poll_mode = 1;
+	poll_wait(file, &devp->ch_read_wait_queue, wait);
+	mutex_lock(&devp->ch_lock);
+	if (!devp->handle) {
+		mutex_unlock(&devp->ch_lock);
+		return POLLERR;
+	}
+	if (devp->in_reset) {
+		mutex_unlock(&devp->ch_lock);
+		return POLLHUP;
+	}
+
+	if (glink_pkt_read_avail(devp)) {
+		mask |= POLLIN | POLLRDNORM;
+		GLINK_PKT_INFO("%s sets POLLIN for glink_pkt_dev id: %d\n",
+			__func__, devp->i);
+	}
+
+	if (devp->sigs_updated) {
+		mask |= POLLPRI;
+		GLINK_PKT_INFO("%s sets POLLPRI for glink_pkt_dev id: %d\n",
+			__func__, devp->i);
+	}
+	mutex_unlock(&devp->ch_lock);
+
+	return mask;
+}
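+
+/*
+ * Illustrative poll() usage from userspace (a sketch): POLLIN indicates a
+ * pending packet, POLLPRI indicates a remote signal update (cleared by a
+ * subsequent TIOCMGET), and POLLHUP is returned once the remote side has
+ * disconnected.
+ *
+ *	#include <poll.h>
+ *	#include <sys/ioctl.h>
+ *
+ *	struct pollfd pfd = { .fd = fd, .events = POLLIN | POLLPRI };
+ *	uint32_t sigs;
+ *
+ *	if (poll(&pfd, 1, -1) > 0) {
+ *		if (pfd.revents & POLLIN)
+ *			read(fd, buf, sizeof(buf));
+ *		if (pfd.revents & POLLPRI)
+ *			ioctl(fd, TIOCMGET, &sigs);
+ *	}
+ */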
+
+/**
+ * glink_pkt_tiocmset() - set the signals for glink_pkt device
+ * devp:	Pointer to the glink_pkt device structure.
+ * cmd:		IOCTL command.
+ * arg:		Arguments to the ioctl call.
+ *
+ * This function sets the signals on the glink pkt device
+ * when a userspace client performs an ioctl() system call with TIOCMBIS,
+ * TIOCMBIC or TIOCMSET.
+ */
+static int glink_pkt_tiocmset(struct glink_pkt_dev *devp, unsigned int cmd,
+							unsigned long arg)
+{
+	int ret;
+	uint32_t sigs;
+	uint32_t val;
+
+	ret = get_user(val, (uint32_t *)arg);
+	if (ret)
+		return ret;
+	map_to_smd_trans_signal(val);
+	ret = glink_sigs_local_get(devp->handle, &sigs);
+	if (ret < 0) {
+		GLINK_PKT_ERR("%s: Get signals failed[%d]\n", __func__, ret);
+		return ret;
+	}
+	switch (cmd) {
+	case TIOCMBIS:
+		sigs |= val;
+		break;
+	case TIOCMBIC:
+		sigs &= ~val;
+		break;
+	case TIOCMSET:
+		sigs = val;
+		break;
+	}
+	ret = glink_sigs_set(devp->handle, sigs);
+	GLINK_PKT_INFO("%s: sigs[0x%x] ret[%d]\n", __func__, sigs, ret);
+	return ret;
+}
+
+/**
+ * glink_pkt_ioctl() - ioctl() syscall for the glink_pkt device
+ * file:	Pointer to the file structure.
+ * cmd:		IOCTL command.
+ * arg:		Arguments to the ioctl call.
+ *
+ * This function handles ioctls on the glink pkt device when a
+ * userspace client performs an ioctl() system call. All input arguments are
+ * validated by the virtual file system before calling this function.
+ */
+static long glink_pkt_ioctl(struct file *file, unsigned int cmd,
+					     unsigned long arg)
+{
+	int ret;
+	struct glink_pkt_dev *devp;
+	uint32_t size = 0;
+	uint32_t sigs = 0;
+
+	devp = file->private_data;
+	if (!devp || !devp->handle) {
+		GLINK_PKT_ERR("%s: Invalid device handle\n", __func__);
+		return -EINVAL;
+	}
+
+	mutex_lock(&devp->ch_lock);
+	switch (cmd) {
+	case TIOCMGET:
+		devp->sigs_updated = false;
+		ret = glink_sigs_remote_get(devp->handle, &sigs);
+		GLINK_PKT_INFO("%s: TIOCMGET ret[%d] sigs[0x%x]\n",
+					__func__, ret, sigs);
+		map_from_smd_trans_signal(sigs);
+		if (!ret)
+			ret = put_user(sigs, (uint32_t *)arg);
+		break;
+	case TIOCMSET:
+	case TIOCMBIS:
+	case TIOCMBIC:
+		ret = glink_pkt_tiocmset(devp, cmd, arg);
+		break;
+
+	case GLINK_PKT_IOCTL_QUEUE_RX_INTENT:
+		ret = get_user(size, (uint32_t *)arg);
+		if (ret)
+			break;
+		GLINK_PKT_INFO("%s: intent size[%u]\n", __func__, size);
+		devp->auto_intent_enabled = false;
+		ret  = glink_queue_rx_intent(devp->handle, devp, size);
+		if (ret) {
+			GLINK_PKT_ERR("%s: failed to QUEUE_RX_INTENT ret[%d]\n",
+					__func__, ret);
+		}
+		break;
+	default:
+		GLINK_PKT_ERR("%s: Unrecognized ioctl command 0x%x\n",
+					__func__, cmd);
+		ret = -ENOIOCTLCMD;
+		break;
+	}
+	mutex_unlock(&devp->ch_lock);
+
+	return ret;
+}
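+
+/*
+ * Illustrative ioctl() usage (a sketch): the TIOCM* commands mirror the
+ * serial-port signal interface, and GLINK_PKT_IOCTL_QUEUE_RX_INTENT
+ * (defined in the matching header, not shown here) pre-queues an RX
+ * intent of the given size and turns off automatic intent queuing.
+ *
+ *	uint32_t sigs = TIOCM_DTR;
+ *	uint32_t size = 8192;
+ *
+ *	ioctl(fd, TIOCMBIS, &sigs);
+ *	ioctl(fd, TIOCMGET, &sigs);
+ *	ioctl(fd, GLINK_PKT_IOCTL_QUEUE_RX_INTENT, &size);
+ */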
+
+/**
+ * glink_pkt_open() - open() syscall for the glink_pkt device
+ * inode:	Pointer to the inode structure.
+ * file:	Pointer to the file structure.
+ *
+ * This function opens the glink pkt device when a
+ * userspace client performs an open() system call. All input arguments are
+ * validated by the virtual file system before calling this function.
+ */
+int glink_pkt_open(struct inode *inode, struct file *file)
+{
+	int ret = 0;
+	struct glink_pkt_dev *devp = NULL;
+	int wait_time_msecs;
+
+	devp = container_of(inode->i_cdev, struct glink_pkt_dev, cdev);
+	if (!devp) {
+		GLINK_PKT_ERR("%s on NULL device\n", __func__);
+		return -EINVAL;
+	}
+	GLINK_PKT_INFO("Begin %s() on dev id:%d open_time_wait[%d] by [%s]\n",
+		__func__, devp->i, devp->open_time_wait, current->comm);
+	file->private_data = devp;
+	wait_time_msecs = devp->open_time_wait * 1000;
+
+	mutex_lock(&devp->ch_lock);
+	/* waiting for previous close to complete */
+	if (devp->handle && devp->ref_cnt == 0) {
+		mutex_unlock(&devp->ch_lock);
+		if (wait_time_msecs < 0) {
+			ret = wait_event_interruptible(
+				devp->ch_opened_wait_queue,
+				devp->ch_state == GLINK_LOCAL_DISCONNECTED);
+		} else {
+			ret = wait_event_interruptible_timeout(
+				devp->ch_opened_wait_queue,
+				devp->ch_state == GLINK_LOCAL_DISCONNECTED,
+				msecs_to_jiffies(wait_time_msecs));
+			if (ret >= 0)
+				wait_time_msecs = jiffies_to_msecs(ret);
+			if (ret == 0)
+				ret = -ETIMEDOUT;
+		}
+		if (ret < 0) {
+			GLINK_PKT_ERR(
+			"%s:failed for prev close on dev id:%d rc:%d\n",
+			__func__, devp->i, ret);
+			return ret;
+		}
+		mutex_lock(&devp->ch_lock);
+	}
+
+	if (!devp->handle) {
+		mutex_unlock(&devp->ch_lock);
+		/*
+		 * Wait for the link to come up completely so we know
+		 * the remote side is ready.
+		 */
+		if (wait_time_msecs < 0) {
+			ret = wait_event_interruptible(
+				devp->ch_opened_wait_queue,
+				devp->link_up == true);
+		} else {
+			ret = wait_event_interruptible_timeout(
+				devp->ch_opened_wait_queue,
+				devp->link_up == true,
+				msecs_to_jiffies(wait_time_msecs));
+			if (ret >= 0)
+				wait_time_msecs = jiffies_to_msecs(ret);
+			if (ret == 0)
+				ret = -ETIMEDOUT;
+		}
+		mutex_lock(&devp->ch_lock);
+		if (ret < 0) {
+			GLINK_PKT_ERR(
+				"%s: Link not up edge[%s] name[%s] rc:%d\n",
+				__func__, devp->open_cfg.edge,
+				devp->open_cfg.name, ret);
+			devp->handle = NULL;
+			goto error;
+		}
+
+		devp->handle = glink_open(&devp->open_cfg);
+		if (IS_ERR_OR_NULL(devp->handle)) {
+			GLINK_PKT_ERR(
+				"%s: open failed xprt[%s] edge[%s] name[%s]\n",
+				__func__, devp->open_cfg.transport,
+				devp->open_cfg.edge, devp->open_cfg.name);
+			ret = -ENODEV;
+			devp->handle = NULL;
+			goto error;
+		}
+
+		mutex_unlock(&devp->ch_lock);
+		/*
+		 * Wait for the channel to reach the fully open state so we
+		 * know the remote client is ready.
+		 */
+		if (wait_time_msecs < 0) {
+			ret = wait_event_interruptible(
+				devp->ch_opened_wait_queue,
+				devp->ch_state == GLINK_CONNECTED);
+		} else {
+			ret = wait_event_interruptible_timeout(
+				devp->ch_opened_wait_queue,
+				devp->ch_state == GLINK_CONNECTED,
+				msecs_to_jiffies(wait_time_msecs));
+			if (ret == 0)
+				ret = -ETIMEDOUT;
+		}
+		mutex_lock(&devp->ch_lock);
+		if (ret < 0) {
+			GLINK_PKT_ERR("%s: open failed on dev id:%d rc:%d\n",
+					__func__, devp->i, ret);
+			glink_close(devp->handle);
+			devp->handle = NULL;
+			goto error;
+		}
+	}
+	ret = 0;
+	devp->ref_cnt++;
+
+error:
+	mutex_unlock(&devp->ch_lock);
+	GLINK_PKT_INFO("END %s() on dev id:%d ref_cnt[%d] ret[%d]\n",
+			__func__, devp->i, devp->ref_cnt, ret);
+	return ret;
+}
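+
+/*
+ * Note: the total time open() may block is bounded by open_time_wait
+ * (120 seconds by default, see glink_pkt_init_add_device()), tunable per
+ * device through the open_timeout sysfs attribute; a negative value makes
+ * open() wait indefinitely. Example (the path is illustrative):
+ *
+ *	echo 30 > /sys/class/glinkpkt/glink_pkt_example/open_timeout
+ */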
+
+/**
+ * pop_rx_pkt() - return first pkt from rx pkt_list
+ * devp:	pointer to G-Link packet device.
+ *
+ * This function returns the first item from the rx pkt_list, or NULL if
+ * the list is empty.
+ */
+struct glink_rx_pkt *pop_rx_pkt(struct glink_pkt_dev *devp)
+{
+	unsigned long flags;
+	struct glink_rx_pkt *pkt = NULL;
+
+	spin_lock_irqsave(&devp->pkt_list_lock, flags);
+	if (!list_empty(&devp->pkt_list)) {
+		pkt = list_first_entry(&devp->pkt_list,
+				struct glink_rx_pkt, list);
+		list_del(&pkt->list);
+	}
+	spin_unlock_irqrestore(&devp->pkt_list_lock, flags);
+	return pkt;
+}
+
+/**
+ * glink_pkt_release() - release operation on glink_pkt device
+ * inode:	Pointer to the inode structure.
+ * file:	Pointer to the file structure.
+ *
+ * This function releases the glink pkt device when a
+ * userspace client performs a close() system call. All input arguments are
+ * validated by the virtual file system before calling this function.
+ */
+int glink_pkt_release(struct inode *inode, struct file *file)
+{
+	int ret = 0;
+	struct glink_pkt_dev *devp = file->private_data;
+	unsigned long flags;
+	struct glink_rx_pkt *pkt;
+	GLINK_PKT_INFO("%s() on dev id:%d by [%s] ref_cnt[%d]\n",
+			__func__, devp->i, current->comm, devp->ref_cnt);
+
+	mutex_lock(&devp->ch_lock);
+	if (devp->ref_cnt > 0)
+		devp->ref_cnt--;
+
+	if (devp->handle && devp->ref_cnt == 0) {
+		while ((pkt = pop_rx_pkt(devp))) {
+			glink_rx_done(devp->handle, pkt->data, false);
+			kfree(pkt);
+		}
+		wake_up(&devp->ch_read_wait_queue);
+		wake_up_interruptible(&devp->ch_opened_wait_queue);
+		ret = glink_close(devp->handle);
+		devp->handle = NULL;
+		if (ret)  {
+			GLINK_PKT_ERR("%s: close failed ret[%d]\n",
+						__func__, ret);
+		} else {
+			mutex_unlock(&devp->ch_lock);
+			ret = wait_event_interruptible_timeout(
+				devp->ch_closed_wait_queue,
+				devp->ch_state == GLINK_LOCAL_DISCONNECTED,
+				msecs_to_jiffies(CLOSE_WAIT_TIMEOUT));
+			if (ret == 0)
+				GLINK_PKT_ERR(
+				"%s(): close TIMEOUT on dev_id[%d]\n",
+				__func__, devp->i);
+			mutex_lock(&devp->ch_lock);
+		}
+		devp->poll_mode = 0;
+		spin_lock_irqsave(&devp->pa_spinlock, flags);
+		if (devp->ws_locked) {
+			__pm_relax(&devp->pa_ws);
+			devp->ws_locked = 0;
+		}
+		spin_unlock_irqrestore(&devp->pa_spinlock, flags);
+		devp->sigs_updated = false;
+		devp->in_reset = 0;
+	}
+	mutex_unlock(&devp->ch_lock);
+
+	if (flush_work(&devp->packet_arrival_work))
+		GLINK_PKT_INFO("%s: Flushed work for glink_pkt_dev id:%d\n",
+			__func__, devp->i);
+	return ret;
+}
+
+static const struct file_operations glink_pkt_fops = {
+	.owner = THIS_MODULE,
+	.open = glink_pkt_open,
+	.release = glink_pkt_release,
+	.read = glink_pkt_read,
+	.write = glink_pkt_write,
+	.poll = glink_pkt_poll,
+	.unlocked_ioctl = glink_pkt_ioctl,
+	.compat_ioctl = glink_pkt_ioctl,
+};
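+
+/*
+ * The same handler backs unlocked_ioctl and compat_ioctl: every command
+ * takes a pointer to a 32-bit value, so no extra 32/64-bit translation is
+ * needed here.
+ */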
+
+/**
+ * glink_pkt_init_add_device() - Initialize G-Link packet device and add cdev
+ * devp:	pointer to G-Link packet device.
+ * i:		index of the G-Link packet device.
+ *
+ * return:	0 on success, standard Linux error codes otherwise.
+ */
+static int glink_pkt_init_add_device(struct glink_pkt_dev *devp, int i)
+{
+	int ret = 0;
+
+	devp->open_cfg.notify_rx = glink_pkt_notify_rx;
+	devp->open_cfg.notify_tx_done = glink_pkt_notify_tx_done;
+	devp->open_cfg.notify_state = glink_pkt_notify_state;
+	devp->open_cfg.notify_rx_intent_req = glink_pkt_rmt_rx_intent_req_cb;
+	devp->open_cfg.notify_rx_sigs = glink_pkt_notify_rx_sigs;
+	devp->open_cfg.options |= GLINK_OPT_INITIAL_XPORT;
+	devp->open_cfg.priv = devp;
+
+	devp->link_up = false;
+	devp->link_info.edge = devp->open_cfg.edge;
+	devp->link_info.transport = devp->open_cfg.transport;
+	devp->link_info.glink_link_state_notif_cb =
+				glink_pkt_link_state_cb;
+	devp->i = i;
+	devp->poll_mode = 0;
+	devp->auto_intent_enabled = true;
+	devp->ws_locked = 0;
+	devp->ch_state = GLINK_LOCAL_DISCONNECTED;
+	/* Default timeout for open wait is 120sec */
+	devp->open_time_wait = 120;
+	mutex_init(&devp->ch_lock);
+	init_waitqueue_head(&devp->ch_read_wait_queue);
+	init_waitqueue_head(&devp->ch_opened_wait_queue);
+	init_waitqueue_head(&devp->ch_closed_wait_queue);
+	spin_lock_init(&devp->pa_spinlock);
+	INIT_LIST_HEAD(&devp->pkt_list);
+	spin_lock_init(&devp->pkt_list_lock);
+	wakeup_source_init(&devp->pa_ws, devp->dev_name);
+	INIT_WORK(&devp->packet_arrival_work, packet_arrival_worker);
+
+	devp->link_state_handle =
+		glink_register_link_state_cb(&devp->link_info, devp);
+	if (IS_ERR_OR_NULL(devp->link_state_handle)) {
+		GLINK_PKT_ERR(
+			"%s: link state cb reg. failed edge[%s] name[%s]\n",
+			__func__, devp->open_cfg.edge, devp->open_cfg.name);
+		ret = PTR_ERR(devp->link_state_handle);
+		return ret;
+	}
+	cdev_init(&devp->cdev, &glink_pkt_fops);
+	devp->cdev.owner = THIS_MODULE;
+
+	ret = cdev_add(&devp->cdev, (glink_pkt_number + i), 1);
+	if (IS_ERR_VALUE(ret)) {
+		GLINK_PKT_ERR("%s: cdev_add() failed for dev id:%d ret:%i\n",
+			__func__, i, ret);
+		wakeup_source_trash(&devp->pa_ws);
+		return ret;
+	}
+
+	devp->devicep = device_create(glink_pkt_classp,
+			      NULL,
+			      (glink_pkt_number + i),
+			      NULL,
+			      devp->dev_name);
+
+	if (IS_ERR_OR_NULL(devp->devicep)) {
+		GLINK_PKT_ERR("%s: device_create() failed for dev id:%d\n",
+			__func__, i);
+		ret = -ENOMEM;
+		cdev_del(&devp->cdev);
+		wakeup_source_trash(&devp->pa_ws);
+		return ret;
+	}
+
+	if (device_create_file(devp->devicep, &dev_attr_open_timeout))
+		GLINK_PKT_ERR("%s: device_create_file() failed for id:%d\n",
+			__func__, i);
+
+	mutex_lock(&glink_pkt_dev_lock_lha1);
+	list_add(&devp->dev_list, &glink_pkt_dev_list);
+	mutex_unlock(&glink_pkt_dev_lock_lha1);
+
+	return ret;
+}
+
+/**
+ * glink_pkt_core_deinit() - De-initialization for this module
+ *
+ * This function removes all the devices, frees their memory and
+ * unregisters the char device region.
+ */
+static void glink_pkt_core_deinit(void)
+{
+	struct glink_pkt_dev *glink_pkt_devp;
+	struct glink_pkt_dev *index;
+
+	mutex_lock(&glink_pkt_dev_lock_lha1);
+	list_for_each_entry_safe(glink_pkt_devp, index, &glink_pkt_dev_list,
+							dev_list) {
+		if (glink_pkt_devp->link_state_handle)
+			glink_unregister_link_state_cb(
+				glink_pkt_devp->link_state_handle);
+		cdev_del(&glink_pkt_devp->cdev);
+		list_del(&glink_pkt_devp->dev_list);
+		device_destroy(glink_pkt_classp,
+			       MKDEV(MAJOR(glink_pkt_number),
+			       glink_pkt_devp->i));
+		kfree(glink_pkt_devp);
+	}
+	mutex_unlock(&glink_pkt_dev_lock_lha1);
+
+	if (!IS_ERR_OR_NULL(glink_pkt_classp))
+		class_destroy(glink_pkt_classp);
+
+	unregister_chrdev_region(MAJOR(glink_pkt_number), num_glink_pkt_ports);
+}
+
+/**
+ * glink_pkt_alloc_chrdev_region() - allocate the char device region
+ *
+ * This function allocates the G-Link packet character-device region and
+ * creates the device class.
+ */
+static int glink_pkt_alloc_chrdev_region(void)
+{
+	int ret;
+
+	ret = alloc_chrdev_region(&glink_pkt_number,
+			       0,
+			       num_glink_pkt_ports,
+			       DEVICE_NAME);
+	if (IS_ERR_VALUE(ret)) {
+		GLINK_PKT_ERR("%s: alloc_chrdev_region() failed ret:%i\n",
+			__func__, ret);
+		return ret;
+	}
+
+	glink_pkt_classp = class_create(THIS_MODULE, DEVICE_NAME);
+	if (IS_ERR(glink_pkt_classp)) {
+		GLINK_PKT_ERR("%s: class_create() failed ENOMEM\n", __func__);
+		ret = -ENOMEM;
+		unregister_chrdev_region(MAJOR(glink_pkt_number),
+						num_glink_pkt_ports);
+		return ret;
+	}
+
+	return 0;
+}
+
+/**
+ * parse_glinkpkt_devicetree() - parse device tree binding
+ *
+ * node:	pointer to device tree node
+ * glink_pkt_devp: pointer to GLINK PACKET device
+ *
+ * Return:	0 on success, -ENODEV on failure.
+ */
+static int parse_glinkpkt_devicetree(struct device_node *node,
+					struct glink_pkt_dev *glink_pkt_devp)
+{
+	char *key;
+
+	key = "qcom,glinkpkt-transport";
+	glink_pkt_devp->open_cfg.transport = of_get_property(node, key, NULL);
+	if (!glink_pkt_devp->open_cfg.transport)
+		goto error;
+	GLINK_PKT_INFO("%s transport = %s\n", __func__,
+			glink_pkt_devp->open_cfg.transport);
+
+	key = "qcom,glinkpkt-edge";
+	glink_pkt_devp->open_cfg.edge = of_get_property(node, key, NULL);
+	if (!glink_pkt_devp->open_cfg.edge)
+		goto error;
+	GLINK_PKT_INFO("%s edge = %s\n", __func__,
+			glink_pkt_devp->open_cfg.edge);
+
+	key = "qcom,glinkpkt-ch-name";
+	glink_pkt_devp->open_cfg.name = of_get_property(node, key, NULL);
+	if (!glink_pkt_devp->open_cfg.name)
+		goto error;
+	GLINK_PKT_INFO("%s ch_name = %s\n", __func__,
+			glink_pkt_devp->open_cfg.name);
+
+	key = "qcom,glinkpkt-dev-name";
+	glink_pkt_devp->dev_name = of_get_property(node, key, NULL);
+	if (!glink_pkt_devp->dev_name)
+		goto error;
+	GLINK_PKT_INFO("%s dev_name = %s\n", __func__,
+			glink_pkt_devp->dev_name);
+	return 0;
+
+error:
+	GLINK_PKT_ERR("%s: missing key: %s\n", __func__, key);
+	return -ENODEV;
+}
+
+/**
+ * glink_pkt_devicetree_init() - Initialize and add the char devices
+ *
+ * pdev:	Pointer to device tree data.
+ *
+ * return:	0 on success, -ENODEV on failure.
+ */
+static int glink_pkt_devicetree_init(struct platform_device *pdev)
+{
+	int ret;
+	int i = 0;
+	struct device_node *node;
+	struct glink_pkt_dev *glink_pkt_devp;
+	int subnode_num = 0;
+
+	for_each_child_of_node(pdev->dev.of_node, node)
+		++subnode_num;
+	if (!subnode_num) {
+		GLINK_PKT_ERR("%s subnode_num = %d\n", __func__, subnode_num);
+		return 0;
+	}
+
+	num_glink_pkt_ports = subnode_num;
+
+	ret = glink_pkt_alloc_chrdev_region();
+	if (ret) {
+		GLINK_PKT_ERR("%s: chrdev_region allocation failed ret:%i\n",
+			__func__, ret);
+		return ret;
+	}
+
+	for_each_child_of_node(pdev->dev.of_node, node) {
+		glink_pkt_devp = kzalloc(sizeof(*glink_pkt_devp),
+						GFP_KERNEL);
+		if (!glink_pkt_devp) {
+			GLINK_PKT_ERR("%s: allocation failed id:%d\n",
+						__func__, i);
+			ret = -ENOMEM;
+			goto error_destroy;
+		}
+
+		ret = parse_glinkpkt_devicetree(node, glink_pkt_devp);
+		if (ret) {
+			GLINK_PKT_ERR("%s: failed to parse devicetree %d\n",
+						__func__, i);
+			kfree(glink_pkt_devp);
+			goto error_destroy;
+		}
+
+		ret = glink_pkt_init_add_device(glink_pkt_devp, i);
+		if (ret < 0) {
+			GLINK_PKT_ERR("%s: add device failed idx:%d ret=%d\n",
+					__func__, i, ret);
+			kfree(glink_pkt_devp);
+			goto error_destroy;
+		}
+		i++;
+	}
+
+	GLINK_PKT_INFO("G-Link Packet Port Driver Initialized.\n");
+	return 0;
+
+error_destroy:
+	glink_pkt_core_deinit();
+	return ret;
+}
+
+/**
+ * msm_glink_pkt_probe() - Probe a G-Link packet device
+ *
+ * pdev:	Pointer to device tree data.
+ *
+ * return:	0 on success, standard Linux error codes on error.
+ *
+ * This function is called when the underlying device tree driver registers
+ * a platform device, mapped to a G-Link packet device.
+ */
+static int msm_glink_pkt_probe(struct platform_device *pdev)
+{
+	int ret;
+
+	if (pdev) {
+		if (pdev->dev.of_node) {
+			GLINK_PKT_INFO("%s device tree implementation\n",
+							__func__);
+			ret = glink_pkt_devicetree_init(pdev);
+			if (ret)
+				GLINK_PKT_ERR("%s: device tree init failed\n",
+					__func__);
+		}
+	}
+
+	return 0;
+}
+
+static const struct of_device_id msm_glink_pkt_match_table[] = {
+	{ .compatible = "qcom,glinkpkt" },
+	{},
+};
+
+static struct platform_driver msm_glink_pkt_driver = {
+	.probe = msm_glink_pkt_probe,
+	.driver = {
+		.name = MODULE_NAME,
+		.owner = THIS_MODULE,
+		.of_match_table = msm_glink_pkt_match_table,
+	 },
+};
+
+/**
+ * glink_pkt_init() - Initialization function for this module
+ *
+ * returns:	0 on success, standard Linux error code otherwise.
+ */
+static int __init glink_pkt_init(void)
+{
+	int ret;
+
+	INIT_LIST_HEAD(&glink_pkt_dev_list);
+	INIT_LIST_HEAD(&glink_pkt_driver_list);
+	ret = platform_driver_register(&msm_glink_pkt_driver);
+	if (ret) {
+		GLINK_PKT_ERR("%s: msm_glink_driver register failed %d\n",
+			 __func__, ret);
+		return ret;
+	}
+
+	glink_pkt_ilctxt = ipc_log_context_create(GLINK_PKT_IPC_LOG_PAGE_CNT,
+						"glink_pkt", 0);
+	glink_pkt_wq = create_singlethread_workqueue("glink_pkt_wq");
+	if (!glink_pkt_wq) {
+		GLINK_PKT_ERR("%s: Error creating glink_pkt_wq\n", __func__);
+		platform_driver_unregister(&msm_glink_pkt_driver);
+		return -ENOMEM;
+	}
+	return 0;
+}
+
+/**
+ * glink_pkt_cleanup() - Exit function for this module
+ *
+ * This function is used to cleanup the module during the exit.
+ */
+static void __exit glink_pkt_cleanup(void)
+{
+	glink_pkt_core_deinit();
+}
+
+module_init(glink_pkt_init);
+module_exit(glink_pkt_cleanup);
+
+MODULE_DESCRIPTION("MSM G-Link Packet Port");
+MODULE_LICENSE("GPL v2");
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/soc/qcom/msm_performance.c	2019-10-29 09:26:24.817214667 +0100
@@ -0,0 +1,2782 @@
+/*
+ * Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/init.h>
+#include <linux/notifier.h>
+#include <linux/cpu.h>
+#include <linux/moduleparam.h>
+#include <linux/cpumask.h>
+#include <linux/cpufreq.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include <linux/tick.h>
+#include <trace/events/power.h>
+#include <linux/sysfs.h>
+#include <linux/module.h>
+#include <linux/input.h>
+#include <linux/kthread.h>
+
+static unsigned int use_input_evts_with_hi_slvt_detect;
+static struct mutex managed_cpus_lock;
+
+/* Maximum number of clusters that this module will manage */
+static unsigned int num_clusters;
+struct cluster {
+	cpumask_var_t cpus;
+	/* Number of CPUs to maintain online */
+	int max_cpu_request;
+	/* To track CPUs that the module decides to offline */
+	cpumask_var_t offlined_cpus;
+	/* stats for load detection */
+	/* IO */
+	u64 last_io_check_ts;
+	unsigned int iowait_enter_cycle_cnt;
+	unsigned int iowait_exit_cycle_cnt;
+	spinlock_t iowait_lock;
+	unsigned int cur_io_busy;
+	bool io_change;
+	/* CPU */
+	unsigned int mode;
+	bool mode_change;
+	u64 last_mode_check_ts;
+	unsigned int single_enter_cycle_cnt;
+	unsigned int single_exit_cycle_cnt;
+	unsigned int multi_enter_cycle_cnt;
+	unsigned int multi_exit_cycle_cnt;
+	spinlock_t mode_lock;
+	/* Perf Cluster Peak Loads */
+	unsigned int perf_cl_peak;
+	u64 last_perf_cl_check_ts;
+	bool perf_cl_detect_state_change;
+	unsigned int perf_cl_peak_enter_cycle_cnt;
+	unsigned int perf_cl_peak_exit_cycle_cnt;
+	spinlock_t perf_cl_peak_lock;
+	/* Tunables */
+	unsigned int single_enter_load;
+	unsigned int pcpu_multi_enter_load;
+	unsigned int perf_cl_peak_enter_load;
+	unsigned int single_exit_load;
+	unsigned int pcpu_multi_exit_load;
+	unsigned int perf_cl_peak_exit_load;
+	unsigned int single_enter_cycles;
+	unsigned int single_exit_cycles;
+	unsigned int multi_enter_cycles;
+	unsigned int multi_exit_cycles;
+	unsigned int perf_cl_peak_enter_cycles;
+	unsigned int perf_cl_peak_exit_cycles;
+	unsigned int current_freq;
+	spinlock_t timer_lock;
+	unsigned int timer_rate;
+	struct timer_list mode_exit_timer;
+	struct timer_list perf_cl_peak_mode_exit_timer;
+};
+
+struct input_events {
+	unsigned int evt_x_cnt;
+	unsigned int evt_y_cnt;
+	unsigned int evt_pres_cnt;
+	unsigned int evt_dist_cnt;
+};
+
+struct trig_thr {
+	unsigned int pwr_cl_trigger_threshold;
+	unsigned int perf_cl_trigger_threshold;
+	unsigned int ip_evt_threshold;
+};
+static struct cluster **managed_clusters;
+static bool clusters_inited;
+static bool input_events_handler_registered;
+static struct input_events *ip_evts;
+static struct trig_thr thr;
+/* Work to evaluate the onlining/offlining CPUs */
+struct delayed_work evaluate_hotplug_work;
+
+/* To handle cpufreq min/max request */
+struct cpu_status {
+	unsigned int min;
+	unsigned int max;
+};
+static DEFINE_PER_CPU(struct cpu_status, cpu_stats);
+
+static unsigned int num_online_managed(struct cpumask *mask);
+static int init_cluster_control(void);
+static int rm_high_pwr_cost_cpus(struct cluster *cl);
+static int init_events_group(void);
+static int register_input_handler(void);
+static void unregister_input_handler(void);
+
+
+static DEFINE_PER_CPU(unsigned int, cpu_power_cost);
+
+struct load_stats {
+	u64 last_wallclock;
+	/* IO wait related */
+	u64 last_iowait;
+	unsigned int last_iopercent;
+	/* CPU load related */
+	unsigned int cpu_load;
+	/*CPU Freq*/
+	unsigned int freq;
+};
+static DEFINE_PER_CPU(struct load_stats, cpu_load_stats);
+
+struct events {
+	spinlock_t cpu_hotplug_lock;
+	bool cpu_hotplug;
+	bool init_success;
+};
+static struct events events_group;
+static struct task_struct *events_notify_thread;
+
+#define LAST_UPDATE_TOL		USEC_PER_MSEC
+
+/* Bitmask to keep track of the workloads being detected */
+static unsigned int workload_detect;
+#define IO_DETECT	1
+#define MODE_DETECT	2
+#define PERF_CL_PEAK_DETECT	4
+
+
+/* IOwait related tunables */
+static unsigned int io_enter_cycles = 4;
+static unsigned int io_exit_cycles = 4;
+static u64 iowait_ceiling_pct = 25;
+static u64 iowait_floor_pct = 8;
+#define LAST_IO_CHECK_TOL	(3 * USEC_PER_MSEC)
+
+static unsigned int aggr_iobusy;
+static unsigned int aggr_mode;
+
+static struct task_struct *notify_thread;
+
+static struct input_handler *handler;
+
+/* CPU workload detection related */
+#define NO_MODE		(0)
+#define SINGLE		(1)
+#define MULTI		(2)
+#define MIXED		(3)
+#define PERF_CL_PEAK		(4)
+#define DEF_SINGLE_ENT		90
+#define DEF_PCPU_MULTI_ENT	85
+#define DEF_PERF_CL_PEAK_ENT	80
+#define DEF_SINGLE_EX		60
+#define DEF_PCPU_MULTI_EX	50
+#define DEF_PERF_CL_PEAK_EX		70
+#define DEF_SINGLE_ENTER_CYCLE	4
+#define DEF_SINGLE_EXIT_CYCLE	4
+#define DEF_MULTI_ENTER_CYCLE	4
+#define DEF_MULTI_EXIT_CYCLE	4
+#define DEF_PERF_CL_PEAK_ENTER_CYCLE	100
+#define DEF_PERF_CL_PEAK_EXIT_CYCLE	20
+#define LAST_LD_CHECK_TOL	(2 * USEC_PER_MSEC)
+#define CLUSTER_0_THRESHOLD_FREQ	147000
+#define CLUSTER_1_THRESHOLD_FREQ	190000
+#define INPUT_EVENT_CNT_THRESHOLD	15
+#define MAX_LENGTH_CPU_STRING	256
+
+/**************************sysfs start********************************/
+
+static int set_num_clusters(const char *buf, const struct kernel_param *kp)
+{
+	unsigned int val;
+
+	if (sscanf(buf, "%u\n", &val) != 1)
+		return -EINVAL;
+	if (num_clusters)
+		return -EINVAL;
+
+	num_clusters = val;
+
+	if (init_cluster_control()) {
+		num_clusters = 0;
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+static int get_num_clusters(char *buf, const struct kernel_param *kp)
+{
+	return snprintf(buf, PAGE_SIZE, "%u", num_clusters);
+}
+
+static const struct kernel_param_ops param_ops_num_clusters = {
+	.set = set_num_clusters,
+	.get = get_num_clusters,
+};
+device_param_cb(num_clusters, &param_ops_num_clusters, NULL, 0644);
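+
+/*
+ * Example (illustrative path, assuming the module is named
+ * msm_performance): the cluster count can be set only once, before the
+ * per-cluster CPU masks are configured:
+ *
+ *	echo 2 > /sys/module/msm_performance/parameters/num_clusters
+ */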
+
+static int set_max_cpus(const char *buf, const struct kernel_param *kp)
+{
+	unsigned int i, ntokens = 0;
+	const char *cp = buf;
+	int val;
+
+	if (!clusters_inited)
+		return -EINVAL;
+
+	while ((cp = strpbrk(cp + 1, ":")))
+		ntokens++;
+
+	if (ntokens != (num_clusters - 1))
+		return -EINVAL;
+
+	cp = buf;
+	for (i = 0; i < num_clusters; i++) {
+
+		if (sscanf(cp, "%d\n", &val) != 1)
+			return -EINVAL;
+		if (val > (int)cpumask_weight(managed_clusters[i]->cpus))
+			return -EINVAL;
+
+		managed_clusters[i]->max_cpu_request = val;
+
+		cp = strnchr(cp, strlen(cp), ':');
+		cp++;
+		trace_set_max_cpus(cpumask_bits(managed_clusters[i]->cpus)[0],
+								val);
+	}
+
+	schedule_delayed_work(&evaluate_hotplug_work, 0);
+
+	return 0;
+}
+
+static int get_max_cpus(char *buf, const struct kernel_param *kp)
+{
+	int i, cnt = 0;
+
+	if (!clusters_inited)
+		return cnt;
+
+	for (i = 0; i < num_clusters; i++)
+		cnt += snprintf(buf + cnt, PAGE_SIZE - cnt,
+				"%d:", managed_clusters[i]->max_cpu_request);
+	cnt--;
+	cnt += snprintf(buf + cnt, PAGE_SIZE - cnt, " ");
+	return cnt;
+}
+
+static const struct kernel_param_ops param_ops_max_cpus = {
+	.set = set_max_cpus,
+	.get = get_max_cpus,
+};
+
+#ifdef CONFIG_MSM_PERFORMANCE_HOTPLUG_ON
+device_param_cb(max_cpus, &param_ops_max_cpus, NULL, 0644);
+#endif
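+
+/*
+ * Example (illustrative): max_cpus takes one ':'-separated value per
+ * cluster and bounds how many CPUs of each cluster are kept online;
+ * values above the cluster's CPU count are rejected:
+ *
+ *	echo 2:4 > /sys/module/msm_performance/parameters/max_cpus
+ */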
+
+static int set_managed_cpus(const char *buf, const struct kernel_param *kp)
+{
+	int i, ret;
+	struct cpumask tmp_mask;
+
+	if (!clusters_inited)
+		return -EINVAL;
+
+	ret = cpulist_parse(buf, &tmp_mask);
+
+	if (ret)
+		return ret;
+
+	for (i = 0; i < num_clusters; i++) {
+		if (cpumask_empty(managed_clusters[i]->cpus)) {
+			mutex_lock(&managed_cpus_lock);
+			cpumask_copy(managed_clusters[i]->cpus, &tmp_mask);
+			cpumask_clear(managed_clusters[i]->offlined_cpus);
+			mutex_unlock(&managed_cpus_lock);
+			break;
+		}
+	}
+
+	return ret;
+}
+
+static int get_managed_cpus(char *buf, const struct kernel_param *kp)
+{
+	int i, cnt = 0, total_cnt = 0;
+	char tmp[MAX_LENGTH_CPU_STRING] = "";
+
+	if (!clusters_inited)
+		return cnt;
+
+	for (i = 0; i < num_clusters; i++) {
+		cnt = cpumap_print_to_pagebuf(true, buf,
+						managed_clusters[i]->cpus);
+		if ((i + 1) < num_clusters &&
+		    (total_cnt + cnt + 1) <= MAX_LENGTH_CPU_STRING) {
+			snprintf(tmp + total_cnt, cnt, "%s", buf);
+			tmp[total_cnt + cnt - 1] = ':';
+			tmp[total_cnt + cnt] = '\0';
+			total_cnt += cnt;
+		} else if ((i + 1) == num_clusters &&
+			   (total_cnt + cnt) <= MAX_LENGTH_CPU_STRING) {
+			snprintf(tmp + total_cnt, cnt, "%s", buf);
+			total_cnt += cnt;
+		} else {
+			pr_err("invalid string for managed_cpu:%s%s\n", tmp,
+				buf);
+			break;
+		}
+	}
+	snprintf(buf, PAGE_SIZE, "%s", tmp);
+	return total_cnt;
+}
+
+static const struct kernel_param_ops param_ops_managed_cpus = {
+	.set = set_managed_cpus,
+	.get = get_managed_cpus,
+};
+device_param_cb(managed_cpus, &param_ops_managed_cpus, NULL, 0644);
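+
+/*
+ * Example (illustrative): each write assigns a cpulist to the first
+ * cluster whose mask is still empty, so a two-cluster system is set up
+ * with two successive writes:
+ *
+ *	echo 0-3 > /sys/module/msm_performance/parameters/managed_cpus
+ *	echo 4-7 > /sys/module/msm_performance/parameters/managed_cpus
+ */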
+
+/* Read-only node: To display all the online managed CPUs */
+static int get_managed_online_cpus(char *buf, const struct kernel_param *kp)
+{
+	int i, cnt = 0, total_cnt = 0;
+	char tmp[MAX_LENGTH_CPU_STRING] = "";
+	struct cpumask tmp_mask;
+	struct cluster *i_cl;
+
+	if (!clusters_inited)
+		return cnt;
+
+	for (i = 0; i < num_clusters; i++) {
+		i_cl = managed_clusters[i];
+
+		cpumask_clear(&tmp_mask);
+		cpumask_complement(&tmp_mask, i_cl->offlined_cpus);
+		cpumask_and(&tmp_mask, i_cl->cpus, &tmp_mask);
+
+		cnt = cpumap_print_to_pagebuf(true, buf, &tmp_mask);
+		if ((i + 1) < num_clusters &&
+		    (total_cnt + cnt + 1) <= MAX_LENGTH_CPU_STRING) {
+			snprintf(tmp + total_cnt, cnt, "%s", buf);
+			tmp[total_cnt + cnt - 1] = ':';
+			tmp[total_cnt + cnt] = '\0';
+			total_cnt += cnt;
+		} else if ((i + 1) == num_clusters &&
+			   (total_cnt + cnt) <= MAX_LENGTH_CPU_STRING) {
+			snprintf(tmp + total_cnt, cnt, "%s", buf);
+			total_cnt += cnt;
+		} else {
+			pr_err("invalid string for managed_cpu:%s%s\n", tmp,
+				buf);
+			break;
+		}
+	}
+	snprintf(buf, PAGE_SIZE, "%s", tmp);
+	return total_cnt;
+}
+
+static const struct kernel_param_ops param_ops_managed_online_cpus = {
+	.get = get_managed_online_cpus,
+};
+
+#ifdef CONFIG_MSM_PERFORMANCE_HOTPLUG_ON
+device_param_cb(managed_online_cpus, &param_ops_managed_online_cpus,
+							NULL, 0444);
+#endif
+/*
+ * Userspace sends cpu#:min_freq_value to vote for min_freq_value as the new
+ * scaling_min. To withdraw its vote it writes cpu#:0.
+ */
+static int set_cpu_min_freq(const char *buf, const struct kernel_param *kp)
+{
+	int i, j, ntokens = 0;
+	unsigned int val, cpu;
+	const char *cp = buf;
+	struct cpu_status *i_cpu_stats;
+	struct cpufreq_policy policy;
+	cpumask_var_t limit_mask;
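+	/*
+	 * On-stack cpumask: this relies on !CONFIG_CPUMASK_OFFSTACK;
+	 * otherwise alloc_cpumask_var() would be needed before use.
+	 */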
+	int ret;
+
+	while ((cp = strpbrk(cp + 1, " :")))
+		ntokens++;
+
+	/* CPU:value pair */
+	if (!(ntokens % 2))
+		return -EINVAL;
+
+	cp = buf;
+	cpumask_clear(limit_mask);
+	for (i = 0; i < ntokens; i += 2) {
+		if (sscanf(cp, "%u:%u", &cpu, &val) != 2)
+			return -EINVAL;
+		if (cpu > (num_present_cpus() - 1))
+			return -EINVAL;
+
+		i_cpu_stats = &per_cpu(cpu_stats, cpu);
+
+		i_cpu_stats->min = val;
+		cpumask_set_cpu(cpu, limit_mask);
+
+		cp = strnchr(cp, strlen(cp), ' ');
+		cp++;
+	}
+
+	/*
+	 * Since on synchronous systems policy is shared amongst multiple
+	 * CPUs only one CPU needs to be updated for the limit to be
+	 * reflected for the entire cluster. We can avoid updating the policy
+	 * of other CPUs in the cluster once it is done for at least one CPU
+	 * in the cluster
+	 */
+	get_online_cpus();
+	for_each_cpu(i, limit_mask) {
+		i_cpu_stats = &per_cpu(cpu_stats, i);
+
+		if (cpufreq_get_policy(&policy, i))
+			continue;
+
+		if (cpu_online(i) && (policy.min != i_cpu_stats->min)) {
+			ret = cpufreq_update_policy(i);
+			if (ret)
+				continue;
+		}
+		for_each_cpu(j, policy.related_cpus)
+			cpumask_clear_cpu(j, limit_mask);
+	}
+	put_online_cpus();
+
+	return 0;
+}
+
+static int get_cpu_min_freq(char *buf, const struct kernel_param *kp)
+{
+	int cnt = 0, cpu;
+
+	for_each_present_cpu(cpu) {
+		cnt += snprintf(buf + cnt, PAGE_SIZE - cnt,
+				"%d:%u ", cpu, per_cpu(cpu_stats, cpu).min);
+	}
+	cnt += snprintf(buf + cnt, PAGE_SIZE - cnt, "\n");
+	return cnt;
+}
+
+static const struct kernel_param_ops param_ops_cpu_min_freq = {
+	.set = set_cpu_min_freq,
+	.get = get_cpu_min_freq,
+};
+module_param_cb(cpu_min_freq, &param_ops_cpu_min_freq, NULL, 0644);
+
+/*
+ * Userspace sends cpu#:max_freq_value to vote for max_freq_value as the new
+ * scaling_max. To withdraw its vote it writes cpu#:UINT_MAX.
+ */
+static int set_cpu_max_freq(const char *buf, const struct kernel_param *kp)
+{
+	int i, j, ntokens = 0;
+	unsigned int val, cpu;
+	const char *cp = buf;
+	struct cpu_status *i_cpu_stats;
+	struct cpufreq_policy policy;
+	cpumask_var_t limit_mask;
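+	/*
+	 * On-stack cpumask: this relies on !CONFIG_CPUMASK_OFFSTACK;
+	 * otherwise alloc_cpumask_var() would be needed before use.
+	 */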
+	int ret;
+
+	while ((cp = strpbrk(cp + 1, " :")))
+		ntokens++;
+
+	/* CPU:value pair */
+	if (!(ntokens % 2))
+		return -EINVAL;
+
+	cp = buf;
+	cpumask_clear(limit_mask);
+	for (i = 0; i < ntokens; i += 2) {
+		if (sscanf(cp, "%u:%u", &cpu, &val) != 2)
+			return -EINVAL;
+		if (cpu > (num_present_cpus() - 1))
+			return -EINVAL;
+
+		i_cpu_stats = &per_cpu(cpu_stats, cpu);
+
+		i_cpu_stats->max = val;
+		cpumask_set_cpu(cpu, limit_mask);
+
+		cp = strnchr(cp, strlen(cp), ' ');
+		cp++;
+	}
+
+	get_online_cpus();
+	for_each_cpu(i, limit_mask) {
+		i_cpu_stats = &per_cpu(cpu_stats, i);
+		if (cpufreq_get_policy(&policy, i))
+			continue;
+
+		if (cpu_online(i) && (policy.max != i_cpu_stats->max)) {
+			ret = cpufreq_update_policy(i);
+			if (ret)
+				continue;
+		}
+		for_each_cpu(j, policy.related_cpus)
+			cpumask_clear_cpu(j, limit_mask);
+	}
+	put_online_cpus();
+
+	return 0;
+}
+
+static int get_cpu_max_freq(char *buf, const struct kernel_param *kp)
+{
+	int cnt = 0, cpu;
+
+	for_each_present_cpu(cpu) {
+		cnt += snprintf(buf + cnt, PAGE_SIZE - cnt,
+				"%d:%u ", cpu, per_cpu(cpu_stats, cpu).max);
+	}
+	cnt += snprintf(buf + cnt, PAGE_SIZE - cnt, "\n");
+	return cnt;
+}
+
+static const struct kernel_param_ops param_ops_cpu_max_freq = {
+	.set = set_cpu_max_freq,
+	.get = get_cpu_max_freq,
+};
+module_param_cb(cpu_max_freq, &param_ops_cpu_max_freq, NULL, 0644);
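+
+/*
+ * Example (illustrative frequencies): votes are space-separated
+ * cpu:value pairs, and updating one CPU per policy is enough since the
+ * policy is shared across the cluster; a vote of 0 (min) or UINT_MAX
+ * (max) withdraws it:
+ *
+ *	echo "0:1036800 4:1401600" > /sys/module/msm_performance/parameters/cpu_min_freq
+ *	echo "0:4294967295" > /sys/module/msm_performance/parameters/cpu_max_freq
+ */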
+
+static int set_ip_evt_trigger_threshold(const char *buf,
+		const struct kernel_param *kp)
+{
+	unsigned int val;
+
+	if (sscanf(buf, "%u\n", &val) != 1)
+		return -EINVAL;
+
+	thr.ip_evt_threshold = val;
+	return 0;
+}
+
+static int get_ip_evt_trigger_threshold(char *buf,
+		const struct kernel_param *kp)
+{
+	return snprintf(buf, PAGE_SIZE, "%u", thr.ip_evt_threshold);
+}
+
+static const struct kernel_param_ops param_ops_ip_evt_trig_thr = {
+	.set = set_ip_evt_trigger_threshold,
+	.get = get_ip_evt_trigger_threshold,
+};
+device_param_cb(ip_evt_trig_thr, &param_ops_ip_evt_trig_thr, NULL, 0644);
+
+
+static int set_perf_cl_trigger_threshold(const char *buf,
+		 const struct kernel_param *kp)
+{
+	unsigned int val;
+
+	if (sscanf(buf, "%u\n", &val) != 1)
+		return -EINVAL;
+
+	thr.perf_cl_trigger_threshold = val;
+	return 0;
+}
+
+static int get_perf_cl_trigger_threshold(char *buf,
+		const struct kernel_param *kp)
+{
+	return snprintf(buf, PAGE_SIZE, "%u", thr.perf_cl_trigger_threshold);
+}
+
+static const struct kernel_param_ops param_ops_perf_trig_thr = {
+	.set = set_perf_cl_trigger_threshold,
+	.get = get_perf_cl_trigger_threshold,
+};
+device_param_cb(perf_cl_trig_thr, &param_ops_perf_trig_thr, NULL, 0644);
+
+
+static int set_pwr_cl_trigger_threshold(const char *buf,
+		const struct kernel_param *kp)
+{
+	unsigned int val;
+
+	if (sscanf(buf, "%u\n", &val) != 1)
+		return -EINVAL;
+
+	thr.pwr_cl_trigger_threshold = val;
+	return 0;
+}
+
+static int get_pwr_cl_trigger_threshold(char *buf,
+		const struct kernel_param *kp)
+{
+	return snprintf(buf, PAGE_SIZE, "%u", thr.pwr_cl_trigger_threshold);
+}
+
+static const struct kernel_param_ops param_ops_pwr_trig_thr = {
+	.set = set_pwr_cl_trigger_threshold,
+	.get = get_pwr_cl_trigger_threshold,
+};
+device_param_cb(pwr_cl_trig_thr, &param_ops_pwr_trig_thr, NULL, 0644);
+
+
+static int freq_greater_than_threshold(struct cluster *cl, int idx)
+{
+	int rc = 0;
+
+	/* Check for cluster 0 */
+	if (!idx && cl->current_freq >= thr.pwr_cl_trigger_threshold)
+		rc = 1;
+	/* Check for cluster 1 */
+	if (idx && cl->current_freq >= thr.perf_cl_trigger_threshold)
+		rc = 1;
+	return rc;
+}
+
+static int input_events_greater_than_threshold(void)
+{
+	int rc = 0;
+
+	if ((ip_evts->evt_x_cnt >= thr.ip_evt_threshold) ||
+	    (ip_evts->evt_y_cnt >= thr.ip_evt_threshold) ||
+	    !use_input_evts_with_hi_slvt_detect)
+		rc = 1;
+
+	return rc;
+}
+
+static int set_single_enter_load(const char *buf, const struct kernel_param *kp)
+{
+	unsigned int val, i, ntokens = 0;
+	const char *cp = buf;
+	unsigned int bytes_left;
+
+	if (!clusters_inited)
+		return -EINVAL;
+
+	while ((cp = strpbrk(cp + 1, ":")))
+		ntokens++;
+
+	if (ntokens != (num_clusters - 1))
+		return -EINVAL;
+
+	cp = buf;
+	for (i = 0; i < num_clusters; i++) {
+
+		if (sscanf(cp, "%u\n", &val) != 1)
+			return -EINVAL;
+
+		if (val < managed_clusters[i]->single_exit_load)
+			return -EINVAL;
+
+		managed_clusters[i]->single_enter_load = val;
+
+		bytes_left = PAGE_SIZE - (cp - buf);
+		cp = strnchr(cp, bytes_left, ':');
+		cp++;
+	}
+
+	return 0;
+}
+
+static int get_single_enter_load(char *buf, const struct kernel_param *kp)
+{
+	int i, cnt = 0;
+
+	if (!clusters_inited)
+		return cnt;
+
+	for (i = 0; i < num_clusters; i++)
+		cnt += snprintf(buf + cnt, PAGE_SIZE - cnt,
+				"%u:", managed_clusters[i]->single_enter_load);
+	cnt--;
+	cnt += snprintf(buf + cnt, PAGE_SIZE - cnt, " ");
+	return cnt;
+}
+
+static const struct kernel_param_ops param_ops_single_enter_load = {
+	.set = set_single_enter_load,
+	.get = get_single_enter_load,
+};
+device_param_cb(single_enter_load, &param_ops_single_enter_load, NULL, 0644);
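+
+/*
+ * Example (illustrative): like the other per-cluster tunables that
+ * follow, the load thresholds take one ':'-separated value per cluster;
+ * an enter load below the matching exit load is rejected:
+ *
+ *	echo 90:85 > /sys/module/msm_performance/parameters/single_enter_load
+ */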
+
+static int set_single_exit_load(const char *buf, const struct kernel_param *kp)
+{
+	unsigned int val, i, ntokens = 0;
+	const char *cp = buf;
+	unsigned int bytes_left;
+
+	if (!clusters_inited)
+		return -EINVAL;
+
+	while ((cp = strpbrk(cp + 1, ":")))
+		ntokens++;
+
+	if (ntokens != (num_clusters - 1))
+		return -EINVAL;
+
+	cp = buf;
+	for (i = 0; i < num_clusters; i++) {
+
+		if (sscanf(cp, "%u\n", &val) != 1)
+			return -EINVAL;
+
+		if (val > managed_clusters[i]->single_enter_load)
+			return -EINVAL;
+
+		managed_clusters[i]->single_exit_load = val;
+
+		bytes_left = PAGE_SIZE - (cp - buf);
+		cp = strnchr(cp, bytes_left, ':');
+		cp++;
+	}
+
+	return 0;
+}
+
+static int get_single_exit_load(char *buf, const struct kernel_param *kp)
+{
+	int i, cnt = 0;
+
+	if (!clusters_inited)
+		return cnt;
+
+	for (i = 0; i < num_clusters; i++)
+		cnt += snprintf(buf + cnt, PAGE_SIZE - cnt,
+				"%u:", managed_clusters[i]->single_exit_load);
+	cnt--;
+	cnt += snprintf(buf + cnt, PAGE_SIZE - cnt, " ");
+	return cnt;
+}
+
+static const struct kernel_param_ops param_ops_single_exit_load = {
+	.set = set_single_exit_load,
+	.get = get_single_exit_load,
+};
+device_param_cb(single_exit_load, &param_ops_single_exit_load, NULL, 0644);
+
+static int set_pcpu_multi_enter_load(const char *buf,
+					const struct kernel_param *kp)
+{
+	unsigned int val, i, ntokens = 0;
+	const char *cp = buf;
+	unsigned int bytes_left;
+
+	if (!clusters_inited)
+		return -EINVAL;
+
+	while ((cp = strpbrk(cp + 1, ":")))
+		ntokens++;
+
+	if (ntokens != (num_clusters - 1))
+		return -EINVAL;
+
+	cp = buf;
+	for (i = 0; i < num_clusters; i++) {
+
+		if (sscanf(cp, "%u\n", &val) != 1)
+			return -EINVAL;
+
+		if (val < managed_clusters[i]->pcpu_multi_exit_load)
+			return -EINVAL;
+
+		managed_clusters[i]->pcpu_multi_enter_load = val;
+
+		bytes_left = PAGE_SIZE - (cp - buf);
+		cp = strnchr(cp, bytes_left, ':');
+		cp++;
+	}
+
+	return 0;
+}
+
+static int get_pcpu_multi_enter_load(char *buf, const struct kernel_param *kp)
+{
+	int i, cnt = 0;
+
+	if (!clusters_inited)
+		return cnt;
+
+	for (i = 0; i < num_clusters; i++)
+		cnt += snprintf(buf + cnt, PAGE_SIZE - cnt,
+			"%u:", managed_clusters[i]->pcpu_multi_enter_load);
+	cnt--;
+	cnt += snprintf(buf + cnt, PAGE_SIZE - cnt, " ");
+	return cnt;
+}
+
+static const struct kernel_param_ops param_ops_pcpu_multi_enter_load = {
+	.set = set_pcpu_multi_enter_load,
+	.get = get_pcpu_multi_enter_load,
+};
+device_param_cb(pcpu_multi_enter_load, &param_ops_pcpu_multi_enter_load,
+								NULL, 0644);
+
+static int set_pcpu_multi_exit_load(const char *buf,
+						const struct kernel_param *kp)
+{
+	unsigned int val, i, ntokens = 0;
+	const char *cp = buf;
+	unsigned int bytes_left;
+
+	if (!clusters_inited)
+		return -EINVAL;
+
+	while ((cp = strpbrk(cp + 1, ":")))
+		ntokens++;
+
+	if (ntokens != (num_clusters - 1))
+		return -EINVAL;
+
+	cp = buf;
+	for (i = 0; i < num_clusters; i++) {
+
+		if (sscanf(cp, "%u\n", &val) != 1)
+			return -EINVAL;
+
+		if (val > managed_clusters[i]->pcpu_multi_enter_load)
+			return -EINVAL;
+
+		managed_clusters[i]->pcpu_multi_exit_load = val;
+
+		bytes_left = PAGE_SIZE - (cp - buf);
+		cp = strnchr(cp, bytes_left, ':');
+		cp++;
+	}
+
+	return 0;
+}
+
+static int get_pcpu_multi_exit_load(char *buf, const struct kernel_param *kp)
+{
+	int i, cnt = 0;
+
+	if (!clusters_inited)
+		return cnt;
+
+	for (i = 0; i < num_clusters; i++)
+		cnt += snprintf(buf + cnt, PAGE_SIZE - cnt,
+			"%u:", managed_clusters[i]->pcpu_multi_exit_load);
+	cnt--;
+	cnt += snprintf(buf + cnt, PAGE_SIZE - cnt, " ");
+	return cnt;
+}
+
+static const struct kernel_param_ops param_ops_pcpu_multi_exit_load = {
+	.set = set_pcpu_multi_exit_load,
+	.get = get_pcpu_multi_exit_load,
+};
+device_param_cb(pcpu_multi_exit_load, &param_ops_pcpu_multi_exit_load,
+		NULL, 0644);
+static int set_perf_cl_peak_enter_load(const char *buf,
+				const struct kernel_param *kp)
+{
+	unsigned int val, i, ntokens = 0;
+	const char *cp = buf;
+	unsigned int bytes_left;
+
+	if (!clusters_inited)
+		return -EINVAL;
+
+	while ((cp = strpbrk(cp + 1, ":")))
+		ntokens++;
+
+	if (ntokens != (num_clusters - 1))
+		return -EINVAL;
+
+	cp = buf;
+	for (i = 0; i < num_clusters; i++) {
+
+		if (sscanf(cp, "%u\n", &val) != 1)
+			return -EINVAL;
+
+		if (val < managed_clusters[i]->perf_cl_peak_exit_load)
+			return -EINVAL;
+
+		managed_clusters[i]->perf_cl_peak_enter_load = val;
+
+		bytes_left = PAGE_SIZE - (cp - buf);
+		cp = strnchr(cp, bytes_left, ':');
+		cp++;
+	}
+
+	return 0;
+}
+
+static int get_perf_cl_peak_enter_load(char *buf,
+				const struct kernel_param *kp)
+{
+	int i, cnt = 0;
+
+	if (!clusters_inited)
+		return cnt;
+
+	for (i = 0; i < num_clusters; i++)
+		cnt += snprintf(buf + cnt, PAGE_SIZE - cnt,
+			"%u:", managed_clusters[i]->perf_cl_peak_enter_load);
+	cnt--;
+	cnt += snprintf(buf + cnt, PAGE_SIZE - cnt, " ");
+	return cnt;
+}
+
+static const struct kernel_param_ops param_ops_perf_cl_peak_enter_load = {
+	.set = set_perf_cl_peak_enter_load,
+	.get = get_perf_cl_peak_enter_load,
+};
+device_param_cb(perf_cl_peak_enter_load, &param_ops_perf_cl_peak_enter_load,
+		 NULL, 0644);
+
+static int set_perf_cl_peak_exit_load(const char *buf,
+				const struct kernel_param *kp)
+{
+	unsigned int val, i, ntokens = 0;
+	const char *cp = buf;
+	unsigned int bytes_left;
+
+	if (!clusters_inited)
+		return -EINVAL;
+
+	while ((cp = strpbrk(cp + 1, ":")))
+		ntokens++;
+
+	if (ntokens != (num_clusters - 1))
+		return -EINVAL;
+
+	cp = buf;
+	for (i = 0; i < num_clusters; i++) {
+
+		if (sscanf(cp, "%u\n", &val) != 1)
+			return -EINVAL;
+
+		if (val > managed_clusters[i]->perf_cl_peak_enter_load)
+			return -EINVAL;
+
+		managed_clusters[i]->perf_cl_peak_exit_load = val;
+
+		bytes_left = PAGE_SIZE - (cp - buf);
+		cp = strnchr(cp, bytes_left, ':');
+		cp++;
+	}
+
+	return 0;
+}
+
+static int get_perf_cl_peak_exit_load(char *buf,
+				const struct kernel_param *kp)
+{
+	int i, cnt = 0;
+
+	if (!clusters_inited)
+		return cnt;
+
+	for (i = 0; i < num_clusters; i++)
+		cnt += snprintf(buf + cnt, PAGE_SIZE - cnt,
+			"%u:", managed_clusters[i]->perf_cl_peak_exit_load);
+	cnt--;
+	cnt += snprintf(buf + cnt, PAGE_SIZE - cnt, " ");
+	return cnt;
+}
+
+static const struct kernel_param_ops param_ops_perf_cl_peak_exit_load = {
+	.set = set_perf_cl_peak_exit_load,
+	.get = get_perf_cl_peak_exit_load,
+};
+device_param_cb(perf_cl_peak_exit_load, &param_ops_perf_cl_peak_exit_load,
+		 NULL, 0644);
+
+static int set_perf_cl_peak_enter_cycles(const char *buf,
+				const struct kernel_param *kp)
+{
+	unsigned int val, i, ntokens = 0;
+	const char *cp = buf;
+	unsigned int bytes_left;
+
+	if (!clusters_inited)
+		return -EINVAL;
+
+	while ((cp = strpbrk(cp + 1, ":")))
+		ntokens++;
+
+	if (ntokens != (num_clusters - 1))
+		return -EINVAL;
+
+	cp = buf;
+	for (i = 0; i < num_clusters; i++) {
+
+		if (sscanf(cp, "%u\n", &val) != 1)
+			return -EINVAL;
+
+		managed_clusters[i]->perf_cl_peak_enter_cycles = val;
+
+		bytes_left = PAGE_SIZE - (cp - buf);
+		cp = strnchr(cp, bytes_left, ':');
+		cp++;
+	}
+
+	return 0;
+}
+
+static int get_perf_cl_peak_enter_cycles(char *buf,
+				const struct kernel_param *kp)
+{
+	int i, cnt = 0;
+
+	if (!clusters_inited)
+		return cnt;
+
+	for (i = 0; i < num_clusters; i++)
+		cnt += snprintf(buf + cnt, PAGE_SIZE - cnt, "%u:",
+				managed_clusters[i]->perf_cl_peak_enter_cycles);
+	cnt--;
+	cnt += snprintf(buf + cnt, PAGE_SIZE - cnt, " ");
+	return cnt;
+}
+
+static const struct kernel_param_ops param_ops_perf_cl_peak_enter_cycles = {
+	.set = set_perf_cl_peak_enter_cycles,
+	.get = get_perf_cl_peak_enter_cycles,
+};
+device_param_cb(perf_cl_peak_enter_cycles, &param_ops_perf_cl_peak_enter_cycles,
+		NULL, 0644);
+
+
+static int set_perf_cl_peak_exit_cycles(const char *buf,
+				const struct kernel_param *kp)
+{
+	unsigned int val, i, ntokens = 0;
+	const char *cp = buf;
+	unsigned int bytes_left;
+
+	if (!clusters_inited)
+		return -EINVAL;
+
+	while ((cp = strpbrk(cp + 1, ":")))
+		ntokens++;
+
+	if (ntokens != (num_clusters - 1))
+		return -EINVAL;
+
+	cp = buf;
+	for (i = 0; i < num_clusters; i++) {
+
+		if (sscanf(cp, "%u\n", &val) != 1)
+			return -EINVAL;
+
+		managed_clusters[i]->perf_cl_peak_exit_cycles = val;
+
+		bytes_left = PAGE_SIZE - (cp - buf);
+		cp = strnchr(cp, bytes_left, ':');
+		cp++;
+	}
+
+	return 0;
+}
+
+static int get_perf_cl_peak_exit_cycles(char *buf,
+			const struct kernel_param *kp)
+{
+	int i, cnt = 0;
+
+	if (!clusters_inited)
+		return cnt;
+
+	for (i = 0; i < num_clusters; i++)
+		cnt += snprintf(buf + cnt, PAGE_SIZE - cnt,
+			"%u:", managed_clusters[i]->perf_cl_peak_exit_cycles);
+	cnt--;
+	cnt += snprintf(buf + cnt, PAGE_SIZE - cnt, " ");
+	return cnt;
+}
+
+static const struct kernel_param_ops param_ops_perf_cl_peak_exit_cycles = {
+	.set = set_perf_cl_peak_exit_cycles,
+	.get = get_perf_cl_peak_exit_cycles,
+};
+device_param_cb(perf_cl_peak_exit_cycles, &param_ops_perf_cl_peak_exit_cycles,
+		 NULL, 0644);
+
+
+static int set_single_enter_cycles(const char *buf,
+				const struct kernel_param *kp)
+{
+	unsigned int val, i, ntokens = 0;
+	const char *cp = buf;
+	unsigned int bytes_left;
+
+	if (!clusters_inited)
+		return -EINVAL;
+
+	while ((cp = strpbrk(cp + 1, ":")))
+		ntokens++;
+
+	if (ntokens != (num_clusters - 1))
+		return -EINVAL;
+
+	cp = buf;
+	for (i = 0; i < num_clusters; i++) {
+
+		if (sscanf(cp, "%u\n", &val) != 1)
+			return -EINVAL;
+
+		managed_clusters[i]->single_enter_cycles = val;
+
+		bytes_left = PAGE_SIZE - (cp - buf);
+		cp = strnchr(cp, bytes_left, ':');
+		cp++;
+	}
+
+	return 0;
+}
+
+static int get_single_enter_cycles(char *buf, const struct kernel_param *kp)
+{
+	int i, cnt = 0;
+
+	if (!clusters_inited)
+		return cnt;
+
+	for (i = 0; i < num_clusters; i++)
+		cnt += snprintf(buf + cnt, PAGE_SIZE - cnt, "%u:",
+				managed_clusters[i]->single_enter_cycles);
+	cnt--;
+	cnt += snprintf(buf + cnt, PAGE_SIZE - cnt, " ");
+	return cnt;
+}
+
+static const struct kernel_param_ops param_ops_single_enter_cycles = {
+	.set = set_single_enter_cycles,
+	.get = get_single_enter_cycles,
+};
+device_param_cb(single_enter_cycles, &param_ops_single_enter_cycles,
+		NULL, 0644);
+
+
+static int set_single_exit_cycles(const char *buf,
+				const struct kernel_param *kp)
+{
+	unsigned int val, i, ntokens = 0;
+	const char *cp = buf;
+	unsigned int bytes_left;
+
+	if (!clusters_inited)
+		return -EINVAL;
+
+	while ((cp = strpbrk(cp + 1, ":")))
+		ntokens++;
+
+	if (ntokens != (num_clusters - 1))
+		return -EINVAL;
+
+	cp = buf;
+	for (i = 0; i < num_clusters; i++) {
+
+		if (sscanf(cp, "%u\n", &val) != 1)
+			return -EINVAL;
+
+		managed_clusters[i]->single_exit_cycles = val;
+
+		bytes_left = PAGE_SIZE - (cp - buf);
+		cp = strnchr(cp, bytes_left, ':');
+		cp++;
+	}
+
+	return 0;
+}
+
+static int get_single_exit_cycles(char *buf, const struct kernel_param *kp)
+{
+	int i, cnt = 0;
+
+	if (!clusters_inited)
+		return cnt;
+
+	for (i = 0; i < num_clusters; i++)
+		cnt += snprintf(buf + cnt, PAGE_SIZE - cnt,
+				"%u:", managed_clusters[i]->single_exit_cycles);
+	cnt--;
+	cnt += snprintf(buf + cnt, PAGE_SIZE - cnt, " ");
+	return cnt;
+}
+
+static const struct kernel_param_ops param_ops_single_exit_cycles = {
+	.set = set_single_exit_cycles,
+	.get = get_single_exit_cycles,
+};
+device_param_cb(single_exit_cycles, &param_ops_single_exit_cycles, NULL, 0644);
+
+static int set_multi_enter_cycles(const char *buf,
+				const struct kernel_param *kp)
+{
+	unsigned int val, i, ntokens = 0;
+	const char *cp = buf;
+	unsigned int bytes_left;
+
+	if (!clusters_inited)
+		return -EINVAL;
+
+	while ((cp = strpbrk(cp + 1, ":")))
+		ntokens++;
+
+	if (ntokens != (num_clusters - 1))
+		return -EINVAL;
+
+	cp = buf;
+	for (i = 0; i < num_clusters; i++) {
+		if (sscanf(cp, "%u\n", &val) != 1)
+			return -EINVAL;
+
+		managed_clusters[i]->multi_enter_cycles = val;
+
+		bytes_left = PAGE_SIZE - (cp - buf);
+		cp = strnchr(cp, bytes_left, ':');
+		cp++;
+	}
+
+	return 0;
+}
+
+static int get_multi_enter_cycles(char *buf, const struct kernel_param *kp)
+{
+	int i, cnt = 0;
+
+	if (!clusters_inited)
+		return cnt;
+
+	for (i = 0; i < num_clusters; i++)
+		cnt += snprintf(buf + cnt, PAGE_SIZE - cnt,
+				"%u:", managed_clusters[i]->multi_enter_cycles);
+	cnt--;
+	cnt += snprintf(buf + cnt, PAGE_SIZE - cnt, " ");
+	return cnt;
+}
+
+static const struct kernel_param_ops param_ops_multi_enter_cycles = {
+	.set = set_multi_enter_cycles,
+	.get = get_multi_enter_cycles,
+};
+device_param_cb(multi_enter_cycles, &param_ops_multi_enter_cycles, NULL, 0644);
+
+static int set_multi_exit_cycles(const char *buf, const struct kernel_param *kp)
+{
+	unsigned int val, i, ntokens = 0;
+	const char *cp = buf;
+	unsigned int bytes_left;
+
+	if (!clusters_inited)
+		return -EINVAL;
+
+	while ((cp = strpbrk(cp + 1, ":")))
+		ntokens++;
+
+	if (ntokens != (num_clusters - 1))
+		return -EINVAL;
+
+	cp = buf;
+	for (i = 0; i < num_clusters; i++) {
+		if (sscanf(cp, "%u\n", &val) != 1)
+			return -EINVAL;
+
+		managed_clusters[i]->multi_exit_cycles = val;
+
+		bytes_left = PAGE_SIZE - (cp - buf);
+		cp = strnchr(cp, bytes_left, ':');
+		cp++;
+	}
+
+	return 0;
+}
+
+static int get_multi_exit_cycles(char *buf, const struct kernel_param *kp)
+{
+	int i, cnt = 0;
+
+	if (!clusters_inited)
+		return cnt;
+
+	for (i = 0; i < num_clusters; i++)
+		cnt += snprintf(buf + cnt, PAGE_SIZE - cnt,
+				"%u:", managed_clusters[i]->multi_exit_cycles);
+	cnt--;
+	cnt += snprintf(buf + cnt, PAGE_SIZE - cnt, " ");
+	return cnt;
+}
+
+static const struct kernel_param_ops param_ops_multi_exit_cycles = {
+	.set = set_multi_exit_cycles,
+	.get = get_multi_exit_cycles,
+};
+device_param_cb(multi_exit_cycles, &param_ops_multi_exit_cycles, NULL, 0644);
+
+static int set_io_enter_cycles(const char *buf, const struct kernel_param *kp)
+{
+	unsigned int val;
+
+	if (sscanf(buf, "%u\n", &val) != 1)
+		return -EINVAL;
+
+	io_enter_cycles = val;
+
+	return 0;
+}
+
+static int get_io_enter_cycles(char *buf, const struct kernel_param *kp)
+{
+	return snprintf(buf, PAGE_SIZE, "%u", io_enter_cycles);
+}
+
+static const struct kernel_param_ops param_ops_io_enter_cycles = {
+	.set = set_io_enter_cycles,
+	.get = get_io_enter_cycles,
+};
+device_param_cb(io_enter_cycles, &param_ops_io_enter_cycles, NULL, 0644);
+
+static int set_io_exit_cycles(const char *buf, const struct kernel_param *kp)
+{
+	unsigned int val;
+
+	if (sscanf(buf, "%u\n", &val) != 1)
+		return -EINVAL;
+
+	io_exit_cycles = val;
+
+	return 0;
+}
+
+static int get_io_exit_cycles(char *buf, const struct kernel_param *kp)
+{
+	return snprintf(buf, PAGE_SIZE, "%u", io_exit_cycles);
+}
+
+static const struct kernel_param_ops param_ops_io_exit_cycles = {
+	.set = set_io_exit_cycles,
+	.get = get_io_exit_cycles,
+};
+device_param_cb(io_exit_cycles, &param_ops_io_exit_cycles, NULL, 0644);
+
+static int set_iowait_floor_pct(const char *buf, const struct kernel_param *kp)
+{
+	u64 val;
+
+	if (sscanf(buf, "%llu\n", &val) != 1)
+		return -EINVAL;
+	if (val > iowait_ceiling_pct)
+		return -EINVAL;
+
+	iowait_floor_pct = val;
+
+	return 0;
+}
+
+static int get_iowait_floor_pct(char *buf, const struct kernel_param *kp)
+{
+	return snprintf(buf, PAGE_SIZE, "%llu", iowait_floor_pct);
+}
+
+static const struct kernel_param_ops param_ops_iowait_floor_pct = {
+	.set = set_iowait_floor_pct,
+	.get = get_iowait_floor_pct,
+};
+device_param_cb(iowait_floor_pct, &param_ops_iowait_floor_pct, NULL, 0644);
+
+static int set_iowait_ceiling_pct(const char *buf,
+						const struct kernel_param *kp)
+{
+	u64 val;
+
+	if (sscanf(buf, "%llu\n", &val) != 1)
+		return -EINVAL;
+	if (val < iowait_floor_pct)
+		return -EINVAL;
+
+	iowait_ceiling_pct = val;
+
+	return 0;
+}
+
+static int get_iowait_ceiling_pct(char *buf, const struct kernel_param *kp)
+{
+	return snprintf(buf, PAGE_SIZE, "%llu", iowait_ceiling_pct);
+}
+
+static const struct kernel_param_ops param_ops_iowait_ceiling_pct = {
+	.set = set_iowait_ceiling_pct,
+	.get = get_iowait_ceiling_pct,
+};
+device_param_cb(iowait_ceiling_pct, &param_ops_iowait_ceiling_pct, NULL, 0644);
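+
+/*
+ * iowait_floor_pct and iowait_ceiling_pct bound the io-busy hysteresis:
+ * a cluster is flagged io_busy once its peak iowait stays above the
+ * ceiling for io_enter_cycles samples, and cleared once it stays below
+ * the floor for io_exit_cycles samples (see check_cluster_iowait()).
+ */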
+
+static int set_workload_detect(const char *buf, const struct kernel_param *kp)
+{
+	unsigned int val, i;
+	struct cluster *i_cl;
+	unsigned long flags;
+
+	if (!clusters_inited)
+		return -EINVAL;
+
+	if (sscanf(buf, "%u\n", &val) != 1)
+		return -EINVAL;
+
+	if (val == workload_detect)
+		return 0;
+
+	workload_detect = val;
+	if (!(workload_detect & IO_DETECT)) {
+		for (i = 0; i < num_clusters; i++) {
+			i_cl = managed_clusters[i];
+			spin_lock_irqsave(&i_cl->iowait_lock, flags);
+			i_cl->iowait_enter_cycle_cnt = 0;
+			i_cl->iowait_exit_cycle_cnt = 0;
+			i_cl->cur_io_busy = 0;
+			i_cl->io_change = true;
+			spin_unlock_irqrestore(&i_cl->iowait_lock, flags);
+		}
+	}
+	if (!(workload_detect & MODE_DETECT)) {
+		for (i = 0; i < num_clusters; i++) {
+			i_cl = managed_clusters[i];
+			spin_lock_irqsave(&i_cl->mode_lock, flags);
+			i_cl->single_enter_cycle_cnt = 0;
+			i_cl->single_exit_cycle_cnt = 0;
+			i_cl->multi_enter_cycle_cnt = 0;
+			i_cl->multi_exit_cycle_cnt = 0;
+			i_cl->mode = 0;
+			i_cl->mode_change = true;
+			spin_unlock_irqrestore(&i_cl->mode_lock, flags);
+		}
+	}
+
+	if (!(workload_detect & PERF_CL_PEAK_DETECT)) {
+		for (i = 0; i < num_clusters; i++) {
+			i_cl = managed_clusters[i];
+			spin_lock_irqsave(&i_cl->perf_cl_peak_lock, flags);
+			i_cl->perf_cl_peak_enter_cycle_cnt = 0;
+			i_cl->perf_cl_peak_exit_cycle_cnt = 0;
+			i_cl->perf_cl_peak = 0;
+			spin_unlock_irqrestore(&i_cl->perf_cl_peak_lock, flags);
+		}
+	}
+
+	wake_up_process(notify_thread);
+	return 0;
+}
+
+static int get_workload_detect(char *buf, const struct kernel_param *kp)
+{
+	return snprintf(buf, PAGE_SIZE, "%u", workload_detect);
+}
+
+static const struct kernel_param_ops param_ops_workload_detect = {
+	.set = set_workload_detect,
+	.get = get_workload_detect,
+};
+device_param_cb(workload_detect, &param_ops_workload_detect, NULL, 0644);
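+
+/*
+ * workload_detect is a bitmask of IO_DETECT, MODE_DETECT and
+ * PERF_CL_PEAK_DETECT; clearing a bit resets that detector's counters and
+ * state on every managed cluster and wakes the notifier thread so
+ * userspace observes the change.
+ */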
+
+static int set_input_evts_with_hi_slvt_detect(const char *buf,
+					const struct kernel_param *kp)
+{
+	unsigned int val;
+
+	if (sscanf(buf, "%u\n", &val) != 1)
+		return -EINVAL;
+
+	if (val == use_input_evts_with_hi_slvt_detect)
+		return 0;
+
+	use_input_evts_with_hi_slvt_detect = val;
+
+	if ((workload_detect & PERF_CL_PEAK_DETECT) &&
+		!input_events_handler_registered &&
+		use_input_evts_with_hi_slvt_detect) {
+		if (register_input_handler() == -ENOMEM) {
+			use_input_evts_with_hi_slvt_detect = 0;
+			return -ENOMEM;
+		}
+	} else if ((workload_detect & PERF_CL_PEAK_DETECT) &&
+				input_events_handler_registered &&
+				!use_input_evts_with_hi_slvt_detect) {
+		unregister_input_handler();
+	}
+	return 0;
+}
+
+static int get_input_evts_with_hi_slvt_detect(char *buf,
+					const struct kernel_param *kp)
+{
+	return snprintf(buf, PAGE_SIZE, "%u",
+			use_input_evts_with_hi_slvt_detect);
+}
+
+static const struct kernel_param_ops param_ops_ip_evts_with_hi_slvt_detect = {
+	.set = set_input_evts_with_hi_slvt_detect,
+	.get = get_input_evts_with_hi_slvt_detect,
+};
+device_param_cb(input_evts_with_hi_slvt_detect,
+	&param_ops_ip_evts_with_hi_slvt_detect, NULL, 0644);
+
+static struct kobject *mode_kobj;
+
+static ssize_t show_aggr_mode(struct kobject *kobj,
+					struct kobj_attribute *attr, char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%u\n", aggr_mode);
+}
+static struct kobj_attribute aggr_mode_attr =
+__ATTR(aggr_mode, 0444, show_aggr_mode, NULL);
+
+static ssize_t show_aggr_iobusy(struct kobject *kobj,
+					struct kobj_attribute *attr, char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%u\n", aggr_iobusy);
+}
+static struct kobj_attribute aggr_iobusy_attr =
+__ATTR(aggr_iobusy, 0444, show_aggr_iobusy, NULL);
+
+static struct attribute *attrs[] = {
+	&aggr_mode_attr.attr,
+	&aggr_iobusy_attr.attr,
+	NULL,
+};
+
+static struct attribute_group attr_group = {
+	.attrs = attrs,
+};
+
+/* CPU Hotplug */
+static struct kobject *events_kobj;
+
+static ssize_t show_cpu_hotplug(struct kobject *kobj,
+					struct kobj_attribute *attr, char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "\n");
+}
+static struct kobj_attribute cpu_hotplug_attr =
+__ATTR(cpu_hotplug, 0444, show_cpu_hotplug, NULL);
+
+static struct attribute *events_attrs[] = {
+	&cpu_hotplug_attr.attr,
+	NULL,
+};
+
+static struct attribute_group events_attr_group = {
+	.attrs = events_attrs,
+};
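+
+/*
+ * The cpu_hotplug node carries no data of its own; userspace is expected
+ * to poll() it and re-read CPU state when sysfs_notify() fires from
+ * hotplug_notify().
+ */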
+/*******************************sysfs ends************************************/
+
+static unsigned int num_online_managed(struct cpumask *mask)
+{
+	struct cpumask tmp_mask;
+
+	cpumask_clear(&tmp_mask);
+	cpumask_and(&tmp_mask, mask, cpu_online_mask);
+
+	return cpumask_weight(&tmp_mask);
+}
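+
+/*
+ * Clamp every cpufreq policy update to the per-CPU min/max stored in
+ * cpu_stats (presumably populated by this driver's per-CPU min/max
+ * frequency parameters).
+ */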
+
+static int perf_adjust_notify(struct notifier_block *nb, unsigned long val,
+							void *data)
+{
+	struct cpufreq_policy *policy = data;
+	unsigned int cpu = policy->cpu;
+	struct cpu_status *cpu_st = &per_cpu(cpu_stats, cpu);
+	unsigned int min = cpu_st->min, max = cpu_st->max;
+
+	if (val != CPUFREQ_ADJUST)
+		return NOTIFY_OK;
+
+	pr_debug("msm_perf: CPU%u policy before: %u:%u kHz\n", cpu,
+						policy->min, policy->max);
+	pr_debug("msm_perf: CPU%u seting min:max %u:%u kHz\n", cpu, min, max);
+
+	cpufreq_verify_within_limits(policy, min, max);
+
+	pr_debug("msm_perf: CPU%u policy after: %u:%u kHz\n", cpu,
+						policy->min, policy->max);
+
+	return NOTIFY_OK;
+}
+
+static struct notifier_block perf_cpufreq_nb = {
+	.notifier_call = perf_adjust_notify,
+};
+
+static bool check_notify_status(void)
+{
+	int i;
+	struct cluster *cl;
+	bool any_change = false;
+	unsigned long flags;
+
+	for (i = 0; i < num_clusters; i++) {
+		cl = managed_clusters[i];
+		spin_lock_irqsave(&cl->iowait_lock, flags);
+		if (!any_change)
+			any_change = cl->io_change;
+		cl->io_change = false;
+		spin_unlock_irqrestore(&cl->iowait_lock, flags);
+
+		spin_lock_irqsave(&cl->mode_lock, flags);
+		if (!any_change)
+			any_change = cl->mode_change;
+		cl->mode_change = false;
+		spin_unlock_irqrestore(&cl->mode_lock, flags);
+
+		spin_lock_irqsave(&cl->perf_cl_peak_lock, flags);
+		if (!any_change)
+			any_change = cl->perf_cl_detect_state_change;
+		cl->perf_cl_detect_state_change = false;
+		spin_unlock_irqrestore(&cl->perf_cl_peak_lock, flags);
+	}
+
+	return any_change;
+}
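+
+/*
+ * Aggregate the per-cluster io_busy/mode/perf_cl_peak states into
+ * aggr_iobusy and aggr_mode, and raise a sysfs_notify() on the matching
+ * workload_modes attribute whenever an aggregate changes.
+ */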
+
+static int notify_userspace(void *data)
+{
+	unsigned int i, io, cpu_mode, perf_cl_peak_mode;
+
+	while (1) {
+		set_current_state(TASK_INTERRUPTIBLE);
+		if (!check_notify_status()) {
+			schedule();
+
+			if (kthread_should_stop())
+				break;
+		}
+		set_current_state(TASK_RUNNING);
+
+		io = 0;
+		cpu_mode = 0;
+		perf_cl_peak_mode = 0;
+		for (i = 0; i < num_clusters; i++) {
+			io |= managed_clusters[i]->cur_io_busy;
+			cpu_mode |= managed_clusters[i]->mode;
+			perf_cl_peak_mode |= managed_clusters[i]->perf_cl_peak;
+		}
+		if (io != aggr_iobusy) {
+			aggr_iobusy = io;
+			sysfs_notify(mode_kobj, NULL, "aggr_iobusy");
+			pr_debug("msm_perf: Notifying IO: %u\n", aggr_iobusy);
+		}
+		if ((aggr_mode & (SINGLE | MULTI)) != cpu_mode) {
+			aggr_mode &= ~(SINGLE | MULTI);
+			aggr_mode |= cpu_mode;
+			sysfs_notify(mode_kobj, NULL, "aggr_mode");
+			pr_debug("msm_perf: Notifying CPU mode:%u\n",
+								aggr_mode);
+		}
+		if ((aggr_mode & PERF_CL_PEAK) != perf_cl_peak_mode) {
+			aggr_mode &= ~(PERF_CL_PEAK);
+			aggr_mode |= perf_cl_peak_mode;
+			sysfs_notify(mode_kobj, NULL, "aggr_mode");
+			pr_debug("msm_perf: Notifying Gaming mode:%u\n",
+								aggr_mode);
+		}
+	}
+
+	return 0;
+}
+
+static void hotplug_notify(int action)
+{
+	unsigned long flags;
+
+	if (!events_group.init_success)
+		return;
+
+	if ((action == CPU_ONLINE) || (action == CPU_DEAD)) {
+		spin_lock_irqsave(&(events_group.cpu_hotplug_lock), flags);
+		events_group.cpu_hotplug = true;
+		spin_unlock_irqrestore(&(events_group.cpu_hotplug_lock), flags);
+		wake_up_process(events_notify_thread);
+	}
+}
+
+static int events_notify_userspace(void *data)
+{
+	unsigned long flags;
+	bool notify_change;
+
+	while (1) {
+		set_current_state(TASK_INTERRUPTIBLE);
+		spin_lock_irqsave(&(events_group.cpu_hotplug_lock), flags);
+
+		if (!events_group.cpu_hotplug) {
+			spin_unlock_irqrestore(&(events_group.cpu_hotplug_lock),
+									flags);
+
+			schedule();
+			if (kthread_should_stop())
+				break;
+			spin_lock_irqsave(&(events_group.cpu_hotplug_lock),
+									flags);
+		}
+
+		set_current_state(TASK_RUNNING);
+		notify_change = events_group.cpu_hotplug;
+		events_group.cpu_hotplug = false;
+		spin_unlock_irqrestore(&(events_group.cpu_hotplug_lock), flags);
+
+		if (notify_change)
+			sysfs_notify(events_kobj, NULL, "cpu_hotplug");
+	}
+
+	return 0;
+}
+
+static void check_cluster_iowait(struct cluster *cl, u64 now)
+{
+	struct load_stats *pcpu_st;
+	unsigned int i;
+	unsigned long flags;
+	unsigned int temp_iobusy;
+	u64 max_iowait = 0;
+
+	spin_lock_irqsave(&cl->iowait_lock, flags);
+
+	if (((now - cl->last_io_check_ts)
+		< (cl->timer_rate - LAST_IO_CHECK_TOL)) ||
+		!(workload_detect & IO_DETECT)) {
+		spin_unlock_irqrestore(&cl->iowait_lock, flags);
+		return;
+	}
+
+	temp_iobusy = cl->cur_io_busy;
+	for_each_cpu(i, cl->cpus) {
+		pcpu_st = &per_cpu(cpu_load_stats, i);
+		if ((now - pcpu_st->last_wallclock)
+			> (cl->timer_rate + LAST_UPDATE_TOL))
+			continue;
+		if (max_iowait < pcpu_st->last_iopercent)
+			max_iowait = pcpu_st->last_iopercent;
+	}
+
+	if (!cl->cur_io_busy) {
+		if (max_iowait > iowait_ceiling_pct) {
+			cl->iowait_enter_cycle_cnt++;
+			if (cl->iowait_enter_cycle_cnt >= io_enter_cycles) {
+				cl->cur_io_busy = 1;
+				cl->iowait_enter_cycle_cnt = 0;
+			}
+		} else {
+			cl->iowait_enter_cycle_cnt = 0;
+		}
+	} else {
+		if (max_iowait < iowait_floor_pct) {
+			cl->iowait_exit_cycle_cnt++;
+			if (cl->iowait_exit_cycle_cnt >= io_exit_cycles) {
+				cl->cur_io_busy = 0;
+				cl->iowait_exit_cycle_cnt = 0;
+			}
+		} else {
+			cl->iowait_exit_cycle_cnt = 0;
+		}
+	}
+
+	cl->last_io_check_ts = now;
+	trace_track_iowait(cpumask_first(cl->cpus), cl->iowait_enter_cycle_cnt,
+			cl->iowait_exit_cycle_cnt, cl->cur_io_busy, max_iowait);
+
+	if (temp_iobusy != cl->cur_io_busy) {
+		cl->io_change = true;
+		pr_debug("msm_perf: IO changed to %u\n", cl->cur_io_busy);
+	}
+
+	spin_unlock_irqrestore(&cl->iowait_lock, flags);
+	if (cl->io_change)
+		wake_up_process(notify_thread);
+}
+
+static void disable_timer(struct cluster *cl)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&cl->timer_lock, flags);
+
+	if (del_timer(&cl->mode_exit_timer)) {
+		trace_single_cycle_exit_timer_stop(cpumask_first(cl->cpus),
+			cl->single_enter_cycles, cl->single_enter_cycle_cnt,
+			cl->single_exit_cycles, cl->single_exit_cycle_cnt,
+			cl->multi_enter_cycles, cl->multi_enter_cycle_cnt,
+			cl->multi_exit_cycles, cl->multi_exit_cycle_cnt,
+			cl->timer_rate, cl->mode);
+	}
+
+	spin_unlock_irqrestore(&cl->timer_lock, flags);
+}
+
+static void start_timer(struct cluster *cl)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&cl->timer_lock, flags);
+	if ((cl->mode & SINGLE) && !timer_pending(&cl->mode_exit_timer)) {
+		/* Set the exit timer for the cluster since none is pending */
+		cl->mode_exit_timer.expires = get_jiffies_64() +
+		usecs_to_jiffies(cl->single_exit_cycles * cl->timer_rate);
+		cl->mode_exit_timer.data = cpumask_first(cl->cpus);
+		add_timer(&cl->mode_exit_timer);
+		trace_single_cycle_exit_timer_start(cpumask_first(cl->cpus),
+			cl->single_enter_cycles, cl->single_enter_cycle_cnt,
+			cl->single_exit_cycles, cl->single_exit_cycle_cnt,
+			cl->multi_enter_cycles, cl->multi_enter_cycle_cnt,
+			cl->multi_exit_cycles, cl->multi_exit_cycle_cnt,
+			cl->timer_rate, cl->mode);
+	}
+	spin_unlock_irqrestore(&cl->timer_lock, flags);
+}
+
+static void disable_perf_cl_peak_timer(struct cluster *cl)
+{
+	if (del_timer(&cl->perf_cl_peak_mode_exit_timer)) {
+		trace_perf_cl_peak_exit_timer_stop(cpumask_first(cl->cpus),
+			cl->perf_cl_peak_enter_cycles,
+			cl->perf_cl_peak_enter_cycle_cnt,
+			cl->perf_cl_peak_exit_cycles,
+			cl->perf_cl_peak_exit_cycle_cnt,
+			cl->timer_rate, cl->mode);
+	}
+}
+
+static void start_perf_cl_peak_timer(struct cluster *cl)
+{
+	if ((cl->mode & PERF_CL_PEAK) &&
+		!timer_pending(&cl->perf_cl_peak_mode_exit_timer)) {
+		/* Set the exit timer for the cluster since none is pending */
+		cl->perf_cl_peak_mode_exit_timer.expires = get_jiffies_64() +
+		usecs_to_jiffies(cl->perf_cl_peak_exit_cycles * cl->timer_rate);
+		cl->perf_cl_peak_mode_exit_timer.data = cpumask_first(cl->cpus);
+		add_timer(&cl->perf_cl_peak_mode_exit_timer);
+		trace_perf_cl_peak_exit_timer_start(cpumask_first(cl->cpus),
+			cl->perf_cl_peak_enter_cycles,
+			cl->perf_cl_peak_enter_cycle_cnt,
+			cl->perf_cl_peak_exit_cycles,
+			cl->perf_cl_peak_exit_cycle_cnt,
+			cl->timer_rate, cl->mode);
+	}
+}
+
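+/* Match any input device that reports multitouch X/Y absolute coordinates */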
+static const struct input_device_id msm_perf_input_ids[] = {
+	{
+		.flags = INPUT_DEVICE_ID_MATCH_EVBIT,
+		.evbit = {BIT_MASK(EV_ABS)},
+		.absbit = { [BIT_WORD(ABS_MT_POSITION_X)] =
+			BIT_MASK(ABS_MT_POSITION_X) |
+			BIT_MASK(ABS_MT_POSITION_Y)},
+	},
+
+	{},
+};
+
+static void msm_perf_input_event_handler(struct input_handle *handle,
+					unsigned int type,
+					unsigned int code,
+					int value)
+{
+	if (type != EV_ABS)
+		return;
+
+	switch (code) {
+	case ABS_MT_POSITION_X:
+		ip_evts->evt_x_cnt++;
+		break;
+	case ABS_MT_POSITION_Y:
+		ip_evts->evt_y_cnt++;
+		break;
+
+	case ABS_MT_DISTANCE:
+		break;
+
+	case ABS_MT_PRESSURE:
+		break;
+
+	default:
+		break;
+	}
+}
+
+static int msm_perf_input_connect(struct input_handler *handler,
+				struct input_dev *dev,
+				const struct input_device_id *id)
+{
+	int rc;
+	struct input_handle *handle;
+
+	handle = kzalloc(sizeof(*handle), GFP_KERNEL);
+	if (!handle)
+		return -ENOMEM;
+
+	handle->dev = dev;
+	handle->handler = handler;
+	handle->name = handler->name;
+
+	rc = input_register_handle(handle);
+	if (rc) {
+		pr_err("Failed to register handle\n");
+		goto error;
+	}
+
+	rc = input_open_device(handle);
+	if (rc) {
+		pr_err("Failed to open device\n");
+		goto error_unregister;
+	}
+	return 0;
+
+error_unregister:
+	input_unregister_handle(handle);
+error:
+	kfree(handle);
+	return rc;
+}
+
+static void  msm_perf_input_disconnect(struct input_handle *handle)
+{
+	input_close_device(handle);
+	input_unregister_handle(handle);
+	kfree(handle);
+}
+
+static void unregister_input_handler(void)
+{
+	if (handler != NULL) {
+		input_unregister_handler(handler);
+		input_events_handler_registered = false;
+	}
+}
+
+static int register_input_handler(void)
+{
+	int rc;
+
+	if (handler == NULL) {
+		handler = kzalloc(sizeof(*handler), GFP_KERNEL);
+		if (!handler)
+			return -ENOMEM;
+		handler->event = msm_perf_input_event_handler;
+		handler->connect = msm_perf_input_connect;
+		handler->disconnect = msm_perf_input_disconnect;
+		handler->name = "msm_perf";
+		handler->id_table = msm_perf_input_ids;
+		handler->private = NULL;
+	}
+	rc = input_register_handler(handler);
+	if (rc) {
+		pr_err("Unable to register the input handler for msm_perf\n");
+		kfree(handler);
+		/* avoid reusing the freed handler on a later register call */
+		handler = NULL;
+	} else {
+		input_events_handler_registered = true;
+	}
+	return rc;
+}
+
+static void check_perf_cl_peak_load(struct cluster *cl, u64 now)
+{
+	struct load_stats *pcpu_st;
+	unsigned int i, ret_mode, max_load = 0;
+	unsigned int total_load = 0, cpu_cnt = 0;
+	unsigned long flags;
+	bool cpu_of_cluster_zero = true;
+
+	spin_lock_irqsave(&cl->perf_cl_peak_lock, flags);
+
+	cpu_of_cluster_zero = !cpumask_first(cl->cpus);
+	/*
+	 * If the time since the last check is shorter than timer_rate minus
+	 * the load-check tolerance (18ms), OR perf_cl_peak detection is not
+	 * enabled, OR the first CPU of the cluster is CPU 0 (LVT), do
+	 * nothing and return: only the SLVT cluster is of interest here.
+	 */
+	if (((now - cl->last_perf_cl_check_ts)
+		< (cl->timer_rate - LAST_LD_CHECK_TOL)) ||
+		!(workload_detect & PERF_CL_PEAK_DETECT) ||
+		cpu_of_cluster_zero) {
+		spin_unlock_irqrestore(&cl->perf_cl_peak_lock, flags);
+		return;
+	}
+	for_each_cpu(i, cl->cpus) {
+		pcpu_st = &per_cpu(cpu_load_stats, i);
+		if ((now - pcpu_st->last_wallclock)
+			> (cl->timer_rate + LAST_UPDATE_TOL))
+			continue;
+		if (pcpu_st->cpu_load > max_load)
+			max_load = pcpu_st->cpu_load;
+		/*
+		 * Save the most recent frequency for this CPU of the
+		 * cluster, as reported by the cpufreq transition notifier
+		 * callback.
+		 */
+		cl->current_freq = pcpu_st->freq;
+	}
+	ret_mode = cl->perf_cl_peak;
+
+	if (!(cl->perf_cl_peak & PERF_CL_PEAK)) {
+		if (max_load >= cl->perf_cl_peak_enter_load &&
+			freq_greater_than_threshold(cl,
+				cpumask_first(cl->cpus))) {
+			/*
+			 * Reset the event count for the first perf_cl_peak
+			 * cycle we detect
+			 */
+			if (!cl->perf_cl_peak_enter_cycle_cnt)
+				ip_evts->evt_x_cnt = ip_evts->evt_y_cnt = 0;
+			cl->perf_cl_peak_enter_cycle_cnt++;
+			if (cl->perf_cl_peak_enter_cycle_cnt
+				>= cl->perf_cl_peak_enter_cycles) {
+				if (input_events_greater_than_threshold())
+					ret_mode |= PERF_CL_PEAK;
+				cl->perf_cl_peak_enter_cycle_cnt = 0;
+			}
+		} else {
+			cl->perf_cl_peak_enter_cycle_cnt = 0;
+			/* Reset the event count */
+			ip_evts->evt_x_cnt = ip_evts->evt_y_cnt = 0;
+		}
+	} else {
+		if (max_load >= cl->perf_cl_peak_exit_load &&
+			freq_greater_than_threshold(cl,
+				cpumask_first(cl->cpus))) {
+			cl->perf_cl_peak_exit_cycle_cnt = 0;
+			disable_perf_cl_peak_timer(cl);
+		} else {
+			start_perf_cl_peak_timer(cl);
+			cl->perf_cl_peak_exit_cycle_cnt++;
+			if (cl->perf_cl_peak_exit_cycle_cnt
+				>= cl->perf_cl_peak_exit_cycles) {
+				ret_mode &= ~PERF_CL_PEAK;
+				cl->perf_cl_peak_exit_cycle_cnt = 0;
+				disable_perf_cl_peak_timer(cl);
+			}
+		}
+	}
+
+	cl->last_perf_cl_check_ts = now;
+	if (ret_mode != cl->perf_cl_peak) {
+		pr_debug("msm_perf: Mode changed to %u\n", ret_mode);
+		cl->perf_cl_peak = ret_mode;
+		cl->perf_cl_detect_state_change = true;
+	}
+
+	trace_cpu_mode_detect(cpumask_first(cl->cpus), max_load,
+		cl->single_enter_cycle_cnt, cl->single_exit_cycle_cnt,
+		total_load, cl->multi_enter_cycle_cnt,
+		cl->multi_exit_cycle_cnt, cl->perf_cl_peak_enter_cycle_cnt,
+		cl->perf_cl_peak_exit_cycle_cnt, cl->mode, cpu_cnt);
+
+	spin_unlock_irqrestore(&cl->perf_cl_peak_lock, flags);
+
+	if (cl->perf_cl_detect_state_change)
+		wake_up_process(notify_thread);
+}
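+
+/*
+ * SINGLE/MULTI mode detection uses the same hysteresis scheme: a mode is
+ * entered only after its load threshold holds for the configured number of
+ * consecutive sampling cycles, and exited only after the exit condition
+ * holds for its cycle count (or the exit timer fires first).
+ */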
+
+static void check_cpu_load(struct cluster *cl, u64 now)
+{
+	struct load_stats *pcpu_st;
+	unsigned int i, max_load = 0, total_load = 0, ret_mode, cpu_cnt = 0;
+	unsigned int total_load_ceil, total_load_floor;
+	unsigned long flags;
+
+	spin_lock_irqsave(&cl->mode_lock, flags);
+
+	if (((now - cl->last_mode_check_ts)
+		< (cl->timer_rate - LAST_LD_CHECK_TOL)) ||
+		!(workload_detect & MODE_DETECT)) {
+		spin_unlock_irqrestore(&cl->mode_lock, flags);
+		return;
+	}
+
+	for_each_cpu(i, cl->cpus) {
+		pcpu_st = &per_cpu(cpu_load_stats, i);
+		if ((now - pcpu_st->last_wallclock)
+			> (cl->timer_rate + LAST_UPDATE_TOL))
+			continue;
+		if (pcpu_st->cpu_load > max_load)
+			max_load = pcpu_st->cpu_load;
+		total_load += pcpu_st->cpu_load;
+		cpu_cnt++;
+	}
+
+	if (cpu_cnt > 1) {
+		total_load_ceil = cl->pcpu_multi_enter_load * cpu_cnt;
+		total_load_floor = cl->pcpu_multi_exit_load * cpu_cnt;
+	} else {
+		total_load_ceil = UINT_MAX;
+		total_load_floor = UINT_MAX;
+	}
+
+	ret_mode = cl->mode;
+	if (!(cl->mode & SINGLE)) {
+		if (max_load >= cl->single_enter_load) {
+			cl->single_enter_cycle_cnt++;
+			if (cl->single_enter_cycle_cnt
+				>= cl->single_enter_cycles) {
+				ret_mode |= SINGLE;
+				cl->single_enter_cycle_cnt = 0;
+			}
+		} else {
+			cl->single_enter_cycle_cnt = 0;
+		}
+	} else {
+		if (max_load < cl->single_exit_load) {
+			start_timer(cl);
+			cl->single_exit_cycle_cnt++;
+			if (cl->single_exit_cycle_cnt
+				>= cl->single_exit_cycles) {
+				ret_mode &= ~SINGLE;
+				cl->single_exit_cycle_cnt = 0;
+				disable_timer(cl);
+			}
+		} else {
+			cl->single_exit_cycle_cnt = 0;
+			disable_timer(cl);
+		}
+	}
+
+	if (!(cl->mode & MULTI)) {
+		if (total_load >= total_load_ceil) {
+			cl->multi_enter_cycle_cnt++;
+			if (cl->multi_enter_cycle_cnt
+				>= cl->multi_enter_cycles) {
+				ret_mode |= MULTI;
+				cl->multi_enter_cycle_cnt = 0;
+			}
+		} else {
+			cl->multi_enter_cycle_cnt = 0;
+		}
+	} else {
+		if (total_load < total_load_floor) {
+			cl->multi_exit_cycle_cnt++;
+			if (cl->multi_exit_cycle_cnt
+				>= cl->multi_exit_cycles) {
+				ret_mode &= ~MULTI;
+				cl->multi_exit_cycle_cnt = 0;
+			}
+		} else {
+			cl->multi_exit_cycle_cnt = 0;
+		}
+	}
+
+	cl->last_mode_check_ts = now;
+
+	if (ret_mode != cl->mode) {
+		cl->mode = ret_mode;
+		cl->mode_change = true;
+		pr_debug("msm_perf: Mode changed to %u\n", ret_mode);
+	}
+
+	trace_cpu_mode_detect(cpumask_first(cl->cpus), max_load,
+		cl->single_enter_cycle_cnt, cl->single_exit_cycle_cnt,
+		total_load, cl->multi_enter_cycle_cnt,
+		cl->multi_exit_cycle_cnt, cl->perf_cl_peak_enter_cycle_cnt,
+		cl->perf_cl_peak_exit_cycle_cnt, cl->mode, cpu_cnt);
+
+	spin_unlock_irqrestore(&cl->mode_lock, flags);
+
+	if (cl->mode_change)
+		wake_up_process(notify_thread);
+}
+
+static void check_workload_stats(unsigned int cpu, unsigned int rate, u64 now)
+{
+	struct cluster *cl = NULL;
+	unsigned int i;
+
+	for (i = 0; i < num_clusters; i++) {
+		if (cpumask_test_cpu(cpu, managed_clusters[i]->cpus)) {
+			cl = managed_clusters[i];
+			break;
+		}
+	}
+	if (cl == NULL)
+		return;
+
+	cl->timer_rate = rate;
+	check_cluster_iowait(cl, now);
+	check_cpu_load(cl, now);
+	check_perf_cl_peak_load(cl, now);
+}
+
+static int perf_govinfo_notify(struct notifier_block *nb, unsigned long val,
+								void *data)
+{
+	struct cpufreq_govinfo *gov_info = data;
+	unsigned int cpu = gov_info->cpu;
+	struct load_stats *cpu_st = &per_cpu(cpu_load_stats, cpu);
+	u64 now, cur_iowait, time_diff, iowait_diff;
+
+	if (!clusters_inited || !workload_detect)
+		return NOTIFY_OK;
+
+	cur_iowait = get_cpu_iowait_time_us(cpu, &now);
+	if (cur_iowait >= cpu_st->last_iowait)
+		iowait_diff = cur_iowait - cpu_st->last_iowait;
+	else
+		iowait_diff = 0;
+
+	if (now > cpu_st->last_wallclock)
+		time_diff = now - cpu_st->last_wallclock;
+	else
+		return NOTIFY_OK;
+
+	if (iowait_diff <= time_diff) {
+		iowait_diff *= 100;
+		cpu_st->last_iopercent = div64_u64(iowait_diff, time_diff);
+	} else {
+		cpu_st->last_iopercent = 100;
+	}
+
+	cpu_st->last_wallclock = now;
+	cpu_st->last_iowait = cur_iowait;
+	cpu_st->cpu_load = gov_info->load;
+
+	/*
+	 * Avoid deadlock if the governor notifier runs in the context of
+	 * the notify thread
+	 */
+	if (current == notify_thread)
+		return NOTIFY_OK;
+
+	check_workload_stats(cpu, gov_info->sampling_rate_us, now);
+
+	return NOTIFY_OK;
+}
+
+static int perf_cputrans_notify(struct notifier_block *nb, unsigned long val,
+								void *data)
+{
+	struct cpufreq_freqs *freq = data;
+	unsigned int cpu = freq->cpu;
+	unsigned long flags;
+	unsigned int i;
+	struct cluster *cl = NULL;
+	struct load_stats *cpu_st = &per_cpu(cpu_load_stats, cpu);
+
+	if (!clusters_inited || !workload_detect)
+		return NOTIFY_OK;
+
+	for (i = 0; i < num_clusters; i++) {
+		if (cpumask_test_cpu(cpu, managed_clusters[i]->cpus)) {
+			cl = managed_clusters[i];
+			break;
+		}
+	}
+	if (cl == NULL)
+		return NOTIFY_OK;
+
+	if (val == CPUFREQ_POSTCHANGE) {
+		spin_lock_irqsave(&cl->perf_cl_peak_lock, flags);
+		cpu_st->freq = freq->new;
+		spin_unlock_irqrestore(&cl->perf_cl_peak_lock, flags);
+	}
+	/*
+	 * Avoid deadlock if the transition notifier runs in the context of
+	 * the notify thread
+	 */
+	if (current == notify_thread)
+		return NOTIFY_OK;
+
+	return NOTIFY_OK;
+}
+
+static struct notifier_block perf_govinfo_nb = {
+	.notifier_call = perf_govinfo_notify,
+};
+static struct notifier_block perf_cputransitions_nb = {
+	.notifier_call = perf_cputrans_notify,
+};
+
+/*
+ * Attempt to offline CPUs based on their power cost.
+ * CPUs with higher power costs are offlined first.
+ */
+static int __ref rm_high_pwr_cost_cpus(struct cluster *cl)
+{
+	unsigned int cpu, i;
+	struct cpu_pwr_stats *per_cpu_info = get_cpu_pwr_stats();
+	struct cpu_pstate_pwr *costs;
+	unsigned int *pcpu_pwr;
+	unsigned int max_cost_cpu, max_cost;
+	int any_cpu = -1;
+
+	if (!per_cpu_info)
+		return -ENOSYS;
+
+	for_each_cpu(cpu, cl->cpus) {
+		costs = per_cpu_info[cpu].ptable;
+		if (!costs || !costs[0].freq)
+			continue;
+
+		i = 1;
+		while (costs[i].freq)
+			i++;
+
+		pcpu_pwr = &per_cpu(cpu_power_cost, cpu);
+		*pcpu_pwr = costs[i - 1].power;
+		any_cpu = (int)cpu;
+		pr_debug("msm_perf: CPU:%d Power:%u\n", cpu, *pcpu_pwr);
+	}
+
+	if (any_cpu < 0)
+		return -EAGAIN;
+
+	for (i = 0; i < cpumask_weight(cl->cpus); i++) {
+		max_cost = 0;
+		max_cost_cpu = cpumask_first(cl->cpus);
+
+		for_each_cpu(cpu, cl->cpus) {
+			pcpu_pwr = &per_cpu(cpu_power_cost, cpu);
+			if (max_cost < *pcpu_pwr) {
+				max_cost = *pcpu_pwr;
+				max_cost_cpu = cpu;
+			}
+		}
+
+		if (!cpu_online(max_cost_cpu))
+			goto end;
+
+		pr_debug("msm_perf: Offlining CPU%d Power:%d\n", max_cost_cpu,
+								max_cost);
+		cpumask_set_cpu(max_cost_cpu, cl->offlined_cpus);
+		lock_device_hotplug();
+		if (device_offline(get_cpu_device(max_cost_cpu))) {
+			cpumask_clear_cpu(max_cost_cpu, cl->offlined_cpus);
+			pr_debug("msm_perf: Offlining CPU%d failed\n",
+								max_cost_cpu);
+		}
+		unlock_device_hotplug();
+
+end:
+		pcpu_pwr = &per_cpu(cpu_power_cost, max_cost_cpu);
+		*pcpu_pwr = 0;
+		if (num_online_managed(cl->cpus) <= cl->max_cpu_request)
+			break;
+	}
+
+	if (num_online_managed(cl->cpus) > cl->max_cpu_request)
+		return -EAGAIN;
+	else
+		return 0;
+}
+
+/*
+ * try_hotplug tries to online/offline cores based on the current requirement.
+ * It loops through the currently managed CPUs and tries to online/offline
+ * them until the max_cpu_request criteria is met.
+ */
+static void __ref try_hotplug(struct cluster *data)
+{
+	unsigned int i;
+
+	if (!clusters_inited)
+		return;
+
+	pr_debug("msm_perf: Trying hotplug...%d:%d\n",
+			num_online_managed(data->cpus), num_online_cpus());
+
+	mutex_lock(&managed_cpus_lock);
+	if (num_online_managed(data->cpus) > data->max_cpu_request) {
+		if (!rm_high_pwr_cost_cpus(data)) {
+			mutex_unlock(&managed_cpus_lock);
+			return;
+		}
+
+		/*
+		 * If power-aware offlining fails because power cost info is
+		 * unavailable, fall back to the original implementation.
+		 * (i is unsigned, so the i < num_present_cpus() test also
+		 * terminates the loop once i wraps past zero.)
+		 */
+		for (i = num_present_cpus() - 1; i >= 0 &&
+						i < num_present_cpus(); i--) {
+			if (!cpumask_test_cpu(i, data->cpus) || !cpu_online(i))
+				continue;
+
+			pr_debug("msm_perf: Offlining CPU%d\n", i);
+			cpumask_set_cpu(i, data->offlined_cpus);
+			lock_device_hotplug();
+			if (device_offline(get_cpu_device(i))) {
+				cpumask_clear_cpu(i, data->offlined_cpus);
+				pr_debug("msm_perf: Offlining CPU%d failed\n",
+									i);
+				unlock_device_hotplug();
+				continue;
+			}
+			unlock_device_hotplug();
+			if (num_online_managed(data->cpus) <=
+							data->max_cpu_request)
+				break;
+		}
+	} else {
+		for_each_cpu(i, data->cpus) {
+			if (cpu_online(i))
+				continue;
+			pr_debug("msm_perf: Onlining CPU%d\n", i);
+			lock_device_hotplug();
+			if (device_online(get_cpu_device(i))) {
+				pr_debug("msm_perf: Onlining CPU%d failed\n",
+									i);
+				unlock_device_hotplug();
+				continue;
+			}
+			unlock_device_hotplug();
+			cpumask_clear_cpu(i, data->offlined_cpus);
+			if (num_online_managed(data->cpus) >=
+							data->max_cpu_request)
+				break;
+		}
+	}
+	mutex_unlock(&managed_cpus_lock);
+}
+
+static void __ref release_cluster_control(struct cpumask *off_cpus)
+{
+	int cpu;
+
+	for_each_cpu(cpu, off_cpus) {
+		pr_debug("msm_perf: Release CPU %d\n", cpu);
+		lock_device_hotplug();
+		if (!device_online(get_cpu_device(cpu)))
+			cpumask_clear_cpu(cpu, off_cpus);
+		unlock_device_hotplug();
+	}
+}
+
+/* Work to evaluate the current online CPU status and hotplug CPUs as needed */
+static void check_cluster_status(struct work_struct *work)
+{
+	int i;
+	struct cluster *i_cl;
+
+	for (i = 0; i < num_clusters; i++) {
+		i_cl = managed_clusters[i];
+
+		if (cpumask_empty(i_cl->cpus))
+			continue;
+
+		if (i_cl->max_cpu_request < 0) {
+			if (!cpumask_empty(i_cl->offlined_cpus))
+				release_cluster_control(i_cl->offlined_cpus);
+			continue;
+		}
+
+		if (num_online_managed(i_cl->cpus) !=
+					i_cl->max_cpu_request)
+			try_hotplug(i_cl);
+	}
+}
+
+static int __ref msm_performance_cpu_callback(struct notifier_block *nfb,
+		unsigned long action, void *hcpu)
+{
+	uint32_t cpu = (uintptr_t)hcpu;
+	unsigned int i;
+	struct cluster *i_cl = NULL;
+
+	hotplug_notify(action);
+
+	if (!clusters_inited)
+		return NOTIFY_OK;
+
+	for (i = 0; i < num_clusters; i++) {
+		if (managed_clusters[i]->cpus == NULL)
+			return NOTIFY_OK;
+		if (cpumask_test_cpu(cpu, managed_clusters[i]->cpus)) {
+			i_cl = managed_clusters[i];
+			break;
+		}
+	}
+
+	if (i_cl == NULL)
+		return NOTIFY_OK;
+
+	if (action == CPU_UP_PREPARE || action == CPU_UP_PREPARE_FROZEN) {
+		/*
+		 * Prevent onlining of a managed CPU if max_cpu criteria is
+		 * already satisfied
+		 */
+		if (i_cl->offlined_cpus == NULL)
+			return NOTIFY_OK;
+		if (i_cl->max_cpu_request <=
+					num_online_managed(i_cl->cpus)) {
+			pr_debug("msm_perf: Prevent CPU%d onlining\n", cpu);
+			cpumask_set_cpu(cpu, i_cl->offlined_cpus);
+			return NOTIFY_BAD;
+		}
+		cpumask_clear_cpu(cpu, i_cl->offlined_cpus);
+
+	} else if (action == CPU_DEAD) {
+		if (i_cl->offlined_cpus == NULL)
+			return NOTIFY_OK;
+		if (cpumask_test_cpu(cpu, i_cl->offlined_cpus))
+			return NOTIFY_OK;
+		/*
+		 * Schedule a re-evaluation to check if any more CPUs can be
+		 * brought online to meet the max_cpu_request requirement. This
+		 * work is delayed to account for CPU hotplug latencies
+		 */
+		if (schedule_delayed_work(&evaluate_hotplug_work, 0)) {
+			trace_reevaluate_hotplug(cpumask_bits(i_cl->cpus)[0],
+							i_cl->max_cpu_request);
+			pr_debug("msm_perf: Re-evaluation scheduled %d\n", cpu);
+		} else {
+			pr_debug("msm_perf: Work scheduling failed %d\n", cpu);
+		}
+	}
+
+	return NOTIFY_OK;
+}
+
+static struct notifier_block __refdata msm_performance_cpu_notifier = {
+	.notifier_call = msm_performance_cpu_callback,
+};
+
+static void single_mod_exit_timer(unsigned long data)
+{
+	int i;
+	struct cluster *i_cl = NULL;
+	unsigned long flags;
+
+	if (!clusters_inited)
+		return;
+
+	for (i = 0; i < num_clusters; i++) {
+		if (cpumask_test_cpu(data,
+			managed_clusters[i]->cpus)) {
+			i_cl = managed_clusters[i];
+			break;
+		}
+	}
+
+	if (i_cl == NULL)
+		return;
+
+	spin_lock_irqsave(&i_cl->mode_lock, flags);
+	if (i_cl->mode & SINGLE) {
+		/* Disable SINGLE mode and exit since the timer expired */
+		i_cl->mode = i_cl->mode & ~SINGLE;
+		i_cl->single_enter_cycle_cnt = 0;
+		i_cl->single_exit_cycle_cnt = 0;
+		trace_single_mode_timeout(cpumask_first(i_cl->cpus),
+			i_cl->single_enter_cycles, i_cl->single_enter_cycle_cnt,
+			i_cl->single_exit_cycles, i_cl->single_exit_cycle_cnt,
+			i_cl->multi_enter_cycles, i_cl->multi_enter_cycle_cnt,
+			i_cl->multi_exit_cycles, i_cl->multi_exit_cycle_cnt,
+			i_cl->timer_rate, i_cl->mode);
+	}
+	spin_unlock_irqrestore(&i_cl->mode_lock, flags);
+	wake_up_process(notify_thread);
+}
+
+static void perf_cl_peak_mod_exit_timer(unsigned long data)
+{
+	int i;
+	struct cluster *i_cl = NULL;
+	unsigned long flags;
+
+	if (!clusters_inited)
+		return;
+
+	for (i = 0; i < num_clusters; i++) {
+		if (cpumask_test_cpu(data,
+			managed_clusters[i]->cpus)) {
+			i_cl = managed_clusters[i];
+			break;
+		}
+	}
+
+	if (i_cl == NULL)
+		return;
+
+	spin_lock_irqsave(&i_cl->perf_cl_peak_lock, flags);
+	if (i_cl->perf_cl_peak & PERF_CL_PEAK) {
+		/* Disable PERF_CL_PEAK mode and exit since the timer expired */
+		i_cl->perf_cl_peak = i_cl->perf_cl_peak & ~PERF_CL_PEAK;
+		i_cl->perf_cl_peak_enter_cycle_cnt = 0;
+		i_cl->perf_cl_peak_exit_cycle_cnt = 0;
+	}
+	spin_unlock_irqrestore(&i_cl->perf_cl_peak_lock, flags);
+	wake_up_process(notify_thread);
+}
+
+static int init_cluster_control(void)
+{
+	unsigned int i;
+	int ret = 0;
+	struct kobject *module_kobj;
+
+	managed_clusters = kcalloc(num_clusters, sizeof(struct cluster *),
+								GFP_KERNEL);
+	if (!managed_clusters)
+		return -ENOMEM;
+	for (i = 0; i < num_clusters; i++) {
+		managed_clusters[i] = kcalloc(1, sizeof(struct cluster),
+								GFP_KERNEL);
+		if (!managed_clusters[i]) {
+			pr_err("msm_perf:Cluster %u mem alloc failed\n", i);
+			ret = -ENOMEM;
+			goto error;
+		}
+		if (!alloc_cpumask_var(&managed_clusters[i]->cpus,
+		     GFP_KERNEL)) {
+			pr_err("msm_perf:Cluster %u cpu alloc failed\n",
+			       i);
+			ret = -ENOMEM;
+			goto error;
+		}
+		if (!alloc_cpumask_var(&managed_clusters[i]->offlined_cpus,
+		     GFP_KERNEL)) {
+			pr_err("msm_perf:Cluster %u off_cpus alloc failed\n",
+			       i);
+			ret = -ENOMEM;
+			goto error;
+		}
+
+		managed_clusters[i]->max_cpu_request = -1;
+		managed_clusters[i]->single_enter_load = DEF_SINGLE_ENT;
+		managed_clusters[i]->single_exit_load = DEF_SINGLE_EX;
+		managed_clusters[i]->single_enter_cycles
+						= DEF_SINGLE_ENTER_CYCLE;
+		managed_clusters[i]->single_exit_cycles
+						= DEF_SINGLE_EXIT_CYCLE;
+		managed_clusters[i]->pcpu_multi_enter_load
+						= DEF_PCPU_MULTI_ENT;
+		managed_clusters[i]->pcpu_multi_exit_load = DEF_PCPU_MULTI_EX;
+		managed_clusters[i]->multi_enter_cycles = DEF_MULTI_ENTER_CYCLE;
+		managed_clusters[i]->multi_exit_cycles = DEF_MULTI_EXIT_CYCLE;
+		managed_clusters[i]->perf_cl_peak_enter_load =
+						DEF_PERF_CL_PEAK_ENT;
+		managed_clusters[i]->perf_cl_peak_exit_load =
+						DEF_PERF_CL_PEAK_EX;
+		managed_clusters[i]->perf_cl_peak_enter_cycles =
+						DEF_PERF_CL_PEAK_ENTER_CYCLE;
+		managed_clusters[i]->perf_cl_peak_exit_cycles =
+						DEF_PERF_CL_PEAK_EXIT_CYCLE;
+
+		/* Initialize trigger threshold */
+		thr.perf_cl_trigger_threshold = CLUSTER_1_THRESHOLD_FREQ;
+		thr.pwr_cl_trigger_threshold = CLUSTER_0_THRESHOLD_FREQ;
+		thr.ip_evt_threshold = INPUT_EVENT_CNT_THRESHOLD;
+		spin_lock_init(&(managed_clusters[i]->iowait_lock));
+		spin_lock_init(&(managed_clusters[i]->mode_lock));
+		spin_lock_init(&(managed_clusters[i]->timer_lock));
+		spin_lock_init(&(managed_clusters[i]->perf_cl_peak_lock));
+		init_timer(&managed_clusters[i]->mode_exit_timer);
+		managed_clusters[i]->mode_exit_timer.function =
+			single_mod_exit_timer;
+		init_timer(&managed_clusters[i]->perf_cl_peak_mode_exit_timer);
+		managed_clusters[i]->perf_cl_peak_mode_exit_timer.function =
+			perf_cl_peak_mod_exit_timer;
+
+	}
+	ip_evts = kcalloc(1, sizeof(struct input_events), GFP_KERNEL);
+	if (!ip_evts) {
+		ret = -ENOMEM;
+		goto error;
+	}
+
+	INIT_DELAYED_WORK(&evaluate_hotplug_work, check_cluster_status);
+	mutex_init(&managed_cpus_lock);
+
+	module_kobj = kset_find_obj(module_kset, KBUILD_MODNAME);
+	if (!module_kobj) {
+		pr_err("msm_perf: Couldn't find module kobject\n");
+		ret = -ENOENT;
+		goto error;
+	}
+	mode_kobj = kobject_create_and_add("workload_modes", module_kobj);
+	if (!mode_kobj) {
+		pr_err("msm_perf: Failed to add mode_kobj\n");
+		ret = -ENOMEM;
+		kobject_put(module_kobj);
+		goto error;
+	}
+	ret = sysfs_create_group(mode_kobj, &attr_group);
+	if (ret) {
+		pr_err("msm_perf: Failed to create sysfs\n");
+		kobject_put(module_kobj);
+		kobject_put(mode_kobj);
+		goto error;
+	}
+	notify_thread = kthread_run(notify_userspace, NULL, "wrkld_notify");
+	clusters_inited = true;
+
+	return 0;
+
+error:
+	for (i = 0; i < num_clusters; i++) {
+		if (!managed_clusters[i])
+			break;
+		if (managed_clusters[i]->offlined_cpus)
+			free_cpumask_var(managed_clusters[i]->offlined_cpus);
+		if (managed_clusters[i]->cpus)
+			free_cpumask_var(managed_clusters[i]->cpus);
+		kfree(managed_clusters[i]);
+	}
+	kfree(managed_clusters);
+	return ret;
+}
+
+static int init_events_group(void)
+{
+	int ret;
+	struct kobject *module_kobj;
+
+	module_kobj = kset_find_obj(module_kset, KBUILD_MODNAME);
+	if (!module_kobj) {
+		pr_err("msm_perf: Couldn't find module kobject\n");
+		return -ENOENT;
+	}
+
+	events_kobj = kobject_create_and_add("events", module_kobj);
+	if (!events_kobj) {
+		pr_err("msm_perf: Failed to add events_kobj\n");
+		return -ENOMEM;
+	}
+
+	ret = sysfs_create_group(events_kobj, &events_attr_group);
+	if (ret) {
+		pr_err("msm_perf: Failed to create sysfs\n");
+		return ret;
+	}
+
+	spin_lock_init(&(events_group.cpu_hotplug_lock));
+	events_notify_thread = kthread_run(events_notify_userspace,
+					NULL, "msm_perf:events_notify");
+	if (IS_ERR(events_notify_thread))
+		return PTR_ERR(events_notify_thread);
+
+	events_group.init_success = true;
+
+	return 0;
+}
+
+static int __init msm_performance_init(void)
+{
+	unsigned int cpu;
+
+	cpufreq_register_notifier(&perf_cpufreq_nb, CPUFREQ_POLICY_NOTIFIER);
+	cpufreq_register_notifier(&perf_govinfo_nb, CPUFREQ_GOVINFO_NOTIFIER);
+	cpufreq_register_notifier(&perf_cputransitions_nb,
+					CPUFREQ_TRANSITION_NOTIFIER);
+
+	for_each_present_cpu(cpu)
+		per_cpu(cpu_stats, cpu).max = UINT_MAX;
+
+	register_cpu_notifier(&msm_performance_cpu_notifier);
+
+	init_events_group();
+
+	return 0;
+}
+late_initcall(msm_performance_init);
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/soc/qcom/msm_rq_stats.c	2019-01-22 16:16:26.663275022 +0100
@@ -0,0 +1,396 @@
+/* Copyright (c) 2010-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/hrtimer.h>
+#include <linux/cpu.h>
+#include <linux/kobject.h>
+#include <linux/sysfs.h>
+#include <linux/notifier.h>
+#include <linux/slab.h>
+#include <linux/workqueue.h>
+#include <linux/sched.h>
+#include <linux/spinlock.h>
+#include <linux/rq_stats.h>
+#include <linux/cpufreq.h>
+#include <linux/kernel_stat.h>
+#include <linux/tick.h>
+#include <asm/smp_plat.h>
+#include <linux/suspend.h>
+
+#define MAX_LONG_SIZE 24
+#define DEFAULT_RQ_POLL_JIFFIES 1
+#define DEFAULT_DEF_TIMER_JIFFIES 5
+
+struct notifier_block freq_transition;
+struct notifier_block cpu_hotplug;
+
+struct cpu_load_data {
+	cputime64_t prev_cpu_idle;
+	cputime64_t prev_cpu_wall;
+	unsigned int avg_load_maxfreq;
+	unsigned int samples;
+	unsigned int window_size;
+	unsigned int cur_freq;
+	unsigned int policy_max;
+	cpumask_var_t related_cpus;
+	struct mutex cpu_load_mutex;
+};
+
+static DEFINE_PER_CPU(struct cpu_load_data, cpuload);
+
+static int update_average_load(unsigned int freq, unsigned int cpu)
+{
+	struct cpu_load_data *pcpu = &per_cpu(cpuload, cpu);
+	cputime64_t cur_wall_time, cur_idle_time;
+	unsigned int idle_time, wall_time;
+	unsigned int cur_load, load_at_max_freq;
+
+	cur_idle_time = get_cpu_idle_time(cpu, &cur_wall_time, 0);
+
+	wall_time = (unsigned int) (cur_wall_time - pcpu->prev_cpu_wall);
+	pcpu->prev_cpu_wall = cur_wall_time;
+
+	idle_time = (unsigned int) (cur_idle_time - pcpu->prev_cpu_idle);
+	pcpu->prev_cpu_idle = cur_idle_time;
+
+	if (unlikely(wall_time <= 0 || wall_time < idle_time))
+		return 0;
+
+	cur_load = 100 * (wall_time - idle_time) / wall_time;
+
+	/* Calculate the load scaled to the CPU's maximum frequency */
+	load_at_max_freq = (cur_load * freq) / pcpu->policy_max;
+
+	if (!pcpu->avg_load_maxfreq) {
+		/* This is the first sample in this window */
+		pcpu->avg_load_maxfreq = load_at_max_freq;
+		pcpu->window_size = wall_time;
+	} else {
+		/*
+		 * There is already a sample available in this window.
+		 * Compute weighted average with prev entry, so that we get
+		 * the precise weighted load.
+		 */
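+		/*
+		 * e.g. a 10 ms sample at load 60 followed by a 30 ms sample
+		 * at load 20 averages to (60*10 + 20*30)/40 = 30.
+		 */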
+		pcpu->avg_load_maxfreq =
+			((pcpu->avg_load_maxfreq * pcpu->window_size) +
+			(load_at_max_freq * wall_time)) /
+			(wall_time + pcpu->window_size);
+
+		pcpu->window_size += wall_time;
+	}
+
+	return 0;
+}
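+
+/*
+ * Sum the per-CPU frequency-normalized loads and reset each CPU's window;
+ * the result backs the cpu_normalized_load sysfs node created below.
+ */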
+
+static unsigned int report_load_at_max_freq(void)
+{
+	int cpu;
+	struct cpu_load_data *pcpu;
+	unsigned int total_load = 0;
+
+	for_each_online_cpu(cpu) {
+		pcpu = &per_cpu(cpuload, cpu);
+		mutex_lock(&pcpu->cpu_load_mutex);
+		update_average_load(pcpu->cur_freq, cpu);
+		total_load += pcpu->avg_load_maxfreq;
+		pcpu->avg_load_maxfreq = 0;
+		mutex_unlock(&pcpu->cpu_load_mutex);
+	}
+	return total_load;
+}
+
+static int cpufreq_transition_handler(struct notifier_block *nb,
+			unsigned long val, void *data)
+{
+	struct cpufreq_freqs *freqs = data;
+	struct cpu_load_data *this_cpu = &per_cpu(cpuload, freqs->cpu);
+	int j;
+
+	switch (val) {
+	case CPUFREQ_POSTCHANGE:
+		for_each_cpu(j, this_cpu->related_cpus) {
+			struct cpu_load_data *pcpu = &per_cpu(cpuload, j);
+
+			mutex_lock(&pcpu->cpu_load_mutex);
+			update_average_load(freqs->old, j);
+			pcpu->cur_freq = freqs->new;
+			mutex_unlock(&pcpu->cpu_load_mutex);
+		}
+		break;
+	}
+	return 0;
+}
+
+static void update_related_cpus(void)
+{
+	unsigned int cpu;
+
+	for_each_cpu(cpu, cpu_online_mask) {
+		struct cpu_load_data *this_cpu = &per_cpu(cpuload, cpu);
+		struct cpufreq_policy cpu_policy;
+
+		cpufreq_get_policy(&cpu_policy, cpu);
+		cpumask_copy(this_cpu->related_cpus, cpu_policy.cpus);
+	}
+}
+
+static int cpu_hotplug_handler(struct notifier_block *nb,
+			unsigned long val, void *data)
+{
+	unsigned int cpu = (unsigned long)data;
+	struct cpu_load_data *this_cpu = &per_cpu(cpuload, cpu);
+
+	switch (val) {
+	case CPU_ONLINE:
+		if (!this_cpu->cur_freq)
+			this_cpu->cur_freq = cpufreq_quick_get(cpu);
+		update_related_cpus();
+		/* fall through */
+	case CPU_ONLINE_FROZEN:
+		this_cpu->avg_load_maxfreq = 0;
+	}
+
+	return NOTIFY_OK;
+}
+
+static int system_suspend_handler(struct notifier_block *nb,
+				unsigned long val, void *data)
+{
+	switch (val) {
+	case PM_POST_HIBERNATION:
+	case PM_POST_SUSPEND:
+	case PM_POST_RESTORE:
+		rq_info.hotplug_disabled = 0;
+		break;
+	case PM_HIBERNATION_PREPARE:
+	case PM_SUSPEND_PREPARE:
+		rq_info.hotplug_disabled = 1;
+		break;
+	default:
+		return NOTIFY_DONE;
+	}
+	return NOTIFY_OK;
+}
+
+static ssize_t hotplug_disable_show(struct kobject *kobj,
+		struct kobj_attribute *attr, char *buf)
+{
+	unsigned int val = rq_info.hotplug_disabled;
+
+	return snprintf(buf, MAX_LONG_SIZE, "%d\n", val);
+}
+
+static struct kobj_attribute hotplug_disabled_attr = __ATTR_RO(hotplug_disable);
+
+static void def_work_fn(struct work_struct *work)
+{
+	/* Notify polling threads on change of value */
+	sysfs_notify(rq_info.kobj, NULL, "def_timer_ms");
+}
+
+static ssize_t run_queue_avg_show(struct kobject *kobj,
+		struct kobj_attribute *attr, char *buf)
+{
+	unsigned int val = 0;
+	unsigned long flags = 0;
+
+	spin_lock_irqsave(&rq_lock, flags);
+	/* rq avg currently available only on one core */
+	val = rq_info.rq_avg;
+	rq_info.rq_avg = 0;
+	spin_unlock_irqrestore(&rq_lock, flags);
+
+	return snprintf(buf, PAGE_SIZE, "%d.%d\n", val/10, val%10);
+}
+
+static struct kobj_attribute run_queue_avg_attr = __ATTR_RO(run_queue_avg);
+
+static ssize_t show_run_queue_poll_ms(struct kobject *kobj,
+				      struct kobj_attribute *attr, char *buf)
+{
+	int ret = 0;
+	unsigned long flags = 0;
+
+	spin_lock_irqsave(&rq_lock, flags);
+	ret = snprintf(buf, MAX_LONG_SIZE, "%u\n",
+		       jiffies_to_msecs(rq_info.rq_poll_jiffies));
+	spin_unlock_irqrestore(&rq_lock, flags);
+
+	return ret;
+}
+
+static ssize_t store_run_queue_poll_ms(struct kobject *kobj,
+				       struct kobj_attribute *attr,
+				       const char *buf, size_t count)
+{
+	unsigned int val = 0;
+	unsigned long flags = 0;
+	static DEFINE_MUTEX(lock_poll_ms);
+
+	mutex_lock(&lock_poll_ms);
+
+	spin_lock_irqsave(&rq_lock, flags);
+	if (kstrtouint(buf, 0, &val))
+		count = -EINVAL;
+	else
+		rq_info.rq_poll_jiffies = msecs_to_jiffies(val);
+	spin_unlock_irqrestore(&rq_lock, flags);
+
+	mutex_unlock(&lock_poll_ms);
+
+	return count;
+}
+
+static struct kobj_attribute run_queue_poll_ms_attr =
+	__ATTR(run_queue_poll_ms, S_IWUSR | S_IRUSR, show_run_queue_poll_ms,
+			store_run_queue_poll_ms);
+
+static ssize_t show_def_timer_ms(struct kobject *kobj,
+		struct kobj_attribute *attr, char *buf)
+{
+	int64_t diff;
+	unsigned int udiff;
+
+	diff = ktime_to_ns(ktime_get()) - rq_info.def_start_time;
+	do_div(diff, 1000 * 1000);
+	udiff = (unsigned int) diff;
+
+	return snprintf(buf, MAX_LONG_SIZE, "%u\n", udiff);
+}
+
+static ssize_t store_def_timer_ms(struct kobject *kobj,
+		struct kobj_attribute *attr, const char *buf, size_t count)
+{
+	unsigned int val = 0;
+
+	if (kstrtouint(buf, 0, &val))
+		return -EINVAL;
+
+	rq_info.def_timer_jiffies = msecs_to_jiffies(val);
+
+	rq_info.def_start_time = ktime_to_ns(ktime_get());
+	return count;
+}
+
+static struct kobj_attribute def_timer_ms_attr =
+	__ATTR(def_timer_ms, S_IWUSR | S_IRUSR, show_def_timer_ms,
+			store_def_timer_ms);
+
+static ssize_t show_cpu_normalized_load(struct kobject *kobj,
+		struct kobj_attribute *attr, char *buf)
+{
+	return snprintf(buf, MAX_LONG_SIZE, "%u\n", report_load_at_max_freq());
+}
+
+static struct kobj_attribute cpu_normalized_load_attr =
+	__ATTR(cpu_normalized_load, S_IWUSR | S_IRUSR, show_cpu_normalized_load,
+			NULL);
+
+static struct attribute *rq_attrs[] = {
+	&cpu_normalized_load_attr.attr,
+	&def_timer_ms_attr.attr,
+	&run_queue_avg_attr.attr,
+	&run_queue_poll_ms_attr.attr,
+	&hotplug_disabled_attr.attr,
+	NULL,
+};
+
+static struct attribute_group rq_attr_group = {
+	.attrs = rq_attrs,
+};
+
+static int init_rq_attribs(void)
+{
+	int err;
+
+	rq_info.rq_avg = 0;
+	rq_info.attr_group = &rq_attr_group;
+
+	/* Create /sys/devices/system/cpu/cpu0/rq-stats/... */
+	rq_info.kobj = kobject_create_and_add("rq-stats",
+			&get_cpu_device(0)->kobj);
+	if (!rq_info.kobj)
+		return -ENOMEM;
+
+	err = sysfs_create_group(rq_info.kobj, rq_info.attr_group);
+	if (err)
+		kobject_put(rq_info.kobj);
+	else
+		kobject_uevent(rq_info.kobj, KOBJ_ADD);
+
+	return err;
+}
+
+static int __init msm_rq_stats_init(void)
+{
+	int ret;
+	int i;
+	struct cpufreq_policy cpu_policy;
+
+#ifndef CONFIG_SMP
+	/* Bail out if this is not an SMP Target */
+	rq_info.init = 0;
+	return -ENOSYS;
+#endif
+
+	rq_wq = create_singlethread_workqueue("rq_stats");
+	BUG_ON(!rq_wq);
+	INIT_WORK(&rq_info.def_timer_work, def_work_fn);
+	spin_lock_init(&rq_lock);
+	rq_info.rq_poll_jiffies = DEFAULT_RQ_POLL_JIFFIES;
+	rq_info.def_timer_jiffies = DEFAULT_DEF_TIMER_JIFFIES;
+	rq_info.rq_poll_last_jiffy = 0;
+	rq_info.def_timer_last_jiffy = 0;
+	rq_info.hotplug_disabled = 0;
+	ret = init_rq_attribs();
+
+	rq_info.init = 1;
+
+	for_each_possible_cpu(i) {
+		struct cpu_load_data *pcpu = &per_cpu(cpuload, i);
+
+		mutex_init(&pcpu->cpu_load_mutex);
+		cpufreq_get_policy(&cpu_policy, i);
+		pcpu->policy_max = cpu_policy.cpuinfo.max_freq;
+		if (cpu_online(i))
+			pcpu->cur_freq = cpufreq_quick_get(i);
+		cpumask_copy(pcpu->related_cpus, cpu_policy.cpus);
+	}
+	freq_transition.notifier_call = cpufreq_transition_handler;
+	cpu_hotplug.notifier_call = cpu_hotplug_handler;
+	cpufreq_register_notifier(&freq_transition,
+					CPUFREQ_TRANSITION_NOTIFIER);
+	register_hotcpu_notifier(&cpu_hotplug);
+
+	return ret;
+}
+late_initcall(msm_rq_stats_init);
+
+static int __init msm_rq_stats_early_init(void)
+{
+#ifndef CONFIG_SMP
+	/* Bail out if this is not an SMP Target */
+	rq_info.init = 0;
+	return -ENOSYS;
+#endif
+
+	pm_notifier(system_suspend_handler, 0);
+	return 0;
+}
+core_initcall(msm_rq_stats_early_init);
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/soc/qcom/msm_smd.c	2019-10-29 09:26:24.817214667 +0100
@@ -0,0 +1,3250 @@
+/* drivers/soc/qcom/msm_smd.c
+ *
+ * Copyright (C) 2007 Google, Inc.
+ * Copyright (c) 2008-2016, The Linux Foundation. All rights reserved.
+ * Author: Brian Swetland <swetland@google.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/cdev.h>
+#include <linux/device.h>
+#include <linux/wait.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/termios.h>
+#include <linux/ctype.h>
+#include <linux/remote_spinlock.h>
+#include <linux/uaccess.h>
+#include <linux/kfifo.h>
+#include <linux/pm.h>
+#include <linux/notifier.h>
+#include <linux/suspend.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/ipc_logging.h>
+
+#include <soc/qcom/ramdump.h>
+#include <soc/qcom/smd.h>
+#include <soc/qcom/smem.h>
+#include <soc/qcom/subsystem_notif.h>
+#include <soc/qcom/subsystem_restart.h>
+
+#include "smd_private.h"
+#include "smem_private.h"
+
+#define SMSM_SNAPSHOT_CNT 64
+#define SMSM_SNAPSHOT_SIZE ((SMSM_NUM_ENTRIES + 1) * 4 + sizeof(uint64_t))
+#define RSPIN_INIT_WAIT_MS 1000
+#define SMD_FIFO_FULL_RESERVE 4
+#define SMD_FIFO_ADDR_ALIGN_BYTES 3
+
+uint32_t SMSM_NUM_ENTRIES = 8;
+uint32_t SMSM_NUM_HOSTS = 3;
+
+/* Legacy SMSM interrupt notifications */
+#define LEGACY_MODEM_SMSM_MASK (SMSM_RESET | SMSM_INIT | SMSM_SMDINIT)
+
+struct smsm_shared_info {
+	uint32_t *state;
+	uint32_t *intr_mask;
+	uint32_t *intr_mux;
+};
+
+static struct smsm_shared_info smsm_info;
+static struct kfifo smsm_snapshot_fifo;
+static struct wakeup_source smsm_snapshot_ws;
+static int smsm_snapshot_count;
+static DEFINE_SPINLOCK(smsm_snapshot_count_lock);
+
+struct smsm_size_info_type {
+	uint32_t num_hosts;
+	uint32_t num_entries;
+	uint32_t reserved0;
+	uint32_t reserved1;
+};
+
+struct smsm_state_cb_info {
+	struct list_head cb_list;
+	uint32_t mask;
+	void *data;
+	void (*notify)(void *data, uint32_t old_state, uint32_t new_state);
+};
+
+struct smsm_state_info {
+	struct list_head callbacks;
+	uint32_t last_value;
+	uint32_t intr_mask_set;
+	uint32_t intr_mask_clear;
+};
+
+static irqreturn_t smsm_irq_handler(int irq, void *data);
+
+/*
+ * Interrupt configuration consists of static configuration for the supported
+ * processors that is done here along with interrupt configuration that is
+ * added by the separate initialization modules (device tree, platform data, or
+ * hard coded).
+ */
+static struct interrupt_config private_intr_config[NUM_SMD_SUBSYSTEMS] = {
+	[SMD_MODEM] = {
+		.smd.irq_handler = smd_modem_irq_handler,
+		.smsm.irq_handler = smsm_modem_irq_handler,
+	},
+	[SMD_Q6] = {
+		.smd.irq_handler = smd_dsp_irq_handler,
+		.smsm.irq_handler = smsm_dsp_irq_handler,
+	},
+	[SMD_DSPS] = {
+		.smd.irq_handler = smd_dsps_irq_handler,
+		.smsm.irq_handler = smsm_dsps_irq_handler,
+	},
+	[SMD_WCNSS] = {
+		.smd.irq_handler = smd_wcnss_irq_handler,
+		.smsm.irq_handler = smsm_wcnss_irq_handler,
+	},
+	[SMD_MODEM_Q6_FW] = {
+		.smd.irq_handler = smd_modemfw_irq_handler,
+		.smsm.irq_handler = NULL, /* does not support smsm */
+	},
+	[SMD_RPM] = {
+		.smd.irq_handler = smd_rpm_irq_handler,
+		.smsm.irq_handler = NULL, /* does not support smsm */
+	},
+};
+
+struct interrupt_stat interrupt_stats[NUM_SMD_SUBSYSTEMS];
+
+#define SMSM_STATE_ADDR(entry)           (smsm_info.state + entry)
+#define SMSM_INTR_MASK_ADDR(entry, host) (smsm_info.intr_mask + \
+					  entry * SMSM_NUM_HOSTS + host)
+#define SMSM_INTR_MUX_ADDR(entry)        (smsm_info.intr_mux + entry)
+
+int msm_smd_debug_mask = MSM_SMD_POWER_INFO | MSM_SMD_INFO |
+							MSM_SMSM_POWER_INFO;
+module_param_named(debug_mask, msm_smd_debug_mask,
+		   int, S_IRUGO | S_IWUSR | S_IWGRP);
+void *smd_log_ctx;
+void *smsm_log_ctx;
+#define NUM_LOG_PAGES 4
+
+#define IPC_LOG_SMD(level, x...) do { \
+	if (smd_log_ctx) \
+		ipc_log_string(smd_log_ctx, x); \
+	else \
+		printk(level x); \
+	} while (0)
+
+#define IPC_LOG_SMSM(level, x...) do { \
+	if (smsm_log_ctx) \
+		ipc_log_string(smsm_log_ctx, x); \
+	else \
+		printk(level x); \
+	} while (0)
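+
+/*
+ * Until the ipc_logging contexts are allocated these macros fall back to
+ * plain printk(), so early SMD/SMSM messages still reach the kernel log.
+ */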
+
+#if defined(CONFIG_MSM_SMD_DEBUG)
+#define SMD_DBG(x...) do {				\
+		if (msm_smd_debug_mask & MSM_SMD_DEBUG) \
+			IPC_LOG_SMD(KERN_DEBUG, x);	\
+	} while (0)
+
+#define SMSM_DBG(x...) do {					\
+		if (msm_smd_debug_mask & MSM_SMSM_DEBUG)	\
+			IPC_LOG_SMSM(KERN_DEBUG, x);		\
+	} while (0)
+
+#define SMD_INFO(x...) do {				\
+		if (msm_smd_debug_mask & MSM_SMD_INFO)	\
+			IPC_LOG_SMD(KERN_INFO, x);	\
+	} while (0)
+
+#define SMSM_INFO(x...) do {				\
+		if (msm_smd_debug_mask & MSM_SMSM_INFO) \
+			IPC_LOG_SMSM(KERN_INFO, x);	\
+	} while (0)
+
+#define SMD_POWER_INFO(x...) do {				\
+		if (msm_smd_debug_mask & MSM_SMD_POWER_INFO)	\
+			IPC_LOG_SMD(KERN_INFO, x);		\
+	} while (0)
+
+#define SMSM_POWER_INFO(x...) do {				\
+		if (msm_smd_debug_mask & MSM_SMSM_POWER_INFO)	\
+			IPC_LOG_SMSM(KERN_INFO, x);		\
+	} while (0)
+#else
+#define SMD_DBG(x...) do { } while (0)
+#define SMSM_DBG(x...) do { } while (0)
+#define SMD_INFO(x...) do { } while (0)
+#define SMSM_INFO(x...) do { } while (0)
+#define SMD_POWER_INFO(x...) do { } while (0)
+#define SMSM_POWER_INFO(x...) do { } while (0)
+#endif
+
+static void smd_fake_irq_handler(unsigned long arg);
+static void smsm_cb_snapshot(uint32_t use_wakeup_source);
+
+static struct workqueue_struct *smsm_cb_wq;
+static void notify_smsm_cb_clients_worker(struct work_struct *work);
+static DECLARE_WORK(smsm_cb_work, notify_smsm_cb_clients_worker);
+static DEFINE_MUTEX(smsm_lock);
+static struct smsm_state_info *smsm_states;
+
+static int smd_stream_write_avail(struct smd_channel *ch);
+static int smd_stream_read_avail(struct smd_channel *ch);
+
+static bool pid_is_on_edge(uint32_t edge_num, unsigned pid);
+
+static inline void smd_write_intr(unsigned int val, void __iomem *addr)
+{
+	wmb();
+	__raw_writel(val, addr);
+}
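+/*
+ * The wmb() in smd_write_intr() orders all prior shared-memory updates
+ * (FIFO data, head/tail indices, state flags) before the MMIO write that
+ * raises the outgoing IPC interrupt, so the remote processor never
+ * observes the interrupt ahead of the data it announces.
+ */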
+
+/**
+ * smd_memcpy_to_fifo() - copy to SMD channel FIFO
+ * @dest: Destination address
+ * @src: Source address
+ * @num_bytes: Number of bytes to copy
+ *
+ * @return: Address of destination
+ *
+ * This function copies num_bytes from src to dest. This is used as the memcpy
+ * function to copy data to SMD FIFO in case the SMD FIFO is naturally aligned.
+ */
+static void *smd_memcpy_to_fifo(void *dest, const void *src, size_t num_bytes)
+{
+	memcpy_toio(dest, src, num_bytes);
+	return dest;
+}
+
+/**
+ * smd_memcpy_from_fifo() - copy from SMD channel FIFO
+ * @dest: Destination address
+ * @src: Source address
+ * @num_bytes: Number of bytes to copy
+ *
+ * @return: Address of destination
+ *
+ * This function copies num_bytes from src to dest. This is used as the memcpy
+ * function to copy data from SMD FIFO in case the SMD FIFO is naturally
+ * aligned.
+ */
+static void *smd_memcpy_from_fifo(void *dest, const void *src, size_t num_bytes)
+{
+	memcpy_fromio(dest, src, num_bytes);
+	return dest;
+}
+
+/**
+ * smd_memcpy32_to_fifo() - Copy to SMD channel FIFO
+ *
+ * @dest: Destination address
+ * @src: Source address
+ * @num_bytes: Number of bytes to copy
+ *
+ * @return: On success, address of destination
+ *
+ * This function copies num_bytes data from src to dest. This is used as the
+ * memcpy function to copy data to SMD FIFO in case the SMD FIFO is 4 byte
+ * aligned.
+ */
+static void *smd_memcpy32_to_fifo(void *dest, const void *src, size_t num_bytes)
+{
+	uint32_t *dest_local = (uint32_t *)dest;
+	uint32_t *src_local = (uint32_t *)src;
+
+	BUG_ON(num_bytes & SMD_FIFO_ADDR_ALIGN_BYTES);
+	BUG_ON(!dest_local ||
+			((uintptr_t)dest_local & SMD_FIFO_ADDR_ALIGN_BYTES));
+	BUG_ON(!src_local ||
+			((uintptr_t)src_local & SMD_FIFO_ADDR_ALIGN_BYTES));
+	num_bytes /= sizeof(uint32_t);
+
+	while (num_bytes--)
+		__raw_writel_no_log(*src_local++, dest_local++);
+
+	return dest;
+}
+
+/**
+ * smd_memcpy32_from_fifo() - Copy from SMD channel FIFO
+ * @dest: Destination address
+ * @src: Source address
+ * @num_bytes: Number of bytes to copy
+ *
+ * @return: On success, destination address
+ *
+ * This function copies num_bytes data from SMD FIFO to dest. This is used as
+ * the memcpy function to copy data from SMD FIFO in case the SMD FIFO is 4 byte
+ * aligned.
+ */
+static void *smd_memcpy32_from_fifo(void *dest, const void *src,
+						size_t num_bytes)
+{
+	uint32_t *dest_local = (uint32_t *)dest;
+	uint32_t *src_local = (uint32_t *)src;
+
+	BUG_ON(num_bytes & SMD_FIFO_ADDR_ALIGN_BYTES);
+	BUG_ON(!dest_local ||
+			((uintptr_t)dest_local & SMD_FIFO_ADDR_ALIGN_BYTES));
+	BUG_ON(!src_local ||
+			((uintptr_t)src_local & SMD_FIFO_ADDR_ALIGN_BYTES));
+	num_bytes /= sizeof(uint32_t);
+
+	while (num_bytes--)
+		*dest_local++ = __raw_readl_no_log(src_local++);
+
+	return dest;
+}
+
+static inline void log_notify(uint32_t subsystem, smd_channel_t *ch)
+{
+	const char *subsys = smd_edge_to_subsystem(subsystem);
+
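+	/* keep subsys referenced when SMD power logging compiles away */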
+	(void) subsys;
+
+	if (!ch)
+		SMD_POWER_INFO("Apps->%s\n", subsys);
+	else
+		SMD_POWER_INFO(
+			"Apps->%s ch%d '%s': tx%d/rx%d %dr/%dw : %dr/%dw\n",
+			subsys, ch->n, ch->name,
+			ch->fifo_size -
+				(smd_stream_write_avail(ch) + 1),
+			smd_stream_read_avail(ch),
+			ch->half_ch->get_tail(ch->send),
+			ch->half_ch->get_head(ch->send),
+			ch->half_ch->get_tail(ch->recv),
+			ch->half_ch->get_head(ch->recv)
+			);
+}
+
+static inline void notify_modem_smd(smd_channel_t *ch)
+{
+	static const struct interrupt_config_item *intr
+		= &private_intr_config[SMD_MODEM].smd;
+
+	log_notify(SMD_APPS_MODEM, ch);
+	if (intr->out_base) {
+		++interrupt_stats[SMD_MODEM].smd_out_count;
+		smd_write_intr(intr->out_bit_pos,
+		intr->out_base + intr->out_offset);
+	}
+}
+
+static inline void notify_dsp_smd(smd_channel_t *ch)
+{
+	static const struct interrupt_config_item *intr
+		= &private_intr_config[SMD_Q6].smd;
+
+	log_notify(SMD_APPS_QDSP, ch);
+	if (intr->out_base) {
+		++interrupt_stats[SMD_Q6].smd_out_count;
+		smd_write_intr(intr->out_bit_pos,
+		intr->out_base + intr->out_offset);
+	}
+}
+
+static inline void notify_dsps_smd(smd_channel_t *ch)
+{
+	static const struct interrupt_config_item *intr
+		= &private_intr_config[SMD_DSPS].smd;
+
+	log_notify(SMD_APPS_DSPS, ch);
+	if (intr->out_base) {
+		++interrupt_stats[SMD_DSPS].smd_out_count;
+		smd_write_intr(intr->out_bit_pos,
+		intr->out_base + intr->out_offset);
+	}
+}
+
+static inline void notify_wcnss_smd(struct smd_channel *ch)
+{
+	static const struct interrupt_config_item *intr
+		= &private_intr_config[SMD_WCNSS].smd;
+
+	log_notify(SMD_APPS_WCNSS, ch);
+	if (intr->out_base) {
+		++interrupt_stats[SMD_WCNSS].smd_out_count;
+		smd_write_intr(intr->out_bit_pos,
+		intr->out_base + intr->out_offset);
+	}
+}
+
+static inline void notify_modemfw_smd(smd_channel_t *ch)
+{
+	static const struct interrupt_config_item *intr
+		= &private_intr_config[SMD_MODEM_Q6_FW].smd;
+
+	log_notify(SMD_APPS_Q6FW, ch);
+	if (intr->out_base) {
+		++interrupt_stats[SMD_MODEM_Q6_FW].smd_out_count;
+		smd_write_intr(intr->out_bit_pos,
+		intr->out_base + intr->out_offset);
+	}
+}
+
+static inline void notify_rpm_smd(smd_channel_t *ch)
+{
+	static const struct interrupt_config_item *intr
+		= &private_intr_config[SMD_RPM].smd;
+
+	if (intr->out_base) {
+		log_notify(SMD_APPS_RPM, ch);
+		++interrupt_stats[SMD_RPM].smd_out_count;
+		smd_write_intr(intr->out_bit_pos,
+		intr->out_base + intr->out_offset);
+	}
+}
+
+static inline void notify_modem_smsm(void)
+{
+	static const struct interrupt_config_item *intr
+		= &private_intr_config[SMD_MODEM].smsm;
+
+	SMSM_POWER_INFO("SMSM Apps->%s", "MODEM");
+
+	if (intr->out_base) {
+		++interrupt_stats[SMD_MODEM].smsm_out_count;
+		smd_write_intr(intr->out_bit_pos,
+		intr->out_base + intr->out_offset);
+	}
+}
+
+static inline void notify_dsp_smsm(void)
+{
+	static const struct interrupt_config_item *intr
+		= &private_intr_config[SMD_Q6].smsm;
+
+	SMSM_POWER_INFO("SMSM Apps->%s", "ADSP");
+
+	if (intr->out_base) {
+		++interrupt_stats[SMD_Q6].smsm_out_count;
+		smd_write_intr(intr->out_bit_pos,
+		intr->out_base + intr->out_offset);
+	}
+}
+
+static inline void notify_dsps_smsm(void)
+{
+	static const struct interrupt_config_item *intr
+		= &private_intr_config[SMD_DSPS].smsm;
+
+	SMSM_POWER_INFO("SMSM Apps->%s", "DSPS");
+
+	if (intr->out_base) {
+		++interrupt_stats[SMD_DSPS].smsm_out_count;
+		smd_write_intr(intr->out_bit_pos,
+		intr->out_base + intr->out_offset);
+	}
+}
+
+static inline void notify_wcnss_smsm(void)
+{
+	static const struct interrupt_config_item *intr
+		= &private_intr_config[SMD_WCNSS].smsm;
+
+	SMSM_POWER_INFO("SMSM Apps->%s", "WCNSS");
+
+	if (intr->out_base) {
+		++interrupt_stats[SMD_WCNSS].smsm_out_count;
+		smd_write_intr(intr->out_bit_pos,
+		intr->out_base + intr->out_offset);
+	}
+}
+
+static void notify_other_smsm(uint32_t smsm_entry, uint32_t notify_mask)
+{
+	if (smsm_info.intr_mask &&
+	    (__raw_readl(SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_MODEM))
+				& notify_mask))
+		notify_modem_smsm();
+
+	if (smsm_info.intr_mask &&
+	    (__raw_readl(SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_Q6))
+				& notify_mask))
+		notify_dsp_smsm();
+
+	if (smsm_info.intr_mask &&
+	    (__raw_readl(SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_WCNSS))
+				& notify_mask)) {
+		notify_wcnss_smsm();
+	}
+
+	if (smsm_info.intr_mask &&
+	    (__raw_readl(SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_DSPS))
+				& notify_mask)) {
+		notify_dsps_smsm();
+	}
+
+	if (smsm_info.intr_mask &&
+	    (__raw_readl(SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_APPS))
+				& notify_mask)) {
+		smsm_cb_snapshot(1);
+	}
+}
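+/*
+ * Each host's interrupt mask gates whether it is notified of a state
+ * change: a remote host is interrupted only when its mask for the entry
+ * intersects notify_mask, while a hit on the local APPS mask is serviced
+ * by snapshotting the state for the callback worker instead of raising
+ * an interrupt.
+ */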
+
+static int smsm_pm_notifier(struct notifier_block *nb,
+				unsigned long event, void *unused)
+{
+	switch (event) {
+	case PM_SUSPEND_PREPARE:
+		smsm_change_state(SMSM_APPS_STATE, SMSM_PROC_AWAKE, 0);
+		break;
+
+	case PM_POST_SUSPEND:
+		smsm_change_state(SMSM_APPS_STATE, 0, SMSM_PROC_AWAKE);
+		break;
+	}
+	return NOTIFY_DONE;
+}
+
+static struct notifier_block smsm_pm_nb = {
+	.notifier_call = smsm_pm_notifier,
+	.priority = 0,
+};
+
+/* the spinlock is used to synchronize between the
+ * irq handler and code that mutates the channel
+ * list or fiddles with channel state
+ */
+static DEFINE_SPINLOCK(smd_lock);
+DEFINE_SPINLOCK(smem_lock);
+
+/* the mutex is used during open() and close()
+ * operations to avoid races while creating or
+ * destroying smd_channel structures
+ */
+static DEFINE_MUTEX(smd_creation_mutex);
+
+struct smd_shared {
+	struct smd_half_channel ch0;
+	struct smd_half_channel ch1;
+};
+
+struct smd_shared_word_access {
+	struct smd_half_channel_word_access ch0;
+	struct smd_half_channel_word_access ch1;
+};
+
+/*
+ * Maps edge type to local and remote processor IDs.
+ */
+static struct edge_to_pid edge_to_pids[] = {
+	[SMD_APPS_MODEM] = {SMD_APPS, SMD_MODEM, "modem"},
+	[SMD_APPS_QDSP] = {SMD_APPS, SMD_Q6, "adsp"},
+	[SMD_MODEM_QDSP] = {SMD_MODEM, SMD_Q6},
+	[SMD_APPS_DSPS] = {SMD_APPS, SMD_DSPS, "dsps"},
+	[SMD_MODEM_DSPS] = {SMD_MODEM, SMD_DSPS},
+	[SMD_QDSP_DSPS] = {SMD_Q6, SMD_DSPS},
+	[SMD_APPS_WCNSS] = {SMD_APPS, SMD_WCNSS, "wcnss"},
+	[SMD_MODEM_WCNSS] = {SMD_MODEM, SMD_WCNSS},
+	[SMD_QDSP_WCNSS] = {SMD_Q6, SMD_WCNSS},
+	[SMD_DSPS_WCNSS] = {SMD_DSPS, SMD_WCNSS},
+	[SMD_APPS_Q6FW] = {SMD_APPS, SMD_MODEM_Q6_FW},
+	[SMD_MODEM_Q6FW] = {SMD_MODEM, SMD_MODEM_Q6_FW},
+	[SMD_QDSP_Q6FW] = {SMD_Q6, SMD_MODEM_Q6_FW},
+	[SMD_DSPS_Q6FW] = {SMD_DSPS, SMD_MODEM_Q6_FW},
+	[SMD_WCNSS_Q6FW] = {SMD_WCNSS, SMD_MODEM_Q6_FW},
+	[SMD_APPS_RPM] = {SMD_APPS, SMD_RPM},
+	[SMD_MODEM_RPM] = {SMD_MODEM, SMD_RPM},
+	[SMD_QDSP_RPM] = {SMD_Q6, SMD_RPM},
+	[SMD_WCNSS_RPM] = {SMD_WCNSS, SMD_RPM},
+	[SMD_TZ_RPM] = {SMD_TZ, SMD_RPM},
+};
+
+struct restart_notifier_block {
+	unsigned processor;
+	char *name;
+	struct notifier_block nb;
+};
+
+static struct platform_device loopback_tty_pdev = {.name = "LOOPBACK_TTY"};
+
+static LIST_HEAD(smd_ch_closed_list);
+static LIST_HEAD(smd_ch_closing_list);
+static LIST_HEAD(smd_ch_to_close_list);
+
+struct remote_proc_info {
+	unsigned remote_pid;
+	unsigned free_space;
+	struct work_struct probe_work;
+	struct list_head ch_list;
+	/* 2 total supported tables of channels */
+	unsigned char ch_allocated[SMEM_NUM_SMD_STREAM_CHANNELS * 2];
+	bool skip_pil;
+};
+
+static struct remote_proc_info remote_info[NUM_SMD_SUBSYSTEMS];
+
+static void finalize_channel_close_fn(struct work_struct *work);
+static DECLARE_WORK(finalize_channel_close_work, finalize_channel_close_fn);
+static struct workqueue_struct *channel_close_wq;
+
+#define PRI_ALLOC_TBL 1
+#define SEC_ALLOC_TBL 2
+static int smd_alloc_channel(struct smd_alloc_elm *alloc_elm, int table_id,
+				struct remote_proc_info *r_info);
+
+static bool smd_edge_inited(int edge)
+{
+	return edge_to_pids[edge].initialized;
+}
+
+/*
+ * On SMP systems, the probe might get called from multiple cores,
+ * hence use a lock.
+ */
+static DEFINE_MUTEX(smd_probe_lock);
+
+/**
+ * scan_alloc_table - Scans a specified SMD channel allocation table in SMEM for
+ *			newly created channels that need to be made locally
+ *			visible
+ *
+ * @shared: pointer to the table array in SMEM
+ * @smd_ch_allocated: pointer to an array indicating already allocated channels
+ * @table_id: identifier for this channel allocation table
+ * @num_entries: number of entries in this allocation table
+ * @r_info: pointer to the info structure of the remote proc we care about
+ *
+ * The smd_probe_lock must be locked by the calling function.  @shared and
+ * @smd_ch_allocated are assumed to be valid pointers.
+ */
+static void scan_alloc_table(struct smd_alloc_elm *shared,
+				char *smd_ch_allocated,
+				int table_id,
+				unsigned num_entries,
+				struct remote_proc_info *r_info)
+{
+	unsigned n;
+	uint32_t type;
+
+	for (n = 0; n < num_entries; n++) {
+		if (smd_ch_allocated[n])
+			continue;
+
+		/*
+		 * channel should be allocated only if APPS processor is
+		 * involved
+		 */
+		type = SMD_CHANNEL_TYPE(shared[n].type);
+		if (!pid_is_on_edge(type, SMD_APPS) ||
+				!pid_is_on_edge(type, r_info->remote_pid))
+			continue;
+		if (!shared[n].ref_count)
+			continue;
+		if (!shared[n].name[0])
+			continue;
+
+		if (!smd_edge_inited(type)) {
+			SMD_INFO(
+				"Probe skipping proc %d, tbl %d, ch %d, edge not inited\n",
+				r_info->remote_pid, table_id, n);
+			continue;
+		}
+
+		if (!smd_alloc_channel(&shared[n], table_id, r_info))
+			smd_ch_allocated[n] = 1;
+		else
+			SMD_INFO(
+				"Probe skipping proc %d, tbl %d, ch %d, not allocated\n",
+				r_info->remote_pid, table_id, n);
+	}
+}
+
+static void smd_channel_probe_now(struct remote_proc_info *r_info)
+{
+	struct smd_alloc_elm *shared;
+	unsigned tbl_size;
+
+	shared = smem_get_entry(ID_CH_ALLOC_TBL, &tbl_size,
+							r_info->remote_pid, 0);
+
+	if (!shared) {
+		pr_err("%s: allocation table not initialized\n", __func__);
+		return;
+	}
+
+	mutex_lock(&smd_probe_lock);
+
+	scan_alloc_table(shared, r_info->ch_allocated, PRI_ALLOC_TBL,
+						tbl_size / sizeof(*shared),
+						r_info);
+
+	shared = smem_get_entry(SMEM_CHANNEL_ALLOC_TBL_2, &tbl_size,
+							r_info->remote_pid, 0);
+	if (shared)
+		scan_alloc_table(shared,
+			&(r_info->ch_allocated[SMEM_NUM_SMD_STREAM_CHANNELS]),
+			SEC_ALLOC_TBL,
+			tbl_size / sizeof(*shared),
+			r_info);
+
+	mutex_unlock(&smd_probe_lock);
+}
+
+/**
+ * smd_channel_probe_worker() - Scan for newly created SMD channels and init
+ *				local structures so the channels are visible to
+ *				local clients
+ *
+ * @work: work_struct corresponding to an instance of this function running on
+ *		a workqueue.
+ */
+static void smd_channel_probe_worker(struct work_struct *work)
+{
+	struct remote_proc_info *r_info;
+
+	r_info = container_of(work, struct remote_proc_info, probe_work);
+
+	smd_channel_probe_now(r_info);
+}
+
+/**
+ * get_remote_ch() - gathers remote channel info
+ *
+ * @shared2:   Pointer to v2 shared channel structure
+ * @type:      Edge type
+ * @pid:       Processor ID of processor on edge
+ * @remote_ch:  Channel that belongs to processor @pid
+ * @is_word_access_ch: Bool, is this a word aligned access channel
+ *
+ * @returns:		0 on success, error code on failure
+ */
+static int get_remote_ch(void *shared2,
+		uint32_t type, uint32_t pid,
+		void **remote_ch,
+		int is_word_access_ch
+		)
+{
+	if (!remote_ch || !shared2 || !pid_is_on_edge(type, pid) ||
+				!pid_is_on_edge(type, SMD_APPS))
+		return -EINVAL;
+
+	if (is_word_access_ch)
+		*remote_ch =
+			&((struct smd_shared_word_access *)(shared2))->ch1;
+	else
+		*remote_ch = &((struct smd_shared *)(shared2))->ch1;
+
+	return 0;
+}
+
+/**
+ * smd_remote_ss_to_edge() - return edge type from remote ss type
+ * @name:	remote subsystem name
+ *
+ * Returns the edge type connected between the local subsystem(APPS)
+ * and remote subsystem @name.
+ */
+int smd_remote_ss_to_edge(const char *name)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(edge_to_pids); ++i) {
+		if (edge_to_pids[i].subsys_name[0] != 0x0) {
+			if (!strncmp(edge_to_pids[i].subsys_name, name,
+								strlen(name)))
+				return i;
+		}
+	}
+
+	return -EINVAL;
+}
+EXPORT_SYMBOL(smd_remote_ss_to_edge);
+
+/**
+ * smd_edge_to_pil_str - Returns the PIL string used to load the remote side of
+ *			 the indicated edge.
+ *
+ * @type -	Edge definition
+ * @returns -	The PIL string to load the remote side of @type or NULL if the
+ *		PIL string does not exist.
+ */
+const char *smd_edge_to_pil_str(uint32_t type)
+{
+	const char *pil_str = NULL;
+
+	if (type < ARRAY_SIZE(edge_to_pids)) {
+		if (!edge_to_pids[type].initialized)
+			return ERR_PTR(-EPROBE_DEFER);
+		if (!remote_info[smd_edge_to_remote_pid(type)].skip_pil) {
+			pil_str = edge_to_pids[type].subsys_name;
+			if (pil_str[0] == 0x0)
+				pil_str = NULL;
+		}
+	}
+	return pil_str;
+}
+EXPORT_SYMBOL(smd_edge_to_pil_str);
+
+/*
+ * Returns a pointer to the subsystem name or NULL if no
+ * subsystem name is available.
+ *
+ * @type - Edge definition
+ */
+const char *smd_edge_to_subsystem(uint32_t type)
+{
+	const char *subsys = NULL;
+
+	if (type < ARRAY_SIZE(edge_to_pids)) {
+		subsys = edge_to_pids[type].subsys_name;
+		if (subsys[0] == 0x0)
+			subsys = NULL;
+		if (!edge_to_pids[type].initialized)
+			subsys = ERR_PTR(-EPROBE_DEFER);
+	}
+	return subsys;
+}
+EXPORT_SYMBOL(smd_edge_to_subsystem);
+
+/*
+ * Returns a pointer to the subsystem name given the
+ * remote processor ID.
+ * The subsystem is not necessarily PIL-loadable.
+ *
+ * @pid     Remote processor ID
+ * @returns Pointer to subsystem name or NULL if not found
+ */
+const char *smd_pid_to_subsystem(uint32_t pid)
+{
+	const char *subsys = NULL;
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(edge_to_pids); ++i) {
+		if (pid == edge_to_pids[i].remote_pid) {
+			if (!edge_to_pids[i].initialized) {
+				subsys = ERR_PTR(-EPROBE_DEFER);
+				break;
+			}
+			if (edge_to_pids[i].subsys_name[0] != 0x0) {
+				subsys = edge_to_pids[i].subsys_name;
+				break;
+			} else if (pid == SMD_RPM) {
+				subsys = "rpm";
+				break;
+			}
+		}
+	}
+
+	return subsys;
+}
+EXPORT_SYMBOL(smd_pid_to_subsystem);
+
+static void smd_reset_edge(void *void_ch, unsigned new_state,
+				int is_word_access_ch)
+{
+	if (is_word_access_ch) {
+		struct smd_half_channel_word_access *ch =
+			(struct smd_half_channel_word_access *)(void_ch);
+		if (ch->state != SMD_SS_CLOSED) {
+			ch->state = new_state;
+			ch->fDSR = 0;
+			ch->fCTS = 0;
+			ch->fCD = 0;
+			ch->fSTATE = 1;
+		}
+	} else {
+		struct smd_half_channel *ch =
+			(struct smd_half_channel *)(void_ch);
+		if (ch->state != SMD_SS_CLOSED) {
+			ch->state = new_state;
+			ch->fDSR = 0;
+			ch->fCTS = 0;
+			ch->fCD = 0;
+			ch->fSTATE = 1;
+		}
+	}
+}
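+/*
+ * Dropping fDSR/fCTS/fCD deasserts the handshake signals for the dead
+ * remote half, and fSTATE = 1 flags the forced state change so the local
+ * side re-evaluates the channel on the next interrupt or fake-irq pass.
+ */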
+
+/**
+ * smd_channel_reset_state() - find channels in an allocation table and set them
+ *				to the specified state
+ *
+ * @shared:	Pointer to the allocation table to scan
+ * @table_id:	ID of the table
+ * @new_state:	New state that channels should be set to
+ * @pid:	Processor ID of the remote processor for the channels
+ * @num_entries: Number of entries in the table
+ *
+ * Scan the indicated table for channels between Apps and @pid.  If a valid
+ * channel is found, set the remote side of the channel to @new_state.
+ */
+static void smd_channel_reset_state(struct smd_alloc_elm *shared, int table_id,
+		unsigned new_state, unsigned pid, unsigned num_entries)
+{
+	unsigned n;
+	void *shared2;
+	uint32_t type;
+	void *remote_ch;
+	int is_word_access;
+	unsigned base_id;
+
+	switch (table_id) {
+	case PRI_ALLOC_TBL:
+		base_id = SMEM_SMD_BASE_ID;
+		break;
+	case SEC_ALLOC_TBL:
+		base_id = SMEM_SMD_BASE_ID_2;
+		break;
+	default:
+		SMD_INFO("%s: invalid table_id:%d\n", __func__, table_id);
+		return;
+	}
+
+	for (n = 0; n < num_entries; n++) {
+		if (!shared[n].ref_count)
+			continue;
+		if (!shared[n].name[0])
+			continue;
+
+		type = SMD_CHANNEL_TYPE(shared[n].type);
+		is_word_access = is_word_access_ch(type);
+		if (is_word_access)
+			shared2 = smem_find(base_id + n,
+				sizeof(struct smd_shared_word_access), pid,
+				0);
+		else
+			shared2 = smem_find(base_id + n,
+				sizeof(struct smd_shared), pid, 0);
+		if (!shared2)
+			continue;
+
+		if (!get_remote_ch(shared2, type, pid,
+					&remote_ch, is_word_access))
+			smd_reset_edge(remote_ch, new_state, is_word_access);
+	}
+}
+
+/**
+ * pid_is_on_edge() - checks to see if the processor with id pid is on the
+ * edge specified by edge_num
+ *
+ * @edge_num:		the number of the edge which is being tested
+ * @pid:		the id of the processor being tested
+ *
+ * @returns:		true if on edge, false otherwise
+ */
+static bool pid_is_on_edge(uint32_t edge_num, unsigned pid)
+{
+	struct edge_to_pid edge;
+
+	if (edge_num >= ARRAY_SIZE(edge_to_pids))
+		return false;
+
+	edge = edge_to_pids[edge_num];
+	return (edge.local_pid == pid || edge.remote_pid == pid);
+}
+
+void smd_channel_reset(uint32_t restart_pid)
+{
+	struct smd_alloc_elm *shared_pri;
+	struct smd_alloc_elm *shared_sec;
+	unsigned long flags;
+	unsigned pri_size;
+	unsigned sec_size;
+
+	SMD_POWER_INFO("%s: starting reset\n", __func__);
+
+	shared_pri = smem_get_entry(ID_CH_ALLOC_TBL, &pri_size,	restart_pid, 0);
+	if (!shared_pri) {
+		pr_err("%s: allocation table not initialized\n", __func__);
+		return;
+	}
+	shared_sec = smem_get_entry(SMEM_CHANNEL_ALLOC_TBL_2, &sec_size,
+								restart_pid, 0);
+
+	/* reset SMSM entry */
+	if (smsm_info.state) {
+		writel_relaxed(0, SMSM_STATE_ADDR(restart_pid));
+
+		/* restart SMSM init handshake */
+		if (restart_pid == SMSM_MODEM) {
+			smsm_change_state(SMSM_APPS_STATE,
+				SMSM_INIT | SMSM_SMD_LOOPBACK | SMSM_RESET,
+				0);
+		}
+
+		/* notify SMSM processors */
+		smsm_irq_handler(0, 0);
+		notify_modem_smsm();
+		notify_dsp_smsm();
+		notify_dsps_smsm();
+		notify_wcnss_smsm();
+	}
+
+	/* change all remote states to CLOSING */
+	mutex_lock(&smd_probe_lock);
+	spin_lock_irqsave(&smd_lock, flags);
+	smd_channel_reset_state(shared_pri, PRI_ALLOC_TBL, SMD_SS_CLOSING,
+				restart_pid, pri_size / sizeof(*shared_pri));
+	if (shared_sec)
+		smd_channel_reset_state(shared_sec, SEC_ALLOC_TBL,
+						SMD_SS_CLOSING, restart_pid,
+						sec_size / sizeof(*shared_sec));
+	spin_unlock_irqrestore(&smd_lock, flags);
+	mutex_unlock(&smd_probe_lock);
+
+	mb();
+	smd_fake_irq_handler(0);
+
+	/* change all remote states to CLOSED */
+	mutex_lock(&smd_probe_lock);
+	spin_lock_irqsave(&smd_lock, flags);
+	smd_channel_reset_state(shared_pri, PRI_ALLOC_TBL, SMD_SS_CLOSED,
+				restart_pid, pri_size / sizeof(*shared_pri));
+	if (shared_sec)
+		smd_channel_reset_state(shared_sec, SEC_ALLOC_TBL,
+						SMD_SS_CLOSED, restart_pid,
+						sec_size / sizeof(*shared_sec));
+	spin_unlock_irqrestore(&smd_lock, flags);
+	mutex_unlock(&smd_probe_lock);
+
+	mb();
+	smd_fake_irq_handler(0);
+
+	SMD_POWER_INFO("%s: finished reset\n", __func__);
+}
+
+/* how many bytes are available for reading */
+static int smd_stream_read_avail(struct smd_channel *ch)
+{
+	unsigned head = ch->half_ch->get_head(ch->recv);
+	unsigned tail = ch->half_ch->get_tail(ch->recv);
+	unsigned fifo_size = ch->fifo_size;
+	unsigned bytes_avail = head - tail;
+
+	if (head < tail)
+		bytes_avail += fifo_size;
+
+	BUG_ON(bytes_avail >= fifo_size);
+	return bytes_avail;
+}
+
+/* how many bytes we are free to write */
+static int smd_stream_write_avail(struct smd_channel *ch)
+{
+	unsigned head = ch->half_ch->get_head(ch->send);
+	unsigned tail = ch->half_ch->get_tail(ch->send);
+	unsigned fifo_size = ch->fifo_size;
+	unsigned bytes_avail = tail - head;
+
+	if (tail <= head)
+		bytes_avail += fifo_size;
+	if (bytes_avail < SMD_FIFO_FULL_RESERVE)
+		bytes_avail = 0;
+	else
+		bytes_avail -= SMD_FIFO_FULL_RESERVE;
+
+	BUG_ON(bytes_avail >= fifo_size);
+	return bytes_avail;
+}
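+/*
+ * Worked example of the ring arithmetic above: with fifo_size = 1024,
+ * head = 1000 and tail = 100, the writer has (100 - 1000) + 1024 = 124
+ * bytes of raw space, reported as 120 after the 4-byte full reserve.
+ * The reserve guarantees head never catches up to tail, so head == tail
+ * unambiguously means "empty" on the read side.
+ */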
+
+static int smd_packet_read_avail(struct smd_channel *ch)
+{
+	if (ch->current_packet) {
+		int n = smd_stream_read_avail(ch);
+		if (n > ch->current_packet)
+			n = ch->current_packet;
+		return n;
+	} else {
+		return 0;
+	}
+}
+
+static int smd_packet_write_avail(struct smd_channel *ch)
+{
+	int n = smd_stream_write_avail(ch);
+	return n > SMD_HEADER_SIZE ? n - SMD_HEADER_SIZE : 0;
+}
+
+static int ch_is_open(struct smd_channel *ch)
+{
+	return (ch->half_ch->get_state(ch->recv) == SMD_SS_OPENED ||
+		ch->half_ch->get_state(ch->recv) == SMD_SS_FLUSHING)
+		&& (ch->half_ch->get_state(ch->send) == SMD_SS_OPENED);
+}
+
+/* provide a pointer and length to readable data in the fifo */
+static unsigned ch_read_buffer(struct smd_channel *ch, void **ptr)
+{
+	unsigned head = ch->half_ch->get_head(ch->recv);
+	unsigned tail = ch->half_ch->get_tail(ch->recv);
+	unsigned fifo_size = ch->fifo_size;
+
+	BUG_ON(fifo_size >= SZ_1M);
+	BUG_ON(head >= fifo_size);
+	BUG_ON(tail >= fifo_size);
+	BUG_ON(OVERFLOW_ADD_UNSIGNED(uintptr_t, (uintptr_t)ch->recv_data,
+								tail));
+	*ptr = (void *) (ch->recv_data + tail);
+	if (tail <= head)
+		return head - tail;
+	else
+		return fifo_size - tail;
+}
+
+static int read_intr_blocked(struct smd_channel *ch)
+{
+	return ch->half_ch->get_fBLOCKREADINTR(ch->recv);
+}
+
+/* advance the fifo read pointer after data from ch_read_buffer is consumed */
+static void ch_read_done(struct smd_channel *ch, unsigned count)
+{
+	unsigned tail = ch->half_ch->get_tail(ch->recv);
+	unsigned fifo_size = ch->fifo_size;
+
+	BUG_ON(count > smd_stream_read_avail(ch));
+
+	tail += count;
+	if (tail >= fifo_size)
+		tail -= fifo_size;
+	ch->half_ch->set_tail(ch->recv, tail);
+	wmb();
+	ch->half_ch->set_fTAIL(ch->send,  1);
+}
+
+/* basic read interface to ch_read_{buffer,done} used
+ * by smd_*_read() and update_packet_state();
+ * will read-and-discard if the _data pointer is null
+ */
+static int ch_read(struct smd_channel *ch, void *_data, int len)
+{
+	void *ptr;
+	unsigned n;
+	unsigned char *data = _data;
+	int orig_len = len;
+
+	while (len > 0) {
+		n = ch_read_buffer(ch, &ptr);
+		if (n == 0)
+			break;
+
+		if (n > len)
+			n = len;
+		if (_data)
+			ch->read_from_fifo(data, ptr, n);
+
+		data += n;
+		len -= n;
+		ch_read_done(ch, n);
+	}
+
+	return orig_len - len;
+}
+
+static void update_stream_state(struct smd_channel *ch)
+{
+	/* streams have no special state requiring updating */
+}
+
+static void update_packet_state(struct smd_channel *ch)
+{
+	unsigned hdr[5];
+	int r;
+	const char *peripheral = NULL;
+
+	/* can't do anything if we're in the middle of a packet */
+	while (ch->current_packet == 0) {
+		/* discard 0 length packets if any */
+
+		/* don't bother unless we can get the full header */
+		if (smd_stream_read_avail(ch) < SMD_HEADER_SIZE)
+			return;
+
+		r = ch_read(ch, hdr, SMD_HEADER_SIZE);
+		BUG_ON(r != SMD_HEADER_SIZE);
+
+		ch->current_packet = hdr[0];
+		if (ch->current_packet > (uint32_t)INT_MAX) {
+			pr_err("%s: Invalid packet size of %d bytes detected. Edge: %d, Channel : %s, RPTR: %d, WPTR: %d",
+				__func__, ch->current_packet, ch->type,
+				ch->name, ch->half_ch->get_tail(ch->recv),
+				ch->half_ch->get_head(ch->recv));
+			peripheral = smd_edge_to_pil_str(ch->type);
+			if (peripheral) {
+				if (subsystem_restart(peripheral) < 0)
+					BUG();
+			} else {
+				BUG();
+			}
+		}
+	}
+}
+
+/**
+ * ch_write_buffer() - Provide a pointer and length for the next segment of
+ * free space in the FIFO.
+ * @ch: channel
+ * @ptr: Address to pointer for the next segment write
+ * @returns: Maximum size that can be written until the FIFO is either full
+ *           or the end of the FIFO has been reached.
+ *
+ * The returned pointer and length are passed to memcpy, so the next segment is
+ * defined as either the space available between the read index (tail) and the
+ * write index (head) or the space available to the end of the FIFO.
+ */
+static unsigned ch_write_buffer(struct smd_channel *ch, void **ptr)
+{
+	unsigned head = ch->half_ch->get_head(ch->send);
+	unsigned tail = ch->half_ch->get_tail(ch->send);
+	unsigned fifo_size = ch->fifo_size;
+
+	BUG_ON(fifo_size >= SZ_1M);
+	BUG_ON(head >= fifo_size);
+	BUG_ON(tail >= fifo_size);
+	BUG_ON(OVERFLOW_ADD_UNSIGNED(uintptr_t, (uintptr_t)ch->send_data,
+								head));
+
+	*ptr = (void *) (ch->send_data + head);
+	if (head < tail) {
+		return tail - head - SMD_FIFO_FULL_RESERVE;
+	} else {
+		if (tail < SMD_FIFO_FULL_RESERVE)
+			return fifo_size + tail - head
+					- SMD_FIFO_FULL_RESERVE;
+		else
+			return fifo_size - head;
+	}
+}
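+/*
+ * Example for the wrap case above: with fifo_size = 1024, head = 1000
+ * and tail = 2, the segment runs to the end of the FIFO but must leave
+ * the full reserve, so the caller may write 1024 + 2 - 1000 - 4 = 22
+ * bytes starting at offset 1000 before calling ch_write_done().
+ */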
+
+/* advance the fifo write pointer after free space
+ * from ch_write_buffer is filled
+ */
+static void ch_write_done(struct smd_channel *ch, unsigned count)
+{
+	unsigned head = ch->half_ch->get_head(ch->send);
+	unsigned fifo_size = ch->fifo_size;
+
+	BUG_ON(count > smd_stream_write_avail(ch));
+	head += count;
+	if (head >= fifo_size)
+		head -= fifo_size;
+	ch->half_ch->set_head(ch->send, head);
+	wmb();
+	ch->half_ch->set_fHEAD(ch->send, 1);
+}
+
+static void ch_set_state(struct smd_channel *ch, unsigned n)
+{
+	if (n == SMD_SS_OPENED) {
+		ch->half_ch->set_fDSR(ch->send, 1);
+		ch->half_ch->set_fCTS(ch->send, 1);
+		ch->half_ch->set_fCD(ch->send, 1);
+	} else {
+		ch->half_ch->set_fDSR(ch->send, 0);
+		ch->half_ch->set_fCTS(ch->send, 0);
+		ch->half_ch->set_fCD(ch->send, 0);
+	}
+	ch->half_ch->set_state(ch->send, n);
+	ch->half_ch->set_fSTATE(ch->send, 1);
+	ch->notify_other_cpu(ch);
+}
+
+/**
+ * do_smd_probe() - Look for newly created SMD channels from a specific
+ *		processor
+ *
+ * @remote_pid: remote processor id of the proc that may have created channels
+ */
+static void do_smd_probe(unsigned remote_pid)
+{
+	unsigned free_space;
+
+	free_space = smem_get_free_space(remote_pid);
+	if (free_space != remote_info[remote_pid].free_space) {
+		remote_info[remote_pid].free_space = free_space;
+		schedule_work(&remote_info[remote_pid].probe_work);
+	}
+}
+
+static void remote_processed_close(struct smd_channel *ch)
+{
+	/* The remote side has observed our close, we can allow a reopen */
+	list_move(&ch->ch_list, &smd_ch_to_close_list);
+	queue_work(channel_close_wq, &finalize_channel_close_work);
+}
+
+static void smd_state_change(struct smd_channel *ch,
+			     unsigned last, unsigned next)
+{
+	ch->last_state = next;
+
+	SMD_INFO("SMD: ch %d %d -> %d\n", ch->n, last, next);
+
+	switch (next) {
+	case SMD_SS_OPENING:
+		if (last == SMD_SS_OPENED &&
+		    ch->half_ch->get_state(ch->send) == SMD_SS_CLOSED) {
+			/* We missed the CLOSING and CLOSED states */
+			remote_processed_close(ch);
+		} else if (ch->half_ch->get_state(ch->send) == SMD_SS_CLOSING ||
+		    ch->half_ch->get_state(ch->send) == SMD_SS_CLOSED) {
+			ch->half_ch->set_tail(ch->recv, 0);
+			ch->half_ch->set_head(ch->send, 0);
+			ch->half_ch->set_fBLOCKREADINTR(ch->send, 0);
+			ch->current_packet = 0;
+			ch_set_state(ch, SMD_SS_OPENING);
+		}
+		break;
+	case SMD_SS_OPENED:
+		if (ch->half_ch->get_state(ch->send) == SMD_SS_OPENING) {
+			ch_set_state(ch, SMD_SS_OPENED);
+			ch->notify(ch->priv, SMD_EVENT_OPEN);
+		}
+		break;
+	case SMD_SS_FLUSHING:
+	case SMD_SS_RESET:
+		/* we should force them to close? */
+		break;
+	case SMD_SS_CLOSED:
+		if (ch->half_ch->get_state(ch->send) == SMD_SS_OPENED) {
+			ch_set_state(ch, SMD_SS_CLOSING);
+			ch->pending_pkt_sz = 0;
+			ch->notify(ch->priv, SMD_EVENT_CLOSE);
+		}
+		/* We missed the CLOSING state */
+		if (ch->half_ch->get_state(ch->send) == SMD_SS_CLOSED)
+			remote_processed_close(ch);
+		break;
+	case SMD_SS_CLOSING:
+		if (ch->half_ch->get_state(ch->send) == SMD_SS_CLOSED)
+			remote_processed_close(ch);
+		break;
+	}
+}
+
+static void handle_smd_irq_closing_list(void)
+{
+	unsigned long flags;
+	struct smd_channel *ch;
+	struct smd_channel *index;
+	unsigned tmp;
+
+	spin_lock_irqsave(&smd_lock, flags);
+	list_for_each_entry_safe(ch, index, &smd_ch_closing_list, ch_list) {
+		if (ch->half_ch->get_fSTATE(ch->recv))
+			ch->half_ch->set_fSTATE(ch->recv, 0);
+		tmp = ch->half_ch->get_state(ch->recv);
+		if (tmp != ch->last_state)
+			smd_state_change(ch, ch->last_state, tmp);
+	}
+	spin_unlock_irqrestore(&smd_lock, flags);
+}
+
+static void handle_smd_irq(struct remote_proc_info *r_info,
+		void (*notify)(smd_channel_t *ch))
+{
+	unsigned long flags;
+	struct smd_channel *ch;
+	unsigned ch_flags;
+	unsigned tmp;
+	unsigned char state_change;
+	struct list_head *list;
+
+	list = &r_info->ch_list;
+
+	spin_lock_irqsave(&smd_lock, flags);
+	list_for_each_entry(ch, list, ch_list) {
+		state_change = 0;
+		ch_flags = 0;
+		if (ch_is_open(ch)) {
+			if (ch->half_ch->get_fHEAD(ch->recv)) {
+				ch->half_ch->set_fHEAD(ch->recv, 0);
+				ch_flags |= 1;
+			}
+			if (ch->half_ch->get_fTAIL(ch->recv)) {
+				ch->half_ch->set_fTAIL(ch->recv, 0);
+				ch_flags |= 2;
+			}
+			if (ch->half_ch->get_fSTATE(ch->recv)) {
+				ch->half_ch->set_fSTATE(ch->recv, 0);
+				ch_flags |= 4;
+			}
+		}
+		tmp = ch->half_ch->get_state(ch->recv);
+		if (tmp != ch->last_state) {
+			SMD_POWER_INFO("SMD ch%d '%s' State change %d->%d\n",
+					ch->n, ch->name, ch->last_state, tmp);
+			smd_state_change(ch, ch->last_state, tmp);
+			state_change = 1;
+		}
+		if (ch_flags & 0x3) {
+			ch->update_state(ch);
+			SMD_POWER_INFO(
+				"SMD ch%d '%s' Data event 0x%x tx%d/rx%d %dr/%dw : %dr/%dw\n",
+				ch->n, ch->name,
+				ch_flags,
+				ch->fifo_size -
+					(smd_stream_write_avail(ch) + 1),
+				smd_stream_read_avail(ch),
+				ch->half_ch->get_tail(ch->send),
+				ch->half_ch->get_head(ch->send),
+				ch->half_ch->get_tail(ch->recv),
+				ch->half_ch->get_head(ch->recv)
+				);
+			ch->notify(ch->priv, SMD_EVENT_DATA);
+		}
+		if (ch_flags & 0x4 && !state_change) {
+			SMD_POWER_INFO("SMD ch%d '%s' State update\n",
+					ch->n, ch->name);
+			ch->notify(ch->priv, SMD_EVENT_STATUS);
+		}
+	}
+	spin_unlock_irqrestore(&smd_lock, flags);
+	do_smd_probe(r_info->remote_pid);
+}
+
+static inline void log_irq(uint32_t subsystem)
+{
+	const char *subsys = smd_edge_to_subsystem(subsystem);
+
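+	/* keep subsys referenced when SMD power logging compiles away */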
+	(void) subsys;
+
+	SMD_POWER_INFO("SMD Int %s->Apps\n", subsys);
+}
+
+irqreturn_t smd_modem_irq_handler(int irq, void *data)
+{
+	if (unlikely(!edge_to_pids[SMD_APPS_MODEM].initialized))
+		return IRQ_HANDLED;
+	log_irq(SMD_APPS_MODEM);
+	++interrupt_stats[SMD_MODEM].smd_in_count;
+	handle_smd_irq(&remote_info[SMD_MODEM], notify_modem_smd);
+	handle_smd_irq_closing_list();
+	return IRQ_HANDLED;
+}
+
+irqreturn_t smd_dsp_irq_handler(int irq, void *data)
+{
+	if (unlikely(!edge_to_pids[SMD_APPS_QDSP].initialized))
+		return IRQ_HANDLED;
+	log_irq(SMD_APPS_QDSP);
+	++interrupt_stats[SMD_Q6].smd_in_count;
+	handle_smd_irq(&remote_info[SMD_Q6], notify_dsp_smd);
+	handle_smd_irq_closing_list();
+	return IRQ_HANDLED;
+}
+
+irqreturn_t smd_dsps_irq_handler(int irq, void *data)
+{
+	if (unlikely(!edge_to_pids[SMD_APPS_DSPS].initialized))
+		return IRQ_HANDLED;
+	log_irq(SMD_APPS_DSPS);
+	++interrupt_stats[SMD_DSPS].smd_in_count;
+	handle_smd_irq(&remote_info[SMD_DSPS], notify_dsps_smd);
+	handle_smd_irq_closing_list();
+	return IRQ_HANDLED;
+}
+
+irqreturn_t smd_wcnss_irq_handler(int irq, void *data)
+{
+	if (unlikely(!edge_to_pids[SMD_APPS_WCNSS].initialized))
+		return IRQ_HANDLED;
+	log_irq(SMD_APPS_WCNSS);
+	++interrupt_stats[SMD_WCNSS].smd_in_count;
+	handle_smd_irq(&remote_info[SMD_WCNSS], notify_wcnss_smd);
+	handle_smd_irq_closing_list();
+	return IRQ_HANDLED;
+}
+
+irqreturn_t smd_modemfw_irq_handler(int irq, void *data)
+{
+	if (unlikely(!edge_to_pids[SMD_APPS_Q6FW].initialized))
+		return IRQ_HANDLED;
+	log_irq(SMD_APPS_Q6FW);
+	++interrupt_stats[SMD_MODEM_Q6_FW].smd_in_count;
+	handle_smd_irq(&remote_info[SMD_MODEM_Q6_FW], notify_modemfw_smd);
+	handle_smd_irq_closing_list();
+	return IRQ_HANDLED;
+}
+
+irqreturn_t smd_rpm_irq_handler(int irq, void *data)
+{
+	if (unlikely(!edge_to_pids[SMD_APPS_RPM].initialized))
+		return IRQ_HANDLED;
+	log_irq(SMD_APPS_RPM);
+	++interrupt_stats[SMD_RPM].smd_in_count;
+	handle_smd_irq(&remote_info[SMD_RPM], notify_rpm_smd);
+	handle_smd_irq_closing_list();
+	return IRQ_HANDLED;
+}
+
+static void smd_fake_irq_handler(unsigned long arg)
+{
+	handle_smd_irq(&remote_info[SMD_MODEM], notify_modem_smd);
+	handle_smd_irq(&remote_info[SMD_Q6], notify_dsp_smd);
+	handle_smd_irq(&remote_info[SMD_DSPS], notify_dsps_smd);
+	handle_smd_irq(&remote_info[SMD_WCNSS], notify_wcnss_smd);
+	handle_smd_irq(&remote_info[SMD_MODEM_Q6_FW], notify_modemfw_smd);
+	handle_smd_irq(&remote_info[SMD_RPM], notify_rpm_smd);
+	handle_smd_irq_closing_list();
+}
+
+static int smd_is_packet(struct smd_alloc_elm *alloc_elm)
+{
+	if (SMD_XFER_TYPE(alloc_elm->type) == 1)
+		return 0;
+	else if (SMD_XFER_TYPE(alloc_elm->type) == 2)
+		return 1;
+
+	panic("Unsupported SMD xfer type: %d name:%s edge:%d\n",
+					SMD_XFER_TYPE(alloc_elm->type),
+					alloc_elm->name,
+					SMD_CHANNEL_TYPE(alloc_elm->type));
+}
+
+static int smd_stream_write(smd_channel_t *ch, const void *_data, int len,
+				bool intr_ntfy)
+{
+	void *ptr;
+	const unsigned char *buf = _data;
+	unsigned xfer;
+	int orig_len = len;
+
+	SMD_DBG("smd_stream_write() %d -> ch%d\n", len, ch->n);
+	if (len < 0)
+		return -EINVAL;
+	else if (len == 0)
+		return 0;
+
+	while ((xfer = ch_write_buffer(ch, &ptr)) != 0) {
+		if (!ch_is_open(ch)) {
+			len = orig_len;
+			break;
+		}
+		if (xfer > len)
+			xfer = len;
+
+		ch->write_to_fifo(ptr, buf, xfer);
+		ch_write_done(ch, xfer);
+		len -= xfer;
+		buf += xfer;
+		if (len == 0)
+			break;
+	}
+
+	if (orig_len - len && intr_ntfy)
+		ch->notify_other_cpu(ch);
+
+	return orig_len - len;
+}
+
+static int smd_packet_write(smd_channel_t *ch, const void *_data, int len,
+				bool intr_ntfy)
+{
+	int ret;
+	unsigned hdr[5];
+
+	SMD_DBG("smd_packet_write() %d -> ch%d\n", len, ch->n);
+	if (len < 0)
+		return -EINVAL;
+	else if (len == 0)
+		return 0;
+
+	if (smd_stream_write_avail(ch) < (len + SMD_HEADER_SIZE))
+		return -ENOMEM;
+
+	hdr[0] = len;
+	hdr[1] = hdr[2] = hdr[3] = hdr[4] = 0;
+
+	ret = smd_stream_write(ch, hdr, sizeof(hdr), false);
+	if (ret < 0 || ret != sizeof(hdr)) {
+		SMD_DBG("%s failed to write pkt header: %d returned\n",
+								__func__, ret);
+		return -EFAULT;
+	}
+
+	ret = smd_stream_write(ch, _data, len, true);
+	if (ret < 0 || ret != len) {
+		SMD_DBG("%s failed to write pkt data: %d returned\n",
+								__func__, ret);
+		return ret;
+	}
+
+	return len;
+}
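+/*
+ * Packet framing, as implemented above: every packet is preceded by a
+ * five-word header in which only hdr[0] (the payload length) is used;
+ * the remaining words are written as zero. The header goes out with
+ * intr_ntfy == false so the remote side is interrupted exactly once,
+ * after the payload write completes.
+ */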
+
+static int smd_stream_read(smd_channel_t *ch, void *data, int len)
+{
+	int r;
+
+	if (len < 0)
+		return -EINVAL;
+
+	r = ch_read(ch, data, len);
+	if (r > 0)
+		if (!read_intr_blocked(ch))
+			ch->notify_other_cpu(ch);
+
+	return r;
+}
+
+static int smd_packet_read(smd_channel_t *ch, void *data, int len)
+{
+	unsigned long flags;
+	int r;
+
+	if (len < 0)
+		return -EINVAL;
+
+	if (ch->current_packet > (uint32_t)INT_MAX) {
+		pr_err("%s: Invalid packet size for Edge %d and Channel %s",
+			__func__, ch->type, ch->name);
+		return -EFAULT;
+	}
+
+	if (len > ch->current_packet)
+		len = ch->current_packet;
+
+	r = ch_read(ch, data, len);
+	if (r > 0)
+		if (!read_intr_blocked(ch))
+			ch->notify_other_cpu(ch);
+
+	spin_lock_irqsave(&smd_lock, flags);
+	ch->current_packet -= r;
+	update_packet_state(ch);
+	spin_unlock_irqrestore(&smd_lock, flags);
+
+	return r;
+}
+
+static int smd_packet_read_from_cb(smd_channel_t *ch, void *data, int len)
+{
+	int r;
+
+	if (len < 0)
+		return -EINVAL;
+
+	if (ch->current_packet > (uint32_t)INT_MAX) {
+		pr_err("%s: Invalid packet size for Edge %d and Channel %s",
+			__func__, ch->type, ch->name);
+		return -EFAULT;
+	}
+
+	if (len > ch->current_packet)
+		len = ch->current_packet;
+
+	r = ch_read(ch, data, len);
+	if (r > 0)
+		if (!read_intr_blocked(ch))
+			ch->notify_other_cpu(ch);
+
+	ch->current_packet -= r;
+	update_packet_state(ch);
+
+	return r;
+}
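+/*
+ * smd_packet_read_from_cb() mirrors smd_packet_read() minus the smd_lock
+ * acquisition: it is intended for notify callbacks, which run with
+ * smd_lock already held, so retaking the spinlock would deadlock.
+ */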
+
+/**
+ * smd_alloc() - Init local channel structure with information stored in SMEM
+ *
+ * @ch: pointer to the local structure for this channel
+ * @table_id: the id of the table this channel resides in. 1 = first table, 2 =
+ *		second table, etc
+ * @r_info: pointer to the info structure of the remote proc for this channel
+ * @returns: -EINVAL for failure; 0 for success
+ *
+ * ch must point to an allocated instance of struct smd_channel that is zeroed
+ * out, and has the n and type members already initialized to the correct values
+ */
+static int smd_alloc(struct smd_channel *ch, int table_id,
+						struct remote_proc_info *r_info)
+{
+	void *buffer;
+	unsigned buffer_sz;
+	unsigned base_id;
+	unsigned fifo_id;
+
+	switch (table_id) {
+	case PRI_ALLOC_TBL:
+		base_id = SMEM_SMD_BASE_ID;
+		fifo_id = SMEM_SMD_FIFO_BASE_ID;
+		break;
+	case SEC_ALLOC_TBL:
+		base_id = SMEM_SMD_BASE_ID_2;
+		fifo_id = SMEM_SMD_FIFO_BASE_ID_2;
+		break;
+	default:
+		SMD_INFO("Invalid table_id:%d passed to smd_alloc\n", table_id);
+		return -EINVAL;
+	}
+
+	if (is_word_access_ch(ch->type)) {
+		struct smd_shared_word_access *shared2;
+		shared2 = smem_find(base_id + ch->n, sizeof(*shared2),
+							r_info->remote_pid, 0);
+		if (!shared2) {
+			SMD_INFO("smem_find failed ch=%d\n", ch->n);
+			return -EINVAL;
+		}
+		ch->send = &shared2->ch0;
+		ch->recv = &shared2->ch1;
+	} else {
+		struct smd_shared *shared2;
+		shared2 = smem_find(base_id + ch->n, sizeof(*shared2),
+							r_info->remote_pid, 0);
+		if (!shared2) {
+			SMD_INFO("smem_find failed ch=%d\n", ch->n);
+			return -EINVAL;
+		}
+		ch->send = &shared2->ch0;
+		ch->recv = &shared2->ch1;
+	}
+	ch->half_ch = get_half_ch_funcs(ch->type);
+
+	buffer = smem_get_entry(fifo_id + ch->n, &buffer_sz,
+							r_info->remote_pid, 0);
+	if (!buffer) {
+		SMD_INFO("smem_get_entry failed\n");
+		return -EINVAL;
+	}
+
+	/* buffer size must be a multiple of 32 */
+	if ((buffer_sz & (SZ_32 - 1)) != 0) {
+		SMD_INFO("Buffer size: %u not multiple of 32\n", buffer_sz);
+		return -EINVAL;
+	}
+	buffer_sz /= 2;
+	ch->send_data = buffer;
+	ch->recv_data = buffer + buffer_sz;
+	ch->fifo_size = buffer_sz;
+
+	return 0;
+}
+
+/**
+ * smd_alloc_channel() - Create and init local structures for a newly allocated
+ *			SMD channel
+ *
+ * @alloc_elm: the allocation element stored in SMEM for this channel
+ * @table_id: the id of the table this channel resides in. 1 = first table, 2 =
+ *		second table, etc
+ * @r_info: pointer to the info structure of the remote proc for this channel
+ * @returns: error code for failure; 0 for success
+ */
+static int smd_alloc_channel(struct smd_alloc_elm *alloc_elm, int table_id,
+				struct remote_proc_info *r_info)
+{
+	struct smd_channel *ch;
+
+	ch = kzalloc(sizeof(struct smd_channel), GFP_KERNEL);
+	if (ch == 0) {
+		pr_err("smd_alloc_channel() out of memory\n");
+		return -ENOMEM;
+	}
+	ch->n = alloc_elm->cid;
+	ch->type = SMD_CHANNEL_TYPE(alloc_elm->type);
+
+	if (smd_alloc(ch, table_id, r_info)) {
+		kfree(ch);
+		return -ENODEV;
+	}
+
+	/* probe_worker guarantees ch->type will be a valid type */
+	if (ch->type == SMD_APPS_MODEM)
+		ch->notify_other_cpu = notify_modem_smd;
+	else if (ch->type == SMD_APPS_QDSP)
+		ch->notify_other_cpu = notify_dsp_smd;
+	else if (ch->type == SMD_APPS_DSPS)
+		ch->notify_other_cpu = notify_dsps_smd;
+	else if (ch->type == SMD_APPS_WCNSS)
+		ch->notify_other_cpu = notify_wcnss_smd;
+	else if (ch->type == SMD_APPS_Q6FW)
+		ch->notify_other_cpu = notify_modemfw_smd;
+	else if (ch->type == SMD_APPS_RPM)
+		ch->notify_other_cpu = notify_rpm_smd;
+
+	if (smd_is_packet(alloc_elm)) {
+		ch->read = smd_packet_read;
+		ch->write = smd_packet_write;
+		ch->read_avail = smd_packet_read_avail;
+		ch->write_avail = smd_packet_write_avail;
+		ch->update_state = update_packet_state;
+		ch->read_from_cb = smd_packet_read_from_cb;
+		ch->is_pkt_ch = 1;
+	} else {
+		ch->read = smd_stream_read;
+		ch->write = smd_stream_write;
+		ch->read_avail = smd_stream_read_avail;
+		ch->write_avail = smd_stream_write_avail;
+		ch->update_state = update_stream_state;
+		ch->read_from_cb = smd_stream_read;
+	}
+
+	if (is_word_access_ch(ch->type)) {
+		ch->read_from_fifo = smd_memcpy32_from_fifo;
+		ch->write_to_fifo = smd_memcpy32_to_fifo;
+	} else {
+		ch->read_from_fifo = smd_memcpy_from_fifo;
+		ch->write_to_fifo = smd_memcpy_to_fifo;
+	}
+
+	smd_memcpy_from_fifo(ch->name, alloc_elm->name, SMD_MAX_CH_NAME_LEN);
+	ch->name[SMD_MAX_CH_NAME_LEN-1] = 0;
+
+	ch->pdev.name = ch->name;
+	ch->pdev.id = ch->type;
+
+	SMD_INFO("smd_alloc_channel() '%s' cid=%d\n",
+		 ch->name, ch->n);
+
+	mutex_lock(&smd_creation_mutex);
+	list_add(&ch->ch_list, &smd_ch_closed_list);
+	mutex_unlock(&smd_creation_mutex);
+
+	platform_device_register(&ch->pdev);
+	if (!strncmp(ch->name, "LOOPBACK", 8) && ch->type == SMD_APPS_MODEM) {
+		/* create a platform driver to be used by smd_tty driver
+		 * so that it can access the loopback port
+		 */
+		loopback_tty_pdev.id = ch->type;
+		platform_device_register(&loopback_tty_pdev);
+	}
+	return 0;
+}
+
+static void do_nothing_notify(void *priv, unsigned flags)
+{
+}
+
+static void finalize_channel_close_fn(struct work_struct *work)
+{
+	unsigned long flags;
+	struct smd_channel *ch;
+	struct smd_channel *index;
+
+	mutex_lock(&smd_creation_mutex);
+	spin_lock_irqsave(&smd_lock, flags);
+	list_for_each_entry_safe(ch, index,  &smd_ch_to_close_list, ch_list) {
+		list_del(&ch->ch_list);
+		list_add(&ch->ch_list, &smd_ch_closed_list);
+		ch->notify(ch->priv, SMD_EVENT_REOPEN_READY);
+		ch->notify = do_nothing_notify;
+	}
+	spin_unlock_irqrestore(&smd_lock, flags);
+	mutex_unlock(&smd_creation_mutex);
+}
+
+struct smd_channel *smd_get_channel(const char *name, uint32_t type)
+{
+	struct smd_channel *ch;
+
+	mutex_lock(&smd_creation_mutex);
+	list_for_each_entry(ch, &smd_ch_closed_list, ch_list) {
+		if (!strcmp(name, ch->name) &&
+			(type == ch->type)) {
+			list_del(&ch->ch_list);
+			mutex_unlock(&smd_creation_mutex);
+			return ch;
+		}
+	}
+	mutex_unlock(&smd_creation_mutex);
+
+	return NULL;
+}
+
+int smd_named_open_on_edge(const char *name, uint32_t edge,
+			   smd_channel_t **_ch,
+			   void *priv, void (*notify)(void *, unsigned))
+{
+	struct smd_channel *ch;
+	unsigned long flags;
+
+	if (edge >= SMD_NUM_TYPE) {
+		pr_err("%s: edge:%d is invalid\n", __func__, edge);
+		return -EINVAL;
+	}
+
+	if (!smd_edge_inited(edge)) {
+		SMD_INFO("smd_open() before smd_init()\n");
+		return -EPROBE_DEFER;
+	}
+
+	SMD_DBG("smd_open('%s', %p, %p)\n", name, priv, notify);
+
+	ch = smd_get_channel(name, edge);
+	if (!ch) {
+		spin_lock_irqsave(&smd_lock, flags);
+		/* check opened list for port */
+		list_for_each_entry(ch,
+			&remote_info[edge_to_pids[edge].remote_pid].ch_list,
+			ch_list) {
+			if (!strcmp(name, ch->name)) {
+				/* channel is already open */
+				spin_unlock_irqrestore(&smd_lock, flags);
+				SMD_DBG("smd_open: channel '%s' already open\n",
+					ch->name);
+				return -EBUSY;
+			}
+		}
+
+		/* check closing list for port */
+		list_for_each_entry(ch, &smd_ch_closing_list, ch_list) {
+			if (!strncmp(name, ch->name, 20) &&
+				(edge == ch->type)) {
+				/* channel exists, but is being closed */
+				spin_unlock_irqrestore(&smd_lock, flags);
+				return -EAGAIN;
+			}
+		}
+
+		/* check closing workqueue list for port */
+		list_for_each_entry(ch, &smd_ch_to_close_list, ch_list) {
+			if (!strncmp(name, ch->name, 20) &&
+				(edge == ch->type)) {
+				/* channel exists, but is being closed */
+				spin_unlock_irqrestore(&smd_lock, flags);
+				return -EAGAIN;
+			}
+		}
+		spin_unlock_irqrestore(&smd_lock, flags);
+
+		/* one final check to handle closing->closed race condition */
+		ch = smd_get_channel(name, edge);
+		if (!ch)
+			return -ENODEV;
+	}
+
+	if (ch->half_ch->get_fSTATE(ch->send)) {
+		/* remote side hasn't acknowledged our last state transition */
+		SMD_INFO("%s: ch %d valid, waiting for remote to ack state\n",
+				__func__, ch->n);
+		msleep(250);
+		if (ch->half_ch->get_fSTATE(ch->send))
+			SMD_INFO("%s: ch %d - no remote ack, continuing\n",
+					__func__, ch->n);
+	}
+
+	if (notify == 0)
+		notify = do_nothing_notify;
+
+	ch->notify = notify;
+	ch->current_packet = 0;
+	ch->last_state = SMD_SS_CLOSED;
+	ch->priv = priv;
+
+	*_ch = ch;
+
+	SMD_DBG("smd_open: opening '%s'\n", ch->name);
+
+	spin_lock_irqsave(&smd_lock, flags);
+	list_add(&ch->ch_list,
+		       &remote_info[edge_to_pids[ch->type].remote_pid].ch_list);
+
+	SMD_DBG("%s: opening ch %d\n", __func__, ch->n);
+
+	smd_state_change(ch, ch->last_state, SMD_SS_OPENING);
+
+	spin_unlock_irqrestore(&smd_lock, flags);
+
+	return 0;
+}
+EXPORT_SYMBOL(smd_named_open_on_edge);
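+/*
+ * Sketch of a minimal client of the API above (illustrative only;
+ * "DATA5" is a placeholder channel name and error handling is elided):
+ *
+ *	static smd_channel_t *ch;
+ *
+ *	static void cb(void *priv, unsigned event)
+ *	{
+ *		char buf[128];
+ *
+ *		if (event == SMD_EVENT_DATA && smd_read_avail(ch) > 0)
+ *			smd_read_from_cb(ch, buf, sizeof(buf));
+ *	}
+ *
+ *	rc = smd_named_open_on_edge("DATA5", SMD_APPS_MODEM, &ch, NULL, cb);
+ *
+ * A return of -EPROBE_DEFER means the edge is not initialized yet and the
+ * caller should retry from its own probe path.
+ */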
+
+int smd_close(smd_channel_t *ch)
+{
+	unsigned long flags;
+	bool was_opened;
+
+	if (ch == 0)
+		return -EINVAL;
+
+	SMD_INFO("smd_close(%s)\n", ch->name);
+
+	spin_lock_irqsave(&smd_lock, flags);
+	list_del(&ch->ch_list);
+
+	was_opened = ch->half_ch->get_state(ch->recv) == SMD_SS_OPENED;
+	ch_set_state(ch, SMD_SS_CLOSED);
+
+	if (was_opened) {
+		list_add(&ch->ch_list, &smd_ch_closing_list);
+		spin_unlock_irqrestore(&smd_lock, flags);
+	} else {
+		spin_unlock_irqrestore(&smd_lock, flags);
+		ch->notify = do_nothing_notify;
+		mutex_lock(&smd_creation_mutex);
+		list_add(&ch->ch_list, &smd_ch_closed_list);
+		mutex_unlock(&smd_creation_mutex);
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(smd_close);
+
+int smd_write_start(smd_channel_t *ch, int len)
+{
+	int ret;
+	unsigned hdr[5];
+
+	if (!ch) {
+		pr_err("%s: Invalid channel specified\n", __func__);
+		return -ENODEV;
+	}
+	if (!ch->is_pkt_ch) {
+		pr_err("%s: non-packet channel specified\n", __func__);
+		return -EACCES;
+	}
+	if (len < 1) {
+		pr_err("%s: invalid length: %d\n", __func__, len);
+		return -EINVAL;
+	}
+
+	if (ch->pending_pkt_sz) {
+		pr_err("%s: packet of size: %d in progress\n", __func__,
+			ch->pending_pkt_sz);
+		return -EBUSY;
+	}
+	ch->pending_pkt_sz = len;
+
+	if (smd_stream_write_avail(ch) < (SMD_HEADER_SIZE)) {
+		ch->pending_pkt_sz = 0;
+		SMD_DBG("%s: no space to write packet header\n", __func__);
+		return -EAGAIN;
+	}
+
+	hdr[0] = len;
+	hdr[1] = hdr[2] = hdr[3] = hdr[4] = 0;
+
+	ret = smd_stream_write(ch, hdr, sizeof(hdr), true);
+	if (ret < 0 || ret != sizeof(hdr)) {
+		ch->pending_pkt_sz = 0;
+		pr_err("%s: packet header failed to write\n", __func__);
+		return -EPERM;
+	}
+	return 0;
+}
+EXPORT_SYMBOL(smd_write_start);
+
+int smd_write_segment(smd_channel_t *ch, const void *data, int len)
+{
+	int bytes_written;
+
+	if (!ch) {
+		pr_err("%s: Invalid channel specified\n", __func__);
+		return -ENODEV;
+	}
+	if (len < 1) {
+		pr_err("%s: invalid length: %d\n", __func__, len);
+		return -EINVAL;
+	}
+
+	if (!ch->pending_pkt_sz) {
+		pr_err("%s: no transaction in progress\n", __func__);
+		return -ENOEXEC;
+	}
+	if (len > ch->pending_pkt_sz) {
+		pr_err("%s: segment of size: %d will make packet go over length\n",
+								__func__, len);
+		return -EINVAL;
+	}
+
+	bytes_written = smd_stream_write(ch, data, len, true);
+
+	ch->pending_pkt_sz -= bytes_written;
+
+	return bytes_written;
+}
+EXPORT_SYMBOL(smd_write_segment);
+
+int smd_write_end(smd_channel_t *ch)
+{
+	if (!ch) {
+		pr_err("%s: Invalid channel specified\n", __func__);
+		return -ENODEV;
+	}
+	if (ch->pending_pkt_sz) {
+		pr_err("%s: current packet not completely written\n", __func__);
+		return -E2BIG;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(smd_write_end);
+
+int smd_write_segment_avail(smd_channel_t *ch)
+{
+	int n;
+
+	if (!ch) {
+		pr_err("%s: Invalid channel specified\n", __func__);
+		return -ENODEV;
+	}
+	if (!ch->is_pkt_ch) {
+		pr_err("%s: non-packet channel specified\n", __func__);
+		return -ENODEV;
+	}
+
+	n = smd_stream_write_avail(ch);
+
+	/* pkt hdr already written, no need to reserve space for it */
+	if (ch->pending_pkt_sz)
+		return n;
+
+	return n > SMD_HEADER_SIZE ? n - SMD_HEADER_SIZE : 0;
+}
+EXPORT_SYMBOL(smd_write_segment_avail);
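+/*
+ * Sketch of the segmented-write sequence built on smd_write_start(),
+ * smd_write_segment() and smd_write_end() above; smd_write_end() fails
+ * with -E2BIG if the packet was not completely written. Illustrative
+ * only: a real client would wait for write space between iterations
+ * rather than spin.
+ *
+ *	if (smd_write_start(ch, len))
+ *		return;
+ *	while (written < len) {
+ *		n = smd_write_segment_avail(ch);
+ *		if (n > 0)
+ *			written += smd_write_segment(ch, buf + written,
+ *						     min(n, len - written));
+ *	}
+ *	rc = smd_write_end(ch);
+ */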
+
+int smd_read(smd_channel_t *ch, void *data, int len)
+{
+	if (!ch) {
+		pr_err("%s: Invalid channel specified\n", __func__);
+		return -ENODEV;
+	}
+
+	return ch->read(ch, data, len);
+}
+EXPORT_SYMBOL(smd_read);
+
+int smd_read_from_cb(smd_channel_t *ch, void *data, int len)
+{
+	if (!ch) {
+		pr_err("%s: Invalid channel specified\n", __func__);
+		return -ENODEV;
+	}
+
+	return ch->read_from_cb(ch, data, len);
+}
+EXPORT_SYMBOL(smd_read_from_cb);
+
+int smd_write(smd_channel_t *ch, const void *data, int len)
+{
+	if (!ch) {
+		pr_err("%s: Invalid channel specified\n", __func__);
+		return -ENODEV;
+	}
+
+	return ch->pending_pkt_sz ? -EBUSY : ch->write(ch, data, len, true);
+}
+EXPORT_SYMBOL(smd_write);
+
+int smd_read_avail(smd_channel_t *ch)
+{
+	if (!ch) {
+		pr_err("%s: Invalid channel specified\n", __func__);
+		return -ENODEV;
+	}
+
+	if (ch->current_packet > (uint32_t)INT_MAX) {
+		pr_err("%s: Invalid packet size for Edge %d and Channel %s",
+			__func__, ch->type, ch->name);
+		return -EFAULT;
+	}
+	return ch->read_avail(ch);
+}
+EXPORT_SYMBOL(smd_read_avail);
+
+int smd_write_avail(smd_channel_t *ch)
+{
+	if (!ch) {
+		pr_err("%s: Invalid channel specified\n", __func__);
+		return -ENODEV;
+	}
+
+	return ch->write_avail(ch);
+}
+EXPORT_SYMBOL(smd_write_avail);
+
+void smd_enable_read_intr(smd_channel_t *ch)
+{
+	if (ch)
+		ch->half_ch->set_fBLOCKREADINTR(ch->send, 0);
+}
+EXPORT_SYMBOL(smd_enable_read_intr);
+
+void smd_disable_read_intr(smd_channel_t *ch)
+{
+	if (ch)
+		ch->half_ch->set_fBLOCKREADINTR(ch->send, 1);
+}
+EXPORT_SYMBOL(smd_disable_read_intr);
+
+/**
+ * smd_mask_receive_interrupt() - Enable/disable receive interrupts for the
+ *		remote processor used by a particular channel.
+ * @ch:      open channel handle to use for the edge
+ * @mask:    1 = mask interrupts; 0 = unmask interrupts
+ * @cpumask: cpumask for the next cpu scheduled to be woken up
+ * @returns: 0 for success; < 0 for failure
+ *
+ * Note that this enables/disables all interrupts from the remote subsystem for
+ * all channels.  As such, it should be used with care and only for specific
+ * use cases such as power-collapse sequencing.
+ */
+int smd_mask_receive_interrupt(smd_channel_t *ch, bool mask,
+		const struct cpumask *cpumask)
+{
+	struct irq_chip *irq_chip;
+	struct irq_data *irq_data;
+	struct interrupt_config_item *int_cfg;
+
+	if (!ch)
+		return -EINVAL;
+
+	if (ch->type >= ARRAY_SIZE(edge_to_pids))
+		return -ENODEV;
+
+	int_cfg = &private_intr_config[edge_to_pids[ch->type].remote_pid].smd;
+
+	if (int_cfg->irq_id < 0)
+		return -ENODEV;
+
+	irq_chip = irq_get_chip(int_cfg->irq_id);
+	if (!irq_chip)
+		return -ENODEV;
+
+	irq_data = irq_get_irq_data(int_cfg->irq_id);
+	if (!irq_data)
+		return -ENODEV;
+
+	if (mask) {
+		SMD_POWER_INFO("SMD Masking interrupts from %s\n",
+				edge_to_pids[ch->type].subsys_name);
+		irq_chip->irq_mask(irq_data);
+		if (cpumask)
+			irq_set_affinity(int_cfg->irq_id, cpumask);
+	} else {
+		SMD_POWER_INFO("SMD Unmasking interrupts from %s\n",
+				edge_to_pids[ch->type].subsys_name);
+		irq_chip->irq_unmask(irq_data);
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(smd_mask_receive_interrupt);
+
+int smd_cur_packet_size(smd_channel_t *ch)
+{
+	if (!ch) {
+		pr_err("%s: Invalid channel specified\n", __func__);
+		return -ENODEV;
+	}
+
+	if (ch->current_packet > (uint32_t)INT_MAX) {
+		pr_err("%s: Invalid packet size for Edge %d and Channel %s",
+			__func__, ch->type, ch->name);
+		return -EFAULT;
+	}
+	return ch->current_packet;
+}
+EXPORT_SYMBOL(smd_cur_packet_size);
+
+int smd_tiocmget(smd_channel_t *ch)
+{
+	if (!ch) {
+		pr_err("%s: Invalid channel specified\n", __func__);
+		return -ENODEV;
+	}
+
+	return  (ch->half_ch->get_fDSR(ch->recv) ? TIOCM_DSR : 0) |
+		(ch->half_ch->get_fCTS(ch->recv) ? TIOCM_CTS : 0) |
+		(ch->half_ch->get_fCD(ch->recv) ? TIOCM_CD : 0) |
+		(ch->half_ch->get_fRI(ch->recv) ? TIOCM_RI : 0) |
+		(ch->half_ch->get_fCTS(ch->send) ? TIOCM_RTS : 0) |
+		(ch->half_ch->get_fDSR(ch->send) ? TIOCM_DTR : 0);
+}
+EXPORT_SYMBOL(smd_tiocmget);
+
+/* this api will be called while holding smd_lock */
+int
+smd_tiocmset_from_cb(smd_channel_t *ch, unsigned int set, unsigned int clear)
+{
+	if (!ch) {
+		pr_err("%s: Invalid channel specified\n", __func__);
+		return -ENODEV;
+	}
+
+	if (set & TIOCM_DTR)
+		ch->half_ch->set_fDSR(ch->send, 1);
+
+	if (set & TIOCM_RTS)
+		ch->half_ch->set_fCTS(ch->send, 1);
+
+	if (clear & TIOCM_DTR)
+		ch->half_ch->set_fDSR(ch->send, 0);
+
+	if (clear & TIOCM_RTS)
+		ch->half_ch->set_fCTS(ch->send, 0);
+
+	ch->half_ch->set_fSTATE(ch->send, 1);
+	barrier();
+	ch->notify_other_cpu(ch);
+
+	return 0;
+}
+EXPORT_SYMBOL(smd_tiocmset_from_cb);
+
+int smd_tiocmset(smd_channel_t *ch, unsigned int set, unsigned int clear)
+{
+	unsigned long flags;
+
+	if (!ch) {
+		pr_err("%s: Invalid channel specified\n", __func__);
+		return -ENODEV;
+	}
+
+	spin_lock_irqsave(&smd_lock, flags);
+	smd_tiocmset_from_cb(ch, set, clear);
+	spin_unlock_irqrestore(&smd_lock, flags);
+
+	return 0;
+}
+EXPORT_SYMBOL(smd_tiocmset);
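+
+/*
+ * Editor's illustrative sketch, not part of the original patch: a serial
+ * client asserting DTR/RTS when its port opens and dropping them on close.
+ * Note the deliberate cross-mapping in smd_tiocmset_from_cb() above: the
+ * local TIOCM_DTR is stored in the fDSR flag of the send half so that it
+ * shows up as DSR on the remote side.
+ */
+static void __maybe_unused example_serial_port(smd_channel_t *ch, bool open)
+{
+	if (open)
+		smd_tiocmset(ch, TIOCM_DTR | TIOCM_RTS, 0);
+	else
+		smd_tiocmset(ch, 0, TIOCM_DTR | TIOCM_RTS);
+}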
+
+int smd_is_pkt_avail(smd_channel_t *ch)
+{
+	unsigned long flags;
+
+	if (!ch || !ch->is_pkt_ch)
+		return -EINVAL;
+
+	if (ch->current_packet)
+		return 1;
+
+	spin_lock_irqsave(&smd_lock, flags);
+	update_packet_state(ch);
+	spin_unlock_irqrestore(&smd_lock, flags);
+
+	return ch->current_packet ? 1 : 0;
+}
+EXPORT_SYMBOL(smd_is_pkt_avail);
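+
+/*
+ * Editor's illustrative sketch, not part of the original patch: the usual
+ * packet-mode receive sequence built from the two helpers above.  It
+ * assumes the standard smd_read() accessor exported elsewhere in this
+ * driver.
+ */
+static void __maybe_unused example_rx_packet(smd_channel_t *ch, void *buf,
+		int buf_len)
+{
+	int pkt_len;
+
+	if (smd_is_pkt_avail(ch) != 1)
+		return;		/* no complete packet queued yet */
+
+	pkt_len = smd_cur_packet_size(ch);
+	if (pkt_len <= 0 || pkt_len > buf_len)
+		return;		/* error, or buffer too small */
+
+	if (smd_read(ch, buf, pkt_len) != pkt_len)
+		pr_err("example: short read\n");
+}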
+
+static int smsm_cb_init(void)
+{
+	struct smsm_state_info *state_info;
+	int n;
+	int ret = 0;
+
+	smsm_states = kmalloc(sizeof(struct smsm_state_info)*SMSM_NUM_ENTRIES,
+		   GFP_KERNEL);
+
+	if (!smsm_states) {
+		pr_err("%s: SMSM init failed\n", __func__);
+		return -ENOMEM;
+	}
+
+	smsm_cb_wq = create_singlethread_workqueue("smsm_cb_wq");
+	if (!smsm_cb_wq) {
+		pr_err("%s: smsm_cb_wq creation failed\n", __func__);
+		kfree(smsm_states);
+		return -EFAULT;
+	}
+
+	mutex_lock(&smsm_lock);
+	for (n = 0; n < SMSM_NUM_ENTRIES; n++) {
+		state_info = &smsm_states[n];
+		state_info->last_value = __raw_readl(SMSM_STATE_ADDR(n));
+		state_info->intr_mask_set = 0x0;
+		state_info->intr_mask_clear = 0x0;
+		INIT_LIST_HEAD(&state_info->callbacks);
+	}
+	mutex_unlock(&smsm_lock);
+
+	return ret;
+}
+
+static int smsm_init(void)
+{
+	int i;
+	struct smsm_size_info_type *smsm_size_info;
+	unsigned long flags;
+	unsigned long j_start;
+	static int first = 1;
+	remote_spinlock_t *remote_spinlock;
+
+	if (!first)
+		return 0;
+	first = 0;
+
+	/* Verify that remote spinlock is not deadlocked */
+	remote_spinlock = smem_get_remote_spinlock();
+	j_start = jiffies;
+	while (!remote_spin_trylock_irqsave(remote_spinlock, flags)) {
+		if (jiffies_to_msecs(jiffies - j_start) > RSPIN_INIT_WAIT_MS) {
+			panic("%s: Remote processor %d will not release spinlock\n",
+				__func__, remote_spin_owner(remote_spinlock));
+		}
+	}
+	remote_spin_unlock_irqrestore(remote_spinlock, flags);
+
+	smsm_size_info = smem_find(SMEM_SMSM_SIZE_INFO,
+				sizeof(struct smsm_size_info_type), 0,
+				SMEM_ANY_HOST_FLAG);
+	if (smsm_size_info) {
+		SMSM_NUM_ENTRIES = smsm_size_info->num_entries;
+		SMSM_NUM_HOSTS = smsm_size_info->num_hosts;
+	}
+
+	i = kfifo_alloc(&smsm_snapshot_fifo,
+			sizeof(uint32_t) * SMSM_NUM_ENTRIES * SMSM_SNAPSHOT_CNT,
+			GFP_KERNEL);
+	if (i) {
+		pr_err("%s: SMSM state fifo alloc failed %d\n", __func__, i);
+		return i;
+	}
+	wakeup_source_init(&smsm_snapshot_ws, "smsm_snapshot");
+
+	if (!smsm_info.state) {
+		smsm_info.state = smem_alloc(ID_SHARED_STATE,
+						SMSM_NUM_ENTRIES *
+						sizeof(uint32_t), 0,
+						SMEM_ANY_HOST_FLAG);
+
+		if (smsm_info.state)
+			__raw_writel(0, SMSM_STATE_ADDR(SMSM_APPS_STATE));
+	}
+
+	if (!smsm_info.intr_mask) {
+		smsm_info.intr_mask = smem_alloc(SMEM_SMSM_CPU_INTR_MASK,
+						SMSM_NUM_ENTRIES *
+						SMSM_NUM_HOSTS *
+						sizeof(uint32_t), 0,
+						SMEM_ANY_HOST_FLAG);
+
+		if (smsm_info.intr_mask) {
+			for (i = 0; i < SMSM_NUM_ENTRIES; i++)
+				__raw_writel(0x0,
+					SMSM_INTR_MASK_ADDR(i, SMSM_APPS));
+
+			/* Configure legacy modem bits */
+			__raw_writel(LEGACY_MODEM_SMSM_MASK,
+				SMSM_INTR_MASK_ADDR(SMSM_MODEM_STATE,
+					SMSM_APPS));
+		}
+	}
+
+	i = smsm_cb_init();
+	if (i)
+		return i;
+
+	wmb();
+
+	smsm_pm_notifier(&smsm_pm_nb, PM_POST_SUSPEND, NULL);
+	i = register_pm_notifier(&smsm_pm_nb);
+	if (i)
+		pr_err("%s: power state notif error %d\n", __func__, i);
+
+	return 0;
+}
+
+static void smsm_cb_snapshot(uint32_t use_wakeup_source)
+{
+	int n;
+	uint32_t new_state;
+	unsigned long flags;
+	int ret;
+	uint64_t timestamp;
+
+	timestamp = sched_clock();
+	ret = kfifo_avail(&smsm_snapshot_fifo);
+	if (ret < SMSM_SNAPSHOT_SIZE) {
+		pr_err("%s: SMSM snapshot full %d\n", __func__, ret);
+		return;
+	}
+
+	/*
+	 * To avoid a race condition with notify_smsm_cb_clients_worker, the
+	 * following sequence must be followed:
+	 *   1) increment snapshot count
+	 *   2) insert data into FIFO
+	 *
+	 *   Potentially in parallel, the worker:
+	 *   a) verifies >= 1 snapshots are in FIFO
+	 *   b) processes snapshot
+	 *   c) decrements reference count
+	 *
+	 *   This ordering ensures that step 1 always occurs before steps a-c.
+	 */
+	if (use_wakeup_source) {
+		spin_lock_irqsave(&smsm_snapshot_count_lock, flags);
+		if (smsm_snapshot_count == 0) {
+			SMSM_POWER_INFO("SMSM snapshot wake lock\n");
+			__pm_stay_awake(&smsm_snapshot_ws);
+		}
+		++smsm_snapshot_count;
+		spin_unlock_irqrestore(&smsm_snapshot_count_lock, flags);
+	}
+
+	/* queue state entries */
+	for (n = 0; n < SMSM_NUM_ENTRIES; n++) {
+		new_state = __raw_readl(SMSM_STATE_ADDR(n));
+
+		ret = kfifo_in(&smsm_snapshot_fifo,
+				&new_state, sizeof(new_state));
+		if (ret != sizeof(new_state)) {
+			pr_err("%s: SMSM snapshot failure %d\n", __func__, ret);
+			goto restore_snapshot_count;
+		}
+	}
+
+	ret = kfifo_in(&smsm_snapshot_fifo, &timestamp, sizeof(timestamp));
+	if (ret != sizeof(timestamp)) {
+		pr_err("%s: SMSM snapshot failure %d\n", __func__, ret);
+		goto restore_snapshot_count;
+	}
+
+	/* queue wakelock usage flag */
+	ret = kfifo_in(&smsm_snapshot_fifo,
+			&use_wakeup_source, sizeof(use_wakeup_source));
+	if (ret != sizeof(use_wakeup_source)) {
+		pr_err("%s: SMSM snapshot failure %d\n", __func__, ret);
+		goto restore_snapshot_count;
+	}
+
+	queue_work(smsm_cb_wq, &smsm_cb_work);
+	return;
+
+restore_snapshot_count:
+	if (use_wakeup_source) {
+		spin_lock_irqsave(&smsm_snapshot_count_lock, flags);
+		if (smsm_snapshot_count) {
+			--smsm_snapshot_count;
+			if (smsm_snapshot_count == 0) {
+				SMSM_POWER_INFO("SMSM snapshot wake unlock\n");
+				__pm_relax(&smsm_snapshot_ws);
+			}
+		} else {
+			pr_err("%s: invalid snapshot count\n", __func__);
+		}
+		spin_unlock_irqrestore(&smsm_snapshot_count_lock, flags);
+	}
+}
+
+static irqreturn_t smsm_irq_handler(int irq, void *data)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&smem_lock, flags);
+	if (!smsm_info.state) {
+		SMSM_INFO("<SM NO STATE>\n");
+	} else {
+		unsigned old_apps, apps;
+		unsigned modm = __raw_readl(SMSM_STATE_ADDR(SMSM_MODEM_STATE));
+
+		old_apps = apps = __raw_readl(SMSM_STATE_ADDR(SMSM_APPS_STATE));
+
+		SMSM_DBG("<SM %08x %08x>\n", apps, modm);
+		if (modm & SMSM_RESET) {
+			pr_err("SMSM: Modem SMSM state changed to SMSM_RESET.\n");
+		} else if (modm & SMSM_INIT) {
+			if (!(apps & SMSM_INIT))
+				apps |= SMSM_INIT;
+			if (modm & SMSM_SMDINIT)
+				apps |= SMSM_SMDINIT;
+		}
+
+		if (old_apps != apps) {
+			SMSM_DBG("<SM %08x NOTIFY>\n", apps);
+			__raw_writel(apps, SMSM_STATE_ADDR(SMSM_APPS_STATE));
+			notify_other_smsm(SMSM_APPS_STATE, (old_apps ^ apps));
+		}
+
+		smsm_cb_snapshot(1);
+	}
+	spin_unlock_irqrestore(&smem_lock, flags);
+	return IRQ_HANDLED;
+}
+
+irqreturn_t smsm_modem_irq_handler(int irq, void *data)
+{
+	SMSM_POWER_INFO("SMSM Int Modem->Apps\n");
+	++interrupt_stats[SMD_MODEM].smsm_in_count;
+	return smsm_irq_handler(irq, data);
+}
+
+irqreturn_t smsm_dsp_irq_handler(int irq, void *data)
+{
+	SMSM_POWER_INFO("SMSM Int LPASS->Apps\n");
+	++interrupt_stats[SMD_Q6].smsm_in_count;
+	return smsm_irq_handler(irq, data);
+}
+
+irqreturn_t smsm_dsps_irq_handler(int irq, void *data)
+{
+	SMSM_POWER_INFO("SMSM Int DSPS->Apps\n");
+	++interrupt_stats[SMD_DSPS].smsm_in_count;
+	return smsm_irq_handler(irq, data);
+}
+
+irqreturn_t smsm_wcnss_irq_handler(int irq, void *data)
+{
+	SMSM_POWER_INFO("SMSM Int WCNSS->Apps\n");
+	++interrupt_stats[SMD_WCNSS].smsm_in_count;
+	return smsm_irq_handler(irq, data);
+}
+
+/*
+ * Changes the global interrupt mask.  The set and clear masks are re-applied
+ * every time the global interrupt mask is updated for callback registration
+ * and de-registration.
+ *
+ * The clear mask is applied first, so if a bit is set to 1 in both the clear
+ * mask and the set mask, the result will be that the interrupt is set.
+ *
+ * @smsm_entry  SMSM entry to change
+ * @clear_mask  1 = clear bit, 0 = no-op
+ * @set_mask    1 = set bit, 0 = no-op
+ *
+ * @returns 0 for success, < 0 for error
+ */
+int smsm_change_intr_mask(uint32_t smsm_entry,
+			  uint32_t clear_mask, uint32_t set_mask)
+{
+	uint32_t  old_mask, new_mask;
+	unsigned long flags;
+
+	if (smsm_entry >= SMSM_NUM_ENTRIES) {
+		pr_err("%s: Invalid entry %d\n", __func__,
+		       smsm_entry);
+		return -EINVAL;
+	}
+
+	if (!smsm_info.intr_mask) {
+		pr_err("smsm_change_intr_mask <SM NO STATE>\n");
+		return -EIO;
+	}
+
+	spin_lock_irqsave(&smem_lock, flags);
+	smsm_states[smsm_entry].intr_mask_clear = clear_mask;
+	smsm_states[smsm_entry].intr_mask_set = set_mask;
+
+	old_mask = __raw_readl(SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_APPS));
+	new_mask = (old_mask & ~clear_mask) | set_mask;
+	__raw_writel(new_mask, SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_APPS));
+
+	wmb();
+	spin_unlock_irqrestore(&smem_lock, flags);
+
+	return 0;
+}
+EXPORT_SYMBOL(smsm_change_intr_mask);
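+
+/*
+ * Editor's worked example, not part of the original patch: with an old
+ * mask of 0x0f, clear_mask = 0x03 and set_mask = 0x01, the update above
+ * computes (0x0f & ~0x03) | 0x01 = 0x0c | 0x01 = 0x0d.  Bit 0, named in
+ * both masks, ends up set because the clear mask is applied first.
+ */
+static int __maybe_unused example_mask_update(void)
+{
+	/* Drop bits 0-1, then force bit 0 back on, for the apps entry. */
+	return smsm_change_intr_mask(SMSM_APPS_STATE, 0x03, 0x01);
+}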
+
+int smsm_get_intr_mask(uint32_t smsm_entry, uint32_t *intr_mask)
+{
+	if (smsm_entry >= SMSM_NUM_ENTRIES) {
+		pr_err("%s: Invalid entry %d\n", __func__,
+		       smsm_entry);
+		return -EINVAL;
+	}
+
+	if (!smsm_info.intr_mask) {
+		pr_err("%s: <SM NO STATE>\n", __func__);
+		return -EIO;
+	}
+
+	*intr_mask = __raw_readl(SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_APPS));
+	return 0;
+}
+EXPORT_SYMBOL(smsm_get_intr_mask);
+
+int smsm_change_state(uint32_t smsm_entry,
+		      uint32_t clear_mask, uint32_t set_mask)
+{
+	unsigned long flags;
+	uint32_t  old_state, new_state;
+
+	if (smsm_entry >= SMSM_NUM_ENTRIES) {
+		pr_err("%s: Invalid entry %d\n", __func__,
+		       smsm_entry);
+		return -EINVAL;
+	}
+
+	if (!smsm_info.state) {
+		pr_err("smsm_change_state <SM NO STATE>\n");
+		return -EIO;
+	}
+	spin_lock_irqsave(&smem_lock, flags);
+
+	old_state = __raw_readl(SMSM_STATE_ADDR(smsm_entry));
+	new_state = (old_state & ~clear_mask) | set_mask;
+	__raw_writel(new_state, SMSM_STATE_ADDR(smsm_entry));
+	SMSM_POWER_INFO("%s %d:%08x->%08x\n", __func__, smsm_entry,
+			old_state, new_state);
+	notify_other_smsm(SMSM_APPS_STATE, (old_state ^ new_state));
+
+	spin_unlock_irqrestore(&smem_lock, flags);
+
+	return 0;
+}
+EXPORT_SYMBOL(smsm_change_state);
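+
+/*
+ * Editor's illustrative sketch, not part of the original patch: the apps
+ * processor advertising its own init bits, similar to what
+ * smsm_irq_handler() does once the modem reports SMSM_INIT/SMSM_SMDINIT.
+ */
+static int __maybe_unused example_advertise_init(void)
+{
+	/* Set SMSM_INIT and SMSM_SMDINIT in the apps entry; clear nothing. */
+	return smsm_change_state(SMSM_APPS_STATE, 0,
+				 SMSM_INIT | SMSM_SMDINIT);
+}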
+
+uint32_t smsm_get_state(uint32_t smsm_entry)
+{
+	uint32_t rv = 0;
+
+	/* needs interface change to return error code */
+	if (smsm_entry >= SMSM_NUM_ENTRIES) {
+		pr_err("%s: Invalid entry %d\n", __func__,
+		       smsm_entry);
+		return 0;
+	}
+
+	if (!smsm_info.state)
+		pr_err("smsm_get_state <SM NO STATE>\n");
+	else
+		rv = __raw_readl(SMSM_STATE_ADDR(smsm_entry));
+
+	return rv;
+}
+EXPORT_SYMBOL(smsm_get_state);
+
+/**
+ * Performs SMSM callback client notification.
+ */
+void notify_smsm_cb_clients_worker(struct work_struct *work)
+{
+	struct smsm_state_cb_info *cb_info;
+	struct smsm_state_info *state_info;
+	int n;
+	uint32_t new_state;
+	uint32_t state_changes;
+	uint32_t use_wakeup_source;
+	int ret;
+	unsigned long flags;
+	uint64_t t_snapshot;
+	uint64_t t_start;
+	unsigned long nanosec_rem;
+
+	while (kfifo_len(&smsm_snapshot_fifo) >= SMSM_SNAPSHOT_SIZE) {
+		t_start = sched_clock();
+		mutex_lock(&smsm_lock);
+		for (n = 0; n < SMSM_NUM_ENTRIES; n++) {
+			state_info = &smsm_states[n];
+
+			ret = kfifo_out(&smsm_snapshot_fifo, &new_state,
+					sizeof(new_state));
+			if (ret != sizeof(new_state)) {
+				pr_err("%s: snapshot underflow %d\n",
+					__func__, ret);
+				mutex_unlock(&smsm_lock);
+				return;
+			}
+
+			state_changes = state_info->last_value ^ new_state;
+			if (state_changes) {
+				SMSM_POWER_INFO("SMSM Change %d: %08x->%08x\n",
+						n, state_info->last_value,
+						new_state);
+				list_for_each_entry(cb_info,
+					&state_info->callbacks, cb_list) {
+
+					if (cb_info->mask & state_changes)
+						cb_info->notify(cb_info->data,
+							state_info->last_value,
+							new_state);
+				}
+				state_info->last_value = new_state;
+			}
+		}
+
+		ret = kfifo_out(&smsm_snapshot_fifo, &t_snapshot,
+				sizeof(t_snapshot));
+		if (ret != sizeof(t_snapshot)) {
+			pr_err("%s: snapshot underflow %d\n",
+				__func__, ret);
+			mutex_unlock(&smsm_lock);
+			return;
+		}
+
+		/* read wakelock flag */
+		ret = kfifo_out(&smsm_snapshot_fifo, &use_wakeup_source,
+				sizeof(use_wakeup_source));
+		if (ret != sizeof(use_wakeup_source)) {
+			pr_err("%s: snapshot underflow %d\n",
+				__func__, ret);
+			mutex_unlock(&smsm_lock);
+			return;
+		}
+		mutex_unlock(&smsm_lock);
+
+		if (use_wakeup_source) {
+			spin_lock_irqsave(&smsm_snapshot_count_lock, flags);
+			if (smsm_snapshot_count) {
+				--smsm_snapshot_count;
+				if (smsm_snapshot_count == 0) {
+					SMSM_POWER_INFO(
+						"SMSM snapshot wake unlock\n");
+					__pm_relax(&smsm_snapshot_ws);
+				}
+			} else {
+				pr_err("%s: invalid snapshot count\n",
+						__func__);
+			}
+			spin_unlock_irqrestore(&smsm_snapshot_count_lock,
+					flags);
+		}
+
+		t_start = t_start - t_snapshot;
+		nanosec_rem = do_div(t_start, 1000000000U);
+		SMSM_POWER_INFO(
+			"SMSM snapshot queue response time %6u.%09lu s\n",
+			(unsigned)t_start, nanosec_rem);
+	}
+}
+
+
+/**
+ * Registers callback for SMSM state notifications when the specified
+ * bits change.
+ *
+ * @smsm_entry  Processor entry to register the callback for
+ * @mask        Bits to watch for changes
+ * @notify      Notification function to register
+ * @data        Opaque data passed in to callback
+ *
+ * @returns Status code
+ *  <0 error code
+ *  0  inserted new entry
+ *  1  updated mask of existing entry
+ */
+int smsm_state_cb_register(uint32_t smsm_entry, uint32_t mask,
+		void (*notify)(void *, uint32_t, uint32_t), void *data)
+{
+	struct smsm_state_info *state;
+	struct smsm_state_cb_info *cb_info;
+	struct smsm_state_cb_info *cb_found = NULL;
+	uint32_t new_mask = 0;
+	int ret = 0;
+
+	if (smsm_entry >= SMSM_NUM_ENTRIES)
+		return -EINVAL;
+
+	mutex_lock(&smsm_lock);
+
+	if (!smsm_states) {
+		/* smsm not yet initialized */
+		ret = -ENODEV;
+		goto cleanup;
+	}
+
+	state = &smsm_states[smsm_entry];
+	list_for_each_entry(cb_info,
+			&state->callbacks, cb_list) {
+		if (!ret && (cb_info->notify == notify) &&
+				(cb_info->data == data)) {
+			cb_info->mask |= mask;
+			cb_found = cb_info;
+			ret = 1;
+		}
+		new_mask |= cb_info->mask;
+	}
+
+	if (!cb_found) {
+		cb_info = kmalloc(sizeof(struct smsm_state_cb_info),
+			GFP_ATOMIC);
+		if (!cb_info) {
+			ret = -ENOMEM;
+			goto cleanup;
+		}
+
+		cb_info->mask = mask;
+		cb_info->notify = notify;
+		cb_info->data = data;
+		INIT_LIST_HEAD(&cb_info->cb_list);
+		list_add_tail(&cb_info->cb_list,
+			&state->callbacks);
+		new_mask |= mask;
+	}
+
+	/* update interrupt notification mask */
+	if (smsm_entry == SMSM_MODEM_STATE)
+		new_mask |= LEGACY_MODEM_SMSM_MASK;
+
+	if (smsm_info.intr_mask) {
+		unsigned long flags;
+
+		spin_lock_irqsave(&smem_lock, flags);
+		new_mask = (new_mask & ~state->intr_mask_clear)
+				| state->intr_mask_set;
+		__raw_writel(new_mask,
+				SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_APPS));
+		wmb();
+		spin_unlock_irqrestore(&smem_lock, flags);
+	}
+
+cleanup:
+	mutex_unlock(&smsm_lock);
+	return ret;
+}
+EXPORT_SYMBOL(smsm_state_cb_register);
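+
+/*
+ * Editor's illustrative sketch, not part of the original patch: a client
+ * watching the modem entry for SMSM_RESET transitions.  The callback gets
+ * the previous and new state words and runs from the smsm_cb_wq worker, so
+ * it must not assume caller context.
+ */
+static void example_modem_notify(void *data, uint32_t old_state,
+		uint32_t new_state)
+{
+	if (!(old_state & SMSM_RESET) && (new_state & SMSM_RESET))
+		pr_info("example: modem signalled reset\n");
+}
+
+static int __maybe_unused example_watch_modem(void)
+{
+	/* 0 = new registration, 1 = widened an existing mask, < 0 = error */
+	return smsm_state_cb_register(SMSM_MODEM_STATE, SMSM_RESET,
+				      example_modem_notify, NULL);
+}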
+
+
+/**
+ * Deregisters for SMSM state notifications for the specified bits.
+ *
+ * @smsm_entry  Processor entry to deregister
+ * @mask        Bits to deregister (if result is 0, callback is removed)
+ * @notify      Notification function to deregister
+ * @data        Opaque data passed in to callback
+ *
+ * @returns Status code
+ *  <0 error code
+ *  0  not found
+ *  1  updated mask
+ *  2  removed callback
+ */
+int smsm_state_cb_deregister(uint32_t smsm_entry, uint32_t mask,
+		void (*notify)(void *, uint32_t, uint32_t), void *data)
+{
+	struct smsm_state_cb_info *cb_info;
+	struct smsm_state_cb_info *cb_tmp;
+	struct smsm_state_info *state;
+	uint32_t new_mask = 0;
+	int ret = 0;
+
+	if (smsm_entry >= SMSM_NUM_ENTRIES)
+		return -EINVAL;
+
+	mutex_lock(&smsm_lock);
+
+	if (!smsm_states) {
+		/* smsm not yet initialized */
+		mutex_unlock(&smsm_lock);
+		return -ENODEV;
+	}
+
+	state = &smsm_states[smsm_entry];
+	list_for_each_entry_safe(cb_info, cb_tmp,
+		&state->callbacks, cb_list) {
+		if (!ret && (cb_info->notify == notify) &&
+			(cb_info->data == data)) {
+			cb_info->mask &= ~mask;
+			ret = 1;
+			if (!cb_info->mask) {
+				/* no mask bits set, remove callback */
+				list_del(&cb_info->cb_list);
+				kfree(cb_info);
+				ret = 2;
+				continue;
+			}
+		}
+		new_mask |= cb_info->mask;
+	}
+
+	/* update interrupt notification mask */
+	if (smsm_entry == SMSM_MODEM_STATE)
+		new_mask |= LEGACY_MODEM_SMSM_MASK;
+
+	if (smsm_info.intr_mask) {
+		unsigned long flags;
+
+		spin_lock_irqsave(&smem_lock, flags);
+		new_mask = (new_mask & ~state->intr_mask_clear)
+				| state->intr_mask_set;
+		__raw_writel(new_mask,
+				SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_APPS));
+		wmb();
+		spin_unlock_irqrestore(&smem_lock, flags);
+	}
+
+	mutex_unlock(&smsm_lock);
+	return ret;
+}
+EXPORT_SYMBOL(smsm_state_cb_deregister);
+
+static int restart_notifier_cb(struct notifier_block *this,
+				  unsigned long code,
+				  void *data);
+
+static struct restart_notifier_block restart_notifiers[] = {
+	{SMD_MODEM, "modem", .nb.notifier_call = restart_notifier_cb},
+	{SMD_Q6, "lpass", .nb.notifier_call = restart_notifier_cb},
+	{SMD_WCNSS, "wcnss", .nb.notifier_call = restart_notifier_cb},
+	{SMD_DSPS, "dsps", .nb.notifier_call = restart_notifier_cb},
+	{SMD_MODEM, "gss", .nb.notifier_call = restart_notifier_cb},
+	{SMD_Q6, "adsp", .nb.notifier_call = restart_notifier_cb},
+	{SMD_DSPS, "slpi", .nb.notifier_call = restart_notifier_cb},
+};
+
+static int restart_notifier_cb(struct notifier_block *this,
+				  unsigned long code,
+				  void *data)
+{
+	remote_spinlock_t *remote_spinlock;
+
+	/*
+	 * Some SMD or SMSM clients assume SMD/SMSM SSR handling will be
+	 * done in the AFTER_SHUTDOWN level.  If this ever changes, extra
+	 * care should be taken to verify no clients are broken.
+	 */
+	if (code == SUBSYS_AFTER_SHUTDOWN) {
+		struct restart_notifier_block *notifier;
+
+		notifier = container_of(this,
+				struct restart_notifier_block, nb);
+		SMD_INFO("%s: ssrestart for processor %d ('%s')\n",
+				__func__, notifier->processor,
+				notifier->name);
+
+		remote_spinlock = smem_get_remote_spinlock();
+		remote_spin_release(remote_spinlock, notifier->processor);
+		remote_spin_release_all(notifier->processor);
+
+		smd_channel_reset(notifier->processor);
+	}
+
+	return NOTIFY_DONE;
+}
+
+/**
+ * smd_post_init() - SMD post initialization
+ * @remote_pid: remote pid that has been initialized
+ *
+ * This function is used by the device tree initialization to complete the SMD
+ * init sequence.
+ */
+void smd_post_init(unsigned remote_pid)
+{
+	smd_channel_probe_now(&remote_info[remote_pid]);
+}
+
+/**
+ * smsm_post_init() - SMSM post initialization
+ * @returns:	0 for success, standard Linux error code otherwise
+ *
+ * This function is used by the legacy and device tree initialization
+ * to complete the SMSM init sequence.
+ */
+int smsm_post_init(void)
+{
+	int ret;
+
+	ret = smsm_init();
+	if (ret) {
+		pr_err("smsm_init() failed ret = %d\n", ret);
+		return ret;
+	}
+	smsm_irq_handler(0, 0);
+
+	return ret;
+}
+
+/**
+ * smd_get_intr_config() - Get interrupt configuration structure
+ * @edge:	edge type identifies local and remote processor
+ * @returns:	pointer to interrupt configuration
+ *
+ * This function returns the interrupt configuration of remote processor
+ * based on the edge type.
+ */
+struct interrupt_config *smd_get_intr_config(uint32_t edge)
+{
+	if (edge >= ARRAY_SIZE(edge_to_pids))
+		return NULL;
+	return &private_intr_config[edge_to_pids[edge].remote_pid];
+}
+
+/**
+ * smd_edge_to_remote_pid() - Get the remote processor ID
+ * @edge:	edge type identifies local and remote processor
+ * @returns:	remote processor ID
+ *
+ * This function returns remote processor ID based on edge type.
+ */
+int smd_edge_to_remote_pid(uint32_t edge)
+{
+	if (edge >= ARRAY_SIZE(edge_to_pids))
+		return -EINVAL;
+	return edge_to_pids[edge].remote_pid;
+}
+
+/**
+ * smd_edge_to_local_pid() - Get the local processor ID
+ * @edge:	edge type identifies local and remote processor
+ * @returns:	local processor ID
+ *
+ * This function returns local processor ID based on edge type.
+ */
+int smd_edge_to_local_pid(uint32_t edge)
+{
+	if (edge >= ARRAY_SIZE(edge_to_pids))
+		return -EINVAL;
+	return edge_to_pids[edge].local_pid;
+}
+
+/**
+ * smd_proc_set_skip_pil() - Mark whether the indicated processor is to be loaded by PIL
+ * @pid:		the processor id to mark
+ * @skip_pil:		true if @pid cannot be loaded by PIL
+ */
+void smd_proc_set_skip_pil(unsigned pid, bool skip_pil)
+{
+	if (pid >= NUM_SMD_SUBSYSTEMS) {
+		pr_err("%s: invalid pid:%d\n", __func__, pid);
+		return;
+	}
+	remote_info[pid].skip_pil = skip_pil;
+}
+
+/**
+ * smd_set_edge_subsys_name() - Set the subsystem name
+ * @edge:		edge type identifies local and remote processor
+ * @subsys_name:	pointer to subsystem name
+ *
+ * This function is used to set the subsystem name for given edge type.
+ */
+void smd_set_edge_subsys_name(uint32_t edge, const char *subsys_name)
+{
+	if (edge < ARRAY_SIZE(edge_to_pids)) {
+		if (subsys_name)
+			strlcpy(edge_to_pids[edge].subsys_name,
+				subsys_name, SMD_MAX_CH_NAME_LEN);
+		else
+			strlcpy(edge_to_pids[edge].subsys_name,
+				"", SMD_MAX_CH_NAME_LEN);
+	} else {
+		pr_err("%s: Invalid edge type[%d]\n", __func__, edge);
+	}
+}
+
+/**
+ * smd_reset_all_edge_subsys_name() - Reset the subsystem name
+ *
+ * This function is used to reset the subsystem name of all edges in
+ * targets where configuration information is available through
+ * device tree.
+ */
+void smd_reset_all_edge_subsys_name(void)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(edge_to_pids); i++)
+		strlcpy(edge_to_pids[i].subsys_name,
+			"", SMD_MAX_CH_NAME_LEN);
+}
+
+/**
+ * smd_set_edge_initialized() - Set the edge initialized status
+ * @edge:	edge type identifies local and remote processor
+ *
+ * This function sets the initialized variable based on edge type.
+ */
+void smd_set_edge_initialized(uint32_t edge)
+{
+	if (edge < ARRAY_SIZE(edge_to_pids))
+		edge_to_pids[edge].initialized = true;
+	else
+		pr_err("%s: Invalid edge type[%d]\n", __func__, edge);
+}
+
+/**
+ * smd_cfg_smd_intr() - Set the SMD interrupt configuration
+ * @proc:	remote processor ID
+ * @mask:	bit position in IRQ register
+ * @ptr:	IRQ register
+ *
+ * This function is called in Legacy init sequence and used to set
+ * the SMD interrupt configurations for particular processor.
+ */
+void smd_cfg_smd_intr(uint32_t proc, uint32_t mask, void *ptr)
+{
+	private_intr_config[proc].smd.out_bit_pos = mask;
+	private_intr_config[proc].smd.out_base = ptr;
+	private_intr_config[proc].smd.out_offset = 0;
+}
+
+/**
+ * smd_cfg_smsm_intr() -  Set the SMSM interrupt configuration
+ * @proc:	remote processor ID
+ * @mask:	bit position in IRQ register
+ * @ptr:	IRQ register
+ *
+ * This function is called in Legacy init sequence and used to set
+ * the SMSM interrupt configurations for particular processor.
+ */
+void smd_cfg_smsm_intr(uint32_t proc, uint32_t mask, void *ptr)
+{
+	private_intr_config[proc].smsm.out_bit_pos = mask;
+	private_intr_config[proc].smsm.out_base = ptr;
+	private_intr_config[proc].smsm.out_offset = 0;
+}
+
+static __init int modem_restart_late_init(void)
+{
+	int i;
+	void *handle;
+	struct restart_notifier_block *nb;
+
+	for (i = 0; i < ARRAY_SIZE(restart_notifiers); i++) {
+		nb = &restart_notifiers[i];
+		handle = subsys_notif_register_notifier(nb->name, &nb->nb);
+		SMD_DBG("%s: registering notif for '%s', handle=%p\n",
+				__func__, nb->name, handle);
+	}
+
+	return 0;
+}
+late_initcall(modem_restart_late_init);
+
+int __init msm_smd_init(void)
+{
+	static bool registered;
+	int rc;
+	int i;
+
+	if (registered)
+		return 0;
+
+	smd_log_ctx = ipc_log_context_create(NUM_LOG_PAGES, "smd", 0);
+	if (!smd_log_ctx) {
+		pr_err("%s: unable to create SMD logging context\n", __func__);
+		msm_smd_debug_mask = 0;
+	}
+
+	smsm_log_ctx = ipc_log_context_create(NUM_LOG_PAGES, "smsm", 0);
+	if (!smsm_log_ctx) {
+		pr_err("%s: unable to create SMSM logging context\n", __func__);
+		msm_smd_debug_mask = 0;
+	}
+
+	registered = true;
+
+	for (i = 0; i < NUM_SMD_SUBSYSTEMS; ++i) {
+		remote_info[i].remote_pid = i;
+		remote_info[i].free_space = UINT_MAX;
+		INIT_WORK(&remote_info[i].probe_work, smd_channel_probe_worker);
+		INIT_LIST_HEAD(&remote_info[i].ch_list);
+	}
+
+	channel_close_wq = create_singlethread_workqueue("smd_channel_close");
+	if (!channel_close_wq) {
+		pr_err("%s: create_singlethread_workqueue ENOMEM\n", __func__);
+		return -ENOMEM;
+	}
+
+	rc = msm_smd_driver_register();
+	if (rc) {
+		pr_err("%s: msm_smd_driver register failed %d\n",
+			__func__, rc);
+		return rc;
+	}
+	return 0;
+}
+
+arch_initcall(msm_smd_init);
+
+MODULE_DESCRIPTION("MSM Shared Memory Core");
+MODULE_AUTHOR("Brian Swetland <swetland@google.com>");
+MODULE_LICENSE("GPL");
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/soc/qcom/msm_smem.c	2019-10-29 09:26:24.817214667 +0100
@@ -0,0 +1,1571 @@
+/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/export.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/ipc_logging.h>
+#include <linux/kernel.h>
+#include <linux/moduleparam.h>
+#include <linux/notifier.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/printk.h>
+#include <linux/slab.h>
+#include <linux/stat.h>
+#include <soc/qcom/subsystem_notif.h>
+#include <soc/qcom/subsystem_restart.h>
+#include <soc/qcom/ramdump.h>
+
+#include <soc/qcom/smem.h>
+
+
+#include "smem_private.h"
+
+#define MODEM_SBL_VERSION_INDEX 7
+#define SMEM_VERSION_INFO_SIZE (32 * 4)
+#define SMEM_VERSION 0x000B
+
+enum {
+	MSM_SMEM_DEBUG = 1U << 0,
+	MSM_SMEM_INFO = 1U << 1,
+};
+
+static int msm_smem_debug_mask = MSM_SMEM_INFO;
+module_param_named(debug_mask, msm_smem_debug_mask,
+			int, S_IRUGO | S_IWUSR | S_IWGRP);
+static void *smem_ipc_log_ctx;
+#define NUM_LOG_PAGES 4
+
+#define IPC_LOG(x...) do {                                   \
+		if (smem_ipc_log_ctx)                        \
+			ipc_log_string(smem_ipc_log_ctx, x); \
+	} while (0)
+
+
+#define LOG_ERR(x...) do {  \
+		pr_err(x);  \
+		IPC_LOG(x); \
+	} while (0)
+#define SMEM_DBG(x...) do {                               \
+		if (msm_smem_debug_mask & MSM_SMEM_DEBUG) \
+			IPC_LOG(x);                       \
+	} while (0)
+#define SMEM_INFO(x...) do {                             \
+		if (msm_smem_debug_mask & MSM_SMEM_INFO) \
+			IPC_LOG(x);                      \
+	} while (0)
+
+#define SMEM_SPINLOCK_SMEM_ALLOC       "S:3"
+
+static void *smem_ram_base;
+static resource_size_t smem_ram_size;
+static phys_addr_t smem_ram_phys;
+static remote_spinlock_t remote_spinlock;
+static uint32_t num_smem_areas;
+static struct smem_area *smem_areas;
+static struct ramdump_segment *smem_ramdump_segments;
+static int spinlocks_initialized;
+static void *smem_ramdump_dev;
+static DEFINE_MUTEX(spinlock_init_lock);
+static DEFINE_SPINLOCK(smem_init_check_lock);
+static struct device *smem_dev;
+static int smem_module_inited;
+static RAW_NOTIFIER_HEAD(smem_module_init_notifier_list);
+static DEFINE_MUTEX(smem_module_init_notifier_lock);
+static bool probe_done;
+uint32_t smem_max_items;
+
+/* smem security feature components */
+#define SMEM_TOC_IDENTIFIER 0x434f5424 /* "$TOC" */
+#define SMEM_TOC_MAX_EXCLUSIONS 4
+#define SMEM_PART_HDR_IDENTIFIER 0x54525024 /* "$PRT" */
+#define SMEM_ALLOCATION_CANARY 0xa5a5
+
+struct smem_toc_entry {
+	uint32_t offset;
+	uint32_t size;
+	uint32_t flags;
+	uint16_t host0;
+	uint16_t host1;
+	uint32_t size_cacheline;
+	uint32_t reserved[3];
+	uint32_t exclusion_sizes[SMEM_TOC_MAX_EXCLUSIONS];
+};
+
+struct smem_toc {
+	/* Identifier is a constant, set to SMEM_TOC_IDENTIFIER. */
+	uint32_t identifier;
+	uint32_t version;
+	uint32_t num_entries;
+	uint32_t reserved[5];
+	struct smem_toc_entry entry[];
+};
+
+struct smem_partition_header {
+	/* Identifier is a constant, set to SMEM_PART_HDR_IDENTIFIER. */
+	uint32_t identifier;
+	uint16_t host0;
+	uint16_t host1;
+	uint32_t size;
+	uint32_t offset_free_uncached;
+	uint32_t offset_free_cached;
+	uint32_t reserved[3];
+};
+
+struct smem_partition_allocation_header {
+	/* Canary is a constant, set to SMEM_ALLOCATION_CANARY */
+	uint16_t canary;
+	uint16_t smem_type;
+	uint32_t size; /* includes padding bytes */
+	uint16_t padding_data;
+	uint16_t padding_hdr;
+	uint32_t reserved[1];
+};
+
+struct smem_partition_info {
+	uint32_t partition_num;
+	uint32_t offset;
+	uint32_t size_cacheline;
+};
+
+static struct smem_partition_info partitions[NUM_SMEM_SUBSYSTEMS];
+
+#define SMEM_COMM_PART_VERSION 0x000C
+#define SMEM_COMM_HOST 0xFFFE
+static bool use_comm_partition;
+static struct smem_partition_info comm_partition;
+/* end smem security feature components */
+
+/* Identifier for the SMEM target info struct. */
+#define SMEM_TARG_INFO_IDENTIFIER 0x49494953 /* "SIII" in little-endian. */
+
+struct smem_targ_info_type {
+	/* Identifier is a constant, set to SMEM_TARG_INFO_IDENTIFIER. */
+	uint32_t identifier;
+	uint32_t size;
+	phys_addr_t phys_base_addr;
+	uint32_t  max_items;
+};
+
+struct restart_notifier_block {
+	unsigned processor;
+	char *name;
+	struct notifier_block nb;
+};
+
+static int restart_notifier_cb(struct notifier_block *this,
+				unsigned long code,
+				void *data);
+
+static struct restart_notifier_block restart_notifiers[] = {
+	{SMEM_MODEM, "modem", .nb.notifier_call = restart_notifier_cb},
+	{SMEM_Q6, "lpass", .nb.notifier_call = restart_notifier_cb},
+	{SMEM_WCNSS, "wcnss", .nb.notifier_call = restart_notifier_cb},
+	{SMEM_DSPS, "dsps", .nb.notifier_call = restart_notifier_cb},
+	{SMEM_MODEM, "gss", .nb.notifier_call = restart_notifier_cb},
+	{SMEM_Q6, "adsp", .nb.notifier_call = restart_notifier_cb},
+	{SMEM_DSPS, "slpi", .nb.notifier_call = restart_notifier_cb},
+};
+
+static int init_smem_remote_spinlock(void);
+
+/**
+ * is_probe_done() - Did the probe function successfully complete
+ *
+ * @return - true if probe successfully completed, false otherwise
+ *
+ * Helper function for EPROBE_DEFER support.  If this function returns false,
+ * the calling function should immediately return -EPROBE_DEFER.
+ */
+static bool is_probe_done(void)
+{
+	return probe_done;
+}
+
+/**
+ * smem_phys_to_virt() - Convert a physical base and offset to virtual address
+ *
+ * @base: physical base address to check
+ * @offset: offset from the base to get the final address
+ * @returns: virtual SMEM address; NULL for failure
+ *
+ * Takes a physical address and an offset and checks if the resulting physical
+ * address would fit into one of the smem regions.  If so, returns the
+ * corresponding virtual address.  Otherwise returns NULL.
+ */
+static void *smem_phys_to_virt(phys_addr_t base, unsigned offset)
+{
+	int i;
+	phys_addr_t phys_addr;
+	resource_size_t size;
+
+	if (OVERFLOW_ADD_UNSIGNED(phys_addr_t, base, offset))
+		return NULL;
+
+	if (!smem_areas) {
+		/*
+		 * Early boot - no area configuration yet, so default
+		 * to using the main memory region.
+		 *
+		 * To remove the MSM_SHARED_RAM_BASE and the static
+		 * mapping of SMEM in the future, add dump_stack()
+		 * to identify the early callers of smem_get_entry()
+		 * (which calls this function) and replace those calls
+		 * with a new function that knows how to lookup the
+		 * SMEM base address before SMEM has been probed.
+		 */
+		phys_addr = smem_ram_phys;
+		size = smem_ram_size;
+
+		if (base >= phys_addr && base + offset < phys_addr + size) {
+			if (OVERFLOW_ADD_UNSIGNED(uintptr_t,
+				(uintptr_t)smem_ram_base, offset)) {
+				SMEM_INFO("%s: overflow %p %x\n", __func__,
+					smem_ram_base, offset);
+				return NULL;
+			}
+
+			return smem_ram_base + offset;
+		} else {
+			return NULL;
+		}
+	}
+	for (i = 0; i < num_smem_areas; ++i) {
+		phys_addr = smem_areas[i].phys_addr;
+		size = smem_areas[i].size;
+
+		if (base < phys_addr || base + offset >= phys_addr + size)
+			continue;
+
+		if (OVERFLOW_ADD_UNSIGNED(uintptr_t,
+				(uintptr_t)smem_areas[i].virt_addr, offset)) {
+			SMEM_INFO("%s: overflow %p %x\n", __func__,
+				smem_areas[i].virt_addr, offset);
+			return NULL;
+		}
+
+		return smem_areas[i].virt_addr + offset;
+	}
+
+	return NULL;
+}
+
+/**
+ * smem_virt_to_phys() - Convert SMEM address to physical address.
+ *
+ * @smem_address: Address of SMEM item (returned by smem_alloc(), etc)
+ * @returns: Physical address (or NULL if there is a failure)
+ *
+ * This function should only be used if an SMEM item needs to be handed
+ * off to a DMA engine.  This function does not return -EPROBE_DEFER when the
+ * driver is not ready, since the caller should obtain @smem_address from
+ * one of the other public APIs and get EPROBE_DEFER at that time, if
+ * applicable.
+ */
+phys_addr_t smem_virt_to_phys(void *smem_address)
+{
+	phys_addr_t phys_addr = 0;
+	int i;
+	void *vend;
+
+	if (!smem_areas)
+		return phys_addr;
+
+	for (i = 0; i < num_smem_areas; ++i) {
+		vend = (void *)(smem_areas[i].virt_addr + smem_areas[i].size);
+
+		if (smem_address >= smem_areas[i].virt_addr &&
+				smem_address < vend) {
+			phys_addr = smem_address - smem_areas[i].virt_addr;
+			phys_addr +=  smem_areas[i].phys_addr;
+			break;
+		}
+	}
+
+	return phys_addr;
+}
+EXPORT_SYMBOL(smem_virt_to_phys);
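+
+/*
+ * Editor's illustrative sketch, not part of the original patch: handing an
+ * SMEM item to a DMA engine.  The item pointer must come from one of the
+ * public accessors (smem_alloc()/smem_get_entry()) so that EPROBE_DEFER is
+ * handled there; the helper name is hypothetical.
+ */
+static int __maybe_unused example_dma_handoff(void *smem_item, size_t len)
+{
+	phys_addr_t pa = smem_virt_to_phys(smem_item);
+
+	if (!pa)
+		return -EINVAL;	/* pointer is not inside any SMEM area */
+
+	/* ... program the DMA engine with pa and len here ... */
+	return 0;
+}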
+
+/**
+ * __smem_get_entry_nonsecure - Get pointer and size of existing SMEM item
+ *
+ * @id:              ID of SMEM item
+ * @size:            Pointer to size variable for storing the result
+ * @skip_init_check: True means do not verify that SMEM has been initialized
+ * @use_rspinlock:   True to use the remote spinlock
+ * @returns:         Pointer to SMEM item or NULL if it doesn't exist
+ */
+static void *__smem_get_entry_nonsecure(unsigned id, unsigned *size,
+		bool skip_init_check, bool use_rspinlock)
+{
+	struct smem_shared *shared = smem_ram_base;
+	struct smem_heap_entry *toc = shared->heap_toc;
+	int use_spinlocks = spinlocks_initialized && use_rspinlock;
+	void *ret = NULL;
+	unsigned long flags = 0;
+	int rc;
+
+	if (!skip_init_check && !smem_initialized_check())
+		return ret;
+
+	if (id >= smem_max_items)
+		return ret;
+
+	if (use_spinlocks) {
+		do {
+			rc = remote_spin_trylock_irqsave(&remote_spinlock,
+				flags);
+		} while (!rc);
+	}
+	/* toc is in device memory and cannot be speculatively accessed */
+	if (toc[id].allocated) {
+		phys_addr_t phys_base;
+
+		*size = toc[id].size;
+		barrier();
+
+		phys_base = toc[id].reserved & BASE_ADDR_MASK;
+		if (!phys_base)
+			phys_base = smem_ram_phys;
+		ret = smem_phys_to_virt(phys_base, toc[id].offset);
+	} else {
+		*size = 0;
+	}
+	if (use_spinlocks)
+		remote_spin_unlock_irqrestore(&remote_spinlock, flags);
+
+	return ret;
+}
+
+/**
+ * __smem_get_entry_secure - Get pointer and size of existing SMEM item with
+ *                   security support
+ *
+ * @id:              ID of SMEM item
+ * @size:            Pointer to size variable for storing the result
+ * @to_proc:         SMEM host that shares the item with apps
+ * @flags:           Item attribute flags
+ * @skip_init_check: True means do not verify that SMEM has been initialized
+ * @use_rspinlock:   True to use the remote spinlock
+ * @returns:         Pointer to SMEM item or NULL if it doesn't exist
+ */
+static void *__smem_get_entry_secure(unsigned id,
+					unsigned *size,
+					unsigned to_proc,
+					unsigned flags,
+					bool skip_init_check,
+					bool use_rspinlock)
+{
+	struct smem_partition_header *hdr;
+	unsigned long lflags = 0;
+	void *item = NULL;
+	struct smem_partition_allocation_header *alloc_hdr;
+	uint32_t partition_num;
+	uint32_t a_hdr_size;
+	int rc;
+
+	SMEM_DBG("%s(%u, %u, %u, %d, %d)\n", __func__, id, to_proc,
+					flags, skip_init_check, use_rspinlock);
+
+	if (!skip_init_check && !smem_initialized_check())
+		return NULL;
+
+	if (id >= smem_max_items) {
+		SMEM_INFO("%s: invalid id %d\n", __func__, id);
+		return NULL;
+	}
+
+	if (!(flags & SMEM_ANY_HOST_FLAG) && to_proc >= NUM_SMEM_SUBSYSTEMS) {
+		SMEM_INFO("%s: id %u invalid to_proc %d\n", __func__, id,
+								to_proc);
+		return NULL;
+	}
+
+	if (flags & SMEM_ANY_HOST_FLAG || !partitions[to_proc].offset) {
+		if (use_comm_partition) {
+			partition_num = comm_partition.partition_num;
+			hdr = smem_areas[0].virt_addr + comm_partition.offset;
+		} else {
+			return __smem_get_entry_nonsecure(id, size,
+					skip_init_check, use_rspinlock);
+		}
+	} else {
+		partition_num = partitions[to_proc].partition_num;
+		hdr = smem_areas[0].virt_addr + partitions[to_proc].offset;
+	}
+	if (unlikely(!spinlocks_initialized)) {
+		rc = init_smem_remote_spinlock();
+		if (unlikely(rc)) {
+			SMEM_INFO(
+				"%s: id:%u remote spinlock init failed %d\n",
+						__func__, id, rc);
+			return NULL;
+		}
+	}
+	if (use_rspinlock) {
+		do {
+			rc = remote_spin_trylock_irqsave(&remote_spinlock,
+				lflags);
+		} while (!rc);
+	}
+	if (hdr->identifier != SMEM_PART_HDR_IDENTIFIER) {
+		LOG_ERR(
+			"%s: SMEM corruption detected.  Partition %d to %d at %p\n",
+								__func__,
+								partition_num,
+								to_proc,
+								hdr);
+		BUG();
+	}
+
+	if (flags & SMEM_ITEM_CACHED_FLAG) {
+		a_hdr_size = ALIGN(sizeof(*alloc_hdr),
+				partitions[to_proc].size_cacheline);
+		for (alloc_hdr = (void *)(hdr) + hdr->size - a_hdr_size;
+				(void *)(alloc_hdr) > (void *)(hdr) +
+					hdr->offset_free_cached;
+				alloc_hdr = (void *)(alloc_hdr) -
+						alloc_hdr->size - a_hdr_size) {
+			if (alloc_hdr->canary != SMEM_ALLOCATION_CANARY) {
+				LOG_ERR(
+					"%s: SMEM corruption detected.  Partition %d to %d at %p\n",
+								__func__,
+								partition_num,
+								to_proc,
+								alloc_hdr);
+				BUG();
+
+			}
+			if (alloc_hdr->smem_type == id) {
+				/* 8 byte alignment to match legacy */
+				*size = ALIGN(alloc_hdr->size -
+						alloc_hdr->padding_data, 8);
+				item = (void *)(alloc_hdr) - alloc_hdr->size;
+				break;
+			}
+		}
+	} else {
+		for (alloc_hdr = (void *)(hdr) + sizeof(*hdr);
+				(void *)(alloc_hdr) < (void *)(hdr) +
+					hdr->offset_free_uncached;
+				alloc_hdr = (void *)(alloc_hdr) +
+						sizeof(*alloc_hdr) +
+						alloc_hdr->padding_hdr +
+						alloc_hdr->size) {
+			if (alloc_hdr->canary != SMEM_ALLOCATION_CANARY) {
+				LOG_ERR(
+					"%s: SMEM corruption detected.  Partition %d to %d at %p\n",
+								__func__,
+								partition_num,
+								to_proc,
+								alloc_hdr);
+				BUG();
+
+			}
+			if (alloc_hdr->smem_type == id) {
+				/* 8 byte alignment to match legacy */
+				*size = ALIGN(alloc_hdr->size -
+						alloc_hdr->padding_data, 8);
+				item = (void *)(alloc_hdr) +
+						sizeof(*alloc_hdr) +
+						alloc_hdr->padding_hdr;
+				break;
+			}
+		}
+	}
+	if (use_rspinlock)
+		remote_spin_unlock_irqrestore(&remote_spinlock, lflags);
+
+	return item;
+}
+
+static void *__smem_find(unsigned id, unsigned size_in, bool skip_init_check)
+{
+	unsigned size;
+	void *ptr;
+
+	ptr = __smem_get_entry_nonsecure(id, &size, skip_init_check, true);
+	if (!ptr)
+		return 0;
+
+	size_in = ALIGN(size_in, 8);
+	if (size_in != size) {
+		SMEM_INFO("smem_find(%u, %u): wrong size %u\n",
+			id, size_in, size);
+		return 0;
+	}
+
+	return ptr;
+}
+
+/**
+ * smem_find - Find existing item with security support
+ *
+ * @id:       ID of SMEM item
+ * @size_in:  Size of the SMEM item
+ * @to_proc:  SMEM host that shares the item with apps
+ * @flags:    Item attribute flags
+ * @returns:  Pointer to SMEM item, NULL if it doesn't exist, or -EPROBE_DEFER
+ *	if the driver is not ready
+ */
+void *smem_find(unsigned id, unsigned size_in, unsigned to_proc, unsigned flags)
+{
+	unsigned size;
+	void *ptr;
+
+	SMEM_DBG("%s(%u, %u, %u, %u)\n", __func__, id, size_in, to_proc,
+									flags);
+
+	/*
+	 * Handle the circular dependency between SMEM and software-implemented
+	 * remote spinlocks.  SMEM must initialize the remote spinlocks in
+	 * probe() before probe is done.  EPROBE_DEFER handling will not
+	 * resolve this code path, so the spinlock item must be treated as a
+	 * special case.
+	 */
+	if (!is_probe_done() && id != SMEM_SPINLOCK_ARRAY)
+		return ERR_PTR(-EPROBE_DEFER);
+
+	ptr = smem_get_entry(id, &size, to_proc, flags);
+	if (!ptr)
+		return 0;
+
+	size_in = ALIGN(size_in, 8);
+	if (size_in != size) {
+		SMEM_INFO("smem_find(%u, %u, %u, %u): wrong size %u\n",
+			id, size_in, to_proc, flags, size);
+		return 0;
+	}
+
+	return ptr;
+}
+EXPORT_SYMBOL(smem_find);
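+
+/*
+ * Editor's illustrative sketch, not part of the original patch: the
+ * expected caller pattern.  Because smem_find() may return an ERR_PTR
+ * before probe completes, callers must check for -EPROBE_DEFER as well as
+ * NULL.  The item id and size are whatever the client protocol defines.
+ */
+static int __maybe_unused example_lookup(unsigned id, unsigned size)
+{
+	void *item;
+
+	item = smem_find(id, size, 0, SMEM_ANY_HOST_FLAG);
+	if (IS_ERR(item))
+		return PTR_ERR(item);	/* typically -EPROBE_DEFER */
+	if (!item)
+		return -ENOENT;		/* not allocated, or wrong size */
+
+	/* ... use the item ... */
+	return 0;
+}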
+
+/**
+ * alloc_item_nonsecure - Allocate an SMEM item in the nonsecure partition
+ *
+ * @id:              ID of SMEM item
+ * @size_in:         Size to allocate
+ * @returns:         Pointer to SMEM item or NULL for error
+ *
+ * Assumes the id parameter is valid and the item does not already exist.  Assumes
+ * size_in is already adjusted for alignment, if necessary.  Requires the
+ * remote spinlock to already be locked.
+ */
+static void *alloc_item_nonsecure(unsigned id, unsigned size_in)
+{
+	void *smem_base = smem_ram_base;
+	struct smem_shared *shared = smem_base;
+	struct smem_heap_entry *toc = shared->heap_toc;
+	void *ret = NULL;
+
+	if (shared->heap_info.heap_remaining >= size_in) {
+		toc[id].offset = shared->heap_info.free_offset;
+		toc[id].size = size_in;
+		/*
+		 * wmb() is necessary to ensure the allocation data is
+		 * consistent before setting the allocated flag to prevent race
+		 * conditions with remote processors
+		 */
+		wmb();
+		toc[id].allocated = 1;
+
+		shared->heap_info.free_offset += size_in;
+		shared->heap_info.heap_remaining -= size_in;
+		ret = smem_base + toc[id].offset;
+		/*
+		 * wmb() is necessary to ensure the heap data is consistent
+		 * before continuing to prevent race conditions with remote
+		 * processors
+		 */
+		wmb();
+	} else {
+		SMEM_INFO("%s: id %u not enough memory %u (required %u)\n",
+			__func__, id, shared->heap_info.heap_remaining,
+			size_in);
+	}
+
+	return ret;
+}
+
+/**
+ * alloc_item_secure - Allocate an SMEM item in a secure partition
+ *
+ * @id:              ID of SMEM item
+ * @size_in:         Size to allocate
+ * @to_proc:         SMEM host that shares the item with apps
+ * @flags:           Item attribute flags
+ * @returns:         Pointer to SMEM item or NULL for error
+ *
+ * Assumes the id parameter is valid and the item does not already exist.  Assumes
+ * size_in is the raw size requested by the client.  Assumes to_proc is a valid
+ * host, and a valid partition to that host exists.  Requires the remote
+ * spinlock to already be locked.
+ */
+static void *alloc_item_secure(unsigned id, unsigned size_in, unsigned to_proc,
+								unsigned flags)
+{
+	void *smem_base = smem_ram_base;
+	struct smem_partition_header *hdr;
+	struct smem_partition_allocation_header *alloc_hdr;
+	uint32_t a_hdr_size;
+	uint32_t a_data_size;
+	uint32_t size_cacheline;
+	uint32_t free_space;
+	uint32_t partition_num;
+	void *ret = NULL;
+
+	if (to_proc == SMEM_COMM_HOST) {
+		hdr = smem_base + comm_partition.offset;
+		partition_num = comm_partition.partition_num;
+		size_cacheline = comm_partition.size_cacheline;
+	} else if (to_proc < NUM_SMEM_SUBSYSTEMS) {
+		hdr = smem_base + partitions[to_proc].offset;
+		partition_num = partitions[to_proc].partition_num;
+		size_cacheline = partitions[to_proc].size_cacheline;
+	} else {
+		SMEM_INFO("%s: invalid to_proc %u for id %u\n", __func__,
+								to_proc, id);
+		return NULL;
+	}
+
+	if (hdr->identifier != SMEM_PART_HDR_IDENTIFIER) {
+		LOG_ERR(
+			"%s: SMEM corruption detected.  Partition %d to %d at %p\n",
+								__func__,
+								partition_num,
+								to_proc,
+								hdr);
+		BUG();
+	}
+
+	free_space = hdr->offset_free_cached -
+					hdr->offset_free_uncached;
+
+	if (flags & SMEM_ITEM_CACHED_FLAG) {
+		a_hdr_size = ALIGN(sizeof(*alloc_hdr), size_cacheline);
+		a_data_size = ALIGN(size_in, size_cacheline);
+		if (free_space < a_hdr_size + a_data_size) {
+			SMEM_INFO(
+				"%s: id %u not enough memory %u (required %u)\n",
+						__func__, id, free_space,
+						a_hdr_size + a_data_size);
+			return ret;
+		}
+		alloc_hdr = (void *)(hdr) + hdr->offset_free_cached -
+								a_hdr_size;
+		alloc_hdr->canary = SMEM_ALLOCATION_CANARY;
+		alloc_hdr->smem_type = id;
+		alloc_hdr->size = a_data_size;
+		alloc_hdr->padding_data = a_data_size - size_in;
+		alloc_hdr->padding_hdr = a_hdr_size - sizeof(*alloc_hdr);
+		hdr->offset_free_cached = hdr->offset_free_cached -
+						a_hdr_size - a_data_size;
+		ret = (void *)(alloc_hdr) - a_data_size;
+		/*
+		 * The SMEM protocol currently does not support cacheable
+		 * areas within the smem region, but if it ever does in the
+		 * future, then cache management needs to be done here.
+		 * The area of memory this item is allocated from will need to
+	 * be dynamically made cacheable, and a cache flush of the
+		 * allocation header using __cpuc_flush_dcache_area and
+		 * outer_flush_area will need to be done.
+		 */
+	} else {
+		a_hdr_size = sizeof(*alloc_hdr);
+		a_data_size = ALIGN(size_in, 8);
+		if (free_space < a_hdr_size + a_data_size) {
+			SMEM_INFO(
+				"%s: id %u not enough memory %u (required %u)\n",
+						__func__, id, free_space,
+						a_hdr_size + a_data_size);
+			return ret;
+		}
+		alloc_hdr = (void *)(hdr) + hdr->offset_free_uncached;
+		alloc_hdr->canary = SMEM_ALLOCATION_CANARY;
+		alloc_hdr->smem_type = id;
+		alloc_hdr->size = a_data_size;
+		alloc_hdr->padding_data = a_data_size - size_in;
+		alloc_hdr->padding_hdr = a_hdr_size - sizeof(*alloc_hdr);
+		hdr->offset_free_uncached = hdr->offset_free_uncached +
+						a_hdr_size + a_data_size;
+		ret = alloc_hdr + 1;
+	}
+	/*
+	 * wmb() is necessary to ensure the heap and allocation data is
+	 * consistent before continuing to prevent race conditions with remote
+	 * processors
+	 */
+	wmb();
+
+	return ret;
+}
+
+/**
+ * smem_alloc - Find an existing item, otherwise allocate it with security
+ *		support
+ *
+ * @id:       ID of SMEM item
+ * @size_in:  Size of the SMEM item
+ * @to_proc:  SMEM host that shares the item with apps
+ * @flags:    Item attribute flags
+ * @returns:  Pointer to SMEM item, NULL if it couldn't be found/allocated,
+ *	or -EPROBE_DEFER if the driver is not ready
+ */
+void *smem_alloc(unsigned id, unsigned size_in, unsigned to_proc,
+								unsigned flags)
+{
+	unsigned long lflags;
+	void *ret = NULL;
+	int rc;
+	unsigned size_out;
+	unsigned a_size_in;
+
+	SMEM_DBG("%s(%u, %u, %u, %u)\n", __func__, id, size_in, to_proc,
+									flags);
+
+	if (!is_probe_done())
+		return ERR_PTR(-EPROBE_DEFER);
+
+	if (!smem_initialized_check())
+		return NULL;
+
+	if (id >= smem_max_items) {
+		SMEM_INFO("%s: invalid id %u\n", __func__, id);
+		return NULL;
+	}
+
+	if (!(flags & SMEM_ANY_HOST_FLAG) && to_proc >= NUM_SMEM_SUBSYSTEMS) {
+		SMEM_INFO("%s: invalid to_proc %u for id %u\n", __func__,
+								to_proc, id);
+		return NULL;
+	}
+
+	if (unlikely(!spinlocks_initialized)) {
+		rc = init_smem_remote_spinlock();
+		if (unlikely(rc)) {
+			SMEM_INFO("%s: id:%u remote spinlock init failed %d\n",
+							__func__, id, rc);
+			return NULL;
+		}
+	}
+
+	a_size_in = ALIGN(size_in, 8);
+	do {
+		rc = remote_spin_trylock_irqsave(&remote_spinlock, lflags);
+	} while (!rc);
+
+	ret = __smem_get_entry_secure(id, &size_out, to_proc, flags, true,
+									false);
+	if (ret) {
+		SMEM_INFO("%s: %u already allocated\n", __func__, id);
+		if (a_size_in == size_out) {
+			remote_spin_unlock_irqrestore(&remote_spinlock, lflags);
+			return ret;
+		} else {
+			remote_spin_unlock_irqrestore(&remote_spinlock, lflags);
+			SMEM_INFO("%s: id %u wrong size %u (expected %u)\n",
+				__func__, id, size_out, a_size_in);
+			return NULL;
+		}
+	}
+
+	if (id > SMEM_FIXED_ITEM_LAST) {
+		SMEM_INFO("%s: allocating %u size %u to_proc %u flags %u\n",
+					__func__, id, size_in, to_proc, flags);
+		if (flags & SMEM_ANY_HOST_FLAG
+			|| !partitions[to_proc].offset) {
+			if (use_comm_partition)
+				ret = alloc_item_secure(id, size_in,
+							SMEM_COMM_HOST, flags);
+			else
+				ret = alloc_item_nonsecure(id, a_size_in);
+		} else {
+			ret = alloc_item_secure(id, size_in, to_proc, flags);
+		}
+	} else {
+		SMEM_INFO("%s: attempted to allocate non-dynamic item %u\n",
+								__func__, id);
+	}
+
+	remote_spin_unlock_irqrestore(&remote_spinlock, lflags);
+	return ret;
+}
+EXPORT_SYMBOL(smem_alloc);
+
+/**
+ * smem_get_entry - Get existing item with security support
+ *
+ * @id:       ID of SMEM item
+ * @size:     Pointer to size variable for storing the result
+ * @to_proc:  SMEM host that shares the item with apps
+ * @flags:    Item attribute flags
+ * @returns:  Pointer to SMEM item, NULL if it doesn't exist, or -EPROBE_DEFER
+ *	if the driver isn't ready
+ */
+void *smem_get_entry(unsigned id, unsigned *size, unsigned to_proc,
+								unsigned flags)
+{
+	SMEM_DBG("%s(%u, %u, %u)\n", __func__, id, to_proc, flags);
+
+	/*
+	 * Handle the circular dependency between SMEM and software-implemented
+	 * remote spinlocks.  SMEM must initialize the remote spinlocks in
+	 * probe() before probe is done.  EPROBE_DEFER handling will not
+	 * resolve this code path, so the spinlock item must be treated as a
+	 * special case.
+	 */
+	if (!is_probe_done() && id != SMEM_SPINLOCK_ARRAY)
+		return ERR_PTR(-EPROBE_DEFER);
+
+	return __smem_get_entry_secure(id, size, to_proc, flags, false, true);
+}
+EXPORT_SYMBOL(smem_get_entry);
+
+/**
+ * smem_get_entry_no_rlock - Get existing item without using remote spinlock
+ *
+ * @id:       ID of SMEM item
+ * @size_out: Pointer to size variable for storing the result
+ * @to_proc:  SMEM host that shares the item with apps
+ * @flags:    Item attribute flags
+ * @returns:  Pointer to SMEM item, NULL if it doesn't exist, or -EPROBE_DEFER
+ *	if the driver isn't ready
+ *
+ * This function does not lock the remote spinlock and should only be used in
+ * failure-recovery cases such as retrieving the subsystem failure reason during
+ * subsystem restart.
+ */
+void *smem_get_entry_no_rlock(unsigned id, unsigned *size_out, unsigned to_proc,
+								unsigned flags)
+{
+	if (!is_probe_done())
+		return ERR_PTR(-EPROBE_DEFER);
+
+	return __smem_get_entry_secure(id, size_out, to_proc, flags, false,
+									false);
+}
+EXPORT_SYMBOL(smem_get_entry_no_rlock);
+
+/**
+ * smem_get_remote_spinlock - Remote spinlock pointer for unit testing.
+ *
+ * @returns: pointer to SMEM remote spinlock
+ */
+remote_spinlock_t *smem_get_remote_spinlock(void)
+{
+	if (unlikely(!spinlocks_initialized))
+		init_smem_remote_spinlock();
+	return &remote_spinlock;
+}
+EXPORT_SYMBOL(smem_get_remote_spinlock);
+
+/**
+ * smem_get_free_space() - Get the available allocation free space for a
+ *				partition
+ *
+ * @to_proc: remote SMEM host.  Determines the applicable partition
+ * @returns: size in bytes available to allocate
+ *
+ * Helper function for SMD so that SMD only scans the channel allocation
+ * table for a partition when it is reasonably certain that a channel has
+ * actually been created, because scanning can be expensive.  Creating a channel
+ * will consume some of the free space in a partition, so SMD can compare the
+ * last free space size against the current free space size to determine if
+ * a channel may have been created.  SMD can't do this directly, because the
+ * necessary partition internals are restricted to just SMEM.
+ */
+unsigned smem_get_free_space(unsigned to_proc)
+{
+	struct smem_partition_header *hdr;
+	struct smem_shared *shared;
+
+	if (to_proc >= NUM_SMEM_SUBSYSTEMS) {
+		pr_err("%s: invalid to_proc:%d\n", __func__, to_proc);
+		return UINT_MAX;
+	}
+
+	if (partitions[to_proc].offset) {
+		if (unlikely(OVERFLOW_ADD_UNSIGNED(uintptr_t,
+					(uintptr_t)smem_areas[0].virt_addr,
+					partitions[to_proc].offset))) {
+			pr_err("%s: unexpected overflow detected\n", __func__);
+			return UINT_MAX;
+		}
+		hdr = smem_areas[0].virt_addr + partitions[to_proc].offset;
+		return hdr->offset_free_cached - hdr->offset_free_uncached;
+	} else {
+		shared = smem_ram_base;
+		return shared->heap_info.heap_remaining;
+	}
+}
+EXPORT_SYMBOL(smem_get_free_space);
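+
+/*
+ * Editor's illustrative sketch, not part of the original patch: the
+ * intended SMD usage of the helper above - rescan the channel allocation
+ * table only when the partition's free space has changed since the last
+ * scan (free_space is seeded with UINT_MAX in msm_smd_init()).
+ */
+static bool __maybe_unused example_should_rescan(unsigned to_proc,
+		unsigned *last_free)
+{
+	unsigned free_now = smem_get_free_space(to_proc);
+
+	if (free_now == *last_free)
+		return false;	/* nothing new allocated; skip the scan */
+
+	*last_free = free_now;
+	return true;
+}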
+
+/**
+ * smem_get_version() - Get the smem user version number
+ *
+ * @idx: SMEM user idx in SMEM_VERSION_INFO table.
+ * @returns: smem version number on success, zero otherwise.
+ */
+unsigned smem_get_version(unsigned idx)
+{
+	int *version_array;
+	struct smem_shared *smem = smem_ram_base;
+
+	if (idx >= 32) {
+		pr_err("%s: invalid idx:%d\n", __func__, idx);
+		return 0;
+	}
+
+	if (use_comm_partition)
+		version_array = smem->version;
+	else
+		version_array = __smem_find(SMEM_VERSION_INFO,
+					SMEM_VERSION_INFO_SIZE, true);
+	if (version_array == NULL)
+		return 0;
+
+	return version_array[idx];
+}
+EXPORT_SYMBOL(smem_get_version);
+
+/**
+ * init_smem_remote_spinlock - Reentrant remote spinlock initialization
+ *
+ * @returns: success or error code for failure
+ */
+static int init_smem_remote_spinlock(void)
+{
+	int rc = 0;
+
+	/*
+	 * Optimistic locking.  Init only needs to be done once by the first
+	 * caller.  After that, serializing inits between different callers
+	 * is unnecessary.  The second check after the lock ensures init
+	 * wasn't previously completed by someone else before the lock could
+	 * be grabbed.
+	 */
+	if (!spinlocks_initialized) {
+		mutex_lock(&spinlock_init_lock);
+		if (!spinlocks_initialized) {
+			rc = remote_spin_lock_init(&remote_spinlock,
+						SMEM_SPINLOCK_SMEM_ALLOC);
+			if (!rc)
+				spinlocks_initialized = 1;
+		}
+		mutex_unlock(&spinlock_init_lock);
+	}
+	return rc;
+}
+
+/**
+ * smem_initialized_check - Reentrant check that smem has been initialized
+ *
+ * @returns: true if initialized, false if not.
+ */
+bool smem_initialized_check(void)
+{
+	static int checked;
+	static int is_inited;
+	unsigned long flags;
+	struct smem_shared *smem;
+	unsigned ver;
+
+	if (likely(checked)) {
+		if (unlikely(!is_inited))
+			LOG_ERR("%s: smem not initialized\n", __func__);
+		return is_inited;
+	}
+
+	spin_lock_irqsave(&smem_init_check_lock, flags);
+	if (checked) {
+		spin_unlock_irqrestore(&smem_init_check_lock, flags);
+		if (unlikely(!is_inited))
+			LOG_ERR("%s: smem not initialized\n", __func__);
+		return is_inited;
+	}
+
+	smem = smem_ram_base;
+
+	if (smem->heap_info.initialized != 1)
+		goto failed;
+	if (smem->heap_info.reserved != 0)
+		goto failed;
+
+	/*
+	 * The Modem SBL is now the Master SBL version and is required to
+	 * pre-initialize SMEM and fill in any necessary configuration
+	 * structures.  Without the extra configuration data, the SMEM driver
+	 * cannot be properly initialized.
+	 */
+	ver = smem->version[MODEM_SBL_VERSION_INDEX];
+	if (ver == SMEM_COMM_PART_VERSION << 16) {
+		use_comm_partition = true;
+	} else if (ver != SMEM_VERSION << 16) {
+		pr_err("%s: SBL version not correct 0x%x\n",
+				__func__, ver);
+		goto failed;
+	}
+
+	is_inited = 1;
+	checked = 1;
+	spin_unlock_irqrestore(&smem_init_check_lock, flags);
+	return is_inited;
+
+failed:
+	is_inited = 0;
+	checked = 1;
+	spin_unlock_irqrestore(&smem_init_check_lock, flags);
+	LOG_ERR(
+		"%s: shared memory needs to be initialized by SBL before booting\n",
+								__func__);
+	return is_inited;
+}
+EXPORT_SYMBOL(smem_initialized_check);
+
+static int restart_notifier_cb(struct notifier_block *this,
+				unsigned long code,
+				void *data)
+{
+	struct restart_notifier_block *notifier;
+	struct notif_data *notifdata = data;
+	int ret;
+
+	switch (code) {
+
+	case SUBSYS_AFTER_SHUTDOWN:
+		notifier = container_of(this,
+					struct restart_notifier_block, nb);
+		SMEM_INFO("%s: ssrestart for processor %d ('%s')\n",
+				__func__, notifier->processor,
+				notifier->name);
+		remote_spin_release(&remote_spinlock, notifier->processor);
+		remote_spin_release_all(notifier->processor);
+		break;
+	case SUBSYS_SOC_RESET:
+		if (!(smem_ramdump_dev && notifdata->enable_mini_ramdumps))
+			break;
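+		/* fall through */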
+	case SUBSYS_RAMDUMP_NOTIFICATION:
+		if (!(smem_ramdump_dev && (notifdata->enable_mini_ramdumps
+						|| notifdata->enable_ramdump)))
+			break;
+		SMEM_DBG("%s: saving ramdump\n", __func__);
+		/*
+		 * XPU protection does not currently allow the
+		 * auxiliary memory regions to be dumped.  If this
+		 * changes, then num_smem_areas + 1 should be passed
+		 * into do_elf_ramdump() to dump all regions.
+		 */
+		ret = do_elf_ramdump(smem_ramdump_dev,
+				smem_ramdump_segments, 1);
+		if (ret < 0)
+			LOG_ERR("%s: unable to dump smem %d\n", __func__, ret);
+		break;
+	default:
+		break;
+	}
+
+	return NOTIFY_DONE;
+}
+
+static __init int modem_restart_late_init(void)
+{
+	int i;
+	void *handle;
+	struct restart_notifier_block *nb;
+
+	if (smem_dev)
+		smem_ramdump_dev = create_ramdump_device("smem", smem_dev);
+	if (IS_ERR_OR_NULL(smem_ramdump_dev)) {
+		LOG_ERR("%s: Unable to create smem ramdump device.\n",
+			__func__);
+		smem_ramdump_dev = NULL;
+	}
+
+	for (i = 0; i < ARRAY_SIZE(restart_notifiers); i++) {
+		nb = &restart_notifiers[i];
+		handle = subsys_notif_register_notifier(nb->name, &nb->nb);
+		SMEM_DBG("%s: registering notif for '%s', handle=%p\n",
+				__func__, nb->name, handle);
+	}
+
+	return 0;
+}
+late_initcall(modem_restart_late_init);
+
+int smem_module_init_notifier_register(struct notifier_block *nb)
+{
+	int ret;
+	if (!nb)
+		return -EINVAL;
+	mutex_lock(&smem_module_init_notifier_lock);
+	ret = raw_notifier_chain_register(&smem_module_init_notifier_list, nb);
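+	/*
+	 * If smem init has already completed, fire the callback right away
+	 * so that late registrants are still notified.
+	 */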
+	if (smem_module_inited)
+		nb->notifier_call(nb, 0, NULL);
+	mutex_unlock(&smem_module_init_notifier_lock);
+	return ret;
+}
+EXPORT_SYMBOL(smem_module_init_notifier_register);
+
+int smem_module_init_notifier_unregister(struct notifier_block *nb)
+{
+	int ret;
+	if (!nb)
+		return -EINVAL;
+	mutex_lock(&smem_module_init_notifier_lock);
+	ret = raw_notifier_chain_unregister(&smem_module_init_notifier_list,
+						nb);
+	mutex_unlock(&smem_module_init_notifier_lock);
+	return ret;
+}
+EXPORT_SYMBOL(smem_module_init_notifier_unregister);
+
+static void smem_module_init_notify(uint32_t state, void *data)
+{
+	mutex_lock(&smem_module_init_notifier_lock);
+	smem_module_inited = 1;
+	raw_notifier_call_chain(&smem_module_init_notifier_list,
+					state, data);
+	mutex_unlock(&smem_module_init_notifier_lock);
+}
+
+/**
+ * smem_init_security_partition - Init local structures for a secured smem
+ *                   partition that has apps as one of the hosts
+ *
+ * @entry:           Entry in the security TOC for the partition to init
+ * @num:             Partition ID
+ *
+ * Initialize local data structures to point to a secured smem partition
+ * that is accessible by apps and another processor.  Assumes that one of the
+ * listed hosts is apps.  Verifies that the partition is valid, otherwise the
+ * partition is skipped.  Checks for memory corruption and will BUG() if
+ * detected.  Assumes smem_areas is already initialized and that smem_areas[0]
+ * corresponds to the smem region with the secured partitions.
+ */
+static void smem_init_security_partition(struct smem_toc_entry *entry,
+								uint32_t num)
+{
+	uint16_t remote_host = 0;
+	struct smem_partition_header *hdr;
+	bool is_comm_partition = false;
+
+	if (!entry->offset) {
+		SMEM_INFO("Skipping smem partition %d - bad offset\n", num);
+		return;
+	}
+	if (!entry->size) {
+		SMEM_INFO("Skipping smem partition %d - bad size\n", num);
+		return;
+	}
+	if (!entry->size_cacheline) {
+		SMEM_INFO("Skipping smem partition %d - bad cacheline\n", num);
+		return;
+	}
+
+	if (entry->host0 == SMEM_COMM_HOST && entry->host1 == SMEM_COMM_HOST)
+		is_comm_partition = true;
+
+	if (!is_comm_partition) {
+		if (entry->host0 == SMEM_APPS)
+			remote_host = entry->host1;
+		else
+			remote_host = entry->host0;
+
+		if (remote_host >= NUM_SMEM_SUBSYSTEMS) {
+			SMEM_INFO(
+				"Skipping smem partition %d - bad remote:%d\n",
+				num, remote_host);
+			return;
+		}
+		if (partitions[remote_host].offset) {
+			SMEM_INFO(
+				"Skipping smem partition %d - duplicate of %d\n",
+				num, partitions[remote_host].partition_num);
+			return;
+		}
+
+		if (entry->host0 != SMEM_APPS && entry->host1 != SMEM_APPS) {
+			SMEM_INFO(
+				"Non-APSS Partition %d offset:%x host0:%d host1:%d\n",
+				num, entry->offset, entry->host0, entry->host1);
+			return;
+		}
+	}
+
+	hdr = smem_areas[0].virt_addr + entry->offset;
+
+	if (hdr->identifier != SMEM_PART_HDR_IDENTIFIER) {
+		LOG_ERR("Smem partition %d hdr magic is bad\n", num);
+		BUG();
+	}
+	if (!hdr->size) {
+		LOG_ERR("Smem partition %d size is 0\n", num);
+		BUG();
+	}
+	if (hdr->offset_free_uncached > hdr->size) {
+		LOG_ERR("Smem partition %d uncached heap exceeds size\n", num);
+		BUG();
+	}
+	if (hdr->offset_free_cached > hdr->size) {
+		LOG_ERR("Smem partition %d cached heap exceeds size\n", num);
+		BUG();
+	}
+	if (is_comm_partition) {
+		if (hdr->host0 == SMEM_COMM_HOST
+			&& hdr->host1 == SMEM_COMM_HOST) {
+			comm_partition.partition_num = num;
+			comm_partition.offset = entry->offset;
+			comm_partition.size_cacheline = entry->size_cacheline;
+			SMEM_INFO("Common Partition %d offset:%x\n", num,
+							entry->offset);
+		} else {
+			LOG_ERR("Smem Comm partition hosts don't match TOC\n");
+			WARN_ON(1);
+		}
+		return;
+	}
+	if (hdr->host0 != SMEM_APPS && hdr->host1 != SMEM_APPS) {
+		LOG_ERR("Smem partition %d hosts don't match TOC\n", num);
+		BUG();
+	}
+	if (hdr->host0 != remote_host && hdr->host1 != remote_host) {
+		LOG_ERR("Smem partition %d hosts don't match TOC\n", num);
+		BUG();
+	}
+
+	partitions[remote_host].partition_num = num;
+	partitions[remote_host].offset = entry->offset;
+	partitions[remote_host].size_cacheline = entry->size_cacheline;
+	SMEM_INFO("Partition %d offset:%x remote:%d\n", num, entry->offset,
+								remote_host);
+}
+
+/**
+ * smem_init_security - Init local support for secured smem
+ *
+ * Looks for a valid security TOC, and if one is found, parses it looking for
+ * partitions that apps can access.  If any such partitions are found, do the
+ * required local initialization to support them.  Assumes smem_areas is inited
+ * and smem_area[0] corresponds to the smem region with the TOC.
+ */
+static void smem_init_security(void)
+{
+	struct smem_toc *toc;
+	uint32_t i;
+
+	SMEM_DBG("%s\n", __func__);
+
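+	/* The security TOC occupies the last 4kB of the main smem region. */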
+	toc = smem_areas[0].virt_addr + smem_areas[0].size - 4 * 1024;
+
+	if (toc->identifier != SMEM_TOC_IDENTIFIER) {
+		LOG_ERR("%s failed: invalid TOC magic\n", __func__);
+		return;
+	}
+
+	for (i = 0; i < toc->num_entries; ++i) {
+		SMEM_DBG("Partition %d host0:%d host1:%d\n", i,
+							toc->entry[i].host0,
+							toc->entry[i].host1);
+		smem_init_security_partition(&toc->entry[i], i);
+	}
+
+	SMEM_DBG("%s done\n", __func__);
+}
+
+/**
+ * smem_init_target_info - Init smem target information
+ *
+ * @info_addr : smem target info physical address.
+ * @size : size of the smem target info structure.
+ *
+ * This function is used to initialize the smem_targ_info structure and checks
+ * for valid identifier, if identifier is valid initialize smem variables.
+ */
+static int smem_init_target_info(phys_addr_t info_addr, resource_size_t size)
+{
+	struct smem_targ_info_type *smem_targ_info;
+	void *smem_targ_info_addr;
+
+	smem_targ_info_addr = ioremap_nocache(info_addr, size);
+	if (!smem_targ_info_addr) {
+		LOG_ERR("%s: failed ioremap_nocache() of addr:%pa size:%pa\n",
+				__func__, &info_addr, &size);
+		return -ENODEV;
+	}
+	smem_targ_info =
+		(struct smem_targ_info_type __iomem *)smem_targ_info_addr;
+
+	if (smem_targ_info->identifier != SMEM_TARG_INFO_IDENTIFIER) {
+		LOG_ERR("%s failed: invalid TARGET INFO magic\n", __func__);
+		iounmap(smem_targ_info_addr);
+		return -ENODEV;
+	}
+	smem_ram_phys = smem_targ_info->phys_base_addr;
+	smem_ram_size = smem_targ_info->size;
+	if (smem_targ_info->max_items)
+		smem_max_items = smem_targ_info->max_items;
+	iounmap(smem_targ_info_addr);
+	return 0;
+}
+
+static int msm_smem_probe(struct platform_device *pdev)
+{
+	char *key;
+	struct resource *r;
+	phys_addr_t aux_mem_base;
+	resource_size_t aux_mem_size;
+	int temp_string_size = 11; /* "aux-mem" + 3 digits + '\0' */
+	char temp_string[temp_string_size];
+	int ret;
+	struct ramdump_segment *ramdump_segments_tmp = NULL;
+	struct smem_area *smem_areas_tmp = NULL;
+	int smem_idx = 0;
+	bool security_enabled;
+
+	r = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+						"smem_targ_info_imem");
+	if (r) {
+		if (smem_init_target_info(r->start, resource_size(r)))
+			goto smem_targ_info_legacy;
+		goto smem_targ_info_done;
+	}
+
+	r = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+						"smem_targ_info_reg");
+	if (r) {
+		void *reg_base_addr;
+		uint64_t base_addr;
+		reg_base_addr = ioremap_nocache(r->start, resource_size(r));
+		base_addr = (uint32_t)readl_relaxed(reg_base_addr);
+		base_addr |=
+			((uint64_t)readl_relaxed(reg_base_addr + 0x4) << 32);
+		iounmap(reg_base_addr);
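+		/* The target info structure must live below 4GB. */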
+		if ((base_addr == 0) || ((base_addr >> 32) != 0)) {
+			SMEM_INFO("%s: Invalid SMEM address\n", __func__);
+			goto smem_targ_info_legacy;
+		}
+		if (smem_init_target_info(base_addr,
+				sizeof(struct smem_targ_info_type)))
+			goto smem_targ_info_legacy;
+		goto smem_targ_info_done;
+	}
+
+smem_targ_info_legacy:
+	SMEM_INFO("%s: reading dt-specified SMEM address\n", __func__);
+	r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "smem");
+	if (r) {
+		smem_ram_size = resource_size(r);
+		smem_ram_phys = r->start;
+	}
+
+smem_targ_info_done:
+	if (!smem_ram_phys || !smem_ram_size) {
+		LOG_ERR("%s: Missing SMEM TARGET INFO\n", __func__);
+		return -ENODEV;
+	}
+
+	smem_ram_base = ioremap_nocache(smem_ram_phys, smem_ram_size);
+
+	if (!smem_ram_base) {
+		LOG_ERR("%s: ioremap_nocache() of addr:%pa size: %pa\n",
+				__func__,
+				&smem_ram_phys, &smem_ram_size);
+		return -ENODEV;
+	}
+
+	if (!smem_initialized_check())
+		return -ENODEV;
+
+	/*
+	 * The software implementation requires smem_find(), which needs
+	 * smem_ram_base to be initialized.  The remote spinlock item is
+	 * guaranteed to be allocated by the bootloader, so this is the
+	 * safest and earliest place to init the spinlock.
+	 */
+	ret = init_smem_remote_spinlock();
+	if (ret) {
+		LOG_ERR("%s: remote spinlock init failed %d\n", __func__, ret);
+		return ret;
+	}
+
+	key = "irq-reg-base";
+	r = platform_get_resource_byname(pdev, IORESOURCE_MEM, key);
+	if (!r) {
+		LOG_ERR("%s: missing '%s'\n", __func__, key);
+		return -ENODEV;
+	}
+
+	num_smem_areas = 1;
+	while (1) {
+		scnprintf(temp_string, temp_string_size, "aux-mem%d",
+				num_smem_areas);
+		r = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+								temp_string);
+		if (!r)
+			break;
+
+		++num_smem_areas;
+		if (num_smem_areas > 999) {
+			LOG_ERR("%s: max num aux mem regions reached\n",
+								__func__);
+			break;
+		}
+	}
+	/* Initialize main SMEM region and SSR ramdump region */
+	smem_areas_tmp = kmalloc_array(num_smem_areas, sizeof(struct smem_area),
+				GFP_KERNEL);
+	if (!smem_areas_tmp) {
+		LOG_ERR("%s: smem areas kmalloc failed\n", __func__);
+		ret = -ENOMEM;
+		goto free_smem_areas;
+	}
+
+	ramdump_segments_tmp = kcalloc(num_smem_areas,
+			sizeof(struct ramdump_segment), GFP_KERNEL);
+	if (!ramdump_segments_tmp) {
+		LOG_ERR("%s: ramdump segment kmalloc failed\n", __func__);
+		ret = -ENOMEM;
+		goto free_smem_areas;
+	}
+	smem_areas_tmp[smem_idx].phys_addr =  smem_ram_phys;
+	smem_areas_tmp[smem_idx].size = smem_ram_size;
+	smem_areas_tmp[smem_idx].virt_addr = smem_ram_base;
+
+	ramdump_segments_tmp[smem_idx].address = smem_ram_phys;
+	ramdump_segments_tmp[smem_idx].size = smem_ram_size;
+	++smem_idx;
+
+	/* Configure auxiliary SMEM regions */
+	while (1) {
+		scnprintf(temp_string, temp_string_size, "aux-mem%d",
+								smem_idx);
+		r = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+							temp_string);
+		if (!r)
+			break;
+		aux_mem_base = r->start;
+		aux_mem_size = resource_size(r);
+
+		ramdump_segments_tmp[smem_idx].address = aux_mem_base;
+		ramdump_segments_tmp[smem_idx].size = aux_mem_size;
+
+		smem_areas_tmp[smem_idx].phys_addr = aux_mem_base;
+		smem_areas_tmp[smem_idx].size = aux_mem_size;
+		smem_areas_tmp[smem_idx].virt_addr = ioremap_nocache(
+			(unsigned long)(smem_areas_tmp[smem_idx].phys_addr),
+			smem_areas_tmp[smem_idx].size);
+		SMEM_DBG("%s: %s = %pa %pa -> %p", __func__, temp_string,
+				&aux_mem_base, &aux_mem_size,
+				smem_areas_tmp[smem_idx].virt_addr);
+
+		if (!smem_areas_tmp[smem_idx].virt_addr) {
+			LOG_ERR("%s: ioremap_nocache() of addr:%pa size: %pa\n",
+				__func__,
+				&smem_areas_tmp[smem_idx].phys_addr,
+				&smem_areas_tmp[smem_idx].size);
+			ret = -ENOMEM;
+			goto free_smem_areas;
+		}
+
+		if (OVERFLOW_ADD_UNSIGNED(uintptr_t,
+				(uintptr_t)smem_areas_tmp[smem_idx].virt_addr,
+				smem_areas_tmp[smem_idx].size)) {
+			LOG_ERR(
+				"%s: invalid virtual address block %i: %p:%pa\n",
+					__func__, smem_idx,
+					smem_areas_tmp[smem_idx].virt_addr,
+					&smem_areas_tmp[smem_idx].size);
+			++smem_idx;
+			ret = -EINVAL;
+			goto free_smem_areas;
+		}
+
+		++smem_idx;
+		if (smem_idx > 999) {
+			LOG_ERR("%s: max num aux mem regions reached\n",
+							__func__);
+			break;
+		}
+	}
+
+	smem_areas = smem_areas_tmp;
+	smem_ramdump_segments = ramdump_segments_tmp;
+
+	key = "qcom,mpu-enabled";
+	security_enabled = of_property_read_bool(pdev->dev.of_node, key);
+	if (security_enabled) {
+		SMEM_INFO("smem security enabled\n");
+		smem_init_security();
+	}
+	smem_dev = &pdev->dev;
+	probe_done = true;
+
+	ret = of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev);
+	if (ret)
+		LOG_ERR("%s: of_platform_populate failed %d\n", __func__, ret);
+
+	return 0;
+
+free_smem_areas:
+	for (smem_idx = smem_idx - 1; smem_idx >= 1; --smem_idx)
+		iounmap(smem_areas_tmp[smem_idx].virt_addr);
+
+	num_smem_areas = 0;
+	kfree(ramdump_segments_tmp);
+	kfree(smem_areas_tmp);
+	return ret;
+}
+
+static const struct of_device_id msm_smem_match_table[] = {
+	{ .compatible = "qcom,smem" },
+	{},
+};
+
+static struct platform_driver msm_smem_driver = {
+	.probe = msm_smem_probe,
+	.driver = {
+		.name = "msm_smem",
+		.owner = THIS_MODULE,
+		.of_match_table = msm_smem_match_table,
+	},
+};
+
+int __init msm_smem_init(void)
+{
+	static bool registered;
+	int rc;
+
+	if (registered)
+		return 0;
+
+	registered = true;
+	smem_max_items = SMEM_NUM_ITEMS;
+	smem_ipc_log_ctx = ipc_log_context_create(NUM_LOG_PAGES, "smem", 0);
+	if (!smem_ipc_log_ctx) {
+		pr_err("%s: unable to create logging context\n", __func__);
+		msm_smem_debug_mask = 0;
+	}
+
+	rc = platform_driver_register(&msm_smem_driver);
+	if (rc) {
+		LOG_ERR("%s: msm_smem_driver register failed %d\n",
+							__func__, rc);
+		return rc;
+	}
+
+	smem_module_init_notify(0, NULL);
+
+	return 0;
+}
+
+arch_initcall(msm_smem_init);
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/soc/qcom/msm-spm.c	2019-01-22 16:16:26.659274986 +0100
@@ -0,0 +1,715 @@
+/* Copyright (c) 2011-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+
+#include "spm_driver.h"
+
+#define MSM_SPM_PMIC_STATE_IDLE  0
+
+enum {
+	MSM_SPM_DEBUG_SHADOW = 1U << 0,
+	MSM_SPM_DEBUG_VCTL = 1U << 1,
+};
+
+static int msm_spm_debug_mask;
+module_param_named(
+	debug_mask, msm_spm_debug_mask, int, S_IRUGO | S_IWUSR | S_IWGRP
+);
+
+struct saw2_data {
+	const char *ver_name;
+	uint32_t major;
+	uint32_t minor;
+	uint32_t *spm_reg_offset_ptr;
+};
+
+static uint32_t msm_spm_reg_offsets_saw2_v2_1[MSM_SPM_REG_NR] = {
+	[MSM_SPM_REG_SAW_SECURE]		= 0x00,
+	[MSM_SPM_REG_SAW_ID]			= 0x04,
+	[MSM_SPM_REG_SAW_CFG]			= 0x08,
+	[MSM_SPM_REG_SAW_SPM_STS]		= 0x0C,
+	[MSM_SPM_REG_SAW_AVS_STS]		= 0x10,
+	[MSM_SPM_REG_SAW_PMIC_STS]		= 0x14,
+	[MSM_SPM_REG_SAW_RST]			= 0x18,
+	[MSM_SPM_REG_SAW_VCTL]			= 0x1C,
+	[MSM_SPM_REG_SAW_AVS_CTL]		= 0x20,
+	[MSM_SPM_REG_SAW_AVS_LIMIT]		= 0x24,
+	[MSM_SPM_REG_SAW_AVS_DLY]		= 0x28,
+	[MSM_SPM_REG_SAW_AVS_HYSTERESIS]	= 0x2C,
+	[MSM_SPM_REG_SAW_SPM_CTL]		= 0x30,
+	[MSM_SPM_REG_SAW_SPM_DLY]		= 0x34,
+	[MSM_SPM_REG_SAW_PMIC_DATA_0]		= 0x40,
+	[MSM_SPM_REG_SAW_PMIC_DATA_1]		= 0x44,
+	[MSM_SPM_REG_SAW_PMIC_DATA_2]		= 0x48,
+	[MSM_SPM_REG_SAW_PMIC_DATA_3]		= 0x4C,
+	[MSM_SPM_REG_SAW_PMIC_DATA_4]		= 0x50,
+	[MSM_SPM_REG_SAW_PMIC_DATA_5]		= 0x54,
+	[MSM_SPM_REG_SAW_PMIC_DATA_6]		= 0x58,
+	[MSM_SPM_REG_SAW_PMIC_DATA_7]		= 0x5C,
+	[MSM_SPM_REG_SAW_SEQ_ENTRY]		= 0x80,
+	[MSM_SPM_REG_SAW_VERSION]		= 0xFD0,
+};
+
+static uint32_t msm_spm_reg_offsets_saw2_v3_0[MSM_SPM_REG_NR] = {
+	[MSM_SPM_REG_SAW_SECURE]		= 0x00,
+	[MSM_SPM_REG_SAW_ID]			= 0x04,
+	[MSM_SPM_REG_SAW_CFG]			= 0x08,
+	[MSM_SPM_REG_SAW_SPM_STS]		= 0x0C,
+	[MSM_SPM_REG_SAW_AVS_STS]		= 0x10,
+	[MSM_SPM_REG_SAW_PMIC_STS]		= 0x14,
+	[MSM_SPM_REG_SAW_RST]			= 0x18,
+	[MSM_SPM_REG_SAW_VCTL]			= 0x1C,
+	[MSM_SPM_REG_SAW_AVS_CTL]		= 0x20,
+	[MSM_SPM_REG_SAW_AVS_LIMIT]		= 0x24,
+	[MSM_SPM_REG_SAW_AVS_DLY]		= 0x28,
+	[MSM_SPM_REG_SAW_AVS_HYSTERESIS]	= 0x2C,
+	[MSM_SPM_REG_SAW_SPM_CTL]		= 0x30,
+	[MSM_SPM_REG_SAW_SPM_DLY]		= 0x34,
+	[MSM_SPM_REG_SAW_STS2]			= 0x38,
+	[MSM_SPM_REG_SAW_PMIC_DATA_0]		= 0x40,
+	[MSM_SPM_REG_SAW_PMIC_DATA_1]		= 0x44,
+	[MSM_SPM_REG_SAW_PMIC_DATA_2]		= 0x48,
+	[MSM_SPM_REG_SAW_PMIC_DATA_3]		= 0x4C,
+	[MSM_SPM_REG_SAW_PMIC_DATA_4]		= 0x50,
+	[MSM_SPM_REG_SAW_PMIC_DATA_5]		= 0x54,
+	[MSM_SPM_REG_SAW_PMIC_DATA_6]		= 0x58,
+	[MSM_SPM_REG_SAW_PMIC_DATA_7]		= 0x5C,
+	[MSM_SPM_REG_SAW_SEQ_ENTRY]		= 0x400,
+	[MSM_SPM_REG_SAW_VERSION]		= 0xFD0,
+};
+
+static uint32_t msm_spm_reg_offsets_saw2_v4_1[MSM_SPM_REG_NR] = {
+	[MSM_SPM_REG_SAW_SECURE]		= 0xC00,
+	[MSM_SPM_REG_SAW_ID]			= 0xC04,
+	[MSM_SPM_REG_SAW_STS2]			= 0xC10,
+	[MSM_SPM_REG_SAW_SPM_STS]		= 0xC0C,
+	[MSM_SPM_REG_SAW_AVS_STS]		= 0xC14,
+	[MSM_SPM_REG_SAW_PMIC_STS]		= 0xC18,
+	[MSM_SPM_REG_SAW_RST]			= 0xC1C,
+	[MSM_SPM_REG_SAW_VCTL]			= 0x900,
+	[MSM_SPM_REG_SAW_AVS_CTL]		= 0x904,
+	[MSM_SPM_REG_SAW_AVS_LIMIT]		= 0x908,
+	[MSM_SPM_REG_SAW_AVS_DLY]		= 0x90C,
+	[MSM_SPM_REG_SAW_SPM_CTL]		= 0x0,
+	[MSM_SPM_REG_SAW_SPM_DLY]		= 0x4,
+	[MSM_SPM_REG_SAW_CFG]			= 0x0C,
+	[MSM_SPM_REG_SAW_PMIC_DATA_0]		= 0x40,
+	[MSM_SPM_REG_SAW_PMIC_DATA_1]		= 0x44,
+	[MSM_SPM_REG_SAW_PMIC_DATA_2]		= 0x48,
+	[MSM_SPM_REG_SAW_PMIC_DATA_3]		= 0x4C,
+	[MSM_SPM_REG_SAW_PMIC_DATA_4]		= 0x50,
+	[MSM_SPM_REG_SAW_PMIC_DATA_5]		= 0x54,
+	[MSM_SPM_REG_SAW_SEQ_ENTRY]		= 0x400,
+	[MSM_SPM_REG_SAW_VERSION]		= 0xFD0,
+};
+
+static struct saw2_data saw2_info[] = {
+	[0] = {
+		"SAW_v2.1",
+		0x2,
+		0x1,
+		msm_spm_reg_offsets_saw2_v2_1,
+	},
+	[1] = {
+		"SAW_v2.3",
+		0x3,
+		0x0,
+		msm_spm_reg_offsets_saw2_v3_0,
+	},
+	[2] = {
+		"SAW_v3.0",
+		0x1,
+		0x0,
+		msm_spm_reg_offsets_saw2_v3_0,
+	},
+	[3] = {
+		"SAW_v4.0",
+		0x4,
+		0x1,
+		msm_spm_reg_offsets_saw2_v4_1,
+	},
+};
+
+static uint32_t num_pmic_data;
+
+static void msm_spm_drv_flush_shadow(struct msm_spm_driver_data *dev,
+		unsigned int reg_index)
+{
+	BUG_ON(!dev || !dev->reg_shadow);
+
+	__raw_writel(dev->reg_shadow[reg_index],
+		dev->reg_base_addr + dev->reg_offsets[reg_index]);
+}
+
+static void msm_spm_drv_load_shadow(struct msm_spm_driver_data *dev,
+		unsigned int reg_index)
+{
+	dev->reg_shadow[reg_index] =
+		__raw_readl(dev->reg_base_addr +
+				dev->reg_offsets[reg_index]);
+}
+
+static inline uint32_t msm_spm_drv_get_num_spm_entry(
+		struct msm_spm_driver_data *dev)
+{
+	BUG_ON(!dev);
+	msm_spm_drv_load_shadow(dev, MSM_SPM_REG_SAW_ID);
+	return (dev->reg_shadow[MSM_SPM_REG_SAW_ID] >> 24) & 0xFF;
+}
+
+static inline void msm_spm_drv_set_start_addr(
+		struct msm_spm_driver_data *dev, uint32_t ctl)
+{
+	dev->reg_shadow[MSM_SPM_REG_SAW_SPM_CTL] = ctl;
+}
+
+static inline bool msm_spm_pmic_arb_present(struct msm_spm_driver_data *dev)
+{
+	msm_spm_drv_load_shadow(dev, MSM_SPM_REG_SAW_ID);
+	return (dev->reg_shadow[MSM_SPM_REG_SAW_ID] >> 2) & 0x1;
+}
+
+static inline void msm_spm_drv_set_vctl2(struct msm_spm_driver_data *dev,
+		uint32_t vlevel)
+{
+	unsigned int pmic_data = 0;
+
+	/*
+	 * The VCTL port has to be 0 for the PMIC_STS register to be updated.
+	 * Ensure that vctl_port is always set to 0.
+	 */
+	WARN_ON(dev->vctl_port);
+
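+	/* Voltage level goes in bits [7:0], port select in bits [18:16]. */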
+	pmic_data |= vlevel;
+	pmic_data |= (dev->vctl_port & 0x7) << 16;
+
+	dev->reg_shadow[MSM_SPM_REG_SAW_VCTL] &= ~0x700FF;
+	dev->reg_shadow[MSM_SPM_REG_SAW_VCTL] |= pmic_data;
+
+	dev->reg_shadow[MSM_SPM_REG_SAW_PMIC_DATA_3] &= ~0x700FF;
+	dev->reg_shadow[MSM_SPM_REG_SAW_PMIC_DATA_3] |= pmic_data;
+
+	msm_spm_drv_flush_shadow(dev, MSM_SPM_REG_SAW_VCTL);
+	msm_spm_drv_flush_shadow(dev, MSM_SPM_REG_SAW_PMIC_DATA_3);
+}
+
+static inline uint32_t msm_spm_drv_get_num_pmic_data(
+		struct msm_spm_driver_data *dev)
+{
+	msm_spm_drv_load_shadow(dev, MSM_SPM_REG_SAW_ID);
+	mb();
+	return (dev->reg_shadow[MSM_SPM_REG_SAW_ID] >> 4) & 0x7;
+}
+
+static inline uint32_t msm_spm_drv_get_sts_pmic_state(
+		struct msm_spm_driver_data *dev)
+{
+	msm_spm_drv_load_shadow(dev, MSM_SPM_REG_SAW_PMIC_STS);
+	return (dev->reg_shadow[MSM_SPM_REG_SAW_PMIC_STS] >> 16) &
+				0x03;
+}
+
+uint32_t msm_spm_drv_get_sts_curr_pmic_data(
+		struct msm_spm_driver_data *dev)
+{
+	msm_spm_drv_load_shadow(dev, MSM_SPM_REG_SAW_PMIC_STS);
+	return dev->reg_shadow[MSM_SPM_REG_SAW_PMIC_STS] & 0x300FF;
+}
+
+static inline void msm_spm_drv_get_saw2_ver(struct msm_spm_driver_data *dev,
+		uint32_t *major, uint32_t *minor)
+{
+	uint32_t val = 0;
+
+	dev->reg_shadow[MSM_SPM_REG_SAW_VERSION] =
+			__raw_readl(dev->reg_base_addr + dev->ver_reg);
+
+	val = dev->reg_shadow[MSM_SPM_REG_SAW_VERSION];
+
+	*major = (val >> 28) & 0xF;
+	*minor = (val >> 16) & 0xFFF;
+}
+
+inline int msm_spm_drv_set_spm_enable(
+		struct msm_spm_driver_data *dev, bool enable)
+{
+	uint32_t value = enable ? 0x01 : 0x00;
+
+	if (!dev)
+		return -EINVAL;
+
+	if ((dev->reg_shadow[MSM_SPM_REG_SAW_SPM_CTL] & 0x01) ^ value) {
+
+		dev->reg_shadow[MSM_SPM_REG_SAW_SPM_CTL] &= ~0x1;
+		dev->reg_shadow[MSM_SPM_REG_SAW_SPM_CTL] |= value;
+
+		msm_spm_drv_flush_shadow(dev, MSM_SPM_REG_SAW_SPM_CTL);
+		wmb();
+	}
+	return 0;
+}
+
+int msm_spm_drv_get_avs_enable(struct msm_spm_driver_data *dev)
+{
+	if (!dev)
+		return -EINVAL;
+
+	return dev->reg_shadow[MSM_SPM_REG_SAW_AVS_CTL] & 0x01;
+}
+
+int msm_spm_drv_set_avs_enable(struct msm_spm_driver_data *dev,
+		 bool enable)
+{
+	uint32_t value = enable ? 0x1 : 0x0;
+
+	if (!dev)
+		return -EINVAL;
+
+	if ((dev->reg_shadow[MSM_SPM_REG_SAW_AVS_CTL] & 0x1) ^ value) {
+		dev->reg_shadow[MSM_SPM_REG_SAW_AVS_CTL] &= ~0x1;
+		dev->reg_shadow[MSM_SPM_REG_SAW_AVS_CTL] |= value;
+
+		msm_spm_drv_flush_shadow(dev, MSM_SPM_REG_SAW_AVS_CTL);
+	}
+
+	return 0;
+}
+
+int msm_spm_drv_set_avs_limit(struct msm_spm_driver_data *dev,
+		uint32_t min_lvl, uint32_t max_lvl)
+{
+	uint32_t value = (max_lvl & 0xff) << 16 | (min_lvl & 0xff);
+
+	if (!dev)
+		return -EINVAL;
+
+	dev->reg_shadow[MSM_SPM_REG_SAW_AVS_LIMIT] = value;
+
+	msm_spm_drv_flush_shadow(dev, MSM_SPM_REG_SAW_AVS_LIMIT);
+
+	return 0;
+}
+
+static int msm_spm_drv_avs_irq_mask(enum msm_spm_avs_irq irq)
+{
+	switch (irq) {
+	case MSM_SPM_AVS_IRQ_MIN:
+		return BIT(1);
+	case MSM_SPM_AVS_IRQ_MAX:
+		return BIT(2);
+	default:
+		return -EINVAL;
+	}
+}
+
+int msm_spm_drv_set_avs_irq_enable(struct msm_spm_driver_data *dev,
+		enum msm_spm_avs_irq irq, bool enable)
+{
+	int mask = msm_spm_drv_avs_irq_mask(irq);
+	uint32_t value;
+
+	if (!dev)
+		return -EINVAL;
+	else if (mask < 0)
+		return mask;
+
+	value = enable ? mask : 0;
+
+	if ((dev->reg_shadow[MSM_SPM_REG_SAW_AVS_CTL] & mask) ^ value) {
+		dev->reg_shadow[MSM_SPM_REG_SAW_AVS_CTL] &= ~mask;
+		dev->reg_shadow[MSM_SPM_REG_SAW_AVS_CTL] |= value;
+		msm_spm_drv_flush_shadow(dev, MSM_SPM_REG_SAW_AVS_CTL);
+	}
+
+	return 0;
+}
+
+int msm_spm_drv_avs_clear_irq(struct msm_spm_driver_data *dev,
+		enum msm_spm_avs_irq irq)
+{
+	int mask = msm_spm_drv_avs_irq_mask(irq);
+
+	if (!dev)
+		return -EINVAL;
+	else if (mask < 0)
+		return mask;
+
+	if (dev->reg_shadow[MSM_SPM_REG_SAW_AVS_CTL] & mask) {
+		/*
+		 * The interrupt status is cleared by disabling and then
+		 * re-enabling the interrupt.
+		 */
+		dev->reg_shadow[MSM_SPM_REG_SAW_AVS_CTL] &= ~mask;
+		msm_spm_drv_flush_shadow(dev, MSM_SPM_REG_SAW_AVS_CTL);
+		dev->reg_shadow[MSM_SPM_REG_SAW_AVS_CTL] |= mask;
+		msm_spm_drv_flush_shadow(dev, MSM_SPM_REG_SAW_AVS_CTL);
+	}
+
+	return 0;
+}
+
+void msm_spm_drv_flush_seq_entry(struct msm_spm_driver_data *dev)
+{
+	int i;
+	int num_spm_entry;
+
+	if (!dev) {
+		__WARN();
+		return;
+	}
+
+	num_spm_entry = msm_spm_drv_get_num_spm_entry(dev);
+
+	for (i = 0; i < num_spm_entry; i++) {
+		__raw_writel(dev->reg_seq_entry_shadow[i],
+			dev->reg_base_addr
+			+ dev->reg_offsets[MSM_SPM_REG_SAW_SEQ_ENTRY]
+			+ 4 * i);
+	}
+	mb();
+}
+
+void dump_regs(struct msm_spm_driver_data *dev, int cpu)
+{
+	msm_spm_drv_load_shadow(dev, MSM_SPM_REG_SAW_SPM_STS);
+	mb();
+	pr_err("CPU%d: spm register MSM_SPM_REG_SAW_SPM_STS: 0x%x\n", cpu,
+			dev->reg_shadow[MSM_SPM_REG_SAW_SPM_STS]);
+	msm_spm_drv_load_shadow(dev, MSM_SPM_REG_SAW_SPM_CTL);
+	mb();
+	pr_err("CPU%d: spm register MSM_SPM_REG_SAW_SPM_CTL: 0x%x\n", cpu,
+			dev->reg_shadow[MSM_SPM_REG_SAW_SPM_CTL]);
+}
+
+int msm_spm_drv_write_seq_data(struct msm_spm_driver_data *dev,
+		uint8_t *cmd, uint32_t *offset)
+{
+	uint32_t cmd_w;
+	uint32_t offset_w = *offset / 4;
+	uint8_t last_cmd;
+
+	if (!cmd)
+		return -EINVAL;
+
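+	/*
+	 * Pack the byte-wide sequencer commands into 32-bit words, least
+	 * significant byte first.  Command 0x0f ends the sequence.
+	 */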
+	while (1) {
+		int i;
+
+		last_cmd = 0;
+		cmd_w = dev->reg_seq_entry_shadow[offset_w];
+
+		for (i = (*offset % 4); i < 4; i++) {
+			last_cmd = *(cmd++);
+			cmd_w |=  last_cmd << (i * 8);
+			(*offset)++;
+			if (last_cmd == 0x0f)
+				break;
+		}
+
+		dev->reg_seq_entry_shadow[offset_w++] = cmd_w;
+		if (last_cmd == 0x0f)
+			break;
+	}
+
+	return 0;
+}
+
+int msm_spm_drv_set_low_power_mode(struct msm_spm_driver_data *dev,
+		uint32_t ctl)
+{
+
+	/*
+	 * SPM is configured to reset the start address to zero after the
+	 * end of the program.
+	 */
+	if (!dev)
+		return -EINVAL;
+
+	msm_spm_drv_set_start_addr(dev, ctl);
+
+	msm_spm_drv_flush_shadow(dev, MSM_SPM_REG_SAW_SPM_CTL);
+	wmb();
+
+	if (msm_spm_debug_mask & MSM_SPM_DEBUG_SHADOW) {
+		int i;
+		for (i = 0; i < MSM_SPM_REG_NR; i++)
+			pr_info("%s: reg %02x = 0x%08x\n", __func__,
+				dev->reg_offsets[i], dev->reg_shadow[i]);
+	}
+	msm_spm_drv_load_shadow(dev, MSM_SPM_REG_SAW_SPM_STS);
+
+	return 0;
+}
+
+uint32_t msm_spm_drv_get_vdd(struct msm_spm_driver_data *dev)
+{
+	msm_spm_drv_load_shadow(dev, MSM_SPM_REG_SAW_PMIC_STS);
+	return dev->reg_shadow[MSM_SPM_REG_SAW_PMIC_STS] & 0xFF;
+}
+
+#ifdef CONFIG_MSM_AVS_HW
+static bool msm_spm_drv_is_avs_enabled(struct msm_spm_driver_data *dev)
+{
+	msm_spm_drv_load_shadow(dev, MSM_SPM_REG_SAW_AVS_CTL);
+	return dev->reg_shadow[MSM_SPM_REG_SAW_AVS_CTL] & BIT(0);
+}
+
+static void msm_spm_drv_disable_avs(struct msm_spm_driver_data *dev)
+{
+	msm_spm_drv_load_shadow(dev, MSM_SPM_REG_SAW_AVS_CTL);
+	dev->reg_shadow[MSM_SPM_REG_SAW_AVS_CTL] &= ~BIT(27);
+	msm_spm_drv_flush_shadow(dev, MSM_SPM_REG_SAW_AVS_CTL);
+}
+
+static void msm_spm_drv_enable_avs(struct msm_spm_driver_data *dev)
+{
+	dev->reg_shadow[MSM_SPM_REG_SAW_AVS_CTL] |= BIT(27);
+	msm_spm_drv_flush_shadow(dev, MSM_SPM_REG_SAW_AVS_CTL);
+}
+
+static void msm_spm_drv_set_avs_vlevel(struct msm_spm_driver_data *dev,
+		unsigned int vlevel)
+{
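+	/*
+	 * Mask 0x7efc00 clears two 6-bit level fields in AVS_CTL:
+	 * bits [15:10] get vlevel - 4 and bits [22:17] get vlevel,
+	 * presumably the AVS minimum and maximum voltage levels.
+	 */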
+	vlevel &= 0x3f;
+	dev->reg_shadow[MSM_SPM_REG_SAW_AVS_CTL] &= ~0x7efc00;
+	dev->reg_shadow[MSM_SPM_REG_SAW_AVS_CTL] |= ((vlevel - 4) << 10);
+	dev->reg_shadow[MSM_SPM_REG_SAW_AVS_CTL] |= (vlevel << 17);
+	msm_spm_drv_flush_shadow(dev, MSM_SPM_REG_SAW_AVS_CTL);
+}
+
+#else
+static bool msm_spm_drv_is_avs_enabled(struct msm_spm_driver_data *dev)
+{
+	return false;
+}
+
+static void msm_spm_drv_disable_avs(struct msm_spm_driver_data *dev) { }
+
+static void msm_spm_drv_enable_avs(struct msm_spm_driver_data *dev) { }
+
+static void msm_spm_drv_set_avs_vlevel(struct msm_spm_driver_data *dev,
+		unsigned int vlevel) { }
+#endif
+
+int msm_spm_drv_set_vdd(struct msm_spm_driver_data *dev, unsigned int vlevel)
+{
+	uint32_t timeout_us, new_level;
+	bool avs_enabled;
+
+	if (!dev)
+		return -EINVAL;
+
+	avs_enabled  = msm_spm_drv_is_avs_enabled(dev);
+
+	if (!msm_spm_pmic_arb_present(dev))
+		return -ENOSYS;
+
+	if (msm_spm_debug_mask & MSM_SPM_DEBUG_VCTL)
+		pr_info("%s: requesting vlevel %#x\n", __func__, vlevel);
+
+	if (avs_enabled)
+		msm_spm_drv_disable_avs(dev);
+
+	/* Kick the state machine back to idle */
+	dev->reg_shadow[MSM_SPM_REG_SAW_RST] = 1;
+	msm_spm_drv_flush_shadow(dev, MSM_SPM_REG_SAW_RST);
+
+	msm_spm_drv_set_vctl2(dev, vlevel);
+
+	timeout_us = dev->vctl_timeout_us;
+	/* Confirm the voltage we set was what hardware sent */
+	do {
+		udelay(1);
+		new_level = msm_spm_drv_get_sts_curr_pmic_data(dev);
+		/* FSM is idle */
+		if (((new_level & 0x30000) == 0) &&
+				((new_level & 0xFF) == vlevel))
+			break;
+	} while (--timeout_us);
+	if (!timeout_us) {
+		pr_info("Wrong level %#x\n", new_level);
+		goto set_vdd_bail;
+	}
+
+	if (msm_spm_debug_mask & MSM_SPM_DEBUG_VCTL)
+		pr_info("%s: done, remaining timeout %u us\n",
+			__func__, timeout_us);
+
+	/* Set AVS min/max */
+	if (avs_enabled) {
+		msm_spm_drv_set_avs_vlevel(dev, vlevel);
+		msm_spm_drv_enable_avs(dev);
+	}
+
+	return 0;
+
+set_vdd_bail:
+	if (avs_enabled)
+		msm_spm_drv_enable_avs(dev);
+
+	pr_err("%s: failed %#x, remaining timeout %uus, vlevel %#x\n",
+		__func__, vlevel, timeout_us, new_level);
+	return -EIO;
+}
+
+static int msm_spm_drv_get_pmic_port(struct msm_spm_driver_data *dev,
+		enum msm_spm_pmic_port port)
+{
+	int index = -1;
+
+	switch (port) {
+	case MSM_SPM_PMIC_VCTL_PORT:
+		index = dev->vctl_port;
+		break;
+	case MSM_SPM_PMIC_PHASE_PORT:
+		index = dev->phase_port;
+		break;
+	case MSM_SPM_PMIC_PFM_PORT:
+		index = dev->pfm_port;
+		break;
+	default:
+		break;
+	}
+
+	return index;
+}
+
+int msm_spm_drv_set_pmic_data(struct msm_spm_driver_data *dev,
+		enum msm_spm_pmic_port port, unsigned int data)
+{
+	unsigned int pmic_data = 0;
+	unsigned int timeout_us = 0;
+	int index = 0;
+
+	if (!msm_spm_pmic_arb_present(dev))
+		return -ENOSYS;
+
+	index = msm_spm_drv_get_pmic_port(dev, port);
+	if (index < 0)
+		return -ENODEV;
+
+	pmic_data |= data & 0xFF;
+	pmic_data |= (index & 0x7) << 16;
+
+	dev->reg_shadow[MSM_SPM_REG_SAW_VCTL] &= ~0x700FF;
+	dev->reg_shadow[MSM_SPM_REG_SAW_VCTL] |= pmic_data;
+	msm_spm_drv_flush_shadow(dev, MSM_SPM_REG_SAW_VCTL);
+	mb();
+
+	timeout_us = dev->vctl_timeout_us;
+	/*
+	 * Confirm the PMIC data we set was what the hardware sent by
+	 * checking the PMIC FSM state.
+	 * We cannot read back sts_pmic_data and check it against the
+	 * requested value as we do for set_vdd, since PMIC_STS is only
+	 * updated for SAW_VCTL writes sent with port index 0.
+	 */
+	do {
+		if (msm_spm_drv_get_sts_pmic_state(dev) ==
+				MSM_SPM_PMIC_STATE_IDLE)
+			break;
+		udelay(1);
+	} while (--timeout_us);
+
+	if (!timeout_us) {
+		pr_err("%s: failed, remaining timeout %u us, data %d\n",
+				__func__, timeout_us, data);
+		return -EIO;
+	}
+
+	return 0;
+}
+
+void msm_spm_drv_reinit(struct msm_spm_driver_data *dev, bool seq_write)
+{
+	int i;
+
+	if (seq_write)
+		msm_spm_drv_flush_seq_entry(dev);
+
+	for (i = 0; i < MSM_SPM_REG_SAW_PMIC_DATA_0 + num_pmic_data; i++)
+		msm_spm_drv_load_shadow(dev, i);
+
+	for (i = MSM_SPM_REG_NR_INITIALIZE + 1; i < MSM_SPM_REG_NR; i++)
+		msm_spm_drv_load_shadow(dev, i);
+}
+
+int msm_spm_drv_reg_init(struct msm_spm_driver_data *dev,
+		struct msm_spm_platform_data *data)
+{
+	int i;
+	bool found = false;
+
+	dev->ver_reg = data->ver_reg;
+	dev->reg_base_addr = data->reg_base_addr;
+	msm_spm_drv_get_saw2_ver(dev, &dev->major, &dev->minor);
+	for (i = 0; i < ARRAY_SIZE(saw2_info); i++)
+		if (dev->major == saw2_info[i].major &&
+			dev->minor == saw2_info[i].minor) {
+			pr_debug("%s: Version found\n",
+					saw2_info[i].ver_name);
+			dev->reg_offsets = saw2_info[i].spm_reg_offset_ptr;
+			found = true;
+			break;
+		}
+
+	if (!found) {
+		pr_err("%s: No SAW version found\n", __func__);
+		BUG();
+	}
+	return 0;
+}
+
+void msm_spm_drv_upd_reg_shadow(struct msm_spm_driver_data *dev, int id,
+		int val)
+{
+	dev->reg_shadow[id] = val;
+	msm_spm_drv_flush_shadow(dev, id);
+	/* Complete the above writes before other accesses */
+	mb();
+}
+
+int msm_spm_drv_init(struct msm_spm_driver_data *dev,
+		struct msm_spm_platform_data *data)
+{
+	int num_spm_entry;
+
+	BUG_ON(!dev || !data);
+
+	dev->vctl_port = data->vctl_port;
+	dev->phase_port = data->phase_port;
+	dev->pfm_port = data->pfm_port;
+	dev->reg_base_addr = data->reg_base_addr;
+	memcpy(dev->reg_shadow, data->reg_init_values,
+			sizeof(data->reg_init_values));
+
+	dev->vctl_timeout_us = data->vctl_timeout_us;
+
+	if (!num_pmic_data)
+		num_pmic_data = msm_spm_drv_get_num_pmic_data(dev);
+
+	num_spm_entry = msm_spm_drv_get_num_spm_entry(dev);
+
+	dev->reg_seq_entry_shadow =
+		kcalloc(num_spm_entry, sizeof(*dev->reg_seq_entry_shadow),
+				GFP_KERNEL);
+
+	if (!dev->reg_seq_entry_shadow)
+		return -ENOMEM;
+
+	return 0;
+}
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/soc/qcom/peripheral-loader.c	2019-01-22 16:16:26.663275022 +0100
@@ -0,0 +1,1348 @@
+/* Copyright (c) 2010-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/string.h>
+#include <linux/firmware.h>
+#include <linux/io.h>
+#include <linux/elf.h>
+#include <linux/mutex.h>
+#include <linux/memblock.h>
+#include <linux/slab.h>
+#include <linux/suspend.h>
+#include <linux/rwsem.h>
+#include <linux/sysfs.h>
+#include <linux/workqueue.h>
+#include <linux/jiffies.h>
+#include <linux/err.h>
+#include <linux/list.h>
+#include <linux/list_sort.h>
+#include <linux/idr.h>
+#include <linux/interrupt.h>
+#include <linux/of_gpio.h>
+#include <linux/of_address.h>
+#include <linux/dma-mapping.h>
+#include <soc/qcom/ramdump.h>
+#include <soc/qcom/subsystem_restart.h>
+#include <soc/qcom/secure_buffer.h>
+
+#include <asm/uaccess.h>
+#include <asm/setup.h>
+#include <asm-generic/io-64-nonatomic-lo-hi.h>
+#define CREATE_TRACE_POINTS
+#include <trace/events/trace_msm_pil_event.h>
+
+#include "peripheral-loader.h"
+
+#define pil_err(desc, fmt, ...)						\
+	dev_err(desc->dev, "%s: " fmt, desc->name, ##__VA_ARGS__)
+#define pil_info(desc, fmt, ...)					\
+	dev_info(desc->dev, "%s: " fmt, desc->name, ##__VA_ARGS__)
+
+#if defined(CONFIG_ARM)
+#define pil_memset_io(d, c, count) memset(d, c, count)
+#else
+#define pil_memset_io(d, c, count) memset_io(d, c, count)
+#endif
+
+#define PIL_NUM_DESC		10
+#define NUM_OF_ENCRYPTED_KEY	3
+static void __iomem *pil_info_base;
+static void __iomem *pil_minidump_base;
+
+/**
+ * proxy_timeout_ms - Override for proxy vote timeouts
+ * -1: Use driver-specified timeout
+ *  0: Hold proxy votes until shutdown
+ * >0: Specify a custom timeout in ms
+ */
+static int proxy_timeout_ms = -1;
+module_param(proxy_timeout_ms, int, S_IRUGO | S_IWUSR);
+
+static bool disable_timeouts;
+static const char firmware_error_msg[] = "firmware_error\n";
+/**
+ * struct pil_mdt - Representation of <name>.mdt file in memory
+ * @hdr: ELF32 header
+ * @phdr: ELF32 program headers
+ */
+struct pil_mdt {
+	struct elf32_hdr hdr;
+	struct elf32_phdr phdr[];
+};
+
+/**
+ * struct boot_minidump_smem_region - Representation of SMEM TOC
+ * @region_name: Name of modem segment to be dumped
+ * @region_base_address: Physical address where the segment starts
+ * @region_size: Size of segment to be dumped
+ */
+struct boot_minidump_smem_region {
+	char region_name[16];
+	u64 region_base_address;
+	u64 region_size;
+};
+
+/**
+ * struct pil_seg - memory map representing one segment
+ * @list: node in the descriptor's list of segments, or unlinked if last
+ * @paddr: physical start address of segment
+ * @sz: size of segment
+ * @filesz: size of segment on disk
+ * @num: segment number
+ * @relocated: true if segment is relocated, false otherwise
+ *
+ * Loosely based on an elf program header. Contains all necessary information
+ * to load and initialize a segment of the image in memory.
+ */
+struct pil_seg {
+	phys_addr_t paddr;
+	unsigned long sz;
+	unsigned long filesz;
+	int num;
+	struct list_head list;
+	bool relocated;
+};
+
+/**
+ * struct pil_priv - Private state for a pil_desc
+ * @proxy: work item used to run the proxy unvoting routine
+ * @ws: wakeup source to prevent suspend during pil_boot
+ * @wname: name of @ws
+ * @desc: pointer to pil_desc this is private data for
+ * @seg: list of segments sorted by physical address
+ * @entry_addr: physical address where processor starts booting at
+ * @base_addr: smallest start address among all segments that are relocatable
+ * @region_start: address where relocatable region starts or lowest address
+ * for non-relocatable images
+ * @region_end: address where relocatable region ends or highest address for
+ * non-relocatable images
+ * @region: region allocated for relocatable images
+ * @unvoted_flag: flag to keep track if we have unvoted or not.
+ *
+ * This struct contains data for a pil_desc that should not be exposed outside
+ * of this file. This structure points to the descriptor and the descriptor
+ * points to this structure so that PIL drivers can't access the private
+ * data of a descriptor but this file can access both.
+ */
+struct pil_priv {
+	struct delayed_work proxy;
+	struct wakeup_source ws;
+	char wname[32];
+	struct pil_desc *desc;
+	struct list_head segs;
+	phys_addr_t entry_addr;
+	phys_addr_t base_addr;
+	phys_addr_t region_start;
+	phys_addr_t region_end;
+	void *region;
+	struct pil_image_info __iomem *info;
+	struct md_ssr_ss_info __iomem *minidump_ss;
+	struct md_ssr_ss_info __iomem *minidump_pdr;
+	int minidump_id;
+	int id;
+	int unvoted_flag;
+	size_t region_size;
+};
+
+static int pil_do_minidump(struct pil_desc *desc, void *ramdump_dev)
+{
+	struct boot_minidump_smem_region __iomem *region_info_ss;
+	struct boot_minidump_smem_region __iomem *region_info_pdr;
+	struct ramdump_segment *ramdump_segs, *s;
+	struct pil_priv *priv = desc->priv;
+	void __iomem *subsys_smem_base_pdr;
+	void __iomem *subsys_smem_base_ss;
+	void __iomem *offset_ss;
+	void __iomem *offset_pdr;
+	int ss_mdump_seg_cnt_ss = 0, ss_mdump_seg_cnt_pdr = 0, total_segs;
+	int ret, i;
+
+	if (!ramdump_dev)
+		return -ENODEV;
+
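+	/*
+	 * memcpy is used to grab the value of the __iomem pointer so that
+	 * it can be advanced with plain byte arithmetic, likely to avoid
+	 * the address-space casts that checkers such as sparse would flag.
+	 */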
+	memcpy(&offset_ss, &priv->minidump_ss, sizeof(priv->minidump_ss));
+	offset_ss = offset_ss +
+		sizeof(priv->minidump_ss->md_ss_smem_regions_baseptr);
+	/* There are 3 encryption keys which also need to be dumped */
+	ss_mdump_seg_cnt_ss = readb_relaxed(offset_ss) +
+				NUM_OF_ENCRYPTED_KEY;
+
+	pr_debug("SMEM base to read minidump ss segments is 0x%x\n",
+			__raw_readl(priv->minidump_ss));
+	subsys_smem_base_ss = ioremap(__raw_readl(priv->minidump_ss),
+			ss_mdump_seg_cnt_ss * sizeof(*region_info_ss));
+	region_info_ss =
+		(struct boot_minidump_smem_region __iomem *)subsys_smem_base_ss;
+
+	if (priv->minidump_pdr && (__raw_readl(priv->minidump_pdr) != 0)) {
+		memcpy(&offset_pdr, &priv->minidump_pdr,
+				sizeof(priv->minidump_pdr));
+		offset_pdr = offset_pdr +
+			sizeof(priv->minidump_pdr->md_ss_smem_regions_baseptr);
+		/* There are 3 encryption keys which also need to be dumped */
+		ss_mdump_seg_cnt_pdr = readb_relaxed(offset_pdr) +
+					NUM_OF_ENCRYPTED_KEY;
+
+		pr_debug("SMEM base to read minidump pdr segments is 0x%x\n",
+				__raw_readl(priv->minidump_pdr));
+		subsys_smem_base_pdr = ioremap(__raw_readl(priv->minidump_pdr),
+			ss_mdump_seg_cnt_pdr * sizeof(*region_info_pdr));
+		region_info_pdr =
+			(struct boot_minidump_smem_region __iomem *)
+						subsys_smem_base_pdr;
+	}
+
+	total_segs = ss_mdump_seg_cnt_ss + ss_mdump_seg_cnt_pdr;
+	ramdump_segs = kcalloc(total_segs,
+			       sizeof(*ramdump_segs), GFP_KERNEL);
+	if (!ramdump_segs)
+		return -ENOMEM;
+
+	if (desc->subsys_vmid > 0)
+		ret = pil_assign_mem_to_linux(desc, priv->region_start,
+			(priv->region_end - priv->region_start));
+
+	s = ramdump_segs;
+	for (i = 0; i < ss_mdump_seg_cnt_ss; i++) {
+		memcpy(&offset_ss, &region_info_ss, sizeof(region_info_ss));
+		memcpy(&s->name, &region_info_ss, sizeof(region_info_ss));
+		offset_ss = offset_ss + sizeof(region_info_ss->region_name);
+		s->address = __raw_readl(offset_ss);
+		offset_ss = offset_ss +
+				sizeof(region_info_ss->region_base_address);
+		s->size = __raw_readl(offset_ss);
+		pr_debug("Dumping segment %s with address %pK and size 0x%x\n",
+				s->name, (void *)s->address,
+				(unsigned int)s->size);
+		s++;
+		region_info_ss++;
+	}
+
+	for (i = 0; i < ss_mdump_seg_cnt_pdr; i++) {
+		memcpy(&offset_pdr, &region_info_pdr, sizeof(region_info_pdr));
+		memcpy(&s->name, &region_info_pdr, sizeof(region_info_pdr));
+		offset_pdr = offset_pdr + sizeof(region_info_pdr->region_name);
+		s->address = __raw_readl(offset_pdr);
+		offset_pdr = offset_pdr +
+			sizeof(region_info_pdr->region_base_address);
+		s->size = __raw_readl(offset_pdr);
+		pr_debug("Dumping segment %s with address %pK and size 0x%x\n",
+				s->name, (void *)s->address,
+				(unsigned int)s->size);
+		s++;
+		region_info_pdr++;
+	}
+
+	ret = do_minidump(ramdump_dev, ramdump_segs, total_segs);
+	kfree(ramdump_segs);
+	if (ret)
+		pil_err(desc, "%s: Ramdump collection failed for subsys %s rc:%d\n",
+			__func__, desc->name, ret);
+	writeb_relaxed(1, &priv->minidump_ss->md_ss_ssr_cause);
+	writeb_relaxed(1, &priv->minidump_pdr->md_ss_ssr_cause);
+
+	if (desc->subsys_vmid > 0)
+		ret = pil_assign_mem_to_subsys(desc, priv->region_start,
+			(priv->region_end - priv->region_start));
+	return ret;
+}
+
+/**
+ * pil_do_ramdump() - Ramdump an image
+ * @desc: descriptor from pil_desc_init()
+ * @ramdump_dev: ramdump device returned from create_ramdump_device()
+ *
+ * Calls the ramdump API with a list of segments generated from the addresses
+ * that the descriptor corresponds to.
+ */
+int pil_do_ramdump(struct pil_desc *desc,
+		   void *ramdump_dev, void *minidump_dev)
+{
+	struct pil_priv *priv = desc->priv;
+	struct pil_seg *seg;
+	int count = 0, ret;
+	struct ramdump_segment *ramdump_segs, *s;
+	void __iomem *offset;
+
+	memcpy(&offset, &priv->minidump_ss, sizeof(priv->minidump_ss));
+	/*
+	 * Collect a minidump only if the SMEM regions base pointer is
+	 * initialized and the SSR cause is still 0.  The encryption
+	 * status does not need to be checked.
+	 */
+	if (priv->minidump_ss
+	&& (__raw_readl(priv->minidump_ss) != 0)
+	&& (readb_relaxed(offset + sizeof(u32) + 2 * sizeof(u8)) == 0)) {
+		pr_debug("Dumping Minidump for %s\n", desc->name);
+		return pil_do_minidump(desc, minidump_dev);
+	}
+	pr_debug("Continuing with full SSR dump for %s\n", desc->name);
+	list_for_each_entry(seg, &priv->segs, list)
+		count++;
+
+	ramdump_segs = kcalloc(count, sizeof(*ramdump_segs), GFP_KERNEL);
+	if (!ramdump_segs)
+		return -ENOMEM;
+
+	if (desc->subsys_vmid > 0)
+		ret = pil_assign_mem_to_linux(desc, priv->region_start,
+				(priv->region_end - priv->region_start));
+
+	s = ramdump_segs;
+	list_for_each_entry(seg, &priv->segs, list) {
+		s->address = seg->paddr;
+		s->size = seg->sz;
+		s++;
+	}
+
+	ret = do_elf_ramdump(ramdump_dev, ramdump_segs, count);
+	kfree(ramdump_segs);
+
+	if (ret)
+		pil_err(desc, "%s: Ramdump collection failed for subsys %s rc:%d\n",
+				__func__, desc->name, ret);
+
+	if (desc->subsys_vmid > 0)
+		ret = pil_assign_mem_to_subsys(desc, priv->region_start,
+				(priv->region_end - priv->region_start));
+
+	return ret;
+}
+EXPORT_SYMBOL(pil_do_ramdump);
+
+int pil_assign_mem_to_subsys(struct pil_desc *desc, phys_addr_t addr,
+							size_t size)
+{
+	int ret;
+	int srcVM[1] = {VMID_HLOS};
+	int destVM[1] = {desc->subsys_vmid};
+	int destVMperm[1] = {PERM_READ | PERM_WRITE};
+
+	ret = hyp_assign_phys(addr, size, srcVM, 1, destVM, destVMperm, 1);
+	if (ret)
+		pil_err(desc, "%s: failed for %pa address of size %zx - subsys VMid %d rc:%d\n",
+				__func__, &addr, size, desc->subsys_vmid, ret);
+	return ret;
+}
+EXPORT_SYMBOL(pil_assign_mem_to_subsys);
+
+int pil_assign_mem_to_linux(struct pil_desc *desc, phys_addr_t addr,
+							size_t size)
+{
+	int ret;
+	int srcVM[1] = {desc->subsys_vmid};
+	int destVM[1] = {VMID_HLOS};
+	int destVMperm[1] = {PERM_READ | PERM_WRITE | PERM_EXEC};
+
+	ret = hyp_assign_phys(addr, size, srcVM, 1, destVM, destVMperm, 1);
+	if (ret)
+		panic("%s: failed for %pa address of size %zx - subsys VMid %d rc:%d\n",
+				__func__, &addr, size, desc->subsys_vmid, ret);
+
+	return ret;
+}
+EXPORT_SYMBOL(pil_assign_mem_to_linux);
+
+int pil_assign_mem_to_subsys_and_linux(struct pil_desc *desc,
+						phys_addr_t addr, size_t size)
+{
+	int ret;
+	int srcVM[1] = {VMID_HLOS};
+	int destVM[2] = {VMID_HLOS, desc->subsys_vmid};
+	int destVMperm[2] = {PERM_READ | PERM_WRITE, PERM_READ | PERM_WRITE};
+
+	ret = hyp_assign_phys(addr, size, srcVM, 1, destVM, destVMperm, 2);
+	if (ret)
+		pil_err(desc, "%s: failed for %pa address of size %zx - subsys VMid %d rc:%d\n",
+				__func__, &addr, size, desc->subsys_vmid, ret);
+
+	return ret;
+}
+EXPORT_SYMBOL(pil_assign_mem_to_subsys_and_linux);
+
+int pil_reclaim_mem(struct pil_desc *desc, phys_addr_t addr, size_t size,
+						int VMid)
+{
+	int ret;
+	int srcVM[2] = {VMID_HLOS, desc->subsys_vmid};
+	int destVM[1] = {VMid};
+	int destVMperm[1] = {PERM_READ | PERM_WRITE};
+
+	if (VMid == VMID_HLOS)
+		destVMperm[0] = PERM_READ | PERM_WRITE | PERM_EXEC;
+
+	ret = hyp_assign_phys(addr, size, srcVM, 2, destVM, destVMperm, 1);
+	if (ret)
+		panic("%s: failed for %pa address of size %zx - subsys VMid %d. Fatal error.\n",
+				__func__, &addr, size, desc->subsys_vmid);
+
+	return ret;
+}
+EXPORT_SYMBOL(pil_reclaim_mem);
+
+/**
+ * pil_get_entry_addr() - Retrieve the entry address of a peripheral image
+ * @desc: descriptor from pil_desc_init()
+ *
+ * Returns the physical address where the image boots at or 0 if unknown.
+ */
+phys_addr_t pil_get_entry_addr(struct pil_desc *desc)
+{
+	return desc->priv ? desc->priv->entry_addr : 0;
+}
+EXPORT_SYMBOL(pil_get_entry_addr);
+
+static void __pil_proxy_unvote(struct pil_priv *priv)
+{
+	struct pil_desc *desc = priv->desc;
+
+	desc->ops->proxy_unvote(desc);
+	notify_proxy_unvote(desc->dev);
+	__pm_relax(&priv->ws);
+	module_put(desc->owner);
+}
+
+static void pil_proxy_unvote_work(struct work_struct *work)
+{
+	struct delayed_work *delayed = to_delayed_work(work);
+	struct pil_priv *priv = container_of(delayed, struct pil_priv, proxy);
+	__pil_proxy_unvote(priv);
+}
+
+static int pil_proxy_vote(struct pil_desc *desc)
+{
+	int ret = 0;
+	struct pil_priv *priv = desc->priv;
+
+	if (desc->ops->proxy_vote) {
+		__pm_stay_awake(&priv->ws);
+		ret = desc->ops->proxy_vote(desc);
+		if (ret)
+			__pm_relax(&priv->ws);
+	}
+
+	if (desc->proxy_unvote_irq)
+		enable_irq(desc->proxy_unvote_irq);
+	notify_proxy_vote(desc->dev);
+
+	return ret;
+}
+
+static void pil_proxy_unvote(struct pil_desc *desc, int immediate)
+{
+	struct pil_priv *priv = desc->priv;
+	unsigned long timeout;
+
+	if (proxy_timeout_ms == 0 && !immediate)
+		return;
+	else if (proxy_timeout_ms > 0)
+		timeout = proxy_timeout_ms;
+	else
+		timeout = desc->proxy_timeout;
+
+	if (desc->ops->proxy_unvote) {
+		if (WARN_ON(!try_module_get(desc->owner)))
+			return;
+
+		if (immediate)
+			timeout = 0;
+
+		if (!desc->proxy_unvote_irq || immediate)
+			schedule_delayed_work(&priv->proxy,
+					      msecs_to_jiffies(timeout));
+	}
+}
+
+static irqreturn_t proxy_unvote_intr_handler(int irq, void *dev_id)
+{
+	struct pil_desc *desc = dev_id;
+	struct pil_priv *priv = desc->priv;
+
+	pil_info(desc, "Power/Clock ready interrupt received\n");
+	if (!desc->priv->unvoted_flag) {
+		desc->priv->unvoted_flag = 1;
+		__pil_proxy_unvote(priv);
+	}
+
+	return IRQ_HANDLED;
+}
+
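+/*
+ * Bit 27 of p_flags is a Qualcomm-specific extension in MDT images that
+ * marks a segment as relocatable.
+ */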
+static bool segment_is_relocatable(const struct elf32_phdr *p)
+{
+	return !!(p->p_flags & BIT(27));
+}
+
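+/*
+ * Translate a link-time physical address from the ELF headers into the
+ * dynamically allocated relocation region.
+ */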
+static phys_addr_t pil_reloc(const struct pil_priv *priv, phys_addr_t addr)
+{
+	return addr - priv->base_addr + priv->region_start;
+}
+
+static struct pil_seg *pil_init_seg(const struct pil_desc *desc,
+				  const struct elf32_phdr *phdr, int num)
+{
+	bool reloc = segment_is_relocatable(phdr);
+	const struct pil_priv *priv = desc->priv;
+	struct pil_seg *seg;
+
+	if (!reloc && memblock_overlaps_memory(phdr->p_paddr, phdr->p_memsz)) {
+		pil_err(desc, "Segment not relocatable,kernel memory would be overwritten[%#08lx, %#08lx)\n",
+		(unsigned long)phdr->p_paddr,
+		(unsigned long)(phdr->p_paddr + phdr->p_memsz));
+		return ERR_PTR(-EPERM);
+	}
+
+	if (phdr->p_filesz > phdr->p_memsz) {
+		pil_err(desc, "Segment %d: file size (%u) is greater than mem size (%u).\n",
+			num, phdr->p_filesz, phdr->p_memsz);
+		return ERR_PTR(-EINVAL);
+	}
+
+	seg = kmalloc(sizeof(*seg), GFP_KERNEL);
+	if (!seg)
+		return ERR_PTR(-ENOMEM);
+	seg->num = num;
+	seg->paddr = reloc ? pil_reloc(priv, phdr->p_paddr) : phdr->p_paddr;
+	seg->filesz = phdr->p_filesz;
+	seg->sz = phdr->p_memsz;
+	seg->relocated = reloc;
+	INIT_LIST_HEAD(&seg->list);
+
+	return seg;
+}
+
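+/*
+ * p_flags bits [26:24] carry a Qualcomm-specific segment type in MDT
+ * images; type 0x2 marks the hash-table segment, which is not loaded.
+ */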
+#define segment_is_hash(flag) (((flag) & (0x7 << 24)) == (0x2 << 24))
+
+static int segment_is_loadable(const struct elf32_phdr *p)
+{
+	return (p->p_type == PT_LOAD) && !segment_is_hash(p->p_flags) &&
+		p->p_memsz;
+}
+
+static void pil_dump_segs(const struct pil_priv *priv)
+{
+	struct pil_seg *seg;
+	phys_addr_t seg_h_paddr;
+
+	list_for_each_entry(seg, &priv->segs, list) {
+		seg_h_paddr = seg->paddr + seg->sz;
+		pil_info(priv->desc, "%d: %pa %pa\n", seg->num,
+				&seg->paddr, &seg_h_paddr);
+	}
+}
+
+/*
+ * Ensure the entry address lies within the image limits and if the image is
+ * relocatable ensure it lies within a relocatable segment.
+ */
+static int pil_init_entry_addr(struct pil_priv *priv, const struct pil_mdt *mdt)
+{
+	struct pil_seg *seg;
+	phys_addr_t entry = mdt->hdr.e_entry;
+	bool image_relocated = priv->region;
+
+	if (image_relocated)
+		entry = pil_reloc(priv, entry);
+	priv->entry_addr = entry;
+
+	if (priv->desc->flags & PIL_SKIP_ENTRY_CHECK)
+		return 0;
+
+	list_for_each_entry(seg, &priv->segs, list) {
+		if (entry >= seg->paddr && entry < seg->paddr + seg->sz) {
+			if (!image_relocated)
+				return 0;
+			else if (seg->relocated)
+				return 0;
+		}
+	}
+	pil_err(priv->desc, "entry address %pa not within range\n", &entry);
+	pil_dump_segs(priv);
+	return -EADDRNOTAVAIL;
+}
+
+static int pil_alloc_region(struct pil_priv *priv, phys_addr_t min_addr,
+				phys_addr_t max_addr, size_t align)
+{
+	void *region;
+	size_t size = max_addr - min_addr;
+	size_t aligned_size;
+
+	/* Don't reallocate due to fragmentation concerns, just sanity check */
+	if (priv->region) {
+		if (WARN(priv->region_end - priv->region_start < size,
+			"Can't reuse PIL memory, too small\n"))
+			return -ENOMEM;
+		return 0;
+	}
+
+	if (align > SZ_4M)
+		aligned_size = ALIGN(size, SZ_4M);
+	else
+		aligned_size = ALIGN(size, SZ_1M);
+
+	init_dma_attrs(&priv->desc->attrs);
+	dma_set_attr(DMA_ATTR_SKIP_ZEROING, &priv->desc->attrs);
+	dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &priv->desc->attrs);
+
+	region = dma_alloc_attrs(priv->desc->dev, aligned_size,
+				&priv->region_start, GFP_KERNEL,
+				&priv->desc->attrs);
+
+	if (region == NULL) {
+		pil_err(priv->desc, "Failed to allocate relocatable region of size %zx\n",
+					size);
+		priv->region_start = 0;
+		priv->region_end = 0;
+		return -ENOMEM;
+	}
+
+	priv->region = region;
+	priv->region_end = priv->region_start + size;
+	priv->base_addr = min_addr;
+	priv->region_size = aligned_size;
+
+	return 0;
+}
+
+static int pil_setup_region(struct pil_priv *priv, const struct pil_mdt *mdt)
+{
+	const struct elf32_phdr *phdr;
+	phys_addr_t min_addr_r, min_addr_n, max_addr_r, max_addr_n, start, end;
+	size_t align = 0;
+	int i, ret = 0;
+	bool relocatable = false;
+
+	min_addr_n = min_addr_r = (phys_addr_t)ULLONG_MAX;
+	max_addr_n = max_addr_r = 0;
+
+	/* Find the image limits */
+	for (i = 0; i < mdt->hdr.e_phnum; i++) {
+		phdr = &mdt->phdr[i];
+		if (!segment_is_loadable(phdr))
+			continue;
+
+		start = phdr->p_paddr;
+		end = start + phdr->p_memsz;
+
+		if (segment_is_relocatable(phdr)) {
+			min_addr_r = min(min_addr_r, start);
+			max_addr_r = max(max_addr_r, end);
+			/*
+			 * Lowest relocatable segment dictates alignment of
+			 * relocatable region
+			 */
+			if (min_addr_r == start)
+				align = phdr->p_align;
+			relocatable = true;
+		} else {
+			min_addr_n = min(min_addr_n, start);
+			max_addr_n = max(max_addr_n, end);
+		}
+
+	}
+
+	/*
+	 * Align the max address to the next 4K boundary to satisfy iommus and
+	 * XPUs that operate on 4K chunks.
+	 */
+	max_addr_n = ALIGN(max_addr_n, SZ_4K);
+	max_addr_r = ALIGN(max_addr_r, SZ_4K);
+
+	if (relocatable) {
+		ret = pil_alloc_region(priv, min_addr_r, max_addr_r, align);
+	} else {
+		priv->region_start = min_addr_n;
+		priv->region_end = max_addr_n;
+		priv->base_addr = min_addr_n;
+	}
+
+	if (priv->info) {
+		__iowrite32_copy(&priv->info->start, &priv->region_start,
+					sizeof(priv->region_start) / 4);
+		writel_relaxed(priv->region_end - priv->region_start,
+				&priv->info->size);
+	}
+
+	return ret;
+}
+
+static int pil_cmp_seg(void *priv, struct list_head *a, struct list_head *b)
+{
+	int ret = 0;
+	struct pil_seg *seg_a = list_entry(a, struct pil_seg, list);
+	struct pil_seg *seg_b = list_entry(b, struct pil_seg, list);
+
+	if (seg_a->paddr < seg_b->paddr)
+		ret = -1;
+	else if (seg_a->paddr > seg_b->paddr)
+		ret = 1;
+
+	return ret;
+}
+
+static int pil_init_mmap(struct pil_desc *desc, const struct pil_mdt *mdt)
+{
+	struct pil_priv *priv = desc->priv;
+	const struct elf32_phdr *phdr;
+	struct pil_seg *seg;
+	int i, ret;
+
+	ret = pil_setup_region(priv, mdt);
+	if (ret)
+		return ret;
+
+	pil_info(desc, "loading from %pa to %pa\n", &priv->region_start,
+							&priv->region_end);
+
+	for (i = 0; i < mdt->hdr.e_phnum; i++) {
+		phdr = &mdt->phdr[i];
+		if (!segment_is_loadable(phdr))
+			continue;
+
+		seg = pil_init_seg(desc, phdr, i);
+		if (IS_ERR(seg))
+			return PTR_ERR(seg);
+
+		list_add_tail(&seg->list, &priv->segs);
+	}
+	list_sort(NULL, &priv->segs, pil_cmp_seg);
+
+	return pil_init_entry_addr(priv, mdt);
+}
+
+struct pil_map_fw_info {
+	void *region;
+	struct dma_attrs attrs;
+	phys_addr_t base_addr;
+	struct device *dev;
+};
+
+static void pil_release_mmap(struct pil_desc *desc)
+{
+	struct pil_priv *priv = desc->priv;
+	struct pil_seg *p, *tmp;
+	u64 zero = 0ULL;
+
+	if (priv->info) {
+		__iowrite32_copy(&priv->info->start, &zero,
+					sizeof(zero) / 4);
+		writel_relaxed(0, &priv->info->size);
+	}
+
+	list_for_each_entry_safe(p, tmp, &priv->segs, list) {
+		list_del(&p->list);
+		kfree(p);
+	}
+}
+
+static void pil_clear_segment(struct pil_desc *desc)
+{
+	struct pil_priv *priv = desc->priv;
+	u8 __iomem *buf;
+
+	struct pil_map_fw_info map_fw_info = {
+		.attrs = desc->attrs,
+		.region = priv->region,
+		.base_addr = priv->region_start,
+		.dev = desc->dev,
+	};
+
+	void *map_data = desc->map_data ? desc->map_data : &map_fw_info;
+
+	/* Clear memory so that unauthorized ELF code is not left behind */
+	buf = desc->map_fw_mem(priv->region_start, (priv->region_end -
+					priv->region_start), map_data);
+
+	if (!buf) {
+		pil_err(desc, "Failed to map memory\n");
+		return;
+	}
+
+	pil_memset_io(buf, 0, (priv->region_end - priv->region_start));
+	desc->unmap_fw_mem(buf, (priv->region_end - priv->region_start),
+								map_data);
+}
+
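+/* Trailing segment memory is mapped and zeroed at most 1MB at a time. */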
+#define IOMAP_SIZE SZ_1M
+
+static void *map_fw_mem(phys_addr_t paddr, size_t size, void *data)
+{
+	struct pil_map_fw_info *info = data;
+
+	return dma_remap(info->dev, info->region, paddr, size,
+					&info->attrs);
+}
+
+static void unmap_fw_mem(void *vaddr, size_t size, void *data)
+{
+	struct pil_map_fw_info *info = data;
+
+	dma_unremap(info->dev, vaddr, size);
+}
+
+static int pil_load_seg(struct pil_desc *desc, struct pil_seg *seg)
+{
+	int ret = 0, count;
+	phys_addr_t paddr;
+	char fw_name[30];
+	int num = seg->num;
+	struct pil_map_fw_info map_fw_info = {
+		.attrs = desc->attrs,
+		.region = desc->priv->region,
+		.base_addr = desc->priv->region_start,
+		.dev = desc->dev,
+	};
+	void *map_data = desc->map_data ? desc->map_data : &map_fw_info;
+
+	if (seg->filesz) {
+		snprintf(fw_name, ARRAY_SIZE(fw_name), "%s.b%02d",
+				desc->fw_name, num);
+		ret = request_firmware_into_buf(fw_name, desc->dev, seg->paddr,
+					      seg->filesz, desc->map_fw_mem,
+					      desc->unmap_fw_mem, map_data);
+		if (ret < 0) {
+			pil_err(desc, "Failed to locate blob %s or blob is too big(rc:%d)\n",
+				fw_name, ret);
+			subsys_set_error(desc->subsys_dev, firmware_error_msg);
+			return ret;
+		}
+
+		if (ret != seg->filesz) {
+			pil_err(desc, "Blob size %u doesn't match %lu\n",
+					ret, seg->filesz);
+			subsys_set_error(desc->subsys_dev, firmware_error_msg);
+			return -EPERM;
+		}
+		ret = 0;
+	}
+
+	/* Zero out trailing memory */
+	paddr = seg->paddr + seg->filesz;
+	count = seg->sz - seg->filesz;
+	while (count > 0) {
+		int size;
+		u8 __iomem *buf;
+
+		size = min_t(size_t, IOMAP_SIZE, count);
+		buf = desc->map_fw_mem(paddr, size, map_data);
+		if (!buf) {
+			pil_err(desc, "Failed to map memory\n");
+			return -ENOMEM;
+		}
+		pil_memset_io(buf, 0, size);
+
+		desc->unmap_fw_mem(buf, size, map_data);
+
+		count -= size;
+		paddr += size;
+	}
+
+	if (desc->ops->verify_blob) {
+		ret = desc->ops->verify_blob(desc, seg->paddr, seg->sz);
+		if (ret) {
+			pil_err(desc, "Blob%u failed verification(rc:%d)\n",
+								num, ret);
+			subsys_set_error(desc->subsys_dev, firmware_error_msg);
+		}
+	}
+
+	return ret;
+}
+
+static int pil_parse_devicetree(struct pil_desc *desc)
+{
+	struct device_node *ofnode = desc->dev->of_node;
+	int clk_ready = 0;
+
+	if (!ofnode)
+		return -EINVAL;
+
+	if (of_property_read_u32(ofnode, "qcom,mem-protect-id",
+					&desc->subsys_vmid))
+		pr_debug("Unable to read the addr-protect-id for %s\n",
+					desc->name);
+
+	if (desc->ops->proxy_unvote && of_find_property(ofnode,
+					"qcom,gpio-proxy-unvote",
+					NULL)) {
+		clk_ready = of_get_named_gpio(ofnode,
+				"qcom,gpio-proxy-unvote", 0);
+
+		if (clk_ready < 0) {
+			dev_dbg(desc->dev,
+				"[%s]: Error getting proxy unvoting gpio\n",
+				desc->name);
+			return clk_ready;
+		}
+
+		clk_ready = gpio_to_irq(clk_ready);
+		if (clk_ready < 0) {
+			dev_err(desc->dev,
+				"[%s]: Error getting proxy unvote IRQ\n",
+				desc->name);
+			return clk_ready;
+		}
+	}
+	desc->proxy_unvote_irq = clk_ready;
+	return 0;
+}
+
+/* Synchronize request_firmware() with suspend */
+static DECLARE_RWSEM(pil_pm_rwsem);
+
+/**
+ * pil_boot() - Load a peripheral image into memory and boot it
+ * @desc: descriptor from pil_desc_init()
+ *
+ * Returns 0 on success or -ERROR on failure.
+ */
+int pil_boot(struct pil_desc *desc)
+{
+	int ret;
+	char fw_name[30];
+	const struct pil_mdt *mdt;
+	const struct elf32_hdr *ehdr;
+	struct pil_seg *seg;
+	const struct firmware *fw;
+	struct pil_priv *priv = desc->priv;
+	bool mem_protect = false;
+	bool hyp_assign = false;
+
+	if (desc->shutdown_fail)
+		pil_err(desc, "Subsystem shutdown failed previously!\n");
+
+	/* Reinitialize for new image */
+	pil_release_mmap(desc);
+
+	down_read(&pil_pm_rwsem);
+	snprintf(fw_name, sizeof(fw_name), "%s.mdt", desc->fw_name);
+	ret = request_firmware(&fw, fw_name, desc->dev);
+	if (ret) {
+		pil_err(desc, "Failed to locate %s(rc:%d)\n", fw_name, ret);
+		goto out;
+	}
+
+	if (fw->size < sizeof(*ehdr)) {
+		pil_err(desc, "Not big enough to be an elf header\n");
+		subsys_set_error(desc->subsys_dev, firmware_error_msg);
+		ret = -EIO;
+		goto release_fw;
+	}
+
+	mdt = (const struct pil_mdt *)fw->data;
+	ehdr = &mdt->hdr;
+
+	if (memcmp(ehdr->e_ident, ELFMAG, SELFMAG)) {
+		pil_err(desc, "Not an elf header\n");
+		subsys_set_error(desc->subsys_dev, firmware_error_msg);
+		ret = -EIO;
+		goto release_fw;
+	}
+
+	if (ehdr->e_phnum == 0) {
+		pil_err(desc, "No loadable segments\n");
+		subsys_set_error(desc->subsys_dev, firmware_error_msg);
+		ret = -EIO;
+		goto release_fw;
+	}
+	if (sizeof(struct elf32_phdr) * ehdr->e_phnum +
+	    sizeof(struct elf32_hdr) > fw->size) {
+		pil_err(desc, "Program headers not within mdt\n");
+		subsys_set_error(desc->subsys_dev, firmware_error_msg);
+		ret = -EIO;
+		goto release_fw;
+	}
+
+	ret = pil_init_mmap(desc, mdt);
+	if (ret)
+		goto release_fw;
+
+	desc->priv->unvoted_flag = 0;
+	ret = pil_proxy_vote(desc);
+	if (ret) {
+		pil_err(desc, "Failed to proxy vote(rc:%d)\n", ret);
+		goto release_fw;
+	}
+
+	trace_pil_event("before_init_image", desc);
+	if (desc->ops->init_image)
+		ret = desc->ops->init_image(desc, fw->data, fw->size);
+	if (ret) {
+		pil_err(desc, "Initializing image failed(rc:%d)\n", ret);
+		subsys_set_error(desc->subsys_dev, firmware_error_msg);
+		goto err_boot;
+	}
+
+	trace_pil_event("before_mem_setup", desc);
+	if (desc->ops->mem_setup)
+		ret = desc->ops->mem_setup(desc, priv->region_start,
+				priv->region_end - priv->region_start);
+	if (ret) {
+		pil_err(desc, "Memory setup error(rc:%d)\n", ret);
+		goto err_deinit_image;
+	}
+
+	if (desc->subsys_vmid > 0) {
+		/*
+		 * In case of modem SSR, we need to assign memory back to
+		 * Linux. This is not needed after a cold boot since Linux
+		 * already owns it. Also, for secure boot devices, modem
+		 * memory has to be released after the MBA is booted.
+		 */
+		trace_pil_event("before_assign_mem", desc);
+		if (desc->modem_ssr) {
+			ret = pil_assign_mem_to_linux(desc, priv->region_start,
+				(priv->region_end - priv->region_start));
+			if (ret)
+				pil_err(desc, "Failed to assign to linux, ret- %d\n",
+								ret);
+		}
+		ret = pil_assign_mem_to_subsys_and_linux(desc,
+				priv->region_start,
+				(priv->region_end - priv->region_start));
+		if (ret) {
+			pil_err(desc, "Failed to assign memory, ret - %d\n",
+								ret);
+			goto err_deinit_image;
+		}
+		hyp_assign = true;
+	}
+
+	trace_pil_event("before_load_seg", desc);
+	list_for_each_entry(seg, &desc->priv->segs, list) {
+		ret = pil_load_seg(desc, seg);
+		if (ret)
+			goto err_deinit_image;
+	}
+
+	if (desc->subsys_vmid > 0) {
+		trace_pil_event("before_reclaim_mem", desc);
+		ret = pil_reclaim_mem(desc, priv->region_start,
+				(priv->region_end - priv->region_start),
+				desc->subsys_vmid);
+		if (ret) {
+			pil_err(desc, "Failed to assign %s memory, ret - %d\n",
+							desc->name, ret);
+			goto err_deinit_image;
+		}
+		hyp_assign = false;
+	}
+
+	trace_pil_event("before_auth_reset", desc);
+	ret = desc->ops->auth_and_reset(desc);
+	if (ret) {
+		pil_err(desc, "Failed to bring out of reset(rc:%d)\n", ret);
+		subsys_set_error(desc->subsys_dev, firmware_error_msg);
+		goto err_auth_and_reset;
+	}
+	trace_pil_event("reset_done", desc);
+	pil_info(desc, "Brought out of reset\n");
+	desc->modem_ssr = false;
+err_auth_and_reset:
+	if (ret && desc->subsys_vmid > 0) {
+		pil_assign_mem_to_linux(desc, priv->region_start,
+				(priv->region_end - priv->region_start));
+		mem_protect = true;
+	}
+err_deinit_image:
+	if (ret && desc->ops->deinit_image)
+		desc->ops->deinit_image(desc);
+err_boot:
+	if (ret && desc->proxy_unvote_irq)
+		disable_irq(desc->proxy_unvote_irq);
+	pil_proxy_unvote(desc, ret);
+release_fw:
+	release_firmware(fw);
+out:
+	up_read(&pil_pm_rwsem);
+	if (ret) {
+		if (priv->region) {
+			if (desc->subsys_vmid > 0 && !mem_protect &&
+					hyp_assign) {
+				pil_reclaim_mem(desc, priv->region_start,
+					(priv->region_end -
+						priv->region_start),
+					VMID_HLOS);
+			}
+			if (desc->clear_fw_region && priv->region_start)
+				pil_clear_segment(desc);
+			dma_free_attrs(desc->dev, priv->region_size,
+					priv->region, priv->region_start,
+					&desc->attrs);
+			priv->region = NULL;
+		}
+		pil_release_mmap(desc);
+	}
+	return ret;
+}
+EXPORT_SYMBOL(pil_boot);
+
+/**
+ * pil_shutdown() - Shutdown a peripheral
+ * @desc: descriptor from pil_desc_init()
+ */
+void pil_shutdown(struct pil_desc *desc)
+{
+	struct pil_priv *priv = desc->priv;
+
+	if (desc->ops->shutdown) {
+		if (desc->ops->shutdown(desc))
+			desc->shutdown_fail = true;
+		else
+			desc->shutdown_fail = false;
+	}
+
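+	/*
+	 * Drop any outstanding proxy votes: immediately when unvoting is
+	 * IRQ-driven or when no proxy timeout is set, otherwise by waiting
+	 * for the delayed unvote work to finish.
+	 */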
+	if (desc->proxy_unvote_irq) {
+		disable_irq(desc->proxy_unvote_irq);
+		if (!desc->priv->unvoted_flag)
+			pil_proxy_unvote(desc, 1);
+	} else if (!proxy_timeout_ms)
+		pil_proxy_unvote(desc, 1);
+	else
+		flush_delayed_work(&priv->proxy);
+	desc->modem_ssr = true;
+}
+EXPORT_SYMBOL(pil_shutdown);
+
+/**
+ * pil_free_memory() - Free memory resources associated with a peripheral
+ * @desc: descriptor from pil_desc_init()
+ */
+void pil_free_memory(struct pil_desc *desc)
+{
+	struct pil_priv *priv = desc->priv;
+
+	if (priv->region) {
+		if (desc->subsys_vmid > 0)
+			pil_assign_mem_to_linux(desc, priv->region_start,
+				(priv->region_end - priv->region_start));
+		dma_free_attrs(desc->dev, priv->region_size,
+				priv->region, priv->region_start, &desc->attrs);
+		priv->region = NULL;
+	}
+}
+EXPORT_SYMBOL(pil_free_memory);
+
+static DEFINE_IDA(pil_ida);
+
+bool is_timeout_disabled(void)
+{
+	return disable_timeouts;
+}
+
+/**
+ * pil_desc_init() - Initialize a pil descriptor
+ * @desc: descriptor to initialize
+ *
+ * Initialize a pil descriptor for use by other pil functions. This function
+ * must be called before calling pil_boot() or pil_shutdown().
+ *
+ * Returns 0 for success and -ERROR on failure.
+ */
+int pil_desc_init(struct pil_desc *desc)
+{
+	struct pil_priv *priv;
+	void __iomem *addr;
+	int ret, ss_imem_offset_mdump_ss, ss_imem_offset_mdump_pdr;
+	char buf[sizeof(priv->info->name)];
+	struct device_node *ofnode = desc->dev->of_node;
+
+	if (WARN(desc->ops->proxy_unvote && !desc->ops->proxy_vote,
+				"Invalid proxy voting. Ignoring\n"))
+		((struct pil_reset_ops *)desc->ops)->proxy_unvote = NULL;
+
+	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+	desc->priv = priv;
+	priv->desc = desc;
+
+	priv->id = ret = ida_simple_get(&pil_ida, 0, PIL_NUM_DESC, GFP_KERNEL);
+	if (priv->id < 0)
+		goto err;
+
+	if (pil_info_base) {
+		addr = pil_info_base + sizeof(struct pil_image_info) * priv->id;
+		priv->info = (struct pil_image_info __iomem *)addr;
+
+		strncpy(buf, desc->name, sizeof(buf));
+		__iowrite32_copy(priv->info->name, buf, sizeof(buf) / 4);
+	}
+	if (of_property_read_u32(ofnode, "qcom,minidump-id",
+		&priv->minidump_id))
+		pr_debug("minidump-id not found for %s\n", desc->name);
+	else {
+		ss_imem_offset_mdump_ss =
+			sizeof(struct md_ssr_ss_info) * priv->minidump_id;
+		ss_imem_offset_mdump_pdr =
+			sizeof(struct md_ssr_ss_info) * (priv->minidump_id + 1);
+		if (pil_minidump_base) {
+			/*
+			 * Add 0x4 to skip the md_ssr_toc_init field of
+			 * struct md_ssr_toc and reach this subsystem's
+			 * md_ssr_ss_info entry; each entry holds the
+			 * pointer to the subsystem's ToC in SMEM.
+			 */
+			addr = pil_minidump_base +
+				ss_imem_offset_mdump_ss + 0x4;
+			priv->minidump_ss =
+				(struct md_ssr_ss_info __iomem *)addr;
+
+			addr = pil_minidump_base +
+				ss_imem_offset_mdump_pdr + 0x4;
+			priv->minidump_pdr =
+				(struct md_ssr_ss_info __iomem *)addr;
+		}
+	}
+
+	ret = pil_parse_devicetree(desc);
+	if (ret)
+		goto err_parse_dt;
+
+	/* Ignore users who don't make any sense */
+	WARN(desc->ops->proxy_unvote && desc->proxy_unvote_irq == 0
+		 && !desc->proxy_timeout,
+		 "Invalid proxy unvote callback or a proxy timeout of 0"
+		 " was specified or no proxy unvote IRQ was specified.\n");
+
+	if (desc->proxy_unvote_irq) {
+		ret = request_threaded_irq(desc->proxy_unvote_irq,
+				  NULL,
+				  proxy_unvote_intr_handler,
+				  IRQF_ONESHOT | IRQF_TRIGGER_RISING,
+				  desc->name, desc);
+		if (ret < 0) {
+			dev_err(desc->dev,
+				"Unable to request proxy unvote IRQ: %d\n",
+				ret);
+			goto err;
+		}
+		disable_irq(desc->proxy_unvote_irq);
+	}
+
+	snprintf(priv->wname, sizeof(priv->wname), "pil-%s", desc->name);
+	wakeup_source_init(&priv->ws, priv->wname);
+	INIT_DELAYED_WORK(&priv->proxy, pil_proxy_unvote_work);
+	INIT_LIST_HEAD(&priv->segs);
+
+	/* Make sure mapping functions are set. */
+	if (!desc->map_fw_mem)
+		desc->map_fw_mem = map_fw_mem;
+
+	if (!desc->unmap_fw_mem)
+		desc->unmap_fw_mem = unmap_fw_mem;
+
+	return 0;
+err_parse_dt:
+	ida_simple_remove(&pil_ida, priv->id);
+err:
+	kfree(priv);
+	return ret;
+}
+EXPORT_SYMBOL(pil_desc_init);
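+
+/*
+ * Typical lifecycle (a sketch only; "my_desc" is a hypothetical
+ * descriptor owned by a subsystem driver):
+ *
+ *	ret = pil_desc_init(&my_desc);
+ *	if (ret)
+ *		return ret;
+ *	ret = pil_boot(&my_desc);
+ *	...
+ *	pil_shutdown(&my_desc);
+ *	pil_desc_release(&my_desc);
+ */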
+
+/**
+ * pil_desc_release() - Release a pil descriptor
+ * @desc: descriptor to free
+ */
+void pil_desc_release(struct pil_desc *desc)
+{
+	struct pil_priv *priv = desc->priv;
+
+	if (priv) {
+		ida_simple_remove(&pil_ida, priv->id);
+		flush_delayed_work(&priv->proxy);
+		wakeup_source_trash(&priv->ws);
+	}
+	desc->priv = NULL;
+	kfree(priv);
+}
+EXPORT_SYMBOL(pil_desc_release);
+
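+/*
+ * PM notifier: take the write side of pil_pm_rwsem across suspend so that
+ * any pil_boot() in flight (which holds the read side around
+ * request_firmware()) completes before the system suspends, and new boots
+ * block until resume.
+ */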
+static int pil_pm_notify(struct notifier_block *b, unsigned long event, void *p)
+{
+	switch (event) {
+	case PM_SUSPEND_PREPARE:
+		down_write(&pil_pm_rwsem);
+		break;
+	case PM_POST_SUSPEND:
+		up_write(&pil_pm_rwsem);
+		break;
+	}
+	return NOTIFY_DONE;
+}
+
+static struct notifier_block pil_pm_notifier = {
+	.notifier_call = pil_pm_notify,
+};
+
+static int __init msm_pil_init(void)
+{
+	struct device_node *np;
+	struct resource res;
+	int i;
+
+	np = of_find_compatible_node(NULL, NULL, "qcom,msm-imem-pil");
+	if (!np) {
+		pr_warn("pil: failed to find qcom,msm-imem-pil node\n");
+		goto out;
+	}
+	if (of_address_to_resource(np, 0, &res)) {
+		pr_warn("pil: address to resource on imem region failed\n");
+		goto out;
+	}
+	pil_info_base = ioremap(res.start, resource_size(&res));
+	if (!pil_info_base) {
+		pr_warn("pil: could not map imem region\n");
+		goto out;
+	}
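+	/* 0x53444247 is "SDBG" in ASCII (most significant byte first) */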
+	if (__raw_readl(pil_info_base) == 0x53444247) {
+		pr_info("pil: pil-imem set to disable pil timeouts\n");
+		disable_timeouts = true;
+	}
+	for (i = 0; i < resource_size(&res)/sizeof(u32); i++)
+		writel_relaxed(0, pil_info_base + (i * sizeof(u32)));
+
+	np = of_find_compatible_node(NULL, NULL, "qcom,msm-imem-minidump");
+	if (!np) {
+		pr_warn("pil: failed to find qcom,msm-imem-minidump node\n");
+		goto out;
+	} else {
+		pil_minidump_base = of_iomap(np, 0);
+		if (!pil_minidump_base) {
+			pr_err("unable to map pil minidump imem offset\n");
+			goto out;
+		}
+	}
+	for (i = 0; i < sizeof(struct md_ssr_toc)/sizeof(u32); i++)
+		writel_relaxed(0, pil_minidump_base + (i * sizeof(u32)));
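+	/* md_ssr_toc_init: tell MSS SW that IMEM minidump init is done */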
+	writel_relaxed(1, pil_minidump_base);
+out:
+	return register_pm_notifier(&pil_pm_notifier);
+}
+device_initcall(msm_pil_init);
+
+static void __exit msm_pil_exit(void)
+{
+	unregister_pm_notifier(&pil_pm_notifier);
+	if (pil_info_base)
+		iounmap(pil_info_base);
+	if (pil_minidump_base)
+		iounmap(pil_minidump_base);
+}
+module_exit(msm_pil_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Load peripheral images and bring peripherals out of reset");
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/soc/qcom/peripheral-loader.h	2019-01-22 16:16:26.663275022 +0100
@@ -0,0 +1,186 @@
+/* Copyright (c) 2010-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#ifndef __MSM_PERIPHERAL_LOADER_H
+#define __MSM_PERIPHERAL_LOADER_H
+
+#include <linux/dma-attrs.h>
+
+struct device;
+struct module;
+struct pil_priv;
+
+/**
+ * struct pil_desc - PIL descriptor
+ * @name: string used for pil_get()
+ * @fw_name: firmware name
+ * @dev: parent device
+ * @ops: callback functions
+ * @owner: module the descriptor belongs to
+ * @proxy_timeout: delay in ms until proxy vote is removed
+ * @flags: bitfield for image flags
+ * @priv: DON'T USE - internal only
+ * @attrs: DMA attributes to be used during dma allocation.
+ * @proxy_unvote_irq: IRQ to trigger a proxy unvote. proxy_timeout
+ * is ignored if this is set.
+ * @map_fw_mem: Custom function used to map physical address space to virtual.
+ * This defaults to ioremap if not specified.
+ * @unmap_fw_mem: Custom function used to undo mapping by map_fw_mem.
+ * This defaults to iounmap if not specified.
+ * @shutdown_fail: Set if PIL op for shutting down subsystem fails.
+ * @modem_ssr: true if the modem is restarting, false if booting for the
+ * first time.
+ * @clear_fw_region: Clear the firmware region if loading fails.
+ * @subsys_vmid: memprot id for the subsystem.
+ */
+struct pil_desc {
+	const char *name;
+	const char *fw_name;
+	struct device *dev;
+	struct subsys_device *subsys_dev;
+	const struct pil_reset_ops *ops;
+	struct module *owner;
+	unsigned long proxy_timeout;
+	unsigned long flags;
+#define PIL_SKIP_ENTRY_CHECK	BIT(0)
+	struct pil_priv *priv;
+	struct dma_attrs attrs;
+	unsigned int proxy_unvote_irq;
+	void * (*map_fw_mem)(phys_addr_t phys, size_t size, void *data);
+	void (*unmap_fw_mem)(void *virt, size_t size, void *data);
+	void *map_data;
+	bool shutdown_fail;
+	bool modem_ssr;
+	bool clear_fw_region;
+	u32 subsys_vmid;
+};
+
+/**
+ * struct pil_image_info - info in IMEM about image and where it is loaded
+ * @name: name of image (may or may not be NULL terminated)
+ * @start: indicates physical address where image starts (little endian)
+ * @size: size of image (little endian)
+ */
+struct pil_image_info {
+	char name[8];
+	__le64 start;
+	__le32 size;
+} __attribute__((__packed__));
+
+#define MAX_NUM_OF_SS 3
+
+/**
+ * struct md_ssr_ss_info - Info in imem about smem ToC
+ * @md_ss_smem_regions_baseptr: Start physical address of SMEM TOC
+ * @md_ss_num_of_regions: number of segments that need to be dumped
+ * @md_ss_encryption_status: status of encryption of segments
+ * @md_ss_ssr_cause: ssr cause enum
+ */
+struct md_ssr_ss_info {
+	u32 md_ss_smem_regions_baseptr;
+	u8 md_ss_num_of_regions;
+	u8 md_ss_encryption_status;
+	u8 md_ss_ssr_cause;
+	u8 reserved;
+};
+
+/**
+ * struct md_ssr_toc - Wrapper of struct md_ssr_ss_info
+ * @md_ssr_toc_init: flag to indicate to MSS SW about imem init done
+ * @md_ssr_ss: Instance of struct md_ssr_ss_info for a subsystem
+ */
+/* Shared IMEM ToC struct */
+struct md_ssr_toc {
+	u32 md_ssr_toc_init;
+	struct md_ssr_ss_info	md_ssr_ss[MAX_NUM_OF_SS];
+};
+
+/**
+ * struct pil_reset_ops - PIL operations
+ * @init_image: prepare an image for authentication
+ * @mem_setup: prepare the image memory region
+ * @verify_blob: authenticate a program segment, called once for each loadable
+ *		 program segment (optional)
+ * @proxy_vote: make proxy votes before auth_and_reset (optional)
+ * @auth_and_reset: boot the processor
+ * @proxy_unvote: remove any proxy votes (optional)
+ * @deinit_image: restore actions performed in init_image if necessary
+ * @shutdown: shutdown the processor
+ */
+struct pil_reset_ops {
+	int (*init_image)(struct pil_desc *pil, const u8 *metadata,
+			  size_t size);
+	int (*mem_setup)(struct pil_desc *pil, phys_addr_t addr, size_t size);
+	int (*verify_blob)(struct pil_desc *pil, phys_addr_t phy_addr,
+			   size_t size);
+	int (*proxy_vote)(struct pil_desc *pil);
+	int (*auth_and_reset)(struct pil_desc *pil);
+	void (*proxy_unvote)(struct pil_desc *pil);
+	int (*deinit_image)(struct pil_desc *pil);
+	int (*shutdown)(struct pil_desc *pil);
+};
+
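+/*
+ * Illustrative call order, as driven by pil_boot() (a sketch only; the
+ * "my_*" names below are hypothetical):
+ *
+ *	static struct pil_reset_ops my_ops = {
+ *		.init_image	= my_init_image,
+ *		.mem_setup	= my_mem_setup,
+ *		.verify_blob	= my_verify_blob,
+ *		.auth_and_reset	= my_auth_and_reset,
+ *		.shutdown	= my_shutdown,
+ *	};
+ *
+ * pil_boot() invokes init_image, then mem_setup for the whole region,
+ * then verify_blob once per loaded segment, and finally auth_and_reset;
+ * deinit_image runs only on the error path, and shutdown is called from
+ * pil_shutdown().
+ */
+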
+#ifdef CONFIG_MSM_PIL
+extern int pil_desc_init(struct pil_desc *desc);
+extern int pil_boot(struct pil_desc *desc);
+extern void pil_shutdown(struct pil_desc *desc);
+extern void pil_free_memory(struct pil_desc *desc);
+extern void pil_desc_release(struct pil_desc *desc);
+extern phys_addr_t pil_get_entry_addr(struct pil_desc *desc);
+extern int pil_do_ramdump(struct pil_desc *desc, void *ramdump_dev,
+			  void *minidump_dev);
+extern int pil_assign_mem_to_subsys(struct pil_desc *desc, phys_addr_t addr,
+						size_t size);
+extern int pil_assign_mem_to_linux(struct pil_desc *desc, phys_addr_t addr,
+						size_t size);
+extern int pil_assign_mem_to_subsys_and_linux(struct pil_desc *desc,
+						phys_addr_t addr, size_t size);
+extern int pil_reclaim_mem(struct pil_desc *desc, phys_addr_t addr, size_t size,
+						int VMid);
+extern bool is_timeout_disabled(void);
+#else
+static inline int pil_desc_init(struct pil_desc *desc) { return 0; }
+static inline int pil_boot(struct pil_desc *desc) { return 0; }
+static inline void pil_shutdown(struct pil_desc *desc) { }
+static inline void pil_free_memory(struct pil_desc *desc) { }
+static inline void pil_desc_release(struct pil_desc *desc) { }
+static inline phys_addr_t pil_get_entry_addr(struct pil_desc *desc)
+{
+	return 0;
+}
+static inline int pil_do_ramdump(struct pil_desc *desc,
+		void *ramdump_dev, void *minidump_dev)
+{
+	return 0;
+}
+static inline int pil_assign_mem_to_subsys(struct pil_desc *desc,
+						phys_addr_t addr, size_t size)
+{
+	return 0;
+}
+static inline int pil_assign_mem_to_linux(struct pil_desc *desc,
+						phys_addr_t addr, size_t size)
+{
+	return 0;
+}
+static inline int pil_assign_mem_to_subsys_and_linux(struct pil_desc *desc,
+						phys_addr_t addr, size_t size)
+{
+	return 0;
+}
+static inline int pil_reclaim_mem(struct pil_desc *desc, phys_addr_t addr,
+					size_t size, int VMid)
+{
+	return 0;
+}
+static inline bool is_timeout_disabled(void) { return false; }
+#endif
+
+#endif
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/soc/qcom/pil-msa.c	2019-10-29 09:26:24.817214667 +0100
@@ -0,0 +1,883 @@
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/firmware.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/ioport.h>
+#include <linux/delay.h>
+#include <linux/sched.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/of.h>
+#include <linux/regulator/consumer.h>
+#include <linux/dma-mapping.h>
+#include <linux/highmem.h>
+#include <soc/qcom/scm.h>
+#include <soc/qcom/secure_buffer.h>
+#include <trace/events/trace_msm_pil_event.h>
+
+#include "peripheral-loader.h"
+#include "pil-q6v5.h"
+#include "pil-msa.h"
+
+/* Q6 Register Offsets */
+#define QDSP6SS_RST_EVB			0x010
+#define QDSP6SS_DBG_CFG			0x018
+
+/* AXI Halting Registers */
+#define MSS_Q6_HALT_BASE		0x180
+#define MSS_MODEM_HALT_BASE		0x200
+#define MSS_NC_HALT_BASE		0x280
+
+/* RMB Status Register Values */
+#define STATUS_PBL_SUCCESS		0x1
+#define STATUS_XPU_UNLOCKED		0x1
+#define STATUS_XPU_UNLOCKED_SCRIBBLED	0x2
+
+/* PBL/MBA interface registers */
+#define RMB_MBA_IMAGE			0x00
+#define RMB_PBL_STATUS			0x04
+#define RMB_MBA_COMMAND			0x08
+#define RMB_MBA_STATUS			0x0C
+#define RMB_PMI_META_DATA		0x10
+#define RMB_PMI_CODE_START		0x14
+#define RMB_PMI_CODE_LENGTH		0x18
+#define RMB_PROTOCOL_VERSION		0x1C
+#define RMB_MBA_DEBUG_INFORMATION	0x20
+
+#define POLL_INTERVAL_US		50
+
+#define CMD_META_DATA_READY		0x1
+#define CMD_LOAD_READY			0x2
+#define CMD_PILFAIL_NFY_MBA		0xffffdead
+
+#define STATUS_META_DATA_AUTH_SUCCESS	0x3
+#define STATUS_AUTH_COMPLETE		0x4
+#define STATUS_MBA_UNLOCKED		0x6
+
+/* External BHS */
+#define EXTERNAL_BHS_ON			BIT(0)
+#define EXTERNAL_BHS_STATUS		BIT(4)
+#define BHS_TIMEOUT_US			50
+
+#define MSS_RESTART_PARAM_ID		0x2
+#define MSS_RESTART_ID			0xA
+
+#define MSS_MAGIC			0XAABADEAD
+/* CX_IPEAK Parameters */
+#define CX_IPEAK_MSS			BIT(5)
+/* Timeout value for MBA boot when minidump is enabled */
+#define MBA_ENCRYPTION_TIMEOUT	5000
+
+enum scm_cmd {
+	PAS_MEM_SETUP_CMD = 2,
+};
+
+static int pbl_mba_boot_timeout_ms = 1000;
+module_param(pbl_mba_boot_timeout_ms, int, S_IRUGO | S_IWUSR);
+
+static int modem_auth_timeout_ms = 10000;
+module_param(modem_auth_timeout_ms, int, S_IRUGO | S_IWUSR);
+
+/* If set to 0xAABADEAD, MBA failures trigger a kernel panic */
+static uint modem_trigger_panic;
+module_param(modem_trigger_panic, uint, S_IRUGO | S_IWUSR);
+
+/* To set the modem debug cookie in DBG_CFG register for debugging */
+static uint modem_dbg_cfg;
+module_param(modem_dbg_cfg, uint, S_IRUGO | S_IWUSR);
+
+static void modem_log_rmb_regs(void __iomem *base)
+{
+	pr_err("RMB_MBA_IMAGE: %08x\n", readl_relaxed(base + RMB_MBA_IMAGE));
+	pr_err("RMB_PBL_STATUS: %08x\n", readl_relaxed(base + RMB_PBL_STATUS));
+	pr_err("RMB_MBA_COMMAND: %08x\n",
+				readl_relaxed(base + RMB_MBA_COMMAND));
+	pr_err("RMB_MBA_STATUS: %08x\n", readl_relaxed(base + RMB_MBA_STATUS));
+	pr_err("RMB_PMI_META_DATA: %08x\n",
+				readl_relaxed(base + RMB_PMI_META_DATA));
+	pr_err("RMB_PMI_CODE_START: %08x\n",
+				readl_relaxed(base + RMB_PMI_CODE_START));
+	pr_err("RMB_PMI_CODE_LENGTH: %08x\n",
+				readl_relaxed(base + RMB_PMI_CODE_LENGTH));
+	pr_err("RMB_PROTOCOL_VERSION: %08x\n",
+				readl_relaxed(base + RMB_PROTOCOL_VERSION));
+	pr_err("RMB_MBA_DEBUG_INFORMATION: %08x\n",
+			readl_relaxed(base + RMB_MBA_DEBUG_INFORMATION));
+
+	if (modem_trigger_panic == MSS_MAGIC)
+		panic("%s: System ramdump is needed!!!\n", __func__);
+}
+
+static int pil_mss_power_up(struct q6v5_data *drv)
+{
+	int ret = 0;
+	u32 regval;
+
+	if (drv->vreg) {
+		ret = regulator_enable(drv->vreg);
+		if (ret)
+			dev_err(drv->desc.dev, "Failed to enable modem regulator(rc:%d)\n",
+									ret);
+	}
+
+	if (drv->cxrail_bhs) {
+		regval = readl_relaxed(drv->cxrail_bhs);
+		regval |= EXTERNAL_BHS_ON;
+		writel_relaxed(regval, drv->cxrail_bhs);
+
+		ret = readl_poll_timeout(drv->cxrail_bhs, regval,
+			regval & EXTERNAL_BHS_STATUS, 1, BHS_TIMEOUT_US);
+	}
+
+	return ret;
+}
+
+static int pil_mss_power_down(struct q6v5_data *drv)
+{
+	u32 regval;
+
+	if (drv->cxrail_bhs) {
+		regval = readl_relaxed(drv->cxrail_bhs);
+		regval &= ~EXTERNAL_BHS_ON;
+		writel_relaxed(regval, drv->cxrail_bhs);
+	}
+
+	if (drv->vreg)
+		return regulator_disable(drv->vreg);
+
+	return 0;
+}
+
+static int pil_mss_enable_clks(struct q6v5_data *drv)
+{
+	int ret;
+
+	ret = clk_prepare_enable(drv->ahb_clk);
+	if (ret)
+		goto err_ahb_clk;
+	ret = clk_prepare_enable(drv->axi_clk);
+	if (ret)
+		goto err_axi_clk;
+	ret = clk_prepare_enable(drv->rom_clk);
+	if (ret)
+		goto err_rom_clk;
+	ret = clk_prepare_enable(drv->gpll0_mss_clk);
+	if (ret)
+		goto err_gpll0_mss_clk;
+	ret = clk_prepare_enable(drv->snoc_axi_clk);
+	if (ret)
+		goto err_snoc_axi_clk;
+	ret = clk_prepare_enable(drv->mnoc_axi_clk);
+	if (ret)
+		goto err_mnoc_axi_clk;
+	return 0;
+err_mnoc_axi_clk:
+	clk_disable_unprepare(drv->snoc_axi_clk);
+err_snoc_axi_clk:
+	clk_disable_unprepare(drv->gpll0_mss_clk);
+err_gpll0_mss_clk:
+	clk_disable_unprepare(drv->rom_clk);
+err_rom_clk:
+	clk_disable_unprepare(drv->axi_clk);
+err_axi_clk:
+	clk_disable_unprepare(drv->ahb_clk);
+err_ahb_clk:
+	return ret;
+}
+
+static void pil_mss_disable_clks(struct q6v5_data *drv)
+{
+	clk_disable_unprepare(drv->mnoc_axi_clk);
+	clk_disable_unprepare(drv->snoc_axi_clk);
+	clk_disable_unprepare(drv->gpll0_mss_clk);
+	clk_disable_unprepare(drv->rom_clk);
+	clk_disable_unprepare(drv->axi_clk);
+	if (!drv->ahb_clk_vote)
+		clk_disable_unprepare(drv->ahb_clk);
+}
+
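+/*
+ * Drive the MSS restart line: mss_restart = 1 asserts reset, 0 deasserts
+ * it. When the restart register is secure, the write goes through an SCM
+ * call instead of a direct register write.
+ */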
+static int pil_mss_restart_reg(struct q6v5_data *drv, u32 mss_restart)
+{
+	int ret = 0;
+	int scm_ret = 0;
+	struct scm_desc desc = {0};
+
+	desc.args[0] = mss_restart;
+	desc.args[1] = 0;
+	desc.arginfo = SCM_ARGS(2);
+
+	if (drv->restart_reg && !drv->restart_reg_sec) {
+		writel_relaxed(mss_restart, drv->restart_reg);
+		mb();
+		udelay(2);
+	} else if (drv->restart_reg_sec) {
+		if (!is_scm_armv8()) {
+			ret = scm_call(SCM_SVC_PIL, MSS_RESTART_ID,
+					&mss_restart, sizeof(mss_restart),
+					&scm_ret, sizeof(scm_ret));
+		} else {
+			ret = scm_call2(SCM_SIP_FNID(SCM_SVC_PIL,
+						MSS_RESTART_ID), &desc);
+			scm_ret = desc.ret[0];
+		}
+		if (ret || scm_ret)
+			pr_err("Secure MSS restart failed\n");
+	}
+
+	return ret;
+}
+
+static int pil_msa_wait_for_mba_ready(struct q6v5_data *drv)
+{
+	struct device *dev = drv->desc.dev;
+	int ret;
+	u32 status;
+	u64 val;
+
+	if (of_property_read_bool(dev->of_node, "qcom,minidump-id"))
+		pbl_mba_boot_timeout_ms = MBA_ENCRYPTION_TIMEOUT;
+
+	val = is_timeout_disabled() ? 0 : pbl_mba_boot_timeout_ms * 1000;
+
+	/* Wait for PBL completion. */
+	ret = readl_poll_timeout(drv->rmb_base + RMB_PBL_STATUS, status,
+				 status != 0, POLL_INTERVAL_US, val);
+	if (ret) {
+		dev_err(dev, "PBL boot timed out (rc:%d)\n", ret);
+		return ret;
+	}
+	if (status != STATUS_PBL_SUCCESS) {
+		dev_err(dev, "PBL returned unexpected status %d\n", status);
+		return -EINVAL;
+	}
+
+	/* Wait for MBA completion. */
+	ret = readl_poll_timeout(drv->rmb_base + RMB_MBA_STATUS, status,
+				status != 0, POLL_INTERVAL_US, val);
+	if (ret) {
+		dev_err(dev, "MBA boot timed out (rc:%d)\n", ret);
+		return ret;
+	}
+	if (status != STATUS_XPU_UNLOCKED &&
+	    status != STATUS_XPU_UNLOCKED_SCRIBBLED) {
+		dev_err(dev, "MBA returned unexpected status %d\n", status);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+int pil_mss_shutdown(struct pil_desc *pil)
+{
+	struct q6v5_data *drv = container_of(pil, struct q6v5_data, desc);
+	int ret = 0;
+
+	dev_info(pil->dev, "MSS is shutting down\n");
+	if (drv->axi_halt_base) {
+		pil_q6v5_halt_axi_port(pil,
+			drv->axi_halt_base + MSS_Q6_HALT_BASE);
+		pil_q6v5_halt_axi_port(pil,
+			drv->axi_halt_base + MSS_MODEM_HALT_BASE);
+		pil_q6v5_halt_axi_port(pil,
+			drv->axi_halt_base + MSS_NC_HALT_BASE);
+	}
+
+	if (drv->axi_halt_q6)
+		pil_q6v5_halt_axi_port(pil, drv->axi_halt_q6);
+	if (drv->axi_halt_mss)
+		pil_q6v5_halt_axi_port(pil, drv->axi_halt_mss);
+	if (drv->axi_halt_nc)
+		pil_q6v5_halt_axi_port(pil, drv->axi_halt_nc);
+
+	/*
+	 * Software workaround to avoid high MX current during LPASS/MSS
+	 * restart.
+	 */
+	if (drv->mx_spike_wa && drv->ahb_clk_vote) {
+		ret = clk_prepare_enable(drv->ahb_clk);
+		if (!ret)
+			assert_clamps(pil);
+		else
+			dev_err(pil->dev, "error turning ON AHB clock(rc:%d)\n",
+									ret);
+	}
+
+	/*
+	 * If MSS was in turbo state before the fatal error occurred, it
+	 * would have set the vote bit. Since MSS is restarting, PIL needs
+	 * to clear this bit, which may also clear the throttle state.
+	 */
+	if (drv->cx_ipeak_vote)
+		writel_relaxed(CX_IPEAK_MSS, drv->cxip_lm_vote_clear);
+
+	ret = pil_mss_restart_reg(drv, 1);
+
+	if (drv->is_booted) {
+		pil_mss_disable_clks(drv);
+		pil_mss_power_down(drv);
+		drv->is_booted = false;
+	}
+
+	return ret;
+}
+
+int __pil_mss_deinit_image(struct pil_desc *pil, bool err_path)
+{
+	struct modem_data *drv = dev_get_drvdata(pil->dev);
+	struct q6v5_data *q6_drv = container_of(pil, struct q6v5_data, desc);
+	int ret = 0;
+	struct device *dma_dev = drv->mba_mem_dev_fixed ?: &drv->mba_mem_dev;
+	s32 status;
+	u64 val = is_timeout_disabled() ? 0 : pbl_mba_boot_timeout_ms * 1000;
+
+	if (err_path) {
+		writel_relaxed(CMD_PILFAIL_NFY_MBA,
+				drv->rmb_base + RMB_MBA_COMMAND);
+		ret = readl_poll_timeout(drv->rmb_base + RMB_MBA_STATUS, status,
+				status == STATUS_MBA_UNLOCKED || status < 0,
+				POLL_INTERVAL_US, val);
+		if (ret)
+			dev_err(pil->dev, "MBA region unlock timed out(rc:%d)\n",
+									ret);
+		else if (status < 0)
+			dev_err(pil->dev, "MBA unlock returned err status: %d\n",
+						status);
+	}
+
+	ret = pil_mss_shutdown(pil);
+
+	if (q6_drv->ahb_clk_vote)
+		clk_disable_unprepare(q6_drv->ahb_clk);
+
+	/*
+	 * In case of any failure where reclaiming the MBA and DP memory
+	 * could not happen, free the memory here.
+	 */
+	if (drv->q6->mba_dp_virt) {
+		if (pil->subsys_vmid > 0)
+			pil_assign_mem_to_linux(pil, drv->q6->mba_dp_phys,
+						drv->q6->mba_dp_size);
+		dma_free_attrs(dma_dev, drv->q6->mba_dp_size,
+				drv->q6->mba_dp_virt, drv->q6->mba_dp_phys,
+				&drv->attrs_dma);
+		drv->q6->mba_dp_virt = NULL;
+	}
+
+	return ret;
+}
+
+int pil_mss_deinit_image(struct pil_desc *pil)
+{
+	return __pil_mss_deinit_image(pil, true);
+}
+
+int pil_mss_make_proxy_votes(struct pil_desc *pil)
+{
+	int ret;
+	struct q6v5_data *drv = container_of(pil, struct q6v5_data, desc);
+	int uv = 0;
+
+	ret = of_property_read_u32(pil->dev->of_node, "vdd_mx-uV", &uv);
+	if (ret) {
+		dev_err(pil->dev, "missing vdd_mx-uV property(rc:%d)\n", ret);
+		return ret;
+	}
+
+	ret = regulator_set_voltage(drv->vreg_mx, uv, INT_MAX);
+	if (ret) {
+		dev_err(pil->dev, "Failed to request vreg_mx voltage(rc:%d)\n",
+									ret);
+		return ret;
+	}
+
+	ret = regulator_enable(drv->vreg_mx);
+	if (ret) {
+		dev_err(pil->dev, "Failed to enable vreg_mx(rc:%d)\n", ret);
+		regulator_set_voltage(drv->vreg_mx, 0, INT_MAX);
+		return ret;
+	}
+
+	ret = pil_q6v5_make_proxy_votes(pil);
+	if (ret) {
+		regulator_disable(drv->vreg_mx);
+		regulator_set_voltage(drv->vreg_mx, 0, INT_MAX);
+	}
+
+	return ret;
+}
+
+void pil_mss_remove_proxy_votes(struct pil_desc *pil)
+{
+	struct q6v5_data *drv = container_of(pil, struct q6v5_data, desc);
+
+	pil_q6v5_remove_proxy_votes(pil);
+	regulator_disable(drv->vreg_mx);
+	regulator_set_voltage(drv->vreg_mx, 0, INT_MAX);
+}
+
+static int pil_mss_mem_setup(struct pil_desc *pil,
+					phys_addr_t addr, size_t size)
+{
+	struct modem_data *md = dev_get_drvdata(pil->dev);
+
+	struct pas_init_image_req {
+		u32	proc;
+		u32	start_addr;
+		u32	len;
+	} request;
+	u32 scm_ret = 0;
+	int ret;
+	struct scm_desc desc = {0};
+
+	if (!md->subsys_desc.pil_mss_memsetup)
+		return 0;
+
+	request.proc = md->pas_id;
+	request.start_addr = addr;
+	request.len = size;
+
+	if (!is_scm_armv8()) {
+		ret = scm_call(SCM_SVC_PIL, PAS_MEM_SETUP_CMD, &request,
+				sizeof(request), &scm_ret, sizeof(scm_ret));
+	} else {
+		desc.args[0] = md->pas_id;
+		desc.args[1] = addr;
+		desc.args[2] = size;
+		desc.arginfo = SCM_ARGS(3);
+		ret = scm_call2(SCM_SIP_FNID(SCM_SVC_PIL, PAS_MEM_SETUP_CMD),
+				&desc);
+		scm_ret = desc.ret[0];
+	}
+	if (ret)
+		return ret;
+	return scm_ret;
+}
+
+static int pil_mss_reset(struct pil_desc *pil)
+{
+	struct q6v5_data *drv = container_of(pil, struct q6v5_data, desc);
+	phys_addr_t start_addr = pil_get_entry_addr(pil);
+	int ret;
+
+	trace_pil_func(__func__);
+	if (drv->mba_dp_phys)
+		start_addr = drv->mba_dp_phys;
+
+	/*
+	 * Bring subsystem out of reset and enable required
+	 * regulators and clocks.
+	 */
+	ret = pil_mss_power_up(drv);
+	if (ret)
+		goto err_power;
+
+	/* Deassert reset to subsystem and wait for propagation */
+	ret = pil_mss_restart_reg(drv, 0);
+	if (ret)
+		goto err_restart;
+
+	ret = pil_mss_enable_clks(drv);
+	if (ret)
+		goto err_clks;
+
+	if (modem_dbg_cfg)
+		writel_relaxed(modem_dbg_cfg, drv->reg_base + QDSP6SS_DBG_CFG);
+
+	/* Program Image Address */
+	if (drv->self_auth) {
+		writel_relaxed(start_addr, drv->rmb_base + RMB_MBA_IMAGE);
+		/*
+		 * Ensure write to RMB base occurs before reset
+		 * is released.
+		 */
+		mb();
+	} else {
+		writel_relaxed((start_addr >> 4) & 0x0FFFFFF0,
+				drv->reg_base + QDSP6SS_RST_EVB);
+	}
+
+	/* Program DP Address */
+	if (drv->dp_size) {
+		writel_relaxed(start_addr + SZ_1M, drv->rmb_base +
+			       RMB_PMI_CODE_START);
+		writel_relaxed(drv->dp_size, drv->rmb_base +
+			       RMB_PMI_CODE_LENGTH);
+	} else {
+		writel_relaxed(0, drv->rmb_base + RMB_PMI_CODE_START);
+		writel_relaxed(0, drv->rmb_base + RMB_PMI_CODE_LENGTH);
+	}
+	/* Make sure RMB regs are written before bringing modem out of reset */
+	mb();
+
+	ret = pil_q6v5_reset(pil);
+	if (ret)
+		goto err_q6v5_reset;
+
+	/* Wait for MBA to start. Check for PBL and MBA errors while waiting. */
+	if (drv->self_auth) {
+		ret = pil_msa_wait_for_mba_ready(drv);
+		if (ret)
+			goto err_q6v5_reset;
+	}
+
+	dev_info(pil->dev, "MBA boot done\n");
+	drv->is_booted = true;
+
+	return 0;
+
+err_q6v5_reset:
+	modem_log_rmb_regs(drv->rmb_base);
+	pil_mss_disable_clks(drv);
+	if (drv->ahb_clk_vote)
+		clk_disable_unprepare(drv->ahb_clk);
+err_clks:
+	pil_mss_restart_reg(drv, 1);
+err_restart:
+	pil_mss_power_down(drv);
+err_power:
+	return ret;
+}
+
+int pil_mss_reset_load_mba(struct pil_desc *pil)
+{
+	struct q6v5_data *drv = container_of(pil, struct q6v5_data, desc);
+	struct modem_data *md = dev_get_drvdata(pil->dev);
+	const struct firmware *fw, *dp_fw = NULL;
+	char fw_name_legacy[10] = "mba.b00";
+	char fw_name[10] = "mba.mbn";
+	char *dp_name = "msadp";
+	char *fw_name_p;
+	void *mba_dp_virt;
+	dma_addr_t mba_dp_phys, mba_dp_phys_end;
+	int ret;
+	const u8 *data;
+	struct device *dma_dev = md->mba_mem_dev_fixed ?: &md->mba_mem_dev;
+
+	trace_pil_func(__func__);
+	fw_name_p = drv->non_elf_image ? fw_name_legacy : fw_name;
+	ret = request_firmware(&fw, fw_name_p, pil->dev);
+	if (ret) {
+		dev_err(pil->dev, "Failed to locate %s (rc:%d)\n",
+						fw_name_p, ret);
+		return ret;
+	}
+
+	data = fw ? fw->data : NULL;
+	if (!data) {
+		dev_err(pil->dev, "MBA data is NULL\n");
+		ret = -ENOMEM;
+		goto err_invalid_fw;
+	}
+
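+	/*
+	 * Buffer layout: the MBA image occupies the first 1MB; the optional
+	 * debug policy (DP) is appended at the 1MB boundary (matching the
+	 * RMB_PMI_CODE_START programming in pil_mss_reset()).
+	 */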
+	drv->mba_dp_size = SZ_1M;
+
+	arch_setup_dma_ops(dma_dev, 0, 0, NULL, 0);
+
+	dma_dev->coherent_dma_mask = DMA_BIT_MASK(32);
+
+	init_dma_attrs(&md->attrs_dma);
+	dma_set_attr(DMA_ATTR_SKIP_ZEROING, &md->attrs_dma);
+	dma_set_attr(DMA_ATTR_STRONGLY_ORDERED, &md->attrs_dma);
+
+	ret = request_firmware(&dp_fw, dp_name, pil->dev);
+	if (ret) {
+		dev_warn(pil->dev, "Debug policy %s not present, continuing\n",
+						dp_name);
+	} else {
+		if (!dp_fw || !dp_fw->data) {
+			dev_err(pil->dev, "Invalid DP firmware\n");
+			ret = -ENOMEM;
+			goto err_invalid_fw;
+		}
+		drv->dp_size = dp_fw->size;
+		drv->mba_dp_size += drv->dp_size;
+		drv->mba_dp_size = ALIGN(drv->mba_dp_size, SZ_4K);
+	}
+
+	mba_dp_virt = dma_alloc_attrs(dma_dev, drv->mba_dp_size, &mba_dp_phys,
+				   GFP_KERNEL, &md->attrs_dma);
+	if (!mba_dp_virt) {
+		dev_err(pil->dev, "%s MBA/DP buffer allocation %zx bytes failed\n",
+				 __func__, drv->mba_dp_size);
+		ret = -ENOMEM;
+		goto err_invalid_fw;
+	}
+
+	/* Make sure there are no mappings in PKMAP and fixmap */
+	kmap_flush_unused();
+	kmap_atomic_flush_unused();
+
+	drv->mba_dp_phys = mba_dp_phys;
+	drv->mba_dp_virt = mba_dp_virt;
+	mba_dp_phys_end = mba_dp_phys + drv->mba_dp_size;
+
+	dev_info(pil->dev, "Loading MBA and DP (if present) from %pa to %pa size %zx\n",
+			&mba_dp_phys, &mba_dp_phys_end, drv->mba_dp_size);
+
+	/* Load the MBA image into memory */
+	if (fw->size <= SZ_1M) {
+		/* The MBA image must fit within the first 1MB of the buffer */
+		memcpy(mba_dp_virt, data, fw->size);
+	} else {
+		dev_err(pil->dev, "%s: fw image too large, exceeds the 1MB MBA region\n",
+			__func__);
+		ret = -EINVAL;
+		goto err_mba_data;
+	}
+	/* Ensure memcpy of the MBA memory is done before loading the DP */
+	wmb();
+
+	/* Load the DP image into memory */
+	if (drv->mba_dp_size > SZ_1M) {
+		memcpy(mba_dp_virt + SZ_1M, dp_fw->data, dp_fw->size);
+		/* Ensure memcpy is done before powering up modem */
+		wmb();
+	}
+
+	if (pil->subsys_vmid > 0) {
+		ret = pil_assign_mem_to_subsys(pil, drv->mba_dp_phys,
+							drv->mba_dp_size);
+		if (ret) {
+			pr_err("scm_call to unprotect MBA and DP mem failed(rc:%d)\n",
+									ret);
+			goto err_mba_data;
+		}
+	}
+
+	ret = pil_mss_reset(pil);
+	if (ret) {
+		dev_err(pil->dev, "MBA boot failed(rc:%d)\n", ret);
+		goto err_mss_reset;
+	}
+
+	if (dp_fw)
+		release_firmware(dp_fw);
+	release_firmware(fw);
+
+	return 0;
+
+err_mss_reset:
+	if (pil->subsys_vmid > 0)
+		pil_assign_mem_to_linux(pil, drv->mba_dp_phys,
+							drv->mba_dp_size);
+err_mba_data:
+	dma_free_attrs(dma_dev, drv->mba_dp_size, drv->mba_dp_virt,
+				drv->mba_dp_phys, &md->attrs_dma);
+err_invalid_fw:
+	if (dp_fw)
+		release_firmware(dp_fw);
+	release_firmware(fw);
+	drv->mba_dp_virt = NULL;
+	return ret;
+}
+
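+/*
+ * Authenticate the modem metadata (the .mdt headers): copy it into a
+ * 4K-aligned, physically contiguous DMA buffer, hand its physical address
+ * to the MBA via RMB_PMI_META_DATA, and poll RMB_MBA_STATUS for the
+ * result of the authentication.
+ */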
+static int pil_msa_auth_modem_mdt(struct pil_desc *pil, const u8 *metadata,
+					size_t size)
+{
+	struct modem_data *drv = dev_get_drvdata(pil->dev);
+	void *mdata_virt;
+	dma_addr_t mdata_phys;
+	s32 status;
+	int ret;
+	u64 val = is_timeout_disabled() ? 0 : modem_auth_timeout_ms * 1000;
+	struct device *dma_dev = drv->mba_mem_dev_fixed ?: &drv->mba_mem_dev;
+	DEFINE_DMA_ATTRS(attrs);
+
+	trace_pil_func(__func__);
+	dma_dev->coherent_dma_mask = DMA_BIT_MASK(32);
+	dma_set_attr(DMA_ATTR_SKIP_ZEROING, &attrs);
+	dma_set_attr(DMA_ATTR_STRONGLY_ORDERED, &attrs);
+	/* Make metadata physically contiguous and 4K aligned. */
+	mdata_virt = dma_alloc_attrs(dma_dev, size, &mdata_phys, GFP_KERNEL,
+				     &attrs);
+	if (!mdata_virt) {
+		dev_err(pil->dev, "%s MBA metadata buffer allocation %zx bytes failed\n",
+			 __func__, size);
+		ret = -ENOMEM;
+		goto fail;
+	}
+	memcpy(mdata_virt, metadata, size);
+	/* wmb() ensures copy completes prior to starting authentication. */
+	wmb();
+
+	if (pil->subsys_vmid > 0) {
+		ret = pil_assign_mem_to_subsys(pil, mdata_phys,
+							ALIGN(size, SZ_4K));
+		if (ret) {
+			pr_err("scm_call to unprotect modem metadata mem failed(rc:%d)\n",
+									ret);
+			dma_free_attrs(dma_dev, size, mdata_virt, mdata_phys,
+									&attrs);
+			goto fail;
+		}
+	}
+
+	/* Initialize length counter to 0 */
+	writel_relaxed(0, drv->rmb_base + RMB_PMI_CODE_LENGTH);
+
+	/* Pass address of meta-data to the MBA and perform authentication */
+	writel_relaxed(mdata_phys, drv->rmb_base + RMB_PMI_META_DATA);
+	writel_relaxed(CMD_META_DATA_READY, drv->rmb_base + RMB_MBA_COMMAND);
+	ret = readl_poll_timeout(drv->rmb_base + RMB_MBA_STATUS, status,
+			status == STATUS_META_DATA_AUTH_SUCCESS || status < 0,
+			POLL_INTERVAL_US, val);
+	if (ret) {
+		dev_err(pil->dev, "MBA authentication of headers timed out(rc:%d)\n",
+								ret);
+	} else if (status < 0) {
+		dev_err(pil->dev, "MBA returned error %d for headers\n",
+				status);
+		ret = -EINVAL;
+	}
+
+	if (pil->subsys_vmid > 0)
+		pil_assign_mem_to_linux(pil, mdata_phys, ALIGN(size, SZ_4K));
+
+	dma_free_attrs(dma_dev, size, mdata_virt, mdata_phys, &attrs);
+
+	if (!ret)
+		return ret;
+
+fail:
+	modem_log_rmb_regs(drv->rmb_base);
+	if (drv->q6) {
+		pil_mss_shutdown(pil);
+		if (pil->subsys_vmid > 0)
+			pil_assign_mem_to_linux(pil, drv->q6->mba_dp_phys,
+						drv->q6->mba_dp_size);
+		dma_free_attrs(dma_dev, drv->q6->mba_dp_size,
+				drv->q6->mba_dp_virt, drv->q6->mba_dp_phys,
+				&drv->attrs_dma);
+		drv->q6->mba_dp_virt = NULL;
+
+	}
+	return ret;
+}
+
+static int pil_msa_mss_reset_mba_load_auth_mdt(struct pil_desc *pil,
+				  const u8 *metadata, size_t size)
+{
+	int ret;
+
+	ret = pil_mss_reset_load_mba(pil);
+	if (ret)
+		return ret;
+
+	return pil_msa_auth_modem_mdt(pil, metadata, size);
+}
+
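+/*
+ * verify_blob callback: called once per loaded segment (see
+ * struct pil_reset_ops). The first call hands the MBA the region start
+ * address and the CMD_LOAD_READY command; every call then advances the
+ * running RMB_PMI_CODE_LENGTH counter so the MBA can authenticate the
+ * image incrementally as segments arrive.
+ */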
+static int pil_msa_mba_verify_blob(struct pil_desc *pil, phys_addr_t phy_addr,
+				   size_t size)
+{
+	struct modem_data *drv = dev_get_drvdata(pil->dev);
+	s32 status;
+	u32 img_length = readl_relaxed(drv->rmb_base + RMB_PMI_CODE_LENGTH);
+
+	/* Begin image authentication */
+	if (img_length == 0) {
+		writel_relaxed(phy_addr, drv->rmb_base + RMB_PMI_CODE_START);
+		writel_relaxed(CMD_LOAD_READY, drv->rmb_base + RMB_MBA_COMMAND);
+	}
+	/* Increment length counter */
+	img_length += size;
+	writel_relaxed(img_length, drv->rmb_base + RMB_PMI_CODE_LENGTH);
+
+	status = readl_relaxed(drv->rmb_base + RMB_MBA_STATUS);
+	if (status < 0) {
+		dev_err(pil->dev, "MBA returned error %d\n", status);
+		modem_log_rmb_regs(drv->rmb_base);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int pil_msa_mba_auth(struct pil_desc *pil)
+{
+	struct modem_data *drv = dev_get_drvdata(pil->dev);
+	struct q6v5_data *q6_drv = container_of(pil, struct q6v5_data, desc);
+	int ret;
+	struct device *dma_dev = drv->mba_mem_dev_fixed ?: &drv->mba_mem_dev;
+	s32 status;
+	u64 val = is_timeout_disabled() ? 0 : modem_auth_timeout_ms * 1000;
+
+	/* Wait for all segments to be authenticated or an error to occur */
+	ret = readl_poll_timeout(drv->rmb_base + RMB_MBA_STATUS, status,
+		status == STATUS_AUTH_COMPLETE || status < 0, 50, val);
+	if (ret) {
+		dev_err(pil->dev, "MBA authentication of image timed out(rc:%d)\n",
+									ret);
+	} else if (status < 0) {
+		dev_err(pil->dev, "MBA returned error %d for image\n", status);
+		ret = -EINVAL;
+	}
+
+	if (drv->q6) {
+		if (drv->q6->mba_dp_virt) {
+			/* Reclaim MBA and DP (if allocated) memory. */
+			if (pil->subsys_vmid > 0)
+				pil_assign_mem_to_linux(pil,
+					drv->q6->mba_dp_phys,
+					drv->q6->mba_dp_size);
+			dma_free_attrs(dma_dev, drv->q6->mba_dp_size,
+				drv->q6->mba_dp_virt, drv->q6->mba_dp_phys,
+				&drv->attrs_dma);
+
+			drv->q6->mba_dp_virt = NULL;
+		}
+	}
+	if (ret)
+		modem_log_rmb_regs(drv->rmb_base);
+	if (q6_drv->ahb_clk_vote)
+		clk_disable_unprepare(q6_drv->ahb_clk);
+
+	return ret;
+}
+
+/*
+ * To be used only if self-auth is disabled, or if the
+ * MBA image is loaded as segments and not in init_image.
+ */
+struct pil_reset_ops pil_msa_mss_ops = {
+	.proxy_vote = pil_mss_make_proxy_votes,
+	.proxy_unvote = pil_mss_remove_proxy_votes,
+	.auth_and_reset = pil_mss_reset,
+	.shutdown = pil_mss_shutdown,
+};
+
+/*
+ * To be used if self-auth is enabled and the MBA is to be loaded
+ * in init_image and the modem headers are also to be authenticated
+ * in init_image. Modem segments are authenticated in auth_and_reset.
+ */
+struct pil_reset_ops pil_msa_mss_ops_selfauth = {
+	.init_image = pil_msa_mss_reset_mba_load_auth_mdt,
+	.proxy_vote = pil_mss_make_proxy_votes,
+	.proxy_unvote = pil_mss_remove_proxy_votes,
+	.mem_setup = pil_mss_mem_setup,
+	.verify_blob = pil_msa_mba_verify_blob,
+	.auth_and_reset = pil_msa_mba_auth,
+	.deinit_image = pil_mss_deinit_image,
+	.shutdown = pil_mss_shutdown,
+};
+
+/*
+ * To be used if the modem headers are to be authenticated
+ * in init_image, and the modem segments in auth_and_reset.
+ */
+struct pil_reset_ops pil_msa_femto_mba_ops = {
+	.init_image = pil_msa_auth_modem_mdt,
+	.verify_blob = pil_msa_mba_verify_blob,
+	.auth_and_reset = pil_msa_mba_auth,
+};
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/soc/qcom/pil-msa.h	2019-01-22 16:16:26.663275022 +0100
@@ -0,0 +1,50 @@
+/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MSM_PIL_MSA_H
+#define __MSM_PIL_MSA_H
+
+#include <soc/qcom/subsystem_restart.h>
+
+#include "peripheral-loader.h"
+
+#define VDD_MSS_UV	1000000
+
+struct modem_data {
+	struct q6v5_data *q6;
+	struct subsys_device *subsys;
+	struct subsys_desc subsys_desc;
+	void *ramdump_dev;
+	void *minidump_dev;
+	bool crash_shutdown;
+	u32 pas_id;
+	bool ignore_errors;
+	struct completion stop_ack;
+	void __iomem *rmb_base;
+	struct clk *xo;
+	struct pil_desc desc;
+	struct device mba_mem_dev;
+	struct device *mba_mem_dev_fixed;
+	struct dma_attrs attrs_dma;
+};
+
+extern struct pil_reset_ops pil_msa_mss_ops;
+extern struct pil_reset_ops pil_msa_mss_ops_selfauth;
+extern struct pil_reset_ops pil_msa_femto_mba_ops;
+
+int pil_mss_reset_load_mba(struct pil_desc *pil);
+int pil_mss_make_proxy_votes(struct pil_desc *pil);
+void pil_mss_remove_proxy_votes(struct pil_desc *pil);
+int pil_mss_shutdown(struct pil_desc *pil);
+int pil_mss_deinit_image(struct pil_desc *pil);
+int __pil_mss_deinit_image(struct pil_desc *pil, bool err_path);
+#endif
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/soc/qcom/pil-q6v5.c	2019-01-22 16:16:26.667275058 +0100
@@ -0,0 +1,747 @@
+/*
+ * Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/err.h>
+#include <linux/of.h>
+#include <linux/clk.h>
+#include <linux/regulator/consumer.h>
+#include <linux/regulator/rpm-smd-regulator.h>
+#include <linux/clk/msm-clk.h>
+#include <trace/events/trace_msm_pil_event.h>
+
+#include "peripheral-loader.h"
+#include "pil-q6v5.h"
+
+/* QDSP6SS Register Offsets */
+#define QDSP6SS_RESET			0x014
+#define QDSP6SS_GFMUX_CTL		0x020
+#define QDSP6SS_PWR_CTL			0x030
+#define QDSP6V6SS_MEM_PWR_CTL		0x034
+#define QDSP6SS_BHS_STATUS		0x078
+#define QDSP6SS_MEM_PWR_CTL		0x0B0
+#define QDSP6SS_STRAP_ACC		0x110
+#define QDSP6V62SS_BHS_STATUS		0x0C4
+
+/* AXI Halt Register Offsets */
+#define AXI_HALTREQ			0x0
+#define AXI_HALTACK			0x4
+#define AXI_IDLE			0x8
+
+#define HALT_ACK_TIMEOUT_US		25000
+
+/* QDSP6SS_RESET */
+#define Q6SS_STOP_CORE			BIT(0)
+#define Q6SS_CORE_ARES			BIT(1)
+#define Q6SS_BUS_ARES_ENA		BIT(2)
+
+/* QDSP6SS_GFMUX_CTL */
+#define Q6SS_CLK_ENA			BIT(1)
+#define Q6SS_CLK_SRC_SEL_C		BIT(3)
+#define Q6SS_CLK_SRC_SEL_FIELD		0xC
+#define Q6SS_CLK_SRC_SWITCH_CLK_OVR	BIT(8)
+
+/* QDSP6SS_PWR_CTL */
+#define Q6SS_L2DATA_SLP_NRET_N_0	BIT(0)
+#define Q6SS_L2DATA_SLP_NRET_N_1	BIT(1)
+#define Q6SS_L2DATA_SLP_NRET_N_2	BIT(2)
+#define Q6SS_L2TAG_SLP_NRET_N		BIT(16)
+#define Q6SS_ETB_SLP_NRET_N		BIT(17)
+#define Q6SS_L2DATA_STBY_N		BIT(18)
+#define Q6SS_SLP_RET_N			BIT(19)
+#define Q6SS_CLAMP_IO			BIT(20)
+#define QDSS_BHS_ON			BIT(21)
+#define QDSS_LDO_BYP			BIT(22)
+
+/* QDSP6v55 parameters */
+#define QDSP6v55_LDO_ON                 BIT(26)
+#define QDSP6v55_LDO_BYP                BIT(25)
+#define QDSP6v55_BHS_ON                 BIT(24)
+#define QDSP6v55_CLAMP_WL               BIT(21)
+#define QDSP6v55_CLAMP_QMC_MEM          BIT(22)
+#define L1IU_SLP_NRET_N                 BIT(15)
+#define L1DU_SLP_NRET_N                 BIT(14)
+#define L2PLRU_SLP_NRET_N               BIT(13)
+#define QDSP6v55_BHS_EN_REST_ACK        BIT(0)
+
+#define HALT_CHECK_MAX_LOOPS            (200)
+#define BHS_CHECK_MAX_LOOPS             (200)
+#define QDSP6SS_XO_CBCR                 (0x0038)
+
+#define QDSP6SS_ACC_OVERRIDE_VAL	0x20
+
+int pil_q6v5_make_proxy_votes(struct pil_desc *pil)
+{
+	int ret;
+	struct q6v5_data *drv = container_of(pil, struct q6v5_data, desc);
+	int uv;
+
+	ret = of_property_read_u32(pil->dev->of_node, "vdd_cx-voltage", &uv);
+	if (ret) {
+		dev_err(pil->dev, "missing vdd_cx-voltage property(rc:%d)\n",
+								ret);
+		return ret;
+	}
+
+	ret = clk_prepare_enable(drv->xo);
+	if (ret) {
+		dev_err(pil->dev, "Failed to vote for XO(rc:%d)\n", ret);
+		goto out;
+	}
+
+	ret = clk_prepare_enable(drv->pnoc_clk);
+	if (ret) {
+		dev_err(pil->dev, "Failed to vote for pnoc(rc:%d)\n", ret);
+		goto err_pnoc_vote;
+	}
+
+	ret = clk_prepare_enable(drv->qdss_clk);
+	if (ret) {
+		dev_err(pil->dev, "Failed to vote for qdss(rc:%d)\n", ret);
+		goto err_qdss_vote;
+	}
+
+	ret = regulator_set_voltage(drv->vreg_cx, uv, INT_MAX);
+	if (ret) {
+		dev_err(pil->dev, "Failed to request vdd_cx voltage(rc:%d)\n",
+								ret);
+		goto err_cx_voltage;
+	}
+
+	ret = regulator_set_load(drv->vreg_cx, 100000);
+	if (ret < 0) {
+		dev_err(pil->dev, "Failed to set vdd_cx mode(rc:%d)\n", ret);
+		goto err_cx_mode;
+	}
+
+	ret = regulator_enable(drv->vreg_cx);
+	if (ret) {
+		dev_err(pil->dev, "Failed to vote for vdd_cx(rc:%d)\n", ret);
+		goto err_cx_enable;
+	}
+
+	if (drv->vreg_pll) {
+		ret = regulator_enable(drv->vreg_pll);
+		if (ret) {
+			dev_err(pil->dev, "Failed to vote for vdd_pll(rc:%d)\n",
+									ret);
+			goto err_vreg_pll;
+		}
+	}
+
+	return 0;
+
+err_vreg_pll:
+	regulator_disable(drv->vreg_cx);
+err_cx_enable:
+	regulator_set_load(drv->vreg_cx, 0);
+err_cx_mode:
+	regulator_set_voltage(drv->vreg_cx, RPM_REGULATOR_CORNER_NONE, INT_MAX);
+err_cx_voltage:
+	clk_disable_unprepare(drv->qdss_clk);
+err_qdss_vote:
+	clk_disable_unprepare(drv->pnoc_clk);
+err_pnoc_vote:
+	clk_disable_unprepare(drv->xo);
+out:
+	return ret;
+}
+EXPORT_SYMBOL(pil_q6v5_make_proxy_votes);
+
+void pil_q6v5_remove_proxy_votes(struct pil_desc *pil)
+{
+	struct q6v5_data *drv = container_of(pil, struct q6v5_data, desc);
+	int uv, ret = 0;
+
+	ret = of_property_read_u32(pil->dev->of_node, "vdd_cx-voltage", &uv);
+	if (ret) {
+		dev_err(pil->dev, "missing vdd_cx-voltage property(rc:%d)\n",
+									ret);
+		return;
+	}
+
+	if (drv->vreg_pll) {
+		regulator_disable(drv->vreg_pll);
+		regulator_set_load(drv->vreg_pll, 0);
+	}
+	regulator_disable(drv->vreg_cx);
+	regulator_set_load(drv->vreg_cx, 0);
+	regulator_set_voltage(drv->vreg_cx, RPM_REGULATOR_CORNER_NONE, INT_MAX);
+	clk_disable_unprepare(drv->xo);
+	clk_disable_unprepare(drv->pnoc_clk);
+	clk_disable_unprepare(drv->qdss_clk);
+}
+EXPORT_SYMBOL(pil_q6v5_remove_proxy_votes);
+
+void pil_q6v5_halt_axi_port(struct pil_desc *pil, void __iomem *halt_base)
+{
+	int ret;
+	u32 status;
+
+	/* Assert halt request */
+	writel_relaxed(1, halt_base + AXI_HALTREQ);
+
+	/* Wait for halt */
+	ret = readl_poll_timeout(halt_base + AXI_HALTACK,
+		status, status != 0, 50, HALT_ACK_TIMEOUT_US);
+	if (ret)
+		dev_warn(pil->dev, "Port %pK halt timeout\n", halt_base);
+	else if (!readl_relaxed(halt_base + AXI_IDLE))
+		dev_warn(pil->dev, "Port %pK halt failed\n", halt_base);
+
+	/* Clear halt request (port will remain halted until reset) */
+	writel_relaxed(0, halt_base + AXI_HALTREQ);
+}
+EXPORT_SYMBOL(pil_q6v5_halt_axi_port);
+
+void assert_clamps(struct pil_desc *pil)
+{
+	u32 val;
+	struct q6v5_data *drv = container_of(pil, struct q6v5_data, desc);
+
+	/*
+	 * Assert QDSP6 I/O clamp, memory wordline clamp, and compiler memory
+	 * clamp as a software workaround to avoid high MX current during
+	 * LPASS/MSS restart.
+	 */
+	val = readl_relaxed(drv->reg_base + QDSP6SS_PWR_CTL);
+	val |= (Q6SS_CLAMP_IO | QDSP6v55_CLAMP_WL |
+			QDSP6v55_CLAMP_QMC_MEM);
+	writel_relaxed(val, drv->reg_base + QDSP6SS_PWR_CTL);
+	/* To make sure asserting clamps is done before MSS restart*/
+	mb();
+}
+
+static void __pil_q6v5_shutdown(struct pil_desc *pil)
+{
+	u32 val;
+	struct q6v5_data *drv = container_of(pil, struct q6v5_data, desc);
+
+	/* Turn off core clock */
+	val = readl_relaxed(drv->reg_base + QDSP6SS_GFMUX_CTL);
+	val &= ~Q6SS_CLK_ENA;
+	writel_relaxed(val, drv->reg_base + QDSP6SS_GFMUX_CTL);
+
+	/* Clamp IO */
+	val = readl_relaxed(drv->reg_base + QDSP6SS_PWR_CTL);
+	val |= Q6SS_CLAMP_IO;
+	writel_relaxed(val, drv->reg_base + QDSP6SS_PWR_CTL);
+
+	/* Turn off Q6 memories */
+	val &= ~(Q6SS_L2DATA_SLP_NRET_N_0 | Q6SS_L2DATA_SLP_NRET_N_1 |
+		 Q6SS_L2DATA_SLP_NRET_N_2 | Q6SS_SLP_RET_N |
+		 Q6SS_L2TAG_SLP_NRET_N | Q6SS_ETB_SLP_NRET_N |
+		 Q6SS_L2DATA_STBY_N);
+	writel_relaxed(val, drv->reg_base + QDSP6SS_PWR_CTL);
+
+	/* Assert Q6 resets */
+	val = readl_relaxed(drv->reg_base + QDSP6SS_RESET);
+	val |= (Q6SS_CORE_ARES | Q6SS_BUS_ARES_ENA);
+	writel_relaxed(val, drv->reg_base + QDSP6SS_RESET);
+
+	/* Kill power at block headswitch */
+	val = readl_relaxed(drv->reg_base + QDSP6SS_PWR_CTL);
+	val &= ~QDSS_BHS_ON;
+	writel_relaxed(val, drv->reg_base + QDSP6SS_PWR_CTL);
+}
+
+void pil_q6v5_shutdown(struct pil_desc *pil)
+{
+	struct q6v5_data *drv = container_of(pil, struct q6v5_data, desc);
+
+	if (drv->qdsp6v55)
+		/* Subsystem driver expected to halt bus and assert reset */
+		return;
+	else
+		__pil_q6v5_shutdown(pil);
+}
+EXPORT_SYMBOL(pil_q6v5_shutdown);
+
+static int __pil_q6v5_reset(struct pil_desc *pil)
+{
+	struct q6v5_data *drv = container_of(pil, struct q6v5_data, desc);
+	u32 val;
+
+	/* Assert resets, stop core */
+	val = readl_relaxed(drv->reg_base + QDSP6SS_RESET);
+	val |= (Q6SS_CORE_ARES | Q6SS_BUS_ARES_ENA | Q6SS_STOP_CORE);
+	writel_relaxed(val, drv->reg_base + QDSP6SS_RESET);
+
+	/* Enable power block headswitch, and wait for it to stabilize */
+	val = readl_relaxed(drv->reg_base + QDSP6SS_PWR_CTL);
+	val |= QDSS_BHS_ON | QDSS_LDO_BYP;
+	writel_relaxed(val, drv->reg_base + QDSP6SS_PWR_CTL);
+	mb();
+	udelay(1);
+
+	/*
+	 * Turn on memories. L2 banks should be done individually
+	 * to minimize inrush current.
+	 */
+	val = readl_relaxed(drv->reg_base + QDSP6SS_PWR_CTL);
+	val |= Q6SS_SLP_RET_N | Q6SS_L2TAG_SLP_NRET_N |
+	       Q6SS_ETB_SLP_NRET_N | Q6SS_L2DATA_STBY_N;
+	writel_relaxed(val, drv->reg_base + QDSP6SS_PWR_CTL);
+	val |= Q6SS_L2DATA_SLP_NRET_N_2;
+	writel_relaxed(val, drv->reg_base + QDSP6SS_PWR_CTL);
+	val |= Q6SS_L2DATA_SLP_NRET_N_1;
+	writel_relaxed(val, drv->reg_base + QDSP6SS_PWR_CTL);
+	val |= Q6SS_L2DATA_SLP_NRET_N_0;
+	writel_relaxed(val, drv->reg_base + QDSP6SS_PWR_CTL);
+
+	/* Remove IO clamp */
+	val &= ~Q6SS_CLAMP_IO;
+	writel_relaxed(val, drv->reg_base + QDSP6SS_PWR_CTL);
+
+	/* Bring core out of reset */
+	val = readl_relaxed(drv->reg_base + QDSP6SS_RESET);
+	val &= ~Q6SS_CORE_ARES;
+	writel_relaxed(val, drv->reg_base + QDSP6SS_RESET);
+
+	/* Turn on core clock */
+	val = readl_relaxed(drv->reg_base + QDSP6SS_GFMUX_CTL);
+	val |= Q6SS_CLK_ENA;
+
+	/* Need a different clock source for v5.2.0 */
+	if (drv->qdsp6v5_2_0) {
+		val &= ~Q6SS_CLK_SRC_SEL_FIELD;
+		val |= Q6SS_CLK_SRC_SEL_C;
+	}
+
+	/* force clock on during source switch */
+	if (drv->qdsp6v56)
+		val |= Q6SS_CLK_SRC_SWITCH_CLK_OVR;
+
+	writel_relaxed(val, drv->reg_base + QDSP6SS_GFMUX_CTL);
+
+	/* Start core execution */
+	val = readl_relaxed(drv->reg_base + QDSP6SS_RESET);
+	val &= ~Q6SS_STOP_CORE;
+	writel_relaxed(val, drv->reg_base + QDSP6SS_RESET);
+
+	return 0;
+}
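+
+/*
+ * Editor's sketch, not part of the original patch: the staged power-on
+ * used above, factored into a generic helper. Raising one bank bit per
+ * write keeps the combined inrush current low; the helper name and the
+ * 1us settle delay are illustrative assumptions.
+ */
+static void example_power_on_mem_banks(void __iomem *reg, int first_bit,
+				       int last_bit)
+{
+	u32 val = readl_relaxed(reg);
+	int bit;
+
+	for (bit = first_bit; bit <= last_bit; bit++) {
+		val |= BIT(bit);
+		writel_relaxed(val, reg);
+		udelay(1);	/* let this bank's turn-on current decay */
+	}
+}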
+
+static int q6v55_branch_clk_enable(struct q6v5_data *drv)
+{
+	u32 val, count;
+	void __iomem *cbcr_reg = drv->reg_base + QDSP6SS_XO_CBCR;
+
+	val = readl_relaxed(cbcr_reg);
+	val |= 0x1;
+	writel_relaxed(val, cbcr_reg);
+
+	for (count = HALT_CHECK_MAX_LOOPS; count > 0; count--) {
+		val = readl_relaxed(cbcr_reg);
+		/* CLK_OFF (bit 31) clears once the branch clock runs */
+		if (!(val & BIT(31)))
+			return 0;
+		udelay(1);
+	}
+
+	dev_err(drv->desc.dev, "Failed to enable xo branch clock.\n");
+	return -ETIMEDOUT;
+}
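+
+/*
+ * Editor's note, not part of the original patch: the open-coded poll
+ * above could also be written with readl_poll_timeout(), which this file
+ * already uses for the AXI halt handshake. A sketch, assuming a 1us poll
+ * interval and treating HALT_CHECK_MAX_LOOPS as the timeout budget in us:
+ */
+static int example_branch_clk_enable(struct q6v5_data *drv)
+{
+	void __iomem *cbcr_reg = drv->reg_base + QDSP6SS_XO_CBCR;
+	u32 val;
+
+	writel_relaxed(readl_relaxed(cbcr_reg) | 0x1, cbcr_reg);
+
+	/* CLK_OFF (bit 31) clears once the branch clock is running */
+	return readl_poll_timeout(cbcr_reg, val, !(val & BIT(31)),
+				  1, HALT_CHECK_MAX_LOOPS);
+}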
+
+static int __pil_q6v55_reset(struct pil_desc *pil)
+{
+	struct q6v5_data *drv = container_of(pil, struct q6v5_data, desc);
+	u32 val;
+	int i;
+
+	trace_pil_func(__func__);
+	/* Override the ACC value if required */
+	if (drv->override_acc)
+		writel_relaxed(QDSP6SS_ACC_OVERRIDE_VAL,
+				drv->reg_base + QDSP6SS_STRAP_ACC);
+
+	/* Override the ACC value with input value */
+	if (!of_property_read_u32(pil->dev->of_node, "qcom,override-acc-1",
+				&drv->override_acc_1))
+		writel_relaxed(drv->override_acc_1,
+				drv->reg_base + QDSP6SS_STRAP_ACC);
+
+	/* Assert resets, stop core */
+	val = readl_relaxed(drv->reg_base + QDSP6SS_RESET);
+	val |= (Q6SS_CORE_ARES | Q6SS_BUS_ARES_ENA | Q6SS_STOP_CORE);
+	writel_relaxed(val, drv->reg_base + QDSP6SS_RESET);
+
+	/* BHS requires the XO CBCR to be enabled */
+	i = q6v55_branch_clk_enable(drv);
+	if (i)
+		return i;
+
+	/* Enable power block headswitch, and wait for it to stabilize */
+	val = readl_relaxed(drv->reg_base + QDSP6SS_PWR_CTL);
+	val |= QDSP6v55_BHS_ON;
+	writel_relaxed(val, drv->reg_base + QDSP6SS_PWR_CTL);
+	mb();
+	udelay(1);
+
+	if (drv->qdsp6v62_1_2 || drv->qdsp6v62_1_5) {
+		for (i = BHS_CHECK_MAX_LOOPS; i > 0; i--) {
+			if (readl_relaxed(drv->reg_base + QDSP6V62SS_BHS_STATUS)
+			    & QDSP6v55_BHS_EN_REST_ACK)
+				break;
+			udelay(1);
+		}
+		if (!i) {
+			pr_err("%s: BHS_EN_REST_ACK not set!\n", __func__);
+			return -ETIMEDOUT;
+		}
+	}
+
+	if (drv->qdsp6v61_1_1) {
+		for (i = BHS_CHECK_MAX_LOOPS; i > 0; i--) {
+			if (readl_relaxed(drv->reg_base + QDSP6SS_BHS_STATUS)
+			    & QDSP6v55_BHS_EN_REST_ACK)
+				break;
+			udelay(1);
+		}
+		if (!i) {
+			pr_err("%s: BHS_EN_REST_ACK not set!\n", __func__);
+			return -ETIMEDOUT;
+		}
+	}
+
+	/* Put LDO in bypass mode */
+	val |= QDSP6v55_LDO_BYP;
+	writel_relaxed(val, drv->reg_base + QDSP6SS_PWR_CTL);
+
+	if (drv->qdsp6v56_1_3) {
+		/* Deassert memory peripheral sleep and L2 memory standby */
+		val = readl_relaxed(drv->reg_base + QDSP6SS_PWR_CTL);
+		val |= (Q6SS_L2DATA_STBY_N | Q6SS_SLP_RET_N);
+		writel_relaxed(val, drv->reg_base + QDSP6SS_PWR_CTL);
+
+		/* Turn on L1, L2 and ETB memories 1 at a time */
+		for (i = 17; i >= 0; i--) {
+			val |= BIT(i);
+			writel_relaxed(val, drv->reg_base + QDSP6SS_PWR_CTL);
+			udelay(1);
+		}
+	} else if (drv->qdsp6v56_1_5 || drv->qdsp6v56_1_8
+					|| drv->qdsp6v56_1_10) {
+		/* Deassert QDSP6 compiler memory clamp */
+		val = readl_relaxed(drv->reg_base + QDSP6SS_PWR_CTL);
+		val &= ~QDSP6v55_CLAMP_QMC_MEM;
+		writel_relaxed(val, drv->reg_base + QDSP6SS_PWR_CTL);
+
+		/* Deassert memory peripheral sleep and L2 memory standby */
+		val |= (Q6SS_L2DATA_STBY_N | Q6SS_SLP_RET_N);
+		writel_relaxed(val, drv->reg_base + QDSP6SS_PWR_CTL);
+
+		/* Turn on L1, L2, ETB and JU memories 1 at a time */
+		val = readl_relaxed(drv->reg_base + QDSP6SS_MEM_PWR_CTL);
+		for (i = 19; i >= 0; i--) {
+			val |= BIT(i);
+			writel_relaxed(val, drv->reg_base +
+						QDSP6SS_MEM_PWR_CTL);
+			val |= readl_relaxed(drv->reg_base +
+						QDSP6SS_MEM_PWR_CTL);
+			/*
+			 * Wait for 1us for both memory peripheral and
+			 * data array to turn on.
+			 */
+			udelay(1);
+		}
+	} else if (drv->qdsp6v56_1_8_inrush_current) {
+		/* Deassert QDSP6 compiler memory clamp */
+		val = readl_relaxed(drv->reg_base + QDSP6SS_PWR_CTL);
+		val &= ~QDSP6v55_CLAMP_QMC_MEM;
+		writel_relaxed(val, drv->reg_base + QDSP6SS_PWR_CTL);
+
+		/* Deassert memory peripheral sleep and L2 memory standby */
+		val |= (Q6SS_L2DATA_STBY_N | Q6SS_SLP_RET_N);
+		writel_relaxed(val, drv->reg_base + QDSP6SS_PWR_CTL);
+
+		/* Turn on L1, L2, ETB and JU memories 1 at a time */
+		val = readl_relaxed(drv->reg_base + QDSP6SS_MEM_PWR_CTL);
+		for (i = 19; i >= 6; i--) {
+			val |= BIT(i);
+			writel_relaxed(val, drv->reg_base +
+						QDSP6SS_MEM_PWR_CTL);
+			/*
+			 * Wait for 1us for both memory peripheral and
+			 * data array to turn on.
+			 */
+			udelay(1);
+		}
+
+		for (i = 0; i <= 5; i++) {
+			val |= BIT(i);
+			writel_relaxed(val, drv->reg_base +
+						QDSP6SS_MEM_PWR_CTL);
+			/*
+			 * Wait for 1us for both memory peripheral and
+			 * data array to turn on.
+			 */
+			udelay(1);
+		}
+	} else if (drv->qdsp6v61_1_1 || drv->qdsp6v62_1_2 ||
+						drv->qdsp6v62_1_5) {
+		/* Deassert QDSP6 compiler memory clamp */
+		val = readl_relaxed(drv->reg_base + QDSP6SS_PWR_CTL);
+		val &= ~QDSP6v55_CLAMP_QMC_MEM;
+		writel_relaxed(val, drv->reg_base + QDSP6SS_PWR_CTL);
+
+		/* Deassert memory peripheral sleep and L2 memory standby */
+		val |= (Q6SS_L2DATA_STBY_N | Q6SS_SLP_RET_N);
+		writel_relaxed(val, drv->reg_base + QDSP6SS_PWR_CTL);
+
+		/* Turn on L1, L2, ETB and JU memories 1 at a time */
+		val = readl_relaxed(drv->reg_base +
+				QDSP6V6SS_MEM_PWR_CTL);
+
+		if (drv->qdsp6v62_1_5)
+			i = 29;
+		else
+			i = 28;
+
+		for ( ; i >= 0; i--) {
+			val |= BIT(i);
+			writel_relaxed(val, drv->reg_base +
+					QDSP6V6SS_MEM_PWR_CTL);
+			val = readl_relaxed(drv->reg_base +
+					QDSP6V6SS_MEM_PWR_CTL);
+			/*
+			 * Wait for 1us for both memory peripheral and
+			 * data array to turn on.
+			 */
+			udelay(1);
+		}
+	} else {
+		/* Turn on memories. */
+		val = readl_relaxed(drv->reg_base + QDSP6SS_PWR_CTL);
+		val |= 0xFFF00;
+		writel_relaxed(val, drv->reg_base + QDSP6SS_PWR_CTL);
+
+		/* Turn on L2 banks 1 at a time */
+		for (i = 0; i <= 7; i++) {
+			val |= BIT(i);
+			writel_relaxed(val, drv->reg_base + QDSP6SS_PWR_CTL);
+		}
+	}
+
+	/* Remove word line clamp */
+	val = readl_relaxed(drv->reg_base + QDSP6SS_PWR_CTL);
+	val &= ~QDSP6v55_CLAMP_WL;
+	writel_relaxed(val, drv->reg_base + QDSP6SS_PWR_CTL);
+
+	/* Remove IO clamp */
+	val &= ~Q6SS_CLAMP_IO;
+	writel_relaxed(val, drv->reg_base + QDSP6SS_PWR_CTL);
+
+	/* Bring core out of reset */
+	val = readl_relaxed(drv->reg_base + QDSP6SS_RESET);
+	val &= ~(Q6SS_CORE_ARES | Q6SS_STOP_CORE);
+	writel_relaxed(val, drv->reg_base + QDSP6SS_RESET);
+
+	/* Turn on core clock */
+	val = readl_relaxed(drv->reg_base + QDSP6SS_GFMUX_CTL);
+	val |= Q6SS_CLK_ENA;
+	writel_relaxed(val, drv->reg_base + QDSP6SS_GFMUX_CTL);
+
+	return 0;
+}
+
+int pil_q6v5_reset(struct pil_desc *pil)
+{
+	struct q6v5_data *drv = container_of(pil, struct q6v5_data, desc);
+
+	if (drv->qdsp6v55)
+		return __pil_q6v55_reset(pil);
+
+	return __pil_q6v5_reset(pil);
+}
+EXPORT_SYMBOL(pil_q6v5_reset);
+
+struct q6v5_data *pil_q6v5_init(struct platform_device *pdev)
+{
+	struct q6v5_data *drv;
+	struct resource *res;
+	struct pil_desc *desc;
+	struct property *prop;
+	int ret, vdd_pll;
+
+	drv = devm_kzalloc(&pdev->dev, sizeof(*drv), GFP_KERNEL);
+	if (!drv)
+		return ERR_PTR(-ENOMEM);
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "qdsp6_base");
+	drv->reg_base = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(drv->reg_base))
+		return ERR_CAST(drv->reg_base);
+
+	desc = &drv->desc;
+	ret = of_property_read_string(pdev->dev.of_node, "qcom,firmware-name",
+				      &desc->name);
+	if (ret)
+		return ERR_PTR(ret);
+
+	desc->clear_fw_region = false;
+	desc->dev = &pdev->dev;
+
+	drv->qdsp6v5_2_0 = of_device_is_compatible(pdev->dev.of_node,
+						   "qcom,pil-femto-modem");
+
+	if (drv->qdsp6v5_2_0)
+		return drv;
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "halt_base");
+	if (res) {
+		drv->axi_halt_base = devm_ioremap(&pdev->dev, res->start,
+							resource_size(res));
+		if (!drv->axi_halt_base) {
+			dev_err(&pdev->dev, "Failed to map axi_halt_base.\n");
+			return ERR_PTR(-ENOMEM);
+		}
+	}
+
+	if (!drv->axi_halt_base) {
+		res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+								"halt_q6");
+		if (res) {
+			drv->axi_halt_q6 = devm_ioremap(&pdev->dev,
+					res->start, resource_size(res));
+			if (!drv->axi_halt_q6) {
+				dev_err(&pdev->dev, "Failed to map axi_halt_q6.\n");
+				return ERR_PTR(-ENOMEM);
+			}
+		}
+
+		res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+								"halt_modem");
+		if (res) {
+			drv->axi_halt_mss = devm_ioremap(&pdev->dev,
+					res->start, resource_size(res));
+			if (!drv->axi_halt_mss) {
+				dev_err(&pdev->dev, "Failed to map axi_halt_mss.\n");
+				return ERR_PTR(-ENOMEM);
+			}
+		}
+
+		res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+								"halt_nc");
+		if (res) {
+			drv->axi_halt_nc = devm_ioremap(&pdev->dev,
+					res->start, resource_size(res));
+			if (!drv->axi_halt_nc) {
+				dev_err(&pdev->dev, "Failed to map axi_halt_nc.\n");
+				return ERR_PTR(-ENOMEM);
+			}
+		}
+	}
+
+	if (!(drv->axi_halt_base || (drv->axi_halt_q6 && drv->axi_halt_mss
+					&& drv->axi_halt_nc))) {
+		dev_err(&pdev->dev, "halt bases for Q6 are not defined.\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	drv->qdsp6v55 = of_device_is_compatible(pdev->dev.of_node,
+						"qcom,pil-q6v55-mss");
+	drv->qdsp6v56 = of_device_is_compatible(pdev->dev.of_node,
+						"qcom,pil-q6v56-mss");
+
+	drv->qdsp6v56_1_3 = of_property_read_bool(pdev->dev.of_node,
+						"qcom,qdsp6v56-1-3");
+	drv->qdsp6v56_1_5 = of_property_read_bool(pdev->dev.of_node,
+						"qcom,qdsp6v56-1-5");
+
+	drv->qdsp6v56_1_8 = of_property_read_bool(pdev->dev.of_node,
+						"qcom,qdsp6v56-1-8");
+	drv->qdsp6v56_1_10 = of_property_read_bool(pdev->dev.of_node,
+						"qcom,qdsp6v56-1-10");
+
+	drv->qdsp6v56_1_8_inrush_current = of_property_read_bool(
+						pdev->dev.of_node,
+						"qcom,qdsp6v56-1-8-inrush-current");
+
+	drv->qdsp6v61_1_1 = of_property_read_bool(pdev->dev.of_node,
+						"qcom,qdsp6v61-1-1");
+
+	drv->qdsp6v62_1_2 = of_property_read_bool(pdev->dev.of_node,
+						"qcom,qdsp6v62-1-2");
+
+	drv->qdsp6v62_1_5 = of_property_read_bool(pdev->dev.of_node,
+						"qcom,qdsp6v62-1-5");
+
+	drv->non_elf_image = of_property_read_bool(pdev->dev.of_node,
+						"qcom,mba-image-is-not-elf");
+
+	drv->override_acc = of_property_read_bool(pdev->dev.of_node,
+						"qcom,override-acc");
+
+	drv->ahb_clk_vote = of_property_read_bool(pdev->dev.of_node,
+						"qcom,ahb-clk-vote");
+	drv->mx_spike_wa = of_property_read_bool(pdev->dev.of_node,
+						"qcom,mx-spike-wa");
+
+	drv->xo = devm_clk_get(&pdev->dev, "xo");
+	if (IS_ERR(drv->xo))
+		return ERR_CAST(drv->xo);
+
+	if (of_property_read_bool(pdev->dev.of_node, "qcom,pnoc-clk-vote")) {
+		drv->pnoc_clk = devm_clk_get(&pdev->dev, "pnoc_clk");
+		if (IS_ERR(drv->pnoc_clk))
+			return ERR_CAST(drv->pnoc_clk);
+	} else {
+		drv->pnoc_clk = NULL;
+	}
+
+	if (of_property_match_string(pdev->dev.of_node,
+			"qcom,proxy-clock-names", "qdss_clk") >= 0) {
+		drv->qdss_clk = devm_clk_get(&pdev->dev, "qdss_clk");
+		if (IS_ERR(drv->qdss_clk))
+			return ERR_CAST(drv->qdss_clk);
+	} else {
+		drv->qdss_clk = NULL;
+	}
+
+	drv->vreg_cx = devm_regulator_get(&pdev->dev, "vdd_cx");
+	if (IS_ERR(drv->vreg_cx))
+		return ERR_CAST(drv->vreg_cx);
+	prop = of_find_property(pdev->dev.of_node, "vdd_cx-voltage", NULL);
+	if (!prop) {
+		dev_err(&pdev->dev, "Missing vdd_cx-voltage property\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	ret = of_property_read_u32(pdev->dev.of_node, "qcom,vdd_pll",
+		&vdd_pll);
+	if (!ret) {
+		drv->vreg_pll = devm_regulator_get(&pdev->dev, "vdd_pll");
+		if (!IS_ERR_OR_NULL(drv->vreg_pll)) {
+			ret = regulator_set_voltage(drv->vreg_pll, vdd_pll,
+							vdd_pll);
+			if (ret) {
+				dev_err(&pdev->dev, "Failed to set vdd_pll voltage(rc:%d)\n",
+									ret);
+				return ERR_PTR(ret);
+			}
+
+			ret = regulator_set_load(drv->vreg_pll, 10000);
+			if (ret < 0) {
+				dev_err(&pdev->dev, "Failed to set vdd_pll mode(rc:%d)\n",
+									ret);
+				return ERR_PTR(ret);
+			}
+		} else {
+			drv->vreg_pll = NULL;
+		}
+	}
+
+	return drv;
+}
+EXPORT_SYMBOL(pil_q6v5_init);
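+
+/*
+ * Editor's sketch, not part of the original patch: the minimal probe-side
+ * use of pil_q6v5_init(), mirroring what pil-q6v5-mss.c (further below)
+ * does. It assumes pil-msa.h is included for the pil_msa_mss_ops table;
+ * error handling is deliberately abridged.
+ */
+static int example_pil_probe(struct platform_device *pdev)
+{
+	struct q6v5_data *q6 = pil_q6v5_init(pdev);
+
+	if (IS_ERR(q6))
+		return PTR_ERR(q6);
+
+	q6->desc.owner = THIS_MODULE;
+	q6->desc.ops = &pil_msa_mss_ops;	/* chosen per target */
+	return pil_desc_init(&q6->desc);
+}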
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/soc/qcom/pil-q6v5.h	2019-01-22 16:16:26.667275058 +0100
@@ -0,0 +1,84 @@
+/*
+ * Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#ifndef __MSM_PIL_Q6V5_H
+#define __MSM_PIL_Q6V5_H
+
+#include "peripheral-loader.h"
+
+struct regulator;
+struct clk;
+struct pil_device;
+struct platform_device;
+
+struct q6v5_data {
+	void __iomem *reg_base;
+	void __iomem *rmb_base;
+	void __iomem *cxrail_bhs;  /* External BHS register */
+	struct clk *xo;		   /* XO clock source */
+	struct clk *pnoc_clk;	   /* PNOC bus clock source */
+	struct clk *ahb_clk;	   /* PIL access to registers */
+	struct clk *axi_clk;	   /* CPU access to memory */
+	struct clk *core_clk;	   /* CPU core */
+	struct clk *reg_clk;	   /* CPU access registers */
+	struct clk *gpll0_mss_clk; /* GPLL0 to MSS connection */
+	struct clk *rom_clk;	   /* Boot ROM */
+	struct clk *snoc_axi_clk;
+	struct clk *mnoc_axi_clk;
+	struct clk *qdss_clk;
+	void __iomem *axi_halt_base; /* Halt bases of q6, mss and
+					nc share the same 4K page */
+	void __iomem *axi_halt_q6;
+	void __iomem *axi_halt_mss;
+	void __iomem *axi_halt_nc;
+	void __iomem *restart_reg;
+	void __iomem *cxip_lm_vote_clear;
+	struct regulator *vreg;
+	struct regulator *vreg_cx;
+	struct regulator *vreg_mx;
+	struct regulator *vreg_pll;
+	bool is_booted;
+	struct pil_desc desc;
+	bool self_auth;
+	phys_addr_t mba_dp_phys;
+	void *mba_dp_virt;
+	size_t mba_dp_size;
+	size_t dp_size;
+	bool qdsp6v55;
+	bool qdsp6v5_2_0;
+	bool qdsp6v56;
+	bool qdsp6v56_1_3;
+	bool qdsp6v56_1_5;
+	bool qdsp6v56_1_8;
+	bool qdsp6v56_1_8_inrush_current;
+	bool qdsp6v56_1_10;
+	bool qdsp6v61_1_1;
+	bool qdsp6v62_1_2;
+	bool qdsp6v62_1_5;
+	bool non_elf_image;
+	bool restart_reg_sec;
+	bool override_acc;
+	int override_acc_1;
+	bool ahb_clk_vote;
+	bool mx_spike_wa;
+	bool cx_ipeak_vote;
+};
+
+int pil_q6v5_make_proxy_votes(struct pil_desc *pil);
+void pil_q6v5_remove_proxy_votes(struct pil_desc *pil);
+void pil_q6v5_halt_axi_port(struct pil_desc *pil, void __iomem *halt_base);
+void pil_q6v5_shutdown(struct pil_desc *pil);
+int pil_q6v5_reset(struct pil_desc *pil);
+void assert_clamps(struct pil_desc *pil);
+struct q6v5_data *pil_q6v5_init(struct platform_device *pdev);
+
+#endif
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/soc/qcom/pil-q6v5-mss.c	2019-10-29 09:26:24.817214667 +0100
@@ -0,0 +1,500 @@
+/*
+ * Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/of_platform.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/ioport.h>
+#include <linux/delay.h>
+#include <linux/sched.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/of.h>
+#include <linux/regulator/consumer.h>
+#include <linux/interrupt.h>
+#include <linux/dma-mapping.h>
+#include <linux/of_gpio.h>
+#include <linux/clk/msm-clk.h>
+#include <soc/qcom/subsystem_restart.h>
+#include <soc/qcom/ramdump.h>
+#include <soc/qcom/smem.h>
+#include <soc/qcom/smsm.h>
+
+#include "peripheral-loader.h"
+#include "pil-q6v5.h"
+#include "pil-msa.h"
+
+#define MAX_VDD_MSS_UV		1150000
+#define PROXY_TIMEOUT_MS	10000
+#define MAX_SSR_REASON_LEN	130U
+#define STOP_ACK_TIMEOUT_MS	1000
+#define QDSP6SS_NMI_STATUS	0x44
+
+#define subsys_to_drv(d) container_of(d, struct modem_data, subsys_desc)
+
+static void log_modem_sfr(void)
+{
+	u32 size;
+	char *smem_reason, reason[MAX_SSR_REASON_LEN];
+
+	smem_reason = smem_get_entry_no_rlock(SMEM_SSR_REASON_MSS0, &size, 0,
+							SMEM_ANY_HOST_FLAG);
+	if (!smem_reason || !size) {
+		pr_err("modem subsystem failure reason: (unknown, smem_get_entry_no_rlock failed).\n");
+		return;
+	}
+	if (!smem_reason[0]) {
+		pr_err("modem subsystem failure reason: (unknown, empty string found).\n");
+		return;
+	}
+
+	strlcpy(reason, smem_reason, min(size, MAX_SSR_REASON_LEN));
+	pr_err("modem subsystem failure reason: %s.\n", reason);
+
+	smem_reason[0] = '\0';
+	/* Ensure the cleared reason string reaches SMEM before we return */
+	wmb();
+}
+
+static void restart_modem(struct modem_data *drv)
+{
+	log_modem_sfr();
+	drv->ignore_errors = true;
+	subsystem_restart_dev(drv->subsys);
+}
+
+static irqreturn_t modem_err_fatal_intr_handler(int irq, void *dev_id)
+{
+	struct modem_data *drv = subsys_to_drv(dev_id);
+	u32 nmi_status = readl_relaxed(drv->q6->reg_base + QDSP6SS_NMI_STATUS);
+
+	/* Ignore if we're the one that set the force stop GPIO */
+	if (drv->crash_shutdown)
+		return IRQ_HANDLED;
+
+	if (nmi_status & 0x04)
+		pr_err("%s: Fatal error on the modem due to TZ NMI\n",
+			__func__);
+	else
+		pr_err("%s: Fatal error on the modem\n", __func__);
+	subsys_set_crash_status(drv->subsys, CRASH_STATUS_ERR_FATAL);
+	restart_modem(drv);
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t modem_stop_ack_intr_handler(int irq, void *dev_id)
+{
+	struct modem_data *drv = subsys_to_drv(dev_id);
+	pr_info("Received stop ack interrupt from modem\n");
+	complete(&drv->stop_ack);
+	return IRQ_HANDLED;
+}
+
+static int modem_shutdown(const struct subsys_desc *subsys, bool force_stop)
+{
+	struct modem_data *drv = subsys_to_drv(subsys);
+	unsigned long ret;
+
+	if (subsys->is_not_loadable)
+		return 0;
+
+	if (!subsys_get_crash_status(drv->subsys) && force_stop &&
+	    subsys->force_stop_gpio) {
+		gpio_set_value(subsys->force_stop_gpio, 1);
+		ret = wait_for_completion_timeout(&drv->stop_ack,
+				msecs_to_jiffies(STOP_ACK_TIMEOUT_MS));
+		if (!ret)
+			pr_warn("Timed out on stop ack from modem.\n");
+		gpio_set_value(subsys->force_stop_gpio, 0);
+	}
+
+	if (drv->subsys_desc.ramdump_disable_gpio) {
+		drv->subsys_desc.ramdump_disable = gpio_get_value(
+					drv->subsys_desc.ramdump_disable_gpio);
+		 pr_warn("Ramdump disable gpio value is %d\n",
+			drv->subsys_desc.ramdump_disable);
+	}
+
+	pil_shutdown(&drv->q6->desc);
+
+	return 0;
+}
+
+static int modem_powerup(const struct subsys_desc *subsys)
+{
+	struct modem_data *drv = subsys_to_drv(subsys);
+
+	if (subsys->is_not_loadable)
+		return 0;
+	/*
+	 * At this time, the modem is shutdown. Therefore this function cannot
+	 * run concurrently with the watchdog bite error handler, making it safe
+	 * to unset the flag below.
+	 */
+	reinit_completion(&drv->stop_ack);
+	drv->subsys_desc.ramdump_disable = 0;
+	drv->ignore_errors = false;
+	drv->q6->desc.fw_name = subsys->fw_name;
+	return pil_boot(&drv->q6->desc);
+}
+
+static void modem_crash_shutdown(const struct subsys_desc *subsys)
+{
+	struct modem_data *drv = subsys_to_drv(subsys);
+
+	drv->crash_shutdown = true;
+	if (!subsys_get_crash_status(drv->subsys) &&
+		subsys->force_stop_gpio) {
+		gpio_set_value(subsys->force_stop_gpio, 1);
+		mdelay(STOP_ACK_TIMEOUT_MS);
+	}
+}
+
+static int modem_ramdump(int enable, const struct subsys_desc *subsys)
+{
+	struct modem_data *drv = subsys_to_drv(subsys);
+	int ret;
+
+	if (!enable)
+		return 0;
+
+	ret = pil_mss_make_proxy_votes(&drv->q6->desc);
+	if (ret)
+		return ret;
+
+	ret = pil_mss_reset_load_mba(&drv->q6->desc);
+	if (ret)
+		return ret;
+
+	ret = pil_do_ramdump(&drv->q6->desc,
+			drv->ramdump_dev, drv->minidump_dev);
+	if (ret < 0)
+		pr_err("Unable to dump modem fw memory (rc = %d).\n", ret);
+
+	ret = __pil_mss_deinit_image(&drv->q6->desc, false);
+	if (ret < 0)
+		pr_err("Unable to free up resources (rc = %d).\n", ret);
+
+	pil_mss_remove_proxy_votes(&drv->q6->desc);
+	return ret;
+}
+
+static irqreturn_t modem_wdog_bite_intr_handler(int irq, void *dev_id)
+{
+	struct modem_data *drv = subsys_to_drv(dev_id);
+
+	if (drv->ignore_errors)
+		return IRQ_HANDLED;
+
+	pr_err("Watchdog bite received from modem software!\n");
+	if (drv->subsys_desc.system_debug &&
+			!gpio_get_value(drv->subsys_desc.err_fatal_gpio))
+		panic("%s: System ramdump requested. Triggering device restart!\n",
+							__func__);
+	subsys_set_crash_status(drv->subsys, CRASH_STATUS_WDOG_BITE);
+	restart_modem(drv);
+	return IRQ_HANDLED;
+}
+
+static int pil_subsys_init(struct modem_data *drv,
+					struct platform_device *pdev)
+{
+	int ret;
+
+	drv->subsys_desc.name = "modem";
+	drv->subsys_desc.dev = &pdev->dev;
+	drv->subsys_desc.owner = THIS_MODULE;
+	drv->subsys_desc.shutdown = modem_shutdown;
+	drv->subsys_desc.powerup = modem_powerup;
+	drv->subsys_desc.ramdump = modem_ramdump;
+	drv->subsys_desc.crash_shutdown = modem_crash_shutdown;
+	drv->subsys_desc.err_fatal_handler = modem_err_fatal_intr_handler;
+	drv->subsys_desc.stop_ack_handler = modem_stop_ack_intr_handler;
+	drv->subsys_desc.wdog_bite_handler = modem_wdog_bite_intr_handler;
+
+	drv->q6->desc.modem_ssr = false;
+	drv->subsys = subsys_register(&drv->subsys_desc);
+	if (IS_ERR(drv->subsys)) {
+		ret = PTR_ERR(drv->subsys);
+		goto err_subsys;
+	}
+
+	drv->q6->desc.subsys_dev = drv->subsys;
+	drv->ramdump_dev = create_ramdump_device("modem", &pdev->dev);
+	if (!drv->ramdump_dev) {
+		pr_err("%s: Unable to create a modem ramdump device.\n",
+			__func__);
+		ret = -ENOMEM;
+		goto err_ramdump;
+	}
+	drv->minidump_dev = create_ramdump_device("md_modem", &pdev->dev);
+	if (!drv->minidump_dev) {
+		pr_err("%s: Unable to create a modem minidump device.\n",
+			__func__);
+		ret = -ENOMEM;
+		goto err_minidump;
+	}
+
+	return 0;
+
+err_minidump:
+	destroy_ramdump_device(drv->ramdump_dev);
+err_ramdump:
+	subsys_unregister(drv->subsys);
+err_subsys:
+	return ret;
+}
+
+static int pil_mss_loadable_init(struct modem_data *drv,
+					struct platform_device *pdev)
+{
+	struct q6v5_data *q6;
+	struct pil_desc *q6_desc;
+	struct resource *res;
+	struct property *prop;
+	int ret;
+
+	q6 = pil_q6v5_init(pdev);
+	if (IS_ERR_OR_NULL(q6))
+		return q6 ? PTR_ERR(q6) : -ENOMEM;
+	drv->q6 = q6;
+	drv->xo = q6->xo;
+
+	q6_desc = &q6->desc;
+	q6_desc->owner = THIS_MODULE;
+	q6_desc->proxy_timeout = PROXY_TIMEOUT_MS;
+
+	q6_desc->ops = &pil_msa_mss_ops;
+
+	q6->self_auth = of_property_read_bool(pdev->dev.of_node,
+							"qcom,pil-self-auth");
+	if (q6->self_auth) {
+		res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+						    "rmb_base");
+		q6->rmb_base = devm_ioremap_resource(&pdev->dev, res);
+		if (IS_ERR(q6->rmb_base))
+			return PTR_ERR(q6->rmb_base);
+		drv->rmb_base = q6->rmb_base;
+		q6_desc->ops = &pil_msa_mss_ops_selfauth;
+	}
+
+	q6->cx_ipeak_vote = of_property_read_bool(pdev->dev.of_node,
+							"qcom,cx-ipeak-vote");
+	if (q6->cx_ipeak_vote) {
+		res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+						    "cxip_lm_vote_clear");
+		if (!res) {
+			dev_err(&pdev->dev, "Failed to get resource for ipeak reg\n");
+			return -EINVAL;
+		}
+		q6->cxip_lm_vote_clear = devm_ioremap(&pdev->dev,
+						res->start, resource_size(res));
+		if (!q6->cxip_lm_vote_clear)
+			return -ENOMEM;
+	}
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "restart_reg");
+	if (!res) {
+		res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+							"restart_reg_sec");
+		if (!res) {
+			dev_err(&pdev->dev, "Failed to get resource for restart reg\n");
+			return -EINVAL;
+		}
+
+		q6->restart_reg_sec = true;
+	}
+
+	q6->restart_reg = devm_ioremap(&pdev->dev,
+						res->start, resource_size(res));
+	if (!q6->restart_reg)
+		return -ENOMEM;
+
+	q6->vreg = NULL;
+
+	prop = of_find_property(pdev->dev.of_node, "vdd_mss-supply", NULL);
+	if (prop) {
+		q6->vreg = devm_regulator_get(&pdev->dev, "vdd_mss");
+		if (IS_ERR(q6->vreg))
+			return PTR_ERR(q6->vreg);
+
+		ret = regulator_set_voltage(q6->vreg, VDD_MSS_UV,
+						MAX_VDD_MSS_UV);
+		if (ret)
+			dev_err(&pdev->dev, "Failed to set vreg voltage(rc:%d)\n",
+									ret);
+
+		ret = regulator_set_load(q6->vreg, 100000);
+		if (ret < 0) {
+			dev_err(&pdev->dev, "Failed to set vreg mode(rc:%d)\n",
+									ret);
+			return ret;
+		}
+	}
+
+	q6->vreg_mx = devm_regulator_get(&pdev->dev, "vdd_mx");
+	if (IS_ERR(q6->vreg_mx))
+		return PTR_ERR(q6->vreg_mx);
+	prop = of_find_property(pdev->dev.of_node, "vdd_mx-uV", NULL);
+	if (!prop) {
+		dev_err(&pdev->dev, "Missing vdd_mx-uV property\n");
+		return -EINVAL;
+	}
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+		"cxrail_bhs_reg");
+	if (res)
+		q6->cxrail_bhs = devm_ioremap(&pdev->dev, res->start,
+					  resource_size(res));
+
+	q6->ahb_clk = devm_clk_get(&pdev->dev, "iface_clk");
+	if (IS_ERR(q6->ahb_clk))
+		return PTR_ERR(q6->ahb_clk);
+
+	q6->axi_clk = devm_clk_get(&pdev->dev, "bus_clk");
+	if (IS_ERR(q6->axi_clk))
+		return PTR_ERR(q6->axi_clk);
+
+	q6->rom_clk = devm_clk_get(&pdev->dev, "mem_clk");
+	if (IS_ERR(q6->rom_clk))
+		return PTR_ERR(q6->rom_clk);
+
+	ret = of_property_read_u32(pdev->dev.of_node,
+					"qcom,pas-id", &drv->pas_id);
+	if (ret)
+		dev_info(&pdev->dev, "No pas_id found.\n");
+
+	drv->subsys_desc.pil_mss_memsetup =
+	of_property_read_bool(pdev->dev.of_node, "qcom,pil-mss-memsetup");
+
+	/* Optional. */
+	if (of_property_match_string(pdev->dev.of_node,
+			"qcom,active-clock-names", "gpll0_mss_clk") >= 0)
+		q6->gpll0_mss_clk = devm_clk_get(&pdev->dev, "gpll0_mss_clk");
+
+	if (of_property_match_string(pdev->dev.of_node,
+			"qcom,active-clock-names", "snoc_axi_clk") >= 0)
+		q6->snoc_axi_clk = devm_clk_get(&pdev->dev, "snoc_axi_clk");
+
+	if (of_property_match_string(pdev->dev.of_node,
+			"qcom,active-clock-names", "mnoc_axi_clk") >= 0)
+		q6->mnoc_axi_clk = devm_clk_get(&pdev->dev, "mnoc_axi_clk");
+
+	ret = pil_desc_init(q6_desc);
+
+	return ret;
+}
+
+static int pil_mss_driver_probe(struct platform_device *pdev)
+{
+	struct modem_data *drv;
+	int ret, is_not_loadable;
+
+	drv = devm_kzalloc(&pdev->dev, sizeof(*drv), GFP_KERNEL);
+	if (!drv)
+		return -ENOMEM;
+	platform_set_drvdata(pdev, drv);
+
+	is_not_loadable = of_property_read_bool(pdev->dev.of_node,
+							"qcom,is-not-loadable");
+	if (is_not_loadable) {
+		drv->subsys_desc.is_not_loadable = 1;
+	} else {
+		ret = pil_mss_loadable_init(drv, pdev);
+		if (ret)
+			return ret;
+	}
+	init_completion(&drv->stop_ack);
+
+	/* Probe the MBA mem device if present */
+	ret = of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev);
+	if (ret)
+		return ret;
+
+	return pil_subsys_init(drv, pdev);
+}
+
+static int pil_mss_driver_exit(struct platform_device *pdev)
+{
+	struct modem_data *drv = platform_get_drvdata(pdev);
+
+	subsys_unregister(drv->subsys);
+	destroy_ramdump_device(drv->ramdump_dev);
+	destroy_ramdump_device(drv->minidump_dev);
+	pil_desc_release(&drv->q6->desc);
+	return 0;
+}
+
+static int pil_mba_mem_driver_probe(struct platform_device *pdev)
+{
+	struct modem_data *drv;
+
+	if (!pdev->dev.parent) {
+		pr_err("No parent found.\n");
+		return -EINVAL;
+	}
+	drv = dev_get_drvdata(pdev->dev.parent);
+	drv->mba_mem_dev_fixed = &pdev->dev;
+	return 0;
+}
+
+static const struct of_device_id mba_mem_match_table[] = {
+	{ .compatible = "qcom,pil-mba-mem" },
+	{}
+};
+
+static struct platform_driver pil_mba_mem_driver = {
+	.probe = pil_mba_mem_driver_probe,
+	.driver = {
+		.name = "pil-mba-mem",
+		.of_match_table = mba_mem_match_table,
+		.owner = THIS_MODULE,
+	},
+};
+
+static const struct of_device_id mss_match_table[] = {
+	{ .compatible = "qcom,pil-q6v5-mss" },
+	{ .compatible = "qcom,pil-q6v55-mss" },
+	{ .compatible = "qcom,pil-q6v56-mss" },
+	{}
+};
+
+static struct platform_driver pil_mss_driver = {
+	.probe = pil_mss_driver_probe,
+	.remove = pil_mss_driver_exit,
+	.driver = {
+		.name = "pil-q6v5-mss",
+		.of_match_table = mss_match_table,
+		.owner = THIS_MODULE,
+	},
+};
+
+static int __init pil_mss_init(void)
+{
+	int ret;
+
+	ret = platform_driver_register(&pil_mba_mem_driver);
+	if (!ret)
+		ret = platform_driver_register(&pil_mss_driver);
+	return ret;
+}
+module_init(pil_mss_init);
+
+static void __exit pil_mss_exit(void)
+{
+	platform_driver_unregister(&pil_mss_driver);
+	platform_driver_unregister(&pil_mba_mem_driver);
+}
+module_exit(pil_mss_exit);
+
+MODULE_DESCRIPTION("Support for booting modem subsystems with QDSP6v5 Hexagon processors");
+MODULE_LICENSE("GPL v2");
diff -Nruw linux-4.4.115-fbx/drivers/soc/qcom/qdsp6v2./adsp-loader.c linux-4.4.115-fbx/drivers/soc/qcom/qdsp6v2/adsp-loader.c
--- linux-4.4.115-fbx/drivers/soc/qcom/qdsp6v2./adsp-loader.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/soc/qcom/qdsp6v2/adsp-loader.c	2019-01-22 16:16:26.667275058 +0100
@@ -0,0 +1,315 @@
+/*
+ * Copyright (c) 2012-2014, 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/err.h>
+#include <linux/delay.h>
+#include <linux/platform_device.h>
+#include <linux/qdsp6v2/apr.h>
+#include <linux/of_device.h>
+#include <linux/sysfs.h>
+#include <linux/workqueue.h>
+
+#include <soc/qcom/subsystem_restart.h>
+
+#define Q6_PIL_GET_DELAY_MS 100
+#define BOOT_CMD 1
+#define IMAGE_UNLOAD_CMD 0
+
+static ssize_t adsp_boot_store(struct kobject *kobj,
+	struct kobj_attribute *attr,
+	const char *buf, size_t count);
+
+struct adsp_loader_private {
+	void *pil_h;
+	struct kobject *boot_adsp_obj;
+	struct attribute_group *attr_group;
+};
+
+static struct kobj_attribute adsp_boot_attribute =
+	__ATTR(boot, 0220, NULL, adsp_boot_store);
+
+static struct attribute *attrs[] = {
+	&adsp_boot_attribute.attr,
+	NULL,
+};
+
+static struct work_struct adsp_ldr_work;
+static struct platform_device *adsp_private;
+static void adsp_loader_unload(struct platform_device *pdev);
+
+static void adsp_load_fw(struct work_struct *adsp_ldr_work)
+{
+	struct platform_device *pdev = adsp_private;
+	struct adsp_loader_private *priv = NULL;
+
+	const char *adsp_dt = "qcom,adsp-state";
+	int rc = 0;
+	u32 adsp_state;
+	const char *img_name;
+
+	if (!pdev) {
+		pr_err("%s: Platform device null\n", __func__);
+		return;
+	}
+
+	if (!pdev->dev.of_node) {
+		dev_err(&pdev->dev,
+			"%s: Device tree information missing\n", __func__);
+		goto fail;
+	}
+
+	rc = of_property_read_u32(pdev->dev.of_node, adsp_dt, &adsp_state);
+	if (rc) {
+		dev_err(&pdev->dev,
+			"%s: Failed to read %s (rc = %d)\n",
+			__func__, adsp_dt, rc);
+		goto fail;
+	}
+
+	rc = of_property_read_string(pdev->dev.of_node,
+					"qcom,proc-img-to-load",
+					&img_name);
+
+	if (rc) {
+		dev_dbg(&pdev->dev,
+			"%s: loading default image ADSP\n", __func__);
+		goto load_adsp;
+	}
+	if (!strcmp(img_name, "modem")) {
+		/*
+		 * adsp_state always reads as "0", so load the modem image
+		 * based on apr_modem_state to prevent loading the image
+		 * twice.
+		 */
+		adsp_state = apr_get_modem_state();
+		if (adsp_state == APR_SUBSYS_DOWN) {
+			priv = platform_get_drvdata(pdev);
+			if (!priv) {
+				dev_err(&pdev->dev,
+				" %s: Private data get failed\n", __func__);
+				goto fail;
+			}
+
+			priv->pil_h = subsystem_get("modem");
+			if (IS_ERR(priv->pil_h)) {
+				dev_err(&pdev->dev, "%s: pil get failed,\n",
+					__func__);
+				goto fail;
+			}
+
+			/* Set the state of the ADSP in APR driver */
+			apr_set_modem_state(APR_SUBSYS_LOADED);
+		} else if (adsp_state == APR_SUBSYS_LOADED) {
+			dev_dbg(&pdev->dev,
+			"%s: MDSP state = %x\n", __func__, adsp_state);
+		}
+
+		dev_dbg(&pdev->dev, "%s: Q6/MDSP image is loaded\n", __func__);
+		return;
+	}
+load_adsp:
+	{
+		adsp_state = apr_get_q6_state();
+		if (adsp_state == APR_SUBSYS_DOWN) {
+			priv = platform_get_drvdata(pdev);
+			if (!priv) {
+				dev_err(&pdev->dev,
+				" %s: Private data get failed\n", __func__);
+				goto fail;
+			}
+
+			priv->pil_h = subsystem_get("adsp");
+			if (IS_ERR(priv->pil_h)) {
+				dev_err(&pdev->dev, "%s: pil get failed,\n",
+					__func__);
+				goto fail;
+			}
+
+			/* Set the state of the ADSP in APR driver */
+			apr_set_q6_state(APR_SUBSYS_LOADED);
+		} else if (adsp_state == APR_SUBSYS_LOADED) {
+			dev_dbg(&pdev->dev,
+			"%s: ADSP state = %x\n", __func__, adsp_state);
+		}
+
+		dev_dbg(&pdev->dev, "%s: Q6/ADSP image is loaded\n", __func__);
+		return;
+	}
+fail:
+	dev_err(&pdev->dev, "%s: Q6 image loading failed\n", __func__);
+	return;
+}
+
+static void adsp_loader_do(struct platform_device *pdev)
+{
+	dev_info(&pdev->dev, "%s: scheduling work to load ADSP fw\n", __func__);
+	schedule_work(&adsp_ldr_work);
+}
+
+static ssize_t adsp_boot_store(struct kobject *kobj,
+	struct kobj_attribute *attr,
+	const char *buf,
+	size_t count)
+{
+	int boot = 0;
+
+	if (kstrtoint(buf, 10, &boot))
+		return -EINVAL;
+
+	if (boot == BOOT_CMD) {
+		pr_debug("%s: going to call adsp_loader_do\n", __func__);
+		adsp_loader_do(adsp_private);
+	} else if (boot == IMAGE_UNLOAD_CMD) {
+		pr_debug("%s: going to call adsp_unloader\n", __func__);
+		adsp_loader_unload(adsp_private);
+	}
+	return count;
+}
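+
+/*
+ * Editor's note, not part of the original patch: the kobject created in
+ * adsp_loader_init_sysfs() below lives under kernel_kobj, so the node is
+ * /sys/kernel/boot_adsp/boot. A userspace sketch that triggers BOOT_CMD,
+ * kept out of the kernel build with #if 0:
+ */
+#if 0
+#include <fcntl.h>
+#include <unistd.h>
+
+int boot_adsp(void)
+{
+	int fd = open("/sys/kernel/boot_adsp/boot", O_WRONLY);
+
+	if (fd < 0)
+		return -1;
+	write(fd, "1", 1);	/* BOOT_CMD; "0" requests IMAGE_UNLOAD_CMD */
+	close(fd);
+	return 0;
+}
+#endif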
+
+static void adsp_loader_unload(struct platform_device *pdev)
+{
+	struct adsp_loader_private *priv = NULL;
+
+	priv = platform_get_drvdata(pdev);
+
+	if (!priv)
+		return;
+
+	if (priv->pil_h) {
+		dev_dbg(&pdev->dev, "%s: calling subsystem put\n", __func__);
+		subsystem_put(priv->pil_h);
+		priv->pil_h = NULL;
+	}
+}
+
+static int adsp_loader_init_sysfs(struct platform_device *pdev)
+{
+	int ret = -EINVAL;
+	struct adsp_loader_private *priv = NULL;
+	adsp_private = NULL;
+
+	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+	if (!priv) {
+		dev_err(&pdev->dev, "%s: memory alloc failed\n", __func__);
+		ret = -ENOMEM;
+		return ret;
+	}
+
+	platform_set_drvdata(pdev, priv);
+
+	priv->pil_h = NULL;
+	priv->boot_adsp_obj = NULL;
+	priv->attr_group = devm_kzalloc(&pdev->dev,
+				sizeof(*(priv->attr_group)),
+				GFP_KERNEL);
+	if (!priv->attr_group) {
+		dev_err(&pdev->dev, "%s: malloc attr_group failed\n",
+						__func__);
+		ret = -ENOMEM;
+		goto error_return;
+	}
+
+	priv->attr_group->attrs = attrs;
+
+	priv->boot_adsp_obj = kobject_create_and_add("boot_adsp", kernel_kobj);
+	if (!priv->boot_adsp_obj) {
+		dev_err(&pdev->dev, "%s: sysfs create and add failed\n",
+						__func__);
+		ret = -ENOMEM;
+		goto error_return;
+	}
+
+	ret = sysfs_create_group(priv->boot_adsp_obj, priv->attr_group);
+	if (ret) {
+		dev_err(&pdev->dev, "%s: sysfs create group failed %d\n",
+							__func__, ret);
+		goto error_return;
+	}
+
+	adsp_private = pdev;
+
+	return 0;
+
+error_return:
+
+	if (priv->boot_adsp_obj) {
+		kobject_del(priv->boot_adsp_obj);
+		priv->boot_adsp_obj = NULL;
+	}
+
+	return ret;
+}
+
+static int adsp_loader_remove(struct platform_device *pdev)
+{
+	struct adsp_loader_private *priv = NULL;
+
+	priv = platform_get_drvdata(pdev);
+
+	if (!priv)
+		return 0;
+
+	if (priv->pil_h) {
+		subsystem_put(priv->pil_h);
+		priv->pil_h = NULL;
+	}
+
+	if (priv->boot_adsp_obj) {
+		sysfs_remove_group(priv->boot_adsp_obj, priv->attr_group);
+		kobject_del(priv->boot_adsp_obj);
+		priv->boot_adsp_obj = NULL;
+	}
+
+	return 0;
+}
+
+static int adsp_loader_probe(struct platform_device *pdev)
+{
+	int ret = adsp_loader_init_sysfs(pdev);
+
+	if (ret != 0) {
+		dev_err(&pdev->dev, "%s: Error initializing sysfs\n", __func__);
+		return ret;
+	}
+
+	INIT_WORK(&adsp_ldr_work, adsp_load_fw);
+
+	return 0;
+}
+
+static const struct of_device_id adsp_loader_dt_match[] = {
+	{ .compatible = "qcom,adsp-loader" },
+	{ }
+};
+MODULE_DEVICE_TABLE(of, adsp_loader_dt_match);
+
+static struct platform_driver adsp_loader_driver = {
+	.driver = {
+		.name = "adsp-loader",
+		.owner = THIS_MODULE,
+		.of_match_table = adsp_loader_dt_match,
+	},
+	.probe = adsp_loader_probe,
+	.remove = adsp_loader_remove,
+};
+
+static int __init adsp_loader_init(void)
+{
+	return platform_driver_register(&adsp_loader_driver);
+}
+module_init(adsp_loader_init);
+
+static void __exit adsp_loader_exit(void)
+{
+	platform_driver_unregister(&adsp_loader_driver);
+}
+module_exit(adsp_loader_exit);
+
+MODULE_DESCRIPTION("ADSP Loader module");
+MODULE_LICENSE("GPL v2");
diff -Nruw linux-4.4.115-fbx/drivers/soc/qcom/qdsp6v2./apr.c linux-4.4.115-fbx/drivers/soc/qcom/qdsp6v2/apr.c
--- linux-4.4.115-fbx/drivers/soc/qcom/qdsp6v2./apr.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/soc/qcom/qdsp6v2/apr.c	2019-10-29 09:26:24.821214706 +0100
@@ -0,0 +1,982 @@
+/* Copyright (c) 2010-2014, 2016 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/uaccess.h>
+#include <linux/spinlock.h>
+#include <linux/list.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+#include <linux/errno.h>
+#include <linux/fs.h>
+#include <linux/delay.h>
+#include <linux/debugfs.h>
+#include <linux/platform_device.h>
+#include <linux/sysfs.h>
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <soc/qcom/subsystem_restart.h>
+#include <soc/qcom/scm.h>
+#include <sound/apr_audio-v2.h>
+#include <soc/qcom/smd.h>
+#include <linux/qdsp6v2/apr.h>
+#include <linux/qdsp6v2/apr_tal.h>
+#include <linux/qdsp6v2/dsp_debug.h>
+#include <linux/qdsp6v2/audio_notifier.h>
+#include <linux/ipc_logging.h>
+
+#define APR_PKT_IPC_LOG_PAGE_CNT 2
+
+static struct apr_q6 q6;
+static struct apr_client client[APR_DEST_MAX][APR_CLIENT_MAX];
+static void *apr_pkt_ctx;
+static wait_queue_head_t dsp_wait;
+static wait_queue_head_t modem_wait;
+static bool is_modem_up;
+static bool is_initial_boot;
+/* Subsystem restart: QDSP6 data, functions */
+static struct workqueue_struct *apr_reset_workqueue;
+static void apr_reset_deregister(struct work_struct *work);
+static void dispatch_event(unsigned long code, uint16_t proc);
+struct apr_reset_work {
+	void *handle;
+	struct work_struct work;
+};
+
+static bool apr_cf_debug;
+
+#ifdef CONFIG_DEBUG_FS
+static struct dentry *debugfs_apr_debug;
+static ssize_t apr_debug_write(struct file *filp, const char __user *ubuf,
+			       size_t cnt, loff_t *ppos)
+{
+	char cmd;
+
+	if (copy_from_user(&cmd, ubuf, 1))
+		return -EFAULT;
+
+	apr_cf_debug = (cmd == '1');
+
+	return cnt;
+}
+
+static const struct file_operations apr_debug_ops = {
+	.write = apr_debug_write,
+};
+#endif
+
+#define APR_PKT_INFO(x...) \
+do { \
+	if (apr_pkt_ctx) \
+		ipc_log_string(apr_pkt_ctx, "<APR>: "x); \
+} while (0)
+
+
+struct apr_svc_table {
+	char name[64];
+	int idx;
+	int id;
+	int client_id;
+};
+
+static const struct apr_svc_table svc_tbl_qdsp6[] = {
+	{
+		.name = "AFE",
+		.idx = 0,
+		.id = APR_SVC_AFE,
+		.client_id = APR_CLIENT_AUDIO,
+	},
+	{
+		.name = "ASM",
+		.idx = 1,
+		.id = APR_SVC_ASM,
+		.client_id = APR_CLIENT_AUDIO,
+	},
+	{
+		.name = "ADM",
+		.idx = 2,
+		.id = APR_SVC_ADM,
+		.client_id = APR_CLIENT_AUDIO,
+	},
+	{
+		.name = "CORE",
+		.idx = 3,
+		.id = APR_SVC_ADSP_CORE,
+		.client_id = APR_CLIENT_AUDIO,
+	},
+	{
+		.name = "TEST",
+		.idx = 4,
+		.id = APR_SVC_TEST_CLIENT,
+		.client_id = APR_CLIENT_AUDIO,
+	},
+	{
+		.name = "MVM",
+		.idx = 5,
+		.id = APR_SVC_ADSP_MVM,
+		.client_id = APR_CLIENT_AUDIO,
+	},
+	{
+		.name = "CVS",
+		.idx = 6,
+		.id = APR_SVC_ADSP_CVS,
+		.client_id = APR_CLIENT_AUDIO,
+	},
+	{
+		.name = "CVP",
+		.idx = 7,
+		.id = APR_SVC_ADSP_CVP,
+		.client_id = APR_CLIENT_AUDIO,
+	},
+	{
+		.name = "USM",
+		.idx = 8,
+		.id = APR_SVC_USM,
+		.client_id = APR_CLIENT_AUDIO,
+	},
+	{
+		.name = "VIDC",
+		.idx = 9,
+		.id = APR_SVC_VIDC,
+	},
+	{
+		.name = "LSM",
+		.idx = 10,
+		.id = APR_SVC_LSM,
+		.client_id = APR_CLIENT_AUDIO,
+	},
+};
+
+static struct apr_svc_table svc_tbl_voice[] = {
+	{
+		.name = "VSM",
+		.idx = 0,
+		.id = APR_SVC_VSM,
+		.client_id = APR_CLIENT_VOICE,
+	},
+	{
+		.name = "VPM",
+		.idx = 1,
+		.id = APR_SVC_VPM,
+		.client_id = APR_CLIENT_VOICE,
+	},
+	{
+		.name = "MVS",
+		.idx = 2,
+		.id = APR_SVC_MVS,
+		.client_id = APR_CLIENT_VOICE,
+	},
+	{
+		.name = "MVM",
+		.idx = 3,
+		.id = APR_SVC_MVM,
+		.client_id = APR_CLIENT_VOICE,
+	},
+	{
+		.name = "CVS",
+		.idx = 4,
+		.id = APR_SVC_CVS,
+		.client_id = APR_CLIENT_VOICE,
+	},
+	{
+		.name = "CVP",
+		.idx = 5,
+		.id = APR_SVC_CVP,
+		.client_id = APR_CLIENT_VOICE,
+	},
+	{
+		.name = "SRD",
+		.idx = 6,
+		.id = APR_SVC_SRD,
+		.client_id = APR_CLIENT_VOICE,
+	},
+	{
+		.name = "TEST",
+		.idx = 7,
+		.id = APR_SVC_TEST_CLIENT,
+		.client_id = APR_CLIENT_VOICE,
+	},
+};
+
+enum apr_subsys_state apr_get_modem_state(void)
+{
+	return atomic_read(&q6.modem_state);
+}
+
+void apr_set_modem_state(enum apr_subsys_state state)
+{
+	atomic_set(&q6.modem_state, state);
+}
+
+enum apr_subsys_state apr_cmpxchg_modem_state(enum apr_subsys_state prev,
+					      enum apr_subsys_state new)
+{
+	return atomic_cmpxchg(&q6.modem_state, prev, new);
+}
+
+static void apr_modem_down(unsigned long opcode)
+{
+	apr_set_modem_state(APR_SUBSYS_DOWN);
+	dispatch_event(opcode, APR_DEST_MODEM);
+}
+
+static void apr_modem_up(void)
+{
+	if (apr_cmpxchg_modem_state(APR_SUBSYS_DOWN, APR_SUBSYS_UP) ==
+							APR_SUBSYS_DOWN)
+		wake_up(&modem_wait);
+	is_modem_up = 1;
+}
+
+enum apr_subsys_state apr_get_q6_state(void)
+{
+	return atomic_read(&q6.q6_state);
+}
+EXPORT_SYMBOL_GPL(apr_get_q6_state);
+
+int apr_set_q6_state(enum apr_subsys_state state)
+{
+	pr_debug("%s: setting adsp state %d\n", __func__, state);
+	if (state < APR_SUBSYS_DOWN || state > APR_SUBSYS_LOADED)
+		return -EINVAL;
+	atomic_set(&q6.q6_state, state);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(apr_set_q6_state);
+
+enum apr_subsys_state apr_cmpxchg_q6_state(enum apr_subsys_state prev,
+					   enum apr_subsys_state new)
+{
+	return atomic_cmpxchg(&q6.q6_state, prev, new);
+}
+
+static void apr_adsp_down(unsigned long opcode)
+{
+	apr_set_q6_state(APR_SUBSYS_DOWN);
+	dispatch_event(opcode, APR_DEST_QDSP6);
+}
+
+static void apr_adsp_up(void)
+{
+	if (apr_cmpxchg_q6_state(APR_SUBSYS_DOWN, APR_SUBSYS_LOADED) ==
+							APR_SUBSYS_DOWN)
+		wake_up(&dsp_wait);
+}
+
+int apr_wait_for_device_up(int dest_id)
+{
+	int rc = -1;
+	if (dest_id == APR_DEST_MODEM)
+		rc = wait_event_interruptible_timeout(modem_wait,
+				    (apr_get_modem_state() == APR_SUBSYS_UP),
+				    (1 * HZ));
+	else if (dest_id == APR_DEST_QDSP6)
+		rc = wait_event_interruptible_timeout(dsp_wait,
+				    (apr_get_q6_state() == APR_SUBSYS_UP),
+				    (1 * HZ));
+	else
+		pr_err("%s: unknown dest_id %d\n", __func__, dest_id);
+	/* returns the time left, 0 on timeout */
+	return rc;
+}
+
+int apr_load_adsp_image(void)
+{
+	int rc = 0;
+	mutex_lock(&q6.lock);
+	if (apr_get_q6_state() == APR_SUBSYS_UP) {
+		q6.pil = subsystem_get("adsp");
+		if (IS_ERR(q6.pil)) {
+			rc = PTR_ERR(q6.pil);
+			pr_err("APR: Unable to load q6 image, error:%d\n", rc);
+		} else {
+			apr_set_q6_state(APR_SUBSYS_LOADED);
+			pr_debug("APR: Image is loaded, stated\n");
+		}
+	} else if (apr_get_q6_state() == APR_SUBSYS_LOADED) {
+		pr_debug("APR: q6 image already loaded\n");
+	} else {
+		pr_debug("APR: cannot load state %d\n", apr_get_q6_state());
+	}
+	mutex_unlock(&q6.lock);
+	return rc;
+}
+
+struct apr_client *apr_get_client(int dest_id, int client_id)
+{
+	return &client[dest_id][client_id];
+}
+
+int apr_send_pkt(void *handle, uint32_t *buf)
+{
+	struct apr_svc *svc = handle;
+	struct apr_client *clnt;
+	struct apr_hdr *hdr;
+	uint16_t dest_id;
+	uint16_t client_id;
+	uint16_t w_len;
+	int rc;
+	unsigned long flags;
+
+	if (!handle || !buf) {
+		pr_err("APR: Wrong parameters\n");
+		return -EINVAL;
+	}
+	if (svc->need_reset) {
+		pr_err("apr: send_pkt service need reset\n");
+		return -ENETRESET;
+	}
+
+	if ((svc->dest_id == APR_DEST_QDSP6) &&
+	    (apr_get_q6_state() != APR_SUBSYS_LOADED)) {
+		pr_err("%s: Still dsp is not Up\n", __func__);
+		return -ENETRESET;
+	} else if ((svc->dest_id == APR_DEST_MODEM) &&
+		   (apr_get_modem_state() == APR_SUBSYS_DOWN)) {
+		pr_err("apr: Still Modem is not Up\n");
+		return -ENETRESET;
+	}
+
+	spin_lock_irqsave(&svc->w_lock, flags);
+	dest_id = svc->dest_id;
+	client_id = svc->client_id;
+	clnt = &client[dest_id][client_id];
+
+	if (!client[dest_id][client_id].handle) {
+		pr_err("APR: Still service is not yet opened\n");
+		spin_unlock_irqrestore(&svc->w_lock, flags);
+		return -EINVAL;
+	}
+	hdr = (struct apr_hdr *)buf;
+
+	hdr->src_domain = APR_DOMAIN_APPS;
+	hdr->src_svc = svc->id;
+	hdr->dest_domain = svc->dest_domain;
+	hdr->dest_svc = svc->id;
+
+	if (unlikely(apr_cf_debug)) {
+		APR_PKT_INFO(
+		"Tx: src_addr[0x%X] dest_addr[0x%X] opcode[0x%X] token[0x%X]",
+		(hdr->src_domain << 8) | hdr->src_svc,
+		(hdr->dest_domain << 8) | hdr->dest_svc, hdr->opcode,
+		hdr->token);
+	}
+
+	rc = apr_tal_write(clnt->handle, buf,
+			(struct apr_pkt_priv *)&svc->pkt_owner,
+			hdr->pkt_size);
+	if (rc >= 0) {
+		w_len = rc;
+		if (w_len != hdr->pkt_size) {
+			pr_err("%s: Unable to write whole APR pkt successfully: %d\n",
+			       __func__, rc);
+			rc = -EINVAL;
+		}
+	} else {
+		pr_err("%s: Write APR pkt failed with error %d\n",
+			__func__, rc);
+	}
+	spin_unlock_irqrestore(&svc->w_lock, flags);
+
+	return rc;
+}
+
+int apr_pkt_config(void *handle, struct apr_pkt_cfg *cfg)
+{
+	struct apr_svc *svc = (struct apr_svc *)handle;
+	uint16_t dest_id;
+	uint16_t client_id;
+	struct apr_client *clnt;
+
+	if (!handle) {
+		pr_err("%s: Invalid handle\n", __func__);
+		return -EINVAL;
+	}
+
+	if (svc->need_reset) {
+		pr_err("%s: service need reset\n", __func__);
+		return -ENETRESET;
+	}
+
+	svc->pkt_owner = cfg->pkt_owner;
+	dest_id = svc->dest_id;
+	client_id = svc->client_id;
+	clnt = &client[dest_id][client_id];
+
+	return apr_tal_rx_intents_config(clnt->handle,
+		cfg->intents.num_of_intents, cfg->intents.size);
+}
+
+struct apr_svc *apr_register(char *dest, char *svc_name, apr_fn svc_fn,
+				uint32_t src_port, void *priv)
+{
+	struct apr_client *clnt;
+	int client_id = 0;
+	int svc_idx = 0;
+	int svc_id = 0;
+	int dest_id = 0;
+	int domain_id = 0;
+	int temp_port = 0;
+	struct apr_svc *svc = NULL;
+	int rc = 0;
+	bool can_open_channel = true;
+
+	if (!dest || !svc_name || !svc_fn)
+		return NULL;
+
+	if (!strcmp(dest, "ADSP"))
+		domain_id = APR_DOMAIN_ADSP;
+	else if (!strcmp(dest, "MODEM")) {
+		/* Don't request for SMD channels if destination is MODEM,
+		 * as these channels are no longer used and these clients
+		 * are to listen only for MODEM SSR events
+		 */
+		can_open_channel = false;
+		domain_id = APR_DOMAIN_MODEM;
+	} else {
+		pr_err("APR: wrong destination\n");
+		goto done;
+	}
+
+	dest_id = apr_get_dest_id(dest);
+
+	if (dest_id == APR_DEST_QDSP6) {
+		if (apr_get_q6_state() != APR_SUBSYS_LOADED) {
+			pr_err("%s: adsp not up\n", __func__);
+			return NULL;
+		}
+		pr_debug("%s: adsp Up\n", __func__);
+	} else if (dest_id == APR_DEST_MODEM) {
+		if (apr_get_modem_state() == APR_SUBSYS_DOWN) {
+			if (is_modem_up) {
+				pr_err("%s: modem shutdown due to SSR, ret",
+					__func__);
+				return NULL;
+			}
+			pr_debug("%s: Wait for modem to bootup\n", __func__);
+			rc = apr_wait_for_device_up(APR_DEST_MODEM);
+			if (rc == 0) {
+				pr_err("%s: Modem is not Up\n", __func__);
+				return NULL;
+			}
+		}
+		pr_debug("%s: modem Up\n", __func__);
+	}
+
+	if (apr_get_svc(svc_name, domain_id, &client_id, &svc_idx, &svc_id)) {
+		pr_err("%s: apr_get_svc failed\n", __func__);
+		goto done;
+	}
+
+	clnt = &client[dest_id][client_id];
+	mutex_lock(&clnt->m_lock);
+	if (!clnt->handle && can_open_channel) {
+		clnt->handle = apr_tal_open(client_id, dest_id,
+				APR_DL_SMD, apr_cb_func, NULL);
+		if (!clnt->handle) {
+			svc = NULL;
+			pr_err("APR: Unable to open handle\n");
+			mutex_unlock(&clnt->m_lock);
+			goto done;
+		}
+	}
+	mutex_unlock(&clnt->m_lock);
+	svc = &clnt->svc[svc_idx];
+	mutex_lock(&svc->m_lock);
+	clnt->id = client_id;
+	if (svc->need_reset) {
+		mutex_unlock(&svc->m_lock);
+		pr_err("APR: Service needs reset\n");
+		goto done;
+	}
+	svc->id = svc_id;
+	svc->dest_id = dest_id;
+	svc->client_id = client_id;
+	svc->dest_domain = domain_id;
+	svc->pkt_owner = APR_PKT_OWNER_DRIVER;
+
+	if (src_port != 0xFFFFFFFF) {
+		temp_port = ((src_port >> 8) * 8) + (src_port & 0xFF);
+		pr_debug("port = %d t_port = %d\n", src_port, temp_port);
+		if (temp_port >= APR_MAX_PORTS || temp_port < 0) {
+			pr_err("APR: temp_port out of bounds\n");
+			mutex_unlock(&svc->m_lock);
+			return NULL;
+		}
+		if (!svc->svc_cnt)
+			clnt->svc_cnt++;
+		svc->port_cnt++;
+		svc->port_fn[temp_port] = svc_fn;
+		svc->port_priv[temp_port] = priv;
+		svc->svc_cnt++;
+	} else {
+		if (!svc->fn) {
+			if (!svc->svc_cnt)
+				clnt->svc_cnt++;
+			svc->fn = svc_fn;
+			svc->priv = priv;
+			svc->svc_cnt++;
+		}
+	}
+
+	mutex_unlock(&svc->m_lock);
+done:
+	return svc;
+}
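+
+/*
+ * Editor's sketch, not part of the original patch: the client-side flow
+ * of registering a service and sending one packet. It assumes the
+ * APR_HDR_FIELD()/APR_HDR_LEN() helpers and APR_MSG_TYPE_SEQ_CMD from
+ * apr.h; the opcode is a placeholder, and src_port 0xFFFFFFFF selects
+ * the service-wide callback path handled above.
+ */
+static int example_apr_client(apr_fn cb, void *priv)
+{
+	struct apr_svc *svc;
+	struct apr_hdr hdr;
+
+	svc = apr_register("ADSP", "CORE", cb, 0xFFFFFFFF, priv);
+	if (!svc)
+		return -ENODEV;
+
+	memset(&hdr, 0, sizeof(hdr));
+	hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+				      APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
+	hdr.pkt_size = sizeof(hdr);	/* header-only command */
+	hdr.opcode = 0;			/* placeholder opcode */
+
+	return apr_send_pkt(svc, (uint32_t *)&hdr);
+}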
+
+
+void apr_cb_func(void *buf, int len, void *priv)
+{
+	struct apr_client_data data;
+	struct apr_client *apr_client;
+	struct apr_svc *c_svc;
+	struct apr_hdr *hdr;
+	uint16_t hdr_size;
+	uint16_t msg_type;
+	uint16_t ver;
+	uint16_t src;
+	uint16_t svc;
+	uint16_t clnt;
+	int i;
+	int temp_port = 0;
+	uint32_t *ptr;
+
+	pr_debug("APR2: len = %d\n", len);
+	ptr = buf;
+	pr_debug("\n*****************\n");
+	for (i = 0; i < len/4; i++)
+		pr_debug("%x  ", ptr[i]);
+	pr_debug("\n");
+	pr_debug("\n*****************\n");
+
+	if (!buf || len <= APR_HDR_SIZE) {
+		pr_err("APR: Improper apr pkt received:%pK %d\n", buf, len);
+		return;
+	}
+	hdr = buf;
+
+	ver = hdr->hdr_field;
+	ver = (ver & 0x000F);
+	if (ver > APR_PKT_VER + 1) {
+		pr_err("APR: Wrong version: %d\n", ver);
+		return;
+	}
+
+	hdr_size = hdr->hdr_field;
+	hdr_size = ((hdr_size & 0x00F0) >> 0x4) * 4;
+	if (hdr_size < APR_HDR_SIZE) {
+		pr_err("APR: Wrong hdr size:%d\n", hdr_size);
+		return;
+	}
+
+	if (hdr->pkt_size < APR_HDR_SIZE) {
+		pr_err("APR: Wrong paket size\n");
+		return;
+	}
+
+	if (hdr->pkt_size < hdr_size) {
+		pr_err("APR: Packet size less than header size\n");
+		return;
+	}
+
+	msg_type = hdr->hdr_field;
+	msg_type = (msg_type >> 0x08) & 0x0003;
+	if (msg_type >= APR_MSG_TYPE_MAX && msg_type != APR_BASIC_RSP_RESULT) {
+		pr_err("APR: Wrong message type: %d\n", msg_type);
+		return;
+	}
+
+	if (hdr->src_domain >= APR_DOMAIN_MAX ||
+		hdr->dest_domain >= APR_DOMAIN_MAX ||
+		hdr->src_svc >= APR_SVC_MAX ||
+		hdr->dest_svc >= APR_SVC_MAX) {
+		pr_err("APR: Wrong APR header\n");
+		return;
+	}
+
+	svc = hdr->dest_svc;
+	if (hdr->src_domain == APR_DOMAIN_MODEM) {
+		if (svc == APR_SVC_MVS || svc == APR_SVC_MVM ||
+		    svc == APR_SVC_CVS || svc == APR_SVC_CVP ||
+		    svc == APR_SVC_TEST_CLIENT)
+			clnt = APR_CLIENT_VOICE;
+		else {
+			pr_err("APR: Wrong svc :%d\n", svc);
+			return;
+		}
+	} else if (hdr->src_domain == APR_DOMAIN_ADSP) {
+		if (svc == APR_SVC_AFE || svc == APR_SVC_ASM ||
+		    svc == APR_SVC_VSM || svc == APR_SVC_VPM ||
+		    svc == APR_SVC_ADM || svc == APR_SVC_ADSP_CORE ||
+		    svc == APR_SVC_USM ||
+		    svc == APR_SVC_TEST_CLIENT || svc == APR_SVC_ADSP_MVM ||
+		    svc == APR_SVC_ADSP_CVS || svc == APR_SVC_ADSP_CVP ||
+		    svc == APR_SVC_LSM)
+			clnt = APR_CLIENT_AUDIO;
+		else if (svc == APR_SVC_VIDC)
+			clnt = APR_CLIENT_AUDIO;
+		else {
+			pr_err("APR: Wrong svc :%d\n", svc);
+			return;
+		}
+	} else {
+		pr_err("APR: Pkt from wrong source: %d\n", hdr->src_domain);
+		return;
+	}
+
+	src = apr_get_data_src(hdr);
+	if (src == APR_DEST_MAX)
+		return;
+
+	pr_debug("src =%d clnt = %d\n", src, clnt);
+	apr_client = &client[src][clnt];
+	for (i = 0; i < APR_SVC_MAX; i++)
+		if (apr_client->svc[i].id == svc) {
+			pr_debug("%d\n", apr_client->svc[i].id);
+			c_svc = &apr_client->svc[i];
+			break;
+		}
+
+	if (i == APR_SVC_MAX) {
+		pr_err("APR: service is not registered\n");
+		return;
+	}
+	pr_debug("svc_idx = %d\n", i);
+	pr_debug("%x %x %x %pK %pK\n", c_svc->id, c_svc->dest_id,
+		 c_svc->client_id, c_svc->fn, c_svc->priv);
+	data.payload_size = hdr->pkt_size - hdr_size;
+	data.opcode = hdr->opcode;
+	data.src = src;
+	data.src_port = hdr->src_port;
+	data.dest_port = hdr->dest_port;
+	data.token = hdr->token;
+	data.msg_type = msg_type;
+	data.payload = NULL;
+	if (data.payload_size > 0)
+		data.payload = (char *)hdr + hdr_size;
+
+	if (unlikely(apr_cf_debug)) {
+		if (hdr->opcode == APR_BASIC_RSP_RESULT && data.payload) {
+			uint32_t *ptr = data.payload;
+
+			APR_PKT_INFO(
+			"Rx: src_addr[0x%X] dest_addr[0x%X] opcode[0x%X] token[0x%X] rc[0x%X]",
+			(hdr->src_domain << 8) | hdr->src_svc,
+			(hdr->dest_domain << 8) | hdr->dest_svc,
+			hdr->opcode, hdr->token, ptr[1]);
+		} else {
+			APR_PKT_INFO(
+			"Rx: src_addr[0x%X] dest_addr[0x%X] opcode[0x%X] token[0x%X]",
+			(hdr->src_domain << 8) | hdr->src_svc,
+			(hdr->dest_domain << 8) | hdr->dest_svc, hdr->opcode,
+			hdr->token);
+		}
+	}
+
+	temp_port = ((data.dest_port >> 8) * 8) + (data.dest_port & 0xFF);
+	if (((temp_port >= 0) && (temp_port < APR_MAX_PORTS))
+		&& (c_svc->port_cnt && c_svc->port_fn[temp_port]))
+		c_svc->port_fn[temp_port](&data,
+			c_svc->port_priv[temp_port]);
+	else if (c_svc->fn)
+		c_svc->fn(&data, c_svc->priv);
+	else
+		pr_err("APR: Rxed a packet for NULL callback\n");
+}
+
+int apr_get_svc(const char *svc_name, int domain_id, int *client_id,
+		int *svc_idx, int *svc_id)
+{
+	int i;
+	int size;
+	struct apr_svc_table *tbl;
+	int ret = 0;
+
+	if (domain_id == APR_DOMAIN_ADSP) {
+		tbl = (struct apr_svc_table *)&svc_tbl_qdsp6;
+		size = ARRAY_SIZE(svc_tbl_qdsp6);
+	} else {
+		tbl = (struct apr_svc_table *)&svc_tbl_voice;
+		size = ARRAY_SIZE(svc_tbl_voice);
+	}
+
+	for (i = 0; i < size; i++) {
+		if (!strcmp(svc_name, tbl[i].name)) {
+			*client_id = tbl[i].client_id;
+			*svc_idx = tbl[i].idx;
+			*svc_id = tbl[i].id;
+			break;
+		}
+	}
+
+	pr_debug("%s: svc_name = %s c_id = %d domain_id = %d\n",
+		 __func__, svc_name, *client_id, domain_id);
+	if (i == size) {
+		pr_err("%s: APR: Wrong svc name %s\n", __func__, svc_name);
+		ret = -EINVAL;
+	}
+
+	return ret;
+}
+
+static void apr_reset_deregister(struct work_struct *work)
+{
+	struct apr_svc *handle = NULL;
+	struct apr_reset_work *apr_reset =
+			container_of(work, struct apr_reset_work, work);
+
+	handle = apr_reset->handle;
+	pr_debug("%s:handle[%pK]\n", __func__, handle);
+	apr_deregister(handle);
+	kfree(apr_reset);
+}
+
+int apr_deregister(void *handle)
+{
+	struct apr_svc *svc = handle;
+	struct apr_client *clnt;
+	uint16_t dest_id;
+	uint16_t client_id;
+
+	if (!handle)
+		return -EINVAL;
+
+	mutex_lock(&svc->m_lock);
+	if (!svc->svc_cnt) {
+		pr_err("%s: svc already deregistered. svc = %pK\n",
+			__func__, svc);
+		mutex_unlock(&svc->m_lock);
+		return -EINVAL;
+	}
+
+	dest_id = svc->dest_id;
+	client_id = svc->client_id;
+	clnt = &client[dest_id][client_id];
+
+	if (svc->svc_cnt > 0) {
+		if (svc->port_cnt)
+			svc->port_cnt--;
+		svc->svc_cnt--;
+		if (!svc->svc_cnt) {
+			client[dest_id][client_id].svc_cnt--;
+			pr_debug("%s: service is reset %pK\n", __func__, svc);
+		}
+	}
+
+	if (!svc->svc_cnt) {
+		svc->priv = NULL;
+		svc->id = 0;
+		svc->fn = NULL;
+		svc->dest_id = 0;
+		svc->client_id = 0;
+		svc->need_reset = 0x0;
+	}
+	if (client[dest_id][client_id].handle &&
+	    !client[dest_id][client_id].svc_cnt) {
+		apr_tal_close(client[dest_id][client_id].handle);
+		client[dest_id][client_id].handle = NULL;
+	}
+	mutex_unlock(&svc->m_lock);
+
+	return 0;
+}
+
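+/*
+ * apr_reset() - schedule deregistration of a service handle on the
+ * apr_driver workqueue. The deferral (and the GFP_ATOMIC allocation)
+ * allows this to be called from atomic context, such as a subsystem
+ * restart notifier.
+ */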
+void apr_reset(void *handle)
+{
+	struct apr_reset_work *apr_reset_worker = NULL;
+
+	if (!handle)
+		return;
+	pr_debug("%s: handle[%pK]\n", __func__, handle);
+
+	if (apr_reset_workqueue == NULL) {
+		pr_err("%s: apr_reset_workqueue is NULL\n", __func__);
+		return;
+	}
+
+	apr_reset_worker = kzalloc(sizeof(struct apr_reset_work),
+							GFP_ATOMIC);
+
+	if (apr_reset_worker == NULL) {
+		pr_err("%s: mem failure\n", __func__);
+		return;
+	}
+
+	apr_reset_worker->handle = handle;
+	INIT_WORK(&apr_reset_worker->work, apr_reset_deregister);
+	queue_work(apr_reset_workqueue, &apr_reset_worker->work);
+}
+
+/* Dispatch reset events to the audio and voice clients of a processor */
+static void dispatch_event(unsigned long code, uint16_t proc)
+{
+	struct apr_client *apr_client;
+	struct apr_client_data data;
+	struct apr_svc *svc;
+	uint16_t clnt;
+	int i, j;
+
+	memset(&data, 0, sizeof(data));
+	data.opcode = RESET_EVENTS;
+	data.reset_event = code;
+
+	/* Service domain can be different from the processor */
+	data.reset_proc = apr_get_reset_domain(proc);
+
+	clnt = APR_CLIENT_AUDIO;
+	apr_client = &client[proc][clnt];
+	for (i = 0; i < APR_SVC_MAX; i++) {
+		mutex_lock(&apr_client->svc[i].m_lock);
+		if (apr_client->svc[i].fn) {
+			apr_client->svc[i].need_reset = 0x1;
+			apr_client->svc[i].fn(&data, apr_client->svc[i].priv);
+		}
+		if (apr_client->svc[i].port_cnt) {
+			svc = &(apr_client->svc[i]);
+			svc->need_reset = 0x1;
+			for (j = 0; j < APR_MAX_PORTS; j++)
+				if (svc->port_fn[j])
+					svc->port_fn[j](&data,
+						svc->port_priv[j]);
+		}
+		mutex_unlock(&apr_client->svc[i].m_lock);
+	}
+
+	clnt = APR_CLIENT_VOICE;
+	apr_client = &client[proc][clnt];
+	for (i = 0; i < APR_SVC_MAX; i++) {
+		mutex_lock(&apr_client->svc[i].m_lock);
+		if (apr_client->svc[i].fn) {
+			apr_client->svc[i].need_reset = 0x1;
+			apr_client->svc[i].fn(&data, apr_client->svc[i].priv);
+		}
+		if (apr_client->svc[i].port_cnt) {
+			svc = &(apr_client->svc[i]);
+			svc->need_reset = 0x1;
+			for (j = 0; j < APR_MAX_PORTS; j++)
+				if (svc->port_fn[j])
+					svc->port_fn[j](&data,
+						svc->port_priv[j]);
+		}
+		mutex_unlock(&apr_client->svc[i].m_lock);
+	}
+}
+
+static int apr_notifier_service_cb(struct notifier_block *this,
+				   unsigned long opcode, void *data)
+{
+	struct audio_notifier_cb_data *cb_data = data;
+
+	if (cb_data == NULL) {
+		pr_err("%s: Callback data is NULL!\n", __func__);
+		goto done;
+	}
+
+	pr_debug("%s: Service opcode 0x%lx, domain %d\n",
+		__func__, opcode, cb_data->domain);
+
+	switch (opcode) {
+	case AUDIO_NOTIFIER_SERVICE_DOWN:
+		/*
+		 * Use flag to ignore down notifications during
+		 * initial boot. There is no benefit from error
+		 * recovery notifications during initial boot
+		 * up since everything is expected to be down.
+		 */
+		if (is_initial_boot) {
+			is_initial_boot = false;
+			break;
+		}
+		if (cb_data->domain == AUDIO_NOTIFIER_MODEM_DOMAIN)
+			apr_modem_down(opcode);
+		else
+			apr_adsp_down(opcode);
+		break;
+	case AUDIO_NOTIFIER_SERVICE_UP:
+		is_initial_boot = false;
+		if (cb_data->domain == AUDIO_NOTIFIER_MODEM_DOMAIN)
+			apr_modem_up();
+		else
+			apr_adsp_up();
+		break;
+	default:
+		break;
+	}
+done:
+	return NOTIFY_OK;
+}
+
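+/*
+ * The ADSP and modem domains share apr_notifier_service_cb(); the
+ * affected domain arrives in cb_data->domain, so two notifier blocks
+ * exist only because each domain registration consumes one block.
+ */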
+static struct notifier_block adsp_service_nb = {
+	.notifier_call  = apr_notifier_service_cb,
+	.priority = 0,
+};
+
+static struct notifier_block modem_service_nb = {
+	.notifier_call  = apr_notifier_service_cb,
+	.priority = 0,
+};
+
+static int __init apr_init(void)
+{
+	int i, j, k;
+
+	for (i = 0; i < APR_DEST_MAX; i++)
+		for (j = 0; j < APR_CLIENT_MAX; j++) {
+			mutex_init(&client[i][j].m_lock);
+			for (k = 0; k < APR_SVC_MAX; k++) {
+				mutex_init(&client[i][j].svc[k].m_lock);
+				spin_lock_init(&client[i][j].svc[k].w_lock);
+			}
+		}
+	apr_set_subsys_state();
+	mutex_init(&q6.lock);
+	apr_reset_workqueue = create_singlethread_workqueue("apr_driver");
+	if (!apr_reset_workqueue)
+		return -ENOMEM;
+
+	apr_pkt_ctx = ipc_log_context_create(APR_PKT_IPC_LOG_PAGE_CNT,
+						"apr", 0);
+	if (!apr_pkt_ctx)
+		pr_err("%s: Unable to create ipc log context\n", __func__);
+
+	is_initial_boot = true;
+	subsys_notif_register("apr_adsp", AUDIO_NOTIFIER_ADSP_DOMAIN,
+			      &adsp_service_nb);
+	subsys_notif_register("apr_modem", AUDIO_NOTIFIER_MODEM_DOMAIN,
+			      &modem_service_nb);
+
+	return 0;
+}
+device_initcall(apr_init);
+
+static int __init apr_late_init(void)
+{
+	init_waitqueue_head(&dsp_wait);
+	init_waitqueue_head(&modem_wait);
+
+	return 0;
+}
+late_initcall(apr_late_init);
+
+#ifdef CONFIG_DEBUG_FS
+static int __init apr_debug_init(void)
+{
+	debugfs_apr_debug = debugfs_create_file("msm_apr_debug",
+						 S_IFREG | S_IRUGO, NULL, NULL,
+						 &apr_debug_ops);
+	return 0;
+}
+device_initcall(apr_debug_init);
+#endif
diff -Nruw linux-4.4.115-fbx/drivers/soc/qcom/qdsp6v2./apr_tal_glink.c linux-4.4.115-fbx/drivers/soc/qcom/qdsp6v2/apr_tal_glink.c
--- linux-4.4.115-fbx/drivers/soc/qcom/qdsp6v2./apr_tal_glink.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/soc/qcom/qdsp6v2/apr_tal_glink.c	2019-10-29 09:26:24.821214706 +0100
@@ -0,0 +1,452 @@
+/* Copyright (c) 2016-2017 The Linux Foundation.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/uaccess.h>
+#include <linux/spinlock.h>
+#include <linux/mutex.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+#include <linux/errno.h>
+#include <linux/fs.h>
+#include <linux/slab.h>
+#include <linux/debugfs.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+#include <linux/clk.h>
+#include <soc/qcom/smd.h>
+#include <soc/qcom/glink.h>
+#include <linux/qdsp6v2/apr_tal.h>
+
+#define APR_MAXIMUM_NUM_OF_RETRIES 2
+
+struct apr_tx_buf {
+	struct apr_pkt_priv pkt_priv;
+	char buf[APR_MAX_BUF];
+};
+
+struct link_state {
+	uint32_t dest;
+	void *handle;
+	enum glink_link_state link_state;
+	wait_queue_head_t wait;
+};
+
+static struct link_state link_state[APR_DEST_MAX];
+
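+/*
+ * G-LINK channel names, indexed by [destination][client]. Both the
+ * modem and ADSP destinations use the same audio/voice channel names;
+ * they are distinguished by the edge selected in apr_tal_open().
+ */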
+static char *svc_names[APR_DEST_MAX][APR_CLIENT_MAX] = {
+	{
+		"apr_audio_svc",
+		"apr_voice_svc",
+	},
+	{
+		"apr_audio_svc",
+		"apr_voice_svc",
+	},
+};
+
+static struct apr_svc_ch_dev
+	apr_svc_ch[APR_DL_MAX][APR_DEST_MAX][APR_CLIENT_MAX];
+
+static struct apr_tx_buf *apr_alloc_buf(int len)
+{
+	if (len > APR_MAX_BUF) {
+		pr_err("%s: buf too large [%d]\n", __func__, len);
+		return ERR_PTR(-EINVAL);
+	}
+
+	return kzalloc(sizeof(struct apr_tx_buf), GFP_ATOMIC);
+}
+
+static void apr_free_buf(const void *ptr)
+{
+	struct apr_pkt_priv *apr_pkt_priv = (struct apr_pkt_priv *)ptr;
+	struct apr_tx_buf *tx_buf;
+
+	if (!apr_pkt_priv) {
+		pr_err("%s: Invalid apr_pkt_priv\n", __func__);
+		return;
+	}
+
+	if (apr_pkt_priv->pkt_owner == APR_PKT_OWNER_DRIVER) {
+		tx_buf = container_of((void *)apr_pkt_priv,
+				      struct apr_tx_buf, pkt_priv);
+		pr_debug("%s: Freeing buffer %pK", __func__, tx_buf);
+		kfree(tx_buf);
+	}
+}
+
+static int __apr_tal_write(struct apr_svc_ch_dev *apr_ch, void *data,
+			   struct apr_pkt_priv *pkt_priv, int len)
+{
+	int rc = 0;
+	unsigned long flags;
+
+	spin_lock_irqsave(&apr_ch->w_lock, flags);
+	rc = glink_tx(apr_ch->handle, pkt_priv, data, len, GLINK_TX_ATOMIC);
+	spin_unlock_irqrestore(&apr_ch->w_lock, flags);
+
+	if (rc)
+		pr_err("%s: glink_tx failed, rc[%d]\n", __func__, rc);
+	else
+		rc = len;
+
+	return rc;
+}
+
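+/*
+ * apr_tal_write() - transmit one APR packet over G-LINK. Driver-owned
+ * packets are first copied into a freshly allocated apr_tx_buf so the
+ * caller may reuse its buffer immediately; the copy is freed from the
+ * tx_done/tx_abort callbacks. Transient -EAGAIN errors from glink_tx()
+ * are retried up to APR_MAXIMUM_NUM_OF_RETRIES times with a short delay.
+ */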
+int apr_tal_write(struct apr_svc_ch_dev *apr_ch, void *data,
+		  struct apr_pkt_priv *pkt_priv, int len)
+{
+	int rc = 0, retries = 0;
+	void *pkt_data = NULL;
+	struct apr_tx_buf *tx_buf = NULL;
+	struct apr_pkt_priv *pkt_priv_ptr = pkt_priv;
+
+	if (!apr_ch->handle || !pkt_priv)
+		return -EINVAL;
+
+	if (pkt_priv->pkt_owner == APR_PKT_OWNER_DRIVER) {
+		tx_buf = apr_alloc_buf(len);
+		if (IS_ERR_OR_NULL(tx_buf)) {
+			rc = -EINVAL;
+			goto exit;
+		}
+		memcpy(tx_buf->buf, data, len);
+		memcpy(&tx_buf->pkt_priv, pkt_priv, sizeof(tx_buf->pkt_priv));
+		pkt_priv_ptr = &tx_buf->pkt_priv;
+		pkt_data = tx_buf->buf;
+	} else {
+		pkt_data = data;
+	}
+
+	do {
+		if (rc == -EAGAIN)
+			udelay(50);
+
+		rc = __apr_tal_write(apr_ch, pkt_data, pkt_priv_ptr, len);
+	} while (rc == -EAGAIN && retries++ < APR_MAXIMUM_NUM_OF_RETRIES);
+
+	if (rc < 0) {
+		pr_err("%s: Unable to send the packet, rc:%d\n", __func__, rc);
+		if (pkt_priv->pkt_owner == APR_PKT_OWNER_DRIVER)
+			kfree(tx_buf);
+	}
+exit:
+	return rc;
+}
+
+void apr_tal_notify_rx(void *handle, const void *priv, const void *pkt_priv,
+		       const void *ptr, size_t size)
+{
+	struct apr_svc_ch_dev *apr_ch = (struct apr_svc_ch_dev *)priv;
+	unsigned long flags;
+
+	if (!apr_ch || !ptr) {
+		pr_err("%s: Invalid apr_ch or ptr\n", __func__);
+		return;
+	}
+
+	pr_debug("%s: Rx packet received\n", __func__);
+
+	spin_lock_irqsave(&apr_ch->r_lock, flags);
+	if (apr_ch->func)
+		apr_ch->func((void *)ptr, size, (void *)pkt_priv);
+	spin_unlock_irqrestore(&apr_ch->r_lock, flags);
+	glink_rx_done(apr_ch->handle, ptr, true);
+}
+
+static void apr_tal_notify_tx_abort(void *handle, const void *priv,
+				    const void *pkt_priv)
+{
+	pr_debug("%s: tx_abort received for pkt_priv:%pK\n",
+		 __func__, pkt_priv);
+	apr_free_buf(pkt_priv);
+}
+
+void apr_tal_notify_tx_done(void *handle, const void *priv,
+			    const void *pkt_priv, const void *ptr)
+{
+	pr_debug("%s: tx_done received for pkt_priv:%pK\n",
+		 __func__, pkt_priv);
+	apr_free_buf(pkt_priv);
+}
+
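+/*
+ * Rx intents are pre-queued locally in apr_tal_open(), so on-demand
+ * intent requests from the remote side are always refused here.
+ */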
+bool apr_tal_notify_rx_intent_req(void *handle, const void *priv,
+				  size_t req_size)
+{
+	struct apr_svc_ch_dev *apr_ch = (struct apr_svc_ch_dev *)priv;
+
+	if (!apr_ch) {
+		pr_err("%s: Invalid apr_ch\n", __func__);
+		return false;
+	}
+
+	pr_err("%s: No rx intents queued, unable to receive\n", __func__);
+	return false;
+}
+
+static void apr_tal_notify_remote_rx_intent(void *handle, const void *priv,
+					    size_t size)
+{
+	struct apr_svc_ch_dev *apr_ch = (struct apr_svc_ch_dev *)priv;
+
+	if (!apr_ch) {
+		pr_err("%s: Invalid apr_ch\n", __func__);
+		return;
+	}
+	/*
+	 * This is to make sure that the far end has queued at least one intent
+	 * before we attempt any IPC. A simple bool flag is used here instead of
+	 * a counter, as the far end is required to guarantee intent
+	 * availability for all use cases once the channel is fully opened.
+	 */
+	pr_debug("%s: remote queued an intent\n", __func__);
+	apr_ch->if_remote_intent_ready = true;
+	wake_up(&apr_ch->wait);
+}
+
+void apr_tal_notify_state(void *handle, const void *priv, unsigned int event)
+{
+	struct apr_svc_ch_dev *apr_ch = (struct apr_svc_ch_dev *)priv;
+
+	if (!apr_ch) {
+		pr_err("%s: Invalid apr_ch\n", __func__);
+		return;
+	}
+
+	apr_ch->channel_state = event;
+	pr_info("%s: Channel state[%d]\n", __func__, event);
+
+	if (event == GLINK_CONNECTED)
+		wake_up(&apr_ch->wait);
+}
+
+int apr_tal_rx_intents_config(struct apr_svc_ch_dev *apr_ch,
+			      int num_of_intents, uint32_t size)
+{
+	int i;
+	int rc;
+
+	if (!apr_ch || !num_of_intents || !size) {
+		pr_err("%s: Invalid parameter\n", __func__);
+		return -EINVAL;
+	}
+
+	for (i = 0; i < num_of_intents; i++) {
+		rc = glink_queue_rx_intent(apr_ch->handle, apr_ch, size);
+		if (rc) {
+			pr_err("%s: Failed to queue rx intent, iteration[%d]\n",
+			       __func__, i);
+			break;
+		}
+	}
+
+	return rc;
+}
+
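+/*
+ * apr_tal_open() - bring up one APR channel. The sequence is: wait for
+ * the G-LINK link to report GLINK_LINK_STATE_UP, glink_open() the
+ * channel, wait for GLINK_CONNECTED, wait (non-fatally) for the remote
+ * side to queue an rx intent, then queue the local rx intents. A
+ * minimal usage sketch; dl must be a valid APR_DL_* value and rx_cb a
+ * caller-supplied apr_svc_cb_fn, both assumed here:
+ *
+ *	struct apr_svc_ch_dev *ch;
+ *
+ *	ch = apr_tal_open(APR_CLIENT_AUDIO, APR_DEST_QDSP6, dl,
+ *			  rx_cb, NULL);
+ *	if (!ch)
+ *		pr_err("open failed\n");
+ */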
+struct apr_svc_ch_dev *apr_tal_open(uint32_t clnt, uint32_t dest, uint32_t dl,
+				    apr_svc_cb_fn func, void *priv)
+{
+	int rc;
+	struct glink_open_config open_cfg;
+	struct apr_svc_ch_dev *apr_ch;
+
+	if ((clnt >= APR_CLIENT_MAX) || (dest >= APR_DEST_MAX) ||
+	    (dl >= APR_DL_MAX)) {
+		pr_err("%s: Invalid params, clnt:%d, dest:%d, dl:%d\n",
+		       __func__, clnt, dest, dl);
+		return NULL;
+	}
+
+	apr_ch = &apr_svc_ch[dl][dest][clnt];
+	mutex_lock(&apr_ch->m_lock);
+	if (apr_ch->handle) {
+		pr_err("%s: This channel is already opened\n", __func__);
+		rc = -EBUSY;
+		goto unlock;
+	}
+
+	if (link_state[dest].link_state != GLINK_LINK_STATE_UP) {
+		rc = wait_event_timeout(link_state[dest].wait,
+			link_state[dest].link_state == GLINK_LINK_STATE_UP,
+			msecs_to_jiffies(APR_OPEN_TIMEOUT_MS));
+		if (rc == 0) {
+			pr_err("%s: Open timeout, dest:%d\n", __func__, dest);
+			rc = -ETIMEDOUT;
+			goto unlock;
+		}
+		pr_debug("%s: Wakeup done, dest:%d\n", __func__, dest);
+	}
+
+	memset(&open_cfg, 0, sizeof(struct glink_open_config));
+	open_cfg.options = GLINK_OPT_INITIAL_XPORT;
+	if (dest == APR_DEST_MODEM)
+		open_cfg.edge = "mpss";
+	else
+		open_cfg.edge = "lpass";
+
+	open_cfg.name = svc_names[dest][clnt];
+	open_cfg.notify_rx = apr_tal_notify_rx;
+	open_cfg.notify_tx_done = apr_tal_notify_tx_done;
+	open_cfg.notify_state = apr_tal_notify_state;
+	open_cfg.notify_rx_intent_req = apr_tal_notify_rx_intent_req;
+	open_cfg.notify_remote_rx_intent = apr_tal_notify_remote_rx_intent;
+	open_cfg.notify_tx_abort = apr_tal_notify_tx_abort;
+	open_cfg.priv = apr_ch;
+	open_cfg.transport = "smem";
+
+	apr_ch->channel_state = GLINK_REMOTE_DISCONNECTED;
+	apr_ch->handle = glink_open(&open_cfg);
+	if (IS_ERR_OR_NULL(apr_ch->handle)) {
+		pr_err("%s: glink_open failed %s\n", __func__,
+		       svc_names[dest][clnt]);
+		apr_ch->handle = NULL;
+		rc = -EINVAL;
+		goto unlock;
+	}
+
+	rc = wait_event_timeout(apr_ch->wait,
+		(apr_ch->channel_state == GLINK_CONNECTED), 5 * HZ);
+	if (rc == 0) {
+		pr_err("%s: TIMEOUT for OPEN event\n", __func__);
+		rc = -ETIMEDOUT;
+		goto close_link;
+	}
+
+	/*
+	 * Remote intent is not required for GLINK <--> SMD IPC, so this is
+	 * designed not to fail the open call.
+	 */
+	rc = wait_event_timeout(apr_ch->wait,
+		apr_ch->if_remote_intent_ready, 5 * HZ);
+	if (rc == 0)
+		pr_err("%s: TIMEOUT for remote intent readiness\n", __func__);
+
+	rc = apr_tal_rx_intents_config(apr_ch, APR_DEFAULT_NUM_OF_INTENTS,
+				       APR_MAX_BUF);
+	if (rc) {
+		pr_err("%s: Unable to queue intents\n", __func__);
+		goto close_link;
+	}
+
+	apr_ch->func = func;
+	apr_ch->priv = priv;
+
+close_link:
+	if (rc) {
+		glink_close(apr_ch->handle);
+		apr_ch->handle = NULL;
+	}
+unlock:
+	mutex_unlock(&apr_ch->m_lock);
+
+	return rc ? NULL : apr_ch;
+}
+
+int apr_tal_close(struct apr_svc_ch_dev *apr_ch)
+{
+	int rc;
+
+	if (!apr_ch || !apr_ch->handle) {
+		rc = -EINVAL;
+		goto exit;
+	}
+
+	mutex_lock(&apr_ch->m_lock);
+	rc = glink_close(apr_ch->handle);
+	apr_ch->handle = NULL;
+	apr_ch->func = NULL;
+	apr_ch->priv = NULL;
+	apr_ch->if_remote_intent_ready = false;
+	mutex_unlock(&apr_ch->m_lock);
+exit:
+	return rc;
+}
+
+static void apr_tal_link_state_cb(struct glink_link_state_cb_info *cb_info,
+				  void *priv)
+{
+	uint32_t dest;
+
+	if (!cb_info) {
+		pr_err("%s: Invalid cb_info\n", __func__);
+		return;
+	}
+
+	if (!strcmp(cb_info->edge, "mpss")) {
+		dest = APR_DEST_MODEM;
+	} else if (!strcmp(cb_info->edge, "lpass")) {
+		dest = APR_DEST_QDSP6;
+	} else {
+		pr_err("%s: Unknown edge[%s]\n", __func__, cb_info->edge);
+		return;
+	}
+
+	pr_info("%s: edge[%s] link state[%d]\n", __func__, cb_info->edge,
+		cb_info->link_state);
+
+	link_state[dest].link_state = cb_info->link_state;
+	if (link_state[dest].link_state == GLINK_LINK_STATE_UP)
+		wake_up(&link_state[dest].wait);
+}
+
+static struct glink_link_info mpss_link_info = {
+	.transport = "smem",
+	.edge = "mpss",
+	.glink_link_state_notif_cb = apr_tal_link_state_cb,
+};
+
+static struct glink_link_info lpass_link_info = {
+	.transport = "smem",
+	.edge = "lpass",
+	.glink_link_state_notif_cb = apr_tal_link_state_cb,
+};
+
+static int __init apr_tal_init(void)
+{
+	int i, j, k;
+
+	for (i = 0; i < APR_DL_MAX; i++) {
+		for (j = 0; j < APR_DEST_MAX; j++) {
+			for (k = 0; k < APR_CLIENT_MAX; k++) {
+				init_waitqueue_head(&apr_svc_ch[i][j][k].wait);
+				spin_lock_init(&apr_svc_ch[i][j][k].w_lock);
+				spin_lock_init(&apr_svc_ch[i][j][k].r_lock);
+				mutex_init(&apr_svc_ch[i][j][k].m_lock);
+			}
+		}
+	}
+
+	for (i = 0; i < APR_DEST_MAX; i++)
+		init_waitqueue_head(&link_state[i].wait);
+
+	link_state[APR_DEST_MODEM].link_state = GLINK_LINK_STATE_DOWN;
+	link_state[APR_DEST_MODEM].handle =
+		glink_register_link_state_cb(&mpss_link_info, NULL);
+	if (!link_state[APR_DEST_MODEM].handle)
+		pr_err("%s: Unable to register mpss link state\n", __func__);
+
+	link_state[APR_DEST_QDSP6].link_state = GLINK_LINK_STATE_DOWN;
+	link_state[APR_DEST_QDSP6].handle =
+		glink_register_link_state_cb(&lpass_link_info, NULL);
+	if (!link_state[APR_DEST_QDSP6].handle)
+		pr_err("%s: Unable to register lpass link state\n", __func__);
+
+	return 0;
+}
+device_initcall(apr_tal_init);
diff -Nruw linux-4.4.115-fbx/drivers/soc/qcom/qdsp6v2./apr_v2.c linux-4.4.115-fbx/drivers/soc/qcom/qdsp6v2/apr_v2.c
--- linux-4.4.115-fbx/drivers/soc/qcom/qdsp6v2./apr_v2.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/soc/qcom/qdsp6v2/apr_v2.c	2019-10-29 09:26:24.821214706 +0100
@@ -0,0 +1,68 @@
+/*
+ * Copyright (c) 2012-2016 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/types.h>
+#include <linux/uaccess.h>
+#include <linux/spinlock.h>
+#include <linux/kernel.h>
+#include <linux/qdsp6v2/apr.h>
+#include <linux/qdsp6v2/apr_tal.h>
+#include <linux/qdsp6v2/dsp_debug.h>
+#include <linux/qdsp6v2/audio_notifier.h>
+
+enum apr_subsys_state apr_get_subsys_state(void)
+{
+	return apr_get_q6_state();
+}
+
+void apr_set_subsys_state(void)
+{
+	apr_set_q6_state(APR_SUBSYS_DOWN);
+	apr_set_modem_state(APR_SUBSYS_UP);
+}
+
+uint16_t apr_get_data_src(struct apr_hdr *hdr)
+{
+	if (hdr->src_domain == APR_DOMAIN_MODEM) {
+		return APR_DEST_MODEM;
+	} else if (hdr->src_domain == APR_DOMAIN_ADSP) {
+		return APR_DEST_QDSP6;
+	} else {
+		pr_err("APR: Pkt from wrong source: %d\n", hdr->src_domain);
+		return APR_DEST_MAX;	/* return an invalid value */
+	}
+}
+
+int apr_get_dest_id(char *dest)
+{
+	if (!strcmp(dest, "ADSP"))
+		return APR_DEST_QDSP6;
+	else
+		return APR_DEST_MODEM;
+}
+
+void subsys_notif_register(char *client_name, int domain,
+			   struct notifier_block *nb)
+{
+	int ret;
+
+	ret = audio_notifier_register(client_name, domain, nb);
+	if (ret < 0)
+		pr_err("%s: Audio notifier register failed for domain %d ret = %d\n",
+			__func__, domain, ret);
+}
+
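+/*
+ * In APR v2 the service domain always matches the processor, so the
+ * reset domain is the processor itself; the v3 variant presumably maps
+ * this differently (see the comment in dispatch_event()).
+ */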
+uint16_t apr_get_reset_domain(uint16_t proc)
+{
+	return proc;
+}
diff -Nruw linux-4.4.115-fbx/drivers/soc/qcom/qdsp6v2./audio_notifier.c linux-4.4.115-fbx/drivers/soc/qcom/qdsp6v2/audio_notifier.c
--- linux-4.4.115-fbx/drivers/soc/qcom/qdsp6v2./audio_notifier.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/soc/qcom/qdsp6v2/audio_notifier.c	2019-01-22 16:16:26.667275058 +0100
@@ -0,0 +1,636 @@
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/qdsp6v2/audio_pdr.h>
+#include <linux/qdsp6v2/audio_ssr.h>
+#include <linux/qdsp6v2/audio_notifier.h>
+#include <soc/qcom/scm.h>
+#include <soc/qcom/subsystem_notif.h>
+#include <soc/qcom/service-notifier.h>
+
+/*
+ * Audio states internal to the notifier. Clients use the states defined
+ * in audio_notifier.h (AUDIO_NOTIFIER_SERVICE_DOWN and _UP).
+ */
+#define NO_SERVICE -2
+#define UNINIT_SERVICE -1
+
+/*
+ * Used for each client registered with audio notifier
+ */
+struct client_data {
+	struct list_head        list;
+	/* Notifier block given by client */
+	struct notifier_block   *nb;
+	char                    client_name[20];
+	int                     service;
+	int                     domain;
+};
+
+/*
+ * Used for each service and domain combination
+ * Tracks information specific to the underlying
+ * service.
+ */
+struct service_info {
+	const char                      name[20];
+	int                             domain_id;
+	int                             state;
+	void                            *handle;
+	/* Notifier block registered to service */
+	struct notifier_block           *nb;
+	/* Used to determine when to register and deregister service */
+	int                             num_of_clients;
+	/* List of all clients registered to the service and domain */
+	struct srcu_notifier_head       client_nb_list;
+};
+
+static int audio_notifer_ssr_adsp_cb(struct notifier_block *this,
+				     unsigned long opcode, void *data);
+static int audio_notifer_ssr_modem_cb(struct notifier_block *this,
+				     unsigned long opcode, void *data);
+static int audio_notifer_pdr_adsp_cb(struct notifier_block *this,
+				     unsigned long opcode, void *data);
+
+static struct notifier_block notifier_ssr_adsp_nb = {
+	.notifier_call  = audio_notifer_ssr_adsp_cb,
+	.priority = 0,
+};
+
+static struct notifier_block notifier_ssr_modem_nb = {
+	.notifier_call  = audio_notifer_ssr_modem_cb,
+	.priority = 0,
+};
+
+static struct notifier_block notifier_pdr_adsp_nb = {
+	.notifier_call  = audio_notifer_pdr_adsp_cb,
+	.priority = 0,
+};
+
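+/*
+ * service_data[service][domain].state overloads two ranges: the
+ * internal states NO_SERVICE/UNINIT_SERVICE (negative) and the
+ * client-visible AUDIO_NOTIFIER_SERVICE_DOWN/UP opcodes.
+ */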
+static struct service_info service_data[AUDIO_NOTIFIER_MAX_SERVICES]
+				       [AUDIO_NOTIFIER_MAX_DOMAINS] = {
+
+	{{
+		.name = "SSR_ADSP",
+		.domain_id = AUDIO_SSR_DOMAIN_ADSP,
+		.state = AUDIO_NOTIFIER_SERVICE_DOWN,
+		.nb = &notifier_ssr_adsp_nb
+	 },
+	 {
+		.name = "SSR_MODEM",
+		.domain_id = AUDIO_SSR_DOMAIN_MODEM,
+		.state = AUDIO_NOTIFIER_SERVICE_DOWN,
+		.nb = &notifier_ssr_modem_nb
+	} },
+
+	{{
+		.name = "PDR_ADSP",
+		.domain_id = AUDIO_PDR_DOMAIN_ADSP,
+		.state = UNINIT_SERVICE,
+		.nb = &notifier_pdr_adsp_nb
+	 },
+	 {	/* PDR MODEM service not enabled */
+		.name = "INVALID",
+		.state = NO_SERVICE,
+		.nb = NULL
+	} }
+};
+
+/* Master list of all audio notifier clients */
+struct list_head   client_list;
+struct mutex       notifier_mutex;
+
+static int audio_notifer_get_default_service(int domain)
+{
+	int service = NO_SERVICE;
+
+	/* initial service to connect per domain */
+	switch (domain) {
+	case AUDIO_NOTIFIER_ADSP_DOMAIN:
+		service = AUDIO_NOTIFIER_PDR_SERVICE;
+		break;
+	case AUDIO_NOTIFIER_MODEM_DOMAIN:
+		service = AUDIO_NOTIFIER_SSR_SERVICE;
+		break;
+	}
+
+	return service;
+}
+
+static void audio_notifer_disable_service(int service)
+{
+	int i;
+
+	for (i = 0; i < AUDIO_NOTIFIER_MAX_DOMAINS; i++)
+		service_data[service][i].state = NO_SERVICE;
+}
+
+static bool audio_notifer_is_service_enabled(int service)
+{
+	int i;
+
+	for (i = 0; i < AUDIO_NOTIFIER_MAX_DOMAINS; i++)
+		if (service_data[service][i].state != NO_SERVICE)
+			return true;
+	return false;
+}
+
+static void audio_notifer_init_service(int service)
+{
+	int i;
+
+	for (i = 0; i < AUDIO_NOTIFIER_MAX_DOMAINS; i++) {
+		if (service_data[service][i].state == UNINIT_SERVICE)
+			service_data[service][i].state =
+				AUDIO_NOTIFIER_SERVICE_DOWN;
+	}
+}
+
+static int audio_notifer_reg_service(int service, int domain)
+{
+	void *handle;
+	int ret = 0;
+	int curr_state = AUDIO_NOTIFIER_SERVICE_DOWN;
+
+	switch (service) {
+	case AUDIO_NOTIFIER_SSR_SERVICE:
+		handle = audio_ssr_register(
+			service_data[service][domain].domain_id,
+			service_data[service][domain].nb);
+		break;
+	case AUDIO_NOTIFIER_PDR_SERVICE:
+		handle = audio_pdr_service_register(
+			service_data[service][domain].domain_id,
+			service_data[service][domain].nb, &curr_state);
+
+		if (curr_state == SERVREG_NOTIF_SERVICE_STATE_UP_V01)
+			curr_state = AUDIO_NOTIFIER_SERVICE_UP;
+		else
+			curr_state = AUDIO_NOTIFIER_SERVICE_DOWN;
+		break;
+	default:
+		pr_err("%s: Invalid service %d\n",
+			__func__, service);
+		ret = -EINVAL;
+		goto done;
+	}
+	if (IS_ERR_OR_NULL(handle)) {
+		pr_err("%s: handle is incorrect for service %s\n",
+			__func__, service_data[service][domain].name);
+		ret = -EINVAL;
+		goto done;
+	}
+	service_data[service][domain].state = curr_state;
+	service_data[service][domain].handle = handle;
+
+	pr_info("%s: service %s is in use\n",
+		__func__, service_data[service][domain].name);
+	pr_debug("%s: service %s has current state %d, handle 0x%pK\n",
+		__func__, service_data[service][domain].name,
+		service_data[service][domain].state,
+		service_data[service][domain].handle);
+done:
+	return ret;
+}
+
+static int audio_notifer_dereg_service(int service, int domain)
+{
+	int ret;
+
+	switch (service) {
+	case AUDIO_NOTIFIER_SSR_SERVICE:
+		ret = audio_ssr_deregister(
+			service_data[service][domain].handle,
+			service_data[service][domain].nb);
+		break;
+	case AUDIO_NOTIFIER_PDR_SERVICE:
+		ret = audio_pdr_service_deregister(
+			service_data[service][domain].handle,
+			service_data[service][domain].nb);
+		break;
+	default:
+		pr_err("%s: Invalid service %d\n",
+			__func__, service);
+		ret = -EINVAL;
+		goto done;
+	}
+	if (IS_ERR_VALUE(ret)) {
+		pr_err("%s: deregister failed for service %s, ret %d\n",
+			__func__, service_data[service][domain].name, ret);
+		goto done;
+	}
+
+	pr_debug("%s: service %s with handle 0x%pK deregistered\n",
+		__func__, service_data[service][domain].name,
+		service_data[service][domain].handle);
+
+	service_data[service][domain].state = AUDIO_NOTIFIER_SERVICE_DOWN;
+	service_data[service][domain].handle = NULL;
+done:
+	return ret;
+}
+
+static int audio_notifer_reg_client_service(struct client_data *client_data,
+					    int service)
+{
+	int ret = 0;
+	int domain = client_data->domain;
+	struct audio_notifier_cb_data data;
+
+	switch (service) {
+	case AUDIO_NOTIFIER_SSR_SERVICE:
+	case AUDIO_NOTIFIER_PDR_SERVICE:
+		if (service_data[service][domain].num_of_clients == 0)
+			ret = audio_notifer_reg_service(service, domain);
+		break;
+	default:
+		pr_err("%s: Invalid service for client %s, service %d, domain %d\n",
+			__func__, client_data->client_name, service, domain);
+		ret = -EINVAL;
+		goto done;
+	}
+
+	if (IS_ERR_VALUE(ret)) {
+		pr_err("%s: service registration failed on service %s for client %s\n",
+			__func__, service_data[service][domain].name,
+			client_data->client_name);
+		goto done;
+	}
+
+	client_data->service = service;
+	srcu_notifier_chain_register(
+		&service_data[service][domain].client_nb_list,
+		client_data->nb);
+	service_data[service][domain].num_of_clients++;
+
+	pr_debug("%s: registered client %s on service %s, current state 0x%x\n",
+		__func__, client_data->client_name,
+		service_data[service][domain].name,
+		service_data[service][domain].state);
+
+	/*
+	 * PDR registration returns the current service state, so force a
+	 * callback to the client with that state.
+	 */
+	if (client_data->service == AUDIO_NOTIFIER_PDR_SERVICE) {
+		data.service = service;
+		data.domain = domain;
+		(void)client_data->nb->notifier_call(client_data->nb,
+			service_data[service][domain].state, &data);
+	}
+done:
+	return ret;
+}
+
+static int audio_notifer_reg_client(struct client_data *client_data)
+{
+	int ret = 0;
+	int service;
+	int domain = client_data->domain;
+
+	service = audio_notifer_get_default_service(domain);
+	if (service < 0) {
+		pr_err("%s: service %d is incorrect\n", __func__, service);
+		ret = -EINVAL;
+		goto done;
+	}
+
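+	/*
+	 * Counting down from the default service assumes the enum
+	 * ordering in audio_notifier.h places PDR above SSR, so a
+	 * client falls back from PDR to SSR when PDR is unavailable.
+	 */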
+	/* Search through services to find a valid one to register client on. */
+	for (; service >= 0; service--) {
+		/* If a service is not initialized, wait for it to come up. */
+		if (service_data[service][domain].state == UNINIT_SERVICE)
+			goto done;
+		/* Skip unsupported service and domain combinations. */
+		if (service_data[service][domain].state < 0)
+			continue;
+		/* Only register clients who have not acquired a service. */
+		if (client_data->service != NO_SERVICE)
+			continue;
+
+		/*
+		 * Only register clients, who have not acquired a service, on
+		 * the best available service for their domain. Uninitialized
+		 * services will try to register all of their clients after
+		 * they initialize correctly or will disable their service and
+		 * register clients on the next best available service.
+		 */
+		pr_debug("%s: register client %s on service %s",
+				__func__, client_data->client_name,
+				service_data[service][domain].name);
+
+		ret = audio_notifer_reg_client_service(client_data, service);
+		if (IS_ERR_VALUE(ret))
+			pr_err("%s: client %s failed to register on service %s",
+				__func__, client_data->client_name,
+				service_data[service][domain].name);
+	}
+
+done:
+	return ret;
+}
+
+static int audio_notifer_dereg_client(struct client_data *client_data)
+{
+	int ret = 0;
+	int service = client_data->service;
+	int domain = client_data->domain;
+
+	switch (client_data->service) {
+	case AUDIO_NOTIFIER_SSR_SERVICE:
+	case AUDIO_NOTIFIER_PDR_SERVICE:
+		if (service_data[service][domain].num_of_clients == 1)
+			ret = audio_notifer_dereg_service(service, domain);
+		break;
+	case NO_SERVICE:
+		goto done;
+	default:
+		pr_err("%s: Invalid service for client %s, service %d\n",
+			__func__, client_data->client_name,
+			client_data->service);
+		ret = -EINVAL;
+		goto done;
+	}
+
+	if (IS_ERR_VALUE(ret)) {
+		pr_err("%s: deregister failed for client %s on service %s, ret %d\n",
+			__func__, client_data->client_name,
+			service_data[service][domain].name, ret);
+		goto done;
+	}
+
+	ret = srcu_notifier_chain_unregister(&service_data[service][domain].
+					     client_nb_list, client_data->nb);
+	if (IS_ERR_VALUE(ret)) {
+		pr_err("%s: srcu_notifier_chain_unregister failed, ret %d\n",
+			__func__, ret);
+		goto done;
+	}
+
+	pr_debug("%s: deregistered client %s on service %s\n",
+		__func__, client_data->client_name,
+		service_data[service][domain].name);
+
+	client_data->service = NO_SERVICE;
+	if (service_data[service][domain].num_of_clients > 0)
+		service_data[service][domain].num_of_clients--;
+done:
+	return ret;
+}
+
+static void audio_notifer_reg_all_clients(void)
+{
+	struct list_head *ptr, *next;
+	struct client_data *client_data;
+	int ret;
+
+	list_for_each_safe(ptr, next, &client_list) {
+		client_data = list_entry(ptr, struct client_data, list);
+
+		ret = audio_notifer_reg_client(client_data);
+		if (IS_ERR_VALUE(ret))
+			pr_err("%s: audio_notifer_reg_client failed for client %s, ret %d\n",
+				__func__, client_data->client_name,
+				ret);
+	}
+}
+
+static int audio_notifer_pdr_callback(struct notifier_block *this,
+				      unsigned long opcode, void *data)
+{
+	pr_debug("%s: Audio PDR framework state 0x%lx\n",
+		__func__, opcode);
+	mutex_lock(&notifier_mutex);
+	if (opcode == AUDIO_PDR_FRAMEWORK_DOWN)
+		audio_notifer_disable_service(AUDIO_NOTIFIER_PDR_SERVICE);
+	else
+		audio_notifer_init_service(AUDIO_NOTIFIER_PDR_SERVICE);
+
+	audio_notifer_reg_all_clients();
+	mutex_unlock(&notifier_mutex);
+	return 0;
+}
+
+static struct notifier_block pdr_nb = {
+	.notifier_call  = audio_notifer_pdr_callback,
+	.priority = 0,
+};
+
+static int audio_notifer_convert_opcode(unsigned long opcode,
+					unsigned long *notifier_opcode)
+{
+	int ret = 0;
+
+	switch (opcode) {
+	case SUBSYS_BEFORE_SHUTDOWN:
+	case SERVREG_NOTIF_SERVICE_STATE_DOWN_V01:
+		*notifier_opcode = AUDIO_NOTIFIER_SERVICE_DOWN;
+		break;
+	case SUBSYS_AFTER_POWERUP:
+	case SERVREG_NOTIF_SERVICE_STATE_UP_V01:
+		*notifier_opcode = AUDIO_NOTIFIER_SERVICE_UP;
+		break;
+	default:
+		pr_debug("%s: Unused opcode 0x%lx\n", __func__, opcode);
+		ret = -EINVAL;
+	}
+
+	return ret;
+}
+
+static int audio_notifer_service_cb(unsigned long opcode,
+				    int service, int domain)
+{
+	int ret = 0;
+	unsigned long notifier_opcode;
+	struct audio_notifier_cb_data data;
+
+	if (audio_notifer_convert_opcode(opcode, &notifier_opcode) < 0)
+		goto done;
+
+	data.service = service;
+	data.domain = domain;
+
+	pr_debug("%s: service %s, opcode 0x%lx\n",
+		__func__, service_data[service][domain].name, notifier_opcode);
+
+	mutex_lock(&notifier_mutex);
+
+	service_data[service][domain].state = notifier_opcode;
+	ret = srcu_notifier_call_chain(&service_data[service][domain].
+		client_nb_list, notifier_opcode, &data);
+	if (IS_ERR_VALUE(ret))
+		pr_err("%s: srcu_notifier_call_chain returned %d, service %s, opcode 0x%lx\n",
+			__func__, ret, service_data[service][domain].name,
+			notifier_opcode);
+
+	mutex_unlock(&notifier_mutex);
+done:
+	return NOTIFY_OK;
+}
+
+static int audio_notifer_pdr_adsp_cb(struct notifier_block *this,
+				     unsigned long opcode, void *data)
+{
+	return audio_notifer_service_cb(opcode,
+					AUDIO_NOTIFIER_PDR_SERVICE,
+					AUDIO_NOTIFIER_ADSP_DOMAIN);
+}
+
+static int audio_notifer_ssr_adsp_cb(struct notifier_block *this,
+				     unsigned long opcode, void *data)
+{
+	if (opcode == SUBSYS_BEFORE_SHUTDOWN)
+		audio_ssr_send_nmi(data);
+
+	return audio_notifer_service_cb(opcode,
+					AUDIO_NOTIFIER_SSR_SERVICE,
+					AUDIO_NOTIFIER_ADSP_DOMAIN);
+}
+
+static int audio_notifer_ssr_modem_cb(struct notifier_block *this,
+				      unsigned long opcode, void *data)
+{
+	return audio_notifer_service_cb(opcode,
+					AUDIO_NOTIFIER_SSR_SERVICE,
+					AUDIO_NOTIFIER_MODEM_DOMAIN);
+}
+
+int audio_notifier_deregister(char *client_name)
+{
+	int ret = 0;
+	int ret2;
+	struct list_head *ptr, *next;
+	struct client_data *client_data = NULL;
+
+	if (client_name == NULL) {
+		pr_err("%s: client_name is NULL\n", __func__);
+		ret = -EINVAL;
+		goto done;
+	}
+	mutex_lock(&notifier_mutex);
+	list_for_each_safe(ptr, next, &client_list) {
+		client_data = list_entry(ptr, struct client_data, list);
+		if (!strcmp(client_name, client_data->client_name)) {
+			ret2 = audio_notifer_dereg_client(client_data);
+			if (ret2 < 0) {
+				pr_err("%s: audio_notifer_dereg_client failed, ret %d\n, service %d, domain %d",
+					__func__, ret2, client_data->service,
+					client_data->domain);
+				ret = ret2;
+				continue;
+			}
+			list_del(&client_data->list);
+			kfree(client_data);
+		}
+	}
+	mutex_unlock(&notifier_mutex);
+done:
+	return ret;
+}
+EXPORT_SYMBOL(audio_notifier_deregister);
+
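+/*
+ * A client registers with a filled-in notifier_block. A minimal sketch;
+ * my_audio_cb is a caller-supplied notifier callback assumed here:
+ *
+ *	static struct notifier_block my_nb = {
+ *		.notifier_call = my_audio_cb,
+ *	};
+ *
+ *	audio_notifier_register("my_client", AUDIO_NOTIFIER_ADSP_DOMAIN,
+ *				&my_nb);
+ */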
+int audio_notifier_register(char *client_name, int domain,
+			    struct notifier_block *nb)
+{
+	int ret;
+	struct client_data *client_data;
+
+	if (client_name == NULL) {
+		pr_err("%s: client_name is NULL\n", __func__);
+		ret = -EINVAL;
+		goto done;
+	} else if (nb == NULL) {
+		pr_err("%s: Notifier block is NULL\n", __func__);
+		ret = -EINVAL;
+		goto done;
+	}
+
+	client_data = kmalloc(sizeof(*client_data), GFP_KERNEL);
+	if (client_data == NULL) {
+		ret = -ENOMEM;
+		goto done;
+	}
+	INIT_LIST_HEAD(&client_data->list);
+	client_data->nb = nb;
+	strlcpy(client_data->client_name, client_name,
+		sizeof(client_data->client_name));
+	client_data->service = NO_SERVICE;
+	client_data->domain = domain;
+
+	mutex_lock(&notifier_mutex);
+	ret = audio_notifer_reg_client(client_data);
+	if (IS_ERR_VALUE(ret)) {
+		mutex_unlock(&notifier_mutex);
+		pr_err("%s: audio_notifer_reg_client for client %s failed ret = %d\n",
+			__func__, client_data->client_name,
+			ret);
+		kfree(client_data);
+		goto done;
+	}
+	list_add_tail(&client_data->list, &client_list);
+	mutex_unlock(&notifier_mutex);
+done:
+	return ret;
+}
+EXPORT_SYMBOL(audio_notifier_register);
+
+static int __init audio_notifier_subsys_init(void)
+{
+	int i, j;
+
+	mutex_init(&notifier_mutex);
+	INIT_LIST_HEAD(&client_list);
+	for (i = 0; i < AUDIO_NOTIFIER_MAX_SERVICES; i++) {
+		for (j = 0; j < AUDIO_NOTIFIER_MAX_DOMAINS; j++) {
+			if (service_data[i][j].state <= NO_SERVICE)
+				continue;
+
+			srcu_init_notifier_head(
+				&service_data[i][j].client_nb_list);
+		}
+	}
+
+	return 0;
+}
+subsys_initcall(audio_notifier_subsys_init);
+
+static int __init audio_notifier_init(void)
+{
+	int ret;
+
+	ret = audio_pdr_register(&pdr_nb);
+	if (IS_ERR_VALUE(ret)) {
+		pr_debug("%s: PDR register failed, ret = %d, disable service\n",
+			__func__, ret);
+		audio_notifer_disable_service(AUDIO_NOTIFIER_PDR_SERVICE);
+	}
+
+	/* Do not return error since PDR enablement is not critical */
+	return 0;
+}
+module_init(audio_notifier_init);
+
+static int __init audio_notifier_late_init(void)
+{
+	/*
+	 * If PDR registration failed, register clients on the next service.
+	 * Do this in late init to ensure the SSR subsystem is initialized.
+	 */
+	mutex_lock(&notifier_mutex);
+	if (!audio_notifer_is_service_enabled(AUDIO_NOTIFIER_PDR_SERVICE))
+		audio_notifer_reg_all_clients();
+
+	mutex_unlock(&notifier_mutex);
+	return 0;
+}
+late_initcall(audio_notifier_late_init);
diff -Nruw linux-4.4.115-fbx/drivers/soc/qcom/qdsp6v2./audio_pdr.c linux-4.4.115-fbx/drivers/soc/qcom/qdsp6v2/audio_pdr.c
--- linux-4.4.115-fbx/drivers/soc/qcom/qdsp6v2./audio_pdr.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/soc/qcom/qdsp6v2/audio_pdr.c	2019-01-22 16:16:26.667275058 +0100
@@ -0,0 +1,148 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/qdsp6v2/audio_pdr.h>
+#include <soc/qcom/service-locator.h>
+#include <soc/qcom/service-notifier.h>
+
+static struct pd_qmi_client_data audio_pdr_services[AUDIO_PDR_DOMAIN_MAX] = {
+	{	/* AUDIO_PDR_DOMAIN_ADSP */
+		.client_name = "audio_pdr_adsp",
+		.service_name = "avs/audio"
+	}
+};
+
+struct srcu_notifier_head audio_pdr_cb_list;
+
+static int audio_pdr_locator_callback(struct notifier_block *this,
+				      unsigned long opcode, void *data)
+{
+	unsigned long pdr_state = AUDIO_PDR_FRAMEWORK_DOWN;
+
+	if (opcode == LOCATOR_DOWN) {
+		pr_debug("%s: Service %s is down!", __func__,
+			audio_pdr_services[AUDIO_PDR_DOMAIN_ADSP].
+			service_name);
+		goto done;
+	}
+
+	memcpy(&audio_pdr_services, data,
+		sizeof(audio_pdr_services[AUDIO_PDR_DOMAIN_ADSP]));
+	if (audio_pdr_services[AUDIO_PDR_DOMAIN_ADSP].total_domains == 1) {
+		pr_debug("%s: Service %s, returned total domains %d, ",
+			__func__,
+			audio_pdr_services[AUDIO_PDR_DOMAIN_ADSP].service_name,
+			audio_pdr_services[AUDIO_PDR_DOMAIN_ADSP].
+			total_domains);
+		pdr_state = AUDIO_PDR_FRAMEWORK_UP;
+		goto done;
+	} else {
+		pr_err("%s: Service %s returned invalid total domains %d\n",
+			__func__,
+			audio_pdr_services[AUDIO_PDR_DOMAIN_ADSP].service_name,
+			audio_pdr_services[AUDIO_PDR_DOMAIN_ADSP].
+			total_domains);
+	}
+done:
+	srcu_notifier_call_chain(&audio_pdr_cb_list, pdr_state, NULL);
+	return NOTIFY_OK;
+}
+
+static struct notifier_block audio_pdr_locator_nb = {
+	.notifier_call = audio_pdr_locator_callback,
+	.priority = 0,
+};
+
+int audio_pdr_register(struct notifier_block *nb)
+{
+	if (nb == NULL) {
+		pr_err("%s: Notifier block is NULL\n", __func__);
+		return -EINVAL;
+	}
+	return srcu_notifier_chain_register(&audio_pdr_cb_list, nb);
+}
+EXPORT_SYMBOL(audio_pdr_register);
+
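+/*
+ * Registers against the first (and, as validated in the locator
+ * callback above, only) domain returned by the service locator for
+ * the given domain_id.
+ */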
+void *audio_pdr_service_register(int domain_id,
+				 struct notifier_block *nb, int *curr_state)
+{
+	void *handle;
+
+	if ((domain_id < 0) ||
+	    (domain_id >= AUDIO_PDR_DOMAIN_MAX)) {
+		pr_err("%s: Invalid service ID %d\n", __func__, domain_id);
+		return ERR_PTR(-EINVAL);
+	}
+
+	handle = service_notif_register_notifier(
+		audio_pdr_services[domain_id].domain_list[0].name,
+		audio_pdr_services[domain_id].domain_list[0].instance_id,
+		nb, curr_state);
+	if (IS_ERR_OR_NULL(handle)) {
+		pr_err("%s: Failed to register for service %s, instance %d\n",
+			__func__,
+			audio_pdr_services[domain_id].domain_list[0].name,
+			audio_pdr_services[domain_id].domain_list[0].
+			instance_id);
+	}
+	return handle;
+}
+EXPORT_SYMBOL(audio_pdr_service_register);
+
+int audio_pdr_service_deregister(void *service_handle,
+	struct notifier_block *nb)
+{
+	int ret;
+
+	if (service_handle == NULL) {
+		pr_err("%s: service handle is NULL\n", __func__);
+		ret = -EINVAL;
+		goto done;
+	}
+
+	ret = service_notif_unregister_notifier(
+		service_handle, nb);
+	if (IS_ERR_VALUE(ret))
+		pr_err("%s: Failed to deregister service ret %d\n",
+			__func__, ret);
+done:
+	return ret;
+}
+EXPORT_SYMBOL(audio_pdr_service_deregister);
+
+static int __init audio_pdr_subsys_init(void)
+{
+	srcu_init_notifier_head(&audio_pdr_cb_list);
+	return 0;
+}
+subsys_initcall(audio_pdr_subsys_init);
+
+static int __init audio_pdr_late_init(void)
+{
+	int ret;
+
+	ret = get_service_location(
+		audio_pdr_services[AUDIO_PDR_DOMAIN_ADSP].client_name,
+		audio_pdr_services[AUDIO_PDR_DOMAIN_ADSP].service_name,
+		&audio_pdr_locator_nb);
+	if (IS_ERR_VALUE(ret)) {
+		pr_err("%s get_service_location failed ret %d\n",
+			__func__, ret);
+		srcu_notifier_call_chain(&audio_pdr_cb_list,
+					 AUDIO_PDR_FRAMEWORK_DOWN, NULL);
+	}
+
+	return ret;
+}
+late_initcall(audio_pdr_late_init);
diff -Nruw linux-4.4.115-fbx/drivers/soc/qcom/qdsp6v2./audio_ssr.c linux-4.4.115-fbx/drivers/soc/qcom/qdsp6v2/audio_ssr.c
--- linux-4.4.115-fbx/drivers/soc/qcom/qdsp6v2./audio_ssr.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/soc/qcom/qdsp6v2/audio_ssr.c	2019-01-22 16:16:26.667275058 +0100
@@ -0,0 +1,66 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/qdsp6v2/audio_ssr.h>
+#include <soc/qcom/scm.h>
+#include <soc/qcom/subsystem_restart.h>
+#include <soc/qcom/subsystem_notif.h>
+
+#define SCM_Q6_NMI_CMD 0x1
+
+static char *audio_ssr_domains[] = {
+	"adsp",
+	"modem"
+};
+
+void *audio_ssr_register(int domain_id, struct notifier_block *nb)
+{
+	if ((domain_id < 0) ||
+	    (domain_id >= AUDIO_SSR_DOMAIN_MAX)) {
+		pr_err("%s: Invalid service ID %d\n", __func__, domain_id);
+		return ERR_PTR(-EINVAL);
+	}
+
+	return subsys_notif_register_notifier(
+		audio_ssr_domains[domain_id], nb);
+}
+EXPORT_SYMBOL(audio_ssr_register);
+
+int audio_ssr_deregister(void *handle, struct notifier_block *nb)
+{
+	return subsys_notif_unregister_notifier(handle, nb);
+}
+EXPORT_SYMBOL(audio_ssr_deregister);
+
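+/*
+ * On an ADSP crash, poke the Q6 with an NMI via SCM before shutdown so
+ * it can flush its state; the SCM call encoding differs between ARMv7
+ * and ARMv8 targets.
+ */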
+void audio_ssr_send_nmi(void *ssr_cb_data)
+{
+	struct notif_data *data = (struct notif_data *)ssr_cb_data;
+	struct scm_desc desc;
+
+	if (data && data->crashed) {
+		/* Send NMI to QDSP6 via an SCM call. */
+		if (!is_scm_armv8()) {
+			scm_call_atomic1(SCM_SVC_UTIL,
+					 SCM_Q6_NMI_CMD, 0x1);
+		} else {
+			desc.args[0] = 0x1;
+			desc.arginfo = SCM_ARGS(1);
+			scm_call2_atomic(SCM_SIP_FNID(SCM_SVC_UTIL,
+					 SCM_Q6_NMI_CMD), &desc);
+		}
+		/* The write should go through before q6 is shutdown */
+		mb();
+		pr_debug("%s: Q6 NMI was sent.\n", __func__);
+	}
+}
+EXPORT_SYMBOL(audio_ssr_send_nmi);
diff -Nruw linux-4.4.115-fbx/drivers/soc/qcom/qdsp6v2./Makefile linux-4.4.115-fbx/drivers/soc/qcom/qdsp6v2/Makefile
--- linux-4.4.115-fbx/drivers/soc/qcom/qdsp6v2./Makefile	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/soc/qcom/qdsp6v2/Makefile	2019-10-29 09:26:24.817214667 +0100
@@ -0,0 +1,12 @@
+obj-$(CONFIG_MSM_QDSP6_APRV2) += apr.o apr_v2.o apr_tal.o voice_svc.o
+obj-$(CONFIG_MSM_QDSP6_APRV3) += apr.o apr_v3.o apr_tal.o voice_svc.o
+obj-$(CONFIG_MSM_QDSP6_APRV2_GLINK) += apr.o apr_v2.o apr_tal_glink.o voice_svc.o
+obj-$(CONFIG_MSM_QDSP6_APRV3_GLINK) += apr.o apr_v3.o apr_tal_glink.o voice_svc.o
+obj-$(CONFIG_MSM_QDSP6_APRV2_VM) += apr_vm.o apr_v2.o voice_svc.o
+obj-$(CONFIG_SND_SOC_MSM_QDSP6V2_INTF) += msm_audio_ion.o
+obj-$(CONFIG_SND_SOC_QDSP6V2_VM) += msm_audio_ion_vm.o
+obj-$(CONFIG_MSM_ADSP_LOADER) += adsp-loader.o
+obj-$(CONFIG_MSM_QDSP6_SSR) += audio_ssr.o
+obj-$(CONFIG_MSM_QDSP6_PDR) += audio_pdr.o
+obj-$(CONFIG_MSM_QDSP6_NOTIFIER) += audio_notifier.o
+obj-$(CONFIG_MSM_CDSP_LOADER) += cdsp-loader.o
diff -Nruw linux-4.4.115-fbx/drivers/soc/qcom/qdsp6v2./msm_audio_ion.c linux-4.4.115-fbx/drivers/soc/qcom/qdsp6v2/msm_audio_ion.c
--- linux-4.4.115-fbx/drivers/soc/qcom/qdsp6v2./msm_audio_ion.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/soc/qcom/qdsp6v2/msm_audio_ion.c	2019-01-22 16:16:26.667275058 +0100
@@ -0,0 +1,1041 @@
+/*
+ * Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/err.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/mutex.h>
+#include <linux/list.h>
+#include <linux/dma-mapping.h>
+#include <linux/dma-buf.h>
+#include <linux/iommu.h>
+#include <linux/platform_device.h>
+#include <linux/qdsp6v2/apr.h>
+#include <linux/of_device.h>
+#include <linux/msm_audio_ion.h>
+#include <linux/export.h>
+#include <linux/qcom_iommu.h>
+#include <asm/dma-iommu.h>
+#include <soc/qcom/secure_buffer.h>
+
+#define MSM_AUDIO_ION_PROBED (1 << 0)
+
+#define MSM_AUDIO_ION_PHYS_ADDR(alloc_data) \
+	alloc_data->table->sgl->dma_address
+
+#define MSM_AUDIO_ION_VA_START 0x10000000
+#define MSM_AUDIO_ION_VA_LEN 0x0FFFFFFF
+
+#define MSM_AUDIO_SMMU_SID_OFFSET 32
+
+struct addr_range {
+	dma_addr_t start;
+	size_t size;
+};
+
+struct context_bank_info {
+	const char *name;
+	struct addr_range addr_range;
+};
+
+struct msm_audio_ion_private {
+	bool smmu_enabled;
+	bool audioheap_enabled;
+	struct device *cb_dev;
+	struct dma_iommu_mapping *mapping;
+	u8 device_status;
+	struct list_head alloc_list;
+	struct mutex list_mutex;
+	u64 smmu_sid_bits;
+	u32 smmu_version;
+};
+
+struct msm_audio_alloc_data {
+	struct ion_client *client;
+	struct ion_handle *handle;
+	size_t len;
+	struct dma_buf *dma_buf;
+	struct dma_buf_attachment *attach;
+	struct sg_table *table;
+	struct list_head list;
+};
+
+static struct msm_audio_ion_private msm_audio_ion_data = {0,};
+
+static int msm_audio_ion_get_phys(struct ion_client *client,
+				  struct ion_handle *handle,
+				  ion_phys_addr_t *addr, size_t *len);
+
+static int msm_audio_dma_buf_map(struct ion_client *client,
+				  struct ion_handle *handle,
+				  ion_phys_addr_t *addr, size_t *len);
+
+static int msm_audio_dma_buf_unmap(struct ion_client *client,
+				   struct ion_handle *handle);
+
+static void msm_audio_ion_add_allocation(
+	struct msm_audio_ion_private *msm_audio_ion_data,
+	struct msm_audio_alloc_data *alloc_data)
+{
+	/*
+	 * Since these APIs can be invoked by multiple
+	 * clients, there is need to make sure the list
+	 * of allocations is always protected
+	 */
+	mutex_lock(&(msm_audio_ion_data->list_mutex));
+	list_add_tail(&(alloc_data->list),
+		      &(msm_audio_ion_data->alloc_list));
+	mutex_unlock(&(msm_audio_ion_data->list_mutex));
+}
+
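+/*
+ * msm_audio_ion_alloc() - allocate, map and zero an audio buffer. The
+ * audio carveout heap is tried first; when SMMU is enabled the
+ * allocation falls back to the system heap on failure.
+ */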
+int msm_audio_ion_alloc(const char *name, struct ion_client **client,
+			struct ion_handle **handle, size_t bufsz,
+			ion_phys_addr_t *paddr, size_t *pa_len, void **vaddr)
+{
+	int rc = -EINVAL;
+	unsigned long err_ion_ptr = 0;
+
+	if ((msm_audio_ion_data.smmu_enabled == true) &&
+	    !(msm_audio_ion_data.device_status & MSM_AUDIO_ION_PROBED)) {
+		pr_debug("%s:probe is not done, deferred\n", __func__);
+		return -EPROBE_DEFER;
+	}
+	if (!name || !client || !handle || !paddr || !vaddr
+		|| !bufsz || !pa_len) {
+		pr_err("%s: Invalid params\n", __func__);
+		return -EINVAL;
+	}
+	*client = msm_audio_ion_client_create(name);
+	if (IS_ERR_OR_NULL((void *)(*client))) {
+		pr_err("%s: ION create client for AUDIO failed\n", __func__);
+		goto err;
+	}
+
+	*handle = ion_alloc(*client, bufsz, SZ_4K,
+			ION_HEAP(ION_AUDIO_HEAP_ID), 0);
+	if (IS_ERR_OR_NULL((void *) (*handle))) {
+		if (msm_audio_ion_data.smmu_enabled == true) {
+			pr_debug("system heap is used");
+			msm_audio_ion_data.audioheap_enabled = 0;
+			*handle = ion_alloc(*client, bufsz, SZ_4K,
+					ION_HEAP(ION_SYSTEM_HEAP_ID), 0);
+		}
+		if (IS_ERR_OR_NULL((void *) (*handle))) {
+			if (IS_ERR((void *)(*handle)))
+				err_ion_ptr = PTR_ERR(*handle);
+			pr_err("%s:ION alloc fail err ptr=%ld, smmu_enabled=%d\n",
+			__func__, err_ion_ptr, msm_audio_ion_data.smmu_enabled);
+			rc = -ENOMEM;
+			goto err_ion_client;
+		}
+	} else {
+		pr_debug("audio heap is used");
+		msm_audio_ion_data.audioheap_enabled = 1;
+	}
+
+	rc = msm_audio_ion_get_phys(*client, *handle, paddr, pa_len);
+	if (rc) {
+		pr_err("%s: ION Get Physical for AUDIO failed, rc = %d\n",
+			__func__, rc);
+		goto err_ion_handle;
+	}
+
+	*vaddr = ion_map_kernel(*client, *handle);
+	if (IS_ERR_OR_NULL((void *)*vaddr)) {
+		pr_err("%s: ION memory mapping for AUDIO failed\n", __func__);
+		goto err_ion_handle;
+	}
+	pr_debug("%s: mapped address = %pK, size=%zd\n", __func__,
+		*vaddr, bufsz);
+
+	if (bufsz != 0) {
+		pr_debug("%s: memset to 0 %pK %zd\n", __func__, *vaddr, bufsz);
+		memset((void *)*vaddr, 0, bufsz);
+	}
+
+	return rc;
+
+err_ion_handle:
+	ion_free(*client, *handle);
+err_ion_client:
+	msm_audio_ion_client_destroy(*client);
+	*handle = NULL;
+	*client = NULL;
+err:
+	return rc;
+}
+EXPORT_SYMBOL(msm_audio_ion_alloc);
+
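+/*
+ * msm_audio_hyp_assign() - hand buffer ownership between the HLOS and
+ * ADSP shared virtual machines via hyp_assign_phys(), in the direction
+ * selected by assign_type.
+ */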
+static int msm_audio_hyp_assign(ion_phys_addr_t *paddr, size_t *pa_len,
+				u8 assign_type)
+{
+	int srcVM[1] = {VMID_HLOS};
+	int destVM[1] = {VMID_CP_ADSP_SHARED};
+	int destVMperm[1] = {PERM_READ | PERM_WRITE | PERM_EXEC};
+	int ret = 0;
+
+	switch (assign_type) {
+	case HLOS_TO_ADSP:
+		srcVM[0] = VMID_HLOS;
+		destVM[0] = VMID_CP_ADSP_SHARED;
+		break;
+	case ADSP_TO_HLOS:
+		srcVM[0] = VMID_CP_ADSP_SHARED;
+		destVM[0] = VMID_HLOS;
+		break;
+	default:
+		pr_err("%s: Invalid assign type = %d\n", __func__, assign_type);
+		ret = -EINVAL;
+		goto done;
+	}
+
+	ret = hyp_assign_phys(*paddr, *pa_len, srcVM, 1, destVM, destVMperm, 1);
+	if (ret)
+		pr_err("%s: hyp_assign_phys failed for type %d, rc = %d\n",
+			 __func__, assign_type, ret);
+done:
+	return ret;
+}
+
+int msm_audio_ion_phys_free(struct ion_client *client,
+			    struct ion_handle *handle,
+			    ion_phys_addr_t *paddr,
+			    size_t *pa_len, u8 assign_type)
+{
+	int ret;
+
+	if (!(msm_audio_ion_data.device_status & MSM_AUDIO_ION_PROBED)) {
+		pr_debug("%s:probe is not done, deferred\n", __func__);
+		return -EPROBE_DEFER;
+	}
+
+	if (!client || !handle || !paddr || !pa_len) {
+		pr_err("%s: Invalid params\n", __func__);
+		return -EINVAL;
+	}
+
+	ret = ion_phys(client, handle, paddr, pa_len);
+	if (ret) {
+		pr_err("%s: could not get physical address for handle, ret = %d\n",
+			__func__, ret);
+		goto err_ion_handle;
+	}
+
+	ret = msm_audio_hyp_assign(paddr, pa_len, assign_type);
+
+err_ion_handle:
+	ion_free(client, handle);
+	ion_client_destroy(client);
+
+	return ret;
+}
+
+int msm_audio_ion_phys_assign(const char *name, struct ion_client **client,
+			      struct ion_handle **handle, int fd,
+			      ion_phys_addr_t *paddr,
+			      size_t *pa_len, u8 assign_type)
+{
+	int ret;
+
+	if (!(msm_audio_ion_data.device_status & MSM_AUDIO_ION_PROBED)) {
+		pr_debug("%s:probe is not done, deferred\n", __func__);
+		return -EPROBE_DEFER;
+	}
+
+	if (!name || !client || !handle || !paddr || !pa_len) {
+		pr_err("%s: Invalid params\n", __func__);
+		return -EINVAL;
+	}
+
+	*client = msm_audio_ion_client_create(name);
+	if (IS_ERR_OR_NULL((void *)(*client))) {
+		pr_err("%s: ION create client failed\n", __func__);
+		return -EINVAL;
+	}
+
+	*handle = ion_import_dma_buf(*client, fd);
+	if (IS_ERR_OR_NULL((void *) (*handle))) {
+		pr_err("%s: ion import dma buffer failed\n",
+			__func__);
+		ret = -EINVAL;
+		goto err_destroy_client;
+	}
+
+	ret = ion_phys(*client, *handle, paddr, pa_len);
+	if (ret) {
+		pr_err("%s: could not get physical address for handle, ret = %d\n",
+			__func__, ret);
+		goto err_ion_handle;
+	}
+
+	ret = msm_audio_hyp_assign(paddr, pa_len, assign_type);
+
+	return ret;
+
+err_ion_handle:
+	ion_free(*client, *handle);
+
+err_destroy_client:
+	ion_client_destroy(*client);
+	*client = NULL;
+	*handle = NULL;
+
+	return ret;
+}
+
+int msm_audio_ion_import(const char *name, struct ion_client **client,
+			struct ion_handle **handle, int fd,
+			unsigned long *ionflag, size_t bufsz,
+			ion_phys_addr_t *paddr, size_t *pa_len, void **vaddr)
+{
+	int rc = 0;
+
+	if ((msm_audio_ion_data.smmu_enabled == true) &&
+	    !(msm_audio_ion_data.device_status & MSM_AUDIO_ION_PROBED)) {
+		pr_debug("%s:probe is not done, deferred\n", __func__);
+		return -EPROBE_DEFER;
+	}
+
+	if (!name || !client || !handle || !paddr || !vaddr || !pa_len) {
+		pr_err("%s: Invalid params\n", __func__);
+		rc = -EINVAL;
+		goto err;
+	}
+
+	*client = msm_audio_ion_client_create(name);
+	if (IS_ERR_OR_NULL((void *)(*client))) {
+		pr_err("%s: ION create client for AUDIO failed\n", __func__);
+		rc = -EINVAL;
+		goto err;
+	}
+
+	/*
+	 * name should be audio_acdb_client or Audio_Dec_Client,
+	 * bufsz should be 0 and fd shouldn't be 0 as of now.
+	 */
+	*handle = ion_import_dma_buf(*client, fd);
+	pr_debug("%s: DMA Buf name=%s, fd=%d handle=%pK\n", __func__,
+							name, fd, *handle);
+	if (IS_ERR_OR_NULL((void *) (*handle))) {
+		pr_err("%s: ion import dma buffer failed\n",
+				__func__);
+		rc = -EINVAL;
+		goto err_destroy_client;
+	}
+
+	if (ionflag != NULL) {
+		rc = ion_handle_get_flags(*client, *handle, ionflag);
+		if (rc) {
+			pr_err("%s: could not get flags for the handle\n",
+				__func__);
+			goto err_ion_handle;
+		}
+	}
+
+	rc = msm_audio_ion_get_phys(*client, *handle, paddr, pa_len);
+	if (rc) {
+		pr_err("%s: ION Get Physical for AUDIO failed, rc = %d\n",
+				__func__, rc);
+		goto err_ion_handle;
+	}
+
+	*vaddr = ion_map_kernel(*client, *handle);
+	if (IS_ERR_OR_NULL((void *)*vaddr)) {
+		pr_err("%s: ION memory mapping for AUDIO failed\n", __func__);
+		rc = -ENOMEM;
+		goto err_ion_handle;
+	}
+	pr_debug("%s: mapped address = %pK, size=%zd\n", __func__,
+		*vaddr, bufsz);
+
+	return 0;
+
+err_ion_handle:
+	ion_free(*client, *handle);
+err_destroy_client:
+	msm_audio_ion_client_destroy(*client);
+	*client = NULL;
+	*handle = NULL;
+err:
+	return rc;
+}
+
+int msm_audio_ion_free(struct ion_client *client, struct ion_handle *handle)
+{
+	if (!client || !handle) {
+		pr_err("%s Invalid params\n", __func__);
+		return -EINVAL;
+	}
+	if (msm_audio_ion_data.smmu_enabled)
+		msm_audio_dma_buf_unmap(client, handle);
+
+	ion_unmap_kernel(client, handle);
+
+	ion_free(client, handle);
+	msm_audio_ion_client_destroy(client);
+	return 0;
+}
+EXPORT_SYMBOL(msm_audio_ion_free);
+
+int msm_audio_ion_mmap(struct audio_buffer *ab,
+		       struct vm_area_struct *vma)
+{
+	struct sg_table *table;
+	unsigned long addr = vma->vm_start;
+	unsigned long offset = vma->vm_pgoff * PAGE_SIZE;
+	struct scatterlist *sg;
+	unsigned int i;
+	struct page *page;
+	int ret;
+
+	pr_debug("%s\n", __func__);
+
+	table = ion_sg_table(ab->client, ab->handle);
+
+	if (IS_ERR(table)) {
+		pr_err("%s: Unable to get sg_table from ion: %ld\n",
+			__func__, PTR_ERR(table));
+		return PTR_ERR(table);
+	} else if (!table) {
+		pr_err("%s: sg_list is NULL\n", __func__);
+		return -EINVAL;
+	}
+
+	/* uncached */
+	vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+
+	/* We need to check if a page is associated with this sg list because:
+	 * If the allocation came from a carveout we currently don't have
+	 * pages associated with carved out memory. This might change in the
+	 * future and we can remove this check and the else statement.
+	 */
+	page = sg_page(table->sgl);
+	if (page) {
+		pr_debug("%s: page is NOT null\n", __func__);
+		for_each_sg(table->sgl, sg, table->nents, i) {
+			unsigned long remainder = vma->vm_end - addr;
+			unsigned long len = sg->length;
+
+			page = sg_page(sg);
+
+			if (offset >= len) {
+				offset -= len;
+				continue;
+			} else if (offset) {
+				page += offset / PAGE_SIZE;
+				len -= offset;
+				offset = 0;
+			}
+			len = min(len, remainder);
+			pr_debug("vma=%pK, addr=%x len=%ld vm_start=%x vm_end=%x vm_page_prot=%ld\n",
+				vma, (unsigned int)addr, len,
+				(unsigned int)vma->vm_start,
+				(unsigned int)vma->vm_end,
+				(unsigned long int)vma->vm_page_prot);
+			ret = remap_pfn_range(vma, addr, page_to_pfn(page),
+					      len, vma->vm_page_prot);
+			if (ret)
+				return ret;
+			addr += len;
+			if (addr >= vma->vm_end)
+				return 0;
+		}
+	} else {
+		ion_phys_addr_t phys_addr;
+		size_t phys_len;
+		size_t va_len = 0;
+		pr_debug("%s: page is NULL\n", __func__);
+
+		ret = ion_phys(ab->client, ab->handle, &phys_addr, &phys_len);
+		if (ret) {
+			pr_err("%s: Unable to get phys address from ION buffer: %d\n"
+				, __func__ , ret);
+			return ret;
+		}
+		pr_debug("phys=%pKK len=%zd\n", &phys_addr, phys_len);
+		pr_debug("vma=%pK, vm_start=%x vm_end=%x vm_pgoff=%ld vm_page_prot=%ld\n",
+			vma, (unsigned int)vma->vm_start,
+			(unsigned int)vma->vm_end, vma->vm_pgoff,
+			(unsigned long int)vma->vm_page_prot);
+		va_len = vma->vm_end - vma->vm_start;
+		if ((offset > phys_len) || (va_len > phys_len - offset)) {
+			pr_err("wrong offset %lu, phys_len=%zd, va_len=%zd\n",
+				offset, phys_len, va_len);
+			return -EINVAL;
+		}
+		ret = remap_pfn_range(vma, vma->vm_start,
+				__phys_to_pfn(phys_addr) + vma->vm_pgoff,
+				vma->vm_end - vma->vm_start,
+				vma->vm_page_prot);
+		if (ret)
+			return ret;
+	}
+	return 0;
+}
+
+bool msm_audio_ion_is_smmu_available(void)
+{
+	return msm_audio_ion_data.smmu_enabled;
+}
+
+/* TODO: move this back to the static section once external callers are gone */
+struct ion_client *msm_audio_ion_client_create(const char *name)
+{
+	return msm_ion_client_create(name);
+}
+
+void msm_audio_ion_client_destroy(struct ion_client *client)
+{
+	pr_debug("%s: client = %pK smmu_enabled = %d\n", __func__,
+		client, msm_audio_ion_data.smmu_enabled);
+
+	ion_client_destroy(client);
+}
+
+int msm_audio_ion_import_legacy(const char *name, struct ion_client *client,
+			struct ion_handle **handle, int fd,
+			unsigned long *ionflag, size_t bufsz,
+			ion_phys_addr_t *paddr, size_t *pa_len, void **vaddr)
+{
+	int rc = 0;
+	if (!name || !client || !handle || !paddr || !vaddr || !pa_len) {
+		pr_err("%s: Invalid params\n", __func__);
+		rc = -EINVAL;
+		goto err;
+	}
+	/*
+	 * The client is already created and passed in for the legacy path.
+	 * name should be audio_acdb_client or Audio_Dec_Client,
+	 * bufsz should be 0 and fd shouldn't be 0 as of now.
+	 */
+	*handle = ion_import_dma_buf(client, fd);
+	pr_debug("%s: DMA Buf name=%s, fd=%d handle=%pK\n", __func__,
+							name, fd, *handle);
+	if (IS_ERR_OR_NULL((void *)(*handle))) {
+		pr_err("%s: ion import dma buffer failed\n",
+			__func__);
+		rc = -EINVAL;
+		goto err;
+	}
+
+	if (ionflag != NULL) {
+		rc = ion_handle_get_flags(client, *handle, ionflag);
+		if (rc) {
+			pr_err("%s: could not get flags for the handle\n",
+							__func__);
+			rc = -EINVAL;
+			goto err_ion_handle;
+		}
+	}
+
+	rc = msm_audio_ion_get_phys(client, *handle, paddr, pa_len);
+	if (rc) {
+		pr_err("%s: ION Get Physical for AUDIO failed, rc = %d\n",
+			__func__, rc);
+		rc = -EINVAL;
+		goto err_ion_handle;
+	}
+
+	/* TODO: add a condition for whether SMMU is enabled or not */
+	*vaddr = ion_map_kernel(client, *handle);
+	if (IS_ERR_OR_NULL((void *)*vaddr)) {
+		pr_err("%s: ION memory mapping for AUDIO failed\n", __func__);
+		rc = -EINVAL;
+		goto err_ion_handle;
+	}
+
+	if (bufsz != 0)
+		memset((void *)*vaddr, 0, bufsz);
+
+	return 0;
+
+err_ion_handle:
+	ion_free(client, *handle);
+err:
+	return rc;
+}
+
+int msm_audio_ion_free_legacy(struct ion_client *client,
+			      struct ion_handle *handle)
+{
+	if (msm_audio_ion_data.smmu_enabled)
+		msm_audio_dma_buf_unmap(client, handle);
+
+	ion_unmap_kernel(client, handle);
+
+	ion_free(client, handle);
+	/* no client destroy in the legacy path */
+	return 0;
+}
+
+int msm_audio_ion_cache_operations(struct audio_buffer *abuff, int cache_op)
+{
+	unsigned long ionflag = 0;
+	int rc = 0;
+	int msm_cache_ops = 0;
+
+	if (!abuff) {
+		pr_err("%s: Invalid params: %pK\n", __func__, abuff);
+		return -EINVAL;
+	}
+	rc = ion_handle_get_flags(abuff->client, abuff->handle,
+		&ionflag);
+	if (rc) {
+		pr_err("ion_handle_get_flags failed: %d\n", rc);
+		goto cache_op_failed;
+	}
+
+	/* has to be CACHED */
+	if (ION_IS_CACHED(ionflag)) {
+		/* ION_IOC_INV_CACHES or ION_IOC_CLEAN_CACHES */
+		msm_cache_ops = cache_op;
+		rc = msm_ion_do_cache_op(abuff->client,
+				abuff->handle,
+				(unsigned long *) abuff->data,
+				(unsigned long)abuff->size,
+				msm_cache_ops);
+		if (rc) {
+			pr_err("cache operation failed %d\n", rc);
+			goto cache_op_failed;
+		}
+	}
+cache_op_failed:
+	return rc;
+}
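+
+/*
+ * Usage note (illustrative): after the CPU writes into a cached buffer,
+ * callers would pass ION_IOC_CLEAN_CACHES so the DSP sees the data; before
+ * the CPU reads data produced by the DSP, ION_IOC_INV_CACHES invalidates
+ * stale cache lines. On uncached buffers the helper above is a no-op.
+ */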
+
+static int msm_audio_dma_buf_map(struct ion_client *client,
+		struct ion_handle *handle,
+		ion_phys_addr_t *addr, size_t *len)
+{
+	struct msm_audio_alloc_data *alloc_data;
+	struct device *cb_dev;
+	int rc = 0;
+
+	cb_dev = msm_audio_ion_data.cb_dev;
+
+	/* Data required per buffer mapping */
+	alloc_data = kzalloc(sizeof(*alloc_data), GFP_KERNEL);
+	if (!alloc_data) {
+		pr_err("%s: No memory for alloc_data\n", __func__);
+		return -ENOMEM;
+	}
+
+	/* Get the ION handle size */
+	ion_handle_get_size(client, handle, len);
+
+	alloc_data->client = client;
+	alloc_data->handle = handle;
+	alloc_data->len = *len;
+
+	/* Get the dma_buf handle from ion_handle */
+	alloc_data->dma_buf = ion_share_dma_buf(client, handle);
+	if (IS_ERR(alloc_data->dma_buf)) {
+		rc = PTR_ERR(alloc_data->dma_buf);
+		dev_err(cb_dev,
+			"%s: Fail to get dma_buf handle, rc = %d\n",
+			__func__, rc);
+		goto err_dma_buf;
+	}
+
+	/* Attach the dma_buf to context bank device */
+	alloc_data->attach = dma_buf_attach(alloc_data->dma_buf,
+					    cb_dev);
+	if (IS_ERR(alloc_data->attach)) {
+		rc = PTR_ERR(alloc_data->attach);
+		dev_err(cb_dev,
+			"%s: Fail to attach dma_buf to CB, rc = %d\n",
+			__func__, rc);
+		goto err_attach;
+	}
+
+	/*
+	 * Get the scatter-gather list.
+	 * There is no info on whether this is a write buffer or a
+	 * read buffer, hence the request is bi-directional
+	 * to accommodate both read and write mappings.
+	 */
+	alloc_data->table = dma_buf_map_attachment(alloc_data->attach,
+				DMA_BIDIRECTIONAL);
+	if (IS_ERR(alloc_data->table)) {
+		rc = PTR_ERR(alloc_data->table);
+		dev_err(cb_dev,
+			"%s: Fail to map attachment, rc = %d\n",
+			__func__, rc);
+		goto err_map_attach;
+	}
+
+	rc = dma_map_sg(cb_dev, alloc_data->table->sgl,
+			alloc_data->table->nents,
+			DMA_BIDIRECTIONAL);
+	if (rc != alloc_data->table->nents) {
+		dev_err(cb_dev,
+			"%s: Fail to map SG, rc = %d, nents = %d\n",
+			__func__, rc, alloc_data->table->nents);
+		goto err_map_sg;
+	}
+	/* Make sure not to return rc from dma_map_sg, as it can be nonzero */
+	rc = 0;
+
+	/* physical address from mapping */
+	*addr = MSM_AUDIO_ION_PHYS_ADDR(alloc_data);
+
+	msm_audio_ion_add_allocation(&msm_audio_ion_data,
+				     alloc_data);
+	return rc;
+
+err_map_sg:
+	dma_buf_unmap_attachment(alloc_data->attach,
+				 alloc_data->table,
+				 DMA_BIDIRECTIONAL);
+err_map_attach:
+	dma_buf_detach(alloc_data->dma_buf,
+		       alloc_data->attach);
+err_attach:
+	dma_buf_put(alloc_data->dma_buf);
+
+err_dma_buf:
+	kfree(alloc_data);
+
+	return rc;
+}
+
+static int msm_audio_dma_buf_unmap(struct ion_client *client,
+				   struct ion_handle *handle)
+{
+	int rc = 0;
+	struct msm_audio_alloc_data *alloc_data = NULL;
+	struct list_head *ptr, *next;
+	struct device *cb_dev = msm_audio_ion_data.cb_dev;
+	bool found = false;
+
+	/*
+	 * Though list_for_each_safe is delete safe, lock
+	 * should be explicitly acquired to avoid race condition
+	 * on adding elements to the list.
+	 */
+	mutex_lock(&(msm_audio_ion_data.list_mutex));
+	list_for_each_safe(ptr, next,
+			    &(msm_audio_ion_data.alloc_list)) {
+
+		alloc_data = list_entry(ptr, struct msm_audio_alloc_data,
+					list);
+
+		if (alloc_data->handle == handle &&
+		    alloc_data->client == client) {
+			found = true;
+			dma_unmap_sg(cb_dev,
+				    alloc_data->table->sgl,
+				    alloc_data->table->nents,
+				    DMA_BIDIRECTIONAL);
+
+			dma_buf_unmap_attachment(alloc_data->attach,
+						 alloc_data->table,
+						 DMA_BIDIRECTIONAL);
+
+			dma_buf_detach(alloc_data->dma_buf,
+				       alloc_data->attach);
+
+			dma_buf_put(alloc_data->dma_buf);
+
+			list_del(&(alloc_data->list));
+			kfree(alloc_data);
+			break;
+		}
+	}
+	mutex_unlock(&(msm_audio_ion_data.list_mutex));
+
+	if (!found) {
+		dev_err(cb_dev,
+			"%s: cannot find allocation, ion_handle %pK, ion_client %pK",
+			__func__, handle, client);
+		rc = -EINVAL;
+	}
+
+	return rc;
+}
+
+static int msm_audio_ion_get_phys(struct ion_client *client,
+		struct ion_handle *handle,
+		ion_phys_addr_t *addr, size_t *len)
+{
+	int rc = 0;
+
+	pr_debug("%s: smmu_enabled = %d\n", __func__,
+		msm_audio_ion_data.smmu_enabled);
+
+	if (msm_audio_ion_data.smmu_enabled) {
+		rc = msm_audio_dma_buf_map(client, handle, addr, len);
+		if (rc) {
+			pr_err("%s: failed to map DMA buf, err = %d\n",
+				__func__, rc);
+			goto err;
+		}
+		/* Append the SMMU SID information to the IOVA address */
+		*addr |= msm_audio_ion_data.smmu_sid_bits;
+	} else {
+		rc = ion_phys(client, handle, addr, len);
+	}
+
+	pr_debug("phys=%pK, len=%zd, rc=%d\n", &(*addr), *len, rc);
+err:
+	return rc;
+}
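+
+/*
+ * Worked example for the SID composition above (a sketch; assumes the
+ * usual MSM_AUDIO_SMMU_SID_OFFSET of 32, which is defined elsewhere):
+ * with SMMU SID 0x1 and a mapped IOVA of 0x10000000,
+ *
+ *	smmu_sid_bits = 0x1ULL << 32;
+ *	*addr        |= smmu_sid_bits;	-> 0x0000000110000000
+ *
+ * so the upper 32 bits carry the SID and the lower 32 bits the IOVA.
+ */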
+
+static int msm_audio_smmu_init_legacy(struct device *dev)
+{
+	struct dma_iommu_mapping *mapping;
+	struct device_node *ctx_node = NULL;
+	struct context_bank_info *cb;
+	int ret;
+	u32 read_val[2];
+
+	cb = devm_kzalloc(dev, sizeof(struct context_bank_info), GFP_KERNEL);
+	if (!cb)
+		return -ENOMEM;
+
+	ctx_node = of_parse_phandle(dev->of_node, "iommus", 0);
+	if (!ctx_node) {
+		dev_err(dev, "%s Could not find any iommus for audio\n",
+			__func__);
+		return -EINVAL;
+	}
+	ret = of_property_read_string(ctx_node, "label", &(cb->name));
+	if (ret) {
+		dev_err(dev, "%s Could not find label\n", __func__);
+		return -EINVAL;
+	}
+	pr_debug("label found : %s\n", cb->name);
+	ret = of_property_read_u32_array(ctx_node,
+				"qcom,virtual-addr-pool",
+				read_val, 2);
+	if (ret) {
+		dev_err(dev, "%s Could not read addr pool for group : (%d)\n",
+			__func__, ret);
+		return -EINVAL;
+	}
+	msm_audio_ion_data.cb_dev = msm_iommu_get_ctx(cb->name);
+	if (msm_audio_ion_data.cb_dev == NULL) {
+		dev_err(dev, "%s Could not find IOMMU context\n",
+			__func__);
+		return -EINVAL;
+	}
+	cb->addr_range.start = (dma_addr_t) read_val[0];
+	cb->addr_range.size = (size_t) read_val[1];
+	dev_dbg(dev, "%s Legacy iommu usage\n", __func__);
+	mapping = arm_iommu_create_mapping(
+				msm_iommu_get_bus(msm_audio_ion_data.cb_dev),
+					   cb->addr_range.start,
+					   cb->addr_range.size);
+	if (IS_ERR(mapping))
+		return PTR_ERR(mapping);
+
+	ret = arm_iommu_attach_device(msm_audio_ion_data.cb_dev, mapping);
+	if (ret) {
+		dev_err(dev, "%s: Attach failed, err = %d\n",
+			__func__, ret);
+		goto fail_attach;
+	}
+
+	msm_audio_ion_data.mapping = mapping;
+	INIT_LIST_HEAD(&msm_audio_ion_data.alloc_list);
+	mutex_init(&(msm_audio_ion_data.list_mutex));
+
+	return 0;
+
+fail_attach:
+	arm_iommu_release_mapping(mapping);
+	return ret;
+}
+
+static int msm_audio_smmu_init(struct device *dev)
+{
+	struct dma_iommu_mapping *mapping;
+	int ret;
+
+	mapping = arm_iommu_create_mapping(
+					msm_iommu_get_bus(dev),
+					   MSM_AUDIO_ION_VA_START,
+					   MSM_AUDIO_ION_VA_LEN);
+	if (IS_ERR(mapping))
+		return PTR_ERR(mapping);
+
+	ret = arm_iommu_attach_device(dev, mapping);
+	if (ret) {
+		dev_err(dev, "%s: Attach failed, err = %d\n",
+			__func__, ret);
+		goto fail_attach;
+	}
+
+	msm_audio_ion_data.cb_dev = dev;
+	msm_audio_ion_data.mapping = mapping;
+	INIT_LIST_HEAD(&msm_audio_ion_data.alloc_list);
+	mutex_init(&(msm_audio_ion_data.list_mutex));
+
+	return 0;
+
+fail_attach:
+	arm_iommu_release_mapping(mapping);
+	return ret;
+}
+
+static const struct of_device_id msm_audio_ion_dt_match[] = {
+	{ .compatible = "qcom,msm-audio-ion" },
+	{ }
+};
+MODULE_DEVICE_TABLE(of, msm_audio_ion_dt_match);
+
+u32 msm_audio_ion_get_smmu_sid_mode32(void)
+{
+	if (msm_audio_ion_data.smmu_enabled)
+		return upper_32_bits(msm_audio_ion_data.smmu_sid_bits);
+	else
+		return 0;
+}
+
+u32 msm_audio_populate_upper_32_bits(ion_phys_addr_t pa)
+{
+	if (sizeof(ion_phys_addr_t) == sizeof(u32))
+		return msm_audio_ion_get_smmu_sid_mode32();
+	else
+		return upper_32_bits(pa);
+}
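+
+/*
+ * For example (illustrative values): on a target with a 64-bit
+ * ion_phys_addr_t and the SID composed as above, pa = 0x0000000110000000
+ * makes msm_audio_populate_upper_32_bits(pa) return 0x1, i.e. the SID;
+ * on a 32-bit ion_phys_addr_t the SID cannot fit in pa, so it is fetched
+ * from msm_audio_ion_get_smmu_sid_mode32() instead.
+ */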
+
+static int msm_audio_ion_probe(struct platform_device *pdev)
+{
+	int rc = 0;
+	const char *msm_audio_ion_dt = "qcom,smmu-enabled";
+	const char *msm_audio_ion_smmu = "qcom,smmu-version";
+	bool smmu_enabled;
+	enum apr_subsys_state q6_state;
+	struct device *dev = &pdev->dev;
+
+	if (dev->of_node == NULL) {
+		dev_err(dev,
+			"%s: device tree is not found\n",
+			__func__);
+		msm_audio_ion_data.smmu_enabled = 0;
+		return 0;
+	}
+
+	smmu_enabled = of_property_read_bool(dev->of_node,
+					     msm_audio_ion_dt);
+	msm_audio_ion_data.smmu_enabled = smmu_enabled;
+
+	if (smmu_enabled) {
+		rc = of_property_read_u32(dev->of_node,
+					msm_audio_ion_smmu,
+					&msm_audio_ion_data.smmu_version);
+		if (rc) {
+			dev_err(dev,
+				"%s: qcom,smmu_version missing in DT node\n",
+				__func__);
+			return rc;
+		}
+		dev_dbg(dev, "%s: SMMU version is (%d)", __func__,
+				msm_audio_ion_data.smmu_version);
+		q6_state = apr_get_q6_state();
+		if (q6_state == APR_SUBSYS_DOWN) {
+			dev_dbg(dev,
+				"defering %s, adsp_state %d\n",
+				__func__, q6_state);
+			return -EPROBE_DEFER;
+		} else {
+			dev_dbg(dev, "%s: adsp is ready\n", __func__);
+		}
+	}
+
+	dev_dbg(dev, "%s: SMMU is %s\n", __func__,
+		(smmu_enabled) ? "Enabled" : "Disabled");
+
+	if (smmu_enabled) {
+		u64 smmu_sid = 0;
+		struct of_phandle_args iommuspec;
+
+		/* Get SMMU SID information from Devicetree */
+		rc = of_parse_phandle_with_args(dev->of_node, "iommus",
+						"#iommu-cells", 0, &iommuspec);
+		if (rc)
+			dev_err(dev, "%s: could not get smmu SID, ret = %d\n",
+				__func__, rc);
+		else
+			smmu_sid = iommuspec.args[0];
+
+		msm_audio_ion_data.smmu_sid_bits =
+			smmu_sid << MSM_AUDIO_SMMU_SID_OFFSET;
+
+		if (msm_audio_ion_data.smmu_version == 0x1) {
+			rc = msm_audio_smmu_init_legacy(dev);
+		} else if (msm_audio_ion_data.smmu_version == 0x2) {
+			rc = msm_audio_smmu_init(dev);
+		} else {
+			dev_err(dev, "%s: smmu version invalid %d\n",
+				__func__, msm_audio_ion_data.smmu_version);
+			rc = -EINVAL;
+		}
+		if (rc)
+			dev_err(dev, "%s: smmu init failed, err = %d\n",
+				__func__, rc);
+	}
+
+	if (!rc)
+		msm_audio_ion_data.device_status |= MSM_AUDIO_ION_PROBED;
+
+	return rc;
+}
+
+static int msm_audio_ion_remove(struct platform_device *pdev)
+{
+	struct dma_iommu_mapping *mapping;
+	struct device *audio_cb_dev;
+
+	mapping = msm_audio_ion_data.mapping;
+	audio_cb_dev = msm_audio_ion_data.cb_dev;
+
+	if (audio_cb_dev && mapping) {
+		arm_iommu_detach_device(audio_cb_dev);
+		arm_iommu_release_mapping(mapping);
+	}
+
+	msm_audio_ion_data.smmu_enabled = 0;
+	msm_audio_ion_data.device_status = 0;
+	return 0;
+}
+
+static struct platform_driver msm_audio_ion_driver = {
+	.driver = {
+		.name = "msm-audio-ion",
+		.owner = THIS_MODULE,
+		.of_match_table = msm_audio_ion_dt_match,
+	},
+	.probe = msm_audio_ion_probe,
+	.remove = msm_audio_ion_remove,
+};
+
+static int __init msm_audio_ion_init(void)
+{
+	return platform_driver_register(&msm_audio_ion_driver);
+}
+module_init(msm_audio_ion_init);
+
+static void __exit msm_audio_ion_exit(void)
+{
+	platform_driver_unregister(&msm_audio_ion_driver);
+}
+module_exit(msm_audio_ion_exit);
+
+MODULE_DESCRIPTION("MSM Audio ION module");
+MODULE_LICENSE("GPL v2");
diff -Nruw linux-4.4.115-fbx/drivers/soc/qcom/qdsp6v2./voice_svc.c linux-4.4.115-fbx/drivers/soc/qcom/qdsp6v2/voice_svc.c
--- linux-4.4.115-fbx/drivers/soc/qcom/qdsp6v2./voice_svc.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/soc/qcom/qdsp6v2/voice_svc.c	2019-10-29 09:26:24.821214706 +0100
@@ -0,0 +1,874 @@
+/* Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/spinlock.h>
+#include <linux/fs.h>
+#include <linux/uaccess.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+#include <linux/cdev.h>
+#include <linux/qdsp6v2/apr_tal.h>
+#include <linux/qdsp6v2/apr.h>
+#include <sound/voice_svc.h>
+
+#define MINOR_NUMBER 1
+#define APR_MAX_RESPONSE 10
+#define TIMEOUT_MS 1000
+
+#define MAX(a, b) ((a) >= (b) ? (a) : (b))
+
+struct voice_svc_device {
+	struct cdev *cdev;
+	struct device *dev;
+	int major;
+};
+
+struct voice_svc_prvt {
+	void *apr_q6_mvm;
+	void *apr_q6_cvs;
+	uint16_t response_count;
+	struct list_head response_queue;
+	wait_queue_head_t response_wait;
+	spinlock_t response_lock;
+	/*
+	 * This mutex ensures responses are processed in sequential order and
+	 * that no two threads access and free the same response at the same
+	 * time.
+	 */
+	struct mutex response_mutex_lock;
+};
+
+struct apr_data {
+	struct apr_hdr hdr;
+	__u8 payload[0];
+} __packed;
+
+struct apr_response_list {
+	struct list_head list;
+	struct voice_svc_cmd_response resp;
+};
+
+static struct voice_svc_device *voice_svc_dev;
+static struct class *voice_svc_class;
+static bool reg_dummy_sess;
+static void *dummy_q6_mvm;
+static void *dummy_q6_cvs;
+static dev_t device_num;
+
+static spinlock_t voicesvc_lock;
+static bool is_released;
+static int voice_svc_dummy_reg(void);
+static int voice_svc_dummy_dereg(void);
+
+static int32_t qdsp_dummy_apr_callback(struct apr_client_data *data,
+					void *priv);
+
+static int32_t qdsp_apr_callback(struct apr_client_data *data, void *priv)
+{
+	struct voice_svc_prvt *prtd;
+	struct apr_response_list *response_list;
+	unsigned long spin_flags;
+
+	if ((data == NULL) || (priv == NULL)) {
+		pr_err("%s: data or priv is NULL\n", __func__);
+
+		return -EINVAL;
+	}
+	spin_lock(&voicesvc_lock);
+	if (is_released) {
+		spin_unlock(&voicesvc_lock);
+		return 0;
+	}
+
+	prtd = (struct voice_svc_prvt *)priv;
+	if (prtd == NULL) {
+		pr_err("%s: private data is NULL\n", __func__);
+		spin_unlock(&voicesvc_lock);
+
+		return -EINVAL;
+	}
+
+	pr_debug("%s: data->opcode %x\n", __func__,
+		 data->opcode);
+
+	if (data->opcode == RESET_EVENTS) {
+		if (data->reset_proc == APR_DEST_QDSP6) {
+			pr_debug("%s: Received ADSP reset event\n", __func__);
+
+			if (prtd->apr_q6_mvm != NULL) {
+				apr_reset(prtd->apr_q6_mvm);
+				prtd->apr_q6_mvm = NULL;
+			}
+
+			if (prtd->apr_q6_cvs != NULL) {
+				apr_reset(prtd->apr_q6_cvs);
+				prtd->apr_q6_cvs = NULL;
+			}
+		} else if (data->reset_proc == APR_DEST_MODEM) {
+			pr_debug("%s: Received Modem reset event\n", __func__);
+		}
+		/*
+		 * Set the remaining member variables to default values
+		 * for RESET_EVENTS.
+		 */
+		data->payload_size = 0;
+		data->payload = NULL;
+		data->src_port = 0;
+		data->dest_port = 0;
+		data->token = 0;
+	}
+
+	spin_lock_irqsave(&prtd->response_lock, spin_flags);
+
+	if (prtd->response_count < APR_MAX_RESPONSE) {
+		response_list = kmalloc(sizeof(struct apr_response_list) +
+					data->payload_size, GFP_ATOMIC);
+		if (response_list == NULL) {
+			pr_err("%s: kmalloc failed\n", __func__);
+
+			spin_unlock_irqrestore(&prtd->response_lock,
+					       spin_flags);
+			spin_unlock(&voicesvc_lock);
+			return -ENOMEM;
+		}
+
+		response_list->resp.src_port = data->src_port;
+
+		/*
+		 * Reverting the bit manipulation done in voice_svc_update_hdr
+		 * to the src_port, which is returned to us as dest_port.
+		 */
+		response_list->resp.dest_port = ((data->dest_port) >> 8);
+		response_list->resp.token = data->token;
+		response_list->resp.opcode = data->opcode;
+		response_list->resp.payload_size = data->payload_size;
+		if (data->payload != NULL && data->payload_size > 0) {
+			memcpy(response_list->resp.payload, data->payload,
+			       data->payload_size);
+		}
+
+		list_add_tail(&response_list->list, &prtd->response_queue);
+		prtd->response_count++;
+		spin_unlock_irqrestore(&prtd->response_lock, spin_flags);
+
+		wake_up(&prtd->response_wait);
+	} else {
+		spin_unlock_irqrestore(&prtd->response_lock, spin_flags);
+		pr_err("%s: Response dropped since the queue is full\n",
+		       __func__);
+	}
+
+	spin_unlock(&voicesvc_lock);
+	return 0;
+}
+
+static int32_t qdsp_dummy_apr_callback(struct apr_client_data *data, void *priv)
+{
+	/* Do Nothing */
+	return 0;
+}
+
+static void voice_svc_update_hdr(struct voice_svc_cmd_request *apr_req_data,
+				 struct apr_data *aprdata)
+{
+	aprdata->hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+				       APR_HDR_LEN(sizeof(struct apr_hdr)),
+				       APR_PKT_VER);
+	/* Bit manipulation is done on src_port so that a unique ID is sent.
+	 * This manipulation can be used in the future where the same service
+	 * is tried to open multiple times with the same src_port. At that
+	 * time 0x0001 can be replaced with other values depending on the
+	 * count.
+	 */
+	aprdata->hdr.src_port = ((apr_req_data->src_port) << 8 | 0x0001);
+	aprdata->hdr.dest_port = apr_req_data->dest_port;
+	aprdata->hdr.token = apr_req_data->token;
+	aprdata->hdr.opcode = apr_req_data->opcode;
+	aprdata->hdr.pkt_size  = APR_PKT_SIZE(APR_HDR_SIZE,
+					apr_req_data->payload_size);
+	memcpy(aprdata->payload, apr_req_data->payload,
+	       apr_req_data->payload_size);
+}
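+
+/*
+ * Worked example of the port encoding above (illustrative values):
+ * a request with src_port 0x0005 is sent with
+ *
+ *	hdr.src_port = (0x0005 << 8) | 0x0001 = 0x0501
+ *
+ * and the response comes back with dest_port 0x0501, which
+ * qdsp_apr_callback shifts right by 8 to recover 0x0005.
+ */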
+
+static int voice_svc_send_req(struct voice_svc_cmd_request *apr_request,
+			      struct voice_svc_prvt *prtd)
+{
+	int ret = 0;
+	void *apr_handle = NULL;
+	struct apr_data *aprdata = NULL;
+	uint32_t user_payload_size;
+	uint32_t payload_size;
+
+	pr_debug("%s\n", __func__);
+
+	if (apr_request == NULL) {
+		pr_err("%s: apr_request is NULL\n", __func__);
+
+		ret = -EINVAL;
+		goto done;
+	}
+
+	user_payload_size = apr_request->payload_size;
+	payload_size = sizeof(struct apr_data) + user_payload_size;
+
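+	/*
+	 * sizeof(struct apr_data) is nonzero, so payload_size can only be
+	 * <= user_payload_size if the addition above wrapped around, i.e.
+	 * the caller passed a bogus payload_size.
+	 */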
+	if (payload_size <= user_payload_size) {
+		pr_err("%s: invalid payload size (0x%x)\n",
+			__func__, user_payload_size);
+		ret = -EINVAL;
+		goto done;
+	}
+
+	aprdata = kmalloc(payload_size, GFP_KERNEL);
+	if (aprdata == NULL) {
+		ret = -ENOMEM;
+		goto done;
+	}
+
+	voice_svc_update_hdr(apr_request, aprdata);
+
+	if (!strcmp(apr_request->svc_name, VOICE_SVC_CVS_STR)) {
+		apr_handle = prtd->apr_q6_cvs;
+	} else if (!strcmp(apr_request->svc_name, VOICE_SVC_MVM_STR)) {
+		apr_handle = prtd->apr_q6_mvm;
+	} else {
+		pr_err("%s: Invalid service %.*s\n", __func__,
+			MAX_APR_SERVICE_NAME_LEN, apr_request->svc_name);
+
+		ret = -EINVAL;
+		goto done;
+	}
+
+	ret = apr_send_pkt(apr_handle, (uint32_t *)aprdata);
+
+	if (ret < 0) {
+		pr_err("%s: Fail in sending request %d\n",
+			__func__, ret);
+		ret = -EINVAL;
+	} else {
+		pr_debug("%s: apr packet sent successfully %d\n",
+			 __func__, ret);
+		ret = 0;
+	}
+
+done:
+	kfree(aprdata);
+	return ret;
+}
+
+static int voice_svc_reg(char *svc, uint32_t src_port,
+			 struct voice_svc_prvt *prtd, void **handle)
+{
+	int ret = 0;
+
+	pr_debug("%s\n", __func__);
+
+	if (handle == NULL) {
+		pr_err("%s: handle is NULL\n", __func__);
+		ret = -EINVAL;
+		goto done;
+	}
+
+	if (*handle != NULL) {
+		pr_err("%s: svc handle not NULL\n", __func__);
+		ret = -EINVAL;
+		goto done;
+	}
+
+	if (src_port == (APR_MAX_PORTS - 1)) {
+		pr_err("%s: SRC port reserved for dummy session\n", __func__);
+		pr_err("%s: Unable to register %s\n", __func__, svc);
+		ret = -EINVAL;
+		goto done;
+	}
+
+	*handle = apr_register("ADSP",
+			       svc, qdsp_apr_callback,
+			       ((src_port) << 8 | 0x0001),
+			       prtd);
+
+	if (*handle == NULL) {
+		pr_err("%s: Unable to register %s\n",
+		       __func__, svc);
+
+		ret = -EFAULT;
+		goto done;
+	}
+	pr_debug("%s: Register %s successful\n",
+		__func__, svc);
+done:
+	return ret;
+}
+
+static int voice_svc_dereg(char *svc, void **handle)
+{
+	int ret = 0;
+
+	pr_debug("%s\n", __func__);
+
+	if (handle == NULL) {
+		pr_err("%s: handle is NULL\n", __func__);
+		ret = -EINVAL;
+		goto done;
+	}
+
+	if (*handle == NULL) {
+		pr_err("%s: svc handle is NULL\n", __func__);
+		ret = -EINVAL;
+		goto done;
+	}
+
+	ret = apr_deregister(*handle);
+	if (ret) {
+		pr_err("%s: Unable to deregister service %s; error: %d\n",
+		       __func__, svc, ret);
+
+		goto done;
+	}
+	*handle = NULL;
+	pr_debug("%s: deregister %s successful\n", __func__, svc);
+
+done:
+	return ret;
+}
+
+static int process_reg_cmd(struct voice_svc_register *apr_reg_svc,
+			   struct voice_svc_prvt *prtd)
+{
+	int ret = 0;
+	char *svc = NULL;
+	void **handle = NULL;
+
+	pr_debug("%s\n", __func__);
+
+	if (!strcmp(apr_reg_svc->svc_name, VOICE_SVC_MVM_STR)) {
+		svc = VOICE_SVC_MVM_STR;
+		handle = &prtd->apr_q6_mvm;
+	} else if (!strcmp(apr_reg_svc->svc_name, VOICE_SVC_CVS_STR)) {
+		svc = VOICE_SVC_CVS_STR;
+		handle = &prtd->apr_q6_cvs;
+	} else {
+		pr_err("%s: Invalid Service: %.*s\n", __func__,
+			MAX_APR_SERVICE_NAME_LEN, apr_reg_svc->svc_name);
+		ret = -EINVAL;
+		goto done;
+	}
+
+	if (apr_reg_svc->reg_flag) {
+		ret = voice_svc_reg(svc, apr_reg_svc->src_port, prtd,
+				    handle);
+	} else {
+		ret = voice_svc_dereg(svc, handle);
+	}
+
+done:
+	return ret;
+}
+
+static ssize_t voice_svc_write(struct file *file, const char __user *buf,
+			       size_t count, loff_t *ppos)
+{
+	int ret = 0;
+	struct voice_svc_prvt *prtd;
+	struct voice_svc_write_msg *data = NULL;
+	uint32_t cmd;
+	struct voice_svc_register *register_data = NULL;
+	struct voice_svc_cmd_request *request_data = NULL;
+	uint32_t request_payload_size;
+
+	pr_debug("%s\n", __func__);
+
+	/*
+	 * Check if enough memory is allocated to parse the message type.
+	 * Will check there is enough to hold the payload later.
+	 */
+	if (count >= sizeof(struct voice_svc_write_msg)) {
+		data = kmalloc(count, GFP_KERNEL);
+	} else {
+		pr_debug("%s: invalid data size\n", __func__);
+		ret = -EINVAL;
+		goto done;
+	}
+
+	if (data == NULL) {
+		pr_err("%s: data kmalloc failed.\n", __func__);
+
+		ret = -ENOMEM;
+		goto done;
+	}
+
+	ret = copy_from_user(data, buf, count);
+	if (ret) {
+		pr_err("%s: copy_from_user failed %d\n", __func__, ret);
+
+		ret = -EPERM;
+		goto done;
+	}
+
+	cmd = data->msg_type;
+	prtd = (struct voice_svc_prvt *) file->private_data;
+	if (prtd == NULL) {
+		pr_err("%s: prtd is NULL\n", __func__);
+
+		ret = -EINVAL;
+		goto done;
+	}
+
+	switch (cmd) {
+	case MSG_REGISTER:
+		/*
+		 * Check that count reflects the expected size to ensure
+		 * sufficient memory was allocated. Since voice_svc_register
+		 * has a static size, this should be exact.
+		 */
+		if (count == (sizeof(struct voice_svc_write_msg) +
+			      sizeof(struct voice_svc_register))) {
+			register_data =
+				(struct voice_svc_register *)data->payload;
+			if (register_data == NULL) {
+				pr_err("%s: register data is NULL", __func__);
+				ret = -EINVAL;
+				goto done;
+			}
+			ret = process_reg_cmd(register_data, prtd);
+			if (!ret)
+				ret = count;
+		} else {
+			pr_err("%s: invalid data payload size for register command\n",
+				__func__);
+			ret = -EINVAL;
+			goto done;
+		}
+		break;
+	case MSG_REQUEST:
+		/*
+		 * Check that count reflects the expected size to ensure
+		 * sufficient memory was allocated. Since voice_svc_cmd_request
+		 * has a variable size, check the minimum value count must be to
+		 * parse the message request then check the minimum size to hold
+		 * the payload of the message request.
+		 */
+		if (count >= (sizeof(struct voice_svc_write_msg) +
+			      sizeof(struct voice_svc_cmd_request))) {
+			request_data =
+				(struct voice_svc_cmd_request *)data->payload;
+			if (request_data == NULL) {
+				pr_err("%s: request data is NULL", __func__);
+				ret = -EINVAL;
+				goto done;
+			}
+
+			request_payload_size = request_data->payload_size;
+
+			if (count >= (sizeof(struct voice_svc_write_msg) +
+				      sizeof(struct voice_svc_cmd_request) +
+				      request_payload_size)) {
+				ret = voice_svc_send_req(request_data, prtd);
+				if (!ret)
+					ret = count;
+			} else {
+				pr_err("%s: invalid request payload size\n",
+					__func__);
+				ret = -EINVAL;
+				goto done;
+			}
+		} else {
+			pr_err("%s: invalid data payload size for request command\n",
+				__func__);
+			ret = -EINVAL;
+			goto done;
+		}
+		break;
+	default:
+		pr_debug("%s: Invalid command: %u\n", __func__, cmd);
+		ret = -EINVAL;
+	}
+
+done:
+	kfree(data);
+	return ret;
+}
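+
+/*
+ * Expected write(2) layout for the checks above (a sketch, assuming the
+ * uapi definitions in <sound/voice_svc.h>; N is the APR payload length):
+ *
+ *	struct voice_svc_write_msg hdr;   // hdr.msg_type = MSG_REQUEST
+ *	struct voice_svc_cmd_request req; // starts at hdr.payload
+ *	uint8_t apr_payload[N];           // req.payload_size = N
+ *
+ * so a valid request write needs count >= sizeof(hdr) + sizeof(req) + N,
+ * while a register write must have count == sizeof(hdr) +
+ * sizeof(struct voice_svc_register) exactly.
+ */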
+
+static ssize_t voice_svc_read(struct file *file, char __user *arg,
+			      size_t count, loff_t *ppos)
+{
+	int ret = 0;
+	struct voice_svc_prvt *prtd;
+	struct apr_response_list *resp;
+	unsigned long spin_flags;
+	int size;
+
+	pr_debug("%s\n", __func__);
+
+	prtd = (struct voice_svc_prvt *)file->private_data;
+	if (prtd == NULL) {
+		pr_err("%s: prtd is NULL\n", __func__);
+
+		ret = -EINVAL;
+		goto done;
+	}
+
+	mutex_lock(&prtd->response_mutex_lock);
+	spin_lock_irqsave(&prtd->response_lock, spin_flags);
+
+	if (list_empty(&prtd->response_queue)) {
+		spin_unlock_irqrestore(&prtd->response_lock, spin_flags);
+		pr_debug("%s: wait for a response\n", __func__);
+
+		ret = wait_event_interruptible_timeout(prtd->response_wait,
+					!list_empty(&prtd->response_queue),
+					msecs_to_jiffies(TIMEOUT_MS));
+		if (ret == 0) {
+			pr_debug("%s: Read timeout\n", __func__);
+
+			ret = -ETIMEDOUT;
+			goto unlock;
+		} else if (ret > 0 && !list_empty(&prtd->response_queue)) {
+			pr_debug("%s: Interrupt recieved for response\n",
+				 __func__);
+		} else if (ret < 0) {
+			pr_debug("%s: Interrupted by SIGNAL %d\n",
+				 __func__, ret);
+
+			goto unlock;
+		}
+
+		spin_lock_irqsave(&prtd->response_lock, spin_flags);
+	}
+
+	resp = list_first_entry(&prtd->response_queue,
+				struct apr_response_list, list);
+
+	spin_unlock_irqrestore(&prtd->response_lock, spin_flags);
+
+	size = resp->resp.payload_size +
+	       sizeof(struct voice_svc_cmd_response);
+
+	if (count < size) {
+		pr_err("%s: Invalid payload size %zd, %d\n",
+		       __func__, count, size);
+
+		ret = -ENOMEM;
+		goto unlock;
+	}
+
+	if (!access_ok(VERIFY_WRITE, arg, size)) {
+		pr_err("%s: Access denied to write\n",
+		       __func__);
+
+		ret = -EPERM;
+		goto unlock;
+	}
+
+	ret = copy_to_user(arg, &resp->resp,
+			 sizeof(struct voice_svc_cmd_response) +
+			 resp->resp.payload_size);
+	if (ret) {
+		pr_err("%s: copy_to_user failed %d\n", __func__, ret);
+
+		ret = -EPERM;
+		goto unlock;
+	}
+
+	spin_lock_irqsave(&prtd->response_lock, spin_flags);
+
+	list_del(&resp->list);
+	prtd->response_count--;
+	kfree(resp);
+
+	spin_unlock_irqrestore(&prtd->response_lock,
+				spin_flags);
+
+	ret = count;
+
+unlock:
+	mutex_unlock(&prtd->response_mutex_lock);
+done:
+	return ret;
+}
+
+static int voice_svc_dummy_reg(void)
+{
+	uint32_t src_port = APR_MAX_PORTS - 1;
+
+	pr_debug("%s\n", __func__);
+	dummy_q6_mvm = apr_register("ADSP", "MVM",
+				qdsp_dummy_apr_callback,
+				src_port,
+				NULL);
+	if (dummy_q6_mvm == NULL) {
+		pr_err("%s: Unable to register dummy MVM\n", __func__);
+		goto err;
+	}
+
+	dummy_q6_cvs = apr_register("ADSP", "CVS",
+				qdsp_dummy_apr_callback,
+				src_port,
+				NULL);
+	if (dummy_q6_cvs == NULL) {
+		pr_err("%s: Unable to register dummy CVS\n", __func__);
+		goto err;
+	}
+	return 0;
+err:
+	if (dummy_q6_mvm != NULL) {
+		apr_deregister(dummy_q6_mvm);
+		dummy_q6_mvm = NULL;
+	}
+	return -EINVAL;
+}
+
+static int voice_svc_dummy_dereg(void)
+{
+	pr_debug("%s\n", __func__);
+	if (dummy_q6_mvm != NULL) {
+		apr_deregister(dummy_q6_mvm);
+		dummy_q6_mvm = NULL;
+	}
+
+	if (dummy_q6_cvs != NULL) {
+		apr_deregister(dummy_q6_cvs);
+		dummy_q6_cvs = NULL;
+	}
+	return 0;
+}
+
+static int voice_svc_open(struct inode *inode, struct file *file)
+{
+	struct voice_svc_prvt *prtd = NULL;
+
+	pr_debug("%s\n", __func__);
+
+	prtd = kzalloc(sizeof(struct voice_svc_prvt), GFP_KERNEL);
+	if (prtd == NULL) {
+		pr_err("%s: kzalloc failed\n", __func__);
+		return -ENOMEM;
+	}
+
+	prtd->apr_q6_cvs = NULL;
+	prtd->apr_q6_mvm = NULL;
+	prtd->response_count = 0;
+	INIT_LIST_HEAD(&prtd->response_queue);
+	init_waitqueue_head(&prtd->response_wait);
+	spin_lock_init(&prtd->response_lock);
+	mutex_init(&prtd->response_mutex_lock);
+	file->private_data = (void *)prtd;
+
+	is_released = 0;
+	/* Current APR implementation doesn't support session based
+	 * multiple service registrations. The apr_deregister()
+	 * function sets the destination and client IDs to zero, if
+	 * deregister is called for a single service instance.
+	 * To avoid this, register for additional services.
+	 */
+	if (!reg_dummy_sess) {
+		voice_svc_dummy_reg();
+		reg_dummy_sess = 1;
+	}
+	return 0;
+}
+
+static int voice_svc_release(struct inode *inode, struct file *file)
+{
+	int ret = 0;
+	struct apr_response_list *resp = NULL;
+	unsigned long spin_flags;
+	struct voice_svc_prvt *prtd = NULL;
+	char *svc_name = NULL;
+	void **handle = NULL;
+
+	pr_debug("%s\n", __func__);
+
+	prtd = (struct voice_svc_prvt *)file->private_data;
+	if (prtd == NULL) {
+		pr_err("%s: prtd is NULL\n", __func__);
+
+		ret = -EINVAL;
+		goto done;
+	}
+
+	mutex_lock(&prtd->response_mutex_lock);
+	if (reg_dummy_sess) {
+		voice_svc_dummy_dereg();
+		reg_dummy_sess = 0;
+	}
+	if (prtd->apr_q6_cvs != NULL) {
+		svc_name = VOICE_SVC_CVS_STR;
+		handle = &prtd->apr_q6_cvs;
+		ret = voice_svc_dereg(svc_name, handle);
+		if (ret)
+			pr_err("%s: Failed to dereg CVS %d\n", __func__, ret);
+	}
+
+	if (prtd->apr_q6_mvm != NULL) {
+		svc_name = VOICE_SVC_MVM_STR;
+		handle = &prtd->apr_q6_mvm;
+		ret = voice_svc_dereg(svc_name, handle);
+		if (ret)
+			pr_err("%s: Failed to dereg MVM %d\n", __func__, ret);
+	}
+
+	spin_lock_irqsave(&prtd->response_lock, spin_flags);
+
+	while (!list_empty(&prtd->response_queue)) {
+		pr_debug("%s: Remove item from response queue\n", __func__);
+
+		resp = list_first_entry(&prtd->response_queue,
+					struct apr_response_list, list);
+		list_del(&resp->list);
+		prtd->response_count--;
+		kfree(resp);
+	}
+
+	spin_unlock_irqrestore(&prtd->response_lock, spin_flags);
+	mutex_unlock(&prtd->response_mutex_lock);
+
+	mutex_destroy(&prtd->response_mutex_lock);
+
+	spin_lock(&voicesvc_lock);
+	kfree(file->private_data);
+	file->private_data = NULL;
+	is_released = 1;
+	spin_unlock(&voicesvc_lock);
+done:
+	return ret;
+}
+
+static const struct file_operations voice_svc_fops = {
+	.owner =                THIS_MODULE,
+	.open =                 voice_svc_open,
+	.read =                 voice_svc_read,
+	.write =                voice_svc_write,
+	.release =              voice_svc_release,
+};
+
+static int voice_svc_probe(struct platform_device *pdev)
+{
+	int ret = 0;
+
+	pr_debug("%s\n", __func__);
+
+	voice_svc_dev = devm_kzalloc(&pdev->dev,
+				  sizeof(struct voice_svc_device), GFP_KERNEL);
+	if (!voice_svc_dev) {
+		pr_err("%s: kzalloc failed\n", __func__);
+		ret = -ENOMEM;
+		goto done;
+	}
+
+	ret = alloc_chrdev_region(&device_num, 0, MINOR_NUMBER,
+				  VOICE_SVC_DRIVER_NAME);
+	if (ret) {
+		pr_err("%s: Failed to alloc chrdev\n", __func__);
+		ret = -ENODEV;
+		goto done;
+	}
+
+	voice_svc_dev->major = MAJOR(device_num);
+	voice_svc_class = class_create(THIS_MODULE, VOICE_SVC_DRIVER_NAME);
+	if (IS_ERR(voice_svc_class)) {
+		ret = PTR_ERR(voice_svc_class);
+		pr_err("%s: Failed to create class; err = %d\n", __func__,
+			ret);
+		goto class_err;
+	}
+
+	voice_svc_dev->dev = device_create(voice_svc_class, NULL, device_num,
+					   NULL, VOICE_SVC_DRIVER_NAME);
+	if (IS_ERR(voice_svc_dev->dev)) {
+		ret = PTR_ERR(voice_svc_dev->dev);
+		pr_err("%s: Failed to create device; err = %d\n", __func__,
+			ret);
+		goto dev_err;
+	}
+
+	voice_svc_dev->cdev = cdev_alloc();
+	if (!voice_svc_dev->cdev) {
+		pr_err("%s: Failed to alloc cdev\n", __func__);
+		ret = -ENOMEM;
+		goto cdev_alloc_err;
+	}
+
+	cdev_init(voice_svc_dev->cdev, &voice_svc_fops);
+	ret = cdev_add(voice_svc_dev->cdev, device_num, MINOR_NUMBER);
+	if (ret) {
+		pr_err("%s: Failed to register chrdev; err = %d\n", __func__,
+			ret);
+		goto add_err;
+	}
+	pr_debug("%s: Device created\n", __func__);
+	spin_lock_init(&voicesvc_lock);
+	goto done;
+
+add_err:
+	cdev_del(voice_svc_dev->cdev);
+cdev_alloc_err:
+	device_destroy(voice_svc_class, device_num);
+dev_err:
+	class_destroy(voice_svc_class);
+class_err:
+	unregister_chrdev_region(device_num, MINOR_NUMBER);
+done:
+	return ret;
+}
+
+static int voice_svc_remove(struct platform_device *pdev)
+{
+	pr_debug("%s\n", __func__);
+
+	cdev_del(voice_svc_dev->cdev);
+	kfree(voice_svc_dev->cdev);
+	device_destroy(voice_svc_class, device_num);
+	class_destroy(voice_svc_class);
+	unregister_chrdev_region(device_num, MINOR_NUMBER);
+
+	return 0;
+}
+
+static struct of_device_id voice_svc_of_match[] = {
+	{.compatible = "qcom,msm-voice-svc"},
+	{ }
+};
+MODULE_DEVICE_TABLE(of, voice_svc_of_match);
+
+static struct platform_driver voice_svc_driver = {
+	.probe          = voice_svc_probe,
+	.remove         = voice_svc_remove,
+	.driver         = {
+		.name   = "msm-voice-svc",
+		.owner  = THIS_MODULE,
+		.of_match_table = voice_svc_of_match,
+	},
+};
+
+static int __init voice_svc_init(void)
+{
+	pr_debug("%s\n", __func__);
+
+	return platform_driver_register(&voice_svc_driver);
+}
+
+static void __exit voice_svc_exit(void)
+{
+	pr_debug("%s\n", __func__);
+
+	platform_driver_unregister(&voice_svc_driver);
+}
+
+module_init(voice_svc_init);
+module_exit(voice_svc_exit);
+
+MODULE_DESCRIPTION("Soc QDSP6v2 Voice Service driver");
+MODULE_LICENSE("GPL v2");
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/soc/qcom/qmi_interface.c	2019-10-29 09:26:24.821214706 +0100
@@ -0,0 +1,2255 @@
+/* Copyright (c) 2012-2015, 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/io.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/list.h>
+#include <linux/socket.h>
+#include <linux/gfp.h>
+#include <linux/qmi_encdec.h>
+#include <linux/workqueue.h>
+#include <linux/mutex.h>
+#include <linux/hashtable.h>
+#include <linux/ipc_router.h>
+#include <linux/ipc_logging.h>
+
+#include <soc/qcom/msm_qmi_interface.h>
+
+#include "qmi_interface_priv.h"
+
+#define BUILD_INSTANCE_ID(vers, ins) (((vers) & 0xFF) | (((ins) & 0xFF) << 8))
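+/*
+ * e.g. BUILD_INSTANCE_ID(0x01, 0x02) == 0x0201: version in the low byte,
+ * instance in the high byte (illustrative values).
+ */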
+#define LOOKUP_MASK 0xFFFFFFFF
+#define MAX_WQ_NAME_LEN 20
+#define QMI_REQ_RESP_LOG_PAGES 3
+#define QMI_IND_LOG_PAGES 2
+#define QMI_REQ_RESP_LOG(buf...) \
+do { \
+	if (qmi_req_resp_log_ctx) { \
+		ipc_log_string(qmi_req_resp_log_ctx, buf); \
+	} \
+} while (0)
+
+#define QMI_IND_LOG(buf...) \
+do { \
+	if (qmi_ind_log_ctx) { \
+		ipc_log_string(qmi_ind_log_ctx, buf); \
+	} \
+} while (0)
+
+static LIST_HEAD(svc_event_nb_list);
+static DEFINE_MUTEX(svc_event_nb_list_lock);
+
+struct qmi_notify_event_work {
+	unsigned event;
+	void *oob_data;
+	size_t oob_data_len;
+	void *priv;
+	struct work_struct work;
+};
+static void qmi_notify_event_worker(struct work_struct *work);
+
+#define HANDLE_HASH_TBL_SZ 1
+static DEFINE_HASHTABLE(handle_hash_tbl, HANDLE_HASH_TBL_SZ);
+static DEFINE_MUTEX(handle_hash_tbl_lock);
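+/*
+ * Note: with a 1-bit table and a constant key of 0 (see qmi_event_notify),
+ * every handle hashes to the same bucket, so the table effectively acts as
+ * a mutex-protected list of live handles used to validate priv pointers.
+ */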
+
+struct elem_info qmi_response_type_v01_ei[] = {
+	{
+		.data_type	= QMI_SIGNED_2_BYTE_ENUM,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint16_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct qmi_response_type_v01,
+					   result),
+		.ei_array	= NULL,
+	},
+	{
+		.data_type      = QMI_SIGNED_2_BYTE_ENUM,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint16_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+		.offset         = offsetof(struct qmi_response_type_v01,
+					   error),
+		.ei_array       = NULL,
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.elem_len	= 0,
+		.elem_size	= 0,
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= 0,
+		.ei_array	= NULL,
+	},
+};
+EXPORT_SYMBOL(qmi_response_type_v01_ei);
+
+struct elem_info qmi_error_resp_type_v01_ei[] = {
+	{
+		.data_type = QMI_STRUCT,
+		.elem_len  = 1,
+		.elem_size = sizeof(struct qmi_response_type_v01),
+		.is_array  = NO_ARRAY,
+		.tlv_type  = 0x02,
+		.offset    = 0,
+		.ei_array  = qmi_response_type_v01_ei,
+	},
+	{
+		.data_type = QMI_EOTI,
+		.elem_len  = 0,
+		.elem_size = 0,
+		.is_array  = NO_ARRAY,
+		.tlv_type  = 0x00,
+		.offset    = 0,
+		.ei_array  = NULL,
+	},
+};
+
+struct msg_desc err_resp_desc = {
+	.max_msg_len = 7,
+	.msg_id = 0,
+	.ei_array = qmi_error_resp_type_v01_ei,
+};
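+
+/*
+ * The max_msg_len of 7 above follows from the TLV encoding (assuming the
+ * standard 1-byte type + 2-byte length header): 3 header bytes plus the
+ * 4-byte qmi_response_type_v01 payload (2-byte result + 2-byte error).
+ */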
+
+static DEFINE_MUTEX(qmi_svc_event_notifier_lock);
+static struct msm_ipc_port *qmi_svc_event_notifier_port;
+static struct workqueue_struct *qmi_svc_event_notifier_wq;
+static void qmi_svc_event_notifier_init(void);
+static void qmi_svc_event_worker(struct work_struct *work);
+static struct svc_event_nb *find_svc_event_nb(uint32_t service_id,
+					      uint32_t instance_id);
+DECLARE_WORK(qmi_svc_event_work, qmi_svc_event_worker);
+static void svc_resume_tx_worker(struct work_struct *work);
+static void clean_txn_info(struct qmi_handle *handle);
+static void *qmi_req_resp_log_ctx;
+static void *qmi_ind_log_ctx;
+
+/**
+ * qmi_log() - Pass log data to IPC logging framework
+ * @handle:	The pointer to the qmi_handle
+ * @cntl_flag:	Indicates the type (request/response/indication) of the message
+ * @txn_id:	Transaction ID of the message.
+ * @msg_id:	Message ID of the incoming/outgoing message.
+ * @msg_len:	Total size of the message.
+ *
+ * This function builds the data that would be passed on to the IPC logging
+ * framework. The data passed corresponds to the information
+ * that is exchanged between the IPC Router and kernel modules during
+ * request/response/indication transactions.
+ */
+static void qmi_log(struct qmi_handle *handle,
+			unsigned char cntl_flag, uint16_t txn_id,
+			uint16_t msg_id, uint16_t msg_len)
+{
+	uint32_t service_id = 0;
+	const char *ops_type = NULL;
+
+	if (handle->handle_type == QMI_CLIENT_HANDLE) {
+		service_id = handle->dest_service_id;
+		if (cntl_flag == QMI_REQUEST_CONTROL_FLAG)
+			ops_type = "TX";
+		else if (cntl_flag == QMI_INDICATION_CONTROL_FLAG ||
+			cntl_flag == QMI_RESPONSE_CONTROL_FLAG)
+			ops_type = "RX";
+	} else if (handle->handle_type == QMI_SERVICE_HANDLE) {
+		service_id = handle->svc_ops_options->service_id;
+		if (cntl_flag == QMI_REQUEST_CONTROL_FLAG)
+			ops_type = "RX";
+		else if (cntl_flag == QMI_INDICATION_CONTROL_FLAG ||
+			cntl_flag == QMI_RESPONSE_CONTROL_FLAG)
+			ops_type = "TX";
+	}
+
+	/*
+	 * IPC logging format is as below:
+	 * <Type of module> (CLNT or SERV)	:
+	 * <Operation type> (TX or RX)		:
+	 * <Control flag> (Req/Resp/Ind)	:
+	 * <Transaction ID>			:
+	 * <Message ID>				:
+	 * <Message length>			:
+	 * <Service ID>				:
+	 */
+	if (qmi_req_resp_log_ctx &&
+		((cntl_flag == QMI_REQUEST_CONTROL_FLAG) ||
+		(cntl_flag == QMI_RESPONSE_CONTROL_FLAG))) {
+		QMI_REQ_RESP_LOG("%s %s CF:%x TI:%x MI:%x ML:%x SvcId: %x",
+		(handle->handle_type == QMI_CLIENT_HANDLE ? "QCCI" : "QCSI"),
+		ops_type, cntl_flag, txn_id, msg_id, msg_len, service_id);
+	} else if (qmi_ind_log_ctx &&
+		(cntl_flag == QMI_INDICATION_CONTROL_FLAG)) {
+		QMI_IND_LOG("%s %s CF:%x TI:%x MI:%x ML:%x SvcId: %x",
+		(handle->handle_type == QMI_CLIENT_HANDLE ? "QCCI" : "QCSI"),
+		ops_type, cntl_flag, txn_id, msg_id, msg_len, service_id);
+	}
+}
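+
+/*
+ * An example log line produced above (illustrative values):
+ *
+ *	QCCI TX CF:0 TI:1 MI:20 ML:7 SvcId: 2a
+ *
+ * i.e. a client handle transmitting a request (control flag 0),
+ * transaction 1, message ID 0x20, 7 bytes long, to service 0x2a.
+ */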
+
+/**
+ * add_req_handle() - Create and Add a request handle to the connection
+ * @conn_h: Connection handle over which the request has arrived.
+ * @msg_id: Message ID of the request.
+ * @txn_id: Transaction ID of the request.
+ *
+ * @return: Pointer to request handle on success, NULL on error.
+ *
+ * This function creates a request handle to track the request that arrives
+ * on a connection. This function then adds it to the connection's request
+ * handle list.
+ */
+static struct req_handle *add_req_handle(struct qmi_svc_clnt_conn *conn_h,
+					 uint16_t msg_id, uint16_t txn_id)
+{
+	struct req_handle *req_h;
+
+	req_h = kmalloc(sizeof(struct req_handle), GFP_KERNEL);
+	if (!req_h) {
+		pr_err("%s: Error allocating req_h\n", __func__);
+		return NULL;
+	}
+
+	req_h->conn_h = conn_h;
+	req_h->msg_id = msg_id;
+	req_h->txn_id = txn_id;
+	list_add_tail(&req_h->list, &conn_h->req_handle_list);
+	return req_h;
+}
+
+/**
+ * verify_req_handle() - Verify the validity of a request handle
+ * @conn_h: Connection handle over which the request has arrived.
+ * @req_h: Request handle to be verified.
+ *
+ * @return: true on success, false on failure.
+ *
+ * This function is used to check if the request handle is present in
+ * the connection handle.
+ */
+static bool verify_req_handle(struct qmi_svc_clnt_conn *conn_h,
+			      struct req_handle *req_h)
+{
+	struct req_handle *temp_req_h;
+
+	list_for_each_entry(temp_req_h, &conn_h->req_handle_list, list) {
+		if (temp_req_h == req_h)
+			return true;
+	}
+	return false;
+}
+
+/**
+ * rmv_req_handle() - Remove and destroy the request handle
+ * @req_h: Request handle to be removed and destroyed.
+ *
+ * @return: 0.
+ */
+static int rmv_req_handle(struct req_handle *req_h)
+{
+	list_del(&req_h->list);
+	kfree(req_h);
+	return 0;
+}
+
+/**
+ * add_svc_clnt_conn() - Create and add a connection handle to a service
+ * @handle: QMI handle in which the service is hosted.
+ * @clnt_addr: Address of the client connecting with the service.
+ * @clnt_addr_len: Length of the client address.
+ *
+ * @return: Pointer to connection handle on success, NULL on error.
+ *
+ * This function is used to create a connection handle that binds the service
+ * with a client. This function is called on a service's QMI handle when a
+ * client sends its first message to the service.
+ *
+ * This function must be called with handle->handle_lock locked.
+ */
+static struct qmi_svc_clnt_conn *add_svc_clnt_conn(
+	struct qmi_handle *handle, void *clnt_addr, size_t clnt_addr_len)
+{
+	struct qmi_svc_clnt_conn *conn_h;
+
+	conn_h = kmalloc(sizeof(struct qmi_svc_clnt_conn), GFP_KERNEL);
+	if (!conn_h) {
+		pr_err("%s: Error allocating conn_h\n", __func__);
+		return NULL;
+	}
+
+	conn_h->clnt_addr = kmalloc(clnt_addr_len, GFP_KERNEL);
+	if (!conn_h->clnt_addr) {
+		pr_err("%s: Error allocating clnt_addr\n", __func__);
+		kfree(conn_h);
+		return NULL;
+	}
+
+	INIT_LIST_HEAD(&conn_h->list);
+	conn_h->svc_handle = handle;
+	memcpy(conn_h->clnt_addr, clnt_addr, clnt_addr_len);
+	conn_h->clnt_addr_len = clnt_addr_len;
+	INIT_LIST_HEAD(&conn_h->req_handle_list);
+	INIT_DELAYED_WORK(&conn_h->resume_tx_work, svc_resume_tx_worker);
+	INIT_LIST_HEAD(&conn_h->pending_txn_list);
+	mutex_init(&conn_h->pending_txn_lock);
+	list_add_tail(&conn_h->list, &handle->conn_list);
+	return conn_h;
+}
+
+/**
+ * find_svc_clnt_conn() - Find the existence of a client<->service connection
+ * @handle: Service's QMI handle.
+ * @clnt_addr: Address of the client to be present in the connection.
+ * @clnt_addr_len: Length of the client address.
+ *
+ * @return: Pointer to connection handle if the matching connection is found,
+ *          NULL if the connection is not found.
+ *
+ * This function is used to find the existence of a client<->service connection
+ * handle in a service's QMI handle. This function tries to match the client
+ * address in the existing connections.
+ *
+ * This function must be called with handle->handle_lock locked.
+ */
+static struct qmi_svc_clnt_conn *find_svc_clnt_conn(
+	struct qmi_handle *handle, void *clnt_addr, size_t clnt_addr_len)
+{
+	struct qmi_svc_clnt_conn *conn_h;
+
+	list_for_each_entry(conn_h, &handle->conn_list, list) {
+		if (!memcmp(conn_h->clnt_addr, clnt_addr, clnt_addr_len))
+			return conn_h;
+	}
+	return NULL;
+}
+
+/**
+ * verify_svc_clnt_conn() - Verify the existence of a connection handle
+ * @handle: Service's QMI handle.
+ * @conn_h: Connection handle to be verified.
+ *
+ * @return: true on success, false on failure.
+ *
+ * This function is used to verify the existence of a connection in the
+ * connection list maintained by the service.
+ *
+ * This function must be called with handle->handle_lock locked.
+ */
+static bool verify_svc_clnt_conn(struct qmi_handle *handle,
+				 struct qmi_svc_clnt_conn *conn_h)
+{
+	struct qmi_svc_clnt_conn *temp_conn_h;
+
+	list_for_each_entry(temp_conn_h, &handle->conn_list, list) {
+		if (temp_conn_h == conn_h)
+			return true;
+	}
+	return false;
+}
+
+/**
+ * rmv_svc_clnt_conn() - Remove the connection handle info from the service
+ * @conn_h: Connection handle to be removed.
+ *
+ * This function removes a connection handle from a service's QMI handle.
+ *
+ * This function must be called with handle->handle_lock locked.
+ */
+static void rmv_svc_clnt_conn(struct qmi_svc_clnt_conn *conn_h)
+{
+	struct req_handle *req_h, *temp_req_h;
+	struct qmi_txn *txn_h, *temp_txn_h;
+
+	list_del(&conn_h->list);
+	list_for_each_entry_safe(req_h, temp_req_h,
+				 &conn_h->req_handle_list, list)
+		rmv_req_handle(req_h);
+
+	mutex_lock(&conn_h->pending_txn_lock);
+	list_for_each_entry_safe(txn_h, temp_txn_h,
+				 &conn_h->pending_txn_list, list) {
+		list_del(&txn_h->list);
+		kfree(txn_h->enc_data);
+		kfree(txn_h);
+	}
+	mutex_unlock(&conn_h->pending_txn_lock);
+	flush_delayed_work(&conn_h->resume_tx_work);
+	kfree(conn_h->clnt_addr);
+	kfree(conn_h);
+}
+
+/**
+ * qmi_event_notify() - Notification function to QMI client/service interface
+ * @event: Type of event that gets notified.
+ * @oob_data: Any out-of-band data associated with event.
+ * @oob_data_len: Length of the out-of-band data, if any.
+ * @priv: Private data.
+ *
+ * This function is called by the underlying transport to notify the QMI
+ * interface regarding any incoming event. This function is registered by
+ * QMI interface when it opens a port/handle with the underlying transport.
+ */
+static void qmi_event_notify(unsigned event, void *oob_data,
+			     size_t oob_data_len, void *priv)
+{
+	struct qmi_notify_event_work *notify_work;
+	struct qmi_handle *handle;
+	uint32_t key = 0;
+
+	notify_work = kmalloc(sizeof(struct qmi_notify_event_work),
+			      GFP_KERNEL);
+	if (!notify_work) {
+		pr_err("%s: Couldn't notify %d event to %p\n",
+			__func__, event, priv);
+		return;
+	}
+	notify_work->event = event;
+	if (oob_data) {
+		notify_work->oob_data = kmalloc(oob_data_len, GFP_KERNEL);
+		if (!notify_work->oob_data) {
+			pr_err("%s: Couldn't allocate oob_data @ %d to %p\n",
+				__func__, event, priv);
+			kfree(notify_work);
+			return;
+		}
+		memcpy(notify_work->oob_data, oob_data, oob_data_len);
+	} else {
+		notify_work->oob_data = NULL;
+	}
+	notify_work->oob_data_len = oob_data_len;
+	notify_work->priv = priv;
+	INIT_WORK(&notify_work->work, qmi_notify_event_worker);
+
+	mutex_lock(&handle_hash_tbl_lock);
+	hash_for_each_possible(handle_hash_tbl, handle, handle_hash, key) {
+		if (handle == (struct qmi_handle *)priv) {
+			queue_work(handle->handle_wq,
+				   &notify_work->work);
+			mutex_unlock(&handle_hash_tbl_lock);
+			return;
+		}
+	}
+	mutex_unlock(&handle_hash_tbl_lock);
+	kfree(notify_work->oob_data);
+	kfree(notify_work);
+}
+
+static void qmi_notify_event_worker(struct work_struct *work)
+{
+	struct qmi_notify_event_work *notify_work =
+		container_of(work, struct qmi_notify_event_work, work);
+	struct qmi_handle *handle = (struct qmi_handle *)notify_work->priv;
+	unsigned long flags;
+
+	if (!handle)
+		return;
+
+	mutex_lock(&handle->handle_lock);
+	if (handle->handle_reset) {
+		mutex_unlock(&handle->handle_lock);
+		kfree(notify_work->oob_data);
+		kfree(notify_work);
+		return;
+	}
+
+	switch (notify_work->event) {
+	case IPC_ROUTER_CTRL_CMD_DATA:
+		spin_lock_irqsave(&handle->notify_lock, flags);
+		handle->notify(handle, QMI_RECV_MSG, handle->notify_priv);
+		spin_unlock_irqrestore(&handle->notify_lock, flags);
+		break;
+
+	case IPC_ROUTER_CTRL_CMD_RESUME_TX:
+		if (handle->handle_type == QMI_CLIENT_HANDLE) {
+			queue_delayed_work(handle->handle_wq,
+					   &handle->resume_tx_work,
+					   msecs_to_jiffies(0));
+		} else if (handle->handle_type == QMI_SERVICE_HANDLE) {
+			struct msm_ipc_addr rtx_addr = {0};
+			struct qmi_svc_clnt_conn *conn_h;
+			union rr_control_msg *msg;
+
+			msg = (union rr_control_msg *)notify_work->oob_data;
+			rtx_addr.addrtype = MSM_IPC_ADDR_ID;
+			rtx_addr.addr.port_addr.node_id = msg->cli.node_id;
+			rtx_addr.addr.port_addr.port_id = msg->cli.port_id;
+			conn_h = find_svc_clnt_conn(handle, &rtx_addr,
+						    sizeof(rtx_addr));
+			if (conn_h)
+				queue_delayed_work(handle->handle_wq,
+						   &conn_h->resume_tx_work,
+						   msecs_to_jiffies(0));
+		}
+		break;
+
+	case IPC_ROUTER_CTRL_CMD_NEW_SERVER:
+	case IPC_ROUTER_CTRL_CMD_REMOVE_SERVER:
+	case IPC_ROUTER_CTRL_CMD_REMOVE_CLIENT:
+		queue_delayed_work(handle->handle_wq,
+				   &handle->ctl_work, msecs_to_jiffies(0));
+		break;
+	default:
+		break;
+	}
+	mutex_unlock(&handle->handle_lock);
+	kfree(notify_work->oob_data);
+	kfree(notify_work);
+}
+
+/**
+ * clnt_resume_tx_worker() - Handle the Resume_Tx event
+ * @work: Pointer to the work structure.
+ *
+ * This function handles the resume_tx event for any QMI client that
+ * exists in the kernel space. This function parses the pending_txn_list of
+ * the handle and attempts a send for each transaction in that list.
+ */
+static void clnt_resume_tx_worker(struct work_struct *work)
+{
+	struct delayed_work *rtx_work = to_delayed_work(work);
+	struct qmi_handle *handle =
+		container_of(rtx_work, struct qmi_handle, resume_tx_work);
+	struct qmi_txn *pend_txn, *temp_txn;
+	int ret;
+	uint16_t msg_id;
+
+	mutex_lock(&handle->handle_lock);
+	if (handle->handle_reset)
+		goto out_clnt_handle_rtx;
+
+	list_for_each_entry_safe(pend_txn, temp_txn,
+				&handle->pending_txn_list, list) {
+		ret = msm_ipc_router_send_msg(
+				(struct msm_ipc_port *)handle->src_port,
+				(struct msm_ipc_addr *)handle->dest_info,
+				pend_txn->enc_data, pend_txn->enc_data_len);
+
+		if (ret == -EAGAIN)
+			break;
+		msg_id = ((struct qmi_header *)pend_txn->enc_data)->msg_id;
+		kfree(pend_txn->enc_data);
+		if (ret < 0) {
+			pr_err("%s: Sending transaction %d from port %d failed",
+				__func__, pend_txn->txn_id,
+				((struct msm_ipc_port *)handle->src_port)->
+							this_port.port_id);
+			if (pend_txn->type == QMI_ASYNC_TXN) {
+				pend_txn->resp_cb(pend_txn->handle,
+						msg_id, pend_txn->resp,
+						pend_txn->resp_cb_data,
+						ret);
+				list_del(&pend_txn->list);
+				kfree(pend_txn);
+			} else if (pend_txn->type == QMI_SYNC_TXN) {
+				pend_txn->send_stat = ret;
+				wake_up(&pend_txn->wait_q);
+			}
+		} else {
+			list_del(&pend_txn->list);
+			list_add_tail(&pend_txn->list, &handle->txn_list);
+		}
+	}
+out_clnt_handle_rtx:
+	mutex_unlock(&handle->handle_lock);
+}
+
+/**
+ * svc_resume_tx_worker() - Handle the Resume_Tx event
+ * @work: Pointer to the work structure.
+ *
+ * This function handles the resume_tx event for any QMI service that
+ * exists in the kernel space. This function parses the pending_txn_list of
+ * the connection handle and attempts a send for each transaction in that list.
+ */
+static void svc_resume_tx_worker(struct work_struct *work)
+{
+	struct delayed_work *rtx_work = to_delayed_work(work);
+	struct qmi_svc_clnt_conn *conn_h =
+		container_of(rtx_work, struct qmi_svc_clnt_conn,
+			     resume_tx_work);
+	struct qmi_handle *handle = (struct qmi_handle *)conn_h->svc_handle;
+	struct qmi_txn *pend_txn, *temp_txn;
+	int ret;
+
+	mutex_lock(&conn_h->pending_txn_lock);
+	if (handle->handle_reset)
+		goto out_svc_handle_rtx;
+
+	list_for_each_entry_safe(pend_txn, temp_txn,
+				&conn_h->pending_txn_list, list) {
+		ret = msm_ipc_router_send_msg(
+				(struct msm_ipc_port *)handle->src_port,
+				(struct msm_ipc_addr *)conn_h->clnt_addr,
+				pend_txn->enc_data, pend_txn->enc_data_len);
+
+		if (ret == -EAGAIN)
+			break;
+		if (ret < 0)
+			pr_err("%s: Sending transaction %d from port %d failed\n",
+				__func__, pend_txn->txn_id,
+				((struct msm_ipc_port *)handle->src_port)->
+							this_port.port_id);
+		list_del(&pend_txn->list);
+		kfree(pend_txn->enc_data);
+		kfree(pend_txn);
+	}
+out_svc_handle_rtx:
+	mutex_unlock(&conn_h->pending_txn_lock);
+}
+
+/**
+ * handle_rmv_server() - Handle the server exit event
+ * @handle: Client handle on which the server exit event is received.
+ * @ctl_msg: Information about the server that is exiting.
+ *
+ * @return: 0 on success, standard Linux error codes on failure.
+ *
+ * This function must be called with handle->handle_lock locked.
+ */
+static int handle_rmv_server(struct qmi_handle *handle,
+			     union rr_control_msg *ctl_msg)
+{
+	struct msm_ipc_addr *svc_addr;
+	unsigned long flags;
+
+	if (unlikely(!handle->dest_info))
+		return 0;
+
+	svc_addr = (struct msm_ipc_addr *)(handle->dest_info);
+	if (svc_addr->addr.port_addr.node_id == ctl_msg->srv.node_id &&
+	    svc_addr->addr.port_addr.port_id == ctl_msg->srv.port_id) {
+		/* Wake up any threads waiting for the response */
+		handle->handle_reset = 1;
+		clean_txn_info(handle);
+
+		spin_lock_irqsave(&handle->notify_lock, flags);
+		handle->notify(handle, QMI_SERVER_EXIT, handle->notify_priv);
+		spin_unlock_irqrestore(&handle->notify_lock, flags);
+	}
+	return 0;
+}
+
+/**
+ * handle_rmv_client() - Handle the client exit event
+ * @handle: Service handle on which the client exit event is received.
+ * @ctl_msg: Information about the client that is exiting.
+ *
+ * @return: 0 on success, standard Linux error codes on failure.
+ *
+ * This function must be called with handle->handle_lock locked.
+ */
+static int handle_rmv_client(struct qmi_handle *handle,
+			     union rr_control_msg *ctl_msg)
+{
+	struct qmi_svc_clnt_conn *conn_h;
+	struct msm_ipc_addr clnt_addr = {0};
+	unsigned long flags;
+
+	clnt_addr.addrtype = MSM_IPC_ADDR_ID;
+	clnt_addr.addr.port_addr.node_id = ctl_msg->cli.node_id;
+	clnt_addr.addr.port_addr.port_id = ctl_msg->cli.port_id;
+	conn_h = find_svc_clnt_conn(handle, &clnt_addr, sizeof(clnt_addr));
+	if (conn_h) {
+		spin_lock_irqsave(&handle->notify_lock, flags);
+		handle->svc_ops_options->disconnect_cb(handle, conn_h);
+		spin_unlock_irqrestore(&handle->notify_lock, flags);
+		rmv_svc_clnt_conn(conn_h);
+	}
+	return 0;
+}
+
+/**
+ * handle_ctl_msg() - Worker function to handle the control events
+ * @work: Work item to map the QMI handle.
+ *
+ * This worker function handles incoming control events such as
+ * REMOVE_SERVER/REMOVE_CLIENT. The work item is unique to a handle,
+ * so the worker only handles the control events received on that
+ * specific handle.
+ */
+static void handle_ctl_msg(struct work_struct *work)
+{
+	struct delayed_work *ctl_work = to_delayed_work(work);
+	struct qmi_handle *handle =
+		container_of(ctl_work, struct qmi_handle, ctl_work);
+	unsigned int ctl_msg_len;
+	union rr_control_msg *ctl_msg = NULL;
+	struct msm_ipc_addr src_addr;
+	int rc;
+
+	mutex_lock(&handle->handle_lock);
+	while (1) {
+		if (handle->handle_reset)
+			break;
+
+		/* Read the messages */
+		rc = msm_ipc_router_read_msg(
+			(struct msm_ipc_port *)(handle->ctl_port),
+			&src_addr, (unsigned char **)&ctl_msg, &ctl_msg_len);
+		if (rc == -ENOMSG)
+			break;
+		if (rc < 0) {
+			pr_err("%s: Read failed %d\n", __func__, rc);
+			break;
+		}
+		if (ctl_msg->cmd == IPC_ROUTER_CTRL_CMD_REMOVE_SERVER &&
+		    handle->handle_type == QMI_CLIENT_HANDLE)
+			handle_rmv_server(handle, ctl_msg);
+		else if (ctl_msg->cmd == IPC_ROUTER_CTRL_CMD_REMOVE_CLIENT &&
+			 handle->handle_type == QMI_SERVICE_HANDLE)
+			handle_rmv_client(handle, ctl_msg);
+		kfree(ctl_msg);
+	}
+	mutex_unlock(&handle->handle_lock);
+}
+
+struct qmi_handle *qmi_handle_create(
+	void (*notify)(struct qmi_handle *handle,
+		       enum qmi_event_type event, void *notify_priv),
+	void *notify_priv)
+{
+	struct qmi_handle *temp_handle;
+	struct msm_ipc_port *port_ptr, *ctl_port_ptr;
+	static uint32_t handle_count;
+	char wq_name[MAX_WQ_NAME_LEN];
+
+	temp_handle = kzalloc(sizeof(struct qmi_handle), GFP_KERNEL);
+	if (!temp_handle) {
+		pr_err("%s: Failure allocating client handle\n", __func__);
+		return NULL;
+	}
+	mutex_lock(&handle_hash_tbl_lock);
+	handle_count++;
+	scnprintf(wq_name, MAX_WQ_NAME_LEN, "qmi_hndl%08x", handle_count);
+	hash_add(handle_hash_tbl, &temp_handle->handle_hash, 0);
+	temp_handle->handle_wq = create_singlethread_workqueue(wq_name);
+	mutex_unlock(&handle_hash_tbl_lock);
+	if (!temp_handle->handle_wq) {
+		pr_err("%s: Couldn't create workqueue for handle\n", __func__);
+		goto handle_create_err1;
+	}
+
+	/* Initialize common elements */
+	temp_handle->handle_type = QMI_CLIENT_HANDLE;
+	temp_handle->next_txn_id = 1;
+	mutex_init(&temp_handle->handle_lock);
+	spin_lock_init(&temp_handle->notify_lock);
+	temp_handle->notify = notify;
+	temp_handle->notify_priv = notify_priv;
+	init_waitqueue_head(&temp_handle->reset_waitq);
+	INIT_DELAYED_WORK(&temp_handle->resume_tx_work, clnt_resume_tx_worker);
+	INIT_DELAYED_WORK(&temp_handle->ctl_work, handle_ctl_msg);
+
+	/* Initialize client specific elements */
+	INIT_LIST_HEAD(&temp_handle->txn_list);
+	INIT_LIST_HEAD(&temp_handle->pending_txn_list);
+
+	/* Initialize service specific elements */
+	INIT_LIST_HEAD(&temp_handle->conn_list);
+
+	port_ptr = msm_ipc_router_create_port(qmi_event_notify,
+					      (void *)temp_handle);
+	if (!port_ptr) {
+		pr_err("%s: IPC router port creation failed\n", __func__);
+		goto handle_create_err2;
+	}
+
+	ctl_port_ptr = msm_ipc_router_create_port(qmi_event_notify,
+						  (void *)temp_handle);
+	if (!ctl_port_ptr) {
+		pr_err("%s: IPC router ctl port creation failed\n", __func__);
+		goto handle_create_err3;
+	}
+	msm_ipc_router_bind_control_port(ctl_port_ptr);
+
+	temp_handle->src_port = port_ptr;
+	temp_handle->ctl_port = ctl_port_ptr;
+	return temp_handle;
+
+handle_create_err3:
+	msm_ipc_router_close_port(port_ptr);
+handle_create_err2:
+	destroy_workqueue(temp_handle->handle_wq);
+handle_create_err1:
+	mutex_lock(&handle_hash_tbl_lock);
+	hash_del(&temp_handle->handle_hash);
+	mutex_unlock(&handle_hash_tbl_lock);
+	kfree(temp_handle);
+	return NULL;
+}
+EXPORT_SYMBOL(qmi_handle_create);
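+
+/*
+ * Example usage (illustrative sketch, not part of this driver): a client
+ * would typically create a handle with a notify callback that defers work
+ * to process context, since @notify is invoked with a spinlock held. The
+ * "example_" workqueue and work item names below are hypothetical.
+ *
+ *	static void example_notify(struct qmi_handle *handle,
+ *				   enum qmi_event_type event, void *priv)
+ *	{
+ *		if (event == QMI_RECV_MSG)
+ *			queue_work(example_wq, &example_recv_work);
+ *	}
+ *
+ *	handle = qmi_handle_create(example_notify, NULL);
+ *	if (!handle)
+ *		return -ENOMEM;
+ */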
+
+static void clean_txn_info(struct qmi_handle *handle)
+{
+	struct qmi_txn *txn_handle, *temp_txn_handle, *pend_txn;
+
+	list_for_each_entry_safe(pend_txn, temp_txn_handle,
+				&handle->pending_txn_list, list) {
+		if (pend_txn->type == QMI_ASYNC_TXN) {
+			list_del(&pend_txn->list);
+			pend_txn->resp_cb(pend_txn->handle,
+					((struct qmi_header *)
+					pend_txn->enc_data)->msg_id,
+					pend_txn->resp, pend_txn->resp_cb_data,
+					-ENETRESET);
+			kfree(pend_txn->enc_data);
+			kfree(pend_txn);
+		} else if (pend_txn->type == QMI_SYNC_TXN) {
+			kfree(pend_txn->enc_data);
+			wake_up(&pend_txn->wait_q);
+		}
+	}
+	list_for_each_entry_safe(txn_handle, temp_txn_handle,
+				 &handle->txn_list, list) {
+		if (txn_handle->type == QMI_ASYNC_TXN) {
+			list_del(&txn_handle->list);
+			kfree(txn_handle);
+		} else if (txn_handle->type == QMI_SYNC_TXN) {
+			wake_up(&txn_handle->wait_q);
+		}
+	}
+}
+
+int qmi_handle_destroy(struct qmi_handle *handle)
+{
+	DEFINE_WAIT(wait);
+
+	if (!handle)
+		return -EINVAL;
+
+	mutex_lock(&handle_hash_tbl_lock);
+	hash_del(&handle->handle_hash);
+	mutex_unlock(&handle_hash_tbl_lock);
+
+	mutex_lock(&handle->handle_lock);
+	handle->handle_reset = 1;
+	clean_txn_info(handle);
+	msm_ipc_router_close_port((struct msm_ipc_port *)(handle->ctl_port));
+	msm_ipc_router_close_port((struct msm_ipc_port *)(handle->src_port));
+	mutex_unlock(&handle->handle_lock);
+	flush_workqueue(handle->handle_wq);
+	destroy_workqueue(handle->handle_wq);
+
+	mutex_lock(&handle->handle_lock);
+	while (!list_empty(&handle->txn_list) ||
+		    !list_empty(&handle->pending_txn_list)) {
+		prepare_to_wait(&handle->reset_waitq, &wait,
+				TASK_UNINTERRUPTIBLE);
+		mutex_unlock(&handle->handle_lock);
+		schedule();
+		mutex_lock(&handle->handle_lock);
+		finish_wait(&handle->reset_waitq, &wait);
+	}
+	mutex_unlock(&handle->handle_lock);
+	kfree(handle->dest_info);
+	kfree(handle);
+	return 0;
+}
+EXPORT_SYMBOL(qmi_handle_destroy);
+
+int qmi_register_ind_cb(struct qmi_handle *handle,
+	void (*ind_cb)(struct qmi_handle *handle,
+		       unsigned int msg_id, void *msg,
+		       unsigned int msg_len, void *ind_cb_priv),
+	void *ind_cb_priv)
+{
+	if (!handle)
+		return -EINVAL;
+
+	mutex_lock(&handle->handle_lock);
+	if (handle->handle_reset) {
+		mutex_unlock(&handle->handle_lock);
+		return -ENETRESET;
+	}
+
+	handle->ind_cb = ind_cb;
+	handle->ind_cb_priv = ind_cb_priv;
+	mutex_unlock(&handle->handle_lock);
+	return 0;
+}
+EXPORT_SYMBOL(qmi_register_ind_cb);
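+
+/*
+ * Example usage (illustrative sketch): registering an indication callback
+ * on a client handle. The callback runs from qmi_recv_msg() context with
+ * @msg pointing past the QMI header; decoding it with a service-specific
+ * msg_desc is left to the client.
+ *
+ *	static void example_ind_cb(struct qmi_handle *handle,
+ *				   unsigned int msg_id, void *msg,
+ *				   unsigned int msg_len, void *priv)
+ *	{
+ *		pr_debug("indication 0x%x, %d bytes\n", msg_id, msg_len);
+ *	}
+ *
+ *	rc = qmi_register_ind_cb(handle, example_ind_cb, NULL);
+ */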
+
+static int qmi_encode_and_send_req(struct qmi_txn **ret_txn_handle,
+	struct qmi_handle *handle, enum txn_type type,
+	struct msg_desc *req_desc, void *req, unsigned int req_len,
+	struct msg_desc *resp_desc, void *resp, unsigned int resp_len,
+	void (*resp_cb)(struct qmi_handle *handle,
+			unsigned int msg_id, void *msg,
+			void *resp_cb_data, int stat),
+	void *resp_cb_data)
+{
+	struct qmi_txn *txn_handle;
+	int rc, encoded_req_len;
+	void *encoded_req;
+
+	if (!handle || !handle->dest_info ||
+	    !req_desc || !resp_desc || !resp)
+		return -EINVAL;
+
+	if ((!req && req_len) || (!req_len && req))
+		return -EINVAL;
+
+	mutex_lock(&handle->handle_lock);
+	if (handle->handle_reset) {
+		mutex_unlock(&handle->handle_lock);
+		return -ENETRESET;
+	}
+
+	/* Allocate Transaction Info */
+	txn_handle = kzalloc(sizeof(struct qmi_txn), GFP_KERNEL);
+	if (!txn_handle) {
+		pr_err("%s: Failed to allocate txn handle\n", __func__);
+		mutex_unlock(&handle->handle_lock);
+		return -ENOMEM;
+	}
+	txn_handle->type = type;
+	INIT_LIST_HEAD(&txn_handle->list);
+	init_waitqueue_head(&txn_handle->wait_q);
+
+	/* Cache the parameters passed by the caller */
+	txn_handle->handle = handle;
+	txn_handle->resp_desc = resp_desc;
+	txn_handle->resp = resp;
+	txn_handle->resp_len = resp_len;
+	txn_handle->resp_received = 0;
+	txn_handle->resp_cb = resp_cb;
+	txn_handle->resp_cb_data = resp_cb_data;
+	txn_handle->enc_data = NULL;
+	txn_handle->enc_data_len = 0;
+
+	/* Encode the request msg */
+	encoded_req_len = req_desc->max_msg_len + QMI_HEADER_SIZE;
+	encoded_req = kmalloc(encoded_req_len, GFP_KERNEL);
+	if (!encoded_req) {
+		pr_err("%s: Failed to allocate req_msg_buf\n", __func__);
+		rc = -ENOMEM;
+		goto encode_and_send_req_err1;
+	}
+	rc = qmi_kernel_encode(req_desc,
+		(void *)(encoded_req + QMI_HEADER_SIZE),
+		req_desc->max_msg_len, req);
+	if (rc < 0) {
+		pr_err("%s: Encode Failure %d\n", __func__, rc);
+		goto encode_and_send_req_err2;
+	}
+	encoded_req_len = rc;
+
+	/* Encode the header & Add to the txn_list */
+	if (!handle->next_txn_id)
+		handle->next_txn_id++;
+	txn_handle->txn_id = handle->next_txn_id++;
+	encode_qmi_header(encoded_req, QMI_REQUEST_CONTROL_FLAG,
+			  txn_handle->txn_id, req_desc->msg_id,
+			  encoded_req_len);
+	encoded_req_len += QMI_HEADER_SIZE;
+
+	/*
+	 * Check if this port has transactions queued to its pending list
+	 * and if there are any pending transactions then add the current
+	 * transaction to the pending list rather than sending it. This avoids
+	 * out-of-order message transfers.
+	 */
+	if (!list_empty(&handle->pending_txn_list)) {
+		rc = -EAGAIN;
+		goto append_pend_txn;
+	}
+
+	list_add_tail(&txn_handle->list, &handle->txn_list);
+	qmi_log(handle, QMI_REQUEST_CONTROL_FLAG, txn_handle->txn_id,
+			req_desc->msg_id, encoded_req_len);
+	/* Send the request */
+	rc = msm_ipc_router_send_msg((struct msm_ipc_port *)(handle->src_port),
+		(struct msm_ipc_addr *)handle->dest_info,
+		encoded_req, encoded_req_len);
+append_pend_txn:
+	if (rc == -EAGAIN) {
+		txn_handle->enc_data = encoded_req;
+		txn_handle->enc_data_len = encoded_req_len;
+		if (list_empty(&handle->pending_txn_list))
+			list_del(&txn_handle->list);
+		list_add_tail(&txn_handle->list, &handle->pending_txn_list);
+		if (ret_txn_handle)
+			*ret_txn_handle = txn_handle;
+		mutex_unlock(&handle->handle_lock);
+		return 0;
+	}
+	if (rc < 0) {
+		pr_err("%s: send_msg failed %d\n", __func__, rc);
+		goto encode_and_send_req_err3;
+	}
+	mutex_unlock(&handle->handle_lock);
+
+	kfree(encoded_req);
+	if (ret_txn_handle)
+		*ret_txn_handle = txn_handle;
+	return 0;
+
+encode_and_send_req_err3:
+	list_del(&txn_handle->list);
+encode_and_send_req_err2:
+	kfree(encoded_req);
+encode_and_send_req_err1:
+	kfree(txn_handle);
+	mutex_unlock(&handle->handle_lock);
+	return rc;
+}
+
+int qmi_send_req_wait(struct qmi_handle *handle,
+		      struct msg_desc *req_desc,
+		      void *req, unsigned int req_len,
+		      struct msg_desc *resp_desc,
+		      void *resp, unsigned int resp_len,
+		      unsigned long timeout_ms)
+{
+	struct qmi_txn *txn_handle = NULL;
+	int rc;
+
+	/* Encode and send the request */
+	rc = qmi_encode_and_send_req(&txn_handle, handle, QMI_SYNC_TXN,
+				     req_desc, req, req_len,
+				     resp_desc, resp, resp_len,
+				     NULL, NULL);
+	if (rc < 0) {
+		pr_err("%s: Error encode & send req: %d\n", __func__, rc);
+		return rc;
+	}
+
+	/* Wait for the response */
+	if (!timeout_ms) {
+		wait_event(txn_handle->wait_q,
+			   (txn_handle->resp_received ||
+			    handle->handle_reset ||
+			   (txn_handle->send_stat < 0)));
+	} else {
+		rc = wait_event_timeout(txn_handle->wait_q,
+				(txn_handle->resp_received ||
+				handle->handle_reset ||
+				(txn_handle->send_stat < 0)),
+				msecs_to_jiffies(timeout_ms));
+		if (rc == 0)
+			rc = -ETIMEDOUT;
+	}
+
+	mutex_lock(&handle->handle_lock);
+	if (!txn_handle->resp_received) {
+		pr_err("%s: Response Wait Error %d\n", __func__, rc);
+		if (handle->handle_reset)
+			rc = -ENETRESET;
+		if (rc >= 0)
+			rc = -EFAULT;
+		if (txn_handle->send_stat < 0)
+			rc = txn_handle->send_stat;
+		goto send_req_wait_err;
+	}
+	rc = 0;
+
+send_req_wait_err:
+	list_del(&txn_handle->list);
+	kfree(txn_handle);
+	wake_up(&handle->reset_waitq);
+	mutex_unlock(&handle->handle_lock);
+	return rc;
+}
+EXPORT_SYMBOL(qmi_send_req_wait);
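+
+/*
+ * Example usage (illustrative sketch): a blocking request/response
+ * exchange. "example_req_desc"/"example_resp_desc" stand in for message
+ * descriptors generated from the service's IDL and are hypothetical, as
+ * are the request/response structures. A timeout of 0 waits forever.
+ *
+ *	struct example_req req = { .ping = 1 };
+ *	struct example_resp resp;
+ *
+ *	rc = qmi_send_req_wait(handle, &example_req_desc,
+ *			       &req, sizeof(req),
+ *			       &example_resp_desc, &resp, sizeof(resp),
+ *			       5000);
+ *	if (rc < 0)
+ *		pr_err("request failed %d\n", rc);
+ */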
+
+int qmi_send_req_nowait(struct qmi_handle *handle,
+			struct msg_desc *req_desc,
+			void *req, unsigned int req_len,
+			struct msg_desc *resp_desc,
+			void *resp, unsigned int resp_len,
+			void (*resp_cb)(struct qmi_handle *handle,
+					unsigned int msg_id, void *msg,
+					void *resp_cb_data, int stat),
+			void *resp_cb_data)
+{
+	return qmi_encode_and_send_req(NULL, handle, QMI_ASYNC_TXN,
+				       req_desc, req, req_len,
+				       resp_desc, resp, resp_len,
+				       resp_cb, resp_cb_data);
+}
+EXPORT_SYMBOL(qmi_send_req_nowait);
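+
+/*
+ * Example usage (illustrative sketch): the asynchronous variant. The
+ * response buffer must stay valid until @resp_cb runs; a negative @stat
+ * reports a send or reset error (e.g. -ENETRESET). Names are
+ * hypothetical.
+ *
+ *	static void example_resp_cb(struct qmi_handle *handle,
+ *				    unsigned int msg_id, void *msg,
+ *				    void *cb_data, int stat)
+ *	{
+ *		if (stat < 0)
+ *			pr_err("txn failed %d\n", stat);
+ *	}
+ *
+ *	rc = qmi_send_req_nowait(handle, &example_req_desc,
+ *				 &req, sizeof(req),
+ *				 &example_resp_desc, &resp, sizeof(resp),
+ *				 example_resp_cb, NULL);
+ */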
+
+/**
+ * qmi_encode_and_send_resp() - Encode and send QMI response
+ * @handle: QMI service handle sending the response.
+ * @conn_h: Connection handle to which the response is sent.
+ * @req_h: Request handle for which the response is sent.
+ * @resp_desc: Message Descriptor describing the response structure.
+ * @resp: Response structure.
+ * @resp_len: Length of the response structure.
+ *
+ * @return: 0 on success, standard Linux error codes on failure.
+ *
+ * This function encodes and sends a response message from a service to
+ * a client identified from the connection handle. The request for which
+ * the response is sent is identified from the request handle.
+ *
+ * This function must be called with handle->handle_lock locked.
+ */
+static int qmi_encode_and_send_resp(struct qmi_handle *handle,
+	struct qmi_svc_clnt_conn *conn_h, struct req_handle *req_h,
+	struct msg_desc *resp_desc, void *resp, unsigned int resp_len)
+{
+	struct qmi_txn *txn_handle;
+	uint16_t cntl_flag;
+	int rc;
+	int encoded_resp_len;
+	void *encoded_resp;
+
+	if (handle->handle_reset) {
+		rc = -ENETRESET;
+		goto encode_and_send_resp_err0;
+	}
+
+	if (handle->handle_type != QMI_SERVICE_HANDLE ||
+	    !verify_svc_clnt_conn(handle, conn_h) ||
+	    (req_h && !verify_req_handle(conn_h, req_h))) {
+		rc = -EINVAL;
+		goto encode_and_send_resp_err0;
+	}
+
+	/* Allocate Transaction Info */
+	txn_handle = kzalloc(sizeof(struct qmi_txn), GFP_KERNEL);
+	if (!txn_handle) {
+		pr_err("%s: Failed to allocate txn handle\n", __func__);
+		rc = -ENOMEM;
+		goto encode_and_send_resp_err0;
+	}
+	INIT_LIST_HEAD(&txn_handle->list);
+	init_waitqueue_head(&txn_handle->wait_q);
+	txn_handle->handle = handle;
+	txn_handle->enc_data = NULL;
+	txn_handle->enc_data_len = 0;
+
+	/* Encode the response msg */
+	encoded_resp_len = resp_desc->max_msg_len + QMI_HEADER_SIZE;
+	encoded_resp = kmalloc(encoded_resp_len, GFP_KERNEL);
+	if (!encoded_resp) {
+		pr_err("%s: Failed to allocate resp_msg_buf\n", __func__);
+		rc = -ENOMEM;
+		goto encode_and_send_resp_err1;
+	}
+	rc = qmi_kernel_encode(resp_desc,
+		(void *)(encoded_resp + QMI_HEADER_SIZE),
+		resp_desc->max_msg_len, resp);
+	if (rc < 0) {
+		pr_err("%s: Encode Failure %d\n", __func__, rc);
+		goto encode_and_send_resp_err2;
+	}
+	encoded_resp_len = rc;
+
+	/* Encode the header & Add to the txn_list */
+	if (req_h) {
+		txn_handle->txn_id = req_h->txn_id;
+		cntl_flag = QMI_RESPONSE_CONTROL_FLAG;
+	} else {
+		if (!handle->next_txn_id)
+			handle->next_txn_id++;
+		txn_handle->txn_id = handle->next_txn_id++;
+		cntl_flag = QMI_INDICATION_CONTROL_FLAG;
+	}
+	encode_qmi_header(encoded_resp, cntl_flag,
+			  txn_handle->txn_id, resp_desc->msg_id,
+			  encoded_resp_len);
+	encoded_resp_len += QMI_HEADER_SIZE;
+
+	qmi_log(handle, cntl_flag, txn_handle->txn_id,
+			resp_desc->msg_id, encoded_resp_len);
+	/*
+	 * Check if this svc_clnt has transactions queued to its pending list
+	 * and if there are any pending transactions then add the current
+	 * transaction to the pending list rather than sending it. This avoids
+	 * out-of-order message transfers.
+	 */
+	mutex_lock(&conn_h->pending_txn_lock);
+	if (list_empty(&conn_h->pending_txn_list))
+		rc = msm_ipc_router_send_msg(
+			(struct msm_ipc_port *)(handle->src_port),
+			(struct msm_ipc_addr *)conn_h->clnt_addr,
+			encoded_resp, encoded_resp_len);
+	else
+		rc = -EAGAIN;
+
+	if (req_h)
+		rmv_req_handle(req_h);
+	if (rc == -EAGAIN) {
+		txn_handle->enc_data = encoded_resp;
+		txn_handle->enc_data_len = encoded_resp_len;
+		list_add_tail(&txn_handle->list, &conn_h->pending_txn_list);
+		mutex_unlock(&conn_h->pending_txn_lock);
+		return 0;
+	}
+	mutex_unlock(&conn_h->pending_txn_lock);
+	if (rc < 0)
+		pr_err("%s: send_msg failed %d\n", __func__, rc);
+encode_and_send_resp_err2:
+	kfree(encoded_resp);
+encode_and_send_resp_err1:
+	kfree(txn_handle);
+encode_and_send_resp_err0:
+	return rc;
+}
+
+/**
+ * qmi_send_resp() - Send response to a request
+ * @handle: QMI handle from which the response is sent.
+ * @clnt: Client to which the response is sent.
+ * @req_handle: Request for which the response is sent.
+ * @resp_desc: Descriptor explaining the response structure.
+ * @resp: Pointer to the response structure.
+ * @resp_len: Length of the response structure.
+ *
+ * @return: 0 on success, < 0 on error.
+ */
+int qmi_send_resp(struct qmi_handle *handle, void *conn_handle,
+		  void *req_handle, struct msg_desc *resp_desc,
+		  void *resp, unsigned int resp_len)
+{
+	int rc;
+	struct qmi_svc_clnt_conn *conn_h;
+	struct req_handle *req_h;
+
+	if (!handle || !conn_handle || !req_handle ||
+	    !resp_desc || !resp || !resp_len)
+		return -EINVAL;
+
+	conn_h = (struct qmi_svc_clnt_conn *)conn_handle;
+	req_h = (struct req_handle *)req_handle;
+	mutex_lock(&handle->handle_lock);
+	rc = qmi_encode_and_send_resp(handle, conn_h, req_h,
+				      resp_desc, resp, resp_len);
+	if (rc < 0)
+		pr_err("%s: Error encoding and sending response\n", __func__);
+	mutex_unlock(&handle->handle_lock);
+	return rc;
+}
+EXPORT_SYMBOL(qmi_send_resp);
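+
+/*
+ * Example usage (illustrative sketch): a service answering a request
+ * outside of its req_cb. @conn_h and @req_h are the connection and
+ * request handles that were passed to req_cb; the response descriptor
+ * and structure are hypothetical.
+ *
+ *	struct example_resp resp = { .result = 0 };
+ *
+ *	rc = qmi_send_resp(svc_handle, conn_h, req_h,
+ *			   &example_resp_desc, &resp, sizeof(resp));
+ */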
+
+/**
+ * qmi_send_resp_from_cb() - Send response to a request from request_cb
+ * @handle: QMI handle from which the response is sent.
+ * @clnt: Client to which the response is sent.
+ * @req_handle: Request for which the response is sent.
+ * @resp_desc: Descriptor explaining the response structure.
+ * @resp: Pointer to the response structure.
+ * @resp_len: Length of the response structure.
+ *
+ * @return: 0 on success, < 0 on error.
+ */
+int qmi_send_resp_from_cb(struct qmi_handle *handle, void *conn_handle,
+			  void *req_handle, struct msg_desc *resp_desc,
+			  void *resp, unsigned int resp_len)
+{
+	int rc;
+	struct qmi_svc_clnt_conn *conn_h;
+	struct req_handle *req_h;
+
+	if (!handle || !conn_handle || !req_handle ||
+	    !resp_desc || !resp || !resp_len)
+		return -EINVAL;
+
+	conn_h = (struct qmi_svc_clnt_conn *)conn_handle;
+	req_h = (struct req_handle *)req_handle;
+	rc = qmi_encode_and_send_resp(handle, conn_h, req_h,
+				      resp_desc, resp, resp_len);
+	if (rc < 0)
+		pr_err("%s: Error encoding and sending response\n", __func__);
+	return rc;
+}
+EXPORT_SYMBOL(qmi_send_resp_from_cb);
+
+/**
+ * qmi_send_ind() - Send unsolicited event/indication to a client
+ * @handle: QMI handle from which the indication is sent.
+ * @clnt: Client to which the indication is sent.
+ * @ind_desc: Descriptor explaining the indication structure.
+ * @ind: Pointer to the indication structure.
+ * @ind_len: Length of the indication structure.
+ *
+ * @return: 0 on success, < 0 on error.
+ */
+int qmi_send_ind(struct qmi_handle *handle, void *conn_handle,
+		 struct msg_desc *ind_desc, void *ind, unsigned int ind_len)
+{
+	int rc = 0;
+	struct qmi_svc_clnt_conn *conn_h;
+
+	if (!handle || !conn_handle || !ind_desc)
+		return -EINVAL;
+
+	if ((!ind && ind_len) || (ind && !ind_len))
+		return -EINVAL;
+
+	conn_h = (struct qmi_svc_clnt_conn *)conn_handle;
+	mutex_lock(&handle->handle_lock);
+	rc = qmi_encode_and_send_resp(handle, conn_h, NULL,
+				      ind_desc, ind, ind_len);
+	if (rc < 0)
+		pr_err("%s: Error encoding and sending ind.\n", __func__);
+	mutex_unlock(&handle->handle_lock);
+	return rc;
+}
+EXPORT_SYMBOL(qmi_send_ind);
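+
+/*
+ * Example usage (illustrative sketch): sending an unsolicited indication
+ * to one connected client. Internally this reuses the response path with
+ * a NULL request handle, which selects the indication control flag. The
+ * indication descriptor and structure are hypothetical.
+ *
+ *	struct example_ind ind = { .status = 1 };
+ *
+ *	rc = qmi_send_ind(svc_handle, conn_h,
+ *			  &example_ind_desc, &ind, sizeof(ind));
+ */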
+
+/**
+ * qmi_send_ind_from_cb() - Send indication to a client from registration_cb
+ * @handle: QMI handle from which the indication is sent.
+ * @clnt: Client to which the indication is sent.
+ * @ind_desc: Descriptor explaining the indication structure.
+ * @ind: Pointer to the indication structure.
+ * @ind_len: Length of the indication structure.
+ *
+ * @return: 0 on success, < 0 on error.
+ */
+int qmi_send_ind_from_cb(struct qmi_handle *handle, void *conn_handle,
+		struct msg_desc *ind_desc, void *ind, unsigned int ind_len)
+{
+	int rc = 0;
+	struct qmi_svc_clnt_conn *conn_h;
+
+	if (!handle || !conn_handle || !ind_desc)
+		return -EINVAL;
+
+	if ((!ind && ind_len) || (ind && !ind_len))
+		return -EINVAL;
+
+	conn_h = (struct qmi_svc_clnt_conn *)conn_handle;
+	rc = qmi_encode_and_send_resp(handle, conn_h, NULL,
+				      ind_desc, ind, ind_len);
+	if (rc < 0)
+		pr_err("%s: Error encoding and sending ind.\n", __func__);
+	return rc;
+}
+EXPORT_SYMBOL(qmi_send_ind_from_cb);
+
+/**
+ * translate_err_code() - Translate Linux error codes into QMI error codes
+ * @err: Standard Linux error codes to be translated.
+ *
+ * @return: Return QMI error code.
+ */
+static int translate_err_code(int err)
+{
+	int rc;
+
+	switch (err) {
+	case -ECONNREFUSED:
+		rc = QMI_ERR_CLIENT_IDS_EXHAUSTED_V01;
+		break;
+	case -EBADMSG:
+		rc = QMI_ERR_ENCODING_V01;
+		break;
+	case -ENOMEM:
+		rc = QMI_ERR_NO_MEMORY_V01;
+		break;
+	case -EOPNOTSUPP:
+		rc = QMI_ERR_MALFORMED_MSG_V01;
+		break;
+	case -ENOTSUPP:
+		rc = QMI_ERR_NOT_SUPPORTED_V01;
+		break;
+	default:
+		rc = QMI_ERR_INTERNAL_V01;
+		break;
+	}
+	return rc;
+}
+
+/**
+ * send_err_resp() - Send the error response
+ * @handle: Service handle from which the response is sent.
+ * @conn_h: Client<->Service connection on which the response is sent.
+ * @addr: Client address to which the error response is sent.
+ * @msg_id: Request message id for which the error response is sent.
+ * @txn_id: Request Transaction ID for which the error response is sent.
+ * @err: Error code to be sent.
+ *
+ * @return: 0 on success, standard Linux error codes on failure.
+ *
+ * This function is used to send an error response from within the QMI
+ * service interface. This function is called when the service returns
+ * an error to the QMI interface while handling a request.
+ */
+static int send_err_resp(struct qmi_handle *handle,
+			 struct qmi_svc_clnt_conn *conn_h, void *addr,
+			 uint16_t msg_id, uint16_t txn_id, int err)
+{
+	struct qmi_response_type_v01 err_resp;
+	struct qmi_txn *txn_handle;
+	struct msm_ipc_addr *dest_addr;
+	int rc;
+	int encoded_resp_len;
+	void *encoded_resp;
+
+	if (handle->handle_reset)
+		return -ENETRESET;
+
+	err_resp.result = QMI_RESULT_FAILURE_V01;
+	err_resp.error = translate_err_code(err);
+
+	/* Allocate Transaction Info */
+	txn_handle = kzalloc(sizeof(struct qmi_txn), GFP_KERNEL);
+	if (!txn_handle) {
+		pr_err("%s: Failed to allocate txn handle\n", __func__);
+		return -ENOMEM;
+	}
+	INIT_LIST_HEAD(&txn_handle->list);
+	init_waitqueue_head(&txn_handle->wait_q);
+	txn_handle->handle = handle;
+	txn_handle->enc_data = NULL;
+	txn_handle->enc_data_len = 0;
+
+	/* Encode the response msg */
+	encoded_resp_len = err_resp_desc.max_msg_len + QMI_HEADER_SIZE;
+	encoded_resp = kmalloc(encoded_resp_len, GFP_KERNEL);
+	if (!encoded_resp) {
+		pr_err("%s: Failed to allocate resp_msg_buf\n", __func__);
+		rc = -ENOMEM;
+		goto encode_and_send_err_resp_err0;
+	}
+	rc = qmi_kernel_encode(&err_resp_desc,
+		(void *)(encoded_resp + QMI_HEADER_SIZE),
+		err_resp_desc.max_msg_len, &err_resp);
+	if (rc < 0) {
+		pr_err("%s: Encode Failure %d\n", __func__, rc);
+		goto encode_and_send_err_resp_err1;
+	}
+	encoded_resp_len = rc;
+
+	/* Encode the header & Add to the txn_list */
+	txn_handle->txn_id = txn_id;
+	encode_qmi_header(encoded_resp, QMI_RESPONSE_CONTROL_FLAG,
+			  txn_handle->txn_id, msg_id,
+			  encoded_resp_len);
+	encoded_resp_len += QMI_HEADER_SIZE;
+
+	qmi_log(handle, QMI_RESPONSE_CONTROL_FLAG, txn_id,
+			msg_id, encoded_resp_len);
+	/*
+	 * Check if this svc_clnt has transactions queued to its pending list
+	 * and if there are any pending transactions then add the current
+	 * transaction to the pending list rather than sending it. This avoids
+	 * out-of-order message transfers.
+	 */
+	if (!conn_h) {
+		dest_addr = (struct msm_ipc_addr *)addr;
+		goto tx_err_resp;
+	}
+
+	mutex_lock(&conn_h->pending_txn_lock);
+	dest_addr = (struct msm_ipc_addr *)conn_h->clnt_addr;
+	if (!list_empty(&conn_h->pending_txn_list)) {
+		rc = -EAGAIN;
+		goto queue_err_resp;
+	}
+tx_err_resp:
+	rc = msm_ipc_router_send_msg(
+			(struct msm_ipc_port *)(handle->src_port),
+			dest_addr, encoded_resp, encoded_resp_len);
+queue_err_resp:
+	if (rc == -EAGAIN && conn_h) {
+		txn_handle->enc_data = encoded_resp;
+		txn_handle->enc_data_len = encoded_resp_len;
+		list_add_tail(&txn_handle->list, &conn_h->pending_txn_list);
+		mutex_unlock(&conn_h->pending_txn_lock);
+		return 0;
+	}
+	if (conn_h)
+		mutex_unlock(&conn_h->pending_txn_lock);
+	if (rc < 0)
+		pr_err("%s: send_msg failed %d\n", __func__, rc);
+encode_and_send_err_resp_err1:
+	kfree(encoded_resp);
+encode_and_send_err_resp_err0:
+	kfree(txn_handle);
+	return rc;
+}
+
+/**
+ * handle_qmi_request() - Handle the QMI request
+ * @handle: QMI service handle on which the request has arrived.
+ * @req_msg: Request message to be handled.
+ * @txn_id: Transaction ID of the request message.
+ * @msg_id: Message ID of the request message.
+ * @msg_len: Message Length of the request message.
+ * @src_addr: Address of the source which sent the request.
+ * @src_addr_len: Length of the source address.
+ *
+ * @return: 0 on success, standard Linux error codes on failure.
+ */
+static int handle_qmi_request(struct qmi_handle *handle,
+			      unsigned char *req_msg, uint16_t txn_id,
+			      uint16_t msg_id, uint16_t msg_len,
+			      void *src_addr, size_t src_addr_len)
+{
+	struct qmi_svc_clnt_conn *conn_h;
+	struct msg_desc *req_desc = NULL;
+	void *req_struct = NULL;
+	unsigned int req_struct_len = 0;
+	struct req_handle *req_h = NULL;
+	int rc = 0;
+
+	if (handle->handle_type != QMI_SERVICE_HANDLE)
+		return -EOPNOTSUPP;
+
+	conn_h = find_svc_clnt_conn(handle, src_addr, src_addr_len);
+	if (conn_h)
+		goto decode_req;
+
+	/* New client, establish a connection */
+	conn_h = add_svc_clnt_conn(handle, src_addr, src_addr_len);
+	if (!conn_h) {
+		pr_err("%s: Error adding a new conn_h\n", __func__);
+		rc = -ENOMEM;
+		goto out_handle_req;
+	}
+	rc = handle->svc_ops_options->connect_cb(handle, conn_h);
+	if (rc < 0) {
+		pr_err("%s: Error accepting new client\n", __func__);
+		rmv_svc_clnt_conn(conn_h);
+		conn_h = NULL;
+		goto out_handle_req;
+	}
+
+decode_req:
+	if (!msg_len)
+		goto process_req;
+
+	req_struct_len = handle->svc_ops_options->req_desc_cb(msg_id,
+							      &req_desc);
+	if (!req_desc || req_struct_len <= 0) {
+		pr_err("%s: Error getting req_desc for msg_id %d\n",
+			__func__, msg_id);
+		rc = -ENOTSUPP;
+		goto out_handle_req;
+	}
+
+	req_struct = kzalloc(req_struct_len, GFP_KERNEL);
+	if (!req_struct) {
+		pr_err("%s: Error allocating request struct\n", __func__);
+		rc = -ENOMEM;
+		goto out_handle_req;
+	}
+
+	rc = qmi_kernel_decode(req_desc, req_struct,
+				(void *)(req_msg + QMI_HEADER_SIZE), msg_len);
+	if (rc < 0) {
+		pr_err("%s: Error decoding msg_id %d\n", __func__, msg_id);
+		rc = -EBADMSG;
+		goto out_handle_req;
+	}
+
+process_req:
+	req_h = add_req_handle(conn_h, msg_id, txn_id);
+	if (!req_h) {
+		pr_err("%s: Error adding new request handle\n", __func__);
+		rc = -ENOMEM;
+		goto out_handle_req;
+	}
+	rc = handle->svc_ops_options->req_cb(handle, conn_h, req_h,
+					      msg_id, req_struct);
+	if (rc < 0) {
+		pr_err("%s: Error while req_cb\n", __func__);
+		/* Check if the error is before or after sending a response */
+		if (verify_req_handle(conn_h, req_h))
+			rmv_req_handle(req_h);
+		else
+			rc = 0;
+	}
+
+out_handle_req:
+	kfree(req_struct);
+	if (rc < 0)
+		send_err_resp(handle, conn_h, src_addr, msg_id, txn_id, rc);
+	return rc;
+}
+
+static struct qmi_txn *find_txn_handle(struct qmi_handle *handle,
+				       uint16_t txn_id)
+{
+	struct qmi_txn *txn_handle;
+
+	list_for_each_entry(txn_handle, &handle->txn_list, list) {
+		if (txn_handle->txn_id == txn_id)
+			return txn_handle;
+	}
+	return NULL;
+}
+
+static int handle_qmi_response(struct qmi_handle *handle,
+			       unsigned char *resp_msg, uint16_t txn_id,
+			       uint16_t msg_id, uint16_t msg_len)
+{
+	struct qmi_txn *txn_handle;
+	int rc;
+
+	/* Find the transaction handle */
+	txn_handle = find_txn_handle(handle, txn_id);
+	if (!txn_handle) {
+		pr_err("%s Response received for non-existent txn_id %d\n",
+			__func__, txn_id);
+		return 0;
+	}
+
+	/* Decode the message */
+	rc = qmi_kernel_decode(txn_handle->resp_desc, txn_handle->resp,
+			       (void *)(resp_msg + QMI_HEADER_SIZE), msg_len);
+	if (rc < 0) {
+		pr_err("%s: Response Decode Failure <%d: %d: %d> rc: %d\n",
+			__func__, txn_id, msg_id, msg_len, rc);
+		wake_up(&txn_handle->wait_q);
+		if (txn_handle->type == QMI_ASYNC_TXN) {
+			list_del(&txn_handle->list);
+			kfree(txn_handle);
+		}
+		return rc;
+	}
+
+	/* Handle async or sync resp */
+	switch (txn_handle->type) {
+	case QMI_SYNC_TXN:
+		txn_handle->resp_received = 1;
+		wake_up(&txn_handle->wait_q);
+		rc = 0;
+		break;
+
+	case QMI_ASYNC_TXN:
+		if (txn_handle->resp_cb)
+			txn_handle->resp_cb(txn_handle->handle, msg_id,
+					    txn_handle->resp,
+					    txn_handle->resp_cb_data, 0);
+		list_del(&txn_handle->list);
+		kfree(txn_handle);
+		rc = 0;
+		break;
+
+	default:
+		pr_err("%s: Unrecognized transaction type\n", __func__);
+		return -EFAULT;
+	}
+	return rc;
+}
+
+static int handle_qmi_indication(struct qmi_handle *handle, void *msg,
+				 unsigned int msg_id, unsigned int msg_len)
+{
+	if (handle->ind_cb)
+		handle->ind_cb(handle, msg_id, msg + QMI_HEADER_SIZE,
+				msg_len, handle->ind_cb_priv);
+	return 0;
+}
+
+int qmi_recv_msg(struct qmi_handle *handle)
+{
+	unsigned int recv_msg_len;
+	unsigned char *recv_msg = NULL;
+	struct msm_ipc_addr src_addr = {0};
+	unsigned char cntl_flag;
+	uint16_t txn_id, msg_id, msg_len;
+	int rc;
+
+	if (!handle)
+		return -EINVAL;
+
+	mutex_lock(&handle->handle_lock);
+	if (handle->handle_reset) {
+		mutex_unlock(&handle->handle_lock);
+		return -ENETRESET;
+	}
+
+	/* Read the messages */
+	rc = msm_ipc_router_read_msg((struct msm_ipc_port *)(handle->src_port),
+				     &src_addr, &recv_msg, &recv_msg_len);
+	if (rc == -ENOMSG) {
+		mutex_unlock(&handle->handle_lock);
+		return rc;
+	}
+
+	if (rc < 0) {
+		pr_err("%s: Read failed %d\n", __func__, rc);
+		mutex_unlock(&handle->handle_lock);
+		return rc;
+	}
+
+	/* Decode the header & Handle the req, resp, indication message */
+	decode_qmi_header(recv_msg, &cntl_flag, &txn_id, &msg_id, &msg_len);
+
+	qmi_log(handle, cntl_flag, txn_id, msg_id, msg_len);
+	switch (cntl_flag) {
+	case QMI_REQUEST_CONTROL_FLAG:
+		rc = handle_qmi_request(handle, recv_msg, txn_id, msg_id,
+					msg_len, &src_addr, sizeof(src_addr));
+		break;
+
+	case QMI_RESPONSE_CONTROL_FLAG:
+		rc = handle_qmi_response(handle, recv_msg,
+					 txn_id, msg_id, msg_len);
+		break;
+
+	case QMI_INDICATION_CONTROL_FLAG:
+		rc = handle_qmi_indication(handle, recv_msg, msg_id, msg_len);
+		break;
+
+	default:
+		rc = -EFAULT;
+		pr_err("%s: Unsupported message type %d\n",
+			__func__, cntl_flag);
+		break;
+	}
+	kfree(recv_msg);
+	mutex_unlock(&handle->handle_lock);
+	return rc;
+}
+EXPORT_SYMBOL(qmi_recv_msg);
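+
+/*
+ * Example usage (illustrative sketch): draining the receive queue from a
+ * work item scheduled by the QMI_RECV_MSG notification, looping until
+ * qmi_recv_msg() reports -ENOMSG or fails. "example_handle" is a
+ * hypothetical client handle.
+ *
+ *	static void example_recv_work_fn(struct work_struct *work)
+ *	{
+ *		int rc;
+ *
+ *		do {
+ *			rc = qmi_recv_msg(example_handle);
+ *		} while (!rc);	 stops on -ENOMSG or any error
+ *	}
+ */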
+
+int qmi_connect_to_service(struct qmi_handle *handle,
+			   uint32_t service_id,
+			   uint32_t service_vers,
+			   uint32_t service_ins)
+{
+	struct msm_ipc_port_name svc_name;
+	struct msm_ipc_server_info svc_info;
+	struct msm_ipc_addr *svc_dest_addr;
+	int rc;
+	uint32_t instance_id;
+
+	if (!handle)
+		return -EINVAL;
+
+	svc_dest_addr = kzalloc(sizeof(struct msm_ipc_addr),
+				GFP_KERNEL);
+	if (!svc_dest_addr) {
+		pr_err("%s: Failure allocating memory\n", __func__);
+		return -ENOMEM;
+	}
+
+	instance_id = BUILD_INSTANCE_ID(service_vers, service_ins);
+	svc_name.service = service_id;
+	svc_name.instance = instance_id;
+
+	rc = msm_ipc_router_lookup_server_name(&svc_name, &svc_info,
+						1, LOOKUP_MASK);
+	if (rc <= 0) {
+		pr_err("%s: Server %08x:%08x not found\n",
+			__func__, service_id, instance_id);
+		kfree(svc_dest_addr);
+		return -ENODEV;
+	}
+	svc_dest_addr->addrtype = MSM_IPC_ADDR_ID;
+	svc_dest_addr->addr.port_addr.node_id = svc_info.node_id;
+	svc_dest_addr->addr.port_addr.port_id = svc_info.port_id;
+	mutex_lock(&handle->handle_lock);
+	if (handle->handle_reset) {
+		mutex_unlock(&handle->handle_lock);
+		kfree(svc_dest_addr);
+		return -ENETRESET;
+	}
+	handle->dest_info = svc_dest_addr;
+	handle->dest_service_id = service_id;
+	mutex_unlock(&handle->handle_lock);
+
+	return 0;
+}
+EXPORT_SYMBOL(qmi_connect_to_service);
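+
+/*
+ * Example usage (illustrative sketch): typical client bring-up. The
+ * service id, version and instance values are hypothetical placeholders.
+ *
+ *	handle = qmi_handle_create(example_notify, NULL);
+ *	if (!handle)
+ *		return -ENOMEM;
+ *
+ *	rc = qmi_connect_to_service(handle, EXAMPLE_SERVICE_ID, 1, 0);
+ *	if (rc < 0) {
+ *		qmi_handle_destroy(handle);
+ *		return rc;
+ *	}
+ */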
+
+/**
+ * svc_event_add_svc_addr() - Add a specific service address to the list
+ * @event_nb:	Reference to the service event structure.
+ * @node_id:	Node id of the service address.
+ * @port_id:	Port id of the service address.
+ *
+ * Return: 0 on success, standard error code otherwise.
+ *
+ * This function should be called with svc_addr_list_lock locked.
+ */
+static int svc_event_add_svc_addr(struct svc_event_nb *event_nb,
+				uint32_t node_id, uint32_t port_id)
+{
+	struct svc_addr *addr;
+
+	if (!event_nb)
+		return -EINVAL;
+	addr = kmalloc(sizeof(*addr), GFP_KERNEL);
+	if (!addr) {
+		pr_err("%s: Memory allocation failed for address list\n",
+			__func__);
+		return -ENOMEM;
+	}
+	addr->port_addr.node_id = node_id;
+	addr->port_addr.port_id = port_id;
+	list_add_tail(&addr->list_node, &event_nb->svc_addr_list);
+	return 0;
+}
+
+/**
+ * qmi_notify_svc_event_arrive() - Notify the clients about service arrival
+ * @service:	Service id for the specific service.
+ * @instance:	Instance id for the specific service.
+ * @node_id:	Node id of the processor where the service is hosted.
+ * @port_id:	Port id of the service port created by IPC Router.
+ *
+ * Return:	0 on Success or standard error code.
+ */
+static int qmi_notify_svc_event_arrive(uint32_t service,
+					uint32_t instance,
+					uint32_t node_id,
+					uint32_t port_id)
+{
+	struct svc_event_nb *temp;
+	unsigned long flags;
+	struct svc_addr *addr;
+	bool already_notified = false;
+
+	mutex_lock(&svc_event_nb_list_lock);
+	temp = find_svc_event_nb(service, instance);
+	if (!temp) {
+		mutex_unlock(&svc_event_nb_list_lock);
+		return -EINVAL;
+	}
+	mutex_unlock(&svc_event_nb_list_lock);
+
+	mutex_lock(&temp->svc_addr_list_lock);
+	list_for_each_entry(addr, &temp->svc_addr_list, list_node) {
+		if (addr->port_addr.node_id == node_id &&
+		    addr->port_addr.port_id == port_id)
+			already_notified = true;
+	}
+	if (!already_notified) {
+		/*
+		 * Notify only if the clients are not notified about the
+		 * service during registration.
+		 */
+		svc_event_add_svc_addr(temp, node_id, port_id);
+		spin_lock_irqsave(&temp->nb_lock, flags);
+		raw_notifier_call_chain(&temp->svc_event_rcvr_list,
+				QMI_SERVER_ARRIVE, NULL);
+		spin_unlock_irqrestore(&temp->nb_lock, flags);
+	}
+	mutex_unlock(&temp->svc_addr_list_lock);
+
+	return 0;
+}
+
+/**
+ * qmi_notify_svc_event_exit() - Notify the clients about service exit
+ * @service:	Service id for the specific service.
+ * @instance:	Instance id for the specific service.
+ * @node_id:	Node id of the processor where the service is hosted.
+ * @port_id:	Port id of the service port created by IPC Router.
+ *
+ * Return:	0 on Success or standard error code.
+ */
+static int qmi_notify_svc_event_exit(uint32_t service,
+					uint32_t instance,
+					uint32_t node_id,
+					uint32_t port_id)
+{
+	struct svc_event_nb *temp;
+	unsigned long flags;
+	struct svc_addr *addr;
+	struct svc_addr *temp_addr;
+
+	mutex_lock(&svc_event_nb_list_lock);
+	temp = find_svc_event_nb(service, instance);
+	if (!temp) {
+		mutex_unlock(&svc_event_nb_list_lock);
+		return -EINVAL;
+	}
+	mutex_unlock(&svc_event_nb_list_lock);
+
+	mutex_lock(&temp->svc_addr_list_lock);
+	list_for_each_entry_safe(addr, temp_addr, &temp->svc_addr_list,
+					list_node) {
+		if (addr->port_addr.node_id == node_id &&
+			addr->port_addr.port_id == port_id) {
+			/*
+			 * Notify only if an already notified service has
+			 * gone down.
+			 */
+			spin_lock_irqsave(&temp->nb_lock, flags);
+			raw_notifier_call_chain(&temp->svc_event_rcvr_list,
+						QMI_SERVER_EXIT, NULL);
+			spin_unlock_irqrestore(&temp->nb_lock, flags);
+			list_del(&addr->list_node);
+			kfree(addr);
+		}
+	}
+
+	mutex_unlock(&temp->svc_addr_list_lock);
+
+	return 0;
+}
+
+static struct svc_event_nb *find_svc_event_nb(uint32_t service_id,
+					      uint32_t instance_id)
+{
+	struct svc_event_nb *temp;
+
+	list_for_each_entry(temp, &svc_event_nb_list, list) {
+		if (temp->service_id == service_id &&
+		    temp->instance_id == instance_id)
+			return temp;
+	}
+	return NULL;
+}
+
+/**
+ * find_and_add_svc_event_nb() - Find/Add a notifier block for specific service
+ * @service_id:	Service Id of the service
+ * @instance_id:Instance Id of the service
+ *
+ * Return:	Pointer to svc_event_nb structure for the specified service
+ *
+ * This function should only be called after acquiring svc_event_nb_list_lock.
+ */
+static struct svc_event_nb *find_and_add_svc_event_nb(uint32_t service_id,
+						      uint32_t instance_id)
+{
+	struct svc_event_nb *temp;
+
+	temp = find_svc_event_nb(service_id, instance_id);
+	if (temp)
+		return temp;
+
+	temp = kzalloc(sizeof(struct svc_event_nb), GFP_KERNEL);
+	if (!temp) {
+		pr_err("%s: Failed to alloc notifier block\n", __func__);
+		return temp;
+	}
+
+	spin_lock_init(&temp->nb_lock);
+	temp->service_id = service_id;
+	temp->instance_id = instance_id;
+	INIT_LIST_HEAD(&temp->list);
+	INIT_LIST_HEAD(&temp->svc_addr_list);
+	RAW_INIT_NOTIFIER_HEAD(&temp->svc_event_rcvr_list);
+	mutex_init(&temp->svc_addr_list_lock);
+	list_add_tail(&temp->list, &svc_event_nb_list);
+
+	return temp;
+}
+
+int qmi_svc_event_notifier_register(uint32_t service_id,
+				    uint32_t service_vers,
+				    uint32_t service_ins,
+				    struct notifier_block *nb)
+{
+	struct svc_event_nb *temp;
+	unsigned long flags;
+	int ret;
+	int i;
+	int num_servers;
+	uint32_t instance_id;
+	struct msm_ipc_port_name svc_name;
+	struct msm_ipc_server_info *svc_info_arr = NULL;
+
+	mutex_lock(&qmi_svc_event_notifier_lock);
+	if (!qmi_svc_event_notifier_port && !qmi_svc_event_notifier_wq)
+		qmi_svc_event_notifier_init();
+	mutex_unlock(&qmi_svc_event_notifier_lock);
+
+	instance_id = BUILD_INSTANCE_ID(service_vers, service_ins);
+	mutex_lock(&svc_event_nb_list_lock);
+	temp = find_and_add_svc_event_nb(service_id, instance_id);
+	if (!temp) {
+		mutex_unlock(&svc_event_nb_list_lock);
+		return -EFAULT;
+	}
+	mutex_unlock(&svc_event_nb_list_lock);
+
+	mutex_lock(&temp->svc_addr_list_lock);
+	spin_lock_irqsave(&temp->nb_lock, flags);
+	ret = raw_notifier_chain_register(&temp->svc_event_rcvr_list, nb);
+	spin_unlock_irqrestore(&temp->nb_lock, flags);
+	if (!list_empty(&temp->svc_addr_list)) {
+		/* Notify this client only if some services already exist. */
+		spin_lock_irqsave(&temp->nb_lock, flags);
+		nb->notifier_call(nb, QMI_SERVER_ARRIVE, NULL);
+		spin_unlock_irqrestore(&temp->nb_lock, flags);
+	} else {
+		/*
+		 * Check if we have missed a new server event that happened
+		 * earlier.
+		 */
+		svc_name.service = service_id;
+		svc_name.instance = instance_id;
+		num_servers = msm_ipc_router_lookup_server_name(&svc_name,
+								NULL,
+								0, LOOKUP_MASK);
+		if (num_servers > 0) {
+			svc_info_arr = kmalloc_array(num_servers,
+						sizeof(*svc_info_arr),
+						GFP_KERNEL);
+			if (!svc_info_arr) {
+				mutex_unlock(&temp->svc_addr_list_lock);
+				return -ENOMEM;
+			}
+			num_servers = msm_ipc_router_lookup_server_name(
+								&svc_name,
+								svc_info_arr,
+								num_servers,
+								LOOKUP_MASK);
+			for (i = 0; i < num_servers; i++)
+				svc_event_add_svc_addr(temp,
+						svc_info_arr[i].node_id,
+						svc_info_arr[i].port_id);
+			kfree(svc_info_arr);
+
+			spin_lock_irqsave(&temp->nb_lock, flags);
+			raw_notifier_call_chain(&temp->svc_event_rcvr_list,
+						QMI_SERVER_ARRIVE, NULL);
+			spin_unlock_irqrestore(&temp->nb_lock, flags);
+		}
+	}
+	mutex_unlock(&temp->svc_addr_list_lock);
+
+	return ret;
+}
+EXPORT_SYMBOL(qmi_svc_event_notifier_register);
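+
+/*
+ * Example usage (illustrative sketch): watching for a service to come
+ * and go. The notifier callback may be invoked with a spinlock held, so
+ * it should only do atomic work, such as scheduling a worker. All
+ * "example_" names are hypothetical.
+ *
+ *	static int example_svc_event(struct notifier_block *nb,
+ *				     unsigned long event, void *data)
+ *	{
+ *		if (event == QMI_SERVER_ARRIVE)
+ *			schedule_work(&example_connect_work);
+ *		else if (event == QMI_SERVER_EXIT)
+ *			schedule_work(&example_teardown_work);
+ *		return NOTIFY_OK;
+ *	}
+ *
+ *	static struct notifier_block example_nb = {
+ *		.notifier_call = example_svc_event,
+ *	};
+ *
+ *	rc = qmi_svc_event_notifier_register(EXAMPLE_SERVICE_ID, 1, 0,
+ *					     &example_nb);
+ */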
+
+int qmi_svc_event_notifier_unregister(uint32_t service_id,
+				      uint32_t service_vers,
+				      uint32_t service_ins,
+				      struct notifier_block *nb)
+{
+	int ret;
+	struct svc_event_nb *temp;
+	unsigned long flags;
+	uint32_t instance_id;
+
+	instance_id = BUILD_INSTANCE_ID(service_vers, service_ins);
+	mutex_lock(&svc_event_nb_list_lock);
+	temp = find_svc_event_nb(service_id, instance_id);
+	if (!temp) {
+		mutex_unlock(&svc_event_nb_list_lock);
+		return -EINVAL;
+	}
+
+	spin_lock_irqsave(&temp->nb_lock, flags);
+	ret = raw_notifier_chain_unregister(&temp->svc_event_rcvr_list, nb);
+	spin_unlock_irqrestore(&temp->nb_lock, flags);
+	mutex_unlock(&svc_event_nb_list_lock);
+
+	return ret;
+}
+EXPORT_SYMBOL(qmi_svc_event_notifier_unregister);
+
+/**
+ * qmi_svc_event_worker() - Read control messages over service event port
+ * @work:	Reference to the work structure queued.
+ *
+ */
+static void qmi_svc_event_worker(struct work_struct *work)
+{
+	union rr_control_msg *ctl_msg = NULL;
+	unsigned int ctl_msg_len;
+	struct msm_ipc_addr src_addr;
+	int ret;
+
+	while (1) {
+		ret = msm_ipc_router_read_msg(qmi_svc_event_notifier_port,
+			&src_addr, (unsigned char **)&ctl_msg, &ctl_msg_len);
+		if (ret == -ENOMSG)
+			break;
+		if (ret < 0) {
+			pr_err("%s: Error receiving control message\n",
+					__func__);
+			break;
+		}
+		if (ctl_msg->cmd == IPC_ROUTER_CTRL_CMD_NEW_SERVER)
+			qmi_notify_svc_event_arrive(ctl_msg->srv.service,
+							ctl_msg->srv.instance,
+							ctl_msg->srv.node_id,
+							ctl_msg->srv.port_id);
+		else if (ctl_msg->cmd == IPC_ROUTER_CTRL_CMD_REMOVE_SERVER)
+			qmi_notify_svc_event_exit(ctl_msg->srv.service,
+							ctl_msg->srv.instance,
+							ctl_msg->srv.node_id,
+							ctl_msg->srv.port_id);
+		kfree(ctl_msg);
+	}
+}
+
+/**
+ * qmi_svc_event_notify() - Callback for any service event posted on the control port
+ * @event:	The event posted on the control port.
+ * @data:	Any out-of-band data associated with event.
+ * @odata_len:	Length of the out-of-band data, if any.
+ * @priv:	Private Data.
+ *
+ * This function is called by the underlying transport to notify the QMI
+ * interface regarding any incoming service related events. It is registered
+ * during service event control port creation.
+ */
+static void qmi_svc_event_notify(unsigned event, void *data,
+				size_t odata_len, void *priv)
+{
+	if (event == IPC_ROUTER_CTRL_CMD_NEW_SERVER
+		|| event == IPC_ROUTER_CTRL_CMD_REMOVE_CLIENT
+		|| event == IPC_ROUTER_CTRL_CMD_REMOVE_SERVER)
+		queue_work(qmi_svc_event_notifier_wq, &qmi_svc_event_work);
+}
+
+/**
+ * qmi_svc_event_notifier_init() - Create a control port to get service events
+ *
+ * This function is called during first service notifier registration. It
+ * creates a control port to get notification about server events so that
+ * respective clients can be notified about the events.
+ */
+static void qmi_svc_event_notifier_init(void)
+{
+	qmi_svc_event_notifier_wq = create_singlethread_workqueue(
+					"qmi_svc_event_wq");
+	if (!qmi_svc_event_notifier_wq) {
+		pr_err("%s: ctrl workqueue allocation failed\n", __func__);
+		return;
+	}
+	qmi_svc_event_notifier_port = msm_ipc_router_create_port(
+				qmi_svc_event_notify, NULL);
+	if (!qmi_svc_event_notifier_port) {
+		destroy_workqueue(qmi_svc_event_notifier_wq);
+		pr_err("%s: IPC Router Port creation failed\n", __func__);
+		return;
+	}
+	msm_ipc_router_bind_control_port(qmi_svc_event_notifier_port);
+}
+
+/**
+ * qmi_log_init() - Init function for IPC Logging
+ *
+ * Initialize log contexts for QMI request/response/indications.
+ */
+void qmi_log_init(void)
+{
+	qmi_req_resp_log_ctx =
+		ipc_log_context_create(QMI_REQ_RESP_LOG_PAGES,
+			"kqmi_req_resp", 0);
+	if (!qmi_req_resp_log_ctx)
+		pr_err("%s: Unable to create QMI IPC logging for Req/Resp",
+			__func__);
+	qmi_ind_log_ctx =
+		ipc_log_context_create(QMI_IND_LOG_PAGES, "kqmi_ind", 0);
+	if (!qmi_ind_log_ctx)
+		pr_err("%s: Unable to create QMI IPC logging for Indications",
+			__func__);
+}
+
+/**
+ * qmi_svc_register() - Register a QMI service with a QMI handle
+ * @handle: QMI handle on which the service has to be registered.
+ * @ops_options: Service specific operations and options.
+ *
+ * @return: 0 if successfully registered, < 0 on error.
+ */
+int qmi_svc_register(struct qmi_handle *handle, void *ops_options)
+{
+	struct qmi_svc_ops_options *svc_ops_options;
+	struct msm_ipc_addr svc_name;
+	int rc;
+	uint32_t instance_id;
+
+	svc_ops_options = (struct qmi_svc_ops_options *)ops_options;
+	if (!handle || !svc_ops_options)
+		return -EINVAL;
+
+	/* Check if the required elements of opts_options are filled */
+	if (!svc_ops_options->service_id || !svc_ops_options->service_vers ||
+	    !svc_ops_options->connect_cb || !svc_ops_options->disconnect_cb ||
+	    !svc_ops_options->req_desc_cb || !svc_ops_options->req_cb)
+		return -EINVAL;
+
+	mutex_lock(&handle->handle_lock);
+	/* Check if another service/client is registered in that handle */
+	if (handle->handle_type == QMI_SERVICE_HANDLE || handle->dest_info) {
+		mutex_unlock(&handle->handle_lock);
+		return -EBUSY;
+	}
+	INIT_LIST_HEAD(&handle->conn_list);
+	mutex_unlock(&handle->handle_lock);
+
+	/*
+	 * handle_lock is released here because the NEW_SERVER message will
+	 * arrive on this handle's control port, whose handler acquires the
+	 * same mutex. It is also safe to call register_server unlocked.
+	 */
+	/* Register the service */
+	instance_id = ((svc_ops_options->service_vers & 0xFF) |
+		       ((svc_ops_options->service_ins & 0xFF) << 8));
+	svc_name.addrtype = MSM_IPC_ADDR_NAME;
+	svc_name.addr.port_name.service = svc_ops_options->service_id;
+	svc_name.addr.port_name.instance = instance_id;
+	rc = msm_ipc_router_register_server(
+		(struct msm_ipc_port *)handle->src_port, &svc_name);
+	if (rc < 0) {
+		pr_err("%s: Error %d registering QMI service %08x:%08x\n",
+			__func__, rc, svc_ops_options->service_id,
+			instance_id);
+		return rc;
+	}
+	mutex_lock(&handle->handle_lock);
+	handle->svc_ops_options = svc_ops_options;
+	handle->handle_type = QMI_SERVICE_HANDLE;
+	mutex_unlock(&handle->handle_lock);
+	return rc;
+}
+EXPORT_SYMBOL(qmi_svc_register);
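+
+/*
+ * Example usage (illustrative sketch): minimal service registration. The
+ * callbacks and ids are hypothetical; req_desc_cb is expected to return
+ * the size of the decoded request structure for a message id and to set
+ * the request descriptor, and req_cb handles each decoded request.
+ *
+ *	static struct qmi_svc_ops_options example_svc_ops = {
+ *		.service_id = EXAMPLE_SERVICE_ID,
+ *		.service_vers = 1,
+ *		.connect_cb = example_connect_cb,
+ *		.disconnect_cb = example_disconnect_cb,
+ *		.req_desc_cb = example_req_desc_cb,
+ *		.req_cb = example_req_cb,
+ *	};
+ *
+ *	rc = qmi_svc_register(svc_handle, &example_svc_ops);
+ */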
+
+
+/**
+ * qmi_svc_unregister() - Unregister the service from a QMI handle
+ * @handle: QMI handle from which the service has to be unregistered.
+ *
+ * return: 0 on success, < 0 on error.
+ */
+int qmi_svc_unregister(struct qmi_handle *handle)
+{
+	struct qmi_svc_clnt_conn *conn_h, *temp_conn_h;
+
+	if (!handle || handle->handle_type != QMI_SERVICE_HANDLE)
+		return -EINVAL;
+
+	mutex_lock(&handle->handle_lock);
+	handle->handle_type = QMI_CLIENT_HANDLE;
+	mutex_unlock(&handle->handle_lock);
+	/*
+	 * handle_lock is released here because the REMOVE_SERVER message will
+	 * arrive on this handle's control port, whose handler acquires the
+	 * same mutex. It is also safe to call unregister_server unlocked.
+	 */
+	msm_ipc_router_unregister_server(
+		(struct msm_ipc_port *)handle->src_port);
+
+	mutex_lock(&handle->handle_lock);
+	list_for_each_entry_safe(conn_h, temp_conn_h,
+				 &handle->conn_list, list)
+		rmv_svc_clnt_conn(conn_h);
+	mutex_unlock(&handle->handle_lock);
+	return 0;
+}
+EXPORT_SYMBOL(qmi_svc_unregister);
+
+static int __init qmi_interface_init(void)
+{
+	qmi_log_init();
+	return 0;
+}
+module_init(qmi_interface_init);
+
+MODULE_DESCRIPTION("MSM QMI Interface");
+MODULE_LICENSE("GPL v2");
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/soc/qcom/qmi_interface_priv.h	2019-01-22 16:16:26.671275095 +0100
@@ -0,0 +1,123 @@
+/* Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _QMI_INTERFACE_PRIV_H_
+#define _QMI_INTERFACE_PRIV_H_
+
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/mm.h>
+#include <linux/list.h>
+#include <linux/socket.h>
+#include <linux/gfp.h>
+#include <linux/platform_device.h>
+#include <linux/qmi_encdec.h>
+
+#include <soc/qcom/msm_qmi_interface.h>
+
+enum txn_type {
+	QMI_SYNC_TXN = 1,
+	QMI_ASYNC_TXN,
+};
+
+/**
+ * handle_type - Enum to identify QMI handle type
+ */
+enum handle_type {
+	QMI_CLIENT_HANDLE = 1,
+	QMI_SERVICE_HANDLE,
+};
+
+struct qmi_txn {
+	struct list_head list;
+	uint16_t txn_id;
+	enum txn_type type;
+	struct qmi_handle *handle;
+	void *enc_data;
+	unsigned int enc_data_len;
+	struct msg_desc *resp_desc;
+	void *resp;
+	unsigned int resp_len;
+	int resp_received;
+	int send_stat;
+	void (*resp_cb)(struct qmi_handle *handle, unsigned int msg_id,
+			void *msg, void *resp_cb_data, int stat);
+	void *resp_cb_data;
+	wait_queue_head_t wait_q;
+};
+
+/**
+ * svc_addr - Data structure to maintain a list of service addresses.
+ * @list_node: Service address list node used by "svc_addr_list"
+ * @port_addr: Service address in <node_id:port_id>.
+ */
+struct svc_addr {
+	struct list_head list_node;
+	struct msm_ipc_port_addr port_addr;
+};
+
+/**
+ * svc_event_nb - Service event notification structure.
+ * @nb_lock: Spinlock for the notifier block lists.
+ * @service_id: Service id for which list of notifier blocks are maintained.
+ * @instance_id: Instance id for which list of notifier blocks are maintained.
+ * @svc_event_rcvr_list: List of notifier blocks which clients have registered.
+ * @list: Used to chain this structure in a global list.
+ * @svc_addr_list_lock: Lock to protect @svc_addr_list.
+ * @svc_addr_list: List maintaining all the addresses for a specific
+ *			<service_id:instance_id>.
+ */
+struct svc_event_nb {
+	spinlock_t nb_lock;
+	uint32_t service_id;
+	uint32_t instance_id;
+	struct raw_notifier_head svc_event_rcvr_list;
+	struct list_head list;
+	struct mutex svc_addr_list_lock;
+	struct list_head svc_addr_list;
+};
+
+/**
+ * req_handle - Data structure to store request information
+ * @list: Points to req_handle_list maintained per connection.
+ * @conn_h: Connection handle on which the concerned request is received.
+ * @msg_id: Message ID of the request.
+ * @txn_id: Transaction ID of the request.
+ */
+struct req_handle {
+	struct list_head list;
+	struct qmi_svc_clnt_conn *conn_h;
+	uint16_t msg_id;
+	uint16_t txn_id;
+};
+
+/**
+ * qmi_svc_clnt_conn - Data structure to identify client service connection
+ * @list: List to chain up the client connection to the connection list.
+ * @svc_handle: Service side information of the connection.
+ * @clnt_addr: Client side information of the connection.
+ * @clnt_addr_len: Length of the client address.
+ * @req_handle_list: Pending requests in this connection.
+ * @resume_tx_work: Work item to resume pending transmissions on this
+ *			connection.
+ * @pending_txn_list: Pending responses/indications awaiting flow control.
+ * @pending_txn_lock: Lock to protect @pending_txn_list.
+ */
+struct qmi_svc_clnt_conn {
+	struct list_head list;
+	void *svc_handle;
+	void *clnt_addr;
+	size_t clnt_addr_len;
+	struct list_head req_handle_list;
+	struct delayed_work resume_tx_work;
+	struct list_head pending_txn_list;
+	struct mutex pending_txn_lock;
+};
+
+#endif
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/soc/qcom/qpnp-haptic.c	2019-01-22 16:16:26.671275095 +0100
@@ -0,0 +1,3138 @@
+/* Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt)	"haptic: %s: " fmt, __func__
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/regmap.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/hrtimer.h>
+#include <linux/of_device.h>
+#include <linux/spmi.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+#include <linux/interrupt.h>
+#include <linux/qpnp/pwm.h>
+#include <linux/err.h>
+#include <linux/delay.h>
+#include <linux/log2.h>
+#include <linux/qpnp-misc.h>
+#include <linux/qpnp/qpnp-haptic.h>
+#include <linux/qpnp/qpnp-revid.h>
+#include "../../staging/android/timed_output.h"
+
+#define QPNP_HAP_STATUS(b)		(b + 0x0A)
+#define QPNP_HAP_LRA_AUTO_RES_LO(b)	(b + 0x0B)
+#define QPNP_HAP_LRA_AUTO_RES_HI(b)     (b + 0x0C)
+#define QPNP_HAP_EN_CTL_REG(b)		(b + 0x46)
+#define QPNP_HAP_EN_CTL2_REG(b)		(b + 0x48)
+#define QPNP_HAP_AUTO_RES_CTRL(b)	(b + 0x4B)
+#define QPNP_HAP_CFG1_REG(b)		(b + 0x4C)
+#define QPNP_HAP_CFG2_REG(b)		(b + 0x4D)
+#define QPNP_HAP_SEL_REG(b)		(b + 0x4E)
+#define QPNP_HAP_LRA_AUTO_RES_REG(b)	(b + 0x4F)
+#define QPNP_HAP_VMAX_REG(b)		(b + 0x51)
+#define QPNP_HAP_ILIM_REG(b)		(b + 0x52)
+#define QPNP_HAP_SC_DEB_REG(b)		(b + 0x53)
+#define QPNP_HAP_RATE_CFG1_REG(b)	(b + 0x54)
+#define QPNP_HAP_RATE_CFG2_REG(b)	(b + 0x55)
+#define QPNP_HAP_INT_PWM_REG(b)		(b + 0x56)
+#define QPNP_HAP_EXT_PWM_REG(b)		(b + 0x57)
+#define QPNP_HAP_PWM_CAP_REG(b)		(b + 0x58)
+#define QPNP_HAP_SC_CLR_REG(b)		(b + 0x59)
+#define QPNP_HAP_SC_IRQ_STATUS_DELAY   msecs_to_jiffies(1000)
+#define QPNP_HAP_BRAKE_REG(b)		(b + 0x5C)
+#define QPNP_HAP_WAV_REP_REG(b)		(b + 0x5E)
+#define QPNP_HAP_WAV_S_REG_BASE(b)	(b + 0x60)
+#define QPNP_HAP_PLAY_REG(b)		(b + 0x70)
+#define QPNP_HAP_SEC_ACCESS_REG(b)	(b + 0xD0)
+#define QPNP_HAP_TEST2_REG(b)		(b + 0xE3)
+
+#define QPNP_HAP_STATUS_BUSY		0x02
+#define QPNP_HAP_ACT_TYPE_MASK		BIT(0)
+#define QPNP_HAP_LRA			0x0
+#define QPNP_HAP_ERM			0x1
+#define QPNP_HAP_PM660_HW_AUTO_RES_MODE_BIT	BIT(3)
+#define QPNP_HAP_AUTO_RES_MODE_MASK	GENMASK(6, 4)
+#define QPNP_HAP_AUTO_RES_MODE_SHIFT	4
+#define QPNP_HAP_PM660_AUTO_RES_MODE_BIT	BIT(7)
+#define QPNP_HAP_PM660_AUTO_RES_MODE_SHIFT	7
+#define QPNP_HAP_PM660_CALIBRATE_DURATION_MASK	GENMASK(6, 5)
+#define QPNP_HAP_PM660_CALIBRATE_DURATION_SHIFT	5
+#define QPNP_HAP_PM660_QWD_DRIVE_DURATION_BIT	BIT(4)
+#define QPNP_HAP_PM660_QWD_DRIVE_DURATION_SHIFT	4
+#define QPNP_HAP_PM660_CALIBRATE_AT_EOP_BIT	BIT(3)
+#define QPNP_HAP_PM660_CALIBRATE_AT_EOP_SHIFT	3
+#define QPNP_HAP_PM660_LRA_ZXD_CAL_PERIOD_BIT	GENMASK(2, 0)
+#define QPNP_HAP_LRA_HIGH_Z_MASK		GENMASK(3, 2)
+#define QPNP_HAP_LRA_HIGH_Z_SHIFT		2
+#define QPNP_HAP_LRA_RES_CAL_PER_MASK		GENMASK(1, 0)
+#define QPNP_HAP_PM660_LRA_RES_CAL_PER_MASK	GENMASK(2, 0)
+#define QPNP_HAP_RES_CAL_PERIOD_MIN		4
+#define QPNP_HAP_RES_CAL_PERIOD_MAX		32
+#define QPNP_HAP_PM660_RES_CAL_PERIOD_MAX	256
+#define QPNP_HAP_WF_SOURCE_MASK		GENMASK(5, 4)
+#define QPNP_HAP_WF_SOURCE_SHIFT	4
+#define QPNP_HAP_VMAX_OVD_BIT		BIT(6)
+#define QPNP_HAP_VMAX_MASK		GENMASK(5, 1)
+#define QPNP_HAP_VMAX_SHIFT		1
+#define QPNP_HAP_VMAX_MIN_MV		116
+#define QPNP_HAP_VMAX_MAX_MV		3596
+#define QPNP_HAP_ILIM_MASK		BIT(0)
+#define QPNP_HAP_ILIM_MIN_MV		400
+#define QPNP_HAP_ILIM_MAX_MV		800
+#define QPNP_HAP_SC_DEB_MASK		GENMASK(2, 0)
+#define QPNP_HAP_SC_DEB_CYCLES_MIN	0
+#define QPNP_HAP_DEF_SC_DEB_CYCLES	8
+#define QPNP_HAP_SC_DEB_CYCLES_MAX	32
+#define QPNP_HAP_SC_CLR			1
+#define QPNP_HAP_INT_PWM_MASK		GENMASK(1, 0)
+#define QPNP_HAP_INT_PWM_FREQ_253_KHZ	253
+#define QPNP_HAP_INT_PWM_FREQ_505_KHZ	505
+#define QPNP_HAP_INT_PWM_FREQ_739_KHZ	739
+#define QPNP_HAP_INT_PWM_FREQ_1076_KHZ	1076
+#define QPNP_HAP_WAV_SHAPE_MASK		BIT(0)
+#define QPNP_HAP_RATE_CFG1_MASK		0xFF
+#define QPNP_HAP_RATE_CFG2_MASK		0xF0
+#define QPNP_HAP_RATE_CFG2_SHFT		8
+#define QPNP_HAP_RATE_CFG_STEP_US	5
+#define QPNP_HAP_WAV_PLAY_RATE_US_MIN	0
+#define QPNP_HAP_DEF_WAVE_PLAY_RATE_US	5715
+#define QPNP_HAP_WAV_PLAY_RATE_US_MAX	20475
+#define QPNP_HAP_WAV_REP_MASK		GENMASK(6, 4)
+#define QPNP_HAP_WAV_S_REP_MASK		GENMASK(1, 0)
+#define QPNP_HAP_WAV_REP_SHIFT		4
+#define QPNP_HAP_WAV_REP_MIN		1
+#define QPNP_HAP_WAV_REP_MAX		128
+#define QPNP_HAP_WAV_S_REP_MIN		1
+#define QPNP_HAP_WAV_S_REP_MAX		8
+#define QPNP_HAP_WF_AMP_MASK		GENMASK(5, 1)
+#define QPNP_HAP_WF_OVD_BIT		BIT(6)
+#define QPNP_HAP_BRAKE_PAT_MASK		0x3
+#define QPNP_HAP_ILIM_MIN_MA		400
+#define QPNP_HAP_ILIM_MAX_MA		800
+#define QPNP_HAP_EXT_PWM_MASK		GENMASK(1, 0)
+#define QPNP_HAP_EXT_PWM_FREQ_25_KHZ	25
+#define QPNP_HAP_EXT_PWM_FREQ_50_KHZ	50
+#define QPNP_HAP_EXT_PWM_FREQ_75_KHZ	75
+#define QPNP_HAP_EXT_PWM_FREQ_100_KHZ	100
+#define PWM_MAX_DTEST_LINES		4
+#define QPNP_HAP_EXT_PWM_DTEST_MASK	GENMASK(6, 4)
+#define QPNP_HAP_EXT_PWM_DTEST_SHFT	4
+#define QPNP_HAP_EXT_PWM_PEAK_DATA	0x7F
+#define QPNP_HAP_EXT_PWM_HALF_DUTY	50
+#define QPNP_HAP_EXT_PWM_FULL_DUTY	100
+#define QPNP_HAP_EXT_PWM_DATA_FACTOR	39
+#define QPNP_HAP_WAV_SINE		0
+#define QPNP_HAP_WAV_SQUARE		1
+#define QPNP_HAP_WAV_SAMP_LEN		8
+#define QPNP_HAP_WAV_SAMP_MAX		0x3E
+#define QPNP_HAP_BRAKE_PAT_LEN		4
+#define QPNP_HAP_PLAY_EN_BIT		BIT(7)
+#define QPNP_HAP_EN_BIT			BIT(7)
+#define QPNP_HAP_BRAKE_MASK		BIT(0)
+#define QPNP_HAP_AUTO_RES_MASK		BIT(7)
+#define AUTO_RES_ENABLE			BIT(7)
+#define AUTO_RES_ERR_BIT		0x10
+#define SC_FOUND_BIT			0x08
+#define SC_MAX_COUNT			5
+
+#define QPNP_HAP_TIMEOUT_MS_MAX		15000
+#define QPNP_HAP_STR_SIZE		20
+#define QPNP_HAP_MAX_RETRIES		5
+#define QPNP_TEST_TIMER_MS		5
+
+#define QPNP_HAP_TIME_REQ_FOR_BACK_EMF_GEN	20000
+#define POLL_TIME_AUTO_RES_ERR_NS	(20 * NSEC_PER_MSEC)
+
+#define MAX_POSITIVE_VARIATION_LRA_FREQ 30
+#define MAX_NEGATIVE_VARIATION_LRA_FREQ -30
+#define FREQ_VARIATION_STEP		5
+#define AUTO_RES_ERROR_CAPTURE_RES	5
+#define AUTO_RES_ERROR_MAX		30
+#define ADJUSTED_LRA_PLAY_RATE_CODE_ARRSIZE \
+	((MAX_POSITIVE_VARIATION_LRA_FREQ - MAX_NEGATIVE_VARIATION_LRA_FREQ) \
+	 / FREQ_VARIATION_STEP)
+#define LRA_DRIVE_PERIOD_POS_ERR(hap, rc_clk_err_percent_x10) \
+	(hap->init_drive_period_code = (hap->init_drive_period_code * \
+		(1000 + rc_clk_err_percent_x10)) / 1000)
+#define LRA_DRIVE_PERIOD_NEG_ERR(hap, rc_clk_err_percent_x10) \
+	(hap->init_drive_period_code = (hap->init_drive_period_code * \
+		(1000 - rc_clk_err_percent_x10)) / 1000)
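+
+/*
+ * Worked example for the macros above (editor's illustration, values assumed
+ * for clarity): with init_drive_period_code == 1143 and an RC clock running
+ * 1.5% fast (rc_clk_err_percent_x10 == 15), LRA_DRIVE_PERIOD_POS_ERR computes
+ * (1143 * 1015) / 1000 == 1160, stretching the drive period code by the same
+ * proportion as the clock error so the effective LRA frequency is preserved.
+ */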
+
+static u32 adjusted_lra_play_rate_code[ADJUSTED_LRA_PLAY_RATE_CODE_ARRSIZE];
+
+/* haptic debug register set */
+static u8 qpnp_hap_dbg_regs[] = {
+	0x0a, 0x0b, 0x0c, 0x46, 0x48, 0x4c, 0x4d, 0x4e, 0x4f, 0x51, 0x52, 0x53,
+	0x54, 0x55, 0x56, 0x57, 0x58, 0x5c, 0x5e, 0x60, 0x61, 0x62, 0x63, 0x64,
+	0x65, 0x66, 0x67, 0x70, 0xE3,
+};
+
+/* ramp up/down test sequence */
+static u8 qpnp_hap_ramp_test_data[] = {
+	0x0, 0x19, 0x32, 0x4C, 0x65, 0x7F, 0x65, 0x4C, 0x32, 0x19,
+	0x0, 0x99, 0xB2, 0xCC, 0xE5, 0xFF, 0xE5, 0xCC, 0xB2, 0x99,
+	0x0, 0x19, 0x32, 0x4C, 0x65, 0x7F, 0x65, 0x4C, 0x32, 0x19,
+	0x0, 0x99, 0xB2, 0xCC, 0xE5, 0xFF, 0xE5, 0xCC, 0xB2, 0x99,
+	0x0, 0x19, 0x32, 0x4C, 0x65, 0x7F, 0x65, 0x4C, 0x32, 0x19,
+	0x0, 0x99, 0xB2, 0xCC, 0xE5, 0xFF, 0xE5, 0xCC, 0xB2, 0x99,
+	0x0, 0x19, 0x32, 0x4C, 0x65, 0x7F, 0x65, 0x4C, 0x32, 0x19,
+	0x0, 0x99, 0xB2, 0xCC, 0xE5, 0xFF, 0xE5, 0xCC, 0xB2, 0x99,
+	0x0, 0x19, 0x32, 0x4C, 0x65, 0x7F, 0x65, 0x4C, 0x32, 0x19,
+	0x0, 0x99, 0xB2, 0xCC, 0xE5, 0xFF, 0xE5, 0xCC, 0xB2, 0x99,
+	0x0, 0x19, 0x32, 0x4C, 0x65, 0x7F, 0x65, 0x4C, 0x32, 0x19,
+	0x0, 0x99, 0xB2, 0xCC, 0xE5, 0xFF, 0xE5, 0xCC, 0xB2, 0x99,
+	0x0, 0x19, 0x32, 0x4C, 0x65, 0x7F, 0x65, 0x4C, 0x32, 0x19,
+	0x0, 0x99, 0xB2, 0xCC, 0xE5, 0xFF, 0xE5, 0xCC, 0xB2, 0x99,
+};
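+
+/*
+ * Note on the test data encoding (editor's reading of the tables above and
+ * below): each even row ramps 0x0 -> 0x7F -> 0x0, and the following row
+ * repeats the same magnitudes with bit 7 set (0x19 -> 0x99, 0x7F -> 0xFF),
+ * which drives the actuator through the same ramp in the opposite direction
+ * when the byte is interpreted as a signed amplitude.
+ */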
+
+/* alternate max and min sequence */
+static u8 qpnp_hap_min_max_test_data[] = {
+	0x0, 0x7F, 0x0, 0xFF, 0x0, 0x7F, 0x0, 0xFF, 0x0, 0x7F, 0x0, 0xFF,
+	0x0, 0x7F, 0x0, 0xFF, 0x0, 0x7F, 0x0, 0xFF, 0x0, 0x7F, 0x0, 0xFF,
+	0x0, 0x7F, 0x0, 0xFF, 0x0, 0x7F, 0x0, 0xFF, 0x0, 0x7F, 0x0, 0xFF,
+	0x0, 0x7F, 0x0, 0xFF, 0x0, 0x7F, 0x0, 0xFF, 0x0, 0x7F, 0x0, 0xFF,
+	0x0, 0x7F, 0x0, 0xFF, 0x0, 0x7F, 0x0, 0xFF, 0x0, 0x7F, 0x0, 0xFF,
+	0x0, 0x7F, 0x0, 0xFF, 0x0, 0x7F, 0x0, 0xFF, 0x0, 0x7F, 0x0, 0xFF,
+	0x0, 0x7F, 0x0, 0xFF, 0x0, 0x7F, 0x0, 0xFF, 0x0, 0x7F, 0x0, 0xFF,
+	0x0, 0x7F, 0x0, 0xFF, 0x0, 0x7F, 0x0, 0xFF, 0x0, 0x7F, 0x0, 0xFF,
+	0x0, 0x7F, 0x0, 0xFF, 0x0, 0x7F, 0x0, 0xFF, 0x0, 0x7F, 0x0, 0xFF,
+	0x0, 0x7F, 0x0, 0xFF, 0x0, 0x7F, 0x0, 0xFF, 0x0, 0x7F, 0x0, 0xFF,
+	0x0, 0x7F, 0x0, 0xFF, 0x0, 0x7F, 0x0, 0xFF, 0x0, 0x7F, 0x0, 0xFF,
+	0x0, 0x7F, 0x0, 0xFF, 0x0, 0x7F, 0x0, 0xFF, 0x0, 0x7F, 0x0, 0xFF,
+};
+
+/*
+ * auto resonance mode
+ * ZXD - Zero Cross Detect
+ * QWD - Quarter Wave Drive
+ * ZXD_EOP - ZXD with End Of Pattern
+ */
+enum qpnp_hap_auto_res_mode {
+	QPNP_HAP_AUTO_RES_NONE,
+	QPNP_HAP_AUTO_RES_ZXD,
+	QPNP_HAP_AUTO_RES_QWD,
+	QPNP_HAP_AUTO_RES_MAX_QWD,
+	QPNP_HAP_AUTO_RES_ZXD_EOP,
+};
+
+enum qpnp_hap_pm660_auto_res_mode {
+	QPNP_HAP_PM660_AUTO_RES_ZXD,
+	QPNP_HAP_PM660_AUTO_RES_QWD,
+};
+
+/* high Z option lines */
+enum qpnp_hap_high_z {
+	QPNP_HAP_LRA_HIGH_Z_NONE, /* opt0 for PM660 */
+	QPNP_HAP_LRA_HIGH_Z_OPT1,
+	QPNP_HAP_LRA_HIGH_Z_OPT2,
+	QPNP_HAP_LRA_HIGH_Z_OPT3,
+};
+
+/* play modes */
+enum qpnp_hap_mode {
+	QPNP_HAP_DIRECT,
+	QPNP_HAP_BUFFER,
+	QPNP_HAP_AUDIO,
+	QPNP_HAP_PWM,
+};
+
+/* status flags */
+enum qpnp_hap_status {
+	AUTO_RESONANCE_ENABLED = BIT(0),
+};
+
+/* pwm channel info */
+struct qpnp_pwm_info {
+	struct pwm_device *pwm_dev;
+	u32 pwm_channel;
+	u32 duty_us;
+	u32 period_us;
+};
+
+/*
+ *  qpnp_hap_lra_ares_cfg - Haptic auto_resonance configuration
+ *  @ lra_qwd_drive_duration - LRA QWD drive duration
+ *  @ calibrate_at_eop - Calibrate at EOP
+ *  @ lra_res_cal_period - LRA resonance calibration period
+ *  @ auto_res_mode - auto resonance mode
+ *  @ lra_high_z - high z option line
+ */
+struct qpnp_hap_lra_ares_cfg {
+	int				lra_qwd_drive_duration;
+	int				calibrate_at_eop;
+	enum qpnp_hap_high_z		lra_high_z;
+	u16				lra_res_cal_period;
+	u8				auto_res_mode;
+};
+
+/*
+ *  qpnp_hap - Haptic data structure
+ *  @ spmi - spmi device
+ *  @ hap_timer - hrtimer
+ *  @ auto_res_err_poll_timer - hrtimer for auto-resonance error
+ *  @ timed_dev - timed output device
+ *  @ work - worker
+ *  @ sc_work - worker to handle short circuit condition
+ *  @ pwm_info - pwm info
+ *  @ ares_cfg - auto resonance configuration
+ *  @ lock - mutex lock
+ *  @ wf_lock - mutex lock for waveform
+ *  @ init_drive_period_code - the initial lra drive period code
+ *  @ drive_period_code_max_limit_percent_variation - maximum limit of
+ *    percentage variation of drive period code
+ *  @ drive_period_code_min_limit_percent_variation - minimum limit of
+ *    percentage variation of drive period code
+ *  @ drive_period_code_max_limit - calculated drive period code with
+ *    percentage variation on the higher side
+ *  @ drive_period_code_min_limit - calculated drive period code with
+ *    percentage variation on the lower side
+ *  @ play_mode - play mode
+ *  @ timeout_ms - max timeout in ms
+ *  @ time_required_to_generate_back_emf_us - the time required for sufficient
+ *    back-emf to be generated for auto resonance to be successful
+ *  @ vmax_mv - max voltage in mv
+ *  @ ilim_ma - limiting current in ma
+ *  @ sc_deb_cycles - short circuit debounce cycles
+ *  @ int_pwm_freq_khz - internal pwm frequency in khz
+ *  @ wave_play_rate_us - play rate for waveform
+ *  @ play_time_ms - play time set by the user
+ *  @ ext_pwm_freq_khz - external pwm frequency in khz
+ *  @ wave_rep_cnt - waveform repeat count
+ *  @ wave_s_rep_cnt - waveform sample repeat count
+ *  @ play_irq - irq for play
+ *  @ sc_irq - irq for short circuit
+ *  @ status_flags - status
+ *  @ base - base address
+ *  @ act_type - actuator type
+ *  @ wave_shape - waveform shape
+ *  @ wave_samp - array of wave samples
+ *  @ shadow_wave_samp - shadow array of wave samples
+ *  @ brake_pat - pattern for active braking
+ *  @ sc_count - counter to determine the duration of short circuit condition
+ *  @ lra_hw_auto_resonance - enable hardware auto resonance
+ *  @ state - current state of haptics
+ *  @ module_en - haptics module enable status
+ *  @ wf_update - waveform update flag
+ *  @ pwm_cfg_state - pwm mode configuration state
+ *  @ en_brake - brake state
+ *  @ sup_brake_pat - support custom brake pattern
+ *  @ correct_lra_drive_freq - correct LRA Drive Frequency
+ *  @ misc_clk_trim_error_reg - MISC clock trim error register if present
+ *  @ clk_trim_error_code - MISC clock trim error code
+ *  @ perform_lra_auto_resonance_search - whether lra auto resonance search
+ *    algorithm should be performed or not.
+ *  @ auto_mode - Auto mode selection
+ *  @ override_auto_mode_config - Flag to override auto mode configuration with
+ *    user specified values through sysfs.
+ */
+struct qpnp_hap {
+	struct platform_device		*pdev;
+	struct regmap			*regmap;
+	struct regulator		*vcc_pon;
+	struct hrtimer			hap_timer;
+	struct hrtimer			auto_res_err_poll_timer;
+	struct timed_output_dev		timed_dev;
+	struct work_struct		work;
+	struct delayed_work		sc_work;
+	struct hrtimer			hap_test_timer;
+	struct work_struct		test_work;
+	struct qpnp_pwm_info		pwm_info;
+	struct qpnp_hap_lra_ares_cfg	ares_cfg;
+	struct mutex			lock;
+	struct mutex			wf_lock;
+	spinlock_t			bus_lock;
+	struct completion		completion;
+	enum qpnp_hap_mode		play_mode;
+	u32				misc_clk_trim_error_reg;
+	u32				init_drive_period_code;
+	u32				timeout_ms;
+	u32				time_required_to_generate_back_emf_us;
+	u32				vmax_mv;
+	u32				ilim_ma;
+	u32				sc_deb_cycles;
+	u32				int_pwm_freq_khz;
+	u32				wave_play_rate_us;
+	u32				play_time_ms;
+	u32				ext_pwm_freq_khz;
+	u32				wave_rep_cnt;
+	u32				wave_s_rep_cnt;
+	u32				play_irq;
+	u32				sc_irq;
+	u32				status_flags;
+	u16				base;
+	u16				last_rate_cfg;
+	u16				drive_period_code_max_limit;
+	u16				drive_period_code_min_limit;
+	u8			drive_period_code_max_limit_percent_variation;
+	u8			drive_period_code_min_limit_percent_variation;
+	u8				act_type;
+	u8				wave_shape;
+	u8				wave_samp[QPNP_HAP_WAV_SAMP_LEN];
+	u8				shadow_wave_samp[QPNP_HAP_WAV_SAMP_LEN];
+	u8				brake_pat[QPNP_HAP_BRAKE_PAT_LEN];
+	u8				sc_count;
+	u8				ext_pwm_dtest_line;
+	u8				pmic_subtype;
+	u8				clk_trim_error_code;
+	bool				lra_hw_auto_resonance;
+	bool				vcc_pon_enabled;
+	bool				state;
+	bool				module_en;
+	bool				manage_pon_supply;
+	bool				wf_update;
+	bool				pwm_cfg_state;
+	bool				en_brake;
+	bool				sup_brake_pat;
+	bool				correct_lra_drive_freq;
+	bool				perform_lra_auto_resonance_search;
+	bool				auto_mode;
+	bool				override_auto_mode_config;
+	bool				play_irq_en;
+};
+
+static struct qpnp_hap *ghap;
+
+/* helper to read a pmic register */
+static int qpnp_hap_read_mult_reg(struct qpnp_hap *hap, u16 addr, u8 *val,
+				int len)
+{
+	int rc;
+
+	rc = regmap_bulk_read(hap->regmap, addr, val, len);
+	if (rc < 0)
+		pr_err("Error reading address: %X - ret %X\n", addr, rc);
+
+	return rc;
+}
+
+static int qpnp_hap_read_reg(struct qpnp_hap *hap, u16 addr, u8 *val)
+{
+	int rc;
+	uint tmp;
+
+	rc = regmap_read(hap->regmap, addr, &tmp);
+	if (rc < 0)
+		pr_err("Error reading address: %X - ret %X\n", addr, rc);
+	else
+		*val = (u8)tmp;
+
+	return rc;
+}
+
+/* helper to write a pmic register */
+static int qpnp_hap_write_mult_reg(struct qpnp_hap *hap, u16 addr, u8 *val,
+				int len)
+{
+	unsigned long flags;
+	int rc;
+
+	spin_lock_irqsave(&hap->bus_lock, flags);
+	rc = regmap_bulk_write(hap->regmap, addr, val, len);
+	if (rc < 0)
+		pr_err("Error writing address: %X - ret %X\n", addr, rc);
+
+	spin_unlock_irqrestore(&hap->bus_lock, flags);
+	return rc;
+}
+
+static int qpnp_hap_write_reg(struct qpnp_hap *hap, u16 addr, u8 val)
+{
+	unsigned long flags;
+	int rc;
+
+	spin_lock_irqsave(&hap->bus_lock, flags);
+	rc = regmap_write(hap->regmap, addr, val);
+	if (rc < 0)
+		pr_err("Error writing address: %X - ret %X\n", addr, rc);
+
+	spin_unlock_irqrestore(&hap->bus_lock, flags);
+	if (!rc)
+		pr_debug("wrote: HAP_0x%x = 0x%x\n", addr, val);
+	return rc;
+}
+
+/* helper to access secure registers */
+#define QPNP_HAP_SEC_UNLOCK		0xA5
+static int qpnp_hap_sec_masked_write_reg(struct qpnp_hap *hap, u16 addr,
+					u8 mask, u8 val)
+{
+	unsigned long flags;
+	int rc;
+	u8 tmp = QPNP_HAP_SEC_UNLOCK;
+
+	spin_lock_irqsave(&hap->bus_lock, flags);
+	rc = regmap_write(hap->regmap, QPNP_HAP_SEC_ACCESS_REG(hap->base), tmp);
+	if (rc < 0) {
+		pr_err("Error writing sec_code - ret %X\n", rc);
+		goto out;
+	}
+
+	rc = regmap_update_bits(hap->regmap, addr, mask, val);
+	if (rc < 0)
+		pr_err("Error writing address: %X - ret %X\n", addr, rc);
+
+out:
+	spin_unlock_irqrestore(&hap->bus_lock, flags);
+	if (!rc)
+		pr_debug("wrote: HAP_0x%x = 0x%x\n", addr, val);
+	return rc;
+}
+
+static int qpnp_hap_masked_write_reg(struct qpnp_hap *hap, u16 addr, u8 mask,
+					u8 val)
+{
+	unsigned long flags;
+	int rc;
+
+	spin_lock_irqsave(&hap->bus_lock, flags);
+	rc = regmap_update_bits(hap->regmap, addr, mask, val);
+	if (rc < 0)
+		pr_err("Error writing address: %X - ret %X\n", addr, rc);
+
+	spin_unlock_irqrestore(&hap->bus_lock, flags);
+	if (!rc)
+		pr_debug("wrote: HAP_0x%x = 0x%x\n", addr, val);
+	return rc;
+}
+
+static void qpnp_handle_sc_irq(struct work_struct *work)
+{
+	struct qpnp_hap *hap = container_of(work,
+				struct qpnp_hap, sc_work.work);
+	u8 val;
+
+	qpnp_hap_read_reg(hap, QPNP_HAP_STATUS(hap->base), &val);
+
+	/* clear short circuit register */
+	if (val & SC_FOUND_BIT) {
+		hap->sc_count++;
+		val = QPNP_HAP_SC_CLR;
+		qpnp_hap_write_reg(hap, QPNP_HAP_SC_CLR_REG(hap->base), val);
+	}
+}
+
+#define QPNP_HAP_CYCLES		4
+static int qpnp_hap_mod_enable(struct qpnp_hap *hap, bool on)
+{
+	unsigned long wait_time_us;
+	u8 val;
+	int rc, i;
+
+	if (hap->module_en == on)
+		return 0;
+
+	if (!on) {
+		/*
+		 * Wait for the play time itself when it is <= 20 ms, and for
+		 * 4 cycles of the play rate when it is longer. This shortens
+		 * the time spent polling the BUSY status below.
+		 */
+		if (hap->play_time_ms <= 20)
+			wait_time_us = hap->play_time_ms * 1000;
+		else
+			wait_time_us = QPNP_HAP_CYCLES * hap->wave_play_rate_us;
+
+		for (i = 0; i < QPNP_HAP_MAX_RETRIES; i++) {
+			rc = qpnp_hap_read_reg(hap, QPNP_HAP_STATUS(hap->base),
+					&val);
+			if (rc < 0)
+				return rc;
+
+			pr_debug("HAP_STATUS=0x%x\n", val);
+
+			/* wait for play_rate cycles */
+			if (val & QPNP_HAP_STATUS_BUSY) {
+				usleep_range(wait_time_us, wait_time_us + 1);
+				if (hap->play_mode == QPNP_HAP_DIRECT ||
+					hap->play_mode == QPNP_HAP_PWM)
+					break;
+			} else {
+				break;
+			}
+		}
+
+		if (i >= QPNP_HAP_MAX_RETRIES)
+			pr_debug("Haptics Busy. Force disable\n");
+	}
+
+	val = on ? QPNP_HAP_EN_BIT : 0;
+	rc = qpnp_hap_write_reg(hap, QPNP_HAP_EN_CTL_REG(hap->base), val);
+	if (rc < 0)
+		return rc;
+
+	hap->module_en = on;
+	return 0;
+}
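+
+/*
+ * Example of the wait logic above (editor's illustration): for a 10 ms
+ * pattern the poll interval is the play time itself, 10000 us; for a 100 ms
+ * pattern with the default 5715 us play rate it is
+ * QPNP_HAP_CYCLES * 5715 == 22860 us, bounded by QPNP_HAP_MAX_RETRIES (5)
+ * polls of the BUSY bit before the module is force disabled.
+ */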
+
+static int qpnp_hap_play(struct qpnp_hap *hap, bool on)
+{
+	u8 val;
+	int rc;
+
+	val = on ? QPNP_HAP_PLAY_EN_BIT : 0;
+	rc = qpnp_hap_write_reg(hap, QPNP_HAP_PLAY_REG(hap->base), val);
+	return rc;
+}
+
+/* sysfs show debug registers */
+static ssize_t qpnp_hap_dump_regs_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct timed_output_dev *timed_dev = dev_get_drvdata(dev);
+	struct qpnp_hap *hap = container_of(timed_dev, struct qpnp_hap,
+					 timed_dev);
+	int count = 0, i;
+	u8 val;
+
+	for (i = 0; i < ARRAY_SIZE(qpnp_hap_dbg_regs); i++) {
+		qpnp_hap_read_reg(hap, hap->base + qpnp_hap_dbg_regs[i], &val);
+		count += snprintf(buf + count, PAGE_SIZE - count,
+				"qpnp_haptics: REG_0x%x = 0x%x\n",
+				hap->base + qpnp_hap_dbg_regs[i],
+				val);
+
+		if (count >= PAGE_SIZE)
+			return PAGE_SIZE - 1;
+	}
+
+	return count;
+}
+
+/* play irq handler */
+static irqreturn_t qpnp_hap_play_irq(int irq, void *_hap)
+{
+	struct qpnp_hap *hap = _hap;
+	int i, rc;
+	u8 val;
+
+	mutex_lock(&hap->wf_lock);
+
+	/* Configure WAVE_SAMPLE1 to WAVE_SAMPLE8 register */
+	for (i = 0; i < QPNP_HAP_WAV_SAMP_LEN && hap->wf_update; i++) {
+		val = hap->wave_samp[i] = hap->shadow_wave_samp[i];
+		rc = qpnp_hap_write_reg(hap,
+			QPNP_HAP_WAV_S_REG_BASE(hap->base) + i, val);
+		if (rc)
+			goto unlock;
+	}
+	hap->wf_update = false;
+
+unlock:
+	mutex_unlock(&hap->wf_lock);
+
+	return IRQ_HANDLED;
+}
+
+/* short circuit irq handler */
+static irqreturn_t qpnp_hap_sc_irq(int irq, void *_hap)
+{
+	struct qpnp_hap *hap = _hap;
+	int rc;
+	u8 val;
+
+	pr_debug("Short circuit detected\n");
+
+	if (hap->sc_count < SC_MAX_COUNT) {
+		qpnp_hap_read_reg(hap, QPNP_HAP_STATUS(hap->base), &val);
+		if (val & SC_FOUND_BIT)
+			schedule_delayed_work(&hap->sc_work,
+					QPNP_HAP_SC_IRQ_STATUS_DELAY);
+		else
+			hap->sc_count = 0;
+	} else {
+		/* Disable haptics module if the duration of short circuit
+		 * exceeds the maximum limit (5 secs).
+		 */
+		val = 0;
+		rc = qpnp_hap_write_reg(hap, QPNP_HAP_EN_CTL_REG(hap->base),
+			val);
+		pr_err("Haptics disabled permanently due to short circuit\n");
+	}
+
+	return IRQ_HANDLED;
+}
+
+/* configuration api for buffer mode */
+static int qpnp_hap_buffer_config(struct qpnp_hap *hap, u8 *wave_samp,
+				bool overdrive)
+{
+	u8 buf[QPNP_HAP_WAV_SAMP_LEN], val;
+	u8 *ptr;
+	int rc, i;
+
+	/* Configure the WAVE_REPEAT register */
+	if (hap->wave_rep_cnt < QPNP_HAP_WAV_REP_MIN)
+		hap->wave_rep_cnt = QPNP_HAP_WAV_REP_MIN;
+	else if (hap->wave_rep_cnt > QPNP_HAP_WAV_REP_MAX)
+		hap->wave_rep_cnt = QPNP_HAP_WAV_REP_MAX;
+
+	if (hap->wave_s_rep_cnt < QPNP_HAP_WAV_S_REP_MIN)
+		hap->wave_s_rep_cnt = QPNP_HAP_WAV_S_REP_MIN;
+	else if (hap->wave_s_rep_cnt > QPNP_HAP_WAV_S_REP_MAX)
+		hap->wave_s_rep_cnt = QPNP_HAP_WAV_S_REP_MAX;
+
+	val = ilog2(hap->wave_rep_cnt) << QPNP_HAP_WAV_REP_SHIFT |
+			ilog2(hap->wave_s_rep_cnt);
+	rc = qpnp_hap_masked_write_reg(hap, QPNP_HAP_WAV_REP_REG(hap->base),
+			QPNP_HAP_WAV_REP_MASK | QPNP_HAP_WAV_S_REP_MASK, val);
+	if (rc)
+		return rc;
+
+	/* Don't set override bit in waveform sample for PM660 */
+	if (hap->pmic_subtype == PM660_SUBTYPE)
+		overdrive = false;
+
+	if (wave_samp)
+		ptr = wave_samp;
+	else
+		ptr = hap->wave_samp;
+
+	/* Configure WAVE_SAMPLE1 to WAVE_SAMPLE8 register */
+	for (i = 0; i < QPNP_HAP_WAV_SAMP_LEN; i++) {
+		buf[i] = ptr[i] & QPNP_HAP_WF_AMP_MASK;
+		if (buf[i])
+			buf[i] |= (overdrive ? QPNP_HAP_WF_OVD_BIT : 0);
+	}
+
+	rc = qpnp_hap_write_mult_reg(hap, QPNP_HAP_WAV_S_REG_BASE(hap->base),
+				buf, QPNP_HAP_WAV_SAMP_LEN);
+	if (rc)
+		return rc;
+
+	return 0;
+}
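+
+/*
+ * WAVE_REPEAT encoding example (editor's illustration): wave_rep_cnt == 8
+ * and wave_s_rep_cnt == 2 give val = ilog2(8) << 4 | ilog2(2) == 0x31.
+ * Because ilog2() rounds down, a non-power-of-two count (e.g. 6) is
+ * silently clamped to the next lower supported repeat value (4).
+ */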
+
+/* configuration api for pwm */
+static int qpnp_hap_pwm_config(struct qpnp_hap *hap)
+{
+	u8 val = 0;
+	int rc;
+
+	/* Configure the EXTERNAL_PWM register */
+	if (hap->ext_pwm_freq_khz <= QPNP_HAP_EXT_PWM_FREQ_25_KHZ) {
+		hap->ext_pwm_freq_khz = QPNP_HAP_EXT_PWM_FREQ_25_KHZ;
+		val = 0;
+	} else if (hap->ext_pwm_freq_khz <=
+				QPNP_HAP_EXT_PWM_FREQ_50_KHZ) {
+		hap->ext_pwm_freq_khz = QPNP_HAP_EXT_PWM_FREQ_50_KHZ;
+		val = 1;
+	} else if (hap->ext_pwm_freq_khz <=
+				QPNP_HAP_EXT_PWM_FREQ_75_KHZ) {
+		hap->ext_pwm_freq_khz = QPNP_HAP_EXT_PWM_FREQ_75_KHZ;
+		val = 2;
+	} else {
+		hap->ext_pwm_freq_khz = QPNP_HAP_EXT_PWM_FREQ_100_KHZ;
+		val = 3;
+	}
+
+	rc = qpnp_hap_masked_write_reg(hap, QPNP_HAP_EXT_PWM_REG(hap->base),
+			QPNP_HAP_EXT_PWM_MASK, val);
+	if (rc)
+		return rc;
+
+	if (!hap->ext_pwm_dtest_line ||
+			hap->ext_pwm_dtest_line > PWM_MAX_DTEST_LINES) {
+		pr_err("invalid dtest line\n");
+		return -EINVAL;
+	}
+
+	/* disable auto res for PWM mode */
+	val = hap->ext_pwm_dtest_line << QPNP_HAP_EXT_PWM_DTEST_SHFT;
+	rc = qpnp_hap_sec_masked_write_reg(hap, QPNP_HAP_TEST2_REG(hap->base),
+		QPNP_HAP_EXT_PWM_DTEST_MASK | QPNP_HAP_AUTO_RES_MASK, val);
+	if (rc)
+		return rc;
+
+	rc = pwm_config(hap->pwm_info.pwm_dev,
+				hap->pwm_info.duty_us * NSEC_PER_USEC,
+				hap->pwm_info.period_us * NSEC_PER_USEC);
+	if (rc < 0) {
+		pr_err("hap pwm config failed\n");
+		pwm_free(hap->pwm_info.pwm_dev);
+		return -ENODEV;
+	}
+
+	hap->pwm_cfg_state = true;
+
+	return 0;
+}
+
+static int qpnp_hap_lra_auto_res_config(struct qpnp_hap *hap,
+					struct qpnp_hap_lra_ares_cfg *tmp_cfg)
+{
+	struct qpnp_hap_lra_ares_cfg *ares_cfg;
+	int rc;
+	u8 val = 0, mask = 0;
+
+	/* disable auto resonance for ERM */
+	if (hap->act_type == QPNP_HAP_ERM) {
+		val = 0x00;
+		rc = qpnp_hap_write_reg(hap,
+			QPNP_HAP_LRA_AUTO_RES_REG(hap->base), val);
+		return rc;
+	}
+
+	if (hap->lra_hw_auto_resonance) {
+		rc = qpnp_hap_masked_write_reg(hap,
+			QPNP_HAP_AUTO_RES_CTRL(hap->base),
+			QPNP_HAP_PM660_HW_AUTO_RES_MODE_BIT,
+			QPNP_HAP_PM660_HW_AUTO_RES_MODE_BIT);
+		if (rc)
+			return rc;
+	}
+
+	if (tmp_cfg)
+		ares_cfg = tmp_cfg;
+	else
+		ares_cfg = &hap->ares_cfg;
+
+	if (ares_cfg->lra_res_cal_period < QPNP_HAP_RES_CAL_PERIOD_MIN)
+		ares_cfg->lra_res_cal_period = QPNP_HAP_RES_CAL_PERIOD_MIN;
+
+	if (hap->pmic_subtype == PM660_SUBTYPE) {
+		if (ares_cfg->lra_res_cal_period >
+				QPNP_HAP_PM660_RES_CAL_PERIOD_MAX)
+			ares_cfg->lra_res_cal_period =
+				QPNP_HAP_PM660_RES_CAL_PERIOD_MAX;
+
+		if (ares_cfg->auto_res_mode == QPNP_HAP_PM660_AUTO_RES_QWD)
+			ares_cfg->lra_res_cal_period = 0;
+
+		if (ares_cfg->lra_res_cal_period)
+			val = ilog2(ares_cfg->lra_res_cal_period /
+					QPNP_HAP_RES_CAL_PERIOD_MIN) + 1;
+	} else {
+		if (ares_cfg->lra_res_cal_period > QPNP_HAP_RES_CAL_PERIOD_MAX)
+			ares_cfg->lra_res_cal_period =
+				QPNP_HAP_RES_CAL_PERIOD_MAX;
+
+		if (ares_cfg->lra_res_cal_period)
+			val = ilog2(ares_cfg->lra_res_cal_period /
+					QPNP_HAP_RES_CAL_PERIOD_MIN);
+	}
+
+	if (hap->pmic_subtype == PM660_SUBTYPE) {
+		val |= ares_cfg->auto_res_mode <<
+			QPNP_HAP_PM660_AUTO_RES_MODE_SHIFT;
+		mask = QPNP_HAP_PM660_AUTO_RES_MODE_BIT;
+		val |= ares_cfg->lra_high_z <<
+				QPNP_HAP_PM660_CALIBRATE_DURATION_SHIFT;
+		mask |= QPNP_HAP_PM660_CALIBRATE_DURATION_MASK;
+		if (ares_cfg->lra_qwd_drive_duration != -EINVAL) {
+			val |= ares_cfg->lra_qwd_drive_duration <<
+				QPNP_HAP_PM660_QWD_DRIVE_DURATION_SHIFT;
+			mask |= QPNP_HAP_PM660_QWD_DRIVE_DURATION_BIT;
+		}
+		if (ares_cfg->calibrate_at_eop != -EINVAL) {
+			val |= ares_cfg->calibrate_at_eop <<
+				QPNP_HAP_PM660_CALIBRATE_AT_EOP_SHIFT;
+			mask |= QPNP_HAP_PM660_CALIBRATE_AT_EOP_BIT;
+		}
+		mask |= QPNP_HAP_PM660_LRA_RES_CAL_PER_MASK;
+	} else {
+		val |= (ares_cfg->auto_res_mode <<
+				QPNP_HAP_AUTO_RES_MODE_SHIFT);
+		val |= (ares_cfg->lra_high_z << QPNP_HAP_LRA_HIGH_Z_SHIFT);
+		mask = QPNP_HAP_AUTO_RES_MODE_MASK | QPNP_HAP_LRA_HIGH_Z_MASK |
+			QPNP_HAP_LRA_RES_CAL_PER_MASK;
+	}
+
+	pr_debug("mode: %d hi_z period: %d cal_period: %d\n",
+		ares_cfg->auto_res_mode, ares_cfg->lra_high_z,
+		ares_cfg->lra_res_cal_period);
+
+	rc = qpnp_hap_masked_write_reg(hap,
+			QPNP_HAP_LRA_AUTO_RES_REG(hap->base), mask, val);
+	return rc;
+}
+
+/* configuration api for play mode */
+static int qpnp_hap_play_mode_config(struct qpnp_hap *hap)
+{
+	u8 val = 0;
+	int rc;
+
+	val = hap->play_mode << QPNP_HAP_WF_SOURCE_SHIFT;
+	rc = qpnp_hap_masked_write_reg(hap, QPNP_HAP_SEL_REG(hap->base),
+			QPNP_HAP_WF_SOURCE_MASK, val);
+	return rc;
+}
+
+/* configuration api for max voltage */
+static int qpnp_hap_vmax_config(struct qpnp_hap *hap, int vmax_mv,
+				bool overdrive)
+{
+	u8 val = 0;
+	int rc;
+
+	if (vmax_mv < 0)
+		return -EINVAL;
+
+	/* Allow setting override bit in VMAX_CFG only for PM660 */
+	if (hap->pmic_subtype != PM660_SUBTYPE)
+		overdrive = false;
+
+	if (vmax_mv < QPNP_HAP_VMAX_MIN_MV)
+		vmax_mv = QPNP_HAP_VMAX_MIN_MV;
+	else if (vmax_mv > QPNP_HAP_VMAX_MAX_MV)
+		vmax_mv = QPNP_HAP_VMAX_MAX_MV;
+
+	val = (vmax_mv / QPNP_HAP_VMAX_MIN_MV) << QPNP_HAP_VMAX_SHIFT;
+	if (overdrive)
+		val |= QPNP_HAP_VMAX_OVD_BIT;
+	rc = qpnp_hap_masked_write_reg(hap, QPNP_HAP_VMAX_REG(hap->base),
+			QPNP_HAP_VMAX_MASK | QPNP_HAP_VMAX_OVD_BIT, val);
+	return rc;
+}
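+
+/*
+ * VMAX encoding example (editor's illustration): the register holds the
+ * target voltage in 116 mV steps in bits 5:1, so vmax_mv == 2900 yields
+ * (2900 / 116) << 1 == 0x32, i.e. exactly 25 steps of 116 mV.
+ */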
+
+/* configuration api for ilim */
+static int qpnp_hap_ilim_config(struct qpnp_hap *hap)
+{
+	u8 val = 0;
+	int rc;
+
+	if (hap->ilim_ma < QPNP_HAP_ILIM_MIN_MA)
+		hap->ilim_ma = QPNP_HAP_ILIM_MIN_MA;
+	else if (hap->ilim_ma > QPNP_HAP_ILIM_MAX_MA)
+		hap->ilim_ma = QPNP_HAP_ILIM_MAX_MA;
+
+	val = (hap->ilim_ma / QPNP_HAP_ILIM_MIN_MA) - 1;
+	rc = qpnp_hap_masked_write_reg(hap, QPNP_HAP_ILIM_REG(hap->base),
+			QPNP_HAP_ILIM_MASK, val);
+	return rc;
+}
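+
+/*
+ * ILIM encoding (editor's note): only two settings exist, so
+ * (ilim_ma / 400) - 1 maps 400 mA to 0 and 800 mA to 1 in the single
+ * ILIM register bit.
+ */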
+
+/* configuration api for short circuit debounce */
+static int qpnp_hap_sc_deb_config(struct qpnp_hap *hap)
+{
+	u8 val = 0;
+	int rc;
+
+	if (hap->sc_deb_cycles < QPNP_HAP_SC_DEB_CYCLES_MIN)
+		hap->sc_deb_cycles = QPNP_HAP_SC_DEB_CYCLES_MIN;
+	else if (hap->sc_deb_cycles > QPNP_HAP_SC_DEB_CYCLES_MAX)
+		hap->sc_deb_cycles = QPNP_HAP_SC_DEB_CYCLES_MAX;
+
+	if (hap->sc_deb_cycles != QPNP_HAP_SC_DEB_CYCLES_MIN)
+		val = ilog2(hap->sc_deb_cycles /
+			QPNP_HAP_DEF_SC_DEB_CYCLES) + 1;
+	else
+		val = QPNP_HAP_SC_DEB_CYCLES_MIN;
+
+	rc = qpnp_hap_masked_write_reg(hap, QPNP_HAP_SC_DEB_REG(hap->base),
+			QPNP_HAP_SC_DEB_MASK, val);
+
+	return rc;
+}
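+
+/*
+ * SC debounce encoding example (editor's illustration): values are
+ * log-encoded relative to the 8-cycle default, so 32 cycles gives
+ * ilog2(32 / 8) + 1 == 3, 8 cycles gives 1, and the 0-cycle minimum
+ * writes 0 to disable debouncing.
+ */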
+
+static int qpnp_hap_int_pwm_config(struct qpnp_hap *hap)
+{
+	int rc;
+	u8 val;
+
+	if (hap->int_pwm_freq_khz <= QPNP_HAP_INT_PWM_FREQ_253_KHZ) {
+		if (hap->pmic_subtype == PM660_SUBTYPE) {
+			hap->int_pwm_freq_khz = QPNP_HAP_INT_PWM_FREQ_505_KHZ;
+			val = 1;
+		} else {
+			hap->int_pwm_freq_khz = QPNP_HAP_INT_PWM_FREQ_253_KHZ;
+			val = 0;
+		}
+	} else if (hap->int_pwm_freq_khz <= QPNP_HAP_INT_PWM_FREQ_505_KHZ) {
+		hap->int_pwm_freq_khz = QPNP_HAP_INT_PWM_FREQ_505_KHZ;
+		val = 1;
+	} else if (hap->int_pwm_freq_khz <= QPNP_HAP_INT_PWM_FREQ_739_KHZ) {
+		hap->int_pwm_freq_khz = QPNP_HAP_INT_PWM_FREQ_739_KHZ;
+		val = 2;
+	} else {
+		hap->int_pwm_freq_khz = QPNP_HAP_INT_PWM_FREQ_1076_KHZ;
+		val = 3;
+	}
+
+	rc = qpnp_hap_masked_write_reg(hap, QPNP_HAP_INT_PWM_REG(hap->base),
+			QPNP_HAP_INT_PWM_MASK, val);
+	if (rc)
+		return rc;
+
+	rc = qpnp_hap_masked_write_reg(hap, QPNP_HAP_PWM_CAP_REG(hap->base),
+			QPNP_HAP_INT_PWM_MASK, val);
+	return rc;
+}
+
+static int qpnp_hap_brake_config(struct qpnp_hap *hap, u8 *brake_pat)
+{
+	int rc, i;
+	u32 temp;
+	u8 *pat_ptr, val;
+
+	if (!hap->en_brake)
+		return 0;
+
+	/* Configure BRAKE register */
+	rc = qpnp_hap_masked_write_reg(hap, QPNP_HAP_EN_CTL2_REG(hap->base),
+			QPNP_HAP_BRAKE_MASK, (u8)hap->en_brake);
+	if (rc)
+		return rc;
+
+	if (!brake_pat)
+		pat_ptr = hap->brake_pat;
+	else
+		pat_ptr = brake_pat;
+
+	if (hap->sup_brake_pat) {
+		for (i = QPNP_HAP_BRAKE_PAT_LEN - 1, val = 0; i >= 0; i--) {
+			pat_ptr[i] &= QPNP_HAP_BRAKE_PAT_MASK;
+			temp = i << 1;
+			val |= pat_ptr[i] << temp;
+		}
+		rc = qpnp_hap_write_reg(hap, QPNP_HAP_BRAKE_REG(hap->base),
+				val);
+		if (rc)
+			return rc;
+	}
+
+	return 0;
+}
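+
+/*
+ * Brake pattern packing example (editor's illustration): each of the four
+ * pattern entries is a 2-bit strength packed into one register byte, entry
+ * i landing at bits (2i+1):2i. A pattern of {0x1, 0x2, 0x3, 0x3} therefore
+ * packs to 0x1 | 0x2 << 2 | 0x3 << 4 | 0x3 << 6 == 0xF9.
+ */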
+
+/* DT parsing api for buffer mode */
+static int qpnp_hap_parse_buffer_dt(struct qpnp_hap *hap)
+{
+	struct platform_device *pdev = hap->pdev;
+	struct property *prop;
+	u32 temp;
+	int rc, i;
+
+	if (hap->wave_rep_cnt > 0 || hap->wave_s_rep_cnt > 0)
+		return 0;
+
+	hap->wave_rep_cnt = QPNP_HAP_WAV_REP_MIN;
+	rc = of_property_read_u32(pdev->dev.of_node,
+			"qcom,wave-rep-cnt", &temp);
+	if (!rc) {
+		hap->wave_rep_cnt = temp;
+	} else if (rc != -EINVAL) {
+		pr_err("Unable to read rep cnt\n");
+		return rc;
+	}
+
+	hap->wave_s_rep_cnt = QPNP_HAP_WAV_S_REP_MIN;
+	rc = of_property_read_u32(pdev->dev.of_node,
+			"qcom,wave-samp-rep-cnt", &temp);
+	if (!rc) {
+		hap->wave_s_rep_cnt = temp;
+	} else if (rc != -EINVAL) {
+		pr_err("Unable to read samp rep cnt\n");
+		return rc;
+	}
+
+	prop = of_find_property(pdev->dev.of_node,
+			"qcom,wave-samples", &temp);
+	if (!prop || temp != QPNP_HAP_WAV_SAMP_LEN) {
+		pr_err("Invalid wave samples, use default");
+		for (i = 0; i < QPNP_HAP_WAV_SAMP_LEN; i++)
+			hap->wave_samp[i] = QPNP_HAP_WAV_SAMP_MAX;
+	} else {
+		memcpy(hap->wave_samp, prop->value, QPNP_HAP_WAV_SAMP_LEN);
+	}
+
+	return 0;
+}
+
+/* DT parsing api for PWM mode */
+static int qpnp_hap_parse_pwm_dt(struct qpnp_hap *hap)
+{
+	struct platform_device *pdev = hap->pdev;
+	u32 temp;
+	int rc;
+
+	hap->ext_pwm_freq_khz = QPNP_HAP_EXT_PWM_FREQ_25_KHZ;
+	rc = of_property_read_u32(pdev->dev.of_node,
+			"qcom,ext-pwm-freq-khz", &temp);
+	if (!rc) {
+		hap->ext_pwm_freq_khz = temp;
+	} else if (rc != -EINVAL) {
+		pr_err("Unable to read ext pwm freq\n");
+		return rc;
+	}
+
+	rc = of_property_read_u32(pdev->dev.of_node,
+			"qcom,pwm-channel", &temp);
+	if (!rc)
+		hap->pwm_info.pwm_channel = temp;
+	else
+		return rc;
+
+	hap->pwm_info.pwm_dev = of_pwm_get(pdev->dev.of_node, NULL);
+
+	if (IS_ERR(hap->pwm_info.pwm_dev)) {
+		rc = PTR_ERR(hap->pwm_info.pwm_dev);
+		pr_err("Cannot get PWM device rc:(%d)\n", rc);
+		hap->pwm_info.pwm_dev = NULL;
+		return rc;
+	}
+
+	rc = of_property_read_u32(pdev->dev.of_node, "qcom,period-us", &temp);
+	if (!rc)
+		hap->pwm_info.period_us = temp;
+	else
+		return rc;
+
+	rc = of_property_read_u32(pdev->dev.of_node, "qcom,duty-us", &temp);
+	if (!rc)
+		hap->pwm_info.duty_us = temp;
+	else
+		return rc;
+
+	rc = of_property_read_u32(pdev->dev.of_node,
+			"qcom,ext-pwm-dtest-line", &temp);
+	if (!rc)
+		hap->ext_pwm_dtest_line = temp;
+	else
+		return rc;
+
+	return 0;
+}
+
+/* sysfs show for wave samples */
+static ssize_t qpnp_hap_wf_samp_show(struct device *dev, char *buf, int index)
+{
+	struct timed_output_dev *timed_dev = dev_get_drvdata(dev);
+	struct qpnp_hap *hap = container_of(timed_dev, struct qpnp_hap,
+					 timed_dev);
+
+	if (index < 0 || index >= QPNP_HAP_WAV_SAMP_LEN) {
+		pr_err("Invalid sample index(%d)\n", index);
+		return -EINVAL;
+	}
+
+	return snprintf(buf, PAGE_SIZE, "0x%x\n",
+			hap->shadow_wave_samp[index]);
+}
+
+static ssize_t qpnp_hap_wf_s0_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	return qpnp_hap_wf_samp_show(dev, buf, 0);
+}
+
+static ssize_t qpnp_hap_wf_s1_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	return qpnp_hap_wf_samp_show(dev, buf, 1);
+}
+
+static ssize_t qpnp_hap_wf_s2_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	return qpnp_hap_wf_samp_show(dev, buf, 2);
+}
+
+static ssize_t qpnp_hap_wf_s3_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	return qpnp_hap_wf_samp_show(dev, buf, 3);
+}
+
+static ssize_t qpnp_hap_wf_s4_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	return qpnp_hap_wf_samp_show(dev, buf, 4);
+}
+
+static ssize_t qpnp_hap_wf_s5_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	return qpnp_hap_wf_samp_show(dev, buf, 5);
+}
+
+static ssize_t qpnp_hap_wf_s6_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	return qpnp_hap_wf_samp_show(dev, buf, 6);
+}
+
+static ssize_t qpnp_hap_wf_s7_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	return qpnp_hap_wf_samp_show(dev, buf, 7);
+}
+
+/* sysfs store for wave samples */
+static ssize_t qpnp_hap_wf_samp_store(struct device *dev,
+		const char *buf, size_t count, int index)
+{
+	struct timed_output_dev *timed_dev = dev_get_drvdata(dev);
+	struct qpnp_hap *hap = container_of(timed_dev, struct qpnp_hap,
+					 timed_dev);
+	int data, rc;
+
+	if (index < 0 || index >= QPNP_HAP_WAV_SAMP_LEN) {
+		pr_err("Invalid sample index(%d)\n", index);
+		return -EINVAL;
+	}
+
+	rc = kstrtoint(buf, 16, &data);
+	if (rc)
+		return rc;
+
+	if (data < 0 || data > 0xff) {
+		pr_err("Invalid sample wf_%d (%d)\n", index, data);
+		return -EINVAL;
+	}
+
+	hap->shadow_wave_samp[index] = (u8) data;
+	return count;
+}
+
+static ssize_t qpnp_hap_wf_s0_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	return qpnp_hap_wf_samp_store(dev, buf, count, 0);
+}
+
+static ssize_t qpnp_hap_wf_s1_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	return qpnp_hap_wf_samp_store(dev, buf, count, 1);
+}
+
+static ssize_t qpnp_hap_wf_s2_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	return qpnp_hap_wf_samp_store(dev, buf, count, 2);
+}
+
+static ssize_t qpnp_hap_wf_s3_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	return qpnp_hap_wf_samp_store(dev, buf, count, 3);
+}
+
+static ssize_t qpnp_hap_wf_s4_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	return qpnp_hap_wf_samp_store(dev, buf, count, 4);
+}
+
+static ssize_t qpnp_hap_wf_s5_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	return qpnp_hap_wf_samp_store(dev, buf, count, 5);
+}
+
+static ssize_t qpnp_hap_wf_s6_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	return qpnp_hap_wf_samp_store(dev, buf, count, 6);
+}
+
+static ssize_t qpnp_hap_wf_s7_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	return qpnp_hap_wf_samp_store(dev, buf, count, 7);
+}
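+
+/*
+ * Example sysfs usage for the accessors above (editor's sketch; the class
+ * device name "vibrator" is an assumption and depends on the target):
+ *
+ *   echo 0x3e > /sys/class/timed_output/vibrator/wf_s0
+ *   echo 1 > /sys/class/timed_output/vibrator/wf_update
+ *
+ * Samples are staged in shadow_wave_samp and only copied into the wave
+ * sample registers from the play interrupt once wf_update is set.
+ */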
+
+/* sysfs show for waveform update */
+static ssize_t qpnp_hap_wf_update_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct timed_output_dev *timed_dev = dev_get_drvdata(dev);
+	struct qpnp_hap *hap = container_of(timed_dev, struct qpnp_hap,
+					 timed_dev);
+
+	return snprintf(buf, PAGE_SIZE, "%d\n", hap->wf_update);
+}
+
+/* sysfs store for updating wave samples */
+static ssize_t qpnp_hap_wf_update_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct timed_output_dev *timed_dev = dev_get_drvdata(dev);
+	struct qpnp_hap *hap = container_of(timed_dev, struct qpnp_hap,
+					 timed_dev);
+
+	mutex_lock(&hap->wf_lock);
+	hap->wf_update = true;
+	mutex_unlock(&hap->wf_lock);
+
+	return count;
+}
+
+/* sysfs show for wave repeat */
+static ssize_t qpnp_hap_wf_rep_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct timed_output_dev *timed_dev = dev_get_drvdata(dev);
+	struct qpnp_hap *hap = container_of(timed_dev, struct qpnp_hap,
+					 timed_dev);
+
+	return snprintf(buf, PAGE_SIZE, "%d\n", hap->wave_rep_cnt);
+}
+
+/* sysfs store for wave repeat */
+static ssize_t qpnp_hap_wf_rep_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct timed_output_dev *timed_dev = dev_get_drvdata(dev);
+	struct qpnp_hap *hap = container_of(timed_dev, struct qpnp_hap,
+					 timed_dev);
+	int data, rc;
+	u8 val;
+
+	rc = kstrtoint(buf, 10, &data);
+	if (rc)
+		return rc;
+
+	if (data < QPNP_HAP_WAV_REP_MIN)
+		data = QPNP_HAP_WAV_REP_MIN;
+	else if (data > QPNP_HAP_WAV_REP_MAX)
+		data = QPNP_HAP_WAV_REP_MAX;
+
+	val = ilog2(data) << QPNP_HAP_WAV_REP_SHIFT;
+	rc = qpnp_hap_masked_write_reg(hap, QPNP_HAP_WAV_REP_REG(hap->base),
+			QPNP_HAP_WAV_REP_MASK, val);
+	if (!rc)
+		hap->wave_rep_cnt = data;
+
+	return count;
+}
+
+/* sysfs show for wave samples repeat */
+static ssize_t qpnp_hap_wf_s_rep_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct timed_output_dev *timed_dev = dev_get_drvdata(dev);
+	struct qpnp_hap *hap = container_of(timed_dev, struct qpnp_hap,
+					 timed_dev);
+
+	return snprintf(buf, PAGE_SIZE, "%d\n", hap->wave_s_rep_cnt);
+}
+
+/* sysfs store for wave samples repeat */
+static ssize_t qpnp_hap_wf_s_rep_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct timed_output_dev *timed_dev = dev_get_drvdata(dev);
+	struct qpnp_hap *hap = container_of(timed_dev, struct qpnp_hap,
+					 timed_dev);
+	int data, rc;
+	u8 val;
+
+	rc = kstrtoint(buf, 10, &data);
+	if (rc)
+		return rc;
+
+	if (data < QPNP_HAP_WAV_S_REP_MIN)
+		data = QPNP_HAP_WAV_S_REP_MIN;
+	else if (data > QPNP_HAP_WAV_S_REP_MAX)
+		data = QPNP_HAP_WAV_S_REP_MAX;
+
+	val = ilog2(data);
+	rc = qpnp_hap_masked_write_reg(hap, QPNP_HAP_WAV_REP_REG(hap->base),
+			QPNP_HAP_WAV_S_REP_MASK, val);
+	if (!rc)
+		hap->wave_s_rep_cnt = data;
+
+	return count;
+}
+
+static int parse_string(const char *in_buf, char *out_buf)
+{
+	int i;
+
+	if (snprintf(out_buf, QPNP_HAP_STR_SIZE, "%s", in_buf)
+		>= QPNP_HAP_STR_SIZE)
+		return -EINVAL;
+
+	for (i = 0; i < strlen(out_buf); i++) {
+		if (out_buf[i] == ' ' || out_buf[i] == '\n' ||
+			out_buf[i] == '\t') {
+			out_buf[i] = '\0';
+			break;
+		}
+	}
+
+	return 0;
+}
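+
+/*
+ * parse_string() copies at most QPNP_HAP_STR_SIZE - 1 characters and
+ * terminates the copy at the first space, tab or newline, so a sysfs
+ * write of "buffer\n" compares equal to "buffer" in the store handlers
+ * below.
+ */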
+
+/* sysfs store function for play mode */
+static ssize_t qpnp_hap_play_mode_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct timed_output_dev *timed_dev = dev_get_drvdata(dev);
+	struct qpnp_hap *hap = container_of(timed_dev, struct qpnp_hap,
+					 timed_dev);
+	char str[QPNP_HAP_STR_SIZE + 1];
+	int rc = 0, temp, old_mode;
+
+	rc = parse_string(buf, str);
+	if (rc < 0)
+		return rc;
+
+	if (strcmp(str, "buffer") == 0)
+		temp = QPNP_HAP_BUFFER;
+	else if (strcmp(str, "direct") == 0)
+		temp = QPNP_HAP_DIRECT;
+	else if (strcmp(str, "audio") == 0)
+		temp = QPNP_HAP_AUDIO;
+	else if (strcmp(str, "pwm") == 0)
+		temp = QPNP_HAP_PWM;
+	else
+		return -EINVAL;
+
+	if (temp == hap->play_mode)
+		return count;
+
+	if (temp == QPNP_HAP_BUFFER) {
+		rc = qpnp_hap_parse_buffer_dt(hap);
+		if (!rc)
+			rc = qpnp_hap_buffer_config(hap, NULL, false);
+	} else if (temp == QPNP_HAP_PWM && !hap->pwm_cfg_state) {
+		rc = qpnp_hap_parse_pwm_dt(hap);
+		if (!rc)
+			rc = qpnp_hap_pwm_config(hap);
+	}
+
+	if (rc < 0)
+		return rc;
+
+	rc = qpnp_hap_mod_enable(hap, false);
+	if (rc < 0)
+		return rc;
+
+	old_mode = hap->play_mode;
+	hap->play_mode = temp;
+	/* Configure the PLAY MODE register */
+	rc = qpnp_hap_play_mode_config(hap);
+	if (rc) {
+		hap->play_mode = old_mode;
+		return rc;
+	}
+
+	if (hap->play_mode == QPNP_HAP_AUDIO) {
+		rc = qpnp_hap_mod_enable(hap, true);
+		if (rc < 0) {
+			hap->play_mode = old_mode;
+			return rc;
+		}
+	}
+
+	return count;
+}
+
+/* sysfs show function for play mode */
+static ssize_t qpnp_hap_play_mode_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct timed_output_dev *timed_dev = dev_get_drvdata(dev);
+	struct qpnp_hap *hap = container_of(timed_dev, struct qpnp_hap,
+					 timed_dev);
+	char *str;
+
+	if (hap->play_mode == QPNP_HAP_BUFFER)
+		str = "buffer";
+	else if (hap->play_mode == QPNP_HAP_DIRECT)
+		str = "direct";
+	else if (hap->play_mode == QPNP_HAP_AUDIO)
+		str = "audio";
+	else if (hap->play_mode == QPNP_HAP_PWM)
+		str = "pwm";
+	else
+		return -EINVAL;
+
+	return snprintf(buf, PAGE_SIZE, "%s\n", str);
+}
+
+/* sysfs store for min max test data */
+static ssize_t qpnp_hap_min_max_test_data_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct timed_output_dev *timed_dev = dev_get_drvdata(dev);
+	struct qpnp_hap *hap = container_of(timed_dev, struct qpnp_hap,
+					 timed_dev);
+
+	int value = QPNP_TEST_TIMER_MS, i;
+
+	mutex_lock(&hap->lock);
+	qpnp_hap_mod_enable(hap, true);
+	for (i = 0; i < ARRAY_SIZE(qpnp_hap_min_max_test_data); i++) {
+		hrtimer_start(&hap->hap_test_timer,
+			      ktime_set(value / 1000, (value % 1000) * 1000000),
+			      HRTIMER_MODE_REL);
+		qpnp_hap_play_byte(qpnp_hap_min_max_test_data[i], true);
+		wait_for_completion(&hap->completion);
+	}
+
+	qpnp_hap_play_byte(0, false);
+	qpnp_hap_mod_enable(hap, false);
+	mutex_unlock(&hap->lock);
+
+	return count;
+}
+
+/* sysfs show function for min max test data */
+static ssize_t qpnp_hap_min_max_test_data_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	int count = 0, i;
+
+	for (i = 0; i < ARRAY_SIZE(qpnp_hap_min_max_test_data); i++) {
+		count += snprintf(buf + count, PAGE_SIZE - count,
+				"qpnp_haptics: min_max_test_data[%d] = 0x%x\n",
+				i, qpnp_hap_min_max_test_data[i]);
+
+		if (count >= PAGE_SIZE)
+			return PAGE_SIZE - 1;
+	}
+
+	return count;
+}
+
+/* sysfs store for ramp test data */
+static ssize_t qpnp_hap_ramp_test_data_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct timed_output_dev *timed_dev = dev_get_drvdata(dev);
+	struct qpnp_hap *hap = container_of(timed_dev, struct qpnp_hap,
+					 timed_dev);
+
+	int value = QPNP_TEST_TIMER_MS, i;
+
+	mutex_lock(&hap->lock);
+	qpnp_hap_mod_enable(hap, true);
+	for (i = 0; i < ARRAY_SIZE(qpnp_hap_ramp_test_data); i++) {
+		hrtimer_start(&hap->hap_test_timer,
+			      ktime_set(value / 1000, (value % 1000) * 1000000),
+			      HRTIMER_MODE_REL);
+		qpnp_hap_play_byte(qpnp_hap_ramp_test_data[i], true);
+		wait_for_completion(&hap->completion);
+	}
+
+	qpnp_hap_play_byte(0, false);
+	qpnp_hap_mod_enable(hap, false);
+	mutex_unlock(&hap->lock);
+
+	return count;
+}
+
+/* sysfs show function for ramp test data */
+static ssize_t qpnp_hap_ramp_test_data_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	int count = 0, i;
+
+	for (i = 0; i < ARRAY_SIZE(qpnp_hap_ramp_test_data); i++) {
+		count += snprintf(buf + count, PAGE_SIZE - count,
+				"qpnp_haptics: ramp_test_data[%d] = 0x%x\n",
+				i, qpnp_hap_ramp_test_data[i]);
+
+		if (count >= PAGE_SIZE)
+			return PAGE_SIZE - 1;
+	}
+
+	return count;
+}
+
+static ssize_t qpnp_hap_auto_res_mode_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct timed_output_dev *timed_dev = dev_get_drvdata(dev);
+	struct qpnp_hap *hap = container_of(timed_dev, struct qpnp_hap,
+					 timed_dev);
+	char *str;
+
+	if (hap->pmic_subtype == PM660_SUBTYPE) {
+		switch (hap->ares_cfg.auto_res_mode) {
+		case QPNP_HAP_PM660_AUTO_RES_ZXD:
+			str = "ZXD";
+			break;
+		case QPNP_HAP_PM660_AUTO_RES_QWD:
+			str = "QWD";
+			break;
+		default:
+			str = "None";
+			break;
+		}
+	} else {
+		switch (hap->ares_cfg.auto_res_mode) {
+		case QPNP_HAP_AUTO_RES_NONE:
+			str = "None";
+			break;
+		case QPNP_HAP_AUTO_RES_ZXD:
+			str = "ZXD";
+			break;
+		case QPNP_HAP_AUTO_RES_QWD:
+			str = "QWD";
+			break;
+		case QPNP_HAP_AUTO_RES_MAX_QWD:
+			str = "MAX_QWD";
+			break;
+		case QPNP_HAP_AUTO_RES_ZXD_EOP:
+			str = "ZXD_EOP";
+			break;
+		default:
+			str = "None";
+			break;
+		}
+	}
+
+	return snprintf(buf, PAGE_SIZE, "%s\n", str);
+}
+
+static ssize_t qpnp_hap_auto_res_mode_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct timed_output_dev *timed_dev = dev_get_drvdata(dev);
+	struct qpnp_hap *hap = container_of(timed_dev, struct qpnp_hap,
+					 timed_dev);
+	char str[QPNP_HAP_STR_SIZE + 1];
+	int rc = 0, temp;
+
+	rc = parse_string(buf, str);
+	if (rc < 0)
+		return rc;
+
+	if (hap->pmic_subtype == PM660_SUBTYPE) {
+		if (strcmp(str, "ZXD") == 0 ||
+			strcmp(str, "zxd") == 0)
+			temp = QPNP_HAP_PM660_AUTO_RES_ZXD;
+		else if (strcmp(str, "QWD") == 0 ||
+				strcmp(str, "qwd") == 0)
+			temp = QPNP_HAP_PM660_AUTO_RES_QWD;
+		else {
+			pr_err("Should be ZXD or QWD\n");
+			return -EINVAL;
+		}
+	} else {
+		if (strcmp(str, "None") == 0)
+			temp = QPNP_HAP_AUTO_RES_NONE;
+		else if (strcmp(str, "ZXD") == 0 ||
+				strcmp(str, "zxd") == 0)
+			temp = QPNP_HAP_AUTO_RES_ZXD;
+		else if (strcmp(str, "QWD") == 0 ||
+				strcmp(str, "qwd") == 0)
+			temp = QPNP_HAP_AUTO_RES_QWD;
+		else if (strcmp(str, "ZXD_EOP") == 0 ||
+				strcmp(str, "zxd_eop") == 0)
+			temp = QPNP_HAP_AUTO_RES_ZXD_EOP;
+		else if (strcmp(str, "MAX_QWD") == 0 ||
+				strcmp(str, "max_qwd") == 0)
+			temp = QPNP_HAP_AUTO_RES_MAX_QWD;
+		else {
+			pr_err("Should be None or ZXD or QWD or ZXD_EOP or MAX_QWD\n");
+			return -EINVAL;
+		}
+	}
+
+	hap->ares_cfg.auto_res_mode = temp;
+	return count;
+}
+
+static ssize_t qpnp_hap_hi_z_period_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct timed_output_dev *timed_dev = dev_get_drvdata(dev);
+	struct qpnp_hap *hap = container_of(timed_dev, struct qpnp_hap,
+					 timed_dev);
+	char *str;
+
+	switch (hap->ares_cfg.lra_high_z) {
+	case QPNP_HAP_LRA_HIGH_Z_NONE:
+		str = "high_z_none";
+		break;
+	case QPNP_HAP_LRA_HIGH_Z_OPT1:
+		str = "high_z_opt1";
+		break;
+	case QPNP_HAP_LRA_HIGH_Z_OPT2:
+		str = "high_z_opt2";
+		break;
+	case QPNP_HAP_LRA_HIGH_Z_OPT3:
+		str = "high_z_opt3";
+		break;
+	}
+
+	return snprintf(buf, PAGE_SIZE, "%s\n", str);
+}
+
+static ssize_t qpnp_hap_hi_z_period_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct timed_output_dev *timed_dev = dev_get_drvdata(dev);
+	struct qpnp_hap *hap = container_of(timed_dev, struct qpnp_hap,
+					 timed_dev);
+	int data, rc;
+
+	rc = kstrtoint(buf, 10, &data);
+	if (rc)
+		return rc;
+
+	if (data < QPNP_HAP_LRA_HIGH_Z_NONE
+		|| data > QPNP_HAP_LRA_HIGH_Z_OPT3) {
+		pr_err("Invalid high Z configuration\n");
+		return -EINVAL;
+	}
+
+	hap->ares_cfg.lra_high_z = data;
+	return count;
+}
+
+static ssize_t qpnp_hap_calib_period_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct timed_output_dev *timed_dev = dev_get_drvdata(dev);
+	struct qpnp_hap *hap = container_of(timed_dev, struct qpnp_hap,
+					 timed_dev);
+
+	return snprintf(buf, PAGE_SIZE, "%d\n",
+		hap->ares_cfg.lra_res_cal_period);
+}
+
+static ssize_t qpnp_hap_calib_period_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct timed_output_dev *timed_dev = dev_get_drvdata(dev);
+	struct qpnp_hap *hap = container_of(timed_dev, struct qpnp_hap,
+					 timed_dev);
+	int data, rc;
+
+	rc = kstrtoint(buf, 10, &data);
+	if (rc)
+		return rc;
+
+	if (data < QPNP_HAP_RES_CAL_PERIOD_MIN) {
+		pr_err("Invalid auto resonance calibration period\n");
+		return -EINVAL;
+	}
+
+	if (hap->pmic_subtype == PM660_SUBTYPE) {
+		if (data > QPNP_HAP_PM660_RES_CAL_PERIOD_MAX) {
+			pr_err("Invalid auto resonance calibration period\n");
+			return -EINVAL;
+		}
+	} else {
+		if (data > QPNP_HAP_RES_CAL_PERIOD_MAX) {
+			pr_err("Invalid auto resonance calibration period\n");
+			return -EINVAL;
+		}
+	}
+
+	hap->ares_cfg.lra_res_cal_period = data;
+	return count;
+}
+
+static ssize_t qpnp_hap_override_auto_mode_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct timed_output_dev *timed_dev = dev_get_drvdata(dev);
+	struct qpnp_hap *hap = container_of(timed_dev, struct qpnp_hap,
+					 timed_dev);
+
+	return snprintf(buf, PAGE_SIZE, "%d\n", hap->override_auto_mode_config);
+}
+
+static ssize_t qpnp_hap_override_auto_mode_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct timed_output_dev *timed_dev = dev_get_drvdata(dev);
+	struct qpnp_hap *hap = container_of(timed_dev, struct qpnp_hap,
+					 timed_dev);
+	int data, rc;
+
+	rc = kstrtoint(buf, 10, &data);
+	if (rc)
+		return rc;
+
+	hap->override_auto_mode_config = data;
+	return count;
+}
+
+static ssize_t qpnp_hap_vmax_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct timed_output_dev *timed_dev = dev_get_drvdata(dev);
+	struct qpnp_hap *hap = container_of(timed_dev, struct qpnp_hap,
+					 timed_dev);
+
+	return snprintf(buf, PAGE_SIZE, "%d\n", hap->vmax_mv);
+}
+
+static ssize_t qpnp_hap_vmax_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct timed_output_dev *timed_dev = dev_get_drvdata(dev);
+	struct qpnp_hap *hap = container_of(timed_dev, struct qpnp_hap,
+					 timed_dev);
+	int data, rc;
+
+	rc = kstrtoint(buf, 10, &data);
+	if (rc)
+		return rc;
+
+	hap->vmax_mv = data;
+	return count;
+}
+
+/* sysfs attributes */
+static struct device_attribute qpnp_hap_attrs[] = {
+	__ATTR(wf_s0, 0664, qpnp_hap_wf_s0_show, qpnp_hap_wf_s0_store),
+	__ATTR(wf_s1, 0664, qpnp_hap_wf_s1_show, qpnp_hap_wf_s1_store),
+	__ATTR(wf_s2, 0664, qpnp_hap_wf_s2_show, qpnp_hap_wf_s2_store),
+	__ATTR(wf_s3, 0664, qpnp_hap_wf_s3_show, qpnp_hap_wf_s3_store),
+	__ATTR(wf_s4, 0664, qpnp_hap_wf_s4_show, qpnp_hap_wf_s4_store),
+	__ATTR(wf_s5, 0664, qpnp_hap_wf_s5_show, qpnp_hap_wf_s5_store),
+	__ATTR(wf_s6, 0664, qpnp_hap_wf_s6_show, qpnp_hap_wf_s6_store),
+	__ATTR(wf_s7, 0664, qpnp_hap_wf_s7_show, qpnp_hap_wf_s7_store),
+	__ATTR(wf_update, 0664, qpnp_hap_wf_update_show,
+		qpnp_hap_wf_update_store),
+	__ATTR(wf_rep, 0664, qpnp_hap_wf_rep_show, qpnp_hap_wf_rep_store),
+	__ATTR(wf_s_rep, 0664, qpnp_hap_wf_s_rep_show, qpnp_hap_wf_s_rep_store),
+	__ATTR(play_mode, 0664, qpnp_hap_play_mode_show,
+		qpnp_hap_play_mode_store),
+	__ATTR(dump_regs, 0664, qpnp_hap_dump_regs_show, NULL),
+	__ATTR(ramp_test, 0664, qpnp_hap_ramp_test_data_show,
+		qpnp_hap_ramp_test_data_store),
+	__ATTR(min_max_test, 0664, qpnp_hap_min_max_test_data_show,
+		qpnp_hap_min_max_test_data_store),
+	__ATTR(auto_res_mode, 0664, qpnp_hap_auto_res_mode_show,
+		qpnp_hap_auto_res_mode_store),
+	__ATTR(high_z_period, 0664, qpnp_hap_hi_z_period_show,
+		qpnp_hap_hi_z_period_store),
+	__ATTR(calib_period, 0664, qpnp_hap_calib_period_show,
+		qpnp_hap_calib_period_store),
+	__ATTR(override_auto_mode_config, 0664,
+		qpnp_hap_override_auto_mode_show,
+		qpnp_hap_override_auto_mode_store),
+	__ATTR(vmax_mv, 0664, qpnp_hap_vmax_show, qpnp_hap_vmax_store),
+};
+
+static int calculate_lra_code(struct qpnp_hap *hap)
+{
+	u8 lra_drive_period_code_lo = 0, lra_drive_period_code_hi = 0;
+	u32 lra_drive_period_code, lra_drive_frequency_hz, freq_variation;
+	u8 start_variation = AUTO_RES_ERROR_MAX, i;
+	u8 neg_idx = 0, pos_idx = ADJUSTED_LRA_PLAY_RATE_CODE_ARRSIZE - 1;
+	int rc = 0;
+
+	rc = qpnp_hap_read_reg(hap, QPNP_HAP_RATE_CFG1_REG(hap->base),
+			&lra_drive_period_code_lo);
+	if (rc) {
+		pr_err("Error while reading RATE_CFG1 register\n");
+		return rc;
+	}
+
+	rc = qpnp_hap_read_reg(hap, QPNP_HAP_RATE_CFG2_REG(hap->base),
+			&lra_drive_period_code_hi);
+	if (rc) {
+		pr_err("Error while reading RATE_CFG2 register\n");
+		return rc;
+	}
+
+	if (!lra_drive_period_code_lo && !lra_drive_period_code_hi) {
+		pr_err("Unexpected Error: both RATE_CFG1 and RATE_CFG2 read 0\n");
+		return -EINVAL;
+	}
+
+	lra_drive_period_code =
+	 (lra_drive_period_code_hi << 8) | (lra_drive_period_code_lo & 0xff);
+	lra_drive_frequency_hz = 200000 / lra_drive_period_code;
+
+	while (start_variation >= AUTO_RES_ERROR_CAPTURE_RES) {
+		freq_variation =
+			 (lra_drive_frequency_hz * start_variation) / 100;
+		adjusted_lra_play_rate_code[neg_idx++] =
+			200000 / (lra_drive_frequency_hz - freq_variation);
+		adjusted_lra_play_rate_code[pos_idx--] =
+			200000 / (lra_drive_frequency_hz + freq_variation);
+		start_variation -= AUTO_RES_ERROR_CAPTURE_RES;
+	}
+
+	pr_debug("lra_drive_period_code_lo = 0x%x lra_drive_period_code_hi = 0x%x\n"
+		"lra_drive_period_code = 0x%x, lra_drive_frequency_hz = 0x%x\n"
+		"Calculated play rate code values are :\n",
+		lra_drive_period_code_lo, lra_drive_period_code_hi,
+		lra_drive_period_code, lra_drive_frequency_hz);
+
+	for (i = 0; i < ADJUSTED_LRA_PLAY_RATE_CODE_ARRSIZE; ++i)
+		pr_debug(" 0x%x", adjusted_lra_play_rate_code[i]);
+
+	return 0;
+}
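+
+/*
+ * Worked example for the table above (editor's illustration): the 200000
+ * constant is 1 s divided by the 5 us rate step, so a drive period code of
+ * 1000 corresponds to 200 Hz. The loop fills the 12-entry table
+ * ((30 - (-30)) / 5 slots) from both ends: at 30% variation it stores
+ * 200000 / 140 == 1428 at index 0 and 200000 / 260 == 769 at index 11,
+ * then 25%, 20%, ... down to 5% toward the middle.
+ */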
+
+static int qpnp_hap_auto_res_enable(struct qpnp_hap *hap, int enable)
+{
+	int rc = 0;
+	u32 back_emf_delay_us = hap->time_required_to_generate_back_emf_us;
+	u8 val, auto_res_mode_qwd;
+
+	if (hap->act_type != QPNP_HAP_LRA)
+		return 0;
+
+	if (hap->pmic_subtype == PM660_SUBTYPE)
+		auto_res_mode_qwd = (hap->ares_cfg.auto_res_mode ==
+						QPNP_HAP_PM660_AUTO_RES_QWD);
+	else
+		auto_res_mode_qwd = (hap->ares_cfg.auto_res_mode ==
+							QPNP_HAP_AUTO_RES_QWD);
+
+	/*
+	 * Do not enable auto resonance if auto mode is enabled and the auto
+	 * resonance mode is QWD, i.e. a short pattern is being played.
+	 */
+	if (hap->auto_mode && auto_res_mode_qwd && enable) {
+		pr_debug("auto_mode enabled, not enabling auto_res\n");
+		return 0;
+	}
+
+	if (!hap->correct_lra_drive_freq && !auto_res_mode_qwd) {
+		pr_debug("correct_lra_drive_freq: %d auto_res_mode_qwd: %d\n",
+			hap->correct_lra_drive_freq, auto_res_mode_qwd);
+		return 0;
+	}
+
+	val = enable ? AUTO_RES_ENABLE : 0;
+	/*
+	 * For auto resonance detection to work properly, sufficient back-emf
+	 * has to be generated. In general, back-emf takes some time to build
+	 * up. When the auto resonance mode is chosen as QWD, high-z will be
+	 * applied for every LRA cycle and hence there won't be enough back-emf
+	 * at the start-up. Hence, the motor needs to vibrate for few LRA cycles
+	 * after the PLAY bit is asserted. Enable the auto resonance after
+	 * 'time_required_to_generate_back_emf_us' is completed.
+	 */
+	if (enable)
+		usleep_range(back_emf_delay_us, back_emf_delay_us + 1);
+
+	if (hap->pmic_subtype == PM660_SUBTYPE)
+		rc = qpnp_hap_masked_write_reg(hap,
+				QPNP_HAP_AUTO_RES_CTRL(hap->base),
+				QPNP_HAP_AUTO_RES_MASK, val);
+	else
+		rc = qpnp_hap_sec_masked_write_reg(hap,
+				QPNP_HAP_TEST2_REG(hap->base),
+				QPNP_HAP_AUTO_RES_MASK, val);
+	if (rc < 0)
+		return rc;
+
+	if (enable)
+		hap->status_flags |= AUTO_RESONANCE_ENABLED;
+	else
+		hap->status_flags &= ~AUTO_RESONANCE_ENABLED;
+
+	pr_debug("auto_res %sabled\n", enable ? "en" : "dis");
+	return rc;
+}
+
+static void update_lra_frequency(struct qpnp_hap *hap)
+{
+	u8 lra_auto_res[2], val;
+	u32 play_rate_code;
+	u16 rate_cfg;
+	int rc;
+
+	rc = qpnp_hap_read_mult_reg(hap, QPNP_HAP_LRA_AUTO_RES_LO(hap->base),
+				lra_auto_res, 2);
+	if (rc < 0) {
+		pr_err("Error in reading LRA_AUTO_RES_LO/HI, rc=%d\n", rc);
+		return;
+	}
+
+	play_rate_code =
+		 (lra_auto_res[1] & 0xF0) << 4 | (lra_auto_res[0] & 0xFF);
+
+	pr_debug("lra_auto_res_lo = 0x%x lra_auto_res_hi = 0x%x play_rate_code = 0x%x\n",
+		lra_auto_res[0], lra_auto_res[1], play_rate_code);
+
+	rc = qpnp_hap_read_reg(hap, QPNP_HAP_STATUS(hap->base), &val);
+	if (rc < 0)
+		return;
+
+	/*
+	 * If the drive period code read from AUTO_RES_LO and AUTO_RES_HI
+	 * registers is more than the max limit percent variation or less
+	 * than the min limit percent variation specified through DT, then
+	 * auto-resonance is disabled.
+	 */
+
+	if ((val & AUTO_RES_ERR_BIT) ||
+		((play_rate_code <= hap->drive_period_code_min_limit) ||
+		(play_rate_code >= hap->drive_period_code_max_limit))) {
+		if (val & AUTO_RES_ERR_BIT)
+			pr_debug("Auto-resonance error %x\n", val);
+		else
+			pr_debug("play rate %x out of bounds [min: 0x%x, max: 0x%x]\n",
+				play_rate_code,
+				hap->drive_period_code_min_limit,
+				hap->drive_period_code_max_limit);
+		rc = qpnp_hap_auto_res_enable(hap, 0);
+		if (rc < 0)
+			pr_debug("Auto-resonance write failed\n");
+		return;
+	}
+
+	lra_auto_res[1] >>= 4;
+	rate_cfg = lra_auto_res[1] << 8 | lra_auto_res[0];
+	if (hap->last_rate_cfg == rate_cfg) {
+		pr_debug("Same rate_cfg, skip updating\n");
+		return;
+	}
+
+	rc = qpnp_hap_write_mult_reg(hap, QPNP_HAP_RATE_CFG1_REG(hap->base),
+				lra_auto_res, 2);
+	if (rc < 0) {
+		pr_err("Error in writing to RATE_CFG1/2, rc=%d\n", rc);
+	} else {
+		pr_debug("Update RATE_CFG with [0x%x]\n", rate_cfg);
+		hap->last_rate_cfg = rate_cfg;
+	}
+}
+
+static enum hrtimer_restart detect_auto_res_error(struct hrtimer *timer)
+{
+	struct qpnp_hap *hap = container_of(timer, struct qpnp_hap,
+					auto_res_err_poll_timer);
+	ktime_t currtime;
+
+	if (!(hap->status_flags & AUTO_RESONANCE_ENABLED))
+		return HRTIMER_NORESTART;
+
+	update_lra_frequency(hap);
+	currtime  = ktime_get();
+	hrtimer_forward(&hap->auto_res_err_poll_timer, currtime,
+			ktime_set(0, POLL_TIME_AUTO_RES_ERR_NS));
+	return HRTIMER_RESTART;
+}
+
+static bool is_sw_lra_auto_resonance_control(struct qpnp_hap *hap)
+{
+	if (hap->act_type != QPNP_HAP_LRA)
+		return false;
+
+	if (hap->lra_hw_auto_resonance)
+		return false;
+
+	if (!hap->correct_lra_drive_freq)
+		return false;
+
+	if (hap->auto_mode && hap->play_mode == QPNP_HAP_BUFFER)
+		return false;
+
+	return true;
+}
+
+/* set API for haptics: start or stop playback */
+static int qpnp_hap_set(struct qpnp_hap *hap, bool on)
+{
+	int rc = 0;
+	unsigned long timeout_ns = POLL_TIME_AUTO_RES_ERR_NS;
+
+	if (hap->play_mode == QPNP_HAP_PWM) {
+		if (on) {
+			rc = pwm_enable(hap->pwm_info.pwm_dev);
+			if (rc < 0)
+				return rc;
+		} else {
+			pwm_disable(hap->pwm_info.pwm_dev);
+		}
+	} else if (hap->play_mode == QPNP_HAP_BUFFER ||
+			hap->play_mode == QPNP_HAP_DIRECT) {
+		if (on) {
+			rc = qpnp_hap_auto_res_enable(hap, 0);
+			if (rc < 0)
+				return rc;
+
+			rc = qpnp_hap_mod_enable(hap, on);
+			if (rc < 0)
+				return rc;
+
+			rc = qpnp_hap_play(hap, on);
+			if (rc < 0)
+				return rc;
+
+			rc = qpnp_hap_auto_res_enable(hap, 1);
+			if (rc < 0)
+				return rc;
+
+			if (is_sw_lra_auto_resonance_control(hap)) {
+				/*
+				 * Start timer to poll Auto Resonance error bit
+				 */
+				mutex_lock(&hap->lock);
+				hrtimer_cancel(&hap->auto_res_err_poll_timer);
+				hrtimer_start(&hap->auto_res_err_poll_timer,
+						ktime_set(0, timeout_ns),
+						HRTIMER_MODE_REL);
+				mutex_unlock(&hap->lock);
+			}
+		} else {
+			rc = qpnp_hap_play(hap, on);
+			if (rc < 0)
+				return rc;
+
+			if (is_sw_lra_auto_resonance_control(hap) &&
+				(hap->status_flags & AUTO_RESONANCE_ENABLED))
+				update_lra_frequency(hap);
+
+			rc = qpnp_hap_mod_enable(hap, on);
+			if (rc < 0)
+				return rc;
+
+			if (is_sw_lra_auto_resonance_control(hap))
+				hrtimer_cancel(&hap->auto_res_err_poll_timer);
+		}
+	}
+
+	return rc;
+}
+
+static int qpnp_hap_auto_mode_config(struct qpnp_hap *hap, int time_ms)
+{
+	struct qpnp_hap_lra_ares_cfg ares_cfg;
+	enum qpnp_hap_mode old_play_mode;
+	u8 old_ares_mode;
+	u8 brake_pat[QPNP_HAP_BRAKE_PAT_LEN] = {0};
+	u8 wave_samp[QPNP_HAP_WAV_SAMP_LEN] = {0};
+	int rc, vmax_mv;
+
+	/* For now, this is for LRA only */
+	if (hap->act_type == QPNP_HAP_ERM)
+		return 0;
+
+	old_ares_mode = hap->ares_cfg.auto_res_mode;
+	old_play_mode = hap->play_mode;
+	pr_debug("auto_mode, time_ms: %d\n", time_ms);
+	if (time_ms <= 20) {
+		wave_samp[0] = QPNP_HAP_WAV_SAMP_MAX;
+		wave_samp[1] = QPNP_HAP_WAV_SAMP_MAX;
+		if (time_ms > 15)
+			wave_samp[2] = QPNP_HAP_WAV_SAMP_MAX;
+
+		/* short pattern */
+		rc = qpnp_hap_parse_buffer_dt(hap);
+		if (!rc)
+			rc = qpnp_hap_buffer_config(hap, wave_samp, true);
+		if (rc < 0) {
+			pr_err("Error in configuring buffer mode %d\n",
+				rc);
+			return rc;
+		}
+
+		ares_cfg.lra_high_z = QPNP_HAP_LRA_HIGH_Z_OPT1;
+		ares_cfg.lra_res_cal_period = QPNP_HAP_RES_CAL_PERIOD_MIN;
+		if (hap->pmic_subtype == PM660_SUBTYPE) {
+			ares_cfg.auto_res_mode =
+						QPNP_HAP_PM660_AUTO_RES_QWD;
+			ares_cfg.lra_qwd_drive_duration = 0;
+			ares_cfg.calibrate_at_eop = 0;
+		} else {
+			ares_cfg.auto_res_mode = QPNP_HAP_AUTO_RES_QWD;
+			ares_cfg.lra_qwd_drive_duration = -EINVAL;
+			ares_cfg.calibrate_at_eop = -EINVAL;
+		}
+
+		vmax_mv = QPNP_HAP_VMAX_MAX_MV;
+		rc = qpnp_hap_vmax_config(hap, vmax_mv, true);
+		if (rc < 0)
+			return rc;
+
+		rc = qpnp_hap_brake_config(hap, brake_pat);
+		if (rc < 0)
+			return rc;
+
+		/* enable play_irq for buffer mode */
+		if (hap->play_irq >= 0 && !hap->play_irq_en) {
+			enable_irq(hap->play_irq);
+			hap->play_irq_en = true;
+		}
+
+		hap->play_mode = QPNP_HAP_BUFFER;
+		hap->wave_shape = QPNP_HAP_WAV_SQUARE;
+	} else {
+		/* long pattern */
+		ares_cfg.lra_high_z = QPNP_HAP_LRA_HIGH_Z_OPT1;
+		if (hap->pmic_subtype == PM660_SUBTYPE) {
+			ares_cfg.auto_res_mode =
+				QPNP_HAP_PM660_AUTO_RES_ZXD;
+			ares_cfg.lra_res_cal_period =
+				QPNP_HAP_PM660_RES_CAL_PERIOD_MAX;
+			ares_cfg.lra_qwd_drive_duration = 0;
+			ares_cfg.calibrate_at_eop = 1;
+		} else {
+			ares_cfg.auto_res_mode = QPNP_HAP_AUTO_RES_ZXD_EOP;
+			ares_cfg.lra_res_cal_period =
+				QPNP_HAP_RES_CAL_PERIOD_MAX;
+			ares_cfg.lra_qwd_drive_duration = -EINVAL;
+			ares_cfg.calibrate_at_eop = -EINVAL;
+		}
+
+		vmax_mv = hap->vmax_mv;
+		rc = qpnp_hap_vmax_config(hap, vmax_mv, false);
+		if (rc < 0)
+			return rc;
+
+		brake_pat[0] = 0x3;
+		rc = qpnp_hap_brake_config(hap, brake_pat);
+		if (rc < 0)
+			return rc;
+
+		/* enable play_irq for direct mode */
+		if (hap->play_irq >= 0 && hap->play_irq_en) {
+			disable_irq(hap->play_irq);
+			hap->play_irq_en = false;
+		}
+
+		hap->play_mode = QPNP_HAP_DIRECT;
+		hap->wave_shape = QPNP_HAP_WAV_SINE;
+	}
+
+	if (hap->override_auto_mode_config) {
+		rc = qpnp_hap_lra_auto_res_config(hap, NULL);
+	} else {
+		hap->ares_cfg.auto_res_mode = ares_cfg.auto_res_mode;
+		rc = qpnp_hap_lra_auto_res_config(hap, &ares_cfg);
+	}
+
+	if (rc < 0) {
+		hap->ares_cfg.auto_res_mode = old_ares_mode;
+		return rc;
+	}
+
+	rc = qpnp_hap_play_mode_config(hap);
+	if (rc < 0) {
+		hap->play_mode = old_play_mode;
+		return rc;
+	}
+
+	rc = qpnp_hap_masked_write_reg(hap, QPNP_HAP_CFG2_REG(hap->base),
+			QPNP_HAP_WAV_SHAPE_MASK, hap->wave_shape);
+	if (rc < 0)
+		return rc;
+
+	return 0;
+}
+
+/* enable interface from timed output class */
+static void qpnp_hap_td_enable(struct timed_output_dev *dev, int time_ms)
+{
+	struct qpnp_hap *hap = container_of(dev, struct qpnp_hap,
+					 timed_dev);
+	bool state = !!time_ms;
+	ktime_t rem;
+	int rc;
+
+	if (time_ms < 0)
+		return;
+
+	mutex_lock(&hap->lock);
+
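+	/*
+	 * Same-state request: if the vibrator is already playing and the new
+	 * request outlasts what remains on the timer, extend the timer
+	 * (capped at timeout_ms); otherwise leave the current effect running.
+	 */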
+	if (hap->state == state) {
+		if (state) {
+			rem = hrtimer_get_remaining(&hap->hap_timer);
+			if (time_ms > ktime_to_ms(rem)) {
+				time_ms = (time_ms > hap->timeout_ms ?
+						 hap->timeout_ms : time_ms);
+				hrtimer_cancel(&hap->hap_timer);
+				hap->play_time_ms = time_ms;
+				hrtimer_start(&hap->hap_timer,
+						ktime_set(time_ms / 1000,
+						(time_ms % 1000) * 1000000),
+						HRTIMER_MODE_REL);
+			}
+		}
+		mutex_unlock(&hap->lock);
+		return;
+	}
+
+	hap->state = state;
+	if (!hap->state) {
+		hrtimer_cancel(&hap->hap_timer);
+	} else {
+		if (time_ms < 10)
+			time_ms = 10;
+
+		if (hap->auto_mode) {
+			rc = qpnp_hap_auto_mode_config(hap, time_ms);
+			if (rc < 0) {
+				pr_err("Unable to do auto mode config\n");
+				mutex_unlock(&hap->lock);
+				return;
+			}
+		}
+
+		time_ms = (time_ms > hap->timeout_ms ?
+				 hap->timeout_ms : time_ms);
+		hap->play_time_ms = time_ms;
+		hrtimer_start(&hap->hap_timer,
+				ktime_set(time_ms / 1000,
+				(time_ms % 1000) * 1000000),
+				HRTIMER_MODE_REL);
+	}
+
+	mutex_unlock(&hap->lock);
+	schedule_work(&hap->work);
+}
+
+/* play PWM bytes */
+int qpnp_hap_play_byte(u8 data, bool on)
+{
+	struct qpnp_hap *hap = ghap;
+	int duty_ns, period_ns, duty_percent, rc;
+
+	if (!hap) {
+		pr_err("Haptics is not initialized\n");
+		return -EINVAL;
+	}
+
+	if (hap->play_mode != QPNP_HAP_PWM) {
+		pr_err("only PWM mode is supported\n");
+		return -EINVAL;
+	}
+
+	rc = qpnp_hap_set(hap, false);
+	if (rc)
+		return rc;
+
+	if (!on) {
+		/*
+		 * Restore the PWM to its original duty cycle for normal
+		 * operation. This is only required when the standard
+		 * interface is used.
+		 */
+		rc = pwm_config(hap->pwm_info.pwm_dev,
+				hap->pwm_info.duty_us * NSEC_PER_USEC,
+				hap->pwm_info.period_us * NSEC_PER_USEC);
+		return rc;
+	}
+
+	/*
+	 * PWM values range from 0x00 to 0xff. The range from 0x00 to 0x7f
+	 * provides a positive amplitude in the sine waveform for 0 to 100%.
+	 * The range from 0x80 to 0xff provides a negative amplitude in the
+	 * sine waveform for 0 to 100%. The duty percentage is calculated
+	 * from the incoming data to accommodate this.
+	 */
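+	/*
+	 * Worked example (illustrative; assumes the usual constants of
+	 * HALF_DUTY = 50, FULL_DUTY = 100 and DATA_FACTOR = 39): data = 0x40
+	 * gives duty_percent = 50 + (64 * 39) / 100 = 74, i.e. roughly
+	 * three-quarter positive amplitude; data = 0xFF gives
+	 * 100 - (255 * 39) / 100 = 1, i.e. full negative amplitude.
+	 */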
+	if (data <= QPNP_HAP_EXT_PWM_PEAK_DATA)
+		duty_percent = QPNP_HAP_EXT_PWM_HALF_DUTY +
+			((data * QPNP_HAP_EXT_PWM_DATA_FACTOR) / 100);
+	else
+		duty_percent = QPNP_HAP_EXT_PWM_FULL_DUTY -
+			((data * QPNP_HAP_EXT_PWM_DATA_FACTOR) / 100);
+
+	period_ns = hap->pwm_info.period_us * NSEC_PER_USEC;
+	duty_ns = (period_ns * duty_percent) / 100;
+	rc = pwm_config(hap->pwm_info.pwm_dev,
+			duty_ns,
+			hap->pwm_info.period_us * NSEC_PER_USEC);
+	if (rc)
+		return rc;
+
+	pr_debug("data=0x%x duty_per=%d\n", data, duty_percent);
+
+	rc = qpnp_hap_set(hap, true);
+
+	return rc;
+}
+EXPORT_SYMBOL(qpnp_hap_play_byte);
+
+/* worker to operate haptics */
+static void qpnp_hap_worker(struct work_struct *work)
+{
+	struct qpnp_hap *hap = container_of(work, struct qpnp_hap,
+					 work);
+	u8 val = 0x00;
+	int rc;
+
+	if (hap->vcc_pon && hap->state && !hap->vcc_pon_enabled) {
+		rc = regulator_enable(hap->vcc_pon);
+		if (rc < 0)
+			pr_err("could not enable vcc_pon regulator rc=%d\n",
+				rc);
+		else
+			hap->vcc_pon_enabled = true;
+	}
+
+	/*
+	 * Disable the haptics module if the short-circuit condition
+	 * persists beyond the maximum limit (5 seconds).
+	 */
+	if (hap->sc_count >= SC_MAX_COUNT) {
+		rc = qpnp_hap_write_reg(hap, QPNP_HAP_EN_CTL_REG(hap->base),
+			val);
+	} else {
+		if (hap->play_mode == QPNP_HAP_PWM)
+			qpnp_hap_mod_enable(hap, hap->state);
+		qpnp_hap_set(hap, hap->state);
+	}
+
+	if (hap->vcc_pon && !hap->state && hap->vcc_pon_enabled) {
+		rc = regulator_disable(hap->vcc_pon);
+		if (rc)
+			pr_err("could not disable vcc_pon regulator rc=%d\n",
+				rc);
+		else
+			hap->vcc_pon_enabled = false;
+	}
+}
+
+/* get_time API: report the remaining play time */
+static int qpnp_hap_get_time(struct timed_output_dev *dev)
+{
+	struct qpnp_hap *hap = container_of(dev, struct qpnp_hap,
+							 timed_dev);
+
+	if (hrtimer_active(&hap->hap_timer)) {
+		ktime_t r = hrtimer_get_remaining(&hap->hap_timer);
+
+		return (int)ktime_to_us(r);
+	} else {
+		return 0;
+	}
+}
+
+/* play timer expiry handler: stop playback */
+static enum hrtimer_restart qpnp_hap_timer(struct hrtimer *timer)
+{
+	struct qpnp_hap *hap = container_of(timer, struct qpnp_hap,
+							 hap_timer);
+
+	hap->state = 0;
+	schedule_work(&hap->work);
+
+	return HRTIMER_NORESTART;
+}
+
+/* test timer expiry handler: signal completion */
+static enum hrtimer_restart qpnp_hap_test_timer(struct hrtimer *timer)
+{
+	struct qpnp_hap *hap = container_of(timer, struct qpnp_hap,
+							 hap_test_timer);
+
+	complete(&hap->completion);
+
+	return HRTIMER_NORESTART;
+}
+
+/* suspend routines to turn off haptics */
+#ifdef CONFIG_PM
+static int qpnp_haptic_suspend(struct device *dev)
+{
+	struct qpnp_hap *hap = dev_get_drvdata(dev);
+
+	hrtimer_cancel(&hap->hap_timer);
+	cancel_work_sync(&hap->work);
+	/* turn-off haptic */
+	qpnp_hap_set(hap, false);
+
+	return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(qpnp_haptic_pm_ops, qpnp_haptic_suspend, NULL);
+
+/* Configuration api for haptics registers */
+static int qpnp_hap_config(struct qpnp_hap *hap)
+{
+	u8 val = 0;
+	int rc;
+
+	/* Percentage error in the RC clock, multiplied by 10 */
+	u8 rc_clk_err_percent_x10;
+
+	/* Configure the CFG1 register for actuator type */
+	rc = qpnp_hap_masked_write_reg(hap, QPNP_HAP_CFG1_REG(hap->base),
+			QPNP_HAP_ACT_TYPE_MASK, hap->act_type);
+	if (rc)
+		return rc;
+
+	/* Configure auto resonance parameters */
+	rc = qpnp_hap_lra_auto_res_config(hap, NULL);
+	if (rc)
+		return rc;
+
+	/* Configure the PLAY MODE register */
+	rc = qpnp_hap_play_mode_config(hap);
+	if (rc)
+		return rc;
+
+	/* Configure the VMAX register */
+	rc = qpnp_hap_vmax_config(hap, hap->vmax_mv, false);
+	if (rc)
+		return rc;
+
+	/* Configure the ILIM register */
+	rc = qpnp_hap_ilim_config(hap);
+	if (rc)
+		return rc;
+
+	/* Configure the short circuit debounce register */
+	rc = qpnp_hap_sc_deb_config(hap);
+	if (rc)
+		return rc;
+
+	/* Configure the INTERNAL_PWM register */
+	rc = qpnp_hap_int_pwm_config(hap);
+	if (rc)
+		return rc;
+
+	/* Configure the WAVE SHAPE register */
+	rc = qpnp_hap_masked_write_reg(hap, QPNP_HAP_CFG2_REG(hap->base),
+			QPNP_HAP_WAV_SHAPE_MASK, hap->wave_shape);
+	if (rc)
+		return rc;
+
+	/*
+	 * Configure the RATE_CFG1 and RATE_CFG2 registers.
+	 * Note: for ERM these registers hold the play rate; for LRA they
+	 * represent the resonance period.
+	 */
+	if (hap->wave_play_rate_us < QPNP_HAP_WAV_PLAY_RATE_US_MIN)
+		hap->wave_play_rate_us = QPNP_HAP_WAV_PLAY_RATE_US_MIN;
+	else if (hap->wave_play_rate_us > QPNP_HAP_WAV_PLAY_RATE_US_MAX)
+		hap->wave_play_rate_us = QPNP_HAP_WAV_PLAY_RATE_US_MAX;
+
+	hap->init_drive_period_code =
+			 hap->wave_play_rate_us / QPNP_HAP_RATE_CFG_STEP_US;
+
+	/*
+	 * The frequency of the 19.2 MHz RC clock is subject to variation.
+	 * Some PMI modules have a MISC_TRIM_ERROR_RC19P2_CLK register in
+	 * their MISC block that holds the frequency error of the 19.2 MHz
+	 * RC clock.
+	 */
+	if ((hap->act_type == QPNP_HAP_LRA) && hap->correct_lra_drive_freq
+			&& hap->misc_clk_trim_error_reg) {
+		pr_debug("TRIM register = 0x%x\n", hap->clk_trim_error_code);
+
+		/*
+		 * Extract the 4 LSBs and multiply by 7 to get the %error
+		 * in the RC clock, multiplied by 10.
+		 */
+		rc_clk_err_percent_x10 = (hap->clk_trim_error_code & 0x0F) * 7;
+
+		/*
+		 * If the TRIM register holds a value less than 0x80, there is
+		 * a positive error in the RC clock; if it holds a value
+		 * greater than or equal to 0x80, the error is negative. Bit 7
+		 * is the sign bit of the error code.
+		 *
+		 * The adjusted play rate code is calculated as follows:
+		 * LRA drive period code (RATE_CFG) =
+		 *	200 kHz * (1 / LRA drive frequency) * (1 + %error / 100)
+		 *
+		 * This can be rewritten as:
+		 * LRA drive period code (RATE_CFG) =
+		 *	200 kHz * (1 / LRA drive frequency) *
+		 *	(1 + (%error * 10) / 1000)
+		 *
+		 * Since 200 kHz * (1 / LRA drive frequency) was already
+		 * calculated above, only the remaining scaling is done here.
+		 */
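+		/*
+		 * Worked example (illustrative): a trim code of 0x03 gives
+		 * rc_clk_err_percent_x10 = 3 * 7 = 21, i.e. a +2.1% error,
+		 * so an init_drive_period_code of 975 is scaled by
+		 * (1000 + 21) / 1000 to 995.
+		 */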
+		if (hap->clk_trim_error_code & BIT(7))
+			LRA_DRIVE_PERIOD_NEG_ERR(hap, rc_clk_err_percent_x10);
+		else
+			LRA_DRIVE_PERIOD_POS_ERR(hap, rc_clk_err_percent_x10);
+	}
+
+	pr_debug("Play rate code 0x%x\n", hap->init_drive_period_code);
+
+	val = hap->init_drive_period_code & QPNP_HAP_RATE_CFG1_MASK;
+	rc = qpnp_hap_write_reg(hap, QPNP_HAP_RATE_CFG1_REG(hap->base), val);
+	if (rc)
+		return rc;
+
+	val = (hap->init_drive_period_code & 0xF00) >> QPNP_HAP_RATE_CFG2_SHFT;
+	rc = qpnp_hap_write_reg(hap, QPNP_HAP_RATE_CFG2_REG(hap->base), val);
+	if (rc)
+		return rc;
+
+	hap->last_rate_cfg = hap->init_drive_period_code;
+
+	if (hap->act_type == QPNP_HAP_LRA &&
+				hap->perform_lra_auto_resonance_search)
+		calculate_lra_code(hap);
+
+	if (hap->act_type == QPNP_HAP_LRA && hap->correct_lra_drive_freq) {
+		hap->drive_period_code_max_limit =
+			(hap->init_drive_period_code * (100 +
+			hap->drive_period_code_max_limit_percent_variation))
+			/ 100;
+		hap->drive_period_code_min_limit =
+			(hap->init_drive_period_code * (100 -
+			hap->drive_period_code_min_limit_percent_variation))
+			/ 100;
+		pr_debug("Drive period code max limit %x min limit %x\n",
+			hap->drive_period_code_max_limit,
+			hap->drive_period_code_min_limit);
+	}
+
+	rc = qpnp_hap_brake_config(hap, NULL);
+	if (rc < 0)
+		return rc;
+
+	if (hap->play_mode == QPNP_HAP_BUFFER)
+		rc = qpnp_hap_buffer_config(hap, NULL, false);
+	else if (hap->play_mode == QPNP_HAP_PWM)
+		rc = qpnp_hap_pwm_config(hap);
+	else if (hap->play_mode == QPNP_HAP_AUDIO)
+		rc = qpnp_hap_mod_enable(hap, true);
+
+	if (rc)
+		return rc;
+
+	/* setup play irq */
+	if (hap->play_irq >= 0) {
+		rc = devm_request_threaded_irq(&hap->pdev->dev, hap->play_irq,
+			NULL, qpnp_hap_play_irq, IRQF_ONESHOT, "qpnp_hap_play",
+			hap);
+		if (rc < 0) {
+			pr_err("Unable to request play(%d) IRQ(err:%d)\n",
+				hap->play_irq, rc);
+			return rc;
+		}
+
+		/* use play_irq only for buffer mode */
+		if (hap->play_mode != QPNP_HAP_BUFFER) {
+			disable_irq(hap->play_irq);
+			hap->play_irq_en = false;
+		}
+	}
+
+	/* setup short circuit irq */
+	if (hap->sc_irq >= 0) {
+		rc = devm_request_threaded_irq(&hap->pdev->dev, hap->sc_irq,
+			NULL, qpnp_hap_sc_irq, IRQF_ONESHOT, "qpnp_hap_sc",
+			hap);
+		if (rc < 0) {
+			pr_err("Unable to request sc(%d) IRQ(err:%d)\n",
+				hap->sc_irq, rc);
+			return rc;
+		}
+	}
+
+	hap->sc_count = 0;
+
+	return rc;
+}
+
+/* DT parsing for haptics parameters */
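+/*
+ * Example of the bindings consumed below (illustrative values, not from
+ * a shipping device tree):
+ *
+ *	qcom,actuator-type = "lra";
+ *	qcom,play-mode = "direct";
+ *	qcom,vmax-mv = <3200>;
+ *	qcom,ilim-ma = <800>;
+ *	qcom,wave-shape = "sine";
+ *	qcom,wave-play-rate-us = <4255>;
+ *	qcom,lra-auto-res-mode = "qwd";
+ *	qcom,correct-lra-drive-freq;
+ */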
+static int qpnp_hap_parse_dt(struct qpnp_hap *hap)
+{
+	struct platform_device *pdev = hap->pdev;
+	struct device_node *misc_node;
+	struct property *prop;
+	const char *temp_str;
+	u32 temp;
+	int rc;
+
+	if (of_find_property(pdev->dev.of_node, "qcom,pmic-misc", NULL)) {
+		misc_node = of_parse_phandle(pdev->dev.of_node,
+					"qcom,pmic-misc", 0);
+		if (!misc_node)
+			return -EINVAL;
+
+		rc = of_property_read_u32(pdev->dev.of_node,
+				"qcom,misc-clk-trim-error-reg", &temp);
+		if (rc < 0) {
+			pr_err("Missing misc-clk-trim-error-reg\n");
+			return rc;
+		}
+
+		if (!temp || temp > 0xFF) {
+			pr_err("Invalid misc-clk-trim-error-reg\n");
+			return -EINVAL;
+		}
+
+		hap->misc_clk_trim_error_reg = temp;
+		rc = qpnp_misc_read_reg(misc_node, hap->misc_clk_trim_error_reg,
+				&hap->clk_trim_error_code);
+		if (rc < 0) {
+			pr_err("Couldn't get clk_trim_error_code, rc=%d\n", rc);
+			return -EPROBE_DEFER;
+		}
+	}
+
+	hap->timeout_ms = QPNP_HAP_TIMEOUT_MS_MAX;
+	rc = of_property_read_u32(pdev->dev.of_node,
+			"qcom,timeout-ms", &temp);
+	if (!rc) {
+		hap->timeout_ms = temp;
+	} else if (rc != -EINVAL) {
+		pr_err("Unable to read timeout\n");
+		return rc;
+	}
+
+	hap->act_type = QPNP_HAP_LRA;
+	rc = of_property_read_string(pdev->dev.of_node,
+			"qcom,actuator-type", &temp_str);
+	if (!rc) {
+		if (strcmp(temp_str, "erm") == 0)
+			hap->act_type = QPNP_HAP_ERM;
+		else if (strcmp(temp_str, "lra") == 0)
+			hap->act_type = QPNP_HAP_LRA;
+		else {
+			pr_err("Invalid actuator type\n");
+			return -EINVAL;
+		}
+	} else if (rc != -EINVAL) {
+		pr_err("Unable to read actuator type\n");
+		return rc;
+	}
+
+	if (hap->act_type == QPNP_HAP_LRA) {
+		rc = of_property_read_string(pdev->dev.of_node,
+				"qcom,lra-auto-res-mode", &temp_str);
+		if (!rc) {
+			if (hap->pmic_subtype == PM660_SUBTYPE) {
+				hap->ares_cfg.auto_res_mode =
+						QPNP_HAP_PM660_AUTO_RES_QWD;
+				if (strcmp(temp_str, "zxd") == 0)
+					hap->ares_cfg.auto_res_mode =
+						QPNP_HAP_PM660_AUTO_RES_ZXD;
+				else if (strcmp(temp_str, "qwd") == 0)
+					hap->ares_cfg.auto_res_mode =
+						QPNP_HAP_PM660_AUTO_RES_QWD;
+			} else {
+				hap->ares_cfg.auto_res_mode =
+					QPNP_HAP_AUTO_RES_ZXD_EOP;
+				if (strcmp(temp_str, "none") == 0)
+					hap->ares_cfg.auto_res_mode =
+						QPNP_HAP_AUTO_RES_NONE;
+				else if (strcmp(temp_str, "zxd") == 0)
+					hap->ares_cfg.auto_res_mode =
+						QPNP_HAP_AUTO_RES_ZXD;
+				else if (strcmp(temp_str, "qwd") == 0)
+					hap->ares_cfg.auto_res_mode =
+						QPNP_HAP_AUTO_RES_QWD;
+				else if (strcmp(temp_str, "max-qwd") == 0)
+					hap->ares_cfg.auto_res_mode =
+						QPNP_HAP_AUTO_RES_MAX_QWD;
+				else
+					hap->ares_cfg.auto_res_mode =
+						QPNP_HAP_AUTO_RES_ZXD_EOP;
+			}
+		} else if (rc != -EINVAL) {
+			pr_err("Unable to read auto res mode\n");
+			return rc;
+		}
+
+		hap->ares_cfg.lra_high_z = QPNP_HAP_LRA_HIGH_Z_OPT3;
+		rc = of_property_read_string(pdev->dev.of_node,
+				"qcom,lra-high-z", &temp_str);
+		if (!rc) {
+			if (strcmp(temp_str, "none") == 0)
+				hap->ares_cfg.lra_high_z =
+					QPNP_HAP_LRA_HIGH_Z_NONE;
+			else if (strcmp(temp_str, "opt1") == 0)
+				hap->ares_cfg.lra_high_z =
+					QPNP_HAP_LRA_HIGH_Z_OPT1;
+			else if (strcmp(temp_str, "opt2") == 0)
+				hap->ares_cfg.lra_high_z =
+					 QPNP_HAP_LRA_HIGH_Z_OPT2;
+			else
+				hap->ares_cfg.lra_high_z =
+					 QPNP_HAP_LRA_HIGH_Z_OPT3;
+
+			if (hap->pmic_subtype == PM660_SUBTYPE) {
+				if (strcmp(temp_str, "opt0") == 0)
+					hap->ares_cfg.lra_high_z =
+						QPNP_HAP_LRA_HIGH_Z_NONE;
+			}
+		} else if (rc != -EINVAL) {
+			pr_err("Unable to read LRA high-z\n");
+			return rc;
+		}
+
+		hap->ares_cfg.lra_qwd_drive_duration = -EINVAL;
+		rc = of_property_read_u32(pdev->dev.of_node,
+				"qcom,lra-qwd-drive-duration",
+				&hap->ares_cfg.lra_qwd_drive_duration);
+
+		hap->ares_cfg.calibrate_at_eop = -EINVAL;
+		rc = of_property_read_u32(pdev->dev.of_node,
+				"qcom,lra-calibrate-at-eop",
+				&hap->ares_cfg.calibrate_at_eop);
+
+		hap->ares_cfg.lra_res_cal_period = QPNP_HAP_RES_CAL_PERIOD_MAX;
+		rc = of_property_read_u32(pdev->dev.of_node,
+				"qcom,lra-res-cal-period", &temp);
+		if (!rc) {
+			hap->ares_cfg.lra_res_cal_period = temp;
+		} else if (rc != -EINVAL) {
+			pr_err("Unable to read cal period\n");
+			return rc;
+		}
+
+		hap->lra_hw_auto_resonance =
+				of_property_read_bool(pdev->dev.of_node,
+				"qcom,lra-hw-auto-resonance");
+
+		hap->perform_lra_auto_resonance_search =
+				of_property_read_bool(pdev->dev.of_node,
+				"qcom,perform-lra-auto-resonance-search");
+
+		hap->correct_lra_drive_freq =
+				of_property_read_bool(pdev->dev.of_node,
+						"qcom,correct-lra-drive-freq");
+
+		hap->drive_period_code_max_limit_percent_variation = 25;
+		rc = of_property_read_u32(pdev->dev.of_node,
+		"qcom,drive-period-code-max-limit-percent-variation", &temp);
+		if (!rc)
+			hap->drive_period_code_max_limit_percent_variation =
+								(u8) temp;
+
+		hap->drive_period_code_min_limit_percent_variation = 25;
+		rc = of_property_read_u32(pdev->dev.of_node,
+		"qcom,drive-period-code-min-limit-percent-variation", &temp);
+		if (!rc)
+			hap->drive_period_code_min_limit_percent_variation =
+								(u8) temp;
+
+		if (hap->ares_cfg.auto_res_mode == QPNP_HAP_AUTO_RES_QWD) {
+			hap->time_required_to_generate_back_emf_us =
+					QPNP_HAP_TIME_REQ_FOR_BACK_EMF_GEN;
+			rc = of_property_read_u32(pdev->dev.of_node,
+				"qcom,time-required-to-generate-back-emf-us",
+				&temp);
+			if (!rc)
+				hap->time_required_to_generate_back_emf_us =
+									temp;
+		} else {
+			hap->time_required_to_generate_back_emf_us = 0;
+		}
+	}
+
+	rc = of_property_read_string(pdev->dev.of_node,
+				"qcom,play-mode", &temp_str);
+	if (!rc) {
+		if (strcmp(temp_str, "direct") == 0)
+			hap->play_mode = QPNP_HAP_DIRECT;
+		else if (strcmp(temp_str, "buffer") == 0)
+			hap->play_mode = QPNP_HAP_BUFFER;
+		else if (strcmp(temp_str, "pwm") == 0)
+			hap->play_mode = QPNP_HAP_PWM;
+		else if (strcmp(temp_str, "audio") == 0)
+			hap->play_mode = QPNP_HAP_AUDIO;
+		else {
+			pr_err("Invalid play mode\n");
+			return -EINVAL;
+		}
+	} else {
+		pr_err("Unable to read play mode\n");
+		return rc;
+	}
+
+	hap->vmax_mv = QPNP_HAP_VMAX_MAX_MV;
+	rc = of_property_read_u32(pdev->dev.of_node, "qcom,vmax-mv", &temp);
+	if (!rc) {
+		hap->vmax_mv = temp;
+	} else if (rc != -EINVAL) {
+		pr_err("Unable to read vmax\n");
+		return rc;
+	}
+
+	hap->ilim_ma = QPNP_HAP_ILIM_MIN_MV;
+	rc = of_property_read_u32(pdev->dev.of_node, "qcom,ilim-ma", &temp);
+	if (!rc) {
+		hap->ilim_ma = temp;
+	} else if (rc != -EINVAL) {
+		pr_err("Unable to read ILim\n");
+		return rc;
+	}
+
+	hap->sc_deb_cycles = QPNP_HAP_DEF_SC_DEB_CYCLES;
+	rc = of_property_read_u32(pdev->dev.of_node,
+			"qcom,sc-deb-cycles", &temp);
+	if (!rc) {
+		hap->sc_deb_cycles = temp;
+	} else if (rc != -EINVAL) {
+		pr_err("Unable to read sc debounce\n");
+		return rc;
+	}
+
+	hap->int_pwm_freq_khz = QPNP_HAP_INT_PWM_FREQ_505_KHZ;
+	rc = of_property_read_u32(pdev->dev.of_node,
+			"qcom,int-pwm-freq-khz", &temp);
+	if (!rc) {
+		hap->int_pwm_freq_khz = temp;
+	} else if (rc != -EINVAL) {
+		pr_err("Unable to read int pwm freq\n");
+		return rc;
+	}
+
+	hap->wave_shape = QPNP_HAP_WAV_SQUARE;
+	rc = of_property_read_string(pdev->dev.of_node,
+			"qcom,wave-shape", &temp_str);
+	if (!rc) {
+		if (strcmp(temp_str, "sine") == 0)
+			hap->wave_shape = QPNP_HAP_WAV_SINE;
+		else if (strcmp(temp_str, "square") == 0)
+			hap->wave_shape = QPNP_HAP_WAV_SQUARE;
+		else {
+			pr_err("Unsupported wav shape\n");
+			return -EINVAL;
+		}
+	} else if (rc != -EINVAL) {
+		pr_err("Unable to read wav shape\n");
+		return rc;
+	}
+
+	hap->wave_play_rate_us = QPNP_HAP_DEF_WAVE_PLAY_RATE_US;
+	rc = of_property_read_u32(pdev->dev.of_node,
+			"qcom,wave-play-rate-us", &temp);
+	if (!rc) {
+		hap->wave_play_rate_us = temp;
+	} else if (rc != -EINVAL) {
+		pr_err("Unable to read play rate\n");
+		return rc;
+	}
+
+	if (hap->play_mode == QPNP_HAP_BUFFER)
+		rc = qpnp_hap_parse_buffer_dt(hap);
+	else if (hap->play_mode == QPNP_HAP_PWM)
+		rc = qpnp_hap_parse_pwm_dt(hap);
+
+	if (rc < 0)
+		return rc;
+
+	hap->en_brake = of_property_read_bool(pdev->dev.of_node,
+				"qcom,en-brake");
+
+	if (hap->en_brake) {
+		prop = of_find_property(pdev->dev.of_node,
+				"qcom,brake-pattern", &temp);
+		if (!prop) {
+			pr_info("brake pattern not found\n");
+		} else if (temp != QPNP_HAP_BRAKE_PAT_LEN) {
+			pr_err("Invalid len of brake pattern\n");
+			return -EINVAL;
+		} else {
+			hap->sup_brake_pat = true;
+			memcpy(hap->brake_pat, prop->value,
+					QPNP_HAP_BRAKE_PAT_LEN);
+		}
+	}
+
+	hap->play_irq = platform_get_irq_byname(hap->pdev, "play-irq");
+	if (hap->play_irq < 0)
+		pr_warn("Unable to get play irq\n");
+
+	hap->sc_irq = platform_get_irq_byname(hap->pdev, "sc-irq");
+	if (hap->sc_irq < 0) {
+		pr_err("Unable to get sc irq\n");
+		return hap->sc_irq;
+	}
+
+	if (of_find_property(pdev->dev.of_node, "vcc_pon-supply", NULL))
+		hap->manage_pon_supply = true;
+
+	hap->auto_mode = of_property_read_bool(pdev->dev.of_node,
+				"qcom,lra-auto-mode");
+	return 0;
+}
+
+static int qpnp_hap_get_pmic_revid(struct qpnp_hap *hap)
+{
+	struct pmic_revid_data *pmic_rev_id;
+	struct device_node *revid_dev_node;
+
+	revid_dev_node = of_parse_phandle(hap->pdev->dev.of_node,
+					"qcom,pmic-revid", 0);
+	if (!revid_dev_node) {
+		pr_err("Missing qcom,pmic-revid property - driver failed\n");
+		return -EINVAL;
+	}
+	pmic_rev_id = get_revid_data(revid_dev_node);
+	if (IS_ERR_OR_NULL(pmic_rev_id)) {
+		pr_err("Unable to get pmic_revid rc=%ld\n",
+						PTR_ERR(pmic_rev_id));
+		/*
+		 * the revid peripheral must be registered, any failure
+		 * here only indicates that the rev-id module has not
+		 * probed yet.
+		 */
+		return -EPROBE_DEFER;
+	}
+
+	hap->pmic_subtype = pmic_rev_id->pmic_subtype;
+
+	return 0;
+}
+
+static int qpnp_haptic_probe(struct platform_device *pdev)
+{
+	struct qpnp_hap *hap;
+	unsigned int base;
+	struct regulator *vcc_pon;
+	int rc, i;
+
+	hap = devm_kzalloc(&pdev->dev, sizeof(*hap), GFP_KERNEL);
+	if (!hap)
+		return -ENOMEM;
+
+	hap->regmap = dev_get_regmap(pdev->dev.parent, NULL);
+	if (!hap->regmap) {
+		pr_err("Couldn't get parent's regmap\n");
+		return -EINVAL;
+	}
+
+	hap->pdev = pdev;
+
+	rc = of_property_read_u32(pdev->dev.of_node, "reg", &base);
+	if (rc < 0) {
+		pr_err("Couldn't find reg in node = %s rc = %d\n",
+			pdev->dev.of_node->full_name, rc);
+		return rc;
+	}
+	hap->base = base;
+
+	dev_set_drvdata(&pdev->dev, hap);
+
+	rc = qpnp_hap_get_pmic_revid(hap);
+	if (rc) {
+		pr_err("Unable to check PMIC version rc=%d\n", rc);
+		return rc;
+	}
+
+	rc = qpnp_hap_parse_dt(hap);
+	if (rc) {
+		pr_err("DT parsing failed\n");
+		return rc;
+	}
+
+	spin_lock_init(&hap->bus_lock);
+	rc = qpnp_hap_config(hap);
+	if (rc) {
+		pr_err("hap config failed\n");
+		return rc;
+	}
+
+	mutex_init(&hap->lock);
+	mutex_init(&hap->wf_lock);
+	INIT_WORK(&hap->work, qpnp_hap_worker);
+	INIT_DELAYED_WORK(&hap->sc_work, qpnp_handle_sc_irq);
+	init_completion(&hap->completion);
+
+	hrtimer_init(&hap->hap_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+	hap->hap_timer.function = qpnp_hap_timer;
+
+	hrtimer_init(&hap->hap_test_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+	hap->hap_test_timer.function = qpnp_hap_test_timer;
+
+	hap->timed_dev.name = "vibrator";
+	hap->timed_dev.get_time = qpnp_hap_get_time;
+	hap->timed_dev.enable = qpnp_hap_td_enable;
+
+	hrtimer_init(&hap->auto_res_err_poll_timer, CLOCK_MONOTONIC,
+			HRTIMER_MODE_REL);
+	hap->auto_res_err_poll_timer.function = detect_auto_res_error;
+
+	rc = timed_output_dev_register(&hap->timed_dev);
+	if (rc < 0) {
+		pr_err("timed_output registration failed\n");
+		goto timed_output_fail;
+	}
+
+	for (i = 0; i < ARRAY_SIZE(qpnp_hap_attrs); i++) {
+		rc = sysfs_create_file(&hap->timed_dev.dev->kobj,
+				&qpnp_hap_attrs[i].attr);
+		if (rc < 0) {
+			pr_err("sysfs creation failed\n");
+			goto sysfs_fail;
+		}
+	}
+
+	if (hap->manage_pon_supply) {
+		vcc_pon = regulator_get(&pdev->dev, "vcc_pon");
+		if (IS_ERR(vcc_pon)) {
+			rc = PTR_ERR(vcc_pon);
+			pr_err("regulator get failed vcc_pon rc=%d\n", rc);
+			goto sysfs_fail;
+		}
+		hap->vcc_pon = vcc_pon;
+	}
+
+	ghap = hap;
+
+	return 0;
+
+sysfs_fail:
+	for (i--; i >= 0; i--)
+		sysfs_remove_file(&hap->timed_dev.dev->kobj,
+				&qpnp_hap_attrs[i].attr);
+	timed_output_dev_unregister(&hap->timed_dev);
+timed_output_fail:
+	cancel_work_sync(&hap->work);
+	hrtimer_cancel(&hap->auto_res_err_poll_timer);
+	hrtimer_cancel(&hap->hap_timer);
+	mutex_destroy(&hap->lock);
+	mutex_destroy(&hap->wf_lock);
+
+	return rc;
+}
+
+static int qpnp_haptic_remove(struct platform_device *pdev)
+{
+	struct qpnp_hap *hap = dev_get_drvdata(&pdev->dev);
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(qpnp_hap_attrs); i++)
+		sysfs_remove_file(&hap->timed_dev.dev->kobj,
+				&qpnp_hap_attrs[i].attr);
+
+	cancel_work_sync(&hap->work);
+	hrtimer_cancel(&hap->auto_res_err_poll_timer);
+	hrtimer_cancel(&hap->hap_timer);
+	timed_output_dev_unregister(&hap->timed_dev);
+	mutex_destroy(&hap->lock);
+	mutex_destroy(&hap->wf_lock);
+	if (hap->vcc_pon)
+		regulator_put(hap->vcc_pon);
+
+	return 0;
+}
+
+static const struct of_device_id spmi_match_table[] = {
+	{ .compatible = "qcom,qpnp-haptic", },
+	{ },
+};
+
+static struct platform_driver qpnp_haptic_driver = {
+	.driver		= {
+		.name		= "qcom,qpnp-haptic",
+		.of_match_table	= spmi_match_table,
+		.pm		= &qpnp_haptic_pm_ops,
+	},
+	.probe		= qpnp_haptic_probe,
+	.remove		= qpnp_haptic_remove,
+};
+
+static int __init qpnp_haptic_init(void)
+{
+	return platform_driver_register(&qpnp_haptic_driver);
+}
+module_init(qpnp_haptic_init);
+
+static void __exit qpnp_haptic_exit(void)
+{
+	return platform_driver_unregister(&qpnp_haptic_driver);
+}
+module_exit(qpnp_haptic_exit);
+
+MODULE_DESCRIPTION("qpnp haptic driver");
+MODULE_LICENSE("GPL v2");
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/soc/qcom/qsee_ipc_irq_bridge.c	2019-01-22 16:16:26.671275095 +0100
@@ -0,0 +1,624 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/cdev.h>
+#include <linux/interrupt.h>
+#include <linux/ipc_logging.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
+#include <linux/poll.h>
+#include <linux/slab.h>
+#include <soc/qcom/subsystem_notif.h>
+#include <soc/qcom/subsystem_restart.h>
+
+#define MODULE_NAME "qsee_ipc_irq_bridge"
+#define DEVICE_NAME MODULE_NAME
+#define NUM_LOG_PAGES 4
+
+#define QIIB_DBG(x...) do { \
+	if (qiib_info->log_ctx) \
+		ipc_log_string(qiib_info->log_ctx, x); \
+	else \
+		pr_debug(x); \
+	} while (0)
+
+#define QIIB_ERR(x...) do { \
+	pr_err(x); \
+	if (qiib_info->log_ctx) \
+		ipc_log_string(qiib_info->log_ctx, x); \
+	} while (0)
+
+static void qiib_cleanup(void);
+
+/**
+ * qiib_dev - QSEE IPC IRQ bridge device
+ * @dev_list:		qiib device list.
+ * @i:			Index of this character device.
+ * @dev_name:		Device node name used by the clients.
+ * @cdev:		Internal character device structure.
+ * @devicep:		Pointer to the qiib class device structure.
+ * @poll_wait_queue:	Poll thread wait queue.
+ * @irq_line:		IRQ number used for this device.
+ * @rx_irq_reset_reg:	Reference to the register to reset the rx irq
+ *			line, if applicable.
+ * @irq_mask:		Mask written to @rx_irq_reset_reg to clear the irq.
+ * @irq_pending_count:	The number of IRQs pending.
+ * @irq_pending_count_lock: Lock to protect @irq_pending_count.
+ * @ssr_name:		Name of the subsystem recognized by the SSR framework.
+ * @nb:			SSR notifier callback.
+ * @notifier_handle:	SSR notifier handle.
+ * @in_reset:		Flag to track the SSR state.
+ */
+struct qiib_dev {
+	struct list_head dev_list;
+	uint32_t i;
+
+	const char *dev_name;
+	struct cdev cdev;
+	struct device *devicep;
+
+	wait_queue_head_t poll_wait_queue;
+
+	uint32_t irq_line;
+	void __iomem *rx_irq_reset_reg;
+	uint32_t irq_mask;
+	uint32_t irq_pending_count;
+	spinlock_t irq_pending_count_lock;
+
+	const char *ssr_name;
+	struct notifier_block nb;
+	void *notifier_handle;
+	bool in_reset;
+};
+
+/**
+ * qiib_driver_data - QSEE IPC IRQ bridge driver data
+ * @list:		List of all node devices.
+ * @list_lock:		Lock to synchronize @list access.
+ * @nports:		Number of device nodes.
+ * @classp:		Pointer to the device class.
+ * @dev_num:		qiib device number.
+ * @log_ctx:		Pointer to the ipc logging context.
+ */
+struct qiib_driver_data {
+	struct list_head list;
+	struct mutex list_lock;
+
+	int nports;
+	struct class *classp;
+	dev_t dev_num;
+
+	void *log_ctx;
+};
+
+static struct qiib_driver_data *qiib_info;
+
+/**
+ * qiib_driver_data_init() - Initialize the QIIB driver data.
+ *
+ * This function initializes the driver-specific data during module init.
+ *
+ * Return:	0 on success, standard Linux error codes otherwise.
+ */
+static int qiib_driver_data_init(void)
+{
+	qiib_info = kzalloc(sizeof(*qiib_info), GFP_KERNEL);
+	if (!qiib_info)
+		return -ENOMEM;
+
+	INIT_LIST_HEAD(&qiib_info->list);
+	mutex_init(&qiib_info->list_lock);
+
+	qiib_info->log_ctx = ipc_log_context_create(NUM_LOG_PAGES,
+						"qsee_ipc_irq_bridge", 0);
+	if (!qiib_info->log_ctx)
+		QIIB_ERR("%s: unable to create logging context\n", __func__);
+
+	return 0;
+}
+
+/**
+ * qiib_driver_data_deinit() - De-initialize the QIIB driver data.
+ *
+ * This function de-initializes the driver-specific data during module exit.
+ */
+static void qiib_driver_data_deinit(void)
+{
+	qiib_cleanup();
+	if (qiib_info->log_ctx)
+		ipc_log_context_destroy(qiib_info->log_ctx);
+	kfree(qiib_info);
+	qiib_info = NULL;
+}
+
+/**
+ * qiib_restart_notifier_cb() - SSR restart notifier callback function
+ * @this:	Notifier block used by the SSR framework
+ * @code:	SSR event code indicating which stage of restart is occurring.
+ * @data:	Structure containing private data - not used here.
+ *
+ * This function is a callback for the SSR framework. From here we initiate
+ * our handling of SSR.
+ *
+ * Return: Status of SSR handling
+ */
+static int qiib_restart_notifier_cb(struct notifier_block *this,
+				  unsigned long code,
+				  void *data)
+{
+	struct qiib_dev *devp = container_of(this, struct qiib_dev, nb);
+
+	if (code == SUBSYS_BEFORE_SHUTDOWN) {
+		QIIB_DBG("%s: %s: subsystem restart for %s\n", __func__,
+				"SUBSYS_BEFORE_SHUTDOWN",
+				devp->ssr_name);
+		devp->in_reset = true;
+		wake_up_interruptible(&devp->poll_wait_queue);
+	} else if (code == SUBSYS_AFTER_POWERUP) {
+		QIIB_DBG("%s: %s: subsystem restart for %s\n", __func__,
+				"SUBSYS_AFTER_POWERUP",
+				devp->ssr_name);
+		devp->in_reset = false;
+	}
+	return NOTIFY_DONE;
+}
+
+/**
+ * qiib_poll() - poll() syscall for the qiib device
+ * @file:	Pointer to the file structure.
+ * @wait:	Pointer to the poll table.
+ *
+ * This function is used to poll on the qiib device when a userspace
+ * client does a poll() system call. All input arguments are validated
+ * by the virtual file system before calling this function.
+ *
+ * Return: POLLIN when an interrupt was intercepted, POLLRDHUP on SSR.
+ */
+static unsigned int qiib_poll(struct file *file, poll_table *wait)
+{
+	struct qiib_dev *devp = file->private_data;
+	unsigned int mask = 0;
+	unsigned long flags;
+
+	if (!devp) {
+		QIIB_ERR("%s on NULL device\n", __func__);
+		return POLLERR;
+	}
+
+	if (devp->in_reset)
+		return POLLRDHUP;
+
+	poll_wait(file, &devp->poll_wait_queue, wait);
+	spin_lock_irqsave(&devp->irq_pending_count_lock, flags);
+	if (devp->irq_pending_count) {
+		mask |= POLLIN;
+		QIIB_DBG("%s set POLLIN on [%s] count[%d]\n",
+					__func__, devp->dev_name,
+					devp->irq_pending_count);
+		devp->irq_pending_count = 0;
+	}
+	spin_unlock_irqrestore(&devp->irq_pending_count_lock, flags);
+
+	if (devp->in_reset) {
+		mask |= POLLRDHUP;
+		QIIB_DBG("%s set POLLRDHUP on [%s] count[%d]\n",
+					__func__, devp->dev_name,
+					devp->irq_pending_count);
+	}
+	return mask;
+}
+
+/**
+ * qiib_open() - open() syscall for the qiib device
+ * @inode:	Pointer to the inode structure.
+ * @file:	Pointer to the file structure.
+ *
+ * This function is used to open the qiib device when a userspace
+ * client does an open() system call. All input arguments are
+ * validated by the virtual file system before calling this function.
+ *
+ * Return:	0 on success, standard Linux error codes otherwise.
+ */
+static int qiib_open(struct inode *inode, struct file *file)
+{
+	struct qiib_dev *devp = NULL;
+
+	devp = container_of(inode->i_cdev, struct qiib_dev, cdev);
+	if (!devp) {
+		QIIB_ERR("%s on NULL device\n", __func__);
+		return -EINVAL;
+	}
+	file->private_data = devp;
+	QIIB_DBG("%s on [%s]\n", __func__, devp->dev_name);
+	return 0;
+}
+
+/**
+ * qiib_release() - release operation on the qiib device
+ * @inode:	Pointer to the inode structure.
+ * @file:	Pointer to the file structure.
+ *
+ * This function is used to release the qiib device when a userspace
+ * client does a close() system call. All input arguments are
+ * validated by the virtual file system before calling this function.
+ */
+static int qiib_release(struct inode *inode, struct file *file)
+{
+	struct qiib_dev *devp = file->private_data;
+
+	if (!devp) {
+		QIIB_ERR("%s on NULL device\n", __func__);
+		return -EINVAL;
+	}
+
+	QIIB_DBG("%s on [%s]\n", __func__, devp->dev_name);
+	return 0;
+}
+
+static const struct file_operations qiib_fops = {
+	.owner = THIS_MODULE,
+	.open = qiib_open,
+	.release = qiib_release,
+	.poll = qiib_poll,
+};
+
+/**
+ * qiib_add_device() - Initialize qiib device and add cdev
+ * @devp:	pointer to the qiib device.
+ * @i:		index of the qiib device.
+ *
+ * Return:	0 for success, Standard Linux errors
+ */
+static int qiib_add_device(struct qiib_dev *devp, int i)
+{
+	int ret = 0;
+
+	devp->i = i;
+	init_waitqueue_head(&devp->poll_wait_queue);
+	spin_lock_init(&devp->irq_pending_count_lock);
+
+	cdev_init(&devp->cdev, &qiib_fops);
+	devp->cdev.owner = THIS_MODULE;
+
+	ret = cdev_add(&devp->cdev, qiib_info->dev_num + i, 1);
+	if (IS_ERR_VALUE(ret)) {
+		QIIB_ERR("%s: cdev_add() failed for dev [%s] ret:%i\n",
+			__func__, devp->dev_name, ret);
+		return ret;
+	}
+
+	devp->devicep = device_create(qiib_info->classp,
+			      NULL,
+			      (qiib_info->dev_num + i),
+			      NULL,
+			      devp->dev_name);
+
+	if (IS_ERR_OR_NULL(devp->devicep)) {
+		QIIB_ERR("%s: device_create() failed for dev [%s]\n",
+			__func__, devp->dev_name);
+		ret = -ENOMEM;
+		cdev_del(&devp->cdev);
+		return ret;
+	}
+
+	mutex_lock(&qiib_info->list_lock);
+	list_add(&devp->dev_list, &qiib_info->list);
+	mutex_unlock(&qiib_info->list_lock);
+
+	return ret;
+}
+
+static irqreturn_t qiib_irq_handler(int irq, void *priv)
+{
+	struct qiib_dev *devp = priv;
+	unsigned long flags;
+
+	spin_lock_irqsave(&devp->irq_pending_count_lock, flags);
+	devp->irq_pending_count++;
+	spin_unlock_irqrestore(&devp->irq_pending_count_lock, flags);
+	wake_up_interruptible(&devp->poll_wait_queue);
+
+	if (devp->rx_irq_reset_reg)
+		writel_relaxed(devp->irq_mask, devp->rx_irq_reset_reg);
+
+	QIIB_DBG("%s name[%s] pend_count[%d]\n", __func__,
+				devp->dev_name, devp->irq_pending_count);
+
+	return IRQ_HANDLED;
+}
+
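+/*
+ * Example of a child node parsed below (illustrative values, not from a
+ * shipping device tree):
+ *
+ *	qcom,qsee-ipc-irq-bridge {
+ *		compatible = "qcom,qsee-ipc-irq-bridge";
+ *		bridge {
+ *			qcom,dev-name = "qsee_ipc_irq_spss";
+ *			interrupts = <0 349 4>;
+ *			label = "spss";
+ *			qcom,rx-irq-clr = <0x1d08008 0x4>;
+ *			qcom,rx-irq-clr-mask = <0x2>;
+ *		};
+ *	};
+ */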
+/**
+ * qiib_parse_node() - parse node from device tree binding
+ * @node:	pointer to device tree node
+ * @devp:	pointer to the qiib device
+ *
+ * Return:	0 on success, negative error code on failure.
+ */
+static int qiib_parse_node(struct device_node *node, struct qiib_dev *devp)
+{
+	char *key;
+	const char *subsys_name;
+	const char *dev_name;
+	uint32_t irqtype;
+	uint32_t irq_clear[2];
+	struct irq_data *irqtype_data;
+	int ret = -ENODEV;
+
+	key = "qcom,dev-name";
+	dev_name = of_get_property(node, key, NULL);
+	if (!dev_name) {
+		QIIB_ERR("%s: missing key: %s\n", __func__, key);
+		goto missing_key;
+	}
+	QIIB_DBG("%s: %s = %s\n", __func__, key, dev_name);
+
+	key = "interrupts";
+	devp->irq_line = irq_of_parse_and_map(node, 0);
+	if (!devp->irq_line) {
+		QIIB_ERR("%s: missing key: %s\n", __func__, key);
+		goto missing_key;
+	}
+	QIIB_DBG("%s: %s = %d\n", __func__, key, devp->irq_line);
+
+	irqtype_data = irq_get_irq_data(devp->irq_line);
+	if (!irqtype_data) {
+		QIIB_ERR("%s: get irqdata fail:%d\n", __func__, devp->irq_line);
+		goto missing_key;
+	}
+	irqtype = irqd_get_trigger_type(irqtype_data);
+	QIIB_DBG("%s: irqtype = %d\n", __func__, irqtype);
+
+	key = "label";
+	subsys_name = of_get_property(node, key, NULL);
+	if (!subsys_name) {
+		QIIB_ERR("%s: missing key: %s\n", __func__, key);
+		goto missing_key;
+	}
+	QIIB_DBG("%s: %s = %s\n", __func__, key, subsys_name);
+
+	if (irqtype & IRQF_TRIGGER_HIGH) {
+		key = "qcom,rx-irq-clr-mask";
+		ret = of_property_read_u32(node, key, &devp->irq_mask);
+		if (ret) {
+			QIIB_ERR("%s: missing key: %s\n", __func__, key);
+			ret = -ENODEV;
+			goto missing_key;
+		}
+		QIIB_DBG("%s: %s = %d\n", __func__, key, devp->irq_mask);
+
+		key = "qcom,rx-irq-clr";
+		ret = of_property_read_u32_array(node, key, irq_clear,
+							ARRAY_SIZE(irq_clear));
+		if (ret) {
+			QIIB_ERR("%s: missing key: %s\n", __func__, key);
+			ret = -ENODEV;
+			goto missing_key;
+		}
+
+		devp->rx_irq_reset_reg = ioremap_nocache(irq_clear[0],
+								irq_clear[1]);
+		if (!devp->rx_irq_reset_reg) {
+			QIIB_ERR("%s: unable to map rx reset reg\n", __func__);
+			ret = -ENOMEM;
+			goto missing_key;
+		}
+	}
+
+	devp->dev_name = dev_name;
+	devp->ssr_name = subsys_name;
+	devp->nb.notifier_call = qiib_restart_notifier_cb;
+
+	devp->notifier_handle = subsys_notif_register_notifier(devp->ssr_name,
+								&devp->nb);
+	if (IS_ERR_OR_NULL(devp->notifier_handle)) {
+		QIIB_ERR("%s: Could not register SSR notifier cb\n", __func__);
+		ret = -EINVAL;
+		goto ssr_reg_fail;
+	}
+
+	ret = request_irq(devp->irq_line, qiib_irq_handler,
+			irqtype | IRQF_NO_SUSPEND,
+			devp->dev_name, devp);
+	if (ret < 0) {
+		QIIB_ERR("%s: request_irq() failed on %d\n", __func__,
+				devp->irq_line);
+		goto req_irq_fail;
+	} else {
+		ret = enable_irq_wake(devp->irq_line);
+		if (ret < 0)
+			QIIB_ERR("%s: enable_irq_wake() failed on %d\n",
+					__func__, devp->irq_line);
+	}
+
+	return ret;
+
+req_irq_fail:
+	subsys_notif_unregister_notifier(devp->notifier_handle,	&devp->nb);
+ssr_reg_fail:
+	if (devp->rx_irq_reset_reg) {
+		iounmap(devp->rx_irq_reset_reg);
+		devp->rx_irq_reset_reg = NULL;
+	}
+missing_key:
+	return ret;
+}
+
+/**
+ * qiib_cleanup() - clean up all the resources
+ *
+ * This function removes all the devices, frees their memory and
+ * unregisters the character device region.
+ */
+static void qiib_cleanup(void)
+{
+	struct qiib_dev *devp;
+	struct qiib_dev *index;
+
+	mutex_lock(&qiib_info->list_lock);
+	list_for_each_entry_safe(devp, index, &qiib_info->list, dev_list) {
+		cdev_del(&devp->cdev);
+		list_del(&devp->dev_list);
+		device_destroy(qiib_info->classp,
+			       MKDEV(MAJOR(qiib_info->dev_num), devp->i));
+		if (devp->notifier_handle)
+			subsys_notif_unregister_notifier(devp->notifier_handle,
+								&devp->nb);
+		kfree(devp);
+	}
+	mutex_unlock(&qiib_info->list_lock);
+
+	if (!IS_ERR_OR_NULL(qiib_info->classp))
+		class_destroy(qiib_info->classp);
+
+	unregister_chrdev_region(MAJOR(qiib_info->dev_num), qiib_info->nports);
+}
+
+/**
+ * qiib_alloc_chrdev_region() - allocate the char device region
+ *
+ * This function allocates the qiib character device region and creates
+ * the device class.
+ *
+ * Return:	0 on success, standard Linux error codes otherwise.
+ */
+static int qiib_alloc_chrdev_region(void)
+{
+	int ret;
+
+	ret = alloc_chrdev_region(&qiib_info->dev_num,
+			       0,
+			       qiib_info->nports,
+			       DEVICE_NAME);
+	if (IS_ERR_VALUE(ret)) {
+		QIIB_ERR("%s: alloc_chrdev_region() failed ret:%i\n",
+			__func__, ret);
+		return ret;
+	}
+
+	qiib_info->classp = class_create(THIS_MODULE, DEVICE_NAME);
+	if (IS_ERR(qiib_info->classp)) {
+		QIIB_ERR("%s: class_create() failed ENOMEM\n", __func__);
+		ret = -ENOMEM;
+		unregister_chrdev_region(MAJOR(qiib_info->dev_num),
+						qiib_info->nports);
+		return ret;
+	}
+
+	return 0;
+}
+
+static int qsee_ipc_irq_bridge_probe(struct platform_device *pdev)
+{
+	int ret;
+	struct device_node *node;
+	struct qiib_dev *devp;
+	int i = 0;
+
+	qiib_info->nports = of_get_available_child_count(pdev->dev.of_node);
+	if (!qiib_info->nports) {
+		QIIB_ERR("%s: invalid nports = %d\n", __func__, qiib_info->nports);
+		return -EINVAL;
+	}
+
+	ret = qiib_alloc_chrdev_region();
+	if (ret) {
+		QIIB_ERR("%s: chrdev_region allocation failed ret:%i\n",
+			__func__, ret);
+		return ret;
+	}
+
+	for_each_available_child_of_node(pdev->dev.of_node, node) {
+		devp = kzalloc(sizeof(*devp), GFP_KERNEL);
+		if (IS_ERR_OR_NULL(devp)) {
+			QIIB_ERR("%s:Allocation failed id:%d\n", __func__, i);
+			ret = -ENOMEM;
+			goto error;
+		}
+
+		ret = qiib_parse_node(node, devp);
+		if (ret) {
+			QIIB_ERR("%s:qiib_parse_node failed %d\n", __func__, i);
+			kfree(devp);
+			goto error;
+		}
+
+		ret = qiib_add_device(devp, i);
+		if (ret < 0) {
+			QIIB_ERR("%s: add [%s] device failed ret=%d\n",
+					__func__, devp->dev_name, ret);
+			kfree(devp);
+			goto error;
+		}
+		i++;
+	}
+
+	QIIB_DBG("%s: Driver Initialized.\n", __func__);
+	return 0;
+
+error:
+	qiib_cleanup();
+	return ret;
+}
+
+static int qsee_ipc_irq_bridge_remove(struct platform_device *pdev)
+{
+	qiib_cleanup();
+	return 0;
+}
+
+static const struct of_device_id qsee_ipc_irq_bridge_match_table[] = {
+	{ .compatible = "qcom,qsee-ipc-irq-bridge" },
+	{},
+};
+
+static struct platform_driver qsee_ipc_irq_bridge_driver = {
+	.probe = qsee_ipc_irq_bridge_probe,
+	.remove = qsee_ipc_irq_bridge_remove,
+	.driver = {
+		.name = MODULE_NAME,
+		.owner = THIS_MODULE,
+		.of_match_table = qsee_ipc_irq_bridge_match_table,
+	 },
+};
+
+static int __init qsee_ipc_irq_bridge_init(void)
+{
+	int ret;
+
+	ret = qiib_driver_data_init();
+	if (ret) {
+		QIIB_ERR("%s: driver data init failed %d\n",
+			__func__, ret);
+		return ret;
+	}
+
+	ret = platform_driver_register(&qsee_ipc_irq_bridge_driver);
+	if (ret) {
+		QIIB_ERR("%s: platform driver register failed %d\n",
+			__func__, ret);
+		return ret;
+	}
+
+	return 0;
+}
+module_init(qsee_ipc_irq_bridge_init);
+
+static void __exit qsee_ipc_irq_bridge_exit(void)
+{
+	platform_driver_unregister(&qsee_ipc_irq_bridge_driver);
+	qiib_driver_data_deinit();
+}
+module_exit(qsee_ipc_irq_bridge_exit);
+MODULE_DESCRIPTION("QSEE IPC interrupt bridge");
+MODULE_LICENSE("GPL v2");
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/soc/qcom/ramdump.c	2019-01-22 16:16:26.671275095 +0100
@@ -0,0 +1,538 @@
+/* Copyright (c) 2011-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/workqueue.h>
+#include <linux/io.h>
+#include <linux/jiffies.h>
+#include <linux/sched.h>
+#include <linux/module.h>
+#include <linux/miscdevice.h>
+#include <linux/fs.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/poll.h>
+#include <linux/uaccess.h>
+#include <linux/elf.h>
+#include <linux/wait.h>
+#include <soc/qcom/ramdump.h>
+#include <linux/dma-mapping.h>
+#include <linux/of.h>
+
+#define RAMDUMP_WAIT_MSECS	120000
+#define MAX_STRTBL_SIZE 512
+#define MAX_NAME_LENGTH 16
+
+struct ramdump_device {
+	char name[256];
+
+	unsigned int data_ready;
+	unsigned int consumer_present;
+	int ramdump_status;
+
+	struct completion ramdump_complete;
+	struct miscdevice device;
+
+	wait_queue_head_t dump_wait_q;
+	int nsegments;
+	struct ramdump_segment *segments;
+	size_t elfcore_size;
+	char *elfcore_buf;
+	struct dma_attrs attrs;
+	bool complete_ramdump;
+};
+
+static int ramdump_open(struct inode *inode, struct file *filep)
+{
+	struct ramdump_device *rd_dev = container_of(filep->private_data,
+				struct ramdump_device, device);
+	rd_dev->consumer_present = 1;
+	rd_dev->ramdump_status = 0;
+	return 0;
+}
+
+static int ramdump_release(struct inode *inode, struct file *filep)
+{
+	struct ramdump_device *rd_dev = container_of(filep->private_data,
+				struct ramdump_device, device);
+	rd_dev->consumer_present = 0;
+	rd_dev->data_ready = 0;
+	complete(&rd_dev->ramdump_complete);
+	return 0;
+}
+
+static unsigned long offset_translate(loff_t user_offset,
+		struct ramdump_device *rd_dev, unsigned long *data_left,
+		void **vaddr)
+{
+	int i = 0;
+	*vaddr = NULL;
+
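+	/*
+	 * Walk the segments, consuming user_offset. Illustrative example:
+	 * with two segments of sizes 0x1000 and 0x2000, a user_offset of
+	 * 0x1800 lands in segment 1 at offset 0x800, with data_left set
+	 * to 0x1800.
+	 */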
+	for (i = 0; i < rd_dev->nsegments; i++) {
+		if (user_offset >= rd_dev->segments[i].size)
+			user_offset -= rd_dev->segments[i].size;
+		else
+			break;
+	}
+
+	if (i == rd_dev->nsegments) {
+		pr_debug("Ramdump(%s): offset_translate returning zero\n",
+				rd_dev->name);
+		*data_left = 0;
+		return 0;
+	}
+
+	*data_left = rd_dev->segments[i].size - user_offset;
+
+	pr_debug("Ramdump(%s): Returning address: %llx, data_left = %ld\n",
+		rd_dev->name, rd_dev->segments[i].address + user_offset,
+		*data_left);
+
+	if (rd_dev->segments[i].v_address)
+		*vaddr = rd_dev->segments[i].v_address + user_offset;
+
+	return rd_dev->segments[i].address + user_offset;
+}
+
+#define MAX_IOREMAP_SIZE SZ_1M
+
+static ssize_t ramdump_read(struct file *filep, char __user *buf, size_t count,
+			loff_t *pos)
+{
+	struct ramdump_device *rd_dev = container_of(filep->private_data,
+				struct ramdump_device, device);
+	void *device_mem = NULL, *origdevice_mem = NULL, *vaddr = NULL;
+	unsigned long data_left = 0, bytes_before, bytes_after;
+	unsigned long addr = 0;
+	size_t copy_size = 0, alignsize;
+	unsigned char *alignbuf = NULL, *finalbuf = NULL;
+	int ret = 0;
+	loff_t orig_pos = *pos;
+
+	if ((filep->f_flags & O_NONBLOCK) && !rd_dev->data_ready)
+		return -EAGAIN;
+
+	ret = wait_event_interruptible(rd_dev->dump_wait_q, rd_dev->data_ready);
+	if (ret)
+		return ret;
+
+	if (*pos < rd_dev->elfcore_size) {
+		copy_size = rd_dev->elfcore_size - *pos;
+		copy_size = min(copy_size, count);
+
+		if (copy_to_user(buf, rd_dev->elfcore_buf + *pos, copy_size)) {
+			ret = -EFAULT;
+			goto ramdump_done;
+		}
+		*pos += copy_size;
+		count -= copy_size;
+		buf += copy_size;
+		if (count == 0)
+			return copy_size;
+	}
+
+	addr = offset_translate(*pos - rd_dev->elfcore_size, rd_dev,
+				&data_left, &vaddr);
+
+	/* EOF check */
+	if (data_left == 0) {
+		pr_debug("Ramdump(%s): Ramdump complete. %lld bytes read.\n",
+			rd_dev->name, *pos);
+		rd_dev->ramdump_status = 0;
+		ret = 0;
+		goto ramdump_done;
+	}
+
+	copy_size = min(count, (size_t)MAX_IOREMAP_SIZE);
+	copy_size = min((unsigned long)copy_size, data_left);
+
+	init_dma_attrs(&rd_dev->attrs);
+	dma_set_attr(DMA_ATTR_SKIP_ZEROING, &rd_dev->attrs);
+	device_mem = vaddr ?: dma_remap(rd_dev->device.parent, NULL, addr,
+						copy_size, &rd_dev->attrs);
+	origdevice_mem = device_mem;
+
+	if (device_mem == NULL) {
+		pr_err("Ramdump(%s): Unable to ioremap: addr %lx, size %zd\n",
+			rd_dev->name, addr, copy_size);
+		rd_dev->ramdump_status = -1;
+		ret = -ENOMEM;
+		goto ramdump_done;
+	}
+
+	alignbuf = kzalloc(copy_size, GFP_KERNEL);
+	if (!alignbuf) {
+		pr_err("Ramdump(%s): Unable to alloc mem for aligned buf\n",
+				rd_dev->name);
+		rd_dev->ramdump_status = -1;
+		ret = -ENOMEM;
+		goto ramdump_done;
+	}
+
+	finalbuf = alignbuf;
+	alignsize = copy_size;
+
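+	/*
+	 * Device memory may fault on unaligned accesses, so copy the
+	 * unaligned head and tail with memcpy_fromio() and the 8-byte
+	 * aligned bulk with a plain memcpy().
+	 */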
+	if ((unsigned long)device_mem & 0x7) {
+		bytes_before = 8 - ((unsigned long)device_mem & 0x7);
+		memcpy_fromio(alignbuf, device_mem, bytes_before);
+		device_mem += bytes_before;
+		alignbuf += bytes_before;
+		alignsize -= bytes_before;
+	}
+
+	if (alignsize & 0x7) {
+		bytes_after = alignsize & 0x7;
+		memcpy(alignbuf, device_mem, alignsize - bytes_after);
+		device_mem += alignsize - bytes_after;
+		alignbuf += (alignsize - bytes_after);
+		alignsize = bytes_after;
+		memcpy_fromio(alignbuf, device_mem, alignsize);
+	} else {
+		memcpy(alignbuf, device_mem, alignsize);
+	}
+
+	if (copy_to_user(buf, finalbuf, copy_size)) {
+		pr_err("Ramdump(%s): Couldn't copy all data to user.\n",
+			rd_dev->name);
+		rd_dev->ramdump_status = -1;
+		ret = -EFAULT;
+		goto ramdump_done;
+	}
+
+	kfree(finalbuf);
+	if (!vaddr && origdevice_mem)
+		dma_unremap(rd_dev->device.parent, origdevice_mem, copy_size);
+
+	*pos += copy_size;
+
+	pr_debug("Ramdump(%s): Read %zd bytes from address %lx.\n",
+			rd_dev->name, copy_size, addr);
+
+	return *pos - orig_pos;
+
+ramdump_done:
+	if (!vaddr && origdevice_mem)
+		dma_unremap(rd_dev->device.parent, origdevice_mem, copy_size);
+
+	kfree(finalbuf);
+	rd_dev->data_ready = 0;
+	*pos = 0;
+	complete(&rd_dev->ramdump_complete);
+	return ret;
+}
+
+static unsigned int ramdump_poll(struct file *filep,
+					struct poll_table_struct *wait)
+{
+	struct ramdump_device *rd_dev = container_of(filep->private_data,
+				struct ramdump_device, device);
+	unsigned int mask = 0;
+
+	if (rd_dev->data_ready)
+		mask |= (POLLIN | POLLRDNORM);
+
+	poll_wait(filep, &rd_dev->dump_wait_q, wait);
+	return mask;
+}
+
+static const struct file_operations ramdump_file_ops = {
+	.open = ramdump_open,
+	.release = ramdump_release,
+	.read = ramdump_read,
+	.poll = ramdump_poll
+};
+
+void *create_ramdump_device(const char *dev_name, struct device *parent)
+{
+	int ret;
+	struct ramdump_device *rd_dev;
+
+	if (!dev_name) {
+		pr_err("%s: Invalid device name.\n", __func__);
+		return NULL;
+	}
+
+	rd_dev = kzalloc(sizeof(struct ramdump_device), GFP_KERNEL);
+
+	if (!rd_dev) {
+		pr_err("%s: Couldn't alloc space for ramdump device!\n",
+			__func__);
+		return NULL;
+	}
+
+	snprintf(rd_dev->name, ARRAY_SIZE(rd_dev->name), "ramdump_%s",
+		 dev_name);
+
+	init_completion(&rd_dev->ramdump_complete);
+
+	rd_dev->device.minor = MISC_DYNAMIC_MINOR;
+	rd_dev->device.name = rd_dev->name;
+	rd_dev->device.fops = &ramdump_file_ops;
+	rd_dev->device.parent = parent;
+	if (parent) {
+		rd_dev->complete_ramdump = of_property_read_bool(
+				parent->of_node, "qcom,complete-ramdump");
+		if (!rd_dev->complete_ramdump)
+			dev_info(parent,
+			"for %s, only the segments will be dumped.\n", dev_name);
+	}
+
+	init_waitqueue_head(&rd_dev->dump_wait_q);
+
+	ret = misc_register(&rd_dev->device);
+
+	if (ret) {
+		pr_err("%s: misc_register failed for %s (%d)\n", __func__,
+				dev_name, ret);
+		kfree(rd_dev);
+		return NULL;
+	}
+
+	return (void *)rd_dev;
+}
+EXPORT_SYMBOL(create_ramdump_device);
+
+void destroy_ramdump_device(void *dev)
+{
+	struct ramdump_device *rd_dev = dev;
+
+	if (IS_ERR_OR_NULL(rd_dev))
+		return;
+
+	misc_deregister(&rd_dev->device);
+	kfree(rd_dev);
+}
+EXPORT_SYMBOL(destroy_ramdump_device);
+
+static int _do_ramdump(void *handle, struct ramdump_segment *segments,
+		int nsegments, bool use_elf)
+{
+	int ret, i;
+	struct ramdump_device *rd_dev = (struct ramdump_device *)handle;
+	Elf32_Phdr *phdr;
+	Elf32_Ehdr *ehdr;
+	unsigned long offset;
+
+	if (!rd_dev->consumer_present) {
+		pr_err("Ramdump(%s): No consumers. Aborting..\n", rd_dev->name);
+		return -EPIPE;
+	}
+
+	if (rd_dev->complete_ramdump) {
+		for (i = 0; i < nsegments-1; i++)
+			segments[i].size =
+				segments[i + 1].address - segments[i].address;
+	}
+
+	rd_dev->segments = segments;
+	rd_dev->nsegments = nsegments;
+
+	if (use_elf) {
+		rd_dev->elfcore_size = sizeof(*ehdr) +
+				       sizeof(*phdr) * nsegments;
+		ehdr = kzalloc(rd_dev->elfcore_size, GFP_KERNEL);
+		rd_dev->elfcore_buf = (char *)ehdr;
+		if (!rd_dev->elfcore_buf)
+			return -ENOMEM;
+
+		memcpy(ehdr->e_ident, ELFMAG, SELFMAG);
+		ehdr->e_ident[EI_CLASS] = ELFCLASS32;
+		ehdr->e_ident[EI_DATA] = ELFDATA2LSB;
+		ehdr->e_ident[EI_VERSION] = EV_CURRENT;
+		ehdr->e_ident[EI_OSABI] = ELFOSABI_NONE;
+		ehdr->e_type = ET_CORE;
+		ehdr->e_version = EV_CURRENT;
+		ehdr->e_phoff = sizeof(*ehdr);
+		ehdr->e_ehsize = sizeof(*ehdr);
+		ehdr->e_phentsize = sizeof(*phdr);
+		ehdr->e_phnum = nsegments;
+
+		offset = rd_dev->elfcore_size;
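+		/* Segment payloads are laid out back to back after the headers */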
+		phdr = (Elf32_Phdr *)(ehdr + 1);
+		for (i = 0; i < nsegments; i++, phdr++) {
+			phdr->p_type = PT_LOAD;
+			phdr->p_offset = offset;
+			phdr->p_vaddr = phdr->p_paddr = segments[i].address;
+			phdr->p_filesz = phdr->p_memsz = segments[i].size;
+			phdr->p_flags = PF_R | PF_W | PF_X;
+			offset += phdr->p_filesz;
+		}
+	}
+
+	rd_dev->data_ready = 1;
+	rd_dev->ramdump_status = -1;
+
+	reinit_completion(&rd_dev->ramdump_complete);
+
+	/* Tell userspace that the data is ready */
+	wake_up(&rd_dev->dump_wait_q);
+
+	/* Wait (with a timeout) to let the ramdump complete */
+	ret = wait_for_completion_timeout(&rd_dev->ramdump_complete,
+			msecs_to_jiffies(RAMDUMP_WAIT_MSECS));
+
+	if (!ret) {
+		pr_err("Ramdump(%s): Timed out waiting for userspace.\n",
+			rd_dev->name);
+		ret = -EPIPE;
+	} else
+		ret = (rd_dev->ramdump_status == 0) ? 0 : -EPIPE;
+
+	rd_dev->data_ready = 0;
+	rd_dev->elfcore_size = 0;
+	kfree(rd_dev->elfcore_buf);
+	rd_dev->elfcore_buf = NULL;
+	return ret;
+}
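+
+#if 0	/* Illustration only; not built. */
+/*
+ * A minimal sketch of the userspace side of the handshake above, assuming
+ * a device node named /dev/ramdump_modem (the actual name depends on what
+ * the create_ramdump_device() caller passes in). open() marks a consumer
+ * present, poll() blocks until do_ramdump() sets data_ready, read() streams
+ * the dump, and close() ends the dump session via ramdump_release().
+ */
+#include <fcntl.h>
+#include <poll.h>
+#include <stdio.h>
+#include <unistd.h>
+
+int main(void)
+{
+	char buf[4096];
+	struct pollfd pfd = { .events = POLLIN };
+	ssize_t n;
+
+	pfd.fd = open("/dev/ramdump_modem", O_RDONLY);
+	if (pfd.fd < 0)
+		return 1;
+	poll(&pfd, 1, -1);			/* wait for data_ready */
+	while ((n = read(pfd.fd, buf, sizeof(buf))) > 0)
+		fwrite(buf, 1, n, stdout);	/* stream the dump out */
+	close(pfd.fd);				/* ends the dump session */
+	return 0;
+}
+#endif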
+
+static inline struct elf_shdr *elf_sheader(struct elfhdr *hdr)
+{
+	return (struct elf_shdr *)((size_t)hdr + (size_t)hdr->e_shoff);
+}
+
+static inline struct elf_shdr *elf_section(struct elfhdr *hdr, int idx)
+{
+	return &elf_sheader(hdr)[idx];
+}
+
+static inline char *elf_str_table(struct elfhdr *hdr)
+{
+	if (hdr->e_shstrndx == SHN_UNDEF)
+		return NULL;
+	return (char *)hdr + elf_section(hdr, hdr->e_shstrndx)->sh_offset;
+}
+
+static inline unsigned int set_section_name(const char *name,
+					    struct elfhdr *ehdr)
+{
+	char *strtab = elf_str_table(ehdr);
+	static int strtable_idx = 1;
+	int idx, ret = 0;
+
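+	/*
+	 * Byte 0 of the string table is reserved for the empty name, so
+	 * names are appended starting at offset 1; the value returned is
+	 * the sh_name offset for the caller's section header.
+	 */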
+	idx = strtable_idx;
+	if ((strtab == NULL) || (name == NULL))
+		return 0;
+
+	ret = idx;
+	idx += strlcpy((strtab + idx), name, MAX_NAME_LENGTH);
+	strtable_idx = idx + 1;
+
+	return ret;
+}
+
+static int _do_minidump(void *handle, struct ramdump_segment *segments,
+		int nsegments)
+{
+	int ret, i;
+	struct ramdump_device *rd_dev = (struct ramdump_device *)handle;
+	struct elfhdr *ehdr;
+	struct elf_shdr *shdr;
+	unsigned long offset, strtbl_off;
+
+	if (!rd_dev->consumer_present) {
+		pr_err("Ramdump(%s): No consumers. Aborting..\n", rd_dev->name);
+		return -EPIPE;
+	}
+
+	rd_dev->segments = segments;
+	rd_dev->nsegments = nsegments;
+
+	rd_dev->elfcore_size = sizeof(*ehdr) +
+			(sizeof(*shdr) * (nsegments + 2)) + MAX_STRTBL_SIZE;
+	ehdr = kzalloc(rd_dev->elfcore_size, GFP_KERNEL);
+	rd_dev->elfcore_buf = (char *)ehdr;
+	if (!rd_dev->elfcore_buf)
+		return -ENOMEM;
+
+	memcpy(ehdr->e_ident, ELFMAG, SELFMAG);
+	ehdr->e_ident[EI_CLASS] = ELF_CLASS;
+	ehdr->e_ident[EI_DATA] = ELF_DATA;
+	ehdr->e_ident[EI_VERSION] = EV_CURRENT;
+	ehdr->e_ident[EI_OSABI] = ELF_OSABI;
+	ehdr->e_type = ET_CORE;
+	ehdr->e_machine  = ELF_ARCH;
+	ehdr->e_version = EV_CURRENT;
+	ehdr->e_ehsize = sizeof(*ehdr);
+	ehdr->e_shoff = sizeof(*ehdr);
+	ehdr->e_shentsize = sizeof(*shdr);
+	ehdr->e_shstrndx = 1;
+
+	offset = rd_dev->elfcore_size;
+	shdr = (struct elf_shdr *)(ehdr + 1);
+	strtbl_off = sizeof(*ehdr) + sizeof(*shdr) * (nsegments + 2);
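+	/* Leave section header 0 as the all-zero SHN_UNDEF entry ELF requires */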
+	shdr++;
+	shdr->sh_type = SHT_STRTAB;
+	shdr->sh_offset = (elf_addr_t)strtbl_off;
+	shdr->sh_size = MAX_STRTBL_SIZE;
+	shdr->sh_entsize = 0;
+	shdr->sh_flags = 0;
+	shdr->sh_name = set_section_name("STR_TBL", ehdr);
+	shdr++;
+
+	for (i = 0; i < nsegments; i++, shdr++) {
+		/* Update elf header */
+		shdr->sh_type = SHT_PROGBITS;
+		shdr->sh_name = set_section_name(segments[i].name, ehdr);
+		shdr->sh_addr = (elf_addr_t)segments[i].address;
+		shdr->sh_size = segments[i].size;
+		shdr->sh_flags = SHF_WRITE;
+		shdr->sh_offset = offset;
+		shdr->sh_entsize = 0;
+		offset += shdr->sh_size;
+	}
+	ehdr->e_shnum = nsegments + 2;
+
+	rd_dev->data_ready = 1;
+	rd_dev->ramdump_status = -1;
+
+	reinit_completion(&rd_dev->ramdump_complete);
+
+	/* Tell userspace that the data is ready */
+	wake_up(&rd_dev->dump_wait_q);
+
+	/* Wait (with a timeout) to let the ramdump complete */
+	ret = wait_for_completion_timeout(&rd_dev->ramdump_complete,
+			msecs_to_jiffies(RAMDUMP_WAIT_MSECS));
+
+	if (!ret) {
+		pr_err("Ramdump(%s): Timed out waiting for userspace.\n",
+		       rd_dev->name);
+		ret = -EPIPE;
+	} else {
+		ret = (rd_dev->ramdump_status == 0) ? 0 : -EPIPE;
+	}
+
+	rd_dev->data_ready = 0;
+	rd_dev->elfcore_size = 0;
+	kfree(rd_dev->elfcore_buf);
+	rd_dev->elfcore_buf = NULL;
+	return ret;
+}
+
+int do_ramdump(void *handle, struct ramdump_segment *segments, int nsegments)
+{
+	return _do_ramdump(handle, segments, nsegments, false);
+}
+EXPORT_SYMBOL(do_ramdump);
+
+int do_minidump(void *handle, struct ramdump_segment *segments, int nsegments)
+{
+	return _do_minidump(handle, segments, nsegments);
+}
+EXPORT_SYMBOL(do_minidump);
+
+int
+do_elf_ramdump(void *handle, struct ramdump_segment *segments, int nsegments)
+{
+	return _do_ramdump(handle, segments, nsegments, true);
+}
+EXPORT_SYMBOL(do_elf_ramdump);
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/soc/qcom/remoteqdss.c	2019-01-22 16:16:26.671275095 +0100
@@ -0,0 +1,448 @@
+/*
+ * Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <soc/qcom/scm.h>
+#include <linux/debugfs.h>
+#include <linux/ratelimit.h>
+#include <linux/dma-mapping.h>
+
+#define REMOTEQDSS_FLAG_QUIET (BIT(0))
+
+static unsigned long remoteqdss_dbg_flags;
+module_param_named(dbg_flags, remoteqdss_dbg_flags, ulong, 0644);
+
+static struct dentry *remoteqdss_dir;
+
+#define REMOTEQDSS_ERR(fmt, ...) \
+	pr_debug("%s: " fmt, __func__, ## __VA_ARGS__)
+
+#define REMOTEQDSS_ERR_CALLER(fmt, caller, ...) \
+	pr_debug("%pf: " fmt, caller, ## __VA_ARGS__)
+
+struct qdss_msg_translation {
+	u64 val;
+	char *msg;
+};
+
+/*
+ * id			Unique identifier
+ * sw_entity_group	Array index
+ * sw_event_group	Array index
+ * dir			Parent debugfs directory
+ */
+struct remoteqdss_data {
+	uint32_t id;
+	uint32_t sw_entity_group;
+	uint32_t sw_event_group;
+	struct dentry *dir;
+};
+
+static struct device dma_dev;
+
+/* Allowed message formats */
+
+enum remoteqdss_cmd_id {
+	CMD_ID_QUERY_SWEVENT_TAG,
+	CMD_ID_FILTER_SWTRACE_STATE,
+	CMD_ID_QUERY_SWTRACE_STATE,
+	CMD_ID_FILTER_SWEVENT,
+	CMD_ID_QUERY_SWEVENT,
+	CMD_ID_FILTER_SWENTITY,
+	CMD_ID_QUERY_SWENTITY,
+};
+
+struct remoteqdss_header_fmt {
+	uint32_t subsys_id;
+	uint32_t cmd_id;
+};
+
+struct remoteqdss_filter_swtrace_state_fmt {
+	struct remoteqdss_header_fmt h;
+	uint32_t state;
+};
+
+struct remoteqdss_filter_swevent_fmt {
+	struct remoteqdss_header_fmt h;
+	uint32_t event_group;
+	uint32_t event_mask;
+};
+
+struct remoteqdss_query_swevent_fmt {
+	struct remoteqdss_header_fmt h;
+	uint32_t event_group;
+};
+
+struct remoteqdss_filter_swentity_fmt {
+	struct remoteqdss_header_fmt h;
+	uint32_t entity_group;
+	uint32_t entity_mask;
+};
+
+struct remoteqdss_query_swentity_fmt {
+	struct remoteqdss_header_fmt h;
+	uint32_t entity_group;
+};
+
+/* msgs is a null terminated array */
+static void remoteqdss_err_translation(struct qdss_msg_translation *msgs,
+					u64 err, const void *caller)
+{
+	static DEFINE_RATELIMIT_STATE(rl, 5 * HZ, 2);
+	struct qdss_msg_translation *msg;
+
+	if (!err)
+		return;
+
+	if (remoteqdss_dbg_flags & REMOTEQDSS_FLAG_QUIET)
+		return;
+
+	for (msg = msgs; msg->msg; msg++) {
+		if (err == msg->val && __ratelimit(&rl)) {
+			REMOTEQDSS_ERR_CALLER("0x%llx: %s\n", caller, err,
+						msg->msg);
+			return;
+		}
+	}
+
+	REMOTEQDSS_ERR_CALLER("Error 0x%llx\n", caller, err);
+}
+
+/* Shared across all remoteqdss scm functions */
+#define SCM_CMD_ID (0x1)
+
+/* Response Values */
+#define SCM_CMD_FAIL		(0x80)
+#define SCM_QDSS_UNAVAILABLE	(0x81)
+#define SCM_UNINITIALIZED	(0x82)
+#define SCM_BAD_ARG		(0x83)
+#define SCM_BAD_SUBSYS		(0x85)
+
+static struct qdss_msg_translation remoteqdss_scm_msgs[] = {
+	{SCM_CMD_FAIL,
+		"Command failed"},
+	{SCM_QDSS_UNAVAILABLE,
+		"QDSS not available or cannot turn QDSS (clock) on"},
+	{SCM_UNINITIALIZED,
+		"Tracer not initialized or unable to initialize"},
+	{SCM_BAD_ARG,
+		"Invalid parameter value"},
+	{SCM_BAD_SUBSYS,
+		"Incorrect subsys ID"},
+	{}
+};
+
+static struct remoteqdss_data *create_remoteqdss_data(u32 id)
+{
+	struct remoteqdss_data *data;
+
+	data = kzalloc(sizeof(*data), GFP_KERNEL);
+	if (!data)
+		return NULL;
+
+	data->id = id;
+	return data;
+}
+
+static void free_remoteqdss_data(struct remoteqdss_data *data)
+{
+	kfree(data);
+}
+
+static int remoteqdss_do_scm_call(struct scm_desc *desc,
+		dma_addr_t addr, size_t size, const void *caller)
+{
+	int ret;
+
+	memset(desc, 0, sizeof(*desc));
+	desc->args[0] = dma_to_phys(NULL, addr);
+	desc->args[1] = size;
+	desc->arginfo = SCM_ARGS(2, SCM_RO, SCM_VAL);
+
+	ret = scm_call2(
+		SCM_SIP_FNID(SCM_SVC_QDSS, SCM_CMD_ID),
+		desc);
+	if (ret)
+		return ret;
+
+	remoteqdss_err_translation(remoteqdss_scm_msgs, desc->ret[0], caller);
+	ret = desc->ret[0] ? -EINVAL : 0;
+	return ret;
+}
+
+static int remoteqdss_scm_query_swtrace(void *priv, u64 *val)
+{
+	struct remoteqdss_data *data = priv;
+	int ret;
+	struct scm_desc desc;
+	struct remoteqdss_header_fmt *fmt;
+	dma_addr_t addr;
+
+	fmt = dma_alloc_coherent(&dma_dev, sizeof(*fmt), &addr, GFP_KERNEL);
+	if (!fmt)
+		return -ENOMEM;
+	fmt->subsys_id = data->id;
+	fmt->cmd_id = CMD_ID_QUERY_SWTRACE_STATE;
+
+	ret = remoteqdss_do_scm_call(&desc, addr, sizeof(*fmt),
+					__builtin_return_address(0));
+	*val = desc.ret[1];
+
+	dma_free_coherent(&dma_dev, sizeof(*fmt), fmt, addr);
+	return ret;
+}
+
+static int remoteqdss_scm_filter_swtrace(void *priv, u64 val)
+{
+	struct remoteqdss_data *data = priv;
+	int ret;
+	struct scm_desc desc;
+	struct remoteqdss_filter_swtrace_state_fmt *fmt;
+	dma_addr_t addr;
+
+	fmt = dma_alloc_coherent(&dma_dev, sizeof(*fmt), &addr, GFP_KERNEL);
+	if (!fmt)
+		return -ENOMEM;
+	fmt->h.subsys_id = data->id;
+	fmt->h.cmd_id = CMD_ID_FILTER_SWTRACE_STATE;
+	fmt->state = (uint32_t)val;
+
+	ret = remoteqdss_do_scm_call(&desc, addr, sizeof(*fmt),
+					__builtin_return_address(0));
+
+	dma_free_coherent(&dma_dev, sizeof(*fmt), fmt, addr);
+	return ret;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(fops_sw_trace_output,
+			remoteqdss_scm_query_swtrace,
+			remoteqdss_scm_filter_swtrace,
+			"0x%llx\n");
+
+static int remoteqdss_scm_query_tag(void *priv, u64 *val)
+{
+	struct remoteqdss_data *data = priv;
+	int ret;
+	struct scm_desc desc;
+	struct remoteqdss_header_fmt *fmt;
+	dma_addr_t addr;
+
+	fmt = dma_alloc_coherent(&dma_dev, sizeof(*fmt), &addr, GFP_KERNEL);
+	if (!fmt)
+		return -ENOMEM;
+	fmt->subsys_id = data->id;
+	fmt->cmd_id = CMD_ID_QUERY_SWEVENT_TAG;
+
+	ret = remoteqdss_do_scm_call(&desc, addr, sizeof(*fmt),
+					__builtin_return_address(0));
+	*val = desc.ret[1];
+
+	dma_free_coherent(&dma_dev, sizeof(*fmt), fmt, addr);
+	return ret;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(fops_tag,
+			remoteqdss_scm_query_tag,
+			NULL,
+			"0x%llx\n");
+
+static int remoteqdss_scm_query_swevent(void *priv, u64 *val)
+{
+	struct remoteqdss_data *data = priv;
+	int ret;
+	struct scm_desc desc;
+	struct remoteqdss_query_swevent_fmt *fmt;
+	dma_addr_t addr;
+
+	fmt = dma_alloc_coherent(&dma_dev, sizeof(*fmt), &addr, GFP_KERNEL);
+	if (!fmt)
+		return -ENOMEM;
+	fmt->h.subsys_id = data->id;
+	fmt->h.cmd_id = CMD_ID_QUERY_SWEVENT;
+	fmt->event_group = data->sw_event_group;
+
+	ret = remoteqdss_do_scm_call(&desc, addr, sizeof(*fmt),
+					__builtin_return_address(0));
+	*val = desc.ret[1];
+
+	dma_free_coherent(&dma_dev, sizeof(*fmt), fmt, addr);
+	return ret;
+}
+
+static int remoteqdss_scm_filter_swevent(void *priv, u64 val)
+{
+	struct remoteqdss_data *data = priv;
+	int ret;
+	struct scm_desc desc;
+	struct remoteqdss_filter_swevent_fmt *fmt;
+	dma_addr_t addr;
+
+	fmt = dma_alloc_coherent(&dma_dev, sizeof(*fmt), &addr, GFP_KERNEL);
+	if (!fmt)
+		return -ENOMEM;
+	fmt->h.subsys_id = data->id;
+	fmt->h.cmd_id = CMD_ID_FILTER_SWEVENT;
+	fmt->event_group = data->sw_event_group;
+	fmt->event_mask = (uint32_t)val;
+
+	ret = remoteqdss_do_scm_call(&desc, addr, sizeof(*fmt),
+					__builtin_return_address(0));
+
+	dma_free_coherent(&dma_dev, sizeof(*fmt), fmt, addr);
+	return ret;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(fops_swevent,
+			remoteqdss_scm_query_swevent,
+			remoteqdss_scm_filter_swevent,
+			"0x%llx\n");
+
+static int remoteqdss_scm_query_swentity(void *priv, u64 *val)
+{
+	struct remoteqdss_data *data = priv;
+	int ret;
+	struct scm_desc desc;
+	struct remoteqdss_query_swentity_fmt *fmt;
+	dma_addr_t addr;
+
+	fmt = dma_alloc_coherent(&dma_dev, sizeof(*fmt), &addr, GFP_KERNEL);
+	if (!fmt)
+		return -ENOMEM;
+	fmt->h.subsys_id = data->id;
+	fmt->h.cmd_id = CMD_ID_QUERY_SWENTITY;
+	fmt->entity_group = data->sw_entity_group;
+
+	ret = remoteqdss_do_scm_call(&desc, addr, sizeof(*fmt),
+					__builtin_return_address(0));
+	*val = desc.ret[1];
+
+	dma_free_coherent(&dma_dev, sizeof(*fmt), fmt, addr);
+	return ret;
+}
+
+static int remoteqdss_scm_filter_swentity(void *priv, u64 val)
+{
+	struct remoteqdss_data *data = priv;
+	int ret;
+	struct scm_desc desc;
+	struct remoteqdss_filter_swentity_fmt *fmt;
+	dma_addr_t addr;
+
+	fmt = dma_alloc_coherent(&dma_dev, sizeof(*fmt), &addr, GFP_KERNEL);
+	if (!fmt)
+		return -ENOMEM;
+	fmt->h.subsys_id = data->id;
+	fmt->h.cmd_id = CMD_ID_FILTER_SWENTITY;
+	fmt->entity_group = data->sw_entity_group;
+	fmt->entity_mask = (uint32_t)val;
+
+	ret = remoteqdss_do_scm_call(&desc, addr, sizeof(*fmt),
+					__builtin_return_address(0));
+
+	dma_free_coherent(&dma_dev, sizeof(*fmt), fmt, addr);
+	return ret;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(fops_swentity,
+			remoteqdss_scm_query_swentity,
+			remoteqdss_scm_filter_swentity,
+			"0x%llx\n");
+
+static void __init enumerate_scm_devices(struct dentry *parent)
+{
+	u64 unused;
+	int ret;
+	struct remoteqdss_data *data;
+	struct dentry *dentry;
+
+	if (!is_scm_armv8())
+		return;
+
+	data = create_remoteqdss_data(0);
+	if (!data)
+		return;
+
+	/* Assume failure means device not present */
+	ret = remoteqdss_scm_query_swtrace(data, &unused);
+	if (ret)
+		goto out;
+
+	data->dir = debugfs_create_dir("tz", parent);
+	if (IS_ERR_OR_NULL(data->dir))
+		goto out;
+
+	dentry = debugfs_create_file("sw_trace_output", S_IRUGO | S_IWUSR,
+			data->dir, data, &fops_sw_trace_output);
+	if (IS_ERR_OR_NULL(dentry))
+		goto out;
+
+	dentry = debugfs_create_u32("sw_entity_group", S_IRUGO | S_IWUSR,
+			data->dir, &data->sw_entity_group);
+	if (IS_ERR_OR_NULL(dentry))
+		goto out;
+
+	dentry = debugfs_create_u32("sw_event_group", S_IRUGO | S_IWUSR,
+			data->dir, &data->sw_event_group);
+	if (IS_ERR_OR_NULL(dentry))
+		goto out;
+
+	dentry = debugfs_create_file("tag", S_IRUGO,
+			data->dir, data, &fops_tag);
+	if (IS_ERR_OR_NULL(dentry))
+		goto out;
+
+	dentry = debugfs_create_file("swevent", S_IRUGO | S_IWUSR,
+			data->dir, data, &fops_swevent);
+	if (IS_ERR_OR_NULL(dentry))
+		goto out;
+
+	dentry = debugfs_create_file("swentity", S_IRUGO | S_IWUSR,
+			data->dir, data, &fops_swentity);
+	if (IS_ERR_OR_NULL(dentry))
+		goto out;
+
+	return;
+
+out:
+	debugfs_remove_recursive(data->dir);
+	free_remoteqdss_data(data);
+}
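+
+/*
+ * Resulting debugfs layout (a sketch, assuming debugfs is mounted at
+ * /sys/kernel/debug): remoteqdss/tz/ exposes sw_trace_output, tag, swevent
+ * and swentity, which issue the SCM commands above, while the plain u32
+ * files sw_entity_group and sw_event_group select which group the swevent
+ * and swentity files operate on.
+ */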
+
+static int __init remoteqdss_init(void)
+{
+	unsigned long old_flags = remoteqdss_dbg_flags;
+	int ret;
+
+	/* Set up DMA */
+	arch_setup_dma_ops(&dma_dev, 0, U64_MAX, NULL, false);
+	ret = dma_coerce_mask_and_coherent(&dma_dev, DMA_BIT_MASK(64));
+	if (ret)
+		return ret;
+
+	/*
+	 * disable normal error messages while checking
+	 * if support is present.
+	 */
+	remoteqdss_dbg_flags |= REMOTEQDSS_FLAG_QUIET;
+
+	remoteqdss_dir = debugfs_create_dir("remoteqdss", NULL);
+	if (!remoteqdss_dir) {
+		/* restore the messaging flags before bailing out */
+		remoteqdss_dbg_flags = old_flags;
+		return 0;
+	}
+
+	enumerate_scm_devices(remoteqdss_dir);
+
+	remoteqdss_dbg_flags = old_flags;
+	return 0;
+}
+late_initcall(remoteqdss_init);
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/soc/qcom/rpm_log.c	2019-01-22 16:16:26.671275095 +0100
@@ -0,0 +1,550 @@
+/* Copyright (c) 2010-2011, 2013-2014, 2017, The Linux Foundation.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/uaccess.h>
+
+#include "rpm_log.h"
+
+/* registers in MSM_RPM_LOG_PAGE_INDICES */
+enum {
+	MSM_RPM_LOG_TAIL,
+	MSM_RPM_LOG_HEAD
+};
+
+/* rounds a message byte length up to the next multiple of 4 */
+#define PADDED_LENGTH(x) (0xFFFFFFFC & ((x) + 3))
+
+/* calculates the character string length of a message of byte length x */
+#define PRINTED_LENGTH(x) ((x) * 6 + 3)
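+/*
+ * For example, PADDED_LENGTH(5) == 8 and PRINTED_LENGTH(5) == 33
+ * ("- " prefix + five "0xXX, " entries + trailing "\n").
+ */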
+
+/* number of ms to wait between checking for new messages in the RPM log */
+#define RECHECK_TIME (50)
+
+#define VERSION_8974 0x1000
+#define RPM_ULOG_LENGTH_SHIFT 16
+#define RPM_ULOG_LENGTH_MASK  0xFFFF0000
+
+struct msm_rpm_log_buffer {
+	char *data;
+	u32 len;
+	u32 pos;
+	struct mutex mutex;
+	u32 max_len;
+	u32 read_idx;
+	struct msm_rpm_log_platform_data *pdata;
+};
+
+/******************************************************************************
+ * Internal functions
+ *****************************************************************************/
+
+static inline u32
+msm_rpm_log_read(const struct msm_rpm_log_platform_data *pdata, u32 page,
+		 u32 reg)
+{
+	return readl_relaxed(pdata->reg_base + pdata->reg_offsets[page]
+				+ reg * 4);
+}
+
+/*
+ * msm_rpm_log_copy() - Copies messages from a volatile circular buffer in
+ *			the RPM's shared memory into a private local buffer
+ * msg_buffer:		pointer to local buffer (string)
+ * buf_len:		length of local buffer in bytes
+ * read_start_idx:	index into shared memory buffer
+ *
+ * Return value:	number of bytes written to the local buffer
+ *
+ * Copies messages stored in a circular buffer in the RPM Message Memory into
+ * a specified local buffer.  The RPM processor is unaware of these reading
+ * efforts, so care is taken to make sure that messages are valid both before
+ * and after reading.  The RPM processor utilizes a ULog driver to write the
+ * log.  The RPM processor maintains tail and head indices.  These correspond
+ * to the next byte to write into, and the first valid byte, respectively.
+ * Both indices increase monotonically (except for rollover).
+ *
+ * Messages take the form of [(u32)length] [(char)data0,1,...] in which the
+ * length specifies the number of payload bytes.  Messages must be 4 byte
+ * aligned, so padding is added at the end of a message as needed.
+ *
+ * Print format:
+ * - 0xXX, 0xXX, 0xXX
+ * - 0xXX
+ * etc...
+ */
+static u32 msm_rpm_log_copy(const struct msm_rpm_log_platform_data *pdata,
+			    char *msg_buffer, u32 buf_len, u32 *read_idx)
+{
+	u32 head_idx, tail_idx;
+	u32 pos = 0;
+	u32 i = 0;
+	u32 msg_len;
+	u32 pos_start;
+	char temp[4];
+
+	tail_idx = msm_rpm_log_read(pdata, MSM_RPM_LOG_PAGE_INDICES,
+				    MSM_RPM_LOG_TAIL);
+	head_idx = msm_rpm_log_read(pdata, MSM_RPM_LOG_PAGE_INDICES,
+				    MSM_RPM_LOG_HEAD);
+
+	/* loop while the remote buffer has valid messages left to read */
+	while (tail_idx - head_idx > 0 && tail_idx - *read_idx > 0) {
+		head_idx = msm_rpm_log_read(pdata, MSM_RPM_LOG_PAGE_INDICES,
+					    MSM_RPM_LOG_HEAD);
+		tail_idx = msm_rpm_log_read(pdata, MSM_RPM_LOG_PAGE_INDICES,
+				    MSM_RPM_LOG_TAIL);
+		/*
+		 * Check that the message to be read is still valid: with
+		 * monotonically increasing indices, tail - read is the
+		 * number of unread bytes and tail - head the number of
+		 * valid bytes, so a reader that fell behind the writer
+		 * resynchronizes at head.
+		 */
+		if (tail_idx - *read_idx > tail_idx - head_idx) {
+			*read_idx = head_idx;
+			continue;
+		}
+
+		/*
+		 * Ensure that all indices are 4 byte aligned.
+		 * This condition is required to interact properly with a
+		 * ULog buffer.
+		 */
+		if (!IS_ALIGNED((tail_idx | head_idx | *read_idx), 4))
+			break;
+
+		msg_len = msm_rpm_log_read(pdata, MSM_RPM_LOG_PAGE_BUFFER,
+				((*read_idx) & pdata->log_len_mask) >> 2);
+
+		/*
+		 * On 8974 the message length is in the upper 16 bits of the
+		 * first word; subtract the 4 bytes holding length and format
+		 * from the payload length.
+		 */
+		if (pdata->version == VERSION_8974) {
+			msg_len = (msg_len & RPM_ULOG_LENGTH_MASK) >>
+					RPM_ULOG_LENGTH_SHIFT;
+			msg_len -= 4;
+		}
+
+		/* handle messages that claim to be longer than the log */
+		if (PADDED_LENGTH(msg_len) > tail_idx - *read_idx - 4)
+			msg_len = tail_idx - *read_idx - 4;
+
+		/* check that the local buffer has enough space for this msg */
+		if (pos + PRINTED_LENGTH(msg_len) > buf_len)
+			break;
+
+		pos_start = pos;
+		pos += scnprintf(msg_buffer + pos, buf_len - pos, "- ");
+
+		/* copy message payload to local buffer */
+		for (i = 0; i < msg_len; i++) {
+			/* read from shared memory 4 bytes at a time */
+			if (IS_ALIGNED(i, 4))
+				*((u32 *)temp) = msm_rpm_log_read(pdata,
+						MSM_RPM_LOG_PAGE_BUFFER,
+						((*read_idx + 4 + i) &
+						pdata->log_len_mask) >> 2);
+
+			pos += scnprintf(msg_buffer + pos, buf_len - pos,
+					 "0x%02X, ", temp[i & 0x03]);
+		}
+
+		pos += scnprintf(msg_buffer + pos, buf_len - pos, "\n");
+
+		head_idx = msm_rpm_log_read(pdata, MSM_RPM_LOG_PAGE_INDICES,
+					    MSM_RPM_LOG_HEAD);
+		tail_idx = msm_rpm_log_read(pdata, MSM_RPM_LOG_PAGE_INDICES,
+				    MSM_RPM_LOG_TAIL);
+
+		/* roll back if message that was read is not still valid */
+		if (tail_idx - *read_idx > tail_idx - head_idx)
+			pos = pos_start;
+
+		*read_idx += PADDED_LENGTH(msg_len) + 4;
+	}
+
+	return pos;
+}
+
+
+/*
+ * msm_rpm_log_file_read() - Reads in log buffer messages then outputs them to a
+ *			     user buffer
+ *
+ * Return value:
+ *	0:	 success
+ *	-ENOMEM: no memory available
+ *	-EINVAL: user buffer null or requested bytes 0
+ *	-EFAULT: user buffer not writeable
+ *	-EAGAIN: no bytes available at the moment
+ */
+static ssize_t msm_rpm_log_file_read(struct file *file, char __user *bufu,
+				     size_t count, loff_t *ppos)
+{
+	u32 out_len, remaining;
+	struct msm_rpm_log_platform_data *pdata;
+	struct msm_rpm_log_buffer *buf;
+
+	buf = file->private_data;
+
+	if (!buf)
+		return -ENOMEM;
+
+	pdata = buf->pdata;
+
+	if (!pdata)
+		return -EINVAL;
+	if (!buf->data)
+		return -ENOMEM;
+	if (!bufu || count == 0)
+		return -EINVAL;
+	if (!access_ok(VERIFY_WRITE, bufu, count))
+		return -EFAULT;
+
+	mutex_lock(&buf->mutex);
+	/* check for more messages if local buffer empty */
+	if (buf->pos == buf->len) {
+		buf->pos = 0;
+		buf->len = msm_rpm_log_copy(pdata, buf->data, buf->max_len,
+						&(buf->read_idx));
+	}
+
+	if ((file->f_flags & O_NONBLOCK) && buf->len == 0) {
+		mutex_unlock(&buf->mutex);
+		return -EAGAIN;
+	}
+
+	/* loop until new messages arrive */
+	while (buf->len == 0) {
+		cond_resched();
+		if (msleep_interruptible(RECHECK_TIME))
+			break;
+		buf->len = msm_rpm_log_copy(pdata, buf->data, buf->max_len,
+						&(buf->read_idx));
+	}
+
+	out_len = ((buf->len - buf->pos) < count ? buf->len - buf->pos : count);
+
+	remaining = __copy_to_user(bufu, &(buf->data[buf->pos]), out_len);
+	buf->pos += out_len - remaining;
+	mutex_unlock(&buf->mutex);
+
+	return out_len - remaining;
+}
+
+
+/*
+ * msm_rpm_log_file_open() - Allows a new reader to open the RPM log virtual
+ *			      file
+ *
+ * One local buffer is kmalloc'ed for each reader, so no resource sharing has
+ * to take place (besides the read only access to the RPM log buffer).
+ *
+ * Return value:
+ *	0:	 success
+ *	-ENOMEM: no memory available
+ */
+static int msm_rpm_log_file_open(struct inode *inode, struct file *file)
+{
+	struct msm_rpm_log_buffer *buf;
+	struct msm_rpm_log_platform_data *pdata;
+
+	pdata = inode->i_private;
+	if (!pdata)
+		return -EINVAL;
+
+	file->private_data =
+		   kmalloc(sizeof(struct msm_rpm_log_buffer), GFP_KERNEL);
+	if (!file->private_data) {
+		pr_err("%s: ERROR kmalloc failed to allocate %zu bytes\n",
+			__func__, sizeof(struct msm_rpm_log_buffer));
+		return -ENOMEM;
+	}
+	buf = file->private_data;
+
+	buf->data = kmalloc(PRINTED_LENGTH(pdata->log_len), GFP_KERNEL);
+	if (!buf->data) {
+		kfree(file->private_data);
+		file->private_data = NULL;
+		pr_err("%s: ERROR kmalloc failed to allocate %d bytes\n",
+			__func__, PRINTED_LENGTH(pdata->log_len));
+		return -ENOMEM;
+	}
+
+	buf->pdata = pdata;
+	buf->len = 0;
+	buf->pos = 0;
+	mutex_init(&buf->mutex);
+	buf->max_len = PRINTED_LENGTH(pdata->log_len);
+	buf->read_idx = msm_rpm_log_read(pdata, MSM_RPM_LOG_PAGE_INDICES,
+					 MSM_RPM_LOG_HEAD);
+	return 0;
+}
+
+static int msm_rpm_log_file_close(struct inode *inode, struct file *file)
+{
+	kfree(((struct msm_rpm_log_buffer *)file->private_data)->data);
+	kfree(file->private_data);
+	return 0;
+}
+
+
+static const struct file_operations msm_rpm_log_file_fops = {
+	.owner   = THIS_MODULE,
+	.open    = msm_rpm_log_file_open,
+	.read    = msm_rpm_log_file_read,
+	.release = msm_rpm_log_file_close,
+};
+
+static int msm_rpm_log_probe(struct platform_device *pdev)
+{
+	struct dentry *dent;
+	struct msm_rpm_log_platform_data *pdata;
+	struct resource *res = NULL, *offset = NULL;
+	struct device_node *node = NULL;
+	phys_addr_t page_buffer_address, rpm_addr_phys;
+	int ret = 0;
+	char *key = NULL;
+	uint32_t val = 0;
+	uint32_t offset_addr = 0;
+	void __iomem *phys_ptr = NULL;
+
+	node = pdev->dev.of_node;
+
+	if (node) {
+		pdata = kzalloc(sizeof(struct msm_rpm_log_platform_data),
+				GFP_KERNEL);
+		if (!pdata)
+			return -ENOMEM;
+
+		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+		if (!res) {
+			pr_err("%s: could not get resource\n", __func__);
+			kfree(pdata);
+			return -EINVAL;
+		}
+
+		offset = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+		if (offset) {
+			/* Remap the rpm-log pointer */
+			phys_ptr = ioremap_nocache(offset->start, SZ_4);
+			if (!phys_ptr) {
+				pr_err("%s: Failed to ioremap address: %pa\n",
+						__func__, &offset->start);
+				kfree(pdata);
+				return -ENODEV;
+			}
+			offset_addr = readl_relaxed(phys_ptr);
+			iounmap(phys_ptr);
+		}
+
+		pdata->phys_addr_base = res->start + offset_addr;
+		pdata->phys_size = resource_size(res);
+
+		pdata->reg_base = ioremap_nocache(pdata->phys_addr_base,
+					pdata->phys_size);
+		if (!pdata->reg_base) {
+			pr_err("%s: ERROR could not ioremap: start=%pa, len=%u\n",
+				__func__, &pdata->phys_addr_base,
+				pdata->phys_size);
+			kfree(pdata);
+			return -EBUSY;
+		}
+		/* Read various parameters from the header if the
+		 * version of the RPM Ulog is 0x1000. This version
+		 * corresponds to the node in the rpm header which
+		 * holds RPM log on 8974.
+		 *
+		 * offset-page-buffer-addr: At this offset header
+		 * contains address of the location where raw log
+		 * starts
+		 * offset-log-len: At this offset header contains
+		 * the length of the log buffer.
+		 * offset-log-len-mask: At this offset header contains
+		 * the log length mask for the buffer.
+		 * offset-page-indices: At this offset header contains
+		 * the index for writer.
+		 */
+
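+		/*
+		 * A hypothetical DT fragment with the properties read below
+		 * (all values are illustrative, not from a real board):
+		 *
+		 *	rpm-log {
+		 *		compatible = "qcom,rpm-log";
+		 *		qcom,offset-version = <4>;
+		 *		qcom,rpm-addr-phys = <0xfc000000>;
+		 *		qcom,offset-page-buffer-addr = <36>;
+		 *		qcom,offset-log-len = <40>;
+		 *		qcom,offset-log-len-mask = <44>;
+		 *		qcom,offset-page-indices = <56>;
+		 *	};
+		 */
+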
+		key = "qcom,offset-version";
+		ret = of_property_read_u32(node, key, &val);
+		if (ret) {
+			pr_err("%s: Error in name %s key %s\n",
+				__func__, node->full_name, key);
+			ret = -EFAULT;
+			goto fail;
+		}
+
+		pdata->version = readl_relaxed(pdata->reg_base + val);
+		if (pdata->version == VERSION_8974) {
+			key = "qcom,rpm-addr-phys";
+			ret = of_property_read_u32(node, key, &val);
+			if (ret) {
+				pr_err("%s: Error in name %s key %s\n",
+					__func__, node->full_name, key);
+				ret = -EFAULT;
+				goto fail;
+			}
+
+			rpm_addr_phys = val;
+
+			key = "qcom,offset-page-buffer-addr";
+			ret = of_property_read_u32(node, key, &val);
+			if (ret) {
+				pr_err("%s: Error in name %s key %s\n",
+					__func__, node->full_name, key);
+				ret = -EFAULT;
+				goto fail;
+			}
+
+			page_buffer_address = rpm_addr_phys +
+				readl_relaxed(pdata->reg_base + val);
+			pdata->reg_offsets[MSM_RPM_LOG_PAGE_BUFFER] =
+				page_buffer_address - pdata->phys_addr_base;
+
+			key = "qcom,offset-log-len";
+			ret = of_property_read_u32(node, key, &val);
+			if (ret) {
+				pr_err("%s: Error in name %s key %s\n",
+					__func__, node->full_name, key);
+				ret = -EFAULT;
+				goto fail;
+			}
+			pdata->log_len = readl_relaxed(pdata->reg_base + val);
+
+			if (pdata->log_len > pdata->phys_size) {
+				pr_err("%s: Error phy size: %d should be atleast log length: %d\n",
+					__func__, pdata->phys_size,
+					pdata->log_len);
+
+				ret = -EINVAL;
+				goto fail;
+			}
+
+			key = "qcom,offset-log-len-mask";
+			ret = of_property_read_u32(node, key, &val);
+			if (ret) {
+				pr_err("%s: Error in name %s key %s\n",
+					__func__, node->full_name, key);
+				ret = -EFAULT;
+				goto fail;
+			}
+			pdata->log_len_mask = readl_relaxed(pdata->reg_base
+					+ val);
+
+			key = "qcom,offset-page-indices";
+			ret = of_property_read_u32(node, key, &val);
+			if (ret) {
+				pr_err("%s: Error in name %s key %s\n",
+					__func__, node->full_name, key);
+				ret = -EFAULT;
+				goto fail;
+			}
+			pdata->reg_offsets[MSM_RPM_LOG_PAGE_INDICES] =
+						val;
+		} else {
+			ret = -EINVAL;
+			goto fail;
+		}
+
+	} else {
+		pdata = pdev->dev.platform_data;
+		if (!pdata)
+			return -EINVAL;
+
+		pdata->reg_base = ioremap(pdata->phys_addr_base,
+				pdata->phys_size);
+		if (!pdata->reg_base) {
+			pr_err("%s: ERROR could not ioremap: start=%pa, len=%u\n",
+				__func__, &pdata->phys_addr_base,
+				pdata->phys_size);
+			return -EBUSY;
+		}
+	}
+
+	dent = debugfs_create_file("rpm_log", S_IRUGO, NULL,
+			pdata, &msm_rpm_log_file_fops);
+	if (!dent) {
+		pr_err("%s: ERROR debugfs_create_file failed\n", __func__);
+		if (pdata->version == VERSION_8974) {
+			ret = -ENOMEM;
+			goto fail;
+		}
+		return -ENOMEM;
+	}
+
+	platform_set_drvdata(pdev, dent);
+
+	pr_notice("%s: OK\n", __func__);
+	return 0;
+
+fail:
+	iounmap(pdata->reg_base);
+	kfree(pdata);
+	return ret;
+}
+
+static int msm_rpm_log_remove(struct platform_device *pdev)
+{
+	struct dentry *dent;
+	struct msm_rpm_log_platform_data *pdata;
+
+	pdata = pdev->dev.platform_data;
+
+	iounmap(pdata->reg_base);
+
+	dent = platform_get_drvdata(pdev);
+	debugfs_remove(dent);
+	platform_set_drvdata(pdev, NULL);
+
+	pr_notice("%s: OK\n", __func__);
+	return 0;
+}
+
+static const struct of_device_id rpm_log_table[] = {
+	       {.compatible = "qcom,rpm-log"},
+	       {},
+};
+
+static struct platform_driver msm_rpm_log_driver = {
+	.probe		= msm_rpm_log_probe,
+	.remove		= msm_rpm_log_remove,
+	.driver		= {
+		.name = "msm_rpm_log",
+		.owner = THIS_MODULE,
+		.of_match_table = rpm_log_table,
+	},
+};
+
+static int __init msm_rpm_log_init(void)
+{
+	return platform_driver_register(&msm_rpm_log_driver);
+}
+
+static void __exit msm_rpm_log_exit(void)
+{
+	platform_driver_unregister(&msm_rpm_log_driver);
+}
+
+module_init(msm_rpm_log_init);
+module_exit(msm_rpm_log_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("MSM RPM Log driver");
+MODULE_ALIAS("platform:msm_rpm_log");
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/soc/qcom/rpm_log.h	2019-01-22 16:16:26.671275095 +0100
@@ -0,0 +1,35 @@
+/* Copyright (c) 2010, 2013, 2016 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __ARCH_ARM_MACH_MSM_RPM_LOG_H
+#define __ARCH_ARM_MACH_MSM_RPM_LOG_H
+
+#include <linux/types.h>
+
+enum {
+	MSM_RPM_LOG_PAGE_INDICES,
+	MSM_RPM_LOG_PAGE_BUFFER,
+	MSM_RPM_LOG_PAGE_COUNT
+};
+
+struct msm_rpm_log_platform_data {
+	u32 reg_offsets[MSM_RPM_LOG_PAGE_COUNT];
+	u32 log_len;
+	u32 log_len_mask;
+	phys_addr_t phys_addr_base;
+	u32 phys_size;
+	u32 version;
+	void __iomem *reg_base;
+};
+
+#endif /* __ARCH_ARM_MACH_MSM_RPM_LOG_H */
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/soc/qcom/rpm_master_stat.c	2019-01-22 16:16:26.671275095 +0100
@@ -0,0 +1,491 @@
+/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/mm.h>
+#include <linux/of.h>
+#include <linux/uaccess.h>
+
+#include "rpm_stats.h"
+
+#define RPM_MASTERS_BUF_LEN 400
+
+#define SNPRINTF(buf, size, format, ...) \
+	do { \
+		if (size > 0) { \
+			int ret; \
+			ret = snprintf(buf, size, format, ## __VA_ARGS__); \
+			if (ret > size) { \
+				buf += size; \
+				size = 0; \
+			} else { \
+				buf += ret; \
+				size -= ret; \
+			} \
+		} \
+	} while (0)
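+/*
+ * Successive SNPRINTF() calls append to the same buffer: each call advances
+ * buf and shrinks size, and once a write is truncated size drops to zero,
+ * turning all later calls into no-ops.
+ */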
+
+#define GET_MASTER_NAME(a, prvdata) \
+	((a >= prvdata->num_masters) ? "Invalid Master Name" : \
+	 prvdata->master_names[a])
+
+#define GET_FIELD(a) ((strnstr(#a, ".", 80) + 1))
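+/* e.g. GET_FIELD(record.xo_count) yields the string "xo_count" */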
+
+static DEFINE_MUTEX(msm_rpm_master_stats_mutex);
+
+struct msm_rpm_master_stats {
+	uint32_t active_cores;
+	uint32_t numshutdowns;
+	uint64_t shutdown_req;
+	uint64_t wakeup_ind;
+	uint64_t bringup_req;
+	uint64_t bringup_ack;
+	uint32_t wakeup_reason; /* 0 = rude wakeup, 1 = scheduled wakeup */
+	uint32_t last_sleep_transition_duration;
+	uint32_t last_wake_transition_duration;
+	uint32_t xo_count;
+	uint64_t xo_last_entered_at;
+	uint64_t xo_last_exited_at;
+	uint64_t xo_accumulated_duration;
+};
+
+struct msm_rpm_master_stats_private_data {
+	void __iomem *reg_base;
+	u32 len;
+	char **master_names;
+	u32 num_masters;
+	char buf[RPM_MASTERS_BUF_LEN];
+	struct msm_rpm_master_stats_platform_data *platform_data;
+};
+
+int msm_rpm_master_stats_file_close(struct inode *inode,
+		struct file *file)
+{
+	struct msm_rpm_master_stats_private_data *private = file->private_data;
+
+	mutex_lock(&msm_rpm_master_stats_mutex);
+	if (private->reg_base)
+		iounmap(private->reg_base);
+	kfree(file->private_data);
+	mutex_unlock(&msm_rpm_master_stats_mutex);
+
+	return 0;
+}
+
+static int msm_rpm_master_copy_stats(
+		struct msm_rpm_master_stats_private_data *prvdata)
+{
+	struct msm_rpm_master_stats record;
+	struct msm_rpm_master_stats_platform_data *pdata;
+	static int master_cnt;
+	int count, j = 0;
+	char *buf;
+	unsigned long active_cores;
+
+	/* Emit one master's stats per call; wrap around after the last one */
+	if (master_cnt > prvdata->num_masters - 1) {
+		master_cnt = 0;
+		return 0;
+	}
+
+	pdata = prvdata->platform_data;
+	count = RPM_MASTERS_BUF_LEN;
+	buf = prvdata->buf;
+
+	if (prvdata->platform_data->version == 2) {
+		SNPRINTF(buf, count, "%s\n",
+				GET_MASTER_NAME(master_cnt, prvdata));
+
+		record.shutdown_req = readq_relaxed(prvdata->reg_base +
+			(master_cnt * pdata->master_offset +
+			offsetof(struct msm_rpm_master_stats, shutdown_req)));
+
+		SNPRINTF(buf, count, "\t%s:0x%llX\n",
+			GET_FIELD(record.shutdown_req),
+			record.shutdown_req);
+
+		record.wakeup_ind = readq_relaxed(prvdata->reg_base +
+			(master_cnt * pdata->master_offset +
+			offsetof(struct msm_rpm_master_stats, wakeup_ind)));
+
+		SNPRINTF(buf, count, "\t%s:0x%llX\n",
+			GET_FIELD(record.wakeup_ind),
+			record.wakeup_ind);
+
+		record.bringup_req = readq_relaxed(prvdata->reg_base +
+			(master_cnt * pdata->master_offset +
+			offsetof(struct msm_rpm_master_stats, bringup_req)));
+
+		SNPRINTF(buf, count, "\t%s:0x%llX\n",
+			GET_FIELD(record.bringup_req),
+			record.bringup_req);
+
+		record.bringup_ack = readq_relaxed(prvdata->reg_base +
+			(master_cnt * pdata->master_offset +
+			offsetof(struct msm_rpm_master_stats, bringup_ack)));
+
+		SNPRINTF(buf, count, "\t%s:0x%llX\n",
+			GET_FIELD(record.bringup_ack),
+			record.bringup_ack);
+
+		record.xo_last_entered_at = readq_relaxed(prvdata->reg_base +
+			(master_cnt * pdata->master_offset +
+			offsetof(struct msm_rpm_master_stats,
+			xo_last_entered_at)));
+
+		SNPRINTF(buf, count, "\t%s:0x%llX\n",
+			GET_FIELD(record.xo_last_entered_at),
+			record.xo_last_entered_at);
+
+		record.xo_last_exited_at = readq_relaxed(prvdata->reg_base +
+			(master_cnt * pdata->master_offset +
+			offsetof(struct msm_rpm_master_stats,
+			xo_last_exited_at)));
+
+		SNPRINTF(buf, count, "\t%s:0x%llX\n",
+			GET_FIELD(record.xo_last_exited_at),
+			record.xo_last_exited_at);
+
+		record.xo_accumulated_duration =
+				readq_relaxed(prvdata->reg_base +
+				(master_cnt * pdata->master_offset +
+				offsetof(struct msm_rpm_master_stats,
+				xo_accumulated_duration)));
+
+		SNPRINTF(buf, count, "\t%s:0x%llX\n",
+			GET_FIELD(record.xo_accumulated_duration),
+			record.xo_accumulated_duration);
+
+		record.last_sleep_transition_duration =
+				readl_relaxed(prvdata->reg_base +
+				(master_cnt * pdata->master_offset +
+				offsetof(struct msm_rpm_master_stats,
+				last_sleep_transition_duration)));
+
+		SNPRINTF(buf, count, "\t%s:0x%x\n",
+			GET_FIELD(record.last_sleep_transition_duration),
+			record.last_sleep_transition_duration);
+
+		record.last_wake_transition_duration =
+				readl_relaxed(prvdata->reg_base +
+				(master_cnt * pdata->master_offset +
+				offsetof(struct msm_rpm_master_stats,
+				last_wake_transition_duration)));
+
+		SNPRINTF(buf, count, "\t%s:0x%x\n",
+			GET_FIELD(record.last_wake_transition_duration),
+			record.last_wake_transition_duration);
+
+		record.xo_count =
+				readl_relaxed(prvdata->reg_base +
+				(master_cnt * pdata->master_offset +
+				offsetof(struct msm_rpm_master_stats,
+				xo_count)));
+
+		SNPRINTF(buf, count, "\t%s:0x%x\n",
+			GET_FIELD(record.xo_count),
+			record.xo_count);
+
+		record.wakeup_reason = readl_relaxed(prvdata->reg_base +
+					(master_cnt * pdata->master_offset +
+					offsetof(struct msm_rpm_master_stats,
+					wakeup_reason)));
+
+		SNPRINTF(buf, count, "\t%s:0x%x\n",
+			GET_FIELD(record.wakeup_reason),
+			record.wakeup_reason);
+
+		record.numshutdowns = readl_relaxed(prvdata->reg_base +
+			(master_cnt * pdata->master_offset +
+			 offsetof(struct msm_rpm_master_stats, numshutdowns)));
+
+		SNPRINTF(buf, count, "\t%s:0x%x\n",
+			GET_FIELD(record.numshutdowns),
+			record.numshutdowns);
+
+		record.active_cores = readl_relaxed(prvdata->reg_base +
+			(master_cnt * pdata->master_offset) +
+			offsetof(struct msm_rpm_master_stats, active_cores));
+
+		SNPRINTF(buf, count, "\t%s:0x%x\n",
+			GET_FIELD(record.active_cores),
+			record.active_cores);
+	} else {
+		SNPRINTF(buf, count, "%s\n",
+				GET_MASTER_NAME(master_cnt, prvdata));
+
+		record.numshutdowns = readl_relaxed(prvdata->reg_base +
+				(master_cnt * pdata->master_offset) + 0x0);
+
+		SNPRINTF(buf, count, "\t%s:0x%0x\n",
+			GET_FIELD(record.numshutdowns),
+			record.numshutdowns);
+
+		record.active_cores = readl_relaxed(prvdata->reg_base +
+				(master_cnt * pdata->master_offset) + 0x4);
+
+		SNPRINTF(buf, count, "\t%s:0x%0x\n",
+			GET_FIELD(record.active_cores),
+			record.active_cores);
+	}
+
+	active_cores = record.active_cores;
+	j = find_first_bit(&active_cores, BITS_PER_LONG);
+	while (j < BITS_PER_LONG) {
+		SNPRINTF(buf, count, "\t\tcore%d\n", j);
+		j = find_next_bit(&active_cores, BITS_PER_LONG, j + 1);
+	}
+
+	master_cnt++;
+	return RPM_MASTERS_BUF_LEN - count;
+}
+
+static ssize_t msm_rpm_master_stats_file_read(struct file *file,
+				char __user *bufu, size_t count, loff_t *ppos)
+{
+	struct msm_rpm_master_stats_private_data *prvdata;
+	struct msm_rpm_master_stats_platform_data *pdata;
+	ssize_t ret;
+
+	mutex_lock(&msm_rpm_master_stats_mutex);
+	prvdata = file->private_data;
+	if (!prvdata) {
+		ret = -EINVAL;
+		goto exit;
+	}
+
+	pdata = prvdata->platform_data;
+	if (!pdata) {
+		ret = -EINVAL;
+		goto exit;
+	}
+
+	if (!bufu || count == 0) {
+		ret = -EINVAL;
+		goto exit;
+	}
+
+	if (*ppos <= pdata->phys_size) {
+		prvdata->len = msm_rpm_master_copy_stats(prvdata);
+		*ppos = 0;
+	}
+
+	ret = simple_read_from_buffer(bufu, count, ppos,
+			prvdata->buf, prvdata->len);
+exit:
+	mutex_unlock(&msm_rpm_master_stats_mutex);
+	return ret;
+}
+
+static int msm_rpm_master_stats_file_open(struct inode *inode,
+		struct file *file)
+{
+	struct msm_rpm_master_stats_private_data *prvdata;
+	struct msm_rpm_master_stats_platform_data *pdata;
+	int ret = 0;
+
+	mutex_lock(&msm_rpm_master_stats_mutex);
+	pdata = inode->i_private;
+
+	file->private_data =
+		kzalloc(sizeof(struct msm_rpm_master_stats_private_data),
+			GFP_KERNEL);
+
+	if (!file->private_data) {
+		ret = -ENOMEM;
+		goto exit;
+	}
+
+	prvdata = file->private_data;
+
+	prvdata->reg_base = ioremap(pdata->phys_addr_base,
+						pdata->phys_size);
+	if (!prvdata->reg_base) {
+		kfree(file->private_data);
+		prvdata = NULL;
+		pr_err("%s: ERROR could not ioremap start=%pa, len=%u\n",
+			__func__, &pdata->phys_addr_base,
+			pdata->phys_size);
+		ret = -EBUSY;
+		goto exit;
+	}
+
+	prvdata->len = 0;
+	prvdata->num_masters = pdata->num_masters;
+	prvdata->master_names = pdata->masters;
+	prvdata->platform_data = pdata;
+exit:
+	mutex_unlock(&msm_rpm_master_stats_mutex);
+	return ret;
+}
+
+static const struct file_operations msm_rpm_master_stats_fops = {
+	.owner	  = THIS_MODULE,
+	.open	  = msm_rpm_master_stats_file_open,
+	.read	  = msm_rpm_master_stats_file_read,
+	.release  = msm_rpm_master_stats_file_close,
+	.llseek   = no_llseek,
+};
+
+static struct msm_rpm_master_stats_platform_data
+			*msm_rpm_master_populate_pdata(struct device *dev)
+{
+	struct msm_rpm_master_stats_platform_data *pdata;
+	struct device_node *node = dev->of_node;
+	int rc = 0, i;
+
+	pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
+	if (!pdata)
+		goto err;
+
+	rc = of_property_read_u32(node, "qcom,master-stats-version",
+							&pdata->version);
+	if (rc) {
+		dev_err(dev, "master-stats-version missing rc=%d\n", rc);
+		goto err;
+	}
+
+	rc = of_property_read_u32(node, "qcom,master-offset",
+							&pdata->master_offset);
+	if (rc) {
+		dev_err(dev, "master-offset missing rc=%d\n", rc);
+		goto err;
+	}
+
+	pdata->num_masters = of_property_count_strings(node, "qcom,masters");
+	if (pdata->num_masters < 0) {
+		dev_err(dev, "Failed to get number of masters =%d\n",
+						pdata->num_masters);
+		goto err;
+	}
+
+	pdata->masters = devm_kzalloc(dev, sizeof(char *) * pdata->num_masters,
+								GFP_KERNEL);
+	if (!pdata->masters)
+		goto err;
+
+	/*
+	 * Read master names from DT
+	 */
+	for (i = 0; i < pdata->num_masters; i++) {
+		const char *master_name;
+
+		of_property_read_string_index(node, "qcom,masters",
+							i, &master_name);
+		pdata->masters[i] = devm_kzalloc(dev, sizeof(char) *
+				strlen(master_name) + 1, GFP_KERNEL);
+		if (!pdata->masters[i])
+			goto err;
+
+		strlcpy(pdata->masters[i], master_name,
+					strlen(master_name) + 1);
+	}
+	return pdata;
+err:
+	return NULL;
+}
+
+static  int msm_rpm_master_stats_probe(struct platform_device *pdev)
+{
+	struct dentry *dent;
+	struct msm_rpm_master_stats_platform_data *pdata;
+	struct resource *res = NULL;
+
+	if (!pdev)
+		return -EINVAL;
+
+	if (pdev->dev.of_node)
+		pdata = msm_rpm_master_populate_pdata(&pdev->dev);
+	else
+		pdata = pdev->dev.platform_data;
+
+	if (!pdata) {
+		dev_err(&pdev->dev, "%s: Unable to get pdata\n", __func__);
+		return -ENOMEM;
+	}
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+	if (!res) {
+		dev_err(&pdev->dev,
+			"%s: Failed to get IO resource from platform device",
+			__func__);
+		return -ENXIO;
+	}
+
+	pdata->phys_addr_base = res->start;
+	pdata->phys_size = resource_size(res);
+
+	dent = debugfs_create_file("rpm_master_stats", S_IRUGO, NULL,
+					pdata, &msm_rpm_master_stats_fops);
+
+	if (!dent) {
+		dev_err(&pdev->dev, "%s: ERROR debugfs_create_file failed\n",
+								__func__);
+		return -ENOMEM;
+	}
+
+	platform_set_drvdata(pdev, dent);
+	return 0;
+}
+
+static int msm_rpm_master_stats_remove(struct platform_device *pdev)
+{
+	struct dentry *dent;
+
+	dent = platform_get_drvdata(pdev);
+	debugfs_remove(dent);
+	platform_set_drvdata(pdev, NULL);
+	return 0;
+}
+
+static const struct of_device_id rpm_master_table[] = {
+	{.compatible = "qcom,rpm-master-stats"},
+	{},
+};
+
+static struct platform_driver msm_rpm_master_stats_driver = {
+	.probe	= msm_rpm_master_stats_probe,
+	.remove = msm_rpm_master_stats_remove,
+	.driver = {
+		.name = "msm_rpm_master_stats",
+		.owner = THIS_MODULE,
+		.of_match_table = rpm_master_table,
+	},
+};
+
+static int __init msm_rpm_master_stats_init(void)
+{
+	return platform_driver_register(&msm_rpm_master_stats_driver);
+}
+
+static void __exit msm_rpm_master_stats_exit(void)
+{
+	platform_driver_unregister(&msm_rpm_master_stats_driver);
+}
+
+module_init(msm_rpm_master_stats_init);
+module_exit(msm_rpm_master_stats_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("MSM RPM Master Statistics driver");
+MODULE_ALIAS("platform:msm_master_stat_log");
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/soc/qcom/rpm_rail_stats.c	2019-01-22 16:16:26.671275095 +0100
@@ -0,0 +1,354 @@
+/* Copyright (c) 2015,2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/mm.h>
+#include <linux/of.h>
+#include <linux/uaccess.h>
+
+#include "rpm_stats.h"
+
+#define RPM_RAIL_BUF_LEN 1300
+
+#define SNPRINTF(buf, size, format, ...) \
+	do { \
+		if (size > 0) { \
+			int ret; \
+			ret = snprintf(buf, size, format, ## __VA_ARGS__); \
+			if (ret > size) { \
+				buf += size; \
+				size = 0; \
+			} else { \
+				buf += ret; \
+				size -= ret; \
+			} \
+		} \
+	} while (0)
+
+#define NAMELEN (sizeof(uint32_t)+1)
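+/* rail and corner names are four ASCII bytes packed into a u32, plus a NUL */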
+
+static DEFINE_MUTEX(msm_rpm_rail_stats_mutex);
+
+struct msm_rpm_rail_stats_platform_data {
+	phys_addr_t phys_addr_base;
+	u32 phys_size;
+};
+
+struct msm_rpm_rail_corner {
+	uint64_t time;
+	uint32_t corner;
+	uint32_t reserved;
+};
+
+struct msm_rpm_rail_type {
+	uint32_t rail;
+	uint32_t num_corners;
+	uint32_t current_corner;
+	uint32_t last_entered;
+};
+
+struct msm_rpm_rail_stats {
+	uint32_t num_rails;
+	uint32_t reserved;
+};
+
+struct msm_rpm_rail_stats_private_data {
+	void __iomem *reg_base;
+	u32 len;
+	char buf[RPM_RAIL_BUF_LEN];
+	struct msm_rpm_rail_stats_platform_data *platform_data;
+};
+
+int msm_rpm_rail_stats_file_close(struct inode *inode, struct file *file)
+{
+	struct msm_rpm_rail_stats_private_data *private = file->private_data;
+
+	mutex_lock(&msm_rpm_rail_stats_mutex);
+	if (private->reg_base)
+		iounmap(private->reg_base);
+	kfree(file->private_data);
+	mutex_unlock(&msm_rpm_rail_stats_mutex);
+
+	return 0;
+}
+
+static int msm_rpm_rail_corner_copy(void __iomem **base, char **buf,
+					int count)
+{
+	struct msm_rpm_rail_corner rc;
+	char corner[NAMELEN];
+
+	memset(&rc, 0, sizeof(rc));
+	memcpy_fromio(&rc, *base, sizeof(rc));
+
+	corner[NAMELEN - 1] = '\0';
+	memcpy(corner, &rc.corner, NAMELEN - 1);
+	SNPRINTF(*buf, count, "\t\tcorner:%-5s time:%-16llu\n",
+		corner, rc.time);
+
+	*base += sizeof(rc);
+
+	return count;
+}
+
+static int msm_rpm_rail_type_copy(void __iomem **base, char **buf, int count)
+{
+	struct msm_rpm_rail_type rt;
+	char rail[NAMELEN];
+	int i;
+
+	memset(&rt, 0, sizeof(rt));
+	memcpy_fromio(&rt, *base, sizeof(rt));
+
+	rail[NAMELEN - 1] = '\0';
+	memcpy(rail, &rt.rail, NAMELEN - 1);
+	SNPRINTF(*buf, count,
+		"\trail:%-2s \tnum_corners:%-2u current_corner:%-2u last_entered:%-8u\n",
+		rail, rt.num_corners, rt.current_corner, rt.last_entered);
+
+	*base += sizeof(rt);
+
+	for (i = 0; i < rt.num_corners; i++)
+		count = msm_rpm_rail_corner_copy(base, buf, count);
+
+	return count;
+}
+
+static int msm_rpm_rail_stats_copy(
+		struct msm_rpm_rail_stats_private_data *prvdata)
+{
+	struct msm_rpm_rail_stats rs;
+	void __iomem *base = prvdata->reg_base;
+	char *buf = prvdata->buf;
+	int count = RPM_RAIL_BUF_LEN;
+	int i;
+
+	memset(&rs, 0, sizeof(rs));
+	memcpy_fromio(&rs, base, sizeof(rs));
+
+	SNPRINTF(buf, count, "Number of Rails:%u\n", rs.num_rails);
+
+	base = prvdata->reg_base + sizeof(rs);
+
+	for (i = 0; i < rs.num_rails; i++)
+		count = msm_rpm_rail_type_copy(&base, &buf, count);
+
+	return RPM_RAIL_BUF_LEN - count;
+}
+
+static ssize_t msm_rpm_rail_stats_file_read(struct file *file,
+				char __user *bufu, size_t count, loff_t *ppos)
+{
+	struct msm_rpm_rail_stats_private_data *prvdata;
+	struct msm_rpm_rail_stats_platform_data *pdata;
+	ssize_t ret;
+
+	mutex_lock(&msm_rpm_rail_stats_mutex);
+	prvdata = file->private_data;
+	if (!prvdata) {
+		ret = -EINVAL;
+		goto exit;
+	}
+
+	if (!prvdata->platform_data) {
+		ret = -EINVAL;
+		goto exit;
+	}
+
+	if (!bufu || count == 0) {
+		ret = -EINVAL;
+		goto exit;
+	}
+
+	pdata = prvdata->platform_data;
+
+	if (*ppos <= pdata->phys_size) {
+		prvdata->len = msm_rpm_rail_stats_copy(prvdata);
+		*ppos = 0;
+	}
+
+	ret = simple_read_from_buffer(bufu, count, ppos,
+			prvdata->buf, prvdata->len);
+exit:
+	mutex_unlock(&msm_rpm_rail_stats_mutex);
+	return ret;
+}
+
+static int msm_rpm_rail_stats_file_open(struct inode *inode,
+		struct file *file)
+{
+	struct msm_rpm_rail_stats_private_data *prvdata;
+	struct msm_rpm_rail_stats_platform_data *pdata;
+	int ret = 0;
+
+	mutex_lock(&msm_rpm_rail_stats_mutex);
+	pdata = inode->i_private;
+
+	file->private_data =
+		kzalloc(sizeof(struct msm_rpm_rail_stats_private_data),
+			GFP_KERNEL);
+
+	if (!file->private_data) {
+		ret = -ENOMEM;
+		goto exit;
+	}
+
+	prvdata = file->private_data;
+
+	prvdata->reg_base = ioremap(pdata->phys_addr_base,
+						pdata->phys_size);
+	if (!prvdata->reg_base) {
+		kfree(file->private_data);
+		prvdata = NULL;
+		pr_err("%s: ERROR could not ioremap start=%pa, len=%u\n",
+			__func__, &pdata->phys_addr_base,
+			pdata->phys_size);
+		ret = -EBUSY;
+		goto exit;
+	}
+
+	prvdata->len = 0;
+	prvdata->platform_data = pdata;
+exit:
+	mutex_unlock(&msm_rpm_rail_stats_mutex);
+	return ret;
+}
+
+
+static const struct file_operations msm_rpm_rail_stats_fops = {
+	.owner	  = THIS_MODULE,
+	.open	  = msm_rpm_rail_stats_file_open,
+	.read	  = msm_rpm_rail_stats_file_read,
+	.release  = msm_rpm_rail_stats_file_close,
+	.llseek   = no_llseek,
+};
+
+static int msm_rpm_rail_stats_probe(struct platform_device *pdev)
+{
+	struct dentry *dent;
+	struct msm_rpm_rail_stats_platform_data *pdata;
+	struct resource *res;
+	struct resource *offset;
+	struct device_node *node;
+	uint32_t offset_addr;
+	void __iomem *phys_ptr;
+
+	if (!pdev)
+		return -EINVAL;
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+						"phys_addr_base");
+	if (!res)
+		return -EINVAL;
+
+	offset = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+						"offset_addr");
+	if (!offset)
+		return -EINVAL;
+
+	phys_ptr = ioremap_nocache(offset->start, SZ_4);
+	if (!phys_ptr) {
+		dev_err(&pdev->dev, "%s: Failed to ioremap address.\n",
+								__func__);
+		return -ENODEV;
+	}
+	offset_addr = readl_relaxed(phys_ptr);
+	iounmap(phys_ptr);
+
+	if (!offset_addr) {
+		dev_err(&pdev->dev, "%s: RPM Rail Stats not available: Exit\n",
+								__func__);
+		return 0;
+	}
+
+	node = pdev->dev.of_node;
+
+	if (pdev->dev.platform_data) {
+		pdata = pdev->dev.platform_data;
+	} else if (node) {
+		pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
+		if (!pdata)
+			return -ENOMEM;
+	} else {
+		dev_err(&pdev->dev, "%s: pdata is not available: Exit\n",
+								__func__);
+		return 0;
+	}
+
+	pdata->phys_addr_base = res->start + offset_addr;
+	pdata->phys_size = resource_size(res);
+
+	dent = debugfs_create_file("rpm_rail_stats", S_IRUGO, NULL,
+					pdata, &msm_rpm_rail_stats_fops);
+
+	if (!dent) {
+		dev_err(&pdev->dev, "%s: ERROR debugfs_create_file failed\n",
+								__func__);
+		return -ENOMEM;
+	}
+
+	platform_set_drvdata(pdev, dent);
+	return 0;
+}
+
+static int msm_rpm_rail_stats_remove(struct platform_device *pdev)
+{
+	struct dentry *dent = platform_get_drvdata(pdev);
+
+	debugfs_remove(dent);
+	platform_set_drvdata(pdev, NULL);
+	return 0;
+}
+
+static const struct of_device_id rpm_rail_table[] = {
+	{.compatible = "qcom,rpm-rail-stats"},
+	{},
+};
+
+static struct platform_driver msm_rpm_rail_stats_driver = {
+	.probe	= msm_rpm_rail_stats_probe,
+	.remove = msm_rpm_rail_stats_remove,
+	.driver = {
+		.name = "msm_rpm_rail_stats",
+		.owner = THIS_MODULE,
+		.of_match_table = rpm_rail_table,
+	},
+};
+
+static int __init msm_rpm_rail_stats_init(void)
+{
+	return platform_driver_register(&msm_rpm_rail_stats_driver);
+}
+
+static void __exit msm_rpm_rail_stats_exit(void)
+{
+	platform_driver_unregister(&msm_rpm_rail_stats_driver);
+}
+
+module_init(msm_rpm_rail_stats_init);
+module_exit(msm_rpm_rail_stats_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("MSM RPM rail Statistics driver");
+MODULE_ALIAS("platform:msm_rail_stat_log");
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/soc/qcom/rpm_rbcpr_stats_v2.c	2019-01-22 16:16:26.671275095 +0100
@@ -0,0 +1,421 @@
+/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/mm.h>
+#include <linux/mutex.h>
+#include <linux/sort.h>
+#include <linux/uaccess.h>
+
+#define RBCPR_BUF_LEN 8000
+#define RBCPR_STATS_MAX_SIZE SZ_2K
+#define RBCPR_MAX_RAILS 4
+#define RBCPR_NUM_RECMNDS 3
+#define RBCPR_NUM_CORNERS 3
+
+#define FIELD(a)     ((strnstr(#a, "->", 80) + 2))
+#define PRINT(buf, pos, format, ...) \
+	((pos < RBCPR_BUF_LEN) ? snprintf((buf + pos), (RBCPR_BUF_LEN - pos),\
+	format, ## __VA_ARGS__) : 0)
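+
+/*
+ * FIELD() stringifies its argument and returns a pointer just past the
+ * "->", so FIELD(rail->margin) evaluates to the string "margin".
+ * PRINT() appends to buf at pos and yields the number of characters
+ * written (or 0 once the buffer is full), so callers accumulate:
+ *
+ *	*pos += PRINT(buf, *pos, "(%s: %d)", FIELD(rail->margin),
+ *			rail->margin);
+ */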
+
+enum {
+	CORNER_OFF,
+	CORNER_RETENTION,
+	CORNER_SVS_KRAIT,
+	CORNER_SVS_SOC,
+	CORNER_NOMINAL,
+	CORNER_TURBO,
+	CORNER_SUPER_TURBO,
+	CORNER_MAX,
+};
+
+struct rbcpr_recmnd_data_type {
+	uint32_t microvolts;
+	uint64_t timestamp;
+};
+
+struct rbcpr_corners_data_type {
+	int32_t efuse_adjustment;
+	uint32_t programmed_voltage;
+	uint32_t isr_count;
+	uint32_t min_count;
+	uint32_t max_count;
+	struct rbcpr_recmnd_data_type rbcpr_recmnd[RBCPR_NUM_RECMNDS];
+};
+
+struct rbcpr_rail_stats_header_type {
+	uint32_t num_corners;
+	uint32_t num_latest_recommends;
+};
+
+struct rbcpr_rail_stats_footer_type {
+	uint32_t current_corner;
+	uint32_t railway_voltage;
+	uint32_t off_corner;
+	uint32_t margin;
+};
+
+struct rbcpr_stats_type {
+	uint32_t num_rails;
+	uint32_t status;
+};
+
+struct rbcpr_data_type {
+	void __iomem *start;
+	uint32_t len;
+	char buf[RBCPR_BUF_LEN];
+};
+
+static char *rbcpr_rail_labels[] = {
+	[0] = "VDD-CX",
+	[1] = "VDD-GFX",
+};
+
+static char *rbcpr_corner_string[] = {
+	[CORNER_OFF] = "CORNERS_OFF",
+	[CORNER_RETENTION] = "RETENTION",
+	[CORNER_SVS_KRAIT] = "SVS",
+	[CORNER_SVS_SOC] = "SVS_SOC",
+	[CORNER_NOMINAL] = "NOMINAL",
+	[CORNER_TURBO] = "TURBO",
+	[CORNER_SUPER_TURBO] = "SUPER_TURBO",
+};
+
+#define CORNER_STRING(a)	\
+	((a >= CORNER_MAX) ? "INVALID Corner" : rbcpr_corner_string[a])
+
+static struct rbcpr_data_type *rbcpr_data;
+
+static void msm_rpmrbcpr_print_stats_header(
+		struct rbcpr_stats_type *rbcpr_stats, char *buf,
+						uint32_t *pos)
+{
+	*pos += PRINT(buf, *pos, "\n:RBCPR STATS  ");
+	*pos += PRINT(buf, *pos, "(%s: %d)", FIELD(rbcpr_stats->num_rails),
+				rbcpr_stats->num_rails);
+	*pos += PRINT(buf, *pos, "(%s: %d)", FIELD(rbcpr_stats->status),
+				rbcpr_stats->status);
+}
+
+static void msm_rpmrbcpr_print_rail_header(
+		struct rbcpr_rail_stats_header_type *rail_header, char *buf,
+							uint32_t *pos)
+{
+	*pos += PRINT(buf, *pos, "(%s: %d)", FIELD(rail_header->num_corners),
+				rail_header->num_corners);
+	*pos += PRINT(buf, *pos, "(%s: %d)",
+			FIELD(rail_header->num_latest_recommends),
+			rail_header->num_latest_recommends);
+}
+
+static void msm_rpmrbcpr_print_corner_recmnds(
+		struct rbcpr_recmnd_data_type *rbcpr_recmnd, char *buf,
+							uint32_t *pos)
+{
+	*pos += PRINT(buf, *pos, "\n\t\t\t :(%s: %d) ",
+						FIELD(rbcpr_recmd->microvolts),
+						rbcpr_recmnd->microvolts);
+	*pos += PRINT(buf, *pos, " (%s: %lld)", FIELD(rbcpr_recmd->timestamp),
+						rbcpr_recmnd->timestamp);
+}
+
+static void msm_rpmrbcpr_print_corner_data(
+		struct rbcpr_corners_data_type *corner, char *buf,
+			uint32_t num_corners, uint32_t *pos)
+{
+	int i;
+
+	*pos += PRINT(buf, *pos, "(%s: %d)",
+			FIELD(corner->efuse_adjustment),
+					corner->efuse_adjustment);
+	*pos += PRINT(buf, *pos, "(%s: %d)",
+			FIELD(corner->programmed_voltage),
+					corner->programmed_voltage);
+	*pos += PRINT(buf, *pos, "(%s: %d)",
+			FIELD(corner->isr_count), corner->isr_count);
+	*pos += PRINT(buf, *pos, "(%s: %d)",
+			FIELD(corner->min_count), corner->min_count);
+	*pos += PRINT(buf, *pos, "(%s: %d)\n",
+			FIELD(corner->max_count), corner->max_count);
+	*pos += PRINT(buf, *pos, "\t\t\t:Latest Recommends");
+	for (i = 0; i < num_corners; i++)
+		msm_rpmrbcpr_print_corner_recmnds(&corner->rbcpr_recmnd[i], buf,
+						pos);
+}
+
+static void msm_rpmrbcpr_print_rail_footer(
+		struct rbcpr_rail_stats_footer_type *rail, char *buf,
+							uint32_t *pos)
+{
+	*pos += PRINT(buf, *pos, "(%s: %s)", FIELD(rail->current_corner),
+			CORNER_STRING(rail->current_corner));
+	*pos += PRINT(buf, *pos, "(%s: %d)",
+			FIELD(rail->railway_voltage), rail->railway_voltage);
+	*pos += PRINT(buf, *pos, "(%s: %d)",
+			FIELD(rail->off_corner), rail->off_corner);
+	*pos += PRINT(buf, *pos, "(%s: %d)\n",
+			FIELD(rail->margin), rail->margin);
+}
+
+static uint32_t msm_rpmrbcpr_read_rpm_data(void)
+{
+	uint32_t read_offset = 0;
+	static struct rbcpr_stats_type rbcpr_stats_header;
+	uint32_t buffer_offset = 0;
+	char *buf = rbcpr_data->buf;
+	int i, j;
+
+	memcpy_fromio(&rbcpr_stats_header, rbcpr_data->start,
+					sizeof(rbcpr_stats_header));
+	read_offset += sizeof(rbcpr_stats_header);
+	msm_rpmrbcpr_print_stats_header(&rbcpr_stats_header, buf,
+							&buffer_offset);
+
+	for (i = 0; i < rbcpr_stats_header.num_rails; i++) {
+		static struct rbcpr_rail_stats_header_type rail_header;
+		static struct rbcpr_rail_stats_footer_type rail_footer;
+
+		memcpy_fromio(&rail_header, (rbcpr_data->start + read_offset),
+					sizeof(rail_header));
+		read_offset += sizeof(rail_header);
+		buffer_offset += PRINT(buf, buffer_offset, "\n:%s Rail Data ",
+							rbcpr_rail_labels[i]);
+		msm_rpmrbcpr_print_rail_header(&rail_header, buf,
+							&buffer_offset);
+
+		for (j = 0; j < rail_header.num_corners; j++) {
+			static struct rbcpr_corners_data_type corner;
+			uint32_t corner_index;
+
+			memcpy_fromio(&corner,
+					(rbcpr_data->start + read_offset),
+					sizeof(corner));
+			read_offset += sizeof(corner);
+
+			/*
+			 * RPM doesn't include corner type in the data for the
+			 * corner. For now add this hack to know which corners
+			 * are used based on number of corners for the rail.
+			 */
+			corner_index = j + 3;
+			if (rail_header.num_corners == 3 && j == 2)
+				corner_index++;
+
+			buffer_offset += PRINT(buf, buffer_offset,
+				"\n\t\t:Corner Data: %s ",
+					CORNER_STRING(corner_index));
+			msm_rpmrbcpr_print_corner_data(&corner, buf,
+				rail_header.num_latest_recommends,
+				&buffer_offset);
+		}
+		buffer_offset += PRINT(buf, buffer_offset,
+				"\n\t\t");
+		memcpy_fromio(&rail_footer, (rbcpr_data->start + read_offset),
+					sizeof(rail_footer));
+		read_offset += sizeof(rail_footer);
+		msm_rpmrbcpr_print_rail_footer(&rail_footer, buf,
+							&buffer_offset);
+	}
+	return buffer_offset;
+}
+
+static int msm_rpmrbcpr_file_read(struct seq_file *m, void *data)
+{
+	struct rbcpr_data_type *pdata = m->private;
+	int ret = 0;
+	int curr_status_counter;
+	static int prev_status_counter;
+	static DEFINE_MUTEX(rbcpr_lock);
+
+	mutex_lock(&rbcpr_lock);
+	if (!pdata) {
+		pr_err("%s pdata is null", __func__);
+		ret = -EINVAL;
+		goto exit_rpmrbcpr_file_read;
+	}
+
+	/* Read RPM stats */
+	curr_status_counter = readl_relaxed(pdata->start +
+		offsetof(struct rbcpr_stats_type, status));
+	if (curr_status_counter != prev_status_counter) {
+		pdata->len = msm_rpmrbcpr_read_rpm_data();
+		prev_status_counter = curr_status_counter;
+	}
+
+	seq_printf(m, "%s", pdata->buf);
+
+exit_rpmrbcpr_file_read:
+	mutex_unlock(&rbcpr_lock);
+	return ret;
+}
+
+static int msm_rpmrbcpr_file_open(struct inode *inode, struct file *file)
+{
+	if (!rbcpr_data->start)
+		return -ENODEV;
+	return single_open(file, msm_rpmrbcpr_file_read, inode->i_private);
+}
+
+static const struct file_operations msm_rpmrbcpr_fops = {
+	.open		= msm_rpmrbcpr_file_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+
+static int msm_rpmrbcpr_validate(struct platform_device *pdev)
+{
+	int ret = 0;
+	uint32_t num_rails;
+
+	num_rails = readl_relaxed(rbcpr_data->start);
+
+	if (num_rails > RBCPR_MAX_RAILS) {
+		pr_err("%s: Invalid number of RPM RBCPR rails %d",
+				__func__, num_rails);
+		ret = -EFAULT;
+	}
+
+	return ret;
+}
+
+static int msm_rpmrbcpr_probe(struct platform_device *pdev)
+{
+	struct dentry *dent;
+	int ret = 0;
+	struct resource *res = NULL;
+	void __iomem *start_ptr = NULL;
+	uint32_t rbcpr_start_addr = 0;
+	char *key = NULL;
+	uint32_t start_addr;
+
+	rbcpr_data = devm_kzalloc(&pdev->dev,
+				sizeof(struct rbcpr_data_type), GFP_KERNEL);
+
+	if (!rbcpr_data)
+		return -ENOMEM;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+	if (!res) {
+		pr_err("%s: Failed to get IO resource from platform device",
+				__func__);
+		ret = -ENXIO;
+		goto rbcpr_probe_fail;
+	}
+
+	key = "qcom,start-offset";
+	ret = of_property_read_u32(pdev->dev.of_node, key, &start_addr);
+
+	if (ret) {
+		pr_err("%s: Failed to get start offset", __func__);
+		goto rbcpr_probe_fail;
+	}
+
+	start_addr += res->start;
+	start_ptr = ioremap_nocache(start_addr, 4);
+
+	if (!start_ptr) {
+		pr_err("%s: Failed to remap RBCPR start pointer\n",
+					__func__);
+		ret = -ENOMEM;
+		goto rbcpr_probe_fail;
+	}
+
+	rbcpr_start_addr = res->start + readl_relaxed(start_ptr);
+	res->end = rbcpr_start_addr + RBCPR_STATS_MAX_SIZE;
+
+	if ((rbcpr_start_addr > (res->end - RBCPR_STATS_MAX_SIZE)) ||
+			(rbcpr_start_addr < start_addr)) {
+		pr_err("%s: Invalid start address for rbcpr stats 0x%x\n",
+			__func__, rbcpr_start_addr);
+		ret = -EINVAL;
+		goto rbcpr_probe_fail;
+	}
+
+	rbcpr_data->start = devm_ioremap_nocache(&pdev->dev, rbcpr_start_addr,
+							RBCPR_STATS_MAX_SIZE);
+
+	if (!rbcpr_data->start) {
+		pr_err("%s: Failed to remap RBCPR start address\n",
+				__func__);
+		ret = -ENOMEM;
+		goto rbcpr_probe_fail;
+	}
+
+	ret = msm_rpmrbcpr_validate(pdev);
+
+	if (ret)
+		goto rbcpr_probe_fail;
+
+	dent = debugfs_create_file("rpm_rbcpr", S_IRUGO, NULL,
+			rbcpr_data, &msm_rpmrbcpr_fops);
+
+	if (!dent) {
+		pr_err("%s: error debugfs_create_file failed\n", __func__);
+		ret = -ENOMEM;
+		goto rbcpr_probe_fail;
+	}
+
+	platform_set_drvdata(pdev, dent);
+rbcpr_probe_fail:
+	if (start_ptr)
+		iounmap(start_ptr);
+	return ret;
+}
+
+static int msm_rpmrbcpr_remove(struct platform_device *pdev)
+{
+	struct dentry *dent;
+
+	dent = platform_get_drvdata(pdev);
+	debugfs_remove(dent);
+	platform_set_drvdata(pdev, NULL);
+	return 0;
+}
+
+static const struct of_device_id rpmrbcpr_stats_table[] = {
+	{.compatible = "qcom,rpmrbcpr-stats"},
+	{},
+};
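+
+/*
+ * For reference, a matching devicetree node would look roughly like
+ * the sketch below (the register range and offset are hypothetical):
+ *
+ *	rpm_rbcpr_stats {
+ *		compatible = "qcom,rpmrbcpr-stats";
+ *		reg = <0x200000 0x1000>;
+ *		qcom,start-offset = <0x190010>;
+ *	};
+ */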
+
+static struct platform_driver msm_rpmrbcpr_driver = {
+	.probe  = msm_rpmrbcpr_probe,
+	.remove = msm_rpmrbcpr_remove,
+	.driver = {
+		.name = "msm_rpmrbcpr_stats",
+		.owner = THIS_MODULE,
+		.of_match_table = rpmrbcpr_stats_table,
+	},
+};
+
+static int __init msm_rpmrbcpr_init(void)
+{
+	return platform_driver_register(&msm_rpmrbcpr_driver);
+}
+
+static void __exit msm_rpmrbcpr_exit(void)
+{
+	platform_driver_unregister(&msm_rpmrbcpr_driver);
+}
+
+module_init(msm_rpmrbcpr_init);
+module_exit(msm_rpmrbcpr_exit);
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/soc/qcom/rpm-smd.c	2019-01-22 16:16:26.671275095 +0100
@@ -0,0 +1,2174 @@
+/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/bug.h>
+#include <linux/completion.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/of_address.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+#include <linux/device.h>
+#include <linux/notifier.h>
+#include <linux/slab.h>
+#include <linux/workqueue.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/rbtree.h>
+#include <soc/qcom/rpm-notifier.h>
+#include <soc/qcom/rpm-smd.h>
+#include <soc/qcom/smd.h>
+#include <soc/qcom/glink_rpm_xprt.h>
+#include <soc/qcom/glink.h>
+
+#define CREATE_TRACE_POINTS
+#include <trace/events/trace_rpm_smd.h>
+
+/* Debug Definitions */
+enum {
+	MSM_RPM_LOG_REQUEST_PRETTY	= BIT(0),
+	MSM_RPM_LOG_REQUEST_RAW		= BIT(1),
+	MSM_RPM_LOG_REQUEST_SHOW_MSG_ID	= BIT(2),
+};
+
+static int msm_rpm_debug_mask;
+module_param_named(
+	debug_mask, msm_rpm_debug_mask, int, S_IRUGO | S_IWUSR
+);
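+
+/*
+ * The mask can be toggled at runtime through the module parameter,
+ * e.g. (assuming the module is named rpm_smd):
+ *
+ *	echo 3 > /sys/module/rpm_smd/parameters/debug_mask
+ *
+ * which enables both pretty-printed and raw request logging.
+ */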
+
+struct msm_rpm_driver_data {
+	const char *ch_name;
+	uint32_t ch_type;
+	smd_channel_t *ch_info;
+	struct work_struct work;
+	spinlock_t smd_lock_write;
+	spinlock_t smd_lock_read;
+	struct completion smd_open;
+};
+
+struct glink_apps_rpm_data {
+	const char *name;
+	const char *edge;
+	const char *xprt;
+	void *glink_handle;
+	struct glink_link_info *link_info;
+	struct glink_open_config *open_cfg;
+	struct work_struct work;
+};
+
+static bool glink_enabled;
+static struct glink_apps_rpm_data *glink_data;
+
+#define DEFAULT_BUFFER_SIZE 256
+#define DEBUG_PRINT_BUFFER_SIZE 512
+#define MAX_SLEEP_BUFFER 128
+#define GFP_FLAG(noirq) (noirq ? GFP_ATOMIC : GFP_NOIO)
+#define INV_RSC "resource does not exist"
+#define ERR "err\0"
+#define MAX_ERR_BUFFER_SIZE 128
+#define MAX_WAIT_ON_ACK 24
+#define INIT_ERROR 1
+#define V1_PROTOCOL_VERSION 0x31726576 /* rev1 */
+#define V0_PROTOCOL_VERSION 0 /* rev0 */
+#define RPM_MSG_TYPE_OFFSET 16
+#define RPM_MSG_TYPE_SIZE 8
+#define RPM_SET_TYPE_OFFSET 28
+#define RPM_SET_TYPE_SIZE 4
+#define RPM_REQ_LEN_OFFSET 0
+#define RPM_REQ_LEN_SIZE 16
+#define RPM_MSG_VERSION_OFFSET 24
+#define RPM_MSG_VERSION_SIZE 8
+#define RPM_MSG_VERSION 1
+#define RPM_MSG_SET_OFFSET 28
+#define RPM_MSG_SET_SIZE 4
+#define RPM_RSC_ID_OFFSET 16
+#define RPM_RSC_ID_SIZE 12
+#define RPM_DATA_LEN_OFFSET 0
+#define RPM_DATA_LEN_SIZE 16
+#define RPM_HDR_SIZE ((rpm_msg_fmt_ver == RPM_MSG_V0_FMT) ?\
+		sizeof(struct rpm_v0_hdr) : sizeof(struct rpm_v1_hdr))
+#define CLEAR_FIELD(offset, size) (~GENMASK(offset + size - 1, offset))
+
+static ATOMIC_NOTIFIER_HEAD(msm_rpm_sleep_notifier);
+static bool standalone;
+static int probe_status = -EPROBE_DEFER;
+static int msm_rpm_read_smd_data(char *buf);
+static void msm_rpm_process_ack(uint32_t msg_id, int errno);
+
+int msm_rpm_register_notifier(struct notifier_block *nb)
+{
+	return atomic_notifier_chain_register(&msm_rpm_sleep_notifier, nb);
+}
+
+int msm_rpm_unregister_notifier(struct notifier_block *nb)
+{
+	return atomic_notifier_chain_unregister(&msm_rpm_sleep_notifier, nb);
+}
+
+static struct workqueue_struct *msm_rpm_smd_wq;
+
+enum {
+	MSM_RPM_MSG_REQUEST_TYPE = 0,
+	MSM_RPM_MSG_TYPE_NR,
+};
+
+static const uint32_t msm_rpm_request_service_v1[MSM_RPM_MSG_TYPE_NR] = {
+	0x716572, /* 'req\0' */
+};
+
+enum {
+	RPM_V1_REQUEST_SERVICE,
+	RPM_V1_SYSTEMDB_SERVICE,
+	RPM_V1_COMMAND_SERVICE,
+	RPM_V1_ACK_SERVICE,
+	RPM_V1_NACK_SERVICE,
+} msm_rpm_request_service_v2;
+
+struct rpm_v0_hdr {
+	uint32_t service_type;
+	uint32_t request_len;
+};
+
+struct rpm_v1_hdr {
+	uint32_t request_hdr;
+};
+
+struct rpm_message_header_v0 {
+	struct rpm_v0_hdr hdr;
+	uint32_t msg_id;
+	enum msm_rpm_set set;
+	uint32_t resource_type;
+	uint32_t resource_id;
+	uint32_t data_len;
+};
+
+struct rpm_message_header_v1 {
+	struct rpm_v1_hdr hdr;
+	uint32_t msg_id;
+	uint32_t resource_type;
+	uint32_t request_details;
+};
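+
+/*
+ * Layout of the packed v1 words, as implied by the *_OFFSET/*_SIZE
+ * macros above (bit ranges, least significant bit first):
+ *
+ *	hdr.request_hdr:  req_len[15:0]  msg_type[23:16]  version[31:24]
+ *	request_details:  data_len[15:0] rsc_id[27:16]    set[31:28]
+ */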
+
+struct msm_rpm_ack_msg_v0 {
+	uint32_t req;
+	uint32_t req_len;
+	uint32_t rsc_id;
+	uint32_t msg_len;
+	uint32_t id_ack;
+};
+
+struct msm_rpm_ack_msg_v1 {
+	uint32_t request_hdr;
+	uint32_t id_ack;
+};
+
+struct kvp {
+	unsigned int k;
+	unsigned int s;
+};
+
+struct msm_rpm_kvp_data {
+	uint32_t key;
+	uint32_t nbytes; /* number of bytes */
+	uint8_t *value;
+	bool valid;
+};
+
+struct slp_buf {
+	struct rb_node node;
+	char ubuf[MAX_SLEEP_BUFFER];
+	char *buf;
+	bool valid;
+};
+
+enum rpm_msg_fmts {
+	RPM_MSG_V0_FMT,
+	RPM_MSG_V1_FMT
+};
+
+static uint32_t rpm_msg_fmt_ver;
+module_param_named(
+	rpm_msg_fmt_ver, rpm_msg_fmt_ver, uint, S_IRUGO
+);
+
+static struct rb_root tr_root = RB_ROOT;
+static int (*msm_rpm_send_buffer)(char *buf, uint32_t size, bool noirq);
+static int msm_rpm_send_smd_buffer(char *buf, uint32_t size, bool noirq);
+static int msm_rpm_glink_send_buffer(char *buf, uint32_t size, bool noirq);
+static uint32_t msm_rpm_get_next_msg_id(void);
+
+static inline uint32_t get_offset_value(uint32_t val, uint32_t offset,
+		uint32_t size)
+{
+	return (((val) & GENMASK(offset + size - 1, offset))
+		>> offset);
+}
+
+static inline void change_offset_value(uint32_t *val, uint32_t offset,
+		uint32_t size, int32_t val1)
+{
+	uint32_t member = *val;
+	uint32_t offset_val = get_offset_value(member, offset, size);
+	uint32_t mask = (1 << size) - 1;
+
+	offset_val += val1;
+	*val &= CLEAR_FIELD(offset, size);
+	*val |= ((offset_val & mask) << offset);
+}
+
+static inline void set_offset_value(uint32_t *val, uint32_t offset,
+		uint32_t size, uint32_t val1)
+{
+	uint32_t mask = (1 << size) - 1;
+
+	*val &= CLEAR_FIELD(offset, size);
+	*val |= ((val1 & mask) << offset);
+}
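+
+/*
+ * Example: set_msg_type() below uses this helper to write the message
+ * type into bits [23:16] of a v1 header word:
+ *
+ *	set_offset_value(&hdr->request_hdr, RPM_MSG_TYPE_OFFSET,
+ *			RPM_MSG_TYPE_SIZE, RPM_V1_REQUEST_SERVICE);
+ */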
+
+static uint32_t get_msg_id(char *buf)
+{
+	if (rpm_msg_fmt_ver == RPM_MSG_V0_FMT)
+		return ((struct rpm_message_header_v0 *)buf)->msg_id;
+
+	return ((struct rpm_message_header_v1 *)buf)->msg_id;
+}
+
+static uint32_t get_ack_msg_id(char *buf)
+{
+	if (rpm_msg_fmt_ver == RPM_MSG_V0_FMT)
+		return ((struct msm_rpm_ack_msg_v0 *)buf)->id_ack;
+
+	return ((struct msm_rpm_ack_msg_v1 *)buf)->id_ack;
+}
+
+static uint32_t get_rsc_type(char *buf)
+{
+	if (rpm_msg_fmt_ver == RPM_MSG_V0_FMT)
+		return ((struct rpm_message_header_v0 *)buf)->resource_type;
+
+	return ((struct rpm_message_header_v1 *)buf)->resource_type;
+}
+
+static uint32_t get_set_type(char *buf)
+{
+	if (rpm_msg_fmt_ver == RPM_MSG_V0_FMT)
+		return ((struct rpm_message_header_v0 *)buf)->set;
+
+	return get_offset_value(((struct rpm_message_header_v1 *)buf)->
+			request_details, RPM_SET_TYPE_OFFSET,
+			RPM_SET_TYPE_SIZE);
+}
+
+static uint32_t get_data_len(char *buf)
+{
+	if (rpm_msg_fmt_ver == RPM_MSG_V0_FMT)
+		return ((struct rpm_message_header_v0 *)buf)->data_len;
+
+	return get_offset_value(((struct rpm_message_header_v1 *)buf)->
+			request_details, RPM_DATA_LEN_OFFSET,
+			RPM_DATA_LEN_SIZE);
+}
+
+static uint32_t get_rsc_id(char *buf)
+{
+	if (rpm_msg_fmt_ver == RPM_MSG_V0_FMT)
+		return ((struct rpm_message_header_v0 *)buf)->resource_id;
+
+	return get_offset_value(((struct rpm_message_header_v1 *)buf)->
+			request_details, RPM_RSC_ID_OFFSET,
+			RPM_RSC_ID_SIZE);
+}
+
+static uint32_t get_ack_req_len(char *buf)
+{
+	if (rpm_msg_fmt_ver == RPM_MSG_V0_FMT)
+		return ((struct msm_rpm_ack_msg_v0 *)buf)->req_len;
+
+	return get_offset_value(((struct msm_rpm_ack_msg_v1 *)buf)->
+			request_hdr, RPM_REQ_LEN_OFFSET,
+			RPM_REQ_LEN_SIZE);
+}
+
+static uint32_t get_ack_msg_type(char *buf)
+{
+	if (rpm_msg_fmt_ver == RPM_MSG_V0_FMT)
+		return ((struct msm_rpm_ack_msg_v0 *)buf)->req;
+
+	return get_offset_value(((struct msm_rpm_ack_msg_v1 *)buf)->
+			request_hdr, RPM_MSG_TYPE_OFFSET,
+			RPM_MSG_TYPE_SIZE);
+}
+
+static uint32_t get_req_len(char *buf)
+{
+	if (rpm_msg_fmt_ver == RPM_MSG_V0_FMT)
+		return ((struct rpm_message_header_v0 *)buf)->hdr.request_len;
+
+	return get_offset_value(((struct rpm_message_header_v1 *)buf)->
+			hdr.request_hdr, RPM_REQ_LEN_OFFSET,
+			RPM_REQ_LEN_SIZE);
+}
+
+static void set_msg_ver(char *buf, uint32_t val)
+{
+	if (rpm_msg_fmt_ver) {
+		set_offset_value(&((struct rpm_message_header_v1 *)buf)->
+			hdr.request_hdr, RPM_MSG_VERSION_OFFSET,
+			RPM_MSG_VERSION_SIZE, val);
+	} else {
+		set_offset_value(&((struct rpm_message_header_v1 *)buf)->
+			hdr.request_hdr, RPM_MSG_VERSION_OFFSET,
+			RPM_MSG_VERSION_SIZE, 0);
+	}
+}
+
+static void set_req_len(char *buf, uint32_t val)
+{
+	if (rpm_msg_fmt_ver == RPM_MSG_V0_FMT) {
+		((struct rpm_message_header_v0 *)buf)->hdr.request_len = val;
+	} else {
+		set_offset_value(&((struct rpm_message_header_v1 *)buf)->
+			hdr.request_hdr, RPM_REQ_LEN_OFFSET,
+			RPM_REQ_LEN_SIZE, val);
+	}
+}
+
+static void change_req_len(char *buf, int32_t val)
+{
+	if (rpm_msg_fmt_ver == RPM_MSG_V0_FMT) {
+		((struct rpm_message_header_v0 *)buf)->hdr.request_len += val;
+	} else {
+		change_offset_value(&((struct rpm_message_header_v1 *)buf)->
+			hdr.request_hdr, RPM_REQ_LEN_OFFSET,
+			RPM_REQ_LEN_SIZE, val);
+	}
+}
+
+static void set_msg_type(char *buf, uint32_t val)
+{
+	if (rpm_msg_fmt_ver == RPM_MSG_V0_FMT) {
+		((struct rpm_message_header_v0 *)buf)->hdr.service_type =
+			msm_rpm_request_service_v1[val];
+	} else {
+		set_offset_value(&((struct rpm_message_header_v1 *)buf)->
+			hdr.request_hdr, RPM_MSG_TYPE_OFFSET,
+			RPM_MSG_TYPE_SIZE, RPM_V1_REQUEST_SERVICE);
+	}
+}
+
+static void set_rsc_id(char *buf, uint32_t val)
+{
+	if (rpm_msg_fmt_ver == RPM_MSG_V0_FMT)
+		((struct rpm_message_header_v0 *)buf)->resource_id = val;
+	else
+		set_offset_value(&((struct rpm_message_header_v1 *)buf)->
+			request_details, RPM_RSC_ID_OFFSET,
+			RPM_RSC_ID_SIZE, val);
+}
+
+static void set_data_len(char *buf, uint32_t val)
+{
+	if (rpm_msg_fmt_ver == RPM_MSG_V0_FMT)
+		((struct rpm_message_header_v0 *)buf)->data_len = val;
+	else
+		set_offset_value(&((struct rpm_message_header_v1 *)buf)->
+			request_details, RPM_DATA_LEN_OFFSET,
+			RPM_DATA_LEN_SIZE, val);
+}
+
+static void change_data_len(char *buf, int32_t val)
+{
+	if (rpm_msg_fmt_ver == RPM_MSG_V0_FMT)
+		((struct rpm_message_header_v0 *)buf)->data_len += val;
+	else
+		change_offset_value(&((struct rpm_message_header_v1 *)buf)->
+			request_details, RPM_DATA_LEN_OFFSET,
+			RPM_DATA_LEN_SIZE, val);
+}
+
+static void set_set_type(char *buf, uint32_t val)
+{
+	if (rpm_msg_fmt_ver == RPM_MSG_V0_FMT)
+		((struct rpm_message_header_v0 *)buf)->set = val;
+	else
+		set_offset_value(&((struct rpm_message_header_v1 *)buf)->
+			request_details, RPM_SET_TYPE_OFFSET,
+			RPM_SET_TYPE_SIZE, val);
+}
+
+static void set_msg_id(char *buf, uint32_t val)
+{
+	if (rpm_msg_fmt_ver == RPM_MSG_V0_FMT)
+		((struct rpm_message_header_v0 *)buf)->msg_id = val;
+	else
+		((struct rpm_message_header_v1 *)buf)->msg_id = val;
+}
+
+static void set_rsc_type(char *buf, uint32_t val)
+{
+	if (rpm_msg_fmt_ver == RPM_MSG_V0_FMT)
+		((struct rpm_message_header_v0 *)buf)->resource_type = val;
+	else
+		((struct rpm_message_header_v1 *)buf)->resource_type = val;
+}
+
+static inline int get_buf_len(char *buf)
+{
+	return get_req_len(buf) + RPM_HDR_SIZE;
+}
+
+static inline struct kvp *get_first_kvp(char *buf)
+{
+	if (rpm_msg_fmt_ver == RPM_MSG_V0_FMT)
+		return (struct kvp *)(buf +
+				sizeof(struct rpm_message_header_v0));
+	else
+		return (struct kvp *)(buf +
+				sizeof(struct rpm_message_header_v1));
+}
+
+static inline struct kvp *get_next_kvp(struct kvp *k)
+{
+	return (struct kvp *)((void *)k + sizeof(*k) + k->s);
+}
+
+static inline void *get_data(struct kvp *k)
+{
+	return (void *)k + sizeof(*k);
+}
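+
+/*
+ * A request buffer is a message header followed by a packed sequence
+ * of KVPs; each KVP is a { key, size } pair immediately followed by
+ * 'size' bytes of payload:
+ *
+ *	[header][k0 s0 <s0 bytes>][k1 s1 <s1 bytes>]...
+ *
+ * get_first_kvp()/get_next_kvp() walk this sequence and get_data()
+ * returns a pointer to a KVP's payload.
+ */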
+
+static void delete_kvp(char *buf, struct kvp *d)
+{
+	struct kvp *n;
+	int dec;
+	uint32_t size;
+
+	n = get_next_kvp(d);
+	dec = (void *)n - (void *)d;
+	size = get_data_len(buf) -
+		((void *)n - (void *)get_first_kvp(buf));
+
+	memcpy((void *)d, (void *)n, size);
+
+	change_data_len(buf, -dec);
+	change_req_len(buf, -dec);
+}
+
+static inline void update_kvp_data(struct kvp *dest, struct kvp *src)
+{
+	memcpy(get_data(dest), get_data(src), src->s);
+}
+
+static void add_kvp(char *buf, struct kvp *n)
+{
+	int32_t inc = sizeof(*n) + n->s;
+
+	if (WARN_ON(get_req_len(buf) + inc > MAX_SLEEP_BUFFER))
+		return;
+
+	memcpy(buf + get_buf_len(buf), n, inc);
+
+	change_data_len(buf, inc);
+	change_req_len(buf, inc);
+}
+
+static struct slp_buf *tr_search(struct rb_root *root, char *slp)
+{
+	unsigned int type = get_rsc_type(slp);
+	unsigned int id = get_rsc_id(slp);
+	struct rb_node *node = root->rb_node;
+
+	while (node) {
+		struct slp_buf *cur = rb_entry(node, struct slp_buf, node);
+		unsigned int ctype = get_rsc_type(cur->buf);
+		unsigned int cid = get_rsc_id(cur->buf);
+
+		if (type < ctype)
+			node = node->rb_left;
+		else if (type > ctype)
+			node = node->rb_right;
+		else if (id < cid)
+			node = node->rb_left;
+		else if (id > cid)
+			node = node->rb_right;
+		else
+			return cur;
+	}
+	return NULL;
+}
+
+static int tr_insert(struct rb_root *root, struct slp_buf *slp)
+{
+	unsigned int type = get_rsc_type(slp->buf);
+	unsigned int id = get_rsc_id(slp->buf);
+	struct rb_node **node = &(root->rb_node), *parent = NULL;
+
+	while (*node) {
+		struct slp_buf *curr = rb_entry(*node, struct slp_buf, node);
+		unsigned int ctype = get_rsc_type(curr->buf);
+		unsigned int cid = get_rsc_id(curr->buf);
+
+		parent = *node;
+
+		if (type < ctype)
+			node = &((*node)->rb_left);
+		else if (type > ctype)
+			node = &((*node)->rb_right);
+		else if (id < cid)
+			node = &((*node)->rb_left);
+		else if (id > cid)
+			node = &((*node)->rb_right);
+		else
+			return -EINVAL;
+	}
+
+	rb_link_node(&slp->node, parent, node);
+	rb_insert_color(&slp->node, root);
+	slp->valid = true;
+	return 0;
+}
+
+#define for_each_kvp(buf, k) \
+	for (k = (struct kvp *)get_first_kvp(buf); \
+		((void *)k - (void *)get_first_kvp(buf)) < \
+		 get_data_len(buf);\
+		k = get_next_kvp(k))
+
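+/*
+ * Merge a new sleep-set request into the cached slp_buf for the same
+ * (resource type, resource id): matching keys have their payload
+ * refreshed, size changes are handled by delete_kvp() + add_kvp(), and
+ * unseen keys are appended. The buffer is marked valid only when
+ * something actually changed, so msm_rpm_flush_requests() can skip
+ * untouched resources.
+ */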
+
+static void tr_update(struct slp_buf *s, char *buf)
+{
+	struct kvp *e, *n;
+
+	for_each_kvp(buf, n) {
+		bool found = false;
+		for_each_kvp(s->buf, e) {
+			if (n->k == e->k) {
+				found = true;
+				if (n->s == e->s) {
+					void *e_data = get_data(e);
+					void *n_data = get_data(n);
+					if (memcmp(e_data, n_data, n->s)) {
+						update_kvp_data(e, n);
+						s->valid = true;
+					}
+				} else {
+					delete_kvp(s->buf, e);
+					add_kvp(s->buf, n);
+					s->valid = true;
+				}
+				break;
+			}
+
+		}
+		if (!found) {
+			add_kvp(s->buf, n);
+			s->valid = true;
+		}
+	}
+}
+
+static atomic_t msm_rpm_msg_id = ATOMIC_INIT(0);
+
+struct msm_rpm_request {
+	uint8_t *client_buf;
+	struct msm_rpm_kvp_data *kvp;
+	uint32_t num_elements;
+	uint32_t write_idx;
+	uint8_t *buf;
+	uint32_t numbytes;
+};
+
+/*
+ * Data related to message acknowledgment
+ */
+
+LIST_HEAD(msm_rpm_wait_list);
+
+struct msm_rpm_wait_data {
+	struct list_head list;
+	uint32_t msg_id;
+	bool ack_recd;
+	int errno;
+	struct completion ack;
+	bool delete_on_ack;
+};
+DEFINE_SPINLOCK(msm_rpm_list_lock);
+
+
+
+LIST_HEAD(msm_rpm_ack_list);
+
+static struct tasklet_struct data_tasklet;
+
+static inline uint32_t msm_rpm_get_msg_id_from_ack(uint8_t *buf)
+{
+	return get_ack_msg_id(buf);
+}
+
+static inline int msm_rpm_get_error_from_ack(uint8_t *buf)
+{
+	uint8_t *tmp;
+	uint32_t req_len = get_ack_req_len(buf);
+	uint32_t msg_type = get_ack_msg_type(buf);
+	int rc = -ENODEV;
+	uint32_t err;
+	uint32_t ack_msg_size = rpm_msg_fmt_ver ?
+			sizeof(struct msm_rpm_ack_msg_v1) :
+			sizeof(struct msm_rpm_ack_msg_v0);
+
+	if (rpm_msg_fmt_ver == RPM_MSG_V0_FMT &&
+			msg_type == RPM_V1_ACK_SERVICE) {
+		return 0;
+	} else if (rpm_msg_fmt_ver && msg_type == RPM_V1_NACK_SERVICE) {
+		err = *(uint32_t *)(buf + sizeof(struct msm_rpm_ack_msg_v1));
+		return err;
+	}
+
+	req_len -= ack_msg_size;
+	req_len += 2 * sizeof(uint32_t);
+	if (!req_len)
+		return 0;
+
+	pr_err("%s:rpm returned error or nack req_len: %d id_ack: %d\n",
+				__func__, req_len, get_ack_msg_id(buf));
+
+	tmp = buf + ack_msg_size;
+
+	if (memcmp(tmp, ERR, sizeof(uint32_t))) {
+		pr_err("%s rpm returned error\n", __func__);
+		BUG_ON(1);
+	}
+
+	tmp += 2 * sizeof(uint32_t);
+
+	if (!(memcmp(tmp, INV_RSC, min_t(uint32_t, req_len,
+						sizeof(INV_RSC))-1))) {
+		pr_err("%s(): RPM NACK Unsupported resource\n", __func__);
+		rc = -EINVAL;
+	} else {
+		pr_err("%s(): RPM NACK Invalid header\n", __func__);
+	}
+
+	return rc;
+}
+
+int msm_rpm_smd_buffer_request(struct msm_rpm_request *cdata,
+		uint32_t size, gfp_t flag)
+{
+	struct slp_buf *slp;
+	static DEFINE_SPINLOCK(slp_buffer_lock);
+	unsigned long flags;
+	char *buf;
+
+	buf = cdata->buf;
+
+	if (size > MAX_SLEEP_BUFFER)
+		return -ENOMEM;
+
+	spin_lock_irqsave(&slp_buffer_lock, flags);
+	slp = tr_search(&tr_root, buf);
+
+	if (!slp) {
+		slp = kzalloc(sizeof(struct slp_buf), GFP_ATOMIC);
+		if (!slp) {
+			spin_unlock_irqrestore(&slp_buffer_lock, flags);
+			return -ENOMEM;
+		}
+		slp->buf = PTR_ALIGN(&slp->ubuf[0], sizeof(u32));
+		memcpy(slp->buf, buf, size);
+		if (tr_insert(&tr_root, slp))
+			pr_err("Error updating sleep request\n");
+	} else {
+		/* handle unsent requests */
+		tr_update(slp, buf);
+	}
+	trace_rpm_smd_sleep_set(get_msg_id(cdata->client_buf),
+			get_rsc_type(cdata->client_buf),
+			get_req_len(cdata->client_buf));
+
+	spin_unlock_irqrestore(&slp_buffer_lock, flags);
+
+	return 0;
+}
+
+static struct msm_rpm_driver_data msm_rpm_data = {
+	.smd_open = COMPLETION_INITIALIZER(msm_rpm_data.smd_open),
+};
+
+static int msm_rpm_glink_rx_poll(void *glink_handle)
+{
+	int ret;
+
+	ret = glink_rpm_rx_poll(glink_handle);
+	if (ret >= 0)
+		/*
+		 * Sleep for 50us at a time before checking
+		 * for packet availability. The 50us is based
+		 * on the time the RPM could take to process
+		 * and send an ack for the sleep set request.
+		 */
+		udelay(50);
+	else
+		pr_err("Not receieve an ACK from RPM. ret = %d\n", ret);
+
+	return ret;
+}
+
+/*
+ * Returns
+ *	= 0 on successful reads
+ *	> 0 on successful reads with no further data
+ *	standard Linux error codes on failure.
+ */
+static int msm_rpm_read_sleep_ack(void)
+{
+	int ret;
+	char buf[MAX_ERR_BUFFER_SIZE] = {0};
+
+	if (glink_enabled)
+		ret = msm_rpm_glink_rx_poll(glink_data->glink_handle);
+	else {
+		ret = msm_rpm_read_smd_data(buf);
+		if (!ret)
+			ret = smd_is_pkt_avail(msm_rpm_data.ch_info);
+	}
+	return ret;
+}
+
+static int msm_rpm_flush_requests(bool print)
+{
+	struct rb_node *t;
+	int ret;
+	int count = 0;
+
+	for (t = rb_first(&tr_root); t; t = rb_next(t)) {
+
+		struct slp_buf *s = rb_entry(t, struct slp_buf, node);
+		unsigned int type = get_rsc_type(s->buf);
+		unsigned int id = get_rsc_id(s->buf);
+
+		if (!s->valid)
+			continue;
+
+		set_msg_id(s->buf, msm_rpm_get_next_msg_id());
+
+		if (!glink_enabled)
+			ret = msm_rpm_send_smd_buffer(s->buf,
+					get_buf_len(s->buf), true);
+		else
+			ret = msm_rpm_glink_send_buffer(s->buf,
+					get_buf_len(s->buf), true);
+
+		WARN_ON(ret != get_buf_len(s->buf));
+		trace_rpm_smd_send_sleep_set(get_msg_id(s->buf), type, id);
+
+		s->valid = false;
+		count++;
+
+		/*
+		 * RPM acks need to be handled here after we have sent 24
+		 * messages, so that we do not overrun the SMD buffer. Since
+		 * we expect only sleep sets at this point (RPM PC would be
+		 * disallowed if we had pending active requests), we need not
+		 * process these sleep set acks.
+		 */
+		if (count >= MAX_WAIT_ON_ACK) {
+			int ret = msm_rpm_read_sleep_ack();
+
+			if (ret >= 0)
+				count--;
+			else
+				return ret;
+		}
+	}
+	return 0;
+}
+
+static void msm_rpm_notify_sleep_chain(char *buf,
+		struct msm_rpm_kvp_data *kvp)
+{
+	struct msm_rpm_notifier_data notif;
+
+	notif.rsc_type = get_rsc_type(buf);
+	notif.rsc_id = get_req_len(buf);
+	notif.key = kvp->key;
+	notif.size = kvp->nbytes;
+	notif.value = kvp->value;
+	atomic_notifier_call_chain(&msm_rpm_sleep_notifier, 0, &notif);
+}
+
+static int msm_rpm_add_kvp_data_common(struct msm_rpm_request *handle,
+		uint32_t key, const uint8_t *data, int size, bool noirq)
+{
+	uint32_t i;
+	uint32_t data_size, msg_size;
+
+	if (probe_status)
+		return probe_status;
+
+	if (!handle || !data) {
+		pr_err("%s(): Invalid handle/data\n", __func__);
+		return -EINVAL;
+	}
+
+	if (size < 0)
+		return -EINVAL;
+
+	data_size = ALIGN(size, SZ_4);
+	msg_size = data_size + 8;
+
+	for (i = 0; i < handle->write_idx; i++) {
+		if (handle->kvp[i].key != key)
+			continue;
+		if (handle->kvp[i].nbytes != data_size) {
+			kfree(handle->kvp[i].value);
+			handle->kvp[i].value = NULL;
+		} else {
+			if (!memcmp(handle->kvp[i].value, data, data_size))
+				return 0;
+		}
+		break;
+	}
+
+	if (i >= handle->num_elements) {
+		pr_err("Number of resources exceeds max allocated\n");
+		return -ENOMEM;
+	}
+
+	if (i == handle->write_idx)
+		handle->write_idx++;
+
+	if (!handle->kvp[i].value) {
+		handle->kvp[i].value = kzalloc(data_size, GFP_FLAG(noirq));
+
+		if (!handle->kvp[i].value) {
+			pr_err("Failed malloc\n");
+			return -ENOMEM;
+		}
+	} else {
+		/*
+		 * We enter the else case if the key already exists with the
+		 * same size but different data. In that case, zero out the
+		 * old data before copying in the new value.
+		 */
+		memset(handle->kvp[i].value, 0, data_size);
+	}
+
+	if (!handle->kvp[i].valid)
+		change_data_len(handle->client_buf, msg_size);
+	else
+		change_data_len(handle->client_buf,
+			(data_size - handle->kvp[i].nbytes));
+
+	handle->kvp[i].nbytes = data_size;
+	handle->kvp[i].key = key;
+	memcpy(handle->kvp[i].value, data, size);
+	handle->kvp[i].valid = true;
+
+	return 0;
+}
+
+static struct msm_rpm_request *msm_rpm_create_request_common(
+		enum msm_rpm_set set, uint32_t rsc_type, uint32_t rsc_id,
+		int num_elements, bool noirq)
+{
+	struct msm_rpm_request *cdata;
+	uint32_t buf_size;
+
+	if (probe_status)
+		return ERR_PTR(probe_status);
+
+	cdata = kzalloc(sizeof(struct msm_rpm_request),
+			GFP_FLAG(noirq));
+
+	if (!cdata) {
+		pr_err("Cannot allocate memory for client data\n");
+		goto cdata_alloc_fail;
+	}
+
+	if (rpm_msg_fmt_ver == RPM_MSG_V0_FMT)
+		buf_size = sizeof(struct rpm_message_header_v0);
+	else
+		buf_size = sizeof(struct rpm_message_header_v1);
+
+	cdata->client_buf = kzalloc(buf_size, GFP_FLAG(noirq));
+
+	if (!cdata->client_buf) {
+		pr_warn("Cannot allocate memory for client_buf\n");
+		goto client_buf_alloc_fail;
+	}
+
+	set_set_type(cdata->client_buf, set);
+	set_rsc_type(cdata->client_buf, rsc_type);
+	set_rsc_id(cdata->client_buf, rsc_id);
+
+	cdata->num_elements = num_elements;
+	cdata->write_idx = 0;
+
+	cdata->kvp = kzalloc(sizeof(struct msm_rpm_kvp_data) * num_elements,
+			GFP_FLAG(noirq));
+
+	if (!cdata->kvp) {
+		pr_warn("%s(): Cannot allocate memory for key value data\n",
+				__func__);
+		goto kvp_alloc_fail;
+	}
+
+	cdata->buf = kzalloc(DEFAULT_BUFFER_SIZE, GFP_FLAG(noirq));
+
+	if (!cdata->buf)
+		goto buf_alloc_fail;
+
+	cdata->numbytes = DEFAULT_BUFFER_SIZE;
+	return cdata;
+
+buf_alloc_fail:
+	kfree(cdata->kvp);
+kvp_alloc_fail:
+	kfree(cdata->client_buf);
+client_buf_alloc_fail:
+	kfree(cdata);
+cdata_alloc_fail:
+	return NULL;
+}
+
+void msm_rpm_free_request(struct msm_rpm_request *handle)
+{
+	int i;
+
+	if (!handle)
+		return;
+	for (i = 0; i < handle->num_elements; i++)
+		kfree(handle->kvp[i].value);
+	kfree(handle->kvp);
+	kfree(handle->client_buf);
+	kfree(handle->buf);
+	kfree(handle);
+}
+EXPORT_SYMBOL(msm_rpm_free_request);
+
+struct msm_rpm_request *msm_rpm_create_request(
+		enum msm_rpm_set set, uint32_t rsc_type,
+		uint32_t rsc_id, int num_elements)
+{
+	return msm_rpm_create_request_common(set, rsc_type, rsc_id,
+			num_elements, false);
+}
+EXPORT_SYMBOL(msm_rpm_create_request);
+
+struct msm_rpm_request *msm_rpm_create_request_noirq(
+		enum msm_rpm_set set, uint32_t rsc_type,
+		uint32_t rsc_id, int num_elements)
+{
+	return msm_rpm_create_request_common(set, rsc_type, rsc_id,
+			num_elements, true);
+}
+EXPORT_SYMBOL(msm_rpm_create_request_noirq);
+
+int msm_rpm_add_kvp_data(struct msm_rpm_request *handle,
+		uint32_t key, const uint8_t *data, int size)
+{
+	return msm_rpm_add_kvp_data_common(handle, key, data, size, false);
+}
+EXPORT_SYMBOL(msm_rpm_add_kvp_data);
+
+int msm_rpm_add_kvp_data_noirq(struct msm_rpm_request *handle,
+		uint32_t key, const uint8_t *data, int size)
+{
+	return msm_rpm_add_kvp_data_common(handle, key, data, size, true);
+}
+EXPORT_SYMBOL(msm_rpm_add_kvp_data_noirq);
+
+/* Runs in interrupt context */
+static void msm_rpm_notify(void *data, unsigned event)
+{
+	struct msm_rpm_driver_data *pdata = (struct msm_rpm_driver_data *)data;
+	BUG_ON(!pdata);
+
+	if (!(pdata->ch_info))
+		return;
+
+	switch (event) {
+	case SMD_EVENT_DATA:
+		tasklet_schedule(&data_tasklet);
+		trace_rpm_smd_interrupt_notify("interrupt notification");
+		break;
+	case SMD_EVENT_OPEN:
+		complete(&pdata->smd_open);
+		break;
+	case SMD_EVENT_CLOSE:
+	case SMD_EVENT_STATUS:
+	case SMD_EVENT_REOPEN_READY:
+		break;
+	default:
+		pr_info("Unknown SMD event\n");
+
+	}
+}
+
+bool msm_rpm_waiting_for_ack(void)
+{
+	bool ret;
+	unsigned long flags;
+
+	spin_lock_irqsave(&msm_rpm_list_lock, flags);
+	ret = list_empty(&msm_rpm_wait_list);
+	spin_unlock_irqrestore(&msm_rpm_list_lock, flags);
+
+	return !ret;
+}
+
+static struct msm_rpm_wait_data *msm_rpm_get_entry_from_msg_id(uint32_t msg_id)
+{
+	struct list_head *ptr;
+	struct msm_rpm_wait_data *elem = NULL;
+	unsigned long flags;
+
+	spin_lock_irqsave(&msm_rpm_list_lock, flags);
+
+	list_for_each(ptr, &msm_rpm_wait_list) {
+		elem = list_entry(ptr, struct msm_rpm_wait_data, list);
+		if (elem && (elem->msg_id == msg_id))
+			break;
+		elem = NULL;
+	}
+	spin_unlock_irqrestore(&msm_rpm_list_lock, flags);
+	return elem;
+}
+
+static uint32_t msm_rpm_get_next_msg_id(void)
+{
+	uint32_t id;
+
+	/*
+	 * A message id of 0 is used by the driver to indicate an error
+	 * condition. The RPM driver uses an id of 1 to indicate unsent data
+	 * when the data sent over hasn't been modified. This isn't an error
+	 * scenario, and wait-for-ack returns success when the message id is 1.
+	 */
+
+	do {
+		id = atomic_inc_return(&msm_rpm_msg_id);
+	} while ((id == 0) || (id == 1) || msm_rpm_get_entry_from_msg_id(id));
+
+	return id;
+}
+
+static int msm_rpm_add_wait_list(uint32_t msg_id, bool delete_on_ack)
+{
+	unsigned long flags;
+	struct msm_rpm_wait_data *data =
+		kzalloc(sizeof(struct msm_rpm_wait_data), GFP_ATOMIC);
+
+	if (!data)
+		return -ENOMEM;
+
+	init_completion(&data->ack);
+	data->ack_recd = false;
+	data->msg_id = msg_id;
+	data->errno = INIT_ERROR;
+	data->delete_on_ack = delete_on_ack;
+	spin_lock_irqsave(&msm_rpm_list_lock, flags);
+	if (delete_on_ack)
+		list_add_tail(&data->list, &msm_rpm_wait_list);
+	else
+		list_add(&data->list, &msm_rpm_wait_list);
+	spin_unlock_irqrestore(&msm_rpm_list_lock, flags);
+
+	return 0;
+}
+
+static void msm_rpm_free_list_entry(struct msm_rpm_wait_data *elem)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&msm_rpm_list_lock, flags);
+	list_del(&elem->list);
+	spin_unlock_irqrestore(&msm_rpm_list_lock, flags);
+	kfree(elem);
+}
+
+static void msm_rpm_process_ack(uint32_t msg_id, int errno)
+{
+	struct list_head *ptr, *next;
+	struct msm_rpm_wait_data *elem = NULL;
+	unsigned long flags;
+
+	spin_lock_irqsave(&msm_rpm_list_lock, flags);
+
+	list_for_each_safe(ptr, next, &msm_rpm_wait_list) {
+		elem = list_entry(ptr, struct msm_rpm_wait_data, list);
+		if (elem->msg_id == msg_id) {
+			elem->errno = errno;
+			elem->ack_recd = true;
+			complete(&elem->ack);
+			if (elem->delete_on_ack) {
+				list_del(&elem->list);
+				kfree(elem);
+			}
+			break;
+		}
+	}
+	/*
+	 * Special case where the sleep driver doesn't wait for ACKs.
+	 * This decreases the latency involved in entering RPM-assisted
+	 * power collapse.
+	 */
+	if (!elem)
+		trace_rpm_smd_ack_recvd(0, msg_id, 0xDEADBEEF);
+
+	spin_unlock_irqrestore(&msm_rpm_list_lock, flags);
+}
+
+struct msm_rpm_kvp_packet {
+	uint32_t id;
+	uint32_t len;
+	uint32_t val;
+};
+
+static int msm_rpm_read_smd_data(char *buf)
+{
+	int pkt_sz;
+	int bytes_read = 0;
+
+	pkt_sz = smd_cur_packet_size(msm_rpm_data.ch_info);
+
+	if (!pkt_sz)
+		return -EAGAIN;
+
+	if (pkt_sz > MAX_ERR_BUFFER_SIZE) {
+		pr_err("rpm_smd pkt_sz is greater than max size\n");
+		goto error;
+	}
+
+	if (pkt_sz != smd_read_avail(msm_rpm_data.ch_info))
+		return -EAGAIN;
+
+	do {
+		int len;
+
+		len = smd_read(msm_rpm_data.ch_info, buf + bytes_read, pkt_sz);
+		pkt_sz -= len;
+		bytes_read += len;
+
+	} while (pkt_sz > 0);
+
+	if (pkt_sz < 0) {
+		pr_err("rpm_smd pkt_sz is less than zero\n");
+		goto error;
+	}
+	return 0;
+error:
+	BUG_ON(1);
+
+	return 0;
+}
+
+static void data_fn_tasklet(unsigned long data)
+{
+	uint32_t msg_id;
+	int errno;
+	char buf[MAX_ERR_BUFFER_SIZE] = {0};
+
+	spin_lock(&msm_rpm_data.smd_lock_read);
+	while (smd_is_pkt_avail(msm_rpm_data.ch_info)) {
+		if (msm_rpm_read_smd_data(buf))
+			break;
+		msg_id = msm_rpm_get_msg_id_from_ack(buf);
+		errno = msm_rpm_get_error_from_ack(buf);
+		trace_rpm_smd_ack_recvd(0, msg_id, errno);
+		msm_rpm_process_ack(msg_id, errno);
+	}
+	spin_unlock(&msm_rpm_data.smd_lock_read);
+}
+
+static void msm_rpm_log_request(struct msm_rpm_request *cdata)
+{
+	char buf[DEBUG_PRINT_BUFFER_SIZE];
+	size_t buflen = DEBUG_PRINT_BUFFER_SIZE;
+	char name[5];
+	u32 value;
+	uint32_t i;
+	int j, prev_valid;
+	int valid_count = 0;
+	int pos = 0;
+	uint32_t res_type, rsc_id;
+
+	name[4] = 0;
+
+	for (i = 0; i < cdata->write_idx; i++)
+		if (cdata->kvp[i].valid)
+			valid_count++;
+
+	pos += scnprintf(buf + pos, buflen - pos, "%sRPM req: ", KERN_INFO);
+	if (msm_rpm_debug_mask & MSM_RPM_LOG_REQUEST_SHOW_MSG_ID)
+		pos += scnprintf(buf + pos, buflen - pos, "msg_id=%u, ",
+				get_msg_id(cdata->client_buf));
+	pos += scnprintf(buf + pos, buflen - pos, "s=%s",
+		(get_set_type(cdata->client_buf) ==
+				MSM_RPM_CTX_ACTIVE_SET ? "act" : "slp"));
+
+	res_type = get_rsc_type(cdata->client_buf);
+	rsc_id = get_rsc_id(cdata->client_buf);
+	if ((msm_rpm_debug_mask & MSM_RPM_LOG_REQUEST_PRETTY)
+	    && (msm_rpm_debug_mask & MSM_RPM_LOG_REQUEST_RAW)) {
+		/* Both pretty and raw formatting */
+		memcpy(name, &res_type, sizeof(uint32_t));
+		pos += scnprintf(buf + pos, buflen - pos,
+			", rsc_type=0x%08X (%s), rsc_id=%u; ",
+			res_type, name, rsc_id);
+
+		for (i = 0, prev_valid = 0; i < cdata->write_idx; i++) {
+			if (!cdata->kvp[i].valid)
+				continue;
+
+			memcpy(name, &cdata->kvp[i].key, sizeof(uint32_t));
+			pos += scnprintf(buf + pos, buflen - pos,
+					"[key=0x%08X (%s), value=%s",
+					cdata->kvp[i].key, name,
+					(cdata->kvp[i].nbytes ? "0x" : "null"));
+
+			for (j = 0; j < cdata->kvp[i].nbytes; j++)
+				pos += scnprintf(buf + pos, buflen - pos,
+						"%02X ",
+						cdata->kvp[i].value[j]);
+
+			if (cdata->kvp[i].nbytes)
+				pos += scnprintf(buf + pos, buflen - pos, "(");
+
+			for (j = 0; j < cdata->kvp[i].nbytes; j += 4) {
+				value = 0;
+				memcpy(&value, &cdata->kvp[i].value[j],
+					min_t(uint32_t, sizeof(uint32_t),
+						cdata->kvp[i].nbytes - j));
+				pos += scnprintf(buf + pos, buflen - pos, "%u",
+						value);
+				if (j + 4 < cdata->kvp[i].nbytes)
+					pos += scnprintf(buf + pos,
+						buflen - pos, " ");
+			}
+			if (cdata->kvp[i].nbytes)
+				pos += scnprintf(buf + pos, buflen - pos, ")");
+			pos += scnprintf(buf + pos, buflen - pos, "]");
+			if (prev_valid + 1 < valid_count)
+				pos += scnprintf(buf + pos, buflen - pos, ", ");
+			prev_valid++;
+		}
+	} else if (msm_rpm_debug_mask & MSM_RPM_LOG_REQUEST_PRETTY) {
+		/* Pretty formatting only */
+		memcpy(name, &res_type, sizeof(uint32_t));
+		pos += scnprintf(buf + pos, buflen - pos, " %s %u; ", name,
+			rsc_id);
+
+		for (i = 0, prev_valid = 0; i < cdata->write_idx; i++) {
+			if (!cdata->kvp[i].valid)
+				continue;
+
+			memcpy(name, &cdata->kvp[i].key, sizeof(uint32_t));
+			pos += scnprintf(buf + pos, buflen - pos, "%s=%s",
+				name, (cdata->kvp[i].nbytes ? "" : "null"));
+
+			for (j = 0; j < cdata->kvp[i].nbytes; j += 4) {
+				value = 0;
+				memcpy(&value, &cdata->kvp[i].value[j],
+					min_t(uint32_t, sizeof(uint32_t),
+						cdata->kvp[i].nbytes - j));
+				pos += scnprintf(buf + pos, buflen - pos, "%u",
+						value);
+
+				if (j + 4 < cdata->kvp[i].nbytes)
+					pos += scnprintf(buf + pos,
+						buflen - pos, " ");
+			}
+			if (prev_valid + 1 < valid_count)
+				pos += scnprintf(buf + pos, buflen - pos, ", ");
+			prev_valid++;
+		}
+	} else {
+		/* Raw formatting only */
+		pos += scnprintf(buf + pos, buflen - pos,
+			", rsc_type=0x%08X, rsc_id=%u; ", res_type, rsc_id);
+
+		for (i = 0, prev_valid = 0; i < cdata->write_idx; i++) {
+			if (!cdata->kvp[i].valid)
+				continue;
+
+			pos += scnprintf(buf + pos, buflen - pos,
+					"[key=0x%08X, value=%s",
+					cdata->kvp[i].key,
+					(cdata->kvp[i].nbytes ? "0x" : "null"));
+			for (j = 0; j < cdata->kvp[i].nbytes; j++) {
+				pos += scnprintf(buf + pos, buflen - pos,
+						"%02X",
+						cdata->kvp[i].value[j]);
+				if (j + 1 < cdata->kvp[i].nbytes)
+					pos += scnprintf(buf + pos,
+							buflen - pos, " ");
+			}
+			pos += scnprintf(buf + pos, buflen - pos, "]");
+			if (prev_valid + 1 < valid_count)
+				pos += scnprintf(buf + pos, buflen - pos, ", ");
+			prev_valid++;
+		}
+	}
+
+	pos += scnprintf(buf + pos, buflen - pos, "\n");
+	printk(buf);
+}
+
+static int msm_rpm_send_smd_buffer(char *buf, uint32_t size, bool noirq)
+{
+	unsigned long flags;
+	int ret;
+
+	spin_lock_irqsave(&msm_rpm_data.smd_lock_write, flags);
+
+	while ((ret = smd_write_avail(msm_rpm_data.ch_info)) < size) {
+		if (ret < 0)
+			break;
+		if (!noirq) {
+			spin_unlock_irqrestore(
+				&msm_rpm_data.smd_lock_write, flags);
+			cpu_relax();
+			spin_lock_irqsave(
+				&msm_rpm_data.smd_lock_write, flags);
+		} else
+			udelay(5);
+	}
+
+	if (ret < 0) {
+		pr_err("SMD not initialized\n");
+		spin_unlock_irqrestore(
+			&msm_rpm_data.smd_lock_write, flags);
+		return ret;
+	}
+
+	ret = smd_write(msm_rpm_data.ch_info, buf, size);
+	spin_unlock_irqrestore(&msm_rpm_data.smd_lock_write, flags);
+	return ret;
+}
+
+static int msm_rpm_glink_send_buffer(char *buf, uint32_t size, bool noirq)
+{
+	int ret;
+	unsigned long flags;
+	int timeout = 50;
+
+	spin_lock_irqsave(&msm_rpm_data.smd_lock_write, flags);
+	do {
+		ret = glink_tx(glink_data->glink_handle, buf, buf,
+					size, GLINK_TX_SINGLE_THREADED);
+		if (ret == -EBUSY || ret == -ENOSPC) {
+			if (!noirq) {
+				spin_unlock_irqrestore(
+					&msm_rpm_data.smd_lock_write, flags);
+				cpu_relax();
+				spin_lock_irqsave(
+					&msm_rpm_data.smd_lock_write, flags);
+			} else {
+				udelay(5);
+			}
+			timeout--;
+		} else {
+			ret = 0;
+		}
+	} while (ret && timeout);
+	spin_unlock_irqrestore(&msm_rpm_data.smd_lock_write, flags);
+
+	if (!timeout)
+		return 0;
+	else
+		return size;
+}
+
+static int msm_rpm_send_data(struct msm_rpm_request *cdata,
+		int msg_type, bool noirq, bool noack)
+{
+	uint8_t *tmpbuff;
+	int ret;
+	uint32_t i;
+	uint32_t msg_size;
+	int msg_hdr_sz, req_hdr_sz;
+	uint32_t data_len = get_data_len(cdata->client_buf);
+	uint32_t set = get_set_type(cdata->client_buf);
+	uint32_t msg_id;
+
+	if (probe_status)
+		return probe_status;
+
+	if (!data_len)
+		return 1;
+
+	msg_hdr_sz = rpm_msg_fmt_ver ? sizeof(struct rpm_message_header_v1) :
+			sizeof(struct rpm_message_header_v0);
+
+	req_hdr_sz = RPM_HDR_SIZE;
+	set_msg_type(cdata->client_buf, msg_type);
+
+	set_req_len(cdata->client_buf, data_len + msg_hdr_sz - req_hdr_sz);
+	msg_size = get_req_len(cdata->client_buf) + req_hdr_sz;
+
+	/* populate data_len */
+	if (msg_size > cdata->numbytes) {
+		kfree(cdata->buf);
+		cdata->numbytes = msg_size;
+		cdata->buf = kzalloc(msg_size, GFP_FLAG(noirq));
+	}
+
+	if (!cdata->buf) {
+		pr_err("Failed malloc\n");
+		return 0;
+	}
+
+	tmpbuff = cdata->buf;
+
+	tmpbuff += msg_hdr_sz;
+	for (i = 0; (i < cdata->write_idx); i++) {
+		/* Sanity check */
+		BUG_ON((tmpbuff - cdata->buf) > cdata->numbytes);
+
+		if (!cdata->kvp[i].valid)
+			continue;
+
+		memcpy(tmpbuff, &cdata->kvp[i].key, sizeof(uint32_t));
+		tmpbuff += sizeof(uint32_t);
+
+		memcpy(tmpbuff, &cdata->kvp[i].nbytes, sizeof(uint32_t));
+		tmpbuff += sizeof(uint32_t);
+
+		memcpy(tmpbuff, cdata->kvp[i].value, cdata->kvp[i].nbytes);
+		tmpbuff += cdata->kvp[i].nbytes;
+
+		if (set == MSM_RPM_CTX_SLEEP_SET)
+			msm_rpm_notify_sleep_chain(cdata->client_buf,
+					&cdata->kvp[i]);
+
+	}
+
+	memcpy(cdata->buf, cdata->client_buf, msg_hdr_sz);
+	if ((set == MSM_RPM_CTX_SLEEP_SET) &&
+		!msm_rpm_smd_buffer_request(cdata, msg_size,
+			GFP_FLAG(noirq)))
+		return 1;
+
+	msg_id = msm_rpm_get_next_msg_id();
+	/* Set the version bit for new protocol */
+	set_msg_ver(cdata->buf, rpm_msg_fmt_ver);
+	set_msg_id(cdata->buf, msg_id);
+	set_msg_id(cdata->client_buf, msg_id);
+
+	if (msm_rpm_debug_mask
+	    & (MSM_RPM_LOG_REQUEST_PRETTY | MSM_RPM_LOG_REQUEST_RAW))
+		msm_rpm_log_request(cdata);
+
+	if (standalone) {
+		for (i = 0; (i < cdata->write_idx); i++)
+			cdata->kvp[i].valid = false;
+
+		set_data_len(cdata->client_buf, 0);
+		ret = msg_id;
+		return ret;
+	}
+
+	msm_rpm_add_wait_list(msg_id, noack);
+
+	ret = msm_rpm_send_buffer(&cdata->buf[0], msg_size, noirq);
+
+	if (ret == msg_size) {
+		for (i = 0; (i < cdata->write_idx); i++)
+			cdata->kvp[i].valid = false;
+		set_data_len(cdata->client_buf, 0);
+		ret = msg_id;
+		trace_rpm_smd_send_active_set(msg_id,
+			get_rsc_type(cdata->client_buf),
+			get_rsc_id(cdata->client_buf));
+	} else if (ret < msg_size) {
+		struct msm_rpm_wait_data *rc;
+
+		pr_err("Failed to write data msg_size:%d ret:%d msg_id:%d\n",
+				msg_size, ret, msg_id);
+		ret = 0;
+		rc = msm_rpm_get_entry_from_msg_id(msg_id);
+		if (rc)
+			msm_rpm_free_list_entry(rc);
+	}
+	return ret;
+}
+
+static int _msm_rpm_send_request(struct msm_rpm_request *handle, bool noack)
+{
+	int ret;
+	static DEFINE_MUTEX(send_mtx);
+
+	mutex_lock(&send_mtx);
+	ret = msm_rpm_send_data(handle, MSM_RPM_MSG_REQUEST_TYPE, false, noack);
+	mutex_unlock(&send_mtx);
+
+	return ret;
+}
+
+int msm_rpm_send_request(struct msm_rpm_request *handle)
+{
+	return _msm_rpm_send_request(handle, false);
+}
+EXPORT_SYMBOL(msm_rpm_send_request);
+
+int msm_rpm_send_request_noirq(struct msm_rpm_request *handle)
+{
+	return msm_rpm_send_data(handle, MSM_RPM_MSG_REQUEST_TYPE, true, false);
+}
+EXPORT_SYMBOL(msm_rpm_send_request_noirq);
+
+void *msm_rpm_send_request_noack(struct msm_rpm_request *handle)
+{
+	int ret;
+
+	ret = _msm_rpm_send_request(handle, true);
+
+	return ret < 0 ? ERR_PTR(ret) : NULL;
+}
+EXPORT_SYMBOL(msm_rpm_send_request_noack);
+
+int msm_rpm_wait_for_ack(uint32_t msg_id)
+{
+	struct msm_rpm_wait_data *elem;
+	int rc = 0;
+
+	if (!msg_id) {
+		pr_err("Invalid msg id\n");
+		return -ENOMEM;
+	}
+
+	if (msg_id == 1)
+		return rc;
+
+	if (standalone)
+		return rc;
+
+	elem = msm_rpm_get_entry_from_msg_id(msg_id);
+	if (!elem)
+		return rc;
+
+	wait_for_completion(&elem->ack);
+	trace_rpm_smd_ack_recvd(0, msg_id, 0xDEADFEED);
+
+	rc = elem->errno;
+	msm_rpm_free_list_entry(elem);
+
+	return rc;
+}
+EXPORT_SYMBOL(msm_rpm_wait_for_ack);
+
+static void msm_rpm_smd_read_data_noirq(uint32_t msg_id)
+{
+	uint32_t id = 0;
+
+	while (id != msg_id) {
+		if (smd_is_pkt_avail(msm_rpm_data.ch_info)) {
+			int errno;
+			char buf[MAX_ERR_BUFFER_SIZE] = {};
+
+			msm_rpm_read_smd_data(buf);
+			id = msm_rpm_get_msg_id_from_ack(buf);
+			errno = msm_rpm_get_error_from_ack(buf);
+			trace_rpm_smd_ack_recvd(1, msg_id, errno);
+			msm_rpm_process_ack(id, errno);
+		}
+	}
+}
+
+static void msm_rpm_glink_read_data_noirq(struct msm_rpm_wait_data *elem)
+{
+	int ret;
+
+	/* Use rx_poll method to read the message from RPM */
+	while (elem->errno) {
+		ret = glink_rpm_rx_poll(glink_data->glink_handle);
+		if (ret >= 0) {
+			/*
+			 * We might have received a notification.
+			 * Now we have to check whether the notification
+			 * received is the one we are interested in.
+			 * Wait a few usec for the notification
+			 * before retrying the poll.
+			 */
+			udelay(50);
+		} else {
+			pr_err("rx poll return error = %d\n", ret);
+		}
+	}
+}
+
+int msm_rpm_wait_for_ack_noirq(uint32_t msg_id)
+{
+	struct msm_rpm_wait_data *elem;
+	unsigned long flags;
+	int rc = 0;
+
+	if (!msg_id) {
+		pr_err("Invalid msg id\n");
+		return -ENOMEM;
+	}
+
+	if (msg_id == 1)
+		return 0;
+
+	if (standalone)
+		return 0;
+
+	spin_lock_irqsave(&msm_rpm_data.smd_lock_read, flags);
+
+	elem = msm_rpm_get_entry_from_msg_id(msg_id);
+
+	if (!elem)
+		/*
+		 * Should this be a bug?
+		 * Is it OK for another thread to read the msg?
+		 */
+		goto wait_ack_cleanup;
+
+	if (elem->errno != INIT_ERROR) {
+		rc = elem->errno;
+		msm_rpm_free_list_entry(elem);
+		goto wait_ack_cleanup;
+	}
+
+	if (!glink_enabled)
+		msm_rpm_smd_read_data_noirq(msg_id);
+	else
+		msm_rpm_glink_read_data_noirq(elem);
+
+	rc = elem->errno;
+
+	msm_rpm_free_list_entry(elem);
+wait_ack_cleanup:
+	spin_unlock_irqrestore(&msm_rpm_data.smd_lock_read, flags);
+
+	if (!glink_enabled)
+		if (smd_is_pkt_avail(msm_rpm_data.ch_info))
+			tasklet_schedule(&data_tasklet);
+	return rc;
+}
+EXPORT_SYMBOL(msm_rpm_wait_for_ack_noirq);
+
+void *msm_rpm_send_message_noack(enum msm_rpm_set set, uint32_t rsc_type,
+		uint32_t rsc_id, struct msm_rpm_kvp *kvp, int nelems)
+{
+	int i, rc;
+	struct msm_rpm_request *req =
+		msm_rpm_create_request_common(set, rsc_type, rsc_id, nelems,
+			       false);
+
+	if (IS_ERR(req))
+		return req;
+
+	if (!req)
+		return ERR_PTR(-ENOMEM);
+
+	for (i = 0; i < nelems; i++) {
+		rc = msm_rpm_add_kvp_data(req, kvp[i].key,
+				kvp[i].data, kvp[i].length);
+		if (rc)
+			goto bail;
+	}
+
+	rc = PTR_ERR(msm_rpm_send_request_noack(req));
+bail:
+	msm_rpm_free_request(req);
+	return rc < 0 ? ERR_PTR(rc) : NULL;
+}
+EXPORT_SYMBOL(msm_rpm_send_message_noack);
+
+int msm_rpm_send_message(enum msm_rpm_set set, uint32_t rsc_type,
+		uint32_t rsc_id, struct msm_rpm_kvp *kvp, int nelems)
+{
+	int i, rc;
+	struct msm_rpm_request *req =
+		msm_rpm_create_request(set, rsc_type, rsc_id, nelems);
+
+	if (IS_ERR(req))
+		return PTR_ERR(req);
+
+	if (!req)
+		return -ENOMEM;
+
+	for (i = 0; i < nelems; i++) {
+		rc = msm_rpm_add_kvp_data(req, kvp[i].key,
+				kvp[i].data, kvp[i].length);
+		if (rc)
+			goto bail;
+	}
+
+	rc = msm_rpm_wait_for_ack(msm_rpm_send_request(req));
+bail:
+	msm_rpm_free_request(req);
+	return rc;
+}
+EXPORT_SYMBOL(msm_rpm_send_message);
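+
+/*
+ * Minimal usage sketch for the one-shot API above (the resource type,
+ * resource id and key below are hypothetical placeholders, not real
+ * RPM resources):
+ *
+ *	uint32_t val = 1;
+ *	struct msm_rpm_kvp kvp = {
+ *		.key = 0x12345678,
+ *		.data = (uint8_t *)&val,
+ *		.length = sizeof(val),
+ *	};
+ *
+ *	rc = msm_rpm_send_message(MSM_RPM_CTX_ACTIVE_SET, rsc_type,
+ *			rsc_id, &kvp, 1);
+ *
+ * This builds a request, sends it to the RPM and blocks until the ack
+ * arrives (or returns a negative errno on failure).
+ */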
+
+int msm_rpm_send_message_noirq(enum msm_rpm_set set, uint32_t rsc_type,
+		uint32_t rsc_id, struct msm_rpm_kvp *kvp, int nelems)
+{
+	int i, rc;
+	struct msm_rpm_request *req =
+		msm_rpm_create_request_noirq(set, rsc_type, rsc_id, nelems);
+
+	if (IS_ERR(req))
+		return PTR_ERR(req);
+
+	if (!req)
+		return -ENOMEM;
+
+	for (i = 0; i < nelems; i++) {
+		rc = msm_rpm_add_kvp_data_noirq(req, kvp[i].key,
+					kvp[i].data, kvp[i].length);
+		if (rc)
+			goto bail;
+	}
+
+	rc = msm_rpm_wait_for_ack_noirq(msm_rpm_send_request_noirq(req));
+bail:
+	msm_rpm_free_request(req);
+	return rc;
+}
+EXPORT_SYMBOL(msm_rpm_send_message_noirq);
+
+/**
+ * During power collapse, the rpm driver disables the SMD interrupts to make
+ * sure that the interrupt doesn't wake us from sleep.
+ */
+int msm_rpm_enter_sleep(bool print, const struct cpumask *cpumask)
+{
+	int ret = 0;
+
+	if (standalone)
+		return 0;
+
+	if (!glink_enabled)
+		ret = smd_mask_receive_interrupt(msm_rpm_data.ch_info,
+								true, cpumask);
+	else
+		ret = glink_rpm_mask_rx_interrupt(glink_data->glink_handle,
+							true, (void *)cpumask);
+
+	if (!ret) {
+		ret = msm_rpm_flush_requests(print);
+
+		if (ret) {
+			if (!glink_enabled)
+				smd_mask_receive_interrupt(
+					msm_rpm_data.ch_info, false, NULL);
+			else
+				glink_rpm_mask_rx_interrupt(
+					glink_data->glink_handle, false, NULL);
+		}
+	}
+	return ret;
+}
+EXPORT_SYMBOL(msm_rpm_enter_sleep);
+
+/**
+ * When the system resumes from power collapse, the SMD interrupt disabled by
+ * the enter function has to be re-enabled to continue processing SMD
+ * messages.
+ */
+void msm_rpm_exit_sleep(void)
+{
+	int ret;
+
+	if (standalone)
+		return;
+
+	do  {
+		ret =  msm_rpm_read_sleep_ack();
+	} while (ret > 0);
+
+	if (!glink_enabled)
+		smd_mask_receive_interrupt(msm_rpm_data.ch_info, false, NULL);
+	else
+		glink_rpm_mask_rx_interrupt(glink_data->glink_handle,
+								false, NULL);
+}
+EXPORT_SYMBOL(msm_rpm_exit_sleep);
+
+/*
+ * Whenever there is data from the RPM, notify_rx will be called.
+ * This function is invoked in either interrupt or polling context.
+ */
+static void msm_rpm_trans_notify_rx(void *handle, const void *priv,
+			const void *pkt_priv, const void *ptr, size_t size)
+{
+	uint32_t msg_id;
+	int errno;
+	char buf[MAX_ERR_BUFFER_SIZE] = {0};
+	struct msm_rpm_wait_data *elem;
+	static DEFINE_SPINLOCK(rx_notify_lock);
+	unsigned long flags;
+
+	if (!size)
+		return;
+
+	BUG_ON(size > MAX_ERR_BUFFER_SIZE);
+
+	spin_lock_irqsave(&rx_notify_lock, flags);
+	memcpy(buf, ptr, size);
+	msg_id = msm_rpm_get_msg_id_from_ack(buf);
+	errno = msm_rpm_get_error_from_ack(buf);
+	elem = msm_rpm_get_entry_from_msg_id(msg_id);
+
+	/*
+	 * This applies to sleep set requests: sleep set
+	 * requests are not added to the wait list, so
+	 * without this check we would run into a NULL
+	 * pointer dereference.
+	 */
+	if (!elem) {
+		spin_unlock_irqrestore(&rx_notify_lock, flags);
+		glink_rx_done(handle, ptr, 0);
+		return;
+	}
+
+	msm_rpm_process_ack(msg_id, errno);
+	spin_unlock_irqrestore(&rx_notify_lock, flags);
+
+	glink_rx_done(handle, ptr, 0);
+}
+
+static void msm_rpm_trans_notify_state(void *handle, const void *priv,
+				   unsigned event)
+{
+	switch (event) {
+	case GLINK_CONNECTED:
+		glink_data->glink_handle = handle;
+
+		if (IS_ERR_OR_NULL(glink_data->glink_handle)) {
+			pr_err("glink_handle %d\n",
+					(int)PTR_ERR(glink_data->glink_handle));
+			BUG_ON(1);
+		}
+
+		/*
+		 * Do not allow clients to send data to RPM until glink
+		 * is fully open.
+		 */
+		probe_status = 0;
+		pr_info("glink config params: transport=%s, edge=%s, name=%s\n",
+			glink_data->xprt,
+			glink_data->edge,
+			glink_data->name);
+		break;
+	default:
+		pr_err("Unrecognized event %d\n", event);
+		break;
+	}
+}
+
+static void msm_rpm_trans_notify_tx_done(void *handle, const void *priv,
+					const void *pkt_priv, const void *ptr)
+{
+}
+
+static void msm_rpm_glink_open_work(struct work_struct *work)
+{
+	pr_debug("Opening glink channel\n");
+	glink_data->glink_handle = glink_open(glink_data->open_cfg);
+
+	if (IS_ERR_OR_NULL(glink_data->glink_handle)) {
+		pr_err("Error: glink_open failed %d\n",
+				(int)PTR_ERR(glink_data->glink_handle));
+		BUG_ON(1);
+	}
+}
+
+static void msm_rpm_glink_notifier_cb(struct glink_link_state_cb_info *cb_info,
+					void *priv)
+{
+	struct glink_open_config *open_config;
+	static bool first = true;
+
+	if (!cb_info) {
+		pr_err("Missing callback data\n");
+		return;
+	}
+
+	switch (cb_info->link_state) {
+	case GLINK_LINK_STATE_UP:
+		if (first)
+			first = false;
+		else
+			break;
+		open_config = kzalloc(sizeof(*open_config), GFP_KERNEL);
+		if (!open_config) {
+			pr_err("Could not allocate memory\n");
+			break;
+		}
+
+		glink_data->open_cfg = open_config;
+		pr_debug("glink link state up cb receieved\n");
+		INIT_WORK(&glink_data->work, msm_rpm_glink_open_work);
+
+		open_config->priv = glink_data;
+		open_config->name = glink_data->name;
+		open_config->edge = glink_data->edge;
+		open_config->notify_rx = msm_rpm_trans_notify_rx;
+		open_config->notify_tx_done = msm_rpm_trans_notify_tx_done;
+		open_config->notify_state = msm_rpm_trans_notify_state;
+		schedule_work(&glink_data->work);
+		break;
+	default:
+		pr_err("Unrecognised state = %d\n", cb_info->link_state);
+		break;
+	};
+}
+
+static int msm_rpm_glink_dt_parse(struct platform_device *pdev,
+				struct glink_apps_rpm_data *glink_data)
+{
+	char *key = NULL;
+	int ret;
+
+	if (of_device_is_compatible(pdev->dev.of_node, "qcom,rpm-glink")) {
+		glink_enabled = true;
+	} else {
+		pr_warn("qcom,rpm-glink compatible not matches\n");
+		ret = -EINVAL;
+		return ret;
+	}
+
+	key = "qcom,glink-edge";
+	ret = of_property_read_string(pdev->dev.of_node, key,
+							&glink_data->edge);
+	if (ret) {
+		pr_err("Failed to read node: %s, key=%s\n",
+			pdev->dev.of_node->full_name, key);
+		return ret;
+	}
+
+	key = "rpm-channel-name";
+	ret = of_property_read_string(pdev->dev.of_node, key,
+							&glink_data->name);
+	if (ret)
+		pr_err("%s(): Failed to read node: %s, key=%s\n", __func__,
+			pdev->dev.of_node->full_name, key);
+
+	return ret;
+}
+
+static int msm_rpm_glink_link_setup(struct glink_apps_rpm_data *glink_data,
+						struct platform_device *pdev)
+{
+	struct glink_link_info *link_info;
+	void *link_state_cb_handle;
+	struct device *dev = &pdev->dev;
+	int ret = 0;
+
+	link_info = devm_kzalloc(dev, sizeof(struct glink_link_info),
+								GFP_KERNEL);
+	if (!link_info) {
+		pr_err("Could not allocate memory\n");
+		ret = -ENOMEM;
+		return ret;
+	}
+
+	glink_data->link_info = link_info;
+
+	/*
+	 * Setup link info parameters
+	 */
+	link_info->edge = glink_data->edge;
+	link_info->glink_link_state_notif_cb =
+					msm_rpm_glink_notifier_cb;
+	link_state_cb_handle = glink_register_link_state_cb(link_info, NULL);
+	if (IS_ERR_OR_NULL(link_state_cb_handle)) {
+		pr_err("Could not register cb\n");
+		ret = PTR_ERR(link_state_cb_handle);
+		return ret;
+	}
+
+	spin_lock_init(&msm_rpm_data.smd_lock_read);
+	spin_lock_init(&msm_rpm_data.smd_lock_write);
+
+	return ret;
+}
+
+static int msm_rpm_dev_glink_probe(struct platform_device *pdev)
+{
+	int ret = -ENOMEM;
+	struct device *dev = &pdev->dev;
+
+	glink_data = devm_kzalloc(dev, sizeof(*glink_data), GFP_KERNEL);
+	if (!glink_data) {
+		pr_err("Could not allocate memory\n");
+		return ret;
+	}
+
+	ret = msm_rpm_glink_dt_parse(pdev, glink_data);
+	if (ret < 0) {
+		devm_kfree(dev, glink_data);
+		return ret;
+	}
+
+	ret = msm_rpm_glink_link_setup(glink_data, pdev);
+	if (ret < 0) {
+		/*
+		 * If the glink setup fails there is no
+		 * fall back mechanism to SMD.
+		 */
+		pr_err("GLINK setup fail ret = %d\n", ret);
+		BUG_ON(1);
+	}
+
+	return ret;
+}
+
+static int msm_rpm_dev_probe(struct platform_device *pdev)
+{
+	char *key = NULL;
+	int ret = 0;
+	void __iomem *reg_base;
+	uint32_t version = V0_PROTOCOL_VERSION; /* set to default v0 format */
+
+	/*
+	 * Check for standalone support
+	 */
+	key = "rpm-standalone";
+	standalone = of_property_read_bool(pdev->dev.of_node, key);
+	if (standalone) {
+		probe_status = ret;
+		goto skip_init;
+	}
+
+	reg_base = of_iomap(pdev->dev.of_node, 0);
+
+	if (reg_base) {
+		version = readq_relaxed(reg_base);
+		iounmap(reg_base);
+	}
+
+	if (version == V1_PROTOCOL_VERSION)
+		rpm_msg_fmt_ver = RPM_MSG_V1_FMT;
+
+	pr_debug("RPM-SMD running version %d/n", rpm_msg_fmt_ver);
+
+	ret = msm_rpm_dev_glink_probe(pdev);
+	if (!ret) {
+		pr_info("APSS-RPM communication over GLINK\n");
+		msm_rpm_send_buffer = msm_rpm_glink_send_buffer;
+		of_platform_populate(pdev->dev.of_node, NULL, NULL,
+							&pdev->dev);
+		return ret;
+	} else {
+		msm_rpm_send_buffer = msm_rpm_send_smd_buffer;
+		pr_info("APSS-RPM communication over SMD\n");
+	}
+
+	key = "rpm-channel-name";
+	ret = of_property_read_string(pdev->dev.of_node, key,
+					&msm_rpm_data.ch_name);
+	if (ret) {
+		pr_err("%s(): Failed to read node: %s, key=%s\n", __func__,
+			pdev->dev.of_node->full_name, key);
+		goto fail;
+	}
+
+	key = "rpm-channel-type";
+	ret = of_property_read_u32(pdev->dev.of_node, key,
+					&msm_rpm_data.ch_type);
+	if (ret) {
+		pr_err("%s(): Failed to read node: %s, key=%s\n", __func__,
+			pdev->dev.of_node->full_name, key);
+		goto fail;
+	}
+
+	ret = smd_named_open_on_edge(msm_rpm_data.ch_name,
+				msm_rpm_data.ch_type,
+				&msm_rpm_data.ch_info,
+				&msm_rpm_data,
+				msm_rpm_notify);
+	if (ret) {
+		if (ret != -EPROBE_DEFER) {
+			pr_err("%s: Cannot open RPM channel %s %d\n",
+				__func__, msm_rpm_data.ch_name,
+				msm_rpm_data.ch_type);
+		}
+		goto fail;
+	}
+
+	spin_lock_init(&msm_rpm_data.smd_lock_write);
+	spin_lock_init(&msm_rpm_data.smd_lock_read);
+	tasklet_init(&data_tasklet, data_fn_tasklet, 0);
+
+	wait_for_completion(&msm_rpm_data.smd_open);
+
+	smd_disable_read_intr(msm_rpm_data.ch_info);
+
+	msm_rpm_smd_wq = alloc_workqueue("rpm-smd",
+			WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_HIGHPRI, 1);
+	if (!msm_rpm_smd_wq) {
+		pr_err("%s: Unable to alloc rpm-smd workqueue\n", __func__);
+		ret = -EINVAL;
+		goto fail;
+	}
+	queue_work(msm_rpm_smd_wq, &msm_rpm_data.work);
+
+	probe_status = ret;
+skip_init:
+	of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev);
+
+	if (standalone)
+		pr_info("RPM running in standalone mode\n");
+fail:
+	return probe_status;
+}
+
+static const struct of_device_id msm_rpm_match_table[] = {
+	{.compatible = "qcom,rpm-smd"},
+	{.compatible = "qcom,rpm-glink"},
+	{},
+};
+
+static struct platform_driver msm_rpm_device_driver = {
+	.probe = msm_rpm_dev_probe,
+	.driver = {
+		.name = "rpm-smd",
+		.owner = THIS_MODULE,
+		.of_match_table = msm_rpm_match_table,
+	},
+};
+
+int __init msm_rpm_driver_init(void)
+{
+	static bool registered;
+
+	if (registered)
+		return 0;
+	registered = true;
+
+	return platform_driver_register(&msm_rpm_device_driver);
+}
+EXPORT_SYMBOL(msm_rpm_driver_init);
+arch_initcall(msm_rpm_driver_init);
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/soc/qcom/rpm-smd-debug.c	2019-01-22 16:16:26.671275095 +0100
@@ -0,0 +1,151 @@
+/* Copyright (c) 2013-2014, 2017,  The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "rpm-smd-debug: %s(): " fmt, __func__
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/debugfs.h>
+#include <linux/list.h>
+#include <linux/io.h>
+#include <linux/uaccess.h>
+#include <linux/slab.h>
+#include <soc/qcom/rpm-smd.h>
+
+#define MAX_MSG_BUFFER 350
+#define MAX_KEY_VALUE_PAIRS 20
+
+static struct dentry *rpm_debugfs_dir;
+
+static u32 string_to_uint(const u8 *str)
+{
+	int i, len;
+	u32 output = 0;
+
+	len = strnlen(str, sizeof(u32));
+	for (i = 0; i < len; i++)
+		output |= str[i] << (i * 8);
+
+	return output;
+}
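+
+/*
+ * Example: string_to_uint("clk0") packs up to four bytes little-endian:
+ * 'c' (0x63) | 'l' (0x6c) << 8 | 'k' (0x6b) << 16 | '0' (0x30) << 24
+ * = 0x306b6c63, the packed resource-type/key encoding the RPM expects.
+ */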
+
+static ssize_t rsc_ops_write(struct file *fp, const char __user *user_buffer,
+						size_t count, loff_t *position)
+{
+	char buf[MAX_MSG_BUFFER], rsc_type_str[6] = {}, rpm_set[8] = {},
+						key_str[6] = {};
+	int i, pos = -1, set = -1, nelems = -1;
+	char *cmp;
+	uint32_t rsc_type = 0, rsc_id = 0, key = 0, data = 0;
+	struct msm_rpm_request *req;
+
+	count = min(count, sizeof(buf) - 1);
+	if (copy_from_user(&buf, user_buffer, count))
+		return -EFAULT;
+	buf[count] = '\0';
+	cmp = strstrip(buf);
+
+	if (sscanf(cmp, "%7s %5s %u %d %n", rpm_set, rsc_type_str,
+				&rsc_id, &nelems, &pos) != 4) {
+		pr_err("Invalid number of arguments passed\n");
+		goto err;
+	}
+
+	if (strlen(rpm_set) > 6 || strlen(rsc_type_str) > 4) {
+		pr_err("Invalid value of set or resource type\n");
+		goto err;
+	}
+
+	if (!strcmp(rpm_set, "active"))
+		set = 0;
+	else if (!strcmp(rpm_set, "sleep"))
+		set = 1;
+
+	rsc_type = string_to_uint(rsc_type_str);
+
+	if (set < 0 || nelems < 0) {
+		pr_err("Invalid value of set or nelems\n");
+		goto err;
+	}
+	if (nelems > MAX_KEY_VALUE_PAIRS) {
+		pr_err("Exceeded max no of key-value entries\n");
+		goto err;
+	}
+
+	req = msm_rpm_create_request(set, rsc_type, rsc_id, nelems);
+	if (!req)
+		return -ENOMEM;
+
+	for (i = 0; i < nelems; i++) {
+		cmp += pos;
+		if (sscanf(cmp, "%5s %n", key_str, &pos) != 1) {
+			pr_err("Invalid number of arguments passed\n");
+			goto err_request;
+		}
+
+		if (strlen(key_str) > 4) {
+			pr_err("Key value cannot be more than 4 charecters");
+			goto err_request;
+		}
+		key = string_to_uint(key_str);
+		if (!key) {
+			pr_err("Key values entered incorrectly\n");
+			goto err_request;
+		}
+
+		cmp += pos;
+		if (sscanf(cmp, "%u %n", &data, &pos) != 1) {
+			pr_err("Invalid number of arguments passed\n");
+			goto err_request;
+		}
+
+		if (msm_rpm_add_kvp_data(req, key,
+				(void *)&data, sizeof(data)))
+			goto err_request;
+	}
+
+	if (msm_rpm_wait_for_ack(msm_rpm_send_request(req)))
+		pr_err("Sending the RPM message failed\n");
+
+err_request:
+	msm_rpm_free_request(req);
+err:
+	return count;
+}
+
+static const struct file_operations rsc_ops = {
+	.write = rsc_ops_write,
+};
+
+static int __init rpm_smd_debugfs_init(void)
+{
+	rpm_debugfs_dir = debugfs_create_dir("rpm_send_msg", NULL);
+	if (!rpm_debugfs_dir)
+		return -ENOMEM;
+
+	if (!debugfs_create_file("message", S_IWUSR, rpm_debugfs_dir, NULL,
+								&rsc_ops))
+		return -ENOMEM;
+
+	return 0;
+}
+late_initcall(rpm_smd_debugfs_init);
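+
+/*
+ * Usage sketch (resource and key names are hypothetical): send one
+ * key/value pair, key "swen" with value 1, to resource type "clk0",
+ * id 2, in the active set:
+ *
+ *	echo "active clk0 2 1 swen 1" > /sys/kernel/debug/rpm_send_msg/message
+ *
+ * The tokens map to rsc_ops_write() above as: set, resource type,
+ * resource id, number of key/value pairs, then <key> <data> per pair.
+ */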
+
+static void __exit rpm_smd_debugfs_exit(void)
+{
+	debugfs_remove_recursive(rpm_debugfs_dir);
+}
+module_exit(rpm_smd_debugfs_exit);
+
+MODULE_DESCRIPTION("RPM SMD Debug Driver");
+MODULE_LICENSE("GPL");
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/soc/qcom/rpm_stats.c	2019-10-29 09:26:24.821214706 +0100
@@ -0,0 +1,603 @@
+/* Copyright (c) 2011-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/mm.h>
+#include <linux/types.h>
+#include <linux/of.h>
+#include <linux/uaccess.h>
+#include <asm/arch_timer.h>
+#include "rpm_stats.h"
+
+#define GET_PDATA_OF_ATTR(attr) \
+	(container_of(attr, struct msm_rpmstats_kobj_attr, ka)->pd)
+
+static DEFINE_MUTEX(rpm_stats_mutex);
+
+enum {
+	ID_COUNTER,
+	ID_ACCUM_TIME_SCLK,
+	ID_MAX,
+};
+
+static char *msm_rpmstats_id_labels[ID_MAX] = {
+	[ID_COUNTER] = "Count",
+	[ID_ACCUM_TIME_SCLK] = "Total time(uSec)",
+};
+
+#define SCLK_HZ 32768
+#define MSM_ARCH_TIMER_FREQ 19200000
+
+struct msm_rpmstats_record {
+	char		name[32];
+	uint32_t	id;
+	uint32_t	val;
+};
+
+struct msm_rpmstats_private_data {
+	void __iomem *reg_base;
+	u32 num_records;
+	u32 read_idx;
+	u32 len;
+	char buf[320];
+	struct msm_rpmstats_platform_data *platform_data;
+};
+
+struct msm_rpm_stats_data_v2 {
+	u32 stat_type;
+	u32 count;
+	u64 last_entered_at;
+	u64 last_exited_at;
+	u64 accumulated;
+	u32 client_votes;
+	u32 reserved[3];
+};
+
+struct msm_rpmstats_kobj_attr {
+	struct kobj_attribute ka;
+	struct msm_rpmstats_platform_data *pd;
+};
+
+static struct dentry *heap_dent;
+
+static inline u64 get_time_in_sec(u64 counter)
+{
+	do_div(counter, MSM_ARCH_TIMER_FREQ);
+	return counter;
+}
+
+static inline u64 get_time_in_msec(u64 counter)
+{
+	do_div(counter, MSM_ARCH_TIMER_FREQ);
+	counter *= MSEC_PER_SEC;
+	return counter;
+}
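+
+/*
+ * Example: with MSM_ARCH_TIMER_FREQ = 19200000 Hz, a counter delta of
+ * 38400000 ticks is 38400000 / 19200000 = 2 s, which get_time_in_msec()
+ * scales to 2 * 1000 = 2000 msec.  Note that the divide-then-multiply
+ * order truncates any sub-second remainder.
+ */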
+
+static inline int msm_rpmstats_append_data_to_buf(char *buf,
+		struct msm_rpm_stats_data_v2 *data, int buflength)
+{
+	char stat_type[5];
+	u64 time_in_last_mode;
+	u64 time_since_last_mode;
+	u64 actual_last_sleep;
+
+	stat_type[4] = 0;
+	memcpy(stat_type, &data->stat_type, sizeof(u32));
+
+	time_in_last_mode = data->last_exited_at - data->last_entered_at;
+	time_in_last_mode = get_time_in_msec(time_in_last_mode);
+	time_since_last_mode = arch_counter_get_cntvct() - data->last_exited_at;
+	time_since_last_mode = get_time_in_sec(time_since_last_mode);
+	actual_last_sleep = get_time_in_msec(data->accumulated);
+
+	return snprintf(buf, buflength,
+		"RPM Mode:%s\n\t count:%d\ntime in last mode(msec):%llu\n"
+		"time since last mode(sec):%llu\nactual last sleep(msec):%llu\n"
+		"client votes: %#010x\n\n",
+		stat_type, data->count, time_in_last_mode,
+		time_since_last_mode, actual_last_sleep,
+		data->client_votes);
+}
+
+static inline u32 msm_rpmstats_read_long_register_v2(void __iomem *regbase,
+		int index, int offset)
+{
+	return readl_relaxed(regbase + offset +
+			index * sizeof(struct msm_rpm_stats_data_v2));
+}
+
+static inline u64 msm_rpmstats_read_quad_register_v2(void __iomem *regbase,
+		int index, int offset)
+{
+	u64 dst;
+
+	memcpy_fromio(&dst,
+		regbase + offset + index * sizeof(struct msm_rpm_stats_data_v2),
+		8);
+	return dst;
+}
+
+static inline int msm_rpmstats_copy_stats_v2(
+			struct msm_rpmstats_private_data *prvdata)
+{
+	void __iomem *reg;
+	struct msm_rpm_stats_data_v2 data;
+	int i, length;
+
+	reg = prvdata->reg_base;
+
+	for (i = 0, length = 0; i < prvdata->num_records; i++) {
+
+		data.stat_type = msm_rpmstats_read_long_register_v2(reg, i,
+				offsetof(struct msm_rpm_stats_data_v2,
+					stat_type));
+		data.count = msm_rpmstats_read_long_register_v2(reg, i,
+				offsetof(struct msm_rpm_stats_data_v2, count));
+		data.last_entered_at = msm_rpmstats_read_quad_register_v2(reg,
+				i, offsetof(struct msm_rpm_stats_data_v2,
+					last_entered_at));
+		data.last_exited_at = msm_rpmstats_read_quad_register_v2(reg,
+				i, offsetof(struct msm_rpm_stats_data_v2,
+					last_exited_at));
+
+		data.accumulated = msm_rpmstats_read_quad_register_v2(reg,
+				i, offsetof(struct msm_rpm_stats_data_v2,
+					accumulated));
+		data.client_votes = msm_rpmstats_read_long_register_v2(reg,
+				i, offsetof(struct msm_rpm_stats_data_v2,
+					client_votes));
+		length += msm_rpmstats_append_data_to_buf(prvdata->buf + length,
+				&data, sizeof(prvdata->buf) - length);
+		prvdata->read_idx++;
+	}
+	return length;
+}
+
+static inline unsigned long msm_rpmstats_read_register(void __iomem *regbase,
+		int index, int offset)
+{
+	return readl_relaxed(regbase + index * 12 + (offset + 1) * 4);
+}
+
+static void msm_rpmstats_strcpy(char *dest, char *src)
+{
+	union {
+		char ch[4];
+		unsigned long word;
+	} string;
+	int index = 0;
+
+	do {
+		int i;
+
+		string.word = readl_relaxed(src + 4 * index);
+		for (i = 0; i < 4; i++) {
+			*dest++ = string.ch[i];
+			if (!string.ch[i])
+				break;
+		}
+		index++;
+	} while (*(dest-1));
+}
+
+static int msm_rpmstats_copy_stats(struct msm_rpmstats_private_data *pdata)
+{
+	struct msm_rpmstats_record record;
+	unsigned long ptr;
+	unsigned long offset;
+	char *str;
+	uint64_t usec;
+
+	ptr = msm_rpmstats_read_register(pdata->reg_base, pdata->read_idx, 0);
+	offset = (ptr - (unsigned long)pdata->platform_data->phys_addr_base);
+
+	if (offset > pdata->platform_data->phys_size)
+		str = (char *)ioremap(ptr, SZ_256);
+	else
+		str = (char *) pdata->reg_base + offset;
+
+	msm_rpmstats_strcpy(record.name, str);
+
+	if (offset > pdata->platform_data->phys_size)
+		iounmap(str);
+
+	record.id = msm_rpmstats_read_register(pdata->reg_base,
+						pdata->read_idx, 1);
+	if (record.id >= ID_MAX) {
+		pr_err("%s: array out of bound error found.\n",
+			__func__);
+		return -EINVAL;
+	}
+
+	record.val = msm_rpmstats_read_register(pdata->reg_base,
+						pdata->read_idx, 2);
+
+	if (record.id == ID_ACCUM_TIME_SCLK) {
+		usec = record.val * USEC_PER_SEC;
+		do_div(usec, SCLK_HZ);
+	} else {
+		usec = (unsigned long)record.val;
+	}
+
+	pdata->read_idx++;
+
+	return snprintf(pdata->buf, sizeof(pdata->buf),
+			"RPM Mode:%s\n\t%s:%llu\n",
+			record.name,
+			msm_rpmstats_id_labels[record.id],
+			usec);
+}
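+
+/*
+ * Example: for an ID_ACCUM_TIME_SCLK record, a raw value of 32768 sleep
+ * clock ticks converts to 32768 * USEC_PER_SEC / SCLK_HZ = 1000000 usec,
+ * i.e. one second at the 32.768 kHz sleep clock, matching the
+ * "Total time(uSec)" label above.
+ */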
+
+static ssize_t msm_rpmstats_file_read(struct file *file, char __user *bufu,
+				  size_t count, loff_t *ppos)
+{
+	struct msm_rpmstats_private_data *prvdata;
+	ssize_t ret;
+
+	mutex_lock(&rpm_stats_mutex);
+	prvdata = file->private_data;
+
+	if (!prvdata) {
+		ret = -EINVAL;
+		goto exit;
+	}
+
+	if (!bufu || count == 0) {
+		ret = -EINVAL;
+		goto exit;
+	}
+
+	if (prvdata->platform_data->version == 1) {
+		if (!prvdata->num_records)
+			prvdata->num_records = readl_relaxed(prvdata->reg_base);
+	}
+
+	if ((*ppos >= prvdata->len) &&
+		(prvdata->read_idx < prvdata->num_records)) {
+		if (prvdata->platform_data->version == 1)
+			prvdata->len = msm_rpmstats_copy_stats(prvdata);
+		else if (prvdata->platform_data->version == 2)
+			prvdata->len = msm_rpmstats_copy_stats_v2(prvdata);
+		*ppos = 0;
+	}
+	ret = simple_read_from_buffer(bufu, count, ppos,
+			prvdata->buf, prvdata->len);
+exit:
+	mutex_unlock(&rpm_stats_mutex);
+	return ret;
+}
+
+static int msm_rpmstats_file_open(struct inode *inode, struct file *file)
+{
+	struct msm_rpmstats_private_data *prvdata;
+	struct msm_rpmstats_platform_data *pdata;
+	int ret = 0;
+
+	mutex_lock(&rpm_stats_mutex);
+	pdata = inode->i_private;
+
+	file->private_data =
+		kmalloc(sizeof(struct msm_rpmstats_private_data), GFP_KERNEL);
+
+	if (!file->private_data) {
+		ret = -ENOMEM;
+		goto exit;
+	}
+
+	prvdata = file->private_data;
+
+	prvdata->reg_base = ioremap_nocache(pdata->phys_addr_base,
+					pdata->phys_size);
+	if (!prvdata->reg_base) {
+		kfree(file->private_data);
+		prvdata = NULL;
+		pr_err("%s: ERROR could not ioremap start=%pa, len=%u\n",
+			__func__, &pdata->phys_addr_base,
+			pdata->phys_size);
+		ret = -EBUSY;
+		goto exit;
+	}
+
+	prvdata->read_idx = prvdata->num_records =  prvdata->len = 0;
+	prvdata->platform_data = pdata;
+	if (pdata->version == 2)
+		prvdata->num_records = 2;
+exit:
+	mutex_unlock(&rpm_stats_mutex);
+	return ret;
+}
+
+static int msm_rpmstats_file_close(struct inode *inode, struct file *file)
+{
+	struct msm_rpmstats_private_data *private = file->private_data;
+
+	mutex_lock(&rpm_stats_mutex);
+	if (private->reg_base)
+		iounmap(private->reg_base);
+	kfree(file->private_data);
+	mutex_unlock(&rpm_stats_mutex);
+
+	return 0;
+}
+
+static const struct file_operations msm_rpmstats_fops = {
+	.owner	  = THIS_MODULE,
+	.open	  = msm_rpmstats_file_open,
+	.read	  = msm_rpmstats_file_read,
+	.release  = msm_rpmstats_file_close,
+	.llseek   = no_llseek,
+};
+
+static int msm_rpmheap_file_show(struct seq_file *m, void *v)
+{
+	struct msm_rpmstats_platform_data *pdata;
+	void __iomem *reg_base;
+	uint32_t rpmheap_free;
+
+	if (!m->private)
+		return -EINVAL;
+
+	pdata = m->private;
+
+	reg_base = ioremap_nocache(pdata->heap_phys_addrbase, SZ_4);
+	if (!reg_base) {
+		pr_err("%s: ERROR could not ioremap start=%p\n",
+			__func__, &pdata->heap_phys_addrbase);
+		return -EBUSY;
+	}
+
+	rpmheap_free = readl_relaxed(reg_base);
+	iounmap(reg_base);
+
+	seq_printf(m, "RPM FREE HEAP SPACE is 0x%x Bytes\n", rpmheap_free);
+	return 0;
+}
+
+static int msm_rpmheap_file_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, msm_rpmheap_file_show, inode->i_private);
+}
+
+static const struct file_operations msm_rpmheap_fops = {
+	.owner	  = THIS_MODULE,
+	.open	  = msm_rpmheap_file_open,
+	.read	  = seq_read,
+	.release  = single_release,
+	.llseek   = no_llseek,
+};
+
+static ssize_t rpmstats_show(struct kobject *kobj,
+			struct kobj_attribute *attr, char *buf)
+{
+	struct msm_rpmstats_private_data *prvdata = NULL;
+	struct msm_rpmstats_platform_data *pdata = NULL;
+	ssize_t ret;
+
+	mutex_lock(&rpm_stats_mutex);
+	pdata = GET_PDATA_OF_ATTR(attr);
+
+	prvdata = kmalloc(sizeof(*prvdata), GFP_KERNEL);
+	if (!prvdata) {
+		ret = -ENOMEM;
+		goto kmalloc_fail;
+	}
+
+	prvdata->reg_base = ioremap_nocache(pdata->phys_addr_base,
+					pdata->phys_size);
+	if (!prvdata->reg_base) {
+		pr_err("%s: ERROR could not ioremap start=%pa, len=%u\n",
+			__func__, &pdata->phys_addr_base,
+			pdata->phys_size);
+		ret = -EBUSY;
+		goto ioremap_fail;
+	}
+
+	prvdata->read_idx = prvdata->num_records =  prvdata->len = 0;
+	prvdata->platform_data = pdata;
+	if (pdata->version == 2)
+		prvdata->num_records = 2;
+
+	if (prvdata->platform_data->version == 1) {
+		if (!prvdata->num_records)
+			prvdata->num_records =
+				readl_relaxed(prvdata->reg_base);
+	}
+
+	if (prvdata->read_idx < prvdata->num_records) {
+		if (prvdata->platform_data->version == 1)
+			prvdata->len = msm_rpmstats_copy_stats(prvdata);
+		else if (prvdata->platform_data->version == 2)
+			prvdata->len = msm_rpmstats_copy_stats_v2(
+					prvdata);
+	}
+
+	ret = snprintf(buf, prvdata->len, "%s", prvdata->buf);
+	iounmap(prvdata->reg_base);
+ioremap_fail:
+	kfree(prvdata);
+kmalloc_fail:
+	mutex_unlock(&rpm_stats_mutex);
+	return ret;
+}
+
+static int msm_rpmstats_create_sysfs(struct msm_rpmstats_platform_data *pd)
+{
+	struct kobject *rpmstats_kobj = NULL;
+	struct msm_rpmstats_kobj_attr *rpms_ka = NULL;
+	int ret = 0;
+
+	rpmstats_kobj = kobject_create_and_add("system_sleep", power_kobj);
+	if (!rpmstats_kobj) {
+		pr_err("%s: Cannot create rpmstats kobject\n", __func__);
+		ret = -ENOMEM;
+		goto fail;
+	}
+
+	rpms_ka = kzalloc(sizeof(*rpms_ka), GFP_KERNEL);
+	if (!rpms_ka) {
+		kobject_put(rpmstats_kobj);
+		ret = -ENOMEM;
+		goto fail;
+	}
+
+	sysfs_attr_init(&rpms_ka->ka.attr);
+	rpms_ka->pd = pd;
+	rpms_ka->ka.attr.mode = 0444;
+	rpms_ka->ka.attr.name = "stats";
+	rpms_ka->ka.show = rpmstats_show;
+	rpms_ka->ka.store = NULL;
+
+	ret = sysfs_create_file(rpmstats_kobj, &rpms_ka->ka.attr);
+
+fail:
+	return ret;
+}
+
+static int msm_rpmstats_probe(struct platform_device *pdev)
+{
+	struct dentry *dent = NULL;
+	struct msm_rpmstats_platform_data *pdata;
+	struct msm_rpmstats_platform_data *pd;
+	struct resource *res = NULL, *offset = NULL;
+	struct device_node *node = NULL;
+	uint32_t offset_addr = 0;
+	void __iomem *phys_ptr = NULL;
+	int ret = 0;
+
+	if (!pdev)
+		return -EINVAL;
+
+	pdata = kzalloc(sizeof(struct msm_rpmstats_platform_data), GFP_KERNEL);
+	if (!pdata)
+		return -ENOMEM;
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+							"phys_addr_base");
+	if (!res) {
+		kfree(pdata);
+		return -EINVAL;
+	}
+
+	offset = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+							"offset_addr");
+	if (offset) {
+		/* Remap the rpm-stats pointer */
+		phys_ptr = ioremap_nocache(offset->start, SZ_4);
+		if (!phys_ptr) {
+			pr_err("%s: Failed to ioremap address: %pa\n",
+					__func__, &offset->start);
+			kfree(pdata);
+			return -ENODEV;
+		}
+		offset_addr = readl_relaxed(phys_ptr);
+		iounmap(phys_ptr);
+	}
+
+	pdata->phys_addr_base  = res->start + offset_addr;
+
+	pdata->phys_size = resource_size(res);
+	node = pdev->dev.of_node;
+	if (pdev->dev.platform_data) {
+		pd = pdev->dev.platform_data;
+		pdata->version = pd->version;
+
+	} else if (node)
+		ret = of_property_read_u32(node,
+			"qcom,sleep-stats-version", &pdata->version);
+
+	if (!ret) {
+
+		dent = debugfs_create_file("rpm_stats", S_IRUGO, NULL,
+				pdata, &msm_rpmstats_fops);
+
+		if (!dent) {
+			pr_err("%s: ERROR rpm_stats debugfs_create_file	fail\n",
+					__func__);
+			kfree(pdata);
+			return -ENOMEM;
+		}
+
+	} else {
+		kfree(pdata);
+		return -EINVAL;
+	}
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+						"heap_phys_addrbase");
+	if (res) {
+		heap_dent = debugfs_create_file("rpm_heap", S_IRUGO, NULL,
+				pdata, &msm_rpmheap_fops);
+
+		if (!heap_dent) {
+			pr_err("%s: ERROR rpm_heap debugfs_create_file fail\n",
+					__func__);
+			kfree(pdata);
+			return -ENOMEM;
+		}
+		pdata->heap_phys_addrbase = res->start;
+	}
+
+	msm_rpmstats_create_sysfs(pdata);
+
+	platform_set_drvdata(pdev, dent);
+	return 0;
+}
+
+static int msm_rpmstats_remove(struct platform_device *pdev)
+{
+	struct dentry *dent;
+
+	dent = platform_get_drvdata(pdev);
+	debugfs_remove(dent);
+	debugfs_remove(heap_dent);
+	platform_set_drvdata(pdev, NULL);
+	return 0;
+}
+
+static const struct of_device_id rpm_stats_table[] = {
+	{ .compatible = "qcom,rpm-stats" },
+	{},
+};
+
+static struct platform_driver msm_rpmstats_driver = {
+	.probe	= msm_rpmstats_probe,
+	.remove = msm_rpmstats_remove,
+	.driver = {
+		.name = "msm_rpm_stat",
+		.owner = THIS_MODULE,
+		.of_match_table = rpm_stats_table,
+	},
+};
+
+static int __init msm_rpmstats_init(void)
+{
+	return platform_driver_register(&msm_rpmstats_driver);
+}
+
+static void __exit msm_rpmstats_exit(void)
+{
+	platform_driver_unregister(&msm_rpmstats_driver);
+}
+module_init(msm_rpmstats_init);
+module_exit(msm_rpmstats_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("MSM RPM Statistics driver");
+MODULE_ALIAS("platform:msm_stat_log");
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/soc/qcom/rpm_stats.h	2019-01-22 16:16:26.671275095 +0100
@@ -0,0 +1,42 @@
+/* Copyright (c) 2011-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __ARCH_ARM_MACH_MSM_RPM_STATS_H
+#define __ARCH_ARM_MACH_MSM_RPM_STATS_H
+
+#include <linux/types.h>
+
+struct msm_rpmstats_platform_data {
+	phys_addr_t phys_addr_base;
+	phys_addr_t heap_phys_addrbase;
+	u32 phys_size;
+	u32 version;
+};
+
+struct msm_rpm_master_stats_platform_data {
+	phys_addr_t phys_addr_base;
+	u32 phys_size;
+	char **masters;
+	/*
+	 * RPM maintains PC stats for each master in MSG RAM,
+	 * it allocates 256 bytes for this use.
+	 * No of masters differs for different targets.
+	 * Based on the number of masters, linux rpm stat
+	 * driver reads (32 * num_masters) bytes to display
+	 * master stats.
+	 */
+	s32 num_masters;
+	u32 master_offset;
+	u32 version;
+};
+#endif
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/soc/qcom/scm-boot.c	2019-01-22 16:16:26.671275095 +0100
@@ -0,0 +1,120 @@
+/* Copyright (c) 2010, 2014, 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <soc/qcom/scm.h>
+#include <soc/qcom/scm-boot.h>
+
+/*
+ * Set the cold/warm boot address for one of the CPU cores.
+ */
+int scm_set_boot_addr(phys_addr_t addr, unsigned int flags)
+{
+	struct {
+		u32 flags;
+		u32 addr;
+	} cmd;
+	struct scm_desc desc = {0};
+
+	if (!is_scm_armv8()) {
+		cmd.addr = addr;
+		cmd.flags = flags;
+		return scm_call(SCM_SVC_BOOT, SCM_BOOT_ADDR,
+				&cmd, sizeof(cmd), NULL, 0);
+	}
+
+	desc.args[0] = addr;
+	desc.args[1] = flags;
+	desc.arginfo = SCM_ARGS(2);
+
+	return scm_call2(SCM_SIP_FNID(SCM_SVC_BOOT, SCM_BOOT_ADDR), &desc);
+}
+EXPORT_SYMBOL(scm_set_boot_addr);
+
+/**
+ *	scm_set_boot_addr_mc - Set entry physical address for cpus
+ *	@addr:	32bit physical address
+ *	@aff0:	Collective bitmask of the affinity-level-0 of the mpidr
+ *		1<<aff0_CPU0| 1<<aff0_CPU1....... | 1<<aff0_CPU32
+ *		Supports maximum 32 cpus under any affinity level.
+ *	@aff1:	Collective bitmask of the affinity-level-1 of the mpidr
+ *	@aff2:	Collective bitmask of the affinity-level-2 of the mpidr
+ *	@flags:	Flag to differentiate between coldboot vs warmboot
+ */
+int scm_set_boot_addr_mc(phys_addr_t addr, u32 aff0,
+		u32 aff1, u32 aff2, u32 flags)
+{
+	struct {
+		u32 addr;
+		u32 aff0;
+		u32 aff1;
+		u32 aff2;
+		u32 reserved;
+		u32 flags;
+	} cmd;
+	struct scm_desc desc = {0};
+
+	if (!is_scm_armv8()) {
+		cmd.addr = addr;
+		cmd.aff0 = aff0;
+		cmd.aff1 = aff1;
+		cmd.aff2 = aff2;
+		/*
+		 * Reserved for future chips with affinity level 3
+		 * (effectively 1 << 0 on current parts).
+		 */
+		cmd.reserved = ~0U;
+		cmd.flags = flags | SCM_FLAG_HLOS;
+		return scm_call(SCM_SVC_BOOT, SCM_BOOT_ADDR_MC,
+				&cmd, sizeof(cmd), NULL, 0);
+	}
+
+	flags = flags | SCM_FLAG_HLOS;
+	desc.args[0] = addr;
+	desc.args[1] = aff0;
+	desc.args[2] = aff1;
+	desc.args[3] = aff2;
+	desc.args[4] = ~0ULL;
+	desc.args[5] = flags;
+	desc.arginfo = SCM_ARGS(6);
+
+	return scm_call2(SCM_SIP_FNID(SCM_SVC_BOOT, SCM_BOOT_ADDR_MC), &desc);
+}
+EXPORT_SYMBOL(scm_set_boot_addr_mc);
+
+/**
+ *	scm_set_warm_boot_addr_mc_for_all -
+ *	Set the warm boot entry physical address for __all__ possible cpus.
+ *	This API passes an all-set mask to secure-os and relies on secure-os
+ *	to set the boot address appropriately on the current system.
+ *	@addr:	32bit physical address
+ */
+int scm_set_warm_boot_addr_mc_for_all(phys_addr_t addr)
+{
+	return scm_set_boot_addr_mc(addr, ~0U, ~0U, ~0U,
+			SCM_FLAG_WARMBOOT_MC);
+}
+EXPORT_SYMBOL(scm_set_warm_boot_addr_mc_for_all);
+
+/**
+ *	scm_is_mc_boot_available -
+ *	Checks if TZ supports the boot API for multi-cluster configuration
+ *	Returns true if available and false otherwise
+ */
+int scm_is_mc_boot_available(void)
+{
+	return scm_is_call_available(SCM_SVC_BOOT, SCM_BOOT_ADDR_MC);
+}
+EXPORT_SYMBOL(scm_is_mc_boot_available);
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/soc/qcom/scm.c	2019-01-22 16:16:26.671275095 +0100
@@ -0,0 +1,1245 @@
+/* Copyright (c) 2010-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+
+#include <asm/cacheflush.h>
+#include <asm/compiler.h>
+
+#include <soc/qcom/scm.h>
+
+#define CREATE_TRACE_POINTS
+#include <trace/events/scm.h>
+
+#define SCM_ENOMEM		-5
+#define SCM_EOPNOTSUPP		-4
+#define SCM_EINVAL_ADDR		-3
+#define SCM_EINVAL_ARG		-2
+#define SCM_ERROR		-1
+#define SCM_INTERRUPTED		1
+#define SCM_EBUSY		-55
+#define SCM_V2_EBUSY		-12
+
+static DEFINE_MUTEX(scm_lock);
+
+/*
+ * MSM8996 V2 requires a lock to protect against
+ * concurrent accesses between the limits management
+ * driver and the clock controller
+ */
+DEFINE_MUTEX(scm_lmh_lock);
+
+#define SCM_EBUSY_WAIT_MS 30
+#define SCM_EBUSY_MAX_RETRY 67
+
+#define N_EXT_SCM_ARGS 7
+#define FIRST_EXT_ARG_IDX 3
+#define SMC_ATOMIC_SYSCALL 31
+#define N_REGISTER_ARGS (MAX_SCM_ARGS - N_EXT_SCM_ARGS + 1)
+#define SMC64_MASK 0x40000000
+#define SMC_ATOMIC_MASK 0x80000000
+#define IS_CALL_AVAIL_CMD 1
+
+#define SCM_BUF_LEN(__cmd_size, __resp_size) ({ \
+	size_t x =  __cmd_size + __resp_size; \
+	size_t y = sizeof(struct scm_command) + sizeof(struct scm_response); \
+	size_t result; \
+	if (x < __cmd_size || (x + y) < x) \
+		result = 0; \
+	else \
+		result = x + y; \
+	result; \
+	})
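+
+/*
+ * Example: SCM_BUF_LEN(16, 8) evaluates to 16 + 8 + sizeof(struct
+ * scm_command) + sizeof(struct scm_response); if either addition would
+ * overflow size_t, the macro yields 0, which callers such as scm_call()
+ * reject with -EINVAL.
+ */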
+/**
+ * struct scm_command - one SCM command buffer
+ * @len: total available memory for command and response
+ * @buf_offset: start of command buffer
+ * @resp_hdr_offset: start of response buffer
+ * @id: command to be executed
+ * @buf: buffer returned from scm_get_command_buffer()
+ *
+ * An SCM command is laid out in memory as follows:
+ *
+ *	------------------- <--- struct scm_command
+ *	| command header  |
+ *	------------------- <--- scm_get_command_buffer()
+ *	| command buffer  |
+ *	------------------- <--- struct scm_response and
+ *	| response header |      scm_command_to_response()
+ *	------------------- <--- scm_get_response_buffer()
+ *	| response buffer |
+ *	-------------------
+ *
+ * There can be arbitrary padding between the headers and buffers so
+ * you should always use the appropriate scm_get_*_buffer() routines
+ * to access the buffers in a safe manner.
+ */
+struct scm_command {
+	u32	len;
+	u32	buf_offset;
+	u32	resp_hdr_offset;
+	u32	id;
+	u32	buf[0];
+};
+
+/**
+ * struct scm_response - one SCM response buffer
+ * @len: total available memory for response
+ * @buf_offset: start of response data relative to start of scm_response
+ * @is_complete: indicates if the command has finished processing
+ */
+struct scm_response {
+	u32	len;
+	u32	buf_offset;
+	u32	is_complete;
+};
+
+#ifdef CONFIG_ARM64
+
+#define R0_STR "x0"
+#define R1_STR "x1"
+#define R2_STR "x2"
+#define R3_STR "x3"
+#define R4_STR "x4"
+#define R5_STR "x5"
+#define R6_STR "x6"
+
+/* Outer caches unsupported on ARM64 platforms */
+#define outer_inv_range(x, y)
+#define outer_flush_range(x, y)
+
+#define __cpuc_flush_dcache_area __flush_dcache_area
+
+#else
+
+#define R0_STR "r0"
+#define R1_STR "r1"
+#define R2_STR "r2"
+#define R3_STR "r3"
+#define R4_STR "r4"
+#define R5_STR "r5"
+#define R6_STR "r6"
+
+#endif
+
+/**
+ * scm_command_to_response() - Get a pointer to a scm_response
+ * @cmd: command
+ *
+ * Returns a pointer to a response for a command.
+ */
+static inline struct scm_response *scm_command_to_response(
+		const struct scm_command *cmd)
+{
+	return (void *)cmd + cmd->resp_hdr_offset;
+}
+
+/**
+ * scm_get_command_buffer() - Get a pointer to a command buffer
+ * @cmd: command
+ *
+ * Returns a pointer to the command buffer of a command.
+ */
+static inline void *scm_get_command_buffer(const struct scm_command *cmd)
+{
+	return (void *)cmd->buf;
+}
+
+/**
+ * scm_get_response_buffer() - Get a pointer to a response buffer
+ * @rsp: response
+ *
+ * Returns a pointer to a response buffer of a response.
+ */
+static inline void *scm_get_response_buffer(const struct scm_response *rsp)
+{
+	return (void *)rsp + rsp->buf_offset;
+}
+
+static int scm_remap_error(int err)
+{
+	switch (err) {
+	case SCM_ERROR:
+		return -EIO;
+	case SCM_EINVAL_ADDR:
+	case SCM_EINVAL_ARG:
+		return -EINVAL;
+	case SCM_EOPNOTSUPP:
+		return -EOPNOTSUPP;
+	case SCM_ENOMEM:
+		return -ENOMEM;
+	case SCM_EBUSY:
+	case SCM_V2_EBUSY:
+		return -EBUSY;
+	}
+	return -EINVAL;
+}
+
+static u32 smc(u32 cmd_addr)
+{
+	int context_id;
+	register u32 r0 asm("r0") = 1;
+	register u32 r1 asm("r1") = (uintptr_t)&context_id;
+	register u32 r2 asm("r2") = cmd_addr;
+	do {
+		asm volatile(
+			__asmeq("%0", R0_STR)
+			__asmeq("%1", R0_STR)
+			__asmeq("%2", R1_STR)
+			__asmeq("%3", R2_STR)
+#ifdef REQUIRES_SEC
+			".arch_extension sec\n"
+#endif
+			"smc	#0\n"
+			: "=r" (r0)
+			: "r" (r0), "r" (r1), "r" (r2)
+			: "r3");
+	} while (r0 == SCM_INTERRUPTED);
+
+	return r0;
+}
+
+static int __scm_call(const struct scm_command *cmd)
+{
+	int ret;
+	u32 cmd_addr = virt_to_phys(cmd);
+
+	/*
+	 * Flush the command buffer so that the secure world sees
+	 * the correct data.
+	 */
+	__cpuc_flush_dcache_area((void *)cmd, cmd->len);
+	outer_flush_range(cmd_addr, cmd_addr + cmd->len);
+
+	ret = smc(cmd_addr);
+	if (ret < 0) {
+		if (ret != SCM_EBUSY)
+			pr_err("scm_call failed with error code %d\n", ret);
+		ret = scm_remap_error(ret);
+	}
+	return ret;
+}
+
+#ifndef CONFIG_ARM64
+static void scm_inv_range(unsigned long start, unsigned long end)
+{
+	u32 cacheline_size, ctr;
+
+	asm volatile("mrc p15, 0, %0, c0, c0, 1" : "=r" (ctr));
+	cacheline_size = 4 << ((ctr >> 16) & 0xf);
+
+	start = round_down(start, cacheline_size);
+	end = round_up(end, cacheline_size);
+	outer_inv_range(start, end);
+	while (start < end) {
+		asm ("mcr p15, 0, %0, c7, c6, 1" : : "r" (start)
+		     : "memory");
+		start += cacheline_size;
+	}
+	dsb();
+	isb();
+}
+#else
+
+static void scm_inv_range(unsigned long start, unsigned long end)
+{
+	dmac_inv_range((void *)start, (void *)end);
+}
+#endif
+
+/**
+ * scm_call_common() - Send an SCM command
+ * @svc_id: service identifier
+ * @cmd_id: command identifier
+ * @cmd_buf: command buffer
+ * @cmd_len: length of the command buffer
+ * @resp_buf: response buffer
+ * @resp_len: length of the response buffer
+ * @scm_buf: internal scm structure used for passing data
+ * @scm_buf_len: length of the internal scm structure
+ *
+ * Core function for scm calls. Initializes the given cmd structure with
+ * appropriate values and makes the actual scm call. Validation of cmd
+ * pointer and length must occur in the calling function.
+ *
+ * Returns the appropriate error code from the scm call
+ */
+static int scm_call_common(u32 svc_id, u32 cmd_id, const void *cmd_buf,
+				size_t cmd_len, void *resp_buf, size_t resp_len,
+				struct scm_command *scm_buf,
+				size_t scm_buf_length)
+{
+	int ret;
+	struct scm_response *rsp;
+	unsigned long start, end;
+
+	scm_buf->len = scm_buf_length;
+	scm_buf->buf_offset = offsetof(struct scm_command, buf);
+	scm_buf->resp_hdr_offset = scm_buf->buf_offset + cmd_len;
+	scm_buf->id = (svc_id << 10) | cmd_id;
+
+	if (cmd_buf)
+		memcpy(scm_get_command_buffer(scm_buf), cmd_buf, cmd_len);
+
+	mutex_lock(&scm_lock);
+	ret = __scm_call(scm_buf);
+	mutex_unlock(&scm_lock);
+	if (ret)
+		return ret;
+
+	rsp = scm_command_to_response(scm_buf);
+	start = (unsigned long)rsp;
+
+	do {
+		scm_inv_range(start, start + sizeof(*rsp));
+	} while (!rsp->is_complete);
+
+	end = (unsigned long)scm_get_response_buffer(rsp) + resp_len;
+	scm_inv_range(start, end);
+
+	if (resp_buf)
+		memcpy(resp_buf, scm_get_response_buffer(rsp), resp_len);
+
+	return ret;
+}
+
+/*
+ * Sometimes the secure world may be busy waiting for a particular resource.
+ * In those situations, it is expected that the secure world returns a special
+ * error code (SCM_EBUSY). Retry any scm_call that fails with this error code,
+ * but with a timeout in place. Also, don't move this into scm_call_common,
+ * since we want the first attempt to be the "fastpath".
+ */
+static int _scm_call_retry(u32 svc_id, u32 cmd_id, const void *cmd_buf,
+				size_t cmd_len, void *resp_buf, size_t resp_len,
+				struct scm_command *cmd,
+				size_t len)
+{
+	int ret, retry_count = 0;
+
+	do {
+		ret = scm_call_common(svc_id, cmd_id, cmd_buf, cmd_len,
+					resp_buf, resp_len, cmd, len);
+		if (ret == -EBUSY)
+			msleep(SCM_EBUSY_WAIT_MS);
+		if (retry_count == 33)
+			pr_warn("scm: secure world has been busy for 1 second!\n");
+	} while (ret == -EBUSY && (retry_count++ < SCM_EBUSY_MAX_RETRY));
+
+	if (ret == -EBUSY)
+		pr_err("scm: secure world busy (rc = SCM_EBUSY)\n");
+
+	return ret;
+}
+
+/**
+ * scm_call_noalloc - Send an SCM command
+ *
+ * Same as scm_call except clients pass in a buffer (@scm_buf) to be used for
+ * scm internal structures. The buffer should be allocated with
+ * DEFINE_SCM_BUFFER to account for the proper alignment and size.
+ */
+int scm_call_noalloc(u32 svc_id, u32 cmd_id, const void *cmd_buf,
+		size_t cmd_len, void *resp_buf, size_t resp_len,
+		void *scm_buf, size_t scm_buf_len)
+{
+	int ret;
+	size_t len = SCM_BUF_LEN(cmd_len, resp_len);
+
+	if (len == 0)
+		return -EINVAL;
+
+	if (!IS_ALIGNED((unsigned long)scm_buf, PAGE_SIZE))
+		return -EINVAL;
+
+	memset(scm_buf, 0, scm_buf_len);
+
+	ret = scm_call_common(svc_id, cmd_id, cmd_buf, cmd_len, resp_buf,
+				resp_len, scm_buf, len);
+	return ret;
+}
+
+#ifdef CONFIG_ARM64
+
+static int __scm_call_armv8_64(u64 x0, u64 x1, u64 x2, u64 x3, u64 x4, u64 x5,
+				u64 *ret1, u64 *ret2, u64 *ret3)
+{
+	register u64 r0 asm("r0") = x0;
+	register u64 r1 asm("r1") = x1;
+	register u64 r2 asm("r2") = x2;
+	register u64 r3 asm("r3") = x3;
+	register u64 r4 asm("r4") = x4;
+	register u64 r5 asm("r5") = x5;
+	register u64 r6 asm("r6") = 0;
+
+	do {
+		asm volatile(
+			__asmeq("%0", R0_STR)
+			__asmeq("%1", R1_STR)
+			__asmeq("%2", R2_STR)
+			__asmeq("%3", R3_STR)
+			__asmeq("%4", R4_STR)
+			__asmeq("%5", R5_STR)
+			__asmeq("%6", R6_STR)
+			__asmeq("%7", R0_STR)
+			__asmeq("%8", R1_STR)
+			__asmeq("%9", R2_STR)
+			__asmeq("%10", R3_STR)
+			__asmeq("%11", R4_STR)
+			__asmeq("%12", R5_STR)
+			__asmeq("%13", R6_STR)
+#ifdef REQUIRES_SEC
+			".arch_extension sec\n"
+#endif
+			"smc	#0\n"
+			: "=r" (r0), "=r" (r1), "=r" (r2), "=r" (r3),
+			  "=r" (r4), "=r" (r5), "=r" (r6)
+			: "r" (r0), "r" (r1), "r" (r2), "r" (r3), "r" (r4),
+			  "r" (r5), "r" (r6)
+			: "x7", "x8", "x9", "x10", "x11", "x12", "x13",
+			  "x14", "x15", "x16", "x17");
+	} while (r0 == SCM_INTERRUPTED);
+
+	if (ret1)
+		*ret1 = r1;
+	if (ret2)
+		*ret2 = r2;
+	if (ret3)
+		*ret3 = r3;
+
+	return r0;
+}
+
+static int __scm_call_armv8_32(u32 w0, u32 w1, u32 w2, u32 w3, u32 w4, u32 w5,
+				u64 *ret1, u64 *ret2, u64 *ret3)
+{
+	register u32 r0 asm("r0") = w0;
+	register u32 r1 asm("r1") = w1;
+	register u32 r2 asm("r2") = w2;
+	register u32 r3 asm("r3") = w3;
+	register u32 r4 asm("r4") = w4;
+	register u32 r5 asm("r5") = w5;
+	register u32 r6 asm("r6") = 0;
+
+	do {
+		asm volatile(
+			__asmeq("%0", R0_STR)
+			__asmeq("%1", R1_STR)
+			__asmeq("%2", R2_STR)
+			__asmeq("%3", R3_STR)
+			__asmeq("%4", R4_STR)
+			__asmeq("%5", R5_STR)
+			__asmeq("%6", R6_STR)
+			__asmeq("%7", R0_STR)
+			__asmeq("%8", R1_STR)
+			__asmeq("%9", R2_STR)
+			__asmeq("%10", R3_STR)
+			__asmeq("%11", R4_STR)
+			__asmeq("%12", R5_STR)
+			__asmeq("%13", R6_STR)
+#ifdef REQUIRES_SEC
+			".arch_extension sec\n"
+#endif
+			"smc	#0\n"
+			: "=r" (r0), "=r" (r1), "=r" (r2), "=r" (r3),
+			  "=r" (r4), "=r" (r5), "=r" (r6)
+			: "r" (r0), "r" (r1), "r" (r2), "r" (r3), "r" (r4),
+			  "r" (r5), "r" (r6)
+			: "x7", "x8", "x9", "x10", "x11", "x12", "x13",
+			"x14", "x15", "x16", "x17");
+
+	} while (r0 == SCM_INTERRUPTED);
+
+	if (ret1)
+		*ret1 = r1;
+	if (ret2)
+		*ret2 = r2;
+	if (ret3)
+		*ret3 = r3;
+
+	return r0;
+}
+
+#else
+
+static int __scm_call_armv8_32(u32 w0, u32 w1, u32 w2, u32 w3, u32 w4, u32 w5,
+				u64 *ret1, u64 *ret2, u64 *ret3)
+{
+	register u32 r0 asm("r0") = w0;
+	register u32 r1 asm("r1") = w1;
+	register u32 r2 asm("r2") = w2;
+	register u32 r3 asm("r3") = w3;
+	register u32 r4 asm("r4") = w4;
+	register u32 r5 asm("r5") = w5;
+	register u32 r6 asm("r6") = 0;
+
+	do {
+		asm volatile(
+			__asmeq("%0", R0_STR)
+			__asmeq("%1", R1_STR)
+			__asmeq("%2", R2_STR)
+			__asmeq("%3", R3_STR)
+			__asmeq("%4", R4_STR)
+			__asmeq("%5", R5_STR)
+			__asmeq("%6", R6_STR)
+			__asmeq("%7", R0_STR)
+			__asmeq("%8", R1_STR)
+			__asmeq("%9", R2_STR)
+			__asmeq("%10", R3_STR)
+			__asmeq("%11", R4_STR)
+			__asmeq("%12", R5_STR)
+			__asmeq("%13", R6_STR)
+#ifdef REQUIRES_SEC
+			".arch_extension sec\n"
+#endif
+			"smc	#0\n"
+			: "=r" (r0), "=r" (r1), "=r" (r2), "=r" (r3),
+			  "=r" (r4), "=r" (r5), "=r" (r6)
+			: "r" (r0), "r" (r1), "r" (r2), "r" (r3), "r" (r4),
+			 "r" (r5), "r" (r6));
+
+	} while (r0 == SCM_INTERRUPTED);
+
+	if (ret1)
+		*ret1 = r1;
+	if (ret2)
+		*ret2 = r2;
+	if (ret3)
+		*ret3 = r3;
+
+	return r0;
+}
+
+static int __scm_call_armv8_64(u64 x0, u64 x1, u64 x2, u64 x3, u64 x4, u64 x5,
+				u64 *ret1, u64 *ret2, u64 *ret3)
+{
+	return 0;
+}
+#endif
+
+struct scm_extra_arg {
+	union {
+		u32 args32[N_EXT_SCM_ARGS];
+		u64 args64[N_EXT_SCM_ARGS];
+	};
+};
+
+static enum scm_interface_version {
+	SCM_UNKNOWN,
+	SCM_LEGACY,
+	SCM_ARMV8_32,
+	SCM_ARMV8_64,
+} scm_version = SCM_UNKNOWN;
+
+/* This will be set to specify SMC32 or SMC64 */
+static u32 scm_version_mask;
+
+bool is_scm_armv8(void)
+{
+	int ret;
+	u64 ret1, x0;
+
+	if (likely(scm_version != SCM_UNKNOWN))
+		return (scm_version == SCM_ARMV8_32) ||
+			(scm_version == SCM_ARMV8_64);
+	/*
+	 * This is a one time check that runs on the first ever
+	 * invocation of is_scm_armv8. We might be called in atomic
+	 * context so no mutexes etc. Also, we can't use the scm_call2
+	 * or scm_call2_APIs directly since they depend on this init.
+	 */
+
+	/* First try a SMC64 call */
+	scm_version = SCM_ARMV8_64;
+	ret1 = 0;
+	x0 = SCM_SIP_FNID(SCM_SVC_INFO, IS_CALL_AVAIL_CMD) | SMC_ATOMIC_MASK;
+	ret = __scm_call_armv8_64(x0 | SMC64_MASK, SCM_ARGS(1), x0, 0, 0, 0,
+				  &ret1, NULL, NULL);
+	if (ret || !ret1) {
+		/* Try SMC32 call */
+		ret1 = 0;
+		ret = __scm_call_armv8_32(x0, SCM_ARGS(1), x0, 0, 0, 0,
+					  &ret1, NULL, NULL);
+		if (ret || !ret1)
+			scm_version = SCM_LEGACY;
+		else
+			scm_version = SCM_ARMV8_32;
+	} else
+		scm_version_mask = SMC64_MASK;
+
+	pr_debug("scm_call: scm version is %x, mask is %x\n", scm_version,
+		  scm_version_mask);
+
+	return (scm_version == SCM_ARMV8_32) ||
+			(scm_version == SCM_ARMV8_64);
+}
+EXPORT_SYMBOL(is_scm_armv8);
+
+/*
+ * If there are more than N_REGISTER_ARGS, allocate a buffer and place
+ * the additional arguments in it. The extra argument buffer will be
+ * pointed to by X5.
+ */
+static int allocate_extra_arg_buffer(struct scm_desc *desc, gfp_t flags)
+{
+	int i, j;
+	struct scm_extra_arg *argbuf;
+	int arglen = desc->arginfo & 0xf;
+	size_t argbuflen = PAGE_ALIGN(sizeof(struct scm_extra_arg));
+
+	desc->x5 = desc->args[FIRST_EXT_ARG_IDX];
+
+	if (likely(arglen <= N_REGISTER_ARGS)) {
+		desc->extra_arg_buf = NULL;
+		return 0;
+	}
+
+	argbuf = kzalloc(argbuflen, flags);
+	if (!argbuf) {
+		pr_err("scm_call: failed to alloc mem for extended argument buffer\n");
+		return -ENOMEM;
+	}
+
+	desc->extra_arg_buf = argbuf;
+
+	j = FIRST_EXT_ARG_IDX;
+	if (scm_version == SCM_ARMV8_64)
+		for (i = 0; i < N_EXT_SCM_ARGS; i++)
+			argbuf->args64[i] = desc->args[j++];
+	else
+		for (i = 0; i < N_EXT_SCM_ARGS; i++)
+			argbuf->args32[i] = desc->args[j++];
+	desc->x5 = virt_to_phys(argbuf);
+	__cpuc_flush_dcache_area(argbuf, argbuflen);
+	outer_flush_range(virt_to_phys(argbuf),
+			  virt_to_phys(argbuf) + argbuflen);
+
+	return 0;
+}
+
+/**
+ * scm_call2() - Invoke a syscall in the secure world
+ * @fn_id: The function ID for this syscall
+ * @desc: Descriptor structure containing arguments and return values
+ *
+ * Sends a command to the SCM and waits for the command to finish processing.
+ * This should *only* be called in pre-emptible context.
+ *
+ * A note on cache maintenance:
+ * Note that any buffers that are expected to be accessed by the secure world
+ * must be flushed before invoking scm_call and invalidated in the cache
+ * immediately after scm_call returns. An important point that must be noted
+ * is that on ARMV8 architectures, invalidation actually also causes a dirty
+ * cache line to be cleaned (flushed + unset-dirty-bit). Therefore it is of
+ * paramount importance that the buffer be flushed before invoking scm_call2,
+ * even if you don't care about the contents of that buffer.
+ *
+ * Note that cache maintenance on the argument buffer (desc->args) is taken care
+ * of by scm_call2; however, callers are responsible for any other cached
+ * buffers passed over to the secure world.
+ */
+int scm_call2(u32 fn_id, struct scm_desc *desc)
+{
+	int arglen = desc->arginfo & 0xf;
+	int ret, retry_count = 0;
+	u64 x0;
+
+	if (unlikely(!is_scm_armv8()))
+		return -ENODEV;
+
+	ret = allocate_extra_arg_buffer(desc, GFP_NOIO);
+	if (ret)
+		return ret;
+
+	x0 = fn_id | scm_version_mask;
+
+	do {
+		mutex_lock(&scm_lock);
+
+		if (SCM_SVC_ID(fn_id) == SCM_SVC_LMH)
+			mutex_lock(&scm_lmh_lock);
+
+		desc->ret[0] = desc->ret[1] = desc->ret[2] = 0;
+
+		trace_scm_call_start(x0, desc);
+
+		if (scm_version == SCM_ARMV8_64)
+			ret = __scm_call_armv8_64(x0, desc->arginfo,
+						  desc->args[0], desc->args[1],
+						  desc->args[2], desc->x5,
+						  &desc->ret[0], &desc->ret[1],
+						  &desc->ret[2]);
+		else
+			ret = __scm_call_armv8_32(x0, desc->arginfo,
+						  desc->args[0], desc->args[1],
+						  desc->args[2], desc->x5,
+						  &desc->ret[0], &desc->ret[1],
+						  &desc->ret[2]);
+
+		trace_scm_call_end(desc);
+
+		if (SCM_SVC_ID(fn_id) == SCM_SVC_LMH)
+			mutex_unlock(&scm_lmh_lock);
+
+		mutex_unlock(&scm_lock);
+
+		if (ret == SCM_V2_EBUSY)
+			msleep(SCM_EBUSY_WAIT_MS);
+		if (retry_count == 33)
+			pr_warn("scm: secure world has been busy for 1 second!\n");
+	} while (ret == SCM_V2_EBUSY && (retry_count++ < SCM_EBUSY_MAX_RETRY));
+
+	if (ret < 0)
+		pr_err("scm_call failed: func id %#llx, ret: %d, syscall returns: %#llx, %#llx, %#llx\n",
+			x0, ret, desc->ret[0], desc->ret[1], desc->ret[2]);
+
+	if (arglen > N_REGISTER_ARGS)
+		kfree(desc->extra_arg_buf);
+	if (ret < 0)
+		return scm_remap_error(ret);
+	return 0;
+}
+EXPORT_SYMBOL(scm_call2);
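+
+/*
+ * Usage sketch (mirrors scm_set_boot_addr() in scm-boot.c): pack the
+ * arguments into a struct scm_desc, describe them with SCM_ARGS(), then
+ * invoke the SIP function id:
+ *
+ *	struct scm_desc desc = {0};
+ *
+ *	desc.args[0] = addr;
+ *	desc.args[1] = flags;
+ *	desc.arginfo = SCM_ARGS(2);
+ *	ret = scm_call2(SCM_SIP_FNID(SCM_SVC_BOOT, SCM_BOOT_ADDR), &desc);
+ */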
+
+/**
+ * scm_call2_atomic() - Invoke a syscall in the secure world
+ *
+ * Similar to scm_call2 except that this can be invoked in atomic context.
+ * There is also no retry mechanism implemented. Please ensure that the
+ * secure world syscall can be executed in such a context and can complete
+ * in a timely manner.
+ */
+int scm_call2_atomic(u32 fn_id, struct scm_desc *desc)
+{
+	int arglen = desc->arginfo & 0xf;
+	int ret;
+	u64 x0;
+
+	if (unlikely(!is_scm_armv8()))
+		return -ENODEV;
+
+	ret = allocate_extra_arg_buffer(desc, GFP_ATOMIC);
+	if (ret)
+		return ret;
+
+	x0 = fn_id | BIT(SMC_ATOMIC_SYSCALL) | scm_version_mask;
+
+	if (scm_version == SCM_ARMV8_64)
+		ret = __scm_call_armv8_64(x0, desc->arginfo, desc->args[0],
+					  desc->args[1], desc->args[2],
+					  desc->x5, &desc->ret[0],
+					  &desc->ret[1], &desc->ret[2]);
+	else
+		ret = __scm_call_armv8_32(x0, desc->arginfo, desc->args[0],
+					  desc->args[1], desc->args[2],
+					  desc->x5, &desc->ret[0],
+					  &desc->ret[1], &desc->ret[2]);
+	if (ret < 0)
+		pr_err("scm_call failed: func id %#llx, ret: %d, syscall returns: %#llx, %#llx, %#llx\n",
+			x0, ret, desc->ret[0],
+			desc->ret[1], desc->ret[2]);
+
+	if (arglen > N_REGISTER_ARGS)
+		kfree(desc->extra_arg_buf);
+	if (ret < 0)
+		return scm_remap_error(ret);
+	return ret;
+}
+
+/**
+ * scm_call() - Send an SCM command
+ * @svc_id: service identifier
+ * @cmd_id: command identifier
+ * @cmd_buf: command buffer
+ * @cmd_len: length of the command buffer
+ * @resp_buf: response buffer
+ * @resp_len: length of the response buffer
+ *
+ * Sends a command to the SCM and waits for the command to finish processing.
+ *
+ * A note on cache maintenance:
+ * Note that any buffers that are expected to be accessed by the secure world
+ * must be flushed before invoking scm_call and invalidated in the cache
+ * immediately after scm_call returns. Cache maintenance on the command and
+ * response buffers is taken care of by scm_call; however, callers are
+ * responsible for any other cached buffers passed over to the secure world.
+ */
+int scm_call(u32 svc_id, u32 cmd_id, const void *cmd_buf, size_t cmd_len,
+		void *resp_buf, size_t resp_len)
+{
+	struct scm_command *cmd;
+	int ret;
+	size_t len = SCM_BUF_LEN(cmd_len, resp_len);
+
+	if (len == 0 || PAGE_ALIGN(len) < len)
+		return -EINVAL;
+
+	cmd = kzalloc(PAGE_ALIGN(len), GFP_KERNEL);
+	if (!cmd)
+		return -ENOMEM;
+
+	ret = scm_call_common(svc_id, cmd_id, cmd_buf, cmd_len, resp_buf,
+				resp_len, cmd, len);
+	if (unlikely(ret == -EBUSY))
+		ret = _scm_call_retry(svc_id, cmd_id, cmd_buf, cmd_len,
+				      resp_buf, resp_len, cmd, PAGE_ALIGN(len));
+	kfree(cmd);
+	return ret;
+}
+EXPORT_SYMBOL(scm_call);
+
+#define SCM_CLASS_REGISTER	(0x2 << 8)
+#define SCM_MASK_IRQS		BIT(5)
+#define SCM_ATOMIC(svc, cmd, n) (((((svc) << 10)|((cmd) & 0x3ff)) << 12) | \
+				SCM_CLASS_REGISTER | \
+				SCM_MASK_IRQS | \
+				(n & 0xf))
+
+/**
+ * scm_call_atomic1() - Send an atomic SCM command with one argument
+ * @svc_id: service identifier
+ * @cmd_id: command identifier
+ * @arg1: first argument
+ *
+ * This shall only be used with commands that are guaranteed to be
+ * uninterruptable, atomic and SMP safe.
+ */
+s32 scm_call_atomic1(u32 svc, u32 cmd, u32 arg1)
+{
+	int context_id;
+	register u32 r0 asm("r0") = SCM_ATOMIC(svc, cmd, 1);
+	register u32 r1 asm("r1") = (uintptr_t)&context_id;
+	register u32 r2 asm("r2") = arg1;
+
+	asm volatile(
+		__asmeq("%0", R0_STR)
+		__asmeq("%1", R0_STR)
+		__asmeq("%2", R1_STR)
+		__asmeq("%3", R2_STR)
+#ifdef REQUIRES_SEC
+			".arch_extension sec\n"
+#endif
+		"smc	#0\n"
+		: "=r" (r0)
+		: "r" (r0), "r" (r1), "r" (r2)
+		: "r3");
+	return r0;
+}
+EXPORT_SYMBOL(scm_call_atomic1);
+
+/**
+ * scm_call_atomic1_1() - SCM command with one argument and one return value
+ * @svc_id: service identifier
+ * @cmd_id: command identifier
+ * @arg1: first argument
+ * @ret1: first return value
+ *
+ * This shall only be used with commands that are guaranteed to be
+ * uninterruptable, atomic and SMP safe.
+ */
+s32 scm_call_atomic1_1(u32 svc, u32 cmd, u32 arg1, u32 *ret1)
+{
+	int context_id;
+	register u32 r0 asm("r0") = SCM_ATOMIC(svc, cmd, 1);
+	register u32 r1 asm("r1") = (uintptr_t)&context_id;
+	register u32 r2 asm("r2") = arg1;
+
+	asm volatile(
+		__asmeq("%0", R0_STR)
+		__asmeq("%1", R1_STR)
+		__asmeq("%2", R0_STR)
+		__asmeq("%3", R1_STR)
+		__asmeq("%4", R2_STR)
+#ifdef REQUIRES_SEC
+			".arch_extension sec\n"
+#endif
+		"smc	#0\n"
+		: "=r" (r0), "=r" (r1)
+		: "r" (r0), "r" (r1), "r" (r2)
+		: "r3");
+	if (ret1)
+		*ret1 = r1;
+	return r0;
+}
+EXPORT_SYMBOL(scm_call_atomic1_1);
+
+/**
+ * scm_call_atomic2() - Send an atomic SCM command with two arguments
+ * @svc_id: service identifier
+ * @cmd_id: command identifier
+ * @arg1: first argument
+ * @arg2: second argument
+ *
+ * This shall only be used with commands that are guaranteed to be
+ * uninterruptable, atomic and SMP safe.
+ */
+s32 scm_call_atomic2(u32 svc, u32 cmd, u32 arg1, u32 arg2)
+{
+	int context_id;
+	register u32 r0 asm("r0") = SCM_ATOMIC(svc, cmd, 2);
+	register u32 r1 asm("r1") = (uintptr_t)&context_id;
+	register u32 r2 asm("r2") = arg1;
+	register u32 r3 asm("r3") = arg2;
+
+	asm volatile(
+		__asmeq("%0", R0_STR)
+		__asmeq("%1", R0_STR)
+		__asmeq("%2", R1_STR)
+		__asmeq("%3", R2_STR)
+		__asmeq("%4", R3_STR)
+#ifdef REQUIRES_SEC
+			".arch_extension sec\n"
+#endif
+		"smc	#0\n"
+		: "=r" (r0)
+		: "r" (r0), "r" (r1), "r" (r2), "r" (r3));
+	return r0;
+}
+EXPORT_SYMBOL(scm_call_atomic2);
+
+/**
+ * scm_call_atomic3() - Send an atomic SCM command with three arguments
+ * @svc: service identifier
+ * @cmd: command identifier
+ * @arg1: first argument
+ * @arg2: second argument
+ * @arg3: third argument
+ *
+ * This shall only be used with commands that are guaranteed to be
+ * uninterruptible, atomic and SMP safe.
+ */
+s32 scm_call_atomic3(u32 svc, u32 cmd, u32 arg1, u32 arg2, u32 arg3)
+{
+	int context_id;
+	register u32 r0 asm("r0") = SCM_ATOMIC(svc, cmd, 3);
+	register u32 r1 asm("r1") = (uintptr_t)&context_id;
+	register u32 r2 asm("r2") = arg1;
+	register u32 r3 asm("r3") = arg2;
+	register u32 r4 asm("r4") = arg3;
+
+	asm volatile(
+		__asmeq("%0", R0_STR)
+		__asmeq("%1", R0_STR)
+		__asmeq("%2", R1_STR)
+		__asmeq("%3", R2_STR)
+		__asmeq("%4", R3_STR)
+		__asmeq("%5", R4_STR)
+#ifdef REQUIRES_SEC
+			".arch_extension sec\n"
+#endif
+		"smc	#0\n"
+		: "=r" (r0)
+		: "r" (r0), "r" (r1), "r" (r2), "r" (r3), "r" (r4));
+	return r0;
+}
+EXPORT_SYMBOL(scm_call_atomic3);
+
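+/**
+ * scm_call_atomic4_3() - SCM command with four arguments and three return values
+ * @svc: service identifier
+ * @cmd: command identifier
+ * @arg1: first argument
+ * @arg2: second argument
+ * @arg3: third argument
+ * @arg4: fourth argument
+ * @ret1: first return value
+ * @ret2: second return value
+ *
+ * This shall only be used with commands that are guaranteed to be
+ * uninterruptible, atomic and SMP safe.
+ */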
+s32 scm_call_atomic4_3(u32 svc, u32 cmd, u32 arg1, u32 arg2,
+		u32 arg3, u32 arg4, u32 *ret1, u32 *ret2)
+{
+	int context_id;
+	register u32 r0 asm("r0") = SCM_ATOMIC(svc, cmd, 4);
+	register u32 r1 asm("r1") = (uintptr_t)&context_id;
+	register u32 r2 asm("r2") = arg1;
+	register u32 r3 asm("r3") = arg2;
+	register u32 r4 asm("r4") = arg3;
+	register u32 r5 asm("r5") = arg4;
+
+	asm volatile(
+		__asmeq("%0", R0_STR)
+		__asmeq("%1", R1_STR)
+		__asmeq("%2", R2_STR)
+		__asmeq("%3", R0_STR)
+		__asmeq("%4", R1_STR)
+		__asmeq("%5", R2_STR)
+		__asmeq("%6", R3_STR)
+#ifdef REQUIRES_SEC
+			".arch_extension sec\n"
+#endif
+		"smc	#0\n"
+		: "=r" (r0), "=r" (r1), "=r" (r2)
+		: "r" (r0), "r" (r1), "r" (r2), "r" (r3), "r" (r4), "r" (r5));
+	if (ret1)
+		*ret1 = r1;
+	if (ret2)
+		*ret2 = r2;
+	return r0;
+}
+EXPORT_SYMBOL(scm_call_atomic4_3);
+
+/**
+ * scm_call_atomic5_3() - SCM command with five arguments and three return values
+ * @svc: service identifier
+ * @cmd: command identifier
+ * @arg1: first argument
+ * @arg2: second argument
+ * @arg3: third argument
+ * @arg4: fourth argument
+ * @arg5: fifth argument
+ * @ret1: first return value
+ * @ret2: second return value
+ * @ret3: third return value
+ *
+ * This shall only be used with commands that are guaranteed to be
+ * uninterruptible, atomic and SMP safe.
+ */
+s32 scm_call_atomic5_3(u32 svc, u32 cmd, u32 arg1, u32 arg2,
+	u32 arg3, u32 arg4, u32 arg5, u32 *ret1, u32 *ret2, u32 *ret3)
+{
+	int context_id;
+	register u32 r0 asm("r0") = SCM_ATOMIC(svc, cmd, 5);
+	register u32 r1 asm("r1") = (uintptr_t)&context_id;
+	register u32 r2 asm("r2") = arg1;
+	register u32 r3 asm("r3") = arg2;
+	register u32 r4 asm("r4") = arg3;
+	register u32 r5 asm("r5") = arg4;
+	register u32 r6 asm("r6") = arg5;
+
+	asm volatile(
+		__asmeq("%0", R0_STR)
+		__asmeq("%1", R1_STR)
+		__asmeq("%2", R2_STR)
+		__asmeq("%3", R3_STR)
+		__asmeq("%4", R0_STR)
+		__asmeq("%5", R1_STR)
+		__asmeq("%6", R2_STR)
+		__asmeq("%7", R3_STR)
+#ifdef REQUIRES_SEC
+			".arch_extension sec\n"
+#endif
+		"smc	#0\n"
+		: "=r" (r0), "=r" (r1), "=r" (r2), "=r" (r3)
+		: "r" (r0), "r" (r1), "r" (r2), "r" (r3), "r" (r4), "r" (r5),
+		 "r" (r6));
+
+	if (ret1)
+		*ret1 = r1;
+	if (ret2)
+		*ret2 = r2;
+	if (ret3)
+		*ret3 = r3;
+	return r0;
+}
+EXPORT_SYMBOL(scm_call_atomic5_3);
+
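+/**
+ * scm_get_version() - Query the secure environment's SCM interface version
+ *
+ * The first successful result is cached and returned directly on
+ * subsequent calls.
+ */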
+u32 scm_get_version(void)
+{
+	int context_id;
+	static u32 version = -1;
+	register u32 r0 asm("r0");
+	register u32 r1 asm("r1");
+
+	if (version != -1)
+		return version;
+
+	mutex_lock(&scm_lock);
+
+	r0 = 0x1 << 8;
+	r1 = (uintptr_t)&context_id;
+	do {
+		asm volatile(
+			__asmeq("%0", R0_STR)
+			__asmeq("%1", R1_STR)
+			__asmeq("%2", R0_STR)
+			__asmeq("%3", R1_STR)
+#ifdef REQUIRES_SEC
+			".arch_extension sec\n"
+#endif
+			"smc	#0\n"
+			: "=r" (r0), "=r" (r1)
+			: "r" (r0), "r" (r1)
+			: "r2", "r3");
+	} while (r0 == SCM_INTERRUPTED);
+
+	version = r1;
+	mutex_unlock(&scm_lock);
+
+	return version;
+}
+EXPORT_SYMBOL(scm_get_version);
+
+#define SCM_IO_READ	0x1
+#define SCM_IO_WRITE	0x2
+
+u32 scm_io_read(phys_addr_t address)
+{
+	if (!is_scm_armv8()) {
+		return scm_call_atomic1(SCM_SVC_IO, SCM_IO_READ, address);
+	} else {
+		struct scm_desc desc = {
+			.args[0] = address,
+			.arginfo = SCM_ARGS(1),
+		};
+		scm_call2_atomic(SCM_SIP_FNID(SCM_SVC_IO, SCM_IO_READ), &desc);
+		return desc.ret[0];
+	}
+}
+EXPORT_SYMBOL(scm_io_read);
+
+int scm_io_write(phys_addr_t address, u32 val)
+{
+	int ret;
+
+	if (!is_scm_armv8()) {
+		ret = scm_call_atomic2(SCM_SVC_IO, SCM_IO_WRITE, address, val);
+	} else {
+		struct scm_desc desc = {
+			.args[0] = address,
+			.args[1] = val,
+			.arginfo = SCM_ARGS(2),
+		};
+		ret = scm_call2_atomic(SCM_SIP_FNID(SCM_SVC_IO, SCM_IO_WRITE),
+				       &desc);
+	}
+	return ret;
+}
+EXPORT_SYMBOL(scm_io_write);
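+
+/*
+ * Illustrative usage (the physical address is a placeholder, not a
+ * real register on any particular SoC):
+ *
+ *	u32 val = scm_io_read(0xfc400000);
+ *
+ *	scm_io_write(0xfc400000, val | BIT(0));
+ */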
+
+int scm_is_call_available(u32 svc_id, u32 cmd_id)
+{
+	int ret;
+	struct scm_desc desc = {0};
+
+	if (!is_scm_armv8()) {
+		u32 ret_val = 0;
+		u32 svc_cmd = (svc_id << 10) | cmd_id;
+
+		ret = scm_call(SCM_SVC_INFO, IS_CALL_AVAIL_CMD, &svc_cmd,
+			sizeof(svc_cmd), &ret_val, sizeof(ret_val));
+		if (!ret && ret_val)
+			return 1;
+		else
+			return 0;
+	}
+	desc.arginfo = SCM_ARGS(1);
+	desc.args[0] = SCM_SIP_FNID(svc_id, cmd_id);
+	desc.ret[0] = 0;
+	ret = scm_call2(SCM_SIP_FNID(SCM_SVC_INFO, IS_CALL_AVAIL_CMD), &desc);
+	if (!ret && desc.ret[0])
+		return 1;
+	else
+		return 0;
+}
+EXPORT_SYMBOL(scm_is_call_available);
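+
+/*
+ * Example: probe for a command before depending on it (illustrative):
+ *
+ *	if (scm_is_call_available(SCM_SVC_IO, SCM_IO_READ) > 0)
+ *		val = scm_io_read(addr);
+ */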
+
+#define GET_FEAT_VERSION_CMD	3
+int scm_get_feat_version(u32 feat, u64 *scm_ret)
+{
+	struct scm_desc desc = {0};
+	int ret;
+
+	if (!is_scm_armv8()) {
+		if (scm_is_call_available(SCM_SVC_INFO, GET_FEAT_VERSION_CMD)) {
+			ret = scm_call(SCM_SVC_INFO, GET_FEAT_VERSION_CMD,
+				&feat, sizeof(feat), scm_ret, sizeof(*scm_ret));
+			return ret;
+		}
+	}
+
+	ret = scm_is_call_available(SCM_SVC_INFO, GET_FEAT_VERSION_CMD);
+	if (ret <= 0)
+		return -EAGAIN;
+
+	desc.args[0] = feat;
+	desc.arginfo = SCM_ARGS(1);
+	ret = scm_call2(SCM_SIP_FNID(SCM_SVC_INFO, GET_FEAT_VERSION_CMD),
+			&desc);
+
+	*scm_ret = desc.ret[0];
+
+	return ret;
+}
+EXPORT_SYMBOL(scm_get_feat_version);
+
+#define RESTORE_SEC_CFG    2
+int scm_restore_sec_cfg(u32 device_id, u32 spare, u64 *scm_ret)
+{
+	struct scm_desc desc = {0};
+	int ret;
+	struct restore_sec_cfg {
+		u32 device_id;
+		u32 spare;
+	} cfg;
+
+	cfg.device_id = device_id;
+	cfg.spare = spare;
+
+	if (IS_ERR_OR_NULL(scm_ret))
+		return -EINVAL;
+
+	if (!is_scm_armv8())
+		return scm_call(SCM_SVC_MP, RESTORE_SEC_CFG, &cfg, sizeof(cfg),
+				scm_ret, sizeof(*scm_ret));
+
+	desc.args[0] = device_id;
+	desc.args[1] = spare;
+	desc.arginfo = SCM_ARGS(2);
+
+	ret = scm_call2(SCM_SIP_FNID(SCM_SVC_MP, RESTORE_SEC_CFG), &desc);
+	if (ret)
+		return ret;
+
+	*scm_ret = desc.ret[0];
+	return 0;
+}
+EXPORT_SYMBOL(scm_restore_sec_cfg);
+
+/*
+ * SCM call command ID to check secure mode.
+ * Returns zero for a secure device.
+ * Returns one for a non-secure device, or for a secure
+ * device with debug enabled.
+ */
+#define TZ_INFO_GET_SECURE_STATE	0x4
+bool scm_is_secure_device(void)
+{
+	struct scm_desc desc = {0};
+	int ret = 0, resp;
+
+	desc.args[0] = 0;
+	desc.arginfo = 0;
+	if (!is_scm_armv8()) {
+		ret = scm_call(SCM_SVC_INFO, TZ_INFO_GET_SECURE_STATE, NULL,
+			0, &resp, sizeof(resp));
+	} else {
+		ret = scm_call2(SCM_SIP_FNID(SCM_SVC_INFO,
+				TZ_INFO_GET_SECURE_STATE),
+				&desc);
+		resp = desc.ret[0];
+	}
+
+	if (ret) {
+		pr_err("%s: SCM call failed\n", __func__);
+		return false;
+	}
+
+	if ((resp & BIT(0)) || (resp & BIT(2)))
+		return true;
+	else
+		return false;
+}
+EXPORT_SYMBOL(scm_is_secure_device);
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/soc/qcom/secure_buffer.c	2019-10-29 09:26:24.821214706 +0100
@@ -0,0 +1,457 @@
+/*
+ * Copyright (C) 2011 Google, Inc
+ * Copyright (c) 2011-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/highmem.h>
+#include <linux/kernel.h>
+#include <linux/kref.h>
+#include <linux/mutex.h>
+#include <linux/scatterlist.h>
+#include <linux/slab.h>
+#include <linux/dma-mapping.h>
+#include <soc/qcom/scm.h>
+#include <soc/qcom/secure_buffer.h>
+
+DEFINE_MUTEX(secure_buffer_mutex);
+
+struct cp2_mem_chunks {
+	u32 chunk_list;
+	u32 chunk_list_size;
+	u32 chunk_size;
+} __attribute__ ((__packed__));
+
+struct cp2_lock_req {
+	struct cp2_mem_chunks chunks;
+	u32 mem_usage;
+	u32 lock;
+} __attribute__ ((__packed__));
+
+struct mem_prot_info {
+	phys_addr_t addr;
+	u64 size;
+};
+
+#define MEM_PROT_ASSIGN_ID		0x16
+#define MEM_PROTECT_LOCK_ID2		0x0A
+#define MEM_PROTECT_LOCK_ID2_FLAT	0x11
+#define V2_CHUNK_SIZE		SZ_1M
+#define FEATURE_ID_CP 12
+
+struct dest_vm_and_perm_info {
+	u32 vm;
+	u32 perm;
+	u64 ctx;
+	u32 ctx_size;
+};
+
+static void *qcom_secure_mem;
+#define QCOM_SECURE_MEM_SIZE (512*1024)
+
+static int secure_buffer_change_chunk(u32 chunks,
+				u32 nchunks,
+				u32 chunk_size,
+				int lock)
+{
+	struct cp2_lock_req request;
+	u32 resp;
+	int ret;
+	struct scm_desc desc = {0};
+
+	desc.args[0] = request.chunks.chunk_list = chunks;
+	desc.args[1] = request.chunks.chunk_list_size = nchunks;
+	desc.args[2] = request.chunks.chunk_size = chunk_size;
+	/* Usage is now always 0 */
+	desc.args[3] = request.mem_usage = 0;
+	desc.args[4] = request.lock = lock;
+	desc.args[5] = 0;
+	desc.arginfo = SCM_ARGS(6, SCM_RW, SCM_VAL, SCM_VAL, SCM_VAL, SCM_VAL,
+				SCM_VAL);
+
+	kmap_flush_unused();
+	kmap_atomic_flush_unused();
+
+	if (!is_scm_armv8()) {
+		ret = scm_call(SCM_SVC_MP, MEM_PROTECT_LOCK_ID2,
+				&request, sizeof(request), &resp, sizeof(resp));
+	} else {
+		ret = scm_call2(SCM_SIP_FNID(SCM_SVC_MP,
+				MEM_PROTECT_LOCK_ID2_FLAT), &desc);
+		resp = desc.ret[0];
+	}
+
+	return ret;
+}
+
+static int secure_buffer_change_table(struct sg_table *table, int lock)
+{
+	int i, j;
+	int ret = -EINVAL;
+	u32 *chunk_list;
+	struct scatterlist *sg;
+
+	for_each_sg(table->sgl, sg, table->nents, i) {
+		int nchunks;
+		int size = sg->length;
+		int chunk_list_len;
+		phys_addr_t chunk_list_phys;
+
+		/*
+		 * This should theoretically be a phys_addr_t but the protocol
+		 * indicates this should be a u32.
+		 */
+		u32 base;
+		u64 tmp = sg_dma_address(sg);
+		WARN((tmp >> 32) & 0xffffffff,
+			"%s: there are ones in the upper 32 bits of the sg at %p! They will be truncated! Address: 0x%llx\n",
+			__func__, sg, tmp);
+		if (unlikely(!size || (size % V2_CHUNK_SIZE))) {
+			WARN(1,
+				"%s: chunk %d has invalid size: 0x%x. Must be a multiple of 0x%x\n",
+				__func__, i, size, V2_CHUNK_SIZE);
+			return -EINVAL;
+		}
+
+		base = (u32)tmp;
+
+		nchunks = size / V2_CHUNK_SIZE;
+		chunk_list_len = sizeof(u32)*nchunks;
+
+		chunk_list = kzalloc(chunk_list_len, GFP_KERNEL);
+
+		if (!chunk_list)
+			return -ENOMEM;
+
+		chunk_list_phys = virt_to_phys(chunk_list);
+		for (j = 0; j < nchunks; j++)
+			chunk_list[j] = base + j * V2_CHUNK_SIZE;
+
+		/*
+		 * Flush the chunk list before sending the memory to the
+		 * secure environment to ensure the data is actually present
+		 * in RAM
+		 */
+		dmac_flush_range(chunk_list,
+				 (void *)chunk_list + chunk_list_len);
+
+		ret = secure_buffer_change_chunk(virt_to_phys(chunk_list),
+				nchunks, V2_CHUNK_SIZE, lock);
+
+		if (!ret) {
+			/*
+			 * Set or clear the private page flag to communicate the
+			 * status of the chunk to other entities
+			 */
+			if (lock)
+				SetPagePrivate(sg_page(sg));
+			else
+				ClearPagePrivate(sg_page(sg));
+		}
+
+		kfree(chunk_list);
+	}
+
+	return ret;
+}
+
+int msm_secure_table(struct sg_table *table)
+{
+	int ret;
+
+	mutex_lock(&secure_buffer_mutex);
+	ret = secure_buffer_change_table(table, 1);
+	mutex_unlock(&secure_buffer_mutex);
+
+	return ret;
+}
+
+int msm_unsecure_table(struct sg_table *table)
+{
+	int ret;
+
+	mutex_lock(&secure_buffer_mutex);
+	ret = secure_buffer_change_table(table, 0);
+	mutex_unlock(&secure_buffer_mutex);
+	return ret;
+}
+
+static struct dest_vm_and_perm_info *
+populate_dest_info(int *dest_vmids, int nelements, int *dest_perms,
+		   size_t *size_in_bytes)
+{
+	struct dest_vm_and_perm_info *dest_info;
+	int i;
+	size_t size;
+
+	/*
+	 * Keep the allocation within a single page (well below
+	 * PAGE_ALLOC_COSTLY_ORDER).
+	 */
+	size = nelements * sizeof(*dest_info);
+	if (size > PAGE_SIZE)
+		return NULL;
+
+	dest_info = kzalloc(size, GFP_KERNEL);
+	if (!dest_info)
+		return NULL;
+
+	for (i = 0; i < nelements; i++) {
+		dest_info[i].vm = dest_vmids[i];
+		dest_info[i].perm = dest_perms[i];
+		dest_info[i].ctx = 0x0;
+		dest_info[i].ctx_size = 0;
+	}
+
+	*size_in_bytes = size;
+	return dest_info;
+}
+
+/* Must hold secure_buffer_mutex while allocated buffer is in use */
+static struct mem_prot_info *get_info_list_from_table(struct sg_table *table,
+						      size_t *size_in_bytes)
+{
+	int i;
+	struct scatterlist *sg;
+	struct mem_prot_info *info;
+	size_t size;
+
+	size = table->nents * sizeof(*info);
+
+	if (size >= QCOM_SECURE_MEM_SIZE) {
+		pr_err("%s: Not enough memory allocated. Required size %zu\n",
+				__func__, size);
+		return NULL;
+	}
+
+	if (!qcom_secure_mem) {
+		pr_err("%s is not functional as qcom_secure_mem is not allocated.\n",
+				__func__);
+		return NULL;
+	}
+
+	/* "Allocate" it */
+	info = qcom_secure_mem;
+
+	for_each_sg(table->sgl, sg, table->nents, i) {
+		info[i].addr = page_to_phys(sg_page(sg));
+		info[i].size = sg->length;
+	}
+
+	*size_in_bytes = size;
+	return info;
+}
+
+#define BATCH_MAX_SIZE SZ_2M
+#define BATCH_MAX_SECTIONS 32
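+
+/*
+ * hyp_assign_table() below issues one SCM call per batch of scatterlist
+ * entries, capping each batch at BATCH_MAX_SECTIONS entries or
+ * BATCH_MAX_SIZE bytes, whichever limit is reached first.
+ */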
+
+int hyp_assign_table(struct sg_table *table,
+			u32 *source_vm_list, int source_nelems,
+			int *dest_vmids, int *dest_perms,
+			int dest_nelems)
+{
+	int ret = 0;
+	struct scm_desc desc = {0};
+	u32 *source_vm_copy;
+	size_t source_vm_copy_size;
+	struct dest_vm_and_perm_info *dest_vm_copy;
+	size_t dest_vm_copy_size;
+	struct mem_prot_info *sg_table_copy;
+	size_t sg_table_copy_size;
+
+	int batch_start, batch_end;
+	u64 batch_size;
+
+	/*
+	 * We can only pass cache-aligned sizes to hypervisor, so we need
+	 * to kmalloc and memcpy the source_vm_list here.
+	 */
+	source_vm_copy_size = sizeof(*source_vm_copy) * source_nelems;
+	source_vm_copy = kzalloc(source_vm_copy_size, GFP_KERNEL);
+	if (!source_vm_copy)
+		return -ENOMEM;
+
+	memcpy(source_vm_copy, source_vm_list, source_vm_copy_size);
+
+	dest_vm_copy = populate_dest_info(dest_vmids, dest_nelems, dest_perms,
+					  &dest_vm_copy_size);
+	if (!dest_vm_copy) {
+		ret = -ENOMEM;
+		goto out_free;
+	}
+
+	mutex_lock(&secure_buffer_mutex);
+
+	sg_table_copy = get_info_list_from_table(table, &sg_table_copy_size);
+	if (!sg_table_copy) {
+		ret = -ENOMEM;
+		goto out_unlock;
+	}
+
+	desc.args[0] = virt_to_phys(sg_table_copy);
+	desc.args[1] = sg_table_copy_size;
+	desc.args[2] = virt_to_phys(source_vm_copy);
+	desc.args[3] = source_vm_copy_size;
+	desc.args[4] = virt_to_phys(dest_vm_copy);
+	desc.args[5] = dest_vm_copy_size;
+	desc.args[6] = 0;
+
+	desc.arginfo = SCM_ARGS(7, SCM_RO, SCM_VAL, SCM_RO, SCM_VAL, SCM_RO,
+				SCM_VAL, SCM_VAL);
+
+	dmac_flush_range(source_vm_copy,
+			 (void *)source_vm_copy + source_vm_copy_size);
+	dmac_flush_range(sg_table_copy,
+			 (void *)sg_table_copy + sg_table_copy_size);
+	dmac_flush_range(dest_vm_copy,
+			 (void *)dest_vm_copy + dest_vm_copy_size);
+
+	batch_start = 0;
+	while (batch_start < table->nents) {
+		/* Ensure no size zero batches */
+		batch_size = sg_table_copy[batch_start].size;
+		batch_end = batch_start + 1;
+		while (1) {
+			u64 size;
+
+			if (batch_end >= table->nents)
+				break;
+			if (batch_end - batch_start >= BATCH_MAX_SECTIONS)
+				break;
+
+			size = sg_table_copy[batch_end].size;
+			if (size + batch_size >= BATCH_MAX_SIZE)
+				break;
+
+			batch_size += size;
+			batch_end++;
+		}
+
+		desc.args[0] = virt_to_phys(&sg_table_copy[batch_start]);
+		desc.args[1] = (batch_end - batch_start) *
+				sizeof(sg_table_copy[0]);
+
+		ret = scm_call2(SCM_SIP_FNID(SCM_SVC_MP,
+				MEM_PROT_ASSIGN_ID), &desc);
+		if (ret) {
+			pr_info("%s: Failed to assign memory protection, ret = %d\n",
+				__func__, ret);
+			break;
+		}
+		batch_start = batch_end;
+	}
+
+out_unlock:
+	mutex_unlock(&secure_buffer_mutex);
+	kfree(dest_vm_copy);
+out_free:
+	kfree(source_vm_copy);
+	return ret;
+}
+
+int hyp_assign_phys(phys_addr_t addr, u64 size, u32 *source_vm_list,
+			int source_nelems, int *dest_vmids,
+			int *dest_perms, int dest_nelems)
+{
+	struct sg_table table;
+	int ret;
+
+	ret = sg_alloc_table(&table, 1, GFP_KERNEL);
+	if (ret)
+		return ret;
+
+	sg_set_page(table.sgl, phys_to_page(addr), size, 0);
+
+	ret = hyp_assign_table(&table, source_vm_list, source_nelems,
+			       dest_vmids, dest_perms, dest_nelems);
+
+	sg_free_table(&table);
+	return ret;
+}
+EXPORT_SYMBOL(hyp_assign_phys);
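+
+/*
+ * Illustrative reassignment of a 1MB region from HLOS to the content
+ * protection pixel VM (VMID/PERM values are examples only, assuming the
+ * PERM_* flags from secure_buffer.h):
+ *
+ *	u32 src_vm[] = { VMID_HLOS };
+ *	int dst_vm[] = { VMID_CP_PIXEL };
+ *	int dst_perm[] = { PERM_READ | PERM_WRITE };
+ *
+ *	ret = hyp_assign_phys(paddr, SZ_1M, src_vm, 1, dst_vm, dst_perm, 1);
+ */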
+
+const char *msm_secure_vmid_to_string(int secure_vmid)
+{
+	switch (secure_vmid) {
+	case VMID_HLOS:
+		return "VMID_HLOS";
+	case VMID_CP_TOUCH:
+		return "VMID_CP_TOUCH";
+	case VMID_CP_BITSTREAM:
+		return "VMID_CP_BITSTREAM";
+	case VMID_CP_PIXEL:
+		return "VMID_CP_PIXEL";
+	case VMID_CP_NON_PIXEL:
+		return "VMID_CP_NON_PIXEL";
+	case VMID_CP_CAMERA:
+		return "VMID_CP_CAMERA";
+	case VMID_HLOS_FREE:
+		return "VMID_HLOS_FREE";
+	case VMID_MSS_MSA:
+		return "VMID_MSS_MSA";
+	case VMID_MSS_NONMSA:
+		return "VMID_MSS_NONMSA";
+	case VMID_CP_SEC_DISPLAY:
+		return "VMID_CP_SEC_DISPLAY";
+	case VMID_CP_APP:
+		return "VMID_CP_APP";
+	case VMID_WLAN:
+		return "VMID_WLAN";
+	case VMID_WLAN_CE:
+		return "VMID_WLAN_CE";
+	case VMID_CP_CAMERA_PREVIEW:
+		return "VMID_CP_CAMERA_PREVIEW";
+	case VMID_CP_SPSS_SP_SHARED:
+		return "VMID_CP_SPSS_SP_SHARED";
+	case VMID_INVAL:
+		return "VMID_INVAL";
+	default:
+		return "Unknown VMID";
+	}
+}
+
+#define MAKE_CP_VERSION(major, minor, patch) \
+	((((major) & 0x3FF) << 22) | (((minor) & 0x3FF) << 12) | \
+	 ((patch) & 0xFFF))
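+
+/*
+ * Worked example: MAKE_CP_VERSION(1, 1, 0) = (1 << 22) | (1 << 12)
+ * = 0x00401000, the minimum version accepted below.
+ */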
+
+bool msm_secure_v2_is_supported(void)
+{
+	u64 version;
+	int ret = scm_get_feat_version(FEATURE_ID_CP, &version);
+
+	/*
+	 * if the version is < 1.1.0 then dynamic buffer allocation is
+	 * not supported
+	 */
+	return (ret == 0) && (version >= MAKE_CP_VERSION(1, 1, 0));
+}
+
+static int __init alloc_secure_shared_memory(void)
+{
+	int ret = 0;
+	dma_addr_t dma_handle;
+
+	qcom_secure_mem = kzalloc(QCOM_SECURE_MEM_SIZE, GFP_KERNEL);
+	if (!qcom_secure_mem) {
+		/* Fallback to CMA-DMA memory */
+		qcom_secure_mem = dma_alloc_coherent(NULL, QCOM_SECURE_MEM_SIZE,
+						&dma_handle, GFP_KERNEL);
+		if (!qcom_secure_mem) {
+			pr_err("Couldn't allocate memory for secure use-cases. hyp_assign_table will not work\n");
+			return -ENOMEM;
+		}
+	}
+
+	return ret;
+}
+pure_initcall(alloc_secure_shared_memory);
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/soc/qcom/service-locator.c	2019-01-22 16:16:26.675275131 +0100
@@ -0,0 +1,439 @@
+/*
+ * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) "servloc: %s: " fmt, __func__
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/string.h>
+#include <linux/completion.h>
+#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/device.h>
+#include <linux/delay.h>
+#include <linux/workqueue.h>
+
+#include <soc/qcom/msm_qmi_interface.h>
+#include <soc/qcom/service-locator.h>
+#include "service-locator-private.h"
+
+#define SERVREG_LOC_SERVICE_INSTANCE_ID			1
+
+#define QMI_SERVREG_LOC_SERVER_INITIAL_TIMEOUT		2000
+#define QMI_SERVREG_LOC_SERVER_TIMEOUT			2000
+#define INITIAL_TIMEOUT					100000
+
+#define LOCATOR_NOT_PRESENT	0
+#define LOCATOR_PRESENT		1
+
+static u32 locator_status = LOCATOR_PRESENT;
+static bool service_inited;
+
+module_param_named(enable, locator_status, uint, S_IRUGO | S_IWUSR);
+
+static void service_locator_svc_arrive(struct work_struct *work);
+static void service_locator_svc_exit(struct work_struct *work);
+static void service_locator_recv_msg(struct work_struct *work);
+static void pd_locator_work(struct work_struct *work);
+
+struct workqueue_struct *servloc_wq;
+
+struct pd_qmi_data {
+	struct work_struct svc_arrive;
+	struct work_struct svc_exit;
+	struct work_struct svc_rcv_msg;
+	struct notifier_block notifier;
+	struct completion service_available;
+	struct mutex service_mutex;
+	struct qmi_handle *clnt_handle;
+};
+
+struct pd_qmi_work {
+	struct work_struct pd_loc_work;
+	struct pd_qmi_client_data *pdc;
+	struct notifier_block *notifier;
+};
+DEFINE_MUTEX(service_init_mutex);
+struct pd_qmi_data service_locator;
+
+/* See soc/qcom/service-locator.h for usage of the APIs defined here */
+
+static int service_locator_svc_event_notify(struct notifier_block *this,
+				      unsigned long code,
+				      void *_cmd)
+{
+	switch (code) {
+	case QMI_SERVER_ARRIVE:
+		queue_work(servloc_wq, &service_locator.svc_arrive);
+		break;
+	case QMI_SERVER_EXIT:
+		queue_work(servloc_wq, &service_locator.svc_exit);
+		break;
+	default:
+		break;
+	}
+	return 0;
+}
+
+static void service_locator_clnt_notify(struct qmi_handle *handle,
+			     enum qmi_event_type event, void *notify_priv)
+{
+	switch (event) {
+	case QMI_RECV_MSG:
+		schedule_work(&service_locator.svc_rcv_msg);
+		break;
+	default:
+		break;
+	}
+}
+
+static void service_locator_svc_arrive(struct work_struct *work)
+{
+	int rc = 0;
+
+	/* Create a Local client port for QMI communication */
+	mutex_lock(&service_locator.service_mutex);
+	service_locator.clnt_handle =
+			qmi_handle_create(service_locator_clnt_notify, NULL);
+	if (!service_locator.clnt_handle) {
+		complete_all(&service_locator.service_available);
+		mutex_unlock(&service_locator.service_mutex);
+		pr_err("Service locator QMI client handle alloc failed!\n");
+		return;
+	}
+
+	/* Connect to service */
+	rc = qmi_connect_to_service(service_locator.clnt_handle,
+		SERVREG_LOC_SERVICE_ID_V01, SERVREG_LOC_SERVICE_VERS_V01,
+		SERVREG_LOC_SERVICE_INSTANCE_ID);
+	if (rc) {
+		qmi_handle_destroy(service_locator.clnt_handle);
+		service_locator.clnt_handle = NULL;
+		complete_all(&service_locator.service_available);
+		mutex_unlock(&service_locator.service_mutex);
+		pr_err("Unable to connect to service rc:%d\n", rc);
+		return;
+	}
+	if (!service_inited)
+		complete_all(&service_locator.service_available);
+	mutex_unlock(&service_locator.service_mutex);
+	pr_info("Connection established with the Service locator\n");
+}
+
+static void service_locator_svc_exit(struct work_struct *work)
+{
+	mutex_lock(&service_locator.service_mutex);
+	qmi_handle_destroy(service_locator.clnt_handle);
+	service_locator.clnt_handle = NULL;
+	complete_all(&service_locator.service_available);
+	mutex_unlock(&service_locator.service_mutex);
+	pr_info("Connection with service locator lost\n");
+}
+
+static void service_locator_recv_msg(struct work_struct *work)
+{
+	int ret;
+
+	do {
+		pr_debug("Notified about a Receive event\n");
+	} while ((ret = qmi_recv_msg(service_locator.clnt_handle)) == 0);
+
+	if (ret != -ENOMSG)
+		pr_err("Error receiving message rc:%d\n", ret);
+}
+
+static void store_get_domain_list_response(struct pd_qmi_client_data *pd,
+		struct qmi_servreg_loc_get_domain_list_resp_msg_v01 *resp,
+		int offset)
+{
+	int i;
+
+	for (i = offset; i < resp->domain_list_len; i++) {
+		pd->domain_list[i].instance_id =
+					resp->domain_list[i].instance_id;
+		strlcpy(pd->domain_list[i].name, resp->domain_list[i].name,
+			QMI_SERVREG_LOC_NAME_LENGTH_V01 + 1);
+		pd->domain_list[i].service_data_valid =
+					resp->domain_list[i].service_data_valid;
+		pd->domain_list[i].service_data =
+					resp->domain_list[i].service_data;
+	}
+}
+
+static int servreg_loc_send_msg(struct msg_desc *req_desc,
+		struct msg_desc *resp_desc,
+		struct qmi_servreg_loc_get_domain_list_req_msg_v01 *req,
+		struct qmi_servreg_loc_get_domain_list_resp_msg_v01 *resp,
+		struct pd_qmi_client_data *pd)
+{
+	int rc;
+
+	/*
+	 * Send msg and get response. There is a chance that the service went
+	 * away since the time we last checked for it to be available and
+	 * actually made this call. In that case the call just fails.
+	 */
+	rc = qmi_send_req_wait(service_locator.clnt_handle, req_desc, req,
+		sizeof(*req), resp_desc, resp, sizeof(*resp),
+		QMI_SERVREG_LOC_SERVER_TIMEOUT);
+	if (rc < 0) {
+		pr_err("QMI send req failed for client %s, ret - %d\n",
+			pd->client_name, rc);
+		return rc;
+	}
+
+	/* Check the response */
+	if (resp->resp.result != QMI_RESULT_SUCCESS_V01) {
+		pr_err("QMI request for client %s failed 0x%x\n",
+			pd->client_name, resp->resp.error);
+		return -EREMOTEIO;
+	}
+	return rc;
+}
+
+static int service_locator_send_msg(struct pd_qmi_client_data *pd)
+{
+	struct msg_desc req_desc, resp_desc;
+	struct qmi_servreg_loc_get_domain_list_resp_msg_v01 *resp = NULL;
+	struct qmi_servreg_loc_get_domain_list_req_msg_v01 *req = NULL;
+	int rc;
+	int db_rev_count = 0, domains_read = 0;
+
+	if (!service_locator.clnt_handle) {
+		pr_err("Service locator not available!\n");
+		return -EAGAIN;
+	}
+
+	req = kzalloc(sizeof(
+		struct qmi_servreg_loc_get_domain_list_req_msg_v01),
+		GFP_KERNEL);
+	if (!req) {
+		pr_err("Unable to allocate memory for req message\n");
+		rc = -ENOMEM;
+		goto out;
+	}
+	resp = kzalloc(sizeof(
+		struct qmi_servreg_loc_get_domain_list_resp_msg_v01),
+		GFP_KERNEL);
+	if (!resp) {
+		pr_err("Unable to allocate memory for resp message\n");
+		rc = -ENOMEM;
+		goto out;
+	}
+	/* Prepare req and response message formats */
+	req_desc.msg_id = QMI_SERVREG_LOC_GET_DOMAIN_LIST_REQ_V01;
+	req_desc.max_msg_len =
+		QMI_SERVREG_LOC_GET_DOMAIN_LIST_REQ_MSG_V01_MAX_MSG_LEN;
+	req_desc.ei_array = qmi_servreg_loc_get_domain_list_req_msg_v01_ei;
+
+	resp_desc.msg_id = QMI_SERVREG_LOC_GET_DOMAIN_LIST_RESP_V01;
+	resp_desc.max_msg_len =
+		QMI_SERVREG_LOC_GET_DOMAIN_LIST_RESP_MSG_V01_MAX_MSG_LEN;
+	resp_desc.ei_array = qmi_servreg_loc_get_domain_list_resp_msg_v01_ei;
+
+	/* Prepare req and response message */
+	strlcpy(req->service_name, pd->service_name,
+		QMI_SERVREG_LOC_NAME_LENGTH_V01 + 1);
+	req->domain_offset_valid = true;
+	req->domain_offset = 0;
+
+	pd->domain_list = NULL;
+	do {
+		req->domain_offset += domains_read;
+		rc = servreg_loc_send_msg(&req_desc, &resp_desc, req, resp,
+					pd);
+		if (rc < 0) {
+			pr_err("send msg failed rc:%d\n", rc);
+			goto out;
+		}
+		if (!domains_read) {
+			db_rev_count = pd->db_rev_count = resp->db_rev_count;
+			pd->total_domains = resp->total_domains;
+			if (!resp->total_domains) {
+				pr_err("No matching domains found\n");
+				goto out;
+			}
+
+			pd->domain_list = kmalloc(
+					sizeof(struct servreg_loc_entry_v01) *
+					resp->total_domains, GFP_KERNEL);
+			if (!pd->domain_list) {
+				pr_err("Cannot allocate domain list\n");
+				rc = -ENOMEM;
+				goto out;
+			}
+		}
+		if (db_rev_count != resp->db_rev_count) {
+			pr_err("Service Locator DB updated for client %s\n",
+				pd->client_name);
+			kfree(pd->domain_list);
+			rc = -EAGAIN;
+			goto out;
+		}
+		if (resp->domain_list_len > resp->total_domains) {
+			/* Always read total_domains from the response msg */
+			resp->domain_list_len = resp->total_domains;
+		}
+		/* Copy the response */
+		store_get_domain_list_response(pd, resp, domains_read);
+		domains_read += resp->domain_list_len;
+	} while (domains_read < resp->total_domains);
+	rc = 0;
+out:
+	kfree(req);
+	kfree(resp);
+	return rc;
+}
+
+static int init_service_locator(void)
+{
+	int rc = 0;
+
+	mutex_lock(&service_init_mutex);
+	if (locator_status == LOCATOR_NOT_PRESENT) {
+		pr_err("Service Locator not enabled\n");
+		rc = -ENODEV;
+		goto inited;
+	}
+	if (service_inited)
+		goto inited;
+
+	service_locator.notifier.notifier_call =
+					service_locator_svc_event_notify;
+	init_completion(&service_locator.service_available);
+	mutex_init(&service_locator.service_mutex);
+
+	servloc_wq = create_singlethread_workqueue("servloc_wq");
+	if (!servloc_wq) {
+		rc = -ENOMEM;
+		pr_err("Could not create workqueue\n");
+		goto inited;
+	}
+
+	INIT_WORK(&service_locator.svc_arrive, service_locator_svc_arrive);
+	INIT_WORK(&service_locator.svc_exit, service_locator_svc_exit);
+	INIT_WORK(&service_locator.svc_rcv_msg, service_locator_recv_msg);
+
+	rc = qmi_svc_event_notifier_register(SERVREG_LOC_SERVICE_ID_V01,
+		SERVREG_LOC_SERVICE_VERS_V01, SERVREG_LOC_SERVICE_INSTANCE_ID,
+		&service_locator.notifier);
+	if (rc < 0) {
+		pr_err("Notifier register failed rc:%d\n", rc);
+		goto inited;
+	}
+
+	wait_for_completion(&service_locator.service_available);
+	service_inited = true;
+	mutex_unlock(&service_init_mutex);
+	pr_info("Service locator initialized\n");
+	return 0;
+
+inited:
+	mutex_unlock(&service_init_mutex);
+	return rc;
+}
+
+int get_service_location(char *client_name, char *service_name,
+				struct notifier_block *locator_nb)
+{
+	struct pd_qmi_client_data *pqcd;
+	struct pd_qmi_work *pqw;
+	int rc = 0;
+
+	if (!locator_nb || !client_name || !service_name) {
+		rc = -EINVAL;
+		pr_err("Invalid input!\n");
+		goto err;
+	}
+
+	pqcd = kmalloc(sizeof(struct pd_qmi_client_data), GFP_KERNEL);
+	if (!pqcd) {
+		rc = -ENOMEM;
+		pr_err("Allocation failed\n");
+		goto err;
+	}
+	strlcpy(pqcd->client_name, client_name, ARRAY_SIZE(pqcd->client_name));
+	strlcpy(pqcd->service_name, service_name,
+		ARRAY_SIZE(pqcd->service_name));
+
+	pqw = kmalloc(sizeof(struct pd_qmi_work), GFP_KERNEL);
+	if (!pqw) {
+		rc = -ENOMEM;
+		pr_err("Allocation failed\n");
+		kfree(pqcd);
+		goto err;
+	}
+	pqw->notifier = locator_nb;
+	pqw->pdc = pqcd;
+
+	INIT_WORK(&pqw->pd_loc_work, pd_locator_work);
+	schedule_work(&pqw->pd_loc_work);
+
+err:
+	return rc;
+}
+EXPORT_SYMBOL(get_service_location);
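+
+/*
+ * Sketch of a client (names below are placeholders): the notifier is
+ * invoked with LOCATOR_UP and a struct pd_qmi_client_data * on success,
+ * or with LOCATOR_DOWN on failure:
+ *
+ *	static int my_locator_cb(struct notifier_block *nb,
+ *				 unsigned long opcode, void *data)
+ *	{
+ *		if (opcode == LOCATOR_UP) {
+ *			struct pd_qmi_client_data *pd = data;
+ *
+ *			pr_info("%d domain(s) found\n", pd->total_domains);
+ *		}
+ *		return NOTIFY_OK;
+ *	}
+ *
+ *	static struct notifier_block my_nb = {
+ *		.notifier_call = my_locator_cb,
+ *	};
+ *
+ *	get_service_location("my_client", "my/service", &my_nb);
+ */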
+
+static void pd_locator_work(struct work_struct *work)
+{
+	int rc = 0;
+	struct pd_qmi_client_data *data;
+	struct pd_qmi_work *pdqw = container_of(work, struct pd_qmi_work,
+								pd_loc_work);
+
+	data = pdqw->pdc;
+	rc = init_service_locator();
+	if (rc) {
+		pr_err("Unable to connect to service locator!, rc = %d\n", rc);
+		pdqw->notifier->notifier_call(pdqw->notifier,
+			LOCATOR_DOWN, NULL);
+		goto err;
+	}
+	rc = service_locator_send_msg(data);
+	if (rc) {
+		pr_err("Failed to get process domains for %s for client %s rc:%d\n",
+			data->service_name, data->client_name, rc);
+		pdqw->notifier->notifier_call(pdqw->notifier,
+			LOCATOR_DOWN, NULL);
+		goto err;
+	}
+	pdqw->notifier->notifier_call(pdqw->notifier, LOCATOR_UP, data);
+
+err:
+	kfree(data);
+	kfree(pdqw);
+}
+
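+/**
+ * find_subsys() - Extract the subsystem name from a process domain path
+ * @pd_path: full process domain path, e.g. "msm/modem/wlan_pd"
+ * @subsys: buffer receiving the component between the first two '/'
+ *	    separators ("modem" in the example above)
+ */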
+int find_subsys(const char *pd_path, char *subsys)
+{
+	char *start, *end;
+
+	if (!subsys || !pd_path)
+		return -EINVAL;
+
+	start = strnstr(pd_path, "/", QMI_SERVREG_LOC_NAME_LENGTH_V01);
+	if (!start)
+		return -EINVAL;
+	start++;
+	end = strnstr(start, "/", QMI_SERVREG_LOC_NAME_LENGTH_V01);
+	if (!end || start == end)
+		return -EINVAL;
+
+	strlcpy(subsys, start, end - start + 1);
+	return 0;
+}
+EXPORT_SYMBOL(find_subsys);
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/soc/qcom/service-locator-private.h	2019-01-22 16:16:26.671275095 +0100
@@ -0,0 +1,362 @@
+/*
+ * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef SERVICE_LOCATOR_V01_H
+#define SERVICE_LOCATOR_V01_H
+
+#include <linux/qmi_encdec.h>
+
+#include <soc/qcom/msm_qmi_interface.h>
+#include <soc/qcom/service-locator.h>
+
+#define SERVREG_LOC_SERVICE_ID_V01 0x40
+#define SERVREG_LOC_SERVICE_VERS_V01 0x01
+
+#define QMI_SERVREG_LOC_INDICATION_REGISTER_RESP_V01 0x0020
+#define QMI_SERVREG_LOC_REGISTER_SERVICE_LIST_REQ_V01 0x0022
+#define QMI_SERVREG_LOC_GET_DOMAIN_LIST_REQ_V01 0x0021
+#define QMI_SERVREG_LOC_GET_DOMAIN_LIST_RESP_V01 0x0021
+#define QMI_SERVREG_LOC_DATABASE_UPDATED_IND_V01 0x0023
+#define QMI_SERVREG_LOC_INDICATION_REGISTER_REQ_V01 0x0020
+#define QMI_SERVREG_LOC_REGISTER_SERVICE_LIST_RESP_V01 0x0022
+
+#define QMI_SERVREG_LOC_NAME_LENGTH_V01 64
+#define QMI_SERVREG_LOC_LIST_LENGTH_V01 32
+
+enum qmi_servreg_loc_service_instance_enum_type_v01 {
+	QMI_SERVREG_LOC_SERVICE_INSTANCE_ENUM_TYPE_MIN_VAL_V01 = INT_MIN,
+	QMI_SERVREG_LOC_SERVICE_INSTANCE_APSS_V01 = 0x1,
+	QMI_SERVREG_LOC_SERVICE_INSTANCE_ENUM_TYPE_MAX_VAL_V01 = INT_MAX,
+};
+
+struct qmi_servreg_loc_indication_register_req_msg_v01 {
+	uint8_t enable_database_updated_indication_valid;
+	uint8_t enable_database_updated_indication;
+};
+#define QMI_SERVREG_LOC_INDICATION_REGISTER_REQ_MSG_V01_MAX_MSG_LEN 4
+struct elem_info qmi_servreg_loc_indication_register_req_msg_v01_ei[];
+
+struct qmi_servreg_loc_indication_register_resp_msg_v01 {
+	struct qmi_response_type_v01 resp;
+};
+#define QMI_SERVREG_LOC_INDICATION_REGISTER_RESP_MSG_V01_MAX_MSG_LEN 7
+struct elem_info qmi_servreg_loc_indication_register_resp_msg_v01_ei[];
+
+struct qmi_servreg_loc_get_domain_list_req_msg_v01 {
+	char service_name[QMI_SERVREG_LOC_NAME_LENGTH_V01 + 1];
+	uint8_t domain_offset_valid;
+	uint32_t domain_offset;
+};
+#define QMI_SERVREG_LOC_GET_DOMAIN_LIST_REQ_MSG_V01_MAX_MSG_LEN 74
+struct elem_info qmi_servreg_loc_get_domain_list_req_msg_v01_ei[];
+
+struct qmi_servreg_loc_get_domain_list_resp_msg_v01 {
+	struct qmi_response_type_v01 resp;
+	uint8_t total_domains_valid;
+	uint16_t total_domains;
+	uint8_t db_rev_count_valid;
+	uint16_t db_rev_count;
+	uint8_t domain_list_valid;
+	uint32_t domain_list_len;
+	struct servreg_loc_entry_v01
+				domain_list[QMI_SERVREG_LOC_LIST_LENGTH_V01];
+};
+#define QMI_SERVREG_LOC_GET_DOMAIN_LIST_RESP_MSG_V01_MAX_MSG_LEN 2389
+struct elem_info qmi_servreg_loc_get_domain_list_resp_msg_v01_ei[];
+
+struct qmi_servreg_loc_register_service_list_req_msg_v01 {
+	char domain_name[QMI_SERVREG_LOC_NAME_LENGTH_V01 + 1];
+	uint32_t service_list_len;
+	struct servreg_loc_entry_v01
+				service_list[QMI_SERVREG_LOC_LIST_LENGTH_V01];
+};
+#define QMI_SERVREG_LOC_REGISTER_SERVICE_LIST_REQ_MSG_V01_MAX_MSG_LEN 2439
+struct elem_info qmi_servreg_loc_register_service_list_req_msg_v01_ei[];
+
+struct qmi_servreg_loc_register_service_list_resp_msg_v01 {
+	struct qmi_response_type_v01 resp;
+};
+#define QMI_SERVREG_LOC_REGISTER_SERVICE_LIST_RESP_MSG_V01_MAX_MSG_LEN 7
+struct elem_info qmi_servreg_loc_register_service_list_resp_msg_v01_ei[];
+
+struct qmi_servreg_loc_database_updated_ind_msg_v01 {
+	char placeholder;
+};
+#define QMI_SERVREG_LOC_DATABASE_UPDATED_IND_MSG_V01_MAX_MSG_LEN 0
+struct elem_info qmi_servreg_loc_database_updated_ind_msg_v01_ei[];
+
+#define QMI_EOTI_DATA_TYPE	\
+{				\
+	.data_type = QMI_EOTI,	\
+	.elem_len  = 0,		\
+	.elem_size = 0,		\
+	.is_array  = NO_ARRAY,	\
+	.tlv_type  = 0x00,	\
+	.offset    = 0,		\
+	.ei_array  = NULL,	\
+},
+
+static struct elem_info servreg_loc_entry_v01_ei[] = {
+	{
+		.data_type      = QMI_STRING,
+		.elem_len       = QMI_SERVREG_LOC_NAME_LENGTH_V01 + 1,
+		.elem_size      = sizeof(char),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct servreg_loc_entry_v01,
+					   name),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint32_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct servreg_loc_entry_v01,
+					   instance_id),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct servreg_loc_entry_v01,
+					   service_data_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint32_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct servreg_loc_entry_v01,
+					   service_data),
+	},
+	QMI_EOTI_DATA_TYPE
+};
+
+struct elem_info qmi_servreg_loc_indication_register_req_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(struct
+				qmi_servreg_loc_indication_register_req_msg_v01,
+				enable_database_updated_indication_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(struct
+				qmi_servreg_loc_indication_register_req_msg_v01,
+				enable_database_updated_indication),
+	},
+	QMI_EOTI_DATA_TYPE
+};
+
+struct elem_info qmi_servreg_loc_indication_register_resp_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = 1,
+		.elem_size      = sizeof(struct qmi_response_type_v01),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x02,
+		.offset         = offsetof(struct
+			qmi_servreg_loc_indication_register_resp_msg_v01,
+			resp),
+		.ei_array      = get_qmi_response_type_v01_ei(),
+	},
+	QMI_EOTI_DATA_TYPE
+};
+
+struct elem_info qmi_servreg_loc_get_domain_list_req_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_STRING,
+		.elem_len       = QMI_SERVREG_LOC_NAME_LENGTH_V01 + 1,
+		.elem_size      = sizeof(char),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x01,
+		.offset         = offsetof(struct
+				qmi_servreg_loc_get_domain_list_req_msg_v01,
+				service_name),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(struct
+				qmi_servreg_loc_get_domain_list_req_msg_v01,
+				domain_offset_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint32_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(struct
+				qmi_servreg_loc_get_domain_list_req_msg_v01,
+				domain_offset),
+	},
+	QMI_EOTI_DATA_TYPE
+};
+
+struct elem_info qmi_servreg_loc_get_domain_list_resp_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = 1,
+		.elem_size      = sizeof(struct qmi_response_type_v01),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x02,
+		.offset         = offsetof(struct
+				qmi_servreg_loc_get_domain_list_resp_msg_v01,
+				resp),
+		.ei_array      = get_qmi_response_type_v01_ei(),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(struct
+				qmi_servreg_loc_get_domain_list_resp_msg_v01,
+				total_domains_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_2_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint16_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(struct
+				qmi_servreg_loc_get_domain_list_resp_msg_v01,
+				total_domains),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x11,
+		.offset         = offsetof(struct
+				qmi_servreg_loc_get_domain_list_resp_msg_v01,
+				db_rev_count_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_2_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint16_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x11,
+		.offset         = offsetof(struct
+				qmi_servreg_loc_get_domain_list_resp_msg_v01,
+				db_rev_count),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x12,
+		.offset         = offsetof(struct
+				qmi_servreg_loc_get_domain_list_resp_msg_v01,
+				domain_list_valid),
+	},
+	{
+		.data_type      = QMI_DATA_LEN,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x12,
+		.offset         = offsetof(struct
+				qmi_servreg_loc_get_domain_list_resp_msg_v01,
+				domain_list_len),
+	},
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = QMI_SERVREG_LOC_LIST_LENGTH_V01,
+		.elem_size      = sizeof(struct servreg_loc_entry_v01),
+		.is_array       = VAR_LEN_ARRAY,
+		.tlv_type       = 0x12,
+		.offset         = offsetof(struct
+				qmi_servreg_loc_get_domain_list_resp_msg_v01,
+				domain_list),
+		.ei_array      = servreg_loc_entry_v01_ei,
+	},
+	QMI_EOTI_DATA_TYPE
+};
+
+struct elem_info qmi_servreg_loc_register_service_list_req_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_STRING,
+		.elem_len       = QMI_SERVREG_LOC_NAME_LENGTH_V01 + 1,
+		.elem_size      = sizeof(char),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x01,
+		.offset         = offsetof(struct
+			qmi_servreg_loc_register_service_list_req_msg_v01,
+			domain_name),
+	},
+	{
+		.data_type      = QMI_DATA_LEN,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x02,
+		.offset         = offsetof(struct
+			qmi_servreg_loc_register_service_list_req_msg_v01,
+			service_list_len),
+	},
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = QMI_SERVREG_LOC_LIST_LENGTH_V01,
+		.elem_size      = sizeof(struct servreg_loc_entry_v01),
+		.is_array       = VAR_LEN_ARRAY,
+		.tlv_type       = 0x02,
+		.offset         = offsetof(struct
+			qmi_servreg_loc_register_service_list_req_msg_v01,
+			service_list),
+		.ei_array      = servreg_loc_entry_v01_ei,
+	},
+	QMI_EOTI_DATA_TYPE
+};
+
+struct elem_info qmi_servreg_loc_register_service_list_resp_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = 1,
+		.elem_size      = sizeof(struct qmi_response_type_v01),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x02,
+		.offset         = offsetof(struct
+			qmi_servreg_loc_register_service_list_resp_msg_v01,
+			resp),
+		.ei_array      = get_qmi_response_type_v01_ei(),
+	},
+	QMI_EOTI_DATA_TYPE
+};
+
+struct elem_info qmi_servreg_loc_database_updated_ind_msg_v01_ei[] = {
+	QMI_EOTI_DATA_TYPE
+};
+
+#endif
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/soc/qcom/service-notifier.c	2019-01-22 16:16:26.675275131 +0100
@@ -0,0 +1,751 @@
+/*
+ * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) "service-notifier: %s: " fmt, __func__
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/string.h>
+#include <linux/completion.h>
+#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/err.h>
+#include <linux/uaccess.h>
+
+#include <soc/qcom/subsystem_restart.h>
+#include <soc/qcom/subsystem_notif.h>
+#include <soc/qcom/sysmon.h>
+#include <soc/qcom/service-locator.h>
+#include <soc/qcom/service-notifier.h>
+#include "service-notifier-private.h"
+
+#define SERVREG_NOTIF_NAME_LENGTH	QMI_SERVREG_NOTIF_NAME_LENGTH_V01
+#define SERVREG_NOTIF_SERVICE_ID	SERVREG_NOTIF_SERVICE_ID_V01
+#define SERVREG_NOTIF_SERVICE_VERS	SERVREG_NOTIF_SERVICE_VERS_V01
+
+#define SERVREG_NOTIF_SET_ACK_REQ		\
+			QMI_SERVREG_NOTIF_STATE_UPDATED_IND_ACK_REQ_V01
+#define SERVREG_NOTIF_SET_ACK_REQ_MSG_LEN	\
+			QMI_SERVREG_NOTIF_SET_ACK_REQ_MSG_V01_MAX_MSG_LEN
+#define SERVREG_NOTIF_SET_ACK_RESP		\
+			QMI_SERVREG_NOTIF_STATE_UPDATED_IND_ACK_RESP_V01
+#define SERVREG_NOTIF_SET_ACK_RESP_MSG_LEN	\
+			QMI_SERVREG_NOTIF_SET_ACK_RESP_MSG_V01_MAX_MSG_LEN
+#define SERVREG_NOTIF_STATE_UPDATED_IND_MSG	\
+			QMI_SERVREG_NOTIF_STATE_UPDATED_IND_V01
+#define SERVREG_NOTIF_STATE_UPDATED_IND_MSG_LEN	\
+			QMI_SERVREG_NOTIF_STATE_UPDATED_IND_MSG_V01_MAX_MSG_LEN
+
+#define SERVREG_NOTIF_REGISTER_LISTENER_REQ	\
+			QMI_SERVREG_NOTIF_REGISTER_LISTENER_REQ_V01
+#define SERVREG_NOTIF_REGISTER_LISTENER_REQ_MSG_LEN \
+		QMI_SERVREG_NOTIF_REGISTER_LISTENER_REQ_MSG_V01_MAX_MSG_LEN
+#define SERVREG_NOTIF_REGISTER_LISTENER_RESP	\
+			QMI_SERVREG_NOTIF_REGISTER_LISTENER_RESP_V01
+#define SERVREG_NOTIF_REGISTER_LISTENER_RESP_MSG_LEN \
+		QMI_SERVREG_NOTIF_REGISTER_LISTENER_RESP_MSG_V01_MAX_MSG_LEN
+
+#define QMI_STATE_MIN_VAL QMI_SERVREG_NOTIF_SERVICE_STATE_ENUM_TYPE_MIN_VAL_V01
+#define QMI_STATE_MAX_VAL QMI_SERVREG_NOTIF_SERVICE_STATE_ENUM_TYPE_MAX_VAL_V01
+
+#define SERVER_TIMEOUT				500
+#define MAX_STRING_LEN				100
+
+/*
+ * Per-user-service data structure
+ * struct service_notif_info - notifier struct for each unique service path
+ * service_path - service provider path/location
+ * instance_id - service instance id specific to a subsystem
+ * service_notif_rcvr_list - list of clients interested in this service
+ *                           provider's notifications
+ * curr_state - current state of the service
+ */
+struct service_notif_info {
+	char service_path[SERVREG_NOTIF_NAME_LENGTH];
+	int instance_id;
+	struct srcu_notifier_head service_notif_rcvr_list;
+	struct list_head list;
+	int curr_state;
+};
+static LIST_HEAD(service_list);
+static DEFINE_MUTEX(service_list_lock);
+
+struct ind_req_resp {
+	char service_path[SERVREG_NOTIF_NAME_LENGTH];
+	int transaction_id;
+	int curr_state;
+};
+
+/*
+ * Per Root Process Domain (Root service) data structure
+ * struct qmi_client_info - QMI client info for each subsystem/instance id
+ * instance_id - service instance id specific to a subsystem (Root PD)
+ * clnt_handle - unique QMI client handle
+ * service_connected - indicates if QMI service is up on the subsystem
+ * ssr_handle - The SSR handle provided by the SSR driver for the subsystem
+ *		on which the remote root PD runs.
+ */
+struct qmi_client_info {
+	int instance_id;
+	enum pd_subsys_state subsys_state;
+	struct work_struct svc_arrive;
+	struct work_struct svc_exit;
+	struct work_struct svc_rcv_msg;
+	struct work_struct ind_ack;
+	struct work_struct qmi_handle_free;
+	struct workqueue_struct *svc_event_wq;
+	struct qmi_handle *clnt_handle;
+	struct notifier_block notifier;
+	void *ssr_handle;
+	struct notifier_block ssr_notifier;
+	bool service_connected;
+	struct list_head list;
+	struct ind_req_resp ind_msg;
+};
+static LIST_HEAD(qmi_client_list);
+static DEFINE_MUTEX(qmi_list_lock);
+static DEFINE_MUTEX(qmi_client_release_lock);
+
+static DEFINE_MUTEX(notif_add_lock);
+
+static void root_service_clnt_recv_msg(struct work_struct *work);
+static void root_service_service_arrive(struct work_struct *work);
+static void root_service_exit_work(struct work_struct *work);
+
+static void free_qmi_handle(struct work_struct *work)
+{
+	struct qmi_client_info *data = container_of(work,
+				struct qmi_client_info, qmi_handle_free);
+
+	mutex_lock(&qmi_client_release_lock);
+	data->service_connected = false;
+	qmi_handle_destroy(data->clnt_handle);
+	data->clnt_handle = NULL;
+	mutex_unlock(&qmi_client_release_lock);
+}
+
+static struct service_notif_info *_find_service_info(const char *service_path)
+{
+	struct service_notif_info *service_notif;
+
+	mutex_lock(&service_list_lock);
+	list_for_each_entry(service_notif, &service_list, list)
+		if (!strcmp(service_notif->service_path, service_path)) {
+			mutex_unlock(&service_list_lock);
+			return service_notif;
+		}
+	mutex_unlock(&service_list_lock);
+	return NULL;
+}
+
+static int service_notif_queue_notification(struct service_notif_info
+		*service_notif,
+		enum qmi_servreg_notif_service_state_enum_type_v01 notif_type,
+		void *info)
+{
+	int ret;
+
+	if (service_notif->curr_state == notif_type)
+		return 0;
+
+	ret = srcu_notifier_call_chain(&service_notif->service_notif_rcvr_list,
+							notif_type, info);
+	return ret;
+}
+
+static void root_service_clnt_recv_msg(struct work_struct *work)
+{
+	int ret;
+	struct qmi_client_info *data = container_of(work,
+					struct qmi_client_info, svc_rcv_msg);
+
+	do {
+		pr_debug("Polling for QMI recv msg(instance-id: %d)\n",
+							data->instance_id);
+	} while ((ret = qmi_recv_msg(data->clnt_handle)) == 0);
+
+	pr_debug("Notified about a Receive event (instance-id: %d)\n",
+							data->instance_id);
+}
+
+static void root_service_clnt_notify(struct qmi_handle *handle,
+			     enum qmi_event_type event, void *notify_priv)
+{
+	struct qmi_client_info *data = container_of(notify_priv,
+					struct qmi_client_info, svc_arrive);
+
+	switch (event) {
+	case QMI_RECV_MSG:
+		schedule_work(&data->svc_rcv_msg);
+		break;
+	default:
+		break;
+	}
+}
+
+static void send_ind_ack(struct work_struct *work)
+{
+	struct qmi_client_info *data = container_of(work,
+					struct qmi_client_info, ind_ack);
+	struct qmi_servreg_notif_set_ack_req_msg_v01 req;
+	struct msg_desc req_desc, resp_desc;
+	struct qmi_servreg_notif_set_ack_resp_msg_v01 resp = { { 0, 0 } };
+	struct service_notif_info *service_notif;
+	enum pd_subsys_state state = USER_PD_STATE_CHANGE;
+	int rc;
+
+	service_notif = _find_service_info(data->ind_msg.service_path);
+	if (!service_notif)
+		return;
+	if ((int)data->ind_msg.curr_state < QMI_STATE_MIN_VAL ||
+		(int)data->ind_msg.curr_state > QMI_STATE_MAX_VAL) {
+		pr_err("Unexpected indication notification state %d\n",
+			data->ind_msg.curr_state);
+	} else {
+		mutex_lock(&notif_add_lock);
+		mutex_lock(&service_list_lock);
+		rc = service_notif_queue_notification(service_notif,
+			data->ind_msg.curr_state, &state);
+		if (rc & NOTIFY_STOP_MASK)
+			pr_err("Notifier callback aborted for %s with error %d\n",
+				data->ind_msg.service_path, rc);
+		service_notif->curr_state = data->ind_msg.curr_state;
+		mutex_unlock(&service_list_lock);
+		mutex_unlock(&notif_add_lock);
+	}
+
+	req.transaction_id = data->ind_msg.transaction_id;
+	snprintf(req.service_name, ARRAY_SIZE(req.service_name), "%s",
+						data->ind_msg.service_path);
+
+	req_desc.msg_id = SERVREG_NOTIF_SET_ACK_REQ;
+	req_desc.max_msg_len = SERVREG_NOTIF_SET_ACK_REQ_MSG_LEN;
+	req_desc.ei_array = qmi_servreg_notif_set_ack_req_msg_v01_ei;
+
+	resp_desc.msg_id = SERVREG_NOTIF_SET_ACK_RESP;
+	resp_desc.max_msg_len = SERVREG_NOTIF_SET_ACK_RESP_MSG_LEN;
+	resp_desc.ei_array = qmi_servreg_notif_set_ack_resp_msg_v01_ei;
+
+	rc = qmi_send_req_wait(data->clnt_handle, &req_desc,
+				&req, sizeof(req), &resp_desc, &resp,
+				sizeof(resp), SERVER_TIMEOUT);
+	if (rc < 0) {
+		pr_err("%s: Sending Ack failed/server timeout, ret - %d\n",
+						data->ind_msg.service_path, rc);
+		return;
+	}
+
+	/* Check the response */
+	if (resp.resp.result != QMI_RESULT_SUCCESS_V01)
+		pr_err("QMI request failed 0x%x\n", resp.resp.error);
+	pr_info("Indication ACKed for transid %d, service %s, instance %d!\n",
+		data->ind_msg.transaction_id, data->ind_msg.service_path,
+		data->instance_id);
+}
+
+static void root_service_service_ind_cb(struct qmi_handle *handle,
+				unsigned int msg_id, void *msg,
+				unsigned int msg_len, void *ind_cb_priv)
+{
+	struct qmi_client_info *data = (struct qmi_client_info *)ind_cb_priv;
+	struct msg_desc ind_desc;
+	struct qmi_servreg_notif_state_updated_ind_msg_v01 ind_msg = {
+					QMI_STATE_MIN_VAL, "", 0xFFFF };
+	int rc;
+
+	ind_desc.msg_id = SERVREG_NOTIF_STATE_UPDATED_IND_MSG;
+	ind_desc.max_msg_len = SERVREG_NOTIF_STATE_UPDATED_IND_MSG_LEN;
+	ind_desc.ei_array = qmi_servreg_notif_state_updated_ind_msg_v01_ei;
+	rc = qmi_kernel_decode(&ind_desc, &ind_msg, msg, msg_len);
+	if (rc < 0) {
+		pr_err("Failed to decode message rc:%d\n", rc);
+		return;
+	}
+
+	pr_info("Indication received from %s, state: 0x%x, trans-id: %d\n",
+		ind_msg.service_name, ind_msg.curr_state,
+		ind_msg.transaction_id);
+
+	data->ind_msg.transaction_id = ind_msg.transaction_id;
+	data->ind_msg.curr_state = ind_msg.curr_state;
+	snprintf(data->ind_msg.service_path,
+		ARRAY_SIZE(data->ind_msg.service_path), "%s",
+		ind_msg.service_name);
+	schedule_work(&data->ind_ack);
+}
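+
+/*
+ * Note: the indication callback above only decodes the message and
+ * stashes it in data->ind_msg; send_ind_ack(), scheduled through
+ * data->ind_ack, then notifies local listeners and ACKs the
+ * transaction back to the remote service.
+ */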
+
+static int send_notif_listener_msg_req(struct service_notif_info *service_notif,
+					struct qmi_client_info *data,
+					bool register_notif, int *curr_state)
+{
+	struct qmi_servreg_notif_register_listener_req_msg_v01 req;
+	struct qmi_servreg_notif_register_listener_resp_msg_v01
+						resp = { { 0, 0 } };
+	struct msg_desc req_desc, resp_desc;
+	int rc;
+
+	snprintf(req.service_name, ARRAY_SIZE(req.service_name), "%s",
+						service_notif->service_path);
+	req.enable = register_notif;
+
+	req_desc.msg_id = SERVREG_NOTIF_REGISTER_LISTENER_REQ;
+	req_desc.max_msg_len = SERVREG_NOTIF_REGISTER_LISTENER_REQ_MSG_LEN;
+	req_desc.ei_array = qmi_servreg_notif_register_listener_req_msg_v01_ei;
+
+	resp_desc.msg_id = SERVREG_NOTIF_REGISTER_LISTENER_RESP;
+	resp_desc.max_msg_len = SERVREG_NOTIF_REGISTER_LISTENER_RESP_MSG_LEN;
+	resp_desc.ei_array =
+			qmi_servreg_notif_register_listener_resp_msg_v01_ei;
+
+	rc = qmi_send_req_wait(data->clnt_handle, &req_desc, &req, sizeof(req),
+				&resp_desc, &resp, sizeof(resp),
+				SERVER_TIMEOUT);
+	if (rc < 0) {
+		pr_err("%s: Message sending failed/server timeout, ret - %d\n",
+					service_notif->service_path, rc);
+		return rc;
+	}
+
+	/* Check the response */
+	if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
+		pr_err("QMI request failed 0x%x\n", resp.resp.error);
+		return -EREMOTEIO;
+	}
+
+	if ((int) resp.curr_state < QMI_STATE_MIN_VAL ||
+				(int) resp.curr_state > QMI_STATE_MAX_VAL) {
+		pr_err("Invalid indication notification state %d\n",
+							resp.curr_state);
+		rc = -EINVAL;
+	}
+	*curr_state = resp.curr_state;
+	return rc;
+}
+
+static int register_notif_listener(struct service_notif_info *service_notif,
+					struct qmi_client_info *data,
+					int *curr_state)
+{
+	return send_notif_listener_msg_req(service_notif, data, true,
+								curr_state);
+}
+
+static void root_service_service_arrive(struct work_struct *work)
+{
+	struct service_notif_info *service_notif = NULL;
+	struct qmi_client_info *data = container_of(work,
+					struct qmi_client_info, svc_arrive);
+	int rc;
+	int curr_state;
+
+	mutex_lock(&qmi_client_release_lock);
+	/* Create a Local client port for QMI communication */
+	data->clnt_handle = qmi_handle_create(root_service_clnt_notify, work);
+	if (!data->clnt_handle) {
+		pr_err("QMI client handle alloc failed (instance-id: %d)\n",
+							data->instance_id);
+		mutex_unlock(&qmi_client_release_lock);
+		return;
+	}
+
+	/* Connect to the service on the root PD service */
+	rc = qmi_connect_to_service(data->clnt_handle,
+			SERVREG_NOTIF_SERVICE_ID, SERVREG_NOTIF_SERVICE_VERS,
+			data->instance_id);
+	if (rc < 0) {
+		pr_err("Could not connect to service(instance-id: %d) rc:%d\n",
+							data->instance_id, rc);
+		qmi_handle_destroy(data->clnt_handle);
+		data->clnt_handle = NULL;
+		mutex_unlock(&qmi_client_release_lock);
+		return;
+	}
+	data->service_connected = true;
+	mutex_unlock(&qmi_client_release_lock);
+	pr_info("Connection established between QMI handle and %d service\n",
+							data->instance_id);
+	/* Register for indication messages about service */
+	rc = qmi_register_ind_cb(data->clnt_handle,
+		root_service_service_ind_cb, (void *)data);
+	if (rc < 0)
+		pr_err("Indication callback register failed(instance-id: %d) rc:%d\n",
+			data->instance_id, rc);
+	mutex_lock(&notif_add_lock);
+	mutex_lock(&service_list_lock);
+	list_for_each_entry(service_notif, &service_list, list) {
+		if (service_notif->instance_id == data->instance_id) {
+			enum pd_subsys_state state = ROOT_PD_UP;
+			rc = register_notif_listener(service_notif, data,
+								&curr_state);
+			if (rc) {
+				pr_err("Notifier registration failed for %s rc:%d\n",
+					service_notif->service_path, rc);
+			} else {
+				rc = service_notif_queue_notification(
+					service_notif, curr_state, &state);
+				if (rc & NOTIFY_STOP_MASK)
+					pr_err("Notifier callback aborted for %s error:%d\n",
+					service_notif->service_path, rc);
+				service_notif->curr_state = curr_state;
+			}
+		}
+	}
+	mutex_unlock(&service_list_lock);
+	mutex_unlock(&notif_add_lock);
+}
+
+static void root_service_service_exit(struct qmi_client_info *data,
+					enum pd_subsys_state state)
+{
+	struct service_notif_info *service_notif = NULL;
+	int rc;
+
+	/*
+	 * Send service-down notifications to all clients
+	 * registered for notifications for that service.
+	 */
+	mutex_lock(&notif_add_lock);
+	mutex_lock(&service_list_lock);
+	list_for_each_entry(service_notif, &service_list, list) {
+		if (service_notif->instance_id == data->instance_id) {
+			rc = service_notif_queue_notification(service_notif,
+					SERVREG_NOTIF_SERVICE_STATE_DOWN_V01,
+					&state);
+			if (rc & NOTIFY_STOP_MASK)
+				pr_err("Notifier callback aborted for %s with error %d\n",
+					service_notif->service_path, rc);
+			service_notif->curr_state =
+					SERVREG_NOTIF_SERVICE_STATE_DOWN_V01;
+		}
+	}
+	mutex_unlock(&service_list_lock);
+	mutex_unlock(&notif_add_lock);
+
+	/*
+	 * Destroy client handle and try connecting when
+	 * service comes up again.
+	 */
+	queue_work(data->svc_event_wq, &data->qmi_handle_free);
+}
+
+static void root_service_exit_work(struct work_struct *work)
+{
+	struct qmi_client_info *data = container_of(work,
+					struct qmi_client_info, svc_exit);
+	root_service_service_exit(data, data->subsys_state);
+}
+
+static int service_event_notify(struct notifier_block *this,
+				      unsigned long code,
+				      void *_cmd)
+{
+	struct qmi_client_info *data = container_of(this,
+					struct qmi_client_info, notifier);
+
+	switch (code) {
+	case QMI_SERVER_ARRIVE:
+		pr_debug("Root PD service UP\n");
+		queue_work(data->svc_event_wq, &data->svc_arrive);
+		break;
+	case QMI_SERVER_EXIT:
+		pr_debug("Root PD service DOWN\n");
+		data->subsys_state = ROOT_PD_DOWN;
+		queue_work(data->svc_event_wq, &data->svc_exit);
+		break;
+	default:
+		break;
+	}
+	return 0;
+}
+
+static int ssr_event_notify(struct notifier_block *this,
+				  unsigned long code,
+				  void *data)
+{
+	struct qmi_client_info *info = container_of(this,
+					struct qmi_client_info, ssr_notifier);
+	struct notif_data *notif = data;
+
+	switch (code) {
+	case SUBSYS_BEFORE_SHUTDOWN:
+		pr_debug("Root PD DOWN(SSR notification), state:%d\n",
+						notif->crashed);
+		switch (notif->crashed) {
+		case CRASH_STATUS_ERR_FATAL:
+			info->subsys_state = ROOT_PD_ERR_FATAL;
+			break;
+		case CRASH_STATUS_WDOG_BITE:
+			info->subsys_state = ROOT_PD_WDOG_BITE;
+			break;
+		default:
+			info->subsys_state = ROOT_PD_SHUTDOWN;
+			break;
+		}
+		root_service_service_exit(info, info->subsys_state);
+		break;
+	default:
+		break;
+	}
+	return NOTIFY_DONE;
+}
+
+static void *add_service_notif(const char *service_path, int instance_id,
+							int *curr_state)
+{
+	struct service_notif_info *service_notif;
+	struct qmi_client_info *tmp, *qmi_data;
+	long rc;
+	char subsys[SERVREG_NOTIF_NAME_LENGTH];
+
+	rc = find_subsys(service_path, subsys);
+	if (rc < 0) {
+		pr_err("Could not find subsys for %s\n", service_path);
+		return ERR_PTR(rc);
+	}
+
+	service_notif = kzalloc(sizeof(struct service_notif_info), GFP_KERNEL);
+	if (!service_notif)
+		return ERR_PTR(-ENOMEM);
+
+	strlcpy(service_notif->service_path, service_path,
+		ARRAY_SIZE(service_notif->service_path));
+	service_notif->instance_id = instance_id;
+
+	/*
+	 * If we already have a connection to the root PD that hosts the
+	 * remote service we want notifications about, then reuse the
+	 * existing QMI connection.
+	 */
+	mutex_lock(&qmi_list_lock);
+	list_for_each_entry(tmp, &qmi_client_list, list) {
+		if (tmp->instance_id == instance_id) {
+			if (tmp->service_connected) {
+				rc = register_notif_listener(service_notif, tmp,
+								curr_state);
+				if (rc) {
+					mutex_unlock(&qmi_list_lock);
+					pr_err("Register notifier failed: %s",
+						service_path);
+					kfree(service_notif);
+					return ERR_PTR(rc);
+				}
+				service_notif->curr_state = *curr_state;
+			}
+			mutex_unlock(&qmi_list_lock);
+			goto add_service_list;
+		}
+	}
+	mutex_unlock(&qmi_list_lock);
+
+	qmi_data = kzalloc(sizeof(struct qmi_client_info), GFP_KERNEL);
+	if (!qmi_data) {
+		kfree(service_notif);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	qmi_data->instance_id = instance_id;
+	qmi_data->clnt_handle = NULL;
+	qmi_data->notifier.notifier_call = service_event_notify;
+
+	qmi_data->svc_event_wq = create_singlethread_workqueue(subsys);
+	if (!qmi_data->svc_event_wq) {
+		rc = -ENOMEM;
+		goto exit;
+	}
+
+	INIT_WORK(&qmi_data->svc_arrive, root_service_service_arrive);
+	INIT_WORK(&qmi_data->svc_exit, root_service_exit_work);
+	INIT_WORK(&qmi_data->svc_rcv_msg, root_service_clnt_recv_msg);
+	INIT_WORK(&qmi_data->ind_ack, send_ind_ack);
+	INIT_WORK(&qmi_data->qmi_handle_free, free_qmi_handle);
+
+	*curr_state = service_notif->curr_state =
+				SERVREG_NOTIF_SERVICE_STATE_UNINIT_V01;
+
+	rc = qmi_svc_event_notifier_register(SERVREG_NOTIF_SERVICE_ID,
+			SERVREG_NOTIF_SERVICE_VERS, qmi_data->instance_id,
+			&qmi_data->notifier);
+	if (rc < 0) {
+		pr_err("Notifier register failed (instance-id: %d)\n",
+							qmi_data->instance_id);
+		goto exit;
+	}
+	qmi_data->ssr_notifier.notifier_call = ssr_event_notify;
+	qmi_data->ssr_handle = subsys_notif_register_notifier(subsys,
+						&qmi_data->ssr_notifier);
+	if (IS_ERR(qmi_data->ssr_handle)) {
+		pr_err("SSR notif register for %s failed(instance-id: %d)\n",
+			subsys, qmi_data->instance_id);
+		rc = PTR_ERR(qmi_data->ssr_handle);
+		goto exit;
+	}
+
+	mutex_lock(&qmi_list_lock);
+	INIT_LIST_HEAD(&qmi_data->list);
+	list_add_tail(&qmi_data->list, &qmi_client_list);
+	mutex_unlock(&qmi_list_lock);
+
+add_service_list:
+	srcu_init_notifier_head(&service_notif->service_notif_rcvr_list);
+
+	mutex_lock(&service_list_lock);
+	INIT_LIST_HEAD(&service_notif->list);
+	list_add_tail(&service_notif->list, &service_list);
+	mutex_unlock(&service_list_lock);
+
+	return service_notif;
+exit:
+	if (qmi_data->svc_event_wq)
+		destroy_workqueue(qmi_data->svc_event_wq);
+	kfree(qmi_data);
+	kfree(service_notif);
+	return ERR_PTR(rc);
+}
+
+static int send_pd_restart_req(const char *service_path,
+				struct qmi_client_info *data)
+{
+	struct qmi_servreg_notif_restart_pd_req_msg_v01 req;
+	struct qmi_servreg_notif_restart_pd_resp_msg_v01 resp = { { 0, 0 } };
+	struct msg_desc req_desc, resp_desc;
+	int rc;
+
+	snprintf(req.service_name, ARRAY_SIZE(req.service_name), "%s",
+							service_path);
+
+	req_desc.msg_id = QMI_SERVREG_NOTIF_RESTART_PD_REQ_V01;
+	req_desc.max_msg_len =
+		QMI_SERVREG_NOTIF_RESTART_PD_REQ_MSG_V01_MAX_MSG_LEN;
+	req_desc.ei_array = qmi_servreg_notif_restart_pd_req_msg_v01_ei;
+
+	resp_desc.msg_id = QMI_SERVREG_NOTIF_RESTART_PD_RESP_V01;
+	resp_desc.max_msg_len =
+		QMI_SERVREG_NOTIF_RESTART_PD_RESP_MSG_V01_MAX_MSG_LEN;
+	resp_desc.ei_array = qmi_servreg_notif_restart_pd_resp_msg_v01_ei;
+
+	rc = qmi_send_req_wait(data->clnt_handle, &req_desc, &req,
+			sizeof(req), &resp_desc, &resp, sizeof(resp),
+			SERVER_TIMEOUT);
+	if (rc < 0) {
+		pr_err("%s: Message sending failed/server timeout, ret - %d\n",
+							service_path, rc);
+		return rc;
+	}
+
+	/* Check response if PDR is disabled */
+	if (resp.resp.result == QMI_RESULT_FAILURE_V01 &&
+				resp.resp.error == QMI_ERR_DISABLED_V01) {
+		pr_err("PD restart is disabled 0x%x\n", resp.resp.error);
+		return -EOPNOTSUPP;
+	}
+	/* Check the response for other error cases */
+	if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
+		pr_err("QMI request for PD restart failed 0x%x\n",
+						resp.resp.error);
+		return -EREMOTEIO;
+	}
+
+	return rc;
+}
+
+/* service_notif_pd_restart() - Request PD restart
+ * @service_path: Individual service identifier path for which restart is
+ *		being requested.
+ * @instance_id: Instance id specific to a subsystem.
+ *
+ * @return: >=0 on success, standard Linux error codes on failure.
+ */
+int service_notif_pd_restart(const char *service_path, int instance_id)
+{
+	struct qmi_client_info *tmp;
+	int rc = 0;
+
+	mutex_lock(&qmi_list_lock);
+	list_for_each_entry(tmp, &qmi_client_list, list) {
+		if (tmp->instance_id == instance_id) {
+			if (tmp->service_connected) {
+				pr_info("Restarting service %s, instance-id %d\n",
+						service_path, instance_id);
+				rc = send_pd_restart_req(service_path, tmp);
+			} else {
+				pr_info("Service %s is not connected\n",
+							service_path);
+			}
+		}
+	}
+	mutex_unlock(&qmi_list_lock);
+	return rc;
+}
+EXPORT_SYMBOL(service_notif_pd_restart);
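+
+/*
+ * Illustrative call (not part of this driver): a client holding the
+ * instance id of the relevant root PD could request a restart of one of
+ * its services with, e.g.,
+ *	service_notif_pd_restart("msm/modem/wlan_pd", instance_id);
+ * The service path shown is a hypothetical example.
+ */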
+
+/* service_notif_register_notifier() - Register a notifier for a service
+ * On success, it returns a handle. It takes the following arguments:
+ * service_path: Individual service identifier path for which a client
+ *		registers for notifications.
+ * instance_id: Instance id specific to a subsystem.
+ * curr_state: Current state of the service, returned by the registration
+ *		 process.
+ * nb: Notifier block carrying the callback for service events.
+ */
+void *service_notif_register_notifier(const char *service_path, int instance_id,
+				struct notifier_block *nb, int *curr_state)
+{
+	struct service_notif_info *service_notif;
+	int ret = 0;
+
+	if (!service_path || !instance_id || !nb)
+		return ERR_PTR(-EINVAL);
+
+	mutex_lock(&notif_add_lock);
+	service_notif = _find_service_info(service_path);
+	if (!service_notif) {
+		service_notif = (struct service_notif_info *)add_service_notif(
+								service_path,
+								instance_id,
+								curr_state);
+		if (IS_ERR(service_notif))
+			goto exit;
+	}
+
+	ret = srcu_notifier_chain_register(
+				&service_notif->service_notif_rcvr_list, nb);
+	*curr_state = service_notif->curr_state;
+	if (ret < 0)
+		service_notif = ERR_PTR(ret);
+exit:
+	mutex_unlock(&notif_add_lock);
+	return service_notif;
+}
+EXPORT_SYMBOL(service_notif_register_notifier);
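+
+/*
+ * Illustrative usage sketch (not part of this driver): a hypothetical
+ * client registering for notifications. The service path and instance id
+ * below are placeholders.
+ *
+ *	static int my_service_cb(struct notifier_block *nb,
+ *				 unsigned long state, void *data)
+ *	{
+ *		if (state == SERVREG_NOTIF_SERVICE_STATE_DOWN_V01)
+ *			pr_info("service is down\n");
+ *		return NOTIFY_OK;
+ *	}
+ *	static struct notifier_block my_nb = {
+ *		.notifier_call = my_service_cb,
+ *	};
+ *
+ *	int curr_state;
+ *	void *handle = service_notif_register_notifier("msm/modem/wlan_pd",
+ *					instance_id, &my_nb, &curr_state);
+ *	if (IS_ERR(handle))
+ *		... handle the error ...
+ */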
+
+/* service_notif_unregister_notifier() - Unregister a notifier for a service.
+ * service_notif_handle - The notifier handle that was provided by the
+ *			  service_notif_register_notifier function when the
+ *			  client registered for notifications.
+ * nb - The notifier block that was previously used during the registration.
+ */
+int service_notif_unregister_notifier(void *service_notif_handle,
+					struct notifier_block *nb)
+{
+	struct service_notif_info *service_notif;
+
+	if (!service_notif_handle || !nb)
+		return -EINVAL;
+
+	service_notif = (struct service_notif_info *)service_notif_handle;
+	if (IS_ERR(service_notif))
+		return -EINVAL;
+
+	return srcu_notifier_chain_unregister(
+				&service_notif->service_notif_rcvr_list, nb);
+}
+EXPORT_SYMBOL(service_notif_unregister_notifier);
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/soc/qcom/service-notifier-private.h	2019-01-22 16:16:26.675275131 +0100
@@ -0,0 +1,345 @@
+/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef SERVICE_REGISTRY_NOTIFIER_H
+#define SERVICE_REGISTRY_NOTIFIER_H
+
+#include <linux/qmi_encdec.h>
+
+#include <soc/qcom/msm_qmi_interface.h>
+
+#define SERVREG_NOTIF_SERVICE_ID_V01 0x42
+#define SERVREG_NOTIF_SERVICE_VERS_V01 0x01
+
+#define QMI_SERVREG_NOTIF_REGISTER_LISTENER_REQ_V01 0x0020
+#define QMI_SERVREG_NOTIF_REGISTER_LISTENER_RESP_V01 0x0020
+#define QMI_SERVREG_NOTIF_QUERY_STATE_REQ_V01 0x0021
+#define QMI_SERVREG_NOTIF_QUERY_STATE_RESP_V01 0x0021
+#define QMI_SERVREG_NOTIF_STATE_UPDATED_IND_V01 0x0022
+#define QMI_SERVREG_NOTIF_STATE_UPDATED_IND_ACK_REQ_V01 0x0023
+#define QMI_SERVREG_NOTIF_STATE_UPDATED_IND_ACK_RESP_V01 0x0023
+#define QMI_SERVREG_NOTIF_RESTART_PD_REQ_V01 0x0024
+#define QMI_SERVREG_NOTIF_RESTART_PD_RESP_V01 0x0024
+
+#define QMI_SERVREG_NOTIF_NAME_LENGTH_V01 64
+
+struct qmi_servreg_notif_register_listener_req_msg_v01 {
+	uint8_t enable;
+	char service_name[QMI_SERVREG_NOTIF_NAME_LENGTH_V01 + 1];
+};
+#define QMI_SERVREG_NOTIF_REGISTER_LISTENER_REQ_MSG_V01_MAX_MSG_LEN 71
+extern struct elem_info qmi_servreg_notif_register_listener_req_msg_v01_ei[];
+
+struct qmi_servreg_notif_register_listener_resp_msg_v01 {
+	struct qmi_response_type_v01 resp;
+	uint8_t curr_state_valid;
+	enum qmi_servreg_notif_service_state_enum_type_v01 curr_state;
+};
+#define QMI_SERVREG_NOTIF_REGISTER_LISTENER_RESP_MSG_V01_MAX_MSG_LEN 14
+extern struct elem_info qmi_servreg_notif_register_listener_resp_msg_v01_ei[];
+
+struct qmi_servreg_notif_query_state_req_msg_v01 {
+	char service_name[QMI_SERVREG_NOTIF_NAME_LENGTH_V01 + 1];
+};
+#define QMI_SERVREG_NOTIF_QUERY_STATE_REQ_MSG_V01_MAX_MSG_LEN 67
+extern struct elem_info qmi_servreg_notif_query_state_req_msg_v01_ei[];
+
+struct qmi_servreg_notif_query_state_resp_msg_v01 {
+	struct qmi_response_type_v01 resp;
+	uint8_t curr_state_valid;
+	enum qmi_servreg_notif_service_state_enum_type_v01 curr_state;
+};
+#define QMI_SERVREG_NOTIF_QUERY_STATE_RESP_MSG_V01_MAX_MSG_LEN 14
+extern struct elem_info qmi_servreg_notif_query_state_resp_msg_v01_ei[];
+
+struct qmi_servreg_notif_state_updated_ind_msg_v01 {
+	enum qmi_servreg_notif_service_state_enum_type_v01 curr_state;
+	char service_name[QMI_SERVREG_NOTIF_NAME_LENGTH_V01 + 1];
+	uint16_t transaction_id;
+};
+#define QMI_SERVREG_NOTIF_STATE_UPDATED_IND_MSG_V01_MAX_MSG_LEN 79
+extern struct elem_info qmi_servreg_notif_state_updated_ind_msg_v01_ei[];
+
+struct qmi_servreg_notif_set_ack_req_msg_v01 {
+	char service_name[QMI_SERVREG_NOTIF_NAME_LENGTH_V01 + 1];
+	uint16_t transaction_id;
+};
+#define QMI_SERVREG_NOTIF_SET_ACK_REQ_MSG_V01_MAX_MSG_LEN 72
+extern struct elem_info qmi_servreg_notif_set_ack_req_msg_v01_ei[];
+
+struct qmi_servreg_notif_set_ack_resp_msg_v01 {
+	struct qmi_response_type_v01 resp;
+};
+#define QMI_SERVREG_NOTIF_SET_ACK_RESP_MSG_V01_MAX_MSG_LEN 7
+extern struct elem_info qmi_servreg_notif_set_ack_resp_msg_v01_ei[];
+
+struct qmi_servreg_notif_restart_pd_req_msg_v01 {
+	char service_name[QMI_SERVREG_NOTIF_NAME_LENGTH_V01 + 1];
+};
+#define QMI_SERVREG_NOTIF_RESTART_PD_REQ_MSG_V01_MAX_MSG_LEN 67
+extern struct elem_info qmi_servreg_notif_restart_pd_req_msg_v01_ei[];
+
+struct qmi_servreg_notif_restart_pd_resp_msg_v01 {
+	struct qmi_response_type_v01 resp;
+};
+#define QMI_SERVREG_NOTIF_RESTART_PD_RESP_MSG_V01_MAX_MSG_LEN 7
+extern struct elem_info qmi_servreg_notif_restart_pd_resp_msg_v01_ei[];
+
+struct elem_info qmi_servreg_notif_register_listener_req_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x01,
+		.offset         = offsetof(struct
+				qmi_servreg_notif_register_listener_req_msg_v01,
+					   enable),
+	},
+	{
+		.data_type      = QMI_STRING,
+		.elem_len       = QMI_SERVREG_NOTIF_NAME_LENGTH_V01 + 1,
+		.elem_size      = sizeof(char),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x02,
+		.offset         = offsetof(struct
+				qmi_servreg_notif_register_listener_req_msg_v01,
+					   service_name),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info qmi_servreg_notif_register_listener_resp_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = 1,
+		.elem_size      = sizeof(struct qmi_response_type_v01),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x02,
+		.offset         = offsetof(struct
+			qmi_servreg_notif_register_listener_resp_msg_v01,
+									resp),
+		.ei_array      = get_qmi_response_type_v01_ei(),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(struct
+			qmi_servreg_notif_register_listener_resp_msg_v01,
+							curr_state_valid),
+	},
+	{
+		.data_type      = QMI_SIGNED_4_BYTE_ENUM,
+		.elem_len       = 1,
+		.elem_size      = sizeof(
+			enum qmi_servreg_notif_service_state_enum_type_v01),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(struct
+			qmi_servreg_notif_register_listener_resp_msg_v01,
+								curr_state),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info qmi_servreg_notif_query_state_req_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_STRING,
+		.elem_len       = QMI_SERVREG_NOTIF_NAME_LENGTH_V01 + 1,
+		.elem_size      = sizeof(char),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x01,
+		.offset         = offsetof(struct
+				qmi_servreg_notif_query_state_req_msg_v01,
+								service_name),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info qmi_servreg_notif_query_state_resp_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = 1,
+		.elem_size      = sizeof(struct qmi_response_type_v01),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x02,
+		.offset         = offsetof(struct
+				qmi_servreg_notif_query_state_resp_msg_v01,
+									resp),
+		.ei_array      = get_qmi_response_type_v01_ei(),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(struct
+				qmi_servreg_notif_query_state_resp_msg_v01,
+							curr_state_valid),
+	},
+	{
+		.data_type      = QMI_SIGNED_4_BYTE_ENUM,
+		.elem_len       = 1,
+		.elem_size      = sizeof(enum
+				qmi_servreg_notif_service_state_enum_type_v01),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(struct
+				qmi_servreg_notif_query_state_resp_msg_v01,
+								curr_state),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info qmi_servreg_notif_state_updated_ind_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_SIGNED_4_BYTE_ENUM,
+		.elem_len       = 1,
+		.elem_size      = sizeof(enum
+				qmi_servreg_notif_service_state_enum_type_v01),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x01,
+		.offset         = offsetof(struct
+				qmi_servreg_notif_state_updated_ind_msg_v01,
+								curr_state),
+	},
+	{
+		.data_type      = QMI_STRING,
+		.elem_len       = QMI_SERVREG_NOTIF_NAME_LENGTH_V01 + 1,
+		.elem_size      = sizeof(char),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x02,
+		.offset         = offsetof(struct
+				qmi_servreg_notif_state_updated_ind_msg_v01,
+								service_name),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_2_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint16_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x03,
+		.offset         = offsetof(struct
+				qmi_servreg_notif_state_updated_ind_msg_v01,
+								transaction_id),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info qmi_servreg_notif_set_ack_req_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_STRING,
+		.elem_len       = QMI_SERVREG_NOTIF_NAME_LENGTH_V01 + 1,
+		.elem_size      = sizeof(char),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x01,
+		.offset         = offsetof(struct
+				qmi_servreg_notif_set_ack_req_msg_v01,
+								service_name),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_2_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint16_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x02,
+		.offset         = offsetof(struct
+				qmi_servreg_notif_set_ack_req_msg_v01,
+								transaction_id),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info qmi_servreg_notif_set_ack_resp_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = 1,
+		.elem_size      = sizeof(struct qmi_response_type_v01),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x02,
+		.offset         = offsetof(struct
+				qmi_servreg_notif_set_ack_resp_msg_v01,
+									resp),
+		.ei_array      = get_qmi_response_type_v01_ei(),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info qmi_servreg_notif_restart_pd_req_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_STRING,
+		.elem_len       = QMI_SERVREG_NOTIF_NAME_LENGTH_V01 + 1,
+		.elem_size      = sizeof(char),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x01,
+		.offset         = offsetof(struct
+				qmi_servreg_notif_restart_pd_req_msg_v01,
+								service_name),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info qmi_servreg_notif_restart_pd_resp_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = 1,
+		.elem_size      = sizeof(struct qmi_response_type_v01),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x02,
+		.offset         = offsetof(struct
+				qmi_servreg_notif_restart_pd_resp_msg_v01,
+								   resp),
+		.ei_array      = get_qmi_response_type_v01_ei(),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.is_array       = QMI_COMMON_TLV_TYPE,
+	},
+};
+#endif
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/soc/qcom/smcinvoke.c	2019-10-29 09:26:24.821214706 +0100
@@ -0,0 +1,518 @@
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/miscdevice.h>
+#include <linux/poll.h>
+#include <linux/fdtable.h>
+#include <linux/file.h>
+#include <linux/fs.h>
+#include <linux/anon_inodes.h>
+#include <linux/smcinvoke.h>
+#include <soc/qcom/scm.h>
+#include <asm/cacheflush.h>
+#include "smcinvoke_object.h"
+#include <soc/qcom/qseecomi.h>
+#include "../../misc/qseecom_kernel.h"
+
+#define SMCINVOKE_TZ_PARAM_ID		0x224
+#define SMCINVOKE_TZ_CMD		0x32000600
+#define SMCINVOKE_FILE			"smcinvoke"
+#define SMCINVOKE_TZ_ROOT_OBJ		1
+#define SMCINVOKE_TZ_MIN_BUF_SIZE	4096
+#define SMCINVOKE_ARGS_ALIGN_SIZE	(sizeof(uint64_t))
+#define SMCINVOKE_TZ_OBJ_NULL		0
+
+#define FOR_ARGS(ndxvar, counts, section)                      \
+	for (ndxvar = object_counts_index_##section(counts);     \
+		ndxvar < (object_counts_index_##section(counts)  \
+		+ object_counts_num_##section(counts));          \
+		++ndxvar)
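+
+/*
+ * Illustrative expansion (comment only): FOR_ARGS(i, counts, BO) walks i over
+ * [object_counts_index_BO(counts),
+ *  object_counts_index_BO(counts) + object_counts_num_BO(counts)),
+ * i.e. exactly the args[] slots holding output buffers (see
+ * smcinvoke_object.h for the index/count macros).
+ */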
+
+static long smcinvoke_ioctl(struct file *, unsigned, unsigned long);
+static int smcinvoke_open(struct inode *, struct file *);
+static int smcinvoke_release(struct inode *, struct file *);
+
+static const struct file_operations smcinvoke_fops = {
+	.owner = THIS_MODULE,
+	.unlocked_ioctl = smcinvoke_ioctl,
+	.compat_ioctl = smcinvoke_ioctl,
+	.open = smcinvoke_open,
+	.release = smcinvoke_release,
+};
+
+static struct miscdevice smcinvoke_miscdev = {
+	.minor = MISC_DYNAMIC_MINOR,
+	.name = "smcinvoke",
+	.fops = &smcinvoke_fops
+};
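+
+/*
+ * Registering this miscdevice exposes a /dev/smcinvoke node via the misc
+ * subsystem; userspace drives it through SMCINVOKE_IOCTL_INVOKE_REQ,
+ * handled in smcinvoke_ioctl() below.
+ */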
+
+struct smcinvoke_buf_hdr {
+	uint32_t offset;
+	uint32_t size;
+};
+
+union smcinvoke_tz_args {
+	struct smcinvoke_buf_hdr b;
+	uint32_t		 tzhandle;
+};
+struct smcinvoke_msg_hdr {
+	uint32_t	tzhandle;
+	uint32_t	op;
+	uint32_t	counts;
+};
+
+struct smcinvoke_tzobj_context {
+	uint32_t	tzhandle;
+};
+
+/*
+ * size_add() saturates at SIZE_MAX: if integer overflow is detected,
+ * it returns SIZE_MAX; otherwise a + b is returned.
+ */
+static inline size_t size_add(size_t a, size_t b)
+{
+	return (b > (SIZE_MAX - a)) ? SIZE_MAX : a + b;
+}
+
+/*
+ * pad_size() is used along with size_align() to define an
+ * overflow-protected version of ALIGN().
+ */
+static inline size_t pad_size(size_t a, size_t b)
+{
+	return (~a + 1) % b;
+}
+
+/*
+ * size_align() saturates at SIZE_MAX: if integer overflow is detected,
+ * it returns SIZE_MAX; otherwise the next aligned size is returned.
+ */
+static inline size_t size_align(size_t a, size_t b)
+{
+	return size_add(a, pad_size(a, b));
+}
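+
+/*
+ * Worked example (comment only): with a = 5 and b = 8,
+ * pad_size(5, 8) = (~5 + 1) % 8 = 3, so size_align(5, 8) = 5 + 3 = 8.
+ * Had the addition overflowed, size_add() would have clamped the result
+ * to SIZE_MAX instead of wrapping.
+ */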
+
+/*
+ * This function retrieves the file pointer corresponding to the FD provided.
+ * The retrieved file pointer is stored until the IOCTL call concludes; once
+ * the call completes, all stored file pointers are released. File pointers
+ * are stored to prevent other threads from releasing the FD while the IOCTL
+ * is in progress.
+ */
+static int get_tzhandle_from_fd(int64_t fd, struct file **filp,
+				uint32_t *tzhandle)
+{
+	int ret = -EBADF;
+	struct file *tmp_filp = NULL;
+	struct smcinvoke_tzobj_context *tzobj = NULL;
+
+	if (fd == SMCINVOKE_USERSPACE_OBJ_NULL) {
+		*tzhandle = SMCINVOKE_TZ_OBJ_NULL;
+		ret = 0;
+		goto out;
+	} else if (fd < SMCINVOKE_USERSPACE_OBJ_NULL) {
+		goto out;
+	}
+
+	tmp_filp = fget(fd);
+	if (!tmp_filp)
+		goto out;
+
+	/* Verify if filp is smcinvoke device's file pointer */
+	if (!tmp_filp->f_op || !tmp_filp->private_data ||
+		(tmp_filp->f_op != &smcinvoke_fops)) {
+		fput(tmp_filp);
+		goto out;
+	}
+
+	tzobj = tmp_filp->private_data;
+	*tzhandle = tzobj->tzhandle;
+	*filp = tmp_filp;
+	ret = 0;
+out:
+	return ret;
+}
+
+static int get_fd_from_tzhandle(uint32_t tzhandle, int64_t *fd)
+{
+	int unused_fd = -1, ret = -1;
+	struct file *f = NULL;
+	struct smcinvoke_tzobj_context *cxt = NULL;
+
+	if (tzhandle == SMCINVOKE_TZ_OBJ_NULL) {
+		*fd = SMCINVOKE_USERSPACE_OBJ_NULL;
+		ret = 0;
+		goto out;
+	}
+
+	cxt = kzalloc(sizeof(*cxt), GFP_KERNEL);
+	if (!cxt) {
+		ret = -ENOMEM;
+		goto out;
+	}
+	unused_fd = get_unused_fd_flags(O_RDWR);
+	if (unused_fd < 0) {
+		ret = unused_fd;
+		goto out;
+	}
+
+	f = anon_inode_getfile(SMCINVOKE_FILE, &smcinvoke_fops, cxt, O_RDWR);
+	if (IS_ERR(f)) {
+		ret = PTR_ERR(f);
+		goto out;
+	}
+
+	*fd = unused_fd;
+	fd_install(*fd, f);
+	((struct smcinvoke_tzobj_context *)
+			(f->private_data))->tzhandle = tzhandle;
+	return 0;
+out:
+	if (unused_fd >= 0)
+		put_unused_fd(unused_fd);
+	kfree(cxt);
+
+	return ret;
+}
+
+static int prepare_send_scm_msg(const uint8_t *in_buf, size_t in_buf_len,
+				const uint8_t *out_buf, size_t out_buf_len,
+				int32_t *smcinvoke_result)
+{
+	int ret = 0;
+	struct scm_desc desc = {0};
+	size_t inbuf_flush_size = (1UL << get_order(in_buf_len)) * PAGE_SIZE;
+	size_t outbuf_flush_size = (1UL << get_order(out_buf_len)) * PAGE_SIZE;
+
+	desc.arginfo = SMCINVOKE_TZ_PARAM_ID;
+	desc.args[0] = (uint64_t)virt_to_phys(in_buf);
+	desc.args[1] = inbuf_flush_size;
+	desc.args[2] = (uint64_t)virt_to_phys(out_buf);
+	desc.args[3] = outbuf_flush_size;
+
+	dmac_flush_range(in_buf, in_buf + inbuf_flush_size);
+	dmac_flush_range(out_buf, out_buf + outbuf_flush_size);
+
+	ret = scm_call2(SMCINVOKE_TZ_CMD, &desc);
+
+	/* process listener request */
+	if (!ret && (desc.ret[0] == QSEOS_RESULT_INCOMPLETE ||
+		desc.ret[0] == QSEOS_RESULT_BLOCKED_ON_LISTENER))
+		ret = qseecom_process_listener_from_smcinvoke(&desc);
+
+	*smcinvoke_result = (int32_t)desc.ret[1];
+	if (ret || desc.ret[1] || desc.ret[2] || desc.ret[0])
+		pr_err("SCM call failed with ret val = %d %d %d %d\n",
+						ret, (int)desc.ret[0],
+				(int)desc.ret[1], (int)desc.ret[2]);
+
+	dmac_inv_range(in_buf, in_buf + inbuf_flush_size);
+	dmac_inv_range(out_buf, out_buf + outbuf_flush_size);
+	return ret;
+}
+
+static int marshal_out(void *buf, uint32_t buf_size,
+				struct smcinvoke_cmd_req *req,
+				union smcinvoke_arg *args_buf)
+{
+	int ret = -EINVAL, i = 0;
+	union smcinvoke_tz_args *tz_args = NULL;
+	size_t offset = sizeof(struct smcinvoke_msg_hdr) +
+				object_counts_total(req->counts) *
+					sizeof(union smcinvoke_tz_args);
+
+	if (offset > buf_size)
+		goto out;
+
+	tz_args = (union smcinvoke_tz_args *)
+				(buf + sizeof(struct smcinvoke_msg_hdr));
+
+	tz_args += object_counts_num_BI(req->counts);
+
+	FOR_ARGS(i, req->counts, BO) {
+		args_buf[i].b.size = tz_args->b.size;
+		if ((buf_size - tz_args->b.offset < tz_args->b.size) ||
+			tz_args->b.offset > buf_size) {
+			pr_err("%s: buffer overflow detected\n", __func__);
+			goto out;
+		}
+		if (copy_to_user((void __user *)(uintptr_t)(args_buf[i].b.addr),
+			(uint8_t *)(buf) + tz_args->b.offset,
+						tz_args->b.size)) {
+			pr_err("Error %d copying ctxt to user\n", ret);
+			goto out;
+		}
+		tz_args++;
+	}
+	tz_args += object_counts_num_OI(req->counts);
+
+	FOR_ARGS(i, req->counts, OO) {
+		/*
+		 * create a new FD and assign to output object's
+		 * context
+		 */
+		ret = get_fd_from_tzhandle(tz_args->tzhandle,
+						&(args_buf[i].o.fd));
+		if (ret)
+			goto out;
+		tz_args++;
+	}
+	ret = 0;
+out:
+	return ret;
+}
+
+/*
+ * SMC expects arguments in the following format
+ * ---------------------------------------------------------------------------
+ * | cxt | op | counts | ptr|size |ptr|size...|ORef|ORef|...| rest of payload |
+ * ---------------------------------------------------------------------------
+ * cxt: target, op: operation, counts: total arguments
+ * offset: offset from the beginning of the buffer, i.e. from cxt
+ * size: an 8-byte-aligned value
+ */
+static size_t compute_in_msg_size(const struct smcinvoke_cmd_req *req,
+					const union smcinvoke_arg *args_buf)
+{
+	uint32_t i = 0;
+
+	size_t total_size = sizeof(struct smcinvoke_msg_hdr) +
+				object_counts_total(req->counts) *
+					sizeof(union smcinvoke_tz_args);
+
+	/* Computed total_size should be 8-byte aligned from the start of buf */
+	total_size = ALIGN(total_size, SMCINVOKE_ARGS_ALIGN_SIZE);
+
+	/* each buffer has to be 8-byte aligned */
+	while (i < object_counts_num_buffers(req->counts))
+		total_size = size_add(total_size,
+		size_align(args_buf[i++].b.size, SMCINVOKE_ARGS_ALIGN_SIZE));
+
+	/* Since we're using get_free_pages, no need for explicit PAGE align */
+	return total_size;
+}
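+
+/*
+ * Worked example (comment only, assuming a 12-byte smcinvoke_msg_hdr and an
+ * 8-byte smcinvoke_tz_args union): a request with one 10-byte input buffer
+ * and one 20-byte output buffer has two arguments in total, so the fixed
+ * part is 12 + 2 * 8 = 28 bytes, aligned up to 32. Adding
+ * size_align(10, 8) = 16 and size_align(20, 8) = 24 gives 72 bytes.
+ */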
+
+static int marshal_in(const struct smcinvoke_cmd_req *req,
+			const union smcinvoke_arg *args_buf, uint32_t tzhandle,
+			uint8_t *buf, size_t buf_size, struct file **arr_filp)
+{
+	int ret = -EINVAL, i = 0, j = 0;
+	union smcinvoke_tz_args *tz_args = NULL;
+	struct smcinvoke_msg_hdr msg_hdr = {tzhandle, req->op, req->counts};
+	uint32_t offset = sizeof(struct smcinvoke_msg_hdr) +
+				sizeof(union smcinvoke_tz_args) *
+				object_counts_total(req->counts);
+
+	if (buf_size < offset)
+		goto out;
+
+	*(struct smcinvoke_msg_hdr *)buf = msg_hdr;
+	tz_args = (union smcinvoke_tz_args *)
+			(buf + sizeof(struct smcinvoke_msg_hdr));
+
+	FOR_ARGS(i, req->counts, BI) {
+		offset = size_align(offset, SMCINVOKE_ARGS_ALIGN_SIZE);
+		if ((offset > buf_size) ||
+			(args_buf[i].b.size > (buf_size - offset)))
+			goto out;
+
+		tz_args->b.offset = offset;
+		tz_args->b.size = args_buf[i].b.size;
+		tz_args++;
+
+		if (copy_from_user(buf+offset,
+			(void __user *)(uintptr_t)(args_buf[i].b.addr),
+						args_buf[i].b.size))
+			goto out;
+
+		offset += args_buf[i].b.size;
+	}
+	FOR_ARGS(i, req->counts, BO) {
+		offset = size_align(offset, SMCINVOKE_ARGS_ALIGN_SIZE);
+		if ((offset > buf_size) ||
+			(args_buf[i].b.size > (buf_size - offset)))
+			goto out;
+
+		tz_args->b.offset = offset;
+		tz_args->b.size = args_buf[i].b.size;
+		tz_args++;
+
+		offset += args_buf[i].b.size;
+	}
+	/*
+	 * i indexes the full args[] array here and can exceed the size of
+	 * arr_filp[]; use a separate counter to store the filps compactly.
+	 */
+	FOR_ARGS(i, req->counts, OI) {
+		if (get_tzhandle_from_fd(args_buf[i].o.fd,
+					&arr_filp[j++], &(tz_args->tzhandle)))
+			goto out;
+		tz_args++;
+	}
+	ret = 0;
+out:
+	return ret;
+}
+
+static long smcinvoke_ioctl(struct file *filp, unsigned cmd, unsigned long arg)
+{
+	int    ret = -1, i = 0, nr_args = 0;
+	struct smcinvoke_cmd_req req = {0};
+	void   *in_msg = NULL;
+	size_t inmsg_size = 0;
+	void   *out_msg = NULL;
+	union  smcinvoke_arg *args_buf = NULL;
+	struct file *filp_to_release[object_counts_max_OO] = {NULL};
+	struct smcinvoke_tzobj_context *tzobj = filp->private_data;
+
+	switch (cmd) {
+	case SMCINVOKE_IOCTL_INVOKE_REQ:
+		if (_IOC_SIZE(cmd) != sizeof(req)) {
+			ret =  -EINVAL;
+			goto out;
+		}
+		ret = copy_from_user(&req, (void __user *)arg, sizeof(req));
+		if (ret) {
+			ret =  -EFAULT;
+			goto out;
+		}
+
+		nr_args = object_counts_num_buffers(req.counts) +
+				object_counts_num_objects(req.counts);
+
+		if (req.argsize != sizeof(union smcinvoke_arg)) {
+			ret = -EINVAL;
+			goto out;
+		}
+
+		if (nr_args) {
+			args_buf = kzalloc(nr_args * req.argsize, GFP_KERNEL);
+			if (!args_buf) {
+				ret = -ENOMEM;
+				goto out;
+			}
+
+			ret = copy_from_user(args_buf,
+					(void __user *)(uintptr_t)(req.args),
+						nr_args * req.argsize);
+
+			if (ret) {
+				ret = -EFAULT;
+				goto out;
+			}
+		}
+
+		inmsg_size = compute_in_msg_size(&req, args_buf);
+		in_msg = (void *)__get_free_pages(GFP_KERNEL,
+						get_order(inmsg_size));
+		if (!in_msg) {
+			ret = -ENOMEM;
+			goto out;
+		}
+
+		out_msg = (void *)__get_free_page(GFP_KERNEL);
+		if (!out_msg) {
+			ret = -ENOMEM;
+			goto out;
+		}
+
+		ret = marshal_in(&req, args_buf, tzobj->tzhandle, in_msg,
+					inmsg_size, filp_to_release);
+		if (ret)
+			goto out;
+
+		ret = prepare_send_scm_msg(in_msg, inmsg_size, out_msg,
+				SMCINVOKE_TZ_MIN_BUF_SIZE, &req.result);
+		if (ret)
+			goto out;
+
+		/*
+		 * if invoke op results in an err, no need to marshal_out and
+		 * copy args buf to user space
+		 */
+		if (!req.result) {
+			ret = marshal_out(in_msg, inmsg_size, &req, args_buf);
+
+			ret |=  copy_to_user(
+					(void __user *)(uintptr_t)(req.args),
+					args_buf, nr_args * req.argsize);
+		}
+		ret |=  copy_to_user((void __user *)arg, &req, sizeof(req));
+		if (ret)
+			goto out;
+
+		break;
+	default:
+		ret = -ENOIOCTLCMD;
+		break;
+	}
+out:
+	free_page((long)out_msg);
+	free_pages((long)in_msg, get_order(inmsg_size));
+	kfree(args_buf);
+	for (i = 0; i < object_counts_max_OO; i++) {
+		if (filp_to_release[i])
+			fput(filp_to_release[i]);
+	}
+
+	return ret;
+}
+
+static int smcinvoke_open(struct inode *nodp, struct file *filp)
+{
+	struct smcinvoke_tzobj_context *tzcxt = NULL;
+
+	tzcxt = kzalloc(sizeof(*tzcxt), GFP_KERNEL);
+	if (!tzcxt)
+		return -ENOMEM;
+
+	tzcxt->tzhandle = SMCINVOKE_TZ_ROOT_OBJ;
+	filp->private_data = tzcxt;
+
+	return 0;
+}
+
+static int smcinvoke_release(struct inode *nodp, struct file *filp)
+{
+	int ret = 0, smcinvoke_result = 0;
+	uint8_t *in_buf = NULL;
+	uint8_t *out_buf = NULL;
+	struct smcinvoke_msg_hdr hdr = {0};
+	struct smcinvoke_tzobj_context *tzobj = filp->private_data;
+	uint32_t tzhandle = tzobj->tzhandle;
+
+	/* The root object is special in the sense that it is indestructible */
+	if (!tzhandle || tzhandle == SMCINVOKE_TZ_ROOT_OBJ)
+		goto out;
+
+	in_buf = (uint8_t *)__get_free_page(GFP_KERNEL);
+	out_buf = (uint8_t *)__get_free_page(GFP_KERNEL);
+	if (!in_buf || !out_buf)
+		goto out;
+
+	hdr.tzhandle = tzhandle;
+	hdr.op = object_op_RELEASE;
+	hdr.counts = 0;
+	*(struct smcinvoke_msg_hdr *)in_buf = hdr;
+
+	ret = prepare_send_scm_msg(in_buf, SMCINVOKE_TZ_MIN_BUF_SIZE,
+			out_buf, SMCINVOKE_TZ_MIN_BUF_SIZE, &smcinvoke_result);
+out:
+	kfree(filp->private_data);
+	free_page((long)in_buf);
+	free_page((long)out_buf);
+
+	return ret;
+}
+
+static int __init smcinvoke_init(void)
+{
+	return misc_register(&smcinvoke_miscdev);
+}
+
+device_initcall(smcinvoke_init);
+MODULE_LICENSE("GPL v2");
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/soc/qcom/smcinvoke_object.h	2019-01-22 16:16:26.675275131 +0100
@@ -0,0 +1,51 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#ifndef __SMCINVOKE_OBJECT_H
+#define __SMCINVOKE_OBJECT_H
+
+#include <linux/types.h>
+
+#define object_op_METHOD_MASK   ((uint32_t)0x0000FFFFu)
+#define object_op_RELEASE       (object_op_METHOD_MASK - 0)
+#define object_op_RETAIN        (object_op_METHOD_MASK - 1)
+
+#define object_counts_max_BI   0xF
+#define object_counts_max_BO   0xF
+#define object_counts_max_OI   0xF
+#define object_counts_max_OO   0xF
+
+/* unpack counts */
+
+#define object_counts_num_BI(k)  ((size_t) (((k) >> 0) & object_counts_max_BI))
+#define object_counts_num_BO(k)  ((size_t) (((k) >> 4) & object_counts_max_BO))
+#define object_counts_num_OI(k)  ((size_t) (((k) >> 8) & object_counts_max_OI))
+#define object_counts_num_OO(k)  ((size_t) (((k) >> 12) & object_counts_max_OO))
+#define object_counts_num_buffers(k)	\
+			(object_counts_num_BI(k) + object_counts_num_BO(k))
+
+#define object_counts_num_objects(k)	\
+			(object_counts_num_OI(k) + object_counts_num_OO(k))
+
+/* Indices into args[] */
+
+#define object_counts_index_BI(k)   0
+#define object_counts_index_BO(k)		\
+			(object_counts_index_BI(k) + object_counts_num_BI(k))
+#define object_counts_index_OI(k)		\
+			(object_counts_index_BO(k) + object_counts_num_BO(k))
+#define object_counts_index_OO(k)		\
+			(object_counts_index_OI(k) + object_counts_num_OI(k))
+#define object_counts_total(k)		\
+			(object_counts_index_OO(k) + object_counts_num_OO(k))
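+
+/*
+ * Worked example (comment only): counts = 0x0021 encodes BI = 1 (low nibble)
+ * and BO = 2 (next nibble), with no object arguments. The args[] layout is
+ * then: BI at index 0, BO starting at index 1, OI and OO both at index 3,
+ * and object_counts_total(0x0021) = 3.
+ */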
+
+
+#endif /* __SMCINVOKE_OBJECT_H */
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/soc/qcom/smd_debug.c	2019-01-22 16:16:26.675275131 +0100
@@ -0,0 +1,429 @@
+/* drivers/soc/qcom/smd_debug.c
+ *
+ * Copyright (C) 2007 Google, Inc.
+ * Copyright (c) 2009-2014, The Linux Foundation. All rights reserved.
+ * Author: Brian Swetland <swetland@google.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/debugfs.h>
+#include <linux/list.h>
+#include <linux/ctype.h>
+#include <linux/jiffies.h>
+#include <linux/err.h>
+
+#include <soc/qcom/smem.h>
+
+#include "smd_private.h"
+
+#if defined(CONFIG_DEBUG_FS)
+
+static char *chstate(unsigned n)
+{
+	switch (n) {
+	case SMD_SS_CLOSED:
+		return "CLOSED";
+	case SMD_SS_OPENING:
+		return "OPENING";
+	case SMD_SS_OPENED:
+		return "OPENED";
+	case SMD_SS_FLUSHING:
+		return "FLUSHING";
+	case SMD_SS_CLOSING:
+		return "CLOSING";
+	case SMD_SS_RESET:
+		return "RESET";
+	case SMD_SS_RESET_OPENING:
+		return "ROPENING";
+	default:
+		return "UNKNOWN";
+	}
+}
+
+static void debug_int_stats(struct seq_file *s)
+{
+	int subsys;
+	struct interrupt_stat *stats = interrupt_stats;
+	const char *subsys_name;
+
+	seq_puts(s,
+		"   Subsystem    | Interrupt ID |    In     |    Out    |\n");
+
+	for (subsys = 0; subsys < NUM_SMD_SUBSYSTEMS; ++subsys) {
+		subsys_name = smd_pid_to_subsystem(subsys);
+		if (!IS_ERR_OR_NULL(subsys_name)) {
+			seq_printf(s, "%-10s %4s |    %9d | %9u | %9u |\n",
+				smd_pid_to_subsystem(subsys), "smd",
+				stats->smd_interrupt_id,
+				stats->smd_in_count,
+				stats->smd_out_count);
+
+			seq_printf(s, "%-10s %4s |    %9d | %9u | %9u |\n",
+				smd_pid_to_subsystem(subsys), "smsm",
+				stats->smsm_interrupt_id,
+				stats->smsm_in_count,
+				stats->smsm_out_count);
+		}
+		++stats;
+	}
+}
+
+static void debug_int_stats_reset(struct seq_file *s)
+{
+	int subsys;
+	struct interrupt_stat *stats = interrupt_stats;
+
+	seq_puts(s, "Resetting interrupt stats.\n");
+
+	for (subsys = 0; subsys < NUM_SMD_SUBSYSTEMS; ++subsys) {
+		stats->smd_in_count = 0;
+		stats->smd_out_count = 0;
+		stats->smsm_in_count = 0;
+		stats->smsm_out_count = 0;
+		++stats;
+	}
+}
+
+/* NNV: revisit, it may not be the smd version */
+static void debug_read_smd_version(struct seq_file *s)
+{
+	uint32_t *smd_ver;
+	uint32_t n, version;
+
+	smd_ver = smem_find(SMEM_VERSION_SMD, 32 * sizeof(uint32_t),
+							0, SMEM_ANY_HOST_FLAG);
+
+	if (smd_ver)
+		for (n = 0; n < 32; n++) {
+			version = smd_ver[n];
+			seq_printf(s, "entry %d: %d.%d\n", n,
+				       version >> 16,
+				       version & 0xffff);
+		}
+}
+
+/**
+ * pid_to_str - Convert a numeric processor id value into a human readable
+ *		string value.
+ *
+ * @pid: the processor id to convert
+ * @returns: a string representation of @pid
+ */
+static char *pid_to_str(int pid)
+{
+	switch (pid) {
+	case SMD_APPS:
+		return "APPS";
+	case SMD_MODEM:
+		return "MDMSW";
+	case SMD_Q6:
+		return "ADSP";
+	case SMD_TZ:
+		return "TZ";
+	case SMD_WCNSS:
+		return "WCNSS";
+	case SMD_MODEM_Q6_FW:
+		return "MDMFW";
+	case SMD_RPM:
+		return "RPM";
+	default:
+		return "???";
+	}
+}
+
+/**
+ * print_half_ch_state - Print the state of half of a SMD channel in a human
+ *			readable format.
+ *
+ * @s: the sequential file to print to
+ * @half_ch: half of a SMD channel that should have its state printed
+ * @half_ch_funcs: the relevant channel access functions for @half_ch
+ * @size: size of the fifo in bytes associated with @half_ch
+ * @proc: the processor id that owns the part of the SMD channel associated with
+ *		@half_ch
+ * @is_restricted: true if memory access is restricted
+ */
+static void print_half_ch_state(struct seq_file *s,
+				void *half_ch,
+				struct smd_half_channel_access *half_ch_funcs,
+				unsigned size,
+				int proc,
+				bool is_restricted)
+{
+	seq_printf(s, "%-5s|", pid_to_str(proc));
+
+	if (!is_restricted) {
+		seq_printf(s, "%-7s|0x%05X|0x%05X|0x%05X",
+			chstate(half_ch_funcs->get_state(half_ch)),
+			size,
+			half_ch_funcs->get_tail(half_ch),
+			half_ch_funcs->get_head(half_ch));
+		seq_printf(s, "|%c%c%c%c%c%c%c%c|0x%05X",
+			half_ch_funcs->get_fDSR(half_ch) ? 'D' : 'd',
+			half_ch_funcs->get_fCTS(half_ch) ? 'C' : 'c',
+			half_ch_funcs->get_fCD(half_ch) ? 'C' : 'c',
+			half_ch_funcs->get_fRI(half_ch) ? 'I' : 'i',
+			half_ch_funcs->get_fHEAD(half_ch) ? 'W' : 'w',
+			half_ch_funcs->get_fTAIL(half_ch) ? 'R' : 'r',
+			half_ch_funcs->get_fSTATE(half_ch) ? 'S' : 's',
+			half_ch_funcs->get_fBLOCKREADINTR(half_ch) ? 'B' : 'b',
+			(half_ch_funcs->get_head(half_ch) -
+				half_ch_funcs->get_tail(half_ch)) & (size - 1));
+	} else {
+		seq_puts(s, " Access Restricted");
+	}
+}
+
+/**
+ * smd_xfer_type_to_str - Convert a numeric transfer type value into a human
+ *		readable string value.
+ *
+ * @xfer_type: the transfer type to convert
+ * @returns: a string representation of @xfer_type
+ */
+static char *smd_xfer_type_to_str(uint32_t xfer_type)
+{
+	if (xfer_type == 1)
+		return "S"; /* streaming type */
+	else if (xfer_type == 2)
+		return "P"; /* packet type */
+	else
+		return "L"; /* legacy type */
+}
+
+/**
+ * print_smd_ch_table - Print the current state of every valid SMD channel in a
+ *			specific SMD channel allocation table to a human
+ *			readable formatted output.
+ *
+ * @s: the sequential file to print to
+ * @tbl: a valid pointer to the channel allocation table to print from
+ * @num_tbl_entries: total number of entries in the table referenced by @tbl
+ * @ch_base_id: the SMEM item id corresponding to the array of channel
+ *		structures for the channels found in @tbl
+ * @fifo_base_id: the SMEM item id corresponding to the array of channel fifos
+ *		for the channels found in @tbl
+ * @pid: processor id to use for any SMEM operations
+ * @flags: flags to use for any SMEM operations
+ */
+static void print_smd_ch_table(struct seq_file *s,
+				struct smd_alloc_elm *tbl,
+				unsigned num_tbl_entries,
+				unsigned ch_base_id,
+				unsigned fifo_base_id,
+				unsigned pid,
+				unsigned flags)
+{
+	void *half_ch;
+	unsigned half_ch_size;
+	uint32_t ch_type;
+	void *buffer;
+	unsigned buffer_size;
+	int n;
+	bool is_restricted;
+
+/*
+ * formatted, human-readable channel state output, e.g.:
+ID|CHANNEL NAME       |T|PROC |STATE  |FIFO SZ|RDPTR  |WRPTR  |FLAGS   |DATAPEN
+-------------------------------------------------------------------------------
+00|DS                 |S|APPS |CLOSED |0x02000|0x00000|0x00000|dcCiwrsb|0x00000
+  |                   | |MDMSW|OPENING|0x02000|0x00000|0x00000|dcCiwrsb|0x00000
+-------------------------------------------------------------------------------
+ */
+
+	seq_printf(s, "%2s|%-19s|%1s|%-5s|%-7s|%-7s|%-7s|%-7s|%-8s|%-7s\n",
+								"ID",
+								"CHANNEL NAME",
+								"T",
+								"PROC",
+								"STATE",
+								"FIFO SZ",
+								"RDPTR",
+								"WRPTR",
+								"FLAGS",
+								"DATAPEN");
+	seq_puts(s,
+		"-------------------------------------------------------------------------------\n");
+	for (n = 0; n < num_tbl_entries; ++n) {
+		if (strlen(tbl[n].name) == 0)
+			continue;
+
+		seq_printf(s, "%2u|%-19s|%s|", tbl[n].cid, tbl[n].name,
+			smd_xfer_type_to_str(SMD_XFER_TYPE(tbl[n].type)));
+		ch_type = SMD_CHANNEL_TYPE(tbl[n].type);
+
+		if (smd_edge_to_remote_pid(ch_type) == SMD_RPM &&
+		   smd_edge_to_local_pid(ch_type) != SMD_APPS)
+			is_restricted = true;
+		else
+			is_restricted = false;
+
+		if (is_word_access_ch(ch_type))
+			half_ch_size =
+				sizeof(struct smd_half_channel_word_access);
+		else
+			half_ch_size = sizeof(struct smd_half_channel);
+
+		half_ch = smem_find(ch_base_id + n, 2 * half_ch_size,
+								pid, flags);
+		buffer = smem_get_entry(fifo_base_id + n, &buffer_size,
+								pid, flags);
+		if (half_ch && buffer)
+			print_half_ch_state(s,
+					half_ch,
+					get_half_ch_funcs(ch_type),
+					buffer_size / 2,
+					smd_edge_to_local_pid(ch_type),
+					is_restricted);
+
+		seq_puts(s, "\n");
+		seq_printf(s, "%2s|%-19s|%1s|", "", "", "");
+
+		if (half_ch && buffer)
+			print_half_ch_state(s,
+					half_ch + half_ch_size,
+					get_half_ch_funcs(ch_type),
+					buffer_size / 2,
+					smd_edge_to_remote_pid(ch_type),
+					is_restricted);
+
+		seq_puts(s, "\n");
+		seq_puts(s,
+			"-------------------------------------------------------------------------------\n");
+	}
+}
+
+/**
+ * debug_ch - Print the current state of every valid SMD channel in a human
+ *		readable formatted table.
+ *
+ * @s: the sequential file to print to
+ */
+static void debug_ch(struct seq_file *s)
+{
+	struct smd_alloc_elm *tbl;
+	struct smd_alloc_elm *default_pri_tbl;
+	struct smd_alloc_elm *default_sec_tbl;
+	unsigned tbl_size;
+	int i;
+
+	tbl = smem_get_entry(ID_CH_ALLOC_TBL, &tbl_size, 0, SMEM_ANY_HOST_FLAG);
+	default_pri_tbl = tbl;
+
+	if (!tbl) {
+		seq_puts(s, "Channel allocation table not found\n");
+		return;
+	}
+
+	if (IS_ERR(tbl) && PTR_ERR(tbl) == -EPROBE_DEFER) {
+		seq_puts(s, "SMEM is not initialized\n");
+		return;
+	}
+
+	seq_puts(s, "Primary allocation table:\n");
+	print_smd_ch_table(s, tbl, tbl_size / sizeof(*tbl), ID_SMD_CHANNELS,
+							SMEM_SMD_FIFO_BASE_ID,
+							0,
+							SMEM_ANY_HOST_FLAG);
+
+	tbl = smem_get_entry(SMEM_CHANNEL_ALLOC_TBL_2, &tbl_size, 0,
+							SMEM_ANY_HOST_FLAG);
+	default_sec_tbl = tbl;
+	if (tbl) {
+		seq_puts(s, "\n\nSecondary allocation table:\n");
+		print_smd_ch_table(s, tbl, tbl_size / sizeof(*tbl),
+						SMEM_SMD_BASE_ID_2,
+						SMEM_SMD_FIFO_BASE_ID_2,
+						0,
+						SMEM_ANY_HOST_FLAG);
+	}
+
+	for (i = 1; i < NUM_SMD_SUBSYSTEMS; ++i) {
+		tbl = smem_get_entry(ID_CH_ALLOC_TBL, &tbl_size, i, 0);
+		if (tbl && tbl != default_pri_tbl) {
+			seq_puts(s, "\n\n");
+			seq_printf(s, "%s <-> %s Primary allocation table:\n",
+							pid_to_str(SMD_APPS),
+							pid_to_str(i));
+			print_smd_ch_table(s, tbl, tbl_size / sizeof(*tbl),
+							ID_SMD_CHANNELS,
+							SMEM_SMD_FIFO_BASE_ID,
+							i,
+							0);
+		}
+
+		tbl = smem_get_entry(SMEM_CHANNEL_ALLOC_TBL_2, &tbl_size, i, 0);
+		if (tbl && tbl != default_sec_tbl) {
+			seq_puts(s, "\n\n");
+			seq_printf(s, "%s <-> %s Secondary allocation table:\n",
+							pid_to_str(SMD_APPS),
+							pid_to_str(i));
+			print_smd_ch_table(s, tbl, tbl_size / sizeof(*tbl),
+						SMEM_SMD_BASE_ID_2,
+						SMEM_SMD_FIFO_BASE_ID_2,
+						i,
+						0);
+		}
+	}
+}
+
+static int debugfs_show(struct seq_file *s, void *data)
+{
+	void (*show)(struct seq_file *) = s->private;
+
+	show(s);
+
+	return 0;
+}
+
+static int debug_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, debugfs_show, inode->i_private);
+}
+
+static const struct file_operations debug_ops = {
+	.open = debug_open,
+	.release = single_release,
+	.read = seq_read,
+	.llseek = seq_lseek,
+};
+
+static void debug_create(const char *name, umode_t mode,
+			 struct dentry *dent,
+			 void (*show)(struct seq_file *))
+{
+	struct dentry *file;
+
+	file = debugfs_create_file(name, mode, dent, show, &debug_ops);
+	if (!file)
+		pr_err("%s: unable to create file '%s'\n", __func__, name);
+}
+
+static int __init smd_debugfs_init(void)
+{
+	struct dentry *dent;
+
+	dent = debugfs_create_dir("smd", NULL);
+	if (IS_ERR(dent))
+		return PTR_ERR(dent);
+
+	debug_create("ch", 0444, dent, debug_ch);
+	debug_create("version", 0444, dent, debug_read_smd_version);
+	debug_create("int_stats", 0444, dent, debug_int_stats);
+	debug_create("int_stats_reset", 0444, dent, debug_int_stats_reset);
+
+	return 0;
+}
+
+late_initcall(smd_debugfs_init);
+#endif
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/soc/qcom/smd_init_dt.c	2019-01-22 16:16:26.675275131 +0100
@@ -0,0 +1,340 @@
+/* drivers/soc/qcom/smd_init_dt.c
+ *
+ * Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/ipc_logging.h>
+
+#include "smd_private.h"
+
+#define MODULE_NAME "msm_smd"
+#define IPC_LOG(level, x...) do { \
+	if (smd_log_ctx) \
+		ipc_log_string(smd_log_ctx, x); \
+	else \
+		printk(level x); \
+	} while (0)
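+
+/*
+ * Illustrative use (comment only):
+ *	IPC_LOG(KERN_DEBUG, "smd: edge %d up", edge);
+ * lands in the smd ipc_logging context when smd_log_ctx is set and falls
+ * back to printk() otherwise.
+ */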
+
+#if defined(CONFIG_MSM_SMD_DEBUG)
+#define SMD_DBG(x...) do {				\
+		if (msm_smd_debug_mask & MSM_SMD_DEBUG) \
+			IPC_LOG(KERN_DEBUG, x);		\
+	} while (0)
+
+#define SMSM_DBG(x...) do {					\
+		if (msm_smd_debug_mask & MSM_SMSM_DEBUG)	\
+			IPC_LOG(KERN_DEBUG, x);		\
+	} while (0)
+#else
+#define SMD_DBG(x...) do { } while (0)
+#define SMSM_DBG(x...) do { } while (0)
+#endif
+
+static DEFINE_MUTEX(smd_probe_lock);
+static int first_probe_done;
+
+static int msm_smsm_probe(struct platform_device *pdev)
+{
+	uint32_t edge;
+	char *key;
+	int ret;
+	uint32_t irq_offset;
+	uint32_t irq_bitmask;
+	uint32_t irq_line;
+	struct interrupt_config_item *private_irq;
+	struct device_node *node;
+	void *irq_out_base;
+	resource_size_t irq_out_size;
+	struct platform_device *parent_pdev;
+	struct resource *r;
+	struct interrupt_config *private_intr_config;
+	uint32_t remote_pid;
+
+	node = pdev->dev.of_node;
+
+	if (!pdev->dev.parent) {
+		pr_err("%s: missing link to parent device\n", __func__);
+		return -ENODEV;
+	}
+
+	parent_pdev = to_platform_device(pdev->dev.parent);
+
+	key = "irq-reg-base";
+	r = platform_get_resource_byname(parent_pdev, IORESOURCE_MEM, key);
+	if (!r)
+		goto missing_key;
+	irq_out_size = resource_size(r);
+	irq_out_base = ioremap_nocache(r->start, irq_out_size);
+	if (!irq_out_base) {
+		pr_err("%s: ioremap_nocache() of irq_out_base addr:%pr size:%pr\n",
+				__func__, &r->start, &irq_out_size);
+		return -ENOMEM;
+	}
+	SMSM_DBG("%s: %s = %p", __func__, key, irq_out_base);
+
+	key = "qcom,smsm-edge";
+	ret = of_property_read_u32(node, key, &edge);
+	if (ret)
+		goto missing_key;
+	SMSM_DBG("%s: %s = %d", __func__, key, edge);
+
+	key = "qcom,smsm-irq-offset";
+	ret = of_property_read_u32(node, key, &irq_offset);
+	if (ret)
+		goto missing_key;
+	SMSM_DBG("%s: %s = %x", __func__, key, irq_offset);
+
+	key = "qcom,smsm-irq-bitmask";
+	ret = of_property_read_u32(node, key, &irq_bitmask);
+	if (ret)
+		goto missing_key;
+	SMSM_DBG("%s: %s = %x", __func__, key, irq_bitmask);
+
+	key = "interrupts";
+	irq_line = irq_of_parse_and_map(node, 0);
+	if (!irq_line)
+		goto missing_key;
+	SMSM_DBG("%s: %s = %d", __func__, key, irq_line);
+
+	private_intr_config = smd_get_intr_config(edge);
+	if (!private_intr_config) {
+		pr_err("%s: invalid edge\n", __func__);
+		return -ENODEV;
+	}
+	private_irq = &private_intr_config->smsm;
+	private_irq->out_bit_pos = irq_bitmask;
+	private_irq->out_offset = irq_offset;
+	private_irq->out_base = irq_out_base;
+	private_irq->irq_id = irq_line;
+	remote_pid = smd_edge_to_remote_pid(edge);
+	interrupt_stats[remote_pid].smsm_interrupt_id = irq_line;
+
+	ret = request_irq(irq_line,
+				private_irq->irq_handler,
+				IRQF_TRIGGER_RISING | IRQF_NO_SUSPEND,
+				node->name,
+				NULL);
+	if (ret < 0) {
+		pr_err("%s: request_irq() failed on %d\n", __func__, irq_line);
+		return ret;
+	}
+
+	ret = enable_irq_wake(irq_line);
+	if (ret < 0)
+		pr_err("%s: enable_irq_wake() failed on %d\n", __func__,
+				irq_line);
+
+	ret = smsm_post_init();
+	if (ret) {
+		pr_err("smd_post_init() failed ret=%d\n", ret);
+		return ret;
+	}
+
+	return 0;
+
+missing_key:
+	pr_err("%s: missing key: %s", __func__, key);
+	return -ENODEV;
+}
+
+static int msm_smd_probe(struct platform_device *pdev)
+{
+	uint32_t edge;
+	char *key;
+	int ret;
+	uint32_t irq_offset;
+	uint32_t irq_bitmask;
+	uint32_t irq_line;
+	const char *subsys_name;
+	struct interrupt_config_item *private_irq;
+	struct device_node *node;
+	void *irq_out_base;
+	resource_size_t irq_out_size;
+	struct platform_device *parent_pdev;
+	struct resource *r;
+	struct interrupt_config *private_intr_config;
+	uint32_t remote_pid;
+	bool skip_pil;
+
+	node = pdev->dev.of_node;
+
+	if (!pdev->dev.parent) {
+		pr_err("%s: missing link to parent device\n", __func__);
+		return -ENODEV;
+	}
+
+	mutex_lock(&smd_probe_lock);
+	if (!first_probe_done) {
+		smd_reset_all_edge_subsys_name();
+		first_probe_done = 1;
+	}
+	mutex_unlock(&smd_probe_lock);
+
+	parent_pdev = to_platform_device(pdev->dev.parent);
+
+	key = "irq-reg-base";
+	r = platform_get_resource_byname(parent_pdev, IORESOURCE_MEM, key);
+	if (!r)
+		goto missing_key;
+	irq_out_size = resource_size(r);
+	irq_out_base = ioremap_nocache(r->start, irq_out_size);
+	if (!irq_out_base) {
+		pr_err("%s: ioremap_nocache() of irq_out_base addr:%pr size:%pr\n",
+				__func__, &r->start, &irq_out_size);
+		return -ENOMEM;
+	}
+	SMD_DBG("%s: %s = %p", __func__, key, irq_out_base);
+
+	key = "qcom,smd-edge";
+	ret = of_property_read_u32(node, key, &edge);
+	if (ret)
+		goto missing_key;
+	SMD_DBG("%s: %s = %d", __func__, key, edge);
+
+	key = "qcom,smd-irq-offset";
+	ret = of_property_read_u32(node, key, &irq_offset);
+	if (ret)
+		goto missing_key;
+	SMD_DBG("%s: %s = %x", __func__, key, irq_offset);
+
+	key = "qcom,smd-irq-bitmask";
+	ret = of_property_read_u32(node, key, &irq_bitmask);
+	if (ret)
+		goto missing_key;
+	SMD_DBG("%s: %s = %x", __func__, key, irq_bitmask);
+
+	key = "interrupts";
+	irq_line = irq_of_parse_and_map(node, 0);
+	if (!irq_line)
+		goto missing_key;
+	SMD_DBG("%s: %s = %d", __func__, key, irq_line);
+
+	key = "label";
+	subsys_name = of_get_property(node, key, NULL);
+	SMD_DBG("%s: %s = %s", __func__, key, subsys_name);
+	/*
+	 * Backwards compatibility: although "label" is required, some DTs
+	 * may still carry the legacy "qcom,pil-string" property, so fall
+	 * back to it gracefully.
+	 */
+	if (!subsys_name) {
+		pr_warn("msm_smd: Missing required property - label. Using legacy parsing\n");
+		key = "qcom,pil-string";
+		subsys_name = of_get_property(node, key, NULL);
+		SMD_DBG("%s: %s = %s", __func__, key, subsys_name);
+		if (subsys_name)
+			skip_pil = false;
+		else
+			skip_pil = true;
+	} else {
+		key = "qcom,not-loadable";
+		skip_pil = of_property_read_bool(node, key);
+		SMD_DBG("%s: %s = %d\n", __func__, key, skip_pil);
+	}
+
+	private_intr_config = smd_get_intr_config(edge);
+	if (!private_intr_config) {
+		pr_err("%s: invalid edge\n", __func__);
+		return -ENODEV;
+	}
+	private_irq = &private_intr_config->smd;
+	private_irq->out_bit_pos = irq_bitmask;
+	private_irq->out_offset = irq_offset;
+	private_irq->out_base = irq_out_base;
+	private_irq->irq_id = irq_line;
+	remote_pid = smd_edge_to_remote_pid(edge);
+	interrupt_stats[remote_pid].smd_interrupt_id = irq_line;
+
+	ret = request_irq(irq_line,
+			private_irq->irq_handler,
+			IRQF_TRIGGER_RISING | IRQF_NO_SUSPEND | IRQF_SHARED,
+			node->name,
+			&pdev->dev);
+	if (ret < 0) {
+		pr_err("%s: request_irq() failed on %d\n", __func__, irq_line);
+		return ret;
+	}
+
+	ret = enable_irq_wake(irq_line);
+	if (ret < 0)
+		pr_err("%s: enable_irq_wake() failed on %d\n", __func__,
+				irq_line);
+
+	smd_set_edge_subsys_name(edge, subsys_name);
+	smd_proc_set_skip_pil(smd_edge_to_remote_pid(edge), skip_pil);
+
+	smd_set_edge_initialized(edge);
+	smd_post_init(remote_pid);
+	return 0;
+
+missing_key:
+	pr_err("%s: missing key: %s", __func__, key);
+	return -ENODEV;
+}
+
+static const struct of_device_id msm_smd_match_table[] = {
+	{ .compatible = "qcom,smd" },
+	{},
+};
+
+static struct platform_driver msm_smd_driver = {
+	.probe = msm_smd_probe,
+	.driver = {
+		.name = MODULE_NAME,
+		.owner = THIS_MODULE,
+		.of_match_table = msm_smd_match_table,
+	},
+};
+
+static const struct of_device_id msm_smsm_match_table[] = {
+	{ .compatible = "qcom,smsm" },
+	{},
+};
+
+static struct platform_driver msm_smsm_driver = {
+	.probe = msm_smsm_probe,
+	.driver = {
+		.name = "msm_smsm",
+		.owner = THIS_MODULE,
+		.of_match_table = msm_smsm_match_table,
+	},
+};
+
+int msm_smd_driver_register(void)
+{
+	int rc;
+
+	rc = platform_driver_register(&msm_smd_driver);
+	if (rc) {
+		pr_err("%s: smd_driver register failed %d\n",
+			__func__, rc);
+		return rc;
+	}
+
+	rc = platform_driver_register(&msm_smsm_driver);
+	if (rc) {
+		pr_err("%s: msm_smsm_driver register failed %d\n",
+			__func__, rc);
+		return rc;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(msm_smd_driver_register);
+
+MODULE_DESCRIPTION("MSM SMD Device Tree Init");
+MODULE_LICENSE("GPL v2");
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/soc/qcom/smd_private.c	2019-01-22 16:16:26.675275131 +0100
@@ -0,0 +1,333 @@
+/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include "smd_private.h"
+
+void set_state(volatile void __iomem *half_channel, unsigned data)
+{
+	((struct smd_half_channel __force *)(half_channel))->state = data;
+}
+
+unsigned get_state(volatile void __iomem *half_channel)
+{
+	return ((struct smd_half_channel __force *)(half_channel))->state;
+}
+
+void set_fDSR(volatile void __iomem *half_channel, unsigned char data)
+{
+	((struct smd_half_channel __force *)(half_channel))->fDSR = data;
+}
+
+unsigned get_fDSR(volatile void __iomem *half_channel)
+{
+	return ((struct smd_half_channel __force *)(half_channel))->fDSR;
+}
+
+void set_fCTS(volatile void __iomem *half_channel, unsigned char data)
+{
+	((struct smd_half_channel __force *)(half_channel))->fCTS = data;
+}
+
+unsigned get_fCTS(volatile void __iomem *half_channel)
+{
+	return ((struct smd_half_channel __force *)(half_channel))->fCTS;
+}
+
+void set_fCD(volatile void __iomem *half_channel, unsigned char data)
+{
+	((struct smd_half_channel __force *)(half_channel))->fCD = data;
+}
+
+unsigned get_fCD(volatile void __iomem *half_channel)
+{
+	return ((struct smd_half_channel __force *)(half_channel))->fCD;
+}
+
+void set_fRI(volatile void __iomem *half_channel, unsigned char data)
+{
+	((struct smd_half_channel __force *)(half_channel))->fRI = data;
+}
+
+unsigned get_fRI(volatile void __iomem *half_channel)
+{
+	return ((struct smd_half_channel __force *)(half_channel))->fRI;
+}
+
+void set_fHEAD(volatile void __iomem *half_channel, unsigned char data)
+{
+	((struct smd_half_channel __force *)(half_channel))->fHEAD = data;
+}
+
+unsigned get_fHEAD(volatile void __iomem *half_channel)
+{
+	return ((struct smd_half_channel __force *)(half_channel))->fHEAD;
+}
+
+void set_fTAIL(volatile void __iomem *half_channel, unsigned char data)
+{
+	((struct smd_half_channel __force *)(half_channel))->fTAIL = data;
+}
+
+unsigned get_fTAIL(volatile void __iomem *half_channel)
+{
+	return ((struct smd_half_channel __force *)(half_channel))->fTAIL;
+}
+
+void set_fSTATE(volatile void __iomem *half_channel, unsigned char data)
+{
+	((struct smd_half_channel __force *)(half_channel))->fSTATE = data;
+}
+
+unsigned get_fSTATE(volatile void __iomem *half_channel)
+{
+	return ((struct smd_half_channel __force *)(half_channel))->fSTATE;
+}
+
+void set_fBLOCKREADINTR(volatile void __iomem *half_channel, unsigned char data)
+{
+	((struct smd_half_channel __force *)
+				(half_channel))->fBLOCKREADINTR = data;
+}
+
+unsigned get_fBLOCKREADINTR(volatile void __iomem *half_channel)
+{
+	return ((struct smd_half_channel __force *)
+				(half_channel))->fBLOCKREADINTR;
+}
+
+void set_tail(volatile void __iomem *half_channel, unsigned data)
+{
+	((struct smd_half_channel __force *)(half_channel))->tail = data;
+}
+
+unsigned get_tail(volatile void __iomem *half_channel)
+{
+	return ((struct smd_half_channel __force *)(half_channel))->tail;
+}
+
+void set_head(volatile void __iomem *half_channel, unsigned data)
+{
+	((struct smd_half_channel __force *)(half_channel))->head = data;
+}
+
+unsigned get_head(volatile void __iomem *half_channel)
+{
+	return ((struct smd_half_channel __force *)(half_channel))->head;
+}
+
+void set_state_word_access(volatile void __iomem *half_channel, unsigned data)
+{
+	((struct smd_half_channel_word_access __force *)
+					(half_channel))->state = data;
+}
+
+unsigned get_state_word_access(volatile void __iomem *half_channel)
+{
+	return ((struct smd_half_channel_word_access __force *)
+					(half_channel))->state;
+}
+
+void set_fDSR_word_access(volatile void __iomem *half_channel,
+						unsigned char data)
+{
+	((struct smd_half_channel_word_access __force *)
+					(half_channel))->fDSR = data;
+}
+
+unsigned get_fDSR_word_access(volatile void __iomem *half_channel)
+{
+	return ((struct smd_half_channel_word_access __force *)
+					(half_channel))->fDSR;
+}
+
+void set_fCTS_word_access(volatile void __iomem *half_channel,
+						unsigned char data)
+{
+	((struct smd_half_channel_word_access __force *)
+					(half_channel))->fCTS = data;
+}
+
+unsigned get_fCTS_word_access(volatile void __iomem *half_channel)
+{
+	return ((struct smd_half_channel_word_access __force *)
+					(half_channel))->fCTS;
+}
+
+void set_fCD_word_access(volatile void __iomem *half_channel,
+						unsigned char data)
+{
+	((struct smd_half_channel_word_access __force *)
+					(half_channel))->fCD = data;
+}
+
+unsigned get_fCD_word_access(volatile void __iomem *half_channel)
+{
+	return ((struct smd_half_channel_word_access __force *)
+					(half_channel))->fCD;
+}
+
+void set_fRI_word_access(volatile void __iomem *half_channel,
+						unsigned char data)
+{
+	((struct smd_half_channel_word_access __force *)
+					(half_channel))->fRI = data;
+}
+
+unsigned get_fRI_word_access(volatile void __iomem *half_channel)
+{
+	return ((struct smd_half_channel_word_access __force *)
+					(half_channel))->fRI;
+}
+
+void set_fHEAD_word_access(volatile void __iomem *half_channel,
+						unsigned char data)
+{
+	((struct smd_half_channel_word_access __force *)
+					(half_channel))->fHEAD = data;
+}
+
+unsigned get_fHEAD_word_access(volatile void __iomem *half_channel)
+{
+	return ((struct smd_half_channel_word_access __force *)
+					(half_channel))->fHEAD;
+}
+
+void set_fTAIL_word_access(volatile void __iomem *half_channel,
+						unsigned char data)
+{
+	((struct smd_half_channel_word_access __force *)
+					(half_channel))->fTAIL = data;
+}
+
+unsigned get_fTAIL_word_access(volatile void __iomem *half_channel)
+{
+	return ((struct smd_half_channel_word_access __force *)
+					(half_channel))->fTAIL;
+}
+
+void set_fSTATE_word_access(volatile void __iomem *half_channel,
+						unsigned char data)
+{
+	((struct smd_half_channel_word_access __force *)
+					(half_channel))->fSTATE = data;
+}
+
+unsigned get_fSTATE_word_access(volatile void __iomem *half_channel)
+{
+	return ((struct smd_half_channel_word_access __force *)
+					(half_channel))->fSTATE;
+}
+
+void set_fBLOCKREADINTR_word_access(volatile void __iomem *half_channel,
+							unsigned char data)
+{
+	((struct smd_half_channel_word_access __force *)
+					(half_channel))->fBLOCKREADINTR = data;
+}
+
+unsigned get_fBLOCKREADINTR_word_access(volatile void __iomem *half_channel)
+{
+	return ((struct smd_half_channel_word_access __force *)
+					(half_channel))->fBLOCKREADINTR;
+}
+
+void set_tail_word_access(volatile void __iomem *half_channel, unsigned data)
+{
+	((struct smd_half_channel_word_access __force *)
+					(half_channel))->tail = data;
+}
+
+unsigned get_tail_word_access(volatile void __iomem *half_channel)
+{
+	return ((struct smd_half_channel_word_access __force *)
+					(half_channel))->tail;
+}
+
+void set_head_word_access(volatile void __iomem *half_channel, unsigned data)
+{
+	((struct smd_half_channel_word_access __force *)
+					(half_channel))->head = data;
+}
+
+unsigned get_head_word_access(volatile void __iomem *half_channel)
+{
+	return ((struct smd_half_channel_word_access __force *)
+					(half_channel))->head;
+}
+
+int is_word_access_ch(unsigned ch_type)
+{
+	if (ch_type == SMD_APPS_RPM || ch_type == SMD_MODEM_RPM ||
+		ch_type == SMD_QDSP_RPM || ch_type == SMD_WCNSS_RPM ||
+		ch_type == SMD_TZ_RPM)
+		return 1;
+	else
+		return 0;
+}
+
+struct smd_half_channel_access *get_half_ch_funcs(unsigned ch_type)
+{
+	static struct smd_half_channel_access byte_access = {
+		.set_state = set_state,
+		.get_state = get_state,
+		.set_fDSR = set_fDSR,
+		.get_fDSR = get_fDSR,
+		.set_fCTS = set_fCTS,
+		.get_fCTS = get_fCTS,
+		.set_fCD = set_fCD,
+		.get_fCD = get_fCD,
+		.set_fRI = set_fRI,
+		.get_fRI = get_fRI,
+		.set_fHEAD = set_fHEAD,
+		.get_fHEAD = get_fHEAD,
+		.set_fTAIL = set_fTAIL,
+		.get_fTAIL = get_fTAIL,
+		.set_fSTATE = set_fSTATE,
+		.get_fSTATE = get_fSTATE,
+		.set_fBLOCKREADINTR = set_fBLOCKREADINTR,
+		.get_fBLOCKREADINTR = get_fBLOCKREADINTR,
+		.set_tail = set_tail,
+		.get_tail = get_tail,
+		.set_head = set_head,
+		.get_head = get_head,
+	};
+	static struct smd_half_channel_access word_access = {
+		.set_state = set_state_word_access,
+		.get_state = get_state_word_access,
+		.set_fDSR = set_fDSR_word_access,
+		.get_fDSR = get_fDSR_word_access,
+		.set_fCTS = set_fCTS_word_access,
+		.get_fCTS = get_fCTS_word_access,
+		.set_fCD = set_fCD_word_access,
+		.get_fCD = get_fCD_word_access,
+		.set_fRI = set_fRI_word_access,
+		.get_fRI = get_fRI_word_access,
+		.set_fHEAD = set_fHEAD_word_access,
+		.get_fHEAD = get_fHEAD_word_access,
+		.set_fTAIL = set_fTAIL_word_access,
+		.get_fTAIL = get_fTAIL_word_access,
+		.set_fSTATE = set_fSTATE_word_access,
+		.get_fSTATE = get_fSTATE_word_access,
+		.set_fBLOCKREADINTR = set_fBLOCKREADINTR_word_access,
+		.get_fBLOCKREADINTR = get_fBLOCKREADINTR_word_access,
+		.set_tail = set_tail_word_access,
+		.get_tail = get_tail_word_access,
+		.set_head = set_head_word_access,
+		.get_head = get_head_word_access,
+	};
+
+	if (is_word_access_ch(ch_type))
+		return &word_access;
+	else
+		return &byte_access;
+}
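+
+/*
+ * Illustrative use (not part of this file): callers are expected to cache
+ * the returned ops table once per channel and route every shared-memory
+ * access through it, e.g.
+ *
+ *	ch->half_ch = get_half_ch_funcs(ch->type);
+ *	ch->half_ch->set_state(ch->send, SMD_SS_OPENING);
+ */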
+
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/soc/qcom/smd_private.h	2019-01-22 16:16:26.675275131 +0100
@@ -0,0 +1,243 @@
+/* drivers/soc/qcom/smd_private.h
+ *
+ * Copyright (C) 2007 Google, Inc.
+ * Copyright (c) 2007-2014, The Linux Foundation. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef _ARCH_ARM_MACH_MSM_MSM_SMD_PRIVATE_H_
+#define _ARCH_ARM_MACH_MSM_MSM_SMD_PRIVATE_H_
+
+#include <linux/types.h>
+#include <linux/spinlock.h>
+#include <linux/errno.h>
+#include <linux/remote_spinlock.h>
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+
+#include <soc/qcom/smd.h>
+#include <soc/qcom/smsm.h>
+
+#define VERSION_QDSP6     4
+#define VERSION_APPS_SBL  6
+#define VERSION_MODEM_SBL 7
+#define VERSION_APPS      8
+#define VERSION_MODEM     9
+#define VERSION_DSPS      10
+
+#define ID_SMD_CHANNELS SMEM_SMD_BASE_ID
+#define ID_SHARED_STATE SMEM_SMSM_SHARED_STATE
+#define ID_CH_ALLOC_TBL SMEM_CHANNEL_ALLOC_TBL
+
+#define SMD_SS_CLOSED            0x00000000
+#define SMD_SS_OPENING           0x00000001
+#define SMD_SS_OPENED            0x00000002
+#define SMD_SS_FLUSHING          0x00000003
+#define SMD_SS_CLOSING           0x00000004
+#define SMD_SS_RESET             0x00000005
+#define SMD_SS_RESET_OPENING     0x00000006
+
+#define SMD_HEADER_SIZE          20
+
+/* 'type' field of smd_alloc_elm structure
+ * has the following breakup
+ * bits 0-7   -> channel type
+ * bits 8-11  -> xfer type
+ * bits 12-31 -> reserved
+ */
+struct smd_alloc_elm {
+	char name[20];
+	uint32_t cid;
+	uint32_t type;
+	uint32_t ref_count;
+};
+
+#define SMD_CHANNEL_TYPE(x) ((x) & 0x000000FF)
+#define SMD_XFER_TYPE(x)    (((x) & 0x00000F00) >> 8)
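+
+/*
+ * Worked example: for type = 0x00000102, SMD_CHANNEL_TYPE() yields 0x02
+ * (bits 0-7) and SMD_XFER_TYPE() yields 0x1 (bits 8-11).
+ */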
+
+struct smd_half_channel {
+	unsigned state;
+	unsigned char fDSR;
+	unsigned char fCTS;
+	unsigned char fCD;
+	unsigned char fRI;
+	unsigned char fHEAD;
+	unsigned char fTAIL;
+	unsigned char fSTATE;
+	unsigned char fBLOCKREADINTR;
+	unsigned tail;
+	unsigned head;
+};
+
+struct smd_half_channel_word_access {
+	unsigned state;
+	unsigned fDSR;
+	unsigned fCTS;
+	unsigned fCD;
+	unsigned fRI;
+	unsigned fHEAD;
+	unsigned fTAIL;
+	unsigned fSTATE;
+	unsigned fBLOCKREADINTR;
+	unsigned tail;
+	unsigned head;
+};
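+
+/*
+ * smd_half_channel_word_access mirrors smd_half_channel but widens every
+ * flag to 32 bits, for edges whose memory only tolerates word-sized
+ * accesses (the RPM edges matched by is_word_access_ch() below).
+ */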
+
+struct smd_half_channel_access {
+	void (*set_state)(volatile void __iomem *half_channel, unsigned data);
+	unsigned (*get_state)(volatile void __iomem *half_channel);
+	void (*set_fDSR)(volatile void __iomem *half_channel,
+					unsigned char data);
+	unsigned (*get_fDSR)(volatile void __iomem *half_channel);
+	void (*set_fCTS)(volatile void __iomem *half_channel,
+					unsigned char data);
+	unsigned (*get_fCTS)(volatile void __iomem *half_channel);
+	void (*set_fCD)(volatile void __iomem *half_channel,
+					unsigned char data);
+	unsigned (*get_fCD)(volatile void __iomem *half_channel);
+	void (*set_fRI)(volatile void __iomem *half_channel,
+					unsigned char data);
+	unsigned (*get_fRI)(volatile void __iomem *half_channel);
+	void (*set_fHEAD)(volatile void __iomem *half_channel,
+					unsigned char data);
+	unsigned (*get_fHEAD)(volatile void __iomem *half_channel);
+	void (*set_fTAIL)(volatile void __iomem *half_channel,
+					unsigned char data);
+	unsigned (*get_fTAIL)(volatile void __iomem *half_channel);
+	void (*set_fSTATE)(volatile void __iomem *half_channel,
+					unsigned char data);
+	unsigned (*get_fSTATE)(volatile void __iomem *half_channel);
+	void (*set_fBLOCKREADINTR)(volatile void __iomem *half_channel,
+					unsigned char data);
+	unsigned (*get_fBLOCKREADINTR)(volatile void __iomem *half_channel);
+	void (*set_tail)(volatile void __iomem *half_channel, unsigned data);
+	unsigned (*get_tail)(volatile void __iomem *half_channel);
+	void (*set_head)(volatile void __iomem *half_channel, unsigned data);
+	unsigned (*get_head)(volatile void __iomem *half_channel);
+};
+
+int is_word_access_ch(unsigned ch_type);
+
+struct smd_half_channel_access *get_half_ch_funcs(unsigned ch_type);
+
+struct smd_channel {
+	volatile void __iomem *send; /* some variant of smd_half_channel */
+	volatile void __iomem *recv; /* some variant of smd_half_channel */
+	unsigned char *send_data;
+	unsigned char *recv_data;
+	unsigned fifo_size;
+	struct list_head ch_list;
+
+	unsigned current_packet;
+	unsigned n;
+	void *priv;
+	void (*notify)(void *priv, unsigned flags);
+
+	int (*read)(smd_channel_t *ch, void *data, int len);
+	int (*write)(smd_channel_t *ch, const void *data, int len,
+			bool int_ntfy);
+	int (*read_avail)(smd_channel_t *ch);
+	int (*write_avail)(smd_channel_t *ch);
+	int (*read_from_cb)(smd_channel_t *ch, void *data, int len);
+
+	void (*update_state)(smd_channel_t *ch);
+	unsigned last_state;
+	void (*notify_other_cpu)(smd_channel_t *ch);
+	void * (*read_from_fifo)(void *dest, const void *src, size_t num_bytes);
+	void * (*write_to_fifo)(void *dest, const void *src, size_t num_bytes);
+
+	char name[20];
+	struct platform_device pdev;
+	unsigned type;
+
+	int pending_pkt_sz;
+
+	char is_pkt_ch;
+
+	/*
+	 * private internal functions to access *send and *recv.
+	 * never to be exported outside of smd
+	 */
+	struct smd_half_channel_access *half_ch;
+};
+
+extern spinlock_t smem_lock;
+
+struct interrupt_stat {
+	uint32_t smd_in_count;
+	uint32_t smd_out_count;
+	uint32_t smd_interrupt_id;
+
+	uint32_t smsm_in_count;
+	uint32_t smsm_out_count;
+	uint32_t smsm_interrupt_id;
+};
+extern struct interrupt_stat interrupt_stats[NUM_SMD_SUBSYSTEMS];
+
+struct interrupt_config_item {
+	/* must be initialized */
+	irqreturn_t (*irq_handler)(int req, void *data);
+	/* outgoing interrupt config (set from platform data) */
+	uint32_t out_bit_pos;
+	void __iomem *out_base;
+	uint32_t out_offset;
+	int irq_id;
+};
+
+enum {
+	MSM_SMD_DEBUG = 1U << 0,
+	MSM_SMSM_DEBUG = 1U << 1,
+	MSM_SMD_INFO = 1U << 2,
+	MSM_SMSM_INFO = 1U << 3,
+	MSM_SMD_POWER_INFO = 1U << 4,
+	MSM_SMSM_POWER_INFO = 1U << 5,
+};
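+
+/*
+ * Example (illustrative): SMD info and power logging are enabled by
+ * setting msm_smd_debug_mask = MSM_SMD_INFO | MSM_SMD_POWER_INFO;
+ */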
+
+struct interrupt_config {
+	struct interrupt_config_item smd;
+	struct interrupt_config_item smsm;
+};
+
+struct edge_to_pid {
+	uint32_t	local_pid;
+	uint32_t	remote_pid;
+	char		subsys_name[SMD_MAX_CH_NAME_LEN];
+	bool		initialized;
+};
+
+extern void *smd_log_ctx;
+extern int msm_smd_debug_mask;
+
+extern irqreturn_t smd_modem_irq_handler(int irq, void *data);
+extern irqreturn_t smsm_modem_irq_handler(int irq, void *data);
+extern irqreturn_t smd_dsp_irq_handler(int irq, void *data);
+extern irqreturn_t smsm_dsp_irq_handler(int irq, void *data);
+extern irqreturn_t smd_dsps_irq_handler(int irq, void *data);
+extern irqreturn_t smsm_dsps_irq_handler(int irq, void *data);
+extern irqreturn_t smd_wcnss_irq_handler(int irq, void *data);
+extern irqreturn_t smsm_wcnss_irq_handler(int irq, void *data);
+extern irqreturn_t smd_rpm_irq_handler(int irq, void *data);
+extern irqreturn_t smd_modemfw_irq_handler(int irq, void *data);
+
+extern int msm_smd_driver_register(void);
+extern void smd_post_init(unsigned remote_pid);
+extern int smsm_post_init(void);
+
+extern struct interrupt_config *smd_get_intr_config(uint32_t edge);
+extern int smd_edge_to_remote_pid(uint32_t edge);
+extern int smd_edge_to_local_pid(uint32_t edge);
+extern void smd_set_edge_subsys_name(uint32_t edge, const char *subsys_name);
+extern void smd_reset_all_edge_subsys_name(void);
+extern void smd_proc_set_skip_pil(unsigned pid, bool skip_pil);
+extern void smd_set_edge_initialized(uint32_t edge);
+extern void smd_cfg_smd_intr(uint32_t proc, uint32_t mask, void *ptr);
+extern void smd_cfg_smsm_intr(uint32_t proc, uint32_t mask, void *ptr);
+#endif
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/soc/qcom/smem_debug.c	2019-01-22 16:16:26.675275131 +0100
@@ -0,0 +1,138 @@
+/* arch/arm/mach-msm/smem_debug.c
+ *
+ * Copyright (C) 2007 Google, Inc.
+ * Copyright (c) 2009-2013,2016 The Linux Foundation. All rights reserved.
+ * Author: Brian Swetland <swetland@google.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/debugfs.h>
+#include <linux/list.h>
+#include <linux/ctype.h>
+#include <linux/jiffies.h>
+
+#include <soc/qcom/smem.h>
+
+#include "smem_private.h"
+
+#if defined(CONFIG_DEBUG_FS)
+
+#define SZ_SMEM_ALLOCATION_TABLE 8192
+
+static void debug_read_mem(struct seq_file *s)
+{
+	unsigned n;
+	struct smem_heap_info *heap_info;
+	struct smem_heap_entry *toc;
+
+	heap_info = smem_find(SMEM_HEAP_INFO, sizeof(struct smem_heap_info),
+						0,
+						SMEM_ANY_HOST_FLAG);
+	if (!heap_info) {
+		seq_puts(s, "SMEM_HEAP_INFO is NULL\n");
+		return;
+	}
+	toc = smem_find(SMEM_ALLOCATION_TABLE, SZ_SMEM_ALLOCATION_TABLE,
+							0, SMEM_ANY_HOST_FLAG);
+	if (!toc) {
+		seq_puts(s, "SMEM_ALLOCATION_TABLE is NULL\n");
+		return;
+	}
+
+	seq_printf(s, "heap: init=%d free=%d remain=%d\n",
+		       heap_info->initialized,
+		       heap_info->free_offset,
+		       heap_info->heap_remaining);
+
+	for (n = 0; n < smem_max_items; n++) {
+		if (toc[n].allocated == 0)
+			continue;
+		seq_printf(s, "%04d: offset %08x size %08x\n",
+			       n, toc[n].offset, toc[n].size);
+	}
+}
+
+static void debug_read_smem_version(struct seq_file *s)
+{
+	uint32_t n, version;
+
+	for (n = 0; n < 32; n++) {
+		version = smem_get_version(n);
+		seq_printf(s, "entry %d:%x smem = %d  proc_comm = %d\n",
+				n, version, version >> 16, version & 0xffff);
+	}
+}
+
+static void debug_read_build_id(struct seq_file *s)
+{
+	unsigned size;
+	void *data;
+
+	data = smem_get_entry(SMEM_HW_SW_BUILD_ID, &size, 0,
+							SMEM_ANY_HOST_FLAG);
+	if (!data)
+		return;
+
+	seq_write(s, data, size);
+}
+
+static int debugfs_show(struct seq_file *s, void *data)
+{
+	void (*show)(struct seq_file *) = s->private;
+
+	show(s);
+
+	return 0;
+}
+
+static int debug_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, debugfs_show, inode->i_private);
+}
+
+static const struct file_operations debug_ops = {
+	.open = debug_open,
+	.release = single_release,
+	.read = seq_read,
+	.llseek = seq_lseek,
+};
+
+static void debug_create(const char *name, umode_t mode,
+			 struct dentry *dent,
+			 void (*show)(struct seq_file *))
+{
+	struct dentry *file;
+
+	file = debugfs_create_file(name, mode, dent, show, &debug_ops);
+	if (!file)
+		pr_err("%s: unable to create file '%s'\n", __func__, name);
+}
+
+static int __init smem_debugfs_init(void)
+{
+	struct dentry *dent;
+
+	dent = debugfs_create_dir("smem", 0);
+	if (IS_ERR(dent))
+		return PTR_ERR(dent);
+
+	debug_create("mem", 0444, dent, debug_read_mem);
+	debug_create("version", 0444, dent, debug_read_smem_version);
+
+	/* NNV: this is google only stuff */
+	debug_create("build", 0444, dent, debug_read_build_id);
+
+	return 0;
+}
+
+late_initcall(smem_debugfs_init);
+#endif
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/soc/qcom/smem_log.c	2019-01-22 16:16:26.675275131 +0100
@@ -0,0 +1,1035 @@
+/* Copyright (c) 2008-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+/*
+ * Shared memory logging implementation.
+ */
+
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/miscdevice.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/jiffies.h>
+#include <linux/remote_spinlock.h>
+#include <linux/debugfs.h>
+#include <linux/io.h>
+#include <linux/string.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+#include <linux/delay.h>
+
+#include <soc/qcom/smem.h>
+#include <soc/qcom/smem_log.h>
+
+#include <asm/arch_timer.h>
+
+#include "smem_private.h"
+
+#define DEBUG
+#undef DEBUG
+
+#ifdef DEBUG
+#define D_DUMP_BUFFER(prestr, cnt, buf) \
+do { \
+	int i; \
+	printk(KERN_ERR "%s", prestr); \
+	for (i = 0; i < cnt; i++) \
+		printk(KERN_ERR "%.2x", buf[i]); \
+	printk(KERN_ERR "\n"); \
+} while (0)
+#else
+#define D_DUMP_BUFFER(prestr, cnt, buf)
+#endif
+
+#ifdef DEBUG
+#define D(x...) printk(x)
+#else
+#define D(x...) do {} while (0)
+#endif
+
+struct smem_log_item {
+	uint32_t identifier;
+	uint32_t timetick;
+	uint32_t data1;
+	uint32_t data2;
+	uint32_t data3;
+};
+
+#define SMEM_LOG_NUM_ENTRIES 2000
+#define SMEM_LOG_EVENTS_SIZE (sizeof(struct smem_log_item) * \
+			      SMEM_LOG_NUM_ENTRIES)
+
+#define SMEM_SPINLOCK_SMEM_LOG		"S:2"
+
+static remote_spinlock_t remote_spinlock;
+static uint32_t smem_log_enable;
+static int smem_log_initialized;
+
+module_param_named(log_enable, smem_log_enable, int,
+		   S_IRUGO | S_IWUSR | S_IWGRP);
+
+struct smem_log_inst {
+	int which_log;
+	struct smem_log_item __iomem *events;
+	uint32_t __iomem *idx;
+	uint32_t num;
+	uint32_t read_idx;
+	uint32_t last_read_avail;
+	wait_queue_head_t read_wait;
+	remote_spinlock_t *remote_spinlock;
+};
+
+enum smem_logs {
+	GEN = 0,
+	NUM
+};
+
+static struct smem_log_inst inst[NUM];
+
+#if defined(CONFIG_DEBUG_FS)
+
+#define HSIZE 13
+
+struct sym {
+	uint32_t val;
+	char *str;
+	struct hlist_node node;
+};
+
+static struct sym id_syms[] = {
+	{ SMEM_LOG_PROC_ID_MODEM, "MODM" },
+	{ SMEM_LOG_PROC_ID_Q6, "QDSP" },
+	{ SMEM_LOG_PROC_ID_APPS, "APPS" },
+	{ SMEM_LOG_PROC_ID_WCNSS, "WCNSS" },
+};
+
+static struct sym base_syms[] = {
+	{ SMEM_LOG_SMEM_EVENT_BASE, "SMEM" },
+	{ SMEM_LOG_ERROR_EVENT_BASE, "ERROR" },
+	{ SMEM_LOG_QMI_CCI_EVENT_BASE, "QCCI" },
+	{ SMEM_LOG_QMI_CSI_EVENT_BASE, "QCSI" },
+};
+
+static struct sym event_syms[] = {
+	{ ERR_ERROR_FATAL, "ERR_ERROR_FATAL" },
+	{ ERR_ERROR_FATAL_TASK, "ERR_ERROR_FATAL_TASK" },
+	{ SMEM_LOG_EVENT_CB, "CB" },
+	{ SMEM_LOG_EVENT_START, "START" },
+	{ SMEM_LOG_EVENT_INIT, "INIT" },
+	{ SMEM_LOG_EVENT_RUNNING, "RUNNING" },
+	{ SMEM_LOG_EVENT_STOP, "STOP" },
+	{ SMEM_LOG_EVENT_RESTART, "RESTART" },
+	{ SMEM_LOG_EVENT_SS, "SS" },
+	{ SMEM_LOG_EVENT_READ, "READ" },
+	{ SMEM_LOG_EVENT_WRITE, "WRITE" },
+	{ SMEM_LOG_EVENT_SIGS1, "SIGS1" },
+	{ SMEM_LOG_EVENT_SIGS2, "SIGS2" },
+	{ SMEM_LOG_EVENT_WRITE_DM, "WRITE_DM" },
+	{ SMEM_LOG_EVENT_READ_DM, "READ_DM" },
+	{ SMEM_LOG_EVENT_SKIP_DM, "SKIP_DM" },
+	{ SMEM_LOG_EVENT_STOP_DM, "STOP_DM" },
+	{ SMEM_LOG_EVENT_ISR, "ISR" },
+	{ SMEM_LOG_EVENT_TASK, "TASK" },
+	{ SMEM_LOG_EVENT_RS, "RS" },
+};
+
+static struct sym smsm_syms[] = {
+	{ 0x80000000, "UN" },
+	{ 0x7F000000, "ERR" },
+	{ 0x00800000, "SMLP" },
+	{ 0x00400000, "ADWN" },
+	{ 0x00200000, "PWRS" },
+	{ 0x00100000, "DWLD" },
+	{ 0x00080000, "SRBT" },
+	{ 0x00040000, "SDWN" },
+	{ 0x00020000, "ARBT" },
+	{ 0x00010000, "REL" },
+	{ 0x00008000, "SLE" },
+	{ 0x00004000, "SLP" },
+	{ 0x00002000, "WFPI" },
+	{ 0x00001000, "EEX" },
+	{ 0x00000800, "TIN" },
+	{ 0x00000400, "TWT" },
+	{ 0x00000200, "PWRC" },
+	{ 0x00000100, "RUN" },
+	{ 0x00000080, "SA" },
+	{ 0x00000040, "RES" },
+	{ 0x00000020, "RIN" },
+	{ 0x00000010, "RWT" },
+	{ 0x00000008, "SIN" },
+	{ 0x00000004, "SWT" },
+	{ 0x00000002, "OE" },
+	{ 0x00000001, "I" },
+};
+
+static struct sym smsm_entry_type_syms[] = {
+	{ 0, "SMSM_APPS_STATE" },
+	{ 1, "SMSM_MODEM_STATE" },
+	{ 2, "SMSM_Q6_STATE" },
+	{ 3, "SMSM_APPS_DEM" },
+	{ 4, "SMSM_MODEM_DEM" },
+	{ 5, "SMSM_Q6_DEM" },
+	{ 6, "SMSM_POWER_MASTER_DEM" },
+	{ 7, "SMSM_TIME_MASTER_DEM" },
+};
+
+static struct sym smsm_state_syms[] = {
+	{ 0x00000001, "INIT" },
+	{ 0x00000002, "OSENTERED" },
+	{ 0x00000004, "SMDWAIT" },
+	{ 0x00000008, "SMDINIT" },
+	{ 0x00000010, "RPCWAIT" },
+	{ 0x00000020, "RPCINIT" },
+	{ 0x00000040, "RESET" },
+	{ 0x00000080, "RSA" },
+	{ 0x00000100, "RUN" },
+	{ 0x00000200, "PWRC" },
+	{ 0x00000400, "TIMEWAIT" },
+	{ 0x00000800, "TIMEINIT" },
+	{ 0x00001000, "PWRC_EARLY_EXIT" },
+	{ 0x00002000, "WFPI" },
+	{ 0x00004000, "SLEEP" },
+	{ 0x00008000, "SLEEPEXIT" },
+	{ 0x00010000, "OEMSBL_RELEASE" },
+	{ 0x00020000, "APPS_REBOOT" },
+	{ 0x00040000, "SYSTEM_POWER_DOWN" },
+	{ 0x00080000, "SYSTEM_REBOOT" },
+	{ 0x00100000, "SYSTEM_DOWNLOAD" },
+	{ 0x00200000, "PWRC_SUSPEND" },
+	{ 0x00400000, "APPS_SHUTDOWN" },
+	{ 0x00800000, "SMD_LOOPBACK" },
+	{ 0x01000000, "RUN_QUIET" },
+	{ 0x02000000, "MODEM_WAIT" },
+	{ 0x04000000, "MODEM_BREAK" },
+	{ 0x08000000, "MODEM_CONTINUE" },
+	{ 0x80000000, "UNKNOWN" },
+};
+
+enum sym_tables {
+	ID_SYM,
+	BASE_SYM,
+	EVENT_SYM,
+	SMSM_SYM,
+	SMSM_ENTRY_TYPE_SYM,
+	SMSM_STATE_SYM,
+};
+
+static struct sym_tbl {
+	struct sym *data;
+	int size;
+	struct hlist_head hlist[HSIZE];
+} tbl[] = {
+	{ id_syms, ARRAY_SIZE(id_syms) },
+	{ base_syms, ARRAY_SIZE(base_syms) },
+	{ event_syms, ARRAY_SIZE(event_syms) },
+	{ smsm_syms, ARRAY_SIZE(smsm_syms) },
+	{ smsm_entry_type_syms, ARRAY_SIZE(smsm_entry_type_syms) },
+	{ smsm_state_syms, ARRAY_SIZE(smsm_state_syms) },
+};
+
+#define hash(val) ((val) % HSIZE)
+
+static void init_syms(void)
+{
+	int i;
+	int j;
+
+	for (i = 0; i < ARRAY_SIZE(tbl); ++i)
+		for (j = 0; j < HSIZE; ++j)
+			INIT_HLIST_HEAD(&tbl[i].hlist[j]);
+
+	for (i = 0; i < ARRAY_SIZE(tbl); ++i)
+		for (j = 0; j < tbl[i].size; ++j) {
+			INIT_HLIST_NODE(&tbl[i].data[j].node);
+			hlist_add_head(&tbl[i].data[j].node,
+				       &tbl[i].hlist[hash(tbl[i].data[j].val)]);
+		}
+}
+
+static char *find_sym(uint32_t id, uint32_t val)
+{
+	struct hlist_node *n;
+	struct sym *s;
+
+	hlist_for_each(n, &tbl[id].hlist[hash(val)]) {
+		s = hlist_entry(n, struct sym, node);
+		if (s->val == val)
+			return s->str;
+	}
+
+	return NULL;
+}
+
+#else
+static void init_syms(void) {}
+#endif
+
+union fifo_mem {
+	uint64_t u64;
+	uint8_t u8;
+};
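+
+/*
+ * The union above lets memcpy_to_log() alias the log FIFO either as
+ * single bytes (u8) or as aligned 64-bit words (u64), so every write is
+ * naturally aligned as required for Device memory.
+ */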
+
+/**
+ * memcpy_to_log() - copy to SMEM log FIFO
+ * @dest: Destination address
+ * @src: Source address
+ * @num_bytes: Number of bytes to copy
+ *
+ * @return: Address of destination
+ *
+ * This function copies num_bytes from src to dest, maintaining natural
+ * alignment for accesses to dest as required for Device memory.
+ */
+static void *memcpy_to_log(void *dest, const void *src, size_t num_bytes)
+{
+	union fifo_mem *temp_dst = (union fifo_mem *)dest;
+	union fifo_mem *temp_src = (union fifo_mem *)src;
+	uintptr_t mask = sizeof(union fifo_mem) - 1;
+
+	/* Do byte copies until we hit 8-byte (double word) alignment */
+	while ((uintptr_t)temp_dst & mask && num_bytes) {
+		__raw_writeb_no_log(temp_src->u8, temp_dst);
+		temp_src = (union fifo_mem *)((uintptr_t)temp_src + 1);
+		temp_dst = (union fifo_mem *)((uintptr_t)temp_dst + 1);
+		num_bytes--;
+	}
+
+	/* Do double word copies */
+	while (num_bytes >= sizeof(union fifo_mem)) {
+		__raw_writeq_no_log(temp_src->u64, temp_dst);
+		temp_dst++;
+		temp_src++;
+		num_bytes -= sizeof(union fifo_mem);
+	}
+
+	/* Copy remaining bytes */
+	while (num_bytes--) {
+		__raw_writeb_no_log(temp_src->u8, temp_dst);
+		temp_src = (union fifo_mem *)((uintptr_t)temp_src + 1);
+		temp_dst = (union fifo_mem *)((uintptr_t)temp_dst + 1);
+	}
+
+	return dest;
+}
+
+static inline unsigned int read_timestamp(void)
+{
+	return (unsigned int)(arch_counter_get_cntvct());
+}
+
+static void smem_log_event_from_user(struct smem_log_inst *inst,
+				     const char *buf, int size, int num)
+{
+	uint32_t idx;
+	uint32_t next_idx;
+	unsigned long flags;
+	uint32_t identifier = 0;
+	uint32_t timetick = 0;
+	int first = 1;
+
+	if (!inst->idx) {
+		pr_err("%s: invalid write index\n", __func__);
+		return;
+	}
+
+	remote_spin_lock_irqsave(inst->remote_spinlock, flags);
+
+	while (num--) {
+		idx = *inst->idx;
+
+		if (idx < inst->num) {
+			memcpy_to_log(&inst->events[idx], buf, size);
+
+			if (first) {
+				identifier =
+					inst->events[idx].
+					identifier;
+				timetick = read_timestamp();
+				first = 0;
+			} else {
+				identifier |= SMEM_LOG_CONT;
+			}
+			inst->events[idx].identifier =
+				identifier;
+			inst->events[idx].timetick =
+				timetick;
+		}
+
+		next_idx = idx + 1;
+		if (next_idx >= inst->num)
+			next_idx = 0;
+		*inst->idx = next_idx;
+		buf += sizeof(struct smem_log_item);
+	}
+
+	wmb();
+	remote_spin_unlock_irqrestore(inst->remote_spinlock, flags);
+}
+
+static void _smem_log_event(
+	struct smem_log_item __iomem *events,
+	uint32_t __iomem *_idx,
+	remote_spinlock_t *lock,
+	int num,
+	uint32_t id, uint32_t data1, uint32_t data2,
+	uint32_t data3)
+{
+	struct smem_log_item item;
+	uint32_t idx;
+	uint32_t next_idx;
+	unsigned long flags;
+
+	item.timetick = read_timestamp();
+	item.identifier = id;
+	item.data1 = data1;
+	item.data2 = data2;
+	item.data3 = data3;
+
+	remote_spin_lock_irqsave(lock, flags);
+
+	idx = *_idx;
+
+	if (idx < num)
+		memcpy_to_log(&events[idx], &item, sizeof(item));
+
+	next_idx = idx + 1;
+	if (next_idx >= num)
+		next_idx = 0;
+	*_idx = next_idx;
+	wmb();
+
+	remote_spin_unlock_irqrestore(lock, flags);
+}
+
+static void _smem_log_event6(
+	struct smem_log_item __iomem *events,
+	uint32_t __iomem *_idx,
+	remote_spinlock_t *lock,
+	int num,
+	uint32_t id, uint32_t data1, uint32_t data2,
+	uint32_t data3, uint32_t data4, uint32_t data5,
+	uint32_t data6)
+{
+	struct smem_log_item item[2];
+	uint32_t idx;
+	uint32_t next_idx;
+	unsigned long flags;
+
+	item[0].timetick = read_timestamp();
+	item[0].identifier = id;
+	item[0].data1 = data1;
+	item[0].data2 = data2;
+	item[0].data3 = data3;
+	item[1].identifier = item[0].identifier;
+	item[1].timetick = item[0].timetick;
+	item[1].data1 = data4;
+	item[1].data2 = data5;
+	item[1].data3 = data6;
+
+	remote_spin_lock_irqsave(lock, flags);
+
+	idx = *_idx;
+
+	/* FIXME: Wrap around */
+	if (idx < (num-1))
+		memcpy_to_log(&events[idx], &item, sizeof(item));
+
+	next_idx = idx + 2;
+	if (next_idx >= num)
+		next_idx = 0;
+	*_idx = next_idx;
+
+	wmb();
+	remote_spin_unlock_irqrestore(lock, flags);
+}
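+
+/*
+ * A six-word event occupies two consecutive log slots, which is why the
+ * bounds check above uses (num - 1) and the write index advances by 2;
+ * per the FIXME, a pair that would straddle the wrap point is dropped.
+ */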
+
+void smem_log_event(uint32_t id, uint32_t data1, uint32_t data2,
+		    uint32_t data3)
+{
+	if (smem_log_enable)
+		_smem_log_event(inst[GEN].events, inst[GEN].idx,
+				inst[GEN].remote_spinlock,
+				SMEM_LOG_NUM_ENTRIES, id,
+				data1, data2, data3);
+}
+
+void smem_log_event6(uint32_t id, uint32_t data1, uint32_t data2,
+		     uint32_t data3, uint32_t data4, uint32_t data5,
+		     uint32_t data6)
+{
+	if (smem_log_enable)
+		_smem_log_event6(inst[GEN].events, inst[GEN].idx,
+				 inst[GEN].remote_spinlock,
+				 SMEM_LOG_NUM_ENTRIES, id,
+				 data1, data2, data3, data4, data5, data6);
+}
+
+static int _smem_log_init(void)
+{
+	int ret;
+
+	inst[GEN].which_log = GEN;
+	inst[GEN].events =
+		(struct smem_log_item *)smem_alloc(SMEM_SMEM_LOG_EVENTS,
+						  SMEM_LOG_EVENTS_SIZE,
+						  0,
+						  SMEM_ANY_HOST_FLAG);
+	inst[GEN].idx = (uint32_t *)smem_alloc(SMEM_SMEM_LOG_IDX,
+					     sizeof(uint32_t),
+					     0,
+					     SMEM_ANY_HOST_FLAG);
+	if (IS_ERR_OR_NULL(inst[GEN].events) || IS_ERR_OR_NULL(inst[GEN].idx)) {
+		pr_err("%s: no log or log_idx allocated\n", __func__);
+		return -ENODEV;
+	}
+
+	inst[GEN].num = SMEM_LOG_NUM_ENTRIES;
+	inst[GEN].read_idx = 0;
+	inst[GEN].last_read_avail = SMEM_LOG_NUM_ENTRIES;
+	init_waitqueue_head(&inst[GEN].read_wait);
+	inst[GEN].remote_spinlock = &remote_spinlock;
+
+	ret = remote_spin_lock_init(&remote_spinlock,
+			      SMEM_SPINLOCK_SMEM_LOG);
+	if (ret) {
+		mb();
+		return ret;
+	}
+
+	init_syms();
+	mb();
+
+	return 0;
+}
+
+static ssize_t smem_log_write_bin(struct file *fp, const char __user *_buf,
+			 size_t count, loff_t *pos)
+{
+	void *buf;
+	int r;
+
+	if (count < sizeof(struct smem_log_item))
+		return -EINVAL;
+
+	buf = kmalloc(count, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	r = copy_from_user(buf, _buf, count);
+	if (r) {
+		kfree(buf);
+		return -EFAULT;
+	}
+
+	if (smem_log_enable)
+		smem_log_event_from_user(fp->private_data, buf,
+					sizeof(struct smem_log_item),
+					count / sizeof(struct smem_log_item));
+	kfree(buf);
+	return count;
+}
+
+static int smem_log_open(struct inode *ip, struct file *fp)
+{
+	fp->private_data = &inst[GEN];
+
+	return 0;
+}
+
+static int smem_log_release(struct inode *ip, struct file *fp)
+{
+	return 0;
+}
+
+static const struct file_operations smem_log_bin_fops = {
+	.owner = THIS_MODULE,
+	.write = smem_log_write_bin,
+	.open = smem_log_open,
+	.release = smem_log_release,
+};
+
+static struct miscdevice smem_log_dev = {
+	.minor = MISC_DYNAMIC_MINOR,
+	.name = "smem_log",
+	.fops = &smem_log_bin_fops,
+};
+
+#if defined(CONFIG_DEBUG_FS)
+
+#define SMEM_LOG_ITEM_PRINT_SIZE 160
+
+#define EVENTS_PRINT_SIZE \
+(SMEM_LOG_ITEM_PRINT_SIZE * SMEM_LOG_NUM_ENTRIES)
+
+static uint32_t smem_log_timeout_ms;
+module_param_named(timeout_ms, smem_log_timeout_ms,
+		   int, S_IRUGO | S_IWUSR | S_IWGRP);
+
+static int smem_log_debug_mask;
+module_param_named(debug_mask, smem_log_debug_mask, int,
+		   S_IRUGO | S_IWUSR | S_IWGRP);
+
+#define DBG(x...) do {\
+	if (smem_log_debug_mask) \
+		printk(KERN_DEBUG x);\
+	} while (0)
+
+static int update_read_avail(struct smem_log_inst *inst)
+{
+	int curr_read_avail;
+	unsigned long flags = 0;
+
+	if (!inst->idx)
+		return 0;
+
+	remote_spin_lock_irqsave(inst->remote_spinlock, flags);
+	curr_read_avail = (*inst->idx - inst->read_idx);
+	if (curr_read_avail < 0)
+		curr_read_avail = inst->num - inst->read_idx + *inst->idx;
+
+	DBG("%s: read = %d write = %d curr = %d last = %d\n", __func__,
+	    inst->read_idx, *inst->idx, curr_read_avail, inst->last_read_avail);
+
+	if (curr_read_avail < inst->last_read_avail) {
+		if (inst->last_read_avail != inst->num)
+			pr_info("smem_log: skipping %d log entries\n",
+				inst->last_read_avail);
+		inst->read_idx = *inst->idx + 1;
+		inst->last_read_avail = inst->num - 1;
+	} else
+		inst->last_read_avail = curr_read_avail;
+
+	remote_spin_unlock_irqrestore(inst->remote_spinlock, flags);
+
+	DBG("%s: read = %d write = %d curr = %d last = %d\n", __func__,
+	    inst->read_idx, *inst->idx, curr_read_avail, inst->last_read_avail);
+
+	return inst->last_read_avail;
+}
+
+static int _debug_dump(int log, char *buf, int max, uint32_t cont)
+{
+	unsigned int idx;
+	int write_idx, read_avail = 0;
+	unsigned long flags;
+	int i = 0;
+
+	if (!inst[log].events)
+		return 0;
+
+	if (cont && update_read_avail(&inst[log]) == 0)
+		return 0;
+
+	remote_spin_lock_irqsave(inst[log].remote_spinlock, flags);
+
+	if (cont) {
+		idx = inst[log].read_idx;
+		write_idx = (inst[log].read_idx + inst[log].last_read_avail);
+		if (write_idx >= inst[log].num)
+			write_idx -= inst[log].num;
+	} else {
+		write_idx = *inst[log].idx;
+		idx = (write_idx + 1);
+	}
+
+	DBG("%s: read %d write %d idx %d num %d\n", __func__,
+	    inst[log].read_idx, write_idx, idx, inst[log].num - 1);
+
+	while ((max - i) > 50) {
+		if ((inst[log].num - 1) < idx)
+			idx = 0;
+
+		if (idx == write_idx)
+			break;
+
+		if (inst[log].events[idx].identifier) {
+
+			i += scnprintf(buf + i, max - i,
+				       "%08x %08x %08x %08x %08x\n",
+				       inst[log].events[idx].identifier,
+				       inst[log].events[idx].timetick,
+				       inst[log].events[idx].data1,
+				       inst[log].events[idx].data2,
+				       inst[log].events[idx].data3);
+		}
+		idx++;
+	}
+	if (cont) {
+		inst[log].read_idx = idx;
+		read_avail = (write_idx - inst[log].read_idx);
+		if (read_avail < 0)
+			read_avail = inst[log].num - inst[log].read_idx +
+								write_idx;
+		inst[log].last_read_avail = read_avail;
+	}
+
+	remote_spin_unlock_irqrestore(inst[log].remote_spinlock, flags);
+
+	DBG("%s: read %d write %d idx %d num %d\n", __func__,
+	    inst[log].read_idx, write_idx, idx, inst[log].num);
+
+	return i;
+}
+
+static int _debug_dump_sym(int log, char *buf, int max, uint32_t cont)
+{
+	unsigned int idx;
+	int write_idx, read_avail = 0;
+	unsigned long flags;
+	int i = 0;
+
+	char *proc;
+	char *sub;
+	char *id;
+	const char *sym = NULL;
+
+	uint32_t proc_val = 0;
+	uint32_t sub_val = 0;
+	uint32_t id_val = 0;
+	uint32_t id_only_val = 0;
+	uint32_t data1 = 0;
+	uint32_t data2 = 0;
+	uint32_t data3 = 0;
+
+	if (!inst[log].events)
+		return 0;
+
+	if (cont && update_read_avail(&inst[log]) == 0)
+		return 0;
+
+	remote_spin_lock_irqsave(inst[log].remote_spinlock, flags);
+
+	if (cont) {
+		idx = inst[log].read_idx;
+		write_idx = (inst[log].read_idx + inst[log].last_read_avail);
+		if (write_idx >= inst[log].num)
+			write_idx -= inst[log].num;
+	} else {
+		write_idx = *inst[log].idx;
+		idx = (write_idx + 1);
+	}
+
+	DBG("%s: read %d write %d idx %d num %d\n", __func__,
+	    inst[log].read_idx, write_idx, idx, inst[log].num - 1);
+
+	for (; (max - i) > SMEM_LOG_ITEM_PRINT_SIZE; idx++) {
+		if (idx > (inst[log].num - 1))
+			idx = 0;
+
+		if (idx == write_idx)
+			break;
+
+		if (idx < inst[log].num) {
+			if (!inst[log].events[idx].identifier)
+				continue;
+
+			proc_val = PROC & inst[log].events[idx].identifier;
+			sub_val = SUB & inst[log].events[idx].identifier;
+			id_val = (SUB | ID) & inst[log].events[idx].identifier;
+			id_only_val = ID & inst[log].events[idx].identifier;
+			data1 = inst[log].events[idx].data1;
+			data2 = inst[log].events[idx].data2;
+			data3 = inst[log].events[idx].data3;
+
+			if (!(proc_val & SMEM_LOG_CONT)) {
+				i += scnprintf(buf + i, max - i, "\n");
+
+				proc = find_sym(ID_SYM, proc_val);
+
+				if (proc)
+					i += scnprintf(buf + i, max - i,
+						       "%4s: ", proc);
+				else
+					i += scnprintf(buf + i, max - i,
+						       "%04x: ",
+						       PROC &
+						       inst[log].events[idx].
+						       identifier);
+
+				i += scnprintf(buf + i, max - i, "%10u ",
+					       inst[log].events[idx].timetick);
+
+				sub = find_sym(BASE_SYM, sub_val);
+
+				if (sub)
+					i += scnprintf(buf + i, max - i,
+						       "%9s: ", sub);
+				else
+					i += scnprintf(buf + i, max - i,
+						       "%08x: ", sub_val);
+
+				id = find_sym(EVENT_SYM, id_val);
+
+				if (id)
+					i += scnprintf(buf + i, max - i,
+						       "%11s: ", id);
+				else
+					i += scnprintf(buf + i, max - i,
+						       "%08x: ", id_only_val);
+			}
+
+			if (proc_val & SMEM_LOG_CONT) {
+				i += scnprintf(buf + i, max - i,
+					       " %08x %08x %08x",
+					       data1, data2, data3);
+			} else if (id_val == SMEM_LOG_EVENT_CB) {
+				unsigned vals[] = {data2, data3};
+				unsigned j;
+				unsigned mask;
+				unsigned tmp;
+				unsigned once;
+				i += scnprintf(buf + i, max - i, "%08x ",
+					       data1);
+				for (j = 0; j < ARRAY_SIZE(vals); ++j) {
+					i += scnprintf(buf + i, max - i, "[");
+					mask = 0x80000000;
+					once = 0;
+					while (mask) {
+						tmp = vals[j] & mask;
+						mask >>= 1;
+						if (!tmp)
+							continue;
+						sym = find_sym(SMSM_SYM, tmp);
+
+						if (once)
+							i += scnprintf(buf + i,
+								       max - i,
+								       " ");
+						if (sym)
+							i += scnprintf(buf + i,
+								       max - i,
+								       "%s",
+								       sym);
+						else
+							i += scnprintf(buf + i,
+								       max - i,
+								       "%08x",
+								       tmp);
+						once = 1;
+					}
+					i += scnprintf(buf + i, max - i, "] ");
+				}
+			} else {
+				i += scnprintf(buf + i, max - i,
+					       "%08x %08x %08x",
+					       data1, data2, data3);
+			}
+		}
+	}
+	if (cont) {
+		inst[log].read_idx = idx;
+		read_avail = (write_idx - inst[log].read_idx);
+		if (read_avail < 0)
+			read_avail = inst[log].num - inst[log].read_idx +
+								write_idx;
+		inst[log].last_read_avail = read_avail;
+	}
+
+	remote_spin_unlock_irqrestore(inst[log].remote_spinlock, flags);
+
+	DBG("%s: read %d write %d idx %d num %d\n", __func__,
+	    inst[log].read_idx, write_idx, idx, inst[log].num);
+
+	return i;
+}
+
+static int debug_dump(char *buf, int max, uint32_t cont)
+{
+	int r;
+
+	if (!inst[GEN].idx || !inst[GEN].events)
+		return -ENODEV;
+
+	while (cont) {
+		update_read_avail(&inst[GEN]);
+		r = wait_event_interruptible_timeout(inst[GEN].read_wait,
+						     inst[GEN].last_read_avail,
+						     smem_log_timeout_ms *
+						     HZ / 1000);
+		DBG("%s: read available %d\n", __func__,
+		    inst[GEN].last_read_avail);
+		if (r < 0)
+			return 0;
+		else if (inst[GEN].last_read_avail)
+			break;
+	}
+
+	return _debug_dump(GEN, buf, max, cont);
+}
+
+static int debug_dump_sym(char *buf, int max, uint32_t cont)
+{
+	int r;
+
+	if (!inst[GEN].idx || !inst[GEN].events)
+		return -ENODEV;
+
+	while (cont) {
+		update_read_avail(&inst[GEN]);
+		r = wait_event_interruptible_timeout(inst[GEN].read_wait,
+						     inst[GEN].last_read_avail,
+						     smem_log_timeout_ms *
+						     HZ / 1000);
+		DBG("%s: readavailable %d\n", __func__,
+		    inst[GEN].last_read_avail);
+		if (r < 0)
+			return 0;
+		else if (inst[GEN].last_read_avail)
+			break;
+	}
+
+	return _debug_dump_sym(GEN, buf, max, cont);
+}
+
+static char debug_buffer[EVENTS_PRINT_SIZE];
+
+static ssize_t debug_read(struct file *file, char __user *buf,
+			  size_t count, loff_t *ppos)
+{
+	int r;
+	int bsize = 0;
+	int (*fill)(char *, int, uint32_t) = file->private_data;
+
+	if (!(*ppos)) {
+		bsize = fill(debug_buffer, EVENTS_PRINT_SIZE, 0);
+
+		if (bsize < 0)
+			bsize = scnprintf(debug_buffer,
+				EVENTS_PRINT_SIZE, "Log not available\n");
+	}
+	DBG("%s: count %zu ppos %d\n", __func__, count, (unsigned int)*ppos);
+	r =  simple_read_from_buffer(buf, count, ppos, debug_buffer,
+				     bsize);
+	return r;
+}
+
+static ssize_t debug_read_cont(struct file *file, char __user *buf,
+			       size_t count, loff_t *ppos)
+{
+	int (*fill)(char *, int, uint32_t) = file->private_data;
+	char *buffer = kmalloc(count, GFP_KERNEL);
+	int bsize;
+
+	if (!buffer)
+		return -ENOMEM;
+
+	bsize = fill(buffer, count, 1);
+	if (bsize < 0) {
+		if (*ppos == 0)
+			bsize = scnprintf(buffer, count, "Log not available\n");
+		else
+			bsize = 0;
+	}
+
+	DBG("%s: count %zu bsize %d\n", __func__, count, bsize);
+	if (copy_to_user(buf, buffer, bsize)) {
+		kfree(buffer);
+		return -EFAULT;
+	}
+	*ppos += bsize;
+	kfree(buffer);
+	return bsize;
+}
+
+static int debug_open(struct inode *inode, struct file *file)
+{
+	file->private_data = inode->i_private;
+	return 0;
+}
+
+static const struct file_operations debug_ops = {
+	.read = debug_read,
+	.open = debug_open,
+};
+
+static const struct file_operations debug_ops_cont = {
+	.read = debug_read_cont,
+	.open = debug_open,
+};
+
+static void debug_create(const char *name, umode_t mode,
+			 struct dentry *dent,
+			 int (*fill)(char *buf, int max, uint32_t cont),
+			 const struct file_operations *fops)
+{
+	debugfs_create_file(name, mode, dent, fill, fops);
+}
+
+static void smem_log_debugfs_init(void)
+{
+	struct dentry *dent;
+
+	dent = debugfs_create_dir("smem_log", 0);
+	if (IS_ERR(dent))
+		return;
+
+	debug_create("dump", 0444, dent, debug_dump, &debug_ops);
+	debug_create("dump_sym", 0444, dent, debug_dump_sym, &debug_ops);
+
+	debug_create("dump_cont", 0444, dent, debug_dump, &debug_ops_cont);
+	debug_create("dump_sym_cont", 0444, dent,
+		     debug_dump_sym, &debug_ops_cont);
+
+	smem_log_timeout_ms = 500;
+	smem_log_debug_mask = 0;
+}
+#else
+static void smem_log_debugfs_init(void) {}
+#endif
+
+static int smem_log_initialize(void)
+{
+	int ret;
+
+	ret = _smem_log_init();
+	if (ret < 0) {
+		pr_err("%s: init failed %d\n", __func__, ret);
+		return ret;
+	}
+
+	ret = misc_register(&smem_log_dev);
+	if (ret < 0) {
+		pr_err("%s: device register failed %d\n", __func__, ret);
+		return ret;
+	}
+
+	smem_log_enable = 1;
+	smem_log_initialized = 1;
+	smem_log_debugfs_init();
+	return ret;
+}
+
+static int smem_module_init_notifier(struct notifier_block *this,
+				    unsigned long code,
+				    void *_cmd)
+{
+	int ret = 0;
+	if (!smem_log_initialized)
+		ret = smem_log_initialize();
+	return ret;
+}
+
+static struct notifier_block nb = {
+	.notifier_call = smem_module_init_notifier,
+};
+
+static int __init smem_log_init(void)
+{
+	return smem_module_init_notifier_register(&nb);
+}
+
+module_init(smem_log_init);
+
+MODULE_DESCRIPTION("smem log");
+MODULE_LICENSE("GPL v2");
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/soc/qcom/smem_private.h	2019-01-22 16:16:26.675275131 +0100
@@ -0,0 +1,105 @@
+/* Copyright (c) 2013,2016 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _ARCH_ARM_MACH_MSM_SMEM_PRIVATE_H_
+#define _ARCH_ARM_MACH_MSM_SMEM_PRIVATE_H_
+
+#include <linux/remote_spinlock.h>
+#include <soc/qcom/ramdump.h>
+
+
+#define SMD_HEAP_SIZE 512
+extern uint32_t smem_max_items;
+
+struct smem_heap_info {
+	unsigned initialized;
+	unsigned free_offset;
+	unsigned heap_remaining;
+	unsigned reserved;
+};
+
+struct smem_heap_entry {
+	unsigned allocated;
+	unsigned offset;
+	unsigned size;
+	unsigned reserved; /* bits 1:0 reserved, bits 31:2 aux smem base addr */
+};
+#define BASE_ADDR_MASK 0xfffffffc
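+
+/*
+ * Illustrative decode of smem_heap_entry::reserved: the auxiliary smem
+ * base address is recovered as (reserved & BASE_ADDR_MASK), leaving
+ * bits 1:0 as reserved flags.
+ */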
+
+struct smem_proc_comm {
+	unsigned command;
+	unsigned status;
+	unsigned data1;
+	unsigned data2;
+};
+
+struct smem_shared {
+	struct smem_proc_comm proc_comm[4];
+	unsigned version[32];
+	struct smem_heap_info heap_info;
+	struct smem_heap_entry heap_toc[SMD_HEAP_SIZE];
+};
+
+struct smem_area {
+	phys_addr_t phys_addr;
+	resource_size_t size;
+	void __iomem *virt_addr;
+};
+
+/* used for unit testing spinlocks */
+remote_spinlock_t *smem_get_remote_spinlock(void);
+
+bool smem_initialized_check(void);
+
+/**
+ * smem_module_init_notifier_register() - Register a smem module
+ *                                       init notifier block
+ * @nb: Notifier block to be registered
+ *
+ * To mark a dependency on SMEM driver module initialization, register a
+ * notifier using this API. Once the SMEM module_init has completed, the
+ * registered module is notified.
+ */
+int smem_module_init_notifier_register(struct notifier_block *nb);
+
+/**
+ * smem_module_init_notifier_unregister() - Unregister a smem module
+ *                                       init notifier block
+ * @nb: Notifier block to be unregistered
+ */
+int smem_module_init_notifier_unregister(struct notifier_block *nb);
+
+/**
+ * smem_get_free_space() - Get the available allocation free space for a
+ *				partition
+ *
+ * @to_proc: remote SMEM host.  Determines the applicable partition
+ * @returns: size in bytes available to allocate
+ *
+ * Helper function for SMD so that SMD only scans the channel allocation
+ * table for a partition when it is reasonably certain that a channel has
+ * actually been created, because scanning can be expensive.  Creating a channel
+ * will consume some of the free space in a partition, so SMD can compare the
+ * last free space size against the current free space size to determine if
+ * a channel may have been created.  SMD can't do this directly, because the
+ * necessary partition internals are restricted to just SMEM.
+ */
+unsigned smem_get_free_space(unsigned to_proc);
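+
+/*
+ * Illustrative caller pattern (helper name is hypothetical): SMD caches
+ * the last reported size and rescans the channel allocation table only
+ * when the free space changes, e.g.
+ *
+ *	free = smem_get_free_space(remote_pid);
+ *	if (free != last_free) {
+ *		scan_channel_alloc_table();
+ *		last_free = free;
+ *	}
+ */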
+
+/**
+ * smem_get_version() - Get the smem user version number
+ *
+ * @idx: SMEM user idx in SMEM_VERSION_INFO table.
+ * @returns: smem version number on success, otherwise zero.
+ */
+unsigned smem_get_version(unsigned idx);
+#endif /* _ARCH_ARM_MACH_MSM_SMEM_PRIVATE_H_ */
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/soc/qcom/smp2p.c	2019-10-29 09:26:24.825214745 +0100
@@ -0,0 +1,1955 @@
+/* drivers/soc/qcom/smp2p.c
+ *
+ * Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#include <linux/list.h>
+#include <linux/ctype.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/ipc_logging.h>
+#include <linux/err.h>
+#include <soc/qcom/smem.h>
+#include "smp2p_private_api.h"
+#include "smp2p_private.h"
+
+#define NUM_LOG_PAGES 3
+
+/**
+ * struct msm_smp2p_out - This structure represents the outbound SMP2P entry.
+ *
+ * @remote_pid: Outbound processor ID.
+ * @name: Entry name.
+ * @out_edge_list: Adds this structure into smp2p_out_list_item::list.
+ * @msm_smp2p_notifier_list: Notifier block head used to notify for open event.
+ * @open_nb: Notifier block used to notify for open event.
+ * @l_smp2p_entry: Pointer to the actual entry in the SMEM item.
+ */
+struct msm_smp2p_out {
+	int remote_pid;
+	char name[SMP2P_MAX_ENTRY_NAME];
+	struct list_head out_edge_list;
+	struct raw_notifier_head msm_smp2p_notifier_list;
+	struct notifier_block *open_nb;
+	uint32_t __iomem *l_smp2p_entry;
+};
+
+/**
+ * struct smp2p_out_list_item - Maintains the state of outbound edge.
+ *
+ * @out_item_lock_lha1: Lock protecting all elements of the structure.
+ * @list: list of outbound entries (struct msm_smp2p_out).
+ * @smem_edge_out: Pointer to outbound smem item.
+ * @smem_edge_state: State of the outbound edge.
+ * @ops_ptr: Pointer to internal version-specific SMEM item access functions.
+ *
+ * @feature_ssr_ack_enabled: SSR ACK Support Enabled
+ * @restart_ack: Current cached state of the local ack bit
+ */
+struct smp2p_out_list_item {
+	spinlock_t out_item_lock_lha1;
+
+	struct list_head list;
+	struct smp2p_smem __iomem *smem_edge_out;
+	enum msm_smp2p_edge_state smem_edge_state;
+	struct smp2p_version_if *ops_ptr;
+
+	bool feature_ssr_ack_enabled;
+	bool restart_ack;
+};
+static struct smp2p_out_list_item out_list[SMP2P_NUM_PROCS];
+
+static void *log_ctx;
+static int smp2p_debug_mask = MSM_SMP2P_INFO | MSM_SMP2P_DEBUG;
+module_param_named(debug_mask, smp2p_debug_mask,
+		   int, S_IRUGO | S_IWUSR | S_IWGRP);
+
+/**
+ * struct smp2p_in - Represents the entry on remote processor.
+ *
+ * @name: Name of the entry.
+ * @remote_pid: Outbound processor ID.
+ * @in_edge_list: Adds this structure into smp2p_in_list_item::list.
+ * @in_notifier_list: List for notifier block for entry opening/updates.
+ * @prev_entry_val: Previous value of the entry.
+ * @entry_ptr: Points to the current value in smem item.
+ * @notifier_count: Counts the number of notifier registered per pid,entry.
+ */
+struct smp2p_in {
+	int remote_pid;
+	char name[SMP2P_MAX_ENTRY_NAME];
+	struct list_head in_edge_list;
+	struct raw_notifier_head in_notifier_list;
+	uint32_t prev_entry_val;
+	uint32_t __iomem *entry_ptr;
+	uint32_t notifier_count;
+};
+
+/**
+ * struct smp2p_in_list_item - Maintains the inbound edge state.
+ *
+ * @in_item_lock_lhb1: Lock protecting all elements of the structure.
+ * @list: List head for the entries on remote processor.
+ * @smem_edge_in: Pointer to the remote smem item.
+ */
+struct smp2p_in_list_item {
+	spinlock_t in_item_lock_lhb1;
+	struct list_head list;
+	struct smp2p_smem __iomem *smem_edge_in;
+	uint32_t item_size;
+	uint32_t safe_total_entries;
+};
+static struct smp2p_in_list_item in_list[SMP2P_NUM_PROCS];
+
+/**
+ * SMEM Item access function interface.
+ *
+ * This interface is used to help isolate the implementation of
+ * the functionality from any changes in the shared data structures
+ * that may happen as versions are changed.
+ *
+ * @is_supported: True if this version is supported by SMP2P
+ * @negotiate_features: Returns (sub)set of supported features
+ * @negotiation_complete:  Called when negotiation has been completed
+ * @find_entry: Finds existing / next empty entry
+ * @create_entry: Creates a new entry
+ * @read_entry: Reads the value of an entry
+ * @write_entry: Writes a new value to an entry
+ * @modify_entry: Does a read/modify/write of an entry
+ * @validate_size: Verifies the size of the remote SMEM item to ensure that
+ *                 an invalid item size doesn't result in an out-of-bounds
+ *                 memory access.
+ */
+struct smp2p_version_if {
+	/* common functions */
+	bool is_supported;
+	uint32_t (*negotiate_features)(uint32_t features);
+	void (*negotiation_complete)(struct smp2p_out_list_item *);
+	void (*find_entry)(struct smp2p_smem __iomem *item,
+			uint32_t entries_total,	char *name,
+			uint32_t **entry_ptr, int *empty_spot);
+
+	/* outbound entry functions */
+	int (*create_entry)(struct msm_smp2p_out *);
+	int (*read_entry)(struct msm_smp2p_out *, uint32_t *);
+	int (*write_entry)(struct msm_smp2p_out *, uint32_t);
+	int (*modify_entry)(struct msm_smp2p_out *, uint32_t, uint32_t, bool);
+
+	/* inbound entry functions */
+	struct smp2p_smem __iomem *(*validate_size)(int remote_pid,
+			struct smp2p_smem __iomem *, uint32_t);
+};
+
+static int smp2p_do_negotiation(int remote_pid, struct smp2p_out_list_item *p);
+static void smp2p_send_interrupt(int remote_pid);
+
+/* v0 (uninitialized SMEM item) interface functions */
+static uint32_t smp2p_negotiate_features_v0(uint32_t features);
+static void smp2p_negotiation_complete_v0(struct smp2p_out_list_item *out_item);
+static void smp2p_find_entry_v0(struct smp2p_smem __iomem *item,
+		uint32_t entries_total, char *name, uint32_t **entry_ptr,
+		int *empty_spot);
+static int smp2p_out_create_v0(struct msm_smp2p_out *);
+static int smp2p_out_read_v0(struct msm_smp2p_out *, uint32_t *);
+static int smp2p_out_write_v0(struct msm_smp2p_out *, uint32_t);
+static int smp2p_out_modify_v0(struct msm_smp2p_out *,
+					uint32_t, uint32_t, bool);
+static struct smp2p_smem __iomem *smp2p_in_validate_size_v0(int remote_pid,
+		struct smp2p_smem __iomem *smem_item, uint32_t size);
+
+/* v1 interface functions */
+static uint32_t smp2p_negotiate_features_v1(uint32_t features);
+static void smp2p_negotiation_complete_v1(struct smp2p_out_list_item *out_item);
+static void smp2p_find_entry_v1(struct smp2p_smem __iomem *item,
+		uint32_t entries_total, char *name, uint32_t **entry_ptr,
+		int *empty_spot);
+static int smp2p_out_create_v1(struct msm_smp2p_out *);
+static int smp2p_out_read_v1(struct msm_smp2p_out *, uint32_t *);
+static int smp2p_out_write_v1(struct msm_smp2p_out *, uint32_t);
+static int smp2p_out_modify_v1(struct msm_smp2p_out *,
+					uint32_t, uint32_t, bool);
+static struct smp2p_smem __iomem *smp2p_in_validate_size_v1(int remote_pid,
+		struct smp2p_smem __iomem *smem_item, uint32_t size);
+
+/* Version interface functions */
+static struct smp2p_version_if version_if[] = {
+	[0] = {
+		.negotiate_features = smp2p_negotiate_features_v0,
+		.negotiation_complete = smp2p_negotiation_complete_v0,
+		.find_entry = smp2p_find_entry_v0,
+		.create_entry = smp2p_out_create_v0,
+		.read_entry = smp2p_out_read_v0,
+		.write_entry = smp2p_out_write_v0,
+		.modify_entry = smp2p_out_modify_v0,
+		.validate_size = smp2p_in_validate_size_v0,
+	},
+	[1] = {
+		.is_supported = true,
+		.negotiate_features = smp2p_negotiate_features_v1,
+		.negotiation_complete = smp2p_negotiation_complete_v1,
+		.find_entry = smp2p_find_entry_v1,
+		.create_entry = smp2p_out_create_v1,
+		.read_entry = smp2p_out_read_v1,
+		.write_entry = smp2p_out_write_v1,
+		.modify_entry = smp2p_out_modify_v1,
+		.validate_size = smp2p_in_validate_size_v1,
+	},
+};
+
+/* interrupt configuration (filled by device tree) */
+static struct smp2p_interrupt_config smp2p_int_cfgs[SMP2P_NUM_PROCS] = {
+	[SMP2P_MODEM_PROC].name = "modem",
+	[SMP2P_AUDIO_PROC].name = "lpass",
+	[SMP2P_SENSOR_PROC].name = "dsps",
+	[SMP2P_WIRELESS_PROC].name = "wcnss",
+	[SMP2P_CDSP_PROC].name = "cdsp",
+	[SMP2P_TZ_PROC].name = "tz",
+	[SMP2P_REMOTE_MOCK_PROC].name = "mock",
+};
+
+/**
+ * smp2p_get_log_ctx - Return log context for other SMP2P modules.
+ *
+ * @returns: Log context or NULL if none.
+ */
+void *smp2p_get_log_ctx(void)
+{
+	return log_ctx;
+}
+
+/**
+ * smp2p_get_debug_mask - Return debug mask.
+ *
+ * @returns: Current debug mask.
+ */
+int smp2p_get_debug_mask(void)
+{
+	return smp2p_debug_mask;
+}
+
+/**
+ * smp2p_get_interrupt_config - Return interrupt configuration.
+ *
+ * @returns: Interrupt configuration array for use by debugfs.
+ */
+struct smp2p_interrupt_config *smp2p_get_interrupt_config(void)
+{
+	return smp2p_int_cfgs;
+}
+
+/**
+ * smp2p_pid_to_name - Look up the name for a remote pid.
+ *
+ * @remote_pid: Remote processor ID.
+ * @returns: Name (may be NULL).
+ */
+const char *smp2p_pid_to_name(int remote_pid)
+{
+	if (remote_pid >= SMP2P_NUM_PROCS)
+		return NULL;
+
+	return smp2p_int_cfgs[remote_pid].name;
+}
+
+/**
+ * smp2p_get_in_item - Return pointer to remote smem item.
+ *
+ * @remote_pid: Processor ID of the remote system.
+ * @returns:    Pointer to inbound SMEM item
+ *
+ * This is used by debugfs to print the smem items.
+ */
+struct smp2p_smem __iomem *smp2p_get_in_item(int remote_pid)
+{
+	void *ret = NULL;
+	unsigned long flags;
+
+	if (remote_pid >= SMP2P_NUM_PROCS)
+		return NULL;
+
+	spin_lock_irqsave(&in_list[remote_pid].in_item_lock_lhb1, flags);
+	ret = in_list[remote_pid].smem_edge_in;
+	spin_unlock_irqrestore(&in_list[remote_pid].in_item_lock_lhb1,
+								flags);
+
+	return ret;
+}
+
+/**
+ * smp2p_get_out_item - Return pointer to outbound SMEM item.
+ *
+ * @remote_pid: Processor ID of remote system.
+ * @state:      Edge state of the outbound SMEM item.
+ * @returns:    Pointer to outbound (remote) SMEM item.
+ */
+struct smp2p_smem __iomem *smp2p_get_out_item(int remote_pid, int *state)
+{
+	void *ret = NULL;
+	unsigned long flags;
+
+	if (remote_pid >= SMP2P_NUM_PROCS)
+		return NULL;
+
+	spin_lock_irqsave(&out_list[remote_pid].out_item_lock_lha1, flags);
+	ret = out_list[remote_pid].smem_edge_out;
+	if (state)
+		*state = out_list[remote_pid].smem_edge_state;
+	spin_unlock_irqrestore(&out_list[remote_pid].out_item_lock_lha1, flags);
+
+	return ret;
+}
+
+/**
+ * smp2p_get_smem_item_id - Return the proper SMEM item ID.
+ *
+ * @write_pid:	Processor that will write to the item.
+ * @read_pid:   Processor that will read from the item.
+ * @returns:    SMEM ID
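+ *
+ * For example, the item written by Apps and read by the modem resolves to
+ * SMEM_SMP2P_APPS_BASE + SMP2P_MODEM_PROC.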
+ */
+static int smp2p_get_smem_item_id(int write_pid, int read_pid)
+{
+	int ret = -EINVAL;
+
+	switch (write_pid) {
+	case SMP2P_APPS_PROC:
+		ret = SMEM_SMP2P_APPS_BASE + read_pid;
+		break;
+	case SMP2P_MODEM_PROC:
+		ret = SMEM_SMP2P_MODEM_BASE + read_pid;
+		break;
+	case SMP2P_AUDIO_PROC:
+		ret = SMEM_SMP2P_AUDIO_BASE + read_pid;
+		break;
+	case SMP2P_SENSOR_PROC:
+		ret = SMEM_SMP2P_SENSOR_BASE + read_pid;
+		break;
+	case SMP2P_WIRELESS_PROC:
+		ret = SMEM_SMP2P_WIRLESS_BASE + read_pid;
+		break;
+	case SMP2P_CDSP_PROC:
+		ret = SMEM_SMP2P_CDSP_BASE + read_pid;
+		break;
+	case SMP2P_POWER_PROC:
+		ret = SMEM_SMP2P_POWER_BASE + read_pid;
+		break;
+	case SMP2P_TZ_PROC:
+		ret = SMEM_SMP2P_TZ_BASE + read_pid;
+		break;
+	}
+
+	return ret;
+}
+
+/**
+ * smp2p_get_local_smem_item - Return SMEM item owned by the local processor.
+ *
+ * @remote_pid: Remote processor ID
+ * @returns:    NULL for failure; otherwise pointer to SMEM item
+ *
+ * Must be called with out_item_lock_lha1 locked for mock proc.
+ */
+static void *smp2p_get_local_smem_item(int remote_pid)
+{
+	struct smp2p_smem __iomem *item_ptr = NULL;
+
+	if (remote_pid < SMP2P_REMOTE_MOCK_PROC) {
+		unsigned size;
+		int smem_id;
+
+		/* lookup or allocate SMEM item */
+		smem_id = smp2p_get_smem_item_id(SMP2P_APPS_PROC, remote_pid);
+		if (smem_id >= 0) {
+			item_ptr = smem_get_entry(smem_id, &size,
+								remote_pid, 0);
+
+			if (!item_ptr) {
+				size = sizeof(struct smp2p_smem_item);
+				item_ptr = smem_alloc(smem_id, size,
+								remote_pid, 0);
+			}
+		}
+	} else if (remote_pid == SMP2P_REMOTE_MOCK_PROC) {
+		/*
+		 * This path is only used during unit testing so
+		 * the GFP_ATOMIC allocation should not be a
+		 * concern.
+		 */
+		if (!out_list[SMP2P_REMOTE_MOCK_PROC].smem_edge_out)
+			item_ptr = kzalloc(
+					sizeof(struct smp2p_smem_item),
+					GFP_ATOMIC);
+	}
+	return item_ptr;
+}
+
+/**
+ * smp2p_get_remote_smem_item - Return remote SMEM item.
+ *
+ * @remote_pid: Remote processor ID
+ * @out_item:   Pointer to the output item structure
+ * @returns:    NULL for failure; otherwise pointer to SMEM item
+ *
+ * Return pointer to SMEM item owned by the remote processor.
+ *
+ * Note that this function does an SMEM lookup which uses a remote spinlock,
+ * so this function should not be called more than necessary.
+ *
+ * Must be called with out_item_lock_lha1 and in_item_lock_lhb1 locked.
+ */
+static void *smp2p_get_remote_smem_item(int remote_pid,
+	struct smp2p_out_list_item *out_item)
+{
+	void *item_ptr = NULL;
+	unsigned size = 0;
+
+	if (!out_item)
+		return item_ptr;
+
+	if (remote_pid < SMP2P_REMOTE_MOCK_PROC) {
+		int smem_id;
+
+		smem_id = smp2p_get_smem_item_id(remote_pid, SMP2P_APPS_PROC);
+		if (smem_id >= 0)
+			item_ptr = smem_get_entry(smem_id, &size,
+								remote_pid, 0);
+	} else if (remote_pid == SMP2P_REMOTE_MOCK_PROC) {
+		item_ptr = msm_smp2p_get_remote_mock_smem_item(&size);
+	}
+	item_ptr = out_item->ops_ptr->validate_size(remote_pid, item_ptr, size);
+
+	return item_ptr;
+}
+
+/**
+ * smp2p_ssr_ack_needed - Returns true if SSR ACK required
+ *
+ * @rpid: Remote processor ID
+ *
+ * Must be called with out_item_lock_lha1 and in_item_lock_lhb1 locked.
+ */
+static bool smp2p_ssr_ack_needed(uint32_t rpid)
+{
+	bool ssr_done;
+
+	if (!out_list[rpid].feature_ssr_ack_enabled)
+		return false;
+
+	ssr_done = SMP2P_GET_RESTART_DONE(in_list[rpid].smem_edge_in->flags);
+	if (ssr_done != out_list[rpid].restart_ack)
+		return true;
+
+	return false;
+}
+
+/**
+ * smp2p_do_ssr_ack - Handles SSR ACK
+ *
+ * @rpid: Remote processor ID
+ *
+ * Must be called with out_item_lock_lha1 and in_item_lock_lhb1 locked.
+ */
+static void smp2p_do_ssr_ack(uint32_t rpid)
+{
+	bool ack;
+
+	if (!smp2p_ssr_ack_needed(rpid))
+		return;
+
+	ack = !out_list[rpid].restart_ack;
+	SMP2P_INFO("%s: ssr ack pid %d: %d -> %d\n", __func__, rpid,
+			out_list[rpid].restart_ack, ack);
+	out_list[rpid].restart_ack = ack;
+	SMP2P_SET_RESTART_ACK(out_list[rpid].smem_edge_out->flags, ack);
+	smp2p_send_interrupt(rpid);
+}
+
+/**
+ * smp2p_negotiate_features_v1 - Initial feature negotiation.
+ *
+ * @features: Inbound feature set.
+ * @returns: Supported features (the same as, or a subset of, @features).
+ */
+static uint32_t smp2p_negotiate_features_v1(uint32_t features)
+{
+	return SMP2P_FEATURE_SSR_ACK;
+}
+
+/**
+ * smp2p_negotiation_complete_v1 - Negotiation completed
+ *
+ * @out_item:   Pointer to the output item structure
+ *
+ * Can be used to do final configuration based upon the negotiated feature set.
+ *
+ * Must be called with out_item_lock_lha1 locked.
+ */
+static void smp2p_negotiation_complete_v1(struct smp2p_out_list_item *out_item)
+{
+	uint32_t features;
+
+	features = SMP2P_GET_FEATURES(out_item->smem_edge_out->feature_version);
+
+	if (features & SMP2P_FEATURE_SSR_ACK)
+		out_item->feature_ssr_ack_enabled = true;
+}
+
+/**
+ * smp2p_find_entry_v1 - Search for an entry in SMEM item.
+ *
+ * @item: Pointer to the smem item.
+ * @entries_total: Total number of entries in @item.
+ * @name: Name of the entry.
+ * @entry_ptr: Set to pointer of entry if found, NULL otherwise.
+ * @empty_spot: If non-null, set to the value of the next empty entry.
+ *
+ * Searches the SMEM item for entry @name.  If found, @entry_ptr is set to
+ * point at the entry; otherwise the index of the first empty slot is
+ * returned in @empty_spot.
+ */
+static void smp2p_find_entry_v1(struct smp2p_smem __iomem *item,
+		uint32_t entries_total, char *name, uint32_t **entry_ptr,
+		int *empty_spot)
+{
+	int i;
+	struct smp2p_entry_v1 *pos;
+	char entry_name[SMP2P_MAX_ENTRY_NAME];
+
+	if (!item || !name || !entry_ptr) {
+		SMP2P_ERR("%s: invalid arguments %d %d %d\n",
+				__func__, !item, !name, !entry_ptr);
+		return;
+	}
+
+	*entry_ptr = NULL;
+	if (empty_spot)
+		*empty_spot = -1;
+
+	pos = (struct smp2p_entry_v1 *)(char *)(item + 1);
+	for (i = 0; i < entries_total; i++, ++pos) {
+		memcpy_fromio(entry_name, pos->name, SMP2P_MAX_ENTRY_NAME);
+		if (entry_name[0]) {
+			if (!strcmp(entry_name, name)) {
+				*entry_ptr = &pos->entry;
+				break;
+			}
+		} else if (empty_spot && *empty_spot < 0) {
+			*empty_spot = i;
+		}
+	}
+}
+
+/**
+ * smp2p_out_create_v1 - Creates an outbound SMP2P entry.
+ *
+ * @out_entry: Pointer to the SMP2P entry structure.
+ * @returns: 0 on success, standard Linux error code otherwise.
+ *
+ * Must be called with out_item_lock_lha1 locked.
+ */
+static int smp2p_out_create_v1(struct msm_smp2p_out *out_entry)
+{
+	struct smp2p_smem __iomem *smp2p_h_ptr;
+	struct smp2p_out_list_item *p_list;
+	uint32_t *state_entry_ptr;
+	uint32_t empty_spot;
+	uint32_t entries_total;
+	uint32_t entries_valid;
+
+	if (!out_entry)
+		return -EINVAL;
+
+	p_list = &out_list[out_entry->remote_pid];
+	if (p_list->smem_edge_state != SMP2P_EDGE_STATE_OPENED) {
+		SMP2P_ERR("%s: item '%s':%d not yet OPEN - wrong create called\n",
+			__func__, out_entry->name, out_entry->remote_pid);
+		return -ENODEV;
+	}
+
+	smp2p_h_ptr = p_list->smem_edge_out;
+	entries_total = SMP2P_GET_ENT_TOTAL(smp2p_h_ptr->valid_total_ent);
+	entries_valid = SMP2P_GET_ENT_VALID(smp2p_h_ptr->valid_total_ent);
+
+	p_list->ops_ptr->find_entry(smp2p_h_ptr, entries_total,
+			out_entry->name, &state_entry_ptr, &empty_spot);
+	if (state_entry_ptr) {
+		/* re-use existing entry */
+		out_entry->l_smp2p_entry = state_entry_ptr;
+
+		SMP2P_DBG("%s: item '%s':%d reused\n", __func__,
+				out_entry->name, out_entry->remote_pid);
+	} else if (entries_valid >= entries_total) {
+		/* a new entry is needed, but the item is full */
+		SMP2P_ERR("%s: no space for item '%s':%d\n",
+			__func__, out_entry->name, out_entry->remote_pid);
+		return -ENOMEM;
+	} else {
+		/* allocate a new entry */
+		struct smp2p_entry_v1 *entry_ptr;
+
+		entry_ptr = (struct smp2p_entry_v1 *)((char *)(smp2p_h_ptr + 1)
+			+ empty_spot * sizeof(struct smp2p_entry_v1));
+		memcpy_toio(entry_ptr->name, out_entry->name,
+						sizeof(entry_ptr->name));
+		out_entry->l_smp2p_entry = &entry_ptr->entry;
+		++entries_valid;
+		SMP2P_DBG("%s: item '%s':%d fully created as entry %d of %d\n",
+				__func__, out_entry->name,
+				out_entry->remote_pid,
+				entries_valid, entries_total);
+		SMP2P_SET_ENT_VALID(smp2p_h_ptr->valid_total_ent,
+				entries_valid);
+		smp2p_send_interrupt(out_entry->remote_pid);
+	}
+	raw_notifier_call_chain(&out_entry->msm_smp2p_notifier_list,
+		  SMP2P_OPEN, 0);
+
+	return 0;
+}
+
+/**
+ * smp2p_out_read_v1 -  Read the data from an outbound entry.
+ *
+ * @out_entry: Pointer to the SMP2P entry structure.
+ * @data: Out pointer, the data is available in this argument on success.
+ * @returns: 0 on success, standard Linux error code otherwise.
+ *
+ * Must be called with out_item_lock_lha1 locked.
+ */
+static int smp2p_out_read_v1(struct msm_smp2p_out *out_entry, uint32_t *data)
+{
+	struct smp2p_smem __iomem  *smp2p_h_ptr;
+	uint32_t remote_pid;
+
+	if (!out_entry)
+		return -EINVAL;
+
+	smp2p_h_ptr = out_list[out_entry->remote_pid].smem_edge_out;
+	remote_pid = SMP2P_GET_REMOTE_PID(smp2p_h_ptr->rem_loc_proc_id);
+
+	if (remote_pid != out_entry->remote_pid)
+		return -EINVAL;
+
+	if (out_entry->l_smp2p_entry) {
+		*data = readl_relaxed(out_entry->l_smp2p_entry);
+	} else {
+		SMP2P_ERR("%s: '%s':%d not yet OPEN\n", __func__,
+				out_entry->name, remote_pid);
+		return -ENODEV;
+	}
+
+	return 0;
+}
+
+/**
+ * smp2p_out_write_v1 - Writes an outbound entry value.
+ *
+ * @out_entry: Pointer to the SMP2P entry structure.
+ * @data: The data to be written.
+ * @returns: 0 on success, standard Linux error code otherwise.
+ *
+ * Must be called with out_item_lock_lha1 locked.
+ */
+static int smp2p_out_write_v1(struct msm_smp2p_out *out_entry, uint32_t data)
+{
+	struct smp2p_smem __iomem  *smp2p_h_ptr;
+	uint32_t remote_pid;
+
+	if (!out_entry)
+		return -EINVAL;
+
+	smp2p_h_ptr = out_list[out_entry->remote_pid].smem_edge_out;
+	remote_pid = SMP2P_GET_REMOTE_PID(smp2p_h_ptr->rem_loc_proc_id);
+
+	if (remote_pid != out_entry->remote_pid)
+		return -EINVAL;
+
+	if (out_entry->l_smp2p_entry) {
+		writel_relaxed(data, out_entry->l_smp2p_entry);
+		smp2p_send_interrupt(remote_pid);
+	} else {
+		SMP2P_ERR("%s: '%s':%d not yet OPEN\n", __func__,
+				out_entry->name, remote_pid);
+		return -ENODEV;
+	}
+	return 0;
+}
+
+/**
+ * smp2p_out_modify_v1 - Modifies an outbound entry value.
+ *
+ * @out_entry: Pointer to the SMP2P entry structure.
+ * @set_mask:  Mask containing the bits that need to be set.
+ * @clear_mask: Mask containing the bits that need to be cleared.
+ * @send_irq: Flag to send interrupt to remote processor.
+ * @returns: 0 on success, standard Linux error code otherwise.
+ *
+ * The clear mask is applied first, so if a bit is set in both the clear
+ * and set masks, the bit ends up set.
+ *
+ * Must be called with out_item_lock_lha1 locked.
+ */
+static int smp2p_out_modify_v1(struct msm_smp2p_out *out_entry,
+		uint32_t set_mask, uint32_t clear_mask, bool send_irq)
+{
+	struct smp2p_smem __iomem  *smp2p_h_ptr;
+	uint32_t remote_pid;
+
+	if (!out_entry)
+		return -EINVAL;
+
+	smp2p_h_ptr = out_list[out_entry->remote_pid].smem_edge_out;
+	remote_pid = SMP2P_GET_REMOTE_PID(smp2p_h_ptr->rem_loc_proc_id);
+
+	if (remote_pid != out_entry->remote_pid)
+		return -EINVAL;
+
+	if (out_entry->l_smp2p_entry) {
+		uint32_t curr_value;
+
+		curr_value = readl_relaxed(out_entry->l_smp2p_entry);
+		writel_relaxed((curr_value & ~clear_mask) | set_mask,
+			out_entry->l_smp2p_entry);
+	} else {
+		SMP2P_ERR("%s: '%s':%d not yet OPEN\n", __func__,
+				out_entry->name, remote_pid);
+		return -ENODEV;
+	}
+
+	if (send_irq)
+		smp2p_send_interrupt(remote_pid);
+	return 0;
+}
+
+/**
+ * smp2p_in_validate_size_v1 - Size validation for version 1.
+ *
+ * @remote_pid: Remote processor ID.
+ * @smem_item:  Pointer to the inbound SMEM item.
+ * @size:       Size of the SMEM item.
+ * @returns:    Validated smem_item pointer (or NULL if size is too small).
+ *
+ * Validates we don't end up with out-of-bounds array access due to invalid
+ * smem item size.  If out-of-bound array access can't be avoided, then an
+ * error message is printed and NULL is returned to prevent usage of the
+ * item.
+ *
+ * Must be called with in_item_lock_lhb1 locked.
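+ *
+ * Worked example (illustrative numbers): if the remote header claims 16
+ * total entries but @size only covers the header plus 3 entries, then
+ * safe_total_entries is clamped to
+ * (@size - sizeof(struct smp2p_smem)) / sizeof(struct smp2p_entry_v1) = 3
+ * and an error is logged.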
+ */
+static struct smp2p_smem __iomem *smp2p_in_validate_size_v1(int remote_pid,
+		struct smp2p_smem __iomem *smem_item, uint32_t size)
+{
+	uint32_t total_entries;
+	unsigned expected_size;
+	struct smp2p_smem __iomem *item_ptr;
+	struct smp2p_in_list_item *in_item;
+
+	if (remote_pid >= SMP2P_NUM_PROCS || !smem_item)
+		return NULL;
+
+	in_item = &in_list[remote_pid];
+	item_ptr = (struct smp2p_smem __iomem *)smem_item;
+
+	total_entries = SMP2P_GET_ENT_TOTAL(item_ptr->valid_total_ent);
+	if (total_entries > 0) {
+		in_item->safe_total_entries = total_entries;
+		in_item->item_size = size;
+
+		expected_size = sizeof(struct smp2p_smem) +
+			(total_entries * sizeof(struct smp2p_entry_v1));
+
+		if (size < expected_size) {
+			unsigned new_size;
+
+			new_size = size;
+			new_size -= sizeof(struct smp2p_smem);
+			new_size /= sizeof(struct smp2p_entry_v1);
+			in_item->safe_total_entries = new_size;
+
+			SMP2P_ERR(
+				"%s pid %d item too small for %d entries; expected: %d actual: %d; reduced to %d entries\n",
+				__func__, remote_pid, total_entries,
+				expected_size, size, new_size);
+		}
+	} else {
+		/*
+		 * Total entries is 0, so the entry is still being initialized
+		 * or is invalid.  Either way, treat it as if the item does
+		 * not exist yet.
+		 */
+		in_item->safe_total_entries = 0;
+		in_item->item_size = 0;
+	}
+	return item_ptr;
+}
+
+/**
+ * smp2p_negotiate_features_v0 - Initial feature negotiation.
+ *
+ * @features: Inbound feature set.
+ * @returns: 0 (no features supported for v0).
+ */
+static uint32_t smp2p_negotiate_features_v0(uint32_t features)
+{
+	/* no supported features */
+	return 0;
+}
+
+/**
+ * smp2p_negotiation_complete_v0 - Negotiation completed
+ *
+ * @out_item:   Pointer to the output item structure
+ *
+ * Can be used to do final configuration based upon the negotiated feature set.
+ */
+static void smp2p_negotiation_complete_v0(struct smp2p_out_list_item *out_item)
+{
+	SMP2P_ERR("%s: invalid negotiation complete for v0 pid %d\n",
+		__func__,
+		SMP2P_GET_REMOTE_PID(out_item->smem_edge_out->rem_loc_proc_id));
+}
+
+/**
+ * smp2p_find_entry_v0 - Stub function.
+ *
+ * @item: Pointer to the smem item.
+ * @entries_total: Total number of entries in @item.
+ * @name: Name of the entry.
+ * @entry_ptr: Set to pointer of entry if found, NULL otherwise.
+ * @empty_spot: If non-null, set to the value of the next empty entry.
+ *
+ * Entries cannot be searched for until item negotiation has been completed.
+ */
+static void smp2p_find_entry_v0(struct smp2p_smem __iomem *item,
+		uint32_t entries_total, char *name, uint32_t **entry_ptr,
+		int *empty_spot)
+{
+	if (entry_ptr)
+		*entry_ptr = NULL;
+
+	if (empty_spot)
+		*empty_spot = -1;
+
+	SMP2P_ERR("%s: invalid - item negotiation incomplete\n", __func__);
+}
+
+/**
+ * smp2p_out_create_v0 - Initial creation function.
+ *
+ * @out_entry: Pointer to the SMP2P entry structure.
+ * @returns: 0 on success, standard Linux error code otherwise.
+ *
+ * If the outbound SMEM item negotiation is not complete, then
+ * this function is called to start the negotiation process.
+ * Eventually when the negotiation process is complete, this
+ * function pointer is switched with the appropriate function
+ * for the version of SMP2P being created.
+ *
+ * Must be called with out_item_lock_lha1 locked.
+ */
+static int smp2p_out_create_v0(struct msm_smp2p_out *out_entry)
+{
+	int edge_state;
+	struct smp2p_out_list_item *item_ptr;
+
+	if (!out_entry)
+		return -EINVAL;
+
+	edge_state = out_list[out_entry->remote_pid].smem_edge_state;
+
+	switch (edge_state) {
+	case SMP2P_EDGE_STATE_CLOSED:
+		/* start negotiation */
+		item_ptr = &out_list[out_entry->remote_pid];
+		edge_state = smp2p_do_negotiation(out_entry->remote_pid,
+				item_ptr);
+		break;
+
+	case SMP2P_EDGE_STATE_OPENING:
+		/* still negotiating */
+		break;
+
+	case SMP2P_EDGE_STATE_OPENED:
+		SMP2P_ERR("%s: item '%s':%d opened - wrong create called\n",
+			__func__, out_entry->name, out_entry->remote_pid);
+		break;
+
+	default:
+		SMP2P_ERR("%s: item '%s':%d invalid SMEM item state %d\n",
+			__func__, out_entry->name, out_entry->remote_pid,
+			edge_state);
+		break;
+	}
+	return 0;
+}
+
+/**
+ * smp2p_out_read_v0 - Stub function.
+ *
+ * @out_entry: Pointer to the SMP2P entry structure.
+ * @data: Out pointer, the data is available in this argument on success.
+ * @returns: -ENODEV
+ */
+static int smp2p_out_read_v0(struct msm_smp2p_out *out_entry, uint32_t *data)
+{
+	SMP2P_ERR("%s: item '%s':%d not OPEN\n",
+		__func__, out_entry->name, out_entry->remote_pid);
+
+	return -ENODEV;
+}
+
+/**
+ * smp2p_out_write_v0 - Stub function.
+ *
+ * @out_entry: Pointer to the SMP2P entry structure.
+ * @data: The data to be written.
+ * @returns: -ENODEV
+ */
+static int smp2p_out_write_v0(struct msm_smp2p_out *out_entry, uint32_t data)
+{
+	SMP2P_ERR("%s: item '%s':%d not yet OPEN\n",
+		__func__, out_entry->name, out_entry->remote_pid);
+
+	return -ENODEV;
+}
+
+/**
+ * smp2p_out_modify_v0 - Stub function.
+ *
+ * @set_mask:  Mask containing the bits that needs to be set.
+ * @clear_mask: Mask containing the bits that needs to be cleared.
+ * @send_irq: Flag to send interrupt to remote processor.
+ * @returns: -ENODEV
+ */
+static int smp2p_out_modify_v0(struct msm_smp2p_out *out_entry,
+		uint32_t set_mask, uint32_t clear_mask, bool send_irq)
+{
+	SMP2P_ERR("%s: item '%s':%d not yet OPEN\n",
+		__func__, out_entry->name, out_entry->remote_pid);
+
+	return -ENODEV;
+}
+
+/**
+ * smp2p_in_validate_size_v0 - Stub function.
+ *
+ * @remote_pid: Remote processor ID.
+ * @smem_item:  Pointer to the inbound SMEM item.
+ * @size:       Size of the SMEM item.
+ * @returns:    Validated smem_item pointer (or NULL if size is too small).
+ *
+ * Validates we don't end up with out-of-bounds array access due to invalid
+ * smem item size.  If out-of-bound array access can't be avoided, then an
+ * error message is printed and NULL is returned to prevent usage of the
+ * item.
+ *
+ * Must be called with in_item_lock_lhb1 locked.
+ */
+static struct smp2p_smem __iomem *smp2p_in_validate_size_v0(int remote_pid,
+		struct smp2p_smem __iomem *smem_item, uint32_t size)
+{
+	struct smp2p_in_list_item *in_item;
+
+	if (remote_pid >= SMP2P_NUM_PROCS || !smem_item)
+		return NULL;
+
+	in_item = &in_list[remote_pid];
+
+	if (size < sizeof(struct smp2p_smem)) {
+		SMP2P_ERR(
+			"%s pid %d item size too small; expected: %zu actual: %d\n",
+			__func__, remote_pid,
+			sizeof(struct smp2p_smem), size);
+		smem_item = NULL;
+		in_item->item_size = 0;
+	} else {
+		in_item->item_size = size;
+	}
+	return smem_item;
+}
+
+/**
+ * smp2p_init_header - Initializes the header of the smem item.
+ *
+ * @header_ptr: Pointer to the smp2p header.
+ * @local_pid: Local processor ID.
+ * @remote_pid: Remote processor ID.
+ * @feature: Features of smp2p implementation.
+ * @version: Version of smp2p implementation.
+ *
+ * Initializes the header as defined in the protocol specification.
+ */
+void smp2p_init_header(struct smp2p_smem __iomem *header_ptr,
+		int local_pid, int remote_pid,
+		uint32_t features, uint32_t version)
+{
+	header_ptr->magic = SMP2P_MAGIC;
+	SMP2P_SET_LOCAL_PID(header_ptr->rem_loc_proc_id, local_pid);
+	SMP2P_SET_REMOTE_PID(header_ptr->rem_loc_proc_id, remote_pid);
+	SMP2P_SET_FEATURES(header_ptr->feature_version, features);
+	SMP2P_SET_ENT_TOTAL(header_ptr->valid_total_ent, SMP2P_MAX_ENTRY);
+	SMP2P_SET_ENT_VALID(header_ptr->valid_total_ent, 0);
+	header_ptr->flags = 0;
+
+	/* ensure that all fields are valid before version is written */
+	wmb();
+	SMP2P_SET_VERSION(header_ptr->feature_version, version);
+}
+
+/**
+ * smp2p_do_negotiation - Implements negotiation algorithm.
+ *
+ * @remote_pid: Remote processor ID.
+ * @out_item: Pointer to the outbound list item.
+ * @returns: 0 on success, standard Linux error code otherwise.
+ *
+ * Must be called with out_item_lock_lha1 locked.  Will internally lock
+ * in_item_lock_lhb1.
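+ *
+ * In short: the local side publishes min(its highest supported version,
+ * the remote version) plus the negotiated feature set in its own item;
+ * once both sides advertise the same version and features, the edge is
+ * marked OPENED and any pending outbound entries are created.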
+ */
+static int smp2p_do_negotiation(int remote_pid,
+		struct smp2p_out_list_item *out_item)
+{
+	struct smp2p_smem __iomem *r_smem_ptr;
+	struct smp2p_smem __iomem *l_smem_ptr;
+	uint32_t r_version;
+	uint32_t r_feature;
+	uint32_t l_version, l_feature;
+	int prev_state;
+
+	if (remote_pid >= SMP2P_NUM_PROCS || !out_item)
+		return -EINVAL;
+	if (out_item->smem_edge_state == SMP2P_EDGE_STATE_FAILED)
+		return -EPERM;
+
+	prev_state = out_item->smem_edge_state;
+
+	/* create local item */
+	if (!out_item->smem_edge_out) {
+		out_item->smem_edge_out = smp2p_get_local_smem_item(remote_pid);
+		if (!out_item->smem_edge_out) {
+			SMP2P_ERR(
+				"%s unable to allocate SMEM item for pid %d\n",
+				__func__, remote_pid);
+			return -ENODEV;
+		}
+		out_item->smem_edge_state = SMP2P_EDGE_STATE_OPENING;
+	}
+	l_smem_ptr = out_item->smem_edge_out;
+
+	/* retrieve remote side and version */
+	spin_lock(&in_list[remote_pid].in_item_lock_lhb1);
+	r_smem_ptr = smp2p_get_remote_smem_item(remote_pid, out_item);
+	spin_unlock(&in_list[remote_pid].in_item_lock_lhb1);
+
+	r_version = 0;
+	if (r_smem_ptr) {
+		r_version = SMP2P_GET_VERSION(r_smem_ptr->feature_version);
+		r_feature = SMP2P_GET_FEATURES(r_smem_ptr->feature_version);
+	}
+
+	if (r_version == 0) {
+		/*
+		 * Either remote side doesn't exist, or is in the
+		 * process of being initialized (the version is set last).
+		 *
+		 * In either case, treat as if the other side doesn't exist
+		 * and write out our maximum supported version.
+		 */
+		r_smem_ptr = NULL;
+		r_version = ARRAY_SIZE(version_if) - 1;
+		r_feature = ~0U;
+	}
+
+	/* find maximum supported version and feature set */
+	l_version = min(r_version, (uint32_t)ARRAY_SIZE(version_if) - 1);
+	for (; l_version > 0; --l_version) {
+		if (!version_if[l_version].is_supported)
+			continue;
+
+		/* found valid version */
+		l_feature = version_if[l_version].negotiate_features(~0U);
+		if (l_version == r_version)
+			l_feature &= r_feature;
+		break;
+	}
+
+	if (l_version == 0) {
+		SMP2P_ERR(
+			"%s: negotiation failure pid %d: RV %d RF %x\n",
+			__func__, remote_pid, r_version, r_feature
+			);
+		SMP2P_SET_VERSION(l_smem_ptr->feature_version,
+			SMP2P_EDGE_STATE_FAILED);
+		smp2p_send_interrupt(remote_pid);
+		out_item->smem_edge_state = SMP2P_EDGE_STATE_FAILED;
+		return -EPERM;
+	}
+
+	/* update header and notify remote side */
+	smp2p_init_header(l_smem_ptr, SMP2P_APPS_PROC, remote_pid,
+		l_feature, l_version);
+	smp2p_send_interrupt(remote_pid);
+
+	/* handle internal state changes */
+	if (r_smem_ptr && l_version == r_version &&
+			l_feature == r_feature) {
+		struct msm_smp2p_out *pos;
+
+		/* negotiation complete */
+		out_item->ops_ptr = &version_if[l_version];
+		out_item->ops_ptr->negotiation_complete(out_item);
+		out_item->smem_edge_state = SMP2P_EDGE_STATE_OPENED;
+		SMP2P_INFO(
+			"%s: negotiation complete pid %d: State %d->%d F0x%08x\n",
+			__func__, remote_pid, prev_state,
+			out_item->smem_edge_state, l_feature);
+
+		/* create any pending outbound entries */
+		list_for_each_entry(pos, &out_item->list, out_edge_list) {
+			out_item->ops_ptr->create_entry(pos);
+		}
+
+		/* update inbound edge */
+		spin_lock(&in_list[remote_pid].in_item_lock_lhb1);
+		(void)out_item->ops_ptr->validate_size(remote_pid, r_smem_ptr,
+				in_list[remote_pid].item_size);
+		in_list[remote_pid].smem_edge_in = r_smem_ptr;
+		spin_unlock(&in_list[remote_pid].in_item_lock_lhb1);
+	} else {
+		SMP2P_INFO("%s: negotiation pid %d: State %d->%d F0x%08x\n",
+			__func__, remote_pid, prev_state,
+			out_item->smem_edge_state, l_feature);
+	}
+	return 0;
+}
+
+/**
+ * msm_smp2p_out_open - Opens an outbound entry.
+ *
+ * @remote_pid: Outbound processor ID.
+ * @name: Name of the entry.
+ * @open_notifier: Notifier block for the open notification.
+ * @handle: Handle to the smem entry structure.
+ * @returns: 0 on success, standard Linux error code otherwise.
+ *
+ * Opens an outbound entry named @name from the local processor to the
+ * remote processor (@remote_pid). If @name, @remote_pid and
+ * @open_notifier are valid, then @handle is set and zero is returned.
+ * If the smem item that holds this entry has not been created yet, it is
+ * created according to the version negotiation algorithm.
+ * The @open_notifier is used to notify clients when the entry becomes
+ * available.
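+ *
+ * Illustrative sketch (entry name and callback are made-up examples):
+ *
+ *	static int my_open_cb(struct notifier_block *nb,
+ *			unsigned long event, void *data)
+ *	{
+ *		if (event == SMP2P_OPEN)
+ *			pr_info("smp2p entry is now writable\n");
+ *		return NOTIFY_OK;
+ *	}
+ *
+ *	static struct notifier_block my_open_nb = {
+ *		.notifier_call = my_open_cb,
+ *	};
+ *	static struct msm_smp2p_out *my_handle;
+ *
+ *	ret = msm_smp2p_out_open(SMP2P_MODEM_PROC, "my-entry",
+ *			&my_open_nb, &my_handle);
+ *	if (!ret)
+ *		ret = msm_smp2p_out_write(my_handle, 0x1);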
+ */
+int msm_smp2p_out_open(int remote_pid, const char *name,
+				   struct notifier_block *open_notifier,
+				   struct msm_smp2p_out **handle)
+{
+	struct msm_smp2p_out *out_entry;
+	struct msm_smp2p_out *pos;
+	int ret = 0;
+	unsigned long flags;
+
+	if (handle)
+		*handle = NULL;
+
+	if (remote_pid >= SMP2P_NUM_PROCS || !name || !open_notifier || !handle)
+		return -EINVAL;
+
+	if ((remote_pid != SMP2P_REMOTE_MOCK_PROC) &&
+			!smp2p_int_cfgs[remote_pid].is_configured) {
+		SMP2P_INFO("%s before msm_smp2p_init(): pid[%d] name[%s]\n",
+						__func__, remote_pid, name);
+		return -EPROBE_DEFER;
+	}
+
+	/* Allocate the smp2p object and node */
+	out_entry = kzalloc(sizeof(*out_entry), GFP_KERNEL);
+	if (!out_entry)
+		return -ENOMEM;
+
+	/* Handle duplicate registration */
+	spin_lock_irqsave(&out_list[remote_pid].out_item_lock_lha1, flags);
+	list_for_each_entry(pos, &out_list[remote_pid].list,
+			out_edge_list) {
+		if (!strcmp(pos->name, name)) {
+			spin_unlock_irqrestore(
+				&out_list[remote_pid].out_item_lock_lha1,
+				flags);
+			kfree(out_entry);
+			SMP2P_ERR("%s: duplicate registration '%s':%d\n",
+				__func__, name, remote_pid);
+			return -EBUSY;
+		}
+	}
+
+	out_entry->remote_pid = remote_pid;
+	RAW_INIT_NOTIFIER_HEAD(&out_entry->msm_smp2p_notifier_list);
+	strlcpy(out_entry->name, name, SMP2P_MAX_ENTRY_NAME);
+	out_entry->open_nb = open_notifier;
+	raw_notifier_chain_register(&out_entry->msm_smp2p_notifier_list,
+		  out_entry->open_nb);
+	list_add(&out_entry->out_edge_list, &out_list[remote_pid].list);
+
+	ret = out_list[remote_pid].ops_ptr->create_entry(out_entry);
+	if (ret) {
+		list_del(&out_entry->out_edge_list);
+		raw_notifier_chain_unregister(
+			&out_entry->msm_smp2p_notifier_list,
+			out_entry->open_nb);
+		spin_unlock_irqrestore(
+			&out_list[remote_pid].out_item_lock_lha1, flags);
+		kfree(out_entry);
+		SMP2P_ERR("%s: unable to open '%s':%d error %d\n",
+				__func__, name, remote_pid, ret);
+		return ret;
+	}
+	spin_unlock_irqrestore(&out_list[remote_pid].out_item_lock_lha1,
+			flags);
+	*handle = out_entry;
+
+	return 0;
+}
+EXPORT_SYMBOL(msm_smp2p_out_open);
+
+/**
+ * msm_smp2p_out_close - Closes the handle to an outbound entry.
+ *
+ * @handle: Pointer to smp2p out entry handle.
+ * @returns: 0 on success, standard Linux error code otherwise.
+ *
+ * The actual entry will not be deleted and can be re-opened at a later
+ * time.  The handle will be set to NULL.
+ */
+int msm_smp2p_out_close(struct msm_smp2p_out **handle)
+{
+	unsigned long flags;
+	struct msm_smp2p_out *out_entry;
+	struct smp2p_out_list_item *out_item;
+
+	if (!handle || !*handle)
+		return -EINVAL;
+
+	out_entry = *handle;
+	*handle = NULL;
+
+	if ((out_entry->remote_pid != SMP2P_REMOTE_MOCK_PROC) &&
+			!smp2p_int_cfgs[out_entry->remote_pid].is_configured) {
+		SMP2P_INFO("%s before msm_smp2p_init(): pid[%d] name[%s]\n",
+			__func__, out_entry->remote_pid, out_entry->name);
+		return -EPROBE_DEFER;
+	}
+
+	out_item = &out_list[out_entry->remote_pid];
+	spin_lock_irqsave(&out_item->out_item_lock_lha1, flags);
+	list_del(&out_entry->out_edge_list);
+	raw_notifier_chain_unregister(&out_entry->msm_smp2p_notifier_list,
+		out_entry->open_nb);
+	spin_unlock_irqrestore(&out_item->out_item_lock_lha1, flags);
+
+	kfree(out_entry);
+
+	return 0;
+}
+EXPORT_SYMBOL(msm_smp2p_out_close);
+
+/**
+ * msm_smp2p_out_read - Allows reading the entry.
+ *
+ * @handle: Handle to the smem entry structure.
+ * @data: Out pointer that holds the read data.
+ * @returns: 0 on success, standard Linux error code otherwise.
+ *
+ * Allows reading of the outbound entry for read-modify-write
+ * operation.
+ */
+int msm_smp2p_out_read(struct msm_smp2p_out *handle, uint32_t *data)
+{
+	int ret = -EINVAL;
+	unsigned long flags;
+	struct smp2p_out_list_item *out_item;
+
+	if (!handle || !data)
+		return ret;
+
+	if ((handle->remote_pid != SMP2P_REMOTE_MOCK_PROC) &&
+			!smp2p_int_cfgs[handle->remote_pid].is_configured) {
+		SMP2P_INFO("%s before msm_smp2p_init(): pid[%d] name[%s]\n",
+			__func__, handle->remote_pid, handle->name);
+		return -EPROBE_DEFER;
+	}
+
+	out_item = &out_list[handle->remote_pid];
+	spin_lock_irqsave(&out_item->out_item_lock_lha1, flags);
+	ret = out_item->ops_ptr->read_entry(handle, data);
+	spin_unlock_irqrestore(&out_item->out_item_lock_lha1, flags);
+
+	return ret;
+}
+EXPORT_SYMBOL(msm_smp2p_out_read);
+
+/**
+ * msm_smp2p_out_write - Allows writing to the entry.
+ *
+ * @handle: Handle to smem entry structure.
+ * @data: Data that has to be written.
+ * @returns: 0 on success, standard Linux error code otherwise.
+ *
+ * Writes a new value to the outbound entry. Multiple back-to-back writes
+ * may overwrite previous writes before the remote processor gets a chance
+ * to see them, leading to an ABA race condition. Clients must implement
+ * their own synchronization mechanism (such as an echo mechanism) if this
+ * is not acceptable.
+ */
+int msm_smp2p_out_write(struct msm_smp2p_out *handle, uint32_t data)
+{
+	int ret = -EINVAL;
+	unsigned long flags;
+	struct smp2p_out_list_item *out_item;
+
+	if (!handle)
+		return ret;
+
+	if ((handle->remote_pid != SMP2P_REMOTE_MOCK_PROC) &&
+			!smp2p_int_cfgs[handle->remote_pid].is_configured) {
+		SMP2P_INFO("%s before msm_smp2p_init(): pid[%d] name[%s]\n",
+			__func__, handle->remote_pid, handle->name);
+		return -EPROBE_DEFER;
+	}
+
+	out_item = &out_list[handle->remote_pid];
+	spin_lock_irqsave(&out_item->out_item_lock_lha1, flags);
+	ret = out_item->ops_ptr->write_entry(handle, data);
+	spin_unlock_irqrestore(&out_item->out_item_lock_lha1, flags);
+
+	return ret;
+}
+EXPORT_SYMBOL(msm_smp2p_out_write);
+
+/**
+ * msm_smp2p_out_modify - Modifies the entry.
+ *
+ * @handle: Handle to the smem entry structure.
+ * @set_mask: Specifies the bits that need to be set.
+ * @clear_mask: Specifies the bits that need to be cleared.
+ * @send_irq: Flag to send interrupt to remote processor.
+ * @returns: 0 on success, standard Linux error code otherwise.
+ *
+ * The modification is done with a bitwise AND of the inverted clear mask
+ * followed by a bitwise OR of the set mask. The clear mask is applied
+ * first, so if a bit is set in both the clear mask and the set mask, the
+ * result is a set bit.  Multiple back-to-back modifications may overwrite
+ * previous values before the remote processor gets a chance to see them,
+ * leading to an ABA race condition. Clients must implement their own
+ * synchronization mechanism (such as an echo mechanism) if this is not
+ * acceptable.
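+ *
+ * Worked example (illustrative values): if the current entry value is 0x5,
+ * @clear_mask is 0x1 and @set_mask is 0x3, the entry becomes
+ * (0x5 & ~0x1) | 0x3 = 0x7.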
+ */
+int msm_smp2p_out_modify(struct msm_smp2p_out *handle, uint32_t set_mask,
+					uint32_t clear_mask, bool send_irq)
+{
+	int ret = -EINVAL;
+	unsigned long flags;
+	struct smp2p_out_list_item *out_item;
+
+	if (!handle)
+		return ret;
+
+	if ((handle->remote_pid != SMP2P_REMOTE_MOCK_PROC) &&
+			!smp2p_int_cfgs[handle->remote_pid].is_configured) {
+		SMP2P_INFO("%s before msm_smp2p_init(): pid[%d] name[%s]\n",
+			__func__, handle->remote_pid, handle->name);
+		return -EPROBE_DEFER;
+	}
+
+	out_item = &out_list[handle->remote_pid];
+	spin_lock_irqsave(&out_item->out_item_lock_lha1, flags);
+	ret = out_item->ops_ptr->modify_entry(handle, set_mask,
+						clear_mask, send_irq);
+	spin_unlock_irqrestore(&out_item->out_item_lock_lha1, flags);
+
+	return ret;
+}
+EXPORT_SYMBOL(msm_smp2p_out_modify);
+
+/**
+ * msm_smp2p_in_read - Read an entry on a remote processor.
+ *
+ * @remote_pid: Processor ID of the remote processor.
+ * @name: Name of the entry that is to be read.
+ * @data: Output pointer, the value will be placed here if successful.
+ * @returns: 0 on success, standard Linux error code otherwise.
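+ *
+ * Illustrative usage (the entry name is a made-up example):
+ *
+ *	uint32_t val;
+ *
+ *	ret = msm_smp2p_in_read(SMP2P_MODEM_PROC, "my-entry", &val);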
+ */
+int msm_smp2p_in_read(int remote_pid, const char *name, uint32_t *data)
+{
+	unsigned long flags;
+	struct smp2p_out_list_item *out_item;
+	uint32_t *entry_ptr = NULL;
+
+	if (remote_pid >= SMP2P_NUM_PROCS || !name || !data)
+		return -EINVAL;
+
+	if ((remote_pid != SMP2P_REMOTE_MOCK_PROC) &&
+			!smp2p_int_cfgs[remote_pid].is_configured) {
+		SMP2P_INFO("%s before msm_smp2p_init(): pid[%d] name[%s]\n",
+						__func__, remote_pid, name);
+		return -EPROBE_DEFER;
+	}
+
+	out_item = &out_list[remote_pid];
+	spin_lock_irqsave(&out_item->out_item_lock_lha1, flags);
+	spin_lock(&in_list[remote_pid].in_item_lock_lhb1);
+
+	if (in_list[remote_pid].smem_edge_in)
+		out_item->ops_ptr->find_entry(
+			in_list[remote_pid].smem_edge_in,
+			in_list[remote_pid].safe_total_entries,
+			(char *)name, &entry_ptr, NULL);
+
+	spin_unlock(&in_list[remote_pid].in_item_lock_lhb1);
+	spin_unlock_irqrestore(&out_item->out_item_lock_lha1, flags);
+
+	if (!entry_ptr)
+		return -ENODEV;
+
+	*data = readl_relaxed(entry_ptr);
+	return 0;
+}
+EXPORT_SYMBOL(msm_smp2p_in_read);
+
+/**
+ * msm_smp2p_in_register - Register for remote entry change notifications.
+ *
+ * @pid: Remote processor ID.
+ * @name: Name of the entry.
+ * @in_notifier: Notifier block used to notify about the event.
+ * @returns: 0 on success, standard Linux error code otherwise.
+ *
+ * Register for change notifications for a remote entry. If the remote entry
+ * does not exist yet, then the registration request will be held until the
+ * remote side opens. Once the entry is open, then the SMP2P_OPEN notification
+ * will be sent. Any changes to the entry will trigger a call to the notifier
+ * block with an SMP2P_ENTRY_UPDATE event and the data field will point to an
+ * msm_smp2p_update_notif structure containing the current and previous value.
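+ *
+ * Illustrative sketch (entry name and callback are made-up examples):
+ *
+ *	static int my_in_cb(struct notifier_block *nb,
+ *			unsigned long event, void *data)
+ *	{
+ *		struct msm_smp2p_update_notif *notif = data;
+ *
+ *		if (event == SMP2P_ENTRY_UPDATE)
+ *			pr_info("0x%x -> 0x%x\n", notif->previous_value,
+ *					notif->current_value);
+ *		return NOTIFY_OK;
+ *	}
+ *
+ *	static struct notifier_block my_in_nb = {
+ *		.notifier_call = my_in_cb,
+ *	};
+ *
+ *	ret = msm_smp2p_in_register(SMP2P_MODEM_PROC, "my-entry", &my_in_nb);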
+ */
+int msm_smp2p_in_register(int pid, const char *name,
+	struct notifier_block *in_notifier)
+{
+	struct smp2p_in *pos;
+	struct smp2p_in *in = NULL;
+	int ret;
+	unsigned long flags;
+	struct msm_smp2p_update_notif data;
+	uint32_t *entry_ptr = NULL;
+
+	if (pid >= SMP2P_NUM_PROCS || !name || !in_notifier)
+		return -EINVAL;
+
+	if ((pid != SMP2P_REMOTE_MOCK_PROC) &&
+			!smp2p_int_cfgs[pid].is_configured) {
+		SMP2P_INFO("%s before msm_smp2p_init(): pid[%d] name[%s]\n",
+						__func__, pid, name);
+		return -EPROBE_DEFER;
+	}
+
+	/* Pre-allocate before taking the spinlock since we will likely need it */
+	in = kzalloc(sizeof(*in), GFP_KERNEL);
+	if (!in)
+		return -ENOMEM;
+
+	/* Search for existing entry */
+	spin_lock_irqsave(&out_list[pid].out_item_lock_lha1, flags);
+	spin_lock(&in_list[pid].in_item_lock_lhb1);
+
+	list_for_each_entry(pos, &in_list[pid].list, in_edge_list) {
+		if (!strncmp(pos->name, name,
+					SMP2P_MAX_ENTRY_NAME)) {
+			kfree(in);
+			in = pos;
+			break;
+		}
+	}
+
+	/* New entry - initialize it and add it to the list */
+	if (!in->notifier_count) {
+		in->remote_pid = pid;
+		strlcpy(in->name, name, SMP2P_MAX_ENTRY_NAME);
+		RAW_INIT_NOTIFIER_HEAD(&in->in_notifier_list);
+		list_add(&in->in_edge_list, &in_list[pid].list);
+	}
+
+	ret = raw_notifier_chain_register(&in->in_notifier_list,
+			in_notifier);
+	if (ret) {
+		if (!in->notifier_count) {
+			list_del(&in->in_edge_list);
+			kfree(in);
+		}
+		SMP2P_DBG("%s: '%s':%d failed %d\n", __func__, name, pid, ret);
+		goto bail;
+	}
+	in->notifier_count++;
+
+	if (out_list[pid].smem_edge_state == SMP2P_EDGE_STATE_OPENED) {
+		out_list[pid].ops_ptr->find_entry(
+				in_list[pid].smem_edge_in,
+				in_list[pid].safe_total_entries, (char *)name,
+				&entry_ptr, NULL);
+		if (entry_ptr) {
+			in->entry_ptr = entry_ptr;
+			in->prev_entry_val = readl_relaxed(entry_ptr);
+
+			data.previous_value = in->prev_entry_val;
+			data.current_value = in->prev_entry_val;
+			in_notifier->notifier_call(in_notifier, SMP2P_OPEN,
+					(void *)&data);
+		}
+	}
+	SMP2P_DBG("%s: '%s':%d registered\n", __func__, name, pid);
+
+bail:
+	spin_unlock(&in_list[pid].in_item_lock_lhb1);
+	spin_unlock_irqrestore(&out_list[pid].out_item_lock_lha1, flags);
+	return ret;
+}
+EXPORT_SYMBOL(msm_smp2p_in_register);
+
+/**
+ * msm_smp2p_in_unregister - Unregister the notifier for remote entry.
+ *
+ * @remote_pid: Processor Id of the remote processor.
+ * @name: The name of the entry.
+ * @in_notifier: Notifier block passed during registration.
+ * @returns: 0 on success, standard Linux error code otherwise.
+ */
+int msm_smp2p_in_unregister(int remote_pid, const char *name,
+				struct notifier_block *in_notifier)
+{
+	struct smp2p_in *pos;
+	struct smp2p_in *in = NULL;
+	int ret = -ENODEV;
+	unsigned long flags;
+
+	if (remote_pid >= SMP2P_NUM_PROCS || !name || !in_notifier)
+		return -EINVAL;
+
+	if ((remote_pid != SMP2P_REMOTE_MOCK_PROC) &&
+			!smp2p_int_cfgs[remote_pid].is_configured) {
+		SMP2P_INFO("%s before msm_smp2p_init(): pid[%d] name[%s]\n",
+						__func__, remote_pid, name);
+		return -EPROBE_DEFER;
+	}
+
+	spin_lock_irqsave(&in_list[remote_pid].in_item_lock_lhb1, flags);
+	list_for_each_entry(pos, &in_list[remote_pid].list,
+			in_edge_list) {
+		if (!strncmp(pos->name, name, SMP2P_MAX_ENTRY_NAME)) {
+			in = pos;
+			break;
+		}
+	}
+	if (!in)
+		goto fail;
+
+	ret = raw_notifier_chain_unregister(&pos->in_notifier_list,
+			in_notifier);
+	if (ret == 0) {
+		pos->notifier_count--;
+		if (!pos->notifier_count) {
+			list_del(&pos->in_edge_list);
+			kfree(pos);
+			ret = 0;
+		}
+	} else {
+		SMP2P_ERR("%s: unregister failure '%s':%d\n", __func__,
+			name, remote_pid);
+		ret = -ENODEV;
+	}
+
+fail:
+	spin_unlock_irqrestore(&in_list[remote_pid].in_item_lock_lhb1, flags);
+
+	return ret;
+}
+EXPORT_SYMBOL(msm_smp2p_in_unregister);
+
+/**
+ * smp2p_send_interrupt - Send interrupt to remote system.
+ *
+ * @remote_pid:  Processor ID of the remote system
+ *
+ * Must be called with out_item_lock_lha1 locked.
+ */
+static void smp2p_send_interrupt(int remote_pid)
+{
+	if (smp2p_int_cfgs[remote_pid].name)
+		SMP2P_DBG("SMP2P Int Apps->%s(%d)\n",
+			smp2p_int_cfgs[remote_pid].name, remote_pid);
+
+	++smp2p_int_cfgs[remote_pid].out_interrupt_count;
+	if (remote_pid != SMP2P_REMOTE_MOCK_PROC &&
+			smp2p_int_cfgs[remote_pid].out_int_mask) {
+		/* flush any pending writes before triggering interrupt */
+		wmb();
+		writel_relaxed(smp2p_int_cfgs[remote_pid].out_int_mask,
+			smp2p_int_cfgs[remote_pid].out_int_ptr);
+	} else {
+		smp2p_remote_mock_rx_interrupt();
+	}
+}
+
+/**
+ * smp2p_in_edge_notify - Notifies the entry changed on remote processor.
+ *
+ * @pid: Processor ID of the remote processor.
+ *
+ * This function is invoked on an incoming interrupt; it scans the list of
+ * clients registered for entries on the remote processor and notifies
+ * them if the data changed.
+ *
+ * Note:  Edge state must be OPENED to avoid a race condition with
+ *        out_list[pid].ops_ptr->find_entry.
+ */
+static void smp2p_in_edge_notify(int pid)
+{
+	struct smp2p_in *pos;
+	uint32_t *entry_ptr;
+	unsigned long flags;
+	struct smp2p_smem __iomem *smem_h_ptr;
+	uint32_t curr_data;
+	struct msm_smp2p_update_notif data;
+
+	spin_lock_irqsave(&in_list[pid].in_item_lock_lhb1, flags);
+	smem_h_ptr = in_list[pid].smem_edge_in;
+	if (!smem_h_ptr) {
+		SMP2P_DBG("%s: No remote SMEM item for pid %d\n",
+			__func__, pid);
+		spin_unlock_irqrestore(&in_list[pid].in_item_lock_lhb1, flags);
+		return;
+	}
+
+	list_for_each_entry(pos, &in_list[pid].list, in_edge_list) {
+		if (pos->entry_ptr == NULL) {
+			/* entry not open - try to open it */
+			out_list[pid].ops_ptr->find_entry(smem_h_ptr,
+				in_list[pid].safe_total_entries, pos->name,
+				&entry_ptr, NULL);
+
+			if (entry_ptr) {
+				pos->entry_ptr = entry_ptr;
+				pos->prev_entry_val = 0;
+				data.previous_value = 0;
+				data.current_value = readl_relaxed(entry_ptr);
+				raw_notifier_call_chain(
+					    &pos->in_notifier_list,
+					    SMP2P_OPEN, (void *)&data);
+			}
+		}
+
+		if (pos->entry_ptr != NULL) {
+			/* send update notification */
+			curr_data = readl_relaxed(pos->entry_ptr);
+			if (curr_data != pos->prev_entry_val) {
+				data.previous_value = pos->prev_entry_val;
+				data.current_value = curr_data;
+				pos->prev_entry_val = curr_data;
+				raw_notifier_call_chain(
+					&pos->in_notifier_list,
+					SMP2P_ENTRY_UPDATE, (void *)&data);
+			}
+		}
+	}
+	spin_unlock_irqrestore(&in_list[pid].in_item_lock_lhb1, flags);
+}
+
+/**
+ * smp2p_interrupt_handler - Incoming interrupt handler.
+ *
+ * @irq: Interrupt ID
+ * @data: Edge
+ * @returns: IRQ_HANDLED or IRQ_NONE for invalid interrupt
+ */
+static irqreturn_t smp2p_interrupt_handler(int irq, void *data)
+{
+	unsigned long flags;
+	uint32_t remote_pid = (uint32_t)(uintptr_t)data;
+
+	if (remote_pid >= SMP2P_NUM_PROCS) {
+		SMP2P_ERR("%s: invalid interrupt pid %d\n",
+			__func__, remote_pid);
+		return IRQ_NONE;
+	}
+
+	if (smp2p_int_cfgs[remote_pid].name)
+		SMP2P_DBG("SMP2P Int %s(%d)->Apps\n",
+			smp2p_int_cfgs[remote_pid].name, remote_pid);
+
+	spin_lock_irqsave(&out_list[remote_pid].out_item_lock_lha1, flags);
+	++smp2p_int_cfgs[remote_pid].in_interrupt_count;
+
+	if (out_list[remote_pid].smem_edge_state != SMP2P_EDGE_STATE_OPENED)
+		smp2p_do_negotiation(remote_pid, &out_list[remote_pid]);
+
+	if (out_list[remote_pid].smem_edge_state == SMP2P_EDGE_STATE_OPENED) {
+		bool do_restart_ack;
+
+		/*
+		 * Follow double-check pattern for restart ack since:
+		 * 1) we must notify clients of the X->0 transition
+		 *    that is part of the restart
+		 * 2) lock cannot be held during the
+		 *    smp2p_in_edge_notify() call because clients may do
+		 *    re-entrant calls into our APIs.
+		 *
+		 * smp2p_do_ssr_ack() will only do the ack if it is
+		 * necessary to handle the race condition exposed by
+		 * unlocking the spinlocks.
+		 */
+		spin_lock(&in_list[remote_pid].in_item_lock_lhb1);
+		do_restart_ack = smp2p_ssr_ack_needed(remote_pid);
+		spin_unlock(&in_list[remote_pid].in_item_lock_lhb1);
+		spin_unlock_irqrestore(&out_list[remote_pid].out_item_lock_lha1,
+			flags);
+
+		smp2p_in_edge_notify(remote_pid);
+
+		if (do_restart_ack) {
+			spin_lock_irqsave(
+				&out_list[remote_pid].out_item_lock_lha1,
+				flags);
+			spin_lock(&in_list[remote_pid].in_item_lock_lhb1);
+
+			smp2p_do_ssr_ack(remote_pid);
+
+			spin_unlock(&in_list[remote_pid].in_item_lock_lhb1);
+			spin_unlock_irqrestore(
+				&out_list[remote_pid].out_item_lock_lha1,
+				flags);
+		}
+	} else {
+		spin_unlock_irqrestore(&out_list[remote_pid].out_item_lock_lha1,
+			flags);
+	}
+
+	return IRQ_HANDLED;
+}
+
+/**
+ * smp2p_reset_mock_edge - Reinitializes the mock edge.
+ *
+ * @returns: 0 on success, -EAGAIN to retry later.
+ *
+ * Reinitializes the mock edge to initial power-up state values.
+ */
+int smp2p_reset_mock_edge(void)
+{
+	const int rpid = SMP2P_REMOTE_MOCK_PROC;
+	unsigned long flags;
+	int ret = 0;
+
+	spin_lock_irqsave(&out_list[rpid].out_item_lock_lha1, flags);
+	spin_lock(&in_list[rpid].in_item_lock_lhb1);
+
+	if (!list_empty(&out_list[rpid].list) ||
+			!list_empty(&in_list[rpid].list)) {
+		ret = -EAGAIN;
+		goto fail;
+	}
+
+	kfree(out_list[rpid].smem_edge_out);
+	out_list[rpid].smem_edge_out = NULL;
+	out_list[rpid].ops_ptr = &version_if[0];
+	out_list[rpid].smem_edge_state = SMP2P_EDGE_STATE_CLOSED;
+	out_list[rpid].feature_ssr_ack_enabled = false;
+	out_list[rpid].restart_ack = false;
+
+	in_list[rpid].smem_edge_in = NULL;
+	in_list[rpid].item_size = 0;
+	in_list[rpid].safe_total_entries = 0;
+
+fail:
+	spin_unlock(&in_list[rpid].in_item_lock_lhb1);
+	spin_unlock_irqrestore(&out_list[rpid].out_item_lock_lha1, flags);
+
+	return ret;
+}
+
+/**
+ * msm_smp2p_interrupt_handler - Triggers incoming interrupt.
+ *
+ * @remote_pid: Remote processor ID
+ *
+ * This function is used with the remote mock infrastructure
+ * used for testing. It simulates triggering of interrupt in
+ * a testing environment.
+ */
+void msm_smp2p_interrupt_handler(int remote_pid)
+{
+	smp2p_interrupt_handler(0, (void *)(uintptr_t)remote_pid);
+}
+
+/**
+ * msm_smp2p_probe - Device tree probe function.
+ *
+ * @pdev: Pointer to device tree data.
+ * @returns: 0 on success; -ENODEV otherwise
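+ *
+ * Example devicetree node (illustrative values only; the properties below
+ * are the ones consumed by this probe function):
+ *
+ *	smp2p-modem {
+ *		compatible = "qcom,smp2p";
+ *		reg = <0xfa006008 0x4>;
+ *		qcom,remote-pid = <1>;
+ *		qcom,irq-bitmask = <0x4000>;
+ *		interrupts = <0 27 1>;
+ *	};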
+ */
+static int msm_smp2p_probe(struct platform_device *pdev)
+{
+	struct resource *r;
+	void *irq_out_ptr = NULL;
+	char *key;
+	uint32_t edge;
+	int ret;
+	struct device_node *node;
+	uint32_t irq_bitmask;
+	int irq_line;
+	void *temp_p;
+	unsigned temp_sz;
+
+	node = pdev->dev.of_node;
+
+	key = "qcom,remote-pid";
+	ret = of_property_read_u32(node, key, &edge);
+	if (ret) {
+		SMP2P_ERR("%s: missing edge '%s'\n", __func__, key);
+		ret = -ENODEV;
+		goto fail;
+	}
+	if (edge >= SMP2P_NUM_PROCS) {
+		SMP2P_ERR("%s: invalid edge %d\n", __func__, edge);
+		ret = -ENODEV;
+		goto fail;
+	}
+
+	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!r) {
+		SMP2P_ERR("%s: failed gathering irq-reg resource for edge %d\n",
+				__func__, edge);
+		ret = -ENODEV;
+		goto fail;
+	}
+	irq_out_ptr = ioremap_nocache(r->start, resource_size(r));
+	if (!irq_out_ptr) {
+		SMP2P_ERR("%s: failed remap from phys to virt for edge %d\n",
+				__func__, edge);
+		ret = -ENOMEM;
+		goto fail;
+	}
+
+	key = "qcom,irq-bitmask";
+	ret = of_property_read_u32(node, key, &irq_bitmask);
+	if (ret)
+		goto missing_key;
+
+	key = "interrupts";
+	irq_line = platform_get_irq(pdev, 0);
+	if (irq_line < 0)
+		goto missing_key;
+
+	/*
+	 * We depend on the SMEM driver, so do a test access to see if SMEM is
+	 * ready.  We don't want any side effects at this time (so no alloc)
+	 * and the return doesn't matter, so long as it is not -EPROBE_DEFER.
+	 */
+	temp_p = smem_get_entry(
+		smp2p_get_smem_item_id(SMP2P_APPS_PROC, SMP2P_MODEM_PROC),
+		&temp_sz,
+		0,
+		SMEM_ANY_HOST_FLAG);
+	if (PTR_ERR(temp_p) == -EPROBE_DEFER) {
+		SMP2P_INFO("%s: edge:%d probe before smem ready\n", __func__,
+									edge);
+		ret = -EPROBE_DEFER;
+		goto fail;
+	}
+
+	ret = request_irq(irq_line, smp2p_interrupt_handler,
+			IRQF_TRIGGER_RISING, "smp2p", (void *)(uintptr_t)edge);
+	if (ret < 0) {
+		SMP2P_ERR("%s: request_irq() failed on %d (edge %d)\n",
+				__func__, irq_line, edge);
+		ret = -ENODEV;
+		goto fail;
+	}
+
+	ret = enable_irq_wake(irq_line);
+	if (ret < 0)
+		SMP2P_ERR("%s: enable_irq_wake() failed on %d (edge %d)\n",
+				__func__, irq_line, edge);
+
+	/*
+	 * Set entry (keep is_configured last to prevent usage before
+	 * initialization).
+	 */
+	smp2p_int_cfgs[edge].in_int_id = irq_line;
+	smp2p_int_cfgs[edge].out_int_mask = irq_bitmask;
+	smp2p_int_cfgs[edge].out_int_ptr = irq_out_ptr;
+	smp2p_int_cfgs[edge].is_configured = true;
+	return 0;
+
+missing_key:
+	SMP2P_ERR("%s: missing '%s' for edge %d\n", __func__, key, edge);
+	ret = -ENODEV;
+fail:
+	if (irq_out_ptr)
+		iounmap(irq_out_ptr);
+	return ret;
+}
+
+static const struct of_device_id msm_smp2p_match_table[] = {
+	{ .compatible = "qcom,smp2p" },
+	{},
+};
+
+static struct platform_driver msm_smp2p_driver = {
+	.probe = msm_smp2p_probe,
+	.driver = {
+		.name = "msm_smp2p",
+		.owner = THIS_MODULE,
+		.of_match_table = msm_smp2p_match_table,
+	},
+};
+
+/**
+ * msm_smp2p_init -  Initialization function for the module.
+ *
+ * @returns: 0 on success, standard Linux error code otherwise.
+ */
+static int __init msm_smp2p_init(void)
+{
+	int i;
+	int rc;
+
+	for (i = 0; i < SMP2P_NUM_PROCS; i++) {
+		spin_lock_init(&out_list[i].out_item_lock_lha1);
+		INIT_LIST_HEAD(&out_list[i].list);
+		out_list[i].smem_edge_out = NULL;
+		out_list[i].smem_edge_state = SMP2P_EDGE_STATE_CLOSED;
+		out_list[i].ops_ptr = &version_if[0];
+		out_list[i].feature_ssr_ack_enabled = false;
+		out_list[i].restart_ack = false;
+
+		spin_lock_init(&in_list[i].in_item_lock_lhb1);
+		INIT_LIST_HEAD(&in_list[i].list);
+		in_list[i].smem_edge_in = NULL;
+	}
+
+	log_ctx = ipc_log_context_create(NUM_LOG_PAGES, "smp2p", 0);
+	if (!log_ctx)
+		SMP2P_ERR("%s: unable to create log context\n", __func__);
+
+	rc = platform_driver_register(&msm_smp2p_driver);
+	if (rc) {
+		SMP2P_ERR("%s: msm_smp2p_driver register failed %d\n",
+			__func__, rc);
+		return rc;
+	}
+
+	return 0;
+}
+module_init(msm_smp2p_init);
+
+MODULE_DESCRIPTION("MSM Shared Memory Point to Point");
+MODULE_LICENSE("GPL v2");
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/soc/qcom/smp2p_debug.c	2019-01-22 16:16:26.675275131 +0100
@@ -0,0 +1,335 @@
+/* drivers/soc/qcom/smp2p_debug.c
+ *
+ * Copyright (c) 2013-2014,2016 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#include <linux/ctype.h>
+#include <linux/list.h>
+#include <linux/debugfs.h>
+#include <linux/io.h>
+#include "smp2p_private.h"
+
+#if defined(CONFIG_DEBUG_FS)
+
+/**
+ * smp2p_int_stats - Dump interrupt statistics.
+ *
+ * @s:   pointer to output file
+ */
+static void smp2p_int_stats(struct seq_file *s)
+{
+	struct smp2p_interrupt_config *int_cfg;
+	int pid;
+
+	int_cfg = smp2p_get_interrupt_config();
+	if (!int_cfg)
+		return;
+
+	seq_puts(s, "| Processor | Incoming Id | Incoming # |");
+	seq_puts(s, " Outgoing # | Base Ptr |   Mask   |\n");
+
+	for (pid = 0; pid < SMP2P_NUM_PROCS; ++pid) {
+		if (!int_cfg[pid].is_configured &&
+				pid != SMP2P_REMOTE_MOCK_PROC)
+			continue;
+
+		seq_printf(s, "| %5s (%d) | %11u | %10u | %10u | %pK | %08x |\n",
+			int_cfg[pid].name,
+			pid, int_cfg[pid].in_int_id,
+			int_cfg[pid].in_interrupt_count,
+			int_cfg[pid].out_interrupt_count,
+			int_cfg[pid].out_int_ptr,
+			int_cfg[pid].out_int_mask);
+	}
+}
+
+/**
+ * smp2p_item_header1 - Dump item header line 1.
+ *
+ * @buf:      output buffer
+ * @max:      length of output buffer
+ * @item_ptr: SMEM item pointer
+ * @state:    item state
+ * @returns: Number of bytes written to output buffer
+ */
+static int smp2p_item_header1(char *buf, int max, struct smp2p_smem *item_ptr,
+	enum msm_smp2p_edge_state state)
+{
+	int i = 0;
+	const char *state_text;
+
+	if (!item_ptr) {
+		i += scnprintf(buf + i, max - i, "None");
+		return i;
+	}
+
+	switch (state) {
+	case SMP2P_EDGE_STATE_CLOSED:
+		state_text = "State: Closed";
+		break;
+	case SMP2P_EDGE_STATE_OPENING:
+		state_text = "State: Opening";
+		break;
+	case SMP2P_EDGE_STATE_OPENED:
+		state_text = "State: Opened";
+		break;
+	default:
+		state_text = "";
+		break;
+	}
+
+	i += scnprintf(buf + i, max - i,
+		"%-14s LPID %d RPID %d",
+		state_text,
+		SMP2P_GET_LOCAL_PID(item_ptr->rem_loc_proc_id),
+		SMP2P_GET_REMOTE_PID(item_ptr->rem_loc_proc_id)
+		);
+
+	return i;
+}
+
+/**
+ * Dump item header line 2.
+ *
+ * @buf:      output buffer
+ * @max:      length of output buffer
+ * @item_ptr: SMEM item pointer
+ * @returns: Number of bytes written to output buffer
+ */
+static int smp2p_item_header2(char *buf, int max, struct smp2p_smem *item_ptr)
+{
+	int i = 0;
+
+	if (!item_ptr) {
+		i += scnprintf(buf + i, max - i, "None");
+		return i;
+	}
+
+	i += scnprintf(buf + i, max - i,
+		"Version: %08x Features: %08x",
+		SMP2P_GET_VERSION(item_ptr->feature_version),
+		SMP2P_GET_FEATURES(item_ptr->feature_version)
+		);
+
+	return i;
+}
+
+/**
+ * Dump item header line 3.
+ *
+ * @buf:      output buffer
+ * @max:      length of output buffer
+ * @item_ptr: SMEM item pointer
+ * @returns: Number of bytes written to output buffer
+ */
+static int smp2p_item_header3(char *buf, int max, struct smp2p_smem *item_ptr)
+{
+	int i = 0;
+
+	if (!item_ptr) {
+		i += scnprintf(buf + i, max - i, "None");
+		return i;
+	}
+
+	i += scnprintf(buf + i, max - i,
+		"Entries #/Max: %d/%d Flags: %c%c",
+		SMP2P_GET_ENT_VALID(item_ptr->valid_total_ent),
+		SMP2P_GET_ENT_TOTAL(item_ptr->valid_total_ent),
+		item_ptr->flags & SMP2P_FLAGS_RESTART_ACK_MASK ? 'A' : 'a',
+		item_ptr->flags & SMP2P_FLAGS_RESTART_DONE_MASK ? 'D' : 'd'
+		);
+
+	return i;
+}
+
+/**
+ * Dump an individual input/output item pair.
+ *
+ * @s:          pointer to output file
+ * @remote_pid: remote processor ID of the edge being dumped
+ */
+static void smp2p_item(struct seq_file *s, int remote_pid)
+{
+	struct smp2p_smem *out_ptr;
+	struct smp2p_smem *in_ptr;
+	struct smp2p_interrupt_config *int_cfg;
+	char tmp_buff[64];
+	int state;
+	int entry;
+	struct smp2p_entry_v1 *out_entries = NULL;
+	struct smp2p_entry_v1 *in_entries = NULL;
+	int out_valid = 0;
+	int in_valid = 0;
+	char entry_name[SMP2P_MAX_ENTRY_NAME];
+
+	int_cfg = smp2p_get_interrupt_config();
+	if (!int_cfg)
+		return;
+	if (!int_cfg[remote_pid].is_configured &&
+			remote_pid != SMP2P_REMOTE_MOCK_PROC)
+		return;
+
+	out_ptr = smp2p_get_out_item(remote_pid, &state);
+	in_ptr = smp2p_get_in_item(remote_pid);
+
+	if (!out_ptr && !in_ptr)
+		return;
+
+	/* print item headers */
+	seq_printf(s, "%s%s\n",
+		" ====================================== ",
+		"======================================");
+	scnprintf(tmp_buff, sizeof(tmp_buff),
+		"Apps(%d)->%s(%d)",
+		SMP2P_APPS_PROC, int_cfg[remote_pid].name, remote_pid);
+	seq_printf(s, "| %-37s", tmp_buff);
+
+	scnprintf(tmp_buff, sizeof(tmp_buff),
+		"%s(%d)->Apps(%d)",
+		int_cfg[remote_pid].name, remote_pid, SMP2P_APPS_PROC);
+	seq_printf(s, "| %-37s|\n", tmp_buff);
+	seq_printf(s, "%s%s\n",
+		" ====================================== ",
+		"======================================");
+
+	smp2p_item_header1(tmp_buff, sizeof(tmp_buff), out_ptr, state);
+	seq_printf(s, "| %-37s", tmp_buff);
+	smp2p_item_header1(tmp_buff, sizeof(tmp_buff), in_ptr, -1);
+	seq_printf(s, "| %-37s|\n", tmp_buff);
+
+	smp2p_item_header2(tmp_buff, sizeof(tmp_buff), out_ptr);
+	seq_printf(s, "| %-37s", tmp_buff);
+	smp2p_item_header2(tmp_buff, sizeof(tmp_buff), in_ptr);
+	seq_printf(s, "| %-37s|\n", tmp_buff);
+
+	smp2p_item_header3(tmp_buff, sizeof(tmp_buff), out_ptr);
+	seq_printf(s, "| %-37s", tmp_buff);
+	smp2p_item_header3(tmp_buff, sizeof(tmp_buff), in_ptr);
+	seq_printf(s, "| %-37s|\n", tmp_buff);
+
+	seq_printf(s, " %s%s\n",
+		"-------------------------------------- ",
+		"--------------------------------------");
+	seq_printf(s, "| %-37s",
+		"Entry Name       Value");
+	seq_printf(s, "| %-37s|\n",
+		"Entry Name       Value");
+	seq_printf(s, " %s%s\n",
+		"-------------------------------------- ",
+		"--------------------------------------");
+
+	/* print entries */
+	if (out_ptr) {
+		out_entries = (struct smp2p_entry_v1 *)((void *)out_ptr +
+				sizeof(struct smp2p_smem));
+		out_valid = SMP2P_GET_ENT_VALID(out_ptr->valid_total_ent);
+	}
+
+	if (in_ptr) {
+		in_entries = (struct smp2p_entry_v1 *)((void *)in_ptr +
+				sizeof(struct smp2p_smem));
+		in_valid = SMP2P_GET_ENT_VALID(in_ptr->valid_total_ent);
+	}
+
+	for (entry = 0; out_entries || in_entries; ++entry) {
+		if (out_entries && entry < out_valid) {
+			memcpy_fromio(entry_name, out_entries->name,
+							SMP2P_MAX_ENTRY_NAME);
+			scnprintf(tmp_buff, sizeof(tmp_buff),
+					"%-16s 0x%08x",
+					entry_name,
+					out_entries->entry);
+			++out_entries;
+		} else {
+			out_entries = NULL;
+			scnprintf(tmp_buff, sizeof(tmp_buff), "None");
+		}
+		seq_printf(s, "| %-37s", tmp_buff);
+
+		if (in_entries && entry < in_valid) {
+			memcpy_fromio(entry_name, in_entries->name,
+							SMP2P_MAX_ENTRY_NAME);
+			scnprintf(tmp_buff, sizeof(tmp_buff),
+					"%-16s 0x%08x",
+					entry_name,
+					in_entries->entry);
+			++in_entries;
+		} else {
+			in_entries = NULL;
+			scnprintf(tmp_buff, sizeof(tmp_buff), "None");
+		}
+		seq_printf(s, "| %-37s|\n", tmp_buff);
+	}
+	seq_printf(s, " %s%s\n\n",
+		"-------------------------------------- ",
+		"--------------------------------------");
+}
+
+/**
+ * Dump item state.
+ *
+ * @s:   pointer to output file
+ */
+static void smp2p_items(struct seq_file *s)
+{
+	int pid;
+
+	for (pid = 0; pid < SMP2P_NUM_PROCS; ++pid)
+		smp2p_item(s, pid);
+}
+
+static struct dentry *dent;
+
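+/*
+ * Each debugfs file created below stores its show() function as the file's
+ * private data, so a single set of file operations can serve every file.
+ */
+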
+static int debugfs_show(struct seq_file *s, void *data)
+{
+	void (*show)(struct seq_file *) = s->private;
+
+	show(s);
+
+	return 0;
+}
+
+static int debug_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, debugfs_show, inode->i_private);
+}
+
+static const struct file_operations debug_ops = {
+	.open = debug_open,
+	.release = single_release,
+	.read = seq_read,
+	.llseek = seq_lseek,
+};
+
+static void debug_create(const char *name,
+			 void (*show)(struct seq_file *))
+{
+	struct dentry *file;
+
+	file = debugfs_create_file(name, 0444, dent, show, &debug_ops);
+	if (!file)
+		pr_err("%s: unable to create file '%s'\n", __func__, name);
+}
+
+static int __init smp2p_debugfs_init(void)
+{
+	dent = debugfs_create_dir("smp2p", 0);
+	if (IS_ERR(dent))
+		return PTR_ERR(dent);
+
+	debug_create("int_stats", smp2p_int_stats);
+	debug_create("items", smp2p_items);
+
+	return 0;
+}
+
+late_initcall(smp2p_debugfs_init);
+#endif /* CONFIG_DEBUG_FS */
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/soc/qcom/smp2p_loopback.c	2019-01-22 16:16:26.675275131 +0100
@@ -0,0 +1,449 @@
+/* drivers/soc/qcom/smp2p_loopback.c
+ *
+ * Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#include <linux/debugfs.h>
+#include <linux/list.h>
+#include <linux/ctype.h>
+#include <linux/jiffies.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/completion.h>
+#include <linux/termios.h>
+#include <linux/module.h>
+#include <linux/remote_spinlock.h>
+#include "smem_private.h"
+#include "smp2p_private.h"
+
+/**
+ * struct smp2p_loopback_ctx - Representation of remote loopback object.
+ *
+ * @proc_id: Processor id of the processor that sends the loopback commands.
+ * @out: Handle to the SMEM entry structure used to provide the response.
+ * @out_nb: Notifier that signals the opening of the local entry.
+ * @out_is_active: True while outbound entry events should be processed.
+ * @in_nb: Notifier that signals changes in the remote entry.
+ * @in_is_active: True while inbound entry events should be processed.
+ * @rmt_lpb_work: Work item that handles the incoming loopback commands.
+ * @rmt_cmd: Structure that holds the current and previous value of the entry.
+ */
+struct smp2p_loopback_ctx {
+	int proc_id;
+	struct msm_smp2p_out *out;
+	struct notifier_block out_nb;
+	bool out_is_active;
+	struct notifier_block in_nb;
+	bool in_is_active;
+	struct work_struct  rmt_lpb_work;
+	struct msm_smp2p_update_notif rmt_cmd;
+};
+
+static struct smp2p_loopback_ctx  remote_loopback[SMP2P_NUM_PROCS];
+static struct msm_smp2p_remote_mock remote_mock;
+
+/**
+ * remote_spinlock_test - Handles remote spinlock test.
+ *
+ * @ctx: Loopback context
+ *
+ * Repeatedly takes the SMEM remote spinlock, advertises RSPIN_LOCKED to
+ * the requesting processor, and releases the lock until the remote side
+ * signals RSPIN_END; the number of lock acquisitions is then reported
+ * back in the response data field.
+ */
+static void remote_spinlock_test(struct smp2p_loopback_ctx *ctx)
+{
+	uint32_t test_request;
+	uint32_t test_response;
+	unsigned long flags;
+	int n;
+	unsigned lock_count = 0;
+	remote_spinlock_t *smem_spinlock;
+
+	test_request = 0x0;
+	SMP2P_SET_RMT_CMD_TYPE_REQ(test_request);
+	smem_spinlock = smem_get_remote_spinlock();
+	if (!smem_spinlock) {
+		pr_err("%s: unable to get remote spinlock\n", __func__);
+		return;
+	}
+
+	for (;;) {
+		remote_spin_lock_irqsave(smem_spinlock, flags);
+		++lock_count;
+		SMP2P_SET_RMT_CMD(test_request, SMP2P_LB_CMD_RSPIN_LOCKED);
+		(void)msm_smp2p_out_write(ctx->out, test_request);
+
+		for (n = 0; n < 10000; ++n) {
+			(void)msm_smp2p_in_read(ctx->proc_id,
+					"smp2p", &test_response);
+			test_response = SMP2P_GET_RMT_CMD(test_response);
+
+			if (test_response == SMP2P_LB_CMD_RSPIN_END)
+				break;
+
+			if (test_response != SMP2P_LB_CMD_RSPIN_UNLOCKED)
+				SMP2P_ERR("%s: invalid spinlock command %x\n",
+					__func__, test_response);
+		}
+
+		if (test_response == SMP2P_LB_CMD_RSPIN_END) {
+			SMP2P_SET_RMT_CMD_TYPE_RESP(test_request);
+			SMP2P_SET_RMT_CMD(test_request,
+					SMP2P_LB_CMD_RSPIN_END);
+			SMP2P_SET_RMT_DATA(test_request, lock_count);
+			(void)msm_smp2p_out_write(ctx->out, test_request);
+			break;
+		}
+
+		SMP2P_SET_RMT_CMD(test_request, SMP2P_LB_CMD_RSPIN_UNLOCKED);
+		(void)msm_smp2p_out_write(ctx->out, test_request);
+		remote_spin_unlock_irqrestore(smem_spinlock, flags);
+	}
+	remote_spin_unlock_irqrestore(smem_spinlock, flags);
+}
+
+/**
+ * smp2p_rmt_lpb_worker - Handles incoming remote loopback commands.
+ *
+ * @work: Work Item scheduled to handle the incoming commands.
+ */
+static void smp2p_rmt_lpb_worker(struct work_struct *work)
+{
+	struct smp2p_loopback_ctx *ctx;
+	int lpb_cmd;
+	int lpb_cmd_type;
+	int lpb_data;
+
+	ctx = container_of(work, struct smp2p_loopback_ctx, rmt_lpb_work);
+
+	if (!ctx->in_is_active || !ctx->out_is_active)
+		return;
+
+	if (ctx->rmt_cmd.previous_value == ctx->rmt_cmd.current_value)
+		return;
+
+	lpb_cmd_type =  SMP2P_GET_RMT_CMD_TYPE(ctx->rmt_cmd.current_value);
+	lpb_cmd = SMP2P_GET_RMT_CMD(ctx->rmt_cmd.current_value);
+	lpb_data = SMP2P_GET_RMT_DATA(ctx->rmt_cmd.current_value);
+
+	/* the ignore flag lives at bit 30 of the raw command word, not in
+	 * the 6-bit command field, so test the raw value */
+	if (SMP2P_GET_BITS(ctx->rmt_cmd.current_value,
+			SMP2P_RMT_IGNORE_MASK, SMP2P_RMT_IGNORE_BIT))
+		return;
+
+	switch (lpb_cmd) {
+	case SMP2P_LB_CMD_NOOP:
+	    /* Do nothing */
+	    break;
+
+	case SMP2P_LB_CMD_ECHO:
+		SMP2P_SET_RMT_CMD_TYPE(ctx->rmt_cmd.current_value, 0);
+		SMP2P_SET_RMT_DATA(ctx->rmt_cmd.current_value,
+							lpb_data);
+		(void)msm_smp2p_out_write(ctx->out,
+					ctx->rmt_cmd.current_value);
+	    break;
+
+	case SMP2P_LB_CMD_CLEARALL:
+		ctx->rmt_cmd.current_value = 0;
+		(void)msm_smp2p_out_write(ctx->out,
+					ctx->rmt_cmd.current_value);
+	    break;
+
+	case SMP2P_LB_CMD_PINGPONG:
+		SMP2P_SET_RMT_CMD_TYPE(ctx->rmt_cmd.current_value, 0);
+		if (lpb_data) {
+			lpb_data--;
+			SMP2P_SET_RMT_DATA(ctx->rmt_cmd.current_value,
+					lpb_data);
+			(void)msm_smp2p_out_write(ctx->out,
+					ctx->rmt_cmd.current_value);
+		}
+	    break;
+
+	case SMP2P_LB_CMD_RSPIN_START:
+		remote_spinlock_test(ctx);
+		break;
+
+	case SMP2P_LB_CMD_RSPIN_LOCKED:
+	case SMP2P_LB_CMD_RSPIN_UNLOCKED:
+	case SMP2P_LB_CMD_RSPIN_END:
+		/* not used for remote spinlock test */
+		break;
+
+	default:
+		SMP2P_DBG("%s: Unknown loopback command %x\n",
+				__func__, lpb_cmd);
+		break;
+	}
+}
+
+/**
+ * smp2p_rmt_in_edge_notify -  Schedules a work item to handle the commands.
+ *
+ * @nb: Notifier block, this is called when the value in remote entry changes.
+ * @event: Takes value SMP2P_ENTRY_UPDATE or SMP2P_OPEN based on the event.
+ * @data: Consists of previous and current value in case of entry update.
+ * @returns: 0 for success (return value required for notifier chains).
+ */
+static int smp2p_rmt_in_edge_notify(struct notifier_block *nb,
+				unsigned long event, void *data)
+{
+	struct smp2p_loopback_ctx *ctx;
+
+	if (!(event == SMP2P_ENTRY_UPDATE || event == SMP2P_OPEN))
+		return 0;
+
+	ctx = container_of(nb, struct smp2p_loopback_ctx, in_nb);
+	if (data && ctx->in_is_active) {
+		ctx->rmt_cmd = *(struct msm_smp2p_update_notif *)data;
+		schedule_work(&ctx->rmt_lpb_work);
+	}
+
+	return 0;
+}
+
+/**
+ * smp2p_rmt_out_edge_notify - Notifies on the opening of the outbound entry.
+ *
+ * @nb: Notifier block, this is called when the local entry is open.
+ * @event: Takes on value SMP2P_OPEN when the local entry is open.
+ * @data: Consist of current value of the remote entry, if entry is open.
+ * @returns: 0 for success (return value required for notifier chains).
+ */
+static int smp2p_rmt_out_edge_notify(struct notifier_block  *nb,
+				unsigned long event, void *data)
+{
+	struct smp2p_loopback_ctx *ctx;
+
+	ctx = container_of(nb, struct smp2p_loopback_ctx, out_nb);
+	if (event == SMP2P_OPEN)
+		SMP2P_DBG("%s: 'smp2p':%d opened\n", __func__,
+				ctx->proc_id);
+
+	return 0;
+}
+
+/**
+ * msm_smp2p_init_rmt_lpb -  Initializes the remote loopback object.
+ *
+ * @ctx: Pointer to remote loopback object that needs to be initialized.
+ * @pid: Processor id  of the processor that is sending the commands.
+ * @entry: Name of the entry that needs to be opened locally.
+ * @returns: 0 on success, standard Linux error code otherwise.
+ */
+static int msm_smp2p_init_rmt_lpb(struct  smp2p_loopback_ctx *ctx,
+			int pid, const char *entry)
+{
+	int ret = 0;
+	int tmp;
+
+	if (!ctx || !entry || pid >= SMP2P_NUM_PROCS)
+		return -EINVAL;
+
+	ctx->in_nb.notifier_call = smp2p_rmt_in_edge_notify;
+	ctx->out_nb.notifier_call = smp2p_rmt_out_edge_notify;
+	ctx->proc_id = pid;
+	ctx->in_is_active = true;
+	ctx->out_is_active = true;
+	tmp = msm_smp2p_out_open(pid, entry, &ctx->out_nb,
+						&ctx->out);
+	if (tmp) {
+		SMP2P_ERR("%s: open failed outbound entry '%s':%d - ret %d\n",
+				__func__, entry, pid, tmp);
+		ret = tmp;
+	}
+
+	tmp = msm_smp2p_in_register(ctx->proc_id,
+				SMP2P_RLPB_ENTRY_NAME,
+				&ctx->in_nb);
+	if (tmp) {
+		SMP2P_ERR("%s: unable to open inbound entry '%s':%d - ret %d\n",
+				__func__, entry, pid, tmp);
+		ret = tmp;
+	}
+
+	return ret;
+}
+
+/**
+ * msm_smp2p_init_rmt_lpb_proc - Wrapper over msm_smp2p_init_rmt_lpb
+ *
+ * @remote_pid: Processor ID of the processor that sends loopback command.
+ * @returns: Pointer to outbound entry handle.
+ */
+void *msm_smp2p_init_rmt_lpb_proc(int remote_pid)
+{
+	int tmp;
+	void *ret = NULL;
+
+	tmp = msm_smp2p_init_rmt_lpb(&remote_loopback[remote_pid],
+			remote_pid, SMP2P_RLPB_ENTRY_NAME);
+	if (!tmp)
+		ret = remote_loopback[remote_pid].out;
+
+	return ret;
+}
+EXPORT_SYMBOL(msm_smp2p_init_rmt_lpb_proc);
+
+/**
+ * msm_smp2p_deinit_rmt_lpb_proc - Unregister support for remote processor.
+ *
+ * @remote_pid:  Processor ID of the remote system.
+ * @returns: 0 on success, standard Linux error code otherwise.
+ *
+ * Unregister loopback support for remote processor.
+ */
+int msm_smp2p_deinit_rmt_lpb_proc(int remote_pid)
+{
+	int ret = 0;
+	int tmp;
+	struct smp2p_loopback_ctx *ctx;
+
+	if (remote_pid >= SMP2P_NUM_PROCS)
+		return -EINVAL;
+
+	ctx = &remote_loopback[remote_pid];
+
+	/* abort any pending notifications */
+	remote_loopback[remote_pid].out_is_active = false;
+	remote_loopback[remote_pid].in_is_active = false;
+	flush_work(&ctx->rmt_lpb_work);
+
+	/* unregister entries */
+	tmp = msm_smp2p_out_close(&remote_loopback[remote_pid].out);
+	remote_loopback[remote_pid].out = NULL;
+	if (tmp) {
+		SMP2P_ERR("%s: outbound 'smp2p':%d close failed %d\n",
+				__func__, remote_pid, tmp);
+		ret = tmp;
+	}
+
+	tmp = msm_smp2p_in_unregister(remote_pid,
+		SMP2P_RLPB_ENTRY_NAME, &remote_loopback[remote_pid].in_nb);
+	if (tmp) {
+		SMP2P_ERR("%s: inbound 'smp2p':%d close failed %d\n",
+				__func__, remote_pid, tmp);
+		ret = tmp;
+	}
+
+	return ret;
+}
+EXPORT_SYMBOL(msm_smp2p_deinit_rmt_lpb_proc);
+
+/**
+ * msm_smp2p_set_remote_mock_exists - Sets the remote mock configuration.
+ *
+ * @item_exists: true = Remote mock SMEM item exists
+ *
+ * This is used in the testing environment to simulate the existence of the
+ * remote smem item in order to test the negotiation algorithm.
+ */
+void msm_smp2p_set_remote_mock_exists(bool item_exists)
+{
+	remote_mock.item_exists = item_exists;
+}
+EXPORT_SYMBOL(msm_smp2p_set_remote_mock_exists);
+
+/**
+ * msm_smp2p_get_remote_mock - Get remote mock object.
+ *
+ * @returns: Pointer to the remote mock object.
+ */
+void *msm_smp2p_get_remote_mock(void)
+{
+	return &remote_mock;
+}
+EXPORT_SYMBOL(msm_smp2p_get_remote_mock);
+
+/**
+ * msm_smp2p_get_remote_mock_smem_item - Returns a pointer to remote item.
+ *
+ * @size:    Size of item.
+ * @returns: Pointer to mock remote smem item.
+ */
+void *msm_smp2p_get_remote_mock_smem_item(uint32_t *size)
+{
+	void *ptr = NULL;
+
+	if (remote_mock.item_exists) {
+		*size = sizeof(remote_mock.remote_item);
+		ptr = &(remote_mock.remote_item);
+	}
+
+	return ptr;
+}
+EXPORT_SYMBOL(msm_smp2p_get_remote_mock_smem_item);
+
+/**
+ * smp2p_remote_mock_rx_interrupt - Triggers receive interrupt for mock proc.
+ *
+ * @returns: 0 for success
+ *
+ * This function simulates the receiving of interrupt by the mock remote
+ * processor in a testing environment.
+ */
+int smp2p_remote_mock_rx_interrupt(void)
+{
+	remote_mock.rx_interrupt_count++;
+	if (remote_mock.initialized)
+		complete(&remote_mock.cb_completion);
+	return 0;
+}
+EXPORT_SYMBOL(smp2p_remote_mock_rx_interrupt);
+
+/**
+ * smp2p_remote_mock_tx_interrupt - Calls the SMP2P interrupt handler.
+ *
+ * This function calls the interrupt handler of the Apps processor to simulate
+ * receiving interrupts from a remote processor.
+ */
+static void smp2p_remote_mock_tx_interrupt(void)
+{
+	msm_smp2p_interrupt_handler(SMP2P_REMOTE_MOCK_PROC);
+}
+
+/**
+ * smp2p_remote_mock_init - Initialize the remote mock and loopback objects.
+ *
+ * @returns: 0 for success
+ */
+static int __init smp2p_remote_mock_init(void)
+{
+	int i;
+	struct smp2p_interrupt_config *int_cfg;
+
+	smp2p_init_header(&remote_mock.remote_item.header,
+			SMP2P_REMOTE_MOCK_PROC, SMP2P_APPS_PROC,
+			0, 0);
+	remote_mock.rx_interrupt_count = 0;
+	remote_mock.rx_interrupt = smp2p_remote_mock_rx_interrupt;
+	remote_mock.tx_interrupt = smp2p_remote_mock_tx_interrupt;
+	remote_mock.item_exists = false;
+	init_completion(&remote_mock.cb_completion);
+	remote_mock.initialized = true;
+
+	for (i = 0; i < SMP2P_NUM_PROCS; i++) {
+		INIT_WORK(&(remote_loopback[i].rmt_lpb_work),
+				smp2p_rmt_lpb_worker);
+		if (i == SMP2P_REMOTE_MOCK_PROC)
+			/* do not register loopback for remote mock proc */
+			continue;
+
+		int_cfg = smp2p_get_interrupt_config();
+		if (!int_cfg) {
+			SMP2P_ERR("Remote processor config unavailable\n");
+			return 0;
+		}
+		if (!int_cfg[i].is_configured)
+			continue;
+
+		msm_smp2p_init_rmt_lpb(&remote_loopback[i],
+			i, SMP2P_RLPB_ENTRY_NAME);
+	}
+	return 0;
+}
+module_init(smp2p_remote_mock_init);
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/soc/qcom/smp2p_private_api.h	2019-01-22 16:16:26.675275131 +0100
@@ -0,0 +1,80 @@
+/* drivers/soc/qcom/smp2p_private_api.h
+ *
+ * Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#ifndef _ARCH_ARM_MACH_MSM_SMP2P_PRIVATE_API_H_
+#define _ARCH_ARM_MACH_MSM_SMP2P_PRIVATE_API_H_
+
+#include <linux/notifier.h>
+
+struct msm_smp2p_out;
+
+/* Maximum size of the entry name and trailing null. */
+#define SMP2P_MAX_ENTRY_NAME 16
+
+/* Bits per entry */
+#define SMP2P_BITS_PER_ENTRY 32
+
+/* Processor ID's */
+enum {
+	SMP2P_APPS_PROC       = 0,
+	SMP2P_MODEM_PROC      = 1,
+	SMP2P_AUDIO_PROC      = 2,
+	SMP2P_SENSOR_PROC     = 3,
+	SMP2P_WIRELESS_PROC   = 4,
+	SMP2P_CDSP_PROC       = 5,
+	SMP2P_POWER_PROC      = 6,
+	SMP2P_TZ_PROC         = 7,
+	/* add new processors here */
+
+	SMP2P_REMOTE_MOCK_PROC = 15,
+	SMP2P_NUM_PROCS,
+};
+
+/**
+ * Notification events that are passed to notifier for incoming and outgoing
+ * entries.
+ *
+ * If the data argument in the notifier is non-null, then it will point to
+ * the associated struct msm_smp2p_update_notif structure.
+ */
+enum msm_smp2p_events {
+	SMP2P_OPEN,         /* data is NULL */
+	SMP2P_ENTRY_UPDATE, /* data => struct msm_smp2p_update_notif */
+};
+
+/**
+ * Passed in response to a SMP2P_ENTRY_UPDATE event.
+ *
+ * @previous_value: previous value of entry
+ * @current_value:  latest value of entry
+ */
+struct msm_smp2p_update_notif {
+	uint32_t previous_value;
+	uint32_t current_value;
+};
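+
+/*
+ * Illustrative inbound notifier sketch (the callback name and log message
+ * are hypothetical, not part of this API):
+ *
+ *	static int my_in_notifier(struct notifier_block *nb,
+ *				  unsigned long event, void *data)
+ *	{
+ *		struct msm_smp2p_update_notif *notif = data;
+ *
+ *		if (event == SMP2P_ENTRY_UPDATE)
+ *			pr_debug("entry: 0x%08x -> 0x%08x\n",
+ *				 notif->previous_value,
+ *				 notif->current_value);
+ *		return 0;
+ *	}
+ */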
+
+int msm_smp2p_out_open(int remote_pid, const char *entry,
+	struct notifier_block *open_notifier,
+	struct msm_smp2p_out **handle);
+int msm_smp2p_out_close(struct msm_smp2p_out **handle);
+int msm_smp2p_out_read(struct msm_smp2p_out *handle, uint32_t *data);
+int msm_smp2p_out_write(struct msm_smp2p_out *handle, uint32_t data);
+int msm_smp2p_out_modify(struct msm_smp2p_out *handle, uint32_t set_mask,
+	uint32_t clear_mask, bool send_irq);
+int msm_smp2p_in_read(int remote_pid, const char *entry, uint32_t *data);
+int msm_smp2p_in_register(int remote_pid, const char *entry,
+	struct notifier_block *in_notifier);
+int msm_smp2p_in_unregister(int remote_pid, const char *entry,
+	struct notifier_block *in_notifier);
+
+#endif /* _ARCH_ARM_MACH_MSM_SMP2P_PRIVATE_API_H_ */
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/soc/qcom/smp2p_private.h	2019-10-29 09:26:24.825214745 +0100
@@ -0,0 +1,253 @@
+/* drivers/soc/qcom/smp2p_private.h
+ *
+ * Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#ifndef _ARCH_ARM_MACH_MSM_MSM_SMP2P_PRIVATE_H_
+#define _ARCH_ARM_MACH_MSM_MSM_SMP2P_PRIVATE_H_
+
+#include <linux/types.h>
+#include <linux/spinlock.h>
+#include <linux/ipc_logging.h>
+#include "smp2p_private_api.h"
+
+#define SMP2P_MAX_ENTRY 16
+#define SMP2P_FEATURE_SSR_ACK 0x1
+
+/* SMEM Item Header Macros */
+#define SMP2P_MAGIC 0x504D5324
+#define SMP2P_LOCAL_PID_MASK 0x0000ffff
+#define SMP2P_LOCAL_PID_BIT 0
+#define SMP2P_REMOTE_PID_MASK 0xffff0000
+#define SMP2P_REMOTE_PID_BIT 16
+#define SMP2P_VERSION_MASK 0x000000ff
+#define SMP2P_VERSION_BIT 0
+#define SMP2P_FEATURE_MASK 0xffffff00
+#define SMP2P_FEATURE_BIT 8
+#define SMP2P_ENT_TOTAL_MASK 0x0000ffff
+#define SMP2P_ENT_TOTAL_BIT 0
+#define SMP2P_ENT_VALID_MASK 0xffff0000
+#define SMP2P_ENT_VALID_BIT 16
+#define SMP2P_FLAGS_RESTART_DONE_BIT 0
+#define SMP2P_FLAGS_RESTART_DONE_MASK 0x1
+#define SMP2P_FLAGS_RESTART_ACK_BIT 1
+#define SMP2P_FLAGS_RESTART_ACK_MASK 0x2
+#define SMP2P_GPIO_NO_INT BIT(1)
+
+#define SMP2P_GET_BITS(hdr_val, mask, bit) \
+	(((hdr_val) & (mask)) >> (bit))
+#define SMP2P_SET_BITS(hdr_val, mask, bit, new_value) \
+	{\
+		hdr_val = (hdr_val & ~(mask)) \
+		| (((new_value) << (bit)) & (mask)); \
+	}
+
+#define SMP2P_GET_LOCAL_PID(hdr) \
+	SMP2P_GET_BITS(hdr, SMP2P_LOCAL_PID_MASK, SMP2P_LOCAL_PID_BIT)
+#define SMP2P_SET_LOCAL_PID(hdr, pid) \
+	SMP2P_SET_BITS(hdr, SMP2P_LOCAL_PID_MASK, SMP2P_LOCAL_PID_BIT, pid)
+
+#define SMP2P_GET_REMOTE_PID(hdr) \
+	SMP2P_GET_BITS(hdr, SMP2P_REMOTE_PID_MASK, SMP2P_REMOTE_PID_BIT)
+#define SMP2P_SET_REMOTE_PID(hdr, pid) \
+	SMP2P_SET_BITS(hdr, SMP2P_REMOTE_PID_MASK, SMP2P_REMOTE_PID_BIT, pid)
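+
+/*
+ * Worked example (values follow from the masks above): packing the header
+ * word for an Apps(0) -> Modem(1) item:
+ *
+ *	uint32_t hdr = 0;
+ *	SMP2P_SET_LOCAL_PID(hdr, SMP2P_APPS_PROC);	hdr == 0x00000000
+ *	SMP2P_SET_REMOTE_PID(hdr, SMP2P_MODEM_PROC);	hdr == 0x00010000
+ *	SMP2P_GET_REMOTE_PID(hdr);			returns 1
+ */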
+
+#define SMP2P_GET_VERSION(hdr) \
+	SMP2P_GET_BITS(hdr, SMP2P_VERSION_MASK, SMP2P_VERSION_BIT)
+#define SMP2P_SET_VERSION(hdr, version) \
+	SMP2P_SET_BITS(hdr, SMP2P_VERSION_MASK, SMP2P_VERSION_BIT, version)
+
+#define SMP2P_GET_FEATURES(hdr) \
+	SMP2P_GET_BITS(hdr, SMP2P_FEATURE_MASK, SMP2P_FEATURE_BIT)
+#define SMP2P_SET_FEATURES(hdr, features) \
+	SMP2P_SET_BITS(hdr, SMP2P_FEATURE_MASK, SMP2P_FEATURE_BIT, features)
+
+#define SMP2P_GET_ENT_TOTAL(hdr) \
+	SMP2P_GET_BITS(hdr, SMP2P_ENT_TOTAL_MASK, SMP2P_ENT_TOTAL_BIT)
+#define SMP2P_SET_ENT_TOTAL(hdr, entries) \
+	SMP2P_SET_BITS(hdr, SMP2P_ENT_TOTAL_MASK, SMP2P_ENT_TOTAL_BIT, entries)
+
+#define SMP2P_GET_ENT_VALID(hdr) \
+	SMP2P_GET_BITS(hdr, SMP2P_ENT_VALID_MASK, SMP2P_ENT_VALID_BIT)
+#define SMP2P_SET_ENT_VALID(hdr, entries) \
+	SMP2P_SET_BITS(hdr,  SMP2P_ENT_VALID_MASK, SMP2P_ENT_VALID_BIT,\
+		entries)
+
+#define SMP2P_GET_RESTART_DONE(hdr) \
+	SMP2P_GET_BITS(hdr, SMP2P_FLAGS_RESTART_DONE_MASK, \
+			SMP2P_FLAGS_RESTART_DONE_BIT)
+#define SMP2P_SET_RESTART_DONE(hdr, value) \
+	SMP2P_SET_BITS(hdr, SMP2P_FLAGS_RESTART_DONE_MASK, \
+			SMP2P_FLAGS_RESTART_DONE_BIT, value)
+
+#define SMP2P_GET_RESTART_ACK(hdr) \
+	SMP2P_GET_BITS(hdr, SMP2P_FLAGS_RESTART_ACK_MASK, \
+			SMP2P_FLAGS_RESTART_ACK_BIT)
+#define SMP2P_SET_RESTART_ACK(hdr, value) \
+	SMP2P_SET_BITS(hdr, SMP2P_FLAGS_RESTART_ACK_MASK, \
+			SMP2P_FLAGS_RESTART_ACK_BIT, value)
+
+/* Loopback Command Macros */
+#define SMP2P_RMT_CMD_TYPE_MASK 0x80000000
+#define SMP2P_RMT_CMD_TYPE_BIT 31
+#define SMP2P_RMT_IGNORE_MASK 0x40000000
+#define SMP2P_RMT_IGNORE_BIT 30
+#define SMP2P_RMT_CMD_MASK 0x3f000000
+#define SMP2P_RMT_CMD_BIT 24
+#define SMP2P_RMT_DATA_MASK 0x00ffffff
+#define SMP2P_RMT_DATA_BIT 0
+
+#define SMP2P_GET_RMT_CMD_TYPE(val) \
+	SMP2P_GET_BITS(val, SMP2P_RMT_CMD_TYPE_MASK, SMP2P_RMT_CMD_TYPE_BIT)
+#define SMP2P_GET_RMT_CMD(val) \
+	SMP2P_GET_BITS(val, SMP2P_RMT_CMD_MASK, SMP2P_RMT_CMD_BIT)
+
+#define SMP2P_GET_RMT_DATA(val) \
+	SMP2P_GET_BITS(val, SMP2P_RMT_DATA_MASK, SMP2P_RMT_DATA_BIT)
+
+#define SMP2P_SET_RMT_CMD_TYPE(val, cmd_type) \
+	SMP2P_SET_BITS(val, SMP2P_RMT_CMD_TYPE_MASK, SMP2P_RMT_CMD_TYPE_BIT, \
+		cmd_type)
+#define SMP2P_SET_RMT_CMD_TYPE_REQ(val) \
+	SMP2P_SET_RMT_CMD_TYPE(val, 1)
+#define SMP2P_SET_RMT_CMD_TYPE_RESP(val) \
+	SMP2P_SET_RMT_CMD_TYPE(val, 0)
+
+#define SMP2P_SET_RMT_CMD(val, cmd) \
+	SMP2P_SET_BITS(val, SMP2P_RMT_CMD_MASK, SMP2P_RMT_CMD_BIT, \
+		cmd)
+#define SMP2P_SET_RMT_DATA(val, data) \
+	SMP2P_SET_BITS(val, SMP2P_RMT_DATA_MASK, SMP2P_RMT_DATA_BIT, data)
+
+enum {
+	SMP2P_LB_CMD_NOOP = 0x0,
+	SMP2P_LB_CMD_ECHO,
+	SMP2P_LB_CMD_CLEARALL,
+	SMP2P_LB_CMD_PINGPONG,
+	SMP2P_LB_CMD_RSPIN_START,
+	SMP2P_LB_CMD_RSPIN_LOCKED,
+	SMP2P_LB_CMD_RSPIN_UNLOCKED,
+	SMP2P_LB_CMD_RSPIN_END,
+};
+#define SMP2P_RLPB_IGNORE 0x40
+#define SMP2P_RLPB_ENTRY_NAME "smp2p"
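+
+/*
+ * Worked example (values follow from the masks above): building an ECHO
+ * loopback request word:
+ *
+ *	uint32_t cmd = 0;
+ *	SMP2P_SET_RMT_CMD_TYPE_REQ(cmd);		cmd == 0x80000000
+ *	SMP2P_SET_RMT_CMD(cmd, SMP2P_LB_CMD_ECHO);	cmd == 0x81000000
+ *	SMP2P_SET_RMT_DATA(cmd, 0x1234);		cmd == 0x81001234
+ */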
+
+/* Debug Logging Macros */
+enum {
+	MSM_SMP2P_INFO = 1U << 0,
+	MSM_SMP2P_DEBUG = 1U << 1,
+	MSM_SMP2P_GPIO = 1U << 2,
+};
+
+#define SMP2P_IPC_LOG_STR(x...) do { \
+	if (smp2p_get_log_ctx()) \
+		ipc_log_string(smp2p_get_log_ctx(), x); \
+} while (0)
+
+#define SMP2P_DBG(x...) do {                              \
+	if (smp2p_get_debug_mask() & MSM_SMP2P_DEBUG) \
+			SMP2P_IPC_LOG_STR(x);  \
+} while (0)
+
+#define SMP2P_INFO(x...) do {                              \
+	if (smp2p_get_debug_mask() & MSM_SMP2P_INFO) \
+			SMP2P_IPC_LOG_STR(x);  \
+} while (0)
+
+#define SMP2P_ERR(x...) do {                              \
+	pr_err(x); \
+	SMP2P_IPC_LOG_STR(x);  \
+} while (0)
+
+#define SMP2P_GPIO(x...) do {                              \
+	if (smp2p_get_debug_mask() & MSM_SMP2P_GPIO) \
+			SMP2P_IPC_LOG_STR(x);  \
+} while (0)
+
+enum msm_smp2p_edge_state {
+	SMP2P_EDGE_STATE_CLOSED,
+	SMP2P_EDGE_STATE_OPENING,
+	SMP2P_EDGE_STATE_OPENED,
+	SMP2P_EDGE_STATE_FAILED = 0xff,
+};
+
+/**
+ * struct smp2p_smem - SMP2P SMEM Item Header
+ *
+ * @magic:  Set to "$SMP" -- used for identification / debug purposes
+ * @feature_version:  Feature and version fields
+ * @rem_loc_proc_id:  Remote (31:16) and Local (15:0) processor IDs
+ * @valid_total_ent:  Valid (31:16) and total (15:0) entries
+ * @flags:  Flags (bits 31:2 reserved)
+ */
+struct smp2p_smem {
+	uint32_t magic;
+	uint32_t feature_version;
+	uint32_t rem_loc_proc_id;
+	uint32_t valid_total_ent;
+	uint32_t flags;
+};
+
+struct smp2p_entry_v1 {
+	char name[SMP2P_MAX_ENTRY_NAME];
+	uint32_t entry;
+};
+
+struct smp2p_smem_item {
+	struct smp2p_smem header;
+	struct smp2p_entry_v1 entries[SMP2P_MAX_ENTRY];
+};
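+
+/*
+ * In a v1 SMEM item the entry array immediately follows the header, which
+ * is how the debugfs code walks entries via header pointer arithmetic.
+ */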
+
+/* Mock object for internal loopback testing. */
+struct msm_smp2p_remote_mock {
+	struct smp2p_smem_item remote_item;
+	int rx_interrupt_count;
+	int (*rx_interrupt)(void);
+	void (*tx_interrupt)(void);
+
+	bool item_exists;
+	bool initialized;
+	struct completion cb_completion;
+};
+
+void smp2p_init_header(struct smp2p_smem *header_ptr, int local_pid,
+		int remote_pid, uint32_t features, uint32_t version);
+void *msm_smp2p_get_remote_mock(void);
+int smp2p_remote_mock_rx_interrupt(void);
+int smp2p_reset_mock_edge(void);
+void msm_smp2p_interrupt_handler(int);
+void msm_smp2p_set_remote_mock_exists(bool item_exists);
+void *msm_smp2p_get_remote_mock_smem_item(uint32_t *size);
+void *msm_smp2p_init_rmt_lpb_proc(int remote_pid);
+int msm_smp2p_deinit_rmt_lpb_proc(int remote_pid);
+void *smp2p_get_log_ctx(void);
+int smp2p_get_debug_mask(void);
+
+/* Inbound / outbound Interrupt configuration. */
+struct smp2p_interrupt_config {
+	bool is_configured;
+	uint32_t *out_int_ptr;
+	uint32_t out_int_mask;
+	int in_int_id;
+	const char *name;
+
+	/* interrupt stats */
+	unsigned in_interrupt_count;
+	unsigned out_interrupt_count;
+};
+
+struct smp2p_interrupt_config *smp2p_get_interrupt_config(void);
+const char *smp2p_pid_to_name(int remote_pid);
+struct smp2p_smem *smp2p_get_in_item(int remote_pid);
+struct smp2p_smem *smp2p_get_out_item(int remote_pid, int *state);
+void smp2p_gpio_open_test_entry(const char *name, int remote_pid, bool do_open);
+#endif
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/soc/qcom/smp2p_sleepstate.c	2019-01-22 16:16:26.675275131 +0100
@@ -0,0 +1,112 @@
+/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#include <linux/gpio.h>
+#include <linux/module.h>
+#include <linux/of_gpio.h>
+#include <linux/platform_device.h>
+#include <linux/suspend.h>
+#include <linux/delay.h>
+#include <linux/ipc_router.h>
+#include "smp2p_private.h"
+
+#define SET_DELAY (2 * HZ)
+#define PROC_AWAKE_ID 12 /* bit 12 of the sleepstate entry */
+static int slst_gpio_base_id;
+
+/**
+ * sleepstate_pm_notifier() - PM notifier callback function.
+ * @nb:		Pointer to the notifier block.
+ * @event:	Suspend state event from PM module.
+ * @unused:	Null pointer from PM module.
+ *
+ * This function is registered as a callback to receive notifications
+ * from the PM module about the system suspend state.
+ */
+static int sleepstate_pm_notifier(struct notifier_block *nb,
+				unsigned long event, void *unused)
+{
+	switch (event) {
+	case PM_SUSPEND_PREPARE:
+		gpio_set_value(slst_gpio_base_id + PROC_AWAKE_ID, 0);
+		msleep(25); /* To be tuned based on SMP2P latencies */
+		msm_ipc_router_set_ws_allowed(true);
+		break;
+
+	case PM_POST_SUSPEND:
+		gpio_set_value(slst_gpio_base_id + PROC_AWAKE_ID, 1);
+		msleep(25); /* To be tuned based on SMP2P latencies */
+		msm_ipc_router_set_ws_allowed(false);
+		break;
+	}
+	return NOTIFY_DONE;
+}
+
+static struct notifier_block sleepstate_pm_nb = {
+	.notifier_call = sleepstate_pm_notifier,
+	.priority = INT_MAX,
+};
+
+static int smp2p_sleepstate_probe(struct platform_device *pdev)
+{
+	int ret;
+	struct device_node *node = pdev->dev.of_node;
+
+	slst_gpio_base_id = of_get_gpio(node, 0);
+	if (slst_gpio_base_id == -EPROBE_DEFER) {
+		return slst_gpio_base_id;
+	} else if (slst_gpio_base_id < 0) {
+		SMP2P_ERR("%s: failed to get GPIO, error %d\n",
+				__func__, slst_gpio_base_id);
+		return slst_gpio_base_id;
+	}
+
+	gpio_set_value(slst_gpio_base_id + PROC_AWAKE_ID, 1);
+
+	ret = register_pm_notifier(&sleepstate_pm_nb);
+	if (ret)
+		SMP2P_ERR("%s: power state notif error %d\n", __func__, ret);
+
+	return 0;
+}
+
+static const struct of_device_id msm_smp2p_slst_match_table[] = {
+	{.compatible = "qcom,smp2pgpio_sleepstate_3_out"},
+	{.compatible = "qcom,smp2pgpio-sleepstate-out"},
+	{},
+};
+
+static struct platform_driver smp2p_sleepstate_driver = {
+	.probe = smp2p_sleepstate_probe,
+	.driver = {
+		.name = "smp2p_sleepstate",
+		.owner = THIS_MODULE,
+		.of_match_table = msm_smp2p_slst_match_table,
+	},
+};
+
+static int __init smp2p_sleepstate_init(void)
+{
+	int ret;
+
+	ret = platform_driver_register(&smp2p_sleepstate_driver);
+	if (ret) {
+		SMP2P_ERR("%s: smp2p_sleepstate_driver register failed %d\n",
+			 __func__, ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+module_init(smp2p_sleepstate_init);
+MODULE_DESCRIPTION("SMP2P SLEEP STATE");
+MODULE_LICENSE("GPL v2");
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/soc/qcom/smsm_debug.c	2019-01-22 16:16:26.679275167 +0100
@@ -0,0 +1,330 @@
+/* drivers/soc/qcom/smsm_debug.c
+ *
+ * Copyright (C) 2007 Google, Inc.
+ * Copyright (c) 2009-2014, The Linux Foundation. All rights reserved.
+ * Author: Brian Swetland <swetland@google.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/debugfs.h>
+#include <linux/list.h>
+#include <linux/ctype.h>
+#include <linux/jiffies.h>
+
+#include <soc/qcom/smem.h>
+#include <soc/qcom/smsm.h>
+
+#if defined(CONFIG_DEBUG_FS)
+
+static void debug_read_smsm_state(struct seq_file *s)
+{
+	uint32_t *smsm;
+	int n;
+
+	smsm = smem_find(SMEM_SMSM_SHARED_STATE,
+			 SMSM_NUM_ENTRIES * sizeof(uint32_t),
+			 0,
+			 SMEM_ANY_HOST_FLAG);
+
+	if (smsm)
+		for (n = 0; n < SMSM_NUM_ENTRIES; n++)
+			seq_printf(s, "entry %d: 0x%08x\n", n, smsm[n]);
+}
+
+struct SMSM_CB_DATA {
+	int cb_count;
+	void *data;
+	uint32_t old_state;
+	uint32_t new_state;
+};
+static struct SMSM_CB_DATA smsm_cb_data;
+static struct completion smsm_cb_completion;
+
+static void smsm_state_cb(void *data, uint32_t old_state, uint32_t new_state)
+{
+	smsm_cb_data.cb_count++;
+	smsm_cb_data.old_state = old_state;
+	smsm_cb_data.new_state = new_state;
+	smsm_cb_data.data = data;
+	complete_all(&smsm_cb_completion);
+}
+
+#define UT_EQ_INT(a, b) \
+	{ \
+		if ((a) != (b)) { \
+			seq_printf(s, "%s:%d " #a "(%d) != " #b "(%d)\n", \
+					__func__, __LINE__, \
+					a, b); \
+			break; \
+		} \
+	}
+
+#define UT_GT_INT(a, b) \
+	{ \
+		if ((a) <= (b)) { \
+			seq_printf(s, "%s:%d " #a "(%d) > " #b "(%d)\n", \
+					__func__, __LINE__, \
+					a, b); \
+			break; \
+		} \
+	}
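+
+/*
+ * Note: the UT_*() macros above bail out of a failing test with 'break',
+ * so they may only be used inside the do { ... } while (0) blocks that
+ * wrap each test case below.
+ */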
+
+#define SMSM_CB_TEST_INIT() \
+	do { \
+		smsm_cb_data.cb_count = 0; \
+		smsm_cb_data.old_state = 0; \
+		smsm_cb_data.new_state = 0; \
+		smsm_cb_data.data = 0; \
+	} while (0)
+
+static void debug_test_smsm(struct seq_file *s)
+{
+	int test_num = 0;
+	int ret;
+
+	/* Test case 1 - Register new callback for notification */
+	do {
+		test_num++;
+		SMSM_CB_TEST_INIT();
+		ret = smsm_state_cb_register(SMSM_APPS_STATE, SMSM_SMDINIT,
+				smsm_state_cb, (void *)0x1234);
+		UT_EQ_INT(ret, 0);
+
+		/* de-assert SMSM_SMDINIT to trigger state update */
+		UT_EQ_INT(smsm_cb_data.cb_count, 0);
+		reinit_completion(&smsm_cb_completion);
+		smsm_change_state(SMSM_APPS_STATE, SMSM_SMDINIT, 0x0);
+		UT_GT_INT((int)wait_for_completion_timeout(&smsm_cb_completion,
+					msecs_to_jiffies(20)), 0);
+
+		UT_EQ_INT(smsm_cb_data.cb_count, 1);
+		UT_EQ_INT(smsm_cb_data.old_state & SMSM_SMDINIT, SMSM_SMDINIT);
+		UT_EQ_INT(smsm_cb_data.new_state & SMSM_SMDINIT, 0x0);
+		UT_EQ_INT((int)(uintptr_t)smsm_cb_data.data, 0x1234);
+
+		/* re-assert SMSM_SMDINIT to trigger state update */
+		reinit_completion(&smsm_cb_completion);
+		smsm_change_state(SMSM_APPS_STATE, 0x0, SMSM_SMDINIT);
+		UT_GT_INT((int)wait_for_completion_timeout(&smsm_cb_completion,
+					msecs_to_jiffies(20)), 0);
+		UT_EQ_INT(smsm_cb_data.cb_count, 2);
+		UT_EQ_INT(smsm_cb_data.old_state & SMSM_SMDINIT, 0x0);
+		UT_EQ_INT(smsm_cb_data.new_state & SMSM_SMDINIT, SMSM_SMDINIT);
+
+		/* deregister callback */
+		ret = smsm_state_cb_deregister(SMSM_APPS_STATE, SMSM_SMDINIT,
+				smsm_state_cb, (void *)0x1234);
+		UT_EQ_INT(ret, 2);
+
+		/* make sure state change doesn't cause any more callbacks */
+		reinit_completion(&smsm_cb_completion);
+		smsm_change_state(SMSM_APPS_STATE, SMSM_SMDINIT, 0x0);
+		smsm_change_state(SMSM_APPS_STATE, 0x0, SMSM_SMDINIT);
+		UT_EQ_INT((int)wait_for_completion_timeout(&smsm_cb_completion,
+					msecs_to_jiffies(20)), 0);
+		UT_EQ_INT(smsm_cb_data.cb_count, 2);
+
+		seq_printf(s, "Test %d - PASS\n", test_num);
+	} while (0);
+
+	/* Test case 2 - Update already registered callback */
+	do {
+		test_num++;
+		SMSM_CB_TEST_INIT();
+		ret = smsm_state_cb_register(SMSM_APPS_STATE, SMSM_SMDINIT,
+				smsm_state_cb, (void *)0x1234);
+		UT_EQ_INT(ret, 0);
+		ret = smsm_state_cb_register(SMSM_APPS_STATE, SMSM_INIT,
+				smsm_state_cb, (void *)0x1234);
+		UT_EQ_INT(ret, 1);
+
+		/* verify both callback bits work */
+		reinit_completion(&smsm_cb_completion);
+		UT_EQ_INT(smsm_cb_data.cb_count, 0);
+		smsm_change_state(SMSM_APPS_STATE, SMSM_SMDINIT, 0x0);
+		UT_GT_INT((int)wait_for_completion_timeout(&smsm_cb_completion,
+					msecs_to_jiffies(20)), 0);
+		UT_EQ_INT(smsm_cb_data.cb_count, 1);
+		reinit_completion(&smsm_cb_completion);
+		smsm_change_state(SMSM_APPS_STATE, 0x0, SMSM_SMDINIT);
+		UT_GT_INT((int)wait_for_completion_timeout(&smsm_cb_completion,
+					msecs_to_jiffies(20)), 0);
+		UT_EQ_INT(smsm_cb_data.cb_count, 2);
+
+		reinit_completion(&smsm_cb_completion);
+		smsm_change_state(SMSM_APPS_STATE, SMSM_INIT, 0x0);
+		UT_GT_INT((int)wait_for_completion_timeout(&smsm_cb_completion,
+					msecs_to_jiffies(20)), 0);
+		UT_EQ_INT(smsm_cb_data.cb_count, 3);
+		reinit_completion(&smsm_cb_completion);
+		smsm_change_state(SMSM_APPS_STATE, 0x0, SMSM_INIT);
+		UT_GT_INT((int)wait_for_completion_timeout(&smsm_cb_completion,
+					msecs_to_jiffies(20)), 0);
+		UT_EQ_INT(smsm_cb_data.cb_count, 4);
+
+		/* deregister 1st callback */
+		ret = smsm_state_cb_deregister(SMSM_APPS_STATE, SMSM_SMDINIT,
+				smsm_state_cb, (void *)0x1234);
+		UT_EQ_INT(ret, 1);
+		reinit_completion(&smsm_cb_completion);
+		smsm_change_state(SMSM_APPS_STATE, SMSM_SMDINIT, 0x0);
+		smsm_change_state(SMSM_APPS_STATE, 0x0, SMSM_SMDINIT);
+		UT_EQ_INT((int)wait_for_completion_timeout(&smsm_cb_completion,
+					msecs_to_jiffies(20)), 0);
+		UT_EQ_INT(smsm_cb_data.cb_count, 4);
+
+		reinit_completion(&smsm_cb_completion);
+		smsm_change_state(SMSM_APPS_STATE, SMSM_INIT, 0x0);
+		UT_GT_INT((int)wait_for_completion_timeout(&smsm_cb_completion,
+					msecs_to_jiffies(20)), 0);
+		UT_EQ_INT(smsm_cb_data.cb_count, 5);
+		reinit_completion(&smsm_cb_completion);
+		smsm_change_state(SMSM_APPS_STATE, 0x0, SMSM_INIT);
+		UT_GT_INT((int)wait_for_completion_timeout(&smsm_cb_completion,
+					msecs_to_jiffies(20)), 0);
+		UT_EQ_INT(smsm_cb_data.cb_count, 6);
+
+		/* deregister 2nd callback */
+		ret = smsm_state_cb_deregister(SMSM_APPS_STATE, SMSM_INIT,
+				smsm_state_cb, (void *)0x1234);
+		UT_EQ_INT(ret, 2);
+
+		/* make sure state change doesn't cause any more callbacks */
+		reinit_completion(&smsm_cb_completion);
+		smsm_change_state(SMSM_APPS_STATE, SMSM_INIT, 0x0);
+		smsm_change_state(SMSM_APPS_STATE, 0x0, SMSM_INIT);
+		UT_EQ_INT((int)wait_for_completion_timeout(&smsm_cb_completion,
+					msecs_to_jiffies(20)), 0);
+		UT_EQ_INT(smsm_cb_data.cb_count, 6);
+
+		seq_printf(s, "Test %d - PASS\n", test_num);
+	} while (0);
+
+	/* Test case 3 - Two callback registrations with different data */
+	do {
+		test_num++;
+		SMSM_CB_TEST_INIT();
+		ret = smsm_state_cb_register(SMSM_APPS_STATE, SMSM_SMDINIT,
+				smsm_state_cb, (void *)0x1234);
+		UT_EQ_INT(ret, 0);
+		ret = smsm_state_cb_register(SMSM_APPS_STATE, SMSM_INIT,
+				smsm_state_cb, (void *)0x3456);
+		UT_EQ_INT(ret, 0);
+
+		/* verify both callbacks work */
+		reinit_completion(&smsm_cb_completion);
+		UT_EQ_INT(smsm_cb_data.cb_count, 0);
+		smsm_change_state(SMSM_APPS_STATE, SMSM_SMDINIT, 0x0);
+		UT_GT_INT((int)wait_for_completion_timeout(&smsm_cb_completion,
+					msecs_to_jiffies(20)), 0);
+		UT_EQ_INT(smsm_cb_data.cb_count, 1);
+		UT_EQ_INT((int)(uintptr_t)smsm_cb_data.data, 0x1234);
+
+		reinit_completion(&smsm_cb_completion);
+		smsm_change_state(SMSM_APPS_STATE, SMSM_INIT, 0x0);
+		UT_GT_INT((int)wait_for_completion_timeout(&smsm_cb_completion,
+					msecs_to_jiffies(20)), 0);
+		UT_EQ_INT(smsm_cb_data.cb_count, 2);
+		UT_EQ_INT((int)(uintptr_t)smsm_cb_data.data, 0x3456);
+
+		/*
+		 * Cleanup and unregister: deregister in reverse order to
+		 * verify that the data field is being used.
+		 */
+		smsm_change_state(SMSM_APPS_STATE, 0x0, SMSM_SMDINIT);
+		smsm_change_state(SMSM_APPS_STATE, 0x0, SMSM_INIT);
+		ret = smsm_state_cb_deregister(SMSM_APPS_STATE,
+				SMSM_INIT,
+				smsm_state_cb, (void *)0x3456);
+		UT_EQ_INT(ret, 2);
+		ret = smsm_state_cb_deregister(SMSM_APPS_STATE,
+				SMSM_SMDINIT,
+				smsm_state_cb, (void *)0x1234);
+		UT_EQ_INT(ret, 2);
+
+		seq_printf(s, "Test %d - PASS\n", test_num);
+	} while (0);
+}
+
+static void debug_read_intr_mask(struct seq_file *s)
+{
+	uint32_t *smsm;
+	int m, n;
+
+	smsm = smem_find(SMEM_SMSM_CPU_INTR_MASK,
+			  SMSM_NUM_ENTRIES * SMSM_NUM_HOSTS * sizeof(uint32_t),
+			  0,
+			  SMEM_ANY_HOST_FLAG);
+
+	if (smsm)
+		for (m = 0; m < SMSM_NUM_ENTRIES; m++) {
+			seq_printf(s, "entry %d:", m);
+			for (n = 0; n < SMSM_NUM_HOSTS; n++)
+				seq_printf(s, "   host %d: 0x%08x",
+					       n, smsm[m * SMSM_NUM_HOSTS + n]);
+			seq_puts(s, "\n");
+		}
+}
+
+static int debugfs_show(struct seq_file *s, void *data)
+{
+	void (*show)(struct seq_file *) = s->private;
+
+	show(s);
+
+	return 0;
+}
+
+static int debug_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, debugfs_show, inode->i_private);
+}
+
+static const struct file_operations debug_ops = {
+	.open = debug_open,
+	.release = single_release,
+	.read = seq_read,
+	.llseek = seq_lseek,
+};
+
+static void debug_create(const char *name, umode_t mode,
+			 struct dentry *dent,
+			 void (*show)(struct seq_file *))
+{
+	struct dentry *file;
+
+	file = debugfs_create_file(name, mode, dent, show, &debug_ops);
+	if (!file)
+		pr_err("%s: unable to create file '%s'\n", __func__, name);
+}
+
+static int __init smsm_debugfs_init(void)
+{
+	struct dentry *dent;
+
+	dent = debugfs_create_dir("smsm", 0);
+	if (IS_ERR(dent))
+		return PTR_ERR(dent);
+
+	debug_create("state", 0444, dent, debug_read_smsm_state);
+	debug_create("intr_mask", 0444, dent, debug_read_intr_mask);
+	debug_create("smsm_test", 0444, dent, debug_test_smsm);
+
+	init_completion(&smsm_cb_completion);
+
+	return 0;
+}
+
+late_initcall(smsm_debugfs_init);
+#endif
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/soc/qcom/socinfo.c	2019-10-29 09:26:24.825214745 +0100
@@ -0,0 +1,1607 @@
+/*
+ * Copyright (c) 2009-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+/*
+ * SOC Info Routines
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/export.h>
+#include <linux/module.h>
+#include <linux/err.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/sys_soc.h>
+#include <linux/slab.h>
+#include <linux/stat.h>
+#include <linux/string.h>
+#include <linux/types.h>
+
+#include <asm/system_misc.h>
+
+#include <soc/qcom/socinfo.h>
+#include <soc/qcom/smem.h>
+#include <soc/qcom/boot_stats.h>
+
+#define BUILD_ID_LENGTH 32
+#define SMEM_IMAGE_VERSION_BLOCKS_COUNT 32
+#define SMEM_IMAGE_VERSION_SINGLE_BLOCK_SIZE 128
+#define SMEM_IMAGE_VERSION_SIZE 4096
+#define SMEM_IMAGE_VERSION_NAME_SIZE 75
+#define SMEM_IMAGE_VERSION_VARIANT_SIZE 20
+#define SMEM_IMAGE_VERSION_VARIANT_OFFSET 75
+#define SMEM_IMAGE_VERSION_OEM_SIZE 33
+#define SMEM_IMAGE_VERSION_OEM_OFFSET 95
+#define SMEM_IMAGE_VERSION_PARTITION_APPS 10
+
+static DECLARE_RWSEM(current_image_rwsem);
+enum {
+	HW_PLATFORM_UNKNOWN = 0,
+	HW_PLATFORM_SURF    = 1,
+	HW_PLATFORM_FFA     = 2,
+	HW_PLATFORM_FLUID   = 3,
+	HW_PLATFORM_SVLTE_FFA	= 4,
+	HW_PLATFORM_SVLTE_SURF	= 5,
+	HW_PLATFORM_MTP_MDM = 7,
+	HW_PLATFORM_MTP  = 8,
+	HW_PLATFORM_LIQUID  = 9,
+	/* Dragonboard platform id is assigned as 10 in CDT */
+	HW_PLATFORM_DRAGON	= 10,
+	HW_PLATFORM_QRD	= 11,
+	HW_PLATFORM_HRD	= 13,
+	HW_PLATFORM_DTV	= 14,
+	HW_PLATFORM_RCM	= 21,
+	HW_PLATFORM_STP = 23,
+	HW_PLATFORM_SBC = 24,
+	HW_PLATFORM_ADP = 25,
+	HW_PLATFORM_INVALID
+};
+
+const char *hw_platform[] = {
+	[HW_PLATFORM_UNKNOWN] = "Unknown",
+	[HW_PLATFORM_SURF] = "Surf",
+	[HW_PLATFORM_FFA] = "FFA",
+	[HW_PLATFORM_FLUID] = "Fluid",
+	[HW_PLATFORM_SVLTE_FFA] = "SVLTE_FFA",
+	[HW_PLATFORM_SVLTE_SURF] = "SVLTE_SURF",
+	[HW_PLATFORM_MTP_MDM] = "MDM_MTP_NO_DISPLAY",
+	[HW_PLATFORM_MTP] = "MTP",
+	[HW_PLATFORM_RCM] = "RCM",
+	[HW_PLATFORM_LIQUID] = "Liquid",
+	[HW_PLATFORM_DRAGON] = "Dragon",
+	[HW_PLATFORM_QRD] = "QRD",
+	[HW_PLATFORM_HRD] = "HRD",
+	[HW_PLATFORM_DTV] = "DTV",
+	[HW_PLATFORM_STP] = "STP",
+	[HW_PLATFORM_SBC] = "SBC",
+	[HW_PLATFORM_ADP] = "ADP",
+};
+
+enum {
+	ACCESSORY_CHIP_UNKNOWN = 0,
+	ACCESSORY_CHIP_CHARM = 58,
+};
+
+enum {
+	PLATFORM_SUBTYPE_QRD = 0x0,
+	PLATFORM_SUBTYPE_SKUAA = 0x1,
+	PLATFORM_SUBTYPE_SKUF = 0x2,
+	PLATFORM_SUBTYPE_SKUAB = 0x3,
+	PLATFORM_SUBTYPE_SKUG = 0x5,
+	PLATFORM_SUBTYPE_QRD_INVALID,
+};
+
+const char *qrd_hw_platform_subtype[] = {
+	[PLATFORM_SUBTYPE_QRD] = "QRD",
+	[PLATFORM_SUBTYPE_SKUAA] = "SKUAA",
+	[PLATFORM_SUBTYPE_SKUF] = "SKUF",
+	[PLATFORM_SUBTYPE_SKUAB] = "SKUAB",
+	[PLATFORM_SUBTYPE_SKUG] = "SKUG",
+	[PLATFORM_SUBTYPE_QRD_INVALID] = "INVALID",
+};
+
+enum {
+	PLATFORM_SUBTYPE_MOJAVE_V1 = 0x0,
+	PLATFORM_SUBTYPE_MMX = 0x1,
+	PLATFORM_SUBTYPE_MOJAVE_FULL_V2 = 0x2,
+	PLATFORM_SUBTYPE_MOJAVE_BARE_V2 = 0x3,
+	PLATFORM_SUBTYPE_ADP_INVALID,
+};
+
+const char *adp_hw_platform_subtype[] = {
+	[PLATFORM_SUBTYPE_MOJAVE_V1] = "MOJAVE_V1",
+	[PLATFORM_SUBTYPE_MMX] = "MMX",
+	[PLATFORM_SUBTYPE_MOJAVE_FULL_V2] = "_MOJAVE_V2_FULL",
+	[PLATFORM_SUBTYPE_MOJAVE_BARE_V2] = "_MOJAVE_V2_BARE",
+	[PLATFORM_SUBTYPE_ADP_INVALID] = "INVALID",
+};
+
+enum {
+	PLATFORM_SUBTYPE_UNKNOWN = 0x0,
+	PLATFORM_SUBTYPE_CHARM = 0x1,
+	PLATFORM_SUBTYPE_STRANGE = 0x2,
+	PLATFORM_SUBTYPE_STRANGE_2A = 0x3,
+	PLATFORM_SUBTYPE_INVALID,
+};
+
+const char *hw_platform_subtype[] = {
+	[PLATFORM_SUBTYPE_UNKNOWN] = "Unknown",
+	[PLATFORM_SUBTYPE_CHARM] = "charm",
+	[PLATFORM_SUBTYPE_STRANGE] = "strange",
+	[PLATFORM_SUBTYPE_STRANGE_2A] = "strange_2a",
+	[PLATFORM_SUBTYPE_INVALID] = "Invalid",
+};
+
+/* Used to parse shared memory.  Must match the modem. */
+struct socinfo_v0_1 {
+	uint32_t format;
+	uint32_t id;
+	uint32_t version;
+	char build_id[BUILD_ID_LENGTH];
+};
+
+struct socinfo_v0_2 {
+	struct socinfo_v0_1 v0_1;
+	uint32_t raw_id;
+	uint32_t raw_version;
+};
+
+struct socinfo_v0_3 {
+	struct socinfo_v0_2 v0_2;
+	uint32_t hw_platform;
+};
+
+struct socinfo_v0_4 {
+	struct socinfo_v0_3 v0_3;
+	uint32_t platform_version;
+};
+
+struct socinfo_v0_5 {
+	struct socinfo_v0_4 v0_4;
+	uint32_t accessory_chip;
+};
+
+struct socinfo_v0_6 {
+	struct socinfo_v0_5 v0_5;
+	uint32_t hw_platform_subtype;
+};
+
+struct socinfo_v0_7 {
+	struct socinfo_v0_6 v0_6;
+	uint32_t pmic_model;
+	uint32_t pmic_die_revision;
+};
+
+struct socinfo_v0_8 {
+	struct socinfo_v0_7 v0_7;
+	uint32_t pmic_model_1;
+	uint32_t pmic_die_revision_1;
+	uint32_t pmic_model_2;
+	uint32_t pmic_die_revision_2;
+};
+
+struct socinfo_v0_9 {
+	struct socinfo_v0_8 v0_8;
+	uint32_t foundry_id;
+};
+
+struct socinfo_v0_10 {
+	struct socinfo_v0_9 v0_9;
+	uint32_t serial_number;
+};
+
+struct socinfo_v0_11 {
+	struct socinfo_v0_10 v0_10;
+	uint32_t num_pmics;
+	uint32_t pmic_array_offset;
+};
+
+struct socinfo_v0_12 {
+	struct socinfo_v0_11 v0_11;
+	uint32_t chip_family;
+	uint32_t raw_device_family;
+	uint32_t raw_device_number;
+};
+
+static union {
+	struct socinfo_v0_1 v0_1;
+	struct socinfo_v0_2 v0_2;
+	struct socinfo_v0_3 v0_3;
+	struct socinfo_v0_4 v0_4;
+	struct socinfo_v0_5 v0_5;
+	struct socinfo_v0_6 v0_6;
+	struct socinfo_v0_7 v0_7;
+	struct socinfo_v0_8 v0_8;
+	struct socinfo_v0_9 v0_9;
+	struct socinfo_v0_10 v0_10;
+	struct socinfo_v0_11 v0_11;
+	struct socinfo_v0_12 v0_12;
+} *socinfo;
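+
+/*
+ * Each socinfo_v0_N struct embeds the previous version as its first member,
+ * so the shared item can be read at any format level up to the version
+ * reported in the 'format' field.
+ */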
+
+/* max socinfo format version supported */
+#define MAX_SOCINFO_FORMAT SOCINFO_VERSION(0, 12)
+
+static struct msm_soc_info cpu_of_id[] = {
+
+	/* 7x01 IDs */
+	[0]  = {MSM_CPU_UNKNOWN, "Unknown CPU"},
+	[1]  = {MSM_CPU_7X01, "MSM7X01"},
+	[16] = {MSM_CPU_7X01, "MSM7X01"},
+	[17] = {MSM_CPU_7X01, "MSM7X01"},
+	[18] = {MSM_CPU_7X01, "MSM7X01"},
+	[19] = {MSM_CPU_7X01, "MSM7X01"},
+	[23] = {MSM_CPU_7X01, "MSM7X01"},
+	[25] = {MSM_CPU_7X01, "MSM7X01"},
+	[26] = {MSM_CPU_7X01, "MSM7X01"},
+	[32] = {MSM_CPU_7X01, "MSM7X01"},
+	[33] = {MSM_CPU_7X01, "MSM7X01"},
+	[34] = {MSM_CPU_7X01, "MSM7X01"},
+	[35] = {MSM_CPU_7X01, "MSM7X01"},
+
+	/* 7x25 IDs */
+	[20] = {MSM_CPU_7X25, "MSM7X25"},
+	[21] = {MSM_CPU_7X25, "MSM7X25"},
+	[24] = {MSM_CPU_7X25, "MSM7X25"},
+	[27] = {MSM_CPU_7X25, "MSM7X25"},
+	[39] = {MSM_CPU_7X25, "MSM7X25"},
+	[40] = {MSM_CPU_7X25, "MSM7X25"},
+	[41] = {MSM_CPU_7X25, "MSM7X25"},
+	[42] = {MSM_CPU_7X25, "MSM7X25"},
+	[62] = {MSM_CPU_7X25, "MSM7X25"},
+	[63] = {MSM_CPU_7X25, "MSM7X25"},
+	[66] = {MSM_CPU_7X25, "MSM7X25"},
+
+	/* 7x27 IDs */
+	[43] = {MSM_CPU_7X27, "MSM7X27"},
+	[44] = {MSM_CPU_7X27, "MSM7X27"},
+	[61] = {MSM_CPU_7X27, "MSM7X27"},
+	[67] = {MSM_CPU_7X27, "MSM7X27"},
+	[68] = {MSM_CPU_7X27, "MSM7X27"},
+	[69] = {MSM_CPU_7X27, "MSM7X27"},
+
+	/* 8x50 IDs */
+	[30] = {MSM_CPU_8X50, "MSM8X50"},
+	[36] = {MSM_CPU_8X50, "MSM8X50"},
+	[37] = {MSM_CPU_8X50, "MSM8X50"},
+	[38] = {MSM_CPU_8X50, "MSM8X50"},
+
+	/* 7x30 IDs */
+	[59] = {MSM_CPU_7X30, "MSM7X30"},
+	[60] = {MSM_CPU_7X30, "MSM7X30"},
+
+	/* 8x55 IDs */
+	[74] = {MSM_CPU_8X55, "MSM8X55"},
+	[75] = {MSM_CPU_8X55, "MSM8X55"},
+	[85] = {MSM_CPU_8X55, "MSM8X55"},
+
+	/* 8x60 IDs */
+	[70] = {MSM_CPU_8X60, "MSM8X60"},
+	[71] = {MSM_CPU_8X60, "MSM8X60"},
+	[86] = {MSM_CPU_8X60, "MSM8X60"},
+
+	/* 8960 IDs */
+	[87] = {MSM_CPU_8960, "MSM8960"},
+
+	/* 7x25A IDs */
+	[88] = {MSM_CPU_7X25A, "MSM7X25A"},
+	[89] = {MSM_CPU_7X25A, "MSM7X25A"},
+	[96] = {MSM_CPU_7X25A, "MSM7X25A"},
+
+	/* 7x27A IDs */
+	[90] = {MSM_CPU_7X27A, "MSM7X27A"},
+	[91] = {MSM_CPU_7X27A, "MSM7X27A"},
+	[92] = {MSM_CPU_7X27A, "MSM7X27A"},
+	[97] = {MSM_CPU_7X27A, "MSM7X27A"},
+
+	/* FSM9xxx ID */
+	[94] = {FSM_CPU_9XXX, "FSM9XXX"},
+	[95] = {FSM_CPU_9XXX, "FSM9XXX"},
+
+	/*  7x25AA ID */
+	[98] = {MSM_CPU_7X25AA, "MSM7X25AA"},
+	[99] = {MSM_CPU_7X25AA, "MSM7X25AA"},
+	[100] = {MSM_CPU_7X25AA, "MSM7X25AA"},
+
+	/*  7x27AA ID */
+	[101] = {MSM_CPU_7X27AA, "MSM7X27AA"},
+	[102] = {MSM_CPU_7X27AA, "MSM7X27AA"},
+	[103] = {MSM_CPU_7X27AA, "MSM7X27AA"},
+	[136] = {MSM_CPU_7X27AA, "MSM7X27AA"},
+
+	/* 9x15 ID */
+	[104] = {MSM_CPU_9615, "MSM9615"},
+	[105] = {MSM_CPU_9615, "MSM9615"},
+	[106] = {MSM_CPU_9615, "MSM9615"},
+	[107] = {MSM_CPU_9615, "MSM9615"},
+	[171] = {MSM_CPU_9615, "MSM9615"},
+
+	/* 8064 IDs */
+	[109] = {MSM_CPU_8064, "APQ8064"},
+
+	/* 8930 IDs */
+	[116] = {MSM_CPU_8930, "MSM8930"},
+	[117] = {MSM_CPU_8930, "MSM8930"},
+	[118] = {MSM_CPU_8930, "MSM8930"},
+	[119] = {MSM_CPU_8930, "MSM8930"},
+	[179] = {MSM_CPU_8930, "MSM8930"},
+
+	/* 8627 IDs */
+	[120] = {MSM_CPU_8627, "MSM8627"},
+	[121] = {MSM_CPU_8627, "MSM8627"},
+
+	/* 8660A ID */
+	[122] = {MSM_CPU_8960, "MSM8960"},
+
+	/* 8260A ID */
+	[123] = {MSM_CPU_8960, "MSM8960"},
+
+	/* 8060A ID */
+	[124] = {MSM_CPU_8960, "MSM8960"},
+
+	/* 8974 IDs */
+	[126] = {MSM_CPU_8974, "MSM8974"},
+	[184] = {MSM_CPU_8974, "MSM8974"},
+	[185] = {MSM_CPU_8974, "MSM8974"},
+	[186] = {MSM_CPU_8974, "MSM8974"},
+
+	/* 8974AA IDs */
+	[208] = {MSM_CPU_8974PRO_AA, "MSM8974PRO-AA"},
+	[211] = {MSM_CPU_8974PRO_AA, "MSM8974PRO-AA"},
+	[214] = {MSM_CPU_8974PRO_AA, "MSM8974PRO-AA"},
+	[217] = {MSM_CPU_8974PRO_AA, "MSM8974PRO-AA"},
+
+	/* 8974AB IDs */
+	[209] = {MSM_CPU_8974PRO_AB, "MSM8974PRO-AB"},
+	[212] = {MSM_CPU_8974PRO_AB, "MSM8974PRO-AB"},
+	[215] = {MSM_CPU_8974PRO_AB, "MSM8974PRO-AB"},
+	[218] = {MSM_CPU_8974PRO_AB, "MSM8974PRO-AB"},
+
+	/* 8974AC IDs */
+	[194] = {MSM_CPU_8974PRO_AC, "MSM8974PRO-AC"},
+	[210] = {MSM_CPU_8974PRO_AC, "MSM8974PRO-AC"},
+	[213] = {MSM_CPU_8974PRO_AC, "MSM8974PRO-AC"},
+	[216] = {MSM_CPU_8974PRO_AC, "MSM8974PRO-AC"},
+
+	/* 8625 IDs */
+	[127] = {MSM_CPU_8625, "MSM8625"},
+	[128] = {MSM_CPU_8625, "MSM8625"},
+	[129] = {MSM_CPU_8625, "MSM8625"},
+	[137] = {MSM_CPU_8625, "MSM8625"},
+	[167] = {MSM_CPU_8625, "MSM8625"},
+
+	/* 8064 MPQ ID */
+	[130] = {MSM_CPU_8064, "APQ8064"},
+
+	/* 7x25AB IDs */
+	[131] = {MSM_CPU_7X25AB, "MSM7X25AB"},
+	[132] = {MSM_CPU_7X25AB, "MSM7X25AB"},
+	[133] = {MSM_CPU_7X25AB, "MSM7X25AB"},
+	[135] = {MSM_CPU_7X25AB, "MSM7X25AB"},
+
+	/* 9625 IDs */
+	[134] = {MSM_CPU_9625, "MSM9625"},
+	[148] = {MSM_CPU_9625, "MSM9625"},
+	[149] = {MSM_CPU_9625, "MSM9625"},
+	[150] = {MSM_CPU_9625, "MSM9625"},
+	[151] = {MSM_CPU_9625, "MSM9625"},
+	[152] = {MSM_CPU_9625, "MSM9625"},
+	[173] = {MSM_CPU_9625, "MSM9625"},
+	[174] = {MSM_CPU_9625, "MSM9625"},
+	[175] = {MSM_CPU_9625, "MSM9625"},
+
+	/* 8960AB IDs */
+	[138] = {MSM_CPU_8960AB, "MSM8960AB"},
+	[139] = {MSM_CPU_8960AB, "MSM8960AB"},
+	[140] = {MSM_CPU_8960AB, "MSM8960AB"},
+	[141] = {MSM_CPU_8960AB, "MSM8960AB"},
+
+	/* 8930AA IDs */
+	[142] = {MSM_CPU_8930AA, "MSM8930AA"},
+	[143] = {MSM_CPU_8930AA, "MSM8930AA"},
+	[144] = {MSM_CPU_8930AA, "MSM8930AA"},
+	[160] = {MSM_CPU_8930AA, "MSM8930AA"},
+	[180] = {MSM_CPU_8930AA, "MSM8930AA"},
+
+	/* 8226 IDs */
+	[145] = {MSM_CPU_8226, "MSM8626"},
+	[158] = {MSM_CPU_8226, "MSM8226"},
+	[159] = {MSM_CPU_8226, "MSM8526"},
+	[198] = {MSM_CPU_8226, "MSM8126"},
+	[199] = {MSM_CPU_8226, "APQ8026"},
+	[200] = {MSM_CPU_8226, "MSM8926"},
+	[205] = {MSM_CPU_8226, "MSM8326"},
+	[219] = {MSM_CPU_8226, "APQ8028"},
+	[220] = {MSM_CPU_8226, "MSM8128"},
+	[221] = {MSM_CPU_8226, "MSM8228"},
+	[222] = {MSM_CPU_8226, "MSM8528"},
+	[223] = {MSM_CPU_8226, "MSM8628"},
+	[224] = {MSM_CPU_8226, "MSM8928"},
+
+	/* 8610 IDs */
+	[147] = {MSM_CPU_8610, "MSM8610"},
+	[161] = {MSM_CPU_8610, "MSM8110"},
+	[162] = {MSM_CPU_8610, "MSM8210"},
+	[163] = {MSM_CPU_8610, "MSM8810"},
+	[164] = {MSM_CPU_8610, "MSM8212"},
+	[165] = {MSM_CPU_8610, "MSM8612"},
+	[166] = {MSM_CPU_8610, "MSM8112"},
+	[225] = {MSM_CPU_8610, "MSM8510"},
+	[226] = {MSM_CPU_8610, "MSM8512"},
+
+	/* 8064AB IDs */
+	[153] = {MSM_CPU_8064AB, "APQ8064AB"},
+
+	/* 8930AB IDs */
+	[154] = {MSM_CPU_8930AB, "MSM8930AB"},
+	[155] = {MSM_CPU_8930AB, "MSM8930AB"},
+	[156] = {MSM_CPU_8930AB, "MSM8930AB"},
+	[157] = {MSM_CPU_8930AB, "MSM8930AB"},
+	[181] = {MSM_CPU_8930AB, "MSM8930AB"},
+
+	/* 8625Q IDs */
+	[168] = {MSM_CPU_8625Q, "MSM8225Q"},
+	[169] = {MSM_CPU_8625Q, "MSM8625Q"},
+	[170] = {MSM_CPU_8625Q, "MSM8125Q"},
+
+	/* 8064AA IDs */
+	[172] = {MSM_CPU_8064AA, "APQ8064AA"},
+
+	/* 8084 IDs */
+	[178] = {MSM_CPU_8084, "APQ8084"},
+
+	/* 9630 IDs */
+	[187] = {MSM_CPU_9630, "MDM9630"},
+	[227] = {MSM_CPU_9630, "MDM9630"},
+	[228] = {MSM_CPU_9630, "MDM9630"},
+	[229] = {MSM_CPU_9630, "MDM9630"},
+	[230] = {MSM_CPU_9630, "MDM9630"},
+	[231] = {MSM_CPU_9630, "MDM9630"},
+
+	/* FSM9900 ID */
+	[188] = {FSM_CPU_9900, "FSM9900"},
+	[189] = {FSM_CPU_9900, "FSM9900"},
+	[190] = {FSM_CPU_9900, "FSM9900"},
+	[191] = {FSM_CPU_9900, "FSM9900"},
+	[192] = {FSM_CPU_9900, "FSM9900"},
+	[193] = {FSM_CPU_9900, "FSM9900"},
+
+	/* 8916 IDs */
+	[206] = {MSM_CPU_8916, "MSM8916"},
+	[247] = {MSM_CPU_8916, "APQ8016"},
+	[248] = {MSM_CPU_8916, "MSM8216"},
+	[249] = {MSM_CPU_8916, "MSM8116"},
+	[250] = {MSM_CPU_8916, "MSM8616"},
+
+	/* 8936 IDs */
+	[233] = {MSM_CPU_8936, "MSM8936"},
+	[240] = {MSM_CPU_8936, "APQ8036"},
+	[242] = {MSM_CPU_8936, "MSM8236"},
+
+	/* 8939 IDs */
+	[239] = {MSM_CPU_8939, "MSM8939"},
+	[241] = {MSM_CPU_8939, "APQ8039"},
+	[263] = {MSM_CPU_8939, "MSM8239"},
+
+	/* 8909 IDs */
+	[245] = {MSM_CPU_8909, "MSM8909"},
+	[258] = {MSM_CPU_8909, "MSM8209"},
+	[259] = {MSM_CPU_8909, "MSM8208"},
+	[265] = {MSM_CPU_8909, "APQ8009"},
+	[260] = {MSM_CPU_8909, "MDMFERRUM"},
+	[261] = {MSM_CPU_8909, "MDMFERRUM"},
+	[262] = {MSM_CPU_8909, "MDMFERRUM"},
+	[300] = {MSM_CPU_8909, "MSM8909W"},
+	[301] = {MSM_CPU_8909, "APQ8009W"},
+
+	/* ZIRC IDs */
+	[234] = {MSM_CPU_ZIRC, "MSMZIRC"},
+	[235] = {MSM_CPU_ZIRC, "MSMZIRC"},
+	[236] = {MSM_CPU_ZIRC, "MSMZIRC"},
+	[237] = {MSM_CPU_ZIRC, "MSMZIRC"},
+	[238] = {MSM_CPU_ZIRC, "MSMZIRC"},
+
+	/* 8994 ID */
+	[207] = {MSM_CPU_8994, "MSM8994"},
+	[253] = {MSM_CPU_8994, "APQ8094"},
+
+	/* 8992 ID */
+	[251] = {MSM_CPU_8992, "MSM8992"},
+
+	/* FSM9010 ID */
+	[254] = {FSM_CPU_9010, "FSM9010"},
+	[255] = {FSM_CPU_9010, "FSM9010"},
+	[256] = {FSM_CPU_9010, "FSM9010"},
+	[257] = {FSM_CPU_9010, "FSM9010"},
+
+	/* Tellurium ID */
+	[264] = {MSM_CPU_TELLURIUM, "MSMTELLURIUM"},
+
+	/* 8996 IDs */
+	[246] = {MSM_CPU_8996, "MSM8996"},
+	[291] = {MSM_CPU_8996, "APQ8096"},
+	[305] = {MSM_CPU_8996, "MSM8996pro"},
+	[310] = {MSM_CPU_8996, "MSM8996"},
+	[311] = {MSM_CPU_8996, "APQ8096"},
+	[312] = {MSM_CPU_8996, "APQ8096pro"},
+	[315] = {MSM_CPU_8996, "MSM8996pro"},
+	[316] = {MSM_CPU_8996, "APQ8096pro"},
+
+	/* 8976 ID */
+	[266] = {MSM_CPU_8976, "MSM8976"},
+
+	/* 8929 IDs */
+	[268] = {MSM_CPU_8929, "MSM8929"},
+	[269] = {MSM_CPU_8929, "MSM8629"},
+	[270] = {MSM_CPU_8929, "MSM8229"},
+	[271] = {MSM_CPU_8929, "APQ8029"},
+
+	/* Cobalt IDs */
+	[292] = {MSM_CPU_8998, "MSM8998"},
+	[319] = {MSM_CPU_8998, "APQ8098"},
+
+	/* Hamster ID */
+	[306] = {MSM_CPU_HAMSTER, "MSMHAMSTER"},
+
+	/* 660 ID */
+	[317] = {MSM_CPU_660, "SDM660"},
+	[324] = {MSM_CPU_660, "SDA660"},
+	[325] = {MSM_CPU_660, "SDM658"},
+	[326] = {MSM_CPU_660, "SDA658"},
+
+	/* 630 ID */
+	[318] = {MSM_CPU_630, "SDM630"},
+	[327] = {MSM_CPU_630, "SDA630"},
+
+	/* 636 ID */
+	[345] = {MSM_CPU_636, "SDM636"},
+	[346] = {MSM_CPU_636, "SDA636"},
+
+	/*
+	 * Uninitialized IDs are not known to run Linux.
+	 * MSM_CPU_UNKNOWN is set to 0 so that these IDs are
+	 * treated as unknown CPUs.
+	 */
+};
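+
+/*
+ * Lookup sketch (illustrative, derived from the table above): given a SoC id
+ * read from SMEM, the driver indexes this table directly, e.g. id 206 yields
+ * generic_soc_type == MSM_CPU_8916 and soc_id_string == "MSM8916"; ids at or
+ * beyond ARRAY_SIZE(cpu_of_id) are rejected in socinfo_init() below.
+ */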
+
+static enum msm_cpu cur_cpu;
+static int current_image;
+static uint32_t socinfo_format;
+
+static struct socinfo_v0_1 dummy_socinfo = {
+	.format = SOCINFO_VERSION(0, 1),
+	.version = 1,
+};
+
+uint32_t socinfo_get_id(void)
+{
+	return (socinfo) ? socinfo->v0_1.id : 0;
+}
+EXPORT_SYMBOL_GPL(socinfo_get_id);
+
+static char *socinfo_get_id_string(void)
+{
+	return (socinfo) ? cpu_of_id[socinfo->v0_1.id].soc_id_string : NULL;
+}
+
+uint32_t socinfo_get_version(void)
+{
+	return (socinfo) ? socinfo->v0_1.version : 0;
+}
+
+char *socinfo_get_build_id(void)
+{
+	return (socinfo) ? socinfo->v0_1.build_id : NULL;
+}
+
+static char *msm_read_hardware_id(void)
+{
+	static char msm_soc_str[256] = "Qualcomm Technologies, Inc ";
+	static bool string_generated;
+	size_t ret = 0;
+
+	if (string_generated)
+		return msm_soc_str;
+	if (!socinfo)
+		goto err_path;
+	if (!cpu_of_id[socinfo->v0_1.id].soc_id_string)
+		goto err_path;
+
+	ret = strlcat(msm_soc_str, cpu_of_id[socinfo->v0_1.id].soc_id_string,
+			sizeof(msm_soc_str));
+	/* strlcat() signals truncation with a return value >= the buffer size */
+	if (ret >= sizeof(msm_soc_str))
+		goto err_path;
+
+	string_generated = true;
+	return msm_soc_str;
+err_path:
+	return "UNKNOWN SOC TYPE";
+}
+
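+/*
+ * The getters below share one pattern: a field added by a later socinfo
+ * format revision is read only when the detected format is new enough,
+ * otherwise 0 is returned. A minimal sketch of the shape ("n" and "field"
+ * are placeholders, not real names):
+ *
+ *	if (socinfo && socinfo_format >= SOCINFO_VERSION(0, n))
+ *		return socinfo->v0_n.field;
+ *	return 0;
+ */
+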
+uint32_t socinfo_get_raw_id(void)
+{
+	return socinfo ?
+		(socinfo_format >= SOCINFO_VERSION(0, 2) ?
+			socinfo->v0_2.raw_id : 0)
+		: 0;
+}
+
+uint32_t socinfo_get_raw_version(void)
+{
+	return socinfo ?
+		(socinfo_format >= SOCINFO_VERSION(0, 2) ?
+			socinfo->v0_2.raw_version : 0)
+		: 0;
+}
+
+uint32_t socinfo_get_platform_type(void)
+{
+	return socinfo ?
+		(socinfo_format >= SOCINFO_VERSION(0, 3) ?
+			socinfo->v0_3.hw_platform : 0)
+		: 0;
+}
+
+uint32_t socinfo_get_platform_version(void)
+{
+	return socinfo ?
+		(socinfo_format >= SOCINFO_VERSION(0, 4) ?
+			socinfo->v0_4.platform_version : 0)
+		: 0;
+}
+
+/*
+ * This information is directly encoded by the machine id;
+ * thus no external callers rely on it at the moment.
+ */
+static uint32_t socinfo_get_accessory_chip(void)
+{
+	return socinfo ?
+		(socinfo_format >= SOCINFO_VERSION(0, 5) ?
+			socinfo->v0_5.accessory_chip : 0)
+		: 0;
+}
+
+uint32_t socinfo_get_platform_subtype(void)
+{
+	return socinfo ?
+		(socinfo_format >= SOCINFO_VERSION(0, 6) ?
+			socinfo->v0_6.hw_platform_subtype : 0)
+		: 0;
+}
+
+static uint32_t socinfo_get_foundry_id(void)
+{
+	return socinfo ?
+		(socinfo_format >= SOCINFO_VERSION(0, 9) ?
+			socinfo->v0_9.foundry_id : 0)
+		: 0;
+}
+
+uint32_t socinfo_get_serial_number(void)
+{
+	return socinfo ?
+		(socinfo_format >= SOCINFO_VERSION(0, 10) ?
+			socinfo->v0_10.serial_number : 0)
+		: 0;
+}
+EXPORT_SYMBOL(socinfo_get_serial_number);
+
+static uint32_t socinfo_get_chip_family(void)
+{
+	return socinfo ?
+		(socinfo_format >= SOCINFO_VERSION(0, 12) ?
+			socinfo->v0_12.chip_family : 0)
+		: 0;
+}
+
+static uint32_t socinfo_get_raw_device_family(void)
+{
+	return socinfo ?
+		(socinfo_format >= SOCINFO_VERSION(0, 12) ?
+			socinfo->v0_12.raw_device_family : 0)
+		: 0;
+}
+
+static uint32_t socinfo_get_raw_device_number(void)
+{
+	return socinfo ?
+		(socinfo_format >= SOCINFO_VERSION(0, 12) ?
+			socinfo->v0_12.raw_device_number : 0)
+		: 0;
+}
+
+enum pmic_model socinfo_get_pmic_model(void)
+{
+	return socinfo ?
+		(socinfo_format >= SOCINFO_VERSION(0, 7) ?
+			socinfo->v0_7.pmic_model : PMIC_MODEL_UNKNOWN)
+		: PMIC_MODEL_UNKNOWN;
+}
+
+uint32_t socinfo_get_pmic_die_revision(void)
+{
+	return socinfo ?
+		(socinfo_format >= SOCINFO_VERSION(0, 7) ?
+			socinfo->v0_7.pmic_die_revision : 0)
+		: 0;
+}
+
+static char *socinfo_get_image_version_base_address(void)
+{
+	return smem_find(SMEM_IMAGE_VERSION_TABLE,
+				SMEM_IMAGE_VERSION_SIZE, 0, SMEM_ANY_HOST_FLAG);
+}
+
+enum msm_cpu socinfo_get_msm_cpu(void)
+{
+	return cur_cpu;
+}
+EXPORT_SYMBOL_GPL(socinfo_get_msm_cpu);
+
+static ssize_t
+msm_get_vendor(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "Qualcomm\n");
+}
+
+static ssize_t
+msm_get_raw_id(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%u\n",
+		socinfo_get_raw_id());
+}
+
+static ssize_t
+msm_get_raw_version(struct device *dev,
+		     struct device_attribute *attr,
+		     char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%u\n",
+		socinfo_get_raw_version());
+}
+
+static ssize_t
+msm_get_build_id(struct device *dev,
+		   struct device_attribute *attr,
+		   char *buf)
+{
+	if (socinfo_get_build_id())
+		return snprintf(buf, PAGE_SIZE, "%-.32s\n",
+				socinfo_get_build_id());
+	return 0;
+}
+
+static ssize_t
+msm_get_hw_platform(struct device *dev,
+			struct device_attribute *attr,
+			char *buf)
+{
+	uint32_t hw_type = socinfo_get_platform_type();
+
+	return snprintf(buf, PAGE_SIZE, "%-.32s\n",
+			hw_platform[hw_type]);
+}
+
+static ssize_t
+msm_get_platform_version(struct device *dev,
+				struct device_attribute *attr,
+				char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%u\n",
+		socinfo_get_platform_version());
+}
+
+static ssize_t
+msm_get_accessory_chip(struct device *dev,
+				struct device_attribute *attr,
+				char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%u\n",
+		socinfo_get_accessory_chip());
+}
+
+static ssize_t
+msm_get_platform_subtype(struct device *dev,
+			struct device_attribute *attr,
+			char *buf)
+{
+	uint32_t hw_subtype = socinfo_get_platform_subtype();
+
+	if (socinfo_get_platform_type() == HW_PLATFORM_QRD) {
+		if (hw_subtype >= PLATFORM_SUBTYPE_QRD_INVALID) {
+			pr_err("Invalid hardware platform sub type for qrd found\n");
+			hw_subtype = PLATFORM_SUBTYPE_QRD_INVALID;
+		}
+		return snprintf(buf, PAGE_SIZE, "%-.32s\n",
+					qrd_hw_platform_subtype[hw_subtype]);
+	}
+	if (socinfo_get_platform_type() == HW_PLATFORM_ADP) {
+		if (hw_subtype >= PLATFORM_SUBTYPE_ADP_INVALID) {
+			pr_err("Invalid hardware platform sub type for adp found\n");
+			hw_subtype = PLATFORM_SUBTYPE_ADP_INVALID;
+		}
+		return snprintf(buf, PAGE_SIZE, "%-.32s\n",
+					adp_hw_platform_subtype[hw_subtype]);
+	} else {
+		if (hw_subtype >= PLATFORM_SUBTYPE_INVALID) {
+			pr_err("Invalid hardware platform subtype\n");
+			hw_subtype = PLATFORM_SUBTYPE_INVALID;
+		}
+		return snprintf(buf, PAGE_SIZE, "%-.32s\n",
+			hw_platform_subtype[hw_subtype]);
+	}
+}
+
+static ssize_t
+msm_get_platform_subtype_id(struct device *dev,
+			struct device_attribute *attr,
+			char *buf)
+{
+	uint32_t hw_subtype = socinfo_get_platform_subtype();
+
+	return snprintf(buf, PAGE_SIZE, "%u\n", hw_subtype);
+}
+
+static ssize_t
+msm_get_foundry_id(struct device *dev,
+			struct device_attribute *attr,
+			char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%u\n",
+		socinfo_get_foundry_id());
+}
+
+static ssize_t
+msm_get_serial_number(struct device *dev,
+			struct device_attribute *attr,
+			char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%u\n",
+		socinfo_get_serial_number());
+}
+
+static ssize_t
+msm_get_chip_family(struct device *dev,
+			struct device_attribute *attr,
+			char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "0x%x\n",
+		socinfo_get_chip_family());
+}
+
+static ssize_t
+msm_get_raw_device_family(struct device *dev,
+			struct device_attribute *attr,
+			char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "0x%x\n",
+		socinfo_get_raw_device_family());
+}
+
+static ssize_t
+msm_get_raw_device_number(struct device *dev,
+			struct device_attribute *attr,
+			char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "0x%x\n",
+		socinfo_get_raw_device_number());
+}
+
+static ssize_t
+msm_get_pmic_model(struct device *dev,
+			struct device_attribute *attr,
+			char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%u\n",
+		socinfo_get_pmic_model());
+}
+
+static ssize_t
+msm_get_pmic_die_revision(struct device *dev,
+			       struct device_attribute *attr,
+			       char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%u\n",
+			 socinfo_get_pmic_die_revision());
+}
+
+static ssize_t
+msm_get_image_version(struct device *dev,
+			struct device_attribute *attr,
+			char *buf)
+{
+	char *string_address;
+
+	string_address = socinfo_get_image_version_base_address();
+	if (IS_ERR_OR_NULL(string_address)) {
+		pr_err("Failed to get image version base address");
+		return snprintf(buf, SMEM_IMAGE_VERSION_NAME_SIZE, "Unknown");
+	}
+	down_read(&current_image_rwsem);
+	string_address += current_image * SMEM_IMAGE_VERSION_SINGLE_BLOCK_SIZE;
+	up_read(&current_image_rwsem);
+	return snprintf(buf, SMEM_IMAGE_VERSION_NAME_SIZE, "%-.75s\n",
+			string_address);
+}
+
+static ssize_t
+msm_set_image_version(struct device *dev,
+			struct device_attribute *attr,
+			const char *buf,
+			size_t count)
+{
+	char *store_address;
+
+	down_read(&current_image_rwsem);
+	if (current_image != SMEM_IMAGE_VERSION_PARTITION_APPS) {
+		up_read(&current_image_rwsem);
+		return count;
+	}
+	store_address = socinfo_get_image_version_base_address();
+	if (IS_ERR_OR_NULL(store_address)) {
+		pr_err("Failed to get image version base address");
+		up_read(&current_image_rwsem);
+		return count;
+	}
+	store_address += current_image * SMEM_IMAGE_VERSION_SINGLE_BLOCK_SIZE;
+	up_read(&current_image_rwsem);
+	snprintf(store_address, SMEM_IMAGE_VERSION_NAME_SIZE, "%-.75s", buf);
+	return count;
+}
+
+static ssize_t
+msm_get_image_variant(struct device *dev,
+			struct device_attribute *attr,
+			char *buf)
+{
+	char *string_address;
+
+	string_address = socinfo_get_image_version_base_address();
+	if (IS_ERR_OR_NULL(string_address)) {
+		pr_err("Failed to get image version base address");
+		return snprintf(buf, SMEM_IMAGE_VERSION_VARIANT_SIZE,
+		"Unknown");
+	}
+	down_read(&current_image_rwsem);
+	string_address += current_image * SMEM_IMAGE_VERSION_SINGLE_BLOCK_SIZE;
+	up_read(&current_image_rwsem);
+	string_address += SMEM_IMAGE_VERSION_VARIANT_OFFSET;
+	return snprintf(buf, SMEM_IMAGE_VERSION_VARIANT_SIZE, "%-.20s\n",
+			string_address);
+}
+
+static ssize_t
+msm_set_image_variant(struct device *dev,
+			struct device_attribute *attr,
+			const char *buf,
+			size_t count)
+{
+	char *store_address;
+
+	down_read(&current_image_rwsem);
+	if (current_image != SMEM_IMAGE_VERSION_PARTITION_APPS) {
+		up_read(&current_image_rwsem);
+		return count;
+	}
+	store_address = socinfo_get_image_version_base_address();
+	if (IS_ERR_OR_NULL(store_address)) {
+		pr_err("Failed to get image version base address");
+		up_read(&current_image_rwsem);
+		return count;
+	}
+	store_address += current_image * SMEM_IMAGE_VERSION_SINGLE_BLOCK_SIZE;
+	up_read(&current_image_rwsem);
+	store_address += SMEM_IMAGE_VERSION_VARIANT_OFFSET;
+	snprintf(store_address, SMEM_IMAGE_VERSION_VARIANT_SIZE, "%-.20s", buf);
+	return count;
+}
+
+static ssize_t
+msm_get_image_crm_version(struct device *dev,
+			struct device_attribute *attr,
+			char *buf)
+{
+	char *string_address;
+
+	string_address = socinfo_get_image_version_base_address();
+	if (IS_ERR_OR_NULL(string_address)) {
+		pr_err("Failed to get image version base address");
+		return snprintf(buf, SMEM_IMAGE_VERSION_OEM_SIZE, "Unknown");
+	}
+	down_read(&current_image_rwsem);
+	string_address += current_image * SMEM_IMAGE_VERSION_SINGLE_BLOCK_SIZE;
+	up_read(&current_image_rwsem);
+	string_address += SMEM_IMAGE_VERSION_OEM_OFFSET;
+	return snprintf(buf, SMEM_IMAGE_VERSION_OEM_SIZE, "%-.33s\n",
+			string_address);
+}
+
+static ssize_t
+msm_set_image_crm_version(struct device *dev,
+			struct device_attribute *attr,
+			const char *buf,
+			size_t count)
+{
+	char *store_address;
+
+	down_read(&current_image_rwsem);
+	if (current_image != SMEM_IMAGE_VERSION_PARTITION_APPS) {
+		up_read(&current_image_rwsem);
+		return count;
+	}
+	store_address = socinfo_get_image_version_base_address();
+	if (IS_ERR_OR_NULL(store_address)) {
+		pr_err("Failed to get image version base address");
+		up_read(&current_image_rwsem);
+		return count;
+	}
+	store_address += current_image * SMEM_IMAGE_VERSION_SINGLE_BLOCK_SIZE;
+	up_read(&current_image_rwsem);
+	store_address += SMEM_IMAGE_VERSION_OEM_OFFSET;
+	snprintf(store_address, SMEM_IMAGE_VERSION_OEM_SIZE, "%-.33s", buf);
+	return count;
+}
+
+static ssize_t
+msm_get_image_number(struct device *dev,
+			struct device_attribute *attr,
+			char *buf)
+{
+	int ret;
+
+	down_read(&current_image_rwsem);
+	ret = snprintf(buf, PAGE_SIZE, "%d\n",
+			current_image);
+	up_read(&current_image_rwsem);
+	return ret;
+}
+
+static ssize_t
+msm_select_image(struct device *dev, struct device_attribute *attr,
+			const char *buf, size_t count)
+{
+	int ret, digit;
+
+	ret = kstrtoint(buf, 10, &digit);
+	if (ret)
+		return ret;
+	down_write(&current_image_rwsem);
+	if (digit >= 0 && digit < SMEM_IMAGE_VERSION_BLOCKS_COUNT)
+		current_image = digit;
+	else
+		current_image = 0;
+	up_write(&current_image_rwsem);
+	return count;
+}
+
+static ssize_t
+msm_get_images(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	int pos = 0;
+	int image;
+	char *image_address;
+
+	image_address = socinfo_get_image_version_base_address();
+	if (IS_ERR_OR_NULL(image_address))
+		return snprintf(buf, PAGE_SIZE, "Unavailable\n");
+
+	*buf = '\0';
+	for (image = 0; image < SMEM_IMAGE_VERSION_BLOCKS_COUNT; image++) {
+		if (*image_address == '\0') {
+			image_address += SMEM_IMAGE_VERSION_SINGLE_BLOCK_SIZE;
+			continue;
+		}
+
+		pos += snprintf(buf + pos, PAGE_SIZE - pos, "%d:\n",
+				image);
+		pos += snprintf(buf + pos, PAGE_SIZE - pos, "\tCRM:\t\t%-.75s\n",
+				image_address);
+		pos += snprintf(buf + pos, PAGE_SIZE - pos, "\tVariant:\t%-.20s\n",
+				image_address + SMEM_IMAGE_VERSION_VARIANT_OFFSET);
+		pos += snprintf(buf + pos, PAGE_SIZE - pos,
+				"\tVersion:\t%-.33s\n",
+				image_address + SMEM_IMAGE_VERSION_OEM_OFFSET);
+
+		image_address += SMEM_IMAGE_VERSION_SINGLE_BLOCK_SIZE;
+	}
+
+	return pos;
+}
+
+static struct device_attribute msm_soc_attr_raw_version =
+	__ATTR(raw_version, S_IRUGO, msm_get_raw_version,  NULL);
+
+static struct device_attribute msm_soc_attr_raw_id =
+	__ATTR(raw_id, S_IRUGO, msm_get_raw_id,  NULL);
+
+static struct device_attribute msm_soc_attr_vendor =
+	__ATTR(vendor, S_IRUGO, msm_get_vendor,  NULL);
+
+static struct device_attribute msm_soc_attr_build_id =
+	__ATTR(build_id, S_IRUGO, msm_get_build_id, NULL);
+
+static struct device_attribute msm_soc_attr_hw_platform =
+	__ATTR(hw_platform, S_IRUGO, msm_get_hw_platform, NULL);
+
+static struct device_attribute msm_soc_attr_platform_version =
+	__ATTR(platform_version, S_IRUGO,
+			msm_get_platform_version, NULL);
+
+static struct device_attribute msm_soc_attr_accessory_chip =
+	__ATTR(accessory_chip, S_IRUGO,
+			msm_get_accessory_chip, NULL);
+
+static struct device_attribute msm_soc_attr_platform_subtype =
+	__ATTR(platform_subtype, S_IRUGO,
+			msm_get_platform_subtype, NULL);
+
+/*
+ * Platform Subtype String is being deprecated. Use Platform
+ * Subtype ID instead.
+ */
+static struct device_attribute msm_soc_attr_platform_subtype_id =
+	__ATTR(platform_subtype_id, S_IRUGO,
+			msm_get_platform_subtype_id, NULL);
+
+static struct device_attribute msm_soc_attr_foundry_id =
+	__ATTR(foundry_id, S_IRUGO,
+			msm_get_foundry_id, NULL);
+
+static struct device_attribute msm_soc_attr_serial_number =
+	__ATTR(serial_number, S_IRUGO,
+			msm_get_serial_number, NULL);
+
+static struct device_attribute msm_soc_attr_chip_family =
+	__ATTR(chip_family, S_IRUGO,
+			msm_get_chip_family, NULL);
+
+static struct device_attribute msm_soc_attr_raw_device_family =
+	__ATTR(raw_device_family, S_IRUGO,
+			msm_get_raw_device_family, NULL);
+
+static struct device_attribute msm_soc_attr_raw_device_number =
+	__ATTR(raw_device_number, S_IRUGO,
+			msm_get_raw_device_number, NULL);
+
+static struct device_attribute msm_soc_attr_pmic_model =
+	__ATTR(pmic_model, S_IRUGO,
+			msm_get_pmic_model, NULL);
+
+static struct device_attribute msm_soc_attr_pmic_die_revision =
+	__ATTR(pmic_die_revision, S_IRUGO,
+			msm_get_pmic_die_revision, NULL);
+
+static struct device_attribute image_version =
+	__ATTR(image_version, S_IRUGO | S_IWUSR,
+			msm_get_image_version, msm_set_image_version);
+
+static struct device_attribute image_variant =
+	__ATTR(image_variant, S_IRUGO | S_IWUSR,
+			msm_get_image_variant, msm_set_image_variant);
+
+static struct device_attribute image_crm_version =
+	__ATTR(image_crm_version, S_IRUGO | S_IWUSR,
+			msm_get_image_crm_version, msm_set_image_crm_version);
+
+static struct device_attribute select_image =
+	__ATTR(select_image, S_IRUGO | S_IWUSR,
+			msm_get_image_number, msm_select_image);
+
+static struct device_attribute images =
+	__ATTR(images, S_IRUGO, msm_get_images, NULL);
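+
+/*
+ * Usage sketch (not part of this driver; includes and error handling
+ * omitted): once registered, these attributes appear under the soc device
+ * sysfs node, typically /sys/devices/soc0. A minimal userspace reader,
+ * assuming that path, could be:
+ *
+ *	char buf[32] = {0};
+ *	int fd = open("/sys/devices/soc0/vendor", O_RDONLY);
+ *
+ *	if (fd >= 0) {
+ *		if (read(fd, buf, sizeof(buf) - 1) > 0)
+ *			printf("%s", buf);	prints "Qualcomm"
+ *		close(fd);
+ *	}
+ */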
+
+static void * __init setup_dummy_socinfo(void)
+{
+	if (early_machine_is_apq8084()) {
+		dummy_socinfo.id = 178;
+		strlcpy(dummy_socinfo.build_id, "apq8084 - ",
+			sizeof(dummy_socinfo.build_id));
+	} else if (early_machine_is_mdm9630()) {
+		dummy_socinfo.id = 187;
+		strlcpy(dummy_socinfo.build_id, "mdm9630 - ",
+			sizeof(dummy_socinfo.build_id));
+	} else if (early_machine_is_msm8909()) {
+		dummy_socinfo.id = 245;
+		strlcpy(dummy_socinfo.build_id, "msm8909 - ",
+			sizeof(dummy_socinfo.build_id));
+	} else if (early_machine_is_msm8916()) {
+		dummy_socinfo.id = 206;
+		strlcpy(dummy_socinfo.build_id, "msm8916 - ",
+			sizeof(dummy_socinfo.build_id));
+	} else if (early_machine_is_msm8939()) {
+		dummy_socinfo.id = 239;
+		strlcpy(dummy_socinfo.build_id, "msm8939 - ",
+			sizeof(dummy_socinfo.build_id));
+	} else if (early_machine_is_msm8936()) {
+		dummy_socinfo.id = 233;
+		strlcpy(dummy_socinfo.build_id, "msm8936 - ",
+			sizeof(dummy_socinfo.build_id));
+	} else if (early_machine_is_msmzirc()) {
+		dummy_socinfo.id = 238;
+		strlcpy(dummy_socinfo.build_id, "msmzirc - ",
+			sizeof(dummy_socinfo.build_id));
+	} else if (early_machine_is_msm8994()) {
+		dummy_socinfo.id = 207;
+		strlcpy(dummy_socinfo.build_id, "msm8994 - ",
+			sizeof(dummy_socinfo.build_id));
+	} else if (early_machine_is_msm8992()) {
+		dummy_socinfo.id = 251;
+		strlcpy(dummy_socinfo.build_id, "msm8992 - ",
+			sizeof(dummy_socinfo.build_id));
+	} else if (early_machine_is_msm8976()) {
+		dummy_socinfo.id = 266;
+		strlcpy(dummy_socinfo.build_id, "msm8976 - ",
+			sizeof(dummy_socinfo.build_id));
+	} else if (early_machine_is_msmtellurium()) {
+		dummy_socinfo.id = 264;
+		strlcpy(dummy_socinfo.build_id, "msmtellurium - ",
+			sizeof(dummy_socinfo.build_id));
+	} else if (early_machine_is_msm8996()) {
+		dummy_socinfo.id = 246;
+		strlcpy(dummy_socinfo.build_id, "msm8996 - ",
+			sizeof(dummy_socinfo.build_id));
+	} else if (early_machine_is_msm8929()) {
+		dummy_socinfo.id = 268;
+		strlcpy(dummy_socinfo.build_id, "msm8929 - ",
+			sizeof(dummy_socinfo.build_id));
+	} else if (early_machine_is_msm8998()) {
+		dummy_socinfo.id = 292;
+		strlcpy(dummy_socinfo.build_id, "msm8998 - ",
+			sizeof(dummy_socinfo.build_id));
+	} else if (early_machine_is_msmhamster()) {
+		dummy_socinfo.id = 306;
+		strlcpy(dummy_socinfo.build_id, "msmhamster - ",
+			sizeof(dummy_socinfo.build_id));
+	} else if (early_machine_is_sdm660()) {
+		dummy_socinfo.id = 317;
+		strlcpy(dummy_socinfo.build_id, "sdm660 - ",
+			sizeof(dummy_socinfo.build_id));
+	} else if (early_machine_is_sda660()) {
+		dummy_socinfo.id = 324;
+		strlcpy(dummy_socinfo.build_id, "sda660 - ",
+			sizeof(dummy_socinfo.build_id));
+	} else if (early_machine_is_sdm658()) {
+		dummy_socinfo.id = 325;
+		strlcpy(dummy_socinfo.build_id, "sdm658 - ",
+			sizeof(dummy_socinfo.build_id));
+	} else if (early_machine_is_sda658()) {
+		dummy_socinfo.id = 326;
+		strlcpy(dummy_socinfo.build_id, "sda658 - ",
+			sizeof(dummy_socinfo.build_id));
+	} else if (early_machine_is_sdm630()) {
+		dummy_socinfo.id = 318;
+		strlcpy(dummy_socinfo.build_id, "sdm630 - ",
+			sizeof(dummy_socinfo.build_id));
+	} else if (early_machine_is_sda630()) {
+		dummy_socinfo.id = 327;
+		strlcpy(dummy_socinfo.build_id, "sda630 - ",
+			sizeof(dummy_socinfo.build_id));
+	} else if (early_machine_is_sdm636()) {
+		dummy_socinfo.id = 345;
+		strlcpy(dummy_socinfo.build_id, "sdm636 - ",
+			sizeof(dummy_socinfo.build_id));
+	} else if (early_machine_is_sda636()) {
+		dummy_socinfo.id = 346;
+		strlcpy(dummy_socinfo.build_id, "sda636 - ",
+			sizeof(dummy_socinfo.build_id));
+	} else if (early_machine_is_apq8098()) {
+		dummy_socinfo.id = 319;
+		strlcpy(dummy_socinfo.build_id, "apq8098 - ",
+			sizeof(dummy_socinfo.build_id));
+	}
+
+	strlcat(dummy_socinfo.build_id, "Dummy socinfo",
+		sizeof(dummy_socinfo.build_id));
+	return (void *) &dummy_socinfo;
+}
+
+static void __init populate_soc_sysfs_files(struct device *msm_soc_device)
+{
+	device_create_file(msm_soc_device, &msm_soc_attr_vendor);
+	device_create_file(msm_soc_device, &image_version);
+	device_create_file(msm_soc_device, &image_variant);
+	device_create_file(msm_soc_device, &image_crm_version);
+	device_create_file(msm_soc_device, &select_image);
+	device_create_file(msm_soc_device, &images);
+
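+	/*
+	 * Intentional fall-through from here down: each newer socinfo
+	 * format is a superset of the older ones, so every case also
+	 * creates the attributes of all earlier versions.
+	 */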
+	switch (socinfo_format) {
+	case SOCINFO_VERSION(0, 12):
+		device_create_file(msm_soc_device,
+					&msm_soc_attr_chip_family);
+		device_create_file(msm_soc_device,
+					&msm_soc_attr_raw_device_family);
+		device_create_file(msm_soc_device,
+					&msm_soc_attr_raw_device_number);
+	case SOCINFO_VERSION(0, 11):
+	case SOCINFO_VERSION(0, 10):
+		device_create_file(msm_soc_device,
+					&msm_soc_attr_serial_number);
+	case SOCINFO_VERSION(0, 9):
+		device_create_file(msm_soc_device,
+					&msm_soc_attr_foundry_id);
+	case SOCINFO_VERSION(0, 8):
+	case SOCINFO_VERSION(0, 7):
+		device_create_file(msm_soc_device,
+					&msm_soc_attr_pmic_model);
+		device_create_file(msm_soc_device,
+					&msm_soc_attr_pmic_die_revision);
+	case SOCINFO_VERSION(0, 6):
+		device_create_file(msm_soc_device,
+					&msm_soc_attr_platform_subtype);
+		device_create_file(msm_soc_device,
+					&msm_soc_attr_platform_subtype_id);
+	case SOCINFO_VERSION(0, 5):
+		device_create_file(msm_soc_device,
+					&msm_soc_attr_accessory_chip);
+	case SOCINFO_VERSION(0, 4):
+		device_create_file(msm_soc_device,
+					&msm_soc_attr_platform_version);
+	case SOCINFO_VERSION(0, 3):
+		device_create_file(msm_soc_device,
+					&msm_soc_attr_hw_platform);
+	case SOCINFO_VERSION(0, 2):
+		device_create_file(msm_soc_device,
+					&msm_soc_attr_raw_id);
+		device_create_file(msm_soc_device,
+					&msm_soc_attr_raw_version);
+	case SOCINFO_VERSION(0, 1):
+		device_create_file(msm_soc_device,
+					&msm_soc_attr_build_id);
+		break;
+	default:
+		pr_err("Unknown socinfo format: v%u.%u\n",
+				SOCINFO_VERSION_MAJOR(socinfo_format),
+				SOCINFO_VERSION_MINOR(socinfo_format));
+		break;
+	}
+}
+
+static void __init soc_info_populate(struct soc_device_attribute *soc_dev_attr)
+{
+	uint32_t soc_version = socinfo_get_version();
+
+	soc_dev_attr->soc_id = kasprintf(GFP_KERNEL, "%d", socinfo_get_id());
+	soc_dev_attr->family = "Snapdragon";
+	soc_dev_attr->machine = socinfo_get_id_string();
+	soc_dev_attr->revision = kasprintf(GFP_KERNEL, "%u.%u",
+			SOCINFO_VERSION_MAJOR(soc_version),
+			SOCINFO_VERSION_MINOR(soc_version));
+}
+
+static int __init socinfo_init_sysfs(void)
+{
+	struct device *msm_soc_device;
+	struct soc_device *soc_dev;
+	struct soc_device_attribute *soc_dev_attr;
+
+	if (!socinfo) {
+		pr_err("No socinfo found!\n");
+		return -ENODEV;
+	}
+
+	soc_dev_attr = kzalloc(sizeof(*soc_dev_attr), GFP_KERNEL);
+	if (!soc_dev_attr) {
+		pr_err("Soc Device alloc failed!\n");
+		return -ENOMEM;
+	}
+
+	soc_info_populate(soc_dev_attr);
+	soc_dev = soc_device_register(soc_dev_attr);
+	if (IS_ERR_OR_NULL(soc_dev)) {
+		kfree(soc_dev_attr);
+		 pr_err("Soc device register failed\n");
+		 return -EIO;
+	}
+
+	msm_soc_device = soc_device_to_device(soc_dev);
+	populate_soc_sysfs_files(msm_soc_device);
+	return 0;
+}
+
+late_initcall(socinfo_init_sysfs);
+
+static void socinfo_print(void)
+{
+	uint32_t f_maj = SOCINFO_VERSION_MAJOR(socinfo_format);
+	uint32_t f_min = SOCINFO_VERSION_MINOR(socinfo_format);
+	uint32_t v_maj = SOCINFO_VERSION_MAJOR(socinfo->v0_1.version);
+	uint32_t v_min = SOCINFO_VERSION_MINOR(socinfo->v0_1.version);
+
+	switch (socinfo_format) {
+	case SOCINFO_VERSION(0, 1):
+		pr_info("v%u.%u, id=%u, ver=%u.%u\n",
+			f_maj, f_min, socinfo->v0_1.id, v_maj, v_min);
+		break;
+	case SOCINFO_VERSION(0, 2):
+		pr_info("v%u.%u, id=%u, ver=%u.%u, raw_id=%u, raw_ver=%u\n",
+			f_maj, f_min, socinfo->v0_1.id, v_maj, v_min,
+			socinfo->v0_2.raw_id, socinfo->v0_2.raw_version);
+		break;
+	case SOCINFO_VERSION(0, 3):
+		pr_info("v%u.%u, id=%u, ver=%u.%u, raw_id=%u, raw_ver=%u, hw_plat=%u\n",
+			f_maj, f_min, socinfo->v0_1.id, v_maj, v_min,
+			socinfo->v0_2.raw_id, socinfo->v0_2.raw_version,
+			socinfo->v0_3.hw_platform);
+		break;
+	case SOCINFO_VERSION(0, 4):
+		pr_info("v%u.%u, id=%u, ver=%u.%u, raw_id=%u, raw_ver=%u, hw_plat=%u, hw_plat_ver=%u\n",
+			f_maj, f_min, socinfo->v0_1.id, v_maj, v_min,
+			socinfo->v0_2.raw_id, socinfo->v0_2.raw_version,
+			socinfo->v0_3.hw_platform,
+			socinfo->v0_4.platform_version);
+		break;
+	case SOCINFO_VERSION(0, 5):
+		pr_info("v%u.%u, id=%u, ver=%u.%u, raw_id=%u, raw_ver=%u, hw_plat=%u, hw_plat_ver=%u, accessory_chip=%u\n",
+			f_maj, f_min, socinfo->v0_1.id, v_maj, v_min,
+			socinfo->v0_2.raw_id, socinfo->v0_2.raw_version,
+			socinfo->v0_3.hw_platform,
+			socinfo->v0_4.platform_version,
+			socinfo->v0_5.accessory_chip);
+		break;
+	case SOCINFO_VERSION(0, 6):
+		pr_info("v%u.%u, id=%u, ver=%u.%u, raw_id=%u, raw_ver=%u, hw_plat=%u, hw_plat_ver=%u, accessory_chip=%u, hw_plat_subtype=%u\n",
+			f_maj, f_min, socinfo->v0_1.id, v_maj, v_min,
+			socinfo->v0_2.raw_id, socinfo->v0_2.raw_version,
+			socinfo->v0_3.hw_platform,
+			socinfo->v0_4.platform_version,
+			socinfo->v0_5.accessory_chip,
+			socinfo->v0_6.hw_platform_subtype);
+		break;
+	case SOCINFO_VERSION(0, 7):
+	case SOCINFO_VERSION(0, 8):
+		pr_info("v%u.%u, id=%u, ver=%u.%u, raw_id=%u, raw_ver=%u, hw_plat=%u, hw_plat_ver=%u\n accessory_chip=%u, hw_plat_subtype=%u, pmic_model=%u, pmic_die_revision=%u\n",
+			f_maj, f_min, socinfo->v0_1.id, v_maj, v_min,
+			socinfo->v0_2.raw_id, socinfo->v0_2.raw_version,
+			socinfo->v0_3.hw_platform,
+			socinfo->v0_4.platform_version,
+			socinfo->v0_5.accessory_chip,
+			socinfo->v0_6.hw_platform_subtype,
+			socinfo->v0_7.pmic_model,
+			socinfo->v0_7.pmic_die_revision);
+		break;
+	case SOCINFO_VERSION(0, 9):
+		pr_info("v%u.%u, id=%u, ver=%u.%u, raw_id=%u, raw_ver=%u, hw_plat=%u, hw_plat_ver=%u\n accessory_chip=%u, hw_plat_subtype=%u, pmic_model=%u, pmic_die_revision=%u foundry_id=%u\n",
+			f_maj, f_min, socinfo->v0_1.id, v_maj, v_min,
+			socinfo->v0_2.raw_id, socinfo->v0_2.raw_version,
+			socinfo->v0_3.hw_platform,
+			socinfo->v0_4.platform_version,
+			socinfo->v0_5.accessory_chip,
+			socinfo->v0_6.hw_platform_subtype,
+			socinfo->v0_7.pmic_model,
+			socinfo->v0_7.pmic_die_revision,
+			socinfo->v0_9.foundry_id);
+		break;
+	case SOCINFO_VERSION(0, 10):
+		pr_info("v%u.%u, id=%u, ver=%u.%u, raw_id=%u, raw_ver=%u, hw_plat=%u, hw_plat_ver=%u\n accessory_chip=%u, hw_plat_subtype=%u, pmic_model=%u, pmic_die_revision=%u foundry_id=%u serial_number=%u\n",
+			f_maj, f_min, socinfo->v0_1.id, v_maj, v_min,
+			socinfo->v0_2.raw_id, socinfo->v0_2.raw_version,
+			socinfo->v0_3.hw_platform,
+			socinfo->v0_4.platform_version,
+			socinfo->v0_5.accessory_chip,
+			socinfo->v0_6.hw_platform_subtype,
+			socinfo->v0_7.pmic_model,
+			socinfo->v0_7.pmic_die_revision,
+			socinfo->v0_9.foundry_id,
+			socinfo->v0_10.serial_number);
+		break;
+	case SOCINFO_VERSION(0, 11):
+		pr_info("v%u.%u, id=%u, ver=%u.%u, raw_id=%u, raw_ver=%u, hw_plat=%u, hw_plat_ver=%u\n accessory_chip=%u, hw_plat_subtype=%u, pmic_model=%u, pmic_die_revision=%u foundry_id=%u serial_number=%u num_pmics=%u\n",
+			f_maj, f_min, socinfo->v0_1.id, v_maj, v_min,
+			socinfo->v0_2.raw_id, socinfo->v0_2.raw_version,
+			socinfo->v0_3.hw_platform,
+			socinfo->v0_4.platform_version,
+			socinfo->v0_5.accessory_chip,
+			socinfo->v0_6.hw_platform_subtype,
+			socinfo->v0_7.pmic_model,
+			socinfo->v0_7.pmic_die_revision,
+			socinfo->v0_9.foundry_id,
+			socinfo->v0_10.serial_number,
+			socinfo->v0_11.num_pmics);
+		break;
+	case SOCINFO_VERSION(0, 12):
+		pr_info("v%u.%u, id=%u, ver=%u.%u, raw_id=%u, raw_ver=%u, hw_plat=%u, hw_plat_ver=%u\n accessory_chip=%u, hw_plat_subtype=%u, pmic_model=%u, pmic_die_revision=%u foundry_id=%u serial_number=%u num_pmics=%u chip_family=0x%x raw_device_family=0x%x raw_device_number=0x%x\n",
+			f_maj, f_min, socinfo->v0_1.id, v_maj, v_min,
+			socinfo->v0_2.raw_id, socinfo->v0_2.raw_version,
+			socinfo->v0_3.hw_platform,
+			socinfo->v0_4.platform_version,
+			socinfo->v0_5.accessory_chip,
+			socinfo->v0_6.hw_platform_subtype,
+			socinfo->v0_7.pmic_model,
+			socinfo->v0_7.pmic_die_revision,
+			socinfo->v0_9.foundry_id,
+			socinfo->v0_10.serial_number,
+			socinfo->v0_11.num_pmics,
+			socinfo->v0_12.chip_family,
+			socinfo->v0_12.raw_device_family,
+			socinfo->v0_12.raw_device_number);
+		break;
+	default:
+		pr_err("Unknown format found: v%u.%u\n", f_maj, f_min);
+		break;
+	}
+}
+
+static void socinfo_select_format(void)
+{
+	uint32_t f_maj = SOCINFO_VERSION_MAJOR(socinfo->v0_1.format);
+	uint32_t f_min = SOCINFO_VERSION_MINOR(socinfo->v0_1.format);
+
+	if (f_maj != 0) {
+		pr_err("Unsupported format v%u.%u. Falling back to dummy values.\n",
+			f_maj, f_min);
+		socinfo = setup_dummy_socinfo();
+	}
+
+	if (socinfo->v0_1.format > MAX_SOCINFO_FORMAT) {
+		pr_warn("Unsupported format v%u.%u. Falling back to v%u.%u.\n",
+			f_maj, f_min, SOCINFO_VERSION_MAJOR(MAX_SOCINFO_FORMAT),
+			SOCINFO_VERSION_MINOR(MAX_SOCINFO_FORMAT));
+		socinfo_format = MAX_SOCINFO_FORMAT;
+	} else {
+		socinfo_format = socinfo->v0_1.format;
+	}
+}
+
+int __init socinfo_init(void)
+{
+	static bool socinfo_init_done;
+	unsigned size;
+
+	if (socinfo_init_done)
+		return 0;
+
+	socinfo = smem_get_entry(SMEM_HW_SW_BUILD_ID, &size, 0,
+				 SMEM_ANY_HOST_FLAG);
+	if (IS_ERR_OR_NULL(socinfo)) {
+		pr_warn("Can't find SMEM_HW_SW_BUILD_ID; falling back on dummy values.\n");
+		socinfo = setup_dummy_socinfo();
+	}
+
+	socinfo_select_format();
+
+	WARN(!socinfo_get_id(), "Unknown SOC ID!\n");
+
+	if (socinfo_get_id() >= ARRAY_SIZE(cpu_of_id))
+		BUG_ON("New IDs added! ID => CPU mapping needs an update.\n");
+	else
+		cur_cpu = cpu_of_id[socinfo->v0_1.id].generic_soc_type;
+
+	boot_stats_init();
+	socinfo_print();
+	arch_read_hardware_id = msm_read_hardware_id;
+	socinfo_init_done = true;
+
+	return 0;
+}
+subsys_initcall(socinfo_init);
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/soc/qcom/spcom.c	2019-01-22 16:16:26.679275167 +0100
@@ -0,0 +1,2814 @@
+/*
+ * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/*
+ * Secure-Processor-Communication (SPCOM).
+ *
+ * This driver provides communication to Secure Processor (SP)
+ * over G-Link transport layer.
+ *
+ * It provides interface to both User Space spcomlib and kernel drivers.
+ *
+ * User Space App shall use spcomlib for communication with SP.
+ * User Space App can be either Client or Server.
+ * spcomlib shall use write() file operation to send data,
+ * and read() file operation to read data.
+ *
+ * This driver uses glink as the transport layer.
+ * This driver exposes "/dev/<sp-channel-name>" file node for each glink
+ * logical channel.
+ * This driver exposes "/dev/spcom" file node for some debug/control command.
+ * The predefined channel "/dev/sp_kernel" is used for loading SP Application
+ * from HLOS.
+ * This driver exposes "/dev/sp_ssr" file node to allow user space poll for SSR.
+ * After the remote SP App is loaded, this driver exposes a new file node
+ * "/dev/<ch-name>" for the matching HLOS App to use.
+ * Access to the predefined file nodes is restricted by unix group
+ * and SELinux.
+ *
+ * No message routing is used; instead, the G-Link "multiplexing" feature
+ * provides a dedicated logical channel for each HLOS and SP Application-Pair.
+ *
+ * Each HLOS/SP Application can be either Client or Server or both;
+ * messaging is always point-to-point between 2 HLOS<=>SP applications.
+ *
+ * User Space Request & Response are synchronous.
+ * read() & write() operations are blocking until completed or terminated.
+ *
+ * This driver registers to G-Link callbacks to be aware on channel state.
+ * A notify callback is called upon channel connect/disconnect.
+ *
+ */
+
+/* Uncomment the line below to test spcom against modem rather than SP */
+/* #define SPCOM_TEST_HLOS_WITH_MODEM 1 */
+
+/* Uncomment the line below to enable debug messages */
+/* #define DEBUG 1 */
+
+#define pr_fmt(fmt)	"spcom [%s]: " fmt, __func__
+
+#include <linux/kernel.h>	/* min() */
+#include <linux/module.h>	/* MODULE_LICENSE */
+#include <linux/device.h>	/* class_create() */
+#include <linux/slab.h>	/* kzalloc() */
+#include <linux/fs.h>		/* file_operations */
+#include <linux/cdev.h>	/* cdev_add() */
+#include <linux/errno.h>	/* EINVAL, ETIMEDOUT */
+#include <linux/printk.h>	/* pr_err() */
+#include <linux/bitops.h>	/* BIT(x) */
+#include <linux/completion.h>	/* wait_for_completion_timeout() */
+#include <linux/poll.h>	/* POLLOUT */
+#include <linux/dma-mapping.h>	/* dma_alloc_coherent() */
+#include <linux/platform_device.h>
+#include <linux/of.h>		/* of_property_count_strings() */
+#include <linux/workqueue.h>
+#include <linux/delay.h>	/* msleep() */
+#include <linux/msm_ion.h>	/* msm_ion_client_create() */
+
+#include <soc/qcom/glink.h>
+#include <soc/qcom/smem.h>
+#include <soc/qcom/spcom.h>
+
+#include <uapi/linux/spcom.h>
+
+#include "glink_private.h" /* glink_ssr() */
+
+/* "SPCM" string */
+#define SPCOM_MAGIC_ID	((uint32_t)(0x5350434D))
+
+/* Request/Response */
+#define SPCOM_FLAG_REQ		BIT(0)
+#define SPCOM_FLAG_RESP	BIT(1)
+#define SPCOM_FLAG_ENCODED	BIT(2)
+#define SPCOM_FLAG_NON_ENCODED	BIT(3)
+
+/* SPCOM driver name */
+#define DEVICE_NAME	"spcom"
+
+/* maximum ION buffers per channel, should be >= SPCOM_MAX_CHANNELS */
+#define SPCOM_MAX_ION_BUF_PER_CH (SPCOM_MAX_CHANNELS + 4)
+
+/* maximum ION buffer per send request/response command */
+#define SPCOM_MAX_ION_BUF_PER_CMD SPCOM_MAX_ION_BUF
+
+/* Maximum command size */
+#define SPCOM_MAX_COMMAND_SIZE	(PAGE_SIZE)
+
+/* Maximum input size */
+#define SPCOM_MAX_READ_SIZE	(PAGE_SIZE)
+
+/* Current Process ID */
+#define current_pid() ((u32)(current->pid))
+
+/* Maximum channel name size (including null) - matching GLINK_NAME_SIZE */
+#define MAX_CH_NAME_LEN	32
+
+/* Connection negotiation timeout, if remote channel is open */
+#define OPEN_CHANNEL_TIMEOUT_MSEC	100
+
+/*
+ * After both sides get CONNECTED, there is a race between one side queuing
+ * an rx buffer and the other side calling glink_tx(); this race happens only
+ * on the 1st tx. Retry tx with some delay to allow the other side to queue
+ * an rx buffer.
+ */
+#define TX_RETRY_DELAY_MSEC	100
+
+/* number of tx retries */
+#define TX_MAX_RETRY	3
+
+/* SPCOM_MAX_REQUEST_SIZE or SPCOM_MAX_RESPONSE_SIZE, plus header */
+#define SPCOM_RX_BUF_SIZE	300
+
+/* The SPSS RAM size is 256 KB so SP App must fit into it */
+#define SPCOM_MAX_APP_SIZE	SZ_256K
+
+/*
+ * ACK timeout from remote side for TX data.
+ * Normally, it takes a few msec for SPSS to respond with ACK for TX data.
+ * However, due to SPSS HW issue, the SPSS might disable interrupts
+ * for a very long time.
+ */
+#define TX_DONE_TIMEOUT_MSEC	5000
+
+/*
+ * Initial transaction id, use non-zero nonce for debug.
+ * Incremented by client on request, and copied back by server on response.
+ */
+#define INITIAL_TXN_ID	0x12345678
+
+/**
+ * struct spcom_msg_hdr - Request/Response message header between HLOS and SP.
+ *
+ * This header precedes any request-specific parameters.
+ * The transaction id is used to match a request with its response.
+ * Note: the glink API provides the rx/tx data size, so the user payload size
+ * is calculated by subtracting the header size.
+ */
+struct spcom_msg_hdr {
+	uint32_t reserved;	/* for future use */
+	uint32_t txn_id;	/* transaction id */
+	char buf[0];		/* Variable buffer size, must be last field */
+} __packed;
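+
+/*
+ * Wire-format sketch (illustrative): a request with an N-byte payload
+ * occupies sizeof(struct spcom_msg_hdr) + N bytes on the link. The server
+ * copies txn_id from the request into its response, which is how a client
+ * matches a response to the request it sent (see INITIAL_TXN_ID above).
+ */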
+
+/**
+ * struct spcom_client - Client handle
+ */
+struct spcom_client {
+	struct spcom_channel *ch;
+};
+
+/**
+ * struct spcom_server - Server handle
+ */
+struct spcom_server {
+	struct spcom_channel *ch;
+};
+
+/**
+ * struct spcom_channel - channel context
+ */
+struct spcom_channel {
+	char name[MAX_CH_NAME_LEN];
+	struct mutex lock;
+	void *glink_handle;
+	uint32_t txn_id;	/* incrementing nonce per channel */
+	bool is_server;		/* for txn_id and response_timeout_msec */
+	uint32_t response_timeout_msec; /* for client only */
+
+	/* char dev */
+	struct cdev *cdev;
+	struct device *dev;
+	struct device_attribute attr;
+
+	/*
+	 * glink state: CONNECTED / LOCAL_DISCONNECTED / REMOTE_DISCONNECTED
+	 */
+	unsigned glink_state;
+	bool is_closing;
+
+	/* Events notification */
+	struct completion connect;
+	struct completion disconnect;
+	struct completion tx_done;
+	struct completion rx_done;
+
+	/*
+	 * Only one client or server per channel.
+	 * Only one rx/tx transaction at a time (request + response).
+	 */
+	int ref_count;
+
+	u32 pid; /* debug only to find user space application */
+
+	/* abort flags */
+	bool rx_abort;
+	bool tx_abort;
+
+	/* rx data info */
+	size_t rx_buf_size;	/* allocated rx buffer size */
+	bool rx_buf_ready;
+	size_t actual_rx_size;	/* actual data size received */
+	const void *glink_rx_buf;
+
+	/* ION lock/unlock support */
+	int ion_fd_table[SPCOM_MAX_ION_BUF_PER_CH];
+	struct ion_handle *ion_handle_table[SPCOM_MAX_ION_BUF_PER_CH];
+};
+
+/**
+ * struct spcom_device - device state structure.
+ */
+struct spcom_device {
+	char predefined_ch_name[SPCOM_MAX_CHANNELS][MAX_CH_NAME_LEN];
+
+	/* char device info */
+	struct cdev cdev;
+	dev_t device_no;
+	struct class *driver_class;
+	struct device *class_dev;
+
+	/* G-Link channels */
+	struct spcom_channel channels[SPCOM_MAX_CHANNELS];
+	int channel_count;
+
+	/* private */
+	struct mutex cmd_lock;
+
+	/* Link state */
+	struct completion link_state_changed;
+	enum glink_link_state link_state;
+
+	/* ION support */
+	struct ion_client *ion_client;
+};
+
+#ifdef SPCOM_TEST_HLOS_WITH_MODEM
+static const char *spcom_edge = "mpss";
+static const char *spcom_transport = "smem";
+#else
+static const char *spcom_edge = "spss";
+static const char *spcom_transport = "mailbox";
+#endif
+
+/* Device Driver State */
+static struct spcom_device *spcom_dev;
+
+/* static functions declaration */
+static int spcom_create_channel_chardev(const char *name);
+static int spcom_open(struct spcom_channel *ch, unsigned int timeout_msec);
+static int spcom_close(struct spcom_channel *ch);
+static void spcom_notify_rx_abort(void *handle, const void *priv,
+				  const void *pkt_priv);
+static struct spcom_channel *spcom_find_channel_by_name(const char *name);
+static int spcom_unlock_ion_buf(struct spcom_channel *ch, int fd);
+static void spcom_rx_abort_pending_server(void);
+
+/**
+ * spcom_is_ready() - driver is initialized and ready.
+ */
+static inline bool spcom_is_ready(void)
+{
+	return spcom_dev != NULL;
+}
+
+/**
+ * spcom_is_channel_open() - channel is open on this side.
+ *
+ * The channel might not be fully connected if the remote side hasn't opened
+ * it yet.
+ */
+static inline bool spcom_is_channel_open(struct spcom_channel *ch)
+{
+	return ch->glink_handle != NULL;
+}
+
+/**
+ * spcom_is_channel_connected() - channel is fully connected by both sides.
+ */
+static inline bool spcom_is_channel_connected(struct spcom_channel *ch)
+{
+	/* Channel must be open before it gets connected */
+	if (!spcom_is_channel_open(ch))
+		return false;
+
+	return (ch->glink_state == GLINK_CONNECTED);
+}
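+
+/*
+ * State ladder sketch: a channel is "open" once glink_open() returned a
+ * handle (glink_handle != NULL), and "connected" only after both sides
+ * opened it and glink reported GLINK_CONNECTED; a close or an SSR drops
+ * it back through REMOTE/LOCAL_DISCONNECTED.
+ */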
+
+/**
+ * spcom_create_predefined_channels_chardev() - expose predefined channels to
+ * user space.
+ *
+ * The predefined channels list is provided by the device tree.
+ * Typically, these are known servers on the remote side that are not loaded
+ * by the HLOS.
+ */
+static int spcom_create_predefined_channels_chardev(void)
+{
+	int i;
+	int ret;
+	static bool is_predefined_created;
+
+	if (is_predefined_created)
+		return 0;
+
+	for (i = 0; i < SPCOM_MAX_CHANNELS; i++) {
+		const char *name = spcom_dev->predefined_ch_name[i];
+
+		if (name[0] == 0)
+			break;
+		ret = spcom_create_channel_chardev(name);
+		if (ret) {
+			pr_err("failed to create chardev [%s], ret [%d].\n",
+			       name, ret);
+			return -EFAULT;
+		}
+	}
+
+	is_predefined_created = true;
+
+	return 0;
+}
+
+/*======================================================================*/
+/*		GLINK CALLBACKS						*/
+/*======================================================================*/
+
+/**
+ * spcom_link_state_notif_cb() - glink callback for link state change.
+ *
+ * glink notifies that the link layer is up before any channel is opened on
+ * the remote side. Calling glink_open() locally is allowed only after the
+ * link is up. Link down is normally notified upon Remote Subsystem Reset
+ * (SSR).
+ * Note: upon SSR, glink will also notify each channel about remote disconnect,
+ * and abort any pending rx buffer.
+ */
+static void spcom_link_state_notif_cb(struct glink_link_state_cb_info *cb_info,
+				      void *priv)
+{
+	struct spcom_channel *ch = NULL;
+	const char *ch_name = "sp_kernel";
+
+	if (!cb_info) {
+		pr_err("invalid NULL cb_info param\n");
+		return;
+	}
+
+	if (!spcom_is_ready()) {
+		pr_err("spcom is not ready.\n");
+		return;
+	}
+
+	spcom_dev->link_state = cb_info->link_state;
+
+	pr_debug("spcom_link_state_notif_cb called. transport = %s edge = %s\n",
+		 cb_info->transport, cb_info->edge);
+
+	switch (cb_info->link_state) {
+	case GLINK_LINK_STATE_UP:
+		pr_info("GLINK_LINK_STATE_UP.\n");
+		spcom_create_predefined_channels_chardev();
+		break;
+	case GLINK_LINK_STATE_DOWN:
+		pr_err("GLINK_LINK_STATE_DOWN.\n");
+
+		/*
+		 * Free all the SKP ION buffers that were locked
+		 * for SPSS app swapping, when the remote subsystem resets.
+		 */
+		pr_debug("Free all SKP ION buffers on SSR.\n");
+		ch = spcom_find_channel_by_name(ch_name);
+		if (!ch)
+			pr_err("failed to find channel [%s].\n", ch_name);
+		else
+			spcom_unlock_ion_buf(ch, SPCOM_ION_FD_UNLOCK_ALL);
+
+		pr_debug("Rx-Abort pending servers.\n");
+		spcom_rx_abort_pending_server();
+		break;
+	default:
+		pr_err("unknown link_state [%d].\n", cb_info->link_state);
+		break;
+	}
+	complete_all(&spcom_dev->link_state_changed);
+}
+
+/**
+ * spcom_notify_rx() - glink callback on receiving data.
+ *
+ * glink notifies that rx data is ready. The glink internal rx buffer was
+ * allocated upon glink_queue_rx_intent().
+ */
+static void spcom_notify_rx(void *handle,
+			    const void *priv, const void *pkt_priv,
+			    const void *buf, size_t size)
+{
+	struct spcom_channel *ch = (struct spcom_channel *) priv;
+
+	if (!ch) {
+		pr_err("invalid NULL channel param\n");
+		return;
+	}
+	if (!buf) {
+		pr_err("invalid NULL buf param\n");
+		return;
+	}
+
+	pr_debug("ch [%s] rx size [%zu]\n", ch->name, size);
+
+	ch->actual_rx_size = size;
+	ch->glink_rx_buf = (void *) buf;
+
+	complete_all(&ch->rx_done);
+}
+
+/**
+ * spcom_notify_tx_done() - glink callback on ACK of sent data.
+ *
+ * After calling glink_tx(), the remote side ACKs receiving the data.
+ */
+static void spcom_notify_tx_done(void *handle,
+				 const void *priv, const void *pkt_priv,
+				 const void *buf)
+{
+	struct spcom_channel *ch = (struct spcom_channel *) priv;
+	int *tx_buf = (int *) buf;
+
+	if (!ch) {
+		pr_err("invalid NULL channel param\n");
+		return;
+	}
+	if (!buf) {
+		pr_err("invalid NULL buf param\n");
+		return;
+	}
+
+	pr_debug("ch [%s] buf[0] = [0x%x].\n", ch->name, tx_buf[0]);
+
+	complete_all(&ch->tx_done);
+}
+
+/**
+ * spcom_notify_state() - glink callback on channel connect/disconnect.
+ *
+ * Channel is fully CONNECTED after both sides opened the channel.
+ * Channel is LOCAL_DISCONNECTED after both sides closed the channel.
+ * If the remote side closed the channel, it is expected that the local side
+ * will also close the channel.
+ * Upon connection, an rx buffer is allocated to receive data;
+ * the maximum transfer size is agreed by both sides.
+ */
+static void spcom_notify_state(void *handle, const void *priv, unsigned event)
+{
+	int ret;
+	struct spcom_channel *ch = (struct spcom_channel *) priv;
+
+	if (!ch) {
+		pr_err("invalid NULL channel param\n");
+		return;
+	}
+
+	switch (event) {
+	case GLINK_CONNECTED:
+		pr_debug("GLINK_CONNECTED, ch name [%s].\n", ch->name);
+		mutex_lock(&ch->lock);
+
+		if (ch->is_closing) {
+			pr_err("Unexpected CONNECTED while closing [%s].\n",
+				ch->name);
+			mutex_unlock(&ch->lock);
+			return;
+		}
+
+		ch->glink_state = event;
+
+		if (!handle) {
+			pr_err("inavlid glink_handle, ch [%s].\n", ch->name);
+			mutex_unlock(&ch->lock);
+			return;
+		}
+
+		/* signal before unlock mutex & before calling glink */
+		complete_all(&ch->connect);
+
+		/*
+		 * Prepare default rx buffer.
+		 * glink_queue_rx_intent() can be called only AFTER connected.
+		 * We do it here, ASAP, to allow rx data.
+		 */
+
+		pr_debug("call glink_queue_rx_intent() ch [%s].\n", ch->name);
+		ret = glink_queue_rx_intent(handle, ch, ch->rx_buf_size);
+		if (ret) {
+			pr_err("glink_queue_rx_intent() err [%d]\n", ret);
+		} else {
+			pr_debug("rx buf is ready, size [%zu]\n",
+				 ch->rx_buf_size);
+			ch->rx_buf_ready = true;
+		}
+
+		pr_debug("GLINK_CONNECTED, ch name [%s] done.\n", ch->name);
+		mutex_unlock(&ch->lock);
+		break;
+	case GLINK_LOCAL_DISCONNECTED:
+		/*
+		 * Channel state is GLINK_LOCAL_DISCONNECTED
+		 * only after *both* sides closed the channel.
+		 */
+		pr_debug("GLINK_LOCAL_DISCONNECTED, ch [%s].\n", ch->name);
+		ch->glink_state = event;
+		complete_all(&ch->disconnect);
+		break;
+	case GLINK_REMOTE_DISCONNECTED:
+		/*
+		 * Remote side initiates glink_close().
+		 * This is not expected on normal operation.
+		 * This may happen upon remote SSR.
+		 */
+		pr_err("GLINK_REMOTE_DISCONNECTED, ch [%s].\n", ch->name);
+
+		ch->glink_state = event;
+
+		/*
+		 * Abort any blocking read() operation.
+		 * The glink notification might be after REMOTE_DISCONNECT.
+		 */
+		spcom_notify_rx_abort(NULL, ch, NULL);
+
+		/*
+		 * after glink_close(), expect a GLINK_LOCAL_DISCONNECTED
+		 * notification
+		 */
+		spcom_close(ch);
+		break;
+	default:
+		pr_err("unknown event id = %d, ch name [%s].\n",
+		       (int) event, ch->name);
+		return;
+	}
+}
+
+/**
+ * spcom_notify_rx_intent_req() - glink callback on intent request.
+ *
+ * glink allows the remote side to request a local rx buffer if such a
+ * buffer is not ready.
+ * However, for spcom simplicity on SP, and to reduce latency, we decided
+ * that glink_tx() on both sides does not use the INTENT_REQ flag, so this
+ * callback should not be called.
+ * Anyhow, return "false" to reject the request.
+ */
+static bool spcom_notify_rx_intent_req(void *handle, const void *priv,
+				       size_t req_size)
+{
+	pr_err("Unexpected intent request\n");
+
+	return false;
+}
+
+/**
+ * spcom_notify_rx_abort() - glink callback on aborting rx pending buffer.
+ *
+ * Rx abort may happen if channel is closed by remote side, while rx buffer is
+ * pending in the queue.
+ */
+static void spcom_notify_rx_abort(void *handle, const void *priv,
+				  const void *pkt_priv)
+{
+	struct spcom_channel *ch = (struct spcom_channel *) priv;
+
+	if (!ch) {
+		pr_err("invalid NULL channel param\n");
+		return;
+	}
+
+	pr_debug("ch [%s] pending rx aborted.\n", ch->name);
+
+	if (spcom_is_channel_open(ch) && (!ch->rx_abort)) {
+		ch->rx_abort = true;
+		complete_all(&ch->rx_done);
+	}
+}
+
+/**
+ * spcom_notify_tx_abort() - glink callback on aborting tx data.
+ *
+ * This is probably not relevant, since glink_txv() is not used.
+ * Tx abort may happen if channel is closed by remote side,
+ * while multiple tx buffers are in a middle of tx operation.
+ */
+static void spcom_notify_tx_abort(void *handle, const void *priv,
+				  const void *pkt_priv)
+{
+	struct spcom_channel *ch = (struct spcom_channel *) priv;
+
+	if (!ch) {
+		pr_err("invalid NULL channel param\n");
+		return;
+	}
+
+	pr_debug("ch [%s] pending tx aborted.\n", ch->name);
+
+	if (spcom_is_channel_connected(ch) && (!ch->tx_abort)) {
+		ch->tx_abort = true;
+		complete_all(&ch->tx_done);
+	}
+}
+
+/*======================================================================*/
+/*		UTILITIES						*/
+/*======================================================================*/
+
+/**
+ * spcom_init_open_config() - Fill glink_open() configuration parameters.
+ *
+ * @cfg: glink configuration struct pointer
+ * @name: channel name
+ * @priv: private caller data (channel state), provided back by callbacks.
+ *
+ * Specify callbacks and other parameters for glink_open().
+ */
+static void spcom_init_open_config(struct glink_open_config *cfg,
+				   const char *name, void *priv)
+{
+	cfg->notify_rx		= spcom_notify_rx;
+	cfg->notify_rxv		= NULL;
+	cfg->notify_tx_done	= spcom_notify_tx_done;
+	cfg->notify_state	= spcom_notify_state;
+	cfg->notify_rx_intent_req = spcom_notify_rx_intent_req;
+	cfg->notify_rx_sigs	= NULL;
+	cfg->notify_rx_abort	= spcom_notify_rx_abort;
+	cfg->notify_tx_abort	= spcom_notify_tx_abort;
+
+	cfg->options	= 0; /* not using GLINK_OPT_INITIAL_XPORT */
+	cfg->priv	= priv; /* provided back by callbacks */
+
+	cfg->name	= name;
+
+	cfg->transport	= spcom_transport;
+	cfg->edge	= spcom_edge;
+}
+
+/**
+ * spcom_init_channel() - initialize channel state.
+ *
+ * @ch: channel state struct pointer
+ * @name: channel name
+ */
+static int spcom_init_channel(struct spcom_channel *ch, const char *name)
+{
+	if (!ch || !name || !name[0]) {
+		pr_err("invalid parameters.\n");
+		return -EINVAL;
+	}
+
+	strlcpy(ch->name, name, sizeof(ch->name));
+
+	init_completion(&ch->connect);
+	init_completion(&ch->disconnect);
+	init_completion(&ch->tx_done);
+	init_completion(&ch->rx_done);
+
+	mutex_init(&ch->lock);
+	ch->glink_state = GLINK_LOCAL_DISCONNECTED;
+	ch->actual_rx_size = 0;
+	ch->rx_buf_size = SPCOM_RX_BUF_SIZE;
+	ch->is_closing = false;
+	ch->glink_handle = NULL;
+	ch->ref_count = 0;
+	ch->rx_abort = false;
+	ch->tx_abort = false;
+	ch->txn_id = INITIAL_TXN_ID; /* use non-zero nonce for debug */
+	ch->pid = 0;
+
+	return 0;
+}
+
+/**
+ * spcom_find_channel_by_name() - find a channel by name.
+ *
+ * @name: channel name
+ *
+ * Return: a channel state struct, or NULL if not found.
+ */
+static struct spcom_channel *spcom_find_channel_by_name(const char *name)
+{
+	int i;
+
+	for (i = 0 ; i < ARRAY_SIZE(spcom_dev->channels); i++) {
+		struct spcom_channel *ch = &spcom_dev->channels[i];
+
+		if (strcmp(ch->name, name) == 0)
+			return ch;
+	}
+
+	return NULL;
+}
+
+/**
+ * spcom_open() - Open glink channel and wait for connection ACK.
+ *
+ * @ch: channel state struct pointer
+ * @timeout_msec: time to wait for remote side connection, 0 = wait forever
+ *
+ * Normally, a local client opens a channel after the remote server has
+ * opened it.
+ * A local server may open the channel before the remote client is running.
+ */
+static int spcom_open(struct spcom_channel *ch, unsigned int timeout_msec)
+{
+	struct glink_open_config cfg = {0};
+	unsigned long jiffies = msecs_to_jiffies(timeout_msec);
+	long timeleft;
+	const char *name;
+	void *handle;
+	u32 pid = current_pid();
+
+	mutex_lock(&ch->lock);
+	name = ch->name;
+
+	/* only one client/server may use the channel */
+	if (ch->ref_count) {
+		pr_err("channel [%s] is BUSY, already in use by pid [%d].\n",
+			name, ch->pid);
+		mutex_unlock(&ch->lock);
+		return -EBUSY;
+	}
+
+	pr_debug("ch [%s] opened by PID [%d], count [%d]\n",
+		 name, pid, ch->ref_count);
+
+	pr_debug("Open channel [%s] timeout_msec [%d].\n", name, timeout_msec);
+
+	if (spcom_is_channel_open(ch)) {
+		pr_debug("channel [%s] already open.\n", name);
+		mutex_unlock(&ch->lock);
+		return 0;
+	}
+
+	spcom_init_open_config(&cfg, name, ch);
+
+	/* init completion before calling glink_open() */
+	reinit_completion(&ch->connect);
+
+	ch->is_closing = false;
+
+	handle = glink_open(&cfg);
+	if (IS_ERR_OR_NULL(handle)) {
+		pr_err("glink_open failed.\n");
+		goto exit_err;
+	} else {
+		pr_debug("glink_open [%s] ok.\n", name);
+	}
+
+	/* init channel context after successful open */
+	ch->glink_handle = handle;
+	ch->ref_count++;
+	ch->pid = pid;
+	ch->txn_id = INITIAL_TXN_ID;
+
+	mutex_unlock(&ch->lock);
+
+	pr_debug("Wait for connection on channel [%s] timeout_msec [%d].\n",
+		 name, timeout_msec);
+
+	/* Wait for remote side to connect */
+	if (timeout_msec) {
+		timeleft = wait_for_completion_timeout(&(ch->connect), jiffies);
+		if (timeleft == 0)
+			pr_debug("Channel [%s] is NOT connected.\n", name);
+		else
+			pr_debug("Channel [%s] fully connect.\n", name);
+	} else {
+		pr_debug("wait for connection ch [%s] no timeout.\n", name);
+		wait_for_completion(&ch->connect);
+		pr_debug("Channel [%s] opened, no timeout.\n", name);
+	}
+
+	return 0;
+exit_err:
+	mutex_unlock(&ch->lock);
+
+	return -EFAULT;
+}
+
+/**
+ * spcom_close() - Close glink channel.
+ *
+ * @ch: channel state struct pointer
+ *
+ * Callers of this API should wait for both sides to disconnect.
+ */
+static int spcom_close(struct spcom_channel *ch)
+{
+	int ret = 0;
+
+	mutex_lock(&ch->lock);
+
+	if (!spcom_is_channel_open(ch)) {
+		pr_err("ch already closed.\n");
+		mutex_unlock(&ch->lock);
+		return 0;
+	}
+
+	ch->is_closing = true;
+
+	ret = glink_close(ch->glink_handle);
+	if (ret)
+		pr_err("glink_close() fail, ret [%d].\n", ret);
+	else
+		pr_debug("glink_close() ok.\n");
+
+	ch->glink_handle = NULL;
+	ch->ref_count = 0;
+	ch->rx_abort = false;
+	ch->tx_abort = false;
+	ch->glink_state = GLINK_LOCAL_DISCONNECTED;
+	ch->txn_id = INITIAL_TXN_ID; /* use non-zero nonce for debug */
+	ch->pid = 0;
+
+	pr_debug("Channel closed [%s].\n", ch->name);
+
+	mutex_unlock(&ch->lock);
+
+	return 0;
+}
+
+/**
+ * spcom_tx() - Send data and wait for ACK or timeout.
+ *
+ * @ch: channel state struct pointer
+ * @buf: buffer pointer
+ * @size: buffer size
+ * @timeout_msec: timeout waiting for the tx-done ACK.
+ *
+ * ACK is expected within a very short time (a few msec).
+ *
+ * Return: 0 on successful operation, negative value otherwise.
+ */
+static int spcom_tx(struct spcom_channel *ch,
+		    void *buf,
+		    uint32_t size,
+		    uint32_t timeout_msec)
+{
+	int ret;
+	void *pkt_priv = NULL;
+	uint32_t tx_flags = 0; /* don't use GLINK_TX_REQ_INTENT */
+	unsigned long timeout_jiffies = msecs_to_jiffies(timeout_msec);
+	long timeleft;
+	int retry = 0;
+
+	mutex_lock(&ch->lock);
+
+	/* reset completion before calling glink */
+	reinit_completion(&ch->tx_done);
+
+	for (retry = 0; retry < TX_MAX_RETRY; retry++) {
+		ret = glink_tx(ch->glink_handle, pkt_priv, buf, size, tx_flags);
+		if (ret == -EAGAIN) {
+			pr_err("glink_tx() failed with -EAGAIN, retrying.\n");
+			/*
+			 * Delay to allow remote side to queue rx buffer.
+			 * This may happen after the first channel connection.
+			 */
+			msleep(TX_RETRY_DELAY_MSEC);
+		} else if (ret < 0) {
+			pr_err("glink_tx() error %d.\n", ret);
+			goto exit_err;
+		} else {
+			break; /* no retry needed */
+		}
+	}
+
+	pr_debug("Wait for Tx done.\n");
+
+	/* Wait for Tx Completion */
+	timeleft = wait_for_completion_timeout(&ch->tx_done, timeout_jiffies);
+	if (timeleft == 0) {
+		pr_err("tx_done timeout %d msec expired.\n", timeout_msec);
+		goto exit_err;
+	} else if (ch->tx_abort) {
+		pr_err("tx aborted.\n");
+		goto exit_err;
+	}
+
+	mutex_unlock(&ch->lock);
+
+	return ret;
+exit_err:
+	mutex_unlock(&ch->lock);
+	return -EFAULT;
+}
+
+/**
+ * spcom_rx() - Wait for received data until timeout, unless rx data is
+ * already pending.
+ *
+ * @ch: channel state struct pointer
+ * @buf: buffer pointer
+ * @size: buffer size
+ * @timeout_msec: timeout waiting for rx data, 0 to wait with no timeout.
+ *
+ * Return: size in bytes on success, negative value on failure.
+ */
+static int spcom_rx(struct spcom_channel *ch,
+		     void *buf,
+		     uint32_t size,
+		     uint32_t timeout_msec)
+{
+	int ret = -1;
+	unsigned long timeout_jiffies = msecs_to_jiffies(timeout_msec);
+	long timeleft = 1;
+
+	mutex_lock(&ch->lock);
+
+	/* check for already pending data */
+	if (ch->actual_rx_size) {
+		pr_debug("already pending data size [%zu]\n",
+			 ch->actual_rx_size);
+		goto copy_buf;
+	}
+
+	/* reset completion before calling glink */
+	reinit_completion(&ch->rx_done);
+
+	/* Wait for Rx response */
+	pr_debug("Wait for Rx done.\n");
+	if (timeout_msec)
+		timeleft = wait_for_completion_timeout(&ch->rx_done,
+						       timeout_jiffies);
+	else
+		wait_for_completion(&ch->rx_done);
+
+	if (timeleft == 0) {
+		pr_err("rx_done timeout [%d] msec expired.\n", timeout_msec);
+		mutex_unlock(&ch->lock);
+		return -ETIMEDOUT;
+	} else if (ch->rx_abort) {
+		mutex_unlock(&ch->lock);
+		return -ERESTART; /* probably SSR */
+	} else if (ch->actual_rx_size) {
+		pr_debug("actual_rx_size is [%zu]\n", ch->actual_rx_size);
+	} else {
+		pr_err("actual_rx_size is zero.\n");
+		goto exit_err;
+	}
+
+copy_buf:
+	if (!ch->glink_rx_buf) {
+		pr_err("invalid glink_rx_buf.\n");
+		goto exit_err;
+	}
+
+	/* Copy from glink buffer to spcom buffer */
+	size = min_t(int, ch->actual_rx_size, size);
+	memcpy(buf, ch->glink_rx_buf, size);
+
+	pr_debug("copy size [%d].\n", (int) size);
+
+	/* free glink buffer after copy to spcom buffer */
+	glink_rx_done(ch->glink_handle, ch->glink_rx_buf, false);
+	ch->glink_rx_buf = NULL;
+	ch->actual_rx_size = 0;
+
+	/* queue rx buffer for the next time */
+	ret = glink_queue_rx_intent(ch->glink_handle, ch, ch->rx_buf_size);
+	if (ret) {
+		pr_err("glink_queue_rx_intent() failed, ret [%d]", ret);
+		goto exit_err;
+	} else {
+		pr_debug("queue rx_buf, size [%zu]\n", ch->rx_buf_size);
+	}
+
+	mutex_unlock(&ch->lock);
+
+	return size;
+exit_err:
+	mutex_unlock(&ch->lock);
+	return -EFAULT;
+}
+
+/**
+ * spcom_get_next_request_size() - get the size of the next request.
+ *
+ * @ch: channel state struct pointer
+ *
+ * The server needs the size of the next request to allocate a request
+ * buffer. Intent-request was used initially, but it complicated the remote
+ * side, so both sides no longer use glink_tx() with INTENT_REQ.
+ *
+ * Return: size in bytes on success, negative value on failure.
+ */
+static int spcom_get_next_request_size(struct spcom_channel *ch)
+{
+	int size = -1;
+
+	/* NOTE: Remote clients might not be connected yet. */
+	mutex_lock(&ch->lock);
+	reinit_completion(&ch->rx_done);
+
+	/* check if already got it via callback */
+	if (ch->actual_rx_size) {
+		pr_debug("next-req-size already ready ch [%s] size [%zu]\n",
+			 ch->name, ch->actual_rx_size);
+		goto exit_ready;
+	}
+	mutex_unlock(&ch->lock); /* unlock while waiting */
+
+	pr_debug("Wait for Rx Done, ch [%s].\n", ch->name);
+	wait_for_completion(&ch->rx_done);
+
+	mutex_lock(&ch->lock); /* re-lock after waiting */
+	/* Check Rx Abort on SP reset */
+	if (ch->rx_abort) {
+		pr_err("rx aborted.\n");
+		goto exit_error;
+	}
+
+	if (ch->actual_rx_size <= 0) {
+		pr_err("invalid rx size [%zu] ch [%s]\n",
+		       ch->actual_rx_size, ch->name);
+		goto exit_error;
+	}
+
+exit_ready:
+	size = ch->actual_rx_size;
+	if (size > sizeof(struct spcom_msg_hdr)) {
+		size -= sizeof(struct spcom_msg_hdr);
+	} else {
+		pr_err("rx size [%d] too small.\n", size);
+		goto exit_error;
+	}
+
+	mutex_unlock(&ch->lock);
+	return size;
+
+exit_error:
+	mutex_unlock(&ch->lock);
+	return -EFAULT;
+}
+
+/**
+ * spcom_rx_abort_pending_server() - abort pending server rx on SSR.
+ *
+ * A server that is waiting for a request, but has no client connected,
+ * will not get an RX-ABORT or REMOTE-DISCONNECT notification, so its
+ * pending rx operation must be cancelled explicitly.
+ */
+static void spcom_rx_abort_pending_server(void)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(spcom_dev->channels); i++) {
+		struct spcom_channel *ch = &spcom_dev->channels[i];
+
+		if (ch->is_server) {
+			pr_debug("rx-abort server on ch [%s].\n", ch->name);
+			spcom_notify_rx_abort(NULL, ch, NULL);
+		}
+	}
+}
+
+/*======================================================================*/
+/*		General API for kernel drivers				*/
+/*======================================================================*/
+
+/**
+ * spcom_is_sp_subsystem_link_up() - check if SPSS link is up.
+ *
+ * Return: true if the link is up, false if it is down.
+ */
+bool spcom_is_sp_subsystem_link_up(void)
+{
+	if (spcom_dev == NULL)
+		return false;
+
+	return (spcom_dev->link_state == GLINK_LINK_STATE_UP);
+}
+EXPORT_SYMBOL(spcom_is_sp_subsystem_link_up);
+
+/*======================================================================*/
+/*		Client API for kernel drivers				*/
+/*======================================================================*/
+
+/**
+ * spcom_register_client() - register a client.
+ *
+ * @info: channel name and ssr callback.
+ *
+ * Return: client handle on success, NULL on failure.
+ */
+struct spcom_client *spcom_register_client(struct spcom_client_info *info)
+{
+	int ret;
+	const char *name;
+	struct spcom_channel *ch;
+	struct spcom_client *client;
+
+	if (!spcom_is_ready()) {
+		pr_err("spcom is not ready.\n");
+		return NULL;
+	}
+
+	if (!info) {
+		pr_err("Invalid parameter.\n");
+		return NULL;
+	}
+	name = info->ch_name;
+
+	client = kzalloc(sizeof(*client), GFP_KERNEL);
+	if (!client)
+		return NULL;
+
+	ch = spcom_find_channel_by_name(name);
+	if (!ch) {
+		pr_err("channel %s doesn't exist, load App first.\n", name);
+		kfree(client);
+		return NULL;
+	}
+
+	client->ch = ch; /* back-pointer from client to channel */
+
+	ret = spcom_open(ch, OPEN_CHANNEL_TIMEOUT_MSEC);
+	if (ret) {
+		pr_err("failed to open channel [%s].\n", name);
+		kfree(client);
+		client = NULL;
+	} else {
+		pr_info("remote side connect to channel [%s].\n", name);
+	}
+
+	return client;
+}
+EXPORT_SYMBOL(spcom_register_client);
+
+/**
+ * spcom_unregister_client() - unregister a client.
+ *
+ * @client: client handle
+ */
+int spcom_unregister_client(struct spcom_client *client)
+{
+	struct spcom_channel *ch;
+
+	if (!spcom_is_ready()) {
+		pr_err("spcom is not ready.\n");
+		return -ENODEV;
+	}
+
+	if (!client) {
+		pr_err("Invalid client parameter.\n");
+		return -EINVAL;
+	}
+
+	ch = client->ch;
+	if (!ch) {
+		pr_err("Invalid channel.\n");
+		return -EINVAL;
+	}
+
+	spcom_close(ch);
+
+	kfree(client);
+
+	return 0;
+}
+EXPORT_SYMBOL(spcom_unregister_client);
+
+/**
+ * spcom_client_send_message_sync() - send request and wait for response.
+ *
+ * @client: client handle
+ * @req_ptr: request pointer
+ * @req_size: request size
+ * @resp_ptr: response pointer
+ * @resp_size: response size
+ * @timeout_msec: timeout waiting for response.
+ *
+ * The timeout depends on the specific request handling time at the remote side.
+ *
+ * Return: number of rx bytes on success, negative value on failure.
+ */
+int spcom_client_send_message_sync(struct spcom_client	*client,
+				    void	*req_ptr,
+				    uint32_t	req_size,
+				    void	*resp_ptr,
+				    uint32_t	resp_size,
+				    uint32_t	timeout_msec)
+{
+	int ret;
+	struct spcom_channel *ch;
+
+	if (!spcom_is_ready()) {
+		pr_err("spcom is not ready.\n");
+		return -ENODEV;
+	}
+
+	if (!client || !req_ptr || !resp_ptr) {
+		pr_err("Invalid parameter.\n");
+		return -EINVAL;
+	}
+
+	ch = client->ch;
+	if (!ch) {
+		pr_err("Invalid channel.\n");
+		return -EINVAL;
+	}
+
+	/* Check if the remote side is connected */
+	if (!spcom_is_channel_connected(ch)) {
+		pr_err("ch [%s] remote side not connected.\n", ch->name);
+		return -ENOTCONN;
+	}
+
+	ret = spcom_tx(ch, req_ptr, req_size, TX_DONE_TIMEOUT_MSEC);
+	if (ret < 0) {
+		pr_err("tx error %d.\n", ret);
+		return ret;
+	}
+
+	ret = spcom_rx(ch, resp_ptr, resp_size, timeout_msec);
+	if (ret < 0) {
+		pr_err("rx error %d.\n", ret);
+		return ret;
+	}
+
+	/* TODO: verify the response transaction id matches the request */
+
+	return ret;
+}
+EXPORT_SYMBOL(spcom_client_send_message_sync);
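+
+/*
+ * Example (illustrative sketch only, not part of this driver): a kernel
+ * client using the client API above. The channel name "sp_app" and the
+ * req/resp structs are hypothetical and app-defined:
+ *
+ *	struct spcom_client_info info = { .ch_name = "sp_app" };
+ *	struct spcom_client *client;
+ *	struct my_req req;
+ *	struct my_resp resp;
+ *	int ret = -ENODEV;
+ *
+ *	client = spcom_register_client(&info);
+ *	if (!client)
+ *		return -ENODEV;
+ *	if (spcom_client_is_server_connected(client))
+ *		ret = spcom_client_send_message_sync(client, &req,
+ *			sizeof(req), &resp, sizeof(resp), 1000);
+ *	spcom_unregister_client(client);
+ */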
+
+/**
+ * spcom_client_is_server_connected() - is remote server connected.
+ *
+ * @client: client handle
+ */
+bool spcom_client_is_server_connected(struct spcom_client *client)
+{
+	bool connected;
+	struct spcom_channel *ch;
+
+	if (!spcom_is_ready()) {
+		pr_err("spcom is not ready.\n");
+		return false;
+	}
+
+	if (!client) {
+		pr_err("Invalid parameter.\n");
+		return false;
+	}
+
+	ch = client->ch;
+	if (!ch) {
+		pr_err("Invalid channel.\n");
+		return false;
+	}
+
+	connected = spcom_is_channel_connected(ch);
+
+	return connected;
+}
+EXPORT_SYMBOL(spcom_client_is_server_connected);
+
+/*======================================================================*/
+/*		Server API for kernel drivers				*/
+/*======================================================================*/
+
+/**
+ * spcom_register_service() - register a server.
+ *
+ * @info: channel name and ssr callback.
+ *
+ * Return: server handle on success, NULL on failure.
+ */
+struct spcom_server *spcom_register_service(struct spcom_service_info *info)
+{
+	int ret;
+	const char *name;
+	struct spcom_channel *ch;
+	struct spcom_server *server;
+
+	if (!spcom_is_ready()) {
+		pr_err("spcom is not ready.\n");
+		return NULL;
+	}
+
+	if (!info) {
+		pr_err("Invalid parameter.\n");
+		return NULL;
+	}
+	name = info->ch_name;
+
+	server = kzalloc(sizeof(*server), GFP_KERNEL);
+	if (!server)
+		return NULL;
+
+	ch = spcom_find_channel_by_name(name);
+	if (!ch) {
+		pr_err("channel %s doesn't exist, load App first.\n", name);
+		kfree(server);
+		return NULL;
+	}
+
+	server->ch = ch; /* back-pointer from server to channel */
+
+	ret = spcom_open(ch, 0);
+	if (ret) {
+		pr_err("failed to open channel [%s].\n", name);
+		kfree(server);
+		server = NULL;
+	}
+
+	return server;
+}
+EXPORT_SYMBOL(spcom_register_service);
+
+/**
+ * spcom_unregister_service() - unregister a server.
+ *
+ * @server: server handle
+ */
+int spcom_unregister_service(struct spcom_server *server)
+{
+	struct spcom_channel *ch;
+
+	if (!spcom_is_ready()) {
+		pr_err("spcom is not ready.\n");
+		return -ENODEV;
+	}
+
+	if (!server) {
+		pr_err("Invalid server parameter.\n");
+		return -EINVAL;
+	}
+
+	ch = server->ch;
+	if (!ch) {
+		pr_err("Invalid channel parameter.\n");
+		return -EINVAL;
+	}
+
+	spcom_close(ch);
+
+	kfree(server);
+
+	return 0;
+}
+EXPORT_SYMBOL(spcom_unregister_service);
+
+/**
+ * spcom_server_get_next_request_size() - get request size.
+ *
+ * @server: server handle
+ *
+ * Return: size in bytes on success, negative value on failure.
+ */
+int spcom_server_get_next_request_size(struct spcom_server *server)
+{
+	int size;
+	struct spcom_channel *ch;
+
+	if (!server) {
+		pr_err("Invalid parameter.\n");
+		return -EINVAL;
+	}
+
+	ch = server->ch;
+	if (!ch) {
+		pr_err("Invalid channel.\n");
+		return -EINVAL;
+	}
+
+	/* Check if the remote side is connected */
+	if (!spcom_is_channel_connected(ch)) {
+		pr_err("ch [%s] remote side not connected.\n", ch->name);
+		return -ENOTCONN;
+	}
+
+	size = spcom_get_next_request_size(ch);
+
+	pr_debug("next_request_size [%d].\n", size);
+
+	return size;
+}
+EXPORT_SYMBOL(spcom_server_get_next_request_size);
+
+/**
+ * spcom_server_wait_for_request() - wait for request.
+ *
+ * @server: server handle
+ * @req_ptr: request buffer pointer
+ * @req_size: max request size
+ *
+ * Return: size in bytes on success, negative value on failure.
+ */
+int spcom_server_wait_for_request(struct spcom_server	*server,
+				  void			*req_ptr,
+				  uint32_t		req_size)
+{
+	int ret;
+	struct spcom_channel *ch;
+
+	if (!spcom_is_ready()) {
+		pr_err("spcom is not ready.\n");
+		return -ENODEV;
+	}
+
+	if (!server || !req_ptr) {
+		pr_err("Invalid parameter.\n");
+		return -EINVAL;
+	}
+
+	ch = server->ch;
+	if (!ch) {
+		pr_err("Invalid channel.\n");
+		return -EINVAL;
+	}
+
+	/* Check if the remote side is connected */
+	if (!spcom_is_channel_connected(ch)) {
+		pr_err("ch [%s] remote side not connected.\n", ch->name);
+		return -ENOTCONN;
+	}
+
+	ret = spcom_rx(ch, req_ptr, req_size, 0);
+
+	return ret;
+}
+EXPORT_SYMBOL(spcom_server_wait_for_request);
+
+/**
+ * spcom_server_send_response() - Send response
+ *
+ * @server: server handle
+ * @resp_ptr: response buffer pointer
+ * @resp_size: response size
+ */
+int spcom_server_send_response(struct spcom_server	*server,
+			       void			*resp_ptr,
+			       uint32_t			resp_size)
+{
+	int ret;
+	struct spcom_channel *ch;
+
+	if (!spcom_is_ready()) {
+		pr_err("spcom is not ready.\n");
+		return -ENODEV;
+	}
+
+	if (!server || !resp_ptr) {
+		pr_err("Invalid parameter.\n");
+		return -EINVAL;
+	}
+
+	ch = server->ch;
+	if (!ch) {
+		pr_err("Invalid channel.\n");
+		return -EINVAL;
+	}
+
+	/* Check if the remote side is connected */
+	if (!spcom_is_channel_connected(ch)) {
+		pr_err("ch [%s] remote side not connected.\n", ch->name);
+		return -ENOTCONN;
+	}
+
+	ret = spcom_tx(ch, resp_ptr, resp_size, TX_DONE_TIMEOUT_MSEC);
+
+	return ret;
+}
+EXPORT_SYMBOL(spcom_server_send_response);
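+
+/*
+ * Example (illustrative sketch only, not part of this driver): a kernel
+ * server built on the server API above, assuming the channel "sp_app"
+ * exists and req/resp point to app-defined buffers:
+ *
+ *	struct spcom_service_info info = { .ch_name = "sp_app" };
+ *	struct spcom_server *server;
+ *	int size;
+ *
+ *	server = spcom_register_service(&info);
+ *	if (!server)
+ *		return -ENODEV;
+ *	size = spcom_server_get_next_request_size(server);
+ *	if (size > 0 && spcom_server_wait_for_request(server, req, size) > 0) {
+ *		(handle the request here and fill resp)
+ *		spcom_server_send_response(server, resp, resp_size);
+ *	}
+ *	spcom_unregister_service(server);
+ */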
+
+/*======================================================================*/
+/*	USER SPACE commands handling					*/
+/*======================================================================*/
+
+/**
+ * spcom_handle_create_channel_command() - Handle Create Channel command from
+ * user space.
+ *
+ * @cmd_buf:	command buffer.
+ * @cmd_size:	command buffer size.
+ *
+ * Return: 0 on successful operation, negative value otherwise.
+ */
+static int spcom_handle_create_channel_command(void *cmd_buf, int cmd_size)
+{
+	int ret = 0;
+	struct spcom_user_create_channel_command *cmd = cmd_buf;
+	const char *ch_name;
+	const size_t maxlen = sizeof(cmd->ch_name);
+
+	if (cmd_size != sizeof(*cmd)) {
+		pr_err("cmd_size [%d] , expected [%d].\n",
+		       (int) cmd_size,  (int) sizeof(*cmd));
+		return -EINVAL;
+	}
+
+	ch_name = cmd->ch_name;
+	if (strnlen(cmd->ch_name, maxlen) == maxlen) {
+		pr_err("channel name is not NULL terminated\n");
+		return -EINVAL;
+	}
+
+	pr_debug("ch_name [%s].\n", ch_name);
+
+	ret = spcom_create_channel_chardev(ch_name);
+
+	return ret;
+}
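+
+/*
+ * Example (illustrative sketch only; the exact field layout of the uapi
+ * command struct is an assumption based on the handler above): user space
+ * creates a channel by writing a create-channel command to the spcom
+ * control device node:
+ *
+ *	struct spcom_user_create_channel_command cmd = {
+ *		.cmd_id = SPCOM_CMD_CREATE_CHANNEL,
+ *	};
+ *	strncpy(cmd.ch_name, "sp_app", sizeof(cmd.ch_name) - 1);
+ *	write(control_fd, &cmd, sizeof(cmd));
+ */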
+
+/**
+ * spcom_handle_send_command() - Handle send request/response from user space.
+ *
+ * @ch:		channel state struct pointer.
+ * @cmd_buf:	command buffer.
+ * @size:	command buffer size.
+ *
+ * Return: 0 on successful operation, negative value otherwise.
+ */
+static int spcom_handle_send_command(struct spcom_channel *ch,
+					     void *cmd_buf, int size)
+{
+	int ret = 0;
+	struct spcom_send_command *cmd = cmd_buf;
+	uint32_t buf_size;
+	void *buf;
+	struct spcom_msg_hdr *hdr;
+	void *tx_buf;
+	int tx_buf_size;
+	uint32_t timeout_msec;
+
+	pr_debug("send req/resp ch [%s] size [%d] .\n", ch->name, size);
+
+	/*
+	 * check that cmd buf size is at least struct size,
+	 * to allow access to struct fields.
+	 */
+	if (size < sizeof(*cmd)) {
+		pr_err("ch [%s] invalid cmd buf.\n",
+			ch->name);
+		return -EINVAL;
+	}
+
+	/* Check if the remote side is connected */
+	if (!spcom_is_channel_connected(ch)) {
+		pr_err("ch [%s] remote side not connected.\n", ch->name);
+		return -ENOTCONN;
+	}
+
+	/* parse command buffer */
+	buf = &cmd->buf;
+	buf_size = cmd->buf_size;
+	timeout_msec = cmd->timeout_msec;
+
+	/* Check param validity */
+	if (buf_size > SPCOM_MAX_RESPONSE_SIZE) {
+		pr_err("ch [%s] invalid buf size [%d].\n",
+			ch->name, buf_size);
+		return -EINVAL;
+	}
+	if (size != sizeof(*cmd) + buf_size) {
+		pr_err("ch [%s] invalid cmd size [%d].\n",
+			ch->name, size);
+		return -EINVAL;
+	}
+
+	/* Allocate Buffers*/
+	tx_buf_size = sizeof(*hdr) + buf_size;
+	tx_buf = kzalloc(tx_buf_size, GFP_KERNEL);
+	if (!tx_buf)
+		return -ENOMEM;
+
+	/* Prepare Tx Buf */
+	hdr = tx_buf;
+
+	/* Header */
+	hdr->txn_id = ch->txn_id;
+	if (!ch->is_server) {
+		ch->txn_id++;   /* client sets the request txn_id */
+		ch->response_timeout_msec = timeout_msec;
+	}
+
+	/* user buf */
+	memcpy(hdr->buf, buf, buf_size);
+
+	/*
+	 * remote side should have rx buffer ready.
+	 * tx_done is expected to be received quickly.
+	 */
+	ret = spcom_tx(ch, tx_buf, tx_buf_size, TX_DONE_TIMEOUT_MSEC);
+	if (ret < 0)
+		pr_err("tx error %d.\n", ret);
+
+	kfree(tx_buf);
+
+	return ret;
+}
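+
+/*
+ * For reference (derived from spcom_handle_send_command() above): the tx
+ * buffer sent over glink is a spcom_msg_hdr immediately followed by the
+ * user payload:
+ *
+ *	+------------------------+------------------------+
+ *	| spcom_msg_hdr          | user buf               |
+ *	| (txn_id, ...)          | (buf_size bytes)       |
+ *	+------------------------+------------------------+
+ *
+ * The client increments txn_id per request; the server echoes the request
+ * txn_id in its response (see spcom_handle_read_req_resp() below).
+ */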
+
+/**
+ * modify_ion_addr() - replace the ION buffer virtual address with the
+ * physical address in a request or response buffer.
+ *
+ * @buf: buffer to modify
+ * @buf_size: buffer size
+ * @ion_info: ION buffer info such as FD and offset in buffer.
+ *
+ * Return: 0 on successful operation, negative value otherwise.
+ */
+static int modify_ion_addr(void *buf,
+			    uint32_t buf_size,
+			    struct spcom_ion_info ion_info)
+{
+	struct ion_handle *handle = NULL;
+	ion_phys_addr_t ion_phys_addr;
+	size_t len;
+	int fd;
+	uint32_t buf_offset;
+	char *ptr = (char *)buf;
+	int ret;
+
+	fd = ion_info.fd;
+	buf_offset = ion_info.buf_offset;
+	ptr += buf_offset;
+
+	if (fd < 0) {
+		pr_err("invalid fd [%d].\n", fd);
+		return -ENODEV;
+	}
+
+	if (buf_size < sizeof(uint64_t)) {
+		pr_err("buf size too small [%d].\n", buf_size);
+		return -ENODEV;
+	}
+
+	if (buf_offset > buf_size - sizeof(uint64_t)) {
+		pr_err("invalid buf_offset [%d].\n", buf_offset);
+		return -ENODEV;
+	}
+
+	/* Get ION handle from fd */
+	handle = ion_import_dma_buf(spcom_dev->ion_client, fd);
+	if (IS_ERR_OR_NULL(handle)) {
+		pr_err("fail to get ion handle.\n");
+		return -EINVAL;
+	}
+	pr_debug("ion handle ok.\n");
+
+	/* Get the ION buffer Physical Address */
+	ret = ion_phys(spcom_dev->ion_client, handle, &ion_phys_addr, &len);
+	if (ret < 0) {
+		pr_err("fail to get ion phys addr.\n");
+		ion_free(spcom_dev->ion_client, handle);
+		return -EINVAL;
+	}
+	if (buf_offset % sizeof(uint64_t))
+		pr_debug("offset [%d] is NOT 64-bit aligned.\n", buf_offset);
+	else
+		pr_debug("offset [%d] is 64-bit aligned.\n", buf_offset);
+
+	/* Set the ION Physical Address at the buffer offset */
+	pr_debug("ion phys addr = [0x%lx].\n", (long int) ion_phys_addr);
+	memcpy(ptr, &ion_phys_addr, sizeof(uint64_t));
+
+	/* Release the ION handle */
+	ion_free(spcom_dev->ion_client, handle);
+
+	return 0;
+}
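+
+/*
+ * For reference (derived from modify_ion_addr() above): the 8 bytes at
+ * ion_info.buf_offset inside the message payload are overwritten in place
+ * with the 64-bit ION physical address before the message is sent:
+ *
+ *	before: | ... | placeholder (8 bytes)   | ... |
+ *	after:  | ... | ion_phys_addr (8 bytes) | ... |
+ *
+ * buf_offset is validated against buf_size so the write stays in bounds.
+ */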
+
+/**
+ * spcom_handle_send_modified_command() - send a request/response with ION
+ * buffer address. Modify the request/response by replacing the ION buffer
+ * virtual address with the physical address.
+ *
+ * @ch: channel pointer
+ * @cmd_buf: User space command buffer
+ * @size: size of user command buffer
+ *
+ * Return: 0 on successful operation, negative value otherwise.
+ */
+static int spcom_handle_send_modified_command(struct spcom_channel *ch,
+					       void *cmd_buf, int size)
+{
+	int ret = 0;
+	struct spcom_user_send_modified_command *cmd = cmd_buf;
+	uint32_t buf_size;
+	void *buf;
+	struct spcom_msg_hdr *hdr;
+	void *tx_buf;
+	int tx_buf_size;
+	uint32_t timeout_msec;
+	struct spcom_ion_info ion_info[SPCOM_MAX_ION_BUF_PER_CMD];
+	int i;
+
+	pr_debug("send req/resp ch [%s] size [%d] .\n", ch->name, size);
+
+	/*
+	 * check that cmd buf size is at least struct size,
+	 * to allow access to struct fields.
+	 */
+	if (size < sizeof(*cmd)) {
+		pr_err("ch [%s] invalid cmd buf.\n",
+			ch->name);
+		return -EINVAL;
+	}
+
+	/* Check if the remote side is connected */
+	if (!spcom_is_channel_connected(ch)) {
+		pr_err("ch [%s] remote side not connected.\n", ch->name);
+		return -ENOTCONN;
+	}
+
+	/* parse command buffer */
+	buf = &cmd->buf;
+	buf_size = cmd->buf_size;
+	timeout_msec = cmd->timeout_msec;
+	memcpy(ion_info, cmd->ion_info, sizeof(ion_info));
+
+	/* Check param validity */
+	if (buf_size > SPCOM_MAX_RESPONSE_SIZE) {
+		pr_err("ch [%s] invalid buf size [%d].\n",
+			ch->name, buf_size);
+		return -EINVAL;
+	}
+	if (size != sizeof(*cmd) + buf_size) {
+		pr_err("ch [%s] invalid cmd size [%d].\n",
+			ch->name, size);
+		return -EINVAL;
+	}
+
+	/* Allocate Buffers*/
+	tx_buf_size = sizeof(*hdr) + buf_size;
+	tx_buf = kzalloc(tx_buf_size, GFP_KERNEL);
+	if (!tx_buf)
+		return -ENOMEM;
+
+	/* Prepare Tx Buf */
+	hdr = tx_buf;
+
+	/* Header */
+	hdr->txn_id = ch->txn_id;
+	if (!ch->is_server) {
+		ch->txn_id++;   /* client sets the request txn_id */
+		ch->response_timeout_msec = timeout_msec;
+	}
+
+	/* user buf */
+	memcpy(hdr->buf, buf, buf_size);
+
+	for (i = 0; i < ARRAY_SIZE(ion_info); i++) {
+		if (ion_info[i].fd >= 0) {
+			ret = modify_ion_addr(hdr->buf, buf_size, ion_info[i]);
+			if (ret < 0) {
+				pr_err("modify_ion_addr() error [%d].\n", ret);
+				kfree(tx_buf);
+				return -EFAULT;
+			}
+		}
+	}
+
+	/*
+	 * remote side should have rx buffer ready.
+	 * tx_done is expected to be received quickly.
+	 */
+	ret = spcom_tx(ch, tx_buf, tx_buf_size, TX_DONE_TIMEOUT_MSEC);
+	if (ret < 0)
+		pr_err("tx error %d.\n", ret);
+
+	kfree(tx_buf);
+
+	return ret;
+}
+
+/**
+ * spcom_handle_lock_ion_buf_command() - Lock an ION buffer.
+ *
+ * Lock an ION buffer to prevent it from being freed while it is used by
+ * the remote subsystem, even if the user space app crashes.
+ */
+static int spcom_handle_lock_ion_buf_command(struct spcom_channel *ch,
+					      void *cmd_buf, int size)
+{
+	struct spcom_user_command *cmd = cmd_buf;
+	int fd = cmd->arg;
+	struct ion_handle *ion_handle;
+	int i;
+
+	if (size != sizeof(*cmd)) {
+		pr_err("cmd size [%d] , expected [%d].\n",
+		       (int) size,  (int) sizeof(*cmd));
+		return -EINVAL;
+	}
+
+	/* Check ION client */
+	if (spcom_dev->ion_client == NULL) {
+		pr_err("invalid ion client.\n");
+		return -ENODEV;
+	}
+
+	/* Get ION handle from fd - this increments the ref count */
+	ion_handle = ion_import_dma_buf(spcom_dev->ion_client, fd);
+	if (IS_ERR_OR_NULL(ion_handle)) {
+		pr_err("fail to get ion handle.\n");
+		return -EINVAL;
+	}
+
+	pr_debug("ion handle ok.\n");
+
+	/* ION buf lock doesn't involve any rx/tx data to SP. */
+	mutex_lock(&ch->lock);
+
+	/* Check if this ION buffer is already locked */
+	for (i = 0; i < ARRAY_SIZE(ch->ion_handle_table); i++) {
+		if (ch->ion_handle_table[i] == ion_handle) {
+			pr_err("fd [%d] ion buf is already locked.\n", fd);
+			/* decrement back the ref count */
+			ion_free(spcom_dev->ion_client, ion_handle);
+			mutex_unlock(&ch->lock);
+			return -EINVAL;
+		}
+	}
+
+	/* Store the ION handle */
+	for (i = 0; i < ARRAY_SIZE(ch->ion_handle_table); i++) {
+		if (ch->ion_handle_table[i] == NULL) {
+			ch->ion_handle_table[i] = ion_handle;
+			ch->ion_fd_table[i] = fd;
+			pr_debug("ch [%s] locked ion buf #%d, fd [%d].\n",
+				ch->name, i, fd);
+			mutex_unlock(&ch->lock);
+			return 0;
+		}
+	}
+
+	pr_err("no free entry to store ion handle of fd [%d].\n", fd);
+	/* decrement back the ref count */
+	ion_free(spcom_dev->ion_client, ion_handle);
+
+	mutex_unlock(&ch->lock);
+
+	return -EFAULT;
+}
+
+/**
+ * spcom_unlock_ion_buf() - Unlock an ION buffer.
+ *
+ * Unlock an ION buffer, let it be free, when it is no longer being used by
+ * the remote subsystem.
+ */
+static int spcom_unlock_ion_buf(struct spcom_channel *ch, int fd)
+{
+	struct ion_client *ion_client = spcom_dev->ion_client;
+	int i;
+	bool found = false;
+
+	pr_debug("Unlock ion buf ch [%s] fd [%d].\n", ch->name, fd);
+
+	/* Check ION client */
+	if (ion_client == NULL) {
+		pr_err("fail to create ion client.\n");
+		return -ENODEV;
+	}
+
+	if (fd == (int) SPCOM_ION_FD_UNLOCK_ALL) {
+		pr_debug("unlocked ALL ion buf ch [%s].\n", ch->name);
+		found = true;
+		/* unlock all ION buf */
+		for (i = 0; i < ARRAY_SIZE(ch->ion_handle_table); i++) {
+			if (ch->ion_handle_table[i] != NULL) {
+				pr_debug("unlocked ion buf #%d fd [%d].\n",
+					i, ch->ion_fd_table[i]);
+				ion_free(ion_client, ch->ion_handle_table[i]);
+				ch->ion_handle_table[i] = NULL;
+				ch->ion_fd_table[i] = -1;
+			}
+		}
+	} else {
+		/* unlock specific ION buf */
+		for (i = 0; i < ARRAY_SIZE(ch->ion_handle_table); i++) {
+			if (ch->ion_handle_table[i] == NULL)
+				continue;
+			if (ch->ion_fd_table[i] == fd) {
+				pr_debug("unlocked ion buf #%d fd [%d].\n",
+					i, ch->ion_fd_table[i]);
+				ion_free(ion_client, ch->ion_handle_table[i]);
+				ch->ion_handle_table[i] = NULL;
+				ch->ion_fd_table[i] = -1;
+				found = true;
+				break;
+			}
+		}
+	}
+
+	if (!found) {
+		pr_err("ch [%s] fd [%d] was not found.\n", ch->name, fd);
+		return -ENODEV;
+	}
+
+	return 0;
+}
+
+/**
+ * spcom_handle_unlock_ion_buf_command() - Unlock an ION buffer.
+ *
+ * Unlock an ION buffer, let it be free, when it is no longer being used by
+ * the remote subsystem.
+ */
+static int spcom_handle_unlock_ion_buf_command(struct spcom_channel *ch,
+					      void *cmd_buf, int size)
+{
+	int ret;
+	struct spcom_user_command *cmd = cmd_buf;
+	int fd = cmd->arg;
+
+	if (size != sizeof(*cmd)) {
+		pr_err("cmd size [%d] , expected [%d].\n",
+		       (int) size,  (int) sizeof(*cmd));
+		return -EINVAL;
+	}
+
+	/* ION buf unlock doesn't involve any rx/tx data to SP. */
+	mutex_lock(&ch->lock);
+
+	ret = spcom_unlock_ion_buf(ch, fd);
+
+	mutex_unlock(&ch->lock);
+
+	return ret;
+}
+
+/**
+ * spcom_handle_write() - Handle user space write commands.
+ *
+ * @ch:		channel state struct pointer.
+ * @buf:	command buffer.
+ * @buf_size:	command buffer size.
+ *
+ * Return: 0 on successful operation, negative value otherwise.
+ */
+static int spcom_handle_write(struct spcom_channel *ch,
+			       void *buf,
+			       int buf_size)
+{
+	int ret = 0;
+	struct spcom_user_command *cmd = NULL;
+	int cmd_id = 0;
+	int swap_id;
+	char cmd_name[5] = {0}; /* debug only */
+
+	/* Minimal command should have command-id and argument */
+	if (buf_size < sizeof(struct spcom_user_command)) {
+		pr_err("Command buffer size [%d] too small\n", buf_size);
+		return -EINVAL;
+	}
+
+	cmd = (struct spcom_user_command *)buf;
+	cmd_id = (int) cmd->cmd_id;
+	swap_id = htonl(cmd->cmd_id);
+	memcpy(cmd_name, &swap_id, sizeof(int));
+
+	mutex_lock(&spcom_dev->cmd_lock);
+
+	pr_debug("cmd_id [0x%x] cmd_name [%s].\n", cmd_id, cmd_name);
+
+	switch (cmd_id) {
+	case SPCOM_CMD_SEND:
+		ret = spcom_handle_send_command(ch, buf, buf_size);
+		break;
+	case SPCOM_CMD_SEND_MODIFIED:
+		ret = spcom_handle_send_modified_command(ch, buf, buf_size);
+		break;
+	case SPCOM_CMD_LOCK_ION_BUF:
+		ret = spcom_handle_lock_ion_buf_command(ch, buf, buf_size);
+		break;
+	case SPCOM_CMD_UNLOCK_ION_BUF:
+		ret = spcom_handle_unlock_ion_buf_command(ch, buf, buf_size);
+		break;
+	case SPCOM_CMD_CREATE_CHANNEL:
+		ret = spcom_handle_create_channel_command(buf, buf_size);
+		break;
+	default:
+		pr_err("Invalid Command Id [0x%x].\n", (int) cmd->cmd_id);
+		ret = -EINVAL;
+	}
+
+	mutex_unlock(&spcom_dev->cmd_lock);
+
+	return ret;
+}
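+
+/*
+ * Note (derived from spcom_handle_write() above): cmd_id is logged as a
+ * four-character code, so command ids are expected to be ASCII FourCC
+ * values; e.g. a hypothetical id of 0x53454e44 would be logged as "SEND".
+ */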
+
+/**
+ * spcom_handle_get_req_size() - Handle user space get request size command
+ *
+ * @ch:	channel handle
+ * @buf:	command buffer.
+ * @size:	command buffer size.
+ *
+ * Return: size in bytes on success, negative value on failure.
+ */
+static int spcom_handle_get_req_size(struct spcom_channel *ch,
+				      void *buf,
+				      uint32_t size)
+{
+	int ret = -1;
+	uint32_t next_req_size = 0;
+
+	if (size < sizeof(next_req_size)) {
+		pr_err("buf size [%d] too small.\n", (int) size);
+		return -EINVAL;
+	}
+
+	ret = spcom_get_next_request_size(ch);
+	if (ret < 0)
+		return ret;
+	next_req_size = (uint32_t) ret;
+
+	memcpy(buf, &next_req_size, sizeof(next_req_size));
+	pr_debug("next_req_size [%d].\n", next_req_size);
+
+	return sizeof(next_req_size); /* can't exceed user buffer size */
+}
+
+/**
+ * spcom_handle_read_req_resp() - Handle user space get request/response command
+ *
+ * @ch:	channel handle
+ * @buf:	command buffer.
+ * @size:	command buffer size.
+ *
+ * Return: size in bytes on success, negative value on failure.
+ */
+static int spcom_handle_read_req_resp(struct spcom_channel *ch,
+				       void *buf,
+				       uint32_t size)
+{
+	int ret;
+	struct spcom_msg_hdr *hdr;
+	void *rx_buf;
+	int rx_buf_size;
+	uint32_t timeout_msec = 0; /* client only */
+
+	/* Check if the remote side is connected */
+	if (!spcom_is_channel_connected(ch)) {
+		pr_err("ch [%s] remote side not connected.\n", ch->name);
+		return -ENOTCONN;
+	}
+
+	/* Check param validity */
+	if (size > SPCOM_MAX_RESPONSE_SIZE) {
+		pr_err("ch [%s] invalid size [%d].\n",
+			ch->name, size);
+		return -EINVAL;
+	}
+
+	/* Allocate Buffers*/
+	rx_buf_size = sizeof(*hdr) + size;
+	rx_buf = kzalloc(rx_buf_size, GFP_KERNEL);
+	if (!rx_buf)
+		return -ENOMEM;
+
+	/*
+	 * client response timeout depends on the request
+	 * handling time on the remote side.
+	 */
+	if (!ch->is_server) {
+		timeout_msec = ch->response_timeout_msec;
+		pr_debug("response_timeout_msec = %d.\n", (int) timeout_msec);
+	}
+
+	ret = spcom_rx(ch, rx_buf, rx_buf_size, timeout_msec);
+	if (ret < 0) {
+		pr_err("rx error %d.\n", ret);
+		kfree(rx_buf);
+		return ret;
+	} else {
+		size = ret; /* actual_rx_size */
+	}
+
+	hdr = rx_buf;
+
+	if (ch->is_server) {
+		ch->txn_id = hdr->txn_id;
+		pr_debug("request txn_id [0x%x].\n", ch->txn_id);
+	}
+
+	/* copy data to user without the header */
+	if (size > sizeof(*hdr)) {
+		size -= sizeof(*hdr);
+		memcpy(buf, hdr->buf, size);
+	} else {
+		pr_err("rx size [%d] too small.\n", size);
+		goto exit_err;
+	}
+
+	kfree(rx_buf);
+	return size;
+exit_err:
+	kfree(rx_buf);
+	return -EFAULT;
+}
+
+/**
+ * spcom_handle_read() - Handle user space read request/response or
+ * request-size command
+ *
+ * @ch:	channel handle
+ * @buf:	command buffer.
+ * @size:	command buffer size.
+ *
+ * A special size, SPCOM_GET_NEXT_REQUEST_SIZE, which is bigger than the max
+ * response/request size, tells the kernel that user space only needs the
+ * size.
+ *
+ * Return: size in bytes on success, negative value on failure.
+ */
+static int spcom_handle_read(struct spcom_channel *ch,
+			      void *buf,
+			      uint32_t size)
+{
+	int ret = -1;
+
+	if (size == SPCOM_GET_NEXT_REQUEST_SIZE) {
+		pr_debug("get next request size, ch [%s].\n", ch->name);
+		ch->is_server = true;
+		ret = spcom_handle_get_req_size(ch, buf, size);
+	} else {
+		pr_debug("get request/response, ch [%s].\n", ch->name);
+		ret = spcom_handle_read_req_resp(ch, buf, size);
+	}
+
+	pr_debug("ch [%s] , size = %d.\n", ch->name, size);
+
+	return ret;
+}
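+
+/*
+ * Example (illustrative sketch only, not part of this driver): a user
+ * space server first reads the size of the next request, then reads the
+ * request itself:
+ *
+ *	uint32_t next_size = 0;
+ *
+ *	read(ch_fd, &next_size, SPCOM_GET_NEXT_REQUEST_SIZE);
+ *	read(ch_fd, req_buf, next_size);
+ */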
+
+/*======================================================================*/
+/*		CHAR DEVICE USER SPACE INTERFACE			*/
+/*======================================================================*/
+
+/**
+ * file_to_filename() - get the filename from file pointer.
+ *
+ * @filp: file pointer
+ *
+ * It is used for debug prints.
+ *
+ * Return: filename string or "unknown".
+ */
+static char *file_to_filename(struct file *filp)
+{
+	struct dentry *dentry = NULL;
+	char *filename = NULL;
+
+	if (!filp || !filp->f_path.dentry)
+		return "unknown";
+
+	dentry = filp->f_path.dentry;
+	filename = dentry->d_iname;
+
+	return filename;
+}
+
+/**
+ * spcom_device_open() - handle channel file open() from user space.
+ *
+ * @filp: file pointer
+ *
+ * The file name (without path) is the channel name.
+ * Open the relevant glink channel.
+ * Store the channel context in the file's private
+ * data pointer for future read/write/close
+ * operations.
+ */
+static int spcom_device_open(struct inode *inode, struct file *filp)
+{
+	int ret = 0;
+	struct spcom_channel *ch;
+	const char *name = file_to_filename(filp);
+
+	/* silent error message until spss link is up */
+	if (!spcom_is_sp_subsystem_link_up())
+		return -ENODEV;
+
+	pr_debug("Open file [%s].\n", name);
+
+	if (strcmp(name, DEVICE_NAME) == 0) {
+		pr_debug("root dir skipped.\n");
+		return 0;
+	}
+
+	if (strcmp(name, "sp_ssr") == 0) {
+		pr_debug("sp_ssr dev node skipped.\n");
+		return 0;
+	}
+
+	ch = spcom_find_channel_by_name(name);
+	if (!ch) {
+		pr_err("channel %s doesn't exist, load App first.\n", name);
+		return -ENODEV;
+	}
+
+	ret = spcom_open(ch, OPEN_CHANNEL_TIMEOUT_MSEC);
+	if (ret == -ETIMEDOUT) {
+		pr_err("Connection timeout channel [%s].\n", name);
+	} else if (ret) {
+		pr_err("failed to open channel [%s] , err=%d.\n", name, ret);
+		return ret;
+	}
+
+	filp->private_data = ch;
+
+	pr_debug("finished.\n");
+
+	return 0;
+}
+
+/**
+ * spcom_device_release() - handle channel file close() from user space.
+ *
+ * @filp: file pointer
+ *
+ * The file name (without path) is the channel name.
+ * Close the relevant glink channel and wait for
+ * both sides to disconnect.
+ */
+static int spcom_device_release(struct inode *inode, struct file *filp)
+{
+	struct spcom_channel *ch;
+	const char *name = file_to_filename(filp);
+
+	pr_debug("Close file [%s].\n", name);
+
+	if (strcmp(name, DEVICE_NAME) == 0) {
+		pr_debug("root dir skipped.\n");
+		return 0;
+	}
+
+	if (strcmp(name, "sp_ssr") == 0) {
+		pr_debug("sp_ssr dev node skipped.\n");
+		return 0;
+	}
+
+	ch = filp->private_data;
+
+	if (!ch) {
+		pr_debug("ch is NULL, file name %s.\n", file_to_filename(filp));
+		return -ENODEV;
+	}
+
+	/* channel might be already closed or disconnected */
+	if (!spcom_is_channel_open(ch)) {
+		pr_err("ch [%s] already closed.\n", name);
+		return 0;
+	}
+
+	reinit_completion(&ch->disconnect);
+
+	spcom_close(ch);
+
+	pr_debug("Wait for event GLINK_LOCAL_DISCONNECTED, ch [%s].\n", name);
+	wait_for_completion(&ch->disconnect);
+	pr_debug("GLINK_LOCAL_DISCONNECTED signaled, ch [%s].\n", name);
+
+	return 0;
+}
+
+/**
+ * spcom_device_write() - handle channel file write() from user space.
+ *
+ * @filp: file pointer
+ *
+ * Return: on success - the number of bytes written.
+ * On failure - negative value.
+ */
+static ssize_t spcom_device_write(struct file *filp,
+				   const char __user *user_buff,
+				   size_t size, loff_t *f_pos)
+{
+	int ret;
+	char *buf;
+	struct spcom_channel *ch;
+	const char *name = file_to_filename(filp);
+	int buf_size = 0;
+
+	pr_debug("Write file [%s] size [%d] pos [%d].\n",
+		 name, (int) size, (int) *f_pos);
+
+	if (!user_buff || !f_pos || !filp) {
+		pr_err("invalid null parameters.\n");
+		return -EINVAL;
+	}
+
+	ch = filp->private_data;
+	if (!ch) {
+		pr_err("invalid ch pointer, command not allowed.\n");
+		return -EINVAL;
+	} else {
+		/* Check if the remote side is connected */
+		if (!spcom_is_channel_connected(ch)) {
+			pr_err("ch [%s] remote side not connected.\n", ch->name);
+			return -ENOTCONN;
+		}
+	}
+
+	if (size > SPCOM_MAX_COMMAND_SIZE) {
+		pr_err("size [%d] > max size [%d].\n",
+			   (int) size, (int) SPCOM_MAX_COMMAND_SIZE);
+		return -EINVAL;
+	}
+	buf_size = size; /* explicit casting size_t to int */
+
+	if (*f_pos != 0) {
+		pr_err("offset should be zero, no sparse buffer.\n");
+		return -EINVAL;
+	}
+
+	buf = kzalloc(size, GFP_KERNEL);
+	if (buf == NULL)
+		return -ENOMEM;
+
+	ret = copy_from_user(buf, user_buff, size);
+	if (ret) {
+		pr_err("Unable to copy from user (err %d).\n", ret);
+		kfree(buf);
+		return -EFAULT;
+	}
+
+	ret = spcom_handle_write(ch, buf, buf_size);
+	if (ret) {
+		pr_err("handle command error [%d].\n", ret);
+		kfree(buf);
+		return -EFAULT;
+	}
+
+	kfree(buf);
+
+	return size;
+}
+
+/**
+ * spcom_device_read() - handle channel file read() from user space.
+ *
+ * @filp: file pointer
+ *
+ * Return: number of bytes read on success, negative value on
+ * failure.
+ */
+static ssize_t spcom_device_read(struct file *filp, char __user *user_buff,
+				 size_t size, loff_t *f_pos)
+{
+	int ret = 0;
+	int actual_size = 0;
+	char *buf;
+	struct spcom_channel *ch;
+	const char *name = file_to_filename(filp);
+	uint32_t buf_size = 0;
+
+	pr_debug("Read file [%s], size = %d bytes.\n", name, (int) size);
+
+	if (!filp || !user_buff || !f_pos ||
+	    (size == 0) || (size > SPCOM_MAX_READ_SIZE)) {
+		pr_err("invalid parameters.\n");
+		return -EINVAL;
+	}
+	buf_size = size; /* explicit casting size_t to uint32_t */
+
+	ch = filp->private_data;
+
+	if (ch == NULL) {
+		pr_err("invalid ch pointer, file [%s].\n", name);
+		return -EINVAL;
+	}
+
+	if (!spcom_is_channel_open(ch)) {
+		pr_err("ch is not open, file [%s].\n", name);
+		return -EINVAL;
+	}
+
+	buf = kzalloc(size, GFP_KERNEL);
+	if (buf == NULL)
+		return -ENOMEM;
+
+	ret = spcom_handle_read(ch, buf, buf_size);
+	if (ret < 0) {
+		pr_err("read error [%d].\n", ret);
+		kfree(buf);
+		return ret;
+	}
+	actual_size = ret;
+	if ((actual_size == 0) || (actual_size > size)) {
+		pr_err("invalid actual_size [%d].\n", actual_size);
+		kfree(buf);
+		return -EFAULT;
+	}
+
+	ret = copy_to_user(user_buff, buf, actual_size);
+
+	if (ret) {
+		pr_err("Unable to copy to user, err = %d.\n", ret);
+		kfree(buf);
+		return -EFAULT;
+	}
+
+	kfree(buf);
+
+	pr_debug("ch [%s] ret [%d].\n", name, (int) actual_size);
+
+	return actual_size;
+}
+
+/**
+ * spcom_device_poll() - handle channel file poll() from user space.
+ *
+ * @filp: file pointer
+ *
+ * This allows user space to wait/check for channel connection,
+ * or wait for SSR event.
+ *
+ * Return: event bitmask on success, set POLLERR on failure.
+ */
+static unsigned int spcom_device_poll(struct file *filp,
+				       struct poll_table_struct *poll_table)
+{
+	/*
+	 * when the user calls poll() with timeout -1 (blocking mode),
+	 * some bit must be set in the response
+	 */
+	unsigned int ret = SPCOM_POLL_READY_FLAG;
+	unsigned long mask;
+	struct spcom_channel *ch;
+	const char *name = file_to_filename(filp);
+	bool wait = false;
+	bool done = false;
+	/* Event types always implicitly polled for */
+	unsigned long reserved = POLLERR | POLLHUP | POLLNVAL;
+	int ready = 0;
+
+	ch = filp->private_data;
+
+	mask = poll_requested_events(poll_table);
+
+	pr_debug("== ch [%s] mask [0x%x] ==.\n", name, (int) mask);
+
+	/* the user space poll API uses "short" event masks, not "long" */
+	mask &= 0x0000FFFF;
+
+	wait = mask & SPCOM_POLL_WAIT_FLAG;
+	if (wait)
+		pr_debug("ch [%s] wait for event flag is ON.\n", name);
+	mask &= ~SPCOM_POLL_WAIT_FLAG; /* clear the wait flag */
+	mask &= ~SPCOM_POLL_READY_FLAG; /* clear the ready flag */
+	mask &= ~reserved; /* clear the implicitly set reserved bits */
+
+	switch (mask) {
+	case SPCOM_POLL_LINK_STATE:
+		pr_debug("ch [%s] SPCOM_POLL_LINK_STATE.\n", name);
+		if (wait) {
+			reinit_completion(&spcom_dev->link_state_changed);
+			ready = wait_for_completion_interruptible(
+				&spcom_dev->link_state_changed);
+			pr_debug("ch [%s] poll LINK_STATE signaled.\n", name);
+		}
+		done = (spcom_dev->link_state == GLINK_LINK_STATE_UP);
+		break;
+	case SPCOM_POLL_CH_CONNECT:
+		/*
+		 * ch is not expected to be NULL since user must call open()
+		 * to get FD before it can call poll().
+		 * open() will fail if no ch related to the char-device.
+		 */
+		if (ch == NULL) {
+			pr_err("invalid ch pointer, file [%s].\n", name);
+			return POLLERR;
+		}
+		pr_debug("ch [%s] SPCOM_POLL_CH_CONNECT.\n", name);
+		if (wait) {
+			reinit_completion(&ch->connect);
+			ready = wait_for_completion_interruptible(&ch->connect);
+			pr_debug("ch [%s] poll CH_CONNECT signaled.\n", name);
+		}
+		done = completion_done(&ch->connect);
+		break;
+	default:
+		pr_err("ch [%s] poll, invalid mask [0x%x].\n",
+			 name, (int) mask);
+		ret = POLLERR;
+		break;
+	}
+
+	if (ready < 0) { /* wait was interrupted */
+		pr_debug("ch [%s] poll interrupted, ret [%d].\n", name, ready);
+		ret = POLLERR | SPCOM_POLL_READY_FLAG | mask;
+	}
+	if (done)
+		ret |= mask;
+
+	pr_debug("ch [%s] poll, mask = 0x%x, ret=0x%x.\n",
+		 name, (int) mask, ret);
+
+	return ret;
+}
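+
+/*
+ * Example (illustrative sketch only, not part of this driver): user space
+ * can block until the channel is connected by polling with the wait flag:
+ *
+ *	struct pollfd pfd = {
+ *		.fd = ch_fd,
+ *		.events = SPCOM_POLL_CH_CONNECT | SPCOM_POLL_WAIT_FLAG,
+ *	};
+ *
+ *	poll(&pfd, 1, -1);
+ *	connected = pfd.revents & SPCOM_POLL_CH_CONNECT;
+ */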
+
+/* file operation supported from user space */
+static const struct file_operations fops = {
+	.owner = THIS_MODULE,
+	.read = spcom_device_read,
+	.poll = spcom_device_poll,
+	.write = spcom_device_write,
+	.open = spcom_device_open,
+	.release = spcom_device_release,
+};
+
+/**
+ * spcom_create_channel_chardev() - Create a channel char-dev node file
+ * for user space interface
+ */
+static int spcom_create_channel_chardev(const char *name)
+{
+	int ret;
+	struct device *dev;
+	struct spcom_channel *ch;
+	dev_t devt;
+	struct class *cls = spcom_dev->driver_class;
+	struct device *parent = spcom_dev->class_dev;
+	void *priv;
+	struct cdev *cdev;
+
+	pr_debug("Add channel [%s].\n", name);
+
+	ch = spcom_find_channel_by_name(name);
+	if (ch) {
+		pr_err("channel [%s] already exist.\n", name);
+		return -EINVAL;
+	}
+
+	ch = spcom_find_channel_by_name(""); /* find reserved channel */
+	if (!ch) {
+		pr_err("no free channel.\n");
+		return -ENODEV;
+	}
+
+	cdev = kzalloc(sizeof(*cdev), GFP_KERNEL);
+	if (!cdev)
+		return -ENOMEM;
+
+	spcom_dev->channel_count++;
+	devt = spcom_dev->device_no + spcom_dev->channel_count;
+	priv = ch;
+	dev = device_create(cls, parent, devt, priv, name);
+	if (IS_ERR(dev)) {
+		pr_err("device_create failed.\n");
+		kfree(cdev);
+		return -ENODEV;
+	}
+
+	cdev_init(cdev, &fops);
+	cdev->owner = THIS_MODULE;
+
+	ret = cdev_add(cdev, devt, 1);
+	if (ret < 0) {
+		pr_err("cdev_add failed %d\n", ret);
+		goto exit_destroy_device;
+	}
+
+	spcom_init_channel(ch, name);
+
+	ch->cdev = cdev;
+	ch->dev = dev;
+
+	return 0;
+
+exit_destroy_device:
+	device_destroy(spcom_dev->driver_class, devt);
+	kfree(cdev);
+	return -EFAULT;
+}
+
+static int __init spcom_register_chardev(void)
+{
+	int ret;
+	unsigned baseminor = 0;
+	unsigned count = 1;
+	void *priv = spcom_dev;
+
+	ret = alloc_chrdev_region(&spcom_dev->device_no, baseminor, count,
+				 DEVICE_NAME);
+	if (ret < 0) {
+		pr_err("alloc_chrdev_region failed %d\n", ret);
+		return ret;
+	}
+
+	spcom_dev->driver_class = class_create(THIS_MODULE, DEVICE_NAME);
+	if (IS_ERR(spcom_dev->driver_class)) {
+		ret = -ENOMEM;
+		pr_err("class_create failed %d\n", ret);
+		goto exit_unreg_chrdev_region;
+	}
+
+	spcom_dev->class_dev = device_create(spcom_dev->driver_class, NULL,
+				  spcom_dev->device_no, priv,
+				  DEVICE_NAME);
+
+	if (IS_ERR(spcom_dev->class_dev)) {
+		pr_err("class_device_create failed %d\n", ret);
+		ret = -ENOMEM;
+		goto exit_destroy_class;
+	}
+
+	cdev_init(&spcom_dev->cdev, &fops);
+	spcom_dev->cdev.owner = THIS_MODULE;
+
+	ret = cdev_add(&spcom_dev->cdev,
+		       MKDEV(MAJOR(spcom_dev->device_no), 0),
+		       SPCOM_MAX_CHANNELS);
+	if (ret < 0) {
+		pr_err("cdev_add failed %d\n", ret);
+		goto exit_destroy_device;
+	}
+
+	pr_debug("char device created.\n");
+
+	return 0;
+
+exit_destroy_device:
+	device_destroy(spcom_dev->driver_class, spcom_dev->device_no);
+exit_destroy_class:
+	class_destroy(spcom_dev->driver_class);
+exit_unreg_chrdev_region:
+	unregister_chrdev_region(spcom_dev->device_no, 1);
+	return ret;
+}
+
+static void spcom_unregister_chrdev(void)
+{
+	cdev_del(&spcom_dev->cdev);
+	device_destroy(spcom_dev->driver_class, spcom_dev->device_no);
+	class_destroy(spcom_dev->driver_class);
+	unregister_chrdev_region(spcom_dev->device_no, 1);
+}
+
+/*======================================================================*/
+/*		Device Tree						*/
+/*======================================================================*/
+
+static int spcom_parse_dt(struct device_node *np)
+{
+	int ret;
+	const char *propname = "qcom,spcom-ch-names";
+	int num_ch = of_property_count_strings(np, propname);
+	int i;
+	const char *name;
+
+	pr_debug("num of predefined channels [%d].\n", num_ch);
+
+	if (num_ch > ARRAY_SIZE(spcom_dev->predefined_ch_name)) {
+		pr_err("too many predefined channels [%d].\n", num_ch);
+		return -EINVAL;
+	}
+
+	for (i = 0; i < num_ch; i++) {
+		ret = of_property_read_string_index(np, propname, i, &name);
+		if (ret) {
+			pr_err("failed to read DT channel [%d] name .\n", i);
+			return -EFAULT;
+		}
+		strlcpy(spcom_dev->predefined_ch_name[i],
+			name,
+			sizeof(spcom_dev->predefined_ch_name[i]));
+
+		pr_debug("found ch [%s].\n", name);
+	}
+
+	return num_ch;
+}
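+
+/*
+ * Example (illustrative; the channel names are assumptions): a matching
+ * device tree node with two predefined channels might look like:
+ *
+ *	qcom,spcom {
+ *		compatible = "qcom,spcom";
+ *		qcom,spcom-ch-names = "sp_kernel", "sp_ssr";
+ *	};
+ */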
+
+static int spcom_probe(struct platform_device *pdev)
+{
+	int ret;
+	struct spcom_device *dev = NULL;
+	struct glink_link_info link_info;
+	struct device_node *np;
+	struct link_state_notifier_info *notif_handle;
+
+	if (!pdev) {
+		pr_err("invalid pdev.\n");
+		return -ENODEV;
+	}
+
+	np = pdev->dev.of_node;
+	if (!np) {
+		pr_err("invalid DT node.\n");
+		return -EINVAL;
+	}
+
+	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+	if (dev == NULL)
+		return -ENOMEM;
+
+	spcom_dev = dev;
+	mutex_init(&spcom_dev->cmd_lock);
+	init_completion(&dev->link_state_changed);
+	spcom_dev->link_state = GLINK_LINK_STATE_DOWN;
+
+	ret = spcom_register_chardev();
+	if (ret) {
+		pr_err("create character device failed.\n");
+		goto fail_reg_chardev;
+	}
+
+	link_info.glink_link_state_notif_cb = spcom_link_state_notif_cb;
+	link_info.transport = spcom_transport;
+	link_info.edge = spcom_edge;
+
+	ret = spcom_parse_dt(np);
+	if (ret < 0)
+		goto fail_reg_chardev;
+
+	/*
+	 * Register for glink link up/down notification.
+	 * glink channels can't be opened before link is up.
+	 */
+	pr_debug("register_link_state_cb(), transport [%s] edge [%s]\n",
+		link_info.transport, link_info.edge);
+	notif_handle = glink_register_link_state_cb(&link_info, spcom_dev);
+	if (IS_ERR(notif_handle)) {
+		pr_err("glink_register_link_state_cb(), err [%d]\n", ret);
+		goto fail_reg_chardev;
+	}
+
+	spcom_dev->ion_client = msm_ion_client_create(DEVICE_NAME);
+	if (IS_ERR(spcom_dev->ion_client)) {
+		pr_err("fail to create ion client.\n");
+		goto fail_ion_client;
+	}
+
+	pr_info("Driver Initialization ok.\n");
+
+	return 0;
+
+fail_ion_client:
+	glink_unregister_link_state_cb(notif_handle);
+fail_reg_chardev:
+	pr_err("Failed to init driver.\n");
+	spcom_unregister_chrdev();
+	kfree(dev);
+	spcom_dev = NULL;
+
+	return -ENODEV;
+}
+
+static const struct of_device_id spcom_match_table[] = {
+	{ .compatible = "qcom,spcom", },
+	{ },
+};
+
+static struct platform_driver spcom_driver = {
+	.probe = spcom_probe,
+	.driver = {
+		.name = DEVICE_NAME,
+		.owner = THIS_MODULE,
+		.of_match_table = of_match_ptr(spcom_match_table),
+	},
+};
+
+/*======================================================================*/
+/*		Driver Init/Exit					*/
+/*======================================================================*/
+
+static int __init spcom_init(void)
+{
+	int ret;
+
+	pr_info("spcom driver version 1.2 23-Aug-2017.\n");
+
+	ret = platform_driver_register(&spcom_driver);
+	if (ret)
+		pr_err("spcom_driver register failed %d\n", ret);
+
+	return ret;
+}
+module_init(spcom_init);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Secure Processor Communication");
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/soc/qcom/spm_devices.c	2019-01-22 16:16:26.679275167 +0100
@@ -0,0 +1,985 @@
+/* Copyright (c) 2011-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/err.h>
+#include <linux/cpu.h>
+#include <soc/qcom/spm.h>
+#include "spm_driver.h"
+
+#define VDD_DEFAULT 0xDEADF00D
+#define SLP_CMD_BIT 17
+#define PC_MODE_BIT 16
+#define RET_MODE_BIT 15
+#define EVENT_SYNC_BIT 24
+#define ISAR_BIT 3
+#define SPM_EN_BIT 0
+
+struct msm_spm_power_modes {
+	uint32_t mode;
+	uint32_t ctl;
+};
+
+struct msm_spm_device {
+	struct list_head list;
+	bool initialized;
+	const char *name;
+	struct msm_spm_driver_data reg_data;
+	struct msm_spm_power_modes *modes;
+	uint32_t num_modes;
+	uint32_t cpu_vdd;
+	struct cpumask mask;
+	void __iomem *q2s_reg;
+	bool qchannel_ignore;
+	bool allow_rpm_hs;
+	bool use_spm_clk_gating;
+	bool use_qchannel_for_wfi;
+	void __iomem *flush_base_addr;
+	void __iomem *slpreq_base_addr;
+};
+
+struct msm_spm_vdd_info {
+	struct msm_spm_device *vctl_dev;
+	uint32_t vlevel;
+	int err;
+};
+
+static LIST_HEAD(spm_list);
+static DEFINE_PER_CPU_SHARED_ALIGNED(struct msm_spm_device, msm_cpu_spm_device);
+static DEFINE_PER_CPU(struct msm_spm_device *, cpu_vctl_device);
+
+static void msm_spm_smp_set_vdd(void *data)
+{
+	struct msm_spm_vdd_info *info = (struct msm_spm_vdd_info *)data;
+	struct msm_spm_device *dev = info->vctl_dev;
+
+	dev->cpu_vdd = info->vlevel;
+	info->err = msm_spm_drv_set_vdd(&dev->reg_data, info->vlevel);
+}
+
+/**
+ * msm_spm_probe_done(): Verify and return the status of the cpu(s) and l2
+ * probe.
+ *
+ * Return: 0 if all spm devices have been probed, -EPROBE_DEFER if any is
+ * still pending, or the error number of a failed probe.
+ */
+int msm_spm_probe_done(void)
+{
+	struct msm_spm_device *dev;
+	int cpu;
+	int ret = 0;
+
+	for_each_possible_cpu(cpu) {
+		dev = per_cpu(cpu_vctl_device, cpu);
+		if (!dev)
+			return -EPROBE_DEFER;
+
+		ret = IS_ERR(dev);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(msm_spm_probe_done);
+
+void msm_spm_dump_regs(unsigned int cpu)
+{
+	dump_regs(&per_cpu(msm_cpu_spm_device, cpu).reg_data, cpu);
+}
+
+/**
+ * msm_spm_set_vdd(): Set core voltage
+ * @cpu: core id
+ * @vlevel: Encoded PMIC data.
+ *
+ * Return: 0 on success or -(ERRNO) on failure.
+ */
+int msm_spm_set_vdd(unsigned int cpu, unsigned int vlevel)
+{
+	struct msm_spm_vdd_info info;
+	struct msm_spm_device *dev = per_cpu(cpu_vctl_device, cpu);
+	int ret;
+
+	if (!dev)
+		return -EPROBE_DEFER;
+
+	ret = IS_ERR(dev);
+	if (ret)
+		return ret;
+
+	info.vctl_dev = dev;
+	info.vlevel = vlevel;
+
+	ret = smp_call_function_any(&dev->mask, msm_spm_smp_set_vdd, &info,
+					true);
+	if (ret)
+		return ret;
+
+	return info.err;
+}
+EXPORT_SYMBOL(msm_spm_set_vdd);
+
+/**
+ * msm_spm_get_vdd(): Get core voltage
+ * @cpu: core id
+ *
+ * Return: encoded PMIC data on success, negative value on failure.
+ */
+int msm_spm_get_vdd(unsigned int cpu)
+{
+	struct msm_spm_device *dev = per_cpu(cpu_vctl_device, cpu);
+
+	if (!dev)
+		return -EPROBE_DEFER;
+
+	return msm_spm_drv_get_vdd(&dev->reg_data) ? : -EINVAL;
+}
+EXPORT_SYMBOL(msm_spm_get_vdd);
+
+static void msm_spm_config_q2s(struct msm_spm_device *dev, unsigned int mode)
+{
+	uint32_t spm_legacy_mode = 0;
+	uint32_t qchannel_ignore = 0;
+	uint32_t val = 0;
+
+	if (!dev->q2s_reg)
+		return;
+
+	switch (mode) {
+	case MSM_SPM_MODE_DISABLED:
+	case MSM_SPM_MODE_CLOCK_GATING:
+		qchannel_ignore = !dev->use_qchannel_for_wfi;
+		spm_legacy_mode = 0;
+		break;
+	case MSM_SPM_MODE_RETENTION:
+		qchannel_ignore = 0;
+		spm_legacy_mode = 0;
+		break;
+	case MSM_SPM_MODE_GDHS:
+	case MSM_SPM_MODE_STANDALONE_POWER_COLLAPSE:
+	case MSM_SPM_MODE_POWER_COLLAPSE:
+		qchannel_ignore = dev->qchannel_ignore;
+		spm_legacy_mode = 1;
+		break;
+	default:
+		break;
+	}
+
+	val = spm_legacy_mode << 2 | qchannel_ignore << 1;
+	__raw_writel(val, dev->q2s_reg);
+	mb();
+}
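+
+/*
+ * For reference (derived from msm_spm_config_q2s() above), the Q2S
+ * register value is laid out as:
+ *
+ *	bit[1] = qchannel_ignore
+ *	bit[2] = spm_legacy_mode
+ *
+ * e.g. power collapse with qchannel_ignore set writes (1 << 2) | (1 << 1).
+ */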
+
+static void msm_spm_config_hw_flush(struct msm_spm_device *dev,
+		unsigned int mode)
+{
+	uint32_t val = 0;
+
+	if (!dev->flush_base_addr)
+		return;
+
+	switch (mode) {
+	case MSM_SPM_MODE_FASTPC:
+	case MSM_SPM_MODE_STANDALONE_POWER_COLLAPSE:
+	case MSM_SPM_MODE_POWER_COLLAPSE:
+		val = BIT(0);
+		break;
+	default:
+		break;
+	}
+
+	__raw_writel(val, dev->flush_base_addr);
+}
+
+static void msm_spm_config_slpreq(struct msm_spm_device *dev,
+		unsigned int mode)
+{
+	uint32_t val = 0;
+
+	if (!dev->slpreq_base_addr)
+		return;
+
+	switch (mode) {
+	case MSM_SPM_MODE_FASTPC:
+	case MSM_SPM_MODE_GDHS:
+	case MSM_SPM_MODE_STANDALONE_POWER_COLLAPSE:
+	case MSM_SPM_MODE_POWER_COLLAPSE:
+		val = BIT(4);
+		break;
+	default:
+		break;
+	}
+
+	val = (__raw_readl(dev->slpreq_base_addr) & ~BIT(4)) | val;
+	__raw_writel(val, dev->slpreq_base_addr);
+}
+
+static int msm_spm_dev_set_low_power_mode(struct msm_spm_device *dev,
+		unsigned int mode, bool notify_rpm)
+{
+	uint32_t i;
+	int ret = -EINVAL;
+	uint32_t ctl = 0;
+
+	if (!dev) {
+		pr_err("dev is NULL\n");
+		return -ENODEV;
+	}
+
+	if (!dev->initialized)
+		return -ENXIO;
+
+	if (!dev->num_modes)
+		return 0;
+
+	if (mode == MSM_SPM_MODE_DISABLED) {
+		ret = msm_spm_drv_set_spm_enable(&dev->reg_data, false);
+	} else if (!msm_spm_drv_set_spm_enable(&dev->reg_data, true)) {
+		for (i = 0; i < dev->num_modes; i++) {
+			if (dev->modes[i].mode != mode)
+				continue;
+
+			ctl = dev->modes[i].ctl;
+			if (!dev->allow_rpm_hs && notify_rpm)
+				ctl &= ~BIT(SLP_CMD_BIT);
+
+			break;
+		}
+		ret = msm_spm_drv_set_low_power_mode(&dev->reg_data, ctl);
+	}
+
+	msm_spm_config_q2s(dev, mode);
+	msm_spm_config_hw_flush(dev, mode);
+	msm_spm_config_slpreq(dev, mode);
+
+	return ret;
+}
+
+static int msm_spm_dev_init(struct msm_spm_device *dev,
+		struct msm_spm_platform_data *data)
+{
+	int i, ret = -ENOMEM;
+	uint32_t offset = 0;
+
+	dev->cpu_vdd = VDD_DEFAULT;
+	dev->num_modes = data->num_modes;
+	dev->modes = kmalloc(
+			sizeof(struct msm_spm_power_modes) * dev->num_modes,
+			GFP_KERNEL);
+
+	if (!dev->modes)
+		goto spm_failed_malloc;
+
+	ret = msm_spm_drv_init(&dev->reg_data, data);
+
+	if (ret)
+		goto spm_failed_init;
+
+	for (i = 0; i < dev->num_modes; i++) {
+
+		/*
+		 * Default offset is 0 and gets updated as we write more
+		 * sequences into SPM
+		 */
+		dev->modes[i].ctl = data->modes[i].ctl | ((offset & 0x1FF)
+						<< 4);
+		ret = msm_spm_drv_write_seq_data(&dev->reg_data,
+						data->modes[i].cmd, &offset);
+		if (ret < 0)
+			goto spm_failed_init;
+
+		dev->modes[i].mode = data->modes[i].mode;
+	}
+
+	msm_spm_drv_reinit(&dev->reg_data, dev->num_modes ? true : false);
+
+	dev->initialized = true;
+
+	return 0;
+
+spm_failed_init:
+	kfree(dev->modes);
+spm_failed_malloc:
+	return ret;
+}
+
+/**
+ * msm_spm_turn_on_cpu_rail(): Power on the cpu rail before turning on a core
+ * @vctl_node: The SPM node that controls the voltage for the CPU
+ * @val: The value to be set on the rail
+ * @cpu: The cpu whose rail is being powered on
+ * @vctl_offset: Offset of the voltage control register
+ */
+int msm_spm_turn_on_cpu_rail(struct device_node *vctl_node,
+		unsigned int val, int cpu, int vctl_offset)
+{
+	uint32_t timeout = 2000; /* delay for voltage to settle on the core */
+	struct msm_spm_device *dev = per_cpu(cpu_vctl_device, cpu);
+	void __iomem *base;
+
+	base = of_iomap(vctl_node, 1);
+	if (base) {
+		/*
+		 * Program Q2S to disable SPM legacy mode and ignore Q2S
+		 * channel requests.
+		 * bit[1] = qchannel_ignore = 1
+		 * bit[2] = spm_legacy_mode = 0
+		 */
+		writel_relaxed(0x2, base);
+		mb();
+		iounmap(base);
+	}
+
+	base = of_iomap(vctl_node, 0);
+	if (!base)
+		return -ENOMEM;
+
+	if (dev && (dev->cpu_vdd != VDD_DEFAULT)) {
+		/* Rail already configured; don't leak the mapping. */
+		iounmap(base);
+		return 0;
+	}
+
+	/* Set the CPU supply regulator voltage */
+	val = (val & 0xFF);
+	writel_relaxed(val, base + vctl_offset);
+	mb();
+	udelay(timeout);
+
+	/* Enable the CPU supply regulator*/
+	val = 0x30080;
+	writel_relaxed(val, base + vctl_offset);
+	mb();
+	udelay(timeout);
+
+	iounmap(base);
+
+	return 0;
+}
+EXPORT_SYMBOL(msm_spm_turn_on_cpu_rail);
+
+void msm_spm_reinit(void)
+{
+	unsigned int cpu;
+	for_each_possible_cpu(cpu)
+		msm_spm_drv_reinit(
+			&per_cpu(msm_cpu_spm_device.reg_data, cpu), true);
+}
+EXPORT_SYMBOL(msm_spm_reinit);
+
+/**
+ * msm_spm_is_mode_avail() - Indicates whether a mode is available for the CPU
+ * @mode: SPM LPM mode to be checked
+ *
+ * Should only be used to decide on a mode before the lpm driver is probed.
+ */
+bool msm_spm_is_mode_avail(unsigned int mode)
+{
+	struct msm_spm_device *dev = this_cpu_ptr(&msm_cpu_spm_device);
+	int i;
+
+	for (i = 0; i < dev->num_modes; i++) {
+		if (dev->modes[i].mode == mode)
+			return true;
+	}
+
+	return false;
+}
+
+/**
+ * msm_spm_is_avs_enabled() - Returns 1 if AVS is enabled and 0 if it is not
+ * @cpu: specifies which CPU's AVS state should be read
+ *
+ * Returns errno in case of failure or AVS enable state otherwise
+ */
+int msm_spm_is_avs_enabled(unsigned int cpu)
+{
+	struct msm_spm_device *dev = per_cpu(cpu_vctl_device, cpu);
+
+	if (!dev)
+		return -ENXIO;
+
+	return msm_spm_drv_get_avs_enable(&dev->reg_data);
+}
+EXPORT_SYMBOL(msm_spm_is_avs_enabled);
+
+/**
+ * msm_spm_avs_enable() - Enables AVS on the SAW that controls this cpu's
+ *			  voltage.
+ * @cpu: specifies which cpu's avs should be enabled
+ *
+ * Returns errno in case of failure or 0 if successful
+ */
+int msm_spm_avs_enable(unsigned int cpu)
+{
+	struct msm_spm_device *dev = per_cpu(cpu_vctl_device, cpu);
+
+	if (!dev)
+		return -ENXIO;
+
+	return msm_spm_drv_set_avs_enable(&dev->reg_data, true);
+}
+EXPORT_SYMBOL(msm_spm_avs_enable);
+
+/**
+ * msm_spm_avs_disable() - Disables AVS on the SAW that controls this cpu's
+ *			   voltage.
+ * @cpu: specifies which cpu's avs should be disabled
+ *
+ * Returns errno in case of failure or 0 if successful
+ */
+int msm_spm_avs_disable(unsigned int cpu)
+{
+	struct msm_spm_device *dev = per_cpu(cpu_vctl_device, cpu);
+
+	if (!dev)
+		return -ENXIO;
+
+	return msm_spm_drv_set_avs_enable(&dev->reg_data, false);
+}
+EXPORT_SYMBOL(msm_spm_avs_disable);
+
+/**
+ * msm_spm_avs_set_limit() - Set maximum and minimum AVS limits on the
+ *			     SAW that controls this cpu's voltage.
+ * @cpu: specifies which cpu's avs should be configured
+ * @min_lvl: specifies the minimum PMIC output voltage control register
+ *		value that may be sent to the PMIC
+ * @max_lvl: specifies the maximum PMIC output voltage control register
+ *		value that may be sent to the PMIC
+ * Returns errno in case of failure or 0 if successful
+ */
+int msm_spm_avs_set_limit(unsigned int cpu,
+		uint32_t min_lvl, uint32_t max_lvl)
+{
+	struct msm_spm_device *dev = per_cpu(cpu_vctl_device, cpu);
+
+	if (!dev)
+		return -ENXIO;
+
+	return msm_spm_drv_set_avs_limit(&dev->reg_data, min_lvl, max_lvl);
+}
+EXPORT_SYMBOL(msm_spm_avs_set_limit);
+
+/**
+ * msm_spm_avs_enable_irq() - Enable an AVS interrupt
+ * @cpu: specifies which CPU's AVS should be configured
+ * @irq: specifies which interrupt to enable
+ *
+ * Returns errno in case of failure or 0 if successful.
+ */
+int msm_spm_avs_enable_irq(unsigned int cpu, enum msm_spm_avs_irq irq)
+{
+	struct msm_spm_device *dev = per_cpu(cpu_vctl_device, cpu);
+
+	if (!dev)
+		return -ENXIO;
+
+	return msm_spm_drv_set_avs_irq_enable(&dev->reg_data, irq, true);
+}
+EXPORT_SYMBOL(msm_spm_avs_enable_irq);
+
+/**
+ * msm_spm_avs_disable_irq() - Disable an AVS interrupt
+ * @cpu: specifies which CPU's AVS should be configured
+ * @irq: specifies which interrupt to disable
+ *
+ * Returns errno in case of failure or 0 if successful.
+ */
+int msm_spm_avs_disable_irq(unsigned int cpu, enum msm_spm_avs_irq irq)
+{
+	struct msm_spm_device *dev = per_cpu(cpu_vctl_device, cpu);
+
+	if (!dev)
+		return -ENXIO;
+
+	return msm_spm_drv_set_avs_irq_enable(&dev->reg_data, irq, false);
+}
+EXPORT_SYMBOL(msm_spm_avs_disable_irq);
+
+/**
+ * msm_spm_avs_clear_irq() - Clear a latched AVS interrupt
+ * @cpu: specifies which CPU's AVS should be configured
+ * @irq: specifies which interrupt to clear
+ *
+ * Returns errno in case of failure or 0 if successful.
+ */
+int msm_spm_avs_clear_irq(unsigned int cpu, enum msm_spm_avs_irq irq)
+{
+	struct msm_spm_device *dev = per_cpu(cpu_vctl_device, cpu);
+
+	if (!dev)
+		return -ENXIO;
+
+	return msm_spm_drv_avs_clear_irq(&dev->reg_data, irq);
+}
+EXPORT_SYMBOL(msm_spm_avs_clear_irq);
+
+/**
+ * msm_spm_set_low_power_mode() - Configure SPM start address for low power mode
+ * @mode: SPM LPM mode to enter
+ * @notify_rpm: Notify RPM in this mode
+ */
+int msm_spm_set_low_power_mode(unsigned int mode, bool notify_rpm)
+{
+	struct msm_spm_device *dev = this_cpu_ptr(&msm_cpu_spm_device);
+	return msm_spm_dev_set_low_power_mode(dev, mode, notify_rpm);
+}
+EXPORT_SYMBOL(msm_spm_set_low_power_mode);
+
+/**
+ * msm_spm_init(): Board initialization function
+ * @data: platform specific SPM register configuration data
+ * @nr_devs: Number of SPM devices being initialized
+ */
+int __init msm_spm_init(struct msm_spm_platform_data *data, int nr_devs)
+{
+	unsigned int cpu;
+	int ret = 0;
+
+	BUG_ON((nr_devs < num_possible_cpus()) || !data);
+
+	for_each_possible_cpu(cpu) {
+		struct msm_spm_device *dev = &per_cpu(msm_cpu_spm_device, cpu);
+		ret = msm_spm_dev_init(dev, &data[cpu]);
+		if (ret < 0) {
+			pr_warn("%s():failed CPU:%u ret:%d\n", __func__,
+					cpu, ret);
+			break;
+		}
+	}
+
+	return ret;
+}
+
+struct msm_spm_device *msm_spm_get_device_by_name(const char *name)
+{
+	struct list_head *list;
+
+	list_for_each(list, &spm_list) {
+		struct msm_spm_device *dev
+			= list_entry(list, typeof(*dev), list);
+		if (dev->name && !strcmp(dev->name, name))
+			return dev;
+	}
+	return ERR_PTR(-ENODEV);
+}
+
+int msm_spm_config_low_power_mode(struct msm_spm_device *dev,
+		unsigned int mode, bool notify_rpm)
+{
+	return msm_spm_dev_set_low_power_mode(dev, mode, notify_rpm);
+}
+#ifdef CONFIG_MSM_L2_SPM
+
+/**
+ * msm_spm_apcs_set_phase(): Set number of SMPS phases.
+ * @cpu: cpu which is requesting the change in number of phases.
+ * @phase_cnt: Number of phases to be set active
+ */
+int msm_spm_apcs_set_phase(int cpu, unsigned int phase_cnt)
+{
+	struct msm_spm_device *dev = per_cpu(cpu_vctl_device, cpu);
+
+	if (!dev)
+		return -ENXIO;
+
+	return msm_spm_drv_set_pmic_data(&dev->reg_data,
+			MSM_SPM_PMIC_PHASE_PORT, phase_cnt);
+}
+EXPORT_SYMBOL(msm_spm_apcs_set_phase);
+
+/** msm_spm_enable_fts_lpm() : Enable FTS to switch to low power
+ *                             when the cores are in low power modes
+ * @cpu: cpu that is entering low power mode.
+ * @mode: The mode configuration for FTS
+ */
+int msm_spm_enable_fts_lpm(int cpu, uint32_t mode)
+{
+	struct msm_spm_device *dev = per_cpu(cpu_vctl_device, cpu);
+
+	if (!dev)
+		return -ENXIO;
+
+	return msm_spm_drv_set_pmic_data(&dev->reg_data,
+			MSM_SPM_PMIC_PFM_PORT, mode);
+}
+EXPORT_SYMBOL(msm_spm_enable_fts_lpm);
+
+#endif
+
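+/*
+ * get_cpu_id() - Map an SPM device node to a CPU number.
+ *
+ * Returns the matching CPU id when the "qcom,cpu" phandle resolves to a
+ * possible CPU, num_possible_cpus() when the property is absent (the SPM
+ * is presumably not CPU-bound, e.g. a cache or CCI SPM), or -EINVAL when
+ * the phandle does not match any possible CPU.
+ */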
+static int get_cpu_id(struct device_node *node)
+{
+	struct device_node *cpu_node;
+	u32 cpu;
+	char *key = "qcom,cpu";
+
+	cpu_node = of_parse_phandle(node, key, 0);
+	if (cpu_node) {
+		for_each_possible_cpu(cpu) {
+			if (of_get_cpu_node(cpu, NULL) == cpu_node)
+				return cpu;
+		}
+	} else {
+		return num_possible_cpus();
+	}
+
+	return -EINVAL;
+}
+
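+/*
+ * Look up the per-CPU SPM device, or allocate a fresh one for a non-CPU SPM
+ * (i.e. when get_cpu_id() returns num_possible_cpus()), then record its
+ * "qcom,name" property and add it to spm_list.
+ */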
+static struct msm_spm_device *msm_spm_get_device(struct platform_device *pdev)
+{
+	struct msm_spm_device *dev = NULL;
+	const char *val = NULL;
+	char *key = "qcom,name";
+	int cpu = get_cpu_id(pdev->dev.of_node);
+
+	if ((cpu >= 0) && cpu < num_possible_cpus())
+		dev = &per_cpu(msm_cpu_spm_device, cpu);
+	else if (cpu == num_possible_cpus())
+		dev = devm_kzalloc(&pdev->dev, sizeof(struct msm_spm_device),
+					GFP_KERNEL);
+
+	if (!dev)
+		return NULL;
+
+	if (of_property_read_string(pdev->dev.of_node, key, &val)) {
+		pr_err("%s(): Cannot find a required node key:%s\n",
+				__func__, key);
+		return NULL;
+	}
+	dev->name = val;
+	list_add(&dev->list, &spm_list);
+
+	return dev;
+}
+
+static void get_cpumask(struct device_node *node, struct cpumask *mask)
+{
+	unsigned c;
+	int idx = 0;
+	struct device_node *cpu_node;
+	char *key = "qcom,cpu-vctl-list";
+
+	cpu_node = of_parse_phandle(node, key, idx++);
+	while (cpu_node) {
+		for_each_possible_cpu(c) {
+			if (of_get_cpu_node(c, NULL) == cpu_node)
+				cpumask_set_cpu(c, mask);
+		}
+		cpu_node = of_parse_phandle(node, key, idx++);
+	}
+}
+
+static int msm_spm_dev_probe(struct platform_device *pdev)
+{
+	int ret = 0;
+	int cpu = 0;
+	int i = 0;
+	struct device_node *node = pdev->dev.of_node;
+	struct device_node *n = NULL;
+	struct msm_spm_platform_data spm_data;
+	char *key = NULL;
+	uint32_t val = 0;
+	struct msm_spm_seq_entry modes[MSM_SPM_MODE_NR];
+	int len = 0;
+	struct msm_spm_device *dev = NULL;
+	struct resource *res = NULL;
+	uint32_t mode_count = 0;
+
+	struct spm_of {
+		char *key;
+		uint32_t id;
+	};
+
+	struct spm_of spm_of_data[] = {
+		{"qcom,saw2-cfg", MSM_SPM_REG_SAW_CFG},
+		{"qcom,saw2-avs-ctl", MSM_SPM_REG_SAW_AVS_CTL},
+		{"qcom,saw2-avs-hysteresis", MSM_SPM_REG_SAW_AVS_HYSTERESIS},
+		{"qcom,saw2-avs-limit", MSM_SPM_REG_SAW_AVS_LIMIT},
+		{"qcom,saw2-avs-dly", MSM_SPM_REG_SAW_AVS_DLY},
+		{"qcom,saw2-spm-dly", MSM_SPM_REG_SAW_SPM_DLY},
+		{"qcom,saw2-spm-ctl", MSM_SPM_REG_SAW_SPM_CTL},
+		{"qcom,saw2-pmic-data0", MSM_SPM_REG_SAW_PMIC_DATA_0},
+		{"qcom,saw2-pmic-data1", MSM_SPM_REG_SAW_PMIC_DATA_1},
+		{"qcom,saw2-pmic-data2", MSM_SPM_REG_SAW_PMIC_DATA_2},
+		{"qcom,saw2-pmic-data3", MSM_SPM_REG_SAW_PMIC_DATA_3},
+		{"qcom,saw2-pmic-data4", MSM_SPM_REG_SAW_PMIC_DATA_4},
+		{"qcom,saw2-pmic-data5", MSM_SPM_REG_SAW_PMIC_DATA_5},
+		{"qcom,saw2-pmic-data6", MSM_SPM_REG_SAW_PMIC_DATA_6},
+		{"qcom,saw2-pmic-data7", MSM_SPM_REG_SAW_PMIC_DATA_7},
+	};
+
+	struct mode_of {
+		char *key;
+		uint32_t id;
+	};
+
+	struct mode_of mode_of_data[] = {
+		{"qcom,saw2-spm-cmd-wfi", MSM_SPM_MODE_CLOCK_GATING},
+		{"qcom,saw2-spm-cmd-ret", MSM_SPM_MODE_RETENTION},
+		{"qcom,saw2-spm-cmd-gdhs", MSM_SPM_MODE_GDHS},
+		{"qcom,saw2-spm-cmd-spc",
+				MSM_SPM_MODE_STANDALONE_POWER_COLLAPSE},
+		{"qcom,saw2-spm-cmd-pc", MSM_SPM_MODE_POWER_COLLAPSE},
+		{"qcom,saw2-spm-cmd-fpc", MSM_SPM_MODE_FASTPC},
+	};
+
+	dev = msm_spm_get_device(pdev);
+	if (!dev) {
+		/*
+		 * With partial-goods support, some CPUs might not be
+		 * available; in that case this should not be treated as
+		 * an error.
+		 */
+		return 0;
+	}
+	get_cpumask(node, &dev->mask);
+
+	memset(&spm_data, 0, sizeof(struct msm_spm_platform_data));
+	memset(&modes, 0,
+		(MSM_SPM_MODE_NR - 2) * sizeof(struct msm_spm_seq_entry));
+
+	key = "qcom,saw2-ver-reg";
+	ret = of_property_read_u32(node, key, &val);
+	if (ret)
+		goto fail;
+	spm_data.ver_reg = val;
+
+	key = "qcom,vctl-timeout-us";
+	ret = of_property_read_u32(node, key, &val);
+	if (!ret)
+		spm_data.vctl_timeout_us = val;
+
+	/* SAW start address */
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!res) {
+		ret = -EFAULT;
+		goto fail;
+	}
+
+	spm_data.reg_base_addr = devm_ioremap(&pdev->dev, res->start,
+					resource_size(res));
+	if (!spm_data.reg_base_addr) {
+		ret = -ENOMEM;
+		goto fail;
+	}
+
+	spm_data.vctl_port = -1;
+	spm_data.phase_port = -1;
+	spm_data.pfm_port = -1;
+
+	key = "qcom,vctl-port";
+	of_property_read_u32(node, key, &spm_data.vctl_port);
+
+	key = "qcom,phase-port";
+	of_property_read_u32(node, key, &spm_data.phase_port);
+
+	key = "qcom,pfm-port";
+	of_property_read_u32(node, key, &spm_data.pfm_port);
+
+	/* Q2S (QChannel-2-SPM) register */
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "q2s");
+	if (res) {
+		dev->q2s_reg = devm_ioremap(&pdev->dev, res->start,
+						resource_size(res));
+		if (!dev->q2s_reg) {
+			pr_err("%s(): Unable to iomap Q2S register\n",
+					__func__);
+			ret = -EADDRNOTAVAIL;
+			goto fail;
+		}
+	}
+
+	key = "qcom,use-qchannel-for-pc";
+	dev->qchannel_ignore = !of_property_read_bool(node, key);
+
+	key = "qcom,use-spm-clock-gating";
+	dev->use_spm_clk_gating = of_property_read_bool(node, key);
+
+	key = "qcom,use-qchannel-for-wfi";
+	dev->use_qchannel_for_wfi = of_property_read_bool(node, key);
+
+	/* HW flush address */
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "hw-flush");
+	if (res) {
+		dev->flush_base_addr = devm_ioremap_resource(&pdev->dev, res);
+		if (IS_ERR(dev->flush_base_addr)) {
+			ret = PTR_ERR(dev->flush_base_addr);
+			pr_err("%s(): Unable to iomap hw flush register %d\n",
+					__func__, ret);
+			goto fail;
+		}
+	}
+
+	/* Sleep req address */
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "slpreq");
+	if (res) {
+		dev->slpreq_base_addr = devm_ioremap(&pdev->dev, res->start,
+					resource_size(res));
+		if (!dev->slpreq_base_addr) {
+			pr_err("%s(): Unable to iomap slpreq register\n",
+					__func__);
+			ret = -EADDRNOTAVAIL;
+			goto fail;
+		}
+	}
+
+	/*
+	 * At system boot, CPUs and/or clusters can remain in reset. The CCI
+	 * SPM will not be triggered unless the SPM_LEGACY_MODE bit is set
+	 * for the cluster in reset. Initialize the Q2S register and set the
+	 * SPM_LEGACY_MODE bit.
+	 */
+	msm_spm_config_q2s(dev, MSM_SPM_MODE_POWER_COLLAPSE);
+	msm_spm_drv_reg_init(&dev->reg_data, &spm_data);
+
+	for (i = 0; i < ARRAY_SIZE(spm_of_data); i++) {
+		ret = of_property_read_u32(node, spm_of_data[i].key, &val);
+		if (ret)
+			continue;
+		msm_spm_drv_upd_reg_shadow(&dev->reg_data, spm_of_data[i].id,
+				val);
+	}
+
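+	/*
+	 * Each child node describes one low power mode: its command sequence
+	 * ("qcom,sequence") plus boolean properties that set the matching
+	 * SPM control bits in ctl.
+	 */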
+	for_each_child_of_node(node, n) {
+		const char *name;
+		bool bit_set;
+		int sync;
+
+		if (!n->name)
+			continue;
+
+		ret = of_property_read_string(n, "qcom,label", &name);
+		if (ret)
+			continue;
+
+		for (i = 0; i < ARRAY_SIZE(mode_of_data); i++)
+			if (!strcmp(name, mode_of_data[i].key))
+				break;
+
+		if (i == ARRAY_SIZE(mode_of_data)) {
+			pr_err("Mode name invalid %s\n", name);
+			break;
+		}
+
+		modes[mode_count].mode = mode_of_data[i].id;
+		modes[mode_count].cmd =
+			(uint8_t *)of_get_property(n, "qcom,sequence", &len);
+		if (!modes[mode_count].cmd) {
+			pr_err("cmd is empty\n");
+			continue;
+		}
+
+		bit_set = of_property_read_bool(n, "qcom,pc_mode");
+		modes[mode_count].ctl |= bit_set ? BIT(PC_MODE_BIT) : 0;
+
+		bit_set = of_property_read_bool(n, "qcom,ret_mode");
+		modes[mode_count].ctl |= bit_set ? BIT(RET_MODE_BIT) : 0;
+
+		bit_set = of_property_read_bool(n, "qcom,slp_cmd_mode");
+		modes[mode_count].ctl |= bit_set ? BIT(SLP_CMD_BIT) : 0;
+
+		bit_set = of_property_read_bool(n, "qcom,isar");
+		modes[mode_count].ctl |= bit_set ? BIT(ISAR_BIT) : 0;
+
+		bit_set = of_property_read_bool(n, "qcom,spm_en");
+		modes[mode_count].ctl |= bit_set ? BIT(SPM_EN_BIT) : 0;
+
+		ret = of_property_read_u32(n, "qcom,event_sync", &sync);
+		if (!ret)
+			modes[mode_count].ctl |= sync << EVENT_SYNC_BIT;
+
+		mode_count++;
+	}
+
+	spm_data.modes = modes;
+	spm_data.num_modes = mode_count;
+
+	key = "qcom,supports-rpm-hs";
+	dev->allow_rpm_hs = of_property_read_bool(pdev->dev.of_node, key);
+
+	ret = msm_spm_dev_init(dev, &spm_data);
+	if (ret)
+		pr_err("SPM modes programming is not available from HLOS\n");
+
+	platform_set_drvdata(pdev, dev);
+
+	for_each_cpu(cpu, &dev->mask)
+		per_cpu(cpu_vctl_device, cpu) = dev;
+
+	if (!spm_data.num_modes)
+		return 0;
+
+	cpu = get_cpu_id(pdev->dev.of_node);
+
+	/*
+	 * For CPUs that are online, the SPM has to be programmed for
+	 * clock-gating mode so that the CPU can use the SPM when entering
+	 * these low power modes.
+	 */
+	get_online_cpus();
+	if ((cpu >= 0) && (cpu < num_possible_cpus()) && (cpu_online(cpu)))
+		msm_spm_config_low_power_mode(dev, MSM_SPM_MODE_CLOCK_GATING,
+				false);
+	put_online_cpus();
+	return ret;
+
+fail:
+	cpu = get_cpu_id(pdev->dev.of_node);
+	if (dev && (cpu >= num_possible_cpus() || (cpu < 0))) {
+		for_each_cpu(cpu, &dev->mask)
+			per_cpu(cpu_vctl_device, cpu) = ERR_PTR(ret);
+	}
+
+	pr_err("%s: CPU%d SPM device probe failed: %d\n", __func__, cpu, ret);
+
+	return ret;
+}
+
+static int msm_spm_dev_remove(struct platform_device *pdev)
+{
+	struct msm_spm_device *dev = platform_get_drvdata(pdev);
+	list_del(&dev->list);
+	return 0;
+}
+
+static struct of_device_id msm_spm_match_table[] = {
+	{.compatible = "qcom,spm-v2"},
+	{},
+};
+
+static struct platform_driver msm_spm_device_driver = {
+	.probe = msm_spm_dev_probe,
+	.remove = msm_spm_dev_remove,
+	.driver = {
+		.name = "spm-v2",
+		.owner = THIS_MODULE,
+		.of_match_table = msm_spm_match_table,
+	},
+};
+
+/**
+ * msm_spm_device_init(): Device tree initialization function
+ */
+int __init msm_spm_device_init(void)
+{
+	static bool registered;
+	if (registered)
+		return 0;
+	registered = true;
+	return platform_driver_register(&msm_spm_device_driver);
+}
+arch_initcall(msm_spm_device_init);
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/soc/qcom/spm_driver.h	2019-01-22 16:16:26.679275167 +0100
@@ -0,0 +1,134 @@
+/* Copyright (c) 2011-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#ifndef __ARCH_ARM_MACH_MSM_SPM_DEVICES_H
+#define __ARCH_ARM_MACH_MSM_SPM_DEVICES_H
+
+#include <soc/qcom/spm.h>
+
+enum {
+	MSM_SPM_REG_SAW_CFG,
+	MSM_SPM_REG_SAW_AVS_CTL,
+	MSM_SPM_REG_SAW_AVS_HYSTERESIS,
+	MSM_SPM_REG_SAW_SPM_CTL,
+	MSM_SPM_REG_SAW_PMIC_DLY,
+	MSM_SPM_REG_SAW_AVS_LIMIT,
+	MSM_SPM_REG_SAW_AVS_DLY,
+	MSM_SPM_REG_SAW_SPM_DLY,
+	MSM_SPM_REG_SAW_PMIC_DATA_0,
+	MSM_SPM_REG_SAW_PMIC_DATA_1,
+	MSM_SPM_REG_SAW_PMIC_DATA_2,
+	MSM_SPM_REG_SAW_PMIC_DATA_3,
+	MSM_SPM_REG_SAW_PMIC_DATA_4,
+	MSM_SPM_REG_SAW_PMIC_DATA_5,
+	MSM_SPM_REG_SAW_PMIC_DATA_6,
+	MSM_SPM_REG_SAW_PMIC_DATA_7,
+	MSM_SPM_REG_SAW_RST,
+
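+	/*
+	 * Entries above this marker receive init values from DT (see
+	 * reg_init_values[] below); the remaining entries are runtime and
+	 * status registers.
+	 */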
+	MSM_SPM_REG_NR_INITIALIZE = MSM_SPM_REG_SAW_RST,
+
+	MSM_SPM_REG_SAW_ID,
+	MSM_SPM_REG_SAW_SECURE,
+	MSM_SPM_REG_SAW_STS0,
+	MSM_SPM_REG_SAW_STS1,
+	MSM_SPM_REG_SAW_STS2,
+	MSM_SPM_REG_SAW_VCTL,
+	MSM_SPM_REG_SAW_SEQ_ENTRY,
+	MSM_SPM_REG_SAW_SPM_STS,
+	MSM_SPM_REG_SAW_AVS_STS,
+	MSM_SPM_REG_SAW_PMIC_STS,
+	MSM_SPM_REG_SAW_VERSION,
+
+	MSM_SPM_REG_NR,
+};
+
+struct msm_spm_seq_entry {
+	uint32_t mode;
+	uint8_t *cmd;
+	uint32_t ctl;
+};
+
+struct msm_spm_platform_data {
+	void __iomem *reg_base_addr;
+	uint32_t reg_init_values[MSM_SPM_REG_NR_INITIALIZE];
+
+	uint32_t ver_reg;
+	uint32_t vctl_port;
+	uint32_t phase_port;
+	uint32_t pfm_port;
+
+	uint8_t awake_vlevel;
+	uint32_t vctl_timeout_us;
+	uint32_t avs_timeout_us;
+
+	uint32_t num_modes;
+	struct msm_spm_seq_entry *modes;
+};
+
+enum msm_spm_pmic_port {
+	MSM_SPM_PMIC_VCTL_PORT,
+	MSM_SPM_PMIC_PHASE_PORT,
+	MSM_SPM_PMIC_PFM_PORT,
+};
+
+struct msm_spm_driver_data {
+	uint32_t major;
+	uint32_t minor;
+	uint32_t ver_reg;
+	uint32_t vctl_port;
+	uint32_t phase_port;
+	uint32_t pfm_port;
+	void __iomem *reg_base_addr;
+	uint32_t vctl_timeout_us;
+	uint32_t avs_timeout_us;
+	uint32_t reg_shadow[MSM_SPM_REG_NR];
+	uint32_t *reg_seq_entry_shadow;
+	uint32_t *reg_offsets;
+};
+
+int msm_spm_drv_init(struct msm_spm_driver_data *dev,
+		struct msm_spm_platform_data *data);
+int msm_spm_drv_reg_init(struct msm_spm_driver_data *dev,
+		struct msm_spm_platform_data *data);
+void msm_spm_drv_reinit(struct msm_spm_driver_data *dev, bool seq);
+int msm_spm_drv_set_low_power_mode(struct msm_spm_driver_data *dev,
+		uint32_t ctl);
+int msm_spm_drv_set_vdd(struct msm_spm_driver_data *dev,
+		unsigned int vlevel);
+void dump_regs(struct msm_spm_driver_data *dev, int cpu);
+uint32_t msm_spm_drv_get_sts_curr_pmic_data(
+		struct msm_spm_driver_data *dev);
+int msm_spm_drv_write_seq_data(struct msm_spm_driver_data *dev,
+		uint8_t *cmd, uint32_t *offset);
+void msm_spm_drv_flush_seq_entry(struct msm_spm_driver_data *dev);
+int msm_spm_drv_set_spm_enable(struct msm_spm_driver_data *dev,
+		bool enable);
+int msm_spm_drv_set_pmic_data(struct msm_spm_driver_data *dev,
+		enum msm_spm_pmic_port port, unsigned int data);
+
+int msm_spm_drv_set_avs_limit(struct msm_spm_driver_data *dev,
+		 uint32_t min_lvl, uint32_t max_lvl);
+
+int msm_spm_drv_set_avs_enable(struct msm_spm_driver_data *dev,
+		 bool enable);
+int msm_spm_drv_get_avs_enable(struct msm_spm_driver_data *dev);
+
+int msm_spm_drv_set_avs_irq_enable(struct msm_spm_driver_data *dev,
+		enum msm_spm_avs_irq irq, bool enable);
+int msm_spm_drv_avs_clear_irq(struct msm_spm_driver_data *dev,
+		enum msm_spm_avs_irq irq);
+
+void msm_spm_reinit(void);
+int msm_spm_init(struct msm_spm_platform_data *data, int nr_devs);
+void msm_spm_drv_upd_reg_shadow(struct msm_spm_driver_data *dev, int id,
+		int val);
+uint32_t msm_spm_drv_get_vdd(struct msm_spm_driver_data *dev);
+#endif
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/soc/qcom/spss_utils.c	2019-01-22 16:16:26.679275167 +0100
@@ -0,0 +1,433 @@
+/*
+ * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/*
+ * Secure-Processor-SubSystem (SPSS) utilities.
+ *
+ * This driver provides utilities for the Secure Processor (SP).
+ *
+ * The SP daemon needs to load different SPSS images based on:
+ *
+ * 1. Test/Production key used to sign the SPSS image (read fuses).
+ * 2. SPSS HW version (selected via Device Tree).
+ *
+ */
+
+#define pr_fmt(fmt)	"spss_utils [%s]: " fmt, __func__
+
+#include <linux/kernel.h>	/* min() */
+#include <linux/module.h>	/* MODULE_LICENSE */
+#include <linux/device.h>	/* class_create() */
+#include <linux/slab.h>	/* kzalloc() */
+#include <linux/fs.h>		/* file_operations */
+#include <linux/cdev.h>		/* cdev_add() */
+#include <linux/errno.h>	/* EINVAL, ETIMEDOUT */
+#include <linux/printk.h>	/* pr_err() */
+#include <linux/bitops.h>	/* BIT(x) */
+#include <linux/platform_device.h> /* platform_driver_register() */
+#include <linux/of.h>		/* of_property_count_strings() */
+#include <linux/io.h>		/* ioremap_nocache() */
+
+#include <soc/qcom/subsystem_restart.h>
+
+/* driver name */
+#define DEVICE_NAME	"spss-utils"
+
+enum spss_firmware_type {
+	SPSS_FW_TYPE_TEST = 't',
+	SPSS_FW_TYPE_PROD = 'p',
+	SPSS_FW_TYPE_HYBRID = 'h',
+};
+
+static enum spss_firmware_type firmware_type = SPSS_FW_TYPE_TEST;
+static const char *test_firmware_name;
+static const char *prod_firmware_name;
+static const char *hybr_firmware_name;
+static const char *firmware_name = "NA";
+static struct device *spss_dev;
+static u32 spss_debug_reg_addr; /* SP_SCSR_MBn_SP2CL_GPm(n,m) */
+
+/*==========================================================================*/
+/*		Device Sysfs */
+/*==========================================================================*/
+
+static ssize_t firmware_name_show(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	int ret;
+
+	if (!dev || !attr || !buf) {
+		pr_err("invalid param.\n");
+		return -EINVAL;
+	}
+
+	if (firmware_name == NULL)
+		ret = snprintf(buf, PAGE_SIZE, "%s\n", "unknown");
+	else
+		ret = snprintf(buf, PAGE_SIZE, "%s\n", firmware_name);
+
+	return ret;
+}
+
+static ssize_t firmware_name_store(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf,
+		size_t size)
+{
+	pr_err("set firmware name is not allowed.\n");
+
+	return -EINVAL;
+}
+
+static DEVICE_ATTR(firmware_name, 0444,
+		firmware_name_show, firmware_name_store);
+
+static ssize_t test_fuse_state_show(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	int ret;
+
+	if (!dev || !attr || !buf) {
+		pr_err("invalid param.\n");
+		return -EINVAL;
+	}
+
+	switch (firmware_type) {
+	case SPSS_FW_TYPE_TEST:
+		ret = snprintf(buf, PAGE_SIZE, "%s", "test");
+		break;
+	case SPSS_FW_TYPE_PROD:
+		ret = snprintf(buf, PAGE_SIZE, "%s", "prod");
+		break;
+	case SPSS_FW_TYPE_HYBRID:
+		ret = snprintf(buf, PAGE_SIZE, "%s", "hybrid");
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return ret;
+}
+
+static ssize_t test_fuse_state_store(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf,
+		size_t size)
+{
+	pr_err("set test fuse state is not allowed.\n");
+
+	return -EINVAL;
+}
+
+static DEVICE_ATTR(test_fuse_state, 0444,
+		test_fuse_state_show, test_fuse_state_store);
+
+static ssize_t spss_debug_reg_show(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	int ret;
+	void __iomem *spss_debug_reg = NULL;
+	u32 val1, val2;
+
+	if (!dev || !attr || !buf) {
+		pr_err("invalid param.\n");
+		return -EINVAL;
+	}
+
+	pr_debug("spss_debug_reg_addr [0x%x].\n", spss_debug_reg_addr);
+
+	spss_debug_reg = ioremap_nocache(spss_debug_reg_addr, sizeof(u32)*2);
+
+	if (!spss_debug_reg) {
+		pr_err("can't map debug reg addr.\n");
+		return -EFAULT;
+	}
+
+	val1 = readl_relaxed(spss_debug_reg);
+	val2 = readl_relaxed(((char *) spss_debug_reg) + sizeof(u32));
+
+	ret = snprintf(buf, PAGE_SIZE, "val1 [0x%x] val2 [0x%x]", val1, val2);
+
+	iounmap(spss_debug_reg);
+
+	return ret;
+}
+
+static ssize_t spss_debug_reg_store(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf,
+		size_t size)
+{
+	pr_err("set debug reg is not allowed.\n");
+
+	return -EINVAL;
+}
+
+static DEVICE_ATTR(spss_debug_reg, 0444,
+		spss_debug_reg_show, spss_debug_reg_store);
+
+static int spss_create_sysfs(struct device *dev)
+{
+	int ret;
+
+	ret = device_create_file(dev, &dev_attr_firmware_name);
+	if (ret < 0) {
+		pr_err("failed to create sysfs file for firmware_name.\n");
+		return ret;
+	}
+
+	ret = device_create_file(dev, &dev_attr_test_fuse_state);
+	if (ret < 0) {
+		pr_err("failed to create sysfs file for test_fuse_state.\n");
+		device_remove_file(dev, &dev_attr_firmware_name);
+		return ret;
+	}
+
+	ret = device_create_file(dev, &dev_attr_spss_debug_reg);
+	if (ret < 0) {
+		pr_err("failed to create sysfs file for spss_debug_reg.\n");
+		device_remove_file(dev, &dev_attr_firmware_name);
+		device_remove_file(dev, &dev_attr_test_fuse_state);
+		return ret;
+	}
+
+	return 0;
+}
+
+/*==========================================================================*/
+/*		Device Tree */
+/*==========================================================================*/
+
+/**
+ * spss_parse_dt() - Parse Device Tree info.
+ */
+static int spss_parse_dt(struct device_node *node)
+{
+	int ret;
+	u32 spss_fuse1_addr = 0;
+	u32 spss_fuse1_bit = 0;
+	u32 spss_fuse1_mask = 0;
+	void __iomem *spss_fuse1_reg = NULL;
+	u32 spss_fuse2_addr = 0;
+	u32 spss_fuse2_bit = 0;
+	u32 spss_fuse2_mask = 0;
+	void __iomem *spss_fuse2_reg = NULL;
+	u32 val1 = 0;
+	u32 val2 = 0;
+
+	ret = of_property_read_string(node, "qcom,spss-test-firmware-name",
+		&test_firmware_name);
+	if (ret < 0) {
+		pr_err("can't get test fw name.\n");
+		return -EFAULT;
+	}
+
+	ret = of_property_read_string(node, "qcom,spss-prod-firmware-name",
+		&prod_firmware_name);
+	if (ret < 0) {
+		pr_err("can't get prod fw name.\n");
+		return -EFAULT;
+	}
+
+	ret = of_property_read_string(node, "qcom,spss-hybr-firmware-name",
+		&hybr_firmware_name);
+	if (ret < 0) {
+		pr_err("can't get hybrid fw name.\n");
+		return -EFAULT;
+	}
+
+	ret = of_property_read_u32(node, "qcom,spss-fuse1-addr",
+		&spss_fuse1_addr);
+	if (ret < 0) {
+		pr_err("can't get fuse1 addr.\n");
+		return -EFAULT;
+	}
+
+	ret = of_property_read_u32(node, "qcom,spss-fuse2-addr",
+		&spss_fuse2_addr);
+	if (ret < 0) {
+		pr_err("can't get fuse2 addr.\n");
+		return -EFAULT;
+	}
+
+	ret = of_property_read_u32(node, "qcom,spss-fuse1-bit",
+		&spss_fuse1_bit);
+	if (ret < 0) {
+		pr_err("can't get fuse1 bit.\n");
+		return -EFAULT;
+	}
+
+	ret = of_property_read_u32(node, "qcom,spss-fuse2-bit",
+		&spss_fuse2_bit);
+	if (ret < 0) {
+		pr_err("can't get fuse2 bit.\n");
+		return -EFAULT;
+	}
+
+	spss_fuse1_mask = BIT(spss_fuse1_bit);
+	spss_fuse2_mask = BIT(spss_fuse2_bit);
+
+	pr_debug("spss fuse1 addr [0x%x] bit [%d] .\n",
+		(int) spss_fuse1_addr, (int) spss_fuse1_bit);
+	pr_debug("spss fuse2 addr [0x%x] bit [%d] .\n",
+		(int) spss_fuse2_addr, (int) spss_fuse2_bit);
+
+	spss_fuse1_reg = ioremap_nocache(spss_fuse1_addr, sizeof(u32));
+
+	if (!spss_fuse1_reg) {
+		pr_err("can't map fuse1 addr.\n");
+		return -EFAULT;
+	}
+
+	spss_fuse2_reg = ioremap_nocache(spss_fuse2_addr, sizeof(u32));
+
+	if (!spss_fuse2_reg) {
+		iounmap(spss_fuse1_reg);
+		pr_err("can't map fuse2 addr.\n");
+		return -EFAULT;
+	}
+
+	val1 = readl_relaxed(spss_fuse1_reg);
+	val2 = readl_relaxed(spss_fuse2_reg);
+
+	pr_debug("spss fuse1 value [0x%08x].\n", (int) val1);
+	pr_debug("spss fuse2 value [0x%08x].\n", (int) val2);
+
+	pr_debug("spss fuse1 mask [0x%08x].\n", (int) spss_fuse1_mask);
+	pr_debug("spss fuse2 mask [0x%08x].\n", (int) spss_fuse2_mask);
+
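+	/*
+	 * Firmware type decode, as implemented below: fuse1 set selects the
+	 * test image; otherwise fuse2 set selects the production image;
+	 * otherwise the hybrid image is used.
+	 */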
+	if (val1 & spss_fuse1_mask)
+		firmware_type = SPSS_FW_TYPE_TEST;
+	else if (val2 & spss_fuse2_mask)
+		firmware_type = SPSS_FW_TYPE_PROD;
+	else
+		firmware_type = SPSS_FW_TYPE_HYBRID;
+
+	iounmap(spss_fuse1_reg);
+	iounmap(spss_fuse2_reg);
+
+	ret = of_property_read_u32(node, "qcom,spss-debug-reg-addr",
+		&spss_debug_reg_addr);
+	if (ret < 0) {
+		pr_err("can't get debug regs addr.\n");
+		return ret;
+	}
+
+	return 0;
+}
+
+/**
+ * spss_probe() - initialization sequence
+ */
+static int spss_probe(struct platform_device *pdev)
+{
+	int ret = 0;
+	struct device_node *np = NULL;
+	struct device *dev = NULL;
+
+	if (!pdev) {
+		pr_err("invalid pdev.\n");
+		return -ENODEV;
+	}
+
+	np = pdev->dev.of_node;
+	if (!np) {
+		pr_err("invalid DT node.\n");
+		return -EINVAL;
+	}
+
+	dev = &pdev->dev;
+	spss_dev = dev;
+
+	if (dev == NULL) {
+		pr_err("invalid dev.\n");
+		return -EINVAL;
+	}
+
+	platform_set_drvdata(pdev, dev);
+
+	ret = spss_parse_dt(np);
+	if (ret < 0) {
+		pr_err("fail to parse device tree.\n");
+		return -EFAULT;
+	}
+
+	switch (firmware_type) {
+	case SPSS_FW_TYPE_TEST:
+		firmware_name = test_firmware_name;
+		break;
+	case SPSS_FW_TYPE_PROD:
+		firmware_name = prod_firmware_name;
+		break;
+	case SPSS_FW_TYPE_HYBRID:
+		firmware_name = hybr_firmware_name;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	ret = subsystem_set_fwname("spss", firmware_name);
+	if (ret < 0) {
+		pr_err("fail to set fw name.\n");
+		return -EFAULT;
+	}
+
+	ret = spss_create_sysfs(dev);
+	if (ret < 0) {
+		pr_err("fail to create sysfs.\n");
+		return -EFAULT;
+	}
+
+	pr_info("Initialization completed ok, firmware_name [%s].\n",
+		firmware_name);
+
+	return 0;
+}
+
+static const struct of_device_id spss_match_table[] = {
+	{ .compatible = "qcom,spss-utils", },
+	{ },
+};
+
+static struct platform_driver spss_driver = {
+	.probe = spss_probe,
+	.driver = {
+		.name = DEVICE_NAME,
+		.owner = THIS_MODULE,
+		.of_match_table = of_match_ptr(spss_match_table),
+	},
+};
+
+/*==========================================================================*/
+/*		Driver Init/Exit					*/
+/*==========================================================================*/
+static int __init spss_init(void)
+{
+	int ret = 0;
+
+	pr_info("spss-utils driver Ver 1.2 13-Jan-2017.\n");
+
+	ret = platform_driver_register(&spss_driver);
+	if (ret)
+		pr_err("register platform driver failed, ret [%d]\n", ret);
+
+	return ret;
+}
+late_initcall(spss_init); /* start after PIL driver */
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Secure Processor Utilities");
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/soc/qcom/subsys-pil-tz.c	2019-01-22 16:16:26.679275167 +0100
@@ -0,0 +1,1219 @@
+/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/clk.h>
+#include <linux/regulator/consumer.h>
+#include <linux/interrupt.h>
+#include <linux/of_gpio.h>
+#include <linux/delay.h>
+
+#include <linux/msm-bus-board.h>
+#include <linux/msm-bus.h>
+#include <linux/dma-mapping.h>
+#include <linux/highmem.h>
+
+#include <soc/qcom/subsystem_restart.h>
+#include <soc/qcom/ramdump.h>
+#include <soc/qcom/scm.h>
+
+#include <soc/qcom/smem.h>
+
+#include "peripheral-loader.h"
+
+#define XO_FREQ			19200000
+#define PROXY_TIMEOUT_MS	10000
+#define MAX_SSR_REASON_LEN	81U
+#define STOP_ACK_TIMEOUT_MS	1000
+#define CRASH_STOP_ACK_TO_MS	200
+
+#define ERR_READY	0
+#define PBL_DONE	1
+#define NMI_STATUS_REGISTER	0x44
+
+#define desc_to_data(d) container_of(d, struct pil_tz_data, desc)
+#define subsys_to_data(d) container_of(d, struct pil_tz_data, subsys_desc)
+
+/**
+ * struct reg_info - regulator info
+ * @reg: regulator handle
+ * @uV: voltage in uV
+ * @uA: current in uA
+ */
+struct reg_info {
+	struct regulator *reg;
+	int uV;
+	int uA;
+};
+
+/**
+ * struct pil_tz_data
+ * @regs: regulators that should be always on when the subsystem is
+ *	   brought out of reset
+ * @proxy_regs: regulators that should be on during pil proxy voting
+ * @clks: clocks that should be always on when the subsystem is
+ *	  brought out of reset
+ * @proxy_clks: clocks that should be on during pil proxy voting
+ * @reg_count: the number of always on regulators
+ * @proxy_reg_count: the number of proxy voting regulators
+ * @clk_count: the number of always on clocks
+ * @proxy_clk_count: the number of proxy voting clocks
+ * @smem_id: the smem id used to read the subsystem crash reason
+ * @ramdump_dev: ramdump device pointer
+ * @pas_id: the PAS id for tz
+ * @bus_client: bus client id
+ * @enable_bus_scaling: set to true if PIL needs to vote for
+ *			bus bandwidth
+ * @keep_proxy_regs_on: If set, during proxy unvoting, PIL removes the
+ *			voltage/current vote for proxy regulators but leaves
+ *			them enabled.
+ * @stop_ack: state of completion of stop ack
+ * @desc: PIL descriptor
+ * @subsys: subsystem device pointer
+ * @subsys_desc: subsystem descriptor
+ * @bits_arr: array of bit positions in SCSR registers
+ */
+struct pil_tz_data {
+	struct reg_info *regs;
+	struct reg_info *proxy_regs;
+	struct clk **clks;
+	struct clk **proxy_clks;
+	int reg_count;
+	int proxy_reg_count;
+	int clk_count;
+	int proxy_clk_count;
+	int smem_id;
+	void *ramdump_dev;
+	u32 pas_id;
+	u32 bus_client;
+	bool enable_bus_scaling;
+	bool keep_proxy_regs_on;
+	struct completion stop_ack;
+	struct pil_desc desc;
+	struct subsys_device *subsys;
+	struct subsys_desc subsys_desc;
+	void __iomem *irq_status;
+	void __iomem *irq_clear;
+	void __iomem *irq_mask;
+	void __iomem *err_status;
+	void __iomem *err_status_spare;
+	void __iomem *reg_base;
+	u32 bits_arr[2];
+};
+
+enum scm_cmd {
+	PAS_INIT_IMAGE_CMD = 1,
+	PAS_MEM_SETUP_CMD,
+	PAS_AUTH_AND_RESET_CMD = 5,
+	PAS_SHUTDOWN_CMD,
+};
+
+enum pas_id {
+	PAS_MODEM,
+	PAS_Q6,
+	PAS_DSPS,
+	PAS_TZAPPS,
+	PAS_MODEM_SW,
+	PAS_MODEM_FW,
+	PAS_WCNSS,
+	PAS_SECAPP,
+	PAS_GSS,
+	PAS_VIDC,
+	PAS_VPU,
+	PAS_BCSS,
+};
+
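+/*
+ * Two bus usecases: index 0 carries no bandwidth vote (idle) and index 1
+ * carries the active vote held around the PAS SCM calls.
+ */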
+static struct msm_bus_paths scm_pas_bw_tbl[] = {
+	{
+		.vectors = (struct msm_bus_vectors[]){
+			{
+				.src = MSM_BUS_MASTER_SPS,
+				.dst = MSM_BUS_SLAVE_EBI_CH0,
+			},
+		},
+		.num_paths = 1,
+	},
+	{
+		.vectors = (struct msm_bus_vectors[]){
+			{
+				.src = MSM_BUS_MASTER_SPS,
+				.dst = MSM_BUS_SLAVE_EBI_CH0,
+				.ib = 492 * 8 * 1000000UL,
+				.ab = 492 * 8 *  100000UL,
+			},
+		},
+		.num_paths = 1,
+	},
+};
+
+static struct msm_bus_scale_pdata scm_pas_bus_pdata = {
+	.usecase = scm_pas_bw_tbl,
+	.num_usecases = ARRAY_SIZE(scm_pas_bw_tbl),
+	.name = "scm_pas",
+};
+
+static uint32_t scm_perf_client;
+static int scm_pas_bw_count;
+static DEFINE_MUTEX(scm_pas_bw_mutex);
+
+static int scm_pas_enable_bw(void)
+{
+	int ret = 0;
+
+	if (!scm_perf_client)
+		return -EINVAL;
+
+	mutex_lock(&scm_pas_bw_mutex);
+	if (!scm_pas_bw_count) {
+		ret = msm_bus_scale_client_update_request(scm_perf_client, 1);
+		if (ret)
+			goto err_bus;
+		scm_pas_bw_count++;
+	}
+
+	mutex_unlock(&scm_pas_bw_mutex);
+	return ret;
+
+err_bus:
+	pr_err("scm-pas: Bandwidth request failed (%d)\n", ret);
+	msm_bus_scale_client_update_request(scm_perf_client, 0);
+
+	mutex_unlock(&scm_pas_bw_mutex);
+	return ret;
+}
+
+static void scm_pas_disable_bw(void)
+{
+	mutex_lock(&scm_pas_bw_mutex);
+	if (scm_pas_bw_count-- == 1)
+		msm_bus_scale_client_update_request(scm_perf_client, 0);
+	mutex_unlock(&scm_pas_bw_mutex);
+}
+
+static void scm_pas_init(int id)
+{
+	static int is_inited;
+
+	if (is_inited)
+		return;
+
+	scm_pas_bw_tbl[0].vectors[0].src = id;
+	scm_pas_bw_tbl[1].vectors[0].src = id;
+
+	scm_perf_client = msm_bus_scale_register_client(&scm_pas_bus_pdata);
+	if (!scm_perf_client)
+		pr_warn("scm-pas: Unable to register bus client\n");
+
+	is_inited = 1;
+}
+
+static int of_read_clocks(struct device *dev, struct clk ***clks_ref,
+			  const char *propname)
+{
+	int clk_count, i, len;
+	struct clk **clks;
+
+	if (!of_find_property(dev->of_node, propname, &len))
+		return 0;
+
+	clk_count = of_property_count_strings(dev->of_node, propname);
+	if (IS_ERR_VALUE(clk_count)) {
+		dev_err(dev, "Failed to get clock names\n");
+		return -EINVAL;
+	}
+
+	clks = devm_kzalloc(dev, sizeof(struct clk *) * clk_count,
+				GFP_KERNEL);
+	if (!clks)
+		return -ENOMEM;
+
+	for (i = 0; i < clk_count; i++) {
+		const char *clock_name;
+		char clock_freq_name[50];
+		u32 clock_rate = XO_FREQ;
+
+		of_property_read_string_index(dev->of_node,
+					      propname, i,
+					      &clock_name);
+		snprintf(clock_freq_name, ARRAY_SIZE(clock_freq_name),
+						"qcom,%s-freq", clock_name);
+		if (of_find_property(dev->of_node, clock_freq_name, &len))
+			if (of_property_read_u32(dev->of_node, clock_freq_name,
+								&clock_rate)) {
+				dev_err(dev, "Failed to read %s clock's freq\n",
+							clock_freq_name);
+				return -EINVAL;
+			}
+
+		clks[i] = devm_clk_get(dev, clock_name);
+		if (IS_ERR(clks[i])) {
+			int rc = PTR_ERR(clks[i]);
+			if (rc != -EPROBE_DEFER)
+				dev_err(dev, "Failed to get %s clock\n",
+								clock_name);
+			return rc;
+		}
+
+		/* Make sure rate-settable clocks' rates are set */
+		if (clk_get_rate(clks[i]) == 0)
+			clk_set_rate(clks[i], clk_round_rate(clks[i],
+								clock_rate));
+	}
+
+	*clks_ref = clks;
+	return clk_count;
+}
+
+static int of_read_regs(struct device *dev, struct reg_info **regs_ref,
+			const char *propname)
+{
+	int reg_count, i, len, rc;
+	struct reg_info *regs;
+
+	if (!of_find_property(dev->of_node, propname, &len))
+		return 0;
+
+	reg_count = of_property_count_strings(dev->of_node, propname);
+	if (IS_ERR_VALUE(reg_count)) {
+		dev_err(dev, "Failed to get regulator names\n");
+		return -EINVAL;
+	}
+
+	regs = devm_kzalloc(dev, sizeof(struct reg_info) * reg_count,
+				GFP_KERNEL);
+	if (!regs)
+		return -ENOMEM;
+
+	for (i = 0; i < reg_count; i++) {
+		const char *reg_name;
+		char reg_uV_uA_name[50];
+		u32 vdd_uV_uA[2];
+
+		of_property_read_string_index(dev->of_node,
+					      propname, i,
+					      &reg_name);
+
+		regs[i].reg = devm_regulator_get(dev, reg_name);
+		if (IS_ERR(regs[i].reg)) {
+			int rc = PTR_ERR(regs[i].reg);
+			if (rc != -EPROBE_DEFER)
+				dev_err(dev, "Failed to get %s regulator\n",
+								reg_name);
+			return rc;
+		}
+
+		/*
+		 * Read the voltage and current values for the corresponding
+		 * regulator. The device tree property name is "qcom," +
+		 *  "regulator_name" + "-uV-uA".
+		 */
+		rc = snprintf(reg_uV_uA_name, ARRAY_SIZE(reg_uV_uA_name),
+			 "qcom,%s-uV-uA", reg_name);
+		if (rc < strlen(reg_name) + 6) {
+			dev_err(dev, "Failed to hold reg_uV_uA_name\n");
+			return -EINVAL;
+		}
+
+		if (!of_find_property(dev->of_node, reg_uV_uA_name, &len))
+			continue;
+
+		len /= sizeof(vdd_uV_uA[0]);
+
+		/* There should be two entries: one for uV and one for uA */
+		if (len != 2) {
+			dev_err(dev, "Missing uV/uA value\n");
+			return -EINVAL;
+		}
+
+		rc = of_property_read_u32_array(dev->of_node, reg_uV_uA_name,
+					vdd_uV_uA, len);
+		if (rc) {
+			dev_err(dev, "Failed to read uV/uA values(rc:%d)\n",
+									rc);
+			return rc;
+		}
+
+		regs[i].uV = vdd_uV_uA[0];
+		regs[i].uA = vdd_uV_uA[1];
+	}
+
+	*regs_ref = regs;
+	return reg_count;
+}
+
+static int of_read_bus_pdata(struct platform_device *pdev,
+			     struct pil_tz_data *d)
+{
+	struct msm_bus_scale_pdata *pdata;
+	pdata = msm_bus_cl_get_pdata(pdev);
+
+	if (!pdata)
+		return -EINVAL;
+
+	d->bus_client = msm_bus_scale_register_client(pdata);
+	if (!d->bus_client)
+		pr_warn("%s: Unable to register bus client\n", __func__);
+
+	return 0;
+}
+
+static int piltz_resc_init(struct platform_device *pdev, struct pil_tz_data *d)
+{
+	int len, count, rc;
+	struct device *dev = &pdev->dev;
+
+	count = of_read_clocks(dev, &d->clks, "qcom,active-clock-names");
+	if (count < 0) {
+		dev_err(dev, "Failed to setup clocks.\n");
+		return count;
+	}
+	d->clk_count = count;
+
+	count = of_read_clocks(dev, &d->proxy_clks, "qcom,proxy-clock-names");
+	if (count < 0) {
+		dev_err(dev, "Failed to setup proxy clocks.\n");
+		return count;
+	}
+	d->proxy_clk_count = count;
+
+	count = of_read_regs(dev, &d->regs, "qcom,active-reg-names");
+	if (count < 0) {
+		dev_err(dev, "Failed to setup regulators.\n");
+		return count;
+	}
+	d->reg_count = count;
+
+	count = of_read_regs(dev, &d->proxy_regs, "qcom,proxy-reg-names");
+	if (count < 0) {
+		dev_err(dev, "Failed to setup proxy regulators.\n");
+		return count;
+	}
+	d->proxy_reg_count = count;
+
+	if (of_find_property(dev->of_node, "qcom,msm-bus,name", &len)) {
+		d->enable_bus_scaling = true;
+		rc = of_read_bus_pdata(pdev, d);
+		if (rc) {
+			dev_err(dev, "Failed to setup bus scaling client.\n");
+			return rc;
+		}
+	}
+
+	return 0;
+}
+
+static int enable_regulators(struct pil_tz_data *d, struct device *dev,
+				struct reg_info *regs, int reg_count,
+				bool reg_no_enable)
+{
+	int i, rc = 0;
+
+	for (i = 0; i < reg_count; i++) {
+		if (regs[i].uV > 0) {
+			rc = regulator_set_voltage(regs[i].reg,
+					regs[i].uV, INT_MAX);
+			if (rc) {
+				dev_err(dev, "Failed to request voltage(rc:%d)\n",
+									rc);
+				goto err_voltage;
+			}
+		}
+
+		if (regs[i].uA > 0) {
+			rc = regulator_set_load(regs[i].reg,
+						regs[i].uA);
+			if (rc < 0) {
+				dev_err(dev, "Failed to set regulator mode(rc:%d)\n",
+									rc);
+				goto err_mode;
+			}
+		}
+
+		if (d->keep_proxy_regs_on && reg_no_enable)
+			continue;
+
+		rc = regulator_enable(regs[i].reg);
+		if (rc) {
+			dev_err(dev, "Regulator enable failed(rc:%d)\n", rc);
+			goto err_enable;
+		}
+	}
+
+	return 0;
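+/*
+ * Error unwind: undo the voltage/load settings for the regulator that
+ * failed, then, in reverse order, tear down the regulators that were
+ * already fully configured and enabled.
+ */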
+err_enable:
+	if (regs[i].uA > 0) {
+		regulator_set_voltage(regs[i].reg, 0, INT_MAX);
+		regulator_set_load(regs[i].reg, 0);
+	}
+err_mode:
+	if (regs[i].uV > 0)
+		regulator_set_voltage(regs[i].reg, 0, INT_MAX);
+err_voltage:
+	for (i--; i >= 0; i--) {
+		if (regs[i].uV > 0)
+			regulator_set_voltage(regs[i].reg, 0, INT_MAX);
+
+		if (regs[i].uA > 0)
+			regulator_set_load(regs[i].reg, 0);
+
+		if (d->keep_proxy_regs_on && reg_no_enable)
+			continue;
+		regulator_disable(regs[i].reg);
+	}
+
+	return rc;
+}
+
+static void disable_regulators(struct pil_tz_data *d, struct reg_info *regs,
+					int reg_count, bool reg_no_disable)
+{
+	int i;
+
+	for (i = 0; i < reg_count; i++) {
+		if (regs[i].uV > 0)
+			regulator_set_voltage(regs[i].reg, 0, INT_MAX);
+
+		if (regs[i].uA > 0)
+			regulator_set_load(regs[i].reg, 0);
+
+		if (d->keep_proxy_regs_on && reg_no_disable)
+			continue;
+		regulator_disable(regs[i].reg);
+	}
+}
+
+static int prepare_enable_clocks(struct device *dev, struct clk **clks,
+								int clk_count)
+{
+	int rc = 0;
+	int i;
+
+	for (i = 0; i < clk_count; i++) {
+		rc = clk_prepare_enable(clks[i]);
+		if (rc) {
+			dev_err(dev, "Clock enable failed(rc:%d)\n", rc);
+			goto err;
+		}
+	}
+
+	return 0;
+err:
+	for (i--; i >= 0; i--)
+		clk_disable_unprepare(clks[i]);
+
+	return rc;
+}
+
+static void disable_unprepare_clocks(struct clk **clks, int clk_count)
+{
+	int i;
+	for (i = --clk_count; i >= 0; i--)
+		clk_disable_unprepare(clks[i]);
+}
+
+static int pil_make_proxy_vote(struct pil_desc *pil)
+{
+	struct pil_tz_data *d = desc_to_data(pil);
+	int rc;
+
+	if (d->subsys_desc.no_auth)
+		return 0;
+
+	rc = enable_regulators(d, pil->dev, d->proxy_regs,
+					d->proxy_reg_count, false);
+	if (rc)
+		return rc;
+
+	rc = prepare_enable_clocks(pil->dev, d->proxy_clks,
+							d->proxy_clk_count);
+	if (rc)
+		goto err_clks;
+
+	if (d->bus_client) {
+		rc = msm_bus_scale_client_update_request(d->bus_client, 1);
+		if (rc) {
+			dev_err(pil->dev, "bandwidth request failed(rc:%d)\n",
+									rc);
+			goto err_bw;
+		}
+	} else
+		WARN(d->enable_bus_scaling, "Bus scaling not set up for %s!\n",
+					d->subsys_desc.name);
+
+	return 0;
+err_bw:
+	disable_unprepare_clocks(d->proxy_clks, d->proxy_clk_count);
+err_clks:
+	disable_regulators(d, d->proxy_regs, d->proxy_reg_count, false);
+
+	return rc;
+}
+
+static void pil_remove_proxy_vote(struct pil_desc *pil)
+{
+	struct pil_tz_data *d = desc_to_data(pil);
+
+	if (d->subsys_desc.no_auth)
+		return;
+
+	if (d->bus_client)
+		msm_bus_scale_client_update_request(d->bus_client, 0);
+	else
+		WARN(d->enable_bus_scaling, "Bus scaling not set up for %s!\n",
+					d->subsys_desc.name);
+
+	disable_unprepare_clocks(d->proxy_clks, d->proxy_clk_count);
+
+	disable_regulators(d, d->proxy_regs, d->proxy_reg_count, true);
+}
+
+static int pil_init_image_trusted(struct pil_desc *pil,
+		const u8 *metadata, size_t size)
+{
+	struct pil_tz_data *d = desc_to_data(pil);
+	struct pas_init_image_req {
+		u32	proc;
+		u32	image_addr;
+	} request;
+	u32 scm_ret = 0;
+	void *mdata_buf;
+	dma_addr_t mdata_phys;
+	int ret;
+	DEFINE_DMA_ATTRS(attrs);
+	struct device dev = {0};
+	struct scm_desc desc = {0};
+
+	if (d->subsys_desc.no_auth)
+		return 0;
+
+	ret = scm_pas_enable_bw();
+	if (ret)
+		return ret;
+	arch_setup_dma_ops(&dev, 0, 0, NULL, 0);
+
+	dev.coherent_dma_mask =
+		DMA_BIT_MASK(sizeof(dma_addr_t) * 8);
+	dma_set_attr(DMA_ATTR_STRONGLY_ORDERED, &attrs);
+	mdata_buf = dma_alloc_attrs(&dev, size, &mdata_phys, GFP_KERNEL,
+					&attrs);
+	if (!mdata_buf) {
+		pr_err("scm-pas: Allocation for metadata failed.\n");
+		scm_pas_disable_bw();
+		return -ENOMEM;
+	}
+
+	/* Make sure there are no mappings in PKMAP and fixmap */
+	kmap_flush_unused();
+	kmap_atomic_flush_unused();
+
+	memcpy(mdata_buf, metadata, size);
+
+	request.proc = d->pas_id;
+	request.image_addr = mdata_phys;
+
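+	/* Use the legacy SCM call on pre-ARMv8 TZ, the SCMv2 API otherwise. */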
+	if (!is_scm_armv8()) {
+		ret = scm_call(SCM_SVC_PIL, PAS_INIT_IMAGE_CMD, &request,
+				sizeof(request), &scm_ret, sizeof(scm_ret));
+	} else {
+		desc.args[0] = d->pas_id;
+		desc.args[1] = mdata_phys;
+		desc.arginfo = SCM_ARGS(2, SCM_VAL, SCM_RW);
+		ret = scm_call2(SCM_SIP_FNID(SCM_SVC_PIL, PAS_INIT_IMAGE_CMD),
+				&desc);
+		scm_ret = desc.ret[0];
+	}
+
+	dma_free_attrs(&dev, size, mdata_buf, mdata_phys, &attrs);
+	scm_pas_disable_bw();
+	if (ret)
+		return ret;
+	return scm_ret;
+}
+
+static int pil_mem_setup_trusted(struct pil_desc *pil, phys_addr_t addr,
+			       size_t size)
+{
+	struct pil_tz_data *d = desc_to_data(pil);
+	struct pas_init_image_req {
+		u32	proc;
+		u32	start_addr;
+		u32	len;
+	} request;
+	u32 scm_ret = 0;
+	int ret;
+	struct scm_desc desc = {0};
+
+	if (d->subsys_desc.no_auth)
+		return 0;
+
+	request.proc = d->pas_id;
+	request.start_addr = addr;
+	request.len = size;
+
+	if (!is_scm_armv8()) {
+		ret = scm_call(SCM_SVC_PIL, PAS_MEM_SETUP_CMD, &request,
+				sizeof(request), &scm_ret, sizeof(scm_ret));
+	} else {
+		desc.args[0] = d->pas_id;
+		desc.args[1] = addr;
+		desc.args[2] = size;
+		desc.arginfo = SCM_ARGS(3);
+		ret = scm_call2(SCM_SIP_FNID(SCM_SVC_PIL, PAS_MEM_SETUP_CMD),
+				&desc);
+		scm_ret = desc.ret[0];
+	}
+	if (ret)
+		return ret;
+	return scm_ret;
+}
+
+static int pil_auth_and_reset(struct pil_desc *pil)
+{
+	struct pil_tz_data *d = desc_to_data(pil);
+	int rc;
+	u32 proc, scm_ret = 0;
+	struct scm_desc desc = {0};
+
+	if (d->subsys_desc.no_auth)
+		return 0;
+
+	desc.args[0] = proc = d->pas_id;
+	desc.arginfo = SCM_ARGS(1);
+
+	rc = enable_regulators(d, pil->dev, d->regs, d->reg_count, false);
+	if (rc)
+		return rc;
+
+	rc = prepare_enable_clocks(pil->dev, d->clks, d->clk_count);
+	if (rc)
+		goto err_clks;
+
+	rc = scm_pas_enable_bw();
+	if (rc)
+		goto err_reset;
+
+	if (!is_scm_armv8()) {
+		rc = scm_call(SCM_SVC_PIL, PAS_AUTH_AND_RESET_CMD, &proc,
+				sizeof(proc), &scm_ret, sizeof(scm_ret));
+	} else {
+		rc = scm_call2(SCM_SIP_FNID(SCM_SVC_PIL,
+			       PAS_AUTH_AND_RESET_CMD), &desc);
+		scm_ret = desc.ret[0];
+	}
+	scm_pas_disable_bw();
+	if (rc)
+		goto err_reset;
+
+	return scm_ret;
+err_reset:
+	disable_unprepare_clocks(d->clks, d->clk_count);
+err_clks:
+	disable_regulators(d, d->regs, d->reg_count, false);
+
+	return rc;
+}
+
+static int pil_shutdown_trusted(struct pil_desc *pil)
+{
+	struct pil_tz_data *d = desc_to_data(pil);
+	u32 proc, scm_ret = 0;
+	int rc;
+	struct scm_desc desc = {0};
+
+	if (d->subsys_desc.no_auth)
+		return 0;
+
+	desc.args[0] = proc = d->pas_id;
+	desc.arginfo = SCM_ARGS(1);
+
+	rc = enable_regulators(d, pil->dev, d->proxy_regs,
+					d->proxy_reg_count, true);
+	if (rc)
+		return rc;
+
+	rc = prepare_enable_clocks(pil->dev, d->proxy_clks,
+						d->proxy_clk_count);
+	if (rc)
+		goto err_clks;
+
+	if (!is_scm_armv8()) {
+		rc = scm_call(SCM_SVC_PIL, PAS_SHUTDOWN_CMD, &proc,
+			      sizeof(proc), &scm_ret, sizeof(scm_ret));
+	} else {
+		rc = scm_call2(SCM_SIP_FNID(SCM_SVC_PIL, PAS_SHUTDOWN_CMD),
+			       &desc);
+		scm_ret = desc.ret[0];
+	}
+
+	disable_unprepare_clocks(d->proxy_clks, d->proxy_clk_count);
+	disable_regulators(d, d->proxy_regs, d->proxy_reg_count, false);
+
+	if (rc)
+		return rc;
+
+	disable_unprepare_clocks(d->clks, d->clk_count);
+	disable_regulators(d, d->regs, d->reg_count, false);
+
+	return scm_ret;
+err_clks:
+	disable_regulators(d, d->proxy_regs, d->proxy_reg_count, false);
+	return rc;
+}
+
+static struct pil_reset_ops pil_ops_trusted = {
+	.init_image = pil_init_image_trusted,
+	.mem_setup =  pil_mem_setup_trusted,
+	.auth_and_reset = pil_auth_and_reset,
+	.shutdown = pil_shutdown_trusted,
+	.proxy_vote = pil_make_proxy_vote,
+	.proxy_unvote = pil_remove_proxy_vote,
+};
+
+static void log_failure_reason(const struct pil_tz_data *d)
+{
+	u32 size;
+	char *smem_reason, reason[MAX_SSR_REASON_LEN];
+	const char *name = d->subsys_desc.name;
+
+	if (d->smem_id == -1)
+		return;
+
+	smem_reason = smem_get_entry_no_rlock(d->smem_id, &size, 0,
+							SMEM_ANY_HOST_FLAG);
+	if (!smem_reason || !size) {
+		pr_err("%s SFR: (unknown, smem_get_entry_no_rlock failed).\n",
+									name);
+		return;
+	}
+	if (!smem_reason[0]) {
+		pr_err("%s SFR: (unknown, empty string found).\n", name);
+		return;
+	}
+
+	strlcpy(reason, smem_reason, min(size, MAX_SSR_REASON_LEN));
+	pr_err("%s subsystem failure reason: %s.\n", name, reason);
+
+	smem_reason[0] = '\0';
+	wmb();	/* flush the cleared reason string to shared memory */
+}
+
+static int subsys_shutdown(const struct subsys_desc *subsys, bool force_stop)
+{
+	struct pil_tz_data *d = subsys_to_data(subsys);
+	int ret;
+
+	if (!subsys_get_crash_status(d->subsys) && force_stop &&
+						subsys->force_stop_gpio) {
+		gpio_set_value(subsys->force_stop_gpio, 1);
+		ret = wait_for_completion_timeout(&d->stop_ack,
+				msecs_to_jiffies(STOP_ACK_TIMEOUT_MS));
+		if (!ret)
+			pr_warn("Timed out on stop ack from %s.\n",
+							subsys->name);
+		gpio_set_value(subsys->force_stop_gpio, 0);
+	}
+
+	pil_shutdown(&d->desc);
+	return 0;
+}
+
+static int subsys_powerup(const struct subsys_desc *subsys)
+{
+	struct pil_tz_data *d = subsys_to_data(subsys);
+	int ret = 0;
+
+	if (subsys->stop_ack_irq)
+		reinit_completion(&d->stop_ack);
+
+	d->desc.fw_name = subsys->fw_name;
+	ret = pil_boot(&d->desc);
+
+	return ret;
+}
+
+static int subsys_ramdump(int enable, const struct subsys_desc *subsys)
+{
+	struct pil_tz_data *d = subsys_to_data(subsys);
+
+	if (!enable)
+		return 0;
+
+	return pil_do_ramdump(&d->desc, d->ramdump_dev, NULL);
+}
+
+static void subsys_free_memory(const struct subsys_desc *subsys)
+{
+	struct pil_tz_data *d = subsys_to_data(subsys);
+
+	pil_free_memory(&d->desc);
+}
+
+static void subsys_crash_shutdown(const struct subsys_desc *subsys)
+{
+	struct pil_tz_data *d = subsys_to_data(subsys);
+
+	if (subsys->force_stop_gpio > 0 &&
+				!subsys_get_crash_status(d->subsys)) {
+		gpio_set_value(subsys->force_stop_gpio, 1);
+		mdelay(CRASH_STOP_ACK_TO_MS);
+	}
+}
+
+static irqreturn_t subsys_err_fatal_intr_handler(int irq, void *dev_id)
+{
+	struct pil_tz_data *d = subsys_to_data(dev_id);
+	u32 nmi_status = 0;
+
+	if (d->reg_base)
+		nmi_status = readl_relaxed(d->reg_base +
+					   NMI_STATUS_REGISTER);
+
+	if (nmi_status & 0x04)
+		pr_err("%s: Fatal error on the %s due to TZ NMI\n",
+			__func__, d->subsys_desc.name);
+	else
+		pr_err("%s: Fatal error on the %s\n",
+			__func__, d->subsys_desc.name);
+
+	if (subsys_get_crash_status(d->subsys)) {
+		pr_err("%s: Ignoring fatal error, restart in progress\n",
+							d->subsys_desc.name);
+		return IRQ_HANDLED;
+	}
+	subsys_set_crash_status(d->subsys, CRASH_STATUS_ERR_FATAL);
+	log_failure_reason(d);
+	subsystem_restart_dev(d->subsys);
+
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t subsys_wdog_bite_irq_handler(int irq, void *dev_id)
+{
+	struct pil_tz_data *d = subsys_to_data(dev_id);
+
+	if (subsys_get_crash_status(d->subsys))
+		return IRQ_HANDLED;
+	pr_err("Watchdog bite received from %s!\n", d->subsys_desc.name);
+
+	if (d->subsys_desc.system_debug &&
+			!gpio_get_value(d->subsys_desc.err_fatal_gpio))
+		panic("%s: System ramdump requested. Triggering device restart!\n",
+							__func__);
+	subsys_set_crash_status(d->subsys, CRASH_STATUS_WDOG_BITE);
+	log_failure_reason(d);
+	subsystem_restart_dev(d->subsys);
+
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t subsys_stop_ack_intr_handler(int irq, void *dev_id)
+{
+	struct pil_tz_data *d = subsys_to_data(dev_id);
+
+	pr_info("Received stop ack interrupt from %s\n", d->subsys_desc.name);
+	complete(&d->stop_ack);
+	return IRQ_HANDLED;
+}
+
+static void clear_pbl_done(struct pil_tz_data *d)
+{
+	uint32_t err_value;
+
+	err_value =  __raw_readl(d->err_status);
+	pr_debug("PBL_DONE received from %s!\n", d->subsys_desc.name);
+	if (err_value) {
+		uint32_t rmb_err_spare0;
+		uint32_t rmb_err_spare1;
+		uint32_t rmb_err_spare2;
+
+		rmb_err_spare2 =  __raw_readl(d->err_status_spare);
+		rmb_err_spare1 =  __raw_readl(d->err_status_spare-4);
+		rmb_err_spare0 =  __raw_readl(d->err_status_spare-8);
+
+		pr_err("PBL error status register: 0x%08x\n", err_value);
+
+		pr_err("PBL error status spare0 register: 0x%08x\n",
+			rmb_err_spare0);
+		pr_err("PBL error status spare1 register: 0x%08x\n",
+			rmb_err_spare1);
+		pr_err("PBL error status spare2 register: 0x%08x\n",
+			rmb_err_spare2);
+	}
+	__raw_writel(BIT(d->bits_arr[PBL_DONE]), d->irq_clear);
+}
+
+static void clear_err_ready(struct pil_tz_data *d)
+{
+	pr_debug("Subsystem error services up received from %s!\n",
+							d->subsys_desc.name);
+	__raw_writel(BIT(d->bits_arr[ERR_READY]), d->irq_clear);
+	complete_err_ready(d->subsys);
+}
+
+static void clear_wdog(struct pil_tz_data *d)
+{
+	/* Check crash status to know if the device is already restarting */
+	if (!subsys_get_crash_status(d->subsys)) {
+		pr_err("wdog bite received from %s!\n", d->subsys_desc.name);
+		__raw_writel(BIT(d->bits_arr[ERR_READY]), d->irq_clear);
+		subsys_set_crash_status(d->subsys, CRASH_STATUS_WDOG_BITE);
+		log_failure_reason(d);
+		subsystem_restart_dev(d->subsys);
+	}
+}
+
+static irqreturn_t subsys_generic_handler(int irq, void *dev_id)
+{
+	struct pil_tz_data *d = subsys_to_data(dev_id);
+	uint32_t status_val, err_value;
+
+	err_value = __raw_readl(d->err_status_spare);
+	status_val = __raw_readl(d->irq_status);
+
+	if ((status_val & BIT(d->bits_arr[ERR_READY])) && !err_value)
+		clear_err_ready(d);
+
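+	/*
+	 * 0x44554d50 is ASCII "DUMP"; presumably a dump-request magic
+	 * written to the spare register by the subsystem firmware.
+	 */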
+	if ((status_val & BIT(d->bits_arr[ERR_READY])) &&
+					err_value == 0x44554d50)
+		clear_wdog(d);
+
+	if (status_val & BIT(d->bits_arr[PBL_DONE]))
+		clear_pbl_done(d);
+
+	return IRQ_HANDLED;
+}
+
+static void mask_scsr_irqs(struct pil_tz_data *d)
+{
+	uint32_t mask_val;
+	/* Masking all interrupts not handled by HLOS */
+	mask_val = ~0;
+	__raw_writel(mask_val & ~BIT(d->bits_arr[ERR_READY]) &
+			~BIT(d->bits_arr[PBL_DONE]), d->irq_mask);
+}
+
+static int pil_tz_driver_probe(struct platform_device *pdev)
+{
+	struct pil_tz_data *d;
+	struct resource *res;
+	u32 proxy_timeout;
+	int len, rc;
+
+	d = devm_kzalloc(&pdev->dev, sizeof(*d), GFP_KERNEL);
+	if (!d)
+		return -ENOMEM;
+	platform_set_drvdata(pdev, d);
+
+	if (of_property_read_bool(pdev->dev.of_node, "qcom,pil-no-auth"))
+		d->subsys_desc.no_auth = true;
+
+	d->keep_proxy_regs_on = of_property_read_bool(pdev->dev.of_node,
+						"qcom,keep-proxy-regs-on");
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "base_reg");
+	d->reg_base = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(d->reg_base)) {
+		dev_err(&pdev->dev, "Failed to iomap base register\n");
+		d->reg_base = NULL;
+	}
+
+	rc = of_property_read_string(pdev->dev.of_node, "qcom,firmware-name",
+				      &d->desc.name);
+	if (rc)
+		return rc;
+
+	/* Default smem_id to -1 (not present) */
+	d->smem_id = -1;
+
+	if (of_find_property(pdev->dev.of_node, "qcom,smem-id", &len)) {
+		rc = of_property_read_u32(pdev->dev.of_node, "qcom,smem-id",
+						&d->smem_id);
+		if (rc) {
+			dev_err(&pdev->dev, "Failed to get the smem_id(rc:%d)\n",
+									rc);
+			return rc;
+		}
+	}
+
+	d->desc.dev = &pdev->dev;
+	d->desc.owner = THIS_MODULE;
+	d->desc.ops = &pil_ops_trusted;
+
+	d->desc.proxy_timeout = PROXY_TIMEOUT_MS;
+	d->desc.clear_fw_region = true;
+
+	rc = of_property_read_u32(pdev->dev.of_node, "qcom,proxy-timeout-ms",
+					&proxy_timeout);
+	if (!rc)
+		d->desc.proxy_timeout = proxy_timeout;
+
+	if (!d->subsys_desc.no_auth) {
+		rc = piltz_resc_init(pdev, d);
+		if (rc)
+			return rc;
+
+		rc = of_property_read_u32(pdev->dev.of_node, "qcom,pas-id",
+								&d->pas_id);
+		if (rc) {
+			dev_err(&pdev->dev, "Failed to find the pas_id(rc:%d)\n",
+									rc);
+			return rc;
+		}
+		scm_pas_init(MSM_BUS_MASTER_CRYPTO_CORE0);
+	}
+
+	rc = pil_desc_init(&d->desc);
+	if (rc)
+		return rc;
+
+	init_completion(&d->stop_ack);
+
+	d->subsys_desc.name = d->desc.name;
+	d->subsys_desc.owner = THIS_MODULE;
+	d->subsys_desc.dev = &pdev->dev;
+	d->subsys_desc.shutdown = subsys_shutdown;
+	d->subsys_desc.powerup = subsys_powerup;
+	d->subsys_desc.ramdump = subsys_ramdump;
+	d->subsys_desc.free_memory = subsys_free_memory;
+	d->subsys_desc.crash_shutdown = subsys_crash_shutdown;
+	if (of_property_read_bool(pdev->dev.of_node,
+					"qcom,pil-generic-irq-handler")) {
+		d->subsys_desc.generic_handler = subsys_generic_handler;
+		res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+						"sp2soc_irq_status");
+		d->irq_status = devm_ioremap_resource(&pdev->dev, res);
+		if (IS_ERR(d->irq_status)) {
+			dev_err(&pdev->dev, "Invalid resource for sp2soc_irq_status\n");
+			rc = PTR_ERR(d->irq_status);
+			goto err_ramdump;
+		}
+
+		res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+						"sp2soc_irq_clr");
+		d->irq_clear = devm_ioremap_resource(&pdev->dev, res);
+		if (IS_ERR(d->irq_clear)) {
+			dev_err(&pdev->dev, "Invalid resource for sp2soc_irq_clr\n");
+			rc = PTR_ERR(d->irq_clear);
+			goto err_ramdump;
+		}
+
+		res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+						"sp2soc_irq_mask");
+		d->irq_mask = devm_ioremap_resource(&pdev->dev, res);
+		if (IS_ERR(d->irq_mask)) {
+			dev_err(&pdev->dev, "Invalid resource for sp2soc_irq_mask\n");
+			rc = PTR_ERR(d->irq_mask);
+			goto err_ramdump;
+		}
+
+		res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+						"rmb_err");
+		d->err_status = devm_ioremap_resource(&pdev->dev, res);
+		if (IS_ERR(d->err_status)) {
+			dev_err(&pdev->dev, "Invalid resource for rmb_err\n");
+			rc = PTR_ERR(d->err_status);
+			goto err_ramdump;
+		}
+
+		res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+						"rmb_err_spare2");
+		d->err_status_spare = devm_ioremap_resource(&pdev->dev, res);
+		if (IS_ERR(d->err_status_spare)) {
+			dev_err(&pdev->dev, "Invalid resource for rmb_err_spare2\n");
+			rc = PTR_ERR(d->err_status_spare);
+			goto err_ramdump;
+		}
+
+		rc = of_property_read_u32_array(pdev->dev.of_node,
+				"qcom,spss-scsr-bits", d->bits_arr,
+				ARRAY_SIZE(d->bits_arr));
+		if (rc) {
+			dev_err(&pdev->dev, "Failed to read qcom,spss-scsr-bits\n");
+			goto err_ramdump;
+		}
+		mask_scsr_irqs(d);
+
+	} else {
+		d->subsys_desc.err_fatal_handler =
+						subsys_err_fatal_intr_handler;
+		d->subsys_desc.wdog_bite_handler = subsys_wdog_bite_irq_handler;
+		d->subsys_desc.stop_ack_handler = subsys_stop_ack_intr_handler;
+	}
+	d->ramdump_dev = create_ramdump_device(d->subsys_desc.name,
+								&pdev->dev);
+	if (!d->ramdump_dev) {
+		rc = -ENOMEM;
+		goto err_ramdump;
+	}
+
+	d->subsys = subsys_register(&d->subsys_desc);
+	if (IS_ERR(d->subsys)) {
+		rc = PTR_ERR(d->subsys);
+		goto err_subsys;
+	}
+	d->desc.subsys_dev = d->subsys;
+
+	return 0;
+err_subsys:
+	destroy_ramdump_device(d->ramdump_dev);
+err_ramdump:
+	pil_desc_release(&d->desc);
+	platform_set_drvdata(pdev, NULL);
+
+	return rc;
+}
+
+static int pil_tz_driver_exit(struct platform_device *pdev)
+{
+	struct pil_tz_data *d = platform_get_drvdata(pdev);
+
+	subsys_unregister(d->subsys);
+	destroy_ramdump_device(d->ramdump_dev);
+	pil_desc_release(&d->desc);
+
+	return 0;
+}
+
+static const struct of_device_id pil_tz_match_table[] = {
+	{.compatible = "qcom,pil-tz-generic"},
+	{}
+};
+
+static struct platform_driver pil_tz_driver = {
+	.probe = pil_tz_driver_probe,
+	.remove = pil_tz_driver_exit,
+	.driver = {
+		.name = "subsys-pil-tz",
+		.of_match_table = pil_tz_match_table,
+		.owner = THIS_MODULE,
+	},
+};
+
+static int __init pil_tz_init(void)
+{
+	return platform_driver_register(&pil_tz_driver);
+}
+module_init(pil_tz_init);
+
+static void __exit pil_tz_exit(void)
+{
+	platform_driver_unregister(&pil_tz_driver);
+}
+module_exit(pil_tz_exit);
+
+MODULE_DESCRIPTION("Support for booting subsystems");
+MODULE_LICENSE("GPL v2");
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/soc/qcom/subsystem_notif.c	2019-01-22 16:16:26.679275167 +0100
@@ -0,0 +1,219 @@
+/* Copyright (c) 2011, 2013, 2016 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ *
+ * Subsystem Notifier -- Provides notifications
+ * of subsys events.
+ *
+ * Use subsys_notif_register_notifier to register for notifications
+ * and subsys_notif_queue_notification to send notifications.
+ *
+ */
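+
+/*
+ * Minimal usage sketch (hypothetical client; the subsystem name and the
+ * callback are assumptions, not part of this file):
+ *
+ *	static int my_cb(struct notifier_block *nb, unsigned long code,
+ *			 void *data)
+ *	{
+ *		return NOTIFY_DONE;
+ *	}
+ *	static struct notifier_block my_nb = { .notifier_call = my_cb };
+ *
+ *	void *handle = subsys_notif_register_notifier("modem", &my_nb);
+ *	...
+ *	subsys_notif_unregister_notifier(handle, &my_nb);
+ */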
+
+#include <linux/notifier.h>
+#include <linux/init.h>
+#include <linux/debugfs.h>
+#include <linux/module.h>
+#include <linux/workqueue.h>
+#include <linux/stringify.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <soc/qcom/subsystem_notif.h>
+
+
+struct subsys_notif_info {
+	char name[50];
+	struct srcu_notifier_head subsys_notif_rcvr_list;
+	struct list_head list;
+};
+
+static LIST_HEAD(subsystem_list);
+static DEFINE_MUTEX(notif_lock);
+static DEFINE_MUTEX(notif_add_lock);
+
+#if defined(SUBSYS_RESTART_DEBUG)
+static void subsys_notif_reg_test_notifier(const char *);
+#endif
+
+static struct subsys_notif_info *_notif_find_subsys(const char *subsys_name)
+{
+	struct subsys_notif_info *subsys;
+
+	mutex_lock(&notif_lock);
+	list_for_each_entry(subsys, &subsystem_list, list)
+		if (!strncmp(subsys->name, subsys_name,
+				ARRAY_SIZE(subsys->name))) {
+			mutex_unlock(&notif_lock);
+			return subsys;
+		}
+	mutex_unlock(&notif_lock);
+
+	return NULL;
+}
+
+void *subsys_notif_register_notifier(
+			const char *subsys_name, struct notifier_block *nb)
+{
+	int ret;
+	struct subsys_notif_info *subsys = _notif_find_subsys(subsys_name);
+
+	if (!subsys) {
+
+		/* Possible first time reference to this subsystem. Add it. */
+		subsys = (struct subsys_notif_info *)
+				subsys_notif_add_subsys(subsys_name);
+
+		if (IS_ERR_OR_NULL(subsys))
+			return ERR_PTR(-EINVAL);
+	}
+
+	ret = srcu_notifier_chain_register(
+		&subsys->subsys_notif_rcvr_list, nb);
+
+	if (ret < 0)
+		return ERR_PTR(ret);
+
+	return subsys;
+}
+EXPORT_SYMBOL(subsys_notif_register_notifier);
+
+int subsys_notif_unregister_notifier(void *subsys_handle,
+				struct notifier_block *nb)
+{
+	int ret;
+	struct subsys_notif_info *subsys =
+			(struct subsys_notif_info *)subsys_handle;
+
+	if (!subsys)
+		return -EINVAL;
+
+	ret = srcu_notifier_chain_unregister(
+		&subsys->subsys_notif_rcvr_list, nb);
+
+	return ret;
+}
+EXPORT_SYMBOL(subsys_notif_unregister_notifier);
+
+void *subsys_notif_add_subsys(const char *subsys_name)
+{
+	struct subsys_notif_info *subsys = NULL;
+
+	if (!subsys_name)
+		goto done;
+
+	mutex_lock(&notif_add_lock);
+
+	subsys = _notif_find_subsys(subsys_name);
+
+	if (subsys) {
+		mutex_unlock(&notif_add_lock);
+		goto done;
+	}
+
+	subsys = kmalloc(sizeof(*subsys), GFP_KERNEL);
+
+	if (!subsys) {
+		mutex_unlock(&notif_add_lock);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	strlcpy(subsys->name, subsys_name, ARRAY_SIZE(subsys->name));
+
+	srcu_init_notifier_head(&subsys->subsys_notif_rcvr_list);
+
+	INIT_LIST_HEAD(&subsys->list);
+
+	mutex_lock(&notif_lock);
+	list_add_tail(&subsys->list, &subsystem_list);
+	mutex_unlock(&notif_lock);
+
+	#if defined(SUBSYS_RESTART_DEBUG)
+	subsys_notif_reg_test_notifier(subsys->name);
+	#endif
+
+	mutex_unlock(&notif_add_lock);
+
+done:
+	return subsys;
+}
+EXPORT_SYMBOL(subsys_notif_add_subsys);
+
+int subsys_notif_queue_notification(void *subsys_handle,
+					enum subsys_notif_type notif_type,
+					void *data)
+{
+	struct subsys_notif_info *subsys =
+		(struct subsys_notif_info *) subsys_handle;
+
+	if (!subsys)
+		return -EINVAL;
+
+	if (notif_type < 0 || notif_type >= SUBSYS_NOTIF_TYPE_COUNT)
+		return -EINVAL;
+
+	return srcu_notifier_call_chain(
+		&subsys->subsys_notif_rcvr_list, notif_type, data);
+}
+EXPORT_SYMBOL(subsys_notif_queue_notification);
+
+#if defined(SUBSYS_RESTART_DEBUG)
+static const char *notif_to_string(enum subsys_notif_type notif_type)
+{
+	switch (notif_type) {
+
+	case	SUBSYS_BEFORE_SHUTDOWN:
+		return __stringify(SUBSYS_BEFORE_SHUTDOWN);
+
+	case	SUBSYS_AFTER_SHUTDOWN:
+		return __stringify(SUBSYS_AFTER_SHUTDOWN);
+
+	case	SUBSYS_BEFORE_POWERUP:
+		return __stringify(SUBSYS_BEFORE_POWERUP);
+
+	case	SUBSYS_AFTER_POWERUP:
+		return __stringify(SUBSYS_AFTER_POWERUP);
+
+	default:
+		return "unknown";
+	}
+}
+
+static int subsys_notifier_test_call(struct notifier_block *this,
+				  unsigned long code,
+				  void *data)
+{
+	switch (code) {
+	default:
+		pr_warn("%s: Notification %s from subsystem %pK\n",
+			__func__, notif_to_string(code), data);
+		break;
+	}
+
+	return NOTIFY_DONE;
+}
+
+static struct notifier_block nb = {
+	.notifier_call = subsys_notifier_test_call,
+};
+
+static void subsys_notif_reg_test_notifier(const char *subsys_name)
+{
+	void *handle = subsys_notif_register_notifier(subsys_name, &nb);
+
+	pr_warn("%s: Registered test notifier, handle=%pK\n",
+			__func__, handle);
+}
+#endif
+
+MODULE_DESCRIPTION("Subsystem Restart Notifier");
+MODULE_VERSION("1.0");
+MODULE_LICENSE("GPL v2");
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/soc/qcom/subsystem_restart.c	2019-10-29 09:26:24.825214745 +0100
@@ -0,0 +1,1817 @@
+/* Copyright (c) 2011-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "subsys-restart: %s(): " fmt, __func__
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/uaccess.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/delay.h>
+#include <linux/list.h>
+#include <linux/io.h>
+#include <linux/kthread.h>
+#include <linux/time.h>
+#include <linux/suspend.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/device.h>
+#include <linux/idr.h>
+#include <linux/interrupt.h>
+#include <linux/of_gpio.h>
+#include <linux/cdev.h>
+#include <linux/platform_device.h>
+#include <soc/qcom/subsystem_restart.h>
+#include <soc/qcom/subsystem_notif.h>
+#include <soc/qcom/sysmon.h>
+#include <trace/events/trace_msm_pil_event.h>
+
+#include <asm/current.h>
+
+#include "peripheral-loader.h"
+
+#define DISABLE_SSR 0x9889deed
+/* If set to 0x9889deed, calls to subsystem_restart_dev() return immediately */
+static uint disable_restart_work;
+module_param(disable_restart_work, uint, S_IRUGO | S_IWUSR);
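+/*
+ * For a built-in driver this can typically be toggled at runtime via
+ * /sys/module/subsystem_restart/parameters/disable_restart_work
+ * (path assumed from the standard module_param sysfs layout).
+ */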
+
+static int enable_debug;
+module_param(enable_debug, int, S_IRUGO | S_IWUSR);
+
+/* The maximum shutdown timeout is MAX_LOOPS * DELAY_MS: 100 * 100 ms = 10 s. */
+#define SHUTDOWN_ACK_MAX_LOOPS	100
+#define SHUTDOWN_ACK_DELAY_MS	100
+
+/**
+ * enum p_subsys_state - state of a subsystem (private)
+ * @SUBSYS_NORMAL: subsystem is operating normally
+ * @SUBSYS_CRASHED: subsystem has crashed and hasn't been shutdown
+ * @SUBSYS_RESTARTING: subsystem has been shutdown and is now restarting
+ *
+ * The 'private' side of the subsystem state used to determine where in the
+ * restart process the subsystem is.
+ */
+enum p_subsys_state {
+	SUBSYS_NORMAL,
+	SUBSYS_CRASHED,
+	SUBSYS_RESTARTING,
+};
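+
+/*
+ * Expected flow (see __subsystem_restart_dev() and
+ * subsystem_restart_wq_func() below): SUBSYS_NORMAL -> SUBSYS_CRASHED when a
+ * crash is queued, SUBSYS_RESTARTING while the restart work runs, then back
+ * to SUBSYS_NORMAL.
+ */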
+
+/**
+ * enum subsys_state - state of a subsystem (public)
+ * @SUBSYS_OFFLINING: subsystem is offlining
+ * @SUBSYS_OFFLINE: subsystem is offline
+ * @SUBSYS_ONLINE: subsystem is online
+ *
+ * The 'public' side of the subsystem state, exposed to userspace.
+ */
+enum subsys_state {
+	SUBSYS_OFFLINING,
+	SUBSYS_OFFLINE,
+	SUBSYS_ONLINE,
+};
+
+static const char * const subsys_states[] = {
+	[SUBSYS_OFFLINING] = "OFFLINING",
+	[SUBSYS_OFFLINE] = "OFFLINE",
+	[SUBSYS_ONLINE] = "ONLINE",
+};
+
+static const char * const restart_levels[] = {
+	[RESET_SOC] = "SYSTEM",
+	[RESET_SUBSYS_COUPLED] = "RELATED",
+};
+
+/**
+ * struct subsys_tracking - track state of a subsystem or restart order
+ * @p_state: private state of subsystem/order
+ * @state: public state of subsystem/order
+ * @s_lock: protects p_state
+ * @lock: protects subsystem/order callbacks and state
+ *
+ * Tracks the state of a subsystem or a set of subsystems (restart order).
+ * Doing this avoids the need to grab each subsystem's lock and update
+ * each subsystem's state when restarting an order.
+ */
+struct subsys_tracking {
+	enum p_subsys_state p_state;
+	spinlock_t s_lock;
+	enum subsys_state state;
+	struct mutex lock;
+};
+
+/**
+ * struct subsys_soc_restart_order - subsystem restart order
+ * @device_ptrs: device tree nodes of the subsystems in this restart order
+ * @count: number of subsystems in order
+ * @track: state tracking and locking
+ * @subsys_ptrs: pointers to subsystems in this restart order
+ * @list: entry in the global ssr_order_list
+ */
+struct subsys_soc_restart_order {
+	struct device_node **device_ptrs;
+	int count;
+
+	struct subsys_tracking track;
+	struct subsys_device **subsys_ptrs;
+	struct list_head list;
+};
+
+struct restart_log {
+	struct timeval time;
+	struct subsys_device *dev;
+	struct list_head list;
+};
+
+/**
+ * struct subsys_device - subsystem device
+ * @desc: subsystem descriptor
+ * @work: context for subsystem_restart_wq_func() for this device
+ * @ssr_wlock: prevents suspend during subsystem_restart()
+ * @wlname: name of wakeup source
+ * @device_restart_work: work struct for device restart
+ * @track: state tracking and locking
+ * @notify: subsys notify handle
+ * @dev: device
+ * @owner: module that provides @desc
+ * @count: reference count of subsystem_get()/subsystem_put()
+ * @id: ida-allocated id of this subsystem device
+ * @restart_level: restart level (RESET_SOC or RESET_SUBSYS_COUPLED)
+ * @restart_order: order of other devices this devices restarts with
+ * @crash_count: number of times the device has crashed
+ * @do_ramdump_on_put: ramdump on subsystem_put() if true
+ * @err_ready: completion variable to record error ready from subsystem
+ * @crashed: indicates if subsystem has crashed
+ * @notif_state: current state of subsystem in terms of subsys notifications
+ */
+struct subsys_device {
+	struct subsys_desc *desc;
+	struct work_struct work;
+	struct wakeup_source ssr_wlock;
+	char wlname[64];
+	char error_buf[64];
+	struct work_struct device_restart_work;
+	struct subsys_tracking track;
+
+	void *notify;
+	struct device dev;
+	struct module *owner;
+	int count;
+	int id;
+	int restart_level;
+	int crash_count;
+	struct subsys_soc_restart_order *restart_order;
+	bool do_ramdump_on_put;
+	struct cdev char_dev;
+	dev_t dev_no;
+	struct completion err_ready;
+	enum crash_status crashed;
+	int notif_state;
+	struct list_head list;
+};
+
+static struct subsys_device *to_subsys(struct device *d)
+{
+	return container_of(d, struct subsys_device, dev);
+}
+
+void complete_err_ready(struct subsys_device *subsys)
+{
+	complete(&subsys->err_ready);
+}
+
+static struct subsys_tracking *subsys_get_track(struct subsys_device *subsys)
+{
+	struct subsys_soc_restart_order *order = subsys->restart_order;
+
+	if (order)
+		return &order->track;
+	else
+		return &subsys->track;
+}
+
+static ssize_t name_show(struct device *dev, struct device_attribute *attr,
+		char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%s\n", to_subsys(dev)->desc->name);
+}
+
+static ssize_t state_show(struct device *dev, struct device_attribute *attr,
+		char *buf)
+{
+	enum subsys_state state = to_subsys(dev)->track.state;
+	return snprintf(buf, PAGE_SIZE, "%s\n", subsys_states[state]);
+}
+
+static ssize_t crash_count_show(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%d\n", to_subsys(dev)->crash_count);
+}
+
+static ssize_t
+restart_level_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	int level = to_subsys(dev)->restart_level;
+	return snprintf(buf, PAGE_SIZE, "%s\n", restart_levels[level]);
+}
+
+static ssize_t restart_level_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct subsys_device *subsys = to_subsys(dev);
+	const char *p;
+	int i, orig_count = count;
+
+	p = memchr(buf, '\n', count);
+	if (p)
+		count = p - buf;
+
+	for (i = 0; i < ARRAY_SIZE(restart_levels); i++)
+		if (!strncasecmp(buf, restart_levels[i], count)) {
+			subsys->restart_level = i;
+			return orig_count;
+		}
+	return -EPERM;
+}
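+
+/*
+ * The restart level can be changed from userspace, e.g. (device index
+ * assumed):
+ *	echo RELATED > /sys/bus/msm_subsys/devices/subsys0/restart_level
+ */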
+
+static ssize_t firmware_name_show(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%s\n", to_subsys(dev)->desc->fw_name);
+}
+
+static ssize_t firmware_name_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct subsys_device *subsys = to_subsys(dev);
+	struct subsys_tracking *track = subsys_get_track(subsys);
+	const char *p;
+	int orig_count = count;
+
+	p = memchr(buf, '\n', count);
+	if (p)
+		count = p - buf;
+
+	pr_info("Changing subsys fw_name to %s\n", buf);
+	mutex_lock(&track->lock);
+	strlcpy(subsys->desc->fw_name, buf,
+			min(count + 1, sizeof(subsys->desc->fw_name)));
+	mutex_unlock(&track->lock);
+	return orig_count;
+}
+
+static ssize_t system_debug_show(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	struct subsys_device *subsys = to_subsys(dev);
+	char p[6] = "set";
+
+	if (!subsys->desc->system_debug)
+		strlcpy(p, "reset", sizeof(p));
+
+	return snprintf(buf, PAGE_SIZE, "%s\n", p);
+}
+
+static ssize_t system_debug_store(struct device *dev,
+				struct device_attribute *attr, const char *buf,
+				size_t count)
+{
+	struct subsys_device *subsys = to_subsys(dev);
+	const char *p;
+	int orig_count = count;
+
+	p = memchr(buf, '\n', count);
+	if (p)
+		count = p - buf;
+
+	if (!strncasecmp(buf, "set", count))
+		subsys->desc->system_debug = true;
+	else if (!strncasecmp(buf, "reset", count))
+		subsys->desc->system_debug = false;
+	else
+		return -EPERM;
+	return orig_count;
+}
+
+int subsys_get_restart_level(struct subsys_device *dev)
+{
+	return dev->restart_level;
+}
+EXPORT_SYMBOL(subsys_get_restart_level);
+
+static void subsys_set_state(struct subsys_device *subsys,
+			     enum subsys_state state)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&subsys->track.s_lock, flags);
+	if (subsys->track.state != state) {
+		subsys->track.state = state;
+		spin_unlock_irqrestore(&subsys->track.s_lock, flags);
+		sysfs_notify(&subsys->dev.kobj, NULL, "state");
+		return;
+	}
+	spin_unlock_irqrestore(&subsys->track.s_lock, flags);
+}
+
+static ssize_t error_show(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%s\n", to_subsys(dev)->error_buf);
+}
+
+/**
+ * subsytem_default_online() - Mark a subsystem as online by default
+ * @dev: subsystem to mark as online
+ *
+ * Marks a subsystem as "online" without increasing the reference count
+ * on the subsystem. This is typically used by subsystems that are already
+ * online when the kernel boots up.
+ */
+void subsys_default_online(struct subsys_device *dev)
+{
+	subsys_set_state(dev, SUBSYS_ONLINE);
+}
+EXPORT_SYMBOL(subsys_default_online);
+
+static struct device_attribute subsys_attrs[] = {
+	__ATTR_RO(name),
+	__ATTR_RO(state),
+	__ATTR_RO(crash_count),
+	__ATTR_RO(error),
+	__ATTR(restart_level, 0644, restart_level_show, restart_level_store),
+	__ATTR(firmware_name, 0644, firmware_name_show, firmware_name_store),
+	__ATTR(system_debug, 0644, system_debug_show, system_debug_store),
+	__ATTR_NULL,
+};
+
+struct bus_type subsys_bus_type = {
+	.name		= "msm_subsys",
+	.dev_attrs	= subsys_attrs,
+};
+EXPORT_SYMBOL(subsys_bus_type);
+
+static DEFINE_IDA(subsys_ida);
+
+static int enable_ramdumps;
+module_param(enable_ramdumps, int, S_IRUGO | S_IWUSR);
+
+static int enable_mini_ramdumps;
+module_param(enable_mini_ramdumps, int, S_IRUGO | S_IWUSR);
+
+struct workqueue_struct *ssr_wq;
+static struct class *char_class;
+
+static LIST_HEAD(restart_log_list);
+static LIST_HEAD(subsys_list);
+static LIST_HEAD(ssr_order_list);
+static DEFINE_MUTEX(soc_order_reg_lock);
+static DEFINE_MUTEX(restart_log_mutex);
+static DEFINE_MUTEX(subsys_list_lock);
+static DEFINE_MUTEX(char_device_lock);
+static DEFINE_MUTEX(ssr_order_mutex);
+
+static struct subsys_soc_restart_order *
+update_restart_order(struct subsys_device *dev)
+{
+	int i;
+	struct subsys_soc_restart_order *order;
+	struct device_node *device = dev->desc->dev->of_node;
+
+	mutex_lock(&soc_order_reg_lock);
+	list_for_each_entry(order, &ssr_order_list, list) {
+		for (i = 0; i < order->count; i++) {
+			if (order->device_ptrs[i] == device) {
+				order->subsys_ptrs[i] = dev;
+				goto found;
+			}
+		}
+	}
+	order = NULL;
+found:
+	mutex_unlock(&soc_order_reg_lock);
+
+	return order;
+}
+
+static int max_restarts;
+module_param(max_restarts, int, 0644);
+
+static long max_history_time = 3600;
+module_param(max_history_time, long, 0644);
+
+static void do_epoch_check(struct subsys_device *dev)
+{
+	int n = 0;
+	struct timeval *time_first = NULL, *curr_time;
+	struct restart_log *r_log, *temp;
+	static int max_restarts_check;
+	static long max_history_time_check;
+
+	mutex_lock(&restart_log_mutex);
+
+	max_restarts_check = max_restarts;
+	max_history_time_check = max_history_time;
+
+	/* Check if epoch checking is enabled */
+	if (!max_restarts_check)
+		goto out;
+
+	r_log = kmalloc(sizeof(struct restart_log), GFP_KERNEL);
+	if (!r_log)
+		goto out;
+	r_log->dev = dev;
+	do_gettimeofday(&r_log->time);
+	curr_time = &r_log->time;
+	INIT_LIST_HEAD(&r_log->list);
+
+	list_add_tail(&r_log->list, &restart_log_list);
+
+	list_for_each_entry_safe(r_log, temp, &restart_log_list, list) {
+
+		if ((curr_time->tv_sec - r_log->time.tv_sec) >
+				max_history_time_check) {
+
+			pr_debug("Deleted node with restart_time = %ld\n",
+					r_log->time.tv_sec);
+			list_del(&r_log->list);
+			kfree(r_log);
+			continue;
+		}
+		if (!n) {
+			time_first = &r_log->time;
+			pr_debug("Time_first: %ld\n", time_first->tv_sec);
+		}
+		n++;
+		pr_debug("Restart_time: %ld\n", r_log->time.tv_sec);
+	}
+
+	if (time_first && n >= max_restarts_check) {
+		if ((curr_time->tv_sec - time_first->tv_sec) <
+				max_history_time_check)
+			panic("Subsystems have crashed %d times in less than "
+				"%ld seconds!", max_restarts_check,
+				max_history_time_check);
+	}
+
+out:
+	mutex_unlock(&restart_log_mutex);
+}
+
+static int is_ramdump_enabled(struct subsys_device *dev)
+{
+	if (dev->desc->ramdump_disable_gpio)
+		return !dev->desc->ramdump_disable;
+
+	return enable_ramdumps;
+}
+
+static void send_sysmon_notif(struct subsys_device *dev)
+{
+	struct subsys_device *subsys;
+
+	mutex_lock(&subsys_list_lock);
+	list_for_each_entry(subsys, &subsys_list, list)
+		if ((subsys->notif_state > 0) && (subsys != dev))
+			sysmon_send_event(dev->desc, subsys->desc,
+						subsys->notif_state);
+	mutex_unlock(&subsys_list_lock);
+}
+
+static int for_each_subsys_device(struct subsys_device **list, unsigned count,
+		void *data, int (*fn)(struct subsys_device *, void *))
+{
+	int ret;
+	while (count--) {
+		struct subsys_device *dev = *list++;
+		if (!dev)
+			continue;
+		ret = fn(dev, data);
+		if (ret)
+			return ret;
+	}
+	return 0;
+}
+
+static void notify_each_subsys_device(struct subsys_device **list,
+		unsigned count,
+		enum subsys_notif_type notif, void *data)
+{
+	struct subsys_device *subsys;
+
+	while (count--) {
+		struct subsys_device *dev = *list++;
+		struct notif_data notif_data;
+		struct platform_device *pdev;
+
+		if (!dev)
+			continue;
+
+		pdev = container_of(dev->desc->dev, struct platform_device,
+									dev);
+		dev->notif_state = notif;
+
+		mutex_lock(&subsys_list_lock);
+		list_for_each_entry(subsys, &subsys_list, list)
+			if (dev != subsys &&
+				subsys->track.state == SUBSYS_ONLINE)
+				sysmon_send_event(subsys->desc, dev->desc,
+								notif);
+		mutex_unlock(&subsys_list_lock);
+
+		if (notif == SUBSYS_AFTER_POWERUP &&
+				dev->track.state == SUBSYS_ONLINE)
+			send_sysmon_notif(dev);
+
+		notif_data.crashed = subsys_get_crash_status(dev);
+		notif_data.enable_ramdump = is_ramdump_enabled(dev);
+		notif_data.enable_mini_ramdumps = enable_mini_ramdumps;
+		notif_data.no_auth = dev->desc->no_auth;
+		notif_data.pdev = pdev;
+
+		trace_pil_notif("before_send_notif", notif, dev->desc->fw_name);
+		subsys_notif_queue_notification(dev->notify, notif,
+								&notif_data);
+		trace_pil_notif("after_send_notif", notif, dev->desc->fw_name);
+	}
+}
+
+static void enable_all_irqs(struct subsys_device *dev)
+{
+	if (dev->desc->err_ready_irq)
+		enable_irq(dev->desc->err_ready_irq);
+	if (dev->desc->wdog_bite_irq && dev->desc->wdog_bite_handler) {
+		enable_irq(dev->desc->wdog_bite_irq);
+		irq_set_irq_wake(dev->desc->wdog_bite_irq, 1);
+	}
+	if (dev->desc->err_fatal_irq && dev->desc->err_fatal_handler)
+		enable_irq(dev->desc->err_fatal_irq);
+	if (dev->desc->stop_ack_irq && dev->desc->stop_ack_handler)
+		enable_irq(dev->desc->stop_ack_irq);
+	if (dev->desc->generic_irq && dev->desc->generic_handler) {
+		enable_irq(dev->desc->generic_irq);
+		irq_set_irq_wake(dev->desc->generic_irq, 1);
+	}
+}
+
+static void disable_all_irqs(struct subsys_device *dev)
+{
+	if (dev->desc->err_ready_irq)
+		disable_irq(dev->desc->err_ready_irq);
+	if (dev->desc->wdog_bite_irq && dev->desc->wdog_bite_handler) {
+		disable_irq(dev->desc->wdog_bite_irq);
+		irq_set_irq_wake(dev->desc->wdog_bite_irq, 0);
+	}
+	if (dev->desc->err_fatal_irq && dev->desc->err_fatal_handler)
+		disable_irq(dev->desc->err_fatal_irq);
+	if (dev->desc->stop_ack_irq && dev->desc->stop_ack_handler)
+		disable_irq(dev->desc->stop_ack_irq);
+	if (dev->desc->generic_irq && dev->desc->generic_handler) {
+		disable_irq(dev->desc->generic_irq);
+		irq_set_irq_wake(dev->desc->generic_irq, 0);
+	}
+}
+
+static int wait_for_err_ready(struct subsys_device *subsys)
+{
+	int ret;
+
+	/*
+	 * If the subsystem uses generic_irq, err_ready_irq will be 0;
+	 * don't bail out early in that case.
+	 */
+	if ((subsys->desc->generic_irq <= 0 && !subsys->desc->err_ready_irq) ||
+				enable_debug == 1 || is_timeout_disabled())
+		return 0;
+
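+	/* Wait up to 10 s for the subsystem to signal error-ready */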
+	ret = wait_for_completion_timeout(&subsys->err_ready,
+					  msecs_to_jiffies(10000));
+	if (!ret) {
+		pr_err("[%s]: Error ready timed out\n", subsys->desc->name);
+		return -ETIMEDOUT;
+	}
+
+	return 0;
+}
+
+static int subsystem_shutdown(struct subsys_device *dev, void *data)
+{
+	const char *name = dev->desc->name;
+	int ret;
+
+	pr_info("[%s:%d]: Shutting down %s\n",
+			current->comm, current->pid, name);
+	ret = dev->desc->shutdown(dev->desc, true);
+	if (ret < 0) {
+		if (!dev->desc->ignore_ssr_failure) {
+			panic("subsys-restart: [%s:%d]: Failed to shutdown %s!",
+				current->comm, current->pid, name);
+		} else {
+			pr_err("Shutdown failure on %s\n", name);
+			return ret;
+		}
+	}
+	dev->crash_count++;
+	subsys_set_state(dev, SUBSYS_OFFLINE);
+	disable_all_irqs(dev);
+
+	return 0;
+}
+
+static int subsystem_ramdump(struct subsys_device *dev, void *data)
+{
+	const char *name = dev->desc->name;
+
+	if (dev->desc->ramdump)
+		if (dev->desc->ramdump(is_ramdump_enabled(dev), dev->desc) < 0)
+			pr_warn("%s[%s:%d]: Ramdump failed.\n",
+				name, current->comm, current->pid);
+	dev->do_ramdump_on_put = false;
+	return 0;
+}
+
+static int subsystem_free_memory(struct subsys_device *dev, void *data)
+{
+	if (dev->desc->free_memory)
+		dev->desc->free_memory(dev->desc);
+	return 0;
+}
+
+static int subsystem_powerup(struct subsys_device *dev, void *data)
+{
+	const char *name = dev->desc->name;
+	int ret;
+
+	pr_info("[%s:%d]: Powering up %s\n", current->comm, current->pid, name);
+	init_completion(&dev->err_ready);
+
+	ret = dev->desc->powerup(dev->desc);
+	if (ret < 0) {
+		notify_each_subsys_device(&dev, 1, SUBSYS_POWERUP_FAILURE,
+								NULL);
+		if (!dev->desc->ignore_ssr_failure) {
+			panic("[%s:%d]: Powerup error: %s!",
+				current->comm, current->pid, name);
+		} else {
+			pr_err("Powerup failure on %s\n", name);
+			return ret;
+		}
+	}
+	enable_all_irqs(dev);
+
+	ret = wait_for_err_ready(dev);
+	if (ret) {
+		notify_each_subsys_device(&dev, 1, SUBSYS_POWERUP_FAILURE,
+								NULL);
+		if (!dev->desc->ignore_ssr_failure)
+			panic("[%s:%d]: Timed out waiting for error ready: %s!",
+				current->comm, current->pid, name);
+		else
+			return ret;
+	}
+	subsys_set_state(dev, SUBSYS_ONLINE);
+	subsys_set_crash_status(dev, CRASH_STATUS_NO_CRASH);
+
+	return 0;
+}
+
+static int __find_subsys(struct device *dev, void *data)
+{
+	struct subsys_device *subsys = to_subsys(dev);
+	return !strcmp(subsys->desc->name, data);
+}
+
+static struct subsys_device *find_subsys(const char *str)
+{
+	struct device *dev;
+
+	if (!str)
+		return NULL;
+
+	dev = bus_find_device(&subsys_bus_type, NULL, (void *)str,
+			__find_subsys);
+	return dev ? to_subsys(dev) : NULL;
+}
+
+static int subsys_start(struct subsys_device *subsys)
+{
+	int ret;
+
+	notify_each_subsys_device(&subsys, 1, SUBSYS_BEFORE_POWERUP,
+								NULL);
+
+	init_completion(&subsys->err_ready);
+	ret = subsys->desc->powerup(subsys->desc);
+	if (ret) {
+		notify_each_subsys_device(&subsys, 1, SUBSYS_POWERUP_FAILURE,
+									NULL);
+		return ret;
+	}
+	enable_all_irqs(subsys);
+
+	if (subsys->desc->is_not_loadable) {
+		subsys_set_state(subsys, SUBSYS_ONLINE);
+		return 0;
+	}
+
+	ret = wait_for_err_ready(subsys);
+	if (ret) {
+		/*
+		 * pil-boot succeeded, but we need to shut the device down
+		 * because error ready timed out.
+		 */
+		notify_each_subsys_device(&subsys, 1, SUBSYS_POWERUP_FAILURE,
+									NULL);
+		subsys->desc->shutdown(subsys->desc, false);
+		disable_all_irqs(subsys);
+		return ret;
+	}
+	subsys_set_state(subsys, SUBSYS_ONLINE);
+
+	notify_each_subsys_device(&subsys, 1, SUBSYS_AFTER_POWERUP,
+								NULL);
+	return ret;
+}
+
+static void subsys_stop(struct subsys_device *subsys)
+{
+	const char *name = subsys->desc->name;
+
+	notify_each_subsys_device(&subsys, 1, SUBSYS_BEFORE_SHUTDOWN, NULL);
+	if (!of_property_read_bool(subsys->desc->dev->of_node,
+					"qcom,pil-force-shutdown")) {
+		subsys_set_state(subsys, SUBSYS_OFFLINING);
+		subsys->desc->sysmon_shutdown_ret =
+				sysmon_send_shutdown(subsys->desc);
+		if (subsys->desc->sysmon_shutdown_ret)
+			pr_debug("Graceful shutdown failed for %s\n", name);
+	}
+
+	subsys->desc->shutdown(subsys->desc, false);
+	subsys_set_state(subsys, SUBSYS_OFFLINE);
+	disable_all_irqs(subsys);
+	notify_each_subsys_device(&subsys, 1, SUBSYS_AFTER_SHUTDOWN, NULL);
+}
+
+int subsystem_set_fwname(const char *name, const char *fw_name)
+{
+	struct subsys_device *subsys;
+
+	if (!name)
+		return -EINVAL;
+
+	if (!fw_name)
+		return -EINVAL;
+
+	subsys = find_subsys(name);
+	if (!subsys)
+		return -EINVAL;
+
+	pr_debug("Changing subsys [%s] fw_name to [%s]\n", name, fw_name);
+	strlcpy(subsys->desc->fw_name, fw_name,
+		sizeof(subsys->desc->fw_name));
+
+	return 0;
+}
+EXPORT_SYMBOL(subsystem_set_fwname);
+
+int wait_for_shutdown_ack(struct subsys_desc *desc)
+{
+	int count;
+	struct subsys_device *dev;
+
+	if (!desc || !desc->shutdown_ack_gpio)
+		return 0;
+
+	dev = find_subsys(desc->name);
+	if (!dev)
+		return 0;
+
+	for (count = SHUTDOWN_ACK_MAX_LOOPS; count > 0; count--) {
+		if (gpio_get_value(desc->shutdown_ack_gpio))
+			return count;
+		else if (subsys_get_crash_status(dev))
+			break;
+		msleep(SHUTDOWN_ACK_DELAY_MS);
+	}
+
+	pr_err("[%s]: Timed out waiting for shutdown ack\n", desc->name);
+	return -ETIMEDOUT;
+}
+EXPORT_SYMBOL(wait_for_shutdown_ack);
+
+void *__subsystem_get(const char *name, const char *fw_name)
+{
+	struct subsys_device *subsys;
+	struct subsys_device *subsys_d;
+	int ret;
+	void *retval;
+	struct subsys_tracking *track;
+
+	if (!name)
+		return NULL;
+
+	subsys = retval = find_subsys(name);
+	if (!subsys)
+		return ERR_PTR(-ENODEV);
+	if (!try_module_get(subsys->owner)) {
+		retval = ERR_PTR(-ENODEV);
+		goto err_module;
+	}
+
+	subsys_d = subsystem_get(subsys->desc->depends_on);
+	if (IS_ERR(subsys_d)) {
+		retval = subsys_d;
+		goto err_depends;
+	}
+
+	track = subsys_get_track(subsys);
+	mutex_lock(&track->lock);
+	if (!subsys->count) {
+		if (fw_name) {
+			pr_info("Changing subsys fw_name to %s\n", fw_name);
+			strlcpy(subsys->desc->fw_name, fw_name,
+				sizeof(subsys->desc->fw_name));
+		}
+		ret = subsys_start(subsys);
+		if (ret) {
+			retval = ERR_PTR(ret);
+			goto err_start;
+		}
+	}
+	subsys->count++;
+	mutex_unlock(&track->lock);
+	return retval;
+err_start:
+	mutex_unlock(&track->lock);
+	subsystem_put(subsys_d);
+err_depends:
+	module_put(subsys->owner);
+err_module:
+	put_device(&subsys->dev);
+	return retval;
+}
+
+/**
+ * subsystem_get() - Boot a subsystem
+ * @name: pointer to a string containing the name of the subsystem to boot
+ *
+ * This function returns a pointer if it succeeds. If an error occurs an
+ * ERR_PTR is returned.
+ *
+ * If this feature is disabled, the value %NULL will be returned.
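+ *
+ * Example (subsystem name assumed):
+ *	void *handle = subsystem_get("modem");
+ *	if (IS_ERR_OR_NULL(handle))
+ *		return;
+ *	...
+ *	subsystem_put(handle);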
+ */
+void *subsystem_get(const char *name)
+{
+	return __subsystem_get(name, NULL);
+}
+EXPORT_SYMBOL(subsystem_get);
+
+/**
+ * subsystem_get_with_fwname() - Boot a subsystem using the firmware name passed in
+ * @name: pointer to a string containing the name of the subsystem to boot
+ * @fw_name: pointer to a string containing the subsystem firmware image name
+ *
+ * This function returns a pointer if it succeeds. If an error occurs an
+ * ERR_PTR is returned.
+ *
+ * If this feature is disabled, the value %NULL will be returned.
+ */
+void *subsystem_get_with_fwname(const char *name, const char *fw_name)
+{
+	return __subsystem_get(name, fw_name);
+}
+EXPORT_SYMBOL(subsystem_get_with_fwname);
+
+/**
+ * subsystem_put() - Shutdown a subsystem
+ * @subsystem: pointer from a previous call to subsystem_get()
+ *
+ * The subsystem is not actually shut down until all callers of
+ * subsystem_get() have called subsystem_put().
+ */
+void subsystem_put(void *subsystem)
+{
+	struct subsys_device *subsys_d, *subsys = subsystem;
+	struct subsys_tracking *track;
+
+	if (IS_ERR_OR_NULL(subsys))
+		return;
+
+	track = subsys_get_track(subsys);
+	mutex_lock(&track->lock);
+	if (WARN(!subsys->count, "%s: %s: Reference count mismatch\n",
+			subsys->desc->name, __func__))
+		goto err_out;
+	if (!--subsys->count) {
+		subsys_stop(subsys);
+		if (subsys->do_ramdump_on_put)
+			subsystem_ramdump(subsys, NULL);
+		subsystem_free_memory(subsys, NULL);
+	}
+	mutex_unlock(&track->lock);
+
+	subsys_d = find_subsys(subsys->desc->depends_on);
+	if (subsys_d) {
+		subsystem_put(subsys_d);
+		put_device(&subsys_d->dev);
+	}
+	module_put(subsys->owner);
+	put_device(&subsys->dev);
+	return;
+err_out:
+	mutex_unlock(&track->lock);
+}
+EXPORT_SYMBOL(subsystem_put);
+
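+/*
+ * Top-level SSR sequence, run from ssr_wq: notify BEFORE_SHUTDOWN, shut down
+ * each device in the restart order, notify AFTER_SHUTDOWN and
+ * RAMDUMP_NOTIFICATION, collect ramdumps, free memory, then notify
+ * BEFORE_POWERUP, power each device back up and notify AFTER_POWERUP.
+ */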
+static void subsystem_restart_wq_func(struct work_struct *work)
+{
+	struct subsys_device *dev = container_of(work,
+						struct subsys_device, work);
+	struct subsys_device **list;
+	struct subsys_desc *desc = dev->desc;
+	struct subsys_soc_restart_order *order = dev->restart_order;
+	struct subsys_tracking *track;
+	unsigned count;
+	unsigned long flags;
+	int ret;
+
+	/*
+	 * It's OK to not take the registration lock at this point.
+	 * This is because the subsystem list inside the relevant
+	 * restart order is not being traversed.
+	 */
+	if (order) {
+		list = order->subsys_ptrs;
+		count = order->count;
+		track = &order->track;
+	} else {
+		list = &dev;
+		count = 1;
+		track = &dev->track;
+	}
+
+	/*
+	 * If a system reboot/shutdown is under way, ignore subsystem errors.
+	 * However, print a message so that we know that a subsystem behaved
+	 * unexpectedly here.
+	 */
+	if (system_state == SYSTEM_RESTART
+		|| system_state == SYSTEM_POWER_OFF) {
+		WARN(1, "SSR aborted: %s, system reboot/shutdown is under way\n",
+			desc->name);
+		return;
+	}
+
+	mutex_lock(&track->lock);
+	do_epoch_check(dev);
+
+	if (dev->track.state == SUBSYS_OFFLINE) {
+		mutex_unlock(&track->lock);
+		WARN(1, "SSR aborted: %s subsystem not online\n", desc->name);
+		return;
+	}
+
+	/*
+	 * It's necessary to take the registration lock because the subsystem
+	 * list in the SoC restart order will be traversed and it shouldn't be
+	 * changed until _this_ restart sequence completes.
+	 */
+	mutex_lock(&soc_order_reg_lock);
+
+	pr_debug("[%s:%d]: Starting restart sequence for %s\n",
+			current->comm, current->pid, desc->name);
+	notify_each_subsys_device(list, count, SUBSYS_BEFORE_SHUTDOWN, NULL);
+	ret = for_each_subsys_device(list, count, NULL, subsystem_shutdown);
+	if (ret)
+		goto err;
+	notify_each_subsys_device(list, count, SUBSYS_AFTER_SHUTDOWN, NULL);
+
+	notify_each_subsys_device(list, count, SUBSYS_RAMDUMP_NOTIFICATION,
+									NULL);
+
+	spin_lock_irqsave(&track->s_lock, flags);
+	track->p_state = SUBSYS_RESTARTING;
+	spin_unlock_irqrestore(&track->s_lock, flags);
+
+	/* Collect ram dumps for all subsystems in order here */
+	for_each_subsys_device(list, count, NULL, subsystem_ramdump);
+
+	for_each_subsys_device(list, count, NULL, subsystem_free_memory);
+
+	notify_each_subsys_device(list, count, SUBSYS_BEFORE_POWERUP, NULL);
+	ret = for_each_subsys_device(list, count, NULL, subsystem_powerup);
+	if (ret)
+		goto err;
+	notify_each_subsys_device(list, count, SUBSYS_AFTER_POWERUP, NULL);
+
+	pr_info("[%s:%d]: Restart sequence for %s completed.\n",
+			current->comm, current->pid, desc->name);
+
+err:
+	/* Reset subsys count */
+	if (ret)
+		dev->count = 0;
+
+	mutex_unlock(&soc_order_reg_lock);
+	mutex_unlock(&track->lock);
+
+	spin_lock_irqsave(&track->s_lock, flags);
+	track->p_state = SUBSYS_NORMAL;
+	__pm_relax(&dev->ssr_wlock);
+	spin_unlock_irqrestore(&track->s_lock, flags);
+}
+
+static void __subsystem_restart_dev(struct subsys_device *dev)
+{
+	struct subsys_desc *desc = dev->desc;
+	const char *name = dev->desc->name;
+	struct subsys_tracking *track;
+	unsigned long flags;
+
+	pr_debug("Restarting %s [level=%s]!\n", desc->name,
+			restart_levels[dev->restart_level]);
+
+	track = subsys_get_track(dev);
+	/*
+	 * Allow drivers to call subsystem_restart{_dev}() as many times as
+	 * they want up until the point where the subsystem is shutdown.
+	 */
+	spin_lock_irqsave(&track->s_lock, flags);
+	if (track->p_state != SUBSYS_CRASHED &&
+					dev->track.state == SUBSYS_ONLINE) {
+		if (track->p_state != SUBSYS_RESTARTING) {
+			track->p_state = SUBSYS_CRASHED;
+			__pm_stay_awake(&dev->ssr_wlock);
+			queue_work(ssr_wq, &dev->work);
+		} else {
+			panic("Subsystem %s crashed during SSR!", name);
+		}
+	} else
+		WARN(dev->track.state == SUBSYS_OFFLINE,
+			"SSR aborted: %s subsystem not online\n", name);
+	spin_unlock_irqrestore(&track->s_lock, flags);
+}
+
+static void device_restart_work_hdlr(struct work_struct *work)
+{
+	struct subsys_device *dev = container_of(work, struct subsys_device,
+							device_restart_work);
+
+	notify_each_subsys_device(&dev, 1, SUBSYS_SOC_RESET, NULL);
+	/*
+	 * Temporary workaround until ramdump userspace application calls
+	 * sync() and fclose() on attempting the dump.
+	 */
+	msleep(100);
+	panic("subsys-restart: Resetting the SoC - %s crashed.",
+							dev->desc->name);
+}
+
+int subsystem_restart_dev(struct subsys_device *dev)
+{
+	const char *name;
+
+	if (!get_device(&dev->dev))
+		return -ENODEV;
+
+	if (!try_module_get(dev->owner)) {
+		put_device(&dev->dev);
+		return -ENODEV;
+	}
+
+	name = dev->desc->name;
+
+	/*
+	 * If a system reboot/shutdown is underway, ignore subsystem errors.
+	 * However, print a message so that we know that a subsystem behaved
+	 * unexpectedly here.
+	 */
+	if (system_state == SYSTEM_RESTART
+		|| system_state == SYSTEM_POWER_OFF) {
+		pr_err("%s crashed during a system poweroff/shutdown.\n", name);
+		module_put(dev->owner);
+		put_device(&dev->dev);
+		return -EBUSY;
+	}
+
+	pr_info("Restart sequence requested for %s, restart_level = %s.\n",
+		name, restart_levels[dev->restart_level]);
+
+	if (disable_restart_work == DISABLE_SSR) {
+		pr_warn("subsys-restart: Ignoring restart request for %s.\n",
+									name);
+		module_put(dev->owner);
+		put_device(&dev->dev);
+		return 0;
+	}
+
+	switch (dev->restart_level) {
+
+	case RESET_SUBSYS_COUPLED:
+		__subsystem_restart_dev(dev);
+		break;
+	case RESET_SOC:
+		__pm_stay_awake(&dev->ssr_wlock);
+		schedule_work(&dev->device_restart_work);
+		return 0;
+	default:
+		panic("subsys-restart: Unknown restart level!\n");
+		break;
+	}
+	module_put(dev->owner);
+	put_device(&dev->dev);
+
+	return 0;
+}
+EXPORT_SYMBOL(subsystem_restart_dev);
+
+int subsystem_restart(const char *name)
+{
+	int ret;
+	struct subsys_device *dev = find_subsys(name);
+
+	if (!dev)
+		return -ENODEV;
+
+	ret = subsystem_restart_dev(dev);
+	put_device(&dev->dev);
+	return ret;
+}
+EXPORT_SYMBOL(subsystem_restart);
+
+int subsystem_crashed(const char *name)
+{
+	struct subsys_device *dev = find_subsys(name);
+	struct subsys_tracking *track;
+
+	if (!dev)
+		return -ENODEV;
+
+	if (!get_device(&dev->dev))
+		return -ENODEV;
+
+	track = subsys_get_track(dev);
+
+	mutex_lock(&track->lock);
+	dev->do_ramdump_on_put = true;
+	/*
+	 * TODO: Make this work with multiple consumers where one is calling
+	 * subsystem_restart() and another is calling this function. To do
+	 * so would require updating private state, etc.
+	 */
+	mutex_unlock(&track->lock);
+
+	put_device(&dev->dev);
+	return 0;
+}
+EXPORT_SYMBOL(subsystem_crashed);
+
+void subsys_set_crash_status(struct subsys_device *dev,
+				enum crash_status crashed)
+{
+	dev->crashed = crashed;
+}
+
+enum crash_status subsys_get_crash_status(struct subsys_device *dev)
+{
+	return dev->crashed;
+}
+
+void subsys_set_error(struct subsys_device *dev, const char *error_msg)
+{
+	snprintf(dev->error_buf, sizeof(dev->error_buf), "%s", error_msg);
+	sysfs_notify(&dev->dev.kobj, NULL, "error");
+}
+
+static struct subsys_device *desc_to_subsys(struct device *d)
+{
+	struct subsys_device *device, *subsys_dev = NULL;
+
+	mutex_lock(&subsys_list_lock);
+	list_for_each_entry(device, &subsys_list, list)
+		if (device->desc->dev == d)
+			subsys_dev = device;
+	mutex_unlock(&subsys_list_lock);
+	return subsys_dev;
+}
+
+void notify_proxy_vote(struct device *device)
+{
+	struct subsys_device *dev = desc_to_subsys(device);
+
+	if (dev)
+		notify_each_subsys_device(&dev, 1, SUBSYS_PROXY_VOTE, NULL);
+}
+
+void notify_proxy_unvote(struct device *device)
+{
+	struct subsys_device *dev = desc_to_subsys(device);
+
+	if (dev)
+		notify_each_subsys_device(&dev, 1, SUBSYS_PROXY_UNVOTE, NULL);
+}
+
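+/*
+ * Character device interface: with standard udev naming this typically
+ * appears as /dev/subsys_<name>. open() boots the subsystem via
+ * subsystem_get_with_fwname(); the matching release() shuts it down again
+ * through subsystem_put().
+ */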
+static int subsys_device_open(struct inode *inode, struct file *file)
+{
+	struct subsys_device *device, *subsys_dev = NULL;
+	void *retval;
+
+	mutex_lock(&subsys_list_lock);
+	list_for_each_entry(device, &subsys_list, list)
+		if (MINOR(device->dev_no) == iminor(inode))
+			subsys_dev = device;
+	mutex_unlock(&subsys_list_lock);
+
+	if (!subsys_dev)
+		return -EINVAL;
+
+	retval = subsystem_get_with_fwname(subsys_dev->desc->name,
+					subsys_dev->desc->fw_name);
+	if (IS_ERR(retval))
+		return PTR_ERR(retval);
+
+	return 0;
+}
+
+static int subsys_device_close(struct inode *inode, struct file *file)
+{
+	struct subsys_device *device, *subsys_dev = NULL;
+
+	mutex_lock(&subsys_list_lock);
+	list_for_each_entry(device, &subsys_list, list)
+		if (MINOR(device->dev_no) == iminor(inode))
+			subsys_dev = device;
+	mutex_unlock(&subsys_list_lock);
+
+	if (!subsys_dev)
+		return -EINVAL;
+
+	subsystem_put(subsys_dev);
+	return 0;
+}
+
+static const struct file_operations subsys_device_fops = {
+		.owner = THIS_MODULE,
+		.open = subsys_device_open,
+		.release = subsys_device_close,
+};
+
+static void subsys_device_release(struct device *dev)
+{
+	struct subsys_device *subsys = to_subsys(dev);
+
+	wakeup_source_trash(&subsys->ssr_wlock);
+	mutex_destroy(&subsys->track.lock);
+	ida_simple_remove(&subsys_ida, subsys->id);
+	kfree(subsys);
+}
+static irqreturn_t subsys_err_ready_intr_handler(int irq, void *subsys)
+{
+	struct subsys_device *subsys_dev = subsys;
+	dev_info(subsys_dev->desc->dev,
+		"Subsystem error monitoring/handling services are up\n");
+
+	if (subsys_dev->desc->is_not_loadable)
+		return IRQ_HANDLED;
+
+	complete(&subsys_dev->err_ready);
+	return IRQ_HANDLED;
+}
+
+static int subsys_char_device_add(struct subsys_device *subsys_dev)
+{
+	int ret = 0;
+	static int major, minor;
+	dev_t dev_no;
+	struct device *device;
+
+	mutex_lock(&char_device_lock);
+	if (!major) {
+		ret = alloc_chrdev_region(&dev_no, 0, 4, "subsys");
+		if (ret < 0) {
+			pr_err("Failed to alloc subsys_dev region, err %d\n",
+									ret);
+			goto fail;
+		}
+		major = MAJOR(dev_no);
+		minor = MINOR(dev_no);
+	} else
+		dev_no = MKDEV(major, minor);
+
+	device = device_create(char_class, subsys_dev->desc->dev, dev_no,
+			NULL, "subsys_%s", subsys_dev->desc->name);
+	if (IS_ERR(device)) {
+		pr_err("Failed to create subsys_%s device\n",
+						subsys_dev->desc->name);
+		ret = PTR_ERR(device);
+		goto fail_unregister_cdev_region;
+	}
+
+	cdev_init(&subsys_dev->char_dev, &subsys_device_fops);
+	subsys_dev->char_dev.owner = THIS_MODULE;
+	ret = cdev_add(&subsys_dev->char_dev, dev_no, 1);
+	if (ret < 0)
+		goto fail_destroy_device;
+
+	subsys_dev->dev_no = dev_no;
+	minor++;
+	mutex_unlock(&char_device_lock);
+
+	return 0;
+
+fail_destroy_device:
+	device_destroy(char_class, dev_no);
+fail_unregister_cdev_region:
+	unregister_chrdev_region(dev_no, 1);
+fail:
+	mutex_unlock(&char_device_lock);
+	return ret;
+}
+
+static void subsys_char_device_remove(struct subsys_device *subsys_dev)
+{
+	cdev_del(&subsys_dev->char_dev);
+	device_destroy(char_class, subsys_dev->dev_no);
+	unregister_chrdev_region(subsys_dev->dev_no, 1);
+}
+
+static void subsys_remove_restart_order(struct device_node *device)
+{
+	struct subsys_soc_restart_order *order;
+	int i;
+
+	mutex_lock(&ssr_order_mutex);
+	list_for_each_entry(order, &ssr_order_list, list)
+		for (i = 0; i < order->count; i++)
+			if (order->device_ptrs[i] == device)
+				order->subsys_ptrs[i] = NULL;
+	mutex_unlock(&ssr_order_mutex);
+}
+
+static struct subsys_soc_restart_order *
+ssr_parse_restart_orders(struct subsys_desc *desc)
+{
+	int i, j, count, num = 0;
+	struct subsys_soc_restart_order *order, *tmp;
+	struct device *dev = desc->dev;
+	struct device_node *ssr_node;
+	uint32_t len;
+
+	if (!of_get_property(dev->of_node, "qcom,restart-group", &len))
+		return NULL;
+
+	count = len/sizeof(uint32_t);
+
+	order = devm_kzalloc(dev, sizeof(*order), GFP_KERNEL);
+	if (!order)
+		return ERR_PTR(-ENOMEM);
+
+	order->subsys_ptrs = devm_kzalloc(dev,
+				count * sizeof(struct subsys_device *),
+				GFP_KERNEL);
+	if (!order->subsys_ptrs)
+		return ERR_PTR(-ENOMEM);
+
+	order->device_ptrs = devm_kzalloc(dev,
+				count * sizeof(struct device_node *),
+				GFP_KERNEL);
+	if (!order->device_ptrs)
+		return ERR_PTR(-ENOMEM);
+
+	for (i = 0; i < count; i++) {
+		ssr_node = of_parse_phandle(dev->of_node,
+						"qcom,restart-group", i);
+		if (!ssr_node)
+			return ERR_PTR(-ENXIO);
+		pr_info("%s device has been added to %s's restart group\n",
+						ssr_node->name, desc->name);
+		order->device_ptrs[i] = ssr_node;
+		of_node_put(ssr_node);
+	}
+
+	/*
+	 * Check for similar restart groups. If found, return
+	 * without adding the new group to the ssr_order_list.
+	 */
+	mutex_lock(&ssr_order_mutex);
+	list_for_each_entry(tmp, &ssr_order_list, list) {
+		for (i = 0; i < count; i++) {
+			for (j = 0; j < count; j++) {
+				if (order->device_ptrs[j] !=
+					tmp->device_ptrs[i])
+					continue;
+				else
+					num++;
+			}
+		}
+
+		if (num == count && tmp->count == count)
+			goto err;
+		else if (num) {
+			tmp = ERR_PTR(-EINVAL);
+			goto err;
+		}
+	}
+
+	order->count = count;
+	mutex_init(&order->track.lock);
+	spin_lock_init(&order->track.s_lock);
+
+	INIT_LIST_HEAD(&order->list);
+	list_add_tail(&order->list, &ssr_order_list);
+	mutex_unlock(&ssr_order_mutex);
+
+	return order;
+err:
+	mutex_unlock(&ssr_order_mutex);
+	return tmp;
+}
+
+static int __get_gpio(struct subsys_desc *desc, const char *prop,
+		int *gpio)
+{
+	struct device_node *dnode = desc->dev->of_node;
+	int ret = -ENOENT;
+
+	if (of_find_property(dnode, prop, NULL)) {
+		*gpio = of_get_named_gpio(dnode, prop, 0);
+		ret = *gpio < 0 ? *gpio : 0;
+	}
+
+	return ret;
+}
+
+static int __get_irq(struct subsys_desc *desc, const char *prop,
+		unsigned int *irq, int *gpio)
+{
+	int ret, gpiol, irql;
+
+	ret = __get_gpio(desc, prop, &gpiol);
+	if (ret)
+		return ret;
+
+	irql = gpio_to_irq(gpiol);
+
+	if (irql == -ENOENT)
+		irql = -ENXIO;
+
+	if (irql < 0) {
+		pr_err("[%s]: Error getting IRQ \"%s\"\n", desc->name,
+				prop);
+		return irql;
+	} else {
+		if (gpio)
+			*gpio = gpiol;
+		*irq = irql;
+	}
+
+	return 0;
+}
+
+static int subsys_parse_devicetree(struct subsys_desc *desc)
+{
+	struct subsys_soc_restart_order *order;
+	int ret;
+
+	struct platform_device *pdev = container_of(desc->dev,
+					struct platform_device, dev);
+
+	ret = __get_irq(desc, "qcom,gpio-err-fatal", &desc->err_fatal_irq,
+							&desc->err_fatal_gpio);
+	if (ret && ret != -ENOENT)
+		return ret;
+
+	ret = __get_irq(desc, "qcom,gpio-err-ready", &desc->err_ready_irq,
+							NULL);
+	if (ret && ret != -ENOENT)
+		return ret;
+
+	ret = __get_irq(desc, "qcom,gpio-stop-ack", &desc->stop_ack_irq, NULL);
+	if (ret && ret != -ENOENT)
+		return ret;
+
+	ret = __get_gpio(desc, "qcom,gpio-force-stop", &desc->force_stop_gpio);
+	if (ret && ret != -ENOENT)
+		return ret;
+
+	ret = __get_gpio(desc, "qcom,gpio-ramdump-disable",
+			&desc->ramdump_disable_gpio);
+	if (ret && ret != -ENOENT)
+		return ret;
+
+	ret = __get_gpio(desc, "qcom,gpio-shutdown-ack",
+			&desc->shutdown_ack_gpio);
+	if (ret && ret != -ENOENT)
+		return ret;
+
+	ret = platform_get_irq(pdev, 0);
+	if (ret > 0)
+		desc->wdog_bite_irq = ret;
+
+	if (of_property_read_bool(pdev->dev.of_node,
+					"qcom,pil-generic-irq-handler")) {
+		ret = platform_get_irq(pdev, 0);
+		if (ret > 0)
+			desc->generic_irq = ret;
+	}
+
+	desc->ignore_ssr_failure = of_property_read_bool(pdev->dev.of_node,
+						"qcom,ignore-ssr-failure");
+
+	order = ssr_parse_restart_orders(desc);
+	if (IS_ERR(order)) {
+		pr_err("Could not initialize SSR restart order, err = %ld\n",
+							PTR_ERR(order));
+		return PTR_ERR(order);
+	}
+
+	return 0;
+}
+
+static int subsys_setup_irqs(struct subsys_device *subsys)
+{
+	struct subsys_desc *desc = subsys->desc;
+	int ret;
+
+	if (desc->err_fatal_irq && desc->err_fatal_handler) {
+		ret = devm_request_irq(desc->dev, desc->err_fatal_irq,
+				desc->err_fatal_handler,
+				IRQF_TRIGGER_RISING, desc->name, desc);
+		if (ret < 0) {
+			dev_err(desc->dev, "[%s]: Unable to register error fatal IRQ handler!: %d\n",
+				desc->name, ret);
+			return ret;
+		}
+		disable_irq(desc->err_fatal_irq);
+	}
+
+	if (desc->stop_ack_irq && desc->stop_ack_handler) {
+		ret = devm_request_irq(desc->dev, desc->stop_ack_irq,
+			desc->stop_ack_handler,
+			IRQF_TRIGGER_RISING, desc->name, desc);
+		if (ret < 0) {
+			dev_err(desc->dev, "[%s]: Unable to register stop ack handler!: %d\n",
+				desc->name, ret);
+			return ret;
+		}
+		disable_irq(desc->stop_ack_irq);
+	}
+
+	if (desc->wdog_bite_irq && desc->wdog_bite_handler) {
+		ret = devm_request_irq(desc->dev, desc->wdog_bite_irq,
+			desc->wdog_bite_handler,
+			IRQF_TRIGGER_RISING, desc->name, desc);
+		if (ret < 0) {
+			dev_err(desc->dev, "[%s]: Unable to register wdog bite handler!: %d\n",
+				desc->name, ret);
+			return ret;
+		}
+		disable_irq(desc->wdog_bite_irq);
+	}
+
+	if (desc->generic_irq && desc->generic_handler) {
+		ret = devm_request_irq(desc->dev, desc->generic_irq,
+			desc->generic_handler,
+			IRQF_TRIGGER_HIGH, desc->name, desc);
+		if (ret < 0) {
+			dev_err(desc->dev, "[%s]: Unable to register generic irq handler!: %d\n",
+				desc->name, ret);
+			return ret;
+		}
+		disable_irq(desc->generic_irq);
+	}
+
+	if (desc->err_ready_irq) {
+		ret = devm_request_irq(desc->dev,
+					desc->err_ready_irq,
+					subsys_err_ready_intr_handler,
+					IRQF_TRIGGER_RISING,
+					"error_ready_interrupt", subsys);
+		if (ret < 0) {
+			dev_err(desc->dev,
+				"[%s]: Unable to register err ready handler\n",
+				desc->name);
+			return ret;
+		}
+		disable_irq(desc->err_ready_irq);
+	}
+
+	return 0;
+}
+
+static void subsys_free_irqs(struct subsys_device *subsys)
+{
+	struct subsys_desc *desc = subsys->desc;
+
+	if (desc->err_fatal_irq && desc->err_fatal_handler)
+		devm_free_irq(desc->dev, desc->err_fatal_irq, desc);
+	if (desc->stop_ack_irq && desc->stop_ack_handler)
+		devm_free_irq(desc->dev, desc->stop_ack_irq, desc);
+	if (desc->wdog_bite_irq && desc->wdog_bite_handler)
+		devm_free_irq(desc->dev, desc->wdog_bite_irq, desc);
+	if (desc->err_ready_irq)
+		devm_free_irq(desc->dev, desc->err_ready_irq, subsys);
+}
+
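+/**
+ * subsys_register() - register a subsystem with the restart framework
+ * @desc: descriptor provided by the PIL driver (e.g. subsys-pil-tz)
+ *
+ * Creates the sysfs and character devices, parses the devicetree
+ * properties, sets up the error/watchdog IRQs, registers with sysmon and
+ * adds the device to subsys_list. Returns the new subsys_device or an
+ * ERR_PTR on failure.
+ */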
+struct subsys_device *subsys_register(struct subsys_desc *desc)
+{
+	struct subsys_device *subsys;
+	struct device_node *ofnode = desc->dev->of_node;
+	int ret;
+
+	subsys = kzalloc(sizeof(*subsys), GFP_KERNEL);
+	if (!subsys)
+		return ERR_PTR(-ENOMEM);
+
+	subsys->desc = desc;
+	subsys->owner = desc->owner;
+	subsys->dev.parent = desc->dev;
+	subsys->dev.bus = &subsys_bus_type;
+	subsys->dev.release = subsys_device_release;
+	subsys->notif_state = -1;
+	subsys->desc->sysmon_pid = -1;
+	strlcpy(subsys->desc->fw_name, desc->name,
+			sizeof(subsys->desc->fw_name));
+
+	subsys->notify = subsys_notif_add_subsys(desc->name);
+
+	snprintf(subsys->wlname, sizeof(subsys->wlname), "ssr(%s)", desc->name);
+	wakeup_source_init(&subsys->ssr_wlock, subsys->wlname);
+	INIT_WORK(&subsys->work, subsystem_restart_wq_func);
+	INIT_WORK(&subsys->device_restart_work, device_restart_work_hdlr);
+	spin_lock_init(&subsys->track.s_lock);
+
+	subsys->id = ida_simple_get(&subsys_ida, 0, 0, GFP_KERNEL);
+	if (subsys->id < 0) {
+		wakeup_source_trash(&subsys->ssr_wlock);
+		ret = subsys->id;
+		kfree(subsys);
+		return ERR_PTR(ret);
+	}
+
+	dev_set_name(&subsys->dev, "subsys%d", subsys->id);
+
+	mutex_init(&subsys->track.lock);
+
+	ret = device_register(&subsys->dev);
+	if (ret) {
+		put_device(&subsys->dev);
+		return ERR_PTR(ret);
+	}
+
+	ret = subsys_char_device_add(subsys);
+	if (ret)
+		goto err_register;
+
+	if (ofnode) {
+		ret = subsys_parse_devicetree(desc);
+		if (ret)
+			goto err_register;
+
+		subsys->restart_order = update_restart_order(subsys);
+
+		ret = subsys_setup_irqs(subsys);
+		if (ret < 0)
+			goto err_setup_irqs;
+
+		if (of_property_read_u32(ofnode, "qcom,ssctl-instance-id",
+					&desc->ssctl_instance_id))
+			pr_debug("Reading instance-id for %s failed\n",
+								desc->name);
+
+		if (of_property_read_u32(ofnode, "qcom,sysmon-id",
+					&subsys->desc->sysmon_pid))
+			pr_debug("Reading sysmon-id for %s failed\n",
+								desc->name);
+
+		subsys->desc->edge = of_get_property(ofnode, "qcom,edge",
+									NULL);
+		if (!subsys->desc->edge)
+			pr_debug("Reading qcom,edge for %s failed\n",
+								desc->name);
+	}
+
+	ret = sysmon_notifier_register(desc);
+	if (ret < 0)
+		goto err_sysmon_notifier;
+
+	if (subsys->desc->edge) {
+		ret = sysmon_glink_register(desc);
+		if (ret < 0)
+			goto err_sysmon_glink_register;
+	}
+	mutex_lock(&subsys_list_lock);
+	INIT_LIST_HEAD(&subsys->list);
+	list_add_tail(&subsys->list, &subsys_list);
+	mutex_unlock(&subsys_list_lock);
+
+	return subsys;
+err_sysmon_glink_register:
+	sysmon_notifier_unregister(subsys->desc);
+err_sysmon_notifier:
+	if (ofnode)
+		subsys_free_irqs(subsys);
+err_setup_irqs:
+	if (ofnode)
+		subsys_remove_restart_order(ofnode);
+err_register:
+	device_unregister(&subsys->dev);
+	return ERR_PTR(ret);
+}
+EXPORT_SYMBOL(subsys_register);
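+
+/*
+ * Minimal caller sketch for the registration API above (illustrative only;
+ * "foo" and foo_probe() are hypothetical, and a real descriptor would also
+ * set up its powerup/shutdown operations and IRQs before registering):
+ *
+ *	static struct subsys_desc foo_desc = {
+ *		.name = "foo",
+ *		.owner = THIS_MODULE,
+ *	};
+ *
+ *	static int foo_probe(struct platform_device *pdev)
+ *	{
+ *		struct subsys_device *sdev;
+ *
+ *		foo_desc.dev = &pdev->dev;
+ *		sdev = subsys_register(&foo_desc);
+ *		if (IS_ERR(sdev))
+ *			return PTR_ERR(sdev);
+ *		platform_set_drvdata(pdev, sdev);
+ *		return 0;
+ *	}
+ *
+ * The matching remove path calls subsys_unregister(sdev).
+ */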
+
+void subsys_unregister(struct subsys_device *subsys)
+{
+	struct subsys_device *subsys_dev, *tmp;
+	struct device_node *device;
+
+	/* Check the handle before it is dereferenced below. */
+	if (IS_ERR_OR_NULL(subsys))
+		return;
+
+	device = subsys->desc->dev->of_node;
+
+	if (get_device(&subsys->dev)) {
+		mutex_lock(&subsys_list_lock);
+		list_for_each_entry_safe(subsys_dev, tmp, &subsys_list, list)
+			if (subsys_dev == subsys)
+				list_del(&subsys->list);
+		mutex_unlock(&subsys_list_lock);
+
+		if (device) {
+			subsys_free_irqs(subsys);
+			subsys_remove_restart_order(device);
+		}
+		mutex_lock(&subsys->track.lock);
+		WARN_ON(subsys->count);
+		device_unregister(&subsys->dev);
+		mutex_unlock(&subsys->track.lock);
+		subsys_char_device_remove(subsys);
+		sysmon_notifier_unregister(subsys->desc);
+		if (subsys->desc->edge)
+			sysmon_glink_unregister(subsys->desc);
+		put_device(&subsys->dev);
+	}
+}
+EXPORT_SYMBOL(subsys_unregister);
+
+static int subsys_panic(struct device *dev, void *data)
+{
+	struct subsys_device *subsys = to_subsys(dev);
+
+	if (subsys->desc->crash_shutdown)
+		subsys->desc->crash_shutdown(subsys->desc);
+	return 0;
+}
+
+static int ssr_panic_handler(struct notifier_block *this,
+				unsigned long event, void *ptr)
+{
+	bus_for_each_dev(&subsys_bus_type, NULL, NULL, subsys_panic);
+	return NOTIFY_DONE;
+}
+
+static struct notifier_block panic_nb = {
+	.notifier_call  = ssr_panic_handler,
+};
+
+static int __init subsys_restart_init(void)
+{
+	int ret;
+
+	ssr_wq = alloc_workqueue("ssr_wq", WQ_CPU_INTENSIVE, 0);
+	BUG_ON(!ssr_wq);
+
+	ret = bus_register(&subsys_bus_type);
+	if (ret)
+		goto err_bus;
+
+	char_class = class_create(THIS_MODULE, "subsys");
+	if (IS_ERR(char_class)) {
+		ret = -ENOMEM;
+		pr_err("Failed to create subsys_dev class\n");
+		goto err_class;
+	}
+
+	ret = atomic_notifier_chain_register(&panic_notifier_list,
+			&panic_nb);
+	if (ret)
+		goto err_soc;
+
+	return 0;
+
+err_soc:
+	class_destroy(char_class);
+err_class:
+	bus_unregister(&subsys_bus_type);
+err_bus:
+	destroy_workqueue(ssr_wq);
+	return ret;
+}
+arch_initcall(subsys_restart_init);
+
+MODULE_DESCRIPTION("Subsystem Restart Driver");
+MODULE_LICENSE("GPL v2");
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/soc/qcom/sysmon-glink.c	2019-01-22 16:16:26.679275167 +0100
@@ -0,0 +1,480 @@
+/*
+ * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/string.h>
+#include <linux/completion.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/err.h>
+#include <linux/workqueue.h>
+
+#include <soc/qcom/sysmon.h>
+#include <soc/qcom/subsystem_notif.h>
+#include <soc/qcom/glink.h>
+
+#define TX_BUF_SIZE	50
+#define RX_BUF_SIZE	500
+#define TIMEOUT_MS	500
+
+/**
+ * struct sysmon_subsys - sysmon state for one subsystem
+ * @name:	subsys_desc name.
+ * @edge:	name of the G-Link edge.
+ * @handle:	G-Link channel used for this subsystem.
+ * @link_info:	link state callback registration info.
+ * @rx_buf:	buffer used to store the received message.
+ * @chan_open:	set on GLINK_CONNECTED, cleared otherwise.
+ * @event:	last stored G-Link state event.
+ * @glink_handle:	link state notifier handle reference.
+ * @intent_count:	number of outstanding receive intents queued.
+ * @resp_ready:	completion signalled when a response arrives.
+ * @lock:	protects the channel state and the shared rx_buf.
+ * @glink_event_wq:	workqueue handling link up/down events.
+ * @work:	work item used for channel open/close handling.
+ * @list:	node in the global sysmon_glink_list.
+ */
+struct sysmon_subsys {
+	const char		*name;
+	const char		*edge;
+	void			*handle;
+	struct glink_link_info	*link_info;
+	char			rx_buf[RX_BUF_SIZE];
+	bool			chan_open;
+	unsigned		event;
+	void			*glink_handle;
+	int			intent_count;
+	struct completion	resp_ready;
+	struct mutex		lock;
+	struct workqueue_struct *glink_event_wq;
+	struct work_struct	work;
+	struct list_head	list;
+};
+
+static const char *notif_name[SUBSYS_NOTIF_TYPE_COUNT] = {
+	[SUBSYS_BEFORE_SHUTDOWN] = "before_shutdown",
+	[SUBSYS_AFTER_SHUTDOWN]  = "after_shutdown",
+	[SUBSYS_BEFORE_POWERUP]  = "before_powerup",
+	[SUBSYS_AFTER_POWERUP]   = "after_powerup",
+};
+
+static LIST_HEAD(sysmon_glink_list);
+static DEFINE_MUTEX(sysmon_glink_list_lock);
+
+static struct sysmon_subsys *_find_subsys(struct subsys_desc *desc)
+{
+	struct sysmon_subsys *ss;
+
+	if (desc == NULL)
+		return NULL;
+
+	mutex_lock(&sysmon_glink_list_lock);
+	list_for_each_entry(ss, &sysmon_glink_list, list) {
+		if (!strcmp(ss->name, desc->name)) {
+			mutex_unlock(&sysmon_glink_list_lock);
+			return ss;
+		}
+	}
+	mutex_unlock(&sysmon_glink_list_lock);
+
+	return NULL;
+}
+
+static int sysmon_send_msg(struct sysmon_subsys *ss, const char *tx_buf,
+			   size_t len)
+{
+	int ret;
+	void *handle;
+
+	if (!ss->chan_open)
+		return -ENODEV;
+
+	if (!ss->handle)
+		return -EINVAL;
+
+	init_completion(&ss->resp_ready);
+	handle = ss->handle;
+
+	/* Register an intent to receive data */
+	if (!ss->intent_count) {
+		ret = glink_queue_rx_intent(handle, (void *)ss,
+						sizeof(ss->rx_buf));
+		if (ret) {
+			pr_err("Failed to register receive intent\n");
+			return ret;
+		}
+		ss->intent_count++;
+	}
+
+	pr_debug("Sending sysmon message: %s\n", tx_buf);
+	ret = glink_tx(handle, (void *)ss, (void *)tx_buf, len,
+						GLINK_TX_REQ_INTENT);
+	if (ret) {
+		pr_err("Failed to send sysmon message!\n");
+		return ret;
+	}
+
+	ret = wait_for_completion_timeout(&ss->resp_ready,
+				  msecs_to_jiffies(TIMEOUT_MS));
+	if (!ret) {
+		pr_err("Timed out waiting for response\n");
+		return -ETIMEDOUT;
+	}
+	pr_debug("Received response: %s\n", ss->rx_buf);
+	return ret;
+}
+
+/**
+ * sysmon_send_event_no_qmi() - Notify a subsystem of another's state change
+ * @dest_desc:	Subsystem descriptor of the subsystem the notification
+ * should be sent to
+ * @event_desc:	Subsystem descriptor of the subsystem that generated the
+ * notification
+ * @notif:	ID of the notification type (ex. SUBSYS_BEFORE_SHUTDOWN)
+ *
+ * Returns 0 for success, -EINVAL for invalid destination or notification IDs,
+ * -ENODEV if the transport channel is not open, -ETIMEDOUT if the destination
+ * subsystem does not respond, and -ENOSYS if the destination subsystem
+ * responds, but with something other than an acknowledgement.
+ *
+ * If CONFIG_MSM_SYSMON_GLINK_COMM is not defined, always return success (0).
+ */
+int sysmon_send_event_no_qmi(struct subsys_desc *dest_desc,
+			struct subsys_desc *event_desc,
+			enum subsys_notif_type notif)
+{
+	char tx_buf[TX_BUF_SIZE];
+	int ret;
+	struct sysmon_subsys *ss = NULL;
+
+	ss = _find_subsys(dest_desc);
+	if (ss == NULL)
+		return -EINVAL;
+
+	if (event_desc == NULL || notif < 0 || notif >= SUBSYS_NOTIF_TYPE_COUNT
+			|| notif_name[notif] == NULL)
+		return -EINVAL;
+
+	snprintf(tx_buf, sizeof(tx_buf), "ssr:%s:%s", event_desc->name,
+		 notif_name[notif]);
+
+	mutex_lock(&ss->lock);
+	ret = sysmon_send_msg(ss, tx_buf, strlen(tx_buf));
+	if (ret < 0) {
+		pr_err("Message sending failed %d\n", ret);
+		goto out;
+	}
+
+	if (strcmp(ss->rx_buf, "ssr:ack")) {
+		pr_debug("Unexpected response %s\n", ss->rx_buf);
+		ret = -ENOSYS;
+	}
+out:
+	mutex_unlock(&ss->lock);
+	return ret;
+}
+EXPORT_SYMBOL(sysmon_send_event_no_qmi);
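+
+/*
+ * Wire-format sketch for the SSCTL v0 string protocol used above; the
+ * subsystem names are illustrative. Notifying "adsp" that the modem is
+ * about to shut down exchanges:
+ *
+ *	TX: "ssr:modem:before_shutdown"
+ *	RX: "ssr:ack"
+ *
+ * Any response other than "ssr:ack" is reported to the caller as -ENOSYS.
+ */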
+
+/**
+ * sysmon_send_shutdown_no_qmi() - send shutdown command to a subsystem.
+ * @dest_desc:	Subsystem descriptor of the subsystem to send to
+ *
+ * Returns 0 for success, -EINVAL for an invalid destination, -ENODEV if
+ * the SMD transport channel is not open, -ETIMEDOUT if the destination
+ * subsystem does not respond, and -ENOSYS if the destination subsystem
+ * responds with something unexpected.
+ *
+ * If CONFIG_MSM_SYSMON_GLINK_COMM is not defined, always return success (0).
+ */
+int sysmon_send_shutdown_no_qmi(struct subsys_desc *dest_desc)
+{
+	struct sysmon_subsys *ss = NULL;
+	const char tx_buf[] = "system:shutdown";
+	const char expect[] = "system:ack";
+	int ret;
+
+	ss = _find_subsys(dest_desc);
+	if (ss == NULL)
+		return -EINVAL;
+
+	mutex_lock(&ss->lock);
+	ret = sysmon_send_msg(ss, tx_buf, sizeof(tx_buf));
+	if (ret < 0) {
+		pr_err("Message sending failed %d\n", ret);
+		goto out;
+	}
+
+	if (strcmp(ss->rx_buf, expect)) {
+		pr_err("Unexpected response %s\n", ss->rx_buf);
+		ret = -ENOSYS;
+	}
+out:
+	mutex_unlock(&ss->lock);
+	return ret;
+}
+EXPORT_SYMBOL(sysmon_send_shutdown_no_qmi);
+
+/**
+ * sysmon_get_reason_no_qmi() - Retrieve failure reason from a subsystem.
+ * @dest_desc:	Subsystem descriptor of the subsystem to query
+ * @buf:	Caller-allocated buffer for the returned NUL-terminated reason
+ * @len:	Length of @buf
+ *
+ * Returns 0 for success, -EINVAL for an invalid destination, -ENODEV if
+ * the SMD transport channel is not open, -ETIMEDOUT if the destination
+ * subsystem does not respond, and -ENOSYS if the destination subsystem
+ * responds with something unexpected.
+ *
+ * If CONFIG_MSM_SYSMON_GLINK_COMM is not defined, always return success (0).
+ */
+int sysmon_get_reason_no_qmi(struct subsys_desc *dest_desc,
+				char *buf, size_t len)
+{
+	struct sysmon_subsys *ss = NULL;
+	const char tx_buf[] = "ssr:retrieve:sfr";
+	const char expect[] = "ssr:return:";
+	size_t prefix_len = ARRAY_SIZE(expect) - 1;
+	int ret;
+
+	ss = _find_subsys(dest_desc);
+	if (ss == NULL || buf == NULL || len == 0)
+		return -EINVAL;
+
+	mutex_lock(&ss->lock);
+	ret = sysmon_send_msg(ss, tx_buf, sizeof(tx_buf));
+	if (ret < 0) {
+		pr_err("Message sending failed %d\n", ret);
+		goto out;
+	}
+
+	if (strncmp(ss->rx_buf, expect, prefix_len)) {
+		pr_err("Unexpected response %s\n", ss->rx_buf);
+		ret = -ENOSYS;
+		goto out;
+	}
+	strlcpy(buf, ss->rx_buf + prefix_len, len);
+out:
+	mutex_unlock(&ss->lock);
+	return ret;
+}
+EXPORT_SYMBOL(sysmon_get_reason_no_qmi);
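+
+/*
+ * Example exchange for the query above (response text illustrative):
+ *
+ *	TX: "ssr:retrieve:sfr"
+ *	RX: "ssr:return:err_fatal received from modem"
+ *
+ * Only the text after the "ssr:return:" prefix is copied into the caller's
+ * buffer.
+ */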
+
+static void glink_notify_rx(void *handle, const void *priv,
+		const void *pkt_priv, const void *ptr, size_t size)
+{
+	struct sysmon_subsys *ss = (struct sysmon_subsys *)priv;
+
+	if (!ss) {
+		pr_err("sysmon_subsys mapping failed\n");
+		return;
+	}
+
+	memset(ss->rx_buf, 0, sizeof(ss->rx_buf));
+	ss->intent_count--;
+	if (sizeof(ss->rx_buf) > size)
+		strlcpy(ss->rx_buf, ptr, size);
+	else
+		pr_warn("Invalid recv message size\n");
+	glink_rx_done(ss->handle, ptr, false);
+	complete(&ss->resp_ready);
+}
+
+static void glink_notify_tx_done(void *handle, const void *priv,
+		const void *pkt_priv, const void *ptr)
+{
+	struct sysmon_subsys *cb_data = (struct sysmon_subsys *)priv;
+
+	if (!cb_data)
+		pr_err("sysmon_subsys mapping failed\n");
+	else
+		pr_debug("tx_done notification!\n");
+}
+
+static void glink_notify_state(void *handle, const void *priv, unsigned event)
+{
+	struct sysmon_subsys *ss = (struct sysmon_subsys *)priv;
+
+	if (!ss) {
+		pr_err("sysmon_subsys mapping failed\n");
+		return;
+	}
+
+	mutex_lock(&ss->lock);
+	ss->event = event;
+	switch (event) {
+	case GLINK_CONNECTED:
+		ss->chan_open = true;
+		break;
+	case GLINK_REMOTE_DISCONNECTED:
+		ss->chan_open = false;
+		break;
+	default:
+		break;
+	}
+	mutex_unlock(&ss->lock);
+}
+
+static void glink_state_up_work_hdlr(struct work_struct *work)
+{
+	struct glink_open_config open_cfg;
+	struct sysmon_subsys *ss = container_of(work, struct sysmon_subsys,
+							work);
+	void *handle = NULL;
+
+	if (!ss) {
+		pr_err("Invalid sysmon_subsys struct parameter\n");
+		return;
+	}
+
+	memset(&open_cfg, 0, sizeof(struct glink_open_config));
+	open_cfg.priv = (void *)ss;
+	open_cfg.notify_rx = glink_notify_rx;
+	open_cfg.notify_tx_done = glink_notify_tx_done;
+	open_cfg.notify_state = glink_notify_state;
+	open_cfg.edge = ss->edge;
+	open_cfg.transport = "smd_trans";
+	open_cfg.name = "sys_mon";
+
+	handle = glink_open(&open_cfg);
+	if (IS_ERR_OR_NULL(handle)) {
+		pr_err("%s: %s: unable to open channel\n",
+					open_cfg.edge, open_cfg.name);
+		return;
+	}
+	ss->handle = handle;
+}
+
+static void glink_state_down_work_hdlr(struct work_struct *work)
+{
+	struct sysmon_subsys *ss = container_of(work, struct sysmon_subsys,
+							work);
+
+	if (ss->handle)
+		glink_close(ss->handle);
+	ss->handle = NULL;
+}
+
+static void sysmon_glink_cb(struct glink_link_state_cb_info *cb_info,
+					void *priv)
+{
+	struct sysmon_subsys *ss = (struct sysmon_subsys *)priv;
+
+	if (!cb_info || !ss) {
+		pr_err("Invalid parameters\n");
+		return;
+	}
+
+	mutex_lock(&ss->lock);
+	switch (cb_info->link_state) {
+	case GLINK_LINK_STATE_UP:
+		pr_debug("LINK UP %s\n", ss->edge);
+		INIT_WORK(&ss->work, glink_state_up_work_hdlr);
+		queue_work(ss->glink_event_wq, &ss->work);
+		break;
+	case GLINK_LINK_STATE_DOWN:
+		pr_debug("LINK DOWN %s\n", ss->edge);
+		INIT_WORK(&ss->work, glink_state_down_work_hdlr);
+		queue_work(ss->glink_event_wq, &ss->work);
+		break;
+	default:
+		pr_warn("Invalid event notification\n");
+		break;
+	}
+	mutex_unlock(&ss->lock);
+}
+
+int sysmon_glink_register(struct subsys_desc *desc)
+{
+	struct sysmon_subsys *ss;
+	struct glink_link_info *link_info;
+	int ret;
+
+	if (!desc)
+		return -EINVAL;
+
+	ss = kzalloc(sizeof(*ss), GFP_KERNEL);
+	if (!ss)
+		return -ENOMEM;
+
+	link_info = kzalloc(sizeof(struct glink_link_info), GFP_KERNEL);
+	if (!link_info) {
+		pr_err("Could not allocate link info structure\n");
+		kfree(ss);
+		return -ENOMEM;
+	}
+
+	ss->glink_event_wq = create_singlethread_workqueue(desc->name);
+	if (ss->glink_event_wq == NULL) {
+		ret = -ENOMEM;
+		goto err_wq;
+	}
+	mutex_init(&ss->lock);
+
+	ss->name = desc->name;
+	ss->handle = NULL;
+	ss->intent_count = 0;
+	ss->link_info = link_info;
+	ss->link_info->edge = ss->edge = desc->edge;
+	ss->link_info->transport = "smd_trans";
+	ss->link_info->glink_link_state_notif_cb = sysmon_glink_cb;
+
+	ss->glink_handle = glink_register_link_state_cb(ss->link_info,
+								(void *)ss);
+	if (IS_ERR_OR_NULL(ss->glink_handle)) {
+		pr_err("Could not register link state cb\n");
+		/* PTR_ERR(NULL) would be 0; make sure an error is returned. */
+		ret = ss->glink_handle ? PTR_ERR(ss->glink_handle) : -ENODEV;
+		goto err;
+	}
+
+	mutex_lock(&sysmon_glink_list_lock);
+	INIT_LIST_HEAD(&ss->list);
+	list_add_tail(&ss->list, &sysmon_glink_list);
+	mutex_unlock(&sysmon_glink_list_lock);
+	return 0;
+err:
+	destroy_workqueue(ss->glink_event_wq);
+err_wq:
+	kfree(link_info);
+	kfree(ss);
+	return ret;
+}
+EXPORT_SYMBOL(sysmon_glink_register);
+
+void sysmon_glink_unregister(struct subsys_desc *desc)
+{
+	struct sysmon_subsys *ss = NULL;
+
+	if (!desc)
+		return;
+
+	ss = _find_subsys(desc);
+	if (ss == NULL)
+		return;
+
+	mutex_lock(&sysmon_glink_list_lock);
+	list_del(&ss->list);
+	mutex_unlock(&sysmon_glink_list_lock);
+	if (ss->handle)
+		glink_close(ss->handle);
+	destroy_workqueue(ss->glink_event_wq);
+	glink_unregister_link_state_cb(ss->glink_handle);
+	kfree(ss->link_info);
+	kfree(ss);
+}
+EXPORT_SYMBOL(sysmon_glink_unregister);
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/soc/qcom/sysmon-qmi.c	2019-01-22 16:16:26.679275167 +0100
@@ -0,0 +1,738 @@
+/*
+ * Copyright (c) 2014-2015, 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) "sysmon-qmi: %s: " fmt, __func__
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/string.h>
+#include <linux/completion.h>
+#include <linux/slab.h>
+#include <linux/of.h>
+
+#include <soc/qcom/subsystem_restart.h>
+#include <soc/qcom/subsystem_notif.h>
+#include <soc/qcom/msm_qmi_interface.h>
+#include <soc/qcom/sysmon.h>
+
+#define QMI_RESP_BIT_SHIFT(x)			((x) << 16)
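+
+/*
+ * The decoded QMI result/error codes are 16-bit values; the shift above
+ * moves them into the upper half-word so that, e.g., an error code of
+ * 0x0001 is printed as 0x00010000 in the "QMI request failed" logs below.
+ */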
+
+#define QMI_SSCTL_RESTART_REQ_V02		0x0020
+#define QMI_SSCTL_RESTART_RESP_V02		0x0020
+#define QMI_SSCTL_RESTART_READY_IND_V02		0x0020
+#define QMI_SSCTL_SHUTDOWN_REQ_V02		0x0021
+#define QMI_SSCTL_SHUTDOWN_RESP_V02		0x0021
+#define QMI_SSCTL_SHUTDOWN_READY_IND_V02	0x0021
+#define QMI_SSCTL_GET_FAILURE_REASON_REQ_V02	0x0022
+#define QMI_SSCTL_GET_FAILURE_REASON_RESP_V02	0x0022
+#define QMI_SSCTL_SUBSYS_EVENT_REQ_V02		0x0023
+#define QMI_SSCTL_SUBSYS_EVENT_RESP_V02		0x0023
+#define QMI_SSCTL_SUBSYS_EVENT_READY_IND_V02	0x0023
+
+#define QMI_SSCTL_ERROR_MSG_LENGTH		90
+#define QMI_SSCTL_SUBSYS_NAME_LENGTH		15
+#define QMI_SSCTL_SUBSYS_EVENT_REQ_LENGTH	40
+#define QMI_SSCTL_RESP_MSG_LENGTH		7
+#define QMI_SSCTL_EMPTY_MSG_LENGTH		0
+
+#define SSCTL_SERVICE_ID			0x2B
+#define SSCTL_VER_2				2
+#define SERVER_TIMEOUT				500
+#define SHUTDOWN_TIMEOUT			10000
+
+#define QMI_EOTI_DATA_TYPE	\
+{				\
+	.data_type = QMI_EOTI,	\
+	.elem_len  = 0,		\
+	.elem_size = 0,		\
+	.is_array  = NO_ARRAY,	\
+	.tlv_type  = 0x00,	\
+	.offset    = 0,		\
+	.ei_array  = NULL,	\
+},
+
+struct sysmon_qmi_data {
+	const char *name;
+	int instance_id;
+	struct work_struct svc_arrive;
+	struct work_struct svc_exit;
+	struct work_struct svc_rcv_msg;
+	struct qmi_handle *clnt_handle;
+	struct notifier_block notifier;
+	void *notif_handle;
+	bool legacy_version;
+	struct completion server_connect;
+	struct completion ind_recv;
+	struct list_head list;
+};
+
+static struct workqueue_struct *sysmon_wq;
+
+static LIST_HEAD(sysmon_list);
+static DEFINE_MUTEX(sysmon_list_lock);
+static DEFINE_MUTEX(sysmon_lock);
+
+static void sysmon_clnt_recv_msg(struct work_struct *work);
+static void sysmon_clnt_svc_arrive(struct work_struct *work);
+static void sysmon_clnt_svc_exit(struct work_struct *work);
+
+static const int notif_map[SUBSYS_NOTIF_TYPE_COUNT] = {
+	[SUBSYS_BEFORE_POWERUP] = SSCTL_SSR_EVENT_BEFORE_POWERUP,
+	[SUBSYS_AFTER_POWERUP] = SSCTL_SSR_EVENT_AFTER_POWERUP,
+	[SUBSYS_BEFORE_SHUTDOWN] = SSCTL_SSR_EVENT_BEFORE_SHUTDOWN,
+	[SUBSYS_AFTER_SHUTDOWN] = SSCTL_SSR_EVENT_AFTER_SHUTDOWN,
+};
+
+static void sysmon_ind_cb(struct qmi_handle *handle, unsigned int msg_id,
+			void *msg, unsigned int msg_len, void *ind_cb_priv)
+{
+	struct sysmon_qmi_data *data = NULL, *temp;
+
+	mutex_lock(&sysmon_list_lock);
+	list_for_each_entry(temp, &sysmon_list, list)
+		if (!strcmp(temp->name, (char *)ind_cb_priv))
+			data = temp;
+	mutex_unlock(&sysmon_list_lock);
+
+	if (!data)
+		return;
+
+	pr_debug("%s: Indication received from subsystem\n", data->name);
+	complete(&data->ind_recv);
+}
+
+static int sysmon_svc_event_notify(struct notifier_block *this,
+				      unsigned long code,
+				      void *_cmd)
+{
+	struct sysmon_qmi_data *data = container_of(this,
+					struct sysmon_qmi_data, notifier);
+
+	switch (code) {
+	case QMI_SERVER_ARRIVE:
+		queue_work(sysmon_wq, &data->svc_arrive);
+		break;
+	case QMI_SERVER_EXIT:
+		queue_work(sysmon_wq, &data->svc_exit);
+		break;
+	default:
+		break;
+	}
+	return 0;
+}
+
+static void sysmon_clnt_notify(struct qmi_handle *handle,
+			     enum qmi_event_type event, void *notify_priv)
+{
+	struct sysmon_qmi_data *data = container_of(notify_priv,
+					struct sysmon_qmi_data, svc_arrive);
+
+	switch (event) {
+	case QMI_RECV_MSG:
+		schedule_work(&data->svc_rcv_msg);
+		break;
+	default:
+		break;
+	}
+}
+
+static void sysmon_clnt_svc_arrive(struct work_struct *work)
+{
+	int rc;
+	struct sysmon_qmi_data *data = container_of(work,
+					struct sysmon_qmi_data, svc_arrive);
+
+	mutex_lock(&sysmon_lock);
+	/* Create a Local client port for QMI communication */
+	data->clnt_handle = qmi_handle_create(sysmon_clnt_notify, work);
+	if (!data->clnt_handle) {
+		pr_err("QMI client handle alloc failed for %s\n", data->name);
+		mutex_unlock(&sysmon_lock);
+		return;
+	}
+
+	rc = qmi_connect_to_service(data->clnt_handle, SSCTL_SERVICE_ID,
+					SSCTL_VER_2, data->instance_id);
+	if (rc < 0) {
+		pr_err("%s: Could not connect handle to service\n",
+								data->name);
+		qmi_handle_destroy(data->clnt_handle);
+		data->clnt_handle = NULL;
+		mutex_unlock(&sysmon_lock);
+		return;
+	}
+	pr_info("Connection established between QMI handle and %s's SSCTL service\n"
+								, data->name);
+
+	rc = qmi_register_ind_cb(data->clnt_handle, sysmon_ind_cb,
+							(void *)data->name);
+	if (rc < 0)
+		pr_warn("%s: Could not register the indication callback\n",
+								data->name);
+	mutex_unlock(&sysmon_lock);
+}
+
+static void sysmon_clnt_svc_exit(struct work_struct *work)
+{
+	struct sysmon_qmi_data *data = container_of(work,
+					struct sysmon_qmi_data, svc_exit);
+
+	mutex_lock(&sysmon_lock);
+	qmi_handle_destroy(data->clnt_handle);
+	data->clnt_handle = NULL;
+	mutex_unlock(&sysmon_lock);
+}
+
+static void sysmon_clnt_recv_msg(struct work_struct *work)
+{
+	int ret;
+	struct sysmon_qmi_data *data = container_of(work,
+					struct sysmon_qmi_data, svc_rcv_msg);
+
+	do {
+		pr_debug("%s: Notified about a Receive event\n", data->name);
+	} while ((ret = qmi_recv_msg(data->clnt_handle)) == 0);
+
+	if (ret != -ENOMSG)
+		pr_err("%s: Error receiving message\n", data->name);
+}
+
+struct qmi_ssctl_subsys_event_req_msg {
+	uint8_t subsys_name_len;
+	char subsys_name[QMI_SSCTL_SUBSYS_NAME_LENGTH];
+	enum ssctl_ssr_event_enum_type event;
+	uint8_t evt_driven_valid;
+	enum ssctl_ssr_event_driven_enum_type evt_driven;
+};
+
+struct qmi_ssctl_subsys_event_resp_msg {
+	struct qmi_response_type_v01 resp;
+};
+
+static struct elem_info qmi_ssctl_subsys_event_req_msg_ei[] = {
+	{
+		.data_type = QMI_DATA_LEN,
+		.elem_len  = 1,
+		.elem_size = sizeof(uint8_t),
+		.is_array  = NO_ARRAY,
+		.tlv_type  = 0x01,
+		.offset    = offsetof(struct qmi_ssctl_subsys_event_req_msg,
+				      subsys_name_len),
+		.ei_array  = NULL,
+	},
+	{
+		.data_type = QMI_UNSIGNED_1_BYTE,
+		.elem_len  = QMI_SSCTL_SUBSYS_NAME_LENGTH,
+		.elem_size = sizeof(char),
+		.is_array  = VAR_LEN_ARRAY,
+		.tlv_type  = 0x01,
+		.offset    = offsetof(struct qmi_ssctl_subsys_event_req_msg,
+				      subsys_name),
+		.ei_array  = NULL,
+	},
+	{
+		.data_type = QMI_SIGNED_4_BYTE_ENUM,
+		.elem_len  = 1,
+		.elem_size = sizeof(uint32_t),
+		.is_array  = NO_ARRAY,
+		.tlv_type  = 0x02,
+		.offset    = offsetof(struct qmi_ssctl_subsys_event_req_msg,
+				      event),
+		.ei_array  = NULL,
+	},
+	{
+		.data_type = QMI_OPT_FLAG,
+		.elem_len  = 1,
+		.elem_size = sizeof(uint8_t),
+		.is_array  = NO_ARRAY,
+		.tlv_type  = 0x10,
+		.offset    = offsetof(struct qmi_ssctl_subsys_event_req_msg,
+				      evt_driven_valid),
+		.ei_array  = NULL,
+	},
+	{
+		.data_type = QMI_SIGNED_4_BYTE_ENUM,
+		.elem_len  = 1,
+		.elem_size = sizeof(uint32_t),
+		.is_array  = NO_ARRAY,
+		.tlv_type  = 0x10,
+		.offset    = offsetof(struct qmi_ssctl_subsys_event_req_msg,
+				      evt_driven),
+		.ei_array  = NULL,
+	},
+	QMI_EOTI_DATA_TYPE
+};
+
+static struct elem_info qmi_ssctl_subsys_event_resp_msg_ei[] = {
+	{
+		.data_type = QMI_STRUCT,
+		.elem_len  = 1,
+		.elem_size = sizeof(struct qmi_response_type_v01),
+		.is_array  = NO_ARRAY,
+		.tlv_type  = 0x02,
+		.offset    = offsetof(struct qmi_ssctl_subsys_event_resp_msg,
+				      resp),
+		.ei_array  = get_qmi_response_type_v01_ei(),
+	},
+	QMI_EOTI_DATA_TYPE
+};
+
+/**
+ * sysmon_send_event() - Notify a subsystem of another's state change
+ * @dest_desc:	Subsystem descriptor of the subsystem the notification
+ * should be sent to
+ * @event_desc:	Subsystem descriptor of the subsystem that generated the
+ * notification
+ * @notif:	ID of the notification type (ex. SUBSYS_BEFORE_SHUTDOWN)
+ *
+ * Reverts to using legacy sysmon API (sysmon_send_event_no_qmi()) if
+ * client handle is not set.
+ *
+ * Returns 0 for success, -EINVAL for invalid destination or notification IDs,
+ * -ENODEV if the transport channel is not open, -ETIMEDOUT if the destination
+ * subsystem does not respond, and -ENOSYS if the destination subsystem
+ * responds, but with something other than an acknowledgement.
+ *
+ * If CONFIG_MSM_SYSMON_COMM is not defined, always return success (0).
+ */
+int sysmon_send_event(struct subsys_desc *dest_desc,
+			struct subsys_desc *event_desc,
+			enum subsys_notif_type notif)
+{
+	struct qmi_ssctl_subsys_event_req_msg req;
+	struct msg_desc req_desc, resp_desc;
+	struct qmi_ssctl_subsys_event_resp_msg resp = { { 0, 0 } };
+	struct sysmon_qmi_data *data = NULL, *temp;
+	const char *event_ss = event_desc->name;
+	const char *dest_ss = dest_desc->name;
+	int ret;
+
+	if (notif < 0 || notif >= SUBSYS_NOTIF_TYPE_COUNT || event_ss == NULL
+		|| dest_ss == NULL)
+		return -EINVAL;
+
+	mutex_lock(&sysmon_list_lock);
+	list_for_each_entry(temp, &sysmon_list, list)
+		if (!strcmp(temp->name, dest_desc->name))
+			data = temp;
+	mutex_unlock(&sysmon_list_lock);
+
+	if (!data)
+		return -EINVAL;
+
+	if (!data->clnt_handle) {
+		pr_debug("No SSCTL_V2 support for %s. Revert to SSCTL_V0\n",
+								dest_ss);
+		ret = sysmon_send_event_no_qmi(dest_desc, event_desc, notif);
+		if (ret)
+			pr_debug("SSCTL_V0 implementation failed - %d\n", ret);
+
+		return ret;
+	}
+
+	snprintf(req.subsys_name, ARRAY_SIZE(req.subsys_name), "%s", event_ss);
+	req.subsys_name_len = strlen(req.subsys_name);
+	req.event = notif_map[notif];
+	req.evt_driven_valid = 1;
+	req.evt_driven = SSCTL_SSR_EVENT_FORCED;
+
+	req_desc.msg_id = QMI_SSCTL_SUBSYS_EVENT_REQ_V02;
+	req_desc.max_msg_len = QMI_SSCTL_SUBSYS_EVENT_REQ_LENGTH;
+	req_desc.ei_array = qmi_ssctl_subsys_event_req_msg_ei;
+
+	resp_desc.msg_id = QMI_SSCTL_SUBSYS_EVENT_RESP_V02;
+	resp_desc.max_msg_len = QMI_SSCTL_RESP_MSG_LENGTH;
+	resp_desc.ei_array = qmi_ssctl_subsys_event_resp_msg_ei;
+
+	mutex_lock(&sysmon_lock);
+	ret = qmi_send_req_wait(data->clnt_handle, &req_desc, &req,
+		sizeof(req), &resp_desc, &resp, sizeof(resp), SERVER_TIMEOUT);
+	if (ret < 0) {
+		pr_err("QMI send req to %s failed, ret - %d\n", dest_ss, ret);
+		goto out;
+	}
+
+	/* Check the response */
+	if (QMI_RESP_BIT_SHIFT(resp.resp.result) != QMI_RESULT_SUCCESS_V01) {
+		pr_debug("QMI request failed 0x%x\n",
+					QMI_RESP_BIT_SHIFT(resp.resp.error));
+		ret = -EREMOTEIO;
+	}
+out:
+	mutex_unlock(&sysmon_lock);
+	return ret;
+}
+EXPORT_SYMBOL(sysmon_send_event);
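+
+/*
+ * Illustrative caller sketch (hypothetical descriptors): telling the ADSP
+ * that the modem is about to be shut down as part of a restart:
+ *
+ *	ret = sysmon_send_event(adsp_desc, modem_desc,
+ *				SUBSYS_BEFORE_SHUTDOWN);
+ *	if (ret < 0)
+ *		pr_err("adsp could not be notified: %d\n", ret);
+ *
+ * The helper transparently falls back to the G-Link string protocol when
+ * no SSCTL v2 QMI connection exists for the destination.
+ */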
+
+struct qmi_ssctl_shutdown_req_msg {
+};
+
+struct qmi_ssctl_shutdown_resp_msg {
+	struct qmi_response_type_v01 resp;
+};
+
+static struct elem_info qmi_ssctl_shutdown_req_msg_ei[] = {
+	QMI_EOTI_DATA_TYPE
+};
+
+static struct elem_info qmi_ssctl_shutdown_resp_msg_ei[] = {
+	{
+		.data_type = QMI_STRUCT,
+		.elem_len  = 1,
+		.elem_size = sizeof(struct qmi_response_type_v01),
+		.is_array  = NO_ARRAY,
+		.tlv_type  = 0x02,
+		.offset    = offsetof(struct qmi_ssctl_shutdown_resp_msg,
+				      resp),
+		.ei_array  = get_qmi_response_type_v01_ei(),
+	},
+	QMI_EOTI_DATA_TYPE
+};
+
+/**
+ * sysmon_send_shutdown() - send shutdown command to a
+ * subsystem.
+ * @dest_desc:	Subsystem descriptor of the subsystem to send to
+ *
+ * Reverts to using legacy sysmon API (sysmon_send_shutdown_no_qmi()) if
+ * client handle is not set.
+ *
+ * Returns 0 for success, -EINVAL for an invalid destination, -ENODEV if
+ * the SMD transport channel is not open, -ETIMEDOUT if the destination
+ * subsystem does not respond, and -ENOSYS if the destination subsystem
+ * responds with something unexpected.
+ *
+ * If CONFIG_MSM_SYSMON_COMM is not defined, always return success (0).
+ */
+int sysmon_send_shutdown(struct subsys_desc *dest_desc)
+{
+	struct msg_desc req_desc, resp_desc;
+	struct qmi_ssctl_shutdown_resp_msg resp = { { 0, 0 } };
+	struct sysmon_qmi_data *data = NULL, *temp;
+	const char *dest_ss = dest_desc->name;
+	char req = 0;
+	int ret, shutdown_ack_ret;
+
+	if (dest_ss == NULL)
+		return -EINVAL;
+
+	mutex_lock(&sysmon_list_lock);
+	list_for_each_entry(temp, &sysmon_list, list)
+		if (!strcmp(temp->name, dest_desc->name))
+			data = temp;
+	mutex_unlock(&sysmon_list_lock);
+
+	if (!data)
+		return -EINVAL;
+
+	if (!data->clnt_handle) {
+		pr_debug("No SSCTL_V2 support for %s. Revert to SSCTL_V0\n",
+								dest_ss);
+		ret = sysmon_send_shutdown_no_qmi(dest_desc);
+		if (ret)
+			pr_debug("SSCTL_V0 implementation failed - %d\n", ret);
+
+		return ret;
+	}
+
+	req_desc.msg_id = QMI_SSCTL_SHUTDOWN_REQ_V02;
+	req_desc.max_msg_len = QMI_SSCTL_EMPTY_MSG_LENGTH;
+	req_desc.ei_array = qmi_ssctl_shutdown_req_msg_ei;
+
+	resp_desc.msg_id = QMI_SSCTL_SHUTDOWN_RESP_V02;
+	resp_desc.max_msg_len = QMI_SSCTL_RESP_MSG_LENGTH;
+	resp_desc.ei_array = qmi_ssctl_shutdown_resp_msg_ei;
+
+	reinit_completion(&data->ind_recv);
+	mutex_lock(&sysmon_lock);
+	ret = qmi_send_req_wait(data->clnt_handle, &req_desc, &req,
+		sizeof(req), &resp_desc, &resp, sizeof(resp), SERVER_TIMEOUT);
+	if (ret < 0) {
+		pr_err("QMI send req to %s failed, ret - %d\n", dest_ss, ret);
+		goto out;
+	}
+
+	/* Check the response */
+	if (QMI_RESP_BIT_SHIFT(resp.resp.result) != QMI_RESULT_SUCCESS_V01) {
+		pr_err("QMI request failed 0x%x\n",
+					QMI_RESP_BIT_SHIFT(resp.resp.error));
+		ret = -EREMOTEIO;
+		goto out;
+	}
+
+	shutdown_ack_ret = wait_for_shutdown_ack(dest_desc);
+	if (shutdown_ack_ret < 0) {
+		pr_err("shutdown_ack SMP2P bit for %s not set\n", data->name);
+		/* Test the completion itself, not its (never-NULL) address. */
+		if (!completion_done(&data->ind_recv)) {
+			pr_err("QMI shutdown indication not received\n");
+			ret = shutdown_ack_ret;
+		}
+		goto out;
+	} else if (shutdown_ack_ret > 0)
+		goto out;
+
+	if (!wait_for_completion_timeout(&data->ind_recv,
+					msecs_to_jiffies(SHUTDOWN_TIMEOUT))) {
+		pr_err("Timed out waiting for shutdown indication from %s\n",
+							data->name);
+		ret = -ETIMEDOUT;
+	}
+out:
+	mutex_unlock(&sysmon_lock);
+	return ret;
+}
+EXPORT_SYMBOL(sysmon_send_shutdown);
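+
+/*
+ * Note on the handshake above: a successful QMI response only means the
+ * peer accepted the shutdown request. Completion is then confirmed either
+ * by the shutdown-ack SMP2P bit (wait_for_shutdown_ack()) or by the QMI
+ * shutdown-ready indication, bounded by SHUTDOWN_TIMEOUT (10 s).
+ */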
+
+struct qmi_ssctl_get_failure_reason_req_msg {
+};
+
+struct qmi_ssctl_get_failure_reason_resp_msg {
+	struct qmi_response_type_v01 resp;
+	uint8_t error_message_valid;
+	uint32_t error_message_len;
+	char error_message[QMI_SSCTL_ERROR_MSG_LENGTH];
+};
+
+static struct elem_info qmi_ssctl_get_failure_reason_req_msg_ei[] = {
+	QMI_EOTI_DATA_TYPE
+};
+
+static struct elem_info qmi_ssctl_get_failure_reason_resp_msg_ei[] = {
+	{
+		.data_type = QMI_STRUCT,
+		.elem_len  = 1,
+		.elem_size = sizeof(struct qmi_response_type_v01),
+		.is_array  = NO_ARRAY,
+		.tlv_type  = 0x02,
+		.offset    = offsetof(
+			struct qmi_ssctl_get_failure_reason_resp_msg,
+							resp),
+		.ei_array  = get_qmi_response_type_v01_ei(),
+	},
+	{
+		.data_type = QMI_OPT_FLAG,
+		.elem_len  = 1,
+		.elem_size = sizeof(uint8_t),
+		.is_array  = NO_ARRAY,
+		.tlv_type  = 0x10,
+		.offset    = offsetof(
+			struct qmi_ssctl_get_failure_reason_resp_msg,
+						error_message_valid),
+		.ei_array  = NULL,
+	},
+	{
+		.data_type = QMI_DATA_LEN,
+		.elem_len  = 1,
+		.elem_size = sizeof(uint8_t),
+		.is_array  = NO_ARRAY,
+		.tlv_type  = 0x10,
+		.offset    = offsetof(
+			struct qmi_ssctl_get_failure_reason_resp_msg,
+						error_message_len),
+		.ei_array  = NULL,
+	},
+	{
+		.data_type = QMI_UNSIGNED_1_BYTE,
+		.elem_len  = QMI_SSCTL_ERROR_MSG_LENGTH,
+		.elem_size = sizeof(char),
+		.is_array  = VAR_LEN_ARRAY,
+		.tlv_type  = 0x10,
+		.offset    = offsetof(
+			struct qmi_ssctl_get_failure_reason_resp_msg,
+						error_message),
+		.ei_array  = NULL,
+	},
+	QMI_EOTI_DATA_TYPE
+};
+
+/**
+ * sysmon_get_reason() - Retrieve failure reason from a subsystem.
+ * @dest_desc:	Subsystem descriptor of the subsystem to query
+ * @buf:	Caller-allocated buffer for the returned NUL-terminated reason
+ * @len:	Length of @buf
+ *
+ * Reverts to using legacy sysmon API (sysmon_get_reason_no_qmi()) if client
+ * handle is not set.
+ *
+ * Returns 0 for success, -EINVAL for an invalid destination, -ENODEV if
+ * the SMD transport channel is not open, -ETIMEDOUT if the destination
+ * subsystem does not respond, and -ENOSYS if the destination subsystem
+ * responds with something unexpected.
+ *
+ * If CONFIG_MSM_SYSMON_COMM is not defined, always return success (0).
+ */
+int sysmon_get_reason(struct subsys_desc *dest_desc, char *buf, size_t len)
+{
+	struct msg_desc req_desc, resp_desc;
+	struct qmi_ssctl_get_failure_reason_resp_msg resp;
+	struct sysmon_qmi_data *data = NULL, *temp;
+	const char *dest_ss = dest_desc->name;
+	const char expect[] = "ssr:return:";
+	char req = 0;
+	int ret;
+
+	if (dest_ss == NULL || buf == NULL || len == 0)
+		return -EINVAL;
+
+	mutex_lock(&sysmon_list_lock);
+	list_for_each_entry(temp, &sysmon_list, list)
+		if (!strcmp(temp->name, dest_desc->name))
+			data = temp;
+	mutex_unlock(&sysmon_list_lock);
+
+	if (!data)
+		return -EINVAL;
+
+	if (!data->clnt_handle) {
+		pr_debug("No SSCTL_V2 support for %s. Revert to SSCTL_V0\n",
+								dest_ss);
+		ret = sysmon_get_reason_no_qmi(dest_desc, buf, len);
+		if (ret)
+			pr_debug("SSCTL_V0 implementation failed - %d\n", ret);
+
+		return ret;
+	}
+
+	req_desc.msg_id = QMI_SSCTL_GET_FAILURE_REASON_REQ_V02;
+	req_desc.max_msg_len = QMI_SSCTL_EMPTY_MSG_LENGTH;
+	req_desc.ei_array = qmi_ssctl_get_failure_reason_req_msg_ei;
+
+	resp_desc.msg_id = QMI_SSCTL_GET_FAILURE_REASON_RESP_V02;
+	resp_desc.max_msg_len = QMI_SSCTL_ERROR_MSG_LENGTH;
+	resp_desc.ei_array = qmi_ssctl_get_failure_reason_resp_msg_ei;
+
+	mutex_lock(&sysmon_lock);
+	ret = qmi_send_req_wait(data->clnt_handle, &req_desc, &req,
+		sizeof(req), &resp_desc, &resp, sizeof(resp), SERVER_TIMEOUT);
+	if (ret < 0) {
+		pr_err("QMI send req to %s failed, ret - %d\n", dest_ss, ret);
+		goto out;
+	}
+
+	/* Check the response */
+	if (QMI_RESP_BIT_SHIFT(resp.resp.result) != QMI_RESULT_SUCCESS_V01) {
+		pr_err("QMI request failed 0x%x\n",
+					QMI_RESP_BIT_SHIFT(resp.resp.error));
+		ret = -EREMOTEIO;
+		goto out;
+	}
+
+	if (!strcmp(resp.error_message, expect)) {
+		pr_err("Unexpected response %s\n", resp.error_message);
+		ret = -ENOSYS;
+		goto out;
+	}
+	/* Never copy more than the caller's buffer can hold. */
+	strlcpy(buf, resp.error_message,
+		min_t(size_t, len, resp.error_message_len + 1));
+out:
+	mutex_unlock(&sysmon_lock);
+	return ret;
+}
+EXPORT_SYMBOL(sysmon_get_reason);
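+
+/*
+ * Illustrative use (hypothetical buffer) from an error-fatal path,
+ * fetching the subsystem failure reason for the crash log:
+ *
+ *	char reason[256] = "";
+ *
+ *	if (!sysmon_get_reason(desc, reason, sizeof(reason)))
+ *		pr_err("%s crashed: %s\n", desc->name, reason);
+ */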
+
+/**
+ * sysmon_notifier_register() - Initialize sysmon data for a subsystem.
+ * @dest_desc:	Subsystem descriptor of the subsystem
+ *
+ * Returns 0 for success. If the subsystem does not support SSCTL v2, a
+ * value of 0 is returned after adding the subsystem entry to the sysmon_list.
+ * In addition, if the SSCTL v2 support exists, the notifier block to receive
+ * events from the SSCTL service on the subsystem is registered.
+ *
+ * If CONFIG_MSM_SYSMON_COMM is not defined, always return success (0).
+ */
+int sysmon_notifier_register(struct subsys_desc *desc)
+{
+	struct sysmon_qmi_data *data;
+	int rc = 0;
+
+	data = kmalloc(sizeof(*data), GFP_KERNEL);
+	if (!data)
+		return -ENOMEM;
+
+	data->name = desc->name;
+	data->instance_id = desc->ssctl_instance_id;
+	data->clnt_handle = NULL;
+	data->legacy_version = false;
+
+	mutex_lock(&sysmon_list_lock);
+	if (data->instance_id <= 0) {
+		pr_debug("SSCTL instance id not defined\n");
+		goto add_list;
+	}
+
+	if (sysmon_wq)
+		goto notif_register;
+
+	sysmon_wq = create_singlethread_workqueue("sysmon_wq");
+	if (!sysmon_wq) {
+		mutex_unlock(&sysmon_list_lock);
+		pr_err("Could not create workqueue\n");
+		kfree(data);
+		return -ENOMEM;
+	}
+
+notif_register:
+	data->notifier.notifier_call = sysmon_svc_event_notify;
+	init_completion(&data->ind_recv);
+
+	INIT_WORK(&data->svc_arrive, sysmon_clnt_svc_arrive);
+	INIT_WORK(&data->svc_exit, sysmon_clnt_svc_exit);
+	INIT_WORK(&data->svc_rcv_msg, sysmon_clnt_recv_msg);
+
+	rc = qmi_svc_event_notifier_register(SSCTL_SERVICE_ID, SSCTL_VER_2,
+					data->instance_id, &data->notifier);
+	if (rc < 0)
+		pr_err("Notifier register failed for %s\n", data->name);
+add_list:
+	INIT_LIST_HEAD(&data->list);
+	list_add_tail(&data->list, &sysmon_list);
+	mutex_unlock(&sysmon_list_lock);
+
+	return rc;
+}
+EXPORT_SYMBOL(sysmon_notifier_register);
+
+/**
+ * sysmon_notifier_unregister() - Cleanup the subsystem's sysmon data.
+ * @dest_desc:	Subsystem descriptor of the subsystem
+ *
+ * If the subsystem does not support SSCTL v2, its entry is simply removed from
+ * the sysmon_list. In addition, if the SSCTL v2 support exists, the notifier
+ * block to receive events from the SSCTL service is unregistered.
+ */
+void sysmon_notifier_unregister(struct subsys_desc *desc)
+{
+	struct sysmon_qmi_data *data = NULL, *sysmon_data, *tmp;
+
+	mutex_lock(&sysmon_list_lock);
+	list_for_each_entry_safe(sysmon_data, tmp, &sysmon_list, list)
+		if (!strcmp(sysmon_data->name, desc->name)) {
+			data = sysmon_data;
+			list_del(&data->list);
+		}
+
+	if (data == NULL)
+		goto exit;
+
+	if (data->instance_id > 0)
+		qmi_svc_event_notifier_unregister(SSCTL_SERVICE_ID,
+			SSCTL_VER_2, data->instance_id, &data->notifier);
+
+	if (sysmon_wq && list_empty(&sysmon_list)) {
+		destroy_workqueue(sysmon_wq);
+		sysmon_wq = NULL;
+	}
+exit:
+	mutex_unlock(&sysmon_list_lock);
+	kfree(data);
+}
+EXPORT_SYMBOL(sysmon_notifier_unregister);
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/soc/qcom/system_stats.c	2019-01-22 16:16:26.679275167 +0100
@@ -0,0 +1,425 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/mm.h>
+#include <linux/types.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/uaccess.h>
+#include <asm/arch_timer.h>
+
+#define SCLK_HZ 32768
+#define MSM_ARCH_TIMER_FREQ 19200000
+#define NUM_STATS_RECORD 2
+#define STATS_OFFSET 0x14
+#define HEAP_OFFSET 0x1c
+#define MASTER_START_ADDR_OFFSET 0x150
+#define MASTER_OFFSET_ADDRESS 0x1000
+
+static struct rpm_system_stats {
+	void __iomem *rpm_stats_addr;
+	void __iomem *rpm_master_addr;
+	int num_masters;
+	char **master;
+} ss;
+
+struct msm_rpmstats_record {
+	char name[32];
+	uint32_t id;
+	uint32_t val;
+};
+
+struct msm_rpm_stats_data {
+	uint32_t stat_type;
+	uint32_t count;
+	uint64_t last_entered_at;
+	uint64_t last_exited_at;
+	uint64_t accumulated;
+	uint32_t client_votes;
+	uint32_t reserved[3];
+};
+
+struct rpm_master_stats_data {
+	uint32_t active_cores;
+	uint32_t numshutdowns;
+	uint64_t shutdown_req;
+	uint64_t wakeup_ind;
+	uint64_t bringup_req;
+	uint64_t bringup_ack;
+	uint32_t wakeup_reason; /* 0 = rude wakeup, 1 = scheduled wakeup */
+	uint32_t last_sleep_transition_duration;
+	uint32_t last_wake_transition_duration;
+	uint32_t xo_count;
+	uint64_t xo_last_entered_at;
+	uint64_t xo_last_exited_at;
+	uint64_t xo_accumulated_duration;
+};
+
+static inline uint32_t msm_rpmstats_read_long_register(void __iomem *regbase,
+		int index, int offset)
+{
+	return readl_relaxed(regbase + offset +
+			index * sizeof(struct msm_rpm_stats_data));
+}
+
+static inline uint64_t msm_rpmstats_read_quad_register(void __iomem *regbase,
+		int index, int offset)
+{
+	uint64_t dst;
+
+	memcpy_fromio(&dst,
+		regbase + offset + index * sizeof(struct msm_rpm_stats_data),
+		8);
+	return dst;
+}
+
+static inline unsigned long  msm_rpmstats_read_register(void __iomem *regbase,
+		int index, int offset)
+{
+	return  readl_relaxed(regbase + index * 12 + (offset + 1) * 4);
+}
+
+static inline uint64_t get_time_in_msec(u64 counter)
+{
+	do_div(counter, MSM_ARCH_TIMER_FREQ);
+	counter *= MSEC_PER_SEC;
+	return counter;
+}
+
+static inline uint64_t get_time_in_sec(u64 counter)
+{
+	do_div(counter, MSM_ARCH_TIMER_FREQ);
+	return counter;
+}
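+
+/*
+ * Example: the arch counter ticks at 19.2 MHz, so a delta of 48000000
+ * ticks is 48000000 / 19200000 = 2 s, reported as 2000 by
+ * get_time_in_msec() and as 2 by get_time_in_sec(). Sub-second remainders
+ * are truncated by the do_div().
+ */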
+
+static void rpm_stats_copy_data(struct msm_rpm_stats_data *data, int idx)
+{
+	void __iomem *reg = ss.rpm_stats_addr;
+
+	data->stat_type = msm_rpmstats_read_long_register(reg, idx,
+			offsetof(struct msm_rpm_stats_data, stat_type));
+
+	data->count = msm_rpmstats_read_long_register(reg, idx,
+			offsetof(struct msm_rpm_stats_data, count));
+	data->last_entered_at = msm_rpmstats_read_quad_register(reg,
+			idx, offsetof(struct msm_rpm_stats_data,
+				last_entered_at));
+	data->last_exited_at = msm_rpmstats_read_quad_register(reg,
+			idx, offsetof(struct msm_rpm_stats_data,
+				last_exited_at));
+
+	data->accumulated = msm_rpmstats_read_quad_register(reg,
+			idx, offsetof(struct msm_rpm_stats_data,
+				accumulated));
+	data->client_votes = msm_rpmstats_read_long_register(reg,
+			idx, offsetof(struct msm_rpm_stats_data,
+				client_votes));
+}
+
+static int rpm_stats_write_buf(struct seq_file *m)
+{
+	struct msm_rpm_stats_data rs;
+	int i;
+
+	for (i = 0; i < NUM_STATS_RECORD; i++) {
+		char stat_type[5] = {0};
+		uint64_t time;
+
+		rpm_stats_copy_data(&rs, i);
+		memcpy(stat_type, &rs.stat_type, sizeof(uint32_t));
+		seq_printf(m, "RPM Mode:%s\n", stat_type);
+		seq_printf(m, "\tcount:%d\n", rs.count);
+
+		time = rs.last_exited_at - rs.last_entered_at;
+		time = get_time_in_msec(time);
+		seq_printf(m, "\ttime in last mode(msec):%llu\n", time);
+
+		time = arch_counter_get_cntvct() - rs.last_exited_at;
+		time = get_time_in_sec(time);
+		seq_printf(m, "\ttime since last mode(sec):%llu\n", time);
+
+		time = get_time_in_msec(rs.accumulated);
+		seq_printf(m, "\tactual last sleep(msec):%llu\n", time);
+
+		seq_printf(m, "\tclient votes: %#010x\n\n", rs.client_votes);
+	}
+
+	return 0;
+}
+
+static void master_stats_copy_data(struct rpm_master_stats_data *data, int idx)
+{
+	data->shutdown_req = readq_relaxed(ss.rpm_master_addr +
+			idx * MASTER_OFFSET_ADDRESS +
+			offsetof(struct rpm_master_stats_data, shutdown_req));
+
+	data->wakeup_ind = readq_relaxed(ss.rpm_master_addr +
+			(idx * MASTER_OFFSET_ADDRESS +
+			 offsetof(struct rpm_master_stats_data, wakeup_ind)));
+
+	data->bringup_req = readq_relaxed(ss.rpm_master_addr +
+			(idx * MASTER_OFFSET_ADDRESS +
+			 offsetof(struct rpm_master_stats_data, bringup_req)));
+
+	data->bringup_ack = readq_relaxed(ss.rpm_master_addr +
+			(idx * MASTER_OFFSET_ADDRESS +
+			 offsetof(struct rpm_master_stats_data, bringup_ack)));
+
+	data->xo_last_entered_at = readq_relaxed(ss.rpm_master_addr +
+			(idx * MASTER_OFFSET_ADDRESS +
+			 offsetof(struct rpm_master_stats_data,
+				 xo_last_entered_at)));
+
+	data->xo_last_exited_at = readq_relaxed(ss.rpm_master_addr +
+			(idx * MASTER_OFFSET_ADDRESS +
+			 offsetof(struct rpm_master_stats_data,
+				 xo_last_exited_at)));
+
+	data->xo_accumulated_duration =
+		readq_relaxed(ss.rpm_master_addr +
+				(idx * MASTER_OFFSET_ADDRESS +
+				 offsetof(struct rpm_master_stats_data,
+					 xo_accumulated_duration)));
+
+	data->last_sleep_transition_duration =
+		readl_relaxed(ss.rpm_master_addr +
+				(idx * MASTER_OFFSET_ADDRESS +
+				 offsetof(struct rpm_master_stats_data,
+					 last_sleep_transition_duration)));
+
+	data->last_wake_transition_duration =
+		readl_relaxed(ss.rpm_master_addr +
+				(idx * MASTER_OFFSET_ADDRESS +
+				 offsetof(struct rpm_master_stats_data,
+					 last_wake_transition_duration)));
+
+	data->xo_count =
+		readl_relaxed(ss.rpm_master_addr +
+				(idx * MASTER_OFFSET_ADDRESS +
+				 offsetof(struct rpm_master_stats_data,
+					 xo_count)));
+
+	data->wakeup_reason = readl_relaxed(ss.rpm_master_addr +
+			(idx * MASTER_OFFSET_ADDRESS +
+			 offsetof(struct rpm_master_stats_data,
+				 wakeup_reason)));
+
+	data->numshutdowns = readl_relaxed(ss.rpm_master_addr +
+			(idx * MASTER_OFFSET_ADDRESS +
+			 offsetof(struct rpm_master_stats_data, numshutdowns)));
+
+	data->active_cores = readl_relaxed(ss.rpm_master_addr +
+			(idx * MASTER_OFFSET_ADDRESS) +
+			offsetof(struct rpm_master_stats_data, active_cores));
+
+}
+
+static int master_stats_write_buf(struct seq_file *m)
+{
+	struct rpm_master_stats_data ms;
+	int i;
+
+	for (i = 0; i < ss.num_masters; i++) {
+
+		master_stats_copy_data(&ms, i);
+
+		seq_printf(m, "%s\n", ss.master[i]);
+		seq_printf(m, "\tShutdown Request:0x%llX\n", ms.shutdown_req);
+		seq_printf(m, "\tWakeup interrupt:0x%llX\n", ms.wakeup_ind);
+		seq_printf(m, "\tBringup Req::0x%llX\n", ms.bringup_req);
+		seq_printf(m, "\tBringup Ack:0x%llX\n", ms.bringup_ack);
+		seq_printf(m, "\tLast XO Entry:0x%llX\n",
+				ms.xo_last_entered_at);
+		seq_printf(m, "\tLast XO Exit:0x%llX\n", ms.xo_last_exited_at);
+		seq_printf(m, "\tAccumulated XO duration:0x%llX\n",
+			ms.xo_accumulated_duration);
+		seq_printf(m, "\tLast sleep transition duration:0x%x\n",
+			ms.last_sleep_transition_duration);
+		seq_printf(m, "\tLast wake transition duration:0x%x\n",
+			ms.last_wake_transition_duration);
+		seq_printf(m, "\tXO Count:0x%x\n", ms.xo_count);
+		seq_printf(m, "\tWakeup Reason:0x%s\n",
+			ms.wakeup_reason ? "Sched" : "Rude");
+		seq_printf(m, "\tNum Shutdowns:0x%x\n", ms.numshutdowns);
+		seq_printf(m, "\tActive Cores:0x%x\n", ms.active_cores);
+	}
+	return 0;
+}
+
+static int ss_file_show(struct seq_file *m, void *v)
+{
+	int ret = 0;
+
+	ret = rpm_stats_write_buf(m);
+	if (ret)
+		return ret;
+
+	return master_stats_write_buf(m);
+}
+
+static int ss_file_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, ss_file_show, inode->i_private);
+}
+
+static const struct file_operations ss_file_ops = {
+	.owner = THIS_MODULE,
+	.open = ss_file_open,
+	.read = seq_read,
+	.llseek = no_llseek,
+	.release = single_release,
+};
+
+static int msm_rpmstats_probe(struct platform_device *pdev)
+{
+	struct dentry *dent = NULL;
+	struct device_node *node = NULL;
+	uint32_t offset = 0;
+	void __iomem *offset_addr = NULL;
+	struct resource res;
+	int i, ret = 0;
+
+	if (!pdev)
+		return -EINVAL;
+
+	node = of_parse_phandle(pdev->dev.of_node, "qcom,rpm-msg-ram", 0);
+	if (!node)
+		return -EINVAL;
+
+	ret = of_address_to_resource(node, 1, &res);
+	if (ret)
+		return ret;
+
+	offset_addr = ioremap_nocache(res.start + STATS_OFFSET,
+			resource_size(&res));
+	if (!offset_addr)
+		return -ENOMEM;
+
+	offset = readl_relaxed(offset_addr);
+	iounmap(offset_addr);
+
+	ret = of_address_to_resource(node, 0, &res);
+	if (ret)
+		return ret;
+
+	ss.rpm_stats_addr = devm_ioremap_nocache(&pdev->dev,
+			res.start + offset,
+			resource_size(&res));
+
+	if (!ss.rpm_stats_addr)
+		return -ENOMEM;
+
+	node = of_parse_phandle(pdev->dev.of_node, "qcom,rpm-code-ram", 0);
+	if (!node)
+		return -EINVAL;
+
+	ret = of_address_to_resource(node, 0, &res);
+	if (ret)
+		return ret;
+
+	ss.rpm_master_addr = devm_ioremap_nocache(
+			&pdev->dev, res.start + MASTER_START_ADDR_OFFSET,
+			resource_size(&res));
+
+	if (!ss.rpm_master_addr)
+		return -ENOMEM;
+
+	ss.num_masters = of_property_count_strings(pdev->dev.of_node,
+			"qcom,masters");
+	if (ss.num_masters < 0) {
+		dev_err(&pdev->dev, "Failed to get number of masters =%d\n",
+						ss.num_masters);
+		return -EINVAL;
+	}
+
+	ss.master = devm_kzalloc(&pdev->dev,
+			sizeof(char *) * ss.num_masters, GFP_KERNEL);
+
+	if (!ss.master) {
+		dev_err(&pdev->dev, "%s:Failed to allocated memory\n",
+				__func__);
+		return -ENOMEM;
+	}
+
+	/*
+	 * Read master names from DT
+	 */
+	for (i = 0; i < ss.num_masters; i++) {
+		const char *master_name;
+
+		of_property_read_string_index(pdev->dev.of_node,
+				"qcom,masters",
+				i, &master_name);
+		ss.master[i] = devm_kzalloc(&pdev->dev,
+				sizeof(char) * strlen(master_name) + 1,
+				GFP_KERNEL);
+		if (!ss.master[i]) {
+			pr_err("%s:Failed to get memory\n", __func__);
+			return -ENOMEM;
+		}
+		strlcpy(ss.master[i], master_name,
+					strlen(master_name) + 1);
+	}
+
+	dent = debugfs_create_file("system_stats", S_IRUGO, NULL,
+			&ss, &ss_file_ops);
+
+	if (!dent) {
+		pr_err("%s: ERROR rpm_stats debugfs_create_file fail\n",
+				__func__);
+		return -ENOMEM;
+	}
+
+	platform_set_drvdata(pdev, dent);
+	return 0;
+}
+
+static int msm_rpmstats_remove(struct platform_device *pdev)
+{
+	struct dentry *dent;
+
+	dent = platform_get_drvdata(pdev);
+	debugfs_remove(dent);
+	return 0;
+}
+
+static const struct of_device_id rpm_stats_table[] = {
+	       {.compatible = "qcom,system-stats"},
+	       {},
+};
+
+static struct platform_driver msm_system_stats_driver = {
+	.probe = msm_rpmstats_probe,
+	.remove = msm_rpmstats_remove,
+	.driver = {
+		.name = "msm_stat",
+		.owner = THIS_MODULE,
+		.of_match_table = rpm_stats_table,
+	},
+};
+
+module_platform_driver(msm_system_stats_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("MSM Statistics driver");
+MODULE_ALIAS("platform:stat_log");
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/soc/qcom/tracer_pkt.c	2019-10-29 09:26:24.825214745 +0100
@@ -0,0 +1,255 @@
+/* Copyright (c) 2015, 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#include <asm/arch_timer.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <soc/qcom/tracer_pkt.h>
+#define CREATE_TRACE_POINTS
+#include "tracer_pkt_private.h"
+
+static unsigned qdss_tracing;
+module_param_named(qdss_tracing_enable, qdss_tracing,
+		   uint, S_IRUGO | S_IWUSR | S_IWGRP);
+
+#define TRACER_PKT_VERSION 1
+#define MAX_CC_WLEN 3
+#define HEX_DUMP_HDR "Tracer Packet:"
+
+/**
+ * struct tracer_pkt_hdr - data structure defining the tracer packet header
+ * @version:		Tracer Packet version.
+ * @reserved:		Reserved fields in the tracer packet.
+ * @id_valid:		Indicates the presence of a subsystem & transport ID.
+ * @qdss_tracing:	Enable the event logging to QDSS.
+ * @ccl:		Client cookie/private information length in words.
+ * @pkt_len:		Length of the tracer packet in words.
+ * @pkt_offset:		Offset into the packet to log events, in words.
+ * @clnt_event_cfg:	Client-specific event configuration bit mask.
+ * @glink_event_cfg:	G-Link-specific event configuration bit mask.
+ * @base_ts:		Base timestamp when the tracer packet is initialized.
+ * @cc:			Client cookie/private information.
+ */
+struct tracer_pkt_hdr {
+	uint16_t version:4;
+	uint16_t reserved:8;
+	uint16_t id_valid:1;
+	uint16_t qdss_tracing:1;
+	uint16_t ccl:2;
+	uint16_t pkt_len;
+	uint16_t pkt_offset;
+	uint16_t clnt_event_cfg;
+	uint32_t glink_event_cfg;
+	u64 base_ts;
+	uint32_t cc[MAX_CC_WLEN];
+} __attribute__((__packed__));
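+
+/*
+ * Packed, the header is 32 bytes: 2 bytes of bit-fields, three 2-byte
+ * fields, a 4-byte G-Link event mask, an 8-byte base timestamp and a
+ * 3-word client cookie. tracer_pkt_init() therefore starts pkt_offset at
+ * sizeof(*pkt_hdr) / sizeof(uint32_t) = 8 words; events are appended
+ * right after the header.
+ */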
+
+/**
+ * struct tracer_pkt_event - data structure defining the tracer packet event
+ * @event_id:	Event ID.
+ * @event_ts:	Timestamp at which the event occurred.
+ */
+struct tracer_pkt_event {
+	uint32_t event_id;
+	uint32_t event_ts;
+};
+
+/**
+ * tracer_pkt_init() - initialize the tracer packet
+ * @data:		Pointer to the buffer to be initialized with a tracer
+ *			packet.
+ * @data_len:		Length of the buffer.
+ * @client_event_cfg:	Client-specific event configuration mask.
+ * @glink_event_cfg:	G-Link-specific event configuration mask.
+ * @pkt_priv:		Private/Cookie information to be added to the tracer
+ *			packet.
+ * @pkt_priv_len:	Length of the private data.
+ *
+ * This function is used to initialize a buffer with the tracer packet header.
+ * The tracer packet header includes the data as passed by the elements in the
+ * parameters.
+ *
+ * Return: 0 on success, standard Linux error codes on failure.
+ */
+int tracer_pkt_init(void *data, size_t data_len,
+		    uint16_t client_event_cfg, uint32_t glink_event_cfg,
+		    void *pkt_priv, size_t pkt_priv_len)
+{
+	struct tracer_pkt_hdr *pkt_hdr;
+
+	if (!data || !data_len)
+		return -EINVAL;
+
+	if (!IS_ALIGNED(data_len, sizeof(uint32_t)))
+		return -EINVAL;
+
+	if (data_len < sizeof(*pkt_hdr))
+		return -ETOOSMALL;
+
+	pkt_hdr = (struct tracer_pkt_hdr *)data;
+	pkt_hdr->version = TRACER_PKT_VERSION;
+	pkt_hdr->reserved = 0;
+	pkt_hdr->id_valid = 0;
+	pkt_hdr->qdss_tracing = qdss_tracing ? true : false;
+	if (pkt_priv_len >= MAX_CC_WLEN * sizeof(uint32_t))
+		pkt_hdr->ccl = MAX_CC_WLEN;
+	else
+		pkt_hdr->ccl = pkt_priv_len/sizeof(uint32_t) +
+				(pkt_priv_len & (sizeof(uint32_t) - 1) ? 1 : 0);
+	pkt_hdr->pkt_len = data_len / sizeof(uint32_t);
+	pkt_hdr->pkt_offset = sizeof(*pkt_hdr) / sizeof(uint32_t);
+	pkt_hdr->clnt_event_cfg = client_event_cfg;
+	pkt_hdr->glink_event_cfg = glink_event_cfg;
+	pkt_hdr->base_ts = arch_counter_get_cntvct();
+	memcpy(pkt_hdr->cc, pkt_priv, pkt_hdr->ccl * sizeof(uint32_t));
+	return 0;
+}
+EXPORT_SYMBOL(tracer_pkt_init);
+
+/**
+ * tracer_pkt_set_event_cfg() - set the event configuration mask in the tracer
+ *				packet
+ * @data:		Pointer to the buffer to be initialized with event
+ *			configuration mask.
+ * @client_event_cfg:	Client-specific event configuration mask.
+ * @glink_event_cfg:	G-Link-specific event configuration mask.
+ *
+ * This function is used to initialize a buffer with the event configuration
+ * mask as passed by the elements in the parameters.
+ *
+ * Return: 0 on success, standard Linux error codes on failure.
+ */
+int tracer_pkt_set_event_cfg(void *data, uint16_t client_event_cfg,
+			     uint32_t glink_event_cfg)
+{
+	struct tracer_pkt_hdr *pkt_hdr;
+
+	if (!data)
+		return -EINVAL;
+
+	pkt_hdr = (struct tracer_pkt_hdr *)data;
+	if (unlikely(pkt_hdr->version != TRACER_PKT_VERSION))
+		return -EINVAL;
+
+	pkt_hdr->clnt_event_cfg = client_event_cfg;
+	pkt_hdr->glink_event_cfg = glink_event_cfg;
+	return 0;
+}
+EXPORT_SYMBOL(tracer_pkt_set_event_cfg);
+
+/**
+ * tracer_pkt_log_event() - log an event specific to the tracer packet
+ * @data:	Pointer to the buffer containing tracer packet.
+ * @event_id:	Event ID to be logged.
+ *
+ * This function is used to log an event specific to the tracer packet.
+ * The event is logged either into the tracer packet itself or a different
+ * tracing mechanism as configured.
+ *
+ * Return: 0 on success, standard Linux error codes on failure.
+ */
+int tracer_pkt_log_event(void *data, uint32_t event_id)
+{
+	struct tracer_pkt_hdr *pkt_hdr;
+	struct tracer_pkt_event event;
+
+	if (!data)
+		return -EINVAL;
+
+	pkt_hdr = (struct tracer_pkt_hdr *)data;
+	if (unlikely(pkt_hdr->version != TRACER_PKT_VERSION))
+		return -EINVAL;
+
+	if (qdss_tracing) {
+		trace_tracer_pkt_event(event_id, pkt_hdr->cc);
+		return 0;
+	}
+
+	if (unlikely((pkt_hdr->pkt_len - pkt_hdr->pkt_offset) *
+	    sizeof(uint32_t) < sizeof(event)))
+		return -ETOOSMALL;
+
+	event.event_id = event_id;
+	event.event_ts = (uint32_t)arch_counter_get_cntvct();
+	memcpy(data + (pkt_hdr->pkt_offset * sizeof(uint32_t)),
+		&event, sizeof(event));
+	pkt_hdr->pkt_offset += sizeof(event) / sizeof(uint32_t);
+	return 0;
+}
+EXPORT_SYMBOL(tracer_pkt_log_event);
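+
+/*
+ * Example (illustrative sketch): once a packet has been initialized
+ * with tracer_pkt_init(), events can be appended as the packet moves
+ * through the stack. The event ID below is a hypothetical
+ * client-defined value.
+ *
+ *	#define CLIENT_EVENT_TX_QUEUED	1
+ *
+ *	if (tracer_pkt_log_event(pkt, CLIENT_EVENT_TX_QUEUED))
+ *		pr_debug("tracer event could not be logged\n");
+ */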
+
+/**
+ * tracer_pkt_calc_hex_dump_size() - calculate the hex dump size of a tracer
+ *				     packet
+ * @data:	Pointer to the buffer containing tracer packet.
+ * @data_len:	Length of the tracer packet buffer.
+ *
+ * This function is used to calculate the length of the buffer required to
+ * hold the hex dump of the tracer packet.
+ *
+ * Return: Required hex dump buffer length on success, standard Linux
+ *	   error codes on failure.
+ */
+size_t tracer_pkt_calc_hex_dump_size(void *data, size_t data_len)
+{
+	size_t hex_dump_size;
+	struct tracer_pkt_hdr *pkt_hdr;
+
+	if (!data || !data_len)
+		return -EINVAL;
+
+	pkt_hdr = (struct tracer_pkt_hdr *)data;
+	if (unlikely(pkt_hdr->version != TRACER_PKT_VERSION))
+		return -EINVAL;
+
+	/*
+	 * Hex Dump Prefix + newline
+	 * 0x<first_word> + newline
+	 * ...
+	 * 0x<last_word> + newline + null-termination character.
+	 */
+	hex_dump_size = strlen(HEX_DUMP_HDR) + 1 + (pkt_hdr->pkt_len * 11) + 1;
+	return hex_dump_size;
+}
+EXPORT_SYMBOL(tracer_pkt_calc_hex_dump_size);
+
+/**
+ * tracer_pkt_hex_dump() - hex dump the tracer packet into a buffer
+ * @buf:	Buffer to contain the hex dump of the tracer packet.
+ * @buf_len:	Length of the hex dump buffer.
+ * @data:	Buffer containing the tracer packet.
+ * @data_len:	Length of the buffer containing the tracer packet.
+ *
+ * This function is used to dump the contents of the tracer packet into
+ * a buffer in a specific hexadecimal format. The hex dump buffer can then
+ * be dumped through debugfs.
+ *
+ * Return: 0 on success, standard Linux error codes on failure.
+ */
+int tracer_pkt_hex_dump(void *buf, size_t buf_len, void *data, size_t data_len)
+{
+	int i, j = 0;
+	char *dst = (char *)buf;
+
+	if (!buf || !buf_len || !data || !data_len)
+		return -EINVAL;
+
+	if (buf_len < tracer_pkt_calc_hex_dump_size(data, data_len))
+		return -EINVAL;
+
+	j = scnprintf(dst, buf_len, "%s\n", HEX_DUMP_HDR);
+	for (i = 0; i < data_len / sizeof(uint32_t); i++)
+		j += scnprintf(dst + j, buf_len - j, "0x%08x\n",
+				*((uint32_t *)data + i));
+	dst[j] = '\0';
+	return 0;
+}
+EXPORT_SYMBOL(tracer_pkt_hex_dump);
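+
+/*
+ * Example (illustrative sketch): the two functions above are meant to
+ * be used together; size the buffer first, then dump into it.
+ *
+ *	size_t len = tracer_pkt_calc_hex_dump_size(pkt, sizeof(pkt));
+ *	char *buf = kmalloc(len, GFP_KERNEL);
+ *
+ *	if (buf && !tracer_pkt_hex_dump(buf, len, pkt, sizeof(pkt)))
+ *		pr_debug("%s", buf);
+ *	kfree(buf);
+ */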
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/soc/qcom/tracer_pkt_private.h	2019-01-22 16:16:26.679275167 +0100
@@ -0,0 +1,50 @@
+/* Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#if !defined(_TRACER_PKT_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACER_PKT_TRACE_H
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM tracer_pkt
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE tracer_pkt_private
+
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(tracer_pkt_event,
+
+	TP_PROTO(uint32_t id, uint32_t *cc),
+
+	TP_ARGS(id, cc),
+
+	TP_STRUCT__entry(
+		__field(uint32_t, id)
+		__field(uint32_t, cc1)
+		__field(uint32_t, cc2)
+		__field(uint32_t, cc3)
+	),
+
+	TP_fast_assign(
+		__entry->id = id;
+		__entry->cc1 = cc[0];
+		__entry->cc2 = cc[1];
+		__entry->cc3 = cc[2];
+	),
+
+	TP_printk("CC - 0x%08x:0x%08x:0x%08x, ID - %d",
+		__entry->cc1, __entry->cc2, __entry->cc3, __entry->id)
+);
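+
+/*
+ * When qdss_tracing is enabled, tracer_pkt_log_event() routes events to
+ * this tracepoint instead of the packet itself. A sketch of how the
+ * events can be observed from userspace through the standard ftrace
+ * interface (paths assume a debugfs-mounted tracefs):
+ *
+ *	echo 1 > /sys/kernel/debug/tracing/events/tracer_pkt/enable
+ *	cat /sys/kernel/debug/tracing/trace_pipe
+ */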
+#endif /* _TRACER_PKT_TRACE_H */
+
+#include <trace/define_trace.h>
+
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/soc/qcom/watchdog_v2.c	2019-10-29 09:26:24.825214745 +0100
@@ -0,0 +1,898 @@
+/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/jiffies.h>
+#include <linux/kthread.h>
+#include <linux/mutex.h>
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/percpu.h>
+#include <linux/of.h>
+#include <linux/cpu.h>
+#include <linux/cpu_pm.h>
+#include <linux/platform_device.h>
+#include <linux/wait.h>
+#include <soc/qcom/scm.h>
+#include <soc/qcom/memory_dump.h>
+#include <soc/qcom/minidump.h>
+#include <soc/qcom/watchdog.h>
+
+#define MODULE_NAME "msm_watchdog"
+#define WDT0_ACCSCSSNBARK_INT 0
+#define TCSR_WDT_CFG	0x30
+#define WDT0_RST	0x04
+#define WDT0_EN		0x08
+#define WDT0_STS	0x0C
+#define WDT0_BARK_TIME	0x10
+#define WDT0_BITE_TIME	0x14
+
+#define WDOG_ABSENT	0
+
+#define EN		0
+#define UNMASKED_INT_EN 1
+
+#define MASK_SIZE		32
+#define SCM_SET_REGSAVE_CMD	0x2
+#define SCM_SVC_SEC_WDOG_DIS	0x7
+#define MAX_CPU_CTX_SIZE	2048
+
+static struct msm_watchdog_data *wdog_data;
+
+static int cpu_idle_pc_state[NR_CPUS];
+
+/*
+ * user_pet_enable:
+ *	Require userspace to write to a sysfs file every pet_time milliseconds.
+ *	Disabled by default on boot.
+ */
+struct msm_watchdog_data {
+	phys_addr_t phys_base;
+	size_t size;
+	void __iomem *base;
+	void __iomem *wdog_absent_base;
+	struct device *dev;
+	unsigned int pet_time;
+	unsigned int bark_time;
+	unsigned int bark_irq;
+	unsigned int bite_irq;
+	bool do_ipi_ping;
+	bool wakeup_irq_enable;
+	unsigned long long last_pet;
+	unsigned int min_slack_ticks;
+	unsigned long long min_slack_ns;
+	void *scm_regsave;
+	cpumask_t alive_mask;
+	struct mutex disable_lock;
+	bool irq_ppi;
+	struct msm_watchdog_data __percpu **wdog_cpu_dd;
+	struct notifier_block panic_blk;
+
+	bool enabled;
+	bool user_pet_enabled;
+
+	struct task_struct *watchdog_task;
+	struct timer_list pet_timer;
+	wait_queue_head_t pet_complete;
+
+	bool timer_expired;
+	bool user_pet_complete;
+	unsigned int scandump_size;
+};
+
+/*
+ * On the kernel command line specify
+ * watchdog_v2.enable=1 to enable the watchdog.
+ * By default the watchdog is turned on.
+ */
+static int enable = 1;
+module_param(enable, int, 0);
+
+/*
+ * On the kernel command line specify
+ * watchdog_v2.WDT_HZ=<clock rate in Hz> to set the watchdog
+ * clock rate. By default it is set to 32765.
+ */
+static long WDT_HZ = 32765;
+module_param(WDT_HZ, long, 0);
+
+/*
+ * On the kernel command line specify
+ * watchdog_v2.ipi_opt_en=1 to enable the watchdog ipi ping
+ * optimization. By default it is turned off.
+ */
+static int ipi_opt_en;
+module_param(ipi_opt_en, int, 0);
+
+static void dump_cpu_alive_mask(struct msm_watchdog_data *wdog_dd)
+{
+	static char alive_mask_buf[MASK_SIZE];
+
+	scnprintf(alive_mask_buf, MASK_SIZE, "%*pb1",
+		  cpumask_pr_args(&wdog_dd->alive_mask));
+	printk(KERN_INFO "cpu alive mask from last pet %s\n", alive_mask_buf);
+}
+
+static int msm_watchdog_suspend(struct device *dev)
+{
+	struct msm_watchdog_data *wdog_dd =
+			(struct msm_watchdog_data *)dev_get_drvdata(dev);
+	if (!enable)
+		return 0;
+	__raw_writel(1, wdog_dd->base + WDT0_RST);
+	if (wdog_dd->wakeup_irq_enable) {
+		/* Make sure register write is complete before proceeding */
+		mb();
+		wdog_dd->last_pet = sched_clock();
+		return 0;
+	}
+	__raw_writel(0, wdog_dd->base + WDT0_EN);
+	mb();
+	wdog_dd->enabled = false;
+	wdog_dd->last_pet = sched_clock();
+	return 0;
+}
+
+static int msm_watchdog_resume(struct device *dev)
+{
+	struct msm_watchdog_data *wdog_dd =
+			(struct msm_watchdog_data *)dev_get_drvdata(dev);
+	if (!enable)
+		return 0;
+	if (wdog_dd->wakeup_irq_enable) {
+		__raw_writel(1, wdog_dd->base + WDT0_RST);
+		/* Make sure register write is complete before proceeding */
+		mb();
+		wdog_dd->last_pet = sched_clock();
+		return 0;
+	}
+	__raw_writel(1, wdog_dd->base + WDT0_EN);
+	__raw_writel(1, wdog_dd->base + WDT0_RST);
+	mb();
+	wdog_dd->enabled = true;
+	wdog_dd->last_pet = sched_clock();
+	return 0;
+}
+
+static int panic_wdog_handler(struct notifier_block *this,
+			      unsigned long event, void *ptr)
+{
+	struct msm_watchdog_data *wdog_dd = container_of(this,
+				struct msm_watchdog_data, panic_blk);
+	if (panic_timeout == 0) {
+		__raw_writel(0, wdog_dd->base + WDT0_EN);
+		mb();
+	} else {
+		__raw_writel(WDT_HZ * (panic_timeout + 10),
+				wdog_dd->base + WDT0_BARK_TIME);
+		__raw_writel(WDT_HZ * (panic_timeout + 10),
+				wdog_dd->base + WDT0_BITE_TIME);
+		__raw_writel(1, wdog_dd->base + WDT0_RST);
+	}
+	return NOTIFY_DONE;
+}
+
+static void wdog_disable(struct msm_watchdog_data *wdog_dd)
+{
+	__raw_writel(0, wdog_dd->base + WDT0_EN);
+	mb();
+	if (wdog_dd->irq_ppi) {
+		disable_percpu_irq(wdog_dd->bark_irq);
+		free_percpu_irq(wdog_dd->bark_irq, wdog_dd->wdog_cpu_dd);
+	} else
+		devm_free_irq(wdog_dd->dev, wdog_dd->bark_irq, wdog_dd);
+	enable = 0;
+	/* Ensure all CPUs see the update to enable */
+	smp_mb();
+	atomic_notifier_chain_unregister(&panic_notifier_list,
+						&wdog_dd->panic_blk);
+	del_timer_sync(&wdog_dd->pet_timer);
+	/* may be suspended after the first write above */
+	__raw_writel(0, wdog_dd->base + WDT0_EN);
+	mb();
+	wdog_dd->enabled = false;
+	pr_info("MSM Apps Watchdog deactivated.\n");
+}
+
+static ssize_t wdog_disable_get(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	int ret;
+	struct msm_watchdog_data *wdog_dd = dev_get_drvdata(dev);
+
+	mutex_lock(&wdog_dd->disable_lock);
+	ret = snprintf(buf, PAGE_SIZE, "%d\n", enable == 0 ? 1 : 0);
+	mutex_unlock(&wdog_dd->disable_lock);
+	return ret;
+}
+
+static ssize_t wdog_disable_set(struct device *dev,
+				struct device_attribute *attr,
+				const char *buf, size_t count)
+{
+	int ret;
+	u8 disable;
+	struct msm_watchdog_data *wdog_dd = dev_get_drvdata(dev);
+
+	ret = kstrtou8(buf, 10, &disable);
+	if (ret) {
+		dev_err(wdog_dd->dev, "invalid user input\n");
+		return ret;
+	}
+	if (disable == 1) {
+		mutex_lock(&wdog_dd->disable_lock);
+		if (enable == 0) {
+			pr_info("MSM Apps Watchdog already disabled\n");
+			mutex_unlock(&wdog_dd->disable_lock);
+			return count;
+		}
+		disable = 1;
+		if (!is_scm_armv8()) {
+			ret = scm_call(SCM_SVC_BOOT, SCM_SVC_SEC_WDOG_DIS,
+				       &disable, sizeof(disable), NULL, 0);
+		} else {
+			struct scm_desc desc = {0};
+			desc.args[0] = 1;
+			desc.arginfo = SCM_ARGS(1);
+			ret = scm_call2(SCM_SIP_FNID(SCM_SVC_BOOT,
+					SCM_SVC_SEC_WDOG_DIS), &desc);
+		}
+		if (ret) {
+			dev_err(wdog_dd->dev,
+					"Failed to deactivate secure wdog\n");
+			mutex_unlock(&wdog_dd->disable_lock);
+			return -EIO;
+		}
+		wdog_disable(wdog_dd);
+		mutex_unlock(&wdog_dd->disable_lock);
+	} else {
+		pr_err("invalid operation, only disable = 1 supported\n");
+		return -EINVAL;
+	}
+	return count;
+}
+
+static DEVICE_ATTR(disable, S_IWUSR | S_IRUSR, wdog_disable_get,
+							wdog_disable_set);
+
+/*
+ * Userspace Watchdog Support:
+ * Write 1 to the "user_pet_enabled" file to enable hw support for a
+ * userspace watchdog.
+ * Userspace is required to pet the watchdog by continuing to write 1
+ * to this file in the expected interval.
+ * Userspace may disable this requirement by writing 0 to this same
+ * file.
+ */
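+
+/*
+ * A minimal userspace pet loop might look as follows (illustrative
+ * sketch only; the sysfs path depends on the platform device name and
+ * the helper names are hypothetical):
+ *
+ *	int fd = open("/sys/devices/.../user_pet_enabled", O_WRONLY);
+ *
+ *	write(fd, "1", 1);		// enable and pet once
+ *	while (still_healthy()) {
+ *		sleep_less_than_pet_time();
+ *		write(fd, "1", 1);	// pet again within pet_time
+ *	}
+ *	write(fd, "0", 1);		// drop the userspace requirement
+ */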
+static void __wdog_user_pet(struct msm_watchdog_data *wdog_dd)
+{
+	wdog_dd->user_pet_complete = true;
+	wake_up(&wdog_dd->pet_complete);
+}
+
+static ssize_t wdog_user_pet_enabled_get(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	int ret;
+	struct msm_watchdog_data *wdog_dd = dev_get_drvdata(dev);
+
+	ret = snprintf(buf, PAGE_SIZE, "%d\n",
+			wdog_dd->user_pet_enabled);
+	return ret;
+}
+
+static ssize_t wdog_user_pet_enabled_set(struct device *dev,
+				struct device_attribute *attr,
+				const char *buf, size_t count)
+{
+	int ret;
+	struct msm_watchdog_data *wdog_dd = dev_get_drvdata(dev);
+
+	ret = strtobool(buf, &wdog_dd->user_pet_enabled);
+	if (ret) {
+		dev_err(wdog_dd->dev, "invalid user input\n");
+		return ret;
+	}
+
+	__wdog_user_pet(wdog_dd);
+
+	return count;
+}
+
+static DEVICE_ATTR(user_pet_enabled, S_IWUSR | S_IRUSR,
+		wdog_user_pet_enabled_get, wdog_user_pet_enabled_set);
+
+static ssize_t wdog_pet_time_get(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	int ret;
+	struct msm_watchdog_data *wdog_dd = dev_get_drvdata(dev);
+
+	ret = snprintf(buf, PAGE_SIZE, "%d\n", wdog_dd->pet_time);
+	return ret;
+}
+
+static DEVICE_ATTR(pet_time, S_IRUSR, wdog_pet_time_get, NULL);
+
+static void pet_watchdog(struct msm_watchdog_data *wdog_dd)
+{
+	int slack, i, count, prev_count = 0;
+	unsigned long long time_ns;
+	unsigned long long slack_ns;
+	unsigned long long bark_time_ns = wdog_dd->bark_time * 1000000ULL;
+
+	for (i = 0; i < 2; i++) {
+		count = (__raw_readl(wdog_dd->base + WDT0_STS) >> 1) & 0xFFFFF;
+		if (count != prev_count) {
+			prev_count = count;
+			i = 0;
+		}
+	}
+	slack = ((wdog_dd->bark_time * WDT_HZ) / 1000) - count;
+	if (slack < wdog_dd->min_slack_ticks)
+		wdog_dd->min_slack_ticks = slack;
+	__raw_writel(1, wdog_dd->base + WDT0_RST);
+	time_ns = sched_clock();
+	slack_ns = (wdog_dd->last_pet + bark_time_ns) - time_ns;
+	if (slack_ns < wdog_dd->min_slack_ns)
+		wdog_dd->min_slack_ns = slack_ns;
+	wdog_dd->last_pet = time_ns;
+}
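+
+/*
+ * Worked example of the slack arithmetic above (hypothetical values):
+ * with bark_time = 11000 ms and WDT_HZ = 32765, the bark threshold is
+ * 11000 * 32765 / 1000 = 360415 ticks. If the status counter reads
+ * 100000 ticks when the pet arrives, slack = 360415 - 100000 = 260415
+ * ticks, i.e. the pet came in with roughly 8 seconds to spare.
+ */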
+
+static void keep_alive_response(void *info)
+{
+	int cpu = smp_processor_id();
+	struct msm_watchdog_data *wdog_dd = (struct msm_watchdog_data *)info;
+	cpumask_set_cpu(cpu, &wdog_dd->alive_mask);
+	smp_mb();
+}
+
+/*
+ * If this function does not return, it implies that one of the
+ * other CPUs is not responsive.
+ */
+static void ping_other_cpus(struct msm_watchdog_data *wdog_dd)
+{
+	int cpu;
+	cpumask_clear(&wdog_dd->alive_mask);
+	smp_mb();
+	for_each_cpu(cpu, cpu_online_mask) {
+		if (!cpu_idle_pc_state[cpu] && !cpu_isolated(cpu))
+			smp_call_function_single(cpu, keep_alive_response,
+						 wdog_dd, 1);
+	}
+}
+
+static void pet_task_wakeup(unsigned long data)
+{
+	struct msm_watchdog_data *wdog_dd =
+		(struct msm_watchdog_data *)data;
+	wdog_dd->timer_expired = true;
+	wake_up(&wdog_dd->pet_complete);
+}
+
+static __ref int watchdog_kthread(void *arg)
+{
+	struct msm_watchdog_data *wdog_dd =
+		(struct msm_watchdog_data *)arg;
+	unsigned long delay_time = 0;
+	struct sched_param param = {.sched_priority = MAX_RT_PRIO-1};
+
+	sched_setscheduler(current, SCHED_FIFO, &param);
+	while (!kthread_should_stop()) {
+		while (wait_event_interruptible(
+			wdog_dd->pet_complete,
+			wdog_dd->timer_expired) != 0)
+			;
+
+		if (wdog_dd->do_ipi_ping)
+			ping_other_cpus(wdog_dd);
+
+		while (wait_event_interruptible(
+			wdog_dd->pet_complete,
+			wdog_dd->user_pet_complete) != 0)
+			;
+
+		wdog_dd->timer_expired = false;
+		wdog_dd->user_pet_complete = !wdog_dd->user_pet_enabled;
+
+		if (enable) {
+			delay_time = msecs_to_jiffies(wdog_dd->pet_time);
+			pet_watchdog(wdog_dd);
+		}
+		/*
+		 * Check enable again before scheduling;
+		 * it could have been changed on another CPU.
+		 */
+		mod_timer(&wdog_dd->pet_timer, jiffies + delay_time);
+	}
+	return 0;
+}
+
+static int wdog_cpu_pm_notify(struct notifier_block *self,
+			      unsigned long action, void *v)
+{
+	int cpu;
+
+	cpu = raw_smp_processor_id();
+
+	switch (action) {
+	case CPU_PM_ENTER:
+		cpu_idle_pc_state[cpu] = 1;
+		break;
+	case CPU_PM_ENTER_FAILED:
+	case CPU_PM_EXIT:
+		cpu_idle_pc_state[cpu] = 0;
+		break;
+	}
+
+	return NOTIFY_OK;
+}
+
+static struct notifier_block wdog_cpu_pm_nb = {
+	.notifier_call = wdog_cpu_pm_notify,
+};
+
+static int msm_watchdog_remove(struct platform_device *pdev)
+{
+	struct msm_watchdog_data *wdog_dd =
+			(struct msm_watchdog_data *)platform_get_drvdata(pdev);
+
+	if (ipi_opt_en)
+		cpu_pm_unregister_notifier(&wdog_cpu_pm_nb);
+
+	mutex_lock(&wdog_dd->disable_lock);
+	if (enable) {
+		wdog_disable(wdog_dd);
+	}
+	mutex_unlock(&wdog_dd->disable_lock);
+	device_remove_file(wdog_dd->dev, &dev_attr_disable);
+	if (wdog_dd->irq_ppi)
+		free_percpu(wdog_dd->wdog_cpu_dd);
+	printk(KERN_INFO "MSM Watchdog Exit - Deactivated\n");
+	del_timer_sync(&wdog_dd->pet_timer);
+	kthread_stop(wdog_dd->watchdog_task);
+	kfree(wdog_dd);
+	return 0;
+}
+
+void msm_trigger_wdog_bite(void)
+{
+	if (!wdog_data)
+		return;
+	pr_info("Causing a watchdog bite!");
+	__raw_writel(1, wdog_data->base + WDT0_BITE_TIME);
+	mb();
+	__raw_writel(1, wdog_data->base + WDT0_RST);
+	mb();
+	/* Delay to make sure bite occurs */
+	mdelay(10000);
+	pr_err("Wdog - STS: 0x%x, CTL: 0x%x, BARK TIME: 0x%x, BITE TIME: 0x%x",
+		__raw_readl(wdog_data->base + WDT0_STS),
+		__raw_readl(wdog_data->base + WDT0_EN),
+		__raw_readl(wdog_data->base + WDT0_BARK_TIME),
+		__raw_readl(wdog_data->base + WDT0_BITE_TIME));
+}
+
+static irqreturn_t wdog_bark_handler(int irq, void *dev_id)
+{
+	struct msm_watchdog_data *wdog_dd = (struct msm_watchdog_data *)dev_id;
+	unsigned long nanosec_rem;
+	unsigned long long t = sched_clock();
+
+	nanosec_rem = do_div(t, 1000000000);
+	printk(KERN_INFO "Watchdog bark! Now = %lu.%06lu\n", (unsigned long) t,
+		nanosec_rem / 1000);
+
+	nanosec_rem = do_div(wdog_dd->last_pet, 1000000000);
+	printk(KERN_INFO "Watchdog last pet at %lu.%06lu\n", (unsigned long)
+		wdog_dd->last_pet, nanosec_rem / 1000);
+	if (wdog_dd->do_ipi_ping)
+		dump_cpu_alive_mask(wdog_dd);
+	msm_trigger_wdog_bite();
+	panic("Failed to cause a watchdog bite! - Falling back to kernel panic!");
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t wdog_ppi_bark(int irq, void *dev_id)
+{
+	struct msm_watchdog_data *wdog_dd =
+			*(struct msm_watchdog_data **)(dev_id);
+	return wdog_bark_handler(irq, wdog_dd);
+}
+
+static void register_scan_dump(struct msm_watchdog_data *wdog_dd)
+{
+	static void *dump_addr;
+	int ret;
+	struct msm_dump_entry dump_entry;
+	struct msm_dump_data *dump_data;
+
+	if (!wdog_dd->scandump_size)
+		return;
+
+	dump_data = kzalloc(sizeof(struct msm_dump_data), GFP_KERNEL);
+	if (!dump_data)
+		return;
+	dump_addr = kzalloc(wdog_dd->scandump_size, GFP_KERNEL);
+	if (!dump_addr)
+		goto err0;
+
+	dump_data->addr = virt_to_phys(dump_addr);
+	dump_data->len = wdog_dd->scandump_size;
+	strlcpy(dump_data->name, "KSCANDUMP", sizeof(dump_data->name));
+
+	dump_entry.id = MSM_DUMP_DATA_SCANDUMP;
+	dump_entry.addr = virt_to_phys(dump_data);
+	ret = msm_dump_data_register(MSM_DUMP_TABLE_APPS, &dump_entry);
+	if (ret) {
+		pr_err("Registering scandump region failed\n");
+		goto err1;
+	}
+	return;
+err1:
+	kfree(dump_addr);
+err0:
+	kfree(dump_data);
+}
+
+static void configure_bark_dump(struct msm_watchdog_data *wdog_dd)
+{
+	int ret;
+	struct msm_client_dump cpu_dump_entry;
+	struct msm_dump_entry dump_entry;
+	struct msm_dump_data *cpu_data;
+	int cpu;
+	void *cpu_buf;
+	struct {
+		unsigned addr;
+		int len;
+	} cmd_buf;
+	struct scm_desc desc = {0};
+
+	if (MSM_DUMP_MAJOR(msm_dump_table_version()) == 1) {
+		wdog_dd->scm_regsave = (void *)__get_free_page(GFP_KERNEL);
+		if (wdog_dd->scm_regsave) {
+			/* scm_regsave may be a phys address > 4GB */
+			desc.args[0] = virt_to_phys(wdog_dd->scm_regsave);
+			cmd_buf.addr = virt_to_phys(wdog_dd->scm_regsave);
+			desc.args[1] = cmd_buf.len  = PAGE_SIZE;
+			desc.arginfo = SCM_ARGS(2, SCM_RW, SCM_VAL);
+
+			if (!is_scm_armv8())
+				ret = scm_call(SCM_SVC_UTIL,
+					       SCM_SET_REGSAVE_CMD, &cmd_buf,
+					       sizeof(cmd_buf), NULL, 0);
+			else
+				ret = scm_call2(SCM_SIP_FNID(SCM_SVC_UTIL,
+						SCM_SET_REGSAVE_CMD), &desc);
+			if (ret)
+				pr_err("Setting register save address failed.\n"
+				       "Registers won't be dumped on a dog "
+				       "bite\n");
+			cpu_dump_entry.id = MSM_CPU_CTXT;
+			cpu_dump_entry.start_addr =
+					virt_to_phys(wdog_dd->scm_regsave);
+			cpu_dump_entry.end_addr = cpu_dump_entry.start_addr +
+						  PAGE_SIZE;
+			ret = msm_dump_tbl_register(&cpu_dump_entry);
+			if (ret)
+				pr_err("Setting cpu dump region failed\n"
+				"Registers wont be dumped during cpu hang\n");
+		} else {
+			pr_err("Allocating register save space failed\n"
+			       "Registers won't be dumped on a dog bite\n");
+			/*
+			 * No need to bail if allocation fails. Simply don't
+			 * send the command, and the secure side will reset
+			 * without saving registers.
+			 */
+		}
+	} else {
+		cpu_data = kzalloc(sizeof(struct msm_dump_data) *
+				   num_present_cpus(), GFP_KERNEL);
+		if (!cpu_data) {
+			pr_err("cpu dump data structure allocation failed\n");
+			goto out0;
+		}
+		cpu_buf = kzalloc(MAX_CPU_CTX_SIZE * num_present_cpus(),
+				  GFP_KERNEL);
+		if (!cpu_buf) {
+			pr_err("cpu reg context space allocation failed\n");
+			goto out1;
+		}
+
+		for_each_cpu(cpu, cpu_present_mask) {
+			cpu_data[cpu].addr = virt_to_phys(cpu_buf +
+							cpu * MAX_CPU_CTX_SIZE);
+			cpu_data[cpu].len = MAX_CPU_CTX_SIZE;
+			snprintf(cpu_data[cpu].name, sizeof(cpu_data[cpu].name),
+				"KCPU_CTX%d", cpu);
+
+			dump_entry.id = MSM_DUMP_DATA_CPU_CTX + cpu;
+			dump_entry.addr = virt_to_phys(&cpu_data[cpu]);
+			ret = msm_dump_data_register(MSM_DUMP_TABLE_APPS,
+						     &dump_entry);
+			/*
+			 * Don't free the buffers in case of error since
+			 * registration may have succeeded for some cpus.
+			 */
+			if (ret)
+				pr_err("cpu %d reg dump setup failed\n", cpu);
+		}
+
+		register_scan_dump(wdog_dd);
+	}
+
+	return;
+out1:
+	kfree(cpu_data);
+out0:
+	return;
+}
+
+static int init_watchdog_sysfs(struct msm_watchdog_data *wdog_dd)
+{
+	int error = 0;
+
+	error |= device_create_file(wdog_dd->dev, &dev_attr_disable);
+
+	if (of_property_read_bool(wdog_dd->dev->of_node,
+					"qcom,userspace-watchdog")) {
+		error |= device_create_file(wdog_dd->dev, &dev_attr_pet_time);
+		error |= device_create_file(wdog_dd->dev,
+					    &dev_attr_user_pet_enabled);
+	}
+
+	if (error)
+		dev_err(wdog_dd->dev, "cannot create sysfs attribute\n");
+
+	return error;
+}
+
+static void init_watchdog_data(struct msm_watchdog_data *wdog_dd)
+{
+	unsigned long delay_time;
+	uint32_t val;
+	u64 timeout;
+	int ret;
+
+	/*
+	 * Disable the watchdog for cluster 1 so that cluster 0 watchdog will
+	 * be mapped to the entire sub-system.
+	 */
+	if (wdog_dd->wdog_absent_base)
+		__raw_writel(2, wdog_dd->wdog_absent_base + WDOG_ABSENT);
+
+	if (wdog_dd->irq_ppi) {
+		wdog_dd->wdog_cpu_dd = alloc_percpu(struct msm_watchdog_data *);
+		if (!wdog_dd->wdog_cpu_dd) {
+			dev_err(wdog_dd->dev, "fail to allocate cpu data\n");
+			return;
+		}
+		*raw_cpu_ptr(wdog_dd->wdog_cpu_dd) = wdog_dd;
+		ret = request_percpu_irq(wdog_dd->bark_irq, wdog_ppi_bark,
+					"apps_wdog_bark",
+					wdog_dd->wdog_cpu_dd);
+		if (ret) {
+			dev_err(wdog_dd->dev, "failed to request bark irq\n");
+			free_percpu(wdog_dd->wdog_cpu_dd);
+			return;
+		}
+	} else {
+		ret = devm_request_irq(wdog_dd->dev, wdog_dd->bark_irq,
+				wdog_bark_handler, IRQF_TRIGGER_RISING,
+						"apps_wdog_bark", wdog_dd);
+		if (ret) {
+			dev_err(wdog_dd->dev, "failed to request bark irq\n");
+			return;
+		}
+	}
+	delay_time = msecs_to_jiffies(wdog_dd->pet_time);
+	wdog_dd->min_slack_ticks = UINT_MAX;
+	wdog_dd->min_slack_ns = ULLONG_MAX;
+	configure_bark_dump(wdog_dd);
+	timeout = (wdog_dd->bark_time * WDT_HZ) / 1000;
+	__raw_writel(timeout, wdog_dd->base + WDT0_BARK_TIME);
+	__raw_writel(timeout + 3 * WDT_HZ, wdog_dd->base + WDT0_BITE_TIME);
+
+	wdog_dd->panic_blk.notifier_call = panic_wdog_handler;
+	atomic_notifier_chain_register(&panic_notifier_list,
+				       &wdog_dd->panic_blk);
+	mutex_init(&wdog_dd->disable_lock);
+	init_waitqueue_head(&wdog_dd->pet_complete);
+	wdog_dd->timer_expired = false;
+	wdog_dd->user_pet_complete = true;
+	wdog_dd->user_pet_enabled = false;
+	wake_up_process(wdog_dd->watchdog_task);
+	init_timer_deferrable(&wdog_dd->pet_timer);
+	wdog_dd->pet_timer.data = (unsigned long)wdog_dd;
+	wdog_dd->pet_timer.function = pet_task_wakeup;
+	wdog_dd->pet_timer.expires = jiffies + delay_time;
+	add_timer(&wdog_dd->pet_timer);
+
+	val = BIT(EN);
+	if (wdog_dd->wakeup_irq_enable)
+		val |= BIT(UNMASKED_INT_EN);
+	__raw_writel(val, wdog_dd->base + WDT0_EN);
+	__raw_writel(1, wdog_dd->base + WDT0_RST);
+	wdog_dd->last_pet = sched_clock();
+	wdog_dd->enabled = true;
+
+	init_watchdog_sysfs(wdog_dd);
+
+	if (wdog_dd->irq_ppi)
+		enable_percpu_irq(wdog_dd->bark_irq, 0);
+	if (ipi_opt_en)
+		cpu_pm_register_notifier(&wdog_cpu_pm_nb);
+	dev_info(wdog_dd->dev, "MSM Watchdog Initialized\n");
+	return;
+}
+
+static const struct of_device_id msm_wdog_match_table[] = {
+	{ .compatible = "qcom,msm-watchdog" },
+	{}
+};
+
+static void dump_pdata(struct msm_watchdog_data *pdata)
+{
+	dev_dbg(pdata->dev, "wdog bark_time %d", pdata->bark_time);
+	dev_dbg(pdata->dev, "wdog pet_time %d", pdata->pet_time);
+	dev_dbg(pdata->dev, "wdog perform ipi ping %d", pdata->do_ipi_ping);
+	dev_dbg(pdata->dev, "wdog base address is 0x%lx\n", (unsigned long)
+								pdata->base);
+}
+
+static int msm_wdog_dt_to_pdata(struct platform_device *pdev,
+					struct msm_watchdog_data *pdata)
+{
+	struct device_node *node = pdev->dev.of_node;
+	struct resource *res;
+	int ret;
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "wdt-base");
+	if (!res)
+		return -ENODEV;
+	pdata->size = resource_size(res);
+	pdata->phys_base = res->start;
+	if (unlikely(!(devm_request_mem_region(&pdev->dev, pdata->phys_base,
+					       pdata->size, "msm-watchdog")))) {
+
+		dev_err(&pdev->dev, "%s cannot reserve watchdog region\n",
+								__func__);
+		return -ENXIO;
+	}
+	pdata->base  = devm_ioremap(&pdev->dev, pdata->phys_base,
+							pdata->size);
+	if (!pdata->base) {
+		dev_err(&pdev->dev, "%s cannot map wdog register space\n",
+				__func__);
+		return -ENXIO;
+	}
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+					   "wdt-absent-base");
+	if (res) {
+		pdata->wdog_absent_base  = devm_ioremap(&pdev->dev, res->start,
+							 resource_size(res));
+		if (!pdata->wdog_absent_base) {
+			dev_err(&pdev->dev,
+				"cannot map wdog absent register space\n");
+			return -ENXIO;
+		}
+	} else {
+		dev_info(&pdev->dev, "wdog absent resource not present\n");
+	}
+
+	pdata->bark_irq = platform_get_irq(pdev, 0);
+	pdata->bite_irq = platform_get_irq(pdev, 1);
+	ret = of_property_read_u32(node, "qcom,bark-time", &pdata->bark_time);
+	if (ret) {
+		dev_err(&pdev->dev, "reading bark time failed\n");
+		return -ENXIO;
+	}
+	ret = of_property_read_u32(node, "qcom,pet-time", &pdata->pet_time);
+	if (ret) {
+		dev_err(&pdev->dev, "reading pet time failed\n");
+		return -ENXIO;
+	}
+	pdata->do_ipi_ping = of_property_read_bool(node, "qcom,ipi-ping");
+	if (!pdata->bark_time) {
+		dev_err(&pdev->dev, "%s watchdog bark time not setup\n",
+								__func__);
+		return -ENXIO;
+	}
+	if (!pdata->pet_time) {
+		dev_err(&pdev->dev, "%s watchdog pet time not setup\n",
+								__func__);
+		return -ENXIO;
+	}
+	pdata->wakeup_irq_enable = of_property_read_bool(node,
+							 "qcom,wakeup-enable");
+
+	if (of_property_read_u32(node, "qcom,scandump-size",
+				 &pdata->scandump_size))
+		dev_info(&pdev->dev,
+			 "No need to allocate memory for scandumps\n");
+
+	pdata->irq_ppi = irq_is_percpu(pdata->bark_irq);
+	dump_pdata(pdata);
+	return 0;
+}
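+
+/*
+ * Example device tree node consumed by the parser above (values are
+ * illustrative; addresses and interrupt specifiers are platform
+ * specific):
+ *
+ *	watchdog@f9017000 {
+ *		compatible = "qcom,msm-watchdog";
+ *		reg = <0xf9017000 0x1000>;
+ *		reg-names = "wdt-base";
+ *		interrupts = <0 3 0>, <0 4 0>;
+ *		qcom,bark-time = <11000>;
+ *		qcom,pet-time = <10000>;
+ *		qcom,ipi-ping;
+ *	};
+ */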
+
+static int msm_watchdog_probe(struct platform_device *pdev)
+{
+	int ret;
+	struct msm_watchdog_data *wdog_dd;
+	struct md_region md_entry;
+
+	if (!pdev->dev.of_node || !enable)
+		return -ENODEV;
+	wdog_dd = kzalloc(sizeof(struct msm_watchdog_data), GFP_KERNEL);
+	if (!wdog_dd)
+		return -ENOMEM;
+	ret = msm_wdog_dt_to_pdata(pdev, wdog_dd);
+	if (ret)
+		goto err;
+
+	wdog_data = wdog_dd;
+	wdog_dd->dev = &pdev->dev;
+	platform_set_drvdata(pdev, wdog_dd);
+	cpumask_clear(&wdog_dd->alive_mask);
+	wdog_dd->watchdog_task = kthread_create(watchdog_kthread, wdog_dd,
+			"msm_watchdog");
+	if (IS_ERR(wdog_dd->watchdog_task)) {
+		ret = PTR_ERR(wdog_dd->watchdog_task);
+		goto err;
+	}
+	init_watchdog_data(wdog_dd);
+
+	/* Add wdog info to minidump table */
+	strlcpy(md_entry.name, "KWDOGDATA", sizeof(md_entry.name));
+	md_entry.virt_addr = (uintptr_t)wdog_dd;
+	md_entry.phys_addr = virt_to_phys(wdog_dd);
+	md_entry.size = sizeof(*wdog_dd);
+	if (msm_minidump_add_region(&md_entry))
+		pr_info("Failed to add RTB in Minidump\n");
+
+	return 0;
+err:
+	kzfree(wdog_dd);
+	return ret;
+}
+
+static const struct dev_pm_ops msm_watchdog_dev_pm_ops = {
+	.suspend_noirq = msm_watchdog_suspend,
+	.resume_noirq = msm_watchdog_resume,
+};
+
+static struct platform_driver msm_watchdog_driver = {
+	.probe = msm_watchdog_probe,
+	.remove = msm_watchdog_remove,
+	.driver = {
+		.name = MODULE_NAME,
+		.owner = THIS_MODULE,
+		.pm = &msm_watchdog_dev_pm_ops,
+		.of_match_table = msm_wdog_match_table,
+	},
+};
+
+static int init_watchdog(void)
+{
+	return platform_driver_register(&msm_watchdog_driver);
+}
+
+pure_initcall(init_watchdog);
+MODULE_DESCRIPTION("MSM Watchdog Driver");
+MODULE_LICENSE("GPL v2");
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/soc/qcom/wcd-dsp-glink.c	2019-10-29 09:26:24.825214745 +0100
@@ -0,0 +1,1218 @@
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/errno.h>
+#include <linux/fs.h>
+#include <linux/uaccess.h>
+#include <linux/slab.h>
+#include <linux/list.h>
+#include <linux/cdev.h>
+#include <linux/platform_device.h>
+#include <linux/vmalloc.h>
+#include <soc/qcom/glink.h>
+#include "sound/wcd-dsp-glink.h"
+
+#define WDSP_GLINK_DRIVER_NAME "wcd-dsp-glink"
+#define WDSP_MAX_WRITE_SIZE (256 * 1024)
+#define WDSP_MAX_READ_SIZE (4 * 1024)
+#define WDSP_MAX_NO_OF_INTENTS (20)
+#define WDSP_MAX_NO_OF_CHANNELS (10)
+#define WDSP_WRITE_PKT_SIZE (sizeof(struct wdsp_write_pkt))
+#define WDSP_REG_PKT_SIZE (sizeof(struct wdsp_reg_pkt))
+#define WDSP_CMD_PKT_SIZE (sizeof(struct wdsp_cmd_pkt))
+#define WDSP_CH_CFG_SIZE (sizeof(struct wdsp_glink_ch_cfg))
+
+#define MINOR_NUMBER_COUNT 1
+#define WDSP_EDGE "wdsp"
+#define RESP_QUEUE_SIZE 3
+#define QOS_PKT_SIZE 1024
+#define TIMEOUT_MS 1000
+
+struct wdsp_glink_dev {
+	struct class *cls;
+	struct device *dev;
+	struct cdev cdev;
+	dev_t dev_num;
+};
+
+struct wdsp_glink_rsp_que {
+	/* Size of valid data in buffer */
+	u32 buf_size;
+
+	/* Response buffer */
+	u8 buf[WDSP_MAX_READ_SIZE];
+};
+
+struct wdsp_glink_tx_buf {
+	struct work_struct tx_work;
+	struct work_struct free_tx_work;
+
+	/* Glink channel information */
+	struct wdsp_glink_ch *ch;
+
+	/* Tx buffer to send to glink */
+	u8 buf[0];
+};
+
+struct wdsp_glink_ch {
+	struct wdsp_glink_priv *wpriv;
+
+	/* Glink channel handle */
+	void *handle;
+
+	/* Channel states like connect, disconnect */
+	int channel_state;
+	struct mutex mutex;
+
+	/* To free up the channel memory */
+	bool free_mem;
+
+	/* Glink local channel open work */
+	struct work_struct lcl_ch_open_wrk;
+
+	/* Glink local channel close work */
+	struct work_struct lcl_ch_cls_wrk;
+
+	/* Wait for ch connect state before sending any command */
+	wait_queue_head_t ch_connect_wait;
+
+	/*
+	 * Glink channel configuration. This has to be the last
+	 * member of the structure as it has variable size
+	 */
+	struct wdsp_glink_ch_cfg ch_cfg;
+};
+
+struct wdsp_glink_state {
+	/* Glink link state information */
+	enum glink_link_state link_state;
+	void *handle;
+};
+
+struct wdsp_glink_priv {
+	/* Response buffer related */
+	u8 rsp_cnt;
+	struct wdsp_glink_rsp_que rsp[RESP_QUEUE_SIZE];
+	struct completion rsp_complete;
+	struct mutex rsp_mutex;
+
+	/* Glink channel related */
+	struct mutex glink_mutex;
+	struct wdsp_glink_state glink_state;
+	struct wdsp_glink_ch **ch;
+	u8 no_of_channels;
+	struct work_struct ch_open_cls_wrk;
+	struct workqueue_struct *work_queue;
+
+	wait_queue_head_t link_state_wait;
+
+	struct device *dev;
+};
+
+static int wdsp_glink_close_ch(struct wdsp_glink_ch *ch);
+static int wdsp_glink_open_ch(struct wdsp_glink_ch *ch);
+
+/*
+ * wdsp_glink_free_tx_buf_work - Work function to free tx pkt
+ * work:      Work structure
+ */
+static void wdsp_glink_free_tx_buf_work(struct work_struct *work)
+{
+	struct wdsp_glink_tx_buf *tx_buf;
+
+	tx_buf = container_of(work, struct wdsp_glink_tx_buf,
+			      free_tx_work);
+	vfree(tx_buf);
+}
+
+/*
+ * wdsp_glink_free_tx_buf - Function to free tx buffer
+ * priv:        Pointer to the channel
+ * pkt_priv:    Pointer to the tx buffer
+ */
+static void wdsp_glink_free_tx_buf(const void *priv, const void *pkt_priv)
+{
+	struct wdsp_glink_tx_buf *tx_buf = (struct wdsp_glink_tx_buf *)pkt_priv;
+	struct wdsp_glink_priv *wpriv;
+	struct wdsp_glink_ch *ch;
+
+	if (!priv) {
+		pr_err("%s: Invalid priv\n", __func__);
+		return;
+	}
+	if (!tx_buf) {
+		pr_err("%s: Invalid tx_buf\n", __func__);
+		return;
+	}
+
+	ch = (struct wdsp_glink_ch *)priv;
+	wpriv = ch->wpriv;
+	/* Work queue to free tx pkt */
+	INIT_WORK(&tx_buf->free_tx_work, wdsp_glink_free_tx_buf_work);
+	queue_work(wpriv->work_queue, &tx_buf->free_tx_work);
+}
+
+/*
+ * wdsp_glink_notify_rx - Glink notify rx callback for responses
+ * handle:      Opaque Channel handle returned by GLink
+ * priv:        Private pointer to the channel
+ * pkt_priv:    Private pointer to the packet
+ * ptr:         Pointer to the Rx data
+ * size:        Size of the Rx data
+ */
+static void wdsp_glink_notify_rx(void *handle, const void *priv,
+				 const void *pkt_priv, const void *ptr,
+				 size_t size)
+{
+	u8 *rx_buf;
+	u8 rsp_cnt;
+	struct wdsp_glink_ch *ch;
+	struct wdsp_glink_priv *wpriv;
+
+	if (!ptr || !priv) {
+		pr_err("%s: Invalid parameters\n", __func__);
+		return;
+	}
+
+	ch = (struct wdsp_glink_ch *)priv;
+	wpriv = ch->wpriv;
+	rx_buf = (u8 *)ptr;
+	if (size > WDSP_MAX_READ_SIZE) {
+		dev_err(wpriv->dev, "%s: Size %zd is greater than allowed %d\n",
+			__func__, size, WDSP_MAX_READ_SIZE);
+		size = WDSP_MAX_READ_SIZE;
+	}
+
+	mutex_lock(&wpriv->rsp_mutex);
+	rsp_cnt = wpriv->rsp_cnt;
+	if (rsp_cnt >= RESP_QUEUE_SIZE) {
+		dev_err(wpriv->dev, "%s: Resp Queue is Full\n", __func__);
+		rsp_cnt = 0;
+	}
+	dev_dbg(wpriv->dev, "%s: copy into buffer %d\n", __func__, rsp_cnt);
+
+	memcpy(wpriv->rsp[rsp_cnt].buf, rx_buf, size);
+	wpriv->rsp[rsp_cnt].buf_size = size;
+	wpriv->rsp_cnt = ++rsp_cnt;
+	mutex_unlock(&wpriv->rsp_mutex);
+
+	glink_rx_done(handle, ptr, true);
+	complete(&wpriv->rsp_complete);
+}
+
+/*
+ * wdsp_glink_notify_tx_done - Glink notify tx done callback to
+ * free tx buffer
+ * handle:      Opaque Channel handle returned by GLink
+ * priv:        Private pointer to the channel
+ * pkt_priv:    Private pointer to the packet
+ * ptr:         Pointer to the Tx data
+ */
+static void wdsp_glink_notify_tx_done(void *handle, const void *priv,
+				      const void *pkt_priv, const void *ptr)
+{
+	wdsp_glink_free_tx_buf(priv, pkt_priv);
+}
+
+/*
+ * wdsp_glink_notify_tx_abort - Glink notify tx abort callback to
+ * free tx buffer
+ * handle:      Opaque Channel handle returned by GLink
+ * priv:        Private pointer to the channel
+ * pkt_priv:    Private pointer to the packet
+ */
+static void wdsp_glink_notify_tx_abort(void *handle, const void *priv,
+				       const void *pkt_priv)
+{
+	wdsp_glink_free_tx_buf(priv, pkt_priv);
+}
+
+/*
+ * wdsp_glink_notify_rx_intent_req - Glink notify rx intent request callback
+ * to queue buffer to receive from remote client
+ * handle:      Opaque channel handle returned by GLink
+ * priv:        Private pointer to the channel
+ * req_size:    Size of intent to be queued
+ */
+static bool wdsp_glink_notify_rx_intent_req(void *handle, const void *priv,
+					    size_t req_size)
+{
+	struct wdsp_glink_priv *wpriv;
+	struct wdsp_glink_ch *ch;
+	int rc = 0;
+	bool ret = false;
+
+	if (!priv) {
+		pr_err("%s: Invalid priv\n", __func__);
+		goto done;
+	}
+	if (req_size > WDSP_MAX_READ_SIZE) {
+		pr_err("%s: Invalid req_size %zd\n", __func__, req_size);
+		goto done;
+	}
+
+	ch = (struct wdsp_glink_ch *)priv;
+	wpriv = ch->wpriv;
+
+	dev_dbg(wpriv->dev, "%s: intent size %zd requested for ch name %s",
+		 __func__, req_size, ch->ch_cfg.name);
+
+	mutex_lock(&ch->mutex);
+	rc = glink_queue_rx_intent(ch->handle, ch, req_size);
+	if (IS_ERR_VALUE(rc)) {
+		dev_err(wpriv->dev, "%s: Failed to queue rx intent, rc = %d\n",
+			__func__, rc);
+		mutex_unlock(&ch->mutex);
+		goto done;
+	}
+	mutex_unlock(&ch->mutex);
+	ret = true;
+
+done:
+	return ret;
+}
+
+/*
+ * wdsp_glink_lcl_ch_open_wrk - Work function to open channel again
+ * when local disconnect event happens
+ * work:      Work structure
+ */
+static void wdsp_glink_lcl_ch_open_wrk(struct work_struct *work)
+{
+	struct wdsp_glink_ch *ch;
+
+	ch = container_of(work, struct wdsp_glink_ch,
+			  lcl_ch_open_wrk);
+
+	wdsp_glink_open_ch(ch);
+}
+
+/*
+ * wdsp_glink_lcl_ch_cls_wrk - Work function to close channel locally
+ * when remote disconnect event happens
+ * work:      Work structure
+ */
+static void wdsp_glink_lcl_ch_cls_wrk(struct work_struct *work)
+{
+	struct wdsp_glink_ch *ch;
+
+	ch = container_of(work, struct wdsp_glink_ch,
+			  lcl_ch_cls_wrk);
+
+	wdsp_glink_close_ch(ch);
+}
+
+/*
+ * wdsp_glink_notify_state - Glink channel state information event callback
+ * handle:      Opaque Channel handle returned by GLink
+ * priv:        Private pointer to the channel
+ * event:       channel state event
+ */
+static void wdsp_glink_notify_state(void *handle, const void *priv,
+				    unsigned int event)
+{
+	struct wdsp_glink_priv *wpriv;
+	struct wdsp_glink_ch *ch;
+	int i, ret = 0;
+
+	if (!priv) {
+		pr_err("%s: Invalid priv\n", __func__);
+		return;
+	}
+
+	ch = (struct wdsp_glink_ch *)priv;
+	wpriv = ch->wpriv;
+
+	mutex_lock(&ch->mutex);
+	ch->channel_state = event;
+	if (event == GLINK_CONNECTED) {
+		dev_dbg(wpriv->dev, "%s: glink channel: %s connected\n",
+			__func__, ch->ch_cfg.name);
+
+		for (i = 0; i < ch->ch_cfg.no_of_intents; i++) {
+			dev_dbg(wpriv->dev, "%s: intent_size = %d\n", __func__,
+				ch->ch_cfg.intents_size[i]);
+			ret = glink_queue_rx_intent(ch->handle, ch,
+						    ch->ch_cfg.intents_size[i]);
+			if (IS_ERR_VALUE(ret))
+				dev_warn(wpriv->dev, "%s: Failed to queue intent %d of size %d\n",
+					 __func__, i,
+					 ch->ch_cfg.intents_size[i]);
+		}
+
+		ret = glink_qos_latency(ch->handle, ch->ch_cfg.latency_in_us,
+					QOS_PKT_SIZE);
+		if (IS_ERR_VALUE(ret))
+			dev_warn(wpriv->dev, "%s: Failed to request qos %d for ch %s\n",
+				__func__, ch->ch_cfg.latency_in_us,
+				ch->ch_cfg.name);
+
+		wake_up(&ch->ch_connect_wait);
+		mutex_unlock(&ch->mutex);
+	} else if (event == GLINK_LOCAL_DISCONNECTED) {
+		/*
+		 * Don't use dev_dbg here as dev may not be valid if channel
+		 * closed from driver close.
+		 */
+		pr_debug("%s: channel: %s disconnected locally\n",
+			 __func__, ch->ch_cfg.name);
+		mutex_unlock(&ch->mutex);
+
+		if (ch->free_mem) {
+			kfree(ch);
+			ch = NULL;
+		}
+	} else if (event == GLINK_REMOTE_DISCONNECTED) {
+		dev_dbg(wpriv->dev, "%s: remote channel: %s disconnected remotely\n",
+			 __func__, ch->ch_cfg.name);
+		mutex_unlock(&ch->mutex);
+		/*
+		 * If remote disconnect happens, local side also has
+		 * to close the channel as per glink design in a
+		 * separate work_queue.
+		 */
+		queue_work(wpriv->work_queue, &ch->lcl_ch_cls_wrk);
+	}
+}
+
+/*
+ * wdsp_glink_close_ch - Internal function to close glink channel
+ * ch:       Glink Channel structure.
+ */
+static int wdsp_glink_close_ch(struct wdsp_glink_ch *ch)
+{
+	struct wdsp_glink_priv *wpriv = ch->wpriv;
+	int ret = 0;
+
+	mutex_lock(&wpriv->glink_mutex);
+	if (ch->handle) {
+		ret = glink_close(ch->handle);
+		if (IS_ERR_VALUE(ret)) {
+			dev_err(wpriv->dev, "%s: glink_close is failed, ret = %d\n",
+				 __func__, ret);
+		} else {
+			ch->handle = NULL;
+			dev_dbg(wpriv->dev, "%s: ch %s is closed\n", __func__,
+				ch->ch_cfg.name);
+		}
+	} else {
+		dev_dbg(wpriv->dev, "%s: ch %s is already closed\n", __func__,
+			ch->ch_cfg.name);
+	}
+	mutex_unlock(&wpriv->glink_mutex);
+
+	return ret;
+}
+
+/*
+ * wdsp_glink_open_ch - Internal function to open glink channel
+ * ch:       Glink Channel structure.
+ */
+static int wdsp_glink_open_ch(struct wdsp_glink_ch *ch)
+{
+	struct wdsp_glink_priv *wpriv = ch->wpriv;
+	struct glink_open_config open_cfg;
+	int ret = 0;
+
+	mutex_lock(&wpriv->glink_mutex);
+	if (!ch->handle) {
+		memset(&open_cfg, 0, sizeof(open_cfg));
+		open_cfg.options = GLINK_OPT_INITIAL_XPORT;
+		open_cfg.edge = WDSP_EDGE;
+		open_cfg.notify_rx = wdsp_glink_notify_rx;
+		open_cfg.notify_tx_done = wdsp_glink_notify_tx_done;
+		open_cfg.notify_tx_abort = wdsp_glink_notify_tx_abort;
+		open_cfg.notify_state = wdsp_glink_notify_state;
+		open_cfg.notify_rx_intent_req = wdsp_glink_notify_rx_intent_req;
+		open_cfg.priv = ch;
+		open_cfg.name = ch->ch_cfg.name;
+
+		dev_dbg(wpriv->dev, "%s: ch->ch_cfg.name = %s, latency_in_us = %d, intents = %d\n",
+			__func__, ch->ch_cfg.name, ch->ch_cfg.latency_in_us,
+			ch->ch_cfg.no_of_intents);
+
+		ch->handle = glink_open(&open_cfg);
+		if (IS_ERR_OR_NULL(ch->handle)) {
+			dev_err(wpriv->dev, "%s: glink_open failed for ch %s\n",
+				__func__, ch->ch_cfg.name);
+			ch->handle = NULL;
+			ret = -EINVAL;
+		}
+	} else {
+		dev_err(wpriv->dev, "%s: ch %s is already opened\n", __func__,
+			ch->ch_cfg.name);
+	}
+	mutex_unlock(&wpriv->glink_mutex);
+
+	return ret;
+}
+
+/*
+ * wdsp_glink_close_all_ch - Internal function to close all glink channels
+ * wpriv:       Wdsp_glink private structure
+ */
+static void wdsp_glink_close_all_ch(struct wdsp_glink_priv *wpriv)
+{
+	int i;
+
+	for (i = 0; i < wpriv->no_of_channels; i++)
+		if (wpriv->ch && wpriv->ch[i])
+			wdsp_glink_close_ch(wpriv->ch[i]);
+}
+
+/*
+ * wdsp_glink_open_all_ch - Internal function to open all glink channels
+ * wpriv:       Wdsp_glink private structure
+ */
+static int wdsp_glink_open_all_ch(struct wdsp_glink_priv *wpriv)
+{
+	int ret = 0, i, j;
+
+	for (i = 0; i < wpriv->no_of_channels; i++) {
+		if (wpriv->ch && wpriv->ch[i]) {
+			ret = wdsp_glink_open_ch(wpriv->ch[i]);
+			if (IS_ERR_VALUE(ret))
+				goto err_open;
+		}
+	}
+	goto done;
+
+err_open:
+	for (j = 0; j < i; j++)
+		if (wpriv->ch[j])
+			wdsp_glink_close_ch(wpriv->ch[j]);
+
+done:
+	return ret;
+}
+
+/*
+ * wdsp_glink_ch_open_cls_wrk - Work function to open or close glink
+ * channels based on the current link state
+ * work:      Work structure
+ */
+static void wdsp_glink_ch_open_cls_wrk(struct work_struct *work)
+{
+	struct wdsp_glink_priv *wpriv;
+
+	wpriv = container_of(work, struct wdsp_glink_priv,
+			     ch_open_cls_wrk);
+
+	if (wpriv->glink_state.link_state == GLINK_LINK_STATE_DOWN) {
+		dev_info(wpriv->dev, "%s: GLINK_LINK_STATE_DOWN\n",
+			 __func__);
+
+		wdsp_glink_close_all_ch(wpriv);
+	} else if (wpriv->glink_state.link_state == GLINK_LINK_STATE_UP) {
+		dev_info(wpriv->dev, "%s: GLINK_LINK_STATE_UP\n",
+			 __func__);
+
+		wdsp_glink_open_all_ch(wpriv);
+	}
+}
+
+/*
+ * wdsp_glink_link_state_cb - Glink link state callback to inform
+ * about link states
+ * cb_info:     Glink link state callback information structure
+ * priv:        Private structure of link state passed while register
+ */
+static void wdsp_glink_link_state_cb(struct glink_link_state_cb_info *cb_info,
+				     void *priv)
+{
+	struct wdsp_glink_priv *wpriv;
+
+	if (!cb_info || !priv) {
+		pr_err("%s: Invalid parameters\n", __func__);
+		return;
+	}
+
+	wpriv = (struct wdsp_glink_priv *)priv;
+
+	mutex_lock(&wpriv->glink_mutex);
+	wpriv->glink_state.link_state = cb_info->link_state;
+	wake_up(&wpriv->link_state_wait);
+	mutex_unlock(&wpriv->glink_mutex);
+
+	queue_work(wpriv->work_queue, &wpriv->ch_open_cls_wrk);
+}
+
+/*
+ * wdsp_glink_ch_info_init - Internal function to allocate channel memory
+ * and register with glink
+ * wpriv:     Wdsp_glink private structure.
+ * pkt:       Glink registration packet contains glink channel information.
+ * pkt_size:  Size of the pkt.
+ */
+static int wdsp_glink_ch_info_init(struct wdsp_glink_priv *wpriv,
+				   struct wdsp_reg_pkt *pkt, size_t pkt_size)
+{
+	int ret = 0, i, j;
+	struct glink_link_info link_info;
+	struct wdsp_glink_ch_cfg *ch_cfg;
+	struct wdsp_glink_ch **ch;
+	u8 no_of_channels;
+	u8 *payload;
+	u32 ch_size, ch_cfg_size;
+	size_t size = WDSP_WRITE_PKT_SIZE + WDSP_REG_PKT_SIZE;
+
+	mutex_lock(&wpriv->glink_mutex);
+	if (wpriv->ch) {
+		dev_err_ratelimited(wpriv->dev, "%s: glink ch memory is already allocated\n",
+			 __func__);
+		ret = -EINVAL;
+		goto done;
+	}
+	payload = (u8 *)pkt->payload;
+	no_of_channels = pkt->no_of_channels;
+
+	if (no_of_channels > WDSP_MAX_NO_OF_CHANNELS) {
+		dev_err_ratelimited(wpriv->dev, "%s: no_of_channels: %d but max allowed are %d\n",
+			__func__, no_of_channels, WDSP_MAX_NO_OF_CHANNELS);
+		ret = -EINVAL;
+		goto done;
+	}
+	ch = kcalloc(no_of_channels, sizeof(struct wdsp_glink_ch *),
+		     GFP_ATOMIC);
+	if (!ch) {
+		ret = -ENOMEM;
+		goto done;
+	}
+	wpriv->ch = ch;
+	wpriv->no_of_channels = no_of_channels;
+
+	for (i = 0; i < no_of_channels; i++) {
+		ch_cfg = (struct wdsp_glink_ch_cfg *)payload;
+
+		size += WDSP_CH_CFG_SIZE;
+		if (size > pkt_size) {
+			dev_err_ratelimited(wpriv->dev, "%s: Invalid size = %zd, pkt_size = %zd\n",
+				__func__, size, pkt_size);
+			ret = -EINVAL;
+			goto err_ch_mem;
+		}
+		if (ch_cfg->no_of_intents > WDSP_MAX_NO_OF_INTENTS) {
+			dev_err_ratelimited(wpriv->dev, "%s: Invalid no_of_intents = %d\n",
+				__func__, ch_cfg->no_of_intents);
+			ret = -EINVAL;
+			goto err_ch_mem;
+		}
+		size += (sizeof(u32) * ch_cfg->no_of_intents);
+		if (size > pkt_size) {
+			dev_err_ratelimited(wpriv->dev, "%s: Invalid size = %zd, pkt_size = %zd\n",
+				__func__, size, pkt_size);
+			ret = -EINVAL;
+			goto err_ch_mem;
+		}
+
+		ch_cfg_size = sizeof(struct wdsp_glink_ch_cfg) +
+					(sizeof(u32) * ch_cfg->no_of_intents);
+		ch_size = sizeof(struct wdsp_glink_ch) +
+					(sizeof(u32) * ch_cfg->no_of_intents);
+
+		dev_dbg(wpriv->dev, "%s: channels: %d ch_cfg_size: %d, size: %zd, pkt_size: %zd",
+			 __func__, no_of_channels, ch_cfg_size, size, pkt_size);
+
+		ch[i] = kzalloc(ch_size, GFP_KERNEL);
+		if (!ch[i]) {
+			ret = -ENOMEM;
+			goto err_ch_mem;
+		}
+		ch[i]->channel_state = GLINK_LOCAL_DISCONNECTED;
+		memcpy(&ch[i]->ch_cfg, payload, ch_cfg_size);
+		payload += ch_cfg_size;
+
+		/* check ch name is valid string or not */
+		for (j = 0; j < WDSP_CH_NAME_MAX_LEN; j++) {
+			if (ch[i]->ch_cfg.name[j] == '\0')
+				break;
+		}
+
+		if (j == WDSP_CH_NAME_MAX_LEN) {
+			dev_err_ratelimited(wpriv->dev, "%s: Wrong channel name\n",
+				__func__);
+			kfree(ch[i]);
+			ch[i] = NULL;
+			ret = -EINVAL;
+			goto err_ch_mem;
+		}
+
+		mutex_init(&ch[i]->mutex);
+		ch[i]->wpriv = wpriv;
+		INIT_WORK(&ch[i]->lcl_ch_open_wrk, wdsp_glink_lcl_ch_open_wrk);
+		INIT_WORK(&ch[i]->lcl_ch_cls_wrk, wdsp_glink_lcl_ch_cls_wrk);
+		init_waitqueue_head(&ch[i]->ch_connect_wait);
+	}
+
+	INIT_WORK(&wpriv->ch_open_cls_wrk, wdsp_glink_ch_open_cls_wrk);
+
+	/* Register glink link_state notification */
+	link_info.glink_link_state_notif_cb = wdsp_glink_link_state_cb;
+	link_info.transport = NULL;
+	link_info.edge = WDSP_EDGE;
+
+	wpriv->glink_state.link_state = GLINK_LINK_STATE_DOWN;
+	wpriv->glink_state.handle = glink_register_link_state_cb(&link_info,
+								 wpriv);
+	if (!wpriv->glink_state.handle) {
+		dev_err(wpriv->dev, "%s: Unable to register wdsp link state\n",
+			__func__);
+		ret = -EINVAL;
+		goto err_ch_mem;
+	}
+	goto done;
+
+err_ch_mem:
+	for (j = 0; j < i; j++) {
+		mutex_destroy(&ch[j]->mutex);
+		kfree(wpriv->ch[j]);
+		wpriv->ch[j] = NULL;
+	}
+	kfree(wpriv->ch);
+	wpriv->ch = NULL;
+	wpriv->no_of_channels = 0;
+
+done:
+	mutex_unlock(&wpriv->glink_mutex);
+	return ret;
+}
+
+/*
+ * wdsp_glink_tx_buf_work - Work queue function to send tx buffer to glink
+ * work:     Work structure
+ */
+static void wdsp_glink_tx_buf_work(struct work_struct *work)
+{
+	struct wdsp_glink_priv *wpriv;
+	struct wdsp_glink_ch *ch;
+	struct wdsp_glink_tx_buf *tx_buf;
+	struct wdsp_write_pkt *wpkt;
+	struct wdsp_cmd_pkt *cpkt;
+	int ret = 0;
+
+	tx_buf = container_of(work, struct wdsp_glink_tx_buf,
+			      tx_work);
+	ch = tx_buf->ch;
+	wpriv = ch->wpriv;
+	wpkt = (struct wdsp_write_pkt *)tx_buf->buf;
+	cpkt = (struct wdsp_cmd_pkt *)wpkt->payload;
+	dev_dbg(wpriv->dev, "%s: ch name = %s, payload size = %d\n",
+		__func__, cpkt->ch_name, cpkt->payload_size);
+
+	mutex_lock(&tx_buf->ch->mutex);
+	if (ch->channel_state == GLINK_CONNECTED) {
+		mutex_unlock(&tx_buf->ch->mutex);
+		ret = glink_tx(ch->handle, tx_buf,
+			       cpkt->payload, cpkt->payload_size,
+			       GLINK_TX_REQ_INTENT);
+		if (IS_ERR_VALUE(ret)) {
+			dev_err(wpriv->dev, "%s: glink tx failed, ret = %d\n",
+				__func__, ret);
+			/*
+			 * If glink_tx() fails, free tx_buf here as
+			 * there won't be any tx_done notification to
+			 * free the buffer.
+			 */
+			vfree(tx_buf);
+		}
+	} else {
+		mutex_unlock(&tx_buf->ch->mutex);
+		dev_err(wpriv->dev, "%s: channel %s is not in connected state\n",
+			__func__, ch->ch_cfg.name);
+		/*
+		 * Free tx_buf here as there won't be any tx_done
+		 * notification in this case also.
+		 */
+		vfree(tx_buf);
+	}
+}
+
+/*
+ * wdsp_glink_read - Read API to send the data to userspace
+ * file:    Pointer to the file structure
+ * buf:     Pointer to the userspace buffer
+ * count:   Number of bytes to read from the file
+ * ppos:    Pointer to the position into the file
+ */
+static ssize_t wdsp_glink_read(struct file *file, char __user *buf,
+			       size_t count, loff_t *ppos)
+{
+	int ret = 0, ret1 = 0;
+	struct wdsp_glink_rsp_que *rsp;
+	struct wdsp_glink_priv *wpriv;
+
+	wpriv = (struct wdsp_glink_priv *)file->private_data;
+	if (!wpriv) {
+		pr_err("%s: Invalid private data\n", __func__);
+		ret = -EINVAL;
+		goto done;
+	}
+
+	if (count > WDSP_MAX_READ_SIZE) {
+		dev_info_ratelimited(wpriv->dev, "%s: count = %zd is more than WDSP_MAX_READ_SIZE\n",
+			__func__, count);
+		count = WDSP_MAX_READ_SIZE;
+	}
+	/*
+	 * The completion is signaled from the glink rx notification
+	 * callback or from the flush API. Use the interruptible
+	 * wait_for_completion variant so that the system can still suspend.
+	 */
+	ret = wait_for_completion_interruptible(&wpriv->rsp_complete);
+	if (ret)
+		goto done;
+
+	mutex_lock(&wpriv->rsp_mutex);
+	if (wpriv->rsp_cnt) {
+		wpriv->rsp_cnt--;
+		dev_dbg(wpriv->dev, "%s: read from buffer %d\n",
+			__func__, wpriv->rsp_cnt);
+
+		rsp = &wpriv->rsp[wpriv->rsp_cnt];
+		if (count < rsp->buf_size) {
+			ret1 = copy_to_user(buf, &rsp->buf, count);
+			/* Return the number of bytes copied */
+			ret = count;
+		} else {
+			ret1 = copy_to_user(buf, &rsp->buf, rsp->buf_size);
+			/* Return the number of bytes copied */
+			ret = rsp->buf_size;
+		}
+
+		if (ret1) {
+			mutex_unlock(&wpriv->rsp_mutex);
+			dev_err_ratelimited(wpriv->dev, "%s: copy_to_user failed %d\n",
+				__func__, ret1);
+			ret = -EFAULT;
+			goto done;
+		}
+	} else {
+		/*
+		 * This will execute only if flush API is called or
+		 * something is wrong with rsp_cnt
+		 */
+		dev_dbg(wpriv->dev, "%s: resp count = %d\n", __func__,
+			wpriv->rsp_cnt);
+		ret = -EINVAL;
+	}
+	mutex_unlock(&wpriv->rsp_mutex);
+
+done:
+	return ret;
+}
+
+/*
+ * wdsp_glink_write - Write API to receive the data from userspace
+ * file:    Pointer to the file structure
+ * buf:     Pointer to the userspace buffer
+ * count:   Number of bytes to write to the file
+ * ppos:    Pointer to the position into the file
+ */
+static ssize_t wdsp_glink_write(struct file *file, const char __user *buf,
+				size_t count, loff_t *ppos)
+{
+	int ret = 0, i, tx_buf_size;
+	struct wdsp_write_pkt *wpkt;
+	struct wdsp_cmd_pkt *cpkt;
+	struct wdsp_glink_tx_buf *tx_buf;
+	struct wdsp_glink_priv *wpriv;
+	size_t pkt_max_size;
+
+	wpriv = (struct wdsp_glink_priv *)file->private_data;
+	if (!wpriv) {
+		pr_err("%s: Invalid private data\n", __func__);
+		ret = -EINVAL;
+		goto done;
+	}
+
+	if ((count < WDSP_WRITE_PKT_SIZE) ||
+	    (count > WDSP_MAX_WRITE_SIZE)) {
+		dev_err_ratelimited(wpriv->dev, "%s: Invalid count = %zd\n",
+			__func__, count);
+		ret = -EINVAL;
+		goto done;
+	}
+
+	dev_dbg(wpriv->dev, "%s: count = %zd\n", __func__, count);
+
+	tx_buf_size = count + sizeof(struct wdsp_glink_tx_buf);
+	tx_buf = vzalloc(tx_buf_size);
+	if (!tx_buf) {
+		ret = -ENOMEM;
+		goto done;
+	}
+
+	ret = copy_from_user(tx_buf->buf, buf, count);
+	if (ret) {
+		dev_err_ratelimited(wpriv->dev, "%s: copy_from_user failed %d\n",
+			__func__, ret);
+		ret = -EFAULT;
+		goto free_buf;
+	}
+
+	wpkt = (struct wdsp_write_pkt *)tx_buf->buf;
+	switch (wpkt->pkt_type) {
+	case WDSP_REG_PKT:
+		if (count < (WDSP_WRITE_PKT_SIZE + WDSP_REG_PKT_SIZE +
+			     WDSP_CH_CFG_SIZE)) {
+			dev_err_ratelimited(wpriv->dev, "%s: Invalid reg pkt size = %zd\n",
+				__func__, count);
+			ret = -EINVAL;
+			goto free_buf;
+		}
+		ret = wdsp_glink_ch_info_init(wpriv,
+					(struct wdsp_reg_pkt *)wpkt->payload,
+					count);
+		if (IS_ERR_VALUE(ret))
+			dev_err_ratelimited(wpriv->dev, "%s: glink register failed, ret = %d\n",
+				__func__, ret);
+		vfree(tx_buf);
+		break;
+	case WDSP_READY_PKT:
+		ret = wait_event_timeout(wpriv->link_state_wait,
+					 (wpriv->glink_state.link_state ==
+							GLINK_LINK_STATE_UP),
+					 msecs_to_jiffies(TIMEOUT_MS));
+		if (!ret) {
+			dev_err_ratelimited(wpriv->dev, "%s: Link state wait timeout\n",
+				__func__);
+			ret = -ETIMEDOUT;
+			goto free_buf;
+		}
+		ret = 0;
+		vfree(tx_buf);
+		break;
+	case WDSP_CMD_PKT:
+		if (count <= (WDSP_WRITE_PKT_SIZE + WDSP_CMD_PKT_SIZE)) {
+			dev_err_ratelimited(wpriv->dev, "%s: Invalid cmd pkt size = %zd\n",
+				__func__, count);
+			ret = -EINVAL;
+			goto free_buf;
+		}
+		mutex_lock(&wpriv->glink_mutex);
+		if (wpriv->glink_state.link_state == GLINK_LINK_STATE_DOWN) {
+			mutex_unlock(&wpriv->glink_mutex);
+			dev_err_ratelimited(wpriv->dev, "%s: Link state is Down\n",
+				__func__);
+
+			ret = -ENETRESET;
+			goto free_buf;
+		}
+		mutex_unlock(&wpriv->glink_mutex);
+		cpkt = (struct wdsp_cmd_pkt *)wpkt->payload;
+		pkt_max_size = sizeof(struct wdsp_write_pkt) +
+					sizeof(struct wdsp_cmd_pkt) +
+					cpkt->payload_size;
+		if (count < pkt_max_size) {
+			dev_err_ratelimited(wpriv->dev, "%s: Invalid cmd pkt count = %zd, pkt_size = %zd\n",
+				__func__, count, pkt_max_size);
+			ret = -EINVAL;
+			goto free_buf;
+		}
+		for (i = 0; i < wpriv->no_of_channels; i++) {
+			if (wpriv->ch && wpriv->ch[i] &&
+				(!strcmp(cpkt->ch_name,
+						wpriv->ch[i]->ch_cfg.name))) {
+				tx_buf->ch = wpriv->ch[i];
+				break;
+			}
+		}
+		if (!tx_buf->ch) {
+			dev_err_ratelimited(wpriv->dev, "%s: Failed to get glink channel\n",
+				__func__);
+			ret = -EINVAL;
+			goto free_buf;
+		}
+		dev_dbg(wpriv->dev, "%s: requested ch_name: %s, pkt_size: %zd\n",
+			__func__, cpkt->ch_name, pkt_max_size);
+
+		ret = wait_event_timeout(tx_buf->ch->ch_connect_wait,
+					 (tx_buf->ch->channel_state ==
+							GLINK_CONNECTED),
+					 msecs_to_jiffies(TIMEOUT_MS));
+		if (!ret) {
+			dev_err_ratelimited(wpriv->dev, "%s: glink channel %s is not in connected state %d\n",
+				__func__, tx_buf->ch->ch_cfg.name,
+				tx_buf->ch->channel_state);
+			ret = -ETIMEDOUT;
+			goto free_buf;
+		}
+		ret = 0;
+
+		INIT_WORK(&tx_buf->tx_work, wdsp_glink_tx_buf_work);
+		queue_work(wpriv->work_queue, &tx_buf->tx_work);
+		break;
+	default:
+		dev_err_ratelimited(wpriv->dev, "%s: Invalid packet type\n",
+				    __func__);
+		ret = -EINVAL;
+		vfree(tx_buf);
+		break;
+	}
+	goto done;
+
+free_buf:
+	vfree(tx_buf);
+
+done:
+	return ret;
+}
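+
+/*
+ * Sketch of the expected userspace sequence against this char device
+ * (illustrative only; the packet layouts come from
+ * sound/wcd-dsp-glink.h and the device node name depends on how the
+ * class device is created in probe):
+ *
+ *	int fd = open("/dev/wdsp_glink", O_RDWR);
+ *
+ *	write(fd, reg_pkt, reg_len);	// WDSP_REG_PKT: register channels
+ *	write(fd, ready_pkt, rdy_len);	// WDSP_READY_PKT: wait for link up
+ *	write(fd, cmd_pkt, cmd_len);	// WDSP_CMD_PKT: queued to glink_tx()
+ *	read(fd, rsp, sizeof(rsp));	// blocks until a response arrives
+ */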
+
+/*
+ * wdsp_glink_open - Open API to initialize private data
+ * inode:   Pointer to the inode structure
+ * file:    Pointer to the file structure
+ */
+static int wdsp_glink_open(struct inode *inode, struct file *file)
+{
+	int ret = 0;
+	struct wdsp_glink_priv *wpriv;
+	struct wdsp_glink_dev *wdev;
+
+	if (!inode->i_cdev) {
+		pr_err("%s: cdev is NULL\n", __func__);
+		ret = -EINVAL;
+		goto done;
+	}
+	wdev = container_of(inode->i_cdev, struct wdsp_glink_dev, cdev);
+
+	wpriv = kzalloc(sizeof(struct wdsp_glink_priv), GFP_KERNEL);
+	if (!wpriv) {
+		ret = -ENOMEM;
+		goto done;
+	}
+	wpriv->dev = wdev->dev;
+	wpriv->work_queue = create_singlethread_workqueue("wdsp_glink_wq");
+	if (!wpriv->work_queue) {
+		dev_err(wpriv->dev, "%s: Error creating wdsp_glink_wq\n",
+			__func__);
+		ret = -EINVAL;
+		goto err_wq;
+	}
+
+	wpriv->glink_state.link_state = GLINK_LINK_STATE_DOWN;
+	init_completion(&wpriv->rsp_complete);
+	init_waitqueue_head(&wpriv->link_state_wait);
+	mutex_init(&wpriv->rsp_mutex);
+	mutex_init(&wpriv->glink_mutex);
+	file->private_data = wpriv;
+
+	goto done;
+
+err_wq:
+	kfree(wpriv);
+
+done:
+	return ret;
+}
+
+/*
+ * wdsp_glink_flush - Flush API to unblock read.
+ * file:    Pointer to the file structure
+ * id:      Lock owner ID
+ */
+static int wdsp_glink_flush(struct file *file, fl_owner_t id)
+{
+	struct wdsp_glink_priv *wpriv;
+
+	wpriv = (struct wdsp_glink_priv *)file->private_data;
+	if (!wpriv) {
+		pr_err("%s: Invalid private data\n", __func__);
+		return -EINVAL;
+	}
+
+	complete(&wpriv->rsp_complete);
+
+	return 0;
+}
+
+/*
+ * wdsp_glink_release - Release API to clean up resources.
+ * Whenever a file structure is shared across multiple threads,
+ * release won't be invoked until all copies are closed
+ * (file->f_count.counter reaches 0). To flush pending data when any
+ * copy is closed, implement the flush method instead.
+ *
+ * inode:   Pointer to the inode structure
+ * file:    Pointer to the file structure
+ */
+static int wdsp_glink_release(struct inode *inode, struct file *file)
+{
+	int i, ret = 0;
+	struct wdsp_glink_priv *wpriv;
+
+	wpriv = (struct wdsp_glink_priv *)file->private_data;
+	if (!wpriv) {
+		pr_err("%s: Invalid private data\n", __func__);
+		ret = -EINVAL;
+		goto done;
+	}
+
+	if (wpriv->glink_state.handle)
+		glink_unregister_link_state_cb(wpriv->glink_state.handle);
+
+	flush_workqueue(wpriv->work_queue);
+	destroy_workqueue(wpriv->work_queue);
+
+	/*
+	 * Clean up glink channel memory in the channel state
+	 * callback only when the channels are closed from here.
+	 */
+	if (wpriv->ch) {
+		for (i = 0; i < wpriv->no_of_channels; i++) {
+			if (wpriv->ch[i]) {
+				wpriv->ch[i]->free_mem = true;
+				/*
+				 * A NULL channel handle means the channel is
+				 * already closed; free its memory right here.
+				 */
+				if (!wpriv->ch[i]->handle) {
+					kfree(wpriv->ch[i]);
+					wpriv->ch[i] = NULL;
+				} else {
+					wdsp_glink_close_ch(wpriv->ch[i]);
+				}
+			}
+		}
+
+		kfree(wpriv->ch);
+		wpriv->ch = NULL;
+	}
+
+	mutex_destroy(&wpriv->glink_mutex);
+	mutex_destroy(&wpriv->rsp_mutex);
+	kfree(wpriv);
+	file->private_data = NULL;
+
+done:
+	return ret;
+}
+
+static const struct file_operations wdsp_glink_fops = {
+	.owner =                THIS_MODULE,
+	.open =                 wdsp_glink_open,
+	.read =                 wdsp_glink_read,
+	.write =                wdsp_glink_write,
+	.flush =                wdsp_glink_flush,
+	.release =              wdsp_glink_release,
+};
+
+/*
+ * wdsp_glink_probe - Driver probe to expose char device
+ * pdev:    Pointer to the platform device.
+ */
+static int wdsp_glink_probe(struct platform_device *pdev)
+{
+	int ret;
+	struct wdsp_glink_dev *wdev;
+
+	wdev = devm_kzalloc(&pdev->dev, sizeof(*wdev), GFP_KERNEL);
+	if (!wdev) {
+		ret = -ENOMEM;
+		goto done;
+	}
+
+	ret = alloc_chrdev_region(&wdev->dev_num, 0, MINOR_NUMBER_COUNT,
+				  WDSP_GLINK_DRIVER_NAME);
+	if (IS_ERR_VALUE(ret)) {
+		dev_err(&pdev->dev, "%s: Failed to alloc char dev, err = %d\n",
+			__func__, ret);
+		goto err_chrdev;
+	}
+
+	wdev->cls = class_create(THIS_MODULE, WDSP_GLINK_DRIVER_NAME);
+	if (IS_ERR(wdev->cls)) {
+		ret = PTR_ERR(wdev->cls);
+		dev_err(&pdev->dev, "%s: Failed to create class, err = %d\n",
+			__func__, ret);
+		goto err_class;
+	}
+
+	wdev->dev = device_create(wdev->cls, NULL, wdev->dev_num,
+				  NULL, WDSP_GLINK_DRIVER_NAME);
+	if (IS_ERR(wdev->dev)) {
+		ret = PTR_ERR(wdev->dev);
+		dev_err(&pdev->dev, "%s: Failed to create device, err = %d\n",
+			__func__, ret);
+		goto err_dev_create;
+	}
+
+	cdev_init(&wdev->cdev, &wdsp_glink_fops);
+	ret = cdev_add(&wdev->cdev, wdev->dev_num, MINOR_NUMBER_COUNT);
+	if (IS_ERR_VALUE(ret)) {
+		dev_err(&pdev->dev, "%s: Failed to register char dev, err = %d\n",
+			__func__, ret);
+		goto err_cdev_add;
+	}
+	platform_set_drvdata(pdev, wdev);
+	goto done;
+
+err_cdev_add:
+	device_destroy(wdev->cls, wdev->dev_num);
+
+err_dev_create:
+	class_destroy(wdev->cls);
+
+err_class:
+	unregister_chrdev_region(wdev->dev_num, MINOR_NUMBER_COUNT);
+
+err_chrdev:
+	devm_kfree(&pdev->dev, wdev);
+
+done:
+	return ret;
+}
+
+/*
+ * wdsp_glink_remove - Driver remove to handle cleanup
+ * pdev:     Pointer to the platform device.
+ */
+static int wdsp_glink_remove(struct platform_device *pdev)
+{
+	struct wdsp_glink_dev *wdev = platform_get_drvdata(pdev);
+
+	if (wdev) {
+		cdev_del(&wdev->cdev);
+		device_destroy(wdev->cls, wdev->dev_num);
+		class_destroy(wdev->cls);
+		unregister_chrdev_region(wdev->dev_num, MINOR_NUMBER_COUNT);
+		devm_kfree(&pdev->dev, wdev);
+	} else {
+		dev_err(&pdev->dev, "%s: Invalid device data\n", __func__);
+	}
+
+	return 0;
+}
+
+static const struct of_device_id wdsp_glink_of_match[] = {
+	{.compatible = "qcom,wcd-dsp-glink"},
+	{ }
+};
+MODULE_DEVICE_TABLE(of, wdsp_glink_of_match);
+
+static struct platform_driver wdsp_glink_driver = {
+	.probe          = wdsp_glink_probe,
+	.remove         = wdsp_glink_remove,
+	.driver         = {
+		.name   = WDSP_GLINK_DRIVER_NAME,
+		.owner  = THIS_MODULE,
+		.of_match_table = wdsp_glink_of_match,
+	},
+};
+
+module_platform_driver(wdsp_glink_driver);
+
+MODULE_DESCRIPTION("SoC WCD_DSP GLINK Driver");
+MODULE_LICENSE("GPL v2");
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/soc/qcom/wlan_firmware_service_v01.c	2019-10-29 09:26:24.825214745 +0100
@@ -0,0 +1,2085 @@
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/qmi_encdec.h>
+
+#include <soc/qcom/msm_qmi_interface.h>
+
+#include "wlan_firmware_service_v01.h"
+
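+/*
+ * Each elem_info table below is a TLV description consumed by the
+ * qmi_encdec engine: data_type selects the wire encoding, elem_len and
+ * elem_size bound one element, tlv_type is the TLV tag (0x01/0x02 for
+ * mandatory fields, 0x10 and up for optional ones) and offset locates
+ * the field inside the corresponding C structure. A QMI_EOTI entry
+ * terminates every table.
+ */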
+static struct elem_info wlfw_ce_tgt_pipe_cfg_s_v01_ei[] = {
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint32_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct wlfw_ce_tgt_pipe_cfg_s_v01,
+					   pipe_num),
+	},
+	{
+		.data_type      = QMI_SIGNED_4_BYTE_ENUM,
+		.elem_len       = 1,
+		.elem_size      = sizeof(enum wlfw_pipedir_enum_v01),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct wlfw_ce_tgt_pipe_cfg_s_v01,
+					   pipe_dir),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint32_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct wlfw_ce_tgt_pipe_cfg_s_v01,
+					   nentries),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint32_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct wlfw_ce_tgt_pipe_cfg_s_v01,
+					   nbytes_max),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint32_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct wlfw_ce_tgt_pipe_cfg_s_v01,
+					   flags),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+static struct elem_info wlfw_ce_svc_pipe_cfg_s_v01_ei[] = {
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint32_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct wlfw_ce_svc_pipe_cfg_s_v01,
+					   service_id),
+	},
+	{
+		.data_type      = QMI_SIGNED_4_BYTE_ENUM,
+		.elem_len       = 1,
+		.elem_size      = sizeof(enum wlfw_pipedir_enum_v01),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct wlfw_ce_svc_pipe_cfg_s_v01,
+					   pipe_dir),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint32_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct wlfw_ce_svc_pipe_cfg_s_v01,
+					   pipe_num),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+static struct elem_info wlfw_shadow_reg_cfg_s_v01_ei[] = {
+	{
+		.data_type      = QMI_UNSIGNED_2_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint16_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct wlfw_shadow_reg_cfg_s_v01,
+					   id),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_2_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint16_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct wlfw_shadow_reg_cfg_s_v01,
+					   offset),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+static struct elem_info wlfw_shadow_reg_v2_cfg_s_v01_ei[] = {
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint32_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct wlfw_shadow_reg_v2_cfg_s_v01,
+					   addr),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+static struct elem_info wlfw_memory_region_info_s_v01_ei[] = {
+	{
+		.data_type      = QMI_UNSIGNED_8_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint64_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct wlfw_memory_region_info_s_v01,
+					   region_addr),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint32_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct wlfw_memory_region_info_s_v01,
+					   size),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct wlfw_memory_region_info_s_v01,
+					   secure_flag),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+static struct elem_info wlfw_rf_chip_info_s_v01_ei[] = {
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint32_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct wlfw_rf_chip_info_s_v01,
+					   chip_id),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint32_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct wlfw_rf_chip_info_s_v01,
+					   chip_family),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+static struct elem_info wlfw_rf_board_info_s_v01_ei[] = {
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint32_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct wlfw_rf_board_info_s_v01,
+					   board_id),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+static struct elem_info wlfw_soc_info_s_v01_ei[] = {
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint32_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct wlfw_soc_info_s_v01,
+					   soc_id),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+static struct elem_info wlfw_fw_version_info_s_v01_ei[] = {
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint32_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct wlfw_fw_version_info_s_v01,
+					   fw_version),
+	},
+	{
+		.data_type      = QMI_STRING,
+		.elem_len       = QMI_WLFW_MAX_TIMESTAMP_LEN_V01 + 1,
+		.elem_size      = sizeof(char),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct wlfw_fw_version_info_s_v01,
+					   fw_build_timestamp),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info wlfw_ind_register_req_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(struct wlfw_ind_register_req_msg_v01,
+					   fw_ready_enable_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(struct wlfw_ind_register_req_msg_v01,
+					   fw_ready_enable),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x11,
+		.offset         = offsetof(struct wlfw_ind_register_req_msg_v01,
+					   initiate_cal_download_enable_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x11,
+		.offset         = offsetof(struct wlfw_ind_register_req_msg_v01,
+					   initiate_cal_download_enable),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x12,
+		.offset         = offsetof(struct wlfw_ind_register_req_msg_v01,
+					   initiate_cal_update_enable_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x12,
+		.offset         = offsetof(struct wlfw_ind_register_req_msg_v01,
+					   initiate_cal_update_enable),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x13,
+		.offset         = offsetof(struct wlfw_ind_register_req_msg_v01,
+					   msa_ready_enable_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x13,
+		.offset         = offsetof(struct wlfw_ind_register_req_msg_v01,
+					   msa_ready_enable),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x14,
+		.offset         = offsetof(struct wlfw_ind_register_req_msg_v01,
+					   pin_connect_result_enable_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x14,
+		.offset         = offsetof(struct wlfw_ind_register_req_msg_v01,
+					   pin_connect_result_enable),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x15,
+		.offset         = offsetof(struct wlfw_ind_register_req_msg_v01,
+					   client_id_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint32_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x15,
+		.offset         = offsetof(struct wlfw_ind_register_req_msg_v01,
+					   client_id),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x16,
+		.offset         = offsetof(struct wlfw_ind_register_req_msg_v01,
+					   request_mem_enable_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x16,
+		.offset         = offsetof(struct wlfw_ind_register_req_msg_v01,
+					   request_mem_enable),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x17,
+		.offset         = offsetof(struct wlfw_ind_register_req_msg_v01,
+					   fw_mem_ready_enable_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x17,
+		.offset         = offsetof(struct wlfw_ind_register_req_msg_v01,
+					   fw_mem_ready_enable),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x18,
+		.offset         = offsetof(struct wlfw_ind_register_req_msg_v01,
+					   cold_boot_cal_done_enable_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x18,
+		.offset         = offsetof(struct wlfw_ind_register_req_msg_v01,
+					   cold_boot_cal_done_enable),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x19,
+		.offset         = offsetof(struct wlfw_ind_register_req_msg_v01,
+					   rejuvenate_enable_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint32_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x19,
+		.offset         = offsetof(struct wlfw_ind_register_req_msg_v01,
+					   rejuvenate_enable),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info wlfw_ind_register_resp_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = 1,
+		.elem_size      = sizeof(struct qmi_response_type_v01),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x02,
+		.offset         = offsetof(struct wlfw_ind_register_resp_msg_v01,
+					   resp),
+		.ei_array      = get_qmi_response_type_v01_ei(),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(
+				   struct wlfw_ind_register_resp_msg_v01,
+				   fw_status_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_8_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint64_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(
+				   struct wlfw_ind_register_resp_msg_v01,
+				   fw_status),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
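+/*
+ * Illustrative sketch (not part of the original file): how a client
+ * would pair the two ind_register tables above with struct msg_desc
+ * and qmi_send_req_wait() from msm_qmi_interface. The msg-id and
+ * max-msg-len macro names are assumed to be provided by the companion
+ * wlan_firmware_service_v01.h header.
+ */
+#if 0
+static int wlfw_ind_register_example(struct qmi_handle *handle)
+{
+	struct wlfw_ind_register_req_msg_v01 req = { 0 };
+	struct wlfw_ind_register_resp_msg_v01 resp = { 0 };
+	struct msg_desc req_desc = {
+		.max_msg_len = WLFW_IND_REGISTER_REQ_MSG_V01_MAX_MSG_LEN,
+		.msg_id = QMI_WLFW_IND_REGISTER_REQ_V01,
+		.ei_array = wlfw_ind_register_req_msg_v01_ei,
+	};
+	struct msg_desc resp_desc = {
+		.max_msg_len = WLFW_IND_REGISTER_RESP_MSG_V01_MAX_MSG_LEN,
+		.msg_id = QMI_WLFW_IND_REGISTER_RESP_V01,
+		.ei_array = wlfw_ind_register_resp_msg_v01_ei,
+	};
+
+	/* Optional TLV 0x10: request the fw-ready indication. */
+	req.fw_ready_enable_valid = 1;
+	req.fw_ready_enable = 1;
+
+	return qmi_send_req_wait(handle, &req_desc, &req, sizeof(req),
+				 &resp_desc, &resp, sizeof(resp), 5000);
+}
+#endif
+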
+struct elem_info wlfw_fw_ready_ind_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info wlfw_msa_ready_ind_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info wlfw_pin_connect_result_ind_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(
+				   struct wlfw_pin_connect_result_ind_msg_v01,
+				   pwr_pin_result_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint32_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(
+				   struct wlfw_pin_connect_result_ind_msg_v01,
+				   pwr_pin_result),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x11,
+		.offset         = offsetof(
+				   struct wlfw_pin_connect_result_ind_msg_v01,
+				   phy_io_pin_result_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint32_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x11,
+		.offset         = offsetof(
+				   struct wlfw_pin_connect_result_ind_msg_v01,
+				   phy_io_pin_result),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x12,
+		.offset         = offsetof(
+				   struct wlfw_pin_connect_result_ind_msg_v01,
+				   rf_pin_result_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint32_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x12,
+		.offset         = offsetof(
+				   struct wlfw_pin_connect_result_ind_msg_v01,
+				   rf_pin_result),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info wlfw_wlan_mode_req_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_SIGNED_4_BYTE_ENUM,
+		.elem_len       = 1,
+		.elem_size      = sizeof(enum wlfw_driver_mode_enum_v01),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x01,
+		.offset         = offsetof(struct wlfw_wlan_mode_req_msg_v01,
+					   mode),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(struct wlfw_wlan_mode_req_msg_v01,
+					   hw_debug_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(struct wlfw_wlan_mode_req_msg_v01,
+					   hw_debug),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info wlfw_wlan_mode_resp_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = 1,
+		.elem_size      = sizeof(struct qmi_response_type_v01),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x02,
+		.offset         = offsetof(struct wlfw_wlan_mode_resp_msg_v01,
+					   resp),
+		.ei_array      = get_qmi_response_type_v01_ei(),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
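+/*
+ * Optional variable-length arrays are encoded as three entries sharing
+ * a tlv_type: a QMI_OPT_FLAG (<field>_valid), a QMI_DATA_LEN
+ * (<field>_len) and the VAR_LEN_ARRAY payload itself, as for the
+ * tgt_cfg, svc_cfg and shadow_reg members below.
+ */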
+struct elem_info wlfw_wlan_cfg_req_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(struct wlfw_wlan_cfg_req_msg_v01,
+					   host_version_valid),
+	},
+	{
+		.data_type      = QMI_STRING,
+		.elem_len       = QMI_WLFW_MAX_STR_LEN_V01 + 1,
+		.elem_size      = sizeof(char),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(struct wlfw_wlan_cfg_req_msg_v01,
+					   host_version),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x11,
+		.offset         = offsetof(struct wlfw_wlan_cfg_req_msg_v01,
+					   tgt_cfg_valid),
+	},
+	{
+		.data_type      = QMI_DATA_LEN,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x11,
+		.offset         = offsetof(struct wlfw_wlan_cfg_req_msg_v01,
+					   tgt_cfg_len),
+	},
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = QMI_WLFW_MAX_NUM_CE_V01,
+		.elem_size      = sizeof(struct wlfw_ce_tgt_pipe_cfg_s_v01),
+		.is_array       = VAR_LEN_ARRAY,
+		.tlv_type       = 0x11,
+		.offset         = offsetof(struct wlfw_wlan_cfg_req_msg_v01,
+					   tgt_cfg),
+		.ei_array      = wlfw_ce_tgt_pipe_cfg_s_v01_ei,
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x12,
+		.offset         = offsetof(struct wlfw_wlan_cfg_req_msg_v01,
+					   svc_cfg_valid),
+	},
+	{
+		.data_type      = QMI_DATA_LEN,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x12,
+		.offset         = offsetof(struct wlfw_wlan_cfg_req_msg_v01,
+					   svc_cfg_len),
+	},
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = QMI_WLFW_MAX_NUM_SVC_V01,
+		.elem_size      = sizeof(struct wlfw_ce_svc_pipe_cfg_s_v01),
+		.is_array       = VAR_LEN_ARRAY,
+		.tlv_type       = 0x12,
+		.offset         = offsetof(struct wlfw_wlan_cfg_req_msg_v01,
+					   svc_cfg),
+		.ei_array      = wlfw_ce_svc_pipe_cfg_s_v01_ei,
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x13,
+		.offset         = offsetof(struct wlfw_wlan_cfg_req_msg_v01,
+					   shadow_reg_valid),
+	},
+	{
+		.data_type      = QMI_DATA_LEN,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x13,
+		.offset         = offsetof(struct wlfw_wlan_cfg_req_msg_v01,
+					   shadow_reg_len),
+	},
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = QMI_WLFW_MAX_NUM_SHADOW_REG_V01,
+		.elem_size      = sizeof(struct wlfw_shadow_reg_cfg_s_v01),
+		.is_array       = VAR_LEN_ARRAY,
+		.tlv_type       = 0x13,
+		.offset         = offsetof(struct wlfw_wlan_cfg_req_msg_v01,
+					   shadow_reg),
+		.ei_array      = wlfw_shadow_reg_cfg_s_v01_ei,
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x14,
+		.offset         = offsetof(struct wlfw_wlan_cfg_req_msg_v01,
+					   shadow_reg_v2_valid),
+	},
+	{
+		.data_type      = QMI_DATA_LEN,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x14,
+		.offset         = offsetof(struct wlfw_wlan_cfg_req_msg_v01,
+					   shadow_reg_v2_len),
+	},
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = QMI_WLFW_MAX_NUM_SHADOW_REG_V2_V01,
+		.elem_size      = sizeof(struct wlfw_shadow_reg_v2_cfg_s_v01),
+		.is_array       = VAR_LEN_ARRAY,
+		.tlv_type       = 0x14,
+		.offset         = offsetof(struct wlfw_wlan_cfg_req_msg_v01,
+					   shadow_reg_v2),
+		.ei_array      = wlfw_shadow_reg_v2_cfg_s_v01_ei,
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info wlfw_wlan_cfg_resp_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = 1,
+		.elem_size      = sizeof(struct qmi_response_type_v01),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x02,
+		.offset         = offsetof(struct wlfw_wlan_cfg_resp_msg_v01,
+					   resp),
+		.ei_array      = get_qmi_response_type_v01_ei(),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info wlfw_cap_req_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info wlfw_cap_resp_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = 1,
+		.elem_size      = sizeof(struct qmi_response_type_v01),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x02,
+		.offset         = offsetof(struct wlfw_cap_resp_msg_v01,
+					   resp),
+		.ei_array      = get_qmi_response_type_v01_ei(),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(struct wlfw_cap_resp_msg_v01,
+					   chip_info_valid),
+	},
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = 1,
+		.elem_size      = sizeof(struct wlfw_rf_chip_info_s_v01),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(struct wlfw_cap_resp_msg_v01,
+					   chip_info),
+		.ei_array      = wlfw_rf_chip_info_s_v01_ei,
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x11,
+		.offset         = offsetof(struct wlfw_cap_resp_msg_v01,
+					   board_info_valid),
+	},
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = 1,
+		.elem_size      = sizeof(struct wlfw_rf_board_info_s_v01),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x11,
+		.offset         = offsetof(struct wlfw_cap_resp_msg_v01,
+					   board_info),
+		.ei_array      = wlfw_rf_board_info_s_v01_ei,
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x12,
+		.offset         = offsetof(struct wlfw_cap_resp_msg_v01,
+					   soc_info_valid),
+	},
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = 1,
+		.elem_size      = sizeof(struct wlfw_soc_info_s_v01),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x12,
+		.offset         = offsetof(struct wlfw_cap_resp_msg_v01,
+					   soc_info),
+		.ei_array      = wlfw_soc_info_s_v01_ei,
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x13,
+		.offset         = offsetof(struct wlfw_cap_resp_msg_v01,
+					   fw_version_info_valid),
+	},
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = 1,
+		.elem_size      = sizeof(struct wlfw_fw_version_info_s_v01),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x13,
+		.offset         = offsetof(struct wlfw_cap_resp_msg_v01,
+					   fw_version_info),
+		.ei_array      = wlfw_fw_version_info_s_v01_ei,
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x14,
+		.offset         = offsetof(struct wlfw_cap_resp_msg_v01,
+					   fw_build_id_valid),
+	},
+	{
+		.data_type      = QMI_STRING,
+		.elem_len       = QMI_WLFW_MAX_BUILD_ID_LEN_V01 + 1,
+		.elem_size      = sizeof(char),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x14,
+		.offset         = offsetof(struct wlfw_cap_resp_msg_v01,
+					   fw_build_id),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info wlfw_bdf_download_req_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x01,
+		.offset         = offsetof(struct wlfw_bdf_download_req_msg_v01,
+					   valid),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(struct wlfw_bdf_download_req_msg_v01,
+					   file_id_valid),
+	},
+	{
+		.data_type      = QMI_SIGNED_4_BYTE_ENUM,
+		.elem_len       = 1,
+		.elem_size      = sizeof(enum wlfw_cal_temp_id_enum_v01),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(struct wlfw_bdf_download_req_msg_v01,
+					   file_id),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x11,
+		.offset         = offsetof(struct wlfw_bdf_download_req_msg_v01,
+					   total_size_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint32_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x11,
+		.offset         = offsetof(struct wlfw_bdf_download_req_msg_v01,
+					   total_size),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x12,
+		.offset         = offsetof(struct wlfw_bdf_download_req_msg_v01,
+					   seg_id_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint32_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x12,
+		.offset         = offsetof(struct wlfw_bdf_download_req_msg_v01,
+					   seg_id),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x13,
+		.offset         = offsetof(struct wlfw_bdf_download_req_msg_v01,
+					   data_valid),
+	},
+	{
+		.data_type      = QMI_DATA_LEN,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint16_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x13,
+		.offset         = offsetof(struct wlfw_bdf_download_req_msg_v01,
+					   data_len),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = QMI_WLFW_MAX_DATA_SIZE_V01,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = VAR_LEN_ARRAY,
+		.tlv_type       = 0x13,
+		.offset         = offsetof(struct wlfw_bdf_download_req_msg_v01,
+					   data),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x14,
+		.offset         = offsetof(struct wlfw_bdf_download_req_msg_v01,
+					   end_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x14,
+		.offset         = offsetof(struct wlfw_bdf_download_req_msg_v01,
+					   end),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info wlfw_bdf_download_resp_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = 1,
+		.elem_size      = sizeof(struct qmi_response_type_v01),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x02,
+		.offset         = offsetof(struct wlfw_bdf_download_resp_msg_v01,
+					   resp),
+		.ei_array      = get_qmi_response_type_v01_ei(),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info wlfw_cal_report_req_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_DATA_LEN,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x01,
+		.offset         = offsetof(struct wlfw_cal_report_req_msg_v01,
+					   meta_data_len),
+	},
+	{
+		.data_type      = QMI_SIGNED_4_BYTE_ENUM,
+		.elem_len       = QMI_WLFW_MAX_NUM_CAL_V01,
+		.elem_size      = sizeof(enum wlfw_cal_temp_id_enum_v01),
+		.is_array       = VAR_LEN_ARRAY,
+		.tlv_type       = 0x01,
+		.offset         = offsetof(struct wlfw_cal_report_req_msg_v01,
+					   meta_data),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info wlfw_cal_report_resp_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = 1,
+		.elem_size      = sizeof(struct qmi_response_type_v01),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x02,
+		.offset         = offsetof(struct wlfw_cal_report_resp_msg_v01,
+					   resp),
+		.ei_array      = get_qmi_response_type_v01_ei(),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info wlfw_initiate_cal_download_ind_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_SIGNED_4_BYTE_ENUM,
+		.elem_len       = 1,
+		.elem_size      = sizeof(enum wlfw_cal_temp_id_enum_v01),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x01,
+		.offset         = offsetof(struct wlfw_initiate_cal_download_ind_msg_v01,
+					   cal_id),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info wlfw_cal_download_req_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x01,
+		.offset         = offsetof(struct wlfw_cal_download_req_msg_v01,
+					   valid),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(struct wlfw_cal_download_req_msg_v01,
+					   file_id_valid),
+	},
+	{
+		.data_type      = QMI_SIGNED_4_BYTE_ENUM,
+		.elem_len       = 1,
+		.elem_size      = sizeof(enum wlfw_cal_temp_id_enum_v01),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(struct wlfw_cal_download_req_msg_v01,
+					   file_id),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x11,
+		.offset         = offsetof(struct wlfw_cal_download_req_msg_v01,
+					   total_size_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint32_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x11,
+		.offset         = offsetof(struct wlfw_cal_download_req_msg_v01,
+					   total_size),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x12,
+		.offset         = offsetof(struct wlfw_cal_download_req_msg_v01,
+					   seg_id_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint32_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x12,
+		.offset         = offsetof(struct wlfw_cal_download_req_msg_v01,
+					   seg_id),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x13,
+		.offset         = offsetof(struct wlfw_cal_download_req_msg_v01,
+					   data_valid),
+	},
+	{
+		.data_type      = QMI_DATA_LEN,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint16_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x13,
+		.offset         = offsetof(struct wlfw_cal_download_req_msg_v01,
+					   data_len),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = QMI_WLFW_MAX_DATA_SIZE_V01,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = VAR_LEN_ARRAY,
+		.tlv_type       = 0x13,
+		.offset         = offsetof(struct wlfw_cal_download_req_msg_v01,
+					   data),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x14,
+		.offset         = offsetof(struct wlfw_cal_download_req_msg_v01,
+					   end_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x14,
+		.offset         = offsetof(struct wlfw_cal_download_req_msg_v01,
+					   end),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info wlfw_cal_download_resp_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = 1,
+		.elem_size      = sizeof(struct qmi_response_type_v01),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x02,
+		.offset         = offsetof(struct wlfw_cal_download_resp_msg_v01,
+					   resp),
+		.ei_array      = get_qmi_response_type_v01_ei(),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info wlfw_initiate_cal_update_ind_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_SIGNED_4_BYTE_ENUM,
+		.elem_len       = 1,
+		.elem_size      = sizeof(enum wlfw_cal_temp_id_enum_v01),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x01,
+		.offset         = offsetof(struct wlfw_initiate_cal_update_ind_msg_v01,
+					   cal_id),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint32_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x02,
+		.offset         = offsetof(struct wlfw_initiate_cal_update_ind_msg_v01,
+					   total_size),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info wlfw_cal_update_req_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_SIGNED_4_BYTE_ENUM,
+		.elem_len       = 1,
+		.elem_size      = sizeof(enum wlfw_cal_temp_id_enum_v01),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x01,
+		.offset         = offsetof(struct wlfw_cal_update_req_msg_v01,
+					   cal_id),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint32_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x02,
+		.offset         = offsetof(struct wlfw_cal_update_req_msg_v01,
+					   seg_id),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info wlfw_cal_update_resp_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = 1,
+		.elem_size      = sizeof(struct qmi_response_type_v01),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x02,
+		.offset         = offsetof(struct wlfw_cal_update_resp_msg_v01,
+					   resp),
+		.ei_array      = get_qmi_response_type_v01_ei(),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(struct wlfw_cal_update_resp_msg_v01,
+					   file_id_valid),
+	},
+	{
+		.data_type      = QMI_SIGNED_4_BYTE_ENUM,
+		.elem_len       = 1,
+		.elem_size      = sizeof(enum wlfw_cal_temp_id_enum_v01),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(struct wlfw_cal_update_resp_msg_v01,
+					   file_id),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x11,
+		.offset         = offsetof(struct wlfw_cal_update_resp_msg_v01,
+					   total_size_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint32_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x11,
+		.offset         = offsetof(struct wlfw_cal_update_resp_msg_v01,
+					   total_size),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x12,
+		.offset         = offsetof(struct wlfw_cal_update_resp_msg_v01,
+					   seg_id_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint32_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x12,
+		.offset         = offsetof(struct wlfw_cal_update_resp_msg_v01,
+					   seg_id),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x13,
+		.offset         = offsetof(struct wlfw_cal_update_resp_msg_v01,
+					   data_valid),
+	},
+	{
+		.data_type      = QMI_DATA_LEN,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint16_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x13,
+		.offset         = offsetof(struct wlfw_cal_update_resp_msg_v01,
+					   data_len),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = QMI_WLFW_MAX_DATA_SIZE_V01,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = VAR_LEN_ARRAY,
+		.tlv_type       = 0x13,
+		.offset         = offsetof(struct wlfw_cal_update_resp_msg_v01,
+					   data),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x14,
+		.offset         = offsetof(struct wlfw_cal_update_resp_msg_v01,
+					   end_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x14,
+		.offset         = offsetof(struct wlfw_cal_update_resp_msg_v01,
+					   end),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info wlfw_msa_info_req_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_UNSIGNED_8_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint64_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x01,
+		.offset         = offsetof(struct wlfw_msa_info_req_msg_v01,
+					   msa_addr),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint32_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x02,
+		.offset         = offsetof(struct wlfw_msa_info_req_msg_v01,
+					   size),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info wlfw_msa_info_resp_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = 1,
+		.elem_size      = sizeof(struct qmi_response_type_v01),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x02,
+		.offset         = offsetof(struct wlfw_msa_info_resp_msg_v01,
+					   resp),
+		.ei_array      = get_qmi_response_type_v01_ei(),
+	},
+	{
+		.data_type      = QMI_DATA_LEN,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x03,
+		.offset         = offsetof(struct wlfw_msa_info_resp_msg_v01,
+					   mem_region_info_len),
+	},
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = QMI_WLFW_MAX_NUM_MEMORY_REGIONS_V01,
+		.elem_size      = sizeof(struct wlfw_memory_region_info_s_v01),
+		.is_array       = VAR_LEN_ARRAY,
+		.tlv_type       = 0x03,
+		.offset         = offsetof(struct wlfw_msa_info_resp_msg_v01,
+					   mem_region_info),
+		.ei_array      = wlfw_memory_region_info_s_v01_ei,
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info wlfw_msa_ready_req_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info wlfw_msa_ready_resp_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = 1,
+		.elem_size      = sizeof(struct qmi_response_type_v01),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x02,
+		.offset         = offsetof(struct wlfw_msa_ready_resp_msg_v01,
+					   resp),
+		.ei_array      = get_qmi_response_type_v01_ei(),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info wlfw_ini_req_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(struct wlfw_ini_req_msg_v01,
+					   enablefwlog_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(struct wlfw_ini_req_msg_v01,
+					   enablefwlog),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info wlfw_ini_resp_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = 1,
+		.elem_size      = sizeof(struct qmi_response_type_v01),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x02,
+		.offset         = offsetof(struct wlfw_ini_resp_msg_v01,
+					   resp),
+		.ei_array      = get_qmi_response_type_v01_ei(),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info wlfw_athdiag_read_req_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint32_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x01,
+		.offset         = offsetof(struct wlfw_athdiag_read_req_msg_v01,
+					   offset),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint32_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x02,
+		.offset         = offsetof(struct wlfw_athdiag_read_req_msg_v01,
+					   mem_type),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint32_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x03,
+		.offset         = offsetof(struct wlfw_athdiag_read_req_msg_v01,
+					   data_len),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info wlfw_athdiag_read_resp_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = 1,
+		.elem_size      = sizeof(struct qmi_response_type_v01),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x02,
+		.offset         = offsetof(
+				   struct wlfw_athdiag_read_resp_msg_v01,
+				   resp),
+		.ei_array      = get_qmi_response_type_v01_ei(),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(
+				   struct wlfw_athdiag_read_resp_msg_v01,
+				   data_valid),
+	},
+	{
+		.data_type      = QMI_DATA_LEN,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint16_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(
+				   struct wlfw_athdiag_read_resp_msg_v01,
+				   data_len),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = QMI_WLFW_MAX_DATA_SIZE_V01,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = VAR_LEN_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(
+				   struct wlfw_athdiag_read_resp_msg_v01,
+				   data),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info wlfw_athdiag_write_req_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint32_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x01,
+		.offset         = offsetof(
+				   struct wlfw_athdiag_write_req_msg_v01,
+				   offset),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint32_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x02,
+		.offset         = offsetof(
+				   struct wlfw_athdiag_write_req_msg_v01,
+				   mem_type),
+	},
+	{
+		.data_type      = QMI_DATA_LEN,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint16_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x03,
+		.offset         = offsetof(
+				   struct wlfw_athdiag_write_req_msg_v01,
+				   data_len),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = QMI_WLFW_MAX_DATA_SIZE_V01,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = VAR_LEN_ARRAY,
+		.tlv_type       = 0x03,
+		.offset         = offsetof(
+				   struct wlfw_athdiag_write_req_msg_v01,
+				   data),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info wlfw_athdiag_write_resp_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = 1,
+		.elem_size      = sizeof(struct qmi_response_type_v01),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x02,
+		.offset         = offsetof(
+				   struct wlfw_athdiag_write_resp_msg_v01,
+				   resp),
+		.ei_array      = get_qmi_response_type_v01_ei(),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info wlfw_vbatt_req_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_UNSIGNED_8_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint64_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x01,
+		.offset         = offsetof(struct wlfw_vbatt_req_msg_v01,
+					   voltage_uv),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info wlfw_vbatt_resp_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = 1,
+		.elem_size      = sizeof(struct qmi_response_type_v01),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x02,
+		.offset         = offsetof(struct wlfw_vbatt_resp_msg_v01,
+					   resp),
+		.ei_array      = get_qmi_response_type_v01_ei(),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info wlfw_mac_addr_req_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(struct wlfw_mac_addr_req_msg_v01,
+					   mac_addr_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = QMI_WLFW_MAC_ADDR_SIZE_V01,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = STATIC_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(struct wlfw_mac_addr_req_msg_v01,
+					   mac_addr),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info wlfw_mac_addr_resp_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = 1,
+		.elem_size      = sizeof(struct qmi_response_type_v01),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x02,
+		.offset         = offsetof(struct wlfw_mac_addr_resp_msg_v01,
+					   resp),
+		.ei_array      = get_qmi_response_type_v01_ei(),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info wlfw_host_cap_req_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(struct wlfw_host_cap_req_msg_v01,
+					   daemon_support_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(struct wlfw_host_cap_req_msg_v01,
+					   daemon_support),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info wlfw_host_cap_resp_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = 1,
+		.elem_size      = sizeof(struct qmi_response_type_v01),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x02,
+		.offset         = offsetof(struct wlfw_host_cap_resp_msg_v01,
+					   resp),
+		.ei_array       = get_qmi_response_type_v01_ei(),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info wlfw_request_mem_ind_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint32_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x01,
+		.offset         = offsetof(struct wlfw_request_mem_ind_msg_v01,
+					   size),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info wlfw_respond_mem_req_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_UNSIGNED_8_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint64_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x01,
+		.offset         = offsetof(struct wlfw_respond_mem_req_msg_v01,
+					   addr),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint32_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x02,
+		.offset         = offsetof(struct wlfw_respond_mem_req_msg_v01,
+					   size),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info wlfw_respond_mem_resp_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = 1,
+		.elem_size      = sizeof(struct qmi_response_type_v01),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x02,
+		.offset         = offsetof(struct wlfw_respond_mem_resp_msg_v01,
+					   resp),
+		.ei_array       = get_qmi_response_type_v01_ei(),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info wlfw_fw_mem_ready_ind_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info wlfw_cold_boot_cal_done_ind_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info wlfw_rejuvenate_ind_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(struct wlfw_rejuvenate_ind_msg_v01,
+					   cause_for_rejuvenation_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(struct wlfw_rejuvenate_ind_msg_v01,
+					   cause_for_rejuvenation),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x11,
+		.offset         = offsetof(struct wlfw_rejuvenate_ind_msg_v01,
+					   requesting_sub_system_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x11,
+		.offset         = offsetof(struct wlfw_rejuvenate_ind_msg_v01,
+					   requesting_sub_system),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x12,
+		.offset         = offsetof(struct wlfw_rejuvenate_ind_msg_v01,
+					   line_number_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_2_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint16_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x12,
+		.offset         = offsetof(struct wlfw_rejuvenate_ind_msg_v01,
+					   line_number),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x13,
+		.offset         = offsetof(struct wlfw_rejuvenate_ind_msg_v01,
+					   function_name_valid),
+	},
+	{
+		.data_type      = QMI_STRING,
+		.elem_len       = QMI_WLFW_FUNCTION_NAME_LEN_V01 + 1,
+		.elem_size      = sizeof(char),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x13,
+		.offset         = offsetof(struct wlfw_rejuvenate_ind_msg_v01,
+					   function_name),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info wlfw_rejuvenate_ack_req_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info wlfw_rejuvenate_ack_resp_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = 1,
+		.elem_size      = sizeof(struct qmi_response_type_v01),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x02,
+		.offset         = offsetof(
+				   struct wlfw_rejuvenate_ack_resp_msg_v01,
+				   resp),
+		.ei_array       = get_qmi_response_type_v01_ei(),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info wlfw_dynamic_feature_mask_req_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(
+				   struct wlfw_dynamic_feature_mask_req_msg_v01,
+				   mask_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_8_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint64_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(
+				   struct wlfw_dynamic_feature_mask_req_msg_v01,
+				   mask),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info wlfw_dynamic_feature_mask_resp_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = 1,
+		.elem_size      = sizeof(struct qmi_response_type_v01),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x02,
+		.offset         = offsetof(
+			   struct wlfw_dynamic_feature_mask_resp_msg_v01,
+			   resp),
+		.ei_array       = get_qmi_response_type_v01_ei(),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(
+			   struct wlfw_dynamic_feature_mask_resp_msg_v01,
+			   prev_mask_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_8_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint64_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(
+			   struct wlfw_dynamic_feature_mask_resp_msg_v01,
+			   prev_mask),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x11,
+		.offset         = offsetof(
+			   struct wlfw_dynamic_feature_mask_resp_msg_v01,
+			   curr_mask_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_8_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint64_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x11,
+		.offset         = offsetof(
+			   struct wlfw_dynamic_feature_mask_resp_msg_v01,
+			   curr_mask),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
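+/*
+ * Illustrative only, not part of the original patch: each elem_info
+ * table above is meant to be handed to the msm QMI encode/decode
+ * library through a struct msg_desc. A minimal sketch, assuming the
+ * qmi_encdec API (struct msg_desc, qmi_kernel_encode()) found in msm
+ * 4.4 trees; verify the exact signatures in your tree.
+ */
+static int example_encode_vbatt_req(void *buf, uint32_t buf_len,
+				    uint64_t uv)
+{
+	struct wlfw_vbatt_req_msg_v01 req = { .voltage_uv = uv };
+	struct msg_desc desc = {
+		.msg_id = QMI_WLFW_VBATT_REQ_V01,
+		.max_msg_len = WLFW_VBATT_REQ_MSG_V01_MAX_MSG_LEN,
+		.ei_array = wlfw_vbatt_req_msg_v01_ei,
+	};
+
+	/* Walks the ei_array and emits one TLV per populated element. */
+	return qmi_kernel_encode(&desc, buf, buf_len, &req);
+}
+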
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/soc/qcom/wlan_firmware_service_v01.h	2019-10-29 09:26:24.825214745 +0100
@@ -0,0 +1,571 @@
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef WLAN_FIRMWARE_SERVICE_V01_H
+#define WLAN_FIRMWARE_SERVICE_V01_H
+
+#define WLFW_SERVICE_ID_V01 0x45
+#define WLFW_SERVICE_VERS_V01 0x01
+
+#define QMI_WLFW_BDF_DOWNLOAD_REQ_V01 0x0025
+#define QMI_WLFW_FW_MEM_READY_IND_V01 0x0037
+#define QMI_WLFW_INITIATE_CAL_UPDATE_IND_V01 0x002A
+#define QMI_WLFW_HOST_CAP_REQ_V01 0x0034
+#define QMI_WLFW_DYNAMIC_FEATURE_MASK_RESP_V01 0x003B
+#define QMI_WLFW_CAP_REQ_V01 0x0024
+#define QMI_WLFW_CAL_REPORT_REQ_V01 0x0026
+#define QMI_WLFW_CAL_UPDATE_RESP_V01 0x0029
+#define QMI_WLFW_CAL_DOWNLOAD_RESP_V01 0x0027
+#define QMI_WLFW_INI_RESP_V01 0x002F
+#define QMI_WLFW_CAL_REPORT_RESP_V01 0x0026
+#define QMI_WLFW_MAC_ADDR_RESP_V01 0x0033
+#define QMI_WLFW_INITIATE_CAL_DOWNLOAD_IND_V01 0x0028
+#define QMI_WLFW_HOST_CAP_RESP_V01 0x0034
+#define QMI_WLFW_MSA_READY_IND_V01 0x002B
+#define QMI_WLFW_ATHDIAG_WRITE_RESP_V01 0x0031
+#define QMI_WLFW_WLAN_MODE_REQ_V01 0x0022
+#define QMI_WLFW_IND_REGISTER_REQ_V01 0x0020
+#define QMI_WLFW_WLAN_CFG_RESP_V01 0x0023
+#define QMI_WLFW_COLD_BOOT_CAL_DONE_IND_V01 0x0038
+#define QMI_WLFW_REQUEST_MEM_IND_V01 0x0035
+#define QMI_WLFW_REJUVENATE_IND_V01 0x0039
+#define QMI_WLFW_DYNAMIC_FEATURE_MASK_REQ_V01 0x003B
+#define QMI_WLFW_ATHDIAG_WRITE_REQ_V01 0x0031
+#define QMI_WLFW_WLAN_MODE_RESP_V01 0x0022
+#define QMI_WLFW_RESPOND_MEM_REQ_V01 0x0036
+#define QMI_WLFW_PIN_CONNECT_RESULT_IND_V01 0x002C
+#define QMI_WLFW_FW_READY_IND_V01 0x0021
+#define QMI_WLFW_MSA_READY_RESP_V01 0x002E
+#define QMI_WLFW_CAL_UPDATE_REQ_V01 0x0029
+#define QMI_WLFW_INI_REQ_V01 0x002F
+#define QMI_WLFW_BDF_DOWNLOAD_RESP_V01 0x0025
+#define QMI_WLFW_REJUVENATE_ACK_RESP_V01 0x003A
+#define QMI_WLFW_MSA_INFO_RESP_V01 0x002D
+#define QMI_WLFW_MSA_READY_REQ_V01 0x002E
+#define QMI_WLFW_CAP_RESP_V01 0x0024
+#define QMI_WLFW_REJUVENATE_ACK_REQ_V01 0x003A
+#define QMI_WLFW_ATHDIAG_READ_RESP_V01 0x0030
+#define QMI_WLFW_VBATT_REQ_V01 0x0032
+#define QMI_WLFW_MAC_ADDR_REQ_V01 0x0033
+#define QMI_WLFW_RESPOND_MEM_RESP_V01 0x0036
+#define QMI_WLFW_VBATT_RESP_V01 0x0032
+#define QMI_WLFW_MSA_INFO_REQ_V01 0x002D
+#define QMI_WLFW_CAL_DOWNLOAD_REQ_V01 0x0027
+#define QMI_WLFW_ATHDIAG_READ_REQ_V01 0x0030
+#define QMI_WLFW_WLAN_CFG_REQ_V01 0x0023
+#define QMI_WLFW_IND_REGISTER_RESP_V01 0x0020
+
+#define QMI_WLFW_MAX_NUM_MEMORY_REGIONS_V01 2
+#define QMI_WLFW_MAX_NUM_CAL_V01 5
+#define QMI_WLFW_MAX_DATA_SIZE_V01 6144
+#define QMI_WLFW_FUNCTION_NAME_LEN_V01 128
+#define QMI_WLFW_MAX_NUM_CE_V01 12
+#define QMI_WLFW_MAX_TIMESTAMP_LEN_V01 32
+#define QMI_WLFW_MAX_BUILD_ID_LEN_V01 128
+#define QMI_WLFW_MAX_STR_LEN_V01 16
+#define QMI_WLFW_MAX_NUM_SHADOW_REG_V01 24
+#define QMI_WLFW_MAC_ADDR_SIZE_V01 6
+#define QMI_WLFW_MAX_NUM_SHADOW_REG_V2_V01 36
+#define QMI_WLFW_MAX_NUM_SVC_V01 24
+
+enum wlfw_driver_mode_enum_v01 {
+	WLFW_DRIVER_MODE_ENUM_MIN_VAL_V01 = INT_MIN,
+	QMI_WLFW_MISSION_V01 = 0,
+	QMI_WLFW_FTM_V01 = 1,
+	QMI_WLFW_EPPING_V01 = 2,
+	QMI_WLFW_WALTEST_V01 = 3,
+	QMI_WLFW_OFF_V01 = 4,
+	QMI_WLFW_CCPM_V01 = 5,
+	QMI_WLFW_QVIT_V01 = 6,
+	QMI_WLFW_CALIBRATION_V01 = 7,
+	WLFW_DRIVER_MODE_ENUM_MAX_VAL_V01 = INT_MAX,
+};
+
+enum wlfw_cal_temp_id_enum_v01 {
+	WLFW_CAL_TEMP_ID_ENUM_MIN_VAL_V01 = INT_MIN,
+	QMI_WLFW_CAL_TEMP_IDX_0_V01 = 0,
+	QMI_WLFW_CAL_TEMP_IDX_1_V01 = 1,
+	QMI_WLFW_CAL_TEMP_IDX_2_V01 = 2,
+	QMI_WLFW_CAL_TEMP_IDX_3_V01 = 3,
+	QMI_WLFW_CAL_TEMP_IDX_4_V01 = 4,
+	WLFW_CAL_TEMP_ID_ENUM_MAX_VAL_V01 = INT_MAX,
+};
+
+enum wlfw_pipedir_enum_v01 {
+	WLFW_PIPEDIR_ENUM_MIN_VAL_V01 = INT_MIN,
+	QMI_WLFW_PIPEDIR_NONE_V01 = 0,
+	QMI_WLFW_PIPEDIR_IN_V01 = 1,
+	QMI_WLFW_PIPEDIR_OUT_V01 = 2,
+	QMI_WLFW_PIPEDIR_INOUT_V01 = 3,
+	WLFW_PIPEDIR_ENUM_MAX_VAL_V01 = INT_MAX,
+};
+
+#define QMI_WLFW_CE_ATTR_FLAGS_V01 ((uint32_t)0x00)
+#define QMI_WLFW_CE_ATTR_NO_SNOOP_V01 ((uint32_t)0x01)
+#define QMI_WLFW_CE_ATTR_BYTE_SWAP_DATA_V01 ((uint32_t)0x02)
+#define QMI_WLFW_CE_ATTR_SWIZZLE_DESCRIPTORS_V01 ((uint32_t)0x04)
+#define QMI_WLFW_CE_ATTR_DISABLE_INTR_V01 ((uint32_t)0x08)
+#define QMI_WLFW_CE_ATTR_ENABLE_POLL_V01 ((uint32_t)0x10)
+
+#define QMI_WLFW_ALREADY_REGISTERED_V01 ((uint64_t)0x01ULL)
+#define QMI_WLFW_FW_READY_V01 ((uint64_t)0x02ULL)
+#define QMI_WLFW_MSA_READY_V01 ((uint64_t)0x04ULL)
+#define QMI_WLFW_FW_MEM_READY_V01 ((uint64_t)0x08ULL)
+
+#define QMI_WLFW_FW_REJUVENATE_V01 ((uint64_t)0x01ULL)
+
+struct wlfw_ce_tgt_pipe_cfg_s_v01 {
+	uint32_t pipe_num;
+	enum wlfw_pipedir_enum_v01 pipe_dir;
+	uint32_t nentries;
+	uint32_t nbytes_max;
+	uint32_t flags;
+};
+
+struct wlfw_ce_svc_pipe_cfg_s_v01 {
+	uint32_t service_id;
+	enum wlfw_pipedir_enum_v01 pipe_dir;
+	uint32_t pipe_num;
+};
+
+struct wlfw_shadow_reg_cfg_s_v01 {
+	uint16_t id;
+	uint16_t offset;
+};
+
+struct wlfw_shadow_reg_v2_cfg_s_v01 {
+	uint32_t addr;
+};
+
+struct wlfw_memory_region_info_s_v01 {
+	uint64_t region_addr;
+	uint32_t size;
+	uint8_t secure_flag;
+};
+
+struct wlfw_rf_chip_info_s_v01 {
+	uint32_t chip_id;
+	uint32_t chip_family;
+};
+
+struct wlfw_rf_board_info_s_v01 {
+	uint32_t board_id;
+};
+
+struct wlfw_soc_info_s_v01 {
+	uint32_t soc_id;
+};
+
+struct wlfw_fw_version_info_s_v01 {
+	uint32_t fw_version;
+	char fw_build_timestamp[QMI_WLFW_MAX_TIMESTAMP_LEN_V01 + 1];
+};
+
+struct wlfw_ind_register_req_msg_v01 {
+	uint8_t fw_ready_enable_valid;
+	uint8_t fw_ready_enable;
+	uint8_t initiate_cal_download_enable_valid;
+	uint8_t initiate_cal_download_enable;
+	uint8_t initiate_cal_update_enable_valid;
+	uint8_t initiate_cal_update_enable;
+	uint8_t msa_ready_enable_valid;
+	uint8_t msa_ready_enable;
+	uint8_t pin_connect_result_enable_valid;
+	uint8_t pin_connect_result_enable;
+	uint8_t client_id_valid;
+	uint32_t client_id;
+	uint8_t request_mem_enable_valid;
+	uint8_t request_mem_enable;
+	uint8_t fw_mem_ready_enable_valid;
+	uint8_t fw_mem_ready_enable;
+	uint8_t cold_boot_cal_done_enable_valid;
+	uint8_t cold_boot_cal_done_enable;
+	uint8_t rejuvenate_enable_valid;
+	uint32_t rejuvenate_enable;
+};
+#define WLFW_IND_REGISTER_REQ_MSG_V01_MAX_MSG_LEN 46
+extern struct elem_info wlfw_ind_register_req_msg_v01_ei[];
+
+struct wlfw_ind_register_resp_msg_v01 {
+	struct qmi_response_type_v01 resp;
+	uint8_t fw_status_valid;
+	uint64_t fw_status;
+};
+#define WLFW_IND_REGISTER_RESP_MSG_V01_MAX_MSG_LEN 18
+extern struct elem_info wlfw_ind_register_resp_msg_v01_ei[];
+
+struct wlfw_fw_ready_ind_msg_v01 {
+	char placeholder;
+};
+#define WLFW_FW_READY_IND_MSG_V01_MAX_MSG_LEN 0
+extern struct elem_info wlfw_fw_ready_ind_msg_v01_ei[];
+
+struct wlfw_msa_ready_ind_msg_v01 {
+	char placeholder;
+};
+#define WLFW_MSA_READY_IND_MSG_V01_MAX_MSG_LEN 0
+extern struct elem_info wlfw_msa_ready_ind_msg_v01_ei[];
+
+struct wlfw_pin_connect_result_ind_msg_v01 {
+	uint8_t pwr_pin_result_valid;
+	uint32_t pwr_pin_result;
+	uint8_t phy_io_pin_result_valid;
+	uint32_t phy_io_pin_result;
+	uint8_t rf_pin_result_valid;
+	uint32_t rf_pin_result;
+};
+#define WLFW_PIN_CONNECT_RESULT_IND_MSG_V01_MAX_MSG_LEN 21
+extern struct elem_info wlfw_pin_connect_result_ind_msg_v01_ei[];
+
+struct wlfw_wlan_mode_req_msg_v01 {
+	enum wlfw_driver_mode_enum_v01 mode;
+	uint8_t hw_debug_valid;
+	uint8_t hw_debug;
+};
+#define WLFW_WLAN_MODE_REQ_MSG_V01_MAX_MSG_LEN 11
+extern struct elem_info wlfw_wlan_mode_req_msg_v01_ei[];
+
+struct wlfw_wlan_mode_resp_msg_v01 {
+	struct qmi_response_type_v01 resp;
+};
+#define WLFW_WLAN_MODE_RESP_MSG_V01_MAX_MSG_LEN 7
+extern struct elem_info wlfw_wlan_mode_resp_msg_v01_ei[];
+
+struct wlfw_wlan_cfg_req_msg_v01 {
+	uint8_t host_version_valid;
+	char host_version[QMI_WLFW_MAX_STR_LEN_V01 + 1];
+	uint8_t tgt_cfg_valid;
+	uint32_t tgt_cfg_len;
+	struct wlfw_ce_tgt_pipe_cfg_s_v01 tgt_cfg[QMI_WLFW_MAX_NUM_CE_V01];
+	uint8_t svc_cfg_valid;
+	uint32_t svc_cfg_len;
+	struct wlfw_ce_svc_pipe_cfg_s_v01 svc_cfg[QMI_WLFW_MAX_NUM_SVC_V01];
+	uint8_t shadow_reg_valid;
+	uint32_t shadow_reg_len;
+	struct wlfw_shadow_reg_cfg_s_v01 shadow_reg[QMI_WLFW_MAX_NUM_SHADOW_REG_V01];
+	uint8_t shadow_reg_v2_valid;
+	uint32_t shadow_reg_v2_len;
+	struct wlfw_shadow_reg_v2_cfg_s_v01
+	shadow_reg_v2[QMI_WLFW_MAX_NUM_SHADOW_REG_V2_V01];
+};
+#define WLFW_WLAN_CFG_REQ_MSG_V01_MAX_MSG_LEN 803
+extern struct elem_info wlfw_wlan_cfg_req_msg_v01_ei[];
+
+struct wlfw_wlan_cfg_resp_msg_v01 {
+	struct qmi_response_type_v01 resp;
+};
+#define WLFW_WLAN_CFG_RESP_MSG_V01_MAX_MSG_LEN 7
+extern struct elem_info wlfw_wlan_cfg_resp_msg_v01_ei[];
+
+struct wlfw_cap_req_msg_v01 {
+	char placeholder;
+};
+#define WLFW_CAP_REQ_MSG_V01_MAX_MSG_LEN 0
+extern struct elem_info wlfw_cap_req_msg_v01_ei[];
+
+struct wlfw_cap_resp_msg_v01 {
+	struct qmi_response_type_v01 resp;
+	uint8_t chip_info_valid;
+	struct wlfw_rf_chip_info_s_v01 chip_info;
+	uint8_t board_info_valid;
+	struct wlfw_rf_board_info_s_v01 board_info;
+	uint8_t soc_info_valid;
+	struct wlfw_soc_info_s_v01 soc_info;
+	uint8_t fw_version_info_valid;
+	struct wlfw_fw_version_info_s_v01 fw_version_info;
+	uint8_t fw_build_id_valid;
+	char fw_build_id[QMI_WLFW_MAX_BUILD_ID_LEN_V01 + 1];
+};
+#define WLFW_CAP_RESP_MSG_V01_MAX_MSG_LEN 203
+extern struct elem_info wlfw_cap_resp_msg_v01_ei[];
+
+struct wlfw_bdf_download_req_msg_v01 {
+	uint8_t valid;
+	uint8_t file_id_valid;
+	enum wlfw_cal_temp_id_enum_v01 file_id;
+	uint8_t total_size_valid;
+	uint32_t total_size;
+	uint8_t seg_id_valid;
+	uint32_t seg_id;
+	uint8_t data_valid;
+	uint32_t data_len;
+	uint8_t data[QMI_WLFW_MAX_DATA_SIZE_V01];
+	uint8_t end_valid;
+	uint8_t end;
+};
+#define WLFW_BDF_DOWNLOAD_REQ_MSG_V01_MAX_MSG_LEN 6178
+extern struct elem_info wlfw_bdf_download_req_msg_v01_ei[];
+
+struct wlfw_bdf_download_resp_msg_v01 {
+	struct qmi_response_type_v01 resp;
+};
+#define WLFW_BDF_DOWNLOAD_RESP_MSG_V01_MAX_MSG_LEN 7
+extern struct elem_info wlfw_bdf_download_resp_msg_v01_ei[];
+
+struct wlfw_cal_report_req_msg_v01 {
+	uint32_t meta_data_len;
+	enum wlfw_cal_temp_id_enum_v01 meta_data[QMI_WLFW_MAX_NUM_CAL_V01];
+};
+#define WLFW_CAL_REPORT_REQ_MSG_V01_MAX_MSG_LEN 24
+extern struct elem_info wlfw_cal_report_req_msg_v01_ei[];
+
+struct wlfw_cal_report_resp_msg_v01 {
+	struct qmi_response_type_v01 resp;
+};
+#define WLFW_CAL_REPORT_RESP_MSG_V01_MAX_MSG_LEN 7
+extern struct elem_info wlfw_cal_report_resp_msg_v01_ei[];
+
+struct wlfw_initiate_cal_download_ind_msg_v01 {
+	enum wlfw_cal_temp_id_enum_v01 cal_id;
+};
+#define WLFW_INITIATE_CAL_DOWNLOAD_IND_MSG_V01_MAX_MSG_LEN 7
+extern struct elem_info wlfw_initiate_cal_download_ind_msg_v01_ei[];
+
+struct wlfw_cal_download_req_msg_v01 {
+	uint8_t valid;
+	uint8_t file_id_valid;
+	enum wlfw_cal_temp_id_enum_v01 file_id;
+	uint8_t total_size_valid;
+	uint32_t total_size;
+	uint8_t seg_id_valid;
+	uint32_t seg_id;
+	uint8_t data_valid;
+	uint32_t data_len;
+	uint8_t data[QMI_WLFW_MAX_DATA_SIZE_V01];
+	uint8_t end_valid;
+	uint8_t end;
+};
+#define WLFW_CAL_DOWNLOAD_REQ_MSG_V01_MAX_MSG_LEN 6178
+extern struct elem_info wlfw_cal_download_req_msg_v01_ei[];
+
+struct wlfw_cal_download_resp_msg_v01 {
+	struct qmi_response_type_v01 resp;
+};
+#define WLFW_CAL_DOWNLOAD_RESP_MSG_V01_MAX_MSG_LEN 7
+extern struct elem_info wlfw_cal_download_resp_msg_v01_ei[];
+
+struct wlfw_initiate_cal_update_ind_msg_v01 {
+	enum wlfw_cal_temp_id_enum_v01 cal_id;
+	uint32_t total_size;
+};
+#define WLFW_INITIATE_CAL_UPDATE_IND_MSG_V01_MAX_MSG_LEN 14
+extern struct elem_info wlfw_initiate_cal_update_ind_msg_v01_ei[];
+
+struct wlfw_cal_update_req_msg_v01 {
+	enum wlfw_cal_temp_id_enum_v01 cal_id;
+	uint32_t seg_id;
+};
+#define WLFW_CAL_UPDATE_REQ_MSG_V01_MAX_MSG_LEN 14
+extern struct elem_info wlfw_cal_update_req_msg_v01_ei[];
+
+struct wlfw_cal_update_resp_msg_v01 {
+	struct qmi_response_type_v01 resp;
+	uint8_t file_id_valid;
+	enum wlfw_cal_temp_id_enum_v01 file_id;
+	uint8_t total_size_valid;
+	uint32_t total_size;
+	uint8_t seg_id_valid;
+	uint32_t seg_id;
+	uint8_t data_valid;
+	uint32_t data_len;
+	uint8_t data[QMI_WLFW_MAX_DATA_SIZE_V01];
+	uint8_t end_valid;
+	uint8_t end;
+};
+#define WLFW_CAL_UPDATE_RESP_MSG_V01_MAX_MSG_LEN 6181
+extern struct elem_info wlfw_cal_update_resp_msg_v01_ei[];
+
+struct wlfw_msa_info_req_msg_v01 {
+	uint64_t msa_addr;
+	uint32_t size;
+};
+#define WLFW_MSA_INFO_REQ_MSG_V01_MAX_MSG_LEN 18
+extern struct elem_info wlfw_msa_info_req_msg_v01_ei[];
+
+struct wlfw_msa_info_resp_msg_v01 {
+	struct qmi_response_type_v01 resp;
+	uint32_t mem_region_info_len;
+	struct wlfw_memory_region_info_s_v01
+	mem_region_info[QMI_WLFW_MAX_NUM_MEMORY_REGIONS_V01];
+};
+#define WLFW_MSA_INFO_RESP_MSG_V01_MAX_MSG_LEN 37
+extern struct elem_info wlfw_msa_info_resp_msg_v01_ei[];
+
+struct wlfw_msa_ready_req_msg_v01 {
+	char placeholder;
+};
+#define WLFW_MSA_READY_REQ_MSG_V01_MAX_MSG_LEN 0
+extern struct elem_info wlfw_msa_ready_req_msg_v01_ei[];
+
+struct wlfw_msa_ready_resp_msg_v01 {
+	struct qmi_response_type_v01 resp;
+};
+#define WLFW_MSA_READY_RESP_MSG_V01_MAX_MSG_LEN 7
+extern struct elem_info wlfw_msa_ready_resp_msg_v01_ei[];
+
+struct wlfw_ini_req_msg_v01 {
+	uint8_t enablefwlog_valid;
+	uint8_t enablefwlog;
+};
+#define WLFW_INI_REQ_MSG_V01_MAX_MSG_LEN 4
+extern struct elem_info wlfw_ini_req_msg_v01_ei[];
+
+struct wlfw_ini_resp_msg_v01 {
+	struct qmi_response_type_v01 resp;
+};
+#define WLFW_INI_RESP_MSG_V01_MAX_MSG_LEN 7
+extern struct elem_info wlfw_ini_resp_msg_v01_ei[];
+
+struct wlfw_athdiag_read_req_msg_v01 {
+	uint32_t offset;
+	uint32_t mem_type;
+	uint32_t data_len;
+};
+#define WLFW_ATHDIAG_READ_REQ_MSG_V01_MAX_MSG_LEN 21
+extern struct elem_info wlfw_athdiag_read_req_msg_v01_ei[];
+
+struct wlfw_athdiag_read_resp_msg_v01 {
+	struct qmi_response_type_v01 resp;
+	uint8_t data_valid;
+	uint32_t data_len;
+	uint8_t data[QMI_WLFW_MAX_DATA_SIZE_V01];
+};
+#define WLFW_ATHDIAG_READ_RESP_MSG_V01_MAX_MSG_LEN 6156
+extern struct elem_info wlfw_athdiag_read_resp_msg_v01_ei[];
+
+struct wlfw_athdiag_write_req_msg_v01 {
+	uint32_t offset;
+	uint32_t mem_type;
+	uint32_t data_len;
+	uint8_t data[QMI_WLFW_MAX_DATA_SIZE_V01];
+};
+#define WLFW_ATHDIAG_WRITE_REQ_MSG_V01_MAX_MSG_LEN 6163
+extern struct elem_info wlfw_athdiag_write_req_msg_v01_ei[];
+
+struct wlfw_athdiag_write_resp_msg_v01 {
+	struct qmi_response_type_v01 resp;
+};
+#define WLFW_ATHDIAG_WRITE_RESP_MSG_V01_MAX_MSG_LEN 7
+extern struct elem_info wlfw_athdiag_write_resp_msg_v01_ei[];
+
+struct wlfw_vbatt_req_msg_v01 {
+	uint64_t voltage_uv;
+};
+#define WLFW_VBATT_REQ_MSG_V01_MAX_MSG_LEN 11
+extern struct elem_info wlfw_vbatt_req_msg_v01_ei[];
+
+struct wlfw_vbatt_resp_msg_v01 {
+	struct qmi_response_type_v01 resp;
+};
+#define WLFW_VBATT_RESP_MSG_V01_MAX_MSG_LEN 7
+extern struct elem_info wlfw_vbatt_resp_msg_v01_ei[];
+
+struct wlfw_mac_addr_req_msg_v01 {
+	uint8_t mac_addr_valid;
+	uint8_t mac_addr[QMI_WLFW_MAC_ADDR_SIZE_V01];
+};
+#define WLFW_MAC_ADDR_REQ_MSG_V01_MAX_MSG_LEN 9
+extern struct elem_info wlfw_mac_addr_req_msg_v01_ei[];
+
+struct wlfw_mac_addr_resp_msg_v01 {
+	struct qmi_response_type_v01 resp;
+};
+#define WLFW_MAC_ADDR_RESP_MSG_V01_MAX_MSG_LEN 7
+extern struct elem_info wlfw_mac_addr_resp_msg_v01_ei[];
+
+struct wlfw_host_cap_req_msg_v01 {
+	uint8_t daemon_support_valid;
+	uint8_t daemon_support;
+};
+#define WLFW_HOST_CAP_REQ_MSG_V01_MAX_MSG_LEN 4
+extern struct elem_info wlfw_host_cap_req_msg_v01_ei[];
+
+struct wlfw_host_cap_resp_msg_v01 {
+	struct qmi_response_type_v01 resp;
+};
+#define WLFW_HOST_CAP_RESP_MSG_V01_MAX_MSG_LEN 7
+extern struct elem_info wlfw_host_cap_resp_msg_v01_ei[];
+
+struct wlfw_request_mem_ind_msg_v01 {
+	uint32_t size;
+};
+#define WLFW_REQUEST_MEM_IND_MSG_V01_MAX_MSG_LEN 7
+extern struct elem_info wlfw_request_mem_ind_msg_v01_ei[];
+
+struct wlfw_respond_mem_req_msg_v01 {
+	uint64_t addr;
+	uint32_t size;
+};
+#define WLFW_RESPOND_MEM_REQ_MSG_V01_MAX_MSG_LEN 18
+extern struct elem_info wlfw_respond_mem_req_msg_v01_ei[];
+
+struct wlfw_respond_mem_resp_msg_v01 {
+	struct qmi_response_type_v01 resp;
+};
+#define WLFW_RESPOND_MEM_RESP_MSG_V01_MAX_MSG_LEN 7
+extern struct elem_info wlfw_respond_mem_resp_msg_v01_ei[];
+
+struct wlfw_fw_mem_ready_ind_msg_v01 {
+	char placeholder;
+};
+#define WLFW_FW_MEM_READY_IND_MSG_V01_MAX_MSG_LEN 0
+extern struct elem_info wlfw_fw_mem_ready_ind_msg_v01_ei[];
+
+struct wlfw_cold_boot_cal_done_ind_msg_v01 {
+	char placeholder;
+};
+#define WLFW_COLD_BOOT_CAL_DONE_IND_MSG_V01_MAX_MSG_LEN 0
+extern struct elem_info wlfw_cold_boot_cal_done_ind_msg_v01_ei[];
+
+struct wlfw_rejuvenate_ind_msg_v01 {
+	uint8_t cause_for_rejuvenation_valid;
+	uint8_t cause_for_rejuvenation;
+	uint8_t requesting_sub_system_valid;
+	uint8_t requesting_sub_system;
+	uint8_t line_number_valid;
+	uint16_t line_number;
+	uint8_t function_name_valid;
+	char function_name[QMI_WLFW_FUNCTION_NAME_LEN_V01 + 1];
+};
+#define WLFW_REJUVENATE_IND_MSG_V01_MAX_MSG_LEN 144
+extern struct elem_info wlfw_rejuvenate_ind_msg_v01_ei[];
+
+struct wlfw_rejuvenate_ack_req_msg_v01 {
+	char placeholder;
+};
+#define WLFW_REJUVENATE_ACK_REQ_MSG_V01_MAX_MSG_LEN 0
+extern struct elem_info wlfw_rejuvenate_ack_req_msg_v01_ei[];
+
+struct wlfw_rejuvenate_ack_resp_msg_v01 {
+	struct qmi_response_type_v01 resp;
+};
+#define WLFW_REJUVENATE_ACK_RESP_MSG_V01_MAX_MSG_LEN 7
+extern struct elem_info wlfw_rejuvenate_ack_resp_msg_v01_ei[];
+
+struct wlfw_dynamic_feature_mask_req_msg_v01 {
+	uint8_t mask_valid;
+	uint64_t mask;
+};
+#define WLFW_DYNAMIC_FEATURE_MASK_REQ_MSG_V01_MAX_MSG_LEN 11
+extern struct elem_info wlfw_dynamic_feature_mask_req_msg_v01_ei[];
+
+struct wlfw_dynamic_feature_mask_resp_msg_v01 {
+	struct qmi_response_type_v01 resp;
+	uint8_t prev_mask_valid;
+	uint64_t prev_mask;
+	uint8_t curr_mask_valid;
+	uint64_t curr_mask;
+};
+#define WLFW_DYNAMIC_FEATURE_MASK_RESP_MSG_V01_MAX_MSG_LEN 29
+extern struct elem_info wlfw_dynamic_feature_mask_resp_msg_v01_ei[];
+
+#endif
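+
+/*
+ * Illustrative decode-side sketch (not part of the original patch),
+ * again assuming the msm qmi_encdec API (struct msg_desc,
+ * qmi_kernel_decode()); verify the signatures in your tree.
+ * resp_buf/resp_len would come off the QMI transport.
+ */
+static int example_decode_vbatt_resp(void *resp_buf, uint32_t resp_len,
+				     struct wlfw_vbatt_resp_msg_v01 *resp)
+{
+	struct msg_desc desc = {
+		.msg_id = QMI_WLFW_VBATT_RESP_V01,
+		.max_msg_len = WLFW_VBATT_RESP_MSG_V01_MAX_MSG_LEN,
+		.ei_array = wlfw_vbatt_resp_msg_v01_ei,
+	};
+
+	/* Fills *resp from the TLV stream; negative return on bad input. */
+	return qmi_kernel_decode(&desc, resp, resp_buf, resp_len);
+}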
diff -Nruw linux-4.4.115-fbx/drivers/soundwire./Kconfig linux-4.4.115-fbx/drivers/soundwire/Kconfig
--- linux-4.4.115-fbx/drivers/soundwire./Kconfig	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/soundwire/Kconfig	2019-01-22 16:16:26.687275240 +0100
@@ -0,0 +1,17 @@
+#
+# SOUNDWIRE driver configuration
+#
+menuconfig SOUNDWIRE
+	bool "Soundwire support"
+	help
+	  SoundWire is a two-wire interface used to connect simple
+	  audio peripheral components in mobile devices.
+
+if SOUNDWIRE
+config SOUNDWIRE_WCD_CTRL
+	depends on WCD9335_CODEC
+	tristate "QTI WCD CODEC Soundwire controller"
+	default n
+	help
+	  Select driver for QTI's Soundwire Master Component.
+endif
diff -Nruw linux-4.4.115-fbx/drivers/soundwire./Makefile linux-4.4.115-fbx/drivers/soundwire/Makefile
--- linux-4.4.115-fbx/drivers/soundwire./Makefile	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/soundwire/Makefile	2019-01-22 16:16:26.687275240 +0100
@@ -0,0 +1,5 @@
+#
+# Makefile for kernel soundwire framework.
+#
+obj-$(CONFIG_SOUNDWIRE)			+= soundwire.o
+obj-$(CONFIG_SOUNDWIRE_WCD_CTRL)	+= swr-wcd-ctrl.o
diff -Nruw linux-4.4.115-fbx/drivers/soundwire./soundwire.c linux-4.4.115-fbx/drivers/soundwire/soundwire.c
--- linux-4.4.115-fbx/drivers/soundwire./soundwire.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/soundwire/soundwire.c	2019-10-29 09:26:24.829214784 +0100
@@ -0,0 +1,1033 @@
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/of_device.h>
+#include <linux/completion.h>
+#include <linux/idr.h>
+#include <linux/pm_runtime.h>
+#include <linux/soundwire/soundwire.h>
+
+struct boardinfo {
+	struct list_head	list;
+	struct swr_boardinfo	board_info;
+};
+
+static LIST_HEAD(board_list);
+static LIST_HEAD(swr_master_list);
+static DEFINE_MUTEX(board_lock);
+static DEFINE_IDR(master_idr);
+static DEFINE_MUTEX(swr_lock);
+
+static struct device_type swr_dev_type;
+
+#define SOUNDWIRE_NAME_SIZE	32
+
+static void swr_master_put(struct swr_master *master)
+{
+	if (master)
+		put_device(&master->dev);
+}
+
+static struct swr_master *swr_master_get(struct swr_master *master)
+{
+	if (!master || !get_device(&master->dev))
+		return NULL;
+	return master;
+}
+
+static void swr_dev_release(struct device *dev)
+{
+	struct swr_device *swr_dev = to_swr_device(dev);
+	struct swr_master *master;
+
+	if (!swr_dev)
+		return;
+	master = swr_dev->master;
+	if (!master)
+		return;
+	mutex_lock(&master->mlock);
+	list_del_init(&swr_dev->dev_list);
+	mutex_unlock(&master->mlock);
+	swr_master_put(swr_dev->master);
+	kfree(swr_dev);
+}
+
+/**
+ * swr_remove_device - remove a soundwire device
+ * @swr_dev: soundwire device to remove
+ *
+ * Remove a soundwire device: walk the master's device
+ * list and remove swr_dev from it.
+ */
+void swr_remove_device(struct swr_device *swr_dev)
+{
+	struct swr_device *swr_dev_loop, *safe;
+
+	list_for_each_entry_safe(swr_dev_loop, safe,
+				 &swr_dev->master->devices,
+				 dev_list) {
+		if (swr_dev == swr_dev_loop)
+			list_del(&swr_dev_loop->dev_list);
+	}
+}
+EXPORT_SYMBOL(swr_remove_device);
+
+/**
+ * swr_new_device - instantiate a new soundwire device
+ * @master: Controller to which device is connected
+ * @info: Describes the soundwire device
+ * Context: can sleep
+ *
+ * Create a soundwire device. Binding is handled through driver model
+ * probe/remove methods. A driver may already be bound to this device
+ * by the time this function returns.
+ *
+ * Returns the new soundwire device or NULL
+ */
+struct swr_device *swr_new_device(struct swr_master *master,
+				 struct swr_boardinfo const *info)
+{
+	int result;
+	struct swr_device *swr;
+
+	if (!master || !swr_master_get(master)) {
+		pr_err("%s: master is NULL\n", __func__);
+		return NULL;
+	}
+
+	swr = kzalloc(sizeof(*swr), GFP_KERNEL);
+	if (!swr) {
+		dev_err(&master->dev, "cannot alloc swr_device\n");
+		put_device(&master->dev);
+		return NULL;
+	}
+	swr->master = master;
+	swr->addr = info->addr;
+	strlcpy(swr->name, info->name, sizeof(swr->name));
+	swr->dev.type = &swr_dev_type;
+	swr->dev.parent = &master->dev;
+	swr->dev.bus = &soundwire_type;
+	swr->dev.release = swr_dev_release;
+	swr->dev.of_node = info->of_node;
+	mutex_lock(&master->mlock);
+	list_add_tail(&swr->dev_list, &master->devices);
+	mutex_unlock(&master->mlock);
+
+	dev_set_name(&swr->dev, "%s.%lx", swr->name, swr->addr);
+	result = device_register(&swr->dev);
+	if (result) {
+		dev_err(&master->dev, "device [%s] register failed err %d\n",
+			swr->name, result);
+		goto err_out;
+	}
+	dev_dbg(&master->dev, "Device [%s] registered with bus id %s\n",
+		swr->name, dev_name(&swr->dev));
+	return swr;
+
+err_out:
+	dev_dbg(&master->dev, "Failed to register swr device %s at 0x%lx %d\n",
+		swr->name, swr->addr, result);
+	swr_master_put(master);
+	kfree(swr);
+	return NULL;
+}
+EXPORT_SYMBOL(swr_new_device);
+
+/**
+ * of_register_swr_devices - register child devices on to the soundwire bus
+ * @master: pointer to soundwire master device
+ *
+ * Registers a soundwire device for each available child node of the
+ * master node that has a valid "reg" property (an illustrative node
+ * shape is sketched after this function).
+ *
+ */
+int of_register_swr_devices(struct swr_master *master)
+{
+	struct swr_device *swr;
+	struct device_node *node;
+
+	if (!master->dev.of_node)
+		return -EINVAL;
+
+	for_each_available_child_of_node(master->dev.of_node, node) {
+		struct swr_boardinfo info = {};
+		u64 addr;
+
+		dev_dbg(&master->dev, "of_swr:register %s\n", node->full_name);
+
+		if (of_modalias_node(node, info.name, sizeof(info.name)) < 0) {
+			dev_err(&master->dev, "of_swr:modalias failure %s\n",
+				node->full_name);
+			continue;
+		}
+		if (of_property_read_u64(node, "reg", &addr)) {
+			dev_err(&master->dev, "of_swr:invalid reg %s\n",
+				node->full_name);
+			continue;
+		}
+		info.addr = addr;
+		info.of_node = of_node_get(node);
+		master->num_dev++;
+		swr = swr_new_device(master, &info);
+		if (!swr) {
+			dev_err(&master->dev, "of_swr: Register failed %s\n",
+				node->full_name);
+			of_node_put(node);
+			master->num_dev--;
+			continue;
+		}
+	}
+	return 0;
+}
+EXPORT_SYMBOL(of_register_swr_devices);
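+
+/*
+ * Illustrative device-tree shape this walker expects; the node and
+ * compatible names are hypothetical, not taken from any shipping DT.
+ * Each child needs a modalias-resolvable name and a 64-bit "reg"
+ * used as the enumeration address:
+ *
+ *	swr_master {
+ *		speaker@20170211 {
+ *			compatible = "qcom,wsa881x";
+ *			reg = <0x00 0x20170211>;
+ *		};
+ *	};
+ */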
+
+/**
+ * swr_port_response - response from master to free the completed transaction
+ * @mstr: pointer to soundwire master device
+ * @tid: transaction id that indicates transaction to be freed.
+ *
+ * The master calls this function to free the completed transaction memory
+ */
+void swr_port_response(struct swr_master *mstr, u8 tid)
+{
+	struct swr_params *txn;
+
+	txn = mstr->port_txn[tid];
+
+	if (txn == NULL) {
+		dev_err(&mstr->dev, "%s: transaction is already NULL\n",
+			__func__);
+		return;
+	}
+	mstr->port_txn[tid] = NULL;
+	kfree(txn);
+}
+EXPORT_SYMBOL(swr_port_response);
+
+/**
+ * swr_remove_from_group - remove a soundwire slave device from its group
+ * @dev: pointer to the soundwire slave device
+ * @dev_num: device number of the soundwire slave device
+ *
+ * Returns error code for failure and 0 for success
+ */
+int swr_remove_from_group(struct swr_device *dev, u8 dev_num)
+{
+	struct swr_master *master;
+
+	if (!dev)
+		return -ENODEV;
+
+	master = dev->master;
+	if (!master)
+		return -EINVAL;
+
+	if (!dev->group_id)
+		return 0;
+
+	if (master->gr_sid == dev_num)
+		return 0;
+
+	if (master->remove_from_group && master->remove_from_group(master))
+		dev_dbg(&master->dev, "%s: falling back to GROUP_NONE\n",
+			__func__);
+
+	return 0;
+}
+EXPORT_SYMBOL(swr_remove_from_group);
+
+/**
+ * swr_slvdev_datapath_control - Enables/Disables soundwire slave device
+ *                               data path
+ * @dev: pointer to soundwire slave device
+ * @dev_num: device number of the soundwire slave device
+ * @enable: true to enable the data path, false to disable it
+ *
+ * Returns error code for failure and 0 for success
+ */
+int swr_slvdev_datapath_control(struct swr_device *dev, u8 dev_num,
+				bool enable)
+{
+	struct swr_master *master;
+
+	if (!dev)
+		return -ENODEV;
+
+	master = dev->master;
+	if (!master)
+		return -EINVAL;
+
+	if (dev->group_id) {
+		/* Broadcast */
+		if (master->gr_sid != dev_num) {
+			if (!master->gr_sid)
+				master->gr_sid = dev_num;
+			else
+				return 0;
+		}
+	}
+
+	if (master->slvdev_datapath_control)
+		master->slvdev_datapath_control(master, enable);
+
+	return 0;
+}
+EXPORT_SYMBOL(swr_slvdev_datapath_control);
+
+/**
+ * swr_connect_port - enable soundwire slave port(s)
+ * @dev: pointer to soundwire slave device
+ * @port_id: logical port id(s) of soundwire slave device
+ * @num_port: number of slave device ports that need to be enabled
+ * @ch_mask: channels for each port that needs to be enabled
+ * @ch_rate: rate at which each port/channels operate
+ * @num_ch: number of channels for each port
+ *
+ * A soundwire slave device calls the swr_connect_port API to enable all
+ * or some of its ports with the corresponding channels and channel rate.
+ * This API calls the master's connect_port callback to calculate the
+ * frame structure and enable the master and slave ports.
+ */
+int swr_connect_port(struct swr_device *dev, u8 *port_id, u8 num_port,
+			u8 *ch_mask, u32 *ch_rate, u8 *num_ch)
+{
+	u8 i = 0;
+	int ret = 0;
+	struct swr_params *txn = NULL;
+	struct swr_params **temp_txn = NULL;
+	struct swr_master *master = dev->master;
+
+	if (!master) {
+		pr_err("%s: Master is NULL\n", __func__);
+		return -EINVAL;
+	}
+	if (num_port > SWR_MAX_DEV_PORT_NUM) {
+		dev_err(&master->dev, "%s: num_port %d exceeds max port %d\n",
+			__func__, num_port, SWR_MAX_DEV_PORT_NUM);
+		return -EINVAL;
+	}
+
+	/*
+	 * create "txn" to accomodate ports enablement of
+	 * different slave devices calling swr_connect_port at the
+	 * same time. Once master process the txn data, it calls
+	 * swr_port_response() to free the transaction. Maximum
+	 * of 256 transactions can be allocated.
+	 */
+	txn = kzalloc(sizeof(struct swr_params), GFP_KERNEL);
+	if (!txn) {
+		dev_err(&master->dev, "%s: txn memory alloc failed\n",
+			__func__);
+		return -ENOMEM;
+	}
+
+	mutex_lock(&master->mlock);
+	for (i = 0; i < master->last_tid; i++) {
+		if (master->port_txn[i] == NULL)
+			break;
+	}
+	if (i >= master->last_tid) {
+		if (master->last_tid == 255) {
+			mutex_unlock(&master->mlock);
+			kfree(txn);
+			dev_err(&master->dev, "%s Max tid reached\n",
+				__func__);
+			return -ENOMEM;
+		}
+		temp_txn = krealloc(master->port_txn,
+				(i + 1) * sizeof(struct swr_params *),
+				GFP_KERNEL);
+		if (!temp_txn) {
+			mutex_unlock(&master->mlock);
+			kfree(txn);
+			dev_err(&master->dev, "%s Not able to allocate\n"
+				"master port transaction memory\n",
+				__func__);
+			return -ENOMEM;
+		}
+		master->port_txn = temp_txn;
+		master->last_tid++;
+	}
+	master->port_txn[i] = txn;
+	mutex_unlock(&master->mlock);
+	txn->tid = i;
+
+	txn->dev_id = dev->dev_num;
+	txn->num_port = num_port;
+	for (i = 0; i < num_port; i++) {
+		txn->port_id[i] = port_id[i];
+		txn->num_ch[i]  = num_ch[i];
+		txn->ch_rate[i] = ch_rate[i];
+		txn->ch_en[i]   = ch_mask[i];
+	}
+	ret = master->connect_port(master, txn);
+	return ret;
+}
+EXPORT_SYMBOL(swr_connect_port);
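+
+/*
+ * Hedged usage sketch (not in the original patch): a slave codec
+ * driver enabling one stereo port at 48 kHz. The port number, channel
+ * mask and rate below are illustrative values, not from a real codec.
+ */
+static int example_enable_rx_port(struct swr_device *sdev)
+{
+	u8 port_id[1] = { 1 };
+	u8 ch_mask[1] = { 0x3 };	/* two channels */
+	u32 ch_rate[1] = { 48000 };
+	u8 num_ch[1] = { 2 };
+
+	return swr_connect_port(sdev, port_id, 1, ch_mask, ch_rate,
+				num_ch);
+}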
+
+/**
+ * swr_disconnect_port - disable soundwire slave port(s)
+ * @dev: pointer to soundwire slave device
+ * @port_id: logical port id(s) of soundwire slave device
+ * @num_port: number of slave device ports that need to be disabled
+ *
+ * A soundwire slave device calls the swr_disconnect_port API to disable all
+ * or some of its ports. This API calls the master's disconnect_port callback
+ * to disable the master and slave ports and (re)configure the frame
+ * structure.
+ */
+int swr_disconnect_port(struct swr_device *dev, u8 *port_id, u8 num_port)
+{
+	u8 i = 0;
+	int ret;
+	struct swr_params *txn = NULL;
+	struct swr_params **temp_txn = NULL;
+	struct swr_master *master = dev->master;
+
+	if (!master) {
+		pr_err("%s: Master is NULL\n", __func__);
+		return -EINVAL;
+	}
+
+	if (num_port > SWR_MAX_DEV_PORT_NUM) {
+		dev_err(&master->dev, "%s: num_port %d exceeds max port %d\n",
+			__func__, num_port, SWR_MAX_DEV_PORT_NUM);
+		return -EINVAL;
+	}
+
+	txn = kzalloc(sizeof(struct swr_params), GFP_KERNEL);
+	if (!txn) {
+		dev_err(&master->dev, "%s: txn memory alloc failed\n",
+			__func__);
+		return -ENOMEM;
+	}
+
+	mutex_lock(&master->mlock);
+	for (i = 0; i < master->last_tid; i++) {
+		if (master->port_txn[i] == NULL)
+			break;
+	}
+	if (i >= master->last_tid) {
+		if (master->last_tid == 255) {
+			mutex_unlock(&master->mlock);
+			kfree(txn);
+			dev_err(&master->dev, "%s Max tid reached\n",
+				__func__);
+			return -ENOMEM;
+		}
+		temp_txn = krealloc(master->port_txn,
+				(i + 1) * sizeof(struct swr_params *),
+				GFP_KERNEL);
+		if (!temp_txn) {
+			mutex_unlock(&master->mlock);
+			kfree(txn);
+			dev_err(&master->dev, "%s Not able to allocate\n"
+				"master port transaction memory\n",
+				__func__);
+			return -ENOMEM;
+		}
+		master->port_txn = temp_txn;
+		master->last_tid++;
+	}
+	master->port_txn[i] = txn;
+	mutex_unlock(&master->mlock);
+	txn->tid = i;
+
+	txn->dev_id = dev->dev_num;
+	txn->num_port = num_port;
+	for (i = 0; i < num_port; i++)
+		txn->port_id[i] = port_id[i];
+	ret = master->disconnect_port(master, txn);
+	return ret;
+}
+EXPORT_SYMBOL(swr_disconnect_port);
+
+/**
+ * swr_get_logical_dev_num - Get soundwire slave logical device number
+ * @dev: pointer to soundwire slave device
+ * @dev_id: physical device id of soundwire slave device
+ * @dev_num: pointer to logical device num of soundwire slave device
+ *
+ * This API gets the logical device number of the soundwire slave device.
+ */
+int swr_get_logical_dev_num(struct swr_device *dev, u64 dev_id,
+			u8 *dev_num)
+{
+	int ret = 0;
+	struct swr_master *master = dev->master;
+
+	if (!master) {
+		pr_err("%s: Master is NULL\n", __func__);
+		return -EINVAL;
+	}
+	mutex_lock(&master->mlock);
+	ret = master->get_logical_dev_num(master, dev_id, dev_num);
+	if (ret) {
+		pr_err("%s: Error %d to get logical addr for device %llx\n",
+			__func__, ret, dev_id);
+	}
+	mutex_unlock(&master->mlock);
+	return ret;
+}
+EXPORT_SYMBOL(swr_get_logical_dev_num);
+
+/**
+ * swr_read - read soundwire slave device registers
+ * @dev: pointer to soundwire slave device
+ * @dev_num: logical device num of soundwire slave device
+ * @reg_addr: base register address that needs to be read
+ * @buf: pointer to store the values of registers from base address
+ * @len: length of the buffer
+ *
+ * This API reads the values of consecutive registers, starting at the
+ * base register address, from the soundwire slave device.
+ */
+int swr_read(struct swr_device *dev, u8 dev_num, u16 reg_addr,
+	     void *buf, u32 len)
+{
+	struct swr_master *master = dev->master;
+
+	if (!master)
+		return -EINVAL;
+	return master->read(master, dev_num, reg_addr, buf, len);
+}
+EXPORT_SYMBOL(swr_read);
+
+/**
+ * swr_bulk_write - write soundwire slave device registers
+ * @dev: pointer to soundwire slave device
+ * @dev_num: logical device num of soundwire slave device
+ * @reg: register addresses of soundwire slave device
+ * @buf: values to be written to those registers
+ * @len: number of registers to write
+ *
+ * This API writes the given values to the register addresses of the
+ * soundwire slave device.
+ */
+int swr_bulk_write(struct swr_device *dev, u8 dev_num, void *reg,
+		   const void *buf, size_t len)
+{
+	struct swr_master *master;
+
+	if (!dev || !dev->master)
+		return -EINVAL;
+
+	master = dev->master;
+	if (dev->group_id) {
+		if (master->gr_sid != dev_num) {
+			if (!master->gr_sid)
+				master->gr_sid = dev_num;
+			else
+				return 0;
+		}
+		dev_num = dev->group_id;
+	}
+	if (master->bulk_write)
+		return master->bulk_write(master, dev_num, reg, buf, len);
+
+	return -ENOSYS;
+}
+EXPORT_SYMBOL(swr_bulk_write);
+
+/**
+ * swr_write - write soundwire slave device registers
+ * @dev: pointer to soundwire slave device
+ * @dev_num: logical device num of soundwire slave device
+ * @reg_addr: register address of soundwire slave device
+ * @buf: value to be written to the register
+ *
+ * This API writes the value to the given register address of the
+ * soundwire slave device.
+ */
+int swr_write(struct swr_device *dev, u8 dev_num, u16 reg_addr,
+	      const void *buf)
+{
+	struct swr_master *master = dev->master;
+
+	if (!master)
+		return -EINVAL;
+
+	if (dev->group_id) {
+		if (master->gr_sid != dev_num) {
+			if (!master->gr_sid)
+				master->gr_sid = dev_num;
+			else
+				return 0;
+		}
+		dev_num = dev->group_id;
+	}
+	return master->write(master, dev_num, reg_addr, buf);
+}
+EXPORT_SYMBOL(swr_write);
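+
+/*
+ * Hedged sketch of a typical slave bring-up path, assuming only the
+ * helpers above: resolve the logical device number from the 48-bit
+ * enumeration address, then address registers through it. The
+ * register offset and value are hypothetical.
+ */
+static int example_slave_init(struct swr_device *sdev, u64 enum_addr)
+{
+	u8 devnum;
+	u8 val = 0x1;
+	int ret;
+
+	ret = swr_get_logical_dev_num(sdev, enum_addr, &devnum);
+	if (ret)
+		return ret;
+
+	return swr_write(sdev, devnum, 0x40 /* hypothetical reg */, &val);
+}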
+
+/**
+ * swr_device_up - Function to bring up the soundwire slave device
+ * @swr_dev: pointer to soundwire slave device
+ * Context: can sleep
+ *
+ * This API will be called by the soundwire master to bring up the slave
+ * device.
+ */
+int swr_device_up(struct swr_device *swr_dev)
+{
+	struct device *dev;
+	const struct swr_driver *sdrv;
+
+	if (!swr_dev)
+		return -EINVAL;
+
+	dev = &swr_dev->dev;
+	sdrv = to_swr_driver(dev->driver);
+	if (!sdrv)
+		return 0;
+
+	if (sdrv->device_up)
+		return sdrv->device_up(to_swr_device(dev));
+
+	return -ENODEV;
+}
+EXPORT_SYMBOL(swr_device_up);
+
+/**
+ * swr_device_down - Function to shut down the soundwire slave device
+ * @swr_dev: pointer to soundwire slave device
+ * Context: can sleep
+ *
+ * This API will be called by the soundwire master to put the slave device in
+ * shutdown state.
+ */
+int swr_device_down(struct swr_device *swr_dev)
+{
+	struct device *dev;
+	const struct swr_driver *sdrv;
+
+	if (!swr_dev)
+		return -EINVAL;
+
+	dev = &swr_dev->dev;
+	sdrv = to_swr_driver(dev->driver);
+	if (!sdrv)
+		return 0;
+
+	if (sdrv->device_down)
+		return sdrv->device_down(to_swr_device(dev));
+
+	return -ENODEV;
+}
+EXPORT_SYMBOL(swr_device_down);
+
+/**
+ * swr_reset_device - reset soundwire slave device
+ * @swr_dev: pointer to soundwire slave device
+ * Context: can sleep
+ *
+ * This API will be called by the soundwire master to reset the slave
+ * device when the slave device is not responding or is in an
+ * undefined state.
+ */
+int swr_reset_device(struct swr_device *swr_dev)
+{
+	struct device *dev;
+	const struct swr_driver *sdrv;
+
+	if (!swr_dev)
+		return -EINVAL;
+
+	dev = &swr_dev->dev;
+	sdrv = to_swr_driver(dev->driver);
+	if (!sdrv)
+		return -EINVAL;
+
+	if (sdrv->reset_device)
+		return sdrv->reset_device(to_swr_device(dev));
+
+	return -ENODEV;
+}
+EXPORT_SYMBOL(swr_reset_device);
+
+/**
+ * swr_set_device_group - Assign a group id to the slave device
+ * @swr_dev: pointer to soundwire slave device
+ * @id: group id to be assigned to slave device
+ * Context: can sleep
+ *
+ * This API will be called either from soundwire master or slave
+ * device to assign group id.
+ */
+int swr_set_device_group(struct swr_device *swr_dev, u8 id)
+{
+	struct swr_master *master;
+
+	if (!swr_dev)
+		return -EINVAL;
+
+	swr_dev->group_id = id;
+	master = swr_dev->master;
+	if (!id && master)
+		master->gr_sid = 0;
+
+	return 0;
+}
+EXPORT_SYMBOL(swr_set_device_group);
+
+static int swr_drv_probe(struct device *dev)
+{
+	const struct swr_driver *sdrv = to_swr_driver(dev->driver);
+
+	if (!sdrv)
+		return -EINVAL;
+
+	if (sdrv->probe)
+		return sdrv->probe(to_swr_device(dev));
+	return -ENODEV;
+}
+
+static int swr_drv_remove(struct device *dev)
+{
+	const struct swr_driver *sdrv = to_swr_driver(dev->driver);
+
+	if (!sdrv)
+		return -EINVAL;
+
+	if (sdrv->remove)
+		return sdrv->remove(to_swr_device(dev));
+	return -ENODEV;
+}
+
+static void swr_drv_shutdown(struct device *dev)
+{
+	const struct swr_driver *sdrv = to_swr_driver(dev->driver);
+
+	if (!sdrv)
+		return;
+
+	if (sdrv->shutdown)
+		sdrv->shutdown(to_swr_device(dev));
+}
+
+/**
+ * swr_driver_register - register a soundwire driver
+ * @drv: the driver to register
+ * Context: can sleep
+ */
+int swr_driver_register(struct swr_driver *drv)
+{
+	drv->driver.bus = &soundwire_type;
+	if (drv->probe)
+		drv->driver.probe = swr_drv_probe;
+	if (drv->remove)
+		drv->driver.remove = swr_drv_remove;
+
+	if (drv->shutdown)
+		drv->driver.shutdown = swr_drv_shutdown;
+
+	return driver_register(&drv->driver);
+}
+EXPORT_SYMBOL(swr_driver_register);
+
+/**
+ * swr_driver_unregister - unregister a soundwire driver
+ * @drv: the driver to unregister
+ */
+void swr_driver_unregister(struct swr_driver *drv)
+{
+	if (drv)
+		driver_unregister(&drv->driver);
+}
+EXPORT_SYMBOL(swr_driver_unregister);
+
+static void swr_match_ctrl_to_boardinfo(struct swr_master *master,
+				struct swr_boardinfo *bi)
+{
+	struct swr_device *swr;
+
+	if (master->bus_num != bi->bus_num) {
+		dev_dbg(&master->dev,
+			"%s: master# %d and bi# %d does not match\n",
+			__func__, master->bus_num, bi->bus_num);
+		return;
+	}
+
+	swr = swr_new_device(master, bi);
+	if (!swr)
+		dev_err(&master->dev, "can't create new device for %s\n",
+			bi->swr_slave->name);
+}
+
+/**
+ * swr_master_add_boarddevices - Add devices registered by board info
+ * @master: master to which these devices are to be added
+ *
+ * This API is called by the master when it is up and running. If devices
+ * on a master were registered before the master itself, this makes sure
+ * they get probed once the master is up.
+ */
+void swr_master_add_boarddevices(struct swr_master *master)
+{
+	struct boardinfo *bi;
+
+	mutex_lock(&board_lock);
+	list_add_tail(&master->list, &swr_master_list);
+	list_for_each_entry(bi, &board_list, list)
+		swr_match_ctrl_to_boardinfo(master, &bi->board_info);
+	mutex_unlock(&board_lock);
+}
+EXPORT_SYMBOL(swr_master_add_boarddevices);
+
+static void swr_unregister_device(struct swr_device *swr)
+{
+	if (swr)
+		device_unregister(&swr->dev);
+}
+
+static void swr_master_release(struct device *dev)
+{
+	struct swr_master *master = to_swr_master(dev);
+
+	kfree(master);
+}
+
+#define swr_master_attr_gr NULL
+static struct device_type swr_master_type = {
+	.groups     = swr_master_attr_gr,
+	.release    = swr_master_release,
+};
+
+static int __unregister(struct device *dev, void *null)
+{
+	swr_unregister_device(to_swr_device(dev));
+	return 0;
+}
+
+/**
+ * swr_unregister_master - unregister soundwire master controller
+ * @master: the master being unregistered
+ *
+ * This API is called by the master controller driver to unregister a
+ * master controller that was registered by the swr_register_master API.
+ */
+void swr_unregister_master(struct swr_master *master)
+{
+	int dummy;
+	struct swr_master *m_ctrl;
+
+	mutex_lock(&swr_lock);
+	m_ctrl = idr_find(&master_idr, master->bus_num);
+	mutex_unlock(&swr_lock);
+	if (m_ctrl != master)
+		return;
+
+	mutex_lock(&board_lock);
+	list_del(&master->list);
+	mutex_unlock(&board_lock);
+
+	/* free bus id */
+	mutex_lock(&swr_lock);
+	idr_remove(&master_idr, master->bus_num);
+	mutex_unlock(&swr_lock);
+
+	dummy = device_for_each_child(&master->dev, NULL, __unregister);
+	device_unregister(&master->dev);
+}
+EXPORT_SYMBOL(swr_unregister_master);
+
+/**
+ * swr_register_master - register soundwire master controller
+ * @master: master to be registered
+ *
+ * This API registers the master with the framework. master->bus_num
+ * is the desired bus number under which the soundwire framework
+ * registers the master.
+ */
+int swr_register_master(struct swr_master *master)
+{
+	int id;
+	int status = 0;
+
+	mutex_lock(&swr_lock);
+	id = idr_alloc(&master_idr, master, master->bus_num,
+			master->bus_num+1, GFP_KERNEL);
+	mutex_unlock(&swr_lock);
+	if (id < 0)
+		return id;
+	master->bus_num = id;
+
+	/* Can't register until driver model init */
+	if (WARN_ON(!soundwire_type.p)) {
+		status = -EAGAIN;
+		goto done;
+	}
+
+	dev_set_name(&master->dev, "swr%u", master->bus_num);
+	master->dev.bus = &soundwire_type;
+	master->dev.type = &swr_master_type;
+	mutex_init(&master->mlock);
+	status = device_register(&master->dev);
+	if (status < 0)
+		goto done;
+
+	INIT_LIST_HEAD(&master->devices);
+	pr_debug("%s: SWR master registered successfully %s\n",
+		__func__, dev_name(&master->dev));
+	return 0;
+
+done:
+	idr_remove(&master_idr, master->bus_num);
+	return status;
+}
+EXPORT_SYMBOL(swr_register_master);
+
+#define swr_device_attr_gr NULL
+#define swr_device_uevent NULL
+static struct device_type swr_dev_type = {
+	.groups    = swr_device_attr_gr,
+	.uevent    = swr_device_uevent,
+	.release   = swr_dev_release,
+};
+
+static const struct swr_device_id *swr_match(const struct swr_device_id *id,
+					     const struct swr_device *swr_dev)
+{
+	while (id->name[0]) {
+		if (strcmp(swr_dev->name, id->name) == 0)
+			return id;
+		id++;
+	}
+	return NULL;
+}
+
+static int swr_device_match(struct device *dev, struct device_driver *driver)
+{
+	struct swr_device *swr_dev;
+	struct swr_driver *drv = to_swr_driver(driver);
+
+	if (!drv)
+		return -EINVAL;
+
+	if (dev->type == &swr_dev_type)
+		swr_dev = to_swr_device(dev);
+	else
+		return 0;
+	if (drv->id_table)
+		return swr_match(drv->id_table, swr_dev) != NULL;
+
+	if (driver->name)
+		return strcmp(swr_dev->name, driver->name) == 0;
+	return 0;
+}
+#ifdef CONFIG_PM_SLEEP
+static int swr_legacy_suspend(struct device *dev, pm_message_t mesg)
+{
+	struct swr_device *swr_dev = NULL;
+	struct swr_driver *driver;
+
+	if (dev->type == &swr_dev_type)
+		swr_dev = to_swr_device(dev);
+
+	if (!swr_dev || !dev->driver)
+		return 0;
+
+	driver = to_swr_driver(dev->driver);
+	if (!driver->suspend)
+		return 0;
+
+	return driver->suspend(swr_dev, mesg);
+}
+
+static int swr_legacy_resume(struct device *dev)
+{
+	struct swr_device *swr_dev = NULL;
+	struct swr_driver *driver;
+
+	if (dev->type == &swr_dev_type)
+		swr_dev = to_swr_device(dev);
+
+	if (!swr_dev || !dev->driver)
+		return 0;
+
+	driver = to_swr_driver(dev->driver);
+	if (!driver->resume)
+		return 0;
+
+	return driver->resume(swr_dev);
+}
+
+static int swr_pm_suspend(struct device *dev)
+{
+	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+
+	if (pm)
+		return pm_generic_suspend(dev);
+	else
+		return swr_legacy_suspend(dev, PMSG_SUSPEND);
+}
+
+static int swr_pm_resume(struct device *dev)
+{
+	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+
+	if (pm)
+		return pm_generic_resume(dev);
+	else
+		return swr_legacy_resume(dev);
+}
+#else
+#define swr_pm_suspend	NULL
+#define swr_pm_resume	NULL
+#endif /*CONFIG_PM_SLEEP*/
+
+static const struct dev_pm_ops soundwire_pm = {
+	.suspend = swr_pm_suspend,
+	.resume = swr_pm_resume,
+	SET_RUNTIME_PM_OPS(
+		pm_generic_suspend,
+		pm_generic_resume,
+		NULL
+		)
+};
+
+struct device soundwire_dev = {
+	.init_name = "soundwire",
+};
+
+struct bus_type soundwire_type = {
+	.name		= "soundwire",
+	.match		= swr_device_match,
+	.pm		= &soundwire_pm,
+};
+EXPORT_SYMBOL(soundwire_type);
+
+static void __exit soundwire_exit(void)
+{
+	device_unregister(&soundwire_dev);
+	bus_unregister(&soundwire_type);
+}
+
+static int __init soundwire_init(void)
+{
+	int retval;
+
+	retval = bus_register(&soundwire_type);
+	if (!retval)
+		retval = device_register(&soundwire_dev);
+
+	if (retval)
+		bus_unregister(&soundwire_type);
+
+	return retval;
+}
+postcore_initcall(soundwire_init);
+module_exit(soundwire_exit);
+
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Soundwire module");
+MODULE_ALIAS("platform:soundwire");
diff -Nruw linux-4.4.115-fbx/drivers/soundwire./swrm_registers.h linux-4.4.115-fbx/drivers/soundwire/swrm_registers.h
--- linux-4.4.115-fbx/drivers/soundwire./swrm_registers.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/soundwire/swrm_registers.h	2019-01-22 16:16:26.687275240 +0100
@@ -0,0 +1,204 @@
+/* Copyright (c) 2015, 2017 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _SWRM_REGISTERS_H
+#define _SWRM_REGISTERS_H
+
+#define SWRM_BASE_ADDRESS				0x00
+
+#define SWRM_COMP_HW_VERSION			SWRM_BASE_ADDRESS
+#define SWRM_COMP_CFG_ADDR			(SWRM_BASE_ADDRESS+0x00000004)
+#define SWRM_COMP_CFG_RMSK				0x3
+#define SWRM_COMP_CFG_IRQ_LEVEL_OR_PULSE_BMSK		0x2
+#define SWRM_COMP_CFG_IRQ_LEVEL_OR_PULSE_SHFT		0x1
+#define SWRM_COMP_CFG_ENABLE_BMSK			0x1
+#define SWRM_COMP_CFG_ENABLE_SHFT			0x0
+
+#define SWRM_COMP_SW_RESET		(SWRM_BASE_ADDRESS+0x00000008)
+
+#define SWRM_COMP_PARAMS		(SWRM_BASE_ADDRESS+0x100)
+#define SWRM_COMP_PARAMS_DOUT_PORTS_MASK	0x0000001F
+#define SWRM_COMP_PARAMS_DIN_PORTS_MASK		0x000003E0
+#define SWRM_COMP_PARAMS_WR_FIFO_DEPTH		0x00007C00
+#define SWRM_COMP_PARAMS_RD_FIFO_DEPTH		0x000F8000
+#define SWRM_COMP_PARAMS_AUTO_ENUM_SLAVES	0x00F00000
+#define SWRM_COMP_PARAMS_DATA_LANES		0x07000000
+
+
+#define SWRM_INTERRUPT_STATUS		(SWRM_BASE_ADDRESS+0x00000200)
+#define SWRM_INTERRUPT_STATUS_RMSK		0x1FFFD
+
+#define SWRM_INTERRUPT_STATUS_SLAVE_PEND_IRQ			0x1
+#define SWRM_INTERRUPT_STATUS_NEW_SLAVE_ATTACHED		0x2
+#define SWRM_INTERRUPT_STATUS_CHANGE_ENUM_SLAVE_STATUS		0x4
+#define SWRM_INTERRUPT_STATUS_MASTER_CLASH_DET			0x8
+#define SWRM_INTERRUPT_STATUS_RD_FIFO_OVERFLOW			0x10
+#define SWRM_INTERRUPT_STATUS_RD_FIFO_UNDERFLOW			0x20
+#define SWRM_INTERRUPT_STATUS_WR_CMD_FIFO_OVERFLOW		0x40
+#define SWRM_INTERRUPT_STATUS_CMD_ERROR				0x80
+#define SWRM_INTERRUPT_STATUS_DOUT_PORT_COLLISION		0x100
+#define SWRM_INTERRUPT_STATUS_READ_EN_RD_VALID_MISMATCH		0x200
+#define SWRM_INTERRUPT_STATUS_SPECIAL_CMD_ID_FINISHED		0x400
+#define SWRM_INTERRUPT_STATUS_NEW_SLAVE_AUTO_ENUM_FINISHED	0x800
+#define SWRM_INTERRUPT_STATUS_AUTO_ENUM_FAILED			0x1000
+#define SWRM_INTERRUPT_STATUS_AUTO_ENUM_TABLE_IS_FULL		0x2000
+#define SWRM_INTERRUPT_STATUS_BUS_RESET_FINISHED		0x4000
+#define SWRM_INTERRUPT_STATUS_CLK_STOP_FINISHED			0x8000
+#define SWRM_INTERRUPT_STATUS_ERROR_PORT_TEST			0x10000
+
+#define SWRM_INTERRUPT_MASK_ADDR		(SWRM_BASE_ADDRESS+0x00000204)
+#define SWRM_INTERRUPT_MASK_RMSK		0x1FFFF
+
+#define SWRM_INTERRUPT_MASK_SLAVE_PEND_IRQ_BMSK			0x1
+#define SWRM_INTERRUPT_MASK_SLAVE_PEND_IRQ_SHFT			0x0
+
+#define SWRM_INTERRUPT_MASK_NEW_SLAVE_ATTACHED_BMSK		0x2
+#define SWRM_INTERRUPT_MASK_NEW_SLAVE_ATTACHED_SHFT		0x1
+
+#define SWRM_INTERRUPT_MASK_CHANGE_ENUM_SLAVE_STATUS_BMSK	0x4
+#define SWRM_INTERRUPT_MASK_CHANGE_ENUM_SLAVE_STATUS_SHFT	0x2
+
+#define SWRM_INTERRUPT_MASK_MASTER_CLASH_DET_BMSK		0x8
+#define SWRM_INTERRUPT_MASK_MASTER_CLASH_DET_SHFT		0x3
+
+#define SWRM_INTERRUPT_MASK_RD_FIFO_OVERFLOW_BMSK		0x10
+#define SWRM_INTERRUPT_MASK_RD_FIFO_OVERFLOW_SHFT		0x4
+
+#define SWRM_INTERRUPT_MASK_RD_FIFO_UNDERFLOW_BMSK		0x20
+#define SWRM_INTERRUPT_MASK_RD_FIFO_UNDERFLOW_SHFT		0x5
+
+#define SWRM_INTERRUPT_MASK_WR_CMD_FIFO_OVERFLOW_BMSK		0x40
+#define SWRM_INTERRUPT_MASK_WR_CMD_FIFO_OVERFLOW_SHFT		0x6
+
+#define SWRM_INTERRUPT_MASK_CMD_ERROR_BMSK			0x80
+#define SWRM_INTERRUPT_MASK_CMD_ERROR_SHFT			0x7
+
+#define SWRM_INTERRUPT_MASK_DOUT_PORT_COLLISION_BMSK		0x100
+#define SWRM_INTERRUPT_MASK_DOUT_PORT_COLLISION_SHFT		0x8
+
+#define SWRM_INTERRUPT_MASK_READ_EN_RD_VALID_MISMATCH_BMSK	0x200
+#define SWRM_INTERRUPT_MASK_READ_EN_RD_VALID_MISMATCH_SHFT	0x9
+
+#define SWRM_INTERRUPT_MASK_SPECIAL_CMD_ID_FINISHED_BMSK	0x400
+#define SWRM_INTERRUPT_MASK_SPECIAL_CMD_ID_FINISHED_SHFT	0xA
+
+#define SWRM_INTERRUPT_MASK_NEW_SLAVE_AUTO_ENUM_FINISHED_BMSK	0x800
+#define SWRM_INTERRUPT_MASK_NEW_SLAVE_AUTO_ENUM_FINISHED_SHFT	0xB
+
+#define SWRM_INTERRUPT_MASK_AUTO_ENUM_FAILED_BMSK		0x1000
+#define SWRM_INTERRUPT_MASK_AUTO_ENUM_FAILED_SHFT		0xC
+
+#define SWRM_INTERRUPT_MASK_AUTO_ENUM_TABLE_IS_FULL_BMSK	0x2000
+#define SWRM_INTERRUPT_MASK_AUTO_ENUM_TABLE_IS_FULL_SHFT	0xD
+
+#define SWRM_INTERRUPT_MASK_BUS_RESET_FINISHED_BMSK		0x4000
+#define SWRM_INTERRUPT_MASK_BUS_RESET_FINISHED_SHFT		0xE
+
+#define SWRM_INTERRUPT_MASK_CLK_STOP_FINISHED_BMSK		0x8000
+#define SWRM_INTERRUPT_MASK_CLK_STOP_FINISHED_SHFT		0xF
+
+#define SWRM_INTERRUPT_MASK_ERROR_PORT_TEST_BMSK		0x10000
+#define SWRM_INTERRUPT_MASK_ERROR_PORT_TEST_SHFT		0x10
+
+#define SWRM_INTERRUPT_MAX					0x11
+
+#define SWRM_INTERRUPT_CLEAR		(SWRM_BASE_ADDRESS+0x00000208)
+
+#define SWRM_CMD_FIFO_WR_CMD		(SWRM_BASE_ADDRESS + 0x00000300)
+#define SWRM_CMD_FIFO_WR_CMD_MASK	0xFFFFFFFF
+#define SWRM_CMD_FIFO_RD_CMD		(SWRM_BASE_ADDRESS + 0x00000304)
+#define SWRM_CMD_FIFO_RD_CMD_MASK	0xFFFFFFF
+#define SWRM_CMD_FIFO_CMD		(SWRM_BASE_ADDRESS + 0x00000308)
+#define SWRM_CMD_FIFO_STATUS		(SWRM_BASE_ADDRESS + 0x0000030C)
+
+#define SWRM_CMD_FIFO_STATUS_WR_CMD_FIFO_CNT_MASK	0x1F00
+#define SWRM_CMD_FIFO_STATUS_RD_CMD_FIFO_CNT_MASK	0x7C00000
+
+#define SWRM_CMD_FIFO_CFG_ADDR			(SWRM_BASE_ADDRESS+0x00000314)
+#define SWRM_CMD_FIFO_CFG_NUM_OF_CMD_RETRY_BMSK		0x7
+#define SWRM_CMD_FIFO_CFG_NUM_OF_CMD_RETRY_SHFT		0x0
+
+#define SWRM_CMD_FIFO_RD_FIFO_ADDR	(SWRM_BASE_ADDRESS + 0x00000318)
+
+#define SWRM_ENUMERATOR_CFG_ADDR		(SWRM_BASE_ADDRESS+0x00000500)
+#define SWRM_ENUMERATOR_CFG_AUTO_ENUM_EN_BMSK		0x1
+#define SWRM_ENUMERATOR_CFG_AUTO_ENUM_EN_SHFT		0x0
+
+#define SWRM_ENUMERATOR_SLAVE_DEV_ID_1(m)   (SWRM_BASE_ADDRESS+0x530+0x8*m)
+#define SWRM_ENUMERATOR_SLAVE_DEV_ID_2(m)   (SWRM_BASE_ADDRESS+0x534+0x8*m)
+
+#define SWRM_MCP_FRAME_CTRL_BANK_ADDR(m)    (SWRM_BASE_ADDRESS+0x101C+0x40*m)
+#define SWRM_MCP_FRAME_CTRL_BANK_RMSK			0x00ff07ff
+#define SWRM_MCP_FRAME_CTRL_BANK_SHFT			0
+#define SWRM_MCP_FRAME_CTRL_BANK_SSP_PERIOD_BMSK	0xff0000
+#define SWRM_MCP_FRAME_CTRL_BANK_SSP_PERIOD_SHFT	16
+#define SWRM_MCP_FRAME_CTRL_BANK_PHASE_BMSK		0xf800
+#define SWRM_MCP_FRAME_CTRL_BANK_PHASE_SHFT		11
+#define SWRM_MCP_FRAME_CTRL_BANK_CLK_DIV_VALUE_BMSK	0x700
+#define SWRM_MCP_FRAME_CTRL_BANK_CLK_DIV_VALUE_SHFT	8
+#define SWRM_MCP_FRAME_CTRL_BANK_ROW_CTRL_BMSK		0xF8
+#define SWRM_MCP_FRAME_CTRL_BANK_ROW_CTRL_SHFT		3
+#define SWRM_MCP_FRAME_CTRL_BANK_COL_CTRL_BMSK		0x7
+#define SWRM_MCP_FRAME_CTRL_BANK_COL_CTRL_SHFT		0
+
+#define SWRM_MCP_BUS_CTRL_ADDR			(SWRM_BASE_ADDRESS+0x00001044)
+#define SWRM_MCP_BUS_CTRL_BUS_RESET_BMSK		0x1
+#define SWRM_MCP_BUS_CTRL_BUS_RESET_SHFT		0x0
+#define SWRM_MCP_BUS_CTRL_CLK_START_BMSK		0x2
+#define SWRM_MCP_BUS_CTRL_CLK_START_SHFT		0x1
+
+#define SWRM_MCP_CFG_ADDR			(SWRM_BASE_ADDRESS+0x00001048)
+#define SWRM_MCP_CFG_MAX_NUM_OF_CMD_NO_PINGS_BMSK	0x3E0000
+#define SWRM_MCP_CFG_MAX_NUM_OF_CMD_NO_PINGS_SHFT	0x11
+#define SWRM_MCP_CFG_BUS_CLK_PAUSE_BMSK			0x02
+
+#define SWRM_MCP_STATUS			(SWRM_BASE_ADDRESS+0x104C)
+#define SWRM_MCP_STATUS_BANK_NUM_MASK	0x01
+
+#define SWRM_MCP_SLV_STATUS		(SWRM_BASE_ADDRESS+0x1090)
+#define SWRM_MCP_SLV_STATUS_MASK	0x03
+
+#define SWRM_DP_PORT_CTRL_BANK(n, m)		(SWRM_BASE_ADDRESS + \
+							0x00001124 + \
+							0x100*(n-1) + \
+							0x40*m)
+#define SWRM_DP_PORT_CTRL_BANK_MASK		0xFFFFFFFF
+#define SWRM_DP_PORT_CTRL_EN_CHAN_MASK		0xFF000000
+#define SWRM_DP_PORT_CTRL_EN_CHAN_SHFT		0x18
+#define SWRM_DP_PORT_CTRL_OFFSET2_SHFT		0x10
+#define SWRM_DP_PORT_CTRL_OFFSET1_SHFT		0x08
+#define SWRM_DP_PORT_CTRL_SAMPLE_INTERVAL	0x00
+
+/* Soundwire Slave Register definition */
+
+#define SWRS_BASE_ADDRESS			0x00
+
+#define SWRS_DP_REG_OFFSET(port, bank)		((0x100*port)+(0x10*bank))
+
+#define SWRS_DP_CHANNEL_ENABLE_BANK(n, m)	(SWRS_BASE_ADDRESS + 0x120 + \
+						 SWRS_DP_REG_OFFSET(n, m))
+#define SWRS_DP_SAMPLE_CONTROL_1_BANK(n, m)	(SWRS_BASE_ADDRESS + 0x122 + \
+						 SWRS_DP_REG_OFFSET(n, m))
+#define SWRS_DP_OFFSET_CONTROL_1_BANK(n, m)	(SWRS_BASE_ADDRESS + 0x124 + \
+						 SWRS_DP_REG_OFFSET(n, m))
+#define SWRS_DP_OFFSET_CONTROL_2_BANK(n, m)	(SWRS_BASE_ADDRESS + 0x125 + \
+						 SWRS_DP_REG_OFFSET(n, m))
+#define SWRS_DP_HCONTROL_BANK(n, m)		(SWRS_BASE_ADDRESS + 0x126 + \
+						 SWRS_DP_REG_OFFSET(n, m))
+#define SWRS_DP_BLOCK_CONTROL_3_BANK(n, m)	(SWRS_BASE_ADDRESS + 0x127 + \
+						 SWRS_DP_REG_OFFSET(n, m))
+#define SWRS_SCP_FRAME_CTRL_BANK(m)		(SWRS_BASE_ADDRESS + 0x60 + \
+						 0x10*m)
+#define SWRS_SCP_HOST_CLK_DIV2_CTL_BANK(m)	(SWRS_BASE_ADDRESS + 0xE0 + \
+						0x10*m)
+
+#endif /* _SWRM_REGISTERS_H */
diff -Nruw linux-4.4.115/drivers/soundwire/swr-wcd-ctrl.c linux-4.4.115-fbx/drivers/soundwire/swr-wcd-ctrl.c
--- linux-4.4.115/drivers/soundwire/swr-wcd-ctrl.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/soundwire/swr-wcd-ctrl.c	2019-10-29 09:26:24.829214784 +0100
@@ -0,0 +1,1893 @@
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/irq.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/soundwire/soundwire.h>
+#include <linux/soundwire/swr-wcd.h>
+#include <linux/delay.h>
+#include <linux/kthread.h>
+#include <linux/clk.h>
+#include <linux/pm_runtime.h>
+#include <linux/of.h>
+#include <linux/debugfs.h>
+#include <linux/uaccess.h>
+#include "swrm_registers.h"
+#include "swr-wcd-ctrl.h"
+
+#define SWR_BROADCAST_CMD_ID            0x0F
+#define SWR_AUTO_SUSPEND_DELAY          3 /* delay in sec */
+#define SWR_DEV_ID_MASK			0xFFFFFFFF
+#define SWR_REG_VAL_PACK(data, dev, id, reg)	\
+			((reg) | ((id) << 16) | ((dev) << 20) | ((data) << 24))
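+/*
+ * Resulting packed FIFO command layout (from the shifts above):
+ *   [31:24] data | [23:20] dev addr | [19:16] cmd id | [15:0] reg addr
+ */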
+
+/* pm runtime auto suspend timer in msecs */
+static int auto_suspend_timer = SWR_AUTO_SUSPEND_DELAY * 1000;
+module_param(auto_suspend_timer, int,
+		S_IRUGO | S_IWUSR | S_IWGRP);
+MODULE_PARM_DESC(auto_suspend_timer, "timer for auto suspend");
+
+static u8 mstr_ports[] = {100, 101, 102, 103, 104, 105, 106, 107};
+static u8 mstr_port_type[] = {SWR_DAC_PORT, SWR_COMP_PORT, SWR_BOOST_PORT,
+			      SWR_DAC_PORT, SWR_COMP_PORT, SWR_BOOST_PORT,
+			      SWR_VISENSE_PORT, SWR_VISENSE_PORT};
+
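+/*
+ * Use-case table: each entry is {num_port, num_ch, chrate} (struct
+ * usecase). swrm_get_port_config() sums the enabled master ports and
+ * matches the totals against this table to select a row of pp[] below.
+ */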
+struct usecase uc[] = {
+	{0, 0, 0},		/* UC0: no ports */
+	{1, 1, 2400},		/* UC1: Spkr */
+	{1, 4, 600},		/* UC2: Compander */
+	{1, 2, 300},		/* UC3: Smart Boost */
+	{1, 2, 1200},		/* UC4: VI Sense */
+	{4, 9, 4500},		/* UC5: Spkr + Comp + SB + VI */
+	{8, 18, 9000},		/* UC6: 2*(Spkr + Comp + SB + VI) */
+	{2, 2, 4800},		/* UC7: 2*Spkr */
+	{2, 5, 3000},		/* UC8: Spkr + Comp */
+	{4, 10, 6000},		/* UC9: 2*(Spkr + Comp) */
+	{3, 7, 3300},		/* UC10: Spkr + Comp + SB */
+	{6, 14, 6600},		/* UC11: 2*(Spkr + Comp + SB) */
+	{2, 3, 2700},		/* UC12: Spkr + SB */
+	{4, 6, 5400},		/* UC13: 2*(Spkr + SB) */
+	{3, 5, 3900},		/* UC14: Spkr + SB + VI */
+	{6, 10, 7800},		/* UC15: 2*(Spkr + SB + VI) */
+	{2, 3, 3600},		/* UC16: Spkr + VI */
+	{4, 6, 7200},		/* UC17: 2*(Spkr + VI) */
+	{3, 7, 4200},		/* UC18: Spkr + Comp + VI */
+	{6, 14, 8400},		/* UC19: 2*(Spkr + Comp + VI) */
+};
+#define MAX_USECASE	ARRAY_SIZE(uc)
+
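+/*
+ * Per-use-case port parameters: each row lists {si, off1, off2} (struct
+ * port_params) for the ports enabled in that use case, applied by
+ * swrm_get_port_config() as sample interval and offsets.
+ */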
+struct port_params pp[MAX_USECASE][SWR_MSTR_PORT_LEN] = {
+	/* UC 0 */
+	{
+		{0, 0, 0},
+	},
+	/* UC 1 */
+	{
+		{7, 1, 0},
+	},
+	/* UC 2 */
+	{
+		{31, 2, 0},
+	},
+	/* UC 3 */
+	{
+		{63, 12, 31},
+	},
+	/* UC 4 */
+	{
+		{15, 7, 0},
+	},
+	/* UC 5 */
+	{
+		{7, 1, 0},
+		{31, 2, 0},
+		{63, 12, 31},
+		{15, 7, 0},
+	},
+	/* UC 6 */
+	{
+		{7, 1, 0},
+		{31, 2, 0},
+		{63, 12, 31},
+		{15, 7, 0},
+		{7, 6, 0},
+		{31, 18, 0},
+		{63, 13, 31},
+		{15, 10, 0},
+	},
+	/* UC 7 */
+	{
+		{7, 1, 0},
+		{7, 6, 0},
+	},
+	/* UC 8 */
+	{
+		{7, 1, 0},
+		{31, 2, 0},
+	},
+	/* UC 9 */
+	{
+		{7, 1, 0},
+		{31, 2, 0},
+		{7, 6, 0},
+		{31, 18, 0},
+	},
+	/* UC 10 */
+	{
+		{7, 1, 0},
+		{31, 2, 0},
+		{63, 12, 31},
+	},
+	/* UC 11 */
+	{
+		{7, 1, 0},
+		{31, 2, 0},
+		{63, 12, 31},
+		{7, 6, 0},
+		{31, 18, 0},
+		{63, 13, 31},
+	},
+	/* UC 12 */
+	{
+		{7, 1, 0},
+		{63, 12, 31},
+	},
+	/* UC 13 */
+	{
+		{7, 1, 0},
+		{63, 12, 31},
+		{7, 6, 0},
+		{63, 13, 31},
+	},
+	/* UC 14 */
+	{
+		{7, 1, 0},
+		{63, 12, 31},
+		{15, 7, 0},
+	},
+	/* UC 15 */
+	{
+		{7, 1, 0},
+		{63, 12, 31},
+		{15, 7, 0},
+		{7, 6, 0},
+		{63, 13, 31},
+		{15, 10, 0},
+	},
+	/* UC 16 */
+	{
+		{7, 1, 0},
+		{15, 7, 0},
+	},
+	/* UC 17 */
+	{
+		{7, 1, 0},
+		{15, 7, 0},
+		{7, 6, 0},
+		{15, 10, 0},
+	},
+	/* UC 18 */
+	{
+		{7, 1, 0},
+		{31, 2, 0},
+		{15, 7, 0},
+	},
+	/* UC 19 */
+	{
+		{7, 1, 0},
+		{31, 2, 0},
+		{15, 7, 0},
+		{7, 6, 0},
+		{31, 18, 0},
+		{15, 10, 0},
+	},
+};
+
+enum {
+	SWR_NOT_PRESENT, /* Device is detached/not present on the bus */
+	SWR_ATTACHED_OK, /* Device is attached */
+	SWR_ALERT,       /* Device alerts master for any interrupts */
+	SWR_RESERVED,    /* Reserved */
+};
+
+#define SWRM_MAX_PORT_REG    40
+#define SWRM_MAX_INIT_REG    8
+
+#define SWR_MSTR_MAX_REG_ADDR	0x1740
+#define SWR_MSTR_START_REG_ADDR	0x00
+#define SWR_MSTR_MAX_BUF_LEN     32
+#define BYTES_PER_LINE          12
+#define SWR_MSTR_RD_BUF_LEN      8
+#define SWR_MSTR_WR_BUF_LEN      32
+
+static void swrm_copy_data_port_config(struct swr_master *master,
+				       u8 inactive_bank);
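+
+/*
+ * Debugfs state: a single controller instance is exported through dbgswrm
+ * for the swrm_peek/swrm_poke/swrm_reg_dump files created in swrm_probe().
+ */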
+static struct swr_mstr_ctrl *dbgswrm;
+static struct dentry *debugfs_swrm_dent;
+static struct dentry *debugfs_peek;
+static struct dentry *debugfs_poke;
+static struct dentry *debugfs_reg_dump;
+static unsigned int read_data;
+
+static bool swrm_is_msm_variant(int val)
+{
+	return (val == SWRM_VERSION_1_3);
+}
+
+static int swrm_debug_open(struct inode *inode, struct file *file)
+{
+	file->private_data = inode->i_private;
+	return 0;
+}
+
+static int get_parameters(char *buf, u32 *param1, int num_of_par)
+{
+	char *token;
+	int base, cnt;
+
+	token = strsep(&buf, " ");
+	for (cnt = 0; cnt < num_of_par; cnt++) {
+		if (token) {
+			if ((token[1] == 'x') || (token[1] == 'X'))
+				base = 16;
+			else
+				base = 10;
+
+			if (kstrtou32(token, base, &param1[cnt]) != 0)
+				return -EINVAL;
+
+			token = strsep(&buf, " ");
+		} else
+			return -EINVAL;
+	}
+	return 0;
+}
+
+static ssize_t swrm_reg_show(char __user *ubuf, size_t count,
+					  loff_t *ppos)
+{
+	int i, reg_val, len;
+	ssize_t total = 0;
+	char tmp_buf[SWR_MSTR_MAX_BUF_LEN];
+
+	if (!ubuf || !ppos)
+		return 0;
+
+	for (i = (((int) *ppos / BYTES_PER_LINE) + SWR_MSTR_START_REG_ADDR);
+		i <= SWR_MSTR_MAX_REG_ADDR; i += 4) {
+		reg_val = dbgswrm->read(dbgswrm->handle, i);
+		len = snprintf(tmp_buf, 25, "0x%.3x: 0x%.2x\n", i, reg_val);
+		if ((total + len) >= count - 1)
+			break;
+		if (copy_to_user((ubuf + total), tmp_buf, len)) {
+			pr_err("%s: fail to copy reg dump\n", __func__);
+			total = -EFAULT;
+			goto copy_err;
+		}
+		*ppos += len;
+		total += len;
+	}
+
+copy_err:
+	return total;
+}
+
+static ssize_t swrm_debug_read(struct file *file, char __user *ubuf,
+				size_t count, loff_t *ppos)
+{
+	char lbuf[SWR_MSTR_RD_BUF_LEN];
+	char *access_str;
+	ssize_t ret_cnt;
+
+	if (!count || !file || !ppos || !ubuf)
+		return -EINVAL;
+
+	access_str = file->private_data;
+	if (*ppos < 0)
+		return -EINVAL;
+
+	if (!strcmp(access_str, "swrm_peek")) {
+		snprintf(lbuf, sizeof(lbuf), "0x%x\n", read_data);
+		ret_cnt = simple_read_from_buffer(ubuf, count, ppos, lbuf,
+					       strnlen(lbuf, 7));
+	} else if (!strcmp(access_str, "swrm_reg_dump")) {
+		ret_cnt = swrm_reg_show(ubuf, count, ppos);
+	} else {
+		pr_err("%s: %s not permitted to read\n", __func__, access_str);
+		ret_cnt = -EPERM;
+	}
+	return ret_cnt;
+}
+
+static ssize_t swrm_debug_write(struct file *filp,
+	const char __user *ubuf, size_t cnt, loff_t *ppos)
+{
+	char lbuf[SWR_MSTR_WR_BUF_LEN];
+	int rc;
+	u32 param[5];
+	char *access_str;
+
+	if (!filp || !ppos || !ubuf)
+		return -EINVAL;
+
+	access_str = filp->private_data;
+	if (cnt > sizeof(lbuf) - 1)
+		return -EINVAL;
+
+	rc = copy_from_user(lbuf, ubuf, cnt);
+	if (rc)
+		return -EFAULT;
+
+	lbuf[cnt] = '\0';
+	if (!strcmp(access_str, "swrm_poke")) {
+		/* write */
+		rc = get_parameters(lbuf, param, 2);
+		if ((param[0] <= SWR_MSTR_MAX_REG_ADDR) &&
+			(param[1] <= 0xFFFFFFFF) &&
+			(rc == 0))
+			rc = dbgswrm->write(dbgswrm->handle, param[0],
+					    param[1]);
+		else
+			rc = -EINVAL;
+	} else if (!strcmp(access_str, "swrm_peek")) {
+		/* read */
+		rc = get_parameters(lbuf, param, 1);
+		if ((param[0] <= SWR_MSTR_MAX_REG_ADDR) && (rc == 0))
+			read_data = dbgswrm->read(dbgswrm->handle, param[0]);
+		else
+			rc = -EINVAL;
+	}
+	if (rc == 0)
+		rc = cnt;
+	else
+		pr_err("%s: rc = %d\n", __func__, rc);
+
+	return rc;
+}
+
+static const struct file_operations swrm_debug_ops = {
+	.open = swrm_debug_open,
+	.write = swrm_debug_write,
+	.read = swrm_debug_read,
+};
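+
+/*
+ * Debugfs usage (illustrative):
+ *   echo "<addr> <val>" > swrm_poke   - write <val> to master reg <addr>
+ *   echo "<addr>" > swrm_peek         - latch a read of register <addr>
+ *   cat swrm_peek                     - print the value read above
+ *   cat swrm_reg_dump                 - dump registers 0x0 to 0x1740
+ */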
+
+static int swrm_set_ch_map(struct swr_mstr_ctrl *swrm, void *data)
+{
+	struct swr_mstr_port *pinfo = (struct swr_mstr_port *)data;
+
+	swrm->mstr_port = kzalloc(sizeof(struct swr_mstr_port), GFP_KERNEL);
+	if (swrm->mstr_port == NULL)
+		return -ENOMEM;
+	swrm->mstr_port->num_port = pinfo->num_port;
+	swrm->mstr_port->port = kzalloc((pinfo->num_port * sizeof(u8)),
+					GFP_KERNEL);
+	if (!swrm->mstr_port->port) {
+		kfree(swrm->mstr_port);
+		swrm->mstr_port = NULL;
+		return -ENOMEM;
+	}
+	memcpy(swrm->mstr_port->port, pinfo->port, pinfo->num_port);
+	return 0;
+}
+
+static bool swrm_is_port_en(struct swr_master *mstr)
+{
+	return !!(mstr->num_port);
+}
+
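+/*
+ * Reference-counted bus clock control: the clock is enabled on the first
+ * request and released on the last, with swrm->state tracking
+ * SWR_MSTR_UP/SWR_MSTR_DOWN accordingly.
+ */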
+static int swrm_clk_request(struct swr_mstr_ctrl *swrm, bool enable)
+{
+	if (!swrm->clk || !swrm->handle)
+		return -EINVAL;
+
+	if (enable) {
+		swrm->clk_ref_count++;
+		if (swrm->clk_ref_count == 1) {
+			swrm->clk(swrm->handle, true);
+			swrm->state = SWR_MSTR_UP;
+		}
+	} else if (--swrm->clk_ref_count == 0) {
+		swrm->clk(swrm->handle, false);
+		swrm->state = SWR_MSTR_DOWN;
+	} else if (swrm->clk_ref_count < 0) {
+		pr_err("%s: swrm clk count mismatch\n", __func__);
+		swrm->clk_ref_count = 0;
+	}
+	return 0;
+}
+
+static int swrm_get_port_config(struct swr_master *master)
+{
+	u32 ch_rate = 0;
+	u32 num_ch = 0;
+	int i, uc_idx;
+	u32 portcount = 0;
+
+	for (i = 0; i < SWR_MSTR_PORT_LEN; i++) {
+		if (master->port[i].port_en) {
+			ch_rate += master->port[i].ch_rate;
+			num_ch += master->port[i].num_ch;
+			portcount++;
+		}
+	}
+	for (i = 0; i < ARRAY_SIZE(uc); i++) {
+		if ((uc[i].num_port == portcount) &&
+		    (uc[i].num_ch == num_ch) &&
+		    (uc[i].chrate == ch_rate)) {
+			uc_idx = i;
+			break;
+		}
+	}
+
+	if (i >= ARRAY_SIZE(uc)) {
+		dev_err(&master->dev,
+			"%s: usecase port:%d, num_ch:%d, chrate:%d not found\n",
+			__func__, master->num_port, num_ch, ch_rate);
+		return -EINVAL;
+	}
+	for (i = 0; i < SWR_MSTR_PORT_LEN; i++) {
+		if (master->port[i].port_en) {
+			master->port[i].sinterval = pp[uc_idx][i].si;
+			master->port[i].offset1 = pp[uc_idx][i].off1;
+			master->port[i].offset2 = pp[uc_idx][i].off2;
+		}
+	}
+	return 0;
+}
+
+static int swrm_get_master_port(u8 *mstr_port_id, u8 slv_port_id)
+{
+	int i;
+
+	for (i = 0; i < SWR_MSTR_PORT_LEN; i++) {
+		if (mstr_ports[i] == slv_port_id) {
+			*mstr_port_id = i;
+			return 0;
+		}
+	}
+	return -EINVAL;
+}
+
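+/*
+ * Allocate the next command id for a packed FIFO command. Non-broadcast
+ * ids cycle through 0..14; 0xF (SWR_BROADCAST_CMD_ID) is reserved for
+ * broadcast commands and is passed through unchanged.
+ */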
+static u32 swrm_get_packed_reg_val(u8 *cmd_id, u8 cmd_data,
+				 u8 dev_addr, u16 reg_addr)
+{
+	u32 val;
+	u8 id = *cmd_id;
+
+	if (id != SWR_BROADCAST_CMD_ID) {
+		if (id < 14)
+			id += 1;
+		else
+			id = 0;
+		*cmd_id = id;
+	}
+	val = SWR_REG_VAL_PACK(cmd_data, dev_addr, id, reg_addr);
+
+	return val;
+}
+
+static int swrm_cmd_fifo_rd_cmd(struct swr_mstr_ctrl *swrm, int *cmd_data,
+				 u8 dev_addr, u8 cmd_id, u16 reg_addr,
+				 u32 len)
+{
+	u32 val;
+	int ret = 0;
+
+	val = swrm_get_packed_reg_val(&swrm->rcmd_id, len, dev_addr, reg_addr);
+	ret = swrm->write(swrm->handle, SWRM_CMD_FIFO_RD_CMD, val);
+	if (ret < 0) {
+		dev_err(swrm->dev, "%s: reg 0x%x write failed, err:%d\n",
+			__func__, val, ret);
+		goto err;
+	}
+	*cmd_data = swrm->read(swrm->handle, SWRM_CMD_FIFO_RD_FIFO_ADDR);
+	dev_dbg(swrm->dev,
+		"%s: reg: 0x%x, cmd_id: 0x%x, dev_id: 0x%x, cmd_data: 0x%x\n",
+		__func__, reg_addr, cmd_id, dev_addr, *cmd_data);
+err:
+	return ret;
+}
+
+static int swrm_cmd_fifo_wr_cmd(struct swr_mstr_ctrl *swrm, u8 cmd_data,
+				 u8 dev_addr, u8 cmd_id, u16 reg_addr)
+{
+	u32 val;
+	int ret = 0;
+
+	if (!cmd_id)
+		val = swrm_get_packed_reg_val(&swrm->wcmd_id, cmd_data,
+					      dev_addr, reg_addr);
+	else
+		val = swrm_get_packed_reg_val(&cmd_id, cmd_data,
+					      dev_addr, reg_addr);
+
+	dev_dbg(swrm->dev,
+		"%s: reg: 0x%x, cmd_id: 0x%x, dev_id: 0x%x, cmd_data: 0x%x\n",
+		__func__, reg_addr, cmd_id, dev_addr, cmd_data);
+	ret = swrm->write(swrm->handle, SWRM_CMD_FIFO_WR_CMD, val);
+	if (ret < 0) {
+		dev_err(swrm->dev, "%s: reg 0x%x write failed, err:%d\n",
+			__func__, val, ret);
+		goto err;
+	}
+	if (cmd_id == 0xF) {
+		/*
+		 * sleep for 10ms for MSM soundwire variant to allow broadcast
+		 * command to complete.
+		 */
+		if (swrm_is_msm_variant(swrm->version))
+			usleep_range(10000, 10100);
+		else
+			wait_for_completion_timeout(&swrm->broadcast,
+						    (2 * HZ/10));
+	}
+err:
+	return ret;
+}
+
+static int swrm_read(struct swr_master *master, u8 dev_num, u16 reg_addr,
+		     void *buf, u32 len)
+{
+	struct swr_mstr_ctrl *swrm = swr_get_ctrl_data(master);
+	int ret = 0;
+	int val = 0;
+	u8 *reg_val = (u8 *)buf;
+
+	if (!swrm) {
+		dev_err(&master->dev, "%s: swrm is NULL\n", __func__);
+		return -EINVAL;
+	}
+
+	if (dev_num)
+		ret = swrm_cmd_fifo_rd_cmd(swrm, &val, dev_num, 0, reg_addr,
+					   len);
+	else
+		val = swrm->read(swrm->handle, reg_addr);
+
+	*reg_val = (u8)val;
+	pm_runtime_mark_last_busy(&swrm->pdev->dev);
+
+	return ret;
+}
+
+static int swrm_write(struct swr_master *master, u8 dev_num, u16 reg_addr,
+		      const void *buf)
+{
+	struct swr_mstr_ctrl *swrm = swr_get_ctrl_data(master);
+	int ret = 0;
+	u8 reg_val = *(u8 *)buf;
+
+	if (!swrm) {
+		dev_err(&master->dev, "%s: swrm is NULL\n", __func__);
+		return -EINVAL;
+	}
+
+	if (dev_num)
+		ret = swrm_cmd_fifo_wr_cmd(swrm, reg_val, dev_num, 0, reg_addr);
+	else
+		ret = swrm->write(swrm->handle, reg_addr, reg_val);
+
+	pm_runtime_mark_last_busy(&swrm->pdev->dev);
+
+	return ret;
+}
+
+static int swrm_bulk_write(struct swr_master *master, u8 dev_num, void *reg,
+			   const void *buf, size_t len)
+{
+	struct swr_mstr_ctrl *swrm = swr_get_ctrl_data(master);
+	int ret = 0;
+	int i;
+	u32 *val;
+	u32 *swr_fifo_reg;
+
+	if (!swrm || !swrm->handle) {
+		dev_err(&master->dev, "%s: swrm is NULL\n", __func__);
+		return -EINVAL;
+	}
+	if (len <= 0)
+		return -EINVAL;
+
+	if (dev_num) {
+		swr_fifo_reg = kcalloc(len, sizeof(u32), GFP_KERNEL);
+		if (!swr_fifo_reg) {
+			ret = -ENOMEM;
+			goto err;
+		}
+		val = kcalloc(len, sizeof(u32), GFP_KERNEL);
+		if (!val) {
+			ret = -ENOMEM;
+			goto mem_fail;
+		}
+
+		for (i = 0; i < len; i++) {
+			val[i] = swrm_get_packed_reg_val(&swrm->wcmd_id,
+							 ((u8 *)buf)[i],
+							 dev_num,
+							 ((u16 *)reg)[i]);
+			swr_fifo_reg[i] = SWRM_CMD_FIFO_WR_CMD;
+		}
+		ret = swrm->bulk_write(swrm->handle, swr_fifo_reg, val, len);
+		if (ret) {
+			dev_err(&master->dev, "%s: bulk write failed\n",
+				__func__);
+			ret = -EINVAL;
+		}
+	} else {
+		dev_err(&master->dev,
+			"%s: No support of Bulk write for master regs\n",
+			__func__);
+		ret = -EINVAL;
+		goto err;
+	}
+	kfree(val);
+mem_fail:
+	kfree(swr_fifo_reg);
+err:
+	pm_runtime_mark_last_busy(&swrm->pdev->dev);
+	return ret;
+}
+
+static u8 get_inactive_bank_num(struct swr_mstr_ctrl *swrm)
+{
+	return (swrm->read(swrm->handle, SWRM_MCP_STATUS) &
+		SWRM_MCP_STATUS_BANK_NUM_MASK) ? 0 : 1;
+}
+
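+/*
+ * Frame-shape registers are double banked: new row/col settings are
+ * written to the inactive bank, and this broadcast write to the slaves'
+ * SCP frame control register triggers the switch to that bank so the
+ * frame shape can change without disrupting the bus.
+ */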
+static void enable_bank_switch(struct swr_mstr_ctrl *swrm, u8 bank,
+				u8 row, u8 col)
+{
+	swrm_cmd_fifo_wr_cmd(swrm, ((row << 3) | col), 0xF, 0xF,
+			SWRS_SCP_FRAME_CTRL_BANK(bank));
+}
+
+static struct swr_port_info *swrm_get_port(struct swr_master *master,
+					   u8 port_id)
+{
+	int i;
+	struct swr_port_info *port = NULL;
+
+	for (i = 0; i < SWR_MSTR_PORT_LEN; i++) {
+		port = &master->port[i];
+		if (port->port_id == port_id) {
+			dev_dbg(&master->dev, "%s: port_id: %d, index: %d\n",
+				__func__, port_id, i);
+			return port;
+		}
+	}
+
+	return NULL;
+}
+
+static struct swr_port_info *swrm_get_avail_port(struct swr_master *master)
+{
+	int i;
+	struct swr_port_info *port = NULL;
+
+	for (i = 0; i < SWR_MSTR_PORT_LEN; i++) {
+		port = &master->port[i];
+		if (port->port_en)
+			continue;
+
+		dev_dbg(&master->dev, "%s: port_id: %d, index: %d\n",
+			__func__, port->port_id, i);
+		return port;
+	}
+
+	return NULL;
+}
+
+static struct swr_port_info *swrm_get_enabled_port(struct swr_master *master,
+						   u8 port_id)
+{
+	int i;
+	struct swr_port_info *port = NULL;
+
+	for (i = 0; i < SWR_MSTR_PORT_LEN; i++) {
+		port = &master->port[i];
+		if ((port->port_id == port_id) && (port->port_en == true))
+			break;
+	}
+	if (i == SWR_MSTR_PORT_LEN)
+		port = NULL;
+	return port;
+}
+
+static bool swrm_remove_from_group(struct swr_master *master)
+{
+	struct swr_device *swr_dev;
+	struct swr_mstr_ctrl *swrm = swr_get_ctrl_data(master);
+	bool is_removed = false;
+
+	if (!swrm)
+		goto end;
+
+	mutex_lock(&swrm->mlock);
+	if ((swrm->num_rx_chs > 1) &&
+	    (swrm->num_rx_chs == swrm->num_cfg_devs)) {
+		list_for_each_entry(swr_dev, &master->devices,
+				dev_list) {
+			swr_dev->group_id = SWR_GROUP_NONE;
+			master->gr_sid = 0;
+		}
+		is_removed = true;
+	}
+	mutex_unlock(&swrm->mlock);
+
+end:
+	return is_removed;
+}
+
+static void swrm_cleanup_disabled_data_ports(struct swr_master *master,
+					     u8 bank)
+{
+	u32 value;
+	struct swr_port_info *port;
+	int i;
+	int port_type;
+	struct swrm_mports *mport, *mport_next = NULL;
+	int port_disable_cnt = 0;
+	struct swr_mstr_ctrl *swrm = swr_get_ctrl_data(master);
+
+	if (!swrm) {
+		pr_err("%s: swrm is null\n", __func__);
+		return;
+	}
+
+	dev_dbg(swrm->dev, "%s: master num_port: %d\n", __func__,
+		master->num_port);
+
+	mport = list_first_entry_or_null(&swrm->mport_list,
+					struct swrm_mports,
+					list);
+	if (!mport) {
+		dev_err(swrm->dev, "%s: list is empty\n", __func__);
+		return;
+	}
+
+	for (i = 0; i < master->num_port; i++) {
+		port = swrm_get_port(master, mstr_ports[mport->id]);
+		if (!port || port->ch_en)
+			goto inc_loop;
+
+		port_disable_cnt++;
+		port_type = mstr_port_type[mport->id];
+		value = ((port->ch_en)
+				<< SWRM_DP_PORT_CTRL_EN_CHAN_SHFT);
+		value |= ((port->offset2)
+				<< SWRM_DP_PORT_CTRL_OFFSET2_SHFT);
+		value |= ((port->offset1)
+				<< SWRM_DP_PORT_CTRL_OFFSET1_SHFT);
+		value |= port->sinterval;
+
+		swrm->write(swrm->handle,
+			    SWRM_DP_PORT_CTRL_BANK((mport->id+1), bank),
+			    value);
+		swrm_cmd_fifo_wr_cmd(swrm, 0x00, port->dev_id, 0x00,
+				SWRS_DP_CHANNEL_ENABLE_BANK(port_type, bank));
+
+		dev_dbg(swrm->dev, "%s: mport :%d, reg: 0x%x, val: 0x%x\n",
+			__func__, mport->id,
+			(SWRM_DP_PORT_CTRL_BANK((mport->id+1), bank)), value);
+
+inc_loop:
+		mport_next = list_next_entry(mport, list);
+		if (port && !port->ch_en) {
+			list_del(&mport->list);
+			kfree(mport);
+		}
+		if (!mport_next || (&mport_next->list == &swrm->mport_list)) {
+			dev_err(swrm->dev, "%s: end of list\n", __func__);
+			break;
+		}
+		mport = mport_next;
+	}
+	master->num_port -= port_disable_cnt;
+
+	dev_dbg(swrm->dev, "%s:disable ports: %d, active ports (rem): %d\n",
+		__func__, port_disable_cnt,  master->num_port);
+}
+
+static void swrm_slvdev_datapath_control(struct swr_master *master,
+					 bool enable)
+{
+	u8 bank;
+	u32 value, n_col;
+	struct swr_mstr_ctrl *swrm = swr_get_ctrl_data(master);
+	int mask = (SWRM_MCP_FRAME_CTRL_BANK_ROW_CTRL_BMSK |
+		    SWRM_MCP_FRAME_CTRL_BANK_COL_CTRL_BMSK |
+		    SWRM_MCP_FRAME_CTRL_BANK_SSP_PERIOD_BMSK);
+	u8 inactive_bank;
+
+	if (!swrm) {
+		pr_err("%s: swrm is null\n", __func__);
+		return;
+	}
+
+	bank = get_inactive_bank_num(swrm);
+
+	dev_dbg(swrm->dev, "%s: enable: %d, cfg_devs: %d\n",
+		__func__, enable, swrm->num_cfg_devs);
+
+	if (enable) {
+		/* set Row = 48 and col = 16 */
+		n_col = SWR_MAX_COL;
+	} else {
+		/*
+		 * Do not change to 48x2 if number of channels configured
+		 * as stereo and if disable datapath is called for the
+		 * first slave device
+		 */
+		if (swrm->num_cfg_devs > 0)
+			n_col = SWR_MAX_COL;
+		else
+			n_col = SWR_MIN_COL;
+
+		/*
+		 * All ports are already disabled, no need to perform
+		 * bank-switch and copy operation. This case can arise
+		 * when speaker channels are enabled in stereo mode with
+		 * BROADCAST and disabled in GROUP_NONE
+		 */
+		if (master->num_port == 0)
+			return;
+	}
+
+	value = swrm->read(swrm->handle, SWRM_MCP_FRAME_CTRL_BANK_ADDR(bank));
+	value &= (~mask);
+	value |= ((0 << SWRM_MCP_FRAME_CTRL_BANK_ROW_CTRL_SHFT) |
+		  (n_col << SWRM_MCP_FRAME_CTRL_BANK_COL_CTRL_SHFT) |
+		  (0 << SWRM_MCP_FRAME_CTRL_BANK_SSP_PERIOD_SHFT));
+	swrm->write(swrm->handle, SWRM_MCP_FRAME_CTRL_BANK_ADDR(bank), value);
+
+	dev_dbg(swrm->dev, "%s: regaddr: 0x%x, value: 0x%x\n", __func__,
+		SWRM_MCP_FRAME_CTRL_BANK_ADDR(bank), value);
+
+	enable_bank_switch(swrm, bank, SWR_MAX_ROW, n_col);
+
+	inactive_bank = bank ? 0 : 1;
+	if (enable)
+		swrm_copy_data_port_config(master, inactive_bank);
+	else
+		swrm_cleanup_disabled_data_ports(master, inactive_bank);
+
+	if (!swrm_is_port_en(master)) {
+		dev_dbg(&master->dev, "%s: pm_runtime auto suspend triggered\n",
+			__func__);
+		pm_runtime_mark_last_busy(&swrm->pdev->dev);
+		pm_runtime_put_autosuspend(&swrm->pdev->dev);
+	}
+}
+
+static void swrm_apply_port_config(struct swr_master *master)
+{
+	u8 bank;
+	struct swr_mstr_ctrl *swrm = swr_get_ctrl_data(master);
+
+	if (!swrm) {
+		pr_err("%s: Invalid handle to swr controller\n",
+			__func__);
+		return;
+	}
+
+	bank = get_inactive_bank_num(swrm);
+	dev_dbg(swrm->dev, "%s: enter bank: %d master_ports: %d\n",
+		__func__, bank, master->num_port);
+
+	swrm_cmd_fifo_wr_cmd(swrm, 0x01, 0xF, 0x00,
+			SWRS_SCP_HOST_CLK_DIV2_CTL_BANK(bank));
+
+	swrm_copy_data_port_config(master, bank);
+}
+
+static void swrm_copy_data_port_config(struct swr_master *master, u8 bank)
+{
+	u32 value;
+	struct swr_port_info *port;
+	int i;
+	int port_type;
+	struct swrm_mports *mport;
+	u32 reg[SWRM_MAX_PORT_REG];
+	u32 val[SWRM_MAX_PORT_REG];
+	int len = 0;
+	struct swr_mstr_ctrl *swrm = swr_get_ctrl_data(master);
+
+	if (!swrm) {
+		pr_err("%s: swrm is null\n", __func__);
+		return;
+	}
+
+	dev_dbg(swrm->dev, "%s: master num_port: %d\n", __func__,
+		master->num_port);
+
+	mport = list_first_entry_or_null(&swrm->mport_list,
+					struct swrm_mports,
+					list);
+	if (!mport) {
+		dev_err(swrm->dev, "%s: list is empty\n", __func__);
+		return;
+	}
+	for (i = 0; i < master->num_port; i++) {
+
+		port = swrm_get_enabled_port(master, mstr_ports[mport->id]);
+		if (!port)
+			continue;
+		port_type = mstr_port_type[mport->id];
+		if (!port->dev_id || (port->dev_id > master->num_dev)) {
+			dev_dbg(swrm->dev, "%s: invalid device id = %d\n",
+				__func__, port->dev_id);
+			continue;
+		}
+		value = ((port->ch_en)
+				<< SWRM_DP_PORT_CTRL_EN_CHAN_SHFT);
+		value |= ((port->offset2)
+				<< SWRM_DP_PORT_CTRL_OFFSET2_SHFT);
+		value |= ((port->offset1)
+				<< SWRM_DP_PORT_CTRL_OFFSET1_SHFT);
+		value |= port->sinterval;
+
+		reg[len] = SWRM_DP_PORT_CTRL_BANK((mport->id+1), bank);
+		val[len++] = value;
+
+		dev_dbg(swrm->dev, "%s: mport :%d, reg: 0x%x, val: 0x%x\n",
+			__func__, mport->id,
+			(SWRM_DP_PORT_CTRL_BANK((mport->id+1), bank)), value);
+
+		reg[len] = SWRM_CMD_FIFO_WR_CMD;
+		val[len++] = SWR_REG_VAL_PACK(port->ch_en, port->dev_id, 0x00,
+				SWRS_DP_CHANNEL_ENABLE_BANK(port_type, bank));
+
+		reg[len] = SWRM_CMD_FIFO_WR_CMD;
+		val[len++] = SWR_REG_VAL_PACK(port->sinterval,
+				port->dev_id, 0x00,
+				SWRS_DP_SAMPLE_CONTROL_1_BANK(port_type, bank));
+
+		reg[len] = SWRM_CMD_FIFO_WR_CMD;
+		val[len++] = SWR_REG_VAL_PACK(port->offset1,
+				port->dev_id, 0x00,
+				SWRS_DP_OFFSET_CONTROL_1_BANK(port_type, bank));
+
+		if (port_type != 0) {
+			reg[len] = SWRM_CMD_FIFO_WR_CMD;
+			val[len++] = SWR_REG_VAL_PACK(port->offset2,
+					port->dev_id, 0x00,
+					SWRS_DP_OFFSET_CONTROL_2_BANK(port_type,
+									bank));
+		}
+		mport = list_next_entry(mport, list);
+		if (!mport) {
+			dev_err(swrm->dev, "%s: end of list\n", __func__);
+			break;
+		}
+	}
+	swrm->bulk_write(swrm->handle, reg, val, len);
+}
+
+static int swrm_connect_port(struct swr_master *master,
+			struct swr_params *portinfo)
+{
+	int i;
+	struct swr_port_info *port;
+	int ret = 0;
+	struct swr_mstr_ctrl *swrm = swr_get_ctrl_data(master);
+	struct swrm_mports *mport;
+	struct list_head *ptr, *next;
+
+	dev_dbg(&master->dev, "%s: enter\n", __func__);
+	if (!portinfo)
+		return -EINVAL;
+
+	if (!swrm) {
+		dev_err(&master->dev,
+			"%s: Invalid handle to swr controller\n",
+			__func__);
+		return -EINVAL;
+	}
+
+	mutex_lock(&swrm->mlock);
+	if (!swrm_is_port_en(master))
+		pm_runtime_get_sync(&swrm->pdev->dev);
+
+	for (i = 0; i < portinfo->num_port; i++) {
+		mport = kzalloc(sizeof(struct swrm_mports), GFP_KERNEL);
+		if (!mport) {
+			ret = -ENOMEM;
+			goto mem_fail;
+		}
+		ret = swrm_get_master_port(&mport->id,
+						portinfo->port_id[i]);
+		if (ret < 0) {
+			dev_err(&master->dev,
+				"%s: mstr portid for slv port %d not found\n",
+				__func__, portinfo->port_id[i]);
+			goto port_fail;
+		}
+		port = swrm_get_avail_port(master);
+		if (!port) {
+			dev_err(&master->dev,
+				"%s: avail ports not found!\n", __func__);
+			goto port_fail;
+		}
+		list_add(&mport->list, &swrm->mport_list);
+		port->dev_id = portinfo->dev_id;
+		port->port_id = portinfo->port_id[i];
+		port->num_ch = portinfo->num_ch[i];
+		port->ch_rate = portinfo->ch_rate[i];
+		port->ch_en = portinfo->ch_en[i];
+		port->port_en = true;
+		dev_dbg(&master->dev,
+			"%s: mstr port %d, slv port %d ch_rate %d num_ch %d\n",
+			__func__, mport->id, port->port_id, port->ch_rate,
+			port->num_ch);
+	}
+	master->num_port += portinfo->num_port;
+	if (master->num_port >= SWR_MSTR_PORT_LEN)
+		master->num_port = SWR_MSTR_PORT_LEN;
+
+	swrm_get_port_config(master);
+	swr_port_response(master, portinfo->tid);
+	swrm->num_cfg_devs += 1;
+	dev_dbg(&master->dev, "%s: cfg_devs: %d, rx_chs: %d\n",
+		__func__, swrm->num_cfg_devs, swrm->num_rx_chs);
+	if (swrm->num_rx_chs > 1) {
+		if (swrm->num_rx_chs == swrm->num_cfg_devs)
+			swrm_apply_port_config(master);
+	} else {
+		swrm_apply_port_config(master);
+	}
+	mutex_unlock(&swrm->mlock);
+	return 0;
+
+port_fail:
+	kfree(mport);
+mem_fail:
+	list_for_each_safe(ptr, next, &swrm->mport_list) {
+		mport = list_entry(ptr, struct swrm_mports, list);
+		for (i = 0; i < portinfo->num_port; i++) {
+			if (portinfo->port_id[i] == mstr_ports[mport->id]) {
+				port = swrm_get_port(master,
+						portinfo->port_id[i]);
+				if (port)
+					port->ch_en = false;
+				list_del(&mport->list);
+				kfree(mport);
+				break;
+			}
+		}
+	}
+	mutex_unlock(&swrm->mlock);
+	return ret;
+}
+
+static int swrm_disconnect_port(struct swr_master *master,
+			struct swr_params *portinfo)
+{
+	int i;
+	struct swr_port_info *port;
+	u8 bank;
+	u32 value;
+	int ret = 0;
+	u8 mport_id = 0;
+	int port_type = 0;
+	struct swr_mstr_ctrl *swrm = swr_get_ctrl_data(master);
+
+	if (!swrm) {
+		dev_err(&master->dev,
+			"%s: Invalid handle to swr controller\n",
+			__func__);
+		return -EINVAL;
+	}
+
+	if (!portinfo) {
+		dev_err(&master->dev, "%s: portinfo is NULL\n", __func__);
+		return -EINVAL;
+	}
+	mutex_lock(&swrm->mlock);
+	bank = get_inactive_bank_num(swrm);
+	for (i = 0; i < portinfo->num_port; i++) {
+		ret = swrm_get_master_port(&mport_id,
+						portinfo->port_id[i]);
+		if (ret < 0) {
+			dev_err(&master->dev,
+				"%s: mstr portid for slv port %d not found\n",
+				__func__, portinfo->port_id[i]);
+			mutex_unlock(&swrm->mlock);
+			return -EINVAL;
+		}
+		port = swrm_get_enabled_port(master, portinfo->port_id[i]);
+		if (!port) {
+			dev_dbg(&master->dev, "%s: port %d already disabled\n",
+				__func__, portinfo->port_id[i]);
+			continue;
+		}
+		port_type = mstr_port_type[mport_id];
+		port->dev_id = portinfo->dev_id;
+		port->port_en = false;
+		port->ch_en = 0;
+		value = port->ch_en << SWRM_DP_PORT_CTRL_EN_CHAN_SHFT;
+		value |= (port->offset2 << SWRM_DP_PORT_CTRL_OFFSET2_SHFT);
+		value |= (port->offset1 << SWRM_DP_PORT_CTRL_OFFSET1_SHFT);
+		value |= port->sinterval;
+
+		swrm->write(swrm->handle,
+			    SWRM_DP_PORT_CTRL_BANK((mport_id+1), bank),
+			    value);
+		swrm_cmd_fifo_wr_cmd(swrm, 0x00, port->dev_id, 0x00,
+				SWRS_DP_CHANNEL_ENABLE_BANK(port_type, bank));
+	}
+
+	swr_port_response(master, portinfo->tid);
+	swrm->num_cfg_devs -= 1;
+	dev_dbg(&master->dev, "%s: cfg_devs: %d, rx_chs: %d, active ports: %d\n",
+		__func__, swrm->num_cfg_devs, swrm->num_rx_chs,
+		master->num_port);
+	mutex_unlock(&swrm->mlock);
+
+	return 0;
+}
+
+static int swrm_check_slave_change_status(struct swr_mstr_ctrl *swrm,
+					int status, u8 *devnum)
+{
+	int i;
+	int new_sts = status;
+	int ret = SWR_NOT_PRESENT;
+
+	if (status != swrm->slave_status) {
+		for (i = 0; i < (swrm->master.num_dev + 1); i++) {
+			if ((status & SWRM_MCP_SLV_STATUS_MASK) !=
+			    (swrm->slave_status & SWRM_MCP_SLV_STATUS_MASK)) {
+				ret = (status & SWRM_MCP_SLV_STATUS_MASK);
+				*devnum = i;
+				break;
+			}
+			status >>= 2;
+			swrm->slave_status >>= 2;
+		}
+		swrm->slave_status = new_sts;
+	}
+	return ret;
+}
+
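+/*
+ * Top-level interrupt handler: the bus clock is held for the duration of
+ * the handler via swrm_clk_request(), each pending source in
+ * SWRM_INTERRUPT_STATUS is cleared individually, and the broadcast/reset
+ * completions are signalled to their waiters.
+ */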
+static irqreturn_t swr_mstr_interrupt(int irq, void *dev)
+{
+	struct swr_mstr_ctrl *swrm = dev;
+	u32 value, intr_sts;
+	int status, chg_sts, i;
+	u8 devnum = 0;
+	int ret = IRQ_HANDLED;
+
+	mutex_lock(&swrm->reslock);
+	swrm_clk_request(swrm, true);
+	mutex_unlock(&swrm->reslock);
+
+	intr_sts = swrm->read(swrm->handle, SWRM_INTERRUPT_STATUS);
+	intr_sts &= SWRM_INTERRUPT_STATUS_RMSK;
+	for (i = 0; i < SWRM_INTERRUPT_MAX; i++) {
+		value = intr_sts & (1 << i);
+		if (!value)
+			continue;
+
+		swrm->write(swrm->handle, SWRM_INTERRUPT_CLEAR, value);
+		switch (value) {
+		case SWRM_INTERRUPT_STATUS_SLAVE_PEND_IRQ:
+			dev_dbg(swrm->dev, "SWR slave pend irq\n");
+			break;
+		case SWRM_INTERRUPT_STATUS_NEW_SLAVE_ATTACHED:
+			dev_dbg(swrm->dev, "SWR new slave attached\n");
+			break;
+		case SWRM_INTERRUPT_STATUS_CHANGE_ENUM_SLAVE_STATUS:
+			status = swrm->read(swrm->handle, SWRM_MCP_SLV_STATUS);
+			if (status == swrm->slave_status) {
+				dev_dbg(swrm->dev,
+					"%s: No change in slave status: %d\n",
+					__func__, status);
+				break;
+			}
+			chg_sts = swrm_check_slave_change_status(swrm, status,
+								&devnum);
+			switch (chg_sts) {
+			case SWR_NOT_PRESENT:
+				dev_dbg(swrm->dev, "device %d got detached\n",
+					devnum);
+				break;
+			case SWR_ATTACHED_OK:
+				dev_dbg(swrm->dev, "device %d got attached\n",
+					devnum);
+				break;
+			case SWR_ALERT:
+				dev_dbg(swrm->dev,
+					"device %d has pending interrupt\n",
+					devnum);
+				break;
+			}
+			break;
+		case SWRM_INTERRUPT_STATUS_MASTER_CLASH_DET:
+			dev_err_ratelimited(swrm->dev, "SWR bus clash detected\n");
+			break;
+		case SWRM_INTERRUPT_STATUS_RD_FIFO_OVERFLOW:
+			dev_dbg(swrm->dev, "SWR read FIFO overflow\n");
+			break;
+		case SWRM_INTERRUPT_STATUS_RD_FIFO_UNDERFLOW:
+			dev_dbg(swrm->dev, "SWR read FIFO underflow\n");
+			break;
+		case SWRM_INTERRUPT_STATUS_WR_CMD_FIFO_OVERFLOW:
+			dev_dbg(swrm->dev, "SWR write FIFO overflow\n");
+			break;
+		case SWRM_INTERRUPT_STATUS_CMD_ERROR:
+			value = swrm->read(swrm->handle, SWRM_CMD_FIFO_STATUS);
+			dev_err_ratelimited(swrm->dev,
+			"SWR CMD error, fifo status 0x%x, flushing fifo\n",
+					    value);
+			swrm->write(swrm->handle, SWRM_CMD_FIFO_CMD, 0x1);
+			break;
+		case SWRM_INTERRUPT_STATUS_DOUT_PORT_COLLISION:
+			dev_dbg(swrm->dev, "SWR Port collision detected\n");
+			break;
+		case SWRM_INTERRUPT_STATUS_READ_EN_RD_VALID_MISMATCH:
+			dev_dbg(swrm->dev, "SWR read enable valid mismatch\n");
+			break;
+		case SWRM_INTERRUPT_STATUS_SPECIAL_CMD_ID_FINISHED:
+			complete(&swrm->broadcast);
+			dev_dbg(swrm->dev, "SWR cmd id finished\n");
+			break;
+		case SWRM_INTERRUPT_STATUS_NEW_SLAVE_AUTO_ENUM_FINISHED:
+			break;
+		case SWRM_INTERRUPT_STATUS_AUTO_ENUM_FAILED:
+			break;
+		case SWRM_INTERRUPT_STATUS_AUTO_ENUM_TABLE_IS_FULL:
+			break;
+		case SWRM_INTERRUPT_STATUS_BUS_RESET_FINISHED:
+			complete(&swrm->reset);
+			break;
+		case SWRM_INTERRUPT_STATUS_CLK_STOP_FINISHED:
+			break;
+		default:
+			dev_err_ratelimited(swrm->dev, "SWR unknown interrupt\n");
+			ret = IRQ_NONE;
+			break;
+		}
+	}
+
+	mutex_lock(&swrm->reslock);
+	swrm_clk_request(swrm, false);
+	mutex_unlock(&swrm->reslock);
+	return ret;
+}
+
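+/*
+ * SWRM_MCP_SLV_STATUS packs a 2-bit enumeration status per slave
+ * (SWR_NOT_PRESENT/SWR_ATTACHED_OK/SWR_ALERT), hence the devnum * 2
+ * shift and the SWRM_MCP_SLV_STATUS_MASK (0x03) mask below.
+ */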
+static int swrm_get_device_status(struct swr_mstr_ctrl *swrm, u8 devnum)
+{
+	u32 val;
+
+	swrm->slave_status = swrm->read(swrm->handle, SWRM_MCP_SLV_STATUS);
+	val = (swrm->slave_status >> (devnum * 2));
+	val &= SWRM_MCP_SLV_STATUS_MASK;
+	return val;
+}
+
+static int swrm_get_logical_dev_num(struct swr_master *mstr, u64 dev_id,
+				u8 *dev_num)
+{
+	int i;
+	u64 id = 0;
+	int ret = -EINVAL;
+	struct swr_mstr_ctrl *swrm = swr_get_ctrl_data(mstr);
+
+	if (!swrm) {
+		pr_err("%s: Invalid handle to swr controller\n",
+			__func__);
+		return ret;
+	}
+
+	pm_runtime_get_sync(&swrm->pdev->dev);
+	for (i = 1; i < (mstr->num_dev + 1); i++) {
+		id = ((u64)(swrm->read(swrm->handle,
+			    SWRM_ENUMERATOR_SLAVE_DEV_ID_2(i))) << 32);
+		id |= swrm->read(swrm->handle,
+			    SWRM_ENUMERATOR_SLAVE_DEV_ID_1(i));
+		if ((id & SWR_DEV_ID_MASK) == dev_id) {
+			if (swrm_get_device_status(swrm, i) == 0x01) {
+				*dev_num = i;
+				ret = 0;
+			} else {
+				dev_err(swrm->dev, "%s: device is not ready\n",
+					 __func__);
+			}
+			goto found;
+		}
+	}
+	dev_err(swrm->dev, "%s: device id 0x%llx does not match with 0x%llx\n",
+		__func__, id, dev_id);
+found:
+	pm_runtime_mark_last_busy(&swrm->pdev->dev);
+	pm_runtime_put_autosuspend(&swrm->pdev->dev);
+	return ret;
+}
+
+static int swrm_master_init(struct swr_mstr_ctrl *swrm)
+{
+	int ret = 0;
+	u32 val;
+	u8 row_ctrl = SWR_MAX_ROW;
+	u8 col_ctrl = SWR_MIN_COL;
+	u8 ssp_period = 1;
+	u8 retry_cmd_num = 3;
+	u32 reg[SWRM_MAX_INIT_REG];
+	u32 value[SWRM_MAX_INIT_REG];
+	int len = 0;
+
+	/* Clear Rows and Cols */
+	val = ((row_ctrl << SWRM_MCP_FRAME_CTRL_BANK_ROW_CTRL_SHFT) |
+		(col_ctrl << SWRM_MCP_FRAME_CTRL_BANK_COL_CTRL_SHFT) |
+		(ssp_period << SWRM_MCP_FRAME_CTRL_BANK_SSP_PERIOD_SHFT));
+
+	reg[len] = SWRM_MCP_FRAME_CTRL_BANK_ADDR(0);
+	value[len++] = val;
+
+	/* Set Auto enumeration flag */
+	reg[len] = SWRM_ENUMERATOR_CFG_ADDR;
+	value[len++] = 1;
+
+	/* Mask soundwire interrupts */
+	reg[len] = SWRM_INTERRUPT_MASK_ADDR;
+	value[len++] = 0x1FFFD;
+
+	/* Configure No pings */
+	val = swrm->read(swrm->handle, SWRM_MCP_CFG_ADDR);
+	val &= ~SWRM_MCP_CFG_MAX_NUM_OF_CMD_NO_PINGS_BMSK;
+	val |= (0x1f << SWRM_MCP_CFG_MAX_NUM_OF_CMD_NO_PINGS_SHFT);
+	reg[len] = SWRM_MCP_CFG_ADDR;
+	value[len++] = val;
+
+	/* Configure number of retries of a read/write cmd */
+	val = (retry_cmd_num << SWRM_CMD_FIFO_CFG_NUM_OF_CMD_RETRY_SHFT);
+	reg[len] = SWRM_CMD_FIFO_CFG_ADDR;
+	value[len++] = val;
+
+	/* Set IRQ to PULSE */
+	reg[len] = SWRM_COMP_CFG_ADDR;
+	value[len++] = 0x02;
+
+	reg[len] = SWRM_COMP_CFG_ADDR;
+	value[len++] = 0x03;
+
+	reg[len] = SWRM_INTERRUPT_CLEAR;
+	value[len++] = 0x08;
+
+	swrm->bulk_write(swrm->handle, reg, value, len);
+
+	return ret;
+}
+
+static int swrm_probe(struct platform_device *pdev)
+{
+	struct swr_mstr_ctrl *swrm;
+	struct swr_ctrl_platform_data *pdata;
+	int ret;
+
+	/* Allocate soundwire master driver structure */
+	swrm = kzalloc(sizeof(struct swr_mstr_ctrl), GFP_KERNEL);
+	if (!swrm) {
+		dev_err(&pdev->dev, "%s: no memory for swr mstr controller\n",
+			 __func__);
+		ret = -ENOMEM;
+		goto err_memory_fail;
+	}
+	swrm->dev = &pdev->dev;
+	swrm->pdev = pdev;
+	platform_set_drvdata(pdev, swrm);
+	swr_set_ctrl_data(&swrm->master, swrm);
+	pdata = dev_get_platdata(&pdev->dev);
+	if (!pdata) {
+		dev_err(&pdev->dev, "%s: pdata from parent is NULL\n",
+			__func__);
+		ret = -EINVAL;
+		goto err_pdata_fail;
+	}
+	swrm->handle = (void *)pdata->handle;
+	if (!swrm->handle) {
+		dev_err(&pdev->dev, "%s: swrm->handle is NULL\n",
+			__func__);
+		ret = -EINVAL;
+		goto err_pdata_fail;
+	}
+	swrm->read = pdata->read;
+	if (!swrm->read) {
+		dev_err(&pdev->dev, "%s: swrm->read is NULL\n",
+			__func__);
+		ret = -EINVAL;
+		goto err_pdata_fail;
+	}
+	swrm->write = pdata->write;
+	if (!swrm->write) {
+		dev_err(&pdev->dev, "%s: swrm->write is NULL\n",
+			__func__);
+		ret = -EINVAL;
+		goto err_pdata_fail;
+	}
+	swrm->bulk_write = pdata->bulk_write;
+	if (!swrm->bulk_write) {
+		dev_err(&pdev->dev, "%s: swrm->bulk_write is NULL\n",
+			__func__);
+		ret = -EINVAL;
+		goto err_pdata_fail;
+	}
+	swrm->clk = pdata->clk;
+	if (!swrm->clk) {
+		dev_err(&pdev->dev, "%s: swrm->clk is NULL\n",
+			__func__);
+		ret = -EINVAL;
+		goto err_pdata_fail;
+	}
+	swrm->reg_irq = pdata->reg_irq;
+	if (!swrm->reg_irq) {
+		dev_err(&pdev->dev, "%s: swrm->reg_irq is NULL\n",
+			__func__);
+		ret = -EINVAL;
+		goto err_pdata_fail;
+	}
+	swrm->master.read = swrm_read;
+	swrm->master.write = swrm_write;
+	swrm->master.bulk_write = swrm_bulk_write;
+	swrm->master.get_logical_dev_num = swrm_get_logical_dev_num;
+	swrm->master.connect_port = swrm_connect_port;
+	swrm->master.disconnect_port = swrm_disconnect_port;
+	swrm->master.slvdev_datapath_control = swrm_slvdev_datapath_control;
+	swrm->master.remove_from_group = swrm_remove_from_group;
+	swrm->master.dev.parent = &pdev->dev;
+	swrm->master.dev.of_node = pdev->dev.of_node;
+	swrm->master.num_port = 0;
+	swrm->num_enum_slaves = 0;
+	swrm->rcmd_id = 0;
+	swrm->wcmd_id = 0;
+	swrm->slave_status = 0;
+	swrm->num_rx_chs = 0;
+	swrm->clk_ref_count = 0;
+	swrm->state = SWR_MSTR_RESUME;
+	init_completion(&swrm->reset);
+	init_completion(&swrm->broadcast);
+	mutex_init(&swrm->mlock);
+	INIT_LIST_HEAD(&swrm->mport_list);
+	mutex_init(&swrm->reslock);
+
+	ret = swrm->reg_irq(swrm->handle, swr_mstr_interrupt, swrm,
+			    SWR_IRQ_REGISTER);
+	if (ret) {
+		dev_err(&pdev->dev, "%s: IRQ register failed ret %d\n",
+			__func__, ret);
+		goto err_irq_fail;
+	}
+
+	ret = swr_register_master(&swrm->master);
+	if (ret) {
+		dev_err(&pdev->dev, "%s: error adding swr master\n", __func__);
+		goto err_mstr_fail;
+	}
+
+	/*
+	 * Add devices registered with board-info as the
+	 * controller will be up now.
+	 */
+	swr_master_add_boarddevices(&swrm->master);
+	mutex_lock(&swrm->mlock);
+	swrm_clk_request(swrm, true);
+	ret = swrm_master_init(swrm);
+	if (ret < 0) {
+		dev_err(&pdev->dev,
+			"%s: Error in master Initializaiton, err %d\n",
+			__func__, ret);
+		mutex_unlock(&swrm->mlock);
+		goto err_mstr_fail;
+	}
+	swrm->version = swrm->read(swrm->handle, SWRM_COMP_HW_VERSION);
+
+	mutex_unlock(&swrm->mlock);
+
+	if (pdev->dev.of_node)
+		of_register_swr_devices(&swrm->master);
+
+	dbgswrm = swrm;
+	debugfs_swrm_dent = debugfs_create_dir(dev_name(&pdev->dev), 0);
+	if (!IS_ERR(debugfs_swrm_dent)) {
+		debugfs_peek = debugfs_create_file("swrm_peek",
+				S_IFREG | S_IRUGO, debugfs_swrm_dent,
+				(void *) "swrm_peek", &swrm_debug_ops);
+
+		debugfs_poke = debugfs_create_file("swrm_poke",
+				S_IFREG | S_IRUGO, debugfs_swrm_dent,
+				(void *) "swrm_poke", &swrm_debug_ops);
+
+		debugfs_reg_dump = debugfs_create_file("swrm_reg_dump",
+				   S_IFREG | S_IRUGO, debugfs_swrm_dent,
+				   (void *) "swrm_reg_dump",
+				   &swrm_debug_ops);
+	}
+	pm_runtime_set_autosuspend_delay(&pdev->dev, auto_suspend_timer);
+	pm_runtime_use_autosuspend(&pdev->dev);
+	pm_runtime_set_active(&pdev->dev);
+	pm_runtime_enable(&pdev->dev);
+	pm_runtime_mark_last_busy(&pdev->dev);
+
+	return 0;
+err_mstr_fail:
+	swrm->reg_irq(swrm->handle, swr_mstr_interrupt,
+			swrm, SWR_IRQ_FREE);
+err_irq_fail:
+err_pdata_fail:
+	kfree(swrm);
+err_memory_fail:
+	return ret;
+}
+
+static int swrm_remove(struct platform_device *pdev)
+{
+	struct swr_mstr_ctrl *swrm = platform_get_drvdata(pdev);
+
+	swrm->reg_irq(swrm->handle, swr_mstr_interrupt,
+			swrm, SWR_IRQ_FREE);
+	if (swrm->mstr_port) {
+		kfree(swrm->mstr_port->port);
+		swrm->mstr_port->port = NULL;
+		kfree(swrm->mstr_port);
+		swrm->mstr_port = NULL;
+	}
+	pm_runtime_disable(&pdev->dev);
+	pm_runtime_set_suspended(&pdev->dev);
+	swr_unregister_master(&swrm->master);
+	mutex_destroy(&swrm->mlock);
+	mutex_destroy(&swrm->reslock);
+	kfree(swrm);
+	return 0;
+}
+
+static int swrm_clk_pause(struct swr_mstr_ctrl *swrm)
+{
+	u32 val;
+
+	dev_dbg(swrm->dev, "%s: state: %d\n", __func__, swrm->state);
+	swrm->write(swrm->handle, SWRM_INTERRUPT_MASK_ADDR, 0x1FDFD);
+	val = swrm->read(swrm->handle, SWRM_MCP_CFG_ADDR);
+	val |= SWRM_MCP_CFG_BUS_CLK_PAUSE_BMSK;
+	swrm->write(swrm->handle, SWRM_MCP_CFG_ADDR, val);
+	swrm->state = SWR_MSTR_PAUSE;
+
+	return 0;
+}
+
+#ifdef CONFIG_PM
+static int swrm_runtime_resume(struct device *dev)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct swr_mstr_ctrl *swrm = platform_get_drvdata(pdev);
+	int ret = 0;
+	struct swr_master *mstr = &swrm->master;
+	struct swr_device *swr_dev;
+
+	dev_dbg(dev, "%s: pm_runtime: resume, state:%d\n",
+		__func__, swrm->state);
+	mutex_lock(&swrm->reslock);
+	if ((swrm->state == SWR_MSTR_PAUSE) ||
+	    (swrm->state == SWR_MSTR_DOWN)) {
+		if (swrm->state == SWR_MSTR_DOWN) {
+			if (swrm_clk_request(swrm, true))
+				goto exit;
+		}
+		list_for_each_entry(swr_dev, &mstr->devices, dev_list) {
+			ret = swr_device_up(swr_dev);
+			if (ret) {
+				dev_err(dev,
+					"%s: failed to wakeup swr dev %d\n",
+					__func__, swr_dev->dev_num);
+				swrm_clk_request(swrm, false);
+				goto exit;
+			}
+		}
+		swrm->write(swrm->handle, SWRM_COMP_SW_RESET, 0x01);
+		swrm->write(swrm->handle, SWRM_COMP_SW_RESET, 0x01);
+		swrm_master_init(swrm);
+	}
+exit:
+	pm_runtime_set_autosuspend_delay(&pdev->dev, auto_suspend_timer);
+	mutex_unlock(&swrm->reslock);
+	return ret;
+}
+
+static int swrm_runtime_suspend(struct device *dev)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct swr_mstr_ctrl *swrm = platform_get_drvdata(pdev);
+	int ret = 0;
+	struct swr_master *mstr = &swrm->master;
+	struct swr_device *swr_dev;
+
+	dev_dbg(dev, "%s: pm_runtime: suspend state: %d\n",
+		__func__, swrm->state);
+	mutex_lock(&swrm->reslock);
+	if ((swrm->state == SWR_MSTR_RESUME) ||
+	    (swrm->state == SWR_MSTR_UP)) {
+		if (swrm_is_port_en(&swrm->master)) {
+			dev_dbg(dev, "%s ports are enabled\n", __func__);
+			ret = -EBUSY;
+			goto exit;
+		}
+		swrm_clk_pause(swrm);
+		swrm->write(swrm->handle, SWRM_COMP_CFG_ADDR, 0x00);
+		list_for_each_entry(swr_dev, &mstr->devices, dev_list) {
+			ret = swr_device_down(swr_dev);
+			if (ret) {
+				dev_err(dev,
+					"%s: failed to shutdown swr dev %d\n",
+					__func__, swr_dev->dev_num);
+				goto exit;
+			}
+		}
+		swrm_clk_request(swrm, false);
+	}
+exit:
+	mutex_unlock(&swrm->reslock);
+	return ret;
+}
+#endif /* CONFIG_PM */
+
+static int swrm_device_down(struct device *dev)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct swr_mstr_ctrl *swrm = platform_get_drvdata(pdev);
+	int ret = 0;
+	struct swr_master *mstr = &swrm->master;
+	struct swr_device *swr_dev;
+
+	dev_dbg(dev, "%s: swrm state: %d\n", __func__, swrm->state);
+	mutex_lock(&swrm->reslock);
+	if ((swrm->state == SWR_MSTR_RESUME) ||
+	    (swrm->state == SWR_MSTR_UP)) {
+		list_for_each_entry(swr_dev, &mstr->devices, dev_list) {
+			ret = swr_device_down(swr_dev);
+			if (ret)
+				dev_err(dev,
+					"%s: failed to shutdown swr dev %d\n",
+					__func__, swr_dev->dev_num);
+		}
+		dev_dbg(dev, "%s: Shutting down SWRM\n", __func__);
+		pm_runtime_disable(dev);
+		pm_runtime_set_suspended(dev);
+		pm_runtime_enable(dev);
+		swrm_clk_request(swrm, false);
+	}
+	mutex_unlock(&swrm->reslock);
+	return ret;
+}
+
+/**
+ * swrm_wcd_notify - lets the parent device send notifications to the
+ * soundwire master
+ * @pdev: pointer to platform device structure
+ * @id: command id from parent to the soundwire master
+ * @data: data from parent device to soundwire master
+ */
+int swrm_wcd_notify(struct platform_device *pdev, u32 id, void *data)
+{
+	struct swr_mstr_ctrl *swrm;
+	int ret = 0;
+	struct swr_master *mstr;
+	struct swr_device *swr_dev;
+
+	if (!pdev) {
+		pr_err("%s: pdev is NULL\n", __func__);
+		return -EINVAL;
+	}
+	swrm = platform_get_drvdata(pdev);
+	if (!swrm) {
+		dev_err(&pdev->dev, "%s: swrm is NULL\n", __func__);
+		return -EINVAL;
+	}
+	mstr = &swrm->master;
+
+	switch (id) {
+	case SWR_CH_MAP:
+		if (!data) {
+			dev_err(swrm->dev, "%s: data is NULL\n", __func__);
+			ret = -EINVAL;
+		} else {
+			ret = swrm_set_ch_map(swrm, data);
+		}
+		break;
+	case SWR_DEVICE_DOWN:
+		dev_dbg(swrm->dev, "%s: swr master down called\n", __func__);
+		mutex_lock(&swrm->mlock);
+		if ((swrm->state == SWR_MSTR_PAUSE) ||
+		    (swrm->state == SWR_MSTR_DOWN))
+			dev_dbg(swrm->dev, "%s: SWR master is already Down: %d\n",
+				__func__, swrm->state);
+		else
+			swrm_device_down(&pdev->dev);
+		mutex_unlock(&swrm->mlock);
+		break;
+	case SWR_DEVICE_UP:
+		dev_dbg(swrm->dev, "%s: swr master up called\n", __func__);
+		mutex_lock(&swrm->mlock);
+		mutex_lock(&swrm->reslock);
+		if ((swrm->state == SWR_MSTR_RESUME) ||
+		    (swrm->state == SWR_MSTR_UP)) {
+			dev_dbg(swrm->dev, "%s: SWR master is already UP: %d\n",
+				__func__, swrm->state);
+		} else {
+			pm_runtime_mark_last_busy(&pdev->dev);
+			mutex_unlock(&swrm->reslock);
+			pm_runtime_get_sync(&pdev->dev);
+			mutex_lock(&swrm->reslock);
+			list_for_each_entry(swr_dev, &mstr->devices, dev_list) {
+				ret = swr_reset_device(swr_dev);
+				if (ret) {
+					dev_err(swrm->dev,
+						"%s: failed to reset swr device %d\n",
+						__func__, swr_dev->dev_num);
+					swrm_clk_request(swrm, false);
+				}
+			}
+			pm_runtime_mark_last_busy(&pdev->dev);
+			pm_runtime_put_autosuspend(&pdev->dev);
+		}
+		mutex_unlock(&swrm->reslock);
+		mutex_unlock(&swrm->mlock);
+		break;
+	case SWR_SET_NUM_RX_CH:
+		if (!data) {
+			dev_err(swrm->dev, "%s: data is NULL\n", __func__);
+			ret = -EINVAL;
+		} else {
+			mutex_lock(&swrm->mlock);
+			swrm->num_rx_chs = *(int *)data;
+			if ((swrm->num_rx_chs > 1) && !swrm->num_cfg_devs) {
+				list_for_each_entry(swr_dev, &mstr->devices,
+						    dev_list) {
+					ret = swr_set_device_group(swr_dev,
+								SWR_BROADCAST);
+					if (ret)
+						dev_err(swrm->dev,
+							"%s: set num ch failed\n",
+							__func__);
+				}
+			} else {
+				list_for_each_entry(swr_dev, &mstr->devices,
+						    dev_list) {
+					ret = swr_set_device_group(swr_dev,
+								SWR_GROUP_NONE);
+					if (ret)
+						dev_err(swrm->dev,
+							"%s: set num ch failed\n",
+							__func__);
+				}
+			}
+			mutex_unlock(&swrm->mlock);
+		}
+		break;
+	default:
+		dev_err(swrm->dev, "%s: swr master unknown id %d\n",
+			__func__, id);
+		break;
+	}
+	return ret;
+}
+EXPORT_SYMBOL(swrm_wcd_notify);
+
+#ifdef CONFIG_PM_SLEEP
+static int swrm_suspend(struct device *dev)
+{
+	int ret = -EBUSY;
+	struct platform_device *pdev = to_platform_device(dev);
+	struct swr_mstr_ctrl *swrm = platform_get_drvdata(pdev);
+
+	dev_dbg(dev, "%s: system suspend, state: %d\n", __func__, swrm->state);
+	if (!pm_runtime_enabled(dev) || !pm_runtime_suspended(dev)) {
+		ret = swrm_runtime_suspend(dev);
+		if (!ret) {
+			/*
+			 * Synchronize runtime-pm and system-pm states:
+			 * At this point, we are already suspended. If
+			 * runtime-pm still thinks it's active, then
+			 * make sure its status is in sync with HW
+			 * status. The three calls below let the
+			 * runtime-pm know that we are suspended
+			 * already without re-invoking the suspend
+			 * callback.
+			 */
+			pm_runtime_disable(dev);
+			pm_runtime_set_suspended(dev);
+			pm_runtime_enable(dev);
+		}
+	}
+	if (ret == -EBUSY) {
+		/*
+		 * There is a possibility that some audio stream is active
+		 * during suspend. We don't want to return suspend failure in
+		 * that case so that display and relevant components can still
+		 * go to suspend.
+		 * If there is some other error, then it should be passed on
+		 * to system-level suspend.
+		 */
+		ret = 0;
+	}
+	return ret;
+}
+
+static int swrm_resume(struct device *dev)
+{
+	int ret = 0;
+	struct platform_device *pdev = to_platform_device(dev);
+	struct swr_mstr_ctrl *swrm = platform_get_drvdata(pdev);
+
+	dev_dbg(dev, "%s: system resume, state: %d\n", __func__, swrm->state);
+	if (!pm_runtime_enabled(dev) || !pm_runtime_suspended(dev)) {
+		ret = swrm_runtime_resume(dev);
+		if (!ret) {
+			pm_runtime_mark_last_busy(dev);
+			pm_request_autosuspend(dev);
+		}
+	}
+	return ret;
+}
+#endif /* CONFIG_PM_SLEEP */
+
+static const struct dev_pm_ops swrm_dev_pm_ops = {
+	SET_SYSTEM_SLEEP_PM_OPS(
+		swrm_suspend,
+		swrm_resume
+	)
+	SET_RUNTIME_PM_OPS(
+		swrm_runtime_suspend,
+		swrm_runtime_resume,
+		NULL
+	)
+};
+
+static const struct of_device_id swrm_dt_match[] = {
+	{
+		.compatible = "qcom,swr-wcd",
+	},
+	{}
+};
+
+static struct platform_driver swr_mstr_driver = {
+	.probe = swrm_probe,
+	.remove = swrm_remove,
+	.driver = {
+		.name = SWR_WCD_NAME,
+		.owner = THIS_MODULE,
+		.pm = &swrm_dev_pm_ops,
+		.of_match_table = swrm_dt_match,
+	},
+};
+
+static int __init swrm_init(void)
+{
+	return platform_driver_register(&swr_mstr_driver);
+}
+subsys_initcall(swrm_init);
+
+static void __exit swrm_exit(void)
+{
+	platform_driver_unregister(&swr_mstr_driver);
+}
+module_exit(swrm_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("WCD SoundWire Controller");
+MODULE_ALIAS("platform:swr-wcd");
diff -Nruw linux-4.4.115/drivers/soundwire/swr-wcd-ctrl.h linux-4.4.115-fbx/drivers/soundwire/swr-wcd-ctrl.h
--- linux-4.4.115/drivers/soundwire/swr-wcd-ctrl.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/soundwire/swr-wcd-ctrl.h	2019-01-22 16:16:26.687275240 +0100
@@ -0,0 +1,107 @@
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _SWR_WCD_CTRL_H
+#define _SWR_WCD_CTRL_H
+#include <linux/module.h>
+#include <linux/soundwire/swr-wcd.h>
+
+#define SWR_MAX_ROW		0 /* Rows = 48 */
+#define SWR_MAX_COL		7 /* Cols = 16 */
+#define SWR_MIN_COL		0 /* Cols = 2 */
+
+#define SWR_WCD_NAME	"swr-wcd"
+
+#define SWR_MSTR_PORT_LEN	8 /* Number of master ports */
+
+#define SWRM_VERSION_1_0 0x01010000
+#define SWRM_VERSION_1_2 0x01030000
+#define SWRM_VERSION_1_3 0x01040000
+
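+/*
+ * Controller power states: probe starts in SWR_MSTR_RESUME,
+ * swrm_clk_request() moves between SWR_MSTR_UP and SWR_MSTR_DOWN, and
+ * swrm_clk_pause() parks the bus clock in SWR_MSTR_PAUSE.
+ */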
+enum {
+	SWR_MSTR_PAUSE,
+	SWR_MSTR_RESUME,
+	SWR_MSTR_UP,
+	SWR_MSTR_DOWN,
+};
+
+enum {
+	SWR_IRQ_FREE,
+	SWR_IRQ_REGISTER,
+};
+
+enum {
+	SWR_DAC_PORT,
+	SWR_COMP_PORT,
+	SWR_BOOST_PORT,
+	SWR_VISENSE_PORT,
+};
+
+struct usecase {
+	u8 num_port;
+	u8 num_ch;
+	u32 chrate;
+};
+
+struct port_params {
+	u8 si;
+	u8 off1;
+	u8 off2;
+};
+
+struct swrm_mports {
+	struct list_head list;
+	u8 id;
+};
+
+struct swr_ctrl_platform_data {
+	void *handle; /* holds priv data */
+	int (*read)(void *handle, int reg);
+	int (*write)(void *handle, int reg, int val);
+	int (*bulk_write)(void *handle, u32 *reg, u32 *val, size_t len);
+	int (*clk)(void *handle, bool enable);
+	int (*reg_irq)(void *handle, irqreturn_t(*irq_handler)(int irq,
+			void *data), void *swr_handle, int type);
+};
+
+struct swr_mstr_ctrl {
+	struct swr_master master;
+	struct device *dev;
+	struct resource *supplies;
+	struct clk *mclk;
+	int clk_ref_count;
+	struct completion reset;
+	struct completion broadcast;
+	struct mutex mlock;
+	struct mutex reslock;
+	u8 rcmd_id;
+	u8 wcmd_id;
+	void *handle; /* SWR Master handle from client for read and writes */
+	int (*read)(void *handle, int reg);
+	int (*write)(void *handle, int reg, int val);
+	int (*bulk_write)(void *handle, u32 *reg, u32 *val, size_t len);
+	int (*clk)(void *handle, bool enable);
+	int (*reg_irq)(void *handle, irqreturn_t(*irq_handler)(int irq,
+			void *data), void *swr_handle, int type);
+	int irq;
+	int version;
+	int num_enum_slaves;
+	int slave_status;
+	struct swr_mstr_port *mstr_port;
+	struct list_head mport_list;
+	int state;
+	struct platform_device *pdev;
+	int num_rx_chs;
+	u8 num_cfg_devs;
+};
+
+#endif /* _SWR_WCD_CTRL_H */
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/spi/spi_qsd.c	2019-10-29 09:26:24.841214902 +0100
@@ -0,0 +1,2932 @@
+/* Copyright (c) 2008-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+/*
+ * SPI driver for Qualcomm MSM platforms
+ *
+ */
+
+#include <linux/version.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/spinlock.h>
+#include <linux/list.h>
+#include <linux/irq.h>
+#include <linux/platform_device.h>
+#include <linux/spi/spi.h>
+#include <linux/interrupt.h>
+#include <linux/err.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/workqueue.h>
+#include <linux/io.h>
+#include <linux/debugfs.h>
+#include <linux/gpio.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_gpio.h>
+#include <linux/dma-mapping.h>
+#include <linux/sched.h>
+#include <linux/mutex.h>
+#include <linux/atomic.h>
+#include <linux/pm_runtime.h>
+#include <linux/spi/qcom-spi.h>
+#include <linux/msm-sps.h>
+#include <linux/msm-bus.h>
+#include <linux/msm-bus-board.h>
+#include "spi_qsd.h"
+
+#define SPI_MAX_BYTES_PER_WORD			(4)
+
+static int msm_spi_pm_resume_runtime(struct device *device);
+static int msm_spi_pm_suspend_runtime(struct device *device);
+static inline void msm_spi_dma_unmap_buffers(struct msm_spi *dd);
+static int get_local_resources(struct msm_spi *dd);
+static void put_local_resources(struct msm_spi *dd);
+static void msm_spi_slv_setup(struct msm_spi *dd);
+static inline int msm_spi_wait_valid(struct msm_spi *dd);
+static int reset_core(struct msm_spi *dd);
+
+static inline int msm_spi_configure_gsbi(struct msm_spi *dd,
+					struct platform_device *pdev)
+{
+	struct resource *resource;
+	unsigned long   gsbi_mem_phys_addr;
+	size_t          gsbi_mem_size;
+	void __iomem    *gsbi_base;
+
+	resource  = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+	if (!resource)
+		return 0;
+
+	gsbi_mem_phys_addr = resource->start;
+	gsbi_mem_size = resource_size(resource);
+	if (!devm_request_mem_region(&pdev->dev, gsbi_mem_phys_addr,
+					gsbi_mem_size, SPI_DRV_NAME))
+		return -ENXIO;
+
+	gsbi_base = devm_ioremap(&pdev->dev, gsbi_mem_phys_addr,
+					gsbi_mem_size);
+	if (!gsbi_base)
+		return -ENXIO;
+
+	/* Set GSBI to SPI mode */
+	writel_relaxed(GSBI_SPI_CONFIG, gsbi_base + GSBI_CTRL_REG);
+
+	return 0;
+}
+
+static inline int msm_spi_register_init(struct msm_spi *dd)
+{
+	if (dd->pdata->is_slv_ctrl) {
+		writel_relaxed(0x00000002, dd->base + SPI_SW_RESET);
+		if (msm_spi_wait_valid(dd))
+			return -EIO;
+	} else {
+		writel_relaxed(0x00000001, dd->base + SPI_SW_RESET);
+	}
+	msm_spi_set_state(dd, SPI_OP_STATE_RESET);
+	writel_relaxed(0x00000000, dd->base + SPI_OPERATIONAL);
+	writel_relaxed(0x00000000, dd->base + SPI_CONFIG);
+	writel_relaxed(0x00000000, dd->base + SPI_IO_MODES);
+	if (dd->qup_ver)
+		writel_relaxed(0x00000000, dd->base + QUP_OPERATIONAL_MASK);
+	return 0;
+}
+
+static int msm_spi_pinctrl_init(struct msm_spi *dd)
+{
+	dd->pinctrl = devm_pinctrl_get(dd->dev);
+	if (IS_ERR_OR_NULL(dd->pinctrl)) {
+		dev_err(dd->dev, "Failed to get pin ctrl\n");
+		return PTR_ERR(dd->pinctrl);
+	}
+	dd->pins_active = pinctrl_lookup_state(dd->pinctrl,
+				SPI_PINCTRL_STATE_DEFAULT);
+	if (IS_ERR_OR_NULL(dd->pins_active)) {
+		dev_err(dd->dev, "Failed to lookup pinctrl default state\n");
+		return PTR_ERR(dd->pins_active);
+	}
+
+	dd->pins_sleep = pinctrl_lookup_state(dd->pinctrl,
+				SPI_PINCTRL_STATE_SLEEP);
+	if (IS_ERR_OR_NULL(dd->pins_sleep)) {
+		dev_err(dd->dev, "Failed to lookup pinctrl sleep state\n");
+		return PTR_ERR(dd->pins_sleep);
+	}
+
+	return 0;
+}
+
+static inline int msm_spi_request_gpios(struct msm_spi *dd)
+{
+	int i = 0;
+	int result = 0;
+
+	if (!dd->pdata->use_pinctrl) {
+		for (i = 0; i < ARRAY_SIZE(spi_rsrcs); ++i) {
+			if (dd->spi_gpios[i] >= 0) {
+				result = gpio_request(dd->spi_gpios[i],
+						spi_rsrcs[i]);
+				if (result) {
+					dev_err(dd->dev,
+					"%s: gpio_request for pin %d "
+					"failed with error %d\n"
+					, __func__, dd->spi_gpios[i], result);
+					goto error;
+				}
+			}
+		}
+	} else {
+		result = pinctrl_select_state(dd->pinctrl, dd->pins_active);
+		if (result) {
+			dev_err(dd->dev, "%s: Can not set %s pins\n",
+			__func__, SPI_PINCTRL_STATE_DEFAULT);
+			goto error;
+		}
+	}
+	return 0;
+error:
+	if (!dd->pdata->use_pinctrl) {
+		for (; --i >= 0;) {
+			if (dd->spi_gpios[i] >= 0)
+				gpio_free(dd->spi_gpios[i]);
+		}
+	}
+	return result;
+}
+
+static inline void msm_spi_free_gpios(struct msm_spi *dd)
+{
+	int i;
+	int result = 0;
+
+	if (!dd->pdata->use_pinctrl) {
+		for (i = 0; i < ARRAY_SIZE(spi_rsrcs); ++i) {
+			if (dd->spi_gpios[i] >= 0)
+				gpio_free(dd->spi_gpios[i]);
+		}
+
+		for (i = 0; i < ARRAY_SIZE(spi_cs_rsrcs); ++i) {
+			if (dd->cs_gpios[i].valid) {
+				gpio_free(dd->cs_gpios[i].gpio_num);
+				dd->cs_gpios[i].valid = 0;
+			}
+		}
+	} else {
+		result = pinctrl_select_state(dd->pinctrl, dd->pins_sleep);
+		if (result)
+			dev_err(dd->dev, "%s: Can not set %s pins\n",
+			__func__, SPI_PINCTRL_STATE_SLEEP);
+	}
+}
+
+static inline int msm_spi_request_cs_gpio(struct msm_spi *dd)
+{
+	int cs_num;
+	int rc;
+
+	cs_num = dd->spi->chip_select;
+	if (!(dd->spi->mode & SPI_LOOP)) {
+		if (!dd->pdata->use_pinctrl) {
+			if ((!(dd->cs_gpios[cs_num].valid)) &&
+				(dd->cs_gpios[cs_num].gpio_num >= 0)) {
+				rc = gpio_request(dd->cs_gpios[cs_num].gpio_num,
+					spi_cs_rsrcs[cs_num]);
+
+				if (rc) {
+					dev_err(dd->dev,
+					"gpio_request for pin %d failed,error %d\n",
+					dd->cs_gpios[cs_num].gpio_num, rc);
+					return rc;
+				}
+				dd->cs_gpios[cs_num].valid = 1;
+			}
+		}
+	}
+	return 0;
+}
+
+static inline void msm_spi_free_cs_gpio(struct msm_spi *dd)
+{
+	int cs_num;
+
+	cs_num = dd->spi->chip_select;
+	if (!dd->pdata->use_pinctrl) {
+		if (dd->cs_gpios[cs_num].valid) {
+			gpio_free(dd->cs_gpios[cs_num].gpio_num);
+			dd->cs_gpios[cs_num].valid = 0;
+		}
+	}
+}
+
+/**
+ * msm_spi_clk_max_rate: finds the nearest lower rate for a clk
+ * @clk the clock for which to find nearest lower rate
+ * @rate clock frequency in Hz
+ * @return nearest lower rate or negative error value
+ *
+ * Public clock API extends clk_round_rate which is a ceiling function. This
+ * function is a floor function implemented as a binary search using the
+ * ceiling function.
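+ *
+ * Worked example (hypothetical rate table {12, 25, 50} MHz): for a
+ * requested rate of 30 MHz, clk_round_rate() ceils to 50 MHz, so the
+ * search starts at guess = 30 MHz with step_size = (30 - 12) / 2 MHz,
+ * reverses direction and halves the step whenever it overshoots, and
+ * converges on 25 MHz, the nearest lower supported rate.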
+ */
+static long msm_spi_clk_max_rate(struct clk *clk, unsigned long rate)
+{
+	long lowest_available, nearest_low, step_size, cur;
+	long step_direction = -1;
+	long guess = rate;
+	int  max_steps = 10;
+
+	cur =  clk_round_rate(clk, rate);
+	if (cur == rate)
+		return rate;
+
+	/* if we got here then: cur > rate */
+	lowest_available =  clk_round_rate(clk, 0);
+	if (lowest_available > rate)
+		return -EINVAL;
+
+	step_size = (rate - lowest_available) >> 1;
+	nearest_low = lowest_available;
+
+	while (max_steps-- && step_size) {
+		guess += step_size * step_direction;
+
+		cur =  clk_round_rate(clk, guess);
+
+		if ((cur < rate) && (cur > nearest_low))
+			nearest_low = cur;
+
+		/*
+		 * if we stepped too far, then start stepping in the other
+		 * direction with half the step size
+		 */
+		if (((cur > rate) && (step_direction > 0))
+		 || ((cur < rate) && (step_direction < 0))) {
+			step_direction = -step_direction;
+			step_size >>= 1;
+		}
+	}
+	return nearest_low;
+}
+
+static void msm_spi_clock_set(struct msm_spi *dd, int speed)
+{
+	long rate;
+	int rc;
+
+	rate = msm_spi_clk_max_rate(dd->clk, speed);
+	if (rate < 0) {
+		dev_err(dd->dev,
+		"%s: no match found for requested clock frequency:%d",
+			__func__, speed);
+		return;
+	}
+
+	rc = clk_set_rate(dd->clk, rate);
+	if (!rc)
+		dd->clock_speed = rate;
+}
+
+static void msm_spi_clk_path_vote(struct msm_spi *dd, u32 rate)
+{
+	if (dd->bus_cl_hdl) {
+		u64 ib = rate * dd->pdata->bus_width;
+
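+		/*
+		 * e.g. a 50 MHz clock on a 4-byte-wide bus (assuming
+		 * bus_width is in bytes) requests an instantaneous
+		 * bandwidth (ib) of 200 MB/s; the average bandwidth (ab)
+		 * vote is left at 0.
+		 */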
+		msm_bus_scale_update_bw(dd->bus_cl_hdl, 0, ib);
+	}
+}
+
+static void msm_spi_clk_path_teardown(struct msm_spi *dd)
+{
+	msm_spi_clk_path_vote(dd, 0);
+
+	if (dd->bus_cl_hdl) {
+		msm_bus_scale_unregister(dd->bus_cl_hdl);
+		dd->bus_cl_hdl = NULL;
+	}
+}
+
+/**
+ * msm_spi_clk_path_postponed_register: reg with bus-scaling after it is probed
+ *
+ * @return zero on success
+ *
+ * Workaround: SPI driver may be probed before the bus scaling driver. Calling
+ * msm_bus_scale_register_client() will fail if the bus scaling driver is not
+ * ready yet. Thus, this function should be called not from probe but from a
+ * later context. It may also be called more than once before registration
+ * succeeds, in which case only one error message is logged. At boot time all
+ * clocks are on, so earlier SPI transactions should succeed.
+ */
+static int msm_spi_clk_path_postponed_register(struct msm_spi *dd)
+{
+	int ret = 0;
+
+	dd->bus_cl_hdl = msm_bus_scale_register(dd->pdata->master_id,
+						MSM_BUS_SLAVE_EBI_CH0,
+						(char *)dev_name(dd->dev),
+						false);
+
+	if (IS_ERR_OR_NULL(dd->bus_cl_hdl)) {
+		ret = (dd->bus_cl_hdl ? PTR_ERR(dd->bus_cl_hdl) : -EAGAIN);
+		dev_err(dd->dev, "Failed bus registration Err %d", ret);
+	}
+
+	return ret;
+}
+
+static void msm_spi_clk_path_init(struct msm_spi *dd)
+{
+	/*
+	 * bail out if path voting is disabled (master_id == 0) or if it is
+	 * already registered (bus_cl_hdl != NULL)
+	 */
+	if (!dd->pdata->master_id || dd->bus_cl_hdl)
+		return;
+
+	/* on failure try again later */
+	if (msm_spi_clk_path_postponed_register(dd))
+		return;
+}
+
+static int msm_spi_calculate_size(int *fifo_size,
+				  int *block_size,
+				  int block,
+				  int mult)
+{
+	int words;
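+
+	/*
+	 * Worked example: block = 1 selects 4-word (16-byte) blocks and
+	 * mult = 2 selects an 8-block FIFO, giving *fifo_size = 32 words
+	 * and *block_size = 16 bytes.
+	 */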
+
+	switch (block) {
+	case 0:
+		words = 1; /* 4 bytes */
+		break;
+	case 1:
+		words = 4; /* 16 bytes */
+		break;
+	case 2:
+		words = 8; /* 32 bytes */
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	switch (mult) {
+	case 0:
+		*fifo_size = words * 2;
+		break;
+	case 1:
+		*fifo_size = words * 4;
+		break;
+	case 2:
+		*fifo_size = words * 8;
+		break;
+	case 3:
+		*fifo_size = words * 16;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	*block_size = words * sizeof(u32); /* in bytes */
+	return 0;
+}
+
+static void msm_spi_calculate_fifo_size(struct msm_spi *dd)
+{
+	u32 spi_iom;
+	int block;
+	int mult;
+
+	spi_iom = readl_relaxed(dd->base + SPI_IO_MODES);
+
+	block = (spi_iom & SPI_IO_M_INPUT_BLOCK_SIZE) >> INPUT_BLOCK_SZ_SHIFT;
+	mult = (spi_iom & SPI_IO_M_INPUT_FIFO_SIZE) >> INPUT_FIFO_SZ_SHIFT;
+	if (msm_spi_calculate_size(&dd->input_fifo_size, &dd->input_block_size,
+				   block, mult)) {
+		goto fifo_size_err;
+	}
+
+	block = (spi_iom & SPI_IO_M_OUTPUT_BLOCK_SIZE) >> OUTPUT_BLOCK_SZ_SHIFT;
+	mult = (spi_iom & SPI_IO_M_OUTPUT_FIFO_SIZE) >> OUTPUT_FIFO_SZ_SHIFT;
+	if (msm_spi_calculate_size(&dd->output_fifo_size,
+				   &dd->output_block_size, block, mult)) {
+		goto fifo_size_err;
+	}
+	if (dd->qup_ver == SPI_QUP_VERSION_NONE) {
+		/* DM mode is not available for this block size */
+		if (dd->input_block_size == 4 || dd->output_block_size == 4)
+			dd->use_dma = 0;
+
+		if (dd->use_dma) {
+			dd->input_burst_size = max(dd->input_block_size,
+						DM_BURST_SIZE);
+			dd->output_burst_size = max(dd->output_block_size,
+						DM_BURST_SIZE);
+		}
+	}
+
+	return;
+
+fifo_size_err:
+	dd->use_dma = 0;
+	pr_err("%s: invalid FIFO size, SPI_IO_MODES=0x%x\n", __func__, spi_iom);
+	return;
+}
+
+static void msm_spi_read_word_from_fifo(struct msm_spi *dd)
+{
+	u32   data_in;
+	int   i;
+	int   shift;
+	int   read_bytes = (dd->pack_words ?
+				SPI_MAX_BYTES_PER_WORD : dd->bytes_per_word);
+
+	data_in = readl_relaxed(dd->base + SPI_INPUT_FIFO);
+	if (dd->read_buf) {
+		for (i = 0; (i < read_bytes) &&
+			     dd->rx_bytes_remaining; i++) {
+			/* The data format depends on bytes_per_word:
+			   4 bytes: 0x12345678
+			   3 bytes: 0x00123456
+			   2 bytes: 0x00001234
+			   1 byte : 0x00000012
+			*/
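+			/*
+			 * e.g. with bytes_per_word = 3 and data_in =
+			 * 0x00123456, this stores 0x56, 0x34 and 0x12
+			 * into *read_buf, least significant byte first.
+			 */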
+			shift = BITS_PER_BYTE * i;
+			*dd->read_buf++ = (data_in & (0xFF << shift)) >> shift;
+			dd->rx_bytes_remaining--;
+		}
+	} else {
+		if (dd->rx_bytes_remaining >= read_bytes)
+			dd->rx_bytes_remaining -= read_bytes;
+		else
+			dd->rx_bytes_remaining = 0;
+	}
+
+	dd->read_xfr_cnt++;
+}
+
+static inline bool msm_spi_is_valid_state(struct msm_spi *dd)
+{
+	u32 spi_op = readl_relaxed(dd->base + SPI_STATE);
+
+	return spi_op & SPI_OP_STATE_VALID;
+}
+
+static inline void msm_spi_udelay(unsigned int delay_usecs)
+{
+	/*
+	 * For smaller values of delay, context switch time
+	 * would negate the usage of usleep
+	 */
+	if (delay_usecs > 20)
+		usleep_range(delay_usecs, delay_usecs);
+	else if (delay_usecs)
+		udelay(delay_usecs);
+}
+
+static inline int msm_spi_wait_valid(struct msm_spi *dd)
+{
+	unsigned int delay = 0;
+	unsigned long timeout = 0;
+
+	if (dd->clock_speed == 0)
+		return -EINVAL;
+	/*
+	 * Based on the SPI clock speed, sufficient time
+	 * should be given for the SPI state transition
+	 * to occur
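+	 * (e.g. at 1 MHz this ten-clock window is 10 us, while at 50 MHz
+	 * the integer division yields 0 and the threshold below is used)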
+	 */
+	delay = (10 * USEC_PER_SEC) / dd->clock_speed;
+	/*
+	 * For small delay values, the default timeout would
+	 * be one jiffy
+	 */
+	if (delay < SPI_DELAY_THRESHOLD)
+		delay = SPI_DELAY_THRESHOLD;
+
+	/* Adding one to round off to the nearest jiffy */
+	timeout = jiffies + msecs_to_jiffies(delay * SPI_DEFAULT_TIMEOUT) + 1;
+	while (!msm_spi_is_valid_state(dd)) {
+		if (time_after(jiffies, timeout)) {
+			if (!msm_spi_is_valid_state(dd)) {
+				dev_err(dd->dev, "%s: SPI operational state "
+					"not valid\n", __func__);
+				return -ETIMEDOUT;
+			} else
+				return 0;
+		}
+		msm_spi_udelay(delay);
+	}
+	return 0;
+}
+
+static inline int msm_spi_set_state(struct msm_spi *dd,
+				    enum msm_spi_state state)
+{
+	enum msm_spi_state cur_state;
+	if (msm_spi_wait_valid(dd))
+		return -EIO;
+	cur_state = readl_relaxed(dd->base + SPI_STATE);
+	/* Per spec:
+	   For PAUSE_STATE to RESET_STATE, two writes of (10) are required */
+	if (((cur_state & SPI_OP_STATE) == SPI_OP_STATE_PAUSE) &&
+			(state == SPI_OP_STATE_RESET)) {
+		writel_relaxed(SPI_OP_STATE_CLEAR_BITS, dd->base + SPI_STATE);
+		writel_relaxed(SPI_OP_STATE_CLEAR_BITS, dd->base + SPI_STATE);
+	} else {
+		writel_relaxed((cur_state & ~SPI_OP_STATE) | state,
+		       dd->base + SPI_STATE);
+	}
+	if (msm_spi_wait_valid(dd))
+		return -EIO;
+	atomic_set(&dd->qup_state, state);
+	return 0;
+}
+
+/**
+ * msm_spi_set_bpw_and_no_io_flags: configure N, and no-input/no-output flags
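+ *
+ * e.g. for 8 bits-per-word the caller passes n = 7, which lands in the
+ * SPI_CFG_N field; in BAM mode a missing read or write buffer also sets
+ * SPI_NO_INPUT or SPI_NO_OUTPUT respectively.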
+ */
+static inline void
+msm_spi_set_bpw_and_no_io_flags(struct msm_spi *dd, u32 *config, int n)
+{
+	*config &= ~(SPI_NO_INPUT|SPI_NO_OUTPUT);
+
+	if (n != (*config & SPI_CFG_N))
+		*config = (*config & ~SPI_CFG_N) | n;
+
+	if (dd->tx_mode == SPI_BAM_MODE) {
+		if (dd->read_buf == NULL)
+			*config |= SPI_NO_INPUT;
+		if (dd->write_buf == NULL)
+			*config |= SPI_NO_OUTPUT;
+	}
+}
+
+/**
+ * msm_spi_calc_spi_config_loopback_and_input_first: Calculate the values that
+ * should be updated into SPI_CONFIG's LOOPBACK and INPUT_FIRST flags
+ * @return calculated value for SPI_CONFIG
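+ *
+ * e.g. SPI mode 0 (CPOL = 0, CPHA = 0) sets SPI_CFG_INPUT_FIRST so input
+ * is sampled on the leading clock edge, while mode 1 clears it; SPI_LOOP
+ * toggles SPI_CFG_LOOPBACK for controller self-test.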
+ */
+static u32
+msm_spi_calc_spi_config_loopback_and_input_first(u32 spi_config, u8 mode)
+{
+	if (mode & SPI_LOOP)
+		spi_config |= SPI_CFG_LOOPBACK;
+	else
+		spi_config &= ~SPI_CFG_LOOPBACK;
+
+	if (mode & SPI_CPHA)
+		spi_config &= ~SPI_CFG_INPUT_FIRST;
+	else
+		spi_config |= SPI_CFG_INPUT_FIRST;
+
+	return spi_config;
+}
+
+/**
+ * msm_spi_set_spi_config: prepares register SPI_CONFIG to process the
+ * next transfer
+ */
+static void msm_spi_set_spi_config(struct msm_spi *dd, int bpw)
+{
+	u32 spi_config = readl_relaxed(dd->base + SPI_CONFIG);
+	spi_config = msm_spi_calc_spi_config_loopback_and_input_first(
+					spi_config, dd->spi->mode);
+
+	if (dd->qup_ver == SPI_QUP_VERSION_NONE)
+		/* flags removed from SPI_CONFIG in QUP version-2 */
+		msm_spi_set_bpw_and_no_io_flags(dd, &spi_config, bpw-1);
+
+	/*
+	 * HS_MODE improves signal stability for spi-clk high rates
+	 * but is invalid in LOOPBACK mode.
+	 */
+	if ((dd->clock_speed >= SPI_HS_MIN_RATE) &&
+	   !(dd->spi->mode & SPI_LOOP))
+		spi_config |= SPI_CFG_HS_MODE;
+	else
+		spi_config &= ~SPI_CFG_HS_MODE;
+
+	writel_relaxed(spi_config, dd->base + SPI_CONFIG);
+}
+
+/**
+ * msm_spi_set_mx_counts: set SPI_MX_READ_COUNT and the write count for
+ * FIFO mode; set SPI_MX_INPUT_COUNT and SPI_MX_OUTPUT_COUNT for
+ * BAM and DMOV modes.
+ * @n_words The number of reads/writes of size N.
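+ *
+ * e.g. in FIFO mode with n_words = 8 and a 16-word input FIFO, the
+ * write count and SPI_MX_READ_COUNT are both set to 8 (finite mode),
+ * while SPI_MX_INPUT/OUTPUT_COUNT stay 0.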
+ */
+static void msm_spi_set_mx_counts(struct msm_spi *dd, u32 n_words)
+{
+	/*
+	 * For FIFO mode:
+	 *   - Set the MX_OUTPUT_COUNT/MX_INPUT_COUNT registers to 0
+	 *   - Set the READ/WRITE_COUNT registers to 0 (infinite mode)
+	 *     or num bytes (finite mode) if less than fifo worth of data.
+	 * For Block mode:
+	 *  - Set the MX_OUTPUT/MX_INPUT_COUNT registers to num xfer bytes.
+	 *  - Set the READ/WRITE_COUNT registers to 0.
+	 */
+	if (dd->tx_mode != SPI_BAM_MODE) {
+		if (dd->tx_mode == SPI_FIFO_MODE) {
+			if (n_words <= dd->input_fifo_size)
+				msm_spi_set_write_count(dd, n_words);
+			else
+				msm_spi_set_write_count(dd, 0);
+			writel_relaxed(0, dd->base + SPI_MX_OUTPUT_COUNT);
+		} else
+			writel_relaxed(n_words, dd->base + SPI_MX_OUTPUT_COUNT);
+
+		if (dd->rx_mode == SPI_FIFO_MODE) {
+			if (n_words <= dd->input_fifo_size)
+				writel_relaxed(n_words,
+						dd->base + SPI_MX_READ_COUNT);
+			else
+				writel_relaxed(0,
+						dd->base + SPI_MX_READ_COUNT);
+			writel_relaxed(0, dd->base + SPI_MX_INPUT_COUNT);
+		} else
+			writel_relaxed(n_words, dd->base + SPI_MX_INPUT_COUNT);
+	} else {
+		/* must be zero for BAM and DMOV */
+		writel_relaxed(0, dd->base + SPI_MX_READ_COUNT);
+		msm_spi_set_write_count(dd, 0);
+
+		/*
+		 * for DMA transfers, both QUP_MX_INPUT_COUNT and
+		 * QUP_MX_OUTPUT_COUNT must be zero in all cases but one.
+		 * That case is a non-balanced transfer when there is
+		 * only a read_buf.
+		 */
+		if (dd->qup_ver == SPI_QUP_VERSION_BFAM) {
+			if (dd->write_buf)
+				writel_relaxed(0,
+						dd->base + SPI_MX_INPUT_COUNT);
+			else
+				writel_relaxed(n_words,
+						dd->base + SPI_MX_INPUT_COUNT);
+
+			writel_relaxed(0, dd->base + SPI_MX_OUTPUT_COUNT);
+		}
+	}
+}
+
+static int msm_spi_bam_pipe_disconnect(struct msm_spi *dd,
+						struct msm_spi_bam_pipe  *pipe)
+{
+	int ret = sps_disconnect(pipe->handle);
+	if (ret) {
+		dev_dbg(dd->dev, "%s disconnect bam %s pipe failed\n",
+							__func__, pipe->name);
+		return ret;
+	}
+	return 0;
+}
+
+static int msm_spi_bam_pipe_connect(struct msm_spi *dd,
+		struct msm_spi_bam_pipe  *pipe, struct sps_connect *config)
+{
+	int ret;
+	struct sps_register_event event  = {
+		.mode      = SPS_TRIGGER_WAIT,
+		.options   = SPS_O_EOT,
+	};
+
+	if (pipe == &dd->bam.prod)
+		event.xfer_done = &dd->rx_transfer_complete;
+	else if (pipe == &dd->bam.cons)
+		event.xfer_done = &dd->tx_transfer_complete;
+
+	ret = sps_connect(pipe->handle, config);
+	if (ret) {
+		dev_err(dd->dev, "%s: sps_connect(%s:0x%p):%d",
+				__func__, pipe->name, pipe->handle, ret);
+		return ret;
+	}
+
+	ret = sps_register_event(pipe->handle, &event);
+	if (ret) {
+		dev_err(dd->dev, "%s sps_register_event(hndl:0x%p %s):%d",
+				__func__, pipe->handle, pipe->name, ret);
+		msm_spi_bam_pipe_disconnect(dd, pipe);
+		return ret;
+	}
+
+	pipe->teardown_required = true;
+	return 0;
+}
+
+static void msm_spi_bam_pipe_flush(struct msm_spi *dd,
+					enum msm_spi_pipe_direction pipe_dir)
+{
+	struct msm_spi_bam_pipe *pipe = (pipe_dir == SPI_BAM_CONSUMER_PIPE) ?
+					(&dd->bam.prod) : (&dd->bam.cons);
+	struct sps_connect           config  = pipe->config;
+	int    ret;
+
+	ret = msm_spi_bam_pipe_disconnect(dd, pipe);
+	if (ret)
+		return;
+
+	ret = msm_spi_bam_pipe_connect(dd, pipe, &config);
+	if (ret)
+		return;
+}
+
+static void msm_spi_bam_flush(struct msm_spi *dd)
+{
+	dev_dbg(dd->dev, "%s flushing bam for recovery\n" , __func__);
+
+	msm_spi_bam_pipe_flush(dd, SPI_BAM_CONSUMER_PIPE);
+	msm_spi_bam_pipe_flush(dd, SPI_BAM_PRODUCER_PIPE);
+}
+
+static int
+msm_spi_bam_process_rx(struct msm_spi *dd, u32 *bytes_to_send, u32 desc_cnt)
+{
+	int ret = 0;
+	u32 data_xfr_size = 0, rem_bc = 0;
+	u32 prod_flags = 0;
+
+	rem_bc = dd->cur_rx_transfer->len - dd->bam.curr_rx_bytes_recvd;
+	data_xfr_size = (rem_bc < *bytes_to_send) ? rem_bc : *bytes_to_send;
+
+	/*
+	 * set flags for last descriptor only
+	 */
+	if ((desc_cnt == 1)
+		|| (*bytes_to_send == data_xfr_size))
+		prod_flags = (dd->write_buf)
+			? 0 : (SPS_IOVEC_FLAG_EOT | SPS_IOVEC_FLAG_NWD);
+
+	/*
+	 * enqueue read buffer in BAM
+	 */
+	ret = sps_transfer_one(dd->bam.prod.handle,
+			dd->cur_rx_transfer->rx_dma
+				+ dd->bam.curr_rx_bytes_recvd,
+			data_xfr_size, dd, prod_flags);
+	if (ret < 0) {
+		dev_err(dd->dev,
+		"%s: Failed to queue producer BAM transfer",
+		__func__);
+		return ret;
+	}
+
+	dd->bam.curr_rx_bytes_recvd += data_xfr_size;
+	*bytes_to_send -= data_xfr_size;
+	dd->bam.bam_rx_len -= data_xfr_size;
+	return data_xfr_size;
+}
+
+static int
+msm_spi_bam_process_tx(struct msm_spi *dd, u32 *bytes_to_send, u32 desc_cnt)
+{
+	int ret = 0;
+	u32 data_xfr_size = 0, rem_bc = 0;
+	u32 cons_flags = 0;
+
+	rem_bc = dd->cur_tx_transfer->len - dd->bam.curr_tx_bytes_sent;
+	data_xfr_size = (rem_bc < *bytes_to_send) ? rem_bc : *bytes_to_send;
+
+	/*
+	 * set flags for last descriptor only
+	 */
+	if ((desc_cnt == 1)
+		|| (*bytes_to_send == data_xfr_size))
+		cons_flags = SPS_IOVEC_FLAG_EOT | SPS_IOVEC_FLAG_NWD;
+
+	/*
+	 * enqueue write buffer in BAM
+	 */
+	ret = sps_transfer_one(dd->bam.cons.handle,
+			dd->cur_tx_transfer->tx_dma
+				+ dd->bam.curr_tx_bytes_sent,
+			data_xfr_size, dd, cons_flags);
+	if (ret < 0) {
+		dev_err(dd->dev,
+		"%s: Failed to queue consumer BAM transfer",
+		__func__);
+		return ret;
+	}
+
+	dd->bam.curr_tx_bytes_sent	+= data_xfr_size;
+	*bytes_to_send	-= data_xfr_size;
+	dd->bam.bam_tx_len -= data_xfr_size;
+	return data_xfr_size;
+}
+
+/**
+ * msm_spi_bam_begin_transfer: transfer dd->tx_bytes_remaining bytes
+ * using BAM.
+ * @brief BAM can move at most SPI_MAX_TRFR_BTWN_RESETS bytes in a single
+ * transfer; between transfers the QUP must pass through the reset state.
+ * The caller loops, issuing one BAM transfer at a time.
+ * @return zero on success
+ */
+static int
+msm_spi_bam_begin_transfer(struct msm_spi *dd)
+{
+	u32 tx_bytes_to_send = 0, rx_bytes_to_recv = 0;
+	u32 n_words_xfr;
+	s32 ret = 0;
+	u32 prod_desc_cnt = SPI_BAM_MAX_DESC_NUM - 1;
+	u32 cons_desc_cnt = SPI_BAM_MAX_DESC_NUM - 1;
+	u32 byte_count = 0;
+
+	rx_bytes_to_recv = min_t(u32, dd->bam.bam_rx_len,
+				SPI_MAX_TRFR_BTWN_RESETS);
+	tx_bytes_to_send = min_t(u32, dd->bam.bam_tx_len,
+				SPI_MAX_TRFR_BTWN_RESETS);
+	n_words_xfr = DIV_ROUND_UP(rx_bytes_to_recv,
+				dd->bytes_per_word);
+
+	msm_spi_set_mx_counts(dd, n_words_xfr);
+	ret = msm_spi_set_state(dd, SPI_OP_STATE_RUN);
+	if (ret < 0) {
+		dev_err(dd->dev,
+			"%s: Failed to set QUP state to run",
+			__func__);
+		goto xfr_err;
+	}
+
+	while ((rx_bytes_to_recv + tx_bytes_to_send) &&
+		((cons_desc_cnt + prod_desc_cnt) > 0)) {
+		struct spi_transfer *t = NULL;
+
+		if (dd->read_buf && (prod_desc_cnt > 0)) {
+			ret = msm_spi_bam_process_rx(dd, &rx_bytes_to_recv,
+							prod_desc_cnt);
+			if (ret < 0)
+				goto xfr_err;
+
+			if (!(dd->cur_rx_transfer->len
+				- dd->bam.curr_rx_bytes_recvd))
+				t = dd->cur_rx_transfer;
+			prod_desc_cnt--;
+		}
+
+		if (dd->write_buf && (cons_desc_cnt > 0)) {
+			ret = msm_spi_bam_process_tx(dd, &tx_bytes_to_send,
+							cons_desc_cnt);
+			if (ret < 0)
+				goto xfr_err;
+
+			if (!(dd->cur_tx_transfer->len
+				- dd->bam.curr_tx_bytes_sent))
+				t = dd->cur_tx_transfer;
+			cons_desc_cnt--;
+		}
+
+		byte_count += ret;
+	}
+
+	dd->tx_bytes_remaining -= min_t(u32, byte_count,
+						SPI_MAX_TRFR_BTWN_RESETS);
+	return 0;
+xfr_err:
+	return ret;
+}
+
+static int
+msm_spi_bam_next_transfer(struct msm_spi *dd)
+{
+	if (dd->tx_mode != SPI_BAM_MODE)
+		return 0;
+
+	if (dd->tx_bytes_remaining > 0) {
+		if (msm_spi_set_state(dd, SPI_OP_STATE_RESET))
+			return 0;
+		if ((msm_spi_bam_begin_transfer(dd)) < 0) {
+			dev_err(dd->dev, "%s: BAM transfer setup failed\n",
+				__func__);
+			return 0;
+		}
+		return 1;
+	}
+	return 0;
+}
+
+static int msm_spi_dma_send_next(struct msm_spi *dd)
+{
+	int ret = 0;
+	if (dd->tx_mode == SPI_BAM_MODE)
+		ret = msm_spi_bam_next_transfer(dd);
+	return ret;
+}
+
+static inline void msm_spi_ack_transfer(struct msm_spi *dd)
+{
+	writel_relaxed(SPI_OP_MAX_INPUT_DONE_FLAG |
+		       SPI_OP_MAX_OUTPUT_DONE_FLAG,
+		       dd->base + SPI_OPERATIONAL);
+	/* Ensure done flag was cleared before proceeding further */
+	mb();
+}
+
+/* Figure out which IRQ occurred and call the relevant handlers */
+static inline irqreturn_t msm_spi_qup_irq(int irq, void *dev_id)
+{
+	u32 op, ret = IRQ_NONE;
+	u32 slv;
+	struct msm_spi *dd = dev_id;
+
+	if (pm_runtime_suspended(dd->dev)) {
+		dev_warn(dd->dev, "QUP: pm runtime suspend, irq:%d\n", irq);
+		return ret;
+	}
+	if (readl_relaxed(dd->base + SPI_ERROR_FLAGS) ||
+	    readl_relaxed(dd->base + QUP_ERROR_FLAGS)) {
+		struct spi_master *master = dev_get_drvdata(dd->dev);
+		ret |= msm_spi_error_irq(irq, master);
+	}
+
+	op = readl_relaxed(dd->base + SPI_OPERATIONAL);
+	slv = readl_relaxed(dd->base + SPI_SLAVE_IRQ_STATUS);
+	writel_relaxed(op, dd->base + SPI_OPERATIONAL);
+	writel_relaxed(slv, dd->base + SPI_SLAVE_IRQ_STATUS);
+	/*
+	 * Ensure service flag was cleared before further
+	 * processing of interrupt.
+	 */
+	mb();
+	if (op & SPI_OP_INPUT_SERVICE_FLAG) {
+		ret |= msm_spi_input_irq(irq, dev_id);
+	}
+
+	if (op & SPI_OP_OUTPUT_SERVICE_FLAG) {
+		ret |= msm_spi_output_irq(irq, dev_id);
+	}
+
+	if (dd->tx_mode != SPI_BAM_MODE) {
+		if (!dd->rx_done) {
+			if (dd->rx_bytes_remaining == 0)
+				dd->rx_done = true;
+		}
+		if (!dd->tx_done) {
+			if (!dd->tx_bytes_remaining &&
+					(op & SPI_OP_IP_FIFO_NOT_EMPTY)) {
+				dd->tx_done = true;
+			}
+		}
+	}
+	if (dd->tx_done && dd->rx_done) {
+		msm_spi_set_state(dd, SPI_OP_STATE_RESET);
+		dd->tx_done = false;
+		dd->rx_done = false;
+		complete(&dd->rx_transfer_complete);
+		complete(&dd->tx_transfer_complete);
+	}
+	return ret;
+}
+
+static irqreturn_t msm_spi_input_irq(int irq, void *dev_id)
+{
+	struct msm_spi	       *dd = dev_id;
+
+	dd->stat_rx++;
+
+	if (dd->rx_mode == SPI_MODE_NONE)
+		return IRQ_HANDLED;
+
+	if (dd->rx_mode == SPI_FIFO_MODE) {
+		while ((readl_relaxed(dd->base + SPI_OPERATIONAL) &
+			SPI_OP_IP_FIFO_NOT_EMPTY) &&
+			(dd->rx_bytes_remaining > 0)) {
+			msm_spi_read_word_from_fifo(dd);
+		}
+	} else if (dd->rx_mode == SPI_BLOCK_MODE) {
+		int count = 0;
+
+		while (dd->rx_bytes_remaining &&
+				(count < dd->input_block_size)) {
+			msm_spi_read_word_from_fifo(dd);
+			count += SPI_MAX_BYTES_PER_WORD;
+		}
+	}
+
+	return IRQ_HANDLED;
+}
+
+static void msm_spi_write_word_to_fifo(struct msm_spi *dd)
+{
+	u32    word;
+	u8     byte;
+	int    i;
+	int   write_bytes =
+		(dd->pack_words ? SPI_MAX_BYTES_PER_WORD : dd->bytes_per_word);
+
+	word = 0;
+	if (dd->write_buf) {
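+		/*
+		 * Mirror of the read path: e.g. with bytes_per_word = 2
+		 * and write_buf = {0x34, 0x12}, the packed word becomes
+		 * 0x00001234, with write_buf[0] in the least significant
+		 * byte.
+		 */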
+		for (i = 0; (i < write_bytes) &&
+			     dd->tx_bytes_remaining; i++) {
+			dd->tx_bytes_remaining--;
+			byte = *dd->write_buf++;
+			word |= (byte << (BITS_PER_BYTE * i));
+		}
+	} else
+		if (dd->tx_bytes_remaining > write_bytes)
+			dd->tx_bytes_remaining -= write_bytes;
+		else
+			dd->tx_bytes_remaining = 0;
+	dd->write_xfr_cnt++;
+
+	writel_relaxed(word, dd->base + SPI_OUTPUT_FIFO);
+}
+
+static inline void msm_spi_write_rmn_to_fifo(struct msm_spi *dd)
+{
+	int count = 0;
+
+	if (dd->tx_mode == SPI_FIFO_MODE) {
+		while ((dd->tx_bytes_remaining > 0) &&
+			(count < dd->input_fifo_size) &&
+		       !(readl_relaxed(dd->base + SPI_OPERATIONAL)
+						& SPI_OP_OUTPUT_FIFO_FULL)) {
+			msm_spi_write_word_to_fifo(dd);
+			count++;
+		}
+	}
+
+	if (dd->tx_mode == SPI_BLOCK_MODE) {
+		while (dd->tx_bytes_remaining &&
+				(count < dd->output_block_size)) {
+			msm_spi_write_word_to_fifo(dd);
+			count += SPI_MAX_BYTES_PER_WORD;
+		}
+	}
+}
+
+static irqreturn_t msm_spi_output_irq(int irq, void *dev_id)
+{
+	struct msm_spi	       *dd = dev_id;
+
+	dd->stat_tx++;
+
+	if (dd->tx_mode == SPI_MODE_NONE)
+		return IRQ_HANDLED;
+
+	/* Output FIFO is empty. Transmit any outstanding write data. */
+	if ((dd->tx_mode == SPI_FIFO_MODE) || (dd->tx_mode == SPI_BLOCK_MODE))
+		msm_spi_write_rmn_to_fifo(dd);
+
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t msm_spi_error_irq(int irq, void *dev_id)
+{
+	struct spi_master	*master = dev_id;
+	struct msm_spi          *dd = spi_master_get_devdata(master);
+	u32                      spi_err;
+
+	spi_err = readl_relaxed(dd->base + SPI_ERROR_FLAGS);
+	if (spi_err & SPI_ERR_OUTPUT_OVER_RUN_ERR)
+		dev_warn(master->dev.parent, "SPI output overrun error\n");
+	if (spi_err & SPI_ERR_INPUT_UNDER_RUN_ERR)
+		dev_warn(master->dev.parent, "SPI input underrun error\n");
+	if (spi_err & SPI_ERR_OUTPUT_UNDER_RUN_ERR)
+		dev_warn(master->dev.parent, "SPI output underrun error\n");
+	msm_spi_get_clk_err(dd, &spi_err);
+	if (spi_err & SPI_ERR_CLK_OVER_RUN_ERR)
+		dev_warn(master->dev.parent, "SPI clock overrun error\n");
+	if (spi_err & SPI_ERR_CLK_UNDER_RUN_ERR)
+		dev_warn(master->dev.parent, "SPI clock underrun error\n");
+	msm_spi_clear_error_flags(dd);
+	msm_spi_ack_clk_err(dd);
+	/* Ensure clearing of QUP_ERROR_FLAGS was completed */
+	mb();
+	return IRQ_HANDLED;
+}
+
+static int msm_spi_bam_map_buffers(struct msm_spi *dd)
+{
+	int ret = -EINVAL;
+	struct device *dev;
+	struct spi_transfer *xfr;
+	void *tx_buf, *rx_buf;
+	u32 tx_len, rx_len;
+
+	dev = dd->dev;
+	xfr = dd->cur_transfer;
+
+	tx_buf = (void *)xfr->tx_buf;
+	rx_buf = xfr->rx_buf;
+	tx_len = rx_len = xfr->len;
+	if (tx_buf != NULL) {
+		xfr->tx_dma = dma_map_single(dev, tx_buf,
+						tx_len, DMA_TO_DEVICE);
+		if (dma_mapping_error(dev, xfr->tx_dma)) {
+			ret = -ENOMEM;
+			goto error;
+		}
+	}
+
+	if (rx_buf != NULL) {
+		xfr->rx_dma = dma_map_single(dev, rx_buf, rx_len,
+						DMA_FROM_DEVICE);
+		if (dma_mapping_error(dev, xfr->rx_dma)) {
+			if (tx_buf != NULL)
+				dma_unmap_single(dev,
+						xfr->tx_dma,
+						tx_len, DMA_TO_DEVICE);
+			ret = -ENOMEM;
+			goto error;
+		}
+	}
+
+	return 0;
+error:
+	msm_spi_dma_unmap_buffers(dd);
+	return ret;
+}
+
+static int msm_spi_dma_map_buffers(struct msm_spi *dd)
+{
+	int ret = 0;
+	if (dd->tx_mode == SPI_BAM_MODE)
+		ret = msm_spi_bam_map_buffers(dd);
+	return ret;
+}
+
+static void msm_spi_bam_unmap_buffers(struct msm_spi *dd)
+{
+	struct device *dev;
+	struct spi_transfer *xfr;
+	void *tx_buf, *rx_buf;
+	u32  tx_len, rx_len;
+
+	dev = dd->dev;
+	xfr = dd->cur_transfer;
+
+	tx_buf = (void *)xfr->tx_buf;
+	rx_buf = xfr->rx_buf;
+	tx_len = rx_len = xfr->len;
+	if (tx_buf != NULL)
+		dma_unmap_single(dev, xfr->tx_dma,
+				tx_len, DMA_TO_DEVICE);
+
+	if (rx_buf != NULL)
+		dma_unmap_single(dev, xfr->rx_dma,
+				rx_len, DMA_FROM_DEVICE);
+}
+
+static inline void msm_spi_dma_unmap_buffers(struct msm_spi *dd)
+{
+	if (dd->tx_mode == SPI_BAM_MODE)
+		msm_spi_bam_unmap_buffers(dd);
+}
+
+/**
+ * msm_spi_use_dma - decides whether to use Data-Mover or BAM for
+ * the given transfer
+ * @dd:       device
+ * @tr:       transfer
+ *
+ * Start using DMA if:
+ * 1. Is supported by HW
+ * 2. Is not disabled by platform data
+ * 3. Transfer size is greater than 3*block size.
+ * 4. Buffers are aligned to cache line.
+ * 5. Bytes-per-word is 8,16 or 32.
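+ *
+ * e.g. a 100-byte transfer with a 16-byte input block size passes the
+ * size check (100 >= 3 * 16) but, on QUP v1, still needs cache-line
+ * aligned buffers before DMA is actually chosen.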
+ */
+static inline bool
+msm_spi_use_dma(struct msm_spi *dd, struct spi_transfer *tr, u8 bpw)
+{
+	if (!dd->use_dma)
+		return false;
+
+	/* check constraints from platform data */
+	if ((dd->qup_ver == SPI_QUP_VERSION_BFAM) && !dd->pdata->use_bam)
+		return false;
+
+	if (dd->cur_msg_len < 3*dd->input_block_size)
+		return false;
+
+	if ((dd->qup_ver != SPI_QUP_VERSION_BFAM) &&
+		 !dd->read_len && !dd->write_len)
+		return false;
+
+	if (dd->qup_ver == SPI_QUP_VERSION_NONE) {
+		u32 cache_line = dma_get_cache_alignment();
+
+		if (tr->tx_buf) {
+			if (!IS_ALIGNED((size_t)tr->tx_buf, cache_line))
+				return false;
+		}
+		if (tr->rx_buf) {
+			if (!IS_ALIGNED((size_t)tr->rx_buf, cache_line))
+				return false;
+		}
+
+		if (tr->cs_change &&
+		   ((bpw != 8) && (bpw != 16) && (bpw != 32)))
+			return false;
+	}
+
+	return true;
+}
+
+/**
+ * msm_spi_set_transfer_mode: Chooses optimal transfer mode. Sets dd->mode and
+ * prepares to process a transfer.
+ */
+static void
+msm_spi_set_transfer_mode(struct msm_spi *dd, u8 bpw, u32 read_count)
+{
+	if (dd->pdata->is_slv_ctrl) {
+		dd->tx_mode = SPI_BAM_MODE;
+		dd->rx_mode = SPI_BAM_MODE;
+	} else if (msm_spi_use_dma(dd, dd->cur_transfer, bpw)) {
+		dd->tx_mode = SPI_BAM_MODE;
+		dd->rx_mode = SPI_BAM_MODE;
+	} else {
+		dd->rx_mode = SPI_FIFO_MODE;
+		dd->tx_mode = SPI_FIFO_MODE;
+		dd->read_len = dd->cur_transfer->len;
+		dd->write_len = dd->cur_transfer->len;
+	}
+}
+
+/**
+ * msm_spi_set_qup_io_modes: prepares register QUP_IO_MODES to process a
+ * transfer
+ */
+static void msm_spi_set_qup_io_modes(struct msm_spi *dd)
+{
+	u32 spi_iom;
+	spi_iom = readl_relaxed(dd->base + SPI_IO_MODES);
+	/* Set input and output transfer mode: FIFO, DMOV, or BAM */
+	spi_iom &= ~(SPI_IO_M_INPUT_MODE | SPI_IO_M_OUTPUT_MODE);
+	spi_iom = (spi_iom | (dd->tx_mode << OUTPUT_MODE_SHIFT));
+	spi_iom = (spi_iom | (dd->rx_mode << INPUT_MODE_SHIFT));
+
+	/* Always enable packing in BAM mode; in non-BAM mode enable it only
+	 * if bpw is a multiple of 8 and the transfer length is a multiple
+	 * of 4 bytes.
+	 */
+	if (dd->tx_mode == SPI_BAM_MODE ||
+		((dd->cur_msg_len % SPI_MAX_BYTES_PER_WORD == 0) &&
+		(dd->cur_transfer->bits_per_word) &&
+		(dd->cur_transfer->bits_per_word <= 32) &&
+		(dd->cur_transfer->bits_per_word % 8 == 0))) {
+		spi_iom |= SPI_IO_M_PACK_EN | SPI_IO_M_UNPACK_EN;
+		dd->pack_words = true;
+	} else {
+		spi_iom &= ~(SPI_IO_M_PACK_EN | SPI_IO_M_UNPACK_EN);
+		spi_iom |= SPI_IO_M_OUTPUT_BIT_SHIFT_EN;
+		dd->pack_words = false;
+	}
+
+	/*if (dd->mode == SPI_BAM_MODE) {
+		spi_iom |= SPI_IO_C_NO_TRI_STATE;
+		spi_iom &= ~(SPI_IO_C_CS_SELECT | SPI_IO_C_CS_N_POLARITY);
+	}*/
+	writel_relaxed(spi_iom, dd->base + SPI_IO_MODES);
+}
+
+static u32 msm_spi_calc_spi_ioc_clk_polarity(u32 spi_ioc, u8 mode)
+{
+	if (mode & SPI_CPOL)
+		spi_ioc |= SPI_IO_C_CLK_IDLE_HIGH;
+	else
+		spi_ioc &= ~SPI_IO_C_CLK_IDLE_HIGH;
+	return spi_ioc;
+}
+
+/**
+ * msm_spi_set_spi_io_control: prepares register SPI_IO_CONTROL to process the
+ * next transfer
+ * @return the new set value of SPI_IO_CONTROL
+ */
+static u32 msm_spi_set_spi_io_control(struct msm_spi *dd)
+{
+	u32 spi_ioc, spi_ioc_orig, chip_select;
+
+	spi_ioc = readl_relaxed(dd->base + SPI_IO_CONTROL);
+	spi_ioc_orig = spi_ioc;
+	spi_ioc = msm_spi_calc_spi_ioc_clk_polarity(spi_ioc
+						, dd->spi->mode);
+	/* Set chip-select */
+	chip_select = dd->spi->chip_select << 2;
+	if ((spi_ioc & SPI_IO_C_CS_SELECT) != chip_select)
+		spi_ioc = (spi_ioc & ~SPI_IO_C_CS_SELECT) | chip_select;
+	if (!dd->cur_transfer->cs_change)
+		spi_ioc |= SPI_IO_C_MX_CS_MODE;
+
+	if (spi_ioc != spi_ioc_orig)
+		writel_relaxed(spi_ioc, dd->base + SPI_IO_CONTROL);
+
+	/*
+	 * Ensure that the IO control mode register gets written
+	 * before proceeding with the transfer.
+	 */
+	mb();
+	return spi_ioc;
+}
+
+/**
+ * msm_spi_set_qup_op_mask: prepares register QUP_OPERATIONAL_MASK to process
+ * the next transfer
+ */
+static void msm_spi_set_qup_op_mask(struct msm_spi *dd)
+{
+	/* mask the INPUT and OUTPUT service flags to prevent IRQs on FIFO
+	 * status changes in BAM mode */
+	u32 mask = (dd->tx_mode == SPI_BAM_MODE) ?
+		QUP_OP_MASK_OUTPUT_SERVICE_FLAG | QUP_OP_MASK_INPUT_SERVICE_FLAG
+		: 0;
+	writel_relaxed(mask, dd->base + QUP_OPERATIONAL_MASK);
+}
+
+static void get_transfer_length(struct msm_spi *dd)
+{
+	struct spi_transfer *xfer = dd->cur_transfer;
+
+	dd->cur_msg_len = 0;
+	dd->read_len = dd->write_len = 0;
+	dd->bam.bam_tx_len = dd->bam.bam_rx_len = 0;
+
+	if (xfer->tx_buf)
+		dd->bam.bam_tx_len = dd->write_len = xfer->len;
+	if (xfer->rx_buf)
+		dd->bam.bam_rx_len = dd->read_len = xfer->len;
+	dd->cur_msg_len = xfer->len;
+}
+
+static int msm_spi_process_transfer(struct msm_spi *dd)
+{
+	u8  bpw;
+	u32 max_speed = 0;
+	u32 read_count;
+	u32 timeout;
+	u32 spi_ioc;
+	u32 int_loopback = 0;
+	int ret;
+	int status = 0;
+
+	get_transfer_length(dd);
+	dd->cur_tx_transfer = dd->cur_transfer;
+	dd->cur_rx_transfer = dd->cur_transfer;
+	dd->bam.curr_rx_bytes_recvd = dd->bam.curr_tx_bytes_sent = 0;
+	dd->write_xfr_cnt = dd->read_xfr_cnt = 0;
+	dd->tx_bytes_remaining = dd->cur_msg_len;
+	dd->rx_bytes_remaining = dd->cur_msg_len;
+	dd->read_buf           = dd->cur_transfer->rx_buf;
+	dd->write_buf          = dd->cur_transfer->tx_buf;
+	dd->tx_done = false;
+	dd->rx_done = false;
+	init_completion(&dd->tx_transfer_complete);
+	init_completion(&dd->rx_transfer_complete);
+	if (dd->cur_transfer->bits_per_word)
+		bpw = dd->cur_transfer->bits_per_word;
+	else
+		bpw = 8;
+	dd->bytes_per_word = (bpw + 7) / 8;
+
+	if (dd->cur_transfer->speed_hz)
+		max_speed = dd->cur_transfer->speed_hz;
+	else
+		max_speed = dd->spi->max_speed_hz;
+	if (!dd->clock_speed || max_speed != dd->clock_speed)
+		msm_spi_clock_set(dd, max_speed);
+
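+	/*
+	 * Allow 100x the ideal transfer time before timing out, e.g. a
+	 * 1000-byte transfer at 1 MHz is 8000 bits at roughly 1000 bits
+	 * per ms, so the base estimate is 8 ms.
+	 */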
+	timeout = 100 * msecs_to_jiffies(
+			DIV_ROUND_UP(dd->cur_msg_len * 8,
+			DIV_ROUND_UP(max_speed, MSEC_PER_SEC)));
+
+	read_count = DIV_ROUND_UP(dd->cur_msg_len, dd->bytes_per_word);
+	if (dd->spi->mode & SPI_LOOP && !dd->pdata->is_slv_ctrl)
+		int_loopback = 1;
+
+	if (msm_spi_set_state(dd, SPI_OP_STATE_RESET))
+		dev_err(dd->dev,
+			"%s: Error setting QUP to reset-state",
+			__func__);
+
+	msm_spi_set_transfer_mode(dd, bpw, read_count);
+	msm_spi_set_mx_counts(dd, read_count);
+	if (dd->tx_mode == SPI_BAM_MODE) {
+		ret = msm_spi_dma_map_buffers(dd);
+		if (ret < 0) {
+			pr_err("Mapping DMA buffers\n");
+			dd->tx_mode = SPI_MODE_NONE;
+			dd->rx_mode = SPI_MODE_NONE;
+			return ret;
+		}
+	}
+	msm_spi_set_qup_io_modes(dd);
+	msm_spi_set_spi_config(dd, bpw);
+	msm_spi_set_qup_config(dd, bpw);
+	if (!dd->pdata->is_slv_ctrl)
+		spi_ioc = msm_spi_set_spi_io_control(dd);
+	msm_spi_set_qup_op_mask(dd);
+	if (dd->pdata->is_slv_ctrl)
+		msm_spi_slv_setup(dd);
+
+	/*
+	 * The output FIFO interrupt handler will handle all writes after
+	 * the first. Restricting this to one write avoids contention
+	 * issues and race conditions between this thread and the
+	 * interrupt handler.
+	 */
+	if (dd->tx_mode != SPI_BAM_MODE) {
+		if (msm_spi_prepare_for_write(dd))
+			goto transfer_end;
+		msm_spi_start_write(dd, read_count);
+	} else {
+		if ((msm_spi_bam_begin_transfer(dd)) < 0) {
+			dev_err(dd->dev, "%s: BAM transfer setup failed\n",
+				__func__);
+			status = -EIO;
+			goto transfer_end;
+		}
+	}
+
+	/*
+	 * In BAM mode, the state here is already RUN.
+	 * Otherwise, only enter the RUN state after the first word is
+	 * written into the output FIFO; the output FIFO EMPTY interrupt
+	 * might fire before the first word is written, resulting in a
+	 * possible race condition.
+	 */
+	if (dd->tx_mode != SPI_BAM_MODE)
+		if (msm_spi_set_state(dd, SPI_OP_STATE_RUN)) {
+			dev_warn(dd->dev,
+				"%s: Failed to set QUP to run-state. Mode:%d",
+				__func__, dd->tx_mode);
+			goto transfer_end;
+		}
+
+	/* Assume success, this might change later upon transaction result */
+	do {
+		if (dd->write_buf &&
+			!wait_for_completion_timeout(&dd->tx_transfer_complete,
+						 timeout)) {
+				dev_err(dd->dev,
+					"%s: SPI Tx transaction timeout\n",
+					__func__);
+				status = -EIO;
+				break;
+		}
+
+		if (dd->read_buf &&
+			!wait_for_completion_timeout(&dd->rx_transfer_complete,
+						 timeout)) {
+				dev_err(dd->dev,
+					"%s: SPI Rx transaction timeout\n",
+					__func__);
+				status = -EIO;
+				break;
+		}
+	} while (msm_spi_dma_send_next(dd));
+
+	if (status && dd->pdata->is_slv_ctrl) {
+		if (reset_core(dd))
+			dev_err(dd->dev, "Reset failed\n");
+	}
+	msm_spi_udelay(dd->xfrs_delay_usec);
+
+transfer_end:
+	if ((dd->tx_mode == SPI_BAM_MODE) && status)
+		msm_spi_bam_flush(dd);
+	msm_spi_dma_unmap_buffers(dd);
+	dd->tx_mode = SPI_MODE_NONE;
+	dd->rx_mode = SPI_MODE_NONE;
+
+	msm_spi_set_state(dd, SPI_OP_STATE_RESET);
+	if (!dd->cur_transfer->cs_change && !dd->pdata->is_slv_ctrl)
+		writel_relaxed(spi_ioc & ~SPI_IO_C_MX_CS_MODE,
+		       dd->base + SPI_IO_CONTROL);
+	return status;
+}
+
+static int msm_spi_slv_abort(struct spi_master *spi)
+{
+	struct msm_spi *dd = spi_master_get_devdata(spi);
+
+	complete_all(&dd->tx_transfer_complete);
+	complete_all(&dd->rx_transfer_complete);
+	return 0;
+}
+
+static inline void msm_spi_set_cs(struct spi_device *spi, bool set_flag)
+{
+	struct msm_spi *dd = spi_master_get_devdata(spi->master);
+	u32 spi_ioc;
+	u32 spi_ioc_orig;
+	int rc = 0;
+
+	rc = pm_runtime_get_sync(dd->dev);
+	if (rc < 0) {
+		dev_err(dd->dev, "Failure during runtime get,rc=%d", rc);
+		return;
+	}
+
+	if (dd->pdata->is_shared) {
+		rc = get_local_resources(dd);
+		if (rc)
+			return;
+	}
+
+	msm_spi_clk_path_vote(dd, spi->max_speed_hz);
+
+	if (!(spi->mode & SPI_CS_HIGH))
+		set_flag = !set_flag;
+
+	/* Serve only under mutex lock as RT suspend may cause a race */
+	mutex_lock(&dd->core_lock);
+	if (dd->suspended) {
+		dev_err(dd->dev, "%s: SPI operational state=%d Invalid\n",
+			__func__, dd->suspended);
+		mutex_unlock(&dd->core_lock);
+		return;
+	}
+
+	spi_ioc = readl_relaxed(dd->base + SPI_IO_CONTROL);
+	spi_ioc_orig = spi_ioc;
+	if (set_flag)
+		spi_ioc |= SPI_IO_C_FORCE_CS;
+	else
+		spi_ioc &= ~SPI_IO_C_FORCE_CS;
+
+	if (spi_ioc != spi_ioc_orig)
+		writel_relaxed(spi_ioc, dd->base + SPI_IO_CONTROL);
+	if (dd->pdata->is_shared)
+		put_local_resources(dd);
+	mutex_unlock(&dd->core_lock);
+
+	pm_runtime_mark_last_busy(dd->dev);
+	pm_runtime_put_autosuspend(dd->dev);
+}
+
+static int reset_core(struct msm_spi *dd)
+{
+	u32 spi_ioc;
+	if (msm_spi_register_init(dd))
+		return -EIO;
+	/*
+	 * The SPI core generates a bogus input overrun error on some targets,
+	 * when a transition from run to reset state occurs and if the FIFO has
+	 * an odd number of entries. Hence we disable the INPUT_OVER_RUN_ERR_EN
+	 * bit.
+	 */
+	msm_spi_enable_error_flags(dd);
+	if (!dd->pdata->is_slv_ctrl) {
+		spi_ioc = readl_relaxed(dd->base + SPI_IO_CONTROL);
+		spi_ioc |= SPI_IO_C_NO_TRI_STATE;
+		writel_relaxed(spi_ioc, dd->base + SPI_IO_CONTROL);
+		/*
+		 * Ensure that the IO control is written to before returning.
+		 */
+		mb();
+	}
+	msm_spi_set_state(dd, SPI_OP_STATE_RESET);
+	return 0;
+}
+
+static void put_local_resources(struct msm_spi *dd)
+{
+
+	if (IS_ERR_OR_NULL(dd->clk) || IS_ERR_OR_NULL(dd->pclk)) {
+		dev_err(dd->dev,
+			"%s: error clk put\n",
+				__func__);
+		return;
+	}
+	msm_spi_disable_irqs(dd);
+	clk_disable_unprepare(dd->clk);
+	dd->clock_speed = 0;
+	clk_disable_unprepare(dd->pclk);
+
+	/* Free  the spi clk, miso, mosi, cs gpio */
+	if (dd->pdata && dd->pdata->gpio_release)
+		dd->pdata->gpio_release();
+
+	msm_spi_free_gpios(dd);
+}
+
+static int get_local_resources(struct msm_spi *dd)
+{
+	int ret = -EINVAL;
+
+	if (IS_ERR_OR_NULL(dd->clk) || IS_ERR_OR_NULL(dd->pclk)) {
+		dev_err(dd->dev,
+			"%s: error clk put\n",
+				__func__);
+		return ret;
+	}
+
+	/* Configure the spi clk, miso, mosi and cs gpio */
+	if (dd->pdata->gpio_config) {
+		ret = dd->pdata->gpio_config();
+		if (ret) {
+			dev_err(dd->dev,
+					"%s: error configuring GPIOs\n",
+					__func__);
+			return ret;
+		}
+	}
+
+	ret = msm_spi_request_gpios(dd);
+	if (ret)
+		return ret;
+
+	ret = clk_prepare_enable(dd->clk);
+	if (ret)
+		goto clk0_err;
+	ret = clk_prepare_enable(dd->pclk);
+	if (ret)
+		goto clk1_err;
+	msm_spi_enable_irqs(dd);
+
+	return 0;
+
+clk1_err:
+	clk_disable_unprepare(dd->clk);
+clk0_err:
+	msm_spi_free_gpios(dd);
+	return ret;
+}
+
+/**
+ * msm_spi_transfer_one: process one SPI transfer at a time
+ * @master: SPI master controller reference
+ * @spi: SPI device the transfer is addressed to
+ * @xfer: the single transfer to process
+ * @return zero on success or negative error value
+ */
+static int msm_spi_transfer_one(struct spi_master *master,
+				struct spi_device *spi,
+				struct spi_transfer *xfer)
+{
+	struct msm_spi	*dd;
+	unsigned long        flags;
+	u32	status_error = 0;
+
+	dd = spi_master_get_devdata(master);
+
+	/* Check message parameters */
+	if (xfer->speed_hz > dd->pdata->max_clock_speed ||
+	    (xfer->bits_per_word &&
+	     (xfer->bits_per_word < 4 || xfer->bits_per_word > 32)) ||
+	    (xfer->tx_buf == NULL && xfer->rx_buf == NULL)) {
+		dev_err(dd->dev,
+			"Invalid transfer: %d Hz, %d bpw tx=%p, rx=%p\n",
+			xfer->speed_hz, xfer->bits_per_word,
+			xfer->tx_buf, xfer->rx_buf);
+		return -EINVAL;
+	}
+	dd->spi = spi;
+	dd->cur_transfer = xfer;
+
+	mutex_lock(&dd->core_lock);
+
+	spin_lock_irqsave(&dd->queue_lock, flags);
+	dd->transfer_pending = 1;
+	spin_unlock_irqrestore(&dd->queue_lock, flags);
+	/*
+	 * get local resources for each transfer to ensure we're in a good
+	 * state and not interfering with other EEs using this device
+	 */
+	if (dd->pdata->is_shared) {
+		if (get_local_resources(dd)) {
+			mutex_unlock(&dd->core_lock);
+			spi_finalize_current_message(master);
+			return -EINVAL;
+		}
+
+		if (reset_core(dd)) {
+			mutex_unlock(&dd->core_lock);
+			spi_finalize_current_message(master);
+			return -EIO;
+		}
+		if (dd->use_dma) {
+			msm_spi_bam_pipe_connect(dd, &dd->bam.prod,
+					&dd->bam.prod.config);
+			msm_spi_bam_pipe_connect(dd, &dd->bam.cons,
+					&dd->bam.cons.config);
+		}
+	}
+
+	if (dd->suspended || !msm_spi_is_valid_state(dd)) {
+		dev_err(dd->dev, "%s: SPI operational state not valid\n",
+			__func__);
+		status_error = 1;
+	}
+
+	if (!status_error)
+		status_error = msm_spi_process_transfer(dd);
+
+	spin_lock_irqsave(&dd->queue_lock, flags);
+	dd->transfer_pending = 0;
+	spin_unlock_irqrestore(&dd->queue_lock, flags);
+
+	/*
+	 * Put local resources prior to calling finalize to ensure the hw
+	 * is in a known state before notifying the calling thread (which is a
+	 * different context since we're running in the spi kthread here) to
+	 * prevent race conditions between us and any other EE's using this hw.
+	 */
+	if (dd->pdata->is_shared) {
+		if (dd->use_dma) {
+			msm_spi_bam_pipe_disconnect(dd, &dd->bam.prod);
+			msm_spi_bam_pipe_disconnect(dd, &dd->bam.cons);
+		}
+		put_local_resources(dd);
+	}
+	mutex_unlock(&dd->core_lock);
+	if (dd->suspended)
+		wake_up_interruptible(&dd->continue_suspend);
+	return status_error;
+}
+
+static int msm_spi_prepare_transfer_hardware(struct spi_master *master)
+{
+	struct msm_spi	*dd = spi_master_get_devdata(master);
+	int resume_state = 0;
+
+	resume_state = pm_runtime_get_sync(dd->dev);
+	if (resume_state < 0)
+		goto spi_finalize;
+
+	/*
+	 * Counter-part of system-suspend when runtime-pm is not enabled.
+	 * This way, resume can be left empty and the device will be put
+	 * in active mode only when a client requests anything on the bus.
+	 */
+	if (!pm_runtime_enabled(dd->dev))
+		resume_state = msm_spi_pm_resume_runtime(dd->dev);
+	if (resume_state < 0)
+		goto spi_finalize;
+	if (dd->suspended) {
+		resume_state = -EBUSY;
+		goto spi_finalize;
+	}
+	return 0;
+
+spi_finalize:
+	spi_finalize_current_message(master);
+	return resume_state;
+}
+
+static int msm_spi_unprepare_transfer_hardware(struct spi_master *master)
+{
+	struct msm_spi	*dd = spi_master_get_devdata(master);
+
+	pm_runtime_mark_last_busy(dd->dev);
+	pm_runtime_put_autosuspend(dd->dev);
+	return 0;
+}
+
+static void msm_spi_slv_setup(struct msm_spi *dd)
+{
+	u32 spi_config = readl_relaxed(dd->base + SPI_CONFIG);
+	u32 qup_config = readl_relaxed(dd->base + QUP_CONFIG);
+	u32 irq_en = GENMASK(6, 0);
+
+	qup_config &= ~QUP_CFG_MODE;
+	qup_config |= SPI_EN_EXT_OUT_FLAG;
+	writel_relaxed(qup_config, dd->base + QUP_CONFIG);
+	writel_relaxed(spi_config, dd->base + SPI_CONFIG);
+	writel_relaxed(irq_en, (dd->base + SPI_SLAVE_IRQ_EN));
+	if (dd->read_buf && !dd->write_buf) {
+		u32 slv_cfg =
+			readl_relaxed(dd->base + SPI_SLAVE_CONFIG);
+		slv_cfg |= (RX_UNBALANCED_MASK | SPI_S_CGC_EN);
+		writel_relaxed(slv_cfg, (dd->base + SPI_SLAVE_CONFIG));
+	}
+	/*
+	 * Ensure the previous write completed before enabling slave mode.
+	 */
+	mb();
+
+	spi_config = readl_relaxed(dd->base + SPI_CONFIG);
+	qup_config = readl_relaxed(dd->base + QUP_CONFIG);
+
+	qup_config |= QUP_CONFIG_SPI_SLAVE;
+	spi_config |= SPI_CFG_SLAVE_OP;
+
+	writel_relaxed(qup_config, dd->base + QUP_CONFIG);
+	writel_relaxed(spi_config, dd->base + SPI_CONFIG);
+	/*
+	 * Ensure the previous write completed before enabling clk_on bit.
+	 */
+	mb();
+
+	qup_config = readl_relaxed(dd->base + QUP_CONFIG);
+	qup_config |= (APP_CLK_ON_EN | CORE_CLK_ON_EN |
+		FIFO_CLK_ON_EN | CORE_EX_CLK_ON_EN);
+	writel_relaxed(qup_config, dd->base + QUP_CONFIG);
+	/*
+	 * Ensure Slave setup completes before returning.
+	 */
+	mb();
+}
+
+static int msm_spi_setup(struct spi_device *spi)
+{
+	struct msm_spi	*dd;
+	int              rc = 0;
+	u32              spi_ioc;
+	u32              spi_config;
+	u32              mask;
+
+	if (spi->bits_per_word < 4 || spi->bits_per_word > 32) {
+		dev_err(&spi->dev, "%s: invalid bits_per_word %d\n",
+			__func__, spi->bits_per_word);
+		return -EINVAL;
+	}
+	if (spi->chip_select > SPI_NUM_CHIPSELECTS-1) {
+		dev_err(&spi->dev, "%s, chip select %d exceeds max value %d\n",
+			__func__, spi->chip_select, SPI_NUM_CHIPSELECTS - 1);
+		return -EINVAL;
+	}
+
+	dd = spi_master_get_devdata(spi->master);
+
+	rc = pm_runtime_get_sync(dd->dev);
+	if (rc < 0 && !dd->is_init_complete &&
+			pm_runtime_enabled(dd->dev)) {
+		pm_runtime_set_suspended(dd->dev);
+		pm_runtime_put_sync(dd->dev);
+		rc = 0;
+		goto err_setup_exit;
+	} else
+		rc = 0;
+
+	mutex_lock(&dd->core_lock);
+
+	/* Counter-part of system-suspend when runtime-pm is not enabled. */
+	if (!pm_runtime_enabled(dd->dev)) {
+		rc = msm_spi_pm_resume_runtime(dd->dev);
+		if (rc < 0 && !dd->is_init_complete) {
+			rc = 0;
+			mutex_unlock(&dd->core_lock);
+			goto err_setup_exit;
+		}
+	}
+
+	if (dd->suspended) {
+		rc = -EBUSY;
+		mutex_unlock(&dd->core_lock);
+		goto err_setup_exit;
+	}
+
+	if (dd->pdata->is_shared) {
+		rc = get_local_resources(dd);
+		if (rc)
+			goto no_resources;
+	}
+
+	spi_ioc = readl_relaxed(dd->base + SPI_IO_CONTROL);
+	mask = SPI_IO_C_CS_N_POLARITY_0 << spi->chip_select;
+	if (spi->mode & SPI_CS_HIGH)
+		spi_ioc |= mask;
+	else
+		spi_ioc &= ~mask;
+	spi_ioc = msm_spi_calc_spi_ioc_clk_polarity(spi_ioc, spi->mode);
+
+	writel_relaxed(spi_ioc, dd->base + SPI_IO_CONTROL);
+
+	spi_config = readl_relaxed(dd->base + SPI_CONFIG);
+	spi_config = msm_spi_calc_spi_config_loopback_and_input_first(
+							spi_config, spi->mode);
+	writel_relaxed(spi_config, dd->base + SPI_CONFIG);
+
+	/* Ensure previous write completed before disabling the clocks */
+	mb();
+	if (dd->pdata->is_shared)
+		put_local_resources(dd);
+
+no_resources:
+	mutex_unlock(&dd->core_lock);
+	/* Counter-part of system-resume when runtime-pm is not enabled. */
+	if (!pm_runtime_enabled(dd->dev)) {
+		msm_spi_pm_suspend_runtime(dd->dev);
+	} else {
+		pm_runtime_mark_last_busy(dd->dev);
+		pm_runtime_put_autosuspend(dd->dev);
+	}
+
+err_setup_exit:
+	return rc;
+}
+
+#ifdef CONFIG_DEBUG_FS
+
+static int debugfs_iomem_x32_set(void *data, u64 val)
+{
+	struct msm_spi_debugfs_data *reg = (struct msm_spi_debugfs_data *)data;
+	struct msm_spi *dd = reg->dd;
+	int ret;
+
+	ret = pm_runtime_get_sync(dd->dev);
+	if (ret < 0)
+		return ret;
+
+	writel_relaxed(val, (dd->base + reg->offset));
+	/* Ensure the previous write completed. */
+	mb();
+
+	pm_runtime_mark_last_busy(dd->dev);
+	pm_runtime_put_autosuspend(dd->dev);
+	return 0;
+}
+
+static int debugfs_iomem_x32_get(void *data, u64 *val)
+{
+	struct msm_spi_debugfs_data *reg = (struct msm_spi_debugfs_data *)data;
+	struct msm_spi *dd = reg->dd;
+	int ret;
+
+	ret = pm_runtime_get_sync(dd->dev);
+	if (ret < 0)
+		return ret;
+	*val = readl_relaxed(dd->base + reg->offset);
+	/* Ensure the previous read completed. */
+	mb();
+
+	pm_runtime_mark_last_busy(dd->dev);
+	pm_runtime_put_autosuspend(dd->dev);
+	return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(fops_iomem_x32, debugfs_iomem_x32_get,
+			debugfs_iomem_x32_set, "0x%08llx\n");
+
+static void spi_debugfs_init(struct msm_spi *dd)
+{
+	char dir_name[20];
+
+	scnprintf(dir_name, sizeof(dir_name), "%s_dbg", dev_name(dd->dev));
+	dd->dent_spi = debugfs_create_dir(dir_name, NULL);
+	if (dd->dent_spi) {
+		int i;
+
+		for (i = 0; i < ARRAY_SIZE(debugfs_spi_regs); i++) {
+			dd->reg_data[i].offset = debugfs_spi_regs[i].offset;
+			dd->reg_data[i].dd = dd;
+			dd->debugfs_spi_regs[i] =
+			   debugfs_create_file(
+			       debugfs_spi_regs[i].name,
+			       debugfs_spi_regs[i].mode,
+			       dd->dent_spi, &dd->reg_data[i],
+			       &fops_iomem_x32);
+		}
+	}
+}
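+
+/*
+ * With debugfs mounted (typically at /sys/kernel/debug), each register
+ * listed in debugfs_spi_regs then appears as a file under the
+ * "<dev-name>_dbg/" directory and can be read or written with cat/echo
+ * through the x32 attribute above.
+ */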
+
+static void spi_debugfs_exit(struct msm_spi *dd)
+{
+	if (dd->dent_spi) {
+		int i;
+
+		debugfs_remove_recursive(dd->dent_spi);
+		dd->dent_spi = NULL;
+		for (i = 0; i < ARRAY_SIZE(debugfs_spi_regs); i++)
+			dd->debugfs_spi_regs[i] = NULL;
+	}
+}
+#else
+static void spi_debugfs_init(struct msm_spi *dd) {}
+static void spi_debugfs_exit(struct msm_spi *dd) {}
+#endif
+
+/* ===Device attributes begin=== */
+static ssize_t show_stats(struct device *dev, struct device_attribute *attr,
+			  char *buf)
+{
+	struct spi_master *master = dev_get_drvdata(dev);
+	struct msm_spi *dd =  spi_master_get_devdata(master);
+
+	return snprintf(buf, PAGE_SIZE,
+			"Device       %s\n"
+			"rx fifo_size = %d spi words\n"
+			"tx fifo_size = %d spi words\n"
+			"use_dma ?    %s\n"
+			"rx block size = %d bytes\n"
+			"tx block size = %d bytes\n"
+			"input burst size = %d bytes\n"
+			"output burst size = %d bytes\n"
+			"DMA configuration:\n"
+			"tx_ch=%d, rx_ch=%d, tx_crci= %d, rx_crci=%d\n"
+			"--statistics--\n"
+			"Rx isrs  = %d\n"
+			"Tx isrs  = %d\n"
+			"--debug--\n"
+			"NA yet\n",
+			dev_name(dev),
+			dd->input_fifo_size,
+			dd->output_fifo_size,
+			dd->use_dma ? "yes" : "no",
+			dd->input_block_size,
+			dd->output_block_size,
+			dd->input_burst_size,
+			dd->output_burst_size,
+			dd->tx_dma_chan,
+			dd->rx_dma_chan,
+			dd->tx_dma_crci,
+			dd->rx_dma_crci,
+			dd->stat_rx,
+			dd->stat_tx
+			);
+}
+
+/* Reset statistics on write */
+static ssize_t set_stats(struct device *dev, struct device_attribute *attr,
+			 const char *buf, size_t count)
+{
+	struct msm_spi *dd = dev_get_drvdata(dev);
+	dd->stat_rx = 0;
+	dd->stat_tx = 0;
+	return count;
+}
+
+static DEVICE_ATTR(stats, S_IRUGO | S_IWUSR, show_stats, set_stats);
+
+static ssize_t show_qup_state(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	ssize_t ret = 0;
+	struct platform_device *pdev = container_of(dev,
+					struct platform_device, dev);
+	struct spi_master *master = platform_get_drvdata(pdev);
+	struct msm_spi *dd;
+
+	dd = spi_master_get_devdata(master);
+	/* This check should not fail */
+	if (dd)
+		ret = snprintf(buf, sizeof(int), "%u\n",
+				atomic_read(&dd->qup_state));
+	return ret;
+}
+
+static ssize_t set_qup_state(struct device *dev,
+			struct device_attribute *attr,
+			const char *buf, size_t count)
+{
+	return 1;
+}
+
+static DEVICE_ATTR(spi_qup_state, S_IWUSR | S_IRUGO,
+			show_qup_state, set_qup_state);
+
+static struct attribute *dev_attrs[] = {
+	&dev_attr_stats.attr,
+	NULL,
+};
+
+static struct attribute_group dev_attr_grp = {
+	.attrs = dev_attrs,
+};
+/* ===Device attributes end=== */
+
+static void msm_spi_bam_pipe_teardown(struct msm_spi *dd,
+					enum msm_spi_pipe_direction pipe_dir)
+{
+	struct msm_spi_bam_pipe *pipe = (pipe_dir == SPI_BAM_CONSUMER_PIPE) ?
+					(&dd->bam.prod) : (&dd->bam.cons);
+	if (!pipe->teardown_required)
+		return;
+
+	msm_spi_bam_pipe_disconnect(dd, pipe);
+	dma_free_coherent(dd->dev, pipe->config.desc.size,
+		pipe->config.desc.base, pipe->config.desc.phys_base);
+	sps_free_endpoint(pipe->handle);
+	pipe->handle = 0;
+	pipe->teardown_required = false;
+}
+
+static int msm_spi_bam_pipe_init(struct msm_spi *dd,
+					enum msm_spi_pipe_direction pipe_dir)
+{
+	int rc = 0;
+	struct sps_pipe *pipe_handle;
+	struct msm_spi_bam_pipe *pipe = (pipe_dir == SPI_BAM_CONSUMER_PIPE) ?
+					(&dd->bam.prod) : (&dd->bam.cons);
+	struct sps_connect *pipe_conf = &pipe->config;
+
+	pipe->name   = (pipe_dir == SPI_BAM_CONSUMER_PIPE) ? "cons" : "prod";
+	pipe->handle = 0;
+	pipe_handle  = sps_alloc_endpoint();
+	if (!pipe_handle) {
+		dev_err(dd->dev, "%s: Failed to allocate BAM endpoint\n"
+								, __func__);
+		return -ENOMEM;
+	}
+
+	memset(pipe_conf, 0, sizeof(*pipe_conf));
+	rc = sps_get_config(pipe_handle, pipe_conf);
+	if (rc) {
+		dev_err(dd->dev, "%s: Failed to get BAM pipe config\n"
+			, __func__);
+		goto config_err;
+	}
+
+	if (pipe_dir == SPI_BAM_CONSUMER_PIPE) {
+		pipe_conf->source          = dd->bam.handle;
+		pipe_conf->destination     = SPS_DEV_HANDLE_MEM;
+		pipe_conf->mode            = SPS_MODE_SRC;
+		pipe_conf->src_pipe_index  =
+					dd->pdata->bam_producer_pipe_index;
+		pipe_conf->dest_pipe_index = 0;
+	} else {
+		pipe_conf->source          = SPS_DEV_HANDLE_MEM;
+		pipe_conf->destination     = dd->bam.handle;
+		pipe_conf->mode            = SPS_MODE_DEST;
+		pipe_conf->src_pipe_index  = 0;
+		pipe_conf->dest_pipe_index =
+					dd->pdata->bam_consumer_pipe_index;
+	}
+	pipe_conf->options = SPS_O_EOT | SPS_O_AUTO_ENABLE;
+	pipe_conf->desc.size = SPI_BAM_MAX_DESC_NUM * sizeof(struct sps_iovec);
+	pipe_conf->desc.base = dma_alloc_coherent(dd->dev,
+				pipe_conf->desc.size,
+				&pipe_conf->desc.phys_base,
+				GFP_KERNEL);
+	if (!pipe_conf->desc.base) {
+		dev_err(dd->dev, "%s: Failed allocate BAM pipe memory"
+			, __func__);
+		rc = -ENOMEM;
+		goto config_err;
+	}
+	/* zero descriptor FIFO for convenient debugging of first descs */
+	memset(pipe_conf->desc.base, 0x00, pipe_conf->desc.size);
+
+	pipe->handle = pipe_handle;
+
+	return 0;
+
+config_err:
+	sps_free_endpoint(pipe_handle);
+
+	return rc;
+}
+
+static void msm_spi_bam_teardown(struct msm_spi *dd)
+{
+	msm_spi_bam_pipe_teardown(dd, SPI_BAM_PRODUCER_PIPE);
+	msm_spi_bam_pipe_teardown(dd, SPI_BAM_CONSUMER_PIPE);
+
+	if (dd->bam.deregister_required) {
+		sps_deregister_bam_device(dd->bam.handle);
+		dd->bam.deregister_required = false;
+	}
+}
+
+static int msm_spi_bam_init(struct msm_spi *dd)
+{
+	struct sps_bam_props bam_props = {0};
+	uintptr_t bam_handle;
+	int rc = 0;
+
+	rc = sps_phy2h(dd->bam.phys_addr, &bam_handle);
+	if (rc || !bam_handle) {
+		bam_props.phys_addr = dd->bam.phys_addr;
+		bam_props.virt_addr = dd->bam.base;
+		bam_props.irq       = dd->bam.irq;
+		bam_props.manage    = SPS_BAM_MGR_DEVICE_REMOTE;
+		bam_props.summing_threshold = 0x10;
+
+		rc = sps_register_bam_device(&bam_props, &bam_handle);
+		if (rc) {
+			dev_err(dd->dev,
+				"%s: Failed to register BAM device",
+				__func__);
+			return rc;
+		}
+		dd->bam.deregister_required = true;
+	}
+
+	dd->bam.handle = bam_handle;
+
+	rc = msm_spi_bam_pipe_init(dd, SPI_BAM_PRODUCER_PIPE);
+	if (rc) {
+		dev_err(dd->dev,
+			"%s: Failed to init producer BAM-pipe",
+			__func__);
+		goto bam_init_error;
+	}
+
+	rc = msm_spi_bam_pipe_init(dd, SPI_BAM_CONSUMER_PIPE);
+	if (rc) {
+		dev_err(dd->dev,
+			"%s: Failed to init consumer BAM-pipe",
+			__func__);
+		goto bam_init_error;
+	}
+
+	return 0;
+
+bam_init_error:
+	msm_spi_bam_teardown(dd);
+	return rc;
+}
+
+enum msm_spi_dt_entry_status {
+	DT_REQ,  /* Required:  fail if missing */
+	DT_SGST, /* Suggested: warn if missing */
+	DT_OPT,  /* Optional:  don't warn if missing */
+};
+
+enum msm_spi_dt_entry_type {
+	DT_U32,
+	DT_GPIO,
+	DT_BOOL,
+};
+
+struct msm_spi_dt_to_pdata_map {
+	const char                  *dt_name;
+	void                        *ptr_data;
+	enum msm_spi_dt_entry_status status;
+	enum msm_spi_dt_entry_type   type;
+	int                          default_val;
+};
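+
+/*
+ * Each map entry ties one DT property to a pdata/driver field. For
+ * example, the "qcom,bus-width" entry used below reads a u32 into
+ * pdata->bus_width and quietly falls back to the default of 8 when the
+ * property is absent (DT_OPT), whereas a DT_REQ entry would make
+ * msm_spi_dt_to_pdata_populate() fail the probe.
+ */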
+
+static int msm_spi_dt_to_pdata_populate(struct platform_device *pdev,
+					struct msm_spi_platform_data *pdata,
+					struct msm_spi_dt_to_pdata_map  *itr)
+{
+	int  ret, err = 0;
+	struct device_node *node = pdev->dev.of_node;
+
+	for (; itr->dt_name; ++itr) {
+		switch (itr->type) {
+		case DT_GPIO:
+			ret = of_get_named_gpio(node, itr->dt_name, 0);
+			if (ret >= 0) {
+				*((int *) itr->ptr_data) = ret;
+				ret = 0;
+			}
+			break;
+		case DT_U32:
+			ret = of_property_read_u32(node, itr->dt_name,
+							 (u32 *) itr->ptr_data);
+			break;
+		case DT_BOOL:
+			*((bool *) itr->ptr_data) =
+				of_property_read_bool(node, itr->dt_name);
+			ret = 0;
+			break;
+		default:
+			dev_err(&pdev->dev, "%d is an unknown DT entry type\n",
+								itr->type);
+			ret = -EBADE;
+		}
+
+		dev_dbg(&pdev->dev, "DT entry ret:%d name:%s val:%d\n",
+				ret, itr->dt_name, *((int *)itr->ptr_data));
+
+		if (ret) {
+			*((int *)itr->ptr_data) = itr->default_val;
+
+			if (itr->status < DT_OPT) {
+				dev_err(&pdev->dev, "Missing '%s' DT entry\n",
+								itr->dt_name);
+
+				/* cont on err to dump all missing entries */
+				if (itr->status == DT_REQ && !err)
+					err = ret;
+			}
+		}
+	}
+
+	return err;
+}
+
+/**
+ * msm_spi_dt_to_pdata: create pdata and read gpio config from device tree
+ */
+struct msm_spi_platform_data *msm_spi_dt_to_pdata(
+			struct platform_device *pdev, struct msm_spi *dd)
+{
+	struct msm_spi_platform_data *pdata;
+
+	pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
+	if (!pdata) {
+		pr_err("Unable to allocate platform data\n");
+		return NULL;
+	} else {
+		struct msm_spi_dt_to_pdata_map map[] = {
+		{"spi-max-frequency",
+			&pdata->max_clock_speed,         DT_SGST, DT_U32,   0},
+		{"qcom,infinite-mode",
+			&pdata->infinite_mode,           DT_OPT,  DT_U32,   0},
+		{"qcom,master-id",
+			&pdata->master_id,               DT_SGST, DT_U32,   0},
+		{"qcom,bus-width",
+			&pdata->bus_width,               DT_OPT, DT_U32,   8},
+		{"qcom,ver-reg-exists",
+			&pdata->ver_reg_exists,          DT_OPT,  DT_BOOL,  0},
+		{"qcom,use-bam",
+			&pdata->use_bam,                 DT_OPT,  DT_BOOL,  0},
+		{"qcom,use-pinctrl",
+			&pdata->use_pinctrl,             DT_OPT,  DT_BOOL,  0},
+		{"qcom,bam-consumer-pipe-index",
+			&pdata->bam_consumer_pipe_index, DT_OPT,  DT_U32,   0},
+		{"qcom,bam-producer-pipe-index",
+			&pdata->bam_producer_pipe_index, DT_OPT,  DT_U32,   0},
+		{"qcom,gpio-clk",
+			&dd->spi_gpios[0],               DT_OPT,  DT_GPIO, -1},
+		{"qcom,gpio-miso",
+			&dd->spi_gpios[1],               DT_OPT,  DT_GPIO, -1},
+		{"qcom,gpio-mosi",
+			&dd->spi_gpios[2],               DT_OPT,  DT_GPIO, -1},
+		{"qcom,gpio-cs0",
+			&dd->cs_gpios[0].gpio_num,       DT_OPT,  DT_GPIO, -1},
+		{"qcom,gpio-cs1",
+			&dd->cs_gpios[1].gpio_num,       DT_OPT,  DT_GPIO, -1},
+		{"qcom,gpio-cs2",
+			&dd->cs_gpios[2].gpio_num,       DT_OPT,  DT_GPIO, -1},
+		{"qcom,gpio-cs3",
+			&dd->cs_gpios[3].gpio_num,       DT_OPT,  DT_GPIO, -1},
+		{"qcom,rt-priority",
+			&pdata->rt_priority,		 DT_OPT,  DT_BOOL,  0},
+		{"qcom,shared",
+			&pdata->is_shared,		 DT_OPT,  DT_BOOL,  0},
+		{"qcom,slv-ctrl",
+			&pdata->is_slv_ctrl,		DT_OPT,  DT_BOOL,  0},
+		{NULL,  NULL,                            0,       0,        0},
+		};
+
+		if (msm_spi_dt_to_pdata_populate(pdev, pdata, map)) {
+			devm_kfree(&pdev->dev, pdata);
+			return NULL;
+		}
+	}
+
+	if (pdata->use_bam) {
+		if (!pdata->bam_consumer_pipe_index) {
+			dev_warn(&pdev->dev,
+			"missing qcom,bam-consumer-pipe-index entry in device-tree\n");
+			pdata->use_bam = false;
+		}
+
+		if (!pdata->bam_producer_pipe_index) {
+			dev_warn(&pdev->dev,
+			"missing qcom,bam-producer-pipe-index entry in device-tree\n");
+			pdata->use_bam = false;
+		}
+	}
+	return pdata;
+}
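+
+/*
+ * A minimal, hypothetical device-tree node using the bindings consumed
+ * above (the unit address and all values are invented for illustration):
+ *
+ *	spi@f9923000 {
+ *		compatible = "qcom,spi-qup-v2";
+ *		reg = <0xf9923000 0x1000>;
+ *		spi-max-frequency = <19200000>;
+ *		qcom,use-bam;
+ *		qcom,bam-consumer-pipe-index = <12>;
+ *		qcom,bam-producer-pipe-index = <13>;
+ *	};
+ */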
+
+static int msm_spi_get_qup_hw_ver(struct device *dev, struct msm_spi *dd)
+{
+	u32 data = readl_relaxed(dd->base + QUP_HARDWARE_VER);
+	return (data >= QUP_HARDWARE_VER_2_1_1) ? SPI_QUP_VERSION_BFAM
+						: SPI_QUP_VERSION_NONE;
+}
+
+static int msm_spi_bam_get_resources(struct msm_spi *dd,
+	struct platform_device *pdev, struct spi_master *master)
+{
+	struct resource *resource;
+	size_t bam_mem_size;
+
+	resource = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+						"spi_bam_physical");
+	if (!resource) {
+		dev_warn(&pdev->dev,
+			"%s: Missing spi_bam_physical entry in DT",
+			__func__);
+		return -ENXIO;
+	}
+
+	dd->bam.phys_addr = resource->start;
+	bam_mem_size = resource_size(resource);
+	dd->bam.base = devm_ioremap(&pdev->dev, dd->bam.phys_addr,
+					bam_mem_size);
+	if (!dd->bam.base) {
+		dev_warn(&pdev->dev,
+			"%s: Failed to ioremap(spi_bam_physical)",
+			__func__);
+		return -ENXIO;
+	}
+
+	dd->bam.irq = platform_get_irq_byname(pdev, "spi_bam_irq");
+	if (dd->bam.irq < 0) {
+		dev_warn(&pdev->dev, "%s: Missing spi_bam_irq entry in DT",
+			__func__);
+		return -EINVAL;
+	}
+
+	dd->dma_init = msm_spi_bam_init;
+	dd->dma_teardown = msm_spi_bam_teardown;
+	return 0;
+}
+
+static int init_resources(struct platform_device *pdev)
+{
+	struct spi_master *master = platform_get_drvdata(pdev);
+	struct msm_spi	  *dd;
+	int               rc = -ENXIO;
+	int               clk_enabled = 0;
+	int               pclk_enabled = 0;
+
+	dd = spi_master_get_devdata(master);
+
+	if (dd->pdata && dd->pdata->use_pinctrl) {
+		rc = msm_spi_pinctrl_init(dd);
+		if (rc) {
+			dev_err(&pdev->dev, "%s: pinctrl init failed\n",
+					 __func__);
+			return rc;
+		}
+	}
+
+	mutex_lock(&dd->core_lock);
+
+	dd->clk = clk_get(&pdev->dev, "core_clk");
+	if (IS_ERR(dd->clk)) {
+		dev_err(&pdev->dev, "%s: unable to get core_clk\n", __func__);
+		rc = PTR_ERR(dd->clk);
+		goto err_clk_get;
+	}
+
+	dd->pclk = clk_get(&pdev->dev, "iface_clk");
+	if (IS_ERR(dd->pclk)) {
+		dev_err(&pdev->dev, "%s: unable to get iface_clk\n", __func__);
+		rc = PTR_ERR(dd->pclk);
+		goto err_pclk_get;
+	}
+
+	if (dd->pdata && dd->pdata->max_clock_speed)
+		msm_spi_clock_set(dd, dd->pdata->max_clock_speed);
+
+	rc = clk_prepare_enable(dd->clk);
+	if (rc) {
+		dev_err(&pdev->dev, "%s: unable to enable core_clk\n",
+			__func__);
+		goto err_clk_enable;
+	}
+
+	clk_enabled = 1;
+	rc = clk_prepare_enable(dd->pclk);
+	if (rc) {
+		dev_err(&pdev->dev, "%s: unable to enable iface_clk\n",
+		__func__);
+		goto err_pclk_enable;
+	}
+
+	pclk_enabled = 1;
+
+	if (dd->pdata && dd->pdata->ver_reg_exists) {
+		enum msm_spi_qup_version ver =
+					msm_spi_get_qup_hw_ver(&pdev->dev, dd);
+		if (dd->qup_ver != ver)
+			dev_warn(&pdev->dev,
+			"%s: HW version different then initially assumed by probe",
+			__func__);
+	}
+
+	/* GSBI does not exist on B-family MSM chips */
+	if (dd->qup_ver != SPI_QUP_VERSION_BFAM) {
+		rc = msm_spi_configure_gsbi(dd, pdev);
+		if (rc)
+			goto err_config_gsbi;
+	}
+
+	msm_spi_calculate_fifo_size(dd);
+	if (dd->use_dma) {
+		rc = dd->dma_init(dd);
+		if (rc) {
+			dev_err(&pdev->dev,
+				"%s: failed to init DMA. Disabling DMA mode\n",
+				__func__);
+			dd->use_dma = 0;
+		}
+	}
+
+	if (msm_spi_register_init(dd))
+		goto err_spi_state;
+	/*
+	 * The SPI core generates a bogus input overrun error on some targets,
+	 * when a transition from run to reset state occurs and if the FIFO has
+	 * an odd number of entries. Hence we disable the INPUT_OVER_RUN_ERR_EN
+	 * bit.
+	 */
+	msm_spi_enable_error_flags(dd);
+
+	if (dd->pdata && !dd->pdata->is_slv_ctrl)
+		writel_relaxed(SPI_IO_C_NO_TRI_STATE,
+				dd->base + SPI_IO_CONTROL);
+	rc = msm_spi_set_state(dd, SPI_OP_STATE_RESET);
+	if (rc)
+		goto err_spi_state;
+
+	clk_disable_unprepare(dd->clk);
+	clk_disable_unprepare(dd->pclk);
+	clk_enabled = 0;
+	pclk_enabled = 0;
+
+	dd->transfer_pending = 0;
+	dd->tx_mode = SPI_MODE_NONE;
+	dd->rx_mode = SPI_MODE_NONE;
+
+	rc = msm_spi_request_irq(dd, pdev, master);
+	if (rc)
+		goto err_irq;
+
+	msm_spi_disable_irqs(dd);
+
+	mutex_unlock(&dd->core_lock);
+	return 0;
+
+err_irq:
+err_spi_state:
+	if (dd->use_dma && dd->dma_teardown)
+		dd->dma_teardown(dd);
+err_config_gsbi:
+	if (pclk_enabled)
+		clk_disable_unprepare(dd->pclk);
+err_pclk_enable:
+	if (clk_enabled)
+		clk_disable_unprepare(dd->clk);
+err_clk_enable:
+	clk_put(dd->pclk);
+err_pclk_get:
+	clk_put(dd->clk);
+err_clk_get:
+	mutex_unlock(&dd->core_lock);
+	return rc;
+}
+
+static const struct of_device_id msm_spi_dt_match[] = {
+	{ .compatible = "qcom,spi-qup-v2", },
+	{ .compatible = "qcom,qup-v26", },
+	{}
+};
+
+static int msm_spi_probe(struct platform_device *pdev)
+{
+	struct spi_master      *master;
+	struct msm_spi	       *dd;
+	struct resource	       *resource;
+	int			i = 0;
+	int                     rc = -ENXIO;
+	struct msm_spi_platform_data *pdata;
+
+	master = spi_alloc_master(&pdev->dev, sizeof(struct msm_spi));
+	if (!master) {
+		rc = -ENOMEM;
+		dev_err(&pdev->dev, "master allocation failed\n");
+		goto err_probe_exit;
+	}
+
+	master->bus_num        = pdev->id;
+	master->mode_bits      = SPI_SUPPORTED_MODES;
+	master->num_chipselect = SPI_NUM_CHIPSELECTS;
+	master->set_cs	       = msm_spi_set_cs;
+	master->setup          = msm_spi_setup;
+	master->prepare_transfer_hardware = msm_spi_prepare_transfer_hardware;
+	master->transfer_one = msm_spi_transfer_one;
+	master->unprepare_transfer_hardware
+			= msm_spi_unprepare_transfer_hardware;
+
+	platform_set_drvdata(pdev, master);
+	dd = spi_master_get_devdata(master);
+
+	if (pdev->dev.of_node) {
+		const struct of_device_id *dev_id;
+		enum msm_spi_qup_version ver;
+
+		dd->qup_ver = SPI_QUP_VERSION_BFAM;
+		master->dev.of_node = pdev->dev.of_node;
+		pdata = msm_spi_dt_to_pdata(pdev, dd);
+		if (!pdata) {
+			rc = -ENOMEM;
+			goto err_probe_exit;
+		}
+
+		rc = of_alias_get_id(pdev->dev.of_node, "spi");
+		if (rc < 0)
+			dev_warn(&pdev->dev,
+				"using default bus_num %d\n", pdev->id);
+		else
+			master->bus_num = pdev->id = rc;
+
+		/*
+		 * of_match_device() alone always succeeds here since probe
+		 * only runs on a match; assume only the "qcom,qup-v26"
+		 * compatible advertises slave support.
+		 */
+		dev_id = of_match_device(msm_spi_dt_match, &pdev->dev);
+		if (dev_id && !strcmp(dev_id->compatible, "qcom,qup-v26"))
+			ver = SPI_QUP_VERSION_SPI_SLV;
+		else
+			ver = SPI_QUP_VERSION_BFAM;
+
+		dd->slv_support = (ver >= SPI_QUP_VERSION_SPI_SLV);
+	} else {
+		pdata = pdev->dev.platform_data;
+		dd->qup_ver = SPI_QUP_VERSION_NONE;
+
+		for (i = 0; i < ARRAY_SIZE(spi_rsrcs); ++i) {
+			resource = platform_get_resource(pdev, IORESOURCE_IO,
+							i);
+			dd->spi_gpios[i] = resource ? resource->start : -1;
+		}
+
+		for (i = 0; i < ARRAY_SIZE(spi_cs_rsrcs); ++i) {
+			resource = platform_get_resource(pdev, IORESOURCE_IO,
+						i + ARRAY_SIZE(spi_rsrcs));
+			dd->cs_gpios[i].gpio_num = resource ?
+							resource->start : -1;
+		}
+	}
+
+	for (i = 0; i < ARRAY_SIZE(spi_cs_rsrcs); ++i)
+		dd->cs_gpios[i].valid = 0;
+
+	dd->pdata = pdata;
+	resource = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!resource) {
+		rc = -ENXIO;
+		goto err_probe_res;
+	}
+
+	dd->mem_phys_addr = resource->start;
+	dd->mem_size = resource_size(resource);
+	dd->dev = &pdev->dev;
+
+	atomic_set(&dd->qup_state, SPI_OP_STATE_RESET);
+	if (pdata) {
+		master->rt = pdata->rt_priority;
+		if (pdata->dma_config) {
+			rc = pdata->dma_config();
+			if (rc) {
+				dev_warn(&pdev->dev,
+					"%s: DM mode not supported\n",
+					__func__);
+				dd->use_dma = 0;
+				goto skip_dma_resources;
+			}
+		}
+		if (!dd->pdata->use_bam)
+			goto skip_dma_resources;
+
+		rc = msm_spi_bam_get_resources(dd, pdev, master);
+		if (rc) {
+			dev_warn(dd->dev, "%s: Failed to get BAM resources",
+				__func__);
+			goto skip_dma_resources;
+		}
+		dd->use_dma = 1;
+	}
+
+	spi_dma_mask(&pdev->dev);
+
+	if (pdata && pdata->is_slv_ctrl) {
+		if (!dd->slv_support) {
+			rc = -ENXIO;
+			dev_err(&pdev->dev, "QUP ver %d, no slv support\n",
+								dd->qup_ver);
+			goto err_probe_res;
+		}
+
+		master->slave		= true;
+		master->set_cs		= NULL;
+		master->setup		= NULL;
+		master->slave_abort	= msm_spi_slv_abort;
+	}
+
+skip_dma_resources:
+
+	spin_lock_init(&dd->queue_lock);
+	mutex_init(&dd->core_lock);
+	init_waitqueue_head(&dd->continue_suspend);
+
+	if (!devm_request_mem_region(&pdev->dev, dd->mem_phys_addr,
+					dd->mem_size, SPI_DRV_NAME)) {
+		rc = -ENXIO;
+		goto err_probe_reqmem;
+	}
+
+	dd->base = devm_ioremap(&pdev->dev, dd->mem_phys_addr, dd->mem_size);
+	if (!dd->base) {
+		rc = -ENOMEM;
+		goto err_probe_reqmem;
+	}
+
+	pm_runtime_set_autosuspend_delay(&pdev->dev, MSEC_PER_SEC);
+	pm_runtime_use_autosuspend(&pdev->dev);
+	pm_runtime_enable(&pdev->dev);
+
+	dd->suspended = 1;
+	rc = spi_register_master(master);
+	if (rc)
+		goto err_probe_reg_master;
+
+	rc = sysfs_create_group(&(dd->dev->kobj), &dev_attr_grp);
+	if (rc) {
+		dev_err(&pdev->dev, "failed to create dev. attrs : %d\n", rc);
+		goto err_attrs;
+	}
+	rc = sysfs_create_file(&(dd->dev->kobj), &dev_attr_spi_qup_state.attr);
+	if (rc)
+		dev_err(&pdev->dev,
+			"failed to create spi_qup_state attr: %d\n", rc);
+	spi_debugfs_init(dd);
+
+	return 0;
+
+err_attrs:
+	spi_unregister_master(master);
+err_probe_reg_master:
+	pm_runtime_disable(&pdev->dev);
+err_probe_reqmem:
+err_probe_res:
+	spi_master_put(master);
+err_probe_exit:
+	return rc;
+}
+
+static int msm_spi_pm_suspend_runtime(struct device *device)
+{
+	struct platform_device *pdev = to_platform_device(device);
+	struct spi_master *master = platform_get_drvdata(pdev);
+	struct msm_spi	  *dd;
+	unsigned long	   flags;
+
+	dev_dbg(device, "pm_runtime: suspending...\n");
+	if (!master)
+		goto suspend_exit;
+	dd = spi_master_get_devdata(master);
+	if (!dd)
+		goto suspend_exit;
+
+	if (dd->suspended)
+		return 0;
+
+	/*
+	 * Make sure nothing is added to the queue while we're
+	 * suspending
+	 */
+	spin_lock_irqsave(&dd->queue_lock, flags);
+	dd->suspended = 1;
+	spin_unlock_irqrestore(&dd->queue_lock, flags);
+
+	/* Wait for transactions to end, or time out */
+	wait_event_interruptible(dd->continue_suspend,
+		!dd->transfer_pending);
+
+	mutex_lock(&dd->core_lock);
+	if (dd->pdata && !dd->pdata->is_shared && dd->use_dma) {
+		msm_spi_bam_pipe_disconnect(dd, &dd->bam.prod);
+		msm_spi_bam_pipe_disconnect(dd, &dd->bam.cons);
+	}
+	if (dd->pdata && !dd->pdata->is_shared)
+		put_local_resources(dd);
+
+	if (dd->pdata)
+		msm_spi_clk_path_vote(dd, 0);
+	mutex_unlock(&dd->core_lock);
+
+suspend_exit:
+	return 0;
+}
+
+static int msm_spi_pm_resume_runtime(struct device *device)
+{
+	struct platform_device *pdev = to_platform_device(device);
+	struct spi_master *master = platform_get_drvdata(pdev);
+	struct msm_spi	  *dd;
+	int               ret = 0;
+
+	dev_dbg(device, "pm_runtime: resuming...\n");
+	if (!master)
+		goto resume_exit;
+	dd = spi_master_get_devdata(master);
+	if (!dd)
+		goto resume_exit;
+
+	if (!dd->suspended)
+		return 0;
+	if (!dd->is_init_complete) {
+		ret = init_resources(pdev);
+		if (ret != 0)
+			return ret;
+		dd->is_init_complete = true;
+	}
+	msm_spi_clk_path_init(dd);
+	msm_spi_clk_path_vote(dd, dd->pdata->max_clock_speed);
+
+	if (!dd->pdata->is_shared) {
+		ret = get_local_resources(dd);
+		if (ret)
+			return ret;
+	}
+	if (!dd->pdata->is_shared && dd->use_dma) {
+		msm_spi_bam_pipe_connect(dd, &dd->bam.prod,
+				&dd->bam.prod.config);
+		msm_spi_bam_pipe_connect(dd, &dd->bam.cons,
+				&dd->bam.cons.config);
+	}
+	dd->suspended = 0;
+
+resume_exit:
+	return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int msm_spi_suspend(struct device *device)
+{
+	if (!pm_runtime_enabled(device) || !pm_runtime_suspended(device)) {
+		struct platform_device *pdev = to_platform_device(device);
+		struct spi_master *master = platform_get_drvdata(pdev);
+		struct msm_spi   *dd;
+
+		dev_dbg(device, "system suspend");
+		if (!master)
+			goto suspend_exit;
+		dd = spi_master_get_devdata(master);
+		if (!dd)
+			goto suspend_exit;
+		msm_spi_pm_suspend_runtime(device);
+
+		/*
+		 * set the device's runtime PM status to 'suspended'
+		 */
+		pm_runtime_disable(device);
+		pm_runtime_set_suspended(device);
+		pm_runtime_enable(device);
+	}
+suspend_exit:
+	return 0;
+}
+
+static int msm_spi_resume(struct device *device)
+{
+	/*
+	 * Rely on runtime PM to call resume when it is enabled.
+	 * Even when it is not, the first client transaction will
+	 * turn the clocks on and configure the GPIOs.
+	 */
+	dev_dbg(device, "system resume");
+	return 0;
+}
+#else
+#define msm_spi_suspend NULL
+#define msm_spi_resume NULL
+#endif
+
+static int msm_spi_remove(struct platform_device *pdev)
+{
+	struct spi_master *master = platform_get_drvdata(pdev);
+	struct msm_spi    *dd = spi_master_get_devdata(master);
+
+	spi_debugfs_exit(dd);
+	sysfs_remove_group(&pdev->dev.kobj, &dev_attr_grp);
+	sysfs_remove_file(&pdev->dev.kobj, &dev_attr_spi_qup_state.attr);
+
+	if (dd->dma_teardown)
+		dd->dma_teardown(dd);
+	pm_runtime_disable(&pdev->dev);
+	pm_runtime_set_suspended(&pdev->dev);
+	clk_put(dd->clk);
+	clk_put(dd->pclk);
+	msm_spi_clk_path_teardown(dd);
+	platform_set_drvdata(pdev, NULL);
+	spi_unregister_master(master);
+	spi_master_put(master);
+
+	return 0;
+}
+
+static const struct dev_pm_ops msm_spi_dev_pm_ops = {
+	SET_SYSTEM_SLEEP_PM_OPS(msm_spi_suspend, msm_spi_resume)
+	SET_RUNTIME_PM_OPS(msm_spi_pm_suspend_runtime,
+			msm_spi_pm_resume_runtime, NULL)
+};
+
+static struct platform_driver msm_spi_driver = {
+	.driver		= {
+		.name	= SPI_DRV_NAME,
+		.owner	= THIS_MODULE,
+		.pm		= &msm_spi_dev_pm_ops,
+		.of_match_table = msm_spi_dt_match,
+	},
+	.probe		= msm_spi_probe,
+	.remove		= msm_spi_remove,
+};
+
+module_platform_driver(msm_spi_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_VERSION("0.4");
+MODULE_ALIAS("platform:"SPI_DRV_NAME);
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/spi/spi_qsd.h	2019-01-22 16:16:26.707275421 +0100
@@ -0,0 +1,587 @@
+/* Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _SPI_QSD_H
+#define _SPI_QSD_H
+
+#include <linux/pinctrl/consumer.h>
+#define SPI_DRV_NAME                  "spi_qsd"
+
+#if defined(CONFIG_SPI_QSD) || defined(CONFIG_SPI_QSD_MODULE)
+
+#define QSD_REG(x) (x)
+#define QUP_REG(x)
+
+#define SPI_FIFO_WORD_CNT             0x0048
+
+#else
+
+#define QSD_REG(x)
+#define QUP_REG(x) (x)
+
+#define QUP_CONFIG                    0x0000 /* N & NO_INPUT/NO_OUPUT bits */
+#define QUP_ERROR_FLAGS_EN            0x030C
+#define QUP_ERR_MASK                  0x3
+#define SPI_OUTPUT_FIFO_WORD_CNT      0x010C
+#define SPI_INPUT_FIFO_WORD_CNT       0x0214
+#define QUP_MX_WRITE_COUNT            0x0150
+#define QUP_MX_WRITE_CNT_CURRENT      0x0154
+
+#define QUP_CONFIG_SPI_MODE           0x0100
+#define QUP_CONFIG_SPI_SLAVE          0x0400
+#endif
+
+#define GSBI_CTRL_REG                 0x0
+#define GSBI_SPI_CONFIG               0x30
+/* B-family only registers */
+#define QUP_HARDWARE_VER              0x0030
+#define QUP_HARDWARE_VER_2_1_1        0x20010001
+#define QUP_OPERATIONAL_MASK          0x0028
+#define QUP_OP_MASK_OUTPUT_SERVICE_FLAG 0x100
+#define QUP_OP_MASK_INPUT_SERVICE_FLAG  0x200
+
+#define QUP_ERROR_FLAGS               0x0308
+
+#define SPI_CONFIG                    QSD_REG(0x0000) QUP_REG(0x0300)
+#define SPI_IO_CONTROL                QSD_REG(0x0004) QUP_REG(0x0304)
+#define SPI_IO_MODES                  QSD_REG(0x0008) QUP_REG(0x0008)
+#define SPI_SW_RESET                  QSD_REG(0x000C) QUP_REG(0x000C)
+#define SPI_TIME_OUT_CURRENT          QSD_REG(0x0014) QUP_REG(0x0014)
+#define SPI_MX_OUTPUT_COUNT           QSD_REG(0x0018) QUP_REG(0x0100)
+#define SPI_MX_OUTPUT_CNT_CURRENT     QSD_REG(0x001C) QUP_REG(0x0104)
+#define SPI_MX_INPUT_COUNT            QSD_REG(0x0020) QUP_REG(0x0200)
+#define SPI_MX_INPUT_CNT_CURRENT      QSD_REG(0x0024) QUP_REG(0x0204)
+#define SPI_MX_READ_COUNT             QSD_REG(0x0028) QUP_REG(0x0208)
+#define SPI_MX_READ_CNT_CURRENT       QSD_REG(0x002C) QUP_REG(0x020C)
+#define SPI_OPERATIONAL               QSD_REG(0x0030) QUP_REG(0x0018)
+#define SPI_ERROR_FLAGS               QSD_REG(0x0034) QUP_REG(0x001C)
+#define SPI_ERROR_FLAGS_EN            QSD_REG(0x0038) QUP_REG(0x0020)
+#define SPI_DEASSERT_WAIT             QSD_REG(0x003C) QUP_REG(0x0310)
+#define SPI_OUTPUT_DEBUG              QSD_REG(0x0040) QUP_REG(0x0108)
+#define SPI_INPUT_DEBUG               QSD_REG(0x0044) QUP_REG(0x0210)
+#define SPI_TEST_CTRL                 QSD_REG(0x004C) QUP_REG(0x0024)
+#define SPI_OUTPUT_FIFO               QSD_REG(0x0100) QUP_REG(0x0110)
+#define SPI_INPUT_FIFO                QSD_REG(0x0200) QUP_REG(0x0218)
+#define SPI_STATE                     QSD_REG(SPI_OPERATIONAL) QUP_REG(0x0004)
+#define SPI_SLAVE_IRQ_STATUS		(0x0330)
+#define SPI_SLAVE_IRQ_EN		(0x0334)
+#define SPI_SLAVE_CONFIG		(0x0338)
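+
+/*
+ * Note on the QSD_REG()/QUP_REG() pairs above: exactly one of the two
+ * macros expands to its argument (the other expands to nothing), so a
+ * single definition such as SPI_CONFIG resolves to offset 0x0000 on QSD
+ * targets and 0x0300 on QUP targets.
+ */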
+
+/* QUP_CONFIG fields */
+#define SPI_CFG_N                     0x0000001F
+#define SPI_NO_INPUT                  0x00000080
+#define SPI_NO_OUTPUT                 0x00000040
+#define SPI_EN_EXT_OUT_FLAG           0x00010000
+#define QUP_CFG_MODE                  0x00000F00
+#define APP_CLK_ON_EN			BIT(12)
+#define CORE_CLK_ON_EN			BIT(13)
+#define FIFO_CLK_ON_EN			BIT(14)
+#define CORE_EX_CLK_ON_EN		BIT(15)
+
+/* SPI_CONFIG fields */
+#define SPI_CFG_LOOPBACK              0x00000100
+#define SPI_CFG_INPUT_FIRST           0x00000200
+#define SPI_CFG_HS_MODE               0x00000400
+#define SPI_CFG_SLAVE_OP              0x00000020
+
+/* SPI_IO_CONTROL fields */
+#define SPI_IO_C_FORCE_CS             0x00000800
+#define SPI_IO_C_CLK_IDLE_HIGH        0x00000400
+#define SPI_IO_C_MX_CS_MODE           0x00000100
+#define SPI_IO_C_CS_N_POLARITY        0x000000F0
+#define SPI_IO_C_CS_N_POLARITY_0      0x00000010
+#define SPI_IO_C_CS_SELECT            0x0000000C
+#define SPI_IO_C_TRISTATE_CS          0x00000002
+#define SPI_IO_C_NO_TRI_STATE         0x00000001
+
+/* SPI_IO_MODES fields */
+#define SPI_IO_M_OUTPUT_BIT_SHIFT_EN  QSD_REG(0x00004000) QUP_REG(0x00010000)
+#define SPI_IO_M_PACK_EN              QSD_REG(0x00002000) QUP_REG(0x00008000)
+#define SPI_IO_M_UNPACK_EN            QSD_REG(0x00001000) QUP_REG(0x00004000)
+#define SPI_IO_M_INPUT_MODE           QSD_REG(0x00000C00) QUP_REG(0x00003000)
+#define SPI_IO_M_OUTPUT_MODE          QSD_REG(0x00000300) QUP_REG(0x00000C00)
+#define SPI_IO_M_INPUT_FIFO_SIZE      QSD_REG(0x000000C0) QUP_REG(0x00000380)
+#define SPI_IO_M_INPUT_BLOCK_SIZE     QSD_REG(0x00000030) QUP_REG(0x00000060)
+#define SPI_IO_M_OUTPUT_FIFO_SIZE     QSD_REG(0x0000000C) QUP_REG(0x0000001C)
+#define SPI_IO_M_OUTPUT_BLOCK_SIZE    QSD_REG(0x00000003) QUP_REG(0x00000003)
+
+#define INPUT_BLOCK_SZ_SHIFT          QSD_REG(4)          QUP_REG(5)
+#define INPUT_FIFO_SZ_SHIFT           QSD_REG(6)          QUP_REG(7)
+#define OUTPUT_BLOCK_SZ_SHIFT         QSD_REG(0)          QUP_REG(0)
+#define OUTPUT_FIFO_SZ_SHIFT          QSD_REG(2)          QUP_REG(2)
+#define OUTPUT_MODE_SHIFT             QSD_REG(8)          QUP_REG(10)
+#define INPUT_MODE_SHIFT              QSD_REG(10)         QUP_REG(12)
+
+/* SPI_OPERATIONAL fields */
+#define SPI_OP_IN_BLK_RD_REQ_FLAG     0x00002000
+#define SPI_OP_OUT_BLK_WR_REQ_FLAG    0x00001000
+#define SPI_OP_MAX_INPUT_DONE_FLAG    0x00000800
+#define SPI_OP_MAX_OUTPUT_DONE_FLAG   0x00000400
+#define SPI_OP_INPUT_SERVICE_FLAG     0x00000200
+#define SPI_OP_OUTPUT_SERVICE_FLAG    0x00000100
+#define SPI_OP_INPUT_FIFO_FULL        0x00000080
+#define SPI_OP_OUTPUT_FIFO_FULL       0x00000040
+#define SPI_OP_IP_FIFO_NOT_EMPTY      0x00000020
+#define SPI_OP_OP_FIFO_NOT_EMPTY      0x00000010
+#define SPI_OP_STATE_VALID            0x00000004
+#define SPI_OP_STATE                  0x00000003
+
+#define SPI_OP_STATE_CLEAR_BITS       0x2
+
+/* SPI SLAVE IRQ_STATUS/EN fields */
+#define CS_N_ASSERT			BIT(0)
+#define CS_N_DEASSERT			BIT(1)
+#define CS_N_ETXT			BIT(2)
+#define TX_UNDERFLOW			BIT(3)
+#define RX_OVERFLOW_WAIT_EOT		BIT(4)
+#define RX_OVERFLOW_NO_EOT		BIT(5)
+#define CS_N_ERXT			BIT(6)
+
+/* SPI_SLAVE_CONFIG Fields */
+#define RX_N_SHIFT			BIT(0)
+#define PAUSE_ON_ERR_DIS		BIT(1)
+#define SPI_S_CGC_EN			BIT(2)
+#define RX_UNBALANCED_MASK		BIT(3)
+#define SLAVE_DIS_RESET_ST		BIT(4)
+#define SLAVE_AUTO_PAUSE_EOT		BIT(7)
+
+#define SPI_PINCTRL_STATE_DEFAULT "spi_default"
+#define SPI_PINCTRL_STATE_SLEEP "spi_sleep"
+
+enum msm_spi_state {
+	SPI_OP_STATE_RESET = 0x00000000,
+	SPI_OP_STATE_RUN   = 0x00000001,
+	SPI_OP_STATE_PAUSE = 0x00000003,
+};
+
+/* SPI_ERROR_FLAGS fields */
+#define SPI_ERR_OUTPUT_OVER_RUN_ERR   0x00000020
+#define SPI_ERR_INPUT_UNDER_RUN_ERR   0x00000010
+#define SPI_ERR_OUTPUT_UNDER_RUN_ERR  0x00000008
+#define SPI_ERR_INPUT_OVER_RUN_ERR    0x00000004
+#define SPI_ERR_CLK_OVER_RUN_ERR      0x00000002
+#define SPI_ERR_CLK_UNDER_RUN_ERR     0x00000001
+
+/*
+ * We don't allow transactions larger than 4K-64 or 64K-64 due to the
+ * mx_input/output_cnt register size.
+ */
+#define SPI_MAX_TRANSFERS             QSD_REG(0xFC0) QUP_REG(0xFC0)
+#define SPI_MAX_LEN                   (SPI_MAX_TRANSFERS * dd->bytes_per_word)
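+
+/*
+ * Worked example: 0xFC0 = 4032 transfer words = 4K - 64, matching the
+ * comment above; at 4 bytes per word SPI_MAX_LEN comes to 16128 bytes.
+ * The 64K - 64 figure presumably applies to targets with wider count
+ * registers.
+ */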
+
+#define SPI_NUM_CHIPSELECTS           4
+#define SPI_SUPPORTED_MODES  (SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LOOP)
+
+/* high speed mode is when bus rate is greater than 26MHz */
+#define SPI_HS_MIN_RATE               (26000000)
+
+#define SPI_DELAY_THRESHOLD           1
+/* Default timeout is 10 milliseconds */
+#define SPI_DEFAULT_TIMEOUT           10
+/* 250 microseconds */
+#define SPI_TRYLOCK_DELAY             250
+
+/* Data Mover burst size */
+#define DM_BURST_SIZE                 16
+/* Data Mover commands should be aligned to 64 bit(8 bytes) */
+#define DM_BYTE_ALIGN                 8
+
+#if defined(CONFIG_ARM64) || defined(CONFIG_LPAE)
+#define spi_dma_mask(dev)   (dma_set_mask((dev), DMA_BIT_MASK(36)))
+#else
+#define spi_dma_mask(dev)   (dma_set_mask((dev), DMA_BIT_MASK(32)))
+#endif
+
+
+enum msm_spi_qup_version {
+	SPI_QUP_VERSION_NONE    = 0x0,
+	SPI_QUP_VERSION_BFAM    = 0x2,
+	SPI_QUP_VERSION_SPI_SLV = 0x26,
+};
+
+enum msm_spi_pipe_direction {
+	SPI_BAM_CONSUMER_PIPE   = 0x0,
+	SPI_BAM_PRODUCER_PIPE   = 0x1,
+};
+
+#define SPI_BAM_MAX_DESC_NUM      32
+#define SPI_MAX_TRFR_BTWN_RESETS  ((64 * 1024) - 16)  /* 64KB - 16byte */
+
+enum msm_spi_clk_path_vec_idx {
+	MSM_SPI_CLK_PATH_SUSPEND_VEC = 0,
+	MSM_SPI_CLK_PATH_RESUME_VEC  = 1,
+};
+#define MSM_SPI_CLK_PATH_AVRG_BW(dd) (76800000)
+#define MSM_SPI_CLK_PATH_BRST_BW(dd) (76800000)
+
+static char const * const spi_rsrcs[] = {
+	"spi_clk",
+	"spi_miso",
+	"spi_mosi"
+};
+
+static char const * const spi_cs_rsrcs[] = {
+	"spi_cs",
+	"spi_cs1",
+	"spi_cs2",
+	"spi_cs3",
+};
+
+enum msm_spi_mode {
+	SPI_FIFO_MODE  = 0x0,  /* 00 */
+	SPI_BLOCK_MODE = 0x1,  /* 01 */
+	SPI_BAM_MODE   = 0x3,  /* 11 */
+	SPI_MODE_NONE  = 0xFF, /* invalid value */
+};
+
+/* Structure for SPI CS GPIOs */
+struct spi_cs_gpio {
+	int  gpio_num;
+	bool valid;
+};
+
+#ifdef CONFIG_DEBUG_FS
+struct msm_spi_debugfs_data {
+	int offset;
+	struct msm_spi *dd;
+};
+/* Used to create debugfs entries */
+static struct msm_spi_regs {
+	const char *name;
+	mode_t mode;
+	int offset;
+} debugfs_spi_regs[] = {
+	{"config",                S_IRUGO | S_IWUSR, SPI_CONFIG },
+	{"io_control",            S_IRUGO | S_IWUSR, SPI_IO_CONTROL },
+	{"io_modes",              S_IRUGO | S_IWUSR, SPI_IO_MODES },
+	{"sw_reset",                        S_IWUSR, SPI_SW_RESET },
+	{"time_out_current",      S_IRUGO,           SPI_TIME_OUT_CURRENT },
+	{"mx_output_count",       S_IRUGO | S_IWUSR, SPI_MX_OUTPUT_COUNT },
+	{"mx_output_cnt_current", S_IRUGO,
+						SPI_MX_OUTPUT_CNT_CURRENT },
+	{"mx_input_count",        S_IRUGO | S_IWUSR, SPI_MX_INPUT_COUNT },
+	{"mx_input_cnt_current",  S_IRUGO,           SPI_MX_INPUT_CNT_CURRENT },
+	{"mx_read_count",         S_IRUGO | S_IWUSR, SPI_MX_READ_COUNT, },
+	{"mx_read_cnt_current",   S_IRUGO,           SPI_MX_READ_CNT_CURRENT },
+	{"operational",           S_IRUGO | S_IWUSR, SPI_OPERATIONAL },
+	{"error_flags",           S_IRUGO | S_IWUSR, SPI_ERROR_FLAGS },
+	{"error_flags_en",        S_IRUGO | S_IWUSR, SPI_ERROR_FLAGS_EN },
+	{"deassert_wait",         S_IRUGO | S_IWUSR, SPI_DEASSERT_WAIT },
+	{"output_debug",          S_IRUGO,           SPI_OUTPUT_DEBUG },
+	{"input_debug",           S_IRUGO,           SPI_INPUT_DEBUG },
+	{"test_ctrl",             S_IRUGO | S_IWUSR, SPI_TEST_CTRL },
+	{"output_fifo",                     S_IWUSR, SPI_OUTPUT_FIFO },
+	{"input_fifo",		  S_IRUSR,           SPI_INPUT_FIFO },
+	{"spi_state",             S_IRUGO | S_IWUSR, SPI_STATE },
+#if defined(CONFIG_SPI_QSD) || defined(CONFIG_SPI_QSD_MODULE)
+	{"fifo_word_cnt",         S_IRUGO,           SPI_FIFO_WORD_CNT },
+#else
+	{"qup_config",            S_IRUGO | S_IWUSR, QUP_CONFIG },
+	{"qup_error_flags",       S_IRUGO | S_IWUSR, QUP_ERROR_FLAGS },
+	{"qup_error_flags_en",    S_IRUGO | S_IWUSR, QUP_ERROR_FLAGS_EN },
+	{"mx_write_cnt",          S_IRUGO | S_IWUSR, QUP_MX_WRITE_COUNT },
+	{"mx_write_cnt_current",  S_IRUGO,           QUP_MX_WRITE_CNT_CURRENT },
+	{"output_fifo_word_cnt",  S_IRUGO,           SPI_OUTPUT_FIFO_WORD_CNT },
+	{"input_fifo_word_cnt",   S_IRUGO,           SPI_INPUT_FIFO_WORD_CNT },
+#endif
+};
+#endif
+
+struct msm_spi_bam_pipe {
+	const char              *name;
+	struct sps_pipe         *handle;
+	struct sps_connect       config;
+	bool                     teardown_required;
+};
+
+struct msm_spi_bam {
+	void __iomem            *base;
+	phys_addr_t              phys_addr;
+	uintptr_t                handle;
+	u32                      irq;
+	struct msm_spi_bam_pipe  prod;
+	struct msm_spi_bam_pipe  cons;
+	bool                     deregister_required;
+	u32			 curr_rx_bytes_recvd;
+	u32			 curr_tx_bytes_sent;
+	u32			 bam_rx_len;
+	u32			 bam_tx_len;
+};
+
+struct msm_spi {
+	u8                      *read_buf;
+	const u8                *write_buf;
+	void __iomem            *base;
+	struct device           *dev;
+	spinlock_t               queue_lock;
+	struct mutex             core_lock;
+	struct spi_device       *spi;
+	struct spi_transfer     *cur_transfer;
+	struct completion        tx_transfer_complete;
+	struct completion        rx_transfer_complete;
+	struct clk              *clk;    /* core clock */
+	struct clk              *pclk;   /* interface clock */
+	struct msm_bus_client_handle *bus_cl_hdl;
+	unsigned long            mem_phys_addr;
+	size_t                   mem_size;
+	int                      input_fifo_size;
+	int                      output_fifo_size;
+	u32                      rx_bytes_remaining;
+	u32                      tx_bytes_remaining;
+	u32                      clock_speed;
+	int                      irq_in;
+	int                      read_xfr_cnt;
+	int                      write_xfr_cnt;
+	int                      write_len;
+	int                      read_len;
+#if defined(CONFIG_SPI_QSD) || defined(CONFIG_SPI_QSD_MODULE)
+	int                      irq_out;
+	int                      irq_err;
+#endif
+	int                      bytes_per_word;
+	bool                     suspended;
+	bool                     transfer_pending;
+	wait_queue_head_t        continue_suspend;
+	/* DMA data */
+	enum msm_spi_mode        tx_mode;
+	enum msm_spi_mode        rx_mode;
+	bool                     use_dma;
+	int                      tx_dma_chan;
+	int                      tx_dma_crci;
+	int                      rx_dma_chan;
+	int                      rx_dma_crci;
+	int                      (*dma_init) (struct msm_spi *dd);
+	void                     (*dma_teardown) (struct msm_spi *dd);
+	struct msm_spi_bam       bam;
+	int                      input_block_size;
+	int                      output_block_size;
+	int                      input_burst_size;
+	int                      output_burst_size;
+	atomic_t                 rx_irq_called;
+	atomic_t                 tx_irq_called;
+	/* Used to pad messages unaligned to block size */
+	u8                       *tx_padding;
+	dma_addr_t               tx_padding_dma;
+	u8                       *rx_padding;
+	dma_addr_t               rx_padding_dma;
+	u32                      tx_unaligned_len;
+	u32                      rx_unaligned_len;
+	/* DMA statistics */
+	int                      stat_rx;
+	int                      stat_tx;
+#ifdef CONFIG_DEBUG_FS
+	struct dentry *dent_spi;
+	struct dentry *debugfs_spi_regs[ARRAY_SIZE(debugfs_spi_regs)];
+	struct msm_spi_debugfs_data reg_data[ARRAY_SIZE(debugfs_spi_regs)];
+#endif
+	struct msm_spi_platform_data *pdata; /* Platform data */
+	/* When set indicates multiple transfers in a single message */
+	bool                     rx_done;
+	bool                     tx_done;
+	u32                      cur_msg_len;
+	/* Used in FIFO mode to keep track of the transfer being processed */
+	struct spi_transfer     *cur_tx_transfer;
+	struct spi_transfer     *cur_rx_transfer;
+	/* Temporary buffer used for WR-WR or WR-RD transfers */
+	u8                      *temp_buf;
+	/* GPIO pin numbers for SPI clk, miso and mosi */
+	int                      spi_gpios[ARRAY_SIZE(spi_rsrcs)];
+	/* SPI CS GPIOs for each slave */
+	struct spi_cs_gpio       cs_gpios[ARRAY_SIZE(spi_cs_rsrcs)];
+	enum msm_spi_qup_version qup_ver;
+	int			 max_trfr_len;
+	u16			 xfrs_delay_usec;
+	struct pinctrl		*pinctrl;
+	struct pinctrl_state	*pins_active;
+	struct pinctrl_state	*pins_sleep;
+	bool			is_init_complete;
+	bool			pack_words;
+	bool			slv_support;
+	atomic_t                qup_state;
+};
+
+/* Forward declaration */
+static irqreturn_t msm_spi_input_irq(int irq, void *dev_id);
+static irqreturn_t msm_spi_output_irq(int irq, void *dev_id);
+static irqreturn_t msm_spi_error_irq(int irq, void *dev_id);
+static inline int msm_spi_set_state(struct msm_spi *dd,
+				    enum msm_spi_state state);
+static void msm_spi_write_word_to_fifo(struct msm_spi *dd);
+static inline void msm_spi_write_rmn_to_fifo(struct msm_spi *dd);
+static irqreturn_t msm_spi_qup_irq(int irq, void *dev_id);
+
+#if defined(CONFIG_SPI_QSD) || defined(CONFIG_SPI_QSD_MODULE)
+static inline void msm_spi_disable_irqs(struct msm_spi *dd)
+{
+	disable_irq(dd->irq_in);
+	disable_irq(dd->irq_out);
+	disable_irq(dd->irq_err);
+}
+
+static inline void msm_spi_enable_irqs(struct msm_spi *dd)
+{
+	enable_irq(dd->irq_in);
+	enable_irq(dd->irq_out);
+	enable_irq(dd->irq_err);
+}
+
+static inline int msm_spi_request_irq(struct msm_spi *dd,
+				struct platform_device *pdev,
+				struct spi_master *master)
+{
+	int rc;
+
+	dd->irq_in  = platform_get_irq(pdev, 0);
+	dd->irq_out = platform_get_irq(pdev, 1);
+	dd->irq_err = platform_get_irq(pdev, 2);
+	if ((dd->irq_in < 0) || (dd->irq_out < 0) || (dd->irq_err < 0))
+		return -EINVAL;
+
+	rc = devm_request_irq(dd->dev, dd->irq_in, msm_spi_input_irq,
+		IRQF_TRIGGER_RISING, pdev->name, dd);
+	if (rc)
+		goto error_irq;
+
+	rc = devm_request_irq(dd->dev, dd->irq_out, msm_spi_output_irq,
+		IRQF_TRIGGER_RISING, pdev->name, dd);
+	if (rc)
+		goto error_irq;
+
+	rc = devm_request_irq(dd->dev, dd->irq_err, msm_spi_error_irq,
+		IRQF_TRIGGER_RISING, pdev->name, master);
+	if (rc)
+		goto error_irq;
+
+error_irq:
+	return rc;
+}
+
+static inline void msm_spi_get_clk_err(struct msm_spi *dd, u32 *spi_err) {}
+static inline void msm_spi_ack_clk_err(struct msm_spi *dd) {}
+static inline void msm_spi_set_qup_config(struct msm_spi *dd, int bpw) {}
+
+static inline int  msm_spi_prepare_for_write(struct msm_spi *dd) { return 0; }
+static inline void msm_spi_start_write(struct msm_spi *dd, u32 read_count)
+{
+	msm_spi_write_word_to_fifo(dd);
+}
+static inline void msm_spi_set_write_count(struct msm_spi *dd, int val) {}
+
+static inline void msm_spi_complete(struct msm_spi *dd)
+{
+	/* struct msm_spi only carries per-direction completions */
+	complete(&dd->tx_transfer_complete);
+	complete(&dd->rx_transfer_complete);
+}
+
+static inline void msm_spi_enable_error_flags(struct msm_spi *dd)
+{
+	writel_relaxed(0x0000007B, dd->base + SPI_ERROR_FLAGS_EN);
+}
+
+static inline void msm_spi_clear_error_flags(struct msm_spi *dd)
+{
+	writel_relaxed(0x0000007F, dd->base + SPI_ERROR_FLAGS);
+}
+
+#else
+/* In QUP the same interrupt line is used for input, output and error */
+static inline int msm_spi_request_irq(struct msm_spi *dd,
+				struct platform_device *pdev,
+				struct spi_master *master)
+{
+	dd->irq_in  = platform_get_irq(pdev, 0);
+	if (dd->irq_in < 0)
+		return -EINVAL;
+
+	return devm_request_irq(dd->dev, dd->irq_in, msm_spi_qup_irq,
+		IRQF_TRIGGER_HIGH, pdev->name, dd);
+}
+
+static inline void msm_spi_disable_irqs(struct msm_spi *dd)
+{
+	disable_irq(dd->irq_in);
+}
+
+static inline void msm_spi_enable_irqs(struct msm_spi *dd)
+{
+	enable_irq(dd->irq_in);
+}
+
+static inline void msm_spi_get_clk_err(struct msm_spi *dd, u32 *spi_err)
+{
+	*spi_err = readl_relaxed(dd->base + QUP_ERROR_FLAGS);
+}
+
+static inline void msm_spi_ack_clk_err(struct msm_spi *dd)
+{
+	writel_relaxed(QUP_ERR_MASK, dd->base + QUP_ERROR_FLAGS);
+}
+
+static inline void
+msm_spi_set_bpw_and_no_io_flags(struct msm_spi *dd, u32 *config, int n);
+
+/**
+ * msm_spi_set_qup_config: set QUP_CONFIG to no_input, no_output, and N bits
+ */
+static inline void msm_spi_set_qup_config(struct msm_spi *dd, int bpw)
+{
+	u32 qup_config = readl_relaxed(dd->base + QUP_CONFIG);
+
+	msm_spi_set_bpw_and_no_io_flags(dd, &qup_config, bpw-1);
+	writel_relaxed(qup_config | QUP_CONFIG_SPI_MODE, dd->base + QUP_CONFIG);
+}
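+
+/*
+ * The N value handed to msm_spi_set_bpw_and_no_io_flags() above is
+ * bpw - 1, which presumably lands in the 5-bit SPI_CFG_N field of
+ * QUP_CONFIG: e.g. 8-bit words are programmed as N = 7, and 32-bit
+ * words (N = 31) are the largest the field can encode.
+ */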
+
+static inline int msm_spi_prepare_for_write(struct msm_spi *dd)
+{
+	if (msm_spi_set_state(dd, SPI_OP_STATE_RUN))
+		return -EINVAL;
+	if (msm_spi_set_state(dd, SPI_OP_STATE_PAUSE))
+		return -EINVAL;
+	return 0;
+}
+
+static inline void msm_spi_start_write(struct msm_spi *dd, u32 read_count)
+{
+	msm_spi_write_rmn_to_fifo(dd);
+}
+
+static inline void msm_spi_set_write_count(struct msm_spi *dd, int val)
+{
+	writel_relaxed(val, dd->base + QUP_MX_WRITE_COUNT);
+}
+
+static inline void msm_spi_complete(struct msm_spi *dd)
+{
+	dd->tx_done = true;
+	dd->rx_done = true;
+}
+
+static inline void msm_spi_enable_error_flags(struct msm_spi *dd)
+{
+	if (dd->qup_ver == SPI_QUP_VERSION_BFAM)
+		writel_relaxed(
+			SPI_ERR_CLK_UNDER_RUN_ERR | SPI_ERR_CLK_OVER_RUN_ERR,
+			dd->base + SPI_ERROR_FLAGS_EN);
+	else
+		writel_relaxed(0x00000078, dd->base + SPI_ERROR_FLAGS_EN);
+}
+
+static inline void msm_spi_clear_error_flags(struct msm_spi *dd)
+{
+	if (dd->qup_ver == SPI_QUP_VERSION_BFAM)
+		writel_relaxed(
+			SPI_ERR_CLK_UNDER_RUN_ERR | SPI_ERR_CLK_OVER_RUN_ERR,
+			dd->base + SPI_ERROR_FLAGS);
+	else
+		writel_relaxed(0x0000007C, dd->base + SPI_ERROR_FLAGS);
+}
+
+#endif
+#endif
diff -Nruw linux-4.4.115-fbx/drivers/staging/android/fiq_debugger./Kconfig linux-4.4.115-fbx/drivers/staging/android/fiq_debugger/Kconfig
--- linux-4.4.115-fbx/drivers/staging/android/fiq_debugger./Kconfig	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/staging/android/fiq_debugger/Kconfig	2019-10-29 09:26:24.845214941 +0100
@@ -0,0 +1,58 @@
+config FIQ_DEBUGGER
+	bool "FIQ Mode Serial Debugger"
+	default n
+	depends on ARM || ARM64
+	help
+	  The FIQ serial debugger can accept commands even when the
+	  kernel is unresponsive due to being stuck with interrupts
+	  disabled.
+
+config FIQ_DEBUGGER_NO_SLEEP
+	bool "Keep serial debugger active"
+	depends on FIQ_DEBUGGER
+	default n
+	help
+	  Enables the serial debugger at boot. Passing
+	  fiq_debugger.no_sleep on the kernel command line will
+	  override this config option.
+
+config FIQ_DEBUGGER_WAKEUP_IRQ_ALWAYS_ON
+	bool "Don't disable wakeup IRQ when debugger is active"
+	depends on FIQ_DEBUGGER
+	default n
+	help
+	  Don't disable the wakeup irq when enabling the uart clock.  This will
+	  cause extra interrupts, but it makes the serial debugger usable on
+	  some MSM radio builds that ignore the uart clock request in power
+	  collapse.
+
+config FIQ_DEBUGGER_CONSOLE
+	bool "Console on FIQ Serial Debugger port"
+	depends on FIQ_DEBUGGER
+	default n
+	help
+	  Enables a console so that printk messages are displayed on
+	  the debugger serial port as they occur.
+
+config FIQ_DEBUGGER_CONSOLE_DEFAULT_ENABLE
+	bool "Put the FIQ debugger into console mode by default"
+	depends on FIQ_DEBUGGER_CONSOLE
+	default n
+	help
+	  If enabled, this puts the fiq debugger into console mode by default.
+	  Otherwise, the fiq debugger will start out in debug mode.
+
+config FIQ_DEBUGGER_UART_OVERLAY
+	bool "Install uart DT overlay"
+	depends on FIQ_DEBUGGER
+	select OF_OVERLAY
+	default n
+	help
+	  If enabled, the fiq debugger calls fiq_debugger_uart_overlay(),
+	  which applies the uart_overlay@0 overlay to disable the regular
+	  uart.
+
+config FIQ_WATCHDOG
+	bool
+	select FIQ_DEBUGGER
+	select PSTORE_RAM
+	default n
diff -Nruw linux-4.4.115-fbx/drivers/staging/android/fiq_debugger./Makefile linux-4.4.115-fbx/drivers/staging/android/fiq_debugger/Makefile
--- linux-4.4.115-fbx/drivers/staging/android/fiq_debugger./Makefile	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/staging/android/fiq_debugger/Makefile	2019-10-29 09:26:24.845214941 +0100
@@ -0,0 +1,4 @@
+obj-y			+= fiq_debugger.o
+obj-$(CONFIG_ARM)	+= fiq_debugger_arm.o
+obj-$(CONFIG_ARM64)	+= fiq_debugger_arm64.o
+obj-$(CONFIG_FIQ_WATCHDOG)	+= fiq_watchdog.o
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/staging/android/ion/ion_cma_secure_heap.c	2019-01-22 16:16:26.711275457 +0100
@@ -0,0 +1,896 @@
+/*
+ * drivers/staging/android/ion/ion_cma_secure_heap.c
+ *
+ * Copyright (C) Linaro 2012
+ * Author: <benjamin.gaignard@linaro.org> for ST-Ericsson.
+ * Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/device.h>
+#include <linux/ion.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/dma-mapping.h>
+#include <linux/msm_ion.h>
+#include <trace/events/kmem.h>
+
+#include <soc/qcom/secure_buffer.h>
+#include <asm/cacheflush.h>
+
+/* for ion_heap_ops structure */
+#include "ion_priv.h"
+#include "msm/ion_cp_common.h"
+
+#define ION_CMA_ALLOCATE_FAILED NULL
+
+struct ion_secure_cma_non_contig_info {
+	dma_addr_t phys;
+	int len;
+	struct list_head entry;
+};
+
+struct ion_secure_cma_buffer_info {
+	dma_addr_t phys;
+	struct sg_table *table;
+	bool is_cached;
+	int len;
+	struct list_head non_contig_list;
+	unsigned long ncelems;
+};
+
+struct ion_cma_alloc_chunk {
+	void *cpu_addr;
+	struct list_head entry;
+	dma_addr_t handle;
+	unsigned long chunk_size;
+	atomic_t cnt;
+};
+
+struct ion_cma_secure_heap {
+	struct device *dev;
+	/*
+	 * Protects against races between threads allocating memory/adding to
+	 * the pool at the same time (e.g. thread 1 adds to the pool, thread 2
+	 * allocates thread 1's memory before thread 1 knows it needs to
+	 * allocate more).
+	 * Admittedly this is fairly coarse grained right now, but the chance
+	 * of contention on this lock is low. This can be revisited if that
+	 * ever changes.
+	 */
+	struct mutex alloc_lock;
+	/*
+	 * protects the list of memory chunks in this pool
+	 */
+	struct mutex chunk_lock;
+	struct ion_heap heap;
+	/*
+	 * Bitmap for allocation. This contains the aggregate of all chunks.
+	 */
+	unsigned long *bitmap;
+	/*
+	 * List of all allocated chunks
+	 *
+	 * This is where things get 'clever'. Individual allocations from
+	 * dma_alloc_coherent must be allocated and freed in one chunk.
+	 * We don't just want to limit the allocations to those confined
+	 * within a single chunk (if clients allocate n small chunks we would
+	 * never be able to use the combined size). The bitmap allocator is
+	 * used to find the contiguous region and the parts of the chunks are
+	 * marked off as used. The chunks won't be freed in the shrinker until
+	 * the usage is actually zero.
+	 */
+	struct list_head chunks;
+	int npages;
+	ion_phys_addr_t base;
+	struct work_struct work;
+	unsigned long last_alloc;
+	struct shrinker shrinker;
+	atomic_t total_allocated;
+	atomic_t total_pool_size;
+	atomic_t total_leaked;
+	unsigned long heap_size;
+	unsigned long default_prefetch_size;
+};
+
+static void ion_secure_pool_pages(struct work_struct *work);
+
+/*
+ * Create a scatter-list for the already allocated DMA buffer.
+ * This function could be replaced by dma_common_get_sgtable
+ * once it becomes available.
+ */
+int ion_secure_cma_get_sgtable(struct device *dev, struct sg_table *sgt,
+			dma_addr_t handle, size_t size)
+{
+	struct page *page = pfn_to_page(PFN_DOWN(handle));
+	int ret;
+
+	ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
+	if (unlikely(ret))
+		return ret;
+
+	sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
+	sg_dma_address(sgt->sgl) = handle;
+	return 0;
+}
+
+static int ion_secure_cma_add_to_pool(
+					struct ion_cma_secure_heap *sheap,
+					unsigned long len,
+					bool prefetch)
+{
+	void *cpu_addr;
+	dma_addr_t handle;
+	DEFINE_DMA_ATTRS(attrs);
+	int ret = 0;
+	struct ion_cma_alloc_chunk *chunk;
+
+	trace_ion_secure_cma_add_to_pool_start(len,
+				atomic_read(&sheap->total_pool_size), prefetch);
+	mutex_lock(&sheap->chunk_lock);
+
+	chunk = kzalloc(sizeof(*chunk), GFP_KERNEL);
+	if (!chunk) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
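+	/*
+	 * Secure pool memory is never touched by the CPU, so no kernel
+	 * mapping is created and the zeroing pass is skipped (presumably
+	 * the secure world sanitizes the memory instead).
+	 */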
+	dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &attrs);
+	dma_set_attr(DMA_ATTR_SKIP_ZEROING, &attrs);
+
+	cpu_addr = dma_alloc_attrs(sheap->dev, len, &handle, GFP_KERNEL,
+								&attrs);
+
+	if (!cpu_addr) {
+		ret = -ENOMEM;
+		goto out_free;
+	}
+
+	chunk->cpu_addr = cpu_addr;
+	chunk->handle = handle;
+	chunk->chunk_size = len;
+	atomic_set(&chunk->cnt, 0);
+	list_add(&chunk->entry, &sheap->chunks);
+	atomic_add(len, &sheap->total_pool_size);
+	 /* clear the bitmap to indicate this region can be allocated from */
+	bitmap_clear(sheap->bitmap, (handle - sheap->base) >> PAGE_SHIFT,
+				len >> PAGE_SHIFT);
+	goto out;
+
+out_free:
+	kfree(chunk);
+out:
+	mutex_unlock(&sheap->chunk_lock);
+
+	trace_ion_secure_cma_add_to_pool_end(len,
+				atomic_read(&sheap->total_pool_size), prefetch);
+
+	return ret;
+}
+
+static void ion_secure_pool_pages(struct work_struct *work)
+{
+	struct ion_cma_secure_heap *sheap = container_of(work,
+			struct ion_cma_secure_heap, work);
+
+	ion_secure_cma_add_to_pool(sheap, sheap->last_alloc, true);
+}
+/*
+ * @s1: start of the first region
+ * @l1: length of the first region
+ * @s2: start of the second region
+ * @l2: length of the second region
+ *
+ * Returns the total number of bytes that intersect.
+ *
+ * s1 is the region we are trying to clear so s2 may be subsumed by s1 but the
+ * maximum size to clear should only ever be l1
+ *
+ */
+static unsigned int intersect(unsigned long s1, unsigned long l1,
+				unsigned long s2, unsigned long l2)
+{
+	unsigned long base1 = s1;
+	unsigned long end1 = s1 + l1;
+	unsigned long base2 = s2;
+	unsigned long end2 = s2 + l2;
+
+	/* Case 0: The regions don't overlap at all */
+	if (!(base1 < end2 && base2 < end1))
+		return 0;
+
+	/* Case 1: region 2 is subsumed by region 1 */
+	if (base1 <= base2 && end2 <= end1)
+		return l2;
+
+	/* case 2: region 1 is subsumed by region 2 */
+	if (base2 <= base1 && end1 <= end2)
+		return l1;
+
+	/* case 3: region1 overlaps region2 on the bottom */
+	if (base2 < end1 && base2 > base1)
+		return end1 - base2;
+
+	/* case 4: region 2 overlaps region1 on the bottom */
+	if (base1 < end2 && base1 > base2)
+		return end2 - base1;
+
+	pr_err("Bad math! Did not detect chunks correctly! %lx %lx %lx %lx\n",
+			s1, l1, s2, l2);
+	BUG();
+}
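+
+/*
+ * Worked example: clearing s1 = 0x2000, l1 = 0x2000 against a chunk at
+ * s2 = 0x1000, l2 = 0x3000 hits case 2 (both regions end at 0x4000 and
+ * region 1 starts inside region 2), so the full l1 = 0x2000 bytes
+ * intersect.
+ */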
+
+int ion_secure_cma_prefetch(struct ion_heap *heap, void *data)
+{
+	unsigned long len = (unsigned long)data;
+	struct ion_cma_secure_heap *sheap =
+		container_of(heap, struct ion_cma_secure_heap, heap);
+	unsigned long diff;
+
+	if ((int) heap->type != ION_HEAP_TYPE_SECURE_DMA)
+		return -EINVAL;
+
+	if (len == 0)
+		len = sheap->default_prefetch_size;
+
+	/*
+	 * Only prefetch as much space as there is left in the pool so
+	 * check against the current free size of the heap.
+	 * This is slightly racy if someone else is allocating at the same
+	 * time. CMA has a restricted size for the heap so worst case
+	 * the prefetch doesn't work because the allocation fails.
+	 */
+	diff = sheap->heap_size - atomic_read(&sheap->total_pool_size);
+
+	if (len > diff)
+		len = diff;
+
+	sheap->last_alloc = len;
+	trace_ion_prefetching(sheap->last_alloc);
+	schedule_work(&sheap->work);
+
+	return 0;
+}
+
+static void bad_math_dump(unsigned long len, int total_overlap,
+				struct ion_cma_secure_heap *sheap,
+				bool alloc, dma_addr_t paddr)
+{
+	struct list_head *entry;
+
+	pr_err("Bad math! expected total was %lx actual was %x\n",
+			len, total_overlap);
+	pr_err("attempted %s address was %pa len %lx\n",
+			alloc ? "allocation" : "free", &paddr, len);
+	pr_err("chunks:\n");
+	list_for_each(entry, &sheap->chunks) {
+		struct ion_cma_alloc_chunk *chunk =
+			container_of(entry,
+				struct ion_cma_alloc_chunk, entry);
+		pr_info("---   pa %pa len %lx\n",
+			&chunk->handle, chunk->chunk_size);
+	}
+	BUG();
+}
+
+static int ion_secure_cma_alloc_from_pool(
+					struct ion_cma_secure_heap *sheap,
+					dma_addr_t *phys,
+					unsigned long len)
+{
+	dma_addr_t paddr;
+	unsigned long page_no;
+	int ret = 0;
+	int total_overlap = 0;
+	struct list_head *entry;
+
+	mutex_lock(&sheap->chunk_lock);
+
+	page_no = bitmap_find_next_zero_area(sheap->bitmap,
+				sheap->npages, 0, len >> PAGE_SHIFT, 0);
+	if (page_no >= sheap->npages) {
+		ret = -ENOMEM;
+		goto out;
+	}
+	bitmap_set(sheap->bitmap, page_no, len >> PAGE_SHIFT);
+	paddr = sheap->base + (page_no << PAGE_SHIFT);
+
+	list_for_each(entry, &sheap->chunks) {
+		struct ion_cma_alloc_chunk *chunk = container_of(entry,
+					struct ion_cma_alloc_chunk, entry);
+		int overlap = intersect(chunk->handle,
+					chunk->chunk_size, paddr, len);
+
+		atomic_add(overlap, &chunk->cnt);
+		total_overlap += overlap;
+	}
+
+	if (total_overlap != len)
+		bad_math_dump(len, total_overlap, sheap, 1, paddr);
+
+	*phys = paddr;
+out:
+	mutex_unlock(&sheap->chunk_lock);
+	return ret;
+}
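+
+/*
+ * Example of the accounting above: an allocation spanning the tail of one
+ * chunk and the head of the next bumps each chunk's refcount by its share
+ * of the bytes, and the overlaps must sum to exactly the requested length
+ * or bad_math_dump() fires.
+ */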
+
+static void ion_secure_cma_free_chunk(struct ion_cma_secure_heap *sheap,
+					struct ion_cma_alloc_chunk *chunk)
+{
+	DEFINE_DMA_ATTRS(attrs);
+
+	dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &attrs);
+	/* This region is 'allocated' and not available to allocate from */
+	bitmap_set(sheap->bitmap, (chunk->handle - sheap->base) >> PAGE_SHIFT,
+			chunk->chunk_size >> PAGE_SHIFT);
+	dma_free_attrs(sheap->dev, chunk->chunk_size, chunk->cpu_addr,
+				chunk->handle, &attrs);
+	atomic_sub(chunk->chunk_size, &sheap->total_pool_size);
+	list_del(&chunk->entry);
+	kfree(chunk);
+}
+
+static unsigned long
+__ion_secure_cma_shrink_pool(struct ion_cma_secure_heap *sheap, int max_nr)
+{
+	struct list_head *entry, *_n;
+	unsigned long drained_size = 0, skipped_size = 0;
+
+	trace_ion_secure_cma_shrink_pool_start(drained_size, skipped_size);
+
+	list_for_each_safe(entry, _n, &sheap->chunks) {
+		struct ion_cma_alloc_chunk *chunk = container_of(entry,
+					struct ion_cma_alloc_chunk, entry);
+
+		if (max_nr < 0)
+			break;
+
+		if (atomic_read(&chunk->cnt) == 0) {
+			max_nr -= chunk->chunk_size;
+			drained_size += chunk->chunk_size;
+			ion_secure_cma_free_chunk(sheap, chunk);
+		} else {
+			skipped_size += chunk->chunk_size;
+		}
+	}
+
+	trace_ion_secure_cma_shrink_pool_end(drained_size, skipped_size);
+	return drained_size;
+}
+
+int ion_secure_cma_drain_pool(struct ion_heap *heap, void *unused)
+{
+	struct ion_cma_secure_heap *sheap =
+		container_of(heap, struct ion_cma_secure_heap, heap);
+
+	mutex_lock(&sheap->chunk_lock);
+	__ion_secure_cma_shrink_pool(sheap, INT_MAX);
+	mutex_unlock(&sheap->chunk_lock);
+
+	return 0;
+}
+
+static unsigned long ion_secure_cma_shrinker(struct shrinker *shrinker,
+					struct shrink_control *sc)
+{
+	unsigned long freed;
+	struct ion_cma_secure_heap *sheap = container_of(shrinker,
+					struct ion_cma_secure_heap, shrinker);
+	int nr_to_scan = sc->nr_to_scan;
+
+	/*
+	 * Allocation path may invoke the shrinker. Proceeding any further
+	 * would cause a deadlock in several places so don't shrink if that
+	 * happens.
+	 */
+	if (!mutex_trylock(&sheap->chunk_lock))
+		return SHRINK_STOP;
+
+	freed = __ion_secure_cma_shrink_pool(sheap, nr_to_scan);
+
+	mutex_unlock(&sheap->chunk_lock);
+
+	return freed;
+}
+
+static unsigned long ion_secure_cma_shrinker_count(struct shrinker *shrinker,
+					struct shrink_control *sc)
+{
+	struct ion_cma_secure_heap *sheap = container_of(shrinker,
+					struct ion_cma_secure_heap, shrinker);
+	return atomic_read(&sheap->total_pool_size);
+}
+
+static void ion_secure_cma_free_from_pool(struct ion_cma_secure_heap *sheap,
+					dma_addr_t handle,
+					unsigned long len)
+{
+	struct list_head *entry, *_n;
+	int total_overlap = 0;
+
+	mutex_lock(&sheap->chunk_lock);
+	bitmap_clear(sheap->bitmap, (handle - sheap->base) >> PAGE_SHIFT,
+				len >> PAGE_SHIFT);
+
+	list_for_each_safe(entry, _n, &sheap->chunks) {
+		struct ion_cma_alloc_chunk *chunk = container_of(entry,
+					struct ion_cma_alloc_chunk, entry);
+		int overlap = intersect(chunk->handle,
+					chunk->chunk_size, handle, len);
+
+		/*
+		 * Don't actually free this from the pool list yet, let either
+		 * an explicit drain call or the shrinkers take care of the
+		 * pool.
+		 */
+		atomic_sub_return(overlap, &chunk->cnt);
+		BUG_ON(atomic_read(&chunk->cnt) < 0);
+
+		total_overlap += overlap;
+	}
+
+	BUG_ON(atomic_read(&sheap->total_pool_size) < 0);
+
+	if (total_overlap != len)
+		bad_math_dump(len, total_overlap, sheap, 0, handle);
+
+	mutex_unlock(&sheap->chunk_lock);
+}
+
+/* ION CMA heap operations functions */
+static struct ion_secure_cma_buffer_info *__ion_secure_cma_allocate(
+			    struct ion_heap *heap, struct ion_buffer *buffer,
+			    unsigned long len, unsigned long align,
+			    unsigned long flags)
+{
+	struct ion_cma_secure_heap *sheap =
+		container_of(heap, struct ion_cma_secure_heap, heap);
+	struct ion_secure_cma_buffer_info *info;
+	int ret;
+
+	dev_dbg(sheap->dev, "Request buffer allocation len %ld\n", len);
+
+	info = kzalloc(sizeof(struct ion_secure_cma_buffer_info), GFP_KERNEL);
+	if (!info) {
+		dev_err(sheap->dev, "Can't allocate buffer info\n");
+		return ION_CMA_ALLOCATE_FAILED;
+	}
+
+	mutex_lock(&sheap->alloc_lock);
+	ret = ion_secure_cma_alloc_from_pool(sheap, &info->phys, len);
+
+	if (ret) {
+retry:
+		ret = ion_secure_cma_add_to_pool(sheap, len, false);
+		if (ret) {
+			mutex_unlock(&sheap->alloc_lock);
+			dev_err(sheap->dev, "Fail to allocate buffer\n");
+			goto err;
+		}
+		ret = ion_secure_cma_alloc_from_pool(sheap, &info->phys, len);
+		if (ret) {
+			/*
+			 * Lost the race with the shrinker, try again
+			 */
+			goto retry;
+		}
+	}
+	mutex_unlock(&sheap->alloc_lock);
+
+	atomic_add(len, &sheap->total_allocated);
+	info->table = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
+	if (!info->table) {
+		dev_err(sheap->dev, "Fail to allocate sg table\n");
+		goto err;
+	}
+
+	info->len = len;
+	ion_secure_cma_get_sgtable(sheap->dev,
+			info->table, info->phys, len);
+
+	/* keep this for memory release */
+	buffer->priv_virt = info;
+	dev_dbg(sheap->dev, "Allocate buffer %pK\n", buffer);
+	return info;
+
+err:
+	kfree(info);
+	return ION_CMA_ALLOCATE_FAILED;
+}
+
+static void __ion_secure_cma_free_non_contig(struct ion_cma_secure_heap *sheap,
+					struct ion_secure_cma_buffer_info *info)
+{
+	struct ion_secure_cma_non_contig_info *nc_info, *temp;
+
+	list_for_each_entry_safe(nc_info, temp, &info->non_contig_list, entry) {
+		ion_secure_cma_free_from_pool(sheap, nc_info->phys,
+								nc_info->len);
+		list_del(&nc_info->entry);
+		kfree(nc_info);
+	}
+}
+
+static void __ion_secure_cma_free(struct ion_cma_secure_heap *sheap,
+				struct ion_secure_cma_buffer_info *info,
+				bool release_memory)
+{
+	if (release_memory) {
+		if (info->ncelems)
+			__ion_secure_cma_free_non_contig(sheap, info);
+		else
+			ion_secure_cma_free_from_pool(sheap, info->phys,
+								info->len);
+	}
+	sg_free_table(info->table);
+	kfree(info->table);
+	kfree(info);
+}
+
+static struct ion_secure_cma_buffer_info *__ion_secure_cma_allocate_non_contig(
+			struct ion_heap *heap, struct ion_buffer *buffer,
+			unsigned long len, unsigned long align,
+			unsigned long flags)
+{
+	struct ion_cma_secure_heap *sheap =
+		container_of(heap, struct ion_cma_secure_heap, heap);
+	struct ion_secure_cma_buffer_info *info;
+	int ret;
+	unsigned long alloc_size = len;
+	struct ion_secure_cma_non_contig_info *nc_info, *temp;
+	unsigned long ncelems = 0;
+	struct scatterlist *sg;
+	unsigned long total_allocated = 0;
+
+	dev_dbg(sheap->dev, "Request buffer allocation len %ld\n", len);
+
+	info = kzalloc(sizeof(struct ion_secure_cma_buffer_info), GFP_KERNEL);
+	if (!info) {
+		dev_err(sheap->dev, "Can't allocate buffer info\n");
+		return ION_CMA_ALLOCATE_FAILED;
+	}
+
+	INIT_LIST_HEAD(&info->non_contig_list);
+	info->table = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
+	if (!info->table) {
+		dev_err(sheap->dev, "Fail to allocate sg table\n");
+		goto err;
+	}
+	mutex_lock(&sheap->alloc_lock);
+	while (total_allocated < len) {
+		if (alloc_size < SZ_1M) {
+			pr_err("Cannot allocate less than 1MB\n");
+			goto err2;
+		}
+		nc_info = kzalloc(sizeof(struct ion_secure_cma_non_contig_info),
+						GFP_KERNEL);
+		if (!nc_info) {
+			dev_err(sheap->dev,
+				"Can't allocate non contig buffer info\n");
+			goto err2;
+		}
+		ret = ion_secure_cma_alloc_from_pool(sheap, &nc_info->phys,
+								alloc_size);
+		if (ret) {
+retry:
+			ret = ion_secure_cma_add_to_pool(sheap, alloc_size,
+									false);
+			if (ret) {
+				alloc_size = alloc_size / 2;
+				if (!IS_ALIGNED(alloc_size, SZ_1M))
+					alloc_size = round_down(alloc_size,
+								SZ_1M);
+				kfree(nc_info);
+				continue;
+			}
+			ret = ion_secure_cma_alloc_from_pool(sheap,
+						&nc_info->phys, alloc_size);
+			if (ret) {
+				/*
+				 * Lost the race with the shrinker, try again
+				 */
+				goto retry;
+			}
+		}
+		nc_info->len = alloc_size;
+		list_add_tail(&nc_info->entry, &info->non_contig_list);
+		ncelems++;
+		total_allocated += alloc_size;
+		alloc_size = min(alloc_size, len - total_allocated);
+	}
+	mutex_unlock(&sheap->alloc_lock);
+	atomic_add(total_allocated, &sheap->total_allocated);
+
+	nc_info = list_first_entry_or_null(&info->non_contig_list,
+			struct ion_secure_cma_non_contig_info, entry);
+	if (!nc_info) {
+		pr_err("%s: Unable to find first entry of non contig list\n",
+								__func__);
+		goto err1;
+	}
+	info->phys = nc_info->phys;
+	info->len = total_allocated;
+	info->ncelems = ncelems;
+
+	ret = sg_alloc_table(info->table, ncelems, GFP_KERNEL);
+	if (unlikely(ret))
+		goto err1;
+
+	sg = info->table->sgl;
+	list_for_each_entry(nc_info, &info->non_contig_list, entry) {
+		sg_set_page(sg, phys_to_page(nc_info->phys), nc_info->len, 0);
+		sg_dma_address(sg) = nc_info->phys;
+		sg = sg_next(sg);
+	}
+	buffer->priv_virt = info;
+	dev_dbg(sheap->dev, "Allocate buffer %pK\n", buffer);
+	return info;
+
+err2:
+	mutex_unlock(&sheap->alloc_lock);
+err1:
+	list_for_each_entry_safe(nc_info, temp, &info->non_contig_list,
+								entry) {
+		list_del(&nc_info->entry);
+		kfree(nc_info);
+	}
+	kfree(info->table);
+err:
+	kfree(info);
+	return ION_CMA_ALLOCATE_FAILED;
+}
+
+static int ion_secure_cma_allocate(struct ion_heap *heap,
+			    struct ion_buffer *buffer,
+			    unsigned long len, unsigned long align,
+			    unsigned long flags)
+{
+	unsigned long secure_allocation = flags & ION_FLAG_SECURE;
+	struct ion_secure_cma_buffer_info *buf = NULL;
+	unsigned long allow_non_contig = flags & ION_FLAG_ALLOW_NON_CONTIG;
+
+	if (!secure_allocation &&
+		!ion_heap_allow_secure_allocation(heap->type)) {
+		pr_err("%s: non-secure allocation disallowed from heap %s %lx\n",
+			__func__, heap->name, flags);
+		return -ENOMEM;
+	}
+
+	if (ION_IS_CACHED(flags)) {
+		pr_err("%s: cannot allocate cached memory from secure heap %s\n",
+			__func__, heap->name);
+		return -ENOMEM;
+	}
+
+	if (!IS_ALIGNED(len, SZ_1M)) {
+		pr_err("%s: length of allocation from %s must be a multiple of 1MB\n",
+			__func__, heap->name);
+		return -ENOMEM;
+	}
+	trace_ion_secure_cma_allocate_start(heap->name, len, align, flags);
+	if (!allow_non_contig)
+		buf = __ion_secure_cma_allocate(heap, buffer, len, align,
+									flags);
+	else
+		buf = __ion_secure_cma_allocate_non_contig(heap, buffer, len,
+								align, flags);
+	trace_ion_secure_cma_allocate_end(heap->name, len, align, flags);
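+	/*
+	 * Hand the new buffer over to the secure environment. If securing
+	 * fails the memory still belongs to HLOS, so it can safely be
+	 * released back into the pool.
+	 */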
+	if (buf) {
+		int ret;
+
+		if (!msm_secure_v2_is_supported()) {
+			pr_err("%s: securing buffers from clients is not supported on this platform\n",
+				__func__);
+			ret = 1;
+		} else {
+			trace_ion_cp_secure_buffer_start(heap->name, len, align,
+									flags);
+			ret = msm_secure_table(buf->table);
+			trace_ion_cp_secure_buffer_end(heap->name, len, align,
+									flags);
+		}
+		if (ret) {
+			struct ion_cma_secure_heap *sheap =
+				container_of(buffer->heap,
+					struct ion_cma_secure_heap, heap);
+
+			pr_err("%s: failed to secure buffer\n", __func__);
+			__ion_secure_cma_free(sheap, buf, true);
+		}
+		return ret;
+	} else {
+		return -ENOMEM;
+	}
+}
+
+static void ion_secure_cma_free(struct ion_buffer *buffer)
+{
+	struct ion_cma_secure_heap *sheap =
+		container_of(buffer->heap, struct ion_cma_secure_heap, heap);
+	struct ion_secure_cma_buffer_info *info = buffer->priv_virt;
+	int ret = 0;
+
+	dev_dbg(sheap->dev, "Release buffer %pK\n", buffer);
+	if (msm_secure_v2_is_supported())
+		ret = msm_unsecure_table(info->table);
+	atomic_sub(buffer->size, &sheap->total_allocated);
+	BUG_ON(atomic_read(&sheap->total_allocated) < 0);
+
+	/* release memory */
+	if (ret) {
+		WARN(1, "Unsecure failed, can't free the memory. Leaking it!");
+		atomic_add(buffer->size, &sheap->total_leaked);
+	}
+
+	__ion_secure_cma_free(sheap, info, ret ? false : true);
+}
+
+static int ion_secure_cma_phys(struct ion_heap *heap, struct ion_buffer *buffer,
+			ion_phys_addr_t *addr, size_t *len)
+{
+	struct ion_cma_secure_heap *sheap =
+		container_of(heap, struct ion_cma_secure_heap, heap);
+	struct ion_secure_cma_buffer_info *info = buffer->priv_virt;
+
+	dev_dbg(sheap->dev, "Return buffer %pK physical address 0x%pa\n",
+		buffer, &info->phys);
+
+	*addr = info->phys;
+	*len = buffer->size;
+
+	return 0;
+}
+
+struct sg_table *ion_secure_cma_heap_map_dma(struct ion_heap *heap,
+					 struct ion_buffer *buffer)
+{
+	struct ion_secure_cma_buffer_info *info = buffer->priv_virt;
+
+	return info->table;
+}
+
+void ion_secure_cma_heap_unmap_dma(struct ion_heap *heap,
+			       struct ion_buffer *buffer)
+{
+}
+
+static int ion_secure_cma_mmap(struct ion_heap *mapper,
+			struct ion_buffer *buffer,
+			struct vm_area_struct *vma)
+{
+	pr_info("%s: mmaping from secure heap %s disallowed\n",
+		__func__, mapper->name);
+	return -EINVAL;
+}
+
+static void *ion_secure_cma_map_kernel(struct ion_heap *heap,
+				struct ion_buffer *buffer)
+{
+	pr_info("%s: kernel mapping from secure heap %s disallowed\n",
+		__func__, heap->name);
+	return ERR_PTR(-EINVAL);
+}
+
+static void ion_secure_cma_unmap_kernel(struct ion_heap *heap,
+				 struct ion_buffer *buffer)
+{
+}
+
+static int ion_secure_cma_print_debug(struct ion_heap *heap, struct seq_file *s,
+			const struct list_head *mem_map)
+{
+	struct ion_cma_secure_heap *sheap =
+		container_of(heap, struct ion_cma_secure_heap, heap);
+
+	if (mem_map) {
+		struct mem_map_data *data;
+
+		seq_printf(s, "\nMemory Map\n");
+		seq_printf(s, "%16.s %14.s %14.s %14.s\n",
+			   "client", "start address", "end address",
+			   "size");
+
+		list_for_each_entry(data, mem_map, node) {
+			const char *client_name = "(null)";
+
+			if (data->client_name)
+				client_name = data->client_name;
+
+			seq_printf(s, "%16.s 0x%14pa 0x%14pa %14lu (0x%lx)\n",
+				   client_name, &data->addr,
+				   &data->addr_end,
+				   data->size, data->size);
+		}
+	}
+	seq_printf(s, "Total allocated: 0x%x\n",
+				atomic_read(&sheap->total_allocated));
+	seq_printf(s, "Total pool size: 0x%x\n",
+				atomic_read(&sheap->total_pool_size));
+	seq_printf(s, "Total memory leaked due to unlock failures: 0x%x\n",
+				atomic_read(&sheap->total_leaked));
+
+	return 0;
+}
+
+static struct ion_heap_ops ion_secure_cma_ops = {
+	.allocate = ion_secure_cma_allocate,
+	.free = ion_secure_cma_free,
+	.map_dma = ion_secure_cma_heap_map_dma,
+	.unmap_dma = ion_secure_cma_heap_unmap_dma,
+	.phys = ion_secure_cma_phys,
+	.map_user = ion_secure_cma_mmap,
+	.map_kernel = ion_secure_cma_map_kernel,
+	.unmap_kernel = ion_secure_cma_unmap_kernel,
+	.print_debug = ion_secure_cma_print_debug,
+};
+
+struct ion_heap *ion_secure_cma_heap_create(struct ion_platform_heap *data)
+{
+	struct ion_cma_secure_heap *sheap;
+	int map_size = BITS_TO_LONGS(data->size >> PAGE_SHIFT) * sizeof(long);
+
+	sheap = kzalloc(sizeof(*sheap), GFP_KERNEL);
+	if (!sheap)
+		return ERR_PTR(-ENOMEM);
+
+	sheap->dev = data->priv;
+	mutex_init(&sheap->chunk_lock);
+	mutex_init(&sheap->alloc_lock);
+	sheap->heap.ops = &ion_secure_cma_ops;
+	sheap->heap.type = ION_HEAP_TYPE_SECURE_DMA;
+	sheap->npages = data->size >> PAGE_SHIFT;
+	sheap->base = data->base;
+	sheap->heap_size = data->size;
+	sheap->bitmap = kmalloc(map_size, GFP_KERNEL);
+	if (!sheap->bitmap) {
+		kfree(sheap);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	INIT_LIST_HEAD(&sheap->chunks);
+	INIT_WORK(&sheap->work, ion_secure_pool_pages);
+	sheap->shrinker.seeks = DEFAULT_SEEKS;
+	sheap->shrinker.batch = 0;
+	sheap->shrinker.scan_objects = ion_secure_cma_shrinker;
+	sheap->shrinker.count_objects = ion_secure_cma_shrinker_count;
+	sheap->default_prefetch_size = sheap->heap_size;
+	register_shrinker(&sheap->shrinker);
+
+	if (data->extra_data) {
+		struct ion_cma_pdata *extra = data->extra_data;
+		sheap->default_prefetch_size = extra->default_prefetch_size;
+	}
+
+	/*
+	 * we initially mark everything in the allocator as being free so that
+	 * allocations can come in later
+	 */
+	bitmap_fill(sheap->bitmap, sheap->npages);
+
+	return &sheap->heap;
+}
+
+void ion_secure_cma_heap_destroy(struct ion_heap *heap)
+{
+	struct ion_cma_secure_heap *sheap =
+		container_of(heap, struct ion_cma_secure_heap, heap);
+
+	unregister_shrinker(&sheap->shrinker);
+	kfree(sheap->bitmap);
+	kfree(sheap);
+}
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/staging/android/ion/ion_system_secure_heap.c	2019-10-29 09:26:24.849214980 +0100
@@ -0,0 +1,427 @@
+/*
+ *
+ * Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/slab.h>
+#include <linux/msm_ion.h>
+#include <soc/qcom/secure_buffer.h>
+#include <linux/workqueue.h>
+#include <linux/uaccess.h>
+#include "ion.h"
+#include "ion_priv.h"
+
+struct ion_system_secure_heap {
+	struct ion_heap *sys_heap;
+	struct ion_heap heap;
+
+	/* Protects prefetch_list */
+	spinlock_t work_lock;
+	bool destroy_heap;
+	struct list_head prefetch_list;
+	struct delayed_work prefetch_work;
+};
+
+struct prefetch_info {
+	struct list_head list;
+	int vmid;
+	size_t size;
+	bool shrink;
+};
+
+/*
+ * The video client may not hold the last reference count on the
+ * ion_buffer(s). Delay for a short time after the video client sends
+ * the IOC_DRAIN event to increase the chance that the reference
+ * count drops to zero. Time in milliseconds.
+ */
+#define SHRINK_DELAY 1000
+
+static bool is_cp_flag_present(unsigned long flags)
+{
+	return !!(flags & (ION_FLAG_CP_TOUCH |
+			ION_FLAG_CP_BITSTREAM |
+			ION_FLAG_CP_PIXEL |
+			ION_FLAG_CP_NON_PIXEL |
+			ION_FLAG_CP_CAMERA));
+}
+
+int ion_system_secure_heap_unassign_sg(struct sg_table *sgt, int source_vmid)
+{
+	u32 dest_vmid = VMID_HLOS;
+	u32 dest_perms = PERM_READ | PERM_WRITE | PERM_EXEC;
+	struct scatterlist *sg;
+	int ret, i;
+
+	ret = hyp_assign_table(sgt, &source_vmid, 1,
+			       &dest_vmid, &dest_perms, 1);
+	if (ret) {
+		pr_err("%s: Not freeing memory since assign call failed. VMID %d\n",
+		       __func__, source_vmid);
+		return -ENXIO;
+	}
+
+	for_each_sg(sgt->sgl, sg, sgt->nents, i)
+		ClearPagePrivate(sg_page(sg));
+	return 0;
+}
+
+int ion_system_secure_heap_assign_sg(struct sg_table *sgt, int dest_vmid)
+{
+	int source_vmid = VMID_HLOS;
+	u32 dest_perms = PERM_READ | PERM_WRITE;
+	struct scatterlist *sg;
+	int ret, i;
+
+	ret = hyp_assign_table(sgt, &source_vmid, 1,
+			       &dest_vmid, &dest_perms, 1);
+	if (ret) {
+		pr_err("%s: Assign call failed. VMID %d\n",
+		       __func__, dest_vmid);
+		return -EINVAL;
+	}
+
+	for_each_sg(sgt->sgl, sg, sgt->nents, i)
+		SetPagePrivate(sg_page(sg));
+	return 0;
+}
+
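+/*
+ * Buffers are actually carved out of the backing system heap, so point
+ * buffer->heap back at it before delegating the free.
+ */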
+static void ion_system_secure_heap_free(struct ion_buffer *buffer)
+{
+	struct ion_heap *heap = buffer->heap;
+	struct ion_system_secure_heap *secure_heap = container_of(heap,
+						struct ion_system_secure_heap,
+						heap);
+	buffer->heap = secure_heap->sys_heap;
+	secure_heap->sys_heap->ops->free(buffer);
+}
+
+static int ion_system_secure_heap_allocate(struct ion_heap *heap,
+					struct ion_buffer *buffer,
+					unsigned long size, unsigned long align,
+					unsigned long flags)
+{
+	int ret = 0;
+	struct ion_system_secure_heap *secure_heap = container_of(heap,
+						struct ion_system_secure_heap,
+						heap);
+
+	if (!ion_heap_is_system_secure_heap_type(secure_heap->heap.type) ||
+		!is_cp_flag_present(flags)) {
+		pr_info("%s: Incorrect heap type or incorrect flags\n",
+								__func__);
+		return -EINVAL;
+	}
+
+	ret = secure_heap->sys_heap->ops->allocate(secure_heap->sys_heap,
+						buffer, size, align, flags);
+	if (ret) {
+		pr_info("%s: Failed to get allocation for %s, ret = %d\n",
+			__func__, heap->name, ret);
+		return ret;
+	}
+	return ret;
+}
+
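+/*
+ * Prefetch one region: allocate pages from the backing system heap,
+ * hyp-assign them to the requested secure VMID, then free the buffer
+ * with the vmid recorded in its flags so the pages are parked in the
+ * matching secure page pool rather than returned to the kernel.
+ */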
+static void process_one_prefetch(struct ion_heap *sys_heap,
+				 struct prefetch_info *info)
+{
+	struct ion_buffer buffer;
+	struct sg_table *sg_table;
+	int ret;
+
+	buffer.heap = sys_heap;
+	buffer.flags = 0;
+
+	ret = sys_heap->ops->allocate(sys_heap, &buffer, info->size,
+						PAGE_SIZE, buffer.flags);
+	if (ret) {
+		pr_debug("%s: Failed to prefetch 0x%zx, ret = %d\n",
+			 __func__, info->size, ret);
+		return;
+	}
+
+	sg_table = sys_heap->ops->map_dma(sys_heap, &buffer);
+	if (IS_ERR_OR_NULL(sg_table))
+		goto out;
+
+	ret = ion_system_secure_heap_assign_sg(sg_table,
+					       get_secure_vmid(info->vmid));
+	if (ret)
+		goto unmap;
+
+	/* Now free it to the secure heap */
+	buffer.heap = sys_heap;
+	buffer.flags = info->vmid;
+
+unmap:
+	sys_heap->ops->unmap_dma(sys_heap, &buffer);
+out:
+	sys_heap->ops->free(&buffer);
+}
+
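+/*
+ * Shrink one region: pull up to info->size bytes (capped at the current
+ * pool total) back out of the secure page pool and release them with
+ * ION_PRIV_FLAG_SHRINKER_FREE so they are unassigned and handed back to
+ * the system.
+ */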
+static void process_one_shrink(struct ion_heap *sys_heap,
+			       struct prefetch_info *info)
+{
+	struct ion_buffer buffer;
+	size_t pool_size, size;
+	int ret;
+
+	buffer.heap = sys_heap;
+	buffer.flags = info->vmid;
+
+	pool_size = ion_system_heap_secure_page_pool_total(sys_heap,
+							   info->vmid);
+	size = min(pool_size, info->size);
+	ret = sys_heap->ops->allocate(sys_heap, &buffer, size, PAGE_SIZE,
+				      buffer.flags);
+	if (ret) {
+		pr_debug("%s: Failed to shrink 0x%zx, ret = %d\n",
+			 __func__, info->size, ret);
+		return;
+	}
+
+	buffer.private_flags = ION_PRIV_FLAG_SHRINKER_FREE;
+	sys_heap->ops->free(&buffer);
+}
+
+static void ion_system_secure_heap_prefetch_work(struct work_struct *work)
+{
+	struct ion_system_secure_heap *secure_heap = container_of(work,
+						struct ion_system_secure_heap,
+						prefetch_work.work);
+	struct ion_heap *sys_heap = secure_heap->sys_heap;
+	struct prefetch_info *info, *tmp;
+	unsigned long flags;
+
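+	/*
+	 * Pop entries one at a time: processing may sleep (it allocates
+	 * and hyp-assigns memory), so the spinlock is dropped around each
+	 * prefetch/shrink and retaken before fetching the next entry.
+	 */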
+	spin_lock_irqsave(&secure_heap->work_lock, flags);
+	list_for_each_entry_safe(info, tmp,
+				 &secure_heap->prefetch_list, list) {
+		list_del(&info->list);
+		spin_unlock_irqrestore(&secure_heap->work_lock, flags);
+
+		if (info->shrink)
+			process_one_shrink(sys_heap, info);
+		else
+			process_one_prefetch(sys_heap, info);
+
+		kfree(info);
+		spin_lock_irqsave(&secure_heap->work_lock, flags);
+	}
+	spin_unlock_irqrestore(&secure_heap->work_lock, flags);
+}
+
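+/*
+ * Copy one ion_prefetch_regions descriptor in from user space and
+ * expand it into per-size prefetch_info entries appended to 'items'.
+ * nr_sizes is capped at 16 to bound the allocation; the caller frees
+ * the partially built list on error.
+ */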
+static int alloc_prefetch_info(
+			struct ion_prefetch_regions __user *user_regions,
+			bool shrink, struct list_head *items)
+{
+	struct prefetch_info *info;
+	size_t __user *user_sizes;
+	int err;
+	unsigned int nr_sizes, vmid, i;
+
+	err = get_user(nr_sizes, &user_regions->nr_sizes);
+	err |= get_user(user_sizes, &user_regions->sizes);
+	err |= get_user(vmid, &user_regions->vmid);
+	if (err)
+		return -EFAULT;
+
+	if (!is_secure_vmid_valid(get_secure_vmid(vmid)))
+		return -EINVAL;
+
+	if (nr_sizes > 0x10)
+		return -EINVAL;
+
+	for (i = 0; i < nr_sizes; i++) {
+		info = kzalloc(sizeof(*info), GFP_KERNEL);
+		if (!info)
+			return -ENOMEM;
+
+		err = get_user(info->size, &user_sizes[i]);
+		if (err)
+			goto out_free;
+
+		info->vmid = vmid;
+		info->shrink = shrink;
+		INIT_LIST_HEAD(&info->list);
+		list_add_tail(&info->list, items);
+	}
+	return err;
+out_free:
+	kfree(info);
+	return err;
+}
+
+static int __ion_system_secure_heap_resize(struct ion_heap *heap, void *ptr,
+					   bool shrink)
+{
+	struct ion_system_secure_heap *secure_heap = container_of(heap,
+						struct ion_system_secure_heap,
+						heap);
+	struct ion_prefetch_data *data = ptr;
+	int i, ret = 0;
+	struct prefetch_info *info, *tmp;
+	unsigned long flags;
+	LIST_HEAD(items);
+
+	if ((int)heap->type != ION_HEAP_TYPE_SYSTEM_SECURE)
+		return -EINVAL;
+
+	if (data->nr_regions > 0x10)
+		return -EINVAL;
+
+	for (i = 0; i < data->nr_regions; i++) {
+		ret = alloc_prefetch_info(&data->regions[i], shrink, &items);
+		if (ret)
+			goto out_free;
+	}
+
+	spin_lock_irqsave(&secure_heap->work_lock, flags);
+	if (secure_heap->destroy_heap) {
+		spin_unlock_irqrestore(&secure_heap->work_lock, flags);
+		goto out_free;
+	}
+	list_splice_init(&items, &secure_heap->prefetch_list);
+	schedule_delayed_work(&secure_heap->prefetch_work,
+			      shrink ? msecs_to_jiffies(SHRINK_DELAY) : 0);
+	spin_unlock_irqrestore(&secure_heap->work_lock, flags);
+
+	return 0;
+
+out_free:
+	list_for_each_entry_safe(info, tmp, &items, list) {
+		list_del(&info->list);
+		kfree(info);
+	}
+	return ret;
+}
+
+int ion_system_secure_heap_prefetch(struct ion_heap *heap, void *ptr)
+{
+	return __ion_system_secure_heap_resize(heap, ptr, false);
+}
+
+int ion_system_secure_heap_drain(struct ion_heap *heap, void *ptr)
+{
+	return __ion_system_secure_heap_resize(heap, ptr, true);
+}
+
+static struct sg_table *ion_system_secure_heap_map_dma(struct ion_heap *heap,
+					struct ion_buffer *buffer)
+{
+	struct ion_system_secure_heap *secure_heap = container_of(heap,
+						struct ion_system_secure_heap,
+						heap);
+
+	return secure_heap->sys_heap->ops->map_dma(secure_heap->sys_heap,
+							buffer);
+}
+
+static void ion_system_secure_heap_unmap_dma(struct ion_heap *heap,
+					struct ion_buffer *buffer)
+{
+	struct ion_system_secure_heap *secure_heap = container_of(heap,
+						struct ion_system_secure_heap,
+						heap);
+
+	secure_heap->sys_heap->ops->unmap_dma(secure_heap->sys_heap,
+							buffer);
+}
+
+static void *ion_system_secure_heap_map_kernel(struct ion_heap *heap,
+					struct ion_buffer *buffer)
+{
+	pr_info("%s: Kernel mapping from secure heap %s disallowed\n",
+		__func__, heap->name);
+	return ERR_PTR(-EINVAL);
+}
+
+static void ion_system_secure_heap_unmap_kernel(struct ion_heap *heap,
+				struct ion_buffer *buffer)
+{
+}
+
+static int ion_system_secure_heap_map_user(struct ion_heap *mapper,
+					struct ion_buffer *buffer,
+					struct vm_area_struct *vma)
+{
+	pr_info("%s: Mapping from secure heap %s disallowed\n",
+		__func__, mapper->name);
+	return -EINVAL;
+}
+
+static int ion_system_secure_heap_shrink(struct ion_heap *heap, gfp_t gfp_mask,
+						int nr_to_scan)
+{
+	struct ion_system_secure_heap *secure_heap = container_of(heap,
+						struct ion_system_secure_heap,
+						heap);
+
+	return secure_heap->sys_heap->ops->shrink(secure_heap->sys_heap,
+						gfp_mask, nr_to_scan);
+}
+
+static struct ion_heap_ops system_secure_heap_ops = {
+	.allocate = ion_system_secure_heap_allocate,
+	.free = ion_system_secure_heap_free,
+	.map_dma = ion_system_secure_heap_map_dma,
+	.unmap_dma = ion_system_secure_heap_unmap_dma,
+	.map_kernel = ion_system_secure_heap_map_kernel,
+	.unmap_kernel = ion_system_secure_heap_unmap_kernel,
+	.map_user = ion_system_secure_heap_map_user,
+	.shrink = ion_system_secure_heap_shrink,
+};
+
+struct ion_heap *ion_system_secure_heap_create(struct ion_platform_heap *unused)
+{
+	struct ion_system_secure_heap *heap;
+
+	heap = kzalloc(sizeof(struct ion_system_secure_heap), GFP_KERNEL);
+	if (!heap)
+		return ERR_PTR(-ENOMEM);
+	heap->heap.ops = &system_secure_heap_ops;
+	heap->heap.type = ION_HEAP_TYPE_SYSTEM_SECURE;
+	heap->sys_heap = get_ion_heap(ION_SYSTEM_HEAP_ID);
+
+	heap->destroy_heap = false;
+	spin_lock_init(&heap->work_lock);
+	INIT_LIST_HEAD(&heap->prefetch_list);
+	INIT_DELAYED_WORK(&heap->prefetch_work,
+			  ion_system_secure_heap_prefetch_work);
+	return &heap->heap;
+}
+
+void ion_system_secure_heap_destroy(struct ion_heap *heap)
+{
+	struct ion_system_secure_heap *secure_heap = container_of(heap,
+						struct ion_system_secure_heap,
+						heap);
+	unsigned long flags;
+	LIST_HEAD(items);
+	struct prefetch_info *info, *tmp;
+
+	/* Stop any pending/future work */
+	spin_lock_irqsave(&secure_heap->work_lock, flags);
+	secure_heap->destroy_heap = true;
+	list_splice_init(&secure_heap->prefetch_list, &items);
+	spin_unlock_irqrestore(&secure_heap->work_lock, flags);
+
+	cancel_delayed_work_sync(&secure_heap->prefetch_work);
+
+	list_for_each_entry_safe(info, tmp, &items, list) {
+		list_del(&info->list);
+		kfree(info);
+	}
+
+	kfree(heap);
+}
diff -Nruw linux-4.4.115-fbx/drivers/staging/android/ion/msm./compat_msm_ion.c linux-4.4.115-fbx/drivers/staging/android/ion/msm/compat_msm_ion.c
--- linux-4.4.115-fbx/drivers/staging/android/ion/msm./compat_msm_ion.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/staging/android/ion/msm/compat_msm_ion.c	2019-01-22 16:16:26.715275493 +0100
@@ -0,0 +1,210 @@
+/* Copyright (c) 2014,2016 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/compat.h>
+#include <linux/ion.h>
+#include <linux/msm_ion.h>
+#include <linux/uaccess.h>
+#include "../ion_priv.h"
+#include "../compat_ion.h"
+
+struct compat_ion_flush_data {
+	compat_ion_user_handle_t handle;
+	compat_int_t fd;
+	compat_uptr_t vaddr;
+	compat_uint_t offset;
+	compat_uint_t length;
+};
+
+struct compat_ion_prefetch_regions {
+	compat_uint_t vmid;
+	compat_uptr_t sizes;
+	compat_uint_t nr_sizes;
+};
+
+struct compat_ion_prefetch_data {
+	compat_int_t heap_id;
+	compat_ulong_t len;
+	compat_uptr_t regions;
+	compat_uint_t nr_regions;
+};
+
+#define COMPAT_ION_IOC_CLEAN_CACHES    _IOWR(ION_IOC_MSM_MAGIC, 0, \
+						struct compat_ion_flush_data)
+#define COMPAT_ION_IOC_INV_CACHES      _IOWR(ION_IOC_MSM_MAGIC, 1, \
+						struct compat_ion_flush_data)
+#define COMPAT_ION_IOC_CLEAN_INV_CACHES        _IOWR(ION_IOC_MSM_MAGIC, 2, \
+						struct compat_ion_flush_data)
+#define COMPAT_ION_IOC_PREFETCH                _IOWR(ION_IOC_MSM_MAGIC, 3, \
+						struct compat_ion_prefetch_data)
+#define COMPAT_ION_IOC_DRAIN                   _IOWR(ION_IOC_MSM_MAGIC, 4, \
+						struct compat_ion_prefetch_data)
+
+static int compat_get_ion_flush_data(
+			struct compat_ion_flush_data __user *data32,
+			struct ion_flush_data __user *data)
+{
+	compat_ion_user_handle_t h;
+	compat_int_t i;
+	compat_uptr_t u;
+	compat_ulong_t l;
+	int err;
+
+	err = get_user(h, &data32->handle);
+	err |= put_user(h, &data->handle);
+	err |= get_user(i, &data32->fd);
+	err |= put_user(i, &data->fd);
+	err |= get_user(u, &data32->vaddr);
+	/* upper bits won't get set, zero them */
+	err |= put_user(NULL, &data->vaddr);
+	err |= put_user(u, (compat_uptr_t *)&data->vaddr);
+	err |= get_user(l, &data32->offset);
+	err |= put_user(l, &data->offset);
+	err |= get_user(l, &data32->length);
+	err |= put_user(l, &data->length);
+
+	return err;
+}
+
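+/*
+ * Deep-copy the 32-bit prefetch ioctl payload. The nested region and
+ * size arrays are rebuilt on the compat user-space stack; stack_offset
+ * tracks how much of that area is already in use so that each
+ * compat_alloc_user_space() call lands below the previous allocation.
+ */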
+static int compat_get_ion_prefetch_data(
+			struct compat_ion_prefetch_data __user *data32,
+			struct ion_prefetch_data __user *data,
+			size_t stack_offset)
+{
+	compat_int_t i;
+	compat_ulong_t l;
+	compat_uint_t u;
+	int err, j, k;
+	compat_uint_t nr_regions, nr_sizes;
+	struct compat_ion_prefetch_regions __user *regions32;
+	struct ion_prefetch_regions __user *regions;
+	compat_uptr_t ptr;
+
+	err = get_user(i, &data32->heap_id);
+	err |= put_user(i, &data->heap_id);
+	err |= get_user(l, &data32->len);
+	err |= put_user(l, &data->len);
+	err |= get_user(nr_regions, &data32->nr_regions);
+	err |= put_user(nr_regions, &data->nr_regions);
+	err |= get_user(ptr, &data32->regions);
+	regions32 = compat_ptr(ptr);
+	if (err)
+		return err;
+
+	stack_offset += nr_regions * sizeof(*regions);
+	regions = compat_alloc_user_space(stack_offset);
+	if (!regions)
+		return -EFAULT;
+	err |= put_user(regions, &data->regions);
+
+	for (k = 0; k < nr_regions; k++) {
+		compat_size_t __user *sizes32;
+		size_t __user *sizes;
+
+		err |= get_user(u, &regions32[k].vmid);
+		err |= put_user(u, &regions[k].vmid);
+		err |= get_user(nr_sizes, &regions32[k].nr_sizes);
+		err |= put_user(nr_sizes, &regions[k].nr_sizes);
+		err |= get_user(ptr, &regions32[k].sizes);
+		sizes32 = compat_ptr(ptr);
+		if (err)
+			return -EFAULT;
+
+		stack_offset += nr_sizes * sizeof(*sizes);
+		sizes = compat_alloc_user_space(stack_offset);
+		if (!sizes)
+			return -EFAULT;
+		err |= put_user(sizes, &regions[k].sizes);
+
+		for (j = 0; j < nr_sizes; j++) {
+			compat_size_t s;
+
+			err |= get_user(s, &sizes32[j]);
+			err |= put_user(s, &sizes[j]);
+		}
+	}
+
+	return err;
+}
+
+static unsigned int convert_cmd(unsigned int cmd)
+{
+	switch (cmd) {
+	case COMPAT_ION_IOC_CLEAN_CACHES:
+		return ION_IOC_CLEAN_CACHES;
+	case COMPAT_ION_IOC_INV_CACHES:
+		return ION_IOC_INV_CACHES;
+	case COMPAT_ION_IOC_CLEAN_INV_CACHES:
+		return ION_IOC_CLEAN_INV_CACHES;
+	case COMPAT_ION_IOC_PREFETCH:
+		return ION_IOC_PREFETCH;
+	case COMPAT_ION_IOC_DRAIN:
+		return ION_IOC_DRAIN;
+	default:
+		return cmd;
+	}
+}
+
+long compat_msm_ion_ioctl(struct ion_client *client, unsigned int cmd,
+				unsigned long arg)
+{
+	switch (cmd) {
+	case COMPAT_ION_IOC_CLEAN_CACHES:
+	case COMPAT_ION_IOC_INV_CACHES:
+	case COMPAT_ION_IOC_CLEAN_INV_CACHES:
+	{
+		struct compat_ion_flush_data __user *data32;
+		struct ion_flush_data __user *data;
+		int err;
+
+		data32 = compat_ptr(arg);
+		data = compat_alloc_user_space(sizeof(*data));
+		if (data == NULL)
+			return -EFAULT;
+
+		err = compat_get_ion_flush_data(data32, data);
+		if (err)
+			return err;
+
+		return msm_ion_custom_ioctl(client, convert_cmd(cmd),
+						(unsigned long)data);
+	}
+	case COMPAT_ION_IOC_PREFETCH:
+	case COMPAT_ION_IOC_DRAIN:
+	{
+		struct compat_ion_prefetch_data __user *data32;
+		struct ion_prefetch_data __user *data;
+		int err;
+
+		data32 = compat_ptr(arg);
+		data = compat_alloc_user_space(sizeof(*data));
+		if (data == NULL)
+			return -EFAULT;
+
+		err = compat_get_ion_prefetch_data(data32, data, sizeof(*data));
+		if (err)
+			return err;
+
+		return msm_ion_custom_ioctl(client, convert_cmd(cmd),
+						(unsigned long)data);
+
+	}
+	default:
+		if (is_compat_task())
+			return -ENOIOCTLCMD;
+		else
+			return msm_ion_custom_ioctl(client, cmd, arg);
+	}
+}
diff -Nruw linux-4.4.115-fbx/drivers/staging/android/ion/msm./compat_msm_ion.h linux-4.4.115-fbx/drivers/staging/android/ion/msm/compat_msm_ion.h
--- linux-4.4.115-fbx/drivers/staging/android/ion/msm./compat_msm_ion.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/staging/android/ion/msm/compat_msm_ion.h	2019-01-22 16:16:26.715275493 +0100
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _LINUX_COMPAT_ION_H
+#define _LINUX_COMPAT_ION_H
+
+#include <linux/ion.h>
+
+#if IS_ENABLED(CONFIG_COMPAT)
+
+long compat_msm_ion_ioctl(struct ion_client *client, unsigned int cmd,
+					unsigned long arg);
+
+#define compat_ion_user_handle_t compat_int_t
+
+#else
+
+#define compat_msm_ion_ioctl  msm_ion_custom_ioctl
+
+#endif
+#endif
diff -Nruw linux-4.4.115-fbx/drivers/staging/android/ion/msm./ion_cp_common.h linux-4.4.115-fbx/drivers/staging/android/ion/msm/ion_cp_common.h
--- linux-4.4.115-fbx/drivers/staging/android/ion/msm./ion_cp_common.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/staging/android/ion/msm/ion_cp_common.h	2019-01-22 16:16:26.715275493 +0100
@@ -0,0 +1,112 @@
+/*
+ * Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef ION_CP_COMMON_H
+#define ION_CP_COMMON_H
+
+#include <asm-generic/errno-base.h>
+#include <linux/msm_ion.h>
+
+#define ION_CP_V1	1
+#define ION_CP_V2	2
+
+struct ion_cp_buffer {
+	phys_addr_t buffer;
+	atomic_t secure_cnt;
+	int is_secure;
+	int want_delayed_unsecure;
+	/*
+	 * Currently all user/kernel mapping is protected by the heap lock.
+	 * This is sufficient to protect the map count as well. The lock
+	 * should be used to protect map_cnt if the whole heap lock is
+	 * ever removed.
+	 */
+	atomic_t map_cnt;
+	/*
+	 * protects secure_cnt for securing.
+	 */
+	struct mutex lock;
+	int version;
+	void *data;
+	/*
+	 * secure is happening at allocation time, ignore version/data check
+	 */
+	bool ignore_check;
+};
+
+#if defined(CONFIG_ION_MSM)
+/*
+ * ion_cp2_protect_mem - secures memory via trustzone
+ *
+ * @chunks - physical address of the array containing the chunks to
+ *		be locked down
+ * @nchunks - number of entries in the array
+ * @chunk_size - size of each memory chunk
+ * @usage - usage hint
+ * @lock - 1 for lock, 0 for unlock
+ *
+ * return value is the result of the scm call
+ */
+int ion_cp_change_chunks_state(unsigned long chunks, unsigned int nchunks,
+			unsigned int chunk_size, enum cp_mem_usage usage,
+			int lock);
+
+int ion_cp_protect_mem(unsigned int phy_base, unsigned int size,
+			unsigned int permission_type, int version,
+			void *data);
+
+int ion_cp_unprotect_mem(unsigned int phy_base, unsigned int size,
+				unsigned int permission_type, int version,
+				void *data);
+
+int ion_cp_secure_buffer(struct ion_buffer *buffer, int version, void *data,
+				int flags);
+
+int ion_cp_unsecure_buffer(struct ion_buffer *buffer, int force_unsecure);
+
+#else
+static inline int ion_cp_change_chunks_state(unsigned long chunks,
+			unsigned int nchunks, unsigned int chunk_size,
+			enum cp_mem_usage usage, int lock)
+{
+	return -ENODEV;
+}
+
+static inline int ion_cp_protect_mem(unsigned int phy_base, unsigned int size,
+			unsigned int permission_type, int version,
+			void *data)
+{
+	return -ENODEV;
+}
+
+static inline int ion_cp_unprotect_mem(unsigned int phy_base, unsigned int size,
+				unsigned int permission_type, int version,
+				void *data)
+{
+	return -ENODEV;
+}
+
+static inline int ion_cp_secure_buffer(struct ion_buffer *buffer, int version,
+				void *data, int flags)
+{
+	return -ENODEV;
+}
+
+static inline int ion_cp_unsecure_buffer(struct ion_buffer *buffer,
+				int force_unsecure)
+{
+	return -ENODEV;
+}
+#endif
+
+#endif
diff -Nruw linux-4.4.115-fbx/drivers/staging/android/ion/msm./Makefile linux-4.4.115-fbx/drivers/staging/android/ion/msm/Makefile
--- linux-4.4.115-fbx/drivers/staging/android/ion/msm./Makefile	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/staging/android/ion/msm/Makefile	2019-01-22 16:16:26.715275493 +0100
@@ -0,0 +1,4 @@
+obj-y += msm_ion.o
+ifdef CONFIG_COMPAT
+obj-y += compat_msm_ion.o
+endif
diff -Nruw linux-4.4.115-fbx/drivers/staging/android/ion/msm./msm_ion.c linux-4.4.115-fbx/drivers/staging/android/ion/msm/msm_ion.c
--- linux-4.4.115-fbx/drivers/staging/android/ion/msm./msm_ion.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/staging/android/ion/msm/msm_ion.c	2019-10-29 09:26:24.849214980 +0100
@@ -0,0 +1,1137 @@
+/* Copyright (c) 2011-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/msm_ion.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/of_address.h>
+#include <linux/mm.h>
+#include <linux/mm_types.h>
+#include <linux/sched.h>
+#include <linux/rwsem.h>
+#include <linux/uaccess.h>
+#include <linux/memblock.h>
+#include <linux/dma-mapping.h>
+#include <linux/dma-contiguous.h>
+#include <linux/vmalloc.h>
+#include <linux/highmem.h>
+#include <linux/cma.h>
+#include <linux/module.h>
+#include <linux/show_mem_notifier.h>
+#include <asm/cacheflush.h>
+#include "../ion_priv.h"
+#include "ion_cp_common.h"
+#include "compat_msm_ion.h"
+#include <soc/qcom/secure_buffer.h>
+
+#define ION_COMPAT_STR	"qcom,msm-ion"
+
+static struct ion_device *idev;
+static int num_heaps;
+static struct ion_heap **heaps;
+
+struct ion_heap_desc {
+	unsigned int id;
+	enum ion_heap_type type;
+	const char *name;
+	unsigned int permission_type;
+};
+
+#ifdef CONFIG_OF
+static struct ion_heap_desc ion_heap_meta[] = {
+	{
+		.id	= ION_SYSTEM_HEAP_ID,
+		.name	= ION_SYSTEM_HEAP_NAME,
+	},
+	{
+		.id	= ION_SYSTEM_CONTIG_HEAP_ID,
+		.name	= ION_KMALLOC_HEAP_NAME,
+	},
+	{
+		.id	= ION_SECURE_HEAP_ID,
+		.name	= ION_SECURE_HEAP_NAME,
+	},
+	{
+		.id	= ION_CP_MM_HEAP_ID,
+		.name	= ION_MM_HEAP_NAME,
+		.permission_type = IPT_TYPE_MM_CARVEOUT,
+	},
+	{
+		.id	= ION_MM_FIRMWARE_HEAP_ID,
+		.name	= ION_MM_FIRMWARE_HEAP_NAME,
+	},
+	{
+		.id	= ION_CP_MFC_HEAP_ID,
+		.name	= ION_MFC_HEAP_NAME,
+		.permission_type = IPT_TYPE_MFC_SHAREDMEM,
+	},
+	{
+		.id	= ION_SF_HEAP_ID,
+		.name	= ION_SF_HEAP_NAME,
+	},
+	{
+		.id	= ION_QSECOM_HEAP_ID,
+		.name	= ION_QSECOM_HEAP_NAME,
+	},
+	{
+		.id	= ION_SPSS_HEAP_ID,
+		.name	= ION_SPSS_HEAP_NAME,
+	},
+	{
+		.id	= ION_AUDIO_HEAP_ID,
+		.name	= ION_AUDIO_HEAP_NAME,
+	},
+	{
+		.id	= ION_PIL1_HEAP_ID,
+		.name	= ION_PIL1_HEAP_NAME,
+	},
+	{
+		.id	= ION_PIL2_HEAP_ID,
+		.name	= ION_PIL2_HEAP_NAME,
+	},
+	{
+		.id	= ION_CP_WB_HEAP_ID,
+		.name	= ION_WB_HEAP_NAME,
+	},
+	{
+		.id	= ION_CAMERA_HEAP_ID,
+		.name	= ION_CAMERA_HEAP_NAME,
+	},
+	{
+		.id	= ION_ADSP_HEAP_ID,
+		.name	= ION_ADSP_HEAP_NAME,
+	},
+	{
+		.id	= ION_SECURE_DISPLAY_HEAP_ID,
+		.name	= ION_SECURE_DISPLAY_HEAP_NAME,
+	}
+};
+#endif
+
+static int msm_ion_lowmem_notifier(struct notifier_block *nb,
+					unsigned long action, void *data)
+{
+	show_ion_usage(idev);
+	return 0;
+}
+
+static struct notifier_block msm_ion_nb = {
+	.notifier_call = msm_ion_lowmem_notifier,
+};
+
+struct ion_client *msm_ion_client_create(const char *name)
+{
+	/*
+	 * The assumption is that if there is a NULL device, the ion
+	 * driver has not yet probed.
+	 */
+	if (idev == NULL)
+		return ERR_PTR(-EPROBE_DEFER);
+
+	if (IS_ERR(idev))
+		return (struct ion_client *)idev;
+
+	return ion_client_create(idev, name);
+}
+EXPORT_SYMBOL(msm_ion_client_create);
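+
+/*
+ * Illustrative in-kernel usage (the client name and heap mask below are
+ * examples only, not fixed values):
+ *
+ *	struct ion_client *client = msm_ion_client_create("my-driver");
+ *	struct ion_handle *handle;
+ *
+ *	if (!IS_ERR(client))
+ *		handle = ion_alloc(client, SZ_1M, SZ_4K,
+ *				   ION_HEAP(ION_SYSTEM_HEAP_ID), 0);
+ */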
+
+int msm_ion_do_cache_op(struct ion_client *client, struct ion_handle *handle,
+			void *vaddr, unsigned long len, unsigned int cmd)
+{
+	return ion_do_cache_op(client, handle, vaddr, 0, len, cmd);
+}
+EXPORT_SYMBOL(msm_ion_do_cache_op);
+
+int msm_ion_do_cache_offset_op(
+		struct ion_client *client, struct ion_handle *handle,
+		void *vaddr, unsigned int offset, unsigned long len,
+		unsigned int cmd)
+{
+	return ion_do_cache_op(client, handle, vaddr, offset, len, cmd);
+}
+EXPORT_SYMBOL(msm_ion_do_cache_offset_op);
+
+static int ion_no_pages_cache_ops(struct ion_client *client,
+			struct ion_handle *handle,
+			void *vaddr,
+			unsigned int offset, unsigned int length,
+			unsigned int cmd)
+{
+	unsigned long size_to_vmap, total_size;
+	int i, j, ret;
+	void *ptr = NULL;
+	ion_phys_addr_t buff_phys = 0;
+	ion_phys_addr_t buff_phys_start = 0;
+	size_t buf_length = 0;
+
+	ret = ion_phys(client, handle, &buff_phys_start, &buf_length);
+	if (ret)
+		return -EINVAL;
+
+	buff_phys = buff_phys_start;
+
+	if (!vaddr) {
+		/*
+		 * Split the vmalloc space into smaller regions in
+		 * order to clean and/or invalidate the cache.
+		 */
+		size_to_vmap = ((VMALLOC_END - VMALLOC_START)/8);
+		total_size = buf_length;
+
+		for (i = 0; i < total_size; i += size_to_vmap) {
+			size_to_vmap = min(size_to_vmap, total_size - i);
+			for (j = 0; j < 10 && size_to_vmap; ++j) {
+				ptr = ioremap(buff_phys, size_to_vmap);
+				if (ptr) {
+					switch (cmd) {
+					case ION_IOC_CLEAN_CACHES:
+						dmac_clean_range(ptr,
+							ptr + size_to_vmap);
+						break;
+					case ION_IOC_INV_CACHES:
+						dmac_inv_range(ptr,
+							ptr + size_to_vmap);
+						break;
+					case ION_IOC_CLEAN_INV_CACHES:
+						dmac_flush_range(ptr,
+							ptr + size_to_vmap);
+						break;
+					default:
+						iounmap(ptr);
+						return -EINVAL;
+					}
+					buff_phys += size_to_vmap;
+					break;
+				} else {
+					size_to_vmap >>= 1;
+				}
+			}
+			if (!ptr) {
+				pr_err("Couldn't io-remap the memory\n");
+				return -EINVAL;
+			}
+			iounmap(ptr);
+		}
+	} else {
+		switch (cmd) {
+		case ION_IOC_CLEAN_CACHES:
+			dmac_clean_range(vaddr, vaddr + length);
+			break;
+		case ION_IOC_INV_CACHES:
+			dmac_inv_range(vaddr, vaddr + length);
+			break;
+		case ION_IOC_CLEAN_INV_CACHES:
+			dmac_flush_range(vaddr, vaddr + length);
+			break;
+		default:
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
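+/*
+ * Apply a cache maintenance routine to a (possibly highmem) run of
+ * pages. Lowmem pages are handled in a single call through the linear
+ * mapping; highmem pages are kmap'd and processed one page at a time.
+ */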
+static void __do_cache_ops(struct page *page, unsigned int offset,
+		unsigned int length, void (*op)(const void *, const void *))
+{
+	unsigned int left = length;
+	unsigned long pfn;
+	void *vaddr;
+
+	pfn = page_to_pfn(page) + offset / PAGE_SIZE;
+	page = pfn_to_page(pfn);
+	offset &= ~PAGE_MASK;
+
+	if (!PageHighMem(page)) {
+		vaddr = page_address(page) + offset;
+		op(vaddr, vaddr + length);
+		goto out;
+	}
+
+	do {
+		unsigned int len;
+
+		len = left;
+		if (len + offset > PAGE_SIZE)
+			len = PAGE_SIZE - offset;
+
+		page = pfn_to_page(pfn);
+		vaddr = kmap_atomic(page);
+		op(vaddr + offset, vaddr + offset + len);
+		kunmap_atomic(vaddr);
+
+		offset = 0;
+		pfn++;
+		left -= len;
+	} while (left);
+
+out:
+	return;
+}
+
+static int ion_pages_cache_ops(struct ion_client *client,
+			struct ion_handle *handle,
+			void *vaddr, unsigned int offset, unsigned int length,
+			unsigned int cmd)
+{
+	struct sg_table *table = NULL;
+	struct scatterlist *sg;
+	int i;
+	unsigned int len = 0;
+	void (*op)(const void *, const void *);
+
+	table = ion_sg_table(client, handle);
+	if (IS_ERR_OR_NULL(table))
+		return PTR_ERR(table);
+
+	switch (cmd) {
+	case ION_IOC_CLEAN_CACHES:
+		op = dmac_clean_range;
+		break;
+	case ION_IOC_INV_CACHES:
+		op = dmac_inv_range;
+		break;
+	case ION_IOC_CLEAN_INV_CACHES:
+		op = dmac_flush_range;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	for_each_sg(table->sgl, sg, table->nents, i) {
+		unsigned int sg_offset, sg_left, size = 0;
+
+		len += sg->length;
+		if (len <= offset)
+			continue;
+
+		sg_left = len - offset;
+		sg_offset = sg->length - sg_left;
+
+		size = (length < sg_left) ? length : sg_left;
+
+		__do_cache_ops(sg_page(sg), sg_offset, size, op);
+
+		offset += size;
+		length -= size;
+
+		if (length == 0)
+			break;
+	}
+	return 0;
+}
+
+int ion_do_cache_op(struct ion_client *client, struct ion_handle *handle,
+			void *uaddr, unsigned long offset, unsigned long len,
+			unsigned int cmd)
+{
+	int ret = -EINVAL;
+	unsigned long flags;
+	struct sg_table *table;
+	struct page *page;
+
+	ret = ion_handle_get_flags(client, handle, &flags);
+	if (ret)
+		return -EINVAL;
+
+	if (!ION_IS_CACHED(flags))
+		return 0;
+
+	if (get_secure_vmid(flags) > 0)
+		return 0;
+
+	table = ion_sg_table(client, handle);
+
+	if (IS_ERR_OR_NULL(table))
+		return PTR_ERR(table);
+
+	page = sg_page(table->sgl);
+
+	if (page)
+		ret = ion_pages_cache_ops(client, handle, uaddr,
+					offset, len, cmd);
+	else
+		ret = ion_no_pages_cache_ops(client, handle, uaddr,
+					offset, len, cmd);
+
+	return ret;
+}
+
+static void msm_ion_allocate(struct ion_platform_heap *heap)
+{
+	if (!heap->base && heap->extra_data) {
+		WARN(1, "Specifying carveout heaps without a base is deprecated. Convert to the DMA heap type instead\n");
+		return;
+	}
+}
+
+#ifdef CONFIG_OF
+static int msm_init_extra_data(struct device_node *node,
+			       struct ion_platform_heap *heap,
+			       const struct ion_heap_desc *heap_desc)
+{
+	int ret = 0;
+
+	switch ((int) heap->type) {
+	case ION_HEAP_TYPE_CARVEOUT:
+	{
+		heap->extra_data = kzalloc(sizeof(struct ion_co_heap_pdata),
+					   GFP_KERNEL);
+		if (!heap->extra_data)
+			ret = -ENOMEM;
+		break;
+	}
+	case ION_HEAP_TYPE_SECURE_DMA:
+	{
+		unsigned int val;
+
+		ret = of_property_read_u32(node,
+					"qcom,default-prefetch-size", &val);
+
+		if (!ret) {
+			heap->extra_data = kzalloc(sizeof(struct ion_cma_pdata),
+					   GFP_KERNEL);
+
+			if (!heap->extra_data) {
+				ret = -ENOMEM;
+			} else {
+				struct ion_cma_pdata *extra = heap->extra_data;
+				extra->default_prefetch_size = val;
+			}
+		} else {
+			ret = 0;
+		}
+		break;
+	}
+	default:
+		heap->extra_data = NULL;
+		break;
+	}
+	return ret;
+}
+
+#define MAKE_HEAP_TYPE_MAPPING(h) { .name = #h, \
+			.heap_type = ION_HEAP_TYPE_##h, }
+
+static struct heap_types_info {
+	const char *name;
+	int heap_type;
+} heap_types_info[] = {
+	MAKE_HEAP_TYPE_MAPPING(SYSTEM),
+	MAKE_HEAP_TYPE_MAPPING(SYSTEM_CONTIG),
+	MAKE_HEAP_TYPE_MAPPING(CARVEOUT),
+	MAKE_HEAP_TYPE_MAPPING(CHUNK),
+	MAKE_HEAP_TYPE_MAPPING(DMA),
+	MAKE_HEAP_TYPE_MAPPING(SECURE_DMA),
+	MAKE_HEAP_TYPE_MAPPING(SYSTEM_SECURE),
+	MAKE_HEAP_TYPE_MAPPING(HYP_CMA),
+};
+
+static int msm_ion_get_heap_type_from_dt_node(struct device_node *node,
+					int *heap_type)
+{
+	const char *name;
+	int i, ret = -EINVAL;
+
+	ret = of_property_read_string(node, "qcom,ion-heap-type", &name);
+	if (ret)
+		goto out;
+	for (i = 0; i < ARRAY_SIZE(heap_types_info); ++i) {
+		if (!strcmp(heap_types_info[i].name, name)) {
+			*heap_type = heap_types_info[i].heap_type;
+			ret = 0;
+			goto out;
+		}
+	}
+	WARN(1, "Unknown heap type: %s. You might need to update heap_types_info in %s",
+		name, __FILE__);
+out:
+	return ret;
+}
+
+static int msm_ion_populate_heap(struct device_node *node,
+				struct ion_platform_heap *heap)
+{
+	unsigned int i;
+	int ret = -EINVAL, heap_type = -1;
+	unsigned int len = ARRAY_SIZE(ion_heap_meta);
+	for (i = 0; i < len; ++i) {
+		if (ion_heap_meta[i].id == heap->id) {
+			heap->name = ion_heap_meta[i].name;
+			ret = msm_ion_get_heap_type_from_dt_node(node,
+								&heap_type);
+			if (ret)
+				break;
+			heap->type = heap_type;
+			ret = msm_init_extra_data(node, heap,
+						&ion_heap_meta[i]);
+			break;
+		}
+	}
+	if (ret)
+		pr_err("%s: Unable to populate heap, error: %d", __func__, ret);
+	return ret;
+}
+
+static void free_pdata(const struct ion_platform_data *pdata)
+{
+	unsigned int i;
+	for (i = 0; i < pdata->nr; ++i)
+		kfree(pdata->heaps[i].extra_data);
+	kfree(pdata->heaps);
+	kfree(pdata);
+}
+
+static void msm_ion_get_heap_dt_data(struct device_node *node,
+				 struct ion_platform_heap *heap)
+{
+	struct device_node *pnode;
+
+	pnode = of_parse_phandle(node, "memory-region", 0);
+	if (pnode != NULL) {
+		const __be32 *basep;
+		u64 size;
+		u64 base;
+
+		basep = of_get_address(pnode,  0, &size, NULL);
+		if (!basep) {
+			base = cma_get_base(dev_get_cma_area(heap->priv));
+			size = cma_get_size(dev_get_cma_area(heap->priv));
+		} else {
+			base = of_translate_address(pnode, basep);
+			WARN(base == OF_BAD_ADDR, "Failed to parse DT node for heap %s\n",
+					heap->name);
+		}
+		heap->base = base;
+		heap->size = size;
+		of_node_put(pnode);
+	}
+}
+
+static struct ion_platform_data *msm_ion_parse_dt(struct platform_device *pdev)
+{
+	struct ion_platform_data *pdata = NULL;
+	struct ion_platform_heap *heaps = NULL;
+	struct device_node *node;
+	struct platform_device *new_dev = NULL;
+	const struct device_node *dt_node = pdev->dev.of_node;
+	uint32_t val = 0;
+	int ret = 0;
+	uint32_t num_heaps = 0;
+	int idx = 0;
+
+	for_each_available_child_of_node(dt_node, node)
+		num_heaps++;
+
+	if (!num_heaps)
+		return ERR_PTR(-EINVAL);
+
+	pdata = kzalloc(sizeof(struct ion_platform_data), GFP_KERNEL);
+	if (!pdata)
+		return ERR_PTR(-ENOMEM);
+
+	heaps = kzalloc(sizeof(struct ion_platform_heap)*num_heaps, GFP_KERNEL);
+	if (!heaps) {
+		kfree(pdata);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	pdata->heaps = heaps;
+	pdata->nr = num_heaps;
+
+	for_each_available_child_of_node(dt_node, node) {
+		new_dev = of_platform_device_create(node, NULL, &pdev->dev);
+		if (!new_dev) {
+			pr_err("Failed to create device %s\n", node->name);
+			goto free_heaps;
+		}
+
+		pdata->heaps[idx].priv = &new_dev->dev;
+		/*
+		 * TODO: Replace this with of_get_address() when this patch
+		 * gets merged: http://
+		 * permalink.gmane.org/gmane.linux.drivers.devicetree/18614
+		 */
+		ret = of_property_read_u32(node, "reg", &val);
+		if (ret) {
+			pr_err("%s: Unable to find reg key", __func__);
+			goto free_heaps;
+		}
+		pdata->heaps[idx].id = val;
+
+		ret = msm_ion_populate_heap(node, &pdata->heaps[idx]);
+		if (ret)
+			goto free_heaps;
+
+		msm_ion_get_heap_dt_data(node, &pdata->heaps[idx]);
+
+		++idx;
+	}
+	return pdata;
+
+free_heaps:
+	free_pdata(pdata);
+	return ERR_PTR(ret);
+}
+#else
+static struct ion_platform_data *msm_ion_parse_dt(struct platform_device *pdev)
+{
+	return NULL;
+}
+
+static void free_pdata(const struct ion_platform_data *pdata)
+{
+
+}
+#endif
+
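+/*
+ * Reject user ranges that are not fully contained in a single VMA of
+ * the current mm. Returns 0 when the range is valid, non-zero
+ * otherwise.
+ */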
+static int check_vaddr_bounds(unsigned long start, unsigned long end)
+{
+	struct mm_struct *mm = current->active_mm;
+	struct vm_area_struct *vma;
+	int ret = 1;
+
+	if (end < start)
+		goto out;
+
+	vma = find_vma(mm, start);
+	if (vma && vma->vm_start < end) {
+		if (start < vma->vm_start)
+			goto out;
+		if (end > vma->vm_end)
+			goto out;
+		ret = 0;
+	}
+
+out:
+	return ret;
+}
+
+int ion_heap_is_system_secure_heap_type(enum ion_heap_type type)
+{
+	return type == ((enum ion_heap_type) ION_HEAP_TYPE_SYSTEM_SECURE);
+}
+
+int ion_heap_allow_secure_allocation(enum ion_heap_type type)
+{
+	return type == ((enum ion_heap_type) ION_HEAP_TYPE_SECURE_DMA);
+}
+
+int ion_heap_allow_handle_secure(enum ion_heap_type type)
+{
+	return type == ((enum ion_heap_type) ION_HEAP_TYPE_SECURE_DMA);
+}
+
+int ion_heap_allow_heap_secure(enum ion_heap_type type)
+{
+	return false;
+}
+
+bool is_secure_vmid_valid(int vmid)
+{
+	return (vmid == VMID_CP_TOUCH ||
+		vmid == VMID_CP_BITSTREAM ||
+		vmid == VMID_CP_PIXEL ||
+		vmid == VMID_CP_NON_PIXEL ||
+		vmid == VMID_CP_CAMERA ||
+		vmid == VMID_CP_SEC_DISPLAY ||
+		vmid == VMID_CP_APP ||
+		vmid == VMID_CP_CAMERA_PREVIEW ||
+		vmid == VMID_CP_SPSS_SP_SHARED);
+}
+
+int get_secure_vmid(unsigned long flags)
+{
+	if (flags & ION_FLAG_CP_TOUCH)
+		return VMID_CP_TOUCH;
+	if (flags & ION_FLAG_CP_BITSTREAM)
+		return VMID_CP_BITSTREAM;
+	if (flags & ION_FLAG_CP_PIXEL)
+		return VMID_CP_PIXEL;
+	if (flags & ION_FLAG_CP_NON_PIXEL)
+		return VMID_CP_NON_PIXEL;
+	if (flags & ION_FLAG_CP_CAMERA)
+		return VMID_CP_CAMERA;
+	if (flags & ION_FLAG_CP_SEC_DISPLAY)
+		return VMID_CP_SEC_DISPLAY;
+	if (flags & ION_FLAG_CP_APP)
+		return VMID_CP_APP;
+	if (flags & ION_FLAG_CP_CAMERA_PREVIEW)
+		return VMID_CP_CAMERA_PREVIEW;
+	if (flags & ION_FLAG_CP_SPSS_SP_SHARED)
+		return VMID_CP_SPSS_SP_SHARED;
+	return -EINVAL;
+}
+
+/* fix up the cases where the ioctl direction bits are incorrect */
+static unsigned int msm_ion_ioctl_dir(unsigned int cmd)
+{
+	switch (cmd) {
+	case ION_IOC_CLEAN_CACHES:
+	case ION_IOC_INV_CACHES:
+	case ION_IOC_CLEAN_INV_CACHES:
+	case ION_IOC_PREFETCH:
+	case ION_IOC_DRAIN:
+		return _IOC_WRITE;
+	default:
+		return _IOC_DIR(cmd);
+	}
+}
+
+long msm_ion_custom_ioctl(struct ion_client *client,
+				unsigned int cmd,
+				unsigned long arg)
+{
+	unsigned int dir;
+	union {
+		struct ion_flush_data flush_data;
+		struct ion_prefetch_data prefetch_data;
+	} data;
+
+	dir = msm_ion_ioctl_dir(cmd);
+
+	if (_IOC_SIZE(cmd) > sizeof(data))
+		return -EINVAL;
+
+	if (dir & _IOC_WRITE)
+		if (copy_from_user(&data, (void __user *)arg, _IOC_SIZE(cmd)))
+			return -EFAULT;
+
+	switch (cmd) {
+	case ION_IOC_CLEAN_CACHES:
+	case ION_IOC_INV_CACHES:
+	case ION_IOC_CLEAN_INV_CACHES:
+	{
+		unsigned long start, end;
+		struct ion_handle *handle = NULL;
+		int ret;
+		struct mm_struct *mm = current->active_mm;
+
+		if (data.flush_data.handle > 0) {
+			handle = ion_handle_get_by_id(client,
+						(int)data.flush_data.handle);
+			if (IS_ERR(handle)) {
+				pr_info("%s: Could not find handle: %d\n",
+					__func__, (int)data.flush_data.handle);
+				return PTR_ERR(handle);
+			}
+		} else {
+			handle = ion_import_dma_buf(client, data.flush_data.fd);
+			if (IS_ERR(handle)) {
+				pr_info("%s: Could not import handle: %pK\n",
+					__func__, handle);
+				return -EINVAL;
+			}
+		}
+
+		down_read(&mm->mmap_sem);
+
+		start = (unsigned long)data.flush_data.vaddr +
+			data.flush_data.offset;
+		end = start + data.flush_data.length;
+
+		if (start && check_vaddr_bounds(start, end)) {
+			pr_err("%s: virtual address %pK is out of bounds\n",
+			       __func__, data.flush_data.vaddr);
+			ret = -EINVAL;
+		} else {
+			ret = ion_do_cache_op(
+				client, handle, data.flush_data.vaddr,
+				data.flush_data.offset,
+				data.flush_data.length, cmd);
+		}
+		up_read(&mm->mmap_sem);
+
+		ion_free(client, handle);
+
+		if (ret < 0)
+			return ret;
+		break;
+	}
+	case ION_IOC_PREFETCH:
+	{
+		int ret;
+
+		ret = ion_walk_heaps(client, data.prefetch_data.heap_id,
+			ION_HEAP_TYPE_SECURE_DMA,
+			(void *)data.prefetch_data.len,
+			ion_secure_cma_prefetch);
+		if (ret)
+			return ret;
+
+		ret = ion_walk_heaps(client, data.prefetch_data.heap_id,
+				     ION_HEAP_TYPE_SYSTEM_SECURE,
+				     (void *)&data.prefetch_data,
+				     ion_system_secure_heap_prefetch);
+		if (ret)
+			return ret;
+		break;
+	}
+	case ION_IOC_DRAIN:
+	{
+		int ret;
+
+		ret = ion_walk_heaps(client, data.prefetch_data.heap_id,
+			ION_HEAP_TYPE_SECURE_DMA,
+			(void *)data.prefetch_data.len,
+			ion_secure_cma_drain_pool);
+
+		if (ret)
+			return ret;
+
+		ret = ion_walk_heaps(client, data.prefetch_data.heap_id,
+				     ION_HEAP_TYPE_SYSTEM_SECURE,
+				     (void *)&data.prefetch_data,
+				     ion_system_secure_heap_drain);
+
+		if (ret)
+			return ret;
+		break;
+	}
+
+	default:
+		return -ENOTTY;
+	}
+	return 0;
+}
+
+#define MAX_VMAP_RETRIES 10
+
+/**
+ * An optimized page-zero'ing function. vmaps arrays of pages in large
+ * chunks to minimize the number of memsets and vmaps/vunmaps.
+ *
+ * Note that the `pages' array should be composed of all 4K pages.
+ *
+ * NOTE: This function does not guarantee synchronization of the caches
+ * and thus caller is responsible for handling any cache maintenance
+ * operations needed.
+ */
+int msm_ion_heap_pages_zero(struct page **pages, int num_pages)
+{
+	int i, j, npages_to_vmap;
+	void *ptr = NULL;
+
+	/*
+	 * As an optimization, we manually zero out all of the pages
+	 * in one fell swoop here. To safeguard against insufficient
+	 * vmalloc space, we only vmap `npages_to_vmap' at a time,
+	 * starting with a conservative estimate of 1/8 of the total
+	 * number of vmalloc pages available.
+	 */
+	npages_to_vmap = ((VMALLOC_END - VMALLOC_START)/8)
+			>> PAGE_SHIFT;
+	for (i = 0; i < num_pages; i += npages_to_vmap) {
+		npages_to_vmap = min(npages_to_vmap, num_pages - i);
+		for (j = 0; j < MAX_VMAP_RETRIES && npages_to_vmap;
+			++j) {
+			ptr = vmap(&pages[i], npages_to_vmap,
+					VM_IOREMAP, PAGE_KERNEL);
+			if (ptr)
+				break;
+			else
+				npages_to_vmap >>= 1;
+		}
+		if (!ptr)
+			return -ENOMEM;
+
+		memset(ptr, 0, npages_to_vmap * PAGE_SIZE);
+		vunmap(ptr);
+	}
+
+	return 0;
+}
+
+int msm_ion_heap_alloc_pages_mem(struct pages_mem *pages_mem)
+{
+	struct page **pages;
+	unsigned int page_tbl_size;
+
+	pages_mem->free_fn = kfree;
+	page_tbl_size = sizeof(struct page *) * (pages_mem->size >> PAGE_SHIFT);
+	if (page_tbl_size > SZ_8K) {
+		/*
+		 * Fall back to vmalloc() if kmalloc() fails, to balance
+		 * performance against allocation reliability.
+		 */
+		pages = kmalloc(page_tbl_size,
+				__GFP_COMP | __GFP_NORETRY |
+				__GFP_NOWARN);
+		if (!pages) {
+			pages = vmalloc(page_tbl_size);
+			pages_mem->free_fn = vfree;
+		}
+	} else {
+		pages = kmalloc(page_tbl_size, GFP_KERNEL);
+	}
+
+	if (!pages)
+		return -ENOMEM;
+
+	pages_mem->pages = pages;
+	return 0;
+}
+
+void msm_ion_heap_free_pages_mem(struct pages_mem *pages_mem)
+{
+	pages_mem->free_fn(pages_mem->pages);
+}
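+
A sketch of the intended alloc/zero/free pairing (hypothetical caller,
mirroring msm_ion_heap_high_order_page_zero() below):

	static int zero_page_array(struct page **src, int nr_pages)
	{
		struct pages_mem pm = {
			.size = (size_t)nr_pages << PAGE_SHIFT,
		};
		int i, ret;

		if (msm_ion_heap_alloc_pages_mem(&pm))
			return -ENOMEM;

		for (i = 0; i < nr_pages; i++)
			pm.pages[i] = src[i];

		ret = msm_ion_heap_pages_zero(pm.pages, nr_pages);
		/* Frees with kfree() or vfree(), whichever was recorded. */
		msm_ion_heap_free_pages_mem(&pm);
		return ret;
	}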
+
+int msm_ion_heap_high_order_page_zero(struct device *dev, struct page *page,
+				      int order)
+{
+	int i, ret;
+	struct pages_mem pages_mem;
+	int npages = 1 << order;
+
+	pages_mem.size = npages * PAGE_SIZE;
+
+	if (msm_ion_heap_alloc_pages_mem(&pages_mem))
+		return -ENOMEM;
+
+	for (i = 0; i < npages; ++i)
+		pages_mem.pages[i] = page + i;
+
+	ret = msm_ion_heap_pages_zero(pages_mem.pages, npages);
+	dma_sync_single_for_device(dev, page_to_phys(page), pages_mem.size,
+				   DMA_BIDIRECTIONAL);
+	msm_ion_heap_free_pages_mem(&pages_mem);
+	return ret;
+}
+
+int msm_ion_heap_sg_table_zero(struct device *dev, struct sg_table *table,
+			       size_t size)
+{
+	struct scatterlist *sg;
+	int i, j, ret = 0, npages = 0;
+	struct pages_mem pages_mem;
+
+	pages_mem.size = PAGE_ALIGN(size);
+
+	if (msm_ion_heap_alloc_pages_mem(&pages_mem))
+		return -ENOMEM;
+
+	for_each_sg(table->sgl, sg, table->nents, i) {
+		struct page *page = sg_page(sg);
+		unsigned long len = sg->length;
+		/* needed to make dma_sync_sg_for_device work: */
+		sg->dma_address = sg_phys(sg);
+
+		for (j = 0; j < len / PAGE_SIZE; j++)
+			pages_mem.pages[npages++] = page + j;
+	}
+
+	ret = msm_ion_heap_pages_zero(pages_mem.pages, npages);
+	dma_sync_sg_for_device(dev, table->sgl, table->nents,
+			       DMA_BIDIRECTIONAL);
+	msm_ion_heap_free_pages_mem(&pages_mem);
+	return ret;
+}
+
+static struct ion_heap *msm_ion_heap_create(struct ion_platform_heap *heap_data)
+{
+	struct ion_heap *heap = NULL;
+
+	switch ((int)heap_data->type) {
+#ifdef CONFIG_CMA
+	case ION_HEAP_TYPE_SECURE_DMA:
+		heap = ion_secure_cma_heap_create(heap_data);
+		break;
+#endif
+	case ION_HEAP_TYPE_SYSTEM_SECURE:
+		heap = ion_system_secure_heap_create(heap_data);
+		break;
+	case ION_HEAP_TYPE_HYP_CMA:
+		heap = ion_cma_secure_heap_create(heap_data);
+		break;
+	default:
+		heap = ion_heap_create(heap_data);
+	}
+
+	if (IS_ERR_OR_NULL(heap)) {
+		pr_err("%s: error creating heap %s type %d base %pa size %zu\n",
+		       __func__, heap_data->name, heap_data->type,
+		       &heap_data->base, heap_data->size);
+		return ERR_PTR(-EINVAL);
+	}
+
+	heap->name = heap_data->name;
+	heap->id = heap_data->id;
+	heap->priv = heap_data->priv;
+	return heap;
+}
+
+static void msm_ion_heap_destroy(struct ion_heap *heap)
+{
+	if (!heap)
+		return;
+
+	switch ((int)heap->type) {
+#ifdef CONFIG_CMA
+	case ION_HEAP_TYPE_SECURE_DMA:
+		ion_secure_cma_heap_destroy(heap);
+		break;
+#endif
+	case ION_HEAP_TYPE_SYSTEM_SECURE:
+		ion_system_secure_heap_destroy(heap);
+		break;
+
+	case ION_HEAP_TYPE_HYP_CMA:
+		ion_cma_secure_heap_destroy(heap);
+		break;
+	default:
+		ion_heap_destroy(heap);
+	}
+}
+
+struct ion_heap *get_ion_heap(int heap_id)
+{
+	int i;
+	struct ion_heap *heap;
+
+	for (i = 0; i < num_heaps; i++) {
+		heap = heaps[i];
+		if (heap->id == heap_id)
+			return heap;
+	}
+
+	pr_err("%s: heap_id %d not found\n", __func__, heap_id);
+	return NULL;
+}
+
+static int msm_ion_probe(struct platform_device *pdev)
+{
+	static struct ion_device *new_dev;
+	struct ion_platform_data *pdata;
+	unsigned int pdata_needs_to_be_freed;
+	int err = -1;
+	int i;
+
+	if (pdev->dev.of_node) {
+		pdata = msm_ion_parse_dt(pdev);
+		if (IS_ERR(pdata)) {
+			err = PTR_ERR(pdata);
+			goto out;
+		}
+		pdata_needs_to_be_freed = 1;
+	} else {
+		pdata = pdev->dev.platform_data;
+		pdata_needs_to_be_freed = 0;
+	}
+
+	num_heaps = pdata->nr;
+
+	heaps = kcalloc(pdata->nr, sizeof(struct ion_heap *), GFP_KERNEL);
+
+	if (!heaps) {
+		err = -ENOMEM;
+		goto out;
+	}
+
+	new_dev = ion_device_create(compat_msm_ion_ioctl);
+	if (IS_ERR_OR_NULL(new_dev)) {
+		/*
+		 * Set idev to the error pointer so that clients can see
+		 * that Ion failed to probe.
+		 */
+		idev = new_dev;
+		err = PTR_ERR(new_dev);
+		goto freeheaps;
+	}
+
+	/* create the heaps as specified in the board file */
+	for (i = 0; i < num_heaps; i++) {
+		struct ion_platform_heap *heap_data = &pdata->heaps[i];
+
+		msm_ion_allocate(heap_data);
+
+		heap_data->has_outer_cache = pdata->has_outer_cache;
+		heaps[i] = msm_ion_heap_create(heap_data);
+		if (IS_ERR_OR_NULL(heaps[i])) {
+			heaps[i] = NULL;
+			continue;
+		}
+
+		if (heap_data->size)
+			pr_info("ION heap %s created at %pa with size %zx\n",
+				heap_data->name, &heap_data->base,
+				heap_data->size);
+		else
+			pr_info("ION heap %s created\n", heap_data->name);
+
+		ion_device_add_heap(new_dev, heaps[i]);
+	}
+	if (pdata_needs_to_be_freed)
+		free_pdata(pdata);
+
+	platform_set_drvdata(pdev, new_dev);
+	/*
+	 * Intentionally set this at the very end, so that client probes
+	 * are deferred until Ion is fully set up.
+	 */
+	idev = new_dev;
+
+	show_mem_notifier_register(&msm_ion_nb);
+	return 0;
+
+freeheaps:
+	kfree(heaps);
+	if (pdata_needs_to_be_freed)
+		free_pdata(pdata);
+out:
+	return err;
+}
+
+static int msm_ion_remove(struct platform_device *pdev)
+{
+	struct ion_device *idev = platform_get_drvdata(pdev);
+	int i;
+
+	for (i = 0; i < num_heaps; i++)
+		msm_ion_heap_destroy(heaps[i]);
+
+	ion_device_destroy(idev);
+	kfree(heaps);
+	return 0;
+}
+
+static struct of_device_id msm_ion_match_table[] = {
+	{.compatible = ION_COMPAT_STR},
+	{},
+};
+
+static struct platform_driver msm_ion_driver = {
+	.probe = msm_ion_probe,
+	.remove = msm_ion_remove,
+	.driver = {
+		.name = "ion-msm",
+		.of_match_table = msm_ion_match_table,
+	},
+};
+
+static int __init msm_ion_init(void)
+{
+	return platform_driver_register(&msm_ion_driver);
+}
+
+static void __exit msm_ion_exit(void)
+{
+	platform_driver_unregister(&msm_ion_driver);
+}
+
+subsys_initcall(msm_ion_init);
+module_exit(msm_ion_exit);
diff -Nruw linux-4.4.115-fbx/drivers/staging/android/ion/msm./msm_ion.h linux-4.4.115-fbx/drivers/staging/android/ion/msm/msm_ion.h
--- linux-4.4.115-fbx/drivers/staging/android/ion/msm./msm_ion.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/staging/android/ion/msm/msm_ion.h	2019-10-29 09:26:24.849214980 +0100
@@ -0,0 +1,207 @@
+/*
+ * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _MSM_MSM_ION_H
+#define _MSM_MSM_ION_H
+
+#include "../ion.h"
+#include "../../uapi/msm_ion.h"
+
+enum ion_permission_type {
+	IPT_TYPE_MM_CARVEOUT = 0,
+	IPT_TYPE_MFC_SHAREDMEM = 1,
+	IPT_TYPE_MDP_WRITEBACK = 2,
+};
+
+/*
+ * This flag allows clients when mapping into the IOMMU to specify to
+ * defer un-mapping from the IOMMU until the buffer memory is freed.
+ */
+#define ION_IOMMU_UNMAP_DELAYED 1
+
+/*
+ * This flag allows clients to defer unsecuring a buffer until the buffer
+ * is actually freed.
+ */
+#define ION_UNSECURE_DELAYED	1
+
+/**
+ * struct ion_cp_heap_pdata - defines a content protection heap in the given
+ * platform
+ * @permission_type:	Memory ID used to identify the memory to TZ
+ * @align:		Alignment requirement for the memory
+ * @secure_base:	Base address for securing the heap.
+ *			Note: This might be different from actual base address
+ *			of this heap in the case of a shared heap.
+ * @secure_size:	Memory size for securing the heap.
+ *			Note: This might be different from actual size
+ *			of this heap in the case of a shared heap.
+ * @fixed_position:	If nonzero, position in the fixed area.
+ * @iommu_map_all:	Indicates whether we should map whole heap into IOMMU.
+ * @iommu_2x_map_domain: Indicates the domain to use for overmapping.
+ * @request_ion_region:	function to be called when the number of allocations
+ *			goes from 0 -> 1
+ * @release_ion_region:	function to be called when the number of allocations
+ *			goes from 1 -> 0
+ * @setup_ion_region:	function to be called upon ion registration
+ * @allow_nonsecure_alloc: allow non-secure allocations from this heap. For
+ *			secure heaps, this flag must be set to allow non-secure
+ *			allocations. For non-secure heaps, this flag is ignored.
+ *
+ */
+struct ion_cp_heap_pdata {
+	enum ion_permission_type permission_type;
+	unsigned int align;
+	ion_phys_addr_t secure_base; /* Base addr used when heap is shared */
+	size_t secure_size; /* Size used for securing heap when heap is shared*/
+	int is_cma;
+	enum ion_fixed_position fixed_position;
+	int iommu_map_all;
+	int iommu_2x_map_domain;
+	int (*request_ion_region)(void *);
+	int (*release_ion_region)(void *);
+	void *(*setup_ion_region)(void);
+	int allow_nonsecure_alloc;
+};
+
+/**
+ * struct ion_co_heap_pdata - defines a carveout heap in the given platform
+ * @adjacent_mem_id:	Id of heap that this heap must be adjacent to.
+ * @align:		Alignment requirement for the memory
+ * @fixed_position:	If nonzero, position in the fixed area.
+ * @request_ion_region:	function to be called when the number of allocations
+ *			goes from 0 -> 1
+ * @release_ion_region:	function to be called when the number of allocations
+ *			goes from 1 -> 0
+ * @setup_ion_region:	function to be called upon ion registration
+ * @memory_type:	Memory type used for the heap
+ *
+ */
+struct ion_co_heap_pdata {
+	int adjacent_mem_id;
+	unsigned int align;
+	enum ion_fixed_position fixed_position;
+	int (*request_ion_region)(void *);
+	int (*release_ion_region)(void *);
+	void *(*setup_ion_region)(void);
+};
+
+struct msm_ion_prefetch_info {
+	struct list_head list;
+	int heap_id;
+	unsigned long *sizes;
+	int nr_sizes;
+};
+
+/**
+ * struct ion_cma_pdata - extra data for CMA regions
+ * @default_prefetch_size - default size to use for prefetching
+ */
+struct ion_cma_pdata {
+	unsigned long default_prefetch_size;
+};
+
+#ifdef CONFIG_ION
+/**
+ *  msm_ion_client_create - allocate a client using the ion_device specified in
+ *				drivers/staging/android/ion/msm/msm_ion.c
+ *
+ * The name parameter and the return values are the same as for
+ * ion_client_create().
+ */
+
+struct ion_client *msm_ion_client_create(const char *name);
+
+/**
+ * ion_handle_get_flags - get the flags for a given handle
+ *
+ * @client - client who allocated the handle
+ * @handle - handle to get the flags
+ * @flags - pointer to store the flags
+ *
+ * Gets the current flags for a handle. These flags indicate various options
+ * of the buffer (caching, security, etc.)
+ */
+int ion_handle_get_flags(struct ion_client *client, struct ion_handle *handle,
+				unsigned long *flags);
+
+/**
+ * ion_handle_get_size - get the allocated size of a given handle
+ *
+ * @client - client who allocated the handle
+ * @handle - handle to get the size
+ * @size - pointer to store the size
+ *
+ * Returns the allocated size of a handle: 0 on success, a negative
+ * value on error.
+ *
+ * NOTE: This is intended to be used only to get a size to pass to map_iommu.
+ * You should *NOT* rely on this for any other usage.
+ */
+
+int ion_handle_get_size(struct ion_client *client, struct ion_handle *handle,
+			size_t *size);
+/**
+ * msm_ion_do_cache_op - do cache operations.
+ *
+ * @client - pointer to ION client.
+ * @handle - pointer to buffer handle.
+ * @vaddr -  virtual address to operate on.
+ * @len - Length of data to do cache operation on.
+ * @cmd - Cache operation to perform:
+ *		ION_IOC_CLEAN_CACHES
+ *		ION_IOC_INV_CACHES
+ *		ION_IOC_CLEAN_INV_CACHES
+ *
+ * Returns 0 on success
+ */
+int msm_ion_do_cache_op(struct ion_client *client, struct ion_handle *handle,
+			void *vaddr, unsigned long len, unsigned int cmd);
+
+int msm_ion_do_cache_offset_op(
+		struct ion_client *client, struct ion_handle *handle,
+		void *vaddr, unsigned int offset, unsigned long len,
+		unsigned int cmd);
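+
A minimal kernel-side sketch of the CONFIG_ION path (assuming the legacy
4.4 ion client API from ../ion.h; error handling is kept to the essentials
and all names are illustrative):

	static int example_clean_cache(size_t len)
	{
		struct ion_client *client;
		struct ion_handle *handle;
		void *vaddr;
		int ret;

		client = msm_ion_client_create("example");
		if (IS_ERR(client))
			return PTR_ERR(client);

		handle = ion_alloc(client, len, PAGE_SIZE,
				   ION_HEAP(ION_SYSTEM_HEAP_ID),
				   ION_FLAG_CACHED);
		if (IS_ERR(handle)) {
			ret = PTR_ERR(handle);
			goto out_client;
		}

		vaddr = ion_map_kernel(client, handle);
		if (IS_ERR(vaddr)) {
			ret = PTR_ERR(vaddr);
			goto out_free;
		}

		ret = msm_ion_do_cache_op(client, handle, vaddr, len,
					  ION_IOC_CLEAN_CACHES);
		ion_unmap_kernel(client, handle);
	out_free:
		ion_free(client, handle);
	out_client:
		ion_client_destroy(client);
		return ret;
	}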
+
+#else
+static inline struct ion_client *msm_ion_client_create(const char *name)
+{
+	return ERR_PTR(-ENODEV);
+}
+
+static inline int ion_handle_get_size(struct ion_client *client,
+				struct ion_handle *handle, size_t *size)
+{
+	return -ENODEV;
+}
+
+static inline int msm_ion_do_cache_op(struct ion_client *client,
+			struct ion_handle *handle, void *vaddr,
+			unsigned long len, unsigned int cmd)
+{
+	return -ENODEV;
+}
+
+static inline int msm_ion_do_cache_offset_op(
+		struct ion_client *client, struct ion_handle *handle,
+		void *vaddr, unsigned int offset, unsigned long len,
+		unsigned int cmd)
+{
+	return -ENODEV;
+}
+
+#endif /* CONFIG_ION */
+
+#endif
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/staging/android/ion/msm_ion_priv.h	2019-01-22 16:16:26.715275493 +0100
@@ -0,0 +1,149 @@
+/*
+ * drivers/staging/android/ion/msm_ion_priv.h
+ *
+ * Copyright (C) 2011 Google, Inc.
+ * Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _MSM_ION_PRIV_H
+#define _MSM_ION_PRIV_H
+
+#include <linux/kref.h>
+#include <linux/mm_types.h>
+#include <linux/mutex.h>
+#include <linux/types.h>
+#include <linux/ion.h>
+#include <linux/iommu.h>
+#include <linux/seq_file.h>
+
+/**
+ * struct mem_map_data - represents information about the memory map for a heap
+ * @node:		list node used to store in the list of mem_map_data
+ * @addr:		start address of memory region.
+ * @addr_end:		end address of memory region.
+ * @size:		size of memory region.
+ * @client_name:	name of the client who owns this buffer.
+ *
+ */
+struct mem_map_data {
+	struct list_head node;
+	ion_phys_addr_t addr;
+	ion_phys_addr_t addr_end;
+	unsigned long size;
+	const char *client_name;
+};
+
+struct ion_heap *ion_iommu_heap_create(struct ion_platform_heap *);
+void ion_iommu_heap_destroy(struct ion_heap *);
+
+struct ion_heap *ion_cp_heap_create(struct ion_platform_heap *);
+void ion_cp_heap_destroy(struct ion_heap *);
+
+struct ion_heap *ion_system_secure_heap_create(struct ion_platform_heap *);
+void ion_system_secure_heap_destroy(struct ion_heap *);
+int ion_system_secure_heap_prefetch(struct ion_heap *heap, void *data);
+int ion_system_secure_heap_drain(struct ion_heap *heap, void *data);
+
+struct ion_heap *ion_cma_secure_heap_create(struct ion_platform_heap *);
+void ion_cma_secure_heap_destroy(struct ion_heap *);
+
+long msm_ion_custom_ioctl(struct ion_client *client,
+				unsigned int cmd,
+				unsigned long arg);
+
+#ifdef CONFIG_CMA
+struct ion_heap *ion_secure_cma_heap_create(struct ion_platform_heap *);
+void ion_secure_cma_heap_destroy(struct ion_heap *);
+
+int ion_secure_cma_prefetch(struct ion_heap *heap, void *data);
+
+int ion_secure_cma_drain_pool(struct ion_heap *heap, void *unused);
+
+#else
+static inline int ion_secure_cma_prefetch(struct ion_heap *heap, void *data)
+{
+	return -ENODEV;
+}
+
+static inline int ion_secure_cma_drain_pool(struct ion_heap *heap, void *unused)
+{
+	return -ENODEV;
+}
+
+#endif
+
+struct ion_heap *ion_removed_heap_create(struct ion_platform_heap *);
+void ion_removed_heap_destroy(struct ion_heap *);
+
+#define ION_CP_ALLOCATE_FAIL -1
+#define ION_RESERVED_ALLOCATE_FAIL -1
+
+/**
+ * ion_do_cache_op - do cache operations.
+ *
+ * @client - pointer to ION client.
+ * @handle - pointer to buffer handle.
+ * @uaddr -  virtual address to operate on.
+ * @offset - offset from physical address.
+ * @len - Length of data to do cache operation on.
+ * @cmd - Cache operation to perform:
+ *		ION_IOC_CLEAN_CACHES
+ *		ION_IOC_INV_CACHES
+ *		ION_IOC_CLEAN_INV_CACHES
+ *
+ * Returns 0 on success
+ */
+int ion_do_cache_op(struct ion_client *client, struct ion_handle *handle,
+			void *uaddr, unsigned long offset, unsigned long len,
+			unsigned int cmd);
+
+void ion_cp_heap_get_base(struct ion_heap *heap, unsigned long *base,
+			unsigned long *size);
+
+void ion_mem_map_show(struct ion_heap *heap);
+
+int ion_heap_is_system_secure_heap_type(enum ion_heap_type type);
+
+int ion_heap_allow_secure_allocation(enum ion_heap_type type);
+
+int ion_heap_allow_heap_secure(enum ion_heap_type type);
+
+int ion_heap_allow_handle_secure(enum ion_heap_type type);
+
+int get_secure_vmid(unsigned long);
+
+bool is_secure_vmid_valid(int vmid);
+
+/**
+ * Functions to help assign/unassign sg_table for System Secure Heap
+ */
+
+int ion_system_secure_heap_unassign_sg(struct sg_table *sgt, int source_vmid);
+int ion_system_secure_heap_assign_sg(struct sg_table *sgt, int dest_vmid);
+
+/**
+ * ion_create_chunked_sg_table - helper function to create sg table
+ * with specified chunk size
+ * @buffer_base:	The starting address used for the sg dma address
+ * @chunk_size:		The size of each entry in the sg table
+ * @total_size:		The total size of the sg table (i.e. the sum of the
+ *			entries). This will be rounded up to the nearest
+ *			multiple of `chunk_size'
+ */
+struct sg_table *ion_create_chunked_sg_table(phys_addr_t buffer_base,
+					size_t chunk_size, size_t total_size);
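+
Illustrative use (assuming a physically contiguous region; whether the
table must be released with sg_free_table() plus kfree() depends on the
implementation, which is not shown in this hunk):

	static void describe_region(phys_addr_t base)
	{
		struct sg_table *sgt;
		struct scatterlist *sg;
		int i;

		/* Describe a 1 MB region as sixteen 64 KB chunks. */
		sgt = ion_create_chunked_sg_table(base, SZ_64K, SZ_1M);
		if (IS_ERR_OR_NULL(sgt))
			return;

		for_each_sg(sgt->sgl, sg, sgt->nents, i)
			pr_debug("chunk %d: dma %pad len %u\n",
				 i, &sg->dma_address, sg->length);

		sg_free_table(sgt);
		kfree(sgt);
	}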
+
+void show_ion_usage(struct ion_device *dev);
+#endif /* _MSM_ION_PRIV_H */
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/staging/android/trace/lowmemorykiller.h	2019-01-22 16:16:26.719275529 +0100
@@ -0,0 +1,41 @@
+#undef TRACE_SYSTEM
+#define TRACE_INCLUDE_PATH ../../drivers/staging/android/trace
+#define TRACE_SYSTEM lowmemorykiller
+
+#if !defined(_TRACE_LOWMEMORYKILLER_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_LOWMEMORYKILLER_H
+
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(lowmemory_kill,
+	TP_PROTO(struct task_struct *killed_task, long cache_size, \
+		 long cache_limit, long free),
+
+	TP_ARGS(killed_task, cache_size, cache_limit, free),
+
+	TP_STRUCT__entry(
+			__array(char, comm, TASK_COMM_LEN)
+			__field(pid_t, pid)
+			__field(long, pagecache_size)
+			__field(long, pagecache_limit)
+			__field(long, free)
+	),
+
+	TP_fast_assign(
+			memcpy(__entry->comm, killed_task->comm, TASK_COMM_LEN);
+			__entry->pid = killed_task->pid;
+			__entry->pagecache_size = cache_size;
+			__entry->pagecache_limit = cache_limit;
+			__entry->free = free;
+	),
+
+	TP_printk("%s (%d), page cache %ldkB (limit %ldkB), free %ldkB",
+		__entry->comm, __entry->pid, __entry->pagecache_size,
+		__entry->pagecache_limit, __entry->free)
+);
+
+
+#endif /* if !defined(_TRACE_LOWMEMORYKILLER_H) || defined(TRACE_HEADER_MULTI_READ) */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
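+
A sketch of how the event would be emitted from lowmemorykiller.c (standard
tracepoint boilerplate; the "selected" naming is illustrative):

	#define CREATE_TRACE_POINTS
	#include "trace/lowmemorykiller.h"

	static void report_kill(struct task_struct *selected, long cache_size,
				long cache_limit, long free)
	{
		/* Generated by the TRACE_EVENT(lowmemory_kill, ...) above. */
		trace_lowmemory_kill(selected, cache_size, cache_limit, free);
	}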
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/staging/android/uapi/msm_ion.h	2019-01-22 16:16:26.719275529 +0100
@@ -0,0 +1,216 @@
+#ifndef _UAPI_MSM_ION_H
+#define _UAPI_MSM_ION_H
+
+#include "ion.h"
+
+#define ION_BIT(nr) (1UL << (nr))
+
+enum msm_ion_heap_types {
+	ION_HEAP_TYPE_MSM_START = ION_HEAP_TYPE_CUSTOM + 1,
+	ION_HEAP_TYPE_SECURE_DMA = ION_HEAP_TYPE_MSM_START,
+	ION_HEAP_TYPE_SYSTEM_SECURE,
+	ION_HEAP_TYPE_HYP_CMA,
+	/*
+	 * if you add a heap type here you should also add it to
+	 * heap_types_info[] in msm_ion.c
+	 */
+};
+
+/**
+ * These are the only IDs that should be used for Ion heap IDs.
+ * The IDs listed are the order in which allocation will be attempted
+ * if specified. Don't swap the order of heap IDs unless you know what
+ * you are doing!
+ * IDs are spaced on purpose so that new IDs can be inserted in between
+ * (for possible fallbacks).
+ */
+
+enum ion_heap_ids {
+	INVALID_HEAP_ID = -1,
+	ION_CP_MM_HEAP_ID = 8,
+	ION_SECURE_HEAP_ID = 9,
+	ION_SECURE_DISPLAY_HEAP_ID = 10,
+	ION_CP_MFC_HEAP_ID = 12,
+	ION_SPSS_HEAP_ID = 13, /* Secure Processor ION heap */
+	ION_CP_WB_HEAP_ID = 16, /* 8660 only */
+	ION_CAMERA_HEAP_ID = 20, /* 8660 only */
+	ION_SYSTEM_CONTIG_HEAP_ID = 21,
+	ION_ADSP_HEAP_ID = 22,
+	ION_PIL1_HEAP_ID = 23, /* Currently used for other PIL images */
+	ION_SF_HEAP_ID = 24,
+	ION_SYSTEM_HEAP_ID = 25,
+	ION_PIL2_HEAP_ID = 26, /* Currently used for modem firmware images */
+	ION_QSECOM_HEAP_ID = 27,
+	ION_AUDIO_HEAP_ID = 28,
+
+	ION_MM_FIRMWARE_HEAP_ID = 29,
+
+	ION_HEAP_ID_RESERVED = 31 /* Bit reserved for ION_FLAG_SECURE flag */
+};
+
+/*
+ * The IOMMU heap is deprecated! Here are some aliases for backwards
+ * compatibility:
+ */
+#define ION_IOMMU_HEAP_ID ION_SYSTEM_HEAP_ID
+#define ION_HEAP_TYPE_IOMMU ION_HEAP_TYPE_SYSTEM
+
+#define ION_SPSS_HEAP_ID ION_SPSS_HEAP_ID
+
+enum ion_fixed_position {
+	NOT_FIXED,
+	FIXED_LOW,
+	FIXED_MIDDLE,
+	FIXED_HIGH,
+};
+
+enum cp_mem_usage {
+	VIDEO_BITSTREAM = 0x1,
+	VIDEO_PIXEL = 0x2,
+	VIDEO_NONPIXEL = 0x3,
+	DISPLAY_SECURE_CP_USAGE = 0x4,
+	CAMERA_SECURE_CP_USAGE = 0x5,
+	MAX_USAGE = 0x6,
+	UNKNOWN = 0x7FFFFFFF,
+};
+
+/**
+ * Flags to be used when allocating from the secure heap for
+ * content protection
+ */
+#define ION_FLAG_CP_TOUCH		ION_BIT(17)
+#define ION_FLAG_CP_BITSTREAM		ION_BIT(18)
+#define ION_FLAG_CP_PIXEL		ION_BIT(19)
+#define ION_FLAG_CP_NON_PIXEL		ION_BIT(20)
+#define ION_FLAG_CP_CAMERA		ION_BIT(21)
+#define ION_FLAG_CP_HLOS		ION_BIT(22)
+#define ION_FLAG_CP_HLOS_FREE		ION_BIT(23)
+#define ION_FLAG_CP_SPSS_SP_SHARED	ION_BIT(24)
+#define ION_FLAG_CP_SEC_DISPLAY		ION_BIT(25)
+#define ION_FLAG_CP_APP			ION_BIT(26)
+#define ION_FLAG_CP_CAMERA_PREVIEW	ION_BIT(27)
+
+
+/**
+ * Flag to allow non-contiguous allocation of memory from the secure
+ * heap
+ */
+#define ION_FLAG_ALLOW_NON_CONTIG	ION_BIT(24)
+
+/**
+ * Flag to use when allocating to indicate that a heap is secure.
+ */
+#define ION_FLAG_SECURE			ION_BIT(ION_HEAP_ID_RESERVED)
+
+/**
+ * Flag for clients to force contiguous memory allocation
+ *
+ * Use of this flag is carefully monitored!
+ */
+#define ION_FLAG_FORCE_CONTIGUOUS	ION_BIT(30)
+
+/*
+ * Used in conjunction with heaps that pool memory, to force an allocation
+ * to come directly from the page allocator instead of from a pool
+ */
+#define ION_FLAG_POOL_FORCE_ALLOC	ION_BIT(16)
+
+/**
+ * Deprecated! Please use the corresponding ION_FLAG_*
+ */
+#define ION_SECURE ION_FLAG_SECURE
+#define ION_FORCE_CONTIGUOUS ION_FLAG_FORCE_CONTIGUOUS
+
+/**
+ * Macro should be used with ion_heap_ids defined above.
+ */
+#define ION_HEAP(bit)			ION_BIT(bit)
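+
For example (illustrative only), a heap ID mask permitting allocation from
either the system heap or the ADSP heap:

	unsigned int heap_id_mask =
		ION_HEAP(ION_SYSTEM_HEAP_ID) | ION_HEAP(ION_ADSP_HEAP_ID);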
+
+#define ION_ADSP_HEAP_NAME	"adsp"
+#define ION_SYSTEM_HEAP_NAME	"system"
+#define ION_VMALLOC_HEAP_NAME	ION_SYSTEM_HEAP_NAME
+#define ION_KMALLOC_HEAP_NAME	"kmalloc"
+#define ION_AUDIO_HEAP_NAME	"audio"
+#define ION_SF_HEAP_NAME	"sf"
+#define ION_MM_HEAP_NAME	"mm"
+#define ION_CAMERA_HEAP_NAME	"camera_preview"
+#define ION_IOMMU_HEAP_NAME	"iommu"
+#define ION_MFC_HEAP_NAME	"mfc"
+#define ION_SPSS_HEAP_NAME	"spss"
+#define ION_WB_HEAP_NAME	"wb"
+#define ION_MM_FIRMWARE_HEAP_NAME	"mm_fw"
+#define ION_PIL1_HEAP_NAME  "pil_1"
+#define ION_PIL2_HEAP_NAME  "pil_2"
+#define ION_QSECOM_HEAP_NAME	"qsecom"
+#define ION_SECURE_HEAP_NAME	"secure_heap"
+#define ION_SECURE_DISPLAY_HEAP_NAME "secure_display"
+
+#define ION_SET_CACHED(__cache)		(__cache | ION_FLAG_CACHED)
+#define ION_SET_UNCACHED(__cache)	(__cache & ~ION_FLAG_CACHED)
+
+#define ION_IS_CACHED(__flags)	((__flags) & ION_FLAG_CACHED)
+
+/* struct ion_flush_data - data passed to ion for flushing caches
+ *
+ * @handle:	handle with data to flush
+ * @fd:		fd to flush
+ * @vaddr:	userspace virtual address mapped with mmap
+ * @offset:	offset into the handle to flush
+ * @length:	length of handle to flush
+ *
+ * Performs cache operations on the handle. If p is the start address
+ * of the handle, p + offset through p + offset + length will have
+ * the cache operations performed.
+ */
+struct ion_flush_data {
+	ion_user_handle_t handle;
+	int fd;
+	void *vaddr;
+	unsigned int offset;
+	unsigned int length;
+};
+
+struct ion_prefetch_regions {
+	unsigned int vmid;
+	size_t __user *sizes;
+	unsigned int nr_sizes;
+};
+
+struct ion_prefetch_data {
+	int heap_id;
+	unsigned long len;
+	struct ion_prefetch_regions __user *regions;
+	unsigned int nr_regions;
+};
+
+#define ION_IOC_MSM_MAGIC 'M'
+
+/**
+ * DOC: ION_IOC_CLEAN_CACHES - clean the caches
+ *
+ * Clean the caches of the handle specified.
+ */
+#define ION_IOC_CLEAN_CACHES	_IOWR(ION_IOC_MSM_MAGIC, 0, \
+						struct ion_flush_data)
+/**
+ * DOC: ION_IOC_INV_CACHES - invalidate the caches
+ *
+ * Invalidate the caches of the handle specified.
+ */
+#define ION_IOC_INV_CACHES	_IOWR(ION_IOC_MSM_MAGIC, 1, \
+						struct ion_flush_data)
+/**
+ * DOC: ION_IOC_CLEAN_INV_CACHES - clean and invalidate the caches
+ *
+ * Clean and invalidate the caches of the handle specified.
+ */
+#define ION_IOC_CLEAN_INV_CACHES	_IOWR(ION_IOC_MSM_MAGIC, 2, \
+						struct ion_flush_data)
+
+#define ION_IOC_PREFETCH		_IOWR(ION_IOC_MSM_MAGIC, 3, \
+						struct ion_prefetch_data)
+
+#define ION_IOC_DRAIN			_IOWR(ION_IOC_MSM_MAGIC, 4, \
+						struct ion_prefetch_data)
+
+#endif
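+
A small userspace sketch of the prefetch/drain pair (assuming the /dev/ion
client node and this header; regions handling is heap-specific, so the
regions pointer is left empty here and only the plain length is used):

	#include <sys/ioctl.h>
	#include "msm_ion.h"	/* hypothetical include path */

	static int prefetch_then_drain(int ion_fd, int heap_id,
				       unsigned long bytes)
	{
		struct ion_prefetch_data data = {
			.heap_id = heap_id,
			.len = bytes,
			.regions = NULL,
			.nr_regions = 0,
		};

		if (ioctl(ion_fd, ION_IOC_PREFETCH, &data) < 0)
			return -1;
		return ioctl(ion_fd, ION_IOC_DRAIN, &data);
	}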
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/staging/android/uapi/oneshot_sync.h	2019-01-22 16:16:26.719275529 +0100
@@ -0,0 +1,49 @@
+#ifndef ONESHOT_SYNC_H
+#define ONESHOT_SYNC_H
+
+/**
+ * DOC: Oneshot sync Userspace API
+ *
+ * Opening a file descriptor from /dev/oneshot_sync creates a sync timeline
+ * for userspace signaled fences. Userspace may create new fences from a
+ * /dev/oneshot_sync file descriptor and then signal them by passing the fence
+ * file descriptor in an ioctl() call on the fd used to create the fence.
+ * Unlike most sync timelines, there is no ordering on a oneshot timeline.
+ * Each fence may be signaled in any order without affecting the state of other
+ * fences on the timeline.
+ */
+
+#define ONESHOT_SYNC_IOC_MAGIC '1'
+
+/**
+ * struct oneshot_sync_create_fence - argument to create fence ioctl
+ * @name: name of the new fence, to aid debugging.
+ * @fence_fd: returned sync_fence file descriptor
+ */
+struct oneshot_sync_create_fence {
+	char name[32];
+	int fence_fd;
+};
+
+/**
+ * DOC: ONESHOT_SYNC_IOC_CREATE_FENCE - create a userspace signaled fence
+ *
+ * Create a fence that may be signaled by userspace by calling
+ * ONESHOT_SYNC_IOC_SIGNAL_FENCE. There are no order dependencies between
+ * these fences, but otherwise they behave like normal sync fences.
+ * Argument is struct oneshot_sync_create_fence.
+ */
+#define ONESHOT_SYNC_IOC_CREATE_FENCE _IOWR(ONESHOT_SYNC_IOC_MAGIC, 1,\
+		struct oneshot_sync_create_fence)
+
+/**
+ * DOC: ONESHOT_SYNC_IOC_SIGNAL_FENCE - signal a fence
+ *
+ * Signal a fence that was created by a ONESHOT_SYNC_IOC_CREATE_FENCE
+ * call on the same file descriptor. This allows a fence to be shared
+ * to other processes but only signaled by the process owning the fd
+ * used to create the fence.  Argument is the fence file descriptor.
+ */
+#define ONESHOT_SYNC_IOC_SIGNAL_FENCE _IOWR(ONESHOT_SYNC_IOC_MAGIC, 2,\
+		int)
+#endif
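+
A minimal userspace sketch (only the device node and the two ioctls
documented above are assumed; the fence fd is passed by pointer, as the
_IOWR(..., int) encoding suggests):

	#include <fcntl.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <unistd.h>
	#include "oneshot_sync.h"	/* hypothetical include path */

	int main(void)
	{
		struct oneshot_sync_create_fence create;
		int timeline_fd = open("/dev/oneshot_sync", O_RDWR);

		if (timeline_fd < 0)
			return 1;

		memset(&create, 0, sizeof(create));
		strncpy(create.name, "example", sizeof(create.name) - 1);
		if (ioctl(timeline_fd, ONESHOT_SYNC_IOC_CREATE_FENCE,
			  &create) < 0)
			return 1;

		/* Hand create.fence_fd to a waiter, then signal it here. */
		ioctl(timeline_fd, ONESHOT_SYNC_IOC_SIGNAL_FENCE,
		      &create.fence_fd);

		close(create.fence_fd);
		close(timeline_fd);
		return 0;
	}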
diff -Nruw linux-4.4.115-fbx/drivers/switch./Kconfig linux-4.4.115-fbx/drivers/switch/Kconfig
--- linux-4.4.115-fbx/drivers/switch./Kconfig	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/switch/Kconfig	2019-01-22 16:16:27.123279188 +0100
@@ -0,0 +1,15 @@
+menuconfig SWITCH
+	tristate "Switch class support"
+	help
+	  Say Y here to enable switch class support. This allows
+	  monitoring switches by userspace via sysfs and uevent.
+
+if SWITCH
+
+config SWITCH_GPIO
+	tristate "GPIO Swith support"
+	depends on GPIOLIB
+	help
+	  Say Y here to enable GPIO based switch support.
+
+endif # SWITCH
diff -Nruw linux-4.4.115-fbx/drivers/switch./Makefile linux-4.4.115-fbx/drivers/switch/Makefile
--- linux-4.4.115-fbx/drivers/switch./Makefile	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/switch/Makefile	2019-01-22 16:16:27.123279188 +0100
@@ -0,0 +1,4 @@
+# Switch Class Driver
+obj-$(CONFIG_SWITCH)		+= switch_class.o
+obj-$(CONFIG_SWITCH_GPIO)	+= switch_gpio.o
+
diff -Nruw linux-4.4.115-fbx/drivers/switch./switch_class.c linux-4.4.115-fbx/drivers/switch/switch_class.c
--- linux-4.4.115-fbx/drivers/switch./switch_class.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/switch/switch_class.c	2019-01-22 16:16:27.123279188 +0100
@@ -0,0 +1,174 @@
+/*
+ *  drivers/switch/switch_class.c
+ *
+ * Copyright (C) 2008 Google, Inc.
+ * Author: Mike Lockwood <lockwood@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+*/
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/fs.h>
+#include <linux/err.h>
+#include <linux/switch.h>
+
+struct class *switch_class;
+static atomic_t device_count;
+
+static ssize_t state_show(struct device *dev, struct device_attribute *attr,
+		char *buf)
+{
+	struct switch_dev *sdev = (struct switch_dev *)
+		dev_get_drvdata(dev);
+
+	if (sdev->print_state) {
+		int ret = sdev->print_state(sdev, buf);
+		if (ret >= 0)
+			return ret;
+	}
+	return sprintf(buf, "%d\n", sdev->state);
+}
+
+static ssize_t name_show(struct device *dev, struct device_attribute *attr,
+		char *buf)
+{
+	struct switch_dev *sdev = (struct switch_dev *)
+		dev_get_drvdata(dev);
+
+	if (sdev->print_name) {
+		int ret = sdev->print_name(sdev, buf);
+		if (ret >= 0)
+			return ret;
+	}
+	return sprintf(buf, "%s\n", sdev->name);
+}
+
+static DEVICE_ATTR(state, S_IRUGO, state_show, NULL);
+static DEVICE_ATTR(name, S_IRUGO, name_show, NULL);
+
+void switch_set_state(struct switch_dev *sdev, int state)
+{
+	char name_buf[120];
+	char state_buf[120];
+	char *prop_buf;
+	char *envp[3];
+	int env_offset = 0;
+	int length;
+
+	if (sdev->state != state) {
+		sdev->state = state;
+
+		prop_buf = (char *)get_zeroed_page(GFP_KERNEL);
+		if (prop_buf) {
+			length = name_show(sdev->dev, NULL, prop_buf);
+			if (length > 0) {
+				if (prop_buf[length - 1] == '\n')
+					prop_buf[length - 1] = 0;
+				snprintf(name_buf, sizeof(name_buf),
+					"SWITCH_NAME=%s", prop_buf);
+				envp[env_offset++] = name_buf;
+			}
+			length = state_show(sdev->dev, NULL, prop_buf);
+			if (length > 0) {
+				if (prop_buf[length - 1] == '\n')
+					prop_buf[length - 1] = 0;
+				snprintf(state_buf, sizeof(state_buf),
+					"SWITCH_STATE=%s", prop_buf);
+				envp[env_offset++] = state_buf;
+			}
+			envp[env_offset] = NULL;
+			kobject_uevent_env(&sdev->dev->kobj, KOBJ_CHANGE, envp);
+			free_page((unsigned long)prop_buf);
+		} else {
+			printk(KERN_ERR "out of memory in switch_set_state\n");
+			kobject_uevent(&sdev->dev->kobj, KOBJ_CHANGE);
+		}
+	}
+}
+EXPORT_SYMBOL_GPL(switch_set_state);
+
+static int create_switch_class(void)
+{
+	if (!switch_class) {
+		switch_class = class_create(THIS_MODULE, "switch");
+		if (IS_ERR(switch_class))
+			return PTR_ERR(switch_class);
+		atomic_set(&device_count, 0);
+	}
+
+	return 0;
+}
+
+int switch_dev_register(struct switch_dev *sdev)
+{
+	int ret;
+
+	if (!switch_class) {
+		ret = create_switch_class();
+		if (ret < 0)
+			return ret;
+	}
+
+	sdev->index = atomic_inc_return(&device_count);
+	sdev->dev = device_create(switch_class, NULL,
+		MKDEV(0, sdev->index), NULL, sdev->name);
+	if (IS_ERR(sdev->dev))
+		return PTR_ERR(sdev->dev);
+
+	ret = device_create_file(sdev->dev, &dev_attr_state);
+	if (ret < 0)
+		goto err_create_file_1;
+	ret = device_create_file(sdev->dev, &dev_attr_name);
+	if (ret < 0)
+		goto err_create_file_2;
+
+	dev_set_drvdata(sdev->dev, sdev);
+	sdev->state = 0;
+	return 0;
+
+err_create_file_2:
+	device_remove_file(sdev->dev, &dev_attr_state);
+err_create_file_1:
+	device_destroy(switch_class, MKDEV(0, sdev->index));
+	printk(KERN_ERR "switch: Failed to register driver %s\n", sdev->name);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(switch_dev_register);
+
+void switch_dev_unregister(struct switch_dev *sdev)
+{
+	device_remove_file(sdev->dev, &dev_attr_name);
+	device_remove_file(sdev->dev, &dev_attr_state);
+	device_destroy(switch_class, MKDEV(0, sdev->index));
+	dev_set_drvdata(sdev->dev, NULL);
+}
+EXPORT_SYMBOL_GPL(switch_dev_unregister);
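+
A minimal consumer sketch (hypothetical driver, using only the API exported
above and the switch_dev fields this file references):

	#include <linux/switch.h>

	static struct switch_dev example_sdev = {
		.name = "example",
	};

	static int example_attach(void)
	{
		int ret = switch_dev_register(&example_sdev);

		if (ret)
			return ret;

		/* Emits a KOBJ_CHANGE uevent with SWITCH_NAME/SWITCH_STATE. */
		switch_set_state(&example_sdev, 1);
		return 0;
	}

	static void example_detach(void)
	{
		switch_set_state(&example_sdev, 0);
		switch_dev_unregister(&example_sdev);
	}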
+
+static int __init switch_class_init(void)
+{
+	return create_switch_class();
+}
+
+static void __exit switch_class_exit(void)
+{
+	class_destroy(switch_class);
+}
+
+module_init(switch_class_init);
+module_exit(switch_class_exit);
+
+MODULE_AUTHOR("Mike Lockwood <lockwood@android.com>");
+MODULE_DESCRIPTION("Switch class driver");
+MODULE_LICENSE("GPL");
diff -Nruw linux-4.4.115-fbx/drivers/tee./Kconfig linux-4.4.115-fbx/drivers/tee/Kconfig
--- linux-4.4.115-fbx/drivers/tee./Kconfig	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/tee/Kconfig	2019-01-22 16:16:27.147279405 +0100
@@ -0,0 +1,19 @@
+# Generic Trusted Execution Environment Configuration
+config TEE
+	tristate "Trusted Execution Environment support"
+	depends on HAVE_ARM_SMCCC || COMPILE_TEST
+	select DMA_SHARED_BUFFER
+	select GENERIC_ALLOCATOR
+	help
+	  This implements a generic interface towards a Trusted Execution
+	  Environment (TEE).
+
+if TEE
+
+menu "TEE drivers"
+
+source "drivers/tee/optee/Kconfig"
+
+endmenu
+
+endif
diff -Nruw linux-4.4.115-fbx/drivers/tee./Makefile linux-4.4.115-fbx/drivers/tee/Makefile
--- linux-4.4.115-fbx/drivers/tee./Makefile	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/tee/Makefile	2019-01-22 16:16:27.147279405 +0100
@@ -0,0 +1,5 @@
+obj-$(CONFIG_TEE) += tee.o
+tee-objs += tee_core.o
+tee-objs += tee_shm.o
+tee-objs += tee_shm_pool.o
+obj-$(CONFIG_OPTEE) += optee/
diff -Nruw linux-4.4.115-fbx/drivers/tee./optee/Kconfig linux-4.4.115-fbx/drivers/tee/optee/Kconfig
--- linux-4.4.115-fbx/drivers/tee./optee/Kconfig	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/tee/optee/Kconfig	2019-01-22 16:16:27.147279405 +0100
@@ -0,0 +1,7 @@
+# OP-TEE Trusted Execution Environment Configuration
+config OPTEE
+	tristate "OP-TEE"
+	depends on HAVE_ARM_SMCCC
+	help
+	  This implements the OP-TEE Trusted Execution Environment (TEE)
+	  driver.
diff -Nruw linux-4.4.115-fbx/drivers/tee./optee/Makefile linux-4.4.115-fbx/drivers/tee/optee/Makefile
--- linux-4.4.115-fbx/drivers/tee./optee/Makefile	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/tee/optee/Makefile	2019-10-29 09:26:24.913215606 +0100
@@ -0,0 +1,5 @@
+obj-$(CONFIG_OPTEE) += optee.o
+optee-objs += core.o
+optee-objs += call.o
+optee-objs += rpc.o
+optee-objs += supp.o
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/thermal/lmh_interface.c	2019-01-22 16:16:27.151279441 +0100
@@ -0,0 +1,1244 @@
+/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "%s:%s " fmt, KBUILD_MODNAME, __func__
+
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/io.h>
+#include <linux/err.h>
+#include <linux/of.h>
+#include <linux/sysfs.h>
+#include <linux/rwsem.h>
+#include <linux/debugfs.h>
+#include <linux/thermal.h>
+#include <linux/slab.h>
+#include "lmh_interface.h"
+#include <linux/string.h>
+#include <linux/uaccess.h>
+
+#define LMH_MON_NAME			"lmh_monitor"
+#define LMH_ISR_POLL_DELAY		"interrupt_poll_delay_msec"
+#define LMH_TRACE_ENABLE		"hw_trace_enable"
+#define LMH_TRACE_INTERVAL		"hw_trace_interval"
+#define LMH_DBGFS_DIR			"debug"
+#define LMH_DBGFS_READ			"data"
+#define LMH_DBGFS_CONFIG_READ		"config"
+#define LMH_DBGFS_READ_TYPES		"data_types"
+#define LMH_DBGFS_CONFIG_TYPES		"config_types"
+#define LMH_TRACE_INTERVAL_XO_TICKS	250
+#define LMH_POLLING_MSEC		30
+
+struct lmh_mon_threshold {
+	int				value;
+	bool				active;
+};
+
+struct lmh_device_data {
+	char				device_name[LMH_NAME_MAX];
+	struct lmh_device_ops		*device_ops;
+	uint32_t			max_level;
+	int				curr_level;
+	int				*levels;
+	struct dentry			*dev_parent;
+	struct dentry			*max_lvl_fs;
+	struct dentry			*curr_lvl_fs;
+	struct dentry			*avail_lvl_fs;
+	struct list_head		list_ptr;
+	struct rw_semaphore		lock;
+	struct device			dev;
+};
+
+struct lmh_mon_sensor_data {
+	struct list_head		list_ptr;
+	char				sensor_name[LMH_NAME_MAX];
+	struct lmh_sensor_ops		*sensor_ops;
+	struct rw_semaphore		lock;
+	struct lmh_mon_threshold	trip[LMH_TRIP_MAX];
+	struct thermal_zone_device	*tzdev;
+	enum thermal_device_mode	mode;
+};
+
+struct lmh_mon_driver_data {
+	struct dentry			*debugfs_parent;
+	struct dentry			*poll_fs;
+	struct dentry			*enable_hw_log;
+	struct dentry			*hw_log_delay;
+	uint32_t			hw_log_enable;
+	uint64_t			hw_log_interval;
+	struct dentry			*debug_dir;
+	struct dentry			*debug_read;
+	struct dentry			*debug_config;
+	struct dentry			*debug_read_type;
+	struct dentry			*debug_config_type;
+	struct lmh_debug_ops		*debug_ops;
+};
+
+enum lmh_read_type {
+	LMH_DEBUG_READ_TYPE,
+	LMH_DEBUG_CONFIG_TYPE,
+	LMH_PROFILES,
+};
+
+static struct lmh_mon_driver_data	*lmh_mon_data;
+static struct class			lmh_class_info = {
+	.name = "msm_limits",
+};
+static int lmh_poll_interval = LMH_POLLING_MSEC;
+static DECLARE_RWSEM(lmh_mon_access_lock);
+static LIST_HEAD(lmh_sensor_list);
+static DECLARE_RWSEM(lmh_dev_access_lock);
+static LIST_HEAD(lmh_device_list);
+
+#define LMH_CREATE_DEBUGFS_FILE(_node, _name, _mode, _parent, _data, _ops, \
+	_ret) do { \
+		_node = debugfs_create_file(_name, _mode, _parent, \
+				_data, _ops); \
+		if (IS_ERR(_node)) { \
+			_ret = PTR_ERR(_node); \
+			pr_err("Error creating debugfs file:%s. err:%d\n", \
+					_name, _ret); \
+		} \
+	} while (0)
+
+#define LMH_CREATE_DEBUGFS_DIR(_node, _name, _parent, _ret) \
+	do { \
+		_node = debugfs_create_dir(_name, _parent); \
+		if (IS_ERR(_node)) { \
+			_ret = PTR_ERR(_node); \
+			pr_err("Error creating debugfs dir:%s. err:%d\n", \
+					_name, _ret); \
+		} \
+	} while (0)
+
+#define LMH_HW_LOG_FS(_name) \
+static int _name##_get(void *data, u64 *val) \
+{ \
+	*val = lmh_mon_data->_name; \
+	return 0; \
+} \
+static int _name##_set(void *data, u64 val) \
+{ \
+	struct lmh_mon_sensor_data *lmh_sensor = data; \
+	int ret = 0; \
+	lmh_mon_data->_name = val; \
+	if (lmh_mon_data->hw_log_enable) \
+		ret = lmh_sensor->sensor_ops->enable_hw_log( \
+			lmh_mon_data->hw_log_interval, \
+			lmh_mon_data->hw_log_enable); \
+	else \
+		ret = lmh_sensor->sensor_ops->disable_hw_log(); \
+	return ret; \
+} \
+DEFINE_SIMPLE_ATTRIBUTE(_name##_fops, _name##_get, _name##_set, \
+	"%llu\n");
+
+#define LMH_DEV_GET(_name) \
+static ssize_t _name##_get(struct device *dev, \
+	struct device_attribute *attr, char *buf) \
+{ \
+	struct lmh_device_data *lmh_dev = container_of(dev, \
+			struct lmh_device_data, dev); \
+	return snprintf(buf, LMH_NAME_MAX, "%d", lmh_dev->_name); \
+} \
+
+LMH_HW_LOG_FS(hw_log_enable);
+LMH_HW_LOG_FS(hw_log_interval);
+LMH_DEV_GET(max_level);
+LMH_DEV_GET(curr_level);
+
+int lmh_get_poll_interval(void)
+{
+	return lmh_poll_interval;
+}
+
+static ssize_t curr_level_set(struct device *dev,
+	struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct lmh_device_data *lmh_dev = container_of(dev,
+		struct lmh_device_data, dev);
+	int val = 0, ret = 0;
+
+	ret = kstrtouint(buf, 0, &val);
+	if (ret < 0) {
+		pr_err("Invalid input [%s]. err:%d\n", buf, ret);
+		return ret;
+	}
+	return lmh_set_dev_level(lmh_dev->device_name, val);
+}
+
+static ssize_t avail_level_get(struct device *dev,
+	struct device_attribute *attr, char *buf)
+{
+	struct lmh_device_data *lmh_dev = container_of(dev,
+		struct lmh_device_data, dev);
+	uint32_t *type_list = NULL;
+	int ret = 0, count = 0, lvl_buf_count = 0, idx = 0;
+	char *lvl_buf = NULL;
+
+	if (!lmh_dev || !lmh_dev->levels || !lmh_dev->max_level) {
+		pr_err("Invalid input\n");
+		return -EINVAL;
+	}
+	type_list = lmh_dev->levels;
+	lvl_buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
+	if (!lvl_buf) {
+		pr_err("Error allocating memory\n");
+		return -ENOMEM;
+	}
+	for (idx = 0; idx < lmh_dev->max_level && lvl_buf_count < PAGE_SIZE;
+		idx++) {
+		count = snprintf(lvl_buf + lvl_buf_count,
+				PAGE_SIZE - lvl_buf_count, "%d ",
+				type_list[idx]);
+		if (count + lvl_buf_count >= PAGE_SIZE) {
+			pr_err("overflow.\n");
+			break;
+		} else if (count < 0) {
+			pr_err("Error writing to buffer. err:%d\n", count);
+			ret = count;
+			goto lvl_get_exit;
+		}
+		lvl_buf_count += count;
+	}
+	count = snprintf(lvl_buf + lvl_buf_count, PAGE_SIZE - lvl_buf_count,
+			"\n");
+	if (count < 0)
+		pr_err("Error writing new line to buffer. err:%d\n", count);
+	else if (count + lvl_buf_count < PAGE_SIZE)
+		lvl_buf_count += count;
+
+	count = snprintf(buf, lvl_buf_count + 1, "%s", lvl_buf);
+	if (count > PAGE_SIZE || count < 0) {
+		pr_err("copy to user buffer failed\n");
+		ret = -EFAULT;
+		goto lvl_get_exit;
+	}
+
+lvl_get_exit:
+	kfree(lvl_buf);
+	return (ret) ? ret : count;
+}
+
+static int lmh_create_dev_sysfs(struct lmh_device_data *lmh_dev)
+{
+	int ret = 0;
+	static DEVICE_ATTR(level, 0600, curr_level_get, curr_level_set);
+	static DEVICE_ATTR(available_levels, 0400, avail_level_get, NULL);
+	static DEVICE_ATTR(total_levels, 0400, max_level_get, NULL);
+
+	lmh_dev->dev.class = &lmh_class_info;
+	dev_set_name(&lmh_dev->dev, "%s", lmh_dev->device_name);
+	ret = device_register(&lmh_dev->dev);
+	if (ret) {
+		pr_err("Error registering profile device. err:%d\n", ret);
+		return ret;
+	}
+	ret = device_create_file(&lmh_dev->dev, &dev_attr_level);
+	if (ret) {
+		pr_err("Error creating profile level sysfs node. err:%d\n",
+			ret);
+		goto dev_sysfs_exit;
+	}
+	ret = device_create_file(&lmh_dev->dev, &dev_attr_total_levels);
+	if (ret) {
+		pr_err("Error creating total level sysfs node. err:%d\n",
+			ret);
+		goto dev_sysfs_exit;
+	}
+	ret = device_create_file(&lmh_dev->dev, &dev_attr_available_levels);
+	if (ret) {
+		pr_err("Error creating available level sysfs node. err:%d\n",
+			ret);
+		goto dev_sysfs_exit;
+	}
+
+dev_sysfs_exit:
+	if (ret)
+		device_unregister(&lmh_dev->dev);
+	return ret;
+}
+
+static int lmh_create_debugfs_nodes(struct lmh_mon_sensor_data *lmh_sensor)
+{
+	int ret = 0;
+
+	lmh_mon_data->hw_log_enable = 0;
+	lmh_mon_data->hw_log_interval = LMH_TRACE_INTERVAL_XO_TICKS;
+	LMH_CREATE_DEBUGFS_FILE(lmh_mon_data->enable_hw_log, LMH_TRACE_ENABLE,
+		0600, lmh_mon_data->debugfs_parent, (void *)lmh_sensor,
+		&hw_log_enable_fops, ret);
+	if (ret)
+		goto create_debugfs_exit;
+	LMH_CREATE_DEBUGFS_FILE(lmh_mon_data->hw_log_delay, LMH_TRACE_INTERVAL,
+		0600, lmh_mon_data->debugfs_parent, (void *)lmh_sensor,
+		&hw_log_interval_fops, ret);
+	if (ret)
+		goto create_debugfs_exit;
+
+create_debugfs_exit:
+	if (ret)
+		debugfs_remove_recursive(lmh_mon_data->debugfs_parent);
+	return ret;
+}
+
+static struct lmh_mon_sensor_data *lmh_match_sensor_ops(
+		struct lmh_sensor_ops *ops)
+{
+	struct lmh_mon_sensor_data *lmh_sensor = NULL;
+
+	list_for_each_entry(lmh_sensor, &lmh_sensor_list, list_ptr) {
+		if (lmh_sensor->sensor_ops == ops)
+			return lmh_sensor;
+	}
+
+	return NULL;
+}
+
+static struct lmh_mon_sensor_data *lmh_match_sensor_name(char *sensor_name)
+{
+	struct lmh_mon_sensor_data *lmh_sensor = NULL;
+
+	list_for_each_entry(lmh_sensor, &lmh_sensor_list, list_ptr) {
+		if (!strncasecmp(lmh_sensor->sensor_name, sensor_name,
+			LMH_NAME_MAX))
+			return lmh_sensor;
+	}
+
+	return NULL;
+}
+
+static void lmh_evaluate_and_notify(struct lmh_mon_sensor_data *lmh_sensor,
+	       int val)
+{
+	int idx = 0, trip = 0;
+	bool cond = false;
+
+	for (idx = 0; idx < LMH_TRIP_MAX; idx++) {
+		if (!lmh_sensor->trip[idx].active)
+			continue;
+		if (idx == LMH_HIGH_TRIP) {
+			trip = THERMAL_TRIP_CONFIGURABLE_HI;
+			cond = (val >= lmh_sensor->trip[idx].value);
+		} else {
+			trip = THERMAL_TRIP_CONFIGURABLE_LOW;
+			cond = (val <= lmh_sensor->trip[idx].value);
+		}
+		if (cond) {
+			lmh_sensor->trip[idx].active = false;
+			thermal_sensor_trip(lmh_sensor->tzdev, trip, val);
+		}
+	}
+}
+
+void lmh_update_reading(struct lmh_sensor_ops *ops, int trip_val)
+{
+	struct lmh_mon_sensor_data *lmh_sensor = NULL;
+
+	if (!ops) {
+		pr_err("Invalid input\n");
+		return;
+	}
+
+	down_read(&lmh_mon_access_lock);
+	lmh_sensor = lmh_match_sensor_ops(ops);
+	if (!lmh_sensor) {
+		pr_err("Invalid ops\n");
+		goto interrupt_exit;
+	}
+	down_write(&lmh_sensor->lock);
+	pr_debug("Sensor:[%s] intensity:%d\n", lmh_sensor->sensor_name,
+		trip_val);
+	lmh_evaluate_and_notify(lmh_sensor, trip_val);
+interrupt_exit:
+	if (lmh_sensor)
+		up_write(&lmh_sensor->lock);
+	up_read(&lmh_mon_access_lock);
+	return;
+}
+
+static int lmh_sensor_read(struct thermal_zone_device *dev, int *val)
+{
+	int ret = 0;
+	struct lmh_mon_sensor_data *lmh_sensor;
+
+	if (!val || !dev || !dev->devdata) {
+		pr_err("Invalid input\n");
+		return -EINVAL;
+	}
+	lmh_sensor = dev->devdata;
+	down_read(&lmh_mon_access_lock);
+	down_read(&lmh_sensor->lock);
+	ret = lmh_sensor->sensor_ops->read(lmh_sensor->sensor_ops, val);
+	if (ret) {
+		pr_err("Error reading sensor:%s. err:%d\n",
+				lmh_sensor->sensor_name, ret);
+		goto unlock_and_exit;
+	}
+unlock_and_exit:
+	up_read(&lmh_sensor->lock);
+	up_read(&lmh_mon_access_lock);
+
+	return ret;
+}
+
+static int lmh_get_mode(struct thermal_zone_device *dev,
+		enum thermal_device_mode *mode)
+{
+	struct lmh_mon_sensor_data *lmh_sensor;
+
+	if (!dev || !dev->devdata || !mode) {
+		pr_err("Invalid input\n");
+		return -EINVAL;
+	}
+	lmh_sensor = dev->devdata;
+	*mode = lmh_sensor->mode;
+
+	return 0;
+}
+
+static int lmh_get_trip_type(struct thermal_zone_device *dev,
+		int trip, enum thermal_trip_type *type)
+{
+	if (!type || !dev || !dev->devdata || trip < 0
+		|| trip >= LMH_TRIP_MAX) {
+		pr_err("Invalid input\n");
+		return -EINVAL;
+	}
+
+	switch (trip) {
+	case LMH_HIGH_TRIP:
+		*type = THERMAL_TRIP_CONFIGURABLE_HI;
+		break;
+	case LMH_LOW_TRIP:
+		*type = THERMAL_TRIP_CONFIGURABLE_LOW;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int lmh_activate_trip(struct thermal_zone_device *dev,
+		int trip, enum thermal_trip_activation_mode mode)
+{
+	struct lmh_mon_sensor_data *lmh_sensor;
+
+	if (!dev || !dev->devdata || trip < 0 || trip >= LMH_TRIP_MAX) {
+		pr_err("Invalid input\n");
+		return -EINVAL;
+	}
+
+	lmh_sensor = dev->devdata;
+	down_read(&lmh_mon_access_lock);
+	down_write(&lmh_sensor->lock);
+	lmh_sensor->trip[trip].active = (mode ==
+					THERMAL_TRIP_ACTIVATION_ENABLED);
+	up_write(&lmh_sensor->lock);
+	up_read(&lmh_mon_access_lock);
+
+	return 0;
+}
+
+static int lmh_get_trip_value(struct thermal_zone_device *dev,
+		int trip, int *value)
+{
+	struct lmh_mon_sensor_data *lmh_sensor;
+
+	if (!dev || !dev->devdata || trip < 0 || trip >= LMH_TRIP_MAX
+		|| !value) {
+		pr_err("Invalid input\n");
+		return -EINVAL;
+	}
+
+	lmh_sensor = dev->devdata;
+	down_read(&lmh_mon_access_lock);
+	down_read(&lmh_sensor->lock);
+	*value = lmh_sensor->trip[trip].value;
+	up_read(&lmh_sensor->lock);
+	up_read(&lmh_mon_access_lock);
+
+	return 0;
+}
+
+static int lmh_set_trip_value(struct thermal_zone_device *dev,
+		int trip, int value)
+{
+	struct lmh_mon_sensor_data *lmh_sensor;
+
+	if (!dev || !dev->devdata || trip < 0 || trip >= LMH_TRIP_MAX) {
+		pr_err("Invalid input\n");
+		return -EINVAL;
+	}
+
+	lmh_sensor = dev->devdata;
+	down_read(&lmh_mon_access_lock);
+	down_write(&lmh_sensor->lock);
+	lmh_sensor->trip[trip].value = value;
+	up_write(&lmh_sensor->lock);
+	up_read(&lmh_mon_access_lock);
+
+	return 0;
+}
+
+static struct thermal_zone_device_ops lmh_sens_ops = {
+	.get_temp = lmh_sensor_read,
+	.get_mode = lmh_get_mode,
+	.get_trip_type = lmh_get_trip_type,
+	.activate_trip_type = lmh_activate_trip,
+	.get_trip_temp = lmh_get_trip_value,
+	.set_trip_temp = lmh_set_trip_value,
+};
+
+static int lmh_register_sensor(struct lmh_mon_sensor_data *lmh_sensor)
+{
+	int ret = 0;
+
+	lmh_sensor->tzdev = thermal_zone_device_register(
+			lmh_sensor->sensor_name, LMH_TRIP_MAX,
+			(1 << LMH_TRIP_MAX) - 1, lmh_sensor, &lmh_sens_ops,
+			NULL, 0, 0);
+	if (IS_ERR_OR_NULL(lmh_sensor->tzdev)) {
+		ret = PTR_ERR(lmh_sensor->tzdev);
+		pr_err("Error registering sensor:[%s] with thermal. err:%d\n",
+			lmh_sensor->sensor_name, ret);
+		return ret;
+	}
+
+	return ret;
+}
+
+static int lmh_sensor_init(struct lmh_mon_sensor_data *lmh_sensor,
+	       char *sensor_name, struct lmh_sensor_ops *ops)
+{
+	int idx = 0, ret = 0;
+
+	strlcpy(lmh_sensor->sensor_name, sensor_name, LMH_NAME_MAX);
+	lmh_sensor->sensor_ops = ops;
+	ops->new_value_notify = lmh_update_reading;
+	for (idx = 0; idx < LMH_TRIP_MAX; idx++) {
+		lmh_sensor->trip[idx].value = 0;
+		lmh_sensor->trip[idx].active = false;
+	}
+	init_rwsem(&lmh_sensor->lock);
+	if (list_empty(&lmh_sensor_list)
+		&& !lmh_mon_data->enable_hw_log)
+		lmh_create_debugfs_nodes(lmh_sensor);
+	list_add_tail(&lmh_sensor->list_ptr, &lmh_sensor_list);
+
+	return ret;
+}
+
+int lmh_sensor_register(char *sensor_name, struct lmh_sensor_ops *ops)
+{
+	int ret = 0;
+	struct lmh_mon_sensor_data *lmh_sensor = NULL;
+
+	if (!sensor_name || !ops) {
+		pr_err("Invalid input\n");
+		return -EINVAL;
+	}
+
+	if (!ops->read || !ops->enable_hw_log || !ops->disable_hw_log) {
+		pr_err("Invalid ops input for sensor:%s\n", sensor_name);
+		return -EINVAL;
+	}
+	down_write(&lmh_mon_access_lock);
+	if (lmh_match_sensor_name(sensor_name)
+		|| lmh_match_sensor_ops(ops)) {
+		ret = -EEXIST;
+		pr_err("Sensor[%s] exists\n", sensor_name);
+		goto register_exit;
+	}
+	lmh_sensor = kzalloc(sizeof(struct lmh_mon_sensor_data), GFP_KERNEL);
+	if (!lmh_sensor) {
+		pr_err("kzalloc failed\n");
+		ret = -ENOMEM;
+		goto register_exit;
+	}
+	ret = lmh_sensor_init(lmh_sensor, sensor_name, ops);
+	if (ret) {
+		pr_err("Error registering sensor:%s. err:%d\n", sensor_name,
+			ret);
+		kfree(lmh_sensor);
+		goto register_exit;
+	}
+
+	pr_debug("Registered Sensor:[%s]\n", sensor_name);
+
+register_exit:
+	up_write(&lmh_mon_access_lock);
+	if (ret)
+		return ret;
+	ret = lmh_register_sensor(lmh_sensor);
+	if (ret) {
+		pr_err("Thermal Zone register failed for Sensor:[%s]\n"
+			, sensor_name);
+		return ret;
+	}
+	pr_debug("Registered Sensor:[%s]\n", sensor_name);
+	return ret;
+}
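+
A sketch of a sensor backend (hypothetical; the exact prototypes of struct
lmh_sensor_ops live in lmh_interface.h, so only the callbacks this file
requires are shown, with assumed signatures):

	static int ex_read(struct lmh_sensor_ops *ops, int *val)
	{
		*val = 0;	/* current throttling intensity */
		return 0;
	}

	static int ex_enable_hw_log(uint64_t interval, uint32_t enable)
	{
		return 0;
	}

	static int ex_disable_hw_log(void)
	{
		return 0;
	}

	static struct lmh_sensor_ops ex_sensor_ops = {
		.read = ex_read,
		.enable_hw_log = ex_enable_hw_log,
		.disable_hw_log = ex_disable_hw_log,
		/* .new_value_notify is filled in by lmh_sensor_init() */
	};

	/* lmh_sensor_register("example_sensor", &ex_sensor_ops); */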
+
+static void lmh_sensor_remove(struct lmh_sensor_ops *ops)
+{
+	struct lmh_mon_sensor_data *lmh_sensor = NULL;
+
+	lmh_sensor = lmh_match_sensor_ops(ops);
+	if (!lmh_sensor) {
+		pr_err("No match for the sensor\n");
+		goto deregister_exit;
+	}
+	down_write(&lmh_sensor->lock);
+	thermal_zone_device_unregister(lmh_sensor->tzdev);
+	list_del(&lmh_sensor->list_ptr);
+	up_write(&lmh_sensor->lock);
+	pr_debug("Deregistered sensor:[%s]\n", lmh_sensor->sensor_name);
+	kfree(lmh_sensor);
+
+deregister_exit:
+	return;
+}
+
+void lmh_sensor_deregister(struct lmh_sensor_ops *ops)
+{
+	if (!ops) {
+		pr_err("Invalid input\n");
+		return;
+	}
+
+	down_write(&lmh_mon_access_lock);
+	lmh_sensor_remove(ops);
+	up_write(&lmh_mon_access_lock);
+
+	return;
+}
+
+static struct lmh_device_data *lmh_match_device_name(char *device_name)
+{
+	struct lmh_device_data *lmh_device = NULL;
+
+	list_for_each_entry(lmh_device, &lmh_device_list, list_ptr) {
+		if (!strncasecmp(lmh_device->device_name, device_name,
+			LMH_NAME_MAX))
+			return lmh_device;
+	}
+
+	return NULL;
+}
+
+static struct lmh_device_data *lmh_match_device_ops(struct lmh_device_ops *ops)
+{
+	struct lmh_device_data *lmh_device = NULL;
+
+	list_for_each_entry(lmh_device, &lmh_device_list, list_ptr) {
+		if (lmh_device->device_ops == ops)
+			return lmh_device;
+	}
+
+	return NULL;
+}
+
+static int lmh_device_init(struct lmh_device_data *lmh_device,
+		char *device_name, struct lmh_device_ops *ops)
+{
+	int ret = 0;
+
+	ret = ops->get_curr_level(ops, &lmh_device->curr_level);
+	if (ret) {
+		pr_err("Error getting curr level for Device:[%s]. err:%d\n",
+			device_name, ret);
+		goto dev_init_exit;
+	}
+	ret = ops->get_available_levels(ops, NULL);
+	if (ret <= 0) {
+		pr_err("Error getting max level for Device:[%s]. err:%d\n",
+			device_name, ret);
+		ret = (!ret) ? -EINVAL : ret;
+		goto dev_init_exit;
+	}
+	lmh_device->max_level = ret;
+	lmh_device->levels = kcalloc(lmh_device->max_level, sizeof(int),
+				GFP_KERNEL);
+	if (!lmh_device->levels) {
+		pr_err("No memory\n");
+		ret = -ENOMEM;
+		goto dev_init_exit;
+	}
+	ret = ops->get_available_levels(ops, lmh_device->levels);
+	if (ret) {
+		pr_err("Error getting device:[%s] levels. err:%d\n",
+			device_name, ret);
+		goto dev_init_exit;
+	}
+	init_rwsem(&lmh_device->lock);
+	lmh_device->device_ops = ops;
+	strlcpy(lmh_device->device_name, device_name, LMH_NAME_MAX);
+	list_add_tail(&lmh_device->list_ptr, &lmh_device_list);
+	lmh_create_dev_sysfs(lmh_device);
+
+dev_init_exit:
+	if (ret)
+		kfree(lmh_device->levels);
+	return ret;
+}
+
+int lmh_get_all_dev_levels(char *device_name, int *val)
+{
+	int ret = 0;
+	struct lmh_device_data *lmh_device = NULL;
+
+	if (!device_name) {
+		pr_err("Invalid input\n");
+		return -EINVAL;
+	}
+	down_read(&lmh_dev_access_lock);
+	lmh_device = lmh_match_device_name(device_name);
+	if (!lmh_device) {
+		pr_err("Invalid device:%s\n", device_name);
+		ret = -EINVAL;
+		goto get_all_lvl_exit;
+	}
+	down_read(&lmh_device->lock);
+	if (!val) {
+		ret = lmh_device->max_level;
+		goto get_all_lvl_exit;
+	}
+	memcpy(val, lmh_device->levels,
+		sizeof(int) * lmh_device->max_level);
+
+get_all_lvl_exit:
+	if (lmh_device)
+		up_read(&lmh_device->lock);
+	up_read(&lmh_dev_access_lock);
+	return ret;
+}
+
+int lmh_set_dev_level(char *device_name, int curr_lvl)
+{
+	int ret = 0;
+	struct lmh_device_data *lmh_device = NULL;
+
+	if (!device_name) {
+		pr_err("Invalid input\n");
+		return -EINVAL;
+	}
+	down_read(&lmh_dev_access_lock);
+	lmh_device = lmh_match_device_name(device_name);
+	if (!lmh_device) {
+		pr_err("Invalid device:%s\n", device_name);
+		ret = -EINVAL;
+		goto set_dev_exit;
+	}
+	down_write(&lmh_device->lock);
+	curr_lvl = min(curr_lvl, lmh_device->levels[lmh_device->max_level - 1]);
+	curr_lvl = max(curr_lvl, lmh_device->levels[0]);
+	if (curr_lvl == lmh_device->curr_level)
+		goto set_dev_exit;
+	ret = lmh_device->device_ops->set_level(lmh_device->device_ops,
+			curr_lvl);
+	if (ret) {
+		pr_err("Error setting current level%d for device[%s]. err:%d\n",
+			curr_lvl, device_name, ret);
+		goto set_dev_exit;
+	}
+	pr_debug("Device:[%s] configured to level %d\n", device_name, curr_lvl);
+	lmh_device->curr_level = curr_lvl;
+
+set_dev_exit:
+	if (lmh_device)
+		up_write(&lmh_device->lock);
+	up_read(&lmh_dev_access_lock);
+	return ret;
+}
+
+int lmh_get_curr_level(char *device_name, int *val)
+{
+	int ret = 0;
+	struct lmh_device_data *lmh_device = NULL;
+
+	if (!device_name || !val) {
+		pr_err("Invalid input\n");
+		return -EINVAL;
+	}
+	down_read(&lmh_dev_access_lock);
+	lmh_device = lmh_match_device_name(device_name);
+	if (!lmh_device) {
+		pr_err("Invalid device:%s\n", device_name);
+		ret = -EINVAL;
+		goto get_curr_level;
+	}
+	down_read(&lmh_device->lock);
+	ret = lmh_device->device_ops->get_curr_level(lmh_device->device_ops,
+			&lmh_device->curr_level);
+	if (ret) {
+		pr_err("Error getting device[%s] current level. err:%d\n",
+			device_name, ret);
+		goto get_curr_level;
+	}
+	*val = lmh_device->curr_level;
+	pr_debug("Device:%s current level:%d\n", device_name, *val);
+
+get_curr_level:
+	if (lmh_device)
+		up_read(&lmh_device->lock);
+	up_read(&lmh_dev_access_lock);
+	return ret;
+}
+
+int lmh_device_register(char *device_name, struct lmh_device_ops *ops)
+{
+	int ret = 0;
+	struct lmh_device_data *lmh_device = NULL;
+
+	if (!device_name || !ops) {
+		pr_err("Invalid input\n");
+		return -EINVAL;
+	}
+
+	if (!ops->get_available_levels || !ops->get_curr_level
+		|| !ops->set_level) {
+		pr_err("Invalid ops input for device:%s\n", device_name);
+		return -EINVAL;
+	}
+
+	down_write(&lmh_dev_access_lock);
+	if (lmh_match_device_name(device_name)
+		|| lmh_match_device_ops(ops)) {
+		ret = -EEXIST;
+		pr_err("Device[%s] allready exists\n", device_name);
+		goto register_exit;
+	}
+	lmh_device = kzalloc(sizeof(struct lmh_device_data), GFP_KERNEL);
+	if (!lmh_device) {
+		pr_err("kzalloc failed\n");
+		ret = -ENOMEM;
+		goto register_exit;
+	}
+	ret = lmh_device_init(lmh_device, device_name, ops);
+	if (ret) {
+		pr_err("Error registering device:%s. err:%d\n", device_name,
+			ret);
+		kfree(lmh_device);
+		goto register_exit;
+	}
+
+	pr_debug("Registered Device:[%s] with %d levels\n", device_name,
+			lmh_device->max_level);
+
+register_exit:
+	up_write(&lmh_dev_access_lock);
+	return ret;
+}
+
+static void lmh_device_remove(struct lmh_device_ops *ops)
+{
+	struct lmh_device_data *lmh_device = NULL;
+
+	lmh_device = lmh_match_device_ops(ops);
+	if (!lmh_device) {
+		pr_err("No match for the device\n");
+		goto deregister_exit;
+	}
+	down_write(&lmh_device->lock);
+	list_del(&lmh_device->list_ptr);
+	pr_debug("Deregistered device:[%s]\n", lmh_device->device_name);
+	kfree(lmh_device->levels);
+	up_write(&lmh_device->lock);
+	kfree(lmh_device);
+
+deregister_exit:
+	return;
+}
+
+void lmh_device_deregister(struct lmh_device_ops *ops)
+{
+	if (!ops) {
+		pr_err("Invalid input\n");
+		return;
+	}
+
+	down_write(&lmh_dev_access_lock);
+	lmh_device_remove(ops);
+	up_write(&lmh_dev_access_lock);
+}
+
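+/*
+ * Parse a debugfs write of ';'-separated groups of space-separated
+ * unsigned integers. Each group must hold at least three values and is
+ * converted to a u32 array that is handed to the registered debug ops
+ * for the requested read/config type.
+ */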
+static int lmh_parse_and_extract(const char __user *user_buf, size_t count,
+	enum lmh_read_type type)
+{
+	char *local_buf = NULL, *token = NULL, *curr_ptr = NULL, *token1 = NULL;
+	char *next_line = NULL;
+	int ret = 0, data_ct = 0, i = 0, size = 0;
+	uint32_t *config_buf = NULL;
+
+	/* Allocate two extra bytes to append a ';' and a NUL terminator */
+	local_buf = kzalloc(count + 2, GFP_KERNEL);
+	if (!local_buf) {
+		ret = -ENOMEM;
+		goto dfs_cfg_write_exit;
+	}
+	if (copy_from_user(local_buf, user_buf, count)) {
+		pr_err("user buf error\n");
+		ret = -EFAULT;
+		goto dfs_cfg_write_exit;
+	}
+	size = count + (strnchr(local_buf, count, '\n') ? 1 : 2);
+	local_buf[size - 2] = ';';
+	local_buf[size - 1] = '\0';
+	curr_ptr = next_line = local_buf;
+	while ((token1 = strnchr(next_line, local_buf + size - next_line, ';'))
+		!= NULL) {
+		data_ct = 0;
+		*token1 = '\0';
+		curr_ptr = next_line;
+		next_line = token1 + 1;
+		for (token = (char *)curr_ptr; token &&
+			((token = strnchr(token, next_line - token, ' '))
+			 != NULL); token++)
+			data_ct++;
+		if (data_ct < 2) {
+			pr_err("Invalid format string:[%s]\n", curr_ptr);
+			ret = -EINVAL;
+			goto dfs_cfg_write_exit;
+		}
+		config_buf = kzalloc((++data_ct) * sizeof(uint32_t),
+				GFP_KERNEL);
+		if (!config_buf) {
+			ret = -ENOMEM;
+			goto dfs_cfg_write_exit;
+		}
+		pr_debug("Input:%s data_ct:%d\n", curr_ptr, data_ct);
+		for (i = 0, token = (char *)curr_ptr; token && (i < data_ct);
+			i++) {
+			token = strnchr(token, next_line - token, ' ');
+			if (token)
+				*token = '\0';
+			ret = kstrtouint(curr_ptr, 0, &config_buf[i]);
+			if (ret < 0) {
+				pr_err("Data[%s] scan error. err:%d\n",
+					curr_ptr, ret);
+				kfree(config_buf);
+				goto dfs_cfg_write_exit;
+			}
+			if (token)
+				curr_ptr = ++token;
+		}
+		switch (type) {
+		case LMH_DEBUG_READ_TYPE:
+			ret = lmh_mon_data->debug_ops->debug_config_read(
+				lmh_mon_data->debug_ops, config_buf, data_ct);
+			break;
+		case LMH_DEBUG_CONFIG_TYPE:
+			ret = lmh_mon_data->debug_ops->debug_config_lmh(
+				lmh_mon_data->debug_ops, config_buf, data_ct);
+			break;
+		default:
+			ret = -EINVAL;
+			break;
+		}
+		kfree(config_buf);
+		if (ret) {
+			pr_err("Config error. type:%d err:%d\n", type, ret);
+			goto dfs_cfg_write_exit;
+		}
+	}
+
+dfs_cfg_write_exit:
+	kfree(local_buf);
+	return ret;
+}
+
+static ssize_t lmh_dbgfs_config_write(struct file *file,
+	const char __user *user_buf, size_t count, loff_t *ppos)
+{
+	int ret;
+
+	ret = lmh_parse_and_extract(user_buf, count, LMH_DEBUG_CONFIG_TYPE);
+	return ret ? ret : count;
+}
+
+static int lmh_dbgfs_data_read(struct seq_file *seq_fp, void *data)
+{
+	static uint32_t *read_buf;
+	static int read_buf_size;
+	int idx = 0, ret = 0;
+
+	if (!read_buf_size) {
+		ret = lmh_mon_data->debug_ops->debug_read(
+			lmh_mon_data->debug_ops, &read_buf);
+		if (ret <= 0)
+			goto dfs_read_exit;
+		if (!read_buf || ret < sizeof(uint32_t)) {
+			ret = -EINVAL;
+			goto dfs_read_exit;
+		}
+		read_buf_size = ret;
+		ret = 0;
+	}
+
+	do {
+		seq_printf(seq_fp, "0x%x ", read_buf[idx]);
+		if (seq_has_overflowed(seq_fp)) {
+			pr_err("Seq overflow. idx:%d\n", idx);
+			goto dfs_read_exit;
+		}
+		idx++;
+		if ((idx % LMH_READ_LINE_LENGTH) == 0) {
+			seq_puts(seq_fp, "\n");
+			if (seq_has_overflowed(seq_fp)) {
+				pr_err("Seq overflow. idx:%d\n", idx);
+				goto dfs_read_exit;
+			}
+		}
+	} while (idx < (read_buf_size / sizeof(uint32_t)));
+	read_buf_size = 0;
+	read_buf = NULL;
+
+dfs_read_exit:
+	return ret;
+}
+
+static ssize_t lmh_dbgfs_data_write(struct file *file,
+	const char __user *user_buf, size_t count, loff_t *ppos)
+{
+	int ret;
+
+	ret = lmh_parse_and_extract(user_buf, count, LMH_DEBUG_READ_TYPE);
+	return ret ? ret : count;
+}
+
+static int lmh_dbgfs_data_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, lmh_dbgfs_data_read, inode->i_private);
+}
+
+static int lmh_get_types(struct seq_file *seq_fp, enum lmh_read_type type)
+{
+	int ret = 0, idx = 0, size = 0;
+	uint32_t *type_list = NULL;
+
+	switch (type) {
+	case LMH_DEBUG_READ_TYPE:
+		ret = lmh_mon_data->debug_ops->debug_get_types(
+			lmh_mon_data->debug_ops, true, &type_list);
+		break;
+	case LMH_DEBUG_CONFIG_TYPE:
+		ret = lmh_mon_data->debug_ops->debug_get_types(
+			lmh_mon_data->debug_ops, false, &type_list);
+		break;
+	default:
+		return -EINVAL;
+	}
+	if (ret <= 0 || !type_list) {
+		pr_err("No device information. err:%d\n", ret);
+		return -ENODEV;
+	}
+	size = ret;
+	for (idx = 0; idx < size; idx++)
+		seq_printf(seq_fp, "0x%x ", type_list[idx]);
+	seq_puts(seq_fp, "\n");
+
+	return 0;
+}
+
+static int lmh_dbgfs_read_type(struct seq_file *seq_fp, void *data)
+{
+	return lmh_get_types(seq_fp, LMH_DEBUG_READ_TYPE);
+}
+
+static int lmh_dbgfs_read_type_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, lmh_dbgfs_read_type, inode->i_private);
+}
+
+static int lmh_dbgfs_config_type(struct seq_file *seq_fp, void *data)
+{
+	return lmh_get_types(seq_fp, LMH_DEBUG_CONFIG_TYPE);
+}
+
+static int lmh_dbgfs_config_type_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, lmh_dbgfs_config_type, inode->i_private);
+}
+
+static const struct file_operations lmh_dbgfs_config_fops = {
+	.write		= lmh_dbgfs_config_write,
+};
+static const struct file_operations lmh_dbgfs_read_fops = {
+	.open		= lmh_dbgfs_data_open,
+	.read		= seq_read,
+	.write		= lmh_dbgfs_data_write,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+static const struct file_operations lmh_dbgfs_read_type_fops = {
+	.open		= lmh_dbgfs_read_type_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+static const struct file_operations lmh_dbgfs_config_type_fops = {
+	.open		= lmh_dbgfs_config_type_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+
+int lmh_debug_register(struct lmh_debug_ops *ops)
+{
+	int ret = 0;
+
+	if (!ops || !ops->debug_read || !ops->debug_config_read
+		       || !ops->debug_get_types) {
+		pr_err("Invalid input");
+		ret = -EINVAL;
+		goto dbg_reg_exit;
+	}
+
+	lmh_mon_data->debug_ops = ops;
+	LMH_CREATE_DEBUGFS_DIR(lmh_mon_data->debug_dir, LMH_DBGFS_DIR,
+			lmh_mon_data->debugfs_parent, ret);
+	if (ret)
+		goto dbg_reg_exit;
+
+	LMH_CREATE_DEBUGFS_FILE(lmh_mon_data->debug_read, LMH_DBGFS_READ, 0600,
+		lmh_mon_data->debug_dir, NULL, &lmh_dbgfs_read_fops, ret);
+	if (!lmh_mon_data->debug_read) {
+		pr_err("Error creating" LMH_DBGFS_READ "entry.\n");
+		ret = -ENODEV;
+		goto dbg_reg_exit;
+	}
+	LMH_CREATE_DEBUGFS_FILE(lmh_mon_data->debug_config,
+		LMH_DBGFS_CONFIG_READ, 0200, lmh_mon_data->debug_dir, NULL,
+		&lmh_dbgfs_config_fops, ret);
+	if (!lmh_mon_data->debug_config) {
+		pr_err("Error creating" LMH_DBGFS_CONFIG_READ "entry\n");
+		ret = -ENODEV;
+		goto dbg_reg_exit;
+	}
+	LMH_CREATE_DEBUGFS_FILE(lmh_mon_data->debug_read_type,
+		LMH_DBGFS_READ_TYPES, 0400, lmh_mon_data->debug_dir, NULL,
+		&lmh_dbgfs_read_type_fops, ret);
+	if (!lmh_mon_data->debug_read_type) {
+		pr_err("Error creating" LMH_DBGFS_READ_TYPES "entry\n");
+		ret = -ENODEV;
+		goto dbg_reg_exit;
+	}
+	LMH_CREATE_DEBUGFS_FILE(lmh_mon_data->debug_config_type,
+		LMH_DBGFS_CONFIG_TYPES, 0400, lmh_mon_data->debug_dir, NULL,
+		&lmh_dbgfs_config_type_fops, ret);
+	if (!lmh_mon_data->debug_config_type) {
+		pr_err("Error creating" LMH_DBGFS_CONFIG_TYPES "entry\n");
+		ret = -ENODEV;
+		goto dbg_reg_exit;
+	}
+
+dbg_reg_exit:
+	if (ret) {
+		/* Clean up all the debugfs nodes */
+		debugfs_remove_recursive(lmh_mon_data->debug_dir);
+		lmh_mon_data->debug_ops = NULL;
+	}
+
+	return ret;
+}
+
+static int lmh_mon_init_driver(void)
+{
+	int ret = 0;
+
+	lmh_mon_data = kzalloc(sizeof(struct lmh_mon_driver_data),
+				GFP_KERNEL);
+	if (!lmh_mon_data) {
+		pr_err("No memory\n");
+		return -ENOMEM;
+	}
+
+	LMH_CREATE_DEBUGFS_DIR(lmh_mon_data->debugfs_parent, LMH_MON_NAME,
+				NULL, ret);
+	if (ret)
+		goto init_exit;
+	lmh_mon_data->poll_fs = debugfs_create_u32(LMH_ISR_POLL_DELAY, 0600,
+			lmh_mon_data->debugfs_parent, &lmh_poll_interval);
+	if (IS_ERR(lmh_mon_data->poll_fs))
+		pr_err("Error creating debugfs:[%s]. err:%ld\n",
+			LMH_ISR_POLL_DELAY, PTR_ERR(lmh_mon_data->poll_fs));
+
+init_exit:
+	if (ret == -ENODEV)
+		ret = 0;
+	return ret;
+}
+
+static int __init lmh_mon_init_call(void)
+{
+	int ret = 0;
+
+	ret = lmh_mon_init_driver();
+	if (ret) {
+		pr_err("Error initializing the debugfs. err:%d\n", ret);
+		return ret;
+	}
+
+	return class_register(&lmh_class_info);
+}
+
+static void lmh_mon_cleanup(void)
+{
+	down_write(&lmh_mon_access_lock);
+	while (!list_empty(&lmh_sensor_list)) {
+		lmh_sensor_remove(list_first_entry(&lmh_sensor_list,
+			struct lmh_mon_sensor_data, list_ptr)->sensor_ops);
+	}
+	up_write(&lmh_mon_access_lock);
+	debugfs_remove_recursive(lmh_mon_data->debugfs_parent);
+	kfree(lmh_mon_data);
+}
+
+static void lmh_device_cleanup(void)
+{
+	down_write(&lmh_dev_access_lock);
+	while (!list_empty(&lmh_device_list)) {
+		lmh_device_remove(list_first_entry(&lmh_device_list,
+			struct lmh_device_data, list_ptr)->device_ops);
+	}
+	up_write(&lmh_dev_access_lock);
+}
+
+static void lmh_debug_cleanup(void)
+{
+	if (lmh_mon_data->debug_ops) {
+		debugfs_remove_recursive(lmh_mon_data->debug_dir);
+		lmh_mon_data->debug_ops = NULL;
+	}
+}
+
+static void __exit lmh_mon_exit(void)
+{
+	lmh_mon_cleanup();
+	lmh_device_cleanup();
+	lmh_debug_cleanup();
+	class_unregister(&lmh_class_info);
+}
+
+module_init(lmh_mon_init_call);
+module_exit(lmh_mon_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("LMH monitor driver");
+MODULE_ALIAS("platform:" LMH_MON_NAME);
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/thermal/lmh_interface.h	2019-01-22 16:16:27.151279441 +0100
@@ -0,0 +1,116 @@
+/*
+ * Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __LMH_INTERFACE_H
+#define __LMH_INTERFACE_H
+
+#define LMH_NAME_MAX			20
+#define LMH_READ_LINE_LENGTH		10
+
+enum lmh_trip_type {
+	LMH_LOW_TRIP,
+	LMH_HIGH_TRIP,
+	LMH_TRIP_MAX,
+};
+
+enum lmh_monitor_state {
+	LMH_ISR_DISABLED,
+	LMH_ISR_MONITOR,
+	LMH_ISR_POLLING,
+	LMH_ISR_NR,
+};
+
+struct lmh_sensor_ops {
+	int (*read)(struct lmh_sensor_ops *, int *);
+	int (*enable_hw_log)(uint32_t, uint32_t);
+	int (*disable_hw_log)(void);
+	void (*new_value_notify)(struct lmh_sensor_ops *, int);
+};
+
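+/*
+ * Device mitigation ops. get_available_levels() is expected to return
+ * the number of supported levels when the destination buffer is NULL,
+ * and to fill the buffer and return 0 otherwise, mirroring the
+ * lmh_device_init() and lmh_get_all_dev_levels() callers.
+ */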
+struct lmh_device_ops {
+	int (*get_available_levels)(struct lmh_device_ops *, int *);
+	int (*get_curr_level)(struct lmh_device_ops *, int *);
+	int (*set_level)(struct lmh_device_ops *, int);
+};
+
+struct lmh_debug_ops {
+	int (*debug_read)(struct lmh_debug_ops *, uint32_t **);
+	int (*debug_config_read)(struct lmh_debug_ops *, uint32_t *, int);
+	int (*debug_config_lmh)(struct lmh_debug_ops *, uint32_t *, int);
+	int (*debug_get_types)(struct lmh_debug_ops *, bool, uint32_t **);
+};
+
+#ifdef CONFIG_LIMITS_MONITOR
+int lmh_get_all_dev_levels(char *, int *);
+int lmh_set_dev_level(char *, int);
+int lmh_get_curr_level(char *, int *);
+int lmh_sensor_register(char *, struct lmh_sensor_ops *);
+void lmh_sensor_deregister(struct lmh_sensor_ops *);
+int lmh_device_register(char *, struct lmh_device_ops *);
+void lmh_device_deregister(struct lmh_device_ops *);
+int lmh_debug_register(struct lmh_debug_ops *);
+void lmh_debug_deregister(struct lmh_debug_ops *ops);
+int lmh_get_poll_interval(void);
+#else
+static inline int lmh_get_all_dev_levels(char *device_name, int *level)
+{
+	return -ENOSYS;
+}
+
+static inline int lmh_set_dev_level(char *device_name, int level)
+{
+	return -ENOSYS;
+}
+
+static inline int lmh_get_curr_level(char *device_name, int *level)
+{
+	return -ENOSYS;
+}
+
+static inline int lmh_sensor_register(char *sensor_name,
+	struct lmh_sensor_ops *ops)
+{
+	return -ENOSYS;
+}
+
+static inline void lmh_sensor_deregister(struct lmh_sensor_ops *ops)
+{ }
+
+static inline int lmh_device_register(char *device_name,
+	struct lmh_device_ops *ops)
+{
+	return -ENOSYS;
+}
+
+static inline void lmh_device_deregister(struct lmh_device_ops *ops)
+{ }
+
+static inline int lmh_debug_register(struct lmh_debug_ops *ops)
+{
+	return -ENOSYS;
+}
+
+static inline void lmh_debug_deregister(struct lmh_debug_ops *ops)
+{ }
+
+static inline int lmh_get_poll_interval(void)
+{
+	return -ENOSYS;
+}
+#endif
+
+#endif /*__LMH_INTERFACE_H*/
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/thermal/lmh_lite.c	2019-01-22 16:16:27.151279441 +0100
@@ -0,0 +1,1407 @@
+/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) "%s:%s " fmt, KBUILD_MODNAME, __func__
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/workqueue.h>
+#include <linux/kernel.h>
+#include <linux/io.h>
+#include <linux/err.h>
+#include <linux/of.h>
+#include <linux/mutex.h>
+#include "lmh_interface.h"
+#include <linux/slab.h>
+#include <asm/cacheflush.h>
+#include <soc/qcom/scm.h>
+#include <linux/dma-mapping.h>
+#include <linux/regulator/consumer.h>
+
+#define CREATE_TRACE_POINTS
+#define TRACE_MSM_LMH
+#include <trace/trace_thermal.h>
+
+#define LMH_DRIVER_NAME			"lmh-lite-driver"
+#define LMH_INTERRUPT			"lmh-interrupt"
+#define LMH_DEVICE			"lmh-profile"
+#define LMH_MAX_SENSOR			10
+#define LMH_GET_PROFILE_SIZE		10
+#define LMH_SCM_PAYLOAD_SIZE		10
+#define LMH_DEFAULT_PROFILE		0
+#define LMH_DEBUG_READ_TYPE		0x0
+#define LMH_DEBUG_CONFIG_TYPE		0x1
+#define LMH_CHANGE_PROFILE		0x01
+#define LMH_GET_PROFILES		0x02
+#define LMH_CTRL_QPMDA			0x03
+#define LMH_TRIM_ERROR			0x04
+#define LMH_GET_INTENSITY		0x06
+#define LMH_GET_SENSORS			0x07
+#define LMH_DEBUG_SET			0x08
+#define LMH_DEBUG_READ_BUF_SIZE		0x09
+#define LMH_DEBUG_READ			0x0A
+#define LMH_DEBUG_GET_TYPE		0x0B
+#define MAX_TRACE_EVENT_MSG_LEN		50
+#define APCS_DPM_VOLTAGE_SCALE		0x09950804
+#define LMH_ODCM_MAX_COUNT		6
+
+#define LMH_CHECK_SCM_CMD(_cmd) \
+	do { \
+		if (!scm_is_call_available(SCM_SVC_LMH, _cmd)) { \
+			pr_err("SCM cmd:%d not available\n", _cmd); \
+			return -ENODEV; \
+		} \
+	} while (0)
+
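+/*
+ * Fetch a variable-length u32 list from TZ in LMH_SCM_PAYLOAD_SIZE
+ * chunks. Every SCM call reports the total list size; the loop
+ * re-issues the call with an updated start index until dest_buf,
+ * allocated once the size is first known, holds the complete list.
+ */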
+#define LMH_GET_RECURSIVE_DATA(desc_arg, cmd_idx, cmd_buf, payload, next, \
+	size, cmd_id, dest_buf, ret)					\
+	do {								\
+		int idx = 0;						\
+		desc_arg.args[cmd_idx] = cmd_buf.list_start = next;	\
+		trace_lmh_event_call("GET_TYPE enter");			\
+		dmac_flush_range(payload, (void *)payload +             \
+				sizeof(uint32_t) * LMH_SCM_PAYLOAD_SIZE);\
+		if (!is_scm_armv8()) {					\
+			ret = scm_call(SCM_SVC_LMH, cmd_id,		\
+				(void *) &cmd_buf, SCM_BUFFER_SIZE(cmd_buf), \
+				&size, SCM_BUFFER_SIZE(size));		\
+		} else {						\
+			ret = scm_call2(SCM_SIP_FNID(SCM_SVC_LMH,	\
+				cmd_id), &desc_arg);			\
+			size = desc_arg.ret[0];				\
+		}							\
+		/* Have barrier before reading from TZ data */		\
+		mb();							\
+		dmac_inv_range(payload, (void *)payload +               \
+				sizeof(uint32_t) * LMH_SCM_PAYLOAD_SIZE);\
+		trace_lmh_event_call("GET_TYPE exit");			\
+		if (ret) {						\
+			pr_err("Error in SCM v%d get type. cmd:%x err:%d\n", \
+				(is_scm_armv8()) ? 8 : 7, cmd_id, ret);	\
+			break;						\
+		}							\
+		if (!size) {						\
+			pr_err("No LMH device supported.\n");		\
+			ret = -ENODEV;					\
+			break;						\
+		}							\
+		if (!dest_buf) {					\
+			dest_buf = devm_kzalloc(lmh_data->dev,		\
+				sizeof(uint32_t) * size, GFP_KERNEL);	\
+			if (!dest_buf) {				\
+				ret = -ENOMEM;				\
+				break;					\
+			}						\
+		}							\
+		for (idx = next;					\
+			idx < min((next + LMH_SCM_PAYLOAD_SIZE), size); \
+			idx++)						\
+			dest_buf[idx] = payload[idx - next];		\
+		next += LMH_SCM_PAYLOAD_SIZE;				\
+	} while (next < size)						\
+
+struct __attribute__((__packed__)) lmh_sensor_info {
+	uint32_t			name;
+	uint32_t			node_id;
+	uint32_t			intensity;
+	uint32_t			max_intensity;
+	uint32_t			type;
+};
+
+struct __attribute__((__packed__)) lmh_sensor_packet {
+	uint32_t			count;
+	struct lmh_sensor_info		sensor[LMH_MAX_SENSOR];
+};
+
+struct lmh_profile {
+	struct lmh_device_ops		dev_ops;
+	uint32_t			level_ct;
+	uint32_t			curr_level;
+	uint32_t			*levels;
+	uint32_t			read_type_count;
+	uint32_t			config_type_count;
+};
+
+struct lmh_debug {
+	struct lmh_debug_ops		debug_ops;
+	uint32_t			*read_type;
+	uint32_t			*config_type;
+	uint32_t			read_type_count;
+	uint32_t			config_type_count;
+};
+
+struct lmh_driver_data {
+	struct device			*dev;
+	struct workqueue_struct		*poll_wq;
+	struct delayed_work		poll_work;
+	uint32_t			log_enabled;
+	uint32_t			log_delay;
+	enum lmh_monitor_state		intr_state;
+	uint32_t			intr_reg_val;
+	uint32_t			intr_status_val;
+	uint32_t			trim_err_offset;
+	bool				trim_err_disable;
+	void				*intr_addr;
+	int				irq_num;
+	int				max_sensor_count;
+	struct lmh_profile		dev_info;
+	struct lmh_debug		debug_info;
+	struct regulator		*regulator;
+	struct notifier_block		dpm_notifier_blk;
+	void __iomem			*dpm_voltage_scale_reg;
+	uint32_t			odcm_thresh_mV;
+	void __iomem			*odcm_reg[LMH_ODCM_MAX_COUNT];
+	bool				odcm_enabled;
+};
+
+struct lmh_sensor_data {
+	char				sensor_name[LMH_NAME_MAX];
+	uint32_t			sensor_hw_name;
+	uint32_t			sensor_hw_node_id;
+	int				sensor_sw_id;
+	struct lmh_sensor_ops		ops;
+	int				last_read_value;
+	struct list_head		list_ptr;
+};
+
+struct lmh_default_data {
+	uint32_t			default_profile;
+	uint32_t			odcm_reg_addr[LMH_ODCM_MAX_COUNT];
+};
+
+static struct lmh_default_data		lmh_lite_data = {
+	.default_profile = 0,
+};
+static struct lmh_default_data		lmh_v1_data = {
+	.default_profile = 1,
+	.odcm_reg_addr = {	0x09981030, /* CPU0 */
+				0x09991030, /* CPU1 */
+				0x099A1028, /* APC0_L2 */
+				0x099B1030, /* CPU2 */
+				0x099C1030, /* CPU3 */
+				0x099D1028, /* APC1_l2 */
+	},
+};
+static struct lmh_default_data		*lmh_hw_data;
+static struct lmh_driver_data		*lmh_data;
+static DECLARE_RWSEM(lmh_sensor_access);
+static DEFINE_MUTEX(lmh_sensor_read);
+static DEFINE_MUTEX(lmh_odcm_access);
+static LIST_HEAD(lmh_sensor_list);
+
+static int lmh_read(struct lmh_sensor_ops *ops, int *val)
+{
+	struct lmh_sensor_data *lmh_sensor = container_of(ops,
+		       struct lmh_sensor_data, ops);
+
+	mutex_lock(&lmh_sensor_read);
+	*val = lmh_sensor->last_read_value;
+	mutex_unlock(&lmh_sensor_read);
+
+	return 0;
+}
+
+static int lmh_ctrl_qpmda(uint32_t enable)
+{
+	int ret = 0;
+	struct scm_desc desc_arg;
+	struct {
+		uint32_t enable;
+		uint32_t rate;
+	} cmd_buf;
+
+	desc_arg.args[0] = cmd_buf.enable = enable;
+	desc_arg.args[1] = cmd_buf.rate = lmh_data->log_delay;
+	desc_arg.arginfo = SCM_ARGS(2, SCM_VAL, SCM_VAL);
+	trace_lmh_event_call("CTRL_QPMDA enter");
+	if (!is_scm_armv8())
+		ret = scm_call(SCM_SVC_LMH, LMH_CTRL_QPMDA,
+			(void *) &cmd_buf, SCM_BUFFER_SIZE(cmd_buf), NULL, 0);
+	else
+		ret = scm_call2(SCM_SIP_FNID(SCM_SVC_LMH,
+			LMH_CTRL_QPMDA), &desc_arg);
+	trace_lmh_event_call("CTRL_QPMDA exit");
+	if (ret) {
+		pr_err("Error in SCM v%d %s QPMDA call. err:%d\n",
+			(is_scm_armv8()) ? 8 : 7, (enable) ? "enable" :
+			"disable", ret);
+		goto ctrl_exit;
+	}
+
+ctrl_exit:
+	return ret;
+}
+
+static int lmh_disable_log(void)
+{
+	int ret = 0;
+
+	if (!lmh_data->log_enabled)
+		return ret;
+	ret = lmh_ctrl_qpmda(0);
+	if (ret)
+		goto disable_exit;
+	pr_debug("LMH hardware log disabled.\n");
+	lmh_data->log_enabled = 0;
+
+disable_exit:
+	return ret;
+}
+
+static int lmh_enable_log(uint32_t delay, uint32_t reg_val)
+{
+	int ret = 0;
+
+	if (lmh_data->log_enabled == reg_val && lmh_data->log_delay == delay)
+		return ret;
+
+	lmh_data->log_delay = delay;
+	ret = lmh_ctrl_qpmda(reg_val);
+	if (ret)
+		goto enable_exit;
+	pr_debug("LMH hardware log enabled[%u]. delay:%u\n", reg_val, delay);
+	lmh_data->log_enabled = reg_val;
+
+enable_exit:
+	return ret;
+}
+
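+/*
+ * Track throttling in intr_status_val with one bit per sensor software
+ * id: a non-zero reading sets the sensor's bit, a zero reading clears
+ * it. The interrupt is only re-armed once the whole mask is zero again.
+ */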
+static void lmh_update(struct lmh_driver_data *lmh_dat,
+	struct lmh_sensor_data *lmh_sensor)
+{
+	if (lmh_sensor->last_read_value > 0 && !(lmh_dat->intr_status_val
+		& BIT(lmh_sensor->sensor_sw_id))) {
+		pr_debug("Sensor:[%s] interrupt triggered\n",
+			lmh_sensor->sensor_name);
+		trace_lmh_sensor_interrupt(lmh_sensor->sensor_name,
+			lmh_sensor->last_read_value);
+		lmh_dat->intr_status_val |= BIT(lmh_sensor->sensor_sw_id);
+	} else if (lmh_sensor->last_read_value == 0 && (lmh_dat->intr_status_val
+		& BIT(lmh_sensor->sensor_sw_id))) {
+		pr_debug("Sensor:[%s] interrupt clear\n",
+			lmh_sensor->sensor_name);
+		trace_lmh_sensor_interrupt(lmh_sensor->sensor_name,
+			lmh_sensor->last_read_value);
+
+		lmh_data->intr_status_val ^= BIT(lmh_sensor->sensor_sw_id);
+	}
+	lmh_sensor->ops.new_value_notify(&lmh_sensor->ops,
+		lmh_sensor->last_read_value);
+}
+
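+/*
+ * Read every sensor's intensity from TZ with a single LMH_GET_INTENSITY
+ * call. Readings are normalized to a percentage of max_intensity when
+ * the hardware reports one, then propagated through new_value_notify().
+ */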
+static void lmh_read_and_update(struct lmh_driver_data *lmh_dat)
+{
+	int ret = 0, idx = 0;
+	struct lmh_sensor_data *lmh_sensor = NULL;
+	static struct lmh_sensor_packet payload;
+	struct scm_desc desc_arg;
+	struct {
+		/* TZ is 32-bit right now */
+		uint32_t addr;
+		uint32_t size;
+	} cmd_buf;
+
+	mutex_lock(&lmh_sensor_read);
+	list_for_each_entry(lmh_sensor, &lmh_sensor_list, list_ptr)
+		lmh_sensor->last_read_value = 0;
+	payload.count = 0;
+	cmd_buf.addr = SCM_BUFFER_PHYS(&payload);
+	/* &payload may be a physical address > 4 GB */
+	desc_arg.args[0] = SCM_BUFFER_PHYS(&payload);
+	desc_arg.args[1] = cmd_buf.size
+			= SCM_BUFFER_SIZE(struct lmh_sensor_packet);
+	desc_arg.arginfo = SCM_ARGS(2, SCM_RW, SCM_VAL);
+	trace_lmh_event_call("GET_INTENSITY enter");
+	dmac_flush_range(&payload, (void *)&payload +
+			sizeof(struct lmh_sensor_packet));
+	if (!is_scm_armv8())
+		ret = scm_call(SCM_SVC_LMH, LMH_GET_INTENSITY,
+			(void *) &cmd_buf, SCM_BUFFER_SIZE(cmd_buf), NULL, 0);
+	else
+		ret = scm_call2(SCM_SIP_FNID(SCM_SVC_LMH,
+			LMH_GET_INTENSITY), &desc_arg);
+	/* Have memory barrier before we access the TZ data */
+	mb();
+	trace_lmh_event_call("GET_INTENSITY exit");
+	dmac_inv_range(&payload, (void *)&payload +
+			sizeof(struct lmh_sensor_packet));
+	if (ret) {
+		pr_err("Error in SCM v%d read call. err:%d\n",
+				(is_scm_armv8()) ? 8 : 7, ret);
+		goto read_exit;
+	}
+
+	for (idx = 0; idx < payload.count; idx++) {
+		list_for_each_entry(lmh_sensor, &lmh_sensor_list, list_ptr) {
+
+			if (payload.sensor[idx].name
+				== lmh_sensor->sensor_hw_name
+				&& (payload.sensor[idx].node_id
+				== lmh_sensor->sensor_hw_node_id)) {
+
+				lmh_sensor->last_read_value =
+					(payload.sensor[idx].max_intensity) ?
+					((payload.sensor[idx].intensity * 100)
+					/ payload.sensor[idx].max_intensity)
+					: payload.sensor[idx].intensity;
+				trace_lmh_sensor_reading(
+					lmh_sensor->sensor_name,
+					lmh_sensor->last_read_value);
+				break;
+			}
+		}
+	}
+
+read_exit:
+	mutex_unlock(&lmh_sensor_read);
+	list_for_each_entry(lmh_sensor, &lmh_sensor_list, list_ptr)
+		lmh_update(lmh_dat, lmh_sensor);
+}
+
+static void lmh_poll(struct work_struct *work)
+{
+	struct lmh_driver_data *lmh_dat = container_of(work,
+		       struct lmh_driver_data, poll_work.work);
+
+	down_write(&lmh_sensor_access);
+	if (lmh_dat->intr_state != LMH_ISR_POLLING)
+		goto poll_exit;
+	lmh_read_and_update(lmh_dat);
+	if (!lmh_data->intr_status_val) {
+		lmh_data->intr_state = LMH_ISR_MONITOR;
+		pr_debug("Zero throttling. Re-enabling interrupt\n");
+		trace_lmh_event_call("Lmh Interrupt Clear");
+		enable_irq(lmh_data->irq_num);
+		goto poll_exit;
+	} else {
+		queue_delayed_work(lmh_dat->poll_wq, &lmh_dat->poll_work,
+			msecs_to_jiffies(lmh_get_poll_interval()));
+	}
+
+poll_exit:
+	up_write(&lmh_sensor_access);
+}
+
+static void lmh_trim_error(void)
+{
+	struct scm_desc desc_arg;
+	int ret = 0;
+
+	WARN_ON(1);
+	pr_err("LMH hardware trim error\n");
+	desc_arg.arginfo = SCM_ARGS(0);
+	trace_lmh_event_call("TRIM_ERROR enter");
+	if (!is_scm_armv8())
+		ret = scm_call(SCM_SVC_LMH, LMH_TRIM_ERROR, NULL, 0, NULL, 0);
+	else
+		ret = scm_call2(SCM_SIP_FNID(SCM_SVC_LMH,
+			LMH_TRIM_ERROR), &desc_arg);
+	trace_lmh_event_call("TRIM_ERROR exit");
+	if (ret)
+		pr_err("Error in SCM v%d trim error call. err:%d\n",
+					(is_scm_armv8()) ? 8 : 7, ret);
+}
+
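+/*
+ * Threaded ISR: the interrupt is disabled on entry and the driver moves
+ * to LMH_ISR_POLLING, re-sampling the sensors from a delayed work item
+ * until all throttling clears, at which point the interrupt is
+ * re-enabled and the state returns to LMH_ISR_MONITOR.
+ */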
+static irqreturn_t lmh_isr_thread(int irq, void *data)
+{
+	struct lmh_driver_data *lmh_dat = data;
+
+	pr_debug("LMH Interrupt triggered\n");
+	trace_lmh_event_call("Lmh Interrupt");
+
+	disable_irq_nosync(irq);
+	down_write(&lmh_sensor_access);
+	if (lmh_dat->intr_state != LMH_ISR_MONITOR) {
+		pr_err("Invalid software state\n");
+		trace_lmh_event_call("Invalid software state");
+		WARN_ON(1);
+		goto isr_unlock_exit;
+	}
+	lmh_dat->intr_state = LMH_ISR_POLLING;
+	if (!lmh_data->trim_err_disable) {
+		lmh_dat->intr_reg_val = readl_relaxed(lmh_dat->intr_addr);
+		pr_debug("Lmh hw interrupt:%d\n", lmh_dat->intr_reg_val);
+		if (lmh_dat->intr_reg_val & BIT(lmh_dat->trim_err_offset)) {
+			trace_lmh_event_call("Lmh trim error");
+			lmh_trim_error();
+			lmh_dat->intr_state = LMH_ISR_MONITOR;
+			goto decide_next_action;
+		}
+	}
+	lmh_read_and_update(lmh_dat);
+	if (!lmh_dat->intr_status_val) {
+		pr_debug("LMH not throttling. Enabling interrupt\n");
+		lmh_dat->intr_state = LMH_ISR_MONITOR;
+		trace_lmh_event_call("Lmh Zero throttle Interrupt Clear");
+		goto decide_next_action;
+	}
+
+decide_next_action:
+	if (lmh_dat->intr_state == LMH_ISR_POLLING)
+		queue_delayed_work(lmh_dat->poll_wq, &lmh_dat->poll_work,
+			msecs_to_jiffies(lmh_get_poll_interval()));
+	else
+		enable_irq(lmh_dat->irq_num);
+
+isr_unlock_exit:
+	up_write(&lmh_sensor_access);
+	return IRQ_HANDLED;
+}
+
+static int lmh_get_sensor_devicetree(struct platform_device *pdev)
+{
+	int ret = 0, idx = 0;
+	char *key = NULL;
+	struct device_node *node = pdev->dev.of_node;
+	struct resource *lmh_intr_base = NULL;
+
+	lmh_data->trim_err_disable = false;
+	key = "qcom,lmh-trim-err-offset";
+	ret = of_property_read_u32(node, key,
+			&lmh_data->trim_err_offset);
+	if (ret) {
+		if (ret == -EINVAL) {
+			lmh_data->trim_err_disable = true;
+			ret = 0;
+		} else {
+			pr_err("Error reading:%s. err:%d\n", key, ret);
+			goto dev_exit;
+		}
+	}
+
+	lmh_data->regulator = devm_regulator_get(lmh_data->dev, "vdd-apss");
+	if (IS_ERR(lmh_data->regulator)) {
+		pr_err("unable to get vdd-apss regulator. err:%ld\n",
+			PTR_ERR(lmh_data->regulator));
+		lmh_data->regulator = NULL;
+	} else {
+		key = "qcom,lmh-odcm-disable-threshold-mA";
+		ret = of_property_read_u32(node, key,
+			&lmh_data->odcm_thresh_mV);
+		if (ret) {
+			pr_err("Error getting ODCM thresh. err:%d\n", ret);
+			ret = 0;
+		} else {
+			lmh_data->odcm_enabled = true;
+			for (; idx < LMH_ODCM_MAX_COUNT; idx++) {
+				lmh_data->odcm_reg[idx] =
+					devm_ioremap(&pdev->dev,
+					lmh_hw_data->odcm_reg_addr[idx], 4);
+				if (!lmh_data->odcm_reg[idx]) {
+					pr_err("Err mapping ODCM memory 0x%x\n",
+					lmh_hw_data->odcm_reg_addr[idx]);
+					lmh_data->odcm_enabled = false;
+					lmh_data->odcm_reg[0] = NULL;
+					break;
+				}
+			}
+		}
+	}
+
+	lmh_data->irq_num = platform_get_irq(pdev, 0);
+	if (lmh_data->irq_num < 0) {
+		ret = lmh_data->irq_num;
+		pr_err("Error getting IRQ number. err:%d\n", ret);
+		goto dev_exit;
+	}
+
+	ret = request_threaded_irq(lmh_data->irq_num, NULL,
+		lmh_isr_thread, IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
+		LMH_INTERRUPT, lmh_data);
+	if (ret) {
+		pr_err("Error getting irq for LMH. err:%d\n", ret);
+		goto dev_exit;
+	}
+
+	if (!lmh_data->trim_err_disable) {
+		lmh_intr_base = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+		if (!lmh_intr_base) {
+			ret = -EINVAL;
+			pr_err("Error getting reg MEM for LMH.\n");
+			goto dev_exit;
+		}
+		lmh_data->intr_addr =
+			devm_ioremap(&pdev->dev, lmh_intr_base->start,
+			resource_size(lmh_intr_base));
+		if (!lmh_data->intr_addr) {
+			ret = -ENODEV;
+			pr_err("Error Mapping LMH memory address\n");
+			goto dev_exit;
+		}
+	}
+
+dev_exit:
+	return ret;
+}
+
+static void lmh_remove_sensors(void)
+{
+	struct lmh_sensor_data *curr_sensor = NULL, *prev_sensor = NULL;
+
+	down_write(&lmh_sensor_access);
+	list_for_each_entry_safe(prev_sensor, curr_sensor, &lmh_sensor_list,
+		list_ptr) {
+		list_del(&prev_sensor->list_ptr);
+		pr_debug("Deregistering Sensor:[%s]\n",
+			prev_sensor->sensor_name);
+		lmh_sensor_deregister(&prev_sensor->ops);
+		devm_kfree(lmh_data->dev, prev_sensor);
+	}
+	up_write(&lmh_sensor_access);
+}
+
+static int lmh_check_tz_debug_cmds(void)
+{
+	LMH_CHECK_SCM_CMD(LMH_DEBUG_SET);
+	LMH_CHECK_SCM_CMD(LMH_DEBUG_READ_BUF_SIZE);
+	LMH_CHECK_SCM_CMD(LMH_DEBUG_READ);
+	LMH_CHECK_SCM_CMD(LMH_DEBUG_GET_TYPE);
+
+	return 0;
+}
+
+static int lmh_check_tz_dev_cmds(void)
+{
+	LMH_CHECK_SCM_CMD(LMH_CHANGE_PROFILE);
+	LMH_CHECK_SCM_CMD(LMH_GET_PROFILES);
+
+	return 0;
+}
+
+static int lmh_check_tz_sensor_cmds(void)
+{
+	LMH_CHECK_SCM_CMD(LMH_CTRL_QPMDA);
+	if (!lmh_data->trim_err_disable)
+		LMH_CHECK_SCM_CMD(LMH_TRIM_ERROR);
+	LMH_CHECK_SCM_CMD(LMH_GET_INTENSITY);
+	LMH_CHECK_SCM_CMD(LMH_GET_SENSORS);
+
+	return 0;
+}
+
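+/*
+ * Build a printable sensor name from the packed TZ descriptor: the
+ * bytes of the 32-bit 'name' field (most significant byte first), an
+ * '_' separator, then the bytes of 'node_id', truncated to fit
+ * LMH_NAME_MAX.
+ */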
+static int lmh_parse_sensor(struct lmh_sensor_info *sens_info)
+{
+	int ret = 0, idx = 0, size = 0;
+	struct lmh_sensor_data *lmh_sensor = NULL;
+
+	lmh_sensor = devm_kzalloc(lmh_data->dev, sizeof(struct lmh_sensor_data),
+			GFP_KERNEL);
+	if (!lmh_sensor) {
+		pr_err("No payload\n");
+		return -ENOMEM;
+	}
+	size = sizeof(sens_info->name);
+	size = min(size, LMH_NAME_MAX);
+	memset(lmh_sensor->sensor_name, '\0', LMH_NAME_MAX);
+	while (size--)
+		lmh_sensor->sensor_name[idx++] = ((sens_info->name
+				       & (0xFF << (size * 8))) >> (size * 8));
+	if (lmh_sensor->sensor_name[idx - 1] == '\0')
+		idx--;
+	lmh_sensor->sensor_name[idx++] = '_';
+	size = sizeof(sens_info->node_id);
+	if ((idx + size) > LMH_NAME_MAX)
+		/* truncate node_id bytes, leaving room for the NUL */
+		size = LMH_NAME_MAX - idx - 1;
+	while (size--)
+		lmh_sensor->sensor_name[idx++] = ((sens_info->node_id
+				       & (0xFF << (size * 8))) >> (size * 8));
+	pr_info("Registering sensor:[%s]\n", lmh_sensor->sensor_name);
+	lmh_sensor->ops.read = lmh_read;
+	lmh_sensor->ops.disable_hw_log = lmh_disable_log;
+	lmh_sensor->ops.enable_hw_log = lmh_enable_log;
+	lmh_sensor->sensor_sw_id = lmh_data->max_sensor_count++;
+	lmh_sensor->sensor_hw_name = sens_info->name;
+	lmh_sensor->sensor_hw_node_id = sens_info->node_id;
+	ret = lmh_sensor_register(lmh_sensor->sensor_name, &lmh_sensor->ops);
+	if (ret) {
+		pr_err("Sensor:[%s] registration failed. err:%d\n",
+			lmh_sensor->sensor_name, ret);
+		goto sens_exit;
+	}
+	list_add_tail(&lmh_sensor->list_ptr, &lmh_sensor_list);
+	pr_debug("Registered sensor:[%s] driver\n", lmh_sensor->sensor_name);
+
+sens_exit:
+	if (ret)
+		devm_kfree(lmh_data->dev, lmh_sensor);
+	return ret;
+}
+
+static int lmh_get_sensor_list(void)
+{
+	int ret = 0, buf_size = 0;
+	uint32_t size = 0, next = 0, idx = 0, count = 0;
+	struct scm_desc desc_arg;
+	struct lmh_sensor_packet *payload = NULL;
+	struct {
+		uint32_t addr;
+		uint32_t size;
+	} cmd_buf;
+
+	buf_size = PAGE_ALIGN(sizeof(*payload));
+	payload = kzalloc(buf_size, GFP_KERNEL);
+	if (!payload)
+		return -ENOMEM;
+
+	do {
+		memset(payload, 0, buf_size);
+		payload->count = next;
+		cmd_buf.addr = SCM_BUFFER_PHYS(payload);
+		/* payload_phys may be a physical address > 4 GB */
+		desc_arg.args[0] = SCM_BUFFER_PHYS(payload);
+		desc_arg.args[1] = cmd_buf.size = SCM_BUFFER_SIZE(struct
+				lmh_sensor_packet);
+		desc_arg.arginfo = SCM_ARGS(2, SCM_RW, SCM_VAL);
+		trace_lmh_event_call("GET_SENSORS enter");
+		dmac_flush_range(payload, (void *)payload + buf_size);
+		if (!is_scm_armv8())
+			ret = scm_call(SCM_SVC_LMH, LMH_GET_SENSORS,
+				(void *) &cmd_buf,
+				SCM_BUFFER_SIZE(cmd_buf),
+				NULL, 0);
+		else
+			ret = scm_call2(SCM_SIP_FNID(SCM_SVC_LMH,
+				LMH_GET_SENSORS), &desc_arg);
+		/* Have memory barrier before we access the TZ data */
+		mb();
+		trace_lmh_event_call("GET_SENSORS exit");
+		dmac_inv_range(payload, (void *)payload + buf_size);
+		if (ret < 0) {
+			pr_err("Error in SCM v%d call. err:%d\n",
+					(is_scm_armv8()) ? 8 : 7, ret);
+			goto get_exit;
+		}
+		size = payload->count;
+		if (!size) {
+			pr_err("No LMH sensor supported\n");
+			ret = -ENODEV;
+			goto get_exit;
+		}
+		count = ((size - next) > LMH_MAX_SENSOR) ? LMH_MAX_SENSOR :
+				(size - next);
+		next += LMH_MAX_SENSOR;
+		for (idx = 0; idx < count; idx++) {
+			ret = lmh_parse_sensor(&payload->sensor[idx]);
+			if (ret)
+				goto get_exit;
+		}
+	} while (next < size);
+
+get_exit:
+	kfree(payload);
+	return ret;
+}
+
+static int lmh_set_level(struct lmh_device_ops *ops, int level)
+{
+	int ret = 0, idx = 0;
+	struct scm_desc desc_arg;
+	struct lmh_profile *lmh_dev;
+
+	if (level < 0 || !ops) {
+		pr_err("Invalid Input\n");
+		return -EINVAL;
+	}
+	lmh_dev = container_of(ops, struct lmh_profile, dev_ops);
+	for (idx = 0; idx < lmh_dev->level_ct; idx++) {
+		if (level != lmh_dev->levels[idx])
+			continue;
+		break;
+	}
+	if (idx == lmh_dev->level_ct) {
+		pr_err("Invalid profile:[%d]\n", level);
+		return -EINVAL;
+	}
+	desc_arg.args[0] = level;
+	desc_arg.arginfo = SCM_ARGS(1, SCM_VAL);
+	if (!is_scm_armv8())
+		ret = scm_call_atomic1(SCM_SVC_LMH, LMH_CHANGE_PROFILE,
+			level);
+	else
+		ret = scm_call2(SCM_SIP_FNID(SCM_SVC_LMH,
+			LMH_CHANGE_PROFILE), &desc_arg);
+	if (ret) {
+		pr_err("Error in SCM v%d switching profile:[%d]. err:%d\n",
+			(is_scm_armv8()) ? 8 : 7, level, ret);
+		return ret;
+	}
+	pr_debug("Device:[%s] Current level:%d\n", LMH_DEVICE, level);
+	lmh_dev->curr_level = level;
+
+	return ret;
+}
+
+static int lmh_get_all_level(struct lmh_device_ops *ops, int *level)
+{
+	struct lmh_profile *lmh_dev;
+
+	if (!ops) {
+		pr_err("Invalid input\n");
+		return -EINVAL;
+	}
+	lmh_dev = container_of(ops, struct lmh_profile, dev_ops);
+	if (!level)
+		return lmh_dev->level_ct;
+	memcpy(level, lmh_dev->levels, lmh_dev->level_ct * sizeof(uint32_t));
+
+	return 0;
+}
+
+
+static int lmh_get_level(struct lmh_device_ops *ops, int *level)
+{
+	struct lmh_profile *lmh_dev;
+
+	if (!level || !ops) {
+		pr_err("Invalid input\n");
+		return -EINVAL;
+	}
+	lmh_dev = container_of(ops, struct lmh_profile, dev_ops);
+
+	*level = lmh_dev->curr_level;
+
+	return 0;
+}
+
+static int lmh_get_dev_info(void)
+{
+	int ret = 0;
+	uint32_t size = 0, next = 0;
+	struct scm_desc desc_arg;
+	uint32_t *payload = NULL;
+	struct {
+		uint32_t list_addr;
+		uint32_t list_size;
+		uint32_t list_start;
+	} cmd_buf;
+
+	payload = devm_kzalloc(lmh_data->dev, sizeof(uint32_t) *
+		LMH_GET_PROFILE_SIZE, GFP_KERNEL);
+	if (!payload) {
+		pr_err("No payload\n");
+		ret = -ENOMEM;
+		goto get_dev_exit;
+	}
+
+	cmd_buf.list_addr = SCM_BUFFER_PHYS(payload);
+	/* &payload may be a physical address > 4 GB */
+	desc_arg.args[0] = SCM_BUFFER_PHYS(payload);
+	desc_arg.args[1] = cmd_buf.list_size =
+		SCM_BUFFER_SIZE(uint32_t) * LMH_GET_PROFILE_SIZE;
+	desc_arg.arginfo = SCM_ARGS(3, SCM_RW, SCM_VAL, SCM_VAL);
+	LMH_GET_RECURSIVE_DATA(desc_arg, 2, cmd_buf, payload, next, size,
+		LMH_GET_PROFILES, lmh_data->dev_info.levels, ret);
+	if (ret)
+		goto get_dev_exit;
+	lmh_data->dev_info.level_ct = size;
+	lmh_data->dev_info.curr_level = LMH_DEFAULT_PROFILE;
+	ret = lmh_set_level(&lmh_data->dev_info.dev_ops,
+		lmh_hw_data->default_profile);
+	if (ret) {
+		pr_err("Error switching to default profile%d, err:%d\n",
+			lmh_data->dev_info.curr_level, ret);
+		goto get_dev_exit;
+	}
+
+get_dev_exit:
+	if (ret)
+		devm_kfree(lmh_data->dev, lmh_data->dev_info.levels);
+	devm_kfree(lmh_data->dev, payload);
+	return ret;
+}
+
+static int lmh_device_init(void)
+{
+	int ret = 0;
+
+	if (lmh_check_tz_dev_cmds())
+		return -ENODEV;
+
+	ret = lmh_get_dev_info();
+	if (ret)
+		goto dev_init_exit;
+
+	lmh_data->dev_info.dev_ops.get_available_levels = lmh_get_all_level;
+	lmh_data->dev_info.dev_ops.get_curr_level = lmh_get_level;
+	lmh_data->dev_info.dev_ops.set_level = lmh_set_level;
+	ret = lmh_device_register(LMH_DEVICE, &lmh_data->dev_info.dev_ops);
+	if (ret) {
+		pr_err("Error registering device:[%s]. err:%d", LMH_DEVICE,
+			ret);
+		goto dev_init_exit;
+	}
+
+dev_init_exit:
+	return ret;
+}
+
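+/*
+ * Debug read is a two-step TZ sequence: LMH_DEBUG_READ_BUF_SIZE reports
+ * the number of debug entries, then LMH_DEBUG_READ fills a cached
+ * buffer that is only reallocated when the reported size changes.
+ */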
+static int lmh_debug_read(struct lmh_debug_ops *ops, uint32_t **buf)
+{
+	int ret = 0, size = 0, tz_ret = 0;
+	static uint32_t curr_size;
+	struct scm_desc desc_arg;
+	static uint32_t *payload;
+	struct {
+		uint32_t buf_addr;
+		uint32_t buf_size;
+	} cmd_buf;
+
+	desc_arg.arginfo = SCM_ARGS(0);
+	trace_lmh_event_call("GET_DEBUG_READ_SIZE enter");
+	if (!is_scm_armv8()) {
+		ret = scm_call(SCM_SVC_LMH, LMH_DEBUG_READ_BUF_SIZE,
+			NULL, 0, &size, SCM_BUFFER_SIZE(size));
+	} else {
+		ret = scm_call2(SCM_SIP_FNID(SCM_SVC_LMH,
+			LMH_DEBUG_READ_BUF_SIZE), &desc_arg);
+		size = desc_arg.ret[0];
+	}
+	trace_lmh_event_call("GET_DEBUG_READ_SIZE exit");
+	if (ret) {
+		pr_err("Error in SCM v%d get debug buffer size call. err:%d\n",
+				(is_scm_armv8()) ? 8 : 7, ret);
+		goto get_dbg_exit;
+	}
+	if (!size) {
+		pr_err("No Debug data to read.\n");
+		ret = -ENODEV;
+		goto get_dbg_exit;
+	}
+	size = SCM_BUFFER_SIZE(uint32_t) * size * LMH_READ_LINE_LENGTH;
+	if (curr_size != size) {
+		if (payload)
+			devm_kfree(lmh_data->dev, payload);
+		payload = devm_kzalloc(lmh_data->dev, PAGE_ALIGN(size),
+				       GFP_KERNEL);
+		if (!payload) {
+			pr_err("payload buffer alloc failed\n");
+			ret = -ENOMEM;
+			goto get_dbg_exit;
+		}
+		curr_size = size;
+	}
+
+	cmd_buf.buf_addr = SCM_BUFFER_PHYS(payload);
+	/* &payload may be a physical address > 4 GB */
+	desc_arg.args[0] = SCM_BUFFER_PHYS(payload);
+	desc_arg.args[1] = cmd_buf.buf_size = curr_size;
+	desc_arg.arginfo = SCM_ARGS(2, SCM_RW, SCM_VAL);
+	trace_lmh_event_call("GET_DEBUG_READ enter");
+	dmac_flush_range(payload, (void *)payload + curr_size);
+	if (!is_scm_armv8()) {
+		ret = scm_call(SCM_SVC_LMH, LMH_DEBUG_READ,
+			(void *) &cmd_buf, SCM_BUFFER_SIZE(cmd_buf),
+			&tz_ret, SCM_BUFFER_SIZE(tz_ret));
+	} else {
+		ret = scm_call2(SCM_SIP_FNID(SCM_SVC_LMH,
+			LMH_DEBUG_READ), &desc_arg);
+		tz_ret = desc_arg.ret[0];
+	}
+	/* Have memory barrier before we access the TZ data */
+	mb();
+	dmac_inv_range(payload, (void *)payload + curr_size);
+	trace_lmh_event_call("GET_DEBUG_READ exit");
+	if (ret) {
+		pr_err("Error in SCM v%d get debug read. err:%d\n",
+				(is_scm_armv8()) ? 8 : 7, ret);
+		goto get_dbg_exit;
+	}
+	if (tz_ret) {
+		pr_err("TZ API returned error. err:%d\n", tz_ret);
+		ret = tz_ret;
+		goto get_dbg_exit;
+	}
+	trace_lmh_debug_data("Debug read", payload,
+		curr_size / sizeof(uint32_t));
+
+get_dbg_exit:
+	if (ret && payload) {
+		devm_kfree(lmh_data->dev, payload);
+		payload = NULL;
+		curr_size = 0;
+	}
+	*buf = payload;
+
+	return (ret < 0) ? ret : curr_size;
+}
+
+static int lmh_debug_config_write(uint32_t cmd_id, uint32_t *buf, int size)
+{
+	int ret = 0, size_bytes = 0;
+	struct scm_desc desc_arg;
+	uint32_t *payload = NULL;
+	struct {
+		uint32_t buf_addr;
+		uint32_t buf_size;
+		uint32_t node;
+		uint32_t node_id;
+		uint32_t read_type;
+	} cmd_buf;
+
+	trace_lmh_debug_data("Config LMH", buf, size);
+	size_bytes = (size - 3) * sizeof(uint32_t);
+	payload = devm_kzalloc(lmh_data->dev, PAGE_ALIGN(size_bytes),
+			       GFP_KERNEL);
+	if (!payload) {
+		ret = -ENOMEM;
+		goto set_cfg_exit;
+	}
+	memcpy(payload, &buf[3], size_bytes);
+
+	cmd_buf.buf_addr = SCM_BUFFER_PHYS(payload);
+	/* &payload may be a physical address > 4 GB */
+	desc_arg.args[0] = SCM_BUFFER_PHYS(payload);
+	desc_arg.args[1] = cmd_buf.buf_size = size_bytes;
+	desc_arg.args[2] = cmd_buf.node = buf[0];
+	desc_arg.args[3] = cmd_buf.node_id = buf[1];
+	desc_arg.args[4] = cmd_buf.read_type = buf[2];
+	desc_arg.arginfo = SCM_ARGS(5, SCM_RO, SCM_VAL, SCM_VAL, SCM_VAL,
+					SCM_VAL);
+	trace_lmh_event_call("CONFIG_DEBUG_WRITE enter");
+	dmac_flush_range(payload, (void *)payload + size_bytes);
+	if (!is_scm_armv8())
+		ret = scm_call(SCM_SVC_LMH, cmd_id, (void *) &cmd_buf,
+			SCM_BUFFER_SIZE(cmd_buf), NULL, 0);
+	else
+		ret = scm_call2(SCM_SIP_FNID(SCM_SVC_LMH, cmd_id), &desc_arg);
+	/* Have memory barrier before we access the TZ data */
+	mb();
+	dmac_inv_range(payload, (void *)payload + size_bytes);
+	trace_lmh_event_call("CONFIG_DEBUG_WRITE exit");
+	if (ret) {
+		pr_err("Error in SCM v%d config debug read. err:%d\n",
+				(is_scm_armv8()) ? 8 : 7, ret);
+		goto set_cfg_exit;
+	}
+
+set_cfg_exit:
+	return ret;
+}
+
+static int lmh_debug_config_read(struct lmh_debug_ops *ops, uint32_t *buf,
+	int size)
+{
+	return lmh_debug_config_write(LMH_DEBUG_SET, buf, size);
+}
+
+static int lmh_debug_get_types(struct lmh_debug_ops *ops, bool is_read,
+	uint32_t **buf)
+{
+	int ret = 0;
+	uint32_t size = 0, next = 0;
+	struct scm_desc desc_arg;
+	uint32_t *payload = NULL, *dest_buf = NULL;
+	struct {
+		uint32_t list_addr;
+		uint32_t list_size;
+		uint32_t cmd_type;
+		uint32_t list_start;
+	} cmd_buf;
+
+	if (is_read && lmh_data->debug_info.read_type) {
+		*buf = lmh_data->debug_info.read_type;
+		trace_lmh_debug_data("Data type",
+			lmh_data->debug_info.read_type,
+			lmh_data->debug_info.read_type_count);
+		return lmh_data->debug_info.read_type_count;
+	} else if (!is_read && lmh_data->debug_info.config_type) {
+		*buf = lmh_data->debug_info.config_type;
+		trace_lmh_debug_data("Config type",
+			lmh_data->debug_info.config_type,
+			lmh_data->debug_info.config_type_count);
+		return lmh_data->debug_info.config_type_count;
+	}
+	payload = devm_kzalloc(lmh_data->dev, sizeof(uint32_t) *
+		LMH_SCM_PAYLOAD_SIZE, GFP_KERNEL);
+	if (!payload) {
+		ret = -ENOMEM;
+		goto get_type_exit;
+	}
+	cmd_buf.list_addr = SCM_BUFFER_PHYS(payload);
+	/* &payload may be a physical address > 4 GB */
+	desc_arg.args[0] = SCM_BUFFER_PHYS(payload);
+	desc_arg.args[1] = cmd_buf.list_size =
+		SCM_BUFFER_SIZE(uint32_t) * LMH_SCM_PAYLOAD_SIZE;
+	desc_arg.args[2] = cmd_buf.cmd_type = (is_read) ?
+			LMH_DEBUG_READ_TYPE : LMH_DEBUG_CONFIG_TYPE;
+	desc_arg.arginfo = SCM_ARGS(4, SCM_RW, SCM_VAL, SCM_VAL, SCM_VAL);
+	LMH_GET_RECURSIVE_DATA(desc_arg, 3, cmd_buf, payload, next, size,
+		LMH_DEBUG_GET_TYPE, dest_buf, ret);
+	if (ret)
+		goto get_type_exit;
+	pr_debug("Total %s types:%d\n", (is_read) ? "read" : "config", size);
+	if (is_read) {
+		lmh_data->debug_info.read_type = *buf = dest_buf;
+		lmh_data->debug_info.read_type_count = size;
+		trace_lmh_debug_data("Data type", dest_buf, size);
+	} else {
+		lmh_data->debug_info.config_type = *buf = dest_buf;
+		lmh_data->debug_info.config_type_count = size;
+		trace_lmh_debug_data("Config type", dest_buf, size);
+	}
+
+get_type_exit:
+	if (ret) {
+		devm_kfree(lmh_data->dev, lmh_data->debug_info.read_type);
+		devm_kfree(lmh_data->dev, lmh_data->debug_info.config_type);
+		lmh_data->debug_info.config_type_count = 0;
+		lmh_data->debug_info.read_type_count = 0;
+	}
+	devm_kfree(lmh_data->dev, payload);
+	return (ret) ? ret : size;
+}
+
+static int lmh_debug_lmh_config(struct lmh_debug_ops *ops, uint32_t *buf,
+	int size)
+{
+	return lmh_debug_config_write(LMH_DEBUG_SET, buf, size);
+}
+
+static void lmh_voltage_scale_set(uint32_t voltage)
+{
+	char trace_buf[MAX_TRACE_EVENT_MSG_LEN] = "";
+
+	mutex_lock(&scm_lmh_lock);
+	writel_relaxed(voltage, lmh_data->dpm_voltage_scale_reg);
+	mutex_unlock(&scm_lmh_lock);
+	snprintf(trace_buf, MAX_TRACE_EVENT_MSG_LEN,
+		"DPM voltage scale %d mV", voltage);
+	pr_debug("%s\n", trace_buf);
+	trace_lmh_event_call(trace_buf);
+}
+
+static void write_to_odcm(bool enable)
+{
+	uint32_t idx = 0, data = enable ? 1 : 0;
+
+	for (; idx < LMH_ODCM_MAX_COUNT; idx++)
+		writel_relaxed(data, lmh_data->odcm_reg[idx]);
+}
+
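+/*
+ * ODCM sequencing relative to rail voltage changes: enable at
+ * PRE_VOLTAGE_CHANGE, before the rail rises above the threshold, but
+ * defer disabling to the completed VOLTAGE_CHANGE event, after the rail
+ * has actually dropped. An aborted change restores the state saved at
+ * PRE_VOLTAGE_CHANGE.
+ */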
+static void evaluate_and_config_odcm(uint32_t rail_uV, unsigned long state)
+{
+	uint32_t rail_mV = rail_uV / 1000;
+	static bool prev_state, disable_odcm;
+
+	mutex_lock(&lmh_odcm_access);
+	switch (state) {
+	case REGULATOR_EVENT_VOLTAGE_CHANGE:
+		if (!disable_odcm)
+			break;
+		pr_debug("Disable ODCM\n");
+		write_to_odcm(false);
+		lmh_data->odcm_enabled = false;
+		disable_odcm = false;
+		break;
+	case REGULATOR_EVENT_PRE_VOLTAGE_CHANGE:
+		disable_odcm = false;
+		prev_state = lmh_data->odcm_enabled;
+		if (rail_mV > lmh_data->odcm_thresh_mV) {
+			if (lmh_data->odcm_enabled)
+				break;
+			/* Enable ODCM before the voltage increases */
+			pr_debug("Enable ODCM for voltage %u mV\n", rail_mV);
+			write_to_odcm(true);
+			lmh_data->odcm_enabled = true;
+		} else {
+			if (!lmh_data->odcm_enabled)
+				break;
+			/* Disable ODCM after the voltage decreases */
+			pr_debug("Disable ODCM for voltage %u mV\n", rail_mV);
+			disable_odcm = true;
+		}
+		break;
+	case REGULATOR_EVENT_ABORT_VOLTAGE_CHANGE:
+		disable_odcm = false;
+		if (prev_state == lmh_data->odcm_enabled)
+			break;
+		pr_debug("Reverting ODCM state to %s\n",
+			prev_state ? "enabled" : "disabled");
+		write_to_odcm(prev_state);
+		lmh_data->odcm_enabled = prev_state;
+		break;
+	default:
+		break;
+	}
+	mutex_unlock(&lmh_odcm_access);
+}
+
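+/*
+ * Keep the DPM voltage scale register in step with the rail: on a
+ * low-to-high transition the new value is written before the regulator
+ * changes (at PRE_VOLTAGE_CHANGE); on a high-to-low transition it is
+ * deferred until the change completes (at VOLTAGE_CHANGE).
+ */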
+static int lmh_voltage_change_notifier(struct notifier_block *nb_data,
+	unsigned long event, void *data)
+{
+	uint32_t voltage = 0;
+	static uint32_t last_voltage;
+	static bool change_needed;
+
+	if (event == REGULATOR_EVENT_VOLTAGE_CHANGE) {
+		/* Convert from uV to mV */
+		pr_debug("Received event POST_VOLTAGE_CHANGE\n");
+		voltage = ((unsigned long)data) / 1000;
+		if (change_needed && last_voltage == voltage) {
+			lmh_voltage_scale_set(voltage);
+			change_needed = false;
+		}
+		if (lmh_data->odcm_reg[0])
+			evaluate_and_config_odcm(0, event);
+	} else if (event == REGULATOR_EVENT_PRE_VOLTAGE_CHANGE) {
+		struct pre_voltage_change_data *change_data =
+			(struct pre_voltage_change_data *)data;
+		last_voltage = change_data->min_uV / 1000;
+		if (change_data->min_uV > change_data->old_uV)
+			/* Going from low to high apply change first */
+			lmh_voltage_scale_set(last_voltage);
+		else
+			/* Going from high to low apply change after */
+			change_needed = true;
+		pr_debug("Received event PRE_VOLTAGE_CHANGE\n");
+		pr_debug("max = %lu mV min = %lu mV previous = %lu mV\n",
+			change_data->max_uV / 1000, change_data->min_uV / 1000,
+			change_data->old_uV / 1000);
+
+		if (lmh_data->odcm_reg[0])
+			evaluate_and_config_odcm(change_data->max_uV, event);
+	} else if (event == REGULATOR_EVENT_ABORT_VOLTAGE_CHANGE) {
+		pr_debug("Received event ABORT_VOLTAGE_CHANGE\n");
+		if (lmh_data->odcm_reg[0])
+			evaluate_and_config_odcm(0, event);
+	}
+
+	return NOTIFY_OK;
+}
+
+static void lmh_dpm_remove(void)
+{
+	if (!IS_ERR_OR_NULL(lmh_data->regulator) &&
+		lmh_data->dpm_notifier_blk.notifier_call != NULL) {
+		regulator_unregister_notifier(lmh_data->regulator,
+			&(lmh_data->dpm_notifier_blk));
+		lmh_data->regulator = NULL;
+	}
+}
+
+static void lmh_dpm_init(void)
+{
+	int ret = 0;
+
+	lmh_data->dpm_voltage_scale_reg = devm_ioremap(lmh_data->dev,
+			(phys_addr_t)APCS_DPM_VOLTAGE_SCALE, 4);
+	if (!lmh_data->dpm_voltage_scale_reg) {
+		ret = -ENODEV;
+		pr_err("Error mapping LMH DPM voltage scale register\n");
+		goto dpm_init_exit;
+	}
+
+	lmh_data->dpm_notifier_blk.notifier_call = lmh_voltage_change_notifier;
+	ret = regulator_register_notifier(lmh_data->regulator,
+		&(lmh_data->dpm_notifier_blk));
+	if (ret) {
+		pr_err("DPM regulator notification registration failed. err:%d\n",
+			ret);
+		goto dpm_init_exit;
+	}
+
+dpm_init_exit:
+	if (ret) {
+		if (lmh_data->dpm_notifier_blk.notifier_call)
+			regulator_unregister_notifier(lmh_data->regulator,
+				&(lmh_data->dpm_notifier_blk));
+		devm_regulator_put(lmh_data->regulator);
+		lmh_data->dpm_notifier_blk.notifier_call = NULL;
+		lmh_data->regulator = NULL;
+	}
+}
+
+static int lmh_debug_init(void)
+{
+	int ret = 0;
+
+	if (lmh_check_tz_debug_cmds()) {
+		pr_debug("Debug commands not available.\n");
+		return -ENODEV;
+	}
+
+	lmh_data->debug_info.debug_ops.debug_read = lmh_debug_read;
+	lmh_data->debug_info.debug_ops.debug_config_read
+		= lmh_debug_config_read;
+	lmh_data->debug_info.debug_ops.debug_config_lmh
+		= lmh_debug_lmh_config;
+	lmh_data->debug_info.debug_ops.debug_get_types
+		= lmh_debug_get_types;
+	ret = lmh_debug_register(&lmh_data->debug_info.debug_ops);
+	if (ret) {
+		pr_err("Error registering debug ops. err:%d\n", ret);
+		goto debug_init_exit;
+	}
+
+debug_init_exit:
+	return ret;
+}
+
+static int lmh_sensor_init(struct platform_device *pdev)
+{
+	int ret = 0;
+
+	if (lmh_check_tz_sensor_cmds())
+		return -ENODEV;
+
+	down_write(&lmh_sensor_access);
+	ret = lmh_get_sensor_list();
+	if (ret)
+		goto init_exit;
+
+	lmh_data->intr_state = LMH_ISR_MONITOR;
+
+	ret = lmh_get_sensor_devicetree(pdev);
+	if (ret) {
+		pr_err("Error getting device tree data. err:%d\n", ret);
+		goto init_exit;
+	}
+	pr_debug("LMH Sensor Init complete\n");
+
+init_exit:
+	up_write(&lmh_sensor_access);
+	if (ret)
+		lmh_remove_sensors();
+
+	return ret;
+}
+
+static int lmh_probe(struct platform_device *pdev)
+{
+	int ret = 0;
+
+	if (lmh_data) {
+		pr_err("Reinitializing lmh hardware driver\n");
+		return -EEXIST;
+	}
+	lmh_data = devm_kzalloc(&pdev->dev, sizeof(struct lmh_driver_data),
+					GFP_KERNEL);
+	if (!lmh_data) {
+		pr_err("kzalloc failed\n");
+		return -ENOMEM;
+	}
+	lmh_data->dev = &pdev->dev;
+
+	lmh_data->poll_wq = alloc_workqueue("lmh_poll_wq", WQ_HIGHPRI, 0);
+	if (!lmh_data->poll_wq) {
+		pr_err("Error allocating workqueue\n");
+		ret = -ENOMEM;
+		goto probe_exit;
+	}
+	INIT_DEFERRABLE_WORK(&lmh_data->poll_work, lmh_poll);
+
+	ret = lmh_sensor_init(pdev);
+	if (ret) {
+		pr_err("Sensor Init failed. err:%d\n", ret);
+		goto probe_exit;
+	}
+	ret = lmh_device_init();
+	if (ret) {
+		pr_err("WARNING: Device Init failed. err:%d. LMH continues\n",
+			ret);
+		ret = 0;
+	}
+
+	if (lmh_data->regulator)
+		lmh_dpm_init();
+
+	ret = lmh_debug_init();
+	if (ret) {
+		pr_err("LMH debug init failed. err:%d\n", ret);
+		ret = 0;
+	}
+	platform_set_drvdata(pdev, lmh_data);
+
+	return ret;
+
+probe_exit:
+	if (lmh_data->poll_wq)
+		destroy_workqueue(lmh_data->poll_wq);
+	lmh_data = NULL;
+	return ret;
+}
+
+static int lmh_remove(struct platform_device *pdev)
+{
+	struct lmh_driver_data *lmh_dat = platform_get_drvdata(pdev);
+
+	destroy_workqueue(lmh_dat->poll_wq);
+	free_irq(lmh_dat->irq_num, lmh_dat);
+	lmh_remove_sensors();
+	lmh_device_deregister(&lmh_dat->dev_info.dev_ops);
+	lmh_dpm_remove();
+
+	return 0;
+}
+
+static const struct of_device_id lmh_match[] = {
+	{
+		.compatible = "qcom,lmh",
+		.data = (void *)&lmh_lite_data,
+	},
+	{
+		.compatible = "qcom,lmh_v1",
+		.data = (void *)&lmh_v1_data,
+	},
+	{},
+};
+
+static struct platform_driver lmh_driver = {
+	.probe  = lmh_probe,
+	.remove = lmh_remove,
+	.driver = {
+		.name           = LMH_DRIVER_NAME,
+		.owner          = THIS_MODULE,
+		.of_match_table = lmh_match,
+	},
+};
+
+int __init lmh_init_driver(void)
+{
+	struct device_node *comp_node;
+
+	comp_node = of_find_matching_node(NULL, lmh_match);
+	if (comp_node) {
+		const struct of_device_id *match = of_match_node(lmh_match,
+							comp_node);
+		if (!match) {
+			pr_err("Couldnt find a match\n");
+			goto plt_register;
+		}
+		lmh_hw_data = (struct lmh_default_data *)match->data;
+		of_node_put(comp_node);
+	}
+
+plt_register:
+	return platform_driver_register(&lmh_driver);
+}
+
+static void __exit lmh_exit(void)
+{
+	platform_driver_unregister(&lmh_driver);
+}
+
+late_initcall(lmh_init_driver);
+module_exit(lmh_exit);
+
+MODULE_DESCRIPTION("LMH hardware interface");
+MODULE_ALIAS("platform:" LMH_DRIVER_NAME);
+MODULE_LICENSE("GPL v2");
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/thermal/msm_lmh_dcvs.c	2019-10-29 09:26:24.917215646 +0100
@@ -0,0 +1,581 @@
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "%s:%s " fmt, KBUILD_MODNAME, __func__
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/thermal.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
+#include <linux/sched.h>
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/timer.h>
+#include <linux/pm_opp.h>
+#include <linux/cpu_cooling.h>
+#include <linux/bitmap.h>
+#include <linux/msm_thermal.h>
+
+#include <asm/smp_plat.h>
+#include <asm/cacheflush.h>
+
+#include <soc/qcom/scm.h>
+
+#include "thermal_core.h"
+
+#define CREATE_TRACE_POINTS
+#define LMH_DCVS_TRACE
+#include <trace/trace_thermal.h>
+
+#define MSM_LIMITS_DCVSH		0x10
+#define MSM_LIMITS_NODE_DCVS		0x44435653
+
+#define MSM_LIMITS_SUB_FN_THERMAL	0x54484D4C
+#define MSM_LIMITS_SUB_FN_GENERAL	0x47454E00
+
+#define MSM_LIMITS_ALGO_MODE_ENABLE	0x454E424C
+
+#define MSM_LIMITS_HI_THRESHOLD		0x48494748
+#define MSM_LIMITS_LOW_THRESHOLD        0x4C4F5700
+#define MSM_LIMITS_ARM_THRESHOLD	0x41524D00
+
+#define MSM_LIMITS_CLUSTER_0		0x6370302D
+#define MSM_LIMITS_CLUSTER_1		0x6370312D
+
+#define MSM_LIMITS_DOMAIN_MAX		0x444D4158
+
+#define MSM_LIMITS_HIGH_THRESHOLD_VAL	95000
+#define MSM_LIMITS_ARM_THRESHOLD_VAL	65000
+#define MSM_LIMITS_LOW_THRESHOLD_OFFSET 500
+#define MSM_LIMITS_POLLING_DELAY_MS	10
+#define MSM_LIMITS_CLUSTER_0_REQ	0x179C1B04
+#define MSM_LIMITS_CLUSTER_1_REQ	0x179C3B04
+#define MSM_LIMITS_CLUSTER_0_INT_CLR	0x179CE808
+#define MSM_LIMITS_CLUSTER_1_INT_CLR	0x179CC808
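+/*
+ * The OSM status register encodes the current mitigation frequency in
+ * its low 10 bits as a multiple of 19200 kHz (presumably the 19.2 MHz
+ * XO); the macro below extracts that field and converts it to kHz.
+ */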
+#define dcvsh_get_frequency(_val, _max) do { \
+	_max = (_val) & 0x3FF; \
+	_max *= 19200; \
+} while (0)
+#define FREQ_KHZ_TO_HZ(_val) ((_val) * 1000)
+#define FREQ_HZ_TO_KHZ(_val) ((_val) / 1000)
+
+enum lmh_hw_trips {
+	LIMITS_TRIP_LO,
+	LIMITS_TRIP_HI,
+	LIMITS_TRIP_MAX,
+};
+
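+/*
+ * Per-cluster hardware instance. is_irq_enabled is an atomic flag
+ * (bit 1 of a one-word bitmap): set while the mitigation IRQ is
+ * armed, cleared while the driver falls back to timer-based polling.
+ */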
+struct msm_lmh_dcvs_hw {
+	char sensor_name[THERMAL_NAME_LENGTH];
+	uint32_t affinity;
+	uint32_t temp_limits[LIMITS_TRIP_MAX];
+	struct sensor_threshold default_lo, default_hi;
+	struct thermal_cooling_device *cdev;
+	int irq_num;
+	void __iomem *osm_hw_reg;
+	void __iomem *int_clr_reg;
+	cpumask_t core_map;
+	struct timer_list poll_timer;
+	uint32_t max_freq;
+	uint32_t hw_freq_limit;
+	struct list_head list;
+	DECLARE_BITMAP(is_irq_enabled, 1);
+};
+
+static LIST_HEAD(lmh_dcvs_hw_list);
+
+static void msm_lmh_dcvs_get_max_freq(uint32_t cpu, uint32_t *max_freq)
+{
+	unsigned long freq_ceil = UINT_MAX;
+	struct device *cpu_dev = NULL;
+
+	cpu_dev = get_cpu_device(cpu);
+	if (!cpu_dev) {
+		pr_err("Error getting CPU%d device\n", cpu);
+		return;
+	}
+
+	rcu_read_lock();
+	dev_pm_opp_find_freq_floor(cpu_dev, &freq_ceil);
+	rcu_read_unlock();
+	*max_freq = freq_ceil/1000;
+}
+
+static uint32_t msm_lmh_mitigation_notify(struct msm_lmh_dcvs_hw *hw)
+{
+	uint32_t val = 0;
+	struct device *cpu_dev = NULL;
+	unsigned long freq_val, max_limit = 0;
+	struct dev_pm_opp *opp_entry;
+
+	val = readl_relaxed(hw->osm_hw_reg);
+	dcvsh_get_frequency(val, max_limit);
+	cpu_dev = get_cpu_device(cpumask_first(&hw->core_map));
+	if (!cpu_dev) {
+		pr_err("Error getting CPU%d device\n",
+			cpumask_first(&hw->core_map));
+		goto notify_exit;
+	}
+
+	freq_val = FREQ_KHZ_TO_HZ(max_limit);
+	rcu_read_lock();
+	opp_entry = dev_pm_opp_find_freq_floor(cpu_dev, &freq_val);
+	/*
+	 * Hardware mitigation frequency can be lower than the lowest
+	 * possible CPU frequency. In that case freq floor call will
+	 * fail with -ERANGE and we need to match to the lowest
+	 * frequency using freq_ceil.
+	 */
+	if (IS_ERR(opp_entry) && PTR_ERR(opp_entry) == -ERANGE) {
+		opp_entry = dev_pm_opp_find_freq_ceil(cpu_dev, &freq_val);
+		if (IS_ERR(opp_entry))
+			dev_err(cpu_dev, "frequency:%lu. opp error:%ld\n",
+					freq_val, PTR_ERR(opp_entry));
+	}
+	rcu_read_unlock();
+	max_limit = FREQ_HZ_TO_KHZ(freq_val);
+
+	sched_update_cpu_freq_min_max(&hw->core_map, 0, max_limit);
+	trace_lmh_dcvs_freq(cpumask_first(&hw->core_map), max_limit);
+
+notify_exit:
+	hw->hw_freq_limit = max_limit;
+	return max_limit;
+}
+
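+/*
+ * Polling loop used while mitigation is active: the IRQ is kept
+ * disabled and the OSM register is sampled every
+ * MSM_LIMITS_POLLING_DELAY_MS. Once the reported limit reaches the
+ * cluster's maximum frequency, the interrupt status is cleared and
+ * the IRQ is re-armed.
+ */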
+static void msm_lmh_dcvs_poll(unsigned long data)
+{
+	uint32_t max_limit = 0;
+	struct msm_lmh_dcvs_hw *hw = (struct msm_lmh_dcvs_hw *)data;
+
+	if (hw->max_freq == UINT_MAX)
+		msm_lmh_dcvs_get_max_freq(cpumask_first(&hw->core_map),
+			&hw->max_freq);
+	max_limit = msm_lmh_mitigation_notify(hw);
+	if (max_limit >= hw->max_freq) {
+		del_timer(&hw->poll_timer);
+		writel_relaxed(0xFF, hw->int_clr_reg);
+		set_bit(1, hw->is_irq_enabled);
+		enable_irq(hw->irq_num);
+	} else {
+		mod_timer(&hw->poll_timer, jiffies + msecs_to_jiffies(
+			MSM_LIMITS_POLLING_DELAY_MS));
+	}
+}
+
+static void lmh_dcvs_notify(struct msm_lmh_dcvs_hw *hw)
+{
+	if (test_and_clear_bit(1, hw->is_irq_enabled)) {
+		disable_irq_nosync(hw->irq_num);
+		msm_lmh_mitigation_notify(hw);
+		mod_timer(&hw->poll_timer, jiffies + msecs_to_jiffies(
+			MSM_LIMITS_POLLING_DELAY_MS));
+	}
+}
+
+static irqreturn_t lmh_dcvs_handle_isr(int irq, void *data)
+{
+	struct msm_lmh_dcvs_hw *hw = data;
+
+	lmh_dcvs_notify(hw);
+	return IRQ_HANDLED;
+}
+
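+/*
+ * Write one value to an LMH DCVSH setting through an SCM call. The
+ * payload is passed by physical address (SCM_RO), so it is flushed
+ * from the cache before the secure monitor reads it.
+ */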
+static int msm_lmh_dcvs_write(uint32_t node_id, uint32_t fn,
+		uint32_t setting, uint32_t val)
+{
+	int ret;
+	struct scm_desc desc_arg;
+	uint32_t *payload = NULL;
+
+	payload = kzalloc(sizeof(uint32_t) * 5, GFP_KERNEL);
+	if (!payload)
+		return -ENOMEM;
+
+	payload[0] = fn; /* algorithm */
+	payload[1] = 0; /* unused sub-algorithm */
+	payload[2] = setting;
+	payload[3] = 1; /* number of values */
+	payload[4] = val;
+
+	desc_arg.args[0] = SCM_BUFFER_PHYS(payload);
+	desc_arg.args[1] = sizeof(uint32_t) * 5;
+	desc_arg.args[2] = MSM_LIMITS_NODE_DCVS;
+	desc_arg.args[3] = node_id;
+	desc_arg.args[4] = 0; /* version */
+	desc_arg.arginfo = SCM_ARGS(5, SCM_RO, SCM_VAL, SCM_VAL,
+					SCM_VAL, SCM_VAL);
+
+	dmac_flush_range(payload, (void *)payload + 5 * (sizeof(uint32_t)));
+	ret = scm_call2(SCM_SIP_FNID(SCM_SVC_LMH, MSM_LIMITS_DCVSH), &desc_arg);
+
+	kfree(payload);
+	return ret;
+}
+
+static int lmh_get_trip_type(struct thermal_zone_device *dev,
+				int trip, enum thermal_trip_type *type)
+{
+	switch (trip) {
+	case LIMITS_TRIP_LO:
+		*type = THERMAL_TRIP_CONFIGURABLE_LOW;
+		break;
+	case LIMITS_TRIP_HI:
+		*type = THERMAL_TRIP_CONFIGURABLE_HI;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int lmh_activate_trip(struct thermal_zone_device *dev,
+		int trip, enum thermal_trip_activation_mode mode)
+{
+	struct msm_lmh_dcvs_hw *hw = dev->devdata;
+	uint32_t enable, temp;
+	int ret = 0;
+
+	enable = (mode == THERMAL_TRIP_ACTIVATION_ENABLED) ? 1 : 0;
+	if (!enable) {
+		pr_info("disabling a trip is not supported\n");
+		return 0;
+	}
+
+	/* Sanity check limits before writing to the hardware */
+	if (hw->temp_limits[LIMITS_TRIP_LO] >=
+			hw->temp_limits[LIMITS_TRIP_HI])
+		return -EINVAL;
+
+	temp = hw->temp_limits[trip];
+	switch (trip) {
+	case LIMITS_TRIP_LO:
+		ret =  msm_lmh_dcvs_write(hw->affinity,
+				MSM_LIMITS_SUB_FN_THERMAL,
+				MSM_LIMITS_ARM_THRESHOLD, temp);
+		break;
+	case LIMITS_TRIP_HI:
+		/*
+		/*
+		 * The high threshold must be at least the low threshold
+		 * offset, since the low threshold is derived by
+		 * subtracting the offset from it
+		 */
+		if (temp < MSM_LIMITS_LOW_THRESHOLD_OFFSET)
+			return -EINVAL;
+		ret =  msm_lmh_dcvs_write(hw->affinity,
+				MSM_LIMITS_SUB_FN_THERMAL,
+				MSM_LIMITS_HI_THRESHOLD, temp);
+		if (ret)
+			break;
+		ret =  msm_lmh_dcvs_write(hw->affinity,
+				MSM_LIMITS_SUB_FN_THERMAL,
+				MSM_LIMITS_LOW_THRESHOLD, temp -
+				MSM_LIMITS_LOW_THRESHOLD_OFFSET);
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return ret;
+}
+
+static int lmh_get_trip_temp(struct thermal_zone_device *dev,
+			int trip, int *value)
+{
+	struct msm_lmh_dcvs_hw *hw = dev->devdata;
+
+	*value = hw->temp_limits[trip];
+
+	return 0;
+}
+
+static int lmh_set_trip_temp(struct thermal_zone_device *dev,
+			int trip, int value)
+{
+	struct msm_lmh_dcvs_hw *hw = dev->devdata;
+
+	if (value < 0) {
+		pr_err("Value out of range :%d\n", value);
+		return -EINVAL;
+	}
+
+	hw->temp_limits[trip] = (uint32_t)value;
+
+	return 0;
+}
+
+static struct thermal_zone_device_ops limits_sensor_ops = {
+	.get_trip_type		= lmh_get_trip_type,
+	.activate_trip_type	= lmh_activate_trip,
+	.get_trip_temp		= lmh_get_trip_temp,
+	.set_trip_temp		= lmh_set_trip_temp,
+};
+
+static int trip_notify(enum thermal_trip_type type, int temp, void *data)
+{
+	return 0;
+}
+
+static struct msm_lmh_dcvs_hw *get_dcvsh_hw_from_cpu(int cpu)
+{
+	struct msm_lmh_dcvs_hw *hw;
+
+	list_for_each_entry(hw, &lmh_dcvs_hw_list, list) {
+		if (cpumask_test_cpu(cpu, &hw->core_map))
+			return hw;
+	}
+
+	return NULL;
+}
+
+static int lmh_set_max_limit(int cpu, u32 freq)
+{
+	struct msm_lmh_dcvs_hw *hw = get_dcvsh_hw_from_cpu(cpu);
+
+	if (!hw)
+		return -EINVAL;
+
+	return msm_lmh_dcvs_write(hw->affinity, MSM_LIMITS_SUB_FN_GENERAL,
+				MSM_LIMITS_DOMAIN_MAX, freq);
+}
+
+static int lmh_get_cur_limit(int cpu, unsigned long *freq)
+{
+	struct msm_lmh_dcvs_hw *hw = get_dcvsh_hw_from_cpu(cpu);
+
+	if (!hw)
+		return -EINVAL;
+	*freq = hw->hw_freq_limit;
+
+	return 0;
+}
+
+static struct cpu_cooling_ops cd_ops = {
+	.get_cur_state = lmh_get_cur_limit,
+	.ceil_limit = lmh_set_max_limit,
+};
+
+int msm_lmh_dcvsh_sw_notify(int cpu)
+{
+	struct msm_lmh_dcvs_hw *hw = get_dcvsh_hw_from_cpu(cpu);
+
+	if (!hw)
+		return -EINVAL;
+
+	lmh_dcvs_notify(hw);
+	return 0;
+}
+
+static int __ref lmh_dcvs_cpu_callback(struct notifier_block *nfb,
+		unsigned long action, void *hcpu)
+{
+	uint32_t cpu = (uintptr_t)hcpu;
+	struct msm_lmh_dcvs_hw *hw = get_dcvsh_hw_from_cpu(cpu);
+
+	if (!hw || hw->cdev)
+		return NOTIFY_OK;
+
+	switch (action & ~CPU_TASKS_FROZEN) {
+	case CPU_ONLINE:
+		hw->cdev = cpufreq_platform_cooling_register(&hw->core_map,
+				&cd_ops);
+		if (IS_ERR_OR_NULL(hw->cdev))
+			hw->cdev = NULL;
+		break;
+	default:
+		break;
+	}
+	return NOTIFY_OK;
+}
+
+static struct notifier_block __refdata lmh_dcvs_cpu_notifier = {
+	.notifier_call = lmh_dcvs_cpu_callback,
+};
+
+static int msm_lmh_dcvs_probe(struct platform_device *pdev)
+{
+	int ret;
+	int affinity = -1;
+	struct msm_lmh_dcvs_hw *hw;
+	struct thermal_zone_device *tzdev;
+	struct device_node *dn = pdev->dev.of_node;
+	struct device_node *cpu_node, *lmh_node;
+	int id;
+	uint32_t max_freq, request_reg, clear_reg;
+	int cpu;
+	cpumask_t mask = { CPU_BITS_NONE };
+
+	for_each_possible_cpu(cpu) {
+		cpu_node = of_cpu_device_node_get(cpu);
+		if (!cpu_node)
+			continue;
+		lmh_node = of_parse_phandle(cpu_node, "qcom,lmh-dcvs", 0);
+		if (lmh_node == dn) {
+			affinity = MPIDR_AFFINITY_LEVEL(
+					cpu_logical_map(cpu), 1);
+			/* set the cpumask */
+			cpumask_set_cpu(cpu, &(mask));
+		}
+		of_node_put(cpu_node);
+		of_node_put(lmh_node);
+	}
+
+	/*
+	 * Return an error if none of the CPUs have a
+	 * reference to our LMH node
+	 */
+	if (affinity == -1)
+		return -EINVAL;
+
+	msm_lmh_dcvs_get_max_freq(cpumask_first(&mask), &max_freq);
+	hw = devm_kzalloc(&pdev->dev, sizeof(*hw), GFP_KERNEL);
+	if (!hw)
+		return -ENOMEM;
+
+	cpumask_copy(&hw->core_map, &mask);
+	switch (affinity) {
+	case 0:
+		hw->affinity = MSM_LIMITS_CLUSTER_0;
+		break;
+	case 1:
+		hw->affinity = MSM_LIMITS_CLUSTER_1;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	/* Enable the thermal algorithm early */
+	ret = msm_lmh_dcvs_write(hw->affinity, MSM_LIMITS_SUB_FN_THERMAL,
+		 MSM_LIMITS_ALGO_MODE_ENABLE, 1);
+	if (ret)
+		return ret;
+
+	hw->default_lo.temp = MSM_LIMITS_ARM_THRESHOLD_VAL;
+	hw->default_lo.trip = THERMAL_TRIP_CONFIGURABLE_LOW;
+	hw->default_lo.notify = trip_notify;
+
+	hw->default_hi.temp = MSM_LIMITS_HIGH_THRESHOLD_VAL;
+	hw->default_hi.trip = THERMAL_TRIP_CONFIGURABLE_HI;
+	hw->default_hi.notify = trip_notify;
+
+	/*
+	 * Set up a virtual thermal zone for each LMH-DCVS hardware
+	 * instance. The sensor does not take actual temperature
+	 * readings, but it does support setting trip thresholds.
+	 * Register with the thermal framework so we can configure
+	 * the low/high thresholds.
+	 */
+	snprintf(hw->sensor_name, sizeof(hw->sensor_name), "limits_sensor-%02d",
+			affinity);
+	tzdev = thermal_zone_device_register(hw->sensor_name, LIMITS_TRIP_MAX,
+			(1 << LIMITS_TRIP_MAX) - 1, hw, &limits_sensor_ops,
+			NULL, 0, 0);
+	if (IS_ERR_OR_NULL(tzdev))
+		return tzdev ? PTR_ERR(tzdev) : -ENODEV;
+
+	/*
+	 * Program the driver defaults for the low and high thresholds.
+	 * Since we check that hi > lo, set the high threshold before
+	 * the low one
+	 */
+	id = sensor_get_id(hw->sensor_name);
+	if (id < 0)
+		return id;
+
+	ret = sensor_set_trip(id, &hw->default_hi);
+	if (!ret) {
+		ret = sensor_activate_trip(id, &hw->default_hi, true);
+		if (ret)
+			return ret;
+	} else {
+		return ret;
+	}
+
+	ret = sensor_set_trip(id, &hw->default_lo);
+	if (!ret) {
+		ret = sensor_activate_trip(id, &hw->default_lo, true);
+		if (ret)
+			return ret;
+	}
+
+	hw->hw_freq_limit = hw->max_freq = max_freq;
+
+	switch (affinity) {
+	case 0:
+		request_reg = MSM_LIMITS_CLUSTER_0_REQ;
+		clear_reg = MSM_LIMITS_CLUSTER_0_INT_CLR;
+		break;
+	case 1:
+		request_reg = MSM_LIMITS_CLUSTER_1_REQ;
+		clear_reg = MSM_LIMITS_CLUSTER_1_INT_CLR;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	hw->osm_hw_reg = devm_ioremap(&pdev->dev, request_reg, 0x4);
+	if (!hw->osm_hw_reg) {
+		pr_err("register remap failed\n");
+		return -ENOMEM;
+	}
+	hw->int_clr_reg = devm_ioremap(&pdev->dev, clear_reg, 0x4);
+	if (!hw->int_clr_reg) {
+		pr_err("interrupt clear reg remap failed\n");
+		return -ENOMEM;
+	}
+	init_timer_deferrable(&hw->poll_timer);
+	hw->poll_timer.data = (unsigned long)hw;
+	hw->poll_timer.function = msm_lmh_dcvs_poll;
+
+	hw->irq_num = of_irq_get(pdev->dev.of_node, 0);
+	if (hw->irq_num < 0) {
+		ret = hw->irq_num;
+		pr_err("Error getting IRQ number. err:%d\n", ret);
+		return ret;
+	}
+	set_bit(1, hw->is_irq_enabled);
+	ret = devm_request_threaded_irq(&pdev->dev, hw->irq_num, NULL,
+		lmh_dcvs_handle_isr, IRQF_TRIGGER_HIGH | IRQF_ONESHOT
+		| IRQF_NO_SUSPEND, hw->sensor_name, hw);
+	if (ret) {
+		pr_err("Error registering for irq. err:%d\n", ret);
+		return ret;
+	}
+
+	if (list_empty(&lmh_dcvs_hw_list))
+		register_cpu_notifier(&lmh_dcvs_cpu_notifier);
+
+	INIT_LIST_HEAD(&hw->list);
+	list_add(&hw->list, &lmh_dcvs_hw_list);
+
+	/* Register explicitly for the first CPU of each HW instance */
+	lmh_dcvs_cpu_callback(&lmh_dcvs_cpu_notifier, CPU_ONLINE,
+			(void *)(long)cpumask_first(&hw->core_map));
+
+	return ret;
+}
+
+static const struct of_device_id msm_lmh_dcvs_match[] = {
+	{ .compatible = "qcom,msm-hw-limits", },
+	{},
+};
+
+static struct platform_driver msm_lmh_dcvs_driver = {
+	.probe		= msm_lmh_dcvs_probe,
+	.driver		= {
+		.name = KBUILD_MODNAME,
+		.of_match_table = msm_lmh_dcvs_match,
+	},
+};
+builtin_platform_driver(msm_lmh_dcvs_driver);
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/thermal/msm_thermal.c	2019-10-29 09:26:24.917215646 +0100
@@ -0,0 +1,7601 @@
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) "%s:%s " fmt, KBUILD_MODNAME, __func__
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/kthread.h>
+#include <linux/mutex.h>
+#include <linux/msm_tsens.h>
+#include <linux/workqueue.h>
+#include <linux/completion.h>
+#include <linux/cpu.h>
+#include <linux/cpufreq.h>
+#include <linux/msm_tsens.h>
+#include <linux/msm_thermal.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/sysfs.h>
+#include <linux/types.h>
+#include <linux/thermal.h>
+#include <linux/regulator/rpm-smd-regulator.h>
+#include <linux/regulator/consumer.h>
+#include <linux/regulator/driver.h>
+#include <linux/msm_thermal_ioctl.h>
+#include <soc/qcom/rpm-smd.h>
+#include <soc/qcom/scm.h>
+#include <linux/debugfs.h>
+#include <linux/pm_opp.h>
+#include <linux/sched/rt.h>
+#include <linux/notifier.h>
+#include <linux/reboot.h>
+#include <soc/qcom/msm-core.h>
+#include <linux/cpumask.h>
+#include <linux/suspend.h>
+#include <linux/uaccess.h>
+#include <linux/uio_driver.h>
+#include <linux/io.h>
+
+#include <asm/cacheflush.h>
+
+#define CREATE_TRACE_POINTS
+#define TRACE_MSM_THERMAL
+#include <trace/trace_thermal.h>
+
+#define MSM_LIMITS_DCVSH		0x10
+#define MSM_LIMITS_NODE_DCVS		0x44435653
+#define MSM_LIMITS_SUB_FN_GENERAL	0x47454E00
+#define MSM_LIMITS_SUB_FN_CRNT		0x43524E54
+#define MSM_LIMITS_SUB_FN_REL		0x52454C00
+#define MSM_LIMITS_DOMAIN_MAX		0x444D4158
+#define MSM_LIMITS_DOMAIN_MIN		0x444D494E
+#define MSM_LIMITS_CLUSTER_0		0x6370302D
+#define MSM_LIMITS_CLUSTER_1		0x6370312D
+#define MSM_LIMITS_ALGO_MODE_ENABLE	0x454E424C
+
+#define MAX_CURRENT_UA 100000
+#define MAX_RAILS 5
+#define TSENS_NAME_FORMAT "tsens_tz_sensor%d"
+#define THERM_SECURE_BITE_CMD 8
+#define SENSOR_SCALING_FACTOR 1
+#define MSM_THERMAL_NAME "msm_thermal"
+#define MSM_TSENS_PRINT  "log_tsens_temperature"
+#define CPU_BUF_SIZE 64
+#define CPU_DEVICE "cpu%d"
+#define MAX_DEBUGFS_CONFIG_LEN   32
+#define MSM_THERMAL_CONFIG        "config"
+#define MSM_CONFIG_DATA           "data"
+#define DEBUGFS_DISABLE_ALL_MIT   "disable"
+#define DEBUGFS_CONFIG_UPDATE     "update"
+#define MSM_THERMAL_THRESH        "thresh_degc"
+#define MSM_THERMAL_THRESH_CLR    "thresh_clr_degc"
+#define MSM_THERMAL_THRESH_UPDATE "update"
+#define DEVM_NAME_MAX 30
+#define HOTPLUG_RETRY_INTERVAL_MS 100
+#define UIO_VERSION "1.0"
+
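+/*
+ * CXIP limits-management block: the CXIP_LM_* offsets below are
+ * relative to CXIP_LM_BASE_ADDRESS and are accessed through the
+ * ioremapped cxip_lm_reg_base (see CXIP_LM_CLIENTS_STATUS()).
+ */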
+#define CXIP_LM_BASE_ADDRESS      0x1FE5000
+#define CXIP_LM_ADDRESS_SIZE      0x68
+#define CXIP_LM_VOTE_STATUS       0x40
+#define CXIP_LM_BYPASS            0x44
+#define CXIP_LM_VOTE_CLEAR        0x48
+#define CXIP_LM_VOTE_SET          0x4c
+#define CXIP_LM_FEATURE_EN        0x50
+#define CXIP_LM_DISABLE_VAL       0x0
+#define CXIP_LM_BYPASS_VAL        0xFF00
+#define CXIP_LM_THERM_VOTE_VAL    0x80
+#define CXIP_LM_THERM_SENS_ID     8
+#define CXIP_LM_THERM_SENS_HIGH   90
+#define CXIP_LM_THERM_SENS_LOW    75
+
+#define VALIDATE_AND_SET_MASK(_node, _key, _mask, _cpu) \
+	do { \
+		if (of_property_read_bool(_node, _key)) \
+			_mask |= BIT(_cpu); \
+	} while (0)
+
+#define THERM_CREATE_DEBUGFS_DIR(_node, _name, _parent, _ret) \
+	do { \
+		_node = debugfs_create_dir(_name, _parent); \
+		if (IS_ERR(_node)) { \
+			_ret = PTR_ERR(_node); \
+			pr_err("Error creating debugfs dir:%s. err:%d\n", \
+					_name, _ret); \
+		} \
+	} while (0)
+
+#define UPDATE_THRESHOLD_SET(_val, _trip) do {		\
+	if (_trip == THERMAL_TRIP_CONFIGURABLE_HI)	\
+		_val |= 1;				\
+	else if (_trip == THERMAL_TRIP_CONFIGURABLE_LOW)\
+		_val |= 2;				\
+} while (0)
+
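+/*
+ * Program a high/low threshold pair (in degC, scaled by
+ * tsens_scaling_factor) on every CPU present in _mask, then arm both
+ * thresholds with the sensor framework.
+ */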
+#define UPDATE_CPU_CONFIG_THRESHOLD(_mask, _id, _high, _low) \
+	do { \
+		int cpu; \
+		for_each_possible_cpu(cpu) { \
+			if (!(_mask & BIT(cpus[cpu].cpu))) \
+				continue; \
+			cpus[cpu].threshold[_id].temp = _high \
+				* tsens_scaling_factor; \
+			cpus[cpu].threshold[_id + 1].temp = _low \
+				* tsens_scaling_factor; \
+			set_and_activate_threshold( \
+				cpus[cpu].sensor_id, \
+				&cpus[cpu].threshold[_id]); \
+			set_and_activate_threshold( \
+				cpus[cpu].sensor_id, \
+				&cpus[cpu].threshold[_id + 1]); \
+		} \
+	} while (0)
+
+static struct msm_thermal_data msm_thermal_info;
+static struct delayed_work check_temp_work, retry_hotplug_work;
+static bool core_control_enabled;
+static uint32_t cpus_offlined;
+static cpumask_var_t cpus_previously_online;
+static DEFINE_MUTEX(core_control_mutex);
+static struct kobject *cc_kobj;
+static struct kobject *mx_kobj;
+static struct task_struct *hotplug_task;
+static struct task_struct *freq_mitigation_task;
+static struct task_struct *thermal_monitor_task;
+static struct completion hotplug_notify_complete;
+static struct completion freq_mitigation_complete;
+static struct completion thermal_monitor_complete;
+
+static int enabled;
+static int polling_enabled;
+static int rails_cnt;
+static int sensor_cnt;
+static int psm_rails_cnt;
+static int ocr_rail_cnt;
+static int limit_idx;
+static int limit_idx_low;
+static int limit_idx_high;
+static int max_tsens_num;
+static struct cpufreq_frequency_table *table;
+static uint32_t usefreq;
+static int freq_table_get;
+static bool vdd_rstr_enabled;
+static bool vdd_rstr_nodes_called;
+static bool vdd_rstr_probed;
+static bool sensor_info_nodes_called;
+static bool sensor_info_probed;
+static bool psm_enabled;
+static bool psm_nodes_called;
+static bool psm_probed;
+static bool freq_mitigation_enabled;
+static bool boot_freq_mitig_enabled;
+static bool ocr_enabled;
+static bool ocr_nodes_called;
+static bool ocr_probed;
+static bool ocr_reg_init_defer;
+static bool hotplug_enabled;
+static bool msm_thermal_probed;
+static bool gfx_crit_phase_ctrl_enabled;
+static bool gfx_warm_phase_ctrl_enabled;
+static bool cx_phase_ctrl_enabled;
+static bool vdd_mx_enabled;
+static bool therm_reset_enabled;
+static bool cxip_lm_enabled;
+static bool online_core;
+static bool cluster_info_probed;
+static bool cluster_info_nodes_called;
+static bool in_suspend, retry_in_progress;
+static bool lmh_dcvs_available;
+static bool lmh_dcvs_is_supported;
+static int *tsens_id_map;
+static int *zone_id_tsens_map;
+static DEFINE_MUTEX(vdd_rstr_mutex);
+static DEFINE_MUTEX(psm_mutex);
+static DEFINE_MUTEX(cx_mutex);
+static DEFINE_MUTEX(gfx_mutex);
+static DEFINE_MUTEX(ocr_mutex);
+static DEFINE_MUTEX(vdd_mx_mutex);
+static DEFINE_MUTEX(threshold_mutex);
+static uint32_t curr_gfx_band;
+static uint32_t curr_cx_band;
+static struct kobj_attribute cx_mode_attr;
+static struct kobj_attribute gfx_mode_attr;
+static struct kobj_attribute mx_enabled_attr;
+static struct attribute_group cx_attr_gp;
+static struct attribute_group gfx_attr_gp;
+static struct attribute_group mx_attr_group;
+static struct regulator *vdd_mx, *vdd_cx;
+static int *tsens_temp_at_panic;
+static bool tsens_temp_print;
+static uint32_t bucket;
+static cpumask_t throttling_mask;
+static int tsens_scaling_factor = SENSOR_SCALING_FACTOR;
+static void __iomem *cxip_lm_reg_base;
+
+static LIST_HEAD(devices_list);
+static LIST_HEAD(thresholds_list);
+static int mitigation = 1;
+
+enum thermal_threshold {
+	HOTPLUG_THRESHOLD_HIGH,
+	HOTPLUG_THRESHOLD_LOW,
+	FREQ_THRESHOLD_HIGH,
+	FREQ_THRESHOLD_LOW,
+	THRESHOLD_MAX_NR,
+};
+
+struct cluster_info {
+	int cluster_id;
+	uint32_t entity_count;
+	struct cluster_info *child_entity_ptr;
+	struct cluster_info *parent_ptr;
+	struct cpufreq_frequency_table *freq_table;
+	int freq_idx;
+	int freq_idx_low;
+	int freq_idx_high;
+	struct cpumask cluster_cores;
+	uint32_t limited_max_freq;
+	uint32_t limited_min_freq;
+};
+
+struct cpu_info {
+	uint32_t cpu;
+	const char *sensor_type;
+	enum sensor_id_type id_type;
+	uint32_t sensor_id;
+	bool offline;
+	bool user_offline;
+	bool hotplug_thresh_clear;
+	struct sensor_threshold threshold[THRESHOLD_MAX_NR];
+	bool max_freq;
+	uint32_t user_max_freq;
+	uint32_t shutdown_max_freq;
+	uint32_t suspend_max_freq;
+	uint32_t vdd_max_freq;
+	uint32_t user_min_freq;
+	uint32_t limited_max_freq;
+	uint32_t limited_min_freq;
+	bool freq_thresh_clear;
+	struct cluster_info *parent_ptr;
+};
+
+struct rail {
+	const char			*name;
+	uint32_t			freq_req;
+	uint32_t			min_level;
+	uint32_t			num_levels;
+	int32_t				curr_level;
+	uint32_t			levels[3];
+	struct kobj_attribute		value_attr;
+	struct kobj_attribute		level_attr;
+	struct regulator		*reg;
+	struct attribute_group		attr_gp;
+	uint32_t			max_frequency_limit;
+	struct device_clnt_data		*device_handle[NR_CPUS];
+	union device_request		request[NR_CPUS];
+};
+
+struct msm_sensor_info {
+	const char *name;
+	const char *alias;
+	const char *type;
+	uint32_t scaling_factor;
+};
+
+struct psm_rail {
+	const char *name;
+	uint8_t init;
+	uint8_t mode;
+	struct kobj_attribute mode_attr;
+	struct rpm_regulator *reg;
+	struct regulator *phase_reg;
+	struct attribute_group attr_gp;
+};
+
+struct devmgr_devices {
+	struct device_manager_data *hotplug_dev;
+	struct device_manager_data *cpufreq_dev[NR_CPUS];
+};
+
+enum msm_thresh_list {
+	MSM_THERM_RESET,
+	MSM_VDD_RESTRICTION,
+	MSM_CX_PHASE_CTRL_HOT,
+	MSM_GFX_PHASE_CTRL_WARM,
+	MSM_GFX_PHASE_CTRL_HOT,
+	MSM_OCR,
+	MSM_VDD_MX_RESTRICTION,
+	MSM_THERM_CXIP_LM,
+	MSM_LIST_MAX_NR,
+};
+
+enum msm_thermal_phase_ctrl {
+	MSM_CX_PHASE_CTRL,
+	MSM_GFX_PHASE_CTRL,
+	MSM_PHASE_CTRL_NR,
+};
+
+enum msm_temp_band {
+	MSM_COLD_CRITICAL = 1,
+	MSM_COLD,
+	MSM_COOL,
+	MSM_NORMAL,
+	MSM_WARM,
+	MSM_HOT,
+	MSM_HOT_CRITICAL,
+	MSM_TEMP_MAX_NR,
+};
+
+enum cpu_mit_type {
+	CPU_FREQ_MITIGATION    = 0x1,
+	CPU_HOTPLUG_MITIGATION = 0x2,
+};
+
+enum cpu_config {
+	HOTPLUG_CONFIG,
+	CPUFREQ_CONFIG,
+	MAX_CPU_CONFIG
+};
+
+enum freq_limits {
+	FREQ_LIMIT_MIN = 0x1,
+	FREQ_LIMIT_MAX = 0x2,
+	FREQ_LIMIT_ALL = 0x3,
+};
+
+struct msm_thermal_debugfs_thresh_config {
+	char config_name[MAX_DEBUGFS_CONFIG_LEN];
+	long thresh;
+	long thresh_clr;
+	bool update;
+	void (*disable_config)(void);
+	struct dentry *dbg_config;
+	struct dentry *dbg_thresh;
+	struct dentry *dbg_thresh_clr;
+	struct dentry *dbg_thresh_update;
+};
+
+struct msm_thermal_debugfs_entry {
+	struct dentry *parent;
+	struct dentry *tsens_print;
+	struct dentry *config;
+	struct dentry *config_data;
+};
+
+static struct psm_rail *psm_rails;
+static struct psm_rail *ocr_rails;
+static struct rail *rails;
+static struct msm_sensor_info *sensors;
+static struct cpu_info cpus[NR_CPUS];
+static struct threshold_info *thresh;
+static bool mx_restr_applied;
+static struct cluster_info *core_ptr;
+static struct msm_thermal_debugfs_entry *msm_therm_debugfs;
+static struct devmgr_devices *devices;
+static struct msm_thermal_debugfs_thresh_config *mit_config;
+
+struct vdd_rstr_enable {
+	struct kobj_attribute ko_attr;
+	uint32_t enabled;
+};
+
+/* For SMPS only */
+enum PMIC_SW_MODE {
+	PMIC_AUTO_MODE  = RPM_REGULATOR_MODE_AUTO,
+	PMIC_IPEAK_MODE = RPM_REGULATOR_MODE_IPEAK,
+	PMIC_PWM_MODE   = RPM_REGULATOR_MODE_HPM,
+};
+
+enum ocr_request {
+	OPTIMUM_CURRENT_MIN,
+	OPTIMUM_CURRENT_MAX,
+	OPTIMUM_CURRENT_NR,
+};
+
+static int thermal_config_debugfs_read(struct seq_file *m, void *data);
+static ssize_t thermal_config_debugfs_write(struct file *file,
+					const char __user *buffer,
+					size_t count, loff_t *ppos);
+
+#define VDD_RES_RO_ATTRIB(_rail, ko_attr, j, _name) \
+	ko_attr.attr.name = __stringify(_name); \
+	ko_attr.attr.mode = 0444; \
+	ko_attr.show = vdd_rstr_reg_##_name##_show; \
+	ko_attr.store = NULL; \
+	sysfs_attr_init(&ko_attr.attr); \
+	_rail.attr_gp.attrs[j] = &ko_attr.attr;
+
+#define VDD_RES_RW_ATTRIB(_rail, ko_attr, j, _name) \
+	ko_attr.attr.name = __stringify(_name); \
+	ko_attr.attr.mode = 0644; \
+	ko_attr.show = vdd_rstr_reg_##_name##_show; \
+	ko_attr.store = vdd_rstr_reg_##_name##_store; \
+	sysfs_attr_init(&ko_attr.attr); \
+	_rail.attr_gp.attrs[j] = &ko_attr.attr;
+
+#define VDD_RSTR_ENABLE_FROM_ATTRIBS(attr) \
+	(container_of(attr, struct vdd_rstr_enable, ko_attr));
+
+#define VDD_RSTR_REG_VALUE_FROM_ATTRIBS(attr) \
+	(container_of(attr, struct rail, value_attr));
+
+#define VDD_RSTR_REG_LEVEL_FROM_ATTRIBS(attr) \
+	(container_of(attr, struct rail, level_attr));
+
+#define OCR_RW_ATTRIB(_rail, ko_attr, j, _name) \
+	ko_attr.attr.name = __stringify(_name); \
+	ko_attr.attr.mode = 0644; \
+	ko_attr.show = ocr_reg_##_name##_show; \
+	ko_attr.store = ocr_reg_##_name##_store; \
+	sysfs_attr_init(&ko_attr.attr); \
+	_rail.attr_gp.attrs[j] = &ko_attr.attr;
+
+#define PSM_RW_ATTRIB(_rail, ko_attr, j, _name) \
+	ko_attr.attr.name = __stringify(_name); \
+	ko_attr.attr.mode = 0644; \
+	ko_attr.show = psm_reg_##_name##_show; \
+	ko_attr.store = psm_reg_##_name##_store; \
+	sysfs_attr_init(&ko_attr.attr); \
+	_rail.attr_gp.attrs[j] = &ko_attr.attr;
+
+#define PSM_REG_MODE_FROM_ATTRIBS(attr) \
+	(container_of(attr, struct psm_rail, mode_attr));
+
+#define PHASE_RW_ATTR(_phase, _name, _attr, j, _attr_gr) \
+	_attr.attr.name = __stringify(_name); \
+	_attr.attr.mode = 0644; \
+	_attr.show = _phase##_phase_show; \
+	_attr.store = _phase##_phase_store; \
+	sysfs_attr_init(&_attr.attr); \
+	_attr_gr.attrs[j] = &_attr.attr;
+
+#define MX_RW_ATTR(ko_attr, _name, _attr_gp) \
+	ko_attr.attr.name = __stringify(_name); \
+	ko_attr.attr.mode = 0644; \
+	ko_attr.show = show_mx_##_name; \
+	ko_attr.store = store_mx_##_name; \
+	sysfs_attr_init(&ko_attr.attr); \
+	_attr_gp.attrs[0] = &ko_attr.attr;
+
+#define THERM_MITIGATION_DISABLE(_flag, _id) \
+	do { \
+		if (!_flag) \
+			return; \
+		if (_id >= 0) \
+			sensor_mgr_disable_threshold( \
+				&thresh[_id]); \
+		_flag = 0; \
+	} while (0)
+
+#define APPLY_VDD_RESTRICTION(vdd, level, name, ret)                   \
+	do {                                                              \
+		ret = regulator_set_voltage(vdd, level, INT_MAX);         \
+		if (ret) {                                                \
+			pr_err("Failed to vote %s to level %d, err %d\n", \
+			 #name, level, ret);                              \
+		} else {                                                  \
+			ret = regulator_enable(vdd);                      \
+			if (ret)                                          \
+				pr_err("Failed to enable %s, err %d\n",   \
+					#name, ret);                      \
+			else                                              \
+				pr_debug("Vote %s with level %d\n",       \
+					#name, level);                    \
+		}                                                         \
+	} while (0)
+
+#define REMOVE_VDD_RESTRICTION(vdd, name, ret)                             \
+	do {                                                                  \
+		ret = regulator_disable(vdd);                                 \
+		if (ret) {                                                    \
+			pr_err("Failed to disable %s, error %d\n",            \
+				#name, ret);                                  \
+		} else {                                                      \
+			ret = regulator_set_voltage(vdd, 0, INT_MAX);      \
+			if (ret)                                              \
+				pr_err("Failed to remove %s vote, error %d\n",\
+					#name, ret);                          \
+			else                                                  \
+				pr_debug("Remove voting to %s\n", #name);     \
+		}                                                             \
+	} while (0)
+
+#define CXIP_LM_CLIENTS_STATUS()                                        \
+	readl_relaxed(cxip_lm_reg_base + CXIP_LM_VOTE_STATUS)
+
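+/*
+ * Expose the platform MEM resource to user space as a UIO device
+ * (UIO_MEM_PHYS) so user-space thermal clients can mmap the region.
+ */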
+static void uio_init(struct platform_device *pdev)
+{
+	int ret = 0;
+	struct uio_info *uio_reg_info = NULL;
+	struct resource *clnt_res = NULL;
+	u32 mem_size = 0;
+	phys_addr_t mem_physical = 0;
+
+	clnt_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!clnt_res) {
+		pr_debug("resource not found\n");
+		goto exit;
+	}
+	mem_size = resource_size(clnt_res);
+	if (mem_size == 0) {
+		pr_err("resource memory size is zero\n");
+		goto exit;
+	}
+
+	uio_reg_info = devm_kzalloc(&pdev->dev, sizeof(struct uio_info),
+			GFP_KERNEL);
+	if (!uio_reg_info)
+		goto exit;
+
+	mem_physical = clnt_res->start;
+
+	/* Setup device */
+	uio_reg_info->name = clnt_res->name;
+	uio_reg_info->version = UIO_VERSION;
+	uio_reg_info->mem[0].addr = mem_physical;
+	uio_reg_info->mem[0].size = mem_size;
+	uio_reg_info->mem[0].memtype = UIO_MEM_PHYS;
+
+	ret = uio_register_device(&pdev->dev, uio_reg_info);
+	if (ret) {
+		devm_kfree(&pdev->dev, uio_reg_info);
+		pr_err("uio register failed ret=%d\n", ret);
+		goto exit;
+	}
+	dev_set_drvdata(&pdev->dev, uio_reg_info);
+
+exit:
+	return;
+}
+
+static void get_cluster_mask(uint32_t cpu, cpumask_t *mask)
+{
+	int i;
+
+	cpumask_set_cpu(cpu, mask);
+	if (core_ptr) {
+		for (i = 0; i < core_ptr->entity_count; i++) {
+			struct cluster_info *cluster_ptr =
+				&core_ptr->child_entity_ptr[i];
+			if (*cluster_ptr->cluster_cores.bits & BIT(cpu)) {
+				cpumask_copy(mask,
+					&cluster_ptr->cluster_cores);
+				break;
+			}
+		}
+	}
+}
+
+static uint32_t get_core_max_freq(uint32_t cpu)
+{
+	int i;
+	uint32_t max_freq = 0;
+
+	if (core_ptr) {
+		for (i = 0; i < core_ptr->entity_count; i++) {
+			struct cluster_info *cluster_ptr =
+				&core_ptr->child_entity_ptr[i];
+			if (*cluster_ptr->cluster_cores.bits & BIT(cpu)) {
+				if (cluster_ptr->freq_table)
+					max_freq =
+					cluster_ptr->freq_table
+					[cluster_ptr->freq_idx_high].frequency;
+				break;
+			}
+		}
+	} else {
+		if (table)
+			max_freq = table[limit_idx_high].frequency;
+	}
+
+	return max_freq;
+}
+
+static void cpus_previously_online_update(void)
+{
+	get_online_cpus();
+	cpumask_or(cpus_previously_online, cpus_previously_online,
+		   cpu_online_mask);
+	put_online_cpus();
+	pr_debug("%*pb\n", cpumask_pr_args(cpus_previously_online));
+}
+
+static uint32_t get_core_min_freq(uint32_t cpu)
+{
+	int i;
+	uint32_t min_freq = UINT_MAX;
+
+	if (core_ptr) {
+		for (i = 0; i < core_ptr->entity_count; i++) {
+			struct cluster_info *cluster_ptr =
+				&core_ptr->child_entity_ptr[i];
+			if (*cluster_ptr->cluster_cores.bits & BIT(cpu)) {
+				if (cluster_ptr->freq_table)
+					min_freq =
+					cluster_ptr->freq_table[0].frequency;
+				break;
+			}
+		}
+	} else {
+		if (table)
+			min_freq = table[0].frequency;
+	}
+
+	return min_freq;
+}
+
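+/*
+ * Apply or lift the shutdown/suspend frequency cap: when mitigating,
+ * every CPU in the freq mitigation mask is pinned to its lowest
+ * frequency, otherwise the cap is reset to UINT_MAX, and the freq
+ * mitigation task is woken up to apply the change.
+ */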
+static void msm_thermal_update_freq(bool is_shutdown, bool mitigate)
+{
+	uint32_t cpu;
+	bool update = false;
+
+	for_each_possible_cpu(cpu) {
+		if (msm_thermal_info.freq_mitig_control_mask
+			& BIT(cpu)) {
+			uint32_t *freq = (is_shutdown)
+				? &cpus[cpu].shutdown_max_freq
+				: &cpus[cpu].suspend_max_freq;
+			uint32_t mitigation_freq = (mitigate) ?
+				get_core_min_freq(cpu) : UINT_MAX;
+
+			if (*freq == mitigation_freq)
+				continue;
+			*freq = mitigation_freq;
+			update = true;
+			pr_debug("%s mitigate CPU%u to %u\n",
+				(is_shutdown) ? "Shutdown" : "Suspend", cpu,
+				mitigation_freq);
+		}
+	}
+
+	if (!update)
+		goto notify_exit;
+
+	if (freq_mitigation_task)
+		complete(&freq_mitigation_complete);
+	else
+		pr_err("Freq mitigation task is not initialized\n");
+notify_exit:
+	return;
+}
+
+static int msm_thermal_power_down_callback(
+		struct notifier_block *nfb, unsigned long action, void *data)
+{
+
+	switch (action) {
+	case SYS_RESTART:
+	case SYS_POWER_OFF:
+	case SYS_HALT:
+		msm_thermal_update_freq(true, true);
+		break;
+
+	default:
+		return NOTIFY_DONE;
+	}
+
+	return NOTIFY_OK;
+}
+
+static int msm_thermal_suspend_callback(
+		struct notifier_block *nfb, unsigned long action, void *data)
+{
+	switch (action) {
+	case PM_HIBERNATION_PREPARE:
+	case PM_SUSPEND_PREPARE:
+		msm_thermal_update_freq(false, true);
+		in_suspend = true;
+		retry_in_progress = false;
+		cancel_delayed_work_sync(&retry_hotplug_work);
+		break;
+
+	case PM_POST_HIBERNATION:
+	case PM_POST_SUSPEND:
+		msm_thermal_update_freq(false, false);
+		in_suspend = false;
+		if (hotplug_task)
+			complete(&hotplug_notify_complete);
+		else
+			pr_debug("Hotplug task not initialized\n");
+		break;
+
+	default:
+		return NOTIFY_DONE;
+	}
+
+	return NOTIFY_OK;
+}
+
+static struct notifier_block msm_thermal_reboot_notifier = {
+	.notifier_call = msm_thermal_power_down_callback,
+};
+
+static struct device_manager_data *find_device_by_name(const char *device_name)
+{
+	struct device_manager_data *dev_mgr = NULL;
+
+	list_for_each_entry(dev_mgr, &devices_list, dev_ptr) {
+		if (strcmp(dev_mgr->device_name, device_name) == 0)
+			return dev_mgr;
+	}
+
+	return NULL;
+}
+
+static int validate_client(struct device_clnt_data *clnt)
+{
+	int ret = 0;
+	struct device_manager_data *dev_mgr = NULL;
+	struct device_clnt_data *client_ptr = NULL;
+
+	if (!clnt || !clnt->dev_mgr) {
+		pr_err("Invalid client\n");
+		ret = -EINVAL;
+		goto validate_exit;
+	}
+
+	list_for_each_entry(dev_mgr, &devices_list, dev_ptr) {
+		if (dev_mgr == clnt->dev_mgr)
+			break;
+	}
+	if (dev_mgr != clnt->dev_mgr) {
+		pr_err("Invalid device manager\n");
+		ret = -EINVAL;
+		goto validate_exit;
+	}
+
+	mutex_lock(&dev_mgr->clnt_lock);
+	list_for_each_entry(client_ptr, &dev_mgr->client_list, clnt_ptr) {
+		if (clnt == client_ptr)
+			break;
+	}
+	if (clnt != client_ptr) {
+		pr_err("Invalid client\n");
+		ret = -EINVAL;
+		goto validate_unlock;
+	}
+validate_unlock:
+	mutex_unlock(&dev_mgr->clnt_lock);
+
+validate_exit:
+	return ret;
+}
+
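+/*
+ * Aggregate all active client requests into one device request: the
+ * lowest requested max frequency and the highest requested min
+ * frequency win, and the mitigation task is notified on any change.
+ */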
+static int devmgr_client_cpufreq_update(struct device_manager_data *dev_mgr)
+{
+	int ret = 0;
+	struct device_clnt_data *clnt = NULL;
+	uint32_t max_freq = UINT_MAX;
+	uint32_t min_freq = 0;
+
+	mutex_lock(&dev_mgr->clnt_lock);
+	list_for_each_entry(clnt, &dev_mgr->client_list, clnt_ptr) {
+		if (!clnt->req_active)
+			continue;
+		max_freq = min(max_freq, clnt->request.freq.max_freq);
+		min_freq = max(min_freq, clnt->request.freq.min_freq);
+	}
+	if (dev_mgr->active_req.freq.max_freq == max_freq &&
+		dev_mgr->active_req.freq.min_freq == min_freq) {
+		goto update_exit;
+	}
+	dev_mgr->active_req.freq.max_freq = max_freq;
+	dev_mgr->active_req.freq.min_freq = min_freq;
+
+	if (freq_mitigation_task) {
+		complete(&freq_mitigation_complete);
+	} else {
+		pr_err("Frequency mitigation task is not initialized\n");
+		ret = -ESRCH;
+	}
+
+update_exit:
+	mutex_unlock(&dev_mgr->clnt_lock);
+	return ret;
+}
+
+static int devmgr_client_hotplug_update(struct device_manager_data *dev_mgr)
+{
+	int ret = 0;
+	struct device_clnt_data *clnt = NULL;
+	cpumask_t offline_mask = CPU_MASK_NONE;
+
+	mutex_lock(&dev_mgr->clnt_lock);
+	list_for_each_entry(clnt, &dev_mgr->client_list, clnt_ptr) {
+		if (!clnt->req_active)
+			continue;
+		cpumask_or(&offline_mask, &offline_mask,
+				&clnt->request.offline_mask);
+	}
+	if (cpumask_equal(&dev_mgr->active_req.offline_mask, &offline_mask))
+		goto update_exit;
+
+	cpumask_copy(&dev_mgr->active_req.offline_mask, &offline_mask);
+
+	if (hotplug_task) {
+		complete(&hotplug_notify_complete);
+	} else {
+		pr_err("Hotplug task is not initialized\n");
+		ret = -ESRCH;
+	}
+
+update_exit:
+	mutex_unlock(&dev_mgr->clnt_lock);
+	return ret;
+}
+
+static int devmgr_hotplug_client_request_validate_and_update(
+				struct device_clnt_data *clnt,
+				union device_request *req,
+				enum device_req_type type)
+{
+	if (type != HOTPLUG_MITIGATION_REQ)
+		return -EINVAL;
+
+	cpumask_copy(&clnt->request.offline_mask, &req->offline_mask);
+
+	if (!cpumask_empty(&req->offline_mask))
+		clnt->req_active = true;
+	else
+		clnt->req_active = false;
+
+	return 0;
+}
+
+static int devmgr_cpufreq_client_request_validate_and_update(
+						struct device_clnt_data *clnt,
+						union device_request *req,
+						enum device_req_type type)
+{
+	if (type != CPUFREQ_MITIGATION_REQ)
+		return -EINVAL;
+
+	if (req->freq.max_freq < req->freq.min_freq) {
+		pr_err("Invalid Max and Min freq req. max:%u min:%u\n",
+			req->freq.max_freq, req->freq.min_freq);
+		return -EINVAL;
+	}
+
+	clnt->request.freq.max_freq = req->freq.max_freq;
+	clnt->request.freq.min_freq = req->freq.min_freq;
+
+	if ((req->freq.max_freq == CPUFREQ_MAX_NO_MITIGATION) &&
+		(req->freq.min_freq == CPUFREQ_MIN_NO_MITIGATION))
+		clnt->req_active = false;
+	else
+		clnt->req_active = true;
+
+	return 0;
+}
+
+int devmgr_client_request_mitigation(struct device_clnt_data *clnt,
+					enum device_req_type type,
+					union device_request *req)
+{
+	int ret = 0;
+	struct device_manager_data *dev_mgr = NULL;
+
+	if (!mitigation) {
+		pr_err("Thermal Mitigations disabled.\n");
+		goto req_exit;
+	}
+
+	if (!clnt || !req) {
+		pr_err("Invalid inputs for mitigation.\n");
+		ret = -EINVAL;
+		goto req_exit;
+	}
+
+	ret = validate_client(clnt);
+	if (ret) {
+		pr_err("Invalid mitigation client. ret:%d\n", ret);
+		goto req_exit;
+	}
+
+	if (!clnt->dev_mgr->request_validate) {
+		pr_err("Invalid dev mgr request update\n");
+		ret = -EINVAL;
+		goto req_exit;
+	}
+
+	dev_mgr = clnt->dev_mgr;
+	mutex_lock(&dev_mgr->clnt_lock);
+	ret = dev_mgr->request_validate(clnt, req, type);
+	if (ret) {
+		pr_err("Invalid client request\n");
+		goto req_unlock;
+	}
+
+req_unlock:
+	mutex_unlock(&dev_mgr->clnt_lock);
+	if (!ret && dev_mgr->update)
+		dev_mgr->update(dev_mgr);
+
+req_exit:
+	return ret;
+}
+
+struct device_clnt_data *devmgr_register_mitigation_client(struct device *dev,
+				const char *device_name,
+				void (*callback)(struct device_clnt_data *,
+				union device_request *, void *))
+{
+	struct device_clnt_data *client = NULL;
+	struct device_manager_data *dev_mgr = NULL;
+
+	if (!dev || !device_name) {
+		pr_err("Invalid input\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	dev_mgr = find_device_by_name(device_name);
+	if (!dev_mgr) {
+		pr_err("Invalid device %s\n", device_name);
+		return ERR_PTR(-EINVAL);
+	}
+
+	client = devm_kzalloc(dev,
+		sizeof(struct device_clnt_data), GFP_KERNEL);
+	if (!client) {
+		pr_err("Memory alloc failed\n");
+		return ERR_PTR(-ENOMEM);
+	}
+
+	mutex_lock(&dev_mgr->clnt_lock);
+	client->dev_mgr = dev_mgr;
+	client->callback = callback;
+	list_add_tail(&client->clnt_ptr, &dev_mgr->client_list);
+	mutex_unlock(&dev_mgr->clnt_lock);
+
+	return client;
+}
+
+void devmgr_unregister_mitigation_client(struct device *dev,
+					struct device_clnt_data *clnt)
+{
+	int ret = 0;
+	struct device_manager_data *dev_mgr = NULL;
+
+	if (!clnt) {
+		pr_err("Invalid input\n");
+		return;
+	}
+
+	ret = validate_client(clnt);
+	if (ret)
+		return;
+
+	dev_mgr = clnt->dev_mgr;
+	mutex_lock(&dev_mgr->clnt_lock);
+	list_del(&clnt->clnt_ptr);
+	mutex_unlock(&dev_mgr->clnt_lock);
+	devm_kfree(dev, clnt);
+	if (dev_mgr->update)
+		dev_mgr->update(dev_mgr);
+}
+
+static int  msm_thermal_cpufreq_callback(struct notifier_block *nfb,
+		unsigned long event, void *data)
+{
+	struct cpufreq_policy *policy = data;
+	uint32_t max_freq_req, min_freq_req;
+
+	switch (event) {
+	case CPUFREQ_ADJUST:
+		max_freq_req = (lmh_dcvs_is_supported) ? UINT_MAX :
+			cpus[policy->cpu].parent_ptr->limited_max_freq;
+		min_freq_req = cpus[policy->cpu].parent_ptr->limited_min_freq;
+		pr_debug("mitigating CPU%d to freq max: %u min: %u\n",
+			policy->cpu, max_freq_req, min_freq_req);
+
+		cpufreq_verify_within_limits(policy, min_freq_req,
+			max_freq_req);
+
+		if (max_freq_req < min_freq_req)
+			pr_err("Invalid frequency request Max:%u Min:%u\n",
+				max_freq_req, min_freq_req);
+		break;
+	}
+	return NOTIFY_OK;
+}
+
+static struct notifier_block msm_thermal_cpufreq_notifier = {
+	.notifier_call = msm_thermal_cpufreq_callback,
+};
+
+static int msm_lmh_dcvs_write(uint32_t node_id, uint32_t fn, uint32_t setting,
+				uint32_t val)
+{
+	int ret;
+	struct scm_desc desc_arg;
+	uint32_t *payload = NULL;
+
+	payload = kzalloc(sizeof(uint32_t) * 5, GFP_KERNEL);
+	if (!payload)
+		return -ENOMEM;
+
+	payload[0] = fn;
+	payload[1] = 0; /* unused sub-algorithm */
+	payload[2] = setting;
+	payload[3] = 1; /* number of values */
+	payload[4] = val;
+
+	desc_arg.args[0] = SCM_BUFFER_PHYS(payload);
+	desc_arg.args[1] = sizeof(uint32_t) * 5;
+	desc_arg.args[2] = MSM_LIMITS_NODE_DCVS;
+	desc_arg.args[3] = node_id;
+	desc_arg.args[4] = 0; /* version */
+	desc_arg.arginfo = SCM_ARGS(5, SCM_RO, SCM_VAL, SCM_VAL,
+					SCM_VAL, SCM_VAL);
+
+	dmac_flush_range(payload, (void *)payload + 5 * (sizeof(uint32_t)));
+	ret = scm_call2(SCM_SIP_FNID(SCM_SVC_LMH, MSM_LIMITS_DCVSH), &desc_arg);
+
+	kfree(payload);
+	return ret;
+}
+
+static int msm_lmh_dcvs_update(int cpu)
+{
+	uint32_t id = cpus[cpu].parent_ptr->cluster_id;
+	uint32_t max_freq = cpus[cpu].limited_max_freq;
+	uint32_t min_freq = cpus[cpu].limited_min_freq;
+	uint32_t affinity;
+	int ret;
+
+	/*
+	 * If cluster mitigation is supported, use the cluster's max/min
+	 * limits for the given CPU. This ensures that the aggregated
+	 * max/min limits of all CPUs in that cluster are requested.
+	 */
+	if (core_ptr) {
+		max_freq = cpus[cpu].parent_ptr->limited_max_freq;
+		min_freq = cpus[cpu].parent_ptr->limited_min_freq;
+	}
+
+	switch (id) {
+	case 0:
+		affinity = MSM_LIMITS_CLUSTER_0;
+		break;
+	case 1:
+		affinity = MSM_LIMITS_CLUSTER_1;
+		break;
+	default:
+		pr_err("unknown affinity %d\n", id);
+		return -EINVAL;
+	}
+
+	ret = msm_lmh_dcvs_write(affinity, MSM_LIMITS_SUB_FN_GENERAL,
+					MSM_LIMITS_DOMAIN_MAX, max_freq);
+	if (ret)
+		return ret;
+
+	ret = msm_lmh_dcvs_write(affinity, MSM_LIMITS_SUB_FN_GENERAL,
+					MSM_LIMITS_DOMAIN_MIN, min_freq);
+	if (ret)
+		return ret;
+	/*
+	 * Notify the LMH DCVS driver about the new software limit. This
+	 * triggers its polling for the mitigated frequency.
+	 */
+	msm_lmh_dcvsh_sw_notify(cpu);
+
+	return ret;
+}
+
+static void update_cpu_freq(int cpu, enum freq_limits changed)
+{
+	int ret = 0;
+	cpumask_t mask;
+
+	/*
+	 * If the limits overshoot each other, choose the min requirement
+	 * over the max freq requirement.
+	 */
+	if (cpus[cpu].limited_min_freq > cpus[cpu].limited_max_freq)
+		cpus[cpu].limited_max_freq = cpus[cpu].limited_min_freq;
+
+	get_cluster_mask(cpu, &mask);
+	if (cpu_online(cpu)) {
+		if ((cpumask_intersects(&mask, &throttling_mask))
+			&& (cpus[cpu].limited_max_freq
+				>= get_core_max_freq(cpu))) {
+			cpumask_xor(&throttling_mask, &mask, &throttling_mask);
+			set_cpu_throttled(&mask, false);
+		} else if (!cpumask_intersects(&mask, &throttling_mask)) {
+			cpumask_or(&throttling_mask, &mask, &throttling_mask);
+			set_cpu_throttled(&mask, true);
+		}
+		trace_thermal_pre_frequency_mit(cpu,
+			cpus[cpu].limited_max_freq,
+			cpus[cpu].limited_min_freq);
+
+		/*
+		 * If LMH DCVS is available, update the hardware directly
+		 * for a faster response. However, LMH DCVS does not
+		 * aggregate the min freq correctly - cpufreq could be
+		 * voting for a lower min freq than we desire, and that
+		 * vote would be honored. Update cpufreq too, so the min
+		 * freq remains consistent in the hardware.
+		 */
+		if (lmh_dcvs_available) {
+			msm_lmh_dcvs_update(cpu);
+			if (changed & FREQ_LIMIT_MIN)
+				cpufreq_update_policy(cpu);
+		} else {
+			cpufreq_update_policy(cpu);
+		}
+
+		trace_thermal_post_frequency_mit(cpu,
+			cpufreq_quick_get_max(cpu),
+			cpus[cpu].limited_min_freq);
+		if (ret)
+			pr_err("Unable to update policy for cpu:%d. err:%d\n",
+				cpu, ret);
+	} else if (lmh_dcvs_available) {
+		trace_thermal_pre_frequency_mit(cpu,
+			cpus[cpu].limited_max_freq,
+			cpus[cpu].limited_min_freq);
+		msm_lmh_dcvs_update(cpu);
+		trace_thermal_post_frequency_mit(cpu,
+			cpufreq_quick_get_max(cpu),
+			cpus[cpu].limited_min_freq);
+	}
+}
+
+static ssize_t cluster_info_show(
+	struct kobject *kobj, struct kobj_attribute *attr, char *buf)
+{
+	uint32_t i = 0;
+	ssize_t tot_size = 0, size = 0;
+
+	for (; i < core_ptr->entity_count; i++) {
+		struct cluster_info *cluster_ptr =
+				&core_ptr->child_entity_ptr[i];
+
+		size = snprintf(&buf[tot_size], PAGE_SIZE - tot_size,
+			"%d:%lu:1 ", cluster_ptr->cluster_id,
+			*cluster_ptr->cluster_cores.bits);
+		if ((tot_size + size) >= PAGE_SIZE) {
+			pr_err("Insufficient buffer size\n");
+			break;
+		}
+		tot_size += size;
+	}
+
+	return tot_size;
+}
+
+static int thermal_config_debugfs_open(struct inode *inode,
+					struct file *file)
+{
+	return single_open(file, thermal_config_debugfs_read,
+				inode->i_private);
+}
+
+static const struct file_operations thermal_debugfs_config_ops = {
+	.open = thermal_config_debugfs_open,
+	.read = seq_read,
+	.write = thermal_config_debugfs_write,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
+
+static int create_config_debugfs(
+		struct msm_thermal_debugfs_thresh_config *config_ptr,
+		struct dentry *parent)
+{
+	int ret = 0;
+
+	if (!strlen(config_ptr->config_name))
+		return -ENODEV;
+
+	THERM_CREATE_DEBUGFS_DIR(config_ptr->dbg_config,
+		config_ptr->config_name, parent, ret);
+	if (ret)
+		goto create_exit;
+
+	config_ptr->dbg_thresh = debugfs_create_u64(MSM_THERMAL_THRESH,
+		0600, config_ptr->dbg_config, (u64 *)&config_ptr->thresh);
+	if (IS_ERR(config_ptr->dbg_thresh)) {
+		ret = PTR_ERR(config_ptr->dbg_thresh);
+		pr_err("Error creating thresh debugfs:[%s]. error:%d\n",
+			config_ptr->config_name, ret);
+		goto create_exit;
+	}
+
+	config_ptr->dbg_thresh_clr = debugfs_create_u64(MSM_THERMAL_THRESH_CLR,
+		0600, config_ptr->dbg_config, (u64 *)&config_ptr->thresh_clr);
+	if (IS_ERR(config_ptr->dbg_thresh_clr)) {
+		ret = PTR_ERR(config_ptr->dbg_thresh_clr);
+		pr_err("Error creating thresh_clr debugfs:[%s]. error:%d\n",
+			config_ptr->config_name, ret);
+		goto create_exit;
+	}
+
+	config_ptr->dbg_thresh_update = debugfs_create_bool(
+		MSM_THERMAL_THRESH_UPDATE, 0600, config_ptr->dbg_config,
+		&config_ptr->update);
+	if (IS_ERR(config_ptr->dbg_thresh_update)) {
+		ret = PTR_ERR(config_ptr->dbg_thresh_update);
+		pr_err("Error creating enable debugfs:[%s]. error:%d\n",
+			config_ptr->config_name, ret);
+		goto create_exit;
+	}
+
+create_exit:
+	if (ret)
+		debugfs_remove_recursive(parent);
+
+	return ret;
+}
+
+static int create_thermal_debugfs(void)
+{
+	int ret = 0, idx = 0;
+
+	if (msm_therm_debugfs)
+		return ret;
+
+	msm_therm_debugfs = devm_kzalloc(&msm_thermal_info.pdev->dev,
+			sizeof(struct msm_thermal_debugfs_entry), GFP_KERNEL);
+	if (!msm_therm_debugfs) {
+		ret = -ENOMEM;
+		pr_err("Memory alloc failed. err:%d\n", ret);
+		return ret;
+	}
+
+	THERM_CREATE_DEBUGFS_DIR(msm_therm_debugfs->parent, MSM_THERMAL_NAME,
+		NULL, ret);
+	if (ret)
+		goto create_exit;
+
+	msm_therm_debugfs->tsens_print = debugfs_create_bool(MSM_TSENS_PRINT,
+			0600, msm_therm_debugfs->parent, &tsens_temp_print);
+	if (IS_ERR(msm_therm_debugfs->tsens_print)) {
+		ret = PTR_ERR(msm_therm_debugfs->tsens_print);
+		pr_err("Error creating debugfs:[%s]. err:%d\n",
+			MSM_TSENS_PRINT, ret);
+		goto create_exit;
+	}
+
+	THERM_CREATE_DEBUGFS_DIR(msm_therm_debugfs->config, MSM_THERMAL_CONFIG,
+		msm_therm_debugfs->parent, ret);
+	if (ret)
+		goto create_exit;
+
+	msm_therm_debugfs->config_data = debugfs_create_file(MSM_CONFIG_DATA,
+			0600, msm_therm_debugfs->config, NULL,
+			&thermal_debugfs_config_ops);
+	if (!msm_therm_debugfs->config_data) {
+		ret = -ENODEV;
+		pr_err("Error creating debugfs:[%s]\n",
+			MSM_CONFIG_DATA);
+		goto create_exit;
+	}
+	for (idx = 0; idx < MSM_LIST_MAX_NR + MAX_CPU_CONFIG; idx++)
+		create_config_debugfs(&mit_config[idx],
+			msm_therm_debugfs->config);
+
+create_exit:
+	if (ret) {
+		debugfs_remove_recursive(msm_therm_debugfs->parent);
+		devm_kfree(&msm_thermal_info.pdev->dev, msm_therm_debugfs);
+	}
+	return ret;
+}
+
+static struct kobj_attribute cluster_info_attr = __ATTR_RO(cluster_info);
+static int create_cpu_topology_sysfs(void)
+{
+	int ret = 0;
+	struct kobject *module_kobj = NULL;
+
+	if (!cluster_info_probed) {
+		cluster_info_nodes_called = true;
+		return ret;
+	}
+	if (!core_ptr)
+		return ret;
+
+	module_kobj = kset_find_obj(module_kset, KBUILD_MODNAME);
+	if (!module_kobj) {
+		pr_err("cannot find kobject\n");
+		return -ENODEV;
+	}
+
+	sysfs_attr_init(&cluster_info_attr.attr);
+	ret = sysfs_create_file(module_kobj, &cluster_info_attr.attr);
+	if (ret) {
+		pr_err("cannot create cluster info attr group. err:%d\n", ret);
+		return ret;
+	}
+
+	return ret;
+}
+
+static int get_device_tree_cluster_info(struct device *dev, int *cluster_id,
+			cpumask_t *cluster_cpus)
+{
+	int idx = 0, ret = 0, max_entry = 0, core_cnt = 0, c_idx = 0, cpu = 0;
+	uint32_t val = 0;
+	char *key = "qcom,synchronous-cluster-map";
+	struct device_node *core_phandle = NULL;
+
+	if (!of_get_property(dev->of_node, key, &max_entry)
+		|| max_entry <= 0) {
+		pr_debug("Property %s not defined.\n", key);
+		return -ENODEV;
+	}
+	max_entry /= sizeof(__be32);
+
+	for (idx = 0; idx < max_entry; idx++, c_idx++) {
+		/* Read Cluster ID */
+		ret = of_property_read_u32_index(dev->of_node, key, idx++,
+			&val);
+		if (ret) {
+			pr_err("Error reading index %d. err:%d\n", idx - 1,
+				ret);
+			return -EINVAL;
+		}
+		/* Read number of cores inside a cluster */
+		cluster_id[c_idx] = val;
+		cpumask_clear(&cluster_cpus[c_idx]);
+		ret = of_property_read_u32_index(dev->of_node, key, idx,
+			&val);
+		if (ret || val < 1) {
+			pr_err("Invalid core count[%d] for Cluster%d. err:%d\n",
+					val, cluster_id[c_idx - 1], ret);
+			return -EINVAL;
+		}
+		core_cnt = val + idx;
+		/* map the cores to logical CPUs and get the sibling mask */
+		for (; core_cnt != idx; core_cnt--) {
+			core_phandle = of_parse_phandle(dev->of_node, key,
+						core_cnt);
+			if (!core_phandle) {
+				pr_debug("Invalid phandle. core%d cluster%d\n",
+					core_cnt, cluster_id[c_idx - 1]);
+				continue;
+			}
+
+			for_each_possible_cpu(cpu) {
+				if (of_get_cpu_node(cpu, NULL)
+					== core_phandle)
+					break;
+			}
+			if (cpu >= num_possible_cpus()) {
+				pr_debug("Skipping core%d in cluster%d\n",
+					core_cnt, cluster_id[c_idx - 1]);
+				continue;
+			}
+			cpumask_set_cpu(cpu, &cluster_cpus[c_idx]);
+		}
+		idx += val;
+	}
+
+	return c_idx;
+}
+
+static int get_kernel_cluster_info(int *cluster_id, cpumask_t *cluster_cpus)
+{
+	uint32_t _cpu, cluster_index, cluster_cnt;
+
+	for (_cpu = 0, cluster_cnt = 0; _cpu < num_possible_cpus(); _cpu++) {
+		if (topology_physical_package_id(_cpu) < 0) {
+			pr_err("CPU%d topology not initialized.\n", _cpu);
+			return -ENODEV;
+		}
+		/*
+		 * Do not use the sibling cpumask from the topology
+		 * module: it updates the sibling cpumask only when the
+		 * cores are brought online for the first time. KTM
+		 * figures out the sibling cpumask using the cluster and
+		 * core ID mapping instead.
+		 */
+		for (cluster_index = 0; cluster_index < num_possible_cpus();
+			cluster_index++) {
+			if (cluster_id[cluster_index] == -1) {
+				cluster_id[cluster_index] =
+					topology_physical_package_id(_cpu);
+				cpumask_clear(&cluster_cpus[cluster_index]);
+				cpumask_set_cpu(_cpu,
+					&cluster_cpus[cluster_index]);
+				cluster_cnt++;
+				break;
+			}
+			if (cluster_id[cluster_index] ==
+				topology_physical_package_id(_cpu)) {
+				cpumask_set_cpu(_cpu,
+					&cluster_cpus[cluster_index]);
+				break;
+			}
+		}
+	}
+
+	return cluster_cnt;
+}
+
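+/*
+ * Build the cluster hierarchy: prefer the topology reported by the
+ * kernel and fall back to the qcom,synchronous-cluster-map DT
+ * property when the package IDs are not yet initialized.
+ */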
+static void update_cpu_topology(struct device *dev)
+{
+	int cluster_id[NR_CPUS] = {[0 ... NR_CPUS-1] = -1};
+	cpumask_t cluster_cpus[NR_CPUS];
+	uint32_t i;
+	int cluster_cnt;
+	struct cluster_info *temp_ptr = NULL;
+
+	cluster_info_probed = true;
+	cluster_cnt = get_kernel_cluster_info(cluster_id, cluster_cpus);
+	if (cluster_cnt <= 0) {
+		cluster_cnt = get_device_tree_cluster_info(dev, cluster_id,
+						cluster_cpus);
+		if (cluster_cnt <= 0) {
+			core_ptr = NULL;
+			pr_debug("Cluster Info not defined. KTM continues.\n");
+			return;
+		}
+	}
+
+	core_ptr = devm_kzalloc(dev, sizeof(struct cluster_info), GFP_KERNEL);
+	if (!core_ptr) {
+		pr_err("Memory alloc failed\n");
+		return;
+	}
+	core_ptr->parent_ptr = NULL;
+	core_ptr->entity_count = cluster_cnt;
+	core_ptr->cluster_id = -1;
+
+	temp_ptr = devm_kzalloc(dev, sizeof(struct cluster_info) * cluster_cnt,
+					GFP_KERNEL);
+	if (!temp_ptr) {
+		pr_err("Memory alloc failed\n");
+		devm_kfree(dev, core_ptr);
+		core_ptr = NULL;
+		return;
+	}
+
+	for (i = 0; i < cluster_cnt; i++) {
+		int idx = 0;
+
+		pr_debug("Cluster_ID:%d CPU's:%lu\n", cluster_id[i],
+				*cpumask_bits(&cluster_cpus[i]));
+		temp_ptr[i].cluster_id = cluster_id[i];
+		temp_ptr[i].parent_ptr = core_ptr;
+		cpumask_copy(&temp_ptr[i].cluster_cores, &cluster_cpus[i]);
+		temp_ptr[i].limited_max_freq = UINT_MAX;
+		temp_ptr[i].limited_min_freq = 0;
+		temp_ptr[i].freq_idx = 0;
+		temp_ptr[i].freq_idx_low = 0;
+		temp_ptr[i].freq_idx_high = 0;
+		temp_ptr[i].freq_table = NULL;
+		temp_ptr[i].entity_count = cpumask_weight(&cluster_cpus[i]);
+		for_each_cpu(idx, &temp_ptr[i].cluster_cores) {
+			cpus[idx].parent_ptr = &temp_ptr[i];
+		}
+		temp_ptr[i].child_entity_ptr = NULL;
+	}
+	core_ptr->child_entity_ptr = temp_ptr;
+}
+
+static int get_cpu_freq_plan_len(int cpu)
+{
+	int table_len = 0;
+	struct device *cpu_dev = NULL;
+
+	cpu_dev = get_cpu_device(cpu);
+	if (!cpu_dev) {
+		pr_err("Error getting CPU%d device\n", cpu);
+		goto exit;
+	}
+
+	rcu_read_lock();
+	table_len = dev_pm_opp_get_opp_count(cpu_dev);
+	if (table_len <= 0) {
+		pr_err("Error reading CPU%d freq table len. error:%d\n",
+			cpu, table_len);
+		table_len = 0;
+		goto unlock_and_exit;
+	}
+
+unlock_and_exit:
+	rcu_read_unlock();
+
+exit:
+	return table_len;
+}
+
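+/*
+ * Walk the OPP table in ascending order: dev_pm_opp_find_freq_ceil()
+ * returns the lowest OPP at or above *freq, and bumping freq by 1 Hz
+ * afterwards advances the search past the entry just found.
+ */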
+static int get_cpu_freq_plan(int cpu,
+		 struct cpufreq_frequency_table *freq_table_ptr)
+{
+	int table_len = 0;
+	struct dev_pm_opp *opp = NULL;
+	unsigned long freq = 0;
+	struct device *cpu_dev = NULL;
+
+	cpu_dev = get_cpu_device(cpu);
+	if (!cpu_dev) {
+		pr_err("Error getting CPU%d device\n", cpu);
+		goto exit;
+	}
+
+	rcu_read_lock();
+	while (!IS_ERR(opp = dev_pm_opp_find_freq_ceil(cpu_dev, &freq))) {
+		/* Convert from Hz to kHz */
+		freq_table_ptr[table_len].frequency = freq / 1000;
+		pr_debug("cpu%d freq %d :%d\n", cpu, table_len,
+			freq_table_ptr[table_len].frequency);
+		freq++;
+		table_len++;
+	}
+	rcu_read_unlock();
+
+exit:
+	return table_len;
+}
+
+static int init_cluster_freq_table(void)
+{
+	uint32_t _cluster = 0;
+	int table_len = 0;
+	int ret = 0;
+	struct cluster_info *cluster_ptr = NULL;
+
+	for (; _cluster < core_ptr->entity_count; _cluster++, table_len = 0) {
+		cluster_ptr = &core_ptr->child_entity_ptr[_cluster];
+		if (cluster_ptr->freq_table)
+			continue;
+
+		table_len = get_cpu_freq_plan_len(
+				cpumask_first(&cluster_ptr->cluster_cores));
+		if (!table_len) {
+			ret = -EAGAIN;
+			continue;
+		}
+		cluster_ptr->freq_idx_low = 0;
+		cluster_ptr->freq_idx_high = cluster_ptr->freq_idx =
+				table_len - 1;
+		if (cluster_ptr->freq_idx_high < 0
+			|| (cluster_ptr->freq_idx_high
+			< cluster_ptr->freq_idx_low)) {
+			cluster_ptr->freq_idx = cluster_ptr->freq_idx_low =
+				cluster_ptr->freq_idx_high = 0;
+			WARN(1, "Cluster%d frequency table length:%d\n",
+				cluster_ptr->cluster_id, table_len);
+			ret = -EINVAL;
+			goto exit;
+		}
+		cluster_ptr->freq_table = kzalloc(
+			sizeof(struct cpufreq_frequency_table) * table_len,
+			GFP_KERNEL);
+		if (!cluster_ptr->freq_table) {
+			pr_err("Memory alloc failed\n");
+			cluster_ptr->freq_idx = cluster_ptr->freq_idx_low =
+				cluster_ptr->freq_idx_high = 0;
+			ret = -ENOMEM;
+			goto exit;
+		}
+		table_len = get_cpu_freq_plan(
+				cpumask_first(&cluster_ptr->cluster_cores),
+				cluster_ptr->freq_table);
+		if (!table_len) {
+			kfree(cluster_ptr->freq_table);
+			cluster_ptr->freq_table = NULL;
+			pr_err("Error reading cluster%d cpufreq table\n",
+				cluster_ptr->cluster_id);
+			ret = -EAGAIN;
+			continue;
+		}
+	}
+
+exit:
+	return ret;
+}
+
+static void update_cluster_freq(void)
+{
+	int online_cpu = -1;
+	struct cluster_info *cluster_ptr = NULL;
+	uint32_t _cluster = 0, _cpu = 0, max = UINT_MAX, min = 0;
+	uint32_t changed;
+
+	if (!core_ptr)
+		return;
+
+	for (; _cluster < core_ptr->entity_count; _cluster++, _cpu = 0,
+			online_cpu = -1, max = UINT_MAX, min = 0) {
+		/*
+		 * Go over the frequency limits of each core in the cluster
+		 * and aggregate the minimum and maximum frequencies. After
+		 * aggregating, request a frequency update on the first
+		 * online core in that cluster. The cpufreq driver takes
+		 * care of updating the frequency of other cores in a
+		 * synchronous cluster.
+		 */
+		cluster_ptr = &core_ptr->child_entity_ptr[_cluster];
+
+		for_each_cpu(_cpu, &cluster_ptr->cluster_cores) {
+			if (online_cpu == -1 && cpu_online(_cpu))
+				online_cpu = _cpu;
+			max = min(max, cpus[_cpu].limited_max_freq);
+			min = max(min, cpus[_cpu].limited_min_freq);
+		}
+		if (cluster_ptr->limited_max_freq == max
+			&& cluster_ptr->limited_min_freq == min)
+			continue;
+		changed = 0;
+		if (max != cluster_ptr->limited_max_freq)
+			changed |= FREQ_LIMIT_MAX;
+		if (min != cluster_ptr->limited_min_freq)
+			changed |= FREQ_LIMIT_MIN;
+		cluster_ptr->limited_max_freq = max;
+		cluster_ptr->limited_min_freq = min;
+		if (online_cpu == -1 && lmh_dcvs_available)
+			online_cpu = cpumask_first(
+					&cluster_ptr->cluster_cores);
+		if (online_cpu != -1)
+			update_cpu_freq(online_cpu, changed);
+	}
+}
+
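+/*
+ * Per-cluster boot-time frequency mitigation: step each cluster's
+ * frequency index down by bootup_freq_step once @temp reaches
+ * limit_temp_degC, and back up once it drops below the hysteresis
+ * window. CPUs outside bootup_freq_control_mask are left alone.
+ */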
+static void do_cluster_freq_ctrl(int temp)
+{
+	uint32_t _cluster = 0;
+	int _cpu = -1, freq_idx = 0;
+	bool mitigate = false;
+	struct cluster_info *cluster_ptr = NULL;
+
+	if (temp >= msm_thermal_info.limit_temp_degC)
+		mitigate = true;
+	else if (temp < msm_thermal_info.limit_temp_degC -
+		 msm_thermal_info.temp_hysteresis_degC)
+		mitigate = false;
+	else
+		return;
+
+	get_online_cpus();
+	for (; _cluster < core_ptr->entity_count; _cluster++) {
+		cluster_ptr = &core_ptr->child_entity_ptr[_cluster];
+		if (!cluster_ptr->freq_table)
+			continue;
+
+		if (mitigate)
+			freq_idx = max_t(int, cluster_ptr->freq_idx_low,
+				(cluster_ptr->freq_idx
+				- msm_thermal_info.bootup_freq_step));
+		else
+			freq_idx = min_t(int, cluster_ptr->freq_idx_high,
+				(cluster_ptr->freq_idx
+				+ msm_thermal_info.bootup_freq_step));
+		if (freq_idx == cluster_ptr->freq_idx)
+			continue;
+
+		cluster_ptr->freq_idx = freq_idx;
+		for_each_cpu(_cpu, &cluster_ptr->cluster_cores) {
+			if (!(msm_thermal_info.bootup_freq_control_mask
+				& BIT(_cpu)))
+				continue;
+			pr_info("Limiting CPU%d max frequency to %u. Temp:%d\n"
+				, _cpu
+				, cluster_ptr->freq_table[freq_idx].frequency
+				, temp);
+			cpus[_cpu].limited_max_freq = min(
+				cluster_ptr->freq_table[freq_idx].frequency,
+				cpus[_cpu].vdd_max_freq);
+		}
+	}
+	if (_cpu != -1)
+		update_cluster_freq();
+	put_online_cpus();
+}
+
+/**
+ * msm_thermal_lmh_dcvs_init: Initialize LMH DCVS hardware block
+ *
+ * @pdev: handle to the thermal device node
+ *
+ * Probe for the 'OSM clock' and initialize the LMH DCVS blocks.
+ */
+static int msm_thermal_lmh_dcvs_init(struct platform_device *pdev)
+{
+	struct clk *osm_clk;
+	const char *clk_name = "osm";
+	int ret = 0;
+
+	/* We are okay if the osm clock is not present in DT */
+	osm_clk = devm_clk_get(&pdev->dev, clk_name);
+	if (IS_ERR(osm_clk))
+		return ret;
+
+	/*
+	 * We don't actually need the clock; we just want to make sure
+	 * the OSM block is ready.
+	 */
+	devm_clk_put(&pdev->dev, osm_clk);
+
+	/*
+	 * Enable the CRNT and Reliability algorithms. Again, we don't
+	 * care if this fails.
+	 */
+	ret = msm_lmh_dcvs_write(MSM_LIMITS_CLUSTER_0,
+				MSM_LIMITS_SUB_FN_REL,
+				MSM_LIMITS_ALGO_MODE_ENABLE, 1);
+	if (ret)
+		pr_err("Unable to enable REL algo for cluster0\n");
+	ret = msm_lmh_dcvs_write(MSM_LIMITS_CLUSTER_1,
+				MSM_LIMITS_SUB_FN_REL,
+				MSM_LIMITS_ALGO_MODE_ENABLE, 1);
+	if (ret)
+		pr_err("Unable to enable REL algo for cluster1\n");
+
+	ret = msm_lmh_dcvs_write(MSM_LIMITS_CLUSTER_0,
+				MSM_LIMITS_SUB_FN_CRNT,
+				MSM_LIMITS_ALGO_MODE_ENABLE, 1);
+	if (ret)
+		pr_err("Unable to enable CRNT algo for cluster0\n");
+	ret = msm_lmh_dcvs_write(MSM_LIMITS_CLUSTER_1,
+				MSM_LIMITS_SUB_FN_CRNT,
+				MSM_LIMITS_ALGO_MODE_ENABLE, 1);
+	if (ret)
+		pr_err("Unable to enable CRNT algo for cluster1\n");
+
+	lmh_dcvs_available = true;
+
+	return ret;
+}
+
+/* If freq table exists, then we can send freq request */
+static int check_freq_table(void)
+{
+	int ret = 0;
+	static bool invalid_table;
+	int table_len = 0;
+
+	if (invalid_table)
+		return -EINVAL;
+	if (freq_table_get)
+		return 0;
+
+	if (core_ptr) {
+		ret = init_cluster_freq_table();
+		if (!ret)
+			freq_table_get = 1;
+		else if (ret == -EINVAL)
+			invalid_table = true;
+		goto exit;
+	}
+
+	table_len = get_cpu_freq_plan_len(0);
+	if (!table_len)
+		return -EINVAL;
+
+	table = kzalloc(sizeof(struct cpufreq_frequency_table)
+			* table_len, GFP_KERNEL);
+	if (!table) {
+		ret = -ENOMEM;
+		goto exit;
+	}
+	table_len = get_cpu_freq_plan(0, table);
+	if (!table_len) {
+		pr_err("error reading cpufreq table\n");
+		ret = -EINVAL;
+		goto free_and_exit;
+	}
+
+	limit_idx_low = 0;
+	limit_idx_high = limit_idx = table_len - 1;
+	if (limit_idx_high < 0 || limit_idx_high < limit_idx_low) {
+		invalid_table = true;
+		limit_idx_low = limit_idx_high = limit_idx = 0;
+		WARN(1, "CPU0 frequency table length:%d\n", table_len);
+		ret = -EINVAL;
+		goto free_and_exit;
+	}
+	freq_table_get = 1;
+
+free_and_exit:
+	if (ret) {
+		kfree(table);
+		table = NULL;
+	}
+
+exit:
+	if (!ret) {
+		int err;
+
+		err = msm_thermal_lmh_dcvs_init(msm_thermal_info.pdev);
+		if (err)
+			pr_err("Error initializing OSM\n");
+	}
+	return ret;
+}
+
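+/*
+ * Apply a VDD-restriction minimum frequency @min to all CPUs, clamped
+ * against each cluster's (or CPU0's) allowed frequency range. When the
+ * freq mitigation task is running, the request goes through the device
+ * manager; otherwise the per-CPU limits are written directly and
+ * update_cluster_freq() propagates them.
+ */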
+static int update_cpu_min_freq_all(struct rail *apss_rail, uint32_t min)
+{
+	uint32_t cpu = 0, _cluster = 0, max_freq = UINT_MAX;
+	int ret = 0;
+	struct cluster_info *cluster_ptr = NULL;
+	bool valid_table = false;
+
+	if (!freq_table_get) {
+		ret = check_freq_table();
+		if (ret && !core_ptr) {
+			pr_err("Failed to get freq table. err:%d\n", ret);
+			return ret;
+		}
+	}
+	if (min != apss_rail->min_level)
+		max_freq = apss_rail->max_frequency_limit;
+
+	get_online_cpus();
+	/* If min is larger than allowed max */
+	if (core_ptr) {
+		for (; _cluster < core_ptr->entity_count; _cluster++) {
+			cluster_ptr = &core_ptr->child_entity_ptr[_cluster];
+			if (!cluster_ptr->freq_table)
+				continue;
+			valid_table = true;
+			min = min(min,
+				cluster_ptr->freq_table[
+				cluster_ptr->freq_idx_high].frequency);
+			max_freq = max(max_freq, cluster_ptr->freq_table[
+				cluster_ptr->freq_idx_low].frequency);
+		}
+		if (!valid_table)
+			goto update_freq_exit;
+	} else {
+		min = min(min, table[limit_idx_high].frequency);
+		max_freq = max(max_freq, table[limit_idx_low].frequency);
+	}
+
+	pr_debug("Requesting min freq:%u max freq:%u for all CPUs\n",
+		min, max_freq);
+	if (freq_mitigation_task) {
+		if (!apss_rail->device_handle[0]) {
+			pr_err("device manager handle not registered\n");
+			ret = -ENODEV;
+			goto update_freq_exit;
+		}
+		for_each_possible_cpu(cpu) {
+			cpus[cpu].vdd_max_freq = max_freq;
+			apss_rail->request[cpu].freq.max_freq = max_freq;
+			apss_rail->request[cpu].freq.min_freq = min;
+			ret = devmgr_client_request_mitigation(
+				apss_rail->device_handle[cpu],
+				CPUFREQ_MITIGATION_REQ,
+				&apss_rail->request[cpu]);
+		}
+	} else if (core_ptr) {
+		for (_cluster = 0; _cluster < core_ptr->entity_count;
+			_cluster++) {
+			cluster_ptr = &core_ptr->child_entity_ptr[_cluster];
+			if (!cluster_ptr->freq_table)
+				continue;
+			for_each_cpu(cpu, &cluster_ptr->cluster_cores) {
+				uint32_t max;
+				uint32_t changed = 0;
+
+				cpus[cpu].vdd_max_freq = max_freq;
+				max = min(cluster_ptr->freq_table[
+					cluster_ptr->freq_idx].frequency,
+					cpus[cpu].vdd_max_freq);
+
+				if (max != cpus[cpu].limited_max_freq)
+					changed |= FREQ_LIMIT_MAX;
+				if (min != cpus[cpu].limited_min_freq)
+					changed |= FREQ_LIMIT_MIN;
+
+				cpus[cpu].limited_min_freq = min;
+				cpus[cpu].limited_max_freq = max;
+			}
+			update_cluster_freq();
+		}
+	} else {
+		for_each_possible_cpu(cpu) {
+			uint32_t max;
+			uint32_t changed = 0;
+
+			cpus[cpu].vdd_max_freq = max_freq;
+			max = min(table[limit_idx].frequency,
+				cpus[cpu].vdd_max_freq);
+
+			if (max != cpus[cpu].limited_max_freq)
+				changed |= FREQ_LIMIT_MAX;
+			if (min != cpus[cpu].limited_min_freq)
+				changed |= FREQ_LIMIT_MIN;
+
+			cpus[cpu].limited_min_freq = min;
+			cpus[cpu].limited_max_freq = max;
+		}
+		update_cluster_freq();
+	}
+
+update_freq_exit:
+	put_online_cpus();
+	return ret;
+}
+
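+/*
+ * Apply or remove a VDD restriction expressed as a CPU frequency floor:
+ * level -1 restores the rail's minimum, levels 0..num_levels-1 request
+ * the matching entry from r->levels[].
+ */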
+static int vdd_restriction_apply_freq(struct rail *r, int level)
+{
+	int ret = 0;
+
+	if (level == r->curr_level)
+		return ret;
+
+	/* level = -1: disable, level = 0,1,2..n: enable */
+	if (level == -1) {
+		ret = update_cpu_min_freq_all(r, r->min_level);
+		if (ret)
+			return ret;
+		else
+			r->curr_level = -1;
+	} else if (level >= 0 && level < (r->num_levels)) {
+		ret = update_cpu_min_freq_all(r, r->levels[level]);
+		if (ret)
+			return ret;
+		else
+			r->curr_level = level;
+	} else {
+		pr_err("level input:%d is not within range\n", level);
+		return -EINVAL;
+	}
+
+	return ret;
+}
+
+static int vdd_restriction_apply_voltage(struct rail *r, int level)
+{
+	int ret = 0;
+
+	if (r->reg == NULL) {
+		pr_err("%s doesn't have a regulator handle. Can't apply vdd\n",
+				r->name);
+		return -EFAULT;
+	}
+	if (level == r->curr_level)
+		return ret;
+
+	/* level = -1: disable, level = 0,1,2..n: enable */
+	if (level == -1) {
+		ret = regulator_set_voltage(r->reg, r->min_level,
+			INT_MAX);
+		if (!ret)
+			r->curr_level = -1;
+		pr_debug("Requested min level for %s. curr level: %d\n",
+				r->name, r->curr_level);
+	} else if (level >= 0 && level < (r->num_levels)) {
+		ret = regulator_set_voltage(r->reg, r->levels[level],
+			INT_MAX);
+		if (!ret)
+			r->curr_level = level;
+		pr_debug("Requesting level %d for %s. curr level: %d\n",
+			r->levels[level], r->name, r->curr_level >= 0 ?
+			r->levels[r->curr_level] : r->curr_level);
+	} else {
+		pr_err("level input:%d is not within range\n", level);
+		return -EINVAL;
+	}
+
+	return ret;
+}
+
+/* Setting all rails the same mode */
+static int psm_set_mode_all(int mode)
+{
+	int i = 0;
+	int fail_cnt = 0;
+	int ret = 0;
+
+	pr_debug("Requesting PMIC Mode: %d\n", mode);
+	for (i = 0; i < psm_rails_cnt; i++) {
+		if (psm_rails[i].mode != mode) {
+			ret = rpm_regulator_set_mode(psm_rails[i].reg, mode);
+			if (ret) {
+				pr_err("Cannot set mode:%d for %s. err:%d\n",
+					mode, psm_rails[i].name, ret);
+				fail_cnt++;
+			} else
+				psm_rails[i].mode = mode;
+		}
+	}
+
+	return fail_cnt ? (-EFAULT) : ret;
+}
+
+static ssize_t vdd_rstr_en_show(
+	struct kobject *kobj, struct kobj_attribute *attr, char *buf)
+{
+	struct vdd_rstr_enable *en = VDD_RSTR_ENABLE_FROM_ATTRIBS(attr);
+
+	return snprintf(buf, PAGE_SIZE, "%d\n", en->enabled);
+}
+
+static ssize_t vdd_rstr_en_store(struct kobject *kobj,
+	struct kobj_attribute *attr, const char *buf, size_t count)
+{
+	int ret = 0;
+	int i = 0;
+	uint8_t en_cnt = 0;
+	uint8_t dis_cnt = 0;
+	uint32_t val = 0;
+	struct kernel_param kp;
+	struct vdd_rstr_enable *en = VDD_RSTR_ENABLE_FROM_ATTRIBS(attr);
+
+	mutex_lock(&vdd_rstr_mutex);
+	kp.arg = &val;
+	ret = param_set_bool(buf, &kp);
+	if (ret) {
+		pr_err("Invalid input %s for enabled\n", buf);
+		goto done_vdd_rstr_en;
+	}
+
+	if ((val == 0) && (en->enabled == 0))
+		goto done_vdd_rstr_en;
+
+	for (i = 0; i < rails_cnt; i++) {
+		if (rails[i].freq_req == 1 && freq_table_get)
+			ret = vdd_restriction_apply_freq(&rails[i],
+					(val) ? 0 : -1);
+		else
+			ret = vdd_restriction_apply_voltage(&rails[i],
+					(val) ? 0 : -1);
+
+		/*
+		 * Even if one rail fails, still try to set the others;
+		 * continue the loop.
+		 */
+		if (ret)
+			pr_err("Set vdd restriction for %s failed\n",
+					rails[i].name);
+		else {
+			if (val)
+				en_cnt++;
+			else
+				dis_cnt++;
+		}
+	}
+	/* As long as one rail is enabled, vdd rstr is enabled */
+	if (val && en_cnt)
+		en->enabled = 1;
+	else if (!val && (dis_cnt == rails_cnt))
+		en->enabled = 0;
+	pr_debug("%s vdd restriction. curr: %d\n",
+			(val) ? "Enable" : "Disable", en->enabled);
+
+done_vdd_rstr_en:
+	mutex_unlock(&vdd_rstr_mutex);
+	return count;
+}
+
+static int send_temperature_band(enum msm_thermal_phase_ctrl phase,
+	enum msm_temp_band req_band)
+{
+	int ret = 0;
+	uint32_t msg_id;
+	struct msm_rpm_request *rpm_req;
+	unsigned int band = req_band;
+	uint32_t key, resource, resource_id;
+
+	if (phase < 0 || phase >= MSM_PHASE_CTRL_NR ||
+		req_band <= 0 || req_band >= MSM_TEMP_MAX_NR) {
+		pr_err("Invalid input\n");
+		ret = -EINVAL;
+		goto phase_ctrl_exit;
+	}
+	switch (phase) {
+	case MSM_CX_PHASE_CTRL:
+		key = msm_thermal_info.cx_phase_request_key;
+		break;
+	case MSM_GFX_PHASE_CTRL:
+		key = msm_thermal_info.gfx_phase_request_key;
+		break;
+	default:
+		goto phase_ctrl_exit;
+	}
+
+	resource = msm_thermal_info.phase_rpm_resource_type;
+	resource_id = msm_thermal_info.phase_rpm_resource_id;
+	pr_debug("Sending %s temperature band %d\n",
+		(phase == MSM_CX_PHASE_CTRL) ? "CX" : "GFX",
+		req_band);
+	rpm_req = msm_rpm_create_request(MSM_RPM_CTX_ACTIVE_SET,
+			resource, resource_id, 1);
+	if (!rpm_req) {
+		pr_err("Creating RPM request failed\n");
+		ret = -ENXIO;
+		goto phase_ctrl_exit;
+	}
+
+	ret = msm_rpm_add_kvp_data(rpm_req, key, (const uint8_t *)&band,
+		(int)sizeof(band));
+	if (ret) {
+		pr_err("Adding KVP data failed. err:%d\n", ret);
+		goto free_rpm_handle;
+	}
+
+	msg_id = msm_rpm_send_request(rpm_req);
+	if (!msg_id) {
+		pr_err("RPM send request failed\n");
+		ret = -ENXIO;
+		goto free_rpm_handle;
+	}
+
+	ret = msm_rpm_wait_for_ack(msg_id);
+	if (ret) {
+		pr_err("RPM wait for ACK failed. err:%d\n", ret);
+		goto free_rpm_handle;
+	}
+
+free_rpm_handle:
+	msm_rpm_free_request(rpm_req);
+phase_ctrl_exit:
+	return ret;
+}
+
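+/*
+ * Pack up to the first four bytes of @inp into a little-endian u32.
+ * For illustration only (not from the original source): "vdd" maps to
+ * 0x00646476, i.e. 'v' | 'd' << 8 | 'd' << 16.
+ */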
+static uint32_t msm_thermal_str_to_int(const char *inp)
+{
+	int i, len;
+	uint32_t output = 0;
+
+	len = strnlen(inp, sizeof(uint32_t));
+	for (i = 0; i < len; i++)
+		output |= inp[i] << (i * 8);
+
+	return output;
+}
+
+static ssize_t sensor_info_show(
+	struct kobject *kobj, struct kobj_attribute *attr, char *buf)
+{
+	int i;
+	ssize_t tot_size = 0, size = 0;
+
+	for (i = 0; i < sensor_cnt; i++) {
+		size = snprintf(&buf[tot_size], PAGE_SIZE - tot_size,
+			"%s:%s:%s:%d ",
+			sensors[i].type, sensors[i].name,
+			sensors[i].alias ? : "",
+			sensors[i].scaling_factor);
+		if (tot_size + size >= PAGE_SIZE) {
+			pr_err("Not enough buffer size\n");
+			break;
+		}
+		tot_size += size;
+	}
+	if (tot_size)
+		buf[tot_size - 1] = '\n';
+
+	return tot_size;
+}
+
+static struct vdd_rstr_enable vdd_rstr_en = {
+	.ko_attr.attr.name = __stringify(enabled),
+	.ko_attr.attr.mode = 0644,
+	.ko_attr.show = vdd_rstr_en_show,
+	.ko_attr.store = vdd_rstr_en_store,
+	.enabled = 1,
+};
+
+static struct attribute *vdd_rstr_en_attribs[] = {
+	&vdd_rstr_en.ko_attr.attr,
+	NULL,
+};
+
+static struct attribute_group vdd_rstr_en_attribs_gp = {
+	.attrs  = vdd_rstr_en_attribs,
+};
+
+static ssize_t vdd_rstr_reg_value_show(
+	struct kobject *kobj, struct kobj_attribute *attr, char *buf)
+{
+	int val = 0;
+	struct rail *reg = VDD_RSTR_REG_VALUE_FROM_ATTRIBS(attr);
+	/* -1: disabled, -2: failed to get regulator handle */
+	if (reg->curr_level < 0)
+		val = reg->curr_level;
+	else
+		val = reg->levels[reg->curr_level];
+
+	return snprintf(buf, PAGE_SIZE, "%d\n", val);
+}
+
+static ssize_t vdd_rstr_reg_level_show(
+	struct kobject *kobj, struct kobj_attribute *attr, char *buf)
+{
+	struct rail *reg = VDD_RSTR_REG_LEVEL_FROM_ATTRIBS(attr);
+	return snprintf(buf, PAGE_SIZE, "%d\n", reg->curr_level);
+}
+
+static ssize_t vdd_rstr_reg_level_store(struct kobject *kobj,
+	struct kobj_attribute *attr, const char *buf, size_t count)
+{
+	int ret = 0;
+	int val = 0;
+
+	struct rail *reg = VDD_RSTR_REG_LEVEL_FROM_ATTRIBS(attr);
+
+	mutex_lock(&vdd_rstr_mutex);
+	if (vdd_rstr_en.enabled == 0)
+		goto done_store_level;
+
+	ret = kstrtouint(buf, 10, &val);
+	if (ret) {
+		pr_err("Invalid input %s for level\n", buf);
+		goto done_store_level;
+	}
+
+	if (val < 0 || val > reg->num_levels - 1) {
+		pr_err("Invalid number %d for level\n", val);
+		goto done_store_level;
+	}
+
+	if (val != reg->curr_level) {
+		if (reg->freq_req == 1 && freq_table_get)
+			update_cpu_min_freq_all(reg, reg->levels[val]);
+		else {
+			ret = vdd_restriction_apply_voltage(reg, val);
+			if (ret) {
+				pr_err("Set vdd restriction for regulator %s failed. err:%d\n",
+					reg->name, ret);
+				goto done_store_level;
+			}
+		}
+		reg->curr_level = val;
+		pr_debug("Request level %d for %s\n",
+				reg->curr_level, reg->name);
+	}
+
+done_store_level:
+	mutex_unlock(&vdd_rstr_mutex);
+	return count;
+}
+
+static int request_optimum_current(struct psm_rail *rail, enum ocr_request req)
+{
+	int ret = 0;
+
+	if ((!rail) || (req >= OPTIMUM_CURRENT_NR) ||
+		(req < 0)) {
+		pr_err("Invalid input %d\n", req);
+		ret = -EINVAL;
+		goto request_ocr_exit;
+	}
+
+	ret = regulator_set_load(rail->phase_reg,
+		(req == OPTIMUM_CURRENT_MAX) ? MAX_CURRENT_UA : 0);
+	if (ret < 0) {
+		pr_err("Optimum current request failed. err:%d\n", ret);
+		goto request_ocr_exit;
+	}
+	ret = 0; /* a non-negative return from regulator_set_load() means success */
+	pr_debug("Requested optimum current mode: %d\n", req);
+
+request_ocr_exit:
+	return ret;
+}
+
+static int ocr_set_mode_all(enum ocr_request req)
+{
+	int ret = 0, i;
+
+	for (i = 0; i < ocr_rail_cnt; i++) {
+		if (ocr_rails[i].mode == req)
+			continue;
+		ret = request_optimum_current(&ocr_rails[i], req);
+		if (ret)
+			goto ocr_set_mode_exit;
+		ocr_rails[i].mode = req;
+	}
+
+ocr_set_mode_exit:
+	return ret;
+}
+
+static ssize_t ocr_reg_mode_show(struct kobject *kobj,
+	struct kobj_attribute *attr, char *buf)
+{
+	struct psm_rail *reg = PSM_REG_MODE_FROM_ATTRIBS(attr);
+	return snprintf(buf, PAGE_SIZE, "%d\n", reg->mode);
+}
+
+static ssize_t ocr_reg_mode_store(struct kobject *kobj,
+	struct kobj_attribute *attr, const char *buf, size_t count)
+{
+	int ret = 0;
+	int val = 0;
+	struct psm_rail *reg = PSM_REG_MODE_FROM_ATTRIBS(attr);
+
+	if (!ocr_enabled)
+		return count;
+
+	mutex_lock(&ocr_mutex);
+	ret = kstrtoint(buf, 10, &val);
+	if (ret) {
+		pr_err("Invalid input %s for mode. err:%d\n",
+			buf, ret);
+		goto done_ocr_store;
+	}
+
+	if ((val != OPTIMUM_CURRENT_MAX) &&
+		(val != OPTIMUM_CURRENT_MIN)) {
+		pr_err("Invalid value %d for mode\n", val);
+		goto done_ocr_store;
+	}
+
+	if (val != reg->mode) {
+		ret = request_optimum_current(reg, val);
+		if (ret)
+			goto done_ocr_store;
+		reg->mode = val;
+	}
+
+done_ocr_store:
+	mutex_unlock(&ocr_mutex);
+	return count;
+}
+
+static ssize_t store_phase_request(const char *buf, size_t count, bool is_cx)
+{
+	int ret = 0, val;
+	struct mutex *phase_mutex = (is_cx) ? (&cx_mutex) : (&gfx_mutex);
+	enum msm_thermal_phase_ctrl phase_req = (is_cx) ? MSM_CX_PHASE_CTRL :
+		MSM_GFX_PHASE_CTRL;
+
+	ret = kstrtoint(buf, 10, &val);
+	if (ret) {
+		pr_err("Invalid input %s for %s temperature band\n",
+			buf, (is_cx) ? "CX" : "GFX");
+		goto phase_store_exit;
+	}
+	if ((val <= 0) || (val >= MSM_TEMP_MAX_NR)) {
+		pr_err("Invalid input %d for %s temperature band\n",
+			val, (is_cx) ? "CX" : "GFX");
+		ret = -EINVAL;
+		goto phase_store_exit;
+	}
+	mutex_lock(phase_mutex);
+	if (val != ((is_cx) ? curr_cx_band : curr_gfx_band)) {
+		ret = send_temperature_band(phase_req, val);
+		if (!ret) {
+			*((is_cx) ? &curr_cx_band : &curr_gfx_band) = val;
+		} else {
+			pr_err("Failed to send %d temp. band to %s rail\n", val,
+					(is_cx) ? "CX" : "GFX");
+			goto phase_store_unlock_exit;
+		}
+	}
+	ret = count;
+phase_store_unlock_exit:
+	mutex_unlock(phase_mutex);
+phase_store_exit:
+	return ret;
+}
+
+#define show_phase(_name, _variable) \
+static ssize_t _name##_phase_show(struct kobject *kobj, \
+	struct kobj_attribute *attr, char *buf) \
+{ \
+	return snprintf(buf, PAGE_SIZE, "%u\n", _variable); \
+}
+
+#define store_phase(_name, _variable, _iscx) \
+static ssize_t _name##_phase_store(struct kobject *kobj, \
+	struct kobj_attribute *attr, const char *buf, size_t count) \
+{ \
+	return store_phase_request(buf, count, _iscx); \
+}
+
+show_phase(gfx, curr_gfx_band)
+show_phase(cx, curr_cx_band)
+store_phase(gfx, curr_gfx_band, false)
+store_phase(cx, curr_cx_band, true)
+
+static ssize_t psm_reg_mode_show(
+	struct kobject *kobj, struct kobj_attribute *attr, char *buf)
+{
+	struct psm_rail *reg = PSM_REG_MODE_FROM_ATTRIBS(attr);
+	return snprintf(buf, PAGE_SIZE, "%d\n", reg->mode);
+}
+
+static ssize_t psm_reg_mode_store(struct kobject *kobj,
+	struct kobj_attribute *attr, const char *buf, size_t count)
+{
+	int ret = 0;
+	int val = 0;
+	struct psm_rail *reg = PSM_REG_MODE_FROM_ATTRIBS(attr);
+
+	mutex_lock(&psm_mutex);
+	ret = kstrtoint(buf, 10, &val);
+	if (ret) {
+		pr_err("Invalid input %s for mode\n", buf);
+		goto done_psm_store;
+	}
+
+	if ((val != PMIC_PWM_MODE) && (val != PMIC_AUTO_MODE)) {
+		pr_err("Invalid number %d for mode\n", val);
+		goto done_psm_store;
+	}
+
+	if (val != reg->mode) {
+		ret = rpm_regulator_set_mode(reg->reg, val);
+		if (ret) {
+			pr_err("Failed to set mode:%d for %s. err:%d\n",
+			val, reg->name, ret);
+			goto done_psm_store;
+		}
+		reg->mode = val;
+	}
+
+done_psm_store:
+	mutex_unlock(&psm_mutex);
+	return count;
+}
+
+static int check_sensor_id(int sensor_id)
+{
+	int i = 0;
+	bool hw_id_found = false;
+	int ret = 0;
+
+	for (i = 0; i < max_tsens_num; i++) {
+		if (sensor_id == tsens_id_map[i]) {
+			hw_id_found = true;
+			break;
+		}
+	}
+	if (!hw_id_found) {
+		pr_err("Invalid sensor hw id:%d\n", sensor_id);
+		return -EINVAL;
+	}
+
+	return ret;
+}
+
+static int zone_id_to_tsen_id(int zone_id, int *tsens_id)
+{
+	int i = 0;
+	int ret = 0;
+
+	if (!zone_id_tsens_map) {
+		pr_debug("zone_id_tsens_map is not initialized.\n");
+		*tsens_id = zone_id;
+		return ret;
+	}
+
+	for (i = 0; i < max_tsens_num; i++) {
+		if (zone_id == zone_id_tsens_map[i]) {
+			*tsens_id = tsens_id_map[i];
+			break;
+		}
+	}
+	if (i == max_tsens_num) {
+		pr_err("Invalid sensor zone id:%d\n", zone_id);
+		return -EINVAL;
+	}
+
+	return ret;
+}
+
+static int create_sensor_zone_id_map(void)
+{
+	int i = 0;
+	int zone_id = -1;
+
+	zone_id_tsens_map = devm_kzalloc(&msm_thermal_info.pdev->dev,
+		sizeof(int) * max_tsens_num, GFP_KERNEL);
+
+	if (!zone_id_tsens_map) {
+		pr_err("Cannot allocate memory for zone_id_tsens_map\n");
+		return -ENOMEM;
+	}
+
+	for (i = 0; i < max_tsens_num; i++) {
+		char tsens_name[TSENS_NAME_MAX] = "";
+
+		snprintf(tsens_name, TSENS_NAME_MAX, TSENS_NAME_FORMAT,
+			tsens_id_map[i]);
+		zone_id = sensor_get_id(tsens_name);
+		if (zone_id < 0) {
+			pr_err("Error getting zone id for %s. err:%d\n",
+				tsens_name, zone_id);
+			goto fail;
+		} else {
+			zone_id_tsens_map[i] = zone_id;
+		}
+	}
+	return 0;
+
+fail:
+	devm_kfree(&msm_thermal_info.pdev->dev, zone_id_tsens_map);
+	return zone_id;
+}
+
+static int create_sensor_id_map(struct device *dev)
+{
+	int ret = 0;
+
+	tsens_id_map = devm_kzalloc(dev,
+		sizeof(int) * max_tsens_num, GFP_KERNEL);
+
+	if (!tsens_id_map) {
+		pr_err("Cannot allocate memory for tsens_id_map\n");
+		return -ENOMEM;
+	}
+
+	ret = tsens_get_hw_id_mapping(max_tsens_num, tsens_id_map);
+	if (ret) {
+		pr_err("Failed to get tsens id's:%d\n", ret);
+		goto fail;
+	}
+
+	return ret;
+fail:
+	devm_kfree(dev, tsens_id_map);
+	return ret;
+}
+
+/* 1:enable, 0:disable */
+static int vdd_restriction_apply_all(int en)
+{
+	int i = 0;
+	int en_cnt = 0;
+	int dis_cnt = 0;
+	int fail_cnt = 0;
+	int ret = 0;
+
+	for (i = 0; i < rails_cnt; i++) {
+		if (rails[i].freq_req == 1) {
+			if (freq_table_get)
+				ret = vdd_restriction_apply_freq(&rails[i],
+					en ? 0 : -1);
+			else
+				continue;
+		} else {
+			ret = vdd_restriction_apply_voltage(&rails[i],
+					en ? 0 : -1);
+		}
+		if (ret) {
+			pr_err("Failed to %s for %s. err:%d\n",
+					(en) ? "enable" : "disable",
+					rails[i].name, ret);
+			fail_cnt++;
+		} else {
+			if (en)
+				en_cnt++;
+			else
+				dis_cnt++;
+		}
+	}
+
+	/* As long as one rail is enabled, vdd rstr is enabled */
+	if (en && en_cnt)
+		vdd_rstr_en.enabled = 1;
+	else if (!en && (dis_cnt == rails_cnt))
+		vdd_rstr_en.enabled = 0;
+
+	/*
+	 * Check fail_cnt to see whether the restriction was applied
+	 * successfully to all of the rails.
+	 */
+	if (fail_cnt)
+		return -EFAULT;
+	return ret;
+}
+
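+/*
+ * Program a single trip point on @sensor_id and then enable it; a
+ * failure in either step is returned as-is from the sensor framework.
+ */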
+static int set_and_activate_threshold(uint32_t sensor_id,
+	struct sensor_threshold *threshold)
+{
+	int ret = 0;
+
+	ret = sensor_set_trip(sensor_id, threshold);
+	if (ret != 0) {
+		pr_err("sensor:%u Error in setting trip:%d. err:%d\n",
+			sensor_id, threshold->trip, ret);
+		goto set_done;
+	}
+
+	ret = sensor_activate_trip(sensor_id, threshold, true);
+	if (ret != 0) {
+		pr_err("sensor:%u Error in enabling trip:%d. err:%d\n",
+			sensor_id, threshold->trip, ret);
+		goto set_done;
+	}
+
+set_done:
+	return ret;
+}
+
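+/*
+ * Read a temperature either through the thermal zone layer
+ * (THERM_ZONE_ID) or directly from a TSENS hardware sensor
+ * (THERM_TSENS_ID), then divide by tsens_scaling_factor when a
+ * scaling factor is configured.
+ */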
+static int therm_get_temp(uint32_t id, enum sensor_id_type type, int *temp)
+{
+	int ret = 0;
+	struct tsens_device tsens_dev;
+
+	if (!temp) {
+		pr_err("Invalid value\n");
+		ret = -EINVAL;
+		goto get_temp_exit;
+	}
+
+	switch (type) {
+	case THERM_ZONE_ID:
+		ret = sensor_get_temp(id, temp);
+		if (ret) {
+			pr_err("Unable to read thermal zone sensor:%d\n", id);
+			goto get_temp_exit;
+		}
+		break;
+	case THERM_TSENS_ID:
+		tsens_dev.sensor_num = id;
+		ret = tsens_get_temp(&tsens_dev, temp);
+		if (ret) {
+			pr_err("Unable to read TSENS sensor:%d\n",
+				tsens_dev.sensor_num);
+			goto get_temp_exit;
+		}
+		break;
+	default:
+		pr_err("Invalid type\n");
+		ret = -EINVAL;
+		goto get_temp_exit;
+	}
+
+	if (tsens_scaling_factor)
+		*temp = *temp / tsens_scaling_factor;
+
+get_temp_exit:
+	return ret;
+}
+
+static int msm_thermal_panic_callback(struct notifier_block *nfb,
+			unsigned long event, void *data)
+{
+	int i;
+
+	for (i = 0; i < max_tsens_num; i++) {
+		therm_get_temp(tsens_id_map[i],
+				THERM_TSENS_ID,
+				&tsens_temp_at_panic[i]);
+		if (tsens_temp_print)
+			pr_err("tsens%d temperature:%dC\n",
+				tsens_id_map[i], tsens_temp_at_panic[i]);
+	}
+
+	return NOTIFY_OK;
+}
+
+static struct notifier_block msm_thermal_panic_notifier = {
+	.notifier_call = msm_thermal_panic_callback,
+};
+
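+/*
+ * Arm the trip points for @zone_id, but only program a trip that lies
+ * on the far side of the current temperature (a HIGH trip at or above
+ * it, a LOW trip at or below it), so a threshold can't fire the moment
+ * it is set. The UPDATE_THRESHOLD_SET bits in the return value tell
+ * the caller which trips were actually armed.
+ */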
+int sensor_mgr_set_threshold(uint32_t zone_id,
+	struct sensor_threshold *threshold)
+{
+	int i = 0, ret = 0;
+	int temp;
+
+	if (!threshold) {
+		pr_err("Invalid input\n");
+		ret = -EINVAL;
+		goto set_threshold_exit;
+	}
+
+	ret = therm_get_temp(zone_id, THERM_ZONE_ID, &temp);
+	if (ret) {
+		pr_err("Unable to read temperature for zone:%d. err:%d\n",
+			zone_id, ret);
+		goto set_threshold_exit;
+	}
+	pr_debug("Sensor:[%d] temp:[%d]\n", zone_id, temp);
+	while (i < MAX_THRESHOLD) {
+		switch (threshold[i].trip) {
+		case THERMAL_TRIP_CONFIGURABLE_HI:
+			if (threshold[i].temp / tsens_scaling_factor >= temp) {
+				ret = set_and_activate_threshold(zone_id,
+					&threshold[i]);
+				if (ret)
+					goto set_threshold_exit;
+				UPDATE_THRESHOLD_SET(ret,
+					THERMAL_TRIP_CONFIGURABLE_HI);
+			}
+			break;
+		case THERMAL_TRIP_CONFIGURABLE_LOW:
+			if (threshold[i].temp / tsens_scaling_factor <= temp) {
+				ret = set_and_activate_threshold(zone_id,
+					&threshold[i]);
+				if (ret)
+					goto set_threshold_exit;
+				UPDATE_THRESHOLD_SET(ret,
+					THERMAL_TRIP_CONFIGURABLE_LOW);
+			}
+			break;
+		default:
+			pr_err("zone:%u Invalid trip:%d\n", zone_id,
+					threshold[i].trip);
+			break;
+		}
+		i++;
+	}
+set_threshold_exit:
+	return ret;
+}
+
+static int apply_vdd_mx_restriction(void)
+{
+	int ret_mx = 0, ret_cx = 0;
+
+	if (mx_restr_applied)
+		goto done;
+
+	APPLY_VDD_RESTRICTION(vdd_mx, msm_thermal_info.vdd_mx_min, mx, ret_mx);
+	if (vdd_cx)
+		APPLY_VDD_RESTRICTION(vdd_cx, msm_thermal_info.vdd_cx_min,
+			cx, ret_cx);
+	if (!ret_mx && !ret_cx)
+		mx_restr_applied = true;
+
+done:
+	return (ret_mx | ret_cx);
+}
+
+static int remove_vdd_mx_restriction(void)
+{
+	int ret_mx = 0, ret_cx = 0;
+
+	if (!mx_restr_applied)
+		goto done;
+
+	REMOVE_VDD_RESTRICTION(vdd_mx, mx, ret_mx);
+	if (vdd_cx)
+		REMOVE_VDD_RESTRICTION(vdd_cx, cx, ret_cx);
+	if (!ret_mx && !ret_cx)
+		mx_restr_applied = false;
+
+done:
+	return (ret_mx | ret_cx);
+}
+
+static int do_vdd_mx(void)
+{
+	int temp = 0;
+	int ret = 0;
+	int i = 0;
+	int dis_cnt = 0;
+
+	if (!vdd_mx_enabled)
+		return ret;
+
+	mutex_lock(&vdd_mx_mutex);
+	for (i = 0; i < thresh[MSM_VDD_MX_RESTRICTION].thresh_ct; i++) {
+		ret = therm_get_temp(
+			thresh[MSM_VDD_MX_RESTRICTION].thresh_list[i].sensor_id,
+			thresh[MSM_VDD_MX_RESTRICTION].thresh_list[i].id_type,
+			&temp);
+		if (ret) {
+			pr_err("Unable to read TSENS sensor:%d, err:%d\n",
+				thresh[MSM_VDD_MX_RESTRICTION].thresh_list[i].
+					sensor_id, ret);
+			dis_cnt++;
+			continue;
+		}
+		if (temp <= msm_thermal_info.vdd_mx_temp_degC) {
+			ret = apply_vdd_mx_restriction();
+			if (ret)
+				pr_err("Failed to apply mx restriction\n");
+			goto exit;
+		} else if (temp >= (msm_thermal_info.vdd_mx_temp_degC +
+				msm_thermal_info.vdd_mx_temp_hyst_degC)) {
+			dis_cnt++;
+		}
+	}
+
+	if (dis_cnt == thresh[MSM_VDD_MX_RESTRICTION].thresh_ct) {
+		ret = remove_vdd_mx_restriction();
+		if (ret)
+			pr_err("Failed to remove vdd mx restriction\n");
+	}
+
+exit:
+	mutex_unlock(&vdd_mx_mutex);
+	return ret;
+}
+
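+/*
+ * Threshold callback for the MX restriction: every sensor contributes
+ * one bit to mx_sens_status (set on a LOW trip, cleared on a HIGH
+ * trip), and the restriction stays applied as long as any bit is set.
+ */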
+static void vdd_mx_notify(struct therm_threshold *trig_thresh)
+{
+	static uint32_t mx_sens_status;
+	int ret;
+
+	pr_debug("Sensor%d trigger received for type %d\n",
+		trig_thresh->sensor_id,
+		trig_thresh->trip_triggered);
+
+	if (!vdd_mx_enabled)
+		return;
+
+	mutex_lock(&vdd_mx_mutex);
+
+	switch (trig_thresh->trip_triggered) {
+	case THERMAL_TRIP_CONFIGURABLE_LOW:
+		mx_sens_status |= BIT(trig_thresh->sensor_id);
+		break;
+	case THERMAL_TRIP_CONFIGURABLE_HI:
+		if (mx_sens_status & BIT(trig_thresh->sensor_id))
+			mx_sens_status ^= BIT(trig_thresh->sensor_id);
+		break;
+	default:
+		pr_err("Unsupported trip type\n");
+		break;
+	}
+
+	if (mx_sens_status) {
+		ret = apply_vdd_mx_restriction();
+		if (ret)
+			pr_err("Failed to apply mx restriction\n");
+	} else if (!mx_sens_status) {
+		ret = remove_vdd_mx_restriction();
+		if (ret)
+			pr_err("Failed to remove vdd mx restriction\n");
+	}
+	mutex_unlock(&vdd_mx_mutex);
+
+	if (trig_thresh->cur_state != trig_thresh->trip_triggered) {
+		sensor_mgr_set_threshold(trig_thresh->sensor_id,
+					trig_thresh->threshold);
+		trig_thresh->cur_state = trig_thresh->trip_triggered;
+	}
+}
+
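+/*
+ * Last-resort thermal reset: log the offending sensor, then ask the
+ * secure monitor to assert the thermal bite, using the legacy or ARMv8
+ * SCM calling convention as appropriate.
+ */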
+static void msm_thermal_bite(int zone_id, int temp)
+{
+	struct scm_desc desc;
+	int tsens_id = 0;
+	int ret = 0;
+
+	ret = zone_id_to_tsen_id(zone_id, &tsens_id);
+	if (ret < 0) {
+		pr_err("Zone:%d reached temperature:%d. Err:%d. System reset\n",
+			zone_id, temp, ret);
+	} else {
+		pr_err("Tsens:%d reached temperature:%d. System reset\n",
+			tsens_id, temp);
+	}
+	/* Skip the thermal bite unless this is a secure device. */
+	if (!scm_is_secure_device())
+		return;
+	if (!is_scm_armv8()) {
+		scm_call_atomic1(SCM_SVC_BOOT, THERM_SECURE_BITE_CMD, 0);
+	} else {
+		desc.args[0] = 0;
+		desc.arginfo = SCM_ARGS(1);
+		scm_call2_atomic(SCM_SIP_FNID(SCM_SVC_BOOT,
+				 THERM_SECURE_BITE_CMD), &desc);
+	}
+}
+
+static int do_therm_reset(void)
+{
+	int ret = 0, i;
+	int temp = 0;
+
+	if (!therm_reset_enabled)
+		return ret;
+
+	for (i = 0; i < thresh[MSM_THERM_RESET].thresh_ct; i++) {
+		ret = therm_get_temp(
+			thresh[MSM_THERM_RESET].thresh_list[i].sensor_id,
+			thresh[MSM_THERM_RESET].thresh_list[i].id_type,
+			&temp);
+		if (ret) {
+			pr_err("Unable to read TSENS sensor:%d. err:%d\n",
+			thresh[MSM_THERM_RESET].thresh_list[i].sensor_id,
+			ret);
+			continue;
+		}
+
+		if (temp >= msm_thermal_info.therm_reset_temp_degC)
+			msm_thermal_bite(
+			thresh[MSM_THERM_RESET].thresh_list[i].sensor_id, temp);
+	}
+
+	return ret;
+}
+
+static void therm_reset_notify(struct therm_threshold *thresh_data)
+{
+	int temp = 0;
+	int ret = 0;
+
+	if (!therm_reset_enabled)
+		return;
+
+	if (!thresh_data) {
+		pr_err("Invalid input\n");
+		return;
+	}
+
+	switch (thresh_data->trip_triggered) {
+	case THERMAL_TRIP_CONFIGURABLE_HI:
+		ret = therm_get_temp(thresh_data->sensor_id,
+				thresh_data->id_type, &temp);
+		if (ret)
+			pr_err("Unable to read TSENS sensor:%d. err:%d\n",
+				thresh_data->sensor_id, ret);
+		msm_thermal_bite(thresh_data->sensor_id, temp);
+		break;
+	case THERMAL_TRIP_CONFIGURABLE_LOW:
+		break;
+	default:
+		pr_err("Invalid trip type\n");
+		break;
+	}
+	sensor_mgr_set_threshold(thresh_data->sensor_id,
+					thresh_data->threshold);
+}
+
+static void cxip_lm_therm_vote_apply(bool vote)
+{
+	static bool prev_vote;
+
+	if (prev_vote == vote)
+		return;
+
+	prev_vote = vote;
+	writel_relaxed(CXIP_LM_THERM_VOTE_VAL,
+		cxip_lm_reg_base +
+		(vote ? CXIP_LM_VOTE_SET : CXIP_LM_VOTE_CLEAR));
+
+	pr_debug("%s vote for cxip_lm. Agg.vote:0x%x\n",
+		vote ? "Applied" : "Cleared", CXIP_LM_CLIENTS_STATUS());
+}
+
+static int do_cxip_lm(void)
+{
+	int temp = 0, ret = 0;
+
+	if (!cxip_lm_enabled)
+		return ret;
+
+	ret = therm_get_temp(
+		thresh[MSM_THERM_CXIP_LM].thresh_list->sensor_id,
+		thresh[MSM_THERM_CXIP_LM].thresh_list->id_type,
+		&temp);
+	if (ret) {
+		pr_err("Unable to read TSENS sensor:%d, err:%d\n",
+			thresh[MSM_THERM_CXIP_LM].thresh_list->sensor_id, ret);
+		return ret;
+	}
+
+	if (temp >= CXIP_LM_THERM_SENS_HIGH)
+		cxip_lm_therm_vote_apply(true);
+	else if (temp <= CXIP_LM_THERM_SENS_LOW)
+		cxip_lm_therm_vote_apply(false);
+
+	return ret;
+}
+
+static void therm_cxip_lm_notify(struct therm_threshold *trig_thresh)
+{
+	if (!cxip_lm_enabled)
+		return;
+
+	if (!trig_thresh) {
+		pr_err("Invalid input\n");
+		return;
+	}
+
+	switch (trig_thresh->trip_triggered) {
+	case THERMAL_TRIP_CONFIGURABLE_HI:
+		cxip_lm_therm_vote_apply(true);
+		break;
+	case THERMAL_TRIP_CONFIGURABLE_LOW:
+		cxip_lm_therm_vote_apply(false);
+		break;
+	default:
+		pr_err("Invalid trip type\n");
+		break;
+	}
+
+	if (trig_thresh->cur_state != trig_thresh->trip_triggered) {
+		sensor_mgr_set_threshold(trig_thresh->sensor_id,
+					trig_thresh->threshold);
+		trig_thresh->cur_state = trig_thresh->trip_triggered;
+	}
+}
+
+static void retry_hotplug(struct work_struct *work)
+{
+	mutex_lock(&core_control_mutex);
+	if (retry_in_progress) {
+		pr_debug("Retrying hotplug\n");
+		retry_in_progress = false;
+		complete(&hotplug_notify_complete);
+	}
+	mutex_unlock(&core_control_mutex);
+}
+
+#ifdef CONFIG_SMP
+static void __ref do_core_control(int temp)
+{
+	int i = 0;
+	int ret = 0;
+	struct device *cpu_dev = NULL;
+
+	if (!core_control_enabled)
+		return;
+
+	mutex_lock(&core_control_mutex);
+	if (msm_thermal_info.core_control_mask &&
+		temp >= msm_thermal_info.core_limit_temp_degC) {
+		for (i = num_possible_cpus(); i > 0; i--) {
+			if (!(msm_thermal_info.core_control_mask & BIT(i)))
+				continue;
+			if (cpus_offlined & BIT(i) && !cpu_online(i))
+				continue;
+			pr_info("Set Offline: CPU%d Temp: %d\n",
+					i, temp);
+			lock_device_hotplug();
+			if (cpu_online(i)) {
+				cpu_dev = get_cpu_device(i);
+				trace_thermal_pre_core_offline(i);
+				ret = device_offline(cpu_dev);
+				if (ret < 0)
+					pr_err("Error %d offline core %d\n",
+					       ret, i);
+				trace_thermal_post_core_offline(i,
+					cpumask_test_cpu(i, cpu_online_mask));
+			}
+			unlock_device_hotplug();
+			cpus_offlined |= BIT(i);
+			break;
+		}
+	} else if (msm_thermal_info.core_control_mask && cpus_offlined &&
+		temp <= (msm_thermal_info.core_limit_temp_degC -
+			msm_thermal_info.core_temp_hysteresis_degC)) {
+		for (i = 0; i < num_possible_cpus(); i++) {
+			if (!(cpus_offlined & BIT(i)))
+				continue;
+			cpus_offlined &= ~BIT(i);
+			pr_info("Allow Online CPU%d Temp: %d\n",
+					i, temp);
+			/*
+			 * If this core is already online, then bring up the
+			 * next offlined core.
+			 */
+			lock_device_hotplug();
+			if (cpu_online(i)) {
+				unlock_device_hotplug();
+				continue;
+			}
+			/*
+			 * If this core wasn't previously online,
+			 * don't put it online.
+			 */
+			if (!(cpumask_test_cpu(i, cpus_previously_online))) {
+				unlock_device_hotplug();
+				continue;
+			}
+			cpu_dev = get_cpu_device(i);
+			trace_thermal_pre_core_online(i);
+			ret = device_online(cpu_dev);
+			if (ret)
+				pr_err("Error %d online core %d\n",
+						ret, i);
+			trace_thermal_post_core_online(i,
+				cpumask_test_cpu(i, cpu_online_mask));
+			unlock_device_hotplug();
+			break;
+		}
+	}
+	mutex_unlock(&core_control_mutex);
+}
+/* Call with core_control_mutex locked */
+static int __ref update_offline_cores(int val)
+{
+	uint32_t cpu = 0;
+	int ret = 0;
+	uint32_t previous_cpus_offlined = 0;
+	bool pend_hotplug_req = false;
+	struct device *cpu_dev = NULL;
+
+	if (!core_control_enabled)
+		return 0;
+
+	previous_cpus_offlined = cpus_offlined;
+	cpus_offlined = msm_thermal_info.core_control_mask & val;
+
+	for_each_possible_cpu(cpu) {
+		if (cpus_offlined & BIT(cpu)) {
+			lock_device_hotplug();
+			if (!cpu_online(cpu)) {
+				unlock_device_hotplug();
+				continue;
+			}
+			cpu_dev = get_cpu_device(cpu);
+			trace_thermal_pre_core_offline(cpu);
+			ret = device_offline(cpu_dev);
+			if (ret < 0) {
+				cpus_offlined &= ~BIT(cpu);
+				pr_err_ratelimited(
+					"Unable to offline CPU%d. err:%d\n",
+					cpu, ret);
+				pend_hotplug_req = true;
+			} else {
+				pr_debug("Offlined CPU%d\n", cpu);
+			}
+			trace_thermal_post_core_offline(cpu,
+				cpumask_test_cpu(cpu, cpu_online_mask));
+			unlock_device_hotplug();
+		} else if (online_core && (previous_cpus_offlined & BIT(cpu))) {
+			lock_device_hotplug();
+			if (cpu_online(cpu)) {
+				unlock_device_hotplug();
+				continue;
+			}
+			/*
+			 * If this core wasn't previously online,
+			 * don't put it online.
+			 */
+			if (!(cpumask_test_cpu(cpu, cpus_previously_online))) {
+				unlock_device_hotplug();
+				continue;
+			}
+			cpu_dev = get_cpu_device(cpu);
+			trace_thermal_pre_core_online(cpu);
+			ret = device_online(cpu_dev);
+			if (ret && ret == notifier_to_errno(NOTIFY_BAD)) {
+				pr_debug("Onlining CPU%d is vetoed\n", cpu);
+			} else if (ret) {
+				cpus_offlined |= BIT(cpu);
+				pend_hotplug_req = true;
+				pr_err_ratelimited(
+					"Unable to online CPU%d. err:%d\n",
+					cpu, ret);
+			} else {
+				pr_debug("Onlined CPU%d\n", cpu);
+				trace_thermal_post_core_online(cpu,
+					cpumask_test_cpu(cpu, cpu_online_mask));
+			}
+			unlock_device_hotplug();
+		}
+	}
+
+	if (pend_hotplug_req && !in_suspend && !retry_in_progress) {
+		retry_in_progress = true;
+		schedule_delayed_work(&retry_hotplug_work,
+			msecs_to_jiffies(HOTPLUG_RETRY_INTERVAL_MS));
+	}
+
+	return ret;
+}
+
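+/*
+ * Hotplug worker thread: sleeps on hotplug_notify_complete and, on each
+ * wakeup, rebuilds the offline mask from thermal state, userspace
+ * requests and device manager clients, applies it through
+ * update_offline_cores(), and notifies sysfs watchers.
+ */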
+static __ref int do_hotplug(void *data)
+{
+	int ret = 0;
+	uint32_t cpu = 0, mask = 0;
+	struct device_clnt_data *clnt = NULL;
+	struct sched_param param = {.sched_priority = MAX_RT_PRIO-2};
+
+	if (!core_control_enabled) {
+		pr_debug("Core control disabled\n");
+		return -EINVAL;
+	}
+
+	sched_setscheduler(current, SCHED_FIFO, &param);
+	while (!kthread_should_stop()) {
+		while (wait_for_completion_interruptible(
+			&hotplug_notify_complete) != 0)
+			;
+		reinit_completion(&hotplug_notify_complete);
+
+		/*
+		 * Suspend framework will have disabled the
+		 * hotplug functionality. So wait till the suspend exits
+		 * and then re-evaluate.
+		 */
+		if (in_suspend)
+			continue;
+		mask = 0;
+
+		mutex_lock(&core_control_mutex);
+		for_each_possible_cpu(cpu) {
+			if (hotplug_enabled &&
+				cpus[cpu].hotplug_thresh_clear) {
+				ret =
+				sensor_mgr_set_threshold(cpus[cpu].sensor_id,
+				&cpus[cpu].threshold[HOTPLUG_THRESHOLD_HIGH]);
+
+				if (cpus[cpu].offline
+					&& !IS_LOW_THRESHOLD_SET(ret))
+					cpus[cpu].offline = 0;
+				cpus[cpu].hotplug_thresh_clear = false;
+			}
+			if (cpus[cpu].offline || cpus[cpu].user_offline)
+				mask |= BIT(cpu);
+		}
+		if (devices && devices->hotplug_dev) {
+			mutex_lock(&devices->hotplug_dev->clnt_lock);
+			for_each_cpu(cpu,
+				&devices->hotplug_dev->active_req.offline_mask)
+				mask |= BIT(cpu);
+			mutex_unlock(&devices->hotplug_dev->clnt_lock);
+		}
+		if (mask != cpus_offlined)
+			update_offline_cores(mask);
+		mutex_unlock(&core_control_mutex);
+
+		if (devices && devices->hotplug_dev) {
+			union device_request req;
+
+			req.offline_mask = CPU_MASK_NONE;
+			mutex_lock(&devices->hotplug_dev->clnt_lock);
+			for_each_cpu(cpu,
+				&devices->hotplug_dev->active_req.offline_mask)
+				if (mask & BIT(cpu))
+					cpumask_test_and_set_cpu(cpu,
+						&req.offline_mask);
+
+			list_for_each_entry(clnt,
+					&devices->hotplug_dev->client_list,
+					clnt_ptr) {
+				if (clnt->callback)
+					clnt->callback(clnt, &req,
+							clnt->usr_data);
+			}
+			mutex_unlock(&devices->hotplug_dev->clnt_lock);
+		}
+		sysfs_notify(cc_kobj, NULL, "cpus_offlined");
+	}
+
+	return ret;
+}
+#else
+static void __ref do_core_control(int temp)
+{
+	return;
+}
+
+static __ref int do_hotplug(void *data)
+{
+	return 0;
+}
+
+static int __ref update_offline_cores(int val)
+{
+	return 0;
+}
+#endif
+
+static int do_gfx_phase_cond(void)
+{
+	int temp = 0;
+	int ret = 0;
+	uint32_t new_req_band = curr_gfx_band;
+
+	if (!gfx_warm_phase_ctrl_enabled && !gfx_crit_phase_ctrl_enabled)
+		return ret;
+
+	mutex_lock(&gfx_mutex);
+	if (gfx_warm_phase_ctrl_enabled) {
+		ret = therm_get_temp(
+			thresh[MSM_GFX_PHASE_CTRL_WARM].thresh_list->sensor_id,
+			thresh[MSM_GFX_PHASE_CTRL_WARM].thresh_list->id_type,
+			&temp);
+		if (ret) {
+			pr_err("Unable to read TSENS sensor:%d. err:%d\n",
+			thresh[MSM_GFX_PHASE_CTRL_WARM].thresh_list->sensor_id,
+			ret);
+			goto gfx_phase_cond_exit;
+		}
+	} else {
+		ret = therm_get_temp(
+			thresh[MSM_GFX_PHASE_CTRL_HOT].thresh_list->sensor_id,
+			thresh[MSM_GFX_PHASE_CTRL_HOT].thresh_list->id_type,
+			&temp);
+		if (ret) {
+			pr_err("Unable to read TSENS sensor:%d. err:%d\n",
+			thresh[MSM_GFX_PHASE_CTRL_HOT].thresh_list->sensor_id,
+			ret);
+			goto gfx_phase_cond_exit;
+		}
+	}
+
+	switch (curr_gfx_band) {
+	case MSM_HOT_CRITICAL:
+		if (temp < (msm_thermal_info.gfx_phase_hot_temp_degC -
+			msm_thermal_info.gfx_phase_hot_temp_hyst_degC))
+			new_req_band = MSM_WARM;
+		break;
+	case MSM_WARM:
+		if (temp >= msm_thermal_info.gfx_phase_hot_temp_degC)
+			new_req_band = MSM_HOT_CRITICAL;
+		else if (temp < (msm_thermal_info.gfx_phase_warm_temp_degC -
+			msm_thermal_info.gfx_phase_warm_temp_hyst_degC))
+			new_req_band = MSM_NORMAL;
+		break;
+	case MSM_NORMAL:
+		if (temp >= msm_thermal_info.gfx_phase_warm_temp_degC)
+			new_req_band = MSM_WARM;
+		break;
+	default:
+		if (temp >= msm_thermal_info.gfx_phase_hot_temp_degC)
+			new_req_band = MSM_HOT_CRITICAL;
+		else if (temp >= msm_thermal_info.gfx_phase_warm_temp_degC)
+			new_req_band = MSM_WARM;
+		else
+			new_req_band = MSM_NORMAL;
+		break;
+	}
+
+	if (new_req_band != curr_gfx_band) {
+		ret = send_temperature_band(MSM_GFX_PHASE_CTRL, new_req_band);
+		if (!ret) {
+			pr_debug("Reached %d band. Temp:%d\n", new_req_band,
+					temp);
+			curr_gfx_band = new_req_band;
+		} else {
+			pr_err("Error sending temp. band:%d. Temp:%d. err:%d\n",
+					new_req_band, temp, ret);
+		}
+	}
+
+gfx_phase_cond_exit:
+	mutex_unlock(&gfx_mutex);
+	return ret;
+}
+
+static int do_cx_phase_cond(void)
+{
+	int temp = 0;
+	int i, ret = 0, dis_cnt = 0;
+
+	if (!cx_phase_ctrl_enabled)
+		return ret;
+
+	mutex_lock(&cx_mutex);
+	for (i = 0; i < thresh[MSM_CX_PHASE_CTRL_HOT].thresh_ct; i++) {
+		ret = therm_get_temp(
+			thresh[MSM_CX_PHASE_CTRL_HOT].thresh_list[i].sensor_id,
+			thresh[MSM_CX_PHASE_CTRL_HOT].thresh_list[i].id_type,
+			&temp);
+		if (ret) {
+			pr_err("Unable to read TSENS sensor:%d. err:%d\n",
+			thresh[MSM_CX_PHASE_CTRL_HOT].thresh_list[i].sensor_id,
+			ret);
+			dis_cnt++;
+			continue;
+		}
+
+		if (temp >= msm_thermal_info.cx_phase_hot_temp_degC) {
+			if (curr_cx_band != MSM_HOT_CRITICAL) {
+				ret = send_temperature_band(MSM_CX_PHASE_CTRL,
+					MSM_HOT_CRITICAL);
+				if (!ret) {
+					pr_debug("band:HOT_CRITICAL Temp:%d\n",
+							temp);
+					curr_cx_band = MSM_HOT_CRITICAL;
+				} else {
+					pr_err("Error %d sending HOT_CRITICAL\n",
+							ret);
+				}
+			}
+			goto cx_phase_cond_exit;
+		} else if (temp < (msm_thermal_info.cx_phase_hot_temp_degC -
+			msm_thermal_info.cx_phase_hot_temp_hyst_degC))
+			dis_cnt++;
+	}
+	if (dis_cnt == max_tsens_num && curr_cx_band != MSM_WARM) {
+		ret = send_temperature_band(MSM_CX_PHASE_CTRL, MSM_WARM);
+		if (!ret) {
+			pr_debug("band:WARM Temp:%d\n", temp);
+			curr_cx_band = MSM_WARM;
+		} else {
+			pr_err("Error sending WARM temp band. err:%d\n",
+					ret);
+		}
+	}
+cx_phase_cond_exit:
+	mutex_unlock(&cx_mutex);
+	return ret;
+}
+
+static int do_ocr(void)
+{
+	int temp = 0;
+	int ret = 0;
+	int i = 0, j = 0;
+	int pfm_cnt = 0;
+
+	if (!ocr_enabled)
+		return ret;
+
+	mutex_lock(&ocr_mutex);
+	for (i = 0; i < thresh[MSM_OCR].thresh_ct; i++) {
+		ret = therm_get_temp(
+			thresh[MSM_OCR].thresh_list[i].sensor_id,
+			thresh[MSM_OCR].thresh_list[i].id_type,
+			&temp);
+		if (ret) {
+			pr_err("Unable to read TSENS sensor %d. err:%d\n",
+			thresh[MSM_OCR].thresh_list[i].sensor_id,
+			ret);
+			pfm_cnt++;
+			continue;
+		}
+
+		if (temp > msm_thermal_info.ocr_temp_degC) {
+			if (ocr_rails[0].init != OPTIMUM_CURRENT_NR)
+				for (j = 0; j < ocr_rail_cnt; j++)
+					ocr_rails[j].init = OPTIMUM_CURRENT_NR;
+			ret = ocr_set_mode_all(OPTIMUM_CURRENT_MAX);
+			if (ret)
+				pr_err("Error setting max ocr. err:%d\n",
+					ret);
+			else
+				pr_debug("Requested MAX OCR. tsens:%d Temp:%d\n",
+				thresh[MSM_OCR].thresh_list[i].sensor_id, temp);
+			goto do_ocr_exit;
+		} else if (temp <= (msm_thermal_info.ocr_temp_degC -
+			msm_thermal_info.ocr_temp_hyst_degC))
+			pfm_cnt++;
+	}
+
+	if (pfm_cnt == thresh[MSM_OCR].thresh_ct ||
+		ocr_rails[0].init != OPTIMUM_CURRENT_NR) {
+		/*
+		 * 'init' not equal to OPTIMUM_CURRENT_NR means this is the
+		 * first polling iteration after device probe. During the
+		 * first iteration, if the temperature is below the set
+		 * point, clear the max current request and reset 'init'.
+		 */
+		if (ocr_rails[0].init != OPTIMUM_CURRENT_NR)
+			for (j = 0; j < ocr_rail_cnt; j++)
+				ocr_rails[j].init = OPTIMUM_CURRENT_NR;
+		ret = ocr_set_mode_all(OPTIMUM_CURRENT_MIN);
+		if (ret) {
+			pr_err("Error setting min ocr. err:%d\n",
+				ret);
+			goto do_ocr_exit;
+		} else {
+			pr_debug("Requested MIN OCR. Temp:%d\n", temp);
+		}
+	}
+do_ocr_exit:
+	mutex_unlock(&ocr_mutex);
+	return ret;
+}
+
+static int do_vdd_restriction(void)
+{
+	int temp = 0;
+	int ret = 0;
+	int i = 0;
+	int dis_cnt = 0;
+
+	if (!vdd_rstr_enabled)
+		return ret;
+
+	if (usefreq && !freq_table_get) {
+		if (check_freq_table() && !core_ptr)
+			return ret;
+	}
+	mutex_lock(&vdd_rstr_mutex);
+	for (i = 0; i < thresh[MSM_VDD_RESTRICTION].thresh_ct; i++) {
+		ret = therm_get_temp(
+			thresh[MSM_VDD_RESTRICTION].thresh_list[i].sensor_id,
+			thresh[MSM_VDD_RESTRICTION].thresh_list[i].id_type,
+			&temp);
+		if (ret) {
+			pr_err("Unable to read TSENS sensor:%d. err:%d\n",
+			thresh[MSM_VDD_RESTRICTION].thresh_list[i].sensor_id,
+			ret);
+			dis_cnt++;
+			continue;
+		}
+		if (temp <= msm_thermal_info.vdd_rstr_temp_degC) {
+			ret = vdd_restriction_apply_all(1);
+			if (ret) {
+				pr_err("Enable vdd rstr for all failed. err:%d\n",
+					ret);
+				goto exit;
+			}
+			pr_debug("Enabled Vdd Restriction tsens:%d. Temp:%d\n",
+			thresh[MSM_VDD_RESTRICTION].thresh_list[i].sensor_id,
+			temp);
+			goto exit;
+		} else if (temp > msm_thermal_info.vdd_rstr_temp_hyst_degC)
+			dis_cnt++;
+	}
+	if (dis_cnt == max_tsens_num) {
+		ret = vdd_restriction_apply_all(0);
+		if (ret) {
+			pr_err("Disable vdd rstr for all failed. err:%d\n",
+					ret);
+			goto exit;
+		}
+		pr_debug("Disabled Vdd Restriction\n");
+	}
+exit:
+	mutex_unlock(&vdd_rstr_mutex);
+	return ret;
+}
+
+static int do_psm(void)
+{
+	int temp = 0;
+	int ret = 0;
+	int i = 0;
+	int auto_cnt = 0;
+
+	if (!psm_enabled)
+		return ret;
+
+	mutex_lock(&psm_mutex);
+	for (i = 0; i < max_tsens_num; i++) {
+		ret = therm_get_temp(tsens_id_map[i], THERM_TSENS_ID, &temp);
+		if (ret) {
+			pr_err("Unable to read TSENS sensor:%d. err:%d\n",
+					tsens_id_map[i], ret);
+			auto_cnt++;
+			continue;
+		}
+
+		/*
+		 * As long as one sensor is above the threshold, set PWM
+		 * mode on all rails and stop the loop. Set auto mode only
+		 * when all sensors are below the threshold.
+		 */
+		if (temp > msm_thermal_info.psm_temp_degC) {
+			ret = psm_set_mode_all(PMIC_PWM_MODE);
+			if (ret) {
+				pr_err("Set pwm mode for all failed. err:%d\n",
+						ret);
+				goto exit;
+			}
+			pr_debug("Requested PMIC PWM Mode tsens:%d. Temp:%d\n",
+					tsens_id_map[i], temp);
+			break;
+		} else if (temp <= msm_thermal_info.psm_temp_hyst_degC)
+			auto_cnt++;
+	}
+
+	if (auto_cnt == max_tsens_num) {
+		ret = psm_set_mode_all(PMIC_AUTO_MODE);
+		if (ret) {
+			pr_err("Set auto mode for all failed. err:%d\n", ret);
+			goto exit;
+		}
+		pr_debug("Requested PMIC AUTO Mode\n");
+	}
+
+exit:
+	mutex_unlock(&psm_mutex);
+	return ret;
+}
+
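+/*
+ * Boot-time frequency mitigation for the non-cluster case: walk
+ * limit_idx up or down the CPU0 frequency table by bootup_freq_step
+ * with hysteresis, then fan the resulting cap out to every CPU in
+ * bootup_freq_control_mask. Cluster-aware targets are handled by
+ * do_cluster_freq_ctrl() instead.
+ */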
+static void do_freq_control(int temp)
+{
+	uint32_t cpu = 0;
+	uint32_t max_freq = cpus[cpu].limited_max_freq;
+
+	if (!boot_freq_mitig_enabled)
+		return;
+	if (core_ptr)
+		return do_cluster_freq_ctrl(temp);
+	if (!freq_table_get)
+		return;
+
+	if (temp >= msm_thermal_info.limit_temp_degC) {
+		if (limit_idx == limit_idx_low)
+			return;
+
+		limit_idx -= msm_thermal_info.bootup_freq_step;
+		if (limit_idx < limit_idx_low)
+			limit_idx = limit_idx_low;
+	} else if (temp < msm_thermal_info.limit_temp_degC -
+		 msm_thermal_info.temp_hysteresis_degC) {
+		if (limit_idx == limit_idx_high)
+			return;
+
+		limit_idx += msm_thermal_info.bootup_freq_step;
+		if (limit_idx >= limit_idx_high)
+			limit_idx = limit_idx_high;
+	}
+
+	/* Update new limits */
+	get_online_cpus();
+	max_freq = table[limit_idx].frequency;
+	if (max_freq == cpus[cpu].limited_max_freq) {
+		put_online_cpus();
+		return;
+	}
+
+	for_each_possible_cpu(cpu) {
+		if (!(msm_thermal_info.bootup_freq_control_mask & BIT(cpu)))
+			continue;
+		pr_info("Limiting CPU%d max frequency to %u. Temp:%d\n",
+			cpu, max_freq, temp);
+		cpus[cpu].limited_max_freq =
+				min(max_freq, cpus[cpu].vdd_max_freq);
+	}
+	update_cluster_freq();
+	put_online_cpus();
+}
+
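+/*
+ * Main polling worker: samples the TSENS sensor selected by
+ * msm_thermal_info.sensor_id and runs each mitigation pass (thermal
+ * reset, core control, CXIP LM, VDD MX, PSM, phase control, OCR, VDD
+ * restriction, frequency control) in turn, then reschedules itself
+ * every poll_ms while polling is enabled.
+ */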
+static void check_temp(struct work_struct *work)
+{
+	int temp = 0;
+	int ret = 0;
+
+	do_therm_reset();
+
+	ret = therm_get_temp(msm_thermal_info.sensor_id, THERM_TSENS_ID, &temp);
+	if (ret) {
+		pr_err("Unable to read TSENS sensor:%d. err:%d\n",
+				msm_thermal_info.sensor_id, ret);
+		goto reschedule;
+	}
+	do_core_control(temp);
+	do_cxip_lm();
+	do_vdd_mx();
+	do_psm();
+	do_gfx_phase_cond();
+	do_cx_phase_cond();
+	do_ocr();
+
+	/*
+	 * All mitigation involving CPU frequency should be placed below
+	 * this check. Any mitigation that follows this frequency table
+	 * check should be able to handle the failure case.
+	 */
+	if (!freq_table_get)
+		check_freq_table();
+
+	do_vdd_restriction();
+	do_freq_control(temp);
+
+reschedule:
+	if (polling_enabled)
+		schedule_delayed_work(&check_temp_work,
+				msecs_to_jiffies(msm_thermal_info.poll_ms));
+}
+
+static int __ref msm_thermal_cpu_callback(struct notifier_block *nfb,
+		unsigned long action, void *hcpu)
+{
+	uint32_t cpu = (uintptr_t)hcpu;
+
+	switch (action & ~CPU_TASKS_FROZEN) {
+	case CPU_UP_PREPARE:
+		if (!cpumask_test_and_set_cpu(cpu, cpus_previously_online))
+			pr_debug("Total prev cores online tracked %u\n",
+				cpumask_weight(cpus_previously_online));
+		if (core_control_enabled &&
+			(msm_thermal_info.core_control_mask & BIT(cpu)) &&
+			(cpus_offlined & BIT(cpu))) {
+			pr_debug("Preventing CPU%d from coming online.\n",
+				cpu);
+			return NOTIFY_BAD;
+		}
+		break;
+	case CPU_DOWN_PREPARE:
+		if (!cpumask_test_and_set_cpu(cpu, cpus_previously_online))
+			pr_debug("Total prev cores online tracked %u\n",
+				cpumask_weight(cpus_previously_online));
+		break;
+	case CPU_ONLINE:
+		if (core_control_enabled &&
+			(msm_thermal_info.core_control_mask & BIT(cpu)) &&
+			(cpus_offlined & BIT(cpu))) {
+			if (hotplug_task) {
+				pr_debug("Re-evaluate and hotplug CPU%d\n",
+					cpu);
+				complete(&hotplug_notify_complete);
+			} else {
+				/*
+				 * This will be auto-corrected next time
+				 * do_core_control() is called
+				 */
+				pr_err("CPU%d online, after thermal veto\n",
+					cpu);
+			}
+		}
+		break;
+	default:
+		break;
+	}
+
+	pr_debug("voting for CPU%d to be online\n", cpu);
+	return NOTIFY_OK;
+}
+
+static struct notifier_block __refdata msm_thermal_cpu_notifier = {
+	.notifier_call = msm_thermal_cpu_callback,
+};
+static int hotplug_notify(enum thermal_trip_type type, int temp, void *data)
+{
+	struct cpu_info *cpu_node = (struct cpu_info *)data;
+
+	pr_info_ratelimited("%s reached temp threshold: %d\n",
+			       cpu_node->sensor_type, temp);
+
+	if (!(msm_thermal_info.core_control_mask & BIT(cpu_node->cpu)))
+		return 0;
+	switch (type) {
+	case THERMAL_TRIP_CONFIGURABLE_HI:
+		if (!(cpu_node->offline))
+			cpu_node->offline = 1;
+		break;
+	case THERMAL_TRIP_CONFIGURABLE_LOW:
+		if (cpu_node->offline)
+			cpu_node->offline = 0;
+		break;
+	default:
+		break;
+	}
+	if (hotplug_task) {
+		cpu_node->hotplug_thresh_clear = true;
+		complete(&hotplug_notify_complete);
+	} else
+		pr_err("Hotplug task is not initialized\n");
+	return 0;
+}
+/* Adjust cpus offlined bit based on temperature reading. */
+static int hotplug_init_cpu_offlined(void)
+{
+	int temp = 0;
+	uint32_t cpu = 0;
+
+	if (!hotplug_enabled || !hotplug_task)
+		return 0;
+
+	mutex_lock(&core_control_mutex);
+	for_each_possible_cpu(cpu) {
+		if (!(msm_thermal_info.core_control_mask & BIT(cpus[cpu].cpu)))
+			continue;
+		if (therm_get_temp(cpus[cpu].sensor_id, cpus[cpu].id_type,
+					&temp)) {
+			pr_err("Unable to read TSENS sensor:%d.\n",
+				cpus[cpu].sensor_id);
+			mutex_unlock(&core_control_mutex);
+			return -EINVAL;
+		}
+
+		if (temp >= msm_thermal_info.hotplug_temp_degC)
+			cpus[cpu].offline = 1;
+		else
+			cpus[cpu].offline = 0;
+	}
+	mutex_unlock(&core_control_mutex);
+
+	if (hotplug_task)
+		complete(&hotplug_notify_complete);
+	else {
+		pr_err("Hotplug task is not initialized\n");
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static void hotplug_init(void)
+{
+	uint32_t cpu = 0;
+	struct sensor_threshold *hi_thresh = NULL, *low_thresh = NULL;
+
+	if (hotplug_task)
+		return;
+
+	if (!hotplug_enabled)
+		goto init_kthread;
+
+	for_each_possible_cpu(cpu) {
+		cpus[cpu].sensor_id =
+			sensor_get_id((char *)cpus[cpu].sensor_type);
+		cpus[cpu].id_type = THERM_ZONE_ID;
+		if (!(msm_thermal_info.core_control_mask & BIT(cpus[cpu].cpu)))
+			continue;
+
+		hi_thresh = &cpus[cpu].threshold[HOTPLUG_THRESHOLD_HIGH];
+		low_thresh = &cpus[cpu].threshold[HOTPLUG_THRESHOLD_LOW];
+		hi_thresh->temp = (msm_thermal_info.hotplug_temp_degC)
+				* tsens_scaling_factor;
+		hi_thresh->trip = THERMAL_TRIP_CONFIGURABLE_HI;
+		low_thresh->temp = (msm_thermal_info.hotplug_temp_degC -
+				msm_thermal_info.hotplug_temp_hysteresis_degC)
+				* tsens_scaling_factor;
+		low_thresh->trip = THERMAL_TRIP_CONFIGURABLE_LOW;
+		hi_thresh->notify = low_thresh->notify = hotplug_notify;
+		hi_thresh->data = low_thresh->data = (void *)&cpus[cpu];
+
+		sensor_mgr_set_threshold(cpus[cpu].sensor_id, hi_thresh);
+	}
+init_kthread:
+	init_completion(&hotplug_notify_complete);
+	hotplug_task = kthread_run(do_hotplug, NULL, "msm_thermal:hotplug");
+	if (IS_ERR(hotplug_task)) {
+		pr_err("Failed to create do_hotplug thread. err:%ld\n",
+				PTR_ERR(hotplug_task));
+		return;
+	}
+	/*
+	 * Adjust the cpus offlined bits when hotplug initializes so that
+	 * the new offlined state is based on the hotplug threshold range.
+	 */
+	if (hotplug_init_cpu_offlined())
+		kthread_stop(hotplug_task);
+}
+
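+/*
+ * Frequency mitigation worker thread: on every completion it recomputes
+ * each CPU's max/min frequency request from the thermal cap, userspace,
+ * shutdown/suspend limits and device manager clients, notifies those
+ * clients of the aggregate, and re-arms the per-CPU thresholds.
+ */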
+static __ref int do_freq_mitigation(void *data)
+{
+	int ret = 0;
+	uint32_t cpu = 0, max_freq_req = 0, min_freq_req = 0;
+	struct sched_param param = {.sched_priority = MAX_RT_PRIO-1};
+	struct device_clnt_data *clnt = NULL;
+	struct device_manager_data *cpu_dev = NULL;
+	uint32_t changed;
+
+	sched_setscheduler(current, SCHED_FIFO, &param);
+	while (!kthread_should_stop()) {
+		while (wait_for_completion_interruptible(
+			&freq_mitigation_complete) != 0)
+			;
+		reinit_completion(&freq_mitigation_complete);
+
+		for_each_possible_cpu(cpu) {
+			max_freq_req = (cpus[cpu].max_freq) ?
+					msm_thermal_info.freq_limit :
+					UINT_MAX;
+			max_freq_req = min(max_freq_req,
+					cpus[cpu].user_max_freq);
+
+			max_freq_req = min(max_freq_req,
+					cpus[cpu].shutdown_max_freq);
+
+			max_freq_req = min(max_freq_req,
+					cpus[cpu].suspend_max_freq);
+
+			if (devices && devices->cpufreq_dev[cpu]) {
+				cpu_dev = devices->cpufreq_dev[cpu];
+				mutex_lock(&cpu_dev->clnt_lock);
+				max_freq_req = min(max_freq_req,
+					cpu_dev->active_req.freq.max_freq);
+				min_freq_req =
+					cpu_dev->active_req.freq.min_freq;
+				mutex_unlock(&cpu_dev->clnt_lock);
+			}
+
+			if ((max_freq_req == cpus[cpu].limited_max_freq)
+				&& (min_freq_req ==
+				cpus[cpu].limited_min_freq))
+				goto reset_threshold;
+
+			changed = 0;
+			if (max_freq_req != cpus[cpu].limited_max_freq)
+				changed |= FREQ_LIMIT_MAX;
+			if (min_freq_req != cpus[cpu].limited_min_freq)
+				changed |= FREQ_LIMIT_MIN;
+
+			cpus[cpu].limited_max_freq = max_freq_req;
+			cpus[cpu].limited_min_freq = min_freq_req;
+reset_threshold:
+			if (devices && devices->cpufreq_dev[cpu]) {
+				union device_request req;
+
+				req.freq.max_freq = max_freq_req;
+				req.freq.min_freq = min_freq_req;
+				cpu_dev = devices->cpufreq_dev[cpu];
+				mutex_lock(&cpu_dev->clnt_lock);
+				list_for_each_entry(clnt,
+					&cpu_dev->client_list,
+					clnt_ptr) {
+					if (clnt->callback)
+						clnt->callback(clnt,
+							&req,
+							clnt->usr_data);
+				}
+				mutex_unlock(&cpu_dev->clnt_lock);
+			}
+			if (freq_mitigation_enabled &&
+				cpus[cpu].freq_thresh_clear) {
+				ret =
+				sensor_mgr_set_threshold(cpus[cpu].sensor_id,
+				&cpus[cpu].threshold[FREQ_THRESHOLD_HIGH]);
+
+				if (cpus[cpu].max_freq
+					&& !IS_LOW_THRESHOLD_SET(ret)) {
+					cpus[cpu].max_freq = false;
+					complete(&freq_mitigation_complete);
+				}
+				cpus[cpu].freq_thresh_clear = false;
+			}
+		}
+		update_cluster_freq();
+	}
+	return ret;
+}
+
+static int freq_mitigation_notify(enum thermal_trip_type type,
+	int temp, void *data)
+{
+	struct cpu_info *cpu_node = (struct cpu_info *) data;
+
+	pr_debug("%s reached temp threshold: %d\n",
+		cpu_node->sensor_type, temp);
+
+	if (!(msm_thermal_info.freq_mitig_control_mask &
+		BIT(cpu_node->cpu)))
+		return 0;
+
+	switch (type) {
+	case THERMAL_TRIP_CONFIGURABLE_HI:
+		if (!cpu_node->max_freq) {
+			pr_info_ratelimited(
+				"Mitigating CPU%d frequency to %d\n",
+				cpu_node->cpu, msm_thermal_info.freq_limit);
+
+			cpu_node->max_freq = true;
+		}
+		break;
+	case THERMAL_TRIP_CONFIGURABLE_LOW:
+		if (cpu_node->max_freq) {
+			pr_info_ratelimited(
+				"Removing frequency mitigation for CPU%d\n",
+				cpu_node->cpu);
+
+			cpu_node->max_freq = false;
+		}
+		break;
+	default:
+		break;
+	}
+
+	if (freq_mitigation_task) {
+		cpu_node->freq_thresh_clear = true;
+		complete(&freq_mitigation_complete);
+	} else {
+		pr_err("Frequency mitigation task is not initialized\n");
+	}
+
+	return 0;
+}
+
+static void freq_mitigation_init(void)
+{
+	uint32_t cpu = 0;
+	struct sensor_threshold *hi_thresh = NULL, *low_thresh = NULL;
+
+	if (freq_mitigation_task)
+		return;
+	if (!freq_mitigation_enabled)
+		goto init_freq_thread;
+
+	for_each_possible_cpu(cpu) {
+		/*
+		 * Hotplug may not be enabled,
+		 * make sure core sensor id is initialized.
+		 */
+		cpus[cpu].sensor_id =
+			sensor_get_id((char *)cpus[cpu].sensor_type);
+		cpus[cpu].id_type = THERM_ZONE_ID;
+		if (!(msm_thermal_info.freq_mitig_control_mask & BIT(cpu)))
+			continue;
+		hi_thresh = &cpus[cpu].threshold[FREQ_THRESHOLD_HIGH];
+		low_thresh = &cpus[cpu].threshold[FREQ_THRESHOLD_LOW];
+
+		hi_thresh->temp = msm_thermal_info.freq_mitig_temp_degc
+				* tsens_scaling_factor;
+		hi_thresh->trip = THERMAL_TRIP_CONFIGURABLE_HI;
+		low_thresh->temp = (msm_thermal_info.freq_mitig_temp_degc -
+			msm_thermal_info.freq_mitig_temp_hysteresis_degc)
+				* tsens_scaling_factor;
+		low_thresh->trip = THERMAL_TRIP_CONFIGURABLE_LOW;
+		hi_thresh->notify = low_thresh->notify =
+			freq_mitigation_notify;
+		hi_thresh->data = low_thresh->data = (void *)&cpus[cpu];
+
+		sensor_mgr_set_threshold(cpus[cpu].sensor_id, hi_thresh);
+	}
+init_freq_thread:
+	init_completion(&freq_mitigation_complete);
+	freq_mitigation_task = kthread_run(do_freq_mitigation, NULL,
+		"msm_thermal:freq_mitig");
+
+	if (IS_ERR(freq_mitigation_task)) {
+		pr_err("Failed to create frequency mitigation thread. err:%ld\n",
+				PTR_ERR(freq_mitigation_task));
+		return;
+	} else {
+		complete(&freq_mitigation_complete);
+	}
+}
+
+int msm_thermal_get_freq_plan_size(uint32_t cluster, unsigned int *table_len)
+{
+	uint32_t i = 0;
+	struct cluster_info *cluster_ptr = NULL;
+
+	if (!core_ptr) {
+		pr_err("Topology ptr not initialized\n");
+		return -ENODEV;
+	}
+	if (!table_len) {
+		pr_err("Invalid input\n");
+		return -EINVAL;
+	}
+	if (!freq_table_get)
+		check_freq_table();
+
+	for (; i < core_ptr->entity_count; i++) {
+		cluster_ptr = &core_ptr->child_entity_ptr[i];
+		if (cluster_ptr->cluster_id == cluster) {
+			if (!cluster_ptr->freq_table) {
+				pr_err("Cluster%d clock plan not initialized\n",
+						cluster);
+				return -EINVAL;
+			}
+			*table_len = cluster_ptr->freq_idx_high + 1;
+			return 0;
+		}
+	}
+
+	pr_err("Invalid cluster ID:%d\n", cluster);
+	return -EINVAL;
+}
+
+int msm_thermal_get_cluster_voltage_plan(uint32_t cluster, uint32_t *table_ptr)
+{
+	int i = 0, corner = 0;
+	struct dev_pm_opp *opp = NULL;
+	unsigned int table_len = 0;
+	struct device *cpu_dev = NULL;
+	struct cluster_info *cluster_ptr = NULL;
+
+	if (!core_ptr) {
+		pr_err("Topology ptr not initialized\n");
+		return -ENODEV;
+	}
+	if (!table_ptr) {
+		pr_err("Invalid input\n");
+		return -EINVAL;
+	}
+	if (!freq_table_get)
+		check_freq_table();
+
+	for (i = 0; i < core_ptr->entity_count; i++) {
+		cluster_ptr = &core_ptr->child_entity_ptr[i];
+		if (cluster_ptr->cluster_id == cluster)
+			break;
+	}
+	if (i == core_ptr->entity_count) {
+		pr_err("Invalid cluster ID:%d\n", cluster);
+		return -EINVAL;
+	}
+	if (!cluster_ptr->freq_table) {
+		pr_err("Cluster%d clock plan not initialized\n", cluster);
+		return -EINVAL;
+	}
+
+	cpu_dev = get_cpu_device(cpumask_first(&cluster_ptr->cluster_cores));
+	table_len = cluster_ptr->freq_idx_high + 1;
+
+	rcu_read_lock();
+	for (i = 0; i < table_len; i++) {
+		opp = dev_pm_opp_find_freq_exact(cpu_dev,
+			cluster_ptr->freq_table[i].frequency * 1000, true);
+		if (IS_ERR(opp)) {
+			pr_err("Error on OPP freq :%d\n",
+				cluster_ptr->freq_table[i].frequency);
+			rcu_read_unlock();
+			return -EINVAL;
+		}
+		corner = dev_pm_opp_get_voltage(opp);
+		if (corner == 0) {
+			pr_err("Bad voltage corner for OPP freq :%d\n",
+				cluster_ptr->freq_table[i].frequency);
+			rcu_read_unlock();
+			return -EINVAL;
+		}
+		table_ptr[i] = corner / 1000;
+		pr_debug("Cluster:%d freq:%d Khz voltage:%d mV\n",
+			cluster, cluster_ptr->freq_table[i].frequency,
+			table_ptr[i]);
+	}
+	rcu_read_unlock();
+
+	return 0;
+}
+
+int msm_thermal_get_cluster_freq_plan(uint32_t cluster, unsigned int *table_ptr)
+{
+	uint32_t i = 0;
+	struct cluster_info *cluster_ptr = NULL;
+
+	if (!core_ptr) {
+		pr_err("Topology ptr not initialized\n");
+		return -ENODEV;
+	}
+	if (!table_ptr) {
+		pr_err("Invalid input\n");
+		return -EINVAL;
+	}
+	if (!freq_table_get)
+		check_freq_table();
+
+	for (; i < core_ptr->entity_count; i++) {
+		cluster_ptr = &core_ptr->child_entity_ptr[i];
+		if (cluster_ptr->cluster_id == cluster)
+			break;
+	}
+	if (i == core_ptr->entity_count) {
+		pr_err("Invalid cluster ID:%d\n", cluster);
+		return -EINVAL;
+	}
+	if (!cluster_ptr->freq_table) {
+		pr_err("Cluster%d clock plan not initialized\n", cluster);
+		return -EINVAL;
+	}
+
+	for (i = 0; i <= cluster_ptr->freq_idx_high; i++)
+		table_ptr[i] = cluster_ptr->freq_table[i].frequency;
+
+	return 0;
+}
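+
+/*
+ * Illustrative usage sketch for the two APIs above (not part of the
+ * driver; caller and cluster id 0 are hypothetical): size the clock plan
+ * first, then fetch it.
+ *
+ *	unsigned int len = 0, *table;
+ *
+ *	if (!msm_thermal_get_freq_plan_size(0, &len)) {
+ *		table = kcalloc(len, sizeof(*table), GFP_KERNEL);
+ *		if (table && !msm_thermal_get_cluster_freq_plan(0, table))
+ *			pr_debug("lowest freq:%u kHz\n", table[0]);
+ *	}
+ */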
+
+int msm_thermal_set_cluster_freq(uint32_t cluster, uint32_t freq, bool is_max)
+{
+	int ret = 0;
+	uint32_t i = 0;
+	struct cluster_info *cluster_ptr = NULL;
+	bool notify = false;
+
+	if (!mitigation) {
+		pr_err("Thermal Mitigations disabled.\n");
+		return -ENODEV;
+	}
+
+	if (!core_ptr) {
+		pr_err("Topology ptr not initialized\n");
+		return -ENODEV;
+	}
+
+	for (; i < core_ptr->entity_count; i++) {
+		cluster_ptr = &core_ptr->child_entity_ptr[i];
+		if (cluster_ptr->cluster_id != cluster)
+			continue;
+		pr_debug("Update Cluster%d %s frequency to %d\n",
+			cluster, (is_max) ? "max" : "min", freq);
+		break;
+	}
+	if (i == core_ptr->entity_count) {
+		pr_err("Invalid cluster ID:%d\n", cluster);
+		return -EINVAL;
+	}
+
+	for_each_cpu(i, &cluster_ptr->cluster_cores) {
+		uint32_t *freq_ptr = (is_max) ? &cpus[i].user_max_freq
+					: &cpus[i].user_min_freq;
+		if (*freq_ptr == freq)
+			continue;
+		notify = true;
+		*freq_ptr = freq;
+	}
+
+	if (freq_mitigation_task) {
+		if (notify)
+			complete(&freq_mitigation_complete);
+	} else {
+		pr_err("Frequency mitigation task is not initialized\n");
+		return -ESRCH;
+	}
+
+	return ret;
+}
+
+int msm_thermal_set_frequency(uint32_t cpu, uint32_t freq, bool is_max)
+{
+	int ret = 0;
+
+	if (!mitigation) {
+		pr_err("Thermal Mitigations disabled.\n");
+		goto set_freq_exit;
+	}
+
+	if (cpu >= num_possible_cpus()) {
+		pr_err("Invalid input\n");
+		ret = -EINVAL;
+		goto set_freq_exit;
+	}
+
+	pr_debug("Userspace requested %s frequency %u for CPU%u\n",
+			(is_max) ? "Max" : "Min", freq, cpu);
+	if (is_max) {
+		if (cpus[cpu].user_max_freq == freq)
+			goto set_freq_exit;
+
+		cpus[cpu].user_max_freq = freq;
+	} else {
+		if (cpus[cpu].user_min_freq == freq)
+			goto set_freq_exit;
+
+		cpus[cpu].user_min_freq = freq;
+	}
+
+	if (freq_mitigation_task) {
+		complete(&freq_mitigation_complete);
+	} else {
+		pr_err("Frequency mitigation task is not initialized\n");
+		ret = -ESRCH;
+		goto set_freq_exit;
+	}
+
+set_freq_exit:
+	return ret;
+}
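+
+/*
+ * Illustrative only: a client capping CPU0 at a hypothetical 1036800 kHz
+ * would call
+ *
+ *	msm_thermal_set_frequency(0, 1036800, true);
+ *
+ * and later restore it with msm_thermal_set_frequency(0, UINT_MAX, true),
+ * since UINT_MAX means "no max frequency mitigation" here.
+ */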
+
+int therm_set_threshold(struct threshold_info *thresh_inp)
+{
+	int ret = 0, i = 0, err = 0;
+	struct therm_threshold *thresh_ptr;
+
+	if (!thresh_inp) {
+		pr_err("Invalid input\n");
+		ret = -EINVAL;
+		goto therm_set_exit;
+	}
+
+	thresh_inp->thresh_triggered = false;
+	for (i = 0; i < thresh_inp->thresh_ct; i++) {
+		thresh_ptr = &thresh_inp->thresh_list[i];
+		thresh_ptr->trip_triggered = -1;
+		err = sensor_mgr_set_threshold(thresh_ptr->sensor_id,
+			thresh_ptr->threshold);
+		if (err < 0) {
+			ret = err;
+			err = 0;
+		}
+	}
+
+therm_set_exit:
+	return ret;
+}
+
+static void cx_phase_ctrl_notify(struct therm_threshold *trig_thresh)
+{
+	static uint32_t cx_sens_status;
+	int ret = 0;
+
+	if (!cx_phase_ctrl_enabled)
+		return;
+
+	if (trig_thresh->trip_triggered < 0)
+		goto cx_phase_ctrl_exit;
+
+	mutex_lock(&cx_mutex);
+	pr_debug("sensor:%d reached %s thresh for CX\n",
+		tsens_id_map[trig_thresh->sensor_id],
+		(trig_thresh->trip_triggered == THERMAL_TRIP_CONFIGURABLE_HI) ?
+		"hot critical" : "warm");
+
+	switch (trig_thresh->trip_triggered) {
+	case THERMAL_TRIP_CONFIGURABLE_HI:
+		cx_sens_status |= BIT(trig_thresh->sensor_id);
+		break;
+	case THERMAL_TRIP_CONFIGURABLE_LOW:
+		if (cx_sens_status & BIT(trig_thresh->sensor_id))
+			cx_sens_status ^= BIT(trig_thresh->sensor_id);
+		break;
+	default:
+		pr_err("Unsupported trip type\n");
+		goto cx_phase_unlock_exit;
+	}
+
+	if ((cx_sens_status && (curr_cx_band == MSM_HOT_CRITICAL)) ||
+		(!cx_sens_status && (curr_cx_band == MSM_WARM)))
+		goto cx_phase_unlock_exit;
+	ret = send_temperature_band(MSM_CX_PHASE_CTRL, (cx_sens_status) ?
+		MSM_HOT_CRITICAL : MSM_WARM);
+	if (!ret)
+		curr_cx_band = (cx_sens_status) ? MSM_HOT_CRITICAL : MSM_WARM;
+
+cx_phase_unlock_exit:
+	mutex_unlock(&cx_mutex);
+cx_phase_ctrl_exit:
+	if (trig_thresh->cur_state != trig_thresh->trip_triggered) {
+		sensor_mgr_set_threshold(trig_thresh->sensor_id,
+					trig_thresh->threshold);
+		trig_thresh->cur_state = trig_thresh->trip_triggered;
+	}
+	return;
+}
+
+static void gfx_phase_ctrl_notify(struct therm_threshold *trig_thresh)
+{
+	uint32_t new_req_band = curr_gfx_band;
+	int ret = 0;
+
+	if (!gfx_warm_phase_ctrl_enabled && !gfx_crit_phase_ctrl_enabled)
+		return;
+
+	if (trig_thresh->trip_triggered < 0)
+		goto gfx_phase_ctrl_exit;
+
+	mutex_lock(&gfx_mutex);
+	if (gfx_crit_phase_ctrl_enabled) {
+		switch (
+		thresh[MSM_GFX_PHASE_CTRL_HOT].thresh_list->trip_triggered) {
+		case THERMAL_TRIP_CONFIGURABLE_HI:
+			new_req_band = MSM_HOT_CRITICAL;
+			pr_debug(
+			"sensor:%d reached hot critical thresh for GFX\n",
+				tsens_id_map[trig_thresh->sensor_id]);
+			goto notify_new_band;
+		case THERMAL_TRIP_CONFIGURABLE_LOW:
+			new_req_band = MSM_WARM;
+			pr_debug("sensor:%d reached warm thresh for GFX\n",
+			tsens_id_map[trig_thresh->sensor_id]);
+			goto notify_new_band;
+		default:
+			break;
+		}
+	}
+	if (gfx_warm_phase_ctrl_enabled) {
+		switch (
+		thresh[MSM_GFX_PHASE_CTRL_WARM].thresh_list->trip_triggered) {
+		case THERMAL_TRIP_CONFIGURABLE_HI:
+			new_req_band = MSM_WARM;
+			pr_debug("sensor:%d reached warm thresh for GFX\n",
+				tsens_id_map[trig_thresh->sensor_id]);
+			goto notify_new_band;
+		case THERMAL_TRIP_CONFIGURABLE_LOW:
+			new_req_band = MSM_NORMAL;
+			pr_debug("sensor:%d reached normal thresh for GFX\n",
+				tsens_id_map[trig_thresh->sensor_id]);
+			goto notify_new_band;
+		default:
+			break;
+		}
+	}
+
+notify_new_band:
+	if (new_req_band != curr_gfx_band) {
+		ret = send_temperature_band(MSM_GFX_PHASE_CTRL, new_req_band);
+		if (!ret)
+			curr_gfx_band = new_req_band;
+	}
+	mutex_unlock(&gfx_mutex);
+gfx_phase_ctrl_exit:
+	switch (curr_gfx_band) {
+	case MSM_HOT_CRITICAL:
+		if (gfx_crit_phase_ctrl_enabled)
+			therm_set_threshold(&thresh[MSM_GFX_PHASE_CTRL_HOT]);
+		break;
+	case MSM_NORMAL:
+		if (gfx_warm_phase_ctrl_enabled)
+			therm_set_threshold(&thresh[MSM_GFX_PHASE_CTRL_WARM]);
+		break;
+	case MSM_WARM:
+	default:
+		if (gfx_crit_phase_ctrl_enabled)
+			therm_set_threshold(&thresh[MSM_GFX_PHASE_CTRL_HOT]);
+		if (gfx_warm_phase_ctrl_enabled)
+			therm_set_threshold(&thresh[MSM_GFX_PHASE_CTRL_WARM]);
+		break;
+	}
+	return;
+}
+
+static void vdd_restriction_notify(struct therm_threshold *trig_thresh)
+{
+	int ret = 0;
+	static uint32_t vdd_sens_status;
+
+	if (!vdd_rstr_enabled)
+		return;
+	if (!trig_thresh) {
+		pr_err("Invalid input\n");
+		return;
+	}
+	if (trig_thresh->trip_triggered < 0)
+		goto set_and_exit;
+
+	mutex_lock(&vdd_rstr_mutex);
+	pr_debug("sensor:%d reached %s thresh for Vdd restriction\n",
+		tsens_id_map[trig_thresh->sensor_id],
+		(trig_thresh->trip_triggered == THERMAL_TRIP_CONFIGURABLE_HI) ?
+		"high" : "low");
+	switch (trig_thresh->trip_triggered) {
+	case THERMAL_TRIP_CONFIGURABLE_HI:
+		if (vdd_sens_status & BIT(trig_thresh->sensor_id))
+			vdd_sens_status ^= BIT(trig_thresh->sensor_id);
+		break;
+	case THERMAL_TRIP_CONFIGURABLE_LOW:
+		vdd_sens_status |= BIT(trig_thresh->sensor_id);
+		break;
+	default:
+		pr_err("Unsupported trip type\n");
+		goto unlock_and_exit;
+	}
+
+	ret = vdd_restriction_apply_all((vdd_sens_status) ? 1 : 0);
+	if (ret) {
+		pr_err("%s vdd rstr voltage for all failed\n",
+			(vdd_sens_status) ?
+			"Enable" : "Disable");
+		goto unlock_and_exit;
+	}
+
+unlock_and_exit:
+	mutex_unlock(&vdd_rstr_mutex);
+set_and_exit:
+	if (trig_thresh->cur_state != trig_thresh->trip_triggered) {
+		sensor_mgr_set_threshold(trig_thresh->sensor_id,
+					trig_thresh->threshold);
+		trig_thresh->cur_state = trig_thresh->trip_triggered;
+	}
+	return;
+}
+
+static void ocr_notify(struct therm_threshold *trig_thresh)
+{
+	int ret = 0;
+	static uint32_t ocr_sens_status;
+
+	if (!ocr_enabled)
+		return;
+	if (!trig_thresh) {
+		pr_err("Invalid input\n");
+		return;
+	}
+	if (trig_thresh->trip_triggered < 0)
+		goto set_and_exit;
+
+	mutex_lock(&ocr_mutex);
+	pr_debug("sensor%d reached %d thresh for Optimum current request\n",
+		tsens_id_map[trig_thresh->sensor_id],
+		trig_thresh->trip_triggered);
+	switch (trig_thresh->trip_triggered) {
+	case THERMAL_TRIP_CONFIGURABLE_HI:
+		ocr_sens_status |= BIT(trig_thresh->sensor_id);
+		break;
+	case THERMAL_TRIP_CONFIGURABLE_LOW:
+		if (ocr_sens_status & BIT(trig_thresh->sensor_id))
+			ocr_sens_status ^= BIT(trig_thresh->sensor_id);
+		break;
+	default:
+		pr_err("Unsupported trip type\n");
+		goto unlock_and_exit;
+	}
+
+	ret = ocr_set_mode_all(ocr_sens_status ? OPTIMUM_CURRENT_MAX :
+				OPTIMUM_CURRENT_MIN);
+	if (ret) {
+		pr_err("%s Optimum current mode for all failed. err:%d\n",
+			(ocr_sens_status) ?
+			"Enable" : "Disable", ret);
+		goto unlock_and_exit;
+	}
+
+unlock_and_exit:
+	mutex_unlock(&ocr_mutex);
+set_and_exit:
+	if (trig_thresh->cur_state != trig_thresh->trip_triggered) {
+		sensor_mgr_set_threshold(trig_thresh->sensor_id,
+				trig_thresh->threshold);
+		trig_thresh->cur_state = trig_thresh->trip_triggered;
+	}
+	return;
+}
+
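+/*
+ * Monitor thread: the trip callbacks (msm_thermal_notify) only record the
+ * triggered trip and signal thermal_monitor_complete; this kthread then
+ * walks thresholds_list and runs each triggered sensor's notify handler in
+ * process context, where taking mutexes and re-arming thresholds is safe.
+ */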
+static __ref int do_thermal_monitor(void *data)
+{
+	int ret = 0, j;
+	struct therm_threshold *sensor_list;
+	struct threshold_info *thresholds = NULL;
+
+	while (!kthread_should_stop()) {
+		while (wait_for_completion_interruptible(
+			&thermal_monitor_complete) != 0)
+			;
+		reinit_completion(&thermal_monitor_complete);
+
+		mutex_lock(&threshold_mutex);
+		list_for_each_entry(thresholds, &thresholds_list, list_ptr) {
+			if (!thresholds->thresh_triggered)
+				continue;
+			thresholds->thresh_triggered = false;
+			for (j = 0; j < thresholds->thresh_ct; j++) {
+				sensor_list = &thresholds->thresh_list[j];
+				if (sensor_list->trip_triggered < 0)
+					continue;
+				sensor_list->notify(sensor_list);
+				sensor_list->trip_triggered = -1;
+			}
+		}
+		mutex_unlock(&threshold_mutex);
+	}
+	return ret;
+}
+
+static int vdd_rstr_apss_freq_dev_init(void)
+{
+	int idx = 0, ret = 0;
+	char device_str[DEVM_NAME_MAX] = "";
+	struct rail *r = NULL;
+
+	for (idx = 0; idx < rails_cnt; idx++) {
+		if (rails[idx].freq_req) {
+			r = &rails[idx];
+			break;
+		}
+	}
+	if (!r) {
+		pr_err("APSS rail not initialized\n");
+		return -ENODEV;
+	}
+
+	for_each_possible_cpu(idx) {
+		if (r->device_handle[idx])
+			continue;
+		snprintf(device_str, DEVM_NAME_MAX, CPU_DEVICE, idx);
+		r->device_handle[idx]
+			= devmgr_register_mitigation_client(
+				&msm_thermal_info.pdev->dev,
+				device_str, NULL);
+		if (IS_ERR(r->device_handle[idx])) {
+			ret = PTR_ERR(r->device_handle[idx]);
+			pr_err("Error registering %s handle. err:%d\n",
+				device_str, ret);
+			r->device_handle[idx] = NULL;
+			goto freq_init_exit;
+		}
+		r->request[idx].freq.max_freq = CPUFREQ_MAX_NO_MITIGATION;
+		r->request[idx].freq.min_freq = CPUFREQ_MIN_NO_MITIGATION;
+	}
+
+freq_init_exit:
+	if (ret) {
+		for_each_possible_cpu(idx) {
+			devmgr_unregister_mitigation_client(
+				&msm_thermal_info.pdev->dev,
+				r->device_handle[idx]);
+			r->device_handle[idx] = NULL;
+		}
+	}
+	return ret;
+}
+
+static int convert_to_zone_id(struct threshold_info *thresh_inp)
+{
+	int ret = 0, i, zone_id;
+	struct therm_threshold *thresh_array;
+
+	if (!thresh_inp) {
+		pr_err("Invalid input\n");
+		ret = -EINVAL;
+		goto convert_to_exit;
+	}
+	thresh_array = thresh_inp->thresh_list;
+
+	for (i = 0; i < thresh_inp->thresh_ct; i++) {
+		char tsens_name[TSENS_NAME_MAX] = "";
+
+		if (thresh_array[i].id_type == THERM_ZONE_ID)
+			continue;
+		snprintf(tsens_name, TSENS_NAME_MAX, TSENS_NAME_FORMAT,
+			thresh_array[i].sensor_id);
+		zone_id = sensor_get_id(tsens_name);
+		if (zone_id < 0) {
+			pr_err("Error getting zone id for %s. err:%d\n",
+				tsens_name, ret);
+			ret = zone_id;
+			goto convert_to_exit;
+		}
+		thresh_array[i].sensor_id = zone_id;
+		thresh_array[i].id_type = THERM_ZONE_ID;
+	}
+
+convert_to_exit:
+	return ret;
+}
+
+int sensor_mgr_convert_id_and_set_threshold(struct threshold_info *thresh_inp)
+{
+	int ret = 0;
+
+	if (!thresh_inp) {
+		pr_err("Invalid input\n");
+		ret = -EINVAL;
+		goto therm_set_exit;
+	}
+	ret = convert_to_zone_id(thresh_inp);
+	if (ret)
+		goto therm_set_exit;
+	ret = therm_set_threshold(thresh_inp);
+
+therm_set_exit:
+	return ret;
+}
+
+static void thermal_monitor_init(void)
+{
+	if (thermal_monitor_task)
+		return;
+
+	init_completion(&thermal_monitor_complete);
+	thermal_monitor_task = kthread_run(do_thermal_monitor, NULL,
+		"msm_thermal:therm_monitor");
+	if (IS_ERR(thermal_monitor_task)) {
+		pr_err("Failed to create thermal monitor thread. err:%ld\n",
+				PTR_ERR(thermal_monitor_task));
+		goto init_exit;
+	}
+
+	if (therm_reset_enabled &&
+		!(convert_to_zone_id(&thresh[MSM_THERM_RESET])))
+		therm_set_threshold(&thresh[MSM_THERM_RESET]);
+
+	if ((cx_phase_ctrl_enabled) &&
+		!(convert_to_zone_id(&thresh[MSM_CX_PHASE_CTRL_HOT])))
+		therm_set_threshold(&thresh[MSM_CX_PHASE_CTRL_HOT]);
+
+	if (vdd_rstr_enabled) {
+		if (vdd_rstr_apss_freq_dev_init())
+			pr_err("vdd APSS mitigation device init failed\n");
+		else if (!(convert_to_zone_id(&thresh[MSM_VDD_RESTRICTION])))
+			therm_set_threshold(&thresh[MSM_VDD_RESTRICTION]);
+	}
+
+	if ((gfx_warm_phase_ctrl_enabled) &&
+		!(convert_to_zone_id(&thresh[MSM_GFX_PHASE_CTRL_WARM]))) {
+		therm_set_threshold(&thresh[MSM_GFX_PHASE_CTRL_WARM]);
+	}
+
+	if ((gfx_crit_phase_ctrl_enabled) &&
+		!(convert_to_zone_id(&thresh[MSM_GFX_PHASE_CTRL_HOT]))) {
+		therm_set_threshold(&thresh[MSM_GFX_PHASE_CTRL_HOT]);
+	}
+
+	if ((ocr_enabled) &&
+		!(convert_to_zone_id(&thresh[MSM_OCR])))
+		therm_set_threshold(&thresh[MSM_OCR]);
+
+	if (vdd_mx_enabled &&
+		!(convert_to_zone_id(&thresh[MSM_VDD_MX_RESTRICTION])))
+		therm_set_threshold(&thresh[MSM_VDD_MX_RESTRICTION]);
+
+	if (cxip_lm_enabled &&
+		!(convert_to_zone_id(&thresh[MSM_THERM_CXIP_LM]))) {
+		/* To handle if temp > HIGH */
+		do_cxip_lm();
+		therm_set_threshold(&thresh[MSM_THERM_CXIP_LM]);
+	}
+
+init_exit:
+	return;
+}
+
+static int msm_thermal_notify(enum thermal_trip_type type, int temp, void *data)
+{
+	struct therm_threshold *thresh_data = (struct therm_threshold *)data;
+
+	if (thermal_monitor_task) {
+		thresh_data->trip_triggered = type;
+		thresh_data->parent->thresh_triggered = true;
+		complete(&thermal_monitor_complete);
+	} else {
+		pr_err("Thermal monitor task is not initialized\n");
+	}
+	return 0;
+}
+
+int sensor_mgr_init_threshold(struct threshold_info *thresh_inp,
+	int sensor_id, int32_t high_temp, int32_t low_temp,
+	void (*callback)(struct therm_threshold *))
+{
+	int ret = 0, i;
+	struct therm_threshold *thresh_ptr;
+
+	if (!callback || !thresh_inp
+		|| sensor_id == -ENODEV) {
+		pr_err("Invalid input\n");
+		ret = -EINVAL;
+		goto init_thresh_exit;
+	}
+	if (thresh_inp->thresh_list) {
+		pr_info("threshold id already initialized\n");
+		goto init_thresh_exit;
+	}
+
+	thresh_inp->thresh_ct = (sensor_id == MONITOR_ALL_TSENS) ?
+						max_tsens_num : 1;
+	thresh_inp->thresh_triggered = false;
+	thresh_inp->thresh_list = kzalloc(sizeof(struct therm_threshold) *
+					thresh_inp->thresh_ct, GFP_KERNEL);
+	if (!thresh_inp->thresh_list) {
+		pr_err("kzalloc failed for thresh\n");
+		ret = -ENOMEM;
+		goto init_thresh_exit;
+	}
+
+	thresh_ptr = thresh_inp->thresh_list;
+	if (sensor_id == MONITOR_ALL_TSENS) {
+		for (i = 0; i < max_tsens_num; i++) {
+			thresh_ptr[i].sensor_id = tsens_id_map[i];
+			thresh_ptr[i].id_type = THERM_TSENS_ID;
+			thresh_ptr[i].notify = callback;
+			thresh_ptr[i].trip_triggered = -1;
+			thresh_ptr[i].parent = thresh_inp;
+			thresh_ptr[i].cur_state = -1;
+			thresh_ptr[i].threshold[0].temp =
+				high_temp * tsens_scaling_factor;
+			thresh_ptr[i].threshold[0].trip =
+				THERMAL_TRIP_CONFIGURABLE_HI;
+			thresh_ptr[i].threshold[1].temp =
+				low_temp * tsens_scaling_factor;
+			thresh_ptr[i].threshold[1].trip =
+				THERMAL_TRIP_CONFIGURABLE_LOW;
+			thresh_ptr[i].threshold[0].notify =
+			thresh_ptr[i].threshold[1].notify = msm_thermal_notify;
+			thresh_ptr[i].threshold[0].data =
+			thresh_ptr[i].threshold[1].data =
+				(void *)&thresh_ptr[i];
+		}
+	} else {
+		thresh_ptr->sensor_id = sensor_id;
+		thresh_ptr->id_type = THERM_TSENS_ID;
+		thresh_ptr->notify = callback;
+		thresh_ptr->trip_triggered = -1;
+		thresh_ptr->parent = thresh_inp;
+		thresh_ptr->cur_state = -1;
+		thresh_ptr->threshold[0].temp = high_temp * tsens_scaling_factor;
+		thresh_ptr->threshold[0].trip =
+			THERMAL_TRIP_CONFIGURABLE_HI;
+		thresh_ptr->threshold[1].temp = low_temp * tsens_scaling_factor;
+		thresh_ptr->threshold[1].trip =
+			THERMAL_TRIP_CONFIGURABLE_LOW;
+		thresh_ptr->threshold[0].notify =
+		thresh_ptr->threshold[1].notify = msm_thermal_notify;
+		thresh_ptr->threshold[0].data =
+		thresh_ptr->threshold[1].data = (void *)thresh_ptr;
+	}
+	mutex_lock(&threshold_mutex);
+	list_add_tail(&thresh_inp->list_ptr, &thresholds_list);
+	mutex_unlock(&threshold_mutex);
+
+init_thresh_exit:
+	return ret;
+}
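+
+/*
+ * Illustrative usage sketch (temperatures are hypothetical): to monitor
+ * all TSENS sensors with a 95 degC high trip that clears at 90 degC:
+ *
+ *	ret = sensor_mgr_init_threshold(&thresh[MSM_OCR], MONITOR_ALL_TSENS,
+ *			95, 90, ocr_notify);
+ *	if (!ret)
+ *		ret = sensor_mgr_convert_id_and_set_threshold(
+ *				&thresh[MSM_OCR]);
+ */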
+
+void sensor_mgr_disable_threshold(struct threshold_info *thresh_inp)
+{
+	int i;
+	struct therm_threshold *thresh_ptr;
+
+	mutex_lock(&threshold_mutex);
+	for (i = 0; i < thresh_inp->thresh_ct; i++) {
+		thresh_ptr = &thresh_inp->thresh_list[i];
+		thresh_ptr->trip_triggered = -1;
+		sensor_cancel_trip(thresh_ptr->sensor_id,
+				&thresh_ptr->threshold[0]);
+		sensor_cancel_trip(thresh_ptr->sensor_id,
+				&thresh_ptr->threshold[1]);
+	}
+	thresh_inp->thresh_triggered = false;
+	mutex_unlock(&threshold_mutex);
+}
+
+void sensor_mgr_remove_threshold(struct threshold_info *thresh_inp)
+{
+	sensor_mgr_disable_threshold(thresh_inp);
+	mutex_lock(&threshold_mutex);
+	kfree(thresh_inp->thresh_list);
+	thresh_inp->thresh_list = NULL;
+	thresh_inp->thresh_ct = 0;
+	list_del(&thresh_inp->list_ptr);
+	mutex_unlock(&threshold_mutex);
+}
+
+static int msm_thermal_add_gfx_nodes(void)
+{
+	struct kobject *module_kobj = NULL;
+	struct kobject *gfx_kobj = NULL;
+	int ret = 0;
+
+	if (!gfx_warm_phase_ctrl_enabled && !gfx_crit_phase_ctrl_enabled)
+		return -EINVAL;
+
+	module_kobj = kset_find_obj(module_kset, KBUILD_MODNAME);
+	if (!module_kobj) {
+		pr_err("cannot find kobject\n");
+		ret = -ENOENT;
+		goto gfx_node_exit;
+	}
+
+	gfx_kobj = kobject_create_and_add("gfx_phase_ctrl", module_kobj);
+	if (!gfx_kobj) {
+		pr_err("cannot create gfx kobject\n");
+		ret = -ENOMEM;
+		goto gfx_node_exit;
+	}
+
+	gfx_attr_gp.attrs = kzalloc(sizeof(struct attribute *) * 2, GFP_KERNEL);
+	if (!gfx_attr_gp.attrs) {
+		pr_err("kzalloc failed\n");
+		ret = -ENOMEM;
+		goto gfx_node_fail;
+	}
+
+	PHASE_RW_ATTR(gfx, temp_band, gfx_mode_attr, 0, gfx_attr_gp);
+	gfx_attr_gp.attrs[1] = NULL;
+
+	ret = sysfs_create_group(gfx_kobj, &gfx_attr_gp);
+	if (ret) {
+		pr_err("cannot create GFX attribute group. err:%d\n", ret);
+		goto gfx_node_fail;
+	}
+
+gfx_node_fail:
+	if (ret) {
+		kobject_put(gfx_kobj);
+		kfree(gfx_attr_gp.attrs);
+		gfx_attr_gp.attrs = NULL;
+	}
+gfx_node_exit:
+	return ret;
+}
+
+static int msm_thermal_add_cx_nodes(void)
+{
+	struct kobject *module_kobj = NULL;
+	struct kobject *cx_kobj = NULL;
+	int ret = 0;
+
+	if (!cx_phase_ctrl_enabled)
+		return -EINVAL;
+
+	module_kobj = kset_find_obj(module_kset, KBUILD_MODNAME);
+	if (!module_kobj) {
+		pr_err("cannot find kobject\n");
+		ret = -ENOENT;
+		goto cx_node_exit;
+	}
+
+	cx_kobj = kobject_create_and_add("cx_phase_ctrl", module_kobj);
+	if (!cx_kobj) {
+		pr_err("cannot create cx kobject\n");
+		ret = -ENOMEM;
+		goto cx_node_exit;
+	}
+
+	cx_attr_gp.attrs = kzalloc(sizeof(struct attribute *) * 2, GFP_KERNEL);
+	if (!cx_attr_gp.attrs) {
+		pr_err("kzalloc failed\n");
+		ret = -ENOMEM;
+		goto cx_node_fail;
+	}
+
+	PHASE_RW_ATTR(cx, temp_band, cx_mode_attr, 0, cx_attr_gp);
+	cx_attr_gp.attrs[1] = NULL;
+
+	ret = sysfs_create_group(cx_kobj, &cx_attr_gp);
+	if (ret) {
+		pr_err("cannot create CX attribute group. err:%d\n", ret);
+		goto cx_node_fail;
+	}
+
+cx_node_fail:
+	if (ret) {
+		kobject_put(cx_kobj);
+		kfree(cx_attr_gp.attrs);
+		cx_attr_gp.attrs = NULL;
+	}
+cx_node_exit:
+	return ret;
+}
+
+/*
+ * We reset the cpu frequency limits here. The core online/offline status
+ * is carried over to the process stopping msm_thermal, as we don't want
+ * to online a core and reintroduce thermal issues.
+ */
+static void __ref disable_msm_thermal(void)
+{
+	uint32_t cpu = 0;
+
+	/* make sure check_temp is no longer running */
+	cancel_delayed_work_sync(&check_temp_work);
+
+	get_online_cpus();
+	for_each_possible_cpu(cpu) {
+		if (cpus[cpu].limited_max_freq == UINT_MAX &&
+			cpus[cpu].limited_min_freq == 0)
+			continue;
+		pr_info("Max frequency reset for CPU%d\n", cpu);
+		cpus[cpu].limited_max_freq = UINT_MAX;
+		cpus[cpu].vdd_max_freq = UINT_MAX;
+		cpus[cpu].limited_min_freq = 0;
+	}
+	update_cluster_freq();
+	put_online_cpus();
+}
+
+static void interrupt_mode_init(void)
+{
+	if (!msm_thermal_probed)
+		return;
+
+	if (polling_enabled) {
+		polling_enabled = 0;
+		create_sensor_zone_id_map();
+		disable_msm_thermal();
+		hotplug_init();
+		freq_mitigation_init();
+		thermal_monitor_init();
+		msm_thermal_add_cx_nodes();
+		msm_thermal_add_gfx_nodes();
+	}
+}
+
+static int __ref set_enabled(const char *val, const struct kernel_param *kp)
+{
+	int ret = 0;
+
+	ret = param_set_bool(val, kp);
+	if (!enabled)
+		interrupt_mode_init();
+	else
+		pr_info("no action for enabled = %d\n",
+			enabled);
+
+	pr_info("enabled = %d\n", enabled);
+
+	return ret;
+}
+
+static struct kernel_param_ops module_ops = {
+	.set = set_enabled,
+	.get = param_get_bool,
+};
+
+module_param_cb(enabled, &module_ops, &enabled, 0644);
+MODULE_PARM_DESC(enabled, "enforce thermal limit on cpu");
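+
+/*
+ * Illustrative only: assuming the module name is "msm_thermal", userspace
+ * can switch KTM from polling to interrupt mode at runtime with
+ *
+ *	echo 0 > /sys/module/msm_thermal/parameters/enabled
+ *
+ * which clears "enabled" and triggers interrupt_mode_init() via
+ * set_enabled().
+ */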
+
+static ssize_t show_cc_enabled(struct kobject *kobj,
+		struct kobj_attribute *attr, char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%d\n", core_control_enabled);
+}
+
+static ssize_t __ref store_cc_enabled(struct kobject *kobj,
+		struct kobj_attribute *attr, const char *buf, size_t count)
+{
+	int ret = 0;
+	int val = 0;
+	uint32_t cpu = 0;
+
+	if (!mitigation) {
+		pr_err("Thermal Mitigations disabled.\n");
+		goto done_store_cc;
+	}
+
+	ret = kstrtoint(buf, 10, &val);
+	if (ret) {
+		pr_err("Invalid input %s. err:%d\n", buf, ret);
+		goto done_store_cc;
+	}
+
+	if (core_control_enabled == !!val)
+		goto done_store_cc;
+
+	core_control_enabled = !!val;
+	if (core_control_enabled) {
+		pr_info("Core control enabled\n");
+		cpus_previously_online_update();
+		register_cpu_notifier(&msm_thermal_cpu_notifier);
+		/*
+		 * Re-evaluate thermal core condition, update current status
+		 * and set threshold for all cpus.
+		 */
+		hotplug_init_cpu_offlined();
+		mutex_lock(&core_control_mutex);
+		update_offline_cores(cpus_offlined);
+		if (hotplug_enabled && hotplug_task) {
+			for_each_possible_cpu(cpu) {
+				if (!(msm_thermal_info.core_control_mask &
+					BIT(cpus[cpu].cpu)))
+					continue;
+				sensor_mgr_set_threshold(cpus[cpu].sensor_id,
+				&cpus[cpu].threshold[HOTPLUG_THRESHOLD_HIGH]);
+			}
+		}
+		mutex_unlock(&core_control_mutex);
+	} else {
+		pr_info("Core control disabled\n");
+		unregister_cpu_notifier(&msm_thermal_cpu_notifier);
+	}
+
+done_store_cc:
+	return count;
+}
+
+static ssize_t show_cpus_offlined(struct kobject *kobj,
+		struct kobj_attribute *attr, char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%d\n", cpus_offlined);
+}
+
+static ssize_t __ref store_cpus_offlined(struct kobject *kobj,
+		struct kobj_attribute *attr, const char *buf, size_t count)
+{
+	int ret = 0;
+	uint32_t val = 0;
+	uint32_t cpu;
+
+	if (!mitigation) {
+		pr_err("Thermal Mitigations disabled.\n");
+		return count;
+	}
+	mutex_lock(&core_control_mutex);
+	ret = kstrtouint(buf, 10, &val);
+	if (ret) {
+		pr_err("Invalid input %s. err:%d\n", buf, ret);
+		goto done_cc;
+	}
+
+	if (polling_enabled) {
+		pr_err("Ignoring request; polling thread is enabled.\n");
+		goto done_cc;
+	}
+
+	for_each_possible_cpu(cpu) {
+		if (!(msm_thermal_info.core_control_mask & BIT(cpu)))
+			continue;
+		cpus[cpu].user_offline = !!(val & BIT(cpu));
+		pr_debug("\"%s\"(PID:%i) requests %s CPU%d.\n", current->comm,
+			current->pid, (cpus[cpu].user_offline) ? "offline" :
+			"online", cpu);
+	}
+
+	if (hotplug_task)
+		complete(&hotplug_notify_complete);
+	else
+		pr_err("Hotplug task is not initialized\n");
+done_cc:
+	mutex_unlock(&core_control_mutex);
+	return count;
+}
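+
+/*
+ * Illustrative only: "cpus_offlined" takes a CPU bitmask, so a
+ * hypothetical request to thermally offline CPU2 and CPU3 would be
+ *
+ *	echo 12 > /sys/module/msm_thermal/core_control/cpus_offlined
+ *
+ * (12 == BIT(2) | BIT(3)); CPUs outside core_control_mask are ignored.
+ */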
+
+static __refdata struct kobj_attribute cc_enabled_attr =
+__ATTR(enabled, 0644, show_cc_enabled, store_cc_enabled);
+
+static __refdata struct kobj_attribute cpus_offlined_attr =
+__ATTR(cpus_offlined, 0644, show_cpus_offlined, store_cpus_offlined);
+
+static __refdata struct attribute *cc_attrs[] = {
+	&cc_enabled_attr.attr,
+	&cpus_offlined_attr.attr,
+	NULL,
+};
+
+static __refdata struct attribute_group cc_attr_group = {
+	.attrs = cc_attrs,
+};
+
+static __init int msm_thermal_add_cc_nodes(void)
+{
+	struct kobject *module_kobj = NULL;
+	int ret = 0;
+
+	module_kobj = kset_find_obj(module_kset, KBUILD_MODNAME);
+	if (!module_kobj) {
+		pr_err("cannot find kobject\n");
+		ret = -ENOENT;
+		goto done_cc_nodes;
+	}
+
+	cc_kobj = kobject_create_and_add("core_control", module_kobj);
+	if (!cc_kobj) {
+		pr_err("cannot create core control kobj\n");
+		ret = -ENOMEM;
+		goto done_cc_nodes;
+	}
+
+	ret = sysfs_create_group(cc_kobj, &cc_attr_group);
+	if (ret) {
+		pr_err("cannot create sysfs group. err:%d\n", ret);
+		goto done_cc_nodes;
+	}
+
+	return 0;
+
+done_cc_nodes:
+	if (cc_kobj)
+		kobject_del(cc_kobj);
+	return ret;
+}
+
+static ssize_t show_mx_enabled(struct kobject *kobj,
+		struct kobj_attribute *attr, char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%d\n", vdd_mx_enabled);
+}
+
+static ssize_t __ref store_mx_enabled(struct kobject *kobj,
+		struct kobj_attribute *attr, const char *buf, size_t count)
+{
+	int ret = 0;
+	int val = 0;
+
+	ret = kstrtoint(buf, 10, &val);
+	if (ret) {
+		pr_err("Invalid input %s\n", buf);
+		goto done_store_mx;
+	}
+
+	if (vdd_mx_enabled == !!val)
+		goto done_store_mx;
+
+	vdd_mx_enabled = !!val;
+
+	mutex_lock(&vdd_mx_mutex);
+	if (!vdd_mx_enabled)
+		remove_vdd_mx_restriction();
+	else if (!(convert_to_zone_id(&thresh[MSM_VDD_MX_RESTRICTION])))
+		therm_set_threshold(&thresh[MSM_VDD_MX_RESTRICTION]);
+	mutex_unlock(&vdd_mx_mutex);
+
+done_store_mx:
+	return count;
+}
+
+static __init int msm_thermal_add_mx_nodes(void)
+{
+	struct kobject *module_kobj = NULL;
+	int ret = 0;
+
+	if (!vdd_mx_enabled)
+		return -EINVAL;
+
+	module_kobj = kset_find_obj(module_kset, KBUILD_MODNAME);
+	if (!module_kobj) {
+		pr_err("cannot find kobject for module\n");
+		ret = -ENOENT;
+		goto done_mx_nodes;
+	}
+
+	mx_kobj = kobject_create_and_add("vdd_mx", module_kobj);
+	if (!mx_kobj) {
+		pr_err("cannot create mx restriction kobj\n");
+		ret = -ENOMEM;
+		goto done_mx_nodes;
+	}
+
+	mx_attr_group.attrs = kzalloc(sizeof(struct attribute *) * 2,
+					GFP_KERNEL);
+	if (!mx_attr_group.attrs) {
+		ret = -ENOMEM;
+		pr_err("cannot allocate memory for mx_attr_group.attrs");
+		goto done_mx_nodes;
+	}
+
+	MX_RW_ATTR(mx_enabled_attr, enabled, mx_attr_group);
+	mx_attr_group.attrs[1] = NULL;
+
+	ret = sysfs_create_group(mx_kobj, &mx_attr_group);
+	if (ret) {
+		pr_err("cannot create group\n");
+		goto done_mx_nodes;
+	}
+
+done_mx_nodes:
+	if (ret) {
+		if (mx_kobj)
+			kobject_del(mx_kobj);
+		kfree(mx_attr_group.attrs);
+	}
+	return ret;
+}
+
+static void msm_thermal_panic_notifier_init(struct device *dev)
+{
+	int i;
+
+	tsens_temp_at_panic = devm_kzalloc(dev, sizeof(int) * max_tsens_num,
+				GFP_KERNEL);
+	if (!tsens_temp_at_panic) {
+		pr_err("kzalloc failed\n");
+		return;
+	}
+
+	for (i = 0; i < max_tsens_num; i++)
+		tsens_temp_at_panic[i] = INT_MIN;
+
+	atomic_notifier_chain_register(&panic_notifier_list,
+		&msm_thermal_panic_notifier);
+}
+
+static int msm_thermal_pre_init(struct device *dev)
+{
+	int ret = 0;
+
+	if (tsens_is_ready() <= 0) {
+		pr_err("Tsens driver is not ready yet\n");
+		return -EPROBE_DEFER;
+	}
+
+	ret = tsens_get_max_sensor_num(&max_tsens_num);
+	if (ret < 0) {
+		pr_err("failed to get max sensor number, err:%d\n", ret);
+		return ret;
+	}
+
+	if (create_sensor_id_map(dev)) {
+		pr_err("Creating sensor id map failed\n");
+		ret = -EINVAL;
+		goto pre_init_exit;
+	}
+
+	if (!thresh) {
+		thresh = kzalloc(
+				sizeof(struct threshold_info) * MSM_LIST_MAX_NR,
+				GFP_KERNEL);
+		if (!thresh) {
+			pr_err("kzalloc failed\n");
+			ret = -ENOMEM;
+			goto pre_init_exit;
+		}
+	}
+	mit_config = devm_kzalloc(dev,
+			sizeof(struct msm_thermal_debugfs_thresh_config)
+			* (MSM_LIST_MAX_NR + MAX_CPU_CONFIG), GFP_KERNEL);
+	if (!mit_config) {
+		pr_err("kzalloc failed\n");
+		ret = -ENOMEM;
+		goto pre_init_exit;
+	}
+
+pre_init_exit:
+	return ret;
+}
+
+static int devmgr_devices_init(struct platform_device *pdev)
+{
+	int ret = 0;
+	uint32_t cpu;
+	struct device_manager_data *dev_mgr = NULL;
+
+	devices = devm_kzalloc(&pdev->dev,
+				sizeof(struct devmgr_devices),
+				GFP_KERNEL);
+	if (!devices) {
+		pr_err("Malloc failed for devmgr devices\n");
+		ret = -ENOMEM;
+		goto device_exit;
+	}
+	if (num_possible_cpus() > 1) {
+		/* Add hotplug device */
+		dev_mgr = devm_kzalloc(&pdev->dev,
+		sizeof(struct device_manager_data),
+			GFP_KERNEL);
+		if (!dev_mgr) {
+			pr_err("Malloc failed for hotplug device\n");
+			ret = -ENOMEM;
+			goto device_exit;
+		}
+		snprintf(dev_mgr->device_name,
+				TSENS_NAME_MAX, HOTPLUG_DEVICE);
+		dev_mgr->request_validate =
+			devmgr_hotplug_client_request_validate_and_update;
+		dev_mgr->update = devmgr_client_hotplug_update;
+		HOTPLUG_NO_MITIGATION(&dev_mgr->active_req.offline_mask);
+		mutex_init(&dev_mgr->clnt_lock);
+		INIT_LIST_HEAD(&dev_mgr->client_list);
+		list_add_tail(&dev_mgr->dev_ptr, &devices_list);
+		devices->hotplug_dev = dev_mgr;
+	}
+	/*  Add cpu devices */
+	for_each_possible_cpu(cpu) {
+		dev_mgr = devm_kzalloc(&pdev->dev,
+		sizeof(struct device_manager_data),
+			GFP_KERNEL);
+		if (!dev_mgr) {
+			pr_err("Malloc failed for cpu%d device\n", cpu);
+			ret = -ENOMEM;
+			goto device_exit;
+		}
+		snprintf(dev_mgr->device_name, TSENS_NAME_MAX, CPU_DEVICE, cpu);
+		dev_mgr->request_validate =
+			devmgr_cpufreq_client_request_validate_and_update;
+		dev_mgr->update = devmgr_client_cpufreq_update;
+		dev_mgr->active_req.freq.max_freq = CPUFREQ_MAX_NO_MITIGATION;
+		dev_mgr->active_req.freq.min_freq = CPUFREQ_MIN_NO_MITIGATION;
+		mutex_init(&dev_mgr->clnt_lock);
+		INIT_LIST_HEAD(&dev_mgr->client_list);
+		list_add_tail(&dev_mgr->dev_ptr, &devices_list);
+		devices->cpufreq_dev[cpu] = dev_mgr;
+	}
+device_exit:
+	if (ret) {
+		if (devices) {
+			if (devices->hotplug_dev)
+				devm_kfree(&pdev->dev,
+					devices->hotplug_dev);
+			for_each_possible_cpu(cpu) {
+				if (devices->cpufreq_dev[cpu])
+					devm_kfree(&pdev->dev,
+					devices->cpufreq_dev[cpu]);
+			}
+		}
+	}
+	return ret;
+}
+
+static void msm_thermal_init_cpu_mit(enum cpu_mit_type cpu_mit)
+{
+	uint32_t cpu;
+
+	for_each_possible_cpu(cpu) {
+		cpus[cpu].cpu = cpu;
+		if (cpu_mit & CPU_HOTPLUG_MITIGATION) {
+			cpus[cpu].offline = 0;
+			cpus[cpu].user_offline = 0;
+			cpus[cpu].hotplug_thresh_clear = false;
+		}
+		if (cpu_mit & CPU_FREQ_MITIGATION) {
+			cpus[cpu].max_freq = false;
+			cpus[cpu].user_max_freq = UINT_MAX;
+			cpus[cpu].shutdown_max_freq = UINT_MAX;
+			cpus[cpu].suspend_max_freq = UINT_MAX;
+			cpus[cpu].vdd_max_freq = UINT_MAX;
+			cpus[cpu].user_min_freq = 0;
+			cpus[cpu].limited_max_freq = UINT_MAX;
+			cpus[cpu].limited_min_freq = 0;
+			cpus[cpu].freq_thresh_clear = false;
+		}
+	}
+}
+
+int msm_thermal_init(struct msm_thermal_data *pdata)
+{
+	int ret = 0;
+
+	msm_thermal_ioctl_init();
+	ret = devmgr_devices_init(pdata->pdev);
+	if (ret)
+		pr_err("cannot initialize devm devices. err:%d\n", ret);
+
+	msm_thermal_init_cpu_mit(CPU_FREQ_MITIGATION | CPU_HOTPLUG_MITIGATION);
+	BUG_ON(!pdata);
+	memcpy(&msm_thermal_info, pdata, sizeof(struct msm_thermal_data));
+
+	if (check_sensor_id(msm_thermal_info.sensor_id)) {
+		pr_err("Invalid sensor:%d for polling\n",
+				msm_thermal_info.sensor_id);
+		return -EINVAL;
+	}
+
+	enabled = 1;
+	polling_enabled = 1;
+	ret = cpufreq_register_notifier(&msm_thermal_cpufreq_notifier,
+			CPUFREQ_POLICY_NOTIFIER);
+	if (ret)
+		pr_err("cannot register cpufreq notifier. err:%d\n", ret);
+
+	if (!lmh_dcvs_is_supported) {
+		register_reboot_notifier(&msm_thermal_reboot_notifier);
+		pm_notifier(msm_thermal_suspend_callback, 0);
+	}
+	INIT_DELAYED_WORK(&retry_hotplug_work, retry_hotplug);
+
+	if (num_possible_cpus() > 1) {
+		cpus_previously_online_update();
+		register_cpu_notifier(&msm_thermal_cpu_notifier);
+	}
+
+	INIT_DELAYED_WORK(&check_temp_work, check_temp);
+	schedule_delayed_work(&check_temp_work, 0);
+	msm_thermal_panic_notifier_init(&pdata->pdev->dev);
+
+	return ret;
+}
+
+static int ocr_reg_init(struct platform_device *pdev)
+{
+	int ret = 0;
+	int i, j;
+
+	for (i = 0; i < ocr_rail_cnt; i++) {
+		/*
+		 * Check if vdd_restriction has already initialized any
+		 * regulator handle. If so, use the same handle.
+		 */
+		for (j = 0; j < rails_cnt; j++) {
+			if (!strcmp(ocr_rails[i].name, rails[j].name)) {
+				if (rails[j].reg == NULL)
+					break;
+				ocr_rails[i].phase_reg = rails[j].reg;
+				goto reg_init;
+			}
+
+		}
+		ocr_rails[i].phase_reg = devm_regulator_get(&pdev->dev,
+					ocr_rails[i].name);
+		if (IS_ERR_OR_NULL(ocr_rails[i].phase_reg)) {
+			ret = PTR_ERR(ocr_rails[i].phase_reg);
+			if (ret != -EPROBE_DEFER) {
+				pr_err("Could not get regulator: %s, err:%d\n",
+					ocr_rails[i].name, ret);
+				ocr_rails[i].phase_reg = NULL;
+				ocr_rails[i].mode = 0;
+				ocr_rails[i].init = 0;
+			}
+			return ret;
+		}
+reg_init:
+		ocr_rails[i].mode = OPTIMUM_CURRENT_MIN;
+	}
+	return ret;
+}
+
+static int vdd_restriction_reg_init(struct platform_device *pdev)
+{
+	int ret = 0;
+	int i;
+
+	for (i = 0; i < rails_cnt; i++) {
+		if (rails[i].freq_req == 1) {
+			usefreq |= BIT(i);
+		} else {
+			rails[i].reg = devm_regulator_get(&pdev->dev,
+					rails[i].name);
+			if (IS_ERR_OR_NULL(rails[i].reg)) {
+				ret = PTR_ERR(rails[i].reg);
+				if (ret != -EPROBE_DEFER) {
+					pr_err(
+					"could not get regulator: %s. err:%d\n",
+					rails[i].name, ret);
+					rails[i].reg = NULL;
+					rails[i].curr_level = -2;
+					return ret;
+				}
+				pr_info("Defer regulator %s probe\n",
+					rails[i].name);
+				return ret;
+			}
+		}
+	}
+
+	return ret;
+}
+
+static int psm_reg_init(struct platform_device *pdev)
+{
+	int ret = 0;
+	int i = 0;
+	int j = 0;
+
+	for (i = 0; i < psm_rails_cnt; i++) {
+		psm_rails[i].reg = rpm_regulator_get(&pdev->dev,
+				psm_rails[i].name);
+		if (IS_ERR_OR_NULL(psm_rails[i].reg)) {
+			ret = PTR_ERR(psm_rails[i].reg);
+			if (ret != -EPROBE_DEFER) {
+				pr_err("couldn't get rpm regulator %s. err%d\n",
+					psm_rails[i].name, ret);
+				psm_rails[i].reg = NULL;
+				goto psm_reg_exit;
+			}
+			pr_info("Defer regulator %s probe\n",
+					psm_rails[i].name);
+			return ret;
+		}
+		/* Apps default vote for PWM mode */
+		psm_rails[i].init = PMIC_PWM_MODE;
+		ret = rpm_regulator_set_mode(psm_rails[i].reg,
+				psm_rails[i].init);
+		if (ret) {
+			pr_err("Cannot set PMIC PWM mode. err:%d\n", ret);
+			return ret;
+		}
+		psm_rails[i].mode = PMIC_PWM_MODE;
+	}
+
+	return ret;
+
+psm_reg_exit:
+	if (ret) {
+		for (j = 0; j < i; j++) {
+			if (psm_rails[j].reg != NULL)
+				rpm_regulator_put(psm_rails[j].reg);
+		}
+	}
+
+	return ret;
+}
+
+static ssize_t bucket_info_store(struct kobject *kobj,
+	struct kobj_attribute *attr, const char *buf, size_t count)
+{
+	int ret = 0;
+	uint32_t val = 0;
+
+	ret = kstrtouint(buf, 10, &val);
+	if (ret) {
+		pr_err("Invalid input:%s. ret:%d", buf, ret);
+		goto done_store;
+	}
+
+	bucket = val & 0xff;
+	pr_debug("\"%s\"(PID:%i) request cluster:%d bucket:%d\n",
+		current->comm, current->pid, (bucket & 0xf0) >> 4,
+		bucket & 0xf);
+
+done_store:
+	return count;
+}
+
+static ssize_t bucket_info_show(
+	struct kobject *kobj, struct kobj_attribute *attr, char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%d\n", bucket);
+}
+
+static struct kobj_attribute bucket_info_attr =
+		__ATTR_RW(bucket_info);
+
+static int msm_thermal_add_bucket_info_nodes(void)
+{
+	struct kobject *module_kobj = NULL;
+	int ret = 0;
+
+	module_kobj = kset_find_obj(module_kset, KBUILD_MODNAME);
+	if (!module_kobj) {
+		pr_err("cannot find kobject\n");
+		return -ENOENT;
+	}
+	sysfs_attr_init(&bucket_info_attr.attr);
+	ret = sysfs_create_file(module_kobj, &bucket_info_attr.attr);
+	if (ret) {
+		pr_err(
+		"cannot create bucket info kobject attribute. err:%d\n", ret);
+		return ret;
+	}
+
+	return ret;
+}
+
+static struct kobj_attribute sensor_info_attr =
+		__ATTR_RO(sensor_info);
+
+static int msm_thermal_add_sensor_info_nodes(void)
+{
+	struct kobject *module_kobj = NULL;
+	int ret = 0;
+
+	if (!sensor_info_probed) {
+		sensor_info_nodes_called = true;
+		return ret;
+	}
+	if (sensor_info_probed && sensor_cnt == 0)
+		return ret;
+
+	module_kobj = kset_find_obj(module_kset, KBUILD_MODNAME);
+	if (!module_kobj) {
+		pr_err("cannot find kobject\n");
+		return -ENOENT;
+	}
+	sysfs_attr_init(&sensor_info_attr.attr);
+	ret = sysfs_create_file(module_kobj, &sensor_info_attr.attr);
+	if (ret) {
+		pr_err(
+		"cannot create sensor info kobject attribute. err:%d\n",
+		ret);
+		return ret;
+	}
+
+	return ret;
+}
+
+static int msm_thermal_add_vdd_rstr_nodes(void)
+{
+	struct kobject *module_kobj = NULL;
+	struct kobject *vdd_rstr_kobj = NULL;
+	struct kobject *vdd_rstr_reg_kobj[MAX_RAILS] = {0};
+	int rc = 0;
+	int i = 0;
+
+	if (!vdd_rstr_probed) {
+		vdd_rstr_nodes_called = true;
+		return rc;
+	}
+
+	if (vdd_rstr_probed && rails_cnt == 0)
+		return rc;
+
+	module_kobj = kset_find_obj(module_kset, KBUILD_MODNAME);
+	if (!module_kobj) {
+		pr_err("cannot find kobject\n");
+		rc = -ENOENT;
+		goto thermal_sysfs_add_exit;
+	}
+
+	vdd_rstr_kobj = kobject_create_and_add("vdd_restriction", module_kobj);
+	if (!vdd_rstr_kobj) {
+		pr_err("cannot create vdd_restriction kobject\n");
+		rc = -ENOMEM;
+		goto thermal_sysfs_add_exit;
+	}
+
+	rc = sysfs_create_group(vdd_rstr_kobj, &vdd_rstr_en_attribs_gp);
+	if (rc) {
+		pr_err("cannot create kobject attribute group. err:%d\n", rc);
+		rc = -ENOMEM;
+		goto thermal_sysfs_add_exit;
+	}
+
+	for (i = 0; i < rails_cnt; i++) {
+		vdd_rstr_reg_kobj[i] = kobject_create_and_add(rails[i].name,
+					vdd_rstr_kobj);
+		if (!vdd_rstr_reg_kobj[i]) {
+			pr_err("cannot create kobject for %s\n",
+					rails[i].name);
+			rc = -ENOMEM;
+			goto thermal_sysfs_add_exit;
+		}
+
+		rails[i].attr_gp.attrs = kzalloc(sizeof(struct attribute *) * 3,
+					GFP_KERNEL);
+		if (!rails[i].attr_gp.attrs) {
+			pr_err("kzalloc failed\n");
+			rc = -ENOMEM;
+			goto thermal_sysfs_add_exit;
+		}
+
+		VDD_RES_RW_ATTRIB(rails[i], rails[i].level_attr, 0, level);
+		VDD_RES_RO_ATTRIB(rails[i], rails[i].value_attr, 1, value);
+		rails[i].attr_gp.attrs[2] = NULL;
+
+		rc = sysfs_create_group(vdd_rstr_reg_kobj[i],
+				&rails[i].attr_gp);
+		if (rc) {
+			pr_err("cannot create attribute group for %s. err:%d\n",
+					rails[i].name, rc);
+			goto thermal_sysfs_add_exit;
+		}
+	}
+
+	return rc;
+
+thermal_sysfs_add_exit:
+	if (rc) {
+		for (i = 0; i < rails_cnt; i++) {
+			kobject_del(vdd_rstr_reg_kobj[i]);
+			kfree(rails[i].attr_gp.attrs);
+		}
+		if (vdd_rstr_kobj)
+			kobject_del(vdd_rstr_kobj);
+	}
+	return rc;
+}
+
+static int msm_thermal_add_ocr_nodes(void)
+{
+	struct kobject *module_kobj = NULL;
+	struct kobject *ocr_kobj = NULL;
+	struct kobject *ocr_reg_kobj[MAX_RAILS] = {0};
+	int rc = 0;
+	int i = 0;
+
+	if (!ocr_probed) {
+		ocr_nodes_called = true;
+		return rc;
+	}
+
+	if (ocr_probed && ocr_rail_cnt == 0)
+		return rc;
+
+	module_kobj = kset_find_obj(module_kset, KBUILD_MODNAME);
+	if (!module_kobj) {
+		pr_err("Cannot find kobject\n");
+		rc = -ENOENT;
+		goto ocr_node_exit;
+	}
+
+	ocr_kobj = kobject_create_and_add("opt_curr_req", module_kobj);
+	if (!ocr_kobj) {
+		pr_err("Cannot create ocr kobject\n");
+		rc = -ENOMEM;
+		goto ocr_node_exit;
+	}
+
+	for (i = 0; i < ocr_rail_cnt; i++) {
+		ocr_reg_kobj[i] = kobject_create_and_add(ocr_rails[i].name,
+					ocr_kobj);
+		if (!ocr_reg_kobj[i]) {
+			pr_err("Cannot create kobject for %s\n",
+				ocr_rails[i].name);
+			rc = -ENOMEM;
+			goto ocr_node_exit;
+		}
+		ocr_rails[i].attr_gp.attrs = kzalloc(
+				sizeof(struct attribute *) * 2, GFP_KERNEL);
+		if (!ocr_rails[i].attr_gp.attrs) {
+			pr_err("Fail to allocate memory for attribute for %s\n",
+				ocr_rails[i].name);
+			rc = -ENOMEM;
+			goto ocr_node_exit;
+		}
+
+		OCR_RW_ATTRIB(ocr_rails[i], ocr_rails[i].mode_attr, 0, mode);
+		ocr_rails[i].attr_gp.attrs[1] = NULL;
+
+		rc = sysfs_create_group(ocr_reg_kobj[i], &ocr_rails[i].attr_gp);
+		if (rc) {
+			pr_err("Cannot create attribute group for %s. err:%d\n",
+				ocr_rails[i].name, rc);
+			goto ocr_node_exit;
+		}
+	}
+
+ocr_node_exit:
+	if (rc) {
+		for (i = 0; i < ocr_rail_cnt; i++) {
+			if (ocr_reg_kobj[i])
+				kobject_del(ocr_reg_kobj[i]);
+			kfree(ocr_rails[i].attr_gp.attrs);
+			ocr_rails[i].attr_gp.attrs = NULL;
+		}
+		if (ocr_kobj)
+			kobject_del(ocr_kobj);
+	}
+	return rc;
+}
+
+static int msm_thermal_add_psm_nodes(void)
+{
+	struct kobject *module_kobj = NULL;
+	struct kobject *psm_kobj = NULL;
+	struct kobject *psm_reg_kobj[MAX_RAILS] = {0};
+	int rc = 0;
+	int i = 0;
+
+	if (!psm_probed) {
+		psm_nodes_called = true;
+		return rc;
+	}
+
+	if (psm_probed && psm_rails_cnt == 0)
+		return rc;
+
+	module_kobj = kset_find_obj(module_kset, KBUILD_MODNAME);
+	if (!module_kobj) {
+		pr_err("cannot find kobject\n");
+		rc = -ENOENT;
+		goto psm_node_exit;
+	}
+
+	psm_kobj = kobject_create_and_add("pmic_sw_mode", module_kobj);
+	if (!psm_kobj) {
+		pr_err("cannot create psm kobject\n");
+		rc = -ENOMEM;
+		goto psm_node_exit;
+	}
+
+	for (i = 0; i < psm_rails_cnt; i++) {
+		psm_reg_kobj[i] = kobject_create_and_add(psm_rails[i].name,
+					psm_kobj);
+		if (!psm_reg_kobj[i]) {
+			pr_err("cannot create kobject for %s\n",
+					psm_rails[i].name);
+			rc = -ENOMEM;
+			goto psm_node_exit;
+		}
+		psm_rails[i].attr_gp.attrs = kzalloc( \
+				sizeof(struct attribute *) * 2, GFP_KERNEL);
+		if (!psm_rails[i].attr_gp.attrs) {
+			pr_err("kzalloc failed\n");
+			rc = -ENOMEM;
+			goto psm_node_exit;
+		}
+
+		PSM_RW_ATTRIB(psm_rails[i], psm_rails[i].mode_attr, 0, mode);
+		psm_rails[i].attr_gp.attrs[1] = NULL;
+
+		rc = sysfs_create_group(psm_reg_kobj[i], &psm_rails[i].attr_gp);
+		if (rc) {
+			pr_err("cannot create attribute group for %s. err:%d\n",
+					psm_rails[i].name, rc);
+			goto psm_node_exit;
+		}
+	}
+
+	return rc;
+
+psm_node_exit:
+	if (rc) {
+		for (i = 0; i < psm_rails_cnt; i++) {
+			kobject_del(psm_reg_kobj[i]);
+			kfree(psm_rails[i].attr_gp.attrs);
+		}
+		if (psm_kobj)
+			kobject_del(psm_kobj);
+	}
+	return rc;
+}
+
+static void thermal_cpu_freq_mit_disable(void)
+{
+	uint32_t cpu = 0, th_cnt = 0;
+	struct device_manager_data *dev_mgr = NULL;
+	struct device_clnt_data *clnt = NULL;
+	char device_name[TSENS_NAME_MAX] = {0};
+
+	freq_mitigation_enabled = 0;
+	msm_thermal_init_cpu_mit(CPU_FREQ_MITIGATION);
+	for_each_possible_cpu(cpu) {
+		for (th_cnt = FREQ_THRESHOLD_HIGH;
+			th_cnt <= FREQ_THRESHOLD_LOW; th_cnt++)
+			sensor_cancel_trip(cpus[cpu].sensor_id,
+				&cpus[cpu].threshold[th_cnt]);
+
+		snprintf(device_name, TSENS_NAME_MAX, CPU_DEVICE, cpu);
+		dev_mgr = find_device_by_name(device_name);
+		if (!dev_mgr) {
+			pr_err("Invalid device %s\n", device_name);
+			return;
+		}
+		mutex_lock(&dev_mgr->clnt_lock);
+		list_for_each_entry(clnt, &dev_mgr->client_list, clnt_ptr) {
+			if (!clnt->req_active)
+				continue;
+			clnt->request.freq.max_freq
+				= CPUFREQ_MAX_NO_MITIGATION;
+			clnt->request.freq.min_freq
+				= CPUFREQ_MIN_NO_MITIGATION;
+		}
+		dev_mgr->active_req.freq.max_freq = CPUFREQ_MAX_NO_MITIGATION;
+		dev_mgr->active_req.freq.min_freq = CPUFREQ_MIN_NO_MITIGATION;
+		mutex_unlock(&dev_mgr->clnt_lock);
+	}
+	if (freq_mitigation_task)
+		complete(&freq_mitigation_complete);
+	else
+		pr_err("Freq mit task is not initialized\n");
+}
+
+static void thermal_cpu_hotplug_mit_disable(void)
+{
+	uint32_t cpu = 0, th_cnt = 0;
+	struct device_manager_data *dev_mgr = NULL;
+	struct device_clnt_data *clnt = NULL;
+
+	mutex_lock(&core_control_mutex);
+	hotplug_enabled = 0;
+	msm_thermal_init_cpu_mit(CPU_HOTPLUG_MITIGATION);
+	for_each_possible_cpu(cpu) {
+		if (!(msm_thermal_info.core_control_mask & BIT(cpu)))
+			continue;
+
+		for (th_cnt = HOTPLUG_THRESHOLD_HIGH;
+			th_cnt <= HOTPLUG_THRESHOLD_LOW; th_cnt++)
+			sensor_cancel_trip(cpus[cpu].sensor_id,
+				&cpus[cpu].threshold[th_cnt]);
+	}
+
+	dev_mgr = find_device_by_name(HOTPLUG_DEVICE);
+	if (!dev_mgr) {
+		pr_err("Invalid device %s\n", HOTPLUG_DEVICE);
+		mutex_unlock(&core_control_mutex);
+		return;
+	}
+	mutex_lock(&dev_mgr->clnt_lock);
+	list_for_each_entry(clnt, &dev_mgr->client_list, clnt_ptr) {
+		if (!clnt->req_active)
+			continue;
+		HOTPLUG_NO_MITIGATION(&clnt->request.offline_mask);
+	}
+	HOTPLUG_NO_MITIGATION(&dev_mgr->active_req.offline_mask);
+	mutex_unlock(&dev_mgr->clnt_lock);
+
+	if (hotplug_task)
+		complete(&hotplug_notify_complete);
+	else
+		pr_err("Hotplug task is not initialized\n");
+
+	mutex_unlock(&core_control_mutex);
+}
+
+static void thermal_reset_disable(void)
+{
+	THERM_MITIGATION_DISABLE(therm_reset_enabled, MSM_THERM_RESET);
+}
+
+static void thermal_mx_mit_disable(void)
+{
+	int ret = 0;
+
+	THERM_MITIGATION_DISABLE(vdd_mx_enabled, MSM_VDD_MX_RESTRICTION);
+	ret = remove_vdd_mx_restriction();
+	if (ret)
+		pr_err("Failed to remove vdd mx restriction\n");
+}
+
+static void thermal_vdd_mit_disable(void)
+{
+	int ret = 0;
+
+	THERM_MITIGATION_DISABLE(vdd_rstr_enabled, MSM_VDD_RESTRICTION);
+	ret = vdd_restriction_apply_all(0);
+	if (ret)
+		pr_err("Disable vdd rstr for all failed. err:%d\n", ret);
+}
+
+static void thermal_psm_mit_disable(void)
+{
+	int ret = 0;
+
+	THERM_MITIGATION_DISABLE(psm_enabled, -1);
+	ret = psm_set_mode_all(PMIC_AUTO_MODE);
+	if (ret)
+		pr_err("Set auto mode for all failed. err:%d\n", ret);
+}
+
+static void thermal_ocr_mit_disable(void)
+{
+	int ret = 0;
+
+	THERM_MITIGATION_DISABLE(ocr_enabled, MSM_OCR);
+	ret = ocr_set_mode_all(OPTIMUM_CURRENT_MAX);
+	if (ret)
+		pr_err("Set max optimum current failed. err:%d\n", ret);
+}
+
+static void thermal_cx_phase_ctrl_mit_disable(void)
+{
+	int ret = 0;
+
+	THERM_MITIGATION_DISABLE(cx_phase_ctrl_enabled, MSM_CX_PHASE_CTRL_HOT);
+	ret = send_temperature_band(MSM_CX_PHASE_CTRL, MSM_WARM);
+	if (ret)
+		pr_err("cx band set to WARM failed. err:%d\n", ret);
+}
+
+static void thermal_gfx_phase_warm_ctrl_mit_disable(void)
+{
+	int ret = 0;
+
+	if (gfx_warm_phase_ctrl_enabled) {
+		THERM_MITIGATION_DISABLE(gfx_warm_phase_ctrl_enabled,
+					MSM_GFX_PHASE_CTRL_WARM);
+		ret = send_temperature_band(MSM_GFX_PHASE_CTRL, MSM_NORMAL);
+		if (ret)
+			pr_err("gfx phase set to NORMAL failed. err:%d\n",
+				ret);
+	}
+}
+
+static void thermal_gfx_phase_crit_ctrl_mit_disable(void)
+{
+	int ret = 0;
+
+	if (gfx_crit_phase_ctrl_enabled) {
+		THERM_MITIGATION_DISABLE(gfx_crit_phase_ctrl_enabled,
+					MSM_GFX_PHASE_CTRL_HOT);
+		ret = send_temperature_band(MSM_GFX_PHASE_CTRL, MSM_NORMAL);
+		if (ret)
+			pr_err("gfx phase set to NORMAL failed. err:%d\n",
+				ret);
+	}
+}
+
+static int probe_vdd_mx(struct device_node *node,
+		struct msm_thermal_data *data, struct platform_device *pdev)
+{
+	int ret = 0;
+	char *key = NULL;
+
+	key = "qcom,disable-vdd-mx";
+	if (of_property_read_bool(node, key)) {
+		vdd_mx_enabled = false;
+		return ret;
+	}
+
+	key = "qcom,mx-restriction-temp";
+	ret = of_property_read_u32(node, key, &data->vdd_mx_temp_degC);
+	if (ret)
+		goto read_node_done;
+
+	key = "qcom,mx-restriction-temp-hysteresis";
+	ret = of_property_read_u32(node, key, &data->vdd_mx_temp_hyst_degC);
+	if (ret)
+		goto read_node_done;
+
+	key = "qcom,mx-retention-min";
+	ret = of_property_read_u32(node, key, &data->vdd_mx_min);
+	if (ret)
+		goto read_node_done;
+
+	/*
+	 * Monitor only this sensor if defined, otherwise monitor all tsens
+	 */
+	key = "qcom,mx-restriction-sensor_id";
+	if (of_property_read_u32(node, key, &data->vdd_mx_sensor_id))
+		data->vdd_mx_sensor_id = MONITOR_ALL_TSENS;
+
+	vdd_mx = devm_regulator_get(&pdev->dev, "vdd-mx");
+	if (IS_ERR_OR_NULL(vdd_mx)) {
+		ret = PTR_ERR(vdd_mx);
+		if (ret != -EPROBE_DEFER) {
+			pr_err(
+			"Could not get regulator: vdd-mx, err:%d\n", ret);
+		}
+		goto read_node_done;
+	}
+
+	key = "qcom,cx-retention-min";
+	ret = of_property_read_u32(node, key, &data->vdd_cx_min);
+	if (!ret) {
+		vdd_cx = devm_regulator_get(&pdev->dev, "vdd-cx");
+		if (IS_ERR_OR_NULL(vdd_cx)) {
+			ret = PTR_ERR(vdd_cx);
+			if (ret != -EPROBE_DEFER) {
+				pr_err(
+				"Could not get regulator: vdd-cx, err:%d\n",
+				ret);
+			}
+			goto read_node_done;
+		}
+	}
+
+	ret = sensor_mgr_init_threshold(&thresh[MSM_VDD_MX_RESTRICTION],
+			data->vdd_mx_sensor_id,
+			data->vdd_mx_temp_degC + data->vdd_mx_temp_hyst_degC,
+			data->vdd_mx_temp_degC, vdd_mx_notify);
+
+read_node_done:
+	if (!ret) {
+		vdd_mx_enabled = true;
+		snprintf(mit_config[MSM_VDD_MX_RESTRICTION].config_name,
+			MAX_DEBUGFS_CONFIG_LEN, "mx");
+		mit_config[MSM_VDD_MX_RESTRICTION].disable_config
+			= thermal_mx_mit_disable;
+	} else if (ret != -EPROBE_DEFER) {
+		dev_info(&pdev->dev,
+			"%s:Failed reading node=%s, key=%s. KTM continues\n",
+			__func__, node->full_name, key);
+	}
+
+	return ret;
+}
+
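+/*
+ * Parse the VDD restriction configuration: global threshold/hysteresis,
+ * an optional dedicated sensor, and one child node per rail (regulator
+ * name plus either a frequency request or a minimum voltage level).
+ * Rail regulators and sensor thresholds are only initialized when at
+ * least one rail is described.
+ */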
+static int probe_vdd_rstr(struct device_node *node,
+		struct msm_thermal_data *data, struct platform_device *pdev)
+{
+	int ret = 0;
+	int i = 0;
+	int arr_size;
+	char *key = NULL;
+	struct device_node *child_node = NULL;
+
+	rails = NULL;
+
+	key = "qcom,disable-vdd-rstr";
+	if (of_property_read_bool(node, key)) {
+		vdd_rstr_probed = true;
+		vdd_rstr_enabled = false;
+		rails_cnt = 0;
+		return ret;
+	}
+
+	key = "qcom,vdd-restriction-temp";
+	ret = of_property_read_u32(node, key, &data->vdd_rstr_temp_degC);
+	if (ret)
+		goto read_node_fail;
+
+	key = "qcom,vdd-restriction-temp-hysteresis";
+	ret = of_property_read_u32(node, key, &data->vdd_rstr_temp_hyst_degC);
+	if (ret)
+		goto read_node_fail;
+
+	/*
+	 * Monitor only this sensor if defined, otherwise monitor all tsens
+	 */
+	key = "qcom,vdd-restriction-sensor-id";
+	if (of_property_read_u32(node, key, &data->vdd_rstr_sensor_id))
+		data->vdd_rstr_sensor_id = MONITOR_ALL_TSENS;
+
+	for_each_child_of_node(node, child_node) {
+		rails_cnt++;
+	}
+
+	if (rails_cnt == 0)
+		goto read_node_fail;
+	if (rails_cnt >= MAX_RAILS) {
+		pr_err("Too many rails:%d.\n", rails_cnt);
+		return -EFAULT;
+	}
+
+	rails = kzalloc(sizeof(struct rail) * rails_cnt,
+				GFP_KERNEL);
+	if (!rails) {
+		pr_err("Fail to allocate memory for rails.\n");
+		return -ENOMEM;
+	}
+
+	i = 0;
+	for_each_child_of_node(node, child_node) {
+		key = "qcom,vdd-rstr-reg";
+		ret = of_property_read_string(child_node, key, &rails[i].name);
+		if (ret)
+			goto read_node_fail;
+
+		key = "qcom,levels";
+		if (!of_get_property(child_node, key, &arr_size))
+			goto read_node_fail;
+		rails[i].num_levels = arr_size/sizeof(__be32);
+		if (rails[i].num_levels >
+			sizeof(rails[i].levels)/sizeof(uint32_t)) {
+			pr_err("Array size:%d too large for index:%d\n",
+				rails[i].num_levels, i);
+			ret = -EFAULT;
+			goto read_node_fail;
+		}
+		ret = of_property_read_u32_array(child_node, key,
+				rails[i].levels, rails[i].num_levels);
+		if (ret)
+			goto read_node_fail;
+
+		key = "qcom,freq-req";
+		rails[i].freq_req = of_property_read_bool(child_node, key);
+		if (rails[i].freq_req) {
+			rails[i].min_level = 0;
+			key = "qcom,max-freq-level";
+			ret = of_property_read_u32(child_node, key,
+				&rails[i].max_frequency_limit);
+			if (ret)
+				rails[i].max_frequency_limit
+					= UINT_MAX;
+			ret = 0;
+		} else {
+			key = "qcom,min-level";
+			ret = of_property_read_u32(child_node, key,
+				&rails[i].min_level);
+			if (ret)
+				goto read_node_fail;
+		}
+
+		rails[i].curr_level = -1;
+		rails[i].reg = NULL;
+		i++;
+	}
+
+	if (rails_cnt) {
+		ret = vdd_restriction_reg_init(pdev);
+		if (ret) {
+			pr_err("Err regulator init. err:%d. KTM continues.\n",
+					ret);
+			goto read_node_fail;
+		}
+		ret = sensor_mgr_init_threshold(&thresh[MSM_VDD_RESTRICTION],
+			data->vdd_rstr_sensor_id,
+			data->vdd_rstr_temp_hyst_degC, data->vdd_rstr_temp_degC,
+			vdd_restriction_notify);
+		if (ret) {
+			pr_err("Error in initializing thresholds. err:%d\n",
+					ret);
+			goto read_node_fail;
+		}
+		vdd_rstr_enabled = true;
+		snprintf(mit_config[MSM_VDD_RESTRICTION].config_name,
+			MAX_DEBUGFS_CONFIG_LEN, "vdd");
+		mit_config[MSM_VDD_RESTRICTION].disable_config
+			= thermal_vdd_mit_disable;
+	}
+read_node_fail:
+	vdd_rstr_probed = true;
+	if (ret) {
+		dev_info(&pdev->dev,
+		"%s:Failed reading node=%s, key=%s. err=%d. KTM continues\n",
+			__func__, node->full_name, key, ret);
+		kfree(rails);
+		rails_cnt = 0;
+	}
+	if (ret == -EPROBE_DEFER)
+		vdd_rstr_probed = false;
+	return ret;
+}
+
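+/*
+ * Build a "cpuN"-style sysfs alias for every sensor that monitors this
+ * CPU. Both directions are handled: several sensors monitoring one
+ * device (the alias gets a "_<idx>" suffix) and one sensor monitoring
+ * several devices (a "-cpuN" suffix is appended per device).
+ */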
+static int create_alias_name(int _cpu, struct device_node *limits,
+		struct platform_device *pdev)
+{
+	char device_name[DEVM_NAME_MAX] = "";
+	int sensor_idx = 0, sensor_ct = 0, idx = 0, err = 0;
+	struct device_node *tsens = NULL;
+	const char *sensor_name = NULL;
+
+	if (!sensors) {
+		pr_debug("sensor info not defined\n");
+		return -ENOSYS;
+	}
+	snprintf(device_name, DEVM_NAME_MAX, CPU_DEVICE, _cpu);
+
+	if (!of_get_property(limits, "qcom,temperature-sensor", &sensor_ct)
+		|| sensor_ct <= 0) {
+		pr_err("Sensor not defined\n");
+		return -ENODEV;
+	}
+	sensor_ct /= sizeof(__be32);
+	do {
+		tsens = of_parse_phandle(limits, "qcom,temperature-sensor",
+				idx);
+		if (!tsens) {
+			pr_err("No temperature sensor defined for CPU%d\n",
+				_cpu);
+			return -ENODEV;
+		}
+
+		err = of_property_read_string(tsens, "qcom,sensor-name",
+			&sensor_name);
+		if (err) {
+			pr_err("Sensor name not populated for CPU%d. err:%d\n",
+					_cpu, err);
+			return -ENODEV;
+		}
+		for (sensor_idx = 0; sensor_idx < sensor_cnt; sensor_idx++) {
+			char cat_str[DEVM_NAME_MAX] = "";
+
+			if (strcmp(sensors[sensor_idx].name, sensor_name))
+				continue;
+			if (!sensors[sensor_idx].alias) {
+				sensors[sensor_idx].alias = devm_kzalloc(
+					&pdev->dev, DEVM_NAME_MAX, GFP_KERNEL);
+				if (!sensors[sensor_idx].alias) {
+					pr_err("Memory alloc failed\n");
+					return -ENOMEM;
+				}
+				strlcpy((char *)sensors[sensor_idx].alias,
+					device_name, DEVM_NAME_MAX);
+				if (sensor_ct > 1) {
+					/*
+					 * Multiple sensors monitoring a
+					 * single device
+					 */
+					snprintf(cat_str, DEVM_NAME_MAX, "_%d",
+						idx);
+					strlcat((char *)
+						sensors[sensor_idx].alias,
+						cat_str, DEVM_NAME_MAX);
+				}
+			} else {
+				/* Single sensor monitoring multiple devices */
+				snprintf(cat_str, DEVM_NAME_MAX,
+					"-"CPU_DEVICE, _cpu);
+				strlcat((char *)sensors[sensor_idx].alias,
+					cat_str, DEVM_NAME_MAX);
+			}
+			break;
+		}
+		idx++;
+	} while (idx < sensor_ct);
+
+	return 0;
+}
+
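+/*
+ * Read the boot-time sensor and polling properties, then walk every
+ * possible CPU: resolve its "qcom,limits-info" phandle, fill in the
+ * boot-frequency, emergency-frequency and hotplug mitigation masks, and
+ * record the name (and alias) of the TSENS sensor that monitors it.
+ */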
+static int fetch_cpu_mitigation_info(struct msm_thermal_data *data,
+		struct platform_device *pdev)
+{
+	int _cpu = 0, err = 0;
+	struct device_node *cpu_node = NULL, *limits = NULL, *tsens = NULL;
+	char *key = NULL;
+	struct device_node *node = pdev->dev.of_node;
+
+	key = "qcom,sensor-id";
+	err = of_property_read_u32(node, key, &data->sensor_id);
+	if (err)
+		goto fetch_mitig_exit;
+
+	key = "qcom,poll-ms";
+	err = of_property_read_u32(node, key, &data->poll_ms);
+	if (err)
+		goto fetch_mitig_exit;
+
+	for_each_possible_cpu(_cpu) {
+		const char *sensor_name = NULL;
+
+		cpu_node = of_get_cpu_node(_cpu, NULL);
+		if (!cpu_node) {
+			pr_err("No CPU phandle for CPU%d\n", _cpu);
+			__WARN();
+			continue;
+		}
+		limits = of_parse_phandle(cpu_node, "qcom,limits-info", 0);
+		if (!limits) {
+			pr_err("No mitigation info defined for CPU%d\n", _cpu);
+			continue;
+		}
+		VALIDATE_AND_SET_MASK(limits, "qcom,boot-frequency-mitigate",
+			data->bootup_freq_control_mask, _cpu);
+		VALIDATE_AND_SET_MASK(limits,
+			"qcom,emergency-frequency-mitigate",
+			data->freq_mitig_control_mask, _cpu);
+		VALIDATE_AND_SET_MASK(limits, "qcom,hotplug-mitigation-enable",
+			data->core_control_mask, _cpu);
+
+		tsens = of_parse_phandle(limits, "qcom,temperature-sensor", 0);
+		if (!tsens) {
+			pr_err("No temperature sensor defined for CPU%d\n",
+				_cpu);
+			continue;
+		}
+
+		err = of_property_read_string(tsens, "qcom,sensor-name",
+			&sensor_name);
+		if (err) {
+			pr_err("Sensor name not populated for CPU%d. err:%d\n",
+					_cpu, err);
+			continue;
+		}
+		cpus[_cpu].sensor_type = devm_kzalloc(&pdev->dev,
+					strlen(sensor_name) + 1, GFP_KERNEL);
+		if (!cpus[_cpu].sensor_type) {
+			pr_err("Memory alloc failed\n");
+			err = -ENOMEM;
+			goto fetch_mitig_exit;
+		}
+		strlcpy((char *) cpus[_cpu].sensor_type, sensor_name,
+			strlen(sensor_name) + 1);
+		create_alias_name(_cpu, limits, pdev);
+	}
+
+fetch_mitig_exit:
+	return err;
+}
+
+static void thermal_cxip_lm_disable(void)
+{
+	THERM_MITIGATION_DISABLE(cxip_lm_enabled, MSM_THERM_CXIP_LM);
+	cxip_lm_therm_vote_apply(false);
+}
+
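+/*
+ * Probe the CX IPEAK limit-management hardware: map its registers and,
+ * when enabled, program the bypass clients and register the fixed
+ * high/low thresholds on the dedicated TSENS sensor. When the DT asks
+ * for it to be off, the feature enable register is cleared instead.
+ */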
+static int probe_cxip_lm(struct device_node *node,
+		struct msm_thermal_data *data,
+		struct platform_device *pdev)
+{
+	char *key = NULL;
+	int ret = 0;
+	u32 val = 0;
+
+	key = "qcom,cxip-lm-enable";
+	ret = of_property_read_u32(node, key, &val);
+	if (ret) {
+		cxip_lm_enabled = false;
+		return -EINVAL;
+	}
+	cxip_lm_enabled = val ? true : false;
+
+	cxip_lm_reg_base = devm_ioremap(&pdev->dev,
+				CXIP_LM_BASE_ADDRESS, CXIP_LM_ADDRESS_SIZE);
+	if (!cxip_lm_reg_base) {
+		pr_err("cxip_lm reg remap failed\n");
+		ret = -ENOMEM;
+		goto PROBE_CXIP_LM_EXIT;
+	}
+
+	/* If it is disable request, disable and exit */
+	if (!cxip_lm_enabled) {
+		writel_relaxed(CXIP_LM_DISABLE_VAL,
+			cxip_lm_reg_base + CXIP_LM_FEATURE_EN);
+		devm_ioremap_release(&pdev->dev, cxip_lm_reg_base);
+		return 0;
+	}
+
+	/* Set bypass clients bits */
+	writel_relaxed(CXIP_LM_BYPASS_VAL, cxip_lm_reg_base + CXIP_LM_BYPASS);
+
+	ret = sensor_mgr_init_threshold(&thresh[MSM_THERM_CXIP_LM],
+		CXIP_LM_THERM_SENS_ID, CXIP_LM_THERM_SENS_HIGH,
+		CXIP_LM_THERM_SENS_LOW, therm_cxip_lm_notify);
+	if (ret) {
+		pr_err("cxip_lm sensor init failed\n");
+		goto PROBE_CXIP_LM_EXIT;
+	}
+
+	snprintf(mit_config[MSM_THERM_CXIP_LM].config_name,
+		MAX_DEBUGFS_CONFIG_LEN, "cxip_lm");
+	mit_config[MSM_THERM_CXIP_LM].disable_config
+		= thermal_cxip_lm_disable;
+
+PROBE_CXIP_LM_EXIT:
+	if (ret) {
+		if (cxip_lm_reg_base)
+			devm_ioremap_release(&pdev->dev,
+				cxip_lm_reg_base);
+		dev_info(&pdev->dev,
+		"%s:Failed reading node=%s, key=%s err=%d. KTM continues\n",
+			__func__, node->full_name, key, ret);
+		cxip_lm_enabled = false;
+	}
+
+	return ret;
+}
+
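+/*
+ * Populate the sensor table from the "qcom,sensor-information" node:
+ * type, name, optional alias and per-sensor scaling factor. The scaling
+ * factor of the first "tsens" entry becomes the global TSENS scaling
+ * factor used when converting debugfs threshold updates.
+ */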
+static void probe_sensor_info(struct device_node *node,
+		struct msm_thermal_data *data, struct platform_device *pdev)
+{
+	int err = 0;
+	int i = 0;
+	char *key = NULL;
+	struct device_node *child_node = NULL;
+	struct device_node *np = NULL;
+	int scale_tsens_found = 0;
+
+	key = "qcom,disable-sensor-info";
+	if (of_property_read_bool(node, key)) {
+		sensor_info_probed = true;
+		return;
+	}
+
+	np = of_find_compatible_node(NULL, NULL, "qcom,sensor-information");
+	if (!np) {
+		dev_info(&pdev->dev,
+		"%s:unable to find DT for sensor-information.KTM continues\n",
+		__func__);
+		sensor_info_probed = true;
+		return;
+	}
+	sensor_cnt = of_get_child_count(np);
+	if (sensor_cnt == 0) {
+		err = -ENODEV;
+		goto read_node_fail;
+	}
+
+	sensors = devm_kzalloc(&pdev->dev,
+			sizeof(struct msm_sensor_info) * sensor_cnt,
+			GFP_KERNEL);
+	if (!sensors) {
+		pr_err("Fail to allocate memory for sensor_info.\n");
+		err = -ENOMEM;
+		goto read_node_fail;
+	}
+
+	for_each_child_of_node(np, child_node) {
+		const char *alias_name = NULL;
+
+		key = "qcom,sensor-type";
+		err = of_property_read_string(child_node,
+				key, &sensors[i].type);
+		if (err)
+			goto read_node_fail;
+
+		key = "qcom,sensor-name";
+		err = of_property_read_string(child_node,
+				key, &sensors[i].name);
+		if (err)
+			goto read_node_fail;
+
+		key = "qcom,alias-name";
+		of_property_read_string(child_node, key, &alias_name);
+		if (alias_name && !strnstr(alias_name, "cpu",
+			strlen(alias_name)))
+			sensors[i].alias = alias_name;
+
+		key = "qcom,scaling-factor";
+		err = of_property_read_u32(child_node, key,
+				&sensors[i].scaling_factor);
+		if (err || sensors[i].scaling_factor == 0) {
+			sensors[i].scaling_factor = SENSOR_SCALING_FACTOR;
+			err = 0;
+		}
+		if (scale_tsens_found == 0) {
+			if (!strcmp(sensors[i].type, "tsens")) {
+				scale_tsens_found = 1;
+				tsens_scaling_factor =
+					sensors[i].scaling_factor;
+			}
+		}
+		i++;
+	}
+
+read_node_fail:
+	sensor_info_probed = true;
+	if (err) {
+		dev_info(&pdev->dev,
+		"%s:Failed reading node=%s, key=%s. err=%d. KTM continues\n",
+			__func__, np->full_name, key, err);
+		devm_kfree(&pdev->dev, sensors);
+	}
+}
+
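+/*
+ * Set up the optimum current request (OCR) mitigation: parse the
+ * threshold, rail list and optional sensor, initialize the regulators
+ * (possibly deferred) and thresholds, and vote for maximum optimum
+ * current until the first temperature reading arrives.
+ */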
+static int probe_ocr(struct device_node *node, struct msm_thermal_data *data,
+		struct platform_device *pdev)
+{
+	int ret = 0;
+	int j = 0;
+	char *key = NULL;
+
+	if (ocr_probed) {
+		pr_info("Nodes already probed\n");
+		goto read_ocr_exit;
+	}
+	ocr_rails = NULL;
+
+	key = "qcom,disable-ocr";
+	if (of_property_read_bool(node, key)) {
+		ocr_probed = true;
+		ocr_enabled = false;
+		ocr_rail_cnt = 0;
+		goto read_ocr_exit;
+	}
+
+	key = "qcom,pmic-opt-curr-temp";
+	ret = of_property_read_u32(node, key, &data->ocr_temp_degC);
+	if (ret)
+		goto read_ocr_fail;
+
+	key = "qcom,pmic-opt-curr-temp-hysteresis";
+	ret = of_property_read_u32(node, key, &data->ocr_temp_hyst_degC);
+	if (ret)
+		goto read_ocr_fail;
+
+	key = "qcom,pmic-opt-curr-regs";
+	ocr_rail_cnt = of_property_count_strings(node, key);
+	if (ocr_rail_cnt <= 0) {
+		pr_err("Invalid ocr rail count. err:%d\n", ocr_rail_cnt);
+		goto read_ocr_fail;
+	}
+	ocr_rails = kzalloc(sizeof(struct psm_rail) * ocr_rail_cnt,
+			GFP_KERNEL);
+	if (!ocr_rails) {
+		pr_err("Fail to allocate memory for ocr rails\n");
+		ocr_rail_cnt = 0;
+		return -ENOMEM;
+	}
+
+	for (j = 0; j < ocr_rail_cnt; j++) {
+		ret = of_property_read_string_index(node, key, j,
+				&ocr_rails[j].name);
+		if (ret)
+			goto read_ocr_fail;
+		ocr_rails[j].phase_reg = NULL;
+		ocr_rails[j].init = OPTIMUM_CURRENT_MAX;
+	}
+
+	key = "qcom,pmic-opt-curr-sensor-id";
+	ret = of_property_read_u32(node, key, &data->ocr_sensor_id);
+	if (ret) {
+		pr_info("ocr sensor is not configured, use all TSENS. err:%d\n",
+			ret);
+		data->ocr_sensor_id = MONITOR_ALL_TSENS;
+	}
+
+	ret = ocr_reg_init(pdev);
+	if (ret) {
+		if (ret == -EPROBE_DEFER) {
+			ocr_reg_init_defer = true;
+			pr_info("ocr reg init is defered\n");
+		} else {
+			pr_err(
+			"Failed to get regulators. KTM continues. err:%d\n",
+			ret);
+			goto read_ocr_fail;
+		}
+	}
+
+	ret = sensor_mgr_init_threshold(&thresh[MSM_OCR], data->ocr_sensor_id,
+		data->ocr_temp_degC,
+		data->ocr_temp_degC - data->ocr_temp_hyst_degC,
+		ocr_notify);
+	if (ret)
+		goto read_ocr_fail;
+
+	if (!ocr_reg_init_defer)
+		ocr_enabled = true;
+	ocr_nodes_called = false;
+	/*
+	 * Vote for max optimum current by default until we have made
+	 * our first temp reading
+	 */
+	if (ocr_enabled) {
+		ret = ocr_set_mode_all(OPTIMUM_CURRENT_MAX);
+		if (ret) {
+			pr_err("Set max optimum current failed. err:%d\n",
+				ret);
+			ocr_enabled = false;
+		}
+	}
+
+read_ocr_fail:
+	ocr_probed = true;
+	if (ret) {
+		if (ret == -EPROBE_DEFER) {
+			ret = 0;
+			goto read_ocr_exit;
+		}
+		dev_err(
+		&pdev->dev,
+		"%s:Failed reading node=%s, key=%s err:%d. KTM continues\n",
+		__func__, node->full_name, key, ret);
+		kfree(ocr_rails);
+		ocr_rails = NULL;
+		ocr_rail_cnt = 0;
+	} else {
+		snprintf(mit_config[MSM_OCR].config_name,
+			MAX_DEBUGFS_CONFIG_LEN, "ocr");
+		mit_config[MSM_OCR].disable_config = thermal_ocr_mit_disable;
+	}
+
+read_ocr_exit:
+	return ret;
+}
+
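+/*
+ * Set up the PMIC software mode (PSM) mitigation: parse the threshold,
+ * hysteresis and rail names, then initialize the RPM regulator handles.
+ * A parse failure only disables PSM; -EPROBE_DEFER re-arms the probe.
+ */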
+static int probe_psm(struct device_node *node, struct msm_thermal_data *data,
+		struct platform_device *pdev)
+{
+	int ret = 0;
+	int j = 0;
+	char *key = NULL;
+
+	psm_rails = NULL;
+
+	key = "qcom,disable-psm";
+	if (of_property_read_bool(node, key)) {
+		psm_probed = true;
+		psm_enabled = false;
+		psm_rails_cnt = 0;
+		return ret;
+	}
+
+	key = "qcom,pmic-sw-mode-temp";
+	ret = of_property_read_u32(node, key, &data->psm_temp_degC);
+	if (ret)
+		goto read_node_fail;
+
+	key = "qcom,pmic-sw-mode-temp-hysteresis";
+	ret = of_property_read_u32(node, key, &data->psm_temp_hyst_degC);
+	if (ret)
+		goto read_node_fail;
+
+	key = "qcom,pmic-sw-mode-regs";
+	psm_rails_cnt = of_property_count_strings(node, key);
+	if (psm_rails_cnt <= 0) {
+		pr_err("Invalid psm rail count. err:%d\n", psm_rails_cnt);
+		psm_rails_cnt = 0;
+		goto read_node_fail;
+	}
+	psm_rails = kzalloc(sizeof(struct psm_rail) * psm_rails_cnt,
+			GFP_KERNEL);
+	if (!psm_rails) {
+		pr_err("Fail to allocate memory for psm rails\n");
+		psm_rails_cnt = 0;
+		return -ENOMEM;
+	}
+
+	for (j = 0; j < psm_rails_cnt; j++) {
+		ret = of_property_read_string_index(node, key, j,
+				&psm_rails[j].name);
+		if (ret)
+			goto read_node_fail;
+	}
+
+	if (psm_rails_cnt) {
+		ret = psm_reg_init(pdev);
+		if (ret) {
+			pr_err("Err regulator init. err:%d. KTM continues.\n",
+					ret);
+			goto read_node_fail;
+		}
+		psm_enabled = true;
+	}
+
+read_node_fail:
+	psm_probed = true;
+	if (ret) {
+		dev_info(&pdev->dev,
+		"%s:Failed reading node=%s, key=%s. err=%d. KTM continues\n",
+			__func__, node->full_name, key, ret);
+		kfree(psm_rails);
+		psm_rails_cnt = 0;
+	}
+	if (ret == -EPROBE_DEFER)
+		psm_probed = false;
+	return ret;
+}
+
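+/*
+ * Set up core control and hotplug mitigation. Core control is only
+ * meaningful on SMP targets; the two goto labels let the boot-time core
+ * limit and the hotplug thresholds fail independently, so one missing
+ * property does not take down the other feature.
+ */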
+static int probe_cc(struct device_node *node, struct msm_thermal_data *data,
+		struct platform_device *pdev)
+{
+	char *key = NULL;
+	int ret = 0;
+
+	if (num_possible_cpus() > 1) {
+		core_control_enabled = 1;
+		hotplug_enabled = 1;
+	}
+
+	key = "qcom,online-hotplug-core";
+	if (of_property_read_bool(node, key))
+		online_core = true;
+	else
+		online_core = false;
+
+	key = "qcom,core-limit-temp";
+	ret = of_property_read_u32(node, key, &data->core_limit_temp_degC);
+	if (ret)
+		goto read_node_fail;
+
+	key = "qcom,core-temp-hysteresis";
+	ret = of_property_read_u32(node, key, &data->core_temp_hysteresis_degC);
+	if (ret)
+		goto read_node_fail;
+
+	key = "qcom,hotplug-temp";
+	ret = of_property_read_u32(node, key, &data->hotplug_temp_degC);
+	if (ret)
+		goto hotplug_node_fail;
+
+	key = "qcom,hotplug-temp-hysteresis";
+	ret = of_property_read_u32(node, key,
+			&data->hotplug_temp_hysteresis_degC);
+	if (ret)
+		goto hotplug_node_fail;
+
+read_node_fail:
+	if (ret) {
+		dev_info(&pdev->dev,
+		"%s:Failed reading node=%s, key=%s. err=%d. KTM continues\n",
+			KBUILD_MODNAME, node->full_name, key, ret);
+		core_control_enabled = 0;
+	} else {
+		snprintf(mit_config[MSM_LIST_MAX_NR + HOTPLUG_CONFIG]
+			.config_name, MAX_DEBUGFS_CONFIG_LEN,
+			"hotplug");
+		mit_config[MSM_LIST_MAX_NR + HOTPLUG_CONFIG].disable_config
+			= thermal_cpu_hotplug_mit_disable;
+	}
+
+	return ret;
+
+hotplug_node_fail:
+	if (ret) {
+		dev_info(&pdev->dev,
+		"%s:Failed reading node=%s, key=%s. err=%d. KTM continues\n",
+			KBUILD_MODNAME, node->full_name, key, ret);
+		hotplug_enabled = 0;
+	}
+
+	return ret;
+}
+
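+/*
+ * Set up GFX rail phase control: resolve the sensor and the RPM
+ * resource key, then register the WARM band thresholds followed by the
+ * HOT (critical) band thresholds. Each band is enabled independently,
+ * so a missing warm property still allows the critical band to work.
+ */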
+static int probe_gfx_phase_ctrl(struct device_node *node,
+		struct msm_thermal_data *data,
+		struct platform_device *pdev)
+{
+	char *key = NULL;
+	const char *tmp_str = NULL;
+	int ret = 0;
+
+	key = "qcom,disable-gfx-phase-ctrl";
+	if (of_property_read_bool(node, key)) {
+		gfx_crit_phase_ctrl_enabled = false;
+		gfx_warm_phase_ctrl_enabled = false;
+		return ret;
+	}
+
+	key = "qcom,gfx-sensor-id";
+	ret = of_property_read_u32(node, key,
+		&data->gfx_sensor);
+	if (ret)
+		goto probe_gfx_exit;
+
+	key = "qcom,gfx-phase-resource-key";
+	ret = of_property_read_string(node, key,
+		&tmp_str);
+	if (ret)
+		goto probe_gfx_exit;
+	data->gfx_phase_request_key = msm_thermal_str_to_int(tmp_str);
+
+	key = "qcom,gfx-phase-warm-temp";
+	ret = of_property_read_u32(node, key,
+		&data->gfx_phase_warm_temp_degC);
+	if (ret) {
+		dev_info(&pdev->dev,
+		"%s:Failed reading node=%s, key=%s. err=%d. KTM continues\n",
+			KBUILD_MODNAME, node->full_name, key, ret);
+		data->gfx_phase_warm_temp_degC = INT_MIN;
+		goto probe_gfx_crit;
+	}
+
+	key = "qcom,gfx-phase-warm-temp-hyst";
+	ret = of_property_read_u32(node, key,
+		&data->gfx_phase_warm_temp_hyst_degC);
+	if (ret) {
+		dev_info(&pdev->dev,
+		"%s:Failed reading node=%s, key=%s. err=%d. KTM continues\n",
+			KBUILD_MODNAME, node->full_name, key, ret);
+		goto probe_gfx_crit;
+	}
+
+	ret = sensor_mgr_init_threshold(&thresh[MSM_GFX_PHASE_CTRL_WARM],
+		data->gfx_sensor,
+		data->gfx_phase_warm_temp_degC, data->gfx_phase_warm_temp_degC -
+		data->gfx_phase_warm_temp_hyst_degC,
+		gfx_phase_ctrl_notify);
+	if (ret) {
+		pr_err("init WARM threshold failed. err:%d\n", ret);
+		goto probe_gfx_crit;
+	}
+	gfx_warm_phase_ctrl_enabled = true;
+	snprintf(mit_config[MSM_GFX_PHASE_CTRL_WARM].config_name,
+		MAX_DEBUGFS_CONFIG_LEN, "gfx_phase_warm");
+	mit_config[MSM_GFX_PHASE_CTRL_WARM].disable_config
+		= thermal_gfx_phase_warm_ctrl_mit_disable;
+
+probe_gfx_crit:
+	key = "qcom,gfx-phase-hot-crit-temp";
+	ret = of_property_read_u32(node, key,
+		&data->gfx_phase_hot_temp_degC);
+	if (ret) {
+		data->gfx_phase_hot_temp_degC = INT_MAX;
+		goto probe_gfx_exit;
+	}
+
+	key = "qcom,gfx-phase-hot-crit-temp-hyst";
+	ret = of_property_read_u32(node, key,
+		&data->gfx_phase_hot_temp_hyst_degC);
+	if (ret)
+		goto probe_gfx_exit;
+
+	ret = sensor_mgr_init_threshold(&thresh[MSM_GFX_PHASE_CTRL_HOT],
+		data->gfx_sensor,
+		data->gfx_phase_hot_temp_degC, data->gfx_phase_hot_temp_degC -
+		data->gfx_phase_hot_temp_hyst_degC,
+		gfx_phase_ctrl_notify);
+	if (ret) {
+		pr_err("init HOT threshold failed. err:%d\n", ret);
+		goto probe_gfx_exit;
+	}
+
+	gfx_crit_phase_ctrl_enabled = true;
+	snprintf(mit_config[MSM_GFX_PHASE_CTRL_HOT].config_name,
+		MAX_DEBUGFS_CONFIG_LEN, "gfx_phase_crit");
+	mit_config[MSM_GFX_PHASE_CTRL_HOT].disable_config
+		= thermal_gfx_phase_crit_ctrl_mit_disable;
+
+probe_gfx_exit:
+	if (ret) {
+		dev_info(&pdev->dev,
+		"%s:Failed reading node=%s, key=%s. err=%d. KTM continues\n",
+			KBUILD_MODNAME, node->full_name, key, ret);
+	}
+	return ret;
+}
+
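+/*
+ * Set up CX rail phase control: resolve the RPM resource type/id and
+ * request key, then register the hot-critical threshold across all
+ * TSENS sensors.
+ */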
+static int probe_cx_phase_ctrl(struct device_node *node,
+		struct msm_thermal_data *data,
+		struct platform_device *pdev)
+{
+	char *key = NULL;
+	const char *tmp_str;
+	int ret = 0;
+
+	key = "qcom,disable-cx-phase-ctrl";
+	if (of_property_read_bool(node, key)) {
+		cx_phase_ctrl_enabled = false;
+		return ret;
+	}
+
+	key = "qcom,rpm-phase-resource-type";
+	ret = of_property_read_string(node, key,
+		&tmp_str);
+	if (ret)
+		goto probe_cx_exit;
+	data->phase_rpm_resource_type = msm_thermal_str_to_int(tmp_str);
+
+	key = "qcom,rpm-phase-resource-id";
+	ret = of_property_read_u32(node, key,
+		&data->phase_rpm_resource_id);
+	if (ret)
+		goto probe_cx_exit;
+
+	key = "qcom,cx-phase-resource-key";
+	ret = of_property_read_string(node, key,
+		&tmp_str);
+	if (ret)
+		goto probe_cx_exit;
+	data->cx_phase_request_key = msm_thermal_str_to_int(tmp_str);
+
+	key = "qcom,cx-phase-hot-crit-temp";
+	ret = of_property_read_u32(node, key,
+		&data->cx_phase_hot_temp_degC);
+	if (ret)
+		goto probe_cx_exit;
+
+	key = "qcom,cx-phase-hot-crit-temp-hyst";
+	ret = of_property_read_u32(node, key,
+		&data->cx_phase_hot_temp_hyst_degC);
+	if (ret)
+		goto probe_cx_exit;
+
+	ret = sensor_mgr_init_threshold(&thresh[MSM_CX_PHASE_CTRL_HOT],
+		MONITOR_ALL_TSENS,
+		data->cx_phase_hot_temp_degC, data->cx_phase_hot_temp_degC -
+		data->cx_phase_hot_temp_hyst_degC,
+		cx_phase_ctrl_notify);
+	if (ret) {
+		pr_err("init HOT threshold failed. err:%d\n", ret);
+		goto probe_cx_exit;
+	}
+
+	cx_phase_ctrl_enabled = true;
+	snprintf(mit_config[MSM_CX_PHASE_CTRL_HOT].config_name,
+		MAX_DEBUGFS_CONFIG_LEN, "cx_phase");
+	mit_config[MSM_CX_PHASE_CTRL_HOT].disable_config
+		= thermal_cx_phase_ctrl_mit_disable;
+
+probe_cx_exit:
+	if (ret) {
+		dev_info(&pdev->dev,
+		"%s:Failed reading node=%s, key=%s err=%d. KTM continues\n",
+			KBUILD_MODNAME, node->full_name, key, ret);
+		cx_phase_ctrl_enabled = false;
+	}
+	return ret;
+}
+
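+/*
+ * Set up the thermal reset safety net: one high threshold across all
+ * TSENS sensors, with a fixed 10 degC clearing hysteresis, that fires
+ * therm_reset_notify when crossed.
+ */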
+static int probe_therm_reset(struct device_node *node,
+		struct msm_thermal_data *data,
+		struct platform_device *pdev)
+{
+	char *key = NULL;
+	int ret = 0;
+
+	key = "qcom,therm-reset-temp";
+	ret = of_property_read_u32(node, key, &data->therm_reset_temp_degC);
+	if (ret)
+		goto PROBE_RESET_EXIT;
+
+	ret = sensor_mgr_init_threshold(&thresh[MSM_THERM_RESET],
+		MONITOR_ALL_TSENS,
+		data->therm_reset_temp_degC, data->therm_reset_temp_degC - 10,
+		therm_reset_notify);
+	if (ret) {
+		pr_err("Therm reset data structure init failed\n");
+		goto PROBE_RESET_EXIT;
+	}
+
+	therm_reset_enabled = true;
+	snprintf(mit_config[MSM_THERM_RESET].config_name,
+		MAX_DEBUGFS_CONFIG_LEN, "reset");
+	mit_config[MSM_THERM_RESET].disable_config
+		= thermal_reset_disable;
+
+PROBE_RESET_EXIT:
+	if (ret) {
+		dev_info(&pdev->dev,
+		"%s:Failed reading node=%s, key=%s err=%d. KTM continues\n",
+			__func__, node->full_name, key, ret);
+		therm_reset_enabled = false;
+	}
+	return ret;
+}
+
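+/*
+ * Set up CPU frequency mitigation: the boot-time limit (threshold,
+ * hysteresis and frequency step) and the emergency limit (threshold,
+ * hysteresis and a fixed frequency cap).
+ */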
+static int probe_freq_mitigation(struct device_node *node,
+		struct msm_thermal_data *data,
+		struct platform_device *pdev)
+{
+	char *key = NULL;
+	int ret = 0;
+
+	key = "qcom,limit-temp";
+	ret = of_property_read_u32(node, key, &data->limit_temp_degC);
+	if (ret)
+		goto PROBE_FREQ_EXIT;
+
+	key = "qcom,temp-hysteresis";
+	ret = of_property_read_u32(node, key, &data->temp_hysteresis_degC);
+	if (ret)
+		goto PROBE_FREQ_EXIT;
+
+	key = "qcom,freq-step";
+	ret = of_property_read_u32(node, key, &data->bootup_freq_step);
+	if (ret)
+		goto PROBE_FREQ_EXIT;
+	boot_freq_mitig_enabled = true;
+
+	key = "qcom,freq-mitigation-temp";
+	ret = of_property_read_u32(node, key, &data->freq_mitig_temp_degc);
+	if (ret)
+		goto PROBE_FREQ_EXIT;
+
+	key = "qcom,freq-mitigation-temp-hysteresis";
+	ret = of_property_read_u32(node, key,
+		&data->freq_mitig_temp_hysteresis_degc);
+	if (ret)
+		goto PROBE_FREQ_EXIT;
+
+	key = "qcom,freq-mitigation-value";
+	ret = of_property_read_u32(node, key, &data->freq_limit);
+	if (ret)
+		goto PROBE_FREQ_EXIT;
+
+	freq_mitigation_enabled = 1;
+	snprintf(mit_config[MSM_LIST_MAX_NR + CPUFREQ_CONFIG].config_name,
+		MAX_DEBUGFS_CONFIG_LEN, "cpufreq");
+	mit_config[MSM_LIST_MAX_NR + CPUFREQ_CONFIG].disable_config
+		= thermal_cpu_freq_mit_disable;
+
+PROBE_FREQ_EXIT:
+	if (ret) {
+		dev_info(&pdev->dev,
+		"%s:Failed reading node=%s, key=%s. err=%d. KTM continues\n",
+			__func__, node->full_name, key, ret);
+		freq_mitigation_enabled = 0;
+	}
+	return ret;
+}
+
+static void thermal_boot_config_read(struct seq_file *m, void *data)
+{
+	seq_puts(m, "---------Boot Mitigation------------\n");
+	seq_printf(m, "tsens sensor:tsens_tz_sensor%d\n",
+			msm_thermal_info.sensor_id);
+	seq_printf(m, "polling rate:%d ms\n", msm_thermal_info.poll_ms);
+	seq_printf(m, "frequency threshold:%d degC\n",
+			msm_thermal_info.limit_temp_degC);
+	seq_printf(m, "frequency threshold clear:%d degC\n",
+			msm_thermal_info.limit_temp_degC
+			- msm_thermal_info.temp_hysteresis_degC);
+	seq_printf(m, "frequency step:%d\n",
+			msm_thermal_info.bootup_freq_step);
+	seq_printf(m, "frequency mask:0x%x\n",
+			msm_thermal_info.bootup_freq_control_mask);
+	seq_printf(m, "hotplug threshold:%d degC\n",
+			msm_thermal_info.core_limit_temp_degC);
+	seq_printf(m, "hotplug threshold clear:%d degC\n",
+			msm_thermal_info.core_limit_temp_degC
+			- msm_thermal_info.core_temp_hysteresis_degC);
+	seq_printf(m, "hotplug mask:0x%x\n",
+			msm_thermal_info.core_control_mask);
+	seq_printf(m, "reset threshold:%d degC\n",
+			msm_thermal_info.therm_reset_temp_degC);
+}
+
+static void thermal_emergency_config_read(struct seq_file *m, void *data)
+{
+	int cpu = 0;
+
+	seq_puts(m, "\n---------Emergency Mitigation------------\n");
+	for_each_possible_cpu(cpu)
+		seq_printf(m, "cpu%d sensor:%s\n", cpu, cpus[cpu].sensor_type);
+	seq_printf(m, "frequency threshold:%d degC\n",
+			msm_thermal_info.freq_mitig_temp_degc);
+	seq_printf(m, "frequency threshold clr:%d degC\n",
+			msm_thermal_info.freq_mitig_temp_degc
+			- msm_thermal_info.freq_mitig_temp_hysteresis_degc);
+	seq_printf(m, "frequency value:%d KHz\n",
+			msm_thermal_info.freq_limit);
+	seq_printf(m, "frequency mask:0x%x\n",
+			msm_thermal_info.freq_mitig_control_mask);
+	seq_printf(m, "hotplug threshold:%d degC\n",
+			msm_thermal_info.hotplug_temp_degC);
+	seq_printf(m, "hotplug threshold clr:%d degC\n",
+			msm_thermal_info.hotplug_temp_degC
+			- msm_thermal_info.hotplug_temp_hysteresis_degC);
+	seq_printf(m, "hotplug mask:0x%x\n",
+			msm_thermal_info.core_control_mask);
+	seq_printf(m, "online hotplug core:%s\n", online_core
+			? "true" : "false");
+}
+
+static void thermal_mx_config_read(struct seq_file *m, void *data)
+{
+	if (vdd_mx_enabled) {
+		seq_puts(m, "\n---------Mx Retention------------\n");
+		seq_printf(m, "threshold:%d degC\n",
+				msm_thermal_info.vdd_mx_temp_degC);
+		seq_printf(m, "threshold clear:%d degC\n",
+				msm_thermal_info.vdd_mx_temp_degC
+				+ msm_thermal_info.vdd_mx_temp_hyst_degC);
+		seq_printf(m, "mx retention value:%d\n",
+				msm_thermal_info.vdd_mx_min);
+		if (vdd_cx)
+			seq_printf(m, "cx retention value:%d\n",
+				msm_thermal_info.vdd_cx_min);
+		if (msm_thermal_info.vdd_mx_sensor_id != MONITOR_ALL_TSENS)
+			seq_printf(m, "tsens sensor:tsens_tz_sensor%d\n",
+				msm_thermal_info.vdd_mx_sensor_id);
+	}
+}
+
+static void thermal_vdd_config_read(struct seq_file *m, void *data)
+{
+	int i = 0;
+
+	if (vdd_rstr_enabled) {
+		seq_puts(m, "\n---------VDD restriction------------\n");
+		seq_printf(m, "threshold:%d degC\n",
+				msm_thermal_info.vdd_rstr_temp_degC);
+		seq_printf(m, "threshold clear:%d degC\n",
+				msm_thermal_info.vdd_rstr_temp_hyst_degC);
+		if (msm_thermal_info.vdd_rstr_sensor_id != MONITOR_ALL_TSENS)
+			seq_printf(m, "tsens sensor:tsens_tz_sensor%d\n",
+				msm_thermal_info.vdd_rstr_sensor_id);
+
+		for (i = 0; i < rails_cnt; i++) {
+			if (!strcmp(rails[i].name, "vdd-dig")
+				&& rails[i].num_levels)
+				seq_printf(m, "vdd_dig restriction value:%d\n",
+					rails[i].levels[0]);
+			if (!strcmp(rails[i].name, "vdd-gfx")
+				&& rails[i].num_levels)
+				seq_printf(m, "vdd_gfx restriction value:%d\n",
+					rails[i].levels[0]);
+			if (!strcmp(rails[i].name, "vdd-apps")
+				&& rails[i].num_levels)
+				seq_printf(m,
+					"vdd_apps restriction value:%d KHz\n",
+					rails[i].levels[0]);
+		}
+	}
+}
+
+static void thermal_psm_config_read(struct seq_file *m, void *data)
+{
+	if (psm_enabled) {
+		seq_puts(m, "\n------PMIC Software Mode(PSM)-------\n");
+		seq_printf(m, "threshold:%d degC\n",
+				msm_thermal_info.psm_temp_degC);
+		seq_printf(m, "threshold clear:%d degC\n",
+				msm_thermal_info.psm_temp_degC
+				- msm_thermal_info.psm_temp_hyst_degC);
+	}
+}
+
+static void thermal_ocr_config_read(struct seq_file *m, void *data)
+{
+	if (ocr_enabled) {
+		seq_puts(m, "\n-----Optimum Current Request(OCR)-----\n");
+		seq_printf(m, "threshold:%d degC\n",
+				msm_thermal_info.ocr_temp_degC);
+		seq_printf(m, "threshold clear:%d degC\n",
+				msm_thermal_info.ocr_temp_degC
+				- msm_thermal_info.ocr_temp_hyst_degC);
+		seq_printf(m, "tsens sensor:tsens_tz_sensor%d\n",
+				msm_thermal_info.ocr_sensor_id);
+	}
+}
+
+static void thermal_phase_ctrl_config_read(struct seq_file *m, void *data)
+{
+	if (cx_phase_ctrl_enabled) {
+		seq_puts(m, "\n---------Phase control------------\n");
+		seq_printf(m, "cx hot critical threshold:%d degC\n",
+				msm_thermal_info.cx_phase_hot_temp_degC);
+		seq_printf(m, "cx hot critical threshold clear:%d degC\n",
+			msm_thermal_info.cx_phase_hot_temp_degC
+			- msm_thermal_info.cx_phase_hot_temp_hyst_degC);
+	}
+	if (gfx_crit_phase_ctrl_enabled) {
+		seq_printf(m, "gfx hot critical threshold:%d degC\n",
+				msm_thermal_info.gfx_phase_hot_temp_degC);
+		seq_printf(m, "gfx hot critical threshold clear:%d degC\n",
+			msm_thermal_info.gfx_phase_hot_temp_degC
+			- msm_thermal_info.gfx_phase_hot_temp_hyst_degC);
+	}
+	if (gfx_warm_phase_ctrl_enabled) {
+		seq_printf(m, "gfx warm threshold:%d degC\n",
+				msm_thermal_info.gfx_phase_warm_temp_degC);
+		seq_printf(m, "gfx warm threshold clear:%d degC\n",
+			msm_thermal_info.gfx_phase_warm_temp_degC
+			- msm_thermal_info.gfx_phase_warm_temp_hyst_degC);
+	}
+	if (gfx_crit_phase_ctrl_enabled || gfx_warm_phase_ctrl_enabled)
+		seq_printf(m, "gfx tsens sensor:tsens_tz_sensor%d\n",
+			msm_thermal_info.gfx_sensor);
+}
+
+static void thermal_cxip_lm_config_read(struct seq_file *m, void *data)
+{
+	if (cxip_lm_enabled) {
+		seq_puts(m, "\n-----CX IPEAK LM-----\n");
+		seq_printf(m, "threshold:%d degC\n",
+				CXIP_LM_THERM_SENS_HIGH);
+		seq_printf(m, "threshold clear:%d degC\n",
+				CXIP_LM_THERM_SENS_LOW);
+		seq_printf(m, "tsens sensor:tsens_tz_sensor%d\n",
+				CXIP_LM_THERM_SENS_ID);
+	}
+}
+
+static void thermal_disable_all_mitigation(void)
+{
+	thermal_cpu_freq_mit_disable();
+	thermal_cpu_hotplug_mit_disable();
+	thermal_reset_disable();
+	thermal_mx_mit_disable();
+	thermal_vdd_mit_disable();
+	thermal_psm_mit_disable();
+	thermal_ocr_mit_disable();
+	thermal_cx_phase_ctrl_mit_disable();
+	thermal_gfx_phase_warm_ctrl_mit_disable();
+	thermal_gfx_phase_crit_ctrl_mit_disable();
+	thermal_cxip_lm_disable();
+}
+
+static void enable_config(int config_id)
+{
+	switch (config_id) {
+	case MSM_THERM_RESET:
+		therm_reset_enabled = 1;
+		break;
+	case MSM_VDD_RESTRICTION:
+		vdd_rstr_enabled = 1;
+		break;
+	case MSM_CX_PHASE_CTRL_HOT:
+		cx_phase_ctrl_enabled = 1;
+		break;
+	case MSM_GFX_PHASE_CTRL_WARM:
+		gfx_warm_phase_ctrl_enabled = 1;
+		break;
+	case MSM_GFX_PHASE_CTRL_HOT:
+		gfx_crit_phase_ctrl_enabled = 1;
+		break;
+	case MSM_OCR:
+		ocr_enabled = 1;
+		break;
+	case MSM_VDD_MX_RESTRICTION:
+		vdd_mx_enabled = 1;
+		break;
+	case MSM_THERM_CXIP_LM:
+		cxip_lm_enabled = 1;
+		break;
+	case MSM_LIST_MAX_NR + HOTPLUG_CONFIG:
+		hotplug_enabled = 1;
+		break;
+	case MSM_LIST_MAX_NR + CPUFREQ_CONFIG:
+		freq_mitigation_enabled = 1;
+		break;
+	default:
+		pr_err("Bad config:%d\n", config_id);
+		break;
+	}
+}
+
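+/*
+ * Apply queued debugfs threshold updates: for each flagged config,
+ * disable the mitigation, re-enable it, then re-arm its thresholds.
+ * Hotplug and cpufreq use the per-CPU threshold macro; everything else
+ * goes through the sensor manager lists, scaled to TSENS units.
+ */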
+static void thermal_update_mit_threshold(
+		struct msm_thermal_debugfs_thresh_config *config, int max_mit)
+{
+	int idx = 0, i = 0;
+
+	for (idx = 0; idx < max_mit; idx++) {
+		if (!config[idx].update)
+			continue;
+		config[idx].disable_config();
+		enable_config(idx);
+		if (idx >= MSM_LIST_MAX_NR) {
+			if (idx == MSM_LIST_MAX_NR + HOTPLUG_CONFIG)
+				UPDATE_CPU_CONFIG_THRESHOLD(
+					msm_thermal_info.core_control_mask,
+					HOTPLUG_THRESHOLD_HIGH,
+					config[idx].thresh,
+					config[idx].thresh_clr);
+			else if (idx == MSM_LIST_MAX_NR + CPUFREQ_CONFIG)
+				UPDATE_CPU_CONFIG_THRESHOLD(
+					msm_thermal_info
+					.freq_mitig_control_mask,
+					FREQ_THRESHOLD_HIGH,
+					config[idx].thresh,
+					config[idx].thresh_clr);
+		} else {
+			for (i = 0; i < thresh[idx].thresh_ct; i++) {
+				thresh[idx].thresh_list[i].threshold[0].temp
+					= config[idx].thresh
+					* tsens_scaling_factor;
+				thresh[idx].thresh_list[i].threshold[1].temp
+					= config[idx].thresh_clr
+					* tsens_scaling_factor;
+				set_and_activate_threshold(
+					thresh[idx].thresh_list[i].sensor_id,
+					&thresh[idx].thresh_list[i]
+					.threshold[0]);
+				set_and_activate_threshold(
+					thresh[idx].thresh_list[i].sensor_id,
+					&thresh[idx].thresh_list[i]
+					.threshold[1]);
+			}
+		}
+		config[idx].update = 0;
+	}
+}
+
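+/*
+ * Debugfs command handler: DEBUGFS_DISABLE_ALL_MIT turns off every
+ * mitigation and clears the global mitigation flag, while
+ * DEBUGFS_CONFIG_UPDATE applies the threshold updates queued in
+ * mit_config.
+ */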
+static ssize_t thermal_config_debugfs_write(struct file *file,
+			const char __user *buffer, size_t count, loff_t *ppos)
+{
+	int ret = 0;
+	char config_string[MAX_DEBUGFS_CONFIG_LEN] = { '\0' };
+
+	if (!mitigation || count > (MAX_DEBUGFS_CONFIG_LEN - 1)) {
+		pr_err("Invalid parameters\n");
+		return -EINVAL;
+	}
+
+	if (copy_from_user(config_string, buffer, count)) {
+		pr_err("Error reading debugfs command\n");
+		ret = -EFAULT;
+		goto exit_debugfs_write;
+	}
+	pr_debug("Debugfs config command string: %s\n", config_string);
+	if (!strcmp(config_string, DEBUGFS_DISABLE_ALL_MIT)) {
+		mitigation = 0;
+		pr_err("KTM mitigations disabled via debugfs\n");
+		thermal_disable_all_mitigation();
+	} else if (!strcmp(config_string, DEBUGFS_CONFIG_UPDATE)) {
+		thermal_update_mit_threshold(mit_config, MSM_LIST_MAX_NR
+			+ MAX_CPU_CONFIG);
+	}
+
+exit_debugfs_write:
+	if (!ret)
+		return count;
+	return ret;
+}
+
+static int thermal_config_debugfs_read(struct seq_file *m, void *data)
+{
+	if (!mitigation) {
+		seq_puts(m, "KTM Mitigations Disabled\n");
+		return 0;
+	}
+	thermal_boot_config_read(m, data);
+	thermal_emergency_config_read(m, data);
+	thermal_mx_config_read(m, data);
+	thermal_vdd_config_read(m, data);
+	thermal_psm_config_read(m, data);
+	thermal_ocr_config_read(m, data);
+	thermal_phase_ctrl_config_read(m, data);
+	thermal_cxip_lm_config_read(m, data);
+
+	return 0;
+}
+
+static void msm_thermal_late_sysfs_init(void)
+{
+	/*
+	 * In case the sysfs node-creation calls ran before the probe
+	 * function, make sure the sysfs nodes are created again here.
+	 */
+	if (psm_nodes_called) {
+		msm_thermal_add_psm_nodes();
+		psm_nodes_called = false;
+	}
+	if (vdd_rstr_nodes_called) {
+		msm_thermal_add_vdd_rstr_nodes();
+		vdd_rstr_nodes_called = false;
+	}
+	if (sensor_info_nodes_called) {
+		msm_thermal_add_sensor_info_nodes();
+		sensor_info_nodes_called = false;
+	}
+	if (ocr_nodes_called) {
+		msm_thermal_add_ocr_nodes();
+		ocr_nodes_called = false;
+	}
+	if (cluster_info_nodes_called) {
+		create_cpu_topology_sysfs();
+		cluster_info_nodes_called = false;
+	}
+}
+
+static int probe_deferrable_properties(struct device_node *node,
+	struct msm_thermal_data *data, struct platform_device *pdev)
+{
+	int ret = 0;
+
+	/*
+	 * Probe optional properties below. Call probe_psm before
+	 * probe_vdd_rstr because rpm_regulator_get has to be called
+	 * before devm_regulator_get. probe_ocr should be called after
+	 * probe_vdd_rstr to reuse the regulator handle; calling
+	 * devm_regulator_get more than once will fail.
+	 */
+	ret = probe_psm(node, data, pdev);
+	if (ret == -EPROBE_DEFER)
+		return ret;
+
+	ret = probe_vdd_rstr(node, data, pdev);
+	if (ret == -EPROBE_DEFER)
+		return ret;
+
+	probe_ocr(node, data, pdev);
+	ret = probe_vdd_mx(node, data, pdev);
+	if (ret == -EPROBE_DEFER)
+		return ret;
+
+	return 0;
+}
+
+static int msm_thermal_dev_probe(struct platform_device *pdev)
+{
+	int ret = 0;
+	struct device_node *node = pdev->dev.of_node;
+	struct msm_thermal_data data;
+
+	if (!mitigation)
+		return ret;
+
+	memset(&data, 0, sizeof(struct msm_thermal_data));
+	data.pdev = pdev;
+
+	ret = msm_thermal_pre_init(&pdev->dev);
+	if (ret) {
+		pr_err("thermal pre init failed. err:%d\n", ret);
+		goto probe_exit;
+	}
+	probe_sensor_info(node, &data, pdev);
+	ret = probe_deferrable_properties(node, &data, pdev);
+	if (ret)
+		goto probe_exit;
+
+	lmh_dcvs_is_supported = of_property_read_bool(node, "clock-names");
+	probe_cc(node, &data, pdev);
+	probe_freq_mitigation(node, &data, pdev);
+	probe_cx_phase_ctrl(node, &data, pdev);
+	probe_gfx_phase_ctrl(node, &data, pdev);
+	probe_therm_reset(node, &data, pdev);
+	probe_cxip_lm(node, &data, pdev);
+	update_cpu_topology(&pdev->dev);
+	ret = fetch_cpu_mitigation_info(&data, pdev);
+	if (ret) {
+		pr_err("Error fetching CPU mitigation information. err:%d\n",
+				ret);
+		goto probe_exit;
+	}
+	msm_thermal_late_sysfs_init();
+	ret = msm_thermal_init(&data);
+	if (ret)
+		goto probe_exit;
+	msm_thermal_probed = true;
+
+probe_exit:
+	return ret;
+}
+
+static int msm_thermal_dev_exit(struct platform_device *inp_dev)
+{
+	int i = 0;
+	uint32_t _cluster = 0;
+	struct cluster_info *cluster_ptr = NULL;
+	struct uio_info *info = dev_get_drvdata(&inp_dev->dev);
+	struct rail *r = NULL;
+
+	uio_unregister_device(info);
+	unregister_reboot_notifier(&msm_thermal_reboot_notifier);
+	if (msm_therm_debugfs && msm_therm_debugfs->parent)
+		debugfs_remove_recursive(msm_therm_debugfs->parent);
+	msm_thermal_ioctl_cleanup();
+	if (thresh) {
+		if (vdd_rstr_enabled) {
+			sensor_mgr_remove_threshold(
+				&thresh[MSM_VDD_RESTRICTION]);
+			kfree(thresh[MSM_VDD_RESTRICTION].thresh_list);
+			for (i = 0; i < rails_cnt; i++) {
+				if (!rails[i].freq_req)
+					continue;
+				r = &rails[i];
+				for_each_possible_cpu(i) {
+					devmgr_unregister_mitigation_client(
+						&msm_thermal_info.pdev->dev,
+						r->device_handle[i]);
+					r->device_handle[i] = NULL;
+				}
+			}
+			kfree(rails);
+		}
+		if (cx_phase_ctrl_enabled) {
+			sensor_mgr_remove_threshold(
+				&thresh[MSM_CX_PHASE_CTRL_HOT]);
+			kfree(thresh[MSM_CX_PHASE_CTRL_HOT].thresh_list);
+		}
+		if (gfx_warm_phase_ctrl_enabled) {
+			sensor_mgr_remove_threshold(
+				&thresh[MSM_GFX_PHASE_CTRL_WARM]);
+			kfree(thresh[MSM_GFX_PHASE_CTRL_WARM].thresh_list);
+		}
+		if (gfx_crit_phase_ctrl_enabled) {
+			sensor_mgr_remove_threshold(
+				&thresh[MSM_GFX_PHASE_CTRL_HOT]);
+			kfree(thresh[MSM_GFX_PHASE_CTRL_HOT].thresh_list);
+		}
+		if (ocr_enabled) {
+			for (i = 0; i < ocr_rail_cnt; i++)
+				kfree(ocr_rails[i].attr_gp.attrs);
+			kfree(ocr_rails);
+			ocr_rails = NULL;
+			sensor_mgr_remove_threshold(
+				&thresh[MSM_OCR]);
+			kfree(thresh[MSM_OCR].thresh_list);
+		}
+		if (vdd_mx_enabled) {
+			kfree(mx_kobj);
+			kfree(mx_attr_group.attrs);
+			sensor_mgr_remove_threshold(
+				&thresh[MSM_VDD_MX_RESTRICTION]);
+			kfree(thresh[MSM_VDD_MX_RESTRICTION].thresh_list);
+		}
+		if (cxip_lm_enabled) {
+			sensor_mgr_remove_threshold(
+				&thresh[MSM_THERM_CXIP_LM]);
+			kfree(thresh[MSM_THERM_CXIP_LM].thresh_list);
+		}
+		kfree(thresh);
+		thresh = NULL;
+	}
+	kfree(table);
+	if (core_ptr) {
+		for (; _cluster < core_ptr->entity_count; _cluster++) {
+			cluster_ptr = &core_ptr->child_entity_ptr[_cluster];
+			kfree(cluster_ptr->freq_table);
+		}
+	}
+
+	return 0;
+}
+
+static int __init ktm_params(char *str)
+{
+	if (str != NULL && !strcmp(str, "disable")) {
+		pr_info("KTM Disabled at Boot\n");
+		mitigation = 0;
+	}
+
+	return 0;
+}
+
+early_param("qcomthermal", ktm_params);
+
+static const struct of_device_id msm_thermal_match_table[] = {
+	{.compatible = "qcom,msm-thermal"},
+	{},
+};
+
+static struct platform_driver msm_thermal_device_driver = {
+	.probe = msm_thermal_dev_probe,
+	.driver = {
+		.name = "msm-thermal",
+		.owner = THIS_MODULE,
+		.of_match_table = msm_thermal_match_table,
+	},
+	.remove = msm_thermal_dev_exit,
+};
+
+int __init msm_thermal_device_init(void)
+{
+	return platform_driver_register(&msm_thermal_device_driver);
+}
+arch_initcall(msm_thermal_device_init);
+
+int __init msm_thermal_late_init(void)
+{
+	if (!msm_thermal_probed)
+		return 0;
+
+	if (num_possible_cpus() > 1)
+		msm_thermal_add_cc_nodes();
+	msm_thermal_add_psm_nodes();
+	msm_thermal_add_vdd_rstr_nodes();
+	msm_thermal_add_sensor_info_nodes();
+	if (ocr_reg_init_defer) {
+		if (!ocr_reg_init(msm_thermal_info.pdev)) {
+			ocr_enabled = true;
+			msm_thermal_add_ocr_nodes();
+		}
+	}
+	msm_thermal_add_mx_nodes();
+	create_cpu_topology_sysfs();
+	create_thermal_debugfs();
+	msm_thermal_add_bucket_info_nodes();
+	uio_init(msm_thermal_info.pdev);
+
+	return 0;
+}
+late_initcall(msm_thermal_late_init);
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/thermal/msm_thermal-dev.c	2019-01-22 16:16:27.155279478 +0100
@@ -0,0 +1,434 @@
+/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/fs.h>
+#include <linux/types.h>
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <linux/msm_thermal_ioctl.h>
+#include <linux/msm_thermal.h>
+#include <linux/uaccess.h>
+#include <linux/cdev.h>
+#include <linux/semaphore.h>
+#include <linux/module.h>
+
+struct msm_thermal_ioctl_dev {
+	struct semaphore sem;
+	struct cdev char_dev;
+};
+
+static int msm_thermal_major;
+static struct class *thermal_class;
+static struct msm_thermal_ioctl_dev *msm_thermal_dev;
+static unsigned int freq_table_len[NR_CPUS], freq_table_set[NR_CPUS];
+static unsigned int voltage_table_set[NR_CPUS];
+static unsigned int *freq_table_ptr[NR_CPUS];
+static uint32_t *voltage_table_ptr[NR_CPUS];
+static DEFINE_MUTEX(ioctl_access_mutex);
+
+static int msm_thermal_ioctl_open(struct inode *node, struct file *filep)
+{
+	int ret = 0;
+	struct msm_thermal_ioctl_dev *dev;
+
+	dev = container_of(node->i_cdev, struct msm_thermal_ioctl_dev,
+		char_dev);
+	filep->private_data = dev;
+
+	return ret;
+}
+
+static int msm_thermal_ioctl_release(struct inode *node, struct file *filep)
+{
+	pr_debug("%s: IOCTL: release\n", KBUILD_MODNAME);
+	return 0;
+}
+
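+/*
+ * Validate an incoming ioctl: check the magic number and command index,
+ * verify the user buffer is accessible for the implied direction, copy
+ * the argument in, and sanity-check the embedded size and (for per-CPU
+ * frequency requests) the CPU number.
+ */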
+static long validate_and_copy(unsigned int *cmd, unsigned long *arg,
+	struct msm_thermal_ioctl *query)
+{
+	long ret = 0, err_val = 0;
+
+	if ((_IOC_TYPE(*cmd) != MSM_THERMAL_MAGIC_NUM) ||
+		(_IOC_NR(*cmd) >= MSM_CMD_MAX_NR)) {
+		ret = -ENOTTY;
+		goto validate_exit;
+	}
+
+	if (_IOC_DIR(*cmd) & _IOC_READ) {
+		err_val = !access_ok(VERIFY_WRITE, (void __user *)*arg,
+				_IOC_SIZE(*cmd));
+	} else if (_IOC_DIR(*cmd) & _IOC_WRITE) {
+		err_val = !access_ok(VERIFY_READ, (void __user *)*arg,
+				_IOC_SIZE(*cmd));
+	}
+	if (err_val) {
+		ret = -EFAULT;
+		goto validate_exit;
+	}
+
+	if (copy_from_user(query, (void __user *)(*arg),
+		sizeof(struct msm_thermal_ioctl))) {
+		ret = -EACCES;
+		goto validate_exit;
+	}
+
+	if (query->size != sizeof(struct msm_thermal_ioctl)) {
+		pr_err("%s: Invalid input argument size\n", __func__);
+		ret = -EINVAL;
+		goto validate_exit;
+	}
+
+	switch (*cmd) {
+	case MSM_THERMAL_SET_CPU_MAX_FREQUENCY:
+	case MSM_THERMAL_SET_CPU_MIN_FREQUENCY:
+		if (query->cpu_freq.cpu_num >= num_possible_cpus()) {
+			pr_err("%s: Invalid CPU number: %u\n", __func__,
+				query->cpu_freq.cpu_num);
+			ret = -EINVAL;
+			goto validate_exit;
+		}
+		break;
+	default:
+		break;
+	}
+
+validate_exit:
+	return ret;
+}
+
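+/*
+ * Serve MSM_THERMAL_GET_CLUSTER_FREQUENCY_PLAN: fetch and cache the
+ * cluster's frequency table on first use. A request with a zero table
+ * length returns just the length; otherwise one MSM_IOCTL_FREQ_SIZE
+ * chunk, selected by set_idx, is copied back to userspace.
+ */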
+static long msm_thermal_process_freq_table_req(struct msm_thermal_ioctl *query,
+		unsigned long *arg)
+{
+	long ret = 0;
+	uint32_t table_idx, idx = 0, cluster_id = query->clock_freq.cluster_num;
+	struct clock_plan_arg *clock_freq = &(query->clock_freq);
+
+	if (cluster_id >= num_possible_cpus())
+		return -EINVAL;
+
+	if (!freq_table_len[cluster_id]) {
+		ret = msm_thermal_get_freq_plan_size(cluster_id,
+			&freq_table_len[cluster_id]);
+		if (ret) {
+			pr_err("%s: Cluster%d freq table length get err:%ld\n",
+				KBUILD_MODNAME, cluster_id, ret);
+			goto process_freq_exit;
+		}
+		if (!freq_table_len[cluster_id]) {
+			pr_err("%s: Cluster%d freq table empty\n",
+				KBUILD_MODNAME, cluster_id);
+			ret = -EAGAIN;
+			goto process_freq_exit;
+		}
+
+		freq_table_set[cluster_id] = freq_table_len[cluster_id]
+						/ MSM_IOCTL_FREQ_SIZE;
+		if (freq_table_len[cluster_id] % MSM_IOCTL_FREQ_SIZE)
+			freq_table_set[cluster_id]++;
+
+		if (!freq_table_ptr[cluster_id]) {
+			freq_table_ptr[cluster_id] = kzalloc(
+				sizeof(unsigned int) *
+				freq_table_len[cluster_id], GFP_KERNEL);
+			if (!freq_table_ptr[cluster_id]) {
+				pr_err("%s: memory alloc failed\n",
+						KBUILD_MODNAME);
+				freq_table_len[cluster_id] = 0;
+				ret = -ENOMEM;
+				goto process_freq_exit;
+			}
+		}
+		ret = msm_thermal_get_cluster_freq_plan(cluster_id,
+			freq_table_ptr[cluster_id]);
+		if (ret) {
+			pr_err("%s: Error getting frequency table. err:%ld\n",
+					KBUILD_MODNAME, ret);
+			freq_table_len[cluster_id] = 0;
+			freq_table_set[cluster_id] = 0;
+			kfree(freq_table_ptr[cluster_id]);
+			freq_table_ptr[cluster_id] = NULL;
+			goto process_freq_exit;
+		}
+	}
+
+	if (!clock_freq->freq_table_len) {
+		clock_freq->freq_table_len = freq_table_len[cluster_id];
+		goto copy_and_return;
+	}
+	if (clock_freq->set_idx >= freq_table_set[cluster_id]) {
+		pr_err("%s: Invalid freq table set%d for cluster%d\n",
+			KBUILD_MODNAME, clock_freq->set_idx,
+			cluster_id);
+		ret = -EINVAL;
+		goto process_freq_exit;
+	}
+
+	table_idx = MSM_IOCTL_FREQ_SIZE * clock_freq->set_idx;
+	for (; table_idx < freq_table_len[cluster_id]
+		&& idx < MSM_IOCTL_FREQ_SIZE; idx++, table_idx++) {
+		clock_freq->freq_table[idx] =
+			freq_table_ptr[cluster_id][table_idx];
+	}
+	clock_freq->freq_table_len = idx;
+
+copy_and_return:
+	ret = copy_to_user((void __user *)(*arg), query,
+		sizeof(struct msm_thermal_ioctl));
+	if (ret) {
+		pr_err("%s: copy_to_user error:%ld.\n", KBUILD_MODNAME, ret);
+		goto process_freq_exit;
+	}
+
+process_freq_exit:
+	return ret;
+}
+
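+/*
+ * Serve MSM_THERMAL_GET_CLUSTER_VOLTAGE_PLAN, following the same
+ * length-query-then-chunked-read protocol as the frequency plan request
+ * above, with the voltage table cached per cluster.
+ */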
+static long msm_thermal_process_voltage_table_req(
+		struct msm_thermal_ioctl *query,
+		unsigned long *arg)
+{
+	long ret = 0;
+	uint32_t table_idx = 0, idx = 0;
+	uint32_t cluster_id = query->voltage.cluster_num;
+	struct voltage_plan_arg *voltage = &(query->voltage);
+
+	if (cluster_id >= num_possible_cpus())
+		return -EINVAL;
+
+	if (!voltage_table_ptr[cluster_id]) {
+		if (!freq_table_len[cluster_id]) {
+			ret = msm_thermal_get_freq_plan_size(cluster_id,
+				&freq_table_len[cluster_id]);
+			if (ret) {
+				pr_err(
+				"%s: Cluster%d freq table len err:%ld\n",
+				KBUILD_MODNAME, cluster_id, ret);
+				goto process_volt_exit;
+			}
+			if (!freq_table_len[cluster_id]) {
+				pr_err("%s: Cluster%d freq table empty\n",
+					KBUILD_MODNAME, cluster_id);
+				ret = -EAGAIN;
+				goto process_volt_exit;
+			}
+		}
+		voltage_table_ptr[cluster_id] = kzalloc(
+			sizeof(uint32_t) *
+			freq_table_len[cluster_id], GFP_KERNEL);
+		if (!voltage_table_ptr[cluster_id]) {
+			pr_err("%s: memory alloc failed\n",
+				KBUILD_MODNAME);
+			ret = -ENOMEM;
+			goto process_volt_exit;
+		}
+		ret = msm_thermal_get_cluster_voltage_plan(cluster_id,
+			voltage_table_ptr[cluster_id]);
+		if (ret) {
+			pr_err("%s: Error getting voltage table. err:%ld\n",
+				KBUILD_MODNAME, ret);
+			kfree(voltage_table_ptr[cluster_id]);
+			voltage_table_ptr[cluster_id] = NULL;
+			goto process_volt_exit;
+		}
+	}
+
+	if (!voltage->voltage_table_len) {
+		voltage->voltage_table_len = freq_table_len[cluster_id];
+		goto copy_and_return;
+	}
+
+	voltage_table_set[cluster_id] = freq_table_len[cluster_id]
+					/ MSM_IOCTL_FREQ_SIZE;
+	if (freq_table_len[cluster_id] % MSM_IOCTL_FREQ_SIZE)
+		voltage_table_set[cluster_id]++;
+
+	if (voltage->set_idx >= voltage_table_set[cluster_id]) {
+		pr_err("%s: Invalid voltage table set%d for cluster%d\n",
+			KBUILD_MODNAME, voltage->set_idx,
+			cluster_id);
+		ret = -EINVAL;
+		goto process_volt_exit;
+	}
+
+	table_idx = MSM_IOCTL_FREQ_SIZE * voltage->set_idx;
+	for (; table_idx < freq_table_len[cluster_id]
+		&& idx < MSM_IOCTL_FREQ_SIZE; idx++, table_idx++) {
+		voltage->voltage_table[idx] =
+			voltage_table_ptr[cluster_id][table_idx];
+	}
+	voltage->voltage_table_len = idx;
+
+copy_and_return:
+	ret = copy_to_user((void __user *)(*arg), query,
+		sizeof(struct msm_thermal_ioctl));
+	if (ret) {
+		pr_err("%s: copy_to_user error:%ld.\n", KBUILD_MODNAME, ret);
+		goto process_volt_exit;
+	}
+
+process_volt_exit:
+	return ret;
+}
+
+static long msm_thermal_ioctl_process(struct file *filep, unsigned int cmd,
+	unsigned long arg)
+{
+	long ret = 0;
+	struct msm_thermal_ioctl query;
+
+	pr_debug("%s: IOCTL: processing cmd:%u\n", KBUILD_MODNAME, cmd);
+
+	ret = validate_and_copy(&cmd, &arg, &query);
+	if (ret)
+		return ret;
+
+	mutex_lock(&ioctl_access_mutex);
+	switch (cmd) {
+	case MSM_THERMAL_SET_CPU_MAX_FREQUENCY:
+		ret = msm_thermal_set_frequency(query.cpu_freq.cpu_num,
+			query.cpu_freq.freq_req, true);
+		break;
+	case MSM_THERMAL_SET_CPU_MIN_FREQUENCY:
+		ret = msm_thermal_set_frequency(query.cpu_freq.cpu_num,
+			query.cpu_freq.freq_req, false);
+		break;
+	case MSM_THERMAL_SET_CLUSTER_MAX_FREQUENCY:
+		ret = msm_thermal_set_cluster_freq(query.cpu_freq.cpu_num,
+			query.cpu_freq.freq_req, true);
+		break;
+	case MSM_THERMAL_SET_CLUSTER_MIN_FREQUENCY:
+		ret = msm_thermal_set_cluster_freq(query.cpu_freq.cpu_num,
+			query.cpu_freq.freq_req, false);
+		break;
+	case MSM_THERMAL_GET_CLUSTER_FREQUENCY_PLAN:
+		ret = msm_thermal_process_freq_table_req(&query, &arg);
+		break;
+	case MSM_THERMAL_GET_CLUSTER_VOLTAGE_PLAN:
+		ret = msm_thermal_process_voltage_table_req(&query, &arg);
+		break;
+	default:
+		ret = -ENOTTY;
+		goto process_exit;
+	}
+process_exit:
+	mutex_unlock(&ioctl_access_mutex);
+	return ret;
+}
+
+#ifdef CONFIG_COMPAT
+static long msm_thermal_compat_ioctl_process(struct file *filep,
+				   unsigned int cmd, unsigned long arg)
+{
+	arg = (unsigned long)compat_ptr(arg);
+	return msm_thermal_ioctl_process(filep, cmd, arg);
+}
+#endif	/* CONFIG_COMPAT */
+
+static const struct file_operations msm_thermal_fops = {
+	.owner = THIS_MODULE,
+	.open = msm_thermal_ioctl_open,
+	.unlocked_ioctl = msm_thermal_ioctl_process,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl = msm_thermal_compat_ioctl_process,
+#endif  /* CONFIG_COMPAT */
+	.release = msm_thermal_ioctl_release,
+};
+
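+/*
+ * Register the msm_thermal ioctl interface: allocate a char device
+ * region, create the class and device node, and add the cdev. Error
+ * paths unwind in reverse order of creation.
+ */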
+int msm_thermal_ioctl_init(void)
+{
+	int ret = 0;
+	dev_t thermal_dev;
+	struct device *therm_device;
+
+	ret = alloc_chrdev_region(&thermal_dev, 0, 1,
+		MSM_THERMAL_IOCTL_NAME);
+	if (ret < 0) {
+		pr_err("%s: Error in allocating char device region. Err:%d\n",
+			KBUILD_MODNAME, ret);
+		goto ioctl_init_exit;
+	}
+
+	msm_thermal_major = MAJOR(thermal_dev);
+
+	thermal_class = class_create(THIS_MODULE, "msm_thermal");
+	if (IS_ERR(thermal_class)) {
+		pr_err("%s: Error in creating class\n",
+			KBUILD_MODNAME);
+		ret = PTR_ERR(thermal_class);
+		goto ioctl_class_fail;
+	}
+
+	therm_device = device_create(thermal_class, NULL, thermal_dev, NULL,
+				MSM_THERMAL_IOCTL_NAME);
+	if (IS_ERR(therm_device)) {
+		pr_err("%s: Error in creating character device\n",
+			KBUILD_MODNAME);
+		ret = PTR_ERR(therm_device);
+		goto ioctl_dev_fail;
+	}
+	msm_thermal_dev = kzalloc(sizeof(struct msm_thermal_ioctl_dev),
+				GFP_KERNEL);
+	if (!msm_thermal_dev) {
+		pr_err("%s: Error allocating memory\n",
+			KBUILD_MODNAME);
+		ret = -ENOMEM;
+		goto ioctl_clean_all;
+	}
+
+	sema_init(&msm_thermal_dev->sem, 1);
+	cdev_init(&msm_thermal_dev->char_dev, &msm_thermal_fops);
+	ret = cdev_add(&msm_thermal_dev->char_dev, thermal_dev, 1);
+	if (ret < 0) {
+		pr_err("%s: Error in adding character device\n",
+			KBUILD_MODNAME);
+		goto ioctl_clean_all;
+	}
+
+	return ret;
+
+ioctl_clean_all:
+	device_destroy(thermal_class, thermal_dev);
+ioctl_dev_fail:
+	class_destroy(thermal_class);
+ioctl_class_fail:
+	unregister_chrdev_region(thermal_dev, 1);
+ioctl_init_exit:
+	return ret;
+}
+
+void msm_thermal_ioctl_cleanup(void)
+{
+	uint32_t idx = 0;
+	dev_t thermal_dev = MKDEV(msm_thermal_major, 0);
+
+	if (!msm_thermal_dev) {
+		pr_err("%s: Thermal IOCTL cleanup already done\n",
+			KBUILD_MODNAME);
+		return;
+	}
+
+	for (; idx < num_possible_cpus(); idx++) {
+		kfree(freq_table_ptr[idx]);
+		kfree(voltage_table_ptr[idx]);
+	}
+	device_destroy(thermal_class, thermal_dev);
+	class_destroy(thermal_class);
+	cdev_del(&msm_thermal_dev->char_dev);
+	unregister_chrdev_region(thermal_dev, 1);
+	kfree(msm_thermal_dev);
+	msm_thermal_dev = NULL;
+	thermal_class = NULL;
+}
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/thermal/msm-tsens.c	2019-10-29 09:26:24.917215646 +0100
@@ -0,0 +1,2680 @@
+/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/thermal.h>
+#include <linux/interrupt.h>
+#include <linux/workqueue.h>
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/msm_tsens.h>
+#include <linux/err.h>
+#include <linux/of.h>
+#include <linux/debugfs.h>
+#include <linux/vmalloc.h>
+#include <asm/arch_timer.h>
+
+#define CREATE_TRACE_POINTS
+#include <trace/trace_thermal.h>
+
+#define TSENS_DRIVER_NAME		"msm-tsens"
+/* TSENS register info */
+#define TSENS_UPPER_LOWER_INTERRUPT_CTRL(n)		((n) + 0x1000)
+#define TSENS_INTERRUPT_EN		BIT(0)
+
+#define TSENS_S0_UPPER_LOWER_STATUS_CTRL_ADDR(n)	((n) + 0x1004)
+#define TSENS_UPPER_STATUS_CLR		BIT(21)
+#define TSENS_LOWER_STATUS_CLR		BIT(20)
+#define TSENS_UPPER_THRESHOLD_MASK	0xffc00
+#define TSENS_LOWER_THRESHOLD_MASK	0x3ff
+#define TSENS_UPPER_THRESHOLD_SHIFT	10
+
+#define TSENS_S0_STATUS_ADDR(n)		((n) + 0x1030)
+#define TSENS_SN_ADDR_OFFSET		0x4
+#define TSENS_SN_STATUS_TEMP_MASK	0x3ff
+#define TSENS_SN_STATUS_LOWER_STATUS	BIT(11)
+#define TSENS_SN_STATUS_UPPER_STATUS	BIT(12)
+#define TSENS_STATUS_ADDR_OFFSET			2
+
+#define TSENS_TRDY_ADDR(n)		((n) + 0x105c)
+#define TSENS_TRDY_MASK			BIT(0)
+
+#define TSENS2_SN_STATUS_ADDR(n)	((n) + 0x1044)
+#define TSENS2_SN_STATUS_VALID		BIT(14)
+#define TSENS2_SN_STATUS_VALID_MASK	0x4000
+#define TSENS2_TRDY_ADDR(n)		((n) + 0x84)
+
+#define TSENS4_TRDY_ADDR(n)            ((n) + 0x1084)
+
+#define TSENS_MTC_ZONE0_SW_MASK_ADDR(n)  ((n) + 0x10c0)
+#define TSENS_TH1_MTC_IN_EFFECT               BIT(0)
+#define TSENS_TH2_MTC_IN_EFFECT               BIT(1)
+#define TSENS_MTC_IN_EFFECT			0x3
+#define TSENS_MTC_DISABLE			0x0
+
+#define TSENS_MTC_ZONE0_LOG(n)     ((n) + 0x10d0)
+#define TSENS_LOGS_VALID_MASK      0x40000000
+#define TSENS_LOGS_VALID_SHIFT     30
+#define TSENS_LOGS_LATEST_MASK    0x0000001f
+#define TSENS_LOGS_LOG1_MASK      0x000003e0
+#define TSENS_LOGS_LOG2_MASK      0x00007c00
+#define TSENS_LOGS_LOG3_MASK      0x000f8000
+#define TSENS_LOGS_LOG4_MASK      0x01f00000
+#define TSENS_LOGS_LOG5_MASK      0x3e000000
+#define TSENS_LOGS_LOG1_SHIFT     5
+#define TSENS_LOGS_LOG2_SHIFT     10
+#define TSENS_LOGS_LOG3_SHIFT     15
+#define TSENS_LOGS_LOG4_SHIFT     20
+#define TSENS_LOGS_LOG5_SHIFT     25
+
+/* TSENS_TM registers for 8996 */
+#define TSENS_TM_INT_EN(n)			((n) + 0x1004)
+#define TSENS_TM_CRITICAL_WD_BARK		BIT(31)
+#define TSENS_TM_CRITICAL_CYCLE_MONITOR	BIT(30)
+#define TSENS_TM_CRITICAL_INT_EN		BIT(2)
+#define TSENS_TM_UPPER_INT_EN			BIT(1)
+#define TSENS_TM_LOWER_INT_EN			BIT(0)
+
+#define TSENS_TM_UPPER_INT_MASK(n)	(((n) & 0xffff0000) >> 16)
+#define TSENS_TM_LOWER_INT_MASK(n)	((n) & 0xffff)
+#define TSENS_TM_UPPER_LOWER_INT_STATUS(n)	((n) + 0x1008)
+#define TSENS_TM_UPPER_LOWER_INT_CLEAR(n)	((n) + 0x100c)
+#define TSENS_TM_UPPER_LOWER_INT_MASK(n)	((n) + 0x1010)
+#define TSENS_TM_UPPER_INT_SET(n)		(1 << ((n) + 16))
+
+#define TSENS_TM_CRITICAL_INT_STATUS(n)		((n) + 0x1014)
+#define TSENS_TM_CRITICAL_INT_CLEAR(n)		((n) + 0x1018)
+#define TSENS_TM_CRITICAL_INT_MASK(n)		((n) + 0x101c)
+
+#define TSENS_TM_UPPER_LOWER_THRESHOLD(n)	((n) + 0x1020)
+#define TSENS_TM_UPPER_THRESHOLD_SET(n)		((n) << 12)
+#define TSENS_TM_UPPER_THRESHOLD_VALUE_SHIFT(n)	((n) >> 12)
+#define TSENS_TM_LOWER_THRESHOLD_VALUE(n)	((n) & 0xfff)
+#define TSENS_TM_UPPER_THRESHOLD_VALUE(n)	(((n) & 0xfff000) >> 12)
+#define TSENS_TM_UPPER_THRESHOLD_MASK	0xfff000
+#define TSENS_TM_LOWER_THRESHOLD_MASK	0xfff
+#define TSENS_TM_UPPER_THRESHOLD_SHIFT	12
+
+#define TSENS_TM_SN_CRITICAL_THRESHOLD_MASK	0xfff
+#define TSENS_TM_SN_CRITICAL_THRESHOLD(n)	((n) + 0x1060)
+#define TSENS_TM_SN_STATUS(n)			((n) + 0x10a0)
+#define TSENS_TM_SN_STATUS_VALID_BIT		BIT(21)
+#define TSENS_TM_SN_STATUS_CRITICAL_STATUS	BIT(19)
+#define TSENS_TM_SN_STATUS_UPPER_STATUS		BIT(18)
+#define TSENS_TM_SN_STATUS_LOWER_STATUS		BIT(17)
+#define TSENS_TM_SN_LAST_TEMP_MASK		0xfff
+
+#define TSENS_TM_TRDY(n)			((n) + 0x10e4)
+#define TSENS_TM_CODE_BIT_MASK			0xfff
+#define TSENS_TM_CODE_SIGN_BIT			0x800
+
+#define TSENS_CONTROLLER_ID(n)			((n) + 0x1000)
+#define TSENS_DEBUG_CONTROL(n)			((n) + 0x1130)
+#define TSENS_DEBUG_DATA(n)			((n) + 0x1134)
+#define TSENS_TM_MTC_ZONE0_SW_MASK_ADDR(n)	((n) + 0x1140)
+#define TSENS_TM_MTC_ZONE0_LOG(n)		((n) + 0x1150)
+#define TSENS_TM_MTC_ZONE0_HISTORY(n)		((n) + 0x1160)
+#define TSENS_RESET_HISTORY_MASK	0x4
+#define TSENS_RESET_HISTORY_SHIFT	2
+#define TSENS_PS_RED_CMD_MASK	0x3ff00000
+#define TSENS_PS_YELLOW_CMD_MASK	0x000ffc00
+#define TSENS_PS_COOL_CMD_MASK	0x000003ff
+#define TSENS_PS_YELLOW_CMD_SHIFT	0xa
+#define TSENS_PS_RED_CMD_SHIFT	0x14
+/* End TSENS_TM registers for 8996 */
+
+#define TSENS_CTRL_ADDR(n)		(n)
+#define TSENS_EN			BIT(0)
+
+#define TSENS_CAL_DEGC_POINT1		30
+#define TSENS_CAL_DEGC_POINT2		120
+#define TSENS_SLOPE_FACTOR		1000
+
+/* TSENS register data */
+#define TSENS_TRDY_RDY_MIN_TIME		2000
+#define TSENS_TRDY_RDY_MAX_TIME		2100
+#define TSENS_THRESHOLD_MAX_CODE	0x3ff
+#define TSENS_THRESHOLD_MIN_CODE	0x0
+
+#define TSENS_TYPE0		0
+#define TSENS_TYPE2		2
+#define TSENS_TYPE3		3
+#define TSENS_TYPE4		4
+
+/* debug defines */
+#define TSENS_DBG_BUS_ID_0		0
+#define TSENS_DBG_BUS_ID_1		1
+#define TSENS_DBG_BUS_ID_2		2
+#define TSENS_DBG_BUS_ID_15		15
+#define TSENS_DEBUG_LOOP_COUNT_ID_0	2
+#define TSENS_DEBUG_LOOP_COUNT		5
+#define TSENS_DEBUG_STATUS_REG_START	10
+#define TSENS_DEBUG_OFFSET_RANGE	16
+#define TSENS_DEBUG_OFFSET_WORD1	0x4
+#define TSENS_DEBUG_OFFSET_WORD2	0x8
+#define TSENS_DEBUG_OFFSET_WORD3	0xc
+#define TSENS_DEBUG_OFFSET_ROW		0x10
+#define TSENS_DEBUG_DECIDEGC		-950
+#define TSENS_DEBUG_CYCLE_MS		64
+#define TSENS_DEBUG_POLL_MS		200
+#define TSENS_DEBUG_BUS_ID2_MIN_CYCLE	50
+#define TSENS_DEBUG_BUS_ID2_MAX_CYCLE	51
+#define TSENS_DEBUG_ID_MASK_1_4		0xffffffe1
+
+static uint32_t tsens_sec_to_msec_value = 1000;
+static uint32_t tsens_completion_timeout_hz = HZ/2;
+static uint32_t tsens_poll_check = 1;
+
+/* Trips: warm and cool */
+enum tsens_trip_type {
+	TSENS_TRIP_WARM = 0,
+	TSENS_TRIP_COOL,
+	TSENS_TRIP_NUM,
+};
+
+enum tsens_tm_trip_type {
+	TSENS_TM_TRIP_WARM = 0,
+	TSENS_TM_TRIP_COOL,
+	TSENS_TM_TRIP_CRITICAL,
+	TSENS_TM_TRIP_NUM,
+};
+
+#define TSENS_WRITABLE_TRIPS_MASK ((1 << TSENS_TRIP_NUM) - 1)
+#define TSENS_TM_WRITABLE_TRIPS_MASK ((1 << TSENS_TM_TRIP_NUM) - 1)
+
+struct tsens_thrshld_state {
+	enum thermal_device_mode	high_th_state;
+	enum thermal_device_mode	low_th_state;
+	enum thermal_device_mode	crit_th_state;
+	unsigned int			high_adc_code;
+	unsigned int			low_adc_code;
+	int				high_temp;
+	int				low_temp;
+	int				crit_temp;
+};
+
+struct tsens_tm_device_sensor {
+	struct thermal_zone_device	*tz_dev;
+	struct tsens_tm_device		*tm;
+	enum thermal_device_mode	mode;
+	/* Physical HW sensor number */
+	unsigned int			sensor_hw_num;
+	/* Software index, used to keep track of the HW/SW
+	 * sensor ID mapping */
+	unsigned int			sensor_sw_id;
+	unsigned int			sensor_client_id;
+	int				offset;
+	int				calib_data_point1;
+	int				calib_data_point2;
+	uint32_t			slope_mul_tsens_factor;
+	struct tsens_thrshld_state	debug_thr_state_copy;
+	/* dbg_adc_code logs either the raw ADC code or temperature values in
+	 * decidegC based on the controller settings.
+	 */
+	int				dbg_adc_code;
+	u32				wa_temp1_calib_offset_factor;
+	u32				wa_temp2_calib_offset_factor;
+};
+
+struct tsens_dbg_counter {
+	uint32_t			dbg_count[10];
+	uint32_t			idx;
+	unsigned long long		time_stmp[10];
+};
+
+struct tsens_sensor_dbg_info {
+	unsigned long			temp[10];
+	uint32_t			idx;
+	unsigned long long		time_stmp[10];
+	int				adccode[10];
+};
+
+struct tsens_mtc_sysfs {
+	uint32_t zone_log;
+	int zone_mtc;
+	int th1;
+	int th2;
+	uint32_t zone_hist;
+};
+
+struct tsens_tm_device {
+	struct platform_device		*pdev;
+	struct workqueue_struct		*tsens_critical_wq;
+	struct list_head		list;
+	bool				is_ready;
+	bool				prev_reading_avail;
+	bool				calibration_less_mode;
+	bool				tsens_local_init;
+	bool				gain_offset_programmed;
+	bool				cycle_compltn_monitor;
+	bool				wd_bark;
+	int				tsens_factor;
+	uint32_t			tsens_num_sensor;
+	uint32_t			cycle_compltn_monitor_val;
+	uint32_t			wd_bark_val;
+	int				tsens_irq;
+	int				tsens_critical_irq;
+	void				*tsens_addr;
+	void				*tsens_calib_addr;
+	int				tsens_len;
+	int				calib_len;
+	struct resource			*res_tsens_mem;
+	struct resource			*res_calib_mem;
+	uint32_t			tsens_type;
+	bool				tsens_valid_status_check;
+	struct tsens_dbg_counter	tsens_thread_iq_dbg;
+	struct tsens_sensor_dbg_info	sensor_dbg_info[16];
+	int				tsens_upper_irq_cnt;
+	int				tsens_lower_irq_cnt;
+	int				tsens_critical_irq_cnt;
+	int				tsens_critical_wd_cnt;
+	struct delayed_work		tsens_critical_poll_test;
+	struct completion		tsens_rslt_completion;
+	struct tsens_mtc_sysfs		mtcsys;
+	spinlock_t			tsens_crit_lock;
+	spinlock_t			tsens_upp_low_lock;
+	bool				crit_set;
+	struct tsens_dbg_counter	crit_timestamp_last_run;
+	struct tsens_dbg_counter	crit_timestamp_last_interrupt_handled;
+	struct tsens_dbg_counter	crit_timestamp_last_poll_request;
+	u64				qtimer_val_detection_start;
+	u64				qtimer_val_last_detection_interrupt;
+	u64				qtimer_val_last_polling_check;
+	bool				tsens_critical_poll;
+	struct tsens_tm_device_sensor	sensor[0];
+};
+
+LIST_HEAD(tsens_device_list);
+
+static char dbg_buff[1024];
+static struct dentry *dent;
+static struct dentry *dfile_stats;
+
+static const struct of_device_id tsens_match[] = {
+	{	.compatible = "qcom,msm8996-tsens",
+	},
+	{	.compatible = "qcom,msmtitanium-tsens",
+	},
+	{	.compatible = "qcom,msm8998-tsens",
+	},
+	{	.compatible = "qcom,msmhamster-tsens",
+	},
+	{	.compatible = "qcom,sdm660-tsens",
+	},
+	{	.compatible = "qcom,sdm630-tsens",
+	},
+	{}
+};
+
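+/*
+ * Return the first controller on the global device list, or NULL when
+ * none has been registered yet. Callers that care about a specific
+ * controller use get_tsens_controller_for_client_id() instead.
+ */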
+static struct tsens_tm_device *tsens_controller_is_present(void)
+{
+	struct tsens_tm_device *tmdev_chip = NULL;
+
+	if (list_empty(&tsens_device_list)) {
+		pr_err("%s: TSENS controller not available\n", __func__);
+		return tmdev_chip;
+	}
+
+	list_for_each_entry(tmdev_chip, &tsens_device_list, list)
+		return tmdev_chip;
+
+	return tmdev_chip;
+}
+
+static int32_t get_tsens_sensor_for_client_id(struct tsens_tm_device *tmdev,
+						uint32_t sensor_client_id)
+{
+	bool id_found = false;
+	uint32_t i = 0;
+	struct device_node *of_node = NULL;
+	const struct of_device_id *id;
+
+	of_node = tmdev->pdev->dev.of_node;
+	if (of_node == NULL) {
+		pr_err("Invalid of_node??\n");
+		return -EINVAL;
+	}
+
+	id = of_match_node(tsens_match, of_node);
+	if (id == NULL) {
+		pr_err("cannot find a matching tsens of_node\n");
+		return -ENODEV;
+	}
+
+	if (!strcmp(id->compatible, "qcom,msm8996-tsens") ||
+		(!strcmp(id->compatible, "qcom,msm8998-tsens")) ||
+		(!strcmp(id->compatible, "qcom,sdm660-tsens")) ||
+		(!strcmp(id->compatible, "qcom,sdm630-tsens")) ||
+		(!strcmp(id->compatible, "qcom,msmhamster-tsens"))) {
+		while (i < tmdev->tsens_num_sensor && !id_found) {
+			if (tmdev->sensor[i].sensor_client_id ==
+							sensor_client_id) {
+				id_found = true;
+				return tmdev->sensor[i].sensor_hw_num;
+			}
+			i++;
+		}
+	} else
+		return sensor_client_id;
+
+	return -EINVAL;
+}
+
+static struct tsens_tm_device *get_tsens_controller_for_client_id(
+						uint32_t sensor_client_id)
+{
+	struct tsens_tm_device *tmdev_chip = NULL;
+	bool id_found = false;
+	uint32_t i = 0;
+
+	list_for_each_entry(tmdev_chip, &tsens_device_list, list) {
+		i = 0;
+		while (i < tmdev_chip->tsens_num_sensor && !id_found) {
+			if (tmdev_chip->sensor[i].sensor_client_id ==
+						sensor_client_id) {
+				id_found = true;
+				return tmdev_chip;
+			}
+			i++;
+		}
+	}
+
+	if (!id_found)
+		return NULL;
+
+	return tmdev_chip;
+}
+
+static struct tsens_tm_device *get_all_tsens_controller_sensor_count(
+						uint32_t *sensor_count)
+{
+	struct tsens_tm_device *tmdev_chip = NULL;
+
+	list_for_each_entry(tmdev_chip, &tsens_device_list, list)
+		*sensor_count += tmdev_chip->tsens_num_sensor;
+
+	return tmdev_chip;
+}
+
+int tsens_is_ready(void)
+{
+	struct tsens_tm_device *tmdev = NULL;
+
+	tmdev = tsens_controller_is_present();
+	if (!tmdev)
+		return -EPROBE_DEFER;
+	else
+		return tmdev->is_ready;
+}
+EXPORT_SYMBOL(tsens_is_ready);
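+
+/*
+ * A return of 0 means a controller is present but not yet ready;
+ * callers treat that like -EPROBE_DEFER, hence the
+ * "tsens_is_ready() <= 0" checks later in this file.
+ */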
+
+static int tsens_get_sw_id_mapping_for_controller(
+					int sensor_hw_num,
+					int *sensor_sw_idx,
+					struct tsens_tm_device *tmdev)
+{
+	int i = 0;
+	bool id_found = false;
+
+	while (i < tmdev->tsens_num_sensor && !id_found) {
+		if (sensor_hw_num == tmdev->sensor[i].sensor_hw_num) {
+			*sensor_sw_idx = tmdev->sensor[i].sensor_sw_id;
+			id_found = true;
+		}
+		i++;
+	}
+
+	if (!id_found)
+		return -EINVAL;
+
+	return 0;
+}
+
+int tsens_get_hw_id_mapping(int thermal_sensor_num, int *sensor_client_id)
+{
+	struct tsens_tm_device *tmdev = NULL;
+	struct device_node *of_node = NULL;
+	const struct of_device_id *id;
+	uint32_t tsens_max_sensors = 0, idx = 0, i = 0;
+
+	if (list_empty(&tsens_device_list)) {
+		pr_err("%s: TSENS controller not available\n", __func__);
+		return -EPROBE_DEFER;
+	}
+
+	list_for_each_entry(tmdev, &tsens_device_list, list)
+		tsens_max_sensors += tmdev->tsens_num_sensor;
+
+	if (tsens_max_sensors != thermal_sensor_num) {
+		pr_err("TSENS total sensors is %d, thermal expects:%d\n",
+			tsens_max_sensors, thermal_sensor_num);
+		return -EINVAL;
+	}
+
+	list_for_each_entry(tmdev, &tsens_device_list, list) {
+		of_node = tmdev->pdev->dev.of_node;
+		if (of_node == NULL) {
+			pr_err("Invalid of_node??\n");
+			return -EINVAL;
+		}
+
+		id = of_match_node(tsens_match, of_node);
+		if (id == NULL) {
+			pr_err("cannot find a matching tsens of_node\n");
+			return -ENODEV;
+		}
+
+		if (!strcmp(id->compatible, "qcom,msm8996-tsens") ||
+			(!strcmp(id->compatible, "qcom,msm8998-tsens")) ||
+			(!strcmp(id->compatible, "qcom,sdm660-tsens")) ||
+			(!strcmp(id->compatible, "qcom,sdm630-tsens")) ||
+			(!strcmp(id->compatible, "qcom,msmhamster-tsens"))) {
+			/* Assign client IDs that are used to get the
+			 * controller and hw_sensor details
+			 */
+			for (i = 0; i < tmdev->tsens_num_sensor; i++) {
+				sensor_client_id[idx] =
+					tmdev->sensor[i].sensor_client_id;
+				idx++;
+			}
+		} else {
+			/* Assign the corresponding hw sensor number
+			 * prior to support for multiple controllers
+			 */
+			for (i = 0; i < tmdev->tsens_num_sensor; i++) {
+				sensor_client_id[idx] =
+					tmdev->sensor[i].sensor_hw_num;
+				idx++;
+			}
+		}
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(tsens_get_hw_id_mapping);
+
+static ssize_t
+zonemask_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	struct tsens_tm_device *tmdev = NULL;
+
+	tmdev = tsens_controller_is_present();
+	if (!tmdev) {
+		pr_err("No TSENS controller present\n");
+		return -EPROBE_DEFER;
+	}
+
+	return snprintf(buf, PAGE_SIZE,
+		"Zone =%d th1=%d th2=%d\n", tmdev->mtcsys.zone_mtc,
+				tmdev->mtcsys.th1, tmdev->mtcsys.th2);
+}
+
+static ssize_t
+zonemask_store(struct device *dev, struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	int ret;
+	struct tsens_tm_device *tmdev = NULL;
+
+	tmdev = tsens_controller_is_present();
+	if (!tmdev) {
+		pr_err("No TSENS controller present\n");
+		return -EPROBE_DEFER;
+	}
+
+	ret = sscanf(buf, "%d %d %d", &tmdev->mtcsys.zone_mtc,
+				&tmdev->mtcsys.th1, &tmdev->mtcsys.th2);
+
+	if (ret != TSENS_ZONEMASK_PARAMS) {
+		pr_err("Invalid command line arguments\n");
+		count = -EINVAL;
+	} else {
+		pr_debug("store zone_mtc=%d th1=%d th2=%d\n",
+				tmdev->mtcsys.zone_mtc,
+				tmdev->mtcsys.th1 , tmdev->mtcsys.th2);
+		ret = tsens_set_mtc_zone_sw_mask(tmdev->mtcsys.zone_mtc ,
+					tmdev->mtcsys.th1 , tmdev->mtcsys.th2);
+		if (ret < 0) {
+			pr_err("Invalid command line arguments\n");
+			count = -EINVAL;
+		}
+	}
+
+	return count;
+}
+
+static ssize_t
+zonelog_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	int ret, zlog[TSENS_MTC_ZONE_LOG_SIZE];
+	struct tsens_tm_device *tmdev = NULL;
+
+	tmdev = tsens_controller_is_present();
+	if (!tmdev) {
+		pr_err("No TSENS controller present\n");
+		return -EPROBE_DEFER;
+	}
+
+	ret = tsens_get_mtc_zone_log(tmdev->mtcsys.zone_log, zlog);
+	if (ret < 0) {
+		pr_err("Invalid command line arguments\n");
+		return -EINVAL;
+	}
+
+	return snprintf(buf, PAGE_SIZE,
+		"Log[0]=%d\nLog[1]=%d\nLog[2]=%d\nLog[3]=%d\nLog[4]=%d\nLog[5]=%d\n",
+			zlog[0], zlog[1], zlog[2], zlog[3], zlog[4], zlog[5]);
+}
+
+static ssize_t
+zonelog_store(struct device *dev, struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	int ret;
+	struct tsens_tm_device *tmdev = NULL;
+
+	tmdev = tsens_controller_is_present();
+	if (!tmdev) {
+		pr_err("No TSENS controller present\n");
+		return -EPROBE_DEFER;
+	}
+
+	ret = kstrtou32(buf, 0, &tmdev->mtcsys.zone_log);
+	if (ret < 0) {
+		pr_err("Invalid command line arguments\n");
+		return -EINVAL;
+	}
+
+	return count;
+}
+
+static ssize_t
+zonehist_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	int ret, zhist[TSENS_MTC_ZONE_HISTORY_SIZE];
+	struct tsens_tm_device *tmdev = NULL;
+
+	tmdev = tsens_controller_is_present();
+	if (!tmdev) {
+		pr_err("No TSENS controller present\n");
+		return -EPROBE_DEFER;
+	}
+
+	ret = tsens_get_mtc_zone_history(tmdev->mtcsys.zone_hist, zhist);
+	if (ret < 0) {
+		pr_err("Invalid command line arguments\n");
+		return -EINVAL;
+	}
+
+	return snprintf(buf, PAGE_SIZE,
+		"Cool = %d\nYellow = %d\nRed = %d\n",
+			zhist[0], zhist[1], zhist[2]);
+}
+
+static ssize_t
+zonehist_store(struct device *dev, struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	int ret;
+	struct tsens_tm_device *tmdev = NULL;
+
+	tmdev = tsens_controller_is_present();
+	if (!tmdev) {
+		pr_err("No TSENS controller present\n");
+		return -EPROBE_DEFER;
+	}
+
+	ret = kstrtou32(buf, 0, &tmdev->mtcsys.zone_hist);
+	if (ret < 0) {
+		pr_err("Invalid command line arguments\n");
+		return -EINVAL;
+	}
+
+	return count;
+}
+
+static struct device_attribute tsens_mtc_dev_attr[] = {
+	__ATTR(zonemask, 0644, zonemask_show, zonemask_store),
+	__ATTR(zonelog, 0644, zonelog_show, zonelog_store),
+	__ATTR(zonehist, 0644, zonehist_show, zonehist_store),
+};
+
+static int create_tsens_mtc_sysfs(struct platform_device *pdev)
+{
+	int result = 0, i;
+	struct device_attribute *attr_ptr = NULL;
+
+	attr_ptr = tsens_mtc_dev_attr;
+
+	for (i = 0; i < ARRAY_SIZE(tsens_mtc_dev_attr); i++) {
+		result = device_create_file(&pdev->dev, &attr_ptr[i]);
+		if (result < 0)
+			goto error;
+	}
+
+	pr_debug("create_tsens_mtc_sysfs success\n");
+
+	return result;
+
+error:
+	for (i--; i >= 0; i--)
+		device_remove_file(&pdev->dev, &attr_ptr[i]);
+
+	return result;
+}
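+
+/*
+ * Illustrative use of the MTC sysfs nodes from userspace (the exact
+ * sysfs path is device-specific and assumed here, not defined by this
+ * driver):
+ *
+ *	// enable th1 and th2 for zone 0
+ *	int fd = open(".../zonemask", O_WRONLY);
+ *	write(fd, "0 1 1", 5);
+ *	close(fd);
+ *
+ * zonemask_store() parses the three integers with sscanf() and
+ * programs the mask via tsens_set_mtc_zone_sw_mask().
+ */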
+
+static int tsens_tz_code_to_degc(int adc_code, int sensor_sw_id,
+				struct tsens_tm_device *tmdev)
+{
+	int degc, num, den, idx;
+
+	idx = sensor_sw_id;
+	num = ((adc_code * tmdev->tsens_factor) -
+				tmdev->sensor[idx].offset);
+	den = (int) tmdev->sensor[idx].slope_mul_tsens_factor;
+
+	if (num > 0)
+		degc = ((num + (den/2))/den);
+	else if (num < 0)
+		degc = ((num - (den/2))/den);
+	else
+		degc = num/den;
+
+	pr_debug("raw_code:0x%x, sensor_num:%d, degc:%d, offset:%d\n",
+			adc_code, idx, degc, tmdev->sensor[idx].offset);
+
+	return degc;
+}
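+
+/*
+ * Worked example of the conversion above, with illustrative numbers
+ * only (not real calibration data): for slope_mul_tsens_factor = 3200,
+ * offset = -9600 and tsens_factor = 1000, an ADC code of 364 gives
+ *
+ *	num  = 364 * 1000 - (-9600) = 373600
+ *	degc = (373600 + 3200/2) / 3200 = 117
+ *
+ * The +/- den/2 term rounds to the nearest degree instead of
+ * truncating toward zero.
+ */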
+
+static int tsens_tz_degc_to_code(int degc, int idx,
+				struct tsens_tm_device *tmdev)
+{
+	int code = ((degc * tmdev->sensor[idx].slope_mul_tsens_factor)
+		+ tmdev->sensor[idx].offset)/tmdev->tsens_factor;
+
+	if (code > TSENS_THRESHOLD_MAX_CODE)
+		code = TSENS_THRESHOLD_MAX_CODE;
+	else if (code < TSENS_THRESHOLD_MIN_CODE)
+		code = TSENS_THRESHOLD_MIN_CODE;
+	pr_debug("raw_code:0x%x, sensor_num:%d, degc:%d\n",
+			code, idx, degc);
+	return code;
+}
+
+static int msm_tsens_get_temp(int sensor_client_id, int *temp)
+{
+	unsigned int code;
+	void __iomem *sensor_addr;
+	void __iomem *trdy_addr;
+	int sensor_sw_id = -EINVAL, rc = 0, last_temp = 0, last_temp2 = 0;
+	int last_temp3 = 0, last_temp_mask, valid_status_mask, code_mask = 0;
+	bool last_temp_valid = false, last_temp2_valid = false;
+	bool last_temp3_valid = false;
+	struct tsens_tm_device *tmdev = NULL;
+	int sensor_hw_num = 0;
+
+	tmdev = get_tsens_controller_for_client_id(sensor_client_id);
+	if (tmdev == NULL) {
+		pr_err("TSENS early init not done\n");
+		return -EPROBE_DEFER;
+	}
+
+	pr_debug("sensor_client_id:%d\n", sensor_client_id);
+
+	sensor_hw_num = get_tsens_sensor_for_client_id(tmdev, sensor_client_id);
+	if (sensor_hw_num < 0) {
+		pr_err("cannot read the temperature\n");
+		return sensor_hw_num;
+	}
+	pr_debug("sensor_hw_num:%d\n", sensor_hw_num);
+
+	if (tmdev->tsens_type == TSENS_TYPE2) {
+		trdy_addr = TSENS2_TRDY_ADDR(tmdev->tsens_addr);
+		sensor_addr = TSENS2_SN_STATUS_ADDR(tmdev->tsens_addr);
+	} else if (tmdev->tsens_type == TSENS_TYPE3) {
+		trdy_addr = TSENS_TM_TRDY(tmdev->tsens_addr);
+		sensor_addr = TSENS_TM_SN_STATUS(tmdev->tsens_addr);
+	} else if (tmdev->tsens_type == TSENS_TYPE4) {
+		trdy_addr = TSENS4_TRDY_ADDR(tmdev->tsens_addr);
+		sensor_addr = TSENS2_SN_STATUS_ADDR(tmdev->tsens_addr);
+	} else {
+		trdy_addr = TSENS_TRDY_ADDR(tmdev->tsens_addr);
+		sensor_addr = TSENS_S0_STATUS_ADDR(tmdev->tsens_addr);
+	}
+
+	if ((!tmdev->prev_reading_avail) && !tmdev->tsens_valid_status_check) {
+		while (!((readl_relaxed_no_log(trdy_addr)) & TSENS_TRDY_MASK))
+			usleep_range(TSENS_TRDY_RDY_MIN_TIME,
+				TSENS_TRDY_RDY_MAX_TIME);
+		tmdev->prev_reading_avail = true;
+	}
+
+	if (tmdev->tsens_type == TSENS_TYPE3)
+		last_temp_mask = TSENS_TM_SN_LAST_TEMP_MASK;
+	else
+		last_temp_mask = TSENS_SN_STATUS_TEMP_MASK;
+
+	code = readl_relaxed_no_log(sensor_addr +
+			(sensor_hw_num << TSENS_STATUS_ADDR_OFFSET));
+	last_temp = code & last_temp_mask;
+
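+	/*
+	 * Controllers with a valid bit may briefly expose stale data:
+	 * re-read the status register up to two more times until the
+	 * valid bit is set, and otherwise fall back below to a value
+	 * that two consecutive reads agreed on.
+	 */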
+	if (tmdev->tsens_valid_status_check) {
+		if (tmdev->tsens_type == TSENS_TYPE3)
+			valid_status_mask = TSENS_TM_SN_STATUS_VALID_BIT;
+		else
+			valid_status_mask = TSENS2_SN_STATUS_VALID;
+		if (code & valid_status_mask)
+			last_temp_valid = true;
+		else {
+			code = readl_relaxed_no_log(sensor_addr +
+				(sensor_hw_num << TSENS_STATUS_ADDR_OFFSET));
+			last_temp2 = code & last_temp_mask;
+			if (code & valid_status_mask) {
+				last_temp = last_temp2;
+				last_temp2_valid = true;
+			} else {
+				code = readl_relaxed_no_log(sensor_addr +
+					(sensor_hw_num <<
+					TSENS_STATUS_ADDR_OFFSET));
+				last_temp3 = code & last_temp_mask;
+				if (code & valid_status_mask) {
+					last_temp = last_temp3;
+					last_temp3_valid = true;
+				}
+			}
+		}
+	}
+
+	if ((tmdev->tsens_valid_status_check) &&
+		(!last_temp_valid && !last_temp2_valid && !last_temp3_valid)) {
+		if (last_temp == last_temp2)
+			last_temp = last_temp2;
+		else if (last_temp2 == last_temp3)
+			last_temp = last_temp3;
+	}
+
+	if (tmdev->tsens_type != TSENS_TYPE3) {
+		/* Obtain SW index to map the corresponding thermal zone's
+		 * offset and slope for code to degc conversion. */
+		rc = tsens_get_sw_id_mapping_for_controller(sensor_hw_num,
+						&sensor_sw_id, tmdev);
+		if (rc < 0) {
+			pr_err("tsens mapping index not found\n");
+			return rc;
+		}
+
+		*temp = tsens_tz_code_to_degc(last_temp, sensor_sw_id, tmdev);
+	} else {
+		if (last_temp & TSENS_TM_CODE_SIGN_BIT) {
+			/* Sign extension for negative value */
+			code_mask = ~TSENS_TM_CODE_BIT_MASK;
+			last_temp |= code_mask;
+		}
+		*temp = last_temp;
+	}
+
+	tmdev->sensor[sensor_hw_num].dbg_adc_code = last_temp;
+
+	trace_tsens_read(*temp, sensor_client_id);
+
+	return 0;
+}
+
+static int tsens_tz_get_temp(struct thermal_zone_device *thermal,
+			     int *temp)
+{
+	struct tsens_tm_device_sensor *tm_sensor = thermal->devdata;
+	struct tsens_tm_device *tmdev = NULL;
+	uint32_t idx = 0;
+	int rc = 0;
+
+	if (!tm_sensor || !temp)
+		return -EINVAL;
+
+	tmdev = tm_sensor->tm;
+	if (!tmdev)
+		return -EINVAL;
+
+	rc = msm_tsens_get_temp(tm_sensor->sensor_client_id, temp);
+	if (rc)
+		return rc;
+
+	idx = tmdev->sensor_dbg_info[tm_sensor->sensor_hw_num].idx;
+	tmdev->sensor_dbg_info[tm_sensor->sensor_hw_num].temp[idx%10] = *temp;
+	tmdev->sensor_dbg_info[tm_sensor->sensor_hw_num].time_stmp[idx%10] =
+					sched_clock();
+	tmdev->sensor_dbg_info[tm_sensor->sensor_hw_num].adccode[idx%10] =
+			tmdev->sensor[tm_sensor->sensor_hw_num].dbg_adc_code;
+	idx++;
+	tmdev->sensor_dbg_info[tm_sensor->sensor_hw_num].idx = idx;
+
+	return 0;
+}
+
+int tsens_get_temp(struct tsens_device *device, int *temp)
+{
+	int rc = 0;
+
+	if (tsens_is_ready() <= 0) {
+		pr_debug("TSENS early init not done\n");
+		return -EPROBE_DEFER;
+	}
+
+	rc = msm_tsens_get_temp(device->sensor_num, temp);
+	if (rc)
+		return rc;
+
+	return 0;
+}
+EXPORT_SYMBOL(tsens_get_temp);
+
+int tsens_get_max_sensor_num(uint32_t *tsens_num_sensors)
+{
+	if (tsens_is_ready() <= 0) {
+		pr_debug("TSENS early init not done\n");
+		return -EPROBE_DEFER;
+	}
+
+	*tsens_num_sensors = 0;
+
+	if (get_all_tsens_controller_sensor_count(tsens_num_sensors) == NULL)
+		return -EINVAL;
+
+	pr_debug("%d\n", *tsens_num_sensors);
+
+	return 0;
+}
+EXPORT_SYMBOL(tsens_get_max_sensor_num);
+
+static int tsens_tz_get_mode(struct thermal_zone_device *thermal,
+			      enum thermal_device_mode *mode)
+{
+	struct tsens_tm_device_sensor *tm_sensor = thermal->devdata;
+
+	if (!tm_sensor || !mode)
+		return -EINVAL;
+
+	*mode = tm_sensor->mode;
+
+	return 0;
+}
+
+static int tsens_tz_get_trip_type(struct thermal_zone_device *thermal,
+				   int trip, enum thermal_trip_type *type)
+{
+	struct tsens_tm_device_sensor *tm_sensor = thermal->devdata;
+
+	if (!tm_sensor || trip < 0 || !type)
+		return -EINVAL;
+
+	switch (trip) {
+	case TSENS_TRIP_WARM:
+		*type = THERMAL_TRIP_CONFIGURABLE_HI;
+		break;
+	case TSENS_TRIP_COOL:
+		*type = THERMAL_TRIP_CONFIGURABLE_LOW;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int tsens_tm_get_trip_type(struct thermal_zone_device *thermal,
+				   int trip, enum thermal_trip_type *type)
+{
+	struct tsens_tm_device_sensor *tm_sensor = thermal->devdata;
+
+	if (!tm_sensor || trip < 0 || !type)
+		return -EINVAL;
+
+	switch (trip) {
+	case TSENS_TM_TRIP_WARM:
+		*type = THERMAL_TRIP_CONFIGURABLE_HI;
+		break;
+	case TSENS_TM_TRIP_COOL:
+		*type = THERMAL_TRIP_CONFIGURABLE_LOW;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
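+/*
+ * Upper and lower interrupt mask bits share one register: bit n masks
+ * the lower interrupt of sensor n and bit (n + 16) the upper one (see
+ * TSENS_TM_UPPER_INT_SET()). Critical interrupts use a separate mask
+ * register with one bit per sensor.
+ */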
+static int tsens_tm_activate_trip_type(struct thermal_zone_device *thermal,
+			int trip, enum thermal_trip_activation_mode mode)
+{
+	struct tsens_tm_device_sensor *tm_sensor = thermal->devdata;
+	unsigned int reg_cntl, mask;
+	unsigned long flags;
+	struct tsens_tm_device *tmdev = NULL;
+	int rc = 0;
+
+	/* clear the interrupt and unmask */
+	if (!tm_sensor || trip < 0)
+		return -EINVAL;
+
+	tmdev = tm_sensor->tm;
+	if (!tmdev)
+		return -EINVAL;
+
+	spin_lock_irqsave(&tmdev->tsens_upp_low_lock, flags);
+	mask = (tm_sensor->sensor_hw_num);
+	switch (trip) {
+	case TSENS_TM_TRIP_CRITICAL:
+		tmdev->sensor[tm_sensor->sensor_hw_num].
+			debug_thr_state_copy.crit_th_state = mode;
+		reg_cntl = readl_relaxed(TSENS_TM_CRITICAL_INT_MASK
+							(tmdev->tsens_addr));
+		if (mode == THERMAL_TRIP_ACTIVATION_DISABLED)
+			writel_relaxed(reg_cntl | (1 << mask),
+				(TSENS_TM_CRITICAL_INT_MASK
+				(tmdev->tsens_addr)));
+		else
+			writel_relaxed(reg_cntl & ~(1 << mask),
+				(TSENS_TM_CRITICAL_INT_MASK
+				(tmdev->tsens_addr)));
+		break;
+	case TSENS_TM_TRIP_WARM:
+		tmdev->sensor[tm_sensor->sensor_hw_num].
+			debug_thr_state_copy.high_th_state = mode;
+		reg_cntl = readl_relaxed(TSENS_TM_UPPER_LOWER_INT_MASK
+						(tmdev->tsens_addr));
+		if (mode == THERMAL_TRIP_ACTIVATION_DISABLED)
+			writel_relaxed(reg_cntl |
+				(TSENS_TM_UPPER_INT_SET(mask)),
+				(TSENS_TM_UPPER_LOWER_INT_MASK
+				(tmdev->tsens_addr)));
+		else
+			writel_relaxed(reg_cntl &
+				~(TSENS_TM_UPPER_INT_SET(mask)),
+				(TSENS_TM_UPPER_LOWER_INT_MASK
+				(tmdev->tsens_addr)));
+		break;
+	case TSENS_TM_TRIP_COOL:
+		tmdev->sensor[tm_sensor->sensor_hw_num].
+			debug_thr_state_copy.low_th_state = mode;
+		reg_cntl = readl_relaxed(TSENS_TM_UPPER_LOWER_INT_MASK
+						(tmdev->tsens_addr));
+		if (mode == THERMAL_TRIP_ACTIVATION_DISABLED)
+			writel_relaxed(reg_cntl | (1 << mask),
+			(TSENS_TM_UPPER_LOWER_INT_MASK(tmdev->tsens_addr)));
+		else
+			writel_relaxed(reg_cntl & ~(1 << mask),
+			(TSENS_TM_UPPER_LOWER_INT_MASK(tmdev->tsens_addr)));
+		break;
+	default:
+		rc = -EINVAL;
+	}
+
+	spin_unlock_irqrestore(&tmdev->tsens_upp_low_lock, flags);
+	/* Activate and enable the respective trip threshold setting */
+	mb();
+
+	return rc;
+}
+
+static int tsens_tz_activate_trip_type(struct thermal_zone_device *thermal,
+			int trip, enum thermal_trip_activation_mode mode)
+{
+	struct tsens_tm_device_sensor *tm_sensor = thermal->devdata;
+	unsigned int reg_cntl, code, hi_code, lo_code, mask;
+	struct tsens_tm_device *tmdev = NULL;
+
+	if (!tm_sensor || trip < 0)
+		return -EINVAL;
+
+	tmdev = tm_sensor->tm;
+	if (!tmdev)
+		return -EINVAL;
+
+	lo_code = TSENS_THRESHOLD_MIN_CODE;
+	hi_code = TSENS_THRESHOLD_MAX_CODE;
+
+	reg_cntl = readl_relaxed((TSENS_S0_UPPER_LOWER_STATUS_CTRL_ADDR
+					(tmdev->tsens_addr) +
+					(tm_sensor->sensor_hw_num *
+					TSENS_SN_ADDR_OFFSET)));
+
+	switch (trip) {
+	case TSENS_TRIP_WARM:
+		tmdev->sensor[tm_sensor->sensor_hw_num].
+				debug_thr_state_copy.high_th_state = mode;
+
+		code = (reg_cntl & TSENS_UPPER_THRESHOLD_MASK)
+					>> TSENS_UPPER_THRESHOLD_SHIFT;
+		mask = TSENS_UPPER_STATUS_CLR;
+
+		if (!(reg_cntl & TSENS_LOWER_STATUS_CLR))
+			lo_code = (reg_cntl & TSENS_LOWER_THRESHOLD_MASK);
+		break;
+	case TSENS_TRIP_COOL:
+		tmdev->sensor[tm_sensor->sensor_hw_num].
+				debug_thr_state_copy.low_th_state = mode;
+
+		code = (reg_cntl & TSENS_LOWER_THRESHOLD_MASK);
+		mask = TSENS_LOWER_STATUS_CLR;
+
+		if (!(reg_cntl & TSENS_UPPER_STATUS_CLR))
+			hi_code = (reg_cntl & TSENS_UPPER_THRESHOLD_MASK)
+					>> TSENS_UPPER_THRESHOLD_SHIFT;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	if (mode == THERMAL_TRIP_ACTIVATION_DISABLED)
+		writel_relaxed(reg_cntl | mask,
+		(TSENS_S0_UPPER_LOWER_STATUS_CTRL_ADDR(tmdev->tsens_addr) +
+			(tm_sensor->sensor_hw_num * TSENS_SN_ADDR_OFFSET)));
+
+	else
+		writel_relaxed(reg_cntl & ~mask,
+		(TSENS_S0_UPPER_LOWER_STATUS_CTRL_ADDR(tmdev->tsens_addr) +
+		(tm_sensor->sensor_hw_num * TSENS_SN_ADDR_OFFSET)));
+	/* Enable the thresholds */
+	mb();
+	return 0;
+}
+
+static int tsens_tm_get_trip_temp(struct thermal_zone_device *thermal,
+				   int trip, int *temp)
+{
+	struct tsens_tm_device_sensor *tm_sensor = thermal->devdata;
+	int reg_cntl, code_mask;
+	struct tsens_tm_device *tmdev = NULL;
+
+	if (!tm_sensor || trip < 0 || !temp)
+		return -EINVAL;
+
+	tmdev = tm_sensor->tm;
+	if (!tmdev)
+		return -EINVAL;
+
+	switch (trip) {
+	case TSENS_TM_TRIP_CRITICAL:
+		reg_cntl = readl_relaxed((TSENS_TM_SN_CRITICAL_THRESHOLD
+						(tmdev->tsens_addr)) +
+				(tm_sensor->sensor_hw_num *
+				TSENS_SN_ADDR_OFFSET));
+		if (reg_cntl & TSENS_TM_CODE_SIGN_BIT) {
+			/* Sign extension for negative value */
+			code_mask = ~TSENS_TM_CODE_BIT_MASK;
+			reg_cntl |= code_mask;
+		}
+		break;
+	case TSENS_TM_TRIP_WARM:
+		reg_cntl = readl_relaxed((TSENS_TM_UPPER_LOWER_THRESHOLD
+						(tmdev->tsens_addr)) +
+				(tm_sensor->sensor_hw_num *
+				TSENS_SN_ADDR_OFFSET));
+		reg_cntl = TSENS_TM_UPPER_THRESHOLD_VALUE(reg_cntl);
+		if (reg_cntl & TSENS_TM_CODE_SIGN_BIT) {
+			/* Sign extension for negative value */
+			code_mask = ~TSENS_TM_CODE_BIT_MASK;
+			reg_cntl |= code_mask;
+		}
+		break;
+	case TSENS_TM_TRIP_COOL:
+		reg_cntl = readl_relaxed((TSENS_TM_UPPER_LOWER_THRESHOLD
+						(tmdev->tsens_addr)) +
+				(tm_sensor->sensor_hw_num *
+				TSENS_SN_ADDR_OFFSET));
+		reg_cntl = TSENS_TM_LOWER_THRESHOLD_VALUE(reg_cntl);
+		if (reg_cntl & TSENS_TM_CODE_SIGN_BIT) {
+			/* Sign extension for negative value */
+			code_mask = ~TSENS_TM_CODE_BIT_MASK;
+			reg_cntl |= code_mask;
+		}
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	*temp = reg_cntl;
+
+	return 0;
+}
+
+static int tsens_tz_get_trip_temp(struct thermal_zone_device *thermal,
+				   int trip, int *temp)
+{
+	struct tsens_tm_device_sensor *tm_sensor = thermal->devdata;
+	unsigned int reg;
+	int sensor_sw_id = -EINVAL, rc = 0;
+	struct tsens_tm_device *tmdev = NULL;
+
+	if (!tm_sensor || trip < 0 || !temp)
+		return -EINVAL;
+
+	tmdev = tm_sensor->tm;
+	if (!tmdev)
+		return -EINVAL;
+
+	reg = readl_relaxed(TSENS_S0_UPPER_LOWER_STATUS_CTRL_ADDR
+						(tmdev->tsens_addr) +
+			(tm_sensor->sensor_hw_num * TSENS_SN_ADDR_OFFSET));
+	switch (trip) {
+	case TSENS_TRIP_WARM:
+		reg = (reg & TSENS_UPPER_THRESHOLD_MASK) >>
+				TSENS_UPPER_THRESHOLD_SHIFT;
+		break;
+	case TSENS_TRIP_COOL:
+		reg = (reg & TSENS_LOWER_THRESHOLD_MASK);
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	rc = tsens_get_sw_id_mapping_for_controller(tm_sensor->sensor_hw_num,
+							&sensor_sw_id, tmdev);
+	if (rc < 0) {
+		pr_err("tsens mapping index not found\n");
+		return rc;
+	}
+	*temp = tsens_tz_code_to_degc(reg, sensor_sw_id, tmdev);
+
+	return 0;
+}
+
+static int tsens_tz_notify(struct thermal_zone_device *thermal,
+				int count, enum thermal_trip_type type)
+{
+	/* Critical temperature thresholds are enabled and will shut
+	 * down the device once they are crossed. */
+	pr_debug("%s debug\n", __func__);
+	return 1;
+}
+
+static int tsens_tm_set_trip_temp(struct thermal_zone_device *thermal,
+				   int trip, int temp)
+{
+	struct tsens_tm_device_sensor *tm_sensor = thermal->devdata;
+	unsigned int reg_cntl;
+	unsigned long flags;
+	struct tsens_tm_device *tmdev = NULL;
+	int rc = 0;
+
+	if (!tm_sensor || trip < 0)
+		return -EINVAL;
+
+	tmdev = tm_sensor->tm;
+	if (!tmdev)
+		return -EINVAL;
+
+	spin_lock_irqsave(&tmdev->tsens_upp_low_lock, flags);
+	switch (trip) {
+	case TSENS_TM_TRIP_CRITICAL:
+		tmdev->sensor[tm_sensor->sensor_hw_num].
+				debug_thr_state_copy.crit_temp = temp;
+		temp &= TSENS_TM_SN_CRITICAL_THRESHOLD_MASK;
+		writel_relaxed(temp,
+			(TSENS_TM_SN_CRITICAL_THRESHOLD(tmdev->tsens_addr) +
+			(tm_sensor->sensor_hw_num * TSENS_SN_ADDR_OFFSET)));
+		break;
+	case TSENS_TM_TRIP_WARM:
+		tmdev->sensor[tm_sensor->sensor_hw_num].
+				debug_thr_state_copy.high_temp = temp;
+		reg_cntl = readl_relaxed((TSENS_TM_UPPER_LOWER_THRESHOLD
+				(tmdev->tsens_addr)) +
+				(tm_sensor->sensor_hw_num *
+				TSENS_SN_ADDR_OFFSET));
+		temp = TSENS_TM_UPPER_THRESHOLD_SET(temp);
+		temp &= TSENS_TM_UPPER_THRESHOLD_MASK;
+		reg_cntl &= ~TSENS_TM_UPPER_THRESHOLD_MASK;
+		writel_relaxed(reg_cntl | temp,
+			(TSENS_TM_UPPER_LOWER_THRESHOLD(tmdev->tsens_addr) +
+			(tm_sensor->sensor_hw_num * TSENS_SN_ADDR_OFFSET)));
+		break;
+	case TSENS_TM_TRIP_COOL:
+		tmdev->sensor[tm_sensor->sensor_hw_num].
+				debug_thr_state_copy.low_temp = temp;
+		reg_cntl = readl_relaxed((TSENS_TM_UPPER_LOWER_THRESHOLD
+				(tmdev->tsens_addr)) +
+				(tm_sensor->sensor_hw_num *
+				TSENS_SN_ADDR_OFFSET));
+		temp &= TSENS_TM_LOWER_THRESHOLD_MASK;
+		reg_cntl &= ~TSENS_TM_LOWER_THRESHOLD_MASK;
+		writel_relaxed(reg_cntl | temp,
+			(TSENS_TM_UPPER_LOWER_THRESHOLD(tmdev->tsens_addr) +
+			(tm_sensor->sensor_hw_num * TSENS_SN_ADDR_OFFSET)));
+		break;
+	default:
+		rc = -EINVAL;
+	}
+
+	spin_unlock_irqrestore(&tmdev->tsens_upp_low_lock, flags);
+	/* Set trip temperature thresholds */
+	mb();
+	return rc;
+}
+
+static int tsens_tz_set_trip_temp(struct thermal_zone_device *thermal,
+				   int trip, int temp)
+{
+	struct tsens_tm_device_sensor *tm_sensor = thermal->devdata;
+	unsigned int reg_cntl;
+	int code, hi_code, lo_code, code_err_chk, sensor_sw_id = 0, rc = 0;
+	struct tsens_tm_device *tmdev = NULL;
+
+	if (!tm_sensor || trip < 0)
+		return -EINVAL;
+
+	tmdev = tm_sensor->tm;
+	if (!tmdev)
+		return -EINVAL;
+
+	rc = tsens_get_sw_id_mapping_for_controller(tm_sensor->sensor_hw_num,
+							&sensor_sw_id, tmdev);
+	if (rc < 0) {
+		pr_err("tsens mapping index not found\n");
+		return rc;
+	}
+
+	code_err_chk = code = tsens_tz_degc_to_code(temp, sensor_sw_id, tmdev);
+
+	lo_code = TSENS_THRESHOLD_MIN_CODE;
+	hi_code = TSENS_THRESHOLD_MAX_CODE;
+
+	reg_cntl = readl_relaxed(TSENS_S0_UPPER_LOWER_STATUS_CTRL_ADDR
+			(tmdev->tsens_addr) + (tm_sensor->sensor_hw_num *
+					TSENS_SN_ADDR_OFFSET));
+	switch (trip) {
+	case TSENS_TRIP_WARM:
+		tmdev->sensor[tm_sensor->sensor_hw_num].
+				debug_thr_state_copy.high_adc_code = code;
+		tmdev->sensor[tm_sensor->sensor_hw_num].
+				debug_thr_state_copy.high_temp = temp;
+		code <<= TSENS_UPPER_THRESHOLD_SHIFT;
+		reg_cntl &= ~TSENS_UPPER_THRESHOLD_MASK;
+		if (!(reg_cntl & TSENS_LOWER_STATUS_CLR))
+			lo_code = (reg_cntl & TSENS_LOWER_THRESHOLD_MASK);
+		break;
+	case TSENS_TRIP_COOL:
+		tmdev->sensor[tm_sensor->sensor_hw_num].
+				debug_thr_state_copy.low_adc_code = code;
+		tmdev->sensor[tm_sensor->sensor_hw_num].
+				debug_thr_state_copy.low_temp = temp;
+		reg_cntl &= ~TSENS_LOWER_THRESHOLD_MASK;
+		if (!(reg_cntl & TSENS_UPPER_STATUS_CLR))
+			hi_code = (reg_cntl & TSENS_UPPER_THRESHOLD_MASK)
+					>> TSENS_UPPER_THRESHOLD_SHIFT;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	writel_relaxed(reg_cntl | code, (TSENS_S0_UPPER_LOWER_STATUS_CTRL_ADDR
+					(tmdev->tsens_addr) +
+					(tm_sensor->sensor_hw_num *
+					TSENS_SN_ADDR_OFFSET)));
+	/* Activate the set trip temperature thresholds */
+	mb();
+	return 0;
+}
+
+static void tsens_poll(struct work_struct *work)
+{
+	struct tsens_tm_device *tmdev = container_of(work,
+		       struct tsens_tm_device, tsens_critical_poll_test.work);
+	unsigned int reg_cntl, mask, rc = 0, debug_dump, i = 0, loop = 0;
+	unsigned int debug_id = 0, cntrl_id = 0;
+	uint32_t r1, r2, r3, r4, offset = 0, idx = 0;
+	unsigned long temp, flags;
+	unsigned int status, int_mask, int_mask_val;
+	void __iomem *srot_addr;
+	void __iomem *controller_id_addr;
+	void __iomem *debug_id_addr;
+	void __iomem *debug_data_addr;
+	void __iomem *sensor_status_addr;
+	void __iomem *sensor_int_mask_addr;
+	void __iomem *sensor_critical_addr;
+
+	/* Set the critical temperature threshold to a low value
+	 * (TSENS_DEBUG_DECIDEGC) that is guaranteed to trigger, then
+	 * check the interrupt count to confirm that it did. Schedule
+	 * the next round of this test after tsens_sec_to_msec_value
+	 * milliseconds.
+	 */
+
+	controller_id_addr = TSENS_CONTROLLER_ID(tmdev->tsens_addr);
+	debug_id_addr = TSENS_DEBUG_CONTROL(tmdev->tsens_addr);
+	debug_data_addr = TSENS_DEBUG_DATA(tmdev->tsens_addr);
+	srot_addr = TSENS_CTRL_ADDR(tmdev->tsens_addr);
+
+	temp = TSENS_DEBUG_DECIDEGC;
+	/* Sensor 0 on either of the controllers */
+	mask = 0;
+
+	reinit_completion(&tmdev->tsens_rslt_completion);
+
+	temp &= TSENS_TM_SN_CRITICAL_THRESHOLD_MASK;
+	writel_relaxed(temp,
+			(TSENS_TM_SN_CRITICAL_THRESHOLD(tmdev->tsens_addr) +
+			(mask * TSENS_SN_ADDR_OFFSET)));
+
+	/* debug */
+	idx = tmdev->crit_timestamp_last_run.idx;
+	tmdev->crit_timestamp_last_run.time_stmp[idx%10] = sched_clock();
+	tmdev->crit_timestamp_last_run.idx++;
+	tmdev->qtimer_val_detection_start = arch_counter_get_cntvct();
+
+	spin_lock_irqsave(&tmdev->tsens_crit_lock, flags);
+	/* Clear the sensor0 critical status */
+	int_mask_val = 1;
+	writel_relaxed(int_mask_val,
+		TSENS_TM_CRITICAL_INT_CLEAR(tmdev->tsens_addr));
+	writel_relaxed(0,
+		TSENS_TM_CRITICAL_INT_CLEAR(
+					tmdev->tsens_addr));
+	/* Clear the status */
+	mb();
+	tmdev->crit_set = true;
+	if (!tmdev->tsens_critical_poll) {
+		reg_cntl = readl_relaxed(
+			TSENS_TM_CRITICAL_INT_MASK(tmdev->tsens_addr));
+		writel_relaxed(reg_cntl & ~(1 << mask),
+				(TSENS_TM_CRITICAL_INT_MASK
+				(tmdev->tsens_addr)));
+		/* Enable the critical int mask */
+		mb();
+	}
+	spin_unlock_irqrestore(&tmdev->tsens_crit_lock, flags);
+
+	if (tmdev->tsens_critical_poll) {
+		msleep(TSENS_DEBUG_POLL_MS);
+		sensor_status_addr = TSENS_TM_SN_STATUS(tmdev->tsens_addr);
+
+		spin_lock_irqsave(&tmdev->tsens_crit_lock, flags);
+		status = readl_relaxed(sensor_status_addr);
+		spin_unlock_irqrestore(&tmdev->tsens_crit_lock, flags);
+
+		if (status & TSENS_TM_SN_STATUS_CRITICAL_STATUS) {
+			goto re_schedule;
+		} else {
+			pr_err("status:0x%x\n", status);
+			goto debug_start;
+		}
+	}
+
+	rc = wait_for_completion_timeout(
+				&tmdev->tsens_rslt_completion,
+				tsens_completion_timeout_hz);
+	if (!rc) {
+		pr_debug("Switch to polling, TSENS critical interrupt failed\n");
+		sensor_status_addr = TSENS_TM_SN_STATUS(tmdev->tsens_addr);
+		sensor_int_mask_addr =
+			TSENS_TM_CRITICAL_INT_MASK(tmdev->tsens_addr);
+		sensor_critical_addr =
+			TSENS_TM_SN_CRITICAL_THRESHOLD(tmdev->tsens_addr);
+
+		spin_lock_irqsave(&tmdev->tsens_crit_lock, flags);
+		if (!tmdev->crit_set) {
+			pr_debug("Ignore this check cycle\n");
+			spin_unlock_irqrestore(&tmdev->tsens_crit_lock, flags);
+			goto re_schedule;
+		}
+		status = readl_relaxed(sensor_status_addr);
+		int_mask = readl_relaxed(sensor_int_mask_addr);
+		tmdev->crit_set = false;
+		spin_unlock_irqrestore(&tmdev->tsens_crit_lock, flags);
+
+		idx = tmdev->crit_timestamp_last_poll_request.idx;
+		tmdev->crit_timestamp_last_poll_request.time_stmp[idx%10] =
+								sched_clock();
+		tmdev->crit_timestamp_last_poll_request.idx++;
+		tmdev->qtimer_val_last_polling_check =
+						arch_counter_get_cntvct();
+		if (status & TSENS_TM_SN_STATUS_CRITICAL_STATUS) {
+			spin_lock_irqsave(&tmdev->tsens_crit_lock, flags);
+			int_mask = readl_relaxed(sensor_int_mask_addr);
+			int_mask_val = 1;
+			/* Mask the corresponding interrupt for the sensors */
+			writel_relaxed(int_mask | int_mask_val,
+				TSENS_TM_CRITICAL_INT_MASK(
+					tmdev->tsens_addr));
+			/* Clear the corresponding sensors interrupt */
+			writel_relaxed(int_mask_val,
+				TSENS_TM_CRITICAL_INT_CLEAR(tmdev->tsens_addr));
+			writel_relaxed(0,
+				TSENS_TM_CRITICAL_INT_CLEAR(
+					tmdev->tsens_addr));
+			spin_unlock_irqrestore(&tmdev->tsens_crit_lock, flags);
+
+			/* Clear critical status */
+			mb();
+			goto re_schedule;
+		}
+
+debug_start:
+		cntrl_id = readl_relaxed(controller_id_addr);
+		pr_err("Controller_id: 0x%x\n", cntrl_id);
+
+		loop = 0;
+		i = 0;
+		debug_id = readl_relaxed(debug_id_addr);
+		writel_relaxed((debug_id | (i << 1) | 1),
+				TSENS_DEBUG_CONTROL(tmdev->tsens_addr));
+		while (loop < TSENS_DEBUG_LOOP_COUNT_ID_0) {
+			debug_dump = readl_relaxed(debug_data_addr);
+			r1 = readl_relaxed(debug_data_addr);
+			r2 = readl_relaxed(debug_data_addr);
+			r3 = readl_relaxed(debug_data_addr);
+			r4 = readl_relaxed(debug_data_addr);
+			pr_err("cntrl:%d, bus-id:%d value:0x%x, 0x%x, 0x%x, 0x%x, 0x%x\n",
+				cntrl_id, i, debug_dump, r1, r2, r3, r4);
+			loop++;
+		}
+
+		for (i = TSENS_DBG_BUS_ID_1; i <= TSENS_DBG_BUS_ID_15; i++) {
+			loop = 0;
+			debug_id = readl_relaxed(debug_id_addr);
+			debug_id = debug_id & TSENS_DEBUG_ID_MASK_1_4;
+			writel_relaxed((debug_id | (i << 1) | 1),
+					TSENS_DEBUG_CONTROL(tmdev->tsens_addr));
+			while (loop < TSENS_DEBUG_LOOP_COUNT) {
+				debug_dump = readl_relaxed(debug_data_addr);
+				pr_err("cntrl:%d, bus-id:%d with value: 0x%x\n",
+					cntrl_id, i, debug_dump);
+				if (i == TSENS_DBG_BUS_ID_2)
+					usleep_range(
+						TSENS_DEBUG_BUS_ID2_MIN_CYCLE,
+						TSENS_DEBUG_BUS_ID2_MAX_CYCLE);
+				loop++;
+			}
+		}
+
+		pr_err("Start of TSENS TM dump\n");
+		for (i = 0; i < TSENS_DEBUG_OFFSET_RANGE; i++) {
+			r1 = readl_relaxed(controller_id_addr + offset);
+			r2 = readl_relaxed(controller_id_addr + (offset +
+						TSENS_DEBUG_OFFSET_WORD1));
+			r3 = readl_relaxed(controller_id_addr +	(offset +
+						TSENS_DEBUG_OFFSET_WORD2));
+			r4 = readl_relaxed(controller_id_addr + (offset +
+						TSENS_DEBUG_OFFSET_WORD3));
+
+			pr_err("ctrl:%d:0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
+				cntrl_id, offset, r1, r2, r3, r4);
+			offset += TSENS_DEBUG_OFFSET_ROW;
+		}
+
+		offset = 0;
+		pr_err("Start of TSENS SROT dump\n");
+		for (i = 0; i < TSENS_DEBUG_OFFSET_RANGE; i++) {
+			r1 = readl_relaxed(srot_addr + offset);
+			r2 = readl_relaxed(srot_addr + (offset +
+						TSENS_DEBUG_OFFSET_WORD1));
+			r3 = readl_relaxed(srot_addr + (offset +
+						TSENS_DEBUG_OFFSET_WORD2));
+			r4 = readl_relaxed(srot_addr + (offset +
+						TSENS_DEBUG_OFFSET_WORD3));
+
+			pr_err("ctrl:%d:0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
+				cntrl_id, offset, r1, r2, r3, r4);
+			offset += TSENS_DEBUG_OFFSET_ROW;
+		}
+
+		loop = 0;
+		while (loop < TSENS_DEBUG_LOOP_COUNT) {
+			offset = TSENS_DEBUG_OFFSET_ROW *
+					TSENS_DEBUG_STATUS_REG_START;
+			pr_err("Start of TSENS TM dump %d\n", loop);
+			/* Limited dump of the registers for the temperature */
+			for (i = 0; i < TSENS_DEBUG_LOOP_COUNT; i++) {
+				r1 = readl_relaxed(controller_id_addr + offset);
+				r2 = readl_relaxed(controller_id_addr +
+					(offset + TSENS_DEBUG_OFFSET_WORD1));
+				r3 = readl_relaxed(controller_id_addr +
+					(offset + TSENS_DEBUG_OFFSET_WORD2));
+				r4 = readl_relaxed(controller_id_addr +
+					(offset + TSENS_DEBUG_OFFSET_WORD3));
+
+			pr_err("ctrl:%d:0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
+				cntrl_id, offset, r1, r2, r3, r4);
+				offset += TSENS_DEBUG_OFFSET_ROW;
+			}
+			loop++;
+			msleep(TSENS_DEBUG_CYCLE_MS);
+		}
+		BUG();
+	}
+
+re_schedule:
+
+	schedule_delayed_work(&tmdev->tsens_critical_poll_test,
+			msecs_to_jiffies(tsens_sec_to_msec_value));
+}
+
+int tsens_mtc_reset_history_counter(unsigned int zone)
+{
+	unsigned int reg_cntl, is_valid;
+	void __iomem *sensor_addr;
+	struct tsens_tm_device *tmdev = NULL;
+
+	if (zone > TSENS_NUM_MTC_ZONES_SUPPORT)
+		return -EINVAL;
+
+	tmdev = tsens_controller_is_present();
+	if (!tmdev) {
+		pr_err("No TSENS controller present\n");
+		return -EPROBE_DEFER;
+	}
+
+	sensor_addr = TSENS_TM_MTC_ZONE0_SW_MASK_ADDR(tmdev->tsens_addr);
+	reg_cntl = readl_relaxed((sensor_addr +
+				(zone * TSENS_SN_ADDR_OFFSET)));
+	is_valid = (reg_cntl & TSENS_RESET_HISTORY_MASK)
+				>> TSENS_RESET_HISTORY_SHIFT;
+	if (!is_valid) {
+		/* Enable the bit to reset the counter */
+		writel_relaxed(reg_cntl | (1 << TSENS_RESET_HISTORY_SHIFT),
+				(sensor_addr + (zone * TSENS_SN_ADDR_OFFSET)));
+		reg_cntl = readl_relaxed((sensor_addr +
+				(zone * TSENS_SN_ADDR_OFFSET)));
+		pr_debug("tsens : zone =%d reg=%x\n", zone , reg_cntl);
+	}
+
+	/* Disable the bit to restart the counter */
+	writel_relaxed(reg_cntl & ~(1 << TSENS_RESET_HISTORY_SHIFT),
+				(sensor_addr + (zone * TSENS_SN_ADDR_OFFSET)));
+	reg_cntl = readl_relaxed((sensor_addr +
+			(zone * TSENS_SN_ADDR_OFFSET)));
+	pr_debug("tsens : zone =%d reg=%x\n", zone , reg_cntl);
+
+	return 0;
+}
+EXPORT_SYMBOL(tsens_mtc_reset_history_counter);
+
+int tsens_set_mtc_zone_sw_mask(unsigned int zone, unsigned int th1_enable,
+				unsigned int th2_enable)
+{
+	unsigned int reg_cntl;
+	void __iomem *sensor_addr;
+	struct tsens_tm_device *tmdev = NULL;
+
+	if (zone > TSENS_NUM_MTC_ZONES_SUPPORT)
+		return -EINVAL;
+
+	tmdev = tsens_controller_is_present();
+	if (!tmdev) {
+		pr_err("No TSENS controller present\n");
+		return -EPROBE_DEFER;
+	}
+
+	if (tmdev->tsens_type == TSENS_TYPE3)
+		sensor_addr = TSENS_TM_MTC_ZONE0_SW_MASK_ADDR
+					(tmdev->tsens_addr);
+	else
+		sensor_addr = TSENS_MTC_ZONE0_SW_MASK_ADDR
+					(tmdev->tsens_addr);
+
+	if (th1_enable && th2_enable)
+		writel_relaxed(TSENS_MTC_IN_EFFECT,
+				(sensor_addr +
+				(zone * TSENS_SN_ADDR_OFFSET)));
+	if (!th1_enable && !th2_enable)
+		writel_relaxed(TSENS_MTC_DISABLE,
+				(sensor_addr +
+				(zone * TSENS_SN_ADDR_OFFSET)));
+	if (th1_enable && !th2_enable)
+		writel_relaxed(TSENS_TH1_MTC_IN_EFFECT,
+				(sensor_addr +
+				(zone * TSENS_SN_ADDR_OFFSET)));
+	if (!th1_enable && th2_enable)
+		writel_relaxed(TSENS_TH2_MTC_IN_EFFECT,
+				(sensor_addr +
+				(zone * TSENS_SN_ADDR_OFFSET)));
+	reg_cntl = readl_relaxed((sensor_addr +
+				(zone *	TSENS_SN_ADDR_OFFSET)));
+	pr_debug("tsens : zone =%d th1=%d th2=%d reg=%x\n",
+		zone , th1_enable , th2_enable , reg_cntl);
+
+	return 0;
+}
+EXPORT_SYMBOL(tsens_set_mtc_zone_sw_mask);
+
+int tsens_get_mtc_zone_log(unsigned int zone, void *zone_log)
+{
+	unsigned int i, reg_cntl, is_valid, log[TSENS_MTC_ZONE_LOG_SIZE];
+	int *zlog = (int *)zone_log;
+	void __iomem *sensor_addr;
+	struct tsens_tm_device *tmdev = NULL;
+
+	if (zone > TSENS_NUM_MTC_ZONES_SUPPORT)
+		return -EINVAL;
+
+	tmdev = tsens_controller_is_present();
+	if (!tmdev) {
+		pr_err("No TSENS controller present\n");
+		return -EPROBE_DEFER;
+	}
+
+	if (tmdev->tsens_type == TSENS_TYPE3)
+		sensor_addr = TSENS_TM_MTC_ZONE0_LOG(tmdev->tsens_addr);
+	else
+		sensor_addr = TSENS_MTC_ZONE0_LOG(tmdev->tsens_addr);
+
+	reg_cntl = readl_relaxed((sensor_addr +
+				(zone * TSENS_SN_ADDR_OFFSET)));
+	is_valid = (reg_cntl & TSENS_LOGS_VALID_MASK)
+				>> TSENS_LOGS_VALID_SHIFT;
+	if (is_valid) {
+		log[0] = (reg_cntl & TSENS_LOGS_LATEST_MASK);
+		log[1] = (reg_cntl & TSENS_LOGS_LOG1_MASK)
+				  >> TSENS_LOGS_LOG1_SHIFT;
+		log[2] = (reg_cntl & TSENS_LOGS_LOG2_MASK)
+				  >> TSENS_LOGS_LOG2_SHIFT;
+		log[3] = (reg_cntl & TSENS_LOGS_LOG3_MASK)
+				  >> TSENS_LOGS_LOG3_SHIFT;
+		log[4] = (reg_cntl & TSENS_LOGS_LOG4_MASK)
+				  >> TSENS_LOGS_LOG4_SHIFT;
+		log[5] = (reg_cntl & TSENS_LOGS_LOG5_MASK)
+				  >> TSENS_LOGS_LOG5_SHIFT;
+		for (i = 0; i < (TSENS_MTC_ZONE_LOG_SIZE); i++) {
+			*(zlog+i) = log[i];
+			pr_debug("Log[%d]=%d\n", i , log[i]);
+		}
+	} else {
+		pr_debug("tsens: Valid bit disabled\n");
+		return -EINVAL;
+	}
+	return 0;
+}
+EXPORT_SYMBOL(tsens_get_mtc_zone_log);
+
+int tsens_get_mtc_zone_history(unsigned int zone, void *zone_hist)
+{
+	unsigned int i, reg_cntl, hist[TSENS_MTC_ZONE_HISTORY_SIZE];
+	int *zhist = (int *)zone_hist;
+	void __iomem *sensor_addr;
+	struct tsens_tm_device *tmdev = NULL;
+
+	if (zone > TSENS_NUM_MTC_ZONES_SUPPORT)
+		return -EINVAL;
+
+	tmdev = tsens_controller_is_present();
+	if (!tmdev) {
+		pr_err("No TSENS controller present\n");
+		return -EPROBE_DEFER;
+	}
+
+	sensor_addr = TSENS_TM_MTC_ZONE0_HISTORY(tmdev->tsens_addr);
+	reg_cntl = readl_relaxed((sensor_addr +
+				(zone * TSENS_SN_ADDR_OFFSET)));
+
+	hist[0] = (reg_cntl & TSENS_PS_COOL_CMD_MASK);
+	hist[1] = (reg_cntl & TSENS_PS_YELLOW_CMD_MASK)
+			  >> TSENS_PS_YELLOW_CMD_SHIFT;
+	hist[2] = (reg_cntl & TSENS_PS_RED_CMD_MASK)
+			  >> TSENS_PS_RED_CMD_SHIFT;
+	for (i = 0; i < (TSENS_MTC_ZONE_HISTORY_SIZE); i++) {
+		*(zhist+i) = hist[i];
+		pr_debug("tsens : %d\n", hist[i]);
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(tsens_get_mtc_zone_history);
+
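+/* Thermal zone ops for controllers that report raw ADC codes */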
+static struct thermal_zone_device_ops tsens_thermal_zone_ops = {
+	.get_temp = tsens_tz_get_temp,
+	.get_mode = tsens_tz_get_mode,
+	.get_trip_type = tsens_tz_get_trip_type,
+	.activate_trip_type = tsens_tz_activate_trip_type,
+	.get_trip_temp = tsens_tz_get_trip_temp,
+	.set_trip_temp = tsens_tz_set_trip_temp,
+	.notify = tsens_tz_notify,
+};
+
+/* Thermal zone ops for decidegC */
+static struct thermal_zone_device_ops tsens_tm_thermal_zone_ops = {
+	.get_temp = tsens_tz_get_temp,
+	.get_trip_type = tsens_tm_get_trip_type,
+	.activate_trip_type = tsens_tm_activate_trip_type,
+	.get_trip_temp = tsens_tm_get_trip_temp,
+	.set_trip_temp = tsens_tm_set_trip_temp,
+	.notify = tsens_tz_notify,
+};
+
+static irqreturn_t tsens_tm_critical_irq_thread(int irq, void *data)
+{
+	struct tsens_tm_device *tm = data;
+	unsigned int i, status, idx = 0;
+	unsigned long flags;
+	void __iomem *sensor_status_addr;
+	void __iomem *sensor_int_mask_addr;
+	void __iomem *sensor_critical_addr;
+	void __iomem *wd_critical_addr;
+	int sensor_sw_id = -EINVAL, rc = 0;
+	int wd_mask;
+
+	tm->crit_set = false;
+	sensor_status_addr = TSENS_TM_SN_STATUS(tm->tsens_addr);
+	sensor_int_mask_addr =
+		TSENS_TM_CRITICAL_INT_MASK(tm->tsens_addr);
+	sensor_critical_addr =
+		TSENS_TM_SN_CRITICAL_THRESHOLD(tm->tsens_addr);
+	wd_critical_addr =
+		TSENS_TM_CRITICAL_INT_STATUS(tm->tsens_addr);
+
+	if (tm->wd_bark) {
+		wd_mask = readl_relaxed(wd_critical_addr);
+		/*
+		 * Check whether the critical interrupt was raised by the
+		 * watchdog bark rather than a thermal threshold.
+		 */
+		if (wd_mask & TSENS_TM_CRITICAL_WD_BARK) {
+			/*
+			 * Clear watchdog interrupt and
+			 * increment global wd count
+			 */
+			writel_relaxed(wd_mask | TSENS_TM_CRITICAL_WD_BARK,
+				(TSENS_TM_CRITICAL_INT_CLEAR
+				(tm->tsens_addr)));
+			writel_relaxed(wd_mask & ~(TSENS_TM_CRITICAL_WD_BARK),
+				(TSENS_TM_CRITICAL_INT_CLEAR
+				(tm->tsens_addr)));
+			tm->tsens_critical_wd_cnt++;
+			return IRQ_HANDLED;
+		}
+	}
+
+	for (i = 0; i < tm->tsens_num_sensor; i++) {
+		bool critical_thr = false;
+		int int_mask, int_mask_val;
+		uint32_t addr_offset;
+
+		spin_lock_irqsave(&tm->tsens_crit_lock, flags);
+		addr_offset = tm->sensor[i].sensor_hw_num *
+						TSENS_SN_ADDR_OFFSET;
+		status = readl_relaxed(sensor_status_addr + addr_offset);
+		int_mask = readl_relaxed(sensor_int_mask_addr);
+
+		if ((status & TSENS_TM_SN_STATUS_CRITICAL_STATUS) &&
+			!(int_mask & (1 << tm->sensor[i].sensor_hw_num))) {
+			int_mask = readl_relaxed(sensor_int_mask_addr);
+			int_mask_val = (1 << tm->sensor[i].sensor_hw_num);
+			/* Mask the corresponding interrupt for the sensors */
+			writel_relaxed(int_mask | int_mask_val,
+				TSENS_TM_CRITICAL_INT_MASK(
+					tm->tsens_addr));
+			/* Clear the corresponding sensors interrupt */
+			writel_relaxed(int_mask_val,
+				TSENS_TM_CRITICAL_INT_CLEAR(tm->tsens_addr));
+			writel_relaxed(0,
+				TSENS_TM_CRITICAL_INT_CLEAR(
+					tm->tsens_addr));
+			critical_thr = true;
+			tm->sensor[i].debug_thr_state_copy.
+					crit_th_state = THERMAL_DEVICE_DISABLED;
+		}
+		spin_unlock_irqrestore(&tm->tsens_crit_lock, flags);
+
+		if (critical_thr) {
+			int temp;
+
+			tsens_tz_get_temp(tm->sensor[i].tz_dev, &temp);
+			rc = tsens_get_sw_id_mapping_for_controller(
+					tm->sensor[i].sensor_hw_num,
+					&sensor_sw_id, tm);
+			if (rc < 0)
+				pr_err("tsens mapping index not found\n");
+			pr_debug("sensor:%d trigger temp (%d degC) with count:%d\n",
+				tm->sensor[i].sensor_hw_num,
+				(status & TSENS_TM_SN_LAST_TEMP_MASK),
+				tm->tsens_critical_irq_cnt);
+				tm->tsens_critical_irq_cnt++;
+		}
+	}
+
+	idx = tm->crit_timestamp_last_interrupt_handled.idx;
+	tm->crit_timestamp_last_interrupt_handled.dbg_count[idx%10]++;
+	tm->crit_timestamp_last_interrupt_handled.time_stmp[idx%10] =
+							sched_clock();
+	tm->qtimer_val_last_detection_interrupt = arch_counter_get_cntvct();
+	if (tsens_poll_check)
+		complete(&tm->tsens_rslt_completion);
+	/* Mask critical interrupt */
+	mb();
+
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t tsens_tm_irq_thread(int irq, void *data)
+{
+	struct tsens_tm_device *tm = data;
+	unsigned int i, status, threshold;
+	unsigned long flags;
+	void __iomem *sensor_status_addr;
+	void __iomem *sensor_int_mask_addr;
+	void __iomem *sensor_upper_lower_addr;
+	int sensor_sw_id = -EINVAL, rc = 0;
+	uint32_t addr_offset;
+
+	sensor_status_addr = TSENS_TM_SN_STATUS(tm->tsens_addr);
+	sensor_int_mask_addr =
+		TSENS_TM_UPPER_LOWER_INT_MASK(tm->tsens_addr);
+	sensor_upper_lower_addr =
+		TSENS_TM_UPPER_LOWER_THRESHOLD(tm->tsens_addr);
+
+	for (i = 0; i < tm->tsens_num_sensor; i++) {
+		bool upper_thr = false, lower_thr = false;
+		int int_mask, int_mask_val = 0;
+
+		spin_lock_irqsave(&tm->tsens_upp_low_lock, flags);
+		addr_offset = tm->sensor[i].sensor_hw_num *
+						TSENS_SN_ADDR_OFFSET;
+		status = readl_relaxed(sensor_status_addr + addr_offset);
+		threshold = readl_relaxed(sensor_upper_lower_addr +
+								addr_offset);
+		int_mask = readl_relaxed(sensor_int_mask_addr);
+
+		if ((status & TSENS_TM_SN_STATUS_UPPER_STATUS) &&
+			!(int_mask &
+				(1 << (tm->sensor[i].sensor_hw_num + 16)))) {
+			int_mask = readl_relaxed(sensor_int_mask_addr);
+			int_mask_val = TSENS_TM_UPPER_INT_SET(
+					tm->sensor[i].sensor_hw_num);
+			/* Mask the corresponding interrupt for the sensors */
+			writel_relaxed(int_mask | int_mask_val,
+				TSENS_TM_UPPER_LOWER_INT_MASK(
+					tm->tsens_addr));
+			/* Clear the corresponding sensors interrupt */
+			writel_relaxed(int_mask_val,
+				TSENS_TM_UPPER_LOWER_INT_CLEAR(
+					tm->tsens_addr));
+			writel_relaxed(0,
+				TSENS_TM_UPPER_LOWER_INT_CLEAR(
+					tm->tsens_addr));
+			upper_thr = true;
+			tm->sensor[i].debug_thr_state_copy.
+					high_th_state = THERMAL_DEVICE_DISABLED;
+		}
+
+		if ((status & TSENS_TM_SN_STATUS_LOWER_STATUS) &&
+			!(int_mask &
+				(1 << tm->sensor[i].sensor_hw_num))) {
+			int_mask = readl_relaxed(sensor_int_mask_addr);
+			int_mask_val = (1 << tm->sensor[i].sensor_hw_num);
+			/* Mask the corresponding interrupt for the sensors */
+			writel_relaxed(int_mask | int_mask_val,
+				TSENS_TM_UPPER_LOWER_INT_MASK(
+					tm->tsens_addr));
+			/* Clear the corresponding sensors interrupt */
+			writel_relaxed(int_mask_val,
+				TSENS_TM_UPPER_LOWER_INT_CLEAR(
+					tm->tsens_addr));
+			writel_relaxed(0,
+				TSENS_TM_UPPER_LOWER_INT_CLEAR(
+					tm->tsens_addr));
+			lower_thr = true;
+			tm->sensor[i].debug_thr_state_copy.
+					low_th_state = THERMAL_DEVICE_DISABLED;
+		}
+		spin_unlock_irqrestore(&tm->tsens_upp_low_lock, flags);
+
+		if (upper_thr || lower_thr) {
+			int temp;
+			enum thermal_trip_type trip =
+					THERMAL_TRIP_CONFIGURABLE_LOW;
+
+			if (upper_thr)
+				trip = THERMAL_TRIP_CONFIGURABLE_HI;
+			tsens_tz_get_temp(tm->sensor[i].tz_dev, &temp);
+			thermal_sensor_trip(tm->sensor[i].tz_dev, trip, temp);
+
+			rc = tsens_get_sw_id_mapping_for_controller(
+					tm->sensor[i].sensor_hw_num,
+					&sensor_sw_id, tm);
+			if (rc < 0)
+				pr_debug("tsens mapping index not found\n");
+			/* Use sensor_client_id for multiple controllers */
+			pr_debug("sensor:%d trigger temp (%d degC)\n",
+				tm->sensor[i].sensor_client_id,
+				(status & TSENS_TM_SN_LAST_TEMP_MASK));
+			if (upper_thr) {
+				trace_tsens_threshold_hit(
+					TSENS_TM_UPPER_THRESHOLD_VALUE(
+						threshold),
+					tm->sensor[i].sensor_client_id);
+				tm->tsens_upper_irq_cnt++;
+			} else {
+				trace_tsens_threshold_clear(
+					TSENS_TM_LOWER_THRESHOLD_VALUE(
+						threshold),
+					tm->sensor[i].sensor_client_id);
+				tm->tsens_lower_irq_cnt++;
+			}
+		}
+	}
+
+	/* Ensure the mask/clear writes for triggered sensors have completed */
+	mb();
+
+	return IRQ_HANDLED;
+}
+
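+/*
+ * Threaded handler for the legacy (non-TYPE3) controllers: threshold
+ * violations are acknowledged by setting the UPPER/LOWER_STATUS_CLR bits
+ * in the per-sensor status control register before notifying the thermal
+ * framework.
+ */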
+static irqreturn_t tsens_irq_thread(int irq, void *data)
+{
+	struct tsens_tm_device *tm = data;
+	unsigned int i, status, threshold;
+	void __iomem *sensor_status_addr;
+	void __iomem *sensor_status_ctrl_addr;
+	int sensor_sw_id = -EINVAL;
+	uint32_t idx = 0;
+
+	if ((tm->tsens_type == TSENS_TYPE2) ||
+			(tm->tsens_type == TSENS_TYPE4))
+		sensor_status_addr = TSENS2_SN_STATUS_ADDR(tm->tsens_addr);
+	else
+		sensor_status_addr = TSENS_S0_STATUS_ADDR(tm->tsens_addr);
+
+	sensor_status_ctrl_addr =
+		TSENS_S0_UPPER_LOWER_STATUS_CTRL_ADDR(tm->tsens_addr);
+	for (i = 0; i < tm->tsens_num_sensor; i++) {
+		bool upper_thr = false, lower_thr = false;
+		uint32_t addr_offset;
+
+		sensor_sw_id = tm->sensor[i].sensor_sw_id;
+		addr_offset = tm->sensor[i].sensor_hw_num *
+						TSENS_SN_ADDR_OFFSET;
+		status = readl_relaxed(sensor_status_addr + addr_offset);
+		threshold = readl_relaxed(sensor_status_ctrl_addr +
+								addr_offset);
+		if (status & TSENS_SN_STATUS_UPPER_STATUS) {
+			writel_relaxed(threshold | TSENS_UPPER_STATUS_CLR,
+				TSENS_S0_UPPER_LOWER_STATUS_CTRL_ADDR(
+					tm->tsens_addr + addr_offset));
+			upper_thr = true;
+			tm->sensor[i].debug_thr_state_copy.
+					high_th_state = THERMAL_DEVICE_DISABLED;
+		}
+		if (status & TSENS_SN_STATUS_LOWER_STATUS) {
+			writel_relaxed(threshold | TSENS_LOWER_STATUS_CLR,
+				TSENS_S0_UPPER_LOWER_STATUS_CTRL_ADDR(
+					tm->tsens_addr + addr_offset));
+			lower_thr = true;
+			tm->sensor[i].debug_thr_state_copy.
+					low_th_state = THERMAL_DEVICE_DISABLED;
+		}
+		if (upper_thr || lower_thr) {
+			int temp;
+			enum thermal_trip_type trip =
+					THERMAL_TRIP_CONFIGURABLE_LOW;
+
+			if (upper_thr)
+				trip = THERMAL_TRIP_CONFIGURABLE_HI;
+			tsens_tz_get_temp(tm->sensor[i].tz_dev, &temp);
+			thermal_sensor_trip(tm->sensor[i].tz_dev, trip, temp);
+
+			pr_debug("sensor:%d trigger temp (%d degC)\n",
+				tm->sensor[i].sensor_hw_num,
+				tsens_tz_code_to_degc((status &
+				TSENS_SN_STATUS_TEMP_MASK),
+				tm->sensor[i].sensor_sw_id, tm));
+			if (upper_thr)
+				trace_tsens_threshold_hit(
+					tsens_tz_code_to_degc((threshold &
+					TSENS_UPPER_THRESHOLD_MASK) >>
+					TSENS_UPPER_THRESHOLD_SHIFT,
+					sensor_sw_id, tm),
+					tm->sensor[i].sensor_hw_num);
+			else
+				trace_tsens_threshold_clear(
+					tsens_tz_code_to_degc((threshold &
+					TSENS_LOWER_THRESHOLD_MASK),
+					sensor_sw_id, tm),
+					tm->sensor[i].sensor_hw_num);
+		}
+	}
+	/* debug: record handler invocation counts and timestamps */
+	idx = tm->tsens_thread_iq_dbg.idx;
+	tm->tsens_thread_iq_dbg.dbg_count[idx % 10]++;
+	tm->tsens_thread_iq_dbg.time_stmp[idx % 10] = sched_clock();
+	tm->tsens_thread_iq_dbg.idx++;
+
+	/* Ensure the threshold-clear writes for triggered sensors complete */
+	mb();
+
+	return IRQ_HANDLED;
+}
+
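+/*
+ * Check that the controller is enabled and unmask its interrupts:
+ * critical/upper/lower on TYPE3 controllers (plus the optional cycle
+ * completion monitor), the single upper/lower enable on older ones.
+ */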
+static int tsens_hw_init(struct tsens_tm_device *tmdev)
+{
+	void __iomem *srot_addr;
+	void __iomem *sensor_int_mask_addr;
+	unsigned int srot_val;
+	int crit_mask;
+
+	if (!tmdev) {
+		pr_err("Invalid tsens device\n");
+		return -EINVAL;
+	}
+
+	if (tmdev->tsens_type == TSENS_TYPE3) {
+		srot_addr = TSENS_CTRL_ADDR(tmdev->tsens_addr + 0x4);
+		srot_val = readl_relaxed(srot_addr);
+		if (!(srot_val & TSENS_EN)) {
+			pr_err("TSENS device is not enabled\n");
+			return -ENODEV;
+		}
+
+		if (tmdev->cycle_compltn_monitor) {
+			sensor_int_mask_addr =
+				TSENS_TM_CRITICAL_INT_MASK(tmdev->tsens_addr);
+			crit_mask = readl_relaxed(sensor_int_mask_addr);
+			writel_relaxed(
+				crit_mask | tmdev->cycle_compltn_monitor_val,
+				(TSENS_TM_CRITICAL_INT_MASK
+				(tmdev->tsens_addr)));
+			/*Update critical cycle monitoring*/
+			mb();
+		}
+		writel_relaxed(TSENS_TM_CRITICAL_INT_EN |
+			TSENS_TM_UPPER_INT_EN | TSENS_TM_LOWER_INT_EN,
+			TSENS_TM_INT_EN(tmdev->tsens_addr));
+	} else
+		writel_relaxed(TSENS_INTERRUPT_EN,
+			TSENS_UPPER_LOWER_INTERRUPT_CTRL(tmdev->tsens_addr));
+
+	return 0;
+}
+
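+/*
+ * Parse the TSENS device tree node: sensor count, optional slope data,
+ * sensor/client id mappings, temp1/temp2 calibration offsets, interrupts
+ * and register regions, then map the controller (and, when calibration is
+ * fused, the EEPROM) register space.
+ */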
+static int get_device_tree_data(struct platform_device *pdev,
+				struct tsens_tm_device *tmdev)
+{
+	struct device_node *of_node = pdev->dev.of_node;
+	struct resource *res_mem = NULL;
+	u32 *tsens_slope_data = NULL, *sensor_id, *client_id;
+	u32 *temp1_calib_offset_factor, *temp2_calib_offset_factor;
+	int rc = 0;
+	u32 i, tsens_num_sensors = 0;
+	u32 cycle_monitor = 0, wd_bark = 0;
+	const struct of_device_id *id;
+
+	rc = of_property_read_u32(of_node,
+			"qcom,sensors", &tsens_num_sensors);
+	if (rc) {
+		dev_err(&pdev->dev, "missing sensor number\n");
+		return -ENODEV;
+	}
+
+	if (tsens_num_sensors == 0) {
+		pr_err("No sensors?\n");
+		return -ENODEV;
+	}
+
+	/* TSENS calibration region */
+	tmdev->res_calib_mem = platform_get_resource_byname(pdev,
+				IORESOURCE_MEM, "tsens_eeprom_physical");
+	if (!tmdev->res_calib_mem) {
+		pr_debug("Using controller programmed gain and offset\n");
+		tmdev->gain_offset_programmed = true;
+	} else {
+		tsens_slope_data = devm_kzalloc(&pdev->dev,
+			tsens_num_sensors * sizeof(u32), GFP_KERNEL);
+		if (!tsens_slope_data)
+			return -ENOMEM;
+
+		rc = of_property_read_u32_array(of_node,
+			"qcom,slope", tsens_slope_data, tsens_num_sensors);
+		if (rc) {
+			dev_err(&pdev->dev, "missing property: qcom,slope\n");
+			return rc;
+		}
+	}
+
+	id = of_match_node(tsens_match, of_node);
+	if (id == NULL) {
+		pr_err("no matching SoC entry in the tsens_match table\n");
+		return -ENODEV;
+	}
+
+	if (!tmdev->gain_offset_programmed) {
+		for (i = 0; i < tsens_num_sensors; i++)
+			tmdev->sensor[i].slope_mul_tsens_factor =
+							tsens_slope_data[i];
+		tmdev->tsens_factor = TSENS_SLOPE_FACTOR;
+	}
+
+	tmdev->tsens_num_sensor = tsens_num_sensors;
+	tmdev->calibration_less_mode = of_property_read_bool(of_node,
+				"qcom,calibration-less-mode");
+	tmdev->tsens_local_init = of_property_read_bool(of_node,
+				"qcom,tsens-local-init");
+
+	sensor_id = devm_kzalloc(&pdev->dev,
+		tsens_num_sensors * sizeof(u32), GFP_KERNEL);
+	if (!sensor_id)
+		return -ENOMEM;
+
+	client_id = devm_kzalloc(&pdev->dev,
+		tsens_num_sensors * sizeof(u32), GFP_KERNEL);
+	if (!client_id)
+		return -ENOMEM;
+
+	rc = of_property_read_u32_array(of_node,
+		"qcom,client-id", client_id, tsens_num_sensors);
+	if (rc) {
+		for (i = 0; i < tsens_num_sensors; i++)
+			tmdev->sensor[i].sensor_client_id = i;
+		pr_debug("Default client id mapping\n");
+	} else {
+		for (i = 0; i < tsens_num_sensors; i++)
+			tmdev->sensor[i].sensor_client_id = client_id[i];
+		pr_debug("Use specified client id mapping\n");
+	}
+
+	rc = of_property_read_u32_array(of_node,
+		"qcom,sensor-id", sensor_id, tsens_num_sensors);
+	if (rc) {
+		pr_debug("Default sensor id mapping\n");
+		for (i = 0; i < tsens_num_sensors; i++) {
+			tmdev->sensor[i].sensor_hw_num = i;
+			tmdev->sensor[i].sensor_sw_id = i;
+		}
+	} else {
+		pr_debug("Use specified sensor id mapping\n");
+		for (i = 0; i < tsens_num_sensors; i++) {
+			tmdev->sensor[i].sensor_hw_num = sensor_id[i];
+			tmdev->sensor[i].sensor_sw_id = i;
+		}
+	}
+
+	rc = of_property_read_u32(of_node,
+			"qcom,cycle-monitor", &cycle_monitor);
+	if (rc) {
+		pr_debug("Default cycle completion monitor\n");
+		tmdev->cycle_compltn_monitor = false;
+	} else {
+		pr_debug("Use specified cycle completion monitor\n");
+		tmdev->cycle_compltn_monitor = true;
+		tmdev->cycle_compltn_monitor_val = cycle_monitor;
+	}
+
+	rc = of_property_read_u32(of_node,
+			"qcom,wd-bark", &wd_bark);
+	if (rc) {
+		pr_debug("Default Watchdog bark\n");
+		tmdev->wd_bark = false;
+	} else {
+		pr_debug("Use specified Watchdog bark\n");
+		tmdev->wd_bark = true;
+		tmdev->wd_bark_val = wd_bark;
+	}
+
+	if (!strcmp(id->compatible, "qcom,msm8996-tsens") ||
+		(!strcmp(id->compatible, "qcom,msm8998-tsens")))
+		tmdev->tsens_type = TSENS_TYPE3;
+	else if (!strcmp(id->compatible, "qcom,msmtitanium-tsens") ||
+		(!strcmp(id->compatible, "qcom,sdm660-tsens")) ||
+		(!strcmp(id->compatible, "qcom,sdm630-tsens")) ||
+		(!strcmp(id->compatible, "qcom,msmhamster-tsens"))) {
+		tmdev->tsens_type = TSENS_TYPE3;
+		tsens_poll_check = 0;
+	} else
+		tmdev->tsens_type = TSENS_TYPE0;
+
+	tmdev->tsens_valid_status_check = of_property_read_bool(of_node,
+				"qcom,valid-status-check");
+	if (!tmdev->tsens_valid_status_check) {
+		if (!strcmp(id->compatible, "qcom,msm8996-tsens") ||
+		(!strcmp(id->compatible, "qcom,msmtitanium-tsens")) ||
+		(!strcmp(id->compatible, "qcom,msm8998-tsens")) ||
+		(!strcmp(id->compatible, "qcom,sdm660-tsens")) ||
+		(!strcmp(id->compatible, "qcom,sdm630-tsens")) ||
+		(!strcmp(id->compatible, "qcom,msmhamster-tsens")))
+			tmdev->tsens_valid_status_check = true;
+	}
+
+	tmdev->tsens_irq = platform_get_irq_byname(pdev,
+					"tsens-upper-lower");
+	if (tmdev->tsens_irq < 0) {
+		pr_err("Invalid Upper/Lower get irq\n");
+		rc = tmdev->tsens_irq;
+		goto fail_tmdev;
+	}
+
+	if (!strcmp(id->compatible, "qcom,msm8996-tsens") ||
+		(!strcmp(id->compatible, "qcom,msm8998-tsens")) ||
+		(!strcmp(id->compatible, "qcom,msmhamster-tsens")) ||
+		(!strcmp(id->compatible, "qcom,sdm660-tsens")) ||
+		(!strcmp(id->compatible, "qcom,sdm630-tsens")) ||
+		(!strcmp(id->compatible, "qcom,msmtitanium-tsens"))) {
+		tmdev->tsens_critical_irq =
+				platform_get_irq_byname(pdev,
+						"tsens-critical");
+		if (tmdev->tsens_critical_irq < 0) {
+			pr_err("Invalid Critical get irq\n");
+			rc = tmdev->tsens_critical_irq;
+			goto fail_tmdev;
+		}
+	}
+
+	temp1_calib_offset_factor = devm_kzalloc(&pdev->dev,
+			tsens_num_sensors * sizeof(u32), GFP_KERNEL);
+	if (!temp1_calib_offset_factor)
+		return -ENOMEM;
+
+	rc = of_property_read_u32_array(of_node,
+				"qcom,temp1-offset", temp1_calib_offset_factor,
+							tsens_num_sensors);
+	if (rc) {
+		pr_debug("Default temp1-offsets\n");
+		for (i = 0; i < tsens_num_sensors; i++)
+			tmdev->sensor[i].wa_temp1_calib_offset_factor = 0;
+	} else {
+		pr_debug("Use specific temp1-offsets\n");
+		for (i = 0; i < tsens_num_sensors; i++)
+			tmdev->sensor[i].wa_temp1_calib_offset_factor =
+						temp1_calib_offset_factor[i];
+	}
+
+	temp2_calib_offset_factor = devm_kzalloc(&pdev->dev,
+			tsens_num_sensors * sizeof(u32), GFP_KERNEL);
+	if (!temp2_calib_offset_factor)
+		return -ENOMEM;
+
+	rc = of_property_read_u32_array(of_node,
+				"qcom,temp2-offset", temp2_calib_offset_factor,
+							tsens_num_sensors);
+	if (rc) {
+		pr_debug("Default temp2-offsets\n");
+		for (i = 0; i < tsens_num_sensors; i++)
+			tmdev->sensor[i].wa_temp2_calib_offset_factor = 0;
+	} else {
+		pr_debug("Use specific temp2-offsets\n");
+		for (i = 0; i < tsens_num_sensors; i++)
+			tmdev->sensor[i].wa_temp2_calib_offset_factor =
+						temp2_calib_offset_factor[i];
+	}
+
+	/* TSENS register region */
+	tmdev->res_tsens_mem = platform_get_resource_byname(pdev,
+					IORESOURCE_MEM, "tsens_physical");
+	if (!tmdev->res_tsens_mem) {
+		pr_err("Could not get tsens physical address resource\n");
+		rc = -EINVAL;
+		goto fail_tmdev;
+	}
+
+	tmdev->tsens_len = tmdev->res_tsens_mem->end -
+					tmdev->res_tsens_mem->start + 1;
+
+	res_mem = request_mem_region(tmdev->res_tsens_mem->start,
+				tmdev->tsens_len, tmdev->res_tsens_mem->name);
+	if (!res_mem) {
+		pr_err("Request tsens physical memory region failed\n");
+		rc = -EINVAL;
+		goto fail_tmdev;
+	}
+
+	tmdev->tsens_addr = ioremap(res_mem->start, tmdev->tsens_len);
+	if (!tmdev->tsens_addr) {
+		pr_err("Failed to IO map TSENS registers.\n");
+		rc = -EINVAL;
+		goto fail_unmap_tsens_region;
+	}
+
+	if (!tmdev->gain_offset_programmed) {
+		tmdev->calib_len = tmdev->res_calib_mem->end -
+					tmdev->res_calib_mem->start + 1;
+
+		tmdev->tsens_calib_addr = ioremap(tmdev->res_calib_mem->start,
+						tmdev->calib_len);
+		if (!tmdev->tsens_calib_addr) {
+			pr_err("Failed to IO map EEPROM registers.\n");
+			rc = -EINVAL;
+			goto fail_unmap_tsens;
+		}
+	}
+
+	return 0;
+
+fail_unmap_tsens:
+	if (tmdev->tsens_addr)
+		iounmap(tmdev->tsens_addr);
+fail_unmap_tsens_region:
+	if (tmdev->res_tsens_mem)
+		release_mem_region(tmdev->res_tsens_mem->start,
+					tmdev->tsens_len);
+fail_tmdev:
+	platform_set_drvdata(pdev, NULL);
+
+	return rc;
+}
+
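+/*
+ * Probe: allocate the device with its trailing sensor array, parse DT,
+ * create the critical-interrupt workqueue, program the interrupt enables
+ * and add the controller to the global device list.
+ */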
+static int tsens_tm_probe(struct platform_device *pdev)
+{
+	struct device_node *of_node = pdev->dev.of_node;
+	int rc, i;
+	u32 tsens_num_sensors;
+	struct tsens_tm_device *tmdev = NULL;
+
+	rc = of_property_read_u32(of_node,
+			"qcom,sensors", &tsens_num_sensors);
+	if (rc) {
+		dev_err(&pdev->dev, "missing qcom,sensors property\n");
+		return -ENODEV;
+	}
+
+	tmdev = devm_kzalloc(&pdev->dev,
+			sizeof(struct tsens_tm_device) +
+			tsens_num_sensors *
+			sizeof(struct tsens_tm_device_sensor),
+			GFP_KERNEL);
+	if (tmdev == NULL) {
+		pr_err("%s: kzalloc() failed.\n", __func__);
+		return -ENOMEM;
+	}
+
+	if (pdev->dev.of_node) {
+		rc = get_device_tree_data(pdev, tmdev);
+		if (rc) {
+			pr_err("Error reading TSENS DT\n");
+			return rc;
+		}
+	} else
+		return -ENODEV;
+
+	tmdev->pdev = pdev;
+
+	tmdev->tsens_critical_wq = alloc_workqueue("tsens_critical_wq",
+							WQ_HIGHPRI, 0);
+	if (!tmdev->tsens_critical_wq) {
+		rc = -ENOMEM;
+		goto fail;
+	}
+
+	rc = tsens_hw_init(tmdev);
+	if (rc)
+		goto fail;
+
+	tmdev->prev_reading_avail = true;
+
+	for (i = 0; i < 16; i++)
+		tmdev->sensor_dbg_info[i].idx = 0;
+
+	spin_lock_init(&tmdev->tsens_crit_lock);
+	spin_lock_init(&tmdev->tsens_upp_low_lock);
+	tmdev->is_ready = true;
+
+	list_add_tail(&tmdev->list, &tsens_device_list);
+	platform_set_drvdata(pdev, tmdev);
+
+	rc = create_tsens_mtc_sysfs(pdev);
+	if (rc < 0)
+		pr_debug("Cannot create create_tsens_mtc_sysfs %d\n", rc);
+
+	return 0;
+fail:
+	if (tmdev->tsens_critical_wq)
+		destroy_workqueue(tmdev->tsens_critical_wq);
+	if (tmdev->tsens_calib_addr)
+		iounmap(tmdev->tsens_calib_addr);
+	if (tmdev->tsens_addr)
+		iounmap(tmdev->tsens_addr);
+	if (tmdev->res_tsens_mem)
+		release_mem_region(tmdev->res_tsens_mem->start,
+			tmdev->tsens_len);
+	platform_set_drvdata(pdev, NULL);
+
+	return rc;
+}
+
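+/* debugfs "stats" read: dump the per-controller interrupt counters */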
+static ssize_t tsens_debugfs_read(struct file *file, char __user *ubuf,
+				  size_t count, loff_t *ppos)
+{
+	int nbytes = 0;
+	struct tsens_tm_device *tmdev = NULL;
+
+	list_for_each_entry(tmdev, &tsens_device_list, list) {
+		nbytes += scnprintf(dbg_buff + nbytes, 1024 - nbytes,
+			"TSENS Critical count: %d\n",
+			tmdev->tsens_critical_irq_cnt);
+		nbytes += scnprintf(dbg_buff + nbytes, 1024 - nbytes,
+			"TSENS Upper count: %d\n",
+			tmdev->tsens_upper_irq_cnt);
+		nbytes += scnprintf(dbg_buff + nbytes, 1024 - nbytes,
+			"TSENS Lower count: %d\n",
+			tmdev->tsens_lower_irq_cnt);
+
+	}
+
+	return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, nbytes);
+}
+
+static const struct file_operations tsens_stats_ops = {
+	.read = tsens_debugfs_read,
+};
+
+static void tsens_debugfs_init(void)
+{
+	const mode_t read_only_mode = S_IRUSR | S_IRGRP | S_IROTH;
+
+	dent = debugfs_create_dir("tsens", NULL);
+	if (IS_ERR_OR_NULL(dent)) {
+		pr_err("Error creating TSENS debugfs directory\n");
+		return;
+	}
+
+	dfile_stats = debugfs_create_file("stats", read_only_mode, dent,
+					NULL, &tsens_stats_ops);
+	if (IS_ERR_OR_NULL(dfile_stats)) {
+		pr_err("Failed to create TSENS stats file\n");
+		return;
+	}
+}
+
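+/*
+ * Register one thermal zone per sensor and request the threaded threshold
+ * interrupt (plus the critical interrupt and optional poll work on TYPE3
+ * controllers). The interrupts are marked wakeup capable.
+ */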
+static int tsens_thermal_zone_register(struct tsens_tm_device *tmdev)
+{
+	int rc = 0, i = 0;
+
+	if (tmdev == NULL) {
+		pr_err("Invalid tsens instance\n");
+		return -EINVAL;
+	}
+
+	for (i = 0; i < tmdev->tsens_num_sensor; i++) {
+		char name[18];
+
+		snprintf(name, sizeof(name), "tsens_tz_sensor%d",
+			tmdev->sensor[i].sensor_client_id);
+		tmdev->sensor[i].mode = THERMAL_DEVICE_ENABLED;
+		tmdev->sensor[i].tm = tmdev;
+		if (tmdev->tsens_type == TSENS_TYPE3) {
+			tmdev->sensor[i].tz_dev = thermal_zone_device_register(
+					name, TSENS_TRIP_NUM,
+					TSENS_WRITABLE_TRIPS_MASK,
+					&tmdev->sensor[i],
+					&tsens_tm_thermal_zone_ops, NULL, 0, 0);
+			if (IS_ERR(tmdev->sensor[i].tz_dev)) {
+				pr_err("%s: failed.\n", __func__);
+				rc = -ENODEV;
+				goto fail;
+			}
+		} else {
+			tmdev->sensor[i].tz_dev = thermal_zone_device_register(
+					name, TSENS_TRIP_NUM,
+					TSENS_WRITABLE_TRIPS_MASK,
+					&tmdev->sensor[i],
+					&tsens_thermal_zone_ops, NULL, 0, 0);
+			if (IS_ERR(tmdev->sensor[i].tz_dev)) {
+				pr_err("%s: failed.\n", __func__);
+				rc = -ENODEV;
+				goto fail;
+			}
+		}
+	}
+
+	if (tmdev->tsens_type == TSENS_TYPE3) {
+		rc = request_threaded_irq(tmdev->tsens_irq, NULL,
+				tsens_tm_irq_thread,
+				IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
+				"tsens_interrupt", tmdev);
+		if (rc < 0) {
+			pr_err("%s: request_irq FAIL: %d\n", __func__, rc);
+			for (i = 0; i < tmdev->tsens_num_sensor; i++)
+				thermal_zone_device_unregister(
+					tmdev->sensor[i].tz_dev);
+			goto fail;
+		} else {
+			enable_irq_wake(tmdev->tsens_irq);
+		}
+
+		rc = request_threaded_irq(tmdev->tsens_critical_irq, NULL,
+			tsens_tm_critical_irq_thread,
+			IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
+			"tsens_critical_interrupt", tmdev);
+		if (rc < 0) {
+			pr_err("%s: request_irq FAIL: %d\n", __func__, rc);
+			for (i = 0; i < tmdev->tsens_num_sensor; i++)
+				thermal_zone_device_unregister(
+					tmdev->sensor[i].tz_dev);
+			goto fail;
+		} else {
+			enable_irq_wake(tmdev->tsens_critical_irq);
+		}
+
+		if (tsens_poll_check) {
+			INIT_DEFERRABLE_WORK(&tmdev->tsens_critical_poll_test,
+								tsens_poll);
+			schedule_delayed_work(&tmdev->tsens_critical_poll_test,
+				msecs_to_jiffies(tsens_sec_to_msec_value));
+			init_completion(&tmdev->tsens_rslt_completion);
+			tmdev->tsens_critical_poll = true;
+		}
+	} else {
+		rc = request_threaded_irq(tmdev->tsens_irq, NULL,
+			tsens_irq_thread, IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
+			"tsens_interrupt", tmdev);
+		if (rc < 0) {
+			pr_err("%s: request_irq FAIL: %d\n", __func__, rc);
+			for (i = 0; i < tmdev->tsens_num_sensor; i++)
+				thermal_zone_device_unregister(
+					tmdev->sensor[i].tz_dev);
+			goto fail;
+		} else {
+			enable_irq_wake(tmdev->tsens_irq);
+		}
+	}
+
+	return 0;
+fail:
+	if (tmdev->tsens_calib_addr)
+		iounmap(tmdev->tsens_calib_addr);
+	if (tmdev->tsens_addr)
+		iounmap(tmdev->tsens_addr);
+	if (tmdev->res_tsens_mem)
+		release_mem_region(tmdev->res_tsens_mem->start,
+			tmdev->tsens_len);
+	return rc;
+}
+
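+/*
+ * Second init stage, run at module_init: register thermal zones for every
+ * controller probed at arch_initcall time, then create the debugfs entries.
+ */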
+static int _tsens_register_thermal(void)
+{
+	struct tsens_tm_device *tmdev = NULL;
+	int rc;
+
+	if (tsens_is_ready() <= 0) {
+		pr_err("%s: TSENS early init not done\n", __func__);
+		return -ENODEV;
+	}
+
+	list_for_each_entry(tmdev, &tsens_device_list, list) {
+		rc = tsens_thermal_zone_register(tmdev);
+		if (rc) {
+			pr_err("Error registering the thermal zone\n");
+			return rc;
+		}
+	}
+
+	tsens_debugfs_init();
+
+	return 0;
+}
+
+static int tsens_tm_remove(struct platform_device *pdev)
+{
+	struct tsens_tm_device *tmdev = platform_get_drvdata(pdev);
+	int i;
+
+	for (i = 0; i < tmdev->tsens_num_sensor; i++)
+		thermal_zone_device_unregister(tmdev->sensor[i].tz_dev);
+	if (tmdev->tsens_calib_addr)
+		iounmap(tmdev->tsens_calib_addr);
+	if (tmdev->tsens_addr)
+		iounmap(tmdev->tsens_addr);
+	if (tmdev->res_tsens_mem)
+		release_mem_region(tmdev->res_tsens_mem->start,
+			tmdev->tsens_len);
+	if (tmdev->tsens_critical_wq)
+		destroy_workqueue(tmdev->tsens_critical_wq);
+	platform_set_drvdata(pdev, NULL);
+
+	return 0;
+}
+
+static struct platform_driver tsens_tm_driver = {
+	.probe = tsens_tm_probe,
+	.remove = tsens_tm_remove,
+	.driver = {
+		.name = "msm-tsens",
+		.owner = THIS_MODULE,
+		.of_match_table = tsens_match,
+	},
+};
+
+int __init tsens_tm_init_driver(void)
+{
+	return platform_driver_register(&tsens_tm_driver);
+}
+arch_initcall(tsens_tm_init_driver);
+
+static int __init tsens_thermal_register(void)
+{
+	return _tsens_register_thermal();
+}
+module_init(tsens_thermal_register);
+
+static void __exit _tsens_tm_remove(void)
+{
+	platform_driver_unregister(&tsens_tm_driver);
+}
+module_exit(_tsens_tm_remove);
+
+MODULE_ALIAS("platform:" TSENS_DRIVER_NAME);
+MODULE_LICENSE("GPL v2");
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/thermal/qpnp-adc-tm.c	2019-01-22 16:16:27.155279478 +0100
@@ -0,0 +1,3386 @@
+/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/kernel.h>
+#include <linux/regmap.h>
+#include <linux/of.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/mutex.h>
+#include <linux/types.h>
+#include <linux/hwmon.h>
+#include <linux/module.h>
+#include <linux/debugfs.h>
+#include <linux/spmi.h>
+#include <linux/platform_device.h>
+#include <linux/of_irq.h>
+#include <linux/interrupt.h>
+#include <linux/completion.h>
+#include <linux/hwmon-sysfs.h>
+#include <linux/qpnp/qpnp-adc.h>
+#include <linux/thermal.h>
+#include <linux/platform_device.h>
+
+/* QPNP VADC TM register definition */
+#define QPNP_REVISION3					0x2
+#define QPNP_PERPH_SUBTYPE				0x5
+#define QPNP_PERPH_TYPE2				0x2
+#define QPNP_REVISION_EIGHT_CHANNEL_SUPPORT		2
+#define QPNP_PERPH_SUBTYPE_TWO_CHANNEL_SUPPORT		0x22
+#define QPNP_STATUS1					0x8
+#define QPNP_STATUS1_OP_MODE				4
+#define QPNP_STATUS1_MEAS_INTERVAL_EN_STS		BIT(2)
+#define QPNP_STATUS1_REQ_STS				BIT(1)
+#define QPNP_STATUS1_EOC				BIT(0)
+#define QPNP_STATUS2					0x9
+#define QPNP_STATUS2_CONV_SEQ_STATE			6
+#define QPNP_STATUS2_FIFO_NOT_EMPTY_FLAG		BIT(1)
+#define QPNP_STATUS2_CONV_SEQ_TIMEOUT_STS		BIT(0)
+#define QPNP_CONV_TIMEOUT_ERR				2
+
+#define QPNP_MODE_CTL					0x40
+#define QPNP_OP_MODE_SHIFT				3
+#define QPNP_VREF_XO_THM_FORCE				BIT(2)
+#define QPNP_AMUX_TRIM_EN				BIT(1)
+#define QPNP_ADC_TRIM_EN				BIT(0)
+#define QPNP_EN_CTL1					0x46
+#define QPNP_ADC_TM_EN					BIT(7)
+#define QPNP_BTM_CONV_REQ				0x47
+#define QPNP_ADC_CONV_REQ_EN				BIT(7)
+
+#define QPNP_ADC_CH_SEL_CTL				0x48
+#define QPNP_ADC_DIG_PARAM				0x50
+#define QPNP_ADC_DIG_DEC_RATIO_SEL_SHIFT		3
+#define QPNP_HW_SETTLE_DELAY				0x51
+#define QPNP_CONV_REQ					0x52
+#define QPNP_CONV_REQ_SET				BIT(7)
+#define QPNP_CONV_SEQ_CTL				0x54
+#define QPNP_CONV_SEQ_HOLDOFF_SHIFT			4
+#define QPNP_CONV_SEQ_TRIG_CTL				0x55
+#define QPNP_ADC_TM_MEAS_INTERVAL_CTL			0x57
+#define QPNP_ADC_TM_MEAS_INTERVAL_TIME_SHIFT		0x3
+#define QPNP_ADC_TM_MEAS_INTERVAL_CTL2			0x58
+#define QPNP_ADC_TM_MEAS_INTERVAL_CTL2_SHIFT		0x4
+#define QPNP_ADC_TM_MEAS_INTERVAL_CTL2_MASK		0xf0
+#define QPNP_ADC_TM_MEAS_INTERVAL_CTL3_MASK		0xf
+
+#define QPNP_ADC_MEAS_INTERVAL_OP_CTL			0x59
+#define QPNP_ADC_MEAS_INTERVAL_OP			BIT(7)
+
+#define QPNP_FAST_AVG_CTL				0x5a
+#define QPNP_FAST_AVG_EN				0x5b
+#define QPNP_FAST_AVG_ENABLED				BIT(7)
+
+#define QPNP_M0_LOW_THR_LSB				0x5c
+#define QPNP_M0_LOW_THR_MSB				0x5d
+#define QPNP_M0_HIGH_THR_LSB				0x5e
+#define QPNP_M0_HIGH_THR_MSB				0x5f
+#define QPNP_M1_ADC_CH_SEL_CTL				0x68
+#define QPNP_M1_LOW_THR_LSB				0x69
+#define QPNP_M1_LOW_THR_MSB				0x6a
+#define QPNP_M1_HIGH_THR_LSB				0x6b
+#define QPNP_M1_HIGH_THR_MSB				0x6c
+#define QPNP_M2_ADC_CH_SEL_CTL				0x70
+#define QPNP_M2_LOW_THR_LSB				0x71
+#define QPNP_M2_LOW_THR_MSB				0x72
+#define QPNP_M2_HIGH_THR_LSB				0x73
+#define QPNP_M2_HIGH_THR_MSB				0x74
+#define QPNP_M3_ADC_CH_SEL_CTL				0x78
+#define QPNP_M3_LOW_THR_LSB				0x79
+#define QPNP_M3_LOW_THR_MSB				0x7a
+#define QPNP_M3_HIGH_THR_LSB				0x7b
+#define QPNP_M3_HIGH_THR_MSB				0x7c
+#define QPNP_M4_ADC_CH_SEL_CTL				0x80
+#define QPNP_M4_LOW_THR_LSB				0x81
+#define QPNP_M4_LOW_THR_MSB				0x82
+#define QPNP_M4_HIGH_THR_LSB				0x83
+#define QPNP_M4_HIGH_THR_MSB				0x84
+#define QPNP_M5_ADC_CH_SEL_CTL				0x88
+#define QPNP_M5_LOW_THR_LSB				0x89
+#define QPNP_M5_LOW_THR_MSB				0x8a
+#define QPNP_M5_HIGH_THR_LSB				0x8b
+#define QPNP_M5_HIGH_THR_MSB				0x8c
+#define QPNP_M6_ADC_CH_SEL_CTL				0x90
+#define QPNP_M6_LOW_THR_LSB				0x91
+#define QPNP_M6_LOW_THR_MSB				0x92
+#define QPNP_M6_HIGH_THR_LSB				0x93
+#define QPNP_M6_HIGH_THR_MSB				0x94
+#define QPNP_M7_ADC_CH_SEL_CTL				0x98
+#define QPNP_M7_LOW_THR_LSB				0x99
+#define QPNP_M7_LOW_THR_MSB				0x9a
+#define QPNP_M7_HIGH_THR_LSB				0x9b
+#define QPNP_M7_HIGH_THR_MSB				0x9c
+
+#define QPNP_ADC_TM_MULTI_MEAS_EN			0x41
+#define QPNP_ADC_TM_MULTI_MEAS_EN_M0			BIT(0)
+#define QPNP_ADC_TM_MULTI_MEAS_EN_M1			BIT(1)
+#define QPNP_ADC_TM_MULTI_MEAS_EN_M2			BIT(2)
+#define QPNP_ADC_TM_MULTI_MEAS_EN_M3			BIT(3)
+#define QPNP_ADC_TM_MULTI_MEAS_EN_M4			BIT(4)
+#define QPNP_ADC_TM_MULTI_MEAS_EN_M5			BIT(5)
+#define QPNP_ADC_TM_MULTI_MEAS_EN_M6			BIT(6)
+#define QPNP_ADC_TM_MULTI_MEAS_EN_M7			BIT(7)
+#define QPNP_ADC_TM_LOW_THR_INT_EN			0x42
+#define QPNP_ADC_TM_LOW_THR_INT_EN_M0			BIT(0)
+#define QPNP_ADC_TM_LOW_THR_INT_EN_M1			BIT(1)
+#define QPNP_ADC_TM_LOW_THR_INT_EN_M2			BIT(2)
+#define QPNP_ADC_TM_LOW_THR_INT_EN_M3			BIT(3)
+#define QPNP_ADC_TM_LOW_THR_INT_EN_M4			BIT(4)
+#define QPNP_ADC_TM_LOW_THR_INT_EN_M5			BIT(5)
+#define QPNP_ADC_TM_LOW_THR_INT_EN_M6			BIT(6)
+#define QPNP_ADC_TM_LOW_THR_INT_EN_M7			BIT(7)
+#define QPNP_ADC_TM_HIGH_THR_INT_EN			0x43
+#define QPNP_ADC_TM_HIGH_THR_INT_EN_M0			BIT(0)
+#define QPNP_ADC_TM_HIGH_THR_INT_EN_M1			BIT(1)
+#define QPNP_ADC_TM_HIGH_THR_INT_EN_M2			BIT(2)
+#define QPNP_ADC_TM_HIGH_THR_INT_EN_M3			BIT(3)
+#define QPNP_ADC_TM_HIGH_THR_INT_EN_M4			BIT(4)
+#define QPNP_ADC_TM_HIGH_THR_INT_EN_M5			BIT(5)
+#define QPNP_ADC_TM_HIGH_THR_INT_EN_M6			BIT(6)
+#define QPNP_ADC_TM_HIGH_THR_INT_EN_M7			BIT(7)
+
+#define QPNP_ADC_TM_M0_MEAS_INTERVAL_CTL			0x59
+#define QPNP_ADC_TM_M1_MEAS_INTERVAL_CTL			0x6d
+#define QPNP_ADC_TM_M2_MEAS_INTERVAL_CTL			0x75
+#define QPNP_ADC_TM_M3_MEAS_INTERVAL_CTL			0x7d
+#define QPNP_ADC_TM_M4_MEAS_INTERVAL_CTL			0x85
+#define QPNP_ADC_TM_M5_MEAS_INTERVAL_CTL			0x8d
+#define QPNP_ADC_TM_M6_MEAS_INTERVAL_CTL			0x95
+#define QPNP_ADC_TM_M7_MEAS_INTERVAL_CTL			0x9d
+#define QPNP_ADC_TM_STATUS1				0x8
+#define QPNP_ADC_TM_STATUS_LOW				0xa
+#define QPNP_ADC_TM_STATUS_HIGH				0xb
+
+#define QPNP_ADC_TM_M0_LOW_THR				0x5d5c
+#define QPNP_ADC_TM_M0_HIGH_THR				0x5f5e
+#define QPNP_ADC_TM_MEAS_INTERVAL			0x0
+
+#define QPNP_ADC_TM_THR_LSB_MASK(val)			((val) & 0xff)
+#define QPNP_ADC_TM_THR_MSB_MASK(val)			(((val) & 0xff00) >> 8)
+
+#define QPNP_MIN_TIME			2000
+#define QPNP_MAX_TIME			2100
+#define QPNP_RETRY			1000
+
+/* QPNP ADC TM HC start */
+#define QPNP_BTM_HC_STATUS1		0x08
+#define QPNP_BTM_HC_STATUS_LOW		0x0a
+#define QPNP_BTM_HC_STATUS_HIGH		0x0b
+
+#define QPNP_BTM_HC_ADC_DIG_PARAM	0x42
+#define QPNP_BTM_HC_FAST_AVG_CTL	0x43
+#define QPNP_BTM_EN_CTL1		0x46
+#define QPNP_BTM_CONV_REQ		0x47
+
+#define QPNP_BTM_MEAS_INTERVAL_CTL	0x50
+#define QPNP_BTM_MEAS_INTERVAL_CTL2	0x51
+
+#define QPNP_BTM_Mn_ADC_CH_SEL_CTL(n)		(((n) * 8) + 0x60)
+#define QPNP_BTM_Mn_LOW_THR0(n)			(((n) * 8) + 0x61)
+#define QPNP_BTM_Mn_LOW_THR1(n)			(((n) * 8) + 0x62)
+#define QPNP_BTM_Mn_HIGH_THR0(n)		(((n) * 8) + 0x63)
+#define QPNP_BTM_Mn_HIGH_THR1(n)		(((n) * 8) + 0x64)
+#define QPNP_BTM_Mn_MEAS_INTERVAL_CTL(n)	(((n) * 8) + 0x65)
+#define QPNP_BTM_Mn_CTL(n)			(((n) * 8) + 0x66)
+#define QPNP_BTM_CTL_HW_SETTLE_DELAY_MASK	0xf
+#define QPNP_BTM_CTL_CAL_SEL			0x30
+#define QPNP_BTM_CTL_CAL_SEL_MASK_SHIFT		4
+#define QPNP_BTM_CTL_CAL_VAL			0x40
+
+#define QPNP_BTM_Mn_EN(n)			(((n) * 8) + 0x67)
+#define QPNP_BTM_Mn_MEAS_EN			BIT(7)
+#define QPNP_BTM_Mn_HIGH_THR_INT_EN		BIT(1)
+#define QPNP_BTM_Mn_LOW_THR_INT_EN		BIT(0)
+
+#define QPNP_BTM_Mn_DATA0(n)			(((n) * 2) + 0xa0)
+#define QPNP_BTM_Mn_DATA1(n)			(((n) * 2) + 0xa1)
+
+/* QPNP ADC TM HC end */
+
+struct qpnp_adc_thr_info {
+	u8		status_low;
+	u8		status_high;
+	u8		qpnp_adc_tm_meas_en;
+	u8		adc_tm_low_enable;
+	u8		adc_tm_high_enable;
+	u8		adc_tm_low_thr_set;
+	u8		adc_tm_high_thr_set;
+};
+
+struct qpnp_adc_thr_client_info {
+	struct list_head		list;
+	struct qpnp_adc_tm_btm_param	*btm_param;
+	int32_t				low_thr_requested;
+	int32_t				high_thr_requested;
+	enum qpnp_state_request		state_requested;
+	enum qpnp_state_request		state_req_copy;
+	bool				low_thr_set;
+	bool				high_thr_set;
+	bool				notify_low_thr;
+	bool				notify_high_thr;
+};
+
+struct qpnp_adc_tm_sensor {
+	struct thermal_zone_device	*tz_dev;
+	struct qpnp_adc_tm_chip		*chip;
+	enum thermal_device_mode	mode;
+	uint32_t			sensor_num;
+	enum qpnp_adc_meas_timer_select	timer_select;
+	uint32_t			meas_interval;
+	uint32_t			low_thr;
+	uint32_t			high_thr;
+	uint32_t			btm_channel_num;
+	uint32_t			vadc_channel_num;
+	struct workqueue_struct		*req_wq;
+	struct work_struct		work;
+	bool				thermal_node;
+	uint32_t			scale_type;
+	struct list_head		thr_list;
+	bool				high_thr_triggered;
+	bool				low_thr_triggered;
+};
+
+struct qpnp_adc_tm_chip {
+	struct device			*dev;
+	struct qpnp_adc_drv		*adc;
+	struct list_head		list;
+	bool				adc_tm_initialized;
+	bool				adc_tm_recalib_check;
+	int				max_channels_available;
+	atomic_t			wq_cnt;
+	struct qpnp_vadc_chip		*vadc_dev;
+	struct workqueue_struct		*high_thr_wq;
+	struct workqueue_struct		*low_thr_wq;
+	struct workqueue_struct		*thr_wq;
+	struct work_struct		trigger_high_thr_work;
+	struct work_struct		trigger_low_thr_work;
+	struct work_struct		trigger_thr_work;
+	bool				adc_vote_enable;
+	struct qpnp_adc_thr_info	th_info;
+	bool				adc_tm_hc;
+	struct qpnp_adc_tm_sensor	sensor[0];
+};
+
+LIST_HEAD(qpnp_adc_tm_device_list);
+
+struct qpnp_adc_tm_trip_reg_type {
+	enum qpnp_adc_tm_channel_select	btm_amux_chan;
+	uint16_t			low_thr_lsb_addr;
+	uint16_t			low_thr_msb_addr;
+	uint16_t			high_thr_lsb_addr;
+	uint16_t			high_thr_msb_addr;
+	u8				multi_meas_en;
+	u8				low_thr_int_chan_en;
+	u8				high_thr_int_chan_en;
+	u8				meas_interval_ctl;
+};
+
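+/* Per-channel (M0..M7) register map for the legacy ADC_TM peripheral */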
+static struct qpnp_adc_tm_trip_reg_type adc_tm_data[] = {
+	[QPNP_ADC_TM_CHAN0] = {QPNP_ADC_TM_M0_ADC_CH_SEL_CTL,
+		QPNP_M0_LOW_THR_LSB,
+		QPNP_M0_LOW_THR_MSB, QPNP_M0_HIGH_THR_LSB,
+		QPNP_M0_HIGH_THR_MSB, QPNP_ADC_TM_MULTI_MEAS_EN_M0,
+		QPNP_ADC_TM_LOW_THR_INT_EN_M0, QPNP_ADC_TM_HIGH_THR_INT_EN_M0,
+		QPNP_ADC_TM_M0_MEAS_INTERVAL_CTL},
+	[QPNP_ADC_TM_CHAN1] = {QPNP_ADC_TM_M1_ADC_CH_SEL_CTL,
+		QPNP_M1_LOW_THR_LSB,
+		QPNP_M1_LOW_THR_MSB, QPNP_M1_HIGH_THR_LSB,
+		QPNP_M1_HIGH_THR_MSB, QPNP_ADC_TM_MULTI_MEAS_EN_M1,
+		QPNP_ADC_TM_LOW_THR_INT_EN_M1, QPNP_ADC_TM_HIGH_THR_INT_EN_M1,
+		QPNP_ADC_TM_M1_MEAS_INTERVAL_CTL},
+	[QPNP_ADC_TM_CHAN2] = {QPNP_ADC_TM_M2_ADC_CH_SEL_CTL,
+		QPNP_M2_LOW_THR_LSB,
+		QPNP_M2_LOW_THR_MSB, QPNP_M2_HIGH_THR_LSB,
+		QPNP_M2_HIGH_THR_MSB, QPNP_ADC_TM_MULTI_MEAS_EN_M2,
+		QPNP_ADC_TM_LOW_THR_INT_EN_M2, QPNP_ADC_TM_HIGH_THR_INT_EN_M2,
+		QPNP_ADC_TM_M2_MEAS_INTERVAL_CTL},
+	[QPNP_ADC_TM_CHAN3] = {QPNP_ADC_TM_M3_ADC_CH_SEL_CTL,
+		QPNP_M3_LOW_THR_LSB,
+		QPNP_M3_LOW_THR_MSB, QPNP_M3_HIGH_THR_LSB,
+		QPNP_M3_HIGH_THR_MSB, QPNP_ADC_TM_MULTI_MEAS_EN_M3,
+		QPNP_ADC_TM_LOW_THR_INT_EN_M3, QPNP_ADC_TM_HIGH_THR_INT_EN_M3,
+		QPNP_ADC_TM_M3_MEAS_INTERVAL_CTL},
+	[QPNP_ADC_TM_CHAN4] = {QPNP_ADC_TM_M4_ADC_CH_SEL_CTL,
+		QPNP_M4_LOW_THR_LSB,
+		QPNP_M4_LOW_THR_MSB, QPNP_M4_HIGH_THR_LSB,
+		QPNP_M4_HIGH_THR_MSB, QPNP_ADC_TM_MULTI_MEAS_EN_M4,
+		QPNP_ADC_TM_LOW_THR_INT_EN_M4, QPNP_ADC_TM_HIGH_THR_INT_EN_M4,
+		QPNP_ADC_TM_M4_MEAS_INTERVAL_CTL},
+	[QPNP_ADC_TM_CHAN5] = {QPNP_ADC_TM_M5_ADC_CH_SEL_CTL,
+		QPNP_M5_LOW_THR_LSB,
+		QPNP_M5_LOW_THR_MSB, QPNP_M5_HIGH_THR_LSB,
+		QPNP_M5_HIGH_THR_MSB, QPNP_ADC_TM_MULTI_MEAS_EN_M5,
+		QPNP_ADC_TM_LOW_THR_INT_EN_M5, QPNP_ADC_TM_HIGH_THR_INT_EN_M5,
+		QPNP_ADC_TM_M5_MEAS_INTERVAL_CTL},
+	[QPNP_ADC_TM_CHAN6] = {QPNP_ADC_TM_M6_ADC_CH_SEL_CTL,
+		QPNP_M6_LOW_THR_LSB,
+		QPNP_M6_LOW_THR_MSB, QPNP_M6_HIGH_THR_LSB,
+		QPNP_M6_HIGH_THR_MSB, QPNP_ADC_TM_MULTI_MEAS_EN_M6,
+		QPNP_ADC_TM_LOW_THR_INT_EN_M6, QPNP_ADC_TM_HIGH_THR_INT_EN_M6,
+		QPNP_ADC_TM_M6_MEAS_INTERVAL_CTL},
+	[QPNP_ADC_TM_CHAN7] = {QPNP_ADC_TM_M7_ADC_CH_SEL_CTL,
+		QPNP_M7_LOW_THR_LSB,
+		QPNP_M7_LOW_THR_MSB, QPNP_M7_HIGH_THR_LSB,
+		QPNP_M7_HIGH_THR_MSB, QPNP_ADC_TM_MULTI_MEAS_EN_M7,
+		QPNP_ADC_TM_LOW_THR_INT_EN_M7, QPNP_ADC_TM_HIGH_THR_INT_EN_M7,
+		QPNP_ADC_TM_M7_MEAS_INTERVAL_CTL},
+};
+
+static struct qpnp_adc_tm_reverse_scale_fn adc_tm_rscale_fn[] = {
+	[SCALE_R_VBATT] = {qpnp_adc_vbatt_rscaler},
+	[SCALE_RBATT_THERM] = {qpnp_adc_btm_scaler},
+	[SCALE_R_USB_ID] = {qpnp_adc_usb_scaler},
+	[SCALE_RPMIC_THERM] = {qpnp_adc_scale_millidegc_pmic_voltage_thr},
+	[SCALE_R_SMB_BATT_THERM] = {qpnp_adc_smb_btm_rscaler},
+	[SCALE_R_ABSOLUTE] = {qpnp_adc_absolute_rthr},
+	[SCALE_QRD_SKUH_RBATT_THERM] = {qpnp_adc_qrd_skuh_btm_scaler},
+	[SCALE_QRD_SKUT1_RBATT_THERM] = {qpnp_adc_qrd_skut1_btm_scaler},
+};
+
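+/* regmap helpers; reg is an offset from the peripheral base (adc->offset) */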
+static int32_t qpnp_adc_tm_read_reg(struct qpnp_adc_tm_chip *chip,
+					int16_t reg, u8 *data, int len)
+{
+	int rc = 0;
+
+	rc = regmap_bulk_read(chip->adc->regmap, (chip->adc->offset + reg),
+								data, len);
+	if (rc < 0)
+		pr_err("adc-tm read reg %d failed with %d\n", reg, rc);
+
+	return rc;
+}
+
+static int32_t qpnp_adc_tm_write_reg(struct qpnp_adc_tm_chip *chip,
+					int16_t reg, u8 data, int len)
+{
+	int rc = 0;
+	u8 *buf;
+
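+	/* data is a single byte, so only len == 1 writes are meaningful */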
+	buf = &data;
+
+	rc = regmap_bulk_write(chip->adc->regmap, (chip->adc->offset + reg),
+								buf, len);
+	if (rc < 0)
+		pr_err("adc-tm write reg %d failed with %d\n", reg, rc);
+
+	return rc;
+}
+
+static int32_t qpnp_adc_tm_fast_avg_en(struct qpnp_adc_tm_chip *chip,
+				uint32_t *fast_avg_sample)
+{
+	int rc = 0, version = 0;
+	u8 fast_avg_en = 0;
+
+	version = qpnp_adc_get_revid_version(chip->dev);
+	if (!((version == QPNP_REV_ID_8916_1_0) ||
+		(version == QPNP_REV_ID_8916_1_1) ||
+		(version == QPNP_REV_ID_8916_2_0))) {
+		pr_debug("fast-avg-en not required for this version\n");
+		return rc;
+	}
+
+	fast_avg_en = QPNP_FAST_AVG_ENABLED;
+	rc = qpnp_adc_tm_write_reg(chip, QPNP_FAST_AVG_EN, fast_avg_en, 1);
+	if (rc < 0) {
+		pr_err("adc-tm fast-avg enable err\n");
+		return rc;
+	}
+
+	if (*fast_avg_sample >= 3)
+		*fast_avg_sample = 2;
+
+	return rc;
+}
+
+static int qpnp_adc_tm_check_vreg_vote(struct qpnp_adc_tm_chip *chip)
+{
+	int rc = 0;
+
+	if (!chip->adc_vote_enable) {
+		if (chip->adc->hkadc_ldo && chip->adc->hkadc_ldo_ok) {
+			rc = qpnp_adc_enable_voltage(chip->adc);
+			if (rc) {
+				pr_err("failed enabling VADC LDO\n");
+				return rc;
+			}
+			chip->adc_vote_enable = true;
+		}
+	}
+
+	return rc;
+}
+
+static int32_t qpnp_adc_tm_enable(struct qpnp_adc_tm_chip *chip)
+{
+	int rc = 0;
+	u8 data = 0;
+
+	rc = qpnp_adc_tm_check_vreg_vote(chip);
+	if (rc) {
+		pr_err("ADC TM VREG enable failed:%d\n", rc);
+		return rc;
+	}
+
+	data = QPNP_ADC_TM_EN;
+	rc = qpnp_adc_tm_write_reg(chip, QPNP_EN_CTL1, data, 1);
+	if (rc < 0) {
+		pr_err("adc-tm enable failed\n");
+		return rc;
+	}
+
+	if (chip->adc_tm_hc) {
+		data = QPNP_ADC_CONV_REQ_EN;
+		rc = qpnp_adc_tm_write_reg(chip, QPNP_BTM_CONV_REQ, data, 1);
+		if (rc < 0) {
+			pr_err("adc-tm enable failed\n");
+			return rc;
+		}
+	}
+
+	return rc;
+}
+
+static int32_t qpnp_adc_tm_disable(struct qpnp_adc_tm_chip *chip)
+{
+	u8 data = 0;
+	int rc = 0;
+
+	if (chip->adc_tm_hc) {
+		rc = qpnp_adc_tm_write_reg(chip, QPNP_BTM_CONV_REQ, data, 1);
+		if (rc < 0) {
+			pr_err("adc-tm enable failed\n");
+			return rc;
+		}
+	}
+
+	rc = qpnp_adc_tm_write_reg(chip, QPNP_EN_CTL1, data, 1);
+	if (rc < 0) {
+		pr_err("adc-tm disable failed\n");
+		return rc;
+	}
+
+	return rc;
+}
+
+static int qpnp_adc_tm_is_valid(struct qpnp_adc_tm_chip *chip)
+{
+	struct qpnp_adc_tm_chip *adc_tm_chip = NULL;
+
+	list_for_each_entry(adc_tm_chip, &qpnp_adc_tm_device_list, list)
+		if (chip == adc_tm_chip)
+			return 0;
+
+	return -EINVAL;
+}
+
+static int32_t qpnp_adc_tm_rc_check_channel_en(struct qpnp_adc_tm_chip *chip)
+{
+	u8 adc_tm_ctl = 0, status_low = 0, status_high = 0;
+	int rc = 0, i = 0;
+	bool ldo_en = false;
+
+	for (i = 0; i < chip->max_channels_available; i++) {
+		rc = qpnp_adc_tm_read_reg(chip, QPNP_BTM_Mn_EN(i),
+							&adc_tm_ctl, 1);
+		if (rc) {
+			pr_err("adc-tm read MEAS_EN failed with %d\n", rc);
+			return rc;
+		}
+
+		/* Capture the threshold bits before masking down to MEAS_EN */
+		status_low = adc_tm_ctl & QPNP_BTM_Mn_LOW_THR_INT_EN;
+		status_high = adc_tm_ctl & QPNP_BTM_Mn_HIGH_THR_INT_EN;
+		adc_tm_ctl &= QPNP_BTM_Mn_MEAS_EN;
+
+		/* Enable only if there are pending measurement requests */
+		if (adc_tm_ctl && (status_high || status_low)) {
+			qpnp_adc_tm_enable(chip);
+			ldo_en = true;
+
+			/* Request conversion */
+			rc = qpnp_adc_tm_write_reg(chip, QPNP_CONV_REQ,
+							QPNP_CONV_REQ_SET, 1);
+			if (rc < 0) {
+				pr_err("adc-tm request conversion failed\n");
+				return rc;
+			}
+			/* one pending channel is enough; stop scanning */
+			break;
+		}
+	}
+
+	if (!ldo_en) {
+		/* disable the vote if applicable */
+		if (chip->adc_vote_enable && chip->adc->hkadc_ldo &&
+					chip->adc->hkadc_ldo_ok) {
+			qpnp_adc_disable_voltage(chip->adc);
+			chip->adc_vote_enable = false;
+		}
+	}
+
+	return rc;
+}
+
+static int32_t qpnp_adc_tm_enable_if_channel_meas(
+					struct qpnp_adc_tm_chip *chip)
+{
+	u8 adc_tm_meas_en = 0, status_low = 0, status_high = 0;
+	int rc = 0;
+
+	if (chip->adc_tm_hc) {
+		rc = qpnp_adc_tm_rc_check_channel_en(chip);
+		if (rc) {
+			pr_err("adc_tm channel check failed\n");
+			return rc;
+		}
+	} else {
+		/* Check if a measurement request is still required */
+		rc = qpnp_adc_tm_read_reg(chip, QPNP_ADC_TM_MULTI_MEAS_EN,
+							&adc_tm_meas_en, 1);
+		if (rc) {
+			pr_err("read status high failed with %d\n", rc);
+			return rc;
+		}
+
+		rc = qpnp_adc_tm_read_reg(chip, QPNP_ADC_TM_LOW_THR_INT_EN,
+							&status_low, 1);
+		if (rc) {
+			pr_err("read status low failed with %d\n", rc);
+			return rc;
+		}
+
+		rc = qpnp_adc_tm_read_reg(chip, QPNP_ADC_TM_HIGH_THR_INT_EN,
+							&status_high, 1);
+		if (rc) {
+			pr_err("read status high failed with %d\n", rc);
+			return rc;
+		}
+
+		/* Enable only if there are pending measurement requests */
+		if ((adc_tm_meas_en && status_high) ||
+				(adc_tm_meas_en && status_low)) {
+			qpnp_adc_tm_enable(chip);
+
+			/* Request conversion */
+			rc = qpnp_adc_tm_write_reg(chip, QPNP_CONV_REQ,
+							QPNP_CONV_REQ_SET, 1);
+			if (rc < 0) {
+				pr_err("adc-tm request conversion failed\n");
+				return rc;
+			}
+		} else {
+			/* disable the vote if applicable */
+			if (chip->adc_vote_enable && chip->adc->hkadc_ldo &&
+					chip->adc->hkadc_ldo_ok) {
+				qpnp_adc_disable_voltage(chip->adc);
+				chip->adc_vote_enable = false;
+			}
+		}
+	}
+
+	return rc;
+}
+
+static int32_t qpnp_adc_tm_mode_select(struct qpnp_adc_tm_chip *chip,
+								u8 mode_ctl)
+{
+	int rc;
+
+	mode_ctl |= (QPNP_ADC_TRIM_EN | QPNP_AMUX_TRIM_EN);
+
+	/* VADC_BTM current sets mode to recurring measurements */
+	rc = qpnp_adc_tm_write_reg(chip, QPNP_MODE_CTL, mode_ctl, 1);
+	if (rc < 0)
+		pr_err("adc-tm write mode selection err\n");
+
+	return rc;
+}
+
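+/*
+ * Wait for any in-flight conversion to finish by polling REQ_STS with a
+ * bounded retry loop, restore recurring measurement mode on the legacy
+ * peripheral and leave the bank disabled for the next configuration.
+ */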
+static int32_t qpnp_adc_tm_req_sts_check(struct qpnp_adc_tm_chip *chip)
+{
+	u8 status1 = 0, mode_ctl = 0;
+	int rc, count = 0;
+
+	/* Re-enable the peripheral */
+	rc = qpnp_adc_tm_enable(chip);
+	if (rc) {
+		pr_err("adc-tm re-enable peripheral failed\n");
+		return rc;
+	}
+
+	/* The VADC_TM bank needs to be disabled for new conversion request */
+	rc = qpnp_adc_tm_read_reg(chip, QPNP_ADC_TM_STATUS1, &status1, 1);
+	if (rc) {
+		pr_err("adc-tm read status1 failed\n");
+		return rc;
+	}
+
+	/* Disable the bank if a conversion is occurring */
+	while (status1 & QPNP_STATUS1_REQ_STS) {
+		if (count > QPNP_RETRY) {
+			pr_err("retry error=%d with 0x%x\n", count, status1);
+			break;
+		}
+		/*
+		 * Wait time is based on the optimum sampling rate
+		 * and adding enough time buffer to account for ADC conversions
+		 * occurring on different peripheral banks
+		 */
+		usleep_range(QPNP_MIN_TIME, QPNP_MAX_TIME);
+		rc = qpnp_adc_tm_read_reg(chip, QPNP_ADC_TM_STATUS1,
+							&status1, 1);
+		if (rc < 0) {
+			pr_err("adc-tm disable failed\n");
+			return rc;
+		}
+		count++;
+	}
+
+	if (!chip->adc_tm_hc) {
+		/* Change the mode back to recurring measurement mode */
+		mode_ctl = ADC_OP_MEASUREMENT_INTERVAL << QPNP_OP_MODE_SHIFT;
+		rc = qpnp_adc_tm_mode_select(chip, mode_ctl);
+		if (rc < 0) {
+			pr_err("adc-tm mode change to recurring failed\n");
+			return rc;
+		}
+	}
+
+	/* Disable the peripheral */
+	rc = qpnp_adc_tm_disable(chip);
+	if (rc < 0) {
+		pr_err("adc-tm peripheral disable failed\n");
+		return rc;
+	}
+
+	return rc;
+}
+
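+/*
+ * Translate a BTM channel number into an index into adc_tm_data[] on the
+ * legacy peripheral, or into chip->sensor[] on the HC peripheral.
+ */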
+static int32_t qpnp_adc_tm_get_btm_idx(struct qpnp_adc_tm_chip *chip,
+				uint32_t btm_chan, uint32_t *btm_chan_idx)
+{
+	int rc = 0, i;
+	bool chan_found = false;
+
+	if (!chip->adc_tm_hc) {
+		for (i = 0; i < QPNP_ADC_TM_CHAN_NONE; i++) {
+			if (adc_tm_data[i].btm_amux_chan == btm_chan) {
+				*btm_chan_idx = i;
+				chan_found = true;
+			}
+		}
+	} else {
+		for (i = 0; i < chip->max_channels_available; i++) {
+			if (chip->sensor[i].btm_channel_num == btm_chan) {
+				*btm_chan_idx = i;
+				chan_found = true;
+				break;
+			}
+		}
+	}
+
+	if (!chan_found)
+		return -EINVAL;
+
+	return rc;
+}
+
+static int32_t qpnp_adc_tm_check_revision(struct qpnp_adc_tm_chip *chip,
+							uint32_t btm_chan_num)
+{
+	u8 rev, perph_subtype;
+	int rc = 0;
+
+	rc = qpnp_adc_tm_read_reg(chip, QPNP_REVISION3, &rev, 1);
+	if (rc) {
+		pr_err("adc-tm revision read failed\n");
+		return rc;
+	}
+
+	rc = qpnp_adc_tm_read_reg(chip, QPNP_PERPH_SUBTYPE, &perph_subtype, 1);
+	if (rc) {
+		pr_err("adc-tm perph_subtype read failed\n");
+		return rc;
+	}
+
+	if (perph_subtype == QPNP_PERPH_TYPE2) {
+		if ((rev < QPNP_REVISION_EIGHT_CHANNEL_SUPPORT) &&
+			(btm_chan_num > QPNP_ADC_TM_M4_ADC_CH_SEL_CTL)) {
+			pr_debug("Version does not support more than 5 channels\n");
+			return -EINVAL;
+		}
+	}
+
+	if (perph_subtype == QPNP_PERPH_SUBTYPE_TWO_CHANNEL_SUPPORT) {
+		if (btm_chan_num > QPNP_ADC_TM_M1_ADC_CH_SEL_CTL) {
+			pr_debug("Version does not support more than 2 channels\n");
+			return -EINVAL;
+		}
+	}
+
+	return rc;
+}
+
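+/*
+ * Program the measurement interval: timer1 has its own register, while
+ * timer2 and timer3 share one (upper and lower nibble respectively); then
+ * select which timer the BTM channel uses.
+ */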
+static int32_t qpnp_adc_tm_timer_interval_select(
+		struct qpnp_adc_tm_chip *chip, uint32_t btm_chan,
+		struct qpnp_vadc_chan_properties *chan_prop)
+{
+	int rc, chan_idx = 0, i = 0;
+	bool chan_found = false;
+	u8 meas_interval_timer2 = 0, timer_interval_store = 0;
+	uint32_t btm_chan_idx = 0;
+
+	while (i < chip->max_channels_available) {
+		if (chip->sensor[i].btm_channel_num == btm_chan) {
+			chan_idx = i;
+			chan_found = true;
+			i++;
+		} else
+			i++;
+	}
+
+	if (!chan_found) {
+		pr_err("Channel not found\n");
+		return -EINVAL;
+	}
+
+	switch (chip->sensor[chan_idx].timer_select) {
+	case ADC_MEAS_TIMER_SELECT1:
+		if (!chip->adc_tm_hc)
+			rc = qpnp_adc_tm_write_reg(chip,
+				QPNP_ADC_TM_MEAS_INTERVAL_CTL,
+				chip->sensor[chan_idx].meas_interval, 1);
+		else
+			rc = qpnp_adc_tm_write_reg(chip,
+				QPNP_BTM_MEAS_INTERVAL_CTL,
+				chip->sensor[chan_idx].meas_interval, 1);
+		if (rc < 0) {
+			pr_err("timer1 configure failed\n");
+			return rc;
+		}
+	break;
+	case ADC_MEAS_TIMER_SELECT2:
+		/* Thermal channels use timer2; default to 1 second */
+		if (!chip->adc_tm_hc)
+			rc = qpnp_adc_tm_read_reg(chip,
+				QPNP_ADC_TM_MEAS_INTERVAL_CTL2,
+				&meas_interval_timer2, 1);
+		else
+			rc = qpnp_adc_tm_read_reg(chip,
+				QPNP_BTM_MEAS_INTERVAL_CTL2,
+				&meas_interval_timer2, 1);
+		if (rc < 0) {
+			pr_err("timer2 configure read failed\n");
+			return rc;
+		}
+		timer_interval_store = chip->sensor[chan_idx].meas_interval;
+		timer_interval_store <<= QPNP_ADC_TM_MEAS_INTERVAL_CTL2_SHIFT;
+		timer_interval_store &= QPNP_ADC_TM_MEAS_INTERVAL_CTL2_MASK;
+		meas_interval_timer2 |= timer_interval_store;
+		if (!chip->adc_tm_hc)
+			rc = qpnp_adc_tm_write_reg(chip,
+				QPNP_ADC_TM_MEAS_INTERVAL_CTL2,
+				meas_interval_timer2, 1);
+		else
+			rc = qpnp_adc_tm_write_reg(chip,
+				QPNP_BTM_MEAS_INTERVAL_CTL2,
+				meas_interval_timer2, 1);
+		if (rc < 0) {
+			pr_err("timer2 configure failed\n");
+			return rc;
+		}
+	break;
+	case ADC_MEAS_TIMER_SELECT3:
+		if (!chip->adc_tm_hc)
+			rc = qpnp_adc_tm_read_reg(chip,
+				QPNP_ADC_TM_MEAS_INTERVAL_CTL2,
+				&meas_interval_timer2, 1);
+		else
+			rc = qpnp_adc_tm_read_reg(chip,
+				QPNP_BTM_MEAS_INTERVAL_CTL2,
+				&meas_interval_timer2, 1);
+		if (rc < 0) {
+			pr_err("timer3 read failed\n");
+			return rc;
+		}
+		timer_interval_store = chip->sensor[chan_idx].meas_interval;
+		timer_interval_store &= QPNP_ADC_TM_MEAS_INTERVAL_CTL3_MASK;
+		meas_interval_timer2 |= timer_interval_store;
+		if (!chip->adc_tm_hc)
+			rc = qpnp_adc_tm_write_reg(chip,
+				QPNP_ADC_TM_MEAS_INTERVAL_CTL2,
+				meas_interval_timer2, 1);
+		else
+			rc = qpnp_adc_tm_write_reg(chip,
+				QPNP_BTM_MEAS_INTERVAL_CTL2,
+				meas_interval_timer2, 1);
+		if (rc < 0) {
+			pr_err("timer3 configure failed\n");
+			return rc;
+		}
+	break;
+	default:
+		pr_err("Invalid timer selection\n");
+		return -EINVAL;
+	}
+
+	/* Select the timer to use for the corresponding channel */
+	rc = qpnp_adc_tm_get_btm_idx(chip, btm_chan, &btm_chan_idx);
+	if (rc < 0) {
+		pr_err("Invalid btm channel idx\n");
+		return rc;
+	}
+	if (!chip->adc_tm_hc)
+		rc = qpnp_adc_tm_write_reg(chip,
+			adc_tm_data[btm_chan_idx].meas_interval_ctl,
+				chip->sensor[chan_idx].timer_select, 1);
+	else
+		rc = qpnp_adc_tm_write_reg(chip,
+				QPNP_BTM_Mn_MEAS_INTERVAL_CTL(btm_chan_idx),
+				chip->sensor[chan_idx].timer_select, 1);
+	if (rc < 0) {
+		pr_err("TM channel timer configure failed\n");
+		return rc;
+	}
+
+	pr_debug("timer select:%d, timer_value_within_select:%d, channel:%x\n",
+			chip->sensor[chan_idx].timer_select,
+			chip->sensor[chan_idx].meas_interval,
+			btm_chan);
+
+	return rc;
+}
+
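+/*
+ * Track each client's requested thresholds per sensor: an existing entry
+ * for the same btm_param is refreshed in place, otherwise a new node is
+ * appended to the sensor's threshold list.
+ */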
+static int32_t qpnp_adc_tm_add_to_list(struct qpnp_adc_tm_chip *chip,
+				uint32_t dt_index,
+				struct qpnp_adc_tm_btm_param *param,
+				struct qpnp_vadc_chan_properties *chan_prop)
+{
+	struct qpnp_adc_thr_client_info *client_info = NULL;
+	bool client_info_exists = false;
+
+	list_for_each_entry(client_info,
+			&chip->sensor[dt_index].thr_list, list) {
+		if (client_info->btm_param == param) {
+			client_info->low_thr_requested = chan_prop->low_thr;
+			client_info->high_thr_requested = chan_prop->high_thr;
+			client_info->state_requested = param->state_request;
+			client_info->state_req_copy = param->state_request;
+			client_info->notify_low_thr = false;
+			client_info->notify_high_thr = false;
+			client_info_exists = true;
+			pr_debug("client found\n");
+		}
+	}
+
+	if (!client_info_exists) {
+		client_info = devm_kzalloc(chip->dev,
+			sizeof(struct qpnp_adc_thr_client_info), GFP_KERNEL);
+		if (!client_info) {
+			pr_err("%s: kzalloc() failed.\n", __func__);
+			return -ENOMEM;
+		}
+
+		pr_debug("new client\n");
+		client_info->btm_param = param;
+		client_info->low_thr_requested = chan_prop->low_thr;
+		client_info->high_thr_requested = chan_prop->high_thr;
+		client_info->state_requested = param->state_request;
+		client_info->state_req_copy = param->state_request;
+
+		list_add_tail(&client_info->list,
+					&chip->sensor[dt_index].thr_list);
+	}
+
+	return 0;
+}
+
+static int32_t qpnp_adc_tm_reg_update(struct qpnp_adc_tm_chip *chip,
+		uint16_t addr, u8 mask, bool state)
+{
+	u8 reg_value = 0;
+	int rc = 0;
+
+	rc = qpnp_adc_tm_read_reg(chip, addr, &reg_value, 1);
+	if (rc < 0) {
+		pr_err("read failed for addr:0x%x\n", addr);
+		return rc;
+	}
+
+	reg_value = reg_value & ~mask;
+	if (state)
+		reg_value |= mask;
+
+	pr_debug("state:%d, reg:0x%x with bits:0x%x and mask:0x%x\n",
+					state, addr, reg_value, ~mask);
+	rc = qpnp_adc_tm_write_reg(chip, addr, reg_value, 1);
+	if (rc < 0) {
+		pr_err("write failed for addr:%x\n", addr);
+		return rc;
+	}
+
+	return rc;
+}
+
+static int32_t qpnp_adc_tm_read_thr_value(struct qpnp_adc_tm_chip *chip,
+			uint32_t btm_chan)
+{
+	int rc = 0;
+	u8 data_lsb = 0, data_msb = 0;
+	uint32_t btm_chan_idx = 0;
+	int32_t low_thr = 0, high_thr = 0;
+
+	if (!chip->adc_tm_hc) {
+		pr_err("Not applicable for VADC HC peripheral\n");
+		return -EINVAL;
+	}
+
+	rc = qpnp_adc_tm_get_btm_idx(chip, btm_chan, &btm_chan_idx);
+	if (rc < 0) {
+		pr_err("Invalid btm channel idx\n");
+		return rc;
+	}
+
+	rc = qpnp_adc_tm_read_reg(chip,
+			adc_tm_data[btm_chan_idx].low_thr_lsb_addr,
+			&data_lsb, 1);
+	if (rc < 0) {
+		pr_err("low threshold lsb setting failed\n");
+		return rc;
+	}
+
+	rc = qpnp_adc_tm_read_reg(chip,
+		adc_tm_data[btm_chan_idx].low_thr_msb_addr,
+		&data_msb, 1);
+	if (rc < 0) {
+		pr_err("low threshold msb setting failed\n");
+		return rc;
+	}
+
+	low_thr = (data_msb << 8) | data_lsb;
+
+	rc = qpnp_adc_tm_read_reg(chip,
+		adc_tm_data[btm_chan_idx].high_thr_lsb_addr,
+		&data_lsb, 1);
+	if (rc < 0) {
+		pr_err("high threshold lsb setting failed\n");
+		return rc;
+	}
+
+	rc = qpnp_adc_tm_read_reg(chip,
+		adc_tm_data[btm_chan_idx].high_thr_msb_addr,
+		&data_msb, 1);
+	if (rc < 0) {
+		pr_err("high threshold msb setting failed\n");
+		return rc;
+	}
+
+	high_thr = (data_msb << 8) | data_lsb;
+
+	pr_debug("configured thresholds high:0x%x and low:0x%x\n",
+		high_thr, low_thr);
+
+	return rc;
+}
+
+static int32_t qpnp_adc_tm_thr_update(struct qpnp_adc_tm_chip *chip,
+			uint32_t btm_chan, int32_t high_thr, int32_t low_thr)
+{
+	int rc = 0;
+	uint32_t btm_chan_idx = 0;
+
+	rc = qpnp_adc_tm_get_btm_idx(chip, btm_chan, &btm_chan_idx);
+	if (rc < 0) {
+		pr_err("Invalid btm channel idx\n");
+		return rc;
+	}
+
+	if (!chip->adc_tm_hc) {
+		rc = qpnp_adc_tm_write_reg(chip,
+			adc_tm_data[btm_chan_idx].low_thr_lsb_addr,
+			QPNP_ADC_TM_THR_LSB_MASK(low_thr), 1);
+		if (rc < 0) {
+			pr_err("low threshold lsb setting failed\n");
+			return rc;
+		}
+
+		rc = qpnp_adc_tm_write_reg(chip,
+			adc_tm_data[btm_chan_idx].low_thr_msb_addr,
+			QPNP_ADC_TM_THR_MSB_MASK(low_thr), 1);
+		if (rc < 0) {
+			pr_err("low threshold msb setting failed\n");
+			return rc;
+		}
+
+		rc = qpnp_adc_tm_write_reg(chip,
+			adc_tm_data[btm_chan_idx].high_thr_lsb_addr,
+			QPNP_ADC_TM_THR_LSB_MASK(high_thr), 1);
+		if (rc < 0) {
+			pr_err("high threshold lsb setting failed\n");
+			return rc;
+		}
+
+		rc = qpnp_adc_tm_write_reg(chip,
+			adc_tm_data[btm_chan_idx].high_thr_msb_addr,
+			QPNP_ADC_TM_THR_MSB_MASK(high_thr), 1);
+		if (rc < 0)
+			pr_err("high threshold msb setting failed\n");
+	} else {
+		rc = qpnp_adc_tm_write_reg(chip,
+			QPNP_BTM_Mn_LOW_THR0(btm_chan_idx),
+			QPNP_ADC_TM_THR_LSB_MASK(low_thr), 1);
+		if (rc < 0) {
+			pr_err("low threshold lsb setting failed\n");
+			return rc;
+		}
+
+		rc = qpnp_adc_tm_write_reg(chip,
+			QPNP_BTM_Mn_LOW_THR1(btm_chan_idx),
+			QPNP_ADC_TM_THR_MSB_MASK(low_thr), 1);
+		if (rc < 0) {
+			pr_err("low threshold msb setting failed\n");
+			return rc;
+		}
+
+		rc = qpnp_adc_tm_write_reg(chip,
+			QPNP_BTM_Mn_HIGH_THR0(btm_chan_idx),
+			QPNP_ADC_TM_THR_LSB_MASK(high_thr), 1);
+		if (rc < 0) {
+			pr_err("high threshold lsb setting failed\n");
+			return rc;
+		}
+
+		rc = qpnp_adc_tm_write_reg(chip,
+			QPNP_BTM_Mn_HIGH_THR1(btm_chan_idx),
+			QPNP_ADC_TM_THR_MSB_MASK(high_thr), 1);
+		if (rc < 0)
+			pr_err("high threshold msb setting failed\n");
+
+	}
+
+	pr_debug("client requested high:%d and low:%d\n",
+		high_thr, low_thr);
+
+	return rc;
+}
+
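+/*
+ * Aggregate all client requests for a sensor: program the minimum of the
+ * requested high thresholds and the maximum of the requested low ones, so
+ * the hardware trips at the most conservative bound, and flag the clients
+ * whose request matches the programmed value.
+ */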
+static int32_t qpnp_adc_tm_manage_thresholds(struct qpnp_adc_tm_chip *chip,
+		uint32_t dt_index, uint32_t btm_chan)
+{
+	struct qpnp_adc_thr_client_info *client_info = NULL;
+	struct list_head *thr_list;
+	int high_thr = 0, low_thr = 0, rc = 0;
+
+	/*
+	 * Pick an initial high_thr/low_thr from the client list and clear
+	 * every client's high_thr_set/low_thr_set flags, since the aggregate
+	 * thresholds are recomputed below.
+	 */
+	list_for_each(thr_list,
+			&chip->sensor[dt_index].thr_list) {
+		client_info = list_entry(thr_list,
+					struct qpnp_adc_thr_client_info, list);
+		high_thr = client_info->high_thr_requested;
+		low_thr = client_info->low_thr_requested;
+		client_info->high_thr_set = false;
+		client_info->low_thr_set = false;
+	}
+
+	pr_debug("init threshold is high:%d and low:%d\n", high_thr, low_thr);
+
+	/* Find the min of high_thr and max of low_thr */
+	list_for_each(thr_list,
+			&chip->sensor[dt_index].thr_list) {
+		client_info = list_entry(thr_list,
+					struct qpnp_adc_thr_client_info, list);
+		if ((client_info->state_req_copy == ADC_TM_HIGH_THR_ENABLE) ||
+			(client_info->state_req_copy ==
+						ADC_TM_HIGH_LOW_THR_ENABLE))
+			if (client_info->high_thr_requested < high_thr)
+				high_thr = client_info->high_thr_requested;
+
+		if ((client_info->state_req_copy == ADC_TM_LOW_THR_ENABLE) ||
+			(client_info->state_req_copy ==
+						ADC_TM_HIGH_LOW_THR_ENABLE))
+			if (client_info->low_thr_requested > low_thr)
+				low_thr = client_info->low_thr_requested;
+
+		pr_debug("threshold compared is high:%d and low:%d\n",
+				client_info->high_thr_requested,
+				client_info->low_thr_requested);
+		pr_debug("current threshold is high:%d and low:%d\n",
+							high_thr, low_thr);
+	}
+
+	/* Check which of the high_thr and low_thr got set */
+	list_for_each(thr_list,
+			&chip->sensor[dt_index].thr_list) {
+		client_info = list_entry(thr_list,
+					struct qpnp_adc_thr_client_info, list);
+		if ((client_info->state_req_copy == ADC_TM_HIGH_THR_ENABLE) ||
+			(client_info->state_req_copy ==
+						ADC_TM_HIGH_LOW_THR_ENABLE))
+			if (high_thr == client_info->high_thr_requested)
+				client_info->high_thr_set = true;
+
+		if ((client_info->state_req_copy == ADC_TM_LOW_THR_ENABLE) ||
+			(client_info->state_req_copy ==
+						ADC_TM_HIGH_LOW_THR_ENABLE))
+			if (low_thr == client_info->low_thr_requested)
+				client_info->low_thr_set = true;
+	}
+
+	rc = qpnp_adc_tm_thr_update(chip, btm_chan, high_thr, low_thr);
+	if (rc < 0)
+		pr_err("setting chan:%d threshold failed\n", btm_chan);
+
+	pr_debug("threshold written is high:%d and low:%d\n",
+							high_thr, low_thr);
+
+	return 0;
+}
+
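+/*
+ * Configure the threshold interrupts for a BTM channel and enable its
+ * measurement; for channels without a thermal zone node, the aggregated
+ * client thresholds are programmed first.
+ */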
+static int32_t qpnp_adc_tm_channel_configure(struct qpnp_adc_tm_chip *chip,
+			uint32_t btm_chan,
+			struct qpnp_vadc_chan_properties *chan_prop,
+			uint32_t amux_channel)
+{
+	int rc = 0, i = 0, chan_idx = 0;
+	bool chan_found = false, high_thr_set = false, low_thr_set = false;
+	u8 sensor_mask = 0;
+	struct qpnp_adc_thr_client_info *client_info = NULL;
+	uint32_t btm_chan_idx = 0;
+
+	while (i < chip->max_channels_available) {
+		if (chip->sensor[i].btm_channel_num == btm_chan) {
+			chan_idx = i;
+			chan_found = true;
+			i++;
+		} else
+			i++;
+	}
+
+	if (!chan_found) {
+		pr_err("Channel not found\n");
+		return -EINVAL;
+	}
+
+	rc = qpnp_adc_tm_get_btm_idx(chip, btm_chan, &btm_chan_idx);
+	if (rc < 0) {
+		pr_err("Invalid btm channel idx\n");
+		return rc;
+	}
+
+	sensor_mask = 1 << chan_idx;
+	if (!chip->sensor[chan_idx].thermal_node) {
+		/* Update low and high notification thresholds */
+		rc = qpnp_adc_tm_manage_thresholds(chip, chan_idx,
+				btm_chan);
+		if (rc < 0) {
+			pr_err("setting chan:%d threshold failed\n", btm_chan);
+			return rc;
+		}
+
+		list_for_each_entry(client_info,
+				&chip->sensor[chan_idx].thr_list, list) {
+			if (client_info->high_thr_set == true)
+				high_thr_set = true;
+			if (client_info->low_thr_set == true)
+				low_thr_set = true;
+		}
+
+		if (low_thr_set) {
+			pr_debug("low sensor mask:%x with state:%d\n",
+					sensor_mask, chan_prop->state_request);
+			/* Enable low threshold's interrupt */
+			if (!chip->adc_tm_hc)
+				rc = qpnp_adc_tm_reg_update(chip,
+					QPNP_ADC_TM_LOW_THR_INT_EN,
+					sensor_mask, true);
+			else
+				rc = qpnp_adc_tm_reg_update(chip,
+					QPNP_BTM_Mn_EN(btm_chan_idx),
+					QPNP_BTM_Mn_LOW_THR_INT_EN, true);
+			if (rc < 0) {
+				pr_err("low thr enable err:%d\n", btm_chan);
+				return rc;
+			}
+		}
+
+		if (high_thr_set) {
+			/* Enable high threshold's interrupt */
+			pr_debug("high sensor mask:%x\n", sensor_mask);
+			if (!chip->adc_tm_hc)
+				rc = qpnp_adc_tm_reg_update(chip,
+					QPNP_ADC_TM_HIGH_THR_INT_EN,
+					sensor_mask, true);
+			else
+				rc = qpnp_adc_tm_reg_update(chip,
+					QPNP_BTM_Mn_EN(btm_chan_idx),
+					QPNP_BTM_Mn_HIGH_THR_INT_EN, true);
+			if (rc < 0) {
+				pr_err("high thr enable err:%d\n", btm_chan);
+				return rc;
+			}
+		}
+	}
+
+	/* Enable corresponding BTM channel measurement */
+	if (!chip->adc_tm_hc)
+		rc = qpnp_adc_tm_reg_update(chip,
+			QPNP_ADC_TM_MULTI_MEAS_EN, sensor_mask, true);
+	else
+		rc = qpnp_adc_tm_reg_update(chip, QPNP_BTM_Mn_EN(btm_chan_idx),
+			QPNP_BTM_Mn_MEAS_EN, true);
+	if (rc < 0) {
+		pr_err("multi measurement en failed\n");
+		return rc;
+	}
+
+	return rc;
+}
+
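+/*
+ * Configuration sequence for the HC peripheral: disable the bank, set
+ * decimation and fast averaging, select the AMUX channel, program the
+ * thresholds and measurement interval, set hw-settle/calibration, then
+ * re-enable the bank and request a conversion.
+ */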
+static int32_t qpnp_adc_tm_hc_configure(struct qpnp_adc_tm_chip *chip,
+			struct qpnp_adc_amux_properties *chan_prop)
+{
+	u8 decimation = 0, fast_avg_ctl = 0;
+	u8 buf[8];
+	int rc = 0;
+	uint32_t btm_chan = 0, cal_type = 0, btm_chan_idx = 0;
+
+	/* Disable bank */
+	rc = qpnp_adc_tm_disable(chip);
+	if (rc)
+		return rc;
+
+	/* Decimation setup */
+	decimation = chan_prop->decimation;
+	rc = qpnp_adc_tm_write_reg(chip, QPNP_BTM_HC_ADC_DIG_PARAM,
+						decimation, 1);
+	if (rc < 0) {
+		pr_err("adc-tm digital parameter setup err\n");
+		return rc;
+	}
+
+	/* Fast averaging setup/enable */
+	rc = qpnp_adc_tm_read_reg(chip, QPNP_BTM_HC_FAST_AVG_CTL,
+						&fast_avg_ctl, 1);
+	if (rc < 0) {
+		pr_err("adc-tm fast-avg enable read err\n");
+		return rc;
+	}
+	fast_avg_ctl |= chan_prop->fast_avg_setup;
+	rc = qpnp_adc_tm_write_reg(chip, QPNP_BTM_HC_FAST_AVG_CTL,
+						fast_avg_ctl, 1);
+	if (rc < 0) {
+		pr_err("adc-tm fast-avg enable write err\n");
+		return rc;
+	}
+
+	/* Read block registers for respective BTM channel */
+	btm_chan = chan_prop->chan_prop->tm_channel_select;
+	rc = qpnp_adc_tm_get_btm_idx(chip, btm_chan, &btm_chan_idx);
+	if (rc < 0) {
+		pr_err("Invalid btm channel idx\n");
+		return rc;
+	}
+
+	rc = qpnp_adc_tm_read_reg(chip,
+			QPNP_BTM_Mn_ADC_CH_SEL_CTL(btm_chan_idx), buf, 8);
+	if (rc < 0) {
+		pr_err("qpnp adc configure block read failed\n");
+		return rc;
+	}
+
+	/* Update ADC channel sel */
+	rc = qpnp_adc_tm_write_reg(chip,
+			QPNP_BTM_Mn_ADC_CH_SEL_CTL(btm_chan_idx),
+				chan_prop->amux_channel, 1);
+	if (rc < 0) {
+		pr_err("adc-tm channel amux select failed\n");
+		return rc;
+	}
+
+	/* Manage thresholds */
+	rc = qpnp_adc_tm_channel_configure(chip, btm_chan,
+			chan_prop->chan_prop, chan_prop->amux_channel);
+	if (rc < 0) {
+		pr_err("adc-tm channel threshold configure failed\n");
+		return rc;
+	}
+
+	/* Measurement interval setup */
+	rc = qpnp_adc_tm_timer_interval_select(chip, btm_chan,
+						chan_prop->chan_prop);
+	if (rc < 0) {
+		pr_err("adc-tm timer select failed\n");
+		return rc;
+	}
+
+	/* Set calibration select, hw_settle delay */
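+	/* buf[6], from the 8-byte block read above, mirrors the Mn_CTL byte */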
+	cal_type |= (chan_prop->calib_type << QPNP_BTM_CTL_CAL_SEL_MASK_SHIFT);
+	buf[6] &= ~QPNP_BTM_CTL_HW_SETTLE_DELAY_MASK;
+	buf[6] |= chan_prop->hw_settle_time;
+	buf[6] &= ~QPNP_BTM_CTL_CAL_SEL;
+	buf[6] |= cal_type;
+	rc = qpnp_adc_tm_write_reg(chip, QPNP_BTM_Mn_CTL(btm_chan_idx),
+								buf[6], 1);
+	if (rc < 0) {
+		pr_err("adc-tm hw-settle, calib sel failed\n");
+		return rc;
+	}
+
+	/* Enable bank */
+	rc = qpnp_adc_tm_enable(chip);
+	if (rc)
+		return rc;
+
+	/* Request conversion */
+	rc = qpnp_adc_tm_write_reg(chip, QPNP_CONV_REQ, QPNP_CONV_REQ_SET, 1);
+	if (rc < 0) {
+		pr_err("adc-tm request conversion failed\n");
+		return rc;
+	}
+
+	return 0;
+}
+
+static int32_t qpnp_adc_tm_configure(struct qpnp_adc_tm_chip *chip,
+			struct qpnp_adc_amux_properties *chan_prop)
+{
+	u8 decimation = 0, op_cntrl = 0, mode_ctl = 0;
+	int rc = 0;
+	uint32_t btm_chan = 0;
+
+	/* Set measurement in single measurement mode */
+	mode_ctl = ADC_OP_NORMAL_MODE << QPNP_OP_MODE_SHIFT;
+	rc = qpnp_adc_tm_mode_select(chip, mode_ctl);
+	if (rc < 0) {
+		pr_err("adc-tm single mode select failed\n");
+		return rc;
+	}
+
+	/* Disable bank */
+	rc = qpnp_adc_tm_disable(chip);
+	if (rc)
+		return rc;
+
+	/* Check if a conversion is in progress */
+	rc = qpnp_adc_tm_req_sts_check(chip);
+	if (rc < 0) {
+		pr_err("adc-tm req_sts check failed\n");
+		return rc;
+	}
+
+	/* Configure AMUX channel select for the corresponding BTM channel*/
+	btm_chan = chan_prop->chan_prop->tm_channel_select;
+	rc = qpnp_adc_tm_write_reg(chip, btm_chan, chan_prop->amux_channel, 1);
+	if (rc < 0) {
+		pr_err("adc-tm channel selection err\n");
+		return rc;
+	}
+
+	/* Digital parameter setup */
+	decimation |= chan_prop->decimation <<
+				QPNP_ADC_DIG_DEC_RATIO_SEL_SHIFT;
+	rc = qpnp_adc_tm_write_reg(chip, QPNP_ADC_DIG_PARAM, decimation, 1);
+	if (rc < 0) {
+		pr_err("adc-tm digital parameter setup err\n");
+		return rc;
+	}
+
+	/* Hardware setting time */
+	rc = qpnp_adc_tm_write_reg(chip, QPNP_HW_SETTLE_DELAY,
+					chan_prop->hw_settle_time, 1);
+	if (rc < 0) {
+		pr_err("adc-tm hw settling time setup err\n");
+		return rc;
+	}
+
+	/* Fast averaging setup/enable */
+	rc = qpnp_adc_tm_fast_avg_en(chip, &chan_prop->fast_avg_setup);
+	if (rc < 0) {
+		pr_err("adc-tm fast-avg enable err\n");
+		return rc;
+	}
+
+	rc = qpnp_adc_tm_write_reg(chip, QPNP_FAST_AVG_CTL,
+				chan_prop->fast_avg_setup, 1);
+	if (rc < 0) {
+		pr_err("adc-tm fast-avg setup err\n");
+		return rc;
+	}
+
+	/* Measurement interval setup */
+	rc = qpnp_adc_tm_timer_interval_select(chip, btm_chan,
+						chan_prop->chan_prop);
+	if (rc < 0) {
+		pr_err("adc-tm timer select failed\n");
+		return rc;
+	}
+
+	/* Channel configuration setup */
+	rc = qpnp_adc_tm_channel_configure(chip, btm_chan,
+			chan_prop->chan_prop, chan_prop->amux_channel);
+	if (rc < 0) {
+		pr_err("adc-tm channel configure failed\n");
+		return rc;
+	}
+
+	/* Recurring interval measurement enable */
+	rc = qpnp_adc_tm_read_reg(chip, QPNP_ADC_MEAS_INTERVAL_OP_CTL,
+							&op_cntrl, 1);
+	if (rc < 0) {
+		pr_err("adc-tm meas interval op ctl read failed\n");
+		return rc;
+	}
+	op_cntrl |= QPNP_ADC_MEAS_INTERVAL_OP;
+	rc = qpnp_adc_tm_reg_update(chip, QPNP_ADC_MEAS_INTERVAL_OP_CTL,
+			op_cntrl, true);
+	if (rc < 0) {
+		pr_err("adc-tm meas interval op configure failed\n");
+		return rc;
+	}
+
+	/* Enable bank */
+	rc = qpnp_adc_tm_enable(chip);
+	if (rc)
+		return rc;
+
+	/* Request conversion */
+	rc = qpnp_adc_tm_write_reg(chip, QPNP_CONV_REQ, QPNP_CONV_REQ_SET, 1);
+	if (rc < 0) {
+		pr_err("adc-tm request conversion failed\n");
+		return rc;
+	}
+
+	return 0;
+}
+
+static int qpnp_adc_tm_get_mode(struct thermal_zone_device *thermal,
+			      enum thermal_device_mode *mode)
+{
+	struct qpnp_adc_tm_sensor *adc_tm = thermal->devdata;
+
+	if ((IS_ERR(adc_tm)) || qpnp_adc_tm_check_revision(
+			adc_tm->chip, adc_tm->btm_channel_num))
+		return -EINVAL;
+
+	*mode = adc_tm->mode;
+
+	return 0;
+}
+
+static int qpnp_adc_tm_set_mode(struct thermal_zone_device *thermal,
+			      enum thermal_device_mode mode)
+{
+	struct qpnp_adc_tm_sensor *adc_tm = thermal->devdata;
+	struct qpnp_adc_tm_chip *chip = adc_tm->chip;
+	int rc = 0, channel;
+	u8 sensor_mask = 0, mode_ctl = 0;
+	uint32_t btm_chan_idx = 0, btm_chan = 0;
+
+	if (qpnp_adc_tm_is_valid(chip)) {
+		pr_err("invalid device\n");
+		return -ENODEV;
+	}
+
+	if (qpnp_adc_tm_check_revision(chip, adc_tm->btm_channel_num))
+		return -EINVAL;
+
+	mutex_lock(&chip->adc->adc_lock);
+
+	btm_chan = adc_tm->btm_channel_num;
+	rc = qpnp_adc_tm_get_btm_idx(chip, btm_chan, &btm_chan_idx);
+	if (rc < 0) {
+		pr_err("Invalid btm channel idx\n");
+		goto fail;
+	}
+
+	if (mode == THERMAL_DEVICE_ENABLED) {
+		chip->adc->amux_prop->amux_channel =
+					adc_tm->vadc_channel_num;
+		channel = adc_tm->sensor_num;
+		chip->adc->amux_prop->decimation =
+			chip->adc->adc_channels[channel].adc_decimation;
+		chip->adc->amux_prop->hw_settle_time =
+			chip->adc->adc_channels[channel].hw_settle_time;
+		chip->adc->amux_prop->fast_avg_setup =
+			chip->adc->adc_channels[channel].fast_avg_setup;
+		chip->adc->amux_prop->mode_sel =
+			ADC_OP_MEASUREMENT_INTERVAL << QPNP_OP_MODE_SHIFT;
+		chip->adc->amux_prop->chan_prop->low_thr = adc_tm->low_thr;
+		chip->adc->amux_prop->chan_prop->high_thr = adc_tm->high_thr;
+		chip->adc->amux_prop->chan_prop->tm_channel_select =
+			adc_tm->btm_channel_num;
+		chip->adc->amux_prop->calib_type =
+			chip->adc->adc_channels[channel].calib_type;
+
+		if (!chip->adc_tm_hc) {
+			rc = qpnp_adc_tm_configure(chip, chip->adc->amux_prop);
+			if (rc) {
+				pr_err("adc-tm configure failed with %d\n", rc);
+				goto fail;
+			}
+		} else {
+			rc = qpnp_adc_tm_hc_configure(chip,
+							chip->adc->amux_prop);
+			if (rc) {
+				pr_err("hc configure failed with %d\n", rc);
+				goto fail;
+			}
+		}
+	} else if (mode == THERMAL_DEVICE_DISABLED) {
+		sensor_mask = 1 << adc_tm->sensor_num;
+
+		if (!chip->adc_tm_hc) {
+			mode_ctl = ADC_OP_NORMAL_MODE << QPNP_OP_MODE_SHIFT;
+			rc = qpnp_adc_tm_mode_select(chip, mode_ctl);
+			if (rc < 0) {
+				pr_err("adc-tm single mode select failed\n");
+				goto fail;
+			}
+		}
+
+		/* Disable bank */
+		rc = qpnp_adc_tm_disable(chip);
+		if (rc < 0) {
+			pr_err("adc-tm disable failed\n");
+			goto fail;
+		}
+
+		if (!chip->adc_tm_hc) {
+			/* Check if a conversion is in progress */
+			rc = qpnp_adc_tm_req_sts_check(chip);
+			if (rc < 0) {
+				pr_err("adc-tm req_sts check failed\n");
+				goto fail;
+			}
+
+			rc = qpnp_adc_tm_reg_update(chip,
+				QPNP_ADC_TM_MULTI_MEAS_EN, sensor_mask, false);
+			if (rc < 0) {
+				pr_err("multi measurement update failed\n");
+				goto fail;
+			}
+		} else {
+			rc = qpnp_adc_tm_reg_update(chip,
+				QPNP_BTM_Mn_EN(btm_chan_idx),
+				QPNP_BTM_Mn_MEAS_EN, false);
+			if (rc < 0) {
+				pr_err("multi measurement disable failed\n");
+				goto fail;
+			}
+		}
+
+		rc = qpnp_adc_tm_enable_if_channel_meas(chip);
+		if (rc < 0) {
+			pr_err("re-enabling measurement failed\n");
+			goto fail;
+		}
+	}
+
+	adc_tm->mode = mode;
+
+fail:
+	mutex_unlock(&chip->adc->adc_lock);
+
+	return rc;
+}
+
+static int qpnp_adc_tm_get_trip_type(struct thermal_zone_device *thermal,
+				   int trip, enum thermal_trip_type *type)
+{
+	struct qpnp_adc_tm_sensor *adc_tm = thermal->devdata;
+	struct qpnp_adc_tm_chip *chip = adc_tm->chip;
+
+	if (qpnp_adc_tm_is_valid(chip))
+		return -ENODEV;
+
+	if (qpnp_adc_tm_check_revision(chip, adc_tm->btm_channel_num))
+		return -EINVAL;
+
+	switch (trip) {
+	case ADC_TM_TRIP_HIGH_WARM:
+		*type = THERMAL_TRIP_CONFIGURABLE_HI;
+		break;
+	case ADC_TM_TRIP_LOW_COOL:
+		*type = THERMAL_TRIP_CONFIGURABLE_LOW;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int qpnp_adc_tm_get_trip_temp(struct thermal_zone_device *thermal,
+				   int trip, int *temp)
+{
+	struct qpnp_adc_tm_sensor *adc_tm_sensor = thermal->devdata;
+	struct qpnp_adc_tm_chip *chip = adc_tm_sensor->chip;
+	int64_t result = 0;
+	u8 trip_cool_thr0, trip_cool_thr1, trip_warm_thr0, trip_warm_thr1;
+	unsigned int reg;
+	int rc = 0;
+	uint16_t reg_low_thr_lsb, reg_low_thr_msb;
+	uint16_t reg_high_thr_lsb, reg_high_thr_msb;
+	uint32_t btm_chan_idx = 0, btm_chan = 0;
+
+	if (qpnp_adc_tm_is_valid(chip))
+		return -ENODEV;
+
+	if (qpnp_adc_tm_check_revision(chip, adc_tm_sensor->btm_channel_num))
+		return -EINVAL;
+
+	btm_chan = adc_tm_sensor->btm_channel_num;
+	rc = qpnp_adc_tm_get_btm_idx(chip, btm_chan, &btm_chan_idx);
+	if (rc < 0) {
+		pr_err("Invalid btm channel idx\n");
+		return rc;
+	}
+
+	if (!chip->adc_tm_hc) {
+		reg_low_thr_lsb = adc_tm_data[btm_chan_idx].low_thr_lsb_addr;
+		reg_low_thr_msb = adc_tm_data[btm_chan_idx].low_thr_msb_addr;
+		reg_high_thr_lsb = adc_tm_data[btm_chan_idx].high_thr_lsb_addr;
+		reg_high_thr_msb = adc_tm_data[btm_chan_idx].high_thr_msb_addr;
+	} else {
+		reg_low_thr_lsb = QPNP_BTM_Mn_LOW_THR0(btm_chan_idx);
+		reg_low_thr_msb = QPNP_BTM_Mn_LOW_THR1(btm_chan_idx);
+		reg_high_thr_lsb = QPNP_BTM_Mn_HIGH_THR0(btm_chan_idx);
+		reg_high_thr_msb = QPNP_BTM_Mn_HIGH_THR1(btm_chan_idx);
+	}
+
+	switch (trip) {
+	case ADC_TM_TRIP_HIGH_WARM:
+		rc = qpnp_adc_tm_read_reg(chip, reg_low_thr_lsb,
+						&trip_warm_thr0, 1);
+		if (rc) {
+			pr_err("adc-tm low_thr_lsb err\n");
+			return rc;
+		}
+
+		rc = qpnp_adc_tm_read_reg(chip, reg_low_thr_msb,
+						&trip_warm_thr1, 1);
+		if (rc) {
+			pr_err("adc-tm low_thr_msb err\n");
+			return rc;
+		}
+		reg = (trip_warm_thr1 << 8) | trip_warm_thr0;
+		break;
+	case ADC_TM_TRIP_LOW_COOL:
+		rc = qpnp_adc_tm_read_reg(chip, reg_high_thr_lsb,
+						&trip_cool_thr0, 1);
+		if (rc) {
+			pr_err("adc-tm high_thr_lsb err\n");
+			return rc;
+		}
+
+		rc = qpnp_adc_tm_read_reg(chip, reg_high_thr_msb,
+						&trip_cool_thr1, 1);
+		if (rc) {
+			pr_err("adc-tm high_thr_msb err\n");
+			return rc;
+		}
+		reg = (trip_cool_thr1 << 8) | trip_cool_thr0;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	rc = qpnp_adc_tm_scale_voltage_therm_pu2(chip->vadc_dev,
+					chip->adc->adc_prop, reg, &result);
+	if (rc < 0) {
+		pr_err("Failed to lookup the therm thresholds\n");
+		return rc;
+	}
+
+	*temp = result;
+
+	return 0;
+}
+
+static int qpnp_adc_tm_set_trip_temp(struct thermal_zone_device *thermal,
+				   int trip, int temp)
+{
+	struct qpnp_adc_tm_sensor *adc_tm = thermal->devdata;
+	struct qpnp_adc_tm_chip *chip = adc_tm->chip;
+	struct qpnp_adc_tm_config tm_config;
+	u8 trip_cool_thr0, trip_cool_thr1, trip_warm_thr0, trip_warm_thr1;
+	uint16_t reg_low_thr_lsb, reg_low_thr_msb;
+	uint16_t reg_high_thr_lsb, reg_high_thr_msb;
+	int rc = 0;
+	uint32_t btm_chan = 0, btm_chan_idx = 0;
+
+	if (qpnp_adc_tm_is_valid(chip))
+		return -ENODEV;
+
+	if (qpnp_adc_tm_check_revision(chip, adc_tm->btm_channel_num))
+		return -EINVAL;
+
+	tm_config.channel = adc_tm->vadc_channel_num;
+	tm_config.high_thr_temp = tm_config.low_thr_temp = 0;
+	switch (trip) {
+	case ADC_TM_TRIP_HIGH_WARM:
+		tm_config.high_thr_temp = temp;
+		break;
+	case ADC_TM_TRIP_LOW_COOL:
+		tm_config.low_thr_temp = temp;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	pr_debug("requested a high - %d and low - %d with trip - %d\n",
+			tm_config.high_thr_temp, tm_config.low_thr_temp, trip);
+	rc = qpnp_adc_tm_scale_therm_voltage_pu2(chip->vadc_dev,
+				chip->adc->adc_prop, &tm_config);
+	if (rc < 0) {
+		pr_err("Failed to lookup the adc-tm thresholds\n");
+		return rc;
+	}
+
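+	/*
+	 * The scaled thresholds are 16-bit ADC codes; the shifts below split
+	 * them into LSB/MSB bytes. Because the thermistor voltage falls as
+	 * temperature rises, the warm trip programs the low voltage threshold
+	 * registers and the cool trip the high ones.
+	 */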
+	trip_warm_thr0 = ((tm_config.low_thr_voltage << 24) >> 24);
+	trip_warm_thr1 = ((tm_config.low_thr_voltage << 16) >> 24);
+	trip_cool_thr0 = ((tm_config.high_thr_voltage << 24) >> 24);
+	trip_cool_thr1 = ((tm_config.high_thr_voltage << 16) >> 24);
+
+	pr_debug("low_thr:0x%llx, high_thr:0x%llx\n", tm_config.low_thr_voltage,
+				tm_config.high_thr_voltage);
+
+	btm_chan = adc_tm->btm_channel_num;
+	rc = qpnp_adc_tm_get_btm_idx(chip, btm_chan, &btm_chan_idx);
+	if (rc < 0) {
+		pr_err("Invalid btm channel idx\n");
+		return rc;
+	}
+
+	if (!chip->adc_tm_hc) {
+		reg_low_thr_lsb = adc_tm_data[btm_chan_idx].low_thr_lsb_addr;
+		reg_low_thr_msb = adc_tm_data[btm_chan_idx].low_thr_msb_addr;
+		reg_high_thr_lsb = adc_tm_data[btm_chan_idx].high_thr_lsb_addr;
+		reg_high_thr_msb = adc_tm_data[btm_chan_idx].high_thr_msb_addr;
+	} else {
+		reg_low_thr_lsb = QPNP_BTM_Mn_LOW_THR0(btm_chan_idx);
+		reg_low_thr_msb = QPNP_BTM_Mn_LOW_THR1(btm_chan_idx);
+		reg_high_thr_lsb = QPNP_BTM_Mn_HIGH_THR0(btm_chan_idx);
+		reg_high_thr_msb = QPNP_BTM_Mn_HIGH_THR1(btm_chan_idx);
+	}
+
+	switch (trip) {
+	case ADC_TM_TRIP_HIGH_WARM:
+		rc = qpnp_adc_tm_write_reg(chip, reg_low_thr_lsb,
+						trip_cool_thr0, 1);
+		if (rc) {
+			pr_err("adc-tm write threshold err\n");
+			return rc;
+		}
+
+		rc = qpnp_adc_tm_write_reg(chip, reg_low_thr_msb,
+						trip_cool_thr1, 1);
+		if (rc) {
+			pr_err("adc-tm write threshold err\n");
+			return rc;
+		}
+		adc_tm->low_thr = tm_config.high_thr_voltage;
+		break;
+	case ADC_TM_TRIP_LOW_COOL:
+		rc = qpnp_adc_tm_write_reg(chip, reg_high_thr_lsb,
+						trip_warm_thr0, 1);
+		if (rc) {
+			pr_err("adc-tm write threshold err\n");
+			return rc;
+		}
+
+		rc = qpnp_adc_tm_write_reg(chip, reg_high_thr_msb,
+						trip_warm_thr1, 1);
+		if (rc) {
+			pr_err("adc-tm write threshold err\n");
+			return rc;
+		}
+		adc_tm->high_thr = tm_config.low_thr_voltage;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static void notify_battery_therm(struct qpnp_adc_tm_sensor *adc_tm)
+{
+	struct qpnp_adc_thr_client_info *client_info = NULL;
+
+	list_for_each_entry(client_info,
+			&adc_tm->thr_list, list) {
+		/* Batt therm's warm temperature translates to low voltage */
+		if (client_info->notify_low_thr) {
+			/* HIGH_STATE = WARM_TEMP for battery client */
+			client_info->btm_param->threshold_notification(
+			ADC_TM_WARM_STATE, client_info->btm_param->btm_ctx);
+			client_info->notify_low_thr = false;
+		}
+
+		/* Batt therm's cool temperature translates to high voltage */
+		if (client_info->notify_high_thr) {
+			/* LOW_STATE = COOL_TEMP for battery client */
+			client_info->btm_param->threshold_notification(
+			ADC_TM_COOL_STATE, client_info->btm_param->btm_ctx);
+			client_info->notify_high_thr = false;
+		}
+	}
+
+	return;
+}
+
+static void notify_clients(struct qpnp_adc_tm_sensor *adc_tm)
+{
+	struct qpnp_adc_thr_client_info *client_info = NULL;
+
+	list_for_each_entry(client_info,
+			&adc_tm->thr_list, list) {
+		/* For non batt therm clients */
+		if (client_info->notify_low_thr) {
+			if (client_info->btm_param->threshold_notification
+								!= NULL) {
+				pr_debug("notify kernel with low state\n");
+				client_info->btm_param->threshold_notification(
+					ADC_TM_LOW_STATE,
+					client_info->btm_param->btm_ctx);
+				client_info->notify_low_thr = false;
+			}
+		}
+
+		if (client_info->notify_high_thr) {
+			if (client_info->btm_param->threshold_notification
+								!= NULL) {
+				pr_debug("notify kernel with high state\n");
+				client_info->btm_param->threshold_notification(
+					ADC_TM_HIGH_STATE,
+					client_info->btm_param->btm_ctx);
+				client_info->notify_high_thr = false;
+			}
+		}
+	}
+
+	return;
+}
+
+static void notify_adc_tm_fn(struct work_struct *work)
+{
+	struct qpnp_adc_tm_sensor *adc_tm = container_of(work,
+		struct qpnp_adc_tm_sensor, work);
+	struct qpnp_adc_tm_chip *chip = adc_tm->chip;
+
+	if (adc_tm->thermal_node) {
+		sysfs_notify(&adc_tm->tz_dev->device.kobj,
+					NULL, "type");
+		pr_debug("notifying uspace client\n");
+	} else {
+		if (adc_tm->scale_type == SCALE_RBATT_THERM)
+			notify_battery_therm(adc_tm);
+		else
+			notify_clients(adc_tm);
+	}
+
+	atomic_dec(&chip->wq_cnt);
+	return;
+}
+
+static int qpnp_adc_tm_activate_trip_type(struct thermal_zone_device *thermal,
+			int trip, enum thermal_trip_activation_mode mode)
+{
+	struct qpnp_adc_tm_sensor *adc_tm = thermal->devdata;
+	struct qpnp_adc_tm_chip *chip = adc_tm->chip;
+	int rc = 0, sensor_mask = 0;
+	u8 thr_int_en = 0;
+	bool state = false;
+	uint32_t btm_chan_idx = 0, btm_chan = 0;
+
+	if (qpnp_adc_tm_is_valid(chip))
+		return -ENODEV;
+
+	if (qpnp_adc_tm_check_revision(chip, adc_tm->btm_channel_num))
+		return -EINVAL;
+
+	if (mode == THERMAL_TRIP_ACTIVATION_ENABLED)
+		state = true;
+
+	sensor_mask = 1 << adc_tm->sensor_num;
+
+	pr_debug("Sensor number:%x with state:%d\n",
+					adc_tm->sensor_num, state);
+
+	btm_chan = adc_tm->btm_channel_num;
+	rc = qpnp_adc_tm_get_btm_idx(chip, btm_chan, &btm_chan_idx);
+	if (rc < 0) {
+		pr_err("Invalid btm channel idx\n");
+		return rc;
+	}
+
+	switch (trip) {
+	case ADC_TM_TRIP_HIGH_WARM:
+		/* low_thr (lower voltage) for higher temp */
+		thr_int_en = adc_tm_data[btm_chan_idx].low_thr_int_chan_en;
+		if (!chip->adc_tm_hc)
+			rc = qpnp_adc_tm_reg_update(chip,
+				QPNP_ADC_TM_LOW_THR_INT_EN,
+				sensor_mask, state);
+		else
+			rc = qpnp_adc_tm_reg_update(chip,
+				QPNP_BTM_Mn_EN(btm_chan_idx),
+				QPNP_BTM_Mn_LOW_THR_INT_EN, state);
+		if (rc)
+			pr_err("channel:%x failed\n", btm_chan);
+		break;
+	case ADC_TM_TRIP_LOW_COOL:
+		/* high_thr (higher voltage) for cooler temp */
+		thr_int_en = adc_tm_data[btm_chan_idx].high_thr_int_chan_en;
+		if (!chip->adc_tm_hc)
+			rc = qpnp_adc_tm_reg_update(chip,
+				QPNP_ADC_TM_HIGH_THR_INT_EN,
+				sensor_mask, state);
+		else
+			rc = qpnp_adc_tm_reg_update(chip,
+				QPNP_BTM_Mn_EN(btm_chan_idx),
+				QPNP_BTM_Mn_HIGH_THR_INT_EN, state);
+		if (rc)
+			pr_err("channel:%x failed\n", btm_chan);
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return rc;
+}
+
+static int qpnp_adc_tm_recalib_request_check(struct qpnp_adc_tm_chip *chip,
+			int sensor_num, u8 status_high, u8 *notify_check)
+{
+	int rc = 0;
+	u8 sensor_mask = 0, mode_ctl = 0;
+	int32_t old_thr = 0, new_thr = 0;
+	uint32_t channel, btm_chan_num, scale_type;
+	struct qpnp_vadc_result result;
+	struct qpnp_adc_thr_client_info *client_info = NULL;
+	struct list_head *thr_list;
+	bool status = false;
+
+	if (!chip->adc_tm_recalib_check) {
+		*notify_check = 1;
+		return rc;
+	}
+
+	list_for_each(thr_list, &chip->sensor[sensor_num].thr_list) {
+		client_info = list_entry(thr_list,
+				struct qpnp_adc_thr_client_info, list);
+		channel = client_info->btm_param->channel;
+		btm_chan_num = chip->sensor[sensor_num].btm_channel_num;
+		sensor_mask = 1 << sensor_num;
+
+		rc = qpnp_vadc_read(chip->vadc_dev, channel, &result);
+		if (rc < 0) {
+			pr_err("failure to read vadc channel=%d\n",
+					client_info->btm_param->channel);
+			goto fail;
+		}
+		new_thr = result.physical;
+
+		if (status_high)
+			old_thr = client_info->btm_param->high_thr;
+		else
+			old_thr = client_info->btm_param->low_thr;
+
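+		/*
+		 * Notify only if a fresh VADC reading still lies beyond the
+		 * threshold that tripped; otherwise re-arm the measurement.
+		 */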
+		if (new_thr > old_thr)
+			status = (status_high) ? true : false;
+		else
+			status = (status_high) ? false : true;
+
+		pr_debug(
+			"recalib:sen=%d, new_thr=%d, new_thr_adc_code=0x%x, old_thr=%d status=%d valid_status=%d\n",
+			sensor_num, new_thr, result.adc_code,
+			old_thr, status_high, status);
+
+		rc = qpnp_adc_tm_read_thr_value(chip, btm_chan_num);
+		if (rc < 0) {
+			pr_err("adc-tm thresholds read failed\n");
+			goto fail;
+		}
+
+		if (status) {
+			*notify_check = 1;
+			pr_debug("Client can be notified\n");
+			return rc;
+		}
+
+		pr_debug("Client cannot be notified, restarting measurement\n");
+		/* Set measurement in single measurement mode */
+		mode_ctl = ADC_OP_NORMAL_MODE << QPNP_OP_MODE_SHIFT;
+		rc = qpnp_adc_tm_mode_select(chip, mode_ctl);
+		if (rc < 0) {
+			pr_err("adc-tm single mode select failed\n");
+			goto fail;
+		}
+
+		/* Disable bank */
+		rc = qpnp_adc_tm_disable(chip);
+		if (rc < 0) {
+			pr_err("adc-tm disable failed\n");
+			goto fail;
+		}
+
+		/* Check if a conversion is in progress */
+		rc = qpnp_adc_tm_req_sts_check(chip);
+		if (rc < 0) {
+			pr_err("adc-tm req_sts check failed\n");
+			goto fail;
+		}
+
+		rc = qpnp_adc_tm_reg_update(chip, QPNP_ADC_TM_LOW_THR_INT_EN,
+							sensor_mask, false);
+		if (rc < 0) {
+			pr_err("low threshold int write failed\n");
+			goto fail;
+		}
+
+		rc = qpnp_adc_tm_reg_update(chip, QPNP_ADC_TM_HIGH_THR_INT_EN,
+							sensor_mask, false);
+		if (rc < 0) {
+			pr_err("high threshold int disable failed\n");
+			goto fail;
+		}
+
+		rc = qpnp_adc_tm_reg_update(chip, QPNP_ADC_TM_MULTI_MEAS_EN,
+							sensor_mask, false);
+		if (rc < 0) {
+			pr_err("multi measurement disable failed\n");
+			goto fail;
+		}
+
+		/* restart measurement */
+		scale_type = chip->sensor[sensor_num].scale_type;
+		chip->adc->amux_prop->amux_channel = channel;
+		chip->adc->amux_prop->decimation =
+			chip->adc->adc_channels[sensor_num].adc_decimation;
+		chip->adc->amux_prop->hw_settle_time =
+			chip->adc->adc_channels[sensor_num].hw_settle_time;
+		chip->adc->amux_prop->fast_avg_setup =
+			chip->adc->adc_channels[sensor_num].fast_avg_setup;
+		chip->adc->amux_prop->mode_sel =
+			ADC_OP_MEASUREMENT_INTERVAL << QPNP_OP_MODE_SHIFT;
+		adc_tm_rscale_fn[scale_type].chan(chip->vadc_dev,
+				client_info->btm_param,
+				&chip->adc->amux_prop->chan_prop->low_thr,
+				&chip->adc->amux_prop->chan_prop->high_thr);
+		qpnp_adc_tm_add_to_list(chip, sensor_num,
+				client_info->btm_param,
+				chip->adc->amux_prop->chan_prop);
+		chip->adc->amux_prop->chan_prop->tm_channel_select =
+				chip->sensor[sensor_num].btm_channel_num;
+		chip->adc->amux_prop->chan_prop->state_request =
+				client_info->btm_param->state_request;
+
+		rc = qpnp_adc_tm_configure(chip, chip->adc->amux_prop);
+		if (rc) {
+			pr_err("adc-tm configure failed with %d\n", rc);
+			goto fail;
+		}
+		*notify_check = 0;
+		pr_debug("BTM channel reconfigured for measurement\n");
+	}
+fail:
+	return rc;
+}
+
+static int qpnp_adc_tm_disable_rearm_high_thresholds(
+			struct qpnp_adc_tm_chip *chip, int sensor_num)
+{
+	struct qpnp_adc_thr_client_info *client_info = NULL;
+	struct list_head *thr_list;
+	uint32_t btm_chan_num = 0;
+	u8 sensor_mask = 0, notify_check = 0;
+	int rc = 0;
+
+	btm_chan_num = chip->sensor[sensor_num].btm_channel_num;
+	pr_debug("high:sen:%d, hs:0x%x, ls:0x%x, meas_en:0x%x\n",
+		sensor_num, chip->th_info.adc_tm_high_enable,
+		chip->th_info.adc_tm_low_enable,
+		chip->th_info.qpnp_adc_tm_meas_en);
+	if (!chip->sensor[sensor_num].thermal_node) {
+		/*
+		 * For non thermal registered clients such as usb_id,
+		 * vbatt, pmic_therm
+		 */
+		sensor_mask = 1 << sensor_num;
+		pr_debug("non thermal node - mask:%x\n", sensor_mask);
+		rc = qpnp_adc_tm_recalib_request_check(chip,
+				sensor_num, true, &notify_check);
+		if (rc < 0 || !notify_check) {
+			pr_debug("Calib recheck re-armed rc=%d\n", rc);
+			chip->th_info.adc_tm_high_enable = 0;
+			return rc;
+		}
+	} else {
+		/*
+		 * Uses the thermal sysfs registered device to disable
+		 * the corresponding high voltage threshold which
+		 * is triggered by low temp
+		 */
+		sensor_mask = 1 << sensor_num;
+		pr_debug("thermal node with mask:%x\n", sensor_mask);
+		rc = qpnp_adc_tm_activate_trip_type(
+			chip->sensor[sensor_num].tz_dev,
+			ADC_TM_TRIP_LOW_COOL,
+			THERMAL_TRIP_ACTIVATION_DISABLED);
+		if (rc < 0) {
+			pr_err("notify error:%d\n", sensor_num);
+			return rc;
+		}
+	}
+	list_for_each(thr_list, &chip->sensor[sensor_num].thr_list) {
+		client_info = list_entry(thr_list,
+				struct qpnp_adc_thr_client_info, list);
+		if (client_info->high_thr_set) {
+			client_info->high_thr_set = false;
+			client_info->notify_high_thr = true;
+			if (client_info->state_req_copy ==
+					ADC_TM_HIGH_LOW_THR_ENABLE)
+				client_info->state_req_copy =
+						ADC_TM_LOW_THR_ENABLE;
+			else
+				client_info->state_req_copy =
+						ADC_TM_HIGH_THR_DISABLE;
+		}
+	}
+	qpnp_adc_tm_manage_thresholds(chip, sensor_num, btm_chan_num);
+
+	if (!chip->adc_tm_hc) {
+		rc = qpnp_adc_tm_reg_update(chip,
+			QPNP_ADC_TM_MULTI_MEAS_EN,
+			sensor_mask, false);
+		if (rc < 0) {
+			pr_err("multi meas disable failed\n");
+			return rc;
+		}
+	} else {
+		rc = qpnp_adc_tm_reg_update(chip,
+			QPNP_BTM_Mn_EN(sensor_num),
+			QPNP_BTM_Mn_MEAS_EN, false);
+		if (rc < 0) {
+			pr_err("multi meas disable failed\n");
+			return rc;
+		}
+	}
+
+	rc = qpnp_adc_tm_enable_if_channel_meas(chip);
+	if (rc < 0) {
+		pr_err("re-enabling measurement failed\n");
+		return rc;
+	}
+
+	queue_work(chip->sensor[sensor_num].req_wq,
+				&chip->sensor[sensor_num].work);
+
+	return rc;
+}
+
+static int qpnp_adc_tm_disable_rearm_low_thresholds(
+			struct qpnp_adc_tm_chip *chip, int sensor_num)
+{
+	struct qpnp_adc_thr_client_info *client_info = NULL;
+	struct list_head *thr_list;
+	uint32_t btm_chan_num = 0;
+	u8 sensor_mask = 0, notify_check = 0;
+	int rc = 0;
+
+	btm_chan_num = chip->sensor[sensor_num].btm_channel_num;
+	pr_debug("low:sen:%d, hs:0x%x, ls:0x%x, meas_en:0x%x\n",
+		sensor_num, chip->th_info.adc_tm_high_enable,
+		chip->th_info.adc_tm_low_enable,
+		chip->th_info.qpnp_adc_tm_meas_en);
+	if (!chip->sensor[sensor_num].thermal_node) {
+		/*
+		 * For non thermal registered clients such as usb_id,
+		 * vbatt, pmic_therm
+		 */
+		sensor_mask = 1 << sensor_num;
+		pr_debug("non thermal node - mask:%x\n", sensor_mask);
+		rc = qpnp_adc_tm_recalib_request_check(chip,
+				sensor_num, false, &notify_check);
+		if (rc < 0 || !notify_check) {
+			pr_debug("Calib recheck re-armed rc=%d\n", rc);
+			chip->th_info.adc_tm_low_enable = 0;
+			return rc;
+		}
+		rc = qpnp_adc_tm_reg_update(chip,
+			QPNP_ADC_TM_LOW_THR_INT_EN,
+			sensor_mask, false);
+		if (rc < 0) {
+			pr_err("low threshold int disable failed\n");
+			return rc;
+		}
+	} else {
+		/*
+		 * Uses the thermal sysfs registered device to disable
+		 * the corresponding high voltage threshold which
+		 * is triggered by low temp
+		 */
+		sensor_mask = 1 << sensor_num;
+		pr_debug("thermal node with mask:%x\n", sensor_mask);
+		rc = qpnp_adc_tm_activate_trip_type(
+			chip->sensor[sensor_num].tz_dev,
+			ADC_TM_TRIP_HIGH_WARM,
+			THERMAL_TRIP_ACTIVATION_DISABLED);
+		if (rc < 0) {
+			pr_err("notify error:%d\n", sensor_num);
+			return rc;
+		}
+	}
+	list_for_each(thr_list, &chip->sensor[sensor_num].thr_list) {
+		client_info = list_entry(thr_list,
+				struct qpnp_adc_thr_client_info, list);
+		if (client_info->low_thr_set) {
+			client_info->low_thr_set = false;
+			client_info->notify_low_thr = true;
+			if (client_info->state_req_copy ==
+					ADC_TM_HIGH_LOW_THR_ENABLE)
+				client_info->state_req_copy =
+						ADC_TM_HIGH_THR_ENABLE;
+			else
+				client_info->state_req_copy =
+						ADC_TM_LOW_THR_DISABLE;
+		}
+	}
+	qpnp_adc_tm_manage_thresholds(chip, sensor_num, btm_chan_num);
+
+	if (!chip->adc_tm_hc) {
+		rc = qpnp_adc_tm_reg_update(chip,
+			QPNP_ADC_TM_MULTI_MEAS_EN,
+			sensor_mask, false);
+		if (rc < 0) {
+			pr_err("multi meas disable failed\n");
+			return rc;
+		}
+	} else {
+		rc = qpnp_adc_tm_reg_update(chip,
+			QPNP_BTM_Mn_EN(sensor_num),
+			QPNP_BTM_Mn_MEAS_EN, false);
+		if (rc < 0) {
+			pr_err("multi meas disable failed\n");
+			return rc;
+		}
+	}
+
+	rc = qpnp_adc_tm_enable_if_channel_meas(chip);
+	if (rc < 0) {
+		pr_err("re-enabling measurement failed\n");
+		return rc;
+	}
+
+	queue_work(chip->sensor[sensor_num].req_wq,
+				&chip->sensor[sensor_num].work);
+
+	return rc;
+}
+
+static int qpnp_adc_tm_read_status(struct qpnp_adc_tm_chip *chip)
+{
+	int rc = 0, sensor_num = 0;
+
+	if (qpnp_adc_tm_is_valid(chip))
+		return -ENODEV;
+
+	pr_debug("%s\n", __func__);
+
+	mutex_lock(&chip->adc->adc_lock);
+
+	if (!chip->adc_tm_hc) {
+		rc = qpnp_adc_tm_req_sts_check(chip);
+		if (rc) {
+			pr_err("adc-tm req sts check failed with %d\n", rc);
+			goto fail;
+		}
+	}
+
+	while (sensor_num < chip->max_channels_available) {
+		if (chip->sensor[sensor_num].high_thr_triggered) {
+			rc = qpnp_adc_tm_disable_rearm_high_thresholds(
+					chip, sensor_num);
+			if (rc) {
+				pr_err("rearm threshold failed\n");
+				goto fail;
+			}
+			chip->sensor[sensor_num].high_thr_triggered = false;
+		}
+		sensor_num++;
+	}
+
+	sensor_num = 0;
+	while (sensor_num < chip->max_channels_available) {
+		if (chip->sensor[sensor_num].low_thr_triggered) {
+			rc = qpnp_adc_tm_disable_rearm_low_thresholds(
+					chip, sensor_num);
+			if (rc) {
+				pr_err("rearm threshold failed\n");
+				goto fail;
+			}
+			chip->sensor[sensor_num].low_thr_triggered = false;
+		}
+		sensor_num++;
+	}
+
+fail:
+	mutex_unlock(&chip->adc->adc_lock);
+
+	if (rc < 0 || (!chip->th_info.adc_tm_high_enable &&
+					!chip->th_info.adc_tm_low_enable))
+		atomic_dec(&chip->wq_cnt);
+
+	return rc;
+}
+
+static void qpnp_adc_tm_high_thr_work(struct work_struct *work)
+{
+	struct qpnp_adc_tm_chip *chip = container_of(work,
+			struct qpnp_adc_tm_chip, trigger_high_thr_work);
+	int rc;
+
+	/* disable the vote if applicable */
+	if (chip->adc_vote_enable && chip->adc->hkadc_ldo &&
+					chip->adc->hkadc_ldo_ok) {
+		qpnp_adc_disable_voltage(chip->adc);
+		chip->adc_vote_enable = false;
+	}
+
+	pr_debug("thr:0x%x\n", chip->th_info.adc_tm_high_enable);
+
+	rc = qpnp_adc_tm_read_status(chip);
+	if (rc < 0)
+		pr_err("adc-tm high thr work failed\n");
+
+	return;
+}
+
+static irqreturn_t qpnp_adc_tm_high_thr_isr(int irq, void *data)
+{
+	struct qpnp_adc_tm_chip *chip = data;
+	u8 mode_ctl = 0, status1 = 0, sensor_mask = 0;
+	int rc = 0, sensor_notify_num = 0, i = 0, sensor_num = 0;
+
+	mode_ctl = ADC_OP_NORMAL_MODE << QPNP_OP_MODE_SHIFT;
+	/* Set measurement in single measurement mode */
+	qpnp_adc_tm_mode_select(chip, mode_ctl);
+
+	qpnp_adc_tm_disable(chip);
+
+	rc = qpnp_adc_tm_read_reg(chip, QPNP_ADC_TM_STATUS1, &status1, 1);
+	if (rc) {
+		pr_err("adc-tm read status1 failed\n");
+		return IRQ_HANDLED;
+	}
+
+	rc = qpnp_adc_tm_read_reg(chip, QPNP_ADC_TM_STATUS_HIGH,
+					&chip->th_info.status_high, 1);
+	if (rc) {
+		pr_err("adc-tm read status high failed with %d\n", rc);
+		return IRQ_HANDLED;
+	}
+
+	rc = qpnp_adc_tm_read_reg(chip, QPNP_ADC_TM_HIGH_THR_INT_EN,
+				&chip->th_info.adc_tm_high_thr_set, 1);
+	if (rc) {
+		pr_err("adc-tm read high thr failed with %d\n", rc);
+		return IRQ_HANDLED;
+	}
+
+	/*
+	 * Check which interrupt threshold is lower and measure against the
+	 * enabled channel.
+	 */
+	rc = qpnp_adc_tm_read_reg(chip, QPNP_ADC_TM_MULTI_MEAS_EN,
+				&chip->th_info.qpnp_adc_tm_meas_en, 1);
+	if (rc) {
+		pr_err("adc-tm read multi meas en failed with %d\n", rc);
+		return IRQ_HANDLED;
+	}
+
+	chip->th_info.adc_tm_high_enable = chip->th_info.qpnp_adc_tm_meas_en &
+						chip->th_info.status_high;
+	chip->th_info.adc_tm_high_enable &= chip->th_info.adc_tm_high_thr_set;
+
+	sensor_notify_num = chip->th_info.adc_tm_high_enable;
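+	/* The loop leaves sensor_num at the highest-numbered tripped sensor */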
+	while (i < chip->max_channels_available) {
+		if ((sensor_notify_num & 0x1) == 1)
+			sensor_num = i;
+		sensor_notify_num >>= 1;
+		i++;
+	}
+
+	if (!chip->sensor[sensor_num].thermal_node) {
+		sensor_mask = 1 << sensor_num;
+		rc = qpnp_adc_tm_reg_update(chip,
+			QPNP_ADC_TM_HIGH_THR_INT_EN,
+			sensor_mask, false);
+		if (rc < 0) {
+			pr_err("high threshold int disable failed\n");
+			return IRQ_HANDLED;
+		}
+	} else {
+		/*
+		 * Uses the thermal sysfs registered device to disable
+		 * the corresponding high voltage threshold which
+		 * is triggered by low temp
+		 */
+		pr_debug("thermal node for sensor:%d\n", sensor_num);
+		rc = qpnp_adc_tm_activate_trip_type(
+			chip->sensor[sensor_num].tz_dev,
+			ADC_TM_TRIP_LOW_COOL,
+			THERMAL_TRIP_ACTIVATION_DISABLED);
+		if (rc < 0) {
+			pr_err("notify error:%d\n", sensor_num);
+			return IRQ_HANDLED;
+		}
+	}
+
+	atomic_inc(&chip->wq_cnt);
+	queue_work(chip->high_thr_wq, &chip->trigger_high_thr_work);
+
+	return IRQ_HANDLED;
+}
+
+static void qpnp_adc_tm_low_thr_work(struct work_struct *work)
+{
+	struct qpnp_adc_tm_chip *chip = container_of(work,
+			struct qpnp_adc_tm_chip, trigger_low_thr_work);
+	int rc;
+
+	/* disable the vote if applicable */
+	if (chip->adc_vote_enable && chip->adc->hkadc_ldo &&
+					chip->adc->hkadc_ldo_ok) {
+		qpnp_adc_disable_voltage(chip->adc);
+		chip->adc_vote_enable = false;
+	}
+
+	pr_debug("thr:0x%x\n", chip->th_info.adc_tm_low_enable);
+
+	rc = qpnp_adc_tm_read_status(chip);
+	if (rc < 0)
+		pr_err("adc-tm low thr work failed\n");
+
+	return;
+}
+
+static irqreturn_t qpnp_adc_tm_low_thr_isr(int irq, void *data)
+{
+	struct qpnp_adc_tm_chip *chip = data;
+	u8 mode_ctl = 0, status1 = 0, sensor_mask = 0;
+	int rc = 0, sensor_notify_num = 0, i = 0, sensor_num = 0;
+
+	mode_ctl = ADC_OP_NORMAL_MODE << QPNP_OP_MODE_SHIFT;
+	/* Set measurement in single measurement mode */
+	qpnp_adc_tm_mode_select(chip, mode_ctl);
+
+	qpnp_adc_tm_disable(chip);
+
+	rc = qpnp_adc_tm_read_reg(chip, QPNP_ADC_TM_STATUS1, &status1, 1);
+	if (rc) {
+		pr_err("adc-tm read status1 failed\n");
+		return IRQ_HANDLED;
+	}
+
+	rc = qpnp_adc_tm_read_reg(chip, QPNP_ADC_TM_STATUS_LOW,
+					&chip->th_info.status_low, 1);
+	if (rc) {
+		pr_err("adc-tm read status low failed with %d\n", rc);
+		return IRQ_HANDLED;
+	}
+
+	rc = qpnp_adc_tm_read_reg(chip, QPNP_ADC_TM_LOW_THR_INT_EN,
+				&chip->th_info.adc_tm_low_thr_set, 1);
+	if (rc) {
+		pr_err("adc-tm read low thr failed with %d\n", rc);
+		return IRQ_HANDLED;
+	}
+
+	rc = qpnp_adc_tm_read_reg(chip, QPNP_ADC_TM_MULTI_MEAS_EN,
+				&chip->th_info.qpnp_adc_tm_meas_en, 1);
+	if (rc) {
+		pr_err("adc-tm read multi meas en failed with %d\n", rc);
+		return IRQ_HANDLED;
+	}
+
+	chip->th_info.adc_tm_low_enable = chip->th_info.qpnp_adc_tm_meas_en &
+					chip->th_info.status_low;
+	chip->th_info.adc_tm_low_enable &= chip->th_info.adc_tm_low_thr_set;
+
+	sensor_notify_num = chip->th_info.adc_tm_low_enable;
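+	/* The loop leaves sensor_num at the highest-numbered tripped sensor */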
+	while (i < chip->max_channels_available) {
+		if ((sensor_notify_num & 0x1) == 1)
+			sensor_num = i;
+		sensor_notify_num >>= 1;
+		i++;
+	}
+
+	if (!chip->sensor[sensor_num].thermal_node) {
+		sensor_mask = 1 << sensor_num;
+		rc = qpnp_adc_tm_reg_update(chip,
+			QPNP_ADC_TM_LOW_THR_INT_EN,
+			sensor_mask, false);
+		if (rc < 0) {
+			pr_err("low threshold int disable failed\n");
+			return IRQ_HANDLED;
+		}
+	} else {
+		/*
+		 * Uses the thermal sysfs registered device to disable
+		 * the corresponding low voltage threshold which
+		 * is triggered by high temp
+		 */
+		pr_debug("thermal node for sensor:%d\n", sensor_num);
+		rc = qpnp_adc_tm_activate_trip_type(
+			chip->sensor[sensor_num].tz_dev,
+			ADC_TM_TRIP_HIGH_WARM,
+			THERMAL_TRIP_ACTIVATION_DISABLED);
+		if (rc < 0) {
+			pr_err("notify error:%d\n", sensor_num);
+			return IRQ_HANDLED;
+		}
+	}
+
+	atomic_inc(&chip->wq_cnt);
+	queue_work(chip->low_thr_wq, &chip->trigger_low_thr_work);
+
+	return IRQ_HANDLED;
+}
+
+static int qpnp_adc_tm_rc_check_sensor_trip(struct qpnp_adc_tm_chip *chip,
+			u8 status_low, u8 status_high, int i,
+			int *sensor_low_notify_num, int *sensor_high_notify_num)
+{
+	int rc = 0;
+	u8 ctl = 0;
+
+	if (((status_low & 0x1) == 1) || ((status_high & 0x1) == 1)) {
+		rc = qpnp_adc_tm_read_reg(chip,
+					QPNP_BTM_Mn_EN(i), &ctl, 1);
+		if (rc) {
+			pr_err("ctl read failed with %d\n", rc);
+			return rc;
+		}
+
+		if ((status_low & 0x1) && (ctl & QPNP_BTM_Mn_MEAS_EN)
+			&& (ctl & QPNP_BTM_Mn_LOW_THR_INT_EN)) {
+			/* Mask the corresponding low threshold interrupt en */
+			if (!chip->sensor[i].thermal_node) {
+				rc = qpnp_adc_tm_reg_update(chip,
+					QPNP_BTM_Mn_EN(i),
+					QPNP_BTM_Mn_LOW_THR_INT_EN, false);
+				if (rc < 0) {
+					pr_err("low thr_int en failed\n");
+					return rc;
+				}
+			} else {
+				/*
+				 * Uses the thermal sysfs registered device to
+				 * disable the corresponding low voltage
+				 * threshold which is triggered by high temp
+				 */
+				pr_debug("thermal node for sensor:%d\n", i);
+				rc = qpnp_adc_tm_activate_trip_type(
+					chip->sensor[i].tz_dev,
+					ADC_TM_TRIP_HIGH_WARM,
+					THERMAL_TRIP_ACTIVATION_DISABLED);
+				if (rc < 0) {
+					pr_err("notify error:%d\n", i);
+					return rc;
+				}
+			}
+			*sensor_low_notify_num |= (status_low & 0x1);
+			chip->sensor[i].low_thr_triggered = true;
+		}
+
+		if ((status_high & 0x1) && (ctl & QPNP_BTM_Mn_MEAS_EN) &&
+					(ctl & QPNP_BTM_Mn_HIGH_THR_INT_EN)) {
+			/* Mask the corresponding high threshold interrupt en */
+			if (!chip->sensor[i].thermal_node) {
+				rc = qpnp_adc_tm_reg_update(chip,
+					QPNP_BTM_Mn_EN(i),
+					QPNP_BTM_Mn_HIGH_THR_INT_EN, false);
+				if (rc < 0) {
+					pr_err("high thr_int en failed\n");
+					return rc;
+				}
+			} else {
+				/*
+				 * Uses the thermal sysfs registered device to
+				 * disable the corresponding high voltage
+				 * threshold which is triggered by low temp
+				 */
+				pr_debug("thermal node for sensor:%d\n", i);
+				rc = qpnp_adc_tm_activate_trip_type(
+					chip->sensor[i].tz_dev,
+					ADC_TM_TRIP_LOW_COOL,
+					THERMAL_TRIP_ACTIVATION_DISABLED);
+				if (rc < 0) {
+					pr_err("notify error:%d\n", i);
+					return rc;
+				}
+			}
+			*sensor_high_notify_num |= (status_high & 0x1);
+			chip->sensor[i].high_thr_triggered = true;
+		}
+	}
+
+	return rc;
+}
+
+static irqreturn_t qpnp_adc_tm_rc_thr_isr(int irq, void *data)
+{
+	struct qpnp_adc_tm_chip *chip = data;
+	u8 status_low = 0, status_high = 0;
+	int rc = 0, sensor_low_notify_num = 0, i = 0;
+	int sensor_high_notify_num = 0;
+
+	rc = qpnp_adc_tm_read_reg(chip, QPNP_ADC_TM_STATUS_LOW,
+						&status_low, 1);
+	if (rc) {
+		pr_err("adc-tm read status low failed with %d\n", rc);
+		return IRQ_HANDLED;
+	}
+
+	if (status_low)
+		chip->th_info.adc_tm_low_enable = status_low;
+
+	rc = qpnp_adc_tm_read_reg(chip, QPNP_ADC_TM_STATUS_HIGH,
+							&status_high, 1);
+	if (rc) {
+		pr_err("adc-tm read status high failed with %d\n", rc);
+		return IRQ_HANDLED;
+	}
+
+	if (status_high)
+		chip->th_info.adc_tm_high_enable = status_high;
+
+	while (i < chip->max_channels_available) {
+		rc = qpnp_adc_tm_rc_check_sensor_trip(chip,
+				status_low, status_high, i,
+				&sensor_low_notify_num,
+				&sensor_high_notify_num);
+		if (rc) {
+			pr_err("Sensor trip read failed\n");
+			return IRQ_HANDLED;
+		}
+		status_low >>= 1;
+		status_high >>= 1;
+		i++;
+	}
+
+	if (sensor_low_notify_num) {
+		atomic_inc(&chip->wq_cnt);
+		queue_work(chip->low_thr_wq, &chip->trigger_low_thr_work);
+	}
+
+	if (sensor_high_notify_num) {
+		atomic_inc(&chip->wq_cnt);
+		queue_work(chip->high_thr_wq, &chip->trigger_high_thr_work);
+	}
+
+	return IRQ_HANDLED;
+}
+
+static int qpnp_adc_read_temp(struct thermal_zone_device *thermal,
+			     int *temp)
+{
+	struct qpnp_adc_tm_sensor *adc_tm_sensor = thermal->devdata;
+	struct qpnp_adc_tm_chip *chip = adc_tm_sensor->chip;
+	struct qpnp_vadc_result result;
+	int rc = 0;
+
+	rc = qpnp_vadc_read(chip->vadc_dev,
+				adc_tm_sensor->vadc_channel_num, &result);
+	if (rc)
+		return rc;
+
+	*temp = result.physical;
+
+	return rc;
+}
+
+static struct thermal_zone_device_ops qpnp_adc_tm_thermal_ops = {
+	.get_temp = qpnp_adc_read_temp,
+	.get_mode = qpnp_adc_tm_get_mode,
+	.set_mode = qpnp_adc_tm_set_mode,
+	.get_trip_type = qpnp_adc_tm_get_trip_type,
+	.activate_trip_type = qpnp_adc_tm_activate_trip_type,
+	.get_trip_temp = qpnp_adc_tm_get_trip_temp,
+	.set_trip_temp = qpnp_adc_tm_set_trip_temp,
+};
+
+int32_t qpnp_adc_tm_channel_measure(struct qpnp_adc_tm_chip *chip,
+					struct qpnp_adc_tm_btm_param *param)
+{
+	uint32_t channel, amux_prescaling, dt_index = 0, scale_type = 0;
+	int rc = 0, i = 0, version = 0;
+	bool chan_found = false;
+
+	if (qpnp_adc_tm_is_valid(chip)) {
+		pr_err("chip not valid\n");
+		return -ENODEV;
+	}
+
+	if (param->threshold_notification == NULL) {
+		pr_debug("No notification for high/low temp??\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&chip->adc->adc_lock);
+
+	channel = param->channel;
+
+	if (channel == VSYS) {
+		version = qpnp_adc_get_revid_version(chip->dev);
+		if (version == QPNP_REV_ID_PM8950_1_0) {
+			pr_debug("Channel not supported\n");
+			rc = -EINVAL;
+			goto fail_unlock;
+		}
+	}
+
+	while (i < chip->max_channels_available) {
+		if (chip->adc->adc_channels[i].channel_num ==
+							channel) {
+			dt_index = i;
+			chan_found = true;
+			i++;
+		} else
+			i++;
+	}
+
+	if (!chan_found)  {
+		pr_err("not a valid ADC_TM channel\n");
+		rc = -EINVAL;
+		goto fail_unlock;
+	}
+
+	rc = qpnp_adc_tm_check_revision(chip,
+			chip->sensor[dt_index].btm_channel_num);
+	if (rc < 0)
+		goto fail_unlock;
+
+	scale_type = chip->adc->adc_channels[dt_index].adc_scale_fn;
+	if (scale_type >= SCALE_RSCALE_NONE) {
+		rc = -EBADF;
+		goto fail_unlock;
+	}
+
+	amux_prescaling =
+		chip->adc->adc_channels[dt_index].chan_path_prescaling;
+
+	if (amux_prescaling >= PATH_SCALING_NONE) {
+		rc = -EINVAL;
+		goto fail_unlock;
+	}
+
+	pr_debug("channel:%d, scale_type:%d, dt_idx:%d\n",
+					channel, scale_type, dt_index);
+	param->gain_num = qpnp_vadc_amux_scaling_ratio[amux_prescaling].num;
+	param->gain_den = qpnp_vadc_amux_scaling_ratio[amux_prescaling].den;
+	param->adc_tm_hc = chip->adc_tm_hc;
+	chip->adc->amux_prop->amux_channel = channel;
+	chip->adc->amux_prop->decimation =
+			chip->adc->adc_channels[dt_index].adc_decimation;
+	chip->adc->amux_prop->hw_settle_time =
+			chip->adc->adc_channels[dt_index].hw_settle_time;
+	chip->adc->amux_prop->fast_avg_setup =
+			chip->adc->adc_channels[dt_index].fast_avg_setup;
+	chip->adc->amux_prop->mode_sel =
+		ADC_OP_MEASUREMENT_INTERVAL << QPNP_OP_MODE_SHIFT;
+	adc_tm_rscale_fn[scale_type].chan(chip->vadc_dev, param,
+			&chip->adc->amux_prop->chan_prop->low_thr,
+			&chip->adc->amux_prop->chan_prop->high_thr);
+	qpnp_adc_tm_add_to_list(chip, dt_index, param,
+				chip->adc->amux_prop->chan_prop);
+	chip->adc->amux_prop->chan_prop->tm_channel_select =
+				chip->sensor[dt_index].btm_channel_num;
+	chip->adc->amux_prop->chan_prop->state_request =
+					param->state_request;
+	chip->adc->amux_prop->calib_type =
+			chip->adc->adc_channels[dt_index].calib_type;
+	if (!chip->adc_tm_hc) {
+		rc = qpnp_adc_tm_configure(chip, chip->adc->amux_prop);
+		if (rc) {
+			pr_err("adc-tm configure failed with %d\n", rc);
+			goto fail_unlock;
+		}
+	} else {
+		rc = qpnp_adc_tm_hc_configure(chip, chip->adc->amux_prop);
+		if (rc) {
+			pr_err("adc-tm hc configure failed with %d\n", rc);
+			goto fail_unlock;
+		}
+	}
+
+	chip->sensor[dt_index].scale_type = scale_type;
+
+fail_unlock:
+	mutex_unlock(&chip->adc->adc_lock);
+
+	return rc;
+}
+EXPORT_SYMBOL(qpnp_adc_tm_channel_measure);
+
+int32_t qpnp_adc_tm_disable_chan_meas(struct qpnp_adc_tm_chip *chip,
+					struct qpnp_adc_tm_btm_param *param)
+{
+	uint32_t channel, dt_index = 0, btm_chan_num;
+	u8 sensor_mask = 0, mode_ctl = 0;
+	int rc = 0;
+
+	if (qpnp_adc_tm_is_valid(chip))
+		return -ENODEV;
+
+	mutex_lock(&chip->adc->adc_lock);
+
+	if (!chip->adc_tm_hc) {
+		/* Set measurement in single measurement mode */
+		mode_ctl = ADC_OP_NORMAL_MODE << QPNP_OP_MODE_SHIFT;
+		rc = qpnp_adc_tm_mode_select(chip, mode_ctl);
+		if (rc < 0) {
+			pr_err("adc-tm single mode select failed\n");
+			goto fail;
+		}
+	}
+
+	/* Disable bank */
+	rc = qpnp_adc_tm_disable(chip);
+	if (rc < 0) {
+		pr_err("adc-tm disable failed\n");
+		goto fail;
+	}
+
+	if (!chip->adc_tm_hc) {
+		/* Check if a conversion is in progress */
+		rc = qpnp_adc_tm_req_sts_check(chip);
+		if (rc < 0) {
+			pr_err("adc-tm req_sts check failed\n");
+			goto fail;
+		}
+	}
+
+	channel = param->channel;
+	while ((dt_index < chip->max_channels_available) &&
+		(chip->adc->adc_channels[dt_index].channel_num != channel))
+		dt_index++;
+
+	if (dt_index >= chip->max_channels_available) {
+		pr_err("not a valid ADC_TMN channel\n");
+		rc = -EINVAL;
+		goto fail;
+	}
+
+	btm_chan_num = chip->sensor[dt_index].btm_channel_num;
+
+	if (!chip->adc_tm_hc) {
+		sensor_mask = 1 << chip->sensor[dt_index].sensor_num;
+
+		rc = qpnp_adc_tm_reg_update(chip, QPNP_ADC_TM_LOW_THR_INT_EN,
+			sensor_mask, false);
+		if (rc < 0) {
+			pr_err("low threshold int write failed\n");
+			goto fail;
+		}
+
+		rc = qpnp_adc_tm_reg_update(chip, QPNP_ADC_TM_HIGH_THR_INT_EN,
+			sensor_mask, false);
+		if (rc < 0) {
+			pr_err("high threshold int disable failed\n");
+			goto fail;
+		}
+
+		rc = qpnp_adc_tm_reg_update(chip, QPNP_ADC_TM_MULTI_MEAS_EN,
+			sensor_mask, false);
+		if (rc < 0) {
+			pr_err("multi measurement disable failed\n");
+			goto fail;
+		}
+	} else {
+		rc = qpnp_adc_tm_reg_update(chip, QPNP_BTM_Mn_EN(btm_chan_num),
+					QPNP_BTM_Mn_HIGH_THR_INT_EN, false);
+		if (rc < 0) {
+			pr_err("high thr disable err:%d\n", btm_chan_num);
+			return rc;
+		}
+
+		rc = qpnp_adc_tm_reg_update(chip, QPNP_BTM_Mn_EN(btm_chan_num),
+					QPNP_BTM_Mn_LOW_THR_INT_EN, false);
+		if (rc < 0) {
+			pr_err("low thr disable err:%d\n", btm_chan_num);
+			return rc;
+		}
+
+		rc = qpnp_adc_tm_reg_update(chip, QPNP_BTM_Mn_EN(btm_chan_num),
+					QPNP_BTM_Mn_MEAS_EN, false);
+		if (rc < 0) {
+			pr_err("multi measurement disable failed\n");
+			return rc;
+		}
+	}
+
+	rc = qpnp_adc_tm_enable_if_channel_meas(chip);
+	if (rc < 0)
+		pr_err("re-enabling measurement failed\n");
+
+fail:
+	mutex_unlock(&chip->adc->adc_lock);
+
+	return rc;
+}
+EXPORT_SYMBOL(qpnp_adc_tm_disable_chan_meas);
+
+int32_t qpnp_adc_tm_usbid_configure(struct qpnp_adc_tm_chip *chip,
+				struct qpnp_adc_tm_btm_param *param)
+{
+	param->channel = LR_MUX10_PU2_AMUX_USB_ID_LV;
+	return qpnp_adc_tm_channel_measure(chip, param);
+}
+EXPORT_SYMBOL(qpnp_adc_tm_usbid_configure);
+
+int32_t qpnp_adc_tm_usbid_end(struct qpnp_adc_tm_chip *chip)
+{
+	struct qpnp_adc_tm_btm_param param;
+
+	param.channel = LR_MUX10_PU2_AMUX_USB_ID_LV;
+	return qpnp_adc_tm_disable_chan_meas(chip, &param);
+}
+EXPORT_SYMBOL(qpnp_adc_tm_usbid_end);
+
+struct qpnp_adc_tm_chip *qpnp_get_adc_tm(struct device *dev, const char *name)
+{
+	struct qpnp_adc_tm_chip *chip;
+	struct device_node *node = NULL;
+	char prop_name[QPNP_MAX_PROP_NAME_LEN];
+
+	snprintf(prop_name, QPNP_MAX_PROP_NAME_LEN, "qcom,%s-adc_tm", name);
+
+	node = of_parse_phandle(dev->of_node, prop_name, 0);
+	if (node == NULL)
+		return ERR_PTR(-ENODEV);
+
+	list_for_each_entry(chip, &qpnp_adc_tm_device_list, list)
+		if (chip->adc->pdev->dev.of_node == node)
+			return chip;
+
+	return ERR_PTR(-EPROBE_DEFER);
+}
+EXPORT_SYMBOL(qpnp_get_adc_tm);
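+
+/*
+ * Typical client usage, as a sketch only (the "xo-therm" handle name, the
+ * low_code/high_code thresholds, ctx and my_notify_cb below are illustrative
+ * placeholders, not part of this driver):
+ *
+ *	chip = qpnp_get_adc_tm(dev, "xo-therm");
+ *	if (IS_ERR(chip))
+ *		return PTR_ERR(chip);
+ *	param.channel = LR_MUX10_PU2_AMUX_USB_ID_LV;
+ *	param.low_thr = low_code;
+ *	param.high_thr = high_code;
+ *	param.state_request = ADC_TM_HIGH_LOW_THR_ENABLE;
+ *	param.btm_ctx = ctx;
+ *	param.threshold_notification = my_notify_cb;
+ *	rc = qpnp_adc_tm_channel_measure(chip, &param);
+ */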
+
+static int qpnp_adc_tm_initial_setup(struct qpnp_adc_tm_chip *chip)
+{
+	u8 thr_init = 0;
+	int rc = 0;
+
+	rc = qpnp_adc_tm_write_reg(chip, QPNP_ADC_TM_HIGH_THR_INT_EN,
+							thr_init, 1);
+	if (rc < 0) {
+		pr_err("high thr init failed\n");
+		return rc;
+	}
+
+	rc = qpnp_adc_tm_write_reg(chip, QPNP_ADC_TM_LOW_THR_INT_EN,
+							thr_init, 1);
+	if (rc < 0) {
+		pr_err("low thr init failed\n");
+		return rc;
+	}
+
+	rc = qpnp_adc_tm_write_reg(chip, QPNP_ADC_TM_MULTI_MEAS_EN,
+							thr_init, 1);
+	if (rc < 0) {
+		pr_err("multi meas en failed\n");
+		return rc;
+	}
+
+	return rc;
+}
+
+static const struct of_device_id qpnp_adc_tm_match_table[] = {
+	{	.compatible = "qcom,qpnp-adc-tm" },
+	{	.compatible = "qcom,qpnp-adc-tm-hc" },
+	{}
+};
+
+static int qpnp_adc_tm_probe(struct platform_device *pdev)
+{
+	struct device_node *node = pdev->dev.of_node, *child;
+	struct qpnp_adc_tm_chip *chip;
+	struct qpnp_adc_drv *adc_qpnp;
+	int32_t count_adc_channel_list = 0, rc, sen_idx = 0, i = 0;
+	bool thermal_node = false;
+	const struct of_device_id *id;
+
+	for_each_child_of_node(node, child)
+		count_adc_channel_list++;
+
+	if (!count_adc_channel_list) {
+		pr_err("No channel listing\n");
+		return -EINVAL;
+	}
+
+	id = of_match_node(qpnp_adc_tm_match_table, node);
+	if (id == NULL) {
+		pr_err("qpnp_adc_tm_match of_node prop not present\n");
+		return -ENODEV;
+	}
+
+	chip = devm_kzalloc(&pdev->dev, sizeof(struct qpnp_adc_tm_chip) +
+			(count_adc_channel_list *
+			sizeof(struct qpnp_adc_tm_sensor)),
+				GFP_KERNEL);
+	if (!chip)
+		return -ENOMEM;
+
+	adc_qpnp = devm_kzalloc(&pdev->dev, sizeof(struct qpnp_adc_drv),
+			GFP_KERNEL);
+	if (!adc_qpnp) {
+		rc = -ENOMEM;
+		goto fail;
+	}
+
+	chip->dev = &(pdev->dev);
+	chip->adc = adc_qpnp;
+	chip->adc->regmap = dev_get_regmap(pdev->dev.parent, NULL);
+	if (!chip->adc->regmap) {
+		dev_err(&pdev->dev, "Couldn't get parent's regmap\n");
+		rc = -EINVAL;
+		goto fail;
+	}
+
+	if (of_device_is_compatible(node, "qcom,qpnp-adc-tm-hc")) {
+		chip->adc_tm_hc = true;
+		chip->adc->adc_hc = true;
+	}
+
+	rc = qpnp_adc_get_devicetree_data(pdev, chip->adc);
+	if (rc) {
+		dev_err(&pdev->dev, "failed to read device tree\n");
+		goto fail;
+	}
+	mutex_init(&chip->adc->adc_lock);
+
+	/* Register the ADC peripheral interrupt */
+	if (!chip->adc_tm_hc) {
+		chip->adc->adc_high_thr_irq = platform_get_irq_byname(pdev,
+						"high-thr-en-set");
+		if (chip->adc->adc_high_thr_irq < 0) {
+			pr_err("Invalid irq\n");
+			rc = -ENXIO;
+			goto fail;
+		}
+
+		chip->adc->adc_low_thr_irq = platform_get_irq_byname(pdev,
+						"low-thr-en-set");
+		if (chip->adc->adc_low_thr_irq < 0) {
+			pr_err("Invalid irq\n");
+			rc = -ENXIO;
+			goto fail;
+		}
+	}
+
+	chip->vadc_dev = qpnp_get_vadc(&pdev->dev, "adc_tm");
+	if (IS_ERR(chip->vadc_dev)) {
+		rc = PTR_ERR(chip->vadc_dev);
+		if (rc != -EPROBE_DEFER)
+			pr_err("vadc property missing, rc=%d\n", rc);
+		goto fail;
+	}
+
+	chip->adc_tm_recalib_check = of_property_read_bool(node,
+				"qcom,adc-tm-recalib-check");
+
+	for_each_child_of_node(node, child) {
+		char name[25];
+		int btm_channel_num, timer_select = 0;
+
+		rc = of_property_read_u32(child,
+				"qcom,btm-channel-number", &btm_channel_num);
+		if (rc) {
+			pr_err("Invalid btm channel number\n");
+			goto fail;
+		}
+		rc = of_property_read_u32(child,
+				"qcom,meas-interval-timer-idx", &timer_select);
+		if (rc) {
+			pr_debug("Default to timer2 with interval of 1 sec\n");
+			chip->sensor[sen_idx].timer_select =
+							ADC_MEAS_TIMER_SELECT2;
+			chip->sensor[sen_idx].meas_interval =
+							ADC_MEAS2_INTERVAL_1S;
+		} else {
+			if (timer_select >= ADC_MEAS_TIMER_NUM) {
+				pr_err("Invalid timer selection number\n");
+				goto fail;
+			}
+			chip->sensor[sen_idx].timer_select = timer_select;
+			if (timer_select == ADC_MEAS_TIMER_SELECT1)
+				chip->sensor[sen_idx].meas_interval =
+						ADC_MEAS1_INTERVAL_3P9MS;
+			else if (timer_select == ADC_MEAS_TIMER_SELECT3)
+				chip->sensor[sen_idx].meas_interval =
+						ADC_MEAS3_INTERVAL_4S;
+			else if (timer_select == ADC_MEAS_TIMER_SELECT2)
+				chip->sensor[sen_idx].meas_interval =
+						ADC_MEAS2_INTERVAL_1S;
+		}
+
+		chip->sensor[sen_idx].btm_channel_num = btm_channel_num;
+		chip->sensor[sen_idx].vadc_channel_num =
+				chip->adc->adc_channels[sen_idx].channel_num;
+		chip->sensor[sen_idx].sensor_num = sen_idx;
+		chip->sensor[sen_idx].chip = chip;
+		pr_debug("btm_chan:%x, vadc_chan:%x\n", btm_channel_num,
+			chip->adc->adc_channels[sen_idx].channel_num);
+		thermal_node = of_property_read_bool(child,
+					"qcom,thermal-node");
+		if (thermal_node) {
+			/* Register with the thermal zone */
+			pr_debug("thermal node%x\n", btm_channel_num);
+			chip->sensor[sen_idx].mode = THERMAL_DEVICE_DISABLED;
+			chip->sensor[sen_idx].thermal_node = true;
+			snprintf(name, sizeof(name), "%s",
+				chip->adc->adc_channels[sen_idx].name);
+			chip->sensor[sen_idx].meas_interval =
+				QPNP_ADC_TM_MEAS_INTERVAL;
+			chip->sensor[sen_idx].low_thr =
+						QPNP_ADC_TM_M0_LOW_THR;
+			chip->sensor[sen_idx].high_thr =
+						QPNP_ADC_TM_M0_HIGH_THR;
+			chip->sensor[sen_idx].tz_dev =
+				thermal_zone_device_register(name,
+				ADC_TM_TRIP_NUM, ADC_TM_WRITABLE_TRIPS_MASK,
+				&chip->sensor[sen_idx],
+				&qpnp_adc_tm_thermal_ops, NULL, 0, 0);
+			if (IS_ERR(chip->sensor[sen_idx].tz_dev))
+				pr_err("thermal device register failed.\n");
+		}
+		chip->sensor[sen_idx].req_wq = alloc_workqueue(
+				"qpnp_adc_notify_wq", WQ_HIGHPRI, 0);
+		if (!chip->sensor[sen_idx].req_wq) {
+			pr_err("Requesting priority wq failed\n");
+			goto fail;
+		}
+		INIT_WORK(&chip->sensor[sen_idx].work, notify_adc_tm_fn);
+		INIT_LIST_HEAD(&chip->sensor[sen_idx].thr_list);
+		sen_idx++;
+	}
+	chip->max_channels_available = count_adc_channel_list;
+	chip->high_thr_wq = alloc_workqueue("qpnp_adc_tm_high_thr_wq",
+							WQ_HIGHPRI, 0);
+	if (!chip->high_thr_wq) {
+		pr_err("Requesting high thr priority wq failed\n");
+		goto fail;
+	}
+	chip->low_thr_wq = alloc_workqueue("qpnp_adc_tm_low_thr_wq",
+							WQ_HIGHPRI, 0);
+	if (!chip->low_thr_wq) {
+		pr_err("Requesting low thr priority wq failed\n");
+		goto fail;
+	}
+	chip->thr_wq = alloc_workqueue("qpnp_adc_tm_thr_wq",
+						WQ_HIGHPRI, 0);
+	if (!chip->thr_wq) {
+		pr_err("Requesting thr priority wq failed\n");
+		goto fail;
+	}
+
+	INIT_WORK(&chip->trigger_high_thr_work, qpnp_adc_tm_high_thr_work);
+	INIT_WORK(&chip->trigger_low_thr_work, qpnp_adc_tm_low_thr_work);
+	atomic_set(&chip->wq_cnt, 0);
+
+	if (!chip->adc_tm_hc) {
+		rc = qpnp_adc_tm_initial_setup(chip);
+		if (rc)
+			goto fail;
+
+		rc = devm_request_irq(&pdev->dev, chip->adc->adc_high_thr_irq,
+				qpnp_adc_tm_high_thr_isr,
+		IRQF_TRIGGER_RISING, "qpnp_adc_tm_high_interrupt", chip);
+		if (rc) {
+			dev_err(&pdev->dev, "failed to request adc irq\n");
+			goto fail;
+		} else {
+			enable_irq_wake(chip->adc->adc_high_thr_irq);
+		}
+
+		rc = devm_request_irq(&pdev->dev, chip->adc->adc_low_thr_irq,
+					qpnp_adc_tm_low_thr_isr,
+			IRQF_TRIGGER_RISING, "qpnp_adc_tm_low_interrupt", chip);
+		if (rc) {
+			dev_err(&pdev->dev, "failed to request adc irq\n");
+			goto fail;
+		} else {
+			enable_irq_wake(chip->adc->adc_low_thr_irq);
+		}
+	} else {
+		rc = devm_request_irq(&pdev->dev, chip->adc->adc_irq_eoc,
+				qpnp_adc_tm_rc_thr_isr,
+			IRQF_TRIGGER_RISING, "qpnp_adc_tm_interrupt", chip);
+		if (rc)
+			dev_err(&pdev->dev, "failed to request adc irq\n");
+		else
+			enable_irq_wake(chip->adc->adc_irq_eoc);
+	}
+
+	chip->adc_vote_enable = false;
+	dev_set_drvdata(&pdev->dev, chip);
+	list_add(&chip->list, &qpnp_adc_tm_device_list);
+
+	pr_debug("OK\n");
+	return 0;
+fail:
+	for_each_child_of_node(node, child) {
+		thermal_node = of_property_read_bool(child,
+					"qcom,thermal-node");
+		if (thermal_node) {
+			thermal_zone_device_unregister(chip->sensor[i].tz_dev);
+			if (chip->sensor[i].req_wq)
+				destroy_workqueue(chip->sensor[i].req_wq);
+		}
+		i++;
+	}
+	if (chip->high_thr_wq)
+		destroy_workqueue(chip->high_thr_wq);
+	if (chip->low_thr_wq)
+		destroy_workqueue(chip->low_thr_wq);
+	if (chip->thr_wq)
+		destroy_workqueue(chip->thr_wq);
+	dev_set_drvdata(&pdev->dev, NULL);
+	return rc;
+}
+
+static int qpnp_adc_tm_remove(struct platform_device *pdev)
+{
+	struct qpnp_adc_tm_chip *chip = dev_get_drvdata(&pdev->dev);
+	struct device_node *node = pdev->dev.of_node, *child;
+	bool thermal_node = false;
+	int i = 0;
+
+	for_each_child_of_node(node, child) {
+		thermal_node = of_property_read_bool(child,
+					"qcom,thermal-node");
+		if (thermal_node) {
+			thermal_zone_device_unregister(chip->sensor[i].tz_dev);
+			if (chip->sensor[i].req_wq)
+				destroy_workqueue(chip->sensor[i].req_wq);
+		}
+		i++;
+	}
+
+	if (chip->high_thr_wq)
+		destroy_workqueue(chip->high_thr_wq);
+	if (chip->low_thr_wq)
+		destroy_workqueue(chip->low_thr_wq);
+	if (chip->thr_wq)
+		destroy_workqueue(chip->thr_wq);
+	if (chip->adc->hkadc_ldo && chip->adc->hkadc_ldo_ok)
+		qpnp_adc_free_voltage_resource(chip->adc);
+	dev_set_drvdata(&pdev->dev, NULL);
+
+	return 0;
+}
+
+static void qpnp_adc_tm_shutdown(struct platform_device *pdev)
+{
+	struct qpnp_adc_tm_chip *chip = dev_get_drvdata(&pdev->dev);
+	int rc = 0, i = 0;
+	u8 reg_val = 0, status1 = 0, en_ctl1 = 0;
+
+	if (!chip->adc_tm_hc) {
+		/* Set measurement in single measurement mode */
+		reg_val = ADC_OP_NORMAL_MODE << QPNP_OP_MODE_SHIFT;
+		rc = qpnp_adc_tm_mode_select(chip, reg_val);
+		if (rc < 0)
+			pr_err("adc-tm single mode select failed\n");
+	}
+
+	/* Disable bank */
+	rc = qpnp_adc_tm_disable(chip);
+	if (rc < 0)
+		pr_err("adc-tm disable failed\n");
+
+	if (chip->adc_tm_hc) {
+		for (i = 0; i < 8; i++) {
+			rc = qpnp_adc_tm_reg_update(chip,
+				QPNP_BTM_Mn_EN(i),
+				QPNP_BTM_Mn_MEAS_EN, false);
+			if (rc < 0)
+				pr_err("multi meas disable failed\n");
+		}
+	} else {
+		/* Check if a conversion is in progress */
+		rc = qpnp_adc_tm_req_sts_check(chip);
+		if (rc < 0)
+			pr_err("adc-tm req_sts check failed\n");
+
+		/* Disable multimeasurement */
+		reg_val = 0;
+		rc = qpnp_adc_tm_write_reg(chip,
+				QPNP_ADC_TM_MULTI_MEAS_EN, reg_val, 1);
+		if (rc < 0)
+			pr_err("adc-tm multi-meas mode disable failed\n");
+
+		rc = qpnp_adc_tm_read_reg(chip,
+				QPNP_ADC_TM_STATUS1, &status1, 1);
+		if (rc < 0)
+			pr_err("adc-tm status1 read failed\n");
+
+		rc = qpnp_adc_tm_read_reg(chip,
+				QPNP_EN_CTL1, &en_ctl1, 1);
+		if (rc < 0)
+			pr_err("adc-tm en_ctl1 read failed\n");
+
+		pr_debug("status1=0%x, en_ctl1=0x%x\n", status1, en_ctl1);
+	}
+}
+
+static int qpnp_adc_tm_suspend_noirq(struct device *dev)
+{
+	struct qpnp_adc_tm_chip *chip = dev_get_drvdata(dev);
+
+	if (atomic_read(&chip->wq_cnt) != 0) {
+		pr_err(
+			"Aborting suspend, adc_tm notification running while suspending\n");
+		return -EBUSY;
+	}
+	return 0;
+}
+
+static const struct dev_pm_ops qpnp_adc_tm_pm_ops = {
+	.suspend_noirq	= qpnp_adc_tm_suspend_noirq,
+};
+
+static struct platform_driver qpnp_adc_tm_driver = {
+	.driver		= {
+		.name		= "qcom,qpnp-adc-tm",
+		.of_match_table	= qpnp_adc_tm_match_table,
+		.pm		= &qpnp_adc_tm_pm_ops,
+	},
+	.probe		= qpnp_adc_tm_probe,
+	.remove		= qpnp_adc_tm_remove,
+	.shutdown	= qpnp_adc_tm_shutdown,
+};
+
+static int __init qpnp_adc_tm_init(void)
+{
+	return platform_driver_register(&qpnp_adc_tm_driver);
+}
+module_init(qpnp_adc_tm_init);
+
+static void __exit qpnp_adc_tm_exit(void)
+{
+	platform_driver_unregister(&qpnp_adc_tm_driver);
+}
+module_exit(qpnp_adc_tm_exit);
+
+MODULE_DESCRIPTION("QPNP PMIC ADC Threshold Monitoring driver");
+MODULE_LICENSE("GPL v2");
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/thermal/qpnp-temp-alarm.c	2019-01-22 16:16:27.155279478 +0100
@@ -0,0 +1,812 @@
+/*
+ * Copyright (c) 2011-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/string.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+#include <linux/spmi.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/thermal.h>
+#include <linux/qpnp/qpnp-adc.h>
+
+#define QPNP_TM_DRIVER_NAME "qcom,qpnp-temp-alarm"
+
+enum qpnp_tm_registers {
+	QPNP_TM_REG_TYPE		= 0x04,
+	QPNP_TM_REG_SUBTYPE		= 0x05,
+	QPNP_TM_REG_STATUS		= 0x08,
+	QPNP_TM_REG_SHUTDOWN_CTRL1	= 0x40,
+	QPNP_TM_REG_SHUTDOWN_CTRL2	= 0x42,
+	QPNP_TM_REG_ALARM_CTRL		= 0x46,
+};
+
+#define QPNP_TM_TYPE			0x09
+#define QPNP_TM_SUBTYPE_GEN1		0x08
+#define QPNP_TM_SUBTYPE_GEN2		0x09
+
+#define STATUS_STATE_MASK		0x70
+#define STATUS_STATE_SHIFT		4
+#define STATUS_STAGE_MASK		0x03
+
+#define SHUTDOWN_CTRL1_OVERRIDE_STAGE3	0x80
+#define SHUTDOWN_CTRL1_OVERRIDE_STAGE2	0x40
+#define SHUTDOWN_CTRL1_CLK_RATE_MASK	0x0C
+#define SHUTDOWN_CTRL1_CLK_RATE_SHIFT	2
+#define SHUTDOWN_CTRL1_THRESHOLD_MASK	0x03
+
+#define SHUTDOWN_CTRL2_CLEAR_STAGE3	0x80
+#define SHUTDOWN_CTRL2_CLEAR_STAGE2	0x40
+
+#define ALARM_CTRL_FORCE_ENABLE		0x80
+#define ALARM_CTRL_FOLLOW_HW_ENABLE	0x01
+
+#define TEMP_STAGE_STEP			20000	/* Stage step: 20.000 C */
+#define TEMP_STAGE_HYSTERESIS		2000
+
+#define TEMP_THRESH_MIN			105000	/* Threshold Min: 105 C */
+#define TEMP_THRESH_STEP		5000	/* Threshold step: 5 C */
+
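+/*
+ * Illustrative mapping implied by the constants above: threshold
+ * setting N places the stage 1 trip at TEMP_THRESH_MIN +
+ * N * TEMP_THRESH_STEP, i.e. 105 C, 110 C, 115 C or 120 C for
+ * N = 0..3; stages 2 and 3 then sit 20 C and 40 C above stage 1.
+ */
+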
+#define THRESH_MIN			0
+#define THRESH_MAX			3
+
+#define CLOCK_RATE_MIN			0
+#define CLOCK_RATE_MAX			3
+
+/* Trip points from most critical to least critical */
+#define TRIP_STAGE3			0
+#define TRIP_STAGE2			1
+#define TRIP_STAGE1			2
+#define TRIP_NUM			3
+
+enum qpnp_tm_adc_type {
+	QPNP_TM_ADC_NONE,	/* Estimates temp based on overload level. */
+	QPNP_TM_ADC_QPNP_ADC,
+};
+
+/*
+ * Temperature in millicelsius reported during stage 0 if no ADC is present and
+ * no value has been specified via device tree.
+ */
+#define DEFAULT_NO_ADC_TEMP		37000
+
+struct qpnp_tm_chip {
+	struct delayed_work		irq_work;
+	struct platform_device		*pdev;
+	struct regmap			*regmap;
+	struct thermal_zone_device	*tz_dev;
+	const char			*tm_name;
+	unsigned int			subtype;
+	enum qpnp_tm_adc_type		adc_type;
+	int				temperature;
+	enum thermal_device_mode	mode;
+	unsigned int			thresh;
+	unsigned int			clock_rate;
+	unsigned int			stage;
+	unsigned int			prev_stage;
+	int				irq;
+	enum qpnp_vadc_channels		adc_channel;
+	u16				base_addr;
+	bool				allow_software_override;
+	struct qpnp_vadc_chip		*vadc_dev;
+};
+
+/*
+ * Delay (in ms) between the TEMP_STAT IRQ going high and the status
+ * value changing.
+ */
+#define STATUS_REGISTER_DELAY_MS       40
+
+enum pmic_thermal_override_mode {
+	SOFTWARE_OVERRIDE_DISABLED = 0,
+	SOFTWARE_OVERRIDE_ENABLED,
+};
+
+/* This array maps from GEN2 alarm state to GEN1 alarm stage */
+static const unsigned int alarm_state_map[8] = {0, 1, 1, 2, 2, 3, 3, 3};
+
+static inline int qpnp_tm_read(struct qpnp_tm_chip *chip, u16 addr, u8 *buf,
+				int len)
+{
+	int rc;
+
+	rc = regmap_bulk_read(chip->regmap, chip->base_addr + addr, buf, len);
+
+	if (rc)
+		dev_err(&chip->pdev->dev,
+			"%s: regmap_bulk_readl failed. sid=%d, addr=%04X, len=%d, rc=%d\n",
+			__func__,
+			to_spmi_device(chip->pdev->dev.parent)->usid,
+			chip->base_addr + addr,
+			len, rc);
+
+	return rc;
+}
+
+static inline int qpnp_tm_write(struct qpnp_tm_chip *chip, u16 addr, u8 *buf,
+				int len)
+{
+	int rc;
+
+	rc = regmap_bulk_write(chip->regmap, chip->base_addr + addr, buf, len);
+
+	if (rc)
+		dev_err(&chip->pdev->dev,
+			"%s: regmap_bulk_write failed. sid=%d, addr=%04X, len=%d, rc=%d\n",
+			__func__,
+			to_spmi_device(chip->pdev->dev.parent)->usid,
+			chip->base_addr + addr,
+			len, rc);
+
+	return rc;
+}
+
+
+static inline int qpnp_tm_shutdown_override(struct qpnp_tm_chip *chip,
+			    enum pmic_thermal_override_mode mode)
+{
+	int rc = 0;
+	u8 reg;
+
+	if (chip->allow_software_override) {
+		reg = chip->thresh & SHUTDOWN_CTRL1_THRESHOLD_MASK;
+		reg |= (chip->clock_rate << SHUTDOWN_CTRL1_CLK_RATE_SHIFT)
+			& SHUTDOWN_CTRL1_CLK_RATE_MASK;
+
+		if (mode == SOFTWARE_OVERRIDE_ENABLED)
+			reg |= SHUTDOWN_CTRL1_OVERRIDE_STAGE2
+				| SHUTDOWN_CTRL1_OVERRIDE_STAGE3;
+
+		rc = qpnp_tm_write(chip, QPNP_TM_REG_SHUTDOWN_CTRL1, &reg, 1);
+	}
+
+	return rc;
+}
+
+static int qpnp_tm_update_temp(struct qpnp_tm_chip *chip)
+{
+	struct qpnp_vadc_result adc_result;
+	int rc;
+
+	rc = qpnp_vadc_read(chip->vadc_dev, chip->adc_channel, &adc_result);
+	if (!rc)
+		chip->temperature = adc_result.physical;
+	else
+		dev_err(&chip->pdev->dev,
+			"%s: qpnp_vadc_read(%d) failed, rc=%d\n",
+			__func__, chip->adc_channel, rc);
+
+	return rc;
+}
+
+static int qpnp_tm_get_temp_stage(struct qpnp_tm_chip *chip,
+			unsigned int *stage)
+{
+	int rc;
+	u8 reg;
+
+	rc = qpnp_tm_read(chip, QPNP_TM_REG_STATUS, &reg, 1);
+	if (rc < 0)
+		return rc;
+
+	if (chip->subtype == QPNP_TM_SUBTYPE_GEN1)
+		*stage = reg & STATUS_STAGE_MASK;
+	else
+		*stage = (reg & STATUS_STATE_MASK) >> STATUS_STATE_SHIFT;
+
+	return 0;
+}
+
+/*
+ * This function initializes the internal temperature value based on only the
+ * current thermal stage and threshold.
+ */
+static int qpnp_tm_init_temp_no_adc(struct qpnp_tm_chip *chip)
+{
+	unsigned int stage;
+	int rc;
+
+	rc = qpnp_tm_get_temp_stage(chip, &chip->stage);
+	if (rc < 0)
+		return rc;
+
+	stage = chip->subtype == QPNP_TM_SUBTYPE_GEN1
+		? chip->stage : alarm_state_map[chip->stage];
+
+	if (stage)
+		chip->temperature = chip->thresh * TEMP_THRESH_STEP +
+			   (stage - 1) * TEMP_STAGE_STEP +
+			   TEMP_THRESH_MIN;
+
+	return 0;
+}
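+
+/*
+ * Worked example for the estimate above (illustrative, not a hardware
+ * reading): with thresh = 1 (110 C) and stage = 2 the reported value is
+ * 1 * 5000 + (2 - 1) * 20000 + 105000 = 130000 mC, i.e. 130 C.
+ */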
+
+/*
+ * This function updates the internal temperature value based on the
+ * current thermal stage and threshold as well as the previous stage
+ */
+static int qpnp_tm_update_temp_no_adc(struct qpnp_tm_chip *chip)
+{
+	unsigned int stage, stage_new, stage_old;
+	int rc;
+
+	rc = qpnp_tm_get_temp_stage(chip, &stage);
+	if (rc < 0)
+		return rc;
+
+	if (chip->subtype == QPNP_TM_SUBTYPE_GEN1) {
+		stage_new = stage;
+		stage_old = chip->stage;
+	} else {
+		stage_new = alarm_state_map[stage];
+		stage_old = alarm_state_map[chip->stage];
+	}
+
+	if (stage_new > stage_old) {
+		/* increasing stage, use lower bound */
+		chip->temperature = (stage_new - 1) * TEMP_STAGE_STEP
+				+ chip->thresh * TEMP_THRESH_STEP
+				+ TEMP_STAGE_HYSTERESIS + TEMP_THRESH_MIN;
+	} else if (stage_new < stage_old) {
+		/* decreasing stage, use upper bound */
+		chip->temperature = stage_new * TEMP_STAGE_STEP
+				+ chip->thresh * TEMP_THRESH_STEP
+				- TEMP_STAGE_HYSTERESIS + TEMP_THRESH_MIN;
+	}
+
+	chip->stage = stage;
+
+	return 0;
+}
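+
+/*
+ * Example of the hysteresis above (assuming thresh = 0, i.e. 105 C):
+ * rising from stage 1 to 2 reports (2 - 1) * 20000 + 2000 + 105000 =
+ * 127000 mC, while falling from stage 2 to 1 reports 1 * 20000 - 2000 +
+ * 105000 = 123000 mC, keeping the two readings 4 C apart.
+ */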
+
+static int qpnp_tz_get_temp_no_adc(struct thermal_zone_device *thermal,
+				     int *temperature)
+{
+	struct qpnp_tm_chip *chip = thermal->devdata;
+	int rc;
+
+	if (!temperature)
+		return -EINVAL;
+
+	rc = qpnp_tm_update_temp_no_adc(chip);
+	if (rc < 0)
+		return rc;
+
+	*temperature = chip->temperature;
+
+	return 0;
+}
+
+static int qpnp_tz_get_temp_qpnp_adc(struct thermal_zone_device *thermal,
+				      int *temperature)
+{
+	struct qpnp_tm_chip *chip = thermal->devdata;
+	int rc;
+
+	if (!temperature)
+		return -EINVAL;
+
+	rc = qpnp_tm_update_temp(chip);
+	if (rc < 0) {
+		dev_err(&chip->pdev->dev,
+			"%s: %s: adc read failed, rc = %d\n",
+			__func__, chip->tm_name, rc);
+		return rc;
+	}
+
+	*temperature = chip->temperature;
+
+	return 0;
+}
+
+static int qpnp_tz_get_mode(struct thermal_zone_device *thermal,
+			      enum thermal_device_mode *mode)
+{
+	struct qpnp_tm_chip *chip = thermal->devdata;
+
+	if (!mode)
+		return -EINVAL;
+
+	*mode = chip->mode;
+
+	return 0;
+}
+
+static int qpnp_tz_set_mode(struct thermal_zone_device *thermal,
+			      enum thermal_device_mode mode)
+{
+	struct qpnp_tm_chip *chip = thermal->devdata;
+	int rc = 0;
+
+	if (mode != chip->mode) {
+		if (mode == THERMAL_DEVICE_ENABLED)
+			rc = qpnp_tm_shutdown_override(chip,
+				SOFTWARE_OVERRIDE_ENABLED);
+		else
+			rc = qpnp_tm_shutdown_override(chip,
+				SOFTWARE_OVERRIDE_DISABLED);
+
+		chip->mode = mode;
+	}
+
+	return rc;
+}
+
+static int qpnp_tz_get_trip_type(struct thermal_zone_device *thermal,
+				   int trip, enum thermal_trip_type *type)
+{
+	if (trip < 0 || !type)
+		return -EINVAL;
+
+	switch (trip) {
+	case TRIP_STAGE3:
+		*type = THERMAL_TRIP_CRITICAL;
+		break;
+	case TRIP_STAGE2:
+		*type = THERMAL_TRIP_HOT;
+		break;
+	case TRIP_STAGE1:
+		*type = THERMAL_TRIP_HOT;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int qpnp_tz_get_trip_temp(struct thermal_zone_device *thermal,
+				   int trip, int *temperature)
+{
+	struct qpnp_tm_chip *chip = thermal->devdata;
+	int thresh_temperature;
+
+	if (trip < 0 || !temperature)
+		return -EINVAL;
+
+	thresh_temperature = chip->thresh * TEMP_THRESH_STEP + TEMP_THRESH_MIN;
+
+	switch (trip) {
+	case TRIP_STAGE3:
+		thresh_temperature += 2 * TEMP_STAGE_STEP;
+		break;
+	case TRIP_STAGE2:
+		thresh_temperature += TEMP_STAGE_STEP;
+		break;
+	case TRIP_STAGE1:
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	*temperature = thresh_temperature;
+
+	return 0;
+}
+
+static int qpnp_tz_get_crit_temp(struct thermal_zone_device *thermal,
+				   int *temperature)
+{
+	struct qpnp_tm_chip *chip = thermal->devdata;
+
+	if (!temperature)
+		return -EINVAL;
+
+	*temperature = chip->thresh * TEMP_THRESH_STEP + TEMP_THRESH_MIN +
+		2 * TEMP_STAGE_STEP;
+
+	return 0;
+}
+
+static struct thermal_zone_device_ops qpnp_thermal_zone_ops_no_adc = {
+	.get_temp = qpnp_tz_get_temp_no_adc,
+	.get_mode = qpnp_tz_get_mode,
+	.set_mode = qpnp_tz_set_mode,
+	.get_trip_type = qpnp_tz_get_trip_type,
+	.get_trip_temp = qpnp_tz_get_trip_temp,
+	.get_crit_temp = qpnp_tz_get_crit_temp,
+};
+
+static struct thermal_zone_device_ops qpnp_thermal_zone_ops_qpnp_adc = {
+	.get_temp = qpnp_tz_get_temp_qpnp_adc,
+	.get_mode = qpnp_tz_get_mode,
+	.set_mode = qpnp_tz_set_mode,
+	.get_trip_type = qpnp_tz_get_trip_type,
+	.get_trip_temp = qpnp_tz_get_trip_temp,
+	.get_crit_temp = qpnp_tz_get_crit_temp,
+};
+
+static void qpnp_tm_work(struct work_struct *work)
+{
+	struct delayed_work *dwork
+		= container_of(work, struct delayed_work, work);
+	struct qpnp_tm_chip *chip
+		= container_of(dwork, struct qpnp_tm_chip, irq_work);
+	unsigned int stage_new, stage_old;
+	int rc;
+
+	if (chip->adc_type == QPNP_TM_ADC_NONE) {
+		rc = qpnp_tm_update_temp_no_adc(chip);
+		if (rc < 0)
+			goto bail;
+	} else {
+		rc = qpnp_tm_get_temp_stage(chip, &chip->stage);
+		if (rc < 0)
+			goto bail;
+
+		rc = qpnp_tm_update_temp(chip);
+		if (rc < 0)
+			goto bail;
+	}
+
+	if (chip->subtype == QPNP_TM_SUBTYPE_GEN1) {
+		stage_new = chip->stage;
+		stage_old = chip->prev_stage;
+	} else {
+		stage_new = alarm_state_map[chip->stage];
+		stage_old = alarm_state_map[chip->prev_stage];
+	}
+
+	chip->prev_stage = chip->stage;
+
+	if (stage_new != stage_old) {
+		if (chip->subtype == QPNP_TM_SUBTYPE_GEN1)
+			pr_crit("%s: PMIC Temp Alarm - stage=%u, threshold=%u, temperature=%d mC\n",
+				chip->tm_name, chip->stage, chip->thresh,
+				chip->temperature);
+		else
+			pr_crit("%s: PMIC Temp Alarm - stage=%u, state=%u, threshold=%u, temperature=%d mC\n",
+				chip->tm_name, stage_new, chip->stage,
+				chip->thresh, chip->temperature);
+
+		thermal_zone_device_update(chip->tz_dev);
+
+		/* Notify user space */
+		sysfs_notify(&chip->tz_dev->device.kobj, NULL, "type");
+	}
+
+bail:
+	return;
+}
+
+static irqreturn_t qpnp_tm_isr(int irq, void *data)
+{
+	struct qpnp_tm_chip *chip = data;
+
+	schedule_delayed_work(&chip->irq_work,
+			msecs_to_jiffies(STATUS_REGISTER_DELAY_MS) + 1);
+
+	return IRQ_HANDLED;
+}
+
+static int qpnp_tm_init_reg(struct qpnp_tm_chip *chip)
+{
+	int rc = 0;
+	u8 reg;
+
+	rc = qpnp_tm_read(chip, QPNP_TM_REG_SHUTDOWN_CTRL1, &reg, 1);
+	if (rc < 0)
+		return rc;
+
+	if (chip->thresh < THRESH_MIN || chip->thresh > THRESH_MAX) {
+		/* Use hardware threshold value if configuration is invalid. */
+		chip->thresh = reg & SHUTDOWN_CTRL1_THRESHOLD_MASK;
+	}
+
+	if (chip->clock_rate < CLOCK_RATE_MIN
+	    || chip->clock_rate > CLOCK_RATE_MAX) {
+		/* Use hardware clock rate value if configuration is invalid. */
+		chip->clock_rate = (reg & SHUTDOWN_CTRL1_CLK_RATE_MASK)
+					>> SHUTDOWN_CTRL1_CLK_RATE_SHIFT;
+	}
+
+	/*
+	 * Set threshold and clock rate and also disable software override of
+	 * stage 2 and 3 shutdowns.
+	 */
+	reg = chip->thresh & SHUTDOWN_CTRL1_THRESHOLD_MASK;
+	reg |= (chip->clock_rate << SHUTDOWN_CTRL1_CLK_RATE_SHIFT)
+		& SHUTDOWN_CTRL1_CLK_RATE_MASK;
+	rc = qpnp_tm_write(chip, QPNP_TM_REG_SHUTDOWN_CTRL1, &reg, 1);
+	if (rc < 0)
+		return rc;
+
+	/* Enable the thermal alarm PMIC module in always-on mode. */
+	reg = ALARM_CTRL_FORCE_ENABLE;
+	rc = qpnp_tm_write(chip, QPNP_TM_REG_ALARM_CTRL, &reg, 1);
+
+	return rc;
+}
+
+static int qpnp_tm_probe(struct platform_device *pdev)
+{
+	struct device_node *node;
+	unsigned int base;
+	struct qpnp_tm_chip *chip;
+	struct thermal_zone_device_ops *tz_ops;
+	char *tm_name;
+	u32 default_temperature;
+	int rc = 0;
+	u8 raw_type[2], type, subtype;
+
+	if (!pdev || !(&pdev->dev) || !pdev->dev.of_node) {
+		dev_err(&pdev->dev, "%s: device tree node not found\n",
+			__func__);
+		return -EINVAL;
+	}
+
+	node = pdev->dev.of_node;
+
+	chip = kzalloc(sizeof(struct qpnp_tm_chip), GFP_KERNEL);
+	if (!chip)
+		return -ENOMEM;
+
+	chip->regmap = dev_get_regmap(pdev->dev.parent, NULL);
+	if (!chip->regmap) {
+		dev_err(&pdev->dev, "Couldn't get parent's regmap\n");
+		rc = -EINVAL;
+		goto free_chip;
+	}
+
+	dev_set_drvdata(&pdev->dev, chip);
+
+	rc = of_property_read_u32(pdev->dev.of_node, "reg", &base);
+	if (rc < 0) {
+		dev_err(&pdev->dev,
+			"Couldn't find reg in node = %s rc = %d\n",
+			pdev->dev.of_node->full_name, rc);
+		goto free_chip;
+	}
+	chip->base_addr	= base;
+	chip->pdev	= pdev;
+
+	chip->irq = platform_get_irq(pdev, 0);
+	if (chip->irq < 0) {
+		rc = chip->irq;
+		dev_err(&pdev->dev, "%s: node is missing irq, rc=%d\n",
+			__func__, rc);
+		goto free_chip;
+	}
+
+	chip->tm_name = of_get_property(node, "label", NULL);
+	if (chip->tm_name == NULL) {
+		dev_err(&pdev->dev, "%s: node is missing label\n", __func__);
+		rc = -EINVAL;
+		goto free_chip;
+	}
+
+	tm_name = kstrdup(chip->tm_name, GFP_KERNEL);
+	if (tm_name == NULL) {
+		rc = -ENOMEM;
+		goto free_chip;
+	}
+	chip->tm_name = tm_name;
+
+	INIT_DELAYED_WORK(&chip->irq_work, qpnp_tm_work);
+
+	/* These bindings are optional, so it is okay if they are not found. */
+	chip->thresh = THRESH_MAX + 1;
+	rc = of_property_read_u32(node, "qcom,threshold-set", &chip->thresh);
+	if (!rc && (chip->thresh < THRESH_MIN || chip->thresh > THRESH_MAX))
+		dev_err(&pdev->dev,
+			"%s: invalid qcom,threshold-set=%u specified\n",
+			__func__, chip->thresh);
+
+	chip->clock_rate = CLOCK_RATE_MAX + 1;
+	rc = of_property_read_u32(node, "qcom,clock-rate", &chip->clock_rate);
+	if (!rc && (chip->clock_rate < CLOCK_RATE_MIN
+		    || chip->clock_rate > CLOCK_RATE_MAX))
+		dev_err(&pdev->dev,
+			"%s: invalid qcom,clock-rate=%u specified\n", __func__,
+			chip->clock_rate);
+
+	chip->adc_type = QPNP_TM_ADC_NONE;
+	rc = of_property_read_u32(node, "qcom,channel-num", &chip->adc_channel);
+	if (!rc) {
+		if (chip->adc_channel < 0 || chip->adc_channel >= ADC_MAX_NUM) {
+			dev_err(&pdev->dev,
+				"%s: invalid qcom,channel-num=%d specified\n",
+				__func__, chip->adc_channel);
+		} else {
+			chip->adc_type = QPNP_TM_ADC_QPNP_ADC;
+			chip->vadc_dev = qpnp_get_vadc(&pdev->dev,
+							"temp_alarm");
+			if (IS_ERR(chip->vadc_dev)) {
+				rc = PTR_ERR(chip->vadc_dev);
+				if (rc != -EPROBE_DEFER)
+					pr_err("vadc property missing\n");
+				goto err_cancel_work;
+			}
+		}
+	}
+
+	if (chip->adc_type == QPNP_TM_ADC_QPNP_ADC)
+		tz_ops = &qpnp_thermal_zone_ops_qpnp_adc;
+	else
+		tz_ops = &qpnp_thermal_zone_ops_no_adc;
+
+	chip->allow_software_override
+		= of_property_read_bool(node, "qcom,allow-override");
+
+	default_temperature = DEFAULT_NO_ADC_TEMP;
+	rc = of_property_read_u32(node, "qcom,default-temp",
+					&default_temperature);
+	chip->temperature = default_temperature;
+
+	rc = qpnp_tm_read(chip, QPNP_TM_REG_TYPE, raw_type, 2);
+	if (rc) {
+		dev_err(&pdev->dev,
+			"%s: could not read type register, rc=%d\n",
+			__func__, rc);
+		goto err_cancel_work;
+	}
+	type = raw_type[0];
+	subtype = raw_type[1];
+
+	if (type != QPNP_TM_TYPE || (subtype != QPNP_TM_SUBTYPE_GEN1
+				     && subtype != QPNP_TM_SUBTYPE_GEN2)) {
+		dev_err(&pdev->dev,
+			"%s: invalid type=%02X or subtype=%02X register value\n",
+			__func__, type, subtype);
+		rc = -ENODEV;
+		goto err_cancel_work;
+	}
+
+	chip->subtype = subtype;
+
+	rc = qpnp_tm_init_reg(chip);
+	if (rc) {
+		dev_err(&pdev->dev, "%s: qpnp_tm_init_reg() failed, rc=%d\n",
+			__func__, rc);
+		goto err_cancel_work;
+	}
+
+	if (chip->adc_type == QPNP_TM_ADC_NONE) {
+		rc = qpnp_tm_init_temp_no_adc(chip);
+		if (rc) {
+			dev_err(&pdev->dev,
+				"%s: qpnp_tm_init_temp_no_adc() failed, rc=%d\n",
+				__func__, rc);
+			goto err_cancel_work;
+		}
+	}
+
+	/* Start in HW control; switch to SW control when user changes mode. */
+	chip->mode = THERMAL_DEVICE_DISABLED;
+	rc = qpnp_tm_shutdown_override(chip, SOFTWARE_OVERRIDE_DISABLED);
+	if (rc) {
+		dev_err(&pdev->dev,
+			"%s: qpnp_tm_shutdown_override() failed, rc=%d\n",
+			__func__, rc);
+		goto err_cancel_work;
+	}
+
+	chip->tz_dev = thermal_zone_device_register(tm_name, TRIP_NUM, 0, chip,
+			tz_ops, NULL, 0, 0);
+	if (IS_ERR_OR_NULL(chip->tz_dev)) {
+		dev_err(&pdev->dev,
+			"%s: thermal_zone_device_register() failed.\n",
+			__func__);
+		rc = -ENODEV;
+		goto err_cancel_work;
+	}
+
+	rc = request_irq(chip->irq, qpnp_tm_isr, IRQF_TRIGGER_RISING, tm_name,
+			chip);
+	if (rc < 0) {
+		dev_err(&pdev->dev, "%s: request_irq(%d) failed: %d\n",
+			__func__, chip->irq, rc);
+		goto err_free_tz;
+	}
+
+	return 0;
+
+err_free_tz:
+	thermal_zone_device_unregister(chip->tz_dev);
+err_cancel_work:
+	cancel_delayed_work_sync(&chip->irq_work);
+	kfree(chip->tm_name);
+free_chip:
+	dev_set_drvdata(&pdev->dev, NULL);
+	kfree(chip);
+	return rc;
+}
+
+static int qpnp_tm_remove(struct platform_device *pdev)
+{
+	struct qpnp_tm_chip *chip = dev_get_drvdata(&pdev->dev);
+
+	dev_set_drvdata(&pdev->dev, NULL);
+	thermal_zone_device_unregister(chip->tz_dev);
+	kfree(chip->tm_name);
+	qpnp_tm_shutdown_override(chip, SOFTWARE_OVERRIDE_DISABLED);
+	free_irq(chip->irq, chip);
+	cancel_delayed_work_sync(&chip->irq_work);
+	kfree(chip);
+
+	return 0;
+}
+
+#ifdef CONFIG_PM
+static int qpnp_tm_suspend(struct device *dev)
+{
+	struct qpnp_tm_chip *chip = dev_get_drvdata(dev);
+
+	/* Clear override bits in suspend to allow hardware control */
+	qpnp_tm_shutdown_override(chip, SOFTWARE_OVERRIDE_DISABLED);
+
+	return 0;
+}
+
+static int qpnp_tm_resume(struct device *dev)
+{
+	struct qpnp_tm_chip *chip = dev_get_drvdata(dev);
+
+	/* Override hardware actions so software can control */
+	if (chip->mode == THERMAL_DEVICE_ENABLED)
+		qpnp_tm_shutdown_override(chip, SOFTWARE_OVERRIDE_ENABLED);
+
+	return 0;
+}
+
+static const struct dev_pm_ops qpnp_tm_pm_ops = {
+	.suspend = qpnp_tm_suspend,
+	.resume = qpnp_tm_resume,
+};
+
+#define QPNP_TM_PM_OPS	(&qpnp_tm_pm_ops)
+#else
+#define QPNP_TM_PM_OPS	NULL
+#endif
+
+static const struct of_device_id qpnp_tm_match_table[] = {
+	{ .compatible = QPNP_TM_DRIVER_NAME, },
+	{}
+};
+
+static const struct platform_device_id qpnp_tm_id[] = {
+	{ QPNP_TM_DRIVER_NAME, 0 },
+	{}
+};
+
+static struct platform_driver qpnp_tm_driver = {
+	.driver = {
+		.name		= QPNP_TM_DRIVER_NAME,
+		.of_match_table	= qpnp_tm_match_table,
+		.owner		= THIS_MODULE,
+		.pm		= QPNP_TM_PM_OPS,
+	},
+	.probe	  = qpnp_tm_probe,
+	.remove	  = qpnp_tm_remove,
+	.id_table = qpnp_tm_id,
+};
+
+static int __init qpnp_tm_init(void)
+{
+	return platform_driver_register(&qpnp_tm_driver);
+}
+
+static void __exit qpnp_tm_exit(void)
+{
+	platform_driver_unregister(&qpnp_tm_driver);
+}
+
+module_init(qpnp_tm_init);
+module_exit(qpnp_tm_exit);
+
+MODULE_DESCRIPTION("QPNP PMIC Temperature Alarm driver");
+MODULE_LICENSE("GPL v2");
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/tty/serial/msm_serial_hs.c	2019-10-29 09:26:24.941215880 +0100
@@ -0,0 +1,3894 @@
+/* drivers/serial/msm_serial_hs.c
+ *
+ * MSM 7k High speed uart driver
+ *
+ * Copyright (c) 2008 Google Inc.
+ * Copyright (c) 2007-2017, The Linux Foundation. All rights reserved.
+ * Modified: Nick Pelly <npelly@google.com>
+ *
+ * All source code in this file is licensed under the following license
+ * except where indicated.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ * See the GNU General Public License for more details.
+ *
+ * Has optional support for uart power management independent of linux
+ * suspend/resume:
+ *
+ * RX wakeup.
+ * UART wakeup can be triggered by RX activity (using a wakeup GPIO on the
+ * UART RX pin). This should only be used if there is not a wakeup
+ * GPIO on the UART CTS, and the first RX byte is known (for example, with the
+ * Bluetooth Texas Instruments HCILL protocol), since the first RX byte will
+ * always be lost. RTS will be asserted even while the UART is off in this mode
+ * of operation. See msm_serial_hs_platform_data.rx_wakeup_irq.
+ */
+
+#include <linux/module.h>
+
+#include <linux/serial.h>
+#include <linux/serial_core.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/atomic.h>
+#include <linux/kernel.h>
+#include <linux/timer.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/dma-mapping.h>
+#include <linux/tty_flip.h>
+#include <linux/wait.h>
+#include <linux/sysfs.h>
+#include <linux/stat.h>
+#include <linux/device.h>
+#include <linux/wakelock.h>
+#include <linux/debugfs.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_gpio.h>
+#include <linux/gpio.h>
+#include <linux/ipc_logging.h>
+#include <asm/irq.h>
+#include <linux/kthread.h>
+
+#include <linux/msm-sps.h>
+#include <linux/platform_data/msm_serial_hs.h>
+#include <linux/msm-bus.h>
+
+#include "msm_serial_hs_hwreg.h"
+#define UART_SPS_CONS_PERIPHERAL 0
+#define UART_SPS_PROD_PERIPHERAL 1
+
+#define IPC_MSM_HS_LOG_STATE_PAGES 2
+#define IPC_MSM_HS_LOG_USER_PAGES 2
+#define IPC_MSM_HS_LOG_DATA_PAGES 3
+#define UART_DMA_DESC_NR 8
+#define BUF_DUMP_SIZE 32
+
+/* If the debug_mask gets set to FATAL_LEV,
+ * a fatal error has happened and further IPC logging
+ * is disabled so that this problem can be detected
+ */
+enum {
+	FATAL_LEV = 0U,
+	ERR_LEV = 1U,
+	WARN_LEV = 2U,
+	INFO_LEV = 3U,
+	DBG_LEV = 4U,
+};
+
+#define MSM_HS_DBG(x...) do { \
+	if (msm_uport->ipc_debug_mask >= DBG_LEV) { \
+		if (msm_uport->ipc_msm_hs_log_ctxt) \
+			ipc_log_string(msm_uport->ipc_msm_hs_log_ctxt, x); \
+	} \
+} while (0)
+
+#define MSM_HS_INFO(x...) do { \
+	if (msm_uport->ipc_debug_mask >= INFO_LEV) {\
+		if (msm_uport->ipc_msm_hs_log_ctxt) \
+			ipc_log_string(msm_uport->ipc_msm_hs_log_ctxt, x); \
+	} \
+} while (0)
+
+/* warnings and errors show up on console always */
+#define MSM_HS_WARN(x...) do { \
+	pr_warn(x); \
+	if (msm_uport->ipc_msm_hs_log_ctxt && \
+			msm_uport->ipc_debug_mask >= WARN_LEV) \
+		ipc_log_string(msm_uport->ipc_msm_hs_log_ctxt, x); \
+} while (0)
+
+/* An error condition in the driver sets ipc_debug_mask to FATAL_LEV,
+ * so that this message is the last one captured in IPC logging.
+ * Further errors continue to log on the console.
+ */
+#define MSM_HS_ERR(x...) do { \
+	pr_err(x); \
+	if (msm_uport->ipc_msm_hs_log_ctxt && \
+			msm_uport->ipc_debug_mask >= ERR_LEV) { \
+		ipc_log_string(msm_uport->ipc_msm_hs_log_ctxt, x); \
+		msm_uport->ipc_debug_mask = FATAL_LEV; \
+	} \
+} while (0)
+
+#define LOG_USR_MSG(ctx, x...) do { \
+	if (ctx) \
+		ipc_log_string(ctx, x); \
+} while (0)
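+
+/*
+ * Illustrative behaviour of the levels above: with ipc_debug_mask at
+ * INFO_LEV, MSM_HS_DBG() lines are dropped while MSM_HS_INFO(),
+ * MSM_HS_WARN() and MSM_HS_ERR() are recorded; the first MSM_HS_ERR()
+ * then drops the mask to FATAL_LEV so the IPC log ends at the fault.
+ */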
+
+/*
+ * There are 3 different kinds of UART core available on MSM:
+ * High Speed UART (i.e. Legacy HSUART), GSBI based HSUART,
+ * and BLSP based HSUART.
+ */
+enum uart_core_type {
+	LEGACY_HSUART,
+	GSBI_HSUART,
+	BLSP_HSUART,
+};
+
+enum flush_reason {
+	FLUSH_NONE,
+	FLUSH_DATA_READY,
+	FLUSH_DATA_INVALID,  /* values after this indicate invalid data */
+	FLUSH_IGNORE,
+	FLUSH_STOP,
+	FLUSH_SHUTDOWN,
+};
+
+/*
+ * SPS data structures to support HSUART with BAM
+ * @sps_pipe - This struct defines a BAM pipe descriptor
+ * @sps_connect - This struct defines a connection's end point
+ * @sps_register - This struct defines event registration parameters
+ */
+struct msm_hs_sps_ep_conn_data {
+	struct sps_pipe *pipe_handle;
+	struct sps_connect config;
+	struct sps_register_event event;
+};
+
+struct msm_hs_tx {
+	bool dma_in_flight;    /* tx dma in progress */
+	enum flush_reason flush;
+	wait_queue_head_t wait;
+	int tx_count;
+	dma_addr_t dma_base;
+	struct kthread_work kwork;
+	struct kthread_worker kworker;
+	struct task_struct *task;
+	struct msm_hs_sps_ep_conn_data cons;
+	struct timer_list tx_timeout_timer;
+	void *ipc_tx_ctxt;
+};
+
+struct msm_hs_rx {
+	enum flush_reason flush;
+	wait_queue_head_t wait;
+	dma_addr_t rbuffer;
+	unsigned char *buffer;
+	unsigned int buffer_pending;
+	struct delayed_work flip_insert_work;
+	struct kthread_work kwork;
+	struct kthread_worker kworker;
+	struct task_struct *task;
+	struct msm_hs_sps_ep_conn_data prod;
+	unsigned long queued_flag;
+	unsigned long pending_flag;
+	int rx_inx;
+	struct sps_iovec iovec[UART_DMA_DESC_NR]; /* track descriptors */
+	void *ipc_rx_ctxt;
+};
+
+enum buffer_states {
+	NONE_PENDING = 0x0,
+	FIFO_OVERRUN = 0x1,
+	PARITY_ERROR = 0x2,
+	CHARS_NORMAL = 0x4,
+};
+
+enum msm_hs_pm_state {
+	MSM_HS_PM_ACTIVE,
+	MSM_HS_PM_SUSPENDED,
+	MSM_HS_PM_SYS_SUSPENDED,
+};
+
+/* optional low power wakeup, typically on a GPIO RX irq */
+struct msm_hs_wakeup {
+	int irq;  /* <= 0 indicates low power wakeup disabled */
+	unsigned char ignore;  /* bool */
+
+	/* bool: inject char into rx tty on wakeup */
+	bool inject_rx;
+	unsigned char rx_to_inject;
+	bool enabled;
+	bool freed;
+};
+
+struct msm_hs_port {
+	atomic_t startup_locked;
+	struct uart_port uport;
+	unsigned long imr_reg;  /* shadow value of UARTDM_IMR */
+	struct clk *clk;
+	struct clk *pclk;
+	struct msm_hs_tx tx;
+	struct msm_hs_rx rx;
+	atomic_t resource_count;
+	struct msm_hs_wakeup wakeup;
+
+	struct dentry *loopback_dir;
+	struct work_struct clock_off_w; /* work for actual clock off */
+	struct workqueue_struct *hsuart_wq; /* hsuart workqueue */
+	struct mutex mtx; /* resource access mutex */
+	enum uart_core_type uart_type;
+	unsigned long bam_handle;
+	resource_size_t bam_mem;
+	int bam_irq;
+	unsigned char __iomem *bam_base;
+	unsigned int bam_tx_ep_pipe_index;
+	unsigned int bam_rx_ep_pipe_index;
+	/* struct sps_event_notify is an argument passed when triggering a
+	 * callback event object registered for an SPS connection end point.
+	 */
+	struct sps_event_notify notify;
+	/* bus client handler */
+	u32 bus_perf_client;
+	/* BLSP UART required BUS Scaling data */
+	struct msm_bus_scale_pdata *bus_scale_table;
+	bool rx_bam_inprogress;
+	wait_queue_head_t bam_disconnect_wait;
+	bool use_pinctrl;
+	struct pinctrl *pinctrl;
+	struct pinctrl_state *gpio_state_active;
+	struct pinctrl_state *gpio_state_suspend;
+	bool flow_control;
+	enum msm_hs_pm_state pm_state;
+	atomic_t client_count;
+	bool obs; /* out of band sleep flag */
+	atomic_t client_req_state;
+	int sys_suspend_noirq_cnt;
+	void *ipc_msm_hs_log_ctxt;
+	void *ipc_msm_hs_pwr_ctxt;
+	int ipc_debug_mask;
+};
+
+static const struct of_device_id msm_hs_match_table[] = {
+	{ .compatible = "qcom,msm-hsuart-v14"},
+	{}
+};
+
+
+#define MSM_UARTDM_BURST_SIZE 16   /* DM burst size (in bytes) */
+#define UARTDM_TX_BUF_SIZE UART_XMIT_SIZE
+#define UARTDM_RX_BUF_SIZE 512
+#define RETRY_TIMEOUT 5
+#define UARTDM_NR 4
+#define BAM_PIPE_MIN 0
+#define BAM_PIPE_MAX 11
+#define BUS_SCALING 1
+#define BUS_RESET 0
+#define RX_FLUSH_COMPLETE_TIMEOUT 300 /* In jiffies */
+#define BLSP_UART_CLK_FMAX 63160000
+
+static struct dentry *debug_base;
+static struct platform_driver msm_serial_hs_platform_driver;
+static struct uart_driver msm_hs_driver;
+static struct uart_ops msm_hs_ops;
+static void msm_hs_start_rx_locked(struct uart_port *uport);
+static void msm_serial_hs_rx_work(struct kthread_work *work);
+static void flip_insert_work(struct work_struct *work);
+static void msm_hs_bus_voting(struct msm_hs_port *msm_uport, unsigned int vote);
+static struct msm_hs_port *msm_hs_get_hs_port(int port_index);
+static void msm_hs_queue_rx_desc(struct msm_hs_port *msm_uport);
+static int disconnect_rx_endpoint(struct msm_hs_port *msm_uport);
+static int msm_hs_pm_resume(struct device *dev);
+static void msm_hs_pm_suspend(struct device *dev);
+
+
+#define UARTDM_TO_MSM(uart_port) \
+	container_of((uart_port), struct msm_hs_port, uport)
+
+static int msm_hs_ioctl(struct uart_port *uport, unsigned int cmd,
+						unsigned long arg)
+{
+	int ret = 0, state = 1;
+	struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
+
+	if (!msm_uport)
+		return -ENODEV;
+
+	switch (cmd) {
+	case MSM_ENABLE_UART_CLOCK: {
+		ret = msm_hs_request_clock_on(&msm_uport->uport);
+		break;
+	}
+	case MSM_DISABLE_UART_CLOCK: {
+		ret = msm_hs_request_clock_off(&msm_uport->uport);
+		break;
+	}
+	case MSM_GET_UART_CLOCK_STATUS: {
+		/* Return value 0 - UART CLOCK is OFF
+		 * Return value 1 - UART CLOCK is ON */
+
+		if (msm_uport->pm_state != MSM_HS_PM_ACTIVE)
+			state = 0;
+		ret = state;
+		MSM_HS_INFO("%s():GET UART CLOCK STATUS: cmd=%d state=%d\n",
+			__func__, cmd, state);
+		break;
+	}
+	default: {
+		MSM_HS_INFO("%s():Unknown cmd specified: cmd=%d\n", __func__,
+			   cmd);
+		ret = -ENOIOCTLCMD;
+		break;
+	}
+	}
+
+	return ret;
+}
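+
+/*
+ * Illustrative userspace usage (assuming a descriptor on the HS UART
+ * device node and the MSM_* ioctl codes from the platform header):
+ *
+ *   ioctl(fd, MSM_ENABLE_UART_CLOCK);      // vote the UART clock on
+ *   ioctl(fd, MSM_GET_UART_CLOCK_STATUS);  // 0 = off, 1 = on
+ *   ioctl(fd, MSM_DISABLE_UART_CLOCK);     // drop the vote
+ */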
+
+/*
+ * This function is called initially during probe and then
+ * through the runtime PM framework. The function directly calls
+ * resource APIs to enable them.
+ */
+
+static int msm_hs_clk_bus_vote(struct msm_hs_port *msm_uport)
+{
+	int rc = 0;
+
+	msm_hs_bus_voting(msm_uport, BUS_SCALING);
+	/* Turn on core clk and iface clk */
+	if (msm_uport->pclk) {
+		rc = clk_prepare_enable(msm_uport->pclk);
+		if (rc) {
+			dev_err(msm_uport->uport.dev,
+				"%s: Could not turn on pclk [%d]\n",
+				__func__, rc);
+			goto busreset;
+		}
+	}
+	rc = clk_prepare_enable(msm_uport->clk);
+	if (rc) {
+		dev_err(msm_uport->uport.dev,
+			"%s: Could not turn on core clk [%d]\n",
+			__func__, rc);
+		goto core_unprepare;
+	}
+	MSM_HS_DBG("%s: Clock ON successful\n", __func__);
+	return rc;
+core_unprepare:
+	clk_disable_unprepare(msm_uport->pclk);
+busreset:
+	msm_hs_bus_voting(msm_uport, BUS_RESET);
+	return rc;
+}
+
+/*
+ * This function is called initially during probe and then
+ * through the runtime PM framework. The function directly calls
+ * resource APIs to disable them.
+ */
+static void msm_hs_clk_bus_unvote(struct msm_hs_port *msm_uport)
+{
+	clk_disable_unprepare(msm_uport->clk);
+	if (msm_uport->pclk)
+		clk_disable_unprepare(msm_uport->pclk);
+	msm_hs_bus_voting(msm_uport, BUS_RESET);
+	MSM_HS_DBG("%s: Clock OFF successful\n", __func__);
+}
+
+ /* Remove vote for resources when done */
+static void msm_hs_resource_unvote(struct msm_hs_port *msm_uport)
+{
+	struct uart_port *uport = &(msm_uport->uport);
+	int rc = atomic_read(&msm_uport->resource_count);
+
+	MSM_HS_DBG("%s(): power usage count %d", __func__, rc);
+	if (rc <= 0) {
+		MSM_HS_WARN("%s(): rc zero, bailing\n", __func__);
+		WARN_ON(1);
+		return;
+	}
+	atomic_dec(&msm_uport->resource_count);
+	pm_runtime_mark_last_busy(uport->dev);
+	pm_runtime_put_autosuspend(uport->dev);
+}
+
+ /* Vote for resources before accessing them */
+static void msm_hs_resource_vote(struct msm_hs_port *msm_uport)
+{
+	int ret;
+	struct uart_port *uport = &(msm_uport->uport);
+	ret = pm_runtime_get_sync(uport->dev);
+	if (ret < 0 || msm_uport->pm_state != MSM_HS_PM_ACTIVE) {
+		MSM_HS_WARN("%s:%s runtime callback not invoked ret:%d st:%d",
+			__func__, dev_name(uport->dev), ret,
+					msm_uport->pm_state);
+		msm_hs_pm_resume(uport->dev);
+	}
+	atomic_inc(&msm_uport->resource_count);
+}
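+
+/*
+ * Note: every msm_hs_resource_vote() is expected to be balanced by an
+ * msm_hs_resource_unvote(); resource_count tracks the outstanding votes
+ * on top of the runtime PM usage count so that an imbalance trips the
+ * WARN_ON() in the unvote path.
+ */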
+
+/* Check if the uport line number matches with user id stored in pdata.
+ * User id information is stored during initialization. This function
+ * ensures that the same device is selected */
+
+static struct msm_hs_port *get_matching_hs_port(struct platform_device *pdev)
+{
+	struct msm_serial_hs_platform_data *pdata = pdev->dev.platform_data;
+	struct msm_hs_port *msm_uport = msm_hs_get_hs_port(pdev->id);
+
+	if ((!msm_uport) || (msm_uport->uport.line != pdev->id
+	   && msm_uport->uport.line != pdata->userid)) {
+		pr_err("uport line number mismatch!");
+		WARN_ON(1);
+		return NULL;
+	}
+
+	return msm_uport;
+}
+
+static ssize_t show_clock(struct device *dev, struct device_attribute *attr,
+			  char *buf)
+{
+	int state = 1;
+	ssize_t ret = 0;
+	struct platform_device *pdev = container_of(dev, struct
+						    platform_device, dev);
+	struct msm_hs_port *msm_uport = get_matching_hs_port(pdev);
+
+	/* This check should not fail */
+	if (msm_uport) {
+		if (msm_uport->pm_state != MSM_HS_PM_ACTIVE)
+			state = 0;
+		ret = snprintf(buf, PAGE_SIZE, "%d\n", state);
+	}
+	return ret;
+}
+
+static ssize_t set_clock(struct device *dev, struct device_attribute *attr,
+			 const char *buf, size_t count)
+{
+	int state;
+	ssize_t ret = 0;
+	struct platform_device *pdev = container_of(dev, struct
+						    platform_device, dev);
+	struct msm_hs_port *msm_uport = get_matching_hs_port(pdev);
+
+	/* This check should not fail */
+	if (msm_uport) {
+		state = buf[0] - '0';
+		switch (state) {
+		case 0:
+			MSM_HS_DBG("%s: Request clock OFF\n", __func__);
+			msm_hs_request_clock_off(&msm_uport->uport);
+			ret = count;
+			break;
+		case 1:
+			MSM_HS_DBG("%s: Request clock ON\n", __func__);
+			msm_hs_request_clock_on(&msm_uport->uport);
+			ret = count;
+			break;
+		default:
+			ret = -EINVAL;
+		}
+	}
+	return ret;
+}
+
+static DEVICE_ATTR(clock, S_IWUSR | S_IRUGO, show_clock, set_clock);
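+
+/*
+ * Example userspace usage of the "clock" attribute (the sysfs path
+ * depends on the platform device name):
+ *   echo 1 > /sys/devices/.../clock    # request clock on
+ *   echo 0 > /sys/devices/.../clock    # request clock off
+ *   cat /sys/devices/.../clock         # 1 = on, 0 = off
+ */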
+
+static ssize_t show_debug_mask(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	ssize_t ret = 0;
+	struct platform_device *pdev = container_of(dev, struct
+						    platform_device, dev);
+	struct msm_hs_port *msm_uport = get_matching_hs_port(pdev);
+
+	/* This check should not fail */
+	if (msm_uport)
+		ret = snprintf(buf, PAGE_SIZE, "%u\n",
+					msm_uport->ipc_debug_mask);
+	return ret;
+}
+
+static ssize_t set_debug_mask(struct device *dev,
+			struct device_attribute *attr,
+			const char *buf, size_t count)
+{
+	struct platform_device *pdev = container_of(dev, struct
+						    platform_device, dev);
+	struct msm_hs_port *msm_uport = get_matching_hs_port(pdev);
+
+	/* This check should not fail */
+	if (msm_uport) {
+		msm_uport->ipc_debug_mask = buf[0] - '0';
+		if (msm_uport->ipc_debug_mask < FATAL_LEV ||
+				msm_uport->ipc_debug_mask > DBG_LEV) {
+			/* set to default level */
+			msm_uport->ipc_debug_mask = INFO_LEV;
+			MSM_HS_ERR("Range is 0 to 4;Set to default level 3\n");
+			return -EINVAL;
+		}
+	}
+	return count;
+}
+
+static DEVICE_ATTR(debug_mask, S_IWUSR | S_IRUGO, show_debug_mask,
+							set_debug_mask);
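+
+/*
+ * Example: writing a single digit 0..4 to the "debug_mask" attribute
+ * selects FATAL_LEV..DBG_LEV, e.g.
+ *   echo 4 > /sys/devices/.../debug_mask   # most verbose IPC logging
+ */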
+
+static inline bool is_use_low_power_wakeup(struct msm_hs_port *msm_uport)
+{
+	return msm_uport->wakeup.irq > 0;
+}
+
+static void msm_hs_bus_voting(struct msm_hs_port *msm_uport, unsigned int vote)
+{
+	int ret;
+
+	if (msm_uport->bus_perf_client) {
+		MSM_HS_DBG("Bus voting:%d\n", vote);
+		ret = msm_bus_scale_client_update_request(
+				msm_uport->bus_perf_client, vote);
+		if (ret)
+			MSM_HS_ERR("%s(): Failed for Bus voting: %d\n",
+							__func__, vote);
+	}
+}
+
+static inline unsigned int msm_hs_read(struct uart_port *uport,
+				       unsigned int index)
+{
+	return readl_relaxed(uport->membase + index);
+}
+
+static inline void msm_hs_write(struct uart_port *uport, unsigned int index,
+				 unsigned int value)
+{
+	writel_relaxed(value, uport->membase + index);
+}
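+
+/*
+ * Note: the relaxed accessors above do not guarantee ordering against
+ * clock or DMA operations; callers issue an explicit mb() where
+ * completion of the register write must be guaranteed (see e.g.
+ * msm_serial_loopback_enable_set()).
+ */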
+
+static int sps_rx_disconnect(struct sps_pipe *sps_pipe_handler)
+{
+	struct sps_connect config;
+	int ret;
+
+	ret = sps_get_config(sps_pipe_handler, &config);
+	if (ret) {
+		pr_err("%s: sps_get_config() failed ret %d\n", __func__, ret);
+		return ret;
+	}
+	config.options |= SPS_O_POLL;
+	ret = sps_set_config(sps_pipe_handler, &config);
+	if (ret) {
+		pr_err("%s: sps_set_config() failed ret %d\n", __func__, ret);
+		return ret;
+	}
+	return sps_disconnect(sps_pipe_handler);
+}
+
+static void hex_dump_ipc(struct msm_hs_port *msm_uport, void *ipc_ctx,
+			char *prefix, char *string, u64 addr, int size)
+
+{
+	char buf[(BUF_DUMP_SIZE * 3) + 2];
+	int len = 0;
+
+	if (msm_uport->ipc_debug_mask == FATAL_LEV)
+		return;
+	len = min(size, BUF_DUMP_SIZE);
+	/*
+	 * Print up to 32 data bytes, 32 bytes per line, 1 byte at a time and
+	 * don't include the ASCII text at the end of the buffer.
+	 */
+	hex_dump_to_buffer(string, len, 32, 1, buf, sizeof(buf), false);
+	ipc_log_string(ipc_ctx, "%s[0x%.10x:%d] : %s", prefix,
+					(unsigned int)addr, size, buf);
+}
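+
+/*
+ * Illustrative log line produced above for a 4-byte buffer at address
+ * 0x12345678 with prefix "Rx":
+ *   Rx[0x0012345678:4] : 01 02 03 04
+ */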
+
+/*
+ * This API reads and logs the UART core registers.
+ */
+static void dump_uart_hs_registers(struct msm_hs_port *msm_uport)
+{
+	struct uart_port *uport = &(msm_uport->uport);
+
+	if (msm_uport->pm_state != MSM_HS_PM_ACTIVE) {
+		MSM_HS_INFO("%s:Failed clocks are off, resource_count %d",
+			__func__, atomic_read(&msm_uport->resource_count));
+		return;
+	}
+
+	MSM_HS_DBG(
+	"MR1:%x MR2:%x TFWR:%x RFWR:%x DMEN:%x IMR:%x MISR:%x NCF_TX:%x\n",
+	msm_hs_read(uport, UART_DM_MR1),
+	msm_hs_read(uport, UART_DM_MR2),
+	msm_hs_read(uport, UART_DM_TFWR),
+	msm_hs_read(uport, UART_DM_RFWR),
+	msm_hs_read(uport, UART_DM_DMEN),
+	msm_hs_read(uport, UART_DM_IMR),
+	msm_hs_read(uport, UART_DM_MISR),
+	msm_hs_read(uport, UART_DM_NCF_TX));
+	MSM_HS_INFO("SR:%x ISR:%x DMRX:%x RX_SNAP:%x TXFS:%x RXFS:%x\n",
+	msm_hs_read(uport, UART_DM_SR),
+	msm_hs_read(uport, UART_DM_ISR),
+	msm_hs_read(uport, UART_DM_DMRX),
+	msm_hs_read(uport, UART_DM_RX_TOTAL_SNAP),
+	msm_hs_read(uport, UART_DM_TXFS),
+	msm_hs_read(uport, UART_DM_RXFS));
+	MSM_HS_DBG("rx.flush:%u\n", msm_uport->rx.flush);
+}
+
+static int msm_serial_loopback_enable_set(void *data, u64 val)
+{
+	struct msm_hs_port *msm_uport = data;
+	struct uart_port *uport = &(msm_uport->uport);
+	unsigned long flags;
+	int ret = 0;
+
+	msm_hs_resource_vote(msm_uport);
+
+	if (val) {
+		spin_lock_irqsave(&uport->lock, flags);
+		ret = msm_hs_read(uport, UART_DM_MR2);
+		ret |= (UARTDM_MR2_LOOP_MODE_BMSK |
+			UARTDM_MR2_RFR_CTS_LOOP_MODE_BMSK);
+		msm_hs_write(uport, UART_DM_MR2, ret);
+		spin_unlock_irqrestore(&uport->lock, flags);
+	} else {
+		spin_lock_irqsave(&uport->lock, flags);
+		ret = msm_hs_read(uport, UART_DM_MR2);
+		ret &= ~(UARTDM_MR2_LOOP_MODE_BMSK |
+			UARTDM_MR2_RFR_CTS_LOOP_MODE_BMSK);
+		msm_hs_write(uport, UART_DM_MR2, ret);
+		spin_unlock_irqrestore(&uport->lock, flags);
+	}
+	/* Calling CLOCK API. Hence mb() requires here. */
+	mb();
+	msm_hs_resource_unvote(msm_uport);
+	return 0;
+}
+
+static int msm_serial_loopback_enable_get(void *data, u64 *val)
+{
+	struct msm_hs_port *msm_uport = data;
+	struct uart_port *uport = &(msm_uport->uport);
+	unsigned long flags;
+	int ret = 0;
+
+	msm_hs_resource_vote(msm_uport);
+
+	spin_lock_irqsave(&uport->lock, flags);
+	ret = msm_hs_read(&msm_uport->uport, UART_DM_MR2);
+	spin_unlock_irqrestore(&uport->lock, flags);
+
+	msm_hs_resource_unvote(msm_uport);
+
+	*val = (ret & UARTDM_MR2_LOOP_MODE_BMSK) ? 1 : 0;
+
+	return 0;
+}
+DEFINE_SIMPLE_ATTRIBUTE(loopback_enable_fops, msm_serial_loopback_enable_get,
+			msm_serial_loopback_enable_set, "%llu\n");
+
+/*
+ * msm_serial_hs debugfs node: <debugfs_root>/msm_serial_hs/loopback.<id>
+ * writing 1 turns on internal loopback mode in HW. Useful for automation
+ * test scripts.
+ * writing 0 disables the internal loopback mode. Default is disabled.
+ */
+static void msm_serial_debugfs_init(struct msm_hs_port *msm_uport,
+					   int id)
+{
+	char node_name[15];
+
+	snprintf(node_name, sizeof(node_name), "loopback.%d", id);
+	msm_uport->loopback_dir = debugfs_create_file(node_name,
+						S_IRUGO | S_IWUSR,
+						debug_base,
+						msm_uport,
+						&loopback_enable_fops);
+
+	if (IS_ERR_OR_NULL(msm_uport->loopback_dir))
+		MSM_HS_ERR("%s(): Cannot create loopback.%d debug entry",
+							__func__, id);
+}
+
+static int msm_hs_remove(struct platform_device *pdev)
+{
+	struct msm_hs_port *msm_uport;
+	struct device *dev;
+
+	if (pdev->id < 0 || pdev->id >= UARTDM_NR) {
+		pr_err("Invalid plaform device ID = %d\n", pdev->id);
+		return -EINVAL;
+	}
+
+	msm_uport = get_matching_hs_port(pdev);
+	if (!msm_uport)
+		return -EINVAL;
+
+	dev = msm_uport->uport.dev;
+	sysfs_remove_file(&pdev->dev.kobj, &dev_attr_clock.attr);
+	sysfs_remove_file(&pdev->dev.kobj, &dev_attr_debug_mask.attr);
+	debugfs_remove(msm_uport->loopback_dir);
+
+	dma_free_coherent(msm_uport->uport.dev,
+			UART_DMA_DESC_NR * UARTDM_RX_BUF_SIZE,
+			msm_uport->rx.buffer, msm_uport->rx.rbuffer);
+
+	msm_uport->rx.buffer = NULL;
+	msm_uport->rx.rbuffer = 0;
+
+	destroy_workqueue(msm_uport->hsuart_wq);
+	mutex_destroy(&msm_uport->mtx);
+
+	uart_remove_one_port(&msm_hs_driver, &msm_uport->uport);
+	clk_put(msm_uport->clk);
+	if (msm_uport->pclk)
+		clk_put(msm_uport->pclk);
+
+	iounmap(msm_uport->uport.membase);
+
+	return 0;
+}
+
+
+/* Connect a UART peripheral's SPS endpoint (consumer endpoint)
+ *
+ * Also registers an SPS callback function for the consumer
+ * process with the SPS driver
+ *
+ * @msm_uport - Pointer to the msm_hs_port structure
+ *
+ * @return - 0 if successful else negative value.
+ *
+ */
+
+static int msm_hs_spsconnect_tx(struct msm_hs_port *msm_uport)
+{
+	int ret;
+	struct uart_port *uport = &msm_uport->uport;
+	struct msm_hs_tx *tx = &msm_uport->tx;
+	struct sps_pipe *sps_pipe_handle = tx->cons.pipe_handle;
+	struct sps_connect *sps_config = &tx->cons.config;
+	struct sps_register_event *sps_event = &tx->cons.event;
+	unsigned long flags;
+	unsigned int data;
+
+	if (tx->flush != FLUSH_SHUTDOWN) {
+		MSM_HS_ERR("%s:Invalid flush state:%d\n", __func__, tx->flush);
+		return 0;
+	}
+
+	/* Establish connection between peripheral and memory endpoint */
+	ret = sps_connect(sps_pipe_handle, sps_config);
+	if (ret) {
+		MSM_HS_ERR("msm_serial_hs: sps_connect() failed for tx!!\n"
+		"pipe_handle=0x%p ret=%d", sps_pipe_handle, ret);
+		return ret;
+	}
+	/* Register callback event for EOT (End of transfer) event. */
+	ret = sps_register_event(sps_pipe_handle, sps_event);
+	if (ret) {
+		MSM_HS_ERR("msm_serial_hs: sps_connect() failed for tx!!\n"
+		"pipe_handle=0x%p ret=%d", sps_pipe_handle, ret);
+		goto reg_event_err;
+	}
+
+	spin_lock_irqsave(&(msm_uport->uport.lock), flags);
+	msm_uport->tx.flush = FLUSH_STOP;
+	spin_unlock_irqrestore(&(msm_uport->uport.lock), flags);
+
+	data = msm_hs_read(uport, UART_DM_DMEN);
+	/* Enable UARTDM Tx BAM Interface */
+	data |= UARTDM_TX_BAM_ENABLE_BMSK;
+	msm_hs_write(uport, UART_DM_DMEN, data);
+
+	msm_hs_write(uport, UART_DM_CR, RESET_TX);
+	msm_hs_write(uport, UART_DM_CR, START_TX_BAM_IFC);
+	msm_hs_write(uport, UART_DM_CR, UARTDM_CR_TX_EN_BMSK);
+
+	MSM_HS_DBG("%s(): TX Connect", __func__);
+	return 0;
+
+reg_event_err:
+	sps_disconnect(sps_pipe_handle);
+	return ret;
+}
+
+/* Connect a UART peripheral's SPS endpoint (producer endpoint)
+ *
+ * Also registers an SPS callback function for the producer
+ * process with the SPS driver
+ *
+ * @uport - Pointer to uart uport structure
+ *
+ * @return - 0 if successful else negative value.
+ *
+ */
+
+static int msm_hs_spsconnect_rx(struct uart_port *uport)
+{
+	int ret;
+	struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
+	struct msm_hs_rx *rx = &msm_uport->rx;
+	struct sps_pipe *sps_pipe_handle = rx->prod.pipe_handle;
+	struct sps_connect *sps_config = &rx->prod.config;
+	struct sps_register_event *sps_event = &rx->prod.event;
+	unsigned long flags;
+
+	if (msm_uport->rx.pending_flag) {
+		MSM_HS_WARN("%s(): Buffers may be pending 0x%lx",
+			__func__, msm_uport->rx.pending_flag);
+	}
+
+	/* Establish connection between peripheral and memory endpoint */
+	ret = sps_connect(sps_pipe_handle, sps_config);
+	if (ret) {
+		MSM_HS_ERR("msm_serial_hs: sps_connect() failed for rx!!\n"
+		"pipe_handle=0x%p ret=%d", sps_pipe_handle, ret);
+		return ret;
+	}
+	/* Register callback event for DESC_DONE event. */
+	ret = sps_register_event(sps_pipe_handle, sps_event);
+	if (ret) {
+		MSM_HS_ERR("msm_serial_hs: sps_connect() failed for rx!!\n"
+		"pipe_handle=0x%p ret=%d", sps_pipe_handle, ret);
+		goto reg_event_err;
+	}
+	spin_lock_irqsave(&uport->lock, flags);
+	msm_uport->rx.queued_flag = 0;
+	msm_uport->rx.pending_flag = 0;
+	msm_uport->rx.rx_inx = 0;
+	msm_uport->rx.flush = FLUSH_STOP;
+	spin_unlock_irqrestore(&uport->lock, flags);
+	MSM_HS_DBG("%s(): RX Connect\n", __func__);
+	return 0;
+
+reg_event_err:
+	sps_disconnect(sps_pipe_handle);
+	return ret;
+}
+
+/*
+ * Programs the UARTDM_CSR register with the correct bit rate.
+ *
+ * Interrupts should be disabled before we are called, as
+ * we modify the baud rate here.
+ * Also sets the receive stale interrupt level, dependent on the bit rate.
+ * The goal is to have around 8 ms before indicating stale:
+ *   rxstale = roundup(((bit rate * 0.008) / 10) + 1)
+ */
+static void msm_hs_set_bps_locked(struct uart_port *uport,
+			       unsigned int bps)
+{
+	unsigned long rxstale;
+	unsigned long data;
+	struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
+
+	switch (bps) {
+	case 300:
+		msm_hs_write(uport, UART_DM_CSR, 0x00);
+		rxstale = 1;
+		break;
+	case 600:
+		msm_hs_write(uport, UART_DM_CSR, 0x11);
+		rxstale = 1;
+		break;
+	case 1200:
+		msm_hs_write(uport, UART_DM_CSR, 0x22);
+		rxstale = 1;
+		break;
+	case 2400:
+		msm_hs_write(uport, UART_DM_CSR, 0x33);
+		rxstale = 1;
+		break;
+	case 4800:
+		msm_hs_write(uport, UART_DM_CSR, 0x44);
+		rxstale = 1;
+		break;
+	case 9600:
+		msm_hs_write(uport, UART_DM_CSR, 0x55);
+		rxstale = 2;
+		break;
+	case 14400:
+		msm_hs_write(uport, UART_DM_CSR, 0x66);
+		rxstale = 3;
+		break;
+	case 19200:
+		msm_hs_write(uport, UART_DM_CSR, 0x77);
+		rxstale = 4;
+		break;
+	case 28800:
+		msm_hs_write(uport, UART_DM_CSR, 0x88);
+		rxstale = 6;
+		break;
+	case 38400:
+		msm_hs_write(uport, UART_DM_CSR, 0x99);
+		rxstale = 8;
+		break;
+	case 57600:
+		msm_hs_write(uport, UART_DM_CSR, 0xaa);
+		rxstale = 16;
+		break;
+	case 76800:
+		msm_hs_write(uport, UART_DM_CSR, 0xbb);
+		rxstale = 16;
+		break;
+	case 115200:
+		msm_hs_write(uport, UART_DM_CSR, 0xcc);
+		rxstale = 31;
+		break;
+	case 230400:
+		msm_hs_write(uport, UART_DM_CSR, 0xee);
+		rxstale = 31;
+		break;
+	case 460800:
+		msm_hs_write(uport, UART_DM_CSR, 0xff);
+		rxstale = 31;
+		break;
+	case 4000000:
+	case 3686400:
+	case 3200000:
+	case 3500000:
+	case 3000000:
+	case 2500000:
+	case 2000000:
+	case 1500000:
+	case 1152000:
+	case 1000000:
+	case 921600:
+		msm_hs_write(uport, UART_DM_CSR, 0xff);
+		rxstale = 31;
+		break;
+	default:
+		msm_hs_write(uport, UART_DM_CSR, 0xff);
+		/* default to 9600 */
+		bps = 9600;
+		rxstale = 2;
+		break;
+	}
+	/*
+	 * uart baud rate depends on CSR and MND Values
+	 * we are updating CSR before and then calling
+	 * clk_set_rate which updates MND Values. Hence
+	 * dsb requires here.
+	 */
+	mb();
+	if (bps > 460800) {
+		uport->uartclk = bps * 16;
+		/* BLSP based UART supports a maximum clock frequency
+		 * of 63.16 MHz. With this clock frequency the UART can
+		 * support a baud rate of 3.94 Mbps, which is treated
+		 * as equivalent to 4 Mbps. The UART hardware is robust
+		 * enough to handle this deviation.
+		 */
+		if (bps == 4000000)
+			uport->uartclk = BLSP_UART_CLK_FMAX;
+	} else {
+		uport->uartclk = 7372800;
+	}
+
+	if (clk_set_rate(msm_uport->clk, uport->uartclk)) {
+		MSM_HS_WARN("Error setting clock rate on UART\n");
+		WARN_ON(1);
+	}
+
+	data = rxstale & UARTDM_IPR_STALE_LSB_BMSK;
+	data |= UARTDM_IPR_STALE_TIMEOUT_MSB_BMSK & (rxstale << 2);
+
+	msm_hs_write(uport, UART_DM_IPR, data);
+	/*
+	 * It is suggested to do reset of transmitter and receiver after
+	 * changing any protocol configuration. Here Baud rate and stale
+	 * timeout are getting updated. Hence reset transmitter and receiver.
+	 */
+	msm_hs_write(uport, UART_DM_CR, RESET_TX);
+	msm_hs_write(uport, UART_DM_CR, RESET_RX);
+}
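+
+/*
+ * Worked example of the clock selection above: bps = 3000000 gives
+ * uartclk = 16 * 3000000 = 48 MHz; bps = 4000000 is capped at
+ * BLSP_UART_CLK_FMAX (63.16 MHz) rather than the nominal 64 MHz; any
+ * bps <= 460800 uses the fixed 7.3728 MHz rate.
+ */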
+
+
+static void msm_hs_set_std_bps_locked(struct uart_port *uport,
+			       unsigned int bps)
+{
+	unsigned long rxstale;
+	unsigned long data;
+
+	switch (bps) {
+	case 9600:
+		msm_hs_write(uport, UART_DM_CSR, 0x99);
+		rxstale = 2;
+		break;
+	case 14400:
+		msm_hs_write(uport, UART_DM_CSR, 0xaa);
+		rxstale = 3;
+		break;
+	case 19200:
+		msm_hs_write(uport, UART_DM_CSR, 0xbb);
+		rxstale = 4;
+		break;
+	case 28800:
+		msm_hs_write(uport, UART_DM_CSR, 0xcc);
+		rxstale = 6;
+		break;
+	case 38400:
+		msm_hs_write(uport, UART_DM_CSR, 0xdd);
+		rxstale = 8;
+		break;
+	case 57600:
+		msm_hs_write(uport, UART_DM_CSR, 0xee);
+		rxstale = 16;
+		break;
+	case 115200:
+		msm_hs_write(uport, UART_DM_CSR, 0xff);
+		rxstale = 31;
+		break;
+	default:
+		msm_hs_write(uport, UART_DM_CSR, 0x99);
+		/* default to 9600 */
+		bps = 9600;
+		rxstale = 2;
+		break;
+	}
+
+	data = rxstale & UARTDM_IPR_STALE_LSB_BMSK;
+	data |= UARTDM_IPR_STALE_TIMEOUT_MSB_BMSK & (rxstale << 2);
+
+	msm_hs_write(uport, UART_DM_IPR, data);
+}
+
+static void msm_hs_enable_flow_control(struct uart_port *uport, bool override)
+{
+	struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
+	unsigned int data;
+
+	if (msm_uport->flow_control || override) {
+		/* Enable RFR line */
+		msm_hs_write(uport, UART_DM_CR, RFR_LOW);
+		/* Enable auto RFR */
+		data = msm_hs_read(uport, UART_DM_MR1);
+		data |= UARTDM_MR1_RX_RDY_CTL_BMSK;
+		msm_hs_write(uport, UART_DM_MR1, data);
+		/* Ensure register IO completion */
+		mb();
+	}
+}
+
+static void msm_hs_disable_flow_control(struct uart_port *uport, bool override)
+{
+	struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
+	unsigned int data;
+
+	/*
+	 * Clear the Rx Ready Ctl bit - This ensures that
+	 * flow control lines stop the other side from sending
+	 * data while we change the parameters
+	 */
+
+	if (msm_uport->flow_control || override) {
+		data = msm_hs_read(uport, UART_DM_MR1);
+		/* disable auto ready-for-receiving */
+		data &= ~UARTDM_MR1_RX_RDY_CTL_BMSK;
+		msm_hs_write(uport, UART_DM_MR1, data);
+		/* Disable RFR line */
+		msm_hs_write(uport, UART_DM_CR, RFR_HIGH);
+		/* Ensure register IO completion */
+		mb();
+	}
+}
+
+/*
+ * termios :  new ktermios
+ * oldtermios:  old ktermios previous setting
+ *
+ * Configure the serial port
+ */
+static void msm_hs_set_termios(struct uart_port *uport,
+				   struct ktermios *termios,
+				   struct ktermios *oldtermios)
+{
+	unsigned int bps;
+	unsigned long data;
+	unsigned int c_cflag = termios->c_cflag;
+	struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
+
+	/*
+	 * set_termios can be invoked from the framework when
+	 * the clocks are off and the client has not had a chance
+	 * to turn them on. Make sure that they are on.
+	 */
+	msm_hs_resource_vote(msm_uport);
+	mutex_lock(&msm_uport->mtx);
+	msm_hs_write(uport, UART_DM_IMR, 0);
+
+	msm_hs_disable_flow_control(uport, true);
+
+	/*
+	 * Disable Rx channel of UARTDM
+	 * DMA Rx Stall happens if enqueue and flush of Rx command happens
+	 * concurrently. Hence before changing the baud rate/protocol
+	 * configuration and sending flush command to ADM, disable the Rx
+	 * channel of UARTDM.
+	 * Note: do not reset the receiver here immediately, since issuing
+	 * disable/reset or reset/disable back to back is not recommended.
+	 */
+	data = msm_hs_read(uport, UART_DM_DMEN);
+	/* Disable UARTDM RX BAM Interface */
+	data &= ~UARTDM_RX_BAM_ENABLE_BMSK;
+	msm_hs_write(uport, UART_DM_DMEN, data);
+
+	/*
+	 * Reset RX and TX.
+	 * Resetting the RX enables it, therefore we must reset and disable.
+	 */
+	msm_hs_write(uport, UART_DM_CR, RESET_RX);
+	msm_hs_write(uport, UART_DM_CR, UARTDM_CR_RX_DISABLE_BMSK);
+	msm_hs_write(uport, UART_DM_CR, RESET_TX);
+
+	/* 300 is the minimum baud rate supported by the driver */
+	bps = uart_get_baud_rate(uport, termios, oldtermios, 200, 4000000);
+
+	/* Temporary remapping: 200 baud to 3.2 Mbps */
+	if (bps == 200)
+		bps = 3200000;
+
+	uport->uartclk = clk_get_rate(msm_uport->clk);
+	if (!uport->uartclk)
+		msm_hs_set_std_bps_locked(uport, bps);
+	else
+		msm_hs_set_bps_locked(uport, bps);
+
+	data = msm_hs_read(uport, UART_DM_MR2);
+	data &= ~UARTDM_MR2_PARITY_MODE_BMSK;
+	/* set parity */
+	if (c_cflag & PARENB) {
+		if (c_cflag & PARODD)
+			data |= ODD_PARITY;
+		else if (c_cflag & CMSPAR)
+			data |= SPACE_PARITY;
+		else
+			data |= EVEN_PARITY;
+	}
+
+	/* Set bits per char */
+	data &= ~UARTDM_MR2_BITS_PER_CHAR_BMSK;
+
+	switch (c_cflag & CSIZE) {
+	case CS5:
+		data |= FIVE_BPC;
+		break;
+	case CS6:
+		data |= SIX_BPC;
+		break;
+	case CS7:
+		data |= SEVEN_BPC;
+		break;
+	default:
+		data |= EIGHT_BPC;
+		break;
+	}
+	/* stop bits */
+	if (c_cflag & CSTOPB) {
+		data |= STOP_BIT_TWO;
+	} else {
+		/* otherwise 1 stop bit */
+		data |= STOP_BIT_ONE;
+	}
+	data |= UARTDM_MR2_ERROR_MODE_BMSK;
+	/* write parity/bits per char/stop bit configuration */
+	msm_hs_write(uport, UART_DM_MR2, data);
+
+	uport->ignore_status_mask = termios->c_iflag & INPCK;
+	uport->ignore_status_mask |= termios->c_iflag & IGNPAR;
+	uport->ignore_status_mask |= termios->c_iflag & IGNBRK;
+
+	uport->read_status_mask = (termios->c_cflag & CREAD);
+
+	/* Set Transmit software time out */
+	uart_update_timeout(uport, c_cflag, bps);
+
+	/* Enable UARTDM Rx BAM Interface */
+	data = msm_hs_read(uport, UART_DM_DMEN);
+	data |= UARTDM_RX_BAM_ENABLE_BMSK;
+	msm_hs_write(uport, UART_DM_DMEN, data);
+	msm_hs_write(uport, UART_DM_CR, UARTDM_CR_RX_EN_BMSK);
+	/* Issue TX,RX BAM Start IFC command */
+	msm_hs_write(uport, UART_DM_CR, START_TX_BAM_IFC);
+	msm_hs_write(uport, UART_DM_CR, START_RX_BAM_IFC);
+	/* Ensure Register Writes Complete */
+	mb();
+
+	/*
+	 * Configure HW flow control.
+	 * The UART core samples the CTS line while sending data to the
+	 * remote UART to learn whether it may transmit, and asserts RFR
+	 * itself when its RX FIFO has no more space.
+	 */
+	/* Drive the RFR line low (ready to receive) */
+	msm_hs_write(uport, UART_DM_CR, RFR_LOW);
+	data = msm_hs_read(uport, UART_DM_MR1);
+	data &= ~(UARTDM_MR1_CTS_CTL_BMSK | UARTDM_MR1_RX_RDY_CTL_BMSK);
+	if (c_cflag & CRTSCTS) {
+		data |= UARTDM_MR1_CTS_CTL_BMSK;
+		data |= UARTDM_MR1_RX_RDY_CTL_BMSK;
+		msm_uport->flow_control = true;
+	}
+	msm_hs_write(uport, UART_DM_MR1, data);
+	MSM_HS_INFO("%s: Cflags 0x%x Baud %u\n", __func__, c_cflag, bps);
+
+	mutex_unlock(&msm_uport->mtx);
+
+	msm_hs_resource_unvote(msm_uport);
+}
+
+/*
+ *  Standard API: report whether the transmitter is empty, i.e.
+ *  every character in the transmit shift register has been sent.
+ */
+unsigned int msm_hs_tx_empty(struct uart_port *uport)
+{
+	unsigned int data;
+	unsigned int isr;
+	unsigned int ret = 0;
+	struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
+
+	msm_hs_resource_vote(msm_uport);
+	data = msm_hs_read(uport, UART_DM_SR);
+	isr = msm_hs_read(uport, UART_DM_ISR);
+	msm_hs_resource_unvote(msm_uport);
+	MSM_HS_INFO("%s(): SR:0x%x ISR:0x%x ", __func__, data, isr);
+
+	if (data & UARTDM_SR_TXEMT_BMSK) {
+		ret = TIOCSER_TEMT;
+	} else
+		/*
+		 * Add an extra sleep here because sometimes the framework's
+		 * delay (based on baud rate) isn't good enough.
+		 * Note that this won't happen on every port close, only
+		 * on select occasions when userspace does back-to-back
+		 * write() and close().
+		 */
+		usleep_range(5000, 7000);
+
+	return ret;
+}
+EXPORT_SYMBOL(msm_hs_tx_empty);
+
+/*
+ *  Standard API, Stop transmitter.
+ *  Any character in the transmit shift register is still sent, as
+ *  is the data mover transfer currently in flight.
+ */
+static void msm_hs_stop_tx_locked(struct uart_port *uport)
+{
+	struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
+	struct msm_hs_tx *tx = &msm_uport->tx;
+
+	tx->flush = FLUSH_STOP;
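+	/*
+	 * Only the flush state is marked; the BAM transfer already in
+	 * flight is left to complete, and msm_hs_start_tx_locked()
+	 * consults tx->flush before queueing a new one.
+	 */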
+}
+
+static int disconnect_rx_endpoint(struct msm_hs_port *msm_uport)
+{
+	struct msm_hs_rx *rx = &msm_uport->rx;
+	struct sps_pipe *sps_pipe_handle = rx->prod.pipe_handle;
+	int ret = 0;
+
+	ret = sps_rx_disconnect(sps_pipe_handle);
+	if (ret)
+		MSM_HS_ERR("%s(): sps_disconnect failed\n", __func__);
+
+	if (msm_uport->rx.pending_flag)
+		MSM_HS_WARN("%s(): Buffers may be pending 0x%lx",
+			__func__, msm_uport->rx.pending_flag);
+	MSM_HS_DBG("%s(): clearing desc usage flag", __func__);
+	msm_uport->rx.queued_flag = 0;
+	msm_uport->rx.pending_flag = 0;
+	msm_uport->rx.rx_inx = 0;
+
+	msm_uport->rx.flush = FLUSH_SHUTDOWN;
+	MSM_HS_DBG("%s: Calling Completion\n", __func__);
+	wake_up(&msm_uport->bam_disconnect_wait);
+	MSM_HS_DBG("%s: Done Completion\n", __func__);
+	wake_up(&msm_uport->rx.wait);
+	return ret;
+}
+
+static int sps_tx_disconnect(struct msm_hs_port *msm_uport)
+{
+	struct uart_port *uport = &msm_uport->uport;
+	struct msm_hs_tx *tx = &msm_uport->tx;
+	struct sps_pipe *tx_pipe = tx->cons.pipe_handle;
+	unsigned long flags;
+	int ret = 0;
+
+	if (msm_uport->tx.flush == FLUSH_SHUTDOWN) {
+		MSM_HS_DBG("%s(): pipe already disconnected", __func__);
+		return ret;
+	}
+
+	ret = sps_disconnect(tx_pipe);
+
+	if (ret) {
+		MSM_HS_ERR("%s(): sps_disconnect failed %d", __func__, ret);
+		return ret;
+	}
+
+	spin_lock_irqsave(&uport->lock, flags);
+	msm_uport->tx.flush = FLUSH_SHUTDOWN;
+	spin_unlock_irqrestore(&uport->lock, flags);
+
+	MSM_HS_DBG("%s(): TX Disconnect", __func__);
+	return ret;
+}
+
+static void msm_hs_disable_rx(struct uart_port *uport)
+{
+	unsigned int data;
+
+	data = msm_hs_read(uport, UART_DM_DMEN);
+	data &= ~UARTDM_RX_BAM_ENABLE_BMSK;
+	msm_hs_write(uport, UART_DM_DMEN, data);
+}
+
+/*
+ *  Standard API, Stop receiver as soon as possible.
+ *
+ *  Immediately terminates the operation of the channel receiver; any
+ *  incoming characters are lost. None of the receiver status bits are
+ *  affected by this command, and characters already in the receive
+ *  FIFO are retained there.
+ */
+static void msm_hs_stop_rx_locked(struct uart_port *uport)
+{
+	struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
+
+	if (msm_uport->pm_state != MSM_HS_PM_ACTIVE) {
+		MSM_HS_WARN("%s(): Clocks are off, Rx still active\n",
+				__func__);
+		return;
+	}
+	msm_hs_disable_rx(uport);
+
+	if (msm_uport->rx.flush == FLUSH_NONE)
+		msm_uport->rx.flush = FLUSH_STOP;
+}
+
+static void msm_hs_disconnect_rx(struct uart_port *uport)
+{
+	struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
+	struct msm_hs_rx *rx = &msm_uport->rx;
+	struct sps_pipe *sps_pipe_handle = rx->prod.pipe_handle;
+	u32 prod_empty = 0;
+
+	msm_hs_disable_rx(uport);
+	/* Disconnect the BAM RX pipe */
+	if (msm_uport->rx.flush == FLUSH_NONE)
+		msm_uport->rx.flush = FLUSH_STOP;
+
+	if (!sps_is_pipe_empty(sps_pipe_handle, &prod_empty)) {
+		if (!prod_empty)
+			MSM_HS_WARN("%s():Pipe Not Empty, prod=%d, flush=%d\n",
+				__func__, prod_empty, msm_uport->rx.flush);
+	}
+	disconnect_rx_endpoint(msm_uport);
+	MSM_HS_DBG("%s(): rx->flush %d", __func__, msm_uport->rx.flush);
+}
+
+/* Tx timeout callback function */
+void tx_timeout_handler(unsigned long arg)
+{
+	struct msm_hs_port *msm_uport = (struct msm_hs_port *) arg;
+	struct uart_port *uport = &msm_uport->uport;
+	int isr;
+
+	if (msm_uport->pm_state != MSM_HS_PM_ACTIVE) {
+		MSM_HS_WARN("%s(): clocks are off", __func__);
+		return;
+	}
+
+	isr = msm_hs_read(uport, UART_DM_ISR);
+	if (UARTDM_ISR_CURRENT_CTS_BMSK & isr)
+		MSM_HS_WARN("%s(): CTS Disabled, ISR 0x%x", __func__, isr);
+	dump_uart_hs_registers(msm_uport);
+	/* Stop further logging */
+	MSM_HS_ERR("%s(): Stop IPC logging\n", __func__);
+}
+
+/*  Transmit the next chunk of data */
+static void msm_hs_submit_tx_locked(struct uart_port *uport)
+{
+	int left;
+	int tx_count;
+	int aligned_tx_count;
+	dma_addr_t src_addr;
+	dma_addr_t aligned_src_addr;
+	u32 flags = SPS_IOVEC_FLAG_EOT | SPS_IOVEC_FLAG_INT;
+	struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
+	struct msm_hs_tx *tx = &msm_uport->tx;
+	struct circ_buf *tx_buf = &msm_uport->uport.state->xmit;
+	struct sps_pipe *sps_pipe_handle;
+	int ret;
+
+	if (uart_circ_empty(tx_buf) || uport->state->port.tty->stopped) {
+		tx->dma_in_flight = false;
+		msm_hs_stop_tx_locked(uport);
+		return;
+	}
+
+	tx_count = uart_circ_chars_pending(tx_buf);
+
+	if (UARTDM_TX_BUF_SIZE < tx_count)
+		tx_count = UARTDM_TX_BUF_SIZE;
+
+	left = UART_XMIT_SIZE - tx_buf->tail;
+
+	if (tx_count > left)
+		tx_count = left;
+
+	src_addr = tx->dma_base + tx_buf->tail;
+	/*
+	 * Align src_addr down to a cache-line boundary and widen
+	 * tx_count by the bytes skipped so whole lines are synced.
+	 */
+	aligned_src_addr = src_addr & ~(dma_get_cache_alignment() - 1);
+	aligned_tx_count = tx_count + src_addr - aligned_src_addr;
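+	/*
+	 * Illustrative example, assuming dma_get_cache_alignment()
+	 * returns 64: src_addr = base + 0x123 gives aligned_src_addr =
+	 * base + 0x100 and aligned_tx_count = tx_count + 0x23, so the
+	 * sync below covers every cache line the transfer touches.
+	 */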
+
+	dma_sync_single_for_device(uport->dev, aligned_src_addr,
+			aligned_tx_count, DMA_TO_DEVICE);
+
+	tx->tx_count = tx_count;
+
+	hex_dump_ipc(msm_uport, tx->ipc_tx_ctxt, "Tx",
+			&tx_buf->buf[tx_buf->tail], (u64)src_addr, tx_count);
+	sps_pipe_handle = tx->cons.pipe_handle;
+
+	/* Set 1 second timeout */
+	mod_timer(&tx->tx_timeout_timer,
+		jiffies + msecs_to_jiffies(MSEC_PER_SEC));
+	/* Queue transfer request to SPS */
+	ret = sps_transfer_one(sps_pipe_handle, src_addr, tx_count,
+				msm_uport, flags);
+
+	MSM_HS_DBG("%s:Enqueue Tx Cmd, ret %d\n", __func__, ret);
+}
+
+/* This function queues the rx descriptor for BAM transfer */
+static void msm_hs_post_rx_desc(struct msm_hs_port *msm_uport, int inx)
+{
+	u32 flags = SPS_IOVEC_FLAG_INT;
+	struct msm_hs_rx *rx = &msm_uport->rx;
+	int ret;
+
+	phys_addr_t rbuff_addr = rx->rbuffer + (UARTDM_RX_BUF_SIZE * inx);
+	u8 *virt_addr = rx->buffer + (UARTDM_RX_BUF_SIZE * inx);
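+	/*
+	 * The rx DMA area is one allocation of UART_DMA_DESC_NR buffers
+	 * of UARTDM_RX_BUF_SIZE bytes each (see uartdm_init_port());
+	 * inx selects a single slot of that ring.
+	 */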
+
+	MSM_HS_DBG("%s: %d:Queue desc %d, 0x%llx, base 0x%llx virtaddr %p",
+		__func__, msm_uport->uport.line, inx,
+		(u64)rbuff_addr, (u64)rx->rbuffer, virt_addr);
+
+	rx->iovec[inx].size = 0;
+	ret = sps_transfer_one(rx->prod.pipe_handle, rbuff_addr,
+		UARTDM_RX_BUF_SIZE, msm_uport, flags);
+
+	if (ret)
+		MSM_HS_ERR("Error queueing rx descriptor, ret %d", ret);
+}
+
+/* Update the rx descriptor index to specify the next one to be processed */
+static void msm_hs_mark_next(struct msm_hs_port *msm_uport, int inx)
+{
+	struct msm_hs_rx *rx = &msm_uport->rx;
+	int prev;
+
+	inx %= UART_DMA_DESC_NR;
+	MSM_HS_DBG("%s(): inx %d, pending 0x%lx", __func__, inx,
+		rx->pending_flag);
+
+	if (!inx)
+		prev = UART_DMA_DESC_NR - 1;
+	else
+		prev = inx - 1;
+
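+	/*
+	 * rx_inx only advances when the previous descriptor is no
+	 * longer pending, so completions are consumed in ring order.
+	 */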
+	if (!test_bit(prev, &rx->pending_flag))
+		msm_uport->rx.rx_inx = inx;
+	MSM_HS_DBG("%s(): prev %d pending flag 0x%lx, next %d", __func__,
+		prev, rx->pending_flag, msm_uport->rx.rx_inx);
+}
+
+/*
+ *	Queue the rx descriptor that has just been processed or
+ *	all of them if queueing for the first time
+ */
+static void msm_hs_queue_rx_desc(struct msm_hs_port *msm_uport)
+{
+	struct msm_hs_rx *rx = &msm_uport->rx;
+	int i, flag = 0;
+
+	/* First time: queue all descriptors; afterwards only the freed one */
+	if (rx->queued_flag || rx->pending_flag) {
+		if (!test_bit(rx->rx_inx, &rx->queued_flag) &&
+		    !test_bit(rx->rx_inx, &rx->pending_flag)) {
+			msm_hs_post_rx_desc(msm_uport, rx->rx_inx);
+			set_bit(rx->rx_inx, &rx->queued_flag);
+			MSM_HS_DBG("%s(): Set Queued Bit %d",
+				__func__, rx->rx_inx);
+		} else
+			MSM_HS_ERR("%s(): rx_inx pending or queued", __func__);
+		return;
+	}
+
+	for (i = 0; i < UART_DMA_DESC_NR; i++) {
+		if (!test_bit(i, &rx->queued_flag) &&
+		    !test_bit(i, &rx->pending_flag)) {
+			MSM_HS_DBG("%s(): Calling post rx %d", __func__, i);
+			msm_hs_post_rx_desc(msm_uport, i);
+			set_bit(i, &rx->queued_flag);
+			flag = 1;
+		}
+	}
+
+	if (!flag)
+		MSM_HS_ERR("%s(): error queueing descriptor", __func__);
+}
+
+/* Start to receive the next chunk of data */
+static void msm_hs_start_rx_locked(struct uart_port *uport)
+{
+	struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
+	struct msm_hs_rx *rx = &msm_uport->rx;
+	unsigned int buffer_pending = msm_uport->rx.buffer_pending;
+	unsigned int data;
+
+	if (msm_uport->pm_state != MSM_HS_PM_ACTIVE) {
+		MSM_HS_WARN("%s(): Clocks are off\n", __func__);
+		return;
+	}
+	if (rx->pending_flag) {
+		MSM_HS_INFO("%s: Rx Cmd got executed, wait for rx_tlet\n",
+								 __func__);
+		rx->flush = FLUSH_IGNORE;
+		return;
+	}
+	if (buffer_pending)
+		MSM_HS_ERR("Error: rx started in buffer state =%x",
+			buffer_pending);
+
+	msm_hs_write(uport, UART_DM_CR, RESET_STALE_INT);
+	msm_hs_write(uport, UART_DM_DMRX, UARTDM_RX_BUF_SIZE);
+	msm_hs_write(uport, UART_DM_CR, STALE_EVENT_ENABLE);
+	/*
+	 * Re-enable the UARTDM RX interface; it was disabled in
+	 * set_termios before the baud rate was configured.
+	 */
+	data = msm_hs_read(uport, UART_DM_DMEN);
+	/* Enable UARTDM Rx BAM Interface */
+	data |= UARTDM_RX_BAM_ENABLE_BMSK;
+
+	msm_hs_write(uport, UART_DM_DMEN, data);
+	msm_hs_write(uport, UART_DM_IMR, msm_uport->imr_reg);
+	/* Complete register writes before the next DMOV call; hence mb() */
+	mb();
+
+	/*
+	 * RX-transfer will be automatically re-activated
+	 * after last data of previous transfer was read.
+	 */
+	data = (RX_STALE_AUTO_RE_EN | RX_TRANS_AUTO_RE_ACTIVATE |
+				RX_DMRX_CYCLIC_EN);
+	msm_hs_write(uport, UART_DM_RX_TRANS_CTRL, data);
+	/* Issue RX BAM Start IFC command */
+	msm_hs_write(uport, UART_DM_CR, START_RX_BAM_IFC);
+	/* Ensure register IO completion */
+	mb();
+
+	msm_uport->rx.flush = FLUSH_NONE;
+	msm_uport->rx_bam_inprogress = true;
+	msm_hs_queue_rx_desc(msm_uport);
+	msm_uport->rx_bam_inprogress = false;
+	wake_up(&msm_uport->rx.wait);
+	MSM_HS_DBG("%s:Enqueue Rx Cmd\n", __func__);
+}
+
+static void flip_insert_work(struct work_struct *work)
+{
+	unsigned long flags;
+	int retval;
+	struct msm_hs_port *msm_uport =
+		container_of(work, struct msm_hs_port,
+			     rx.flip_insert_work.work);
+	struct tty_struct *tty = msm_uport->uport.state->port.tty;
+
+	spin_lock_irqsave(&msm_uport->uport.lock, flags);
+	if (!tty || msm_uport->rx.flush == FLUSH_SHUTDOWN) {
+		dev_err(msm_uport->uport.dev,
+			"%s:Invalid driver state flush %d\n",
+				__func__, msm_uport->rx.flush);
+		MSM_HS_ERR("%s:Invalid driver state flush %d\n",
+				__func__, msm_uport->rx.flush);
+		spin_unlock_irqrestore(&msm_uport->uport.lock, flags);
+		return;
+	}
+
+	if (msm_uport->rx.buffer_pending == NONE_PENDING) {
+		MSM_HS_ERR("Error: No buffer pending in %s", __func__);
+		spin_unlock_irqrestore(&msm_uport->uport.lock, flags);
+		return;
+	}
+	if (msm_uport->rx.buffer_pending & FIFO_OVERRUN) {
+		retval = tty_insert_flip_char(tty->port, 0, TTY_OVERRUN);
+		if (retval)
+			msm_uport->rx.buffer_pending &= ~FIFO_OVERRUN;
+	}
+	if (msm_uport->rx.buffer_pending & PARITY_ERROR) {
+		retval = tty_insert_flip_char(tty->port, 0, TTY_PARITY);
+		if (retval)
+			msm_uport->rx.buffer_pending &= ~PARITY_ERROR;
+	}
+	if (msm_uport->rx.buffer_pending & CHARS_NORMAL) {
+		int rx_count, rx_offset;
+		rx_count = (msm_uport->rx.buffer_pending & 0xFFFF0000) >> 16;
+		rx_offset = (msm_uport->rx.buffer_pending & 0xFFD0) >> 5;
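+		/*
+		 * buffer_pending packs the retry state: the upper 16
+		 * bits hold the count still to insert and the masked
+		 * middle bits the offset already consumed, with the
+		 * low bits left for the error flags.
+		 */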
+		retval = tty_insert_flip_string(tty->port,
+			msm_uport->rx.buffer +
+			(msm_uport->rx.rx_inx * UARTDM_RX_BUF_SIZE)
+			+ rx_offset, rx_count);
+		msm_uport->rx.buffer_pending &= (FIFO_OVERRUN |
+						 PARITY_ERROR);
+		if (retval != rx_count)
+			msm_uport->rx.buffer_pending |= CHARS_NORMAL |
+				retval << 8 | (rx_count - retval) << 16;
+	}
+	if (msm_uport->rx.buffer_pending) {
+		schedule_delayed_work(&msm_uport->rx.flip_insert_work,
+				      msecs_to_jiffies(RETRY_TIMEOUT));
+	} else if (msm_uport->rx.flush <= FLUSH_IGNORE) {
+		MSM_HS_WARN("Pending buffers cleared, restarting");
+		clear_bit(msm_uport->rx.rx_inx,
+			&msm_uport->rx.pending_flag);
+		msm_hs_start_rx_locked(&msm_uport->uport);
+		msm_hs_mark_next(msm_uport, msm_uport->rx.rx_inx + 1);
+	}
+	spin_unlock_irqrestore(&msm_uport->uport.lock, flags);
+	tty_flip_buffer_push(tty->port);
+}
+
+static void msm_serial_hs_rx_work(struct kthread_work *work)
+{
+	int retval;
+	int rx_count = 0;
+	unsigned long status;
+	unsigned long flags;
+	unsigned int error_f = 0;
+	struct uart_port *uport;
+	struct msm_hs_port *msm_uport;
+	unsigned int flush = FLUSH_DATA_INVALID;
+	struct tty_struct *tty;
+	struct sps_event_notify *notify;
+	struct msm_hs_rx *rx;
+	struct sps_pipe *sps_pipe_handle;
+	struct platform_device *pdev;
+	const struct msm_serial_hs_platform_data *pdata;
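+	/*
+	 * flush starts as FLUSH_DATA_INVALID so the tty_flip_buffer_push()
+	 * at the end of this function only runs once the loop below has
+	 * sampled rx.flush in a valid state.
+	 */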
+
+	msm_uport = container_of((struct kthread_work *) work,
+				 struct msm_hs_port, rx.kwork);
+	msm_hs_resource_vote(msm_uport);
+	uport = &msm_uport->uport;
+	tty = uport->state->port.tty;
+	notify = &msm_uport->notify;
+	rx = &msm_uport->rx;
+	pdev = to_platform_device(uport->dev);
+	pdata = pdev->dev.platform_data;
+
+	spin_lock_irqsave(&uport->lock, flags);
+
+	if (!tty || rx->flush == FLUSH_SHUTDOWN) {
+		dev_err(uport->dev, "%s:Invalid driver state flush %d\n",
+				__func__, rx->flush);
+		MSM_HS_ERR("%s:Invalid driver state flush %d\n",
+				__func__, rx->flush);
+		spin_unlock_irqrestore(&uport->lock, flags);
+		msm_hs_resource_unvote(msm_uport);
+		return;
+	}
+
+	/*
+	 * Process all pending descriptors, or prime the queue when
+	 * nothing is queued at all (the termios path).
+	 */
+	while (!rx->buffer_pending &&
+		(rx->pending_flag || !rx->queued_flag)) {
+		MSM_HS_DBG("%s(): Loop P 0x%lx Q 0x%lx", __func__,
+			rx->pending_flag, rx->queued_flag);
+
+		status = msm_hs_read(uport, UART_DM_SR);
+
+		MSM_HS_DBG("In %s\n", __func__);
+
+		/* An overrun is a line error, not tied to data in the FIFO */
+		if (unlikely((status & UARTDM_SR_OVERRUN_BMSK) &&
+			     (uport->read_status_mask & CREAD))) {
+			retval = tty_insert_flip_char(tty->port,
+							0, TTY_OVERRUN);
+			MSM_HS_WARN("%s(): RX Buffer Overrun Detected\n",
+				__func__);
+			if (!retval)
+				msm_uport->rx.buffer_pending |= TTY_OVERRUN;
+			uport->icount.buf_overrun++;
+			error_f = 1;
+		}
+
+		if (!(uport->ignore_status_mask & INPCK))
+			status = status & ~(UARTDM_SR_PAR_FRAME_BMSK);
+
+		if (unlikely(status & UARTDM_SR_PAR_FRAME_BMSK)) {
+			/* Cannot tell a parity error from a framing error */
+			MSM_HS_WARN("msm_serial_hs: parity error\n");
+			uport->icount.parity++;
+			error_f = 1;
+			if (!(uport->ignore_status_mask & IGNPAR)) {
+				retval = tty_insert_flip_char(tty->port,
+							0, TTY_PARITY);
+				if (!retval)
+					msm_uport->rx.buffer_pending
+								|= TTY_PARITY;
+			}
+		}
+
+		if (unlikely(status & UARTDM_SR_RX_BREAK_BMSK)) {
+			MSM_HS_DBG("msm_serial_hs: Rx break\n");
+			uport->icount.brk++;
+			error_f = 1;
+			if (!(uport->ignore_status_mask & IGNBRK)) {
+				retval = tty_insert_flip_char(tty->port,
+								0, TTY_BREAK);
+				if (!retval)
+					msm_uport->rx.buffer_pending
+								|= TTY_BREAK;
+			}
+		}
+
+		if (error_f)
+			msm_hs_write(uport, UART_DM_CR,	RESET_ERROR_STATUS);
+		flush = msm_uport->rx.flush;
+		if (flush == FLUSH_IGNORE &&
+		    !msm_uport->rx.buffer_pending) {
+			MSM_HS_DBG("%s: calling start_rx_locked\n",
+				__func__);
+			msm_hs_start_rx_locked(uport);
+		}
+		if (flush >= FLUSH_DATA_INVALID)
+			goto out;
+
+		rx_count = msm_uport->rx.iovec[msm_uport->rx.rx_inx].size;
+		hex_dump_ipc(msm_uport, rx->ipc_rx_ctxt, "Rx",
+			(msm_uport->rx.buffer +
+			(msm_uport->rx.rx_inx * UARTDM_RX_BUF_SIZE)),
+			msm_uport->rx.iovec[msm_uport->rx.rx_inx].addr,
+			rx_count);
+
+		 /*
+		  * We are in a spin locked context, spin lock taken at
+		  * other places where these flags are updated
+		  */
+		if (uport->read_status_mask & CREAD) {
+			if (!test_bit(msm_uport->rx.rx_inx,
+				&msm_uport->rx.pending_flag) &&
+			    !test_bit(msm_uport->rx.rx_inx,
+				&msm_uport->rx.queued_flag))
+				MSM_HS_ERR("%s: RX INX not set", __func__);
+			else if (test_bit(msm_uport->rx.rx_inx,
+					&msm_uport->rx.pending_flag) &&
+				!test_bit(msm_uport->rx.rx_inx,
+					&msm_uport->rx.queued_flag)) {
+				MSM_HS_DBG("%s(): Clear Pending Bit %d",
+					__func__, msm_uport->rx.rx_inx);
+
+				retval = tty_insert_flip_string(tty->port,
+					msm_uport->rx.buffer +
+					(msm_uport->rx.rx_inx *
+					UARTDM_RX_BUF_SIZE),
+					rx_count);
+
+				if (retval != rx_count) {
+					MSM_HS_INFO("%s(): ret %d rx_count %d",
+						__func__, retval, rx_count);
+					msm_uport->rx.buffer_pending |=
+					CHARS_NORMAL | retval << 5 |
+					(rx_count - retval) << 16;
+				}
+			} else
+				MSM_HS_ERR("%s: Error in inx %d", __func__,
+					msm_uport->rx.rx_inx);
+		}
+
+		if (!msm_uport->rx.buffer_pending) {
+			msm_uport->rx.flush = FLUSH_NONE;
+			msm_uport->rx_bam_inprogress = true;
+			sps_pipe_handle = rx->prod.pipe_handle;
+			MSM_HS_DBG("Queueing bam descriptor\n");
+			/* Queue transfer request to SPS */
+			clear_bit(msm_uport->rx.rx_inx,
+				&msm_uport->rx.pending_flag);
+			msm_hs_queue_rx_desc(msm_uport);
+			msm_hs_mark_next(msm_uport, msm_uport->rx.rx_inx+1);
+			msm_hs_write(uport, UART_DM_CR, START_RX_BAM_IFC);
+			msm_uport->rx_bam_inprogress = false;
+			wake_up(&msm_uport->rx.wait);
+		} else
+			break;
+
+	}
+out:
+	if (msm_uport->rx.buffer_pending) {
+		MSM_HS_WARN("%s: tty buffer exhausted. Stalling\n", __func__);
+		schedule_delayed_work(&msm_uport->rx.flip_insert_work,
+				      msecs_to_jiffies(RETRY_TIMEOUT));
+	}
+	/* tty_flip_buffer_push() might call msm_hs_start(), so unlock */
+	spin_unlock_irqrestore(&uport->lock, flags);
+	if (flush < FLUSH_DATA_INVALID)
+		tty_flip_buffer_push(tty->port);
+	msm_hs_resource_unvote(msm_uport);
+}
+
+static void msm_hs_start_tx_locked(struct uart_port *uport)
+{
+	struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
+	struct msm_hs_tx *tx = &msm_uport->tx;
+	unsigned int isr;
+
+	/* Bail if transfer in progress */
+	if (tx->flush < FLUSH_STOP || tx->dma_in_flight) {
+		MSM_HS_INFO("%s(): retry, flush %d, dma_in_flight %d\n",
+			__func__, tx->flush, tx->dma_in_flight);
+
+		if (msm_uport->pm_state == MSM_HS_PM_ACTIVE) {
+			isr = msm_hs_read(uport, UART_DM_ISR);
+			if (UARTDM_ISR_CURRENT_CTS_BMSK & isr) {
+				MSM_HS_DBG("%s():CTS 1: Peer is Busy\n",
+					__func__);
+				MSM_HS_DBG("%s():ISR 0x%x\n",
+					__func__, isr);
+			}
+		} else
+			MSM_HS_WARN("%s(): Clocks are off\n", __func__);
+
+		return;
+	}
+
+	if (!tx->dma_in_flight) {
+		tx->dma_in_flight = true;
+		queue_kthread_work(&msm_uport->tx.kworker,
+			&msm_uport->tx.kwork);
+	}
+}
+
+/*
+ * Callback notification from the SPS driver.
+ *
+ * Invoked by the SPS driver when a requested SPS data transfer
+ * has completed.
+ */
+static void msm_hs_sps_tx_callback(struct sps_event_notify *notify)
+{
+	struct msm_hs_port *msm_uport =
+		(struct msm_hs_port *)notify->user;
+	phys_addr_t addr = DESC_FULL_ADDR(notify->data.transfer.iovec.flags,
+		notify->data.transfer.iovec.addr);
+
+	msm_uport->notify = *notify;
+	MSM_HS_INFO("tx_cb: addr=0x%pa, size=0x%x, flags=0x%x\n",
+		&addr, notify->data.transfer.iovec.size,
+		notify->data.transfer.iovec.flags);
+
+	del_timer(&msm_uport->tx.tx_timeout_timer);
+	MSM_HS_DBG("%s(): Queue kthread work", __func__);
+	queue_kthread_work(&msm_uport->tx.kworker, &msm_uport->tx.kwork);
+}
+
+static void msm_serial_hs_tx_work(struct kthread_work *work)
+{
+	unsigned long flags;
+	struct msm_hs_port *msm_uport =
+			container_of((struct kthread_work *)work,
+			struct msm_hs_port, tx.kwork);
+	struct uart_port *uport = &msm_uport->uport;
+	struct circ_buf *tx_buf = &uport->state->xmit;
+	struct msm_hs_tx *tx = &msm_uport->tx;
+
+	/*
+	 * In BAM mode, do the tx buffer bookkeeping here that legacy
+	 * mode performs in the interrupt handler.
+	 */
+	msm_hs_resource_vote(msm_uport);
+	if (tx->flush >= FLUSH_STOP) {
+		spin_lock_irqsave(&(msm_uport->uport.lock), flags);
+		tx->flush = FLUSH_NONE;
+		MSM_HS_DBG("%s(): calling submit_tx", __func__);
+		msm_hs_submit_tx_locked(uport);
+		spin_unlock_irqrestore(&(msm_uport->uport.lock), flags);
+		msm_hs_resource_unvote(msm_uport);
+		return;
+	}
+
+	spin_lock_irqsave(&(msm_uport->uport.lock), flags);
+	if (!uart_circ_empty(tx_buf))
+		tx_buf->tail = (tx_buf->tail +
+				tx->tx_count) & ~UART_XMIT_SIZE;
+	else
+		MSM_HS_DBG("%s:circ buffer is empty\n", __func__);
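+	/*
+	 * The tail update above is safe because
+	 * msm_hs_submit_tx_locked() clamps tx_count so tail + tx_count
+	 * never exceeds UART_XMIT_SIZE; clearing that single bit then
+	 * wraps exactly like the usual "& (UART_XMIT_SIZE - 1)".
+	 */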
+
+	wake_up(&msm_uport->tx.wait);
+
+	uport->icount.tx += tx->tx_count;
+
+	/*
+	 * Send the next chunk of data: if the circ buffer is empty we
+	 * stop, and if clock-off was requested its sequence is kicked
+	 * off.
+	 */
+	MSM_HS_DBG("%s(): calling submit_tx", __func__);
+	msm_hs_submit_tx_locked(uport);
+
+	if (uart_circ_chars_pending(tx_buf) < WAKEUP_CHARS)
+		uart_write_wakeup(uport);
+
+	spin_unlock_irqrestore(&(msm_uport->uport.lock), flags);
+	msm_hs_resource_unvote(msm_uport);
+}
+
+static void
+msm_hs_mark_proc_rx_desc(struct msm_hs_port *msm_uport,
+			struct sps_event_notify *notify)
+{
+	struct msm_hs_rx *rx = &msm_uport->rx;
+	phys_addr_t addr = DESC_FULL_ADDR(notify->data.transfer.iovec.flags,
+		notify->data.transfer.iovec.addr);
+	/* Divide by UARTDM_RX_BUF_SIZE; >> 9 assumes 512-byte buffers */
+	int inx = (addr - rx->rbuffer) >> 9;
+
+	set_bit(inx, &rx->pending_flag);
+	clear_bit(inx, &rx->queued_flag);
+	rx->iovec[inx] = notify->data.transfer.iovec;
+	MSM_HS_DBG("Clear Q, Set P Bit %d, Q 0x%lx P 0x%lx",
+		inx, rx->queued_flag, rx->pending_flag);
+}
+
+/*
+ * Callback notification from the SPS driver.
+ *
+ * Invoked by the SPS driver when a requested SPS data transfer
+ * has completed.
+ */
+static void msm_hs_sps_rx_callback(struct sps_event_notify *notify)
+{
+	struct msm_hs_port *msm_uport =
+		(struct msm_hs_port *)notify->user;
+	struct uart_port *uport;
+	unsigned long flags;
+	struct msm_hs_rx *rx = &msm_uport->rx;
+	phys_addr_t addr = DESC_FULL_ADDR(notify->data.transfer.iovec.flags,
+		notify->data.transfer.iovec.addr);
+	/* Divide by UARTDM_RX_BUF_SIZE; >> 9 assumes 512-byte buffers */
+	int inx = (addr - rx->rbuffer) >> 9;
+
+	uport = &(msm_uport->uport);
+	msm_uport->notify = *notify;
+	MSM_HS_INFO("rx_cb: addr=0x%pa, size=0x%x, flags=0x%x\n",
+		&addr, notify->data.transfer.iovec.size,
+		notify->data.transfer.iovec.flags);
+
+	spin_lock_irqsave(&uport->lock, flags);
+	msm_hs_mark_proc_rx_desc(msm_uport, notify);
+	spin_unlock_irqrestore(&uport->lock, flags);
+
+	if (msm_uport->rx.flush == FLUSH_NONE) {
+		/* Test if others are queued */
+		if (msm_uport->rx.pending_flag & ~(1 << inx)) {
+			MSM_HS_DBG("%s(): inx 0x%x, 0x%lx not processed",
+			__func__, inx,
+			msm_uport->rx.pending_flag & ~(1<<inx));
+		}
+		queue_kthread_work(&msm_uport->rx.kworker,
+				&msm_uport->rx.kwork);
+		MSM_HS_DBG("%s(): Scheduled rx_tlet", __func__);
+	}
+}
+
+/*
+ *  Standard API, Current states of modem control inputs
+ *
+ * Since CTS can be handled entirely by hardware we always
+ * indicate clear to send and count on the TX FIFO to block when
+ * it fills up.
+ *
+ * - TIOCM_DCD
+ * - TIOCM_CTS
+ * - TIOCM_DSR
+ * - TIOCM_RI
+ * DCD and DSR are unsupported and reported high; RI is reported low.
+ */
+static unsigned int msm_hs_get_mctrl_locked(struct uart_port *uport)
+{
+	return TIOCM_DSR | TIOCM_CAR | TIOCM_CTS;
+}
+
+/*
+ *  Standard API, Set or clear RFR signal
+ *
+ * To set RFR high (indicating we are not ready for data), disable
+ * auto ready-for-receiving and then drive RFR_N high. To set RFR low,
+ * just re-enable auto ready-for-receiving; the hardware lowers RFR
+ * when it is ready.
+ */
+void msm_hs_set_mctrl_locked(struct uart_port *uport,
+				    unsigned int mctrl)
+{
+	unsigned int set_rts;
+	struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
+
+	if (msm_uport->pm_state != MSM_HS_PM_ACTIVE) {
+		MSM_HS_WARN("%s(): Clocks are off\n", __func__);
+		return;
+	}
+	/* RTS is active low */
+	set_rts = TIOCM_RTS & mctrl ? 0 : 1;
+	MSM_HS_INFO("%s: set_rts %d\n", __func__, set_rts);
+
+	if (set_rts)
+		msm_hs_disable_flow_control(uport, false);
+	else
+		msm_hs_enable_flow_control(uport, false);
+}
+
+void msm_hs_set_mctrl(struct uart_port *uport,
+				    unsigned int mctrl)
+{
+	unsigned long flags;
+	struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
+
+	msm_hs_resource_vote(msm_uport);
+	spin_lock_irqsave(&uport->lock, flags);
+	msm_hs_set_mctrl_locked(uport, mctrl);
+	spin_unlock_irqrestore(&uport->lock, flags);
+	msm_hs_resource_unvote(msm_uport);
+}
+EXPORT_SYMBOL(msm_hs_set_mctrl);
+
+/* Standard API, Enable modem status (CTS) interrupt  */
+static void msm_hs_enable_ms_locked(struct uart_port *uport)
+{
+	struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
+
+	if (msm_uport->pm_state != MSM_HS_PM_ACTIVE) {
+		MSM_HS_WARN("%s(): Clocks are off\n", __func__);
+		return;
+	}
+
+	/* Enable DELTA_CTS Interrupt */
+	msm_uport->imr_reg |= UARTDM_ISR_DELTA_CTS_BMSK;
+	msm_hs_write(uport, UART_DM_IMR, msm_uport->imr_reg);
+	/* Ensure register IO completion */
+	mb();
+
+}
+
+/*
+ *  Standard API, Break Signal
+ *
+ * Control the transmission of a break signal: ctl == 0 terminates
+ * the break signal, ctl != 0 starts it.
+ */
+static void msm_hs_break_ctl(struct uart_port *uport, int ctl)
+{
+	unsigned long flags;
+	struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
+
+	msm_hs_resource_vote(msm_uport);
+	spin_lock_irqsave(&uport->lock, flags);
+	msm_hs_write(uport, UART_DM_CR, ctl ? START_BREAK : STOP_BREAK);
+	/* Ensure register IO completion */
+	mb();
+	spin_unlock_irqrestore(&uport->lock, flags);
+	msm_hs_resource_unvote(msm_uport);
+}
+
+static void msm_hs_config_port(struct uart_port *uport, int cfg_flags)
+{
+	if (cfg_flags & UART_CONFIG_TYPE)
+		uport->type = PORT_MSM;
+}
+
+/*  Handle CTS changes (Called from interrupt handler) */
+static void msm_hs_handle_delta_cts_locked(struct uart_port *uport)
+{
+	struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
+	msm_hs_resource_vote(msm_uport);
+	/* clear interrupt */
+	msm_hs_write(uport, UART_DM_CR, RESET_CTS);
+	/* Calling CLOCK API; hence mb() is required here. */
+	mb();
+	uport->icount.cts++;
+
+	/* clear the IOCTL TIOCMIWAIT if called */
+	wake_up_interruptible(&uport->state->port.delta_msr_wait);
+	msm_hs_resource_unvote(msm_uport);
+}
+
+static irqreturn_t msm_hs_isr(int irq, void *dev)
+{
+	unsigned long flags;
+	unsigned int isr_status;
+	struct msm_hs_port *msm_uport = (struct msm_hs_port *)dev;
+	struct uart_port *uport = &msm_uport->uport;
+	struct circ_buf *tx_buf = &uport->state->xmit;
+	struct msm_hs_tx *tx = &msm_uport->tx;
+
+	spin_lock_irqsave(&uport->lock, flags);
+
+	isr_status = msm_hs_read(uport, UART_DM_MISR);
+	MSM_HS_INFO("%s: DM_MISR: 0x%x\n", __func__, isr_status);
+	dump_uart_hs_registers(msm_uport);
+
+	/* Uart RX starting */
+	if (isr_status & UARTDM_ISR_RXLEV_BMSK) {
+		MSM_HS_DBG("%s:UARTDM_ISR_RXLEV_BMSK\n", __func__);
+		msm_uport->imr_reg &= ~UARTDM_ISR_RXLEV_BMSK;
+		msm_hs_write(uport, UART_DM_IMR, msm_uport->imr_reg);
+		/* Complete device write for IMR; hence mb() is required. */
+		mb();
+	}
+	/* Stale rx interrupt */
+	if (isr_status & UARTDM_ISR_RXSTALE_BMSK) {
+		msm_hs_write(uport, UART_DM_CR, STALE_EVENT_DISABLE);
+		msm_hs_write(uport, UART_DM_CR, RESET_STALE_INT);
+		/*
+		 * Complete device write before calling the DMOV API;
+		 * hence mb() is required here.
+		 */
+		mb();
+		MSM_HS_DBG("%s:Stale Interrupt\n", __func__);
+	}
+	/* tx ready interrupt */
+	if (isr_status & UARTDM_ISR_TX_READY_BMSK) {
+		MSM_HS_DBG("%s: ISR_TX_READY Interrupt\n", __func__);
+		/* Clear  TX Ready */
+		msm_hs_write(uport, UART_DM_CR, CLEAR_TX_READY);
+
+		/*
+		 * Complete both writes before starting new TX;
+		 * hence mb() is required here.
+		 */
+		mb();
+		/* Complete DMA TX transactions and submit new transactions */
+
+		/*
+		 * Do not update tx_buf.tail if uart_flush_buffer was
+		 * already called in the serial core.
+		 */
+		if (!uart_circ_empty(tx_buf))
+			tx_buf->tail = (tx_buf->tail +
+					tx->tx_count) & ~UART_XMIT_SIZE;
+
+		tx->dma_in_flight = false;
+
+		uport->icount.tx += tx->tx_count;
+
+		if (uart_circ_chars_pending(tx_buf) < WAKEUP_CHARS)
+			uart_write_wakeup(uport);
+	}
+	if (isr_status & UARTDM_ISR_TXLEV_BMSK) {
+		/* TX FIFO is empty */
+		msm_uport->imr_reg &= ~UARTDM_ISR_TXLEV_BMSK;
+		msm_hs_write(uport, UART_DM_IMR, msm_uport->imr_reg);
+		MSM_HS_DBG("%s: TXLEV Interrupt\n", __func__);
+		/*
+		 * Complete device write before starting the clock_off
+		 * request; hence mb() is required here.
+		 */
+		mb();
+		queue_work(msm_uport->hsuart_wq, &msm_uport->clock_off_w);
+	}
+
+	/* Change in CTS interrupt */
+	if (isr_status & UARTDM_ISR_DELTA_CTS_BMSK)
+		msm_hs_handle_delta_cts_locked(uport);
+
+	spin_unlock_irqrestore(&uport->lock, flags);
+
+	return IRQ_HANDLED;
+}
+
+/* The following two functions provide interfaces to get the underlying
+ * port structure (struct uart_port or struct msm_hs_port) given
+ * the port index. msm_hs_get_uart_port() is called by clients.
+ * The function msm_hs_get_hs_port() is for internal use.
+ */
+
+struct uart_port *msm_hs_get_uart_port(int port_index)
+{
+	struct uart_state *state = msm_hs_driver.state + port_index;
+
+	/* The uart_driver structure stores the states in an array.
+	 * Thus the corresponding offset from the drv->state returns
+	 * the state for the uart_port that is requested
+	 */
+	if (port_index == state->uart_port->line)
+		return state->uart_port;
+
+	return NULL;
+}
+EXPORT_SYMBOL(msm_hs_get_uart_port);
+
+static struct msm_hs_port *msm_hs_get_hs_port(int port_index)
+{
+	struct uart_port *uport = msm_hs_get_uart_port(port_index);
+
+	if (uport)
+		return UARTDM_TO_MSM(uport);
+	return NULL;
+}
+
+void enable_wakeup_interrupt(struct msm_hs_port *msm_uport)
+{
+	unsigned long flags;
+	struct uart_port *uport = &(msm_uport->uport);
+
+	if (!is_use_low_power_wakeup(msm_uport))
+		return;
+	if (msm_uport->wakeup.freed)
+		return;
+
+	if (!(msm_uport->wakeup.enabled)) {
+		spin_lock_irqsave(&uport->lock, flags);
+		msm_uport->wakeup.ignore = 1;
+		msm_uport->wakeup.enabled = true;
+		spin_unlock_irqrestore(&uport->lock, flags);
+		disable_irq(uport->irq);
+		enable_irq(msm_uport->wakeup.irq);
+	} else {
+		MSM_HS_WARN("%s:Wake up IRQ already enabled", __func__);
+	}
+}
+
+void disable_wakeup_interrupt(struct msm_hs_port *msm_uport)
+{
+	unsigned long flags;
+	struct uart_port *uport = &(msm_uport->uport);
+
+	if (!is_use_low_power_wakeup(msm_uport))
+		return;
+	if (msm_uport->wakeup.freed)
+		return;
+
+	if (msm_uport->wakeup.enabled) {
+		disable_irq(msm_uport->wakeup.irq);
+		enable_irq(uport->irq);
+		spin_lock_irqsave(&uport->lock, flags);
+		msm_uport->wakeup.enabled = false;
+		spin_unlock_irqrestore(&uport->lock, flags);
+	} else {
+		MSM_HS_WARN("%s:Wake up IRQ already disabled", __func__);
+	}
+}
+
+void msm_hs_resource_off(struct msm_hs_port *msm_uport)
+{
+	struct uart_port *uport = &(msm_uport->uport);
+	unsigned int data;
+	int ret = 0;
+
+	MSM_HS_DBG("%s(): begin", __func__);
+	msm_hs_disable_flow_control(uport, false);
+	if (msm_uport->rx.flush == FLUSH_NONE)
+		msm_hs_disconnect_rx(uport);
+	else if (msm_uport->rx.flush != FLUSH_SHUTDOWN) {
+		MSM_HS_WARN("%s():Rx Flush=%d Not Expected\n",
+			__func__, msm_uport->rx.flush);
+		/* disable and disconnect rx */
+		ret = wait_event_timeout(msm_uport->rx.wait,
+			!msm_uport->rx.pending_flag, 500);
+		if (!ret)
+			MSM_HS_WARN("%s(): rx disconnect not complete",
+				__func__);
+		msm_hs_disconnect_rx(uport);
+	} else
+		MSM_HS_DBG("%s():Rx Flush=%d In Proper State\n",
+			__func__, msm_uport->rx.flush);
+
+	/* disable dlink */
+	if (msm_uport->tx.flush == FLUSH_NONE) {
+		ret = wait_event_timeout(msm_uport->tx.wait,
+			msm_uport->tx.flush == FLUSH_STOP, 500);
+		if (!ret)
+			MSM_HS_WARN("%s(): tx disconnect not complete",
+					__func__);
+	}
+
+	if (msm_uport->tx.flush != FLUSH_SHUTDOWN) {
+		data = msm_hs_read(uport, UART_DM_DMEN);
+		data &= ~UARTDM_TX_BAM_ENABLE_BMSK;
+		msm_hs_write(uport, UART_DM_DMEN, data);
+		sps_tx_disconnect(msm_uport);
+	}
+	if (!atomic_read(&msm_uport->client_req_state))
+		msm_hs_enable_flow_control(uport, false);
+}
+
+void msm_hs_resource_on(struct msm_hs_port *msm_uport)
+{
+	struct uart_port *uport = &(msm_uport->uport);
+	unsigned int data;
+	unsigned long flags;
+
+	if (atomic_read(&msm_uport->startup_locked)) {
+		MSM_HS_DBG("%s(): Port open in progress\n", __func__);
+		return;
+	}
+	msm_hs_disable_flow_control(uport, false);
+
+	if (msm_uport->rx.flush == FLUSH_SHUTDOWN ||
+	msm_uport->rx.flush == FLUSH_STOP) {
+		msm_hs_write(uport, UART_DM_CR, RESET_RX);
+		data = msm_hs_read(uport, UART_DM_DMEN);
+		data |= UARTDM_RX_BAM_ENABLE_BMSK;
+		msm_hs_write(uport, UART_DM_DMEN, data);
+	} else
+		MSM_HS_DBG("%s():rx.flush=%d, Rx is not enabled\n",
+			__func__, msm_uport->rx.flush);
+
+	if (msm_uport->rx.flush == FLUSH_SHUTDOWN) {
+		msm_hs_spsconnect_rx(uport);
+		spin_lock_irqsave(&uport->lock, flags);
+		msm_hs_start_rx_locked(uport);
+		spin_unlock_irqrestore(&uport->lock, flags);
+	}
+	msm_hs_spsconnect_tx(msm_uport);
+
+	msm_hs_enable_flow_control(uport, false);
+}
+
+/* Request to turn off uart clock once pending TX is flushed */
+int msm_hs_request_clock_off(struct uart_port *uport)
+{
+	struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
+	int ret = 0;
+	int client_count = 0;
+
+	mutex_lock(&msm_uport->mtx);
+	/*
+	 * If we're in the middle of a system suspend, don't process these
+	 * userspace/kernel API commands.
+	 */
+	if (msm_uport->pm_state == MSM_HS_PM_SYS_SUSPENDED) {
+		MSM_HS_WARN("%s:Can't process clk request during suspend",
+			__func__);
+		ret = -EIO;
+	}
+	mutex_unlock(&msm_uport->mtx);
+	if (ret)
+		goto exit_request_clock_off;
+
+	if (atomic_read(&msm_uport->client_count) <= 0) {
+		MSM_HS_WARN("%s(): ioctl client count not positive, check voting",
+			__func__);
+		ret = -EPERM;
+		goto exit_request_clock_off;
+	}
+	/* Set the flag to disable flow control and wakeup irq */
+	if (msm_uport->obs)
+		atomic_set(&msm_uport->client_req_state, 1);
+	msm_hs_resource_unvote(msm_uport);
+	atomic_dec(&msm_uport->client_count);
+	client_count = atomic_read(&msm_uport->client_count);
+	LOG_USR_MSG(msm_uport->ipc_msm_hs_pwr_ctxt,
+			"%s: Client_Count %d\n", __func__,
+			client_count);
+exit_request_clock_off:
+	return ret;
+}
+EXPORT_SYMBOL(msm_hs_request_clock_off);
+
+int msm_hs_request_clock_on(struct uart_port *uport)
+{
+	struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
+	int client_count;
+	int ret = 0;
+
+	mutex_lock(&msm_uport->mtx);
+	/*
+	 * If we're in the middle of a system suspend, don't process these
+	 * userspace/kernel API commands.
+	 */
+	if (msm_uport->pm_state == MSM_HS_PM_SYS_SUSPENDED) {
+		MSM_HS_WARN("%s:Can't process clk request during suspend",
+			__func__);
+		ret = -EIO;
+	}
+	mutex_unlock(&msm_uport->mtx);
+	if (ret)
+		goto exit_request_clock_on;
+
+	msm_hs_resource_vote(UARTDM_TO_MSM(uport));
+	atomic_inc(&msm_uport->client_count);
+	client_count = atomic_read(&msm_uport->client_count);
+	LOG_USR_MSG(msm_uport->ipc_msm_hs_pwr_ctxt,
+			"%s: Client_Count %d\n", __func__,
+			client_count);
+
+	/* Clear the flag */
+	if (msm_uport->obs)
+		atomic_set(&msm_uport->client_req_state, 0);
+exit_request_clock_on:
+	return ret;
+}
+EXPORT_SYMBOL(msm_hs_request_clock_on);
+
+static irqreturn_t msm_hs_wakeup_isr(int irq, void *dev)
+{
+	unsigned int wakeup = 0;
+	unsigned long flags;
+	struct msm_hs_port *msm_uport = (struct msm_hs_port *)dev;
+	struct uart_port *uport = &msm_uport->uport;
+	struct tty_struct *tty = NULL;
+
+	spin_lock_irqsave(&uport->lock, flags);
+
+	if (msm_uport->wakeup.ignore)
+		msm_uport->wakeup.ignore = 0;
+	else
+		wakeup = 1;
+
+	if (wakeup) {
+		/*
+		 * Port was clocked off during rx, wake up and
+		 * optionally inject char into tty rx
+		 */
+		if (msm_uport->wakeup.inject_rx) {
+			tty = uport->state->port.tty;
+			tty_insert_flip_char(tty->port,
+					     msm_uport->wakeup.rx_to_inject,
+					     TTY_NORMAL);
+			hex_dump_ipc(msm_uport, msm_uport->rx.ipc_rx_ctxt,
+				"Rx Inject",
+				&msm_uport->wakeup.rx_to_inject, 0, 1);
+			MSM_HS_INFO("Wakeup ISR. Ignore %d\n",
+						msm_uport->wakeup.ignore);
+		}
+	}
+
+	spin_unlock_irqrestore(&uport->lock, flags);
+
+	if (wakeup && msm_uport->wakeup.inject_rx)
+		tty_flip_buffer_push(tty->port);
+	return IRQ_HANDLED;
+}
+
+static const char *msm_hs_type(struct uart_port *port)
+{
+	return "MSM HS UART";
+}
+
+/**
+ * msm_hs_unconfig_uart_gpios: Unconfigures UART GPIOs
+ * @uport: uart port
+ */
+static void msm_hs_unconfig_uart_gpios(struct uart_port *uport)
+{
+	struct platform_device *pdev = to_platform_device(uport->dev);
+	const struct msm_serial_hs_platform_data *pdata =
+					pdev->dev.platform_data;
+	struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
+	int ret;
+
+	if (msm_uport->use_pinctrl) {
+		ret = pinctrl_select_state(msm_uport->pinctrl,
+				msm_uport->gpio_state_suspend);
+		if (ret)
+			MSM_HS_ERR("%s():Failed to pinctrl set_state",
+				__func__);
+	} else if (pdata) {
+		if (gpio_is_valid(pdata->uart_tx_gpio))
+			gpio_free(pdata->uart_tx_gpio);
+		if (gpio_is_valid(pdata->uart_rx_gpio))
+			gpio_free(pdata->uart_rx_gpio);
+		if (gpio_is_valid(pdata->uart_cts_gpio))
+			gpio_free(pdata->uart_cts_gpio);
+		if (gpio_is_valid(pdata->uart_rfr_gpio))
+			gpio_free(pdata->uart_rfr_gpio);
+	} else
+		MSM_HS_ERR("Error:Pdata is NULL.\n");
+}
+
+/**
+ * msm_hs_config_uart_gpios - Configures UART GPIOs
+ * @uport: uart port
+ */
+static int msm_hs_config_uart_gpios(struct uart_port *uport)
+{
+	struct platform_device *pdev = to_platform_device(uport->dev);
+	const struct msm_serial_hs_platform_data *pdata =
+					pdev->dev.platform_data;
+	int ret = 0;
+	struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
+
+	if (!IS_ERR_OR_NULL(msm_uport->pinctrl)) {
+		MSM_HS_DBG("%s(): Using Pinctrl", __func__);
+		msm_uport->use_pinctrl = true;
+		ret = pinctrl_select_state(msm_uport->pinctrl,
+				msm_uport->gpio_state_active);
+		if (ret)
+			MSM_HS_ERR("%s(): Failed to pinctrl set_state",
+				__func__);
+		return ret;
+	} else if (pdata) {
+		/* Fall back to using gpio lib */
+		if (gpio_is_valid(pdata->uart_tx_gpio)) {
+			ret = gpio_request(pdata->uart_tx_gpio,
+							"UART_TX_GPIO");
+			if (unlikely(ret)) {
+				MSM_HS_ERR("gpio request failed for:%d\n",
+					pdata->uart_tx_gpio);
+				goto exit_uart_config;
+			}
+		}
+
+		if (gpio_is_valid(pdata->uart_rx_gpio)) {
+			ret = gpio_request(pdata->uart_rx_gpio,
+							"UART_RX_GPIO");
+			if (unlikely(ret)) {
+				MSM_HS_ERR("gpio request failed for:%d\n",
+					pdata->uart_rx_gpio);
+				goto uart_tx_unconfig;
+			}
+		}
+
+		if (gpio_is_valid(pdata->uart_cts_gpio)) {
+			ret = gpio_request(pdata->uart_cts_gpio,
+							"UART_CTS_GPIO");
+			if (unlikely(ret)) {
+				MSM_HS_ERR("gpio request failed for:%d\n",
+					pdata->uart_cts_gpio);
+				goto uart_rx_unconfig;
+			}
+		}
+
+		if (gpio_is_valid(pdata->uart_rfr_gpio)) {
+			ret = gpio_request(pdata->uart_rfr_gpio,
+							"UART_RFR_GPIO");
+			if (unlikely(ret)) {
+				MSM_HS_ERR("gpio request failed for:%d\n",
+					pdata->uart_rfr_gpio);
+				goto uart_cts_unconfig;
+			}
+		}
+	} else {
+		MSM_HS_ERR("Pdata is NULL.\n");
+		ret = -EINVAL;
+	}
+	return ret;
+
+uart_cts_unconfig:
+	if (gpio_is_valid(pdata->uart_cts_gpio))
+		gpio_free(pdata->uart_cts_gpio);
+uart_rx_unconfig:
+	if (gpio_is_valid(pdata->uart_rx_gpio))
+		gpio_free(pdata->uart_rx_gpio);
+uart_tx_unconfig:
+	if (gpio_is_valid(pdata->uart_tx_gpio))
+		gpio_free(pdata->uart_tx_gpio);
+exit_uart_config:
+	return ret;
+}
+
+static void msm_hs_get_pinctrl_configs(struct uart_port *uport)
+{
+	struct pinctrl_state *set_state;
+	struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
+
+	msm_uport->pinctrl = devm_pinctrl_get(uport->dev);
+	if (IS_ERR_OR_NULL(msm_uport->pinctrl)) {
+		MSM_HS_DBG("%s(): Pinctrl not defined", __func__);
+	} else {
+		MSM_HS_DBG("%s(): Using Pinctrl", __func__);
+		msm_uport->use_pinctrl = true;
+
+		set_state = pinctrl_lookup_state(msm_uport->pinctrl,
+						PINCTRL_STATE_DEFAULT);
+		if (IS_ERR_OR_NULL(set_state)) {
+			dev_err(uport->dev,
+				"pinctrl lookup failed for default state");
+			goto pinctrl_fail;
+		}
+
+		MSM_HS_DBG("%s(): Pinctrl state active %p\n", __func__,
+			set_state);
+		msm_uport->gpio_state_active = set_state;
+
+		set_state = pinctrl_lookup_state(msm_uport->pinctrl,
+						PINCTRL_STATE_SLEEP);
+		if (IS_ERR_OR_NULL(set_state)) {
+			dev_err(uport->dev,
+				"pinctrl lookup failed for sleep state");
+			goto pinctrl_fail;
+		}
+
+		MSM_HS_DBG("%s(): Pinctrl state sleep %p\n", __func__,
+			set_state);
+		msm_uport->gpio_state_suspend = set_state;
+		return;
+	}
+pinctrl_fail:
+	msm_uport->pinctrl = NULL;
+	return;
+}
+
+/* Called when port is opened */
+static int msm_hs_startup(struct uart_port *uport)
+{
+	int ret;
+	int rfr_level;
+	unsigned long flags;
+	unsigned int data;
+	struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
+	struct circ_buf *tx_buf = &uport->state->xmit;
+	struct msm_hs_tx *tx = &msm_uport->tx;
+	struct msm_hs_rx *rx = &msm_uport->rx;
+	struct sps_pipe *sps_pipe_handle_tx = tx->cons.pipe_handle;
+	struct sps_pipe *sps_pipe_handle_rx = rx->prod.pipe_handle;
+
+	atomic_set(&msm_uport->startup_locked, 1);
+	rfr_level = uport->fifosize;
+	if (rfr_level > 16)
+		rfr_level -= 16;
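+	/*
+	 * rfr_level becomes the auto-RFR watermark written to
+	 * UART_DM_MR1 below: with a FIFO deeper than 16 words, RFR is
+	 * asserted 16 words before the RX FIFO fills, leaving headroom
+	 * for characters already in flight.
+	 */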
+
+	tx->dma_base = dma_map_single(uport->dev, tx_buf->buf, UART_XMIT_SIZE,
+				      DMA_TO_DEVICE);
+
+	/* turn on uart clk */
+	msm_hs_resource_vote(msm_uport);
+
+	if (is_use_low_power_wakeup(msm_uport)) {
+		ret = request_irq(msm_uport->wakeup.irq, msm_hs_wakeup_isr,
+					IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+					"msm_hs_wakeup", msm_uport);
+		if (unlikely(ret)) {
+			MSM_HS_ERR("%s():Err getting uart wakeup_irq %d\n",
+				  __func__, ret);
+			goto unvote_exit;
+		}
+
+		msm_uport->wakeup.freed = false;
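+		/*
+		 * request_irq() leaves the line enabled, but the wakeup
+		 * interrupt is only wanted while the port is clocked
+		 * off; keep it disabled until enable_wakeup_interrupt().
+		 */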
+		disable_irq(msm_uport->wakeup.irq);
+		msm_uport->wakeup.enabled = false;
+
+		ret = irq_set_irq_wake(msm_uport->wakeup.irq, 1);
+		if (unlikely(ret)) {
+			MSM_HS_ERR("%s():Err setting wakeup irq\n", __func__);
+			goto free_uart_irq;
+		}
+	}
+
+	ret = msm_hs_config_uart_gpios(uport);
+	if (ret) {
+		MSM_HS_ERR("Uart GPIO request failed\n");
+		goto free_uart_irq;
+	}
+
+	msm_hs_write(uport, UART_DM_DMEN, 0);
+
+	/* Reconnect TX: drop any stale connection first */
+	sps_tx_disconnect(msm_uport);
+	ret = msm_hs_spsconnect_tx(msm_uport);
+	if (ret) {
+		MSM_HS_ERR("msm_serial_hs: SPS connect failed for TX");
+		goto unconfig_uart_gpios;
+	}
+
+	/* Reconnect RX: quiesce the worker, drop any stale connection */
+	flush_kthread_worker(&msm_uport->rx.kworker);
+	if (rx->flush != FLUSH_SHUTDOWN)
+		disconnect_rx_endpoint(msm_uport);
+	else
+		MSM_HS_DBG("%s(): Rx Flush=%d In Proper state\n",
+			__func__, rx->flush);
+	ret = msm_hs_spsconnect_rx(uport);
+	if (ret) {
+		MSM_HS_ERR("msm_serial_hs: SPS connect failed for RX");
+		goto sps_disconnect_tx;
+	}
+
+	data = (UARTDM_BCR_TX_BREAK_DISABLE | UARTDM_BCR_STALE_IRQ_EMPTY |
+		UARTDM_BCR_RX_DMRX_LOW_EN | UARTDM_BCR_RX_STAL_IRQ_DMRX_EQL |
+		UARTDM_BCR_RX_DMRX_1BYTE_RES_EN);
+	msm_hs_write(uport, UART_DM_BCR, data);
+
+	/* Set auto RFR Level */
+	data = msm_hs_read(uport, UART_DM_MR1);
+	data &= ~UARTDM_MR1_AUTO_RFR_LEVEL1_BMSK;
+	data &= ~UARTDM_MR1_AUTO_RFR_LEVEL0_BMSK;
+	data |= (UARTDM_MR1_AUTO_RFR_LEVEL1_BMSK & (rfr_level << 2));
+	data |= (UARTDM_MR1_AUTO_RFR_LEVEL0_BMSK & rfr_level);
+	msm_hs_write(uport, UART_DM_MR1, data);
+
+	/* Make sure RXSTALE count is non-zero */
+	data = msm_hs_read(uport, UART_DM_IPR);
+	if (!data) {
+		data |= 0x1f & UARTDM_IPR_STALE_LSB_BMSK;
+		msm_hs_write(uport, UART_DM_IPR, data);
+	}
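+	/*
+	 * The stale interrupt fires once the RX line has been idle for
+	 * the interval programmed in IPR; a value of zero would disable
+	 * stale detection, hence the 0x1f (max STALE_LSB) fallback.
+	 */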
+
+	/* Assume no flow control, unless termios sets it */
+	msm_uport->flow_control = false;
+	msm_hs_disable_flow_control(uport, true);
+
+	/* Reset TX/RX state machines and clear stale status/interrupts */
+	msm_hs_write(uport, UART_DM_CR, RESET_TX);
+	msm_hs_write(uport, UART_DM_CR, RESET_RX);
+	msm_hs_write(uport, UART_DM_CR, RESET_ERROR_STATUS);
+	msm_hs_write(uport, UART_DM_CR, RESET_BREAK_INT);
+	msm_hs_write(uport, UART_DM_CR, RESET_STALE_INT);
+	msm_hs_write(uport, UART_DM_CR, RESET_CTS);
+	msm_hs_write(uport, UART_DM_CR, RFR_LOW);
+	/* Turn on Uart Receiver */
+	msm_hs_write(uport, UART_DM_CR, UARTDM_CR_RX_EN_BMSK);
+
+	/* Turn on Uart Transmitter */
+	msm_hs_write(uport, UART_DM_CR, UARTDM_CR_TX_EN_BMSK);
+
+	tx->dma_in_flight = false;
+	MSM_HS_DBG("%s():desc usage flag 0x%lx", __func__, rx->queued_flag);
+	setup_timer(&(tx->tx_timeout_timer),
+			tx_timeout_handler,
+			(unsigned long) msm_uport);
+
+	/* Enable reading the current CTS, no harm even if CTS is ignored */
+	msm_uport->imr_reg |= UARTDM_ISR_CURRENT_CTS_BMSK;
+
+	/* TXLEV on empty TX fifo */
+	msm_hs_write(uport, UART_DM_TFWR, 4);
+	/*
+	 * Complete all device write related configuration before
+	 * queuing RX request. Hence mb() requires here.
+	 */
+	mb();
+
+	ret = request_irq(uport->irq, msm_hs_isr, IRQF_TRIGGER_HIGH,
+			  "msm_hs_uart", msm_uport);
+	if (unlikely(ret)) {
+		MSM_HS_ERR("%s():Error %d getting uart irq\n", __func__, ret);
+		goto sps_disconnect_rx;
+	}
+
+	spin_lock_irqsave(&uport->lock, flags);
+	atomic_set(&msm_uport->client_count, 0);
+	atomic_set(&msm_uport->client_req_state, 0);
+	LOG_USR_MSG(msm_uport->ipc_msm_hs_pwr_ctxt,
+			"%s: Client_Count 0\n", __func__);
+	atomic_set(&msm_uport->startup_locked, 0);
+	msm_hs_start_rx_locked(uport);
+
+	spin_unlock_irqrestore(&uport->lock, flags);
+
+	msm_hs_resource_unvote(msm_uport);
+	return 0;
+
+sps_disconnect_rx:
+	sps_disconnect(sps_pipe_handle_rx);
+sps_disconnect_tx:
+	sps_disconnect(sps_pipe_handle_tx);
+unconfig_uart_gpios:
+	msm_hs_unconfig_uart_gpios(uport);
+free_uart_irq:
+	free_irq(uport->irq, msm_uport);
+unvote_exit:
+	atomic_set(&msm_uport->startup_locked, 0);
+	msm_hs_resource_unvote(msm_uport);
+	MSM_HS_ERR("%s(): Error return\n", __func__);
+	return ret;
+}
+
+/* Initialize tx and rx data structures */
+static int uartdm_init_port(struct uart_port *uport)
+{
+	int ret = 0;
+	struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
+	struct msm_hs_tx *tx = &msm_uport->tx;
+	struct msm_hs_rx *rx = &msm_uport->rx;
+
+	init_waitqueue_head(&rx->wait);
+	init_waitqueue_head(&tx->wait);
+	init_waitqueue_head(&msm_uport->bam_disconnect_wait);
+
+	/* Init kernel threads for tx and rx */
+
+	init_kthread_worker(&rx->kworker);
+	rx->task = kthread_run(kthread_worker_fn,
+			&rx->kworker, "msm_serial_hs_%d_rx_work", uport->line);
+	if (IS_ERR(rx->task)) {
+		MSM_HS_ERR("%s(): error creating rx task", __func__);
+		ret = PTR_ERR(rx->task);
+		rx->task = NULL;
+		goto exit_lh_init;
+	}
+	init_kthread_work(&rx->kwork, msm_serial_hs_rx_work);
+
+	init_kthread_worker(&tx->kworker);
+	tx->task = kthread_run(kthread_worker_fn,
+			&tx->kworker, "msm_serial_hs_%d_tx_work", uport->line);
+	if (IS_ERR(tx->task)) {
+		MSM_HS_ERR("%s(): error creating tx task", __func__);
+		ret = PTR_ERR(tx->task);
+		tx->task = NULL;
+		goto exit_lh_init;
+	}
+
+	init_kthread_work(&tx->kwork, msm_serial_hs_tx_work);
+
+	rx->buffer = dma_alloc_coherent(uport->dev,
+				UART_DMA_DESC_NR * UARTDM_RX_BUF_SIZE,
+				 &rx->rbuffer, GFP_KERNEL);
+	if (!rx->buffer) {
+		MSM_HS_ERR("%s(): cannot allocate rx->buffer", __func__);
+		ret = -ENOMEM;
+		goto exit_lh_init;
+	}
+
+	/* Set up Uart Receive */
+	msm_hs_write(uport, UART_DM_RFWR, 32);
+	/* Write to BADR explicitly to set up FIFO sizes */
+	msm_hs_write(uport, UARTDM_BADR_ADDR, 64);
+
+	INIT_DELAYED_WORK(&rx->flip_insert_work, flip_insert_work);
+
+	return ret;
+exit_lh_init:
+	if (rx->task) {
+		kthread_stop(rx->task);
+		rx->task = NULL;
+	}
+	if (tx->task) {
+		kthread_stop(tx->task);
+		tx->task = NULL;
+	}
+	return ret;
+}
+
+struct msm_serial_hs_platform_data
+	*msm_hs_dt_to_pdata(struct platform_device *pdev)
+{
+	struct device_node *node = pdev->dev.of_node;
+	struct msm_serial_hs_platform_data *pdata;
+	u32 rx_to_inject;
+	int ret;
+
+	pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
+	if (!pdata) {
+		pr_err("unable to allocate memory for platform data\n");
+		return ERR_PTR(-ENOMEM);
+	}
+
+	pdev->id = of_alias_get_id(pdev->dev.of_node, "uart");
+	/* UART TX GPIO */
+	pdata->uart_tx_gpio = of_get_named_gpio(node,
+					"qcom,tx-gpio", 0);
+	if (pdata->uart_tx_gpio < 0)
+		pr_err("uart_tx_gpio is not available\n");
+
+	/* UART RX GPIO */
+	pdata->uart_rx_gpio = of_get_named_gpio(node,
+					"qcom,rx-gpio", 0);
+	if (pdata->uart_rx_gpio < 0)
+		pr_err("uart_rx_gpio is not available\n");
+
+	/* UART CTS GPIO */
+	pdata->uart_cts_gpio = of_get_named_gpio(node,
+					"qcom,cts-gpio", 0);
+	if (pdata->uart_cts_gpio < 0)
+		pr_err("uart_cts_gpio is not available\n");
+
+	/* UART RFR GPIO */
+	pdata->uart_rfr_gpio = of_get_named_gpio(node,
+					"qcom,rfr-gpio", 0);
+	if (pdata->uart_rfr_gpio < 0)
+		pr_err("uart_rfr_gpio is not available\n");
+
+	pdata->no_suspend_delay = of_property_read_bool(node,
+				"qcom,no-suspend-delay");
+
+	pdata->obs = of_property_read_bool(node,
+				"qcom,msm-obs");
+	if (pdata->obs)
+		pr_err("%s:Out of Band sleep flag is set\n", __func__);
+
+	pdata->inject_rx_on_wakeup = of_property_read_bool(node,
+				"qcom,inject-rx-on-wakeup");
+
+	if (pdata->inject_rx_on_wakeup) {
+		ret = of_property_read_u32(node, "qcom,rx-char-to-inject",
+						&rx_to_inject);
+		if (ret < 0) {
+			pr_err("Error: Rx_char_to_inject not specified.\n");
+			return ERR_PTR(ret);
+		}
+		pdata->rx_to_inject = (u8)rx_to_inject;
+	}
+
+	ret = of_property_read_u32(node, "qcom,bam-tx-ep-pipe-index",
+				&pdata->bam_tx_ep_pipe_index);
+	if (ret < 0) {
+		pr_err("Error: Getting UART BAM TX EP Pipe Index.\n");
+		return ERR_PTR(ret);
+	}
+
+	if (!(pdata->bam_tx_ep_pipe_index >= BAM_PIPE_MIN &&
+		pdata->bam_tx_ep_pipe_index <= BAM_PIPE_MAX)) {
+		pr_err("Error: Invalid UART BAM TX EP Pipe Index.\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	ret = of_property_read_u32(node, "qcom,bam-rx-ep-pipe-index",
+					&pdata->bam_rx_ep_pipe_index);
+	if (ret < 0) {
+		pr_err("Error: Getting UART BAM RX EP Pipe Index.\n");
+		return ERR_PTR(ret);
+	}
+
+	if (!(pdata->bam_rx_ep_pipe_index >= BAM_PIPE_MIN &&
+		pdata->bam_rx_ep_pipe_index <= BAM_PIPE_MAX)) {
+		pr_err("Error: Invalid UART BAM RX EP Pipe Index.\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	pr_debug("tx_ep_pipe_index:%d rx_ep_pipe_index:%d\n"
+		"tx_gpio:%d rx_gpio:%d cts_gpio:%d rfr_gpio:%d",
+		pdata->bam_tx_ep_pipe_index, pdata->bam_rx_ep_pipe_index,
+		pdata->uart_tx_gpio, pdata->uart_rx_gpio, pdata->uart_cts_gpio,
+		pdata->uart_rfr_gpio);
+
+	return pdata;
+}
+
+
+/**
+ * Deallocate UART peripheral's SPS endpoint
+ * @msm_uport - Pointer to msm_hs_port structure
+ * @ep - Pointer to sps endpoint data structure
+ */
+
+static void msm_hs_exit_ep_conn(struct msm_hs_port *msm_uport,
+				struct msm_hs_sps_ep_conn_data *ep)
+{
+	struct sps_pipe *sps_pipe_handle = ep->pipe_handle;
+	struct sps_connect *sps_config = &ep->config;
+
+	dma_free_coherent(msm_uport->uport.dev,
+			sps_config->desc.size,
+			&sps_config->desc.phys_base,
+			GFP_KERNEL);
+	sps_free_endpoint(sps_pipe_handle);
+}
+
+
+/**
+ * Allocate UART peripheral's SPS endpoint
+ *
+ * This function allocates endpoint context
+ * by calling appropriate SPS driver APIs.
+ *
+ * @msm_uport - Pointer to msm_hs_port structure
+ * @ep - Pointer to sps endpoint data structure
+ * @is_producer - 1 means Producer endpoint
+ *              - 0 means Consumer endpoint
+ *
+ * @return - 0 if successful else negative value
+ */
+
+static int msm_hs_sps_init_ep_conn(struct msm_hs_port *msm_uport,
+				struct msm_hs_sps_ep_conn_data *ep,
+				bool is_producer)
+{
+	int rc = 0;
+	struct sps_pipe *sps_pipe_handle;
+	struct sps_connect *sps_config = &ep->config;
+	struct sps_register_event *sps_event = &ep->event;
+
+	/* Allocate endpoint context */
+	sps_pipe_handle = sps_alloc_endpoint();
+	if (!sps_pipe_handle) {
+		MSM_HS_ERR("%s(): sps_alloc_endpoint() failed!!\n"
+			"is_producer=%d", __func__, is_producer);
+		rc = -ENOMEM;
+		goto out;
+	}
+
+	/* Get default connection configuration for an endpoint */
+	rc = sps_get_config(sps_pipe_handle, sps_config);
+	if (rc) {
+		MSM_HS_ERR("%s(): failed! pipe_handle=0x%p rc=%d",
+			__func__, sps_pipe_handle, rc);
+		goto get_config_err;
+	}
+
+	/* Modify the default connection configuration */
+	if (is_producer) {
+		/*
+		 * For a UART producer transfer the source is the UART
+		 * peripheral, whereas the destination is system memory.
+		 */
+		sps_config->source = msm_uport->bam_handle;
+		sps_config->destination = SPS_DEV_HANDLE_MEM;
+		sps_config->mode = SPS_MODE_SRC;
+		sps_config->src_pipe_index = msm_uport->bam_rx_ep_pipe_index;
+		sps_config->dest_pipe_index = 0;
+		sps_event->callback = msm_hs_sps_rx_callback;
+	} else {
+		/*
+		 * For a UART consumer transfer the source is system
+		 * memory, whereas the destination is the UART peripheral.
+		 */
+		sps_config->source = SPS_DEV_HANDLE_MEM;
+		sps_config->destination = msm_uport->bam_handle;
+		sps_config->mode = SPS_MODE_DEST;
+		sps_config->src_pipe_index = 0;
+		sps_config->dest_pipe_index = msm_uport->bam_tx_ep_pipe_index;
+		sps_event->callback = msm_hs_sps_tx_callback;
+	}
+
+	sps_config->options = SPS_O_EOT | SPS_O_DESC_DONE | SPS_O_AUTO_ENABLE;
+	sps_config->event_thresh = 0x10;
+
+	/* Allocate maximum descriptor fifo size */
+	sps_config->desc.size =
+		(1 + UART_DMA_DESC_NR) * sizeof(struct sps_iovec);
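+	/*
+	 * One slot beyond UART_DMA_DESC_NR, presumably the usual ring
+	 * convention of keeping one descriptor unused so that the full
+	 * and empty states stay distinguishable.
+	 */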
+	sps_config->desc.base = dma_alloc_coherent(msm_uport->uport.dev,
+						sps_config->desc.size,
+						&sps_config->desc.phys_base,
+						GFP_KERNEL);
+	if (!sps_config->desc.base) {
+		rc = -ENOMEM;
+		MSM_HS_ERR("msm_serial_hs: dma_alloc_coherent() failed!!\n");
+		goto get_config_err;
+	}
+	memset(sps_config->desc.base, 0x00, sps_config->desc.size);
+
+	sps_event->mode = SPS_TRIGGER_CALLBACK;
+
+	sps_event->options = SPS_O_DESC_DONE | SPS_O_EOT;
+	sps_event->user = (void *)msm_uport;
+
+	/* Now save the sps pipe handle */
+	ep->pipe_handle = sps_pipe_handle;
+	MSM_HS_DBG("msm_serial_hs: success !! %s: pipe_handle=0x%p\n"
+		"desc_fifo.phys_base=0x%pa\n",
+		is_producer ? "READ" : "WRITE",
+		sps_pipe_handle, &sps_config->desc.phys_base);
+	return 0;
+
+get_config_err:
+	sps_free_endpoint(sps_pipe_handle);
+out:
+	return rc;
+}
+
+/**
+ * Initialize SPS HW connected with UART core
+ *
+ * This function register BAM HW resources with
+ * SPS driver and then initialize 2 SPS endpoints
+ *
+ * msm_uport - Pointer to msm_hs_port structure
+ *
+ * @return - 0 if successful else negative value
+ */
+
+static int msm_hs_sps_init(struct msm_hs_port *msm_uport)
+{
+	int rc = 0;
+	struct sps_bam_props bam = {0};
+	unsigned long bam_handle;
+
+	rc = sps_phy2h(msm_uport->bam_mem, &bam_handle);
+	if (rc || !bam_handle) {
+		bam.phys_addr = msm_uport->bam_mem;
+		bam.virt_addr = msm_uport->bam_base;
+		/*
+		 * This event threshold value is only significant for
+		 * BAM-to-BAM transfers. It's ignored for BAM-to-system
+		 * mode transfers.
+		 */
+		bam.event_threshold = 0x10;	/* Pipe event threshold */
+		bam.summing_threshold = 1;	/* BAM event threshold */
+
+		/* SPS driver will handle the UART BAM IRQ */
+		bam.irq = (u32)msm_uport->bam_irq;
+		bam.manage = SPS_BAM_MGR_DEVICE_REMOTE;
+
+		MSM_HS_DBG("msm_serial_hs: bam physical base=0x%pa\n",
+							&bam.phys_addr);
+		MSM_HS_DBG("msm_serial_hs: bam virtual base=0x%p\n",
+							bam.virt_addr);
+
+		/* Register UART Peripheral BAM device to SPS driver */
+		rc = sps_register_bam_device(&bam, &bam_handle);
+		if (rc) {
+			MSM_HS_ERR("%s: BAM device register failed\n",
+				  __func__);
+			return rc;
+		}
+		MSM_HS_DBG("%s:BAM device registered. bam_handle=0x%lx",
+			   __func__, bam_handle);
+	}
+	msm_uport->bam_handle = bam_handle;
+
+	rc = msm_hs_sps_init_ep_conn(msm_uport, &msm_uport->rx.prod,
+				UART_SPS_PROD_PERIPHERAL);
+	if (rc) {
+		MSM_HS_ERR("%s: Failed to Init Producer BAM-pipe", __func__);
+		goto deregister_bam;
+	}
+
+	rc = msm_hs_sps_init_ep_conn(msm_uport, &msm_uport->tx.cons,
+				UART_SPS_CONS_PERIPHERAL);
+	if (rc) {
+		MSM_HS_ERR("%s: Failed to Init Consumer BAM-pipe", __func__);
+		goto deinit_ep_conn_prod;
+	}
+	return 0;
+
+deinit_ep_conn_prod:
+	msm_hs_exit_ep_conn(msm_uport, &msm_uport->rx.prod);
+deregister_bam:
+	sps_deregister_bam_device(msm_uport->bam_handle);
+	return rc;
+}
+
+static bool deviceid[UARTDM_NR] = {0};
+/*
+ * The mutex synchronizes grabbing the next free device number,
+ * whether or not an alias is used. When an alias is used,
+ * msm_hs_dt_to_pdata gets it and the boolean array is updated
+ * accordingly with device_id_set_used. If no alias is used,
+ * device_id_grab_next_free sets that array.
+ */
+static DEFINE_MUTEX(mutex_next_device_id);
+
+static int device_id_grab_next_free(void)
+{
+	int i;
+	int ret = -ENODEV;
+
+	mutex_lock(&mutex_next_device_id);
+	for (i = 0; i < UARTDM_NR; i++) {
+		if (!deviceid[i]) {
+			ret = i;
+			deviceid[i] = true;
+			break;
+		}
+	}
+	mutex_unlock(&mutex_next_device_id);
+	return ret;
+}
+
+static int device_id_set_used(int index)
+{
+	int ret = 0;
+	mutex_lock(&mutex_next_device_id);
+	if (deviceid[index])
+		ret = -ENODEV;
+	else
+		deviceid[index] = true;
+	mutex_unlock(&mutex_next_device_id);
+	return ret;
+}
+
+static void obs_manage_irq(struct msm_hs_port *msm_uport, bool en)
+{
+	struct uart_port *uport = &(msm_uport->uport);
+
+	if (msm_uport->obs) {
+		if (en)
+			enable_irq(uport->irq);
+		else
+			disable_irq(uport->irq);
+	}
+}
+
+static void msm_hs_pm_suspend(struct device *dev)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct msm_hs_port *msm_uport = get_matching_hs_port(pdev);
+	int ret;
+	int client_count = 0;
+
+	if (!msm_uport)
+		goto err_suspend;
+	mutex_lock(&msm_uport->mtx);
+
+	client_count = atomic_read(&msm_uport->client_count);
+	msm_uport->pm_state = MSM_HS_PM_SUSPENDED;
+	msm_hs_resource_off(msm_uport);
+	obs_manage_irq(msm_uport, false);
+	msm_hs_clk_bus_unvote(msm_uport);
+
+	/* For OBS, don't use wakeup interrupt, set gpio to suspended state */
+	if (msm_uport->obs) {
+		ret = pinctrl_select_state(msm_uport->pinctrl,
+			msm_uport->gpio_state_suspend);
+		if (ret)
+			MSM_HS_ERR("%s():Error selecting pinctrl suspend state",
+				__func__);
+	}
+
+	if (!atomic_read(&msm_uport->client_req_state))
+		enable_wakeup_interrupt(msm_uport);
+	LOG_USR_MSG(msm_uport->ipc_msm_hs_pwr_ctxt,
+		"%s: PM State Suspended client_count %d\n", __func__,
+								client_count);
+	mutex_unlock(&msm_uport->mtx);
+	return;
+err_suspend:
+	pr_err("%s(): invalid uport", __func__);
+	return;
+}
+
+static int msm_hs_pm_resume(struct device *dev)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct msm_hs_port *msm_uport = get_matching_hs_port(pdev);
+	int ret = 0;
+	int client_count = 0;
+
+	if (!msm_uport) {
+		dev_err(dev, "%s:Invalid uport\n", __func__);
+		return -ENODEV;
+	}
+
+	mutex_lock(&msm_uport->mtx);
+	client_count = atomic_read(&msm_uport->client_count);
+	if (msm_uport->pm_state == MSM_HS_PM_ACTIVE)
+		goto exit_pm_resume;
+	if (!atomic_read(&msm_uport->client_req_state))
+		disable_wakeup_interrupt(msm_uport);
+
+	/* For OBS, don't use wakeup interrupt, set gpio to active state */
+	if (msm_uport->obs) {
+		ret = pinctrl_select_state(msm_uport->pinctrl,
+				msm_uport->gpio_state_active);
+		if (ret)
+			MSM_HS_ERR("%s():Error selecting active state",
+				 __func__);
+	}
+
+	ret = msm_hs_clk_bus_vote(msm_uport);
+	if (ret) {
+		MSM_HS_ERR("%s:Failed clock vote %d\n", __func__, ret);
+		dev_err(dev, "%s:Failed clock vote %d\n", __func__, ret);
+		goto exit_pm_resume;
+	}
+	obs_manage_irq(msm_uport, true);
+	msm_uport->pm_state = MSM_HS_PM_ACTIVE;
+	msm_hs_resource_on(msm_uport);
+
+	LOG_USR_MSG(msm_uport->ipc_msm_hs_pwr_ctxt,
+		"%s:PM State:Active client_count %d\n", __func__, client_count);
+exit_pm_resume:
+	msm_uport->sys_suspend_noirq_cnt = 0;
+	mutex_unlock(&msm_uport->mtx);
+	return ret;
+}
+
+#ifdef CONFIG_PM
+static int msm_hs_pm_sys_suspend_noirq(struct device *dev)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct msm_hs_port *msm_uport = get_matching_hs_port(pdev);
+	int clk_cnt, client_count, ret = 0;
+
+	if (IS_ERR_OR_NULL(msm_uport))
+		return -ENODEV;
+	mutex_lock(&msm_uport->mtx);
+	/*
+	 * If there is an active clk request or an impending userspace
+	 * request, fail the suspend callback.
+	 */
+	clk_cnt = atomic_read(&msm_uport->resource_count);
+	client_count = atomic_read(&msm_uport->client_count);
+	if (msm_uport->pm_state == MSM_HS_PM_ACTIVE) {
+		if (clk_cnt == 0 && client_count == 0)
+			msm_uport->sys_suspend_noirq_cnt++;
+		/*
+		 * Serve a forced suspend only after the autosuspend
+		 * timer has expired.
+		 */
+		if (msm_uport->sys_suspend_noirq_cnt >= 2) {
+			msm_uport->pm_state = MSM_HS_PM_SYS_SUSPENDED;
+			msm_uport->sys_suspend_noirq_cnt = 0;
+			mutex_unlock(&msm_uport->mtx);
+
+			msm_hs_pm_suspend(dev);
+			/*
+			 * Synchronize RT-pm and system-pm, RT-PM thinks that
+			 * we are active. The three calls below let the RT-PM
+			 * know that we are suspended already without calling
+			 * suspend callback
+			 */
+			pm_runtime_disable(dev);
+			pm_runtime_set_suspended(dev);
+			pm_runtime_enable(dev);
+
+			/* Balance out the mutex_unlock() on the exit path */
+			mutex_lock(&msm_uport->mtx);
+		} else {
+			ret = -EBUSY;
+		}
+	}
+	mutex_unlock(&msm_uport->mtx);
+	if (ret)
+		MSM_HS_WARN("%s:Fail Suspend.clk_cnt:%d,clnt_count:%d\n",
+			__func__, clk_cnt, client_count);
+	else
+		LOG_USR_MSG(msm_uport->ipc_msm_hs_pwr_ctxt,
+			"%s:PM State:Sys-Suspended client_count %d\n",
+			__func__, client_count);
+	return ret;
+}
+
+static int msm_hs_pm_sys_resume_noirq(struct device *dev)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct msm_hs_port *msm_uport = get_matching_hs_port(pdev);
+
+	if (IS_ERR_OR_NULL(msm_uport))
+		return -ENODEV;
+	/*
+	 * Note system-pm resume and update the state
+	 * variable. Resource activation will be done
+	 * when transfer is requested.
+	 */
+
+	mutex_lock(&msm_uport->mtx);
+	if (msm_uport->pm_state == MSM_HS_PM_SYS_SUSPENDED)
+		msm_uport->pm_state = MSM_HS_PM_SUSPENDED;
+	LOG_USR_MSG(msm_uport->ipc_msm_hs_pwr_ctxt,
+		"%s:PM State: Suspended\n", __func__);
+	mutex_unlock(&msm_uport->mtx);
+	return 0;
+}
+#endif
+
+#ifdef CONFIG_PM
+static void msm_serial_hs_rt_init(struct uart_port *uport)
+{
+	struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
+
+	MSM_HS_INFO("%s(): Enabling runtime pm", __func__);
+	pm_runtime_set_suspended(uport->dev);
+	pm_runtime_set_autosuspend_delay(uport->dev, 100);
+	pm_runtime_use_autosuspend(uport->dev);
+	mutex_lock(&msm_uport->mtx);
+	msm_uport->pm_state = MSM_HS_PM_SUSPENDED;
+	mutex_unlock(&msm_uport->mtx);
+	pm_runtime_enable(uport->dev);
+}
+
+static int msm_hs_runtime_suspend(struct device *dev)
+{
+	msm_hs_pm_suspend(dev);
+	return 0;
+}
+
+static int msm_hs_runtime_resume(struct device *dev)
+{
+	return msm_hs_pm_resume(dev);
+}
+#else
+static void msm_serial_hs_rt_init(struct uart_port *uport) {}
+static int msm_hs_runtime_suspend(struct device *dev) { return 0; }
+static int msm_hs_runtime_resume(struct device *dev) { return 0; }
+#endif
+
+static int msm_hs_probe(struct platform_device *pdev)
+{
+	int ret = 0;
+	struct uart_port *uport;
+	struct msm_hs_port *msm_uport;
+	struct resource *core_resource;
+	struct resource *bam_resource;
+	int core_irqres, bam_irqres, wakeup_irqres;
+	struct msm_serial_hs_platform_data *pdata = pdev->dev.platform_data;
+	unsigned long data;
+	char name[30];
+
+	if (pdev->dev.of_node) {
+		dev_dbg(&pdev->dev, "device tree enabled\n");
+		pdata = msm_hs_dt_to_pdata(pdev);
+		if (IS_ERR(pdata))
+			return PTR_ERR(pdata);
+
+		if (pdev->id < 0) {
+			pdev->id = device_id_grab_next_free();
+			if (pdev->id < 0) {
+				dev_err(&pdev->dev,
+					"Error grabbing next free device id");
+				return pdev->id;
+			}
+		} else {
+			ret = device_id_set_used(pdev->id);
+			if (ret < 0) {
+				dev_err(&pdev->dev, "%d alias taken",
+					pdev->id);
+				return ret;
+			}
+		}
+		pdev->dev.platform_data = pdata;
+	}
+
+	if (pdev->id < 0 || pdev->id >= UARTDM_NR) {
+		dev_err(&pdev->dev, "Invalid platform device ID = %d\n",
+								pdev->id);
+		return -EINVAL;
+	}
+
+	msm_uport = devm_kzalloc(&pdev->dev, sizeof(struct msm_hs_port),
+			GFP_KERNEL);
+	if (!msm_uport) {
+		dev_err(&pdev->dev, "Memory allocation failed\n");
+		return -ENOMEM;
+	}
+
+	msm_uport->uport.type = PORT_UNKNOWN;
+	uport = &msm_uport->uport;
+	uport->dev = &pdev->dev;
+
+	if (pdev->dev.of_node)
+		msm_uport->uart_type = BLSP_HSUART;
+
+	msm_hs_get_pinctrl_configs(uport);
+	/* Get required resources for BAM HSUART */
+	core_resource = platform_get_resource_byname(pdev,
+				IORESOURCE_MEM, "core_mem");
+	if (!core_resource) {
+		dev_err(&pdev->dev, "Invalid core HSUART Resources.\n");
+		return -ENXIO;
+	}
+	bam_resource = platform_get_resource_byname(pdev,
+				IORESOURCE_MEM, "bam_mem");
+	if (!bam_resource) {
+		dev_err(&pdev->dev, "Invalid BAM HSUART Resources.\n");
+		return -ENXIO;
+	}
+	core_irqres = platform_get_irq_byname(pdev, "core_irq");
+	if (core_irqres < 0) {
+		dev_err(&pdev->dev, "Error %d, invalid core irq resources.\n",
+			core_irqres);
+		return -ENXIO;
+	}
+	bam_irqres = platform_get_irq_byname(pdev, "bam_irq");
+	if (bam_irqres < 0) {
+		dev_err(&pdev->dev, "Error %d, invalid bam irq resources.\n",
+			bam_irqres);
+		return -ENXIO;
+	}
+	wakeup_irqres = platform_get_irq_byname(pdev, "wakeup_irq");
+	if (wakeup_irqres < 0) {
+		wakeup_irqres = -1;
+		pr_info("Wakeup irq not specified.\n");
+	}
+
+	uport->mapbase = core_resource->start;
+
+	uport->membase = ioremap(uport->mapbase,
+				resource_size(core_resource));
+	if (unlikely(!uport->membase)) {
+		dev_err(&pdev->dev, "UART Resource ioremap Failed.\n");
+		return -ENOMEM;
+	}
+	msm_uport->bam_mem = bam_resource->start;
+	msm_uport->bam_base = ioremap(msm_uport->bam_mem,
+				resource_size(bam_resource));
+	if (unlikely(!msm_uport->bam_base)) {
+		dev_err(&pdev->dev, "UART BAM Resource ioremap Failed.\n");
+		iounmap(uport->membase);
+		return -ENOMEM;
+	}
+
+	memset(name, 0, sizeof(name));
+	scnprintf(name, sizeof(name), "%s%s", dev_name(msm_uport->uport.dev),
+									"_state");
+	msm_uport->ipc_msm_hs_log_ctxt =
+			ipc_log_context_create(IPC_MSM_HS_LOG_STATE_PAGES,
+								name, 0);
+	if (!msm_uport->ipc_msm_hs_log_ctxt) {
+		dev_err(&pdev->dev, "%s: error creating logging context",
+								__func__);
+	} else {
+		msm_uport->ipc_debug_mask = INFO_LEV;
+		ret = sysfs_create_file(&pdev->dev.kobj,
+				&dev_attr_debug_mask.attr);
+		if (unlikely(ret))
+			MSM_HS_WARN("%s: Failed to create dev. attr", __func__);
+	}
+
+	uport->irq = core_irqres;
+	msm_uport->bam_irq = bam_irqres;
+	pdata->wakeup_irq = wakeup_irqres;
+
+	msm_uport->bus_scale_table = msm_bus_cl_get_pdata(pdev);
+	if (!msm_uport->bus_scale_table) {
+		MSM_HS_ERR("BLSP UART: Bus scaling is disabled.\n");
+	} else {
+		msm_uport->bus_perf_client =
+			msm_bus_scale_register_client
+				(msm_uport->bus_scale_table);
+		if (!msm_uport->bus_perf_client) {
+			MSM_HS_ERR("%s():Bus client register failed\n",
+				   __func__);
+			ret = -EINVAL;
+			goto unmap_memory;
+		}
+	}
+
+	msm_uport->wakeup.irq = pdata->wakeup_irq;
+	msm_uport->wakeup.ignore = 1;
+	msm_uport->wakeup.inject_rx = pdata->inject_rx_on_wakeup;
+	msm_uport->wakeup.rx_to_inject = pdata->rx_to_inject;
+	msm_uport->obs = pdata->obs;
+
+	msm_uport->bam_tx_ep_pipe_index =
+			pdata->bam_tx_ep_pipe_index;
+	msm_uport->bam_rx_ep_pipe_index =
+			pdata->bam_rx_ep_pipe_index;
+	msm_uport->wakeup.enabled = true;
+
+	uport->iotype = UPIO_MEM;
+	uport->fifosize = 64;
+	uport->ops = &msm_hs_ops;
+	uport->flags = UPF_BOOT_AUTOCONF;
+	uport->uartclk = 7372800;
+	msm_uport->imr_reg = 0x0;
+
+	msm_uport->clk = clk_get(&pdev->dev, "core_clk");
+	if (IS_ERR(msm_uport->clk)) {
+		ret = PTR_ERR(msm_uport->clk);
+		goto deregister_bus_client;
+	}
+
+	msm_uport->pclk = clk_get(&pdev->dev, "iface_clk");
+	/*
+	 * Some configurations do not require explicit pclk control so
+	 * do not flag error on pclk get failure.
+	 */
+	if (IS_ERR(msm_uport->pclk))
+		msm_uport->pclk = NULL;
+
+	msm_uport->hsuart_wq = alloc_workqueue("k_hsuart",
+					WQ_UNBOUND | WQ_MEM_RECLAIM, 1);
+	if (!msm_uport->hsuart_wq) {
+		MSM_HS_ERR("%s(): Unable to create workqueue hsuart_wq\n",
+								__func__);
+		ret =  -ENOMEM;
+		goto put_clk;
+	}
+
+	mutex_init(&msm_uport->mtx);
+
+	/* Initialize SPS HW connected with UART core */
+	ret = msm_hs_sps_init(msm_uport);
+	if (unlikely(ret)) {
+		MSM_HS_ERR("SPS Initialization failed ! err=%d", ret);
+		goto destroy_mutex;
+	}
+
+	msm_uport->tx.flush = FLUSH_SHUTDOWN;
+	msm_uport->rx.flush = FLUSH_SHUTDOWN;
+
+	memset(name, 0, sizeof(name));
+	scnprintf(name, sizeof(name), "%s%s", dev_name(msm_uport->uport.dev),
+									"_tx");
+	msm_uport->tx.ipc_tx_ctxt =
+		ipc_log_context_create(IPC_MSM_HS_LOG_DATA_PAGES, name, 0);
+	if (!msm_uport->tx.ipc_tx_ctxt)
+		dev_err(&pdev->dev, "%s: error creating tx logging context",
+								__func__);
+
+	memset(name, 0, sizeof(name));
+	scnprintf(name, sizeof(name), "%s%s", dev_name(msm_uport->uport.dev),
+									"_rx");
+	msm_uport->rx.ipc_rx_ctxt = ipc_log_context_create(
+					IPC_MSM_HS_LOG_DATA_PAGES, name, 0);
+	if (!msm_uport->rx.ipc_rx_ctxt)
+		dev_err(&pdev->dev, "%s: error creating rx logging context",
+								__func__);
+
+	memset(name, 0, sizeof(name));
+	scnprintf(name, sizeof(name), "%s%s", dev_name(msm_uport->uport.dev),
+									"_pwr");
+	msm_uport->ipc_msm_hs_pwr_ctxt = ipc_log_context_create(
+					IPC_MSM_HS_LOG_USER_PAGES, name, 0);
+	if (!msm_uport->ipc_msm_hs_pwr_ctxt)
+		dev_err(&pdev->dev, "%s: error creating usr logging context",
+								__func__);
+
+	uport->irq = core_irqres;
+	msm_uport->bam_irq = bam_irqres;
+
+	clk_set_rate(msm_uport->clk, msm_uport->uport.uartclk);
+	msm_hs_clk_bus_vote(msm_uport);
+	ret = uartdm_init_port(uport);
+	if (unlikely(ret))
+		goto err_clock;
+
+	/* configure the CR Protection to Enable */
+	msm_hs_write(uport, UART_DM_CR, CR_PROTECTION_EN);
+
+	/*
+	 * Enable command register protection before going ahead, as this hw
+	 * configuration makes sure that an issued cmd to the CR register
+	 * completes before the next issued cmd starts. Hence mb() is
+	 * required here.
+	 */
+	mb();
+
+	/*
+	 * Set RX_BREAK_ZERO_CHAR_OFF and RX_ERROR_CHAR_OFF
+	 * so that rx_break characters and characters with parity or
+	 * framing errors don't enter the UART RX FIFO.
+	 */
+	data = msm_hs_read(uport, UART_DM_MR2);
+	data |= (UARTDM_MR2_RX_BREAK_ZERO_CHAR_OFF |
+			UARTDM_MR2_RX_ERROR_CHAR_OFF);
+	msm_hs_write(uport, UART_DM_MR2, data);
+	/* Ensure register IO completion */
+	mb();
+
+	ret = sysfs_create_file(&pdev->dev.kobj, &dev_attr_clock.attr);
+	if (unlikely(ret)) {
+		MSM_HS_ERR("Probe Failed as sysfs failed\n");
+		goto err_clock;
+	}
+
+	msm_serial_debugfs_init(msm_uport, pdev->id);
+	msm_hs_unconfig_uart_gpios(uport);
+
+	uport->line = pdev->id;
+	if (pdata->userid && pdata->userid < UARTDM_NR)
+		uport->line = pdata->userid;
+	ret = uart_add_one_port(&msm_hs_driver, uport);
+	if (!ret) {
+		msm_hs_clk_bus_unvote(msm_uport);
+		msm_serial_hs_rt_init(uport);
+		return ret;
+	}
+
+err_clock:
+	msm_hs_clk_bus_unvote(msm_uport);
+
+destroy_mutex:
+	mutex_destroy(&msm_uport->mtx);
+	destroy_workqueue(msm_uport->hsuart_wq);
+
+put_clk:
+	if (msm_uport->pclk)
+		clk_put(msm_uport->pclk);
+
+	if (msm_uport->clk)
+		clk_put(msm_uport->clk);
+
+deregister_bus_client:
+	msm_bus_scale_unregister_client(msm_uport->bus_perf_client);
+unmap_memory:
+	iounmap(uport->membase);
+	iounmap(msm_uport->bam_base);
+
+	return ret;
+}
+
+static int __init msm_serial_hs_init(void)
+{
+	int ret;
+
+	ret = uart_register_driver(&msm_hs_driver);
+	if (unlikely(ret)) {
+		pr_err("%s failed to load\n", __func__);
+		return ret;
+	}
+	debug_base = debugfs_create_dir("msm_serial_hs", NULL);
+	if (IS_ERR_OR_NULL(debug_base))
+		pr_err("msm_serial_hs: Cannot create debugfs dir\n");
+
+	ret = platform_driver_register(&msm_serial_hs_platform_driver);
+	if (ret) {
+		pr_err("%s failed to load\n", __func__);
+		debugfs_remove_recursive(debug_base);
+		uart_unregister_driver(&msm_hs_driver);
+		return ret;
+	}
+
+	pr_info("msm_serial_hs module loaded\n");
+	return ret;
+}
+
+/*
+ *  Called by the upper layer when port is closed.
+ *     - Disables the port
+ *     - Unhook the ISR
+ */
+static void msm_hs_shutdown(struct uart_port *uport)
+{
+	int ret, rc;
+	struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
+	struct circ_buf *tx_buf = &uport->state->xmit;
+	int data;
+	unsigned long flags;
+
+	if (is_use_low_power_wakeup(msm_uport))
+		irq_set_irq_wake(msm_uport->wakeup.irq, 0);
+
+	if (msm_uport->wakeup.enabled)
+		disable_irq(msm_uport->wakeup.irq);
+	else
+		disable_irq(uport->irq);
+
+	spin_lock_irqsave(&uport->lock, flags);
+	msm_uport->wakeup.enabled = false;
+	msm_uport->wakeup.ignore = 1;
+	spin_unlock_irqrestore(&uport->lock, flags);
+
+	/* Free the interrupt */
+	free_irq(uport->irq, msm_uport);
+	if (is_use_low_power_wakeup(msm_uport)) {
+		free_irq(msm_uport->wakeup.irq, msm_uport);
+		MSM_HS_DBG("%s(): wakeup irq freed", __func__);
+	}
+	msm_uport->wakeup.freed = true;
+
+	/* make sure tx lh finishes */
+	flush_kthread_worker(&msm_uport->tx.kworker);
+	ret = wait_event_timeout(msm_uport->tx.wait,
+			uart_circ_empty(tx_buf), 500);
+	if (!ret)
+		MSM_HS_WARN("Shutdown called when tx buff not empty");
+
+	msm_hs_resource_vote(msm_uport);
+	/* Stop remote side from sending data */
+	msm_hs_disable_flow_control(uport, false);
+	/* make sure rx lh finishes */
+	flush_kthread_worker(&msm_uport->rx.kworker);
+
+	if (msm_uport->rx.flush != FLUSH_SHUTDOWN) {
+		/* disable and disconnect rx */
+		ret = wait_event_timeout(msm_uport->rx.wait,
+				!msm_uport->rx.pending_flag, 500);
+		if (!ret)
+			MSM_HS_WARN("%s(): rx disconnect not complete",
+				__func__);
+		msm_hs_disconnect_rx(uport);
+	} else {
+		MSM_HS_DBG("%s(): Rx Flush is in Proper state=%d\n",
+			__func__, msm_uport->rx.flush);
+	}
+
+	if (cancel_delayed_work_sync(&msm_uport->rx.flip_insert_work))
+		MSM_HS_DBG("%s(): Work was pending, canceled it\n",
+			__func__);
+	flush_workqueue(msm_uport->hsuart_wq);
+
+	/* BAM Disconnect for TX */
+	data = msm_hs_read(uport, UART_DM_DMEN);
+	data &= ~UARTDM_TX_BAM_ENABLE_BMSK;
+	msm_hs_write(uport, UART_DM_DMEN, data);
+	ret = sps_tx_disconnect(msm_uport);
+	if (ret)
+		MSM_HS_ERR("%s(): sps_disconnect failed\n",
+					__func__);
+	msm_uport->tx.flush = FLUSH_SHUTDOWN;
+	/* Disable the transmitter */
+	msm_hs_write(uport, UART_DM_CR, UARTDM_CR_TX_DISABLE_BMSK);
+	/* Disable the receiver */
+	msm_hs_write(uport, UART_DM_CR, UARTDM_CR_RX_DISABLE_BMSK);
+
+	msm_uport->imr_reg = 0;
+	msm_hs_write(uport, UART_DM_IMR, msm_uport->imr_reg);
+	/*
+	 * Complete all device writes before actually disabling uartclk.
+	 * Hence mb() is required here.
+	 */
+	mb();
+
+	msm_uport->rx.buffer_pending = NONE_PENDING;
+	MSM_HS_DBG("%s(): tx, rx events complete", __func__);
+
+	dma_unmap_single(uport->dev, msm_uport->tx.dma_base,
+			 UART_XMIT_SIZE, DMA_TO_DEVICE);
+
+	msm_hs_resource_unvote(msm_uport);
+	rc = atomic_read(&msm_uport->resource_count);
+	if (rc) {
+		atomic_set(&msm_uport->resource_count, 1);
+		MSM_HS_WARN("%s(): removing extra vote\n", __func__);
+		msm_hs_resource_unvote(msm_uport);
+	}
+	if (atomic_read(&msm_uport->client_req_state)) {
+		MSM_HS_WARN("%s: Client clock vote imbalance\n", __func__);
+		atomic_set(&msm_uport->client_req_state, 0);
+	}
+	if (atomic_read(&msm_uport->client_count)) {
+		MSM_HS_WARN("%s: Client vote on, forcing to 0\n", __func__);
+		atomic_set(&msm_uport->client_count, 0);
+	}
+	msm_hs_unconfig_uart_gpios(uport);
+	LOG_USR_MSG(msm_uport->ipc_msm_hs_pwr_ctxt,
+		"%s:UART port closed, Client_Count 0\n", __func__);
+}
+
+static void __exit msm_serial_hs_exit(void)
+{
+	pr_info("msm_serial_hs module removed\n");
+	debugfs_remove_recursive(debug_base);
+	platform_driver_unregister(&msm_serial_hs_platform_driver);
+	uart_unregister_driver(&msm_hs_driver);
+}
+
+static const struct dev_pm_ops msm_hs_dev_pm_ops = {
+	.runtime_suspend = msm_hs_runtime_suspend,
+	.runtime_resume = msm_hs_runtime_resume,
+	.runtime_idle = NULL,
+	.suspend_noirq = msm_hs_pm_sys_suspend_noirq,
+	.resume_noirq = msm_hs_pm_sys_resume_noirq,
+};
+
+static struct platform_driver msm_serial_hs_platform_driver = {
+	.probe	= msm_hs_probe,
+	.remove = msm_hs_remove,
+	.driver = {
+		.name = "msm_serial_hs",
+		.pm   = &msm_hs_dev_pm_ops,
+		.of_match_table = msm_hs_match_table,
+	},
+};
+
+static struct uart_driver msm_hs_driver = {
+	.owner = THIS_MODULE,
+	.driver_name = "msm_serial_hs",
+	.dev_name = "ttyHS",
+	.nr = UARTDM_NR,
+	.cons = 0,
+};
+
+static struct uart_ops msm_hs_ops = {
+	.tx_empty = msm_hs_tx_empty,
+	.set_mctrl = msm_hs_set_mctrl_locked,
+	.get_mctrl = msm_hs_get_mctrl_locked,
+	.stop_tx = msm_hs_stop_tx_locked,
+	.start_tx = msm_hs_start_tx_locked,
+	.stop_rx = msm_hs_stop_rx_locked,
+	.enable_ms = msm_hs_enable_ms_locked,
+	.break_ctl = msm_hs_break_ctl,
+	.startup = msm_hs_startup,
+	.shutdown = msm_hs_shutdown,
+	.set_termios = msm_hs_set_termios,
+	.type = msm_hs_type,
+	.config_port = msm_hs_config_port,
+	.flush_buffer = NULL,
+	.ioctl = msm_hs_ioctl,
+};
+
+module_init(msm_serial_hs_init);
+module_exit(msm_serial_hs_exit);
+MODULE_DESCRIPTION("High Speed UART Driver for the MSM chipset");
+MODULE_VERSION("1.2");
+MODULE_LICENSE("GPL v2");
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/tty/serial/msm_serial_hs_hwreg.h	2019-01-22 16:16:27.199279876 +0100
@@ -0,0 +1,283 @@
+/* drivers/serial/msm_serial_hs_hwreg.h
+ *
+ * Copyright (c) 2007-2009, 2012-2014,The Linux Foundation. All rights reserved.
+ * 
+ * All source code in this file is licensed under the following license
+ * except where indicated.
+ * 
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ * 
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ * See the GNU General Public License for more details.
+ * 
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can find it at http://www.fsf.org
+ */
+
+#ifndef MSM_SERIAL_HS_HWREG_H
+#define MSM_SERIAL_HS_HWREG_H
+
+#define GSBI_CONTROL_ADDR              0x0
+#define GSBI_PROTOCOL_CODE_MASK        0x30
+#define GSBI_PROTOCOL_I2C_UART         0x60
+#define GSBI_PROTOCOL_UART             0x40
+#define GSBI_PROTOCOL_IDLE             0x0
+
+#define TCSR_ADM_1_A_CRCI_MUX_SEL      0x78
+#define TCSR_ADM_1_B_CRCI_MUX_SEL      0x7C
+#define ADM1_CRCI_GSBI6_RX_SEL         0x800
+#define ADM1_CRCI_GSBI6_TX_SEL         0x400
+
+#define MSM_ENABLE_UART_CLOCK TIOCPMGET
+#define MSM_DISABLE_UART_CLOCK TIOCPMPUT
+#define MSM_GET_UART_CLOCK_STATUS TIOCPMACT
+
+enum msm_hsl_regs {
+	UARTDM_MR1,
+	UARTDM_MR2,
+	UARTDM_IMR,
+	UARTDM_SR,
+	UARTDM_CR,
+	UARTDM_CSR,
+	UARTDM_IPR,
+	UARTDM_ISR,
+	UARTDM_RX_TOTAL_SNAP,
+	UARTDM_RFWR,
+	UARTDM_TFWR,
+	UARTDM_RF,
+	UARTDM_TF,
+	UARTDM_MISR,
+	UARTDM_DMRX,
+	UARTDM_NCF_TX,
+	UARTDM_DMEN,
+	UARTDM_BCR,
+	UARTDM_TXFS,
+	UARTDM_RXFS,
+	UARTDM_LAST,
+};
+
+enum msm_hs_regs {
+	UART_DM_MR1 = 0x0,
+	UART_DM_MR2 = 0x4,
+	UART_DM_IMR = 0xb0,
+	UART_DM_SR = 0xa4,
+	UART_DM_CR = 0xa8,
+	UART_DM_CSR = 0xa0,
+	UART_DM_IPR = 0x18,
+	UART_DM_ISR = 0xb4,
+	UART_DM_RX_TOTAL_SNAP = 0xbc,
+	UART_DM_TFWR = 0x1c,
+	UART_DM_RFWR = 0x20,
+	UART_DM_RF = 0x140,
+	UART_DM_TF = 0x100,
+	UART_DM_MISR = 0xac,
+	UART_DM_DMRX = 0x34,
+	UART_DM_NCF_TX = 0x40,
+	UART_DM_DMEN = 0x3c,
+	UART_DM_TXFS = 0x4c,
+	UART_DM_RXFS = 0x50,
+	UART_DM_RX_TRANS_CTRL = 0xcc,
+	UART_DM_BCR = 0xc8,
+};
+
+#define UARTDM_MR1_ADDR 0x0
+#define UARTDM_MR2_ADDR 0x4
+
+/* Backward Compatibility Register for UARTDM Core v1.4 */
+#define UARTDM_BCR_ADDR	0xc8
+
+/*
+ * UARTDM Core v1.4 STALE_IRQ_EMPTY bit definition
+ * Stale interrupt will fire if bit is set when RX-FIFO is empty
+ */
+#define UARTDM_BCR_TX_BREAK_DISABLE	0x1
+#define UARTDM_BCR_STALE_IRQ_EMPTY	0x2
+#define UARTDM_BCR_RX_DMRX_LOW_EN	0x4
+#define UARTDM_BCR_RX_STAL_IRQ_DMRX_EQL	0x10
+#define UARTDM_BCR_RX_DMRX_1BYTE_RES_EN	0x20
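+
+/*
+ * Illustrative sketch (an assumption, not part of the original patch):
+ * a v1.4 driver would typically enable one of these behaviours with a
+ * read-modify-write of BCR, using the msm_hs_read()/msm_hs_write()
+ * helpers seen elsewhere in this series (uport and data as in those
+ * driver functions):
+ *
+ *	data = msm_hs_read(uport, UART_DM_BCR);
+ *	data |= UARTDM_BCR_STALE_IRQ_EMPTY;
+ *	msm_hs_write(uport, UART_DM_BCR, data);
+ */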
+
+/* TRANSFER_CONTROL Register for UARTDM Core v1.4 */
+#define UARTDM_RX_TRANS_CTRL_ADDR      0xcc
+
+/* TRANSFER_CONTROL Register bits */
+#define RX_STALE_AUTO_RE_EN		0x1
+#define RX_TRANS_AUTO_RE_ACTIVATE	0x2
+#define RX_DMRX_CYCLIC_EN		0x4
+
+/* write only register */
+#define UARTDM_CSR_115200 0xFF
+#define UARTDM_CSR_57600  0xEE
+#define UARTDM_CSR_38400  0xDD
+#define UARTDM_CSR_28800  0xCC
+#define UARTDM_CSR_19200  0xBB
+#define UARTDM_CSR_14400  0xAA
+#define UARTDM_CSR_9600   0x99
+#define UARTDM_CSR_7200   0x88
+#define UARTDM_CSR_4800   0x77
+#define UARTDM_CSR_3600   0x66
+#define UARTDM_CSR_2400   0x55
+#define UARTDM_CSR_1200   0x44
+#define UARTDM_CSR_600    0x33
+#define UARTDM_CSR_300    0x22
+#define UARTDM_CSR_150    0x11
+#define UARTDM_CSR_75     0x00
+
+/* write only register */
+#define UARTDM_IPR_ADDR 0x18
+#define UARTDM_TFWR_ADDR 0x1c
+#define UARTDM_RFWR_ADDR 0x20
+#define UARTDM_HCR_ADDR 0x24
+#define UARTDM_DMRX_ADDR 0x34
+#define UARTDM_DMEN_ADDR 0x3c
+
+/* UART_DM_NO_CHARS_FOR_TX */
+#define UARTDM_NCF_TX_ADDR 0x40
+
+#define UARTDM_BADR_ADDR 0x44
+
+#define UARTDM_SIM_CFG_ADDR 0x80
+
+/* Read Only register */
+#define UARTDM_TXFS_ADDR 0x4C
+#define UARTDM_RXFS_ADDR 0x50
+
+/* Register field Mask Mapping */
+#define UARTDM_SR_RX_BREAK_BMSK	        BIT(6)
+#define UARTDM_SR_PAR_FRAME_BMSK	BIT(5)
+#define UARTDM_SR_OVERRUN_BMSK		BIT(4)
+#define UARTDM_SR_TXEMT_BMSK		BIT(3)
+#define UARTDM_SR_TXRDY_BMSK		BIT(2)
+#define UARTDM_SR_RXRDY_BMSK		BIT(0)
+
+#define UARTDM_CR_TX_DISABLE_BMSK	BIT(3)
+#define UARTDM_CR_RX_DISABLE_BMSK	BIT(1)
+#define UARTDM_CR_TX_EN_BMSK		BIT(2)
+#define UARTDM_CR_RX_EN_BMSK		BIT(0)
+
+/* UARTDM_CR channel_command bit value (register field is bits 8:4) */
+#define RESET_RX		0x10
+#define RESET_TX		0x20
+#define RESET_ERROR_STATUS	0x30
+#define RESET_BREAK_INT		0x40
+#define START_BREAK		0x50
+#define STOP_BREAK		0x60
+#define RESET_CTS		0x70
+#define RESET_STALE_INT		0x80
+#define RFR_LOW			0xD0
+#define RFR_HIGH		0xE0
+#define CR_PROTECTION_EN	0x100
+#define STALE_EVENT_ENABLE	0x500
+#define STALE_EVENT_DISABLE	0x600
+#define FORCE_STALE_EVENT	0x400
+#define CLEAR_TX_READY		0x300
+#define RESET_TX_ERROR		0x800
+#define RESET_TX_DONE		0x810
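+
+/*
+ * Illustrative sketch (an assumption, not part of the original patch):
+ * a channel command is issued by writing its encoded value to the
+ * command register, mirroring the msm_hs_write() usage elsewhere in
+ * this series, e.g. resetting the receiver:
+ *
+ *	msm_hs_write(uport, UART_DM_CR, RESET_RX);
+ */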
+
+/*
+ * UARTDM_CR BAM IFC command bit value
+ * for UARTDM Core v1.4
+ */
+#define START_RX_BAM_IFC	0x850
+#define START_TX_BAM_IFC	0x860
+
+#define UARTDM_MR1_AUTO_RFR_LEVEL1_BMSK 0xffffff00
+#define UARTDM_MR1_AUTO_RFR_LEVEL0_BMSK 0x3f
+#define UARTDM_MR1_CTS_CTL_BMSK 0x40
+#define UARTDM_MR1_RX_RDY_CTL_BMSK 0x80
+
+/*
+ * UARTDM Core v1.4 MR2_RFR_CTS_LOOP bitmask
+ * Enables internal loopback between RFR_N of
+ * RX channel and CTS_N of TX channel.
+ */
+#define UARTDM_MR2_RFR_CTS_LOOP_MODE_BMSK	0x400
+
+#define UARTDM_MR2_LOOP_MODE_BMSK		0x80
+#define UARTDM_MR2_ERROR_MODE_BMSK		0x40
+#define UARTDM_MR2_BITS_PER_CHAR_BMSK		0x30
+#define UARTDM_MR2_RX_ZERO_CHAR_OFF		0x100
+#define UARTDM_MR2_RX_ERROR_CHAR_OFF		0x200
+#define UARTDM_MR2_RX_BREAK_ZERO_CHAR_OFF	0x100
+
+#define UARTDM_MR2_BITS_PER_CHAR_8	(0x3 << 4)
+
+/* bits per character configuration */
+#define FIVE_BPC  (0 << 4)
+#define SIX_BPC   (1 << 4)
+#define SEVEN_BPC (2 << 4)
+#define EIGHT_BPC (3 << 4)
+
+#define UARTDM_MR2_STOP_BIT_LEN_BMSK 0xc
+#define STOP_BIT_ONE (1 << 2)
+#define STOP_BIT_TWO (3 << 2)
+
+#define UARTDM_MR2_PARITY_MODE_BMSK 0x3
+
+/* Parity configuration */
+#define NO_PARITY 0x0
+#define EVEN_PARITY 0x2
+#define ODD_PARITY 0x1
+#define SPACE_PARITY 0x3
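+
+/*
+ * Illustrative sketch (an assumption, not part of the original patch):
+ * an 8N1 line configuration would be composed from the MR2 fields
+ * above with a read-modify-write (uport and data as in the driver
+ * functions elsewhere in this series):
+ *
+ *	data = msm_hs_read(uport, UART_DM_MR2);
+ *	data &= ~(UARTDM_MR2_BITS_PER_CHAR_BMSK |
+ *		  UARTDM_MR2_STOP_BIT_LEN_BMSK |
+ *		  UARTDM_MR2_PARITY_MODE_BMSK);
+ *	data |= EIGHT_BPC | STOP_BIT_ONE | NO_PARITY;
+ *	msm_hs_write(uport, UART_DM_MR2, data);
+ */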
+
+#define UARTDM_IPR_STALE_TIMEOUT_MSB_BMSK 0xffffff80
+#define UARTDM_IPR_STALE_LSB_BMSK 0x1f
+
+/* These can be used for both ISR and IMR register */
+#define UARTDM_ISR_TX_READY_BMSK	BIT(7)
+#define UARTDM_ISR_CURRENT_CTS_BMSK	BIT(6)
+#define UARTDM_ISR_DELTA_CTS_BMSK	BIT(5)
+#define UARTDM_ISR_RXLEV_BMSK		BIT(4)
+#define UARTDM_ISR_RXSTALE_BMSK		BIT(3)
+#define UARTDM_ISR_RXBREAK_BMSK		BIT(2)
+#define UARTDM_ISR_RXHUNT_BMSK		BIT(1)
+#define UARTDM_ISR_TXLEV_BMSK		BIT(0)
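+
+/*
+ * Illustrative sketch (an assumption, not part of the original patch):
+ * the driver keeps a shadow of IMR in msm_uport->imr_reg, so unmasking
+ * the RX-stale interrupt, for example, would look like
+ *
+ *	msm_uport->imr_reg |= UARTDM_ISR_RXSTALE_BMSK;
+ *	msm_hs_write(uport, UART_DM_IMR, msm_uport->imr_reg);
+ */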
+
+/* Field definitions for UART_DM_DMEN*/
+#define UARTDM_TX_DM_EN_BMSK 0x1
+#define UARTDM_RX_DM_EN_BMSK 0x2
+
+/*
+ * UARTDM Core v1.4 bitmask
+ * Bitmasks for enabling Rx and Tx BAM Interface
+ */
+#define UARTDM_TX_BAM_ENABLE_BMSK 0x4
+#define UARTDM_RX_BAM_ENABLE_BMSK 0x8
+
+/* Register offsets for UART Core v1.3 */
+
+/* write only register */
+#define UARTDM_CSR_ADDR    0x8
+
+/* write only register */
+#define UARTDM_TF_ADDR   0x70
+#define UARTDM_TF2_ADDR  0x74
+#define UARTDM_TF3_ADDR  0x78
+#define UARTDM_TF4_ADDR  0x7c
+
+/* write only register */
+#define UARTDM_CR_ADDR 0x10
+/* write only register */
+#define UARTDM_IMR_ADDR 0x14
+#define UARTDM_IRDA_ADDR 0x38
+
+/* Read Only register */
+#define UARTDM_SR_ADDR 0x8
+
+/* Read Only register */
+#define UARTDM_RF_ADDR   0x70
+#define UARTDM_RF2_ADDR  0x74
+#define UARTDM_RF3_ADDR  0x78
+#define UARTDM_RF4_ADDR  0x7c
+
+/* Read Only register */
+#define UARTDM_MISR_ADDR 0x10
+
+/* Read Only register */
+#define UARTDM_ISR_ADDR 0x14
+#define UARTDM_RX_TOTAL_SNAP_ADDR 0x38
+
+#endif /* MSM_SERIAL_HS_HWREG_H */
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/tty/serial/msm_smd_tty.c	2019-10-29 09:26:24.941215880 +0100
@@ -0,0 +1,1045 @@
+/* Copyright (C) 2007 Google, Inc.
+ * Copyright (c) 2009-2015, The Linux Foundation. All rights reserved.
+ * Author: Brian Swetland <swetland@google.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/cdev.h>
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/pm.h>
+#include <linux/platform_device.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/ipc_logging.h>
+#include <linux/of.h>
+#include <linux/suspend.h>
+
+#include <linux/tty.h>
+#include <linux/tty_driver.h>
+#include <linux/tty_flip.h>
+
+#include <soc/qcom/smd.h>
+#include <soc/qcom/smsm.h>
+#include <soc/qcom/subsystem_restart.h>
+
+#define MODULE_NAME "msm_smdtty"
+#define MAX_SMD_TTYS 37
+#define MAX_TTY_BUF_SIZE 2048
+#define TTY_PUSH_WS_DELAY 500
+#define TTY_PUSH_WS_POST_SUSPEND_DELAY 100
+#define MAX_RA_WAKE_LOCK_NAME_LEN 32
+#define SMD_TTY_LOG_PAGES 2
+
+#define SMD_TTY_INFO(buf...) \
+do { \
+	if (smd_tty_log_ctx) { \
+		ipc_log_string(smd_tty_log_ctx, buf); \
+	} \
+} while (0)
+
+#define SMD_TTY_ERR(buf...) \
+do { \
+	if (smd_tty_log_ctx) \
+		ipc_log_string(smd_tty_log_ctx, buf); \
+	pr_err(buf); \
+} while (0)
+
+static void *smd_tty_log_ctx;
+static bool smd_tty_in_suspend;
+static bool smd_tty_read_in_suspend;
+static struct wakeup_source read_in_suspend_ws;
+
+/**
+ * struct smd_tty_info - context for an individual SMD TTY device
+ *
+ * @ch:  SMD channel handle
+ * @port:  TTY port context structure
+ * @device_ptr:  TTY device pointer
+ * @pending_ws:  pending-data wakeup source
+ * @tty_tsklt:  read tasklet
+ * @buf_req_timer:  RX buffer retry timer
+ * @ch_allocated:  completion set when SMD channel is allocated
+ * @pil:  Peripheral Image Loader handle
+ * @edge:  SMD edge associated with port
+ * @ch_name:  SMD channel name associated with port
+ * @dev_name:  SMD platform device name associated with port
+ *
+ * @open_lock_lha1: open/close lock - used to serialize open/close operations
+ * @open_wait:  Timeout in seconds to wait for SMD port to be created / opened
+ *
+ * @reset_lock_lha2: lock for reset and open state
+ * @in_reset:  True if SMD channel is closed / in SSR
+ * @in_reset_updated:  reset state changed
+ * @is_open:  True if SMD port is open
+ * @ch_opened_wait_queue:  SMD port open/close wait queue
+ *
+ * @ra_lock_lha3:  Read-available lock - used to synchronize reads from SMD
+ * @ra_wakeup_source_name: Name of the read-available wakeup source
+ * @ra_wakeup_source:  Read-available wakeup source
+ */
+struct smd_tty_info {
+	smd_channel_t *ch;
+	struct tty_port port;
+	struct device *device_ptr;
+	struct wakeup_source pending_ws;
+	struct tasklet_struct tty_tsklt;
+	struct timer_list buf_req_timer;
+	struct completion ch_allocated;
+	void *pil;
+	uint32_t edge;
+	char ch_name[SMD_MAX_CH_NAME_LEN];
+	char dev_name[SMD_MAX_CH_NAME_LEN];
+
+	struct mutex open_lock_lha1;
+	unsigned int open_wait;
+
+	spinlock_t reset_lock_lha2;
+	int in_reset;
+	int in_reset_updated;
+	int is_open;
+	wait_queue_head_t ch_opened_wait_queue;
+
+	spinlock_t ra_lock_lha3;
+	char ra_wakeup_source_name[MAX_RA_WAKE_LOCK_NAME_LEN];
+	struct wakeup_source ra_wakeup_source;
+};
+
+/**
+ * struct smd_tty_pfdriver - SMD tty channel platform driver structure
+ *
+ * @list:  Adds this structure into smd_tty_platform_driver_list::list.
+ * @ref_cnt:  reference count for this structure.
+ * @driver:  SMD channel platform driver context structure
+ */
+struct smd_tty_pfdriver {
+	struct list_head list;
+	int ref_cnt;
+	struct platform_driver driver;
+};
+
+#define LOOPBACK_IDX 36
+
+static struct delayed_work loopback_work;
+static struct smd_tty_info smd_tty[MAX_SMD_TTYS];
+
+static DEFINE_MUTEX(smd_tty_pfdriver_lock_lha1);
+static LIST_HEAD(smd_tty_pfdriver_list);
+
+static int is_in_reset(struct smd_tty_info *info)
+{
+	return info->in_reset;
+}
+
+static void buf_req_retry(unsigned long param)
+{
+	struct smd_tty_info *info = (struct smd_tty_info *)param;
+	unsigned long flags;
+
+	spin_lock_irqsave(&info->reset_lock_lha2, flags);
+	if (info->is_open) {
+		spin_unlock_irqrestore(&info->reset_lock_lha2, flags);
+		tasklet_hi_schedule(&info->tty_tsklt);
+		return;
+	}
+	spin_unlock_irqrestore(&info->reset_lock_lha2, flags);
+}
+
+static ssize_t open_timeout_store(struct device *dev,
+				struct device_attribute *attr,
+				const char *buf, size_t n)
+{
+	unsigned int num_dev;
+	unsigned long wait;
+
+	if (dev == NULL) {
+		SMD_TTY_INFO("%s: Invalid Device passed", __func__);
+		return -EINVAL;
+	}
+	for (num_dev = 0; num_dev < MAX_SMD_TTYS; num_dev++) {
+		if (dev == smd_tty[num_dev].device_ptr)
+			break;
+	}
+	if (num_dev >= MAX_SMD_TTYS) {
+		SMD_TTY_ERR("[%s]: Device Not found", __func__);
+		return -EINVAL;
+	}
+	if (!kstrtoul(buf, 10, &wait)) {
+		mutex_lock(&smd_tty[num_dev].open_lock_lha1);
+		smd_tty[num_dev].open_wait = wait;
+		mutex_unlock(&smd_tty[num_dev].open_lock_lha1);
+		return n;
+	} else {
+		SMD_TTY_INFO("[%s]: Unable to convert %s to an int",
+			__func__, buf);
+		return -EINVAL;
+	}
+}
+
+static ssize_t open_timeout_show(struct device *dev,
+			struct device_attribute *attr, char *buf)
+{
+	unsigned int num_dev;
+	unsigned int open_wait;
+
+	if (dev == NULL) {
+		SMD_TTY_INFO("%s: Invalid Device passed", __func__);
+		return -EINVAL;
+	}
+	for (num_dev = 0; num_dev < MAX_SMD_TTYS; num_dev++) {
+		if (dev == smd_tty[num_dev].device_ptr)
+			break;
+	}
+	if (num_dev >= MAX_SMD_TTYS) {
+		SMD_TTY_ERR("[%s]: Device Not Found", __func__);
+		return -EINVAL;
+	}
+
+	mutex_lock(&smd_tty[num_dev].open_lock_lha1);
+	open_wait = smd_tty[num_dev].open_wait;
+	mutex_unlock(&smd_tty[num_dev].open_lock_lha1);
+
+	return snprintf(buf, PAGE_SIZE, "%d\n", open_wait);
+}
+
+static DEVICE_ATTR
+	(open_timeout, 0664, open_timeout_show, open_timeout_store);
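+
+/*
+ * Illustrative usage (an assumption, not part of the original patch):
+ * the attribute takes the wait time in whole seconds, written from
+ * userspace, e.g.
+ *
+ *	echo 10 > /sys/.../open_timeout
+ */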
+
+static void smd_tty_read(unsigned long param)
+{
+	unsigned char *ptr;
+	int avail;
+	struct smd_tty_info *info = (struct smd_tty_info *)param;
+	struct tty_struct *tty = tty_port_tty_get(&info->port);
+	unsigned long flags;
+
+	if (!tty)
+		return;
+
+	for (;;) {
+		if (is_in_reset(info)) {
+			/* signal TTY clients using TTY_BREAK */
+			tty_insert_flip_char(tty->port, 0x00, TTY_BREAK);
+			tty_flip_buffer_push(tty->port);
+			break;
+		}
+
+		if (test_bit(TTY_THROTTLED, &tty->flags))
+			break;
+		spin_lock_irqsave(&info->ra_lock_lha3, flags);
+		avail = smd_read_avail(info->ch);
+		if (avail == 0) {
+			__pm_relax(&info->ra_wakeup_source);
+			spin_unlock_irqrestore(&info->ra_lock_lha3, flags);
+			break;
+		}
+		spin_unlock_irqrestore(&info->ra_lock_lha3, flags);
+
+		if (avail > MAX_TTY_BUF_SIZE)
+			avail = MAX_TTY_BUF_SIZE;
+
+		avail = tty_prepare_flip_string(tty->port, &ptr, avail);
+		if (avail <= 0) {
+			mod_timer(&info->buf_req_timer,
+					jiffies + msecs_to_jiffies(30));
+			tty_kref_put(tty);
+			return;
+		}
+
+		if (smd_read(info->ch, ptr, avail) != avail) {
+			/*
+			 * Shouldn't be possible since we're in interrupt
+			 * context here and nobody else could 'steal' our
+			 * characters.
+			 */
+			SMD_TTY_ERR(
+				"%s - Possible smd_tty_buffer mismatch for %s",
+				__func__, info->ch_name);
+		}
+
+		/*
+		 * Keep system awake long enough to allow the TTY
+		 * framework to pass the flip buffer to any waiting
+		 * userspace clients.
+		 */
+		__pm_wakeup_event(&info->pending_ws, TTY_PUSH_WS_DELAY);
+
+		if (smd_tty_in_suspend)
+			smd_tty_read_in_suspend = true;
+
+		tty_flip_buffer_push(tty->port);
+	}
+
+	/* XXX only when writable and necessary */
+	tty_wakeup(tty);
+	tty_kref_put(tty);
+}
+
+static void smd_tty_notify(void *priv, unsigned event)
+{
+	struct smd_tty_info *info = priv;
+	struct tty_struct *tty;
+	unsigned long flags;
+
+	switch (event) {
+	case SMD_EVENT_DATA:
+		spin_lock_irqsave(&info->reset_lock_lha2, flags);
+		if (!info->is_open) {
+			spin_unlock_irqrestore(&info->reset_lock_lha2, flags);
+			break;
+		}
+		spin_unlock_irqrestore(&info->reset_lock_lha2, flags);
+		/* There may be clients (tty framework) that are blocked
+		 * waiting for space to write data, so if a possible read
+		 * interrupt came in, wake anyone waiting and disable the
+		 * interrupts.
+		 */
+		if (smd_write_avail(info->ch)) {
+			smd_disable_read_intr(info->ch);
+			tty = tty_port_tty_get(&info->port);
+			if (tty)
+				wake_up_interruptible(&tty->write_wait);
+			tty_kref_put(tty);
+		}
+		spin_lock_irqsave(&info->ra_lock_lha3, flags);
+		if (smd_read_avail(info->ch)) {
+			__pm_stay_awake(&info->ra_wakeup_source);
+			tasklet_hi_schedule(&info->tty_tsklt);
+		}
+		spin_unlock_irqrestore(&info->ra_lock_lha3, flags);
+		break;
+
+	case SMD_EVENT_OPEN:
+		tty = tty_port_tty_get(&info->port);
+		spin_lock_irqsave(&info->reset_lock_lha2, flags);
+		if (tty)
+			clear_bit(TTY_OTHER_CLOSED, &tty->flags);
+		info->in_reset = 0;
+		info->in_reset_updated = 1;
+		info->is_open = 1;
+		wake_up_interruptible(&info->ch_opened_wait_queue);
+		spin_unlock_irqrestore(&info->reset_lock_lha2, flags);
+		tty_kref_put(tty);
+		break;
+
+	case SMD_EVENT_CLOSE:
+		spin_lock_irqsave(&info->reset_lock_lha2, flags);
+		info->in_reset = 1;
+		info->in_reset_updated = 1;
+		info->is_open = 0;
+		wake_up_interruptible(&info->ch_opened_wait_queue);
+		spin_unlock_irqrestore(&info->reset_lock_lha2, flags);
+
+		tty = tty_port_tty_get(&info->port);
+		if (tty) {
+			/* send TTY_BREAK through read tasklet */
+			set_bit(TTY_OTHER_CLOSED, &tty->flags);
+			tasklet_hi_schedule(&info->tty_tsklt);
+
+			if (tty->index == LOOPBACK_IDX)
+				schedule_delayed_work(&loopback_work,
+						msecs_to_jiffies(1000));
+		}
+		tty_kref_put(tty);
+		break;
+	}
+}
+
+static uint32_t is_modem_smsm_inited(void)
+{
+	uint32_t modem_state;
+	uint32_t ready_state = (SMSM_INIT | SMSM_SMDINIT);
+
+	modem_state = smsm_get_state(SMSM_MODEM_STATE);
+	return (modem_state & ready_state) == ready_state;
+}
+
+static int smd_tty_dummy_probe(struct platform_device *pdev)
+{
+	int n;
+
+	for (n = 0; n < MAX_SMD_TTYS; ++n) {
+		if (!smd_tty[n].dev_name[0])
+			continue;
+
+		if (pdev->id == smd_tty[n].edge &&
+			!strcmp(pdev->name, smd_tty[n].dev_name)) {
+			complete_all(&smd_tty[n].ch_allocated);
+			return 0;
+		}
+	}
+	SMD_TTY_ERR("[ERR]%s: unknown device '%s'\n", __func__, pdev->name);
+
+	return -ENODEV;
+}
+
+/**
+ * smd_tty_add_driver() - Add platform drivers for smd tty device
+ *
+ * @info: context for an individual SMD TTY device
+ *
+ * @returns: 0 for success, standard Linux error code otherwise
+ *
+ * This function is used to register the platform driver once for all
+ * smd tty devices which have the same name, and to increment the
+ * reference count for the 2nd to nth devices.
+ */
+static int smd_tty_add_driver(struct smd_tty_info *info)
+{
+	int r = 0;
+	struct smd_tty_pfdriver *smd_tty_pfdriverp;
+	struct smd_tty_pfdriver *item;
+
+	if (!info) {
+		pr_err("%s on a NULL device structure\n", __func__);
+		return -EINVAL;
+	}
+
+	SMD_TTY_INFO("Begin %s on smd_tty[%s]\n", __func__,
+					info->ch_name);
+
+	mutex_lock(&smd_tty_pfdriver_lock_lha1);
+	list_for_each_entry(item, &smd_tty_pfdriver_list, list) {
+		if (!strcmp(item->driver.driver.name, info->dev_name)) {
+			SMD_TTY_INFO("%s:%s Driver Already reg. cnt:%d\n",
+				__func__, info->ch_name, item->ref_cnt);
+			++item->ref_cnt;
+			goto exit;
+		}
+	}
+
+	smd_tty_pfdriverp = kzalloc(sizeof(*smd_tty_pfdriverp), GFP_KERNEL);
+	if (IS_ERR_OR_NULL(smd_tty_pfdriverp)) {
+		pr_err("%s: kzalloc() failed for smd_tty_pfdriver[%s]\n",
+			__func__, info->ch_name);
+		r = -ENOMEM;
+		goto exit;
+	}
+
+	smd_tty_pfdriverp->driver.probe = smd_tty_dummy_probe;
+	smd_tty_pfdriverp->driver.driver.name = info->dev_name;
+	smd_tty_pfdriverp->driver.driver.owner = THIS_MODULE;
+	r = platform_driver_register(&smd_tty_pfdriverp->driver);
+	if (r) {
+		pr_err("%s: %s Platform driver reg. failed\n",
+			__func__, info->ch_name);
+		kfree(smd_tty_pfdriverp);
+		goto exit;
+	}
+	++smd_tty_pfdriverp->ref_cnt;
+	list_add(&smd_tty_pfdriverp->list, &smd_tty_pfdriver_list);
+
+exit:
+	SMD_TTY_INFO("End %s on smd_tty_ch[%s]\n", __func__, info->ch_name);
+	mutex_unlock(&smd_tty_pfdriver_lock_lha1);
+	return r;
+}
+
+/**
+ * smd_tty_remove_driver() - Remove the platform drivers for smd tty device
+ *
+ * @info: context for an individual SMD TTY device
+ *
+ * This function is used to decrement the reference count on
+ * platform drivers for smd pkt devices and removes the drivers
+ * when the reference count becomes zero.
+ */
+static void smd_tty_remove_driver(struct smd_tty_info *info)
+{
+	struct smd_tty_pfdriver *smd_tty_pfdriverp;
+	bool found_item = false;
+
+	if (!info) {
+		pr_err("%s on a NULL device\n", __func__);
+		return;
+	}
+
+	SMD_TTY_INFO("Begin %s on smd_tty_ch[%s]\n", __func__,
+					info->ch_name);
+	mutex_lock(&smd_tty_pfdriver_lock_lha1);
+	list_for_each_entry(smd_tty_pfdriverp, &smd_tty_pfdriver_list, list) {
+		if (!strcmp(smd_tty_pfdriverp->driver.driver.name,
+					info->dev_name)) {
+			found_item = true;
+			SMD_TTY_INFO("%s:%s Platform driver cnt:%d\n",
+				__func__, info->ch_name,
+				smd_tty_pfdriverp->ref_cnt);
+			if (smd_tty_pfdriverp->ref_cnt > 0)
+				--smd_tty_pfdriverp->ref_cnt;
+			else
+				pr_warn("%s reference count <= 0\n", __func__);
+			break;
+		}
+	}
+	if (!found_item)
+		SMD_TTY_ERR("%s:%s No item found in list.\n",
+			__func__, info->ch_name);
+
+	if (found_item && smd_tty_pfdriverp->ref_cnt == 0) {
+		platform_driver_unregister(&smd_tty_pfdriverp->driver);
+		smd_tty_pfdriverp->driver.probe = NULL;
+		list_del(&smd_tty_pfdriverp->list);
+		kfree(smd_tty_pfdriverp);
+	}
+	mutex_unlock(&smd_tty_pfdriver_lock_lha1);
+	SMD_TTY_INFO("End %s on smd_tty_ch[%s]\n", __func__, info->ch_name);
+}
+
+static int smd_tty_port_activate(struct tty_port *tport,
+				 struct tty_struct *tty)
+{
+	int res = 0;
+	unsigned int n = tty->index;
+	struct smd_tty_info *info;
+	const char *peripheral = NULL;
+
+	if (n >= MAX_SMD_TTYS || !smd_tty[n].ch_name[0])
+		return -ENODEV;
+
+	info = smd_tty + n;
+
+	mutex_lock(&info->open_lock_lha1);
+	tty->driver_data = info;
+
+	res = smd_tty_add_driver(info);
+	if (res) {
+		SMD_TTY_ERR("%s:%d Idx smd_tty_driver register failed %d\n",
+							__func__, n, res);
+		goto out;
+	}
+
+	peripheral = smd_edge_to_pil_str(smd_tty[n].edge);
+	if (!IS_ERR_OR_NULL(peripheral)) {
+		info->pil = subsystem_get(peripheral);
+		if (IS_ERR(info->pil)) {
+			SMD_TTY_INFO(
+				"%s failed on smd_tty device :%s subsystem_get failed for %s",
+				__func__, info->ch_name,
+				peripheral);
+
+			/*
+			 * Sleep, in order to reduce the frequency of
+			 * retry by user-space modules and to avoid
+			 * possible watchdog bite.
+			 */
+			msleep((smd_tty[n].open_wait * 1000));
+			res = PTR_ERR(info->pil);
+			goto platform_unregister;
+		}
+	}
+
+	/* Wait for the modem SMSM to be inited for the SMD
+	 * loopback channel to be allocated at the modem. Since
+	 * the wait needs to be done at most once, using msleep
+	 * doesn't degrade the performance.
+	 */
+	if (n == LOOPBACK_IDX) {
+		if (!is_modem_smsm_inited())
+			msleep(5000);
+		smsm_change_state(SMSM_APPS_STATE,
+				  0, SMSM_SMD_LOOPBACK);
+		msleep(100);
+	}
+
+	/*
+	 * Wait for a channel to be allocated so we know
+	 * the modem is ready enough.
+	 */
+	if (smd_tty[n].open_wait) {
+		res = wait_for_completion_interruptible_timeout(
+				&info->ch_allocated,
+				msecs_to_jiffies(smd_tty[n].open_wait *
+								1000));
+
+		if (res == 0) {
+			SMD_TTY_INFO(
+				"Timed out waiting for SMD channel %s",
+				info->ch_name);
+			res = -ETIMEDOUT;
+			goto release_pil;
+		} else if (res < 0) {
+			SMD_TTY_INFO(
+				"Error waiting for SMD channel %s : %d\n",
+				info->ch_name, res);
+			goto release_pil;
+		}
+	}
+
+	tasklet_init(&info->tty_tsklt, smd_tty_read, (unsigned long)info);
+	wakeup_source_init(&info->pending_ws, info->ch_name);
+	scnprintf(info->ra_wakeup_source_name, MAX_RA_WAKE_LOCK_NAME_LEN,
+		  "SMD_TTY_%s_RA", info->ch_name);
+	wakeup_source_init(&info->ra_wakeup_source,
+			info->ra_wakeup_source_name);
+
+	res = smd_named_open_on_edge(info->ch_name,
+				     smd_tty[n].edge, &info->ch, info,
+				     smd_tty_notify);
+	if (res < 0) {
+		SMD_TTY_INFO("%s: %s open failed %d\n",
+			      __func__, info->ch_name, res);
+		goto release_wl_tl;
+	}
+
+	res = wait_event_interruptible_timeout(info->ch_opened_wait_queue,
+					       info->is_open, (2 * HZ));
+	if (res == 0)
+		res = -ETIMEDOUT;
+	if (res < 0) {
+		SMD_TTY_INFO("%s: wait for %s smd_open failed %d\n",
+			      __func__, info->ch_name, res);
+		goto close_ch;
+	}
+	SMD_TTY_INFO("%s with PID %u opened port %s",
+		      current->comm, current->pid, info->ch_name);
+	smd_disable_read_intr(info->ch);
+	mutex_unlock(&info->open_lock_lha1);
+	return 0;
+
+close_ch:
+	smd_close(info->ch);
+	info->ch = NULL;
+
+release_wl_tl:
+	tasklet_kill(&info->tty_tsklt);
+	wakeup_source_trash(&info->pending_ws);
+	wakeup_source_trash(&info->ra_wakeup_source);
+
+release_pil:
+	subsystem_put(info->pil);
+
+platform_unregister:
+	smd_tty_remove_driver(info);
+
+out:
+	mutex_unlock(&info->open_lock_lha1);
+
+	return res;
+}
+
+static void smd_tty_port_shutdown(struct tty_port *tport)
+{
+	struct smd_tty_info *info;
+	struct tty_struct *tty = tty_port_tty_get(tport);
+	unsigned long flags;
+
+	info = tty ? tty->driver_data : NULL;
+	if (!info) {
+		tty_kref_put(tty);
+		return;
+	}
+
+	mutex_lock(&info->open_lock_lha1);
+
+	spin_lock_irqsave(&info->reset_lock_lha2, flags);
+	info->is_open = 0;
+	spin_unlock_irqrestore(&info->reset_lock_lha2, flags);
+
+	tasklet_kill(&info->tty_tsklt);
+	wakeup_source_trash(&info->pending_ws);
+	wakeup_source_trash(&info->ra_wakeup_source);
+
+	SMD_TTY_INFO("%s with PID %u closed port %s",
+			current->comm, current->pid,
+			info->ch_name);
+	tty->driver_data = NULL;
+	del_timer(&info->buf_req_timer);
+
+	smd_close(info->ch);
+	info->ch = NULL;
+	subsystem_put(info->pil);
+	smd_tty_remove_driver(info);
+
+	mutex_unlock(&info->open_lock_lha1);
+	tty_kref_put(tty);
+}
+
+static int smd_tty_open(struct tty_struct *tty, struct file *f)
+{
+	struct smd_tty_info *info = smd_tty + tty->index;
+
+	return tty_port_open(&info->port, tty, f);
+}
+
+static void smd_tty_close(struct tty_struct *tty, struct file *f)
+{
+	struct smd_tty_info *info = smd_tty + tty->index;
+
+	tty_port_close(&info->port, tty, f);
+}
+
+static int smd_tty_write(struct tty_struct *tty, const unsigned char *buf,
+									int len)
+{
+	struct smd_tty_info *info = tty->driver_data;
+	int avail;
+
+	/*
+	 * If we're writing to a packet channel we will never be able
+	 * to write more data than there is currently space for.
+	 */
+	if (is_in_reset(info))
+		return -ENETRESET;
+
+	avail = smd_write_avail(info->ch);
+	/* if no space, we'll have to set up a notification later to wake up
+	 * the tty framework when space becomes available
+	 */
+	if (!avail) {
+		smd_enable_read_intr(info->ch);
+		return 0;
+	}
+	if (len > avail)
+		len = avail;
+	SMD_TTY_INFO("[WRITE]: PID %u -> port %s %x bytes",
+			current->pid, info->ch_name, len);
+
+	return smd_write(info->ch, buf, len);
+}
+
+static int smd_tty_write_room(struct tty_struct *tty)
+{
+	struct smd_tty_info *info = tty->driver_data;
+
+	return smd_write_avail(info->ch);
+}
+
+static int smd_tty_chars_in_buffer(struct tty_struct *tty)
+{
+	struct smd_tty_info *info = tty->driver_data;
+
+	return smd_read_avail(info->ch);
+}
+
+static void smd_tty_unthrottle(struct tty_struct *tty)
+{
+	struct smd_tty_info *info = tty->driver_data;
+	unsigned long flags;
+
+	spin_lock_irqsave(&info->reset_lock_lha2, flags);
+	if (info->is_open) {
+		spin_unlock_irqrestore(&info->reset_lock_lha2, flags);
+		tasklet_hi_schedule(&info->tty_tsklt);
+		return;
+	}
+	spin_unlock_irqrestore(&info->reset_lock_lha2, flags);
+}
+
+/*
+ * Returns the current TIOCM status bits including:
+ *      SMD Signals (DTR/DSR, CTS/RTS, CD, RI)
+ *      TIOCM_OUT1 - reset state (1=in reset)
+ *      TIOCM_OUT2 - reset state updated (1=updated)
+ */
+static int smd_tty_tiocmget(struct tty_struct *tty)
+{
+	struct smd_tty_info *info = tty->driver_data;
+	unsigned long flags;
+	int tiocm;
+
+	tiocm = smd_tiocmget(info->ch);
+
+	spin_lock_irqsave(&info->reset_lock_lha2, flags);
+	tiocm |= (info->in_reset ? TIOCM_OUT1 : 0);
+	if (info->in_reset_updated) {
+		tiocm |= TIOCM_OUT2;
+		info->in_reset_updated = 0;
+	}
+	SMD_TTY_INFO("PID %u --> %s TIOCM is %x ",
+			current->pid, __func__, tiocm);
+	spin_unlock_irqrestore(&info->reset_lock_lha2, flags);
+
+	return tiocm;
+}
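+
+/*
+ * Illustrative sketch (an assumption, not part of the original patch):
+ * a userspace client could poll the reset indication exported above
+ * with something like
+ *
+ *	int bits;
+ *	if (!ioctl(fd, TIOCMGET, &bits) && (bits & TIOCM_OUT1))
+ *		handle_channel_reset();
+ *
+ * where handle_channel_reset() is a hypothetical client helper.
+ */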
+
+static int smd_tty_tiocmset(struct tty_struct *tty,
+				unsigned int set, unsigned int clear)
+{
+	struct smd_tty_info *info = tty->driver_data;
+
+	if (info->in_reset)
+		return -ENETRESET;
+
+	SMD_TTY_INFO("PID %u --> %s Set: %x Clear: %x",
+			current->pid, __func__, set, clear);
+	return smd_tiocmset(info->ch, set, clear);
+}
+
+static void loopback_probe_worker(struct work_struct *work)
+{
+	/* wait for modem to restart before requesting loopback server */
+	if (!is_modem_smsm_inited())
+		schedule_delayed_work(&loopback_work, msecs_to_jiffies(1000));
+	else
+		smsm_change_state(SMSM_APPS_STATE,
+			  0, SMSM_SMD_LOOPBACK);
+}
+
+static const struct tty_port_operations smd_tty_port_ops = {
+	.shutdown = smd_tty_port_shutdown,
+	.activate = smd_tty_port_activate,
+};
+
+static const struct tty_operations smd_tty_ops = {
+	.open = smd_tty_open,
+	.close = smd_tty_close,
+	.write = smd_tty_write,
+	.write_room = smd_tty_write_room,
+	.chars_in_buffer = smd_tty_chars_in_buffer,
+	.unthrottle = smd_tty_unthrottle,
+	.tiocmget = smd_tty_tiocmget,
+	.tiocmset = smd_tty_tiocmset,
+};
+
+static int smd_tty_pm_notifier(struct notifier_block *nb,
+				unsigned long event, void *unused)
+{
+	switch (event) {
+	case PM_SUSPEND_PREPARE:
+		smd_tty_read_in_suspend = false;
+		smd_tty_in_suspend = true;
+		break;
+
+	case PM_POST_SUSPEND:
+		smd_tty_in_suspend = false;
+		if (smd_tty_read_in_suspend) {
+			smd_tty_read_in_suspend = false;
+			__pm_wakeup_event(&read_in_suspend_ws,
+					TTY_PUSH_WS_POST_SUSPEND_DELAY);
+		}
+		break;
+	}
+	return NOTIFY_DONE;
+}
+
+static struct notifier_block smd_tty_pm_nb = {
+	.notifier_call = smd_tty_pm_notifier,
+	.priority = 0,
+};
+
+/**
+ * smd_tty_log_init()- Init function for IPC logging
+ *
+ * Initialize the buffer that is used to provide the log information
+ * pertaining to the smd_tty module.
+ */
+static void smd_tty_log_init(void)
+{
+	smd_tty_log_ctx = ipc_log_context_create(SMD_TTY_LOG_PAGES,
+						"smd_tty", 0);
+	if (!smd_tty_log_ctx)
+		pr_err("%s: Unable to create IPC log", __func__);
+}
+
+static struct tty_driver *smd_tty_driver;
+
+static int smd_tty_register_driver(void)
+{
+	int ret;
+
+	smd_tty_driver = alloc_tty_driver(MAX_SMD_TTYS);
+	if (!smd_tty_driver) {
+		SMD_TTY_ERR("%s - Driver allocation failed", __func__);
+		return -ENOMEM;
+	}
+
+	smd_tty_driver->owner = THIS_MODULE;
+	smd_tty_driver->driver_name = "smd_tty_driver";
+	smd_tty_driver->name = "smd";
+	smd_tty_driver->major = 0;
+	smd_tty_driver->minor_start = 0;
+	smd_tty_driver->type = TTY_DRIVER_TYPE_SERIAL;
+	smd_tty_driver->subtype = SERIAL_TYPE_NORMAL;
+	smd_tty_driver->init_termios = tty_std_termios;
+	smd_tty_driver->init_termios.c_iflag = 0;
+	smd_tty_driver->init_termios.c_oflag = 0;
+	smd_tty_driver->init_termios.c_cflag = B38400 | CS8 | CREAD;
+	smd_tty_driver->init_termios.c_lflag = 0;
+	smd_tty_driver->flags = TTY_DRIVER_RESET_TERMIOS |
+		TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV;
+	tty_set_operations(smd_tty_driver, &smd_tty_ops);
+
+	ret = tty_register_driver(smd_tty_driver);
+	if (ret) {
+		put_tty_driver(smd_tty_driver);
+		SMD_TTY_ERR("%s: driver registration failed %d", __func__, ret);
+	}
+
+	return ret;
+}
+
+static void smd_tty_device_init(int idx)
+{
+	struct tty_port *port;
+
+	port = &smd_tty[idx].port;
+	tty_port_init(port);
+	port->ops = &smd_tty_port_ops;
+	smd_tty[idx].device_ptr = tty_port_register_device(port, smd_tty_driver,
+							   idx, NULL);
+	if (IS_ERR_OR_NULL(smd_tty[idx].device_ptr)) {
+		SMD_TTY_ERR("%s: Unable to register tty port %s reason %ld\n",
+				__func__,
+				smd_tty[idx].ch_name,
+				PTR_ERR_OR_ZERO(smd_tty[idx].device_ptr));
+		return;
+	}
+	init_completion(&smd_tty[idx].ch_allocated);
+	mutex_init(&smd_tty[idx].open_lock_lha1);
+	spin_lock_init(&smd_tty[idx].reset_lock_lha2);
+	spin_lock_init(&smd_tty[idx].ra_lock_lha3);
+	smd_tty[idx].is_open = 0;
+	setup_timer(&smd_tty[idx].buf_req_timer, buf_req_retry,
+			(unsigned long)&smd_tty[idx]);
+	init_waitqueue_head(&smd_tty[idx].ch_opened_wait_queue);
+
+	if (device_create_file(smd_tty[idx].device_ptr, &dev_attr_open_timeout))
+		SMD_TTY_ERR("%s: Unable to create device attributes for %s",
+			__func__, smd_tty[idx].ch_name);
+}
+
+static int smd_tty_devicetree_init(struct platform_device *pdev)
+{
+	int ret;
+	int idx;
+	int edge;
+	char *key = NULL;
+	const char *ch_name;
+	const char *dev_name;
+	const char *remote_ss;
+	struct device_node *node;
+
+	ret = smd_tty_register_driver();
+	if (ret) {
+		SMD_TTY_ERR("%s: driver registration failed %d\n",
+						__func__, ret);
+		return ret;
+	}
+
+	for_each_child_of_node(pdev->dev.of_node, node) {
+
+		ret = of_alias_get_id(node, "smd");
+		SMD_TTY_INFO("%s:adding smd%d\n", __func__, ret);
+
+		if (ret < 0 || ret >= MAX_SMD_TTYS)
+			goto error;
+		idx = ret;
+
+		key = "qcom,smdtty-remote";
+		remote_ss = of_get_property(node, key, NULL);
+		if (!remote_ss)
+			goto error;
+
+		edge = smd_remote_ss_to_edge(remote_ss);
+		if (edge < 0)
+			goto error;
+		smd_tty[idx].edge = edge;
+
+		key = "qcom,smdtty-port-name";
+		ch_name = of_get_property(node, key, NULL);
+		if (!ch_name)
+			goto error;
+		strlcpy(smd_tty[idx].ch_name, ch_name,
+					SMD_MAX_CH_NAME_LEN);
+
+		key = "qcom,smdtty-dev-name";
+		dev_name = of_get_property(node, key, NULL);
+		if (!dev_name) {
+			strlcpy(smd_tty[idx].dev_name, smd_tty[idx].ch_name,
+							SMD_MAX_CH_NAME_LEN);
+		} else {
+			strlcpy(smd_tty[idx].dev_name, dev_name,
+						SMD_MAX_CH_NAME_LEN);
+		}
+
+		smd_tty_device_init(idx);
+	}
+	INIT_DELAYED_WORK(&loopback_work, loopback_probe_worker);
+
+	ret = register_pm_notifier(&smd_tty_pm_nb);
+	if (ret)
+		pr_err("%s: power state notif error %d\n", __func__, ret);
+
+	return 0;
+
+error:
+	SMD_TTY_ERR("%s: Initialization error, key[%s]\n", __func__, key);
+	/* Unregister tty platform devices */
+	for_each_child_of_node(pdev->dev.of_node, node) {
+
+		ret = of_alias_get_id(node, "smd");
+		SMD_TTY_INFO("%s: Removing smd%d\n", __func__, ret);
+
+		if (ret < 0 || ret >= MAX_SMD_TTYS)
+			goto out;
+		idx = ret;
+
+		if (smd_tty[idx].device_ptr) {
+			device_remove_file(smd_tty[idx].device_ptr,
+						&dev_attr_open_timeout);
+			tty_unregister_device(smd_tty_driver, idx);
+		}
+	}
+out:
+	tty_unregister_driver(smd_tty_driver);
+	put_tty_driver(smd_tty_driver);
+	return ret;
+}
+
+static int msm_smd_tty_probe(struct platform_device *pdev)
+{
+	int ret;
+
+	if (pdev) {
+		if (pdev->dev.of_node) {
+			ret = smd_tty_devicetree_init(pdev);
+			if (ret) {
+				SMD_TTY_ERR("%s: device tree init failed\n",
+								__func__);
+				return ret;
+			}
+		}
+	}
+
+	return 0;
+}
+
+static struct of_device_id msm_smd_tty_match_table[] = {
+	{ .compatible = "qcom,smdtty" },
+	{},
+};
+
+static struct platform_driver msm_smd_tty_driver = {
+	.probe = msm_smd_tty_probe,
+	.driver = {
+		.name = MODULE_NAME,
+		.owner = THIS_MODULE,
+		.of_match_table = msm_smd_tty_match_table,
+	 },
+};
+
+static int __init smd_tty_init(void)
+{
+	int rc;
+
+	smd_tty_log_init();
+	rc = platform_driver_register(&msm_smd_tty_driver);
+	if (rc) {
+		SMD_TTY_ERR("%s: msm_smd_tty_driver register failed %d\n",
+								__func__, rc);
+		return rc;
+	}
+
+	wakeup_source_init(&read_in_suspend_ws, "SMDTTY_READ_IN_SUSPEND");
+	return 0;
+}
+
+module_init(smd_tty_init);
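A minimal user-space sketch of how a client could talk to one of the tty ports registered above. The device path used here is only an assumption; the real name comes from the "qcom,smdtty-dev-name" property parsed in smd_tty_devicetree_init().

#include <fcntl.h>
#include <termios.h>
#include <unistd.h>

/* Hypothetical helper: open an SMD tty port in raw mode. */
static int open_smd_port(const char *path)
{
	struct termios tio;
	int fd = open(path, O_RDWR | O_NOCTTY);	/* e.g. "/dev/smd36" */

	if (fd < 0)
		return -1;
	/*
	 * The driver registers with iflag/oflag/lflag cleared; re-assert
	 * raw mode so reads/writes carry SMD channel data unmodified.
	 */
	if (tcgetattr(fd, &tio) == 0) {
		cfmakeraw(&tio);
		tcsetattr(fd, TCSANOW, &tio);
	}
	return fd;
}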
diff -Nruw linux-4.4.115/drivers/uio/msm_sharedmem/Makefile linux-4.4.115-fbx/drivers/uio/msm_sharedmem/Makefile
--- linux-4.4.115/drivers/uio/msm_sharedmem/Makefile	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/uio/msm_sharedmem/Makefile	2019-01-22 16:16:27.223280093 +0100
@@ -0,0 +1,4 @@
+obj-$(CONFIG_UIO_MSM_SHAREDMEM) := \
+	msm_sharedmem.o \
+	remote_filesystem_access_v01.o \
+	sharedmem_qmi.o
diff -Nruw linux-4.4.115/drivers/uio/msm_sharedmem/msm_sharedmem.c linux-4.4.115-fbx/drivers/uio/msm_sharedmem/msm_sharedmem.c
--- linux-4.4.115/drivers/uio/msm_sharedmem/msm_sharedmem.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/uio/msm_sharedmem/msm_sharedmem.c	2019-10-29 09:26:24.957216037 +0100
@@ -0,0 +1,240 @@
+/* Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define DRIVER_NAME "msm_sharedmem"
+#define pr_fmt(fmt) DRIVER_NAME ": %s: " fmt, __func__
+
+#include <linux/uio_driver.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/err.h>
+#include <linux/of.h>
+#include <linux/dma-mapping.h>
+
+#include <soc/qcom/secure_buffer.h>
+
+#include "sharedmem_qmi.h"
+
+#define CLIENT_ID_PROP "qcom,client-id"
+
+#define MPSS_RMTS_CLIENT_ID 1
+
+static int uio_get_mem_index(struct uio_info *info, struct vm_area_struct *vma)
+{
+	if (vma->vm_pgoff >= MAX_UIO_MAPS)
+		return -EINVAL;
+
+	if (info->mem[vma->vm_pgoff].size == 0)
+		return -EINVAL;
+
+	return (int)vma->vm_pgoff;
+}
+
+static int sharedmem_mmap(struct uio_info *info, struct vm_area_struct *vma)
+{
+	int result;
+	struct uio_mem *mem;
+	int mem_index = uio_get_mem_index(info, vma);
+
+	if (mem_index < 0) {
+		pr_err("mem_index is invalid errno %d\n", mem_index);
+		return mem_index;
+	}
+
+	mem = info->mem + mem_index;
+
+	if (vma->vm_end - vma->vm_start > mem->size) {
+		pr_err("vm_end[%lu] - vm_start[%lu] [%lu] > mem->size[%pa]\n",
+			vma->vm_end, vma->vm_start,
+			(vma->vm_end - vma->vm_start), &mem->size);
+		return -EINVAL;
+	}
+	pr_debug("Attempting to setup mmap.\n");
+
+	vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+
+	result = remap_pfn_range(vma,
+				 vma->vm_start,
+				 mem->addr >> PAGE_SHIFT,
+				 vma->vm_end - vma->vm_start,
+				 vma->vm_page_prot);
+	if (result != 0)
+		pr_err("mmap Failed with errno %d\n", result);
+	else
+		pr_debug("mmap success\n");
+
+	return result;
+}
+
+/* Setup the shared ram permissions.
+ * This function currently supports the mpss client only.
+ */
+static void setup_shared_ram_perms(u32 client_id, phys_addr_t addr, u32 size)
+{
+	int ret;
+	u32 source_vmlist[1] = {VMID_HLOS};
+	int dest_vmids[2] = {VMID_HLOS, VMID_MSS_MSA};
+	int dest_perms[2] = {PERM_READ|PERM_WRITE,
+			     PERM_READ|PERM_WRITE};
+
+	if (client_id != MPSS_RMTS_CLIENT_ID)
+		return;
+
+	ret = hyp_assign_phys(addr, size, source_vmlist, 1, dest_vmids,
+				dest_perms, 2);
+	if (ret != 0) {
+		if (ret == -ENOSYS)
+			pr_warn("hyp_assign_phys is not supported!");
+		else
+			pr_err("hyp_assign_phys failed IPA=%pa size=%u err=%d\n",
+				&addr, size, ret);
+	}
+}
+
+static int msm_sharedmem_probe(struct platform_device *pdev)
+{
+	int ret = 0;
+	struct uio_info *info = NULL;
+	struct resource *clnt_res = NULL;
+	u32 client_id = ((u32)~0U);
+	u32 shared_mem_size = 0;
+	void *shared_mem = NULL;
+	phys_addr_t shared_mem_physical = 0;
+	bool is_addr_dynamic = false;
+	struct sharemem_qmi_entry qmi_entry;
+
+	/* Get the addresses from platform-data */
+	if (!pdev->dev.of_node) {
+		pr_err("Node not found\n");
+		ret = -ENODEV;
+		goto out;
+	}
+	clnt_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!clnt_res) {
+		pr_err("resource not found\n");
+		return -ENODEV;
+	}
+
+	ret = of_property_read_u32(pdev->dev.of_node, CLIENT_ID_PROP,
+				   &client_id);
+	if (ret) {
+		client_id = ((u32)~0U);
+		pr_warn("qcom,client-id property not found\n");
+	}
+
+	info = devm_kzalloc(&pdev->dev, sizeof(struct uio_info), GFP_KERNEL);
+	if (!info)
+		return -ENOMEM;
+
+	shared_mem_size = resource_size(clnt_res);
+	shared_mem_physical = clnt_res->start;
+
+	if (shared_mem_size == 0) {
+		pr_err("Shared memory size is zero\n");
+		return -EINVAL;
+	}
+
+	if (shared_mem_physical == 0) {
+		is_addr_dynamic = true;
+		shared_mem = dma_alloc_coherent(&pdev->dev, shared_mem_size,
+					&shared_mem_physical, GFP_KERNEL);
+		if (shared_mem == NULL) {
+			pr_err("Shared mem alloc failed, client=%s, size=%u\n",
+				clnt_res->name, shared_mem_size);
+			return -ENOMEM;
+		}
+	}
+
+	/* Set up the permissions for the shared ram that was allocated. */
+	setup_shared_ram_perms(client_id, shared_mem_physical, shared_mem_size);
+
+	/* Setup device */
+	info->mmap = sharedmem_mmap; /* Custom mmap function. */
+	info->name = clnt_res->name;
+	info->version = "1.0";
+	info->mem[0].addr = shared_mem_physical;
+	info->mem[0].size = shared_mem_size;
+	info->mem[0].memtype = UIO_MEM_PHYS;
+
+	ret = uio_register_device(&pdev->dev, info);
+	if (ret) {
+		pr_err("uio register failed ret=%d\n", ret);
+		goto out;
+	}
+	dev_set_drvdata(&pdev->dev, info);
+
+	qmi_entry.client_id = client_id;
+	qmi_entry.client_name = info->name;
+	qmi_entry.address = info->mem[0].addr;
+	qmi_entry.size = info->mem[0].size;
+	qmi_entry.is_addr_dynamic = is_addr_dynamic;
+
+	sharedmem_qmi_add_entry(&qmi_entry);
+	pr_info("Device created for client '%s'\n", clnt_res->name);
+out:
+	return ret;
+}
+
+static int msm_sharedmem_remove(struct platform_device *pdev)
+{
+	struct uio_info *info = dev_get_drvdata(&pdev->dev);
+
+	uio_unregister_device(info);
+
+	return 0;
+}
+
+static struct of_device_id msm_sharedmem_of_match[] = {
+	{.compatible = "qcom,sharedmem-uio",},
+	{},
+};
+MODULE_DEVICE_TABLE(of, msm_sharedmem_of_match);
+
+static struct platform_driver msm_sharedmem_driver = {
+	.probe          = msm_sharedmem_probe,
+	.remove         = msm_sharedmem_remove,
+	.driver         = {
+		.name   = DRIVER_NAME,
+		.owner	= THIS_MODULE,
+		.of_match_table = msm_sharedmem_of_match,
+	},
+};
+
+static int __init msm_sharedmem_init(void)
+{
+	int result;
+
+	result = sharedmem_qmi_init();
+	if (result < 0) {
+		pr_err("sharedmem_qmi_init failed result = %d\n", result);
+		return result;
+	}
+
+	result = platform_driver_register(&msm_sharedmem_driver);
+	if (result != 0) {
+		pr_err("Platform driver registration failed\n");
+		return result;
+	}
+	return 0;
+}
+
+static void __exit msm_sharedmem_exit(void)
+{
+	platform_driver_unregister(&msm_sharedmem_driver);
+	sharedmem_qmi_exit();
+}
+
+module_init(msm_sharedmem_init);
+module_exit(msm_sharedmem_exit);
+
+MODULE_LICENSE("GPL v2");
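For context, a hedged user-space sketch of consuming the UIO device this driver registers. Per UIO convention, an mmap offset of N * page_size selects info->mem[N], matching how uio_get_mem_index() above interprets vm_pgoff; "/dev/uio0" and the 4 KiB length are assumptions for illustration.

#include <fcntl.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	size_t len = 4096;	/* must not exceed info->mem[0].size */
	void *shm;
	int fd = open("/dev/uio0", O_RDWR);

	if (fd < 0)
		return 1;
	/* Offset 0 * page_size maps info->mem[0] (write-combined above). */
	shm = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (shm == MAP_FAILED) {
		close(fd);
		return 1;
	}
	/* ... exchange data with the remote client here ... */
	munmap(shm, len);
	close(fd);
	return 0;
}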
diff -Nruw linux-4.4.115/drivers/uio/msm_sharedmem/remote_filesystem_access_v01.c linux-4.4.115-fbx/drivers/uio/msm_sharedmem/remote_filesystem_access_v01.c
--- linux-4.4.115/drivers/uio/msm_sharedmem/remote_filesystem_access_v01.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/uio/msm_sharedmem/remote_filesystem_access_v01.c	2019-01-22 16:16:27.223280093 +0100
@@ -0,0 +1,80 @@
+/* Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/qmi_encdec.h>
+
+#include <soc/qcom/msm_qmi_interface.h>
+
+#include "remote_filesystem_access_v01.h"
+
+struct elem_info rfsa_get_buff_addr_req_msg_v01_ei[] = {
+	{
+		.data_type   = QMI_UNSIGNED_4_BYTE,
+		.elem_len    = 1,
+		.elem_size   = sizeof(uint32_t),
+		.is_array    = NO_ARRAY,
+		.tlv_type    = 0x01,
+		.offset      = offsetof(struct rfsa_get_buff_addr_req_msg_v01,
+					   client_id),
+	},
+	{
+		.data_type   = QMI_UNSIGNED_4_BYTE,
+		.elem_len    = 1,
+		.elem_size   = sizeof(uint32_t),
+		.is_array    = NO_ARRAY,
+		.tlv_type    = 0x02,
+		.offset      = offsetof(struct rfsa_get_buff_addr_req_msg_v01,
+					   size),
+	},
+	{
+		.data_type   = QMI_EOTI,
+		.is_array    = NO_ARRAY,
+		.tlv_type    = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info rfsa_get_buff_addr_resp_msg_v01_ei[] = {
+	{
+		.data_type   = QMI_STRUCT,
+		.elem_len    = 1,
+		.elem_size   = sizeof(struct qmi_response_type_v01),
+		.is_array    = NO_ARRAY,
+		.tlv_type    = 0x02,
+		.offset      = offsetof(struct rfsa_get_buff_addr_resp_msg_v01,
+					   resp),
+		.ei_array    = get_qmi_response_type_v01_ei(),
+	},
+	{
+		.data_type   = QMI_OPT_FLAG,
+		.elem_len    = 1,
+		.elem_size   = sizeof(uint8_t),
+		.is_array    = NO_ARRAY,
+		.tlv_type    = 0x10,
+		.offset      = offsetof(struct rfsa_get_buff_addr_resp_msg_v01,
+					address_valid),
+	},
+	{
+		.data_type   = QMI_UNSIGNED_8_BYTE,
+		.elem_len    = 1,
+		.elem_size   = sizeof(uint64_t),
+		.is_array    = NO_ARRAY,
+		.tlv_type    = 0x10,
+		.offset      = offsetof(struct rfsa_get_buff_addr_resp_msg_v01,
+					address),
+	},
+	{
+		.data_type   = QMI_EOTI,
+		.is_array    = NO_ARRAY,
+		.tlv_type    = QMI_COMMON_TLV_TYPE,
+	},
+};
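A kernel-side sketch of how the elem_info tables above drive TLV encoding. It assumes the 4.4-era qmi_kernel_encode() helper from soc/qcom/msm_qmi_interface.h with (desc, buffer, length, struct) arguments; verify that signature before relying on it.

/* Hypothetical example, not part of the patch. */
static int rfsa_encode_req_example(void *buf, u32 buf_len)
{
	struct rfsa_get_buff_addr_req_msg_v01 req = {
		.client_id = 1,		/* e.g. the MPSS RMTS client */
		.size = 0x100000,
	};
	struct msg_desc desc = {
		.max_msg_len = RFSA_GET_BUFF_ADDR_REQ_MSG_MAX_LEN_V01,
		.msg_id = QMI_RFSA_GET_BUFF_ADDR_REQ_MSG_V01,
		.ei_array = rfsa_get_buff_addr_req_msg_v01_ei,
	};

	/* Walks the ei_array, emitting one TLV per entry until QMI_EOTI. */
	return qmi_kernel_encode(&desc, buf, buf_len, &req);
}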
diff -Nruw linux-4.4.115/drivers/uio/msm_sharedmem/remote_filesystem_access_v01.h linux-4.4.115-fbx/drivers/uio/msm_sharedmem/remote_filesystem_access_v01.h
--- linux-4.4.115/drivers/uio/msm_sharedmem/remote_filesystem_access_v01.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/uio/msm_sharedmem/remote_filesystem_access_v01.h	2019-01-22 16:16:27.223280093 +0100
@@ -0,0 +1,39 @@
+/* Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef __REMOTE_FILESYSTEM_ACCESS_V01_H__
+#define __REMOTE_FILESYSTEM_ACCESS_V01_H__
+
+#define RFSA_SERVICE_ID_V01 0x1C
+#define RFSA_SERVICE_VERS_V01 0x01
+
+#define QMI_RFSA_GET_BUFF_ADDR_REQ_MSG_V01 0x0023
+#define QMI_RFSA_GET_BUFF_ADDR_RESP_MSG_V01 0x0023
+
+#define RFSA_GET_BUFF_ADDR_REQ_MSG_MAX_LEN_V01 14
+#define RFSA_GET_BUFF_ADDR_RESP_MSG_MAX_LEN_V01 18
+
+extern struct elem_info rfsa_get_buff_addr_req_msg_v01_ei[];
+extern struct elem_info rfsa_get_buff_addr_resp_msg_v01_ei[];
+
+struct rfsa_get_buff_addr_req_msg_v01 {
+	uint32_t client_id;
+	uint32_t size;
+};
+
+struct rfsa_get_buff_addr_resp_msg_v01 {
+	struct qmi_response_type_v01 resp;
+	uint8_t address_valid;
+	uint64_t address;
+};
+
+#endif /* __REMOTE_FILESYSTEM_ACCESS_V01_H__ */
diff -Nruw linux-4.4.115/drivers/uio/msm_sharedmem/sharedmem_qmi.c linux-4.4.115-fbx/drivers/uio/msm_sharedmem/sharedmem_qmi.c
--- linux-4.4.115/drivers/uio/msm_sharedmem/sharedmem_qmi.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/uio/msm_sharedmem/sharedmem_qmi.c	2019-01-22 16:16:27.223280093 +0100
@@ -0,0 +1,453 @@
+/* Copyright (c) 2014-2015, 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define DRIVER_NAME "msm_sharedmem"
+#define pr_fmt(fmt) DRIVER_NAME ": %s: " fmt, __func__
+
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/rwsem.h>
+#include <linux/slab.h>
+#include <linux/list.h>
+#include <linux/debugfs.h>
+#include <soc/qcom/msm_qmi_interface.h>
+#include "sharedmem_qmi.h"
+#include "remote_filesystem_access_v01.h"
+
+#define RFSA_SERVICE_INSTANCE_NUM 1
+#define SHARED_ADDR_ENTRY_NAME_MAX_LEN 10
+
+struct shared_addr_entry {
+	u32 id;
+	u64 address;
+	u32 size;
+	u64 request_count;
+	bool is_addr_dynamic;
+	char name[SHARED_ADDR_ENTRY_NAME_MAX_LEN + 1];
+};
+
+struct shared_addr_list {
+	struct list_head node;
+	struct shared_addr_entry entry;
+};
+
+static struct shared_addr_list list;
+
+static struct qmi_handle *sharedmem_qmi_svc_handle;
+static void sharedmem_qmi_svc_recv_msg(struct work_struct *work);
+static DECLARE_DELAYED_WORK(work_recv_msg, sharedmem_qmi_svc_recv_msg);
+static struct workqueue_struct *sharedmem_qmi_svc_workqueue;
+static struct dentry *dir_ent;
+
+static u32 rfsa_count;
+static u32 rmts_count;
+
+static DECLARE_RWSEM(sharedmem_list_lock); /* declare list lock semaphore */
+
+static struct work_struct sharedmem_qmi_init_work;
+
+static struct msg_desc rfsa_get_buffer_addr_req_desc = {
+	.max_msg_len = RFSA_GET_BUFF_ADDR_REQ_MSG_MAX_LEN_V01,
+	.msg_id = QMI_RFSA_GET_BUFF_ADDR_REQ_MSG_V01,
+	.ei_array = rfsa_get_buff_addr_req_msg_v01_ei,
+};
+
+static struct msg_desc rfsa_get_buffer_addr_resp_desc = {
+	.max_msg_len = RFSA_GET_BUFF_ADDR_RESP_MSG_MAX_LEN_V01,
+	.msg_id = QMI_RFSA_GET_BUFF_ADDR_RESP_MSG_V01,
+	.ei_array = rfsa_get_buff_addr_resp_msg_v01_ei,
+};
+
+void sharedmem_qmi_add_entry(struct sharemem_qmi_entry *qmi_entry)
+{
+	struct shared_addr_list *list_entry;
+
+	list_entry = kzalloc(sizeof(*list_entry), GFP_KERNEL);
+
+	/* If we cannot add the entry, log the failure and bail. */
+	if (list_entry == NULL) {
+		pr_err("Alloc of new list entry failed\n");
+		return;
+	}
+
+	/* Copy as much of the client name as will fit in the entry. */
+	strlcpy(list_entry->entry.name, qmi_entry->client_name,
+		sizeof(list_entry->entry.name));
+
+	/* Setup the rest of the entry. */
+	list_entry->entry.id = qmi_entry->client_id;
+	list_entry->entry.address = qmi_entry->address;
+	list_entry->entry.size = qmi_entry->size;
+	list_entry->entry.is_addr_dynamic = qmi_entry->is_addr_dynamic;
+	list_entry->entry.request_count = 0;
+
+	down_write(&sharedmem_list_lock);
+	list_add_tail(&(list_entry->node), &(list.node));
+	up_write(&sharedmem_list_lock);
+	pr_debug("Added new entry to list\n");
+}
+
+static int get_buffer_for_client(u32 id, u32 size, u64 *address)
+{
+	int result = -ENOENT;
+	int client_found = 0;
+	struct list_head *curr_node;
+	struct shared_addr_list *list_entry;
+
+	if (size == 0)
+		return -ENOMEM;
+
+	down_read(&sharedmem_list_lock);
+
+	list_for_each(curr_node, &list.node) {
+		list_entry = list_entry(curr_node, struct shared_addr_list,
+					node);
+		if (list_entry->entry.id == id) {
+			if (list_entry->entry.size >= size) {
+				*address = list_entry->entry.address;
+				list_entry->entry.request_count++;
+				result = 0;
+			} else {
+				pr_err("Shared mem req too large for id=%u\n",
+					id);
+				result = -ENOMEM;
+			}
+			client_found = 1;
+			break;
+		}
+	}
+
+	up_read(&sharedmem_list_lock);
+
+	if (client_found != 1) {
+		pr_err("Unknown client id %u\n", id);
+		result = -ENOENT;
+	}
+	return result;
+}
+
+static int sharedmem_qmi_get_buffer(void *conn_h, void *req_handle, void *req)
+{
+	struct rfsa_get_buff_addr_req_msg_v01 *get_buffer_req;
+	struct rfsa_get_buff_addr_resp_msg_v01 get_buffer_resp;
+	int result;
+	u64 address = 0;
+
+	get_buffer_req = (struct rfsa_get_buff_addr_req_msg_v01 *)req;
+	pr_debug("req->client_id = 0x%X and req->size = %u\n",
+		get_buffer_req->client_id, get_buffer_req->size);
+
+	result = get_buffer_for_client(get_buffer_req->client_id,
+					get_buffer_req->size, &address);
+	if (result != 0)
+		return result;
+
+	if (address == 0) {
+		pr_err("Entry found for client id= 0x%X but address is zero\n",
+			get_buffer_req->client_id);
+		return -ENOMEM;
+	}
+
+	memset(&get_buffer_resp, 0, sizeof(get_buffer_resp));
+	get_buffer_resp.address_valid = 1;
+	get_buffer_resp.address = address;
+	get_buffer_resp.resp.result = QMI_RESULT_SUCCESS_V01;
+
+	result = qmi_send_resp_from_cb(sharedmem_qmi_svc_handle, conn_h,
+				req_handle,
+				&rfsa_get_buffer_addr_resp_desc,
+				&get_buffer_resp,
+				sizeof(get_buffer_resp));
+	return result;
+}
+
+static int sharedmem_qmi_connect_cb(struct qmi_handle *handle, void *conn_h)
+{
+	if (sharedmem_qmi_svc_handle != handle || !conn_h)
+		return -EINVAL;
+	return 0;
+}
+
+static int sharedmem_qmi_disconnect_cb(struct qmi_handle *handle, void *conn_h)
+{
+	if (sharedmem_qmi_svc_handle != handle || !conn_h)
+		return -EINVAL;
+	return 0;
+}
+
+static int sharedmem_qmi_req_desc_cb(unsigned int msg_id,
+				struct msg_desc **req_desc)
+{
+	int rc;
+
+	switch (msg_id) {
+	case QMI_RFSA_GET_BUFF_ADDR_REQ_MSG_V01:
+		*req_desc = &rfsa_get_buffer_addr_req_desc;
+		rc = sizeof(struct rfsa_get_buff_addr_req_msg_v01);
+		break;
+
+	default:
+		rc = -ENOTSUPP;
+		break;
+	}
+	return rc;
+}
+
+static int sharedmem_qmi_req_cb(struct qmi_handle *handle, void *conn_h,
+				void *req_handle, unsigned int msg_id,
+				void *req)
+{
+	int rc = -ENOTSUPP;
+
+	if (sharedmem_qmi_svc_handle != handle || !conn_h)
+		return -EINVAL;
+
+	if (msg_id == QMI_RFSA_GET_BUFF_ADDR_REQ_MSG_V01)
+		rc = sharedmem_qmi_get_buffer(conn_h, req_handle, req);
+
+	return rc;
+}
+
+#define DEBUG_BUF_SIZE (2048)
+static char *debug_buffer;
+static u32 debug_data_size;
+static struct mutex dbg_buf_lock;	/* mutex for debug_buffer */
+
+static ssize_t debug_read(struct file *file, char __user *buf,
+			  size_t count, loff_t *file_pos)
+{
+	return simple_read_from_buffer(buf, count, file_pos, debug_buffer,
+					debug_data_size);
+}
+
+static u32 fill_debug_info(char *buffer, u32 buffer_size)
+{
+	u32 size = 0;
+	struct list_head *curr_node;
+	struct shared_addr_list *list_entry;
+
+	memset(buffer, 0, buffer_size);
+	size += scnprintf(buffer + size, buffer_size - size, "\n");
+
+	down_read(&sharedmem_list_lock);
+	list_for_each(curr_node, &list.node) {
+		list_entry = list_entry(curr_node, struct shared_addr_list,
+					node);
+		size += scnprintf(buffer + size, buffer_size - size,
+				"Client_name: %s\n", list_entry->entry.name);
+		size += scnprintf(buffer + size, buffer_size - size,
+				"Client_id: 0x%08X\n", list_entry->entry.id);
+		size += scnprintf(buffer + size, buffer_size - size,
+				"Buffer Size: 0x%08X (%d)\n",
+				list_entry->entry.size,
+				list_entry->entry.size);
+		size += scnprintf(buffer + size, buffer_size - size,
+				"Address: 0x%016llX\n",
+				list_entry->entry.address);
+		size += scnprintf(buffer + size, buffer_size - size,
+				"Address Allocation: %s\n",
+				(list_entry->entry.is_addr_dynamic ?
+				"Dynamic" : "Static"));
+		size += scnprintf(buffer + size, buffer_size - size,
+				"Request count: %llu\n",
+				list_entry->entry.request_count);
+		size += scnprintf(buffer + size, buffer_size - size, "\n\n");
+	}
+	up_read(&sharedmem_list_lock);
+
+	size += scnprintf(buffer + size, buffer_size - size,
+			"RFSA server start count = %u\n", rfsa_count);
+	size += scnprintf(buffer + size, buffer_size - size,
+			"RMTS server start count = %u\n", rmts_count);
+
+	size += scnprintf(buffer + size, buffer_size - size, "\n");
+	return size;
+}
+
+static int debug_open(struct inode *inode, struct file *file)
+{
+	u32 buffer_size;
+
+	mutex_lock(&dbg_buf_lock);
+	if (debug_buffer != NULL) {
+		mutex_unlock(&dbg_buf_lock);
+		return -EBUSY;
+	}
+	buffer_size = DEBUG_BUF_SIZE;
+	debug_buffer = kzalloc(buffer_size, GFP_KERNEL);
+	if (debug_buffer == NULL) {
+		mutex_unlock(&dbg_buf_lock);
+		return -ENOMEM;
+	}
+	debug_data_size = fill_debug_info(debug_buffer, buffer_size);
+	mutex_unlock(&dbg_buf_lock);
+	return 0;
+}
+
+static int debug_close(struct inode *inode, struct file *file)
+{
+	mutex_lock(&dbg_buf_lock);
+	kfree(debug_buffer);
+	debug_buffer = NULL;
+	debug_data_size = 0;
+	mutex_unlock(&dbg_buf_lock);
+	return 0;
+}
+
+static const struct file_operations debug_ops = {
+	.read = debug_read,
+	.open = debug_open,
+	.release = debug_close,
+};
+
+static int rfsa_increment(void *data, u64 val)
+{
+	if (rfsa_count != ~0)
+		rfsa_count++;
+	return 0;
+}
+
+static int rmts_increment(void *data, u64 val)
+{
+	if (rmts_count != ~0)
+		rmts_count++;
+	return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(rfsa_fops, NULL, rfsa_increment, "%llu\n");
+DEFINE_SIMPLE_ATTRIBUTE(rmts_fops, NULL, rmts_increment, "%llu\n");
+
+static void debugfs_init(void)
+{
+	struct dentry *f_ent;
+
+	mutex_init(&dbg_buf_lock);
+	dir_ent = debugfs_create_dir("rmt_storage", NULL);
+	if (IS_ERR_OR_NULL(dir_ent)) {
+		pr_err("Failed to create debug_fs directory\n");
+		return;
+	}
+
+	f_ent = debugfs_create_file("info", 0400, dir_ent, NULL, &debug_ops);
+	if (IS_ERR_OR_NULL(f_ent)) {
+		pr_err("Failed to create debug_fs info file\n");
+		return;
+	}
+
+	f_ent = debugfs_create_file("rfsa", 0200, dir_ent, NULL, &rfsa_fops);
+	if (IS_ERR_OR_NULL(f_ent)) {
+		pr_err("Failed to create debug_fs rfsa file\n");
+		return;
+	}
+
+	f_ent = debugfs_create_file("rmts", 0200, dir_ent, NULL, &rmts_fops);
+	if (IS_ERR_OR_NULL(f_ent)) {
+		pr_err("Failed to create debug_fs rmts file\n");
+		return;
+	}
+}
+
+static void debugfs_exit(void)
+{
+	debugfs_remove_recursive(dir_ent);
+	mutex_destroy(&dbg_buf_lock);
+}
+
+static void sharedmem_qmi_svc_recv_msg(struct work_struct *work)
+{
+	int rc;
+
+	do {
+		pr_debug("Notified about a Receive Event\n");
+	} while ((rc = qmi_recv_msg(sharedmem_qmi_svc_handle)) == 0);
+
+	if (rc != -ENOMSG)
+		pr_err("Error receiving message, rc=%d\n", rc);
+}
+
+static void sharedmem_qmi_notify(struct qmi_handle *handle,
+		enum qmi_event_type event, void *priv)
+{
+	switch (event) {
+	case QMI_RECV_MSG:
+		queue_delayed_work(sharedmem_qmi_svc_workqueue,
+				   &work_recv_msg, 0);
+		break;
+	default:
+		break;
+	}
+}
+
+static struct qmi_svc_ops_options sharedmem_qmi_ops_options = {
+	.version = 1,
+	.service_id = RFSA_SERVICE_ID_V01,
+	.service_vers = RFSA_SERVICE_VERS_V01,
+	.service_ins = RFSA_SERVICE_INSTANCE_NUM,
+	.connect_cb = sharedmem_qmi_connect_cb,
+	.disconnect_cb = sharedmem_qmi_disconnect_cb,
+	.req_desc_cb = sharedmem_qmi_req_desc_cb,
+	.req_cb = sharedmem_qmi_req_cb,
+};
+
+static void sharedmem_register_qmi(void)
+{
+	int rc;
+
+	sharedmem_qmi_svc_workqueue =
+		create_singlethread_workqueue("sharedmem_qmi_work");
+	if (!sharedmem_qmi_svc_workqueue)
+		return;
+
+	sharedmem_qmi_svc_handle = qmi_handle_create(sharedmem_qmi_notify,
+							NULL);
+	if (!sharedmem_qmi_svc_handle) {
+		pr_err("Creating sharedmem_qmi qmi handle failed\n");
+		destroy_workqueue(sharedmem_qmi_svc_workqueue);
+		return;
+	}
+	rc = qmi_svc_register(sharedmem_qmi_svc_handle,
+				&sharedmem_qmi_ops_options);
+	if (rc < 0) {
+		pr_err("Registering sharedmem_qmi failed %d\n", rc);
+		qmi_handle_destroy(sharedmem_qmi_svc_handle);
+		destroy_workqueue(sharedmem_qmi_svc_workqueue);
+		return;
+	}
+	pr_info("qmi init successful\n");
+}
+
+static void sharedmem_qmi_init_worker(struct work_struct *work)
+{
+	sharedmem_register_qmi();
+	debugfs_init();
+}
+
+int sharedmem_qmi_init(void)
+{
+	INIT_LIST_HEAD(&list.node);
+	INIT_WORK(&sharedmem_qmi_init_work, sharedmem_qmi_init_worker);
+	schedule_work(&sharedmem_qmi_init_work);
+	return 0;
+}
+
+void sharedmem_qmi_exit(void)
+{
+	qmi_svc_unregister(sharedmem_qmi_svc_handle);
+	flush_workqueue(sharedmem_qmi_svc_workqueue);
+	qmi_handle_destroy(sharedmem_qmi_svc_handle);
+	destroy_workqueue(sharedmem_qmi_svc_workqueue);
+	debugfs_exit();
+}
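The debugfs files created above can be exercised from user space; a small sketch, assuming debugfs is mounted at /sys/kernel/debug (the "rmt_storage" directory name comes from debugfs_init()).

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[512];
	ssize_t n;
	int fd = open("/sys/kernel/debug/rmt_storage/info", O_RDONLY);

	if (fd < 0)
		return 1;
	/* debug_open() snapshots the entry list; read() streams it out. */
	while ((n = read(fd, buf, sizeof(buf))) > 0)
		fwrite(buf, 1, n, stdout);
	close(fd);
	return 0;
}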
diff -Nruw linux-4.4.115/drivers/uio/msm_sharedmem/sharedmem_qmi.h linux-4.4.115-fbx/drivers/uio/msm_sharedmem/sharedmem_qmi.h
--- linux-4.4.115/drivers/uio/msm_sharedmem/sharedmem_qmi.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/uio/msm_sharedmem/sharedmem_qmi.h	2019-01-22 16:16:27.223280093 +0100
@@ -0,0 +1,33 @@
+/* Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __SHAREDMEM_QMI_H__
+#define __SHAREDMEM_QMI_H__
+
+#include <linux/module.h>
+
+struct sharemem_qmi_entry {
+	const char *client_name;
+	u32 client_id;
+	u64 address;
+	u32 size;
+	bool is_addr_dynamic;
+};
+
+int sharedmem_qmi_init(void);
+
+void sharedmem_qmi_exit(void);
+
+void sharedmem_qmi_add_entry(struct sharemem_qmi_entry *qmi_entry);
+
+#endif /* __SHAREDMEM_QMI_H__ */
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/usb/dwc3/dbm.c	2019-01-22 16:16:27.255280383 +0100
@@ -0,0 +1,643 @@
+/*
+ * Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+
+#include "dbm.h"
+
+/*
+ * USB DBM hardware registers.
+ */
+enum dbm_reg {
+	DBM_EP_CFG,
+	DBM_DATA_FIFO,
+	DBM_DATA_FIFO_SIZE,
+	DBM_DATA_FIFO_EN,
+	DBM_GEVNTADR,
+	DBM_GEVNTSIZ,
+	DBM_DBG_CNFG,
+	DBM_HW_TRB0_EP,
+	DBM_HW_TRB1_EP,
+	DBM_HW_TRB2_EP,
+	DBM_HW_TRB3_EP,
+	DBM_PIPE_CFG,
+	DBM_DISABLE_UPDXFER,
+	DBM_SOFT_RESET,
+	DBM_GEN_CFG,
+	DBM_GEVNTADR_LSB,
+	DBM_GEVNTADR_MSB,
+	DBM_DATA_FIFO_LSB,
+	DBM_DATA_FIFO_MSB,
+	DBM_DATA_FIFO_ADDR_EN,
+	DBM_DATA_FIFO_SIZE_EN,
+};
+
+struct dbm_reg_data {
+	u32 offset;
+	unsigned int ep_mult;
+};
+
+#define DBM_1_4_NUM_EP		4
+#define DBM_1_5_NUM_EP		8
+
+struct dbm {
+	void __iomem *base;
+	const struct dbm_reg_data *reg_table;
+
+	struct device		*dev;
+	struct list_head	head;
+
+	int dbm_num_eps;
+	u8 ep_num_mapping[DBM_1_5_NUM_EP];
+	bool dbm_reset_ep_after_lpm;
+
+	bool is_1p4;
+};
+
+static const struct dbm_reg_data dbm_1_4_regtable[] = {
+	[DBM_EP_CFG]		= { 0x0000, 0x4 },
+	[DBM_DATA_FIFO]		= { 0x0010, 0x4 },
+	[DBM_DATA_FIFO_SIZE]	= { 0x0020, 0x4 },
+	[DBM_DATA_FIFO_EN]	= { 0x0030, 0x0 },
+	[DBM_GEVNTADR]		= { 0x0034, 0x0 },
+	[DBM_GEVNTSIZ]		= { 0x0038, 0x0 },
+	[DBM_DBG_CNFG]		= { 0x003C, 0x0 },
+	[DBM_HW_TRB0_EP]	= { 0x0040, 0x4 },
+	[DBM_HW_TRB1_EP]	= { 0x0050, 0x4 },
+	[DBM_HW_TRB2_EP]	= { 0x0060, 0x4 },
+	[DBM_HW_TRB3_EP]	= { 0x0070, 0x4 },
+	[DBM_PIPE_CFG]		= { 0x0080, 0x0 },
+	[DBM_SOFT_RESET]	= { 0x0084, 0x0 },
+	[DBM_GEN_CFG]		= { 0x0088, 0x0 },
+	[DBM_GEVNTADR_LSB]	= { 0x0098, 0x0 },
+	[DBM_GEVNTADR_MSB]	= { 0x009C, 0x0 },
+	[DBM_DATA_FIFO_LSB]	= { 0x00A0, 0x8 },
+	[DBM_DATA_FIFO_MSB]	= { 0x00A4, 0x8 },
+};
+
+static const struct dbm_reg_data dbm_1_5_regtable[] = {
+	[DBM_EP_CFG]		= { 0x0000, 0x4 },
+	[DBM_DATA_FIFO]		= { 0x0280, 0x4 },
+	[DBM_DATA_FIFO_SIZE]	= { 0x0080, 0x4 },
+	[DBM_DATA_FIFO_EN]	= { 0x026C, 0x0 },
+	[DBM_GEVNTADR]		= { 0x0270, 0x0 },
+	[DBM_GEVNTSIZ]		= { 0x0268, 0x0 },
+	[DBM_DBG_CNFG]		= { 0x0208, 0x0 },
+	[DBM_HW_TRB0_EP]	= { 0x0220, 0x4 },
+	[DBM_HW_TRB1_EP]	= { 0x0230, 0x4 },
+	[DBM_HW_TRB2_EP]	= { 0x0240, 0x4 },
+	[DBM_HW_TRB3_EP]	= { 0x0250, 0x4 },
+	[DBM_PIPE_CFG]		= { 0x0274, 0x0 },
+	[DBM_DISABLE_UPDXFER]	= { 0x0298, 0x0 },
+	[DBM_SOFT_RESET]	= { 0x020C, 0x0 },
+	[DBM_GEN_CFG]		= { 0x0210, 0x0 },
+	[DBM_GEVNTADR_LSB]	= { 0x0260, 0x0 },
+	[DBM_GEVNTADR_MSB]	= { 0x0264, 0x0 },
+	[DBM_DATA_FIFO_LSB]	= { 0x0100, 0x8 },
+	[DBM_DATA_FIFO_MSB]	= { 0x0104, 0x8 },
+	[DBM_DATA_FIFO_ADDR_EN]	= { 0x0200, 0x0 },
+	[DBM_DATA_FIFO_SIZE_EN]	= { 0x0204, 0x0 },
+};
+
+static LIST_HEAD(dbm_list);
+
+/**
+ * Write register masked field with debug info.
+ *
+ * @dbm - DBM specific data
+ * @reg - DBM register, used to look up the offset value
+ * @ep - endpoint number
+ * @mask - register bitmask.
+ * @val - value to write.
+ *
+ */
+static inline void msm_dbm_write_ep_reg_field(struct dbm *dbm,
+					      enum dbm_reg reg, int ep,
+					      const u32 mask, u32 val)
+{
+	u32 shift = find_first_bit((void *)&mask, 32);
+	u32 offset = dbm->reg_table[reg].offset +
+			(dbm->reg_table[reg].ep_mult * ep);
+	u32 tmp = ioread32(dbm->base + offset);
+
+	tmp &= ~mask;		/* clear written bits */
+	val = tmp | (val << shift);
+	iowrite32(val, dbm->base + offset);
+}
+
+#define msm_dbm_write_reg_field(d, r, m, v) \
+	msm_dbm_write_ep_reg_field(d, r, 0, m, v)
+
+/**
+ *
+ * Read register with debug info.
+ *
+ * @dbm - DBM specific data
+ * @reg - DBM register, used to look up the offset value
+ * @ep - endpoint number
+ *
+ * @return u32
+ */
+static inline u32 msm_dbm_read_ep_reg(struct dbm *dbm, enum dbm_reg reg, int ep)
+{
+	u32 offset = dbm->reg_table[reg].offset +
+			(dbm->reg_table[reg].ep_mult * ep);
+	return ioread32(dbm->base + offset);
+}
+
+#define msm_dbm_read_reg(d, r) msm_dbm_read_ep_reg(d, r, 0)
+
+/**
+ *
+ * Write register with debug info.
+ *
+ * @dbm - DBM specific data
+ * @reg - DBM register, used to look up the offset value
+ * @ep - endpoint number
+ * @val - value to write.
+ *
+ */
+static inline void msm_dbm_write_ep_reg(struct dbm *dbm, enum dbm_reg reg,
+					int ep, u32 val)
+{
+	u32 offset = dbm->reg_table[reg].offset +
+			(dbm->reg_table[reg].ep_mult * ep);
+	iowrite32(val, dbm->base + offset);
+}
+
+#define msm_dbm_write_reg(d, r, v) msm_dbm_write_ep_reg(d, r, 0, v)
+
+/**
+ * Return DBM EP number according to usb endpoint number.
+ *
+ */
+static int find_matching_dbm_ep(struct dbm *dbm, u8 usb_ep)
+{
+	int i;
+
+	for (i = 0; i < dbm->dbm_num_eps; i++)
+		if (dbm->ep_num_mapping[i] == usb_ep)
+			return i;
+
+	pr_debug("%s: No DBM EP matches USB EP %d\n", __func__, usb_ep);
+	return -ENODEV; /* Not found */
+}
+
+/**
+ * Reset the DBM registers upon initialization.
+ *
+ */
+int dbm_soft_reset(struct dbm *dbm, bool reset)
+{
+	if (!dbm) {
+		pr_err("%s: dbm pointer is NULL!\n", __func__);
+		return -EPERM;
+	}
+
+	pr_debug("%s DBM reset\n", (reset ? "Enter" : "Exit"));
+
+	msm_dbm_write_reg_field(dbm, DBM_SOFT_RESET, DBM_SFT_RST_MASK, reset);
+
+	return 0;
+}
+
+/**
+ * Soft reset specific DBM ep.
+ * This function is called by the function driver upon events
+ * such as transfer aborting, USB re-enumeration and USB
+ * disconnection.
+ *
+ * @dbm_ep - DBM ep number.
+ * @enter_reset - should we enter a reset state or get out of it.
+ *
+ */
+static int ep_soft_reset(struct dbm *dbm, u8 dbm_ep, bool enter_reset)
+{
+	pr_debug("Setting DBM ep %d reset to %d\n", dbm_ep, enter_reset);
+
+	if (dbm_ep >= dbm->dbm_num_eps) {
+		pr_err("Invalid DBM ep index %d\n", dbm_ep);
+		return -ENODEV;
+	}
+
+	msm_dbm_write_reg_field(dbm, DBM_SOFT_RESET,
+		DBM_SFT_RST_EPS_MASK & (1 << dbm_ep),
+		enter_reset ? 1 : 0);
+
+	return 0;
+}
+
+/**
+ * Soft reset specific DBM ep (by USB EP number).
+ * This function is called by the function driver upon events
+ * such as transfer aborting, USB re-enumeration and USB
+ * disconnection.
+ *
+ * The function relies on ep_soft_reset() for checking
+ * the legality of the resulting DBM ep number.
+ *
+ * @usb_ep - USB ep number.
+ * @enter_reset - should we enter a reset state or get out of it.
+ *
+ */
+int dbm_ep_soft_reset(struct dbm *dbm, u8 usb_ep, bool enter_reset)
+{
+	int dbm_ep;
+
+	if (!dbm) {
+		pr_err("%s: dbm pointer is NULL!\n", __func__);
+		return -EPERM;
+	}
+
+	dbm_ep = find_matching_dbm_ep(dbm, usb_ep);
+
+	pr_debug("Setting USB ep %d reset to %d\n", usb_ep, enter_reset);
+	return ep_soft_reset(dbm, dbm_ep, enter_reset);
+}
+
+/**
+ * Configure a USB DBM ep to work in BAM mode.
+ *
+ *
+ * @usb_ep - USB physical EP number.
+ * @producer - producer/consumer.
+ * @disable_wb - disable write back to system memory.
+ * @internal_mem - use internal USB memory for data fifo.
+ * @ioc - enable interrupt on completion.
+ *
+ * @return int - DBM ep number.
+ */
+int dbm_ep_config(struct dbm *dbm, u8 usb_ep, u8 bam_pipe, bool producer,
+		  bool disable_wb, bool internal_mem, bool ioc)
+{
+	int dbm_ep;
+	u32 ep_cfg;
+	u32 data;
+
+	if (!dbm) {
+		pr_err("%s: dbm pointer is NULL!\n", __func__);
+		return -EPERM;
+	}
+
+	pr_debug("Configuring DBM ep\n");
+
+	dbm_ep = find_matching_dbm_ep(dbm, usb_ep);
+
+	if (dbm_ep < 0) {
+		pr_err("usb ep index %d has no corresponding dbm ep\n", usb_ep);
+		return -ENODEV;
+	}
+
+	/* Due to HW issue, EP 7 can be set as IN EP only */
+	if (!dbm->is_1p4 && dbm_ep == 7 && producer) {
+		pr_err("last DBM EP can't be OUT EP\n");
+		return -ENODEV;
+	}
+
+	/* Set ioc bit for dbm_ep if needed */
+	msm_dbm_write_reg_field(dbm, DBM_DBG_CNFG,
+		DBM_ENABLE_IOC_MASK & 1 << dbm_ep, ioc ? 1 : 0);
+
+	ep_cfg = (producer ? DBM_PRODUCER : 0) |
+		(disable_wb ? DBM_DISABLE_WB : 0) |
+		(internal_mem ? DBM_INT_RAM_ACC : 0);
+
+	msm_dbm_write_ep_reg_field(dbm, DBM_EP_CFG, dbm_ep,
+		DBM_PRODUCER | DBM_DISABLE_WB | DBM_INT_RAM_ACC, ep_cfg >> 8);
+
+	msm_dbm_write_ep_reg_field(dbm, DBM_EP_CFG, dbm_ep, USB3_EPNUM,
+		usb_ep);
+
+	if (dbm->is_1p4) {
+		msm_dbm_write_ep_reg_field(dbm, DBM_EP_CFG, dbm_ep,
+				DBM_BAM_PIPE_NUM, bam_pipe);
+		msm_dbm_write_reg_field(dbm, DBM_PIPE_CFG, 0x000000ff, 0xe4);
+	}
+
+	msm_dbm_write_ep_reg_field(dbm, DBM_EP_CFG, dbm_ep, DBM_EN_EP, 1);
+
+	data = msm_dbm_read_reg(dbm, DBM_DISABLE_UPDXFER);
+	data &= ~(0x1 << dbm_ep);
+	msm_dbm_write_reg(dbm, DBM_DISABLE_UPDXFER, data);
+
+	return dbm_ep;
+}
+
+/**
+ * Return number of configured DBM endpoints.
+ */
+int dbm_get_num_of_eps_configured(struct dbm *dbm)
+{
+	int i;
+	int count = 0;
+
+	if (!dbm) {
+		pr_err("%s: dbm pointer is NULL!\n", __func__);
+		return -EPERM;
+	}
+
+	for (i = 0; i < dbm->dbm_num_eps; i++)
+		if (dbm->ep_num_mapping[i])
+			count++;
+
+	return count;
+}
+
+/**
+ * Configure a USB DBM ep to work in normal mode.
+ *
+ * @usb_ep - USB ep number.
+ *
+ */
+int dbm_ep_unconfig(struct dbm *dbm, u8 usb_ep)
+{
+	int dbm_ep;
+	u32 data;
+
+	if (!dbm) {
+		pr_err("%s: dbm pointer is NULL!\n", __func__);
+		return -EPERM;
+	}
+
+	pr_debug("Unconfiguring DB ep\n");
+
+	dbm_ep = find_matching_dbm_ep(dbm, usb_ep);
+
+	if (dbm_ep < 0) {
+		pr_debug("usb ep index %d has no corresponding dbm ep\n", usb_ep);
+		return -ENODEV;
+	}
+
+	dbm->ep_num_mapping[dbm_ep] = 0;
+
+	data = msm_dbm_read_ep_reg(dbm, DBM_EP_CFG, dbm_ep);
+	data &= (~0x1);
+	msm_dbm_write_ep_reg(dbm, DBM_EP_CFG, dbm_ep, data);
+
+	/*
+	 * ep_soft_reset is not required during disconnect as pipe reset on
+	 * next connect will take care of the same.
+	 */
+	return 0;
+}
+
+/**
+ * Configure the DBM with the USB3 core event buffer.
+ * This function is called by the SNPS UDC upon initialization.
+ *
+ * @addr - address of the event buffer.
+ * @size - size of the event buffer.
+ *
+ */
+int dbm_event_buffer_config(struct dbm *dbm, u32 addr_lo, u32 addr_hi, int size)
+{
+	if (!dbm) {
+		pr_err("%s: dbm pointer is NULL!\n", __func__);
+		return -EPERM;
+	}
+
+	pr_debug("Configuring event buffer\n");
+
+	if (size < 0) {
+		pr_err("Invalid size. size = %d\n", size);
+		return -EINVAL;
+	}
+
+	/* If the event buffer is already configured, do nothing. */
+	if (msm_dbm_read_reg(dbm, DBM_GEVNTSIZ))
+		return 0;
+
+	if (!dbm->is_1p4 || sizeof(phys_addr_t) > sizeof(u32)) {
+		msm_dbm_write_reg(dbm, DBM_GEVNTADR_LSB, addr_lo);
+		msm_dbm_write_reg(dbm, DBM_GEVNTADR_MSB, addr_hi);
+	} else {
+		msm_dbm_write_reg(dbm, DBM_GEVNTADR, addr_lo);
+	}
+
+	msm_dbm_write_reg_field(dbm, DBM_GEVNTSIZ, DBM_GEVNTSIZ_MASK, size);
+
+	return 0;
+}
+
+/**
+ * Disable update xfer before queueing stop xfer command to USB3 core.
+ *
+ * @usb_ep - USB physical EP number.
+ *
+ */
+int dwc3_dbm_disable_update_xfer(struct dbm *dbm, u8 usb_ep)
+{
+	u32 data;
+	int dbm_ep;
+
+	if (!dbm) {
+		pr_err("%s: dbm pointer is NULL!\n", __func__);
+		return -EPERM;
+	}
+
+	dbm_ep = find_matching_dbm_ep(dbm, usb_ep);
+
+	if (dbm_ep < 0) {
+		pr_err("usb ep index %d has no corresponding dbm ep\n", usb_ep);
+		return -ENODEV;
+	}
+
+	data = msm_dbm_read_reg(dbm, DBM_DISABLE_UPDXFER);
+	data |= (0x1 << dbm_ep);
+	msm_dbm_write_reg(dbm, DBM_DISABLE_UPDXFER, data);
+
+	return 0;
+}
+
+int dbm_data_fifo_config(struct dbm *dbm, u8 dep_num, phys_addr_t addr,
+				u32 size, u8 dst_pipe_idx)
+{
+	u8 dbm_ep = dst_pipe_idx;
+	u32 lo = lower_32_bits(addr);
+	u32 hi = upper_32_bits(addr);
+
+	if (!dbm) {
+		pr_err("%s: dbm pointer is NULL!\n", __func__);
+		return -EPERM;
+	}
+
+	dbm->ep_num_mapping[dbm_ep] = dep_num;
+
+	if (!dbm->is_1p4 || sizeof(addr) > sizeof(u32)) {
+		msm_dbm_write_ep_reg(dbm, DBM_DATA_FIFO_LSB, dbm_ep, lo);
+		msm_dbm_write_ep_reg(dbm, DBM_DATA_FIFO_MSB, dbm_ep, hi);
+	} else {
+		msm_dbm_write_ep_reg(dbm, DBM_DATA_FIFO, dbm_ep, addr);
+	}
+
+	msm_dbm_write_ep_reg_field(dbm, DBM_DATA_FIFO_SIZE, dbm_ep,
+		DBM_DATA_FIFO_SIZE_MASK, size);
+
+	return 0;
+}
+
+void dbm_set_speed(struct dbm *dbm, bool speed)
+{
+	if (!dbm) {
+		pr_err("%s: dbm pointer is NULL!\n", __func__);
+		return;
+	}
+
+	msm_dbm_write_reg(dbm, DBM_GEN_CFG, speed);
+}
+
+void dbm_enable(struct dbm *dbm)
+{
+	if (!dbm) {
+		pr_err("%s: dbm pointer is NULL!\n", __func__);
+		return;
+	}
+
+	if (dbm->is_1p4) /* no-op */
+		return;
+
+	msm_dbm_write_reg(dbm, DBM_DATA_FIFO_ADDR_EN, 0x000000FF);
+	msm_dbm_write_reg(dbm, DBM_DATA_FIFO_SIZE_EN, 0x000000FF);
+}
+
+bool dbm_reset_ep_after_lpm(struct dbm *dbm)
+{
+	if (!dbm) {
+		pr_err("%s: dbm pointer is NULL!\n", __func__);
+		return false;
+	}
+
+	return dbm->dbm_reset_ep_after_lpm;
+}
+
+bool dbm_l1_lpm_interrupt(struct dbm *dbm)
+{
+	if (!dbm) {
+		pr_err("%s: dbm pointer is NULL!\n", __func__);
+		return false;
+	}
+
+	return !dbm->is_1p4;
+}
+
+static const struct of_device_id msm_dbm_id_table[] = {
+	{ .compatible = "qcom,usb-dbm-1p4", .data = &dbm_1_4_regtable },
+	{ .compatible = "qcom,usb-dbm-1p5", .data = &dbm_1_5_regtable },
+	{ },
+};
+MODULE_DEVICE_TABLE(of, msm_dbm_id_table);
+
+static int msm_dbm_probe(struct platform_device *pdev)
+{
+	struct device_node *node = pdev->dev.of_node;
+	const struct of_device_id *match;
+	struct dbm *dbm;
+	struct resource *res;
+
+	dbm = devm_kzalloc(&pdev->dev, sizeof(*dbm), GFP_KERNEL);
+	if (!dbm)
+		return -ENOMEM;
+
+	match = of_match_node(msm_dbm_id_table, node);
+	if (!match) {
+		dev_err(&pdev->dev, "Unsupported DBM module\n");
+		return -ENODEV;
+	}
+	dbm->reg_table = match->data;
+
+	if (!strcmp(match->compatible, "qcom,usb-dbm-1p4")) {
+		dbm->dbm_num_eps = DBM_1_4_NUM_EP;
+		dbm->is_1p4 = true;
+	} else {
+		dbm->dbm_num_eps = DBM_1_5_NUM_EP;
+	}
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!res) {
+		dev_err(&pdev->dev, "missing memory base resource\n");
+		return -ENODEV;
+	}
+
+	dbm->base = devm_ioremap_nocache(&pdev->dev, res->start,
+		resource_size(res));
+	if (!dbm->base) {
+		dev_err(&pdev->dev, "ioremap failed\n");
+		return -ENOMEM;
+	}
+
+	dbm->dbm_reset_ep_after_lpm = of_property_read_bool(node,
+			"qcom,reset-ep-after-lpm-resume");
+
+	dbm->dev = &pdev->dev;
+
+	platform_set_drvdata(pdev, dbm);
+
+	list_add_tail(&dbm->head, &dbm_list);
+
+	return 0;
+}
+
+static struct platform_driver msm_dbm_driver = {
+	.probe		= msm_dbm_probe,
+	.driver = {
+		.name	= "msm-usb-dbm",
+		.of_match_table = of_match_ptr(msm_dbm_id_table),
+	},
+};
+
+module_platform_driver(msm_dbm_driver);
+
+static struct dbm *of_usb_find_dbm(struct device_node *node)
+{
+	struct dbm  *dbm;
+
+	list_for_each_entry(dbm, &dbm_list, head) {
+		if (node != dbm->dev->of_node)
+			continue;
+		return dbm;
+	}
+	return ERR_PTR(-ENODEV);
+}
+
+struct dbm *usb_get_dbm_by_phandle(struct device *dev, const char *phandle)
+{
+	struct device_node *node;
+
+	if (!dev->of_node) {
+		dev_dbg(dev, "device does not have a device node entry\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	node = of_parse_phandle(dev->of_node, phandle, 0);
+	if (!node) {
+		dev_dbg(dev, "failed to get %s phandle in %s node\n", phandle,
+			dev->of_node->full_name);
+		return ERR_PTR(-ENODEV);
+	}
+
+	return of_usb_find_dbm(node);
+}
+
+MODULE_DESCRIPTION("MSM USB DBM driver");
+MODULE_LICENSE("GPL v2");
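A hedged sketch of the call order a glue driver might use with this API: dbm_data_fifo_config() records the USB-EP-to-DBM-EP mapping that dbm_ep_config() later resolves through find_matching_dbm_ep(). All endpoint, pipe, and size values below are placeholders.

/* Hypothetical usage, not part of the patch. */
static int example_dbm_setup(struct dbm *dbm, phys_addr_t fifo_addr,
			     u32 fifo_size, u32 evt_lo, u32 evt_hi)
{
	int ret;

	/* Program the USB3 core event buffer into the DBM. */
	ret = dbm_event_buffer_config(dbm, evt_lo, evt_hi, 4096);
	if (ret)
		return ret;

	/* Map USB physical EP 1 onto DBM EP 0 and program its data FIFO. */
	ret = dbm_data_fifo_config(dbm, 1, fifo_addr, fifo_size, 0);
	if (ret)
		return ret;

	/* IN (consumer) endpoint, write-back disabled, IOC enabled. */
	ret = dbm_ep_config(dbm, 1, 0, false, true, false, true);
	return ret < 0 ? ret : 0;
}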
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/usb/dwc3/dbm.h	2019-01-22 16:16:27.255280383 +0100
@@ -0,0 +1,75 @@
+/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __DBM_H
+#define __DBM_H
+
+#include <linux/device.h>
+#include <linux/types.h>
+
+/*
+ * USB DBM hardware register bitmasks.
+ */
+/* DBM_EP_CFG */
+#define DBM_EN_EP		0x00000001
+#define USB3_EPNUM		0x0000003E
+#define DBM_BAM_PIPE_NUM	0x000000C0
+#define DBM_PRODUCER		0x00000100
+#define DBM_DISABLE_WB		0x00000200
+#define DBM_INT_RAM_ACC		0x00000400
+
+/* DBM_DATA_FIFO_SIZE */
+#define DBM_DATA_FIFO_SIZE_MASK	0x0000ffff
+
+/* DBM_GEVNTSIZ */
+#define DBM_GEVNTSIZ_MASK	0x0000ffff
+
+/* DBM_DBG_CNFG */
+#define DBM_ENABLE_IOC_MASK	0x0000000f
+
+/* DBM_SOFT_RESET */
+#define DBM_SFT_RST_EP0		0x00000001
+#define DBM_SFT_RST_EP1		0x00000002
+#define DBM_SFT_RST_EP2		0x00000004
+#define DBM_SFT_RST_EP3		0x00000008
+#define DBM_SFT_RST_EPS_MASK	0x0000000F
+#define DBM_SFT_RST_MASK	0x80000000
+#define DBM_EN_MASK		0x00000002
+
+/* DBM TRB configurations */
+#define DBM_TRB_BIT		0x80000000
+#define DBM_TRB_DATA_SRC	0x40000000
+#define DBM_TRB_DMA		0x20000000
+#define DBM_TRB_EP_NUM(ep)	((ep) << 24)
+
+struct dbm;
+
+struct dbm *usb_get_dbm_by_phandle(struct device *dev, const char *phandle);
+
+int dbm_soft_reset(struct dbm *dbm, bool enter_reset);
+int dbm_ep_config(struct dbm  *dbm, u8 usb_ep, u8 bam_pipe, bool producer,
+			bool disable_wb, bool internal_mem, bool ioc);
+int dbm_ep_unconfig(struct dbm *dbm, u8 usb_ep);
+int dbm_get_num_of_eps_configured(struct dbm *dbm);
+int dbm_event_buffer_config(struct dbm *dbm, u32 addr_lo, u32 addr_hi,
+				int size);
+int dwc3_dbm_disable_update_xfer(struct dbm *dbm, u8 usb_ep);
+int dbm_data_fifo_config(struct dbm *dbm, u8 dep_num, phys_addr_t addr,
+				u32 size, u8 dst_pipe_idx);
+void dbm_set_speed(struct dbm *dbm, bool speed);
+void dbm_enable(struct dbm *dbm);
+int dbm_ep_soft_reset(struct dbm *dbm, u8 usb_ep, bool enter_reset);
+bool dbm_reset_ep_after_lpm(struct dbm *dbm);
+bool dbm_l1_lpm_interrupt(struct dbm *dbm);
+
+#endif /* __DBM_H */
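A sketch of how a controller glue driver could obtain its DBM instance through this header; the "qcom,dbm" phandle property name is an assumption (dwc3-msm.c below resolves its DBM in the same general way).

#include <linux/err.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
#include "dbm.h"

/* Hypothetical glue probe fragment, not part of the patch. */
static int example_glue_probe(struct platform_device *pdev)
{
	struct dbm *dbm;

	dbm = usb_get_dbm_by_phandle(&pdev->dev, "qcom,dbm");
	if (IS_ERR(dbm))
		return PTR_ERR(dbm);

	/* Pulse the block-level soft reset before first use. */
	dbm_soft_reset(dbm, true);
	usleep_range(1000, 1200);
	dbm_soft_reset(dbm, false);

	return 0;
}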
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/usb/dwc3/dwc3-msm.c	2019-10-29 09:26:24.985216311 +0100
@@ -0,0 +1,4116 @@
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/cpu.h>
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
+#include <linux/pm_runtime.h>
+#include <linux/ratelimit.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/types.h>
+#include <linux/delay.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/of_gpio.h>
+#include <linux/list.h>
+#include <linux/uaccess.h>
+#include <linux/usb/ch9.h>
+#include <linux/usb/gadget.h>
+#include <linux/usb/of.h>
+#include <linux/usb/msm_hsusb.h>
+#include <linux/regulator/consumer.h>
+#include <linux/pm_wakeup.h>
+#include <linux/power_supply.h>
+#include <linux/cdev.h>
+#include <linux/completion.h>
+#include <linux/clk/msm-clk.h>
+#include <linux/msm-bus.h>
+#include <linux/irq.h>
+#include <linux/extcon.h>
+#include <linux/reset.h>
+
+#include "power.h"
+#include "core.h"
+#include "gadget.h"
+#include "dbm.h"
+#include "debug.h"
+#include "xhci.h"
+
+#define SDP_CONNETION_CHECK_TIME 10000 /* in ms */
+
+/* timeout to wait for USB cable status notification (in ms) */
+#define SM_INIT_TIMEOUT 30000
+#define DWC3_WAKEUP_SRC_TIMEOUT 5000
+/* AHB2PHY register offsets */
+#define PERIPH_SS_AHB2PHY_TOP_CFG 0x10
+
+/* AHB2PHY read/write wait value */
+#define ONE_READ_WRITE_WAIT 0x11
+
+/* cpu to which the usb interrupt is pinned */
+static int cpu_to_affin;
+module_param(cpu_to_affin, int, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(cpu_to_affin, "pin usb irq to this cpu");
+
+/* XHCI registers */
+#define USB3_HCSPARAMS1		(0x4)
+#define USB3_HCCPARAMS2		(0x1c)
+#define HCC_CTC(p)		((p) & (1 << 3))
+#define USB3_PORTSC		(0x420)
+
+/*
+ * USB QSCRATCH hardware registers.
+ */
+#define QSCRATCH_REG_OFFSET	(0x000F8800)
+#define QSCRATCH_GENERAL_CFG	(QSCRATCH_REG_OFFSET + 0x08)
+#define CGCTL_REG		(QSCRATCH_REG_OFFSET + 0x28)
+#define PWR_EVNT_IRQ_STAT_REG    (QSCRATCH_REG_OFFSET + 0x58)
+#define PWR_EVNT_IRQ_MASK_REG    (QSCRATCH_REG_OFFSET + 0x5C)
+
+#define PWR_EVNT_POWERDOWN_IN_P3_MASK		BIT(2)
+#define PWR_EVNT_POWERDOWN_OUT_P3_MASK		BIT(3)
+#define PWR_EVNT_LPM_IN_L2_MASK			BIT(4)
+#define PWR_EVNT_LPM_OUT_L2_MASK		BIT(5)
+#define PWR_EVNT_LPM_OUT_L1_MASK		BIT(13)
+
+/* QSCRATCH_GENERAL_CFG register bit offset */
+#define PIPE_UTMI_CLK_SEL	BIT(0)
+#define PIPE3_PHYSTATUS_SW	BIT(3)
+#define PIPE_UTMI_CLK_DIS	BIT(8)
+
+#define HS_PHY_CTRL_REG		(QSCRATCH_REG_OFFSET + 0x10)
+#define UTMI_OTG_VBUS_VALID	BIT(20)
+#define SW_SESSVLD_SEL		BIT(28)
+
+#define SS_PHY_CTRL_REG		(QSCRATCH_REG_OFFSET + 0x30)
+#define LANE0_PWR_PRESENT	BIT(24)
+
+/* GSI related registers */
+#define GSI_TRB_ADDR_BIT_53_MASK	(1 << 21)
+#define GSI_TRB_ADDR_BIT_55_MASK	(1 << 23)
+
+#define	GSI_GENERAL_CFG_REG		(QSCRATCH_REG_OFFSET + 0xFC)
+#define	GSI_RESTART_DBL_PNTR_MASK	BIT(20)
+#define	GSI_CLK_EN_MASK			BIT(12)
+#define	BLOCK_GSI_WR_GO_MASK		BIT(1)
+#define	GSI_EN_MASK			BIT(0)
+
+#define GSI_DBL_ADDR_L(n)	((QSCRATCH_REG_OFFSET + 0x110) + ((n) * 4))
+#define GSI_DBL_ADDR_H(n)	((QSCRATCH_REG_OFFSET + 0x120) + ((n) * 4))
+#define GSI_RING_BASE_ADDR_L(n)	((QSCRATCH_REG_OFFSET + 0x130) + ((n) * 4))
+#define GSI_RING_BASE_ADDR_H(n)	((QSCRATCH_REG_OFFSET + 0x140) + ((n) * 4))
+
+#define	GSI_IF_STS	(QSCRATCH_REG_OFFSET + 0x1A4)
+#define	GSI_WR_CTRL_STATE_MASK	BIT(15)
+
+struct dwc3_msm_req_complete {
+	struct list_head list_item;
+	struct usb_request *req;
+	void (*orig_complete)(struct usb_ep *ep,
+			      struct usb_request *req);
+};
+
+enum dwc3_id_state {
+	DWC3_ID_GROUND = 0,
+	DWC3_ID_FLOAT,
+};
+
+/* for type c cable */
+enum plug_orientation {
+	ORIENTATION_NONE,
+	ORIENTATION_CC1,
+	ORIENTATION_CC2,
+};
+
+/* Input bits to state machine (mdwc->inputs) */
+
+#define ID			0
+#define B_SESS_VLD		1
+#define B_SUSPEND		2
+
+#define PM_QOS_SAMPLE_SEC	2
+#define PM_QOS_THRESHOLD	400
+
+struct dwc3_msm {
+	struct device *dev;
+	void __iomem *base;
+	void __iomem *ahb2phy_base;
+	struct platform_device	*dwc3;
+	const struct usb_ep_ops *original_ep_ops[DWC3_ENDPOINTS_NUM];
+	struct list_head req_complete_list;
+	struct clk		*xo_clk;
+	struct clk		*core_clk;
+	long			core_clk_rate;
+	long			core_clk_rate_hs;
+	struct clk		*iface_clk;
+	struct clk		*sleep_clk;
+	struct clk		*utmi_clk;
+	unsigned int		utmi_clk_rate;
+	struct clk		*utmi_clk_src;
+	struct clk		*bus_aggr_clk;
+	struct clk		*noc_aggr_clk;
+	struct clk		*cfg_ahb_clk;
+	struct reset_control	*core_reset;
+	struct regulator	*dwc3_gdsc;
+
+	struct usb_phy		*hs_phy, *ss_phy;
+
+	struct dbm		*dbm;
+
+	/* VBUS regulator for host mode */
+	struct regulator	*vbus_reg;
+	int			vbus_retry_count;
+	bool			resume_pending;
+	atomic_t                pm_suspended;
+	int			hs_phy_irq;
+	int			ss_phy_irq;
+	struct work_struct	resume_work;
+	struct work_struct	restart_usb_work;
+	bool			in_restart;
+	struct workqueue_struct *dwc3_wq;
+	struct delayed_work	sm_work;
+	unsigned long		inputs;
+	unsigned		max_power;
+	bool			charging_disabled;
+	enum usb_otg_state	otg_state;
+	enum usb_chg_state	chg_state;
+	struct work_struct	bus_vote_w;
+	unsigned int		bus_vote;
+	u32			bus_perf_client;
+	struct msm_bus_scale_pdata	*bus_scale_table;
+	struct power_supply	*usb_psy;
+	struct work_struct	vbus_draw_work;
+	bool			in_host_mode;
+	enum usb_device_speed	max_rh_port_speed;
+	unsigned int		tx_fifo_size;
+	bool			vbus_active;
+	bool			suspend;
+	bool			disable_host_mode_pm;
+	enum dwc3_id_state	id_state;
+	unsigned long		lpm_flags;
+#define MDWC3_SS_PHY_SUSPEND		BIT(0)
+#define MDWC3_ASYNC_IRQ_WAKE_CAPABILITY	BIT(1)
+#define MDWC3_POWER_COLLAPSE		BIT(2)
+
+	unsigned int		irq_to_affin;
+	struct notifier_block	dwc3_cpu_notifier;
+	struct notifier_block	usbdev_nb;
+	bool			hc_died;
+	bool			xhci_ss_compliance_enable;
+	bool			no_wakeup_src_in_hostmode;
+
+	struct extcon_dev	*extcon_vbus;
+	struct extcon_dev	*extcon_id;
+	struct notifier_block	vbus_nb;
+	struct notifier_block	id_nb;
+
+	struct notifier_block	host_nb;
+	bool			host_only_mode;
+
+	int			pwr_event_irq;
+	atomic_t                in_p3;
+	unsigned int		lpm_to_suspend_delay;
+	bool			init;
+	enum plug_orientation	typec_orientation;
+	int pm_qos_latency;
+	struct pm_qos_request pm_qos_req_dma;
+	struct delayed_work perf_vote_work;
+	struct delayed_work sdp_check;
+	bool usb_compliance_mode;
+	struct mutex suspend_resume_mutex;
+};
+
+#define USB_HSPHY_3P3_VOL_MIN		3050000 /* uV */
+#define USB_HSPHY_3P3_VOL_MAX		3300000 /* uV */
+#define USB_HSPHY_3P3_HPM_LOAD		16000	/* uA */
+
+#define USB_HSPHY_1P8_VOL_MIN		1800000 /* uV */
+#define USB_HSPHY_1P8_VOL_MAX		1800000 /* uV */
+#define USB_HSPHY_1P8_HPM_LOAD		19000	/* uA */
+
+#define USB_SSPHY_1P8_VOL_MIN		1800000 /* uV */
+#define USB_SSPHY_1P8_VOL_MAX		1800000 /* uV */
+#define USB_SSPHY_1P8_HPM_LOAD		23000	/* uA */
+
+#define DSTS_CONNECTSPD_SS		0x4
+
+static void dwc3_pwr_event_handler(struct dwc3_msm *mdwc);
+static int dwc3_msm_gadget_vbus_draw(struct dwc3_msm *mdwc, unsigned mA);
+
+/**
+ *
+ * Read register with debug info.
+ *
+ * @base - DWC3 base virtual address.
+ * @offset - register offset.
+ *
+ * @return u32
+ */
+static inline u32 dwc3_msm_read_reg(void *base, u32 offset)
+{
+	return ioread32(base + offset);
+}
+
+/**
+ * Read register masked field with debug info.
+ *
+ * @base - DWC3 base virtual address.
+ * @offset - register offset.
+ * @mask - register bitmask.
+ *
+ * @return u32
+ */
+static inline u32 dwc3_msm_read_reg_field(void *base,
+					  u32 offset,
+					  const u32 mask)
+{
+	u32 shift = find_first_bit((void *)&mask, 32);
+	u32 val = ioread32(base + offset);
+	val &= mask;		/* clear other bits */
+	val >>= shift;
+	return val;
+}
+
+/**
+ *
+ * Write register with debug info.
+ *
+ * @base - DWC3 base virtual address.
+ * @offset - register offset.
+ * @val - value to write.
+ *
+ */
+static inline void dwc3_msm_write_reg(void *base, u32 offset, u32 val)
+{
+	iowrite32(val, base + offset);
+}
+
+/**
+ * Write register masked field with debug info.
+ *
+ * @base - DWC3 base virtual address.
+ * @offset - register offset.
+ * @mask - register bitmask.
+ * @val - value to write.
+ *
+ */
+static inline void dwc3_msm_write_reg_field(void *base, u32 offset,
+					    const u32 mask, u32 val)
+{
+	u32 shift = find_first_bit((void *)&mask, 32);
+	u32 tmp = ioread32(base + offset);
+
+	tmp &= ~mask;		/* clear written bits */
+	val = tmp | (val << shift);
+	iowrite32(val, base + offset);
+}
+
+/**
+ * Write register and read back masked value to confirm it is written
+ *
+ * @base - DWC3 base virtual address.
+ * @offset - register offset.
+ * @mask - register bitmask specifying what should be updated
+ * @val - value to write.
+ *
+ */
+static inline void dwc3_msm_write_readback(void *base, u32 offset,
+					    const u32 mask, u32 val)
+{
+	u32 write_val, tmp = ioread32(base + offset);
+
+	tmp &= ~mask;		/* retain other bits */
+	write_val = tmp | val;
+
+	iowrite32(write_val, base + offset);
+
+	/* Read back to see if val was written */
+	tmp = ioread32(base + offset);
+	tmp &= mask;		/* clear other bits */
+
+	if (tmp != val)
+		pr_err("%s: write: %x to QSCRATCH: %x FAILED\n",
+			__func__, val, offset);
+}
+
+static bool dwc3_msm_is_ss_rhport_connected(struct dwc3_msm *mdwc)
+{
+	int i, num_ports;
+	u32 reg;
+
+	reg = dwc3_msm_read_reg(mdwc->base, USB3_HCSPARAMS1);
+	num_ports = HCS_MAX_PORTS(reg);
+
+	for (i = 0; i < num_ports; i++) {
+		reg = dwc3_msm_read_reg(mdwc->base, USB3_PORTSC + i*0x10);
+		if ((reg & PORT_CONNECT) && DEV_SUPERSPEED(reg))
+			return true;
+	}
+
+	return false;
+}
+
+static bool dwc3_msm_is_host_superspeed(struct dwc3_msm *mdwc)
+{
+	int i, num_ports;
+	u32 reg;
+
+	reg = dwc3_msm_read_reg(mdwc->base, USB3_HCSPARAMS1);
+	num_ports = HCS_MAX_PORTS(reg);
+
+	for (i = 0; i < num_ports; i++) {
+		reg = dwc3_msm_read_reg(mdwc->base, USB3_PORTSC + i*0x10);
+		if ((reg & PORT_PE) && DEV_SUPERSPEED(reg))
+			return true;
+	}
+
+	return false;
+}
+
+static inline bool dwc3_msm_is_dev_superspeed(struct dwc3_msm *mdwc)
+{
+	u8 speed;
+
+	speed = dwc3_msm_read_reg(mdwc->base, DWC3_DSTS) & DWC3_DSTS_CONNECTSPD;
+	return !!(speed & DSTS_CONNECTSPD_SS);
+}
+
+static inline bool dwc3_msm_is_superspeed(struct dwc3_msm *mdwc)
+{
+	if (mdwc->in_host_mode)
+		return dwc3_msm_is_host_superspeed(mdwc);
+
+	return dwc3_msm_is_dev_superspeed(mdwc);
+}
+
+int dwc3_msm_dbm_disable_updxfer(struct dwc3 *dwc, u8 usb_ep)
+{
+	struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
+
+	dev_dbg(mdwc->dev, "%s\n", __func__);
+	dwc3_dbm_disable_update_xfer(mdwc->dbm, usb_ep);
+
+	return 0;
+}
+
+#if IS_ENABLED(CONFIG_USB_DWC3_GADGET) || IS_ENABLED(CONFIG_USB_DWC3_DUAL_ROLE)
+/**
+ * Configure the DBM with the BAM's data fifo.
+ * This function is called by the USB BAM Driver
+ * upon initialization.
+ *
+ * @ep - pointer to usb endpoint.
+ * @addr - address of data fifo.
+ * @size - size of data fifo.
+ * @dst_pipe_idx - BAM destination pipe index.
+ */
+int msm_data_fifo_config(struct usb_ep *ep, phys_addr_t addr,
+			 u32 size, u8 dst_pipe_idx)
+{
+	struct dwc3_ep *dep = to_dwc3_ep(ep);
+	struct dwc3 *dwc = dep->dwc;
+	struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
+
+	dev_dbg(mdwc->dev, "%s\n", __func__);
+
+	return	dbm_data_fifo_config(mdwc->dbm, dep->number, addr, size,
+						dst_pipe_idx);
+}
+
+/**
+ * Cleanup for an MSM endpoint on request completion.
+ *
+ * Also calls the original request completion handler.
+ *
+ * @usb_ep - pointer to usb_ep instance.
+ * @request - pointer to usb_request instance.
+ */
+static void dwc3_msm_req_complete_func(struct usb_ep *ep,
+				       struct usb_request *request)
+{
+	struct dwc3_ep *dep = to_dwc3_ep(ep);
+	struct dwc3 *dwc = dep->dwc;
+	struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
+	struct dwc3_msm_req_complete *req_complete = NULL;
+
+	/* Find original request complete function and remove it from list */
+	list_for_each_entry(req_complete, &mdwc->req_complete_list, list_item) {
+		if (req_complete->req == request)
+			break;
+	}
+	if (!req_complete || req_complete->req != request) {
+		dev_err(dep->dwc->dev, "%s: could not find the request\n",
+					__func__);
+		return;
+	}
+	list_del(&req_complete->list_item);
+
+	/*
+	 * Release one more TRB to the pool since the DBM queue took 2 TRBs
+	 * (normal and link), while dwc3/gadget.c :: dwc3_gadget_giveback
+	 * released only one.
+	 */
+	dep->busy_slot++;
+
+	/* Unconfigure dbm ep */
+	dbm_ep_unconfig(mdwc->dbm, dep->number);
+
+	/*
+	 * If this is the last endpoint we unconfigured, then also reset
+	 * the event buffers; unless the ep is being unconfigured due to
+	 * lpm, in which case the event buffer only gets reset during the
+	 * block reset.
+	 */
+	if (dbm_get_num_of_eps_configured(mdwc->dbm) == 0 &&
+		!dbm_reset_ep_after_lpm(mdwc->dbm))
+			dbm_event_buffer_config(mdwc->dbm, 0, 0, 0);
+
+	/*
+	 * Call original complete function, notice that dwc->lock is already
+	 * taken by the caller of this function (dwc3_gadget_giveback()).
+	 */
+	request->complete = req_complete->orig_complete;
+	if (request->complete)
+		request->complete(ep, request);
+
+	kfree(req_complete);
+}
+
+/**
+ * Helper function: reset a DBM endpoint.
+ *
+ * @mdwc - pointer to dwc3_msm instance.
+ * @dep - pointer to dwc3_ep instance.
+ *
+ * @return int - 0 on success, negative on error.
+ */
+static int __dwc3_msm_dbm_ep_reset(struct dwc3_msm *mdwc, struct dwc3_ep *dep)
+{
+	int ret;
+
+	dev_dbg(mdwc->dev, "Resetting dbm endpoint %d\n", dep->number);
+
+	/* Reset the dbm endpoint */
+	ret = dbm_ep_soft_reset(mdwc->dbm, dep->number, true);
+	if (ret) {
+		dev_err(mdwc->dev, "%s: failed to assert dbm ep reset\n",
+				__func__);
+		return ret;
+	}
+
+	/*
+	 * The necessary delay between asserting and deasserting the dbm ep
+	 * reset is based on the number of active endpoints. If there is more
+	 * than one endpoint, a 1 msec delay is required. Otherwise, a shorter
+	 * delay will suffice.
+	 */
+	if (dbm_get_num_of_eps_configured(mdwc->dbm) > 1)
+		usleep_range(1000, 1200);
+	else
+		udelay(10);
+	ret = dbm_ep_soft_reset(mdwc->dbm, dep->number, false);
+	if (ret) {
+		dev_err(mdwc->dev, "%s: failed to deassert dbm ep reset\n",
+				__func__);
+		return ret;
+	}
+
+	return 0;
+}
+
+/**
+ * Reset the DBM endpoint which is linked to the given USB endpoint.
+ *
+ * @usb_ep - pointer to usb_ep instance.
+ *
+ * @return int - 0 on success, negative on error.
+ */
+int msm_dwc3_reset_dbm_ep(struct usb_ep *ep)
+{
+	struct dwc3_ep *dep = to_dwc3_ep(ep);
+	struct dwc3 *dwc = dep->dwc;
+	struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
+
+	return __dwc3_msm_dbm_ep_reset(mdwc, dep);
+}
+EXPORT_SYMBOL(msm_dwc3_reset_dbm_ep);
+
+/**
+ * Helper function.
+ * See the header of the dwc3_msm_ep_queue function.
+ *
+ * @dwc3_ep - pointer to dwc3_ep instance.
+ * @req - pointer to dwc3_request instance.
+ *
+ * @return int - 0 on success, negative on error.
+ */
+static int __dwc3_msm_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req)
+{
+	struct dwc3_trb *trb;
+	struct dwc3_trb *trb_link;
+	struct dwc3_gadget_ep_cmd_params params;
+	u32 cmd;
+	int ret = 0;
+
+	/*
+	 * We push the request to the dep->req_queued list to indicate that
+	 * this request is issued with start transfer. The request leaves
+	 * this list in two cases: either the transfer completes (not if the
+	 * transfer is endless, using circular TRBs with a link TRB), or a
+	 * stop transfer is issued, which the function driver can initiate
+	 * by calling dequeue.
+	 */
+	req->queued = true;
+	list_add_tail(&req->list, &dep->req_queued);
+
+	/* First, prepare a normal TRB pointing to the fake buffer */
+	trb = &dep->trb_pool[dep->free_slot & DWC3_TRB_MASK];
+	dep->free_slot++;
+	memset(trb, 0, sizeof(*trb));
+
+	req->trb = trb;
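+	/*
+	 * In DBM mode the buffer-pointer-high word carries the DBM_TRB_*
+	 * control bits (including the DBM EP number) rather than a buffer
+	 * address.
+	 */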
+	trb->bph = DBM_TRB_BIT | DBM_TRB_DMA | DBM_TRB_EP_NUM(dep->number);
+	trb->size = DWC3_TRB_SIZE_LENGTH(req->request.length);
+	trb->ctrl = DWC3_TRBCTL_NORMAL | DWC3_TRB_CTRL_HWO |
+		DWC3_TRB_CTRL_CHN | (req->direction ? 0 : DWC3_TRB_CTRL_CSP);
+	req->trb_dma = dwc3_trb_dma_offset(dep, trb);
+
+	/* Second, prepare a Link TRB that points to the first TRB */
+	trb_link = &dep->trb_pool[dep->free_slot & DWC3_TRB_MASK];
+	dep->free_slot++;
+	memset(trb_link, 0, sizeof(*trb_link));
+
+	trb_link->bpl = lower_32_bits(req->trb_dma);
+	trb_link->bph = DBM_TRB_BIT |
+			DBM_TRB_DMA | DBM_TRB_EP_NUM(dep->number);
+	trb_link->size = 0;
+	trb_link->ctrl = DWC3_TRBCTL_LINK_TRB | DWC3_TRB_CTRL_HWO;
+
+	/*
+	 * Now start the transfer
+	 */
+	memset(&params, 0, sizeof(params));
+	params.param0 = 0; /* TDAddr High */
+	params.param1 = lower_32_bits(req->trb_dma); /* DAddr Low */
+
+	/* DBM requires IOC to be set */
+	cmd = DWC3_DEPCMD_STARTTRANSFER | DWC3_DEPCMD_CMDIOC;
+	ret = dwc3_send_gadget_ep_cmd(dep->dwc, dep->number, cmd, &params);
+	if (ret < 0) {
+		dev_dbg(dep->dwc->dev,
+			"%s: failed to send STARTTRANSFER command\n",
+			__func__);
+
+		list_del(&req->list);
+		return ret;
+	}
+	dep->flags |= DWC3_EP_BUSY;
+	dep->resource_index = dwc3_gadget_ep_get_transfer_index(dep->dwc,
+		dep->number);
+
+	return ret;
+}
+
+/**
+ * Queue a usb request to the DBM endpoint.
+ * This function should be called after the endpoint
+ * was enabled by ep_enable.
+ *
+ * This function prepares a special structure of TRBs
+ * that the DBM HW expects, so that it is possible to use
+ * this endpoint in DBM mode.
+ *
+ * The TRBs prepared by this function are one normal TRB
+ * which points to a fake buffer, followed by a link TRB
+ * that points back to the first TRB.
+ *
+ * The API of this function follows the regular API of
+ * usb_ep_queue (see usb_ep_ops in include/linux/usb/gadget.h).
+ *
+ * @usb_ep - pointer to usb_ep instance.
+ * @request - pointer to usb_request instance.
+ * @gfp_flags - possible flags.
+ *
+ * @return int - 0 on success, negative on error.
+ */
+static int dwc3_msm_ep_queue(struct usb_ep *ep,
+			     struct usb_request *request, gfp_t gfp_flags)
+{
+	struct dwc3_request *req = to_dwc3_request(request);
+	struct dwc3_ep *dep = to_dwc3_ep(ep);
+	struct dwc3 *dwc = dep->dwc;
+	struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
+	struct dwc3_msm_req_complete *req_complete;
+	unsigned long flags;
+	int ret = 0, size;
+	bool superspeed;
+
+	/*
+	 * We must take the dwc3 core driver's lock (with interrupts
+	 * disabled) to be sure we are the only ones configuring the HW
+	 * device core, and we must finish queuing the request as soon as
+	 * possible so that the lock is released quickly.
+	 */
+	spin_lock_irqsave(&dwc->lock, flags);
+	if (!dep->endpoint.desc) {
+		dev_err(mdwc->dev,
+			"%s: trying to queue request %p to disabled ep %s\n",
+			__func__, request, ep->name);
+		spin_unlock_irqrestore(&dwc->lock, flags);
+		return -EPERM;
+	}
+
+	if (!mdwc->original_ep_ops[dep->number]) {
+		dev_err(mdwc->dev,
+			"ep [%s,%d] was unconfigured as msm endpoint\n",
+			ep->name, dep->number);
+		spin_unlock_irqrestore(&dwc->lock, flags);
+		return -EINVAL;
+	}
+
+	if (!request) {
+		dev_err(mdwc->dev, "%s: request is NULL\n", __func__);
+		spin_unlock_irqrestore(&dwc->lock, flags);
+		return -EINVAL;
+	}
+
+	if (!(request->udc_priv & MSM_SPS_MODE)) {
+		dev_err(mdwc->dev, "%s: sps mode is not set\n",
+					__func__);
+
+		spin_unlock_irqrestore(&dwc->lock, flags);
+		return -EINVAL;
+	}
+
+	/* HW restriction regarding TRB size (8KB) */
+	if (req->request.length < 0x2000) {
+		dev_err(mdwc->dev, "%s: Min TRB size is 8KB\n", __func__);
+		spin_unlock_irqrestore(&dwc->lock, flags);
+		return -EINVAL;
+	}
+
+	if (dep->number == 0 || dep->number == 1) {
+		dev_err(mdwc->dev,
+			"%s: trying to queue dbm request %p to control ep %s\n",
+			__func__, request, ep->name);
+		spin_unlock_irqrestore(&dwc->lock, flags);
+		return -EPERM;
+	}
+
+	if (dep->busy_slot != dep->free_slot || !list_empty(&dep->request_list)
+					 || !list_empty(&dep->req_queued)) {
+		dev_err(mdwc->dev,
+			"%s: trying to queue dbm request %p to ep %s\n",
+			__func__, request, ep->name);
+		spin_unlock_irqrestore(&dwc->lock, flags);
+		return -EPERM;
+	}
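+
+	/* start the DBM transfer with a fresh TRB ring */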
+	dep->busy_slot = 0;
+	dep->free_slot = 0;
+
+	/*
+	 * Override the req->complete function, but before doing that,
+	 * store its original pointer in the req_complete_list.
+	 */
+	req_complete = kzalloc(sizeof(*req_complete), gfp_flags);
+	if (!req_complete) {
+		dev_err(mdwc->dev, "%s: not enough memory\n", __func__);
+		spin_unlock_irqrestore(&dwc->lock, flags);
+		return -ENOMEM;
+	}
+	req_complete->req = request;
+	req_complete->orig_complete = request->complete;
+	list_add_tail(&req_complete->list_item, &mdwc->req_complete_list);
+	request->complete = dwc3_msm_req_complete_func;
+
+	dev_vdbg(dwc->dev, "%s: queuing request %pK to ep %s length %d\n",
+			__func__, request, ep->name, request->length);
+	size = dwc3_msm_read_reg(mdwc->base, DWC3_GEVNTSIZ(0));
+	dbm_event_buffer_config(mdwc->dbm,
+		dwc3_msm_read_reg(mdwc->base, DWC3_GEVNTADRLO(0)),
+		dwc3_msm_read_reg(mdwc->base, DWC3_GEVNTADRHI(0)),
+		DWC3_GEVNTSIZ_SIZE(size));
+
+	ret = __dwc3_msm_ep_queue(dep, req);
+	if (ret < 0) {
+		dev_err(mdwc->dev,
+			"error %d after calling __dwc3_msm_ep_queue\n", ret);
+		goto err;
+	}
+
+	spin_unlock_irqrestore(&dwc->lock, flags);
+	superspeed = dwc3_msm_is_dev_superspeed(mdwc);
+	dbm_set_speed(mdwc->dbm, (u8)superspeed);
+
+	return 0;
+
+err:
+	list_del(&req_complete->list_item);
+	spin_unlock_irqrestore(&dwc->lock, flags);
+	kfree(req_complete);
+	return ret;
+}
+
+/*
+ * Returns the XferRscIndex for the EP, which was stored at StartXfer
+ * (GSI EP OP).
+ *
+ * @usb_ep - pointer to usb_ep instance.
+ *
+ * @return int - XferRscIndex
+ */
+static inline int gsi_get_xfer_index(struct usb_ep *ep)
+{
+	struct dwc3_ep			*dep = to_dwc3_ep(ep);
+
+	return dep->resource_index;
+}
+
+/*
+ * Fills in the GSI channel information needed for the call to the
+ * IPA driver for GSI channel creation.
+ *
+ * @usb_ep - pointer to usb_ep instance.
+ * @ch_info - output parameter with requested channel info
+ */
+static void gsi_get_channel_info(struct usb_ep *ep,
+			struct gsi_channel_info *ch_info)
+{
+	struct dwc3_ep *dep = to_dwc3_ep(ep);
+	int last_trb_index = 0;
+	struct dwc3	*dwc = dep->dwc;
+	struct usb_gsi_request *request = ch_info->ch_req;
+
+	/* Provide physical USB addresses for DEPCMD and GEVENTCNT registers */
+	ch_info->depcmd_low_addr = (u32)(dwc->reg_phys +
+						DWC3_DEPCMD(dep->number));
+	ch_info->depcmd_hi_addr = 0;
+
+	ch_info->xfer_ring_base_addr = dwc3_trb_dma_offset(dep,
+							&dep->trb_pool[0]);
+	/* Convert to multiples of 1KB */
+	ch_info->const_buffer_size = request->buf_len/1024;
+
+	/* IN direction */
+	if (dep->direction) {
+		/*
+		 * Multiply by size of each TRB for xfer_ring_len in bytes.
+		 * 2n + 2 TRBs as per GSI h/w requirement. n Xfer TRBs + 1
+		 * extra Xfer TRB followed by n ZLP TRBs + 1 LINK TRB.
+		 */
+		ch_info->xfer_ring_len = (2 * request->num_bufs + 2) * 0x10;
+		last_trb_index = 2 * request->num_bufs + 2;
+	} else { /* OUT direction */
+		/*
+		 * Multiply by size of each TRB for xfer_ring_len in bytes.
+		 * n + 1 TRBs as per GSI h/w requirement. n Xfer TRBs + 1
+		 * LINK TRB.
+		 */
+		ch_info->xfer_ring_len = (request->num_bufs + 1) * 0x10;
+		last_trb_index = request->num_bufs + 1;
+	}
+
+	/* Store last 16 bits of LINK TRB address as per GSI hw requirement */
+	ch_info->last_trb_addr = (dwc3_trb_dma_offset(dep,
+			&dep->trb_pool[last_trb_index - 1]) & 0x0000FFFF);
+	ch_info->gevntcount_low_addr = (u32)(dwc->reg_phys +
+			DWC3_GEVNTCOUNT(ep->ep_intr_num));
+	ch_info->gevntcount_hi_addr = 0;
+
+	dev_dbg(dwc->dev,
+	"depcmd_laddr=%x last_trb_addr=%x gevtcnt_laddr=%x gevtcnt_haddr=%x",
+		ch_info->depcmd_low_addr, ch_info->last_trb_addr,
+		ch_info->gevntcount_low_addr, ch_info->gevntcount_hi_addr);
+}
+
+/*
+ * Perform StartXfer on a GSI EP. Stores the XferRscIndex.
+ *
+ * @usb_ep - pointer to usb_ep instance.
+ *
+ * @return int - 0 on success
+ */
+static int gsi_startxfer_for_ep(struct usb_ep *ep)
+{
+	int ret;
+	struct dwc3_gadget_ep_cmd_params params;
+	u32				cmd;
+	struct dwc3_ep *dep = to_dwc3_ep(ep);
+	struct dwc3	*dwc = dep->dwc;
+
+	memset(&params, 0, sizeof(params));
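+	/*
+	 * param0 is TDAddr High for StartXfer: the GSI flag bits (TRB
+	 * address bits 53/55) and the EP's interrupter number (at bit 16
+	 * of the high word) are encoded there.
+	 */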
+	params.param0 = GSI_TRB_ADDR_BIT_53_MASK | GSI_TRB_ADDR_BIT_55_MASK;
+	params.param0 |= (ep->ep_intr_num << 16);
+	params.param1 = lower_32_bits(dwc3_trb_dma_offset(dep,
+						&dep->trb_pool[0]));
+	cmd = DWC3_DEPCMD_STARTTRANSFER;
+	cmd |= DWC3_DEPCMD_PARAM(0);
+	ret = dwc3_send_gadget_ep_cmd(dwc, dep->number, cmd, &params);
+
+	if (ret < 0)
+		dev_dbg(dwc->dev, "Fail StrtXfr on GSI EP#%d\n", dep->number);
+	dep->resource_index = dwc3_gadget_ep_get_transfer_index(dwc,
+								dep->number);
+	dev_dbg(dwc->dev, "XferRsc = %x", dep->resource_index);
+	return ret;
+}
+
+/*
+ * Store the Ring Base and Doorbell Address for a GSI EP,
+ * for GSI channel creation.
+ *
+ * @usb_ep - pointer to usb_ep instance.
+ * @dbl_addr - Doorbell address obtained from IPA driver
+ */
+static void gsi_store_ringbase_dbl_info(struct usb_ep *ep, u32 dbl_addr)
+{
+	struct dwc3_ep *dep = to_dwc3_ep(ep);
+	struct dwc3	*dwc = dep->dwc;
+	struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
+	int n = ep->ep_intr_num - 1;
+
+	dwc3_msm_write_reg(mdwc->base, GSI_RING_BASE_ADDR_L(n),
+			dwc3_trb_dma_offset(dep, &dep->trb_pool[0]));
+	dwc3_msm_write_reg(mdwc->base, GSI_DBL_ADDR_L(n), dbl_addr);
+
+	dev_dbg(mdwc->dev, "Ring Base Addr %d = %x", n,
+			dwc3_msm_read_reg(mdwc->base, GSI_RING_BASE_ADDR_L(n)));
+	dev_dbg(mdwc->dev, "GSI DB Addr %d = %x", n,
+			dwc3_msm_read_reg(mdwc->base, GSI_DBL_ADDR_L(n)));
+}
+
+/*
+ * Rings the doorbell for the IN GSI channel.
+ *
+ * @usb_ep - pointer to usb_ep instance.
+ * @request - pointer to GSI request. This is used to pass in the
+ * address of the GSI doorbell obtained from the IPA driver.
+ */
+static void gsi_ring_in_db(struct usb_ep *ep, struct usb_gsi_request *request)
+{
+	void __iomem *gsi_dbl_address_lsb;
+	void __iomem *gsi_dbl_address_msb;
+	dma_addr_t offset;
+	u64 dbl_addr = *((u64 *)request->buf_base_addr);
+	u32 dbl_lo_addr = (dbl_addr & 0xFFFFFFFF);
+	u32 dbl_hi_addr = (dbl_addr >> 32);
+	u32 num_trbs = (request->num_bufs * 2 + 2);
+	struct dwc3_ep *dep = to_dwc3_ep(ep);
+	struct dwc3	*dwc = dep->dwc;
+	struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
+
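+	/*
+	 * The IPA driver hands over the physical address of the GSI
+	 * doorbell through buf_base_addr; map it and write the link TRB's
+	 * DMA address to it so that GSI starts processing the ring.
+	 */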
+	gsi_dbl_address_lsb = devm_ioremap_nocache(mdwc->dev,
+					dbl_lo_addr, sizeof(u32));
+	if (!gsi_dbl_address_lsb) {
+		dev_err(mdwc->dev, "Failed to map GSI DBL address LSB\n");
+		return;
+	}
+
+	gsi_dbl_address_msb = devm_ioremap_nocache(mdwc->dev,
+					dbl_hi_addr, sizeof(u32));
+	if (!gsi_dbl_address_msb) {
+		dev_err(mdwc->dev, "Failed to map GSI DBL address MSB\n");
+		return;
+	}
+
+	offset = dwc3_trb_dma_offset(dep, &dep->trb_pool[num_trbs-1]);
+	dev_dbg(mdwc->dev, "Writing link TRB addr: %pa to %pK (%x)\n",
+	&offset, gsi_dbl_address_lsb, dbl_lo_addr);
+
+	writel_relaxed(offset, gsi_dbl_address_lsb);
+	writel_relaxed(0, gsi_dbl_address_msb);
+}
+
+/*
+ * Sets the HWO bit for the TRBs and performs UpdateXfer for an OUT EP.
+ *
+ * @usb_ep - pointer to usb_ep instance.
+ * @request - pointer to GSI request. Used to determine the number of
+ * TRBs for the OUT EP.
+ *
+ * @return int - 0 on success
+ */
+static int gsi_updatexfer_for_ep(struct usb_ep *ep,
+					struct usb_gsi_request *request)
+{
+	int i;
+	int ret;
+	u32				cmd;
+	int num_trbs = request->num_bufs + 1;
+	struct dwc3_trb *trb;
+	struct dwc3_gadget_ep_cmd_params params;
+	struct dwc3_ep *dep = to_dwc3_ep(ep);
+	struct dwc3 *dwc = dep->dwc;
+
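+	/* re-arm the data TRBs by handing ownership (HWO) back to the HW */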
+	for (i = 0; i < num_trbs - 1; i++) {
+		trb = &dep->trb_pool[i];
+		trb->ctrl |= DWC3_TRB_CTRL_HWO;
+	}
+
+	memset(&params, 0, sizeof(params));
+	cmd = DWC3_DEPCMD_UPDATETRANSFER;
+	cmd |= DWC3_DEPCMD_PARAM(dep->resource_index);
+	ret = dwc3_send_gadget_ep_cmd(dwc, dep->number, cmd, &params);
+	dep->flags |= DWC3_EP_BUSY;
+	if (ret < 0)
+		dev_dbg(dwc->dev, "UpdateXfr fail on GSI EP#%d\n", dep->number);
+	return ret;
+}
+
+/*
+ * Perform EndXfer on a particular GSI EP.
+ *
+ * @usb_ep - pointer to usb_ep instance.
+ */
+static void gsi_endxfer_for_ep(struct usb_ep *ep)
+{
+	struct dwc3_ep *dep = to_dwc3_ep(ep);
+	struct dwc3	*dwc = dep->dwc;
+
+	dwc3_stop_active_transfer(dwc, dep->number, true);
+}
+
+/*
+ * Allocates and configures TRBs for GSI EPs.
+ *
+ * @usb_ep - pointer to usb_ep instance.
+ * @request - pointer to GSI request.
+ *
+ * @return int - 0 on success
+ */
+static int gsi_prepare_trbs(struct usb_ep *ep, struct usb_gsi_request *req)
+{
+	int i = 0;
+	dma_addr_t buffer_addr = req->dma;
+	struct dwc3_ep *dep = to_dwc3_ep(ep);
+	struct dwc3		*dwc = dep->dwc;
+	struct dwc3_trb *trb;
+	int num_trbs = (dep->direction) ? (2 * (req->num_bufs) + 2)
+					: (req->num_bufs + 1);
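+	/*
+	 * The loops below lay out: for IN, n+1 ZLP TRBs followed by n data
+	 * TRBs and one link TRB back to the start; for OUT, n data TRBs
+	 * plus the link TRB (n = req->num_bufs).
+	 */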
+
+	dep->trb_dma_pool = dma_pool_create(ep->name, dwc->dev,
+					num_trbs * sizeof(struct dwc3_trb),
+					num_trbs * sizeof(struct dwc3_trb), 0);
+	if (!dep->trb_dma_pool) {
+		dev_err(dep->dwc->dev, "failed to alloc trb dma pool for %s\n",
+				dep->name);
+		return -ENOMEM;
+	}
+
+	dep->num_trbs = num_trbs;
+
+	dep->trb_pool = dma_pool_alloc(dep->trb_dma_pool,
+					   GFP_KERNEL, &dep->trb_pool_dma);
+	if (!dep->trb_pool) {
+		dev_err(dep->dwc->dev, "failed to allocate trb pool for %s\n",
+				dep->name);
+		dma_pool_destroy(dep->trb_dma_pool);
+		dep->trb_dma_pool = NULL;
+		return -ENOMEM;
+	}
+
+	/* IN direction */
+	if (dep->direction) {
+		for (i = 0; i < num_trbs ; i++) {
+			trb = &dep->trb_pool[i];
+			memset(trb, 0, sizeof(*trb));
+			/* Set up first n+1 TRBs for ZLPs */
+			if (i < (req->num_bufs + 1)) {
+				trb->bpl = 0;
+				trb->bph = 0;
+				trb->size = 0;
+				trb->ctrl = DWC3_TRBCTL_NORMAL
+						| DWC3_TRB_CTRL_IOC;
+				continue;
+			}
+
+			/* Setup n TRBs pointing to valid buffers */
+			trb->bpl = lower_32_bits(buffer_addr);
+			trb->bph = 0;
+			trb->size = 0;
+			trb->ctrl = DWC3_TRBCTL_NORMAL
+					| DWC3_TRB_CTRL_IOC;
+			buffer_addr += req->buf_len;
+
+			/* Set up the Link TRB at the end */
+			if (i == (num_trbs - 1)) {
+				trb->bpl = dwc3_trb_dma_offset(dep,
+							&dep->trb_pool[0]);
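+				/*
+				 * bph bits 21/23 are TRB address bits
+				 * 53/55, i.e. the same GSI flag bits
+				 * (GSI_TRB_ADDR_BIT_53/55_MASK) programmed
+				 * at StartXfer; the EP's interrupter
+				 * number starts at bit 16.
+				 */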
+				trb->bph = (1 << 23) | (1 << 21)
+						| (ep->ep_intr_num << 16);
+				trb->size = 0;
+				trb->ctrl = DWC3_TRBCTL_LINK_TRB
+						| DWC3_TRB_CTRL_HWO;
+			}
+		}
+	} else { /* OUT direction */
+
+		for (i = 0; i < num_trbs ; i++) {
+
+			trb = &dep->trb_pool[i];
+			memset(trb, 0, sizeof(*trb));
+			trb->bpl = lower_32_bits(buffer_addr);
+			trb->bph = 0;
+			trb->size = req->buf_len;
+			trb->ctrl = DWC3_TRBCTL_NORMAL | DWC3_TRB_CTRL_IOC
+					| DWC3_TRB_CTRL_CSP
+					| DWC3_TRB_CTRL_ISP_IMI;
+			buffer_addr += req->buf_len;
+
+			/* Set up the Link TRB at the end */
+			if (i == (num_trbs - 1)) {
+				trb->bpl = dwc3_trb_dma_offset(dep,
+							&dep->trb_pool[0]);
+				trb->bph = (1 << 23) | (1 << 21)
+						| (ep->ep_intr_num << 16);
+				trb->size = 0;
+				trb->ctrl = DWC3_TRBCTL_LINK_TRB
+						| DWC3_TRB_CTRL_HWO;
+			}
+		 }
+	}
+	return 0;
+}
+
+/*
+ * Frees TRBs for GSI EPs.
+ *
+ * @usb_ep - pointer to usb_ep instance.
+ */
+static void gsi_free_trbs(struct usb_ep *ep)
+{
+	struct dwc3_ep *dep = to_dwc3_ep(ep);
+
+	if (dep->endpoint.ep_type == EP_TYPE_NORMAL)
+		return;
+
+	/*  Free TRBs and TRB pool for EP */
+	if (dep->trb_dma_pool) {
+		dma_pool_free(dep->trb_dma_pool, dep->trb_pool,
+						dep->trb_pool_dma);
+		dma_pool_destroy(dep->trb_dma_pool);
+		dep->trb_pool = NULL;
+		dep->trb_pool_dma = 0;
+		dep->trb_dma_pool = NULL;
+	}
+}
+
+/*
+ * Configures GSI EPs. For GSI EPs we need to set interrupter numbers.
+ *
+ * @usb_ep - pointer to usb_ep instance.
+ * @request - pointer to GSI request.
+ */
+static void gsi_configure_ep(struct usb_ep *ep, struct usb_gsi_request *request)
+{
+	struct dwc3_ep *dep = to_dwc3_ep(ep);
+	struct dwc3		*dwc = dep->dwc;
+	struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
+	struct dwc3_gadget_ep_cmd_params params;
+	const struct usb_endpoint_descriptor *desc = ep->desc;
+	const struct usb_ss_ep_comp_descriptor *comp_desc = ep->comp_desc;
+	u32			reg;
+
+	memset(&params, 0x00, sizeof(params));
+
+	/* Configure GSI EP */
+	params.param0 = DWC3_DEPCFG_EP_TYPE(usb_endpoint_type(desc))
+		| DWC3_DEPCFG_MAX_PACKET_SIZE(usb_endpoint_maxp(desc));
+
+	/* Burst size is only needed in SuperSpeed mode */
+	if (dwc->gadget.speed == USB_SPEED_SUPER) {
+		u32 burst = dep->endpoint.maxburst - 1;
+
+		params.param0 |= DWC3_DEPCFG_BURST_SIZE(burst);
+	}
+
+	if (usb_ss_max_streams(comp_desc) && usb_endpoint_xfer_bulk(desc)) {
+		params.param1 |= DWC3_DEPCFG_STREAM_CAPABLE
+					| DWC3_DEPCFG_STREAM_EVENT_EN;
+		dep->stream_capable = true;
+	}
+
+	/* Set EP number */
+	params.param1 |= DWC3_DEPCFG_EP_NUMBER(dep->number);
+
+	/* Set interrupter number for GSI endpoints */
+	params.param1 |= DWC3_DEPCFG_INT_NUM(ep->ep_intr_num);
+
+	/* Enable XferInProgress and XferComplete Interrupts */
+	params.param1 |= DWC3_DEPCFG_XFER_COMPLETE_EN;
+	params.param1 |= DWC3_DEPCFG_XFER_IN_PROGRESS_EN;
+	params.param1 |= DWC3_DEPCFG_FIFO_ERROR_EN;
+	/*
+	 * Assign the TX FIFO number for IN endpoints. We must use the
+	 * lower 16 TX FIFOs even though the HW might have more.
+	 */
+	if (dep->direction)
+		params.param0 |= DWC3_DEPCFG_FIFO_NUMBER(dep->number >> 1);
+
+	params.param0 |= DWC3_DEPCFG_ACTION_INIT;
+
+	dev_dbg(mdwc->dev, "Set EP config to params = %x %x %x, for %s\n",
+	params.param0, params.param1, params.param2, dep->name);
+
+	dwc3_send_gadget_ep_cmd(dwc, dep->number,
+				DWC3_DEPCMD_SETEPCONFIG, &params);
+
+	/* Set XferRsc Index for GSI EP */
+	if (!(dep->flags & DWC3_EP_ENABLED)) {
+		memset(&params, 0x00, sizeof(params));
+		params.param0 = DWC3_DEPXFERCFG_NUM_XFER_RES(1);
+		dwc3_send_gadget_ep_cmd(dwc, dep->number,
+				DWC3_DEPCMD_SETTRANSFRESOURCE, &params);
+
+		dep->endpoint.desc = desc;
+		dep->comp_desc = comp_desc;
+		dep->type = usb_endpoint_type(desc);
+		dep->flags |= DWC3_EP_ENABLED;
+		reg = dwc3_readl(dwc->regs, DWC3_DALEPENA);
+		reg |= DWC3_DALEPENA_EP(dep->number);
+		dwc3_writel(dwc->regs, DWC3_DALEPENA, reg);
+	}
+}
+
+/*
+ * Enables the USB wrapper for GSI.
+ *
+ * @usb_ep - pointer to usb_ep instance.
+ */
+static void gsi_enable(struct usb_ep *ep)
+{
+	struct dwc3_ep *dep = to_dwc3_ep(ep);
+	struct dwc3 *dwc = dep->dwc;
+	struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
+
+	dwc3_msm_write_reg_field(mdwc->base,
+			GSI_GENERAL_CFG_REG, GSI_CLK_EN_MASK, 1);
+	dwc3_msm_write_reg_field(mdwc->base,
+			GSI_GENERAL_CFG_REG, GSI_RESTART_DBL_PNTR_MASK, 1);
+	dwc3_msm_write_reg_field(mdwc->base,
+			GSI_GENERAL_CFG_REG, GSI_RESTART_DBL_PNTR_MASK, 0);
+	dev_dbg(mdwc->dev, "%s: Enable GSI\n", __func__);
+	dwc3_msm_write_reg_field(mdwc->base,
+			GSI_GENERAL_CFG_REG, GSI_EN_MASK, 1);
+}
+
+/*
+ * Block or allow the doorbell towards GSI.
+ *
+ * @usb_ep - pointer to usb_ep instance.
+ * @block_db - set to block the doorbell, clear to allow it.
+ */
+static void gsi_set_clear_dbell(struct usb_ep *ep,
+					bool block_db)
+{
+	struct dwc3_ep *dep = to_dwc3_ep(ep);
+	struct dwc3 *dwc = dep->dwc;
+	struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
+
+	dwc3_msm_write_reg_field(mdwc->base,
+		GSI_GENERAL_CFG_REG, BLOCK_GSI_WR_GO_MASK, block_db);
+}
+
+/*
+ * Performs necessary checks before stopping GSI channels.
+ *
+ * @usb_ep - pointer to usb_ep instance to access DWC3 regs
+ * @f_suspend - true when handling Function Suspend
+ */
+static bool gsi_check_ready_to_suspend(struct usb_ep *ep, bool f_suspend)
+{
+	u32	timeout = 500;
+	u32	reg = 0;
+	struct dwc3_ep *dep = to_dwc3_ep(ep);
+	struct dwc3 *dwc = dep->dwc;
+	struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
+
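+	/* poll GSI_IF_STS until the GSI write controller goes idle (~10ms) */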
+	while (dwc3_msm_read_reg_field(mdwc->base,
+		GSI_IF_STS, GSI_WR_CTRL_STATE_MASK)) {
+		if (!timeout--) {
+			dev_err(mdwc->dev,
+			"Unable to suspend GSI ch. WR_CTRL_STATE != 0\n");
+			return false;
+		}
+		usleep_range(20, 22);
+	}
+	/* Check for U3 only if we are not handling Function Suspend */
+	if (!f_suspend) {
+		reg = dwc3_readl(dwc->regs, DWC3_DSTS);
+		if (DWC3_DSTS_USBLNKST(reg) != DWC3_LINK_STATE_U3) {
+			dev_err(mdwc->dev, "Unable to suspend GSI ch\n");
+			return false;
+		}
+	}
+
+	return true;
+}
+
+/**
+ * Performs GSI operations or GSI EP related operations.
+ *
+ * @usb_ep - pointer to usb_ep instance.
+ * @op_data - pointer to opcode related data.
+ * @op - GSI related or GSI EP related op code.
+ *
+ * @return int - 0 on success, negative on error.
+ * Also returns the XferRscIdx for GSI_EP_OP_GET_XFER_IDX.
+ */
+static int dwc3_msm_gsi_ep_op(struct usb_ep *ep,
+		void *op_data, enum gsi_ep_op op)
+{
+	int ret = 0;
+	struct dwc3_ep *dep = to_dwc3_ep(ep);
+	struct dwc3 *dwc = dep->dwc;
+	struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
+	struct usb_gsi_request *request;
+	struct gsi_channel_info *ch_info;
+	bool block_db, f_suspend;
+	unsigned long flags;
+
+	switch (op) {
+	case GSI_EP_OP_PREPARE_TRBS:
+		request = (struct usb_gsi_request *)op_data;
+		dev_dbg(mdwc->dev, "EP_OP_PREPARE_TRBS for %s\n", ep->name);
+		ret = gsi_prepare_trbs(ep, request);
+		break;
+	case GSI_EP_OP_FREE_TRBS:
+		dev_dbg(mdwc->dev, "EP_OP_FREE_TRBS for %s\n", ep->name);
+		gsi_free_trbs(ep);
+		break;
+	case GSI_EP_OP_CONFIG:
+		request = (struct usb_gsi_request *)op_data;
+		dev_dbg(mdwc->dev, "EP_OP_CONFIG for %s\n", ep->name);
+		spin_lock_irqsave(&dwc->lock, flags);
+		gsi_configure_ep(ep, request);
+		spin_unlock_irqrestore(&dwc->lock, flags);
+		break;
+	case GSI_EP_OP_STARTXFER:
+		dev_dbg(mdwc->dev, "EP_OP_STARTXFER for %s\n", ep->name);
+		spin_lock_irqsave(&dwc->lock, flags);
+		ret = gsi_startxfer_for_ep(ep);
+		spin_unlock_irqrestore(&dwc->lock, flags);
+		break;
+	case GSI_EP_OP_GET_XFER_IDX:
+		dev_dbg(mdwc->dev, "EP_OP_GET_XFER_IDX for %s\n", ep->name);
+		ret = gsi_get_xfer_index(ep);
+		break;
+	case GSI_EP_OP_STORE_DBL_INFO:
+		dev_dbg(mdwc->dev, "EP_OP_STORE_DBL_INFO\n");
+		gsi_store_ringbase_dbl_info(ep, *((u32 *)op_data));
+		break;
+	case GSI_EP_OP_ENABLE_GSI:
+		dev_dbg(mdwc->dev, "EP_OP_ENABLE_GSI\n");
+		gsi_enable(ep);
+		break;
+	case GSI_EP_OP_GET_CH_INFO:
+		ch_info = (struct gsi_channel_info *)op_data;
+		gsi_get_channel_info(ep, ch_info);
+		break;
+	case GSI_EP_OP_RING_IN_DB:
+		request = (struct usb_gsi_request *)op_data;
+		dev_dbg(mdwc->dev, "RING IN EP DB\n");
+		gsi_ring_in_db(ep, request);
+		break;
+	case GSI_EP_OP_UPDATEXFER:
+		request = (struct usb_gsi_request *)op_data;
+		dev_dbg(mdwc->dev, "EP_OP_UPDATEXFER\n");
+		spin_lock_irqsave(&dwc->lock, flags);
+		ret = gsi_updatexfer_for_ep(ep, request);
+		spin_unlock_irqrestore(&dwc->lock, flags);
+		break;
+	case GSI_EP_OP_ENDXFER:
+		request = (struct usb_gsi_request *)op_data;
+		dev_dbg(mdwc->dev, "EP_OP_ENDXFER for %s\n", ep->name);
+		spin_lock_irqsave(&dwc->lock, flags);
+		gsi_endxfer_for_ep(ep);
+		spin_unlock_irqrestore(&dwc->lock, flags);
+		break;
+	case GSI_EP_OP_SET_CLR_BLOCK_DBL:
+		block_db = *((bool *)op_data);
+		dev_dbg(mdwc->dev, "EP_OP_SET_CLR_BLOCK_DBL %d\n",
+						block_db);
+		gsi_set_clear_dbell(ep, block_db);
+		break;
+	case GSI_EP_OP_CHECK_FOR_SUSPEND:
+		dev_dbg(mdwc->dev, "EP_OP_CHECK_FOR_SUSPEND\n");
+		f_suspend = *((bool *)op_data);
+		ret = gsi_check_ready_to_suspend(ep, f_suspend);
+		break;
+	case GSI_EP_OP_DISABLE:
+		dev_dbg(mdwc->dev, "EP_OP_DISABLE\n");
+		ret = ep->ops->disable(ep);
+		break;
+	default:
+		dev_err(mdwc->dev, "%s: Invalid opcode GSI EP\n", __func__);
+	}
+
+	return ret;
+}
+
+/**
+ * Configure MSM endpoint.
+ * This function applies configurations to an endpoint that
+ * need a specific implementation in the MSM architecture.
+ *
+ * This function should be called by a usb function/class
+ * layer which needs support from the MSM-specific HW
+ * wrapping the USB3 core (such as GSI or DBM specific endpoints).
+ *
+ * @ep - a pointer to some usb_ep instance
+ *
+ * @return int - 0 on success, negative on error.
+ */
+int msm_ep_config(struct usb_ep *ep, struct usb_request *request)
+{
+	struct dwc3_ep *dep = to_dwc3_ep(ep);
+	struct dwc3 *dwc = dep->dwc;
+	struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
+	struct usb_ep_ops *new_ep_ops;
+	int ret = 0;
+	u8 bam_pipe;
+	bool producer;
+	bool disable_wb;
+	bool internal_mem;
+	bool ioc;
+	unsigned long flags;
+
+	spin_lock_irqsave(&dwc->lock, flags);
+	/* Save original ep ops for future restore */
+	if (mdwc->original_ep_ops[dep->number]) {
+		dev_err(mdwc->dev,
+			"ep [%s,%d] already configured as msm endpoint\n",
+			ep->name, dep->number);
+		spin_unlock_irqrestore(&dwc->lock, flags);
+		return -EPERM;
+	}
+	mdwc->original_ep_ops[dep->number] = ep->ops;
+
+	/* Set new usb ops as we like */
+	new_ep_ops = kzalloc(sizeof(struct usb_ep_ops), GFP_ATOMIC);
+	if (!new_ep_ops) {
+		dev_err(mdwc->dev,
+			"%s: unable to allocate mem for new usb ep ops\n",
+			__func__);
+		spin_unlock_irqrestore(&dwc->lock, flags);
+		return -ENOMEM;
+	}
+	(*new_ep_ops) = (*ep->ops);
+	new_ep_ops->queue = dwc3_msm_ep_queue;
+	new_ep_ops->gsi_ep_op = dwc3_msm_gsi_ep_op;
+	ep->ops = new_ep_ops;
+
+	if (!mdwc->dbm || !request || (dep->endpoint.ep_type == EP_TYPE_GSI)) {
+		spin_unlock_irqrestore(&dwc->lock, flags);
+		return 0;
+	}
+
+	/*
+	 * Configure the DBM endpoint if required.
+	 */
+	bam_pipe = request->udc_priv & MSM_PIPE_ID_MASK;
+	producer = ((request->udc_priv & MSM_PRODUCER) ? true : false);
+	disable_wb = ((request->udc_priv & MSM_DISABLE_WB) ? true : false);
+	internal_mem = ((request->udc_priv & MSM_INTERNAL_MEM) ? true : false);
+	ioc = ((request->udc_priv & MSM_ETD_IOC) ? true : false);
+
+	ret = dbm_ep_config(mdwc->dbm, dep->number, bam_pipe, producer,
+					disable_wb, internal_mem, ioc);
+	if (ret < 0) {
+		dev_err(mdwc->dev,
+			"error %d after calling dbm_ep_config\n", ret);
+		spin_unlock_irqrestore(&dwc->lock, flags);
+		return ret;
+	}
+
+	spin_unlock_irqrestore(&dwc->lock, flags);
+
+	return 0;
+}
+EXPORT_SYMBOL(msm_ep_config);
+
+/**
+ * Un-configure MSM endpoint.
+ * Tear down the configurations done in the
+ * msm_ep_config function.
+ *
+ * @ep - a pointer to some usb_ep instance
+ *
+ * @return int - 0 on success, negative on error.
+ */
+int msm_ep_unconfig(struct usb_ep *ep)
+{
+	struct dwc3_ep *dep = to_dwc3_ep(ep);
+	struct dwc3 *dwc = dep->dwc;
+	struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
+	struct usb_ep_ops *old_ep_ops;
+	unsigned long flags;
+
+	spin_lock_irqsave(&dwc->lock, flags);
+	/* Restore original ep ops */
+	if (!mdwc->original_ep_ops[dep->number]) {
+		dev_err(mdwc->dev,
+			"ep [%s,%d] was not configured as msm endpoint\n",
+			ep->name, dep->number);
+		spin_unlock_irqrestore(&dwc->lock, flags);
+		return -EINVAL;
+	}
+	old_ep_ops = (struct usb_ep_ops	*)ep->ops;
+	ep->ops = mdwc->original_ep_ops[dep->number];
+	mdwc->original_ep_ops[dep->number] = NULL;
+	kfree(old_ep_ops);
+
+	/*
+	 * Perform here any additional MSM-specific endpoint
+	 * un-configuration.
+	 */
+	if (!mdwc->dbm || (dep->endpoint.ep_type == EP_TYPE_GSI)) {
+		spin_unlock_irqrestore(&dwc->lock, flags);
+		return 0;
+	}
+
+	if (dep->busy_slot == dep->free_slot && list_empty(&dep->request_list)
+					 && list_empty(&dep->req_queued)) {
+		dev_dbg(mdwc->dev,
+			"%s: request is not queued, disable DBM ep for ep %s\n",
+			__func__, ep->name);
+		/* Unconfigure dbm ep */
+		dbm_ep_unconfig(mdwc->dbm, dep->number);
+
+		/*
+		 * If this is the last endpoint we unconfigured, then also
+		 * reset the event buffers; unless the ep is being
+		 * unconfigured due to lpm, in which case the event buffer
+		 * only gets reset during the block reset.
+		 */
+		if (dbm_get_num_of_eps_configured(mdwc->dbm) == 0 &&
+				!dbm_reset_ep_after_lpm(mdwc->dbm))
+			dbm_event_buffer_config(mdwc->dbm, 0, 0, 0);
+	}
+
+	spin_unlock_irqrestore(&dwc->lock, flags);
+
+	return 0;
+}
+EXPORT_SYMBOL(msm_ep_unconfig);
+#endif /* (CONFIG_USB_DWC3_GADGET) || (CONFIG_USB_DWC3_DUAL_ROLE) */
+
+static void dwc3_resume_work(struct work_struct *w);
+
+static void dwc3_restart_usb_work(struct work_struct *w)
+{
+	struct dwc3_msm *mdwc = container_of(w, struct dwc3_msm,
+						restart_usb_work);
+	struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
+	unsigned timeout = 50;
+
+	dev_dbg(mdwc->dev, "%s\n", __func__);
+
+	if (atomic_read(&dwc->in_lpm) || !dwc->is_drd) {
+		dev_dbg(mdwc->dev, "%s failed!!!\n", __func__);
+		return;
+	}
+
+	/* guard against concurrent VBUS handling */
+	mdwc->in_restart = true;
+
+	if (!mdwc->vbus_active) {
+		dev_dbg(mdwc->dev, "%s bailing out in disconnect\n", __func__);
+		dwc->err_evt_seen = false;
+		mdwc->in_restart = false;
+		return;
+	}
+
+	dbg_event(0xFF, "RestartUSB", 0);
+
+	/* Reset active USB connection */
+	dwc3_resume_work(&mdwc->resume_work);
+
+	/* Make sure disconnect is processed before sending connect */
+	while (--timeout && !pm_runtime_suspended(mdwc->dev))
+		msleep(20);
+
+	if (!timeout) {
+		dev_dbg(mdwc->dev,
+			"Not in LPM after disconnect, forcing suspend...\n");
+		dbg_event(0xFF, "ReStart:RT SUSP",
+			atomic_read(&mdwc->dev->power.usage_count));
+		pm_runtime_suspend(mdwc->dev);
+	}
+
+	mdwc->in_restart = false;
+	/* Force reconnect only if cable is still connected */
+	if (mdwc->vbus_active)
+		dwc3_resume_work(&mdwc->resume_work);
+
+	dwc->err_evt_seen = false;
+	flush_delayed_work(&mdwc->sm_work);
+}
+
+static int msm_dwc3_usbdev_notify(struct notifier_block *self,
+			unsigned long action, void *priv)
+{
+	struct dwc3_msm *mdwc = container_of(self, struct dwc3_msm, usbdev_nb);
+	struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
+	struct usb_bus *bus = priv;
+
+	/* Interested only in recovery when HC dies */
+	if (action != USB_BUS_DIED)
+		return 0;
+
+	dev_dbg(mdwc->dev, "%s initiate recovery from hc_died\n", __func__);
+	/* Recovery already under process */
+	if (mdwc->hc_died)
+		return 0;
+
+	if (bus->controller != &dwc->xhci->dev) {
+		dev_dbg(mdwc->dev, "%s event for diff HCD\n", __func__);
+		return 0;
+	}
+
+	mdwc->hc_died = true;
+	schedule_delayed_work(&mdwc->sm_work, 0);
+	return 0;
+}
+
+
+/*
+ * Check whether the DWC3 requires resetting the ep
+ * after going to Low Power Mode (lpm)
+ */
+bool msm_dwc3_reset_ep_after_lpm(struct usb_gadget *gadget)
+{
+	struct dwc3 *dwc = container_of(gadget, struct dwc3, gadget);
+	struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
+
+	return dbm_reset_ep_after_lpm(mdwc->dbm);
+}
+EXPORT_SYMBOL(msm_dwc3_reset_ep_after_lpm);
+
+/*
+ * Config Global Distributed Switch Controller (GDSC)
+ * to support controller power collapse
+ */
+static int dwc3_msm_config_gdsc(struct dwc3_msm *mdwc, int on)
+{
+	int ret;
+
+	if (IS_ERR_OR_NULL(mdwc->dwc3_gdsc))
+		return -EPERM;
+
+	if (on) {
+		ret = regulator_enable(mdwc->dwc3_gdsc);
+		if (ret) {
+			dev_err(mdwc->dev, "unable to enable usb3 gdsc\n");
+			return ret;
+		}
+	} else {
+		ret = regulator_disable(mdwc->dwc3_gdsc);
+		if (ret) {
+			dev_err(mdwc->dev, "unable to disable usb3 gdsc\n");
+			return ret;
+		}
+	}
+
+	return ret;
+}
+
+static int dwc3_msm_link_clk_reset(struct dwc3_msm *mdwc, bool assert)
+{
+	int ret = 0;
+
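+	/*
+	 * Gate the clocks before asserting the block reset, and re-enable
+	 * them only after the reset has been deasserted.
+	 */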
+	if (assert) {
+		disable_irq(mdwc->pwr_event_irq);
+		/* Using asynchronous block reset to the hardware */
+		dev_dbg(mdwc->dev, "block_reset ASSERT\n");
+		clk_disable_unprepare(mdwc->utmi_clk);
+		clk_disable_unprepare(mdwc->sleep_clk);
+		clk_disable_unprepare(mdwc->core_clk);
+		clk_disable_unprepare(mdwc->iface_clk);
+		ret = reset_control_assert(mdwc->core_reset);
+		if (ret)
+			dev_err(mdwc->dev, "dwc3 core_reset assert failed\n");
+	} else {
+		dev_dbg(mdwc->dev, "block_reset DEASSERT\n");
+		ret = reset_control_deassert(mdwc->core_reset);
+		if (ret)
+			dev_err(mdwc->dev, "dwc3 core_reset deassert failed\n");
+		ndelay(200);
+		clk_prepare_enable(mdwc->iface_clk);
+		clk_prepare_enable(mdwc->core_clk);
+		clk_prepare_enable(mdwc->sleep_clk);
+		clk_prepare_enable(mdwc->utmi_clk);
+		enable_irq(mdwc->pwr_event_irq);
+	}
+
+	return ret;
+}
+
+static void dwc3_msm_update_ref_clk(struct dwc3_msm *mdwc)
+{
+	u32 guctl, gfladj = 0;
+
+	guctl = dwc3_msm_read_reg(mdwc->base, DWC3_GUCTL);
+	guctl &= ~DWC3_GUCTL_REFCLKPER;
+
+	/* GFLADJ register is used starting with revision 2.50a */
+	if (dwc3_msm_read_reg(mdwc->base, DWC3_GSNPSID) >= DWC3_REVISION_250A) {
+		gfladj = dwc3_msm_read_reg(mdwc->base, DWC3_GFLADJ);
+		gfladj &= ~DWC3_GFLADJ_REFCLK_240MHZDECR_PLS1;
+		gfladj &= ~DWC3_GFLADJ_REFCLK_240MHZ_DECR;
+		gfladj &= ~DWC3_GFLADJ_REFCLK_LPM_SEL;
+		gfladj &= ~DWC3_GFLADJ_REFCLK_FLADJ;
+	}
+
+	/* Refer to SNPS Databook Table 6-55 for calculations used */
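+	/*
+	 * REFCLKPER is the ref clock period in ns (10^9 / utmi_clk_rate:
+	 * ~52ns at 19.2MHz, ~41ns at 24MHz); 240MHZ_DECR is 240MHz divided
+	 * by the ref clock rate (12.5 -> 12 plus the PLS1 bit at 19.2MHz,
+	 * exactly 10 at 24MHz).
+	 */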
+	switch (mdwc->utmi_clk_rate) {
+	case 19200000:
+		guctl |= 52 << __ffs(DWC3_GUCTL_REFCLKPER);
+		gfladj |= 12 << __ffs(DWC3_GFLADJ_REFCLK_240MHZ_DECR);
+		gfladj |= DWC3_GFLADJ_REFCLK_240MHZDECR_PLS1;
+		gfladj |= DWC3_GFLADJ_REFCLK_LPM_SEL;
+		gfladj |= 200 << __ffs(DWC3_GFLADJ_REFCLK_FLADJ);
+		break;
+	case 24000000:
+		guctl |= 41 << __ffs(DWC3_GUCTL_REFCLKPER);
+		gfladj |= 10 << __ffs(DWC3_GFLADJ_REFCLK_240MHZ_DECR);
+		gfladj |= DWC3_GFLADJ_REFCLK_LPM_SEL;
+		gfladj |= 2032 << __ffs(DWC3_GFLADJ_REFCLK_FLADJ);
+		break;
+	default:
+		dev_warn(mdwc->dev, "Unsupported utmi_clk_rate: %u\n",
+				mdwc->utmi_clk_rate);
+		break;
+	}
+
+	dwc3_msm_write_reg(mdwc->base, DWC3_GUCTL, guctl);
+	if (gfladj)
+		dwc3_msm_write_reg(mdwc->base, DWC3_GFLADJ, gfladj);
+}
+
+/* Initialize QSCRATCH registers for HSPHY and SSPHY operation */
+static void dwc3_msm_qscratch_reg_init(struct dwc3_msm *mdwc)
+{
+	if (dwc3_msm_read_reg(mdwc->base, DWC3_GSNPSID) < DWC3_REVISION_250A)
+		/* On older cores set XHCI_REV bit to specify revision 1.0 */
+		dwc3_msm_write_reg_field(mdwc->base, QSCRATCH_GENERAL_CFG,
+					 BIT(2), 1);
+
+	/*
+	 * Enable master clock for RAMs to allow BAM to access RAMs when
+	 * RAM clock gating is enabled via DWC3's GCTL. Otherwise issues
+	 * are seen where RAM clocks get turned OFF in SS mode
+	 */
+	dwc3_msm_write_reg(mdwc->base, CGCTL_REG,
+		dwc3_msm_read_reg(mdwc->base, CGCTL_REG) | 0x18);
+}
+
+static void dwc3_msm_vbus_draw_work(struct work_struct *w)
+{
+	struct dwc3_msm *mdwc = container_of(w, struct dwc3_msm,
+			vbus_draw_work);
+	struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
+
+	dwc3_msm_gadget_vbus_draw(mdwc, dwc->vbus_draw);
+}
+
+static void dwc3_msm_notify_event(struct dwc3 *dwc, unsigned event,
+							unsigned value)
+{
+	struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
+	u32 reg;
+
+	if (dwc->revision < DWC3_REVISION_230A)
+		return;
+
+	switch (event) {
+	case DWC3_CONTROLLER_ERROR_EVENT:
+		dev_info(mdwc->dev,
+			"DWC3_CONTROLLER_ERROR_EVENT received, irq cnt %lu\n",
+			dwc->irq_cnt);
+
+		dwc3_gadget_disable_irq(dwc);
+
+		/* prevent core from generating interrupts until recovery */
+		reg = dwc3_msm_read_reg(mdwc->base, DWC3_GCTL);
+		reg |= DWC3_GCTL_CORESOFTRESET;
+		dwc3_msm_write_reg(mdwc->base, DWC3_GCTL, reg);
+
+		/* restart USB which performs full reset and reconnect */
+		schedule_work(&mdwc->restart_usb_work);
+		break;
+	case DWC3_CONTROLLER_RESET_EVENT:
+		dev_dbg(mdwc->dev, "DWC3_CONTROLLER_RESET_EVENT received\n");
+		/* HS & SSPHYs get reset as part of core soft reset */
+		dwc3_msm_qscratch_reg_init(mdwc);
+		break;
+	case DWC3_CONTROLLER_POST_RESET_EVENT:
+		dev_dbg(mdwc->dev,
+				"DWC3_CONTROLLER_POST_RESET_EVENT received\n");
+
+		/*
+		 * The sequence below is used when the controller operates
+		 * without an SS PHY and only USB high/full speed is
+		 * supported.
+		 */
+		if (dwc->maximum_speed == USB_SPEED_HIGH ||
+					dwc->maximum_speed == USB_SPEED_FULL) {
+			dwc3_msm_write_reg(mdwc->base, QSCRATCH_GENERAL_CFG,
+				dwc3_msm_read_reg(mdwc->base,
+				QSCRATCH_GENERAL_CFG)
+				| PIPE_UTMI_CLK_DIS);
+
+			usleep_range(2, 5);
+
+			dwc3_msm_write_reg(mdwc->base, QSCRATCH_GENERAL_CFG,
+				dwc3_msm_read_reg(mdwc->base,
+				QSCRATCH_GENERAL_CFG)
+				| PIPE_UTMI_CLK_SEL
+				| PIPE3_PHYSTATUS_SW);
+
+			usleep_range(2, 5);
+
+			dwc3_msm_write_reg(mdwc->base, QSCRATCH_GENERAL_CFG,
+				dwc3_msm_read_reg(mdwc->base,
+				QSCRATCH_GENERAL_CFG)
+				& ~PIPE_UTMI_CLK_DIS);
+		}
+
+		dwc3_msm_update_ref_clk(mdwc);
+		dwc->tx_fifo_size = mdwc->tx_fifo_size;
+		break;
+	case DWC3_CONTROLLER_CONNDONE_EVENT:
+		dev_dbg(mdwc->dev, "DWC3_CONTROLLER_CONNDONE_EVENT received\n");
+		/*
+		 * Add power event if the dbm indicates coming out of L1 by
+		 * interrupt
+		 */
+		if (mdwc->dbm && dbm_l1_lpm_interrupt(mdwc->dbm))
+			dwc3_msm_write_reg_field(mdwc->base,
+					PWR_EVNT_IRQ_MASK_REG,
+					PWR_EVNT_LPM_OUT_L1_MASK, 1);
+
+		atomic_set(&dwc->in_lpm, 0);
+		break;
+	case DWC3_CONTROLLER_NOTIFY_OTG_EVENT:
+		dev_dbg(mdwc->dev, "DWC3_CONTROLLER_NOTIFY_OTG_EVENT received\n");
+		if (dwc->enable_bus_suspend) {
+			mdwc->suspend = dwc->b_suspend;
+			queue_work(mdwc->dwc3_wq, &mdwc->resume_work);
+		}
+		break;
+	case DWC3_CONTROLLER_SET_CURRENT_DRAW_EVENT:
+		dev_dbg(mdwc->dev, "DWC3_CONTROLLER_SET_CURRENT_DRAW_EVENT received\n");
+		schedule_work(&mdwc->vbus_draw_work);
+		break;
+	case DWC3_CONTROLLER_RESTART_USB_SESSION:
+		dev_dbg(mdwc->dev, "DWC3_CONTROLLER_RESTART_USB_SESSION received\n");
+		schedule_work(&mdwc->restart_usb_work);
+		break;
+	case DWC3_CONTROLLER_NOTIFY_DISABLE_UPDXFER:
+		dwc3_msm_dbm_disable_updxfer(dwc, value);
+		break;
+	default:
+		dev_dbg(mdwc->dev, "unknown dwc3 event\n");
+		break;
+	}
+}
+
+static void dwc3_msm_block_reset(struct dwc3_msm *mdwc, bool core_reset)
+{
+	int ret  = 0;
+
+	if (core_reset) {
+		ret = dwc3_msm_link_clk_reset(mdwc, 1);
+		if (ret)
+			return;
+
+		usleep_range(1000, 1200);
+		ret = dwc3_msm_link_clk_reset(mdwc, 0);
+		if (ret)
+			return;
+
+		usleep_range(10000, 12000);
+	}
+
+	if (mdwc->dbm) {
+		/* Reset the DBM */
+		dbm_soft_reset(mdwc->dbm, 1);
+		usleep_range(1000, 1200);
+		dbm_soft_reset(mdwc->dbm, 0);
+
+		/* enable DBM */
+		dwc3_msm_write_reg_field(mdwc->base, QSCRATCH_GENERAL_CFG,
+			DBM_EN_MASK, 0x1);
+		dbm_enable(mdwc->dbm);
+	}
+}
+
+static void dwc3_msm_power_collapse_por(struct dwc3_msm *mdwc)
+{
+	struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
+	u32 val;
+	int ret;
+
+	/* Configure AHB2PHY for one wait state read/write */
+	if (mdwc->ahb2phy_base) {
+		clk_prepare_enable(mdwc->cfg_ahb_clk);
+		val = readl_relaxed(mdwc->ahb2phy_base +
+				PERIPH_SS_AHB2PHY_TOP_CFG);
+		if (val != ONE_READ_WRITE_WAIT) {
+			writel_relaxed(ONE_READ_WRITE_WAIT,
+				mdwc->ahb2phy_base + PERIPH_SS_AHB2PHY_TOP_CFG);
+			/* complete above write before configuring USB PHY. */
+			mb();
+		}
+		clk_disable_unprepare(mdwc->cfg_ahb_clk);
+	}
+
+	if (!mdwc->init) {
+		dbg_event(0xFF, "dwc3 init",
+				atomic_read(&mdwc->dev->power.usage_count));
+		ret = dwc3_core_pre_init(dwc);
+		if (ret) {
+			dev_err(mdwc->dev, "dwc3_core_pre_init failed\n");
+			return;
+		}
+		mdwc->init = true;
+	}
+
+	dwc3_core_init(dwc);
+	/* Re-configure event buffers */
+	dwc3_event_buffers_setup(dwc);
+}
+
+static int dwc3_msm_prepare_suspend(struct dwc3_msm *mdwc)
+{
+	unsigned long timeout;
+	u32 reg = 0;
+
+	if ((mdwc->in_host_mode || mdwc->vbus_active)
+			&& dwc3_msm_is_superspeed(mdwc) && !mdwc->in_restart) {
+		if (!atomic_read(&mdwc->in_p3)) {
+			dev_err(mdwc->dev, "Not in P3,aborting LPM sequence\n");
+			return -EBUSY;
+		}
+	}
+
+	/* Clear previous L2 events */
+	dwc3_msm_write_reg(mdwc->base, PWR_EVNT_IRQ_STAT_REG,
+		PWR_EVNT_LPM_IN_L2_MASK | PWR_EVNT_LPM_OUT_L2_MASK);
+
+	/* Prepare HSPHY for suspend */
+	reg = dwc3_msm_read_reg(mdwc->base, DWC3_GUSB2PHYCFG(0));
+	dwc3_msm_write_reg(mdwc->base, DWC3_GUSB2PHYCFG(0),
+		reg | DWC3_GUSB2PHYCFG_ENBLSLPM | DWC3_GUSB2PHYCFG_SUSPHY);
+
+	/* Wait for PHY to go into L2 */
+	timeout = jiffies + msecs_to_jiffies(5);
+	while (!time_after(jiffies, timeout)) {
+		reg = dwc3_msm_read_reg(mdwc->base, PWR_EVNT_IRQ_STAT_REG);
+		if (reg & PWR_EVNT_LPM_IN_L2_MASK)
+			break;
+		usleep_range(20, 30);
+	}
+	if (!(reg & PWR_EVNT_LPM_IN_L2_MASK))
+		dev_err(mdwc->dev, "could not transition HS PHY to L2\n");
+
+	/* Clear L2 event bit */
+	dwc3_msm_write_reg(mdwc->base, PWR_EVNT_IRQ_STAT_REG,
+		PWR_EVNT_LPM_IN_L2_MASK);
+
+	return 0;
+}
+
+static void dwc3_msm_bus_vote_w(struct work_struct *w)
+{
+	struct dwc3_msm *mdwc = container_of(w, struct dwc3_msm, bus_vote_w);
+	int ret;
+
+	ret = msm_bus_scale_client_update_request(mdwc->bus_perf_client,
+			mdwc->bus_vote);
+	if (ret)
+		dev_err(mdwc->dev, "Failed to reset bus bw vote %d\n", ret);
+}
+
+static void dwc3_set_phy_speed_flags(struct dwc3_msm *mdwc)
+{
+	struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
+	int i, num_ports;
+	u32 reg;
+
+	mdwc->hs_phy->flags &= ~(PHY_HSFS_MODE | PHY_LS_MODE);
+	if (mdwc->in_host_mode) {
+		reg = dwc3_msm_read_reg(mdwc->base, USB3_HCSPARAMS1);
+		num_ports = HCS_MAX_PORTS(reg);
+		for (i = 0; i < num_ports; i++) {
+			reg = dwc3_msm_read_reg(mdwc->base,
+					USB3_PORTSC + i*0x10);
+			if (reg & PORT_PE) {
+				if (DEV_HIGHSPEED(reg) || DEV_FULLSPEED(reg))
+					mdwc->hs_phy->flags |= PHY_HSFS_MODE;
+				else if (DEV_LOWSPEED(reg))
+					mdwc->hs_phy->flags |= PHY_LS_MODE;
+			}
+		}
+	} else {
+		if (dwc->gadget.speed == USB_SPEED_HIGH ||
+			dwc->gadget.speed == USB_SPEED_FULL)
+			mdwc->hs_phy->flags |= PHY_HSFS_MODE;
+		else if (dwc->gadget.speed == USB_SPEED_LOW)
+			mdwc->hs_phy->flags |= PHY_LS_MODE;
+	}
+}
+
+static void msm_dwc3_perf_vote_update(struct dwc3_msm *mdwc,
+						bool perf_mode);
+
+static int dwc3_msm_suspend(struct dwc3_msm *mdwc)
+{
+	int ret, i;
+	struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
+
+	dbg_event(0xFF, "Ctl Sus", atomic_read(&dwc->in_lpm));
+
+	mutex_lock(&mdwc->suspend_resume_mutex);
+	if (atomic_read(&dwc->in_lpm)) {
+		dev_dbg(mdwc->dev, "%s: Already suspended\n", __func__);
+		mutex_unlock(&mdwc->suspend_resume_mutex);
+		return 0;
+	}
+
+	cancel_delayed_work_sync(&mdwc->perf_vote_work);
+	msm_dwc3_perf_vote_update(mdwc, false);
+
+	if (!mdwc->in_host_mode) {
+		/* abort suspend if there are unprocessed device events */
+		for (i = 0; i < dwc->num_event_buffers; i++) {
+			struct dwc3_event_buffer *evt = dwc->ev_buffs[i];
+			if ((evt->flags & DWC3_EVENT_PENDING)) {
+				dev_dbg(mdwc->dev,
+				"%s: %d device events pending, abort suspend\n",
+				__func__, evt->count / 4);
+				dbg_print_reg("PENDING DEVICE EVENT",
+						*(u32 *)(evt->buf + evt->lpos));
+				mutex_unlock(&mdwc->suspend_resume_mutex);
+				return -EBUSY;
+			}
+		}
+	}
+
+	if (!mdwc->vbus_active && dwc->is_drd &&
+		mdwc->otg_state == OTG_STATE_B_PERIPHERAL) {
+		/*
+		 * In some cases, pm_runtime_suspend may be called by
+		 * usb_bam while an lpm flag is pending. However, if this
+		 * happens after the cable was disconnected but before the
+		 * otg state has changed to IDLE, the OTG state machine is
+		 * still running and we would race against it. So cancel
+		 * LPM for now; the OTG state machine will enter LPM later,
+		 * after completing the transition to the IDLE state.
+		 */
+		dev_dbg(mdwc->dev,
+			"%s: cable disconnected while not in idle otg state\n",
+			__func__);
+		mutex_unlock(&mdwc->suspend_resume_mutex);
+		return -EBUSY;
+	}
+
+	/*
+	 * For the device bus suspend case, break the LPM sequence if the
+	 * device is not in the CONFIGURED state.
+	 */
+	if ((dwc->is_drd && mdwc->otg_state == OTG_STATE_B_SUSPEND) &&
+		(dwc->gadget.state != USB_STATE_CONFIGURED)) {
+		pr_err("%s(): Trying to go in LPM with state:%d\n",
+					__func__, dwc->gadget.state);
+		pr_err("%s(): LPM is not performed.\n", __func__);
+		mutex_unlock(&mdwc->suspend_resume_mutex);
+		return -EBUSY;
+	}
+
+	ret = dwc3_msm_prepare_suspend(mdwc);
+	if (ret) {
+		mutex_unlock(&mdwc->suspend_resume_mutex);
+		return ret;
+	}
+
+	/* Disable core irq */
+	if (dwc->irq)
+		disable_irq(dwc->irq);
+
+	if (work_busy(&dwc->bh_work))
+		dbg_event(0xFF, "pend evt", 0);
+
+	/* disable power event irq; hs and ss phy irqs are used as wake-up sources */
+	disable_irq(mdwc->pwr_event_irq);
+
+	dwc3_set_phy_speed_flags(mdwc);
+	/* Suspend HS PHY */
+	usb_phy_set_suspend(mdwc->hs_phy, 1);
+
+	/* Suspend SS PHY */
+	if (dwc->maximum_speed == USB_SPEED_SUPER) {
+		/* indicate phy about SS mode */
+		if (dwc3_msm_is_superspeed(mdwc))
+			mdwc->ss_phy->flags |= DEVICE_IN_SS_MODE;
+		usb_phy_set_suspend(mdwc->ss_phy, 1);
+		mdwc->lpm_flags |= MDWC3_SS_PHY_SUSPEND;
+	}
+
+	/* make sure above writes are completed before turning off clocks */
+	wmb();
+
+	/* Disable clocks */
+	if (mdwc->bus_aggr_clk)
+		clk_disable_unprepare(mdwc->bus_aggr_clk);
+	clk_disable_unprepare(mdwc->utmi_clk);
+
+	/* Memory core: OFF, Memory periphery: OFF */
+	if (!mdwc->in_host_mode && !mdwc->vbus_active) {
+		clk_set_flags(mdwc->core_clk, CLKFLAG_NORETAIN_MEM);
+		clk_set_flags(mdwc->core_clk, CLKFLAG_NORETAIN_PERIPH);
+	}
+
+	clk_set_rate(mdwc->core_clk, 19200000);
+	clk_disable_unprepare(mdwc->core_clk);
+	if (mdwc->noc_aggr_clk)
+		clk_disable_unprepare(mdwc->noc_aggr_clk);
+	/*
+	 * Disable iface_clk only after core_clk, as core_clk has an FSM
+	 * dependency on iface_clk. Hence iface_clk should be turned off
+	 * after core_clk is turned off.
+	 */
+	clk_disable_unprepare(mdwc->iface_clk);
+	/* USB PHY no longer requires TCXO */
+	clk_disable_unprepare(mdwc->xo_clk);
+
+	/* Perform controller power collapse */
+	if (!mdwc->in_host_mode && (!mdwc->vbus_active || mdwc->in_restart)) {
+		mdwc->lpm_flags |= MDWC3_POWER_COLLAPSE;
+		dev_dbg(mdwc->dev, "%s: power collapse\n", __func__);
+		dwc3_msm_config_gdsc(mdwc, 0);
+		clk_disable_unprepare(mdwc->sleep_clk);
+	}
+
+	/* Remove bus voting */
+	if (mdwc->bus_perf_client) {
+		mdwc->bus_vote = 0;
+		schedule_work(&mdwc->bus_vote_w);
+	}
+
+	/*
+	 * Release the wakeup source with a timeout to defer system suspend;
+	 * this handles the case where, on USB cable disconnect, both a
+	 * SUSPEND and a DISCONNECT event are received.
+	 */
+	if (mdwc->lpm_to_suspend_delay) {
+		dev_dbg(mdwc->dev, "defer suspend with %d(msecs)\n",
+					mdwc->lpm_to_suspend_delay);
+		pm_wakeup_event(mdwc->dev, mdwc->lpm_to_suspend_delay);
+	} else {
+		pm_relax(mdwc->dev);
+	}
+
+	atomic_set(&dwc->in_lpm, 1);
+
+	/*
+	 * With DCP or during cable disconnect, we don't require wakeup
+	 * via HS_PHY_IRQ or SS_PHY_IRQ. Hence enable wakeup only in the
+	 * host bus suspend and device bus suspend cases.
+	 */
+	if (mdwc->vbus_active || mdwc->in_host_mode) {
+		enable_irq_wake(mdwc->hs_phy_irq);
+		enable_irq(mdwc->hs_phy_irq);
+		if (mdwc->ss_phy_irq) {
+			enable_irq_wake(mdwc->ss_phy_irq);
+			enable_irq(mdwc->ss_phy_irq);
+		}
+		mdwc->lpm_flags |= MDWC3_ASYNC_IRQ_WAKE_CAPABILITY;
+	}
+
+	dev_info(mdwc->dev, "DWC3 in low power mode\n");
+	mutex_unlock(&mdwc->suspend_resume_mutex);
+	return 0;
+}
+
+static int dwc3_msm_resume(struct dwc3_msm *mdwc)
+{
+	int ret;
+	long core_clk_rate;
+	struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
+
+	dev_dbg(mdwc->dev, "%s: exiting lpm\n", __func__);
+
+	mutex_lock(&mdwc->suspend_resume_mutex);
+	if (!atomic_read(&dwc->in_lpm)) {
+		dev_dbg(mdwc->dev, "%s: Already resumed\n", __func__);
+		mutex_unlock(&mdwc->suspend_resume_mutex);
+		return 0;
+	}
+
+	pm_stay_awake(mdwc->dev);
+
+	/* Enable bus voting */
+	if (mdwc->bus_perf_client) {
+		mdwc->bus_vote = 1;
+		schedule_work(&mdwc->bus_vote_w);
+	}
+
+	/* Vote for TCXO while waking up USB HSPHY */
+	ret = clk_prepare_enable(mdwc->xo_clk);
+	if (ret)
+		dev_err(mdwc->dev, "%s failed to vote TCXO buffer%d\n",
+						__func__, ret);
+
+	/* Restore controller power collapse */
+	if (mdwc->lpm_flags & MDWC3_POWER_COLLAPSE) {
+		dev_dbg(mdwc->dev, "%s: exit power collapse\n", __func__);
+		dwc3_msm_config_gdsc(mdwc, 1);
+		ret = reset_control_assert(mdwc->core_reset);
+		if (ret)
+			dev_err(mdwc->dev, "%s:core_reset assert failed\n",
+					__func__);
+		/* HW requires a short delay for reset to take place properly */
+		usleep_range(1000, 1200);
+		ret = reset_control_deassert(mdwc->core_reset);
+		if (ret)
+			dev_err(mdwc->dev, "%s:core_reset deassert failed\n",
+					__func__);
+		clk_prepare_enable(mdwc->sleep_clk);
+	}
+
+	/*
+	 * Enable clocks.
+	 * iface_clk is turned ON before core_clk due to the FSM dependency.
+	 */
+	clk_prepare_enable(mdwc->iface_clk);
+	if (mdwc->noc_aggr_clk)
+		clk_prepare_enable(mdwc->noc_aggr_clk);
+
+	core_clk_rate = mdwc->core_clk_rate;
+	if (mdwc->in_host_mode && mdwc->max_rh_port_speed == USB_SPEED_HIGH) {
+		core_clk_rate = mdwc->core_clk_rate_hs;
+		dev_dbg(mdwc->dev, "%s: set hs core clk rate %ld\n", __func__,
+			core_clk_rate);
+	}
+
+	clk_set_rate(mdwc->core_clk, core_clk_rate);
+	clk_prepare_enable(mdwc->core_clk);
+
+	/* set Memory core: ON, Memory periphery: ON */
+	clk_set_flags(mdwc->core_clk, CLKFLAG_RETAIN_MEM);
+	clk_set_flags(mdwc->core_clk, CLKFLAG_RETAIN_PERIPH);
+
+	clk_prepare_enable(mdwc->utmi_clk);
+	if (mdwc->bus_aggr_clk)
+		clk_prepare_enable(mdwc->bus_aggr_clk);
+
+	/* Resume SS PHY */
+	if (dwc->maximum_speed == USB_SPEED_SUPER &&
+			mdwc->lpm_flags & MDWC3_SS_PHY_SUSPEND) {
+		mdwc->ss_phy->flags &= ~(PHY_LANE_A | PHY_LANE_B);
+		if (mdwc->typec_orientation == ORIENTATION_CC1)
+			mdwc->ss_phy->flags |= PHY_LANE_A;
+		if (mdwc->typec_orientation == ORIENTATION_CC2)
+			mdwc->ss_phy->flags |= PHY_LANE_B;
+		usb_phy_set_suspend(mdwc->ss_phy, 0);
+		mdwc->ss_phy->flags &= ~DEVICE_IN_SS_MODE;
+		mdwc->lpm_flags &= ~MDWC3_SS_PHY_SUSPEND;
+	}
+
+	mdwc->hs_phy->flags &= ~(PHY_HSFS_MODE | PHY_LS_MODE);
+	/* Resume HS PHY */
+	usb_phy_set_suspend(mdwc->hs_phy, 0);
+
+	/* Recover from controller power collapse */
+	if (mdwc->lpm_flags & MDWC3_POWER_COLLAPSE) {
+		u32 tmp;
+
+		dev_dbg(mdwc->dev, "%s: exit power collapse\n", __func__);
+
+		dwc3_msm_power_collapse_por(mdwc);
+
+		/* Get initial P3 status and enable IN_P3 event */
+		tmp = dwc3_msm_read_reg_field(mdwc->base,
+			DWC3_GDBGLTSSM, DWC3_GDBGLTSSM_LINKSTATE_MASK);
+		atomic_set(&mdwc->in_p3, tmp == DWC3_LINK_STATE_U3);
+		dwc3_msm_write_reg_field(mdwc->base, PWR_EVNT_IRQ_MASK_REG,
+					PWR_EVNT_POWERDOWN_IN_P3_MASK, 1);
+
+		mdwc->lpm_flags &= ~MDWC3_POWER_COLLAPSE;
+	}
+
+	atomic_set(&dwc->in_lpm, 0);
+
+	/* enable power evt irq for IN P3 detection */
+	enable_irq(mdwc->pwr_event_irq);
+
+	/* Disable HSPHY auto suspend */
+	dwc3_msm_write_reg(mdwc->base, DWC3_GUSB2PHYCFG(0),
+		dwc3_msm_read_reg(mdwc->base, DWC3_GUSB2PHYCFG(0)) &
+				~(DWC3_GUSB2PHYCFG_ENBLSLPM |
+					DWC3_GUSB2PHYCFG_SUSPHY));
+
+	/* Disable wakeup capable for HS_PHY IRQ & SS_PHY_IRQ if enabled */
+	if (mdwc->lpm_flags & MDWC3_ASYNC_IRQ_WAKE_CAPABILITY) {
+		disable_irq_wake(mdwc->hs_phy_irq);
+		disable_irq_nosync(mdwc->hs_phy_irq);
+		if (mdwc->ss_phy_irq) {
+			disable_irq_wake(mdwc->ss_phy_irq);
+			disable_irq_nosync(mdwc->ss_phy_irq);
+		}
+		mdwc->lpm_flags &= ~MDWC3_ASYNC_IRQ_WAKE_CAPABILITY;
+	}
+
+	dev_info(mdwc->dev, "DWC3 exited from low power mode\n");
+
+	/* Enable core irq */
+	if (dwc->irq)
+		enable_irq(dwc->irq);
+
+	/*
+	 * Handle other power events that could not have been handled during
+	 * Low Power Mode
+	 */
+	dwc3_pwr_event_handler(mdwc);
+
+	if (pm_qos_request_active(&mdwc->pm_qos_req_dma))
+		schedule_delayed_work(&mdwc->perf_vote_work,
+			msecs_to_jiffies(1000 * PM_QOS_SAMPLE_SEC));
+
+	dbg_event(0xFF, "Ctl Res", atomic_read(&dwc->in_lpm));
+	mutex_unlock(&mdwc->suspend_resume_mutex);
+
+	return 0;
+}
+
+/**
+ * dwc3_ext_event_notify - handle events from the external transceiver
+ */
+static void dwc3_ext_event_notify(struct dwc3_msm *mdwc)
+{
+	/* Flush any pending event processing before handling new ones */
+	flush_delayed_work(&mdwc->sm_work);
+
+	if (mdwc->id_state == DWC3_ID_FLOAT) {
+		dev_dbg(mdwc->dev, "XCVR: ID set\n");
+		set_bit(ID, &mdwc->inputs);
+	} else {
+		dev_dbg(mdwc->dev, "XCVR: ID clear\n");
+		clear_bit(ID, &mdwc->inputs);
+	}
+
+	if (mdwc->vbus_active && !mdwc->in_restart) {
+		dev_dbg(mdwc->dev, "XCVR: BSV set\n");
+		set_bit(B_SESS_VLD, &mdwc->inputs);
+	} else {
+		dev_dbg(mdwc->dev, "XCVR: BSV clear\n");
+		clear_bit(B_SESS_VLD, &mdwc->inputs);
+	}
+
+	if (mdwc->suspend) {
+		dev_dbg(mdwc->dev, "XCVR: SUSP set\n");
+		set_bit(B_SUSPEND, &mdwc->inputs);
+	} else {
+		dev_dbg(mdwc->dev, "XCVR: SUSP clear\n");
+		clear_bit(B_SUSPEND, &mdwc->inputs);
+	}
+
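+	/*
+	 * Hold a wakeup source so the OTG state machine gets a chance to
+	 * run before the system suspends again.
+	 */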
+	pm_stay_awake(mdwc->dev);
+	schedule_delayed_work(&mdwc->sm_work, 0);
+}
+
+static void dwc3_resume_work(struct work_struct *w)
+{
+	struct dwc3_msm *mdwc = container_of(w, struct dwc3_msm, resume_work);
+	struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
+
+	dev_dbg(mdwc->dev, "%s: dwc3 resume work\n", __func__);
+
+	/*
+	 * Exit LPM first to meet the resume timeline on the device side.
+	 * The resume_pending flag prevents calling dwc3_msm_resume() when
+	 * we get here due to a system-wide resume without a USB cable
+	 * connected; the flag is set only by the power event IRQ while in
+	 * LPM.
+	 */
+	if (mdwc->resume_pending) {
+		dwc3_msm_resume(mdwc);
+		mdwc->resume_pending = false;
+	}
+
+	if (atomic_read(&mdwc->pm_suspended)) {
+		dbg_event(0xFF, "RWrk PMSus", 0);
+		/* let pm resume kick in resume work later */
+		return;
+	}
+
+	dbg_event(0xFF, "RWrk", dwc->is_drd);
+	dwc3_ext_event_notify(mdwc);
+}
+
+static void dwc3_pwr_event_handler(struct dwc3_msm *mdwc)
+{
+	struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
+	u32 irq_stat, irq_clear = 0;
+
+	irq_stat = dwc3_msm_read_reg(mdwc->base, PWR_EVNT_IRQ_STAT_REG);
+	dev_dbg(mdwc->dev, "%s irq_stat=%X\n", __func__, irq_stat);
+
+	/* Check for P3 events */
+	if ((irq_stat & PWR_EVNT_POWERDOWN_OUT_P3_MASK) &&
+			(irq_stat & PWR_EVNT_POWERDOWN_IN_P3_MASK)) {
+		/* Can't tell if we entered or exited P3, so check LINKSTATE */
+		u32 ls = dwc3_msm_read_reg_field(mdwc->base,
+				DWC3_GDBGLTSSM, DWC3_GDBGLTSSM_LINKSTATE_MASK);
+		dev_dbg(mdwc->dev, "%s link state = 0x%04x\n", __func__, ls);
+		atomic_set(&mdwc->in_p3, ls == DWC3_LINK_STATE_U3);
+
+		irq_stat &= ~(PWR_EVNT_POWERDOWN_OUT_P3_MASK |
+				PWR_EVNT_POWERDOWN_IN_P3_MASK);
+		irq_clear |= (PWR_EVNT_POWERDOWN_OUT_P3_MASK |
+				PWR_EVNT_POWERDOWN_IN_P3_MASK);
+	} else if (irq_stat & PWR_EVNT_POWERDOWN_OUT_P3_MASK) {
+		atomic_set(&mdwc->in_p3, 0);
+		irq_stat &= ~PWR_EVNT_POWERDOWN_OUT_P3_MASK;
+		irq_clear |= PWR_EVNT_POWERDOWN_OUT_P3_MASK;
+	} else if (irq_stat & PWR_EVNT_POWERDOWN_IN_P3_MASK) {
+		atomic_set(&mdwc->in_p3, 1);
+		irq_stat &= ~PWR_EVNT_POWERDOWN_IN_P3_MASK;
+		irq_clear |= PWR_EVNT_POWERDOWN_IN_P3_MASK;
+	}
+
+	/* Clear L2 exit */
+	if (irq_stat & PWR_EVNT_LPM_OUT_L2_MASK) {
+		irq_stat &= ~PWR_EVNT_LPM_OUT_L2_MASK;
+		irq_clear |= PWR_EVNT_LPM_OUT_L2_MASK;
+	}
+
+	/* Handle exit from L1 events */
+	if (irq_stat & PWR_EVNT_LPM_OUT_L1_MASK) {
+		dev_dbg(mdwc->dev, "%s: handling PWR_EVNT_LPM_OUT_L1_MASK\n",
+				__func__);
+		if (usb_gadget_wakeup(&dwc->gadget))
+			dev_err(mdwc->dev, "%s failed to take dwc out of L1\n",
+					__func__);
+		irq_stat &= ~PWR_EVNT_LPM_OUT_L1_MASK;
+		irq_clear |= PWR_EVNT_LPM_OUT_L1_MASK;
+	}
+
+	/* Unhandled events */
+	if (irq_stat)
+		dev_dbg(mdwc->dev, "%s: unexpected PWR_EVNT, irq_stat=%X\n",
+			__func__, irq_stat);
+
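+	/* Ack the handled events; writing the bits back clears the status */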
+	dwc3_msm_write_reg(mdwc->base, PWR_EVNT_IRQ_STAT_REG, irq_clear);
+}
+
+static irqreturn_t msm_dwc3_pwr_irq_thread(int irq, void *_mdwc)
+{
+	struct dwc3_msm *mdwc = _mdwc;
+	struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
+
+	dev_dbg(mdwc->dev, "%s\n", __func__);
+
+	if (atomic_read(&dwc->in_lpm))
+		dwc3_resume_work(&mdwc->resume_work);
+	else
+		dwc3_pwr_event_handler(mdwc);
+
+	dbg_event(0xFF, "PWR IRQ", atomic_read(&dwc->in_lpm));
+
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t msm_dwc3_pwr_irq(int irq, void *data)
+{
+	struct dwc3_msm *mdwc = data;
+	struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
+
+	dwc->t_pwr_evt_irq = ktime_get();
+	dev_dbg(mdwc->dev, "%s received\n", __func__);
+	/*
+	 * When in Low Power Mode, we can't read PWR_EVNT_IRQ_STAT_REG to
+	 * ascertain which interrupts have been triggered, as the clocks are
+	 * disabled. Resume the controller by waking up the pwr event irq
+	 * thread. After re-enabling the clocks, dwc3_msm_resume will call
+	 * dwc3_pwr_event_handler to handle all other power events.
+	 */
+	if (atomic_read(&dwc->in_lpm)) {
+		/* set this to call dwc3_msm_resume() */
+		mdwc->resume_pending = true;
+		return IRQ_WAKE_THREAD;
+	}
+
+	dwc3_pwr_event_handler(mdwc);
+	return IRQ_HANDLED;
+}
+
+static int dwc3_cpu_notifier_cb(struct notifier_block *nfb,
+		unsigned long action, void *hcpu)
+{
+	uint32_t cpu = (uintptr_t)hcpu;
+	struct dwc3_msm *mdwc =
+			container_of(nfb, struct dwc3_msm, dwc3_cpu_notifier);
+
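+	/*
+	 * IRQ affinity to a CPU is lost while that CPU is offline, so re-pin
+	 * the controller IRQ once the preferred CPU comes back online.
+	 */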
+	if (cpu == cpu_to_affin && action == CPU_ONLINE) {
+		pr_debug("%s: cpu online:%u irq:%d\n", __func__,
+				cpu_to_affin, mdwc->irq_to_affin);
+		irq_set_affinity(mdwc->irq_to_affin, get_cpu_mask(cpu));
+	}
+
+	return NOTIFY_OK;
+}
+
+static void dwc3_otg_sm_work(struct work_struct *w);
+
+static int dwc3_msm_get_clk_gdsc(struct dwc3_msm *mdwc)
+{
+	int ret;
+
+	mdwc->dwc3_gdsc = devm_regulator_get(mdwc->dev, "USB3_GDSC");
+	if (IS_ERR(mdwc->dwc3_gdsc))
+		mdwc->dwc3_gdsc = NULL;
+
+	mdwc->xo_clk = devm_clk_get(mdwc->dev, "xo");
+	if (IS_ERR(mdwc->xo_clk)) {
+		dev_err(mdwc->dev, "%s unable to get TCXO buffer handle\n",
+								__func__);
+		ret = PTR_ERR(mdwc->xo_clk);
+		return ret;
+	}
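+	/* the TCXO buffer runs at 19.2MHz */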
+	clk_set_rate(mdwc->xo_clk, 19200000);
+
+	mdwc->iface_clk = devm_clk_get(mdwc->dev, "iface_clk");
+	if (IS_ERR(mdwc->iface_clk)) {
+		dev_err(mdwc->dev, "failed to get iface_clk\n");
+		ret = PTR_ERR(mdwc->iface_clk);
+		return ret;
+	}
+
+	/*
+	 * The DWC3 core requires its CORE CLK (aka master/bus clk) to
+	 * run at 125MHz in SSUSB mode and above 60MHz in HSUSB mode.
+	 * On newer platforms it can run at 150MHz as well.
+	 */
+	mdwc->core_clk = devm_clk_get(mdwc->dev, "core_clk");
+	if (IS_ERR(mdwc->core_clk)) {
+		dev_err(mdwc->dev, "failed to get core_clk\n");
+		ret = PTR_ERR(mdwc->core_clk);
+		return ret;
+	}
+
+	if (of_property_read_u32(mdwc->dev->of_node, "qcom,core-clk-rate",
+				(u32 *)&mdwc->core_clk_rate)) {
+		dev_err(mdwc->dev, "USB core-clk-rate is not present\n");
+		return -EINVAL;
+	}
+
+	mdwc->core_clk_rate = clk_round_rate(mdwc->core_clk,
+							mdwc->core_clk_rate);
+
+	dev_dbg(mdwc->dev, "USB core frequency = %ld\n",
+						mdwc->core_clk_rate);
+	ret = clk_set_rate(mdwc->core_clk, mdwc->core_clk_rate);
+	if (ret)
+		dev_err(mdwc->dev, "fail to set core_clk freq:%d\n", ret);
+
+	if (of_property_read_u32(mdwc->dev->of_node, "qcom,core-clk-rate-hs",
+				(u32 *)&mdwc->core_clk_rate_hs)) {
+		dev_dbg(mdwc->dev, "USB core-clk-rate-hs is not present\n");
+		mdwc->core_clk_rate_hs = mdwc->core_clk_rate;
+	}
+
+	mdwc->core_reset = devm_reset_control_get(mdwc->dev, "core_reset");
+	if (IS_ERR(mdwc->core_reset)) {
+		dev_err(mdwc->dev, "failed to get core_reset\n");
+		return PTR_ERR(mdwc->core_reset);
+	}
+
+	mdwc->sleep_clk = devm_clk_get(mdwc->dev, "sleep_clk");
+	if (IS_ERR(mdwc->sleep_clk)) {
+		dev_err(mdwc->dev, "failed to get sleep_clk\n");
+		ret = PTR_ERR(mdwc->sleep_clk);
+		return ret;
+	}
+
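+	/* the sleep clock is nominally 32kHz */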
+	clk_set_rate(mdwc->sleep_clk, 32000);
+	mdwc->utmi_clk_rate = 19200000;
+	mdwc->utmi_clk = devm_clk_get(mdwc->dev, "utmi_clk");
+	if (IS_ERR(mdwc->utmi_clk)) {
+		dev_err(mdwc->dev, "failed to get utmi_clk\n");
+		ret = PTR_ERR(mdwc->utmi_clk);
+		return ret;
+	}
+
+	clk_set_rate(mdwc->utmi_clk, mdwc->utmi_clk_rate);
+	mdwc->bus_aggr_clk = devm_clk_get(mdwc->dev, "bus_aggr_clk");
+	if (IS_ERR(mdwc->bus_aggr_clk))
+		mdwc->bus_aggr_clk = NULL;
+
+	mdwc->noc_aggr_clk = devm_clk_get(mdwc->dev, "noc_aggr_clk");
+	if (IS_ERR(mdwc->noc_aggr_clk))
+		mdwc->noc_aggr_clk = NULL;
+
+	if (of_property_match_string(mdwc->dev->of_node,
+				"clock-names", "cfg_ahb_clk") >= 0) {
+		mdwc->cfg_ahb_clk = devm_clk_get(mdwc->dev, "cfg_ahb_clk");
+		if (IS_ERR(mdwc->cfg_ahb_clk)) {
+			ret = PTR_ERR(mdwc->cfg_ahb_clk);
+			mdwc->cfg_ahb_clk = NULL;
+			if (ret != -EPROBE_DEFER)
+				dev_err(mdwc->dev,
+					"failed to get cfg_ahb_clk ret %d\n",
+					ret);
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
+static int dwc3_msm_id_notifier(struct notifier_block *nb,
+	unsigned long event, void *ptr)
+{
+	struct dwc3_msm *mdwc = container_of(nb, struct dwc3_msm, id_nb);
+	struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
+	struct extcon_dev *edev = ptr;
+	enum dwc3_id_state id;
+	int cc_state;
+	int speed;
+
+	if (!edev) {
+		dev_err(mdwc->dev, "%s: edev null\n", __func__);
+		goto done;
+	}
+
+	id = event ? DWC3_ID_GROUND : DWC3_ID_FLOAT;
+
+	dev_dbg(mdwc->dev, "host:%ld (id:%d) event received\n", event, id);
+
+	cc_state = extcon_get_cable_state_(edev, EXTCON_USB_CC);
+	if (cc_state < 0)
+		mdwc->typec_orientation = ORIENTATION_NONE;
+	else
+		mdwc->typec_orientation =
+			cc_state ? ORIENTATION_CC2 : ORIENTATION_CC1;
+
+	dbg_event(0xFF, "cc_state", mdwc->typec_orientation);
+
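+	/*
+	 * A non-positive EXTCON_USB_SPEED state (unsupported or cleared)
+	 * selects high speed; a set state selects super speed.
+	 */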
+	speed = extcon_get_cable_state_(edev, EXTCON_USB_SPEED);
+	dwc->maximum_speed = (speed <= 0) ? USB_SPEED_HIGH : USB_SPEED_SUPER;
+	if (dwc->maximum_speed > dwc->max_hw_supp_speed)
+		dwc->maximum_speed = dwc->max_hw_supp_speed;
+
+	if (mdwc->id_state != id) {
+		mdwc->id_state = id;
+		dbg_event(0xFF, "id_state", mdwc->id_state);
+		pm_stay_awake(mdwc->dev);
+		queue_work(mdwc->dwc3_wq, &mdwc->resume_work);
+	}
+
+done:
+	return NOTIFY_DONE;
+}
+
+static void check_for_sdp_connection(struct work_struct *w)
+{
+	struct dwc3_msm *mdwc =
+		container_of(w, struct dwc3_msm, sdp_check.work);
+	struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
+
+	if (!mdwc->vbus_active)
+		return;
+
+	/*
+	 * USB 3.1 compliance equipment is usually reported as a floating
+	 * charger, as the HS D+/D- lines are never connected. Do not
+	 * tear down the USB stack if the compliance parameter is set.
+	 */
+	if (mdwc->usb_compliance_mode)
+		return;
+
+	/* floating D+/D- lines detected */
+	if (dwc->gadget.state < USB_STATE_DEFAULT &&
+		dwc3_gadget_get_link_state(dwc) != DWC3_LINK_STATE_CMPLY) {
+		mdwc->vbus_active = 0;
+		dbg_event(0xFF, "Q RW SPD CHK", mdwc->vbus_active);
+		queue_work(mdwc->dwc3_wq, &mdwc->resume_work);
+	}
+}
+
+static int dwc3_msm_vbus_notifier(struct notifier_block *nb,
+	unsigned long event, void *ptr)
+{
+	struct dwc3_msm *mdwc = container_of(nb, struct dwc3_msm, vbus_nb);
+	struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
+	struct extcon_dev *edev = ptr;
+	int cc_state;
+	int speed;
+
+	if (!edev) {
+		dev_err(mdwc->dev, "%s: edev null\n", __func__);
+		goto done;
+	}
+
+	dev_dbg(mdwc->dev, "vbus:%ld event received\n", event);
+
+	if (mdwc->vbus_active == event)
+		return NOTIFY_DONE;
+
+	cc_state = extcon_get_cable_state_(edev, EXTCON_USB_CC);
+	if (cc_state < 0)
+		mdwc->typec_orientation = ORIENTATION_NONE;
+	else
+		mdwc->typec_orientation =
+			cc_state ? ORIENTATION_CC2 : ORIENTATION_CC1;
+
+	dbg_event(0xFF, "cc_state", mdwc->typec_orientation);
+
+	speed = extcon_get_cable_state_(edev, EXTCON_USB_SPEED);
+	dwc->maximum_speed = (speed <= 0) ? USB_SPEED_HIGH : USB_SPEED_SUPER;
+	if (dwc->maximum_speed > dwc->max_hw_supp_speed)
+		dwc->maximum_speed = dwc->max_hw_supp_speed;
+
+	mdwc->vbus_active = event;
+	if (dwc->is_drd && !mdwc->in_restart) {
+		dbg_event(0xFF, "Q RW (vbus)", mdwc->vbus_active);
+		pm_stay_awake(mdwc->dev);
+		queue_work(mdwc->dwc3_wq, &mdwc->resume_work);
+	}
+done:
+	return NOTIFY_DONE;
+}
+
+static int dwc3_msm_extcon_register(struct dwc3_msm *mdwc)
+{
+	struct device_node *node = mdwc->dev->of_node;
+	struct extcon_dev *edev;
+	struct dwc3 *dwc;
+	int ret = 0;
+
+	dwc = platform_get_drvdata(mdwc->dwc3);
+	if (!of_property_read_bool(node, "extcon")) {
+		dev_dbg(mdwc->dev, "extcon property doesn't exist\n");
+		if (usb_get_dr_mode(&mdwc->dwc3->dev) == USB_DR_MODE_HOST
+							|| dwc->is_drd)
+			return 0;
+		dev_err(mdwc->dev, "Neither host nor DRD, fail probe\n");
+		return -EINVAL;
+	}
+
+	edev = extcon_get_edev_by_phandle(mdwc->dev, 0);
+	if (IS_ERR(edev) && PTR_ERR(edev) != -ENODEV)
+		return PTR_ERR(edev);
+
+	if (!IS_ERR(edev)) {
+		mdwc->extcon_vbus = edev;
+		mdwc->vbus_nb.notifier_call = dwc3_msm_vbus_notifier;
+		ret = extcon_register_notifier(edev, EXTCON_USB,
+				&mdwc->vbus_nb);
+		if (ret < 0) {
+			dev_err(mdwc->dev, "failed to register notifier for USB\n");
+			return ret;
+		}
+	}
+
+	/* if a second phandle was provided, use it to get a separate edev */
+	if (of_count_phandle_with_args(node, "extcon", NULL) > 1) {
+		edev = extcon_get_edev_by_phandle(mdwc->dev, 1);
+		if (IS_ERR(edev) && PTR_ERR(edev) != -ENODEV) {
+			ret = PTR_ERR(edev);
+			goto err;
+		}
+	}
+
+	if (!IS_ERR(edev)) {
+		mdwc->extcon_id = edev;
+		mdwc->id_nb.notifier_call = dwc3_msm_id_notifier;
+		ret = extcon_register_notifier(edev, EXTCON_USB_HOST,
+				&mdwc->id_nb);
+		if (ret < 0) {
+			dev_err(mdwc->dev, "failed to register notifier for USB-HOST\n");
+			goto err;
+		}
+	}
+
+	return 0;
+err:
+	if (mdwc->extcon_vbus)
+		extcon_unregister_notifier(mdwc->extcon_vbus, EXTCON_USB,
+				&mdwc->vbus_nb);
+	return ret;
+}
+
+static ssize_t mode_show(struct device *dev, struct device_attribute *attr,
+		char *buf)
+{
+	struct dwc3_msm *mdwc = dev_get_drvdata(dev);
+
+	if (mdwc->vbus_active)
+		return snprintf(buf, PAGE_SIZE, "peripheral\n");
+	if (mdwc->id_state == DWC3_ID_GROUND)
+		return snprintf(buf, PAGE_SIZE, "host\n");
+
+	return snprintf(buf, PAGE_SIZE, "none\n");
+}
+
+static ssize_t mode_store(struct device *dev, struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	struct dwc3_msm *mdwc = dev_get_drvdata(dev);
+
+	if (sysfs_streq(buf, "peripheral")) {
+		mdwc->vbus_active = true;
+		mdwc->id_state = DWC3_ID_FLOAT;
+	} else if (sysfs_streq(buf, "host")) {
+		mdwc->vbus_active = false;
+		mdwc->id_state = DWC3_ID_GROUND;
+	} else {
+		mdwc->vbus_active = false;
+		mdwc->id_state = DWC3_ID_FLOAT;
+	}
+
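+	/* kick the OTG state machine so the forced role takes effect */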
+	dwc3_ext_event_notify(mdwc);
+
+	return count;
+}
+
+static DEVICE_ATTR_RW(mode);
+
+static ssize_t speed_show(struct device *dev, struct device_attribute *attr,
+		char *buf)
+{
+	struct dwc3_msm *mdwc = dev_get_drvdata(dev);
+	struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
+
+	return snprintf(buf, PAGE_SIZE, "%s\n",
+			usb_speed_string(dwc->max_hw_supp_speed));
+}
+
+static ssize_t speed_store(struct device *dev, struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	struct dwc3_msm *mdwc = dev_get_drvdata(dev);
+	struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
+	enum usb_device_speed req_speed = USB_SPEED_UNKNOWN;
+
+	if (sysfs_streq(buf, "high"))
+		req_speed = USB_SPEED_HIGH;
+	else if (sysfs_streq(buf, "super"))
+		req_speed = USB_SPEED_SUPER;
+
+	if (req_speed != USB_SPEED_UNKNOWN &&
+			req_speed != dwc->max_hw_supp_speed) {
+		dwc->maximum_speed = dwc->max_hw_supp_speed = req_speed;
+		schedule_work(&mdwc->restart_usb_work);
+	}
+
+	return count;
+}
+static DEVICE_ATTR_RW(speed);
+
+static void msm_dwc3_perf_vote_work(struct work_struct *w);
+static ssize_t xhci_link_compliance_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct dwc3_msm *mdwc = dev_get_drvdata(dev);
+
+	if (mdwc->xhci_ss_compliance_enable)
+		return snprintf(buf, PAGE_SIZE, "y\n");
+	else
+		return snprintf(buf, PAGE_SIZE, "n\n");
+}
+
+static ssize_t xhci_link_compliance_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct dwc3_msm *mdwc = dev_get_drvdata(dev);
+	bool value;
+	int ret;
+
+	ret = strtobool(buf, &value);
+	if (!ret) {
+		mdwc->xhci_ss_compliance_enable = value;
+		return count;
+	}
+
+	return ret;
+}
+
+static DEVICE_ATTR_RW(xhci_link_compliance);
+
+static ssize_t usb_compliance_mode_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct dwc3_msm *mdwc = dev_get_drvdata(dev);
+
+	return snprintf(buf, PAGE_SIZE, "%c\n",
+			mdwc->usb_compliance_mode ? 'Y' : 'N');
+}
+
+static ssize_t usb_compliance_mode_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	int ret = 0;
+	struct dwc3_msm *mdwc = dev_get_drvdata(dev);
+
+	ret = strtobool(buf, &mdwc->usb_compliance_mode);
+
+	if (ret)
+		return ret;
+
+	return count;
+}
+static DEVICE_ATTR_RW(usb_compliance_mode);
+
+static int dwc3_msm_probe(struct platform_device *pdev)
+{
+	struct device_node *node = pdev->dev.of_node, *dwc3_node;
+	struct device	*dev = &pdev->dev;
+	union power_supply_propval pval = {0};
+	struct dwc3_msm *mdwc;
+	struct dwc3	*dwc;
+	struct resource *res;
+	void __iomem *tcsr;
+	bool host_mode;
+	int ret = 0;
+	int ext_hub_reset_gpio;
+	u32 val;
+
+	mdwc = devm_kzalloc(&pdev->dev, sizeof(*mdwc), GFP_KERNEL);
+	if (!mdwc)
+		return -ENOMEM;
+
+	if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64))) {
+		dev_err(&pdev->dev, "setting DMA mask to 64 failed.\n");
+		if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32))) {
+			dev_err(&pdev->dev, "setting DMA mask to 32 failed.\n");
+			return -EOPNOTSUPP;
+		}
+	}
+
+	platform_set_drvdata(pdev, mdwc);
+	mdwc->dev = &pdev->dev;
+
+	INIT_LIST_HEAD(&mdwc->req_complete_list);
+	INIT_WORK(&mdwc->resume_work, dwc3_resume_work);
+	INIT_WORK(&mdwc->restart_usb_work, dwc3_restart_usb_work);
+	INIT_WORK(&mdwc->bus_vote_w, dwc3_msm_bus_vote_w);
+	INIT_WORK(&mdwc->vbus_draw_work, dwc3_msm_vbus_draw_work);
+	INIT_DELAYED_WORK(&mdwc->sm_work, dwc3_otg_sm_work);
+	INIT_DELAYED_WORK(&mdwc->perf_vote_work, msm_dwc3_perf_vote_work);
+	INIT_DELAYED_WORK(&mdwc->sdp_check, check_for_sdp_connection);
+
+	mdwc->dwc3_wq = alloc_ordered_workqueue("dwc3_wq", 0);
+	if (!mdwc->dwc3_wq) {
+		pr_err("%s: Unable to create workqueue dwc3_wq\n", __func__);
+		return -ENOMEM;
+	}
+
+	/* Get all clks and gdsc reference */
+	ret = dwc3_msm_get_clk_gdsc(mdwc);
+	if (ret) {
+		dev_err(&pdev->dev, "error getting clock or gdsc.\n");
+		goto err;
+	}
+
+	mdwc->id_state = DWC3_ID_FLOAT;
+	set_bit(ID, &mdwc->inputs);
+
+	mdwc->charging_disabled = of_property_read_bool(node,
+				"qcom,charging-disabled");
+
+	ret = of_property_read_u32(node, "qcom,lpm-to-suspend-delay-ms",
+				&mdwc->lpm_to_suspend_delay);
+	if (ret) {
+		dev_dbg(&pdev->dev, "setting lpm_to_suspend_delay to zero.\n");
+		mdwc->lpm_to_suspend_delay = 0;
+	}
+
+	/*
+	 * DWC3 has separate IRQ line for OTG events (ID/BSV) and for
+	 * DP and DM linestate transitions during low power mode.
+	 */
+	mdwc->hs_phy_irq = platform_get_irq_byname(pdev, "hs_phy_irq");
+	if (mdwc->hs_phy_irq < 0) {
+		dev_err(&pdev->dev, "platform_get_irq for hs_phy_irq failed\n");
+		ret = -EINVAL;
+		goto err;
+	} else {
+		irq_set_status_flags(mdwc->hs_phy_irq, IRQ_NOAUTOEN);
+		ret = devm_request_threaded_irq(&pdev->dev, mdwc->hs_phy_irq,
+					msm_dwc3_pwr_irq,
+					msm_dwc3_pwr_irq_thread,
+					IRQF_TRIGGER_RISING | IRQF_EARLY_RESUME
+					| IRQF_ONESHOT, "hs_phy_irq", mdwc);
+		if (ret) {
+			dev_err(&pdev->dev, "irqreq hs_phy_irq failed: %d\n",
+					ret);
+			goto err;
+		}
+	}
+
+	mdwc->ss_phy_irq = platform_get_irq_byname(pdev, "ss_phy_irq");
+	if (mdwc->ss_phy_irq < 0) {
+		dev_dbg(&pdev->dev, "platform_get_irq for ss_phy_irq failed\n");
+	} else {
+		irq_set_status_flags(mdwc->ss_phy_irq, IRQ_NOAUTOEN);
+		ret = devm_request_threaded_irq(&pdev->dev, mdwc->ss_phy_irq,
+					msm_dwc3_pwr_irq,
+					msm_dwc3_pwr_irq_thread,
+					IRQF_TRIGGER_HIGH | IRQF_EARLY_RESUME
+					| IRQF_ONESHOT,
+					"ss_phy_irq", mdwc);
+		if (ret) {
+			dev_err(&pdev->dev, "irqreq ss_phy_irq failed: %d\n",
+					ret);
+			goto err;
+		}
+	}
+
+	mdwc->pwr_event_irq = platform_get_irq_byname(pdev, "pwr_event_irq");
+	if (mdwc->pwr_event_irq < 0) {
+		dev_err(&pdev->dev, "platform_get_irq for pwr_event_irq failed\n");
+		ret = -EINVAL;
+		goto err;
+	} else {
+		/* will be enabled in dwc3_msm_resume() */
+		irq_set_status_flags(mdwc->pwr_event_irq, IRQ_NOAUTOEN);
+		ret = devm_request_threaded_irq(&pdev->dev, mdwc->pwr_event_irq,
+					msm_dwc3_pwr_irq,
+					msm_dwc3_pwr_irq_thread,
+					IRQF_TRIGGER_RISING | IRQF_EARLY_RESUME,
+					"msm_dwc3", mdwc);
+		if (ret) {
+			dev_err(&pdev->dev, "irqreq pwr_event_irq failed: %d\n",
+					ret);
+			goto err;
+		}
+	}
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "tcsr_base");
+	if (!res) {
+		dev_dbg(&pdev->dev, "missing TCSR memory resource\n");
+	} else {
+		tcsr = devm_ioremap_nocache(&pdev->dev, res->start,
+			resource_size(res));
+		if (IS_ERR_OR_NULL(tcsr)) {
+			dev_dbg(&pdev->dev, "tcsr ioremap failed\n");
+		} else {
+			/* Enable USB3 on the primary USB port. */
+			writel_relaxed(0x1, tcsr);
+			/*
+			 * Ensure that TCSR write is completed before
+			 * USB registers initialization.
+			 */
+			mb();
+		}
+	}
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "core_base");
+	if (!res) {
+		dev_err(&pdev->dev, "missing memory base resource\n");
+		ret = -ENODEV;
+		goto err;
+	}
+
+	mdwc->base = devm_ioremap_nocache(&pdev->dev, res->start,
+			resource_size(res));
+	if (!mdwc->base) {
+		dev_err(&pdev->dev, "ioremap failed\n");
+		ret = -ENODEV;
+		goto err;
+	}
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+							"ahb2phy_base");
+	if (res) {
+		mdwc->ahb2phy_base = devm_ioremap_nocache(&pdev->dev,
+					res->start, resource_size(res));
+		if (IS_ERR_OR_NULL(mdwc->ahb2phy_base)) {
+			dev_err(dev, "couldn't find ahb2phy_base addr.\n");
+			mdwc->ahb2phy_base = NULL;
+		} else {
+			/*
+			 * On some targets cfg_ahb_clk depends on the USB GDSC
+			 * regulator; if cfg_ahb_clk is enabled without first
+			 * turning on the GDSC, the clock is stuck off.
+			 */
+			dwc3_msm_config_gdsc(mdwc, 1);
+			clk_prepare_enable(mdwc->cfg_ahb_clk);
+			/* Configure AHB2PHY for one wait-state read/write */
+			val = readl_relaxed(mdwc->ahb2phy_base +
+					PERIPH_SS_AHB2PHY_TOP_CFG);
+			if (val != ONE_READ_WRITE_WAIT) {
+				writel_relaxed(ONE_READ_WRITE_WAIT,
+					mdwc->ahb2phy_base +
+					PERIPH_SS_AHB2PHY_TOP_CFG);
+				/* complete above write before using USB PHY */
+				mb();
+			}
+			clk_disable_unprepare(mdwc->cfg_ahb_clk);
+			dwc3_msm_config_gdsc(mdwc, 0);
+		}
+	}
+
+	if (of_get_property(pdev->dev.of_node, "qcom,usb-dbm", NULL)) {
+		mdwc->dbm = usb_get_dbm_by_phandle(&pdev->dev, "qcom,usb-dbm");
+		if (IS_ERR(mdwc->dbm)) {
+			dev_err(&pdev->dev, "unable to get dbm device\n");
+			ret = -EPROBE_DEFER;
+			goto err;
+		}
+		/*
+		 * The power event IRQ is required if the DBM signals exit
+		 * from L1 via interrupt.
+		 */
+		if (dbm_l1_lpm_interrupt(mdwc->dbm)) {
+			if (!mdwc->pwr_event_irq) {
+				dev_err(&pdev->dev,
+					"need pwr_event_irq exiting L1\n");
+				ret = -EINVAL;
+				goto err;
+			}
+		}
+	}
+
+	ext_hub_reset_gpio = of_get_named_gpio(node,
+					"qcom,ext-hub-reset-gpio", 0);
+
+	if (gpio_is_valid(ext_hub_reset_gpio)
+		&& (!devm_gpio_request(&pdev->dev, ext_hub_reset_gpio,
+			"qcom,ext-hub-reset-gpio"))) {
+		/* reset external hub */
+		gpio_direction_output(ext_hub_reset_gpio, 1);
+		/*
+		 * Hub reset should be asserted for a minimum of 5
+		 * microseconds before deasserting.
+		 */
+		usleep_range(5, 1000);
+		gpio_direction_output(ext_hub_reset_gpio, 0);
+	}
+
+	if (of_property_read_u32(node, "qcom,dwc-usb3-msm-tx-fifo-size",
+				 &mdwc->tx_fifo_size))
+		dev_err(&pdev->dev,
+			"unable to read platform data tx fifo size\n");
+
+	mdwc->disable_host_mode_pm = of_property_read_bool(node,
+				"qcom,disable-host-mode-pm");
+
+	mdwc->no_wakeup_src_in_hostmode = of_property_read_bool(node,
+				"qcom,no-wakeup-src-in-hostmode");
+	if (mdwc->no_wakeup_src_in_hostmode)
+		dev_dbg(&pdev->dev, "dwc3 host not using wakeup source\n");
+
+	dwc3_set_notifier(&dwc3_msm_notify_event);
+
+	/* Assumes dwc3 is the first DT child of dwc3-msm */
+	dwc3_node = of_get_next_available_child(node, NULL);
+	if (!dwc3_node) {
+		dev_err(&pdev->dev, "failed to find dwc3 child\n");
+		ret = -ENODEV;
+		goto err;
+	}
+
+	ret = of_platform_populate(node, NULL, NULL, &pdev->dev);
+	if (ret) {
+		dev_err(&pdev->dev,
+				"failed to create dwc3 core\n");
+		of_node_put(dwc3_node);
+		goto err;
+	}
+
+	mdwc->dwc3 = of_find_device_by_node(dwc3_node);
+	of_node_put(dwc3_node);
+	if (!mdwc->dwc3) {
+		dev_err(&pdev->dev, "failed to get dwc3 platform device\n");
+		ret = -ENODEV;
+		goto put_dwc3;
+	}
+
+	mdwc->hs_phy = devm_usb_get_phy_by_phandle(&mdwc->dwc3->dev,
+							"usb-phy", 0);
+	if (IS_ERR(mdwc->hs_phy)) {
+		dev_err(&pdev->dev, "unable to get hsphy device\n");
+		ret = PTR_ERR(mdwc->hs_phy);
+		goto put_dwc3;
+	}
+	mdwc->ss_phy = devm_usb_get_phy_by_phandle(&mdwc->dwc3->dev,
+							"usb-phy", 1);
+	if (IS_ERR(mdwc->ss_phy)) {
+		dev_err(&pdev->dev, "unable to get ssphy device\n");
+		ret = PTR_ERR(mdwc->ss_phy);
+		goto put_dwc3;
+	}
+
+	mdwc->bus_scale_table = msm_bus_cl_get_pdata(pdev);
+	if (mdwc->bus_scale_table) {
+		mdwc->bus_perf_client =
+			msm_bus_scale_register_client(mdwc->bus_scale_table);
+	}
+
+	dwc = platform_get_drvdata(mdwc->dwc3);
+	if (!dwc) {
+		dev_err(&pdev->dev, "Failed to get dwc3 device\n");
+		ret = -ENODEV;
+		goto put_dwc3;
+	}
+
+	mdwc->irq_to_affin = platform_get_irq(mdwc->dwc3, 0);
+	mdwc->dwc3_cpu_notifier.notifier_call = dwc3_cpu_notifier_cb;
+
+	if (cpu_to_affin)
+		register_cpu_notifier(&mdwc->dwc3_cpu_notifier);
+
+	/*
+	 * Clocks and regulators will not be turned on until the first time
+	 * runtime PM resume is called. This is to allow for booting up with
+	 * charger already connected so as not to disturb PHY line states.
+	 */
+	mdwc->lpm_flags = MDWC3_POWER_COLLAPSE | MDWC3_SS_PHY_SUSPEND;
+	atomic_set(&dwc->in_lpm, 1);
+	pm_runtime_set_autosuspend_delay(mdwc->dev, 1000);
+	pm_runtime_use_autosuspend(mdwc->dev);
+	device_init_wakeup(mdwc->dev, 1);
+
+	if (of_property_read_bool(node, "qcom,disable-dev-mode-pm"))
+		pm_runtime_get_noresume(mdwc->dev);
+
+	ret = dwc3_msm_extcon_register(mdwc);
+	if (ret)
+		goto put_dwc3;
+
+	ret = of_property_read_u32(node, "qcom,pm-qos-latency",
+				&mdwc->pm_qos_latency);
+	if (ret) {
+		dev_dbg(&pdev->dev, "setting pm-qos-latency to zero.\n");
+		mdwc->pm_qos_latency = 0;
+	}
+
+	mdwc->usb_psy = power_supply_get_by_name("usb");
+	if (!mdwc->usb_psy) {
+		dev_warn(mdwc->dev, "Could not get usb power_supply\n");
+		pval.intval = -EINVAL;
+	} else {
+		power_supply_get_property(mdwc->usb_psy,
+			POWER_SUPPLY_PROP_PRESENT, &pval);
+	}
+
+	mutex_init(&mdwc->suspend_resume_mutex);
+	/* Update initial VBUS/ID state from extcon */
+	if (mdwc->extcon_vbus && extcon_get_cable_state_(mdwc->extcon_vbus,
+							EXTCON_USB))
+		dwc3_msm_vbus_notifier(&mdwc->vbus_nb, true, mdwc->extcon_vbus);
+	else if (mdwc->extcon_id && extcon_get_cable_state_(mdwc->extcon_id,
+							EXTCON_USB_HOST))
+		dwc3_msm_id_notifier(&mdwc->id_nb, true, mdwc->extcon_id);
+	else if (!pval.intval) {
+		/* USB cable is not connected */
+		schedule_delayed_work(&mdwc->sm_work, 0);
+	} else {
+		if (pval.intval > 0)
+			dev_info(mdwc->dev, "charger detection in progress\n");
+	}
+
+	device_create_file(&pdev->dev, &dev_attr_mode);
+	device_create_file(&pdev->dev, &dev_attr_speed);
+	device_create_file(&pdev->dev, &dev_attr_xhci_link_compliance);
+	device_create_file(&pdev->dev, &dev_attr_usb_compliance_mode);
+
+	host_mode = usb_get_dr_mode(&mdwc->dwc3->dev) == USB_DR_MODE_HOST;
+	if (host_mode ||
+		(dwc->is_drd && !of_property_read_bool(node, "extcon"))) {
+		dev_dbg(&pdev->dev, "DWC3 in default host mode\n");
+		mdwc->host_only_mode = true;
+		mdwc->id_state = DWC3_ID_GROUND;
+		dwc3_ext_event_notify(mdwc);
+	}
+
+	return 0;
+
+put_dwc3:
+	if (mdwc->bus_perf_client)
+		msm_bus_scale_unregister_client(mdwc->bus_perf_client);
+	of_platform_depopulate(&pdev->dev);
+err:
+	destroy_workqueue(mdwc->dwc3_wq);
+	return ret;
+}
+
+static int dwc3_msm_remove(struct platform_device *pdev)
+{
+	struct dwc3_msm	*mdwc = platform_get_drvdata(pdev);
+	int ret_pm;
+
+	device_remove_file(&pdev->dev, &dev_attr_mode);
+	device_remove_file(&pdev->dev, &dev_attr_speed);
+	device_remove_file(&pdev->dev, &dev_attr_xhci_link_compliance);
+	device_remove_file(&pdev->dev, &dev_attr_usb_compliance_mode);
+
+	if (cpu_to_affin)
+		unregister_cpu_notifier(&mdwc->dwc3_cpu_notifier);
+
+	/*
+	 * In case of system suspend, pm_runtime_get_sync fails.
+	 * Hence turn ON the clocks manually.
+	 */
+	ret_pm = pm_runtime_get_sync(mdwc->dev);
+	dbg_event(0xFF, "Remov gsyn", ret_pm);
+	if (ret_pm < 0) {
+		dev_err(mdwc->dev,
+			"pm_runtime_get_sync failed with %d\n", ret_pm);
+		if (mdwc->noc_aggr_clk)
+			clk_prepare_enable(mdwc->noc_aggr_clk);
+		clk_prepare_enable(mdwc->utmi_clk);
+		clk_prepare_enable(mdwc->core_clk);
+		clk_prepare_enable(mdwc->iface_clk);
+		clk_prepare_enable(mdwc->sleep_clk);
+		if (mdwc->bus_aggr_clk)
+			clk_prepare_enable(mdwc->bus_aggr_clk);
+		clk_prepare_enable(mdwc->xo_clk);
+	}
+
+	cancel_delayed_work_sync(&mdwc->perf_vote_work);
+	cancel_delayed_work_sync(&mdwc->sm_work);
+
+	if (mdwc->hs_phy)
+		mdwc->hs_phy->flags &= ~PHY_HOST_MODE;
+	of_platform_depopulate(&pdev->dev);
+
+	dbg_event(0xFF, "Remov put", 0);
+	pm_runtime_disable(mdwc->dev);
+	pm_runtime_barrier(mdwc->dev);
+	pm_runtime_put_sync(mdwc->dev);
+	pm_runtime_set_suspended(mdwc->dev);
+	device_wakeup_disable(mdwc->dev);
+
+	if (mdwc->bus_perf_client)
+		msm_bus_scale_unregister_client(mdwc->bus_perf_client);
+
+	if (!IS_ERR_OR_NULL(mdwc->vbus_reg))
+		regulator_disable(mdwc->vbus_reg);
+
+	disable_irq(mdwc->hs_phy_irq);
+	if (mdwc->ss_phy_irq)
+		disable_irq(mdwc->ss_phy_irq);
+	disable_irq(mdwc->pwr_event_irq);
+
+	clk_disable_unprepare(mdwc->utmi_clk);
+	clk_set_rate(mdwc->core_clk, 19200000);
+	clk_disable_unprepare(mdwc->core_clk);
+	clk_disable_unprepare(mdwc->iface_clk);
+	clk_disable_unprepare(mdwc->sleep_clk);
+	/* xo_clk is devm-managed, so no explicit clk_put() is needed */
+	clk_disable_unprepare(mdwc->xo_clk);
+
+	dwc3_msm_config_gdsc(mdwc, 0);
+
+	return 0;
+}
+
+static int dwc3_msm_host_notifier(struct notifier_block *nb,
+	unsigned long event, void *ptr)
+{
+	struct dwc3_msm *mdwc = container_of(nb, struct dwc3_msm, host_nb);
+	struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
+	struct usb_device *udev = ptr;
+	union power_supply_propval pval;
+	unsigned max_power;
+
+	if (event != USB_DEVICE_ADD && event != USB_DEVICE_REMOVE)
+		return NOTIFY_DONE;
+
+	if (!mdwc->usb_psy) {
+		mdwc->usb_psy = power_supply_get_by_name("usb");
+		if (!mdwc->usb_psy)
+			return NOTIFY_DONE;
+	}
+
+	/*
+	 * For direct-attach devices, the new udev is a direct child of the
+	 * root hub, i.e. dwc -> xhci -> root_hub -> udev. The root hub's
+	 * udev->parent is NULL, so traverse the struct device hierarchy.
+	 */
+	if (udev->parent && !udev->parent->parent &&
+			udev->dev.parent->parent == &dwc->xhci->dev) {
+		if (event == USB_DEVICE_ADD && udev->actconfig) {
+			if (!dwc3_msm_is_ss_rhport_connected(mdwc)) {
+				/*
+				 * Core clock rate can be reduced only if root
+				 * hub SS port is not enabled/connected.
+				 */
+				clk_set_rate(mdwc->core_clk,
+				mdwc->core_clk_rate_hs);
+				dev_dbg(mdwc->dev,
+					"set hs core clk rate %ld\n",
+					mdwc->core_clk_rate_hs);
+				mdwc->max_rh_port_speed = USB_SPEED_HIGH;
+			} else {
+				mdwc->max_rh_port_speed = USB_SPEED_SUPER;
+			}
+
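+			/*
+			 * bMaxPower is in units of 8mA for SuperSpeed
+			 * devices and 2mA otherwise.
+			 */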
+			if (udev->speed >= USB_SPEED_SUPER)
+				max_power = udev->actconfig->desc.bMaxPower * 8;
+			else
+				max_power = udev->actconfig->desc.bMaxPower * 2;
+
+			dev_dbg(mdwc->dev, "%s configured bMaxPower:%d (mA)\n",
+					dev_name(&udev->dev), max_power);
+
+			/* inform PMIC of max power so it can optimize boost */
+			pval.intval = max_power * 1000;
+			power_supply_set_property(mdwc->usb_psy,
+					POWER_SUPPLY_PROP_BOOST_CURRENT, &pval);
+		} else {
+			pval.intval = 0;
+			power_supply_set_property(mdwc->usb_psy,
+					POWER_SUPPLY_PROP_BOOST_CURRENT, &pval);
+
+			/* set rate back to default core clk rate */
+			clk_set_rate(mdwc->core_clk, mdwc->core_clk_rate);
+			dev_dbg(mdwc->dev, "set core clk rate %ld\n",
+				mdwc->core_clk_rate);
+			mdwc->max_rh_port_speed = USB_SPEED_UNKNOWN;
+		}
+	}
+
+	return NOTIFY_DONE;
+}
+
+static void msm_dwc3_perf_vote_update(struct dwc3_msm *mdwc, bool perf_mode)
+{
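+	/* state kept in a static local: a single controller is assumed */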
+	static bool curr_perf_mode;
+	int latency = mdwc->pm_qos_latency;
+
+	if ((curr_perf_mode == perf_mode) || !latency)
+		return;
+
+	if (perf_mode)
+		pm_qos_update_request(&mdwc->pm_qos_req_dma, latency);
+	else
+		pm_qos_update_request(&mdwc->pm_qos_req_dma,
+						PM_QOS_DEFAULT_VALUE);
+
+	curr_perf_mode = perf_mode;
+	pr_debug("%s: latency updated to: %d\n", __func__,
+			perf_mode ? latency : PM_QOS_DEFAULT_VALUE);
+}
+
+static void msm_dwc3_perf_vote_work(struct work_struct *w)
+{
+	struct dwc3_msm *mdwc = container_of(w, struct dwc3_msm,
+						perf_vote_work.work);
+	struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
+	bool in_perf_mode = false;
+	int latency = mdwc->pm_qos_latency;
+
+	if (!latency)
+		return;
+
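+	/* treat >= PM_QOS_THRESHOLD interrupts per sample window as busy */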
+	if (dwc->irq_cnt - dwc->last_irq_cnt >= PM_QOS_THRESHOLD)
+		in_perf_mode = true;
+
+	pr_debug("%s: in_perf_mode:%u, interrupts in last sample:%lu\n",
+		 __func__, in_perf_mode, (dwc->irq_cnt - dwc->last_irq_cnt));
+
+	dwc->last_irq_cnt = dwc->irq_cnt;
+	msm_dwc3_perf_vote_update(mdwc, in_perf_mode);
+	schedule_delayed_work(&mdwc->perf_vote_work,
+			msecs_to_jiffies(1000 * PM_QOS_SAMPLE_SEC));
+}
+
+#define VBUS_REG_CHECK_DELAY	(msecs_to_jiffies(1000))
+
+/**
+ * dwc3_otg_start_host - helper function to start/stop the host controller
+ *
+ * @mdwc: Pointer to the dwc3_msm structure.
+ * @on: start / stop the host controller driver.
+ *
+ * Returns 0 on success otherwise negative errno.
+ */
+static int dwc3_otg_start_host(struct dwc3_msm *mdwc, int on)
+{
+	struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
+	int ret = 0;
+
+	if (!dwc->xhci)
+		return -EINVAL;
+
+	/*
+	 * The vbus_reg pointer can hold one of several values:
+	 * NULL: regulator_get() hasn't been called, or was previously deferred
+	 * IS_ERR: regulator could not be obtained, so skip using it
+	 * Valid pointer otherwise
+	 */
+	if (!mdwc->vbus_reg) {
+		mdwc->vbus_reg = devm_regulator_get_optional(mdwc->dev,
+					"vbus_dwc3");
+		if (IS_ERR(mdwc->vbus_reg) &&
+				PTR_ERR(mdwc->vbus_reg) == -EPROBE_DEFER) {
+			/* regulators may not be ready, so retry later */
+			mdwc->vbus_reg = NULL;
+			return -EPROBE_DEFER;
+		}
+	}
+
+	if (on) {
+		dev_dbg(mdwc->dev, "%s: turn on host\n", __func__);
+
+		pm_runtime_get_sync(mdwc->dev);
+		mdwc->hs_phy->flags |= PHY_HOST_MODE;
+		if (dwc->maximum_speed == USB_SPEED_SUPER) {
+			mdwc->ss_phy->flags |= PHY_HOST_MODE;
+			usb_phy_notify_connect(mdwc->ss_phy,
+						USB_SPEED_SUPER);
+		}
+
+		usb_phy_notify_connect(mdwc->hs_phy, USB_SPEED_HIGH);
+		dbg_event(0xFF, "StrtHost gync",
+			atomic_read(&mdwc->dev->power.usage_count));
+		if (!IS_ERR(mdwc->vbus_reg))
+			ret = regulator_enable(mdwc->vbus_reg);
+		if (ret) {
+			dev_err(mdwc->dev, "unable to enable vbus_reg\n");
+			mdwc->hs_phy->flags &= ~PHY_HOST_MODE;
+			mdwc->ss_phy->flags &= ~PHY_HOST_MODE;
+			pm_runtime_put_sync(mdwc->dev);
+			dbg_event(0xFF, "vregerr psync",
+				atomic_read(&mdwc->dev->power.usage_count));
+			return ret;
+		}
+
+		dwc3_set_mode(dwc, DWC3_GCTL_PRTCAP_HOST);
+
+		mdwc->host_nb.notifier_call = dwc3_msm_host_notifier;
+		usb_register_notify(&mdwc->host_nb);
+
+		mdwc->usbdev_nb.notifier_call = msm_dwc3_usbdev_notify;
+		usb_register_atomic_notify(&mdwc->usbdev_nb);
+		/*
+		 * FIXME If the micro-A cable is disconnected during system
+		 * suspend, the xhci platform device is removed before runtime
+		 * PM is enabled for the xhci device. Due to this, disable_depth
+		 * becomes greater than one and runtime PM is not enabled on the
+		 * next micro-A connect. Fix this by calling pm_runtime_init for
+		 * the xhci device.
+		 */
+		pm_runtime_init(&dwc->xhci->dev);
+		ret = platform_device_add(dwc->xhci);
+		if (ret) {
+			dev_err(mdwc->dev,
+				"%s: failed to add XHCI pdev ret=%d\n",
+				__func__, ret);
+			if (!IS_ERR(mdwc->vbus_reg))
+				regulator_disable(mdwc->vbus_reg);
+			mdwc->hs_phy->flags &= ~PHY_HOST_MODE;
+			mdwc->ss_phy->flags &= ~PHY_HOST_MODE;
+			pm_runtime_put_sync(mdwc->dev);
+			dbg_event(0xFF, "pdeverr psync",
+				atomic_read(&mdwc->dev->power.usage_count));
+			usb_unregister_notify(&mdwc->host_nb);
+			return ret;
+		}
+
+		/*
+		 * If the Compliance Transition Capability (CTC) flag of the
+		 * HCCPARAMS2 register is set and the xhci_link_compliance sysfs
+		 * param has been enabled by the user for the SuperSpeed host
+		 * controller, then write 10 (Link in Compliance Mode State)
+		 * into the Port Link State (PLS) field of the PORTSC register
+		 * of the 3.0 host controller, which is at an offset of
+		 * USB3_PORTSC + 0x10 from the DWC3 base address. Also, disable
+		 * runtime PM of the 3.0 root hub (the root hub of the
+		 * shared_hcd of the xhci device).
+		 */
+		if (HCC_CTC(dwc3_msm_read_reg(mdwc->base, USB3_HCCPARAMS2))
+				&& mdwc->xhci_ss_compliance_enable
+				&& dwc->maximum_speed == USB_SPEED_SUPER) {
+			dwc3_msm_write_reg(mdwc->base, USB3_PORTSC + 0x10,
+					0x10340);
+			pm_runtime_disable(&hcd_to_xhci(platform_get_drvdata(
+				dwc->xhci))->shared_hcd->self.root_hub->dev);
+		}
+
+		/*
+		 * In some cases the USB PHY is observed not to enter suspend
+		 * with the host mode suspend functionality. Hence disable
+		 * XHCI's runtime PM here if disable_host_mode_pm is set.
+		 */
+		if (mdwc->disable_host_mode_pm)
+			pm_runtime_disable(&dwc->xhci->dev);
+
+		mdwc->in_host_mode = true;
+		dwc3_usb3_phy_suspend(dwc, true);
+
+		/* xHCI should have incremented child count as necessary */
+		dbg_event(0xFF, "StrtHost psync",
+			atomic_read(&mdwc->dev->power.usage_count));
+		pm_runtime_mark_last_busy(mdwc->dev);
+		pm_runtime_put_sync_autosuspend(mdwc->dev);
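+		/*
+		 * Affine the PM QoS request to the controller IRQ so that
+		 * only the CPU servicing USB interrupts is held out of deep
+		 * idle states.
+		 */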
+#ifdef CONFIG_SMP
+		mdwc->pm_qos_req_dma.type = PM_QOS_REQ_AFFINE_IRQ;
+		mdwc->pm_qos_req_dma.irq = dwc->irq;
+#endif
+		pm_qos_add_request(&mdwc->pm_qos_req_dma,
+				PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);
+		/* start in perf mode for better performance initially */
+		msm_dwc3_perf_vote_update(mdwc, true);
+		schedule_delayed_work(&mdwc->perf_vote_work,
+				msecs_to_jiffies(1000 * PM_QOS_SAMPLE_SEC));
+	} else {
+		dev_dbg(mdwc->dev, "%s: turn off host\n", __func__);
+
+		usb_unregister_atomic_notify(&mdwc->usbdev_nb);
+		if (!IS_ERR(mdwc->vbus_reg))
+			ret = regulator_disable(mdwc->vbus_reg);
+		if (ret) {
+			dev_err(mdwc->dev, "unable to disable vbus_reg\n");
+			return ret;
+		}
+
+		cancel_delayed_work_sync(&mdwc->perf_vote_work);
+		msm_dwc3_perf_vote_update(mdwc, false);
+		pm_qos_remove_request(&mdwc->pm_qos_req_dma);
+
+		pm_runtime_get_sync(mdwc->dev);
+		dbg_event(0xFF, "StopHost gsync",
+			atomic_read(&mdwc->dev->power.usage_count));
+		usb_phy_notify_disconnect(mdwc->hs_phy, USB_SPEED_HIGH);
+		if (mdwc->ss_phy->flags & PHY_HOST_MODE) {
+			usb_phy_notify_disconnect(mdwc->ss_phy,
+					USB_SPEED_SUPER);
+			mdwc->ss_phy->flags &= ~PHY_HOST_MODE;
+		}
+
+		mdwc->hs_phy->flags &= ~PHY_HOST_MODE;
+		platform_device_del(dwc->xhci);
+		usb_unregister_notify(&mdwc->host_nb);
+
+		/*
+		 * Perform USB hardware RESET (both core reset and DBM reset)
+		 * when moving from host to peripheral. This is required for
+		 * peripheral mode to work.
+		 */
+		dwc3_msm_block_reset(mdwc, true);
+
+		dwc3_usb3_phy_suspend(dwc, false);
+		dwc3_set_mode(dwc, DWC3_GCTL_PRTCAP_DEVICE);
+
+		mdwc->in_host_mode = false;
+
+		/* re-init core and OTG registers as block reset clears these */
+		if (!mdwc->host_only_mode)
+			dwc3_post_host_reset_core_init(dwc);
+
+		pm_runtime_mark_last_busy(mdwc->dev);
+		pm_runtime_put_sync_autosuspend(mdwc->dev);
+		dbg_event(0xFF, "StopHost psync",
+			atomic_read(&mdwc->dev->power.usage_count));
+	}
+
+	return 0;
+}
+
+static void dwc3_override_vbus_status(struct dwc3_msm *mdwc, bool vbus_present)
+{
+	struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
+
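+	/*
+	 * dwc3_msm_write_readback() updates only the masked bits and reads
+	 * the register back to confirm the write took effect.
+	 */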
+	/* Update OTG VBUS Valid from HSPHY to controller */
+	dwc3_msm_write_readback(mdwc->base, HS_PHY_CTRL_REG,
+		vbus_present ? UTMI_OTG_VBUS_VALID | SW_SESSVLD_SEL :
+		UTMI_OTG_VBUS_VALID,
+		vbus_present ? UTMI_OTG_VBUS_VALID | SW_SESSVLD_SEL : 0);
+
+	/* Update only if Super Speed is supported */
+	if (dwc->maximum_speed == USB_SPEED_SUPER) {
+		/* Update VBUS Valid from SSPHY to controller */
+		dwc3_msm_write_readback(mdwc->base, SS_PHY_CTRL_REG,
+			LANE0_PWR_PRESENT,
+			vbus_present ? LANE0_PWR_PRESENT : 0);
+	}
+}
+
+/**
+ * dwc3_otg_start_peripheral -  bind/unbind the peripheral controller.
+ *
+ * @mdwc: Pointer to the dwc3_msm structure.
+ * @on:   Turn ON/OFF the gadget.
+ *
+ * Returns 0 on success otherwise negative errno.
+ */
+static int dwc3_otg_start_peripheral(struct dwc3_msm *mdwc, int on)
+{
+	struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
+
+	pm_runtime_get_sync(mdwc->dev);
+	dbg_event(0xFF, "StrtGdgt gsync",
+		atomic_read(&mdwc->dev->power.usage_count));
+
+	if (on) {
+		dev_dbg(mdwc->dev, "%s: turn on gadget %s\n",
+					__func__, dwc->gadget.name);
+
+		dwc3_override_vbus_status(mdwc, true);
+		usb_phy_notify_connect(mdwc->hs_phy, USB_SPEED_HIGH);
+		usb_phy_notify_connect(mdwc->ss_phy, USB_SPEED_SUPER);
+
+		/*
+		 * Core reset is not required when starting the peripheral;
+		 * only a DBM reset is needed, so perform just that here.
+		 */
+		dwc3_msm_block_reset(mdwc, false);
+
+		dwc3_set_mode(dwc, DWC3_GCTL_PRTCAP_DEVICE);
+		usb_gadget_vbus_connect(&dwc->gadget);
+#ifdef CONFIG_SMP
+		mdwc->pm_qos_req_dma.type = PM_QOS_REQ_AFFINE_IRQ;
+		mdwc->pm_qos_req_dma.irq = dwc->irq;
+#endif
+		pm_qos_add_request(&mdwc->pm_qos_req_dma,
+				PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);
+		/* start in perf mode for better performance initially */
+		msm_dwc3_perf_vote_update(mdwc, true);
+		schedule_delayed_work(&mdwc->perf_vote_work,
+				msecs_to_jiffies(1000 * PM_QOS_SAMPLE_SEC));
+	} else {
+		dev_dbg(mdwc->dev, "%s: turn off gadget %s\n",
+					__func__, dwc->gadget.name);
+		cancel_delayed_work_sync(&mdwc->perf_vote_work);
+		msm_dwc3_perf_vote_update(mdwc, false);
+		pm_qos_remove_request(&mdwc->pm_qos_req_dma);
+
+		usb_gadget_vbus_disconnect(&dwc->gadget);
+		usb_phy_notify_disconnect(mdwc->hs_phy, USB_SPEED_HIGH);
+		usb_phy_notify_disconnect(mdwc->ss_phy, USB_SPEED_SUPER);
+		dwc3_override_vbus_status(mdwc, false);
+		dwc3_usb3_phy_suspend(dwc, false);
+	}
+
+	pm_runtime_put_sync(mdwc->dev);
+	dbg_event(0xFF, "StopGdgt psync",
+		atomic_read(&mdwc->dev->power.usage_count));
+
+	return 0;
+}
+
+int get_psy_type(struct dwc3_msm *mdwc)
+{
+	union power_supply_propval pval = {0};
+
+	if (mdwc->charging_disabled)
+		return -EINVAL;
+
+	if (!mdwc->usb_psy) {
+		mdwc->usb_psy = power_supply_get_by_name("usb");
+		if (!mdwc->usb_psy) {
+			dev_err(mdwc->dev, "Could not get usb psy\n");
+			return -ENODEV;
+		}
+	}
+
+	power_supply_get_property(mdwc->usb_psy, POWER_SUPPLY_PROP_REAL_TYPE,
+			&pval);
+
+	return pval.intval;
+}
+
+static int dwc3_msm_gadget_vbus_draw(struct dwc3_msm *mdwc, unsigned mA)
+{
+	union power_supply_propval pval = {0};
+	int ret, psy_type;
+
+	psy_type = get_psy_type(mdwc);
+	if (psy_type == POWER_SUPPLY_TYPE_USB_FLOAT) {
+		if (!mA)
+			pval.intval = -ETIMEDOUT;
+		else
+			pval.intval = 1000 * mA;
+		goto set_prop;
+	}
+
+	if (mdwc->max_power == mA || psy_type != POWER_SUPPLY_TYPE_USB)
+		return 0;
+
+	dev_info(mdwc->dev, "Avail curr from USB = %u\n", mA);
+	/* Set max current limit in uA */
+	pval.intval = 1000 * mA;
+
+set_prop:
+	ret = power_supply_set_property(mdwc->usb_psy,
+				POWER_SUPPLY_PROP_SDP_CURRENT_MAX, &pval);
+	if (ret) {
+		dev_dbg(mdwc->dev, "power supply error when setting property\n");
+		return ret;
+	}
+
+	mdwc->max_power = mA;
+	return 0;
+}
+
+/**
+ * dwc3_otg_sm_work - OTG state machine work function.
+ *
+ * @w: Pointer to the OTG state machine work_struct.
+ *
+ * NOTE: After any change in otg_state, we must reschedule the state machine.
+ */
+static void dwc3_otg_sm_work(struct work_struct *w)
+{
+	struct dwc3_msm *mdwc = container_of(w, struct dwc3_msm, sm_work.work);
+	struct dwc3 *dwc = NULL;
+	bool work = 0;
+	int ret = 0;
+	unsigned long delay = 0;
+	const char *state;
+
+	if (mdwc->dwc3)
+		dwc = platform_get_drvdata(mdwc->dwc3);
+
+	if (!dwc) {
+		dev_err(mdwc->dev, "dwc is NULL.\n");
+		return;
+	}
+
+	state = usb_otg_state_string(mdwc->otg_state);
+	dev_dbg(mdwc->dev, "%s state\n", state);
+	dbg_event(0xFF, state, 0);
+
+	/* Check OTG state */
+	switch (mdwc->otg_state) {
+	case OTG_STATE_UNDEFINED:
+		/* put controller and phy in suspend if no cable connected */
+		if (test_bit(ID, &mdwc->inputs) &&
+				!test_bit(B_SESS_VLD, &mdwc->inputs)) {
+			dbg_event(0xFF, "undef_id_!bsv", 0);
+			pm_runtime_set_active(mdwc->dev);
+			pm_runtime_enable(mdwc->dev);
+			pm_runtime_get_noresume(mdwc->dev);
+			dwc3_msm_resume(mdwc);
+			pm_runtime_put_sync(mdwc->dev);
+			dbg_event(0xFF, "Undef NoUSB",
+				atomic_read(&mdwc->dev->power.usage_count));
+			mdwc->otg_state = OTG_STATE_B_IDLE;
+			break;
+		}
+
+		dbg_event(0xFF, "Exit UNDEF", 0);
+		mdwc->otg_state = OTG_STATE_B_IDLE;
+		pm_runtime_set_suspended(mdwc->dev);
+		pm_runtime_enable(mdwc->dev);
+		/* fall-through */
+	case OTG_STATE_B_IDLE:
+		if (!test_bit(ID, &mdwc->inputs)) {
+			dev_dbg(mdwc->dev, "!id\n");
+			mdwc->otg_state = OTG_STATE_A_IDLE;
+			work = 1;
+		} else if (test_bit(B_SESS_VLD, &mdwc->inputs)) {
+			dev_dbg(mdwc->dev, "b_sess_vld\n");
+			if (get_psy_type(mdwc) == POWER_SUPPLY_TYPE_USB_FLOAT)
+				queue_delayed_work(mdwc->dwc3_wq,
+					&mdwc->sdp_check,
+					msecs_to_jiffies(SDP_CONNETION_CHECK_TIME));
+			/*
+			 * Increment pm usage count upon cable connect. Count
+			 * is decremented in OTG_STATE_B_PERIPHERAL state on
+			 * cable disconnect or in bus suspend.
+			 */
+			pm_runtime_get_sync(mdwc->dev);
+			dbg_event(0xFF, "BIDLE gsync",
+				atomic_read(&mdwc->dev->power.usage_count));
+			dwc3_otg_start_peripheral(mdwc, 1);
+			mdwc->otg_state = OTG_STATE_B_PERIPHERAL;
+			work = 1;
+			break;
+		} else {
+			dwc3_msm_gadget_vbus_draw(mdwc, 0);
+			dev_dbg(mdwc->dev, "Cable disconnected\n");
+		}
+		break;
+
+	case OTG_STATE_B_PERIPHERAL:
+		if (!test_bit(B_SESS_VLD, &mdwc->inputs) ||
+				!test_bit(ID, &mdwc->inputs)) {
+			dev_dbg(mdwc->dev, "!id || !bsv\n");
+			mdwc->otg_state = OTG_STATE_B_IDLE;
+			cancel_delayed_work_sync(&mdwc->sdp_check);
+			dwc3_otg_start_peripheral(mdwc, 0);
+			/*
+			 * Decrement pm usage count upon cable disconnect
+			 * which was incremented upon cable connect in
+			 * OTG_STATE_B_IDLE state
+			 */
+			pm_runtime_put_sync(mdwc->dev);
+			dbg_event(0xFF, "!BSV psync",
+				atomic_read(&mdwc->dev->power.usage_count));
+			work = 1;
+		} else if (test_bit(B_SUSPEND, &mdwc->inputs) &&
+			test_bit(B_SESS_VLD, &mdwc->inputs)) {
+			dev_dbg(mdwc->dev, "BPER bsv && susp\n");
+			mdwc->otg_state = OTG_STATE_B_SUSPEND;
+			/*
+			 * Decrement pm usage count upon bus suspend.
+			 * Count was incremented either upon cable
+			 * connect in OTG_STATE_B_IDLE or host
+			 * initiated resume after bus suspend in
+			 * OTG_STATE_B_SUSPEND state
+			 */
+			pm_runtime_mark_last_busy(mdwc->dev);
+			pm_runtime_put_autosuspend(mdwc->dev);
+			dbg_event(0xFF, "SUSP put",
+				atomic_read(&mdwc->dev->power.usage_count));
+		}
+		break;
+
+	case OTG_STATE_B_SUSPEND:
+		if (!test_bit(B_SESS_VLD, &mdwc->inputs)) {
+			dev_dbg(mdwc->dev, "BSUSP: !bsv\n");
+			mdwc->otg_state = OTG_STATE_B_IDLE;
+			cancel_delayed_work_sync(&mdwc->sdp_check);
+			dwc3_otg_start_peripheral(mdwc, 0);
+		} else if (!test_bit(B_SUSPEND, &mdwc->inputs)) {
+			dev_dbg(mdwc->dev, "BSUSP !susp\n");
+			mdwc->otg_state = OTG_STATE_B_PERIPHERAL;
+			/*
+			 * Increment pm usage count upon host
+			 * initiated resume. Count was decremented
+			 * upon bus suspend in
+			 * OTG_STATE_B_PERIPHERAL state.
+			 */
+			pm_runtime_get_sync(mdwc->dev);
+			dbg_event(0xFF, "!SUSP gsync",
+				atomic_read(&mdwc->dev->power.usage_count));
+		}
+		break;
+
+	case OTG_STATE_A_IDLE:
+		/* Switch to A-Device */
+		if (test_bit(ID, &mdwc->inputs)) {
+			dev_dbg(mdwc->dev, "id\n");
+			mdwc->otg_state = OTG_STATE_B_IDLE;
+			mdwc->vbus_retry_count = 0;
+			work = 1;
+		} else {
+			mdwc->otg_state = OTG_STATE_A_HOST;
+			ret = dwc3_otg_start_host(mdwc, 1);
+			if ((ret == -EPROBE_DEFER) &&
+						mdwc->vbus_retry_count < 3) {
+				/*
+				 * regulator_get failed because the regulator
+				 * driver is not up yet; try to start the host
+				 * again after 1 second.
+				 */
+				mdwc->otg_state = OTG_STATE_A_IDLE;
+				dev_dbg(mdwc->dev, "Unable to get vbus regulator. Retrying...\n");
+				delay = VBUS_REG_CHECK_DELAY;
+				work = 1;
+				mdwc->vbus_retry_count++;
+			} else if (ret) {
+				dev_err(mdwc->dev, "unable to start host\n");
+				mdwc->otg_state = OTG_STATE_A_IDLE;
+				goto ret;
+			}
+			pm_wakeup_event(mdwc->dev, DWC3_WAKEUP_SRC_TIMEOUT);
+		}
+		break;
+
+	case OTG_STATE_A_HOST:
+		if (test_bit(ID, &mdwc->inputs) || mdwc->hc_died) {
+			dbg_event(0xFF, "id || hc_died", 0);
+			dev_dbg(mdwc->dev, "%s state id || hc_died\n", state);
+			dwc3_otg_start_host(mdwc, 0);
+			mdwc->otg_state = OTG_STATE_B_IDLE;
+			mdwc->vbus_retry_count = 0;
+			mdwc->hc_died = false;
+			work = 1;
+		} else {
+			dev_dbg(mdwc->dev, "still in a_host state. Resuming root hub.\n");
+			dbg_event(0xFF, "XHCIResume", 0);
+			if (dwc)
+				pm_runtime_resume(&dwc->xhci->dev);
+			pm_wakeup_event(mdwc->dev, DWC3_WAKEUP_SRC_TIMEOUT);
+		}
+		break;
+
+	default:
+		dev_err(mdwc->dev, "%s: invalid otg-state\n", __func__);
+	}
+
+	if (work)
+		schedule_delayed_work(&mdwc->sm_work, delay);
+
+ret:
+	return;
+}
+
+static int dwc3_msm_pm_prepare(struct device *dev)
+{
+	struct dwc3_msm *mdwc = dev_get_drvdata(dev);
+	struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
+	struct usb_hcd		*hcd;
+	struct xhci_hcd		*xhci;
+
+	dev_dbg(dev, "dwc3-msm PM prepare, lpm:%u\n", atomic_read(&dwc->in_lpm));
+	dbg_event(0xFF, "PM Prep", 0);
+	if (!mdwc->in_host_mode || !mdwc->no_wakeup_src_in_hostmode)
+		return 0;
+
+	hcd = dev_get_drvdata(&dwc->xhci->dev);
+	xhci = hcd_to_xhci(hcd);
+	flush_delayed_work(&mdwc->sm_work);
+
+	/* If in LPM, prevent the USB core from runtime resuming in pm_suspend */
+	if (atomic_read(&dwc->in_lpm)) {
+		hcd_to_bus(hcd)->skip_resume = true;
+		hcd_to_bus(xhci->shared_hcd)->skip_resume = true;
+	} else {
+		hcd_to_bus(hcd)->skip_resume = false;
+		hcd_to_bus(xhci->shared_hcd)->skip_resume = false;
+	}
+
+	return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int dwc3_msm_pm_suspend(struct device *dev)
+{
+	int ret = 0;
+	struct dwc3_msm *mdwc = dev_get_drvdata(dev);
+	struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
+
+	dev_dbg(dev, "dwc3-msm PM suspend\n");
+	dbg_event(0xFF, "PM Sus", 0);
+
+	flush_workqueue(mdwc->dwc3_wq);
+	if (!atomic_read(&dwc->in_lpm) && !mdwc->no_wakeup_src_in_hostmode) {
+		dev_err(mdwc->dev, "Abort PM suspend!! (USB is outside LPM)\n");
+		return -EBUSY;
+	}
+
+	ret = dwc3_msm_suspend(mdwc);
+	if (!ret)
+		atomic_set(&mdwc->pm_suspended, 1);
+
+	return ret;
+}
+
+static int dwc3_msm_pm_resume(struct device *dev)
+{
+	struct dwc3_msm *mdwc = dev_get_drvdata(dev);
+
+	dev_dbg(dev, "dwc3-msm PM resume\n");
+
+	dbg_event(0xFF, "PM Res", 0);
+
+	/* flush to avoid race in read/write of pm_suspended */
+	flush_workqueue(mdwc->dwc3_wq);
+	atomic_set(&mdwc->pm_suspended, 0);
+
+	/* Resume h/w in host mode as it may not be runtime suspended */
+	if (mdwc->no_wakeup_src_in_hostmode && !test_bit(ID, &mdwc->inputs))
+		dwc3_msm_resume(mdwc);
+
+	/* kick in otg state machine */
+	if (mdwc->vbus_active || !mdwc->id_state)
+		queue_work(mdwc->dwc3_wq, &mdwc->resume_work);
+
+	return 0;
+}
+#endif
+
+#ifdef CONFIG_PM
+static int dwc3_msm_runtime_idle(struct device *dev)
+{
+	dev_dbg(dev, "DWC3-msm runtime idle\n");
+	dbg_event(0xFF, "RT Idle", 0);
+
+	return 0;
+}
+
+static int dwc3_msm_runtime_suspend(struct device *dev)
+{
+	struct dwc3_msm *mdwc = dev_get_drvdata(dev);
+
+	dev_dbg(dev, "DWC3-msm runtime suspend\n");
+	dbg_event(0xFF, "RT Sus", 0);
+
+	return dwc3_msm_suspend(mdwc);
+}
+
+static int dwc3_msm_runtime_resume(struct device *dev)
+{
+	struct dwc3_msm *mdwc = dev_get_drvdata(dev);
+
+	dev_dbg(dev, "DWC3-msm runtime resume\n");
+	dbg_event(0xFF, "RT Res", 0);
+
+	return dwc3_msm_resume(mdwc);
+}
+#endif
+
+static const struct dev_pm_ops dwc3_msm_dev_pm_ops = {
+	.prepare =		dwc3_msm_pm_prepare,
+	SET_SYSTEM_SLEEP_PM_OPS(dwc3_msm_pm_suspend, dwc3_msm_pm_resume)
+	SET_RUNTIME_PM_OPS(dwc3_msm_runtime_suspend, dwc3_msm_runtime_resume,
+				dwc3_msm_runtime_idle)
+};
+
+static const struct of_device_id of_dwc3_match[] = {
+	{
+		.compatible = "qcom,dwc-usb3-msm",
+	},
+	{ },
+};
+MODULE_DEVICE_TABLE(of, of_dwc3_match);
+
+static struct platform_driver dwc3_msm_driver = {
+	.probe		= dwc3_msm_probe,
+	.remove		= dwc3_msm_remove,
+	.driver		= {
+		.name	= "msm-dwc3",
+		.pm	= &dwc3_msm_dev_pm_ops,
+		.of_match_table	= of_dwc3_match,
+	},
+};
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("DesignWare USB3 MSM Glue Layer");
+
+static int dwc3_msm_init(void)
+{
+	return platform_driver_register(&dwc3_msm_driver);
+}
+module_init(dwc3_msm_init);
+
+static void __exit dwc3_msm_exit(void)
+{
+	platform_driver_unregister(&dwc3_msm_driver);
+}
+module_exit(dwc3_msm_exit);
diff -Nruw linux-4.4.115-fbx/drivers/usb/pd./Kconfig linux-4.4.115-fbx/drivers/usb/pd/Kconfig
--- linux-4.4.115-fbx/drivers/usb/pd./Kconfig	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/usb/pd/Kconfig	2019-01-22 16:16:27.371281434 +0100
@@ -0,0 +1,25 @@
+#
+# USB Power Delivery driver configuration
+#
+menu "USB Power Delivery"
+
+config USB_PD
+	def_bool n
+
+config USB_PD_POLICY
+	tristate "USB Power Delivery Protocol and Policy Engine"
+	depends on EXTCON
+	depends on DUAL_ROLE_USB_INTF
+	select USB_PD
+	help
+	  Say Y here to enable the USB PD protocol and policy engine.
+
+config QPNP_USB_PDPHY
+	tristate "QPNP USB Power Delivery PHY"
+	depends on SPMI
+	help
+	  Say Y here to enable the QPNP USB PD PHY peripheral driver,
+	  which communicates over the SPMI bus. It is used to handle
+	  the PHY-layer communication of the Power Delivery stack.
+
+endmenu
diff -Nruw linux-4.4.115-fbx/drivers/usb/pd./Makefile linux-4.4.115-fbx/drivers/usb/pd/Makefile
--- linux-4.4.115-fbx/drivers/usb/pd./Makefile	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/usb/pd/Makefile	2019-01-22 16:16:27.371281434 +0100
@@ -0,0 +1,6 @@
+#
+# Makefile for USB Power Delivery drivers
+#
+
+obj-$(CONFIG_USB_PD_POLICY)	+= policy_engine.o
+obj-$(CONFIG_QPNP_USB_PDPHY)	+= qpnp-pdphy.o
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/usb/phy/otg-wakelock.c	2019-01-22 16:16:27.371281434 +0100
@@ -0,0 +1,173 @@
+/*
+ * otg-wakelock.c
+ *
+ * Copyright (C) 2011 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/notifier.h>
+#include <linux/wakelock.h>
+#include <linux/spinlock.h>
+#include <linux/usb/otg.h>
+
+#define TEMPORARY_HOLD_TIME	2000
+
+static bool enabled = true;
+static struct usb_phy *otgwl_xceiv;
+static struct notifier_block otgwl_nb;
+
+/*
+ * otgwl_spinlock is held while the VBUS lock is grabbed or dropped and the
+ * held field is updated to match.
+ */
+
+static DEFINE_SPINLOCK(otgwl_spinlock);
+
+/*
+ * Only one lock for now, but these three fields belong together.
+ */
+
+struct otgwl_lock {
+	char name[40];
+	struct wake_lock wakelock;
+	bool held;
+};
+
+/*
+ * VBUS present lock.  Also used as a timed lock on charger
+ * connect/disconnect and USB host disconnect, to allow the system
+ * to react to the change in power.
+ */
+
+static struct otgwl_lock vbus_lock;
+
+static void otgwl_hold(struct otgwl_lock *lock)
+{
+	if (!lock->held) {
+		wake_lock(&lock->wakelock);
+		lock->held = true;
+	}
+}
+
+static void otgwl_temporary_hold(struct otgwl_lock *lock)
+{
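+	/*
+	 * A timed hold is not tracked via ->held, so a later otgwl_drop()
+	 * will not release it early; it simply expires after the timeout.
+	 */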
+	wake_lock_timeout(&lock->wakelock,
+			  msecs_to_jiffies(TEMPORARY_HOLD_TIME));
+	lock->held = false;
+}
+
+static void otgwl_drop(struct otgwl_lock *lock)
+{
+	if (lock->held) {
+		wake_unlock(&lock->wakelock);
+		lock->held = false;
+	}
+}
+
+static void otgwl_handle_event(unsigned long event)
+{
+	unsigned long irqflags;
+
+	spin_lock_irqsave(&otgwl_spinlock, irqflags);
+
+	if (!enabled) {
+		otgwl_drop(&vbus_lock);
+		spin_unlock_irqrestore(&otgwl_spinlock, irqflags);
+		return;
+	}
+
+	switch (event) {
+	case USB_EVENT_VBUS:
+	case USB_EVENT_ENUMERATED:
+		otgwl_hold(&vbus_lock);
+		break;
+
+	case USB_EVENT_NONE:
+	case USB_EVENT_ID:
+	case USB_EVENT_CHARGER:
+		otgwl_temporary_hold(&vbus_lock);
+		break;
+
+	default:
+		break;
+	}
+
+	spin_unlock_irqrestore(&otgwl_spinlock, irqflags);
+}
+
+static int otgwl_otg_notifications(struct notifier_block *nb,
+				   unsigned long event, void *unused)
+{
+	otgwl_handle_event(event);
+	return NOTIFY_OK;
+}
+
+static int set_enabled(const char *val, const struct kernel_param *kp)
+{
+	int rv = param_set_bool(val, kp);
+
+	if (rv)
+		return rv;
+
+	if (otgwl_xceiv)
+		otgwl_handle_event(otgwl_xceiv->last_event);
+
+	return 0;
+}
+
+static struct kernel_param_ops enabled_param_ops = {
+	.set = set_enabled,
+	.get = param_get_bool,
+};
+
+module_param_cb(enabled, &enabled_param_ops, &enabled, 0644);
+MODULE_PARM_DESC(enabled, "enable wakelock when VBUS present");
+
+static int __init otg_wakelock_init(void)
+{
+	int ret;
+	struct usb_phy *phy;
+
+	phy = usb_get_phy(USB_PHY_TYPE_USB2);
+
+	if (IS_ERR(phy)) {
+		pr_err("%s: No USB transceiver found\n", __func__);
+		return PTR_ERR(phy);
+	}
+	otgwl_xceiv = phy;
+
+	snprintf(vbus_lock.name, sizeof(vbus_lock.name), "vbus-%s",
+		 dev_name(otgwl_xceiv->dev));
+	wake_lock_init(&vbus_lock.wakelock, WAKE_LOCK_SUSPEND,
+		       vbus_lock.name);
+
+	otgwl_nb.notifier_call = otgwl_otg_notifications;
+	ret = usb_register_notifier(otgwl_xceiv, &otgwl_nb);
+
+	if (ret) {
+		pr_err("%s: usb_register_notifier on transceiver %s"
+		       " failed\n", __func__,
+		       dev_name(otgwl_xceiv->dev));
+		otgwl_xceiv = NULL;
+		wake_lock_destroy(&vbus_lock.wakelock);
+		return ret;
+	}
+
+	otgwl_handle_event(otgwl_xceiv->last_event);
+	return ret;
+}
+
+late_initcall(otg_wakelock_init);
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/usb/phy/phy-msm-qusb.c	2019-10-29 09:26:25.045216898 +0100
@@ -0,0 +1,1216 @@
+/*
+ * Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/clk.h>
+#include <linux/clk/msm-clk.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/power_supply.h>
+#include <linux/regulator/consumer.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+#include <linux/usb/phy.h>
+#include <linux/usb/msm_hsusb.h>
+#include <linux/reset.h>
+
+#define QUSB2PHY_PLL_STATUS	0x38
+#define QUSB2PHY_PLL_LOCK	BIT(5)
+
+#define QUSB2PHY_PORT_QC1	0x70
+#define VDM_SRC_EN		BIT(4)
+#define VDP_SRC_EN		BIT(2)
+
+#define QUSB2PHY_PORT_QC2	0x74
+#define RDM_UP_EN		BIT(1)
+#define RDP_UP_EN		BIT(3)
+#define RPUM_LOW_EN		BIT(4)
+#define RPUP_LOW_EN		BIT(5)
+
+#define QUSB2PHY_PORT_POWERDOWN		0xB4
+#define CLAMP_N_EN			BIT(5)
+#define FREEZIO_N			BIT(1)
+#define POWER_DOWN			BIT(0)
+
+#define QUSB2PHY_PORT_TEST_CTRL		0xB8
+
+#define QUSB2PHY_PWR_CTRL1		0x210
+#define PWR_CTRL1_CLAMP_N_EN		BIT(1)
+#define PWR_CTRL1_POWR_DOWN		BIT(0)
+
+#define QUSB2PHY_PLL_COMMON_STATUS_ONE	0x1A0
+#define CORE_READY_STATUS		BIT(0)
+
+#define QUSB2PHY_PORT_UTMI_CTRL1	0xC0
+#define TERM_SELECT			BIT(4)
+#define XCVR_SELECT_FS			BIT(2)
+#define OP_MODE_NON_DRIVE		BIT(0)
+
+#define QUSB2PHY_PORT_UTMI_CTRL2	0xC4
+#define UTMI_ULPI_SEL			BIT(7)
+#define UTMI_TEST_MUX_SEL		BIT(6)
+
+#define QUSB2PHY_PLL_TEST		0x04
+#define CLK_REF_SEL			BIT(7)
+
+#define QUSB2PHY_PORT_TUNE1             0x80
+#define QUSB2PHY_PORT_TUNE2             0x84
+#define QUSB2PHY_PORT_TUNE3             0x88
+#define QUSB2PHY_PORT_TUNE4             0x8C
+#define QUSB2PHY_PORT_TUNE5             0x90
+
+/* Get TUNE2's high nibble value read from efuse */
+#define TUNE2_HIGH_NIBBLE_VAL(val, pos, mask)	((val >> pos) & mask)
+
+#define QUSB2PHY_PORT_INTR_CTRL         0xBC
+#define CHG_DET_INTR_EN                 BIT(4)
+#define DMSE_INTR_HIGH_SEL              BIT(3)
+#define DMSE_INTR_EN                    BIT(2)
+#define DPSE_INTR_HIGH_SEL              BIT(1)
+#define DPSE_INTR_EN                    BIT(0)
+
+#define QUSB2PHY_PORT_UTMI_STATUS	0xF4
+#define LINESTATE_DP			BIT(0)
+#define LINESTATE_DM			BIT(1)
+
+
+#define QUSB2PHY_1P8_VOL_MIN           1800000 /* uV */
+#define QUSB2PHY_1P8_VOL_MAX           1800000 /* uV */
+#define QUSB2PHY_1P8_HPM_LOAD          30000   /* uA */
+
+#define QUSB2PHY_3P3_VOL_MIN		3075000 /* uV */
+#define QUSB2PHY_3P3_VOL_MAX		3200000 /* uV */
+#define QUSB2PHY_3P3_HPM_LOAD		30000	/* uA */
+
+#define QUSB2PHY_REFCLK_ENABLE		BIT(0)
+
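+/*
+ * Debug overrides: a non-zero tuneX modparam is written over the
+ * DT/efuse-programmed value of the matching QUSB2PHY_PORT_TUNEX
+ * register during qusb_phy_init().
+ */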
+unsigned int tune1;
+module_param(tune1, uint, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(tune1, "QUSB PHY TUNE1");
+
+unsigned int tune2;
+module_param(tune2, uint, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(tune2, "QUSB PHY TUNE2");
+
+unsigned int tune3;
+module_param(tune3, uint, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(tune3, "QUSB PHY TUNE3");
+
+unsigned int tune4;
+module_param(tune4, uint, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(tune4, "QUSB PHY TUNE4");
+
+unsigned int tune5;
+module_param(tune5, uint, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(tune5, "QUSB PHY TUNE5");
+
+
+struct qusb_phy {
+	struct usb_phy		phy;
+	void __iomem		*base;
+	void __iomem		*tune2_efuse_reg;
+	void __iomem		*ref_clk_base;
+	void __iomem		*tcsr_clamp_dig_n;
+
+	struct clk		*ref_clk_src;
+	struct clk		*ref_clk;
+	struct clk		*cfg_ahb_clk;
+	struct reset_control	*phy_reset;
+
+	struct regulator	*vdd;
+	struct regulator	*vdda33;
+	struct regulator	*vdda18;
+	int			vdd_levels[3]; /* none, low, high */
+	int			init_seq_len;
+	int			*qusb_phy_init_seq;
+	u32			major_rev;
+
+	u32			tune2_val;
+	int			tune2_efuse_bit_pos;
+	int			tune2_efuse_num_of_bits;
+	int			tune2_efuse_correction;
+
+	bool			power_enabled;
+	bool			clocks_enabled;
+	bool			cable_connected;
+	bool			suspended;
+	bool			ulpi_mode;
+	bool			rm_pulldown;
+	bool			is_se_clk;
+
+	struct regulator_desc	dpdm_rdesc;
+	struct regulator_dev	*dpdm_rdev;
+
+	/* emulation targets specific */
+	void __iomem		*emu_phy_base;
+	bool			emulation;
+	int			*emu_init_seq;
+	int			emu_init_seq_len;
+	int			*phy_pll_reset_seq;
+	int			phy_pll_reset_seq_len;
+	int			*emu_dcm_reset_seq;
+	int			emu_dcm_reset_seq_len;
+	bool			put_into_high_z_state;
+	struct mutex		phy_lock;
+};
+
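+/*
+ * Gate all three PHY clocks on or off. The clocks_enabled flag makes
+ * repeated enable/disable requests idempotent; there is no reference
+ * counting here.
+ */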
+static void qusb_phy_enable_clocks(struct qusb_phy *qphy, bool on)
+{
+	dev_dbg(qphy->phy.dev, "%s(): clocks_enabled:%d on:%d\n",
+			__func__, qphy->clocks_enabled, on);
+
+	if (!qphy->clocks_enabled && on) {
+		clk_prepare_enable(qphy->ref_clk_src);
+		clk_prepare_enable(qphy->ref_clk);
+		clk_prepare_enable(qphy->cfg_ahb_clk);
+		qphy->clocks_enabled = true;
+	}
+
+	if (qphy->clocks_enabled && !on) {
+		clk_disable_unprepare(qphy->ref_clk);
+		clk_disable_unprepare(qphy->ref_clk_src);
+		clk_disable_unprepare(qphy->cfg_ahb_clk);
+		qphy->clocks_enabled = false;
+	}
+
+	dev_dbg(qphy->phy.dev, "%s(): clocks_enabled:%d\n", __func__,
+						qphy->clocks_enabled);
+}
+
+static int qusb_phy_config_vdd(struct qusb_phy *qphy, int high)
+{
+	int min, ret;
+
+	min = high ? 1 : 0; /* low or none? */
+	ret = regulator_set_voltage(qphy->vdd, qphy->vdd_levels[min],
+						qphy->vdd_levels[2]);
+	if (ret) {
+		dev_err(qphy->phy.dev, "unable to set voltage for qusb vdd\n");
+		return ret;
+	}
+
+	dev_dbg(qphy->phy.dev, "min_vol:%d max_vol:%d\n",
+			qphy->vdd_levels[min], qphy->vdd_levels[2]);
+	return ret;
+}
+
+static int qusb_phy_enable_power(struct qusb_phy *qphy, bool on)
+{
+	int ret = 0;
+
+	dev_dbg(qphy->phy.dev, "%s: turn %s regulators. power_enabled:%d\n",
+			__func__, on ? "on" : "off", qphy->power_enabled);
+
+	if (qphy->power_enabled == on) {
+		dev_dbg(qphy->phy.dev, "PHY regulators are already %s\n",
+					on ? "on" : "off");
+		return 0;
+	}
+
+	if (!on)
+		goto disable_vdda33;
+
+	ret = qusb_phy_config_vdd(qphy, true);
+	if (ret) {
+		dev_err(qphy->phy.dev, "Unable to config VDD:%d\n",
+							ret);
+		goto err_vdd;
+	}
+
+	ret = regulator_enable(qphy->vdd);
+	if (ret) {
+		dev_err(qphy->phy.dev, "Unable to enable VDD\n");
+		goto unconfig_vdd;
+	}
+
+	ret = regulator_set_load(qphy->vdda18, QUSB2PHY_1P8_HPM_LOAD);
+	if (ret < 0) {
+		dev_err(qphy->phy.dev, "Unable to set HPM of vdda18:%d\n", ret);
+		goto disable_vdd;
+	}
+
+	ret = regulator_set_voltage(qphy->vdda18, QUSB2PHY_1P8_VOL_MIN,
+						QUSB2PHY_1P8_VOL_MAX);
+	if (ret) {
+		dev_err(qphy->phy.dev,
+				"Unable to set voltage for vdda18:%d\n", ret);
+		goto put_vdda18_lpm;
+	}
+
+	ret = regulator_enable(qphy->vdda18);
+	if (ret) {
+		dev_err(qphy->phy.dev, "Unable to enable vdda18:%d\n", ret);
+		goto unset_vdda18;
+	}
+
+	ret = regulator_set_load(qphy->vdda33, QUSB2PHY_3P3_HPM_LOAD);
+	if (ret < 0) {
+		dev_err(qphy->phy.dev, "Unable to set HPM of vdda33:%d\n", ret);
+		goto disable_vdda18;
+	}
+
+	ret = regulator_set_voltage(qphy->vdda33, QUSB2PHY_3P3_VOL_MIN,
+						QUSB2PHY_3P3_VOL_MAX);
+	if (ret) {
+		dev_err(qphy->phy.dev,
+				"Unable to set voltage for vdda33:%d\n", ret);
+		goto put_vdda33_lpm;
+	}
+
+	ret = regulator_enable(qphy->vdda33);
+	if (ret) {
+		dev_err(qphy->phy.dev, "Unable to enable vdda33:%d\n", ret);
+		goto unset_vdd33;
+	}
+
+	qphy->power_enabled = true;
+
+	pr_debug("%s(): QUSB PHY's regulators are turned ON.\n", __func__);
+	return ret;
+
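+/*
+ * Unwind ladder: each label below undoes one bring-up step. It doubles
+ * as the power-off path, since the !on case jumps straight to
+ * disable_vdda33 and falls through every step in reverse order.
+ */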
+disable_vdda33:
+	ret = regulator_disable(qphy->vdda33);
+	if (ret)
+		dev_err(qphy->phy.dev, "Unable to disable vdda33:%d\n", ret);
+
+unset_vdd33:
+	ret = regulator_set_voltage(qphy->vdda33, 0, QUSB2PHY_3P3_VOL_MAX);
+	if (ret)
+		dev_err(qphy->phy.dev,
+			"Unable to set (0) voltage for vdda33:%d\n", ret);
+
+put_vdda33_lpm:
+	ret = regulator_set_load(qphy->vdda33, 0);
+	if (ret < 0)
+		dev_err(qphy->phy.dev, "Unable to set (0) HPM of vdda33\n");
+
+disable_vdda18:
+	ret = regulator_disable(qphy->vdda18);
+	if (ret)
+		dev_err(qphy->phy.dev, "Unable to disable vdda18:%d\n", ret);
+
+unset_vdda18:
+	ret = regulator_set_voltage(qphy->vdda18, 0, QUSB2PHY_1P8_VOL_MAX);
+	if (ret)
+		dev_err(qphy->phy.dev,
+			"Unable to set (0) voltage for vdda18:%d\n", ret);
+
+put_vdda18_lpm:
+	ret = regulator_set_load(qphy->vdda18, 0);
+	if (ret < 0)
+		dev_err(qphy->phy.dev, "Unable to set LPM of vdda18\n");
+
+disable_vdd:
+	ret = regulator_disable(qphy->vdd);
+	if (ret)
+		dev_err(qphy->phy.dev, "Unable to disable vdd:%d\n",
+								ret);
+
+unconfig_vdd:
+	ret = qusb_phy_config_vdd(qphy, false);
+	if (ret)
+		dev_err(qphy->phy.dev, "Unable unconfig VDD:%d\n",
+								ret);
+err_vdd:
+	qphy->power_enabled = false;
+	dev_dbg(qphy->phy.dev, "QUSB PHY's regulators are turned OFF.\n");
+	return ret;
+}
+
+static int qusb_phy_update_dpdm(struct usb_phy *phy, int value)
+{
+	struct qusb_phy *qphy = container_of(phy, struct qusb_phy, phy);
+	int ret = 0;
+
+	dev_dbg(phy->dev, "%s value:%d rm_pulldown:%d\n",
+				__func__, value, qphy->rm_pulldown);
+
+	switch (value) {
+	case POWER_SUPPLY_DP_DM_DPF_DMF:
+		dev_dbg(phy->dev, "POWER_SUPPLY_DP_DM_DPF_DMF\n");
+		mutex_lock(&qphy->phy_lock);
+		if (!qphy->rm_pulldown) {
+			ret = qusb_phy_enable_power(qphy, true);
+			if (ret >= 0) {
+				qphy->rm_pulldown = true;
+				dev_dbg(phy->dev, "DP_DM_F: rm_pulldown:%d\n",
+						qphy->rm_pulldown);
+			}
+
+			if (qphy->put_into_high_z_state) {
+				if (qphy->tcsr_clamp_dig_n)
+					writel_relaxed(0x1,
+					       qphy->tcsr_clamp_dig_n);
+
+				qusb_phy_enable_clocks(qphy, true);
+
+				dev_dbg(phy->dev, "RESET QUSB PHY\n");
+				ret = reset_control_assert(qphy->phy_reset);
+				if (ret)
+					dev_err(phy->dev, "phyassert failed\n");
+				usleep_range(100, 150);
+				ret = reset_control_deassert(qphy->phy_reset);
+				if (ret)
+					dev_err(phy->dev, "deassert failed\n");
+
+				/*
+				 * Phy in non-driving mode leaves Dp and Dm
+				 * lines in high-Z state. Controller power
+				 * collapse is not switching phy to non-driving
+				 * mode causing charger detection failure. Bring
+				 * phy to non-driving mode by overriding
+				 * controller output via UTMI interface.
+				 */
+				writel_relaxed(TERM_SELECT | XCVR_SELECT_FS |
+					OP_MODE_NON_DRIVE,
+					qphy->base + QUSB2PHY_PORT_UTMI_CTRL1);
+				writel_relaxed(UTMI_ULPI_SEL |
+					UTMI_TEST_MUX_SEL,
+					qphy->base + QUSB2PHY_PORT_UTMI_CTRL2);
+
+				/* Disable PHY */
+				writel_relaxed(CLAMP_N_EN | FREEZIO_N |
+					POWER_DOWN,
+					qphy->base + QUSB2PHY_PORT_POWERDOWN);
+				/* Make sure that above write is completed */
+				wmb();
+
+				if (qphy->suspended)
+					qusb_phy_enable_clocks(qphy, false);
+			}
+		}
+		mutex_unlock(&qphy->phy_lock);
+
+		break;
+
+	case POWER_SUPPLY_DP_DM_DPR_DMR:
+		dev_dbg(phy->dev, "POWER_SUPPLY_DP_DM_DPR_DMR\n");
+		mutex_lock(&qphy->phy_lock);
+		if (qphy->rm_pulldown) {
+			if (!qphy->cable_connected) {
+				if (qphy->tcsr_clamp_dig_n)
+					writel_relaxed(0x0,
+					       qphy->tcsr_clamp_dig_n);
+				dev_dbg(phy->dev, "turn off for HVDCP case\n");
+				ret = qusb_phy_enable_power(qphy, false);
+			}
+			if (ret >= 0) {
+				qphy->rm_pulldown = false;
+				dev_dbg(phy->dev, "DP_DM_R: rm_pulldown:%d\n",
+						qphy->rm_pulldown);
+			}
+		}
+		mutex_unlock(&qphy->phy_lock);
+		break;
+
+	default:
+		ret = -EINVAL;
+		dev_err(phy->dev, "Invalid power supply property(%d)\n", value);
+		break;
+	}
+
+	return ret;
+}
+
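+/*
+ * Illustrative example (values are hypothetical, the real ones come from
+ * DT): with tune2_efuse_num_of_bits = 4 and tune2_efuse_bit_pos = 21,
+ * bit_mask becomes 0xf and the fused high nibble of TUNE2 is
+ * (efuse_val >> 21) & 0xf.
+ */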
+static void qusb_phy_get_tune2_param(struct qusb_phy *qphy)
+{
+	u8 num_of_bits;
+	u32 bit_mask = 1;
+	u8 reg_val;
+
+	pr_debug("%s(): num_of_bits:%d bit_pos:%d\n", __func__,
+				qphy->tune2_efuse_num_of_bits,
+				qphy->tune2_efuse_bit_pos);
+
+	/* get bit mask based on number of bits to use with efuse reg */
+	if (qphy->tune2_efuse_num_of_bits) {
+		num_of_bits = qphy->tune2_efuse_num_of_bits;
+		bit_mask = (bit_mask << num_of_bits) - 1;
+	}
+
+	/*
+	 * Read the EFUSE register holding the TUNE2 parameter's high nibble.
+	 * If the efuse register reads 0x0, keep the previous value as is;
+	 * otherwise use the efuse-based value.
+	 */
+	qphy->tune2_val = readl_relaxed(qphy->tune2_efuse_reg);
+	pr_debug("%s(): bit_mask:%d efuse based tune2 value:%d\n",
+				__func__, bit_mask, qphy->tune2_val);
+
+	qphy->tune2_val = TUNE2_HIGH_NIBBLE_VAL(qphy->tune2_val,
+				qphy->tune2_efuse_bit_pos, bit_mask);
+
+	/* Update higher nibble of TUNE2 value for better rise/fall times */
+	if (qphy->tune2_efuse_correction && qphy->tune2_val) {
+		if (qphy->tune2_efuse_correction > 5 ||
+				qphy->tune2_efuse_correction < -10)
+			pr_warn("Correction value is out of range : %d\n",
+					qphy->tune2_efuse_correction);
+		else
+			qphy->tune2_val = qphy->tune2_val +
+						qphy->tune2_efuse_correction;
+	}
+
+	reg_val = readb_relaxed(qphy->base + QUSB2PHY_PORT_TUNE2);
+	if (qphy->tune2_val) {
+		reg_val  &= 0x0f;
+		reg_val |= (qphy->tune2_val << 4);
+	}
+
+	qphy->tune2_val = reg_val;
+}
+
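+/*
+ * seq[] is a flat array of {value, register-offset} pairs as read from
+ * the qcom,*-seq DT properties; cnt is the total number of u32 entries,
+ * so it must be even.
+ */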
+static void qusb_phy_write_seq(void __iomem *base, u32 *seq, int cnt,
+		unsigned long delay)
+{
+	int i;
+
+	pr_debug("Seq count:%d\n", cnt);
+	for (i = 0; i < cnt; i = i+2) {
+		pr_debug("write 0x%02x to 0x%02x\n", seq[i], seq[i+1]);
+		writel_relaxed(seq[i], base + seq[i+1]);
+		if (delay)
+			usleep_range(delay, (delay + 2000));
+	}
+}
+
+static int qusb_phy_init(struct usb_phy *phy)
+{
+	struct qusb_phy *qphy = container_of(phy, struct qusb_phy, phy);
+	int ret, reset_val = 0;
+	u8 reg;
+	bool pll_lock_fail = false;
+
+	dev_dbg(phy->dev, "%s\n", __func__);
+
+	ret = qusb_phy_enable_power(qphy, true);
+	if (ret)
+		return ret;
+
+	qusb_phy_enable_clocks(qphy, true);
+
+	/*
+	 * The ref clock is enabled by default after power-on reset. The
+	 * Linux clock driver will disable this clock as part of late init
+	 * if no peripheral driver explicitly votes for it. The clock driver
+	 * also does not disable the clock before late init even if a
+	 * peripheral driver explicitly requests that, and this driver cannot
+	 * defer its probe until late init. Hence, explicitly disable the
+	 * clock with a register write so that the QUSB PHY PLL can lock
+	 * properly.
+	 */
+	if (qphy->ref_clk_base) {
+		writel_relaxed((readl_relaxed(qphy->ref_clk_base) &
+					~QUSB2PHY_REFCLK_ENABLE),
+					qphy->ref_clk_base);
+		/* Make sure that the above write completes to get ref clk OFF */
+		wmb();
+	}
+
+	/* Perform phy reset */
+	ret = reset_control_assert(qphy->phy_reset);
+	if (ret)
+		dev_err(phy->dev, "%s: phy_reset assert failed\n", __func__);
+	usleep_range(100, 150);
+	ret = reset_control_deassert(qphy->phy_reset);
+	if (ret)
+		dev_err(phy->dev, "%s: phy_reset deassert failed\n", __func__);
+
+	if (qphy->emulation) {
+		if (qphy->emu_init_seq)
+			qusb_phy_write_seq(qphy->emu_phy_base,
+				qphy->emu_init_seq, qphy->emu_init_seq_len, 0);
+
+		if (qphy->qusb_phy_init_seq)
+			qusb_phy_write_seq(qphy->base, qphy->qusb_phy_init_seq,
+					qphy->init_seq_len, 0);
+
+		/* Wait for 5ms as per QUSB2 RUMI sequence */
+		usleep_range(5000, 7000);
+
+		if (qphy->phy_pll_reset_seq)
+			qusb_phy_write_seq(qphy->base, qphy->phy_pll_reset_seq,
+					qphy->phy_pll_reset_seq_len, 10000);
+
+		if (qphy->emu_dcm_reset_seq)
+			qusb_phy_write_seq(qphy->emu_phy_base,
+					qphy->emu_dcm_reset_seq,
+					qphy->emu_dcm_reset_seq_len, 10000);
+
+		return 0;
+	}
+
+	/* Disable the PHY */
+	if (qphy->major_rev < 2)
+		writel_relaxed(CLAMP_N_EN | FREEZIO_N | POWER_DOWN,
+				qphy->base + QUSB2PHY_PORT_POWERDOWN);
+	else
+		writel_relaxed(readl_relaxed(qphy->base + QUSB2PHY_PWR_CTRL1) |
+				PWR_CTRL1_POWR_DOWN,
+				qphy->base + QUSB2PHY_PWR_CTRL1);
+
+	/* configure for ULPI mode if requested */
+	if (qphy->ulpi_mode)
+		writel_relaxed(0x0, qphy->base + QUSB2PHY_PORT_UTMI_CTRL2);
+
+	/* save reset value to override based on clk scheme */
+	if (qphy->ref_clk_base)
+		reset_val = readl_relaxed(qphy->base + QUSB2PHY_PLL_TEST);
+
+	if (qphy->qusb_phy_init_seq)
+		qusb_phy_write_seq(qphy->base, qphy->qusb_phy_init_seq,
+				qphy->init_seq_len, 0);
+
+	/*
+	 * Check for EFUSE value only if tune2_efuse_reg is available
+	 * and try to read EFUSE value only once i.e. not every USB
+	 * cable connect case.
+	 */
+	if (qphy->tune2_efuse_reg && !tune2) {
+		if (!qphy->tune2_val)
+			qusb_phy_get_tune2_param(qphy);
+
+		pr_debug("%s(): Programming TUNE2 parameter as:%x\n", __func__,
+				qphy->tune2_val);
+		writel_relaxed(qphy->tune2_val,
+				qphy->base + QUSB2PHY_PORT_TUNE2);
+	}
+
+	/* If a tuneX modparam is set, override the corresponding tune value */
+	pr_debug("%s(): user-specified modparam TUNEx values:0x%x %x %x %x %x\n",
+				__func__, tune1, tune2, tune3, tune4, tune5);
+	if (tune1)
+		writel_relaxed(tune1,
+				qphy->base + QUSB2PHY_PORT_TUNE1);
+
+	if (tune2)
+		writel_relaxed(tune2,
+				qphy->base + QUSB2PHY_PORT_TUNE2);
+
+	if (tune3)
+		writel_relaxed(tune3,
+				qphy->base + QUSB2PHY_PORT_TUNE3);
+
+	if (tune4)
+		writel_relaxed(tune4,
+				qphy->base + QUSB2PHY_PORT_TUNE4);
+
+	if (tune5)
+		writel_relaxed(tune5,
+				qphy->base + QUSB2PHY_PORT_TUNE5);
+
+	/* ensure above writes are completed before re-enabling PHY */
+	wmb();
+
+	/* Enable the PHY */
+	if (qphy->major_rev < 2)
+		writel_relaxed(CLAMP_N_EN | FREEZIO_N,
+				qphy->base + QUSB2PHY_PORT_POWERDOWN);
+	else
+		writel_relaxed(readl_relaxed(qphy->base + QUSB2PHY_PWR_CTRL1) &
+				~PWR_CTRL1_POWR_DOWN,
+				qphy->base + QUSB2PHY_PWR_CTRL1);
+
+	/* Ensure above write is completed before turning ON ref clk */
+	wmb();
+
+	/* Required delay for the PHY PLL to lock successfully */
+	usleep_range(150, 160);
+
+	/* Turn on phy ref_clk if DIFF_CLK else select SE_CLK */
+	if (qphy->ref_clk_base) {
+		if (!qphy->is_se_clk) {
+			reset_val &= ~CLK_REF_SEL;
+			writel_relaxed((readl_relaxed(qphy->ref_clk_base) |
+					QUSB2PHY_REFCLK_ENABLE),
+					qphy->ref_clk_base);
+		} else {
+			reset_val |= CLK_REF_SEL;
+			writel_relaxed(reset_val,
+					qphy->base + QUSB2PHY_PLL_TEST);
+		}
+
+		/* Make sure above write is completed to get PLL source clock */
+		wmb();
+
+		/* Required delay for the PHY PLL to lock successfully */
+		usleep_range(100, 110);
+	}
+
+	if (qphy->major_rev < 2) {
+		reg = readb_relaxed(qphy->base + QUSB2PHY_PLL_STATUS);
+		dev_dbg(phy->dev, "QUSB2PHY_PLL_STATUS:%x\n", reg);
+		if (!(reg & QUSB2PHY_PLL_LOCK))
+			pll_lock_fail = true;
+	} else {
+		reg = readb_relaxed(qphy->base +
+				QUSB2PHY_PLL_COMMON_STATUS_ONE);
+		dev_dbg(phy->dev, "QUSB2PHY_PLL_COMMON_STATUS_ONE:%x\n", reg);
+		if (!(reg & CORE_READY_STATUS))
+			pll_lock_fail = true;
+	}
+
+	if (pll_lock_fail) {
+		dev_err(phy->dev, "QUSB PHY PLL LOCK fails:%x\n", reg);
+		WARN_ON(1);
+	}
+
+	return 0;
+}
+
+static void qusb_phy_shutdown(struct usb_phy *phy)
+{
+	struct qusb_phy *qphy = container_of(phy, struct qusb_phy, phy);
+
+	dev_dbg(phy->dev, "%s\n", __func__);
+
+	qusb_phy_enable_clocks(qphy, true);
+
+	/* Disable the PHY */
+	if (qphy->major_rev < 2)
+		writel_relaxed(CLAMP_N_EN | FREEZIO_N | POWER_DOWN,
+				qphy->base + QUSB2PHY_PORT_POWERDOWN);
+	else
+		writel_relaxed(readl_relaxed(qphy->base + QUSB2PHY_PWR_CTRL1) |
+				PWR_CTRL1_POWR_DOWN,
+				qphy->base + QUSB2PHY_PWR_CTRL1);
+	wmb();
+
+	qusb_phy_enable_clocks(qphy, false);
+}
+/**
+ * qusb_phy_set_suspend() - Perform QUSB2 PHY suspend/resume.
+ * @phy: usb phy pointer.
+ * @suspend: 1 to enter suspend, 0 to resume.
+ */
+static int qusb_phy_set_suspend(struct usb_phy *phy, int suspend)
+{
+	struct qusb_phy *qphy = container_of(phy, struct qusb_phy, phy);
+	u32 linestate = 0, intr_mask = 0;
+
+	if (qphy->suspended && suspend) {
+		dev_dbg(phy->dev, "%s: USB PHY is already suspended\n",
+			__func__);
+		return 0;
+	}
+
+	if (suspend) {
+		/* Bus suspend case */
+		if (qphy->cable_connected ||
+			(qphy->phy.flags & PHY_HOST_MODE)) {
+			/* Clear all interrupts */
+			writel_relaxed(0x00,
+				qphy->base + QUSB2PHY_PORT_INTR_CTRL);
+
+			linestate = readl_relaxed(qphy->base +
+					QUSB2PHY_PORT_UTMI_STATUS);
+
+			/*
+			 * D+/D- interrupts are level-triggered, but we are
+			 * only interested if the line state changes, so enable
+			 * the high/low trigger based on current state. In
+			 * other words, enable the triggers _opposite_ of what
+			 * the current D+/D- levels are.
+			 * e.g. if currently D+ high, D- low (HS 'J'/Suspend),
+			 * configure the mask to trigger on D+ low OR D- high
+			 */
+			intr_mask = DPSE_INTR_EN | DMSE_INTR_EN;
+			if (!(linestate & LINESTATE_DP)) /* D+ low */
+				intr_mask |= DPSE_INTR_HIGH_SEL;
+			if (!(linestate & LINESTATE_DM)) /* D- low */
+				intr_mask |= DMSE_INTR_HIGH_SEL;
+
+			writel_relaxed(intr_mask,
+				qphy->base + QUSB2PHY_PORT_INTR_CTRL);
+
+			if (linestate & (LINESTATE_DP | LINESTATE_DM)) {
+				/* enable phy auto-resume */
+				writel_relaxed(0x0C,
+					qphy->base + QUSB2PHY_PORT_TEST_CTRL);
+				/* flush the previous write before next write */
+				wmb();
+				writel_relaxed(0x04,
+					qphy->base + QUSB2PHY_PORT_TEST_CTRL);
+			}
+
+
+			dev_dbg(phy->dev, "%s: intr_mask = %x\n",
+			__func__, intr_mask);
+
+			/* Makes sure that above write goes through */
+			wmb();
+
+			qusb_phy_enable_clocks(qphy, false);
+		} else { /* Disconnect case */
+			mutex_lock(&qphy->phy_lock);
+			/* Disable all interrupts */
+			writel_relaxed(0x00,
+				qphy->base + QUSB2PHY_PORT_INTR_CTRL);
+
+			/* Disable PHY */
+			writel_relaxed(POWER_DOWN,
+				qphy->base + QUSB2PHY_PORT_POWERDOWN);
+			/* Make sure that above write is completed */
+			wmb();
+
+			qusb_phy_enable_clocks(qphy, false);
+			if (qphy->tcsr_clamp_dig_n)
+				writel_relaxed(0x0,
+					qphy->tcsr_clamp_dig_n);
+			/* Do not disable power rails if there is a vote for them */
+			if (!qphy->rm_pulldown)
+				qusb_phy_enable_power(qphy, false);
+			else
+				dev_dbg(phy->dev, "race with rm_pulldown. Keep ldo ON\n");
+			mutex_unlock(&qphy->phy_lock);
+
+			/*
+			 * Set put_into_high_z_state so that on the next USB
+			 * cable connect the DPF_DMF request performs a PHY
+			 * reset and puts the PHY into the high-Z state. At
+			 * bootup, with or without a USB cable, the QUSB PHY
+			 * does not need to be put into the high-Z state.
+			 */
+			qphy->put_into_high_z_state = true;
+		}
+		qphy->suspended = true;
+	} else {
+		/* Bus suspend case */
+		if (qphy->cable_connected ||
+			(qphy->phy.flags & PHY_HOST_MODE)) {
+			qusb_phy_enable_clocks(qphy, true);
+			/* Clear all interrupts on resume */
+			writel_relaxed(0x00,
+				qphy->base + QUSB2PHY_PORT_INTR_CTRL);
+		} else {
+			qusb_phy_enable_power(qphy, true);
+			if (qphy->tcsr_clamp_dig_n)
+				writel_relaxed(0x1,
+					qphy->tcsr_clamp_dig_n);
+			qusb_phy_enable_clocks(qphy, true);
+		}
+		qphy->suspended = false;
+	}
+
+	return 0;
+}
+
+static int qusb_phy_notify_connect(struct usb_phy *phy,
+					enum usb_device_speed speed)
+{
+	struct qusb_phy *qphy = container_of(phy, struct qusb_phy, phy);
+
+	qphy->cable_connected = true;
+
+	dev_dbg(phy->dev, "QUSB PHY: connect notification cable_connected=%d\n",
+							qphy->cable_connected);
+	return 0;
+}
+
+static int qusb_phy_notify_disconnect(struct usb_phy *phy,
+					enum usb_device_speed speed)
+{
+	struct qusb_phy *qphy = container_of(phy, struct qusb_phy, phy);
+
+	qphy->cable_connected = false;
+
+	dev_dbg(phy->dev, "QUSB PHY: connect notification cable_connected=%d\n",
+							qphy->cable_connected);
+	return 0;
+}
+
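+/*
+ * The D+/D- pull-down control is exposed as a regulator so that an
+ * external consumer (e.g. a charger driver) can vote for it through the
+ * regulator framework: enable maps to DPF_DMF (float the lines),
+ * disable to DPR_DMR (restore the pull-downs).
+ */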
+static int qusb_phy_dpdm_regulator_enable(struct regulator_dev *rdev)
+{
+	struct qusb_phy *qphy = rdev_get_drvdata(rdev);
+
+	dev_dbg(qphy->phy.dev, "%s\n", __func__);
+	return qusb_phy_update_dpdm(&qphy->phy, POWER_SUPPLY_DP_DM_DPF_DMF);
+}
+
+static int qusb_phy_dpdm_regulator_disable(struct regulator_dev *rdev)
+{
+	struct qusb_phy *qphy = rdev_get_drvdata(rdev);
+
+	dev_dbg(qphy->phy.dev, "%s\n", __func__);
+	return qusb_phy_update_dpdm(&qphy->phy, POWER_SUPPLY_DP_DM_DPR_DMR);
+}
+
+static int qusb_phy_dpdm_regulator_is_enabled(struct regulator_dev *rdev)
+{
+	struct qusb_phy *qphy = rdev_get_drvdata(rdev);
+
+	dev_dbg(qphy->phy.dev, "%s qphy->rm_pulldown = %d\n", __func__,
+					qphy->rm_pulldown);
+	return qphy->rm_pulldown;
+}
+
+static struct regulator_ops qusb_phy_dpdm_regulator_ops = {
+	.enable		= qusb_phy_dpdm_regulator_enable,
+	.disable	= qusb_phy_dpdm_regulator_disable,
+	.is_enabled	= qusb_phy_dpdm_regulator_is_enabled,
+};
+
+static int qusb_phy_regulator_init(struct qusb_phy *qphy)
+{
+	struct device *dev = qphy->phy.dev;
+	struct regulator_config cfg = {};
+	struct regulator_init_data *init_data;
+
+	init_data = devm_kzalloc(dev, sizeof(*init_data), GFP_KERNEL);
+	if (!init_data)
+		return -ENOMEM;
+
+	init_data->constraints.valid_ops_mask |= REGULATOR_CHANGE_STATUS;
+	qphy->dpdm_rdesc.owner = THIS_MODULE;
+	qphy->dpdm_rdesc.type = REGULATOR_VOLTAGE;
+	qphy->dpdm_rdesc.ops = &qusb_phy_dpdm_regulator_ops;
+	qphy->dpdm_rdesc.name = kbasename(dev->of_node->full_name);
+
+	cfg.dev = dev;
+	cfg.init_data = init_data;
+	cfg.driver_data = qphy;
+	cfg.of_node = dev->of_node;
+
+	qphy->dpdm_rdev = devm_regulator_register(dev, &qphy->dpdm_rdesc, &cfg);
+	if (IS_ERR(qphy->dpdm_rdev))
+		return PTR_ERR(qphy->dpdm_rdev);
+
+	return 0;
+}
+
+static int qusb_phy_probe(struct platform_device *pdev)
+{
+	struct qusb_phy *qphy;
+	struct device *dev = &pdev->dev;
+	struct resource *res;
+	int ret = 0, size = 0;
+	const char *phy_type;
+	bool hold_phy_reset;
+
+	qphy = devm_kzalloc(dev, sizeof(*qphy), GFP_KERNEL);
+	if (!qphy)
+		return -ENOMEM;
+
+	qphy->phy.dev = dev;
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+							"qusb_phy_base");
+	qphy->base = devm_ioremap_resource(dev, res);
+	if (IS_ERR(qphy->base))
+		return PTR_ERR(qphy->base);
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+							"emu_phy_base");
+	if (res) {
+		qphy->emu_phy_base = devm_ioremap_resource(dev, res);
+		if (IS_ERR(qphy->emu_phy_base)) {
+			dev_dbg(dev, "couldn't ioremap emu_phy_base\n");
+			qphy->emu_phy_base = NULL;
+		}
+	}
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+							"tune2_efuse_addr");
+	if (res) {
+		qphy->tune2_efuse_reg = devm_ioremap_nocache(dev, res->start,
+							resource_size(res));
+		if (!IS_ERR_OR_NULL(qphy->tune2_efuse_reg)) {
+			ret = of_property_read_u32(dev->of_node,
+					"qcom,tune2-efuse-bit-pos",
+					&qphy->tune2_efuse_bit_pos);
+			if (!ret) {
+				ret = of_property_read_u32(dev->of_node,
+						"qcom,tune2-efuse-num-bits",
+						&qphy->tune2_efuse_num_of_bits);
+			}
+			of_property_read_u32(dev->of_node,
+						"qcom,tune2-efuse-correction",
+						&qphy->tune2_efuse_correction);
+
+			if (ret) {
+				dev_err(dev, "DT Value for tune2 efuse is invalid.\n");
+				return -EINVAL;
+			}
+		}
+	}
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+							"ref_clk_addr");
+	if (res) {
+		qphy->ref_clk_base = devm_ioremap_nocache(dev,
+				res->start, resource_size(res));
+		if (!qphy->ref_clk_base) {
+			dev_err(dev, "failed to map ref_clk_address\n");
+			return -ENOMEM;
+		}
+
+		ret = of_property_read_string(dev->of_node,
+				"qcom,phy-clk-scheme", &phy_type);
+		if (ret) {
+			dev_err(dev, "error need qsub_phy_clk_scheme.\n");
+			return ret;
+		}
+
+		if (!strcasecmp(phy_type, "cml")) {
+			qphy->is_se_clk = false;
+		} else if (!strcasecmp(phy_type, "cmos")) {
+			qphy->is_se_clk = true;
+		} else {
+			dev_err(dev, "erro invalid qusb_phy_clk_scheme\n");
+			return -EINVAL;
+		}
+	}
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+						"tcsr_clamp_dig_n_1p8");
+	if (res) {
+		qphy->tcsr_clamp_dig_n = devm_ioremap_nocache(dev,
+				res->start, resource_size(res));
+		if (!qphy->tcsr_clamp_dig_n)
+			dev_err(dev, "failed to map tcsr_clamp_dig_n\n");
+	}
+
+
+	qphy->ref_clk_src = devm_clk_get(dev, "ref_clk_src");
+	if (IS_ERR(qphy->ref_clk_src))
+		dev_dbg(dev, "clk get failed for ref_clk_src\n");
+
+	qphy->ref_clk = devm_clk_get(dev, "ref_clk");
+	if (IS_ERR(qphy->ref_clk))
+		dev_dbg(dev, "clk get failed for ref_clk\n");
+	else
+		clk_set_rate(qphy->ref_clk, 19200000);
+
+	qphy->cfg_ahb_clk = devm_clk_get(dev, "cfg_ahb_clk");
+	if (IS_ERR(qphy->cfg_ahb_clk))
+		return PTR_ERR(qphy->cfg_ahb_clk);
+
+	qphy->phy_reset = devm_reset_control_get(dev, "phy_reset");
+	if (IS_ERR(qphy->phy_reset))
+		return PTR_ERR(qphy->phy_reset);
+
+	qphy->emulation = of_property_read_bool(dev->of_node,
+					"qcom,emulation");
+
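+	/*
+	 * Each of the following *-seq DT properties is a flat list of
+	 * <value offset> pairs (see qusb_phy_write_seq()), so the parsed
+	 * length must be even.
+	 */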
+	of_get_property(dev->of_node, "qcom,emu-init-seq", &size);
+	if (size) {
+		qphy->emu_init_seq = devm_kzalloc(dev,
+						size, GFP_KERNEL);
+		if (qphy->emu_init_seq) {
+			qphy->emu_init_seq_len =
+				(size / sizeof(*qphy->emu_init_seq));
+			if (qphy->emu_init_seq_len % 2) {
+				dev_err(dev, "invalid emu_init_seq_len\n");
+				return -EINVAL;
+			}
+
+			of_property_read_u32_array(dev->of_node,
+				"qcom,emu-init-seq",
+				qphy->emu_init_seq,
+				qphy->emu_init_seq_len);
+		} else {
+			dev_dbg(dev, "error allocating memory for emu_init_seq\n");
+		}
+	}
+
+	size = 0;
+	of_get_property(dev->of_node, "qcom,phy-pll-reset-seq", &size);
+	if (size) {
+		qphy->phy_pll_reset_seq = devm_kzalloc(dev,
+						size, GFP_KERNEL);
+		if (qphy->phy_pll_reset_seq) {
+			qphy->phy_pll_reset_seq_len =
+				(size / sizeof(*qphy->phy_pll_reset_seq));
+			if (qphy->phy_pll_reset_seq_len % 2) {
+				dev_err(dev, "invalid phy_pll_reset_seq_len\n");
+				return -EINVAL;
+			}
+
+			of_property_read_u32_array(dev->of_node,
+				"qcom,phy-pll-reset-seq",
+				qphy->phy_pll_reset_seq,
+				qphy->phy_pll_reset_seq_len);
+		} else {
+			dev_dbg(dev, "error allocating memory for phy_pll_reset_seq\n");
+		}
+	}
+
+	size = 0;
+	of_get_property(dev->of_node, "qcom,emu-dcm-reset-seq", &size);
+	if (size) {
+		qphy->emu_dcm_reset_seq = devm_kzalloc(dev,
+						size, GFP_KERNEL);
+		if (qphy->emu_dcm_reset_seq) {
+			qphy->emu_dcm_reset_seq_len =
+				(size / sizeof(*qphy->emu_dcm_reset_seq));
+			if (qphy->emu_dcm_reset_seq_len % 2) {
+				dev_err(dev, "invalid emu_dcm_reset_seq_len\n");
+				return -EINVAL;
+			}
+
+			of_property_read_u32_array(dev->of_node,
+				"qcom,emu-dcm-reset-seq",
+				qphy->emu_dcm_reset_seq,
+				qphy->emu_dcm_reset_seq_len);
+		} else {
+			dev_dbg(dev, "error allocating memory for emu_dcm_reset_seq\n");
+		}
+	}
+
+	size = 0;
+	of_get_property(dev->of_node, "qcom,qusb-phy-init-seq", &size);
+	if (size) {
+		qphy->qusb_phy_init_seq = devm_kzalloc(dev,
+						size, GFP_KERNEL);
+		if (qphy->qusb_phy_init_seq) {
+			qphy->init_seq_len =
+				(size / sizeof(*qphy->qusb_phy_init_seq));
+			if (qphy->init_seq_len % 2) {
+				dev_err(dev, "invalid init_seq_len\n");
+				return -EINVAL;
+			}
+
+			of_property_read_u32_array(dev->of_node,
+				"qcom,qusb-phy-init-seq",
+				qphy->qusb_phy_init_seq,
+				qphy->init_seq_len);
+		} else {
+			dev_err(dev, "error allocating memory for phy_init_seq\n");
+		}
+	}
+
+	qphy->ulpi_mode = false;
+	ret = of_property_read_string(dev->of_node, "phy_type", &phy_type);
+
+	if (!ret) {
+		if (!strcasecmp(phy_type, "ulpi"))
+			qphy->ulpi_mode = true;
+	} else {
+		dev_err(dev, "error reading phy_type property\n");
+		return ret;
+	}
+
+	hold_phy_reset = of_property_read_bool(dev->of_node, "qcom,hold-reset");
+
+	/* default to major revision 2 */
+	qphy->major_rev = 2;
+	ret = of_property_read_u32(dev->of_node, "qcom,major-rev",
+						&qphy->major_rev);
+
+	ret = of_property_read_u32_array(dev->of_node, "qcom,vdd-voltage-level",
+					 (u32 *) qphy->vdd_levels,
+					 ARRAY_SIZE(qphy->vdd_levels));
+	if (ret) {
+		dev_err(dev, "error reading qcom,vdd-voltage-level property\n");
+		return ret;
+	}
+
+	qphy->vdd = devm_regulator_get(dev, "vdd");
+	if (IS_ERR(qphy->vdd)) {
+		dev_err(dev, "unable to get vdd supply\n");
+		return PTR_ERR(qphy->vdd);
+	}
+
+	qphy->vdda33 = devm_regulator_get(dev, "vdda33");
+	if (IS_ERR(qphy->vdda33)) {
+		dev_err(dev, "unable to get vdda33 supply\n");
+		return PTR_ERR(qphy->vdda33);
+	}
+
+	qphy->vdda18 = devm_regulator_get(dev, "vdda18");
+	if (IS_ERR(qphy->vdda18)) {
+		dev_err(dev, "unable to get vdda18 supply\n");
+		return PTR_ERR(qphy->vdda18);
+	}
+
+	mutex_init(&qphy->phy_lock);
+	platform_set_drvdata(pdev, qphy);
+
+	qphy->phy.label			= "msm-qusb-phy";
+	qphy->phy.init			= qusb_phy_init;
+	qphy->phy.set_suspend           = qusb_phy_set_suspend;
+	qphy->phy.shutdown		= qusb_phy_shutdown;
+	qphy->phy.type			= USB_PHY_TYPE_USB2;
+	qphy->phy.notify_connect        = qusb_phy_notify_connect;
+	qphy->phy.notify_disconnect     = qusb_phy_notify_disconnect;
+
+	/*
+	 * On some platforms multiple QUSB PHYs are available. If a QUSB PHY
+	 * is not used, leakage current is seen on its related voltage rail.
+	 * Hence explicitly keep the unused QUSB PHY in reset here.
+	 */
+	if (hold_phy_reset) {
+		ret = reset_control_assert(qphy->phy_reset);
+		if (ret)
+			dev_err(dev, "%s:phy_reset assert failed\n", __func__);
+	}
+
+	ret = usb_add_phy_dev(&qphy->phy);
+	if (ret)
+		return ret;
+
+	ret = qusb_phy_regulator_init(qphy);
+	if (ret)
+		usb_remove_phy(&qphy->phy);
+
+	/* assert clamp dig n to reduce leakage on 1p8 upon boot up */
+	if (qphy->tcsr_clamp_dig_n)
+		writel_relaxed(0x0, qphy->tcsr_clamp_dig_n);
+
+	return ret;
+}
+
+static int qusb_phy_remove(struct platform_device *pdev)
+{
+	struct qusb_phy *qphy = platform_get_drvdata(pdev);
+
+	usb_remove_phy(&qphy->phy);
+
+	if (qphy->clocks_enabled) {
+		clk_disable_unprepare(qphy->cfg_ahb_clk);
+		clk_disable_unprepare(qphy->ref_clk);
+		clk_disable_unprepare(qphy->ref_clk_src);
+		qphy->clocks_enabled = false;
+	}
+
+	qusb_phy_enable_power(qphy, false);
+
+	return 0;
+}
+
+static const struct of_device_id qusb_phy_id_table[] = {
+	{ .compatible = "qcom,qusb2phy", },
+	{ },
+};
+MODULE_DEVICE_TABLE(of, qusb_phy_id_table);
+
+static struct platform_driver qusb_phy_driver = {
+	.probe		= qusb_phy_probe,
+	.remove		= qusb_phy_remove,
+	.driver = {
+		.name	= "msm-qusb-phy",
+		.of_match_table = of_match_ptr(qusb_phy_id_table),
+	},
+};
+
+module_platform_driver(qusb_phy_driver);
+
+MODULE_DESCRIPTION("MSM QUSB2 PHY driver");
+MODULE_LICENSE("GPL v2");
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/usb/phy/phy-msm-qusb-v2.c	2019-01-22 16:16:27.375281470 +0100
@@ -0,0 +1,1141 @@
+/*
+ * Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/clk.h>
+#include <linux/clk/msm-clk.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/power_supply.h>
+#include <linux/regulator/consumer.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+#include <linux/usb/phy.h>
+#include <linux/usb/msm_hsusb.h>
+#include <linux/reset.h>
+
+#define QUSB2PHY_PWR_CTRL1		0x210
+#define PWR_CTRL1_POWR_DOWN		BIT(0)
+
+#define QUSB2PHY_PLL_COMMON_STATUS_ONE	0x1A0
+#define CORE_READY_STATUS		BIT(0)
+
+/* Get TUNE value from efuse bit-mask */
+#define TUNE_VAL_MASK(val, pos, mask)	((val >> pos) & mask)
+
+#define QUSB2PHY_INTR_CTRL		0x22C
+#define DMSE_INTR_HIGH_SEL              BIT(4)
+#define DPSE_INTR_HIGH_SEL              BIT(3)
+#define CHG_DET_INTR_EN                 BIT(2)
+#define DMSE_INTR_EN                    BIT(1)
+#define DPSE_INTR_EN                    BIT(0)
+
+#define QUSB2PHY_INTR_STAT		0x230
+#define DMSE_INTERRUPT			BIT(1)
+#define DPSE_INTERRUPT			BIT(0)
+
+#define QUSB2PHY_PORT_TUNE1		0x23c
+#define QUSB2PHY_TEST1			0x24C
+
+#define QUSB2PHY_1P2_VOL_MIN           1200000 /* uV */
+#define QUSB2PHY_1P2_VOL_MAX           1200000 /* uV */
+#define QUSB2PHY_1P2_HPM_LOAD          23000   /* uA */
+
+#define QUSB2PHY_1P8_VOL_MIN           1800000 /* uV */
+#define QUSB2PHY_1P8_VOL_MAX           1800000 /* uV */
+#define QUSB2PHY_1P8_HPM_LOAD          30000   /* uA */
+
+#define QUSB2PHY_3P3_VOL_MIN		3075000 /* uV */
+#define QUSB2PHY_3P3_VOL_MAX		3200000 /* uV */
+#define QUSB2PHY_3P3_HPM_LOAD		30000	/* uA */
+
+#define LINESTATE_DP			BIT(0)
+#define LINESTATE_DM			BIT(1)
+
+#define QUSB2PHY_PLL_ANALOG_CONTROLS_ONE	0x0
+#define QUSB2PHY_PLL_ANALOG_CONTROLS_TWO	0x4
+
+unsigned int phy_tune1;
+module_param(phy_tune1, uint, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(phy_tune1, "QUSB PHY v2 TUNE1");
+
+struct qusb_phy {
+	struct usb_phy		phy;
+	struct mutex		lock;
+	void __iomem		*base;
+	void __iomem		*efuse_reg;
+	void __iomem		*tcsr_clamp_dig_n;
+
+	struct clk		*ref_clk_src;
+	struct clk		*ref_clk;
+	struct clk		*cfg_ahb_clk;
+	struct reset_control	*phy_reset;
+
+	struct regulator	*vdd;
+	struct regulator	*vdda33;
+	struct regulator	*vdda18;
+	struct regulator	*vdda12;
+	int			vdd_levels[3]; /* none, low, high */
+	int			vdda33_levels[3];
+	int			init_seq_len;
+	int			*qusb_phy_init_seq;
+	int			host_init_seq_len;
+	int			*qusb_phy_host_init_seq;
+
+	u32			tune_val;
+	int			efuse_bit_pos;
+	int			efuse_num_of_bits;
+
+	int			power_enabled_ref;
+	bool			clocks_enabled;
+	bool			cable_connected;
+	bool			suspended;
+	bool			rm_pulldown;
+
+	struct regulator_desc	dpdm_rdesc;
+	struct regulator_dev	*dpdm_rdev;
+
+	/* emulation targets specific */
+	void __iomem		*emu_phy_base;
+	bool			emulation;
+	int			*emu_init_seq;
+	int			emu_init_seq_len;
+	int			*phy_pll_reset_seq;
+	int			phy_pll_reset_seq_len;
+	int			*emu_dcm_reset_seq;
+	int			emu_dcm_reset_seq_len;
+};
+
+static void qusb_phy_enable_clocks(struct qusb_phy *qphy, bool on)
+{
+	dev_dbg(qphy->phy.dev, "%s(): clocks_enabled:%d on:%d\n",
+			__func__, qphy->clocks_enabled, on);
+
+	if (!qphy->clocks_enabled && on) {
+		clk_prepare_enable(qphy->ref_clk_src);
+		clk_prepare_enable(qphy->ref_clk);
+		clk_prepare_enable(qphy->cfg_ahb_clk);
+		qphy->clocks_enabled = true;
+	}
+
+	if (qphy->clocks_enabled && !on) {
+		clk_disable_unprepare(qphy->ref_clk);
+		clk_disable_unprepare(qphy->ref_clk_src);
+		clk_disable_unprepare(qphy->cfg_ahb_clk);
+		qphy->clocks_enabled = false;
+	}
+
+	dev_dbg(qphy->phy.dev, "%s(): clocks_enabled:%d\n", __func__,
+						qphy->clocks_enabled);
+}
+
+static int qusb_phy_config_vdd(struct qusb_phy *qphy, int high)
+{
+	int min, ret;
+
+	min = high ? 1 : 0; /* low or none? */
+	ret = regulator_set_voltage(qphy->vdd, qphy->vdd_levels[min],
+						qphy->vdd_levels[2]);
+	if (ret) {
+		dev_err(qphy->phy.dev, "unable to set voltage for qusb vdd\n");
+		return ret;
+	}
+
+	dev_dbg(qphy->phy.dev, "min_vol:%d max_vol:%d\n",
+			qphy->vdd_levels[min], qphy->vdd_levels[2]);
+	return ret;
+}
+
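+/*
+ * Regulator bring-up/tear-down with reference counting under
+ * qphy->lock: only the first "on" request powers the rails and only the
+ * last "off" request drops them, unlike the v1 PHY driver above which
+ * keeps a simple boolean.
+ */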
+static int qusb_phy_enable_power(struct qusb_phy *qphy, bool on)
+{
+	int ret = 0;
+
+	mutex_lock(&qphy->lock);
+
+	dev_dbg(qphy->phy.dev,
+		"%s:req to turn %s regulators. power_enabled_ref:%d\n",
+			__func__, on ? "on" : "off", qphy->power_enabled_ref);
+
+	if (on && ++qphy->power_enabled_ref > 1) {
+		dev_dbg(qphy->phy.dev, "PHYs' regulators are already on\n");
+		goto done;
+	}
+
+	if (!on) {
+		if (!qphy->power_enabled_ref) {
+			dev_dbg(qphy->phy.dev,
+				"PHYs' regulators are already off\n");
+			goto done;
+		}
+
+		qphy->power_enabled_ref--;
+		if (!qphy->power_enabled_ref)
+			goto disable_vdda33;
+
+		dev_dbg(qphy->phy.dev, "Skip turning off PHYs' regulators\n");
+		goto done;
+	}
+
+	ret = qusb_phy_config_vdd(qphy, true);
+	if (ret) {
+		dev_err(qphy->phy.dev, "Unable to config VDD:%d\n",
+							ret);
+		goto err_vdd;
+	}
+
+	ret = regulator_enable(qphy->vdd);
+	if (ret) {
+		dev_err(qphy->phy.dev, "Unable to enable VDD\n");
+		goto unconfig_vdd;
+	}
+
+	ret = regulator_set_load(qphy->vdda12, QUSB2PHY_1P2_HPM_LOAD);
+	if (ret < 0) {
+		dev_err(qphy->phy.dev, "Unable to set HPM of vdda12:%d\n", ret);
+		goto disable_vdd;
+	}
+
+	ret = regulator_set_voltage(qphy->vdda12, QUSB2PHY_1P2_VOL_MIN,
+						QUSB2PHY_1P2_VOL_MAX);
+	if (ret) {
+		dev_err(qphy->phy.dev,
+				"Unable to set voltage for vdda12:%d\n", ret);
+		goto put_vdda12_lpm;
+	}
+
+	ret = regulator_enable(qphy->vdda12);
+	if (ret) {
+		dev_err(qphy->phy.dev, "Unable to enable vdda12:%d\n", ret);
+		goto unset_vdda12;
+	}
+
+	ret = regulator_set_load(qphy->vdda18, QUSB2PHY_1P8_HPM_LOAD);
+	if (ret < 0) {
+		dev_err(qphy->phy.dev, "Unable to set HPM of vdda18:%d\n", ret);
+		goto disable_vdda12;
+	}
+
+	ret = regulator_set_voltage(qphy->vdda18, QUSB2PHY_1P8_VOL_MIN,
+						QUSB2PHY_1P8_VOL_MAX);
+	if (ret) {
+		dev_err(qphy->phy.dev,
+				"Unable to set voltage for vdda18:%d\n", ret);
+		goto put_vdda18_lpm;
+	}
+
+	ret = regulator_enable(qphy->vdda18);
+	if (ret) {
+		dev_err(qphy->phy.dev, "Unable to enable vdda18:%d\n", ret);
+		goto unset_vdda18;
+	}
+
+	ret = regulator_set_load(qphy->vdda33, QUSB2PHY_3P3_HPM_LOAD);
+	if (ret < 0) {
+		dev_err(qphy->phy.dev, "Unable to set HPM of vdda33:%d\n", ret);
+		goto disable_vdda18;
+	}
+
+	ret = regulator_set_voltage(qphy->vdda33, qphy->vdda33_levels[0],
+						qphy->vdda33_levels[2]);
+	if (ret) {
+		dev_err(qphy->phy.dev,
+				"Unable to set voltage for vdda33:%d\n", ret);
+		goto put_vdda33_lpm;
+	}
+
+	ret = regulator_enable(qphy->vdda33);
+	if (ret) {
+		dev_err(qphy->phy.dev, "Unable to enable vdda33:%d\n", ret);
+		goto unset_vdd33;
+	}
+
+	pr_debug("%s(): QUSB PHY's regulators are turned ON.\n", __func__);
+
+	mutex_unlock(&qphy->lock);
+	return ret;
+
+disable_vdda33:
+	ret = regulator_disable(qphy->vdda33);
+	if (ret)
+		dev_err(qphy->phy.dev, "Unable to disable vdda33:%d\n", ret);
+
+unset_vdd33:
+	ret = regulator_set_voltage(qphy->vdda33, 0, qphy->vdda33_levels[2]);
+	if (ret)
+		dev_err(qphy->phy.dev,
+			"Unable to set (0) voltage for vdda33:%d\n", ret);
+
+put_vdda33_lpm:
+	ret = regulator_set_load(qphy->vdda33, 0);
+	if (ret < 0)
+		dev_err(qphy->phy.dev, "Unable to set (0) HPM of vdda33\n");
+
+disable_vdda18:
+	ret = regulator_disable(qphy->vdda18);
+	if (ret)
+		dev_err(qphy->phy.dev, "Unable to disable vdda18:%d\n", ret);
+
+unset_vdda18:
+	ret = regulator_set_voltage(qphy->vdda18, 0, QUSB2PHY_1P8_VOL_MAX);
+	if (ret)
+		dev_err(qphy->phy.dev,
+			"Unable to set (0) voltage for vdda18:%d\n", ret);
+
+put_vdda18_lpm:
+	ret = regulator_set_load(qphy->vdda18, 0);
+	if (ret < 0)
+		dev_err(qphy->phy.dev, "Unable to set LPM of vdda18\n");
+
+disable_vdda12:
+	ret = regulator_disable(qphy->vdda12);
+	if (ret)
+		dev_err(qphy->phy.dev, "Unable to disable vdda12:%d\n", ret);
+unset_vdda12:
+	ret = regulator_set_voltage(qphy->vdda12, 0, QUSB2PHY_1P2_VOL_MAX);
+	if (ret)
+		dev_err(qphy->phy.dev,
+			"Unable to set (0) voltage for vdda12:%d\n", ret);
+put_vdda12_lpm:
+	ret = regulator_set_load(qphy->vdda12, 0);
+	if (ret < 0)
+		dev_err(qphy->phy.dev, "Unable to set LPM of vdda12\n");
+
+disable_vdd:
+	ret = regulator_disable(qphy->vdd);
+	if (ret)
+		dev_err(qphy->phy.dev, "Unable to disable vdd:%d\n",
+							ret);
+
+unconfig_vdd:
+	ret = qusb_phy_config_vdd(qphy, false);
+	if (ret)
+		dev_err(qphy->phy.dev, "Unable unconfig VDD:%d\n",
+							ret);
+err_vdd:
+	dev_dbg(qphy->phy.dev, "QUSB PHY's regulators are turned OFF.\n");
+
+	/* in case of error in turning on regulators */
+	if (qphy->power_enabled_ref)
+		qphy->power_enabled_ref--;
+done:
+	mutex_unlock(&qphy->lock);
+	return ret;
+}
+
+static int qusb_phy_update_dpdm(struct usb_phy *phy, int value)
+{
+	struct qusb_phy *qphy = container_of(phy, struct qusb_phy, phy);
+	int ret = 0;
+
+	dev_dbg(phy->dev, "%s value:%d rm_pulldown:%d\n",
+				__func__, value, qphy->rm_pulldown);
+
+	switch (value) {
+	case POWER_SUPPLY_DP_DM_DPF_DMF:
+		dev_dbg(phy->dev, "POWER_SUPPLY_DP_DM_DPF_DMF\n");
+		if (!qphy->rm_pulldown) {
+			ret = qusb_phy_enable_power(qphy, true);
+			if (ret >= 0) {
+				qphy->rm_pulldown = true;
+				dev_dbg(phy->dev, "DP_DM_F: rm_pulldown:%d\n",
+						qphy->rm_pulldown);
+			}
+		}
+
+		break;
+
+	case POWER_SUPPLY_DP_DM_DPR_DMR:
+		dev_dbg(phy->dev, "POWER_SUPPLY_DP_DM_DPR_DMR\n");
+		if (qphy->rm_pulldown) {
+			ret = qusb_phy_enable_power(qphy, false);
+			if (ret >= 0) {
+				qphy->rm_pulldown = false;
+				dev_dbg(phy->dev, "DP_DM_R: rm_pulldown:%d\n",
+						qphy->rm_pulldown);
+			}
+		}
+		break;
+
+	default:
+		ret = -EINVAL;
+		dev_err(phy->dev, "Invalid power supply property(%d)\n", value);
+		break;
+	}
+
+	return ret;
+}
+
+static void qusb_phy_get_tune1_param(struct qusb_phy *qphy)
+{
+	u8 reg;
+	u32 bit_mask = 1;
+
+	pr_debug("%s(): num_of_bits:%d bit_pos:%d\n", __func__,
+				qphy->efuse_num_of_bits,
+				qphy->efuse_bit_pos);
+
+	/* get bit mask based on number of bits to use with efuse reg */
+	bit_mask = (bit_mask << qphy->efuse_num_of_bits) - 1;
+
+	/*
+	 * If the efuse reg has been programmed (i.e. reads non-zero), use it
+	 * to program the tune parameters.
+	 */
+	qphy->tune_val = readl_relaxed(qphy->efuse_reg);
+	pr_debug("%s(): bit_mask:%d efuse based tune1 value:%d\n",
+				__func__, bit_mask, qphy->tune_val);
+
+	qphy->tune_val = TUNE_VAL_MASK(qphy->tune_val,
+				qphy->efuse_bit_pos, bit_mask);
+	reg = readb_relaxed(qphy->base + QUSB2PHY_PORT_TUNE1);
+	if (qphy->tune_val) {
+		reg = reg & 0x0f;
+		reg |= (qphy->tune_val << 4);
+	}
+	qphy->tune_val = reg;
+}
+
+static void qusb_phy_write_seq(void __iomem *base, u32 *seq, int cnt,
+		unsigned long delay)
+{
+	int i;
+
+	pr_debug("Seq count:%d\n", cnt);
+	for (i = 0; i < cnt; i = i+2) {
+		pr_debug("write 0x%02x to 0x%02x\n", seq[i], seq[i+1]);
+		writel_relaxed(seq[i], base + seq[i+1]);
+		if (delay)
+			usleep_range(delay, (delay + 2000));
+	}
+}
+
+static void qusb_phy_host_init(struct usb_phy *phy)
+{
+	u8 reg;
+	int ret;
+	struct qusb_phy *qphy = container_of(phy, struct qusb_phy, phy);
+
+	dev_dbg(phy->dev, "%s\n", __func__);
+
+	/* Perform phy reset */
+	ret = reset_control_assert(qphy->phy_reset);
+	if (ret)
+		dev_err(phy->dev, "%s: phy_reset assert failed\n", __func__);
+	usleep_range(100, 150);
+	ret = reset_control_deassert(qphy->phy_reset);
+	if (ret)
+		dev_err(phy->dev, "%s: phy_reset deassert failed\n", __func__);
+
+	qusb_phy_write_seq(qphy->base, qphy->qusb_phy_host_init_seq,
+			qphy->host_init_seq_len, 0);
+
+	/* Ensure above write is completed before turning ON ref clk */
+	wmb();
+
+	/* Required delay for the PHY PLL to lock successfully */
+	usleep_range(150, 160);
+
+	reg = readb_relaxed(qphy->base + QUSB2PHY_PLL_COMMON_STATUS_ONE);
+	dev_dbg(phy->dev, "QUSB2PHY_PLL_COMMON_STATUS_ONE:%x\n", reg);
+	if (!(reg & CORE_READY_STATUS)) {
+		dev_err(phy->dev, "QUSB PHY PLL LOCK fails:%x\n", reg);
+		WARN_ON(1);
+	}
+}
+
+static int qusb_phy_init(struct usb_phy *phy)
+{
+	struct qusb_phy *qphy = container_of(phy, struct qusb_phy, phy);
+	int ret;
+	u8 reg;
+
+	dev_dbg(phy->dev, "%s\n", __func__);
+
+	/* bump up vdda33 voltage to operating level */
+	ret = regulator_set_voltage(qphy->vdda33, qphy->vdda33_levels[1],
+						qphy->vdda33_levels[2]);
+	if (ret) {
+		dev_err(qphy->phy.dev,
+				"Unable to set voltage for vdda33:%d\n", ret);
+		return ret;
+	}
+
+	qusb_phy_enable_clocks(qphy, true);
+
+	/* Perform phy reset */
+	ret = reset_control_assert(qphy->phy_reset);
+	if (ret)
+		dev_err(phy->dev, "%s: phy_reset assert failed\n", __func__);
+	usleep_range(100, 150);
+	ret = reset_control_deassert(qphy->phy_reset);
+	if (ret)
+		dev_err(phy->dev, "%s: phy_reset deassert failed\n", __func__);
+
+	if (qphy->emulation) {
+		if (qphy->emu_init_seq)
+			qusb_phy_write_seq(qphy->emu_phy_base,
+				qphy->emu_init_seq, qphy->emu_init_seq_len, 0);
+
+		if (qphy->qusb_phy_init_seq)
+			qusb_phy_write_seq(qphy->base, qphy->qusb_phy_init_seq,
+					qphy->init_seq_len, 0);
+
+		/* Wait for 5ms as per QUSB2 RUMI sequence */
+		usleep_range(5000, 7000);
+
+		if (qphy->phy_pll_reset_seq)
+			qusb_phy_write_seq(qphy->base, qphy->phy_pll_reset_seq,
+					qphy->phy_pll_reset_seq_len, 10000);
+
+		if (qphy->emu_dcm_reset_seq)
+			qusb_phy_write_seq(qphy->emu_phy_base,
+					qphy->emu_dcm_reset_seq,
+					qphy->emu_dcm_reset_seq_len, 10000);
+
+		return 0;
+	}
+
+	/* Disable the PHY */
+	writel_relaxed(readl_relaxed(qphy->base + QUSB2PHY_PWR_CTRL1) |
+			PWR_CTRL1_POWR_DOWN,
+			qphy->base + QUSB2PHY_PWR_CTRL1);
+
+	if (qphy->qusb_phy_init_seq)
+		qusb_phy_write_seq(qphy->base, qphy->qusb_phy_init_seq,
+				qphy->init_seq_len, 0);
+	if (qphy->efuse_reg) {
+		if (!qphy->tune_val)
+			qusb_phy_get_tune1_param(qphy);
+
+		pr_debug("%s(): Programming TUNE1 parameter as:%x\n", __func__,
+				qphy->tune_val);
+		writel_relaxed(qphy->tune_val,
+				qphy->base + QUSB2PHY_PORT_TUNE1);
+	}
+
+	/* If phy_tune1 modparam set, override tune1 value */
+	if (phy_tune1) {
+		pr_debug("%s(): (modparam) TUNE1 val:0x%02x\n",
+						__func__, phy_tune1);
+		writel_relaxed(phy_tune1,
+				qphy->base + QUSB2PHY_PORT_TUNE1);
+	}
+
+	/* ensure above writes are completed before re-enabling PHY */
+	wmb();
+
+	/* Enable the PHY */
+	writel_relaxed(readl_relaxed(qphy->base + QUSB2PHY_PWR_CTRL1) &
+			~PWR_CTRL1_POWR_DOWN,
+			qphy->base + QUSB2PHY_PWR_CTRL1);
+
+	/* Ensure above write is completed before turning ON ref clk */
+	wmb();
+
+	/* Required delay for the PHY PLL to lock successfully */
+	usleep_range(150, 160);
+
+	reg = readb_relaxed(qphy->base + QUSB2PHY_PLL_COMMON_STATUS_ONE);
+	dev_dbg(phy->dev, "QUSB2PHY_PLL_COMMON_STATUS_ONE:%x\n", reg);
+	if (!(reg & CORE_READY_STATUS)) {
+		dev_err(phy->dev, "QUSB PHY PLL LOCK fails:%x\n", reg);
+		WARN_ON(1);
+	}
+	return 0;
+}
+
+static void qusb_phy_shutdown(struct usb_phy *phy)
+{
+	struct qusb_phy *qphy = container_of(phy, struct qusb_phy, phy);
+
+	dev_dbg(phy->dev, "%s\n", __func__);
+
+	qusb_phy_enable_clocks(qphy, true);
+
+	/* Disable the PHY */
+	writel_relaxed(readl_relaxed(qphy->base + QUSB2PHY_PWR_CTRL1) |
+			PWR_CTRL1_POWR_DOWN,
+			qphy->base + QUSB2PHY_PWR_CTRL1);
+
+	/* Makes sure that above write goes through */
+	wmb();
+
+	qusb_phy_enable_clocks(qphy, false);
+}
+
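+/*
+ * Unlike the v1 PHY above, the line state here is derived from the PHY
+ * mode flags (HS/FS vs LS) rather than read back from a UTMI status
+ * register.
+ */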
+static u32 qusb_phy_get_linestate(struct qusb_phy *qphy)
+{
+	u32 linestate = 0;
+
+	if (qphy->cable_connected) {
+		if (qphy->phy.flags & PHY_HSFS_MODE)
+			linestate |= LINESTATE_DP;
+		else if (qphy->phy.flags & PHY_LS_MODE)
+			linestate |= LINESTATE_DM;
+	}
+	return linestate;
+}
+
+/**
+ * qusb_phy_set_suspend() - Perform QUSB2 PHY suspend/resume.
+ * @phy: usb phy pointer.
+ * @suspend: 1 to enter suspend, 0 to resume.
+ */
+static int qusb_phy_set_suspend(struct usb_phy *phy, int suspend)
+{
+	struct qusb_phy *qphy = container_of(phy, struct qusb_phy, phy);
+	u32 linestate = 0, intr_mask = 0;
+	static u8 analog_ctrl_two;
+	int ret;
+
+	if (qphy->suspended && suspend) {
+		dev_dbg(phy->dev, "%s: USB PHY is already suspended\n",
+			__func__);
+		return 0;
+	}
+
+	if (suspend) {
+		/* Bus suspend case */
+		if (qphy->cable_connected ||
+			(qphy->phy.flags & PHY_HOST_MODE)) {
+
+			/* store clock settings like cmos/cml */
+			analog_ctrl_two =
+				readl_relaxed(qphy->base +
+					QUSB2PHY_PLL_ANALOG_CONTROLS_TWO);
+
+			/* use CSR & switch to SE clk */
+			writel_relaxed(0xb,
+				qphy->base + QUSB2PHY_PLL_ANALOG_CONTROLS_TWO);
+
+			/* enable clock bypass */
+			writel_relaxed(0x90,
+				qphy->base + QUSB2PHY_PLL_ANALOG_CONTROLS_ONE);
+
+			/* Disable all interrupts */
+			writel_relaxed(0x00,
+				qphy->base + QUSB2PHY_INTR_CTRL);
+
+			linestate = qusb_phy_get_linestate(qphy);
+			/*
+			 * D+/D- interrupts are level-triggered, but we are
+			 * only interested if the line state changes, so enable
+			 * the high/low trigger based on current state. In
+			 * other words, enable the triggers _opposite_ of what
+			 * the current D+/D- levels are.
+			 * e.g. if currently D+ high, D- low (HS 'J'/Suspend),
+			 * configure the mask to trigger on D+ low OR D- high
+			 */
+			intr_mask = DMSE_INTERRUPT | DPSE_INTERRUPT;
+			if (!(linestate & LINESTATE_DP)) /* D+ low */
+				intr_mask |= DPSE_INTR_HIGH_SEL;
+			if (!(linestate & LINESTATE_DM)) /* D- low */
+				intr_mask |= DMSE_INTR_HIGH_SEL;
+
+			writel_relaxed(intr_mask,
+				qphy->base + QUSB2PHY_INTR_CTRL);
+
+			if (linestate & (LINESTATE_DP | LINESTATE_DM)) {
+
+				/* enable phy auto-resume */
+				writel_relaxed(0x91,
+					qphy->base + QUSB2PHY_TEST1);
+				/* flush the previous write before next write */
+				wmb();
+				writel_relaxed(0x90,
+					qphy->base + QUSB2PHY_TEST1);
+			}
+
+			dev_dbg(phy->dev, "%s: intr_mask = %x\n",
+			__func__, intr_mask);
+
+			/* Makes sure that above write goes through */
+			wmb();
+			qusb_phy_enable_clocks(qphy, false);
+		} else { /* Cable disconnect case */
+
+			ret = reset_control_assert(qphy->phy_reset);
+			if (ret)
+				dev_err(phy->dev, "%s: phy_reset assert failed\n",
+						__func__);
+			usleep_range(100, 150);
+			ret = reset_control_deassert(qphy->phy_reset);
+			if (ret)
+				dev_err(phy->dev, "%s: phy_reset deassert failed\n",
+						__func__);
+
+			writel_relaxed(0x1b,
+				qphy->base + QUSB2PHY_PLL_ANALOG_CONTROLS_TWO);
+
+			/* enable clock bypass */
+			writel_relaxed(0x90,
+				qphy->base + QUSB2PHY_PLL_ANALOG_CONTROLS_ONE);
+
+			writel_relaxed(0x0, qphy->tcsr_clamp_dig_n);
+			/*
+			 * the clamp must be asserted before
+			 * power/clocks can be turned off
+			 */
+			wmb();
+
+			qusb_phy_enable_clocks(qphy, false);
+			qusb_phy_enable_power(qphy, false);
+		}
+		qphy->suspended = true;
+	} else {
+		/* Bus resume case */
+		if (qphy->cable_connected ||
+			(qphy->phy.flags & PHY_HOST_MODE)) {
+			qusb_phy_enable_clocks(qphy, true);
+
+			/* restore the default clock settings */
+			writel_relaxed(analog_ctrl_two,
+				qphy->base + QUSB2PHY_PLL_ANALOG_CONTROLS_TWO);
+
+			/* disable clock bypass */
+			writel_relaxed(0x80,
+				qphy->base + QUSB2PHY_PLL_ANALOG_CONTROLS_ONE);
+
+			/* Clear all interrupts on resume */
+			writel_relaxed(0x00,
+				qphy->base + QUSB2PHY_INTR_CTRL);
+
+			/* Makes sure that above write goes through */
+			wmb();
+		} else { /* Cable connect case */
+			writel_relaxed(0x1, qphy->tcsr_clamp_dig_n);
+
+			/*
+			 * the clamp must be de-asserted before
+			 * power/clocks can be turned on
+			 */
+			wmb();
+
+			qusb_phy_enable_power(qphy, true);
+			ret = reset_control_assert(qphy->phy_reset);
+			if (ret)
+				dev_err(phy->dev, "%s: phy_reset assert failed\n",
+						__func__);
+			usleep_range(100, 150);
+			ret = reset_control_deassert(qphy->phy_reset);
+			if (ret)
+				dev_err(phy->dev, "%s: phy_reset deassert failed\n",
+						__func__);
+
+			qusb_phy_enable_clocks(qphy, true);
+		}
+		qphy->suspended = false;
+	}
+
+	return 0;
+}
+
+static int qusb_phy_notify_connect(struct usb_phy *phy,
+					enum usb_device_speed speed)
+{
+	struct qusb_phy *qphy = container_of(phy, struct qusb_phy, phy);
+
+	qphy->cable_connected = true;
+
+	if (qphy->qusb_phy_host_init_seq && (qphy->phy.flags & PHY_HOST_MODE))
+		qusb_phy_host_init(phy);
+
+	dev_dbg(phy->dev, "QUSB PHY: connect notification cable_connected=%d\n",
+							qphy->cable_connected);
+	return 0;
+}
+
+static int qusb_phy_notify_disconnect(struct usb_phy *phy,
+					enum usb_device_speed speed)
+{
+	struct qusb_phy *qphy = container_of(phy, struct qusb_phy, phy);
+
+	qphy->cable_connected = false;
+
+	dev_dbg(phy->dev, "QUSB PHY: disconnect notification cable_connected=%d\n",
+							qphy->cable_connected);
+	return 0;
+}
+
+static int qusb_phy_dpdm_regulator_enable(struct regulator_dev *rdev)
+{
+	struct qusb_phy *qphy = rdev_get_drvdata(rdev);
+
+	dev_dbg(qphy->phy.dev, "%s\n", __func__);
+	return qusb_phy_update_dpdm(&qphy->phy, POWER_SUPPLY_DP_DM_DPF_DMF);
+}
+
+static int qusb_phy_dpdm_regulator_disable(struct regulator_dev *rdev)
+{
+	struct qusb_phy *qphy = rdev_get_drvdata(rdev);
+
+	dev_dbg(qphy->phy.dev, "%s\n", __func__);
+	return qusb_phy_update_dpdm(&qphy->phy, POWER_SUPPLY_DP_DM_DPR_DMR);
+}
+
+static int qusb_phy_dpdm_regulator_is_enabled(struct regulator_dev *rdev)
+{
+	struct qusb_phy *qphy = rdev_get_drvdata(rdev);
+
+	dev_dbg(qphy->phy.dev, "%s qphy->rm_pulldown = %d\n", __func__,
+					qphy->rm_pulldown);
+	return qphy->rm_pulldown;
+}
+
+static struct regulator_ops qusb_phy_dpdm_regulator_ops = {
+	.enable		= qusb_phy_dpdm_regulator_enable,
+	.disable	= qusb_phy_dpdm_regulator_disable,
+	.is_enabled	= qusb_phy_dpdm_regulator_is_enabled,
+};
+
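+/*
+ * Expose D+/D- control as a dummy regulator: enabling it requests
+ * POWER_SUPPLY_DP_DM_DPF_DMF and disabling it requests
+ * POWER_SUPPLY_DP_DM_DPR_DMR through qusb_phy_update_dpdm(), while
+ * is_enabled reports qphy->rm_pulldown. Consumers can thus drive the
+ * D+/D- state via the regulator framework.
+ */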
+static int qusb_phy_regulator_init(struct qusb_phy *qphy)
+{
+	struct device *dev = qphy->phy.dev;
+	struct regulator_config cfg = {};
+	struct regulator_init_data *init_data;
+
+	init_data = devm_kzalloc(dev, sizeof(*init_data), GFP_KERNEL);
+	if (!init_data)
+		return -ENOMEM;
+
+	init_data->constraints.valid_ops_mask |= REGULATOR_CHANGE_STATUS;
+	qphy->dpdm_rdesc.owner = THIS_MODULE;
+	qphy->dpdm_rdesc.type = REGULATOR_VOLTAGE;
+	qphy->dpdm_rdesc.ops = &qusb_phy_dpdm_regulator_ops;
+	qphy->dpdm_rdesc.name = kbasename(dev->of_node->full_name);
+
+	cfg.dev = dev;
+	cfg.init_data = init_data;
+	cfg.driver_data = qphy;
+	cfg.of_node = dev->of_node;
+
+	qphy->dpdm_rdev = devm_regulator_register(dev, &qphy->dpdm_rdesc, &cfg);
+	if (IS_ERR(qphy->dpdm_rdev))
+		return PTR_ERR(qphy->dpdm_rdev);
+
+	return 0;
+}
+
+static int qusb_phy_probe(struct platform_device *pdev)
+{
+	struct qusb_phy *qphy;
+	struct device *dev = &pdev->dev;
+	struct resource *res;
+	int ret = 0, size = 0;
+
+	qphy = devm_kzalloc(dev, sizeof(*qphy), GFP_KERNEL);
+	if (!qphy)
+		return -ENOMEM;
+
+	qphy->phy.dev = dev;
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+							"qusb_phy_base");
+	qphy->base = devm_ioremap_resource(dev, res);
+	if (IS_ERR(qphy->base))
+		return PTR_ERR(qphy->base);
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+							"emu_phy_base");
+	if (res) {
+		qphy->emu_phy_base = devm_ioremap_resource(dev, res);
+		if (IS_ERR(qphy->emu_phy_base)) {
+			dev_dbg(dev, "couldn't ioremap emu_phy_base\n");
+			qphy->emu_phy_base = NULL;
+		}
+	}
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+						"tcsr_clamp_dig_n_1p8");
+	if (res) {
+		qphy->tcsr_clamp_dig_n = devm_ioremap_resource(dev, res);
+		if (IS_ERR(qphy->tcsr_clamp_dig_n)) {
+			dev_dbg(dev, "couldn't ioremap tcsr_clamp_dig_n\n");
+			return PTR_ERR(qphy->tcsr_clamp_dig_n);
+		}
+	}
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+							"efuse_addr");
+	if (res) {
+		qphy->efuse_reg = devm_ioremap_nocache(dev, res->start,
+							resource_size(res));
+		if (!IS_ERR_OR_NULL(qphy->efuse_reg)) {
+			ret = of_property_read_u32(dev->of_node,
+					"qcom,efuse-bit-pos",
+					&qphy->efuse_bit_pos);
+			if (!ret) {
+				ret = of_property_read_u32(dev->of_node,
+						"qcom,efuse-num-bits",
+						&qphy->efuse_num_of_bits);
+			}
+
+			if (ret) {
+				dev_err(dev,
+				"DT Value for efuse is invalid.\n");
+				return -EINVAL;
+			}
+		}
+	}
+
+	qphy->ref_clk_src = devm_clk_get(dev, "ref_clk_src");
+	if (IS_ERR(qphy->ref_clk_src))
+		dev_dbg(dev, "clk get failed for ref_clk_src\n");
+
+	qphy->ref_clk = devm_clk_get(dev, "ref_clk");
+	if (IS_ERR(qphy->ref_clk))
+		dev_dbg(dev, "clk get failed for ref_clk\n");
+	else
+		clk_set_rate(qphy->ref_clk, 19200000);
+
+	if (of_property_match_string(pdev->dev.of_node,
+				"clock-names", "cfg_ahb_clk") >= 0) {
+		qphy->cfg_ahb_clk = devm_clk_get(dev, "cfg_ahb_clk");
+		if (IS_ERR(qphy->cfg_ahb_clk)) {
+			ret = PTR_ERR(qphy->cfg_ahb_clk);
+			if (ret != -EPROBE_DEFER)
+				dev_err(dev,
+				"clk get failed for cfg_ahb_clk ret %d\n", ret);
+			return ret;
+		}
+	}
+
+	qphy->phy_reset = devm_reset_control_get(dev, "phy_reset");
+	if (IS_ERR(qphy->phy_reset))
+		return PTR_ERR(qphy->phy_reset);
+
+	qphy->emulation = of_property_read_bool(dev->of_node,
+					"qcom,emulation");
+
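+	/*
+	 * The *-seq DT properties parsed below are flat u32 arrays of
+	 * <register offset, value> pairs; an odd element count is
+	 * rejected as malformed where checked.
+	 */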
+	of_get_property(dev->of_node, "qcom,emu-init-seq", &size);
+	if (size) {
+		qphy->emu_init_seq = devm_kzalloc(dev,
+						size, GFP_KERNEL);
+		if (qphy->emu_init_seq) {
+			qphy->emu_init_seq_len =
+				(size / sizeof(*qphy->emu_init_seq));
+			if (qphy->emu_init_seq_len % 2) {
+				dev_err(dev, "invalid emu_init_seq_len\n");
+				return -EINVAL;
+			}
+
+			of_property_read_u32_array(dev->of_node,
+				"qcom,emu-init-seq",
+				qphy->emu_init_seq,
+				qphy->emu_init_seq_len);
+		} else {
+			dev_dbg(dev,
+			"error allocating memory for emu_init_seq\n");
+		}
+	}
+
+	size = 0;
+	of_get_property(dev->of_node, "qcom,phy-pll-reset-seq", &size);
+	if (size) {
+		qphy->phy_pll_reset_seq = devm_kzalloc(dev,
+						size, GFP_KERNEL);
+		if (qphy->phy_pll_reset_seq) {
+			qphy->phy_pll_reset_seq_len =
+				(size / sizeof(*qphy->phy_pll_reset_seq));
+			if (qphy->phy_pll_reset_seq_len % 2) {
+				dev_err(dev, "invalid phy_pll_reset_seq_len\n");
+				return -EINVAL;
+			}
+
+			of_property_read_u32_array(dev->of_node,
+				"qcom,phy-pll-reset-seq",
+				qphy->phy_pll_reset_seq,
+				qphy->phy_pll_reset_seq_len);
+		} else {
+			dev_dbg(dev,
+			"error allocating memory for phy_pll_reset_seq\n");
+		}
+	}
+
+	size = 0;
+	of_get_property(dev->of_node, "qcom,emu-dcm-reset-seq", &size);
+	if (size) {
+		qphy->emu_dcm_reset_seq = devm_kzalloc(dev,
+						size, GFP_KERNEL);
+		if (qphy->emu_dcm_reset_seq) {
+			qphy->emu_dcm_reset_seq_len =
+				(size / sizeof(*qphy->emu_dcm_reset_seq));
+			if (qphy->emu_dcm_reset_seq_len % 2) {
+				dev_err(dev, "invalid emu_dcm_reset_seq_len\n");
+				return -EINVAL;
+			}
+
+			of_property_read_u32_array(dev->of_node,
+				"qcom,emu-dcm-reset-seq",
+				qphy->emu_dcm_reset_seq,
+				qphy->emu_dcm_reset_seq_len);
+		} else {
+			dev_dbg(dev,
+			"error allocating memory for emu_dcm_reset_seq\n");
+		}
+	}
+
+	size = 0;
+	of_get_property(dev->of_node, "qcom,qusb-phy-init-seq", &size);
+	if (size) {
+		qphy->qusb_phy_init_seq = devm_kzalloc(dev,
+						size, GFP_KERNEL);
+		if (qphy->qusb_phy_init_seq) {
+			qphy->init_seq_len =
+				(size / sizeof(*qphy->qusb_phy_init_seq));
+			if (qphy->init_seq_len % 2) {
+				dev_err(dev, "invalid init_seq_len\n");
+				return -EINVAL;
+			}
+
+			of_property_read_u32_array(dev->of_node,
+				"qcom,qusb-phy-init-seq",
+				qphy->qusb_phy_init_seq,
+				qphy->init_seq_len);
+		} else {
+			dev_err(dev,
+			"error allocating memory for phy_init_seq\n");
+		}
+	}
+
+	qphy->host_init_seq_len = of_property_count_elems_of_size(dev->of_node,
+				"qcom,qusb-phy-host-init-seq",
+				sizeof(*qphy->qusb_phy_host_init_seq));
+	if (qphy->host_init_seq_len > 0) {
+		qphy->qusb_phy_host_init_seq = devm_kcalloc(dev,
+					qphy->host_init_seq_len,
+					sizeof(*qphy->qusb_phy_host_init_seq),
+					GFP_KERNEL);
+		if (qphy->qusb_phy_host_init_seq)
+			of_property_read_u32_array(dev->of_node,
+				"qcom,qusb-phy-host-init-seq",
+				qphy->qusb_phy_host_init_seq,
+				qphy->host_init_seq_len);
+		else
+			return -ENOMEM;
+	}
+
+	ret = of_property_read_u32_array(dev->of_node, "qcom,vdd-voltage-level",
+					 (u32 *) qphy->vdd_levels,
+					 ARRAY_SIZE(qphy->vdd_levels));
+	if (ret) {
+		dev_err(dev, "error reading qcom,vdd-voltage-level property\n");
+		return ret;
+	}
+
+	ret = of_property_read_u32_array(dev->of_node,
+					"qcom,vdda33-voltage-level",
+					 (u32 *) qphy->vdda33_levels,
+					 ARRAY_SIZE(qphy->vdda33_levels));
+	if (ret == -EINVAL) {
+		qphy->vdda33_levels[0] = QUSB2PHY_3P3_VOL_MIN;
+		qphy->vdda33_levels[1] = QUSB2PHY_3P3_VOL_MIN;
+		qphy->vdda33_levels[2] = QUSB2PHY_3P3_VOL_MAX;
+	} else if (ret) {
+		dev_err(dev, "error reading qcom,vdda33-voltage-level property\n");
+		return ret;
+	}
+
+	qphy->vdd = devm_regulator_get(dev, "vdd");
+	if (IS_ERR(qphy->vdd)) {
+		dev_err(dev, "unable to get vdd supply\n");
+		return PTR_ERR(qphy->vdd);
+	}
+
+	qphy->vdda33 = devm_regulator_get(dev, "vdda33");
+	if (IS_ERR(qphy->vdda33)) {
+		dev_err(dev, "unable to get vdda33 supply\n");
+		return PTR_ERR(qphy->vdda33);
+	}
+
+	qphy->vdda18 = devm_regulator_get(dev, "vdda18");
+	if (IS_ERR(qphy->vdda18)) {
+		dev_err(dev, "unable to get vdda18 supply\n");
+		return PTR_ERR(qphy->vdda18);
+	}
+
+	qphy->vdda12 = devm_regulator_get(dev, "vdda12");
+	if (IS_ERR(qphy->vdda12)) {
+		dev_err(dev, "unable to get vdda12 supply\n");
+		return PTR_ERR(qphy->vdda12);
+	}
+
+	mutex_init(&qphy->lock);
+
+	platform_set_drvdata(pdev, qphy);
+
+	qphy->phy.label			= "msm-qusb-phy-v2";
+	qphy->phy.init			= qusb_phy_init;
+	qphy->phy.set_suspend           = qusb_phy_set_suspend;
+	qphy->phy.shutdown		= qusb_phy_shutdown;
+	qphy->phy.type			= USB_PHY_TYPE_USB2;
+	qphy->phy.notify_connect        = qusb_phy_notify_connect;
+	qphy->phy.notify_disconnect     = qusb_phy_notify_disconnect;
+
+	ret = usb_add_phy_dev(&qphy->phy);
+	if (ret)
+		return ret;
+
+	ret = qusb_phy_regulator_init(qphy);
+	if (ret)
+		usb_remove_phy(&qphy->phy);
+
+	/* assert clamp_dig_n (write 0) to reduce leakage on 1p8 upon boot up */
+	if (qphy->tcsr_clamp_dig_n)
+		writel_relaxed(0x0, qphy->tcsr_clamp_dig_n);
+
+	return ret;
+}
+
+static int qusb_phy_remove(struct platform_device *pdev)
+{
+	struct qusb_phy *qphy = platform_get_drvdata(pdev);
+
+	usb_remove_phy(&qphy->phy);
+
+	if (qphy->clocks_enabled) {
+		clk_disable_unprepare(qphy->cfg_ahb_clk);
+		clk_disable_unprepare(qphy->ref_clk);
+		clk_disable_unprepare(qphy->ref_clk_src);
+		qphy->clocks_enabled = false;
+	}
+
+	qusb_phy_enable_power(qphy, false);
+
+	return 0;
+}
+
+static const struct of_device_id qusb_phy_id_table[] = {
+	{ .compatible = "qcom,qusb2phy-v2", },
+	{ },
+};
+MODULE_DEVICE_TABLE(of, qusb_phy_id_table);
+
+static struct platform_driver qusb_phy_driver = {
+	.probe		= qusb_phy_probe,
+	.remove		= qusb_phy_remove,
+	.driver = {
+		.name	= "msm-qusb-phy-v2",
+		.of_match_table = of_match_ptr(qusb_phy_id_table),
+	},
+};
+
+module_platform_driver(qusb_phy_driver);
+
+MODULE_DESCRIPTION("MSM QUSB2 PHY v2 driver");
+MODULE_LICENSE("GPL v2");
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/drivers/usb/phy/phy-msm-ssusb-qmp.c	2019-10-29 09:26:25.045216898 +0100
@@ -0,0 +1,837 @@
+/*
+ * Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+#include <linux/usb/phy.h>
+#include <linux/usb/msm_hsusb.h>
+#include <linux/clk.h>
+#include <linux/clk/msm-clk.h>
+#include <linux/reset.h>
+
+enum ldo_levels {
+	VOLTAGE_LEVEL_NONE = 0,
+	VOLTAGE_LEVEL_MIN,
+	VOLTAGE_LEVEL_MAX,
+};
+
+#define INIT_MAX_TIME_USEC			1000
+
+/* default CORE voltage and load values */
+#define USB_SSPHY_1P2_VOL_MIN		1200000 /* uV */
+#define USB_SSPHY_1P2_VOL_MAX		1200000 /* uV */
+#define USB_SSPHY_HPM_LOAD		23000	/* uA */
+
+#define USB_SSPHY_LOAD_DEFAULT		-1
+
+/* USB3PHY_PCIE_USB3_PCS_PCS_STATUS bit */
+#define PHYSTATUS				BIT(6)
+
+/* PCIE_USB3_PHY_AUTONOMOUS_MODE_CTRL bits */
+#define ARCVR_DTCT_EN		BIT(0)
+#define ALFPS_DTCT_EN		BIT(1)
+#define ARCVR_DTCT_EVENT_SEL	BIT(4)
+
+/* PCIE_USB3_PHY_PCS_MISC_TYPEC_CTRL bits */
+
+/* 0 - selects Lane A. 1 - selects Lane B */
+#define SW_PORTSELECT		BIT(0)
+/* port select mux: 1 - sw control. 0 - HW control */
+#define SW_PORTSELECT_MX	BIT(1)
+
+enum qmp_phy_rev_reg {
+	USB3_PHY_PCS_STATUS,
+	USB3_PHY_AUTONOMOUS_MODE_CTRL,
+	USB3_PHY_LFPS_RXTERM_IRQ_CLEAR,
+	USB3_PHY_POWER_DOWN_CONTROL,
+	USB3_PHY_SW_RESET,
+	USB3_PHY_START,
+	USB3_PHY_PCS_MISC_TYPEC_CTRL,
+	USB3_PHY_REG_MAX,
+};
+
+/* reg values to write */
+struct qmp_reg_val {
+	u32 offset;
+	u32 val;
+	u32 delay;
+};
+
+struct msm_ssphy_qmp {
+	struct usb_phy		phy;
+	void __iomem		*base;
+	void __iomem		*vls_clamp_reg;
+	void __iomem		*tcsr_usb3_dp_phymode;
+
+	struct regulator	*vdd;
+	int			vdd_levels[3]; /* none, low, high */
+	struct regulator	*core_ldo;
+	int			core_voltage_levels[3];
+	struct regulator	*fpc_redrive_ldo;
+	int			redrive_voltage_levels[3];
+	int			redrive_load;
+	struct clk		*ref_clk_src;
+	struct clk		*ref_clk;
+	struct clk		*aux_clk;
+	struct clk		*cfg_ahb_clk;
+	struct clk		*pipe_clk;
+	bool			power_enabled;
+	struct reset_control	*phy_reset;
+	struct reset_control	*phy_phy_reset;
+
+	bool			clk_enabled;
+	bool			cable_connected;
+	bool			in_suspend;
+	bool			emulation;
+	unsigned int		*phy_reg; /* revision based offset */
+	unsigned int		*qmp_phy_init_seq;
+	int			init_seq_len;
+	unsigned int		*qmp_phy_reg_offset;
+	int			reg_offset_cnt;
+};
+
+static const struct of_device_id msm_usb_id_table[] = {
+	{
+		.compatible = "qcom,usb-ssphy-qmp",
+	},
+	{
+		.compatible = "qcom,usb-ssphy-qmp-v1",
+	},
+	{
+		.compatible = "qcom,usb-ssphy-qmp-v2",
+	},
+	{ },
+};
+MODULE_DEVICE_TABLE(of, msm_usb_id_table);
+
+static inline const char *get_cable_status_str(struct msm_ssphy_qmp *phy)
+{
+	return phy->cable_connected ? "connected" : "disconnected";
+}
+
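+/*
+ * Pulse the LFPS RXTERM interrupt clear bit: write 1, flush the write,
+ * then write 0 so a new event can be latched.
+ */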
+static void msm_ssusb_qmp_clr_lfps_rxterm_int(struct msm_ssphy_qmp *phy)
+{
+	writeb_relaxed(1, phy->base +
+			phy->phy_reg[USB3_PHY_LFPS_RXTERM_IRQ_CLEAR]);
+	/* flush the previous write before next write */
+	wmb();
+	writeb_relaxed(0, phy->base +
+			phy->phy_reg[USB3_PHY_LFPS_RXTERM_IRQ_CLEAR]);
+}
+
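+/*
+ * Arm or disarm the PHY's autonomous detection logic. When arming,
+ * receiver detection (ARCVR_DTCT_EN) is always enabled; in SS mode LFPS
+ * detection (ALFPS_DTCT_EN) is added, otherwise the alternate receiver
+ * detect event (ARCVR_DTCT_EVENT_SEL) is selected instead. The
+ * vls_clamp register clamps the PHY level shifter while armed.
+ */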
+static void msm_ssusb_qmp_enable_autonomous(struct msm_ssphy_qmp *phy,
+		int enable)
+{
+	u8 val;
+	unsigned int autonomous_mode_offset =
+			phy->phy_reg[USB3_PHY_AUTONOMOUS_MODE_CTRL];
+
+	dev_dbg(phy->phy.dev, "enabling QMP autonomous mode with cable %s\n",
+			get_cable_status_str(phy));
+
+	if (enable) {
+		msm_ssusb_qmp_clr_lfps_rxterm_int(phy);
+		val = readb_relaxed(phy->base + autonomous_mode_offset);
+		val |= ARCVR_DTCT_EN;
+		if (phy->phy.flags & DEVICE_IN_SS_MODE) {
+			val |= ALFPS_DTCT_EN;
+			val &= ~ARCVR_DTCT_EVENT_SEL;
+		} else {
+			val &= ~ALFPS_DTCT_EN;
+			val |= ARCVR_DTCT_EVENT_SEL;
+		}
+
+		writeb_relaxed(val, phy->base + autonomous_mode_offset);
+		/* clamp phy level shifter to perform autonomous detection */
+		writel_relaxed(0x1, phy->vls_clamp_reg);
+	} else {
+		writel_relaxed(0x0, phy->vls_clamp_reg);
+		writeb_relaxed(0, phy->base + autonomous_mode_offset);
+		msm_ssusb_qmp_clr_lfps_rxterm_int(phy);
+	}
+}
+
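+/*
+ * Apply an optional load request, then the min/max voltage window, and
+ * finally enable the LDO.
+ */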
+static int msm_ldo_enable(struct msm_ssphy_qmp *phy,
+		struct regulator *ldo, int *voltage_levels, int load)
+{
+	int ret = 0;
+
+	dev_dbg(phy->phy.dev,
+		"ldo: min_vol:%duV max_vol:%duV\n",
+		voltage_levels[VOLTAGE_LEVEL_MIN],
+		voltage_levels[VOLTAGE_LEVEL_MAX]);
+
+	if (load > 0) {
+		ret = regulator_set_load(ldo, load);
+		if (ret < 0)
+			return ret;
+	}
+
+	ret = regulator_set_voltage(ldo,
+			voltage_levels[VOLTAGE_LEVEL_MIN],
+			voltage_levels[VOLTAGE_LEVEL_MAX]);
+	if (ret)
+		return ret;
+
+	ret = regulator_enable(ldo);
+
+	return ret;
+}
+
+static int msm_ssusb_qmp_ldo_enable(struct msm_ssphy_qmp *phy, int on)
+{
+	int rc = 0;
+
+	dev_dbg(phy->phy.dev, "reg (%s)\n", on ? "HPM" : "LPM");
+
+	if (phy->power_enabled == on) {
+		dev_dbg(phy->phy.dev, "PHYs' regulators status %d\n",
+			phy->power_enabled);
+		return 0;
+	}
+
+	phy->power_enabled = on;
+
+	if (!on)
+		goto disable_regulators;
+
+	if (phy->fpc_redrive_ldo) {
+		rc = msm_ldo_enable(phy, phy->fpc_redrive_ldo,
+				phy->redrive_voltage_levels,
+				phy->redrive_load);
+		if (rc < 0) {
+			dev_err(phy->phy.dev,
+				"enable phy->fpc_redrive_ldo failed\n");
+			return rc;
+		}
+	}
+
+	rc = msm_ldo_enable(phy, phy->vdd, phy->vdd_levels,
+			USB_SSPHY_LOAD_DEFAULT);
+	if (rc < 0) {
+		dev_err(phy->phy.dev, "enable phy->vdd failed\n");
+		goto disable_fpc_redrive;
+	}
+
+	rc = msm_ldo_enable(phy, phy->core_ldo, phy->core_voltage_levels,
+			USB_SSPHY_HPM_LOAD);
+	if (rc < 0) {
+		dev_err(phy->phy.dev, "enable phy->core_ldo failed\n");
+		goto disable_vdd;
+	}
+
+	return 0;
+
+disable_regulators:
+	rc = regulator_disable(phy->core_ldo);
+	if (rc)
+		dev_err(phy->phy.dev, "disable phy->core_ldo failed\n");
+
+disable_vdd:
+	rc = regulator_disable(phy->vdd);
+	if (rc)
+		dev_err(phy->phy.dev, "disable phy->vdd failed\n");
+
+disable_fpc_redrive:
+	if (phy->fpc_redrive_ldo) {
+		rc = regulator_disable(phy->fpc_redrive_ldo);
+		if (rc)
+			dev_err(phy->phy.dev,
+				"disable phy->fpc_redrive_ldo failed\n");
+	}
+
+	return rc < 0 ? rc : 0;
+}
+
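+/*
+ * Program the PHY from a (offset, value, delay) table, typically built
+ * from the qcom,qmp-phy-init-seq DT property. The table is terminated
+ * by an entry whose offset is -1; a non-zero delay is applied (in
+ * microseconds) after the corresponding write.
+ */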
+static int configure_phy_regs(struct usb_phy *uphy,
+				const struct qmp_reg_val *reg)
+{
+	struct msm_ssphy_qmp *phy = container_of(uphy, struct msm_ssphy_qmp,
+					phy);
+
+	if (!reg) {
+		dev_err(uphy->dev, "NULL PHY configuration\n");
+		return -EINVAL;
+	}
+
+	while (reg->offset != -1) {
+		writel_relaxed(reg->val, phy->base + reg->offset);
+		if (reg->delay)
+			usleep_range(reg->delay, reg->delay + 10);
+		reg++;
+	}
+	return 0;
+}
+
+/* SSPHY Initialization */
+static int msm_ssphy_qmp_init(struct usb_phy *uphy)
+{
+	struct msm_ssphy_qmp *phy = container_of(uphy, struct msm_ssphy_qmp,
+					phy);
+	int ret, val;
+	unsigned int init_timeout_usec = INIT_MAX_TIME_USEC;
+	const struct qmp_reg_val *reg = NULL;
+
+	dev_dbg(uphy->dev, "Initializing QMP phy\n");
+
+	if (phy->emulation)
+		return 0;
+
+	ret = msm_ssusb_qmp_ldo_enable(phy, 1);
+	if (ret) {
+		dev_err(phy->phy.dev,
+		"msm_ssusb_qmp_ldo_enable(1) failed, ret=%d\n",
+		ret);
+		return ret;
+	}
+
+	if (!phy->clk_enabled) {
+		if (phy->ref_clk_src)
+			clk_prepare_enable(phy->ref_clk_src);
+		if (phy->ref_clk)
+			clk_prepare_enable(phy->ref_clk);
+		clk_prepare_enable(phy->aux_clk);
+		clk_prepare_enable(phy->cfg_ahb_clk);
+		clk_set_rate(phy->pipe_clk, 125000000);
+		clk_prepare_enable(phy->pipe_clk);
+		phy->clk_enabled = true;
+	}
+
+	writel_relaxed(0x01,
+		phy->base + phy->phy_reg[USB3_PHY_POWER_DOWN_CONTROL]);
+
+	/* Make sure that above write completed to get PHY into POWER DOWN */
+	mb();
+
+	reg = (struct qmp_reg_val *)phy->qmp_phy_init_seq;
+
+	/* Main configuration */
+	ret = configure_phy_regs(uphy, reg);
+	if (ret) {
+		dev_err(uphy->dev, "Failed the main PHY configuration\n");
+		return ret;
+	}
+
+	/* perform lane selection */
+	val = -EINVAL;
+	if (phy->phy.flags & PHY_LANE_A)
+		val = SW_PORTSELECT_MX;
+
+	if (phy->phy.flags & PHY_LANE_B)
+		val = SW_PORTSELECT | SW_PORTSELECT_MX;
+
+	if (val > 0)
+		writel_relaxed(val,
+			phy->base + phy->phy_reg[USB3_PHY_PCS_MISC_TYPEC_CTRL]);
+
+	writel_relaxed(0x03, phy->base + phy->phy_reg[USB3_PHY_START]);
+	writel_relaxed(0x00, phy->base + phy->phy_reg[USB3_PHY_SW_RESET]);
+
+	/* Make sure above write completed to bring PHY out of reset */
+	mb();
+
+	/* Wait for PHY initialization to be done */
+	do {
+		if (readl_relaxed(phy->base +
+			phy->phy_reg[USB3_PHY_PCS_STATUS]) & PHYSTATUS)
+			usleep_range(1, 2);
+		else
+			break;
+	} while (--init_timeout_usec);
+
+	if (!init_timeout_usec) {
+		dev_err(uphy->dev, "QMP PHY initialization timeout\n");
+		dev_err(uphy->dev, "USB3_PHY_PCS_STATUS:%x\n",
+				readl_relaxed(phy->base +
+					phy->phy_reg[USB3_PHY_PCS_STATUS]));
+		return -EBUSY;
+	}
+
+	return 0;
+}
+
+static int msm_ssphy_qmp_reset(struct usb_phy *uphy)
+{
+	struct msm_ssphy_qmp *phy = container_of(uphy, struct msm_ssphy_qmp,
+					phy);
+	int ret;
+
+	dev_dbg(uphy->dev, "Resetting QMP phy\n");
+
+	/* Assert USB3 PHY reset */
+	ret = reset_control_assert(phy->phy_phy_reset);
+	if (ret) {
+		dev_err(uphy->dev, "phy_phy_reset assert failed\n");
+		goto exit;
+	}
+
+	/* Assert USB3 PHY CSR reset */
+	ret = reset_control_assert(phy->phy_reset);
+	if (ret) {
+		dev_err(uphy->dev, "phy_reset assert failed\n");
+		goto deassert_phy_phy_reset;
+	}
+
+	/* select usb3 phy mode */
+	if (phy->tcsr_usb3_dp_phymode)
+		writel_relaxed(0x0, phy->tcsr_usb3_dp_phymode);
+
+	/* Deassert USB3 PHY CSR reset */
+	ret = reset_control_deassert(phy->phy_reset);
+	if (ret) {
+		dev_err(uphy->dev, "phy_reset deassert failed\n");
+		goto deassert_phy_phy_reset;
+	}
+
+	/* Deassert USB3 PHY reset */
+	ret = reset_control_deassert(phy->phy_phy_reset);
+	if (ret) {
+		dev_err(uphy->dev, "phy_phy_reset deassert failed\n");
+		goto exit;
+	}
+
+	return 0;
+
+deassert_phy_phy_reset:
+	ret = reset_control_deassert(phy->phy_phy_reset);
+	if (ret)
+		dev_err(uphy->dev, "phy_phy_reset deassert failed\n");
+exit:
+	phy->in_suspend = false;
+
+	return ret;
+}
+
+static int msm_ssphy_power_enable(struct msm_ssphy_qmp *phy, bool on)
+{
+	bool host = phy->phy.flags & PHY_HOST_MODE;
+	int ret = 0;
+
+	/*
+	 * Turn off the phy's LDOs when cable is disconnected for device mode
+	 * with external vbus_id indication.
+	 */
+	if (!host && !phy->cable_connected) {
+		if (on) {
+			ret = msm_ssusb_qmp_ldo_enable(phy, 1);
+			if (ret)
+				dev_err(phy->phy.dev,
+				"msm_ssusb_qmp_ldo_enable(1) failed, ret=%d\n",
+				ret);
+		} else {
+			ret = msm_ssusb_qmp_ldo_enable(phy, 0);
+			if (ret)
+				dev_err(phy->phy.dev,
+					"msm_ssusb_qmp_ldo_enable(0) failed, ret=%d\n",
+					ret);
+		}
+	}
+
+	return ret;
+}
+
+/**
+ * msm_ssphy_qmp_set_suspend - perform QMP PHY suspend/resume.
+ * @uphy: USB phy pointer.
+ * @suspend: 1 to enter suspend, 0 to resume.
+ */
+static int msm_ssphy_qmp_set_suspend(struct usb_phy *uphy, int suspend)
+{
+	struct msm_ssphy_qmp *phy = container_of(uphy, struct msm_ssphy_qmp,
+					phy);
+
+	dev_dbg(uphy->dev, "QMP PHY set_suspend for %s called with cable %s\n",
+			(suspend ? "suspend" : "resume"),
+			get_cable_status_str(phy));
+
+	if (phy->in_suspend == suspend) {
+		dev_dbg(uphy->dev, "%s: USB PHY is already %s.\n",
+			__func__, (suspend ? "suspended" : "resumed"));
+		return 0;
+	}
+
+	if (suspend) {
+		if (phy->cable_connected)
+			msm_ssusb_qmp_enable_autonomous(phy, 1);
+		else
+			writel_relaxed(0x00,
+			phy->base + phy->phy_reg[USB3_PHY_POWER_DOWN_CONTROL]);
+
+		/* Make sure the above write has reached the PHY */
+		wmb();
+
+		clk_disable_unprepare(phy->cfg_ahb_clk);
+		clk_disable_unprepare(phy->aux_clk);
+		clk_disable_unprepare(phy->pipe_clk);
+		if (phy->ref_clk)
+			clk_disable_unprepare(phy->ref_clk);
+		if (phy->ref_clk_src)
+			clk_disable_unprepare(phy->ref_clk_src);
+		phy->clk_enabled = false;
+		phy->in_suspend = true;
+		msm_ssphy_power_enable(phy, 0);
+		dev_dbg(uphy->dev, "QMP PHY is suspended\n");
+	} else {
+		msm_ssphy_power_enable(phy, 1);
+		clk_prepare_enable(phy->pipe_clk);
+		if (!phy->clk_enabled) {
+			if (phy->ref_clk_src)
+				clk_prepare_enable(phy->ref_clk_src);
+			if (phy->ref_clk)
+				clk_prepare_enable(phy->ref_clk);
+			clk_prepare_enable(phy->aux_clk);
+			clk_prepare_enable(phy->cfg_ahb_clk);
+			phy->clk_enabled = true;
+		}
+		if (!phy->cable_connected) {
+			writel_relaxed(0x01,
+			phy->base + phy->phy_reg[USB3_PHY_POWER_DOWN_CONTROL]);
+		} else {
+			msm_ssusb_qmp_enable_autonomous(phy, 0);
+		}
+
+		/* Make sure that the above write has reached the PHY */
+		wmb();
+
+		phy->in_suspend = false;
+		dev_dbg(uphy->dev, "QMP PHY is resumed\n");
+	}
+
+	return 0;
+}
+
+static int msm_ssphy_qmp_notify_connect(struct usb_phy *uphy,
+				       enum usb_device_speed speed)
+{
+	struct msm_ssphy_qmp *phy = container_of(uphy, struct msm_ssphy_qmp,
+					phy);
+
+	dev_dbg(uphy->dev, "QMP phy connect notification\n");
+	phy->cable_connected = true;
+	dev_dbg(uphy->dev, "cable_connected=%d\n", phy->cable_connected);
+	return 0;
+}
+
+static int msm_ssphy_qmp_notify_disconnect(struct usb_phy *uphy,
+				       enum usb_device_speed speed)
+{
+	struct msm_ssphy_qmp *phy = container_of(uphy, struct msm_ssphy_qmp,
+					phy);
+
+	writel_relaxed(0x00,
+		phy->base + phy->phy_reg[USB3_PHY_POWER_DOWN_CONTROL]);
+	readl_relaxed(phy->base + phy->phy_reg[USB3_PHY_POWER_DOWN_CONTROL]);
+
+	dev_dbg(uphy->dev, "QMP phy disconnect notification\n");
+	dev_dbg(uphy->dev, "cable_connected=%d\n", phy->cable_connected);
+	phy->cable_connected = false;
+	return 0;
+}
+
+static int msm_ssphy_qmp_probe(struct platform_device *pdev)
+{
+	struct msm_ssphy_qmp *phy;
+	struct device *dev = &pdev->dev;
+	struct resource *res;
+	int ret = 0, size = 0, len;
+
+	phy = devm_kzalloc(dev, sizeof(*phy), GFP_KERNEL);
+	if (!phy)
+		return -ENOMEM;
+
+	phy->aux_clk = devm_clk_get(dev, "aux_clk");
+	if (IS_ERR(phy->aux_clk)) {
+		ret = PTR_ERR(phy->aux_clk);
+		phy->aux_clk = NULL;
+		if (ret != -EPROBE_DEFER)
+			dev_err(dev, "failed to get aux_clk\n");
+		goto err;
+	}
+
+	clk_set_rate(phy->aux_clk, clk_round_rate(phy->aux_clk, ULONG_MAX));
+
+	if (of_property_match_string(pdev->dev.of_node,
+				"clock-names", "cfg_ahb_clk") >= 0) {
+		phy->cfg_ahb_clk = devm_clk_get(dev, "cfg_ahb_clk");
+		if (IS_ERR(phy->cfg_ahb_clk)) {
+			ret = PTR_ERR(phy->cfg_ahb_clk);
+			if (ret != -EPROBE_DEFER)
+				dev_err(dev,
+				"failed to get cfg_ahb_clk ret %d\n", ret);
+			goto err;
+		}
+	}
+
+	phy->pipe_clk = devm_clk_get(dev, "pipe_clk");
+	if (IS_ERR(phy->pipe_clk)) {
+		ret = PTR_ERR(phy->pipe_clk);
+		phy->pipe_clk = NULL;
+		if (ret != -EPROBE_DEFER)
+			dev_err(dev, "failed to get pipe_clk\n");
+		goto err;
+	}
+
+	phy->phy_reset = devm_reset_control_get(dev, "phy_reset");
+	if (IS_ERR(phy->phy_reset)) {
+		ret = PTR_ERR(phy->phy_reset);
+		dev_dbg(dev, "failed to get phy_reset\n");
+		goto err;
+	}
+
+	phy->phy_phy_reset = devm_reset_control_get(dev, "phy_phy_reset");
+	if (IS_ERR(phy->phy_phy_reset)) {
+		ret = PTR_ERR(phy->phy_phy_reset);
+		dev_dbg(dev, "failed to get phy_phy_reset\n");
+		goto err;
+	}
+
+	of_get_property(dev->of_node, "qcom,qmp-phy-reg-offset", &size);
+	if (size) {
+		phy->qmp_phy_reg_offset = devm_kzalloc(dev,
+						size, GFP_KERNEL);
+		if (phy->qmp_phy_reg_offset) {
+			phy->reg_offset_cnt =
+				(size / sizeof(*phy->qmp_phy_reg_offset));
+			if (phy->reg_offset_cnt > USB3_PHY_REG_MAX) {
+				dev_err(dev, "invalid reg offset count\n");
+				return -EINVAL;
+			}
+
+			of_property_read_u32_array(dev->of_node,
+				"qcom,qmp-phy-reg-offset",
+				phy->qmp_phy_reg_offset,
+				phy->reg_offset_cnt);
+		} else {
+			dev_err(dev, "err mem alloc for qmp_phy_reg_offset\n");
+			return -ENOMEM;
+		}
+		phy->phy_reg = phy->qmp_phy_reg_offset;
+	} else {
+		dev_err(dev, "missing qcom,qmp-phy-reg-offset property\n");
+		return -EINVAL;
+	}
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+						"qmp_phy_base");
+	if (!res) {
+		dev_err(dev, "failed getting qmp_phy_base\n");
+		return -ENODEV;
+	}
+	phy->base = devm_ioremap_resource(dev, res);
+	if (IS_ERR(phy->base)) {
+		ret = PTR_ERR(phy->base);
+		goto err;
+	}
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+			"vls_clamp_reg");
+	if (!res) {
+		dev_err(dev, "failed getting vls_clamp_reg\n");
+		return -ENODEV;
+	}
+	phy->vls_clamp_reg = devm_ioremap_resource(dev, res);
+	if (IS_ERR(phy->vls_clamp_reg)) {
+		dev_err(dev, "couldn't find vls_clamp_reg address.\n");
+		return PTR_ERR(phy->vls_clamp_reg);
+	}
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+			"tcsr_usb3_dp_phymode");
+	if (res) {
+		phy->tcsr_usb3_dp_phymode = devm_ioremap_resource(dev, res);
+		if (IS_ERR(phy->tcsr_usb3_dp_phymode)) {
+			dev_err(dev, "err getting tcsr_usb3_dp_phymode addr\n");
+			return PTR_ERR(phy->tcsr_usb3_dp_phymode);
+		}
+	}
+
+	phy->emulation = of_property_read_bool(dev->of_node,
+						"qcom,emulation");
+	if (!phy->emulation) {
+		of_get_property(dev->of_node, "qcom,qmp-phy-init-seq", &size);
+		if (size) {
+			if (size % sizeof(*phy->qmp_phy_init_seq)) {
+				dev_err(dev, "invalid init_seq_len\n");
+				return -EINVAL;
+			}
+			phy->qmp_phy_init_seq = devm_kzalloc(dev,
+							size, GFP_KERNEL);
+			if (phy->qmp_phy_init_seq) {
+				phy->init_seq_len =
+					(size / sizeof(*phy->qmp_phy_init_seq));
+
+				of_property_read_u32_array(dev->of_node,
+					"qcom,qmp-phy-init-seq",
+					phy->qmp_phy_init_seq,
+					phy->init_seq_len);
+			} else {
+				dev_err(dev, "error allocating memory for phy_init_seq\n");
+				return -EINVAL;
+			}
+		} else {
+			dev_err(dev, "missing qcom,qmp-phy-init-seq property\n");
+			return -EINVAL;
+		}
+	}
+
+	/* Set default core voltage values */
+	phy->core_voltage_levels[VOLTAGE_LEVEL_NONE] = 0;
+	phy->core_voltage_levels[VOLTAGE_LEVEL_MIN] = USB_SSPHY_1P2_VOL_MIN;
+	phy->core_voltage_levels[VOLTAGE_LEVEL_MAX] = USB_SSPHY_1P2_VOL_MAX;
+
+	if (of_get_property(dev->of_node, "qcom,core-voltage-level", &len) &&
+		len == sizeof(phy->core_voltage_levels)) {
+		ret = of_property_read_u32_array(dev->of_node,
+				"qcom,core-voltage-level",
+				(u32 *)phy->core_voltage_levels,
+				len / sizeof(u32));
+		if (ret) {
+			dev_err(dev, "err qcom,core-voltage-level property\n");
+			goto err;
+		}
+	}
+
+	if (of_get_property(dev->of_node, "qcom,vdd-voltage-level", &len) &&
+		len == sizeof(phy->vdd_levels)) {
+		ret = of_property_read_u32_array(dev->of_node,
+				"qcom,vdd-voltage-level",
+				(u32 *) phy->vdd_levels,
+				len / sizeof(u32));
+		if (ret) {
+			dev_err(dev, "err qcom,vdd-voltage-level property\n");
+			goto err;
+		}
+	} else {
+		ret = -EINVAL;
+		dev_err(dev, "error invalid inputs for vdd-voltage-level\n");
+		goto err;
+	}
+
+	phy->vdd = devm_regulator_get(dev, "vdd");
+	if (IS_ERR(phy->vdd)) {
+		dev_err(dev, "unable to get vdd supply\n");
+		ret = PTR_ERR(phy->vdd);
+		goto err;
+	}
+
+	phy->core_ldo = devm_regulator_get(dev, "core");
+	if (IS_ERR(phy->core_ldo)) {
+		dev_err(dev, "unable to get core ldo supply\n");
+		ret = PTR_ERR(phy->core_ldo);
+		goto err;
+	}
+
+	phy->fpc_redrive_ldo = devm_regulator_get_optional(dev, "fpc-redrive");
+	if (IS_ERR(phy->fpc_redrive_ldo)) {
+		phy->fpc_redrive_ldo = NULL;
+		dev_dbg(dev, "no FPC re-drive ldo regulator\n");
+	} else {
+		if (of_get_property(dev->of_node,
+				"qcom,redrive-voltage-level", &len) &&
+				len == sizeof(phy->redrive_voltage_levels)) {
+			ret = of_property_read_u32_array(dev->of_node,
+					"qcom,redrive-voltage-level",
+					(u32 *) phy->redrive_voltage_levels,
+					len / sizeof(u32));
+			if (ret) {
+				dev_err(dev,
+					"err qcom,redrive-voltage-level\n");
+				goto err;
+			}
+		} else {
+			ret = -EINVAL;
+			dev_err(dev, "err inputs for redrive-voltage-level\n");
+			goto err;
+		}
+
+		ret = of_property_read_u32(dev->of_node, "qcom,redrive-load",
+				&phy->redrive_load);
+		if (ret) {
+			dev_err(&pdev->dev, "unable to read redrive load\n");
+			goto err;
+		}
+
+		dev_dbg(dev, "Get FPC re-drive ldo regulator\n");
+	}
+
+	phy->ref_clk_src = devm_clk_get(dev, "ref_clk_src");
+	if (IS_ERR(phy->ref_clk_src))
+		phy->ref_clk_src = NULL;
+	phy->ref_clk = devm_clk_get(dev, "ref_clk");
+	if (IS_ERR(phy->ref_clk))
+		phy->ref_clk = NULL;
+
+	platform_set_drvdata(pdev, phy);
+
+	if (of_property_read_bool(dev->of_node, "qcom,vbus-valid-override"))
+		phy->phy.flags |= PHY_VBUS_VALID_OVERRIDE;
+
+	phy->phy.dev			= dev;
+	phy->phy.init			= msm_ssphy_qmp_init;
+	phy->phy.set_suspend		= msm_ssphy_qmp_set_suspend;
+	phy->phy.notify_connect		= msm_ssphy_qmp_notify_connect;
+	phy->phy.notify_disconnect	= msm_ssphy_qmp_notify_disconnect;
+	phy->phy.reset			= msm_ssphy_qmp_reset;
+	phy->phy.type			= USB_PHY_TYPE_USB3;
+
+	ret = usb_add_phy_dev(&phy->phy);
+
+err:
+	return ret;
+}
+
+static int msm_ssphy_qmp_remove(struct platform_device *pdev)
+{
+	struct msm_ssphy_qmp *phy = platform_get_drvdata(pdev);
+
+	if (!phy)
+		return 0;
+
+	usb_remove_phy(&phy->phy);
+	if (phy->ref_clk)
+		clk_disable_unprepare(phy->ref_clk);
+	if (phy->ref_clk_src)
+		clk_disable_unprepare(phy->ref_clk_src);
+	msm_ssusb_qmp_ldo_enable(phy, 0);
+	clk_disable_unprepare(phy->aux_clk);
+	clk_disable_unprepare(phy->cfg_ahb_clk);
+	clk_disable_unprepare(phy->pipe_clk);
+	return 0;
+}
+
+static struct platform_driver msm_ssphy_qmp_driver = {
+	.probe		= msm_ssphy_qmp_probe,
+	.remove		= msm_ssphy_qmp_remove,
+	.driver = {
+		.name	= "msm-usb-ssphy-qmp",
+		.of_match_table = of_match_ptr(msm_usb_id_table),
+	},
+};
+
+module_platform_driver(msm_ssphy_qmp_driver);
+
+MODULE_DESCRIPTION("MSM USB SS QMP PHY driver");
+MODULE_LICENSE("GPL v2");
diff -Nruw linux-4.4.115-fbx/drivers/video/adf./Kconfig linux-4.4.115-fbx/drivers/video/adf/Kconfig
--- linux-4.4.115-fbx/drivers/video/adf./Kconfig	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/video/adf/Kconfig	2019-01-22 16:16:27.431281977 +0100
@@ -0,0 +1,14 @@
+menuconfig ADF
+	depends on SYNC
+	depends on DMA_SHARED_BUFFER
+	tristate "Atomic Display Framework"
+
+menuconfig ADF_FBDEV
+	depends on ADF
+	depends on FB
+	tristate "Helper for implementing the fbdev API in ADF drivers"
+
+menuconfig ADF_MEMBLOCK
+	depends on ADF
+	depends on HAVE_MEMBLOCK
+	bool "Helper for using memblocks as buffers in ADF drivers"
diff -Nruw linux-4.4.115-fbx/drivers/video/adf./Makefile linux-4.4.115-fbx/drivers/video/adf/Makefile
--- linux-4.4.115-fbx/drivers/video/adf./Makefile	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/video/adf/Makefile	2019-01-22 16:16:27.431281977 +0100
@@ -0,0 +1,17 @@
+ccflags-y := -Idrivers/staging/android
+
+CFLAGS_adf.o := -I$(src)
+
+obj-$(CONFIG_ADF) += adf_core.o
+
+adf_core-y := adf.o \
+	adf_client.o \
+	adf_fops.o \
+	adf_format.o \
+	adf_sysfs.o
+
+adf_core-$(CONFIG_COMPAT) += adf_fops32.o
+
+obj-$(CONFIG_ADF_FBDEV) += adf_fbdev.o
+
+obj-$(CONFIG_ADF_MEMBLOCK) += adf_memblock.o
diff -Nruw linux-4.4.115-fbx/drivers/video/fbdev/msm./Kconfig linux-4.4.115-fbx/drivers/video/fbdev/msm/Kconfig
--- linux-4.4.115-fbx/drivers/video/fbdev/msm./Kconfig	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/video/fbdev/msm/Kconfig	2019-01-22 16:16:27.479282412 +0100
@@ -0,0 +1,125 @@
+source "drivers/video/fbdev/msm/msm_dba/Kconfig"
+
+if FB_MSM
+
+config FB_MSM_MDSS_COMMON
+	bool
+
+choice
+	prompt "MDP HW version"
+	default FB_MSM_MDP
+
+config FB_MSM_MDP
+	select FB_MSM_MDP_HW
+	bool "MDP HW"
+	---help---
+	  Support for MSM MDP HW revision 2.2
+	  Say Y here if this is an msm7201 variant platform.
+
+config FB_MSM_MDSS
+	bool "MDSS HW"
+	select SYNC
+	select SW_SYNC
+	select FB_MSM_MDSS_COMMON
+	---help---
+	  The Mobile Display Sub System (MDSS) driver supports devices that
+	  contain the MDSS hardware block.
+
+	  The MDSS driver implements a frame buffer interface to provide
+	  access to the display hardware, and provides a way for users to
+	  display graphics on connected display panels.
+
+config FB_MSM_MDP_NONE
+	bool "MDP HW None"
+	---help---
+	  Say Y here if this is an mdm platform.
+
+endchoice
+
+config FB_MSM_QPIC
+	bool
+	select FB_MSM_MDSS_COMMON
+
+config FB_MSM_QPIC_ILI_QVGA_PANEL
+	bool "Qpic MIPI ILI QVGA Panel"
+	select FB_MSM_QPIC
+	---help---
+	  Support for the ILI TECHNOLOGY 9341 MIPI QVGA (240x320) panel,
+	  which has on-chip full display RAM and uses a parallel interface.
+
+config FB_MSM_QPIC_PANEL_DETECT
+	bool "Qpic Panel Detect"
+	select FB_MSM_QPIC_ILI_QVGA_PANEL
+	---help---
+	  Support for Qpic panel auto detect
+
+config FB_MSM_MDSS_WRITEBACK
+	bool "MDSS Writeback Panel"
+	---help---
+	  The MDSS Writeback Panel provides support for routing the output of
+	  the MDSS frame buffer driver and MDP processing to memory.
+
+config FB_MSM_MDSS_HDMI_PANEL
+	depends on FB_MSM_MDSS
+	select MSM_EXT_DISPLAY
+	bool "MDSS HDMI Tx Panel"
+	default n
+	---help---
+	  The MDSS HDMI Panel provides support for transmitting TMDS signals
+	  of MDSS frame buffer data to connected HDMI-compliant TVs, monitors,
+	  etc.
+
+config FB_MSM_MDSS_HDMI_MHL_SII8334
+	depends on FB_MSM_MDSS_HDMI_PANEL
+	bool "MHL SII8334 support"
+	default n
+	---help---
+	  Support the HDMI to MHL conversion.
+	  MHL (Mobile High-Definition Link) technology
+	  uses USB connector to output HDMI content
+
+config FB_MSM_MDSS_MHL3
+	depends on FB_MSM_MDSS_HDMI_PANEL
+	bool "MHL3 SII8620 Support"
+	default n
+	---help---
+	  Support the SiliconImage 8620 MHL Tx transmitter that uses
+	  USB connector to output HDMI content. Transmitter is an
+	  i2c device acting as an HDMI to MHL bridge. Chip supports
+	  MHL 3.0 standard.
+
+config FB_MSM_MDSS_DSI_CTRL_STATUS
+	tristate "DSI controller status check feature"
+	---help---
+	  Check DSI controller status periodically (default period is 5
+	  seconds) by sending Bus-Turn-Around (BTA) command. If DSI controller
+	  fails to acknowledge the BTA command, it sends PANEL_ALIVE=0 status
+	  to the HAL layer to reset the controller.
+
+config FB_MSM_MDSS_DP_PANEL
+	depends on FB_MSM_MDSS
+	select MSM_EXT_DISPLAY
+	bool "MDSS DP Panel"
+	---help---
+	  The MDSS DP Panel provides support for the DP host controller
+	  driver, which runs in video mode only and is responsible for
+	  transmitting the frame buffer from the host SoC to the DP display
+	  panel.
+
+config FB_MSM_MDSS_MDP3
+	depends on FB_MSM_MDSS
+	bool "MDP3 display controller"
+	---help---
+	  MDP3 provides support for an older display controller version
+	  included in the latest display sub-system, known as MDSS.
+
+config FB_MSM_MDSS_XLOG_DEBUG
+	depends on FB_MSM_MDSS
+	bool "Enable MDSS debugging"
+	---help---
+	  MDSS debugging provides support for display debugging features:
+	  dumping MDSS registers on driver errors, panicking the driver on
+	  fatal errors, and logging some display-driver events into an
+	  internal buffer (which avoids logging overhead).
+
+endif
diff -Nruw linux-4.4.115-fbx/drivers/video/fbdev/msm./msm_dba/Kconfig linux-4.4.115-fbx/drivers/video/fbdev/msm/msm_dba/Kconfig
--- linux-4.4.115-fbx/drivers/video/fbdev/msm./msm_dba/Kconfig	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/video/fbdev/msm/msm_dba/Kconfig	2019-01-22 16:16:27.535282919 +0100
@@ -0,0 +1,24 @@
+#
+# MSM DBA
+#
+
+config MSM_DBA
+	bool "MSM Display Bridge Abstraction support"
+	depends on ARM || ARM64
+	---help---
+	  Support for the MSM display bridge abstraction interface. MSM
+	  display drivers can use the same interface to interact with
+	  different third party bridge chips. Drivers implemented for third
+	  party bridge chips should support this interface to allow the
+	  display driver to control the bridge chip. The MSM DBA driver
+	  maintains a list of devices supported on the platform and allows
+	  clients to register and access these devices.
+
+config MSM_DBA_ADV7533
+	bool "ADV7533 driver support through MSM DBA interface"
+	depends on MSM_DBA
+	default n
+	---help---
+	  Support for ADV7533 DSI to HDMI display bridge driver. The driver
+	  controls the ADV7533 HW through the I2C interface and configures
+	  the DSI input and HDMI output video format.
diff -Nruw linux-4.4.115-fbx/drivers/video/fbdev/msm./msm_dba/Makefile linux-4.4.115-fbx/drivers/video/fbdev/msm/msm_dba/Makefile
--- linux-4.4.115-fbx/drivers/video/fbdev/msm./msm_dba/Makefile	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/video/fbdev/msm/msm_dba/Makefile	2019-01-22 16:16:27.535282919 +0100
@@ -0,0 +1,5 @@
+CFLAGS_msm_dba.o = -I$(src)
+obj-$(CONFIG_MSM_DBA) += msm_dba.o msm_dba_init.o msm_dba_helpers.o msm_dba_debug.o
+obj-$(CONFIG_MSM_DBA_ADV7533) += adv7533.o
+clean:
+	rm *.o
diff -Nruw linux-4.4.115-fbx/drivers/video/msm./ba/Kconfig linux-4.4.115-fbx/drivers/video/msm/ba/Kconfig
--- linux-4.4.115-fbx/drivers/video/msm./ba/Kconfig	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/video/msm/ba/Kconfig	2019-01-22 16:16:27.591283426 +0100
@@ -0,0 +1,12 @@
+#
+# MSM BA V4L2
+#
+
+config MSM_BA_V4L2
+	tristate "Qualcomm Technologies Inc MSM V4L2 based BA driver"
+	depends on VIDEO_V4L2
+	select FB_CFB_FILLRECT
+	select FB_CFB_COPYAREA
+	select FB_CFB_IMAGEBLIT
+	---help---
+	  Enables support for the MSM V4L2 bridge abstraction
diff -Nruw linux-4.4.115-fbx/drivers/video/msm./ba/Makefile linux-4.4.115-fbx/drivers/video/msm/ba/Makefile
--- linux-4.4.115-fbx/drivers/video/msm./ba/Makefile	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/drivers/video/msm/ba/Makefile	2019-01-22 16:16:27.591283426 +0100
@@ -0,0 +1,5 @@
+obj-$(CONFIG_MSM_BA_V4L2) += msm_v4l2_ba.o \
+            msm_ba_common.o \
+            msm_ba.o \
+            msm_ba_debug.o
+
diff -Nruw linux-4.4.115-fbx/fs/crypto./Kconfig linux-4.4.115-fbx/fs/crypto/Kconfig
--- linux-4.4.115-fbx/fs/crypto./Kconfig	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/fs/crypto/Kconfig	2019-01-22 16:16:27.779285128 +0100
@@ -0,0 +1,18 @@
+config FS_ENCRYPTION
+	tristate "FS Encryption (Per-file encryption)"
+	depends on BLOCK
+	select CRYPTO
+	select CRYPTO_AES
+	select CRYPTO_CBC
+	select CRYPTO_ECB
+	select CRYPTO_XTS
+	select CRYPTO_CTS
+	select CRYPTO_CTR
+	select CRYPTO_SHA256
+	select KEYS
+	select ENCRYPTED_KEYS
+	help
+	  Enable encryption of files and directories.  This
+	  feature is similar to ecryptfs, but it is more memory
+	  efficient since it avoids caching the encrypted and
+	  decrypted pages in the page cache.
diff -Nruw linux-4.4.115-fbx/fs/crypto./Makefile linux-4.4.115-fbx/fs/crypto/Makefile
--- linux-4.4.115-fbx/fs/crypto./Makefile	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/fs/crypto/Makefile	2019-01-22 16:16:27.779285128 +0100
@@ -0,0 +1,4 @@
+obj-$(CONFIG_FS_ENCRYPTION)	+= fscrypto.o
+
+fscrypto-y := crypto.o fname.o hooks.o keyinfo.o policy.o
+fscrypto-$(CONFIG_BLOCK) += bio.o
diff -Nruw linux-4.4.115-fbx/fs/exfat./bitmap.c linux-4.4.115-fbx/fs/exfat/bitmap.c
--- linux-4.4.115-fbx/fs/exfat./bitmap.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/fs/exfat/bitmap.c	2019-01-25 20:32:47.247752405 +0100
@@ -0,0 +1,606 @@
+/*
+ * bitmap.c for exfat
+ * Created by <nschichan@freebox.fr> on Thu Aug  8 19:21:05 2013
+ */
+
+#include <linux/buffer_head.h>
+#include <linux/fs.h>
+
+#include "exfat.h"
+#include "exfat_fs.h"
+
+
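+/*
+ * The allocation bitmap stores one bit per cluster. Bitmap bit N
+ * describes disk cluster N + 2, since the first data cluster on exFAT
+ * is cluster 2; callers adjust by 2 accordingly. The helpers below
+ * locate the bitmap sector, the byte offset within that sector and the
+ * bit within that byte for a given bitmap index.
+ */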
+static inline sector_t exfat_bitmap_sector(struct exfat_sb_info *sbi,
+					   u32 cluster)
+{
+	return sbi->first_bitmap_sector + ((cluster / 8) >> sbi->sectorbits);
+}
+
+static inline u32 exfat_bitmap_off(struct exfat_sb_info *sbi,
+				   u32 cluster)
+{
+	return (cluster / 8) & sbi->sectormask;
+}
+
+static inline u32 exfat_bitmap_shift(u32 cluster)
+{
+	return cluster & 7;
+}
+
+static int __find_get_free_cluster(struct inode *inode, u32 *out_cluster)
+{
+	struct exfat_sb_info *sbi = EXFAT_SB(inode->i_sb);
+
+	while (1) {
+		sector_t sect = exfat_bitmap_sector(sbi,
+						    sbi->cur_bitmap_cluster);
+		u32 off = exfat_bitmap_off(sbi, sbi->cur_bitmap_cluster);
+		u32 shift = exfat_bitmap_shift(sbi->cur_bitmap_cluster);
+
+		/* disk is full */
+		if (!sbi->free_clusters)
+			break;
+
+		if (!sbi->cur_bitmap_bh ||
+		    sect != sbi->cur_bitmap_sector) {
+			if (sbi->cur_bitmap_bh)
+				brelse(sbi->cur_bitmap_bh);
+			sbi->cur_bitmap_bh = sb_bread(inode->i_sb, sect);
+			sbi->cur_bitmap_sector = sect;
+			if (!sbi->cur_bitmap_bh) {
+				exfat_msg(inode->i_sb, KERN_ERR,
+					  "unable to read bitmap sector "
+					  "at %llu", (unsigned long long)sect);
+				return -EIO;
+			}
+		}
+
+		if (!(sbi->cur_bitmap_bh->b_data[off] & (1 << shift))) {
+			sbi->cur_bitmap_bh->b_data[off] |= (1 << shift);
+			*out_cluster = sbi->cur_bitmap_cluster;
+			goto found;
+		}
+
+		++sbi->cur_bitmap_cluster;
+		if (sbi->cur_bitmap_cluster == sbi->cluster_count)
+			sbi->cur_bitmap_cluster = 0;
+	}
+	return -ENOSPC;
+
+found:
+	sbi->prev_free_cluster = *out_cluster;
+	--sbi->free_clusters;
+	mark_buffer_dirty(sbi->cur_bitmap_bh);
+	return 0;
+}
+
+static int __put_cluster(struct inode *inode, u32 cluster)
+{
+	struct exfat_sb_info *sbi = EXFAT_SB(inode->i_sb);
+	sector_t sect = exfat_bitmap_sector(sbi, cluster);
+	u32 off = exfat_bitmap_off(sbi, cluster);
+	u32 shift = exfat_bitmap_shift(cluster);
+
+
+	if (!sbi->cur_bitmap_bh || sect != sbi->cur_bitmap_sector) {
+		if (sbi->cur_bitmap_bh)
+			brelse(sbi->cur_bitmap_bh);
+		sbi->cur_bitmap_bh = sb_bread(inode->i_sb, sect);
+		if (!sbi->cur_bitmap_bh) {
+			exfat_msg(inode->i_sb, KERN_ERR,
+				  "unable to read bitmap sector at %llu",
+				  (unsigned long long)sect);
+			return -EIO;
+		}
+		sbi->cur_bitmap_sector = sect;
+		sbi->cur_bitmap_cluster = cluster;
+	}
+	if ((sbi->cur_bitmap_bh->b_data[off] & (1 << shift)) == 0) {
+		exfat_fs_error(inode->i_sb, "put_cluster: cluster %u "
+			  "already free.", cluster);
+		return -EIO;
+	}
+
+	++sbi->free_clusters;
+	sbi->cur_bitmap_bh->b_data[off] &= ~(1 << shift);
+	sbi->prev_free_cluster = cluster;
+	mark_buffer_dirty(sbi->cur_bitmap_bh);
+	/* sync_dirty_buffer(sbi->cur_bitmap_bh); */
+	return 0;
+}
+
+/*
+ * setup search to start at given cluster.
+ */
+static void __exfat_reset_bitmap(struct exfat_sb_info *sbi, u32 cluster)
+{
+	sector_t sect;
+
+	if (cluster >= sbi->cluster_count)
+		cluster = 0;
+
+	sect = exfat_bitmap_sector(sbi, cluster);
+	if (sbi->cur_bitmap_sector != sect) {
+		sbi->cur_bitmap_sector = sect;
+		if (sbi->cur_bitmap_bh) {
+			brelse(sbi->cur_bitmap_bh);
+			sbi->cur_bitmap_bh = NULL;
+		}
+	}
+	sbi->cur_bitmap_cluster = cluster;
+}
+
+static bool all_contiguous(u32 *clusters, u32 nr)
+{
+	u32 i;
+
+	for (i = 0; i < nr - 1; ++i) {
+		if (clusters[i] != clusters[i + 1] - 1)
+			return false;
+	}
+	return true;
+}
+
+/*
+ * hint must be the cluster immediately after the last allocated cluster
+ * of the inode.
+ */
+int exfat_alloc_clusters(struct inode *inode, u32 hint, u32 *clusters, u32 nr)
+{
+	struct exfat_sb_info *sbi = EXFAT_SB(inode->i_sb);
+	struct exfat_inode_info *info = EXFAT_I(inode);
+	u32 i;
+
+	mutex_lock(&sbi->bitmap_mutex);
+	__exfat_reset_bitmap(sbi, hint - 2);
+	for (i = 0; i < nr; ++i) {
+		u32 new;
+		int error;
+
+		error = __find_get_free_cluster(inode, &new);
+		if (error) {
+			mutex_unlock(&sbi->bitmap_mutex);
+			return error;
+		}
+
+		clusters[i] = new + 2;
+	}
+	mutex_unlock(&sbi->bitmap_mutex);
+
+	/*
+	 * all clusters found: now see if we need to update/create a
+	 * fat chain.
+	 */
+	if (info->first_cluster == 0) {
+		info->first_cluster = clusters[0];
+		if (all_contiguous(clusters, nr)) {
+			/*
+			 * first cluster alloc on inode and all
+			 * clusters are contiguous.
+			 */
+			info->flags |= EXFAT_I_FAT_INVALID;
+		} else {
+			/*
+			 * first alloc and already fragmented.
+			 */
+			return exfat_write_fat(inode, 0, clusters, nr);
+		}
+	} else {
+		int error;
+		if ((info->flags & EXFAT_I_FAT_INVALID) &&
+		    (clusters[0] != hint || !all_contiguous(clusters, nr))) {
+			/*
+			 * must now use fat chain instead of bitmap.
+			 */
+			info->flags &= ~(EXFAT_I_FAT_INVALID);
+
+			/*
+			 * write the contiguous chain that would
+			 * previously be accessed without the FAT
+			 * chain.
+			 */
+			error = exfat_write_fat_contiguous(inode,
+						  info->first_cluster,
+						  hint - info->first_cluster);
+			if (error)
+				return error;
+		}
+
+		if ((info->flags & EXFAT_I_FAT_INVALID) == 0) {
+			/*
+			 * link the allocated clusters after hint.
+			 */
+			error = exfat_write_fat(inode, hint - 1, clusters, nr);
+			if (error)
+				return  error;
+		}
+
+	}
+
+	/*
+	 * update i_blocks.
+	 */
+	inode->i_blocks += nr << (sbi->clusterbits - 9);
+	info->allocated_clusters += nr;
+
+	/*
+	 * caller must call mark_inode_dirty so that inode
+	 * first_cluster and inode flags get written to the disk.
+	 * caller must update inode size (directory and regular file
+	 * have different rules).
+	 */
+	return 0;
+}
+
+
+static int exfat_free_clusters_contiguous(struct inode *inode,
+					  u32 start, u32 nr)
+{
+	u32 cluster;
+	struct exfat_sb_info *sbi = EXFAT_SB(inode->i_sb);
+	int error = 0;
+
+	mutex_lock(&sbi->bitmap_mutex);
+	for (cluster = start; cluster < start + nr; ++cluster) {
+		error = __put_cluster(inode, cluster - 2);
+		if (error)
+			break;
+	}
+	mutex_unlock(&sbi->bitmap_mutex);
+	return error;
+}
+
+static int exfat_free_clusters_fat(struct inode *inode,
+				   u32 fcluster_start, u32 nr)
+{
+	struct exfat_sb_info *sbi = EXFAT_SB(inode->i_sb);
+	u32 fcluster;
+	int error = 0;
+
+	mutex_lock(&sbi->bitmap_mutex);
+	for (fcluster = fcluster_start; fcluster < fcluster_start + nr;
+	     ++fcluster) {
+		u32 dcluster;
+
+		error = exfat_get_fat_cluster(inode, fcluster, &dcluster);
+		if (error)
+			break;
+
+		error = __put_cluster(inode, dcluster - 2);
+		if (error)
+			break;
+	}
+	mutex_unlock(&sbi->bitmap_mutex);
+
+	/*
+	 * per-inode file cluster to disk cluster translation cache
+	 * mostly now holds entries to the zone we just truncated, so
+	 * they must not be kept (this could lead to FS corruption).
+	 */
+	exfat_inode_cache_drop(inode);
+
+	return error;
+}
+
+int exfat_free_clusters_inode(struct inode *inode, u32 fcluster_start)
+{
+	struct exfat_inode_info *info = EXFAT_I(inode);
+	int error;
+	u32 nr_to_free = info->allocated_clusters - fcluster_start;
+
+	if (info->first_cluster == 0 || nr_to_free == 0)
+		/*
+		 * no clusters allocated, or nothing to do
+		 */
+		return 0;
+
+	if (info->flags & EXFAT_I_FAT_INVALID)
+		error = exfat_free_clusters_contiguous(inode,
+				       info->first_cluster + fcluster_start,
+				       nr_to_free);
+	else
+		error = exfat_free_clusters_fat(inode, fcluster_start,
+					nr_to_free);
+	if (error)
+		return error;
+
+	info->allocated_clusters -= nr_to_free;
+	inode->i_blocks = EXFAT_I(inode)->allocated_clusters <<
+		(EXFAT_SB(inode->i_sb)->clusterbits - 9);
+
+	/*
+	 * update inode info, caller must call mark_inode_dirty and
+	 * update inode->i_size.
+	 */
+	if (fcluster_start == 0) {
+		info->first_cluster = 0;
+		info->flags &= ~(EXFAT_I_FAT_INVALID);
+	}
+	return 0;
+}
+
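+/*
+ * Count the free clusters (zero bits) among the first 'count' bits of
+ * this bitmap sector: popcount the complement using the widest word
+ * size first, then finish bit by bit.
+ */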
+static u32 count_clusters_bh(struct buffer_head *bh, u32 count)
+{
+	u8 *ptr = bh->b_data;
+	u32 ret = 0;
+	u8 val;
+
+	while (count >= sizeof (u64) * 8) {
+		u64 val = *(u64*)ptr;
+
+		ret += hweight64(~val);
+		count -= sizeof (u64) * 8;
+		ptr += sizeof (u64);
+	}
+	if (count >= sizeof (u32) * 8) {
+		u32 val = *(u32*)ptr;
+
+		ret += hweight32(~val);
+		count -= sizeof (u32) * 8;
+		ptr += sizeof (u32);
+	}
+	if (count >= sizeof (u16) * 8) {
+		u16 val = *(u16*)ptr;
+
+		ret += hweight16(~val);
+		count -= sizeof (u16) * 8;
+		ptr += sizeof (u16);
+	}
+	if (count >= sizeof (u8) * 8) {
+		u8 val = *ptr;
+
+		ret += hweight8(~val);
+		count -= sizeof (u8) * 8;
+		ptr += sizeof (u8);
+	}
+
+	if (count) {
+		val = *ptr;
+		while (count) {
+			ret += (~val & 1);
+			val >>= 1;
+			--count;
+		}
+	}
+	return ret;
+}
+
+/*
+ * only called during mount, so taking sbi->bitmap_mutex should not be
+ * needed.
+ */
+static int exfat_get_free_cluster_count(struct super_block *sb, u32 *out_count)
+{
+	struct exfat_sb_info *sbi = EXFAT_SB(sb);
+	u32 clusters_per_sector = 8 * sbi->sectorsize;
+	u32 cluster;
+
+	*out_count = 0;
+	for (cluster = 0; cluster < sbi->cluster_count;
+	     cluster += clusters_per_sector) {
+		sector_t sect = exfat_bitmap_sector(sbi, cluster);
+		struct buffer_head *bh;
+		u32 count = clusters_per_sector;
+
+		if (cluster + clusters_per_sector > sbi->cluster_count)
+			count = sbi->cluster_count - cluster;
+
+		bh = sb_bread(sb, sect);
+		if (!bh) {
+			exfat_msg(sb, KERN_ERR,
+				  "unable to read bitmap sector at %llu",
+				  (unsigned long long)sect);
+			return -EIO;
+		}
+		*out_count += count_clusters_bh(bh, count);
+		brelse(bh);
+	}
+	return 0;
+}
+
+/*
+ * setup a bitmap context, preload a bh from the requested starting
+ * cluster.
+ */
+int exfat_init_bitmap_context(struct super_block *sb,
+			      struct exfat_bitmap_ctx *ctx,
+			      u32 cluster)
+{
+	memset(ctx, 0, sizeof (*ctx));
+	ctx->sb = sb;
+
+	cluster -= 2;
+	if (cluster >= EXFAT_SB(sb)->cluster_count)
+		return -ENOSPC;
+
+	ctx->cur_sector = exfat_bitmap_sector(EXFAT_SB(sb), cluster);
+	ctx->bh = sb_bread(ctx->sb, ctx->cur_sector);
+
+	if (!ctx->bh) {
+		exfat_msg(sb, KERN_ERR, "unable to read bitmap sector at %llu",
+			  (unsigned long long)ctx->cur_sector);
+		return -EIO;
+	}
+	return 0;
+}
+
+/*
+ * release bh in an already setup bitmap context.
+ */
+void exfat_exit_bitmap_context(struct exfat_bitmap_ctx *ctx)
+{
+	if (ctx->bh)
+		brelse(ctx->bh);
+}
+
+/*
+ * test whether a specific cluster is in use in the bitmap. reuse the
+ * bh in the exfat_bitmap_ctx, or read a new one if the cluster falls
+ * outside the current sector.
+ */
+static int exfat_test_bitmap_cluster(struct exfat_bitmap_ctx *ctx,
+				     uint32_t cluster, bool *cluster_in_use)
+{
+	sector_t sect;
+	uint32_t off = exfat_bitmap_off(EXFAT_SB(ctx->sb), cluster);
+	int shift = exfat_bitmap_shift(cluster);
+
+	sect = exfat_bitmap_sector(EXFAT_SB(ctx->sb), cluster);
+	if (sect != ctx->cur_sector) {
+		if (ctx->bh)
+			brelse(ctx->bh);
+		ctx->cur_sector = sect;
+		ctx->bh = sb_bread(ctx->sb, ctx->cur_sector);
+		if (!ctx->bh) {
+			exfat_msg(ctx->sb, KERN_ERR,
+				  "unable to read bitmap sector at %llu",
+				  (unsigned long long)sect);
+			return -EIO;
+		}
+	}
+
+	*cluster_in_use = !!(ctx->bh->b_data[off] & (1 << shift));
+	return 0;
+}
+
+/*
+ * update first_in_use and nr_in_use with the first zone of used
+ * clusters starting from start_cluster.
+ */
+int exfat_test_bitmap(struct exfat_bitmap_ctx *ctx, uint32_t start_cluster,
+		      uint32_t *first_in_use, uint32_t *nr_in_use)
+{
+	bool in_use = false;
+	int error = 0;
+	struct exfat_sb_info *sbi = EXFAT_SB(ctx->sb);
+
+	start_cluster -= 2;
+
+	/*
+	 * scan bitmap until we find a cluster that is in use.
+	 */
+	while (1) {
+		if (start_cluster == sbi->cluster_count) {
+			/*
+			 * reached end of disk: no more in-use
+			 * clusters found.
+			 */
+			*first_in_use = sbi->cluster_count;
+			*nr_in_use = 0;
+			return 0;
+		}
+		error = exfat_test_bitmap_cluster(ctx, start_cluster, &in_use);
+		if (error)
+			return error;
+		if (in_use)
+			break;
+		++start_cluster;
+	}
+
+
+	/*
+	 * update first_in_use, and scan until a free cluster is
+	 * found.
+	 */
+	*first_in_use = start_cluster + 2;
+	*nr_in_use = 0;
+	while (1) {
+		error = exfat_test_bitmap_cluster(ctx, start_cluster, &in_use);
+		if (error)
+			return error;
+		if (!in_use)
+			break;
+		++(*nr_in_use);
+		++start_cluster;
+	}
+	return 0;
+}
+
+int exfat_init_bitmap(struct inode *root)
+{
+	struct exfat_sb_info *sbi = EXFAT_SB(root->i_sb);
+	struct exfat_bitmap_entry *be;
+	struct exfat_dir_ctx dctx;
+	u32 first_bitmap_cluster;
+	u32 last_bitmap_cluster;
+
+	int error;
+
+	mutex_init(&sbi->bitmap_mutex);
+
+	error = exfat_init_dir_ctx(root, &dctx, 0);
+	if (error)
+		return error;
+
+try_bitmap:
+	error = -ENOENT;
+	be = __exfat_dentry_next(&dctx, E_EXFAT_BITMAP, 0xff, true, NULL);
+	if (!be) {
+		exfat_msg(root->i_sb, KERN_ERR, "root directory does not "
+			  "have a bitmap entry.");
+		goto fail;
+	}
+
+	if (exfat_bitmap_nr(be->flags) != 0)
+		/*
+		 * not expected to find a second bitmap entry here
+		 * since we checked during superblock fill that we
+		 * were not on a texFAT volume ...
+		 */
+		goto try_bitmap;
+
+
+	error = -EINVAL;
+	if (__le64_to_cpu(be->length) * 8 < sbi->cluster_count) {
+		exfat_msg(root->i_sb, KERN_INFO, "bitmap does not cover "
+			  "the whole cluster heap.");
+		goto fail;
+	}
+
+	first_bitmap_cluster = __le32_to_cpu(be->cluster_addr);
+	last_bitmap_cluster = first_bitmap_cluster +
+		(__le32_to_cpu(be->length) >> sbi->clusterbits);
+
+	/*
+	 * check that bitmap start and end clusters are inside the
+	 * disk.
+	 */
+	error = -ERANGE;
+	if (first_bitmap_cluster < 2 ||
+	    first_bitmap_cluster >= sbi->cluster_count) {
+		exfat_msg(root->i_sb, KERN_ERR, "bitmap start cluster is "
+			  "outside disk limits.");
+		goto fail;
+	}
+	if (last_bitmap_cluster < 2 ||
+	    last_bitmap_cluster >= sbi->cluster_count) {
+		exfat_msg(root->i_sb, KERN_ERR, "bitmap last cluster is "
+			  "outside disk limits.");
+		goto fail;
+	}
+
+	sbi->bitmap_length = __le32_to_cpu(be->length);
+	sbi->first_bitmap_sector = exfat_cluster_sector(sbi,
+					__le32_to_cpu(be->cluster_addr));
+	sbi->last_bitmap_sector = sbi->first_bitmap_sector +
+		DIV_ROUND_UP(sbi->bitmap_length, sbi->sectorsize);
+
+	error = exfat_get_free_cluster_count(root->i_sb, &sbi->free_clusters);
+	if (error)
+		goto fail;
+
+	sbi->prev_free_cluster = 0;
+
+	exfat_cleanup_dir_ctx(&dctx);
+	return 0;
+fail:
+	exfat_cleanup_dir_ctx(&dctx);
+	return error;
+}
+
+void exfat_exit_bitmap(struct super_block *sb)
+{
+	struct exfat_sb_info *sbi = EXFAT_SB(sb);
+
+	if (sbi->cur_bitmap_bh)
+		brelse(sbi->cur_bitmap_bh);
+}
diff -Nruw linux-4.4.115-fbx/fs/exfat./dir.c linux-4.4.115-fbx/fs/exfat/dir.c
--- linux-4.4.115-fbx/fs/exfat./dir.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/fs/exfat/dir.c	2019-01-25 20:32:47.247752405 +0100
@@ -0,0 +1,400 @@
+/*
+ * dir.c for exfat
+ * Created by <nschichan@freebox.fr> on Tue Aug 20 11:42:46 2013
+ */
+
+#include <linux/types.h>
+#include <linux/fs.h>
+#include <linux/buffer_head.h>
+#include <linux/slab.h>
+#include <linux/nls.h>
+
+#include "exfat.h"
+#include "exfat_fs.h"
+
+/*
+ * setup an exfat_dir_ctx structure so that __exfat_dentry_next can
+ * work with it. start is a byte offset from the beginning of the
+ * directory, rounded down to a 0x20 directory entry boundary.
+ */
+int exfat_init_dir_ctx(struct inode *inode, struct exfat_dir_ctx *ctx,
+		       off_t start)
+{
+	u32 cluster = EXFAT_I(inode)->first_cluster;
+
+	memset(ctx, 0, sizeof (*ctx));
+
+	if (cluster == 0) {
+		ctx->empty = true;
+		ctx->sb = inode->i_sb;
+		return 0;
+	}
+
+	if (cluster < EXFAT_CLUSTER_FIRSTVALID ||
+	    cluster > EXFAT_CLUSTER_LASTVALID) {
+		exfat_msg(inode->i_sb, KERN_ERR, "exfat_init_dir_ctx: invalid "
+			  "cluster %u", cluster);
+		return -EINVAL;
+	}
+
+	start &= ~(0x20 - 1);
+	if (start == 0)
+		ctx->off = -1;
+	else
+		ctx->off = start - 0x20;
+
+	ctx->sb = inode->i_sb;
+	ctx->inode = inode;
+
+	return 0;
+}
+
+void exfat_cleanup_dir_ctx(struct exfat_dir_ctx *dctx)
+{
+	if (dctx->bh)
+		brelse(dctx->bh);
+}
+
+/*
+ * calculate the checksum for the current direntry. the fields holding
+ * the set checksum in the first entry (bytes 2 and 3) are not part of
+ * the checksum calculation.
+ */
+u16 exfat_direntry_checksum(void *data, u16 checksum, bool first)
+{
+	u8 *ptr = data;
+	int i;
+
+	for (i = 0; i < 0x20; ++i) {
+		if (first && (i == 2 || i == 3))
+			continue ;
+		checksum = ((checksum << 15) | (checksum >> 1)) + (u16)ptr[i];
+	}
+	return checksum;
+}
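+
+/*
+ * the 16-bit set checksum stored in the filedir entry covers every
+ * 0x20-byte entry of the set. for example, for a file whose name fits
+ * in a single filename entry (sketch):
+ *
+ *	checksum = exfat_direntry_checksum(efd, 0, true);
+ *	checksum = exfat_direntry_checksum(esx, checksum, false);
+ *	checksum = exfat_direntry_checksum(name_entry, checksum, false);
+ */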
+
+u32 exfat_dctx_fpos(struct exfat_dir_ctx *dctx)
+{
+	return dctx->off;
+}
+
+u64 exfat_dctx_dpos(struct exfat_dir_ctx *dctx)
+{
+	struct exfat_sb_info *sbi = EXFAT_SB(dctx->sb);
+
+	return (dctx->sector << sbi->sectorbits) +
+		(dctx->off & sbi->sectormask);
+}
+
+static int exfat_get_dctx_disk_cluster(struct exfat_dir_ctx *dctx,
+				       u32 file_cluster, u32 *disk_cluster)
+{
+	struct exfat_inode_info *info = EXFAT_I(dctx->inode);
+
+	if (info->flags & EXFAT_I_FAT_INVALID) {
+		*disk_cluster = info->first_cluster + file_cluster;
+		return 0;
+	} else {
+		return exfat_get_fat_cluster(dctx->inode, file_cluster,
+					     disk_cluster);
+	}
+}
+
+/*
+ * get the next typed dentry in the exfat_dir_ctx structure. can_skip
+ * indicates whether entries of other types may be skipped, or whether
+ * the wanted entry must be immediately next in the entry stream. *end
+ * is set when the end of the directory entry stream is reached.
+ *
+ * only one buffer_head is kept at a time: subsequent calls to
+ * __exfat_dentry_next can therefore invalidate pointers returned by
+ * previous calls.
+ */
+void *__exfat_dentry_next(struct exfat_dir_ctx *dctx, int type, int mask,
+			  bool can_skip, bool *end)
+{
+	struct exfat_sb_info *sbi = EXFAT_SB(dctx->sb);
+
+	if (dctx->empty) {
+		if (end)
+			*end = true;
+		return NULL;
+	}
+
+	if (end)
+		*end = false;
+
+	if (dctx->off == -1)
+		dctx->off = 0;
+	else
+		dctx->off += 0x20;
+
+	for (;;) {
+		sector_t wanted_sector;
+		u32 file_cluster = dctx->off >> sbi->clusterbits;
+		u32 disk_cluster;
+		int error;
+		int sector_offset;
+		sector_t sector_in_cluster;
+
+		if (dctx->off >= dctx->inode->i_size) {
+			if (end)
+				*end = true;
+			return NULL;
+		}
+
+		error = exfat_get_dctx_disk_cluster(dctx, file_cluster,
+						    &disk_cluster);
+		if (error)
+			return NULL;
+
+		sector_in_cluster = (dctx->off >> sbi->sectorbits) %
+			sbi->sectors_per_cluster;
+
+		wanted_sector = exfat_cluster_sector(sbi, disk_cluster) +
+			sector_in_cluster;
+		if (wanted_sector != dctx->sector || !dctx->bh) {
+			/*
+			 * need to fetch a new sector from the current
+			 * cluster.
+			 */
+			dctx->sector = wanted_sector;
+			if (dctx->bh)
+				brelse(dctx->bh);
+			dctx->bh = sb_bread(dctx->sb, dctx->sector);
+			if (!dctx->bh)
+				return NULL;
+		}
+
+		sector_offset = dctx->off & sbi->sectormask;
+		if ((dctx->bh->b_data[sector_offset] & mask) == (type & mask))
+			/*
+			 * return pointer to entry if type matches the
+			 * one given.
+			 */
+			return dctx->bh->b_data + sector_offset;
+
+		if (dctx->bh->b_data[sector_offset] == 0 && end)
+			/*
+			 * set end if no more entries in this directory.
+			 */
+			*end = true;
+
+		if (dctx->bh->b_data[sector_offset] == 0 || !can_skip)
+			/*
+			 * handle can_skip / end of directory.
+			 */
+			return NULL;
+
+		/*
+		 * move to next entry.
+		 */
+		dctx->off += 0x20;
+	}
+	return NULL;
+}
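+
+/*
+ * typical use (a sketch of what __exfat_iterate below does): fetch a
+ * leading filedir entry, possibly skipping unrelated entries, then
+ * the stream extension entry that must immediately follow it:
+ *
+ *	efd = __exfat_dentry_next(dctx, E_EXFAT_FILEDIR, 0xff, true, &end);
+ *	esx = __exfat_dentry_next(dctx, E_EXFAT_STREAM_EXT, 0xff, false, NULL);
+ *
+ * efd may be invalidated by the second call if the two entries do not
+ * share a sector.
+ */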
+
+/*
+ * helper around __exfat_dentry_next that copies the content of the
+ * found entry in a user supplied buffer.
+ */
+int exfat_dentry_next(void *out, struct exfat_dir_ctx *dctx,
+			     int type, bool can_skip)
+{
+	bool end;
+
+	void *ptr = __exfat_dentry_next(dctx, type, 0xff, can_skip, &end);
+
+	if (!ptr) {
+		if (end)
+			return -ENOENT;
+		else {
+			exfat_msg(dctx->sb, KERN_INFO, "no ptr and "
+				  "end not reached: "
+				  "type %02x, can_skip %s\n", type,
+				  can_skip ? "true" : "false");
+			return -EIO;
+		}
+	}
+	memcpy(out, ptr, 0x20);
+	return 0;
+}
+
+/*
+ * extract name by parsing consecutive E_EXFAT_FILENAME entries in a
+ * caller provided buffer. also update the checksum on the fly.
+ *
+ * no utf16 to utf8 conversion is performed.
+ */
+int __exfat_get_name(struct exfat_dir_ctx *dctx, u32 name_length,
+			    __le16 *name, u16 *calc_checksum,
+			    struct exfat_iloc *iloc)
+{
+	__le16 *ptr;
+	int error;
+	int nr;
+
+	ptr = name;
+
+	error = -EIO;
+	nr = 0;
+	while (name_length) {
+		struct exfat_filename_entry *e;
+		u32 len = 15;
+
+		e = __exfat_dentry_next(dctx, E_EXFAT_FILENAME, 0xff,
+					false, NULL);
+		if (!e)
+			goto fail;
+		*calc_checksum = exfat_direntry_checksum(e, *calc_checksum,
+							 false);
+
+		if (iloc)
+			iloc->disk_offs[nr + 2] = exfat_dctx_dpos(dctx);
+		if (name_length < 15)
+			len = name_length;
+
+		memcpy(ptr, e->name_frag, len * sizeof (__le16));
+		name_length -= len;
+		ptr += len;
+		nr++;
+	}
+	return 0;
+
+fail:
+	return error;
+}
+
+/*
+ * walk the directory and invoke filldir on all found entries.
+ */
+static int __exfat_iterate(struct exfat_dir_ctx *dctx, struct file *file,
+			   struct dir_context *ctx)
+{
+	int error;
+	char *name = __getname();
+	__le16 *utf16name = __getname();
+
+	if (!name)
+		return -ENOMEM;
+	if (!utf16name) {
+		__putname(name);
+		return -ENOMEM;
+	}
+
+	for (;;) {
+		struct exfat_filedir_entry *efd;
+		struct exfat_stream_extension_entry *esx;
+		int dtype = DT_REG;
+		int name_length;
+		bool end;
+		u16 calc_checksum;
+		u16 expect_checksum;
+
+		/*
+		 * get the next filedir entry, we are allowed to skip
+		 * entries for that.
+		 */
+		error = -EIO;
+		efd = __exfat_dentry_next(dctx, E_EXFAT_FILEDIR, 0xff,
+					  true, &end);
+		if (!efd) {
+			if (end)
+				break;
+			else
+				goto fail;
+		}
+		expect_checksum = __le16_to_cpu(efd->set_checksum);
+		calc_checksum = exfat_direntry_checksum(efd, 0, true);
+
+		if (__le16_to_cpu(efd->attributes) & E_EXFAT_ATTR_DIRECTORY)
+			dtype = DT_DIR;
+
+		/*
+		 * get immediate stream extension entry.
+		 */
+		esx = __exfat_dentry_next(dctx, E_EXFAT_STREAM_EXT, 0xff, false,
+					  NULL);
+		if (!esx)
+			goto fail;
+		calc_checksum = exfat_direntry_checksum(esx, calc_checksum,
+							false);
+
+		/*
+		 * get immediate name.
+		 */
+		error = __exfat_get_name(dctx, esx->name_length, utf16name,
+					 &calc_checksum, NULL);
+		if (error) {
+			exfat_msg(dctx->sb, KERN_INFO, "__exfat_get_name "
+				  "has failed with %i", error);
+			goto fail;
+		}
+
+		if (calc_checksum != expect_checksum) {
+			exfat_msg(dctx->sb, KERN_INFO, "checksum: "
+				  "calculated %04x, expect %04x",
+				  calc_checksum, expect_checksum);
+			error = -EIO;
+			goto fail;
+		}
+
+		/*
+		 * convert utf16 to utf8 for kernel filldir callback.
+		 */
+		name_length = utf16s_to_utf8s(utf16name, esx->name_length,
+					      UTF16_LITTLE_ENDIAN,
+					      name, NAME_MAX + 2);
+		if (name_length < 0) {
+			error = name_length;
+			goto fail;
+		}
+		if (name_length > 255) {
+			error = -ENAMETOOLONG;
+			goto fail;
+		}
+
+		/*
+		 * tell the kernel we have an entry by calling
+		 * dir_emit
+		 */
+		if (!dir_emit(ctx, name, name_length, 1, dtype)) {
+			/*
+			 * no more room in the caller's buffer: stop
+			 * here, this is not an error.
+			 */
+			error = 0;
+			goto fail;
+		}
+		ctx->pos = 2 + exfat_dctx_fpos(dctx);
+	}
+	__putname(name);
+	__putname(utf16name);
+	ctx->pos = file_inode(file)->i_size + 2;
+	return 0;
+fail:
+	__putname(name);
+	__putname(utf16name);
+	return error;
+}
+
+/*
+ * readdir callback for VFS. fill "." and "..", then invoke
+ * __exfat_iterate.
+ */
+int exfat_iterate(struct file *file, struct dir_context *ctx)
+{
+	struct exfat_dir_ctx dctx;
+	int error;
+	struct inode *inode = file_inode(file);
+
+	switch (ctx->pos) {
+	case 0:
+		return dir_emit_dots(file, ctx);
+	default:
+		if (ctx->pos >= inode->i_size + 2)
+			return 0;
+		error = exfat_init_dir_ctx(inode, &dctx, ctx->pos - 2);
+		if (error)
+			return error;
+		exfat_lock_super(inode->i_sb);
+		error = __exfat_iterate(&dctx, file, ctx);
+		exfat_unlock_super(inode->i_sb);
+		exfat_cleanup_dir_ctx(&dctx);
+		return error;
+	}
+}
diff -Nruw linux-4.4.115-fbx/fs/exfat./exfat_fs.h linux-4.4.115-fbx/fs/exfat/exfat_fs.h
--- linux-4.4.115-fbx/fs/exfat./exfat_fs.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/fs/exfat/exfat_fs.h	2019-01-25 20:32:47.247752405 +0100
@@ -0,0 +1,200 @@
+/*
+ * exfat_fs.h for exfat
+ * Created by <nschichan@freebox.fr> on Mon Jul 29 15:06:38 2013
+ */
+
+#ifndef __EXFAT_FS_H
+# define __EXFAT_FS_H
+
+/*
+ * exfat on disk structures and constants
+ */
+
+#include <linux/types.h>
+
+struct exfat_vbr {
+	u8	jump[3];
+	u8	fsname[8];
+	u8	reserved1[53];
+
+	__le64	partition_offset;
+	__le64	volume_length;
+
+	__le32	fat_offset;
+	__le32	fat_length;
+
+	__le32	cluster_heap_offset;
+	__le32	cluster_count;
+	__le32	cluster_root_dir;
+
+	__le32	serial_number;
+
+	__le16	fs_rev;
+	__le16	volume_flags;
+
+	u8	bytes_per_sector;
+	u8	sectors_per_cluster;
+
+	u8	fat_num;
+	u8	drive_select;
+	u8	heap_use_percent;
+
+	u8	reserved2[7];
+	u8	boot_code[390];
+
+	u8	boot_sig[2];
+};
+
+enum {
+	EXFAT_CLUSTER_FIRSTVALID	= 0x00000002,
+	EXFAT_CLUSTER_LASTVALID		= 0xfffffff6,
+	EXFAT_CLUSTER_BADBLK		= 0xfffffff7,
+	EXFAT_CLUSTER_MEDIATYPE		= 0xfffffff8,
+	EXFAT_CLUSTER_EOF		= 0xffffffff,
+};
+
+enum {
+	EXFAT_ACTIVEFAT_MASK = (1 << 0),
+	EXFAT_FLAG_DIRTY = (1 << 1),
+	EXFAT_FLAG_MEDIA_FAILURE = (1 << 2),
+};
+
+static inline int exfat_active_fat(u16 flags)
+{
+	return flags & EXFAT_ACTIVEFAT_MASK;
+}
+
+#define EXFAT_CHECKSUM_SECTORS	11
+
+enum {
+	EXFAT_I_ALLOC_POSSIBLE = (1 << 0),
+	EXFAT_I_FAT_INVALID = (1 << 1),
+};
+
+/*
+ * directory cluster content
+ */
+
+/*
+ * entry types
+ */
+enum {
+	E_EXFAT_EOD		= 0x00,
+	E_EXFAT_VOLUME_LABEL	= 0x83,
+	E_EXFAT_BITMAP		= 0x81,
+	E_EXFAT_UPCASE_TABLE	= 0x82,
+	E_EXFAT_GUID		= 0xa0,
+	E_EXFAT_PADDING		= 0xa1,
+	E_EXFAT_ACL		= 0xe2,
+	E_EXFAT_FILEDIR		= 0x85,
+	E_EXFAT_STREAM_EXT	= 0xc0,
+	E_EXFAT_FILENAME	= 0xc1,
+};
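+
+/*
+ * bit 7 of the entry type is the "in use" bit: a deleted entry keeps
+ * its type with bit 7 cleared, and E_EXFAT_EOD marks the end of the
+ * directory stream.
+ */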
+
+/*
+ * file attributes in exfat_filedir_entry
+ */
+enum {
+	E_EXFAT_ATTR_RO		= (1 << 0),
+	E_EXFAT_ATTR_HIDDEN	= (1 << 1),
+	E_EXFAT_ATTR_SYSTEM	= (1 << 2),
+	/* bit 3 reserved */
+	E_EXFAT_ATTR_DIRECTORY	= (1 << 4),
+	E_EXFAT_ATTR_ARCHIVE	= (1 << 5),
+	/* bits 6-15 reserved */
+};
+
+/* type 0x83 */
+struct exfat_volume_label_entry {
+	u8 type;
+	u8 charcount;
+	__le16 label[11];
+	u8 reserved1[8];
+};
+
+static inline int exfat_bitmap_nr(u8 flags)
+{
+	return flags & 1;
+}
+
+/* type 0x81 */
+struct exfat_bitmap_entry {
+	u8 type;
+	u8 flags;
+	u8 reserved1[18];
+	__le32 cluster_addr;
+	__le64 length;
+};
+
+/* type 0x82 */
+struct exfat_upcase_entry {
+	u8 type;
+	u8 reserved1[3];
+	__le32 checksum;
+	u8 reserved2[12];
+	__le32 cluster_addr;
+	__le64 length;
+};
+
+/* type 0xa0 */
+struct exfat_guid_entry {
+	u8 type;
+	u8 secondary_count;
+	__le16 set_checksum;
+	__le16 flags;
+	u8 guid[16];
+	u8 reserved1[10];
+};
+
+/* type 0xa1 */
+struct exfat_padding_entry {
+	u8 type;
+	u8 reserved1[31];
+};
+
+/* type 0xe2 */
+struct exfat_acl_entry {
+	u8 type;
+	u8 reserved1[31];
+};
+
+/* type 0x85 */
+struct exfat_filedir_entry {
+	u8 type;
+	u8 secondary_count;
+	__le16 set_checksum;
+	__le16 attributes;
+	u8 reserved1[2];
+	__le32 create;
+	__le32 modified;
+	__le32 accessed;
+	u8 create_10ms;
+	u8 modified_10ms;
+	s8 create_tz_offset;
+	s8 modified_tz_offset;
+	s8 accessed_tz_offset;
+	u8 reserved2[7];
+};
+
+/* 0xc0 */
+struct exfat_stream_extension_entry {
+	u8 type;
+	u8 flags;
+	u8 reserved1;
+	u8 name_length;
+	__le16 name_hash;
+	u8 reserved2[2];
+	__le64 valid_data_length;
+	u8 reserved3[4];
+	__le32 first_cluster;
+	__le64 data_length;
+};
+
+/* 0xc1 */
+struct exfat_filename_entry {
+	u8 type;
+	u8 flags;
+	__le16 name_frag[15];
+};
+
+#endif /*! __EXFAT_FS_H */
diff -Nruw linux-4.4.115-fbx/fs/exfat./exfat.h linux-4.4.115-fbx/fs/exfat/exfat.h
--- linux-4.4.115-fbx/fs/exfat./exfat.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/fs/exfat/exfat.h	2019-01-25 20:32:47.247752405 +0100
@@ -0,0 +1,325 @@
+/*
+ * exfat.h for exfat
+ * Created by <nschichan@freebox.fr> on Tue Jul 23 12:37:12 2013
+ */
+
+#ifndef __EXFAT_H
+# define __EXFAT_H
+
+#define EXFAT_HASH_BITS	(8)
+#define EXFAT_HASH_SIZE	(1 << EXFAT_HASH_BITS)
+
+/*
+ * special inode number for root directory.
+ */
+#define EXFAT_ROOT_INO	1
+
+enum {
+	EXFAT_ERROR_ACTION_CONTINUE,
+	EXFAT_ERROR_ACTION_REMOUNT_RO,
+	EXFAT_ERROR_ACTION_PANIC,
+};
+
+struct exfat_sb_options {
+	kuid_t	uid;
+	kgid_t	gid;
+	mode_t	dmask;
+	mode_t	fmask;
+	int	time_offset;
+	int	time_offset_set;
+	int	error_action;
+};
+
+struct exfat_sb_info {
+	struct exfat_sb_options options;
+
+	struct buffer_head *sb_bh;
+	struct exfat_vbr *vbr;
+	bool dirty;
+
+	u32 sectorsize; /* in bytes*/
+	u32 clustersize; /* in bytes */
+	u32 sectors_per_cluster;
+	int sectorbits;
+	int clusterbits;
+	u32 sectormask;
+	u32 clustermask;
+
+	u32 fat_offset;
+	u32 fat_length;
+
+	u32 root_dir_cluster;
+	u32 cluster_heap_offset;
+	u32 cluster_count;
+
+	__le16	*upcase_table;
+	u32	upcase_len;
+
+	/*
+	 * bitmap fields
+	 */
+	struct mutex		bitmap_mutex;
+	u32			bitmap_length;
+	sector_t		first_bitmap_sector;
+	sector_t		last_bitmap_sector;
+	sector_t		cur_bitmap_sector;
+	u32			cur_bitmap_cluster;
+	struct buffer_head	*cur_bitmap_bh;
+	u32			free_clusters;
+	u32			prev_free_cluster;
+
+	/*
+	 * inode hash fields
+	 */
+	spinlock_t		inode_hash_lock;
+	struct hlist_head	inode_hash[EXFAT_HASH_SIZE];
+
+	struct mutex		sb_mutex;
+};
+
+struct exfat_cache_entry {
+	struct list_head list;
+	u32 file_cluster;
+	u32 disk_cluster;
+	u32 nr_contig;
+};
+
+struct exfat_cache {
+	struct mutex		mutex;
+	struct list_head	entries;
+	u32			nr_entries;
+};
+
+struct exfat_iloc {
+	u8 nr_secondary;
+	u32 file_off;
+	u64 disk_offs[19];
+};
+
+struct exfat_inode_info {
+	u8			flags;
+	u16			attributes;
+	u32			first_cluster;
+	u32			allocated_clusters;
+	loff_t			mmu_private;
+	struct exfat_iloc	iloc;
+	struct hlist_node	hash_list;
+
+	struct exfat_cache	exfat_cache;
+	struct inode		vfs_inode;
+};
+
+static inline struct exfat_sb_info *EXFAT_SB(struct super_block *sb)
+{
+	return sb->s_fs_info;
+}
+
+static inline struct exfat_inode_info *EXFAT_I(struct inode *inode)
+{
+	return container_of(inode, struct exfat_inode_info, vfs_inode);
+}
+
+loff_t exfat_dir_links(struct inode *inode);
+
+int exfat_write_fat_contiguous(struct inode *inode, u32 first_cluster,
+			       u32 nr_clusters);
+int exfat_write_fat(struct inode *inode, u32 prev_cluster, u32 *clusters,
+		    u32 nr_clusters);
+
+__printf(3, 4) void exfat_msg(struct super_block *sb, const char *level,
+			      const char *fmt, ...);
+__printf(2, 3) void exfat_fs_error(struct super_block *sb,
+				   const char *fmt, ...);
+int exfat_get_fat_cluster(struct inode *inode, u32 fcluster, u32 *dcluster);
+int __exfat_get_fat_cluster(struct inode *inode, u32 fcluster, u32 *dcluster,
+			    bool eof_is_fatal);
+
+void exfat_inode_cache_init(struct inode *inode);
+void exfat_inode_cache_drop(struct inode *inode);
+
+int exfat_init_fat(struct super_block *sb);
+
+int exfat_init_bitmap(struct inode *root);
+void exfat_exit_bitmap(struct super_block *sb);
+int exfat_alloc_clusters(struct inode *inode, u32 hint_cluster,
+			 u32 *cluster, u32 nr);
+int exfat_free_clusters_inode(struct inode *inode, u32 start);
+
+
+/*
+ * read only bitmap accessors: used by EXFAT_IOCGETBITMAP ioctl.
+ */
+struct exfat_bitmap_ctx {
+	struct super_block *sb;
+	struct buffer_head *bh;
+	sector_t cur_sector;
+};
+
+int exfat_init_bitmap_context(struct super_block *sb,
+			      struct exfat_bitmap_ctx *ctx, u32 cluster);
+void exfat_exit_bitmap_context(struct exfat_bitmap_ctx *ctx);
+int exfat_test_bitmap(struct exfat_bitmap_ctx *ctx, uint32_t start_cluster,
+		      uint32_t *first_in_use, uint32_t *nr_in_use);
+
+
+/*
+ * return the physical sector address for a given cluster.
+ */
+static inline sector_t exfat_cluster_sector(struct exfat_sb_info *sbi,
+					    u32 cluster)
+{
+	return (sector_t)sbi->cluster_heap_offset + (cluster - 2) *
+		(sector_t)sbi->sectors_per_cluster;
+}
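+
+/*
+ * e.g. with cluster_heap_offset == 2048 and sectors_per_cluster == 8
+ * (hypothetical values), cluster 2, the first data cluster, maps to
+ * sector 2048, and cluster 10 maps to sector 2048 + 8 * 8 == 2112.
+ */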
+
+/*
+ * in dir.c
+ */
+struct exfat_dir_ctx {
+	struct super_block	*sb;
+	struct inode		*inode;
+	struct buffer_head	*bh;
+
+	off_t			off; /* from beginning of directory */
+	sector_t		sector;
+	bool empty;
+};
+
+int exfat_init_dir_ctx(struct inode *inode, struct exfat_dir_ctx *ctx,
+		       off_t off);
+void exfat_cleanup_dir_ctx(struct exfat_dir_ctx *dctx);
+int exfat_get_cluster_hint(struct inode *inode, u32 *out_hint);
+int exfat_dentry_next(void *, struct exfat_dir_ctx *, int, bool);
+void *__exfat_dentry_next(struct exfat_dir_ctx *dctx, int type, int mask,
+			  bool can_skip, bool *end);
+u16 exfat_direntry_checksum(void *data, u16 checksum, bool first);
+u32 exfat_dctx_fpos(struct exfat_dir_ctx *dctx);
+u64 exfat_dctx_dpos(struct exfat_dir_ctx *dctx);
+int __exfat_get_name(struct exfat_dir_ctx *dctx, u32 name_length, __le16 *name,
+		     u16 *calc_checksum, struct exfat_iloc *iloc);
+
+/*
+ * in namei.c
+ */
+
+/*
+ * hold a pointer to an exfat dir entry, with the corresponding bh.
+ */
+struct dir_entry_buffer {
+	struct buffer_head *bh;
+	u32 off; /* in bytes, inside the buffer_head b_data array */
+	void *start;
+};
+
+int exfat_get_dir_entry_buffers(struct inode *dir, struct exfat_iloc *iloc,
+				struct dir_entry_buffer *entries,
+				size_t nr_entries);
+u16 exfat_dir_entries_checksum(struct dir_entry_buffer *entries, u32 nr);
+void exfat_dirty_dir_entries(struct dir_entry_buffer *entries,
+			     size_t nr_entries, bool sync);
+void exfat_write_time(struct exfat_sb_info *sbi, struct timespec *ts,
+		      __le32 *datetime, u8 *time_cs, u8 *tz_offset);
+
+/*
+ * in inode.c
+ */
+
+int exfat_init_inodes(void);
+void exfat_exit_inodes(void);
+
+struct inode *exfat_iget(struct super_block *sb, loff_t disk_pos);
+void exfat_insert_inode_hash(struct inode *inode);
+void exfat_remove_inode_hash(struct inode *inode);
+int __exfat_write_inode(struct inode *inode, bool sync);
+
+/*
+ * in upcase.c
+ */
+int exfat_upcase_init(struct inode *root);
+static inline __le16 exfat_upcase_convert(struct super_block *sb, __le16 _c)
+{
+	u16 c = __le16_to_cpu(_c);
+
+	if (c >= EXFAT_SB(sb)->upcase_len)
+		return _c;
+	return EXFAT_SB(sb)->upcase_table[c];
+}
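+
+/*
+ * a case-insensitive comparison of two utf16 code units is then
+ * simply (sketch):
+ *
+ *	exfat_upcase_convert(sb, a) == exfat_upcase_convert(sb, b)
+ */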
+
+/*
+ * superblock operations
+ */
+struct inode *exfat_alloc_inode(struct super_block *sb);
+void exfat_destroy_inode(struct inode *_inode);
+int exfat_drop_inode(struct inode *inode);
+void exfat_evict_inode(struct inode *inode);
+
+/*
+ * file operations
+ */
+int exfat_iterate(struct file *f, struct dir_context *ctx);
+long exfat_ioctl(struct file *, unsigned int, unsigned long);
+int exfat_truncate_blocks(struct inode *inode, loff_t newsize);
+
+/*
+ * inode operations
+ */
+struct dentry *exfat_inode_lookup(struct inode *, struct dentry *,
+				  unsigned int);
+int exfat_inode_create(struct inode *dir, struct dentry *dentry, umode_t mode,
+		       bool excl);
+int exfat_inode_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode);
+
+mode_t exfat_make_mode(struct exfat_sb_info *sbi, mode_t mode, u16 attrs);
+
+int exfat_write_inode(struct inode *inode, struct writeback_control *wbc);
+
+int exfat_inode_unlink(struct inode *inode, struct dentry *dentry);
+
+int exfat_inode_rmdir(struct inode *inode, struct dentry *dentry);
+
+int exfat_getattr(struct vfsmount *, struct dentry *, struct kstat *);
+int exfat_setattr(struct dentry *, struct iattr *);
+int exfat_rename(struct inode *, struct dentry *,
+		 struct inode *, struct dentry *);
+
+/*
+ * address space operations
+ */
+int exfat_readpage(struct file *file, struct page *page);
+int exfat_readpages(struct file *file, struct address_space *mapping,
+		    struct list_head *pages, unsigned nr_pages);
+int exfat_write_begin(struct file *file, struct address_space *mapping,
+		      loff_t pos, unsigned len, unsigned flags,
+		      struct page **pagep, void **fsdata);
+int exfat_write_end(struct file *file, struct address_space *mapping,
+		    loff_t pos, unsigned len, unsigned copied,
+		    struct page *page, void *fsdata);
+int exfat_writepage(struct page *page, struct writeback_control *wbc);
+int exfat_writepages(struct address_space *, struct writeback_control *);
+
+
+extern const struct inode_operations exfat_dir_inode_operations;
+extern const struct inode_operations exfat_file_inode_operations;
+extern const struct file_operations exfat_dir_operations;
+extern const struct file_operations exfat_file_operations;
+extern const struct address_space_operations exfat_address_space_operations;
+
+/*
+ * time functions
+ */
+void exfat_time_2unix(struct timespec *ts, u32 datetime, u8 time_cs,
+		      s8 tz_offset);
+void exfat_time_2exfat(struct exfat_sb_info *sbi, struct timespec *ts,
+		       u32 *datetime, u8 *time_cs, s8 *tz_offset);
+
+static inline void exfat_lock_super(struct super_block *sb)
+{
+	mutex_lock(&EXFAT_SB(sb)->sb_mutex);
+}
+
+static inline void exfat_unlock_super(struct super_block *sb)
+{
+	mutex_unlock(&EXFAT_SB(sb)->sb_mutex);
+}
+
+#endif /*! __EXFAT_H */
diff -Nruw linux-4.4.115-fbx/fs/exfat./fat.c linux-4.4.115-fbx/fs/exfat/fat.c
--- linux-4.4.115-fbx/fs/exfat./fat.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/fs/exfat/fat.c	2019-01-25 20:32:47.247752405 +0100
@@ -0,0 +1,424 @@
+/*
+ * fat.c for exfat
+ * Created by <nschichan@freebox.fr> on Mon Jul 29 19:43:38 2013
+ */
+
+#include <linux/fs.h>
+#include <linux/buffer_head.h>
+#include <linux/slab.h>
+
+#include "exfat.h"
+#include "exfat_fs.h"
+
+#define MAX_CACHED_FAT	16
+
+/*
+ * helpers for exfat_next_fat_cluster.
+ */
+
+/*
+ * get the sector number in the fat where the next requested cluster
+ * number is to be found.
+ */
+static inline sector_t cluster_sector(struct exfat_sb_info *sbi, u32 cluster)
+{
+	return sbi->fat_offset + (((u64)cluster * sizeof (u32)) >> sbi->sectorbits);
+}
+
+/*
+ * get the offset in the fat sector where the next requested cluster
+ * number is to be found.
+ */
+static inline off_t cluster_offset(struct exfat_sb_info *sbi, u32 cluster)
+{
+	return (cluster * sizeof (u32)) & sbi->sectormask;
+}
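+
+/*
+ * e.g. with 512-byte sectors (sectorbits == 9), the entry for cluster
+ * 1000 sits at byte 4000 of the FAT, i.e. sector fat_offset + 7,
+ * offset 416 within that sector.
+ */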
+
+/*
+ * walk one step in the fat chain.
+ */
+static int exfat_next_fat_cluster(struct super_block *sb, u32 *cluster)
+{
+	struct exfat_sb_info *sbi = EXFAT_SB(sb);
+	sector_t sect = cluster_sector(sbi, *cluster);
+	off_t off = cluster_offset(sbi, *cluster);
+	struct buffer_head *bh;
+
+	bh = sb_bread(sb, sect);
+	if (!bh) {
+		exfat_msg(sb, KERN_ERR, "unable to read FAT sector at %llu",
+			  (unsigned long long)sect);
+		return -EIO;
+	}
+
+	*cluster = __le32_to_cpu(*(u32*)&bh->b_data[off]);
+	brelse(bh);
+	return 0;
+}
+
+/*
+ * setup inode cache
+ */
+void exfat_inode_cache_init(struct inode *inode)
+{
+	mutex_init(&EXFAT_I(inode)->exfat_cache.mutex);
+	EXFAT_I(inode)->exfat_cache.nr_entries = 0;
+	INIT_LIST_HEAD(&EXFAT_I(inode)->exfat_cache.entries);
+}
+
+/*
+ * drop inode cache content
+ */
+void exfat_inode_cache_drop(struct inode *inode)
+{
+	struct exfat_cache *cache = &EXFAT_I(inode)->exfat_cache;
+	struct exfat_cache_entry *e, *tmp;
+
+	mutex_lock(&cache->mutex);
+	list_for_each_entry_safe (e, tmp, &cache->entries, list) {
+		kfree(e);
+	}
+	INIT_LIST_HEAD(&cache->entries);
+	cache->nr_entries = 0;
+	mutex_unlock(&cache->mutex);
+}
+
+/*
+ * move the entry to the head of the list; this makes it less likely
+ * to be the victim when caching new entries.
+ *
+ * caller must hold cache->mutex.
+ */
+static void __exfat_fat_lru(struct exfat_cache *cache,
+			  struct exfat_cache_entry *e)
+{
+	if (cache->entries.next != &e->list)
+		list_move(&e->list, &cache->entries);
+}
+
+/*
+ * find a cache entry that is close to the wanted fcluster (ideally
+ * spanning over the requested file cluster).
+ *
+ * caller must hold cache->mutex.
+ */
+static struct exfat_cache_entry *__exfat_cache_lookup(struct exfat_cache *cache,
+						      u32 fcluster)
+{
+	struct exfat_cache_entry *e;
+	struct exfat_cache_entry *best = NULL;
+
+	list_for_each_entry (e, &cache->entries, list) {
+		if (e->file_cluster <= fcluster &&
+		    e->file_cluster + e->nr_contig >= fcluster)
+			return e;
+
+		if (!best && e->file_cluster < fcluster)
+			best = e;
+		if (best && best->file_cluster < e->file_cluster &&
+		    e->file_cluster < fcluster)
+			best = e;
+	}
+	return best;
+}
+
+/*
+ * caller must hold cache->mutex.
+ */
+static int __exfat_cache_cluster(struct exfat_cache *cache,
+			       struct exfat_cache_entry *nearest,
+			       u32 fcluster, u32 dcluster)
+{
+	struct exfat_cache_entry *e;
+
+	/*
+	 * see if we can merge with the nearest entry. in the ideal
+	 * case, all cluster in the chain are contiguous, and only
+	 * one entry is needed for a single file.
+	 */
+	if (nearest &&
+	    nearest->file_cluster + nearest->nr_contig + 1 == fcluster &&
+	    nearest->disk_cluster + nearest->nr_contig + 1 == dcluster) {
+		list_move(&nearest->list, &cache->entries);
+		nearest->nr_contig++;
+		return 0;
+	}
+
+	/*
+	 * allocate a new entry or reuse an existing one if the number
+	 * of cached entries is too high.
+	 */
+	if (cache->nr_entries < MAX_CACHED_FAT) {
+		e = kmalloc(sizeof (*e), GFP_NOFS);
+		if (!e)
+			return -ENOMEM;
+		list_add(&e->list, &cache->entries);
+		++cache->nr_entries;
+	} else {
+		e = list_entry(cache->entries.prev, struct exfat_cache_entry,
+			       list);
+		list_move(&e->list, &cache->entries);
+	}
+
+	e->file_cluster = fcluster;
+	e->disk_cluster = dcluster;
+	e->nr_contig = 0;
+
+	return 0;
+}
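+
+/*
+ * e.g. an entry {file 4, disk 100, nr_contig 2} covers file clusters
+ * 4-6 at disk clusters 100-102: caching file cluster 7 found at disk
+ * cluster 103 merely bumps nr_contig to 3 instead of consuming a new
+ * entry.
+ */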
+
+int __exfat_get_fat_cluster(struct inode *inode, u32 fcluster, u32 *dcluster,
+			    bool eof_is_fatal)
+{
+	struct exfat_inode_info *info = EXFAT_I(inode);
+	struct exfat_cache *cache = &info->exfat_cache;
+	int error;
+	struct exfat_cache_entry *e;
+	u32 fcluster_start;
+
+	/*
+	 * initial translation: the first file cluster is found in the
+	 * inode info.
+	 */
+	if (fcluster == 0) {
+		*dcluster = info->first_cluster;
+		return 0;
+	}
+
+	mutex_lock(&cache->mutex);
+	/*
+	 * try to find a cached entry either covering the file cluster
+	 * we want or at least close to the file cluster.
+	 */
+	e = __exfat_cache_lookup(cache, fcluster);
+	if (e && e->file_cluster <= fcluster &&
+	    e->file_cluster + e->nr_contig >= fcluster) {
+		/*
+		 * perfect match, entry zone covers the requested file
+		 * cluster.
+		 */
+		__exfat_fat_lru(cache, e);
+		*dcluster = e->disk_cluster + (fcluster - e->file_cluster);
+		mutex_unlock(&cache->mutex);
+		return 0;
+	}
+
+	if (e) {
+		/*
+		 * we have an entry, hopefully close enough, setup
+		 * cluster walk from there.
+		 */
+		*dcluster = e->disk_cluster + e->nr_contig;
+		fcluster_start = e->file_cluster + e->nr_contig;
+	} else {
+		/*
+		 * no entry, walk the FAT chain from the start of the
+		 * file.
+		 */
+		fcluster_start = 0;
+		*dcluster = info->first_cluster;
+	}
+
+	/*
+	 * walk the FAT chain the number of times required to get the
+	 * disk cluster corresponding to the file cluster.
+	 */
+	while (fcluster_start != fcluster) {
+		error = exfat_next_fat_cluster(inode->i_sb, dcluster);
+		if (error) {
+			mutex_unlock(&cache->mutex);
+			return error;
+		}
+		if (*dcluster == EXFAT_CLUSTER_EOF) {
+			if (eof_is_fatal)
+				/*
+				 * exfat_fill_root uses
+				 * __exfat_get_fat_cluster with
+				 * eof_is_fatal set to false, as the
+				 * root inode does not have a size
+				 * field and thus requires a complete
+				 * FAT walk to compute the size.
+				 */
+				exfat_fs_error(inode->i_sb, "premature EOF in FAT "
+					       "chain. file cluster %u out "
+					       "of %u\n", fcluster_start,
+					       fcluster);
+			mutex_unlock(&cache->mutex);
+			return -EIO;
+		}
+		if (*dcluster < EXFAT_CLUSTER_FIRSTVALID) {
+			exfat_fs_error(inode->i_sb, "invalid cluster %u found "
+				       "in fat chain.", *dcluster);
+			mutex_unlock(&cache->mutex);
+			return -EIO;
+		}
+		++fcluster_start;
+	}
+
+	/*
+	 * cache the result.
+	 */
+	__exfat_cache_cluster(cache, e, fcluster, *dcluster);
+	mutex_unlock(&cache->mutex);
+	return 0;
+}
+
+int exfat_get_fat_cluster(struct inode *inode, u32 fcluster, u32 *dcluster)
+{
+	return __exfat_get_fat_cluster(inode, fcluster, dcluster, true);
+}
+
+int exfat_init_fat(struct super_block *sb)
+{
+	struct exfat_sb_info *sbi = EXFAT_SB(sb);
+	struct buffer_head *bh;
+	int error = 0;
+	u32 first, second;
+
+	bh = sb_bread(sb, sbi->fat_offset);
+	if (!bh) {
+		exfat_msg(sb, KERN_ERR, "unable to read FAT sector at %u",
+			  sbi->fat_offset);
+		return -EIO;
+	}
+
+	first = __le32_to_cpu(*(__le32*)(bh->b_data + 0));
+	second = __le32_to_cpu(*(__le32*)(bh->b_data + sizeof (__le32)));
+
+	if (first != EXFAT_CLUSTER_MEDIATYPE || second != EXFAT_CLUSTER_EOF) {
+		exfat_msg(sb, KERN_INFO, "invalid FAT start: %08x, %08x",
+			  first, second);
+		error = -ENXIO;
+	}
+
+	brelse(bh);
+	return error;
+}
+
+/*
+ * fat write context: store the current buffer_head and current
+ * cluster to avoid calling sb_bread repeatedly when the clusters are
+ * contiguous or at least not too far apart.
+ */
+struct fat_write_ctx {
+	struct super_block *sb;
+	struct buffer_head *bh;
+	u32 cur_cluster;
+};
+
+static void fat_init_write_ctx(struct fat_write_ctx *fwctx,
+				struct super_block *sb)
+{
+	memset(fwctx, 0, sizeof (*fwctx));
+	fwctx->sb = sb;
+}
+
+static void fat_exit_write_ctx(struct fat_write_ctx *fwctx)
+{
+	if (fwctx->bh)
+		brelse(fwctx->bh);
+}
+
+static int __fat_write_entry(struct fat_write_ctx *fwctx,
+			       u32 cluster, u32 next)
+{
+	struct exfat_sb_info *sbi = EXFAT_SB(fwctx->sb);
+	sector_t current_sector = cluster_sector(sbi, fwctx->cur_cluster);
+	sector_t wanted_sector = cluster_sector(sbi, cluster);
+	off_t off = cluster_offset(sbi, cluster);
+
+	/*
+	 * first see if we need a different buffer head from the
+	 * current one in the fat_write_ctx.
+	 */
+	if (current_sector != wanted_sector || !fwctx->bh) {
+		if (fwctx->bh)
+			brelse(fwctx->bh);
+		fwctx->bh = sb_bread(fwctx->sb, wanted_sector);
+		if (!fwctx->bh) {
+			exfat_msg(fwctx->sb, KERN_ERR,
+				  "unable to read FAT sector at %llu",
+				  (unsigned long long)wanted_sector);
+			return -EIO;
+		}
+	}
+
+	/*
+	 * set fat cluster to point to the next cluster, and mark bh
+	 * dirty so that the change hits the storage device.
+	 */
+	fwctx->cur_cluster = cluster;
+	*(__le32*)(fwctx->bh->b_data + off) = __cpu_to_le32(next);
+	mark_buffer_dirty(fwctx->bh);
+	return 0;
+}
+
+/*
+ * write nr_clusters contiguous clusters starting at first_cluster.
+ */
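+/*
+ * e.g. exfat_write_fat_contiguous(inode, 10, 4) writes FAT[10] = 11,
+ * FAT[11] = 12, FAT[12] = 13 and FAT[13] = EXFAT_CLUSTER_EOF.
+ */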
+int exfat_write_fat_contiguous(struct inode *inode, u32 first_cluster,
+			       u32 nr_clusters)
+{
+	u32 cluster;
+	struct fat_write_ctx fwctx;
+	int error = 0;
+
+	fat_init_write_ctx(&fwctx, inode->i_sb);
+	for (cluster = first_cluster;
+	     cluster < first_cluster + nr_clusters - 1;
+	     ++cluster) {
+		error = __fat_write_entry(&fwctx, cluster, cluster + 1);
+		if (error)
+			goto end;
+	}
+
+	/*
+	 * set EOF
+	 */
+	error = __fat_write_entry(&fwctx, cluster, EXFAT_CLUSTER_EOF);
+end:
+	fat_exit_write_ctx(&fwctx);
+	return error;
+}
+
+/*
+ * write cluster nr_clusters stored in clusters array, link with prev_cluster.
+ */
+int exfat_write_fat(struct inode *inode, u32 prev_cluster, u32 *clusters,
+		    u32 nr_clusters)
+{
+	u32 i;
+	struct fat_write_ctx fwctx;
+	int error;
+
+	if (!nr_clusters)
+		/* nothing to write */
+		return 0;
+
+	fat_init_write_ctx(&fwctx, inode->i_sb);
+
+	if (prev_cluster) {
+		/*
+		 * link with previous cluster if applicable.
+		 */
+		error = __fat_write_entry(&fwctx, prev_cluster, clusters[0]);
+		if (error)
+			goto end;
+	}
+	for (i = 0; i < nr_clusters - 1; ++i) {
+		error = __fat_write_entry(&fwctx, clusters[i], clusters[i + 1]);
+		if (error)
+			goto end;
+	}
+
+	/*
+	 * set EOF.
+	 */
+	error = __fat_write_entry(&fwctx, clusters[i], EXFAT_CLUSTER_EOF);
+
+ end:
+	fat_exit_write_ctx(&fwctx);
+	return error;
+}
diff -Nruw linux-4.4.115-fbx/fs/exfat./file.c linux-4.4.115-fbx/fs/exfat/file.c
--- linux-4.4.115-fbx/fs/exfat./file.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/fs/exfat/file.c	2019-01-25 20:32:47.247752405 +0100
@@ -0,0 +1,427 @@
+/*
+ * file.c for exfat
+ * Created by <nschichan@freebox.fr> on Tue Aug 20 14:39:41 2013
+ */
+
+#include <linux/buffer_head.h>
+#include <linux/fs.h>
+#include <linux/exfat_user.h>
+
+#include "exfat.h"
+#include "exfat_fs.h"
+
+static int append_fragment(struct exfat_fragment __user *ufrag,
+			   struct exfat_fragment *kfrag)
+{
+	if (copy_to_user(ufrag, kfrag, sizeof (*kfrag)))
+		return -EFAULT;
+	return 0;
+}
+
+static void setup_fragment(struct exfat_sb_info *sbi,
+			  struct exfat_fragment *fragment, uint32_t fcluster,
+			  uint32_t dcluster)
+{
+	fragment->fcluster_start = fcluster;
+	fragment->dcluster_start = dcluster;
+	fragment->sector_start = exfat_cluster_sector(sbi, dcluster);
+	fragment->nr_clusters = 1;
+}
+
+static int exfat_ioctl_get_fragments(struct inode *inode,
+				     struct exfat_fragment_head __user *uhead)
+{
+	struct exfat_fragment_head head;
+	struct exfat_fragment fragment;
+	u32 fcluster;
+	u32 prev_dcluster;
+	u32 cur_fragment;
+	struct exfat_inode_info *info = EXFAT_I(inode);
+	struct exfat_sb_info *sbi = EXFAT_SB(inode->i_sb);
+	int error;
+
+	memset(&fragment, 0, sizeof (fragment));
+
+	if (copy_from_user(&head, uhead, sizeof (head)))
+		return -EFAULT;
+
+	if (put_user(sbi->sectorsize, &uhead->sector_size) ||
+	    put_user(sbi->clustersize, &uhead->cluster_size))
+		return -EFAULT;
+
+	if (!head.nr_fragments) {
+		/*
+		 * user did not provide space for fragments after
+		 * header.
+		 */
+		return 0;
+	}
+
+	if (head.fcluster_start >= info->allocated_clusters) {
+		/*
+		 * requested start cluster is after file EOF
+		 */
+		if (put_user(0, &uhead->nr_fragments))
+			return -EFAULT;
+		return 0;
+	}
+
+	if (info->flags & EXFAT_I_FAT_INVALID) {
+		/*
+		 * not FAT chain, this file has only one fragment.
+		 */
+		fragment.fcluster_start = head.fcluster_start;
+		fragment.dcluster_start =
+			info->first_cluster + head.fcluster_start;
+		fragment.nr_clusters = info->allocated_clusters -
+			head.fcluster_start;
+		fragment.sector_start =
+			exfat_cluster_sector(sbi, fragment.dcluster_start);
+
+		if (copy_to_user(&uhead->fragments[0], &fragment,
+				 sizeof (fragment)))
+			return -EFAULT;
+		if (put_user(1, &uhead->nr_fragments))
+			return -EFAULT;
+		if (put_user(info->first_cluster + info->allocated_clusters,
+			     &uhead->fcluster_start))
+			return -EFAULT;
+		return 0;
+	}
+
+	fcluster = head.fcluster_start;
+	cur_fragment = 0;
+
+	/*
+	 * initial fragment setup
+	 */
+	error = exfat_get_fat_cluster(inode, fcluster,
+				      &prev_dcluster);
+	if (error)
+		return error;
+	setup_fragment(sbi, &fragment, fcluster, prev_dcluster);
+	++fcluster;
+	while (fcluster < info->allocated_clusters) {
+		int error;
+		u32 dcluster;
+
+		/*
+		 * walk one step in the FAT.
+		 */
+		error = exfat_get_fat_cluster(inode, fcluster, &dcluster);
+		if (error)
+			return error;
+
+		if (prev_dcluster == dcluster - 1) {
+			/*
+			 * dcluster and prev_dcluster are contiguous.
+			 */
+			++fragment.nr_clusters;
+		} else {
+			/*
+			 * put this cluster in the user array
+			 */
+			error = append_fragment(&uhead->fragments[cur_fragment],
+						&fragment);
+			if (error)
+				return error;
+
+			++cur_fragment;
+			if (cur_fragment == head.nr_fragments)
+				break;
+
+			/*
+			 * setup a new fragment.
+			 */
+			setup_fragment(sbi, &fragment, fcluster, dcluster);
+		}
+		++fcluster;
+		prev_dcluster = dcluster;
+	}
+
+	if (cur_fragment < head.nr_fragments) {
+		append_fragment(&uhead->fragments[cur_fragment], &fragment);
+		++cur_fragment;
+	}
+
+	/*
+	 * update nr_fragments in user supplied head.
+	 */
+	if (cur_fragment != head.nr_fragments &&
+	    put_user(cur_fragment, &uhead->nr_fragments))
+		return -EFAULT;
+
+	/*
+	 * update fcluster_start in user supplied head.
+	 */
+	if (put_user(fcluster, &uhead->fcluster_start))
+		return -EFAULT;
+
+	return 0;
+}
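+
+/*
+ * userspace usage sketch for the above (types from
+ * linux/exfat_user.h): pass a head followed by room for nr_fragments
+ * entries, then resume at the returned fcluster_start:
+ *
+ *	struct {
+ *		struct exfat_fragment_head head;
+ *		struct exfat_fragment frags[16];
+ *	} req = { .head = { .fcluster_start = 0, .nr_fragments = 16 } };
+ *
+ *	ioctl(fd, EXFAT_IOCGETFRAGMENTS, &req);
+ */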
+
+static int exfat_ioctl_get_bitmap(struct super_block *sb,
+				  struct exfat_bitmap_head __user *uhead)
+{
+	struct exfat_sb_info *sbi = EXFAT_SB(sb);
+	struct exfat_bitmap_head head;
+	uint32_t i;
+	int error;
+	struct exfat_bitmap_ctx ctx;
+	uint32_t start_cluster;
+
+	if (copy_from_user(&head, uhead, sizeof (head)))
+		return -EFAULT;
+
+	start_cluster = head.start_cluster;
+	if (start_cluster < 2)
+		return -EINVAL;
+
+	error = exfat_init_bitmap_context(sb, &ctx, head.start_cluster);
+	if (error)
+		return error;
+	for (i = 0; i < head.nr_entries; ++i) {
+		uint32_t first_in_use;
+		uint32_t nr_in_use;
+		int error;
+
+		error = exfat_test_bitmap(&ctx, start_cluster, &first_in_use,
+					  &nr_in_use);
+		if (error)
+			goto out_error;
+
+		if (first_in_use == sbi->cluster_count)
+			break;
+		if (put_user(first_in_use, &uhead->entries[i].start_cluster))
+			goto out_efault;
+		if (put_user(nr_in_use, &uhead->entries[i].nr_clusters))
+			goto out_efault;
+		if (put_user(exfat_cluster_sector(sbi, first_in_use),
+			     &uhead->entries[i].sector_start))
+			goto out_efault;
+		if (put_user((u64)nr_in_use * sbi->sectors_per_cluster,
+			     &uhead->entries[i].nr_sectors))
+			goto out_efault;
+		start_cluster = first_in_use + nr_in_use + 1;
+	}
+
+	exfat_exit_bitmap_context(&ctx);
+	if (put_user(i, &uhead->nr_entries))
+		return -EFAULT;
+	if (put_user(start_cluster, &uhead->start_cluster))
+		return -EFAULT;
+
+	return 0;
+
+out_efault:
+	error = -EFAULT;
+out_error:
+	exfat_exit_bitmap_context(&ctx);
+	return error;
+}
+
+static int exfat_ioctl_get_dirents(struct inode *inode,
+				   struct exfat_dirent_head __user *uhead)
+{
+	struct exfat_dir_ctx dctx;
+	struct exfat_dirent_head head;
+	int error;
+	uint32_t i;
+
+	if (!S_ISDIR(inode->i_mode))
+		return -ENOTDIR;
+
+	if (copy_from_user(&head, uhead, sizeof (head)))
+		return -EFAULT;
+
+	/* make sure we're aligned on an entry boundary */
+	head.offset &= ~0x1f;
+
+	error = exfat_init_dir_ctx(inode, &dctx, head.offset);
+	if (error < 0)
+		return error;
+
+	error = 0;
+	for (i = 0; i < head.nr_entries; ++i) {
+		bool end;
+		u8 *entry = __exfat_dentry_next(&dctx, 0, 0, false, &end);
+		u8 type;
+
+		if (!entry && end)
+			/* genuine end of file */
+			break;
+		if (!entry) {
+			/* something went wrong */
+			error = -EIO;
+			goto out;
+		}
+		type = *entry;
+
+		if (put_user(type, &uhead->entries[i])) {
+			error = -EFAULT;
+			goto out;
+		}
+	}
+
+	/*
+	 * update head nr_entries and offset.
+	 */
+	if (put_user(i, &uhead->nr_entries))  {
+		error = -EFAULT;
+		goto out;
+	}
+	if (put_user(head.offset + 0x20 * i, &uhead->offset)) {
+		error = -EFAULT;
+		goto out;
+	}
+
+ out:
+	exfat_cleanup_dir_ctx(&dctx);
+	return error;
+}
+
+long exfat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+	switch (cmd) {
+	case EXFAT_IOCGETFRAGMENTS:
+		return exfat_ioctl_get_fragments(file_inode(file),
+						 (void __user*)arg);
+	case EXFAT_IOCGETBITMAP:
+		return exfat_ioctl_get_bitmap(file_inode(file)->i_sb,
+					      (void __user*)arg);
+	case EXFAT_IOCGETDIRENTS:
+		return exfat_ioctl_get_dirents(file_inode(file),
+					       (void __user*)arg);
+	default:
+		return -ENOTTY;
+	}
+}
+
+static int exfat_cont_expand(struct inode *inode, loff_t newsize)
+{
+	int error;
+
+	error = generic_cont_expand_simple(inode, newsize);
+	if (error)
+		return error;
+
+	inode->i_mtime = CURRENT_TIME_SEC;
+	mark_inode_dirty(inode);
+
+	if (IS_SYNC(inode))
+		exfat_msg(inode->i_sb, KERN_ERR, "TODO: cont_expand with "
+			  "sync mode.");
+	return 0;
+}
+
+int exfat_truncate_blocks(struct inode *inode, loff_t newsize)
+{
+	struct exfat_sb_info *sbi = EXFAT_SB(inode->i_sb);
+	u32 fcluster = (newsize + sbi->clustersize - 1) >> sbi->clusterbits;
+	int error;
+
+	if (EXFAT_I(inode)->mmu_private > newsize)
+		EXFAT_I(inode)->mmu_private = newsize;
+
+	error = exfat_free_clusters_inode(inode, fcluster);
+	if (error) {
+		exfat_msg(inode->i_sb, KERN_INFO, "exfat_free_clusters_inode: "
+			  "%i", error);
+		return error;
+	}
+
+	return 0;
+}
+
+int exfat_getattr(struct vfsmount *mnt, struct dentry *dentry,
+		  struct kstat *stat)
+{
+	struct inode *inode = dentry->d_inode;
+	generic_fillattr(inode, stat);
+	stat->blksize = EXFAT_SB(inode->i_sb)->clustersize;
+	return 0;
+}
+
+#define EXFAT_VALID_MODE       (S_IFREG | S_IFDIR | S_IRWXUGO)
+
+static int exfat_mode_fixup(struct inode *inode, umode_t *mode)
+{
+	mode_t mask, perm;
+	struct exfat_sb_info *sbi = EXFAT_SB(inode->i_sb);
+
+	if (S_ISDIR(*mode))
+		mask = sbi->options.dmask;
+	else
+		mask = sbi->options.fmask;
+
+	perm = *mode & ~(S_IFMT | mask);
+
+	/*
+	 * we want 'r' and 'x' bits when mask allows for it.
+	 */
+	if ((perm & (S_IRUGO | S_IXUGO)) !=
+	    (inode->i_mode & ~mask & (S_IRUGO | S_IXUGO))) {
+		return -EPERM;
+	}
+
+	/*
+	 * we want all 'w' bits or none, depending on mask.
+	 */
+	if ((perm & S_IWUGO) && (perm & S_IWUGO) != (~mask & S_IWUGO))
+		return -EPERM;
+	*mode &= ~mask;
+	return 0;
+}
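+
+/*
+ * e.g. with fmask == 022 files appear as 0755: a chmod to 0755 or
+ * 0555 is accepted, while 0700 or 0666 is refused, since exfat only
+ * stores a single read-only attribute.
+ */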
+
+int exfat_setattr(struct dentry *dentry, struct iattr *attrs)
+{
+	struct inode *inode = dentry->d_inode;
+	int error;
+
+	/*
+	 * uid/gid can only be set to the same values as the current
+	 * ones in the inode.
+	 */
+	if (attrs->ia_valid & ATTR_UID &&
+	    !uid_eq(inode->i_uid, attrs->ia_uid))
+		return -EPERM;
+
+	if (attrs->ia_valid & ATTR_GID &&
+	    !gid_eq(inode->i_gid, attrs->ia_gid))
+		return -EPERM;
+
+	if (attrs->ia_valid & ATTR_MODE &&
+	    (attrs->ia_mode & ~EXFAT_VALID_MODE ||
+	     exfat_mode_fixup(inode, &attrs->ia_mode) < 0)) {
+		/*
+		 * silently ignore mode change if we're not OK with
+		 * it (same behavior as vfat).
+		 */
+		attrs->ia_valid &= ~ATTR_MODE;
+	}
+
+	if (attrs->ia_valid & ATTR_SIZE) {
+		inode_dio_wait(inode);
+		if (attrs->ia_size > inode->i_size) {
+			/*
+			 * expand file
+			 */
+			error = exfat_cont_expand(inode, attrs->ia_size);
+			if (error)
+				return error;
+		} else {
+			/*
+			 * shrink file
+			 */
+			truncate_setsize(inode, attrs->ia_size);
+			exfat_truncate_blocks(inode, attrs->ia_size);
+		}
+	}
+
+	setattr_copy(inode, attrs);
+	mark_inode_dirty(inode);
+	return 0;
+}
diff -Nruw linux-4.4.115-fbx/fs/exfat./inode.c linux-4.4.115-fbx/fs/exfat/inode.c
--- linux-4.4.115-fbx/fs/exfat./inode.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/fs/exfat/inode.c	2019-01-25 20:32:47.247752405 +0100
@@ -0,0 +1,277 @@
+/*
+ * inode.c for exfat
+ * Created by <nschichan@freebox.fr> on Wed Jul 24 16:15:52 2013
+ */
+
+#include <linux/kernel.h>
+#include <linux/fs.h>
+#include <linux/slab.h>
+#include <linux/buffer_head.h>
+#include <linux/writeback.h>
+#include <linux/hash.h>
+
+#include "exfat_fs.h"
+#include "exfat.h"
+
+static struct kmem_cache *exfat_inodes_cachep;
+
+/*
+ * inode callbacks.
+ */
+struct inode *exfat_alloc_inode(struct super_block *sb)
+{
+	struct exfat_inode_info *ei = kmem_cache_alloc(exfat_inodes_cachep,
+						       GFP_NOFS);
+
+	if (!ei)
+		return NULL;
+
+	return &ei->vfs_inode;
+}
+
+static void exfat_i_callback(struct rcu_head *head)
+{
+	struct inode *inode = container_of(head, struct inode, i_rcu);
+
+	kmem_cache_free(exfat_inodes_cachep, EXFAT_I(inode));
+}
+
+void exfat_destroy_inode(struct inode *_inode)
+{
+	struct exfat_inode_info *inode = EXFAT_I(_inode);
+
+	call_rcu(&inode->vfs_inode.i_rcu, exfat_i_callback);
+}
+
+static void exfat_inode_init_once(void *ptr)
+{
+	struct exfat_inode_info *info = ptr;
+
+	INIT_HLIST_NODE(&info->hash_list);
+	exfat_inode_cache_init(&info->vfs_inode);
+	inode_init_once(&info->vfs_inode);
+}
+
+/*
+ * inode cache create/destroy.
+ */
+int exfat_init_inodes(void)
+{
+	exfat_inodes_cachep = kmem_cache_create("exfat-inodes",
+				       sizeof (struct exfat_inode_info), 0,
+				       SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD,
+				       exfat_inode_init_once);
+	if (!exfat_inodes_cachep)
+		return -ENOMEM;
+	return 0;
+}
+
+void exfat_exit_inodes(void)
+{
+	kmem_cache_destroy(exfat_inodes_cachep);
+}
+
+int exfat_drop_inode(struct inode *inode)
+{
+	return generic_drop_inode(inode);
+}
+
+void exfat_evict_inode(struct inode *inode)
+{
+	truncate_inode_pages_final(&inode->i_data);
+	if (!inode->i_nlink) {
+		inode->i_size = 0;
+		exfat_free_clusters_inode(inode, 0);
+	}
+	invalidate_inode_buffers(inode);
+	clear_inode(inode);
+	exfat_remove_inode_hash(inode);
+	exfat_inode_cache_drop(inode);
+}
+
+static u32 exfat_hash(loff_t disk_pos)
+{
+	return hash_32(disk_pos, EXFAT_HASH_BITS);
+}
+
+struct inode *exfat_iget(struct super_block *sb, loff_t disk_pos)
+{
+	struct exfat_inode_info *info;
+	struct exfat_sb_info *sbi = EXFAT_SB(sb);
+	struct hlist_head *head = sbi->inode_hash + exfat_hash(disk_pos);
+	struct inode *ret = NULL;
+
+	spin_lock(&sbi->inode_hash_lock);
+	hlist_for_each_entry (info, head, hash_list) {
+		if (info->iloc.disk_offs[0] != disk_pos)
+			continue ;
+		ret = igrab(&info->vfs_inode);
+		if (ret)
+			break;
+	}
+	spin_unlock(&sbi->inode_hash_lock);
+	return ret;
+}
+
+void exfat_insert_inode_hash(struct inode *inode)
+{
+	struct exfat_sb_info *sbi = EXFAT_SB(inode->i_sb);
+	struct exfat_inode_info *info = EXFAT_I(inode);
+	struct hlist_head *head = sbi->inode_hash +
+		exfat_hash(info->iloc.disk_offs[0]);
+
+	spin_lock(&sbi->inode_hash_lock);
+	hlist_add_head(&info->hash_list, head);
+	spin_unlock(&sbi->inode_hash_lock);
+}
+
+void exfat_remove_inode_hash(struct inode *inode)
+{
+	struct exfat_inode_info *info = EXFAT_I(inode);
+	struct exfat_sb_info *sbi = EXFAT_SB(inode->i_sb);
+
+	spin_lock(&sbi->inode_hash_lock);
+	info->iloc.disk_offs[0] = 0;
+	hlist_del_init(&info->hash_list);
+	spin_unlock(&sbi->inode_hash_lock);
+}
+
+/*
+ * calculate the number of links in a directory: this is the number of
+ * E_EXFAT_FILEDIR typed elements in the directory stream, and does
+ * not include the '.' and '..' entries.
+ */
+loff_t exfat_dir_links(struct inode *inode)
+{
+	size_t ret = 0;
+	struct exfat_dir_ctx dctx;
+	int error;
+	bool end;
+
+	error = exfat_init_dir_ctx(inode, &dctx, 0);
+	if (error)
+		return error;
+
+	error = -EIO;
+	for (;;) {
+		struct exfat_filedir_entry *e =
+			__exfat_dentry_next(&dctx, E_EXFAT_FILEDIR, 0xff,
+					    true, &end);
+		if (!e) {
+			if (end)
+				error = 0;
+			goto out;
+		}
+		++ret;
+	}
+out:
+	exfat_cleanup_dir_ctx(&dctx);
+	if (error)
+		return error;
+	return ret;
+}
+
+int exfat_get_cluster_hint(struct inode *inode, u32 *out_hint)
+{
+	struct exfat_inode_info *info = EXFAT_I(inode);
+	int error;
+	u32 first_cluster = info->first_cluster;
+
+	if (!first_cluster) {
+		/*
+		 * empty file, return a cluster likely to be free.
+		 */
+		*out_hint = EXFAT_SB(inode->i_sb)->prev_free_cluster + 2;
+		return 0;
+	}
+
+	if (info->flags & EXFAT_I_FAT_INVALID) {
+		/*
+		 * no FAT run: all clusters are contiguous, so set the
+		 * hint to the cluster right after the last file
+		 * cluster.
+		 */
+		*out_hint = first_cluster + info->allocated_clusters;
+		return 0;
+	}
+
+	/*
+	 * fat run available, walk it to get the last physical cluster
+	 * address and set hint to the immediate next physical
+	 * cluster.
+	 */
+	error = exfat_get_fat_cluster(inode, info->allocated_clusters - 1,
+				      out_hint);
+	if (error)
+		return error;
+	(*out_hint)++;
+	return 0;
+}
+
+int __exfat_write_inode(struct inode *inode, bool sync)
+{
+	struct exfat_sb_info *sbi = EXFAT_SB(inode->i_sb);
+	struct exfat_inode_info *info = EXFAT_I(inode);
+	struct dir_entry_buffer entries[info->iloc.nr_secondary];
+	int error;
+	struct exfat_filedir_entry *efd;
+	struct exfat_stream_extension_entry *esx;
+	u16 checksum;
+
+	if (inode->i_ino == EXFAT_ROOT_INO)
+		return 0;
+
+	if (info->iloc.disk_offs[0] == 0) {
+		/*
+		 * write_inode() to unlinked inode: don't corrupt
+		 * superblock.
+		 */
+		return 0;
+	}
+
+	error = exfat_get_dir_entry_buffers(inode, &info->iloc,
+					    entries, info->iloc.nr_secondary);
+	if (error)
+		return error;
+
+	if (inode->i_mode & S_IWUGO)
+		info->attributes &= ~E_EXFAT_ATTR_RO;
+	else
+		info->attributes |= E_EXFAT_ATTR_RO;
+
+	efd = entries[0].start;
+	esx = entries[1].start;
+
+	efd->attributes = __cpu_to_le16(info->attributes);
+	esx->valid_data_length = esx->data_length =
+		__cpu_to_le64(inode->i_size);
+	esx->flags = info->flags;
+	esx->first_cluster = __cpu_to_le32(info->first_cluster);
+
+	exfat_write_time(sbi, &inode->i_ctime, &efd->create, &efd->create_10ms,
+			 &efd->create_tz_offset);
+	exfat_write_time(sbi, &inode->i_mtime, &efd->modified,
+			 &efd->modified_10ms, &efd->modified_tz_offset);
+	exfat_write_time(sbi, &inode->i_atime, &efd->accessed, NULL,
+			 &efd->accessed_tz_offset);
+
+	checksum = exfat_dir_entries_checksum(entries, info->iloc.nr_secondary);
+	efd->set_checksum = __cpu_to_le16(checksum);
+
+	exfat_dirty_dir_entries(entries, info->iloc.nr_secondary, sync);
+
+	return 0;
+}
+
+int exfat_write_inode(struct inode *inode, struct writeback_control *wbc)
+{
+	int ret;
+
+	exfat_lock_super(inode->i_sb);
+	ret = __exfat_write_inode(inode, wbc->sync_mode == WB_SYNC_ALL);
+	exfat_unlock_super(inode->i_sb);
+	return ret;
+}
diff -Nruw linux-4.4.115-fbx/fs/exfat./Kconfig linux-4.4.115-fbx/fs/exfat/Kconfig
--- linux-4.4.115-fbx/fs/exfat./Kconfig	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/fs/exfat/Kconfig	2019-01-25 20:32:47.247752405 +0100
@@ -0,0 +1,3 @@
+
+config EXFAT_FS
+	tristate "exFAT fs support"
diff -Nruw linux-4.4.115-fbx/fs/exfat./Makefile linux-4.4.115-fbx/fs/exfat/Makefile
--- linux-4.4.115-fbx/fs/exfat./Makefile	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/fs/exfat/Makefile	2019-01-25 20:32:47.247752405 +0100
@@ -0,0 +1,13 @@
+
+obj-$(CONFIG_EXFAT_FS)	+= exfat.o
+
+exfat-y	= super.o				\
+	inode.o					\
+	fat.o					\
+	read-write.o				\
+	upcase.o				\
+	bitmap.o				\
+	time.o					\
+	dir.o					\
+	namei.o					\
+	file.o
diff -Nruw linux-4.4.115-fbx/fs/exfat./namei.c linux-4.4.115-fbx/fs/exfat/namei.c
--- linux-4.4.115-fbx/fs/exfat./namei.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/fs/exfat/namei.c	2019-01-25 20:32:47.247752405 +0100
@@ -0,0 +1,924 @@
+/*
+ * namei.c for exfat
+ * Created by <nschichan@freebox.fr> on Tue Aug 20 12:00:27 2013
+ */
+
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/fs.h>
+#include <linux/buffer_head.h>
+#include <linux/nls.h>
+
+#include "exfat.h"
+#include "exfat_fs.h"
+
+static u16 exfat_filename_hash_cont(struct super_block *sb,
+				    const __le16 *name, u16 hash, size_t len);
+
+
+void exfat_write_time(struct exfat_sb_info *sbi, struct timespec *ts,
+		      __le32 *datetime, u8 *time_cs, u8 *tz_offset)
+{
+	u32 cpu_datetime;
+
+	exfat_time_2exfat(sbi, ts, &cpu_datetime, time_cs, tz_offset);
+	*datetime = __cpu_to_le32(cpu_datetime);
+}
+
+static void exfat_read_time(struct timespec *ts, __le32 datetime, u8 time_cs,
+			    u8 tz_offset)
+{
+	u32 cpu_datetime = __le32_to_cpu(datetime);
+	exfat_time_2unix(ts, cpu_datetime, time_cs, tz_offset);
+}
+
+static int exfat_zero_cluster(struct super_block *sb, u32 cluster, bool sync)
+{
+	sector_t start = exfat_cluster_sector(EXFAT_SB(sb), cluster);
+	sector_t end = start + EXFAT_SB(sb)->sectors_per_cluster;
+	sector_t sect;
+
+	for (sect = start; sect < end; ++sect) {
+		struct buffer_head *bh = sb_bread(sb, sect);
+		if (!bh) {
+			exfat_msg(sb, KERN_WARNING,
+				  "unable to read sector %llu for zeroing.",
+				  (unsigned long long)sect);
+			return -EIO;
+		}
+		memset(bh->b_data, 0, bh->b_size);
+		mark_buffer_dirty(bh);
+		if (sync)
+			sync_dirty_buffer(bh);
+		brelse(bh);
+	}
+	return 0;
+}
+
+/*
+ * use the per-superblock fmask or dmask, depending on the provided
+ * entry attributes, to restrict the provided mode even more.
+ */
+mode_t exfat_make_mode(struct exfat_sb_info *sbi, mode_t mode, u16 attrs)
+{
+	if (attrs & E_EXFAT_ATTR_DIRECTORY)
+		mode = (mode & ~sbi->options.dmask) | S_IFDIR;
+	else
+		mode = (mode & ~sbi->options.fmask) | S_IFREG;
+	if (attrs & E_EXFAT_ATTR_RO)
+		mode &= ~S_IWUGO;
+	return mode;
+}
+
+/*
+ * populate inode fields.
+ */
+static struct inode *exfat_populate_inode(struct super_block *sb,
+			  const struct exfat_filedir_entry *efd,
+			  const struct exfat_stream_extension_entry *esx,
+			  const struct exfat_iloc *iloc)
+{
+	struct exfat_sb_info *sbi = EXFAT_SB(sb);
+	struct inode *inode;
+
+	inode = exfat_iget(sb, iloc->disk_offs[0]);
+	if (inode)
+		return inode;
+
+	inode = new_inode(sb);
+	if (!inode)
+		return NULL;
+
+	inode->i_ino = iunique(sb, EXFAT_ROOT_INO);
+	EXFAT_I(inode)->first_cluster = __le32_to_cpu(esx->first_cluster);
+	EXFAT_I(inode)->flags = esx->flags;
+	EXFAT_I(inode)->iloc = *iloc;
+	EXFAT_I(inode)->attributes = __le16_to_cpu(efd->attributes);
+
+	inode->i_size = __le64_to_cpu(esx->data_length);
+	EXFAT_I(inode)->allocated_clusters = inode->i_size >> sbi->clusterbits;
+	if (inode->i_size & sbi->clustermask)
+		EXFAT_I(inode)->allocated_clusters++;
+	inode->i_blocks = EXFAT_I(inode)->allocated_clusters <<
+		(sbi->clusterbits - 9);
+	EXFAT_I(inode)->mmu_private = inode->i_size;
+
+	inode->i_uid = sbi->options.uid;
+	inode->i_gid = sbi->options.gid;
+	inode->i_mode = exfat_make_mode(sbi, S_IRWXUGO,
+					EXFAT_I(inode)->attributes);
+
+	if (EXFAT_I(inode)->attributes & E_EXFAT_ATTR_DIRECTORY) {
+		loff_t nlinks = exfat_dir_links(inode);
+		if (nlinks < 0)
+			goto iput;
+		set_nlink(inode, nlinks + 2);
+	} else
+		set_nlink(inode, 1);
+
+	if (esx->data_length != esx->valid_data_length)
+		exfat_msg(sb, KERN_WARNING, "data length (%llu) != valid data "
+			  "length (%llu)", __le64_to_cpu(esx->data_length),
+			  __le64_to_cpu(esx->valid_data_length));
+
+	if (S_ISDIR(inode->i_mode)) {
+		inode->i_fop = &exfat_dir_operations;
+		inode->i_op = &exfat_dir_inode_operations;
+	} else {
+		/* until we support write */
+		inode->i_fop = &exfat_file_operations;
+		inode->i_op = &exfat_file_inode_operations;
+		inode->i_data.a_ops = &exfat_address_space_operations;
+	}
+
+	exfat_read_time(&inode->i_ctime, efd->create, efd->create_10ms,
+			efd->create_tz_offset);
+	exfat_read_time(&inode->i_mtime, efd->modified, efd->modified_10ms,
+			efd->modified_tz_offset);
+	exfat_read_time(&inode->i_atime, efd->accessed, 0,
+			efd->accessed_tz_offset);
+
+	exfat_insert_inode_hash(inode);
+	insert_inode_hash(inode);
+	return inode;
+iput:
+	iput(inode);
+	return NULL;
+}
+
+/*
+ * lookup an inode.
+ */
+struct dentry *exfat_inode_lookup(struct inode *parent, struct dentry *dentry,
+				  unsigned int flags)
+{
+	struct super_block *sb = dentry->d_sb;
+	struct exfat_dir_ctx dctx;
+	int error;
+	struct exfat_filedir_entry efd;
+	struct exfat_stream_extension_entry esx;
+	__le16 *name = __getname();
+	__le16 *utf16_name = __getname();
+	int utf16_name_length;
+	__le16 name_hash;
+
+	exfat_lock_super(parent->i_sb);
+
+	if (!name || !utf16_name) {
+		error = -ENOMEM;
+		goto putnames;
+	}
+
+	utf16_name_length = utf8s_to_utf16s(dentry->d_name.name,
+					    dentry->d_name.len,
+					    UTF16_LITTLE_ENDIAN,
+					    utf16_name, 255 + 2);
+	if (utf16_name_length < 0) {
+		error = utf16_name_length;
+		goto putnames;
+	}
+	if (utf16_name_length > 255) {
+		error = -ENAMETOOLONG;
+		goto putnames;
+	}
+
+	/*
+	 * get the name hash of the wanted inode early so that we can
+	 * skip entries with only an efd and an esx entry.
+	 */
+	name_hash = __cpu_to_le16(exfat_filename_hash_cont(sb, utf16_name, 0,
+							   utf16_name_length));
+
+	/*
+	 * create a dir ctx from the parent so that we can iterate on
+	 * it.
+	 */
+	error = exfat_init_dir_ctx(parent, &dctx, 0);
+	if (error)
+		goto putnames;
+
+	for (;;) {
+		u32 name_length;
+		struct inode *inode;
+		u16 calc_checksum;
+		u16 expect_checksum;
+		struct exfat_iloc iloc;
+
+		memset(&iloc, 0, sizeof (iloc));
+		/*
+		 * get filedir and stream extension entries.
+		 */
+		error = exfat_dentry_next(&efd, &dctx, E_EXFAT_FILEDIR, true);
+		if (error < 0)
+			/* end of directory reached, or other error */
+			goto cleanup;
+
+		error = -EINVAL;
+		if (efd.secondary_count > 18)
+			goto cleanup;
+
+		iloc.file_off = exfat_dctx_fpos(&dctx);
+		iloc.disk_offs[0] = exfat_dctx_dpos(&dctx);
+		iloc.nr_secondary = efd.secondary_count + 1;
+
+		error = exfat_dentry_next(&esx, &dctx, E_EXFAT_STREAM_EXT,
+					  false);
+		if (error)
+			goto cleanup;
+
+		if (esx.name_hash != name_hash)
+			/*
+			 * stored name hash is not the same as the
+			 * wanted hash: no point in processing the
+			 * remaining entries for the current efd/esx
+			 * any further.
+			 */
+			continue;
+
+		/*
+		 * now that the hash matches it is ok to update the
+		 * checksum for the efd and esx entries.
+		 */
+		expect_checksum = __le16_to_cpu(efd.set_checksum);
+		calc_checksum = exfat_direntry_checksum(&efd, 0, true);
+
+		calc_checksum = exfat_direntry_checksum(&esx,
+							calc_checksum, false);
+		iloc.disk_offs[1] = exfat_dctx_dpos(&dctx);
+
+		/*
+		 * fetch name.
+		 */
+		name_length = esx.name_length;
+		error = __exfat_get_name(&dctx, name_length, name,
+					 &calc_checksum, &iloc);
+		if (error)
+			goto cleanup;
+
+		if (calc_checksum != expect_checksum) {
+			exfat_msg(dctx.sb, KERN_INFO, "checksum: "
+				  "calculated %04x, expect %04x",
+				  calc_checksum, expect_checksum);
+			error = -EIO;
+			goto cleanup;
+		}
+
+		if (utf16_name_length != name_length)
+			continue;
+
+		if (memcmp(utf16_name, name, name_length * sizeof (__le16)))
+			continue;
+
+		inode = exfat_populate_inode(sb, &efd, &esx, &iloc);
+		if (inode) {
+			d_add(dentry, inode);
+			error = 0;
+		} else
+			error = -EIO;
+		goto cleanup;
+	}
+
+cleanup:
+	exfat_cleanup_dir_ctx(&dctx);
+putnames:
+	if (name)
+		__putname(name);
+	if (utf16_name)
+		__putname(utf16_name);
+	exfat_unlock_super(parent->i_sb);
+	if (error && error != -ENOENT)
+		return ERR_PTR(error);
+	return NULL;
+}
+
+/*
+ * find nr consecutive unused directory entries, i.e. entries whose
+ * type has the in-use bit clear ((type & 0x80) == 0).
+ */
+static int exfat_find_dir_iloc(struct inode *inode, int nr,
+			       struct exfat_iloc *iloc)
+{
+	struct exfat_dir_ctx dctx;
+	bool end = false;
+	int error;
+	struct exfat_sb_info *sbi = EXFAT_SB(inode->i_sb);
+	u32 nr_new_clusters, i;
+	u32 new_clusters[2];
+	u32 hint_cluster;
+
+retry:
+	memset(iloc, 0, sizeof (*iloc));
+	iloc->nr_secondary = nr;
+
+	error = exfat_init_dir_ctx(inode, &dctx, 0);
+	if (error)
+		return error;
+
+	while (1) {
+		int nr_free;
+		void *ent;
+
+		ent = __exfat_dentry_next(&dctx, 0x00, 0x80, true, &end);
+		if (end)
+			break;
+		if (!ent) {
+			exfat_cleanup_dir_ctx(&dctx);
+			return -EIO;
+		}
+
+		nr_free = 1;
+		iloc->file_off = exfat_dctx_fpos(&dctx);
+		iloc->disk_offs[0] = exfat_dctx_dpos(&dctx);
+		while (__exfat_dentry_next(&dctx, 0x00, 0x80, false, &end)
+		       != NULL && nr_free < nr) {
+			iloc->disk_offs[nr_free] = exfat_dctx_dpos(&dctx);
+			++nr_free;
+		}
+		if (nr_free == nr) {
+			/*
+			 * we found enough consecutive free entries.
+			 */
+			exfat_cleanup_dir_ctx(&dctx);
+			return 0;
+		}
+
+	}
+
+	/*
+	 * not enough consecutive free entries found, kick the cluster
+	 * allocator and retry.
+	 */
+	exfat_cleanup_dir_ctx(&dctx);
+
+	/*
+	 * with the smallest cluster size, a directory entry set can
+	 * span more than one cluster. allocate two clusters in that
+	 * case, regardless of what is actually needed, to keep the
+	 * code simpler.
+	 */
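+	/*
+	 * (a full entry set is at most 19 entries of 32 bytes, i.e.
+	 * 608 bytes, which cannot fit in a single 512-byte cluster.)
+	 */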
+	switch (sbi->clustersize) {
+	case 512:
+		nr_new_clusters = 2;
+		break;
+	default:
+		nr_new_clusters = 1;
+		break;
+	}
+
+	/*
+	 * get a hint cluster for the cluster allocator.
+	 */
+	error = exfat_get_cluster_hint(inode, &hint_cluster);
+	if (error)
+		return error;
+
+	/*
+	 * perform the allocation.
+	 */
+	error = exfat_alloc_clusters(inode, hint_cluster, new_clusters,
+				     nr_new_clusters);
+	if (error)
+		return error;
+
+	/*
+	 * fill new cluster(s) with zero.
+	 */
+	for (i = 0; i < nr_new_clusters; ++i)
+		exfat_zero_cluster(inode->i_sb, new_clusters[i], false);
+
+	/*
+	 * update the size and mark the inode dirty so that
+	 * write_inode() can persist its size and the other fields
+	 * updated by exfat_alloc_clusters().
+	 */
+	inode->i_size += nr_new_clusters << sbi->clusterbits;
+	mark_inode_dirty(inode);
+
+	/*
+	 * kick the whole place search again, this time with the newly
+	 * allocated clusters.
+	 */
+	goto retry;
+}
+
+/*
+ * set up dir_entry_buffers from the on-disk locations recorded in
+ * iloc.
+ */
+int exfat_get_dir_entry_buffers(struct inode *dir, struct exfat_iloc *iloc,
+				struct dir_entry_buffer *entries,
+				size_t nr_entries)
+{
+	size_t i;
+	int error;
+	struct exfat_sb_info *sbi = EXFAT_SB(dir->i_sb);
+
+	BUG_ON(iloc->nr_secondary != nr_entries);
+
+	memset(entries, 0, sizeof (*entries) * nr_entries);
+	for (i = 0; i < nr_entries; ++i) {
+		sector_t sector = iloc->disk_offs[i] >> sbi->sectorbits;
+
+		entries[i].off = iloc->disk_offs[i] & sbi->sectormask;
+		entries[i].bh = sb_bread(dir->i_sb, sector);
+		if (!entries[i].bh) {
+			error = -EIO;
+			goto fail;
+		}
+		entries[i].start = entries[i].bh->b_data + entries[i].off;
+	}
+	return 0;
+
+fail:
+	for (i = 0; i < nr_entries; ++i)
+		if (entries[i].bh)
+			brelse(entries[i].bh);
+	return error;
+}
+
+static u16 exfat_filename_hash_cont(struct super_block *sb,
+				    const __le16 *name, u16 hash, size_t len)
+{
+	while (len) {
+		u16 c = __le16_to_cpu(exfat_upcase_convert(sb, *name));
+
+		hash = ((hash << 15) | (hash >> 1)) + (c & 0xff);
+		hash = ((hash << 15) | (hash >> 1)) + (c >> 8);
+		--len;
+		++name;
+	}
+	return hash;
+}
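+
+/*
+ * In other words, the name hash is a 16-bit rotate-right-by-one and
+ * add over the upcased UTF-16 name, processed low byte first. A
+ * plain-C sketch of one 16-bit step, with ror16(h) = (h << 15) | (h >> 1):
+ *
+ *	hash = ror16(hash) + (c & 0xff);
+ *	hash = ror16(hash) + (c >> 8);
+ */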
+
+u16 exfat_dir_entries_checksum(struct dir_entry_buffer *entries, u32 nr)
+{
+	u32 checksum = 0;
+
+	if (nr) {
+		checksum = exfat_direntry_checksum(entries->start,
+						   checksum, true);
+		--nr;
+		++entries;
+	}
+	while (nr) {
+		checksum = exfat_direntry_checksum(entries->start,
+						   checksum, false);
+		--nr;
+		++entries;
+	}
+	return checksum;
+}
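+
+/*
+ * Note: the first entry is hashed with the "first" flag set, which is
+ * expected to make exfat_direntry_checksum() skip bytes 2-3 (the
+ * set_checksum field itself) so the stored checksum never covers its
+ * own storage.
+ */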
+
+/*
+ * set up the exfat_filedir_entry and exfat_stream_extension_entry for
+ * a new entry with attributes attrs and the given name.
+ */
+static void exfat_fill_dir_entries(struct super_block *sb,
+				  struct dir_entry_buffer *entries,
+				  size_t nr_entries, u8 attrs,
+				  __le16 *name, int name_length)
+{
+	struct exfat_filedir_entry *efd;
+	struct exfat_stream_extension_entry *esx;
+	int i;
+	u16 name_hash;
+	u16 checksum;
+	struct timespec ts = CURRENT_TIME_SEC;
+
+	efd = entries[0].start;
+	esx = entries[1].start;
+
+	/*
+	 * fill exfat filedir entry
+	 */
+	memset(efd, 0, sizeof (*efd));
+	efd->type = E_EXFAT_FILEDIR;
+	efd->secondary_count = nr_entries - 1;
+	efd->set_checksum = 0;
+	efd->attributes = __cpu_to_le16(attrs);
+
+	/*
+	 * update file directory entry times
+	 */
+	exfat_write_time(EXFAT_SB(sb), &ts, &efd->create, &efd->create_10ms,
+			 &efd->create_tz_offset);
+	efd->modified = efd->accessed = efd->create;
+	efd->modified_10ms = efd->create_10ms;
+	efd->accessed_tz_offset = efd->modified_tz_offset =
+		efd->create_tz_offset;
+
+	/*
+	 * fill exfat stream extension entry
+	 */
+	memset(esx, 0, sizeof (*esx));
+	esx->type = E_EXFAT_STREAM_EXT;
+	esx->flags = EXFAT_I_ALLOC_POSSIBLE;
+	esx->first_cluster = __cpu_to_le32(0);
+	esx->data_length = __cpu_to_le64(0);
+	esx->valid_data_length = __cpu_to_le64(0);
+	esx->name_length = name_length;
+
+	/*
+	 * fill name fragments.
+	 */
+	name_hash = 0;
+	for (i = 0; i < nr_entries - 2; ++i, name_length -= 15) {
+		struct exfat_filename_entry *efn = entries[i + 2].start;
+		int len = 15;
+
+		if (name_length < 15)
+			len = name_length;
+
+		memset(efn, 0, sizeof (*efn));
+		efn->type = E_EXFAT_FILENAME;
+		memcpy(efn->name_frag, name + i * 15, len * sizeof (__le16));
+		name_hash = exfat_filename_hash_cont(sb, efn->name_frag,
+						     name_hash, len);
+	}
+	esx->name_hash = __cpu_to_le16(name_hash);
+
+	checksum = exfat_dir_entries_checksum(entries, nr_entries);
+	efd->set_checksum = __cpu_to_le16(checksum);
+}
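+
+/*
+ * Resulting on-disk entry set layout, e.g. for a 20-character name
+ * (nr_entries = 2 + DIV_ROUND_UP(20, 15) = 4):
+ *
+ *	entries[0]: filedir entry (E_EXFAT_FILEDIR)
+ *	entries[1]: stream extension (E_EXFAT_STREAM_EXT)
+ *	entries[2]: filename entry (E_EXFAT_FILENAME), chars 1-15
+ *	entries[3]: filename entry (E_EXFAT_FILENAME), chars 16-20
+ */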
+
+/*
+ * mark all buffer heads in the entries array as dirty. optionally
+ * sync them if required.
+ */
+void exfat_dirty_dir_entries(struct dir_entry_buffer *entries,
+			     size_t nr_entries, bool sync)
+{
+	size_t i;
+
+	for (i = 0; i < nr_entries; ++i) {
+		mark_buffer_dirty(entries[i].bh);
+		if (sync)
+			sync_dirty_buffer(entries[i].bh);
+		brelse(entries[i].bh);
+	}
+}
+
+/*
+ * cleanup all buffer heads in entries.
+ */
+static void exfat_cleanup_dir_entries(struct dir_entry_buffer *entries,
+				     size_t nr_entries)
+{
+	size_t i;
+
+	for (i = 0; i < nr_entries; ++i)
+		brelse(entries[i].bh);
+}
+
+/*
+ * create an inode
+ */
+static int __exfat_inode_create(struct inode *dir, struct dentry *dentry,
+				umode_t mode, bool is_dir)
+{
+	int nr_entries;
+	struct dir_entry_buffer entries[19];
+	struct inode *new;
+	struct exfat_iloc iloc;
+	int error;
+	u8 attr = 0;
+	__le16 *utf16_name;
+	int utf16_name_length;
+
+	if (is_dir)
+		attr |= E_EXFAT_ATTR_DIRECTORY;
+
+	exfat_lock_super(dir->i_sb);
+
+	utf16_name = __getname();
+	if (!utf16_name) {
+		error = -ENOMEM;
+		goto unlock_super;
+	}
+
+	utf16_name_length = utf8s_to_utf16s(dentry->d_name.name,
+					    dentry->d_name.len,
+					    UTF16_LITTLE_ENDIAN, utf16_name,
+					    255 + 2);
+	if (utf16_name_length < 0) {
+		error = utf16_name_length;
+		goto putname;
+	}
+	if (utf16_name_length > 255) {
+		error = -ENAMETOOLONG;
+		goto putname;
+	}
+
+	nr_entries = 2 + DIV_ROUND_UP(utf16_name_length, 15);
+	if (nr_entries > 19) {
+		error = -ENAMETOOLONG;
+		goto putname;
+	}
+
+	error = exfat_find_dir_iloc(dir, nr_entries, &iloc);
+	if (error < 0)
+		goto putname;
+
+	error = exfat_get_dir_entry_buffers(dir, &iloc, entries, nr_entries);
+	if (error)
+		goto putname;
+	exfat_fill_dir_entries(dir->i_sb, entries, nr_entries, attr,
+				       utf16_name, utf16_name_length);
+
+	/*
+	 * create an inode with it.
+	 */
+	error = -ENOMEM;
+	new = exfat_populate_inode(dir->i_sb, entries[0].start,
+				   entries[1].start, &iloc);
+	if (!new)
+		goto cleanup;
+	inc_nlink(dir);
+	d_instantiate(dentry, new);
+
+	/*
+	 * update directory mtime / atime.
+	 */
+	dir->i_atime = dir->i_mtime = CURRENT_TIME_SEC;
+	if (IS_DIRSYNC(dir))
+		__exfat_write_inode(dir, true);
+	else
+		mark_inode_dirty(dir);
+
+	/*
+	 * write to disk
+	 */
+	exfat_dirty_dir_entries(entries, nr_entries, false);
+	__putname(utf16_name);
+	exfat_unlock_super(dir->i_sb);
+	return 0;
+
+cleanup:
+	exfat_cleanup_dir_entries(entries, nr_entries);
+putname:
+	__putname(utf16_name);
+unlock_super:
+	exfat_unlock_super(dir->i_sb);
+	return error;
+}
+
+int exfat_inode_create(struct inode *dir, struct dentry *dentry, umode_t mode,
+		       bool excl)
+{
+	return __exfat_inode_create(dir, dentry, mode, false);
+}
+
+int exfat_inode_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
+{
+	return __exfat_inode_create(dir, dentry, mode, true);
+}
+
+/*
+ * inode unlink: fetch all directory entry buffers and clear the
+ * in-use bit (bit 7) of each entry type to mark them as unused.
+ */
+static int __exfat_inode_unlink(struct inode *dir, struct dentry *dentry)
+{
+	struct inode *inode = dentry->d_inode;
+	struct exfat_inode_info *info = EXFAT_I(inode);
+	struct dir_entry_buffer entries[info->iloc.nr_secondary];
+	int error;
+	u32 i;
+
+	error = exfat_get_dir_entry_buffers(inode, &info->iloc,
+					    entries, info->iloc.nr_secondary);
+	if (error)
+		return error;
+
+	for (i = 0; i < info->iloc.nr_secondary; ++i) {
+		u8 *type = entries[i].start;
+
+		*type &= 0x7f;
+	}
+
+	drop_nlink(dir);
+	clear_nlink(inode);
+	inode->i_mtime = inode->i_atime = CURRENT_TIME_SEC;
+
+	/*
+	 * update atime & mtime for parent directory.
+	 */
+	dir->i_mtime = dir->i_atime = CURRENT_TIME_SEC;
+	if (IS_DIRSYNC(dir))
+		__exfat_write_inode(dir, true);
+	else
+		mark_inode_dirty(dir);
+
+	exfat_dirty_dir_entries(entries, info->iloc.nr_secondary, false);
+	exfat_remove_inode_hash(inode);
+	return 0;
+}
+
+int exfat_inode_unlink(struct inode *dir, struct dentry *dentry)
+{
+	int ret;
+
+	exfat_lock_super(dir->i_sb);
+	ret = __exfat_inode_unlink(dir, dentry);
+	exfat_unlock_super(dir->i_sb);
+	return ret;
+}
+
+/*
+ * inode rmdir: check that the link count is not greater than 2
+ * (meaning the directory is empty) and invoke unlink.
+ */
+static int __exfat_inode_rmdir(struct inode *dir, struct dentry *dentry)
+{
+	struct inode *inode = dentry->d_inode;
+
+	if (inode->i_nlink > 2)
+		return -ENOTEMPTY;
+
+	return __exfat_inode_unlink(dir, dentry);
+}
+
+int exfat_inode_rmdir(struct inode *dir, struct dentry *dentry)
+{
+	int ret;
+
+	exfat_lock_super(dir->i_sb);
+	ret = __exfat_inode_rmdir(dir, dentry);
+	exfat_unlock_super(dir->i_sb);
+	return ret;
+}
+
+int exfat_rename(struct inode *old_dir, struct dentry *old_dentry,
+		 struct inode *new_dir, struct dentry *new_dentry)
+{
+	struct inode *old_inode = old_dentry->d_inode;
+	struct inode *new_inode = new_dentry->d_inode;
+	int new_nr_entries;
+	int error = 0;
+	struct exfat_iloc new_iloc;
+	struct exfat_inode_info *old_info = EXFAT_I(old_inode);
+	struct dir_entry_buffer old_buffers[old_info->iloc.nr_secondary];
+	struct dir_entry_buffer new_buffers[19];
+	struct exfat_filedir_entry *efd;
+	struct exfat_stream_extension_entry *esx;
+	int name_length;
+	__le16 *name;
+	u16 name_hash;
+	int i;
+
+	exfat_lock_super(new_dir->i_sb);
+
+	/*
+	 * convert new name to utf16
+	 */
+	name = __getname();
+	if (!name) {
+		error = -ENOMEM;
+		goto unlock_super;
+	}
+	name_length = utf8s_to_utf16s(new_dentry->d_name.name,
+				      new_dentry->d_name.len,
+				      UTF16_LITTLE_ENDIAN, name, 255 + 2);
+
+	if (name_length > 255) {
+		error = -ENAMETOOLONG;
+		goto err_putname;
+	}
+	if (name_length < 0) {
+		error = name_length;
+		goto err_putname;
+	}
+
+	new_nr_entries = 2 + DIV_ROUND_UP(name_length, 15);
+
+	/*
+	 * find space for new entry
+	 */
+	error = exfat_find_dir_iloc(new_dir, new_nr_entries, &new_iloc);
+	if (error < 0)
+		goto err_putname;
+
+	/*
+	 * get buffers for old and new entries.
+	 */
+	error = exfat_get_dir_entry_buffers(old_dir, &old_info->iloc,
+				    old_buffers, old_info->iloc.nr_secondary);
+	if (error < 0)
+		goto err_putname;
+
+	error = exfat_get_dir_entry_buffers(new_dir, &new_iloc, new_buffers,
+					    new_nr_entries);
+	if (error < 0)
+		goto err_cleanup_old_buffers;
+
+	/*
+	 * remove new inode, if it exists.
+	 */
+	if (new_inode) {
+		if (S_ISDIR(new_inode->i_mode))
+			error = __exfat_inode_rmdir(new_dir, new_dentry);
+		else
+			error = __exfat_inode_unlink(new_dir, new_dentry);
+		if (error < 0)
+			goto err_cleanup_new_buffers;
+	}
+
+	/*
+	 * copy the old efd to the new one (and ditto for the esx).
+	 */
+	efd = new_buffers[0].start;
+	esx = new_buffers[1].start;
+	memcpy(efd, old_buffers[0].start, sizeof (*efd));
+	memcpy(esx, old_buffers[1].start, sizeof (*esx));
+
+	efd->secondary_count = new_nr_entries - 1;
+
+	/*
+	 * patch in the new name length (a single byte field, as in
+	 * exfat_fill_dir_entries()).
+	 */
+	esx->name_length = name_length;
+
+	/*
+	 * fill name fragments.
+	 */
+	name_hash = 0;
+	for (i = 0; i < new_nr_entries - 2; ++i, name_length -= 15) {
+		struct exfat_filename_entry *efn = new_buffers[i + 2].start;
+		int len = 15;
+
+		if (name_length < 15)
+			len = name_length;
+
+		memset(efn, 0, sizeof (*efn));
+		efn->type = E_EXFAT_FILENAME;
+		memcpy(efn->name_frag, name + i * 15, len * sizeof (__le16));
+		name_hash = exfat_filename_hash_cont(new_dir->i_sb,
+						     efn->name_frag,
+						     name_hash, len);
+	}
+	__putname(name);
+	esx->name_hash = __cpu_to_le16(name_hash);
+	efd->set_checksum =
+		__cpu_to_le16(exfat_dir_entries_checksum(new_buffers,
+							 new_nr_entries));
+
+	/*
+	 * mark old buffer entries as unused.
+	 */
+	for (i = 0; i < old_info->iloc.nr_secondary; ++i)
+		*((u8*)old_buffers[i].start) &= 0x7f;
+
+	/*
+	 * dirty old & new entries buffers.
+	 */
+	exfat_dirty_dir_entries(new_buffers, new_nr_entries, false);
+	exfat_dirty_dir_entries(old_buffers, old_info->iloc.nr_secondary,
+				false);
+
+	/*
+	 * update link counts if new_dir and old_dir are different.
+	 */
+	if (new_dir != old_dir) {
+		drop_nlink(old_dir);
+		inc_nlink(new_dir);
+	}
+
+	/*
+	 * make old inode use the new iloc, and update sb inode hash.
+	 */
+	exfat_remove_inode_hash(old_inode);
+	old_info->iloc = new_iloc;
+	exfat_insert_inode_hash(old_inode);
+
+	/*
+	 * update new dir & old dir mtime/atime
+	 */
+	if (new_dir == old_dir) {
+		new_dir->i_mtime = new_dir->i_atime = CURRENT_TIME_SEC;
+		if (IS_DIRSYNC(new_dir))
+			__exfat_write_inode(new_dir, true);
+		else
+			mark_inode_dirty(new_dir);
+	} else {
+		new_dir->i_mtime = new_dir->i_atime =
+			old_dir->i_mtime = old_dir->i_atime = CURRENT_TIME_SEC;
+		if (IS_DIRSYNC(new_dir)) {
+			__exfat_write_inode(new_dir, true);
+			__exfat_write_inode(old_dir, true);
+		} else {
+			mark_inode_dirty(new_dir);
+			mark_inode_dirty(old_dir);
+		}
+	}
+
+	exfat_unlock_super(new_dir->i_sb);
+	return 0;
+
+err_cleanup_new_buffers:
+	exfat_cleanup_dir_entries(new_buffers, new_nr_entries);
+err_cleanup_old_buffers:
+	exfat_cleanup_dir_entries(old_buffers, old_info->iloc.nr_secondary);
+err_putname:
+	__putname(name);
+unlock_super:
+	exfat_unlock_super(new_dir->i_sb);
+	return error;
+}
diff -Nruw linux-4.4.115-fbx/fs/exfat./read-write.c linux-4.4.115-fbx/fs/exfat/read-write.c
--- linux-4.4.115-fbx/fs/exfat./read-write.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/fs/exfat/read-write.c	2019-01-25 20:32:47.247752405 +0100
@@ -0,0 +1,150 @@
+/*
+ * read-write.c for exfat
+ * Created by <nschichan@freebox.fr> on Wed Jul 31 16:37:51 2013
+ */
+
+#include <linux/slab.h>
+#include <linux/kernel.h>
+#include <linux/fs.h>
+#include <linux/mpage.h>
+#include <linux/buffer_head.h>
+
+#include "exfat.h"
+#include "exfat_fs.h"
+
+/*
+ * map file sector to disk sector.
+ */
+static int exfat_bmap(struct inode *inode, sector_t fsect, sector_t *dsect)
+{
+	struct exfat_sb_info *sbi = EXFAT_SB(inode->i_sb);
+	struct exfat_inode_info *info = EXFAT_I(inode);
+	u32 cluster_nr = fsect >> (sbi->clusterbits - sbi->sectorbits);
+	u32 cluster;
+	unsigned int offset = fsect & (sbi->sectors_per_cluster - 1);
+
+	if (info->flags & EXFAT_I_FAT_INVALID)
+		cluster = info->first_cluster + cluster_nr;
+	else {
+		int error;
+
+		error = exfat_get_fat_cluster(inode, cluster_nr, &cluster);
+		if (error)
+			return error;
+	}
+
+	*dsect = exfat_cluster_sector(sbi, cluster) + offset;
+	return 0;
+}
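+
+/*
+ * For example, with 512-byte sectors and 8 sectors per cluster
+ * (clusterbits - sectorbits = 3), fsect 19 maps to cluster_nr
+ * 19 >> 3 = 2, at sector offset 19 & 7 = 3 within that cluster.
+ */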
+
+static int exfat_get_block(struct inode *inode, sector_t block,
+			   struct buffer_head *bh, int create)
+{
+	struct exfat_sb_info *sbi = EXFAT_SB(inode->i_sb);
+	struct exfat_inode_info *info = EXFAT_I(inode);
+	sector_t last_block;
+	unsigned int offset;
+	sector_t dblock;
+	int error;
+
+	last_block = (i_size_read(inode) + sbi->sectorsize - 1) >>
+		sbi->sectorbits;
+	offset = block & (sbi->sectors_per_cluster - 1);
+
+	if (!create && block >= last_block)
+		return 0;
+
+	if (create && block >= last_block && offset == 0) {
+		u32 hint, cluster;
+
+		/*
+		 * request for the first sector of the cluster
+		 * immediately following the last allocated cluster of
+		 * the file: must allocate a new cluster.
+		 */
+		error = exfat_get_cluster_hint(inode, &hint);
+		if (error)
+			return error;
+
+		error = exfat_alloc_clusters(inode, hint, &cluster, 1);
+		if (error)
+			return error;
+	}
+
+	error = exfat_bmap(inode, block, &dblock);
+	if (error)
+		return error;
+
+	if (create && block >= last_block) {
+		/*
+		 * currently in create mode: we need to update
+		 * mmu_private.
+		 */
+		info->mmu_private += sbi->sectorsize;
+		set_buffer_new(bh);
+	}
+	map_bh(bh, inode->i_sb, dblock);
+	return 0;
+}
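+
+/*
+ * mmu_private tracks how many bytes of the file are initialized on
+ * disk: cont_write_begin() (see exfat_write_begin() below) takes a
+ * pointer to it and zeroes the range between the old initialized size
+ * and the write position when writing beyond it.
+ */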
+
+int exfat_readpage(struct file *file, struct page *page)
+{
+	return mpage_readpage(page, exfat_get_block);
+}
+
+int exfat_readpages(struct file *file, struct address_space *mapping,
+		    struct list_head *pages, unsigned nr_pages)
+{
+	return mpage_readpages(mapping, pages, nr_pages, exfat_get_block);
+}
+
+static int exfat_write_error(struct inode *inode, loff_t to)
+{
+	if (to > inode->i_size) {
+		truncate_pagecache(inode, to);
+		exfat_truncate_blocks(inode, inode->i_size);
+	}
+	return 0;
+}
+
+int exfat_write_begin(struct file *file, struct address_space *mapping,
+		      loff_t pos, unsigned len, unsigned flags,
+		      struct page **pagep, void **fsdata)
+{
+	struct inode *inode = mapping->host;
+	int error;
+
+	*pagep = NULL;
+	error = cont_write_begin(file, mapping, pos, len, flags, pagep, fsdata,
+				 exfat_get_block, &EXFAT_I(inode)->mmu_private);
+
+	if (error)
+		exfat_write_error(inode, pos + len);
+	return error;
+}
+
+int exfat_write_end(struct file *file, struct address_space *mapping,
+		    loff_t pos, unsigned len, unsigned copied,
+		    struct page *page, void *fsdata)
+{
+	struct inode *inode = mapping->host;
+	int error;
+
+	error = generic_write_end(file, mapping, pos, len, copied, page,
+				  fsdata);
+
+	if (error < len)
+		exfat_write_error(inode, pos + len);
+	return error;
+}
+
+int exfat_writepage(struct page *page, struct writeback_control *wbc)
+{
+	return block_write_full_page(page, exfat_get_block, wbc);
+}
+
+int exfat_writepages(struct address_space *mapping,
+		     struct writeback_control *wbc)
+{
+	return mpage_writepages(mapping, wbc, exfat_get_block);
+}
diff -Nruw linux-4.4.115-fbx/fs/exfat./super.c linux-4.4.115-fbx/fs/exfat/super.c
--- linux-4.4.115-fbx/fs/exfat./super.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/fs/exfat/super.c	2019-01-25 20:32:47.247752405 +0100
@@ -0,0 +1,735 @@
+/*
+ * super.c for exfat
+ * Created by <nschichan@freebox.fr> on Tue Jul 23 12:33:53 2013
+ */
+
+#include <linux/kernel.h>
+#include <linux/bug.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/slab.h>
+#include <linux/buffer_head.h>
+#include <linux/statfs.h>
+#include <linux/parser.h>
+#include <linux/seq_file.h>
+#include <linux/sched.h>
+#include <linux/cred.h>
+
+#include "exfat_fs.h"
+#include "exfat.h"
+
+
+#define PFX	"exFAT: "
+
+static void exfat_put_super(struct super_block *sb);
+static int exfat_statfs(struct dentry *dentry, struct kstatfs *kstat);
+static int exfat_show_options(struct seq_file *m, struct dentry *root);
+static int exfat_remount(struct super_block *sb, int *flags, char *opts);
+
+static const struct super_operations exfat_super_ops = {
+	.alloc_inode	= exfat_alloc_inode,
+	.destroy_inode	= exfat_destroy_inode,
+	.drop_inode	= exfat_drop_inode,
+	.evict_inode	= exfat_evict_inode,
+	.write_inode	= exfat_write_inode,
+	.statfs         = exfat_statfs,
+	.put_super      = exfat_put_super,
+	.show_options	= exfat_show_options,
+	.remount_fs	= exfat_remount,
+};
+
+const struct file_operations exfat_dir_operations = {
+	.llseek = generic_file_llseek,
+	.read = generic_read_dir,
+	.iterate = exfat_iterate,
+	.unlocked_ioctl	= exfat_ioctl,
+};
+
+const struct file_operations exfat_file_operations = {
+	.llseek		= generic_file_llseek,
+	.read_iter	= generic_file_read_iter,
+	.write_iter	= generic_file_write_iter,
+	.mmap		= generic_file_mmap,
+	.splice_read	= generic_file_splice_read,
+	.unlocked_ioctl	= exfat_ioctl,
+	.fsync		= generic_file_fsync,
+};
+
+const struct inode_operations exfat_dir_inode_operations =
+{
+	.create = exfat_inode_create,
+	.mkdir	= exfat_inode_mkdir,
+	.lookup = exfat_inode_lookup,
+	.rmdir	= exfat_inode_rmdir,
+	.unlink	= exfat_inode_unlink,
+	.rename	= exfat_rename,
+	.setattr = exfat_setattr,
+	.getattr = exfat_getattr,
+};
+
+const struct inode_operations exfat_file_inode_operations = {
+	.setattr = exfat_setattr,
+	.getattr = exfat_getattr,
+};
+
+const struct address_space_operations exfat_address_space_operations = {
+	.readpage	= exfat_readpage,
+	.readpages	= exfat_readpages,
+	.write_begin	= exfat_write_begin,
+	.write_end	= exfat_write_end,
+	.writepage	= exfat_writepage,
+	.writepages	= exfat_writepages,
+};
+
+void exfat_msg(struct super_block *sb, const char *prefix,
+		const char *fmt, ...)
+{
+	struct va_format vaf;
+	va_list args;
+
+	va_start(args, fmt);
+	vaf.fmt = fmt;
+	vaf.va = &args;
+	printk("%sexFAT-fs (%s): %pV\n", prefix, sb->s_id, &vaf);
+	va_end(args);
+}
+
+void exfat_fs_error(struct super_block *sb, const char *fmt, ...)
+{
+	struct exfat_sb_info *sbi = EXFAT_SB(sb);
+	struct va_format vaf;
+	va_list args;
+
+	va_start(args, fmt);
+	vaf.fmt = fmt;
+	vaf.va = &args;
+	exfat_msg(sb, KERN_ERR, "error: %pV", &vaf);
+	va_end(args);
+
+	if (sbi->options.error_action == EXFAT_ERROR_ACTION_REMOUNT_RO &&
+	    !(sb->s_flags & MS_RDONLY)) {
+		sb->s_flags |= MS_RDONLY;
+		exfat_msg(sb, KERN_ERR, "remounted read-only due to fs error.");
+	} else if (sbi->options.error_action == EXFAT_ERROR_ACTION_PANIC)
+		panic("exFAT-fs (%s): panic due fs error.\n", sb->s_id);
+}
+
+/*
+ * process checksum on buffer head. first indicates if the special
+ * treatment of the first sector needs to be done or not.
+ *
+ * first sector can be changed (volume flags, and heap use percent),
+ * those fields are excluded from the checksum to allow updating
+ * without recalculating the checksum.
+ */
+static u32 exfat_sb_checksum_process(struct buffer_head *bh, u32 checksum,
+				     unsigned int size,
+				     bool first)
+{
+	unsigned int i;
+
+	for (i = 0; i < size; ++i) {
+		if (first && (i == 106 || i == 107 || i == 112))
+			continue;
+		checksum = ((checksum << 31) | (checksum >> 1)) +
+			(unsigned char)bh->b_data[i];
+	}
+	return checksum;
+}
+
+static int exfat_check_sb_checksum(struct super_block *sb)
+{
+	struct exfat_sb_info *sbi = EXFAT_SB(sb);
+	u32 checksum;
+	int i;
+	int err;
+	struct buffer_head *bh[EXFAT_CHECKSUM_SECTORS + 1];
+
+	/*
+	 * fetch needed sectors, reuse first sector from sbi.
+	 */
+	err = -ENOMEM;
+	memset(bh, 0, sizeof (struct buffer_head*) *
+	       (EXFAT_CHECKSUM_SECTORS + 1));
+	bh[0] = sbi->sb_bh;
+	for (i = 1; i < EXFAT_CHECKSUM_SECTORS + 1; ++i) {
+		bh[i] = sb_bread(sb, i);
+		if (!bh[i])
+			goto out;
+	}
+
+	/*
+	 * calculate checksum.
+	 */
+	checksum = exfat_sb_checksum_process(bh[0], 0, sbi->sectorsize, true);
+	for (i = 1; i < EXFAT_CHECKSUM_SECTORS; ++i) {
+		checksum = exfat_sb_checksum_process(bh[i], checksum,
+						     sbi->sectorsize, false);
+	}
+
+	/*
+	 * compare with the checksum sector.
+	 */
+	err = -EINVAL;
+	for (i = 0; i < sbi->sectorsize; i += sizeof (u32)) {
+		__le32 val = *(__le32 *)(bh[EXFAT_CHECKSUM_SECTORS]->b_data + i);
+
+		if (__le32_to_cpu(val) != checksum) {
+			exfat_msg(sb, KERN_INFO, "at offset %i, checksum "
+				  "%08x != %08x", i, __le32_to_cpu(val), checksum);
+			goto out;
+		}
+	}
+	err = 0;
+
+out:
+	/* bh[0] belongs to sbi, release only the sectors read here */
+	for (i = 1; i < EXFAT_CHECKSUM_SECTORS + 1; ++i)
+		if (bh[i])
+			brelse(bh[i]);
+	return err;
+}
+
+static int exfat_check_sb(struct super_block *sb)
+{
+	struct exfat_sb_info *sbi = EXFAT_SB(sb);
+	struct exfat_vbr *vbr = sbi->vbr;
+	u16 fs_rev;
+	u16 flags;
+	int active_fat;
+	u16 num_fats;
+
+	if (memcmp(vbr->jump, "\xeb\x76\x90", sizeof (vbr->jump))) {
+		exfat_msg(sb, KERN_INFO, "invalid jump field in vbr.");
+		return -EINVAL;
+	}
+
+	if (memcmp(vbr->fsname, "EXFAT   ", 8)) {
+		exfat_msg(sb, KERN_INFO, "invalid fsname field in vbr: %s.",
+			  vbr->fsname);
+		return -EINVAL;
+	}
+
+	fs_rev = __le16_to_cpu(vbr->fs_rev);
+	if (fs_rev != 0x0100) {
+		exfat_msg(sb, KERN_INFO, "filesystem version invalid: "
+			  "have 0x%04x, need 0x0100", fs_rev);
+		return -EINVAL;
+	}
+
+	flags = __le16_to_cpu(vbr->volume_flags);
+	active_fat = exfat_active_fat(flags);
+	if (active_fat != 0) {
+		exfat_msg(sb, KERN_INFO, "filesystems with active fat > 0 are "
+			  "not supported.");
+		return -EINVAL;
+	}
+
+	if (flags & EXFAT_FLAG_MEDIA_FAILURE)
+		exfat_msg(sb, KERN_WARNING, "filesystem had media failure(s)");
+
+	/*
+	 * bytes per sector is in the range 2^9 - 2^12 (512 - 4096)
+	 */
+	if (vbr->bytes_per_sector < 9 || vbr->bytes_per_sector > 12) {
+		exfat_msg(sb, KERN_ERR, "invalid bytes per sector: %u",
+			  (1 << vbr->bytes_per_sector));
+		return -EINVAL;
+	}
+
+	/*
+	 * sectors per cluster can be as low as 0, and must not result
+	 * in a cluster size larger than 32MB (bytes_per_sector +
+	 * sectors_per_cluster must not be greater than 25)
+	 */
+	if (vbr->bytes_per_sector + vbr->sectors_per_cluster > 25) {
+		exfat_msg(sb, KERN_ERR, "invalid cluster size: %u",
+		  1 << (vbr->bytes_per_sector + vbr->sectors_per_cluster));
+		return -EINVAL;
+	}
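+	/*
+	 * e.g. bytes_per_sector = 12 (4096-byte sectors) allows
+	 * sectors_per_cluster up to 13: 1 << (12 + 13) = 32MB.
+	 */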
+
+	num_fats = __le16_to_cpu(vbr->fat_num);
+	if (num_fats == 0) {
+		exfat_msg(sb, KERN_ERR, "superblock reports no FAT.");
+		return -EINVAL;
+	}
+	if (num_fats > 1) {
+		exfat_msg(sb, KERN_ERR, "TexFAT is not supported.");
+		return -EINVAL;
+	}
+
+	if (memcmp(vbr->boot_sig, "\x55\xaa", 2)) {
+		exfat_msg(sb, KERN_ERR, "invalid end boot signature: %02x%02x.",
+			  vbr->boot_sig[0], vbr->boot_sig[1]);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int exfat_fill_root(struct super_block *sb, struct inode *root)
+{
+	struct exfat_sb_info *sbi = EXFAT_SB(sb);
+	u32 nclust;
+	u32 dummy;
+	loff_t links;
+
+	root->i_ino = EXFAT_ROOT_INO;
+	root->i_version = 1;
+	EXFAT_I(root)->first_cluster =
+		__le32_to_cpu(sbi->root_dir_cluster);
+	EXFAT_I(root)->attributes = E_EXFAT_ATTR_DIRECTORY;
+
+	root->i_uid = sbi->options.uid;
+	root->i_gid = sbi->options.gid;
+
+	root->i_mode = exfat_make_mode(sbi, S_IRWXUGO, E_EXFAT_ATTR_DIRECTORY);
+	root->i_version++;
+	root->i_generation = 0;
+
+	root->i_op = &exfat_dir_inode_operations;
+	root->i_fop = &exfat_dir_operations;
+
+	/*
+	 * root inode cannot use bitmap.
+	 */
+	EXFAT_I(root)->flags = EXFAT_I_ALLOC_POSSIBLE;
+
+	/*
+	 * set i_size
+	 */
+	nclust = 0;
+	while (__exfat_get_fat_cluster(root, nclust, &dummy, false) == 0)
+		++nclust;
+	root->i_size = nclust << sbi->clusterbits;
+	root->i_blocks = nclust << (sbi->clusterbits - 9);
+	EXFAT_I(root)->allocated_clusters = nclust;
+
+	/*
+	 * +2 to account for '.' and '..'
+	 */
+	links = exfat_dir_links(root);
+	if (links < 0)
+		return links;
+	set_nlink(root, links + 2);
+
+	root->i_mtime = root->i_atime = root->i_ctime = CURRENT_TIME_SEC;
+
+	return 0;
+}
+
+static loff_t exfat_file_max_byte(struct exfat_sb_info *sbi)
+{
+	u32 max_clusters = EXFAT_CLUSTER_LASTVALID -
+		EXFAT_CLUSTER_FIRSTVALID + 1;
+
+	return (loff_t)max_clusters << sbi->clusterbits;
+}
+
+static int exfat_show_options(struct seq_file *m, struct dentry *root)
+{
+	struct exfat_sb_info *sbi = EXFAT_SB(root->d_inode->i_sb);
+
+	if (!uid_eq(sbi->options.uid, GLOBAL_ROOT_UID))
+		seq_printf(m, ",uid=%u",
+			   from_kuid_munged(&init_user_ns, sbi->options.uid));
+	if (!gid_eq(sbi->options.gid, GLOBAL_ROOT_GID))
+		seq_printf(m, ",gid=%u",
+			   from_kgid_munged(&init_user_ns, sbi->options.gid));
+
+	seq_printf(m, ",fmask=%04o", sbi->options.fmask);
+	seq_printf(m, ",dmask=%04o", sbi->options.dmask);
+
+	if (sbi->options.time_offset_set)
+		seq_printf(m, ",time_offset=%d", sbi->options.time_offset);
+
+	switch (sbi->options.error_action) {
+	case EXFAT_ERROR_ACTION_PANIC:
+		seq_printf(m, ",errors=panic");
+		break;
+	case EXFAT_ERROR_ACTION_REMOUNT_RO:
+		seq_printf(m, ",errors=remount-ro");
+		break;
+	default:
+		seq_printf(m, ",errors=continue");
+		break;
+	}
+
+	return 0;
+}
+
+enum {
+	Opt_exfat_uid,
+	Opt_exfat_gid,
+	Opt_exfat_dmask,
+	Opt_exfat_fmask,
+	Opt_exfat_time_offset,
+	Opt_exfat_error_continue,
+	Opt_exfat_error_remount_ro,
+	Opt_exfat_error_panic,
+	Opt_exfat_err,
+};
+
+static const match_table_t exfat_tokens = {
+	{ Opt_exfat_uid, "uid=%u", },
+	{ Opt_exfat_gid, "gid=%u", },
+	{ Opt_exfat_dmask, "dmask=%04o", },
+	{ Opt_exfat_fmask, "fmask=%04o", },
+	{ Opt_exfat_time_offset, "time_offset=%d", },
+	{ Opt_exfat_error_continue, "errors=continue", },
+	{ Opt_exfat_error_remount_ro, "errors=remount-ro", },
+	{ Opt_exfat_error_panic, "errors=panic", },
+	{ Opt_exfat_err, NULL },
+};
+
+static int exfat_parse_options(struct super_block *sb, char *opts, int silent)
+{
+	struct exfat_sb_info *sbi = EXFAT_SB(sb);
+	char *p;
+
+	sbi->options.uid = current_uid();
+	sbi->options.gid = current_gid();
+
+	sbi->options.dmask = current_umask();
+	sbi->options.fmask = current_umask();
+	sbi->options.time_offset_set = 0;
+	sbi->options.error_action = EXFAT_ERROR_ACTION_CONTINUE;
+
+	while (1) {
+		int token;
+		substring_t args[MAX_OPT_ARGS];
+		int optval;
+
+		p = strsep(&opts, ",");
+		if (!p)
+			break;
+		token = match_token(p, exfat_tokens, args);
+
+		switch (token) {
+		case Opt_exfat_uid:
+			if (match_int(&args[0], &optval))
+				return -EINVAL;
+			sbi->options.uid = make_kuid(current_user_ns(), optval);
+			break;
+
+		case Opt_exfat_gid:
+			if (match_int(&args[0], &optval))
+				return -EINVAL;
+			sbi->options.gid = make_kgid(current_user_ns(), optval);
+			break;
+
+		case Opt_exfat_dmask:
+			if (match_octal(&args[0], &optval))
+				return -EINVAL;
+			sbi->options.dmask = optval;
+			break;
+
+		case Opt_exfat_fmask:
+			if (match_octal(&args[0], &optval))
+				return -EINVAL;
+			sbi->options.fmask = optval;
+			break;
+
+		case Opt_exfat_time_offset:
+			if (match_int(&args[0], &optval))
+				return -EINVAL;
+			if (optval < -12 * 60 || optval > 12 * 60) {
+				if (!silent)
+					exfat_msg(sb, KERN_INFO, "invalid "
+						  "time_offset value %d: "
+						  "should be between %d and %d",
+						  optval, -12 * 60, 12 * 60);
+				return -EINVAL;
+			}
+			sbi->options.time_offset = optval;
+			sbi->options.time_offset_set = 1;
+			break;
+
+		case Opt_exfat_error_continue:
+			sbi->options.error_action = EXFAT_ERROR_ACTION_CONTINUE;
+			break;
+
+		case Opt_exfat_error_remount_ro:
+			sbi->options.error_action =
+				EXFAT_ERROR_ACTION_REMOUNT_RO;
+			break;
+
+		case Opt_exfat_error_panic:
+			sbi->options.error_action = EXFAT_ERROR_ACTION_PANIC;
+			break;
+
+		default:
+			if (!silent)
+				exfat_msg(sb, KERN_INFO, "unrecognized mount "
+					  "option \"%s\" or missing parameter.",
+					  p);
+			return -EINVAL;
+		}
+	}
+	return 0;
+}
+
+static void exfat_set_sb_dirty(struct super_block *sb, bool set, bool force)
+{
+	struct exfat_sb_info *sbi = EXFAT_SB(sb);
+	u16 flags;
+
+	/*
+	 * do not change anything if mounted read-only and not forced.
+	 * the forced case happens during remount.
+	 */
+	if ((sb->s_flags & MS_RDONLY) && !force)
+		return;
+
+	if (sbi->dirty) {
+		if (set)
+			exfat_msg(sb, KERN_WARNING, "volume was not cleanly "
+				  "unmounted; running fsck is recommended.");
+		return;
+	}
+
+	flags = __le16_to_cpu(sbi->vbr->volume_flags);
+	if (set)
+		flags |= EXFAT_FLAG_DIRTY;
+	else
+		flags &= ~EXFAT_FLAG_DIRTY;
+	sbi->vbr->volume_flags = __cpu_to_le16(flags);
+
+	mark_buffer_dirty(sbi->sb_bh);
+	sync_dirty_buffer(sbi->sb_bh);
+}
+
+static int exfat_remount(struct super_block *sb, int *flags, char *opts)
+{
+	int new_rdonly = *flags & MS_RDONLY;
+
+	if (new_rdonly != (sb->s_flags & MS_RDONLY)) {
+		if (new_rdonly)
+			exfat_set_sb_dirty(sb, false, false);
+		else
+			/*
+			 * sb->s_flags still has MS_RDONLY, so we need
+			 * to force the dirty state
+			 */
+			exfat_set_sb_dirty(sb, true, true);
+	}
+	return 0;
+}
+
+static int exfat_fill_super(struct super_block *sb, void *data, int silent)
+{
+	struct exfat_sb_info *sbi = NULL;
+	int ret = -ENOMEM;
+	struct inode *root = NULL;
+	int i;
+
+	sbi = kzalloc(sizeof (*sbi), GFP_KERNEL);
+	if (!sbi)
+		return -ENOMEM;
+
+	sb->s_fs_info = sbi;
+	if (exfat_parse_options(sb, data, silent) < 0) {
+		ret = -EINVAL;
+		goto fail;
+	}
+
+	mutex_init(&sbi->sb_mutex);
+	spin_lock_init(&sbi->inode_hash_lock);
+
+	/*
+	 * first block, before we know sector size.
+	 */
+	sbi->sb_bh = sb_bread(sb, 0);
+	if (!sbi->sb_bh)
+		goto fail;
+
+	sbi->vbr = (struct exfat_vbr*)sbi->sb_bh->b_data;
+	sb->s_op = &exfat_super_ops;
+
+	ret = exfat_check_sb(sb);
+	if (ret)
+		goto fail;
+
+	/*
+	 * vbr seems sane, fill sbi.
+	 */
+	sbi->sectorsize = (1 << sbi->vbr->bytes_per_sector);
+	sbi->clustersize = sbi->sectorsize *
+		(1 << sbi->vbr->sectors_per_cluster);
+
+	sbi->sectors_per_cluster = sbi->clustersize / sbi->sectorsize;
+
+	sbi->sectorbits = sbi->vbr->bytes_per_sector;
+	sbi->clusterbits = sbi->vbr->sectors_per_cluster + sbi->sectorbits;
+	sbi->sectormask = sbi->sectorsize - 1;
+	sbi->clustermask = sbi->clustersize - 1;
+
+	sbi->fat_offset = __le32_to_cpu(sbi->vbr->fat_offset);
+	sbi->fat_length = __le32_to_cpu(sbi->vbr->fat_length);
+
+	sbi->root_dir_cluster = __le32_to_cpu(sbi->vbr->cluster_root_dir);
+
+	sbi->cluster_heap_offset = __le32_to_cpu(sbi->vbr->cluster_heap_offset);
+	sbi->cluster_count = __le32_to_cpu(sbi->vbr->cluster_count);
+
+	sbi->dirty = !!(__le16_to_cpu(sbi->vbr->volume_flags) &
+			EXFAT_FLAG_DIRTY);
+
+	/*
+	 * now that we know sector size, reread superblock with
+	 * correct sector size.
+	 */
+	ret = -EIO;
+	if (sb->s_blocksize != sbi->sectorsize) {
+		if (!sb_set_blocksize(sb, sbi->sectorsize)) {
+			exfat_msg(sb, KERN_INFO, "bad block size %d.",
+				  sbi->sectorsize);
+			goto fail;
+		}
+
+		brelse(sbi->sb_bh);
+		sbi->vbr = NULL;
+
+		sbi->sb_bh = sb_bread(sb, 0);
+		if (!sbi->sb_bh)
+			goto fail;
+		sbi->vbr = (struct exfat_vbr*)sbi->sb_bh->b_data;
+		sb->s_fs_info = sbi;
+	}
+
+	ret = exfat_check_sb_checksum(sb);
+	if (ret)
+		goto fail;
+
+	sb->s_maxbytes = exfat_file_max_byte(sbi);
+
+	ret = exfat_init_fat(sb);
+	if (ret)
+		goto fail;
+
+	for (i = 0; i < EXFAT_HASH_SIZE; ++i)
+		INIT_HLIST_HEAD(&sbi->inode_hash[i]);
+
+	/*
+	 * create root inode.
+	 */
+	ret = -ENOMEM;
+	root = new_inode(sb);
+	if (!root)
+		goto fail;
+
+	ret = exfat_fill_root(sb, root);
+	if (ret)
+		goto fail_iput;
+
+	ret = exfat_upcase_init(root);
+	if (ret)
+		goto fail_iput;
+
+	ret = exfat_init_bitmap(root);
+	if (ret)
+		goto fail_iput;
+
+	ret = -ENOMEM;
+	sb->s_root = d_make_root(root);
+	if (!sb->s_root)
+		/* d_make_root() already iput() the inode on failure */
+		goto fail;
+
+	exfat_set_sb_dirty(sb, true, false);
+	return 0;
+
+fail_iput:
+	iput(root);
+
+fail:
+	if (sbi->sb_bh)
+		brelse(sbi->sb_bh);
+	kfree(sbi);
+	return ret;
+}
+
+static struct dentry *exfat_mount(struct file_system_type *fstype,
+				  int flags, const char *dev_name, void *data)
+{
+	return mount_bdev(fstype, flags, dev_name, data, exfat_fill_super);
+}
+
+static void exfat_put_super(struct super_block *sb)
+{
+	struct exfat_sb_info *sbi;
+
+	sbi = EXFAT_SB(sb);
+	if (sbi) {
+		exfat_set_sb_dirty(sb, false, false);
+		exfat_exit_bitmap(sb);
+		brelse(sbi->sb_bh);
+		kfree(sbi->upcase_table);
+		kfree(sbi);
+	}
+}
+
+static int exfat_statfs(struct dentry *dentry, struct kstatfs *kstat)
+{
+	struct super_block *sb = dentry->d_inode->i_sb;
+	struct exfat_sb_info *sbi = EXFAT_SB(sb);
+	u64 id = huge_encode_dev(sb->s_bdev->bd_dev);
+
+	memset(kstat, 0, sizeof (*kstat));
+
+	kstat->f_bsize = sbi->clustersize;
+	kstat->f_blocks = sbi->cluster_count;
+	kstat->f_bfree = sbi->free_clusters;
+	kstat->f_bavail = sbi->free_clusters;
+	kstat->f_namelen = 255;
+	kstat->f_fsid.val[0] = (u32)id;
+	kstat->f_fsid.val[1] = (u32)(id >> 32);
+
+	return 0;
+}
+
+static struct file_system_type exfat_fs_type = {
+	.owner		= THIS_MODULE,
+	.name		= "exfat",
+	.mount		= exfat_mount,
+	.kill_sb	= kill_block_super,
+	.fs_flags	= FS_REQUIRES_DEV,
+};
+
+static int __init exfat_init(void)
+{
+	int error;
+
+	/* some sanity check on internal structure sizes */
+	BUILD_BUG_ON(sizeof (struct exfat_vbr) != 512);
+
+	BUILD_BUG_ON(sizeof (struct exfat_volume_label_entry) != 0x20);
+	BUILD_BUG_ON(sizeof (struct exfat_bitmap_entry) != 0x20);
+	BUILD_BUG_ON(sizeof (struct exfat_upcase_entry) != 0x20);
+	BUILD_BUG_ON(sizeof (struct exfat_guid_entry) != 0x20);
+	BUILD_BUG_ON(sizeof (struct exfat_padding_entry) != 0x20);
+	BUILD_BUG_ON(sizeof (struct exfat_acl_entry) != 0x20);
+	BUILD_BUG_ON(sizeof (struct exfat_filedir_entry) != 0x20);
+	BUILD_BUG_ON(sizeof (struct exfat_stream_extension_entry) != 0x20);
+	BUILD_BUG_ON(sizeof (struct exfat_filename_entry) != 0x20);
+
+	error = exfat_init_inodes();
+	if (error)
+		return error;
+
+	error = register_filesystem(&exfat_fs_type);
+	if (error)
+		exfat_exit_inodes();
+	return error;
+}
+
+static void __exit exfat_exit(void)
+{
+	unregister_filesystem(&exfat_fs_type);
+	exfat_exit_inodes();
+}
+
+module_init(exfat_init);
+module_exit(exfat_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Nicolas Schichan <nschichan@freebox.fr>");
diff -Nruw linux-4.4.115-fbx/fs/exfat./time.c linux-4.4.115-fbx/fs/exfat/time.c
--- linux-4.4.115-fbx/fs/exfat./time.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/fs/exfat/time.c	2019-01-25 20:32:47.247752405 +0100
@@ -0,0 +1,126 @@
+
+#include <linux/kernel.h>
+#include <linux/fs.h>
+
+#include "exfat.h"
+#include "exfat_fs.h"
+
+extern struct timezone sys_tz;
+
+/*
+ * The epoch of FAT timestamp is 1980.
+ *     :  bits :     value
+ * date:  0 -  4: day	(1 -  31)
+ * date:  5 -  8: month	(1 -  12)
+ * date:  9 - 15: year	(0 - 127) from 1980
+ * time:  0 -  4: sec	(0 -  29) 2sec counts
+ * time:  5 - 10: min	(0 -  59)
+ * time: 11 - 15: hour	(0 -  23)
+ */
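+/*
+ * Worked example: 2013-07-31 16:37:30 packs as
+ *	date = (33 << 9) | (7 << 5) | 31	(year 2013 - 1980 = 33)
+ *	time = (16 << 11) | (37 << 5) | 15	(30s in 2sec counts = 15)
+ */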
+#define SECS_PER_MIN	60
+#define SECS_PER_HOUR	(60 * 60)
+#define SECS_PER_DAY	(SECS_PER_HOUR * 24)
+/* days between 1.1.70 and 1.1.80 (2 leap days) */
+#define DAYS_DELTA	(365 * 10 + 2)
+/* 120 (2100 - 1980) isn't leap year */
+#define YEAR_2100	120
+#define IS_LEAP_YEAR(y)	(!((y) & 3) && (y) != YEAR_2100)
+
+/* Linear day numbers of the respective 1sts in non-leap years. */
+static time_t days_in_year[] = {
+	/* Jan  Feb  Mar  Apr  May  Jun  Jul  Aug  Sep  Oct  Nov  Dec */
+	0,   0,  31,  59,  90, 120, 151, 181, 212, 243, 273, 304, 334, 0, 0, 0,
+};
+
+/* Convert a FAT time/date pair to a UNIX date (seconds since 1 1 70). */
+void exfat_time_2unix(struct timespec *ts, u32 datetime, u8 time_cs,
+		      s8 tz_offset)
+{
+	u16 date = (datetime >> 16);
+	u16 time = (datetime & 0xffff);
+	time_t second, day, leap_day, month, year;
+
+	year  = date >> 9;
+	month = max(1, (date >> 5) & 0xf);
+	day   = max(1, date & 0x1f) - 1;
+
+	/*
+	 * tz_offset is a 7-bit two's complement count of 15 minute
+	 * increments; bit 7 is the "offset valid" flag. clear it for
+	 * positive offsets (bit 6 unset) so the value sign-extends
+	 * correctly when used below.
+	 */
+	if ((tz_offset & (1 << 6)) == 0)
+		tz_offset &= ~(1 << 7);
+
+	leap_day = (year + 3) / 4;
+	if (year > YEAR_2100)		/* 2100 isn't leap year */
+		leap_day--;
+	if (IS_LEAP_YEAR(year) && month > 2)
+		leap_day++;
+
+	second =  (time & 0x1f) << 1;
+	second += ((time >> 5) & 0x3f) * SECS_PER_MIN;
+	second += (time >> 11) * SECS_PER_HOUR;
+	second += (year * 365 + leap_day
+		   + days_in_year[month] + day
+		   + DAYS_DELTA) * SECS_PER_DAY;
+
+	second -= tz_offset * 15 * SECS_PER_MIN;
+
+	if (time_cs) {
+		ts->tv_sec = second + (time_cs / 100);
+		ts->tv_nsec = (time_cs % 100) * 10000000;
+	} else {
+		ts->tv_sec = second;
+		ts->tv_nsec = 0;
+	}
+}
+
+/* Convert linear UNIX date to a FAT time/date pair. */
+void exfat_time_2exfat(struct exfat_sb_info *sbi, struct timespec *ts,
+		       u32 *datetime, u8 *time_cs, s8 *tz_offset)
+{
+	struct tm tm;
+	u16 time;
+	u16 date;
+	int offset;
+
+	if (sbi->options.time_offset_set)
+		offset = -sbi->options.time_offset;
+	else
+		offset = sys_tz.tz_minuteswest;
+
+	time_to_tm(ts->tv_sec, -offset * SECS_PER_MIN, &tm);
+
+	/* FAT can only represent years between 1980 and 2107 */
+	if (tm.tm_year < 1980 - 1900) {
+		time = 0;
+		date = (0 << 9) | (1 << 5) | 1;
+		*datetime = ((u32)date << 16) | time;
+		if (time_cs)
+			*time_cs = 0;
+		*tz_offset = 0;
+		return;
+	}
+	if (tm.tm_year > 2107 - 1900) {
+		time = (23 << 11) | (59 << 5) | 29;
+		date = (127 << 9) | (12 << 5) | 31;
+		*datetime = ((u32)date << 16) | time;
+		if (time_cs)
+			*time_cs = 199;
+		*tz_offset = 0;
+		return;
+	}
+
+	/* from 1900 -> from 1980 */
+	tm.tm_year -= 80;
+	/* 0~11 -> 1~12 */
+	tm.tm_mon++;
+	/* 0~59 -> 0~29(2sec counts) */
+	tm.tm_sec >>= 1;
+
+	time = tm.tm_hour << 11 | tm.tm_min << 5 | tm.tm_sec;
+	date = tm.tm_year << 9 | tm.tm_mon << 5 | tm.tm_mday;
+
+	*datetime = ((u32)date << 16) | time;
+
+	if (time_cs)
+		*time_cs = (ts->tv_sec & 1) * 100 + ts->tv_nsec / 10000000;
+	*tz_offset = -offset / 15;
+	*tz_offset |= (1 << 7);
+}
diff -Nruw linux-4.4.115-fbx/fs/exfat./upcase.c linux-4.4.115-fbx/fs/exfat/upcase.c
--- linux-4.4.115-fbx/fs/exfat./upcase.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/fs/exfat/upcase.c	2019-01-25 20:32:47.247752405 +0100
@@ -0,0 +1,137 @@
+/*
+ * upcase.c for exfat
+ * Created by <nschichan@freebox.fr> on Wed Aug  7 11:51:37 2013
+ */
+
+#include <linux/buffer_head.h>
+#include <linux/slab.h>
+#include <linux/fs.h>
+
+#include "exfat.h"
+#include "exfat_fs.h"
+
+static u32 exfat_calc_upcase_checksum(const u8 *data, u32 checksum,
+				      size_t count)
+{
+	while (count) {
+		checksum = ((checksum << 31) | (checksum >> 1)) + *data;
+		--count;
+		++data;
+	}
+	return checksum;
+}
+
+static int exfat_load_upcase_table(struct super_block *sb, u32 disk_cluster,
+				   u32 *out_checksum)
+{
+	struct exfat_sb_info *sbi = EXFAT_SB(sb);
+	struct buffer_head *bh;
+	sector_t start, sect, end;
+	u32 off = 0;
+	u32 byte_len = sbi->upcase_len * sizeof (__le16);
+	u32 checksum = 0;
+
+	/*
+	 * the up-case table is not fragmented, so a sequential cluster
+	 * read will do here.
+	 */
+	start = exfat_cluster_sector(sbi, disk_cluster);
+	end = start + DIV_ROUND_UP(byte_len, sbi->sectorsize);
+	for (sect = start; sect < end; ++sect) {
+		u32 len = sbi->sectorsize;
+
+		/* partial last sector, unless byte_len is sector aligned */
+		if (sect == end - 1 && (byte_len & sbi->sectormask))
+			len = byte_len & sbi->sectormask;
+
+		bh = sb_bread(sb, sect);
+		if (!bh) {
+			exfat_msg(sb, KERN_ERR,
+				  "unable to read upcase sector %llu",
+				  (unsigned long long)sect);
+			return -EIO;
+		}
+		memcpy((u8 *)sbi->upcase_table + off, bh->b_data, len);
+
+		checksum = exfat_calc_upcase_checksum(bh->b_data, checksum,
+						      len);
+
+		off += len;
+		brelse(bh);
+	}
+
+	BUG_ON(off != byte_len);
+	*out_checksum = checksum;
+	return 0;
+}
+
+int exfat_upcase_init(struct inode *root)
+{
+	struct exfat_sb_info *sbi = EXFAT_SB(root->i_sb);
+	struct exfat_upcase_entry *upcase;
+	struct exfat_dir_ctx dctx;
+	int error;
+	u64 upcase_length;
+	u32 checksum;
+
+	/*
+	 * configure directory context and look for an upcase table
+	 * entry.
+	 */
+	if (exfat_init_dir_ctx(root, &dctx, 0) < 0)
+		return -EIO;
+
+	error = -EIO;
+	upcase = __exfat_dentry_next(&dctx, E_EXFAT_UPCASE_TABLE, 0xff,
+				     true, NULL);
+	if (!upcase)
+		goto fail;
+
+	/*
+	 * check upcase table length. we need it to be non-zero,
+	 * ending on a __le16 boundary and provide at most a
+	 * conversion for the whole __le16 space.
+	 */
+	upcase_length = __le64_to_cpu(upcase->length);
+	if (upcase_length == 0 ||
+	    upcase_length & (sizeof (__le16) - 1) ||
+	    upcase_length > 0xffff * sizeof (__le16)) {
+		exfat_msg(root->i_sb, KERN_ERR, "invalid upcase length %llu",
+			  (unsigned long long)upcase_length);
+		goto fail;
+	}
+
+	/*
+	 * load complete upcase table in memory.
+	 */
+	error = -ENOMEM;
+	sbi->upcase_len = upcase_length / sizeof (__le16);
+	sbi->upcase_table = kmalloc(upcase_length, GFP_NOFS);
+	if (!sbi->upcase_table)
+		goto fail;
+
+	error = exfat_load_upcase_table(root->i_sb,
+					__le32_to_cpu(upcase->cluster_addr),
+					&checksum);
+	if (error)
+		goto fail;
+
+	if (checksum != __le32_to_cpu(upcase->checksum)) {
+		exfat_msg(root->i_sb, KERN_INFO,
+			  "upcase table checksum mismatch: have %08x, "
+			  "expect %08x", checksum,
+			  __le32_to_cpu(upcase->checksum));
+		error = -EINVAL;
+		goto fail;
+	}
+
+	exfat_cleanup_dir_ctx(&dctx);
+	return 0;
+
+fail:
+	kfree(sbi->upcase_table);
+	sbi->upcase_table = NULL;
+	exfat_cleanup_dir_ctx(&dctx);
+	return error;
+}
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/fs/ext4/ext4_ice.h	2019-01-22 16:16:27.819285490 +0100
@@ -0,0 +1,104 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _EXT4_ICE_H
+#define _EXT4_ICE_H
+
+#include "ext4.h"
+#include "ext4_crypto.h"
+
+#ifdef CONFIG_EXT4_FS_ICE_ENCRYPTION
+static inline int ext4_should_be_processed_by_ice(const struct inode *inode)
+{
+	if (!ext4_encrypted_inode((struct inode *)inode))
+		return 0;
+
+	return ext4_using_hardware_encryption((struct inode *)inode);
+}
+
+static inline int ext4_is_ice_enabled(void)
+{
+	return 1;
+}
+
+int ext4_is_aes_xts_cipher(const struct inode *inode);
+
+char *ext4_get_ice_encryption_key(const struct inode *inode);
+char *ext4_get_ice_encryption_salt(const struct inode *inode);
+
+int ext4_is_ice_encryption_info_equal(const struct inode *inode1,
+	const struct inode *inode2);
+
+static inline size_t ext4_get_ice_encryption_key_size(
+	const struct inode *inode)
+{
+	return EXT4_AES_256_XTS_KEY_SIZE / 2;
+}
+
+static inline size_t ext4_get_ice_encryption_salt_size(
+	const struct inode *inode)
+{
+	return EXT4_AES_256_XTS_KEY_SIZE / 2;
+}
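+
+/*
+ * An AES-256-XTS key is 64 bytes (EXT4_AES_256_XTS_KEY_SIZE); the
+ * convention here appears to split it into a 32-byte key half and a
+ * 32-byte salt half for the inline crypto engine (ICE), hence the
+ * divide by two above.
+ */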
+
+#else
+static inline int ext4_should_be_processed_by_ice(const struct inode *inode)
+{
+	return 0;
+}
+static inline int ext4_is_ice_enabled(void)
+{
+	return 0;
+}
+
+static inline char *ext4_get_ice_encryption_key(const struct inode *inode)
+{
+	return NULL;
+}
+
+static inline char *ext4_get_ice_encryption_salt(const struct inode *inode)
+{
+	return NULL;
+}
+
+static inline size_t ext4_get_ice_encryption_key_size(
+	const struct inode *inode)
+{
+	return 0;
+}
+
+static inline size_t ext4_get_ice_encryption_salt_size(
+	const struct inode *inode)
+{
+	return 0;
+}
+
+static inline int ext4_is_xts_cipher(const struct inode *inode)
+{
+	return 0;
+}
+
+static inline int ext4_is_ice_encryption_info_equal(
+	const struct inode *inode1,
+	const struct inode *inode2)
+{
+	return 0;
+}
+
+static inline int ext4_is_aes_xts_cipher(const struct inode *inode)
+{
+	return 0;
+}
+
+#endif
+
+#endif	/* _EXT4_ICE_H */
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/fs/fuse/fuse_passthrough.h	2019-01-22 16:16:27.859285853 +0100
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _FS_FUSE_PASSTHROUGH_H
+#define _FS_FUSE_PASSTHROUGH_H
+
+#include "fuse_i.h"
+
+#include <linux/fuse.h>
+#include <linux/file.h>
+
+void fuse_setup_passthrough(struct fuse_conn *fc, struct fuse_req *req);
+
+ssize_t fuse_passthrough_read_iter(struct kiocb *iocb, struct iov_iter *to);
+
+ssize_t fuse_passthrough_write_iter(struct kiocb *iocb, struct iov_iter *from);
+
+void fuse_passthrough_release(struct fuse_file *ff);
+
+#endif /* _FS_FUSE_PASSTHROUGH_H */
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/fs/fuse/passthrough.c	2019-01-22 16:16:27.859285853 +0100
@@ -0,0 +1,132 @@
+/*
+ * Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include "fuse_passthrough.h"
+
+#include <linux/aio.h>
+#include <linux/fs_stack.h>
+
+void fuse_setup_passthrough(struct fuse_conn *fc, struct fuse_req *req)
+{
+	int daemon_fd, fs_stack_depth;
+	unsigned open_out_index;
+	struct file *passthrough_filp;
+	struct inode *passthrough_inode;
+	struct super_block *passthrough_sb;
+	struct fuse_open_out *open_out;
+
+	req->passthrough_filp = NULL;
+
+	if (!(fc->passthrough))
+		return;
+
+	if ((req->in.h.opcode != FUSE_OPEN) &&
+	    (req->in.h.opcode != FUSE_CREATE))
+		return;
+
+	open_out_index = req->in.numargs - 1;
+
+	BUG_ON(open_out_index != 0 && open_out_index != 1);
+	BUG_ON(req->out.args[open_out_index].size != sizeof(*open_out));
+
+	open_out = req->out.args[open_out_index].value;
+
+	daemon_fd = (int)open_out->passthrough_fd;
+	if (daemon_fd < 0)
+		return;
+
+	passthrough_filp = fget_raw(daemon_fd);
+	if (!passthrough_filp)
+		return;
+
+	passthrough_inode = file_inode(passthrough_filp);
+	passthrough_sb = passthrough_inode->i_sb;
+	fs_stack_depth = passthrough_sb->s_stack_depth + 1;
+
+	/* If we reached the stacking limit go through regular io */
+	if (fs_stack_depth > FILESYSTEM_MAX_STACK_DEPTH) {
+		/* Release the passthrough file. */
+		fput(passthrough_filp);
+		pr_err("FUSE: maximum fs stacking depth exceeded, cannot use passthrough for this file\n");
+		return;
+	}
+	req->passthrough_filp = passthrough_filp;
+}
+
+static ssize_t fuse_passthrough_read_write_iter(struct kiocb *iocb,
+					    struct iov_iter *iter, int do_write)
+{
+	ssize_t ret_val;
+	struct fuse_file *ff;
+	struct file *fuse_file, *passthrough_filp;
+	struct inode *fuse_inode, *passthrough_inode;
+	struct fuse_conn *fc;
+
+	ff = iocb->ki_filp->private_data;
+	fuse_file = iocb->ki_filp;
+	passthrough_filp = ff->passthrough_filp;
+	fc = ff->fc;
+
+	/* lock passthrough file to prevent it from being released */
+	get_file(passthrough_filp);
+	iocb->ki_filp = passthrough_filp;
+	fuse_inode = fuse_file->f_path.dentry->d_inode;
+	passthrough_inode = file_inode(passthrough_filp);
+
+	if (do_write) {
+		if (!passthrough_filp->f_op->write_iter) {
+			ret_val = -EIO;
+			goto out;
+		}
+		ret_val = passthrough_filp->f_op->write_iter(iocb, iter);
+
+		if (ret_val >= 0 || ret_val == -EIOCBQUEUED) {
+			spin_lock(&fc->lock);
+			fsstack_copy_inode_size(fuse_inode, passthrough_inode);
+			spin_unlock(&fc->lock);
+			fsstack_copy_attr_times(fuse_inode, passthrough_inode);
+		}
+	} else {
+		if (!passthrough_filp->f_op->read_iter) {
+			ret_val = -EIO;
+			goto out;
+		}
+		ret_val = passthrough_filp->f_op->read_iter(iocb, iter);
+		if (ret_val >= 0 || ret_val == -EIOCBQUEUED)
+			fsstack_copy_attr_atime(fuse_inode, passthrough_inode);
+	}
+
+out:
+	/* restore the fuse file and drop our reference on the passthrough file */
+	iocb->ki_filp = fuse_file;
+
+	fput(passthrough_filp);
+
+	return ret_val;
+}
+
+ssize_t fuse_passthrough_read_iter(struct kiocb *iocb, struct iov_iter *to)
+{
+	return fuse_passthrough_read_write_iter(iocb, to, 0);
+}
+
+ssize_t fuse_passthrough_write_iter(struct kiocb *iocb, struct iov_iter *from)
+{
+	return fuse_passthrough_read_write_iter(iocb, from, 1);
+}
+
+void fuse_passthrough_release(struct fuse_file *ff)
+{
+	if (!(ff->passthrough_filp))
+		return;
+
+	/* Release the passthrough file. */
+	fput(ff->passthrough_filp);
+	ff->passthrough_filp = NULL;
+}
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/fs/mbcache2.c	2019-01-22 16:16:27.911286324 +0100
@@ -0,0 +1,359 @@
+#include <linux/spinlock.h>
+#include <linux/slab.h>
+#include <linux/list.h>
+#include <linux/list_bl.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/mbcache2.h>
+
+/*
+ * Mbcache is a simple key-value store. Keys need not be unique, however
+ * key-value pairs are expected to be unique (we use this fact in
+ * mb2_cache_entry_delete_block()).
+ *
+ * Ext2 and ext4 use this cache for deduplication of extended attribute blocks.
+ * They use hash of a block contents as a key and block number as a value.
+ * That's why keys need not be unique (different xattr blocks may end up having
+ * the same hash). However block number always uniquely identifies a cache
+ * entry.
+ *
+ * We provide functions for creation and removal of entries, search by key,
+ * and a special "delete entry with given key-value pair" operation. Fixed
+ * size hash table is used for fast key lookups.
+ */
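+
+/*
+ * Usage sketch (hypothetical caller; the helper names below are
+ * illustrative and error handling is omitted):
+ *
+ *	u32 hash = hash_of_block_contents(data);
+ *	struct mb2_cache_entry *entry;
+ *
+ *	mb2_cache_entry_create(cache, GFP_NOFS, hash, bh->b_blocknr);
+ *	...
+ *	entry = mb2_cache_entry_find_first(cache, hash);
+ *	while (entry) {
+ *		if (block_matches(entry->e_block, data))
+ *			break;
+ *		entry = mb2_cache_entry_find_next(cache, entry);
+ *	}
+ */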
+
+struct mb2_cache {
+	/* Hash table of entries */
+	struct hlist_bl_head	*c_hash;
+	/* log2 of hash table size */
+	int			c_bucket_bits;
+	/* Protects c_lru_list, c_entry_count */
+	spinlock_t		c_lru_list_lock;
+	struct list_head	c_lru_list;
+	/* Number of entries in cache */
+	unsigned long		c_entry_count;
+	struct shrinker		c_shrink;
+};
+
+static struct kmem_cache *mb2_entry_cache;
+
+/*
+ * mb2_cache_entry_create - create entry in cache
+ * @cache: cache where the entry should be created
+ * @mask: gfp mask with which the entry should be allocated
+ * @key: key of the entry
+ * @block: block that contains data
+ *
+ * Creates an entry in @cache with key @key, recording that the data is
+ * stored in block @block. Returns -EBUSY if an entry with the same key and
+ * block already exists in the cache; otherwise returns 0.
+ */
+int mb2_cache_entry_create(struct mb2_cache *cache, gfp_t mask, u32 key,
+			   sector_t block)
+{
+	struct mb2_cache_entry *entry, *dup;
+	struct hlist_bl_node *dup_node;
+	struct hlist_bl_head *head;
+
+	entry = kmem_cache_alloc(mb2_entry_cache, mask);
+	if (!entry)
+		return -ENOMEM;
+
+	INIT_LIST_HEAD(&entry->e_lru_list);
+	/* One ref for the hash list; the LRU list grabs its own below */
+	atomic_set(&entry->e_refcnt, 1);
+	entry->e_key = key;
+	entry->e_block = block;
+	head = &cache->c_hash[hash_32(key, cache->c_bucket_bits)];
+	entry->e_hash_list_head = head;
+	hlist_bl_lock(head);
+	hlist_bl_for_each_entry(dup, dup_node, head, e_hash_list) {
+		if (dup->e_key == key && dup->e_block == block) {
+			hlist_bl_unlock(head);
+			kmem_cache_free(mb2_entry_cache, entry);
+			return -EBUSY;
+		}
+	}
+	hlist_bl_add_head(&entry->e_hash_list, head);
+	hlist_bl_unlock(head);
+
+	spin_lock(&cache->c_lru_list_lock);
+	list_add_tail(&entry->e_lru_list, &cache->c_lru_list);
+	/* Grab ref for LRU list */
+	atomic_inc(&entry->e_refcnt);
+	cache->c_entry_count++;
+	spin_unlock(&cache->c_lru_list_lock);
+
+	return 0;
+}
+EXPORT_SYMBOL(mb2_cache_entry_create);
+
+void __mb2_cache_entry_free(struct mb2_cache_entry *entry)
+{
+	kmem_cache_free(mb2_entry_cache, entry);
+}
+EXPORT_SYMBOL(__mb2_cache_entry_free);
+
+static struct mb2_cache_entry *__entry_find(struct mb2_cache *cache,
+					    struct mb2_cache_entry *entry,
+					    u32 key)
+{
+	struct mb2_cache_entry *old_entry = entry;
+	struct hlist_bl_node *node;
+	struct hlist_bl_head *head;
+
+	if (entry)
+		head = entry->e_hash_list_head;
+	else
+		head = &cache->c_hash[hash_32(key, cache->c_bucket_bits)];
+	hlist_bl_lock(head);
+	if (entry && !hlist_bl_unhashed(&entry->e_hash_list))
+		node = entry->e_hash_list.next;
+	else
+		node = hlist_bl_first(head);
+	while (node) {
+		entry = hlist_bl_entry(node, struct mb2_cache_entry,
+				       e_hash_list);
+		if (entry->e_key == key) {
+			atomic_inc(&entry->e_refcnt);
+			goto out;
+		}
+		node = node->next;
+	}
+	entry = NULL;
+out:
+	hlist_bl_unlock(head);
+	if (old_entry)
+		mb2_cache_entry_put(cache, old_entry);
+
+	return entry;
+}
+
+/*
+ * mb2_cache_entry_find_first - find the first entry in cache with given key
+ * @cache: cache where we should search
+ * @key: key to look for
+ *
+ * Search @cache for an entry with key @key. Grabs a reference to the first
+ * matching entry and returns it.
+ */
+struct mb2_cache_entry *mb2_cache_entry_find_first(struct mb2_cache *cache,
+						   u32 key)
+{
+	return __entry_find(cache, NULL, key);
+}
+EXPORT_SYMBOL(mb2_cache_entry_find_first);
+
+/*
+ * mb2_cache_entry_find_next - find next entry in cache with the same key
+ * @cache: cache where we should search
+ * @entry: entry to start search from
+ *
+ * Finds the next entry in the hash chain which has the same key as @entry.
+ * If @entry is unhashed (which can happen when deletion of the entry races
+ * with the search), finds the first entry in the hash chain. The function
+ * drops the reference to @entry and returns with a reference to the found
+ * entry.
+ */
+struct mb2_cache_entry *mb2_cache_entry_find_next(struct mb2_cache *cache,
+						  struct mb2_cache_entry *entry)
+{
+	return __entry_find(cache, entry, entry->e_key);
+}
+EXPORT_SYMBOL(mb2_cache_entry_find_next);
+
+/*
+ * mb2_cache_entry_delete_block - remove information about block from cache
+ * @cache: cache we work with
+ * @key: key of the entry to remove
+ * @block: block containing data for @key
+ *
+ * Remove the entry from @cache that has key @key and whose data is stored
+ * in @block.
+ */
+void mb2_cache_entry_delete_block(struct mb2_cache *cache, u32 key,
+				  sector_t block)
+{
+	struct hlist_bl_node *node;
+	struct hlist_bl_head *head;
+	struct mb2_cache_entry *entry;
+
+	head = &cache->c_hash[hash_32(key, cache->c_bucket_bits)];
+	hlist_bl_lock(head);
+	hlist_bl_for_each_entry(entry, node, head, e_hash_list) {
+		if (entry->e_key == key && entry->e_block == block) {
+			/* We keep hash list reference to keep entry alive */
+			hlist_bl_del_init(&entry->e_hash_list);
+			hlist_bl_unlock(head);
+			spin_lock(&cache->c_lru_list_lock);
+			if (!list_empty(&entry->e_lru_list)) {
+				list_del_init(&entry->e_lru_list);
+				cache->c_entry_count--;
+				atomic_dec(&entry->e_refcnt);
+			}
+			spin_unlock(&cache->c_lru_list_lock);
+			mb2_cache_entry_put(cache, entry);
+			return;
+		}
+	}
+	hlist_bl_unlock(head);
+}
+EXPORT_SYMBOL(mb2_cache_entry_delete_block);
+
+/*
+ * mb2_cache_entry_touch - cache entry got used
+ * @cache: cache the entry belongs to
+ * @entry: entry that got used
+ *
+ * Move the entry to the tail of the LRU list to reflect that it was
+ * recently used.
+ */
+void mb2_cache_entry_touch(struct mb2_cache *cache,
+			   struct mb2_cache_entry *entry)
+{
+	spin_lock(&cache->c_lru_list_lock);
+	if (!list_empty(&entry->e_lru_list))
+		list_move_tail(&entry->e_lru_list, &cache->c_lru_list);
+	spin_unlock(&cache->c_lru_list_lock);
+}
+EXPORT_SYMBOL(mb2_cache_entry_touch);
+
+static unsigned long mb2_cache_count(struct shrinker *shrink,
+				     struct shrink_control *sc)
+{
+	struct mb2_cache *cache = container_of(shrink, struct mb2_cache,
+					       c_shrink);
+
+	return cache->c_entry_count;
+}
+
+/* Shrink number of entries in cache */
+static unsigned long mb2_cache_scan(struct shrinker *shrink,
+				    struct shrink_control *sc)
+{
+	int nr_to_scan = sc->nr_to_scan;
+	struct mb2_cache *cache = container_of(shrink, struct mb2_cache,
+					      c_shrink);
+	struct mb2_cache_entry *entry;
+	struct hlist_bl_head *head;
+	unsigned int shrunk = 0;
+
+	spin_lock(&cache->c_lru_list_lock);
+	while (nr_to_scan-- && !list_empty(&cache->c_lru_list)) {
+		entry = list_first_entry(&cache->c_lru_list,
+					 struct mb2_cache_entry, e_lru_list);
+		list_del_init(&entry->e_lru_list);
+		cache->c_entry_count--;
+		/*
+		 * We keep LRU list reference so that entry doesn't go away
+		 * from under us.
+		 */
+		spin_unlock(&cache->c_lru_list_lock);
+		head = entry->e_hash_list_head;
+		hlist_bl_lock(head);
+		if (!hlist_bl_unhashed(&entry->e_hash_list)) {
+			hlist_bl_del_init(&entry->e_hash_list);
+			atomic_dec(&entry->e_refcnt);
+		}
+		hlist_bl_unlock(head);
+		if (mb2_cache_entry_put(cache, entry))
+			shrunk++;
+		cond_resched();
+		spin_lock(&cache->c_lru_list_lock);
+	}
+	spin_unlock(&cache->c_lru_list_lock);
+
+	return shrunk;
+}
+
+/*
+ * mb2_cache_create - create cache
+ * @bucket_bits: log2 of the hash table size
+ *
+ * Create a cache for keys, with a hash table of 2^bucket_bits buckets.
+ */
+struct mb2_cache *mb2_cache_create(int bucket_bits)
+{
+	struct mb2_cache *cache;
+	int bucket_count = 1 << bucket_bits;
+	int i;
+
+	if (!try_module_get(THIS_MODULE))
+		return NULL;
+
+	cache = kzalloc(sizeof(struct mb2_cache), GFP_KERNEL);
+	if (!cache)
+		goto err_out;
+	cache->c_bucket_bits = bucket_bits;
+	INIT_LIST_HEAD(&cache->c_lru_list);
+	spin_lock_init(&cache->c_lru_list_lock);
+	cache->c_hash = kmalloc(bucket_count * sizeof(struct hlist_bl_head),
+				GFP_KERNEL);
+	if (!cache->c_hash) {
+		kfree(cache);
+		goto err_out;
+	}
+	for (i = 0; i < bucket_count; i++)
+		INIT_HLIST_BL_HEAD(&cache->c_hash[i]);
+
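+	/* Register with the VM so memory pressure can reclaim cache entries. */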
+	cache->c_shrink.count_objects = mb2_cache_count;
+	cache->c_shrink.scan_objects = mb2_cache_scan;
+	cache->c_shrink.seeks = DEFAULT_SEEKS;
+	register_shrinker(&cache->c_shrink);
+
+	return cache;
+
+err_out:
+	module_put(THIS_MODULE);
+	return NULL;
+}
+EXPORT_SYMBOL(mb2_cache_create);
+
+/*
+ * mb2_cache_destroy - destroy cache
+ * @cache: the cache to destroy
+ *
+ * Free all entries in the cache and the cache itself. The caller must make
+ * sure nobody (except the shrinker) can reach @cache when calling this.
+ */
+void mb2_cache_destroy(struct mb2_cache *cache)
+{
+	struct mb2_cache_entry *entry, *next;
+
+	unregister_shrinker(&cache->c_shrink);
+
+	/*
+	 * We don't bother with any locking. Cache must not be used at this
+	 * point.
+	 */
+	list_for_each_entry_safe(entry, next, &cache->c_lru_list, e_lru_list) {
+		if (!hlist_bl_unhashed(&entry->e_hash_list)) {
+			hlist_bl_del_init(&entry->e_hash_list);
+			atomic_dec(&entry->e_refcnt);
+		} else
+			WARN_ON(1);
+		list_del(&entry->e_lru_list);
+		WARN_ON(atomic_read(&entry->e_refcnt) != 1);
+		mb2_cache_entry_put(cache, entry);
+	}
+	kfree(cache->c_hash);
+	kfree(cache);
+	module_put(THIS_MODULE);
+}
+EXPORT_SYMBOL(mb2_cache_destroy);
+
+static int __init mb2cache_init(void)
+{
+	mb2_entry_cache = kmem_cache_create("mbcache",
+				sizeof(struct mb2_cache_entry), 0,
+				SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD, NULL);
+	BUG_ON(!mb2_entry_cache);
+	return 0;
+}
+
+static void __exit mb2cache_exit(void)
+{
+	kmem_cache_destroy(mb2_entry_cache);
+}
+
+module_init(mb2cache_init)
+module_exit(mb2cache_exit)
+
+MODULE_AUTHOR("Jan Kara <jack@suse.cz>");
+MODULE_DESCRIPTION("Meta block cache (for extended attributes)");
+MODULE_LICENSE("GPL");
diff -Nruw linux-4.4.115-fbx/fs/sdcardfs./Kconfig linux-4.4.115-fbx/fs/sdcardfs/Kconfig
--- linux-4.4.115-fbx/fs/sdcardfs./Kconfig	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/fs/sdcardfs/Kconfig	2019-01-22 16:16:28.059287664 +0100
@@ -0,0 +1,13 @@
+config SDCARD_FS
+	tristate "sdcard file system"
+	depends on CONFIGFS_FS
+	default n
+	help
+	  Sdcardfs is based on the Wrapfs file system.
+
+config SDCARD_FS_FADV_NOACTIVE
+	bool "sdcardfs fadvise noactive support"
+	depends on FADV_NOACTIVE
+	default y
+	help
+	  Sdcardfs supports the fadvise noactive mode.
diff -Nruw linux-4.4.115-fbx/fs/sdcardfs./Makefile linux-4.4.115-fbx/fs/sdcardfs/Makefile
--- linux-4.4.115-fbx/fs/sdcardfs./Makefile	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/fs/sdcardfs/Makefile	2019-01-22 16:16:28.059287664 +0100
@@ -0,0 +1,7 @@
+SDCARDFS_VERSION="0.1"
+
+EXTRA_CFLAGS += -DSDCARDFS_VERSION=\"$(SDCARDFS_VERSION)\"
+
+obj-$(CONFIG_SDCARD_FS) += sdcardfs.o
+
+sdcardfs-y := dentry.o file.o inode.o main.o super.o lookup.o mmap.o packagelist.o derived_perm.o
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/include/asm-generic/hash.h	2019-01-22 16:16:28.151288497 +0100
@@ -0,0 +1,9 @@
+#ifndef __ASM_GENERIC_HASH_H
+#define __ASM_GENERIC_HASH_H
+
+struct fast_hash_ops;
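+
+/*
+ * Generic no-op stub; architectures that provide an optimized
+ * arch_fast_hash override this to install their own fast_hash_ops.
+ */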
+static inline void setup_arch_fast_hash(struct fast_hash_ops *ops)
+{
+}
+
+#endif /* __ASM_GENERIC_HASH_H */
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/include/crypto/ice.h	2019-01-22 16:16:28.159288569 +0100
@@ -0,0 +1,79 @@
+/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _QCOM_INLINE_CRYPTO_ENGINE_H_
+#define _QCOM_INLINE_CRYPTO_ENGINE_H_
+
+#include <linux/platform_device.h>
+
+struct request;
+
+enum ice_cryto_algo_mode {
+	ICE_CRYPTO_ALGO_MODE_AES_ECB = 0x0,
+	ICE_CRYPTO_ALGO_MODE_AES_XTS = 0x3,
+};
+
+enum ice_crpto_key_size {
+	ICE_CRYPTO_KEY_SIZE_128 = 0x0,
+	ICE_CRYPTO_KEY_SIZE_256 = 0x2,
+};
+
+enum ice_crpto_key_mode {
+	ICE_CRYPTO_USE_KEY0_HW_KEY = 0x0,
+	ICE_CRYPTO_USE_KEY1_HW_KEY = 0x1,
+	ICE_CRYPTO_USE_LUT_SW_KEY0 = 0x2,
+	ICE_CRYPTO_USE_LUT_SW_KEY  = 0x3
+};
+
+struct ice_crypto_setting {
+	enum ice_crpto_key_size		key_size;
+	enum ice_cryto_algo_mode	algo_mode;
+	enum ice_crpto_key_mode		key_mode;
+	short				key_index;
+};
+
+struct ice_data_setting {
+	struct ice_crypto_setting	crypto_data;
+	bool				sw_forced_context_switch;
+	bool				decr_bypass;
+	bool				encr_bypass;
+};
+
+typedef void (*ice_error_cb)(void *, u32 error);
+
+struct qcom_ice_variant_ops *qcom_ice_get_variant_ops(struct device_node *node);
+struct platform_device *qcom_ice_get_pdevice(struct device_node *node);
+
+#ifdef CONFIG_CRYPTO_DEV_QCOM_ICE
+int qcom_ice_setup_ice_hw(const char *, int);
+#else
+static inline int qcom_ice_setup_ice_hw(const char *storage_type, int enable)
+{
+	return 0;
+}
+#endif
+
+struct qcom_ice_variant_ops {
+	const char *name;
+	int	(*init)(struct platform_device *, void *, ice_error_cb);
+	int	(*reset)(struct platform_device *);
+	int	(*resume)(struct platform_device *);
+	int	(*suspend)(struct platform_device *);
+	int	(*config_start)(struct platform_device *, struct request *,
+				struct ice_data_setting *, bool);
+	int	(*config_end)(struct request *);
+	int	(*status)(struct platform_device *);
+	void	(*debug)(struct platform_device *);
+};
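+
+/*
+ * Illustrative (hypothetical) storage-driver usage of this ops table;
+ * "np", "host", "err_cb", "req" and "setting" are assumed caller-side
+ * values:
+ *
+ *	struct qcom_ice_variant_ops *ops = qcom_ice_get_variant_ops(np);
+ *	struct platform_device *ice_pdev = qcom_ice_get_pdevice(np);
+ *
+ *	if (ops && ice_pdev && !ops->init(ice_pdev, host, err_cb))
+ *		ops->config_start(ice_pdev, req, &setting, true);
+ */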
+
+#endif /* _QCOM_INLINE_CRYPTO_ENGINE_H_ */
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/include/dt-bindings/clock/audio-ext-clk.h	2019-01-22 16:16:28.167288642 +0100
@@ -0,0 +1,30 @@
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __AUDIO_EXT_CLK_H
+#define __AUDIO_EXT_CLK_H
+
+/* Audio External Clocks */
+#define AUDIO_PMI_CLK		0
+#define AUDIO_PMIC_LNBB_CLK	0
+#define AUDIO_AP_CLK		1
+#define AUDIO_AP_CLK2		2
+#define AUDIO_LPASS_MCLK	3
+#define AUDIO_LPASS_MCLK2	4
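+/*
+ * Illustrative consumer usage in a device-tree node (the provider label
+ * "audio_ext_clk" is a hypothetical example):
+ *
+ *	clocks = <&audio_ext_clk AUDIO_AP_CLK>;
+ */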
+
+#define clk_audio_ap_clk        0x9b5727cb
+#define clk_audio_pmi_clk       0xcbfe416d
+#define clk_audio_ap_clk2       0x454d1e91
+#define clk_audio_lpass_mclk    0xf0f2a284
+#define clk_audio_pmi_lnbb_clk   0x57312343
+
+#endif
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/include/dt-bindings/clock/msm-clocks-8996.h	2019-01-22 16:16:28.171288678 +0100
@@ -0,0 +1,574 @@
+/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MSM_CLOCKS_8996_H
+#define __MSM_CLOCKS_8996_H
+
+#include "audio-ext-clk.h"
+
+/* clock_gcc controlled clocks */
+#define clk_cxo_clk_src			0x79e95308
+#define clk_pnoc_clk			0x4325d220
+#define clk_pnoc_a_clk			0x2808c12b
+#define clk_bimc_clk			0x4b80bf00
+#define clk_bimc_a_clk			0x4b25668a
+#define clk_cnoc_clk			0xd5ccb7f4
+#define clk_cnoc_a_clk			0xd8fe2ccc
+#define clk_snoc_clk			0x2c341aa0
+#define clk_snoc_a_clk			0x8fcef2af
+#define clk_bb_clk1			0xf5304268
+#define clk_bb_clk1_ao			0xfa113810
+#define clk_bb_clk1_pin			0x6dd0a779
+#define clk_bb_clk1_pin_ao		0x9b637772
+#define clk_bb_clk2			0xfe15cb87
+#define clk_bb_clk2_ao			0x59682706
+#define clk_bb_clk2_pin			0x498938e5
+#define clk_bb_clk2_pin_ao		0x52513787
+#define clk_bimc_msmbus_clk		0xd212feea
+#define clk_bimc_msmbus_a_clk		0x71d1a499
+#define clk_ce1_a_clk			0x44a833fe
+#define clk_cnoc_msmbus_clk		0x62228b5d
+#define clk_cnoc_msmbus_a_clk		0x67442955
+#define clk_cxo_clk_src_ao		0x64eb6004
+#define clk_cxo_dwc3_clk		0xf79c19f6
+#define clk_cxo_lpm_clk			0x94adbf3d
+#define clk_cxo_otg_clk			0x4eec0bb9
+#define clk_cxo_pil_lpass_clk		0xe17f0ff6
+#define clk_cxo_pil_ssc_clk		0x81832015
+#define clk_div_clk1			0xaa1157a6
+#define clk_div_clk1_ao			0x6b943d68
+#define clk_div_clk2			0xd454019f
+#define clk_div_clk2_ao			0x53f9e788
+#define clk_div_clk3			0xa9a55a68
+#define clk_div_clk3_ao			0x3d6725a8
+#define clk_ipa_a_clk			0xeeec2919
+#define clk_ipa_clk			0xfa685cda
+#define clk_ln_bb_clk			0x3ab0b36d
+#define clk_ln_bb_a_clk			0xc7257ea8
+#define clk_ln_bb_clk_pin		0x1b1c476a
+#define clk_ln_bb_a_clk_pin		0x9cbb5411
+#define clk_mcd_ce1_clk			0xbb615d26
+#define clk_pnoc_keepalive_a_clk	0xf8f91f0b
+#define clk_pnoc_msmbus_clk		0x38b95c77
+#define clk_pnoc_msmbus_a_clk		0x8c9b4e93
+#define clk_pnoc_pm_clk			0xd6f7dfb9
+#define clk_pnoc_sps_clk		0xd482ecc7
+#define clk_qdss_a_clk			0xdd121669
+#define clk_qdss_clk			0x1492202a
+#define clk_rf_clk1			0xaabeea5a
+#define clk_rf_clk1_ao			0x72a10cb8
+#define clk_rf_clk1_pin			0x8f463562
+#define clk_rf_clk1_pin_ao		0x62549ff6
+#define clk_rf_clk2			0x24a30992
+#define clk_rf_clk2_ao			0x944d8bbd
+#define clk_rf_clk2_pin			0xa7c5602a
+#define clk_rf_clk2_pin_ao		0x2d75eb4d
+#define clk_snoc_msmbus_clk		0xe6900bb6
+#define clk_snoc_msmbus_a_clk		0x5d4683bd
+#define clk_mcd_ce1_clk			0xbb615d26
+#define clk_qcedev_ce1_clk		0x293f97b0
+#define clk_qcrypto_ce1_clk		0xa6ac14df
+#define clk_qseecom_ce1_clk		0xaa858373
+#define clk_scm_ce1_clk			0xd8ebcc62
+#define clk_ce1_clk			0x42229c55
+#define clk_gcc_ce1_ahb_m_clk		0x2eb28c01
+#define clk_gcc_ce1_axi_m_clk		0xc174dfba
+#define clk_measure_only_bimc_hmss_axi_clk	0xc1cc4f11
+#define clk_aggre1_noc_clk		0x049abba8
+#define clk_aggre1_noc_a_clk		0xc12e4220
+#define clk_aggre2_noc_clk		0xaa681404
+#define clk_aggre2_noc_a_clk		0xcab67089
+#define clk_mmssnoc_axi_rpm_clk		0x4d7f8cdc
+#define clk_mmssnoc_axi_rpm_a_clk	0xfbea899b
+#define clk_mmssnoc_axi_clk		0xdb4b31e6
+#define clk_mmssnoc_axi_a_clk		0xd4970614
+#define clk_mmssnoc_gds_clk		0x06a22afa
+
+#define clk_gpll0			0x1ebe3bc4
+#define clk_gpll0_ao			0xa1368304
+#define clk_gpll0_out_main		0xe9374de7
+#define clk_gpll4			0xb3b5d85b
+#define clk_gpll4_out_main		0xa9a0ab9d
+#define clk_ufs_axi_clk_src		0x297ca380
+#define clk_pcie_aux_clk_src		0xebc50566
+#define clk_usb30_master_clk_src	0xc6262f89
+#define clk_usb20_master_clk_src	0x5680ac83
+#define clk_ufs_ice_core_clk_src	0xda8e7119
+#define clk_blsp1_qup1_i2c_apps_clk_src 0x17f78f5e
+#define clk_blsp1_qup1_spi_apps_clk_src 0xf534c4fa
+#define clk_blsp1_qup2_i2c_apps_clk_src 0x8de71c79
+#define clk_blsp1_qup2_spi_apps_clk_src 0x33cf809a
+#define clk_blsp1_qup3_i2c_apps_clk_src 0xf161b902
+#define clk_blsp1_qup3_spi_apps_clk_src 0x5e95683f
+#define clk_blsp1_qup4_i2c_apps_clk_src 0xb2ecce68
+#define clk_blsp1_qup4_spi_apps_clk_src 0xddb5bbdb
+#define clk_blsp1_qup5_i2c_apps_clk_src 0x71ea7804
+#define clk_blsp1_qup5_spi_apps_clk_src 0x9752f35f
+#define clk_blsp1_qup6_i2c_apps_clk_src 0x28806803
+#define clk_blsp1_qup6_spi_apps_clk_src 0x44a1edc4
+#define clk_blsp1_uart1_apps_clk_src	0xf8146114
+#define clk_blsp1_uart2_apps_clk_src	0xfc9c2f73
+#define clk_blsp1_uart3_apps_clk_src	0x600497f2
+#define clk_blsp1_uart4_apps_clk_src	0x56bff15c
+#define clk_blsp1_uart5_apps_clk_src	0x218ef697
+#define clk_blsp1_uart6_apps_clk_src	0x8fbdbe4c
+#define clk_blsp2_qup1_i2c_apps_clk_src 0xd6d1e95d
+#define clk_blsp2_qup1_spi_apps_clk_src 0xcc1b8365
+#define clk_blsp2_qup2_i2c_apps_clk_src 0x603b5c51
+#define clk_blsp2_qup2_spi_apps_clk_src 0xd577dc44
+#define clk_blsp2_qup3_i2c_apps_clk_src 0xea82959c
+#define clk_blsp2_qup3_spi_apps_clk_src 0xd04b1e92
+#define clk_blsp2_qup4_i2c_apps_clk_src 0x73dc968c
+#define clk_blsp2_qup4_spi_apps_clk_src 0x25d4a2b1
+#define clk_blsp2_qup5_i2c_apps_clk_src 0xcc3698bd
+#define clk_blsp2_qup5_spi_apps_clk_src 0xfa0cf45e
+#define clk_blsp2_qup6_i2c_apps_clk_src 0x2fa53151
+#define clk_blsp2_qup6_spi_apps_clk_src 0x5ca86755
+#define clk_blsp2_uart1_apps_clk_src	0x562c66dc
+#define clk_blsp2_uart2_apps_clk_src	0xdd448080
+#define clk_blsp2_uart3_apps_clk_src	0x46b2e90f
+#define clk_blsp2_uart4_apps_clk_src	0x23a093d2
+#define clk_blsp2_uart5_apps_clk_src	0xe067616a
+#define clk_blsp2_uart6_apps_clk_src	0xe02d2829
+#define clk_gp1_clk_src			0xad85b97a
+#define clk_gp2_clk_src			0xfb1f0065
+#define clk_gp3_clk_src			0x63b693d6
+#define clk_hmss_rbcpr_clk_src		0xedd9a474
+#define clk_pdm2_clk_src		0x31e494fd
+#define clk_sdcc1_apps_clk_src		0xd4975db2
+#define clk_sdcc2_apps_clk_src		0xfc46c821
+#define clk_sdcc3_apps_clk_src		0xea34c7f4
+#define clk_sdcc4_apps_clk_src		0x7aaaaa0c
+#define clk_tsif_ref_clk_src		0x4e9042d1
+#define clk_usb20_mock_utmi_clk_src	0xc3aaeecb
+#define clk_usb30_mock_utmi_clk_src	0xa024a976
+#define clk_usb3_phy_aux_clk_src	0x15eec63c
+#define clk_gcc_qusb2phy_prim_reset	0x07550fa1
+#define clk_gcc_qusb2phy_sec_reset	0x3f3a87d0
+#define clk_gcc_periph_noc_usb20_ahb_clk	0xfb9f26e9
+#define clk_gcc_mmss_gcc_dbg_clk	0xe89d461c
+#define clk_cpu_dbg_clk			0x6550dfa9
+#define clk_gcc_blsp1_ahb_clk		0x8caa5b4f
+#define clk_gcc_blsp1_qup1_i2c_apps_clk 0xc303fae9
+#define clk_gcc_blsp1_qup1_spi_apps_clk 0x759a76b0
+#define clk_gcc_blsp1_qup2_i2c_apps_clk 0x1076f220
+#define clk_gcc_blsp1_qup2_spi_apps_clk 0x3e77d48f
+#define clk_gcc_blsp1_qup3_i2c_apps_clk 0x9e25ac82
+#define clk_gcc_blsp1_qup3_spi_apps_clk 0xfb978880
+#define clk_gcc_blsp1_qup4_i2c_apps_clk 0xd7f40f6f
+#define clk_gcc_blsp1_qup4_spi_apps_clk 0x80f8722f
+#define clk_gcc_blsp1_qup5_i2c_apps_clk 0xacae5604
+#define clk_gcc_blsp1_qup5_spi_apps_clk 0xbf3e15d7
+#define clk_gcc_blsp1_qup6_i2c_apps_clk 0x5c6ad820
+#define clk_gcc_blsp1_qup6_spi_apps_clk 0x780d9f85
+#define clk_gcc_blsp1_uart1_apps_clk	0xc7c62f90
+#define clk_gcc_blsp1_uart2_apps_clk	0xf8a61c96
+#define clk_gcc_blsp1_uart3_apps_clk	0xc3298bd7
+#define clk_gcc_blsp1_uart4_apps_clk	0x26be16c0
+#define clk_gcc_blsp1_uart5_apps_clk	0x28a6bc74
+#define clk_gcc_blsp1_uart6_apps_clk	0x28fd3466
+#define clk_gcc_blsp2_ahb_clk		0x8f283c1d
+#define clk_gcc_blsp2_qup1_i2c_apps_clk 0x9ace11dd
+#define clk_gcc_blsp2_qup1_spi_apps_clk 0xa32604cc
+#define clk_gcc_blsp2_qup2_i2c_apps_clk 0x1bf9a57e
+#define clk_gcc_blsp2_qup2_spi_apps_clk 0xbf54ca6d
+#define clk_gcc_blsp2_qup3_i2c_apps_clk 0x336d4170
+#define clk_gcc_blsp2_qup3_spi_apps_clk 0xc68509d6
+#define clk_gcc_blsp2_qup4_i2c_apps_clk 0xbd22539d
+#define clk_gcc_blsp2_qup4_spi_apps_clk 0x01a72b93
+#define clk_gcc_blsp2_qup5_i2c_apps_clk 0xe2b2ce1d
+#define clk_gcc_blsp2_qup5_spi_apps_clk 0xf40999cd
+#define clk_gcc_blsp2_qup6_i2c_apps_clk 0x894bcea4
+#define clk_gcc_blsp2_qup6_spi_apps_clk 0xfe1bd34a
+#define clk_gcc_blsp2_uart1_apps_clk	0x8c3512ff
+#define clk_gcc_blsp2_uart2_apps_clk	0x1e1965a3
+#define clk_gcc_blsp2_uart3_apps_clk	0x382415ab
+#define clk_gcc_blsp2_uart4_apps_clk	0x87a44b42
+#define clk_gcc_blsp2_uart5_apps_clk	0x5cd30649
+#define clk_gcc_blsp2_uart6_apps_clk	0x8feee5ab
+#define clk_gcc_boot_rom_ahb_clk	0xde2adeb1
+#define clk_gcc_gp1_clk			0x057f7b69
+#define clk_gcc_gp2_clk			0x9bf83ffd
+#define clk_gcc_gp3_clk			0xec6539ee
+#define clk_gcc_hmss_rbcpr_clk		0x699183be
+#define clk_gcc_mmss_noc_cfg_ahb_clk	0xb41a9d99
+#define clk_gcc_pcie_0_aux_clk		0x3d2e3ece
+#define clk_gcc_pcie_0_cfg_ahb_clk	0x4dd325c3
+#define clk_gcc_pcie_0_mstr_axi_clk	0x3f85285b
+#define clk_gcc_pcie_0_slv_axi_clk	0xd69638a1
+#define clk_gcc_pcie_0_pipe_clk		0x4f37621e
+#define clk_gcc_pcie_0_phy_reset	0xdc3201c1
+#define clk_gcc_pcie_1_aux_clk		0xc9bb962c
+#define clk_gcc_pcie_1_cfg_ahb_clk	0xb6338658
+#define clk_gcc_pcie_1_mstr_axi_clk	0xc20f6269
+#define clk_gcc_pcie_1_slv_axi_clk	0xd54e40d6
+#define clk_gcc_pcie_1_pipe_clk		0xc1627422
+#define clk_gcc_pcie_1_phy_reset	0x674481bb
+#define clk_gcc_pcie_2_aux_clk		0xa4dc7ae8
+#define clk_gcc_pcie_2_cfg_ahb_clk	0x4f1d3121
+#define clk_gcc_pcie_2_mstr_axi_clk	0x9e81724a
+#define clk_gcc_pcie_2_slv_axi_clk	0x7990d8b2
+#define clk_gcc_pcie_2_pipe_clk		0xa757a834
+#define clk_gcc_pcie_2_phy_reset	0x82634880
+#define clk_gcc_pcie_phy_reset		0x9bc3c959
+#define clk_gcc_pcie_phy_com_reset	0x8bf513e6
+#define clk_gcc_pcie_phy_nocsr_com_phy_reset	0x0c16a2da
+#define clk_gcc_pcie_phy_aux_clk	0x4746e74f
+#define clk_gcc_pcie_phy_cfg_ahb_clk	0x8533671a
+#define clk_gcc_pdm2_clk		0x99d55711
+#define clk_gcc_pdm_ahb_clk		0x365664f6
+#define clk_gcc_prng_ahb_clk		0x397e7eaa
+#define clk_gcc_sdcc1_ahb_clk		0x691e0caa
+#define clk_gcc_sdcc1_apps_clk		0x9ad6fb96
+#define clk_gcc_sdcc2_ahb_clk		0x23d5727f
+#define clk_gcc_sdcc2_apps_clk		0x861b20ac
+#define clk_gcc_sdcc3_ahb_clk		0x565b2c03
+#define clk_gcc_sdcc3_apps_clk		0x0b27aeac
+#define clk_gcc_sdcc4_ahb_clk		0x64f3e6a8
+#define clk_gcc_sdcc4_apps_clk		0xbf7c4dc8
+#define clk_gcc_tsif_ahb_clk		0x88d2822c
+#define clk_gcc_tsif_ref_clk		0x8f1ed2c2
+#define clk_gcc_ufs_ahb_clk		0x1914bb84
+#define clk_gcc_ufs_axi_clk		0x47c743a7
+#define clk_gcc_ufs_ice_core_clk	0x310b0710
+#define clk_gcc_ufs_rx_cfg_clk		0xa6747786
+#define clk_gcc_ufs_rx_symbol_0_clk	0x7f43251c
+#define clk_gcc_ufs_rx_symbol_1_clk	0x03182fde
+#define clk_gcc_ufs_tx_cfg_clk		0xba2cf8b5
+#define clk_gcc_ufs_tx_symbol_0_clk	0x6a9f747a
+#define clk_gcc_ufs_unipro_core_clk	0x2daf7fd2
+#define clk_gcc_ufs_sys_clk_core_clk	0x360e5ac8
+#define clk_gcc_ufs_tx_symbol_clk_core_clk	0xf6fb0df7
+#define clk_gcc_usb20_master_clk	0x24c3b66a
+#define clk_gcc_usb20_mock_utmi_clk	0xe8db8203
+#define clk_gcc_usb20_sleep_clk		0x6e8cb4b2
+#define clk_gcc_usb30_master_clk	0xb3b4e2cb
+#define clk_gcc_usb30_mock_utmi_clk	0xa800b65a
+#define clk_gcc_usb30_sleep_clk		0xd0b65c92
+#define clk_gcc_usb3_phy_aux_clk	0x0d9a36e0
+#define clk_gcc_usb3_phy_pipe_clk	0xf279aff2
+#define clk_gcc_usb_phy_cfg_ahb2phy_clk	0xd1231a0e
+#define clk_gcc_aggre0_cnoc_ahb_clk	0x53a35559
+#define clk_gcc_aggre0_snoc_axi_clk	0x3c446400
+#define clk_gcc_aggre0_noc_qosgen_extref_clk	0x8c4356ba
+#define clk_hlos1_vote_lpass_core_smmu_clk	0x3aaa1743
+#define clk_hlos1_vote_lpass_adsp_smmu_clk	0xc76f702f
+#define clk_gcc_usb3_phy_reset		0x03d559f1
+#define clk_gcc_usb3phy_phy_reset	0xb1a4f885
+#define clk_gcc_usb3_clkref_clk		0xb6cc8f01
+#define clk_gcc_hdmi_clkref_clk		0x4d4eec04
+#define clk_gcc_edp_clkref_clk		0xa8685c3f
+#define clk_gcc_ufs_clkref_clk		0x92aa126f
+#define clk_gcc_pcie_clkref_clk		0xa2e247fa
+#define clk_gcc_rx2_usb2_clkref_clk	0x27ec24ba
+#define clk_gcc_rx1_usb2_clkref_clk	0x53351d25
+#define clk_gcc_smmu_aggre0_ahb_clk	0x47a06ce4
+#define clk_gcc_smmu_aggre0_axi_clk	0x3cac4a6c
+#define clk_gcc_sys_noc_usb3_axi_clk	0x94d26800
+#define clk_gcc_sys_noc_ufs_axi_clk	0x19d38312
+#define clk_gcc_aggre2_usb3_axi_clk	0xd5822a8e
+#define clk_gcc_aggre2_ufs_axi_clk	0xb31e5191
+#define clk_gcc_mmss_gpll0_div_clk	0xdd06848d
+#define clk_gcc_mmss_bimc_gfx_clk	0xe4f28754
+#define clk_gcc_bimc_gfx_clk		0x3edd69ad
+#define clk_gcc_qspi_ahb_clk		0x96969dc8
+#define clk_gcc_qspi_ser_clk		0xfaf1e266
+#define clk_qspi_ser_clk_src		0x426676ee
+#define clk_sdcc1_ice_core_clk_src	0xfd6a4301
+#define clk_gcc_sdcc1_ice_core_clk	0x0fd5680a
+#define clk_gcc_mss_cfg_ahb_clk		0x111cde81
+#define clk_gcc_mss_snoc_axi_clk	0x0e71de85
+#define clk_gcc_mss_q6_bimc_axi_clk	0x67544d62
+#define clk_gcc_mss_mnoc_bimc_axi_clk	0xf665d03f
+#define clk_gpll0_out_msscc		0x7d794829
+#define clk_gcc_debug_mux_v2		0xf7e749f0
+#define clk_gcc_dcc_ahb_clk		0xfa14a88c
+#define clk_gcc_aggre0_noc_mpu_cfg_ahb_clk	0x5c1bb8e2
+
+/* clock_mmss controlled clocks */
+#define clk_mmsscc_xo			0x05e63704
+#define clk_mmsscc_gpll0		0xe900c515
+#define clk_mmsscc_gpll0_div		0x73892e05
+#define clk_mmsscc_mmssnoc_ahb		0x7b4bd6f7
+#define clk_mmpll0			0xdd83b751
+#define clk_mmpll0_out_main		0x2f996a31
+#define clk_mmpll1			0x6da7fb90
+#define clk_mmpll1_out_main		0xa0d3a7da
+#define clk_mmpll4			0x22c063c1
+#define clk_mmpll4_out_main		0xfb21c2fd
+#define clk_mmpll3			0x18c76899
+#define clk_mmpll3_out_main		0x6eb6328f
+#define clk_ahb_clk_src			0x86f49203
+#define clk_mmpll2			0x1190e4d8
+#define clk_mmpll2_out_main		0x1e9e24a8
+#define clk_mmpll8			0xd06ad45e
+#define clk_mmpll8_out_main		0x75b1f386
+#define clk_mmpll9			0x1c50684c
+#define clk_mmpll9_out_main		0x16b74937
+#define clk_mmpll5			0xa41e1936
+#define clk_mmpll5_out_main		0xcc1897bf
+#define clk_csi0_clk_src		0x227e65bc
+#define clk_vfe0_clk_src		0xa0c2bd8f
+#define clk_vfe1_clk_src		0x4e357366
+#define clk_csi1_clk_src		0x6a2a6c36
+#define clk_csi2_clk_src		0x4113589f
+#define clk_csi3_clk_src		0xfd934012
+#define clk_maxi_clk_src		0x52c09777
+#define clk_cpp_clk_src			0x8382f56d
+#define clk_jpeg0_clk_src		0x9a0a0ac3
+#define clk_jpeg2_clk_src		0x5ad927f3
+#define clk_jpeg_dma_clk_src		0xb68afcea
+#define clk_mdp_clk_src			0x6dc1f8f1
+#define clk_video_core_clk_src		0x8be4c944
+#define clk_fd_core_clk_src		0xe4799ab7
+#define clk_cci_clk_src			0x822f3d97
+#define clk_csiphy0_3p_clk_src		0xd2474b12
+#define clk_csiphy1_3p_clk_src		0x46a02aff
+#define clk_csiphy2_3p_clk_src		0x1447813f
+#define clk_camss_gp0_clk_src		0x6b57cfe6
+#define clk_camss_gp1_clk_src		0xf735368a
+#define clk_jpeg_dma_clk_src		0xb68afcea
+#define clk_mclk0_clk_src		0x266b3853
+#define clk_mclk1_clk_src		0xa73cad0c
+#define clk_mclk2_clk_src		0x42545468
+#define clk_mclk3_clk_src		0x2bfbb714
+#define clk_csi0phytimer_clk_src	0xc8a309be
+#define clk_csi1phytimer_clk_src	0x7c0fe23a
+#define clk_csi2phytimer_clk_src	0x62ffea9c
+#define clk_rbbmtimer_clk_src		0x17649ecc
+#define clk_esc0_clk_src		0xb41d7c38
+#define clk_esc1_clk_src		0x3b0afa42
+#define clk_hdmi_clk_src		0xb40aeea9
+#define clk_vsync_clk_src		0xecb43940
+#define clk_rbcpr_clk_src		0x2c2e9af2
+#define clk_video_subcore0_clk_src	0x88d79636
+#define clk_video_subcore1_clk_src	0x4966930c
+#define clk_mmss_bto_ahb_clk		0xfdf8c361
+#define clk_camss_ahb_clk		0xc4ff91d4
+#define clk_camss_cci_ahb_clk		0x04c4441a
+#define clk_camss_cci_clk		0xd6cb5eb9
+#define clk_camss_cpp_ahb_clk		0x12e9a87b
+#define clk_camss_cpp_clk		0xb82f366b
+#define clk_camss_cpp_axi_clk		0x5598c804
+#define clk_camss_cpp_vbif_ahb_clk	0xb5f31be4
+#define clk_camss_csi0_ahb_clk		0x6e29c972
+#define clk_camss_csi0_clk		0x30862ddb
+#define clk_camss_csi0phy_clk		0x2cecfb84
+#define clk_camss_csi0pix_clk		0x6946f77b
+#define clk_camss_csi0rdi_clk		0x83645ef5
+#define clk_camss_csi1_ahb_clk		0xccc15f06
+#define clk_camss_csi1_clk		0xb150f052
+#define clk_camss_csi1phy_clk		0xb989f06d
+#define clk_camss_csi1pix_clk		0x58d19bf3
+#define clk_camss_csi1rdi_clk		0x4d2f3352
+#define clk_camss_csi2_ahb_clk		0x92d02d75
+#define clk_camss_csi2_clk		0x74fc92e8
+#define clk_camss_csi2phy_clk		0xda05d9d8
+#define clk_camss_csi2pix_clk		0xf8ed0731
+#define clk_camss_csi2rdi_clk		0xdc1b2081
+#define clk_camss_csi3_ahb_clk		0xee5e459c
+#define clk_camss_csi3_clk		0x39488fdd
+#define clk_camss_csi3phy_clk		0x8b6063b9
+#define clk_camss_csi3pix_clk		0xd82bd467
+#define clk_camss_csi3rdi_clk		0xb6750046
+#define clk_camss_csi_vfe0_clk		0x3023937a
+#define clk_camss_csi_vfe1_clk		0xe66fa522
+#define clk_camss_csiphy0_3p_clk	0xf2a54f5a
+#define clk_camss_csiphy1_3p_clk	0x8bf70cb2
+#define clk_camss_csiphy2_3p_clk	0x1c14c939
+#define clk_camss_gp0_clk		0xcee7e51d
+#define clk_camss_gp1_clk		0x41f1c2e3
+#define clk_camss_ispif_ahb_clk		0x9a212c6d
+#define clk_camss_jpeg0_clk		0x0b0e2db7
+#define clk_camss_jpeg2_clk		0xd7291c8d
+#define clk_camss_jpeg_ahb_clk		0x1f47fd28
+#define clk_camss_jpeg_axi_clk		0x9e5545c8
+#define clk_camss_jpeg_dma_clk		0x2336e65d
+#define clk_camss_mclk0_clk		0xcf0c61e0
+#define clk_camss_mclk1_clk		0xd1410ed4
+#define clk_camss_mclk2_clk		0x851286f2
+#define clk_camss_mclk3_clk		0x4db11c45
+#define clk_camss_micro_ahb_clk		0x33a23277
+#define clk_camss_csi0phytimer_clk	0xff93b3c8
+#define clk_camss_csi1phytimer_clk	0x6c399ab6
+#define clk_camss_csi2phytimer_clk	0x24f47f49
+#define clk_camss_top_ahb_clk		0x8f8b2d33
+#define clk_camss_vfe_ahb_clk		0x595197bc
+#define clk_camss_vfe_axi_clk		0x273d4c31
+#define clk_camss_vfe0_ahb_clk		0x4652833c
+#define clk_camss_vfe0_clk		0x1e9bb8c4
+#define clk_camss_vfe0_stream_clk	0x22835fa4
+#define clk_camss_vfe1_ahb_clk		0x6a56abd3
+#define clk_camss_vfe1_clk		0x5bffa69b
+#define clk_camss_vfe1_stream_clk	0x92f849b9
+#define clk_fd_ahb_clk			0x868a2c5c
+#define clk_fd_core_clk			0x3badcae4
+#define clk_fd_core_uar_clk		0x7e624e15
+#define clk_gpu_ahb_clk			0xf97f1d43
+#define clk_gpu_aon_isense_clk		0xa9e9b297
+#define clk_gpu_gx_gfx3d_clk		0xb7ece823
+#define clk_gpu_mx_clk			0xb80ccedf
+#define clk_gpu_gx_rbbmtimer_clk	0xdeba634e
+#define clk_mdss_ahb_clk		0x684ccb41
+#define clk_mdss_axi_clk		0xcc07d687
+#define clk_mdss_esc0_clk		0x28cafbe6
+#define clk_mdss_esc1_clk		0xc22c6883
+#define clk_mdss_hdmi_ahb_clk		0x01cef516
+#define clk_mdss_hdmi_clk		0x097a6de9
+#define clk_mdss_mdp_clk		0x618336ac
+#define clk_mdss_vsync_clk		0x42a022d3
+#define clk_mmss_misc_ahb_clk		0xea30b0e7
+#define clk_mmss_misc_cxo_clk		0xe620cd80
+#define clk_mmagic_bimc_noc_cfg_ahb_clk 0x12d5ba72
+#define clk_mmagic_camss_axi_clk	0xa8b1c16b
+#define clk_mmagic_camss_noc_cfg_ahb_clk 0x5182c819
+#define clk_mmss_mmagic_cfg_ahb_clk	0x5e94a822
+#define clk_mmagic_mdss_axi_clk		0xa0359d10
+#define clk_mmagic_mdss_noc_cfg_ahb_clk 0x9c6d5482
+#define clk_mmagic_video_axi_clk	0x7b9219c3
+#define clk_mmagic_video_noc_cfg_ahb_clk 0x5124d256
+#define clk_mmss_mmagic_ahb_clk		0x3d15f2b0
+#define clk_mmss_mmagic_maxi_clk	0xbdaf5af7
+#define clk_mmss_rbcpr_ahb_clk		0x623ba55f
+#define clk_mmss_rbcpr_clk		0x69a23a6f
+#define clk_mmss_spdm_cpp_clk		0xefe35cd2
+#define clk_mmss_spdm_jpeg_dma_clk	0xcb7bd5a0
+#define clk_smmu_cpp_ahb_clk		0x3ad82d84
+#define clk_smmu_cpp_axi_clk		0xa6bb2f4a
+#define clk_smmu_jpeg_ahb_clk		0x10c436ec
+#define clk_smmu_jpeg_axi_clk		0x41112f37
+#define clk_smmu_mdp_ahb_clk		0x04994cb2
+#define clk_smmu_mdp_axi_clk		0x7fd71687
+#define clk_smmu_rot_ahb_clk		0xa30772c9
+#define clk_smmu_rot_axi_clk		0xfed7c078
+#define clk_smmu_vfe_ahb_clk		0x4dabebe7
+#define clk_smmu_vfe_axi_clk		0xde483725
+#define clk_smmu_video_ahb_clk		0x2d738e2c
+#define clk_smmu_video_axi_clk		0xe2b5b887
+#define clk_video_ahb_clk		0x90775cfb
+#define clk_video_axi_clk		0xe6c16dba
+#define clk_video_core_clk		0x7e876ec3
+#define clk_video_maxi_clk		0x97749db6
+#define clk_video_subcore0_clk		0xb6f63e6c
+#define clk_video_subcore1_clk		0x26c29cb4
+#define clk_vmem_ahb_clk		0xab6223ff
+#define clk_vmem_maxi_clk		0x15ef32db
+#define clk_mmss_debug_mux		0xe646ffda
+#define clk_mmss_gcc_dbg_clk		0xafa4d48a
+#define clk_gfx3d_clk_src		0x917f76ef
+#define clk_extpclk_clk_src		0xb2c31abd
+#define clk_mdss_byte0_clk		0xf5a03f64
+#define clk_mdss_byte1_clk		0xb8c7067d
+#define clk_mdss_extpclk_clk		0xfa5aadb0
+#define clk_mdss_pclk0_clk		0x3487234a
+#define clk_mdss_pclk1_clk		0xd5804246
+#define clk_gpu_gcc_dbg_clk		0x0ccc42cd
+#define clk_mdss_mdp_vote_clk		0x588460a4
+#define clk_mdss_rotator_vote_clk	0x5b1f675e
+#define clk_mmpll2_postdiv_clk		0x4fdeaaba
+#define clk_mmpll8_postdiv_clk		0xedf57882
+#define clk_mmpll9_postdiv_clk		0x3064b618
+#define clk_gfx3d_clk_src_v2		0x4210acb7
+#define clk_byte0_clk_src		0x75cc885b
+#define clk_byte1_clk_src		0x63c2c955
+#define clk_pclk0_clk_src		0xccac1f35
+#define clk_pclk1_clk_src		0x090f68ac
+#define clk_ext_byte0_clk_src		0xfb32f31e
+#define clk_ext_byte1_clk_src		0x585ef6d4
+#define clk_ext_pclk0_clk_src		0x087c1612
+#define clk_ext_pclk1_clk_src		0x8067c5a3
+
+/* clock_debug controlled clocks */
+#define clk_gcc_debug_mux		0x8121ac15
+
+/* external multimedia clocks */
+#define clk_dsi0pll_pixel_clk_mux	0x792379e1
+#define clk_dsi0pll_byte_clk_mux	0x60e83f06
+#define clk_dsi0pll_byte_clk_src	0xbbaa30be
+#define clk_dsi0pll_pixel_clk_src	0x45b3260f
+#define clk_dsi0pll_n2_div_clk		0x1474c213
+#define clk_dsi0pll_post_n1_div_clk	0xdab8c389
+#define clk_dsi0pll_vco_clk		0x15940d40
+#define clk_dsi1pll_pixel_clk_mux	0x36458019
+#define clk_dsi1pll_byte_clk_mux	0xb5a42b7b
+#define clk_dsi1pll_byte_clk_src	0x63930a8f
+#define clk_dsi1pll_pixel_clk_src	0x0e4c9b56
+#define clk_dsi1pll_n2_div_clk		0x2c9d4007
+#define clk_dsi1pll_post_n1_div_clk	0x03020041
+#define clk_dsi1pll_vco_clk		0x99797b50
+#define clk_mdss_dsi1_vco_clk_src	0xfcd15658
+#define clk_hdmi_vco_clk		0x66003284
+
+#define clk_dsi0pll_shadow_byte_clk_src	0x177c029c
+#define clk_dsi0pll_shadow_pixel_clk_src	0x98ae3c92
+#define clk_dsi0pll_shadow_n2_div_clk	0xd5f0dad9
+#define clk_dsi0pll_shadow_post_n1_div_clk	0x1f7c8cf8
+#define clk_dsi0pll_shadow_vco_clk	0xb100ca83
+#define clk_dsi1pll_shadow_byte_clk_src	0xfc021ce5
+#define clk_dsi1pll_shadow_pixel_clk_src	0xdcca3ffc
+#define clk_dsi1pll_shadow_n2_div_clk		0x189541bf
+#define clk_dsi1pll_shadow_post_n1_div_clk	0x1637020e
+#define clk_dsi1pll_shadow_vco_clk		0x68d8b6f7
+
+/* CPU clocks */
+#define clk_pwrcl_clk 0xc554130e
+#define clk_pwrcl_pll 0x25454ca1
+#define clk_pwrcl_alt_pll 0xc445471b
+#define clk_pwrcl_pll_main 0x28948e22
+#define clk_pwrcl_alt_pll_main 0x25c8270e
+#define clk_pwrcl_hf_mux 0x77706ae6
+#define clk_pwrcl_lf_mux 0xd99e334d
+#define clk_perfcl_clk 0x58869997
+#define clk_perfcl_pll 0x97dcec1c
+#define clk_perfcl_alt_pll 0xfe2eaea1
+#define clk_perfcl_pll_main 0x0dbf0c0b
+#define clk_perfcl_alt_pll_main 0x0b892aab
+#define clk_perfcl_hf_mux 0x9e8bbe59
+#define clk_perfcl_lf_mux 0x2f9c278d
+#define clk_cbf_pll 0xfe2e96a3
+#define clk_cbf_pll_main 0x2b05cf95
+#define clk_cbf_hf_mux 0x71244f73
+#define clk_cbf_clk 0x48e9e16b
+#define clk_xo_ao 0x428c856d
+#define clk_sys_apcsaux_clk 0x0b0dd513
+#define clk_cpu_debug_mux 0xc7acaa31
+
+/* GCC block resets */
+#define QUSB2PHY_PRIM_BCR		0
+#define QUSB2PHY_SEC_BCR		1
+#define BLSP1_BCR			2
+#define BLSP2_BCR			3
+#define BOOT_ROM_BCR			4
+#define PRNG_BCR			5
+#define UFS_BCR				6
+#define USB_20_BCR			7
+#define USB_30_BCR			8
+#define USB3_PHY_BCR			9
+#define USB3PHY_PHY_BCR			10
+#define PCIE_0_PHY_BCR			11
+#define PCIE_1_PHY_BCR			12
+#define PCIE_2_PHY_BCR			13
+#define PCIE_PHY_BCR			14
+#define PCIE_PHY_COM_BCR		15
+#define PCIE_PHY_NOCSR_COM_PHY_BCR	16
+
+/* MMSS Block resets */
+#define VIDEO_BCR			0
+#define MDSS_BCR			1
+#define CAMSS_MICRO_BCR			2
+#define CAMSS_JPEG_BCR			3
+#define CAMSS_VFE0_BCR			4
+#define CAMSS_VFE1_BCR			5
+#define FD_BCR				6
+#define GPU_GX_BCR			7
+
+#endif
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/include/dt-bindings/clock/msm-clocks-8998.h	2019-01-22 16:16:28.171288678 +0100
@@ -0,0 +1,534 @@
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MSM_CLOCKS_8998_H
+#define __MSM_CLOCKS_8998_H
+
+#include "audio-ext-clk.h"
+
+/* clock_rpm controlled clocks */
+#define clk_ce1_clk				0x42229c55
+#define clk_ce1_a_clk				0x44a833fe
+#define clk_cxo_clk_src				0x79e95308
+#define clk_bimc_clk				0x4b80bf00
+#define clk_bimc_a_clk				0x4b25668a
+#define clk_cnoc_clk				0xd5ccb7f4
+#define clk_cnoc_a_clk				0xd8fe2ccc
+#define clk_snoc_clk				0x2c341aa0
+#define clk_snoc_a_clk				0x8fcef2af
+#define clk_cnoc_periph_clk			0xb11e9cf9
+#define clk_cnoc_periph_a_clk			0x1d7faa2e
+#define clk_cnoc_periph_keepalive_a_clk		0x7287aef2
+#define clk_ln_bb_clk1				0xb867b147
+#define clk_ln_bb_clk1_ao			0x7f63a93a
+#define clk_ln_bb_clk1_pin			0x6fc5653c
+#define clk_ln_bb_clk1_pin_ao			0x25d625bf
+#define clk_ln_bb_clk2				0xf83e6387
+#define clk_ln_bb_clk2_ao			0x96f09628
+#define clk_ln_bb_clk2_pin			0xa9ebe8d5
+#define clk_ln_bb_clk2_pin_ao			0x89a1226f
+#define clk_ln_bb_clk3				0x4f52a39e
+#define clk_ln_bb_clk3_ao			0xb15eba76
+#define clk_ln_bb_clk3_pin			0xc4de7dad
+#define clk_ln_bb_clk3_pin_ao			0xc01022e8
+#define clk_bimc_msmbus_clk			0xd212feea
+#define clk_bimc_msmbus_a_clk			0x71d1a499
+#define clk_cnoc_msmbus_clk			0x62228b5d
+#define clk_cnoc_msmbus_a_clk			0x67442955
+#define clk_cxo_clk_src_ao			0x64eb6004
+#define clk_cxo_dwc3_clk			0xf79c19f6
+#define clk_cxo_lpm_clk				0x94adbf3d
+#define clk_cxo_otg_clk				0x4eec0bb9
+#define clk_cxo_pil_lpass_clk			0xe17f0ff6
+#define clk_cxo_pil_ssc_clk			0x81832015
+#define clk_cxo_pil_spss_clk			0x5cd71a61
+#define clk_div_clk1				0xaa1157a6
+#define clk_div_clk1_ao				0x6b943d68
+#define clk_div_clk2				0xd454019f
+#define clk_div_clk2_ao				0x53f9e788
+#define clk_div_clk3				0xa9a55a68
+#define clk_div_clk3_ao				0x3d6725a8
+#define clk_ipa_clk				0xfa685cda
+#define clk_ipa_a_clk				0xeeec2919
+#define clk_mcd_ce1_clk				0xbb615d26
+#define clk_mmssnoc_axi_clk			0xdb4b31e6
+#define clk_mmssnoc_axi_a_clk			0xd4970614
+#define clk_qcedev_ce1_clk			0x293f97b0
+#define clk_qcrypto_ce1_clk			0xa6ac14df
+#define clk_qdss_clk				0x1492202a
+#define clk_qdss_a_clk				0xdd121669
+#define clk_qseecom_ce1_clk			0xaa858373
+#define clk_rf_clk1				0xaabeea5a
+#define clk_rf_clk1_ao				0x72a10cb8
+#define clk_rf_clk1_pin				0x8f463562
+#define clk_rf_clk1_pin_ao			0x62549ff6
+#define clk_rf_clk2				0x24a30992
+#define clk_rf_clk2_ao				0x944d8bbd
+#define clk_rf_clk2_pin				0xa7c5602a
+#define clk_rf_clk2_pin_ao			0x2d75eb4d
+#define clk_rf_clk3				0xb673936b
+#define clk_rf_clk3_ao				0x038bb968
+#define clk_rf_clk3_pin				0x726f53f5
+#define clk_rf_clk3_pin_ao			0x76f9240f
+#define clk_scm_ce1_clk				0xd8ebcc62
+#define clk_snoc_msmbus_clk			0xe6900bb6
+#define clk_snoc_msmbus_a_clk			0x5d4683bd
+#define clk_gcc_ce1_ahb_m_clk			0x2eb28c01
+#define clk_gcc_ce1_axi_m_clk			0xc174dfba
+#define clk_aggre1_noc_clk			0x049abba8
+#define clk_aggre1_noc_a_clk			0xc12e4220
+#define clk_aggre2_noc_clk			0xaa681404
+#define clk_aggre2_noc_a_clk			0xcab67089
+#define clk_measure_only_bimc_hmss_axi_clk	0xc1cc4f11
+
+/* clock_gcc controlled clocks */
+#define clk_debug_mmss_clk			0x977c99b6
+#define clk_debug_rpm_clk			0x8e2b07ca
+#define clk_debug_cpu_clk			0x0e696b2b
+#define clk_gpu_gcc_debug_clk			0x3eb88190
+#define clk_gfx_gcc_debug_clk			0xa3a64fec
+#define clk_gpll0				0x1ebe3bc4
+#define clk_gpll0_out_main			0xe9374de7
+#define clk_gpll0_ao				0xa1368304
+#define clk_gcc_mmss_gpll0_clk			0x8050f008
+#define clk_gcc_mmss_gpll0_div_clk		0xdd06848d
+#define clk_gcc_gpu_gpll0_clk			0xdad7a7a4
+#define clk_gcc_gpu_gpll0_div_clk		0x07d16c6a
+#define clk_gpll4				0xb3b5d85b
+#define clk_gpll4_out_main			0xa9a0ab9d
+#define clk_usb30_master_clk_src		0xc6262f89
+#define clk_pcie_aux_clk_src			0xebc50566
+#define clk_ufs_axi_clk_src			0x297ca380
+#define clk_blsp1_qup1_i2c_apps_clk_src		0x17f78f5e
+#define clk_blsp1_qup1_spi_apps_clk_src		0xf534c4fa
+#define clk_blsp1_qup2_i2c_apps_clk_src		0x8de71c79
+#define clk_blsp1_qup2_spi_apps_clk_src		0x33cf809a
+#define clk_blsp1_qup3_i2c_apps_clk_src		0xf161b902
+#define clk_blsp1_qup3_spi_apps_clk_src		0x5e95683f
+#define clk_blsp1_qup4_i2c_apps_clk_src		0xb2ecce68
+#define clk_blsp1_qup4_spi_apps_clk_src		0xddb5bbdb
+#define clk_blsp1_qup5_i2c_apps_clk_src		0x71ea7804
+#define clk_blsp1_qup5_spi_apps_clk_src		0x9752f35f
+#define clk_blsp1_qup6_i2c_apps_clk_src		0x28806803
+#define clk_blsp1_qup6_spi_apps_clk_src		0x44a1edc4
+#define clk_blsp1_uart1_apps_clk_src		0xf8146114
+#define clk_blsp1_uart2_apps_clk_src		0xfc9c2f73
+#define clk_blsp1_uart3_apps_clk_src		0x600497f2
+#define clk_blsp2_qup1_i2c_apps_clk_src		0xd6d1e95d
+#define clk_blsp2_qup1_spi_apps_clk_src		0xcc1b8365
+#define clk_blsp2_qup2_i2c_apps_clk_src		0x603b5c51
+#define clk_blsp2_qup2_spi_apps_clk_src		0xd577dc44
+#define clk_blsp2_qup3_i2c_apps_clk_src		0xea82959c
+#define clk_blsp2_qup3_spi_apps_clk_src		0xd04b1e92
+#define clk_blsp2_qup4_i2c_apps_clk_src		0x73dc968c
+#define clk_blsp2_qup4_spi_apps_clk_src		0x25d4a2b1
+#define clk_blsp2_qup5_i2c_apps_clk_src		0xcc3698bd
+#define clk_blsp2_qup5_spi_apps_clk_src		0xfa0cf45e
+#define clk_blsp2_qup6_i2c_apps_clk_src		0x2fa53151
+#define clk_blsp2_qup6_spi_apps_clk_src		0x5ca86755
+#define clk_blsp2_uart1_apps_clk_src		0x562c66dc
+#define clk_blsp2_uart2_apps_clk_src		0xdd448080
+#define clk_blsp2_uart3_apps_clk_src		0x46b2e90f
+#define clk_gp1_clk_src				0xad85b97a
+#define clk_gp2_clk_src				0xfb1f0065
+#define clk_gp3_clk_src				0x63b693d6
+#define clk_hmss_rbcpr_clk_src			0xedd9a474
+#define clk_pdm2_clk_src			0x31e494fd
+#define clk_sdcc2_apps_clk_src			0xfc46c821
+#define clk_sdcc4_apps_clk_src			0x7aaaaa0c
+#define clk_tsif_ref_clk_src			0x4e9042d1
+#define clk_ufs_ice_core_clk_src		0xda8e7119
+#define clk_ufs_phy_aux_clk_src			0xc6bca085
+#define clk_ufs_unipro_core_clk_src		0x179e80a9
+#define clk_usb30_mock_utmi_clk_src		0xa024a976
+#define clk_usb3_phy_aux_clk_src		0x15eec63c
+#define clk_qspi_ref_clk_src			0xfe6b8e11
+#define clk_gcc_pcie_phy_0_reset		0x6bb4df33
+#define clk_gcc_usb3_phy_reset			0x03d559f1
+#define clk_gcc_usb3phy_phy_reset		0xb1a4f885
+#define clk_gcc_aggre1_ufs_axi_clk		0x873459d8
+#define clk_gcc_aggre1_ufs_axi_hw_ctl_clk	0x117a6f39
+#define clk_gcc_aggre1_usb3_axi_clk		0xc5c3fbe8
+#define clk_gcc_bimc_mss_q6_axi_clk		0x7437988f
+#define clk_gcc_blsp1_ahb_clk			0x8caa5b4f
+#define clk_gcc_blsp1_qup1_i2c_apps_clk		0xc303fae9
+#define clk_gcc_blsp1_qup1_spi_apps_clk		0x759a76b0
+#define clk_gcc_blsp1_qup2_i2c_apps_clk		0x1076f220
+#define clk_gcc_blsp1_qup2_spi_apps_clk		0x3e77d48f
+#define clk_gcc_blsp1_qup3_i2c_apps_clk		0x9e25ac82
+#define clk_gcc_blsp1_qup3_spi_apps_clk		0xfb978880
+#define clk_gcc_blsp1_qup4_i2c_apps_clk		0xd7f40f6f
+#define clk_gcc_blsp1_qup4_spi_apps_clk		0x80f8722f
+#define clk_gcc_blsp1_qup5_i2c_apps_clk		0xacae5604
+#define clk_gcc_blsp1_qup5_spi_apps_clk		0xbf3e15d7
+#define clk_gcc_blsp1_qup6_i2c_apps_clk		0x5c6ad820
+#define clk_gcc_blsp1_qup6_spi_apps_clk		0x780d9f85
+#define clk_gcc_blsp1_uart1_apps_clk		0xc7c62f90
+#define clk_gcc_blsp1_uart2_apps_clk		0xf8a61c96
+#define clk_gcc_blsp1_uart3_apps_clk		0xc3298bd7
+#define clk_gcc_blsp2_ahb_clk			0x8f283c1d
+#define clk_gcc_blsp2_qup1_i2c_apps_clk		0x9ace11dd
+#define clk_gcc_blsp2_qup1_spi_apps_clk		0xa32604cc
+#define clk_gcc_blsp2_qup2_i2c_apps_clk		0x1bf9a57e
+#define clk_gcc_blsp2_qup2_spi_apps_clk		0xbf54ca6d
+#define clk_gcc_blsp2_qup3_i2c_apps_clk		0x336d4170
+#define clk_gcc_blsp2_qup3_spi_apps_clk		0xc68509d6
+#define clk_gcc_blsp2_qup4_i2c_apps_clk		0xbd22539d
+#define clk_gcc_blsp2_qup4_spi_apps_clk		0x01a72b93
+#define clk_gcc_blsp2_qup5_i2c_apps_clk		0xe2b2ce1d
+#define clk_gcc_blsp2_qup5_spi_apps_clk		0xf40999cd
+#define clk_gcc_blsp2_qup6_i2c_apps_clk		0x894bcea4
+#define clk_gcc_blsp2_qup6_spi_apps_clk		0xfe1bd34a
+#define clk_gcc_blsp2_uart1_apps_clk		0x8c3512ff
+#define clk_gcc_blsp2_uart2_apps_clk		0x1e1965a3
+#define clk_gcc_blsp2_uart3_apps_clk		0x382415ab
+#define clk_gcc_boot_rom_ahb_clk		0xde2adeb1
+#define clk_gcc_bimc_gfx_clk			0x3edd69ad
+#define clk_gcc_cfg_noc_usb3_axi_clk		0x9ea4c2d9
+#define clk_gcc_gp1_clk				0x057f7b69
+#define clk_gcc_gp2_clk				0x9bf83ffd
+#define clk_gcc_gp3_clk				0xec6539ee
+#define clk_gcc_gpu_bimc_gfx_clk		0x3909459b
+#define clk_gcc_gpu_cfg_ahb_clk			0x72f20a57
+#define clk_gcc_gpu_iref_clk			0xfd82abad
+#define clk_gcc_hmss_dvm_bus_clk		0x17cc8b53
+#define clk_gcc_hmss_rbcpr_clk			0x699183be
+#define clk_hmss_gpll0_clk_src			0x17eb05d0
+#define clk_hmss_gpll4_clk_src			0x20456cae
+#define clk_gcc_mmss_sys_noc_axi_clk		0x4467b15b
+#define clk_gcc_mss_at_clk			0x1692c5aa
+#define clk_nav_gcc_dbg_clk			0x2221c544
+#define clk_gcc_pcie_0_aux_clk			0x3d2e3ece
+#define clk_gcc_pcie_0_cfg_ahb_clk		0x4dd325c3
+#define clk_gcc_pcie_0_mstr_axi_clk		0x3f85285b
+#define clk_gcc_pcie_0_pipe_clk			0x4f37621e
+#define clk_gcc_pcie_0_slv_axi_clk		0xd69638a1
+#define clk_gcc_pcie_phy_aux_clk		0x4746e74f
+#define clk_gcc_pdm2_clk			0x99d55711
+#define clk_gcc_pdm_ahb_clk			0x365664f6
+#define clk_gcc_prng_ahb_clk			0x397e7eaa
+#define clk_gcc_sdcc2_ahb_clk			0x23d5727f
+#define clk_gcc_sdcc2_apps_clk			0x861b20ac
+#define clk_gcc_sdcc4_ahb_clk			0x64f3e6a8
+#define clk_gcc_sdcc4_apps_clk			0xbf7c4dc8
+#define clk_gcc_tsif_ahb_clk			0x88d2822c
+#define clk_gcc_tsif_ref_clk			0x8f1ed2c2
+#define clk_gcc_ufs_ahb_clk			0x1914bb84
+#define clk_gcc_ufs_axi_clk			0x47c743a7
+#define clk_gcc_ufs_axi_hw_ctl_clk		0x69385b45
+#define clk_gcc_ufs_ice_core_clk		0x310b0710
+#define clk_gcc_ufs_ice_core_hw_ctl_clk		0x84e15a5b
+#define clk_gcc_ufs_phy_aux_clk			0x17acc8fb
+#define clk_gcc_ufs_phy_aux_hw_ctl_clk		0x7dbdb2e2
+#define clk_gcc_ufs_rx_symbol_0_clk		0x7f43251c
+#define clk_gcc_ufs_rx_symbol_1_clk		0x03182fde
+#define clk_gcc_ufs_tx_symbol_0_clk		0x6a9f747a
+#define clk_ufs_tx_symbol_0_clk			0xb3fcd0f7
+#define clk_ufs_rx_symbol_0_clk			0x17a0f1cd
+#define clk_gcc_ufs_unipro_core_clk		0x2daf7fd2
+#define clk_gcc_ufs_unipro_core_hw_ctl_clk	0x4a4e0f3d
+#define clk_gcc_usb30_master_clk		0xb3b4e2cb
+#define clk_gcc_usb30_mock_utmi_clk		0xa800b65a
+#define clk_gcc_usb30_sleep_clk			0xd0b65c92
+#define clk_gcc_usb3_phy_aux_clk		0x0d9a36e0
+#define clk_gcc_usb3_phy_pipe_clk		0xf279aff2
+#define clk_gcc_usb3_clkref_clk			0xb6cc8f00
+#define clk_gcc_hdmi_clkref_clk			0x4d4eec04
+#define clk_gcc_edp_clkref_clk			0xa8685c3f
+#define clk_gcc_ufs_clkref_clk			0x92aa126f
+#define clk_gcc_pcie_clkref_clk			0xa2e247fa
+#define clk_gcc_rx2_qlink_clkref_clk		0xd0ba986d
+#define clk_gcc_rx1_usb2_clkref_clk		0x53351d25
+#define clk_gcc_pcie_phy_reset			0x9bc3c959
+#define clk_gcc_pcie_phy_com_reset		0x8bf513e6
+#define clk_gcc_pcie_phy_nocsr_com_phy_reset	0x0c16a2da
+#define clk_gcc_qusb2phy_prim_reset		0x07550fa1
+#define clk_gcc_qusb2phy_sec_reset		0x3f3a87d0
+#define clk_gcc_mmss_noc_cfg_ahb_clk		0xb41a9d99
+#define clk_gcc_dcc_ahb_clk			0xfa14a88c
+#define clk_hlos1_vote_lpass_core_smmu_clk	0x3aaa1743
+#define clk_hlos1_vote_lpass_adsp_smmu_clk	0xc76f702f
+#define clk_gcc_mss_cfg_ahb_clk			0x111cde81
+#define clk_gcc_mss_q6_bimc_axi_clk		0x67544d62
+#define clk_gcc_mss_mnoc_bimc_axi_clk		0xf665d03f
+#define clk_gpll0_out_msscc			0x7d794829
+#define clk_gcc_mss_snoc_axi_clk		0x0e71de85
+#define clk_gcc_qspi_ref_clk			0x766a0f7c
+#define clk_gcc_qspi_ahb_clk			0x96969dc8
+#define clk_gcc_debug_mux			0x8121ac15
+
+/* clock_mmss controlled clocks */
+#define clk_mmsscc_xo				0x05e63704
+#define clk_mmsscc_gpll0			0xe900c515
+#define clk_mmsscc_gpll0_div			0x73892e05
+#define clk_mmpll0_pll				0x361e3cfd
+#define clk_mmpll1_pll				0x198e426b
+#define clk_mmpll3_pll				0x18c76899
+#define clk_mmpll4_pll				0x22c063c1
+#define clk_mmpll5_pll				0xa41e1936
+#define clk_mmpll6_pll				0xc56fb440
+#define clk_mmpll7_pll				0x3ac216af
+#define clk_mmpll10_pll				0x2561263b
+#define clk_mmpll0_pll_out			0x1e9e24a8
+#define clk_mmpll1_pll_out			0x5fa32257
+#define clk_mmpll3_pll_out			0x6eb6328f
+#define clk_mmpll4_pll_out			0xfb21c2fd
+#define clk_mmpll5_pll_out			0xcc1897bf
+#define clk_mmpll6_pll_out			0xfb1060bd
+#define clk_mmpll7_pll_out			0x767758ed
+#define clk_mmpll10_pll_out			0x3c5668f3
+#define clk_ahb_clk_src				0x86f49203
+#define clk_csi0_clk_src			0x227e65bc
+#define clk_vfe0_clk_src			0xa0c2bd8f
+#define clk_vfe1_clk_src			0x4e357366
+#define clk_mdp_clk_src				0x6dc1f8f1
+#define clk_maxi_clk_src			0x52c09777
+#define clk_cpp_clk_src				0x8382f56d
+#define clk_jpeg0_clk_src			0x9a0a0ac3
+#define clk_rot_clk_src				0xce49b56c
+#define clk_video_core_clk_src			0x8be4c944
+#define clk_csi1_clk_src			0x6a2a6c36
+#define clk_csi2_clk_src			0x4113589f
+#define clk_csi3_clk_src			0xfd934012
+#define clk_fd_core_clk_src			0xe4799ab7
+#define clk_ext_dp_phy_pll_vco			0x441b576b
+#define clk_ext_dp_phy_pll_link			0xea12644c
+#define clk_dp_link_clk_src			0x370d0626
+#define clk_dp_crypto_clk_src			0xf8faa811
+#define clk_dp_pixel_clk_src			0xf5dfbabf
+#define clk_ext_extpclk_clk_src			0xe5b273af
+#define clk_ext_pclk0_clk_src			0x087c1612
+#define clk_ext_pclk1_clk_src			0x8067c5a3
+#define clk_pclk0_clk_src			0xccac1f35
+#define clk_pclk1_clk_src			0x090f68ac
+#define clk_video_subcore0_clk_src		0x88d79636
+#define clk_video_subcore1_clk_src		0x4966930c
+#define clk_cci_clk_src				0x822f3d97
+#define clk_camss_gp0_clk_src			0x43b063e9
+#define clk_camss_gp1_clk_src			0xa3315f1b
+#define clk_mclk0_clk_src			0x266b3853
+#define clk_mclk1_clk_src			0xa73cad0c
+#define clk_mclk2_clk_src			0x42545468
+#define clk_mclk3_clk_src			0x2bfbb714
+#define clk_csiphy_clk_src			0x8cceb70a
+#define clk_csi0phytimer_clk_src		0xc8a309be
+#define clk_csi1phytimer_clk_src		0x7c0fe23a
+#define clk_csi2phytimer_clk_src		0x62ffea9c
+#define clk_ext_byte0_clk_src			0xfb32f31e
+#define clk_ext_byte1_clk_src			0x585ef6d4
+#define clk_byte0_clk_src			0x75cc885b
+#define clk_byte1_clk_src			0x63c2c955
+#define clk_dp_aux_clk_src			0x2b6e972b
+#define clk_dp_gtc_clk_src			0xc5a86a42
+#define clk_esc0_clk_src			0xb41d7c38
+#define clk_esc1_clk_src			0x3b0afa42
+#define clk_extpclk_clk_src			0xb2c31abd
+#define clk_hdmi_clk_src			0xb40aeea9
+#define clk_vsync_clk_src			0xecb43940
+#define clk_mmss_bimc_smmu_ahb_clk		0x4825baf4
+#define clk_mmss_bimc_smmu_axi_clk		0xc365ac39
+#define clk_mmss_snoc_dvm_axi_clk		0x2c159a11
+#define clk_mmss_camss_ahb_clk			0xa51f2c1d
+#define clk_mmss_camss_cci_ahb_clk		0xfda8bb6a
+#define clk_mmss_camss_cci_clk			0x71bb5c97
+#define clk_mmss_camss_cpp_ahb_clk		0xd5554f15
+#define clk_mmss_camss_cpp_clk			0x8e99ef57
+#define clk_mmss_camss_cpp_axi_clk		0xd84e390b
+#define clk_mmss_camss_cpp_vbif_ahb_clk		0x1b33a88e
+#define clk_mmss_camss_cphy_csid0_clk		0x56114361
+#define clk_mmss_camss_csi0_ahb_clk		0x2b58d241
+#define clk_mmss_camss_csi0_clk			0xccfe39ef
+#define clk_mmss_camss_csi0pix_clk		0x9e26509d
+#define clk_mmss_camss_csi0rdi_clk		0x01d5bf83
+#define clk_mmss_camss_cphy_csid1_clk		0x79fbcd8a
+#define clk_mmss_camss_csi1_ahb_clk		0x7073244b
+#define clk_mmss_camss_csi1_clk			0x3eeeaac0
+#define clk_mmss_camss_csi1pix_clk		0xf1375139
+#define clk_mmss_camss_csi1rdi_clk		0x43185024
+#define clk_mmss_camss_cphy_csid2_clk		0xf295e3ef
+#define clk_mmss_camss_csi2_ahb_clk		0x681c1479
+#define clk_mmss_camss_csi2_clk			0x94524569
+#define clk_mmss_camss_csi2pix_clk		0xf4de617d
+#define clk_mmss_camss_csi2rdi_clk		0x4bf01dc5
+#define clk_mmss_camss_cphy_csid3_clk		0x100188e9
+#define clk_mmss_camss_csi3_ahb_clk		0xfae7c29b
+#define clk_mmss_camss_csi3_clk			0x55e4bbae
+#define clk_mmss_camss_csi3pix_clk		0xc166a015
+#define clk_mmss_camss_csi3rdi_clk		0x6983a4cd
+#define clk_mmss_camss_csi_vfe0_clk		0x3b30b798
+#define clk_mmss_camss_csi_vfe1_clk		0xfe729af7
+#define clk_mmss_camss_csiphy0_clk		0x96c81af8
+#define clk_mmss_camss_csiphy1_clk		0xee9ac2bb
+#define clk_mmss_camss_csiphy2_clk		0x3365e70e
+#define clk_mmss_fd_ahb_clk			0x4ff1da4d
+#define clk_mmss_fd_core_clk			0x749e7eb0
+#define clk_mmss_fd_core_uar_clk		0x8ea480c5
+#define clk_mmss_camss_gp0_clk			0x3f7f6c87
+#define clk_mmss_camss_gp1_clk			0xdccdd730
+#define clk_mmss_camss_ispif_ahb_clk		0xbda4f0e3
+#define clk_mmss_camss_jpeg0_clk		0x4cc73b07
+#define clk_mmss_camss_jpeg0_vote_clk		0xc9efa6ac
+#define clk_mmss_camss_jpeg0_dma_vote_clk	0x371ec109
+#define clk_mmss_camss_jpeg_ahb_clk		0xde1fece3
+#define clk_mmss_camss_jpeg_axi_clk		0x7534616b
+#define clk_mmss_camss_mclk0_clk		0x056293a7
+#define clk_mmss_camss_mclk1_clk		0x96c7b69b
+#define clk_mmss_camss_mclk2_clk		0x8820556e
+#define clk_mmss_camss_mclk3_clk		0xf90ffb67
+#define clk_mmss_camss_micro_ahb_clk		0x6c6fd3c7
+#define clk_mmss_camss_csi0phytimer_clk		0x7a78864e
+#define clk_mmss_camss_csi1phytimer_clk		0x6e6c1de5
+#define clk_mmss_camss_csi2phytimer_clk		0x0235e2de
+#define clk_mmss_camss_top_ahb_clk		0x120618d6
+#define clk_mmss_camss_vfe0_ahb_clk		0x137bd0bd
+#define clk_mmss_camss_vfe0_clk			0xead28288
+#define clk_mmss_camss_vfe0_stream_clk		0xa0428287
+#define clk_mmss_camss_vfe1_ahb_clk		0xac0154c0
+#define clk_mmss_camss_vfe1_clk			0xc216b14d
+#define clk_mmss_camss_vfe1_stream_clk		0x745af3b6
+#define clk_mmss_camss_vfe_vbif_ahb_clk		0x0109a9c6
+#define clk_mmss_camss_vfe_vbif_axi_clk		0xe626d8a1
+#define clk_mmss_mdss_ahb_clk			0x85d37ab5
+#define clk_mmss_mdss_axi_clk			0xdf04fc1d
+#define clk_mmss_mdss_byte0_clk			0x38105d25
+#define clk_mmss_mdss_byte0_intf_clk		0x38e5aa79
+#define clk_mmss_mdss_byte0_intf_div_clk	0x8604f181
+#define clk_mmss_mdss_byte1_clk			0xe0c21354
+#define clk_mmss_mdss_byte1_intf_clk		0xcf654d8e
+#define clk_mmss_mdss_byte1_intf_div_clk	0xcdf334c5
+#define clk_mmss_mdss_dp_aux_clk		0x23125eb6
+#define clk_mmss_mdss_dp_crypto_clk		0x9a072d4e
+#define clk_mmss_mdss_dp_link_clk		0x8dd302d1
+#define clk_mmss_mdss_dp_link_intf_clk		0x70e386e6
+#define clk_mmss_mdss_dp_pixel_clk		0xb707b765
+#define clk_mmss_mdss_dp_gtc_clk		0xb59c151a
+#define clk_mmss_mdss_esc0_clk			0x5721ff83
+#define clk_mmss_mdss_esc1_clk			0xc3d0376b
+#define clk_mmss_mdss_extpclk_clk		0x74d5a954
+#define clk_mmss_mdss_hdmi_clk			0x28460a6d
+#define clk_mmss_mdss_hdmi_dp_ahb_clk		0x5448519f
+#define clk_mmss_mdss_mdp_clk			0x43539b0e
+#define clk_mmss_mdss_mdp_lut_clk		0x00627b2b
+#define clk_mmss_mdss_pclk0_clk			0xcc0e909d
+#define clk_mmss_mdss_pclk1_clk			0x850d9146
+#define clk_mmss_mdss_rot_clk			0xbb7e71c4
+#define clk_mmss_mdss_vsync_clk			0x629b36dc
+#define clk_mmss_misc_ahb_clk			0xea30b0e7
+#define clk_mmss_misc_cxo_clk			0xe620cd80
+#define clk_mmss_mnoc_ahb_clk			0x49a394f4
+#define clk_mmss_mnoc_maxi_clk			0xd8b7278f
+#define clk_mmss_video_subcore0_clk		0x23fae359
+#define clk_mmss_video_subcore1_clk		0x5213a0c7
+#define clk_mmss_video_ahb_clk			0x94334ae9
+#define clk_mmss_video_axi_clk			0xf3178ba5
+#define clk_mmss_video_core_clk			0x78f14c85
+#define clk_mmss_video_maxi_clk			0x1785ef88
+#define clk_mmss_vmem_ahb_clk			0x4b18955b
+#define clk_mmss_vmem_maxi_clk			0xb6067889
+#define clk_mmss_debug_mux			0xe646ffda
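+/*
+ * Note: the clk_* values in this header are opaque 32-bit identifiers
+ * that the MSM clock driver matches against its clock tables; they are
+ * unique per-name tokens, not hardware register offsets.
+ */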
+
+/* external multimedia clocks */
+#define clk_dsi0pll_byteclk_mux			0xecf2c434
+#define clk_dsi0pll_byteclk_src			0x6f6f740f
+#define clk_dsi0pll_pclk_mux			0x6c9da335
+#define clk_dsi0pll_pclk_src			0x5efd85d4
+#define clk_dsi0pll_pclk_src_mux		0x84b14663
+#define clk_dsi0pll_post_bit_div		0xf46dcf27
+#define clk_dsi0pll_pll_out_div1		0xeda5b7fe
+#define clk_dsi0pll_pll_out_div2		0x97fa476d
+#define clk_dsi0pll_pll_out_div4		0x90a98ce0
+#define clk_dsi0pll_pll_out_div8		0x9d9d85cf
+#define clk_dsi0pll_pll_out_mux			0x179c27ca
+#define clk_dsi0pll_post_vco_mux		0xfaf9bd1f
+#define clk_dsi0pll_post_vco_div1		0xabb50b2a
+#define clk_dsi0pll_post_vco_div4		0xbe51c091
+#define clk_dsi0pll_bitclk_src			0x36c3c437
+#define clk_dsi0pll_vco_clk			0x15940d40
+
+#define clk_dsi1pll_byteclk_mux			0x14e2f38f
+#define clk_dsi1pll_byteclk_src			0x4b65c298
+#define clk_dsi1pll_pclk_mux			0x4c0518b5
+#define clk_dsi1pll_pclk_src			0xeddcd80e
+#define clk_dsi1pll_pclk_src_mux		0x3651feb3
+#define clk_dsi1pll_post_bit_div		0x712f0260
+#define clk_dsi1pll_pll_out_div8		0x87628ddb
+#define clk_dsi1pll_pll_out_div4		0x0d9a384b
+#define clk_dsi1pll_pll_out_div2		0x0c9b5748
+#define clk_dsi1pll_pll_out_div1		0x3193164e
+#define clk_dsi1pll_pll_out_mux			0x171bf8fd
+#define clk_dsi1pll_post_vco_mux		0xc6a90d20
+#define clk_dsi1pll_post_vco_div1		0x6f47ca7d
+#define clk_dsi1pll_post_vco_div4		0x90628974
+#define clk_dsi1pll_bitclk_src			0x13ab045b
+#define clk_dsi1pll_vco_clk			0x99797b50
+
+#define clk_dp_vco_clk				0xfcaaeec7
+#define clk_dp_link_2x_clk_divsel_five		0xcfe3f5dd
+#define clk_vco_divsel_four_clk_src		0xe0da19c0
+#define clk_vco_divsel_two_clk_src		0xb5cfc6a8
+#define clk_vco_divided_clk_src_mux		0x3f8197c2
+#define clk_hdmi_vco_clk			0xbb7dc20d
+
+/* clock_gpu controlled clocks */
+#define clk_gpucc_xo				0xc4e1a890
+#define clk_gpucc_gpll0				0x0db0e37f
+#define clk_gfx3d_clk_src			0x917f76ef
+#define clk_rbbmtimer_clk_src			0x17649ecc
+#define clk_gfx3d_isense_clk_src		0xecc3eafa
+#define clk_rbcpr_clk_src			0x2c2e9af2
+#define clk_gpu_debug_div_clk			0x75d6f53f
+#define clk_gpucc_gfx3d_clk			0x95f01bd5
+#define clk_gpucc_rbbmtimer_clk			0x58a0a7ca
+#define clk_gpucc_gfx3d_isense_clk		0xb2678e80
+#define clk_gpucc_cxo_clk			0x6532dcae
+#define clk_gpucc_rbcpr_clk			0x7bd750e8
+#define clk_gpu_pll0_pll			0x0e61ab4d
+#define clk_gpu_pll0_pll_out_even		0xb0ed5009
+#define clk_gpu_pll0_pll_out_odd		0x08c5a8a5
+#define clk_gpu_pll0_postdiv_clk		0x76c19f3c
+#define clk_gpucc_mx_clk			0x1edbb879
+#define clk_gpucc_gcc_dbg_clk			0x9ae8cd3c
+#define clk_gfxcc_dbg_clk			0x3ed47625
+
+/* CPU clocks */
+#define clk_pwrcl_clk				0xc554130e
+#define clk_perfcl_clk				0x58869997
+#define clk_sys_apcsaux_clk_gcc			0xf905e862
+#define clk_xo_ao				0x428c856d
+#define clk_osm_clk_src				0xaabe68c3
+#define clk_cpu_debug_mux			0x3ae8bcb2
+
+/* Audio External Clocks */
+#define clk_audio_ap_clk			0x9b5727cb
+#define clk_audio_pmi_clk			0xcbfe416d
+#define clk_audio_ap_clk2			0x454d1e91
+
+/* GCC block resets */
+#define QUSB2PHY_PRIM_BCR			0
+#define QUSB2PHY_SEC_BCR			1
+#define BLSP1_BCR				2
+#define BLSP2_BCR				3
+#define BOOT_ROM_BCR				4
+#define PRNG_BCR				5
+#define UFS_BCR					6
+#define USB_30_BCR				7
+#define USB3_PHY_BCR				8
+#define USB3PHY_PHY_BCR				9
+#define PCIE_0_PHY_BCR				10
+#define PCIE_PHY_BCR				11
+#define PCIE_PHY_COM_BCR			12
+#define PCIE_PHY_NOCSR_COM_PHY_BCR		13
+
+/* MMSS block resets */
+#define CAMSS_MICRO_BCR				0
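+/*
+ * The reset indices above are used as DT reset specifiers, e.g.
+ * resets = <&clock_gcc USB_30_BCR>; (the phandle name is illustrative).
+ */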
+
+#endif
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/include/dt-bindings/clock/msm-clocks-hwio-8998.h	2019-01-22 16:16:28.171288678 +0100
@@ -0,0 +1,399 @@
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define CLKFLAG_NO_RATE_CACHE					0x00004000
+
+#define FMAX_LOWER						0
+#define FMAX_LOW						1
+#define FMAX_NOM						2
+#define FMAX_TURBO						3
+
+#define HALT_CHECK_DELAY					5
+
+#define RPM_MISC_CLK_TYPE					0x306b6c63
+#define RPM_BUS_CLK_TYPE					0x316b6c63
+#define RPM_MEM_CLK_TYPE					0x326b6c63
+#define RPM_IPA_CLK_TYPE					0x617069
+#define RPM_CE_CLK_TYPE						0x6563
+#define RPM_AGGR_CLK_TYPE					0x72676761
+#define RPM_SMD_KEY_ENABLE					0x62616E45
+#define RPM_MMAXI_CLK_TYPE					0x69786d6d
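+/*
+ * The RPM_*_TYPE values are RPM resource names encoded as little-endian
+ * ASCII: 0x306b6c63 is "clk0", 0x617069 is "ipa", 0x62616E45 is "Enab".
+ */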
+
+#define CXO_CLK_SRC_ID						0x0
+#define QDSS_CLK_ID						0x1
+#define SNOC_CLK_ID						0x1
+#define CNOC_CLK_ID						0x2
+#define CNOC_PERIPH_CLK_ID					0x0
+#define BIMC_CLK_ID						0x0
+#define IPA_CLK_ID						0x0
+#define CE1_CLK_ID						0x0
+#define RF_CLK1_ID						0x4
+#define RF_CLK2_ID						0x5
+#define RF_CLK3_ID						0x6
+#define LN_BB_CLK1_ID						0x1
+#define LN_BB_CLK2_ID						0x2
+#define LN_BB_CLK3_ID						0x3
+#define DIV_CLK1_ID						0xb
+#define DIV_CLK2_ID						0xc
+#define DIV_CLK3_ID						0xd
+#define RF_CLK1_PIN_ID						0x4
+#define RF_CLK2_PIN_ID						0x5
+#define RF_CLK3_PIN_ID						0x6
+#define LN_BB_CLK1_PIN_ID					0x1
+#define LN_BB_CLK2_PIN_ID					0x2
+#define LN_BB_CLK3_PIN_ID					0x3
+#define AGGR1_NOC_ID						0x1
+#define AGGR2_NOC_ID						0x2
+#define MMSSNOC_AXI_CLK_ID					0x0
+
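+/*
+ * Register offsets below follow the usual Qualcomm clock-controller
+ * naming: *_CMD_RCGR is a root clock generator's command register,
+ * *_CBCR a clock branch control register and *_BCR a block reset register.
+ */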
+#define APCS_COMMON_LMH_CMD_RCGR				0x0012C
+
+#define GCC_APCS_GPLL_ENA_VOTE					0x52000
+#define GCC_APCS_CLOCK_BRANCH_ENA_VOTE				0x52004
+#define GCC_APCS_CLOCK_BRANCH_ENA_VOTE_1			0x5200C
+#define PLLTEST_PAD_CFG						0x6200C
+#define GCC_XO_DIV4_CBCR				        0x43008
+#define CLOCK_FRQ_MEASURE_CTL				        0x62004
+#define CLOCK_FRQ_MEASURE_STATUS			        0x62008
+#define GCC_GPLL0_MODE						0x00000
+#define GCC_GPLL4_MODE						0x77000
+#define GCC_USB30_MASTER_CMD_RCGR				0x0F014
+#define GCC_PCIE_AUX_CMD_RCGR					0x6C000
+#define GCC_UFS_AXI_CMD_RCGR					0x75018
+#define GCC_BLSP1_QUP1_I2C_APPS_CMD_RCGR			0x19020
+#define GCC_BLSP1_QUP1_SPI_APPS_CMD_RCGR			0x1900C
+#define GCC_BLSP1_QUP2_I2C_APPS_CMD_RCGR			0x1B020
+#define GCC_BLSP1_QUP2_SPI_APPS_CMD_RCGR			0x1B00C
+#define GCC_BLSP1_QUP3_I2C_APPS_CMD_RCGR			0x1D020
+#define GCC_BLSP1_QUP3_SPI_APPS_CMD_RCGR			0x1D00C
+#define GCC_BLSP1_QUP4_I2C_APPS_CMD_RCGR			0x1F020
+#define GCC_BLSP1_QUP4_SPI_APPS_CMD_RCGR			0x1F00C
+#define GCC_BLSP1_QUP5_I2C_APPS_CMD_RCGR			0x21020
+#define GCC_BLSP1_QUP5_SPI_APPS_CMD_RCGR			0x2100C
+#define GCC_BLSP1_QUP6_I2C_APPS_CMD_RCGR			0x23020
+#define GCC_BLSP1_QUP6_SPI_APPS_CMD_RCGR			0x2300C
+#define GCC_BLSP1_UART1_APPS_CMD_RCGR				0x1A00C
+#define GCC_BLSP1_UART2_APPS_CMD_RCGR				0x1C00C
+#define GCC_BLSP1_UART3_APPS_CMD_RCGR				0x1E00C
+#define GCC_BLSP2_QUP1_I2C_APPS_CMD_RCGR			0x26020
+#define GCC_BLSP2_QUP2_I2C_APPS_CMD_RCGR			0x28020
+#define GCC_BLSP2_QUP3_I2C_APPS_CMD_RCGR			0x2A020
+#define GCC_BLSP2_QUP4_I2C_APPS_CMD_RCGR			0x2C020
+#define GCC_BLSP2_QUP5_I2C_APPS_CMD_RCGR			0x2E020
+#define GCC_BLSP2_QUP6_I2C_APPS_CMD_RCGR			0x30020
+#define GCC_BLSP2_QUP1_SPI_APPS_CMD_RCGR			0x2600C
+#define GCC_BLSP2_QUP2_SPI_APPS_CMD_RCGR			0x2800C
+#define GCC_BLSP2_QUP3_SPI_APPS_CMD_RCGR			0x2A00C
+#define GCC_BLSP2_QUP4_SPI_APPS_CMD_RCGR			0x2C00C
+#define GCC_BLSP2_QUP5_SPI_APPS_CMD_RCGR			0x2E00C
+#define GCC_BLSP2_QUP6_SPI_APPS_CMD_RCGR			0x3000C
+#define GCC_BLSP2_UART1_APPS_CMD_RCGR				0x2700C
+#define GCC_BLSP2_UART2_APPS_CMD_RCGR				0x2900C
+#define GCC_BLSP2_UART3_APPS_CMD_RCGR				0x2B00C
+#define GCC_GP1_CMD_RCGR					0x64004
+#define GCC_GP2_CMD_RCGR					0x65004
+#define GCC_GP3_CMD_RCGR					0x66004
+#define GCC_HMSS_RBCPR_CMD_RCGR					0x48044
+#define GCC_PDM2_CMD_RCGR					0x33010
+#define GCC_SDCC2_APPS_CMD_RCGR					0x14010
+#define GCC_SDCC4_APPS_CMD_RCGR					0x16010
+#define GCC_TSIF_REF_CMD_RCGR					0x36010
+#define GCC_UFS_ICE_CORE_CMD_RCGR				0x76010
+#define GCC_UFS_PHY_AUX_CMD_RCGR				0x76044
+#define GCC_UFS_UNIPRO_CORE_CMD_RCGR				0x76028
+#define GCC_USB30_MOCK_UTMI_CMD_RCGR				0x0F028
+#define GCC_USB3_PHY_AUX_CMD_RCGR				0x5000C
+#define GCC_QSPI_REF_CMD_RCGR					0x9000C
+#define GCC_PCIE_0_PHY_BCR					0x6C01C
+#define GCC_HDMI_CLKREF_EN					0x88000
+#define GCC_UFS_CLKREF_EN					0x88004
+#define GCC_USB3_CLKREF_EN					0x88008
+#define GCC_PCIE_CLKREF_EN					0x8800C
+#define GCC_RX1_USB2_CLKREF_EN					0x88014
+#define GCC_USB3_PHY_BCR					0x50020
+#define GCC_AGGRE1_NOC_XO_CBCR					0x8202C
+#define GCC_AGGRE1_UFS_AXI_CBCR					0x82028
+#define GCC_AGGRE1_USB3_AXI_CBCR				0x82024
+#define GCC_BIMC_MSS_Q6_AXI_CBCR				0x4401C
+#define GCC_BLSP1_AHB_CBCR					0x17004
+#define GCC_BLSP1_BCR						0x17000
+#define GCC_BLSP1_QUP1_SPI_APPS_CBCR				0x19004
+#define GCC_BLSP1_QUP1_I2C_APPS_CBCR				0x19008
+#define GCC_BLSP1_QUP2_SPI_APPS_CBCR				0x1B004
+#define GCC_BLSP1_QUP2_I2C_APPS_CBCR				0x1B008
+#define GCC_BLSP1_QUP3_SPI_APPS_CBCR				0x1D004
+#define GCC_BLSP1_QUP3_I2C_APPS_CBCR				0x1D008
+#define GCC_BLSP1_QUP4_SPI_APPS_CBCR				0x1F004
+#define GCC_BLSP1_QUP4_I2C_APPS_CBCR				0x1F008
+#define GCC_BLSP1_QUP5_SPI_APPS_CBCR				0x21004
+#define GCC_BLSP1_QUP5_I2C_APPS_CBCR				0x21008
+#define GCC_BLSP1_QUP6_SPI_APPS_CBCR				0x23004
+#define GCC_BLSP1_QUP6_I2C_APPS_CBCR				0x23008
+#define GCC_BLSP1_UART1_APPS_CBCR				0x1A004
+#define GCC_BLSP1_UART2_APPS_CBCR				0x1C004
+#define GCC_BLSP1_UART3_APPS_CBCR				0x1E004
+#define GCC_BLSP2_AHB_CBCR					0x25004
+#define GCC_BLSP2_BCR						0x25000
+#define GCC_BLSP2_QUP1_SPI_APPS_CBCR				0x26004
+#define GCC_BLSP2_QUP1_I2C_APPS_CBCR				0x26008
+#define GCC_BLSP2_QUP2_I2C_APPS_CBCR				0x28008
+#define GCC_BLSP2_QUP2_SPI_APPS_CBCR				0x28004
+#define GCC_BLSP2_QUP3_SPI_APPS_CBCR				0x2A004
+#define GCC_BLSP2_QUP3_I2C_APPS_CBCR				0x2A008
+#define GCC_BLSP2_QUP4_SPI_APPS_CBCR				0x2C004
+#define GCC_BLSP2_QUP4_I2C_APPS_CBCR				0x2C008
+#define GCC_BLSP2_QUP5_SPI_APPS_CBCR				0x2E004
+#define GCC_BLSP2_QUP5_I2C_APPS_CBCR				0x2E008
+#define GCC_BLSP2_QUP6_SPI_APPS_CBCR				0x30004
+#define GCC_BLSP2_QUP6_I2C_APPS_CBCR				0x30008
+#define GCC_BLSP2_UART1_APPS_CBCR				0x27004
+#define GCC_BLSP2_UART2_APPS_CBCR				0x29004
+#define GCC_BLSP2_UART3_APPS_CBCR				0x2B004
+#define GCC_BOOT_ROM_AHB_CBCR					0x38004
+#define GCC_BOOT_ROM_BCR					0x38000
+#define GCC_CFG_NOC_USB3_AXI_CBCR				0x05018
+#define GCC_BIMC_GFX_CBCR					0x46040
+#define GCC_GP1_CBCR						0x64000
+#define GCC_GP2_CBCR						0x65000
+#define GCC_GP3_CBCR						0x66000
+#define GCC_GPU_BIMC_GFX_CBCR					0x71010
+#define GCC_GPU_CFG_AHB_CBCR					0x71004
+#define GCC_GPU_IREF_EN						0x88010
+#define GCC_HMSS_DVM_BUS_CBCR					0x4808C
+#define GCC_HMSS_RBCPR_CBCR					0x48008
+#define GCC_MMSS_SYS_NOC_AXI_CBCR				0x09000
+#define GCC_MMSS_NOC_CFG_AHB_CBCR				0x09004
+#define GCC_PCIE_0_SLV_AXI_CBCR					0x6B008
+#define GCC_PCIE_0_MSTR_AXI_CBCR				0x6B00C
+#define GCC_PCIE_0_CFG_AHB_CBCR					0x6B010
+#define GCC_PCIE_0_AUX_CBCR					0x6B014
+#define GCC_PCIE_0_PIPE_CBCR					0x6B018
+#define GCC_PCIE_PHY_AUX_CBCR					0x6F004
+#define GCC_PCIE_PHY_BCR					0x6F000
+#define GCC_PCIE_PHY_COM_BCR					0x6F014
+#define GCC_PCIE_PHY_NOCSR_COM_PHY_BCR				0x6F00C
+#define GCC_QUSB2PHY_PRIM_BCR					0x12000
+#define GCC_QUSB2PHY_SEC_BCR					0x12004
+#define GCC_PDM2_CBCR						0x3300C
+#define GCC_PDM_AHB_CBCR					0x33004
+#define GCC_PRNG_AHB_CBCR					0x34004
+#define GCC_PRNG_BCR						0x34000
+#define GCC_SDCC2_APPS_CBCR					0x14004
+#define GCC_SDCC2_AHB_CBCR					0x14008
+#define GCC_SDCC4_APPS_CBCR					0x16004
+#define GCC_SDCC4_AHB_CBCR					0x16008
+#define GCC_TSIF_AHB_CBCR					0x36004
+#define GCC_TSIF_REF_CBCR					0x36008
+#define GCC_UFS_AXI_CBCR					0x75008
+#define GCC_UFS_BCR						0x75000
+#define GCC_UFS_AHB_CBCR					0x7500C
+#define GCC_UFS_TX_SYMBOL_0_CBCR				0x75010
+#define GCC_UFS_RX_SYMBOL_0_CBCR				0x75014
+#define GCC_UFS_RX_SYMBOL_1_CBCR				0x7605C
+#define GCC_UFS_UNIPRO_CORE_CBCR				0x76008
+#define GCC_UFS_ICE_CORE_CBCR					0x7600C
+#define GCC_UFS_PHY_AUX_CBCR					0x76040
+#define GCC_USB30_MASTER_CBCR					0x0F008
+#define GCC_USB30_SLEEP_CBCR					0x0F00C
+#define GCC_USB30_MOCK_UTMI_CBCR				0x0F010
+#define GCC_USB_30_BCR						0x0F000
+#define GCC_USB3_PHY_AUX_CBCR					0x50000
+#define GCC_USB3_PHY_PIPE_CBCR					0x50004
+#define GCC_USB3PHY_PHY_BCR					0x50024
+#define GCC_APCS_CLOCK_SLEEP_ENA_VOTE				0x52008
+#define GCC_MSS_CFG_AHB_CBCR					0x8A000
+#define GCC_MSS_Q6_BIMC_AXI_CBCR				0x8A040
+#define GCC_MSS_MNOC_BIMC_AXI_CBCR				0x8A004
+#define GCC_MSS_SNOC_AXI_CBCR					0x8A03C
+#define GCC_HMSS_GPLL0_CMD_RCGR					0x4805C
+#define GCC_DCC_AHB_CBCR					0x84004
+#define GCC_HLOS1_VOTE_LPASS_CORE_SMMU_CBCR			0x7D010
+#define GCC_HLOS1_VOTE_LPASS_ADSP_SMMU_CBCR			0x7D014
+#define GCC_QSPI_AHB_CBCR					0x90004
+#define GCC_QSPI_REF_CBCR					0x90008
+#define GCC_MMSS_MISC						0x0902C
+#define GCC_GPU_MISC						0x71028
+
+#define GPUCC_GPU_PLL0_PLL_MODE					0x00000
+#define GPUCC_GPU_PLL0_USER_CTL_MODE				0x0000C
+#define GPUCC_GFX3D_CMD_RCGR					0x01070
+#define GPUCC_RBBMTIMER_CMD_RCGR				0x010B0
+#define GPUCC_GFX3D_ISENSE_CMD_RCGR				0x01100
+#define GPUCC_RBCPR_CMD_RCGR					0x01030
+#define GPUCC_GFX3D_CBCR					0x01098
+#define GPUCC_RBBMTIMER_CBCR					0x010D0
+#define GPUCC_GFX3D_ISENSE_CBCR					0x01124
+#define GPUCC_CXO_CBCR						0x01020
+#define GPUCC_RBCPR_CBCR					0x01054
+#define GPU_GX_BCR						0x01090
+#define GPUCC_GX_DOMAIN_MISC					0x00130
+#define GPUCC_GPU_DD_WRAP_CTRL					0x00430
+#define GPUCC_DEBUG_CLK_CTL					0x00120
+
+#define MMSS_PLL_VOTE_APCS					0x001E0
+#define MMSS_MMPLL0_PLL_MODE					0x0C000
+#define MMSS_MMPLL1_PLL_MODE					0x0C050
+#define MMSS_MMPLL3_PLL_MODE					0x00000
+#define MMSS_MMPLL4_PLL_MODE					0x00050
+#define MMSS_MMPLL5_PLL_MODE					0x000A0
+#define MMSS_MMPLL6_PLL_MODE					0x000F0
+#define MMSS_MMPLL7_PLL_MODE					0x00140
+#define MMSS_MMPLL10_PLL_MODE					0x00190
+#define MMSS_AHB_CMD_RCGR					0x05000
+#define MMSS_CSI0_CMD_RCGR					0x03090
+#define MMSS_VFE0_CMD_RCGR					0x03600
+#define MMSS_VFE1_CMD_RCGR					0x03620
+#define MMSS_MDP_CMD_RCGR					0x02040
+#define MMSS_MAXI_CMD_RCGR					0x0F020
+#define MMSS_CPP_CMD_RCGR					0x03640
+#define MMSS_JPEG0_CMD_RCGR					0x03500
+#define MMSS_ROT_CMD_RCGR					0x021A0
+#define MMSS_VIDEO_CORE_CMD_RCGR				0x01000
+#define MMSS_CSI1_CMD_RCGR					0x03100
+#define MMSS_CSI2_CMD_RCGR					0x03160
+#define MMSS_CSI3_CMD_RCGR					0x031C0
+#define MMSS_FD_CORE_CMD_RCGR					0x03B00
+#define MMSS_BYTE0_CMD_RCGR					0x02120
+#define MMSS_BYTE1_CMD_RCGR					0x02140
+#define MMSS_PCLK0_CMD_RCGR					0x02000
+#define MMSS_PCLK1_CMD_RCGR					0x02020
+#define MMSS_VIDEO_SUBCORE0_CMD_RCGR				0x01060
+#define MMSS_VIDEO_SUBCORE1_CMD_RCGR				0x01080
+#define MMSS_CSIPHY_CMD_RCGR					0x03800
+#define MMSS_CCI_CMD_RCGR					0x03300
+#define MMSS_CAMSS_GP0_CMD_RCGR					0x03420
+#define MMSS_CAMSS_GP1_CMD_RCGR					0x03450
+#define MMSS_MCLK0_CMD_RCGR					0x03360
+#define MMSS_MCLK1_CMD_RCGR					0x03390
+#define MMSS_MCLK2_CMD_RCGR					0x033C0
+#define MMSS_MCLK3_CMD_RCGR					0x033F0
+#define MMSS_CAMSS_CSI2PHYTIMER_CBCR				0x03084
+#define MMSS_CSI0PHYTIMER_CMD_RCGR				0x03000
+#define MMSS_CSI1PHYTIMER_CMD_RCGR				0x03030
+#define MMSS_CSI2PHYTIMER_CMD_RCGR				0x03060
+#define MMSS_DP_GTC_CMD_RCGR					0x02280
+#define MMSS_ESC0_CMD_RCGR					0x02160
+#define MMSS_ESC1_CMD_RCGR					0x02180
+#define MMSS_EXTPCLK_CMD_RCGR					0x02060
+#define MMSS_HDMI_CMD_RCGR					0x02100
+#define MMSS_VSYNC_CMD_RCGR					0x02080
+#define MMSS_BIMC_SMMU_AHB_CBCR					0x0E004
+#define MMSS_BIMC_SMMU_AXI_CBCR					0x0E008
+#define MMSS_SNOC_DVM_AXI_CBCR					0x0E040
+#define MMSS_CAMSS_AHB_CBCR					0x0348C
+#define MMSS_CAMSS_CCI_AHB_CBCR					0x03348
+#define MMSS_CAMSS_CCI_CBCR					0x03344
+#define MMSS_CAMSS_CPP_AHB_CBCR					0x036B4
+#define MMSS_CAMSS_CPP_CBCR					0x036B0
+#define MMSS_CAMSS_CPP_AXI_CBCR					0x036C4
+#define MMSS_CAMSS_CPP_VBIF_AHB_CBCR				0x036C8
+#define MMSS_CAMSS_CPHY_CSID0_CBCR				0x03730
+#define MMSS_CAMSS_CSI0_AHB_CBCR				0x030BC
+#define MMSS_CAMSS_CSI0_CBCR					0x030B4
+#define MMSS_CAMSS_CSI0PIX_CBCR					0x030E4
+#define MMSS_CAMSS_CSI0RDI_CBCR					0x030D4
+#define MMSS_CAMSS_CPHY_CSID1_CBCR				0x03734
+#define MMSS_CAMSS_CSI1_AHB_CBCR				0x03128
+#define MMSS_CAMSS_CSI1_CBCR					0x03124
+#define MMSS_CAMSS_CSI1PIX_CBCR					0x03154
+#define MMSS_CAMSS_CSI1RDI_CBCR					0x03144
+#define MMSS_CAMSS_CPHY_CSID2_CBCR				0x03738
+#define MMSS_CAMSS_CSI2_AHB_CBCR				0x03188
+#define MMSS_CAMSS_CSI2_CBCR					0x03184
+#define MMSS_CAMSS_CSI2PIX_CBCR					0x031B4
+#define MMSS_CAMSS_CSI2RDI_CBCR					0x031A4
+#define MMSS_CAMSS_CPHY_CSID3_CBCR				0x0373C
+#define MMSS_CAMSS_CSI3_AHB_CBCR				0x031E8
+#define MMSS_CAMSS_CSI3_CBCR					0x031E4
+#define MMSS_CAMSS_CSI3PIX_CBCR					0x03214
+#define MMSS_CAMSS_CSI3RDI_CBCR					0x03204
+#define MMSS_CAMSS_CSI_VFE0_CBCR				0x03704
+#define MMSS_CAMSS_CSI_VFE1_CBCR				0x03714
+#define MMSS_CAMSS_CSIPHY0_CBCR					0x03740
+#define MMSS_CAMSS_CSIPHY1_CBCR					0x03744
+#define MMSS_CAMSS_CSIPHY2_CBCR					0x03748
+#define MMSS_FD_AHB_CBCR					0x03B74
+#define MMSS_FD_CORE_CBCR					0x03B68
+#define MMSS_FD_CORE_UAR_CBCR					0x03B6C
+#define MMSS_CAMSS_GP0_CBCR					0x03444
+#define MMSS_CAMSS_GP1_CBCR					0x03474
+#define MMSS_CAMSS_ISPIF_AHB_CBCR				0x03224
+#define MMSS_CAMSS_JPEG0_CBCR					0x035A8
+#define MMSS_CAMSS_JPEG_AHB_CBCR				0x035B4
+#define MMSS_CAMSS_JPEG_AXI_CBCR				0x035B8
+#define MMSS_CAMSS_MCLK0_CBCR					0x03384
+#define MMSS_CAMSS_MCLK1_CBCR					0x033B4
+#define MMSS_CAMSS_MCLK2_CBCR					0x033E4
+#define MMSS_CAMSS_MCLK3_CBCR					0x03414
+#define MMSS_CAMSS_MICRO_AHB_CBCR				0x03494
+#define MMSS_CAMSS_CSI0PHYTIMER_CBCR				0x03024
+#define MMSS_CAMSS_CSI1PHYTIMER_CBCR				0x03054
+#define MMSS_CAMSS_TOP_AHB_CBCR					0x03484
+#define MMSS_CAMSS_VFE0_AHB_CBCR				0x03668
+#define MMSS_CAMSS_VFE0_CBCR					0x036A8
+#define MMSS_CAMSS_VFE0_STREAM_CBCR				0x03720
+#define MMSS_CAMSS_VFE1_AHB_CBCR				0x03678
+#define MMSS_CAMSS_VFE1_CBCR					0x036AC
+#define MMSS_CAMSS_VFE1_STREAM_CBCR				0x03724
+#define MMSS_CAMSS_VFE_VBIF_AHB_CBCR				0x036B8
+#define MMSS_CAMSS_VFE_VBIF_AXI_CBCR				0x036BC
+#define MMSS_MDSS_AHB_CBCR					0x02308
+#define MMSS_MDSS_AXI_CBCR					0x02310
+#define MMSS_MDSS_BYTE0_CBCR					0x0233C
+#define MMSS_MDSS_BYTE0_INTF_CBCR				0x02374
+#define MMSS_MDSS_BYTE0_INTF_DIV				0x0237C
+#define MMSS_MDSS_BYTE1_CBCR					0x02340
+#define MMSS_MDSS_BYTE1_INTF_CBCR				0x02378
+#define MMSS_MDSS_BYTE1_INTF_DIV				0x02380
+#define MMSS_MDSS_DP_AUX_CBCR					0x02364
+#define MMSS_MDSS_DP_CRYPTO_CBCR				0x0235C
+#define MMSS_MDSS_DP_GTC_CBCR					0x02368
+#define MMSS_MDSS_DP_LINK_CBCR					0x02354
+#define MMSS_MDSS_DP_LINK_INTF_CBCR				0x02358
+#define MMSS_MDSS_DP_PIXEL_CBCR					0x02360
+#define MMSS_MDSS_ESC0_CBCR					0x02344
+#define MMSS_MDSS_ESC1_CBCR					0x02348
+#define MMSS_MDSS_EXTPCLK_CBCR					0x02324
+#define MMSS_MDSS_HDMI_CBCR					0x02338
+#define MMSS_MDSS_HDMI_DP_AHB_CBCR				0x0230C
+#define MMSS_MDSS_MDP_CBCR					0x0231C
+#define MMSS_MDSS_MDP_LUT_CBCR					0x02320
+#define MMSS_MDSS_PCLK0_CBCR					0x02314
+#define MMSS_MDSS_PCLK1_CBCR					0x02318
+#define MMSS_MDSS_ROT_CBCR					0x02350
+#define MMSS_MDSS_VSYNC_CBCR					0x02328
+#define MMSS_MISC_AHB_CBCR					0x00328
+#define MMSS_MISC_CXO_CBCR					0x00324
+#define MMSS_MNOC_AHB_CBCR					0x05024
+#define MMSS_MNOC_MAXI_CBCR					0x0F004
+#define MMSS_VIDEO_SUBCORE0_CBCR				0x01048
+#define MMSS_VIDEO_SUBCORE1_CBCR				0x0104C
+#define MMSS_VIDEO_AHB_CBCR					0x01030
+#define MMSS_VIDEO_AXI_CBCR					0x01034
+#define MMSS_VIDEO_CORE_CBCR					0x01028
+#define MMSS_VIDEO_MAXI_CBCR					0x01038
+#define MMSS_VMEM_AHB_CBCR					0x0F068
+#define MMSS_VMEM_MAXI_CBCR					0x0F064
+#define MMSS_DP_AUX_CMD_RCGR					0x02260
+#define MMSS_DP_CRYPTO_CMD_RCGR					0x02220
+#define MMSS_DP_LINK_CMD_RCGR					0x02200
+#define MMSS_DP_PIXEL_CMD_RCGR					0x02240
+#define MMSS_DEBUG_CLK_CTL					0x00900
diff -Nruw linux-4.4.115/include/dt-bindings/msm/msm-bus-ids.h linux-4.4.115-fbx/include/dt-bindings/msm/msm-bus-ids.h
--- linux-4.4.115/include/dt-bindings/msm/msm-bus-ids.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/include/dt-bindings/msm/msm-bus-ids.h	2019-01-22 16:16:28.179288750 +0100
@@ -0,0 +1,887 @@
+/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MSM_BUS_IDS_H
+#define __MSM_BUS_IDS_H
+
+/* Aggregation types */
+#define AGG_SCHEME_NONE	0
+#define AGG_SCHEME_LEG	1
+#define AGG_SCHEME_1	2
+
+/* Topology related enums */
+#define	MSM_BUS_FAB_DEFAULT 0
+#define	MSM_BUS_FAB_APPSS 0
+#define	MSM_BUS_FAB_SYSTEM 1024
+#define	MSM_BUS_FAB_MMSS 2048
+#define	MSM_BUS_FAB_SYSTEM_FPB 3072
+#define	MSM_BUS_FAB_CPSS_FPB 4096
+
+#define	MSM_BUS_FAB_BIMC 0
+#define	MSM_BUS_FAB_SYS_NOC 1024
+#define	MSM_BUS_FAB_MMSS_NOC 2048
+#define	MSM_BUS_FAB_OCMEM_NOC 3072
+#define	MSM_BUS_FAB_PERIPH_NOC 4096
+#define	MSM_BUS_FAB_CONFIG_NOC 5120
+#define	MSM_BUS_FAB_OCMEM_VNOC 6144
+#define	MSM_BUS_FAB_MMSS_AHB 2049
+#define	MSM_BUS_FAB_A0_NOC 6145
+#define	MSM_BUS_FAB_A1_NOC 6146
+#define	MSM_BUS_FAB_A2_NOC 6147
+#define	MSM_BUS_FAB_GNOC 6148
+#define	MSM_BUS_FAB_CR_VIRT 6149
+
+#define	MSM_BUS_MASTER_FIRST 1
+#define	MSM_BUS_MASTER_AMPSS_M0 1
+#define	MSM_BUS_MASTER_AMPSS_M1 2
+#define	MSM_BUS_APPSS_MASTER_FAB_MMSS 3
+#define	MSM_BUS_APPSS_MASTER_FAB_SYSTEM 4
+#define	MSM_BUS_SYSTEM_MASTER_FAB_APPSS 5
+#define	MSM_BUS_MASTER_SPS 6
+#define	MSM_BUS_MASTER_ADM_PORT0 7
+#define	MSM_BUS_MASTER_ADM_PORT1 8
+#define	MSM_BUS_SYSTEM_MASTER_ADM1_PORT0 9
+#define	MSM_BUS_MASTER_ADM1_PORT1 10
+#define	MSM_BUS_MASTER_LPASS_PROC 11
+#define	MSM_BUS_MASTER_MSS_PROCI 12
+#define	MSM_BUS_MASTER_MSS_PROCD 13
+#define	MSM_BUS_MASTER_MSS_MDM_PORT0 14
+#define	MSM_BUS_MASTER_LPASS 15
+#define	MSM_BUS_SYSTEM_MASTER_CPSS_FPB 16
+#define	MSM_BUS_SYSTEM_MASTER_SYSTEM_FPB 17
+#define	MSM_BUS_SYSTEM_MASTER_MMSS_FPB 18
+#define	MSM_BUS_MASTER_ADM1_CI 19
+#define	MSM_BUS_MASTER_ADM0_CI 20
+#define	MSM_BUS_MASTER_MSS_MDM_PORT1 21
+#define	MSM_BUS_MASTER_MDP_PORT0 22
+#define	MSM_BUS_MASTER_MDP_PORT1 23
+#define	MSM_BUS_MMSS_MASTER_ADM1_PORT0 24
+#define	MSM_BUS_MASTER_ROTATOR 25
+#define	MSM_BUS_MASTER_GRAPHICS_3D 26
+#define	MSM_BUS_MASTER_JPEG_DEC 27
+#define	MSM_BUS_MASTER_GRAPHICS_2D_CORE0 28
+#define	MSM_BUS_MASTER_VFE 29
+#define	MSM_BUS_MASTER_VFE0 MSM_BUS_MASTER_VFE
+#define	MSM_BUS_MASTER_VPE 30
+#define	MSM_BUS_MASTER_JPEG_ENC 31
+#define	MSM_BUS_MASTER_GRAPHICS_2D_CORE1 32
+#define	MSM_BUS_MMSS_MASTER_APPS_FAB 33
+#define	MSM_BUS_MASTER_HD_CODEC_PORT0 34
+#define	MSM_BUS_MASTER_HD_CODEC_PORT1 35
+#define	MSM_BUS_MASTER_SPDM 36
+#define	MSM_BUS_MASTER_RPM 37
+#define	MSM_BUS_MASTER_MSS 38
+#define	MSM_BUS_MASTER_RIVA 39
+#define	MSM_BUS_MASTER_SNOC_VMEM 40
+#define	MSM_BUS_MASTER_MSS_SW_PROC 41
+#define	MSM_BUS_MASTER_MSS_FW_PROC 42
+#define	MSM_BUS_MASTER_HMSS 43
+#define	MSM_BUS_MASTER_GSS_NAV 44
+#define	MSM_BUS_MASTER_PCIE 45
+#define	MSM_BUS_MASTER_SATA 46
+#define	MSM_BUS_MASTER_CRYPTO 47
+#define	MSM_BUS_MASTER_VIDEO_CAP 48
+#define	MSM_BUS_MASTER_GRAPHICS_3D_PORT1 49
+#define	MSM_BUS_MASTER_VIDEO_ENC 50
+#define	MSM_BUS_MASTER_VIDEO_DEC 51
+#define	MSM_BUS_MASTER_LPASS_AHB 52
+#define	MSM_BUS_MASTER_QDSS_BAM 53
+#define	MSM_BUS_MASTER_SNOC_CFG 54
+#define	MSM_BUS_MASTER_CRYPTO_CORE0 55
+#define	MSM_BUS_MASTER_CRYPTO_CORE1 56
+#define	MSM_BUS_MASTER_MSS_NAV 57
+#define	MSM_BUS_MASTER_OCMEM_DMA 58
+#define	MSM_BUS_MASTER_WCSS 59
+#define	MSM_BUS_MASTER_QDSS_ETR 60
+#define	MSM_BUS_MASTER_USB3 61
+#define	MSM_BUS_MASTER_JPEG 62
+#define	MSM_BUS_MASTER_VIDEO_P0 63
+#define	MSM_BUS_MASTER_VIDEO_P1 64
+#define	MSM_BUS_MASTER_MSS_PROC 65
+#define	MSM_BUS_MASTER_JPEG_OCMEM 66
+#define	MSM_BUS_MASTER_MDP_OCMEM 67
+#define	MSM_BUS_MASTER_VIDEO_P0_OCMEM 68
+#define	MSM_BUS_MASTER_VIDEO_P1_OCMEM 69
+#define	MSM_BUS_MASTER_VFE_OCMEM 70
+#define	MSM_BUS_MASTER_CNOC_ONOC_CFG 71
+#define	MSM_BUS_MASTER_RPM_INST 72
+#define	MSM_BUS_MASTER_RPM_DATA 73
+#define	MSM_BUS_MASTER_RPM_SYS 74
+#define	MSM_BUS_MASTER_DEHR 75
+#define	MSM_BUS_MASTER_QDSS_DAP 76
+#define	MSM_BUS_MASTER_TIC 77
+#define	MSM_BUS_MASTER_SDCC_1 78
+#define	MSM_BUS_MASTER_SDCC_3 79
+#define	MSM_BUS_MASTER_SDCC_4 80
+#define	MSM_BUS_MASTER_SDCC_2 81
+#define	MSM_BUS_MASTER_TSIF 82
+#define	MSM_BUS_MASTER_BAM_DMA 83
+#define	MSM_BUS_MASTER_BLSP_2 84
+#define	MSM_BUS_MASTER_USB_HSIC 85
+#define	MSM_BUS_MASTER_BLSP_1 86
+#define	MSM_BUS_MASTER_USB_HS 87
+#define	MSM_BUS_MASTER_PNOC_CFG 88
+#define	MSM_BUS_MASTER_V_OCMEM_GFX3D 89
+#define	MSM_BUS_MASTER_IPA 90
+#define	MSM_BUS_MASTER_QPIC 91
+#define	MSM_BUS_MASTER_MDPE 92
+#define	MSM_BUS_MASTER_USB_HS2 93
+#define	MSM_BUS_MASTER_VPU 94
+#define	MSM_BUS_MASTER_UFS 95
+#define	MSM_BUS_MASTER_BCAST 96
+#define	MSM_BUS_MASTER_CRYPTO_CORE2 97
+#define	MSM_BUS_MASTER_EMAC 98
+#define	MSM_BUS_MASTER_VPU_1 99
+#define	MSM_BUS_MASTER_PCIE_1 100
+#define	MSM_BUS_MASTER_USB3_1 101
+#define	MSM_BUS_MASTER_CNOC_MNOC_MMSS_CFG 102
+#define	MSM_BUS_MASTER_CNOC_MNOC_CFG 103
+#define	MSM_BUS_MASTER_TCU_0 104
+#define	MSM_BUS_MASTER_TCU_1 105
+#define	MSM_BUS_MASTER_CPP 106
+#define	MSM_BUS_MASTER_AUDIO 107
+#define	MSM_BUS_MASTER_PCIE_2 108
+#define	MSM_BUS_MASTER_VFE1 109
+#define	MSM_BUS_MASTER_XM_USB_HS1 110
+#define	MSM_BUS_MASTER_PCNOC_BIMC_1 111
+#define	MSM_BUS_MASTER_BIMC_PCNOC   112
+#define	MSM_BUS_MASTER_XI_USB_HSIC  113
+#define	MSM_BUS_MASTER_SGMII	    114
+#define	MSM_BUS_SPMI_FETCHER 115
+#define	MSM_BUS_MASTER_GNOC_BIMC 116
+#define	MSM_BUS_MASTER_CRVIRT_A2NOC 117
+#define	MSM_BUS_MASTER_CNOC_A2NOC 118
+#define	MSM_BUS_MASTER_WLAN 119
+#define	MSM_BUS_MASTER_MSS_CE 120
+#define	MSM_BUS_MASTER_CDSP_PROC 121
+#define	MSM_BUS_MASTER_GNOC_SNOC 122
+#define	MSM_BUS_MASTER_PIMEM 123
+#define	MSM_BUS_MASTER_MASTER_LAST 124
+
+#define	MSM_BUS_SYSTEM_FPB_MASTER_SYSTEM MSM_BUS_SYSTEM_MASTER_SYSTEM_FPB
+#define	MSM_BUS_CPSS_FPB_MASTER_SYSTEM MSM_BUS_SYSTEM_MASTER_CPSS_FPB
+
+#define	MSM_BUS_SNOC_MM_INT_0 10000
+#define	MSM_BUS_SNOC_MM_INT_1 10001
+#define	MSM_BUS_SNOC_MM_INT_2 10002
+#define	MSM_BUS_SNOC_MM_INT_BIMC 10003
+#define	MSM_BUS_SNOC_INT_0 10004
+#define	MSM_BUS_SNOC_INT_1 10005
+#define	MSM_BUS_SNOC_INT_BIMC 10006
+#define	MSM_BUS_SNOC_BIMC_0_MAS 10007
+#define	MSM_BUS_SNOC_BIMC_1_MAS 10008
+#define	MSM_BUS_SNOC_QDSS_INT 10009
+#define	MSM_BUS_PNOC_SNOC_MAS 10010
+#define	MSM_BUS_PNOC_SNOC_SLV 10011
+#define	MSM_BUS_PNOC_INT_0 10012
+#define	MSM_BUS_PNOC_INT_1 10013
+#define	MSM_BUS_PNOC_M_0 10014
+#define	MSM_BUS_PNOC_M_1 10015
+#define	MSM_BUS_BIMC_SNOC_MAS 10016
+#define	MSM_BUS_BIMC_SNOC_SLV 10017
+#define	MSM_BUS_PNOC_SLV_0 10018
+#define	MSM_BUS_PNOC_SLV_1 10019
+#define	MSM_BUS_PNOC_SLV_2 10020
+#define	MSM_BUS_PNOC_SLV_3 10021
+#define	MSM_BUS_PNOC_SLV_4 10022
+#define	MSM_BUS_PNOC_SLV_8 10023
+#define	MSM_BUS_PNOC_SLV_9 10024
+#define	MSM_BUS_SNOC_BIMC_0_SLV 10025
+#define	MSM_BUS_SNOC_BIMC_1_SLV 10026
+#define	MSM_BUS_MNOC_BIMC_MAS 10027
+#define	MSM_BUS_MNOC_BIMC_SLV 10028
+#define	MSM_BUS_BIMC_MNOC_MAS 10029
+#define	MSM_BUS_BIMC_MNOC_SLV 10030
+#define	MSM_BUS_SNOC_BIMC_MAS 10031
+#define	MSM_BUS_SNOC_BIMC_SLV 10032
+#define	MSM_BUS_CNOC_SNOC_MAS 10033
+#define	MSM_BUS_CNOC_SNOC_SLV 10034
+#define	MSM_BUS_SNOC_CNOC_MAS 10035
+#define	MSM_BUS_SNOC_CNOC_SLV 10036
+#define	MSM_BUS_OVNOC_SNOC_MAS 10037
+#define	MSM_BUS_OVNOC_SNOC_SLV 10038
+#define	MSM_BUS_SNOC_OVNOC_MAS 10039
+#define	MSM_BUS_SNOC_OVNOC_SLV 10040
+#define	MSM_BUS_SNOC_PNOC_MAS 10041
+#define	MSM_BUS_SNOC_PNOC_SLV 10042
+#define	MSM_BUS_BIMC_INT_APPS_EBI 10043
+#define	MSM_BUS_BIMC_INT_APPS_SNOC 10044
+#define	MSM_BUS_SNOC_BIMC_2_MAS 10045
+#define	MSM_BUS_SNOC_BIMC_2_SLV 10046
+#define	MSM_BUS_PNOC_SLV_5	10047
+#define	MSM_BUS_PNOC_SLV_7	10048
+#define	MSM_BUS_PNOC_INT_2 10049
+#define	MSM_BUS_PNOC_INT_3 10050
+#define	MSM_BUS_PNOC_INT_4 10051
+#define	MSM_BUS_PNOC_INT_5 10052
+#define	MSM_BUS_PNOC_INT_6 10053
+#define	MSM_BUS_PNOC_INT_7 10054
+#define	MSM_BUS_BIMC_SNOC_1_MAS 10055
+#define	MSM_BUS_BIMC_SNOC_1_SLV 10056
+#define	MSM_BUS_PNOC_A1NOC_MAS 10057
+#define	MSM_BUS_PNOC_A1NOC_SLV 10058
+#define	MSM_BUS_CNOC_A1NOC_MAS 10059
+#define	MSM_BUS_A0NOC_SNOC_MAS 10060
+#define	MSM_BUS_A0NOC_SNOC_SLV 10061
+#define	MSM_BUS_A1NOC_SNOC_SLV 10062
+#define	MSM_BUS_A1NOC_SNOC_MAS 10063
+#define	MSM_BUS_A2NOC_SNOC_MAS 10064
+#define	MSM_BUS_A2NOC_SNOC_SLV 10065
+#define	MSM_BUS_SNOC_INT_2 10066
+#define	MSM_BUS_A0NOC_QDSS_INT	10067
+#define	MSM_BUS_INT_LAST 10068
+
+#define	MSM_BUS_INT_TEST_ID	20000
+#define	MSM_BUS_INT_TEST_LAST	20050
+
+#define	MSM_BUS_SLAVE_FIRST 512
+#define	MSM_BUS_SLAVE_EBI_CH0 512
+#define	MSM_BUS_SLAVE_EBI_CH1 513
+#define	MSM_BUS_SLAVE_AMPSS_L2 514
+#define	MSM_BUS_APPSS_SLAVE_FAB_MMSS 515
+#define	MSM_BUS_APPSS_SLAVE_FAB_SYSTEM 516
+#define	MSM_BUS_SYSTEM_SLAVE_FAB_APPS 517
+#define	MSM_BUS_SLAVE_SPS 518
+#define	MSM_BUS_SLAVE_SYSTEM_IMEM 519
+#define	MSM_BUS_SLAVE_AMPSS 520
+#define	MSM_BUS_SLAVE_MSS 521
+#define	MSM_BUS_SLAVE_LPASS 522
+#define	MSM_BUS_SYSTEM_SLAVE_CPSS_FPB 523
+#define	MSM_BUS_SYSTEM_SLAVE_SYSTEM_FPB 524
+#define	MSM_BUS_SYSTEM_SLAVE_MMSS_FPB 525
+#define	MSM_BUS_SLAVE_CORESIGHT 526
+#define	MSM_BUS_SLAVE_RIVA 527
+#define	MSM_BUS_SLAVE_SMI 528
+#define	MSM_BUS_MMSS_SLAVE_FAB_APPS 529
+#define	MSM_BUS_MMSS_SLAVE_FAB_APPS_1 530
+#define	MSM_BUS_SLAVE_MM_IMEM 531
+#define	MSM_BUS_SLAVE_CRYPTO 532
+#define	MSM_BUS_SLAVE_SPDM 533
+#define	MSM_BUS_SLAVE_RPM 534
+#define	MSM_BUS_SLAVE_RPM_MSG_RAM 535
+#define	MSM_BUS_SLAVE_MPM 536
+#define	MSM_BUS_SLAVE_PMIC1_SSBI1_A 537
+#define	MSM_BUS_SLAVE_PMIC1_SSBI1_B 538
+#define	MSM_BUS_SLAVE_PMIC1_SSBI1_C 539
+#define	MSM_BUS_SLAVE_PMIC2_SSBI2_A 540
+#define	MSM_BUS_SLAVE_PMIC2_SSBI2_B 541
+#define	MSM_BUS_SLAVE_GSBI1_UART 542
+#define	MSM_BUS_SLAVE_GSBI2_UART 543
+#define	MSM_BUS_SLAVE_GSBI3_UART 544
+#define	MSM_BUS_SLAVE_GSBI4_UART 545
+#define	MSM_BUS_SLAVE_GSBI5_UART 546
+#define	MSM_BUS_SLAVE_GSBI6_UART 547
+#define	MSM_BUS_SLAVE_GSBI7_UART 548
+#define	MSM_BUS_SLAVE_GSBI8_UART 549
+#define	MSM_BUS_SLAVE_GSBI9_UART 550
+#define	MSM_BUS_SLAVE_GSBI10_UART 551
+#define	MSM_BUS_SLAVE_GSBI11_UART 552
+#define	MSM_BUS_SLAVE_GSBI12_UART 553
+#define	MSM_BUS_SLAVE_GSBI1_QUP 554
+#define	MSM_BUS_SLAVE_GSBI2_QUP 555
+#define	MSM_BUS_SLAVE_GSBI3_QUP 556
+#define	MSM_BUS_SLAVE_GSBI4_QUP 557
+#define	MSM_BUS_SLAVE_GSBI5_QUP 558
+#define	MSM_BUS_SLAVE_GSBI6_QUP 559
+#define	MSM_BUS_SLAVE_GSBI7_QUP 560
+#define	MSM_BUS_SLAVE_GSBI8_QUP 561
+#define	MSM_BUS_SLAVE_GSBI9_QUP 562
+#define	MSM_BUS_SLAVE_GSBI10_QUP 563
+#define	MSM_BUS_SLAVE_GSBI11_QUP 564
+#define	MSM_BUS_SLAVE_GSBI12_QUP 565
+#define	MSM_BUS_SLAVE_EBI2_NAND 566
+#define	MSM_BUS_SLAVE_EBI2_CS0 567
+#define	MSM_BUS_SLAVE_EBI2_CS1 568
+#define	MSM_BUS_SLAVE_EBI2_CS2 569
+#define	MSM_BUS_SLAVE_EBI2_CS3 570
+#define	MSM_BUS_SLAVE_EBI2_CS4 571
+#define	MSM_BUS_SLAVE_EBI2_CS5 572
+#define	MSM_BUS_SLAVE_USB_FS1 573
+#define	MSM_BUS_SLAVE_USB_FS2 574
+#define	MSM_BUS_SLAVE_TSIF 575
+#define	MSM_BUS_SLAVE_MSM_TSSC 576
+#define	MSM_BUS_SLAVE_MSM_PDM 577
+#define	MSM_BUS_SLAVE_MSM_DIMEM 578
+#define	MSM_BUS_SLAVE_MSM_TCSR 579
+#define	MSM_BUS_SLAVE_MSM_PRNG 580
+#define	MSM_BUS_SLAVE_GSS 581
+#define	MSM_BUS_SLAVE_SATA 582
+#define	MSM_BUS_SLAVE_USB3 583
+#define	MSM_BUS_SLAVE_WCSS 584
+#define	MSM_BUS_SLAVE_OCIMEM 585
+#define	MSM_BUS_SLAVE_SNOC_OCMEM 586
+#define	MSM_BUS_SLAVE_SERVICE_SNOC 587
+#define	MSM_BUS_SLAVE_QDSS_STM 588
+#define	MSM_BUS_SLAVE_CAMERA_CFG 589
+#define	MSM_BUS_SLAVE_DISPLAY_CFG 590
+#define	MSM_BUS_SLAVE_OCMEM_CFG 591
+#define	MSM_BUS_SLAVE_CPR_CFG 592
+#define	MSM_BUS_SLAVE_CPR_XPU_CFG 593
+#define	MSM_BUS_SLAVE_MISC_CFG 594
+#define	MSM_BUS_SLAVE_MISC_XPU_CFG 595
+#define	MSM_BUS_SLAVE_VENUS_CFG 596
+#define	MSM_BUS_SLAVE_MISC_VENUS_CFG 597
+#define	MSM_BUS_SLAVE_GRAPHICS_3D_CFG 598
+#define	MSM_BUS_SLAVE_MMSS_CLK_CFG 599
+#define	MSM_BUS_SLAVE_MMSS_CLK_XPU_CFG 600
+#define	MSM_BUS_SLAVE_MNOC_MPU_CFG 601
+#define	MSM_BUS_SLAVE_ONOC_MPU_CFG 602
+#define	MSM_BUS_SLAVE_SERVICE_MNOC 603
+#define	MSM_BUS_SLAVE_OCMEM 604
+#define	MSM_BUS_SLAVE_SERVICE_ONOC 605
+#define	MSM_BUS_SLAVE_SDCC_1 606
+#define	MSM_BUS_SLAVE_SDCC_3 607
+#define	MSM_BUS_SLAVE_SDCC_2 608
+#define	MSM_BUS_SLAVE_SDCC_4 609
+#define	MSM_BUS_SLAVE_BAM_DMA 610
+#define	MSM_BUS_SLAVE_BLSP_2 611
+#define	MSM_BUS_SLAVE_USB_HSIC 612
+#define	MSM_BUS_SLAVE_BLSP_1 613
+#define	MSM_BUS_SLAVE_USB_HS 614
+#define	MSM_BUS_SLAVE_PDM 615
+#define	MSM_BUS_SLAVE_PERIPH_APU_CFG 616
+#define	MSM_BUS_SLAVE_PNOC_MPU_CFG 617
+#define	MSM_BUS_SLAVE_PRNG 618
+#define	MSM_BUS_SLAVE_SERVICE_PNOC 619
+#define	MSM_BUS_SLAVE_CLK_CTL 620
+#define	MSM_BUS_SLAVE_CNOC_MSS 621
+#define	MSM_BUS_SLAVE_SECURITY 622
+#define	MSM_BUS_SLAVE_TCSR 623
+#define	MSM_BUS_SLAVE_TLMM 624
+#define	MSM_BUS_SLAVE_CRYPTO_0_CFG 625
+#define	MSM_BUS_SLAVE_CRYPTO_1_CFG 626
+#define	MSM_BUS_SLAVE_IMEM_CFG 627
+#define	MSM_BUS_SLAVE_MESSAGE_RAM 628
+#define	MSM_BUS_SLAVE_BIMC_CFG 629
+#define	MSM_BUS_SLAVE_BOOT_ROM 630
+#define	MSM_BUS_SLAVE_CNOC_MNOC_MMSS_CFG 631
+#define	MSM_BUS_SLAVE_PMIC_ARB 632
+#define	MSM_BUS_SLAVE_SPDM_WRAPPER 633
+#define	MSM_BUS_SLAVE_DEHR_CFG 634
+#define	MSM_BUS_SLAVE_QDSS_CFG 635
+#define	MSM_BUS_SLAVE_RBCPR_CFG 636
+#define	MSM_BUS_SLAVE_RBCPR_QDSS_APU_CFG 637
+#define	MSM_BUS_SLAVE_SNOC_MPU_CFG 638
+#define	MSM_BUS_SLAVE_CNOC_ONOC_CFG 639
+#define	MSM_BUS_SLAVE_CNOC_MNOC_CFG 640
+#define	MSM_BUS_SLAVE_PNOC_CFG 641
+#define	MSM_BUS_SLAVE_SNOC_CFG 642
+#define	MSM_BUS_SLAVE_EBI1_DLL_CFG 643
+#define	MSM_BUS_SLAVE_PHY_APU_CFG 644
+#define	MSM_BUS_SLAVE_EBI1_PHY_CFG 645
+#define	MSM_BUS_SLAVE_SERVICE_CNOC 646
+#define	MSM_BUS_SLAVE_IPS_CFG 647
+#define	MSM_BUS_SLAVE_QPIC 648
+#define	MSM_BUS_SLAVE_DSI_CFG 649
+#define	MSM_BUS_SLAVE_UFS_CFG 650
+#define	MSM_BUS_SLAVE_RBCPR_CX_CFG 651
+#define	MSM_BUS_SLAVE_RBCPR_MX_CFG 652
+#define	MSM_BUS_SLAVE_PCIE_CFG 653
+#define	MSM_BUS_SLAVE_USB_PHYS_CFG 654
+#define	MSM_BUS_SLAVE_VIDEO_CAP_CFG 655
+#define	MSM_BUS_SLAVE_AVSYNC_CFG 656
+#define	MSM_BUS_SLAVE_CRYPTO_2_CFG 657
+#define	MSM_BUS_SLAVE_VPU_CFG 658
+#define	MSM_BUS_SLAVE_BCAST_CFG 659
+#define	MSM_BUS_SLAVE_KLM_CFG 660
+#define	MSM_BUS_SLAVE_GENI_IR_CFG 661
+#define	MSM_BUS_SLAVE_OCMEM_GFX 662
+#define	MSM_BUS_SLAVE_CATS_128 663
+#define	MSM_BUS_SLAVE_OCMEM_64 664
+#define	MSM_BUS_SLAVE_PCIE_0 665
+#define	MSM_BUS_SLAVE_PCIE_1 666
+#define	MSM_BUS_SLAVE_PCIE_0_CFG 667
+#define	MSM_BUS_SLAVE_PCIE_1_CFG 668
+#define	MSM_BUS_SLAVE_SRVC_MNOC 669
+#define	MSM_BUS_SLAVE_USB_HS2 670
+#define	MSM_BUS_SLAVE_AUDIO 671
+#define	MSM_BUS_SLAVE_TCU 672
+#define	MSM_BUS_SLAVE_APPSS 673
+#define	MSM_BUS_SLAVE_PCIE_PARF 674
+#define	MSM_BUS_SLAVE_USB3_PHY_CFG 675
+#define	MSM_BUS_SLAVE_IPA_CFG 676
+#define	MSM_BUS_SLAVE_A0NOC_SNOC 677
+#define	MSM_BUS_SLAVE_A1NOC_SNOC 678
+#define	MSM_BUS_SLAVE_A2NOC_SNOC 679
+#define	MSM_BUS_SLAVE_HMSS_L3 680
+#define	MSM_BUS_SLAVE_PIMEM_CFG 681
+#define	MSM_BUS_SLAVE_DCC_CFG 682
+#define	MSM_BUS_SLAVE_QDSS_RBCPR_APU_CFG 683
+#define	MSM_BUS_SLAVE_PCIE_2_CFG 684
+#define	MSM_BUS_SLAVE_PCIE20_AHB2PHY 685
+#define	MSM_BUS_SLAVE_A0NOC_CFG 686
+#define	MSM_BUS_SLAVE_A1NOC_CFG 687
+#define	MSM_BUS_SLAVE_A2NOC_CFG 688
+#define	MSM_BUS_SLAVE_A1NOC_MPU_CFG 689
+#define	MSM_BUS_SLAVE_A2NOC_MPU_CFG 690
+#define	MSM_BUS_SLAVE_A0NOC_SMMU_CFG 691
+#define	MSM_BUS_SLAVE_A1NOC_SMMU_CFG 692
+#define	MSM_BUS_SLAVE_A2NOC_SMMU_CFG 693
+#define	MSM_BUS_SLAVE_LPASS_SMMU_CFG 694
+#define	MSM_BUS_SLAVE_MMAGIC_CFG 695
+#define	MSM_BUS_SLAVE_VENUS_THROTTLE_CFG 696
+#define	MSM_BUS_SLAVE_SSC_CFG 697
+#define	MSM_BUS_SLAVE_DSA_CFG 698
+#define	MSM_BUS_SLAVE_DSA_MPU_CFG 699
+#define	MSM_BUS_SLAVE_DISPLAY_THROTTLE_CFG 700
+#define	MSM_BUS_SLAVE_SMMU_CPP_CFG 701
+#define	MSM_BUS_SLAVE_SMMU_JPEG_CFG 702
+#define	MSM_BUS_SLAVE_SMMU_MDP_CFG 703
+#define	MSM_BUS_SLAVE_SMMU_ROTATOR_CFG 704
+#define	MSM_BUS_SLAVE_SMMU_VENUS_CFG 705
+#define	MSM_BUS_SLAVE_SMMU_VFE_CFG 706
+#define	MSM_BUS_SLAVE_A0NOC_MPU_CFG 707
+#define	MSM_BUS_SLAVE_VMEM_CFG 708
+#define	MSM_BUS_SLAVE_CAMERA_THROTTLE_CFG 709
+#define	MSM_BUS_SLAVE_VMEM 710
+#define	MSM_BUS_SLAVE_AHB2PHY 711
+#define	MSM_BUS_SLAVE_PIMEM 712
+#define	MSM_BUS_SLAVE_SNOC_VMEM 713
+#define	MSM_BUS_SLAVE_PCIE_2 714
+#define	MSM_BUS_SLAVE_RBCPR_MX 715
+#define	MSM_BUS_SLAVE_RBCPR_CX 716
+#define	MSM_BUS_SLAVE_BIMC_PCNOC 717
+#define	MSM_BUS_SLAVE_PCNOC_BIMC_1 718
+#define	MSM_BUS_SLAVE_SGMII 719
+#define	MSM_BUS_SLAVE_SPMI_FETCHER 720
+#define	MSM_BUS_PNOC_SLV_6 721
+#define	MSM_BUS_SLAVE_MMSS_SMMU_CFG 722
+#define	MSM_BUS_SLAVE_WLAN 723
+#define	MSM_BUS_SLAVE_CRVIRT_A2NOC 724
+#define	MSM_BUS_SLAVE_CNOC_A2NOC 725
+#define	MSM_BUS_SLAVE_GLM 726
+#define	MSM_BUS_SLAVE_GNOC_BIMC 727
+#define	MSM_BUS_SLAVE_GNOC_SNOC 728
+#define	MSM_BUS_SLAVE_QM_CFG 729
+#define	MSM_BUS_SLAVE_TLMM_EAST 730
+#define	MSM_BUS_SLAVE_TLMM_NORTH 731
+#define	MSM_BUS_SLAVE_TLMM_WEST 732
+#define	MSM_BUS_SLAVE_SKL 733
+#define	MSM_BUS_SLAVE_LPASS_TCM	734
+#define	MSM_BUS_SLAVE_TLMM_SOUTH 735
+#define	MSM_BUS_SLAVE_TLMM_CENTER 736
+#define	MSM_BUS_MSS_NAV_CE_MPU_CFG 737
+#define	MSM_BUS_SLAVE_A2NOC_THROTTLE_CFG 738
+#define	MSM_BUS_SLAVE_CDSP 739
+#define	MSM_BUS_SLAVE_CDSP_SMMU_CFG 740
+#define	MSM_BUS_SLAVE_LPASS_MPU_CFG 741
+#define	MSM_BUS_SLAVE_CSI_PHY_CFG 742
+#define	MSM_BUS_SLAVE_LAST 743
+
+#define	MSM_BUS_SYSTEM_FPB_SLAVE_SYSTEM  MSM_BUS_SYSTEM_SLAVE_SYSTEM_FPB
+#define	MSM_BUS_CPSS_FPB_SLAVE_SYSTEM MSM_BUS_SYSTEM_SLAVE_CPSS_FPB
+
+/*
+ * IDs used in RPM messages, shared with the RPM firmware.
+ */
+#define	ICBID_MASTER_APPSS_PROC 0
+#define	ICBID_MASTER_MSS_PROC 1
+#define	ICBID_MASTER_MNOC_BIMC 2
+#define	ICBID_MASTER_SNOC_BIMC 3
+#define	ICBID_MASTER_SNOC_BIMC_0 ICBID_MASTER_SNOC_BIMC
+#define	ICBID_MASTER_CNOC_MNOC_MMSS_CFG 4
+#define	ICBID_MASTER_CNOC_MNOC_CFG 5
+#define	ICBID_MASTER_GFX3D 6
+#define	ICBID_MASTER_JPEG 7
+#define	ICBID_MASTER_MDP 8
+#define	ICBID_MASTER_MDP0 ICBID_MASTER_MDP
+#define	ICBID_MASTER_MDPS ICBID_MASTER_MDP
+#define	ICBID_MASTER_VIDEO 9
+#define	ICBID_MASTER_VIDEO_P0 ICBID_MASTER_VIDEO
+#define	ICBID_MASTER_VIDEO_P1 10
+#define	ICBID_MASTER_VFE 11
+#define	ICBID_MASTER_VFE0 ICBID_MASTER_VFE
+#define	ICBID_MASTER_CNOC_ONOC_CFG 12
+#define	ICBID_MASTER_JPEG_OCMEM 13
+#define	ICBID_MASTER_MDP_OCMEM 14
+#define	ICBID_MASTER_VIDEO_P0_OCMEM 15
+#define	ICBID_MASTER_VIDEO_P1_OCMEM 16
+#define	ICBID_MASTER_VFE_OCMEM 17
+#define	ICBID_MASTER_LPASS_AHB 18
+#define	ICBID_MASTER_QDSS_BAM 19
+#define	ICBID_MASTER_SNOC_CFG 20
+#define	ICBID_MASTER_BIMC_SNOC 21
+#define	ICBID_MASTER_BIMC_SNOC_0 ICBID_MASTER_BIMC_SNOC
+#define	ICBID_MASTER_CNOC_SNOC 22
+#define	ICBID_MASTER_CRYPTO 23
+#define	ICBID_MASTER_CRYPTO_CORE0 ICBID_MASTER_CRYPTO
+#define	ICBID_MASTER_CRYPTO_CORE1 24
+#define	ICBID_MASTER_LPASS_PROC 25
+#define	ICBID_MASTER_MSS 26
+#define	ICBID_MASTER_MSS_NAV 27
+#define	ICBID_MASTER_OCMEM_DMA 28
+#define	ICBID_MASTER_PNOC_SNOC 29
+#define	ICBID_MASTER_WCSS 30
+#define	ICBID_MASTER_QDSS_ETR 31
+#define	ICBID_MASTER_USB3 32
+#define	ICBID_MASTER_USB3_0 ICBID_MASTER_USB3
+#define	ICBID_MASTER_SDCC_1 33
+#define	ICBID_MASTER_SDCC_3 34
+#define	ICBID_MASTER_SDCC_2 35
+#define	ICBID_MASTER_SDCC_4 36
+#define	ICBID_MASTER_TSIF 37
+#define	ICBID_MASTER_BAM_DMA 38
+#define	ICBID_MASTER_BLSP_2 39
+#define	ICBID_MASTER_USB_HSIC 40
+#define	ICBID_MASTER_BLSP_1 41
+#define	ICBID_MASTER_USB_HS 42
+#define	ICBID_MASTER_USB_HS1 ICBID_MASTER_USB_HS
+#define	ICBID_MASTER_PNOC_CFG 43
+#define	ICBID_MASTER_SNOC_PNOC 44
+#define	ICBID_MASTER_RPM_INST 45
+#define	ICBID_MASTER_RPM_DATA 46
+#define	ICBID_MASTER_RPM_SYS 47
+#define	ICBID_MASTER_DEHR 48
+#define	ICBID_MASTER_QDSS_DAP 49
+#define	ICBID_MASTER_SPDM 50
+#define	ICBID_MASTER_TIC 51
+#define	ICBID_MASTER_SNOC_CNOC 52
+#define	ICBID_MASTER_GFX3D_OCMEM 53
+#define	ICBID_MASTER_GFX3D_GMEM ICBID_MASTER_GFX3D_OCMEM
+#define	ICBID_MASTER_OVIRT_SNOC 54
+#define	ICBID_MASTER_SNOC_OVIRT 55
+#define	ICBID_MASTER_SNOC_GVIRT ICBID_MASTER_SNOC_OVIRT
+#define	ICBID_MASTER_ONOC_OVIRT 56
+#define	ICBID_MASTER_USB_HS2 57
+#define	ICBID_MASTER_QPIC 58
+#define	ICBID_MASTER_IPA 59
+#define	ICBID_MASTER_DSI 60
+#define	ICBID_MASTER_MDP1 61
+#define	ICBID_MASTER_MDPE ICBID_MASTER_MDP1
+#define	ICBID_MASTER_VPU_PROC 62
+#define	ICBID_MASTER_VPU 63
+#define	ICBID_MASTER_VPU0 ICBID_MASTER_VPU
+#define	ICBID_MASTER_CRYPTO_CORE2 64
+#define	ICBID_MASTER_PCIE_0 65
+#define	ICBID_MASTER_PCIE_1 66
+#define	ICBID_MASTER_SATA 67
+#define	ICBID_MASTER_UFS 68
+#define	ICBID_MASTER_USB3_1 69
+#define	ICBID_MASTER_VIDEO_OCMEM 70
+#define	ICBID_MASTER_VPU1 71
+#define	ICBID_MASTER_VCAP 72
+#define	ICBID_MASTER_EMAC 73
+#define	ICBID_MASTER_BCAST 74
+#define	ICBID_MASTER_MMSS_PROC 75
+#define	ICBID_MASTER_SNOC_BIMC_1 76
+#define	ICBID_MASTER_SNOC_PCNOC 77
+#define	ICBID_MASTER_AUDIO 78
+#define	ICBID_MASTER_MM_INT_0 79
+#define	ICBID_MASTER_MM_INT_1 80
+#define	ICBID_MASTER_MM_INT_2 81
+#define	ICBID_MASTER_MM_INT_BIMC 82
+#define	ICBID_MASTER_MSS_INT 83
+#define	ICBID_MASTER_PCNOC_CFG 84
+#define	ICBID_MASTER_PCNOC_INT_0 85
+#define	ICBID_MASTER_PCNOC_INT_1 86
+#define	ICBID_MASTER_PCNOC_M_0 87
+#define	ICBID_MASTER_PCNOC_M_1 88
+#define	ICBID_MASTER_PCNOC_S_0 89
+#define	ICBID_MASTER_PCNOC_S_1 90
+#define	ICBID_MASTER_PCNOC_S_2 91
+#define	ICBID_MASTER_PCNOC_S_3 92
+#define	ICBID_MASTER_PCNOC_S_4 93
+#define	ICBID_MASTER_PCNOC_S_6 94
+#define	ICBID_MASTER_PCNOC_S_7 95
+#define	ICBID_MASTER_PCNOC_S_8 96
+#define	ICBID_MASTER_PCNOC_S_9 97
+#define	ICBID_MASTER_QDSS_INT 98
+#define	ICBID_MASTER_SNOC_INT_0	99
+#define	ICBID_MASTER_SNOC_INT_1 100
+#define	ICBID_MASTER_SNOC_INT_BIMC 101
+#define	ICBID_MASTER_TCU_0 102
+#define	ICBID_MASTER_TCU_1 103
+#define	ICBID_MASTER_BIMC_INT_0 104
+#define	ICBID_MASTER_BIMC_INT_1 105
+#define	ICBID_MASTER_CAMERA 106
+#define	ICBID_MASTER_RICA 107
+#define	ICBID_MASTER_SNOC_BIMC_2 108
+#define	ICBID_MASTER_BIMC_SNOC_1 109
+#define	ICBID_MASTER_A0NOC_SNOC 110
+#define	ICBID_MASTER_A1NOC_SNOC 111
+#define	ICBID_MASTER_A2NOC_SNOC 112
+#define	ICBID_MASTER_PIMEM 113
+#define	ICBID_MASTER_SNOC_VMEM 114
+#define	ICBID_MASTER_CPP 115
+#define	ICBID_MASTER_CNOC_A1NOC 116
+#define	ICBID_MASTER_PNOC_A1NOC 117
+#define	ICBID_MASTER_HMSS 118
+#define	ICBID_MASTER_PCIE_2 119
+#define	ICBID_MASTER_ROTATOR 120
+#define	ICBID_MASTER_VENUS_VMEM 121
+#define	ICBID_MASTER_DCC 122
+#define	ICBID_MASTER_MCDMA 123
+#define	ICBID_MASTER_PCNOC_INT_2 124
+#define	ICBID_MASTER_PCNOC_INT_3 125
+#define	ICBID_MASTER_PCNOC_INT_4 126
+#define	ICBID_MASTER_PCNOC_INT_5 127
+#define	ICBID_MASTER_PCNOC_INT_6 128
+#define	ICBID_MASTER_PCNOC_S_5 129
+#define	ICBID_MASTER_SENSORS_AHB 130
+#define	ICBID_MASTER_SENSORS_PROC 131
+#define	ICBID_MASTER_QSPI 132
+#define	ICBID_MASTER_VFE1 133
+#define	ICBID_MASTER_SNOC_INT_2 134
+#define	ICBID_MASTER_SMMNOC_BIMC 135
+#define	ICBID_MASTER_CRVIRT_A1NOC 136
+#define	ICBID_MASTER_XM_USB_HS1 137
+#define	ICBID_MASTER_XI_USB_HS1 138
+#define	ICBID_MASTER_PCNOC_BIMC_1 139
+#define	ICBID_MASTER_BIMC_PCNOC 140
+#define	ICBID_MASTER_XI_HSIC 141
+#define	ICBID_MASTER_SGMII  142
+#define	ICBID_MASTER_SPMI_FETCHER 143
+#define	ICBID_MASTER_GNOC_BIMC 144
+#define	ICBID_MASTER_CRVIRT_A2NOC 145
+#define	ICBID_MASTER_CNOC_A2NOC 146
+#define	ICBID_MASTER_WLAN 147
+#define	ICBID_MASTER_MSS_CE 148
+#define	ICBID_MASTER_CDSP_PROC 149
+#define	ICBID_MASTER_GNOC_SNOC 150
+
+#define	ICBID_SLAVE_EBI1 0
+#define	ICBID_SLAVE_APPSS_L2 1
+#define	ICBID_SLAVE_BIMC_SNOC 2
+#define	ICBID_SLAVE_BIMC_SNOC_0 ICBID_SLAVE_BIMC_SNOC
+#define	ICBID_SLAVE_CAMERA_CFG 3
+#define	ICBID_SLAVE_DISPLAY_CFG 4
+#define	ICBID_SLAVE_OCMEM_CFG 5
+#define	ICBID_SLAVE_CPR_CFG 6
+#define	ICBID_SLAVE_CPR_XPU_CFG 7
+#define	ICBID_SLAVE_MISC_CFG 8
+#define	ICBID_SLAVE_MISC_XPU_CFG 9
+#define	ICBID_SLAVE_VENUS_CFG 10
+#define	ICBID_SLAVE_GFX3D_CFG 11
+#define	ICBID_SLAVE_MMSS_CLK_CFG 12
+#define	ICBID_SLAVE_MMSS_CLK_XPU_CFG 13
+#define	ICBID_SLAVE_MNOC_MPU_CFG 14
+#define	ICBID_SLAVE_ONOC_MPU_CFG 15
+#define	ICBID_SLAVE_MNOC_BIMC 16
+#define	ICBID_SLAVE_SERVICE_MNOC 17
+#define	ICBID_SLAVE_OCMEM 18
+#define	ICBID_SLAVE_GMEM ICBID_SLAVE_OCMEM
+#define	ICBID_SLAVE_SERVICE_ONOC 19
+#define	ICBID_SLAVE_APPSS 20
+#define	ICBID_SLAVE_LPASS 21
+#define	ICBID_SLAVE_USB3 22
+#define	ICBID_SLAVE_USB3_0 ICBID_SLAVE_USB3
+#define	ICBID_SLAVE_WCSS 23
+#define	ICBID_SLAVE_SNOC_BIMC 24
+#define	ICBID_SLAVE_SNOC_BIMC_0 ICBID_SLAVE_SNOC_BIMC
+#define	ICBID_SLAVE_SNOC_CNOC 25
+#define	ICBID_SLAVE_IMEM 26
+#define	ICBID_SLAVE_OCIMEM ICBID_SLAVE_IMEM
+#define	ICBID_SLAVE_SNOC_OVIRT 27
+#define	ICBID_SLAVE_SNOC_GVIRT ICBID_SLAVE_SNOC_OVIRT
+#define	ICBID_SLAVE_SNOC_PNOC 28
+#define	ICBID_SLAVE_SNOC_PCNOC ICBID_SLAVE_SNOC_PNOC
+#define	ICBID_SLAVE_SERVICE_SNOC 29
+#define	ICBID_SLAVE_QDSS_STM 30
+#define	ICBID_SLAVE_SDCC_1 31
+#define	ICBID_SLAVE_SDCC_3 32
+#define	ICBID_SLAVE_SDCC_2 33
+#define	ICBID_SLAVE_SDCC_4 34
+#define	ICBID_SLAVE_TSIF 35
+#define	ICBID_SLAVE_BAM_DMA 36
+#define	ICBID_SLAVE_BLSP_2 37
+#define	ICBID_SLAVE_USB_HSIC 38
+#define	ICBID_SLAVE_BLSP_1 39
+#define	ICBID_SLAVE_USB_HS 40
+#define	ICBID_SLAVE_USB_HS1 ICBID_SLAVE_USB_HS
+#define	ICBID_SLAVE_PDM 41
+#define	ICBID_SLAVE_PERIPH_APU_CFG 42
+#define	ICBID_SLAVE_PNOC_MPU_CFG 43
+#define	ICBID_SLAVE_PRNG 44
+#define	ICBID_SLAVE_PNOC_SNOC 45
+#define	ICBID_SLAVE_PCNOC_SNOC ICBID_SLAVE_PNOC_SNOC
+#define	ICBID_SLAVE_SERVICE_PNOC 46
+#define	ICBID_SLAVE_CLK_CTL 47
+#define	ICBID_SLAVE_CNOC_MSS 48
+#define	ICBID_SLAVE_PCNOC_MSS ICBID_SLAVE_CNOC_MSS
+#define	ICBID_SLAVE_SECURITY 49
+#define	ICBID_SLAVE_TCSR 50
+#define	ICBID_SLAVE_TLMM 51
+#define	ICBID_SLAVE_CRYPTO_0_CFG 52
+#define	ICBID_SLAVE_CRYPTO_1_CFG 53
+#define	ICBID_SLAVE_IMEM_CFG 54
+#define	ICBID_SLAVE_MESSAGE_RAM 55
+#define	ICBID_SLAVE_BIMC_CFG 56
+#define	ICBID_SLAVE_BOOT_ROM 57
+#define	ICBID_SLAVE_CNOC_MNOC_MMSS_CFG 58
+#define	ICBID_SLAVE_PMIC_ARB 59
+#define	ICBID_SLAVE_SPDM_WRAPPER 60
+#define	ICBID_SLAVE_DEHR_CFG 61
+#define	ICBID_SLAVE_MPM 62
+#define	ICBID_SLAVE_QDSS_CFG 63
+#define	ICBID_SLAVE_RBCPR_CFG 64
+#define	ICBID_SLAVE_RBCPR_CX_CFG ICBID_SLAVE_RBCPR_CFG
+#define	ICBID_SLAVE_RBCPR_QDSS_APU_CFG 65
+#define	ICBID_SLAVE_CNOC_MNOC_CFG 66
+#define	ICBID_SLAVE_SNOC_MPU_CFG 67
+#define	ICBID_SLAVE_CNOC_ONOC_CFG 68
+#define	ICBID_SLAVE_PNOC_CFG 69
+#define	ICBID_SLAVE_SNOC_CFG 70
+#define	ICBID_SLAVE_EBI1_DLL_CFG 71
+#define	ICBID_SLAVE_PHY_APU_CFG 72
+#define	ICBID_SLAVE_EBI1_PHY_CFG 73
+#define	ICBID_SLAVE_RPM 74
+#define	ICBID_SLAVE_CNOC_SNOC 75
+#define	ICBID_SLAVE_SERVICE_CNOC 76
+#define	ICBID_SLAVE_OVIRT_SNOC 77
+#define	ICBID_SLAVE_OVIRT_OCMEM 78
+#define	ICBID_SLAVE_USB_HS2 79
+#define	ICBID_SLAVE_QPIC 80
+#define	ICBID_SLAVE_IPS_CFG 81
+#define	ICBID_SLAVE_DSI_CFG 82
+#define	ICBID_SLAVE_USB3_1 83
+#define	ICBID_SLAVE_PCIE_0 84
+#define	ICBID_SLAVE_PCIE_1 85
+#define	ICBID_SLAVE_PSS_SMMU_CFG 86
+#define	ICBID_SLAVE_CRYPTO_2_CFG 87
+#define	ICBID_SLAVE_PCIE_0_CFG 88
+#define	ICBID_SLAVE_PCIE_1_CFG 89
+#define	ICBID_SLAVE_SATA_CFG 90
+#define	ICBID_SLAVE_SPSS_GENI_IR 91
+#define	ICBID_SLAVE_UFS_CFG 92
+#define	ICBID_SLAVE_AVSYNC_CFG 93
+#define	ICBID_SLAVE_VPU_CFG 94
+#define	ICBID_SLAVE_USB_PHY_CFG 95
+#define	ICBID_SLAVE_RBCPR_MX_CFG 96
+#define	ICBID_SLAVE_PCIE_PARF 97
+#define	ICBID_SLAVE_VCAP_CFG 98
+#define	ICBID_SLAVE_EMAC_CFG 99
+#define	ICBID_SLAVE_BCAST_CFG 100
+#define	ICBID_SLAVE_KLM_CFG 101
+#define	ICBID_SLAVE_DISPLAY_PWM 102
+#define	ICBID_SLAVE_GENI 103
+#define	ICBID_SLAVE_SNOC_BIMC_1 104
+#define	ICBID_SLAVE_AUDIO 105
+#define	ICBID_SLAVE_CATS_0 106
+#define	ICBID_SLAVE_CATS_1 107
+#define	ICBID_SLAVE_MM_INT_0 108
+#define	ICBID_SLAVE_MM_INT_1 109
+#define	ICBID_SLAVE_MM_INT_2 110
+#define	ICBID_SLAVE_MM_INT_BIMC 111
+#define	ICBID_SLAVE_MMU_MODEM_XPU_CFG 112
+#define	ICBID_SLAVE_MSS_INT 113
+#define	ICBID_SLAVE_PCNOC_INT_0 114
+#define	ICBID_SLAVE_PCNOC_INT_1 115
+#define	ICBID_SLAVE_PCNOC_M_0 116
+#define	ICBID_SLAVE_PCNOC_M_1 117
+#define	ICBID_SLAVE_PCNOC_S_0 118
+#define	ICBID_SLAVE_PCNOC_S_1 119
+#define	ICBID_SLAVE_PCNOC_S_2 120
+#define	ICBID_SLAVE_PCNOC_S_3 121
+#define	ICBID_SLAVE_PCNOC_S_4 122
+#define	ICBID_SLAVE_PCNOC_S_6 123
+#define	ICBID_SLAVE_PCNOC_S_7 124
+#define	ICBID_SLAVE_PCNOC_S_8 125
+#define	ICBID_SLAVE_PCNOC_S_9 126
+#define	ICBID_SLAVE_PRNG_XPU_CFG 127
+#define	ICBID_SLAVE_QDSS_INT 128
+#define	ICBID_SLAVE_RPM_XPU_CFG 129
+#define	ICBID_SLAVE_SNOC_INT_0 130
+#define	ICBID_SLAVE_SNOC_INT_1 131
+#define	ICBID_SLAVE_SNOC_INT_BIMC 132
+#define	ICBID_SLAVE_TCU 133
+#define	ICBID_SLAVE_BIMC_INT_0 134
+#define	ICBID_SLAVE_BIMC_INT_1 135
+#define	ICBID_SLAVE_RICA_CFG 136
+#define	ICBID_SLAVE_SNOC_BIMC_2 137
+#define	ICBID_SLAVE_BIMC_SNOC_1 138
+#define	ICBID_SLAVE_PNOC_A1NOC 139
+#define	ICBID_SLAVE_SNOC_VMEM 140
+#define	ICBID_SLAVE_A0NOC_SNOC 141
+#define	ICBID_SLAVE_A1NOC_SNOC 142
+#define	ICBID_SLAVE_A2NOC_SNOC 143
+#define	ICBID_SLAVE_A0NOC_CFG 144
+#define	ICBID_SLAVE_A0NOC_MPU_CFG 145
+#define	ICBID_SLAVE_A0NOC_SMMU_CFG 146
+#define	ICBID_SLAVE_A1NOC_CFG 147
+#define	ICBID_SLAVE_A1NOC_MPU_CFG 148
+#define	ICBID_SLAVE_A1NOC_SMMU_CFG 149
+#define	ICBID_SLAVE_A2NOC_CFG 150
+#define	ICBID_SLAVE_A2NOC_MPU_CFG 151
+#define	ICBID_SLAVE_A2NOC_SMMU_CFG 152
+#define	ICBID_SLAVE_AHB2PHY 153
+#define	ICBID_SLAVE_CAMERA_THROTTLE_CFG 154
+#define	ICBID_SLAVE_DCC_CFG 155
+#define	ICBID_SLAVE_DISPLAY_THROTTLE_CFG 156
+#define	ICBID_SLAVE_DSA_CFG 157
+#define	ICBID_SLAVE_DSA_MPU_CFG 158
+#define	ICBID_SLAVE_SSC_MPU_CFG 159
+#define	ICBID_SLAVE_HMSS_L3 160
+#define	ICBID_SLAVE_LPASS_SMMU_CFG 161
+#define	ICBID_SLAVE_MMAGIC_CFG 162
+#define	ICBID_SLAVE_PCIE20_AHB2PHY 163
+#define	ICBID_SLAVE_PCIE_2 164
+#define	ICBID_SLAVE_PCIE_2_CFG 165
+#define	ICBID_SLAVE_PIMEM 166
+#define	ICBID_SLAVE_PIMEM_CFG 167
+#define	ICBID_SLAVE_QDSS_RBCPR_APU_CFG 168
+#define	ICBID_SLAVE_RBCPR_CX 169
+#define	ICBID_SLAVE_RBCPR_MX 170
+#define	ICBID_SLAVE_SMMU_CPP_CFG 171
+#define	ICBID_SLAVE_SMMU_JPEG_CFG 172
+#define	ICBID_SLAVE_SMMU_MDP_CFG 173
+#define	ICBID_SLAVE_SMMU_ROTATOR_CFG 174
+#define	ICBID_SLAVE_SMMU_VENUS_CFG 175
+#define	ICBID_SLAVE_SMMU_VFE_CFG 176
+#define	ICBID_SLAVE_SSC_CFG 177
+#define	ICBID_SLAVE_VENUS_THROTTLE_CFG 178
+#define	ICBID_SLAVE_VMEM 179
+#define	ICBID_SLAVE_VMEM_CFG 180
+#define	ICBID_SLAVE_QDSS_MPU_CFG 181
+#define	ICBID_SLAVE_USB3_PHY_CFG 182
+#define	ICBID_SLAVE_IPA_CFG 183
+#define	ICBID_SLAVE_PCNOC_INT_2 184
+#define	ICBID_SLAVE_PCNOC_INT_3 185
+#define	ICBID_SLAVE_PCNOC_INT_4 186
+#define	ICBID_SLAVE_PCNOC_INT_5 187
+#define	ICBID_SLAVE_PCNOC_INT_6 188
+#define	ICBID_SLAVE_PCNOC_S_5 189
+#define	ICBID_SLAVE_QSPI 190
+#define	ICBID_SLAVE_A1NOC_MS_MPU_CFG 191
+#define	ICBID_SLAVE_A2NOC_MS_MPU_CFG 192
+#define	ICBID_SLAVE_MODEM_Q6_SMMU_CFG 193
+#define	ICBID_SLAVE_MSS_MPU_CFG 194
+#define	ICBID_SLAVE_MSS_PROC_MS_MPU_CFG 195
+#define	ICBID_SLAVE_SKL 196
+#define	ICBID_SLAVE_SNOC_INT_2 197
+#define	ICBID_SLAVE_SMMNOC_BIMC 198
+#define	ICBID_SLAVE_CRVIRT_A1NOC 199
+#define	ICBID_SLAVE_SGMII	 200
+#define	ICBID_SLAVE_QHS4_APPS	 201
+#define	ICBID_SLAVE_BIMC_PCNOC   202
+#define	ICBID_SLAVE_PCNOC_BIMC_1 203
+#define	ICBID_SLAVE_SPMI_FETCHER 204
+#define	ICBID_SLAVE_MMSS_SMMU_CFG 205
+#define	ICBID_SLAVE_WLAN 206
+#define	ICBID_SLAVE_CRVIRT_A2NOC 207
+#define	ICBID_SLAVE_CNOC_A2NOC 208
+#define	ICBID_SLAVE_GLM 209
+#define	ICBID_SLAVE_GNOC_BIMC 210
+#define	ICBID_SLAVE_GNOC_SNOC 211
+#define	ICBID_SLAVE_QM_CFG 212
+#define	ICBID_SLAVE_TLMM_EAST 213
+#define	ICBID_SLAVE_TLMM_NORTH 214
+#define	ICBID_SLAVE_TLMM_WEST 215
+#define	ICBID_SLAVE_LPASS_TCM	216
+#define	ICBID_SLAVE_TLMM_SOUTH	217
+#define	ICBID_SLAVE_TLMM_CENTER	218
+#define	ICBID_SLAVE_MSS_NAV_CE_MPU_CFG	219
+#define	ICBID_SLAVE_A2NOC_THROTTLE_CFG	220
+#define	ICBID_SLAVE_CDSP	221
+#define	ICBID_SLAVE_CDSP_SMMU_CFG	222
+#define	ICBID_SLAVE_LPASS_MPU_CFG	223
+#define	ICBID_SLAVE_CSI_PHY_CFG	224
+#endif
diff -Nruw linux-4.4.115/include/dt-bindings/msm/msm-bus-rule-ops.h linux-4.4.115-fbx/include/dt-bindings/msm/msm-bus-rule-ops.h
--- linux-4.4.115/include/dt-bindings/msm/msm-bus-rule-ops.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/include/dt-bindings/msm/msm-bus-rule-ops.h	2019-01-22 16:16:28.179288750 +0100
@@ -0,0 +1,38 @@
+/* Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MSM_BUS_RULE_OPS_H
+#define __MSM_BUS_RULE_OPS_H
+
+#define FLD_IB	0
+#define FLD_AB	1
+#define FLD_CLK	2
+
+#define OP_LE	0
+#define OP_LT	1
+#define OP_GE	2
+#define OP_GT	3
+#define OP_NOOP	4
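+/*
+ * A rule compares the selected field (FLD_IB = instantaneous bandwidth,
+ * FLD_AB = arbitrated/average bandwidth, FLD_CLK = clock rate) against a
+ * threshold using one of the OP_* comparators; OP_NOOP disables the check.
+ */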
+
+#define RULE_STATE_NOT_APPLIED	0
+#define RULE_STATE_APPLIED	1
+
+#define THROTTLE_ON	0
+#define THROTTLE_OFF	1
+#define THROTTLE_REG	2
+
+#endif
diff -Nruw linux-4.4.115/include/dt-bindings/msm/pm.h linux-4.4.115-fbx/include/dt-bindings/msm/pm.h
--- linux-4.4.115/include/dt-bindings/msm/pm.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/include/dt-bindings/msm/pm.h	2019-01-22 16:16:28.179288750 +0100
@@ -0,0 +1,30 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __DT_MSM_PM_H__
+#define __DT_MSM_PM_H__
+
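+/*
+ * Reset levels rank low-power modes by how much state survives: RET is
+ * retention, GDHS a globally distributed head switch mode (logic power
+ * gated, memory retained), and PC full power collapse.
+ */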
+#define LPM_RESET_LVL_NONE	0
+#define LPM_RESET_LVL_RET	1
+#define LPM_RESET_LVL_GDHS	2
+#define LPM_RESET_LVL_PC	3
+
+#define LPM_AFF_LVL_CPU		0
+#define LPM_AFF_LVL_L2		1
+#define LPM_AFF_LVL_CCI		2
+
+#endif
diff -Nruw linux-4.4.115/include/dt-bindings/msm/power-on.h linux-4.4.115-fbx/include/dt-bindings/msm/power-on.h
--- linux-4.4.115/include/dt-bindings/msm/power-on.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/include/dt-bindings/msm/power-on.h	2019-01-22 16:16:28.179288750 +0100
@@ -0,0 +1,30 @@
+/* Copyright (c) 2015, 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MSM_POWER_ON_H__
+#define __MSM_POWER_ON_H__
+
+#define PON_POWER_OFF_RESERVED		0x00
+#define PON_POWER_OFF_WARM_RESET	0x01
+#define PON_POWER_OFF_SHUTDOWN		0x04
+#define PON_POWER_OFF_DVDD_SHUTDOWN	0x05
+#define PON_POWER_OFF_HARD_RESET	0x07
+#define PON_POWER_OFF_DVDD_HARD_RESET	0x08
+#define PON_POWER_OFF_MAX_TYPE		0x10
+
+#endif
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/include/dt-bindings/regulator/qcom,rpm-smd-regulator.h	2019-01-22 16:16:28.183288787 +0100
@@ -0,0 +1,28 @@
+/* Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __QCOM_RPM_SMD_REGULATOR_H
+#define __QCOM_RPM_SMD_REGULATOR_H
+
+#define RPM_SMD_REGULATOR_LEVEL_NONE		0
+#define RPM_SMD_REGULATOR_LEVEL_RETENTION	16
+#define RPM_SMD_REGULATOR_LEVEL_RETENTION_PLUS	32
+#define RPM_SMD_REGULATOR_LEVEL_MIN_SVS		48
+#define RPM_SMD_REGULATOR_LEVEL_LOW_SVS		64
+#define RPM_SMD_REGULATOR_LEVEL_SVS		128
+#define RPM_SMD_REGULATOR_LEVEL_SVS_PLUS	192
+#define RPM_SMD_REGULATOR_LEVEL_NOM		256
+#define RPM_SMD_REGULATOR_LEVEL_NOM_PLUS	320
+#define RPM_SMD_REGULATOR_LEVEL_TURBO		384
+#define RPM_SMD_REGULATOR_LEVEL_BINNING		512
+
+#endif
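
These level values are voltage-corner encodings rather than literal microvolts; consumers pass them through the normal regulator API. A hedged sketch of voting for the SVS corner, assuming a regulator handle obtained elsewhere via regulator_get():

#include <linux/regulator/consumer.h>
#include <dt-bindings/regulator/qcom,rpm-smd-regulator.h>

static int example_vote_svs_corner(struct regulator *vdd_cx)
{
	/* min = SVS corner; max = highest corner the RPM may choose */
	return regulator_set_voltage(vdd_cx, RPM_SMD_REGULATOR_LEVEL_SVS,
				     RPM_SMD_REGULATOR_LEVEL_BINNING);
}
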
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/include/linux/arm-smccc.h	2019-01-22 16:16:28.187288823 +0100
@@ -0,0 +1,125 @@
+/*
+ * Copyright (c) 2015, Linaro Limited
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef __LINUX_ARM_SMCCC_H
+#define __LINUX_ARM_SMCCC_H
+
+#include <linux/linkage.h>
+#include <linux/types.h>
+
+/*
+ * This file provides common defines for ARM SMC Calling Convention as
+ * specified in
+ * http://infocenter.arm.com/help/topic/com.arm.doc.den0028a/index.html
+ */
+
+#define ARM_SMCCC_STD_CALL		0
+#define ARM_SMCCC_FAST_CALL		1
+#define ARM_SMCCC_TYPE_SHIFT		31
+
+#define ARM_SMCCC_SMC_32		0
+#define ARM_SMCCC_SMC_64		1
+#define ARM_SMCCC_CALL_CONV_SHIFT	30
+
+#define ARM_SMCCC_OWNER_MASK		0x3F
+#define ARM_SMCCC_OWNER_SHIFT		24
+
+#define ARM_SMCCC_FUNC_MASK		0xFFFF
+
+#define ARM_SMCCC_IS_FAST_CALL(smc_val)	\
+	((smc_val) & (ARM_SMCCC_FAST_CALL << ARM_SMCCC_TYPE_SHIFT))
+#define ARM_SMCCC_IS_64(smc_val) \
+	((smc_val) & (ARM_SMCCC_SMC_64 << ARM_SMCCC_CALL_CONV_SHIFT))
+#define ARM_SMCCC_FUNC_NUM(smc_val)	((smc_val) & ARM_SMCCC_FUNC_MASK)
+#define ARM_SMCCC_OWNER_NUM(smc_val) \
+	(((smc_val) >> ARM_SMCCC_OWNER_SHIFT) & ARM_SMCCC_OWNER_MASK)
+
+#define ARM_SMCCC_CALL_VAL(type, calling_convention, owner, func_num) \
+	(((type) << ARM_SMCCC_TYPE_SHIFT) | \
+	((calling_convention) << ARM_SMCCC_CALL_CONV_SHIFT) | \
+	(((owner) & ARM_SMCCC_OWNER_MASK) << ARM_SMCCC_OWNER_SHIFT) | \
+	((func_num) & ARM_SMCCC_FUNC_MASK))
+
+#define ARM_SMCCC_OWNER_ARCH		0
+#define ARM_SMCCC_OWNER_CPU		1
+#define ARM_SMCCC_OWNER_SIP		2
+#define ARM_SMCCC_OWNER_OEM		3
+#define ARM_SMCCC_OWNER_STANDARD	4
+#define ARM_SMCCC_OWNER_TRUSTED_APP	48
+#define ARM_SMCCC_OWNER_TRUSTED_APP_END	49
+#define ARM_SMCCC_OWNER_TRUSTED_OS	50
+#define ARM_SMCCC_OWNER_TRUSTED_OS_END	63
+
+/**
+ * struct arm_smccc_res - Result from SMC/HVC call
+ * @a0-a3: result values from registers 0 to 3
+ */
+struct arm_smccc_res {
+	unsigned long a0;
+	unsigned long a1;
+	unsigned long a2;
+	unsigned long a3;
+};
+
+/**
+ * arm_smccc_smc() - make SMC calls
+ * @a0-a7: arguments passed in registers 0 to 7
+ * @res: result values from registers 0 to 3
+ *
+ * This function is used to make SMC calls following SMC Calling Convention.
+ * The contents of the supplied parameters are copied to registers 0 to 7
+ * prior to the SMC instruction. The return values are updated with the
+ * contents of registers 0 to 3 on return from the SMC instruction.
+ */
+asmlinkage void arm_smccc_smc(unsigned long a0, unsigned long a1,
+			unsigned long a2, unsigned long a3, unsigned long a4,
+			unsigned long a5, unsigned long a6, unsigned long a7,
+			struct arm_smccc_res *res);
+
+/**
+ * arm_smccc_hvc() - make HVC calls
+ * @a0-a7: arguments passed in registers 0 to 7
+ * @res: result values from registers 0 to 3
+ *
+ * This function is used to make HVC calls following SMC Calling
+ * Convention.  The contents of the supplied parameters are copied to registers
+ * 0 to 7 prior to the HVC instruction. The return values are updated with the
+ * contents of registers 0 to 3 on return from the HVC instruction.
+ */
+asmlinkage void arm_smccc_hvc(unsigned long a0, unsigned long a1,
+			unsigned long a2, unsigned long a3, unsigned long a4,
+			unsigned long a5, unsigned long a6, unsigned long a7,
+			struct arm_smccc_res *res);
+
+
+static inline unsigned long __invoke_psci_fn_hvc(unsigned long function_id,
+			unsigned long arg0, unsigned long arg1,
+			unsigned long arg2)
+{
+	struct arm_smccc_res res;
+
+	arm_smccc_hvc(function_id, arg0, arg1, arg2, 0, 0, 0, 0, &res);
+	return res.a0;
+}
+
+static inline unsigned long __invoke_psci_fn_smc(unsigned long function_id,
+			unsigned long arg0, unsigned long arg1,
+			unsigned long arg2)
+{
+	struct arm_smccc_res res;
+
+	arm_smccc_smc(function_id, arg0, arg1, arg2, 0, 0, 0, 0, &res);
+	return res.a0;
+}
+
+#endif /*__LINUX_ARM_SMCCC_H*/
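
The macros above compose as follows: ARM_SMCCC_CALL_VAL() packs the call type, convention, owner and function number into the a0 function ID, and arm_smccc_smc() marshals the remaining arguments into registers. A minimal sketch (the SiP function number 0x100 is an invented placeholder):

#include <linux/arm-smccc.h>

/* Function ID for a hypothetical SiP service call (0x100 is invented). */
#define EXAMPLE_SIP_FUNC_ID					\
	ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, ARM_SMCCC_SMC_32, \
			   ARM_SMCCC_OWNER_SIP, 0x100)

static unsigned long example_sip_query(unsigned long arg)
{
	struct arm_smccc_res res;

	/* a0 = function id, a1 = first argument, unused registers zeroed */
	arm_smccc_smc(EXAMPLE_SIP_FUNC_ID, arg, 0, 0, 0, 0, 0, 0, &res);
	return res.a0;	/* a0 conventionally carries the status/result */
}
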
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/include/linux/avtimer_kernel.h	2019-01-22 16:16:28.191288859 +0100
@@ -0,0 +1,24 @@
+/*
+ * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _AVTIMER_H
+#define _AVTIMER_H
+
+#include <uapi/linux/avtimer.h>
+
+int avcs_core_open(void);
+int avcs_core_disable_power_collapse(int disable);/* true or false */
+int avcs_core_query_timer(uint64_t *avtimer_tick);
+
+#endif
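
The header leaves the calling sequence implicit; a plausible sketch, assuming avcs_core_open() must succeed before the timer can be queried and that both calls return a negative errno on failure:

#include <linux/avtimer_kernel.h>

static int example_read_avtimer_tick(uint64_t *tick)
{
	int rc;

	rc = avcs_core_open();			/* open the AVCS core session */
	if (rc < 0)
		return rc;
	return avcs_core_query_timer(tick);	/* fills *tick on success */
}
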
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/include/linux/batterydata-lib.h	2019-01-22 16:16:28.191288859 +0100
@@ -0,0 +1,173 @@
+/* Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __BMS_BATTERYDATA_H
+#define __BMS_BATTERYDATA_H
+
+#include <linux/errno.h>
+
+#define FCC_CC_COLS		5
+#define FCC_TEMP_COLS		8
+
+#define PC_CC_ROWS             31
+#define PC_CC_COLS             13
+
+#define PC_TEMP_ROWS		31
+#define PC_TEMP_COLS		8
+
+#define ACC_IBAT_ROWS		4
+#define ACC_TEMP_COLS		3
+
+#define MAX_SINGLE_LUT_COLS	20
+
+#define MAX_BATT_ID_NUM		4
+#define DEGC_SCALE		10
+
+struct single_row_lut {
+	int x[MAX_SINGLE_LUT_COLS];
+	int y[MAX_SINGLE_LUT_COLS];
+	int cols;
+};
+
+/**
+ * struct sf_lut -
+ * @rows:	number of percent charge entries; should be <= PC_CC_ROWS
+ * @cols:	number of charge cycle entries; should be <= PC_CC_COLS
+ * @row_entries:	the charge cycles/temperature at which sf data
+ *			is available in the table.
+ *		The charge cycles must be in increasing order from 0 to rows.
+ * @percent:	the percent charge at which sf data is available in the table
+ *		The percent charge must be in decreasing order from 0 to cols.
+ * @sf:		the scaling factor data
+ */
+struct sf_lut {
+	int rows;
+	int cols;
+	int row_entries[PC_CC_COLS];
+	int percent[PC_CC_ROWS];
+	int sf[PC_CC_ROWS][PC_CC_COLS];
+};
+
+/**
+ * struct pc_temp_ocv_lut -
+ * @rows:	number of percent charge entries; should be <= PC_TEMP_ROWS
+ * @cols:	number of temperature entries; should be <= PC_TEMP_COLS
+ * @temp:	the temperatures at which ocv data is available in the table
+ *		The temperatures must be in increasing order from 0 to rows.
+ * @percent:	the percent charge at which ocv data is available in the table
+ *		The percent charge must be in decreasing order from 0 to cols.
+ * @ocv:	the open circuit voltage
+ */
+struct pc_temp_ocv_lut {
+	int rows;
+	int cols;
+	int temp[PC_TEMP_COLS];
+	int percent[PC_TEMP_ROWS];
+	int ocv[PC_TEMP_ROWS][PC_TEMP_COLS];
+};
+
+struct ibat_temp_acc_lut {
+	int rows;
+	int cols;
+	int temp[ACC_TEMP_COLS];
+	int ibat[ACC_IBAT_ROWS];
+	int acc[ACC_IBAT_ROWS][ACC_TEMP_COLS];
+};
+
+struct batt_ids {
+	int kohm[MAX_BATT_ID_NUM];
+	int num;
+};
+
+enum battery_type {
+	BATT_UNKNOWN = 0,
+	BATT_PALLADIUM,
+	BATT_DESAY,
+	BATT_OEM,
+	BATT_QRD_4V35_2000MAH,
+	BATT_QRD_4V2_1300MAH,
+};
+
+/**
+ * struct bms_battery_data -
+ * @fcc:		full charge capacity (mAmpHour)
+ * @fcc_temp_lut:	table to get fcc at a given temp
+ * @pc_temp_ocv_lut:	table to get percent charge given batt temp and ocv
+ * @pc_sf_lut:		table to get percent charge scaling factor given cycles
+ *			and percent charge
+ * @rbatt_sf_lut:	table to get battery resistance scaling factor given
+ *			temperature and percent charge
+ * @default_rbatt_mohm:	the default value of battery resistance to use when
+ *			readings from bms are not available.
+ * @delta_rbatt_mohm:	the resistance to be added towards lower soc to
+ *			compensate for battery capacitance.
+ * @rbatt_capacitive_mohm: the resistance to be added to compensate for
+ *				battery capacitance
+ * @flat_ocv_threshold_uv: the voltage where the battery's discharge curve
+ *				starts flattening out.
+ * @max_voltage_uv:	max voltage of the battery
+ * @cutoff_uv:		cutoff voltage of the battery
+ * @iterm_ua:		termination current of the battery when charging
+ *			to 100%
+ * @batt_id_kohm:	the best matched battery id resistor value
+ * @fastchg_current_ma: maximum fast charge current
+ * @fg_cc_cv_threshold_mv: CC to CV threshold voltage
+ */
+
+struct bms_battery_data {
+	unsigned int		fcc;
+	struct single_row_lut	*fcc_temp_lut;
+	struct single_row_lut	*fcc_sf_lut;
+	struct pc_temp_ocv_lut	*pc_temp_ocv_lut;
+	struct ibat_temp_acc_lut *ibat_acc_lut;
+	struct sf_lut		*pc_sf_lut;
+	struct sf_lut		*rbatt_sf_lut;
+	int			default_rbatt_mohm;
+	int			delta_rbatt_mohm;
+	int			rbatt_capacitive_mohm;
+	int			flat_ocv_threshold_uv;
+	int			max_voltage_uv;
+	int			cutoff_uv;
+	int			iterm_ua;
+	int			batt_id_kohm;
+	int			fastchg_current_ma;
+	int			fg_cc_cv_threshold_mv;
+	const char		*battery_type;
+};
+
+#define is_between(left, right, value) \
+		(((left) >= (right) && (left) >= (value) \
+			&& (value) >= (right)) \
+		|| ((left) <= (right) && (left) <= (value) \
+			&& (value) <= (right)))
+
+extern struct bms_battery_data  palladium_1500_data;
+extern struct bms_battery_data  desay_5200_data;
+extern struct bms_battery_data  oem_batt_data;
+extern struct bms_battery_data QRD_4v35_2000mAh_data;
+extern struct bms_battery_data  qrd_4v2_1300mah_data;
+
+int interpolate_fcc(struct single_row_lut *fcc_temp_lut, int batt_temp);
+int interpolate_scalingfactor(struct sf_lut *sf_lut, int row_entry, int pc);
+int interpolate_scalingfactor_fcc(struct single_row_lut *fcc_sf_lut,
+				int cycles);
+int interpolate_pc(struct pc_temp_ocv_lut *pc_temp_ocv,
+				int batt_temp_degc, int ocv);
+int interpolate_ocv(struct pc_temp_ocv_lut *pc_temp_ocv,
+				int batt_temp_degc, int pc);
+int interpolate_slope(struct pc_temp_ocv_lut *pc_temp_ocv,
+					int batt_temp, int pc);
+int interpolate_acc(struct ibat_temp_acc_lut *ibat_acc_lut,
+					int batt_temp, int ibat);
+int linear_interpolate(int y0, int x0, int y1, int x1, int x);
+
+#endif
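
For illustration: linear_interpolate() evaluates the line through (x0, y0) and (x1, y1) at x, which is how the lookup tables above are read between sampled points. A sketch with invented sample values (3.6 V at 0 degC, 3.7 V at 25 degC):

#include <linux/batterydata-lib.h>

static int example_ocv_uv(int batt_temp_degc)
{
	/* is_between() accepts its bounds in either order */
	if (!is_between(0, 25, batt_temp_degc))
		return -EINVAL;		/* outside the sampled range */
	/* linear_interpolate(y0, x0, y1, x1, x) */
	return linear_interpolate(3600000, 0, 3700000, 25, batt_temp_degc);
}
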
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/include/linux/bluetooth-power.h	2019-01-22 16:16:28.199288932 +0100
@@ -0,0 +1,89 @@
+/*
+ * Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __LINUX_BLUETOOTH_POWER_H
+#define __LINUX_BLUETOOTH_POWER_H
+
+/*
+ * voltage regulator information required for configuring the
+ * bluetooth chipset
+ */
+struct bt_power_vreg_data {
+	/* voltage regulator handle */
+	struct regulator *reg;
+	/* regulator name */
+	const char *name;
+	/* voltage levels to be set */
+	unsigned int low_vol_level;
+	unsigned int high_vol_level;
+	/* current level to be set */
+	unsigned int load_uA;
+	/*
+	 * is set voltage supported for this regulator?
+	 * false => set voltage is not supported
+	 * true  => set voltage is supported
+	 *
+	 * Some regulators (like gpio-regulators, LVS (low voltage switches)
+	 * PMIC regulators) don't have the capability to call
+	 * regulator_set_voltage or regulator_set_optimum_mode.
+	 * Use this variable to indicate whether this is such a regulator.
+	 */
+	bool set_voltage_sup;
+	/* is this regulator enabled? */
+	bool is_enabled;
+};
+
+struct bt_power_clk_data {
+	/* clock regulator handle */
+	struct clk *clk;
+	/* clock name */
+	const char *name;
+	/* is this clock enabled? */
+	bool is_enabled;
+};
+
+/*
+ * Platform data for the bluetooth power driver.
+ */
+struct bluetooth_power_platform_data {
+	/* Bluetooth reset gpio */
+	int bt_gpio_sys_rst;
+	struct device *slim_dev;
+	/* VDDIO voltage regulator */
+	struct bt_power_vreg_data *bt_vdd_io;
+	/* VDD_PA voltage regulator */
+	struct bt_power_vreg_data *bt_vdd_pa;
+	/* VDD_LDOIN voltage regulator */
+	struct bt_power_vreg_data *bt_vdd_ldo;
+	/* VDD_XTAL voltage regulator */
+	struct bt_power_vreg_data *bt_vdd_xtal;
+	/* VDD_CORE voltage regulator */
+	struct bt_power_vreg_data *bt_vdd_core;
+	/* Optional: chip power down gpio-regulator.
+	 * Chip power down data is required when the bluetooth module
+	 * and other modules like wifi co-exist in a single chip and
+	 * share a common gpio to bring the chip out of reset.
+	 */
+	struct bt_power_vreg_data *bt_chip_pwd;
+	/* bluetooth reference clock */
+	struct bt_power_clk_data *bt_chip_clk;
+	/* Optional: Bluetooth power setup function */
+	int (*bt_power_setup)(int);
+};
+
+int bt_register_slimdev(struct device *dev);
+
+#define BT_CMD_SLIM_TEST		0xbfac
+#define BT_CMD_PWR_CTRL			0xbfad
+#endif /* __LINUX_BLUETOOTH_POWER_H */
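
A minimal platform-data sketch for this driver, wiring one VDDIO supply and a reset GPIO; every value below is a placeholder, not something this patch defines:

#include <linux/bluetooth-power.h>

static struct bt_power_vreg_data example_bt_vdd_io = {
	.name		 = "example-vdd-io",	/* hypothetical supply name */
	.low_vol_level	 = 1800000,		/* 1.8 V */
	.high_vol_level	 = 1800000,
	.load_uA	 = 10000,
	.set_voltage_sup = true,	/* regulator supports set_voltage */
};

static struct bluetooth_power_platform_data example_bt_pdata = {
	.bt_gpio_sys_rst = 42,			/* placeholder reset GPIO */
	.bt_vdd_io	 = &example_bt_vdd_io,
};
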
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/include/linux/clk/msm-clk.h	2019-01-22 16:16:28.203288968 +0100
@@ -0,0 +1,138 @@
+/* Copyright (c) 2009, 2012-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#ifndef __MACH_CLK_H
+#define __MACH_CLK_H
+
+#include <linux/notifier.h>
+
+#if defined(CONFIG_COMMON_CLK_QCOM)
+enum branch_mem_flags {
+	CLKFLAG_RETAIN_PERIPH,
+	CLKFLAG_NORETAIN_PERIPH,
+	CLKFLAG_RETAIN_MEM,
+	CLKFLAG_NORETAIN_MEM,
+	CLKFLAG_PERIPH_OFF_SET,
+	CLKFLAG_PERIPH_OFF_CLEAR,
+};
+
+#include <linux/clk.h>
+
+#elif defined(CONFIG_COMMON_CLK_MSM)
+#define CLKFLAG_INVERT			0x00000001
+#define CLKFLAG_NOINVERT		0x00000002
+#define CLKFLAG_NONEST			0x00000004
+#define CLKFLAG_NORESET			0x00000008
+#define CLKFLAG_RETAIN_PERIPH		0x00000010
+#define CLKFLAG_NORETAIN_PERIPH		0x00000020
+#define CLKFLAG_RETAIN_MEM		0x00000040
+#define CLKFLAG_NORETAIN_MEM		0x00000080
+#define CLKFLAG_SKIP_HANDOFF		0x00000100
+#define CLKFLAG_MIN			0x00000400
+#define CLKFLAG_MAX			0x00000800
+#define CLKFLAG_INIT_DONE		0x00001000
+#define CLKFLAG_INIT_ERR		0x00002000
+#define CLKFLAG_NO_RATE_CACHE		0x00004000
+#define CLKFLAG_MEASURE			0x00008000
+#define CLKFLAG_EPROBE_DEFER		0x00010000
+#define CLKFLAG_PERIPH_OFF_SET		0x00020000
+#define CLKFLAG_PERIPH_OFF_CLEAR	0x00040000
+
+struct clk_lookup;
+struct clk;
+
+enum clk_reset_action {
+	CLK_RESET_DEASSERT	= 0,
+	CLK_RESET_ASSERT	= 1
+};
+
+struct clk_src {
+	struct clk *src;
+	int sel;
+};
+
+/* Rate is maximum clock rate in Hz */
+int clk_set_max_rate(struct clk *clk, unsigned long rate);
+
+/* Assert/Deassert reset to a hardware block associated with a clock */
+int clk_reset(struct clk *clk, enum clk_reset_action action);
+
+/* Set clock-specific configuration parameters */
+int clk_set_flags(struct clk *clk, unsigned long flags);
+
+/* returns the mux selection index associated with a particular parent */
+int parent_to_src_sel(struct clk_src *parents, int num_parents, struct clk *p);
+
+/* returns the mux selection index associated with a particular parent */
+int clk_get_parent_sel(struct clk *c, struct clk *parent);
+
+/**
+ * DOC: clk notifier callback types
+ *
+ * PRE_RATE_CHANGE - called immediately before the clk rate is changed,
+ *     to indicate that the rate change will proceed.  Drivers must
+ *     immediately terminate any operations that will be affected by the
+ *     rate change.  Callbacks may either return NOTIFY_DONE, NOTIFY_OK,
+ *     NOTIFY_STOP or NOTIFY_BAD.
+ *
+ * ABORT_RATE_CHANGE - called if the rate change failed for some reason
+ *     after PRE_RATE_CHANGE.  In this case, all registered notifiers on
+ *     the clk will be called with ABORT_RATE_CHANGE. Callbacks must
+ *     always return NOTIFY_DONE or NOTIFY_OK.
+ *
+ * POST_RATE_CHANGE - called after the clk rate change has successfully
+ *     completed.  Callbacks must always return NOTIFY_DONE or NOTIFY_OK.
+ *
+ */
+#define PRE_RATE_CHANGE			BIT(0)
+#define POST_RATE_CHANGE		BIT(1)
+#define ABORT_RATE_CHANGE		BIT(2)
+
+/**
+ * struct msm_clk_notifier - associate a clk with a notifier
+ * @clk: struct clk * to associate the notifier with
+ * @notifier_head: a blocking_notifier_head for this clk
+ * @node: linked list pointers
+ *
+ * A list of struct clk_notifier is maintained by the notifier code.
+ * An entry is created whenever code registers the first notifier on a
+ * particular @clk.  Future notifiers on that @clk are added to the
+ * @notifier_head.
+ */
+struct msm_clk_notifier {
+	struct clk			*clk;
+	struct srcu_notifier_head	notifier_head;
+	struct list_head		node;
+};
+
+/**
+ * struct msm_clk_notifier_data - rate data to pass to the notifier callback
+ * @clk: struct clk * being changed
+ * @old_rate: previous rate of this clk
+ * @new_rate: new rate of this clk
+ *
+ * For a pre-notifier, old_rate is the clk's rate before this rate
+ * change, and new_rate is what the rate will be in the future.  For a
+ * post-notifier, old_rate and new_rate are both set to the clk's
+ * current rate (this was done to optimize the implementation).
+ */
+struct msm_clk_notifier_data {
+	struct clk		*clk;
+	unsigned long		old_rate;
+	unsigned long		new_rate;
+};
+
+int msm_clk_notif_register(struct clk *clk, struct notifier_block *nb);
+
+int msm_clk_notif_unregister(struct clk *clk, struct notifier_block *nb);
+
+#endif /* CONFIG_COMMON_CLK_MSM */
+#endif
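
A sketch of how a driver might subscribe to these rate-change events, assuming a struct clk handle obtained elsewhere; the callback body is illustrative and merely logs the transition:

#include <linux/clk.h>
#include <linux/notifier.h>
#include <linux/printk.h>
#include <linux/clk/msm-clk.h>

static int example_rate_cb(struct notifier_block *nb, unsigned long event,
			   void *data)
{
	struct msm_clk_notifier_data *nd = data;

	if (event == PRE_RATE_CHANGE)
		pr_debug("clk rate %lu -> %lu\n", nd->old_rate, nd->new_rate);
	return NOTIFY_OK;
}

static struct notifier_block example_rate_nb = {
	.notifier_call = example_rate_cb,
};

static int example_watch(struct clk *clk)
{
	return msm_clk_notif_register(clk, &example_rate_nb);
}
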
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/include/linux/clk/msm-clk-provider.h	2019-01-22 16:16:28.203288968 +0100
@@ -0,0 +1,270 @@
+/*
+ * Copyright (C) 2007 Google, Inc.
+ * Copyright (c) 2007-2016, The Linux Foundation. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __MSM_CLK_PROVIDER_H
+#define __MSM_CLK_PROVIDER_H
+
+#include <linux/types.h>
+#include <linux/err.h>
+#include <linux/list.h>
+#include <linux/clkdev.h>
+#include <linux/of.h>
+#include <linux/device.h>
+#include <linux/spinlock.h>
+#include <linux/platform_device.h>
+#include <linux/mutex.h>
+#include <linux/regulator/consumer.h>
+#include <linux/seq_file.h>
+#include <linux/clk/msm-clk.h>
+
+#if defined(CONFIG_COMMON_CLK_MSM)
+/*
+ * Bit manipulation macros
+ */
+#define BM(msb, lsb)	(((((uint32_t)-1) << (31-msb)) >> (31-msb+lsb)) << lsb)
+#define BVAL(msb, lsb, val)	(((val) << lsb) & BM(msb, lsb))
+
+/*
+ * Halt/Status Checking Mode Macros
+ */
+#define HALT		0	/* Bit pol: 1 = halted */
+#define NOCHECK		1	/* No bit to check, do nothing */
+#define HALT_VOTED	2	/* Bit pol: 1 = halted; delay on disable */
+#define ENABLE		3	/* Bit pol: 1 = running */
+#define ENABLE_VOTED	4	/* Bit pol: 1 = running; delay on disable */
+#define DELAY		5	/* No bit to check, just delay */
+
+struct clk_register_data {
+	char *name;
+	u32 offset;
+};
+#ifdef CONFIG_DEBUG_FS
+void clk_debug_print_hw(struct clk *clk, struct seq_file *f);
+#else
+static inline void clk_debug_print_hw(struct clk *clk, struct seq_file *f) {}
+#endif
+
+#define CLK_WARN(clk, cond, fmt, ...) do {				\
+	clk_debug_print_hw(clk, NULL);					\
+	WARN(cond, "%s: " fmt, clk_name(clk), ##__VA_ARGS__);		\
+} while (0)
+
+/**
+ * struct clk_vdd_class - Voltage scaling class
+ * @class_name: name of the class
+ * @regulator: array of regulators.
+ * @num_regulators: size of regulator array. Standard regulator APIs will be
+			used if this field > 0.
+ * @set_vdd: function to call when applying a new voltage setting.
+ * @vdd_uv: sorted 2D array of legal voltage settings. Indexed by level, then
+		regulator.
+ * @vdd_ua: sorted 2D array of legal current settings. Indexed by level, then
+		regulator. Optional parameter.
+ * @level_votes: array of votes for each level.
+ * @num_levels: specifies the size of level_votes array.
+ * @skip_handoff: do not vote for the max possible voltage during init
+ * @use_max_uV: use INT_MAX for max_uV when calling regulator_set_voltage.
+ *           This is useful when different vdd_class share the same regulator.
+ * @cur_level: the currently set voltage level
+ * @lock: lock to protect this struct
+ */
+struct clk_vdd_class {
+	const char *class_name;
+	struct regulator **regulator;
+	int num_regulators;
+	int (*set_vdd)(struct clk_vdd_class *v_class, int level);
+	int *vdd_uv;
+	int *vdd_ua;
+	int *level_votes;
+	int num_levels;
+	bool skip_handoff;
+	bool use_max_uV;
+	unsigned long cur_level;
+	struct mutex lock;
+};
+
+#define DEFINE_VDD_CLASS(_name, _set_vdd, _num_levels) \
+	struct clk_vdd_class _name = { \
+		.class_name = #_name, \
+		.set_vdd = _set_vdd, \
+		.level_votes = (int [_num_levels]) {}, \
+		.num_levels = _num_levels, \
+		.cur_level = _num_levels, \
+		.lock = __MUTEX_INITIALIZER(_name.lock) \
+	}
+
+#define DEFINE_VDD_REGULATORS(_name, _num_levels, _num_regulators, _vdd_uv, \
+	 _vdd_ua) \
+	struct clk_vdd_class _name = { \
+		.class_name = #_name, \
+		.vdd_uv = _vdd_uv, \
+		.vdd_ua = _vdd_ua, \
+		.regulator = (struct regulator * [_num_regulators]) {}, \
+		.num_regulators = _num_regulators, \
+		.level_votes = (int [_num_levels]) {}, \
+		.num_levels = _num_levels, \
+		.cur_level = _num_levels, \
+		.lock = __MUTEX_INITIALIZER(_name.lock) \
+	}
+
+#define DEFINE_VDD_REGS_INIT(_name, _num_regulators) \
+	struct clk_vdd_class _name = { \
+		.class_name = #_name, \
+		.regulator = (struct regulator * [_num_regulators]) {}, \
+		.num_regulators = _num_regulators, \
+		.lock = __MUTEX_INITIALIZER(_name.lock) \
+	}
+
+enum handoff {
+	HANDOFF_ENABLED_CLK,
+	HANDOFF_DISABLED_CLK,
+};
+
+struct clk_ops {
+	int (*prepare)(struct clk *clk);
+	int (*enable)(struct clk *clk);
+	void (*disable)(struct clk *clk);
+	void (*unprepare)(struct clk *clk);
+	void (*enable_hwcg)(struct clk *clk);
+	void (*disable_hwcg)(struct clk *clk);
+	int (*in_hwcg_mode)(struct clk *clk);
+	enum handoff (*handoff)(struct clk *clk);
+	int (*reset)(struct clk *clk, enum clk_reset_action action);
+	int (*pre_set_rate)(struct clk *clk, unsigned long new_rate);
+	int (*set_rate)(struct clk *clk, unsigned long rate);
+	void (*post_set_rate)(struct clk *clk, unsigned long old_rate);
+	int (*set_max_rate)(struct clk *clk, unsigned long rate);
+	int (*set_flags)(struct clk *clk, unsigned flags);
+	unsigned long (*get_rate)(struct clk *clk);
+	long (*list_rate)(struct clk *clk, unsigned n);
+	int (*is_enabled)(struct clk *clk);
+	long (*round_rate)(struct clk *clk, unsigned long rate);
+	int (*set_parent)(struct clk *clk, struct clk *parent);
+	struct clk *(*get_parent)(struct clk *clk);
+	bool (*is_local)(struct clk *clk);
+	void __iomem *(*list_registers)(struct clk *clk, int n,
+				struct clk_register_data **regs, u32 *size);
+};
+
+/**
+ * struct clk
+ * @prepare_count: prepare refcount
+ * @prepare_lock: protects clk_prepare()/clk_unprepare() path and @prepare_count
+ * @count: enable refcount
+ * @lock: protects clk_enable()/clk_disable() path and @count
+ * @depends: non-direct parent of clock to enable when this clock is enabled
+ * @vdd_class: voltage scaling requirement class
+ * @fmax: maximum frequency in Hz supported at each voltage level
+ * @parent: the current source of this clock
+ * @opp_table_populated: tracks if the OPP table of this clock has been filled
+ */
+struct clk {
+	uint32_t flags;
+	struct clk_ops *ops;
+	const char *dbg_name;
+	struct clk *depends;
+	struct clk_vdd_class *vdd_class;
+	unsigned long *fmax;
+	int num_fmax;
+	unsigned long rate;
+	struct clk *parent;
+	struct clk_src *parents;
+	unsigned int num_parents;
+
+	struct list_head children;
+	struct list_head siblings;
+	struct list_head list;
+
+	unsigned count;
+	unsigned notifier_count;
+	spinlock_t lock;
+	unsigned prepare_count;
+	struct mutex prepare_lock;
+
+	unsigned long init_rate;
+	bool always_on;
+	bool opp_table_populated;
+
+	struct dentry *clk_dir;
+};
+
+#define CLK_INIT(name) \
+	.lock = __SPIN_LOCK_UNLOCKED((name).lock), \
+	.prepare_lock = __MUTEX_INITIALIZER((name).prepare_lock), \
+	.children = LIST_HEAD_INIT((name).children), \
+	.siblings = LIST_HEAD_INIT((name).siblings), \
+	.list = LIST_HEAD_INIT((name).list)
+
+bool is_rate_valid(struct clk *clk, unsigned long rate);
+int vote_vdd_level(struct clk_vdd_class *vdd_class, int level);
+int unvote_vdd_level(struct clk_vdd_class *vdd_class, int level);
+int __clk_pre_reparent(struct clk *c, struct clk *new, unsigned long *flags);
+void __clk_post_reparent(struct clk *c, struct clk *old, unsigned long *flags);
+
+/* Register clocks with the MSM clock driver */
+int msm_clock_register(struct clk_lookup *table, size_t size);
+int of_msm_clock_register(struct device_node *np, struct clk_lookup *table,
+				size_t size);
+
+int clock_rcgwr_init(struct platform_device *pdev);
+int clock_rcgwr_disable(struct platform_device *pdev);
+
+extern struct clk dummy_clk;
+extern struct clk_ops clk_ops_dummy;
+
+#define CLK_DUMMY(clk_name, clk_id, clk_dev, flags) { \
+	.con_id = clk_name, \
+	.dev_id = clk_dev, \
+	.clk = &dummy_clk, \
+	}
+
+#define DEFINE_CLK_DUMMY(name, _rate) \
+	static struct fixed_clk name = { \
+		.c = { \
+			.dbg_name = #name, \
+			.rate = _rate, \
+			.ops = &clk_ops_dummy, \
+			CLK_INIT(name.c), \
+		}, \
+	};
+
+#define CLK_LOOKUP(con, c, dev) { .con_id = con, .clk = &c, .dev_id = dev }
+#define CLK_LOOKUP_OF(con, _c, dev) { .con_id = con, .clk = &(&_c)->c, \
+				      .dev_id = dev, .of_idx = clk_##_c }
+#define CLK_LIST(_c) { .clk = &(&_c)->c, .of_idx = clk_##_c }
+
+static inline bool is_better_rate(unsigned long req, unsigned long best,
+				  unsigned long new)
+{
+	if (IS_ERR_VALUE(new))
+		return false;
+
+	return (req <= new && new < best) || (best < req && best < new);
+}
+
+extern int of_clk_add_provider(struct device_node *np,
+			struct clk *(*clk_src_get)(struct of_phandle_args *args,
+						   void *data),
+			void *data);
+extern void of_clk_del_provider(struct device_node *np);
+
+static inline const char *clk_name(struct clk *c)
+{
+	if (IS_ERR_OR_NULL(c))
+		return "(null)";
+	return c->dbg_name;
+}
+#endif /* CONFIG_COMMON_CLK_MSM */
+#endif
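
To illustrate the voltage-class helpers: DEFINE_VDD_REGULATORS() allocates the per-level vote array in place, after which vote_vdd_level()/unvote_vdd_level() adjust the aggregate. A hedged sketch with invented voltages, one regulator and two levels:

#include <linux/clk/msm-clk-provider.h>

/* One microvolt entry per level (single regulator); values are invented. */
static int example_vdd_uv[] = { 1000000, 1200000 };

static DEFINE_VDD_REGULATORS(vdd_example, 2 /* levels */, 1 /* regulator */,
			     example_vdd_uv, NULL /* no current table */);

static int example_raise_level(void)
{
	/* vote for level 1; balanced later with unvote_vdd_level() */
	return vote_vdd_level(&vdd_example, 1);
}
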
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/include/linux/clk/msm-clock-generic.h	2019-10-29 09:26:25.421220578 +0100
@@ -0,0 +1,322 @@
+/*
+ * Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MSM_CLOCK_GENERIC_H
+#define __MSM_CLOCK_GENERIC_H
+
+#include <linux/clk/msm-clk-provider.h>
+#include <linux/of.h>
+
+/**
+ * struct fixed_clk - fixed rate clock
+ * @c: clk
+ */
+struct fixed_clk {
+	struct clk c;
+};
+
+/* ==================== Mux clock ==================== */
+
+struct mux_clk;
+
+struct clk_mux_ops {
+	int (*set_mux_sel)(struct mux_clk *clk, int sel);
+	int (*get_mux_sel)(struct mux_clk *clk);
+
+	/* Optional */
+	bool (*is_enabled)(struct mux_clk *clk);
+	int (*enable)(struct mux_clk *clk);
+	void (*disable)(struct mux_clk *clk);
+	void __iomem *(*list_registers)(struct mux_clk *clk, int n,
+				struct clk_register_data **regs, u32 *size);
+};
+
+#define MUX_SRC_LIST(...) \
+	.parents = (struct clk_src[]){__VA_ARGS__}, \
+	.num_parents = ARRAY_SIZE(((struct clk_src[]){__VA_ARGS__}))
+
+#define MUX_REC_SRC_LIST(...) \
+	.rec_parents = (struct clk * []){__VA_ARGS__}, \
+	.num_rec_parents = ARRAY_SIZE(((struct clk * []){__VA_ARGS__}))
+
+struct mux_clk {
+	/* Parents in decreasing order of preference for obtaining rates. */
+	struct clk_src	*parents;
+	int		num_parents;
+	/* Recursively search for the requested parent in rec_parents. */
+	struct clk	**rec_parents;
+	int		num_rec_parents;
+	struct clk	*safe_parent;
+	int		safe_sel;
+	unsigned long	safe_freq;
+	/*
+	 * Before attempting a clk_round_rate on available sources, attempt a
+	 * clk_get_rate on all those sources. If one of them is already at the
+	 * necessary rate, that source will be used.
+	 */
+	bool		try_get_rate;
+	struct clk_mux_ops *ops;
+	/*
+	 * Set if you need the mux to try a new parent before falling back to
+	 * the current parent. If the safe_parent field above is set, then the
+	 * safe_sel intermediate source will only be used if we fall back to
+	 * the current parent during mux_set_rate.
+	 */
+	bool		try_new_parent;
+
+	/* Fields not used by helper function. */
+	void *const __iomem *base;
+	u32		offset;
+	u32		en_offset;
+	u32		mask;
+	u32		shift;
+	u32		en_mask;
+	/*
+	 * Set post divider for debug mux in order to divide the clock
+	 * by post_div + 1.
+	 */
+	u32		post_div;
+	int		low_power_sel;
+	void		*priv;
+
+	struct clk	c;
+};
+
+static inline struct mux_clk *to_mux_clk(struct clk *c)
+{
+	return container_of(c, struct mux_clk, c);
+}
+
+extern struct clk_ops clk_ops_gen_mux;
+
+/* ==================== Divider clock ==================== */
+
+struct div_clk;
+
+struct clk_div_ops {
+	int (*set_div)(struct div_clk *clk, int div);
+	int (*get_div)(struct div_clk *clk);
+	bool (*is_enabled)(struct div_clk *clk);
+	int (*enable)(struct div_clk *clk);
+	void (*disable)(struct div_clk *clk);
+	void __iomem *(*list_registers)(struct div_clk *clk, int n,
+				struct clk_register_data **regs, u32 *size);
+};
+
+struct div_data {
+	unsigned int div;
+	unsigned int min_div;
+	unsigned int max_div;
+	unsigned long rate_margin;
+	/*
+	 * Indicate whether this divider clock supports half-integer dividers.
+	 * If it does, min_div and max_div have been doubled, which means
+	 * they are 2*N.
+	 */
+	bool is_half_divider;
+	/*
+	 * Skip odd dividers since the hardware may not support them.
+	 */
+	bool skip_odd_div;
+	bool skip_even_div;
+	bool allow_div_one;
+	unsigned int cached_div;
+};
+
+struct div_clk {
+	struct div_data data;
+
+	/*
+	 * Some implementations may require the divider to be set to a "safe"
+	 * value that allows reprogramming of upstream clocks without violating
+	 * voltage constraints.
+	 */
+	unsigned long safe_freq;
+
+	/* Optional */
+	struct clk_div_ops *ops;
+
+	/* Fields not used by helper function. */
+	void *const __iomem *base;
+	u32		offset;
+	u32		mask;
+	u32		shift;
+	u32		en_mask;
+	void		*priv;
+	struct clk	c;
+};
+
+static inline struct div_clk *to_div_clk(struct clk *c)
+{
+	return container_of(c, struct div_clk, c);
+}
+
+extern struct clk_ops clk_ops_div;
+extern struct clk_ops clk_ops_slave_div;
+
+struct ext_clk {
+	struct clk c;
+	struct device *dev;
+	char *clk_id;
+};
+
+long parent_round_rate(struct clk *c, unsigned long rate);
+unsigned long parent_get_rate(struct clk *c);
+int parent_set_rate(struct clk *c, unsigned long rate);
+
+static inline struct ext_clk *to_ext_clk(struct clk *c)
+{
+	return container_of(c, struct ext_clk, c);
+}
+
+extern struct clk_ops clk_ops_ext;
+
+#define DEFINE_FIXED_DIV_CLK(clk_name, _div, _parent) \
+static struct div_clk clk_name = {	\
+	.data = {				\
+		.max_div = _div,		\
+		.min_div = _div,		\
+		.div = _div,			\
+	},					\
+	.c = {					\
+		.parent = _parent,		\
+		.dbg_name = #clk_name,		\
+		.ops = &clk_ops_div,		\
+		CLK_INIT(clk_name.c),		\
+	}					\
+}
+
+#define DEFINE_FIXED_SLAVE_DIV_CLK(clk_name, _div, _parent) \
+static struct div_clk clk_name = {	\
+	.data = {				\
+		.max_div = _div,		\
+		.min_div = _div,		\
+		.div = _div,			\
+	},					\
+	.c = {					\
+		.parent = _parent,		\
+		.dbg_name = #clk_name,		\
+		.ops = &clk_ops_slave_div,		\
+		CLK_INIT(clk_name.c),		\
+	}					\
+}
+
+#define DEFINE_EXT_CLK(clk_name, _parent) \
+static struct ext_clk clk_name = {		\
+	.c = {					\
+		.parent = _parent,		\
+		.dbg_name = #clk_name,		\
+		.ops = &clk_ops_ext,		\
+		CLK_INIT(clk_name.c),		\
+	}					\
+}
+
+/* ==================== Mux Div clock ==================== */
+
+struct mux_div_clk;
+
+/*
+ * struct mux_div_ops
+ * the enable and disable ops are optional.
+ */
+
+struct mux_div_ops {
+	int (*set_src_div)(struct mux_div_clk *, u32 src_sel, u32 div);
+	void (*get_src_div)(struct mux_div_clk *, u32 *src_sel, u32 *div);
+	int (*enable)(struct mux_div_clk *);
+	void (*disable)(struct mux_div_clk *);
+	bool (*is_enabled)(struct mux_div_clk *);
+	void __iomem *(*list_registers)(struct mux_div_clk *md, int n,
+				struct clk_register_data **regs, u32 *size);
+};
+
+/*
+ * struct mux_div_clk - combined mux/divider clock
+ * @priv:
+ *		parameters needed by ops
+ * @safe_freq:
+ *		when switching rates from A to B, the mux div clock will
+ *		instead switch from A -> safe_freq -> B. This allows the
+ *		mux_div clock to change rates while enabled, even if this
+ *		behavior is not supported by the parent clocks.
+ *
+ *		If changing the rate of parent A also causes the rate of
+ *		parent B to change, then safe_freq must be defined.
+ *
+ *		safe_freq is expected to have a source clock which is always
+ *		on and runs at only one rate.
+ * @parents:
+ *		list of parents and mux indices
+ * @ops:
+ *		function pointers for hw specific operations
+ * @src_sel:
+ *		the mux index which will be used if the clock is enabled.
+ * @try_get_rate:
+ *		Set if you need the mux to directly jump to a source
+ *		that is at the desired rate currently.
+ * @force_enable_md:
+ *		Set if the mux-div needs to be force enabled/disabled during
+ *		clk_enable/disable.
+ */
+
+struct mux_div_clk {
+	/* Required parameters */
+	struct mux_div_ops		*ops;
+	struct div_data			data;
+	struct clk_src			*parents;
+	u32				num_parents;
+
+	struct clk			c;
+
+	/* Internal */
+	u32				src_sel;
+
+	/* Optional parameters */
+	void				*priv;
+	void __iomem			*base;
+	u32				div_mask;
+	u32				div_offset;
+	u32				div_shift;
+	u32				src_mask;
+	u32				src_offset;
+	u32				src_shift;
+	u32				en_mask;
+	u32				en_offset;
+
+	u32				safe_div;
+	struct clk			*safe_parent;
+	unsigned long			safe_freq;
+	bool				try_get_rate;
+	bool				force_enable_md;
+};
+
+static inline struct mux_div_clk *to_mux_div_clk(struct clk *clk)
+{
+	return container_of(clk, struct mux_div_clk, c);
+}
+
+extern struct clk_ops clk_ops_mux_div_clk;
+
+/* ==================== Virtual clock ==================== */
+struct virtclk_front {
+	int id;
+	struct clk c;
+};
+
+extern struct clk_ops virtclk_front_ops;
+
+int msm_virtclk_front_probe(struct platform_device *pdev,
+		struct clk_lookup *table,
+		size_t size);
+
+#endif
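
As a usage note for the divider helpers above: pinning min_div == max_div yields a fixed ratio, which DEFINE_FIXED_DIV_CLK() wraps up in one declaration. A sketch using dummy_clk (declared in msm-clk-provider.h) as a stand-in parent:

#include <linux/clk/msm-clock-generic.h>

/* Fixed divide-by-2 child clock; a real driver would name a real parent. */
DEFINE_FIXED_DIV_CLK(example_div2_clk, 2, &dummy_clk);
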
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/include/linux/coresight-cti.h	2019-01-22 16:16:28.207289004 +0100
@@ -0,0 +1,93 @@
+/* Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _LINUX_CORESIGHT_CTI_H
+#define _LINUX_CORESIGHT_CTI_H
+
+#include <linux/list.h>
+
+struct coresight_cti_data {
+	int nr_ctis;
+	const char **names;
+};
+
+struct coresight_cti {
+	const char *name;
+	struct list_head link;
+};
+
+#ifdef CONFIG_CORESIGHT_CTI
+extern struct coresight_cti *coresight_cti_get(const char *name);
+extern void coresight_cti_put(struct coresight_cti *cti);
+extern int coresight_cti_map_trigin(
+			struct coresight_cti *cti, int trig, int ch);
+extern int coresight_cti_map_trigout(
+			struct coresight_cti *cti, int trig, int ch);
+extern void coresight_cti_unmap_trigin(
+			struct coresight_cti *cti, int trig, int ch);
+extern void coresight_cti_unmap_trigout(
+			struct coresight_cti *cti, int trig, int ch);
+extern void coresight_cti_reset(struct coresight_cti *cti);
+extern int coresight_cti_set_trig(struct coresight_cti *cti, int ch);
+extern void coresight_cti_clear_trig(struct coresight_cti *cti, int ch);
+extern int coresight_cti_pulse_trig(struct coresight_cti *cti, int ch);
+extern int coresight_cti_enable_gate(struct coresight_cti *cti, int ch);
+extern void coresight_cti_disable_gate(struct coresight_cti *cti, int ch);
+extern void coresight_cti_ctx_save(void);
+extern void coresight_cti_ctx_restore(void);
+extern int coresight_cti_ack_trig(struct coresight_cti *cti, int trig);
+#else
+static inline struct coresight_cti *coresight_cti_get(const char *name)
+{
+	return NULL;
+}
+static inline void coresight_cti_put(struct coresight_cti *cti) {}
+static inline int coresight_cti_map_trigin(
+			struct coresight_cti *cti, int trig, int ch)
+{
+	return -ENOSYS;
+}
+static inline int coresight_cti_map_trigout(
+			struct coresight_cti *cti, int trig, int ch)
+{
+	return -ENOSYS;
+}
+static inline void coresight_cti_unmap_trigin(
+			struct coresight_cti *cti, int trig, int ch) {}
+static inline void coresight_cti_unmap_trigout(
+			struct coresight_cti *cti, int trig, int ch) {}
+static inline void coresight_cti_reset(struct coresight_cti *cti) {}
+static inline int coresight_cti_set_trig(struct coresight_cti *cti, int ch)
+{
+	return -ENOSYS;
+}
+static inline void coresight_cti_clear_trig(struct coresight_cti *cti, int ch)
+{}
+static inline int coresight_cti_pulse_trig(struct coresight_cti *cti, int ch)
+{
+	return -ENOSYS;
+}
+static inline int coresight_cti_enable_gate(struct coresight_cti *cti, int ch)
+{
+	return -ENOSYS;
+}
+static inline void coresight_cti_disable_gate(struct coresight_cti *cti, int ch)
+{}
+static inline void coresight_cti_ctx_save(void){}
+static inline void coresight_cti_ctx_restore(void){}
+static inline int coresight_cti_ack_trig(struct coresight_cti *cti, int trig)
+{
+	return -ENOSYS;
+}
+#endif
+
+#endif
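
A hedged sketch of the get/map flow for this API; the CTI name, trigger and channel numbers below are placeholders:

#include <linux/coresight-cti.h>
#include <linux/err.h>

static int example_route_trigger(void)
{
	struct coresight_cti *cti;
	int ret;

	cti = coresight_cti_get("cti0");	/* hypothetical CTI name */
	if (IS_ERR_OR_NULL(cti))
		return -ENODEV;

	/* connect trigger output 0 to cross-trigger channel 2 */
	ret = coresight_cti_map_trigout(cti, 0, 2);
	if (ret)
		coresight_cti_put(cti);
	return ret;
}
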
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/include/linux/coresight-stm.h	2019-01-22 16:16:28.207289004 +0100
@@ -0,0 +1,35 @@
+#ifndef __LINUX_CORESIGHT_STM_H_
+#define __LINUX_CORESIGHT_STM_H_
+
+#include <uapi/linux/coresight-stm.h>
+
+#define stm_log_inv(entity_id, proto_id, data, size)			\
+	stm_trace(STM_OPTION_NONE, entity_id, proto_id, data, size)
+
+#define stm_log_inv_ts(entity_id, proto_id, data, size)			\
+	stm_trace(STM_OPTION_TIMESTAMPED, entity_id, proto_id,		\
+		  data, size)
+
+#define stm_log_gtd(entity_id, proto_id, data, size)			\
+	stm_trace(STM_OPTION_GUARANTEED, entity_id, proto_id,		\
+		  data, size)
+
+#define stm_log_gtd_ts(entity_id, proto_id, data, size)			\
+	stm_trace(STM_OPTION_GUARANTEED | STM_OPTION_TIMESTAMPED,	\
+		  entity_id, proto_id, data, size)
+
+#define stm_log(entity_id, data, size)					\
+	stm_log_inv_ts(entity_id, 0, data, size)
+
+#ifdef CONFIG_CORESIGHT_STM
+extern int stm_trace(uint32_t options, uint8_t entity_id, uint8_t proto_id,
+		     const void *data, uint32_t size);
+#else
+static inline int stm_trace(uint32_t options, uint8_t entity_id,
+			    uint8_t proto_id, const void *data, uint32_t size)
+{
+	return 0;
+}
+#endif
+
+#endif
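
Each wrapper above reduces to stm_trace() with different option bits; stm_log() is the timestamped, non-guaranteed variant. A sketch (the entity ID 0x10 is an invented placeholder, not a value this patch defines):

#include <linux/coresight-stm.h>
#include <linux/types.h>

static void example_stm_event(u32 value)
{
	/* emits a timestamped trace packet carrying the 4-byte payload */
	stm_log(0x10, &value, sizeof(value));
}
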
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/include/linux/diagchar.h	2019-10-29 09:26:25.429220656 +0100
@@ -0,0 +1,901 @@
+/* Copyright (c) 2008-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef DIAGCHAR_SHARED
+#define DIAGCHAR_SHARED
+
+#define MSG_MASKS_TYPE		0x00000001
+#define LOG_MASKS_TYPE		0x00000002
+#define EVENT_MASKS_TYPE	0x00000004
+#define PKT_TYPE		0x00000008
+#define DEINIT_TYPE		0x00000010
+#define USER_SPACE_DATA_TYPE	0x00000020
+#define DCI_DATA_TYPE		0x00000040
+#define USER_SPACE_RAW_DATA_TYPE	0x00000080
+#define DCI_LOG_MASKS_TYPE	0x00000100
+#define DCI_EVENT_MASKS_TYPE	0x00000200
+#define DCI_PKT_TYPE		0x00000400
+#define HDLC_SUPPORT_TYPE	0x00001000
+
+#define USB_MODE			1
+#define MEMORY_DEVICE_MODE		2
+#define NO_LOGGING_MODE			3
+#define UART_MODE			4
+#define SOCKET_MODE			5
+#define CALLBACK_MODE			6
+
+/* different values that go in for diag_data_type */
+
+#define DATA_TYPE_EVENT         	0
+#define DATA_TYPE_F3            	1
+#define DATA_TYPE_LOG           	2
+#define DATA_TYPE_RESPONSE      	3
+#define DATA_TYPE_DELAYED_RESPONSE	4
+#define DATA_TYPE_DCI_LOG		0x00000100
+#define DATA_TYPE_DCI_EVENT		0x00000200
+
+/* Different IOCTL values */
+#define DIAG_IOCTL_COMMAND_REG  	0
+#define DIAG_IOCTL_COMMAND_DEREG	1
+#define DIAG_IOCTL_SWITCH_LOGGING	7
+#define DIAG_IOCTL_GET_DELAYED_RSP_ID 	8
+#define DIAG_IOCTL_LSM_DEINIT		9
+#define DIAG_IOCTL_DCI_INIT		20
+#define DIAG_IOCTL_DCI_DEINIT		21
+#define DIAG_IOCTL_DCI_SUPPORT		22
+#define DIAG_IOCTL_DCI_REG		23
+#define DIAG_IOCTL_DCI_STREAM_INIT	24
+#define DIAG_IOCTL_DCI_HEALTH_STATS	25
+#define DIAG_IOCTL_DCI_LOG_STATUS	26
+#define DIAG_IOCTL_DCI_EVENT_STATUS	27
+#define DIAG_IOCTL_DCI_CLEAR_LOGS	28
+#define DIAG_IOCTL_DCI_CLEAR_EVENTS	29
+#define DIAG_IOCTL_REMOTE_DEV		32
+#define DIAG_IOCTL_VOTE_REAL_TIME	33
+#define DIAG_IOCTL_GET_REAL_TIME	34
+#define DIAG_IOCTL_PERIPHERAL_BUF_CONFIG	35
+#define DIAG_IOCTL_PERIPHERAL_BUF_DRAIN		36
+#define DIAG_IOCTL_REGISTER_CALLBACK	37
+#define DIAG_IOCTL_HDLC_TOGGLE	38
+#define DIAG_IOCTL_QUERY_PD_LOGGING	39
+
+/* PC Tools IDs */
+#define APQ8060_TOOLS_ID	4062
+#define AO8960_TOOLS_ID		4064
+#define APQ8064_TOOLS_ID	4072
+#define MSM8625_TOOLS_ID	4075
+#define MSM8930_TOOLS_ID	4076
+#define MSM8630_TOOLS_ID	4077
+#define MSM8230_TOOLS_ID	4078
+#define APQ8030_TOOLS_ID	4079
+#define MSM8627_TOOLS_ID	4080
+#define MSM8227_TOOLS_ID	4081
+#define MSM8974_TOOLS_ID	4083
+#define APQ8074_TOOLS_ID	4090
+#define MSM8916_TOOLS_ID	4094
+#define APQ8084_TOOLS_ID	4095
+#define MSM8994_TOOLS_ID	4097
+#define MSM8939_TOOLS_ID	4103
+#define APQ8026_TOOLS_ID	4104
+#define MSM8909_TOOLS_ID	4108
+#define MSM8992_TOOLS_ID	4111
+#define MSM8952_TOOLS_ID	4110
+#define MSM_8996_TOOLS_ID	4112
+
+#define MSG_MASK_0			(0x00000001)
+#define MSG_MASK_1			(0x00000002)
+#define MSG_MASK_2			(0x00000004)
+#define MSG_MASK_3			(0x00000008)
+#define MSG_MASK_4			(0x00000010)
+#define MSG_MASK_5			(0x00000020)
+#define MSG_MASK_6			(0x00000040)
+#define MSG_MASK_7			(0x00000080)
+#define MSG_MASK_8			(0x00000100)
+#define MSG_MASK_9			(0x00000200)
+#define MSG_MASK_10			(0x00000400)
+#define MSG_MASK_11			(0x00000800)
+#define MSG_MASK_12			(0x00001000)
+#define MSG_MASK_13			(0x00002000)
+#define MSG_MASK_14			(0x00004000)
+#define MSG_MASK_15			(0x00008000)
+#define MSG_MASK_16			(0x00010000)
+#define MSG_MASK_17			(0x00020000)
+#define MSG_MASK_18			(0x00040000)
+#define MSG_MASK_19			(0x00080000)
+#define MSG_MASK_20			(0x00100000)
+#define MSG_MASK_21			(0x00200000)
+#define MSG_MASK_22			(0x00400000)
+#define MSG_MASK_23			(0x00800000)
+#define MSG_MASK_24			(0x01000000)
+#define MSG_MASK_25			(0x02000000)
+#define MSG_MASK_26			(0x04000000)
+#define MSG_MASK_27			(0x08000000)
+#define MSG_MASK_28			(0x10000000)
+#define MSG_MASK_29			(0x20000000)
+#define MSG_MASK_30			(0x40000000)
+#define MSG_MASK_31			(0x80000000)
+
+/* These masks are to be used for support of all legacy messages in the sw.
+ * The user does not need to remember the names as they will be embedded in
+ * the appropriate macros. */
+#define MSG_LEGACY_LOW			MSG_MASK_0
+#define MSG_LEGACY_MED			MSG_MASK_1
+#define MSG_LEGACY_HIGH			MSG_MASK_2
+#define MSG_LEGACY_ERROR		MSG_MASK_3
+#define MSG_LEGACY_FATAL		MSG_MASK_4
+
+/* Legacy Message Priorities */
+#define MSG_LVL_FATAL			(MSG_LEGACY_FATAL)
+#define MSG_LVL_ERROR			(MSG_LEGACY_ERROR | MSG_LVL_FATAL)
+#define MSG_LVL_HIGH			(MSG_LEGACY_HIGH | MSG_LVL_ERROR)
+#define MSG_LVL_MED			(MSG_LEGACY_MED | MSG_LVL_HIGH)
+#define MSG_LVL_LOW			(MSG_LEGACY_LOW | MSG_LVL_MED)
+
+#define MSG_LVL_NONE			0
+
+/* This needs to be modified manually now, when we add
+ * a new RANGE of SSIDs to the msg_mask_tbl */
+#define MSG_MASK_TBL_CNT		26
+#define APPS_EVENT_LAST_ID		0x0B3F
+
+#define MSG_SSID_0			0
+#define MSG_SSID_0_LAST			121
+#define MSG_SSID_1			500
+#define MSG_SSID_1_LAST			506
+#define MSG_SSID_2			1000
+#define MSG_SSID_2_LAST			1007
+#define MSG_SSID_3			2000
+#define MSG_SSID_3_LAST			2008
+#define MSG_SSID_4			3000
+#define MSG_SSID_4_LAST			3014
+#define MSG_SSID_5			4000
+#define MSG_SSID_5_LAST			4010
+#define MSG_SSID_6			4500
+#define MSG_SSID_6_LAST			4583
+#define MSG_SSID_7			4600
+#define MSG_SSID_7_LAST			4615
+#define MSG_SSID_8			5000
+#define MSG_SSID_8_LAST			5033
+#define MSG_SSID_9			5500
+#define MSG_SSID_9_LAST			5516
+#define MSG_SSID_10			6000
+#define MSG_SSID_10_LAST		6081
+#define MSG_SSID_11			6500
+#define MSG_SSID_11_LAST		6521
+#define MSG_SSID_12			7000
+#define MSG_SSID_12_LAST		7003
+#define MSG_SSID_13			7100
+#define MSG_SSID_13_LAST		7111
+#define MSG_SSID_14			7200
+#define MSG_SSID_14_LAST		7201
+#define MSG_SSID_15			8000
+#define MSG_SSID_15_LAST		8000
+#define MSG_SSID_16			8500
+#define MSG_SSID_16_LAST		8529
+#define MSG_SSID_17			9000
+#define MSG_SSID_17_LAST		9008
+#define MSG_SSID_18			9500
+#define MSG_SSID_18_LAST		9521
+#define MSG_SSID_19			10200
+#define MSG_SSID_19_LAST		10210
+#define MSG_SSID_20			10251
+#define MSG_SSID_20_LAST		10255
+#define MSG_SSID_21			10300
+#define MSG_SSID_21_LAST		10300
+#define MSG_SSID_22			10350
+#define MSG_SSID_22_LAST		10377
+#define MSG_SSID_23			10400
+#define MSG_SSID_23_LAST		10416
+#define MSG_SSID_24			10500
+#define MSG_SSID_24_LAST		10505
+#define MSG_SSID_25			0xC000
+#define MSG_SSID_25_LAST		0xC063
+
+static const uint32_t msg_bld_masks_0[] = {
+	MSG_LVL_LOW,
+	MSG_LVL_MED,
+	MSG_LVL_LOW,
+	MSG_LVL_ERROR,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_MED,
+	MSG_LVL_HIGH,
+	MSG_LVL_ERROR,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_ERROR,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_HIGH,
+	MSG_LVL_HIGH,
+	MSG_LVL_LOW|MSG_MASK_5|MSG_MASK_6|MSG_MASK_7|MSG_MASK_8,
+	MSG_LVL_LOW,
+	MSG_LVL_ERROR,
+	MSG_LVL_LOW,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_LOW,
+	MSG_LVL_MED,
+	MSG_LVL_LOW,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED|MSG_MASK_7 | \
+		MSG_MASK_8|MSG_MASK_9|MSG_MASK_10|MSG_MASK_11|MSG_MASK_12 | \
+		MSG_MASK_13|MSG_MASK_14|MSG_MASK_15|MSG_MASK_16 | \
+		MSG_MASK_17|MSG_MASK_18|MSG_MASK_19|MSG_MASK_20|MSG_MASK_21,
+	MSG_LVL_MED|MSG_MASK_5 | \
+		MSG_MASK_6|MSG_MASK_7|MSG_MASK_8|MSG_MASK_9|MSG_MASK_10| \
+		MSG_MASK_11|MSG_MASK_12|MSG_MASK_13|MSG_MASK_14| \
+		MSG_MASK_15|MSG_MASK_16|MSG_MASK_17,
+	MSG_LVL_LOW,
+	MSG_LVL_MED,
+	MSG_LVL_HIGH,
+	MSG_LVL_HIGH,
+	MSG_LVL_HIGH,
+	MSG_LVL_HIGH,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED|MSG_MASK_5 | \
+		MSG_MASK_6|MSG_MASK_7|MSG_MASK_8,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_HIGH,
+	MSG_LVL_HIGH,
+	MSG_LVL_HIGH,
+	MSG_LVL_MED,
+	MSG_LVL_MED|MSG_MASK_5 | \
+		MSG_MASK_6|MSG_MASK_7|MSG_MASK_8|MSG_MASK_9|MSG_MASK_10| \
+		MSG_MASK_11|MSG_MASK_12|MSG_MASK_13|MSG_MASK_14|MSG_MASK_15| \
+		MSG_MASK_16|MSG_MASK_17|MSG_MASK_18|MSG_MASK_19|MSG_MASK_20| \
+		MSG_MASK_21|MSG_MASK_22|MSG_MASK_23|MSG_MASK_24|MSG_MASK_25,
+	MSG_LVL_MED|MSG_MASK_5 | \
+		MSG_MASK_6|MSG_MASK_7|MSG_MASK_8|MSG_MASK_9|MSG_MASK_10,
+	MSG_LVL_MED,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_MED,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_HIGH,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW | MSG_MASK_5 | \
+		MSG_MASK_6 | MSG_MASK_7 | MSG_MASK_8,
+	MSG_LVL_LOW | MSG_MASK_5 | \
+		MSG_MASK_6,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_LOW,
+	MSG_LVL_MED | MSG_MASK_5 | \
+		MSG_MASK_6|MSG_MASK_7|MSG_MASK_8|MSG_MASK_9|MSG_MASK_10| \
+		MSG_MASK_11|MSG_MASK_12|MSG_MASK_13|MSG_MASK_14|MSG_MASK_15 | \
+		MSG_MASK_16|MSG_MASK_17|MSG_MASK_18|MSG_MASK_19|MSG_MASK_20,
+	MSG_LVL_LOW,
+	MSG_LVL_MED,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_HIGH,
+	MSG_LVL_HIGH,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_HIGH | MSG_MASK_21,
+	MSG_LVL_HIGH,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW|MSG_LVL_MED|MSG_LVL_HIGH|MSG_LVL_ERROR|MSG_LVL_FATAL,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW|MSG_LVL_MED|MSG_LVL_HIGH|MSG_LVL_ERROR|MSG_LVL_FATAL,
+	MSG_LVL_LOW,
+	MSG_LVL_MED,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW|MSG_LVL_MED|MSG_LVL_HIGH|MSG_LVL_ERROR|MSG_LVL_FATAL,
+	MSG_LVL_MED,
+	MSG_LVL_HIGH,
+	MSG_LVL_LOW,
+	MSG_LVL_HIGH,
+	MSG_LVL_LOW|MSG_LVL_MED|MSG_LVL_HIGH|MSG_LVL_ERROR|MSG_LVL_FATAL,
+	MSG_LVL_MED|MSG_LVL_HIGH|MSG_LVL_ERROR,
+	MSG_LVL_MED|MSG_LVL_HIGH|MSG_LVL_ERROR,
+	MSG_LVL_MED|MSG_LVL_HIGH,
+	MSG_LVL_MED|MSG_LVL_HIGH,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW|MSG_LVL_MED|MSG_LVL_HIGH|MSG_LVL_ERROR|MSG_LVL_FATAL,
+	MSG_LVL_HIGH,
+	MSG_LVL_LOW|MSG_LVL_MED|MSG_LVL_HIGH|MSG_LVL_ERROR|MSG_LVL_FATAL,
+	MSG_LVL_LOW|MSG_LVL_MED|MSG_LVL_HIGH|MSG_LVL_ERROR|MSG_LVL_FATAL,
+	MSG_LVL_LOW|MSG_LVL_MED|MSG_LVL_HIGH|MSG_LVL_ERROR|MSG_LVL_FATAL,
+	MSG_LVL_LOW,
+	MSG_LVL_MED,
+	MSG_LVL_LOW|MSG_LVL_MED|MSG_LVL_HIGH|MSG_LVL_ERROR|MSG_LVL_FATAL,
+	MSG_LVL_LOW|MSG_LVL_MED|MSG_LVL_HIGH|MSG_LVL_ERROR|MSG_LVL_FATAL,
+	MSG_LVL_MED,
+	MSG_LVL_LOW|MSG_LVL_MED|MSG_LVL_HIGH|MSG_LVL_ERROR|MSG_LVL_FATAL,
+	MSG_LVL_LOW|MSG_LVL_MED|MSG_LVL_HIGH|MSG_LVL_ERROR|MSG_LVL_FATAL,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_HIGH,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW|MSG_LVL_MED|MSG_LVL_HIGH|MSG_LVL_ERROR|MSG_LVL_FATAL,
+	MSG_LVL_HIGH
+};
+
+static const uint32_t msg_bld_masks_1[] = {
+	MSG_LVL_MED,
+	MSG_LVL_HIGH,
+	MSG_LVL_HIGH,
+	MSG_LVL_LOW,
+	MSG_LVL_HIGH,
+	MSG_LVL_HIGH,
+	MSG_LVL_HIGH
+};
+
+static const uint32_t msg_bld_masks_2[] = {
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED|MSG_MASK_5,
+	MSG_LVL_MED,
+	MSG_LVL_MED
+};
+
+static const uint32_t msg_bld_masks_3[] = {
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED|MSG_MASK_5|MSG_MASK_6|MSG_MASK_7|
+			MSG_MASK_8|MSG_MASK_9|MSG_MASK_10,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED
+};
+
+static const uint32_t msg_bld_masks_4[] = {
+	MSG_LVL_HIGH,
+	MSG_LVL_HIGH,
+	MSG_LVL_HIGH,
+	MSG_LVL_HIGH,
+	MSG_LVL_HIGH,
+	MSG_LVL_HIGH,
+	MSG_LVL_HIGH,
+	MSG_LVL_HIGH,
+	MSG_LVL_HIGH,
+	MSG_LVL_HIGH,
+	MSG_LVL_HIGH,
+	MSG_LVL_HIGH,
+	MSG_LVL_HIGH,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW
+};
+
+static const uint32_t msg_bld_masks_5[] = {
+	MSG_LVL_HIGH,
+	MSG_LVL_MED,
+	MSG_LVL_HIGH,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED|MSG_MASK_5|MSG_MASK_6|MSG_MASK_7| \
+		MSG_MASK_8|MSG_MASK_9,
+	MSG_LVL_MED
+};
+
+static const uint32_t msg_bld_masks_6[] = {
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW
+};
+
+static const uint32_t msg_bld_masks_7[] = {
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW|MSG_LVL_MED|MSG_LVL_HIGH|MSG_LVL_ERROR|MSG_LVL_FATAL
+};
+
+static const uint32_t msg_bld_masks_8[] = {
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_HIGH
+};
+
+static const uint32_t msg_bld_masks_9[] = {
+	MSG_LVL_MED|MSG_MASK_5,
+	MSG_LVL_MED|MSG_MASK_5,
+	MSG_LVL_MED|MSG_MASK_5,
+	MSG_LVL_MED|MSG_MASK_5,
+	MSG_LVL_MED|MSG_MASK_5,
+	MSG_LVL_MED|MSG_MASK_5,
+	MSG_LVL_MED|MSG_MASK_5,
+	MSG_LVL_MED|MSG_MASK_5,
+	MSG_LVL_MED|MSG_MASK_5,
+	MSG_LVL_MED|MSG_MASK_5,
+	MSG_LVL_MED|MSG_MASK_5,
+	MSG_LVL_MED|MSG_MASK_5,
+	MSG_LVL_MED|MSG_MASK_5,
+	MSG_LVL_MED|MSG_MASK_5,
+	MSG_LVL_MED|MSG_MASK_5,
+	MSG_LVL_MED|MSG_MASK_5,
+	MSG_LVL_MED|MSG_MASK_5
+};
+
+static const uint32_t msg_bld_masks_10[] =  {
+	MSG_LVL_MED,
+	MSG_LVL_ERROR,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW|MSG_MASK_5 | \
+		MSG_MASK_6|MSG_MASK_7|MSG_MASK_8|MSG_MASK_9|MSG_MASK_10| \
+		MSG_MASK_11|MSG_MASK_12|MSG_MASK_13|MSG_MASK_14|MSG_MASK_15| \
+		MSG_MASK_16|MSG_MASK_17|MSG_MASK_18|MSG_MASK_19|MSG_MASK_20| \
+		MSG_MASK_21|MSG_MASK_22,
+	MSG_LVL_LOW|MSG_MASK_5,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW|MSG_MASK_5,
+	MSG_LVL_LOW|MSG_MASK_5,
+	MSG_LVL_LOW|MSG_MASK_5,
+	MSG_LVL_LOW|MSG_MASK_5,
+	MSG_LVL_LOW|MSG_MASK_5,
+	MSG_LVL_LOW|MSG_MASK_5,
+	MSG_LVL_LOW|MSG_MASK_5,
+	MSG_LVL_LOW|MSG_MASK_5,
+	MSG_LVL_LOW|MSG_MASK_5,
+	MSG_LVL_LOW|MSG_MASK_5,
+	MSG_LVL_LOW|MSG_MASK_5,
+	MSG_LVL_LOW|MSG_MASK_5,
+	MSG_LVL_LOW|MSG_MASK_5,
+	MSG_LVL_LOW|MSG_MASK_5,
+	MSG_LVL_LOW|MSG_MASK_5,
+	MSG_LVL_LOW|MSG_MASK_5,
+	MSG_LVL_LOW|MSG_MASK_5,
+	MSG_LVL_LOW|MSG_MASK_5,
+	MSG_LVL_LOW|MSG_MASK_5,
+	MSG_LVL_LOW,
+	MSG_LVL_MED,
+	MSG_LVL_HIGH,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW|MSG_MASK_5,
+	MSG_LVL_LOW|MSG_MASK_0 | MSG_MASK_1 | MSG_MASK_2 | \
+		MSG_MASK_3 | MSG_MASK_4 | MSG_MASK_5 | MSG_MASK_6,
+	MSG_LVL_HIGH,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_MED
+};
+
+static const uint32_t msg_bld_masks_11[] = {
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+};
+
+static const uint32_t msg_bld_masks_12[] = {
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+};
+
+static const uint32_t msg_bld_masks_13[] = {
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+};
+
+static const uint32_t msg_bld_masks_14[] = {
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+};
+
+static const uint32_t msg_bld_masks_15[] = {
+	MSG_LVL_MED
+};
+
+static const uint32_t msg_bld_masks_16[] = {
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW|MSG_LVL_MED|MSG_LVL_HIGH|MSG_LVL_ERROR|MSG_LVL_FATAL,
+	MSG_LVL_LOW|MSG_LVL_MED|MSG_LVL_HIGH|MSG_LVL_ERROR|MSG_LVL_FATAL,
+	MSG_LVL_LOW|MSG_LVL_MED|MSG_LVL_HIGH|MSG_LVL_ERROR|MSG_LVL_FATAL,
+	MSG_LVL_LOW|MSG_LVL_MED|MSG_LVL_HIGH|MSG_LVL_ERROR|MSG_LVL_FATAL,
+	MSG_LVL_LOW|MSG_LVL_MED|MSG_LVL_HIGH|MSG_LVL_ERROR|MSG_LVL_FATAL
+};
+
+static const uint32_t msg_bld_masks_17[] =  {
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED | MSG_MASK_6 | \
+		MSG_MASK_7 | MSG_MASK_8 | MSG_MASK_9,
+	MSG_LVL_MED | MSG_MASK_5 | \
+		MSG_MASK_6 | MSG_MASK_7 | MSG_MASK_8 | MSG_MASK_9 | \
+		MSG_MASK_10 | MSG_MASK_11 | MSG_MASK_12 | MSG_MASK_13 | \
+		MSG_MASK_14 | MSG_MASK_15 | MSG_MASK_16 | MSG_MASK_17,
+	MSG_LVL_MED,
+	MSG_LVL_MED | MSG_MASK_5 | \
+		MSG_MASK_6 | MSG_MASK_7 | MSG_MASK_8 | MSG_MASK_9 | \
+		MSG_MASK_10 | MSG_MASK_11 | MSG_MASK_12 | MSG_MASK_13 | \
+		MSG_MASK_14 | MSG_MASK_15 | MSG_MASK_16 | MSG_MASK_17 | \
+		MSG_MASK_18 | MSG_MASK_19 | MSG_MASK_20 | MSG_MASK_21 | \
+		MSG_MASK_22,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+};
+
+static const uint32_t msg_bld_masks_18[] = {
+	MSG_LVL_LOW,
+	MSG_LVL_LOW | MSG_MASK_8 | MSG_MASK_9 | MSG_MASK_10 | \
+		MSG_MASK_11|MSG_MASK_12|MSG_MASK_13|MSG_MASK_14|MSG_MASK_15 | \
+		MSG_MASK_16|MSG_MASK_17|MSG_MASK_18|MSG_MASK_19|MSG_MASK_20,
+	MSG_LVL_LOW | MSG_MASK_5 | MSG_MASK_6,
+	MSG_LVL_LOW | MSG_MASK_5,
+	MSG_LVL_LOW | MSG_MASK_5 | MSG_MASK_6,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW | MSG_MASK_5 | \
+		MSG_MASK_6 | MSG_MASK_7 | MSG_MASK_8 | MSG_MASK_9,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW
+};
+
+static const uint32_t msg_bld_masks_19[] = {
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW
+};
+
+static const uint32_t msg_bld_masks_20[] = {
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW
+};
+
+static const uint32_t msg_bld_masks_21[] = {
+	MSG_LVL_HIGH
+};
+
+static const uint32_t msg_bld_masks_22[] = {
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW
+};
+
+static const uint32_t msg_bld_masks_23[] = {
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW
+};
+
+static const uint32_t msg_bld_masks_24[] = {
+	MSG_LVL_HIGH,
+	MSG_LVL_HIGH,
+	MSG_LVL_HIGH,
+	MSG_LVL_HIGH,
+	MSG_LVL_HIGH,
+	MSG_LVL_HIGH
+};
+
+static const uint32_t msg_bld_masks_25[] = {
+	MSG_LVL_LOW
+};
+
+/* LOG CODES */
+static const uint32_t log_code_last_tbl[] = {
+	0x0,	/* EQUIP ID 0 */
+	0x1A11,	/* EQUIP ID 1 */
+	0x0,	/* EQUIP ID 2 */
+	0x0,	/* EQUIP ID 3 */
+	0x4910,	/* EQUIP ID 4 */
+	0x5420,	/* EQUIP ID 5 */
+	0x0,	/* EQUIP ID 6 */
+	0x74FF,	/* EQUIP ID 7 */
+	0x0,	/* EQUIP ID 8 */
+	0x0,	/* EQUIP ID 9 */
+	0xA38A,	/* EQUIP ID 10 */
+	0xB201,	/* EQUIP ID 11 */
+	0x0,	/* EQUIP ID 12 */
+	0xD1FF,	/* EQUIP ID 13 */
+	0x0,	/* EQUIP ID 14 */
+	0x0,	/* EQUIP ID 15 */
+};
+
+#define LOG_GET_ITEM_NUM(xx_code)	((xx_code) & 0x0FFF)
+#define LOG_GET_EQUIP_ID(xx_code)	(((xx_code) & 0xF000) >> 12)
+#define LOG_ITEMS_TO_SIZE(num_items)	(((num_items) + 7) / 8)
+#define LOG_SIZE_TO_ITEMS(size)		((8 * (size)) - 7)
+#define EVENT_COUNT_TO_BYTES(count)	(((count) / 8) + 1)
+
+#endif
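
The helpers above pack a 4-bit equipment ID and a 12-bit item number into
one 16-bit log code. A minimal user-space sketch (illustrative only, not
part of the patch) showing how an entry such as 0x1A11 from
log_code_last_tbl decomposes:

	#include <stdio.h>
	#include <stdint.h>

	#define LOG_GET_ITEM_NUM(xx_code)	((xx_code) & 0x0FFF)
	#define LOG_GET_EQUIP_ID(xx_code)	(((xx_code) & 0xF000) >> 12)
	#define LOG_ITEMS_TO_SIZE(num_items)	(((num_items) + 7) / 8)

	int main(void)
	{
		uint16_t code = 0x1A11;

		printf("equip id: %d\n", LOG_GET_EQUIP_ID(code));	/* 1 */
		printf("item num: %d\n", LOG_GET_ITEM_NUM(code));	/* 0xA11 = 2577 */
		/* one mask bit per item, rounded up to whole bytes: 323 */
		printf("mask bytes: %d\n",
		       LOG_ITEMS_TO_SIZE(LOG_GET_ITEM_NUM(code)));
		return 0;
	}
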
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/include/linux/dma-mapping-fast.h	2019-01-22 16:16:28.219289113 +0100
@@ -0,0 +1,61 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __LINUX_DMA_MAPPING_FAST_H
+#define __LINUX_DMA_MAPPING_FAST_H
+
+#include <linux/iommu.h>
+#include <linux/io-pgtable-fast.h>
+
+struct dma_iommu_mapping;
+
+struct dma_fast_smmu_mapping {
+	struct device		*dev;
+	struct iommu_domain	*domain;
+	dma_addr_t	 base;
+	size_t		 size;
+	size_t		 num_4k_pages;
+
+	unsigned int	bitmap_size;
+	unsigned long	*bitmap;
+	unsigned long	next_start;
+	unsigned long	upcoming_stale_bit;
+	bool		have_stale_tlbs;
+
+	dma_addr_t	pgtbl_dma_handle;
+	av8l_fast_iopte	*pgtbl_pmds;
+
+	spinlock_t	lock;
+	struct notifier_block notifier;
+
+	int		is_smmu_pt_coherent;
+};
+
+#ifdef CONFIG_IOMMU_IO_PGTABLE_FAST
+int fast_smmu_attach_device(struct device *dev,
+			    struct dma_iommu_mapping *mapping);
+void fast_smmu_detach_device(struct device *dev,
+			     struct dma_iommu_mapping *mapping);
+#else
+static inline int fast_smmu_attach_device(struct device *dev,
+					  struct dma_iommu_mapping *mapping)
+{
+	return -ENODEV;
+}
+
+static inline void fast_smmu_detach_device(struct device *dev,
+					   struct dma_iommu_mapping *mapping)
+{
+}
+#endif
+
+#endif /* __LINUX_DMA_MAPPING_FAST_H */
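
A hedged caller sketch: when CONFIG_IOMMU_IO_PGTABLE_FAST is disabled, the
inline stub above returns -ENODEV, so a driver can fall back to the regular
ARM IOMMU mapper without any #ifdef of its own. The
arm_iommu_attach_device() fallback from <asm/dma-iommu.h> is an assumption
about the caller, not part of this header:

	#include <linux/dma-mapping-fast.h>
	#include <asm/dma-iommu.h>

	static int my_attach(struct device *dev,
			     struct dma_iommu_mapping *mapping)
	{
		int ret = fast_smmu_attach_device(dev, mapping);

		/* stubbed out (-ENODEV) when fast page tables are not built */
		if (ret == -ENODEV)
			ret = arm_iommu_attach_device(dev, mapping);
		return ret;
	}
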
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/include/linux/fbxgpio_core.h	2019-01-22 16:16:28.227289185 +0100
@@ -0,0 +1,45 @@
+/*
+ * fbxgpio.h for linux-freebox
+ * Created by <nschichan@freebox.fr> on Wed Feb 21 22:09:46 2007
+ * Freebox SA
+ */
+
+#ifndef FBXGPIO_H
+# define FBXGPIO_H
+
+# include <linux/types.h>
+
+/* can change pin direction */
+#define FBXGPIO_PIN_DIR_RW	(1 << 0)
+#define FBXGPIO_PIN_REVERSE_POL	(1 << 1)
+
+struct fbxgpio_operations {
+	int  (*get_datain)(int gpio);
+	void (*set_dataout)(int gpio, int val);
+	int  (*get_dataout)(int gpio);
+	int (*set_direction)(int gpio, int dir);
+	int  (*get_direction)(int gpio);
+};
+
+
+struct fbxgpio_pin {
+	const struct fbxgpio_operations	*ops;
+	const char			*pin_name;
+	uint32_t			flags;
+	int				direction;
+	int				pin_num;
+	bool				claimed;
+	unsigned int			cur_dataout;
+	struct device			*dev;
+	struct device_node		*of_node;
+};
+
+
+#define GPIO_DIR_IN	0x1
+#define GPIO_DIR_OUT	0x0
+
+struct fbxgpio_pin *fbxgpio_of_get(struct device_node *np,
+				   const char *propname,
+				   int index);
+
+#endif /* !FBXGPIO_H */
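
One plausible backend, sketched here against plain gpiolib; the wiring is an
assumption for illustration, real boards would supply their own
register-level operations:

	#include <linux/gpio.h>
	#include <linux/fbxgpio_core.h>

	static int my_get_datain(int gpio)
	{
		return gpio_get_value(gpio);
	}

	static void my_set_dataout(int gpio, int val)
	{
		gpio_set_value(gpio, val);
	}

	static int my_set_direction(int gpio, int dir)
	{
		return dir == GPIO_DIR_IN ? gpio_direction_input(gpio)
					  : gpio_direction_output(gpio, 0);
	}

	static const struct fbxgpio_operations my_ops = {
		.get_datain	= my_get_datain,
		.set_dataout	= my_set_dataout,
		.set_direction	= my_set_direction,
	};
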
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/include/linux/fbxprocfs.h	2019-01-22 16:16:28.227289185 +0100
@@ -0,0 +1,40 @@
+#ifndef FBXPROCFS_H_
+#define FBXPROCFS_H_
+
+#include <linux/proc_fs.h>
+#include <asm/atomic.h>
+#include <linux/seq_file.h>
+
+struct fbxprocfs_client
+{
+	const char *dirname;
+	struct module *owner;
+	struct proc_dir_entry *dir;
+	atomic_t refcount;
+	struct list_head list;
+};
+
+struct fbxprocfs_desc {
+	char		*name;
+	unsigned long	id;
+	int	(*rfunc)(struct seq_file *, void *);
+	int	(*wfunc)(struct file *, const char *, unsigned long, void *);
+};
+
+struct fbxprocfs_client *fbxprocfs_add_client(const char *dirname,
+					      struct module *owner);
+
+int fbxprocfs_remove_client(struct fbxprocfs_client *client);
+
+
+int
+fbxprocfs_create_entries(struct fbxprocfs_client *client,
+			 const struct fbxprocfs_desc *ro_desc,
+			 const struct fbxprocfs_desc *rw_desc);
+
+int
+fbxprocfs_remove_entries(struct fbxprocfs_client *client,
+			 const struct fbxprocfs_desc *ro_desc,
+			 const struct fbxprocfs_desc *rw_desc);
+
+#endif /* FBXPROCFS_H_ */
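
A hypothetical usage sketch; the directory and entry names are illustrative,
and the NULL-name sentinel terminating the descriptor array is an
assumption about the API:

	#include <linux/module.h>
	#include <linux/fbxprocfs.h>

	static int mydrv_status_show(struct seq_file *m, void *priv)
	{
		seq_puts(m, "status: ok\n");
		return 0;
	}

	static const struct fbxprocfs_desc mydrv_ro[] = {
		{ .name = "status", .rfunc = mydrv_status_show },
		{ /* assumed NULL-name sentinel */ },
	};

	static struct fbxprocfs_client *mydrv_client;

	static int __init mydrv_procfs_init(void)
	{
		mydrv_client = fbxprocfs_add_client("mydrv", THIS_MODULE);
		if (!mydrv_client)
			return -ENOMEM;
		/* would create a read-only /proc/mydrv/status */
		return fbxprocfs_create_entries(mydrv_client, mydrv_ro, NULL);
	}
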
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/include/linux/fbxserial.h	2019-01-22 16:16:28.227289185 +0100
@@ -0,0 +1,114 @@
+#ifndef FBXSERIAL_H_
+#define FBXSERIAL_H_
+
+#include <linux/kernel.h>
+#include <linux/string.h>
+
+/*
+ * some part of serial may vary, we use abstract struct to store this,
+ * data content depends on type.
+ */
+#define EXTINFO_SIZE		128
+#define EXTINFO_MAX_COUNT	16
+
+/*
+ * extdev desc
+ */
+#define EXTINFO_TYPE_EXTDEV	1
+
+#define EXTDEV_TYPE_BUNDLE	1
+#define EXTDEV_TYPE_MAX		2
+
+struct fbx_serial_extinfo {
+	u32			type;
+
+	union {
+		/* extdev */
+		struct {
+			u32	type;
+			u32	model;
+			char	serial[64];
+		} extdev;
+
+		/* raw access */
+		unsigned char	data[EXTINFO_SIZE];
+	} u;
+} __attribute__ ((packed));
+
+
+/*
+ * master serial structure
+ */
+
+#define FBXSERIAL_VERSION	1
+
+#define FBXSERIAL_MAGIC		0x2d9521ab
+
+#define MAC_ADDR_SIZE		6
+#define RANDOM_DATA_SIZE	32
+
+/*
+ * this is the maximum size we accept to check crc32 against, so the
+ * structure may not grow larger than this
+ */
+#define FBXSERIAL_MAX_SIZE	8192
+
+struct fbx_serial {
+	u32	crc32;
+	u32	magic;
+	u32	struct_version;
+	u32	len;
+
+	/* board serial */
+	u16	type;
+	u8	version;
+	u8	manufacturer;
+	u16	year;
+	u8	week;
+	u32	number;
+	u32	flags;
+
+	/* mac address base */
+	u8	mac_addr_base[MAC_ADDR_SIZE];
+
+	/* mac address count */
+	u8	mac_count;
+
+	/* random data */
+	u8	random_data[RANDOM_DATA_SIZE];
+
+	/* last update of data (seconds since epoch) */
+	u32	last_modified;
+
+	/* count of following extinfo tag */
+	u32	extinfo_count;
+
+	/* beginning of extended info */
+	struct fbx_serial_extinfo	extinfos[EXTINFO_MAX_COUNT];
+
+} __attribute__ ((packed));
+
+void
+fbxserialinfo_get_random(unsigned char *data, unsigned int len);
+
+const void *
+fbxserialinfo_get_mac_addr(unsigned int index);
+
+int
+fbxserialinfo_read(const void *data, struct fbx_serial *out);
+
+struct fbx_serial *fbxserialinfo_get(void);
+
+/*
+ * implemented in board specific code
+ */
+#ifdef CONFIG_ARCH_HAS_FBXSERIAL
+extern const struct fbx_serial *arch_get_fbxserial(void);
+#else
+static inline const struct fbx_serial *arch_get_fbxserial(void)
+{
+	return NULL;
+}
+#endif
+
+#endif /* FBXSERIAL_H_ */
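
An illustrative validation sketch; the exact byte range covered by the CRC
is an assumption here (everything after the crc32 field, up to len bytes
total), as is the lower bound on len:

	#include <linux/crc32.h>
	#include <linux/fbxserial.h>

	static int my_fbxserial_check(const struct fbx_serial *s)
	{
		u32 crc;

		if (s->magic != FBXSERIAL_MAGIC)
			return -EINVAL;
		if (s->len <= sizeof(s->crc32) || s->len > FBXSERIAL_MAX_SIZE)
			return -EINVAL;
		/* assumed coverage: magic onward, i.e. len minus the crc field */
		crc = crc32(0, (const u8 *)s + sizeof(s->crc32),
			    s->len - sizeof(s->crc32));
		return crc == s->crc32 ? 0 : -EINVAL;
	}
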
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/include/linux/gpio_event.h	2019-01-22 16:16:28.235289258 +0100
@@ -0,0 +1,170 @@
+/* include/linux/gpio_event.h
+ *
+ * Copyright (C) 2007 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _LINUX_GPIO_EVENT_H
+#define _LINUX_GPIO_EVENT_H
+
+#include <linux/input.h>
+
+struct gpio_event_input_devs {
+	int count;
+	struct input_dev *dev[];
+};
+enum {
+	GPIO_EVENT_FUNC_UNINIT  = 0x0,
+	GPIO_EVENT_FUNC_INIT    = 0x1,
+	GPIO_EVENT_FUNC_SUSPEND = 0x2,
+	GPIO_EVENT_FUNC_RESUME  = 0x3,
+};
+struct gpio_event_info {
+	int (*func)(struct gpio_event_input_devs *input_devs,
+		    struct gpio_event_info *info,
+		    void **data, int func);
+	int (*event)(struct gpio_event_input_devs *input_devs,
+		     struct gpio_event_info *info,
+		     void **data, unsigned int dev, unsigned int type,
+		     unsigned int code, int value); /* out events */
+	bool no_suspend;
+};
+
+struct gpio_event_platform_data {
+	const char *name;
+	struct gpio_event_info **info;
+	size_t info_count;
+	int (*power)(const struct gpio_event_platform_data *pdata, bool on);
+	const char *names[]; /* If name is NULL, names contain a NULL */
+			     /* terminated list of input devices to create */
+};
+
+#define GPIO_EVENT_DEV_NAME "gpio-event"
+
+/* Key matrix */
+
+enum gpio_event_matrix_flags {
+	/* unset: drive active output low, set: drive active output high */
+	GPIOKPF_ACTIVE_HIGH              = 1U << 0,
+	GPIOKPF_DEBOUNCE                 = 1U << 1,
+	GPIOKPF_REMOVE_SOME_PHANTOM_KEYS = 1U << 2,
+	GPIOKPF_REMOVE_PHANTOM_KEYS      = GPIOKPF_REMOVE_SOME_PHANTOM_KEYS |
+					   GPIOKPF_DEBOUNCE,
+	GPIOKPF_DRIVE_INACTIVE           = 1U << 3,
+	GPIOKPF_LEVEL_TRIGGERED_IRQ      = 1U << 4,
+	GPIOKPF_PRINT_UNMAPPED_KEYS      = 1U << 16,
+	GPIOKPF_PRINT_MAPPED_KEYS        = 1U << 17,
+	GPIOKPF_PRINT_PHANTOM_KEYS       = 1U << 18,
+};
+
+#define MATRIX_CODE_BITS (10)
+#define MATRIX_KEY_MASK ((1U << MATRIX_CODE_BITS) - 1)
+#define MATRIX_KEY(dev, code) \
+	(((dev) << MATRIX_CODE_BITS) | (code & MATRIX_KEY_MASK))
+
+extern int gpio_event_matrix_func(struct gpio_event_input_devs *input_devs,
+			struct gpio_event_info *info, void **data, int func);
+struct gpio_event_matrix_info {
+	/* initialize to gpio_event_matrix_func */
+	struct gpio_event_info info;
+	/* size must be ninputs * noutputs */
+	const unsigned short *keymap;
+	unsigned int *input_gpios;
+	unsigned int *output_gpios;
+	unsigned int ninputs;
+	unsigned int noutputs;
+	/* time to wait before reading inputs after driving each output */
+	ktime_t settle_time;
+	/* time to wait before scanning the keypad a second time */
+	ktime_t debounce_delay;
+	ktime_t poll_time;
+	unsigned flags;
+};
+
+/* Directly connected inputs and outputs */
+
+enum gpio_event_direct_flags {
+	GPIOEDF_ACTIVE_HIGH         = 1U << 0,
+/*	GPIOEDF_USE_DOWN_IRQ        = 1U << 1, */
+/*	GPIOEDF_USE_IRQ             = (1U << 2) | GPIOIDF_USE_DOWN_IRQ, */
+	GPIOEDF_PRINT_KEYS          = 1U << 8,
+	GPIOEDF_PRINT_KEY_DEBOUNCE  = 1U << 9,
+	GPIOEDF_PRINT_KEY_UNSTABLE  = 1U << 10,
+};
+
+struct gpio_event_direct_entry {
+	uint32_t gpio:16;
+	uint32_t code:10;
+	uint32_t dev:6;
+};
+
+/* inputs */
+extern int gpio_event_input_func(struct gpio_event_input_devs *input_devs,
+			struct gpio_event_info *info, void **data, int func);
+struct gpio_event_input_info {
+	/* initialize to gpio_event_input_func */
+	struct gpio_event_info info;
+	ktime_t debounce_time;
+	ktime_t poll_time;
+	uint16_t flags;
+	uint16_t type;
+	const struct gpio_event_direct_entry *keymap;
+	size_t keymap_size;
+};
+
+/* outputs */
+extern int gpio_event_output_func(struct gpio_event_input_devs *input_devs,
+			struct gpio_event_info *info, void **data, int func);
+extern int gpio_event_output_event(struct gpio_event_input_devs *input_devs,
+			struct gpio_event_info *info, void **data,
+			unsigned int dev, unsigned int type,
+			unsigned int code, int value);
+struct gpio_event_output_info {
+	/* initialize to gpio_event_output_func and gpio_event_output_event */
+	struct gpio_event_info info;
+	uint16_t flags;
+	uint16_t type;
+	const struct gpio_event_direct_entry *keymap;
+	size_t keymap_size;
+};
+
+
+/* axes */
+
+enum gpio_event_axis_flags {
+	GPIOEAF_PRINT_UNKNOWN_DIRECTION  = 1U << 16,
+	GPIOEAF_PRINT_RAW                = 1U << 17,
+	GPIOEAF_PRINT_EVENT              = 1U << 18,
+};
+
+extern int gpio_event_axis_func(struct gpio_event_input_devs *input_devs,
+			struct gpio_event_info *info, void **data, int func);
+struct gpio_event_axis_info {
+	/* initialize to gpio_event_axis_func */
+	struct gpio_event_info info;
+	uint8_t  count; /* number of gpios for this axis */
+	uint8_t  dev; /* device index when using multiple input devices */
+	uint8_t  type; /* EV_REL or EV_ABS */
+	uint16_t code;
+	uint16_t decoded_size;
+	uint16_t (*map)(struct gpio_event_axis_info *info, uint16_t in);
+	uint32_t *gpio;
+	uint32_t flags;
+};
+#define gpio_axis_2bit_gray_map gpio_axis_4bit_gray_map
+#define gpio_axis_3bit_gray_map gpio_axis_4bit_gray_map
+uint16_t gpio_axis_4bit_gray_map(
+			struct gpio_event_axis_info *info, uint16_t in);
+uint16_t gpio_axis_5bit_singletrack_map(
+			struct gpio_event_axis_info *info, uint16_t in);
+
+#endif
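
A hypothetical 2x2 matrix keypad description; gpio numbers, keycodes, and
timings are illustrative, and the row-major (outputs x inputs) keymap layout
is an assumption. MATRIX_KEY() packs the target input-device index with the
keycode:

	#include <linux/gpio_event.h>

	static unsigned int kp_out_gpios[] = { 34, 35 };	/* rows, driven */
	static unsigned int kp_in_gpios[]  = { 36, 37 };	/* columns, sensed */

	/* ninputs * noutputs entries, all routed to input device 0 */
	static const unsigned short kp_keymap[] = {
		MATRIX_KEY(0, KEY_UP),   MATRIX_KEY(0, KEY_DOWN),
		MATRIX_KEY(0, KEY_LEFT), MATRIX_KEY(0, KEY_RIGHT),
	};

	static struct gpio_event_matrix_info kp_matrix_info = {
		.info.func	= gpio_event_matrix_func,
		.keymap		= kp_keymap,
		.output_gpios	= kp_out_gpios,
		.input_gpios	= kp_in_gpios,
		.noutputs	= ARRAY_SIZE(kp_out_gpios),
		.ninputs	= ARRAY_SIZE(kp_in_gpios),
		.settle_time.tv64	= 40 * NSEC_PER_USEC,
		.debounce_delay.tv64	= 5 * NSEC_PER_MSEC,
		.flags		= GPIOKPF_LEVEL_TRIGGERED_IRQ |
				  GPIOKPF_REMOVE_PHANTOM_KEYS |
				  GPIOKPF_ACTIVE_HIGH,
	};
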
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/include/linux/hdcp_qseecom.h	2019-01-22 16:16:28.235289258 +0100
@@ -0,0 +1,166 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+*
+* This program is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License version 2 and
+* only version 2 as published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+* GNU General Public License for more details.
+*/
+
+#ifndef __HDCP_QSEECOM_H
+#define __HDCP_QSEECOM_H
+#include <linux/types.h>
+
+#define HDCP_MAX_MESSAGE_PARTS 4
+#define RECV_ID_SIZE 5
+#define MAX_DEVICES_SUPPORTED 127
+
+enum hdcp_lib_wakeup_cmd {
+	HDCP_LIB_WKUP_CMD_INVALID,
+	HDCP_LIB_WKUP_CMD_START,
+	HDCP_LIB_WKUP_CMD_STOP,
+	HDCP_LIB_WKUP_CMD_MSG_SEND_SUCCESS,
+	HDCP_LIB_WKUP_CMD_MSG_SEND_FAILED,
+	HDCP_LIB_WKUP_CMD_MSG_RECV_SUCCESS,
+	HDCP_LIB_WKUP_CMD_MSG_RECV_FAILED,
+	HDCP_LIB_WKUP_CMD_MSG_RECV_TIMEOUT,
+	HDCP_LIB_WKUP_CMD_QUERY_STREAM_TYPE,
+	HDCP_LIB_WKUP_CMD_LINK_FAILED,
+};
+
+enum hdmi_hdcp_wakeup_cmd {
+	HDMI_HDCP_WKUP_CMD_INVALID,
+	HDMI_HDCP_WKUP_CMD_SEND_MESSAGE,
+	HDMI_HDCP_WKUP_CMD_RECV_MESSAGE,
+	HDMI_HDCP_WKUP_CMD_STATUS_SUCCESS,
+	HDMI_HDCP_WKUP_CMD_STATUS_FAILED,
+	HDMI_HDCP_WKUP_CMD_LINK_POLL,
+	HDMI_HDCP_WKUP_CMD_AUTHENTICATE
+};
+
+struct hdcp_lib_wakeup_data {
+	enum hdcp_lib_wakeup_cmd cmd;
+	void *context;
+	char *recvd_msg_buf;
+	uint32_t recvd_msg_len;
+	uint32_t timeout;
+};
+
+struct hdcp_msg_part {
+	char *name;
+	uint32_t offset;
+	uint32_t length;
+};
+
+struct hdcp_msg_data {
+	uint32_t num_messages;
+	struct hdcp_msg_part messages[HDCP_MAX_MESSAGE_PARTS];
+	uint8_t rx_status;
+};
+
+struct hdmi_hdcp_wakeup_data {
+	enum hdmi_hdcp_wakeup_cmd cmd;
+	void *context;
+	char *send_msg_buf;
+	uint32_t send_msg_len;
+	uint32_t timeout;
+	uint8_t abort_mask;
+	const struct hdcp_msg_data *message_data;
+};
+
+static inline char *hdmi_hdcp_cmd_to_str(uint32_t cmd)
+{
+	switch (cmd) {
+	case HDMI_HDCP_WKUP_CMD_SEND_MESSAGE:
+		return "HDMI_HDCP_WKUP_CMD_SEND_MESSAGE";
+	case HDMI_HDCP_WKUP_CMD_RECV_MESSAGE:
+		return "HDMI_HDCP_WKUP_CMD_RECV_MESSAGE";
+	case HDMI_HDCP_WKUP_CMD_STATUS_SUCCESS:
+		return "HDMI_HDCP_WKUP_CMD_STATUS_SUCCESS";
+	case HDMI_HDCP_WKUP_CMD_STATUS_FAILED:
+		return "HDMI_HDCP_WKUP_CMD_STATUS_FAIL";
+	case HDMI_HDCP_WKUP_CMD_LINK_POLL:
+		return "HDMI_HDCP_WKUP_CMD_LINK_POLL";
+	case HDMI_HDCP_WKUP_CMD_AUTHENTICATE:
+		return "HDMI_HDCP_WKUP_CMD_AUTHENTICATE";
+	default:
+		return "???";
+	}
+}
+
+static inline char *hdcp_lib_cmd_to_str(uint32_t cmd)
+{
+	switch (cmd) {
+	case HDCP_LIB_WKUP_CMD_START:
+		return "HDCP_LIB_WKUP_CMD_START";
+	case HDCP_LIB_WKUP_CMD_STOP:
+		return "HDCP_LIB_WKUP_CMD_STOP";
+	case HDCP_LIB_WKUP_CMD_MSG_SEND_SUCCESS:
+		return "HDCP_LIB_WKUP_CMD_MSG_SEND_SUCCESS";
+	case HDCP_LIB_WKUP_CMD_MSG_SEND_FAILED:
+		return "HDCP_LIB_WKUP_CMD_MSG_SEND_FAILED";
+	case HDCP_LIB_WKUP_CMD_MSG_RECV_SUCCESS:
+		return "HDCP_LIB_WKUP_CMD_MSG_RECV_SUCCESS";
+	case HDCP_LIB_WKUP_CMD_MSG_RECV_FAILED:
+		return "HDCP_LIB_WKUP_CMD_MSG_RECV_FAILED";
+	case HDCP_LIB_WKUP_CMD_MSG_RECV_TIMEOUT:
+		return "HDCP_LIB_WKUP_CMD_MSG_RECV_TIMEOUT";
+	case HDCP_LIB_WKUP_CMD_QUERY_STREAM_TYPE:
+		return "HDCP_LIB_WKUP_CMD_QUERY_STREAM_TYPE";
+	case HDCP_LIB_WKUP_CMD_LINK_FAILED:
+		return "HDCP_LIB_WKUP_CMD_LINK_FAILED";
+	default:
+		return "???";
+	}
+}
+
+struct hdcp_srm_device_id_t {
+	uint8_t data[RECV_ID_SIZE];
+};
+
+struct hdcp_txmtr_ops {
+	int (*wakeup)(struct hdcp_lib_wakeup_data *data);
+	bool (*feature_supported)(void *phdcpcontext);
+	void (*update_exec_type)(void *ctx, bool tethered);
+	int (*hdcp_txmtr_get_state)(void *phdcpcontext,
+		uint32_t *state);
+};
+
+struct hdcp_client_ops {
+	int (*wakeup)(struct hdmi_hdcp_wakeup_data *data);
+	void (*notify_lvl_change)(void *client_ctx, int min_lvl);
+	void (*srm_cb)(void *client_ctx);
+	void (*mute_sink)(void *client_ctx);
+};
+
+enum hdcp_device_type {
+	HDCP_TXMTR_HDMI = 0x8001,
+	HDCP_TXMTR_DP = 0x8002
+};
+
+struct hdcp_register_data {
+	struct hdcp_client_ops *client_ops;
+	struct hdcp_txmtr_ops *txmtr_ops;
+	enum hdcp_device_type device_type;
+	void *client_ctx;
+	void **hdcp_ctx;
+	bool tethered;
+};
+
+int hdcp_library_register(struct hdcp_register_data *data);
+void hdcp_library_deregister(void *phdcpcontext);
+bool hdcp1_check_if_supported_load_app(void);
+int hdcp1_set_keys(uint32_t *aksv_msb, uint32_t *aksv_lsb);
+int hdcp1_set_enc(bool enable);
+int hdcp1_validate_receiver_ids(struct hdcp_srm_device_id_t *device_ids,
+				uint32_t device_id_cnt);
+void hdcp1_cache_repeater_topology(void *hdcp1_cached_tp);
+void hdcp1_notify_topology(void);
+void hdcp1_client_register(void *client_ctx,
+			   struct hdcp_client_ops *ops);
+void hdcp1_client_unregister(void);
+
+#endif /* __HDCP_QSEECOM_H */
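
A hypothetical registration sketch; which ops are mandatory, and whether the
library fills in txmtr_ops on success, are assumptions here:

	#include <linux/kernel.h>
	#include <linux/hdcp_qseecom.h>

	static int my_hdmi_wakeup(struct hdmi_hdcp_wakeup_data *data)
	{
		pr_debug("hdcp wakeup: %s\n", hdmi_hdcp_cmd_to_str(data->cmd));
		return 0;
	}

	static struct hdcp_client_ops my_client_ops = {
		.wakeup = my_hdmi_wakeup,
	};

	static struct hdcp_txmtr_ops my_txmtr_ops; /* assumed: filled by library */
	static void *my_hdcp_ctx;

	static int my_hdcp_register(void *client_ctx)
	{
		struct hdcp_register_data data = {
			.client_ops	= &my_client_ops,
			.txmtr_ops	= &my_txmtr_ops,
			.device_type	= HDCP_TXMTR_HDMI,
			.client_ctx	= client_ctx,
			.hdcp_ctx	= &my_hdcp_ctx,
		};

		return hdcp_library_register(&data);
	}
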
diff -Nruw linux-4.4.115-fbx/include/linux/hdmi-cec./dev.h linux-4.4.115-fbx/include/linux/hdmi-cec/dev.h
--- linux-4.4.115-fbx/include/linux/hdmi-cec./dev.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/include/linux/hdmi-cec/dev.h	2019-05-31 20:58:54.922934968 +0200
@@ -0,0 +1,30 @@
+#ifndef __HDMI_CEC_DEV_H
+#define __HDMI_CEC_DEV_H
+
+#include <linux/ioctl.h>
+#include <linux/hdmi-cec/hdmi-cec.h>
+
+#define CEC_IOCTL_BASE	'C'
+
+#define CEC_SET_LOGICAL_ADDRESS	_IOW(CEC_IOCTL_BASE, 0, int)
+#define CEC_RESET_DEVICE	_IOW(CEC_IOCTL_BASE, 3, int)
+#define CEC_GET_COUNTERS	_IOR(CEC_IOCTL_BASE, 4, struct cec_counters)
+#define CEC_SET_RX_MODE		_IOW(CEC_IOCTL_BASE, 5, enum cec_rx_mode)
+#define CEC_GET_TX_STATUS	_IOW(CEC_IOCTL_BASE, 6, struct cec_tx_status)
+#define CEC_SET_DETACHED_CONFIG	_IOW(CEC_IOCTL_BASE, 7, struct cec_detached_config)
+
+#define CEC_MAX_DEVS	(10)
+
+#ifdef __KERNEL__
+
+struct cec_adapter;
+
+int __init cec_cdev_init(void);
+void __exit cec_cdev_exit(void);
+
+int cec_create_adapter_node(struct cec_adapter *);
+void cec_remove_adapter_node(struct cec_adapter *);
+
+#endif /* __KERNEL__ */
+
+#endif /* __HDMI_CEC_DEV_H */
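
An illustrative user-space sketch exercising the ioctls above; the /dev/cec0
node name and the logical address value are assumptions:

	#include <fcntl.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <linux/hdmi-cec/dev.h>

	int cec_open_and_claim(void)
	{
		int fd, laddr = 4;	/* e.g. CEC "Playback Device 1" */

		fd = open("/dev/cec0", O_RDWR);
		if (fd < 0)
			return -1;
		if (ioctl(fd, CEC_SET_LOGICAL_ADDRESS, &laddr) < 0) {
			close(fd);
			return -1;
		}
		return fd;
	}
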
diff -Nruw linux-4.4.115-fbx/include/linux/hdmi-cec./hdmi-cec.h linux-4.4.115-fbx/include/linux/hdmi-cec/hdmi-cec.h
--- linux-4.4.115-fbx/include/linux/hdmi-cec./hdmi-cec.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/include/linux/hdmi-cec/hdmi-cec.h	2019-05-31 20:58:54.922934968 +0200
@@ -0,0 +1,127 @@
+/*
+ * Header for the HDMI CEC core infrastructure
+ */
+#ifndef __HDMI_CEC_H
+#define __HDMI_CEC_H
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/fs.h>
+#include <linux/cdev.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/list.h>
+#include <linux/completion.h>
+#include <linux/workqueue.h>
+#include <linux/mutex.h>
+
+#include <uapi/linux/hdmi-cec/hdmi-cec.h>
+
+struct cec_adapter;
+
+#define CEC_HW_HAS_COUNTERS	(1 << 0)	/* HW counts events */
+#define CEC_HW_HAS_RX_FILTER	(1 << 1)	/* HW has receive filter */
+
+/**
+ * struct cec_adapter_ops - cec adapter low-level operations
+ * @set_logical_address:	callback to set the logical address
+ * @send:	callback to send a cec payload
+ * @reset:	callback to reset the hardware
+ * @get_counters:	callback to get the counters (if supported by HW)
+ * @set_rx_mode:	callback to set the receive mode
+ * @attach:	callback to attach the host to the device
+ * @detach:	callback to detach the host from the device
+ * @set_detached_config:	callback to configure adapter when detached
+ */
+struct cec_adapter_ops {
+	int	(*set_logical_address)(struct cec_adapter *, const u8);
+	int	(*send)(struct cec_adapter *, u16, u8, const u8 *, const u8);
+	int	(*reset)(struct cec_adapter *);
+	int	(*get_counters)(struct cec_adapter *, struct cec_counters *);
+	int	(*set_rx_mode)(struct cec_adapter *, enum cec_rx_mode);
+	int	(*attach)(struct cec_adapter *);
+	int	(*detach)(struct cec_adapter *);
+	int	(*set_detached_config)(struct cec_adapter *,
+				       const struct cec_detached_config *);
+};
+
+/**
+ * struct cec_adapter - cec adapter structure
+ * @driver_name:	driver prefix used for device naming
+ * @module:		module pointer for refcounting
+ * @ops:		struct cec_adapter_ops pointer
+ * @flags:		adapter flags bitmask
+ * @name:		adapter unique name
+ * @dev:		device structure for device/driver model interaction
+ * @lock:		adapter all-purpose mutex for exclusive locking
+ * @attached:		adapter attached to host or not
+ * @tx_pending:		true if tx is ongoing
+ * @tx_lock:		transmit lock
+ * @rx_msg_list:	receive message list head
+ * @rx_msg_list_lock:	receive message list lock
+ * @rx_msg_len:		receive message queue len
+ * @wait:		receive waitqueue (used for poll, read)
+ * @cdev:		character device node
+ */
+struct cec_adapter {
+	const char		*driver_name;
+	struct module		*module;
+	const struct cec_adapter_ops	*ops;
+	unsigned int		flags;
+	atomic_t		users;
+
+	/* unique device name, used for sysfs & chardev */
+	char			name[128];
+
+	/* associated sysfs device */
+	struct device		dev;
+
+	/* private */
+	struct mutex		lock;
+	bool			attached;
+
+	wait_queue_head_t	wait;
+
+	/* transmit message list */
+	spinlock_t		tx_done_lock;
+	unsigned long		tx_pending;
+	bool			last_tx_success;
+	u8			last_tx_flags;
+	u8			last_tx_tries;
+	struct timer_list	tx_timeout_timer;
+
+	/* receive message list */
+	struct list_head	rx_msg_list;
+	spinlock_t		rx_msg_list_lock;
+	unsigned int		rx_msg_len;
+
+	/* associated chardev */
+	struct cdev		cdev;
+
+	/* true when unregistering device */
+	bool			dead;
+};
+
+#define CECDEV_PRIV_ALIGN	8
+
+static inline void *cec_adapter_priv(struct cec_adapter *adapter)
+{
+	return (u8 *)adapter + ((sizeof(struct cec_adapter)
+			      + (CECDEV_PRIV_ALIGN - 1))
+			     & ~(CECDEV_PRIV_ALIGN - 1));
+}
+
+static inline struct cec_adapter *to_cec_adapter(struct device *d)
+{
+	return container_of(d, struct cec_adapter, dev);
+}
+
+struct cec_adapter *alloc_cec_adapter(size_t priv_size);
+int register_cec_adapter(struct cec_adapter *, struct device *);
+void unregister_cec_adapter(struct cec_adapter *);
+void free_cec_adapter(struct cec_adapter *);
+int adapter_rx_done(struct cec_adapter *, const u8 *, const u8 len,
+		    bool valid, u8 flags);
+void adapter_tx_done(struct cec_adapter *, bool success, u8 flags, u8 tries);
+
+#endif /* __HDMI_CEC_H */
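
A hedged driver-side sketch of the co-allocation pattern: private state
lives right after struct cec_adapter, at the CECDEV_PRIV_ALIGN-rounded
offset that cec_adapter_priv() computes. The ops table and names are
illustrative:

	#include <linux/platform_device.h>
	#include <linux/hdmi-cec/hdmi-cec.h>

	struct my_cec_priv {
		void __iomem *regs;
	};

	static const struct cec_adapter_ops my_cec_ops = {
		/* callbacks not shown */
	};

	static int my_cec_probe(struct platform_device *pdev)
	{
		struct cec_adapter *adap;
		struct my_cec_priv *priv;

		adap = alloc_cec_adapter(sizeof(*priv));
		if (!adap)
			return -ENOMEM;

		priv = cec_adapter_priv(adap);	/* aligned area after the struct */
		priv->regs = NULL;		/* ioremap'd elsewhere */
		adap->driver_name = "my-cec";
		adap->module = THIS_MODULE;
		adap->ops = &my_cec_ops;

		return register_cec_adapter(adap, &pdev->dev);
	}
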
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/include/linux/i2c/i2c-msm-v2.h	2019-01-22 16:16:28.243289330 +0100
@@ -0,0 +1,666 @@
+/* Copyright (c) 2014-2015,2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+/*
+ * I2C controller driver for Qualcomm Technologies Inc platforms
+ */
+
+#ifndef _I2C_MSM_V2_H
+#define _I2C_MSM_V2_H
+
+#include <linux/bitops.h>
+#include <linux/dmaengine.h>
+
+enum msm_i2_debug_level {
+	MSM_ERR,	/* Error messages only. Always on */
+	MSM_PROF,	/* High level events. Use for profiling */
+	MSM_DBG,	/* Low level details. Use for debugging */
+};
+
+#define i2c_msm_dbg(ctrl, dbg_level, fmt, ...) do {\
+		if (ctrl->dbgfs.dbg_lvl >= dbg_level)\
+			dev_info(ctrl->dev, pr_fmt(fmt), ##__VA_ARGS__);\
+	} while (0)
+
+#define BIT_IS_SET(val, idx)        (((val) >> (idx)) & 0x1)
+#define BITS_AT(val, idx, n_bits) \
+	(((val) & (((1 << (n_bits)) - 1) << (idx))) >> (idx))
+#define MASK_IS_SET(val, mask)      (((val) & (mask)) == (mask))
+#define MASK_IS_SET_BOOL(val, mask) (MASK_IS_SET(val, mask) ? 1 : 0)
+#define KHz(freq) (1000 * (freq))
+#define I2C_MSM_CLK_FAST_PLUS_FREQ  (1000000)
+
+/* QUP Registers */
+enum {
+	QUP_CONFIG              = 0x0,
+	QUP_STATE               = 0x4,
+	QUP_IO_MODES            = 0x8,
+	QUP_SW_RESET            = 0xC,
+	QUP_OPERATIONAL         = 0x18,
+	QUP_ERROR_FLAGS         = 0x1C,
+	QUP_ERROR_FLAGS_EN      = 0x20,
+	QUP_TEST_CTRL           = 0x24,
+	QUP_OPERATIONAL_MASK    = 0x28,
+	QUP_HW_VERSION          = 0x30,
+	QUP_MX_READ_COUNT       = 0x208,
+	QUP_MX_WRITE_COUNT      = 0x150,
+	QUP_MX_OUTPUT_COUNT     = 0x100,
+	QUP_MX_INPUT_COUNT      = 0x200,
+	QUP_MX_WR_CNT           = 0x100,
+	QUP_OUT_DEBUG           = 0x108,
+	QUP_OUT_FIFO_CNT        = 0x10C,
+	QUP_OUT_FIFO_BASE       = 0x110,
+	QUP_IN_READ_CUR         = 0x20C,
+	QUP_IN_DEBUG            = 0x210,
+	QUP_IN_FIFO_CNT         = 0x214,
+	QUP_IN_FIFO_BASE        = 0x218,
+	QUP_I2C_MASTER_CLK_CTL  = 0x400,
+	QUP_I2C_STATUS          = 0x404,
+	QUP_I2C_MASTER_CONFIG   = 0x408,
+	QUP_I2C_MASTER_BUS_CLR  = 0x40C,
+};
+
+/* Register:QUP_STATE state field values */
+enum i2c_msm_qup_state {
+	QUP_STATE_RESET         = 0,
+	QUP_STATE_RUN           = 1U,
+	QUP_STATE_PAUSE         = 3U,
+};
+
+/* Register:QUP_STATE fields */
+enum {
+	QUP_STATE_MASK          = 3U,
+	QUP_STATE_VALID         = BIT(2),
+	QUP_I2C_MAST_GEN        = BIT(4),
+	QUP_I2C_FLUSH           = BIT(6),
+	QUP_I2C_STATUS_RESET    = 0x42,
+};
+
+
+/* Register:QUP_CONFIG fields */
+enum {
+	QUP_MINI_CORE_MASK      = 0xF00,
+	QUP_MINI_CORE_I2C_VAL   = 0x200,
+	QUP_N_MASK              = 0x1F,
+	QUP_N_VAL               = 0x7, /* 0xF for A family */
+	QUP_NO_OUPUT            = BIT(6),
+	QUP_NO_INPUT            = BIT(7),
+	QUP_APP_CLK_ON_EN       = BIT(12),
+	QUP_CORE_CLK_ON_EN      = BIT(13),
+	QUP_FIFO_CLK_GATE_EN    = BIT(14),
+};
+
+/* Register:QUP_OPERATIONAL fields */
+enum {
+	QUP_INPUT_FIFO_NOT_EMPTY = BIT(5),
+	QUP_OUTPUT_SERVICE_FLAG  = BIT(8),
+	QUP_INPUT_SERVICE_FLAG   = BIT(9),
+	QUP_MAX_OUTPUT_DONE_FLAG = BIT(10),
+	QUP_MAX_INPUT_DONE_FLAG  = BIT(11),
+	QUP_OUT_BLOCK_WRITE_REQ  = BIT(12),
+	QUP_IN_BLOCK_READ_REQ    = BIT(13),
+};
+
+/* Register:QUP_OPERATIONAL_MASK fields */
+enum {
+	QUP_INPUT_SERVICE_MASK  = BIT(9),
+	QUP_OUTPUT_SERVICE_MASK = BIT(8),
+};
+
+/* Register:QUP_IO_MODES fields */
+enum {
+	QUP_OUTPUT_MODE         = BIT(10) | BIT(11),
+	QUP_INPUT_MODE          = BIT(12) | BIT(13),
+	QUP_UNPACK_EN           = BIT(14),
+	QUP_PACK_EN             = BIT(15),
+	QUP_OUTPUT_BIT_SHIFT_EN = BIT(16),
+};
+
+/* Register:QUP_I2C_STATUS (a.k.a I2C_MASTER_STATUS) fields */
+enum {
+	QUP_BUS_ERROR           = BIT(2),
+	QUP_PACKET_NACKED       = BIT(3),
+	QUP_ARB_LOST            = BIT(4),
+	QUP_INVALID_WRITE       = BIT(5),
+	QUP_FAILED              = BIT(6),
+	QUP_BUS_ACTIVE          = BIT(8),
+	QUP_BUS_MASTER          = BIT(9),
+	QUP_INVALID_TAG         = BIT(23),
+	QUP_INVALID_READ_ADDR   = BIT(24),
+	QUP_INVALID_READ_SEQ    = BIT(25),
+	QUP_I2C_SDA             = BIT(26),
+	QUP_I2C_SCL             = BIT(27),
+	QUP_MSTR_STTS_ERR_MASK  = 0x380003C,
+};
+
+/* Register:QUP_I2C_MASTER_CONFIG fields */
+enum {
+	QUP_EN_VERSION_TWO_TAG  = 1U,
+};
+
+/* Register:QUP_I2C_MASTER_CLK_CTL field setters */
+#define I2C_MSM_SCL_NOISE_REJECTION(reg_val, noise_rej_val) \
+		(((reg_val) & ~(0x3 << 24)) | (((noise_rej_val) & 0x3) << 24))
+#define I2C_MSM_SDA_NOISE_REJECTION(reg_val, noise_rej_val) \
+		(((reg_val) & ~(0x3 << 26)) | (((noise_rej_val) & 0x3) << 26))
+
+/* Register:QUP_ERROR_FLAGS_EN flags */
+enum {
+	QUP_OUTPUT_OVER_RUN_ERR_EN  = BIT(5),
+	QUP_INPUT_UNDER_RUN_ERR_EN  = BIT(4),
+	QUP_OUTPUT_UNDER_RUN_ERR_EN = BIT(3),
+	QUP_INPUT_OVER_RUN_ERR_EN   = BIT(2),
+};
+
+/* Status, Error flags */
+enum {
+	I2C_STATUS_WR_BUFFER_FULL  = BIT(0),
+	I2C_STATUS_BUS_ACTIVE      = BIT(8),
+	I2C_STATUS_BUS_MASTER      = BIT(9),
+	I2C_STATUS_ERROR_MASK      = 0x38000FC,
+	QUP_I2C_NACK_FLAG          = BIT(3),
+	QUP_IN_NOT_EMPTY           = BIT(5),
+	QUP_ERR_FLGS_MASK           = 0x3C,
+};
+
+/* Master status clock states */
+enum {
+	I2C_CLK_RESET_BUSIDLE_STATE = 0,
+	I2C_CLK_FORCED_LOW_STATE    = 5,
+};
+
+/* Controller's power state */
+enum i2c_msm_power_state {
+	I2C_MSM_PM_RT_ACTIVE,
+	I2C_MSM_PM_RT_SUSPENDED,
+	I2C_MSM_PM_SYS_SUSPENDED
+};
+
+/*
+ * The max buffer size required for tags is for holding the following sequence:
+ * [start] + [start | slv-addr] + [ rd/wr | len]
+ * which sum up to 6 bytes. However, we use u64 to hold the value, thus we say
+ * that max length is 8 bytes.
+ */
+#define I2C_MSM_TAG2_MAX_LEN            (4)
+#define I2C_MSM_DMA_TX_SZ             (64) /* tx chan n entries */
+#define I2C_MSM_DMA_RX_SZ             (32) /* rx chan n entries */
+#define I2C_MSM_DMA_DESC_ARR_SIZ  (I2C_MSM_DMA_TX_SZ + I2C_MSM_DMA_RX_SZ)
+#define I2C_MSM_REG_2_STR_BUF_SZ        (128)
+/* Optimal value to hold the error strings */
+#define I2C_MSM_MAX_ERR_BUF_SZ		(256)
+#define I2C_MSM_BUF_DUMP_MAX_BC         (20)
+#define I2C_MSM_MAX_POLL_MSEC           (100)
+#define I2C_MSM_TIMEOUT_SAFTY_COEF      (10)
+#define I2C_MSM_TIMEOUT_MIN_USEC        (500000)
+#define I2C_QUP_MAX_BUS_RECOVERY_RETRY  (10)
+
+/* QUP v2 tags */
+#define QUP_TAG2_DATA_WRITE        (0x82ULL)
+#define QUP_TAG2_DATA_WRITE_N_STOP (0x83ULL)
+#define QUP_TAG2_DATA_READ         (0x85ULL)
+#define QUP_TAG2_DATA_READ_N_STOP  (0x87ULL)
+#define QUP_TAG2_START             (0x81ULL)
+#define QUP_TAG2_DATA_READ_N_NACK  (0x86ULL)
+#define QUP_TAG2_START_STOP        (0x8AULL)
+#define QUP_TAG2_INPUT_EOT         (0x93ULL)
+#define QUP_TAG2_FLUSH_STOP        (0x96ULL)
+#define QUP_BUF_OVERHD_BC          (2)
+#define QUP_MAX_BUF_SZ             (256)
+
+enum i2c_msm_clk_path_vec_idx {
+	I2C_MSM_CLK_PATH_SUSPEND_VEC,
+	I2C_MSM_CLK_PATH_RESUME_VEC,
+};
+#define I2C_MSM_CLK_PATH_AVRG_BW(ctrl) (0)
+#define I2C_MSM_CLK_PATH_BRST_BW(ctrl) (ctrl->rsrcs.clk_freq_in * 8)
+
+enum i2c_msm_gpio_name_idx {
+	I2C_MSM_GPIO_SCL,
+	I2C_MSM_GPIO_SDA,
+};
+
+extern const char * const i2c_msm_mode_str_tbl[];
+
+struct i2c_msm_ctrl;
+
+/*
+ *  i2c_msm_dma_mem: utility struct which holds both physical and virtual addr
+ */
+struct i2c_msm_dma_mem {
+	dma_addr_t               phy_addr;
+	void                    *vrtl_addr;
+};
+
+/*
+ * i2c_msm_tag: tag's data and its length.
+ *
+ * @len tag len can be two, four or six bytes.
+ */
+struct i2c_msm_tag {
+	u64                    val;
+	int                    len;
+};
+
+/*
+ * i2c_msm_dma_tag: similar to struct i2c_msm_tag but holds physical address.
+ *
+ * @buf physical address of entry in the tag_arr of
+ *          struct i2c_msm_xfer_mode_dma
+ * @len tag len.
+ *
+ * Hold the information from i2c_msm_dma_xfer_prepare() which is used by
+ * i2c_msm_dma_xfer_process() and freed by i2c_msm_dma_xfer_unprepare()
+ */
+struct i2c_msm_dma_tag {
+	dma_addr_t             buf;
+	size_t                 len;
+};
+
+/*
+ * i2c_msm_dma_buf: dma mapped pointer to i2c_msg data buffer and related tag
+ * @vir_addr ptr to i2c_msg buf beginning or with offset (when buf len > 256)
+ */
+struct i2c_msm_dma_buf {
+	struct i2c_msm_dma_mem   ptr;
+	enum dma_data_direction  dma_dir;
+	size_t                   len;
+	bool                     is_rx;
+	bool                     is_last;
+	struct i2c_msm_dma_tag   tag;
+	/* DMA API */
+	struct scatterlist	sg_list[2];
+};
+
+/*
+ * i2c_msm_dma_chan: per channel info
+ *
+ * @is_init true when the channel is initialized and requires eventual teardown.
+ * @name channel name (tx/rx) for debugging.
+ * @desc_cnt_cur number of occupied descriptors
+ */
+struct i2c_msm_dma_chan {
+	bool                     is_init;
+	const char              *name;
+	size_t                   desc_cnt_cur;
+	struct dma_chan         *dma_chan;
+	enum dma_transfer_direction dir;
+};
+
+enum i2c_msm_dma_chan_dir {
+	I2C_MSM_DMA_TX,
+	I2C_MSM_DMA_RX,
+	I2C_MSM_DMA_CNT,
+};
+
+enum i2c_msm_dma_state {
+	I2C_MSM_DMA_INIT_NONE, /* DMA core uninitialized */
+	I2C_MSM_DMA_INIT_CORE, /* core initialized, channels not yet, memory allocated */
+	I2C_MSM_DMA_INIT_CHAN, /* both core and channels initialized */
+};
+
+/*
+ * struct i2c_msm_xfer_mode_dma: DMA mode configuration and work space
+ *
+ * @state   specifies the DMA core and channel initialization states.
+ * @buf_arr_cnt current number of valid buffers in buf_arr. The valid buffers
+ *          are at index 0..buf_arr_cnt excluding buf_arr_cnt.
+ * @buf_arr array of descriptors which point to the user's buffer
+ *     virtual and physical address, and hold meta data about the buffer
+ *     and respective tag.
+ * @tag_arr array of tags in DMAable memory. Holds a tag per buffer of the same
+ *          index, that is tag_arr[i] is related to buf_arr[i]. Also, tag_arr[i]
+ *          is queued in the tx channel just before buf_arr[i] is queued in
+ *          the tx (output buf) or rx channel (input buffer).
+ * @eot_n_flush_stop_tags EOT and flush-stop tags to be queued to the tx
+ *          DMA channel after the last transfer when it is a read.
+ * @input_tag hw is placing input tags in the rx channel on read operations.
+ *          The value of these tags is "don't care" from DMA transfer
+ *          perspective. Thus, this single buffer is used for all the input
+ *          tags. The field is used as write only.
+ */
+struct i2c_msm_xfer_mode_dma {
+	enum i2c_msm_dma_state   state;
+	size_t                   buf_arr_cnt;
+	struct i2c_msm_dma_buf   buf_arr[I2C_MSM_DMA_DESC_ARR_SIZ];
+	struct i2c_msm_dma_mem   tag_arr;
+	struct i2c_msm_dma_mem   eot_n_flush_stop_tags;
+	struct i2c_msm_dma_mem   input_tag;
+	struct i2c_msm_dma_chan  chan[I2C_MSM_DMA_CNT];
+};
+
+/*
+ * I2C_MSM_DMA_TAG_MEM_SZ includes the following fields of
+ * struct i2c_msm_xfer_mode_dma (in order):
+ *
+ * Buffer of DMA memory:
+ * +-----------+---------+-----------+-----------+----+-----------+
+ * | input_tag | eot_... | tag_arr 0 | tag_arr 1 | .. | tag_arr n |
+ * +-----------+---------+-----------+-----------+----+-----------+
+ *
+ * Why +2?
+ * One tag buffer for the input tags. This is a write only buffer for DMA, it is
+ *    used to read the tags of the input fifo. We let them overwrite each other,
+ *    since it is a throw-away from the driver's perspective.
+ * Second tag buffer for the EOT and flush-stop tags. This is a read only
+ *    buffer (from DMA perspective). It is used to put EOT and flush-stop at the
+ *    end of every transaction.
+ */
+#define I2C_MSM_DMA_TAG_MEM_SZ  \
+	((I2C_MSM_DMA_DESC_ARR_SIZ + 2) * I2C_MSM_TAG2_MAX_LEN)
+
+/*
+ * i2c_msm_xfer_mode_fifo: operations and state of FIFO mode
+ *
+ * @ops     "base class" of i2c_msm_xfer_mode_dma. Contains the operations while
+ *          the rest of the fields contain the data.
+ * @input_fifo_sz input fifo size in bytes
+ * @output_fifo_sz output fifo size in bytes
+ * @in_rem  remaining u32 entries in input FIFO before empty
+ * @out_rem remaining u32 entries in output FIFO before full
+ * @out_buf buffer for collecting bytes to four bytes groups (u32) before
+ *          writing them to the output fifo.
+ * @out_buf_idx next free index in out_buf. 0..3
+ */
+struct i2c_msm_xfer_mode_fifo {
+	size_t                   input_fifo_sz;
+	size_t                   output_fifo_sz;
+	size_t                   in_rem;
+	size_t                   out_rem;
+	u8                       out_buf[4];
+	int                      out_buf_idx;
+};
+
+/* i2c_msm_xfer_mode_blk: operations and state of Block mode
+ *
+ * @is_init when true, struct is initialized and requires mem free on exit
+ * @in_blk_sz size of input/rx block
+ * @out_blk_sz size of output/tx block
+ * @tx_cache internal buffer to store tx data
+ * @rx_cache internal buffer to store rx data
+ * @rx_cache_idx points to the next unread index in rx cache
+ * @tx_cache_idx points to the next unwritten index in tx cache
+ * @wait_rx_blk completion object to wait on for end of blk rx transfer.
+ * @wait_tx_blk completion object to wait on for end of blk tx transfer.
+ * @complete_mask applied to QUP_OPERATIONAL to determine when blk
+ *  xfer is complete.
+ */
+struct i2c_msm_xfer_mode_blk {
+	bool                     is_init;
+	size_t                   in_blk_sz;
+	size_t                   out_blk_sz;
+	u8                       *tx_cache;
+	u8                       *rx_cache;
+	int                      rx_cache_idx;
+	int                      tx_cache_idx;
+	struct completion        wait_rx_blk;
+	struct completion        wait_tx_blk;
+	u32                      complete_mask;
+};
+
+/* INPUT_MODE and OUTPUT_MODE fields of QUP_IO_MODES register */
+enum i2c_msm_xfer_mode_id {
+	I2C_MSM_XFER_MODE_FIFO,
+	I2C_MSM_XFER_MODE_BLOCK,
+	I2C_MSM_XFER_MODE_DMA,
+	I2C_MSM_XFER_MODE_NONE, /* keep last as a counter */
+};
+
+
+struct i2c_msm_dbgfs {
+	struct dentry             *root;
+	enum msm_i2_debug_level    dbg_lvl;
+	enum i2c_msm_xfer_mode_id  force_xfer_mode;
+};
+
+/*
+ * qup_i2c_clk_path_vote: data to use bus scaling driver for clock path vote
+ *
+ * @mstr_id master id number of the i2c core or its wrapper (BLSP/GSBI).
+ *       When zero, clock path voting is disabled.
+ * @client_hdl when zero, client is not registered with the bus scaling driver,
+ *      and bus scaling functionality should not be used. When non zero, it
+ *      is a bus scaling client id and may be used to vote for clock path.
+ * @reg_err when true, registration error was detected and an error message was
+ *      logged. i2c will attempt to re-register but will log error only once.
+ *      Once registration succeeds, the flag is set to false.
+ * @actv_only when set, votes when system active and removes the vote when
+ *       system goes idle (optimises for performance). When unset, voting using
+ *       runtime pm (optimizes for power).
+ */
+struct qup_i2c_clk_path_vote {
+	u32                         mstr_id;
+	u32                         client_hdl;
+	struct msm_bus_scale_pdata *pdata;
+	bool                        reg_err;
+	bool                        actv_only;
+};
+
+/*
+ * i2c_msm_resources: OS resources
+ *
+ * @mem  I2C controller memory resource from platform data.
+ * @base I2C controller virtual base address
+ * @clk_freq_in core clock frequency in Hz
+ * @clk_freq_out bus clock frequency in Hz
+ */
+struct i2c_msm_resources {
+	struct resource             *mem;
+	void __iomem                *base; /* virtual */
+	struct clk                  *core_clk;
+	struct clk                  *iface_clk;
+	int                          clk_freq_in;
+	int                          clk_freq_out;
+	struct qup_i2c_clk_path_vote clk_path_vote;
+	int                          irq;
+	bool                         disable_dma;
+	struct pinctrl              *pinctrl;
+	struct pinctrl_state        *gpio_state_active;
+	struct pinctrl_state        *gpio_state_suspend;
+};
+
+#define I2C_MSM_PINCTRL_ACTIVE       "i2c_active"
+#define I2C_MSM_PINCTRL_SUSPEND      "i2c_sleep"
+
+/*
+ * i2c_msm_xfer_buf: current xfer position and preprocessed tags
+ *
+ * @is_init the buf is marked initialized by the first call to
+ *          i2c_msm_xfer_next_buf()
+ * @msg_idx   index of the message that the buffer is pointing to
+ * @byte_idx  index of first byte in the current buffer
+ * @end_idx   count of bytes processed from the current message. This value
+ *            is compared against len to find out if buffer is done processing.
+ * @len       number of bytes in current buffer.
+ * @is_rx when true, the current buffer points to an i2c read operation.
+ * @slv_addr 8 bit address. This is the i2c_msg->addr + rd/wr bit.
+ *
+ * Keep track of current position in the client's transfer request and
+ * pre-process a transfer's buffer and tags.
+ */
+struct i2c_msm_xfer_buf {
+	bool                       is_init;
+	int                        msg_idx;
+	int                        byte_idx;
+	int                        end_idx;
+	int                        len;
+	bool                       is_rx;
+	bool                       is_last;
+	u16                        slv_addr;
+	struct i2c_msm_tag         in_tag;
+	struct i2c_msm_tag         out_tag;
+};
+
+#ifdef DEBUG
+#define I2C_MSM_PROF_MAX_EVNTS   (64)
+#else
+#define I2C_MSM_PROF_MAX_EVNTS   (16)
+#endif
+
+/*
+ * i2c_msm_prof_event: profiling event
+ *
+ * @data Additional data about the event. The interpretation of the data is
+ *       dependent on the type field.
+ * @type event type (see enum i2c_msm_prof_event_type)
+ */
+struct i2c_msm_prof_event {
+	struct timespec time;
+	u64             data0;
+	u32             data1;
+	u32             data2;
+	u8              type;
+	u8              dump_func_id;
+};
+
+enum i2c_msm_err {
+	I2C_MSM_NO_ERR = 0,
+	I2C_MSM_ERR_NACK,
+	I2C_MSM_ERR_ARB_LOST,
+	I2C_MSM_ERR_BUS_ERR,
+	I2C_MSM_ERR_TIMEOUT,
+	I2C_MSM_ERR_CORE_CLK,
+	I2C_MSM_ERR_OVR_UNDR_RUN,
+};
+
+/*
+ * i2c_msm_xfer: A client transfer request. A list of one or more i2c messages
+ *
+ * @msgs         NULL when no active xfer. Points to array of i2c_msgs
+ *               given by the client.
+ * @msg_cnt      number of messages in msgs array.
+ * @complete     completion object to wait on for end of transfer.
+ * @rx_cnt       number of input  bytes in the client's request.
+ * @tx_cnt       number of output bytes in the client's request.
+ * @rx_ovrhd_cnt number of input  bytes due to tags.
+ * @tx_ovrhd_cnt number of output bytes due to tags.
+ * @event        profiling data. An array of timestamps of transfer events
+ * @event_cnt    number of items in event array.
+ * @is_active    true during xfer process and false after xfer end
+ * @mtx          mutex to solve multithreaded problem in xfer
+ */
+struct i2c_msm_xfer {
+	struct i2c_msg            *msgs;
+	int                        msg_cnt;
+	enum i2c_msm_xfer_mode_id  mode_id;
+	struct completion          complete;
+	struct completion          rx_complete;
+	size_t                     rx_cnt;
+	size_t                     tx_cnt;
+	size_t                     rx_ovrhd_cnt;
+	size_t                     tx_ovrhd_cnt;
+	struct i2c_msm_xfer_buf    cur_buf;
+	u32                        timeout;
+	bool                       last_is_rx;
+	enum i2c_msm_err           err;
+	struct i2c_msm_prof_event  event[I2C_MSM_PROF_MAX_EVNTS];
+	atomic_t                   event_cnt;
+	atomic_t                   is_active;
+	struct mutex               mtx;
+	struct i2c_msm_xfer_mode_fifo	fifo;
+	struct i2c_msm_xfer_mode_blk	blk;
+	struct i2c_msm_xfer_mode_dma	dma;
+};
+
+/*
+ * i2c_msm_ctrl: the driver's main struct
+ *
+ * @xfer     state of the currently processed transfer.
+ * @dbgfs    debug-fs root and values that may be set via debug-fs.
+ * @rsrcs    resources from platform data including clocks, gpios, irqs, and
+ *           memory regions.
+ * @mstr_clk_ctl cached value for programming to mstr_clk_ctl register
+ * @i2c_sts_reg	 status of QUP_I2C_MASTER_STATUS register.
+ * @qup_op_reg	 status of QUP_OPERATIONAL register.
+ */
+struct i2c_msm_ctrl {
+	struct device             *dev;
+	struct i2c_adapter         adapter;
+	struct i2c_msm_xfer        xfer;
+	struct i2c_msm_dbgfs       dbgfs;
+	struct i2c_msm_resources   rsrcs;
+	u32                        mstr_clk_ctl;
+	u32			   i2c_sts_reg;
+	u32			   qup_op_reg;
+	enum i2c_msm_power_state   pwr_state;
+};
+
+/* Enum for the profiling event types */
+enum i2c_msm_prof_evnt_type {
+	I2C_MSM_VALID_END,
+	I2C_MSM_PIP_DSCN,
+	I2C_MSM_PIP_CNCT,
+	I2C_MSM_ACTV_END,
+	I2C_MSM_IRQ_BGN,
+	I2C_MSM_IRQ_END,
+	I2C_MSM_XFER_BEG,
+	I2C_MSM_XFER_END,
+	I2C_MSM_SCAN_SUM,
+	I2C_MSM_NEXT_BUF,
+	I2C_MSM_COMPLT_OK,
+	I2C_MSM_COMPLT_FL,
+	I2C_MSM_PROF_RESET,
+};
+
+#ifdef CONFIG_I2C_MSM_PROF_DBG
+void i2c_msm_dbgfs_init(struct i2c_msm_ctrl *ctrl);
+
+void i2c_msm_dbgfs_teardown(struct i2c_msm_ctrl *ctrl);
+
+/* diagnose the i2c registers and dump the errors accordingly */
+const char *i2c_msm_dbg_tag_to_str(const struct i2c_msm_tag *tag,
+						char *buf, size_t buf_len);
+
+void i2c_msm_prof_evnt_dump(struct i2c_msm_ctrl *ctrl);
+
+/* function definitions to be used from the i2c-msm-v2-debug file */
+void i2c_msm_prof_evnt_add(struct i2c_msm_ctrl *ctrl,
+				enum msm_i2_debug_level dbg_level,
+				enum i2c_msm_prof_evnt_type event,
+				u64 data0, u32 data1, u32 data2);
+
+int i2c_msm_dbg_qup_reg_dump(struct i2c_msm_ctrl *ctrl);
+
+const char *
+i2c_msm_dbg_dma_tag_to_str(const struct i2c_msm_dma_tag *dma_tag, char *buf,
+								size_t buf_len);
+#else
+/* use dummy functions */
+static inline void i2c_msm_dbgfs_init(struct i2c_msm_ctrl *ctrl) {}
+static inline void i2c_msm_dbgfs_teardown(struct i2c_msm_ctrl *ctrl) {}
+
+static inline const char *i2c_msm_dbg_tag_to_str(const struct i2c_msm_tag *tag,
+						char *buf, size_t buf_len)
+{
+	return NULL;
+}
+static inline void i2c_msm_prof_evnt_dump(struct i2c_msm_ctrl *ctrl) {}
+
+/* function definitions to be used from the i2c-msm-v2-debug file */
+static inline void i2c_msm_prof_evnt_add(struct i2c_msm_ctrl *ctrl,
+				enum msm_i2_debug_level dbg_level,
+				enum i2c_msm_prof_evnt_type event,
+				u64 data0, u32 data1, u32 data2) {}
+
+static inline int i2c_msm_dbg_qup_reg_dump(struct i2c_msm_ctrl *ctrl)
+{
+	return true;
+}
+static inline const char *i2c_msm_dbg_dma_tag_to_str(
+			const struct i2c_msm_dma_tag *dma_tag,
+			char *buf, size_t buf_len)
+{
+	return NULL;
+}
+#endif /* CONFIG_I2C_MSM_PROF_DBG */
+#endif /* _I2C_MSM_V2_H */
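
As a worked check of the DMA tag-buffer sizing above: (I2C_MSM_DMA_DESC_ARR_SIZ
+ 2) slots of I2C_MSM_TAG2_MAX_LEN bytes gives (64 + 32 + 2) * 4 = 392 bytes
of DMAable tag memory. A compile-time assertion one could add (a sketch, not
part of the driver):

	#include <linux/bug.h>
	#include <linux/i2c/i2c-msm-v2.h>

	static inline void i2c_msm_check_tag_mem_sz(void)
	{
		/* (tx descs + rx descs + input tag + eot/flush tags) * tag size */
		BUILD_BUG_ON(I2C_MSM_DMA_TAG_MEM_SZ != (64 + 32 + 2) * 4);
	}
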
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/include/linux/if_pppolac.h	2019-01-22 16:16:28.247289366 +0100
@@ -0,0 +1,23 @@
+/* include/linux/if_pppolac.h
+ *
+ * Header for PPP on L2TP Access Concentrator / PPPoLAC Socket (RFC 2661)
+ *
+ * Copyright (C) 2009 Google, Inc.
+ * Author: Chia-chi Yeh <chiachi@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __LINUX_IF_PPPOLAC_H
+#define __LINUX_IF_PPPOLAC_H
+
+#include <uapi/linux/if_pppolac.h>
+
+#endif /* __LINUX_IF_PPPOLAC_H */
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/include/linux/if_pppopns.h	2019-01-22 16:16:28.247289366 +0100
@@ -0,0 +1,23 @@
+/* include/linux/if_pppopns.h
+ *
+ * Header for PPP on PPTP Network Server / PPPoPNS Socket (RFC 2637)
+ *
+ * Copyright (C) 2009 Google, Inc.
+ * Author: Chia-chi Yeh <chiachi@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __LINUX_IF_PPPOPNS_H
+#define __LINUX_IF_PPPOPNS_H
+
+#include <uapi/linux/if_pppopns.h>
+
+#endif /* __LINUX_IF_PPPOPNS_H */
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/include/linux/initramfs.h	2019-01-22 16:16:28.255289439 +0100
@@ -0,0 +1,32 @@
+/*
+ * include/linux/initramfs.h
+ *
+ * Copyright (C) 2015, Google
+ * Rom Lemarchand <romlem@android.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef _LINUX_INITRAMFS_H
+#define _LINUX_INITRAMFS_H
+
+#include <linux/kconfig.h>
+
+#if IS_BUILTIN(CONFIG_BLK_DEV_INITRD)
+
+int __init default_rootfs(void);
+
+#endif
+
+#endif /* _LINUX_INITRAMFS_H */
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/include/linux/input/qpnp-power-on.h	2019-01-22 16:16:28.255289439 +0100
@@ -0,0 +1,101 @@
+/* Copyright (c) 2012-2015, 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef QPNP_PON_H
+#define QPNP_PON_H
+
+#include <linux/errno.h>
+
+/**
+ * enum pon_trigger_source: List of PON trigger sources
+ * %PON_SMPL:		PON triggered by SMPL - Sudden Momentary Power Loss
+ * %PON_RTC:		PON triggered by RTC alarm
+ * %PON_DC_CHG:		PON triggered by insertion of DC charger
+ * %PON_USB_CHG:	PON triggered by insertion of USB
+ * %PON_PON1:		PON triggered by other PMIC (multi-PMIC option)
+ * %PON_CBLPWR_N:	PON triggered by power-cable insertion
+ * %PON_KPDPWR_N:	PON triggered by long press of the power-key
+ */
+enum pon_trigger_source {
+	PON_SMPL = 1,
+	PON_RTC,
+	PON_DC_CHG,
+	PON_USB_CHG,
+	PON_PON1,
+	PON_CBLPWR_N,
+	PON_KPDPWR_N,
+};
+
+/**
+ * enum pon_power_off_type: Possible power off actions to perform
+ * %PON_POWER_OFF_RESERVED:          Reserved, not used
+ * %PON_POWER_OFF_WARM_RESET:        Reset the MSM but not all PMIC peripherals
+ * %PON_POWER_OFF_SHUTDOWN:          Shutdown the MSM and PMIC completely
+ * %PON_POWER_OFF_HARD_RESET:        Reset the MSM and all PMIC peripherals
+ */
+enum pon_power_off_type {
+	PON_POWER_OFF_RESERVED		= 0x00,
+	PON_POWER_OFF_WARM_RESET	= 0x01,
+	PON_POWER_OFF_SHUTDOWN		= 0x04,
+	PON_POWER_OFF_HARD_RESET	= 0x07,
+	PON_POWER_OFF_MAX_TYPE		= 0x10,
+};
+
+enum pon_restart_reason {
+	/* 0 ~ 31 for common defined features */
+	PON_RESTART_REASON_UNKNOWN		= 0x00,
+	PON_RESTART_REASON_RECOVERY		= 0x01,
+	PON_RESTART_REASON_BOOTLOADER		= 0x02,
+	PON_RESTART_REASON_RTC			= 0x03,
+	PON_RESTART_REASON_DMVERITY_CORRUPTED	= 0x04,
+	PON_RESTART_REASON_DMVERITY_ENFORCE	= 0x05,
+	PON_RESTART_REASON_KEYS_CLEAR		= 0x06,
+
+	/* 32 ~ 63 for OEM/ODM specific features */
+	PON_RESTART_REASON_OEM_MIN		= 0x20,
+	PON_RESTART_REASON_OEM_MAX		= 0x3f,
+};
+
+#ifdef CONFIG_INPUT_QPNP_POWER_ON
+int qpnp_pon_system_pwr_off(enum pon_power_off_type type);
+int qpnp_pon_is_warm_reset(void);
+int qpnp_pon_trigger_config(enum pon_trigger_source pon_src, bool enable);
+int qpnp_pon_wd_config(bool enable);
+int qpnp_pon_set_restart_reason(enum pon_restart_reason reason);
+bool qpnp_pon_check_hard_reset_stored(void);
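+
+/*
+ * Illustrative reboot-path sketch (not part of the original header; the
+ * reason/type combination is only an example):
+ *
+ *	qpnp_pon_set_restart_reason(PON_RESTART_REASON_RECOVERY);
+ *	qpnp_pon_system_pwr_off(PON_POWER_OFF_HARD_RESET);
+ */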
+
+#else
+static inline int qpnp_pon_system_pwr_off(enum pon_power_off_type type)
+{
+	return -ENODEV;
+}
+static inline int qpnp_pon_is_warm_reset(void) { return -ENODEV; }
+static inline int qpnp_pon_trigger_config(enum pon_trigger_source pon_src,
+							bool enable)
+{
+	return -ENODEV;
+}
+static inline int qpnp_pon_wd_config(bool enable)
+{
+	return -ENODEV;
+}
+static inline int qpnp_pon_set_restart_reason(enum pon_restart_reason reason)
+{
+	return -ENODEV;
+}
+static inline bool qpnp_pon_check_hard_reset_stored(void)
+{
+	return false;
+}
+#endif
+
+#endif
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/include/linux/ion.h	2019-01-22 16:16:28.259289475 +0100
@@ -0,0 +1,6 @@
+#ifndef __LINUX_ION_H__
+#define __LINUX_ION_H__
+
+#include "../../drivers/staging/android/ion/ion.h"
+
+#endif /* __LINUX_ION_H__ */
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/include/linux/io-pgtable-fast.h	2019-01-22 16:16:28.255289439 +0100
@@ -0,0 +1,61 @@
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __LINUX_IO_PGTABLE_FAST_H
+#define __LINUX_IO_PGTABLE_FAST_H
+
+#include <linux/notifier.h>
+
+typedef u64 av8l_fast_iopte;
+
+#define iopte_pmd_offset(pmds, base, iova) ((pmds) + (((iova) - (base)) >> 12))
+
+int av8l_fast_map_public(av8l_fast_iopte *ptep, phys_addr_t paddr, size_t size,
+			 int prot);
+void av8l_fast_unmap_public(av8l_fast_iopte *ptep, size_t size);
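+
+/*
+ * Illustrative mapping sketch (not part of the original header; pmds, base,
+ * iova and paddr are hypothetical values owned by the caller):
+ *
+ *	av8l_fast_iopte *ptep = iopte_pmd_offset(pmds, base, iova);
+ *	int ret = av8l_fast_map_public(ptep, paddr, SZ_4K, IOMMU_READ);
+ *
+ *	if (!ret)
+ *		av8l_fast_unmap_public(ptep, SZ_4K);
+ */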
+
+/* events for notifiers passed to av8l_register_notify */
+#define MAPPED_OVER_STALE_TLB 1
+
+#ifdef CONFIG_IOMMU_IO_PGTABLE_FAST_PROVE_TLB
+/*
+ * Doesn't matter what we use as long as bit 0 is unset.  The reason why we
+ * need a different value at all is that certain hardware platforms have an
+ * erratum that requires a PTE to actually be zeroed out, rather than just
+ * having its valid bit unset.
+ */
+#define AV8L_FAST_PTE_UNMAPPED_NEED_TLBI 0xa
+
+void av8l_fast_clear_stale_ptes(av8l_fast_iopte *puds, u64 base,
+				u64 start, u64 end, bool skip_sync);
+void av8l_register_notify(struct notifier_block *nb);
+
+#else  /* !CONFIG_IOMMU_IO_PGTABLE_FAST_PROVE_TLB */
+
+#define AV8L_FAST_PTE_UNMAPPED_NEED_TLBI 0
+
+static inline void av8l_fast_clear_stale_ptes(av8l_fast_iopte *puds,
+					      u64 base,
+					      u64 start,
+					      u64 end,
+					      bool skip_sync)
+{
+}
+
+static inline void av8l_register_notify(struct notifier_block *nb)
+{
+}
+
+#endif	/* CONFIG_IOMMU_IO_PGTABLE_FAST_PROVE_TLB */
+
+#endif /* __LINUX_IO_PGTABLE_FAST_H */
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/include/linux/ipc_logging.h	2019-10-29 09:26:25.445220813 +0100
@@ -0,0 +1,290 @@
+/* Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _IPC_LOGGING_H
+#define _IPC_LOGGING_H
+
+#include <linux/types.h>
+
+#define MAX_MSG_SIZE 255
+
+enum {
+	TSV_TYPE_MSG_START = 1,
+	TSV_TYPE_SKB = TSV_TYPE_MSG_START,
+	TSV_TYPE_STRING,
+	TSV_TYPE_MSG_END = TSV_TYPE_STRING,
+};
+
+struct tsv_header {
+	unsigned char type;
+	unsigned char size; /* size of data field */
+};
+
+struct encode_context {
+	struct tsv_header hdr;
+	char buff[MAX_MSG_SIZE];
+	int offset;
+};
+
+struct decode_context {
+	int output_format;      /* 0 = debugfs */
+	char *buff;             /* output buffer */
+	int size;               /* size of output buffer */
+};
+
+#if defined(CONFIG_IPC_LOGGING)
+/*
+ * ipc_log_context_create: Create a debug log context
+ *                         Should not be called from atomic context
+ *
+ * @max_num_pages: Number of pages of logging space required (max. 10)
+ * @modname      : Name of the directory entry under DEBUGFS
+ * @user_version : Version number of user-defined message formats
+ *
+ * returns context id on success, NULL on failure
+ */
+void *ipc_log_context_create(int max_num_pages, const char *modname,
+		uint16_t user_version);
+
+/*
+ * msg_encode_start: Start encoding a log message
+ *
+ * @ectxt: Temporary storage to hold the encoded message
+ * @type:  Root event type defined by the module which is logging
+ */
+void msg_encode_start(struct encode_context *ectxt, uint32_t type);
+
+/*
+ * tsv_timestamp_write: Writes the current timestamp count
+ *
+ * @ectxt: Context initialized by calling msg_encode_start()
+ */
+int tsv_timestamp_write(struct encode_context *ectxt);
+
+/*
+ * tsv_qtimer_write: Writes the current QTimer timestamp count
+ *
+ * @ectxt: Context initialized by calling msg_encode_start()
+ */
+int tsv_qtimer_write(struct encode_context *ectxt);
+
+/*
+ * tsv_pointer_write: Writes a data pointer
+ *
+ * @ectxt:   Context initialized by calling msg_encode_start()
+ * @pointer: Pointer value to write
+ */
+int tsv_pointer_write(struct encode_context *ectxt, void *pointer);
+
+/*
+ * tsv_int32_write: Writes a 32-bit integer value
+ *
+ * @ectxt: Context initialized by calling msg_encode_start()
+ * @n:     Integer to write
+ */
+int tsv_int32_write(struct encode_context *ectxt, int32_t n);
+
+/*
+ * tsv_byte_array_write: Writes a byte array
+ *
+ * @ectxt:     Context initialized by calling msg_encode_start()
+ * @data:      Pointer to the data to write
+ * @data_size: Size of the data in bytes
+ */
+int tsv_byte_array_write(struct encode_context *ectxt,
+			 void *data, int data_size);
+
+/*
+ * msg_encode_end: Complete the message encode process
+ *
+ * @ectxt: Temporary storage which holds the encoded message
+ */
+void msg_encode_end(struct encode_context *ectxt);
+
+/*
+ * ipc_log_write: Commit a completed message to the log
+ *
+ * @ctxt:  Debug log context created using ipc_log_context_create()
+ * @ectxt: Temporary storage which holds the encoded message
+ */
+void ipc_log_write(void *ctxt, struct encode_context *ectxt);
+
+/*
+ * ipc_log_string: Helper function to log a string
+ *
+ * @ilctxt: Debug Log Context created using ipc_log_context_create()
+ * @fmt:    Data specified using format specifiers
+ */
+int ipc_log_string(void *ilctxt, const char *fmt, ...) __printf(2, 3);
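+
+/*
+ * Illustrative usage sketch (not part of the original header; the page
+ * count and name are arbitrary and irq is a hypothetical local):
+ *
+ *	void *ctxt = ipc_log_context_create(4, "my_driver", 0);
+ *
+ *	if (ctxt)
+ *		ipc_log_string(ctxt, "probe done, irq=%d", irq);
+ */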
+
+/**
+ * ipc_log_extract - Reads and deserializes log
+ *
+ * @ilctxt:  logging context
+ * @buff:    buffer to receive the data
+ * @size:    size of the buffer
+ * @returns: 0 if no data read; >0 number of bytes read; < 0 error
+ *
+ * If no data is available to be read, then the ilctxt::read_avail
+ * completion is reinitialized.  This allows clients to block
+ * until new log data is saved.
+ */
+int ipc_log_extract(void *ilctxt, char *buff, int size);
+
+/*
+ * IPC_SPRINTF_DECODE: Print a string to the decode context
+ *
+ * @dctxt: Decode context
+ * @args:  printf-style format string and arguments
+ */
+#define IPC_SPRINTF_DECODE(dctxt, args...) \
+do { \
+	int i; \
+	i = scnprintf(dctxt->buff, dctxt->size, args); \
+	dctxt->buff += i; \
+	dctxt->size -= i; \
+} while (0)
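+
+/*
+ * Illustrative decode-callback sketch (not part of the original header;
+ * my_dfunc and its format strings are hypothetical):
+ *
+ *	static void my_dfunc(struct encode_context *ectxt,
+ *			     struct decode_context *dctxt)
+ *	{
+ *		tsv_timestamp_read(ectxt, dctxt, "[%s] ");
+ *		tsv_byte_array_read(ectxt, dctxt, "%02x ");
+ *	}
+ */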
+
+/*
+ * tsv_timestamp_read: Reads a timestamp
+ *
+ * @ectxt:  Context retrieved by reading from log space
+ * @dctxt:  Temporary storage to hold the decoded message
+ * @format: Output format while dumping through DEBUGFS
+ */
+void tsv_timestamp_read(struct encode_context *ectxt,
+			struct decode_context *dctxt, const char *format);
+
+/*
+ * tsv_qtimer_read: Reads a QTimer timestamp
+ *
+ * @ectxt:  Context retrieved by reading from log space
+ * @dctxt:  Temporary storage to hold the decoded message
+ * @format: Output format while dumping through DEBUGFS
+ */
+void tsv_qtimer_read(struct encode_context *ectxt,
+		     struct decode_context *dctxt, const char *format);
+
+/*
+ * tsv_pointer_read: Reads a data pointer
+ *
+ * @ectxt:  Context retrieved by reading from log space
+ * @dctxt:  Temporary storage to hold the decoded message
+ * @format: Output format while dumping through DEBUGFS
+ */
+void tsv_pointer_read(struct encode_context *ectxt,
+		      struct decode_context *dctxt, const char *format);
+
+/*
+ * tsv_int32_read: Reads a 32-bit integer value
+ *
+ * @ectxt:  Context retrieved by reading from log space
+ * @dctxt:  Temporary storage to hold the decoded message
+ * @format: Output format while dumping through DEBUGFS
+ */
+int32_t tsv_int32_read(struct encode_context *ectxt,
+		       struct decode_context *dctxt, const char *format);
+
+/*
+ * tsv_byte_array_read: Reads a byte array
+ *
+ * @ectxt:  Context retrieved by reading from log space
+ * @dctxt:  Temporary storage to hold the decoded message
+ * @format: Output format while dumping through DEBUGFS
+ */
+void tsv_byte_array_read(struct encode_context *ectxt,
+			 struct decode_context *dctxt, const char *format);
+
+/*
+ * add_deserialization_func: Register a deserialization function to
+ *                           unpack the subevents of a main event
+ *
+ * @ctxt: Debug log context with which the deserialization function has
+ *        to be registered
+ * @type: Main/Root event, defined by the module which is logging, to
+ *        which this deserialization function has to be registered.
+ * @dfunc: Deserialization function to be registered
+ *
+ * Return: 0 on success, negative value on failure
+ */
+int add_deserialization_func(void *ctxt, int type,
+			void (*dfunc)(struct encode_context *,
+				      struct decode_context *));
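+
+/*
+ * Illustrative registration sketch (not part of the original header; the
+ * root type used here is arbitrary and my_dfunc is the hypothetical
+ * callback sketched above):
+ *
+ *	int rc = add_deserialization_func(ctxt, TSV_TYPE_SKB, my_dfunc);
+ */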
+
+/*
+ * ipc_log_context_destroy: Destroy debug log context
+ *
+ * @ctxt: debug log context created by calling ipc_log_context_create API.
+ */
+int ipc_log_context_destroy(void *ctxt);
+
+#else
+
+static inline void *ipc_log_context_create(int max_num_pages,
+	const char *modname, uint16_t user_version)
+{ return NULL; }
+
+static inline void msg_encode_start(struct encode_context *ectxt,
+	uint32_t type) { }
+
+static inline int tsv_timestamp_write(struct encode_context *ectxt)
+{ return -EINVAL; }
+
+static inline int tsv_qtimer_write(struct encode_context *ectxt)
+{ return -EINVAL; }
+
+static inline int tsv_pointer_write(struct encode_context *ectxt, void *pointer)
+{ return -EINVAL; }
+
+static inline int tsv_int32_write(struct encode_context *ectxt, int32_t n)
+{ return -EINVAL; }
+
+static inline int tsv_byte_array_write(struct encode_context *ectxt,
+			 void *data, int data_size)
+{ return -EINVAL; }
+
+static inline void msg_encode_end(struct encode_context *ectxt) { }
+
+static inline void ipc_log_write(void *ctxt, struct encode_context *ectxt) { }
+
+static inline int ipc_log_string(void *ilctxt, const char *fmt, ...)
+{ return -EINVAL; }
+
+static inline int ipc_log_extract(void *ilctxt, char *buff, int size)
+{ return -EINVAL; }
+
+#define IPC_SPRINTF_DECODE(dctxt, args...) do { } while (0)
+
+static inline void tsv_timestamp_read(struct encode_context *ectxt,
+			struct decode_context *dctxt, const char *format) { }
+
+static inline void tsv_qtimer_read(struct encode_context *ectxt,
+			struct decode_context *dctxt, const char *format) { }
+
+static inline void tsv_pointer_read(struct encode_context *ectxt,
+		      struct decode_context *dctxt, const char *format) { }
+
+static inline int32_t tsv_int32_read(struct encode_context *ectxt,
+		       struct decode_context *dctxt, const char *format)
+{ return 0; }
+
+static inline void tsv_byte_array_read(struct encode_context *ectxt,
+			 struct decode_context *dctxt, const char *format) { }
+
+static inline int add_deserialization_func(void *ctxt, int type,
+			void (*dfunc)(struct encode_context *,
+				      struct decode_context *))
+{ return 0; }
+
+static inline int ipc_log_context_destroy(void *ctxt)
+{ return 0; }
+
+#endif
+
+#endif
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/include/linux/ipc_router.h	2019-01-22 16:16:28.259289475 +0100
@@ -0,0 +1,357 @@
+/* Copyright (c) 2012-2015,2017 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _IPC_ROUTER_H
+#define _IPC_ROUTER_H
+
+#include <linux/types.h>
+#include <linux/socket.h>
+#include <linux/errno.h>
+#include <linux/mm.h>
+#include <linux/list.h>
+#include <linux/pm.h>
+#include <linux/msm_ipc.h>
+#include <linux/device.h>
+#include <linux/kref.h>
+
+/* Maximum Wakeup Source Name Size */
+#define MAX_WS_NAME_SZ 32
+
+#define IPC_RTR_ERR(buf, ...) \
+	pr_err("IPC_RTR: " buf, __VA_ARGS__)
+
+/**
+ * enum msm_ipc_router_event - Events that will be generated by IPC Router
+ */
+enum msm_ipc_router_event {
+	IPC_ROUTER_CTRL_CMD_DATA = 1,
+	IPC_ROUTER_CTRL_CMD_HELLO,
+	IPC_ROUTER_CTRL_CMD_BYE,
+	IPC_ROUTER_CTRL_CMD_NEW_SERVER,
+	IPC_ROUTER_CTRL_CMD_REMOVE_SERVER,
+	IPC_ROUTER_CTRL_CMD_REMOVE_CLIENT,
+	IPC_ROUTER_CTRL_CMD_RESUME_TX,
+};
+
+/**
+ * rr_control_msg - Control message structure
+ * @cmd: Command identifier for HELLO message in Version 1.
+ * @hello: Message structure for HELLO message in Version 2.
+ * @srv: Message structure for NEW_SERVER/REMOVE_SERVER events.
+ * @cli: Message structure for REMOVE_CLIENT event.
+ */
+union rr_control_msg {
+	uint32_t cmd;
+	struct {
+		uint32_t cmd;
+		uint32_t checksum;
+		uint32_t versions;
+		uint32_t capability;
+		uint32_t reserved;
+	} hello;
+	struct {
+		uint32_t cmd;
+		uint32_t service;
+		uint32_t instance;
+		uint32_t node_id;
+		uint32_t port_id;
+	} srv;
+	struct {
+		uint32_t cmd;
+		uint32_t node_id;
+		uint32_t port_id;
+	} cli;
+};
+
+struct comm_mode_info {
+	int mode;
+	void *xprt_info;
+};
+
+enum ipc_rtr_af_event_type {
+	IPCRTR_AF_INIT = 1,
+	IPCRTR_AF_DEINIT,
+};
+
+/**
+ * msm_ipc_port - Definition of IPC Router port
+ * @list: List(local/control ports) in which this port is present.
+ * @ref: Reference count for this port.
+ * @this_port: Contains port's node_id and port_id information.
+ * @port_name: Contains service & instance info if the port hosts a service.
+ * @type: Type of the port - Client, Service, Control or Security Config.
+ * @flags: Flags to identify the port state.
+ * @port_lock_lhc3: Lock to protect access to the port information.
+ * @mode_info: Communication mode of the port owner.
+ * @port_rx_q: Receive queue where incoming messages are queued.
+ * @port_rx_q_lock_lhc3: Lock to protect access to the port's rx_q.
+ * @rx_ws_name: Name of the receive wakeup source.
+ * @port_rx_ws: Wakeup source to prevent suspend until the rx_q is empty.
+ * @port_rx_wait_q: Wait queue to wait for the incoming messages.
+ * @restart_state: Flag to hold the restart state information.
+ * @restart_lock: Lock to protect access to the restart_state.
+ * @restart_wait: Wait Queue to wait for any restart events.
+ * @endpoint: Contains the information related to user-space interface.
+ * @notify: Function to notify the incoming events on the port.
+ * @check_send_permissions: Function to check access control from this port.
+ * @num_tx: Number of packets transmitted.
+ * @num_rx: Number of packets received.
+ * @num_tx_bytes: Number of bytes transmitted.
+ * @num_rx_bytes: Number of bytes received.
+ * @priv: Private information registered by the port owner.
+ */
+struct msm_ipc_port {
+	struct list_head list;
+	struct kref ref;
+
+	struct msm_ipc_port_addr this_port;
+	struct msm_ipc_port_name port_name;
+	uint32_t type;
+	unsigned flags;
+	struct mutex port_lock_lhc3;
+	struct comm_mode_info mode_info;
+
+	struct msm_ipc_port_addr dest_addr;
+	int conn_status;
+
+	struct list_head port_rx_q;
+	struct mutex port_rx_q_lock_lhc3;
+	char rx_ws_name[MAX_WS_NAME_SZ];
+	struct wakeup_source *port_rx_ws;
+	wait_queue_head_t port_rx_wait_q;
+	wait_queue_head_t port_tx_wait_q;
+
+	int restart_state;
+	spinlock_t restart_lock;
+	wait_queue_head_t restart_wait;
+
+	void *rport_info;
+	void *endpoint;
+	void (*notify)(unsigned event, void *oob_data,
+		       size_t oob_data_len, void *priv);
+	int (*check_send_permissions)(void *data);
+
+	uint32_t num_tx;
+	uint32_t num_rx;
+	unsigned long num_tx_bytes;
+	unsigned long num_rx_bytes;
+	uint32_t last_served_svc_id;
+	void *priv;
+};
+
+#ifdef CONFIG_IPC_ROUTER
+/**
+ * msm_ipc_router_create_port() - Create an IPC Router port/endpoint
+ * @notify: Callback function to notify any event on the port.
+ *   @event: Event ID to be handled.
+ *   @oob_data: Any out-of-band data associated with the event.
+ *   @oob_data_len: Size of the out-of-band data, if valid.
+ *   @priv: Private data registered during the port creation.
+ * @priv: Private info to be passed while the notification is generated.
+ *
+ * @return: Pointer to the port on success, NULL on error.
+ */
+struct msm_ipc_port *msm_ipc_router_create_port(
+	void (*notify)(unsigned event, void *oob_data,
+		       size_t oob_data_len, void *priv),
+	void *priv);
+
+/**
+ * msm_ipc_router_bind_control_port() - Bind a port as a control port
+ * @port_ptr: Port which needs to be marked as a control port.
+ *
+ * @return: 0 on success, standard Linux error codes on error.
+ */
+int msm_ipc_router_bind_control_port(struct msm_ipc_port *port_ptr);
+
+/**
+ * msm_ipc_router_lookup_server_name() - Resolve server address
+ * @srv_name: Name <service:instance> of the server to be resolved.
+ * @srv_info: Buffer to hold the resolved address.
+ * @num_entries_in_array: Number of server info the buffer can hold.
+ * @lookup_mask: Mask to specify the range of instances to be resolved.
+ *
+ * @return: Number of server addresses resolved on success, < 0 on error.
+ */
+int msm_ipc_router_lookup_server_name(struct msm_ipc_port_name *srv_name,
+				      struct msm_ipc_server_info *srv_info,
+				      int num_entries_in_array,
+				      uint32_t lookup_mask);
+
+/**
+ * msm_ipc_router_send_msg() - Send a message/packet
+ * @src: Sender's address/port.
+ * @dest: Destination address.
+ * @data: Pointer to the data to be sent.
+ * @data_len: Length of the data to be sent.
+ *
+ * @return: 0 on success, < 0 on error.
+ */
+int msm_ipc_router_send_msg(struct msm_ipc_port *src,
+			    struct msm_ipc_addr *dest,
+			    void *data, unsigned int data_len);
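+
+/*
+ * Illustrative usage sketch (not part of the original header; my_notify,
+ * dest, buf and len are hypothetical caller-side names):
+ *
+ *	struct msm_ipc_port *port;
+ *
+ *	port = msm_ipc_router_create_port(my_notify, NULL);
+ *	if (port)
+ *		msm_ipc_router_send_msg(port, &dest, buf, len);
+ */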
+
+/**
+ * msm_ipc_router_get_curr_pkt_size() - Get the packet size of the first
+ *                                      packet in the rx queue
+ * @port_ptr: Port which owns the rx queue.
+ *
+ * @return: Returns the size of the first packet, if available.
+ *          0 if no packets available, < 0 on error.
+ */
+int msm_ipc_router_get_curr_pkt_size(struct msm_ipc_port *port_ptr);
+
+/**
+ * msm_ipc_router_read_msg() - Read a message/packet
+ * @port_ptr: Receiver's port/address.
+ * @data: Pointer containing the address of the received data.
+ * @src: Address of the sender/source.
+ * @len: Length of the data being read.
+ *
+ * @return: 0 on success, < 0 on error.
+ */
+int msm_ipc_router_read_msg(struct msm_ipc_port *port_ptr,
+			    struct msm_ipc_addr *src,
+			    unsigned char **data,
+			    unsigned int *len);
+
+/**
+ * msm_ipc_router_close_port() - Close the port
+ * @port_ptr: Pointer to the port to be closed.
+ *
+ * @return: 0 on success, < 0 on error.
+ */
+int msm_ipc_router_close_port(struct msm_ipc_port *port_ptr);
+
+/**
+ * msm_ipc_router_register_server() - Register a service on a port
+ * @server_port: IPC Router port with which a service is registered.
+ * @name: Service name <service_id:instance_id> that gets registered.
+ *
+ * @return: 0 on success, standard Linux error codes on error.
+ */
+int msm_ipc_router_register_server(struct msm_ipc_port *server_port,
+				   struct msm_ipc_addr *name);
+
+/**
+ * msm_ipc_router_unregister_server() - Unregister a service from a port
+ * @server_port: Port with which a service is already registered.
+ *
+ * @return: 0 on success, standard Linux error codes on error.
+ */
+int msm_ipc_router_unregister_server(struct msm_ipc_port *server_port);
+
+/**
+ * register_ipcrtr_af_init_notifier() - Register for ipc router socket
+ *				address family initialization callback
+ * @nb: Notifier block which will be notified once address family is
+ *	initialized.
+ *
+ * Return: 0 on success, standard error code otherwise.
+ */
+int register_ipcrtr_af_init_notifier(struct notifier_block *nb);
+
+/**
+ * unregister_ipcrtr_af_init_notifier() - Unregister for ipc router socket
+ *					address family initialization callback
+ * @nb: Notifier block which will be notified once address family is
+ *	initialized.
+ *
+ * Return: 0 on success, standard error code otherwise.
+ */
+int unregister_ipcrtr_af_init_notifier(struct notifier_block *nb);
+
+/**
+ * msm_ipc_router_set_ws_allowed() - Enable/disable the wakeup source
+ *				     allowed flag
+ * @flag: Flag to set/clear the wakeup source allowed state
+ */
+void msm_ipc_router_set_ws_allowed(bool flag);
+
+#else
+
+static inline struct msm_ipc_port *msm_ipc_router_create_port(
+	void (*notify)(unsigned event, void *oob_data,
+		       size_t oob_data_len, void *priv),
+	void *priv)
+{
+	return NULL;
+}
+
+static inline int msm_ipc_router_bind_control_port(
+		struct msm_ipc_port *port_ptr)
+{
+	return -ENODEV;
+}
+
+static inline int msm_ipc_router_lookup_server_name(
+				      struct msm_ipc_port_name *srv_name,
+				      struct msm_ipc_server_info *srv_info,
+				      int num_entries_in_array,
+				      uint32_t lookup_mask)
+{
+	return -ENODEV;
+}
+
+static inline int msm_ipc_router_send_msg(struct msm_ipc_port *src,
+			    struct msm_ipc_addr *dest,
+			    void *data, unsigned int data_len)
+{
+	return -ENODEV;
+}
+
+static inline int msm_ipc_router_get_curr_pkt_size(
+					struct msm_ipc_port *port_ptr)
+{
+	return -ENODEV;
+}
+
+static inline int msm_ipc_router_read_msg(struct msm_ipc_port *port_ptr,
+			    struct msm_ipc_addr *src,
+			    unsigned char **data,
+			    unsigned int *len)
+{
+	return -ENODEV;
+}
+
+static inline int msm_ipc_router_close_port(struct msm_ipc_port *port_ptr)
+{
+	return -ENODEV;
+}
+
+static inline int msm_ipc_router_register_server(
+			struct msm_ipc_port *server_port,
+			struct msm_ipc_addr *name)
+{
+	return -ENODEV;
+}
+
+static inline int msm_ipc_router_unregister_server(
+			struct msm_ipc_port *server_port)
+{
+	return -ENODEV;
+}
+
+static inline int register_ipcrtr_af_init_notifier(struct notifier_block *nb)
+{
+	return -ENODEV;
+}
+
+static inline int unregister_ipcrtr_af_init_notifier(struct notifier_block *nb)
+{
+	return -ENODEV;
+}
+
+static inline void msm_ipc_router_set_ws_allowed(bool flag) { }
+
+#endif
+
+#endif
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/include/linux/ipc_router_xprt.h	2019-01-22 16:16:28.259289475 +0100
@@ -0,0 +1,178 @@
+/* Copyright (c) 2011-2015,2017 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _IPC_ROUTER_XPRT_H
+#define _IPC_ROUTER_XPRT_H
+
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/mm.h>
+#include <linux/list.h>
+#include <linux/platform_device.h>
+#include <linux/msm_ipc.h>
+#include <linux/ipc_router.h>
+#include <linux/kref.h>
+
+#define IPC_ROUTER_XPRT_EVENT_DATA  1
+#define IPC_ROUTER_XPRT_EVENT_OPEN  2
+#define IPC_ROUTER_XPRT_EVENT_CLOSE 3
+
+#define FRAG_PKT_WRITE_ENABLE 0x1
+
+/**
+ * rr_header_v1 - IPC Router header version 1
+ * @version: Version information.
+ * @type: IPC Router Message Type.
+ * @src_node_id: Source Node ID of the message.
+ * @src_port_id: Source Port ID of the message.
+ * @control_flag: Flag to indicate flow control.
+ * @size: Size of the IPC Router payload.
+ * @dst_node_id: Destination Node ID of the message.
+ * @dst_port_id: Destination Port ID of the message.
+ */
+struct rr_header_v1 {
+	uint32_t version;
+	uint32_t type;
+	uint32_t src_node_id;
+	uint32_t src_port_id;
+	uint32_t control_flag;
+	uint32_t size;
+	uint32_t dst_node_id;
+	uint32_t dst_port_id;
+};
+
+/**
+ * rr_header_v2 - IPC Router header version 2
+ * @version: Version information.
+ * @type: IPC Router Message Type.
+ * @control_flag: Flags to indicate flow control, optional header, etc.
+ * @opt_len: Combined size of all the optional headers in units of words.
+ * @size: Size of the IPC Router payload.
+ * @src_node_id: Source Node ID of the message.
+ * @src_port_id: Source Port ID of the message.
+ * @dst_node_id: Destination Node ID of the message.
+ * @dst_port_id: Destination Port ID of the message.
+ */
+struct rr_header_v2 {
+	uint8_t version;
+	uint8_t type;
+	uint8_t control_flag;
+	uint8_t opt_len;
+	uint32_t size;
+	uint16_t src_node_id;
+	uint16_t src_port_id;
+	uint16_t dst_node_id;
+	uint16_t dst_port_id;
+} __attribute__((__packed__));
+
+union rr_header {
+	struct rr_header_v1 hdr_v1;
+	struct rr_header_v2 hdr_v2;
+};
+
+/**
+ * rr_opt_hdr - Optional header for IPC Router header version 2
+ * @len: Total length of the optional header.
+ * @data: Pointer to the actual optional header.
+ */
+struct rr_opt_hdr {
+	size_t len;
+	unsigned char *data;
+};
+
+#define IPC_ROUTER_HDR_SIZE sizeof(union rr_header)
+#define IPCR_WORD_SIZE 4
+
+/**
+ * rr_packet - Router to Router packet structure
+ * @list: Pointer to prev & next packets in a port's rx list.
+ * @hdr: Header information extracted from or prepended to a packet.
+ * @opt_hdr: Optional header information.
+ * @pkt_fragment_q: Queue of SKBs containing payload.
+ * @length: Length of data in the chain of SKBs.
+ * @ref: Reference count for the packet.
+ * @ws_need: Flag indicating whether a wakeup source is needed.
+ */
+struct rr_packet {
+	struct list_head list;
+	struct rr_header_v1 hdr;
+	struct rr_opt_hdr opt_hdr;
+	struct sk_buff_head *pkt_fragment_q;
+	uint32_t length;
+	struct kref ref;
+	bool ws_need;
+};
+
+/**
+ * msm_ipc_router_xprt - Structure to hold XPRT specific information
+ * @name: Name of the XPRT.
+ * @link_id: Network cluster ID to which the XPRT belongs.
+ * @priv: XPRT's private data.
+ * @get_version: Method to get header version supported by the XPRT.
+ * @set_version: Method to set header version in XPRT.
+ * @get_option: Method to get XPRT specific options.
+ * @read_avail: Method to get data size available to be read from the XPRT.
+ * @read: Method to read data from the XPRT.
+ * @write_avail: Method to get write space available in the XPRT.
+ * @write: Method to write data to the XPRT.
+ * @close: Method to close the XPRT.
+ * @sft_close_done: Method to indicate to the XPRT that handling of reset
+ *                  event is complete.
+ * @get_ws_info: Method to get the wakeup source information of the XPRT
+ */
+struct msm_ipc_router_xprt {
+	char *name;
+	uint32_t link_id;
+	void *priv;
+
+	int (*get_version)(struct msm_ipc_router_xprt *xprt);
+	int (*get_option)(struct msm_ipc_router_xprt *xprt);
+	void (*set_version)(struct msm_ipc_router_xprt *xprt,
+			    unsigned version);
+	int (*read_avail)(struct msm_ipc_router_xprt *xprt);
+	int (*read)(void *data, uint32_t len,
+		    struct msm_ipc_router_xprt *xprt);
+	int (*write_avail)(struct msm_ipc_router_xprt *xprt);
+	int (*write)(void *data, uint32_t len,
+		     struct msm_ipc_router_xprt *xprt);
+	int (*close)(struct msm_ipc_router_xprt *xprt);
+	void (*sft_close_done)(struct msm_ipc_router_xprt *xprt);
+	bool (*get_ws_info)(struct msm_ipc_router_xprt *xprt);
+};
+
+void msm_ipc_router_xprt_notify(struct msm_ipc_router_xprt *xprt,
+				unsigned event,
+				void *data);
+
+/**
+ * create_pkt() - Create a Router packet
+ * @data: SKB queue to be contained inside the packet.
+ *
+ * @return: pointer to packet on success, NULL on failure.
+ */
+struct rr_packet *create_pkt(struct sk_buff_head *data);
+struct rr_packet *clone_pkt(struct rr_packet *pkt);
+void release_pkt(struct rr_packet *pkt);
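+
+/*
+ * Illustrative packet-lifecycle sketch (not part of the original header;
+ * skb_q is a hypothetical SKB queue owned by the caller):
+ *
+ *	struct rr_packet *pkt = create_pkt(skb_q);
+ *
+ *	if (pkt)
+ *		release_pkt(pkt);
+ */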
+
+/**
+ * ipc_router_peek_pkt_size() - Peek into the packet header to get potential packet size
+ * @data: Starting address of the packet which points to router header.
+ *
+ * @returns: potential packet size on success, < 0 on error.
+ *
+ * This function is used by the underlying transport abstraction layer to
+ * peek into the potential packet size of an incoming packet. This information
+ * is used to perform link layer fragmentation and re-assembly.
+ */
+int ipc_router_peek_pkt_size(char *data);
+
+#endif
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/include/linux/irqchip/msm-gpio-irq.h	2019-01-22 16:16:28.263289511 +0100
@@ -0,0 +1,51 @@
+/* Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef MSM_GPIO_IRQ_H
+#define MSM_GPIO_IRQ_H
+
+#include <linux/irq.h>
+
+#if ((defined(CONFIG_GPIO_MSM_V1) || defined(CONFIG_GPIO_MSM_V2) \
+	|| defined(CONFIG_GPIO_MSM_V3)) && !defined(CONFIG_USE_PINCTRL_IRQ))
+int __init msm_gpio_of_init(struct device_node *node,
+					struct device_node *parent);
+extern struct irq_chip msm_gpio_irq_extn;
+static inline int __init msm_tlmm_of_irq_init(struct device_node *node,
+					struct device_node *parent)
+{
+	return 0;
+}
+#elif defined(CONFIG_PINCTRL_MSM_TLMM)
+int __init msm_tlmm_of_irq_init(struct device_node *node,
+					struct device_node *parent);
+extern struct irq_chip mpm_tlmm_irq_extn;
+static inline int __init msm_gpio_of_init(struct device_node *node,
+					struct device_node *parent)
+{
+	return 0;
+}
+#else
+extern struct irq_chip mpm_pinctrl_extn;
+static inline int __init msm_tlmm_of_irq_init(struct device_node *node,
+					      struct device_node *parent)
+{
+	return 0;
+}
+
+static inline int __init msm_gpio_of_init(struct device_node *node,
+					struct device_node *parent)
+{
+	return 0;
+}
+#endif
+#endif
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/include/linux/irqchip/msm-mpm-irq.h	2019-01-22 16:16:28.263289511 +0100
@@ -0,0 +1,167 @@
+/* Copyright (c) 2010-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __MSM_MPM_IRQ_H
+#define __MSM_MPM_IRQ_H
+
+#include <linux/types.h>
+#include <linux/list.h>
+
+#define MSM_MPM_NR_MPM_IRQS  64
+
+#if defined(CONFIG_MSM_MPM_OF)
+/**
+ * msm_mpm_enable_pin() -  Enable/Disable a MPM pin for idle wakeups.
+ *
+ * @pin:	MPM pin to set
+ * @enable:	enable/disable the pin
+ *
+ * returns 0 on success or -errno on failure
+ *
+ * Drivers can call this function to configure MPM pins for wakeup from idle
+ * low power modes. The API provides direct access for configuring MPM pins
+ * that are not connected to an IRQ/GPIO.
+ */
+int msm_mpm_enable_pin(unsigned int pin, unsigned int enable);
+
+/**
+ * msm_mpm_set_pin_wake() -  Enable/Disable a MPM pin during suspend
+ *
+ * @pin:	MPM pin to set
+ * @enable:	enable/disable the pin as wakeup
+ *
+ * returns 0 on success or -errno on failure
+ *
+ * Drivers can call this function to configure MPM pins for wakeup from
+ * suspend low power modes. The API provides direct access for configuring
+ * MPM pins that are not connected to an IRQ/GPIO.
+ */
+int msm_mpm_set_pin_wake(unsigned int pin, unsigned int on);
+/**
+ * msm_mpm_set_pin_type() - Set the flowtype of a MPM pin.
+ *
+ * @pin:	MPM pin to configure
+ * @flow_type:	flowtype of the MPM pin.
+ *
+ * returns 0 on success or -errno on failure
+ *
+ * Drivers can call this function to configure the flowtype of the MPM pins.
+ * The API provides direct access for configuring MPM pins that are not
+ * connected to an IRQ/GPIO.
+ */
+int msm_mpm_set_pin_type(unsigned int pin, unsigned int flow_type);
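+
+/*
+ * Illustrative pin setup sketch (not part of the original header; the pin
+ * number and flowtype are arbitrary examples):
+ *
+ *	msm_mpm_set_pin_type(2, IRQ_TYPE_EDGE_RISING);
+ *	msm_mpm_set_pin_wake(2, 1);
+ *	msm_mpm_enable_pin(2, 1);
+ */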
+/**
+ * msm_mpm_irqs_detectable() - Check if active irqs can be monitored by MPM
+ *
+ * @from_idle: indicates if the system is entering low power mode as a part of
+ *		suspend/idle task.
+ *
+ * returns true if all active interrupts can be monitored by the MPM
+ *
+ * Low power management code calls into this API to check if all active
+ * interrupts can be monitored by MPM and choose a level such that all active
+ * interrupts can wake the system up from low power mode.
+ */
+bool msm_mpm_irqs_detectable(bool from_idle);
+/**
+ * msm_mpm_gpio_detectable() - Check if active gpio irqs can be monitored by
+ *				MPM
+ *
+ * @from_idle: indicates if the system is entering low power mode as a part of
+ *		suspend/idle task.
+ *
+ * returns true if all active GPIO interrupts can be monitored by the MPM
+ *
+ * Low power management code calls into this API to check if all active
+ * GPIO interrupts can be monitored by MPM and choose a level such that all
+ * active interrupts can wake the system up from low power mode.
+ */
+bool msm_mpm_gpio_irqs_detectable(bool from_idle);
+/**
+ * msm_mpm_enter_sleep() -Called from PM code before entering low power mode
+ *
+ * @sclk_count: wakeup time in sclk counts for programmed RPM wakeup
+ * @from_idle: indicates if the system is entering low power mode as a part of
+ *		suspend/idle task.
+ * @cpumask: the next cpu to wakeup.
+ *
+ * Low power management code calls into this API to configure the MPM to
+ * monitor the active irqs before going to sleep.
+ */
+void msm_mpm_enter_sleep(uint64_t sclk_count, bool from_idle,
+		const struct cpumask *cpumask);
+/**
+ * msm_mpm_exit_sleep() -Called from PM code after resuming from low power mode
+ *
+ * @from_idle: indicates if the system is entering low power mode as a part of
+ *		suspend/idle task.
+ *
+ * Low power management code calls into this API to query the MPM for the
+ * wakeup source and retrigger the appropriate interrupt.
+ */
+void msm_mpm_exit_sleep(bool from_idle);
+/**
+ * of_mpm_init() - Device tree initialization function
+ *
+ * The initialization function is called after GPIO/GIC device initialization
+ * routines are called and before any device irqs are requested. The MPM
+ * driver keeps track of all enabled/wakeup interrupts in the system to be
+ * able to configure the MPM when entering a system wide low power mode. The
+ * MPM is an always-on low power hardware block that monitors 64 wakeup
+ * interrupts when the system is in a low power mode. The initialization
+ * function constructs the MPM mapping between the IRQs and the MPM pins
+ * based on data in the device tree.
+ */
+void of_mpm_init(void);
+#else
+static inline int msm_mpm_enable_irq(unsigned int irq, unsigned int enable)
+{ return -ENODEV; }
+static inline int msm_mpm_set_irq_wake(unsigned int irq, unsigned int on)
+{ return -ENODEV; }
+static inline int msm_mpm_set_irq_type(unsigned int irq, unsigned int flow_type)
+{ return -ENODEV; }
+static inline int msm_mpm_enable_pin(unsigned int pin, unsigned int enable)
+{ return -ENODEV; }
+static inline int msm_mpm_set_pin_wake(unsigned int pin, unsigned int on)
+{ return -ENODEV; }
+static inline int msm_mpm_set_pin_type(unsigned int pin,
+				       unsigned int flow_type)
+{ return -ENODEV; }
+static inline bool msm_mpm_irqs_detectable(bool from_idle)
+{ return false; }
+static inline bool msm_mpm_gpio_irqs_detectable(bool from_idle)
+{ return false; }
+static inline void msm_mpm_enter_sleep(uint64_t sclk_count, bool from_idle,
+		const struct cpumask *cpumask) {}
+static inline void msm_mpm_exit_sleep(bool from_idle) {}
+static inline void of_mpm_init(void) {}
+#endif
+#ifdef CONFIG_MSM_MPM_OF
+/** msm_mpm_suspend_prepare() - Called at prepare_late() op during suspend
+ *
+ *  When called, the MPM driver checks if the wakeup interrupts can be
+ *  monitored by the MPM hardware and programs them accordingly. If wakeup
+ *  interrupts cannot be monitored, it disallows system low power modes.
+ */
+void msm_mpm_suspend_prepare(void);
+/** msm_mpm_suspend_wake() - Called during wake() op in suspend.
+ *
+ *  When called, the MPM driver sets the vote for system low power modes
+ *  depending on the active interrupts.
+ */
+void msm_mpm_suspend_wake(void);
+#else
+static inline void msm_mpm_suspend_prepare(void) {}
+static inline void msm_mpm_suspend_wake(void) {}
+#endif
+#endif /* __MSM_MPM_IRQ_H */
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/include/linux/Kbuild	2019-01-22 16:16:28.187288823 +0100
@@ -0,0 +1,2 @@
+header-y += if_pppolac.h
+header-y += if_pppopns.h
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/include/linux/kcov.h	2019-01-22 16:16:28.267289547 +0100
@@ -0,0 +1,33 @@
+#ifndef _LINUX_KCOV_H
+#define _LINUX_KCOV_H
+
+#include <uapi/linux/kcov.h>
+
+struct task_struct;
+
+#ifdef CONFIG_KCOV
+
+enum kcov_mode {
+	/* Coverage collection is not enabled yet. */
+	KCOV_MODE_DISABLED = 0,
+	/* KCOV was initialized, but tracing mode hasn't been chosen yet. */
+	KCOV_MODE_INIT = 1,
+	/*
+	 * Tracing coverage collection mode.
+	 * Covered PCs are collected in a per-task buffer.
+	 */
+	KCOV_MODE_TRACE_PC = 2,
+	/* Collecting comparison operands mode. */
+	KCOV_MODE_TRACE_CMP = 3,
+};
+
+void kcov_task_init(struct task_struct *t);
+void kcov_task_exit(struct task_struct *t);
+
+#else
+
+static inline void kcov_task_init(struct task_struct *t) {}
+static inline void kcov_task_exit(struct task_struct *t) {}
+
+#endif /* CONFIG_KCOV */
+#endif /* _LINUX_KCOV_H */
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/include/linux/keycombo.h	2019-01-22 16:16:28.267289547 +0100
@@ -0,0 +1,36 @@
+/*
+ * include/linux/keycombo.h - platform data structure for keycombo driver
+ *
+ * Copyright (C) 2014 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _LINUX_KEYCOMBO_H
+#define _LINUX_KEYCOMBO_H
+
+#define KEYCOMBO_NAME "keycombo"
+
+/*
+ * If key_down_fn and key_up_fn are both present, you are guaranteed that
+ * key_down_fn will return before key_up_fn is called, and that key_up_fn
+ * is called if and only if key_down_fn is called.
+ */
+struct keycombo_platform_data {
+	void (*key_down_fn)(void *);
+	void (*key_up_fn)(void *);
+	void *priv;
+	int key_down_delay; /* Time in ms */
+	int *keys_up;
+	int keys_down[]; /* 0 terminated */
+};
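+
+/*
+ * Illustrative platform-data sketch (not part of the original header; the
+ * callback and key codes are hypothetical, and initializing the flexible
+ * keys_down[] array in a static object relies on a GCC extension commonly
+ * used in board files):
+ *
+ *	static struct keycombo_platform_data my_pdata = {
+ *		.key_down_fn = my_down_fn,
+ *		.key_down_delay = 100,
+ *		.keys_down = { KEY_POWER, KEY_VOLUMEDOWN, 0 },
+ *	};
+ */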
+
+#endif /* _LINUX_KEYCOMBO_H */
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/include/linux/keyreset.h	2019-01-22 16:16:28.267289547 +0100
@@ -0,0 +1,29 @@
+/*
+ * include/linux/keyreset.h - platform data structure for keyreset driver
+ *
+ * Copyright (C) 2014 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _LINUX_KEYRESET_H
+#define _LINUX_KEYRESET_H
+
+#define KEYRESET_NAME "keyreset"
+
+struct keyreset_platform_data {
+	int (*reset_fn)(void);
+	int key_down_delay;
+	int *keys_up;
+	int keys_down[]; /* 0 terminated */
+};
+
+#endif /* _LINUX_KEYRESET_H */
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/include/linux/leds-qpnp-flash.h	2019-10-29 09:26:25.449220852 +0100
@@ -0,0 +1,27 @@
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __LEDS_QPNP_FLASH_H
+#define __LEDS_QPNP_FLASH_H
+
+#include <linux/leds.h>
+
+#define ENABLE_REGULATOR	BIT(0)
+#define DISABLE_REGULATOR	BIT(1)
+#define QUERY_MAX_CURRENT	BIT(2)
+
+#define FLASH_LED_PREPARE_OPTIONS_MASK	GENMASK(3, 0)
+
+int qpnp_flash_led_prepare(struct led_trigger *trig, int options,
+					int *max_current);
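+
+/*
+ * Illustrative usage sketch (not part of the original header; trig comes
+ * from the flash client and the option combination is only an example):
+ *
+ *	int max_curr_ma = 0;
+ *	int rc = qpnp_flash_led_prepare(trig,
+ *			ENABLE_REGULATOR | QUERY_MAX_CURRENT, &max_curr_ma);
+ */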
+
+#endif
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/include/linux/leds-qpnp-flash-v2.h	2019-01-22 16:16:28.271289584 +0100
@@ -0,0 +1,34 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __LEDS_QPNP_FLASH_V2_H
+#define __LEDS_QPNP_FLASH_V2_H
+
+#include <linux/leds.h>
+#include <linux/notifier.h>
+
+enum flash_led_irq_type {
+	LED_FAULT_IRQ = BIT(0),
+	MITIGATION_IRQ = BIT(1),
+	FLASH_TIMER_EXP_IRQ = BIT(2),
+	ALL_RAMP_DOWN_DONE_IRQ = BIT(3),
+	ALL_RAMP_UP_DONE_IRQ = BIT(4),
+	LED3_RAMP_UP_DONE_IRQ = BIT(5),
+	LED2_RAMP_UP_DONE_IRQ = BIT(6),
+	LED1_RAMP_UP_DONE_IRQ = BIT(7),
+	INVALID_IRQ = BIT(8),
+};
+
+int qpnp_flash_led_register_irq_notifier(struct notifier_block *nb);
+int qpnp_flash_led_unregister_irq_notifier(struct notifier_block *nb);
+
+#endif
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/include/linux/leds-qpnp-wled.h	2019-01-22 16:16:28.271289584 +0100
@@ -0,0 +1,22 @@
+/* Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#ifndef __LEDS_QPNP_WLED_H
+#define __LEDS_QPNP_WLED_H
+
+#ifdef CONFIG_LEDS_QPNP_WLED
+int qpnp_ibb_enable(bool state);
+#else
+static inline int qpnp_ibb_enable(bool state)
+{
+	return 0;
+}
+#endif
+#endif
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/include/linux/mbcache2.h	2019-01-22 16:16:28.275289620 +0100
@@ -0,0 +1,50 @@
+#ifndef _LINUX_MB2CACHE_H
+#define _LINUX_MB2CACHE_H
+
+#include <linux/hash.h>
+#include <linux/list_bl.h>
+#include <linux/list.h>
+#include <linux/atomic.h>
+#include <linux/fs.h>
+
+struct mb2_cache;
+
+struct mb2_cache_entry {
+	/* LRU list - protected by cache->c_lru_list_lock */
+	struct list_head	e_lru_list;
+	/* Hash table list - protected by bitlock in e_hash_list_head */
+	struct hlist_bl_node	e_hash_list;
+	atomic_t		e_refcnt;
+	/* Key in hash - stable during lifetime of the entry */
+	u32			e_key;
+	/* Block number of hashed block - stable during lifetime of the entry */
+	sector_t		e_block;
+	/* Head of hash list (for list bit lock) - stable */
+	struct hlist_bl_head	*e_hash_list_head;
+};
+
+struct mb2_cache *mb2_cache_create(int bucket_bits);
+void mb2_cache_destroy(struct mb2_cache *cache);
+
+int mb2_cache_entry_create(struct mb2_cache *cache, gfp_t mask, u32 key,
+			   sector_t block);
+void __mb2_cache_entry_free(struct mb2_cache_entry *entry);
+static inline int mb2_cache_entry_put(struct mb2_cache *cache,
+				      struct mb2_cache_entry *entry)
+{
+	if (!atomic_dec_and_test(&entry->e_refcnt))
+		return 0;
+	__mb2_cache_entry_free(entry);
+	return 1;
+}
+
+void mb2_cache_entry_delete_block(struct mb2_cache *cache, u32 key,
+				  sector_t block);
+struct mb2_cache_entry *mb2_cache_entry_find_first(struct mb2_cache *cache,
+						   u32 key);
+struct mb2_cache_entry *mb2_cache_entry_find_next(struct mb2_cache *cache,
+						  struct mb2_cache_entry *entry);
+void mb2_cache_entry_touch(struct mb2_cache *cache,
+			   struct mb2_cache_entry *entry);
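+
+/*
+ * Illustrative lookup-loop sketch (not part of the original header; cache,
+ * key and process_block() are hypothetical caller-side names):
+ *
+ *	struct mb2_cache_entry *entry;
+ *
+ *	for (entry = mb2_cache_entry_find_first(cache, key); entry;
+ *	     entry = mb2_cache_entry_find_next(cache, entry))
+ *		process_block(entry->e_block);
+ */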
+
+#endif	/* _LINUX_MB2CACHE_H */
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/include/linux/mdss_io_util.h	2019-01-22 16:16:28.279289656 +0100
@@ -0,0 +1,115 @@
+/* Copyright (c) 2012, 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MDSS_IO_UTIL_H__
+#define __MDSS_IO_UTIL_H__
+
+#include <linux/gpio.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+#include <linux/i2c.h>
+#include <linux/types.h>
+
+#ifdef DEBUG
+#define DEV_DBG(fmt, args...)   pr_err(fmt, ##args)
+#else
+#define DEV_DBG(fmt, args...)   pr_debug(fmt, ##args)
+#endif
+#define DEV_INFO(fmt, args...)  pr_info(fmt, ##args)
+#define DEV_WARN(fmt, args...)  pr_warn(fmt, ##args)
+#define DEV_ERR(fmt, args...)   pr_err(fmt, ##args)
+
+struct dss_io_data {
+	u32 len;
+	void __iomem *base;
+};
+
+void dss_reg_w(struct dss_io_data *io, u32 offset, u32 value, u32 debug);
+u32 dss_reg_r(struct dss_io_data *io, u32 offset, u32 debug);
+void dss_reg_dump(void __iomem *base, u32 len, const char *prefix, u32 debug);
+
+#define DSS_REG_W_ND(io, offset, val)  dss_reg_w(io, offset, val, false)
+#define DSS_REG_W(io, offset, val)     dss_reg_w(io, offset, val, true)
+#define DSS_REG_R_ND(io, offset)       dss_reg_r(io, offset, false)
+#define DSS_REG_R(io, offset)          dss_reg_r(io, offset, true)
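+
+/*
+ * Illustrative register-access sketch (not part of the original header; io
+ * is assumed to have been mapped with msm_dss_ioremap_byname() and the
+ * offset is arbitrary):
+ *
+ *	u32 val = DSS_REG_R(&io, 0x10);
+ *
+ *	DSS_REG_W(&io, 0x10, val | BIT(0));
+ */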
+
+enum dss_vreg_type {
+	DSS_REG_LDO,
+	DSS_REG_VS,
+};
+
+struct dss_vreg {
+	struct regulator *vreg; /* vreg handle */
+	char vreg_name[32];
+	int min_voltage;
+	int max_voltage;
+	int enable_load;
+	int disable_load;
+	int pre_on_sleep;
+	int post_on_sleep;
+	int pre_off_sleep;
+	int post_off_sleep;
+	bool lp_disable_allowed;
+	bool disabled;
+};
+
+struct dss_gpio {
+	unsigned gpio;
+	unsigned value;
+	char gpio_name[32];
+};
+
+enum dss_clk_type {
+	DSS_CLK_AHB, /* no set rate. rate controlled through rpm */
+	DSS_CLK_PCLK,
+	DSS_CLK_OTHER,
+};
+
+struct dss_clk {
+	struct clk *clk; /* clk handle */
+	char clk_name[32];
+	enum dss_clk_type type;
+	unsigned long rate;
+	unsigned long max_rate;
+};
+
+struct dss_module_power {
+	unsigned num_vreg;
+	struct dss_vreg *vreg_config;
+	unsigned num_gpio;
+	struct dss_gpio *gpio_config;
+	unsigned num_clk;
+	struct dss_clk *clk_config;
+};
+
+int msm_dss_ioremap_byname(struct platform_device *pdev,
+	struct dss_io_data *io_data, const char *name);
+void msm_dss_iounmap(struct dss_io_data *io_data);
+
+int msm_dss_enable_gpio(struct dss_gpio *in_gpio, int num_gpio, int enable);
+int msm_dss_gpio_enable(struct dss_gpio *in_gpio, int num_gpio, int enable);
+
+int msm_dss_config_vreg(struct device *dev, struct dss_vreg *in_vreg,
+	int num_vreg, int config);
+int msm_dss_enable_vreg(struct dss_vreg *in_vreg, int num_vreg, int enable);
+
+int msm_dss_get_clk(struct device *dev, struct dss_clk *clk_arry, int num_clk);
+void msm_dss_put_clk(struct dss_clk *clk_arry, int num_clk);
+int msm_dss_clk_set_rate(struct dss_clk *clk_arry, int num_clk);
+int msm_dss_enable_clk(struct dss_clk *clk_arry, int num_clk, int enable);
+
+int mdss_i2c_byte_read(struct i2c_client *client, uint8_t slave_addr,
+		       uint8_t reg_offset, uint8_t *read_buf);
+int mdss_i2c_byte_write(struct i2c_client *client, uint8_t slave_addr,
+			uint8_t reg_offset, uint8_t *value);
+
+#endif /* __MDSS_IO_UTIL_H__ */
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/include/linux/mfd/msm-cdc-pinctrl.h	2019-01-22 16:16:28.287289728 +0100
@@ -0,0 +1,41 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MFD_CDC_PINCTRL_H_
+#define __MFD_CDC_PINCTRL_H_
+
+#include <linux/types.h>
+#include <linux/of.h>
+
+#ifdef CONFIG_MSM_CDC_PINCTRL
+extern int msm_cdc_pinctrl_select_sleep_state(struct device_node *);
+extern int msm_cdc_pinctrl_select_active_state(struct device_node *);
+extern bool msm_cdc_pinctrl_get_state(struct device_node *);
+extern int msm_cdc_get_gpio_state(struct device_node *);
+
+#else
+static inline int msm_cdc_pinctrl_select_sleep_state(struct device_node *np)
+{
+	return 0;
+}
+static inline int msm_cdc_pinctrl_select_active_state(struct device_node *np)
+{
+	return 0;
+}
+static inline int msm_cdc_get_gpio_state(struct device_node *np)
+{
+	return 0;
+}
+
+#endif
+
+#endif
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/include/linux/mfd/msm-cdc-supply.h	2019-01-22 16:16:28.287289728 +0100
@@ -0,0 +1,48 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __CODEC_POWER_SUPPLY_H__
+#define __CODEC_POWER_SUPPLY_H__
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/regulator/consumer.h>
+
+struct cdc_regulator {
+	const char *name;
+	int min_uV;
+	int max_uV;
+	int optimum_uA;
+	bool ondemand;
+	struct regulator *regulator;
+};
+
+extern int msm_cdc_get_power_supplies(struct device *dev,
+				      struct cdc_regulator **cdc_vreg,
+				      int *total_num_supplies);
+extern int msm_cdc_disable_static_supplies(struct device *dev,
+					struct regulator_bulk_data *supplies,
+					struct cdc_regulator *cdc_vreg,
+					int num_supplies);
+extern int msm_cdc_release_supplies(struct device *dev,
+				    struct regulator_bulk_data *supplies,
+				    struct cdc_regulator *cdc_vreg,
+				    int num_supplies);
+extern int msm_cdc_enable_static_supplies(struct device *dev,
+					  struct regulator_bulk_data *supplies,
+					  struct cdc_regulator *cdc_vreg,
+					  int num_supplies);
+extern int msm_cdc_init_supplies(struct device *dev,
+				 struct regulator_bulk_data **supplies,
+				 struct cdc_regulator *cdc_vreg,
+				 int num_supplies);
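+
+/*
+ * Illustrative probe-time sketch (not part of the original header; error
+ * handling is omitted and the local variables are hypothetical):
+ *
+ *	struct cdc_regulator *cdc_vreg;
+ *	struct regulator_bulk_data *supplies;
+ *	int num;
+ *
+ *	msm_cdc_get_power_supplies(dev, &cdc_vreg, &num);
+ *	msm_cdc_init_supplies(dev, &supplies, cdc_vreg, num);
+ *	msm_cdc_enable_static_supplies(dev, supplies, cdc_vreg, num);
+ */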
+#endif
diff -Nruw linux-4.4.115-fbx/include/linux/mfd/wcd9335./registers.h linux-4.4.115-fbx/include/linux/mfd/wcd9335/registers.h
--- linux-4.4.115-fbx/include/linux/mfd/wcd9335./registers.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/include/linux/mfd/wcd9335/registers.h	2019-01-22 16:16:28.295289801 +0100
@@ -0,0 +1,1348 @@
+/*
+ * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _WCD9335_REGISTERS_H
+#define _WCD9335_REGISTERS_H
+
+#define WCD9335_PAGE_SIZE 256
+#define WCD9335_NUM_PAGES 256
+
+extern const u8 *wcd9335_reg[WCD9335_NUM_PAGES];
+
+enum {
+	PAGE_0 = 0,
+	PAGE_1,
+	PAGE_2,
+	PAGE_6 = 6,
+	PAGE_10 = 0xA,
+	PAGE_11,
+	PAGE_12,
+	PAGE_13,
+	PAGE_0X80,
+};
+
+/* Page-0 Registers */
+#define WCD9335_PAGE0_PAGE_REGISTER                      0x0000
+#define WCD9335_CODEC_RPM_CLK_BYPASS                     0x0001
+#define WCD9335_CODEC_RPM_CLK_GATE                       0x0002
+#define WCD9335_CODEC_RPM_CLK_MCLK_CFG                   0x0003
+#define WCD9335_CODEC_RPM_RST_CTL                        0x0009
+#define WCD9335_CODEC_RPM_PWR_CDC_DIG_HM_CTL             0x0011
+#define WCD9335_CODEC_RPM_PWR_CPE_DEEPSLP_1              0x0012
+#define WCD9335_CODEC_RPM_PWR_CPE_DEEPSLP_2              0x0013
+#define WCD9335_CODEC_RPM_PWR_CPE_DEEPSLP_3              0x0014
+#define WCD9335_CODEC_RPM_PWR_CPE_IRAM_SHUTDOWN          0x0015
+#define WCD9335_CODEC_RPM_PWR_CPE_DRAM1_SHUTDOWN         0x0016
+#define WCD9335_CODEC_RPM_PWR_CPE_DRAM0_SHUTDOWN_1       0x0017
+#define WCD9335_CODEC_RPM_PWR_CPE_DRAM0_SHUTDOWN_2       0x0018
+#define WCD9335_CODEC_RPM_INT_MASK                       0x001d
+#define WCD9335_CODEC_RPM_INT_STATUS                     0x001e
+#define WCD9335_CODEC_RPM_INT_CLEAR                      0x001f
+#define WCD9335_CHIP_TIER_CTRL_CHIP_ID_BYTE0             0x0021
+#define WCD9335_CHIP_TIER_CTRL_CHIP_ID_BYTE1             0x0022
+#define WCD9335_CHIP_TIER_CTRL_CHIP_ID_BYTE2             0x0023
+#define WCD9335_CHIP_TIER_CTRL_CHIP_ID_BYTE3             0x0024
+#define WCD9335_CHIP_TIER_CTRL_EFUSE_CTL                 0x0025
+#define WCD9335_CHIP_TIER_CTRL_EFUSE_TEST0               0x0026
+#define WCD9335_CHIP_TIER_CTRL_EFUSE_TEST1               0x0027
+#define WCD9335_CHIP_TIER_CTRL_EFUSE_VAL_OUT0            0x0029
+#define WCD9335_CHIP_TIER_CTRL_EFUSE_VAL_OUT1            0x002a
+#define WCD9335_CHIP_TIER_CTRL_EFUSE_VAL_OUT2            0x002b
+#define WCD9335_CHIP_TIER_CTRL_EFUSE_VAL_OUT3            0x002c
+#define WCD9335_CHIP_TIER_CTRL_EFUSE_VAL_OUT4            0x002d
+#define WCD9335_CHIP_TIER_CTRL_EFUSE_VAL_OUT5            0x002e
+#define WCD9335_CHIP_TIER_CTRL_EFUSE_VAL_OUT6            0x002f
+#define WCD9335_CHIP_TIER_CTRL_EFUSE_VAL_OUT7            0x0030
+#define WCD9335_CHIP_TIER_CTRL_EFUSE_VAL_OUT8            0x0031
+#define WCD9335_CHIP_TIER_CTRL_EFUSE_VAL_OUT9            0x0032
+#define WCD9335_CHIP_TIER_CTRL_EFUSE_VAL_OUT10           0x0033
+#define WCD9335_CHIP_TIER_CTRL_EFUSE_VAL_OUT11           0x0034
+#define WCD9335_CHIP_TIER_CTRL_EFUSE_VAL_OUT12           0x0035
+#define WCD9335_CHIP_TIER_CTRL_EFUSE_VAL_OUT13           0x0036
+#define WCD9335_CHIP_TIER_CTRL_EFUSE_VAL_OUT14           0x0037
+#define WCD9335_CHIP_TIER_CTRL_EFUSE_VAL_OUT15           0x0038
+#define WCD9335_CHIP_TIER_CTRL_EFUSE_STATUS              0x0039
+#define WCD9335_CHIP_TIER_CTRL_I2C_SLAVE_ID_NONNEGO      0x003a
+#define WCD9335_CHIP_TIER_CTRL_I2C_SLAVE_ID_1            0x003b
+#define WCD9335_CHIP_TIER_CTRL_I2C_SLAVE_ID_2            0x003c
+#define WCD9335_CHIP_TIER_CTRL_I2C_SLAVE_ID_3            0x003d
+#define WCD9335_CHIP_TIER_CTRL_ANA_WAIT_STATE_CTL        0x003e
+#define WCD9335_CHIP_TIER_CTRL_I2C_ACTIVE                0x003f
+#define WCD9335_CHIP_TIER_CTRL_PROC1_MON_CTL             0x0041
+#define WCD9335_CHIP_TIER_CTRL_PROC1_MON_STATUS          0x0042
+#define WCD9335_CHIP_TIER_CTRL_PROC1_MON_CNT_MSB         0x0043
+#define WCD9335_CHIP_TIER_CTRL_PROC1_MON_CNT_LSB         0x0044
+#define WCD9335_CHIP_TIER_CTRL_PROC2_MON_CTL             0x0045
+#define WCD9335_CHIP_TIER_CTRL_PROC2_MON_STATUS          0x0046
+#define WCD9335_CHIP_TIER_CTRL_PROC2_MON_CNT_MSB         0x0047
+#define WCD9335_CHIP_TIER_CTRL_PROC2_MON_CNT_LSB         0x0048
+#define WCD9335_CHIP_TIER_CTRL_PROC3_MON_CTL             0x0049
+#define WCD9335_CHIP_TIER_CTRL_PROC3_MON_STATUS          0x004a
+#define WCD9335_CHIP_TIER_CTRL_PROC3_MON_CNT_MSB         0x004b
+#define WCD9335_CHIP_TIER_CTRL_PROC3_MON_CNT_LSB         0x004c
+#define WCD9335_DATA_HUB_DATA_HUB_RX_I2S_CTL             0x0051
+#define WCD9335_DATA_HUB_DATA_HUB_TX_I2S_CTL             0x0052
+#define WCD9335_DATA_HUB_DATA_HUB_I2S_CLK                0x0053
+#define WCD9335_DATA_HUB_DATA_HUB_RX0_INP_CFG            0x0054
+#define WCD9335_DATA_HUB_DATA_HUB_RX1_INP_CFG            0x0055
+#define WCD9335_DATA_HUB_DATA_HUB_RX2_INP_CFG            0x0056
+#define WCD9335_DATA_HUB_DATA_HUB_RX3_INP_CFG            0x0057
+#define WCD9335_DATA_HUB_DATA_HUB_RX4_INP_CFG            0x0058
+#define WCD9335_DATA_HUB_DATA_HUB_RX5_INP_CFG            0x0059
+#define WCD9335_DATA_HUB_DATA_HUB_RX6_INP_CFG            0x005a
+#define WCD9335_DATA_HUB_DATA_HUB_RX7_INP_CFG            0x005b
+#define WCD9335_DATA_HUB_DATA_HUB_SB_TX0_INP_CFG         0x0061
+#define WCD9335_DATA_HUB_DATA_HUB_SB_TX1_INP_CFG         0x0062
+#define WCD9335_DATA_HUB_DATA_HUB_SB_TX2_INP_CFG         0x0063
+#define WCD9335_DATA_HUB_DATA_HUB_SB_TX3_INP_CFG         0x0064
+#define WCD9335_DATA_HUB_DATA_HUB_SB_TX4_INP_CFG         0x0065
+#define WCD9335_DATA_HUB_DATA_HUB_SB_TX5_INP_CFG         0x0066
+#define WCD9335_DATA_HUB_DATA_HUB_SB_TX6_INP_CFG         0x0067
+#define WCD9335_DATA_HUB_DATA_HUB_SB_TX7_INP_CFG         0x0068
+#define WCD9335_DATA_HUB_DATA_HUB_SB_TX8_INP_CFG         0x0069
+#define WCD9335_DATA_HUB_DATA_HUB_SB_TX9_INP_CFG         0x006a
+#define WCD9335_DATA_HUB_DATA_HUB_SB_TX10_INP_CFG        0x006b
+#define WCD9335_DATA_HUB_DATA_HUB_SB_TX11_INP_CFG        0x006c
+#define WCD9335_DATA_HUB_DATA_HUB_SB_TX13_INP_CFG        0x006e
+#define WCD9335_DATA_HUB_DATA_HUB_SB_TX14_INP_CFG        0x006f
+#define WCD9335_DATA_HUB_DATA_HUB_SB_TX15_INP_CFG        0x0070
+#define WCD9335_DATA_HUB_DATA_HUB_TX_I2S_SD0_L_CFG       0x0071
+#define WCD9335_DATA_HUB_DATA_HUB_TX_I2S_SD0_R_CFG       0x0072
+#define WCD9335_DATA_HUB_DATA_HUB_TX_I2S_SD1_L_CFG       0x0073
+#define WCD9335_DATA_HUB_DATA_HUB_TX_I2S_SD1_R_CFG       0x0074
+#define WCD9335_DATA_HUB_NATIVE_FIFO_SYNC                0x0075
+#define WCD9335_DATA_HUB_NATIVE_FIFO_STATUS              0x007d
+#define WCD9335_INTR_CFG                                 0x0081
+#define WCD9335_INTR_CLR_COMMIT                          0x0082
+#define WCD9335_INTR_PIN1_MASK0                          0x0089
+#define WCD9335_INTR_PIN1_MASK1                          0x008a
+#define WCD9335_INTR_PIN1_MASK2                          0x008b
+#define WCD9335_INTR_PIN1_MASK3                          0x008c
+#define WCD9335_INTR_PIN1_STATUS0                        0x0091
+#define WCD9335_INTR_PIN1_STATUS1                        0x0092
+#define WCD9335_INTR_PIN1_STATUS2                        0x0093
+#define WCD9335_INTR_PIN1_STATUS3                        0x0094
+#define WCD9335_INTR_PIN1_CLEAR0                         0x0099
+#define WCD9335_INTR_PIN1_CLEAR1                         0x009a
+#define WCD9335_INTR_PIN1_CLEAR2                         0x009b
+#define WCD9335_INTR_PIN1_CLEAR3                         0x009c
+#define WCD9335_INTR_PIN2_MASK0                          0x00a1
+#define WCD9335_INTR_PIN2_MASK1                          0x00a2
+#define WCD9335_INTR_PIN2_MASK2                          0x00a3
+#define WCD9335_INTR_PIN2_MASK3                          0x00a4
+#define WCD9335_INTR_PIN2_STATUS0                        0x00a9
+#define WCD9335_INTR_PIN2_STATUS1                        0x00aa
+#define WCD9335_INTR_PIN2_STATUS2                        0x00ab
+#define WCD9335_INTR_PIN2_STATUS3                        0x00ac
+#define WCD9335_INTR_PIN2_CLEAR0                         0x00b1
+#define WCD9335_INTR_PIN2_CLEAR1                         0x00b2
+#define WCD9335_INTR_PIN2_CLEAR2                         0x00b3
+#define WCD9335_INTR_PIN2_CLEAR3                         0x00b4
+#define WCD9335_INTR_LEVEL0                              0x00e1
+#define WCD9335_INTR_LEVEL1                              0x00e2
+#define WCD9335_INTR_LEVEL2                              0x00e3
+#define WCD9335_INTR_LEVEL3                              0x00e4
+#define WCD9335_INTR_BYPASS0                             0x00e9
+#define WCD9335_INTR_BYPASS1                             0x00ea
+#define WCD9335_INTR_BYPASS2                             0x00eb
+#define WCD9335_INTR_BYPASS3                             0x00ec
+#define WCD9335_INTR_SET0                                0x00f1
+#define WCD9335_INTR_SET1                                0x00f2
+#define WCD9335_INTR_SET2                                0x00f3
+#define WCD9335_INTR_SET3                                0x00f4
+
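+/*
+ * Sketch of servicing a pin-1 interrupt through a regmap handle (an
+ * assumption; whether WCD9335_INTR_CLR_COMMIT must be written after
+ * the clears, and with which value, is not documented here):
+ *
+ *	unsigned int status;
+ *
+ *	regmap_read(map, WCD9335_INTR_PIN1_STATUS0, &status);
+ *	regmap_write(map, WCD9335_INTR_PIN1_CLEAR0, status);
+ *	regmap_write(map, WCD9335_INTR_CLR_COMMIT, 0x01);
+ */
+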
+/* Page-1 Registers */
+#define WCD9335_PAGE1_PAGE_REGISTER                      0x0100
+#define WCD9335_CPE_FLL_USER_CTL_0                       0x0101
+#define WCD9335_CPE_FLL_USER_CTL_1                       0x0102
+#define WCD9335_CPE_FLL_USER_CTL_2                       0x0103
+#define WCD9335_CPE_FLL_USER_CTL_3                       0x0104
+#define WCD9335_CPE_FLL_USER_CTL_4                       0x0105
+#define WCD9335_CPE_FLL_USER_CTL_5                       0x0106
+#define WCD9335_CPE_FLL_USER_CTL_6                       0x0107
+#define WCD9335_CPE_FLL_USER_CTL_7                       0x0108
+#define WCD9335_CPE_FLL_USER_CTL_8                       0x0109
+#define WCD9335_CPE_FLL_USER_CTL_9                       0x010a
+#define WCD9335_CPE_FLL_L_VAL_CTL_0                      0x010b
+#define WCD9335_CPE_FLL_L_VAL_CTL_1                      0x010c
+#define WCD9335_CPE_FLL_DSM_FRAC_CTL_0                   0x010d
+#define WCD9335_CPE_FLL_DSM_FRAC_CTL_1                   0x010e
+#define WCD9335_CPE_FLL_CONFIG_CTL_0                     0x010f
+#define WCD9335_CPE_FLL_CONFIG_CTL_1                     0x0110
+#define WCD9335_CPE_FLL_CONFIG_CTL_2                     0x0111
+#define WCD9335_CPE_FLL_CONFIG_CTL_3                     0x0112
+#define WCD9335_CPE_FLL_CONFIG_CTL_4                     0x0113
+#define WCD9335_CPE_FLL_TEST_CTL_0                       0x0114
+#define WCD9335_CPE_FLL_TEST_CTL_1                       0x0115
+#define WCD9335_CPE_FLL_TEST_CTL_2                       0x0116
+#define WCD9335_CPE_FLL_TEST_CTL_3                       0x0117
+#define WCD9335_CPE_FLL_TEST_CTL_4                       0x0118
+#define WCD9335_CPE_FLL_TEST_CTL_5                       0x0119
+#define WCD9335_CPE_FLL_TEST_CTL_6                       0x011a
+#define WCD9335_CPE_FLL_TEST_CTL_7                       0x011b
+#define WCD9335_CPE_FLL_FREQ_CTL_0                       0x011c
+#define WCD9335_CPE_FLL_FREQ_CTL_1                       0x011d
+#define WCD9335_CPE_FLL_FREQ_CTL_2                       0x011e
+#define WCD9335_CPE_FLL_FREQ_CTL_3                       0x011f
+#define WCD9335_CPE_FLL_SSC_CTL_0                        0x0120
+#define WCD9335_CPE_FLL_SSC_CTL_1                        0x0121
+#define WCD9335_CPE_FLL_SSC_CTL_2                        0x0122
+#define WCD9335_CPE_FLL_SSC_CTL_3                        0x0123
+#define WCD9335_CPE_FLL_FLL_MODE                         0x0124
+#define WCD9335_CPE_FLL_STATUS_0                         0x0125
+#define WCD9335_CPE_FLL_STATUS_1                         0x0126
+#define WCD9335_CPE_FLL_STATUS_2                         0x0127
+#define WCD9335_CPE_FLL_STATUS_3                         0x0128
+#define WCD9335_I2S_FLL_USER_CTL_0                       0x0141
+#define WCD9335_I2S_FLL_USER_CTL_1                       0x0142
+#define WCD9335_I2S_FLL_USER_CTL_2                       0x0143
+#define WCD9335_I2S_FLL_USER_CTL_3                       0x0144
+#define WCD9335_I2S_FLL_USER_CTL_4                       0x0145
+#define WCD9335_I2S_FLL_USER_CTL_5                       0x0146
+#define WCD9335_I2S_FLL_USER_CTL_6                       0x0147
+#define WCD9335_I2S_FLL_USER_CTL_7                       0x0148
+#define WCD9335_I2S_FLL_USER_CTL_8                       0x0149
+#define WCD9335_I2S_FLL_USER_CTL_9                       0x014a
+#define WCD9335_I2S_FLL_L_VAL_CTL_0                      0x014b
+#define WCD9335_I2S_FLL_L_VAL_CTL_1                      0x014c
+#define WCD9335_I2S_FLL_DSM_FRAC_CTL_0                   0x014d
+#define WCD9335_I2S_FLL_DSM_FRAC_CTL_1                   0x014e
+#define WCD9335_I2S_FLL_CONFIG_CTL_0                     0x014f
+#define WCD9335_I2S_FLL_CONFIG_CTL_1                     0x0150
+#define WCD9335_I2S_FLL_CONFIG_CTL_2                     0x0151
+#define WCD9335_I2S_FLL_CONFIG_CTL_3                     0x0152
+#define WCD9335_I2S_FLL_CONFIG_CTL_4                     0x0153
+#define WCD9335_I2S_FLL_TEST_CTL_0                       0x0154
+#define WCD9335_I2S_FLL_TEST_CTL_1                       0x0155
+#define WCD9335_I2S_FLL_TEST_CTL_2                       0x0156
+#define WCD9335_I2S_FLL_TEST_CTL_3                       0x0157
+#define WCD9335_I2S_FLL_TEST_CTL_4                       0x0158
+#define WCD9335_I2S_FLL_TEST_CTL_5                       0x0159
+#define WCD9335_I2S_FLL_TEST_CTL_6                       0x015a
+#define WCD9335_I2S_FLL_TEST_CTL_7                       0x015b
+#define WCD9335_I2S_FLL_FREQ_CTL_0                       0x015c
+#define WCD9335_I2S_FLL_FREQ_CTL_1                       0x015d
+#define WCD9335_I2S_FLL_FREQ_CTL_2                       0x015e
+#define WCD9335_I2S_FLL_FREQ_CTL_3                       0x015f
+#define WCD9335_I2S_FLL_SSC_CTL_0                        0x0160
+#define WCD9335_I2S_FLL_SSC_CTL_1                        0x0161
+#define WCD9335_I2S_FLL_SSC_CTL_2                        0x0162
+#define WCD9335_I2S_FLL_SSC_CTL_3                        0x0163
+#define WCD9335_I2S_FLL_FLL_MODE                         0x0164
+#define WCD9335_I2S_FLL_STATUS_0                         0x0165
+#define WCD9335_I2S_FLL_STATUS_1                         0x0166
+#define WCD9335_I2S_FLL_STATUS_2                         0x0167
+#define WCD9335_I2S_FLL_STATUS_3                         0x0168
+#define WCD9335_SB_FLL_USER_CTL_0                        0x0181
+#define WCD9335_SB_FLL_USER_CTL_1                        0x0182
+#define WCD9335_SB_FLL_USER_CTL_2                        0x0183
+#define WCD9335_SB_FLL_USER_CTL_3                        0x0184
+#define WCD9335_SB_FLL_USER_CTL_4                        0x0185
+#define WCD9335_SB_FLL_USER_CTL_5                        0x0186
+#define WCD9335_SB_FLL_USER_CTL_6                        0x0187
+#define WCD9335_SB_FLL_USER_CTL_7                        0x0188
+#define WCD9335_SB_FLL_USER_CTL_8                        0x0189
+#define WCD9335_SB_FLL_USER_CTL_9                        0x018a
+#define WCD9335_SB_FLL_L_VAL_CTL_0                       0x018b
+#define WCD9335_SB_FLL_L_VAL_CTL_1                       0x018c
+#define WCD9335_SB_FLL_DSM_FRAC_CTL_0                    0x018d
+#define WCD9335_SB_FLL_DSM_FRAC_CTL_1                    0x018e
+#define WCD9335_SB_FLL_CONFIG_CTL_0                      0x018f
+#define WCD9335_SB_FLL_CONFIG_CTL_1                      0x0190
+#define WCD9335_SB_FLL_CONFIG_CTL_2                      0x0191
+#define WCD9335_SB_FLL_CONFIG_CTL_3                      0x0192
+#define WCD9335_SB_FLL_CONFIG_CTL_4                      0x0193
+#define WCD9335_SB_FLL_TEST_CTL_0                        0x0194
+#define WCD9335_SB_FLL_TEST_CTL_1                        0x0195
+#define WCD9335_SB_FLL_TEST_CTL_2                        0x0196
+#define WCD9335_SB_FLL_TEST_CTL_3                        0x0197
+#define WCD9335_SB_FLL_TEST_CTL_4                        0x0198
+#define WCD9335_SB_FLL_TEST_CTL_5                        0x0199
+#define WCD9335_SB_FLL_TEST_CTL_6                        0x019a
+#define WCD9335_SB_FLL_TEST_CTL_7                        0x019b
+#define WCD9335_SB_FLL_FREQ_CTL_0                        0x019c
+#define WCD9335_SB_FLL_FREQ_CTL_1                        0x019d
+#define WCD9335_SB_FLL_FREQ_CTL_2                        0x019e
+#define WCD9335_SB_FLL_FREQ_CTL_3                        0x019f
+#define WCD9335_SB_FLL_SSC_CTL_0                         0x01a0
+#define WCD9335_SB_FLL_SSC_CTL_1                         0x01a1
+#define WCD9335_SB_FLL_SSC_CTL_2                         0x01a2
+#define WCD9335_SB_FLL_SSC_CTL_3                         0x01a3
+#define WCD9335_SB_FLL_FLL_MODE                          0x01a4
+#define WCD9335_SB_FLL_STATUS_0                          0x01a5
+#define WCD9335_SB_FLL_STATUS_1                          0x01a6
+#define WCD9335_SB_FLL_STATUS_2                          0x01a7
+#define WCD9335_SB_FLL_STATUS_3                          0x01a8
+
+/* Page-2 Registers */
+#define WCD9335_PAGE2_PAGE_REGISTER                      0x0200
+#define WCD9335_CPE_SS_MEM_PTR_0                         0x0201
+#define WCD9335_CPE_SS_MEM_PTR_1                         0x0202
+#define WCD9335_CPE_SS_MEM_PTR_2                         0x0203
+#define WCD9335_CPE_SS_MEM_CTRL                          0x0205
+#define WCD9335_CPE_SS_MEM_BANK_0                        0x0206
+#define WCD9335_CPE_SS_MEM_BANK_1                        0x0207
+#define WCD9335_CPE_SS_MEM_BANK_2                        0x0208
+#define WCD9335_CPE_SS_MEM_BANK_3                        0x0209
+#define WCD9335_CPE_SS_MEM_BANK_4                        0x020a
+#define WCD9335_CPE_SS_MEM_BANK_5                        0x020b
+#define WCD9335_CPE_SS_MEM_BANK_6                        0x020c
+#define WCD9335_CPE_SS_MEM_BANK_7                        0x020d
+#define WCD9335_CPE_SS_MEM_BANK_8                        0x020e
+#define WCD9335_CPE_SS_MEM_BANK_9                        0x020f
+#define WCD9335_CPE_SS_MEM_BANK_10                       0x0210
+#define WCD9335_CPE_SS_MEM_BANK_11                       0x0211
+#define WCD9335_CPE_SS_MEM_BANK_12                       0x0212
+#define WCD9335_CPE_SS_MEM_BANK_13                       0x0213
+#define WCD9335_CPE_SS_MEM_BANK_14                       0x0214
+#define WCD9335_CPE_SS_MEM_BANK_15                       0x0215
+#define WCD9335_CPE_SS_INBOX1_TRG                        0x0216
+#define WCD9335_CPE_SS_INBOX2_TRG                        0x0217
+#define WCD9335_CPE_SS_INBOX1_0                          0x0218
+#define WCD9335_CPE_SS_INBOX1_1                          0x0219
+#define WCD9335_CPE_SS_INBOX1_2                          0x021a
+#define WCD9335_CPE_SS_INBOX1_3                          0x021b
+#define WCD9335_CPE_SS_INBOX1_4                          0x021c
+#define WCD9335_CPE_SS_INBOX1_5                          0x021d
+#define WCD9335_CPE_SS_INBOX1_6                          0x021e
+#define WCD9335_CPE_SS_INBOX1_7                          0x021f
+#define WCD9335_CPE_SS_INBOX1_8                          0x0220
+#define WCD9335_CPE_SS_INBOX1_9                          0x0221
+#define WCD9335_CPE_SS_INBOX1_10                         0x0222
+#define WCD9335_CPE_SS_INBOX1_11                         0x0223
+#define WCD9335_CPE_SS_INBOX1_12                         0x0224
+#define WCD9335_CPE_SS_INBOX1_13                         0x0225
+#define WCD9335_CPE_SS_INBOX1_14                         0x0226
+#define WCD9335_CPE_SS_INBOX1_15                         0x0227
+#define WCD9335_CPE_SS_OUTBOX1_0                         0x0228
+#define WCD9335_CPE_SS_OUTBOX1_1                         0x0229
+#define WCD9335_CPE_SS_OUTBOX1_2                         0x022a
+#define WCD9335_CPE_SS_OUTBOX1_3                         0x022b
+#define WCD9335_CPE_SS_OUTBOX1_4                         0x022c
+#define WCD9335_CPE_SS_OUTBOX1_5                         0x022d
+#define WCD9335_CPE_SS_OUTBOX1_6                         0x022e
+#define WCD9335_CPE_SS_OUTBOX1_7                         0x022f
+#define WCD9335_CPE_SS_OUTBOX1_8                         0x0230
+#define WCD9335_CPE_SS_OUTBOX1_9                         0x0231
+#define WCD9335_CPE_SS_OUTBOX1_10                        0x0232
+#define WCD9335_CPE_SS_OUTBOX1_11                        0x0233
+#define WCD9335_CPE_SS_OUTBOX1_12                        0x0234
+#define WCD9335_CPE_SS_OUTBOX1_13                        0x0235
+#define WCD9335_CPE_SS_OUTBOX1_14                        0x0236
+#define WCD9335_CPE_SS_OUTBOX1_15                        0x0237
+#define WCD9335_CPE_SS_INBOX2_0                          0x0238
+#define WCD9335_CPE_SS_INBOX2_1                          0x0239
+#define WCD9335_CPE_SS_INBOX2_2                          0x023a
+#define WCD9335_CPE_SS_INBOX2_3                          0x023b
+#define WCD9335_CPE_SS_INBOX2_4                          0x023c
+#define WCD9335_CPE_SS_INBOX2_5                          0x023d
+#define WCD9335_CPE_SS_INBOX2_6                          0x023e
+#define WCD9335_CPE_SS_INBOX2_7                          0x023f
+#define WCD9335_CPE_SS_INBOX2_8                          0x0240
+#define WCD9335_CPE_SS_INBOX2_9                          0x0241
+#define WCD9335_CPE_SS_INBOX2_10                         0x0242
+#define WCD9335_CPE_SS_INBOX2_11                         0x0243
+#define WCD9335_CPE_SS_INBOX2_12                         0x0244
+#define WCD9335_CPE_SS_INBOX2_13                         0x0245
+#define WCD9335_CPE_SS_INBOX2_14                         0x0246
+#define WCD9335_CPE_SS_INBOX2_15                         0x0247
+#define WCD9335_CPE_SS_OUTBOX2_0                         0x0248
+#define WCD9335_CPE_SS_OUTBOX2_1                         0x0249
+#define WCD9335_CPE_SS_OUTBOX2_2                         0x024a
+#define WCD9335_CPE_SS_OUTBOX2_3                         0x024b
+#define WCD9335_CPE_SS_OUTBOX2_4                         0x024c
+#define WCD9335_CPE_SS_OUTBOX2_5                         0x024d
+#define WCD9335_CPE_SS_OUTBOX2_6                         0x024e
+#define WCD9335_CPE_SS_OUTBOX2_7                         0x024f
+#define WCD9335_CPE_SS_OUTBOX2_8                         0x0250
+#define WCD9335_CPE_SS_OUTBOX2_9                         0x0251
+#define WCD9335_CPE_SS_OUTBOX2_10                        0x0252
+#define WCD9335_CPE_SS_OUTBOX2_11                        0x0253
+#define WCD9335_CPE_SS_OUTBOX2_12                        0x0254
+#define WCD9335_CPE_SS_OUTBOX2_13                        0x0255
+#define WCD9335_CPE_SS_OUTBOX2_14                        0x0256
+#define WCD9335_CPE_SS_OUTBOX2_15                        0x0257
+#define WCD9335_CPE_SS_OUTBOX1_ACK                       0x0258
+#define WCD9335_CPE_SS_OUTBOX2_ACK                       0x0259
+#define WCD9335_CPE_SS_EC_BUF_INT_PERIOD                 0x025a
+#define WCD9335_CPE_SS_US_BUF_INT_PERIOD                 0x025b
+#define WCD9335_CPE_SS_CPARMAD_BUFRDY_INT_PERIOD         0x025c
+#define WCD9335_CPE_SS_CFG                               0x025d
+#define WCD9335_CPE_SS_US_EC_MUX_CFG                     0x025e
+#define WCD9335_CPE_SS_MAD_CTL                           0x025f
+#define WCD9335_CPE_SS_CPAR_CTL                          0x0260
+#define WCD9335_CPE_SS_TX_PP_BUF_INT_PERIOD              0x0261
+#define WCD9335_CPE_SS_TX_PP_CFG                         0x0262
+#define WCD9335_CPE_SS_DMIC0_CTL                         0x0263
+#define WCD9335_CPE_SS_DMIC1_CTL                         0x0264
+#define WCD9335_CPE_SS_DMIC2_CTL                         0x0265
+#define WCD9335_CPE_SS_DMIC_CFG                          0x0266
+#define WCD9335_CPE_SS_SVA_CFG                           0x0267
+#define WCD9335_CPE_SS_CPAR_CFG                          0x0271
+#define WCD9335_CPE_SS_WDOG_CFG                          0x0272
+#define WCD9335_CPE_SS_BACKUP_INT                        0x0273
+#define WCD9335_CPE_SS_STATUS                            0x0274
+#define WCD9335_CPE_SS_CPE_OCD_CFG                       0x0275
+#define WCD9335_CPE_SS_SS_ERROR_INT_MASK                 0x0276
+#define WCD9335_CPE_SS_SS_ERROR_INT_STATUS               0x0277
+#define WCD9335_CPE_SS_SS_ERROR_INT_CLEAR                0x0278
+#define WCD9335_SOC_MAD_MAIN_CTL_1                       0x0281
+#define WCD9335_SOC_MAD_MAIN_CTL_2                       0x0282
+#define WCD9335_SOC_MAD_AUDIO_CTL_1                      0x0283
+#define WCD9335_SOC_MAD_AUDIO_CTL_2                      0x0284
+#define WCD9335_SOC_MAD_AUDIO_CTL_3                      0x0285
+#define WCD9335_SOC_MAD_AUDIO_CTL_4                      0x0286
+#define WCD9335_SOC_MAD_AUDIO_CTL_5                      0x0287
+#define WCD9335_SOC_MAD_AUDIO_CTL_6                      0x0288
+#define WCD9335_SOC_MAD_AUDIO_CTL_7                      0x0289
+#define WCD9335_SOC_MAD_AUDIO_CTL_8                      0x028a
+#define WCD9335_SOC_MAD_AUDIO_IIR_CTL_PTR                0x028b
+#define WCD9335_SOC_MAD_AUDIO_IIR_CTL_VAL                0x028c
+#define WCD9335_SOC_MAD_ULTR_CTL_1                       0x028d
+#define WCD9335_SOC_MAD_ULTR_CTL_2                       0x028e
+#define WCD9335_SOC_MAD_ULTR_CTL_3                       0x028f
+#define WCD9335_SOC_MAD_ULTR_CTL_4                       0x0290
+#define WCD9335_SOC_MAD_ULTR_CTL_5                       0x0291
+#define WCD9335_SOC_MAD_ULTR_CTL_6                       0x0292
+#define WCD9335_SOC_MAD_ULTR_CTL_7                       0x0293
+#define WCD9335_SOC_MAD_BEACON_CTL_1                     0x0294
+#define WCD9335_SOC_MAD_BEACON_CTL_2                     0x0295
+#define WCD9335_SOC_MAD_BEACON_CTL_3                     0x0296
+#define WCD9335_SOC_MAD_BEACON_CTL_4                     0x0297
+#define WCD9335_SOC_MAD_BEACON_CTL_5                     0x0298
+#define WCD9335_SOC_MAD_BEACON_CTL_6                     0x0299
+#define WCD9335_SOC_MAD_BEACON_CTL_7                     0x029a
+#define WCD9335_SOC_MAD_BEACON_CTL_8                     0x029b
+#define WCD9335_SOC_MAD_BEACON_IIR_CTL_PTR               0x029c
+#define WCD9335_SOC_MAD_BEACON_IIR_CTL_VAL               0x029d
+#define WCD9335_SOC_MAD_INP_SEL                          0x029e
+
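+/*
+ * The INBOX/OUTBOX banks above look like the host<->CPE mailbox
+ * (inferred from the register names, not stated in this patch): fill
+ * a 16-byte message, kick the CPE, then acknowledge its reply, e.g.
+ *
+ *	for (i = 0; i < 16; i++)
+ *		regmap_write(map, WCD9335_CPE_SS_INBOX1_0 + i, msg[i]);
+ *	regmap_write(map, WCD9335_CPE_SS_INBOX1_TRG, 1);
+ *	...
+ *	regmap_write(map, WCD9335_CPE_SS_OUTBOX1_ACK, 1);
+ */
+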
+/* Page-6 Registers */
+#define WCD9335_PAGE6_PAGE_REGISTER                      0x0600
+#define WCD9335_ANA_BIAS                                 0x0601
+#define WCD9335_ANA_CLK_TOP                              0x0602
+#define WCD9335_ANA_RCO                                  0x0603
+#define WCD9335_ANA_BUCK_VOUT_A                          0x0604
+#define WCD9335_ANA_BUCK_VOUT_D                          0x0605
+#define WCD9335_ANA_BUCK_CTL                             0x0606
+#define WCD9335_ANA_BUCK_STATUS                          0x0607
+#define WCD9335_ANA_RX_SUPPLIES                          0x0608
+#define WCD9335_ANA_HPH                                  0x0609
+#define WCD9335_ANA_EAR                                  0x060a
+#define WCD9335_ANA_LO_1_2                               0x060b
+#define WCD9335_ANA_LO_3_4                               0x060c
+#define WCD9335_ANA_MAD_SETUP                            0x060d
+#define WCD9335_ANA_AMIC1                                0x060e
+#define WCD9335_ANA_AMIC2                                0x060f
+#define WCD9335_ANA_AMIC3                                0x0610
+#define WCD9335_ANA_AMIC4                                0x0611
+#define WCD9335_ANA_AMIC5                                0x0612
+#define WCD9335_ANA_AMIC6                                0x0613
+#define WCD9335_ANA_MBHC_MECH                            0x0614
+#define WCD9335_ANA_MBHC_ELECT                           0x0615
+#define WCD9335_ANA_MBHC_ZDET                            0x0616
+#define WCD9335_ANA_MBHC_RESULT_1                        0x0617
+#define WCD9335_ANA_MBHC_RESULT_2                        0x0618
+#define WCD9335_ANA_MBHC_RESULT_3                        0x0619
+#define WCD9335_ANA_MBHC_BTN0                            0x061a
+#define WCD9335_ANA_MBHC_BTN1                            0x061b
+#define WCD9335_ANA_MBHC_BTN2                            0x061c
+#define WCD9335_ANA_MBHC_BTN3                            0x061d
+#define WCD9335_ANA_MBHC_BTN4                            0x061e
+#define WCD9335_ANA_MBHC_BTN5                            0x061f
+#define WCD9335_ANA_MBHC_BTN6                            0x0620
+#define WCD9335_ANA_MBHC_BTN7                            0x0621
+#define WCD9335_ANA_MICB1                                0x0622
+#define WCD9335_ANA_MICB2                                0x0623
+#define WCD9335_ANA_MICB2_RAMP                           0x0624
+#define WCD9335_ANA_MICB3                                0x0625
+#define WCD9335_ANA_MICB4                                0x0626
+#define WCD9335_ANA_VBADC                                0x0627
+#define WCD9335_BIAS_CTL                                 0x0628
+#define WCD9335_BIAS_VBG_FINE_ADJ                        0x0629
+#define WCD9335_CLOCK_TEST_CTL                           0x062d
+#define WCD9335_RCO_CTRL_1                               0x062e
+#define WCD9335_RCO_CTRL_2                               0x062f
+#define WCD9335_RCO_CAL                                  0x0630
+#define WCD9335_RCO_CAL_1                                0x0631
+#define WCD9335_RCO_CAL_2                                0x0632
+#define WCD9335_RCO_TEST_CTRL                            0x0633
+#define WCD9335_RCO_CAL_OUT_1                            0x0634
+#define WCD9335_RCO_CAL_OUT_2                            0x0635
+#define WCD9335_RCO_CAL_OUT_3                            0x0636
+#define WCD9335_RCO_CAL_OUT_4                            0x0637
+#define WCD9335_RCO_CAL_OUT_5                            0x0638
+#define WCD9335_SIDO_SIDO_MODE_1                         0x063a
+#define WCD9335_SIDO_SIDO_MODE_2                         0x063b
+#define WCD9335_SIDO_SIDO_MODE_3                         0x063c
+#define WCD9335_SIDO_SIDO_MODE_4                         0x063d
+#define WCD9335_SIDO_SIDO_VCL_1                          0x063e
+#define WCD9335_SIDO_SIDO_VCL_2                          0x063f
+#define WCD9335_SIDO_SIDO_VCL_3                          0x0640
+#define WCD9335_SIDO_SIDO_CCL_1                          0x0641
+#define WCD9335_SIDO_SIDO_CCL_2                          0x0642
+#define WCD9335_SIDO_SIDO_CCL_3                          0x0643
+#define WCD9335_SIDO_SIDO_CCL_4                          0x0644
+#define WCD9335_SIDO_SIDO_CCL_5                          0x0645
+#define WCD9335_SIDO_SIDO_CCL_6                          0x0646
+#define WCD9335_SIDO_SIDO_CCL_7                          0x0647
+#define WCD9335_SIDO_SIDO_CCL_8                          0x0648
+#define WCD9335_SIDO_SIDO_CCL_9                          0x0649
+#define WCD9335_SIDO_SIDO_CCL_10                         0x064a
+#define WCD9335_SIDO_SIDO_FILTER_1                       0x064b
+#define WCD9335_SIDO_SIDO_FILTER_2                       0x064c
+#define WCD9335_SIDO_SIDO_DRIVER_1                       0x064d
+#define WCD9335_SIDO_SIDO_DRIVER_2                       0x064e
+#define WCD9335_SIDO_SIDO_DRIVER_3                       0x064f
+#define WCD9335_SIDO_SIDO_CAL_CODE_EXT_1                 0x0650
+#define WCD9335_SIDO_SIDO_CAL_CODE_EXT_2                 0x0651
+#define WCD9335_SIDO_SIDO_CAL_CODE_OUT_1                 0x0652
+#define WCD9335_SIDO_SIDO_CAL_CODE_OUT_2                 0x0653
+#define WCD9335_SIDO_SIDO_TEST_1                         0x0654
+#define WCD9335_SIDO_SIDO_TEST_2                         0x0655
+#define WCD9335_MBHC_CTL_1                               0x0656
+#define WCD9335_MBHC_CTL_2                               0x0657
+#define WCD9335_MBHC_PLUG_DETECT_CTL                     0x0658
+#define WCD9335_MBHC_ZDET_ANA_CTL                        0x0659
+#define WCD9335_MBHC_ZDET_RAMP_CTL                       0x065a
+#define WCD9335_MBHC_FSM_DEBUG                           0x065b /* v1.x */
+#define WCD9335_MBHC_FSM_STATUS                          0x065b /* v2.0 */
+#define WCD9335_MBHC_TEST_CTL                            0x065c
+#define WCD9335_VBADC_SUBBLOCK_EN                        0x065d
+#define WCD9335_VBADC_IBIAS_FE                           0x065e
+#define WCD9335_VBADC_BIAS_ADC                           0x065f
+#define WCD9335_VBADC_FE_CTRL                            0x0660
+#define WCD9335_VBADC_ADC_REF                            0x0661
+#define WCD9335_VBADC_ADC_IO                             0x0662
+#define WCD9335_VBADC_ADC_SAR                            0x0663
+#define WCD9335_VBADC_DEBUG                              0x0664
+#define WCD9335_VBADC_ADC_DOUTMSB                        0x0665
+#define WCD9335_VBADC_ADC_DOUTLSB                        0x0666
+#define WCD9335_LDOH_MODE                                0x0667
+#define WCD9335_LDOH_BIAS                                0x0668
+#define WCD9335_LDOH_STB_LOADS                           0x0669
+#define WCD9335_LDOH_SLOWRAMP                            0x066a
+#define WCD9335_MICB1_TEST_CTL_1                         0x066b
+#define WCD9335_MICB1_TEST_CTL_2                         0x066c
+#define WCD9335_MICB1_TEST_CTL_3                         0x066d
+#define WCD9335_MICB2_TEST_CTL_1                         0x066e
+#define WCD9335_MICB2_TEST_CTL_2                         0x066f
+#define WCD9335_MICB2_TEST_CTL_3                         0x0670
+#define WCD9335_MICB3_TEST_CTL_1                         0x0671
+#define WCD9335_MICB3_TEST_CTL_2                         0x0672
+#define WCD9335_MICB3_TEST_CTL_3                         0x0673
+#define WCD9335_MICB4_TEST_CTL_1                         0x0674
+#define WCD9335_MICB4_TEST_CTL_2                         0x0675
+#define WCD9335_MICB4_TEST_CTL_3                         0x0676
+#define WCD9335_TX_COM_ADC_VCM                           0x0677
+#define WCD9335_TX_COM_BIAS_ATEST                        0x0678
+#define WCD9335_TX_COM_ADC_INT1_IB                       0x0679
+#define WCD9335_TX_COM_ADC_INT2_IB                       0x067a
+#define WCD9335_TX_COM_TXFE_DIV_CTL                      0x067b
+#define WCD9335_TX_COM_TXFE_DIV_START                    0x067c
+#define WCD9335_TX_COM_TXFE_DIV_STOP_9P6M                0x067d
+#define WCD9335_TX_COM_TXFE_DIV_STOP_12P288M             0x067e
+#define WCD9335_TX_1_2_TEST_EN                           0x067f
+#define WCD9335_TX_1_2_ADC_IB                            0x0680
+#define WCD9335_TX_1_2_ATEST_REFCTL                      0x0681
+#define WCD9335_TX_1_2_TEST_CTL                          0x0682
+#define WCD9335_TX_1_2_TEST_BLK_EN                       0x0683
+#define WCD9335_TX_1_2_TXFE_CLKDIV                       0x0684
+#define WCD9335_TX_1_2_SAR1_ERR                          0x0685
+#define WCD9335_TX_1_2_SAR2_ERR                          0x0686
+#define WCD9335_TX_3_4_TEST_EN                           0x0687
+#define WCD9335_TX_3_4_ADC_IB                            0x0688
+#define WCD9335_TX_3_4_ATEST_REFCTL                      0x0689
+#define WCD9335_TX_3_4_TEST_CTL                          0x068a
+#define WCD9335_TX_3_4_TEST_BLK_EN                       0x068b
+#define WCD9335_TX_3_4_TXFE_CLKDIV                       0x068c
+#define WCD9335_TX_3_4_SAR1_ERR                          0x068d
+#define WCD9335_TX_3_4_SAR2_ERR                          0x068e
+#define WCD9335_TX_5_6_TEST_EN                           0x068f
+#define WCD9335_TX_5_6_ADC_IB                            0x0690
+#define WCD9335_TX_5_6_ATEST_REFCTL                      0x0691
+#define WCD9335_TX_5_6_TEST_CTL                          0x0692
+#define WCD9335_TX_5_6_TEST_BLK_EN                       0x0693
+#define WCD9335_TX_5_6_TXFE_CLKDIV                       0x0694
+#define WCD9335_TX_5_6_SAR1_ERR                          0x0695
+#define WCD9335_TX_5_6_SAR2_ERR                          0x0696
+#define WCD9335_CLASSH_MODE_1                            0x0697
+#define WCD9335_CLASSH_MODE_2                            0x0698
+#define WCD9335_CLASSH_MODE_3                            0x0699
+#define WCD9335_CLASSH_CTRL_VCL_1                        0x069a
+#define WCD9335_CLASSH_CTRL_VCL_2                        0x069b
+#define WCD9335_CLASSH_CTRL_CCL_1                        0x069c
+#define WCD9335_CLASSH_CTRL_CCL_2                        0x069d
+#define WCD9335_CLASSH_CTRL_CCL_3                        0x069e
+#define WCD9335_CLASSH_CTRL_CCL_4                        0x069f
+#define WCD9335_CLASSH_CTRL_CCL_5                        0x06a0
+#define WCD9335_CLASSH_BUCK_TMUX_A_D                     0x06a1
+#define WCD9335_CLASSH_BUCK_SW_DRV_CNTL                  0x06a2
+#define WCD9335_CLASSH_SPARE                             0x06a3
+#define WCD9335_FLYBACK_EN                               0x06a4
+#define WCD9335_FLYBACK_VNEG_CTRL_1                      0x06a5
+#define WCD9335_FLYBACK_VNEG_CTRL_2                      0x06a6
+#define WCD9335_FLYBACK_VNEG_CTRL_3                      0x06a7
+#define WCD9335_FLYBACK_VNEG_CTRL_4                      0x06a8
+#define WCD9335_FLYBACK_VNEG_CTRL_5                      0x06a9
+#define WCD9335_FLYBACK_VNEG_CTRL_6                      0x06aa
+#define WCD9335_FLYBACK_VNEG_CTRL_7                      0x06ab
+#define WCD9335_FLYBACK_VNEG_CTRL_8                      0x06ac
+#define WCD9335_FLYBACK_VNEG_CTRL_9                      0x06ad
+#define WCD9335_FLYBACK_VNEG_DAC_CTRL_1                  0x06ae
+#define WCD9335_FLYBACK_VNEG_DAC_CTRL_2                  0x06af
+#define WCD9335_FLYBACK_VNEG_DAC_CTRL_3                  0x06b0
+#define WCD9335_FLYBACK_VNEG_DAC_CTRL_4                  0x06b1 /* v1.x */
+#define WCD9335_FLYBACK_CTRL_1                           0x06b1 /* v2.0 */
+#define WCD9335_FLYBACK_TEST_CTL                         0x06b2
+#define WCD9335_RX_AUX_SW_CTL                            0x06b3
+#define WCD9335_RX_PA_AUX_IN_CONN                        0x06b4
+#define WCD9335_RX_TIMER_DIV                             0x06b5
+#define WCD9335_RX_OCP_CTL                               0x06b6
+#define WCD9335_RX_OCP_COUNT                             0x06b7
+#define WCD9335_RX_BIAS_EAR_DAC                          0x06b8
+#define WCD9335_RX_BIAS_EAR_AMP                          0x06b9
+#define WCD9335_RX_BIAS_HPH_LDO                          0x06ba
+#define WCD9335_RX_BIAS_HPH_PA                           0x06bb
+#define WCD9335_RX_BIAS_HPH_RDACBUFF_CNP2                0x06bc
+#define WCD9335_RX_BIAS_HPH_RDAC_LDO                     0x06bd
+#define WCD9335_RX_BIAS_HPH_CNP1                         0x06be
+#define WCD9335_RX_BIAS_HPH_LOWPOWER                     0x06bf
+#define WCD9335_RX_BIAS_DIFFLO_PA                        0x06c0
+#define WCD9335_RX_BIAS_DIFFLO_REF                       0x06c1
+#define WCD9335_RX_BIAS_DIFFLO_LDO                       0x06c2
+#define WCD9335_RX_BIAS_SELO_DAC_PA                      0x06c3
+#define WCD9335_RX_BIAS_BUCK_RST                         0x06c4
+#define WCD9335_RX_BIAS_BUCK_VREF_ERRAMP                 0x06c5
+#define WCD9335_RX_BIAS_FLYB_ERRAMP                      0x06c6
+#define WCD9335_RX_BIAS_FLYB_BUFF                        0x06c7
+#define WCD9335_RX_BIAS_FLYB_MID_RST                     0x06c8
+#define WCD9335_HPH_L_STATUS                             0x06c9
+#define WCD9335_HPH_R_STATUS                             0x06ca
+#define WCD9335_HPH_CNP_EN                               0x06cb
+#define WCD9335_HPH_CNP_WG_CTL                           0x06cc
+#define WCD9335_HPH_CNP_WG_TIME                          0x06cd
+#define WCD9335_HPH_OCP_CTL                              0x06ce
+#define WCD9335_HPH_AUTO_CHOP                            0x06cf
+#define WCD9335_HPH_CHOP_CTL                             0x06d0
+#define WCD9335_HPH_PA_CTL1                              0x06d1
+#define WCD9335_HPH_PA_CTL2                              0x06d2
+#define WCD9335_HPH_L_EN                                 0x06d3
+#define WCD9335_HPH_L_TEST                               0x06d4
+#define WCD9335_HPH_L_ATEST                              0x06d5
+#define WCD9335_HPH_R_EN                                 0x06d6
+#define WCD9335_HPH_R_TEST                               0x06d7
+#define WCD9335_HPH_R_ATEST                              0x06d8
+#define WCD9335_HPH_RDAC_CLK_CTL1                        0x06d9
+#define WCD9335_HPH_RDAC_CLK_CTL2                        0x06da
+#define WCD9335_HPH_RDAC_LDO_CTL                         0x06db
+#define WCD9335_HPH_RDAC_CHOP_CLK_LP_CTL                 0x06dc
+#define WCD9335_HPH_REFBUFF_UHQA_CTL                     0x06dd
+#define WCD9335_HPH_REFBUFF_LP_CTL                       0x06de
+#define WCD9335_HPH_L_DAC_CTL                            0x06df
+#define WCD9335_HPH_R_DAC_CTL                            0x06e0
+#define WCD9335_EAR_EN_REG                               0x06e1
+#define WCD9335_EAR_CMBUFF                               0x06e2
+#define WCD9335_EAR_ICTL                                 0x06e3
+#define WCD9335_EAR_EN_DBG_CTL                           0x06e4
+#define WCD9335_EAR_CNP                                  0x06e5
+#define WCD9335_EAR_DAC_CTL_ATEST                        0x06e6
+#define WCD9335_EAR_STATUS_REG                           0x06e7
+#define WCD9335_EAR_OUT_SHORT                            0x06e8
+#define WCD9335_DIFF_LO_MISC                             0x06e9
+#define WCD9335_DIFF_LO_LO2_COMPANDER                    0x06ea
+#define WCD9335_DIFF_LO_LO1_COMPANDER                    0x06eb
+#define WCD9335_DIFF_LO_COMMON                           0x06ec
+#define WCD9335_DIFF_LO_BYPASS_EN                        0x06ed
+#define WCD9335_DIFF_LO_CNP                              0x06ee
+#define WCD9335_DIFF_LO_CORE_OUT_PROG                    0x06ef
+#define WCD9335_DIFF_LO_LDO_OUT_PROG                     0x06f0
+#define WCD9335_DIFF_LO_COM_SWCAP_REFBUF_FREQ            0x06f1
+#define WCD9335_DIFF_LO_COM_PA_FREQ                      0x06f2
+#define WCD9335_DIFF_LO_RESERVED_REG                     0x06f3
+#define WCD9335_DIFF_LO_LO1_STATUS_1                     0x06f4
+#define WCD9335_DIFF_LO_LO1_STATUS_2                     0x06f5
+#define WCD9335_SE_LO_COM1                               0x06f6
+#define WCD9335_SE_LO_COM2                               0x06f7
+#define WCD9335_SE_LO_LO3_GAIN                           0x06f8
+#define WCD9335_SE_LO_LO3_CTRL                           0x06f9
+#define WCD9335_SE_LO_LO4_GAIN                           0x06fa
+#define WCD9335_SE_LO_LO4_CTRL                           0x06fb
+#define WCD9335_SE_LO_LO3_STATUS                         0x06fe
+#define WCD9335_SE_LO_LO4_STATUS                         0x06ff
+
+/* Page-10 Registers */
+#define WCD9335_PAGE10_PAGE_REGISTER                     0x0a00
+#define WCD9335_CDC_ANC0_CLK_RESET_CTL                   0x0a01
+#define WCD9335_CDC_ANC0_MODE_1_CTL                      0x0a02
+#define WCD9335_CDC_ANC0_MODE_2_CTL                      0x0a03
+#define WCD9335_CDC_ANC0_FF_SHIFT                        0x0a04
+#define WCD9335_CDC_ANC0_FB_SHIFT                        0x0a05
+#define WCD9335_CDC_ANC0_LPF_FF_A_CTL                    0x0a06
+#define WCD9335_CDC_ANC0_LPF_FF_B_CTL                    0x0a07
+#define WCD9335_CDC_ANC0_LPF_FB_CTL                      0x0a08
+#define WCD9335_CDC_ANC0_SMLPF_CTL                       0x0a09
+#define WCD9335_CDC_ANC0_DCFLT_SHIFT_CTL                 0x0a0a
+#define WCD9335_CDC_ANC0_IIR_ADAPT_CTL                   0x0a0b
+#define WCD9335_CDC_ANC0_IIR_COEFF_1_CTL                 0x0a0c
+#define WCD9335_CDC_ANC0_IIR_COEFF_2_CTL                 0x0a0d
+#define WCD9335_CDC_ANC0_FF_A_GAIN_CTL                   0x0a0e
+#define WCD9335_CDC_ANC0_FF_B_GAIN_CTL                   0x0a0f
+#define WCD9335_CDC_ANC0_FB_GAIN_CTL                     0x0a10
+#define WCD9335_CDC_ANC1_CLK_RESET_CTL                   0x0a19
+#define WCD9335_CDC_ANC1_MODE_1_CTL                      0x0a1a
+#define WCD9335_CDC_ANC1_MODE_2_CTL                      0x0a1b
+#define WCD9335_CDC_ANC1_FF_SHIFT                        0x0a1c
+#define WCD9335_CDC_ANC1_FB_SHIFT                        0x0a1d
+#define WCD9335_CDC_ANC1_LPF_FF_A_CTL                    0x0a1e
+#define WCD9335_CDC_ANC1_LPF_FF_B_CTL                    0x0a1f
+#define WCD9335_CDC_ANC1_LPF_FB_CTL                      0x0a20
+#define WCD9335_CDC_ANC1_SMLPF_CTL                       0x0a21
+#define WCD9335_CDC_ANC1_DCFLT_SHIFT_CTL                 0x0a22
+#define WCD9335_CDC_ANC1_IIR_ADAPT_CTL                   0x0a23
+#define WCD9335_CDC_ANC1_IIR_COEFF_1_CTL                 0x0a24
+#define WCD9335_CDC_ANC1_IIR_COEFF_2_CTL                 0x0a25
+#define WCD9335_CDC_ANC1_FF_A_GAIN_CTL                   0x0a26
+#define WCD9335_CDC_ANC1_FF_B_GAIN_CTL                   0x0a27
+#define WCD9335_CDC_ANC1_FB_GAIN_CTL                     0x0a28
+#define WCD9335_CDC_TX0_TX_PATH_CTL                      0x0a31
+#define WCD9335_CDC_TX0_TX_PATH_CFG0                     0x0a32
+#define WCD9335_CDC_TX0_TX_PATH_CFG1                     0x0a33
+#define WCD9335_CDC_TX0_TX_VOL_CTL                       0x0a34
+#define WCD9335_CDC_TX0_TX_PATH_192_CTL                  0x0a35
+#define WCD9335_CDC_TX0_TX_PATH_192_CFG                  0x0a36
+#define WCD9335_CDC_TX0_TX_PATH_SEC0                     0x0a37
+#define WCD9335_CDC_TX0_TX_PATH_SEC1                     0x0a38
+#define WCD9335_CDC_TX0_TX_PATH_SEC2                     0x0a39
+#define WCD9335_CDC_TX0_TX_PATH_SEC3                     0x0a3a
+#define WCD9335_CDC_TX0_TX_PATH_SEC4                     0x0a3b
+#define WCD9335_CDC_TX0_TX_PATH_SEC5                     0x0a3c
+#define WCD9335_CDC_TX0_TX_PATH_SEC6                     0x0a3d
+#define WCD9335_CDC_TX0_TX_PATH_SEC7                     0x0a3e
+#define WCD9335_CDC_TX1_TX_PATH_CTL                      0x0a41
+#define WCD9335_CDC_TX1_TX_PATH_CFG0                     0x0a42
+#define WCD9335_CDC_TX1_TX_PATH_CFG1                     0x0a43
+#define WCD9335_CDC_TX1_TX_VOL_CTL                       0x0a44
+#define WCD9335_CDC_TX1_TX_PATH_192_CTL                  0x0a45
+#define WCD9335_CDC_TX1_TX_PATH_192_CFG                  0x0a46
+#define WCD9335_CDC_TX1_TX_PATH_SEC0                     0x0a47
+#define WCD9335_CDC_TX1_TX_PATH_SEC1                     0x0a48
+#define WCD9335_CDC_TX1_TX_PATH_SEC2                     0x0a49
+#define WCD9335_CDC_TX1_TX_PATH_SEC3                     0x0a4a
+#define WCD9335_CDC_TX1_TX_PATH_SEC4                     0x0a4b
+#define WCD9335_CDC_TX1_TX_PATH_SEC5                     0x0a4c
+#define WCD9335_CDC_TX1_TX_PATH_SEC6                     0x0a4d
+#define WCD9335_CDC_TX2_TX_PATH_CTL                      0x0a51
+#define WCD9335_CDC_TX2_TX_PATH_CFG0                     0x0a52
+#define WCD9335_CDC_TX2_TX_PATH_CFG1                     0x0a53
+#define WCD9335_CDC_TX2_TX_VOL_CTL                       0x0a54
+#define WCD9335_CDC_TX2_TX_PATH_192_CTL                  0x0a55
+#define WCD9335_CDC_TX2_TX_PATH_192_CFG                  0x0a56
+#define WCD9335_CDC_TX2_TX_PATH_SEC0                     0x0a57
+#define WCD9335_CDC_TX2_TX_PATH_SEC1                     0x0a58
+#define WCD9335_CDC_TX2_TX_PATH_SEC2                     0x0a59
+#define WCD9335_CDC_TX2_TX_PATH_SEC3                     0x0a5a
+#define WCD9335_CDC_TX2_TX_PATH_SEC4                     0x0a5b
+#define WCD9335_CDC_TX2_TX_PATH_SEC5                     0x0a5c
+#define WCD9335_CDC_TX2_TX_PATH_SEC6                     0x0a5d
+#define WCD9335_CDC_TX3_TX_PATH_CTL                      0x0a61
+#define WCD9335_CDC_TX3_TX_PATH_CFG0                     0x0a62
+#define WCD9335_CDC_TX3_TX_PATH_CFG1                     0x0a63
+#define WCD9335_CDC_TX3_TX_VOL_CTL                       0x0a64
+#define WCD9335_CDC_TX3_TX_PATH_192_CTL                  0x0a65
+#define WCD9335_CDC_TX3_TX_PATH_192_CFG                  0x0a66
+#define WCD9335_CDC_TX3_TX_PATH_SEC0                     0x0a67
+#define WCD9335_CDC_TX3_TX_PATH_SEC1                     0x0a68
+#define WCD9335_CDC_TX3_TX_PATH_SEC2                     0x0a69
+#define WCD9335_CDC_TX3_TX_PATH_SEC3                     0x0a6a
+#define WCD9335_CDC_TX3_TX_PATH_SEC4                     0x0a6b
+#define WCD9335_CDC_TX3_TX_PATH_SEC5                     0x0a6c
+#define WCD9335_CDC_TX3_TX_PATH_SEC6                     0x0a6d
+#define WCD9335_CDC_TX4_TX_PATH_CTL                      0x0a71
+#define WCD9335_CDC_TX4_TX_PATH_CFG0                     0x0a72
+#define WCD9335_CDC_TX4_TX_PATH_CFG1                     0x0a73
+#define WCD9335_CDC_TX4_TX_VOL_CTL                       0x0a74
+#define WCD9335_CDC_TX4_TX_PATH_192_CTL                  0x0a75
+#define WCD9335_CDC_TX4_TX_PATH_192_CFG                  0x0a76
+#define WCD9335_CDC_TX4_TX_PATH_SEC0                     0x0a77
+#define WCD9335_CDC_TX4_TX_PATH_SEC1                     0x0a78
+#define WCD9335_CDC_TX4_TX_PATH_SEC2                     0x0a79
+#define WCD9335_CDC_TX4_TX_PATH_SEC3                     0x0a7a
+#define WCD9335_CDC_TX4_TX_PATH_SEC4                     0x0a7b
+#define WCD9335_CDC_TX4_TX_PATH_SEC5                     0x0a7c
+#define WCD9335_CDC_TX4_TX_PATH_SEC6                     0x0a7d
+#define WCD9335_CDC_TX5_TX_PATH_CTL                      0x0a81
+#define WCD9335_CDC_TX5_TX_PATH_CFG0                     0x0a82
+#define WCD9335_CDC_TX5_TX_PATH_CFG1                     0x0a83
+#define WCD9335_CDC_TX5_TX_VOL_CTL                       0x0a84
+#define WCD9335_CDC_TX5_TX_PATH_192_CTL                  0x0a85
+#define WCD9335_CDC_TX5_TX_PATH_192_CFG                  0x0a86
+#define WCD9335_CDC_TX5_TX_PATH_SEC0                     0x0a87
+#define WCD9335_CDC_TX5_TX_PATH_SEC1                     0x0a88
+#define WCD9335_CDC_TX5_TX_PATH_SEC2                     0x0a89
+#define WCD9335_CDC_TX5_TX_PATH_SEC3                     0x0a8a
+#define WCD9335_CDC_TX5_TX_PATH_SEC4                     0x0a8b
+#define WCD9335_CDC_TX5_TX_PATH_SEC5                     0x0a8c
+#define WCD9335_CDC_TX5_TX_PATH_SEC6                     0x0a8d
+#define WCD9335_CDC_TX6_TX_PATH_CTL                      0x0a91
+#define WCD9335_CDC_TX6_TX_PATH_CFG0                     0x0a92
+#define WCD9335_CDC_TX6_TX_PATH_CFG1                     0x0a93
+#define WCD9335_CDC_TX6_TX_VOL_CTL                       0x0a94
+#define WCD9335_CDC_TX6_TX_PATH_192_CTL                  0x0a95
+#define WCD9335_CDC_TX6_TX_PATH_192_CFG                  0x0a96
+#define WCD9335_CDC_TX6_TX_PATH_SEC0                     0x0a97
+#define WCD9335_CDC_TX6_TX_PATH_SEC1                     0x0a98
+#define WCD9335_CDC_TX6_TX_PATH_SEC2                     0x0a99
+#define WCD9335_CDC_TX6_TX_PATH_SEC3                     0x0a9a
+#define WCD9335_CDC_TX6_TX_PATH_SEC4                     0x0a9b
+#define WCD9335_CDC_TX6_TX_PATH_SEC5                     0x0a9c
+#define WCD9335_CDC_TX6_TX_PATH_SEC6                     0x0a9d
+#define WCD9335_CDC_TX7_TX_PATH_CTL                      0x0aa1
+#define WCD9335_CDC_TX7_TX_PATH_CFG0                     0x0aa2
+#define WCD9335_CDC_TX7_TX_PATH_CFG1                     0x0aa3
+#define WCD9335_CDC_TX7_TX_VOL_CTL                       0x0aa4
+#define WCD9335_CDC_TX7_TX_PATH_192_CTL                  0x0aa5
+#define WCD9335_CDC_TX7_TX_PATH_192_CFG                  0x0aa6
+#define WCD9335_CDC_TX7_TX_PATH_SEC0                     0x0aa7
+#define WCD9335_CDC_TX7_TX_PATH_SEC1                     0x0aa8
+#define WCD9335_CDC_TX7_TX_PATH_SEC2                     0x0aa9
+#define WCD9335_CDC_TX7_TX_PATH_SEC3                     0x0aaa
+#define WCD9335_CDC_TX7_TX_PATH_SEC4                     0x0aab
+#define WCD9335_CDC_TX7_TX_PATH_SEC5                     0x0aac
+#define WCD9335_CDC_TX7_TX_PATH_SEC6                     0x0aad
+#define WCD9335_CDC_TX8_TX_PATH_CTL                      0x0ab1
+#define WCD9335_CDC_TX8_TX_PATH_CFG0                     0x0ab2
+#define WCD9335_CDC_TX8_TX_PATH_CFG1                     0x0ab3
+#define WCD9335_CDC_TX8_TX_VOL_CTL                       0x0ab4
+#define WCD9335_CDC_TX8_TX_PATH_192_CTL                  0x0ab5
+#define WCD9335_CDC_TX8_TX_PATH_192_CFG                  0x0ab6
+#define WCD9335_CDC_TX8_TX_PATH_SEC0                     0x0ab7
+#define WCD9335_CDC_TX8_TX_PATH_SEC1                     0x0ab8
+#define WCD9335_CDC_TX8_TX_PATH_SEC2                     0x0ab9
+#define WCD9335_CDC_TX8_TX_PATH_SEC3                     0x0aba
+#define WCD9335_CDC_TX8_TX_PATH_SEC4                     0x0abb
+#define WCD9335_CDC_TX8_TX_PATH_SEC5                     0x0abc
+#define WCD9335_CDC_TX8_TX_PATH_SEC6                     0x0abd
+#define WCD9335_CDC_TX9_SPKR_PROT_PATH_CTL               0x0ac2
+#define WCD9335_CDC_TX9_SPKR_PROT_PATH_CFG0              0x0ac3
+#define WCD9335_CDC_TX10_SPKR_PROT_PATH_CTL              0x0ac6
+#define WCD9335_CDC_TX10_SPKR_PROT_PATH_CFG0             0x0ac7
+#define WCD9335_CDC_TX11_SPKR_PROT_PATH_CTL              0x0aca
+#define WCD9335_CDC_TX11_SPKR_PROT_PATH_CFG0             0x0acb
+#define WCD9335_CDC_TX12_SPKR_PROT_PATH_CTL              0x0ace
+#define WCD9335_CDC_TX12_SPKR_PROT_PATH_CFG0             0x0acf
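+
+/*
+ * TX0..TX8 above repeat one decimator layout at a 0x10 stride (0x0a31,
+ * 0x0a41, ...); a hypothetical accessor, valid for n = 0..8 only since
+ * the TX9..TX12 speaker-protection paths use a sparser layout:
+ */
+#define WCD9335_CDC_TXn_TX_PATH_CTL(n) \
+	(WCD9335_CDC_TX0_TX_PATH_CTL + 0x10 * (n))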
+
+/* Page-11 Registers */
+#define WCD9335_PAGE11_PAGE_REGISTER                     0x0b00
+#define WCD9335_CDC_COMPANDER1_CTL0                      0x0b01
+#define WCD9335_CDC_COMPANDER1_CTL1                      0x0b02
+#define WCD9335_CDC_COMPANDER1_CTL2                      0x0b03
+#define WCD9335_CDC_COMPANDER1_CTL3                      0x0b04
+#define WCD9335_CDC_COMPANDER1_CTL4                      0x0b05
+#define WCD9335_CDC_COMPANDER1_CTL5                      0x0b06
+#define WCD9335_CDC_COMPANDER1_CTL6                      0x0b07
+#define WCD9335_CDC_COMPANDER1_CTL7                      0x0b08
+#define WCD9335_CDC_COMPANDER2_CTL0                      0x0b09
+#define WCD9335_CDC_COMPANDER2_CTL1                      0x0b0a
+#define WCD9335_CDC_COMPANDER2_CTL2                      0x0b0b
+#define WCD9335_CDC_COMPANDER2_CTL3                      0x0b0c
+#define WCD9335_CDC_COMPANDER2_CTL4                      0x0b0d
+#define WCD9335_CDC_COMPANDER2_CTL5                      0x0b0e
+#define WCD9335_CDC_COMPANDER2_CTL6                      0x0b0f
+#define WCD9335_CDC_COMPANDER2_CTL7                      0x0b10
+#define WCD9335_CDC_COMPANDER3_CTL0                      0x0b11
+#define WCD9335_CDC_COMPANDER3_CTL1                      0x0b12
+#define WCD9335_CDC_COMPANDER3_CTL2                      0x0b13
+#define WCD9335_CDC_COMPANDER3_CTL3                      0x0b14
+#define WCD9335_CDC_COMPANDER3_CTL4                      0x0b15
+#define WCD9335_CDC_COMPANDER3_CTL5                      0x0b16
+#define WCD9335_CDC_COMPANDER3_CTL6                      0x0b17
+#define WCD9335_CDC_COMPANDER3_CTL7                      0x0b18
+#define WCD9335_CDC_COMPANDER4_CTL0                      0x0b19
+#define WCD9335_CDC_COMPANDER4_CTL1                      0x0b1a
+#define WCD9335_CDC_COMPANDER4_CTL2                      0x0b1b
+#define WCD9335_CDC_COMPANDER4_CTL3                      0x0b1c
+#define WCD9335_CDC_COMPANDER4_CTL4                      0x0b1d
+#define WCD9335_CDC_COMPANDER4_CTL5                      0x0b1e
+#define WCD9335_CDC_COMPANDER4_CTL6                      0x0b1f
+#define WCD9335_CDC_COMPANDER4_CTL7                      0x0b20
+#define WCD9335_CDC_COMPANDER5_CTL0                      0x0b21
+#define WCD9335_CDC_COMPANDER5_CTL1                      0x0b22
+#define WCD9335_CDC_COMPANDER5_CTL2                      0x0b23
+#define WCD9335_CDC_COMPANDER5_CTL3                      0x0b24
+#define WCD9335_CDC_COMPANDER5_CTL4                      0x0b25
+#define WCD9335_CDC_COMPANDER5_CTL5                      0x0b26
+#define WCD9335_CDC_COMPANDER5_CTL6                      0x0b27
+#define WCD9335_CDC_COMPANDER5_CTL7                      0x0b28
+#define WCD9335_CDC_COMPANDER6_CTL0                      0x0b29
+#define WCD9335_CDC_COMPANDER6_CTL1                      0x0b2a
+#define WCD9335_CDC_COMPANDER6_CTL2                      0x0b2b
+#define WCD9335_CDC_COMPANDER6_CTL3                      0x0b2c
+#define WCD9335_CDC_COMPANDER6_CTL4                      0x0b2d
+#define WCD9335_CDC_COMPANDER6_CTL5                      0x0b2e
+#define WCD9335_CDC_COMPANDER6_CTL6                      0x0b2f
+#define WCD9335_CDC_COMPANDER6_CTL7                      0x0b30
+#define WCD9335_CDC_COMPANDER7_CTL0                      0x0b31
+#define WCD9335_CDC_COMPANDER7_CTL1                      0x0b32
+#define WCD9335_CDC_COMPANDER7_CTL2                      0x0b33
+#define WCD9335_CDC_COMPANDER7_CTL3                      0x0b34
+#define WCD9335_CDC_COMPANDER7_CTL4                      0x0b35
+#define WCD9335_CDC_COMPANDER7_CTL5                      0x0b36
+#define WCD9335_CDC_COMPANDER7_CTL6                      0x0b37
+#define WCD9335_CDC_COMPANDER7_CTL7                      0x0b38
+#define WCD9335_CDC_COMPANDER8_CTL0                      0x0b39
+#define WCD9335_CDC_COMPANDER8_CTL1                      0x0b3a
+#define WCD9335_CDC_COMPANDER8_CTL2                      0x0b3b
+#define WCD9335_CDC_COMPANDER8_CTL3                      0x0b3c
+#define WCD9335_CDC_COMPANDER8_CTL4                      0x0b3d
+#define WCD9335_CDC_COMPANDER8_CTL5                      0x0b3e
+#define WCD9335_CDC_COMPANDER8_CTL6                      0x0b3f
+#define WCD9335_CDC_COMPANDER8_CTL7                      0x0b40
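+
+/*
+ * Companders 1..8 each expose CTL0..CTL7 at an 8-register stride; a
+ * hypothetical accessor over that spacing:
+ */
+#define WCD9335_CDC_COMPANDERn_CTL0(n) \
+	(WCD9335_CDC_COMPANDER1_CTL0 + 8 * ((n) - 1))
+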
+#define WCD9335_CDC_RX0_RX_PATH_CTL                      0x0b41
+#define WCD9335_CDC_RX0_RX_PATH_CFG0                     0x0b42
+#define WCD9335_CDC_RX0_RX_PATH_CFG1                     0x0b43
+#define WCD9335_CDC_RX0_RX_PATH_CFG2                     0x0b44
+#define WCD9335_CDC_RX0_RX_VOL_CTL                       0x0b45
+#define WCD9335_CDC_RX0_RX_PATH_MIX_CTL                  0x0b46
+#define WCD9335_CDC_RX0_RX_PATH_MIX_CFG                  0x0b47
+#define WCD9335_CDC_RX0_RX_VOL_MIX_CTL                   0x0b48
+#define WCD9335_CDC_RX0_RX_PATH_SEC0                     0x0b49
+#define WCD9335_CDC_RX0_RX_PATH_SEC1                     0x0b4a
+#define WCD9335_CDC_RX0_RX_PATH_SEC2                     0x0b4b
+#define WCD9335_CDC_RX0_RX_PATH_SEC3                     0x0b4c
+#define WCD9335_CDC_RX0_RX_PATH_SEC5                     0x0b4e
+#define WCD9335_CDC_RX0_RX_PATH_SEC6                     0x0b4f
+#define WCD9335_CDC_RX0_RX_PATH_SEC7                     0x0b50
+#define WCD9335_CDC_RX0_RX_PATH_MIX_SEC0                 0x0b51
+#define WCD9335_CDC_RX0_RX_PATH_MIX_SEC1                 0x0b52
+#define WCD9335_CDC_RX1_RX_PATH_CTL                      0x0b55
+#define WCD9335_CDC_RX1_RX_PATH_CFG0                     0x0b56
+#define WCD9335_CDC_RX1_RX_PATH_CFG1                     0x0b57
+#define WCD9335_CDC_RX1_RX_PATH_CFG2                     0x0b58
+#define WCD9335_CDC_RX1_RX_VOL_CTL                       0x0b59
+#define WCD9335_CDC_RX1_RX_PATH_MIX_CTL                  0x0b5a
+#define WCD9335_CDC_RX1_RX_PATH_MIX_CFG                  0x0b5b
+#define WCD9335_CDC_RX1_RX_VOL_MIX_CTL                   0x0b5c
+#define WCD9335_CDC_RX1_RX_PATH_SEC0                     0x0b5d
+#define WCD9335_CDC_RX1_RX_PATH_SEC1                     0x0b5e
+#define WCD9335_CDC_RX1_RX_PATH_SEC2                     0x0b5f
+#define WCD9335_CDC_RX1_RX_PATH_SEC3                     0x0b60
+#define WCD9335_CDC_RX1_RX_PATH_SEC4                     0x0b61
+#define WCD9335_CDC_RX1_RX_PATH_SEC5                     0x0b62
+#define WCD9335_CDC_RX1_RX_PATH_SEC6                     0x0b63
+#define WCD9335_CDC_RX1_RX_PATH_SEC7                     0x0b64
+#define WCD9335_CDC_RX1_RX_PATH_MIX_SEC0                 0x0b65
+#define WCD9335_CDC_RX1_RX_PATH_MIX_SEC1                 0x0b66
+#define WCD9335_CDC_RX2_RX_PATH_CTL                      0x0b69
+#define WCD9335_CDC_RX2_RX_PATH_CFG0                     0x0b6a
+#define WCD9335_CDC_RX2_RX_PATH_CFG1                     0x0b6b
+#define WCD9335_CDC_RX2_RX_PATH_CFG2                     0x0b6c
+#define WCD9335_CDC_RX2_RX_VOL_CTL                       0x0b6d
+#define WCD9335_CDC_RX2_RX_PATH_MIX_CTL                  0x0b6e
+#define WCD9335_CDC_RX2_RX_PATH_MIX_CFG                  0x0b6f
+#define WCD9335_CDC_RX2_RX_VOL_MIX_CTL                   0x0b70
+#define WCD9335_CDC_RX2_RX_PATH_SEC0                     0x0b71
+#define WCD9335_CDC_RX2_RX_PATH_SEC1                     0x0b72
+#define WCD9335_CDC_RX2_RX_PATH_SEC2                     0x0b73
+#define WCD9335_CDC_RX2_RX_PATH_SEC3                     0x0b74
+#define WCD9335_CDC_RX2_RX_PATH_SEC4                     0x0b75
+#define WCD9335_CDC_RX2_RX_PATH_SEC5                     0x0b76
+#define WCD9335_CDC_RX2_RX_PATH_SEC6                     0x0b77
+#define WCD9335_CDC_RX2_RX_PATH_SEC7                     0x0b78
+#define WCD9335_CDC_RX2_RX_PATH_MIX_SEC0                 0x0b79
+#define WCD9335_CDC_RX2_RX_PATH_MIX_SEC1                 0x0b7a
+#define WCD9335_CDC_RX3_RX_PATH_CTL                      0x0b7d
+#define WCD9335_CDC_RX3_RX_PATH_CFG0                     0x0b7e
+#define WCD9335_CDC_RX3_RX_PATH_CFG1                     0x0b7f
+#define WCD9335_CDC_RX3_RX_PATH_CFG2                     0x0b80
+#define WCD9335_CDC_RX3_RX_VOL_CTL                       0x0b81
+#define WCD9335_CDC_RX3_RX_PATH_MIX_CTL                  0x0b82
+#define WCD9335_CDC_RX3_RX_PATH_MIX_CFG                  0x0b83
+#define WCD9335_CDC_RX3_RX_VOL_MIX_CTL                   0x0b84
+#define WCD9335_CDC_RX3_RX_PATH_SEC0                     0x0b85
+#define WCD9335_CDC_RX3_RX_PATH_SEC1                     0x0b86
+#define WCD9335_CDC_RX3_RX_PATH_SEC2                     0x0b87
+#define WCD9335_CDC_RX3_RX_PATH_SEC3                     0x0b88
+#define WCD9335_CDC_RX3_RX_PATH_SEC5                     0x0b8a
+#define WCD9335_CDC_RX3_RX_PATH_SEC6                     0x0b8b
+#define WCD9335_CDC_RX3_RX_PATH_SEC7                     0x0b8c
+#define WCD9335_CDC_RX3_RX_PATH_MIX_SEC0                 0x0b8d
+#define WCD9335_CDC_RX3_RX_PATH_MIX_SEC1                 0x0b8e
+#define WCD9335_CDC_RX4_RX_PATH_CTL                      0x0b91
+#define WCD9335_CDC_RX4_RX_PATH_CFG0                     0x0b92
+#define WCD9335_CDC_RX4_RX_PATH_CFG1                     0x0b93
+#define WCD9335_CDC_RX4_RX_PATH_CFG2                     0x0b94
+#define WCD9335_CDC_RX4_RX_VOL_CTL                       0x0b95
+#define WCD9335_CDC_RX4_RX_PATH_MIX_CTL                  0x0b96
+#define WCD9335_CDC_RX4_RX_PATH_MIX_CFG                  0x0b97
+#define WCD9335_CDC_RX4_RX_VOL_MIX_CTL                   0x0b98
+#define WCD9335_CDC_RX4_RX_PATH_SEC0                     0x0b99
+#define WCD9335_CDC_RX4_RX_PATH_SEC1                     0x0b9a
+#define WCD9335_CDC_RX4_RX_PATH_SEC2                     0x0b9b
+#define WCD9335_CDC_RX4_RX_PATH_SEC3                     0x0b9c
+#define WCD9335_CDC_RX4_RX_PATH_SEC5                     0x0b9e
+#define WCD9335_CDC_RX4_RX_PATH_SEC6                     0x0b9f
+#define WCD9335_CDC_RX4_RX_PATH_SEC7                     0x0ba0
+#define WCD9335_CDC_RX4_RX_PATH_MIX_SEC0                 0x0ba1
+#define WCD9335_CDC_RX4_RX_PATH_MIX_SEC1                 0x0ba2
+#define WCD9335_CDC_RX5_RX_PATH_CTL                      0x0ba5
+#define WCD9335_CDC_RX5_RX_PATH_CFG0                     0x0ba6
+#define WCD9335_CDC_RX5_RX_PATH_CFG1                     0x0ba7
+#define WCD9335_CDC_RX5_RX_PATH_CFG2                     0x0ba8
+#define WCD9335_CDC_RX5_RX_VOL_CTL                       0x0ba9
+#define WCD9335_CDC_RX5_RX_PATH_MIX_CTL                  0x0baa
+#define WCD9335_CDC_RX5_RX_PATH_MIX_CFG                  0x0bab
+#define WCD9335_CDC_RX5_RX_VOL_MIX_CTL                   0x0bac
+#define WCD9335_CDC_RX5_RX_PATH_SEC0                     0x0bad
+#define WCD9335_CDC_RX5_RX_PATH_SEC1                     0x0bae
+#define WCD9335_CDC_RX5_RX_PATH_SEC2                     0x0baf
+#define WCD9335_CDC_RX5_RX_PATH_SEC3                     0x0bb0
+#define WCD9335_CDC_RX5_RX_PATH_SEC5                     0x0bb2
+#define WCD9335_CDC_RX5_RX_PATH_SEC6                     0x0bb3
+#define WCD9335_CDC_RX5_RX_PATH_SEC7                     0x0bb4
+#define WCD9335_CDC_RX5_RX_PATH_MIX_SEC0                 0x0bb5
+#define WCD9335_CDC_RX5_RX_PATH_MIX_SEC1                 0x0bb6
+#define WCD9335_CDC_RX6_RX_PATH_CTL                      0x0bb9
+#define WCD9335_CDC_RX6_RX_PATH_CFG0                     0x0bba
+#define WCD9335_CDC_RX6_RX_PATH_CFG1                     0x0bbb
+#define WCD9335_CDC_RX6_RX_PATH_CFG2                     0x0bbc
+#define WCD9335_CDC_RX6_RX_VOL_CTL                       0x0bbd
+#define WCD9335_CDC_RX6_RX_PATH_MIX_CTL                  0x0bbe
+#define WCD9335_CDC_RX6_RX_PATH_MIX_CFG                  0x0bbf
+#define WCD9335_CDC_RX6_RX_VOL_MIX_CTL                   0x0bc0
+#define WCD9335_CDC_RX6_RX_PATH_SEC0                     0x0bc1
+#define WCD9335_CDC_RX6_RX_PATH_SEC1                     0x0bc2
+#define WCD9335_CDC_RX6_RX_PATH_SEC2                     0x0bc3
+#define WCD9335_CDC_RX6_RX_PATH_SEC3                     0x0bc4
+#define WCD9335_CDC_RX6_RX_PATH_SEC5                     0x0bc6
+#define WCD9335_CDC_RX6_RX_PATH_SEC6                     0x0bc7
+#define WCD9335_CDC_RX6_RX_PATH_SEC7                     0x0bc8
+#define WCD9335_CDC_RX6_RX_PATH_MIX_SEC0                 0x0bc9
+#define WCD9335_CDC_RX6_RX_PATH_MIX_SEC1                 0x0bca
+#define WCD9335_CDC_RX7_RX_PATH_CTL                      0x0bcd
+#define WCD9335_CDC_RX7_RX_PATH_CFG0                     0x0bce
+#define WCD9335_CDC_RX7_RX_PATH_CFG1                     0x0bcf
+#define WCD9335_CDC_RX7_RX_PATH_CFG2                     0x0bd0
+#define WCD9335_CDC_RX7_RX_VOL_CTL                       0x0bd1
+#define WCD9335_CDC_RX7_RX_PATH_MIX_CTL                  0x0bd2
+#define WCD9335_CDC_RX7_RX_PATH_MIX_CFG                  0x0bd3
+#define WCD9335_CDC_RX7_RX_VOL_MIX_CTL                   0x0bd4
+#define WCD9335_CDC_RX7_RX_PATH_SEC0                     0x0bd5
+#define WCD9335_CDC_RX7_RX_PATH_SEC1                     0x0bd6
+#define WCD9335_CDC_RX7_RX_PATH_SEC2                     0x0bd7
+#define WCD9335_CDC_RX7_RX_PATH_SEC3                     0x0bd8
+#define WCD9335_CDC_RX7_RX_PATH_SEC5                     0x0bda
+#define WCD9335_CDC_RX7_RX_PATH_SEC6                     0x0bdb
+#define WCD9335_CDC_RX7_RX_PATH_SEC7                     0x0bdc
+#define WCD9335_CDC_RX7_RX_PATH_MIX_SEC0                 0x0bdd
+#define WCD9335_CDC_RX7_RX_PATH_MIX_SEC1                 0x0bde
+#define WCD9335_CDC_RX8_RX_PATH_CTL                      0x0be1
+#define WCD9335_CDC_RX8_RX_PATH_CFG0                     0x0be2
+#define WCD9335_CDC_RX8_RX_PATH_CFG1                     0x0be3
+#define WCD9335_CDC_RX8_RX_PATH_CFG2                     0x0be4
+#define WCD9335_CDC_RX8_RX_VOL_CTL                       0x0be5
+#define WCD9335_CDC_RX8_RX_PATH_MIX_CTL                  0x0be6
+#define WCD9335_CDC_RX8_RX_PATH_MIX_CFG                  0x0be7
+#define WCD9335_CDC_RX8_RX_VOL_MIX_CTL                   0x0be8
+#define WCD9335_CDC_RX8_RX_PATH_SEC0                     0x0be9
+#define WCD9335_CDC_RX8_RX_PATH_SEC1                     0x0bea
+#define WCD9335_CDC_RX8_RX_PATH_SEC2                     0x0beb
+#define WCD9335_CDC_RX8_RX_PATH_SEC3                     0x0bec
+#define WCD9335_CDC_RX8_RX_PATH_SEC5                     0x0bee
+#define WCD9335_CDC_RX8_RX_PATH_SEC6                     0x0bef
+#define WCD9335_CDC_RX8_RX_PATH_SEC7                     0x0bf0
+#define WCD9335_CDC_RX8_RX_PATH_MIX_SEC0                 0x0bf1
+#define WCD9335_CDC_RX8_RX_PATH_MIX_SEC1                 0x0bf2
+
+/* Page-12 Registers */
+#define WCD9335_PAGE12_PAGE_REGISTER                     0x0c00
+#define WCD9335_CDC_CLSH_CRC                             0x0c01
+#define WCD9335_CDC_CLSH_DLY_CTRL                        0x0c02
+#define WCD9335_CDC_CLSH_DECAY_CTRL                      0x0c03
+#define WCD9335_CDC_CLSH_HPH_V_PA                        0x0c04
+#define WCD9335_CDC_CLSH_EAR_V_PA                        0x0c05
+#define WCD9335_CDC_CLSH_HPH_V_HD                        0x0c06
+#define WCD9335_CDC_CLSH_EAR_V_HD                        0x0c07
+#define WCD9335_CDC_CLSH_K1_MSB                          0x0c08
+#define WCD9335_CDC_CLSH_K1_LSB                          0x0c09
+#define WCD9335_CDC_CLSH_K2_MSB                          0x0c0a
+#define WCD9335_CDC_CLSH_K2_LSB                          0x0c0b
+#define WCD9335_CDC_CLSH_IDLE_CTRL                       0x0c0c
+#define WCD9335_CDC_CLSH_IDLE_HPH                        0x0c0d
+#define WCD9335_CDC_CLSH_IDLE_EAR                        0x0c0e
+#define WCD9335_CDC_CLSH_TEST0                           0x0c0f
+#define WCD9335_CDC_CLSH_TEST1                           0x0c10
+#define WCD9335_CDC_CLSH_OVR_VREF                        0x0c11
+#define WCD9335_CDC_BOOST0_BOOST_PATH_CTL                0x0c19
+#define WCD9335_CDC_BOOST0_BOOST_CTL                     0x0c1a
+#define WCD9335_CDC_BOOST0_BOOST_CFG1                    0x0c1b
+#define WCD9335_CDC_BOOST0_BOOST_CFG2                    0x0c1c
+#define WCD9335_CDC_BOOST1_BOOST_PATH_CTL                0x0c21
+#define WCD9335_CDC_BOOST1_BOOST_CTL                     0x0c22
+#define WCD9335_CDC_BOOST1_BOOST_CFG1                    0x0c23
+#define WCD9335_CDC_BOOST1_BOOST_CFG2                    0x0c24
+#define WCD9335_SWR_AHB_BRIDGE_WR_DATA_0                 0x0c29
+#define WCD9335_SWR_AHB_BRIDGE_WR_DATA_1                 0x0c2a
+#define WCD9335_SWR_AHB_BRIDGE_WR_DATA_2                 0x0c2b
+#define WCD9335_SWR_AHB_BRIDGE_WR_DATA_3                 0x0c2c
+#define WCD9335_SWR_AHB_BRIDGE_WR_ADDR_0                 0x0c2d
+#define WCD9335_SWR_AHB_BRIDGE_WR_ADDR_1                 0x0c2e
+#define WCD9335_SWR_AHB_BRIDGE_WR_ADDR_2                 0x0c2f
+#define WCD9335_SWR_AHB_BRIDGE_WR_ADDR_3                 0x0c30
+#define WCD9335_SWR_AHB_BRIDGE_RD_ADDR_0                 0x0c31
+#define WCD9335_SWR_AHB_BRIDGE_RD_ADDR_1                 0x0c32
+#define WCD9335_SWR_AHB_BRIDGE_RD_ADDR_2                 0x0c33
+#define WCD9335_SWR_AHB_BRIDGE_RD_ADDR_3                 0x0c34
+#define WCD9335_SWR_AHB_BRIDGE_RD_DATA_0                 0x0c35
+#define WCD9335_SWR_AHB_BRIDGE_RD_DATA_1                 0x0c36
+#define WCD9335_SWR_AHB_BRIDGE_RD_DATA_2                 0x0c37
+#define WCD9335_SWR_AHB_BRIDGE_RD_DATA_3                 0x0c38
+#define WCD9335_SWR_AHB_BRIDGE_ACCESS_CFG                0x0c39
+#define WCD9335_SWR_AHB_BRIDGE_ACCESS_STATUS             0x0c3a
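+/*
+ * The SWR_AHB_BRIDGE registers above expose a byte-wide window onto the
+ * 32-bit Soundwire AHB bus.  A minimal sketch of a word write, assuming
+ * (as Qualcomm codec drivers typically do) that data is loaded LSB-first
+ * into WR_DATA_0..3 and the transfer is triggered by filling
+ * WR_ADDR_0..3; "map", "data" and "addr" are hypothetical locals, not
+ * part of this header:
+ *
+ *	for (i = 0; i < 4; i++)
+ *		regmap_write(map, WCD9335_SWR_AHB_BRIDGE_WR_DATA_0 + i,
+ *			     (data >> (8 * i)) & 0xff);
+ *	for (i = 0; i < 4; i++)
+ *		regmap_write(map, WCD9335_SWR_AHB_BRIDGE_WR_ADDR_0 + i,
+ *			     (addr >> (8 * i)) & 0xff);
+ */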
+#define WCD9335_CDC_VBAT_VBAT_PATH_CTL                   0x0c3d
+#define WCD9335_CDC_VBAT_VBAT_CFG                        0x0c3e
+#define WCD9335_CDC_VBAT_VBAT_ADC_CAL1                   0x0c3f
+#define WCD9335_CDC_VBAT_VBAT_ADC_CAL2                   0x0c40
+#define WCD9335_CDC_VBAT_VBAT_ADC_CAL3                   0x0c41
+#define WCD9335_CDC_VBAT_VBAT_PK_EST1                    0x0c42
+#define WCD9335_CDC_VBAT_VBAT_PK_EST2                    0x0c43
+#define WCD9335_CDC_VBAT_VBAT_PK_EST3                    0x0c44
+#define WCD9335_CDC_VBAT_VBAT_RF_PROC1                   0x0c45
+#define WCD9335_CDC_VBAT_VBAT_RF_PROC2                   0x0c46
+#define WCD9335_CDC_VBAT_VBAT_TAC1                       0x0c47
+#define WCD9335_CDC_VBAT_VBAT_TAC2                       0x0c48
+#define WCD9335_CDC_VBAT_VBAT_TAC3                       0x0c49
+#define WCD9335_CDC_VBAT_VBAT_TAC4                       0x0c4a
+#define WCD9335_CDC_VBAT_VBAT_GAIN_UPD1                  0x0c4b
+#define WCD9335_CDC_VBAT_VBAT_GAIN_UPD2                  0x0c4c
+#define WCD9335_CDC_VBAT_VBAT_GAIN_UPD3                  0x0c4d
+#define WCD9335_CDC_VBAT_VBAT_GAIN_UPD4                  0x0c4e
+#define WCD9335_CDC_VBAT_VBAT_DEBUG1                     0x0c4f
+#define WCD9335_CDC_VBAT_VBAT_GAIN_UPD_MON               0x0c50
+#define WCD9335_CDC_VBAT_VBAT_GAIN_MON_VAL               0x0c51
+#define WCD9335_SPLINE_SRC0_CLK_RST_CTL_0                0x0c55
+#define WCD9335_SPLINE_SRC0_STATUS                       0x0c56
+#define WCD9335_SPLINE_SRC1_CLK_RST_CTL_0                0x0c6d
+#define WCD9335_SPLINE_SRC1_STATUS                       0x0c6e
+#define WCD9335_SPLINE_SRC2_CLK_RST_CTL_0                0x0c85
+#define WCD9335_SPLINE_SRC2_STATUS                       0x0c86
+#define WCD9335_SPLINE_SRC3_CLK_RST_CTL_0                0x0c9d
+#define WCD9335_SPLINE_SRC3_STATUS                       0x0c9e
+#define WCD9335_CDC_SIDETONE_SRC0_ST_SRC_PATH_CTL        0x0cb5
+#define WCD9335_CDC_SIDETONE_SRC0_ST_SRC_PATH_CFG1       0x0cb6
+#define WCD9335_CDC_SIDETONE_SRC1_ST_SRC_PATH_CTL        0x0cb9
+#define WCD9335_CDC_SIDETONE_SRC1_ST_SRC_PATH_CFG1       0x0cba
+
+/* Page-13 Registers */
+#define WCD9335_PAGE13_PAGE_REGISTER                     0x0d00
+#define WCD9335_CDC_RX_INP_MUX_RX_INT0_CFG0              0x0d01
+#define WCD9335_CDC_RX_INP_MUX_RX_INT0_CFG1              0x0d02
+#define WCD9335_CDC_RX_INP_MUX_RX_INT1_CFG0              0x0d03
+#define WCD9335_CDC_RX_INP_MUX_RX_INT1_CFG1              0x0d04
+#define WCD9335_CDC_RX_INP_MUX_RX_INT2_CFG0              0x0d05
+#define WCD9335_CDC_RX_INP_MUX_RX_INT2_CFG1              0x0d06
+#define WCD9335_CDC_RX_INP_MUX_RX_INT3_CFG0              0x0d07
+#define WCD9335_CDC_RX_INP_MUX_RX_INT3_CFG1              0x0d08
+#define WCD9335_CDC_RX_INP_MUX_RX_INT4_CFG0              0x0d09
+#define WCD9335_CDC_RX_INP_MUX_RX_INT4_CFG1              0x0d0a
+#define WCD9335_CDC_RX_INP_MUX_RX_INT5_CFG0              0x0d0b
+#define WCD9335_CDC_RX_INP_MUX_RX_INT5_CFG1              0x0d0c
+#define WCD9335_CDC_RX_INP_MUX_RX_INT6_CFG0              0x0d0d
+#define WCD9335_CDC_RX_INP_MUX_RX_INT6_CFG1              0x0d0e
+#define WCD9335_CDC_RX_INP_MUX_RX_INT7_CFG0              0x0d0f
+#define WCD9335_CDC_RX_INP_MUX_RX_INT7_CFG1              0x0d10
+#define WCD9335_CDC_RX_INP_MUX_RX_INT8_CFG0              0x0d11
+#define WCD9335_CDC_RX_INP_MUX_RX_INT8_CFG1              0x0d12
+#define WCD9335_CDC_RX_INP_MUX_RX_MIX_CFG0               0x0d13
+#define WCD9335_CDC_RX_INP_MUX_RX_MIX_CFG1               0x0d14
+#define WCD9335_CDC_RX_INP_MUX_RX_MIX_CFG2               0x0d15
+#define WCD9335_CDC_RX_INP_MUX_RX_MIX_CFG3               0x0d16
+#define WCD9335_CDC_RX_INP_MUX_RX_MIX_CFG4               0x0d17
+#define WCD9335_CDC_RX_INP_MUX_SIDETONE_SRC_CFG0         0x0d18
+#define WCD9335_CDC_RX_INP_MUX_SIDETONE_SRC_CFG1         0x0d19
+#define WCD9335_CDC_RX_INP_MUX_ANC_CFG0                  0x0d1a
+#define WCD9335_CDC_RX_INP_MUX_SPLINE_SRC_CFG0           0x0d1b
+#define WCD9335_CDC_TX_INP_MUX_ADC_MUX0_CFG0             0x0d1d
+#define WCD9335_CDC_TX_INP_MUX_ADC_MUX0_CFG1             0x0d1e
+#define WCD9335_CDC_TX_INP_MUX_ADC_MUX1_CFG0             0x0d1f
+#define WCD9335_CDC_TX_INP_MUX_ADC_MUX1_CFG1             0x0d20
+#define WCD9335_CDC_TX_INP_MUX_ADC_MUX2_CFG0             0x0d21
+#define WCD9335_CDC_TX_INP_MUX_ADC_MUX2_CFG1             0x0d22
+#define WCD9335_CDC_TX_INP_MUX_ADC_MUX3_CFG0             0x0d23
+#define WCD9335_CDC_TX_INP_MUX_ADC_MUX3_CFG1             0x0d24
+#define WCD9335_CDC_TX_INP_MUX_ADC_MUX4_CFG0             0x0d25
+#define WCD9335_CDC_TX_INP_MUX_ADC_MUX5_CFG0             0x0d26
+#define WCD9335_CDC_TX_INP_MUX_ADC_MUX6_CFG0             0x0d27
+#define WCD9335_CDC_TX_INP_MUX_ADC_MUX7_CFG0             0x0d28
+#define WCD9335_CDC_TX_INP_MUX_ADC_MUX8_CFG0             0x0d29
+#define WCD9335_CDC_TX_INP_MUX_ADC_MUX10_CFG0            0x0d2b
+#define WCD9335_CDC_TX_INP_MUX_ADC_MUX11_CFG0            0x0d2c
+#define WCD9335_CDC_TX_INP_MUX_ADC_MUX12_CFG0            0x0d2d
+#define WCD9335_CDC_TX_INP_MUX_ADC_MUX13_CFG0            0x0d2e
+#define WCD9335_CDC_SIDETONE_IIR_INP_MUX_IIR0_MIX_CFG0   0x0d31
+#define WCD9335_CDC_SIDETONE_IIR_INP_MUX_IIR0_MIX_CFG1   0x0d32
+#define WCD9335_CDC_SIDETONE_IIR_INP_MUX_IIR0_MIX_CFG2   0x0d33
+#define WCD9335_CDC_SIDETONE_IIR_INP_MUX_IIR0_MIX_CFG3   0x0d34
+#define WCD9335_CDC_SIDETONE_IIR_INP_MUX_IIR1_MIX_CFG0   0x0d35
+#define WCD9335_CDC_SIDETONE_IIR_INP_MUX_IIR1_MIX_CFG1   0x0d36
+#define WCD9335_CDC_SIDETONE_IIR_INP_MUX_IIR1_MIX_CFG2   0x0d37
+#define WCD9335_CDC_SIDETONE_IIR_INP_MUX_IIR1_MIX_CFG3   0x0d38
+#define WCD9335_CDC_IF_ROUTER_TX_MUX_CFG0                0x0d3a
+#define WCD9335_CDC_IF_ROUTER_TX_MUX_CFG1                0x0d3b
+#define WCD9335_CDC_IF_ROUTER_TX_MUX_CFG2                0x0d3c
+#define WCD9335_CDC_IF_ROUTER_TX_MUX_CFG3                0x0d3d
+#define WCD9335_CDC_CLK_RST_CTRL_MCLK_CONTROL            0x0d41
+#define WCD9335_CDC_CLK_RST_CTRL_FS_CNT_CONTROL          0x0d42
+#define WCD9335_CDC_CLK_RST_CTRL_SWR_CONTROL             0x0d43
+#define WCD9335_CDC_PROX_DETECT_PROX_CTL                 0x0d49
+#define WCD9335_CDC_PROX_DETECT_PROX_POLL_PERIOD0        0x0d4a
+#define WCD9335_CDC_PROX_DETECT_PROX_POLL_PERIOD1        0x0d4b
+#define WCD9335_CDC_PROX_DETECT_PROX_SIG_PATTERN_LSB     0x0d4c
+#define WCD9335_CDC_PROX_DETECT_PROX_SIG_PATTERN_MSB     0x0d4d
+#define WCD9335_CDC_PROX_DETECT_PROX_STATUS              0x0d4e
+#define WCD9335_CDC_PROX_DETECT_PROX_TEST_CTRL           0x0d4f
+#define WCD9335_CDC_PROX_DETECT_PROX_TEST_BUFF_LSB       0x0d50
+#define WCD9335_CDC_PROX_DETECT_PROX_TEST_BUFF_MSB       0x0d51
+#define WCD9335_CDC_PROX_DETECT_PROX_TEST_BUFF_LSB_RD    0x0d52
+#define WCD9335_CDC_PROX_DETECT_PROX_TEST_BUFF_MSB_RD    0x0d53
+#define WCD9335_CDC_PROX_DETECT_PROX_CTL_REPEAT_PAT      0x0d54
+#define WCD9335_CDC_SIDETONE_IIR0_IIR_PATH_CTL           0x0d55
+#define WCD9335_CDC_SIDETONE_IIR0_IIR_GAIN_B1_CTL        0x0d56
+#define WCD9335_CDC_SIDETONE_IIR0_IIR_GAIN_B2_CTL        0x0d57
+#define WCD9335_CDC_SIDETONE_IIR0_IIR_GAIN_B3_CTL        0x0d58
+#define WCD9335_CDC_SIDETONE_IIR0_IIR_GAIN_B4_CTL        0x0d59
+#define WCD9335_CDC_SIDETONE_IIR0_IIR_GAIN_B5_CTL        0x0d5a
+#define WCD9335_CDC_SIDETONE_IIR0_IIR_GAIN_B6_CTL        0x0d5b
+#define WCD9335_CDC_SIDETONE_IIR0_IIR_GAIN_B7_CTL        0x0d5c
+#define WCD9335_CDC_SIDETONE_IIR0_IIR_GAIN_B8_CTL        0x0d5d
+#define WCD9335_CDC_SIDETONE_IIR0_IIR_CTL                0x0d5e
+#define WCD9335_CDC_SIDETONE_IIR0_IIR_GAIN_TIMER_CTL     0x0d5f
+#define WCD9335_CDC_SIDETONE_IIR0_IIR_COEF_B1_CTL        0x0d60
+#define WCD9335_CDC_SIDETONE_IIR0_IIR_COEF_B2_CTL        0x0d61
+#define WCD9335_CDC_SIDETONE_IIR1_IIR_PATH_CTL           0x0d65
+#define WCD9335_CDC_SIDETONE_IIR1_IIR_GAIN_B1_CTL        0x0d66
+#define WCD9335_CDC_SIDETONE_IIR1_IIR_GAIN_B2_CTL        0x0d67
+#define WCD9335_CDC_SIDETONE_IIR1_IIR_GAIN_B3_CTL        0x0d68
+#define WCD9335_CDC_SIDETONE_IIR1_IIR_GAIN_B4_CTL        0x0d69
+#define WCD9335_CDC_SIDETONE_IIR1_IIR_GAIN_B5_CTL        0x0d6a
+#define WCD9335_CDC_SIDETONE_IIR1_IIR_GAIN_B6_CTL        0x0d6b
+#define WCD9335_CDC_SIDETONE_IIR1_IIR_GAIN_B7_CTL        0x0d6c
+#define WCD9335_CDC_SIDETONE_IIR1_IIR_GAIN_B8_CTL        0x0d6d
+#define WCD9335_CDC_SIDETONE_IIR1_IIR_CTL                0x0d6e
+#define WCD9335_CDC_SIDETONE_IIR1_IIR_GAIN_TIMER_CTL     0x0d6f
+#define WCD9335_CDC_SIDETONE_IIR1_IIR_COEF_B1_CTL        0x0d70
+#define WCD9335_CDC_SIDETONE_IIR1_IIR_COEF_B2_CTL        0x0d71
+#define WCD9335_CDC_TOP_TOP_CFG0                         0x0d81
+#define WCD9335_CDC_TOP_TOP_CFG1                         0x0d82
+#define WCD9335_CDC_TOP_TOP_CFG2                         0x0d83
+#define WCD9335_CDC_TOP_TOP_CFG3                         0x0d84
+#define WCD9335_CDC_TOP_TOP_CFG4                         0x0d85
+#define WCD9335_CDC_TOP_TOP_CFG5                         0x0d86
+#define WCD9335_CDC_TOP_TOP_CFG6                         0x0d87
+#define WCD9335_CDC_TOP_TOP_CFG7                         0x0d88
+#define WCD9335_CDC_TOP_HPHL_COMP_WR_LSB                 0x0d89
+#define WCD9335_CDC_TOP_HPHL_COMP_WR_MSB                 0x0d8a
+#define WCD9335_CDC_TOP_HPHL_COMP_LUT                    0x0d8b
+#define WCD9335_CDC_TOP_HPHL_COMP_RD_LSB                 0x0d8c
+#define WCD9335_CDC_TOP_HPHL_COMP_RD_MSB                 0x0d8d
+#define WCD9335_CDC_TOP_HPHR_COMP_WR_LSB                 0x0d8e
+#define WCD9335_CDC_TOP_HPHR_COMP_WR_MSB                 0x0d8f
+#define WCD9335_CDC_TOP_HPHR_COMP_LUT                    0x0d90
+#define WCD9335_CDC_TOP_HPHR_COMP_RD_LSB                 0x0d91
+#define WCD9335_CDC_TOP_HPHR_COMP_RD_MSB                 0x0d92
+#define WCD9335_CDC_TOP_DIFFL_COMP_WR_LSB                0x0d93
+#define WCD9335_CDC_TOP_DIFFL_COMP_WR_MSB                0x0d94
+#define WCD9335_CDC_TOP_DIFFL_COMP_LUT                   0x0d95
+#define WCD9335_CDC_TOP_DIFFL_COMP_RD_LSB                0x0d96
+#define WCD9335_CDC_TOP_DIFFL_COMP_RD_MSB                0x0d97
+#define WCD9335_CDC_TOP_DIFFR_COMP_WR_LSB                0x0d98
+#define WCD9335_CDC_TOP_DIFFR_COMP_WR_MSB                0x0d99
+#define WCD9335_CDC_TOP_DIFFR_COMP_LUT                   0x0d9a
+#define WCD9335_CDC_TOP_DIFFR_COMP_RD_LSB                0x0d9b
+#define WCD9335_CDC_TOP_DIFFR_COMP_RD_MSB                0x0d9c
+
+/* Page-0x80 Registers */
+#define WCD9335_PAGE80_PAGE_REGISTER                     0x8000
+#define WCD9335_TLMM_BIST_MODE_PINCFG                    0x8001
+#define WCD9335_TLMM_RF_PA_ON_PINCFG                     0x8002
+#define WCD9335_TLMM_INTR1_PINCFG                        0x8003
+#define WCD9335_TLMM_INTR2_PINCFG                        0x8004
+#define WCD9335_TLMM_SWR_DATA_PINCFG                     0x8005
+#define WCD9335_TLMM_SWR_CLK_PINCFG                      0x8006
+#define WCD9335_TLMM_SLIMBUS_DATA2_PINCFG                0x8007
+#define WCD9335_TLMM_I2C_CLK_PINCFG                      0x8008
+#define WCD9335_TLMM_I2C_DATA_PINCFG                     0x8009
+#define WCD9335_TLMM_I2S_RX_SD0_PINCFG                   0x800a
+#define WCD9335_TLMM_I2S_RX_SD1_PINCFG                   0x800b
+#define WCD9335_TLMM_I2S_RX_SCK_PINCFG                   0x800c
+#define WCD9335_TLMM_I2S_RX_WS_PINCFG                    0x800d
+#define WCD9335_TLMM_I2S_TX_SD0_PINCFG                   0x800e
+#define WCD9335_TLMM_I2S_TX_SD1_PINCFG                   0x800f
+#define WCD9335_TLMM_I2S_TX_SCK_PINCFG                   0x8010
+#define WCD9335_TLMM_I2S_TX_WS_PINCFG                    0x8011
+#define WCD9335_TLMM_DMIC1_CLK_PINCFG                    0x8012
+#define WCD9335_TLMM_DMIC1_DATA_PINCFG                   0x8013
+#define WCD9335_TLMM_DMIC2_CLK_PINCFG                    0x8014
+#define WCD9335_TLMM_DMIC2_DATA_PINCFG                   0x8015
+#define WCD9335_TLMM_DMIC3_CLK_PINCFG                    0x8016
+#define WCD9335_TLMM_DMIC3_DATA_PINCFG                   0x8017
+#define WCD9335_TLMM_JTDI_PINCFG                         0x8018
+#define WCD9335_TLMM_JTDO_PINCFG                         0x8019
+#define WCD9335_TLMM_JTMS_PINCFG                         0x801a
+#define WCD9335_TLMM_JTCK_PINCFG                         0x801b
+#define WCD9335_TLMM_JTRST_PINCFG                        0x801c
+#define WCD9335_TEST_DEBUG_PIN_CTL_OE_0                  0x8031
+#define WCD9335_TEST_DEBUG_PIN_CTL_OE_1                  0x8032
+#define WCD9335_TEST_DEBUG_PIN_CTL_OE_2                  0x8033
+#define WCD9335_TEST_DEBUG_PIN_CTL_OE_3                  0x8034
+#define WCD9335_TEST_DEBUG_PIN_CTL_DATA_0                0x8035
+#define WCD9335_TEST_DEBUG_PIN_CTL_DATA_1                0x8036
+#define WCD9335_TEST_DEBUG_PIN_CTL_DATA_2                0x8037
+#define WCD9335_TEST_DEBUG_PIN_CTL_DATA_3                0x8038
+#define WCD9335_TEST_DEBUG_PAD_DRVCTL                    0x8039
+#define WCD9335_TEST_DEBUG_PIN_STATUS                    0x803a
+#define WCD9335_TEST_DEBUG_NPL_DLY_TEST_1                0x803b
+#define WCD9335_TEST_DEBUG_NPL_DLY_TEST_2                0x803c
+#define WCD9335_TEST_DEBUG_MEM_CTRL                      0x803d
+#define WCD9335_TEST_DEBUG_DEBUG_BUS_SEL                 0x8041
+#define WCD9335_TEST_DEBUG_DEBUG_JTAG                    0x8042
+#define WCD9335_TEST_DEBUG_DEBUG_EN_1                    0x8043
+#define WCD9335_TEST_DEBUG_DEBUG_EN_2                    0x8044
+#define WCD9335_TEST_DEBUG_DEBUG_EN_3                    0x8045
+#define WCD9335_MAX_REGISTER                             0x80ff
+
+/* SLIMBUS Slave Registers */
+#define TASHA_SLIM_PGD_PORT_INT_EN0                     (0x30)
+#define TASHA_SLIM_PGD_PORT_INT_STATUS_RX_0             (0x34)
+#define TASHA_SLIM_PGD_PORT_INT_STATUS_RX_1             (0x35)
+#define TASHA_SLIM_PGD_PORT_INT_STATUS_TX_0             (0x36)
+#define TASHA_SLIM_PGD_PORT_INT_STATUS_TX_1             (0x37)
+#define TASHA_SLIM_PGD_PORT_INT_CLR_RX_0                (0x38)
+#define TASHA_SLIM_PGD_PORT_INT_CLR_RX_1                (0x39)
+#define TASHA_SLIM_PGD_PORT_INT_CLR_TX_0                (0x3a)
+#define TASHA_SLIM_PGD_PORT_INT_CLR_TX_1                (0x3b)
+#define TASHA_SLIM_PGD_PORT_INT_RX_SOURCE0              (0x60)
+#define TASHA_SLIM_PGD_PORT_INT_TX_SOURCE0              (0x70)
+
+/* Macros for packing register writes into a u32 */
+#define TASHA_PACKED_REG_SIZE sizeof(u32)
+
+#define TASHA_CODEC_PACK_ENTRY(reg, mask, val) (((val) & 0xff) | \
+	(((mask) & 0xff) << 8) | (((reg) & 0xffff) << 16))
+#define TASHA_CODEC_UNPACK_ENTRY(packed, reg, mask, val) \
+	do { \
+		(reg) = ((packed) >> 16) & 0xffff; \
+		(mask) = ((packed) >> 8) & 0xff; \
+		(val) = (packed) & 0xff; \
+	} while (0)
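+
+/*
+ * Illustrative use of the pack/unpack helpers above (a sketch only; the
+ * locals "packed", "reg", "mask" and "val" are not part of this header):
+ *
+ *	u32 packed = TASHA_CODEC_PACK_ENTRY(WCD9335_CDC_RX0_RX_VOL_CTL,
+ *					    0x07, 0x04);
+ *	u16 reg;
+ *	u8 mask, val;
+ *
+ *	TASHA_CODEC_UNPACK_ENTRY(packed, reg, mask, val);
+ *	(afterwards reg == 0x0b45, mask == 0x07, val == 0x04)
+ */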
+#endif
diff -Nruw linux-4.4.115-fbx/include/linux/mfd/wcd934x./registers.h linux-4.4.115-fbx/include/linux/mfd/wcd934x/registers.h
--- linux-4.4.115-fbx/include/linux/mfd/wcd934x./registers.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/include/linux/mfd/wcd934x/registers.h	2019-01-22 16:16:28.295289801 +0100
@@ -0,0 +1,1848 @@
+/*
+ * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _WCD934X_REGISTERS_H
+#define _WCD934X_REGISTERS_H
+
+#define WCD934X_PAGE_SIZE 256
+#define WCD934X_NUM_PAGES 256
+
+extern const u8 * const wcd934x_reg[WCD934X_NUM_PAGES];
+
+enum {
+	WCD934X_PAGE_0 = 0,
+	WCD934X_PAGE_1,
+	WCD934X_PAGE_2,
+	WCD934X_PAGE_4 = 4,
+	WCD934X_PAGE_5,
+	WCD934X_PAGE_6,
+	WCD934X_PAGE_7,
+	WCD934X_PAGE_10 = 0xA,
+	WCD934X_PAGE_11,
+	WCD934X_PAGE_12,
+	WCD934X_PAGE_13,
+	WCD934X_PAGE_14,
+	WCD934X_PAGE_15,
+	WCD934X_PAGE_0x50,
+	WCD934X_PAGE_0X80,
+};
+
+enum {
+	WCD934X_WRITE = 0,
+	WCD934X_READ,
+	WCD934X_READ_WRITE,
+};
+
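+/*
+ * The flat addresses below encode the page number in the high byte and
+ * the in-page offset in the low byte (pages are WCD934X_PAGE_SIZE bytes
+ * long).  A minimal sketch of the split, assuming that convention holds
+ * for every definition below; these helpers are illustrative and not
+ * provided by the driver:
+ *
+ *	static inline u8 wcd934x_reg_page(u16 addr)
+ *	{
+ *		return addr >> 8;
+ *	}
+ *
+ *	static inline u8 wcd934x_reg_offset(u16 addr)
+ *	{
+ *		return addr & 0xff;
+ *	}
+ *
+ * e.g. WCD934X_ANA_BIAS (0x0601) lives on page 6 at offset 0x01.
+ */
+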
+/* Page-0 Registers */
+#define WCD934X_PAGE0_PAGE_REGISTER                        0x0000
+#define WCD934X_CODEC_RPM_CLK_BYPASS                       0x0001
+#define WCD934X_CODEC_RPM_CLK_GATE                         0x0002
+#define WCD934X_CODEC_RPM_CLK_MCLK_CFG                     0x0003
+#define WCD934X_CODEC_RPM_CLK_MCLK2_CFG                    0x0004
+#define WCD934X_CODEC_RPM_I2S_DSD_CLK_SEL                  0x0005
+#define WCD934X_CODEC_RPM_RST_CTL                          0x0009
+#define WCD934X_CODEC_RPM_PWR_CDC_DIG_HM_CTL               0x0011
+#define WCD934X_CHIP_TIER_CTRL_CHIP_ID_BYTE0               0x0021
+#define WCD934X_CHIP_TIER_CTRL_CHIP_ID_BYTE1               0x0022
+#define WCD934X_CHIP_TIER_CTRL_CHIP_ID_BYTE2               0x0023
+#define WCD934X_CHIP_TIER_CTRL_CHIP_ID_BYTE3               0x0024
+#define WCD934X_CHIP_TIER_CTRL_EFUSE_CTL                   0x0025
+#define WCD934X_CHIP_TIER_CTRL_EFUSE_TEST0                 0x0026
+#define WCD934X_CHIP_TIER_CTRL_EFUSE_TEST1                 0x0027
+#define WCD934X_CHIP_TIER_CTRL_EFUSE_VAL_OUT0              0x0029
+#define WCD934X_CHIP_TIER_CTRL_EFUSE_VAL_OUT1              0x002a
+#define WCD934X_CHIP_TIER_CTRL_EFUSE_VAL_OUT2              0x002b
+#define WCD934X_CHIP_TIER_CTRL_EFUSE_VAL_OUT3              0x002c
+#define WCD934X_CHIP_TIER_CTRL_EFUSE_VAL_OUT4              0x002d
+#define WCD934X_CHIP_TIER_CTRL_EFUSE_VAL_OUT5              0x002e
+#define WCD934X_CHIP_TIER_CTRL_EFUSE_VAL_OUT6              0x002f
+#define WCD934X_CHIP_TIER_CTRL_EFUSE_VAL_OUT7              0x0030
+#define WCD934X_CHIP_TIER_CTRL_EFUSE_VAL_OUT8              0x0031
+#define WCD934X_CHIP_TIER_CTRL_EFUSE_VAL_OUT9              0x0032
+#define WCD934X_CHIP_TIER_CTRL_EFUSE_VAL_OUT10             0x0033
+#define WCD934X_CHIP_TIER_CTRL_EFUSE_VAL_OUT11             0x0034
+#define WCD934X_CHIP_TIER_CTRL_EFUSE_VAL_OUT12             0x0035
+#define WCD934X_CHIP_TIER_CTRL_EFUSE_VAL_OUT13             0x0036
+#define WCD934X_CHIP_TIER_CTRL_EFUSE_VAL_OUT14             0x0037
+#define WCD934X_CHIP_TIER_CTRL_EFUSE_VAL_OUT15             0x0038
+#define WCD934X_CHIP_TIER_CTRL_EFUSE_STATUS                0x0039
+#define WCD934X_CHIP_TIER_CTRL_I2C_SLAVE_ID_NONNEGO        0x003a
+#define WCD934X_CHIP_TIER_CTRL_I2C_SLAVE_ID_1              0x003b
+#define WCD934X_CHIP_TIER_CTRL_I2C_SLAVE_ID_2              0x003c
+#define WCD934X_CHIP_TIER_CTRL_I2C_SLAVE_ID_3              0x003d
+#define WCD934X_CHIP_TIER_CTRL_ANA_WAIT_STATE_CTL          0x003e
+#define WCD934X_CHIP_TIER_CTRL_SLNQ_WAIT_STATE_CTL         0x003f
+#define WCD934X_CHIP_TIER_CTRL_I2C_ACTIVE                  0x0040
+#define WCD934X_CHIP_TIER_CTRL_ALT_FUNC_EN                 0x0041
+#define WCD934X_CHIP_TIER_CTRL_GPIO_CTL_OE                 0x0042
+#define WCD934X_CHIP_TIER_CTRL_GPIO_CTL_DATA               0x0043
+#define WCD934X_DATA_HUB_RX0_CFG                           0x0051
+#define WCD934X_DATA_HUB_RX1_CFG                           0x0052
+#define WCD934X_DATA_HUB_RX2_CFG                           0x0053
+#define WCD934X_DATA_HUB_RX3_CFG                           0x0054
+#define WCD934X_DATA_HUB_RX4_CFG                           0x0055
+#define WCD934X_DATA_HUB_RX5_CFG                           0x0056
+#define WCD934X_DATA_HUB_RX6_CFG                           0x0057
+#define WCD934X_DATA_HUB_RX7_CFG                           0x0058
+#define WCD934X_DATA_HUB_SB_TX0_INP_CFG                    0x0061
+#define WCD934X_DATA_HUB_SB_TX1_INP_CFG                    0x0062
+#define WCD934X_DATA_HUB_SB_TX2_INP_CFG                    0x0063
+#define WCD934X_DATA_HUB_SB_TX3_INP_CFG                    0x0064
+#define WCD934X_DATA_HUB_SB_TX4_INP_CFG                    0x0065
+#define WCD934X_DATA_HUB_SB_TX5_INP_CFG                    0x0066
+#define WCD934X_DATA_HUB_SB_TX6_INP_CFG                    0x0067
+#define WCD934X_DATA_HUB_SB_TX7_INP_CFG                    0x0068
+#define WCD934X_DATA_HUB_SB_TX8_INP_CFG                    0x0069
+#define WCD934X_DATA_HUB_SB_TX9_INP_CFG                    0x006a
+#define WCD934X_DATA_HUB_SB_TX10_INP_CFG                   0x006b
+#define WCD934X_DATA_HUB_SB_TX11_INP_CFG                   0x006c
+#define WCD934X_DATA_HUB_SB_TX13_INP_CFG                   0x006e
+#define WCD934X_DATA_HUB_SB_TX14_INP_CFG                   0x006f
+#define WCD934X_DATA_HUB_SB_TX15_INP_CFG                   0x0070
+#define WCD934X_DATA_HUB_I2S_TX0_CFG                       0x0071
+#define WCD934X_DATA_HUB_I2S_TX1_0_CFG                     0x0073
+#define WCD934X_DATA_HUB_I2S_TX1_1_CFG                     0x0074
+#define WCD934X_DATA_HUB_I2S_0_CTL                         0x0081
+#define WCD934X_DATA_HUB_I2S_1_CTL                         0x0082
+#define WCD934X_DATA_HUB_I2S_2_CTL                         0x0083
+#define WCD934X_DATA_HUB_I2S_3_CTL                         0x0084
+#define WCD934X_DATA_HUB_I2S_CLKSRC_CTL                    0x0085
+#define WCD934X_DATA_HUB_I2S_COMMON_CTL                    0x0086
+#define WCD934X_DATA_HUB_I2S_0_TDM_CTL                     0x0087
+#define WCD934X_DATA_HUB_I2S_STATUS                        0x0088
+#define WCD934X_DMA_RDMA_CTL_0                             0x0091
+#define WCD934X_DMA_CH_2_3_CFG_RDMA_0                      0x0092
+#define WCD934X_DMA_CH_0_1_CFG_RDMA_0                      0x0093
+#define WCD934X_DMA_RDMA_CTL_1                             0x0094
+#define WCD934X_DMA_CH_2_3_CFG_RDMA_1                      0x0095
+#define WCD934X_DMA_CH_0_1_CFG_RDMA_1                      0x0096
+#define WCD934X_DMA_RDMA_CTL_2                             0x0097
+#define WCD934X_DMA_CH_2_3_CFG_RDMA_2                      0x0098
+#define WCD934X_DMA_CH_0_1_CFG_RDMA_2                      0x0099
+#define WCD934X_DMA_RDMA_CTL_3                             0x009a
+#define WCD934X_DMA_CH_2_3_CFG_RDMA_3                      0x009b
+#define WCD934X_DMA_CH_0_1_CFG_RDMA_3                      0x009c
+#define WCD934X_DMA_RDMA_CTL_4                             0x009d
+#define WCD934X_DMA_CH_2_3_CFG_RDMA_4                      0x009e
+#define WCD934X_DMA_CH_0_1_CFG_RDMA_4                      0x009f
+#define WCD934X_DMA_RDMA4_PRT_CFG                          0x00b1
+#define WCD934X_DMA_RDMA_SBTX0_7_CFG                       0x00b9
+#define WCD934X_DMA_RDMA_SBTX8_11_CFG                      0x00ba
+#define WCD934X_DMA_WDMA_CTL_0                             0x00c1
+#define WCD934X_DMA_CH_4_5_CFG_WDMA_0                      0x00c2
+#define WCD934X_DMA_CH_2_3_CFG_WDMA_0                      0x00c3
+#define WCD934X_DMA_CH_0_1_CFG_WDMA_0                      0x00c4
+#define WCD934X_DMA_WDMA_CTL_1                             0x00c6
+#define WCD934X_DMA_CH_4_5_CFG_WDMA_1                      0x00c7
+#define WCD934X_DMA_CH_2_3_CFG_WDMA_1                      0x00c8
+#define WCD934X_DMA_CH_0_1_CFG_WDMA_1                      0x00c9
+#define WCD934X_DMA_WDMA_CTL_2                             0x00cb
+#define WCD934X_DMA_CH_4_5_CFG_WDMA_2                      0x00cc
+#define WCD934X_DMA_CH_2_3_CFG_WDMA_2                      0x00cd
+#define WCD934X_DMA_CH_0_1_CFG_WDMA_2                      0x00ce
+#define WCD934X_DMA_WDMA_CTL_3                             0x00d0
+#define WCD934X_DMA_CH_4_5_CFG_WDMA_3                      0x00d1
+#define WCD934X_DMA_CH_2_3_CFG_WDMA_3                      0x00d2
+#define WCD934X_DMA_CH_0_1_CFG_WDMA_3                      0x00d3
+#define WCD934X_DMA_WDMA_CTL_4                             0x00d5
+#define WCD934X_DMA_CH_4_5_CFG_WDMA_4                      0x00d6
+#define WCD934X_DMA_CH_2_3_CFG_WDMA_4                      0x00d7
+#define WCD934X_DMA_CH_0_1_CFG_WDMA_4                      0x00d8
+#define WCD934X_DMA_WDMA0_PRT_CFG                          0x00e1
+#define WCD934X_DMA_WDMA3_PRT_CFG                          0x00e2
+#define WCD934X_DMA_WDMA4_PRT0_3_CFG                       0x00e3
+#define WCD934X_DMA_WDMA4_PRT4_7_CFG                       0x00e4
+#define WCD934X_PAGE1_PAGE_REGISTER                        0x0100
+#define WCD934X_CPE_FLL_USER_CTL_0                         0x0101
+#define WCD934X_CPE_FLL_USER_CTL_1                         0x0102
+#define WCD934X_CPE_FLL_USER_CTL_2                         0x0103
+#define WCD934X_CPE_FLL_USER_CTL_3                         0x0104
+#define WCD934X_CPE_FLL_USER_CTL_4                         0x0105
+#define WCD934X_CPE_FLL_USER_CTL_5                         0x0106
+#define WCD934X_CPE_FLL_USER_CTL_6                         0x0107
+#define WCD934X_CPE_FLL_USER_CTL_7                         0x0108
+#define WCD934X_CPE_FLL_USER_CTL_8                         0x0109
+#define WCD934X_CPE_FLL_USER_CTL_9                         0x010a
+#define WCD934X_CPE_FLL_L_VAL_CTL_0                        0x010b
+#define WCD934X_CPE_FLL_L_VAL_CTL_1                        0x010c
+#define WCD934X_CPE_FLL_DSM_FRAC_CTL_0                     0x010d
+#define WCD934X_CPE_FLL_DSM_FRAC_CTL_1                     0x010e
+#define WCD934X_CPE_FLL_CONFIG_CTL_0                       0x010f
+#define WCD934X_CPE_FLL_CONFIG_CTL_1                       0x0110
+#define WCD934X_CPE_FLL_CONFIG_CTL_2                       0x0111
+#define WCD934X_CPE_FLL_CONFIG_CTL_3                       0x0112
+#define WCD934X_CPE_FLL_CONFIG_CTL_4                       0x0113
+#define WCD934X_CPE_FLL_TEST_CTL_0                         0x0114
+#define WCD934X_CPE_FLL_TEST_CTL_1                         0x0115
+#define WCD934X_CPE_FLL_TEST_CTL_2                         0x0116
+#define WCD934X_CPE_FLL_TEST_CTL_3                         0x0117
+#define WCD934X_CPE_FLL_TEST_CTL_4                         0x0118
+#define WCD934X_CPE_FLL_TEST_CTL_5                         0x0119
+#define WCD934X_CPE_FLL_TEST_CTL_6                         0x011a
+#define WCD934X_CPE_FLL_TEST_CTL_7                         0x011b
+#define WCD934X_CPE_FLL_FREQ_CTL_0                         0x011c
+#define WCD934X_CPE_FLL_FREQ_CTL_1                         0x011d
+#define WCD934X_CPE_FLL_FREQ_CTL_2                         0x011e
+#define WCD934X_CPE_FLL_FREQ_CTL_3                         0x011f
+#define WCD934X_CPE_FLL_SSC_CTL_0                          0x0120
+#define WCD934X_CPE_FLL_SSC_CTL_1                          0x0121
+#define WCD934X_CPE_FLL_SSC_CTL_2                          0x0122
+#define WCD934X_CPE_FLL_SSC_CTL_3                          0x0123
+#define WCD934X_CPE_FLL_FLL_MODE                           0x0124
+#define WCD934X_CPE_FLL_STATUS_0                           0x0125
+#define WCD934X_CPE_FLL_STATUS_1                           0x0126
+#define WCD934X_CPE_FLL_STATUS_2                           0x0127
+#define WCD934X_CPE_FLL_STATUS_3                           0x0128
+#define WCD934X_I2S_FLL_USER_CTL_0                         0x0141
+#define WCD934X_I2S_FLL_USER_CTL_1                         0x0142
+#define WCD934X_I2S_FLL_USER_CTL_2                         0x0143
+#define WCD934X_I2S_FLL_USER_CTL_3                         0x0144
+#define WCD934X_I2S_FLL_USER_CTL_4                         0x0145
+#define WCD934X_I2S_FLL_USER_CTL_5                         0x0146
+#define WCD934X_I2S_FLL_USER_CTL_6                         0x0147
+#define WCD934X_I2S_FLL_USER_CTL_7                         0x0148
+#define WCD934X_I2S_FLL_USER_CTL_8                         0x0149
+#define WCD934X_I2S_FLL_USER_CTL_9                         0x014a
+#define WCD934X_I2S_FLL_L_VAL_CTL_0                        0x014b
+#define WCD934X_I2S_FLL_L_VAL_CTL_1                        0x014c
+#define WCD934X_I2S_FLL_DSM_FRAC_CTL_0                     0x014d
+#define WCD934X_I2S_FLL_DSM_FRAC_CTL_1                     0x014e
+#define WCD934X_I2S_FLL_CONFIG_CTL_0                       0x014f
+#define WCD934X_I2S_FLL_CONFIG_CTL_1                       0x0150
+#define WCD934X_I2S_FLL_CONFIG_CTL_2                       0x0151
+#define WCD934X_I2S_FLL_CONFIG_CTL_3                       0x0152
+#define WCD934X_I2S_FLL_CONFIG_CTL_4                       0x0153
+#define WCD934X_I2S_FLL_TEST_CTL_0                         0x0154
+#define WCD934X_I2S_FLL_TEST_CTL_1                         0x0155
+#define WCD934X_I2S_FLL_TEST_CTL_2                         0x0156
+#define WCD934X_I2S_FLL_TEST_CTL_3                         0x0157
+#define WCD934X_I2S_FLL_TEST_CTL_4                         0x0158
+#define WCD934X_I2S_FLL_TEST_CTL_5                         0x0159
+#define WCD934X_I2S_FLL_TEST_CTL_6                         0x015a
+#define WCD934X_I2S_FLL_TEST_CTL_7                         0x015b
+#define WCD934X_I2S_FLL_FREQ_CTL_0                         0x015c
+#define WCD934X_I2S_FLL_FREQ_CTL_1                         0x015d
+#define WCD934X_I2S_FLL_FREQ_CTL_2                         0x015e
+#define WCD934X_I2S_FLL_FREQ_CTL_3                         0x015f
+#define WCD934X_I2S_FLL_SSC_CTL_0                          0x0160
+#define WCD934X_I2S_FLL_SSC_CTL_1                          0x0161
+#define WCD934X_I2S_FLL_SSC_CTL_2                          0x0162
+#define WCD934X_I2S_FLL_SSC_CTL_3                          0x0163
+#define WCD934X_I2S_FLL_FLL_MODE                           0x0164
+#define WCD934X_I2S_FLL_STATUS_0                           0x0165
+#define WCD934X_I2S_FLL_STATUS_1                           0x0166
+#define WCD934X_I2S_FLL_STATUS_2                           0x0167
+#define WCD934X_I2S_FLL_STATUS_3                           0x0168
+#define WCD934X_SB_FLL_USER_CTL_0                          0x0181
+#define WCD934X_SB_FLL_USER_CTL_1                          0x0182
+#define WCD934X_SB_FLL_USER_CTL_2                          0x0183
+#define WCD934X_SB_FLL_USER_CTL_3                          0x0184
+#define WCD934X_SB_FLL_USER_CTL_4                          0x0185
+#define WCD934X_SB_FLL_USER_CTL_5                          0x0186
+#define WCD934X_SB_FLL_USER_CTL_6                          0x0187
+#define WCD934X_SB_FLL_USER_CTL_7                          0x0188
+#define WCD934X_SB_FLL_USER_CTL_8                          0x0189
+#define WCD934X_SB_FLL_USER_CTL_9                          0x018a
+#define WCD934X_SB_FLL_L_VAL_CTL_0                         0x018b
+#define WCD934X_SB_FLL_L_VAL_CTL_1                         0x018c
+#define WCD934X_SB_FLL_DSM_FRAC_CTL_0                      0x018d
+#define WCD934X_SB_FLL_DSM_FRAC_CTL_1                      0x018e
+#define WCD934X_SB_FLL_CONFIG_CTL_0                        0x018f
+#define WCD934X_SB_FLL_CONFIG_CTL_1                        0x0190
+#define WCD934X_SB_FLL_CONFIG_CTL_2                        0x0191
+#define WCD934X_SB_FLL_CONFIG_CTL_3                        0x0192
+#define WCD934X_SB_FLL_CONFIG_CTL_4                        0x0193
+#define WCD934X_SB_FLL_TEST_CTL_0                          0x0194
+#define WCD934X_SB_FLL_TEST_CTL_1                          0x0195
+#define WCD934X_SB_FLL_TEST_CTL_2                          0x0196
+#define WCD934X_SB_FLL_TEST_CTL_3                          0x0197
+#define WCD934X_SB_FLL_TEST_CTL_4                          0x0198
+#define WCD934X_SB_FLL_TEST_CTL_5                          0x0199
+#define WCD934X_SB_FLL_TEST_CTL_6                          0x019a
+#define WCD934X_SB_FLL_TEST_CTL_7                          0x019b
+#define WCD934X_SB_FLL_FREQ_CTL_0                          0x019c
+#define WCD934X_SB_FLL_FREQ_CTL_1                          0x019d
+#define WCD934X_SB_FLL_FREQ_CTL_2                          0x019e
+#define WCD934X_SB_FLL_FREQ_CTL_3                          0x019f
+#define WCD934X_SB_FLL_SSC_CTL_0                           0x01a0
+#define WCD934X_SB_FLL_SSC_CTL_1                           0x01a1
+#define WCD934X_SB_FLL_SSC_CTL_2                           0x01a2
+#define WCD934X_SB_FLL_SSC_CTL_3                           0x01a3
+#define WCD934X_SB_FLL_FLL_MODE                            0x01a4
+#define WCD934X_SB_FLL_STATUS_0                            0x01a5
+#define WCD934X_SB_FLL_STATUS_1                            0x01a6
+#define WCD934X_SB_FLL_STATUS_2                            0x01a7
+#define WCD934X_SB_FLL_STATUS_3                            0x01a8
+#define WCD934X_PAGE2_PAGE_REGISTER                        0x0200
+#define WCD934X_CPE_SS_CPE_CTL                             0x0201
+#define WCD934X_CPE_SS_PWR_SYS_PSTATE_CTL_0                0x0202
+#define WCD934X_CPE_SS_PWR_SYS_PSTATE_CTL_1                0x0203
+#define WCD934X_CPE_SS_PWR_CPEFLL_CTL                      0x0204
+#define WCD934X_CPE_SS_PWR_CPE_SYSMEM_DEEPSLP_0            0x0205
+#define WCD934X_CPE_SS_PWR_CPE_SYSMEM_DEEPSLP_1            0x0206
+#define WCD934X_CPE_SS_PWR_CPE_SYSMEM_DEEPSLP_OVERRIDE     0x0207
+#define WCD934X_CPE_SS_PWR_CPE_SYSMEM_SHUTDOWN_0           0x0208
+#define WCD934X_CPE_SS_PWR_CPE_SYSMEM_SHUTDOWN_1           0x0209
+#define WCD934X_CPE_SS_PWR_CPE_SYSMEM_SHUTDOWN_2           0x020a
+#define WCD934X_CPE_SS_PWR_CPE_SYSMEM_SHUTDOWN_3           0x020b
+#define WCD934X_CPE_SS_PWR_CPE_SYSMEM_SHUTDOWN_4           0x020c
+#define WCD934X_CPE_SS_PWR_CPE_SYSMEM_SHUTDOWN_5           0x020d
+#define WCD934X_CPE_SS_PWR_CPE_DRAM1_SHUTDOWN              0x020e
+#define WCD934X_CPE_SS_SOC_SW_COLLAPSE_CTL                 0x020f
+#define WCD934X_CPE_SS_SOC_SW_COLLAPSE_OVERRIDE_CTL        0x0210
+#define WCD934X_CPE_SS_SOC_SW_COLLAPSE_OVERRIDE_CTL1       0x0211
+#define WCD934X_CPE_SS_US_BUF_INT_PERIOD                   0x0212
+#define WCD934X_CPE_SS_CPARMAD_BUFRDY_INT_PERIOD           0x0213
+#define WCD934X_CPE_SS_SVA_CFG                             0x0214
+#define WCD934X_CPE_SS_US_CFG                              0x0215
+#define WCD934X_CPE_SS_MAD_CTL                             0x0216
+#define WCD934X_CPE_SS_CPAR_CTL                            0x0217
+#define WCD934X_CPE_SS_DMIC0_CTL                           0x0218
+#define WCD934X_CPE_SS_DMIC1_CTL                           0x0219
+#define WCD934X_CPE_SS_DMIC2_CTL                           0x021a
+#define WCD934X_CPE_SS_DMIC_CFG                            0x021b
+#define WCD934X_CPE_SS_CPAR_CFG                            0x021c
+#define WCD934X_CPE_SS_WDOG_CFG                            0x021d
+#define WCD934X_CPE_SS_BACKUP_INT                          0x021e
+#define WCD934X_CPE_SS_STATUS                              0x021f
+#define WCD934X_CPE_SS_CPE_OCD_CFG                         0x0220
+#define WCD934X_CPE_SS_SS_ERROR_INT_MASK_0A                0x0221
+#define WCD934X_CPE_SS_SS_ERROR_INT_MASK_0B                0x0222
+#define WCD934X_CPE_SS_SS_ERROR_INT_MASK_1A                0x0223
+#define WCD934X_CPE_SS_SS_ERROR_INT_MASK_1B                0x0224
+#define WCD934X_CPE_SS_SS_ERROR_INT_STATUS_0A              0x0225
+#define WCD934X_CPE_SS_SS_ERROR_INT_STATUS_0B              0x0226
+#define WCD934X_CPE_SS_SS_ERROR_INT_STATUS_1A              0x0227
+#define WCD934X_CPE_SS_SS_ERROR_INT_STATUS_1B              0x0228
+#define WCD934X_CPE_SS_SS_ERROR_INT_CLEAR_0A               0x0229
+#define WCD934X_CPE_SS_SS_ERROR_INT_CLEAR_0B               0x022a
+#define WCD934X_CPE_SS_SS_ERROR_INT_CLEAR_1A               0x022b
+#define WCD934X_CPE_SS_SS_ERROR_INT_CLEAR_1B               0x022c
+#define WCD934X_SOC_MAD_MAIN_CTL_1                         0x0281
+#define WCD934X_SOC_MAD_MAIN_CTL_2                         0x0282
+#define WCD934X_SOC_MAD_AUDIO_CTL_1                        0x0283
+#define WCD934X_SOC_MAD_AUDIO_CTL_2                        0x0284
+#define WCD934X_SOC_MAD_AUDIO_CTL_3                        0x0285
+#define WCD934X_SOC_MAD_AUDIO_CTL_4                        0x0286
+#define WCD934X_SOC_MAD_AUDIO_CTL_5                        0x0287
+#define WCD934X_SOC_MAD_AUDIO_CTL_6                        0x0288
+#define WCD934X_SOC_MAD_AUDIO_CTL_7                        0x0289
+#define WCD934X_SOC_MAD_AUDIO_CTL_8                        0x028a
+#define WCD934X_SOC_MAD_AUDIO_IIR_CTL_PTR                  0x028b
+#define WCD934X_SOC_MAD_AUDIO_IIR_CTL_VAL                  0x028c
+#define WCD934X_SOC_MAD_ULTR_CTL_1                         0x028d
+#define WCD934X_SOC_MAD_ULTR_CTL_2                         0x028e
+#define WCD934X_SOC_MAD_ULTR_CTL_3                         0x028f
+#define WCD934X_SOC_MAD_ULTR_CTL_4                         0x0290
+#define WCD934X_SOC_MAD_ULTR_CTL_5                         0x0291
+#define WCD934X_SOC_MAD_ULTR_CTL_6                         0x0292
+#define WCD934X_SOC_MAD_ULTR_CTL_7                         0x0293
+#define WCD934X_SOC_MAD_BEACON_CTL_1                       0x0294
+#define WCD934X_SOC_MAD_BEACON_CTL_2                       0x0295
+#define WCD934X_SOC_MAD_BEACON_CTL_3                       0x0296
+#define WCD934X_SOC_MAD_BEACON_CTL_4                       0x0297
+#define WCD934X_SOC_MAD_BEACON_CTL_5                       0x0298
+#define WCD934X_SOC_MAD_BEACON_CTL_6                       0x0299
+#define WCD934X_SOC_MAD_BEACON_CTL_7                       0x029a
+#define WCD934X_SOC_MAD_BEACON_CTL_8                       0x029b
+#define WCD934X_SOC_MAD_BEACON_IIR_CTL_PTR                 0x029c
+#define WCD934X_SOC_MAD_BEACON_IIR_CTL_VAL                 0x029d
+#define WCD934X_SOC_MAD_INP_SEL                            0x029e
+#define WCD934X_PAGE4_PAGE_REGISTER                        0x0400
+#define WCD934X_INTR_CFG                                   0x0401
+#define WCD934X_INTR_CLR_COMMIT                            0x0402
+#define WCD934X_INTR_PIN1_MASK0                            0x0409
+#define WCD934X_INTR_PIN1_MASK1                            0x040a
+#define WCD934X_INTR_PIN1_MASK2                            0x040b
+#define WCD934X_INTR_PIN1_MASK3                            0x040c
+#define WCD934X_INTR_PIN1_STATUS0                          0x0411
+#define WCD934X_INTR_PIN1_STATUS1                          0x0412
+#define WCD934X_INTR_PIN1_STATUS2                          0x0413
+#define WCD934X_INTR_PIN1_STATUS3                          0x0414
+#define WCD934X_INTR_PIN1_CLEAR0                           0x0419
+#define WCD934X_INTR_PIN1_CLEAR1                           0x041a
+#define WCD934X_INTR_PIN1_CLEAR2                           0x041b
+#define WCD934X_INTR_PIN1_CLEAR3                           0x041c
+#define WCD934X_INTR_PIN2_MASK3                            0x0424
+#define WCD934X_INTR_PIN2_STATUS3                          0x042c
+#define WCD934X_INTR_PIN2_CLEAR3                           0x0434
+#define WCD934X_INTR_CPESS_SUMRY_MASK2                     0x043b
+#define WCD934X_INTR_CPESS_SUMRY_MASK3                     0x043c
+#define WCD934X_INTR_CPESS_SUMRY_STATUS2                   0x0443
+#define WCD934X_INTR_CPESS_SUMRY_STATUS3                   0x0444
+#define WCD934X_INTR_CPESS_SUMRY_CLEAR2                    0x044b
+#define WCD934X_INTR_CPESS_SUMRY_CLEAR3                    0x044c
+#define WCD934X_INTR_LEVEL0                                0x0461
+#define WCD934X_INTR_LEVEL1                                0x0462
+#define WCD934X_INTR_LEVEL2                                0x0463
+#define WCD934X_INTR_LEVEL3                                0x0464
+#define WCD934X_INTR_BYPASS0                               0x0469
+#define WCD934X_INTR_BYPASS1                               0x046a
+#define WCD934X_INTR_BYPASS2                               0x046b
+#define WCD934X_INTR_BYPASS3                               0x046c
+#define WCD934X_INTR_SET0                                  0x0471
+#define WCD934X_INTR_SET1                                  0x0472
+#define WCD934X_INTR_SET2                                  0x0473
+#define WCD934X_INTR_SET3                                  0x0474
+#define WCD934X_INTR_CODEC_MISC_MASK                       0x04b1
+#define WCD934X_INTR_CODEC_MISC_STATUS                     0x04b2
+#define WCD934X_INTR_CODEC_MISC_CLEAR                      0x04b3
+#define WCD934X_PAGE5_PAGE_REGISTER                        0x0500
+#define WCD934X_SLNQ_DIG_DEVICE                            0x0501
+#define WCD934X_SLNQ_DIG_REVISION                          0x0502
+#define WCD934X_SLNQ_DIG_H_COMMAND                         0x0511
+#define WCD934X_SLNQ_DIG_NUMBER_OF_BYTE_MSB                0x0512
+#define WCD934X_SLNQ_DIG_NUMBER_OF_BYTE_LSB                0x0513
+#define WCD934X_SLNQ_DIG_MASTER_ADDRESS_MSB                0x0514
+#define WCD934X_SLNQ_DIG_MASTER_ADDRESS_LSB                0x0515
+#define WCD934X_SLNQ_DIG_SLAVE_ADDRESS_MSB                 0x0516
+#define WCD934X_SLNQ_DIG_SLAVE_ADDRESS_LSB                 0x0517
+#define WCD934X_SLNQ_DIG_TIMER0_INTERRUPT_MSB              0x0518
+#define WCD934X_SLNQ_DIG_TIMER0_INTERRUPT_LSB              0x0519
+#define WCD934X_SLNQ_DIG_TIMER1_INTERRUPT_MSB              0x051a
+#define WCD934X_SLNQ_DIG_TIMER1_INTERRUPT_LSB              0x051b
+#define WCD934X_SLNQ_DIG_TIMER2_INTERRUPT_MSB              0x051c
+#define WCD934X_SLNQ_DIG_TIMER2_INTERRUPT_LSB              0x051d
+#define WCD934X_SLNQ_DIG_COMM_CTL                          0x0520
+#define WCD934X_SLNQ_DIG_FRAME_CTRL                        0x0542
+#define WCD934X_SLNQ_DIG_PDM_2ND_DATA_CH1_2                0x055c
+#define WCD934X_SLNQ_DIG_PDM_2ND_DATA_CH3_4                0x055d
+#define WCD934X_SLNQ_DIG_PDM_2ND_DATA_CH5                  0x055e
+#define WCD934X_SLNQ_DIG_SW_EVENT_RD                       0x0561
+#define WCD934X_SLNQ_DIG_SW_EVENT_CTRL                     0x0562
+#define WCD934X_SLNQ_DIG_PDM_SELECT_1                      0x0563
+#define WCD934X_SLNQ_DIG_PDM_SELECT_2                      0x0564
+#define WCD934X_SLNQ_DIG_PDM_SELECT_3                      0x0565
+#define WCD934X_SLNQ_DIG_PDM_SAMPLING_FREQ                 0x0566
+#define WCD934X_SLNQ_DIG_PDM_DC_CONVERSION_CTL             0x0569
+#define WCD934X_SLNQ_DIG_PDM_DC_CONVERSION_SEL             0x056a
+#define WCD934X_SLNQ_DIG_PDM_DC_CONV_CHA_MSB               0x056b
+#define WCD934X_SLNQ_DIG_PDM_DC_CONV_CHA_LSB               0x056c
+#define WCD934X_SLNQ_DIG_PDM_DC_CONV_CHB_MSB               0x056d
+#define WCD934X_SLNQ_DIG_PDM_DC_CONV_CHB_LSB               0x056e
+#define WCD934X_SLNQ_DIG_RAM_CNTRL                         0x0571
+#define WCD934X_SLNQ_DIG_SRAM_BANK                         0x0572
+#define WCD934X_SLNQ_DIG_SRAM_BYTE_0                       0x0573
+#define WCD934X_SLNQ_DIG_SRAM_BYTE_1                       0x0574
+#define WCD934X_SLNQ_DIG_SRAM_BYTE_2                       0x0575
+#define WCD934X_SLNQ_DIG_SRAM_BYTE_3                       0x0576
+#define WCD934X_SLNQ_DIG_SRAM_BYTE_4                       0x0577
+#define WCD934X_SLNQ_DIG_SRAM_BYTE_5                       0x0578
+#define WCD934X_SLNQ_DIG_SRAM_BYTE_6                       0x0579
+#define WCD934X_SLNQ_DIG_SRAM_BYTE_7                       0x057a
+#define WCD934X_SLNQ_DIG_SRAM_BYTE_8                       0x057b
+#define WCD934X_SLNQ_DIG_SRAM_BYTE_9                       0x057c
+#define WCD934X_SLNQ_DIG_SRAM_BYTE_A                       0x057d
+#define WCD934X_SLNQ_DIG_SRAM_BYTE_B                       0x057e
+#define WCD934X_SLNQ_DIG_SRAM_BYTE_C                       0x057f
+#define WCD934X_SLNQ_DIG_SRAM_BYTE_D                       0x0580
+#define WCD934X_SLNQ_DIG_SRAM_BYTE_E                       0x0581
+#define WCD934X_SLNQ_DIG_SRAM_BYTE_F                       0x0582
+#define WCD934X_SLNQ_DIG_SRAM_BYTE_10                      0x0583
+#define WCD934X_SLNQ_DIG_SRAM_BYTE_11                      0x0584
+#define WCD934X_SLNQ_DIG_SRAM_BYTE_12                      0x0585
+#define WCD934X_SLNQ_DIG_SRAM_BYTE_13                      0x0586
+#define WCD934X_SLNQ_DIG_SRAM_BYTE_14                      0x0587
+#define WCD934X_SLNQ_DIG_SRAM_BYTE_15                      0x0588
+#define WCD934X_SLNQ_DIG_SRAM_BYTE_16                      0x0589
+#define WCD934X_SLNQ_DIG_SRAM_BYTE_17                      0x058a
+#define WCD934X_SLNQ_DIG_SRAM_BYTE_18                      0x058b
+#define WCD934X_SLNQ_DIG_SRAM_BYTE_19                      0x058c
+#define WCD934X_SLNQ_DIG_SRAM_BYTE_1A                      0x058d
+#define WCD934X_SLNQ_DIG_SRAM_BYTE_1B                      0x058e
+#define WCD934X_SLNQ_DIG_SRAM_BYTE_1C                      0x058f
+#define WCD934X_SLNQ_DIG_SRAM_BYTE_1D                      0x0590
+#define WCD934X_SLNQ_DIG_SRAM_BYTE_1E                      0x0591
+#define WCD934X_SLNQ_DIG_SRAM_BYTE_1F                      0x0592
+#define WCD934X_SLNQ_DIG_SRAM_BYTE_20                      0x0593
+#define WCD934X_SLNQ_DIG_SRAM_BYTE_21                      0x0594
+#define WCD934X_SLNQ_DIG_SRAM_BYTE_22                      0x0595
+#define WCD934X_SLNQ_DIG_SRAM_BYTE_23                      0x0596
+#define WCD934X_SLNQ_DIG_SRAM_BYTE_24                      0x0597
+#define WCD934X_SLNQ_DIG_SRAM_BYTE_25                      0x0598
+#define WCD934X_SLNQ_DIG_SRAM_BYTE_26                      0x0599
+#define WCD934X_SLNQ_DIG_SRAM_BYTE_27                      0x059a
+#define WCD934X_SLNQ_DIG_SRAM_BYTE_28                      0x059b
+#define WCD934X_SLNQ_DIG_SRAM_BYTE_29                      0x059c
+#define WCD934X_SLNQ_DIG_SRAM_BYTE_2A                      0x059d
+#define WCD934X_SLNQ_DIG_SRAM_BYTE_2B                      0x059e
+#define WCD934X_SLNQ_DIG_SRAM_BYTE_2C                      0x059f
+#define WCD934X_SLNQ_DIG_SRAM_BYTE_2D                      0x05a0
+#define WCD934X_SLNQ_DIG_SRAM_BYTE_2E                      0x05a1
+#define WCD934X_SLNQ_DIG_SRAM_BYTE_2F                      0x05a2
+#define WCD934X_SLNQ_DIG_SRAM_BYTE_30                      0x05a3
+#define WCD934X_SLNQ_DIG_SRAM_BYTE_31                      0x05a4
+#define WCD934X_SLNQ_DIG_SRAM_BYTE_32                      0x05a5
+#define WCD934X_SLNQ_DIG_SRAM_BYTE_33                      0x05a6
+#define WCD934X_SLNQ_DIG_SRAM_BYTE_34                      0x05a7
+#define WCD934X_SLNQ_DIG_SRAM_BYTE_35                      0x05a8
+#define WCD934X_SLNQ_DIG_SRAM_BYTE_36                      0x05a9
+#define WCD934X_SLNQ_DIG_SRAM_BYTE_37                      0x05aa
+#define WCD934X_SLNQ_DIG_SRAM_BYTE_38                      0x05ab
+#define WCD934X_SLNQ_DIG_SRAM_BYTE_39                      0x05ac
+#define WCD934X_SLNQ_DIG_SRAM_BYTE_3A                      0x05ad
+#define WCD934X_SLNQ_DIG_SRAM_BYTE_3B                      0x05ae
+#define WCD934X_SLNQ_DIG_SRAM_BYTE_3C                      0x05af
+#define WCD934X_SLNQ_DIG_SRAM_BYTE_3D                      0x05b0
+#define WCD934X_SLNQ_DIG_SRAM_BYTE_3E                      0x05b1
+#define WCD934X_SLNQ_DIG_SRAM_BYTE_3F                      0x05b2
+#define WCD934X_SLNQ_DIG_TOP_CTRL1                         0x05b3
+#define WCD934X_SLNQ_DIG_TOP_CTRL2                         0x05b4
+#define WCD934X_SLNQ_DIG_PDM_CTRL                          0x05b5
+#define WCD934X_SLNQ_DIG_PDM_MUTE_CTRL                     0x05b6
+#define WCD934X_SLNQ_DIG_DEC_BYPASS_CTRL                   0x05b7
+#define WCD934X_SLNQ_DIG_DEC_BYPASS_STATUS                 0x05b8
+#define WCD934X_SLNQ_DIG_DEC_BYPASS_FS                     0x05b9
+#define WCD934X_SLNQ_DIG_DEC_BYPASS_IN_SEL                 0x05ba
+#define WCD934X_SLNQ_DIG_GPOUT_ENABLE                      0x05bb
+#define WCD934X_SLNQ_DIG_GPOUT_VAL                         0x05bc
+#define WCD934X_SLNQ_DIG_ANA_INTERRUPT_MASK                0x05be
+#define WCD934X_SLNQ_DIG_ANA_INTERRUPT_STATUS              0x05bf
+#define WCD934X_SLNQ_DIG_ANA_INTERRUPT_CLR                 0x05c0
+#define WCD934X_SLNQ_DIG_IP_TESTING                        0x05c1
+#define WCD934X_SLNQ_DIG_INTERRUPT_CNTRL                   0x05e3
+#define WCD934X_SLNQ_DIG_INTERRUPT_CNT                     0x05e9
+#define WCD934X_SLNQ_DIG_INTERRUPT_CNT_MSB                 0x05eb
+#define WCD934X_SLNQ_DIG_INTERRUPT_CNT_LSB                 0x05ec
+#define WCD934X_SLNQ_DIG_INTERRUPT_MASK0                   0x05f1
+#define WCD934X_SLNQ_DIG_INTERRUPT_MASK1                   0x05f2
+#define WCD934X_SLNQ_DIG_INTERRUPT_MASK2                   0x05f3
+#define WCD934X_SLNQ_DIG_INTERRUPT_MASK3                   0x05f4
+#define WCD934X_SLNQ_DIG_INTERRUPT_MASK4                   0x05f5
+#define WCD934X_SLNQ_DIG_INTERRUPT_STATUS0                 0x05f6
+#define WCD934X_SLNQ_DIG_INTERRUPT_STATUS1                 0x05f7
+#define WCD934X_SLNQ_DIG_INTERRUPT_STATUS2                 0x05f8
+#define WCD934X_SLNQ_DIG_INTERRUPT_STATUS3                 0x05f9
+#define WCD934X_SLNQ_DIG_INTERRUPT_STATUS4                 0x05fa
+#define WCD934X_SLNQ_DIG_INTERRUPT_CLR0                    0x05fb
+#define WCD934X_SLNQ_DIG_INTERRUPT_CLR1                    0x05fc
+#define WCD934X_SLNQ_DIG_INTERRUPT_CLR2                    0x05fd
+#define WCD934X_SLNQ_DIG_INTERRUPT_CLR3                    0x05fe
+#define WCD934X_SLNQ_DIG_INTERRUPT_CLR4                    0x05ff
+#define WCD934X_ANA_PAGE_REGISTER                          0x0600
+#define WCD934X_ANA_BIAS                                   0x0601
+#define WCD934X_ANA_RCO                                    0x0603
+#define WCD934X_ANA_PAGE6_SPARE2                           0x0604
+#define WCD934X_ANA_PAGE6_SPARE3                           0x0605
+#define WCD934X_ANA_BUCK_CTL                               0x0606
+#define WCD934X_ANA_BUCK_STATUS                            0x0607
+#define WCD934X_ANA_RX_SUPPLIES                            0x0608
+#define WCD934X_ANA_HPH                                    0x0609
+#define WCD934X_ANA_EAR                                    0x060a
+#define WCD934X_ANA_LO_1_2                                 0x060b
+#define WCD934X_ANA_MAD_SETUP                              0x060d
+#define WCD934X_ANA_AMIC1                                  0x060e
+#define WCD934X_ANA_AMIC2                                  0x060f
+#define WCD934X_ANA_AMIC3                                  0x0610
+#define WCD934X_ANA_AMIC4                                  0x0611
+#define WCD934X_ANA_MBHC_MECH                              0x0614
+#define WCD934X_ANA_MBHC_ELECT                             0x0615
+#define WCD934X_ANA_MBHC_ZDET                              0x0616
+#define WCD934X_ANA_MBHC_RESULT_1                          0x0617
+#define WCD934X_ANA_MBHC_RESULT_2                          0x0618
+#define WCD934X_ANA_MBHC_RESULT_3                          0x0619
+#define WCD934X_ANA_MBHC_BTN0                              0x061a
+#define WCD934X_ANA_MBHC_BTN1                              0x061b
+#define WCD934X_ANA_MBHC_BTN2                              0x061c
+#define WCD934X_ANA_MBHC_BTN3                              0x061d
+#define WCD934X_ANA_MBHC_BTN4                              0x061e
+#define WCD934X_ANA_MBHC_BTN5                              0x061f
+#define WCD934X_ANA_MBHC_BTN6                              0x0620
+#define WCD934X_ANA_MBHC_BTN7                              0x0621
+#define WCD934X_ANA_MICB1                                  0x0622
+#define WCD934X_ANA_MICB2                                  0x0623
+#define WCD934X_ANA_MICB2_RAMP                             0x0624
+#define WCD934X_ANA_MICB3                                  0x0625
+#define WCD934X_ANA_MICB4                                  0x0626
+#define WCD934X_ANA_VBADC                                  0x0627
+#define WCD934X_BIAS_CTL                                   0x0628
+#define WCD934X_BIAS_VBG_FINE_ADJ                          0x0629
+#define WCD934X_RCO_CTRL_1                                 0x062e
+#define WCD934X_RCO_CTRL_2                                 0x062f
+#define WCD934X_RCO_CAL                                    0x0630
+#define WCD934X_RCO_CAL_1                                  0x0631
+#define WCD934X_RCO_CAL_2                                  0x0632
+#define WCD934X_RCO_TEST_CTRL                              0x0633
+#define WCD934X_RCO_CAL_OUT_1                              0x0634
+#define WCD934X_RCO_CAL_OUT_2                              0x0635
+#define WCD934X_RCO_CAL_OUT_3                              0x0636
+#define WCD934X_RCO_CAL_OUT_4                              0x0637
+#define WCD934X_RCO_CAL_OUT_5                              0x0638
+#define WCD934X_SIDO_MODE_1                                0x063a
+#define WCD934X_SIDO_MODE_2                                0x063b
+#define WCD934X_SIDO_MODE_3                                0x063c
+#define WCD934X_SIDO_MODE_4                                0x063d
+#define WCD934X_SIDO_VCL_1                                 0x063e
+#define WCD934X_SIDO_VCL_2                                 0x063f
+#define WCD934X_SIDO_VCL_3                                 0x0640
+#define WCD934X_SIDO_CCL_1                                 0x0641
+#define WCD934X_SIDO_CCL_2                                 0x0642
+#define WCD934X_SIDO_CCL_3                                 0x0643
+#define WCD934X_SIDO_CCL_4                                 0x0644
+#define WCD934X_SIDO_CCL_5                                 0x0645
+#define WCD934X_SIDO_CCL_6                                 0x0646
+#define WCD934X_SIDO_CCL_7                                 0x0647
+#define WCD934X_SIDO_CCL_8                                 0x0648
+#define WCD934X_SIDO_CCL_9                                 0x0649
+#define WCD934X_SIDO_CCL_10                                0x064a
+#define WCD934X_SIDO_FILTER_1                              0x064b
+#define WCD934X_SIDO_FILTER_2                              0x064c
+#define WCD934X_SIDO_DRIVER_1                              0x064d
+#define WCD934X_SIDO_DRIVER_2                              0x064e
+#define WCD934X_SIDO_DRIVER_3                              0x064f
+#define WCD934X_SIDO_CAL_CODE_EXT_1                        0x0650
+#define WCD934X_SIDO_CAL_CODE_EXT_2                        0x0651
+#define WCD934X_SIDO_CAL_CODE_OUT_1                        0x0652
+#define WCD934X_SIDO_CAL_CODE_OUT_2                        0x0653
+#define WCD934X_SIDO_TEST_1                                0x0654
+#define WCD934X_SIDO_TEST_2                                0x0655
+#define WCD934X_MBHC_CTL_CLK                               0x0656
+#define WCD934X_MBHC_CTL_ANA                               0x0657
+#define WCD934X_MBHC_CTL_SPARE_1                           0x0658
+#define WCD934X_MBHC_CTL_SPARE_2                           0x0659
+#define WCD934X_MBHC_CTL_BCS                               0x065a
+#define WCD934X_MBHC_STATUS_SPARE_1                        0x065b
+#define WCD934X_MBHC_TEST_CTL                              0x065c
+#define WCD934X_VBADC_SUBBLOCK_EN                          0x065d
+#define WCD934X_VBADC_IBIAS_FE                             0x065e
+#define WCD934X_VBADC_BIAS_ADC                             0x065f
+#define WCD934X_VBADC_FE_CTRL                              0x0660
+#define WCD934X_VBADC_ADC_REF                              0x0661
+#define WCD934X_VBADC_ADC_IO                               0x0662
+#define WCD934X_VBADC_ADC_SAR                              0x0663
+#define WCD934X_VBADC_DEBUG                                0x0664
+#define WCD934X_LDOH_MODE                                  0x0667
+#define WCD934X_LDOH_BIAS                                  0x0668
+#define WCD934X_LDOH_STB_LOADS                             0x0669
+#define WCD934X_LDOH_SLOWRAMP                              0x066a
+#define WCD934X_MICB1_TEST_CTL_1                           0x066b
+#define WCD934X_MICB1_TEST_CTL_2                           0x066c
+#define WCD934X_MICB1_TEST_CTL_3                           0x066d
+#define WCD934X_MICB2_TEST_CTL_1                           0x066e
+#define WCD934X_MICB2_TEST_CTL_2                           0x066f
+#define WCD934X_MICB2_TEST_CTL_3                           0x0670
+#define WCD934X_MICB3_TEST_CTL_1                           0x0671
+#define WCD934X_MICB3_TEST_CTL_2                           0x0672
+#define WCD934X_MICB3_TEST_CTL_3                           0x0673
+#define WCD934X_MICB4_TEST_CTL_1                           0x0674
+#define WCD934X_MICB4_TEST_CTL_2                           0x0675
+#define WCD934X_MICB4_TEST_CTL_3                           0x0676
+#define WCD934X_TX_COM_ADC_VCM                             0x0677
+#define WCD934X_TX_COM_BIAS_ATEST                          0x0678
+#define WCD934X_TX_COM_ADC_INT1_IB                         0x0679
+#define WCD934X_TX_COM_ADC_INT2_IB                         0x067a
+#define WCD934X_TX_COM_TXFE_DIV_CTL                        0x067b
+#define WCD934X_TX_COM_TXFE_DIV_START                      0x067c
+#define WCD934X_TX_COM_TXFE_DIV_STOP_9P6M                  0x067d
+#define WCD934X_TX_COM_TXFE_DIV_STOP_12P288M               0x067e
+#define WCD934X_TX_1_2_TEST_EN                             0x067f
+#define WCD934X_TX_1_2_ADC_IB                              0x0680
+#define WCD934X_TX_1_2_ATEST_REFCTL                        0x0681
+#define WCD934X_TX_1_2_TEST_CTL                            0x0682
+#define WCD934X_TX_1_2_TEST_BLK_EN                         0x0683
+#define WCD934X_TX_1_2_TXFE_CLKDIV                         0x0684
+#define WCD934X_TX_1_2_SAR1_ERR                            0x0685
+#define WCD934X_TX_1_2_SAR2_ERR                            0x0686
+#define WCD934X_TX_3_4_TEST_EN                             0x0687
+#define WCD934X_TX_3_4_ADC_IB                              0x0688
+#define WCD934X_TX_3_4_ATEST_REFCTL                        0x0689
+#define WCD934X_TX_3_4_TEST_CTL                            0x068a
+#define WCD934X_TX_3_4_TEST_BLK_EN                         0x068b
+#define WCD934X_TX_3_4_TXFE_CLKDIV                         0x068c
+#define WCD934X_TX_3_4_SAR1_ERR                            0x068d
+#define WCD934X_TX_3_4_SAR2_ERR                            0x068e
+#define WCD934X_CLASSH_MODE_1                              0x0697
+#define WCD934X_CLASSH_MODE_2                              0x0698
+#define WCD934X_CLASSH_MODE_3                              0x0699
+#define WCD934X_CLASSH_CTRL_VCL_1                          0x069a
+#define WCD934X_CLASSH_CTRL_VCL_2                          0x069b
+#define WCD934X_CLASSH_CTRL_CCL_1                          0x069c
+#define WCD934X_CLASSH_CTRL_CCL_2                          0x069d
+#define WCD934X_CLASSH_CTRL_CCL_3                          0x069e
+#define WCD934X_CLASSH_CTRL_CCL_4                          0x069f
+#define WCD934X_CLASSH_CTRL_CCL_5                          0x06a0
+#define WCD934X_CLASSH_BUCK_TMUX_A_D                       0x06a1
+#define WCD934X_CLASSH_BUCK_SW_DRV_CNTL                    0x06a2
+#define WCD934X_CLASSH_SPARE                               0x06a3
+#define WCD934X_FLYBACK_EN                                 0x06a4
+#define WCD934X_FLYBACK_VNEG_CTRL_1                        0x06a5
+#define WCD934X_FLYBACK_VNEG_CTRL_2                        0x06a6
+#define WCD934X_FLYBACK_VNEG_CTRL_3                        0x06a7
+#define WCD934X_FLYBACK_VNEG_CTRL_4                        0x06a8
+#define WCD934X_FLYBACK_VNEG_CTRL_5                        0x06a9
+#define WCD934X_FLYBACK_VNEG_CTRL_6                        0x06aa
+#define WCD934X_FLYBACK_VNEG_CTRL_7                        0x06ab
+#define WCD934X_FLYBACK_VNEG_CTRL_8                        0x06ac
+#define WCD934X_FLYBACK_VNEG_CTRL_9                        0x06ad
+#define WCD934X_FLYBACK_VNEGDAC_CTRL_1                     0x06ae
+#define WCD934X_FLYBACK_VNEGDAC_CTRL_2                     0x06af
+#define WCD934X_FLYBACK_VNEGDAC_CTRL_3                     0x06b0
+#define WCD934X_FLYBACK_CTRL_1                             0x06b1
+#define WCD934X_FLYBACK_TEST_CTL                           0x06b2
+#define WCD934X_RX_AUX_SW_CTL                              0x06b3
+#define WCD934X_RX_PA_AUX_IN_CONN                          0x06b4
+#define WCD934X_RX_TIMER_DIV                               0x06b5
+#define WCD934X_RX_OCP_CTL                                 0x06b6
+#define WCD934X_RX_OCP_COUNT                               0x06b7
+#define WCD934X_RX_BIAS_EAR_DAC                            0x06b8
+#define WCD934X_RX_BIAS_EAR_AMP                            0x06b9
+#define WCD934X_RX_BIAS_HPH_LDO                            0x06ba
+#define WCD934X_RX_BIAS_HPH_PA                             0x06bb
+#define WCD934X_RX_BIAS_HPH_RDACBUFF_CNP2                  0x06bc
+#define WCD934X_RX_BIAS_HPH_RDAC_LDO                       0x06bd
+#define WCD934X_RX_BIAS_HPH_CNP1                           0x06be
+#define WCD934X_RX_BIAS_HPH_LOWPOWER                       0x06bf
+#define WCD934X_RX_BIAS_DIFFLO_PA                          0x06c0
+#define WCD934X_RX_BIAS_DIFFLO_REF                         0x06c1
+#define WCD934X_RX_BIAS_DIFFLO_LDO                         0x06c2
+#define WCD934X_RX_BIAS_SELO_DAC_PA                        0x06c3
+#define WCD934X_RX_BIAS_BUCK_RST                           0x06c4
+#define WCD934X_RX_BIAS_BUCK_VREF_ERRAMP                   0x06c5
+#define WCD934X_RX_BIAS_FLYB_ERRAMP                        0x06c6
+#define WCD934X_RX_BIAS_FLYB_BUFF                          0x06c7
+#define WCD934X_RX_BIAS_FLYB_MID_RST                       0x06c8
+#define WCD934X_HPH_L_STATUS                               0x06c9
+#define WCD934X_HPH_R_STATUS                               0x06ca
+#define WCD934X_HPH_CNP_EN                                 0x06cb
+#define WCD934X_HPH_CNP_WG_CTL                             0x06cc
+#define WCD934X_HPH_CNP_WG_TIME                            0x06cd
+#define WCD934X_HPH_OCP_CTL                                0x06ce
+#define WCD934X_HPH_AUTO_CHOP                              0x06cf
+#define WCD934X_HPH_CHOP_CTL                               0x06d0
+#define WCD934X_HPH_PA_CTL1                                0x06d1
+#define WCD934X_HPH_PA_CTL2                                0x06d2
+#define WCD934X_HPH_L_EN                                   0x06d3
+#define WCD934X_HPH_L_TEST                                 0x06d4
+#define WCD934X_HPH_L_ATEST                                0x06d5
+#define WCD934X_HPH_R_EN                                   0x06d6
+#define WCD934X_HPH_R_TEST                                 0x06d7
+#define WCD934X_HPH_R_ATEST                                0x06d8
+#define WCD934X_HPH_RDAC_CLK_CTL1                          0x06d9
+#define WCD934X_HPH_RDAC_CLK_CTL2                          0x06da
+#define WCD934X_HPH_RDAC_LDO_CTL                           0x06db
+#define WCD934X_HPH_RDAC_CHOP_CLK_LP_CTL                   0x06dc
+#define WCD934X_HPH_REFBUFF_UHQA_CTL                       0x06dd
+#define WCD934X_HPH_REFBUFF_LP_CTL                         0x06de
+#define WCD934X_HPH_L_DAC_CTL                              0x06df
+#define WCD934X_HPH_R_DAC_CTL                              0x06e0
+#define WCD934X_EAR_EN_REG                                 0x06e1
+#define WCD934X_EAR_CMBUFF                                 0x06e2
+#define WCD934X_EAR_ICTL                                   0x06e3
+#define WCD934X_EAR_EN_DBG_CTL                             0x06e4
+#define WCD934X_EAR_CNP                                    0x06e5
+#define WCD934X_EAR_DAC_CTL_ATEST                          0x06e6
+#define WCD934X_EAR_STATUS_REG                             0x06e7
+#define WCD934X_EAR_EAR_MISC                               0x06e8
+#define WCD934X_DIFF_LO_MISC                               0x06e9
+#define WCD934X_DIFF_LO_LO2_COMPANDER                      0x06ea
+#define WCD934X_DIFF_LO_LO1_COMPANDER                      0x06eb
+#define WCD934X_DIFF_LO_COMMON                             0x06ec
+#define WCD934X_DIFF_LO_BYPASS_EN                          0x06ed
+#define WCD934X_DIFF_LO_CNP                                0x06ee
+#define WCD934X_DIFF_LO_CORE_OUT_PROG                      0x06ef
+#define WCD934X_DIFF_LO_LDO_OUT_PROG                       0x06f0
+#define WCD934X_DIFF_LO_COM_SWCAP_REFBUF_FREQ              0x06f1
+#define WCD934X_DIFF_LO_COM_PA_FREQ                        0x06f2
+#define WCD934X_DIFF_LO_RESERVED_REG                       0x06f3
+#define WCD934X_DIFF_LO_LO1_STATUS_1                       0x06f4
+#define WCD934X_DIFF_LO_LO1_STATUS_2                       0x06f5
+#define WCD934X_ANA_NEW_PAGE_REGISTER                      0x0700
+#define WCD934X_HPH_NEW_ANA_HPH2                           0x0701
+#define WCD934X_HPH_NEW_ANA_HPH3                           0x0702
+#define WCD934X_SLNQ_ANA_EN                                0x0703
+#define WCD934X_SLNQ_ANA_STATUS                            0x0704
+#define WCD934X_SLNQ_ANA_LDO_CONFIG                        0x0705
+#define WCD934X_SLNQ_ANA_LDO_OCP_CONFIG                    0x0706
+#define WCD934X_SLNQ_ANA_TX_LDO_CONFIG                     0x0707
+#define WCD934X_SLNQ_ANA_TX_DRV_CONFIG                     0x0708
+#define WCD934X_SLNQ_ANA_RX_CONFIG_1                       0x0709
+#define WCD934X_SLNQ_ANA_RX_CONFIG_2                       0x070a
+#define WCD934X_SLNQ_ANA_PLL_ENABLES                       0x070b
+#define WCD934X_SLNQ_ANA_PLL_PRESET                        0x070c
+#define WCD934X_SLNQ_ANA_PLL_STATUS                        0x070d
+#define WCD934X_CLK_SYS_PLL_ENABLES                        0x070e
+#define WCD934X_CLK_SYS_PLL_PRESET                         0x070f
+#define WCD934X_CLK_SYS_PLL_STATUS                         0x0710
+#define WCD934X_CLK_SYS_MCLK_PRG                           0x0711
+#define WCD934X_CLK_SYS_MCLK2_PRG1                         0x0712
+#define WCD934X_CLK_SYS_MCLK2_PRG2                         0x0713
+#define WCD934X_CLK_SYS_XO_PRG                             0x0714
+#define WCD934X_CLK_SYS_XO_CAP_XTP                         0x0715
+#define WCD934X_CLK_SYS_XO_CAP_XTM                         0x0716
+#define WCD934X_BOOST_BST_EN_DLY                           0x0718
+#define WCD934X_BOOST_CTRL_ILIM                            0x0719
+#define WCD934X_BOOST_VOUT_SETTING                         0x071a
+#define WCD934X_SIDO_NEW_VOUT_A_STARTUP                    0x071b
+#define WCD934X_SIDO_NEW_VOUT_D_STARTUP                    0x071c
+#define WCD934X_SIDO_NEW_VOUT_D_FREQ1                      0x071d
+#define WCD934X_SIDO_NEW_VOUT_D_FREQ2                      0x071e
+#define WCD934X_MBHC_NEW_ELECT_REM_CLAMP_CTL               0x071f
+#define WCD934X_MBHC_NEW_CTL_1                             0x0720
+#define WCD934X_MBHC_NEW_CTL_2                             0x0721
+#define WCD934X_MBHC_NEW_PLUG_DETECT_CTL                   0x0722
+#define WCD934X_MBHC_NEW_ZDET_ANA_CTL                      0x0723
+#define WCD934X_MBHC_NEW_ZDET_RAMP_CTL                     0x0724
+#define WCD934X_MBHC_NEW_FSM_STATUS                        0x0725
+#define WCD934X_MBHC_NEW_ADC_RESULT                        0x0726
+#define WCD934X_TX_NEW_AMIC_4_5_SEL                        0x0727
+#define WCD934X_VBADC_NEW_ADC_MODE                         0x072f
+#define WCD934X_VBADC_NEW_ADC_DOUTMSB                      0x0730
+#define WCD934X_VBADC_NEW_ADC_DOUTLSB                      0x0731
+#define WCD934X_HPH_NEW_INT_RDAC_GAIN_CTL                  0x0732
+#define WCD934X_HPH_NEW_INT_RDAC_HD2_CTL                   0x0733
+#define WCD934X_HPH_NEW_INT_RDAC_HD2_CTL_L                 0x0733
+#define WCD934X_HPH_NEW_INT_RDAC_VREF_CTL                  0x0734
+#define WCD934X_HPH_NEW_INT_RDAC_OVERRIDE_CTL              0x0735
+#define WCD934X_HPH_NEW_INT_RDAC_MISC1                     0x0736
+#define WCD934X_HPH_NEW_INT_RDAC_HD2_CTL_R                 0x0736
+#define WCD934X_HPH_NEW_INT_PA_MISC1                       0x0737
+#define WCD934X_HPH_NEW_INT_PA_MISC2                       0x0738
+#define WCD934X_HPH_NEW_INT_PA_RDAC_MISC                   0x0739
+#define WCD934X_HPH_NEW_INT_HPH_TIMER1                     0x073a
+#define WCD934X_HPH_NEW_INT_HPH_TIMER2                     0x073b
+#define WCD934X_HPH_NEW_INT_HPH_TIMER3                     0x073c
+#define WCD934X_HPH_NEW_INT_HPH_TIMER4                     0x073d
+#define WCD934X_HPH_NEW_INT_PA_RDAC_MISC2                  0x073e
+#define WCD934X_HPH_NEW_INT_PA_RDAC_MISC3                  0x073f
+#define WCD934X_RX_NEW_INT_HPH_RDAC_BIAS_LOHIFI            0x0745
+#define WCD934X_RX_NEW_INT_HPH_RDAC_BIAS_ULP               0x0746
+#define WCD934X_RX_NEW_INT_HPH_RDAC_LDO_LP                 0x0747
+#define WCD934X_SLNQ_INT_ANA_INT_LDO_TEST                  0x074b
+#define WCD934X_SLNQ_INT_ANA_INT_LDO_DEBUG_1               0x074c
+#define WCD934X_SLNQ_INT_ANA_INT_LDO_DEBUG_2               0x074d
+#define WCD934X_SLNQ_INT_ANA_INT_TX_LDO_TEST               0x074e
+#define WCD934X_SLNQ_INT_ANA_INT_TX_DRV_TEST               0x074f
+#define WCD934X_SLNQ_INT_ANA_INT_RX_TEST                   0x0750
+#define WCD934X_SLNQ_INT_ANA_INT_RX_TEST_STATUS            0x0751
+#define WCD934X_SLNQ_INT_ANA_INT_RX_DEBUG_1                0x0752
+#define WCD934X_SLNQ_INT_ANA_INT_RX_DEBUG_2                0x0753
+#define WCD934X_SLNQ_INT_ANA_INT_CLK_CTRL                  0x0754
+#define WCD934X_SLNQ_INT_ANA_INT_RESERVED_1                0x0755
+#define WCD934X_SLNQ_INT_ANA_INT_RESERVED_2                0x0756
+#define WCD934X_SLNQ_INT_ANA_INT_PLL_POST_DIV_REG0         0x0757
+#define WCD934X_SLNQ_INT_ANA_INT_PLL_POST_DIV_REG1         0x0758
+#define WCD934X_SLNQ_INT_ANA_INT_PLL_REF_DIV_REG0          0x0759
+#define WCD934X_SLNQ_INT_ANA_INT_PLL_REF_DIV_REG1          0x075a
+#define WCD934X_SLNQ_INT_ANA_INT_PLL_FILTER_REG0           0x075b
+#define WCD934X_SLNQ_INT_ANA_INT_PLL_FILTER_REG1           0x075c
+#define WCD934X_SLNQ_INT_ANA_INT_PLL_L_VAL                 0x075d
+#define WCD934X_SLNQ_INT_ANA_INT_PLL_M_VAL                 0x075e
+#define WCD934X_SLNQ_INT_ANA_INT_PLL_N_VAL                 0x075f
+#define WCD934X_SLNQ_INT_ANA_INT_PLL_TEST_REG0             0x0760
+#define WCD934X_SLNQ_INT_ANA_INT_PLL_PFD_CP_DSM_PROG       0x0761
+#define WCD934X_SLNQ_INT_ANA_INT_PLL_VCO_PROG              0x0762
+#define WCD934X_SLNQ_INT_ANA_INT_PLL_TEST_REG1             0x0763
+#define WCD934X_SLNQ_INT_ANA_INT_PLL_LDO_LOCK_CFG          0x0764
+#define WCD934X_SLNQ_INT_ANA_INT_PLL_DIG_LOCK_DET_CFG      0x0765
+#define WCD934X_CLK_SYS_INT_POST_DIV_REG0                  0x076c
+#define WCD934X_CLK_SYS_INT_POST_DIV_REG1                  0x076d
+#define WCD934X_CLK_SYS_INT_REF_DIV_REG0                   0x076e
+#define WCD934X_CLK_SYS_INT_REF_DIV_REG1                   0x076f
+#define WCD934X_CLK_SYS_INT_FILTER_REG0                    0x0770
+#define WCD934X_CLK_SYS_INT_FILTER_REG1                    0x0771
+#define WCD934X_CLK_SYS_INT_PLL_L_VAL                      0x0772
+#define WCD934X_CLK_SYS_INT_PLL_M_VAL                      0x0773
+#define WCD934X_CLK_SYS_INT_PLL_N_VAL                      0x0774
+#define WCD934X_CLK_SYS_INT_TEST_REG0                      0x0775
+#define WCD934X_CLK_SYS_INT_PFD_CP_DSM_PROG                0x0776
+#define WCD934X_CLK_SYS_INT_VCO_PROG                       0x0777
+#define WCD934X_CLK_SYS_INT_TEST_REG1                      0x0778
+#define WCD934X_CLK_SYS_INT_LDO_LOCK_CFG                   0x0779
+#define WCD934X_CLK_SYS_INT_DIG_LOCK_DET_CFG               0x077a
+#define WCD934X_CLK_SYS_INT_CLK_TEST1                      0x077b
+#define WCD934X_CLK_SYS_INT_CLK_TEST2                      0x077c
+#define WCD934X_CLK_SYS_INT_CLK_TEST3                      0x077d
+#define WCD934X_CLK_SYS_INT_XO_TEST1                       0x077e
+#define WCD934X_CLK_SYS_INT_XO_TEST2                       0x077f
+#define WCD934X_BOOST_INT_VCOMP_HYST                       0x0787
+#define WCD934X_BOOST_INT_VLOOP_FILTER                     0x0788
+#define WCD934X_BOOST_INT_CTRL_IDELTA                      0x0789
+#define WCD934X_BOOST_INT_CTRL_ILIM_STARTUP                0x078a
+#define WCD934X_BOOST_INT_CTRL_MIN_ONTIME                  0x078b
+#define WCD934X_BOOST_INT_CTRL_MAX_ONTIME                  0x078c
+#define WCD934X_BOOST_INT_CTRL_TIMING                      0x078d
+#define WCD934X_BOOST_INT_TMUX_A_D                         0x078e
+#define WCD934X_BOOST_INT_SW_DRV_CNTL                      0x078f
+#define WCD934X_BOOST_INT_SPARE1                           0x0790
+#define WCD934X_BOOST_INT_SPARE2                           0x0791
+#define WCD934X_SIDO_NEW_INT_RAMP_STATUS                   0x0796
+#define WCD934X_SIDO_NEW_INT_SPARE_1                       0x0797
+#define WCD934X_SIDO_NEW_INT_DEBUG_VOUT_SETTING_A          0x0798
+#define WCD934X_SIDO_NEW_INT_DEBUG_VOUT_SETTING_D          0x0799
+#define WCD934X_SIDO_NEW_INT_RAMP_INC_WAIT                 0x079a
+#define WCD934X_SIDO_NEW_INT_DYNAMIC_IPEAK_CTL             0x079b
+#define WCD934X_SIDO_NEW_INT_RAMP_IBLEED_CTL               0x079c
+#define WCD934X_SIDO_NEW_INT_DEBUG_CPROVR_TEST             0x079d
+#define WCD934X_SIDO_NEW_INT_RAMP_CTL_A                    0x079e
+#define WCD934X_SIDO_NEW_INT_RAMP_CTL_D                    0x079f
+#define WCD934X_SIDO_NEW_INT_RAMP_TIMEOUT_PERIOD           0x07a0
+#define WCD934X_SIDO_NEW_INT_DYNAMIC_IPEAK_SETTING1        0x07a1
+#define WCD934X_SIDO_NEW_INT_DYNAMIC_IPEAK_SETTING2        0x07a2
+#define WCD934X_SIDO_NEW_INT_DYNAMIC_IPEAK_SETTING3        0x07a3
+#define WCD934X_SIDO_NEW_INT_HIGH_ACCU_MODE_SEL1           0x07a4
+#define WCD934X_SIDO_NEW_INT_HIGH_ACCU_MODE_SEL2           0x07a5
+#define WCD934X_MBHC_NEW_INT_SLNQ_HPF                      0x07af
+#define WCD934X_MBHC_NEW_INT_SLNQ_REF                      0x07b0
+#define WCD934X_MBHC_NEW_INT_SLNQ_COMP                     0x07b1
+#define WCD934X_MBHC_NEW_INT_SPARE_2                       0x07b2
+#define WCD934X_PAGE10_PAGE_REGISTER                       0x0a00
+#define WCD934X_CDC_ANC0_CLK_RESET_CTL                     0x0a01
+#define WCD934X_CDC_ANC0_MODE_1_CTL                        0x0a02
+#define WCD934X_CDC_ANC0_MODE_2_CTL                        0x0a03
+#define WCD934X_CDC_ANC0_FF_SHIFT                          0x0a04
+#define WCD934X_CDC_ANC0_FB_SHIFT                          0x0a05
+#define WCD934X_CDC_ANC0_LPF_FF_A_CTL                      0x0a06
+#define WCD934X_CDC_ANC0_LPF_FF_B_CTL                      0x0a07
+#define WCD934X_CDC_ANC0_LPF_FB_CTL                        0x0a08
+#define WCD934X_CDC_ANC0_SMLPF_CTL                         0x0a09
+#define WCD934X_CDC_ANC0_DCFLT_SHIFT_CTL                   0x0a0a
+#define WCD934X_CDC_ANC0_IIR_ADAPT_CTL                     0x0a0b
+#define WCD934X_CDC_ANC0_IIR_COEFF_1_CTL                   0x0a0c
+#define WCD934X_CDC_ANC0_IIR_COEFF_2_CTL                   0x0a0d
+#define WCD934X_CDC_ANC0_FF_A_GAIN_CTL                     0x0a0e
+#define WCD934X_CDC_ANC0_FF_B_GAIN_CTL                     0x0a0f
+#define WCD934X_CDC_ANC0_FB_GAIN_CTL                       0x0a10
+#define WCD934X_CDC_ANC0_RC_COMMON_CTL                     0x0a11
+#define WCD934X_CDC_ANC0_FIFO_COMMON_CTL                   0x0a13
+#define WCD934X_CDC_ANC0_RC0_STATUS_FMIN_CNTR              0x0a14
+#define WCD934X_CDC_ANC0_RC1_STATUS_FMIN_CNTR              0x0a15
+#define WCD934X_CDC_ANC0_RC0_STATUS_FMAX_CNTR              0x0a16
+#define WCD934X_CDC_ANC0_RC1_STATUS_FMAX_CNTR              0x0a17
+#define WCD934X_CDC_ANC0_STATUS_FIFO                       0x0a18
+#define WCD934X_CDC_ANC1_CLK_RESET_CTL                     0x0a19
+#define WCD934X_CDC_ANC1_MODE_1_CTL                        0x0a1a
+#define WCD934X_CDC_ANC1_MODE_2_CTL                        0x0a1b
+#define WCD934X_CDC_ANC1_FF_SHIFT                          0x0a1c
+#define WCD934X_CDC_ANC1_FB_SHIFT                          0x0a1d
+#define WCD934X_CDC_ANC1_LPF_FF_A_CTL                      0x0a1e
+#define WCD934X_CDC_ANC1_LPF_FF_B_CTL                      0x0a1f
+#define WCD934X_CDC_ANC1_LPF_FB_CTL                        0x0a20
+#define WCD934X_CDC_ANC1_SMLPF_CTL                         0x0a21
+#define WCD934X_CDC_ANC1_DCFLT_SHIFT_CTL                   0x0a22
+#define WCD934X_CDC_ANC1_IIR_ADAPT_CTL                     0x0a23
+#define WCD934X_CDC_ANC1_IIR_COEFF_1_CTL                   0x0a24
+#define WCD934X_CDC_ANC1_IIR_COEFF_2_CTL                   0x0a25
+#define WCD934X_CDC_ANC1_FF_A_GAIN_CTL                     0x0a26
+#define WCD934X_CDC_ANC1_FF_B_GAIN_CTL                     0x0a27
+#define WCD934X_CDC_ANC1_FB_GAIN_CTL                       0x0a28
+#define WCD934X_CDC_ANC1_RC_COMMON_CTL                     0x0a29
+#define WCD934X_CDC_ANC1_FIFO_COMMON_CTL                   0x0a2b
+#define WCD934X_CDC_ANC1_RC0_STATUS_FMIN_CNTR              0x0a2c
+#define WCD934X_CDC_ANC1_RC1_STATUS_FMIN_CNTR              0x0a2d
+#define WCD934X_CDC_ANC1_RC0_STATUS_FMAX_CNTR              0x0a2e
+#define WCD934X_CDC_ANC1_RC1_STATUS_FMAX_CNTR              0x0a2f
+#define WCD934X_CDC_ANC1_STATUS_FIFO                       0x0a30
+#define WCD934X_CDC_TX0_TX_PATH_CTL                        0x0a31
+#define WCD934X_CDC_TX0_TX_PATH_CFG0                       0x0a32
+#define WCD934X_CDC_TX0_TX_PATH_CFG1                       0x0a33
+#define WCD934X_CDC_TX0_TX_VOL_CTL                         0x0a34
+#define WCD934X_CDC_TX0_TX_PATH_192_CTL                    0x0a35
+#define WCD934X_CDC_TX0_TX_PATH_192_CFG                    0x0a36
+#define WCD934X_CDC_TX0_TX_PATH_SEC0                       0x0a37
+#define WCD934X_CDC_TX0_TX_PATH_SEC1                       0x0a38
+#define WCD934X_CDC_TX0_TX_PATH_SEC2                       0x0a39
+#define WCD934X_CDC_TX0_TX_PATH_SEC3                       0x0a3a
+#define WCD934X_CDC_TX0_TX_PATH_SEC4                       0x0a3b
+#define WCD934X_CDC_TX0_TX_PATH_SEC5                       0x0a3c
+#define WCD934X_CDC_TX0_TX_PATH_SEC6                       0x0a3d
+#define WCD934X_CDC_TX0_TX_PATH_SEC7                       0x0a3e
+#define WCD934X_CDC_TX1_TX_PATH_CTL                        0x0a41
+#define WCD934X_CDC_TX1_TX_PATH_CFG0                       0x0a42
+#define WCD934X_CDC_TX1_TX_PATH_CFG1                       0x0a43
+#define WCD934X_CDC_TX1_TX_VOL_CTL                         0x0a44
+#define WCD934X_CDC_TX1_TX_PATH_192_CTL                    0x0a45
+#define WCD934X_CDC_TX1_TX_PATH_192_CFG                    0x0a46
+#define WCD934X_CDC_TX1_TX_PATH_SEC0                       0x0a47
+#define WCD934X_CDC_TX1_TX_PATH_SEC1                       0x0a48
+#define WCD934X_CDC_TX1_TX_PATH_SEC2                       0x0a49
+#define WCD934X_CDC_TX1_TX_PATH_SEC3                       0x0a4a
+#define WCD934X_CDC_TX1_TX_PATH_SEC4                       0x0a4b
+#define WCD934X_CDC_TX1_TX_PATH_SEC5                       0x0a4c
+#define WCD934X_CDC_TX1_TX_PATH_SEC6                       0x0a4d
+#define WCD934X_CDC_TX2_TX_PATH_CTL                        0x0a51
+#define WCD934X_CDC_TX2_TX_PATH_CFG0                       0x0a52
+#define WCD934X_CDC_TX2_TX_PATH_CFG1                       0x0a53
+#define WCD934X_CDC_TX2_TX_VOL_CTL                         0x0a54
+#define WCD934X_CDC_TX2_TX_PATH_192_CTL                    0x0a55
+#define WCD934X_CDC_TX2_TX_PATH_192_CFG                    0x0a56
+#define WCD934X_CDC_TX2_TX_PATH_SEC0                       0x0a57
+#define WCD934X_CDC_TX2_TX_PATH_SEC1                       0x0a58
+#define WCD934X_CDC_TX2_TX_PATH_SEC2                       0x0a59
+#define WCD934X_CDC_TX2_TX_PATH_SEC3                       0x0a5a
+#define WCD934X_CDC_TX2_TX_PATH_SEC4                       0x0a5b
+#define WCD934X_CDC_TX2_TX_PATH_SEC5                       0x0a5c
+#define WCD934X_CDC_TX2_TX_PATH_SEC6                       0x0a5d
+#define WCD934X_CDC_TX3_TX_PATH_CTL                        0x0a61
+#define WCD934X_CDC_TX3_TX_PATH_CFG0                       0x0a62
+#define WCD934X_CDC_TX3_TX_PATH_CFG1                       0x0a63
+#define WCD934X_CDC_TX3_TX_VOL_CTL                         0x0a64
+#define WCD934X_CDC_TX3_TX_PATH_192_CTL                    0x0a65
+#define WCD934X_CDC_TX3_TX_PATH_192_CFG                    0x0a66
+#define WCD934X_CDC_TX3_TX_PATH_SEC0                       0x0a67
+#define WCD934X_CDC_TX3_TX_PATH_SEC1                       0x0a68
+#define WCD934X_CDC_TX3_TX_PATH_SEC2                       0x0a69
+#define WCD934X_CDC_TX3_TX_PATH_SEC3                       0x0a6a
+#define WCD934X_CDC_TX3_TX_PATH_SEC4                       0x0a6b
+#define WCD934X_CDC_TX3_TX_PATH_SEC5                       0x0a6c
+#define WCD934X_CDC_TX3_TX_PATH_SEC6                       0x0a6d
+#define WCD934X_CDC_TX4_TX_PATH_CTL                        0x0a71
+#define WCD934X_CDC_TX4_TX_PATH_CFG0                       0x0a72
+#define WCD934X_CDC_TX4_TX_PATH_CFG1                       0x0a73
+#define WCD934X_CDC_TX4_TX_VOL_CTL                         0x0a74
+#define WCD934X_CDC_TX4_TX_PATH_192_CTL                    0x0a75
+#define WCD934X_CDC_TX4_TX_PATH_192_CFG                    0x0a76
+#define WCD934X_CDC_TX4_TX_PATH_SEC0                       0x0a77
+#define WCD934X_CDC_TX4_TX_PATH_SEC1                       0x0a78
+#define WCD934X_CDC_TX4_TX_PATH_SEC2                       0x0a79
+#define WCD934X_CDC_TX4_TX_PATH_SEC3                       0x0a7a
+#define WCD934X_CDC_TX4_TX_PATH_SEC4                       0x0a7b
+#define WCD934X_CDC_TX4_TX_PATH_SEC5                       0x0a7c
+#define WCD934X_CDC_TX4_TX_PATH_SEC6                       0x0a7d
+#define WCD934X_CDC_TX5_TX_PATH_CTL                        0x0a81
+#define WCD934X_CDC_TX5_TX_PATH_CFG0                       0x0a82
+#define WCD934X_CDC_TX5_TX_PATH_CFG1                       0x0a83
+#define WCD934X_CDC_TX5_TX_VOL_CTL                         0x0a84
+#define WCD934X_CDC_TX5_TX_PATH_192_CTL                    0x0a85
+#define WCD934X_CDC_TX5_TX_PATH_192_CFG                    0x0a86
+#define WCD934X_CDC_TX5_TX_PATH_SEC0                       0x0a87
+#define WCD934X_CDC_TX5_TX_PATH_SEC1                       0x0a88
+#define WCD934X_CDC_TX5_TX_PATH_SEC2                       0x0a89
+#define WCD934X_CDC_TX5_TX_PATH_SEC3                       0x0a8a
+#define WCD934X_CDC_TX5_TX_PATH_SEC4                       0x0a8b
+#define WCD934X_CDC_TX5_TX_PATH_SEC5                       0x0a8c
+#define WCD934X_CDC_TX5_TX_PATH_SEC6                       0x0a8d
+#define WCD934X_CDC_TX6_TX_PATH_CTL                        0x0a91
+#define WCD934X_CDC_TX6_TX_PATH_CFG0                       0x0a92
+#define WCD934X_CDC_TX6_TX_PATH_CFG1                       0x0a93
+#define WCD934X_CDC_TX6_TX_VOL_CTL                         0x0a94
+#define WCD934X_CDC_TX6_TX_PATH_192_CTL                    0x0a95
+#define WCD934X_CDC_TX6_TX_PATH_192_CFG                    0x0a96
+#define WCD934X_CDC_TX6_TX_PATH_SEC0                       0x0a97
+#define WCD934X_CDC_TX6_TX_PATH_SEC1                       0x0a98
+#define WCD934X_CDC_TX6_TX_PATH_SEC2                       0x0a99
+#define WCD934X_CDC_TX6_TX_PATH_SEC3                       0x0a9a
+#define WCD934X_CDC_TX6_TX_PATH_SEC4                       0x0a9b
+#define WCD934X_CDC_TX6_TX_PATH_SEC5                       0x0a9c
+#define WCD934X_CDC_TX6_TX_PATH_SEC6                       0x0a9d
+#define WCD934X_CDC_TX7_TX_PATH_CTL                        0x0aa1
+#define WCD934X_CDC_TX7_TX_PATH_CFG0                       0x0aa2
+#define WCD934X_CDC_TX7_TX_PATH_CFG1                       0x0aa3
+#define WCD934X_CDC_TX7_TX_VOL_CTL                         0x0aa4
+#define WCD934X_CDC_TX7_TX_PATH_192_CTL                    0x0aa5
+#define WCD934X_CDC_TX7_TX_PATH_192_CFG                    0x0aa6
+#define WCD934X_CDC_TX7_TX_PATH_SEC0                       0x0aa7
+#define WCD934X_CDC_TX7_TX_PATH_SEC1                       0x0aa8
+#define WCD934X_CDC_TX7_TX_PATH_SEC2                       0x0aa9
+#define WCD934X_CDC_TX7_TX_PATH_SEC3                       0x0aaa
+#define WCD934X_CDC_TX7_TX_PATH_SEC4                       0x0aab
+#define WCD934X_CDC_TX7_TX_PATH_SEC5                       0x0aac
+#define WCD934X_CDC_TX7_TX_PATH_SEC6                       0x0aad
+#define WCD934X_CDC_TX8_TX_PATH_CTL                        0x0ab1
+#define WCD934X_CDC_TX8_TX_PATH_CFG0                       0x0ab2
+#define WCD934X_CDC_TX8_TX_PATH_CFG1                       0x0ab3
+#define WCD934X_CDC_TX8_TX_VOL_CTL                         0x0ab4
+#define WCD934X_CDC_TX8_TX_PATH_192_CTL                    0x0ab5
+#define WCD934X_CDC_TX8_TX_PATH_192_CFG                    0x0ab6
+#define WCD934X_CDC_TX8_TX_PATH_SEC0                       0x0ab7
+#define WCD934X_CDC_TX8_TX_PATH_SEC1                       0x0ab8
+#define WCD934X_CDC_TX8_TX_PATH_SEC2                       0x0ab9
+#define WCD934X_CDC_TX8_TX_PATH_SEC3                       0x0aba
+#define WCD934X_CDC_TX8_TX_PATH_SEC4                       0x0abb
+#define WCD934X_CDC_TX8_TX_PATH_SEC5                       0x0abc
+#define WCD934X_CDC_TX8_TX_PATH_SEC6                       0x0abd
+#define WCD934X_CDC_TX9_SPKR_PROT_PATH_CTL                 0x0ac2
+#define WCD934X_CDC_TX9_SPKR_PROT_PATH_CFG0                0x0ac3
+#define WCD934X_CDC_TX10_SPKR_PROT_PATH_CTL                0x0ac6
+#define WCD934X_CDC_TX10_SPKR_PROT_PATH_CFG0               0x0ac7
+#define WCD934X_CDC_TX11_SPKR_PROT_PATH_CTL                0x0aca
+#define WCD934X_CDC_TX11_SPKR_PROT_PATH_CFG0               0x0acb
+#define WCD934X_CDC_TX12_SPKR_PROT_PATH_CTL                0x0ace
+#define WCD934X_CDC_TX12_SPKR_PROT_PATH_CFG0               0x0acf
+#define WCD934X_PAGE11_PAGE_REGISTER                       0x0b00
+#define WCD934X_CDC_COMPANDER1_CTL0                        0x0b01
+#define WCD934X_CDC_COMPANDER1_CTL1                        0x0b02
+#define WCD934X_CDC_COMPANDER1_CTL2                        0x0b03
+#define WCD934X_CDC_COMPANDER1_CTL3                        0x0b04
+#define WCD934X_CDC_COMPANDER1_CTL4                        0x0b05
+#define WCD934X_CDC_COMPANDER1_CTL5                        0x0b06
+#define WCD934X_CDC_COMPANDER1_CTL6                        0x0b07
+#define WCD934X_CDC_COMPANDER1_CTL7                        0x0b08
+#define WCD934X_CDC_COMPANDER2_CTL0                        0x0b09
+#define WCD934X_CDC_COMPANDER2_CTL1                        0x0b0a
+#define WCD934X_CDC_COMPANDER2_CTL2                        0x0b0b
+#define WCD934X_CDC_COMPANDER2_CTL3                        0x0b0c
+#define WCD934X_CDC_COMPANDER2_CTL4                        0x0b0d
+#define WCD934X_CDC_COMPANDER2_CTL5                        0x0b0e
+#define WCD934X_CDC_COMPANDER2_CTL6                        0x0b0f
+#define WCD934X_CDC_COMPANDER2_CTL7                        0x0b10
+#define WCD934X_CDC_COMPANDER3_CTL0                        0x0b11
+#define WCD934X_CDC_COMPANDER3_CTL1                        0x0b12
+#define WCD934X_CDC_COMPANDER3_CTL2                        0x0b13
+#define WCD934X_CDC_COMPANDER3_CTL3                        0x0b14
+#define WCD934X_CDC_COMPANDER3_CTL4                        0x0b15
+#define WCD934X_CDC_COMPANDER3_CTL5                        0x0b16
+#define WCD934X_CDC_COMPANDER3_CTL6                        0x0b17
+#define WCD934X_CDC_COMPANDER3_CTL7                        0x0b18
+#define WCD934X_CDC_COMPANDER4_CTL0                        0x0b19
+#define WCD934X_CDC_COMPANDER4_CTL1                        0x0b1a
+#define WCD934X_CDC_COMPANDER4_CTL2                        0x0b1b
+#define WCD934X_CDC_COMPANDER4_CTL3                        0x0b1c
+#define WCD934X_CDC_COMPANDER4_CTL4                        0x0b1d
+#define WCD934X_CDC_COMPANDER4_CTL5                        0x0b1e
+#define WCD934X_CDC_COMPANDER4_CTL6                        0x0b1f
+#define WCD934X_CDC_COMPANDER4_CTL7                        0x0b20
+#define WCD934X_CDC_COMPANDER7_CTL0                        0x0b31
+#define WCD934X_CDC_COMPANDER7_CTL1                        0x0b32
+#define WCD934X_CDC_COMPANDER7_CTL2                        0x0b33
+#define WCD934X_CDC_COMPANDER7_CTL3                        0x0b34
+#define WCD934X_CDC_COMPANDER7_CTL4                        0x0b35
+#define WCD934X_CDC_COMPANDER7_CTL5                        0x0b36
+#define WCD934X_CDC_COMPANDER7_CTL6                        0x0b37
+#define WCD934X_CDC_COMPANDER7_CTL7                        0x0b38
+#define WCD934X_CDC_COMPANDER8_CTL0                        0x0b39
+#define WCD934X_CDC_COMPANDER8_CTL1                        0x0b3a
+#define WCD934X_CDC_COMPANDER8_CTL2                        0x0b3b
+#define WCD934X_CDC_COMPANDER8_CTL3                        0x0b3c
+#define WCD934X_CDC_COMPANDER8_CTL4                        0x0b3d
+#define WCD934X_CDC_COMPANDER8_CTL5                        0x0b3e
+#define WCD934X_CDC_COMPANDER8_CTL6                        0x0b3f
+#define WCD934X_CDC_COMPANDER8_CTL7                        0x0b40
+#define WCD934X_CDC_RX0_RX_PATH_CTL                        0x0b41
+#define WCD934X_CDC_RX0_RX_PATH_CFG0                       0x0b42
+#define WCD934X_CDC_RX0_RX_PATH_CFG1                       0x0b43
+#define WCD934X_CDC_RX0_RX_PATH_CFG2                       0x0b44
+#define WCD934X_CDC_RX0_RX_VOL_CTL                         0x0b45
+#define WCD934X_CDC_RX0_RX_PATH_MIX_CTL                    0x0b46
+#define WCD934X_CDC_RX0_RX_PATH_MIX_CFG                    0x0b47
+#define WCD934X_CDC_RX0_RX_VOL_MIX_CTL                     0x0b48
+#define WCD934X_CDC_RX0_RX_PATH_SEC0                       0x0b49
+#define WCD934X_CDC_RX0_RX_PATH_SEC1                       0x0b4a
+#define WCD934X_CDC_RX0_RX_PATH_SEC2                       0x0b4b
+#define WCD934X_CDC_RX0_RX_PATH_SEC3                       0x0b4c
+#define WCD934X_CDC_RX0_RX_PATH_SEC5                       0x0b4e
+#define WCD934X_CDC_RX0_RX_PATH_SEC6                       0x0b4f
+#define WCD934X_CDC_RX0_RX_PATH_SEC7                       0x0b50
+#define WCD934X_CDC_RX0_RX_PATH_MIX_SEC0                   0x0b51
+#define WCD934X_CDC_RX0_RX_PATH_MIX_SEC1                   0x0b52
+#define WCD934X_CDC_RX0_RX_PATH_DSMDEM_CTL                 0x0b53
+#define WCD934X_CDC_RX1_RX_PATH_CTL                        0x0b55
+#define WCD934X_CDC_RX1_RX_PATH_CFG0                       0x0b56
+#define WCD934X_CDC_RX1_RX_PATH_CFG1                       0x0b57
+#define WCD934X_CDC_RX1_RX_PATH_CFG2                       0x0b58
+#define WCD934X_CDC_RX1_RX_VOL_CTL                         0x0b59
+#define WCD934X_CDC_RX1_RX_PATH_MIX_CTL                    0x0b5a
+#define WCD934X_CDC_RX1_RX_PATH_MIX_CFG                    0x0b5b
+#define WCD934X_CDC_RX1_RX_VOL_MIX_CTL                     0x0b5c
+#define WCD934X_CDC_RX1_RX_PATH_SEC0                       0x0b5d
+#define WCD934X_CDC_RX1_RX_PATH_SEC1                       0x0b5e
+#define WCD934X_CDC_RX1_RX_PATH_SEC2                       0x0b5f
+#define WCD934X_CDC_RX1_RX_PATH_SEC3                       0x0b60
+#define WCD934X_CDC_RX1_RX_PATH_SEC4                       0x0b61
+#define WCD934X_CDC_RX1_RX_PATH_SEC5                       0x0b62
+#define WCD934X_CDC_RX1_RX_PATH_SEC6                       0x0b63
+#define WCD934X_CDC_RX1_RX_PATH_SEC7                       0x0b64
+#define WCD934X_CDC_RX1_RX_PATH_MIX_SEC0                   0x0b65
+#define WCD934X_CDC_RX1_RX_PATH_MIX_SEC1                   0x0b66
+#define WCD934X_CDC_RX1_RX_PATH_DSMDEM_CTL                 0x0b67
+#define WCD934X_CDC_RX2_RX_PATH_CTL                        0x0b69
+#define WCD934X_CDC_RX2_RX_PATH_CFG0                       0x0b6a
+#define WCD934X_CDC_RX2_RX_PATH_CFG1                       0x0b6b
+#define WCD934X_CDC_RX2_RX_PATH_CFG2                       0x0b6c
+#define WCD934X_CDC_RX2_RX_VOL_CTL                         0x0b6d
+#define WCD934X_CDC_RX2_RX_PATH_MIX_CTL                    0x0b6e
+#define WCD934X_CDC_RX2_RX_PATH_MIX_CFG                    0x0b6f
+#define WCD934X_CDC_RX2_RX_VOL_MIX_CTL                     0x0b70
+#define WCD934X_CDC_RX2_RX_PATH_SEC0                       0x0b71
+#define WCD934X_CDC_RX2_RX_PATH_SEC1                       0x0b72
+#define WCD934X_CDC_RX2_RX_PATH_SEC2                       0x0b73
+#define WCD934X_CDC_RX2_RX_PATH_SEC3                       0x0b74
+#define WCD934X_CDC_RX2_RX_PATH_SEC4                       0x0b75
+#define WCD934X_CDC_RX2_RX_PATH_SEC5                       0x0b76
+#define WCD934X_CDC_RX2_RX_PATH_SEC6                       0x0b77
+#define WCD934X_CDC_RX2_RX_PATH_SEC7                       0x0b78
+#define WCD934X_CDC_RX2_RX_PATH_MIX_SEC0                   0x0b79
+#define WCD934X_CDC_RX2_RX_PATH_MIX_SEC1                   0x0b7a
+#define WCD934X_CDC_RX2_RX_PATH_DSMDEM_CTL                 0x0b7b
+#define WCD934X_CDC_RX3_RX_PATH_CTL                        0x0b7d
+#define WCD934X_CDC_RX3_RX_PATH_CFG0                       0x0b7e
+#define WCD934X_CDC_RX3_RX_PATH_CFG1                       0x0b7f
+#define WCD934X_CDC_RX3_RX_PATH_CFG2                       0x0b80
+#define WCD934X_CDC_RX3_RX_VOL_CTL                         0x0b81
+#define WCD934X_CDC_RX3_RX_PATH_MIX_CTL                    0x0b82
+#define WCD934X_CDC_RX3_RX_PATH_MIX_CFG                    0x0b83
+#define WCD934X_CDC_RX3_RX_VOL_MIX_CTL                     0x0b84
+#define WCD934X_CDC_RX3_RX_PATH_SEC0                       0x0b85
+#define WCD934X_CDC_RX3_RX_PATH_SEC1                       0x0b86
+#define WCD934X_CDC_RX3_RX_PATH_SEC2                       0x0b87
+#define WCD934X_CDC_RX3_RX_PATH_SEC3                       0x0b88
+#define WCD934X_CDC_RX3_RX_PATH_SEC5                       0x0b8a
+#define WCD934X_CDC_RX3_RX_PATH_SEC6                       0x0b8b
+#define WCD934X_CDC_RX3_RX_PATH_SEC7                       0x0b8c
+#define WCD934X_CDC_RX3_RX_PATH_MIX_SEC0                   0x0b8d
+#define WCD934X_CDC_RX3_RX_PATH_MIX_SEC1                   0x0b8e
+#define WCD934X_CDC_RX3_RX_PATH_DSMDEM_CTL                 0x0b8f
+#define WCD934X_CDC_RX4_RX_PATH_CTL                        0x0b91
+#define WCD934X_CDC_RX4_RX_PATH_CFG0                       0x0b92
+#define WCD934X_CDC_RX4_RX_PATH_CFG1                       0x0b93
+#define WCD934X_CDC_RX4_RX_PATH_CFG2                       0x0b94
+#define WCD934X_CDC_RX4_RX_VOL_CTL                         0x0b95
+#define WCD934X_CDC_RX4_RX_PATH_MIX_CTL                    0x0b96
+#define WCD934X_CDC_RX4_RX_PATH_MIX_CFG                    0x0b97
+#define WCD934X_CDC_RX4_RX_VOL_MIX_CTL                     0x0b98
+#define WCD934X_CDC_RX4_RX_PATH_SEC0                       0x0b99
+#define WCD934X_CDC_RX4_RX_PATH_SEC1                       0x0b9a
+#define WCD934X_CDC_RX4_RX_PATH_SEC2                       0x0b9b
+#define WCD934X_CDC_RX4_RX_PATH_SEC3                       0x0b9c
+#define WCD934X_CDC_RX4_RX_PATH_SEC5                       0x0b9e
+#define WCD934X_CDC_RX4_RX_PATH_SEC6                       0x0b9f
+#define WCD934X_CDC_RX4_RX_PATH_SEC7                       0x0ba0
+#define WCD934X_CDC_RX4_RX_PATH_MIX_SEC0                   0x0ba1
+#define WCD934X_CDC_RX4_RX_PATH_MIX_SEC1                   0x0ba2
+#define WCD934X_CDC_RX4_RX_PATH_DSMDEM_CTL                 0x0ba3
+#define WCD934X_CDC_RX7_RX_PATH_CTL                        0x0bcd
+#define WCD934X_CDC_RX7_RX_PATH_CFG0                       0x0bce
+#define WCD934X_CDC_RX7_RX_PATH_CFG1                       0x0bcf
+#define WCD934X_CDC_RX7_RX_PATH_CFG2                       0x0bd0
+#define WCD934X_CDC_RX7_RX_VOL_CTL                         0x0bd1
+#define WCD934X_CDC_RX7_RX_PATH_MIX_CTL                    0x0bd2
+#define WCD934X_CDC_RX7_RX_PATH_MIX_CFG                    0x0bd3
+#define WCD934X_CDC_RX7_RX_VOL_MIX_CTL                     0x0bd4
+#define WCD934X_CDC_RX7_RX_PATH_SEC0                       0x0bd5
+#define WCD934X_CDC_RX7_RX_PATH_SEC1                       0x0bd6
+#define WCD934X_CDC_RX7_RX_PATH_SEC2                       0x0bd7
+#define WCD934X_CDC_RX7_RX_PATH_SEC3                       0x0bd8
+#define WCD934X_CDC_RX7_RX_PATH_SEC5                       0x0bda
+#define WCD934X_CDC_RX7_RX_PATH_SEC6                       0x0bdb
+#define WCD934X_CDC_RX7_RX_PATH_SEC7                       0x0bdc
+#define WCD934X_CDC_RX7_RX_PATH_MIX_SEC0                   0x0bdd
+#define WCD934X_CDC_RX7_RX_PATH_MIX_SEC1                   0x0bde
+#define WCD934X_CDC_RX7_RX_PATH_DSMDEM_CTL                 0x0bdf
+#define WCD934X_CDC_RX8_RX_PATH_CTL                        0x0be1
+#define WCD934X_CDC_RX8_RX_PATH_CFG0                       0x0be2
+#define WCD934X_CDC_RX8_RX_PATH_CFG1                       0x0be3
+#define WCD934X_CDC_RX8_RX_PATH_CFG2                       0x0be4
+#define WCD934X_CDC_RX8_RX_VOL_CTL                         0x0be5
+#define WCD934X_CDC_RX8_RX_PATH_MIX_CTL                    0x0be6
+#define WCD934X_CDC_RX8_RX_PATH_MIX_CFG                    0x0be7
+#define WCD934X_CDC_RX8_RX_VOL_MIX_CTL                     0x0be8
+#define WCD934X_CDC_RX8_RX_PATH_SEC0                       0x0be9
+#define WCD934X_CDC_RX8_RX_PATH_SEC1                       0x0bea
+#define WCD934X_CDC_RX8_RX_PATH_SEC2                       0x0beb
+#define WCD934X_CDC_RX8_RX_PATH_SEC3                       0x0bec
+#define WCD934X_CDC_RX8_RX_PATH_SEC5                       0x0bee
+#define WCD934X_CDC_RX8_RX_PATH_SEC6                       0x0bef
+#define WCD934X_CDC_RX8_RX_PATH_SEC7                       0x0bf0
+#define WCD934X_CDC_RX8_RX_PATH_MIX_SEC0                   0x0bf1
+#define WCD934X_CDC_RX8_RX_PATH_MIX_SEC1                   0x0bf2
+#define WCD934X_CDC_RX8_RX_PATH_DSMDEM_CTL                 0x0bf3
+#define WCD934X_PAGE12_PAGE_REGISTER                       0x0c00
+#define WCD934X_CDC_CLSH_CRC                               0x0c01
+#define WCD934X_CDC_CLSH_DLY_CTRL                          0x0c02
+#define WCD934X_CDC_CLSH_DECAY_CTRL                        0x0c03
+#define WCD934X_CDC_CLSH_HPH_V_PA                          0x0c04
+#define WCD934X_CDC_CLSH_EAR_V_PA                          0x0c05
+#define WCD934X_CDC_CLSH_HPH_V_HD                          0x0c06
+#define WCD934X_CDC_CLSH_EAR_V_HD                          0x0c07
+#define WCD934X_CDC_CLSH_K1_MSB                            0x0c08
+#define WCD934X_CDC_CLSH_K1_LSB                            0x0c09
+#define WCD934X_CDC_CLSH_K2_MSB                            0x0c0a
+#define WCD934X_CDC_CLSH_K2_LSB                            0x0c0b
+#define WCD934X_CDC_CLSH_IDLE_CTRL                         0x0c0c
+#define WCD934X_CDC_CLSH_IDLE_HPH                          0x0c0d
+#define WCD934X_CDC_CLSH_IDLE_EAR                          0x0c0e
+#define WCD934X_CDC_CLSH_TEST0                             0x0c0f
+#define WCD934X_CDC_CLSH_TEST1                             0x0c10
+#define WCD934X_CDC_CLSH_OVR_VREF                          0x0c11
+#define WCD934X_CDC_BOOST0_BOOST_PATH_CTL                  0x0c19
+#define WCD934X_CDC_BOOST0_BOOST_CTL                       0x0c1a
+#define WCD934X_CDC_BOOST0_BOOST_CFG1                      0x0c1b
+#define WCD934X_CDC_BOOST0_BOOST_CFG2                      0x0c1c
+#define WCD934X_CDC_BOOST1_BOOST_PATH_CTL                  0x0c21
+#define WCD934X_CDC_BOOST1_BOOST_CTL                       0x0c22
+#define WCD934X_CDC_BOOST1_BOOST_CFG1                      0x0c23
+#define WCD934X_CDC_BOOST1_BOOST_CFG2                      0x0c24
+#define WCD934X_CDC_VBAT_VBAT_PATH_CTL                     0x0c3d
+#define WCD934X_CDC_VBAT_VBAT_CFG                          0x0c3e
+#define WCD934X_CDC_VBAT_VBAT_ADC_CAL1                     0x0c3f
+#define WCD934X_CDC_VBAT_VBAT_ADC_CAL2                     0x0c40
+#define WCD934X_CDC_VBAT_VBAT_ADC_CAL3                     0x0c41
+#define WCD934X_CDC_VBAT_VBAT_PK_EST1                      0x0c42
+#define WCD934X_CDC_VBAT_VBAT_PK_EST2                      0x0c43
+#define WCD934X_CDC_VBAT_VBAT_PK_EST3                      0x0c44
+#define WCD934X_CDC_VBAT_VBAT_RF_PROC1                     0x0c45
+#define WCD934X_CDC_VBAT_VBAT_RF_PROC2                     0x0c46
+#define WCD934X_CDC_VBAT_VBAT_TAC1                         0x0c47
+#define WCD934X_CDC_VBAT_VBAT_TAC2                         0x0c48
+#define WCD934X_CDC_VBAT_VBAT_TAC3                         0x0c49
+#define WCD934X_CDC_VBAT_VBAT_TAC4                         0x0c4a
+#define WCD934X_CDC_VBAT_VBAT_GAIN_UPD1                    0x0c4b
+#define WCD934X_CDC_VBAT_VBAT_GAIN_UPD2                    0x0c4c
+#define WCD934X_CDC_VBAT_VBAT_GAIN_UPD3                    0x0c4d
+#define WCD934X_CDC_VBAT_VBAT_GAIN_UPD4                    0x0c4e
+#define WCD934X_CDC_VBAT_VBAT_DEBUG1                       0x0c4f
+#define WCD934X_CDC_VBAT_VBAT_GAIN_UPD_MON                 0x0c50
+#define WCD934X_CDC_VBAT_VBAT_GAIN_MON_VAL                 0x0c51
+#define WCD934X_CDC_VBAT_VBAT_BAN                          0x0c52
+#define WCD934X_MIXING_ASRC0_CLK_RST_CTL                   0x0c55
+#define WCD934X_MIXING_ASRC0_CTL0                          0x0c56
+#define WCD934X_MIXING_ASRC0_CTL1                          0x0c57
+#define WCD934X_MIXING_ASRC0_FIFO_CTL                      0x0c58
+#define WCD934X_MIXING_ASRC0_STATUS_FMIN_CNTR_LSB          0x0c59
+#define WCD934X_MIXING_ASRC0_STATUS_FMIN_CNTR_MSB          0x0c5a
+#define WCD934X_MIXING_ASRC0_STATUS_FMAX_CNTR_LSB          0x0c5b
+#define WCD934X_MIXING_ASRC0_STATUS_FMAX_CNTR_MSB          0x0c5c
+#define WCD934X_MIXING_ASRC0_STATUS_FIFO                   0x0c5d
+#define WCD934X_MIXING_ASRC1_CLK_RST_CTL                   0x0c61
+#define WCD934X_MIXING_ASRC1_CTL0                          0x0c62
+#define WCD934X_MIXING_ASRC1_CTL1                          0x0c63
+#define WCD934X_MIXING_ASRC1_FIFO_CTL                      0x0c64
+#define WCD934X_MIXING_ASRC1_STATUS_FMIN_CNTR_LSB          0x0c65
+#define WCD934X_MIXING_ASRC1_STATUS_FMIN_CNTR_MSB          0x0c66
+#define WCD934X_MIXING_ASRC1_STATUS_FMAX_CNTR_LSB          0x0c67
+#define WCD934X_MIXING_ASRC1_STATUS_FMAX_CNTR_MSB          0x0c68
+#define WCD934X_MIXING_ASRC1_STATUS_FIFO                   0x0c69
+#define WCD934X_MIXING_ASRC2_CLK_RST_CTL                   0x0c6d
+#define WCD934X_MIXING_ASRC2_CTL0                          0x0c6e
+#define WCD934X_MIXING_ASRC2_CTL1                          0x0c6f
+#define WCD934X_MIXING_ASRC2_FIFO_CTL                      0x0c70
+#define WCD934X_MIXING_ASRC2_STATUS_FMIN_CNTR_LSB          0x0c71
+#define WCD934X_MIXING_ASRC2_STATUS_FMIN_CNTR_MSB          0x0c72
+#define WCD934X_MIXING_ASRC2_STATUS_FMAX_CNTR_LSB          0x0c73
+#define WCD934X_MIXING_ASRC2_STATUS_FMAX_CNTR_MSB          0x0c74
+#define WCD934X_MIXING_ASRC2_STATUS_FIFO                   0x0c75
+#define WCD934X_MIXING_ASRC3_CLK_RST_CTL                   0x0c79
+#define WCD934X_MIXING_ASRC3_CTL0                          0x0c7a
+#define WCD934X_MIXING_ASRC3_CTL1                          0x0c7b
+#define WCD934X_MIXING_ASRC3_FIFO_CTL                      0x0c7c
+#define WCD934X_MIXING_ASRC3_STATUS_FMIN_CNTR_LSB          0x0c7d
+#define WCD934X_MIXING_ASRC3_STATUS_FMIN_CNTR_MSB          0x0c7e
+#define WCD934X_MIXING_ASRC3_STATUS_FMAX_CNTR_LSB          0x0c7f
+#define WCD934X_MIXING_ASRC3_STATUS_FMAX_CNTR_MSB          0x0c80
+#define WCD934X_MIXING_ASRC3_STATUS_FIFO                   0x0c81
+#define WCD934X_SWR_AHB_BRIDGE_WR_DATA_0                   0x0c85
+#define WCD934X_SWR_AHB_BRIDGE_WR_DATA_1                   0x0c86
+#define WCD934X_SWR_AHB_BRIDGE_WR_DATA_2                   0x0c87
+#define WCD934X_SWR_AHB_BRIDGE_WR_DATA_3                   0x0c88
+#define WCD934X_SWR_AHB_BRIDGE_WR_ADDR_0                   0x0c89
+#define WCD934X_SWR_AHB_BRIDGE_WR_ADDR_1                   0x0c8a
+#define WCD934X_SWR_AHB_BRIDGE_WR_ADDR_2                   0x0c8b
+#define WCD934X_SWR_AHB_BRIDGE_WR_ADDR_3                   0x0c8c
+#define WCD934X_SWR_AHB_BRIDGE_RD_ADDR_0                   0x0c8d
+#define WCD934X_SWR_AHB_BRIDGE_RD_ADDR_1                   0x0c8e
+#define WCD934X_SWR_AHB_BRIDGE_RD_ADDR_2                   0x0c8f
+#define WCD934X_SWR_AHB_BRIDGE_RD_ADDR_3                   0x0c90
+#define WCD934X_SWR_AHB_BRIDGE_RD_DATA_0                   0x0c91
+#define WCD934X_SWR_AHB_BRIDGE_RD_DATA_1                   0x0c92
+#define WCD934X_SWR_AHB_BRIDGE_RD_DATA_2                   0x0c93
+#define WCD934X_SWR_AHB_BRIDGE_RD_DATA_3                   0x0c94
+#define WCD934X_SWR_AHB_BRIDGE_ACCESS_CFG                  0x0c95
+#define WCD934X_SWR_AHB_BRIDGE_ACCESS_STATUS               0x0c96
+#define WCD934X_CDC_SIDETONE_SRC0_ST_SRC_PATH_CTL          0x0cb5
+#define WCD934X_CDC_SIDETONE_SRC0_ST_SRC_PATH_CFG1         0x0cb6
+#define WCD934X_CDC_SIDETONE_SRC1_ST_SRC_PATH_CTL          0x0cb9
+#define WCD934X_CDC_SIDETONE_SRC1_ST_SRC_PATH_CFG1         0x0cba
+#define WCD934X_SIDETONE_ASRC0_CLK_RST_CTL                 0x0cbd
+#define WCD934X_SIDETONE_ASRC0_CTL0                        0x0cbe
+#define WCD934X_SIDETONE_ASRC0_CTL1                        0x0cbf
+#define WCD934X_SIDETONE_ASRC0_FIFO_CTL                    0x0cc0
+#define WCD934X_SIDETONE_ASRC0_STATUS_FMIN_CNTR_LSB        0x0cc1
+#define WCD934X_SIDETONE_ASRC0_STATUS_FMIN_CNTR_MSB        0x0cc2
+#define WCD934X_SIDETONE_ASRC0_STATUS_FMAX_CNTR_LSB        0x0cc3
+#define WCD934X_SIDETONE_ASRC0_STATUS_FMAX_CNTR_MSB        0x0cc4
+#define WCD934X_SIDETONE_ASRC0_STATUS_FIFO                 0x0cc5
+#define WCD934X_SIDETONE_ASRC1_CLK_RST_CTL                 0x0cc9
+#define WCD934X_SIDETONE_ASRC1_CTL0                        0x0cca
+#define WCD934X_SIDETONE_ASRC1_CTL1                        0x0ccb
+#define WCD934X_SIDETONE_ASRC1_FIFO_CTL                    0x0ccc
+#define WCD934X_SIDETONE_ASRC1_STATUS_FMIN_CNTR_LSB        0x0ccd
+#define WCD934X_SIDETONE_ASRC1_STATUS_FMIN_CNTR_MSB        0x0cce
+#define WCD934X_SIDETONE_ASRC1_STATUS_FMAX_CNTR_LSB        0x0ccf
+#define WCD934X_SIDETONE_ASRC1_STATUS_FMAX_CNTR_MSB        0x0cd0
+#define WCD934X_SIDETONE_ASRC1_STATUS_FIFO                 0x0cd1
+#define WCD934X_EC_REF_HQ0_EC_REF_HQ_PATH_CTL              0x0cd5
+#define WCD934X_EC_REF_HQ0_EC_REF_HQ_CFG0                  0x0cd6
+#define WCD934X_EC_REF_HQ1_EC_REF_HQ_PATH_CTL              0x0cdd
+#define WCD934X_EC_REF_HQ1_EC_REF_HQ_CFG0                  0x0cde
+#define WCD934X_EC_ASRC0_CLK_RST_CTL                       0x0ce5
+#define WCD934X_EC_ASRC0_CTL0                              0x0ce6
+#define WCD934X_EC_ASRC0_CTL1                              0x0ce7
+#define WCD934X_EC_ASRC0_FIFO_CTL                          0x0ce8
+#define WCD934X_EC_ASRC0_STATUS_FMIN_CNTR_LSB              0x0ce9
+#define WCD934X_EC_ASRC0_STATUS_FMIN_CNTR_MSB              0x0cea
+#define WCD934X_EC_ASRC0_STATUS_FMAX_CNTR_LSB              0x0ceb
+#define WCD934X_EC_ASRC0_STATUS_FMAX_CNTR_MSB              0x0cec
+#define WCD934X_EC_ASRC0_STATUS_FIFO                       0x0ced
+#define WCD934X_EC_ASRC1_CLK_RST_CTL                       0x0cf1
+#define WCD934X_EC_ASRC1_CTL0                              0x0cf2
+#define WCD934X_EC_ASRC1_CTL1                              0x0cf3
+#define WCD934X_EC_ASRC1_FIFO_CTL                          0x0cf4
+#define WCD934X_EC_ASRC1_STATUS_FMIN_CNTR_LSB              0x0cf5
+#define WCD934X_EC_ASRC1_STATUS_FMIN_CNTR_MSB              0x0cf6
+#define WCD934X_EC_ASRC1_STATUS_FMAX_CNTR_LSB              0x0cf7
+#define WCD934X_EC_ASRC1_STATUS_FMAX_CNTR_MSB              0x0cf8
+#define WCD934X_EC_ASRC1_STATUS_FIFO                       0x0cf9
+#define WCD934X_PAGE13_PAGE_REGISTER                       0x0d00
+#define WCD934X_CDC_RX_INP_MUX_RX_INT0_CFG0                0x0d01
+#define WCD934X_CDC_RX_INP_MUX_RX_INT0_CFG1                0x0d02
+#define WCD934X_CDC_RX_INP_MUX_RX_INT1_CFG0                0x0d03
+#define WCD934X_CDC_RX_INP_MUX_RX_INT1_CFG1                0x0d04
+#define WCD934X_CDC_RX_INP_MUX_RX_INT2_CFG0                0x0d05
+#define WCD934X_CDC_RX_INP_MUX_RX_INT2_CFG1                0x0d06
+#define WCD934X_CDC_RX_INP_MUX_RX_INT3_CFG0                0x0d07
+#define WCD934X_CDC_RX_INP_MUX_RX_INT3_CFG1                0x0d08
+#define WCD934X_CDC_RX_INP_MUX_RX_INT4_CFG0                0x0d09
+#define WCD934X_CDC_RX_INP_MUX_RX_INT4_CFG1                0x0d0a
+#define WCD934X_CDC_RX_INP_MUX_RX_INT7_CFG0                0x0d0f
+#define WCD934X_CDC_RX_INP_MUX_RX_INT7_CFG1                0x0d10
+#define WCD934X_CDC_RX_INP_MUX_RX_INT8_CFG0                0x0d11
+#define WCD934X_CDC_RX_INP_MUX_RX_INT8_CFG1                0x0d12
+#define WCD934X_CDC_RX_INP_MUX_RX_MIX_CFG0                 0x0d13
+#define WCD934X_CDC_RX_INP_MUX_RX_MIX_CFG1                 0x0d14
+#define WCD934X_CDC_RX_INP_MUX_RX_MIX_CFG2                 0x0d15
+#define WCD934X_CDC_RX_INP_MUX_RX_MIX_CFG3                 0x0d16
+#define WCD934X_CDC_RX_INP_MUX_RX_MIX_CFG4                 0x0d17
+#define WCD934X_CDC_RX_INP_MUX_SIDETONE_SRC_CFG0           0x0d18
+#define WCD934X_CDC_RX_INP_MUX_SIDETONE_SRC_CFG1           0x0d19
+#define WCD934X_CDC_RX_INP_MUX_ANC_CFG0                    0x0d1a
+#define WCD934X_CDC_RX_INP_MUX_SPLINE_ASRC_CFG0            0x0d1b
+#define WCD934X_CDC_RX_INP_MUX_EC_REF_HQ_CFG0              0x0d1c
+#define WCD934X_CDC_TX_INP_MUX_ADC_MUX0_CFG0               0x0d1d
+#define WCD934X_CDC_TX_INP_MUX_ADC_MUX0_CFG1               0x0d1e
+#define WCD934X_CDC_TX_INP_MUX_ADC_MUX1_CFG0               0x0d1f
+#define WCD934X_CDC_TX_INP_MUX_ADC_MUX1_CFG1               0x0d20
+#define WCD934X_CDC_TX_INP_MUX_ADC_MUX2_CFG0               0x0d21
+#define WCD934X_CDC_TX_INP_MUX_ADC_MUX2_CFG1               0x0d22
+#define WCD934X_CDC_TX_INP_MUX_ADC_MUX3_CFG0               0x0d23
+#define WCD934X_CDC_TX_INP_MUX_ADC_MUX3_CFG1               0x0d25
+#define WCD934X_CDC_TX_INP_MUX_ADC_MUX4_CFG0               0x0d26
+#define WCD934X_CDC_TX_INP_MUX_ADC_MUX5_CFG0               0x0d27
+#define WCD934X_CDC_TX_INP_MUX_ADC_MUX6_CFG0               0x0d28
+#define WCD934X_CDC_TX_INP_MUX_ADC_MUX7_CFG0               0x0d29
+#define WCD934X_CDC_TX_INP_MUX_ADC_MUX8_CFG0               0x0d2a
+#define WCD934X_CDC_TX_INP_MUX_ADC_MUX10_CFG0              0x0d2b
+#define WCD934X_CDC_TX_INP_MUX_ADC_MUX11_CFG0              0x0d2c
+#define WCD934X_CDC_TX_INP_MUX_ADC_MUX12_CFG0              0x0d2d
+#define WCD934X_CDC_TX_INP_MUX_ADC_MUX13_CFG0              0x0d2e
+#define WCD934X_CDC_SIDETONE_IIR_INP_MUX_IIR0_MIX_CFG0     0x0d31
+#define WCD934X_CDC_SIDETONE_IIR_INP_MUX_IIR0_MIX_CFG1     0x0d32
+#define WCD934X_CDC_SIDETONE_IIR_INP_MUX_IIR0_MIX_CFG2     0x0d33
+#define WCD934X_CDC_SIDETONE_IIR_INP_MUX_IIR0_MIX_CFG3     0x0d34
+#define WCD934X_CDC_SIDETONE_IIR_INP_MUX_IIR1_MIX_CFG0     0x0d35
+#define WCD934X_CDC_SIDETONE_IIR_INP_MUX_IIR1_MIX_CFG1     0x0d36
+#define WCD934X_CDC_SIDETONE_IIR_INP_MUX_IIR1_MIX_CFG2     0x0d37
+#define WCD934X_CDC_SIDETONE_IIR_INP_MUX_IIR1_MIX_CFG3     0x0d38
+#define WCD934X_CDC_IF_ROUTER_TX_MUX_CFG0                  0x0d3a
+#define WCD934X_CDC_IF_ROUTER_TX_MUX_CFG1                  0x0d3b
+#define WCD934X_CDC_IF_ROUTER_TX_MUX_CFG2                  0x0d3c
+#define WCD934X_CDC_IF_ROUTER_TX_MUX_CFG3                  0x0d3d
+#define WCD934X_CDC_CLK_RST_CTRL_MCLK_CONTROL              0x0d41
+#define WCD934X_CDC_CLK_RST_CTRL_FS_CNT_CONTROL            0x0d42
+#define WCD934X_CDC_CLK_RST_CTRL_SWR_CONTROL               0x0d43
+#define WCD934X_CDC_CLK_RST_CTRL_DSD_CONTROL               0x0d44
+#define WCD934X_CDC_CLK_RST_CTRL_ASRC_SHARE_CONTROL        0x0d45
+#define WCD934X_CDC_CLK_RST_CTRL_GFM_CONTROL               0x0d46
+#define WCD934X_CDC_PROX_DETECT_PROX_CTL                   0x0d49
+#define WCD934X_CDC_PROX_DETECT_PROX_POLL_PERIOD0          0x0d4a
+#define WCD934X_CDC_PROX_DETECT_PROX_POLL_PERIOD1          0x0d4b
+#define WCD934X_CDC_PROX_DETECT_PROX_SIG_PATTERN_LSB       0x0d4c
+#define WCD934X_CDC_PROX_DETECT_PROX_SIG_PATTERN_MSB       0x0d4d
+#define WCD934X_CDC_PROX_DETECT_PROX_STATUS                0x0d4e
+#define WCD934X_CDC_PROX_DETECT_PROX_TEST_CTRL             0x0d4f
+#define WCD934X_CDC_PROX_DETECT_PROX_TEST_BUFF_LSB         0x0d50
+#define WCD934X_CDC_PROX_DETECT_PROX_TEST_BUFF_MSB         0x0d51
+#define WCD934X_CDC_PROX_DETECT_PROX_TEST_BUFF_LSB_RD      0x0d52
+#define WCD934X_CDC_PROX_DETECT_PROX_TEST_BUFF_MSB_RD      0x0d53
+#define WCD934X_CDC_PROX_DETECT_PROX_CTL_REPEAT_PAT        0x0d54
+#define WCD934X_CDC_SIDETONE_IIR0_IIR_PATH_CTL             0x0d55
+#define WCD934X_CDC_SIDETONE_IIR0_IIR_GAIN_B1_CTL          0x0d56
+#define WCD934X_CDC_SIDETONE_IIR0_IIR_GAIN_B2_CTL          0x0d57
+#define WCD934X_CDC_SIDETONE_IIR0_IIR_GAIN_B3_CTL          0x0d58
+#define WCD934X_CDC_SIDETONE_IIR0_IIR_GAIN_B4_CTL          0x0d59
+#define WCD934X_CDC_SIDETONE_IIR0_IIR_GAIN_B5_CTL          0x0d5a
+#define WCD934X_CDC_SIDETONE_IIR0_IIR_GAIN_B6_CTL          0x0d5b
+#define WCD934X_CDC_SIDETONE_IIR0_IIR_GAIN_B7_CTL          0x0d5c
+#define WCD934X_CDC_SIDETONE_IIR0_IIR_GAIN_B8_CTL          0x0d5d
+#define WCD934X_CDC_SIDETONE_IIR0_IIR_CTL                  0x0d5e
+#define WCD934X_CDC_SIDETONE_IIR0_IIR_GAIN_TIMER_CTL       0x0d5f
+#define WCD934X_CDC_SIDETONE_IIR0_IIR_COEF_B1_CTL          0x0d60
+#define WCD934X_CDC_SIDETONE_IIR0_IIR_COEF_B2_CTL          0x0d61
+#define WCD934X_CDC_SIDETONE_IIR1_IIR_PATH_CTL             0x0d65
+#define WCD934X_CDC_SIDETONE_IIR1_IIR_GAIN_B1_CTL          0x0d66
+#define WCD934X_CDC_SIDETONE_IIR1_IIR_GAIN_B2_CTL          0x0d67
+#define WCD934X_CDC_SIDETONE_IIR1_IIR_GAIN_B3_CTL          0x0d68
+#define WCD934X_CDC_SIDETONE_IIR1_IIR_GAIN_B4_CTL          0x0d69
+#define WCD934X_CDC_SIDETONE_IIR1_IIR_GAIN_B5_CTL          0x0d6a
+#define WCD934X_CDC_SIDETONE_IIR1_IIR_GAIN_B6_CTL          0x0d6b
+#define WCD934X_CDC_SIDETONE_IIR1_IIR_GAIN_B7_CTL          0x0d6c
+#define WCD934X_CDC_SIDETONE_IIR1_IIR_GAIN_B8_CTL          0x0d6d
+#define WCD934X_CDC_SIDETONE_IIR1_IIR_CTL                  0x0d6e
+#define WCD934X_CDC_SIDETONE_IIR1_IIR_GAIN_TIMER_CTL       0x0d6f
+#define WCD934X_CDC_SIDETONE_IIR1_IIR_COEF_B1_CTL          0x0d70
+#define WCD934X_CDC_SIDETONE_IIR1_IIR_COEF_B2_CTL          0x0d71
+#define WCD934X_CDC_TOP_TOP_CFG0                           0x0d81
+#define WCD934X_CDC_TOP_TOP_CFG1                           0x0d82
+#define WCD934X_CDC_TOP_TOP_CFG7                           0x0d88
+#define WCD934X_CDC_TOP_HPHL_COMP_WR_LSB                   0x0d89
+#define WCD934X_CDC_TOP_HPHL_COMP_WR_MSB                   0x0d8a
+#define WCD934X_CDC_TOP_HPHL_COMP_LUT                      0x0d8b
+#define WCD934X_CDC_TOP_HPHL_COMP_RD_LSB                   0x0d8c
+#define WCD934X_CDC_TOP_HPHL_COMP_RD_MSB                   0x0d8d
+#define WCD934X_CDC_TOP_HPHR_COMP_WR_LSB                   0x0d8e
+#define WCD934X_CDC_TOP_HPHR_COMP_WR_MSB                   0x0d8f
+#define WCD934X_CDC_TOP_HPHR_COMP_LUT                      0x0d90
+#define WCD934X_CDC_TOP_HPHR_COMP_RD_LSB                   0x0d91
+#define WCD934X_CDC_TOP_HPHR_COMP_RD_MSB                   0x0d92
+#define WCD934X_CDC_TOP_DIFFL_COMP_WR_LSB                  0x0d93
+#define WCD934X_CDC_TOP_DIFFL_COMP_WR_MSB                  0x0d94
+#define WCD934X_CDC_TOP_DIFFL_COMP_LUT                     0x0d95
+#define WCD934X_CDC_TOP_DIFFL_COMP_RD_LSB                  0x0d96
+#define WCD934X_CDC_TOP_DIFFL_COMP_RD_MSB                  0x0d97
+#define WCD934X_CDC_TOP_DIFFR_COMP_WR_LSB                  0x0d98
+#define WCD934X_CDC_TOP_DIFFR_COMP_WR_MSB                  0x0d99
+#define WCD934X_CDC_TOP_DIFFR_COMP_LUT                     0x0d9a
+#define WCD934X_CDC_TOP_DIFFR_COMP_RD_LSB                  0x0d9b
+#define WCD934X_CDC_TOP_DIFFR_COMP_RD_MSB                  0x0d9c
+#define WCD934X_CDC_DSD0_PATH_CTL                          0x0db1
+#define WCD934X_CDC_DSD0_CFG0                              0x0db2
+#define WCD934X_CDC_DSD0_CFG1                              0x0db3
+#define WCD934X_CDC_DSD0_CFG2                              0x0db4
+#define WCD934X_CDC_DSD0_CFG3                              0x0db5
+#define WCD934X_CDC_DSD0_CFG4                              0x0db6
+#define WCD934X_CDC_DSD0_CFG5                              0x0db7
+#define WCD934X_CDC_DSD1_PATH_CTL                          0x0dc1
+#define WCD934X_CDC_DSD1_CFG0                              0x0dc2
+#define WCD934X_CDC_DSD1_CFG1                              0x0dc3
+#define WCD934X_CDC_DSD1_CFG2                              0x0dc4
+#define WCD934X_CDC_DSD1_CFG3                              0x0dc5
+#define WCD934X_CDC_DSD1_CFG4                              0x0dc6
+#define WCD934X_CDC_DSD1_CFG5                              0x0dc7
+#define WCD934X_CDC_RX_IDLE_DET_PATH_CTL                   0x0dd1
+#define WCD934X_CDC_RX_IDLE_DET_CFG0                       0x0dd2
+#define WCD934X_CDC_RX_IDLE_DET_CFG1                       0x0dd3
+#define WCD934X_CDC_RX_IDLE_DET_CFG2                       0x0dd4
+#define WCD934X_CDC_RX_IDLE_DET_CFG3                       0x0dd5
+#define WCD934X_PAGE14_PAGE_REGISTER                       0x0e00
+#define WCD934X_CDC_RATE_EST0_RE_CLK_RST_CTL               0x0e01
+#define WCD934X_CDC_RATE_EST0_RE_CTL                       0x0e02
+#define WCD934X_CDC_RATE_EST0_RE_PULSE_SUPR_CTL            0x0e03
+#define WCD934X_CDC_RATE_EST0_RE_TIMER                     0x0e04
+#define WCD934X_CDC_RATE_EST0_RE_BW_SW                     0x0e05
+#define WCD934X_CDC_RATE_EST0_RE_THRESH                    0x0e06
+#define WCD934X_CDC_RATE_EST0_RE_STATUS                    0x0e07
+#define WCD934X_CDC_RATE_EST0_RE_DIAG_CTRL                 0x0e09
+#define WCD934X_CDC_RATE_EST0_RE_DIAG_TIMER2               0x0e0c
+#define WCD934X_CDC_RATE_EST0_RE_DIAG_OFFSET_BW1           0x0e0d
+#define WCD934X_CDC_RATE_EST0_RE_DIAG_OFFSET_BW2           0x0e0e
+#define WCD934X_CDC_RATE_EST0_RE_DIAG_OFFSET_BW3           0x0e0f
+#define WCD934X_CDC_RATE_EST0_RE_DIAG_OFFSET_BW4           0x0e10
+#define WCD934X_CDC_RATE_EST0_RE_DIAG_OFFSET_BW5           0x0e11
+#define WCD934X_CDC_RATE_EST0_RE_DIAG_LIMIT_BW1            0x0e12
+#define WCD934X_CDC_RATE_EST0_RE_DIAG_LIMIT_BW2            0x0e13
+#define WCD934X_CDC_RATE_EST0_RE_DIAG_LIMIT_BW3            0x0e14
+#define WCD934X_CDC_RATE_EST0_RE_DIAG_LIMIT_BW4            0x0e15
+#define WCD934X_CDC_RATE_EST0_RE_DIAG_LIMIT_BW5            0x0e16
+#define WCD934X_CDC_RATE_EST0_RE_DIAG_LIMITD1_BW1          0x0e17
+#define WCD934X_CDC_RATE_EST0_RE_DIAG_LIMITD1_BW2          0x0e18
+#define WCD934X_CDC_RATE_EST0_RE_DIAG_LIMITD1_BW3          0x0e19
+#define WCD934X_CDC_RATE_EST0_RE_DIAG_LIMITD1_BW4          0x0e1a
+#define WCD934X_CDC_RATE_EST0_RE_DIAG_LIMITD1_BW5          0x0e1b
+#define WCD934X_CDC_RATE_EST0_RE_DIAG_HYST_BW1             0x0e1c
+#define WCD934X_CDC_RATE_EST0_RE_DIAG_HYST_BW2             0x0e1d
+#define WCD934X_CDC_RATE_EST0_RE_DIAG_HYST_BW3             0x0e1e
+#define WCD934X_CDC_RATE_EST0_RE_DIAG_HYST_BW4             0x0e1f
+#define WCD934X_CDC_RATE_EST0_RE_DIAG_HYST_BW5             0x0e20
+#define WCD934X_CDC_RATE_EST0_RE_RMAX_DIAG                 0x0e21
+#define WCD934X_CDC_RATE_EST0_RE_RMIN_DIAG                 0x0e22
+#define WCD934X_CDC_RATE_EST0_RE_PH_DET                    0x0e23
+#define WCD934X_CDC_RATE_EST0_RE_DIAG_CLR                  0x0e24
+#define WCD934X_CDC_RATE_EST0_RE_MB_SW_STATE               0x0e25
+#define WCD934X_CDC_RATE_EST0_RE_MAST_DIAG_STATE           0x0e26
+#define WCD934X_CDC_RATE_EST0_RE_RATE_OUT_7_0              0x0e27
+#define WCD934X_CDC_RATE_EST0_RE_RATE_OUT_15_8             0x0e28
+#define WCD934X_CDC_RATE_EST0_RE_RATE_OUT_23_16            0x0e29
+#define WCD934X_CDC_RATE_EST0_RE_RATE_OUT_31_24            0x0e2a
+#define WCD934X_CDC_RATE_EST0_RE_RATE_OUT_39_32            0x0e2b
+#define WCD934X_CDC_RATE_EST0_RE_RATE_OUT_40_43            0x0e2c
+#define WCD934X_CDC_RATE_EST1_RE_CLK_RST_CTL               0x0e31
+#define WCD934X_CDC_RATE_EST1_RE_CTL                       0x0e32
+#define WCD934X_CDC_RATE_EST1_RE_PULSE_SUPR_CTL            0x0e33
+#define WCD934X_CDC_RATE_EST1_RE_TIMER                     0x0e34
+#define WCD934X_CDC_RATE_EST1_RE_BW_SW                     0x0e35
+#define WCD934X_CDC_RATE_EST1_RE_THRESH                    0x0e36
+#define WCD934X_CDC_RATE_EST1_RE_STATUS                    0x0e37
+#define WCD934X_CDC_RATE_EST1_RE_DIAG_CTRL                 0x0e39
+#define WCD934X_CDC_RATE_EST1_RE_DIAG_TIMER2               0x0e3c
+#define WCD934X_CDC_RATE_EST1_RE_DIAG_OFFSET_BW1           0x0e3d
+#define WCD934X_CDC_RATE_EST1_RE_DIAG_OFFSET_BW2           0x0e3e
+#define WCD934X_CDC_RATE_EST1_RE_DIAG_OFFSET_BW3           0x0e3f
+#define WCD934X_CDC_RATE_EST1_RE_DIAG_OFFSET_BW4           0x0e40
+#define WCD934X_CDC_RATE_EST1_RE_DIAG_OFFSET_BW5           0x0e41
+#define WCD934X_CDC_RATE_EST1_RE_DIAG_LIMIT_BW1            0x0e42
+#define WCD934X_CDC_RATE_EST1_RE_DIAG_LIMIT_BW2            0x0e43
+#define WCD934X_CDC_RATE_EST1_RE_DIAG_LIMIT_BW3            0x0e44
+#define WCD934X_CDC_RATE_EST1_RE_DIAG_LIMIT_BW4            0x0e45
+#define WCD934X_CDC_RATE_EST1_RE_DIAG_LIMIT_BW5            0x0e46
+#define WCD934X_CDC_RATE_EST1_RE_DIAG_LIMITD1_BW1          0x0e47
+#define WCD934X_CDC_RATE_EST1_RE_DIAG_LIMITD1_BW2          0x0e48
+#define WCD934X_CDC_RATE_EST1_RE_DIAG_LIMITD1_BW3          0x0e49
+#define WCD934X_CDC_RATE_EST1_RE_DIAG_LIMITD1_BW4          0x0e4a
+#define WCD934X_CDC_RATE_EST1_RE_DIAG_LIMITD1_BW5          0x0e4b
+#define WCD934X_CDC_RATE_EST1_RE_DIAG_HYST_BW1             0x0e4c
+#define WCD934X_CDC_RATE_EST1_RE_DIAG_HYST_BW2             0x0e4d
+#define WCD934X_CDC_RATE_EST1_RE_DIAG_HYST_BW3             0x0e4e
+#define WCD934X_CDC_RATE_EST1_RE_DIAG_HYST_BW4             0x0e4f
+#define WCD934X_CDC_RATE_EST1_RE_DIAG_HYST_BW5             0x0e50
+#define WCD934X_CDC_RATE_EST1_RE_RMAX_DIAG                 0x0e51
+#define WCD934X_CDC_RATE_EST1_RE_RMIN_DIAG                 0x0e52
+#define WCD934X_CDC_RATE_EST1_RE_PH_DET                    0x0e53
+#define WCD934X_CDC_RATE_EST1_RE_DIAG_CLR                  0x0e54
+#define WCD934X_CDC_RATE_EST1_RE_MB_SW_STATE               0x0e55
+#define WCD934X_CDC_RATE_EST1_RE_MAST_DIAG_STATE           0x0e56
+#define WCD934X_CDC_RATE_EST1_RE_RATE_OUT_7_0              0x0e57
+#define WCD934X_CDC_RATE_EST1_RE_RATE_OUT_15_8             0x0e58
+#define WCD934X_CDC_RATE_EST1_RE_RATE_OUT_23_16            0x0e59
+#define WCD934X_CDC_RATE_EST1_RE_RATE_OUT_31_24            0x0e5a
+#define WCD934X_CDC_RATE_EST1_RE_RATE_OUT_39_32            0x0e5b
+#define WCD934X_CDC_RATE_EST1_RE_RATE_OUT_40_43            0x0e5c
+#define WCD934X_CDC_RATE_EST2_RE_CLK_RST_CTL               0x0e61
+#define WCD934X_CDC_RATE_EST2_RE_CTL                       0x0e62
+#define WCD934X_CDC_RATE_EST2_RE_PULSE_SUPR_CTL            0x0e63
+#define WCD934X_CDC_RATE_EST2_RE_TIMER                     0x0e64
+#define WCD934X_CDC_RATE_EST2_RE_BW_SW                     0x0e65
+#define WCD934X_CDC_RATE_EST2_RE_THRESH                    0x0e66
+#define WCD934X_CDC_RATE_EST2_RE_STATUS                    0x0e67
+#define WCD934X_CDC_RATE_EST2_RE_DIAG_CTRL                 0x0e69
+#define WCD934X_CDC_RATE_EST2_RE_DIAG_TIMER2               0x0e6c
+#define WCD934X_CDC_RATE_EST2_RE_DIAG_OFFSET_BW1           0x0e6d
+#define WCD934X_CDC_RATE_EST2_RE_DIAG_OFFSET_BW2           0x0e6e
+#define WCD934X_CDC_RATE_EST2_RE_DIAG_OFFSET_BW3           0x0e6f
+#define WCD934X_CDC_RATE_EST2_RE_DIAG_OFFSET_BW4           0x0e70
+#define WCD934X_CDC_RATE_EST2_RE_DIAG_OFFSET_BW5           0x0e71
+#define WCD934X_CDC_RATE_EST2_RE_DIAG_LIMIT_BW1            0x0e72
+#define WCD934X_CDC_RATE_EST2_RE_DIAG_LIMIT_BW2            0x0e73
+#define WCD934X_CDC_RATE_EST2_RE_DIAG_LIMIT_BW3            0x0e74
+#define WCD934X_CDC_RATE_EST2_RE_DIAG_LIMIT_BW4            0x0e75
+#define WCD934X_CDC_RATE_EST2_RE_DIAG_LIMIT_BW5            0x0e76
+#define WCD934X_CDC_RATE_EST2_RE_DIAG_LIMITD1_BW1          0x0e77
+#define WCD934X_CDC_RATE_EST2_RE_DIAG_LIMITD1_BW2          0x0e78
+#define WCD934X_CDC_RATE_EST2_RE_DIAG_LIMITD1_BW3          0x0e79
+#define WCD934X_CDC_RATE_EST2_RE_DIAG_LIMITD1_BW4          0x0e7a
+#define WCD934X_CDC_RATE_EST2_RE_DIAG_LIMITD1_BW5          0x0e7b
+#define WCD934X_CDC_RATE_EST2_RE_DIAG_HYST_BW1             0x0e7c
+#define WCD934X_CDC_RATE_EST2_RE_DIAG_HYST_BW2             0x0e7d
+#define WCD934X_CDC_RATE_EST2_RE_DIAG_HYST_BW3             0x0e7e
+#define WCD934X_CDC_RATE_EST2_RE_DIAG_HYST_BW4             0x0e7f
+#define WCD934X_CDC_RATE_EST2_RE_DIAG_HYST_BW5             0x0e80
+#define WCD934X_CDC_RATE_EST2_RE_RMAX_DIAG                 0x0e81
+#define WCD934X_CDC_RATE_EST2_RE_RMIN_DIAG                 0x0e82
+#define WCD934X_CDC_RATE_EST2_RE_PH_DET                    0x0e83
+#define WCD934X_CDC_RATE_EST2_RE_DIAG_CLR                  0x0e84
+#define WCD934X_CDC_RATE_EST2_RE_MB_SW_STATE               0x0e85
+#define WCD934X_CDC_RATE_EST2_RE_MAST_DIAG_STATE           0x0e86
+#define WCD934X_CDC_RATE_EST2_RE_RATE_OUT_7_0              0x0e87
+#define WCD934X_CDC_RATE_EST2_RE_RATE_OUT_15_8             0x0e88
+#define WCD934X_CDC_RATE_EST2_RE_RATE_OUT_23_16            0x0e89
+#define WCD934X_CDC_RATE_EST2_RE_RATE_OUT_31_24            0x0e8a
+#define WCD934X_CDC_RATE_EST2_RE_RATE_OUT_39_32            0x0e8b
+#define WCD934X_CDC_RATE_EST2_RE_RATE_OUT_40_43            0x0e8c
+#define WCD934X_CDC_RATE_EST3_RE_CLK_RST_CTL               0x0e91
+#define WCD934X_CDC_RATE_EST3_RE_CTL                       0x0e92
+#define WCD934X_CDC_RATE_EST3_RE_PULSE_SUPR_CTL            0x0e93
+#define WCD934X_CDC_RATE_EST3_RE_TIMER                     0x0e94
+#define WCD934X_CDC_RATE_EST3_RE_BW_SW                     0x0e95
+#define WCD934X_CDC_RATE_EST3_RE_THRESH                    0x0e96
+#define WCD934X_CDC_RATE_EST3_RE_STATUS                    0x0e97
+#define WCD934X_CDC_RATE_EST3_RE_DIAG_CTRL                 0x0e99
+#define WCD934X_CDC_RATE_EST3_RE_DIAG_TIMER2               0x0e9c
+#define WCD934X_CDC_RATE_EST3_RE_DIAG_OFFSET_BW1           0x0e9d
+#define WCD934X_CDC_RATE_EST3_RE_DIAG_OFFSET_BW2           0x0e9e
+#define WCD934X_CDC_RATE_EST3_RE_DIAG_OFFSET_BW3           0x0e9f
+#define WCD934X_CDC_RATE_EST3_RE_DIAG_OFFSET_BW4           0x0ea0
+#define WCD934X_CDC_RATE_EST3_RE_DIAG_OFFSET_BW5           0x0ea1
+#define WCD934X_CDC_RATE_EST3_RE_DIAG_LIMIT_BW1            0x0ea2
+#define WCD934X_CDC_RATE_EST3_RE_DIAG_LIMIT_BW2            0x0ea3
+#define WCD934X_CDC_RATE_EST3_RE_DIAG_LIMIT_BW3            0x0ea4
+#define WCD934X_CDC_RATE_EST3_RE_DIAG_LIMIT_BW4            0x0ea5
+#define WCD934X_CDC_RATE_EST3_RE_DIAG_LIMIT_BW5            0x0ea6
+#define WCD934X_CDC_RATE_EST3_RE_DIAG_LIMITD1_BW1          0x0ea7
+#define WCD934X_CDC_RATE_EST3_RE_DIAG_LIMITD1_BW2          0x0ea8
+#define WCD934X_CDC_RATE_EST3_RE_DIAG_LIMITD1_BW3          0x0ea9
+#define WCD934X_CDC_RATE_EST3_RE_DIAG_LIMITD1_BW4          0x0eaa
+#define WCD934X_CDC_RATE_EST3_RE_DIAG_LIMITD1_BW5          0x0eab
+#define WCD934X_CDC_RATE_EST3_RE_DIAG_HYST_BW1             0x0eac
+#define WCD934X_CDC_RATE_EST3_RE_DIAG_HYST_BW2             0x0ead
+#define WCD934X_CDC_RATE_EST3_RE_DIAG_HYST_BW3             0x0eae
+#define WCD934X_CDC_RATE_EST3_RE_DIAG_HYST_BW4             0x0eaf
+#define WCD934X_CDC_RATE_EST3_RE_DIAG_HYST_BW5             0x0eb0
+#define WCD934X_CDC_RATE_EST3_RE_RMAX_DIAG                 0x0eb1
+#define WCD934X_CDC_RATE_EST3_RE_RMIN_DIAG                 0x0eb2
+#define WCD934X_CDC_RATE_EST3_RE_PH_DET                    0x0eb3
+#define WCD934X_CDC_RATE_EST3_RE_DIAG_CLR                  0x0eb4
+#define WCD934X_CDC_RATE_EST3_RE_MB_SW_STATE               0x0eb5
+#define WCD934X_CDC_RATE_EST3_RE_MAST_DIAG_STATE           0x0eb6
+#define WCD934X_CDC_RATE_EST3_RE_RATE_OUT_7_0              0x0eb7
+#define WCD934X_CDC_RATE_EST3_RE_RATE_OUT_15_8             0x0eb8
+#define WCD934X_CDC_RATE_EST3_RE_RATE_OUT_23_16            0x0eb9
+#define WCD934X_CDC_RATE_EST3_RE_RATE_OUT_31_24            0x0eba
+#define WCD934X_CDC_RATE_EST3_RE_RATE_OUT_39_32            0x0ebb
+#define WCD934X_CDC_RATE_EST3_RE_RATE_OUT_40_43            0x0ebc
+#define WCD934X_PAGE15_PAGE_REGISTER                       0x0f00
+#define WCD934X_SPLINE_SRC0_CLK_RST_CTL_0                  0x0f01
+#define WCD934X_SPLINE_SRC0_STATUS                         0x0f02
+#define WCD934X_SPLINE_SRC1_CLK_RST_CTL_0                  0x0f19
+#define WCD934X_SPLINE_SRC1_STATUS                         0x0f1a
+#define WCD934X_SPLINE_SRC2_CLK_RST_CTL_0                  0x0f31
+#define WCD934X_SPLINE_SRC2_STATUS                         0x0f32
+#define WCD934X_SPLINE_SRC3_CLK_RST_CTL_0                  0x0f49
+#define WCD934X_SPLINE_SRC3_STATUS                         0x0f4a
+#define WCD934X_CDC_DEBUG_DSD0_DEBUG_CFG0                  0x0fa1
+#define WCD934X_CDC_DEBUG_DSD0_DEBUG_CFG1                  0x0fa2
+#define WCD934X_CDC_DEBUG_DSD0_DEBUG_CFG2                  0x0fa3
+#define WCD934X_CDC_DEBUG_DSD0_DEBUG_CFG3                  0x0fa4
+#define WCD934X_CDC_DEBUG_DSD1_DEBUG_CFG0                  0x0fa5
+#define WCD934X_CDC_DEBUG_DSD1_DEBUG_CFG1                  0x0fa6
+#define WCD934X_CDC_DEBUG_DSD1_DEBUG_CFG2                  0x0fa7
+#define WCD934X_CDC_DEBUG_DSD1_DEBUG_CFG3                  0x0fa8
+#define WCD934X_CDC_DEBUG_SPLINE_SRC_DEBUG_CFG0            0x0fa9
+#define WCD934X_CDC_DEBUG_SPLINE_SRC_DEBUG_CFG1            0x0faa
+#define WCD934X_CDC_DEBUG_RC_RE_ASRC_DEBUG_CFG0            0x0fab
+#define WCD934X_CDC_DEBUG_ANC0_RC0_FIFO_CTL                0x0fac
+#define WCD934X_CDC_DEBUG_ANC0_RC1_FIFO_CTL                0x0fad
+#define WCD934X_CDC_DEBUG_ANC1_RC0_FIFO_CTL                0x0fae
+#define WCD934X_CDC_DEBUG_ANC1_RC1_FIFO_CTL                0x0faf
+#define WCD934X_CDC_DEBUG_ANC_RC_RST_DBG_CNTR              0x0fb0
+#define WCD934X_PAGE80_PAGE_REGISTER                       0x5000
+#define WCD934X_CODEC_CPR_WR_DATA_0                        0x5001
+#define WCD934X_CODEC_CPR_WR_DATA_1                        0x5002
+#define WCD934X_CODEC_CPR_WR_DATA_2                        0x5003
+#define WCD934X_CODEC_CPR_WR_DATA_3                        0x5004
+#define WCD934X_CODEC_CPR_WR_ADDR_0                        0x5005
+#define WCD934X_CODEC_CPR_WR_ADDR_1                        0x5006
+#define WCD934X_CODEC_CPR_WR_ADDR_2                        0x5007
+#define WCD934X_CODEC_CPR_WR_ADDR_3                        0x5008
+#define WCD934X_CODEC_CPR_RD_ADDR_0                        0x5009
+#define WCD934X_CODEC_CPR_RD_ADDR_1                        0x500a
+#define WCD934X_CODEC_CPR_RD_ADDR_2                        0x500b
+#define WCD934X_CODEC_CPR_RD_ADDR_3                        0x500c
+#define WCD934X_CODEC_CPR_RD_DATA_0                        0x500d
+#define WCD934X_CODEC_CPR_RD_DATA_1                        0x500e
+#define WCD934X_CODEC_CPR_RD_DATA_2                        0x500f
+#define WCD934X_CODEC_CPR_RD_DATA_3                        0x5010
+#define WCD934X_CODEC_CPR_ACCESS_CFG                       0x5011
+#define WCD934X_CODEC_CPR_ACCESS_STATUS                    0x5012
+#define WCD934X_CODEC_CPR_NOM_CX_VDD                       0x5021
+#define WCD934X_CODEC_CPR_SVS_CX_VDD                       0x5022
+#define WCD934X_CODEC_CPR_SVS2_CX_VDD                      0x5023
+#define WCD934X_CODEC_CPR_NOM_MX_VDD                       0x5024
+#define WCD934X_CODEC_CPR_SVS_MX_VDD                       0x5025
+#define WCD934X_CODEC_CPR_SVS2_MX_VDD                      0x5026
+#define WCD934X_CODEC_CPR_SVS2_MIN_CX_VDD                  0x5027
+#define WCD934X_CODEC_CPR_MAX_SVS2_STEP                    0x5028
+#define WCD934X_CODEC_CPR_CTL                              0x5029
+#define WCD934X_CODEC_CPR_SW_MODECHNG_STATUS               0x502a
+#define WCD934X_CODEC_CPR_SW_MODECHNG_START                0x502b
+#define WCD934X_CODEC_CPR_CPR_STATUS                       0x502c
+#define WCD934X_PAGE128_PAGE_REGISTER                      0x8000
+#define WCD934X_TLMM_BIST_MODE_PINCFG                      0x8001
+#define WCD934X_TLMM_RF_PA_ON_PINCFG                       0x8002
+#define WCD934X_TLMM_INTR1_PINCFG                          0x8003
+#define WCD934X_TLMM_INTR2_PINCFG                          0x8004
+#define WCD934X_TLMM_SWR_DATA_PINCFG                       0x8005
+#define WCD934X_TLMM_SWR_CLK_PINCFG                        0x8006
+#define WCD934X_TLMM_I2S_2_SCK_PINCFG                      0x8007
+#define WCD934X_TLMM_SLIMBUS_DATA1_PINCFG                  0x8008
+#define WCD934X_TLMM_SLIMBUS_DATA2_PINCFG                  0x8009
+#define WCD934X_TLMM_SLIMBUS_CLK_PINCFG                    0x800a
+#define WCD934X_TLMM_I2C_CLK_PINCFG                        0x800b
+#define WCD934X_TLMM_I2C_DATA_PINCFG                       0x800c
+#define WCD934X_TLMM_I2S_0_RX_PINCFG                       0x800d
+#define WCD934X_TLMM_I2S_0_TX_PINCFG                       0x800e
+#define WCD934X_TLMM_I2S_0_SCK_PINCFG                      0x800f
+#define WCD934X_TLMM_I2S_0_WS_PINCFG                       0x8010
+#define WCD934X_TLMM_I2S_1_RX_PINCFG                       0x8011
+#define WCD934X_TLMM_I2S_1_TX_PINCFG                       0x8012
+#define WCD934X_TLMM_I2S_1_SCK_PINCFG                      0x8013
+#define WCD934X_TLMM_I2S_1_WS_PINCFG                       0x8014
+#define WCD934X_TLMM_DMIC1_CLK_PINCFG                      0x8015
+#define WCD934X_TLMM_DMIC1_DATA_PINCFG                     0x8016
+#define WCD934X_TLMM_DMIC2_CLK_PINCFG                      0x8017
+#define WCD934X_TLMM_DMIC2_DATA_PINCFG                     0x8018
+#define WCD934X_TLMM_DMIC3_CLK_PINCFG                      0x8019
+#define WCD934X_TLMM_DMIC3_DATA_PINCFG                     0x801a
+#define WCD934X_TLMM_JTCK_PINCFG                           0x801b
+#define WCD934X_TLMM_GPIO1_PINCFG                          0x801c
+#define WCD934X_TLMM_GPIO2_PINCFG                          0x801d
+#define WCD934X_TLMM_GPIO3_PINCFG                          0x801e
+#define WCD934X_TLMM_GPIO4_PINCFG                          0x801f
+#define WCD934X_TLMM_SPI_S_CSN_PINCFG                      0x8020
+#define WCD934X_TLMM_SPI_S_CLK_PINCFG                      0x8021
+#define WCD934X_TLMM_SPI_S_DOUT_PINCFG                     0x8022
+#define WCD934X_TLMM_SPI_S_DIN_PINCFG                      0x8023
+#define WCD934X_TLMM_BA_N_PINCFG                           0x8024
+#define WCD934X_TLMM_GPIO0_PINCFG                          0x8025
+#define WCD934X_TLMM_I2S_2_RX_PINCFG                       0x8026
+#define WCD934X_TLMM_I2S_2_WS_PINCFG                       0x8027
+#define WCD934X_TEST_DEBUG_PIN_CTL_OE_0                    0x8031
+#define WCD934X_TEST_DEBUG_PIN_CTL_OE_1                    0x8032
+#define WCD934X_TEST_DEBUG_PIN_CTL_OE_2                    0x8033
+#define WCD934X_TEST_DEBUG_PIN_CTL_OE_3                    0x8034
+#define WCD934X_TEST_DEBUG_PIN_CTL_OE_4                    0x8035
+#define WCD934X_TEST_DEBUG_PIN_CTL_DATA_0                  0x8036
+#define WCD934X_TEST_DEBUG_PIN_CTL_DATA_1                  0x8037
+#define WCD934X_TEST_DEBUG_PIN_CTL_DATA_2                  0x8038
+#define WCD934X_TEST_DEBUG_PIN_CTL_DATA_3                  0x8039
+#define WCD934X_TEST_DEBUG_PIN_CTL_DATA_4                  0x803a
+#define WCD934X_TEST_DEBUG_PAD_DRVCTL_0                    0x803b
+#define WCD934X_TEST_DEBUG_PAD_DRVCTL_1                    0x803c
+#define WCD934X_TEST_DEBUG_PIN_STATUS                      0x803d
+#define WCD934X_TEST_DEBUG_NPL_DLY_TEST_1                  0x803e
+#define WCD934X_TEST_DEBUG_NPL_DLY_TEST_2                  0x803f
+#define WCD934X_TEST_DEBUG_MEM_CTRL                        0x8040
+#define WCD934X_TEST_DEBUG_DEBUG_BUS_SEL                   0x8041
+#define WCD934X_TEST_DEBUG_DEBUG_JTAG                      0x8042
+#define WCD934X_TEST_DEBUG_DEBUG_EN_1                      0x8043
+#define WCD934X_TEST_DEBUG_DEBUG_EN_2                      0x8044
+#define WCD934X_TEST_DEBUG_DEBUG_EN_3                      0x8045
+#define WCD934X_TEST_DEBUG_DEBUG_EN_4                      0x8046
+#define WCD934X_TEST_DEBUG_DEBUG_EN_5                      0x8047
+#define WCD934X_TEST_DEBUG_ANA_DTEST_DIR                   0x804a
+#define WCD934X_TEST_DEBUG_PAD_INP_DISABLE_0               0x804b
+#define WCD934X_TEST_DEBUG_PAD_INP_DISABLE_1               0x804c
+#define WCD934X_TEST_DEBUG_PAD_INP_DISABLE_2               0x804d
+#define WCD934X_TEST_DEBUG_PAD_INP_DISABLE_3               0x804e
+#define WCD934X_TEST_DEBUG_PAD_INP_DISABLE_4               0x804f
+#define WCD934X_TEST_DEBUG_SYSMEM_CTRL                     0x8050
+#define WCD934X_TEST_DEBUG_SOC_SW_PWR_SEQ_DELAY            0x8051
+#define WCD934X_TEST_DEBUG_LVAL_NOM_LOW                    0x8052
+#define WCD934X_TEST_DEBUG_LVAL_NOM_HIGH                   0x8053
+#define WCD934X_TEST_DEBUG_LVAL_SVS_SVS2_LOW               0x8054
+#define WCD934X_TEST_DEBUG_LVAL_SVS_SVS2_HIGH              0x8055
+#define WCD934X_TEST_DEBUG_SPI_SLAVE_CHAR                  0x8056
+#define WCD934X_TEST_DEBUG_CODEC_DIAGS                     0x8057
+#define WCD934X_MAX_REGISTER                               0x80FF
+
+/* SLIMBUS Slave Registers */
+#define WCD934X_SLIM_PGD_PORT_INT_RX_EN0                     (0x30)
+#define WCD934X_SLIM_PGD_PORT_INT_TX_EN0                     (0x32)
+#define WCD934X_SLIM_PGD_PORT_INT_STATUS_RX_0                (0x34)
+#define WCD934X_SLIM_PGD_PORT_INT_STATUS_RX_1                (0x35)
+#define WCD934X_SLIM_PGD_PORT_INT_STATUS_TX_0                (0x36)
+#define WCD934X_SLIM_PGD_PORT_INT_STATUS_TX_1                (0x37)
+#define WCD934X_SLIM_PGD_PORT_INT_CLR_RX_0                   (0x38)
+#define WCD934X_SLIM_PGD_PORT_INT_CLR_RX_1                   (0x39)
+#define WCD934X_SLIM_PGD_PORT_INT_CLR_TX_0                   (0x3A)
+#define WCD934X_SLIM_PGD_PORT_INT_CLR_TX_1                   (0x3B)
+#define WCD934X_SLIM_PGD_PORT_INT_RX_SOURCE0                 (0x60)
+#define WCD934X_SLIM_PGD_PORT_INT_TX_SOURCE0                 (0x70)
+
+#endif
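
The WCD934X_SLIM_PGD_PORT_INT_* registers above live on the codec's SLIMBUS slave interface, not the main register map, so they are reached through the interface read/write helpers declared in core.h below. A minimal sketch of acknowledging one pending RX port interrupt follows; the write-1-to-clear semantics and the overall irq flow are assumptions for illustration, not taken from this patch:

/*
 * Sketch only: ack one pending SLIMBUS RX port interrupt.
 * Assumes a probed wcd9xxx handle and write-1-to-clear CLR registers.
 */
static void wcd934x_ack_slim_rx_irq(struct wcd9xxx *wcd9xxx, int port)
{
	int status;

	/* STATUS_RX_0/_1 are consecutive, one bit per port */
	status = wcd9xxx_interface_reg_read(wcd9xxx,
			WCD934X_SLIM_PGD_PORT_INT_STATUS_RX_0 + (port / 8));
	if (status < 0 || !(status & BIT(port % 8)))
		return;

	wcd9xxx_interface_reg_write(wcd9xxx,
			WCD934X_SLIM_PGD_PORT_INT_CLR_RX_0 + (port / 8),
			BIT(port % 8));
}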
diff -Nruw linux-4.4.115-fbx/include/linux/mfd/wcd9xxx./core.h linux-4.4.115-fbx/include/linux/mfd/wcd9xxx/core.h
--- linux-4.4.115-fbx/include/linux/mfd/wcd9xxx./core.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/include/linux/mfd/wcd9xxx/core.h	2019-01-22 16:16:28.295289801 +0100
@@ -0,0 +1,437 @@
+/* Copyright (c) 2011-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MFD_TABLA_CORE_H__
+#define __MFD_TABLA_CORE_H__
+
+#include <linux/types.h>
+#include <linux/platform_device.h>
+#include <linux/of_irq.h>
+#include <linux/interrupt.h>
+#include <linux/pm_qos.h>
+
+#define WCD9XXX_MAX_IRQ_REGS 4
+#define WCD9XXX_MAX_NUM_IRQS (WCD9XXX_MAX_IRQ_REGS * 8)
+#define WCD9XXX_SLIM_NUM_PORT_REG 3
+#define TABLA_VERSION_1_0	0
+#define TABLA_VERSION_1_1	1
+#define TABLA_VERSION_2_0	2
+#define TABLA_IS_1_X(ver) \
+	(((ver == TABLA_VERSION_1_0) || (ver == TABLA_VERSION_1_1)) ? 1 : 0)
+#define TABLA_IS_2_0(ver) ((ver == TABLA_VERSION_2_0) ? 1 : 0)
+
+#define WCD9XXX_SUPPLY_BUCK_NAME "cdc-vdd-buck"
+
+#define SITAR_VERSION_1P0 0
+#define SITAR_VERSION_1P1 1
+#define SITAR_IS_1P0(ver) \
+	((ver == SITAR_VERSION_1P0) ? 1 : 0)
+#define SITAR_IS_1P1(ver) \
+	((ver == SITAR_VERSION_1P1) ? 1 : 0)
+
+#define TAIKO_VERSION_1_0	1
+#define TAIKO_IS_1_0(ver) \
+	((ver == TAIKO_VERSION_1_0) ? 1 : 0)
+
+#define TAPAN_VERSION_1_0	0
+#define TAPAN_IS_1_0(ver) \
+	((ver == TAPAN_VERSION_1_0) ? 1 : 0)
+
+#define TOMTOM_VERSION_1_0	1
+#define TOMTOM_IS_1_0(ver) \
+	((ver == TOMTOM_VERSION_1_0) ? 1 : 0)
+
+#define TASHA_VERSION_1_0     0
+#define TASHA_VERSION_1_1     1
+#define TASHA_VERSION_2_0     2
+
+#define TASHA_IS_1_0(wcd) \
+	((wcd->type == WCD9335 || wcd->type == WCD9326) ? \
+	((wcd->version == TASHA_VERSION_1_0) ? 1 : 0) : 0)
+
+#define TASHA_IS_1_1(wcd) \
+	((wcd->type == WCD9335 || wcd->type == WCD9326) ? \
+	((wcd->version == TASHA_VERSION_1_1) ? 1 : 0) : 0)
+
+#define TASHA_IS_2_0(wcd) \
+	((wcd->type == WCD9335 || wcd->type == WCD9326) ? \
+	((wcd->version == TASHA_VERSION_2_0) ? 1 : 0) : 0)
+
+/*
+ * Fine version info cannot be retrieved before the tavil probe, so
+ * coarse versions are defined here for possible use before tavil probe.
+ */
+#define TAVIL_VERSION_1_0             0
+#define TAVIL_VERSION_1_1             1
+#define TAVIL_VERSION_WCD9340_1_0     2
+#define TAVIL_VERSION_WCD9341_1_0     3
+#define TAVIL_VERSION_WCD9340_1_1     4
+#define TAVIL_VERSION_WCD9341_1_1     5
+
+#define TAVIL_IS_1_0(wcd) \
+	((wcd->type == WCD934X) ? \
+	 ((wcd->version == TAVIL_VERSION_1_0 || \
+	   wcd->version == TAVIL_VERSION_WCD9340_1_0 || \
+	   wcd->version == TAVIL_VERSION_WCD9341_1_0) ? 1 : 0) : 0)
+#define TAVIL_IS_1_1(wcd) \
+	((wcd->type == WCD934X) ? \
+	 ((wcd->version == TAVIL_VERSION_1_1 || \
+	   wcd->version == TAVIL_VERSION_WCD9340_1_1 || \
+	   wcd->version == TAVIL_VERSION_WCD9341_1_1) ? 1 : 0) : 0)
+#define TAVIL_IS_WCD9340_1_0(wcd) \
+	((wcd->type == WCD934X) ? \
+	 ((wcd->version == TAVIL_VERSION_WCD9340_1_0) ? 1 : 0) : 0)
+#define TAVIL_IS_WCD9341_1_0(wcd) \
+	((wcd->type == WCD934X) ? \
+	 ((wcd->version == TAVIL_VERSION_WCD9341_1_0) ? 1 : 0) : 0)
+#define TAVIL_IS_WCD9340_1_1(wcd) \
+	((wcd->type == WCD934X) ? \
+	 ((wcd->version == TAVIL_VERSION_WCD9340_1_1) ? 1 : 0) : 0)
+#define TAVIL_IS_WCD9341_1_1(wcd) \
+	((wcd->type == WCD934X) ? \
+	 ((wcd->version == TAVIL_VERSION_WCD9341_1_1) ? 1 : 0) : 0)
+
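
As a sketch of how the coarse version macros above get used (not part of this patch; the quirks named in the comments are hypothetical):

/* Gate chip-specific behavior on the coarse Tavil revision. */
static void tavil_apply_rev_quirks(struct wcd9xxx *wcd9xxx)
{
	if (TAVIL_IS_1_0(wcd9xxx)) {
		/* e.g. apply a v1.0-only errata sequence */
	} else if (TAVIL_IS_WCD9341_1_1(wcd9xxx)) {
		/* e.g. enable a WCD9341 v1.1-only feature */
	}
}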
+#define IS_CODEC_TYPE(wcd, wcdtype) \
+	((wcd->type == wcdtype) ? true : false)
+#define IS_CODEC_VERSION(wcd, wcdversion) \
+	((wcd->version == wcdversion) ? true : false)
+
+enum {
+	CDC_V_1_0,
+	CDC_V_1_1,
+	CDC_V_2_0,
+};
+
+enum codec_variant {
+	WCD9XXX,
+	WCD9330,
+	WCD9335,
+	WCD9326,
+	WCD934X,
+};
+
+enum wcd9xxx_slim_slave_addr_type {
+	WCD9XXX_SLIM_SLAVE_ADDR_TYPE_0,
+	WCD9XXX_SLIM_SLAVE_ADDR_TYPE_1,
+};
+
+enum wcd9xxx_pm_state {
+	WCD9XXX_PM_SLEEPABLE,
+	WCD9XXX_PM_AWAKE,
+	WCD9XXX_PM_ASLEEP,
+};
+
+enum {
+	WCD9XXX_INTR_STATUS_BASE = 0,
+	WCD9XXX_INTR_CLEAR_BASE,
+	WCD9XXX_INTR_MASK_BASE,
+	WCD9XXX_INTR_LEVEL_BASE,
+	WCD9XXX_INTR_CLR_COMMIT,
+	WCD9XXX_INTR_REG_MAX,
+};
+
+enum wcd9xxx_intf_status {
+	WCD9XXX_INTERFACE_TYPE_PROBING,
+	WCD9XXX_INTERFACE_TYPE_SLIMBUS,
+	WCD9XXX_INTERFACE_TYPE_I2C,
+};
+
+enum {
+	/* INTR_REG 0 */
+	WCD9XXX_IRQ_SLIMBUS = 0,
+	WCD9XXX_IRQ_MBHC_REMOVAL,
+	WCD9XXX_IRQ_MBHC_SHORT_TERM,
+	WCD9XXX_IRQ_MBHC_PRESS,
+	WCD9XXX_IRQ_MBHC_RELEASE,
+	WCD9XXX_IRQ_MBHC_POTENTIAL,
+	WCD9XXX_IRQ_MBHC_INSERTION,
+	WCD9XXX_IRQ_BG_PRECHARGE,
+	/* INTR_REG 1 */
+	WCD9XXX_IRQ_PA1_STARTUP,
+	WCD9XXX_IRQ_PA2_STARTUP,
+	WCD9XXX_IRQ_PA3_STARTUP,
+	WCD9XXX_IRQ_PA4_STARTUP,
+	WCD9306_IRQ_HPH_PA_OCPR_FAULT = WCD9XXX_IRQ_PA4_STARTUP,
+	WCD9XXX_IRQ_PA5_STARTUP,
+	WCD9XXX_IRQ_MICBIAS1_PRECHARGE,
+	WCD9306_IRQ_HPH_PA_OCPL_FAULT = WCD9XXX_IRQ_MICBIAS1_PRECHARGE,
+	WCD9XXX_IRQ_MICBIAS2_PRECHARGE,
+	WCD9XXX_IRQ_MICBIAS3_PRECHARGE,
+	/* INTR_REG 2 */
+	WCD9XXX_IRQ_HPH_PA_OCPL_FAULT,
+	WCD9XXX_IRQ_HPH_PA_OCPR_FAULT,
+	WCD9XXX_IRQ_EAR_PA_OCPL_FAULT,
+	WCD9XXX_IRQ_HPH_L_PA_STARTUP,
+	WCD9XXX_IRQ_HPH_R_PA_STARTUP,
+	WCD9320_IRQ_EAR_PA_STARTUP,
+	WCD9306_IRQ_MBHC_JACK_SWITCH = WCD9320_IRQ_EAR_PA_STARTUP,
+	WCD9310_NUM_IRQS,
+	WCD9XXX_IRQ_RESERVED_0 = WCD9310_NUM_IRQS,
+	WCD9XXX_IRQ_RESERVED_1,
+	WCD9330_IRQ_SVASS_ERR_EXCEPTION = WCD9310_NUM_IRQS,
+	WCD9330_IRQ_MBHC_JACK_SWITCH,
+	/* INTR_REG 3 */
+	WCD9XXX_IRQ_MAD_AUDIO,
+	WCD9XXX_IRQ_MAD_ULTRASOUND,
+	WCD9XXX_IRQ_MAD_BEACON,
+	WCD9XXX_IRQ_SPEAKER_CLIPPING,
+	WCD9320_IRQ_MBHC_JACK_SWITCH,
+	WCD9306_NUM_IRQS,
+	WCD9XXX_IRQ_VBAT_MONITOR_ATTACK = WCD9306_NUM_IRQS,
+	WCD9XXX_IRQ_VBAT_MONITOR_RELEASE,
+	WCD9XXX_NUM_IRQS,
+	/* WCD9330 INTR1_REG 3*/
+	WCD9330_IRQ_SVASS_ENGINE = WCD9XXX_IRQ_MAD_AUDIO,
+	WCD9330_IRQ_MAD_AUDIO,
+	WCD9330_IRQ_MAD_ULTRASOUND,
+	WCD9330_IRQ_MAD_BEACON,
+	WCD9330_IRQ_SPEAKER1_CLIPPING,
+	WCD9330_IRQ_SPEAKER2_CLIPPING,
+	WCD9330_IRQ_VBAT_MONITOR_ATTACK,
+	WCD9330_IRQ_VBAT_MONITOR_RELEASE,
+	WCD9330_NUM_IRQS,
+	WCD9XXX_IRQ_RESERVED_2 = WCD9330_NUM_IRQS,
+};
+
+enum {
+	TABLA_NUM_IRQS = WCD9310_NUM_IRQS,
+	SITAR_NUM_IRQS = WCD9310_NUM_IRQS,
+	TAIKO_NUM_IRQS = WCD9XXX_NUM_IRQS,
+	TAPAN_NUM_IRQS = WCD9306_NUM_IRQS,
+	TOMTOM_NUM_IRQS = WCD9330_NUM_IRQS,
+};
+
+struct intr_data {
+	int intr_num;
+	bool clear_first;
+};
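
A driver provides one such table per codec; clear_first marks interrupts whose status must be cleared before the handler runs. A sketch with illustrative entries (the real tables live in the codec drivers, not in this header):

static const struct intr_data example_intr_tbl[] = {
	{ WCD9XXX_IRQ_SLIMBUS, false },
	{ WCD9XXX_IRQ_MBHC_INSERTION, true },
};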
+
+struct wcd9xxx_core_resource {
+	struct mutex irq_lock;
+	struct mutex nested_irq_lock;
+
+	enum wcd9xxx_pm_state pm_state;
+	struct mutex pm_lock;
+	/* pm_wq notifies change of pm_state */
+	wait_queue_head_t pm_wq;
+	struct pm_qos_request pm_qos_req;
+	int wlock_holders;
+
+
+	/* holds the table of interrupts per codec */
+	const struct intr_data *intr_table;
+	int intr_table_size;
+	unsigned int irq_base;
+	unsigned int irq;
+	u8 irq_masks_cur[WCD9XXX_MAX_IRQ_REGS];
+	u8 irq_masks_cache[WCD9XXX_MAX_IRQ_REGS];
+	bool irq_level_high[WCD9XXX_MAX_NUM_IRQS];
+	int num_irqs;
+	int num_irq_regs;
+	u16 intr_reg[WCD9XXX_INTR_REG_MAX];
+	struct regmap *wcd_core_regmap;
+
+	/* Pointer to parent container data structure */
+	void *parent;
+
+	struct device *dev;
+	struct irq_domain *domain;
+};
+
+/*
+ * Data structure for a Slimbus or I2S channel.
+ * Some of the fields are only used in Slimbus mode.
+ */
+struct wcd9xxx_ch {
+	u32 sph;		/* shared channel handle - slimbus only	*/
+	u32 ch_num;		/*
+				 * virtual channel number, such as 128-144;
+				 * applies to slimbus only
+				 */
+	u16 ch_h;		/* channel handle - slimbus only */
+	u16 port;		/*
+				 * tabla port for RX and TX,
+				 * such as 0-9 for TX and 10-16 for RX;
+				 * applies to both i2s and slimbus
+				 */
+	u16 shift;		/*
+				 * shift bit for RX and TX;
+				 * applies to both i2s and slimbus
+				 */
+	struct list_head list;	/*
+				 * channel linked list;
+				 * applies to both i2s and slimbus
+				 */
+};
+
+struct wcd9xxx_codec_dai_data {
+	u32 rate;				/* sample rate          */
+	u32 bit_width;				/* bit width 16,24,32   */
+	struct list_head wcd9xxx_ch_list;	/* channel list         */
+	u16 grph;				/* slimbus group handle */
+	unsigned long ch_mask;
+	wait_queue_head_t dai_wait;
+	bool bus_down_in_recovery;
+};
+
+#define WCD9XXX_CH(xport, xshift) \
+	{.port = xport, .shift = xshift}
+
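A sketch of a channel table built with the WCD9XXX_CH() initializer above; port numbers follow the struct wcd9xxx_ch comment (0-9 TX, 10-16 RX), and the exact layout is per-codec, so these entries are illustrative:

static struct wcd9xxx_ch example_rx_chs[] = {
	WCD9XXX_CH(10, 0),	/* RX port 10, bit 0 of the shift mask */
	WCD9XXX_CH(11, 1),
	WCD9XXX_CH(12, 2),
};
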
+enum wcd9xxx_chipid_major {
+	TABLA_MAJOR = cpu_to_le16(0x100),
+	SITAR_MAJOR = cpu_to_le16(0x101),
+	TAIKO_MAJOR = cpu_to_le16(0x102),
+	TAPAN_MAJOR = cpu_to_le16(0x103),
+	TOMTOM_MAJOR = cpu_to_le16(0x105),
+	TASHA_MAJOR = cpu_to_le16(0x0),
+	TASHA2P0_MAJOR = cpu_to_le16(0x107),
+	TAVIL_MAJOR = cpu_to_le16(0x108),
+};
+
+enum codec_power_states {
+	WCD_REGION_POWER_COLLAPSE_REMOVE,
+	WCD_REGION_POWER_COLLAPSE_BEGIN,
+	WCD_REGION_POWER_DOWN,
+};
+
+enum wcd_power_regions {
+	WCD9XXX_DIG_CORE_REGION_1,
+	WCD9XXX_MAX_PWR_REGIONS,
+};
+
+struct wcd9xxx_codec_type {
+	u16 id_major;
+	u16 id_minor;
+	struct mfd_cell *dev;
+	int size;
+	int num_irqs;
+	int version; /* -1 to retrieve version from chip version register */
+	enum wcd9xxx_slim_slave_addr_type slim_slave_type;
+	u16 i2c_chip_status;
+	const struct intr_data *intr_tbl;
+	int intr_tbl_size;
+	u16 intr_reg[WCD9XXX_INTR_REG_MAX];
+};
+
+struct wcd9xxx_power_region {
+	enum codec_power_states power_state;
+	u16 pwr_collapse_reg_min;
+	u16 pwr_collapse_reg_max;
+};
+
+struct wcd9xxx {
+	struct device *dev;
+	struct slim_device *slim;
+	struct slim_device *slim_slave;
+	struct mutex io_lock;
+	struct mutex xfer_lock;
+	struct mutex reset_lock;
+	u8 version;
+
+	int reset_gpio;
+	struct device_node *wcd_rst_np;
+
+	int (*read_dev)(struct wcd9xxx *wcd9xxx, unsigned short reg,
+			int bytes, void *dest, bool interface_reg);
+	int (*write_dev)(struct wcd9xxx *wcd9xxx, unsigned short reg,
+			int bytes, void *src, bool interface_reg);
+	int (*multi_reg_write)(struct wcd9xxx *wcd9xxx, const void *data,
+			       size_t count);
+	int (*dev_down)(struct wcd9xxx *wcd9xxx);
+	int (*post_reset)(struct wcd9xxx *wcd9xxx);
+
+	void *ssr_priv;
+	bool dev_up;
+
+	u32 num_of_supplies;
+	struct regulator_bulk_data *supplies;
+
+	struct wcd9xxx_core_resource core_res;
+
+	u16 id_minor;
+	u16 id_major;
+
+	/* Slimbus or I2S port */
+	u32 num_rx_port;
+	u32 num_tx_port;
+	struct wcd9xxx_ch *rx_chs;
+	struct wcd9xxx_ch *tx_chs;
+	u32 mclk_rate;
+	enum codec_variant type;
+	struct regmap *regmap;
+
+	struct wcd9xxx_codec_type *codec_type;
+	bool prev_pg_valid;
+	u8 prev_pg;
+	u8 avoid_cdc_rstlow;
+	struct wcd9xxx_power_region *wcd9xxx_pwr[WCD9XXX_MAX_PWR_REGIONS];
+};
+
+struct wcd9xxx_reg_val {
+	unsigned short reg; /* register address */
+	u8 *buf;            /* buffer to be written to reg. addr */
+	int bytes;          /* number of bytes to be written */
+};
+
+int wcd9xxx_interface_reg_read(struct wcd9xxx *wcd9xxx, unsigned short reg);
+int wcd9xxx_interface_reg_write(struct wcd9xxx *wcd9xxx, unsigned short reg,
+		u8 val);
+int wcd9xxx_get_logical_addresses(u8 *pgd_la, u8 *inf_la);
+int wcd9xxx_slim_write_repeat(struct wcd9xxx *wcd9xxx, unsigned short reg,
+			     int bytes, void *src);
+int wcd9xxx_slim_reserve_bw(struct wcd9xxx *wcd9xxx,
+			    u32 bw_ops, bool commit);
+int wcd9xxx_set_power_state(struct wcd9xxx *, enum codec_power_states,
+			    enum wcd_power_regions);
+int wcd9xxx_get_current_power_state(struct wcd9xxx *,
+				    enum wcd_power_regions);
+
+int wcd9xxx_page_write(struct wcd9xxx *wcd9xxx, unsigned short *reg);
+
+int wcd9xxx_slim_bulk_write(struct wcd9xxx *wcd9xxx,
+			    struct wcd9xxx_reg_val *bulk_reg,
+			    unsigned int size, bool interface);
+
+extern int wcd9xxx_core_res_init(
+	struct wcd9xxx_core_resource*,
+	int, int, struct regmap *);
+
+extern void wcd9xxx_core_res_deinit(
+	struct wcd9xxx_core_resource *);
+
+extern int wcd9xxx_core_res_suspend(
+	struct wcd9xxx_core_resource *,
+	pm_message_t);
+
+extern int wcd9xxx_core_res_resume(
+	struct wcd9xxx_core_resource *);
+
+extern int wcd9xxx_core_irq_init(
+	struct wcd9xxx_core_resource*);
+
+extern int wcd9xxx_assign_irq(struct wcd9xxx_core_resource*,
+			      unsigned int,
+			      unsigned int);
+
+extern enum wcd9xxx_intf_status wcd9xxx_get_intf_type(void);
+extern void wcd9xxx_set_intf_type(enum wcd9xxx_intf_status);
+
+extern enum wcd9xxx_pm_state wcd9xxx_pm_cmpxchg(
+			struct wcd9xxx_core_resource *,
+			enum wcd9xxx_pm_state,
+			enum wcd9xxx_pm_state);
+static inline int __init wcd9xxx_irq_of_init(struct device_node *node,
+			       struct device_node *parent)
+{
+	return 0;
+}
+#endif
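
A sketch of batching writes through the wcd9xxx_reg_val / wcd9xxx_slim_bulk_write API declared above; the register addresses and values here are placeholders, not taken from this patch:

static int example_bulk_write(struct wcd9xxx *wcd9xxx)
{
	u8 v0 = 0x01, v1 = 0x02;
	struct wcd9xxx_reg_val regs[] = {
		{ .reg = 0x0d41, .buf = &v0, .bytes = 1 },	/* placeholder */
		{ .reg = 0x0d42, .buf = &v1, .bytes = 1 },	/* placeholder */
	};

	return wcd9xxx_slim_bulk_write(wcd9xxx, regs, ARRAY_SIZE(regs),
				       false);
}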
diff -Nruw linux-4.4.115-fbx/include/linux/mfd/wcd9xxx./Kbuild linux-4.4.115-fbx/include/linux/mfd/wcd9xxx/Kbuild
--- linux-4.4.115-fbx/include/linux/mfd/wcd9xxx./Kbuild	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/include/linux/mfd/wcd9xxx/Kbuild	2019-01-22 16:16:28.295289801 +0100
@@ -0,0 +1,2 @@
+header-y += wcd9xxx_registers.h
+header-y += wcd9320_registers.h
diff -Nruw linux-4.4.115-fbx/include/linux/mfd/wcd9xxx./pdata.h linux-4.4.115-fbx/include/linux/mfd/wcd9xxx/pdata.h
--- linux-4.4.115-fbx/include/linux/mfd/wcd9xxx./pdata.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/include/linux/mfd/wcd9xxx/pdata.h	2019-01-22 16:16:28.295289801 +0100
@@ -0,0 +1,197 @@
+/* Copyright (c) 2011-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MFD_WCD9XXX_PDATA_H__
+
+#define __MFD_WCD9XXX_PDATA_H__
+
+#include <linux/slimbus/slimbus.h>
+#include <linux/mfd/msm-cdc-supply.h>
+
+#define MICBIAS_EXT_BYP_CAP 0x00
+#define MICBIAS_NO_EXT_BYP_CAP 0x01
+
+#define SITAR_LDOH_1P95_V 0x0
+#define SITAR_LDOH_2P35_V 0x1
+#define SITAR_LDOH_2P75_V 0x2
+#define SITAR_LDOH_2P85_V 0x3
+
+#define SITAR_CFILT1_SEL 0x0
+#define SITAR_CFILT2_SEL 0x1
+#define SITAR_CFILT3_SEL 0x2
+
+#define WCD9XXX_LDOH_1P95_V 0x0
+#define WCD9XXX_LDOH_2P35_V 0x1
+#define WCD9XXX_LDOH_2P75_V 0x2
+#define WCD9XXX_LDOH_2P85_V 0x3
+#define WCD9XXX_LDOH_3P0_V 0x3
+
+#define TABLA_LDOH_1P95_V 0x0
+#define TABLA_LDOH_2P35_V 0x1
+#define TABLA_LDOH_2P75_V 0x2
+#define TABLA_LDOH_2P85_V 0x3
+
+#define TABLA_CFILT1_SEL 0x0
+#define TABLA_CFILT2_SEL 0x1
+#define TABLA_CFILT3_SEL 0x2
+
+#define MAX_AMIC_CHANNEL 7
+
+#define TABLA_OCP_300_MA 0x0
+#define TABLA_OCP_350_MA 0x2
+#define TABLA_OCP_365_MA 0x3
+#define TABLA_OCP_150_MA 0x4
+#define TABLA_OCP_190_MA 0x6
+#define TABLA_OCP_220_MA 0x7
+
+#define TABLA_DCYCLE_255  0x0
+#define TABLA_DCYCLE_511  0x1
+#define TABLA_DCYCLE_767  0x2
+#define TABLA_DCYCLE_1023 0x3
+#define TABLA_DCYCLE_1279 0x4
+#define TABLA_DCYCLE_1535 0x5
+#define TABLA_DCYCLE_1791 0x6
+#define TABLA_DCYCLE_2047 0x7
+#define TABLA_DCYCLE_2303 0x8
+#define TABLA_DCYCLE_2559 0x9
+#define TABLA_DCYCLE_2815 0xA
+#define TABLA_DCYCLE_3071 0xB
+#define TABLA_DCYCLE_3327 0xC
+#define TABLA_DCYCLE_3583 0xD
+#define TABLA_DCYCLE_3839 0xE
+#define TABLA_DCYCLE_4095 0xF
+
+#define WCD9XXX_MCLK_CLK_12P288MHZ 12288000
+#define WCD9XXX_MCLK_CLK_9P6MHZ 9600000
+
+/* Only valid for 9.6 MHz mclk */
+#define WCD9XXX_DMIC_SAMPLE_RATE_600KHZ 600000
+#define WCD9XXX_DMIC_SAMPLE_RATE_2P4MHZ 2400000
+#define WCD9XXX_DMIC_SAMPLE_RATE_3P2MHZ 3200000
+#define WCD9XXX_DMIC_SAMPLE_RATE_4P8MHZ 4800000
+
+/* Only valid for 12.288 MHz mclk */
+#define WCD9XXX_DMIC_SAMPLE_RATE_768KHZ 768000
+#define WCD9XXX_DMIC_SAMPLE_RATE_2P048MHZ 2048000
+#define WCD9XXX_DMIC_SAMPLE_RATE_3P072MHZ 3072000
+#define WCD9XXX_DMIC_SAMPLE_RATE_4P096MHZ 4096000
+#define WCD9XXX_DMIC_SAMPLE_RATE_6P144MHZ 6144000
+
+#define WCD9XXX_DMIC_SAMPLE_RATE_UNDEFINED 0
+
+#define WCD9XXX_DMIC_CLK_DRIVE_UNDEFINED 0
+
+struct wcd9xxx_amic {
+	/* legacy mode, txfe_enable and txfe_buff take 7 inputs;
+	 * each bit represents the channel / TXFE number,
+	 * numbered as below:
+	 * bit 0 = channel 1 / TXFE1_ENABLE / TXFE1_BUFF
+	 * bit 1 = channel 2 / TXFE2_ENABLE / TXFE2_BUFF
+	 * ...
+	 * bit 6 = channel 7 / TXFE7_ENABLE / TXFE7_BUFF
+	 */
+	u8 legacy_mode:MAX_AMIC_CHANNEL;
+	u8 txfe_enable:MAX_AMIC_CHANNEL;
+	u8 txfe_buff:MAX_AMIC_CHANNEL;
+	u8 use_pdata:MAX_AMIC_CHANNEL;
+};
+
+/* Each micbias can be assigned to one of three cfilters.
+ * Vbatt_min >= 0.15 V + ldoh_v
+ * ldoh_v >= 0.15 V + cfiltx_mv
+ * If ldoh_v = 1.95 V: 160 mV < cfiltx_mv < 1800 mV
+ * If ldoh_v = 2.35 V: 200 mV < cfiltx_mv < 2200 mV
+ * If ldoh_v = 2.75 V: 240 mV < cfiltx_mv < 2600 mV
+ * If ldoh_v = 2.85 V: 250 mV < cfiltx_mv < 2700 mV
+ */
+
+struct wcd9xxx_micbias_setting {
+	u8 ldoh_v;
+	u32 cfilt1_mv; /* in mv */
+	u32 cfilt2_mv; /* in mv */
+	u32 cfilt3_mv; /* in mv */
+	u32 micb1_mv;
+	u32 micb2_mv;
+	u32 micb3_mv;
+	u32 micb4_mv;
+	/* Different WCD9xxx series codecs may not
+	 * have 4 mic biases. If a codec has fewer
+	 * mic biases, some of these properties will
+	 * not be used.
+	 */
+	u8 bias1_cfilt_sel;
+	u8 bias2_cfilt_sel;
+	u8 bias3_cfilt_sel;
+	u8 bias4_cfilt_sel;
+	u8 bias1_cap_mode;
+	u8 bias2_cap_mode;
+	u8 bias3_cap_mode;
+	u8 bias4_cap_mode;
+	bool bias2_is_headset_only;
+};
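
The cfilter comment above chains two headroom constraints: Vbatt_min >= 0.15 V + ldoh_v and ldoh_v >= 0.15 V + cfiltx. For example, ldoh_v = 2.85 V allows cfiltx up to 2700 mV and requires Vbatt_min >= 3.0 V. A sketch of a platform-data sanity check over those constraints (voltages in mV; not part of this patch):

static bool wcd9xxx_micbias_cfg_ok(u32 vbatt_min_mv, u32 ldoh_mv,
				   u32 cfilt_mv)
{
	/* 150 mV of headroom at each stage, per the comment above */
	return vbatt_min_mv >= ldoh_mv + 150 && ldoh_mv >= cfilt_mv + 150;
}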
+
+struct wcd9xxx_ocp_setting {
+	unsigned int	use_pdata:1; /* 0 - use sys default as recommended */
+	unsigned int	num_attempts:4; /* up to 15 attempts */
+	unsigned int	run_time:4; /* in duty cycle */
+	unsigned int	wait_time:4; /* in duty cycle */
+	unsigned int	hph_ocp_limit:3; /* Headphone OCP current limit */
+};
+
+#define WCD9XXX_MAX_REGULATOR	9
+/*
+ *      format : TABLA_<POWER_SUPPLY_PIN_NAME>_CUR_MAX
+ *
+ *      <POWER_SUPPLY_PIN_NAME> from Tabla objective spec
+ */
+
+#define  WCD9XXX_CDC_VDDA_CP_CUR_MAX      500000
+#define  WCD9XXX_CDC_VDDA_RX_CUR_MAX      20000
+#define  WCD9XXX_CDC_VDDA_TX_CUR_MAX      20000
+#define  WCD9XXX_VDDIO_CDC_CUR_MAX        5000
+
+#define  WCD9XXX_VDDD_CDC_D_CUR_MAX       5000
+#define  WCD9XXX_VDDD_CDC_A_CUR_MAX       5000
+
+#define WCD9XXX_VDD_SPKDRV_NAME "cdc-vdd-spkdrv"
+#define WCD9XXX_VDD_SPKDRV2_NAME "cdc-vdd-spkdrv-2"
+
+struct wcd9xxx_regulator {
+	const char *name;
+	int min_uV;
+	int max_uV;
+	int optimum_uA;
+	bool ondemand;
+	struct regulator *regulator;
+};
+
+struct wcd9xxx_pdata {
+	int irq;
+	int irq_base;
+	int num_irqs;
+	int reset_gpio;
+	struct device_node *wcd_rst_np;
+	struct wcd9xxx_amic amic_settings;
+	struct slim_device slimbus_slave_device;
+	struct wcd9xxx_micbias_setting micbias;
+	struct wcd9xxx_ocp_setting ocp;
+	struct cdc_regulator *regulator;
+	int num_supplies;
+	u32 mclk_rate;
+	u32 dmic_sample_rate;
+	u32 mad_dmic_sample_rate;
+	u32 ecpp_dmic_sample_rate;
+	u32 dmic_clk_drv;
+	u16 use_pinctrl;
+};
+
+#endif
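
A sketch of board-side platform data using the definitions above; per the rate comments, the DMIC sample rate must come from the matching mclk family (3.072 MHz is only valid with a 12.288 MHz mclk). Field values are illustrative:

static struct wcd9xxx_pdata example_pdata = {
	.irq		   = -1,	/* filled from board/DT resources */
	.reset_gpio	   = -1,
	.mclk_rate	   = WCD9XXX_MCLK_CLK_12P288MHZ,
	.dmic_sample_rate  = WCD9XXX_DMIC_SAMPLE_RATE_3P072MHZ,
};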
diff -Nruw linux-4.4.115-fbx/include/linux/mfd/wcd9xxx./wcd9330_registers.h linux-4.4.115-fbx/include/linux/mfd/wcd9xxx/wcd9330_registers.h
--- linux-4.4.115-fbx/include/linux/mfd/wcd9xxx./wcd9330_registers.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/include/linux/mfd/wcd9xxx/wcd9330_registers.h	2019-01-22 16:16:28.299289837 +0100
@@ -0,0 +1,1626 @@
+/* Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef WCD9330_REGISTERS_H
+#define WCD9330_REGISTERS_H
+
+#include <linux/types.h>
+
+#define TOMTOM_A_CHIP_CTL			(0x000)
+#define TOMTOM_A_CHIP_CTL__POR				(0x38)
+#define TOMTOM_A_CHIP_STATUS			(0x001)
+#define TOMTOM_A_CHIP_STATUS__POR				(0x00)
+#define TOMTOM_A_CHIP_ID_BYTE_0			(0x004)
+#define TOMTOM_A_CHIP_ID_BYTE_0__POR				(0x00)
+#define TOMTOM_A_CHIP_ID_BYTE_1			(0x005)
+#define TOMTOM_A_CHIP_ID_BYTE_1__POR				(0x00)
+#define TOMTOM_A_CHIP_ID_BYTE_2			(0x006)
+#define TOMTOM_A_CHIP_ID_BYTE_2__POR				(0x05)
+#define TOMTOM_A_CHIP_ID_BYTE_3			(0x007)
+#define TOMTOM_A_CHIP_ID_BYTE_3__POR				(0x01)
+#define TOMTOM_A_CHIP_I2C_SLAVE_ID			(0x008)
+#define TOMTOM_A_CHIP_I2C_SLAVE_ID__POR				(0x01)
+#define TOMTOM_A_SLAVE_ID_1			(0x00C)
+#define TOMTOM_A_SLAVE_ID_1__POR				(0x77)
+#define TOMTOM_A_SLAVE_ID_2			(0x00D)
+#define TOMTOM_A_SLAVE_ID_2__POR				(0x66)
+#define TOMTOM_A_SLAVE_ID_3			(0x00E)
+#define TOMTOM_A_SLAVE_ID_3__POR				(0x55)
+#define TOMTOM_A_PIN_CTL_OE0			(0x010)
+#define TOMTOM_A_PIN_CTL_OE0__POR				(0x00)
+#define TOMTOM_A_PIN_CTL_OE1			(0x011)
+#define TOMTOM_A_PIN_CTL_OE1__POR				(0x00)
+#define TOMTOM_A_PIN_CTL_OE2			(0x012)
+#define TOMTOM_A_PIN_CTL_OE2__POR				(0x00)
+#define TOMTOM_A_PIN_CTL_DATA0			(0x013)
+#define TOMTOM_A_PIN_CTL_DATA0__POR				(0x00)
+#define TOMTOM_A_PIN_CTL_DATA1			(0x014)
+#define TOMTOM_A_PIN_CTL_DATA1__POR				(0x00)
+#define TOMTOM_A_PIN_CTL_DATA2			(0x015)
+#define TOMTOM_A_PIN_CTL_DATA2__POR				(0x00)
+#define TOMTOM_A_HDRIVE_GENERIC			(0x018)
+#define TOMTOM_A_HDRIVE_GENERIC__POR				(0x00)
+#define TOMTOM_A_HDRIVE_OVERRIDE			(0x019)
+#define TOMTOM_A_HDRIVE_OVERRIDE__POR				(0x08)
+#define TOMTOM_A_ANA_CSR_WAIT_STATE			(0x01C)
+#define TOMTOM_A_ANA_CSR_WAIT_STATE__POR				(0x44)
+#define TOMTOM_A_PROCESS_MONITOR_CTL0			(0x020)
+#define TOMTOM_A_PROCESS_MONITOR_CTL0__POR				(0x80)
+#define TOMTOM_A_PROCESS_MONITOR_CTL1			(0x021)
+#define TOMTOM_A_PROCESS_MONITOR_CTL1__POR				(0x00)
+#define TOMTOM_A_PROCESS_MONITOR_CTL2			(0x022)
+#define TOMTOM_A_PROCESS_MONITOR_CTL2__POR				(0x00)
+#define TOMTOM_A_PROCESS_MONITOR_CTL3			(0x023)
+#define TOMTOM_A_PROCESS_MONITOR_CTL3__POR				(0x01)
+#define TOMTOM_A_QFUSE_CTL			(0x028)
+#define TOMTOM_A_QFUSE_CTL__POR				(0x00)
+#define TOMTOM_A_QFUSE_STATUS			(0x029)
+#define TOMTOM_A_QFUSE_STATUS__POR				(0x00)
+#define TOMTOM_A_QFUSE_DATA_OUT0			(0x02A)
+#define TOMTOM_A_QFUSE_DATA_OUT0__POR				(0x00)
+#define TOMTOM_A_QFUSE_DATA_OUT1			(0x02B)
+#define TOMTOM_A_QFUSE_DATA_OUT1__POR				(0x00)
+#define TOMTOM_A_QFUSE_DATA_OUT2			(0x02C)
+#define TOMTOM_A_QFUSE_DATA_OUT2__POR				(0x00)
+#define TOMTOM_A_QFUSE_DATA_OUT3			(0x02D)
+#define TOMTOM_A_QFUSE_DATA_OUT3__POR				(0x00)
+#define TOMTOM_A_QFUSE_DATA_OUT4			(0x02E)
+#define TOMTOM_A_QFUSE_DATA_OUT4__POR				(0x00)
+#define TOMTOM_A_QFUSE_DATA_OUT5			(0x02F)
+#define TOMTOM_A_QFUSE_DATA_OUT5__POR				(0x00)
+#define TOMTOM_A_QFUSE_DATA_OUT6			(0x030)
+#define TOMTOM_A_QFUSE_DATA_OUT6__POR				(0x00)
+#define TOMTOM_A_QFUSE_DATA_OUT7			(0x031)
+#define TOMTOM_A_QFUSE_DATA_OUT7__POR				(0x00)
+#define TOMTOM_A_CDC_CTL			(0x034)
+#define TOMTOM_A_CDC_CTL__POR				(0x00)
+#define TOMTOM_A_LEAKAGE_CTL			(0x03C)
+#define TOMTOM_A_LEAKAGE_CTL__POR				(0x04)
+#define TOMTOM_A_SVASS_MEM_PTR0			(0x044)
+#define TOMTOM_A_SVASS_MEM_PTR0__POR				(0x00)
+#define TOMTOM_A_SVASS_MEM_PTR1			(0x045)
+#define TOMTOM_A_SVASS_MEM_PTR1__POR				(0x00)
+#define TOMTOM_A_SVASS_MEM_PTR2			(0x046)
+#define TOMTOM_A_SVASS_MEM_PTR2__POR				(0x00)
+#define TOMTOM_A_SVASS_MEM_CTL			(0x048)
+#define TOMTOM_A_SVASS_MEM_CTL__POR				(0x04)
+#define TOMTOM_A_SVASS_MEM_BANK			(0x049)
+#define TOMTOM_A_SVASS_MEM_BANK__POR				(0x00)
+#define TOMTOM_A_DMIC_B1_CTL			(0x04A)
+#define TOMTOM_A_DMIC_B1_CTL__POR				(0x00)
+#define TOMTOM_A_DMIC_B2_CTL			(0x04B)
+#define TOMTOM_A_DMIC_B2_CTL__POR				(0x00)
+#define TOMTOM_A_SVASS_CLKRST_CTL			(0x04C)
+#define TOMTOM_A_SVASS_CLKRST_CTL__POR				(0x00)
+#define TOMTOM_A_SVASS_CPAR_CFG			(0x04D)
+#define TOMTOM_A_SVASS_CPAR_CFG__POR				(0x00)
+#define TOMTOM_A_SVASS_BUF_RDY_INT_PERIOD			(0x04E)
+#define TOMTOM_A_SVASS_BUF_RDY_INT_PERIOD__POR				(0x14)
+#define TOMTOM_A_SVASS_CPAR_WDOG_CFG			(0x04F)
+#define TOMTOM_A_SVASS_CPAR_WDOG_CFG__POR				(0x00)
+#define TOMTOM_A_SVASS_CFG			(0x050)
+#define TOMTOM_A_SVASS_CFG__POR				(0x01)
+#define TOMTOM_A_SVASS_SPE_CFG			(0x051)
+#define TOMTOM_A_SVASS_SPE_CFG__POR				(0x04)
+#define TOMTOM_A_SVASS_STATUS			(0x052)
+#define TOMTOM_A_SVASS_STATUS__POR				(0x00)
+#define TOMTOM_A_SVASS_INT_MASK			(0x053)
+#define TOMTOM_A_SVASS_INT_MASK__POR				(0x3F)
+#define TOMTOM_A_SVASS_INT_STATUS			(0x054)
+#define TOMTOM_A_SVASS_INT_STATUS__POR				(0x00)
+#define TOMTOM_A_SVASS_INT_CLR			(0x055)
+#define TOMTOM_A_SVASS_INT_CLR__POR				(0x00)
+#define TOMTOM_A_SVASS_DEBUG			(0x056)
+#define TOMTOM_A_SVASS_DEBUG__POR				(0x00)
+#define TOMTOM_A_SVASS_SPE_BKUP_INT			(0x057)
+#define TOMTOM_A_SVASS_SPE_BKUP_INT__POR				(0x00)
+#define TOMTOM_A_SVASS_MEM_ACC			(0x058)
+#define TOMTOM_A_SVASS_MEM_ACC__POR				(0x00)
+#define TOMTOM_A_MEM_LEAKAGE_CTL			(0x059)
+#define TOMTOM_A_MEM_LEAKAGE_CTL__POR				(0x04)
+#define TOMTOM_A_SVASS_SPE_INBOX_TRG			(0x05A)
+#define TOMTOM_A_SVASS_SPE_INBOX_TRG__POR				(0x00)
+#define TOMTOM_A_SVASS_SPE_INBOX_0			(0x060)
+#define TOMTOM_A_SVASS_SPE_INBOX_0__POR				(0x00)
+#define TOMTOM_A_SVASS_SPE_INBOX_1			(0x061)
+#define TOMTOM_A_SVASS_SPE_INBOX_1__POR				(0x00)
+#define TOMTOM_A_SVASS_SPE_INBOX_2			(0x062)
+#define TOMTOM_A_SVASS_SPE_INBOX_2__POR				(0x00)
+#define TOMTOM_A_SVASS_SPE_INBOX_3			(0x063)
+#define TOMTOM_A_SVASS_SPE_INBOX_3__POR				(0x00)
+#define TOMTOM_A_SVASS_SPE_INBOX_4			(0x064)
+#define TOMTOM_A_SVASS_SPE_INBOX_4__POR				(0x00)
+#define TOMTOM_A_SVASS_SPE_INBOX_5			(0x065)
+#define TOMTOM_A_SVASS_SPE_INBOX_5__POR				(0x00)
+#define TOMTOM_A_SVASS_SPE_INBOX_6			(0x066)
+#define TOMTOM_A_SVASS_SPE_INBOX_6__POR				(0x00)
+#define TOMTOM_A_SVASS_SPE_INBOX_7			(0x067)
+#define TOMTOM_A_SVASS_SPE_INBOX_7__POR				(0x00)
+#define TOMTOM_A_SVASS_SPE_INBOX_8			(0x068)
+#define TOMTOM_A_SVASS_SPE_INBOX_8__POR				(0x00)
+#define TOMTOM_A_SVASS_SPE_INBOX_9			(0x069)
+#define TOMTOM_A_SVASS_SPE_INBOX_9__POR				(0x00)
+#define TOMTOM_A_SVASS_SPE_INBOX_10			(0x06A)
+#define TOMTOM_A_SVASS_SPE_INBOX_10__POR				(0x00)
+#define TOMTOM_A_SVASS_SPE_INBOX_11			(0x06B)
+#define TOMTOM_A_SVASS_SPE_INBOX_11__POR				(0x00)
+#define TOMTOM_A_SVASS_SPE_OUTBOX_0			(0x070)
+#define TOMTOM_A_SVASS_SPE_OUTBOX_0__POR				(0x00)
+#define TOMTOM_A_SVASS_SPE_OUTBOX_1			(0x071)
+#define TOMTOM_A_SVASS_SPE_OUTBOX_1__POR				(0x00)
+#define TOMTOM_A_SVASS_SPE_OUTBOX_2			(0x072)
+#define TOMTOM_A_SVASS_SPE_OUTBOX_2__POR				(0x00)
+#define TOMTOM_A_SVASS_SPE_OUTBOX_3			(0x073)
+#define TOMTOM_A_SVASS_SPE_OUTBOX_3__POR				(0x00)
+#define TOMTOM_A_SVASS_SPE_OUTBOX_4			(0x074)
+#define TOMTOM_A_SVASS_SPE_OUTBOX_4__POR				(0x00)
+#define TOMTOM_A_SVASS_SPE_OUTBOX_5			(0x075)
+#define TOMTOM_A_SVASS_SPE_OUTBOX_5__POR				(0x00)
+#define TOMTOM_A_SVASS_SPE_OUTBOX_6			(0x076)
+#define TOMTOM_A_SVASS_SPE_OUTBOX_6__POR				(0x00)
+#define TOMTOM_A_SVASS_SPE_OUTBOX_7			(0x077)
+#define TOMTOM_A_SVASS_SPE_OUTBOX_7__POR				(0x00)
+#define TOMTOM_A_SVASS_SPE_OUTBOX_8			(0x078)
+#define TOMTOM_A_SVASS_SPE_OUTBOX_8__POR				(0x00)
+#define TOMTOM_A_SVASS_SPE_OUTBOX_9			(0x079)
+#define TOMTOM_A_SVASS_SPE_OUTBOX_9__POR				(0x00)
+#define TOMTOM_A_SVASS_SPE_OUTBOX_10			(0x07A)
+#define TOMTOM_A_SVASS_SPE_OUTBOX_10__POR				(0x00)
+#define TOMTOM_A_SVASS_SPE_OUTBOX_11			(0x07B)
+#define TOMTOM_A_SVASS_SPE_OUTBOX_11__POR				(0x00)
+#define TOMTOM_A_INTR_MODE			(0x090)
+#define TOMTOM_A_INTR_MODE__POR				(0x00)
+#define TOMTOM_A_INTR1_MASK0			(0x094)
+#define TOMTOM_A_INTR1_MASK0__POR				(0xFF)
+#define TOMTOM_A_INTR1_MASK1			(0x095)
+#define TOMTOM_A_INTR1_MASK1__POR				(0xFF)
+#define TOMTOM_A_INTR1_MASK2			(0x096)
+#define TOMTOM_A_INTR1_MASK2__POR				(0xFF)
+#define TOMTOM_A_INTR1_MASK3			(0x097)
+#define TOMTOM_A_INTR1_MASK3__POR				(0xFF)
+#define TOMTOM_A_INTR1_STATUS0			(0x098)
+#define TOMTOM_A_INTR1_STATUS0__POR				(0x00)
+#define TOMTOM_A_INTR1_STATUS1			(0x099)
+#define TOMTOM_A_INTR1_STATUS1__POR				(0x00)
+#define TOMTOM_A_INTR1_STATUS2			(0x09A)
+#define TOMTOM_A_INTR1_STATUS2__POR				(0x00)
+#define TOMTOM_A_INTR1_STATUS3			(0x09B)
+#define TOMTOM_A_INTR1_STATUS3__POR				(0x00)
+#define TOMTOM_A_INTR1_CLEAR0			(0x09C)
+#define TOMTOM_A_INTR1_CLEAR0__POR				(0x00)
+#define TOMTOM_A_INTR1_CLEAR1			(0x09D)
+#define TOMTOM_A_INTR1_CLEAR1__POR				(0x00)
+#define TOMTOM_A_INTR1_CLEAR2			(0x09E)
+#define TOMTOM_A_INTR1_CLEAR2__POR				(0x00)
+#define TOMTOM_A_INTR1_CLEAR3			(0x09F)
+#define TOMTOM_A_INTR1_CLEAR3__POR				(0x00)
+#define TOMTOM_A_INTR1_LEVEL0			(0x0A0)
+#define TOMTOM_A_INTR1_LEVEL0__POR				(0x01)
+#define TOMTOM_A_INTR1_LEVEL1			(0x0A1)
+#define TOMTOM_A_INTR1_LEVEL1__POR				(0x00)
+#define TOMTOM_A_INTR1_LEVEL2			(0x0A2)
+#define TOMTOM_A_INTR1_LEVEL2__POR				(0x40)
+#define TOMTOM_A_INTR1_LEVEL3			(0x0A3)
+#define TOMTOM_A_INTR1_LEVEL3__POR				(0x00)
+#define TOMTOM_A_INTR1_TEST0			(0x0A4)
+#define TOMTOM_A_INTR1_TEST0__POR				(0x00)
+#define TOMTOM_A_INTR1_TEST1			(0x0A5)
+#define TOMTOM_A_INTR1_TEST1__POR				(0x00)
+#define TOMTOM_A_INTR1_TEST2			(0x0A6)
+#define TOMTOM_A_INTR1_TEST2__POR				(0x00)
+#define TOMTOM_A_INTR1_TEST3			(0x0A7)
+#define TOMTOM_A_INTR1_TEST3__POR				(0x00)
+#define TOMTOM_A_INTR1_SET0			(0x0A8)
+#define TOMTOM_A_INTR1_SET0__POR				(0x00)
+#define TOMTOM_A_INTR1_SET1			(0x0A9)
+#define TOMTOM_A_INTR1_SET1__POR				(0x00)
+#define TOMTOM_A_INTR1_SET2			(0x0AA)
+#define TOMTOM_A_INTR1_SET2__POR				(0x00)
+#define TOMTOM_A_INTR1_SET3			(0x0AB)
+#define TOMTOM_A_INTR1_SET3__POR				(0x00)
+#define TOMTOM_A_INTR2_MASK0			(0x0B0)
+#define TOMTOM_A_INTR2_MASK0__POR				(0xFF)
+#define TOMTOM_A_INTR2_STATUS0			(0x0B2)
+#define TOMTOM_A_INTR2_STATUS0__POR				(0x00)
+#define TOMTOM_A_INTR2_CLEAR0			(0x0B4)
+#define TOMTOM_A_INTR2_CLEAR0__POR				(0x00)
+#define TOMTOM_A_INTR2_LEVEL0			(0x0B6)
+#define TOMTOM_A_INTR2_LEVEL0__POR				(0x00)
+#define TOMTOM_A_INTR2_TEST0			(0x0B8)
+#define TOMTOM_A_INTR2_TEST0__POR				(0x00)
+#define TOMTOM_A_INTR2_SET0			(0x0BA)
+#define TOMTOM_A_INTR2_SET0__POR				(0x00)
+#define TOMTOM_A_CDC_TX_I2S_SCK_MODE			(0x0C0)
+#define TOMTOM_A_CDC_TX_I2S_SCK_MODE__POR				(0x00)
+#define TOMTOM_A_CDC_TX_I2S_WS_MODE			(0x0C1)
+#define TOMTOM_A_CDC_TX_I2S_WS_MODE__POR				(0x00)
+#define TOMTOM_A_CDC_DMIC_DATA0_MODE			(0x0C4)
+#define TOMTOM_A_CDC_DMIC_DATA0_MODE__POR				(0x00)
+#define TOMTOM_A_CDC_DMIC_CLK0_MODE			(0x0C5)
+#define TOMTOM_A_CDC_DMIC_CLK0_MODE__POR				(0x00)
+#define TOMTOM_A_CDC_DMIC_DATA1_MODE			(0x0C6)
+#define TOMTOM_A_CDC_DMIC_DATA1_MODE__POR				(0x00)
+#define TOMTOM_A_CDC_DMIC_CLK1_MODE			(0x0C7)
+#define TOMTOM_A_CDC_DMIC_CLK1_MODE__POR				(0x00)
+#define TOMTOM_A_CDC_RX_I2S_SCK_MODE			(0x0C8)
+#define TOMTOM_A_CDC_RX_I2S_SCK_MODE__POR				(0x00)
+#define TOMTOM_A_CDC_RX_I2S_WS_MODE			(0x0C9)
+#define TOMTOM_A_CDC_RX_I2S_WS_MODE__POR				(0x00)
+#define TOMTOM_A_CDC_DMIC_DATA2_MODE			(0x0CA)
+#define TOMTOM_A_CDC_DMIC_DATA2_MODE__POR				(0x00)
+#define TOMTOM_A_CDC_DMIC_CLK2_MODE			(0x0CB)
+#define TOMTOM_A_CDC_DMIC_CLK2_MODE__POR				(0x00)
+#define TOMTOM_A_CDC_INTR1_MODE			(0x0CC)
+#define TOMTOM_A_CDC_INTR1_MODE__POR				(0x00)
+#define TOMTOM_A_CDC_SB_NRZ_SEL_MODE			(0x0CD)
+#define TOMTOM_A_CDC_SB_NRZ_SEL_MODE__POR				(0x00)
+#define TOMTOM_A_CDC_INTR2_MODE			(0x0CE)
+#define TOMTOM_A_CDC_INTR2_MODE__POR				(0x00)
+#define TOMTOM_A_CDC_RF_PA_ON_MODE			(0x0CF)
+#define TOMTOM_A_CDC_RF_PA_ON_MODE__POR				(0x00)
+#define TOMTOM_A_CDC_BOOST_MODE			(0x0D0)
+#define TOMTOM_A_CDC_BOOST_MODE__POR				(0x00)
+#define TOMTOM_A_CDC_JTCK_MODE			(0x0D1)
+#define TOMTOM_A_CDC_JTCK_MODE__POR				(0x00)
+#define TOMTOM_A_CDC_JTDI_MODE			(0x0D2)
+#define TOMTOM_A_CDC_JTDI_MODE__POR				(0x00)
+#define TOMTOM_A_CDC_JTMS_MODE			(0x0D3)
+#define TOMTOM_A_CDC_JTMS_MODE__POR				(0x00)
+#define TOMTOM_A_CDC_JTDO_MODE			(0x0D4)
+#define TOMTOM_A_CDC_JTDO_MODE__POR				(0x00)
+#define TOMTOM_A_CDC_JTRST_MODE			(0x0D5)
+#define TOMTOM_A_CDC_JTRST_MODE__POR				(0x00)
+#define TOMTOM_A_CDC_BIST_MODE_MODE			(0x0D6)
+#define TOMTOM_A_CDC_BIST_MODE_MODE__POR				(0x00)
+#define TOMTOM_A_CDC_MAD_MAIN_CTL_1			(0x0E0)
+#define TOMTOM_A_CDC_MAD_MAIN_CTL_1__POR				(0x00)
+#define TOMTOM_A_CDC_MAD_MAIN_CTL_2			(0x0E1)
+#define TOMTOM_A_CDC_MAD_MAIN_CTL_2__POR				(0x00)
+#define TOMTOM_A_CDC_MAD_AUDIO_CTL_1			(0x0E2)
+#define TOMTOM_A_CDC_MAD_AUDIO_CTL_1__POR				(0x00)
+#define TOMTOM_A_CDC_MAD_AUDIO_CTL_2			(0x0E3)
+#define TOMTOM_A_CDC_MAD_AUDIO_CTL_2__POR				(0x00)
+#define TOMTOM_A_CDC_MAD_AUDIO_CTL_3			(0x0E4)
+#define TOMTOM_A_CDC_MAD_AUDIO_CTL_3__POR				(0x00)
+#define TOMTOM_A_CDC_MAD_AUDIO_CTL_4			(0x0E5)
+#define TOMTOM_A_CDC_MAD_AUDIO_CTL_4__POR				(0x00)
+#define TOMTOM_A_CDC_MAD_AUDIO_CTL_5			(0x0E6)
+#define TOMTOM_A_CDC_MAD_AUDIO_CTL_5__POR				(0x00)
+#define TOMTOM_A_CDC_MAD_AUDIO_CTL_6			(0x0E7)
+#define TOMTOM_A_CDC_MAD_AUDIO_CTL_6__POR				(0x00)
+#define TOMTOM_A_CDC_MAD_AUDIO_CTL_7			(0x0E8)
+#define TOMTOM_A_CDC_MAD_AUDIO_CTL_7__POR				(0x00)
+#define TOMTOM_A_CDC_MAD_AUDIO_CTL_8			(0x0E9)
+#define TOMTOM_A_CDC_MAD_AUDIO_CTL_8__POR				(0x00)
+#define TOMTOM_A_CDC_MAD_AUDIO_IIR_CTL_PTR			(0x0EA)
+#define TOMTOM_A_CDC_MAD_AUDIO_IIR_CTL_PTR__POR				(0x00)
+#define TOMTOM_A_CDC_MAD_AUDIO_IIR_CTL_VAL			(0x0EB)
+#define TOMTOM_A_CDC_MAD_AUDIO_IIR_CTL_VAL__POR				(0x40)
+#define TOMTOM_A_CDC_MAD_ULTR_CTL_1			(0x0EC)
+#define TOMTOM_A_CDC_MAD_ULTR_CTL_1__POR				(0x00)
+#define TOMTOM_A_CDC_MAD_ULTR_CTL_2			(0x0ED)
+#define TOMTOM_A_CDC_MAD_ULTR_CTL_2__POR				(0x00)
+#define TOMTOM_A_CDC_MAD_ULTR_CTL_3			(0x0EE)
+#define TOMTOM_A_CDC_MAD_ULTR_CTL_3__POR				(0x00)
+#define TOMTOM_A_CDC_MAD_ULTR_CTL_4			(0x0EF)
+#define TOMTOM_A_CDC_MAD_ULTR_CTL_4__POR				(0x00)
+#define TOMTOM_A_CDC_MAD_ULTR_CTL_5			(0x0F0)
+#define TOMTOM_A_CDC_MAD_ULTR_CTL_5__POR				(0x00)
+#define TOMTOM_A_CDC_MAD_ULTR_CTL_6			(0x0F1)
+#define TOMTOM_A_CDC_MAD_ULTR_CTL_6__POR				(0x00)
+#define TOMTOM_A_CDC_MAD_ULTR_CTL_7			(0x0F2)
+#define TOMTOM_A_CDC_MAD_ULTR_CTL_7__POR				(0x00)
+#define TOMTOM_A_CDC_MAD_BEACON_CTL_1			(0x0F3)
+#define TOMTOM_A_CDC_MAD_BEACON_CTL_1__POR				(0x00)
+#define TOMTOM_A_CDC_MAD_BEACON_CTL_2			(0x0F4)
+#define TOMTOM_A_CDC_MAD_BEACON_CTL_2__POR				(0x00)
+#define TOMTOM_A_CDC_MAD_BEACON_CTL_3			(0x0F5)
+#define TOMTOM_A_CDC_MAD_BEACON_CTL_3__POR				(0x00)
+#define TOMTOM_A_CDC_MAD_BEACON_CTL_4			(0x0F6)
+#define TOMTOM_A_CDC_MAD_BEACON_CTL_4__POR				(0x00)
+#define TOMTOM_A_CDC_MAD_BEACON_CTL_5			(0x0F7)
+#define TOMTOM_A_CDC_MAD_BEACON_CTL_5__POR				(0x00)
+#define TOMTOM_A_CDC_MAD_BEACON_CTL_6			(0x0F8)
+#define TOMTOM_A_CDC_MAD_BEACON_CTL_6__POR				(0x00)
+#define TOMTOM_A_CDC_MAD_BEACON_CTL_7			(0x0F9)
+#define TOMTOM_A_CDC_MAD_BEACON_CTL_7__POR				(0x00)
+#define TOMTOM_A_CDC_MAD_BEACON_CTL_8			(0x0FA)
+#define TOMTOM_A_CDC_MAD_BEACON_CTL_8__POR				(0x00)
+#define TOMTOM_A_CDC_MAD_BEACON_IIR_CTL_PTR			(0x0FB)
+#define TOMTOM_A_CDC_MAD_BEACON_IIR_CTL_PTR__POR			(0x00)
+#define TOMTOM_A_CDC_MAD_BEACON_IIR_CTL_VAL			(0x0FC)
+#define TOMTOM_A_CDC_MAD_BEACON_IIR_CTL_VAL__POR			(0x00)
+#define TOMTOM_A_CDC_MAD_INP_SEL			(0x0FD)
+#define TOMTOM_A_CDC_MAD_INP_SEL__POR				(0x00)
+#define TOMTOM_A_BIAS_REF_CTL			(0x100)
+#define TOMTOM_A_BIAS_REF_CTL__POR				(0x1C)
+#define TOMTOM_A_BIAS_CENTRAL_BG_CTL			(0x101)
+#define TOMTOM_A_BIAS_CENTRAL_BG_CTL__POR				(0x50)
+#define TOMTOM_A_BIAS_PRECHRG_CTL			(0x102)
+#define TOMTOM_A_BIAS_PRECHRG_CTL__POR				(0x07)
+#define TOMTOM_A_BIAS_CURR_CTL_1			(0x103)
+#define TOMTOM_A_BIAS_CURR_CTL_1__POR				(0x52)
+#define TOMTOM_A_BIAS_CURR_CTL_2			(0x104)
+#define TOMTOM_A_BIAS_CURR_CTL_2__POR				(0x00)
+#define TOMTOM_A_BIAS_OSC_BG_CTL			(0x105)
+#define TOMTOM_A_BIAS_OSC_BG_CTL__POR				(0x36)
+#define TOMTOM_A_CLK_BUFF_EN1			(0x108)
+#define TOMTOM_A_CLK_BUFF_EN1__POR				(0x04)
+#define TOMTOM_A_CLK_BUFF_EN2			(0x109)
+#define TOMTOM_A_CLK_BUFF_EN2__POR				(0x02)
+#define TOMTOM_A_LDO_L_MODE_1			(0x10A)
+#define TOMTOM_A_LDO_L_MODE_1__POR				(0x08)
+#define TOMTOM_A_LDO_L_MODE_2			(0x10B)
+#define TOMTOM_A_LDO_L_MODE_2__POR				(0x50)
+#define TOMTOM_A_LDO_L_CTRL_1			(0x10C)
+#define TOMTOM_A_LDO_L_CTRL_1__POR				(0x70)
+#define TOMTOM_A_LDO_L_CTRL_2			(0x10D)
+#define TOMTOM_A_LDO_L_CTRL_2__POR				(0x55)
+#define TOMTOM_A_LDO_L_CTRL_3			(0x10E)
+#define TOMTOM_A_LDO_L_CTRL_3__POR				(0x56)
+#define TOMTOM_A_LDO_L_CTRL_4			(0x10F)
+#define TOMTOM_A_LDO_L_CTRL_4__POR				(0x55)
+#define TOMTOM_A_LDO_H_MODE_1			(0x110)
+#define TOMTOM_A_LDO_H_MODE_1__POR				(0x65)
+#define TOMTOM_A_LDO_H_MODE_2			(0x111)
+#define TOMTOM_A_LDO_H_MODE_2__POR				(0xA8)
+#define TOMTOM_A_LDO_H_LOOP_CTL			(0x112)
+#define TOMTOM_A_LDO_H_LOOP_CTL__POR				(0x6B)
+#define TOMTOM_A_LDO_H_COMP_1			(0x113)
+#define TOMTOM_A_LDO_H_COMP_1__POR				(0x84)
+#define TOMTOM_A_LDO_H_COMP_2			(0x114)
+#define TOMTOM_A_LDO_H_COMP_2__POR				(0xE0)
+#define TOMTOM_A_LDO_H_BIAS_1			(0x115)
+#define TOMTOM_A_LDO_H_BIAS_1__POR				(0x6D)
+#define TOMTOM_A_LDO_H_BIAS_2			(0x116)
+#define TOMTOM_A_LDO_H_BIAS_2__POR				(0xA5)
+#define TOMTOM_A_LDO_H_BIAS_3			(0x117)
+#define TOMTOM_A_LDO_H_BIAS_3__POR				(0x60)
+#define TOMTOM_A_VBAT_CLK			(0x118)
+#define TOMTOM_A_VBAT_CLK__POR				(0x03)
+#define TOMTOM_A_VBAT_LOOP			(0x119)
+#define TOMTOM_A_VBAT_LOOP__POR				(0x02)
+#define TOMTOM_A_VBAT_REF			(0x11A)
+#define TOMTOM_A_VBAT_REF__POR				(0x20)
+#define TOMTOM_A_VBAT_ADC_TEST			(0x11B)
+#define TOMTOM_A_VBAT_ADC_TEST__POR				(0x00)
+#define TOMTOM_A_VBAT_FE			(0x11C)
+#define TOMTOM_A_VBAT_FE__POR				(0x48)
+#define TOMTOM_A_VBAT_BIAS_1			(0x11D)
+#define TOMTOM_A_VBAT_BIAS_1__POR				(0x03)
+#define TOMTOM_A_VBAT_BIAS_2			(0x11E)
+#define TOMTOM_A_VBAT_BIAS_2__POR				(0x00)
+#define TOMTOM_A_VBAT_ADC_DATA_MSB			(0x11F)
+#define TOMTOM_A_VBAT_ADC_DATA_MSB__POR				(0x00)
+#define TOMTOM_A_VBAT_ADC_DATA_LSB			(0x120)
+#define TOMTOM_A_VBAT_ADC_DATA_LSB__POR				(0x00)
+#define TOMTOM_A_FLL_NREF			(0x121)
+#define TOMTOM_A_FLL_NREF__POR				(0x12)
+#define TOMTOM_A_FLL_KDCO_TUNE			(0x122)
+#define TOMTOM_A_FLL_KDCO_TUNE__POR				(0x05)
+#define TOMTOM_A_FLL_LOCK_THRESH			(0x123)
+#define TOMTOM_A_FLL_LOCK_THRESH__POR				(0xC2)
+#define TOMTOM_A_FLL_LOCK_DET_COUNT			(0x124)
+#define TOMTOM_A_FLL_LOCK_DET_COUNT__POR				(0x40)
+#define TOMTOM_A_FLL_DAC_THRESHOLD			(0x125)
+#define TOMTOM_A_FLL_DAC_THRESHOLD__POR				(0xC8)
+#define TOMTOM_A_FLL_TEST_DCO_FREERUN			(0x126)
+#define TOMTOM_A_FLL_TEST_DCO_FREERUN__POR				(0x00)
+#define TOMTOM_A_FLL_TEST_ENABLE			(0x127)
+#define TOMTOM_A_FLL_TEST_ENABLE__POR				(0x00)
+#define TOMTOM_A_MICB_CFILT_1_CTL			(0x128)
+#define TOMTOM_A_MICB_CFILT_1_CTL__POR				(0x40)
+#define TOMTOM_A_MICB_CFILT_1_VAL			(0x129)
+#define TOMTOM_A_MICB_CFILT_1_VAL__POR				(0x80)
+#define TOMTOM_A_MICB_CFILT_1_PRECHRG			(0x12A)
+#define TOMTOM_A_MICB_CFILT_1_PRECHRG__POR				(0x38)
+#define TOMTOM_A_MICB_1_CTL			(0x12B)
+#define TOMTOM_A_MICB_1_CTL__POR				(0x16)
+#define TOMTOM_A_MICB_1_INT_RBIAS			(0x12C)
+#define TOMTOM_A_MICB_1_INT_RBIAS__POR				(0x24)
+#define TOMTOM_A_MICB_1_MBHC			(0x12D)
+#define TOMTOM_A_MICB_1_MBHC__POR				(0x01)
+#define TOMTOM_A_MICB_CFILT_2_CTL			(0x12E)
+#define TOMTOM_A_MICB_CFILT_2_CTL__POR				(0x41)
+#define TOMTOM_A_MICB_CFILT_2_VAL			(0x12F)
+#define TOMTOM_A_MICB_CFILT_2_VAL__POR				(0x80)
+#define TOMTOM_A_MICB_CFILT_2_PRECHRG			(0x130)
+#define TOMTOM_A_MICB_CFILT_2_PRECHRG__POR				(0x38)
+#define TOMTOM_A_MICB_2_CTL			(0x131)
+#define TOMTOM_A_MICB_2_CTL__POR				(0x16)
+#define TOMTOM_A_MICB_2_INT_RBIAS			(0x132)
+#define TOMTOM_A_MICB_2_INT_RBIAS__POR				(0x24)
+#define TOMTOM_A_MICB_2_MBHC			(0x133)
+#define TOMTOM_A_MICB_2_MBHC__POR				(0x02)
+#define TOMTOM_A_MICB_CFILT_3_CTL			(0x134)
+#define TOMTOM_A_MICB_CFILT_3_CTL__POR				(0x40)
+#define TOMTOM_A_MICB_CFILT_3_VAL			(0x135)
+#define TOMTOM_A_MICB_CFILT_3_VAL__POR				(0x80)
+#define TOMTOM_A_MICB_CFILT_3_PRECHRG			(0x136)
+#define TOMTOM_A_MICB_CFILT_3_PRECHRG__POR				(0x38)
+#define TOMTOM_A_MICB_3_CTL			(0x137)
+#define TOMTOM_A_MICB_3_CTL__POR				(0x16)
+#define TOMTOM_A_MICB_3_INT_RBIAS			(0x138)
+#define TOMTOM_A_MICB_3_INT_RBIAS__POR				(0x24)
+#define TOMTOM_A_MICB_3_MBHC			(0x139)
+#define TOMTOM_A_MICB_3_MBHC__POR				(0x00)
+#define TOMTOM_A_MICB_4_CTL			(0x13A)
+#define TOMTOM_A_MICB_4_CTL__POR				(0x16)
+#define TOMTOM_A_MICB_4_INT_RBIAS			(0x13B)
+#define TOMTOM_A_MICB_4_INT_RBIAS__POR				(0x24)
+#define TOMTOM_A_MICB_4_MBHC			(0x13C)
+#define TOMTOM_A_MICB_4_MBHC__POR				(0x01)
+#define TOMTOM_A_SPKR_DRV2_EN			(0x13D)
+#define TOMTOM_A_SPKR_DRV2_EN__POR				(0x6F)
+#define TOMTOM_A_SPKR_DRV2_GAIN			(0x13E)
+#define TOMTOM_A_SPKR_DRV2_GAIN__POR				(0x00)
+#define TOMTOM_A_SPKR_DRV2_DAC_CTL			(0x13F)
+#define TOMTOM_A_SPKR_DRV2_DAC_CTL__POR				(0x04)
+#define TOMTOM_A_SPKR_DRV2_OCP_CTL			(0x140)
+#define TOMTOM_A_SPKR_DRV2_OCP_CTL__POR				(0x97)
+#define TOMTOM_A_SPKR_DRV2_CLIP_DET			(0x141)
+#define TOMTOM_A_SPKR_DRV2_CLIP_DET__POR				(0x01)
+#define TOMTOM_A_SPKR_DRV2_DBG_DAC			(0x142)
+#define TOMTOM_A_SPKR_DRV2_DBG_DAC__POR				(0x05)
+#define TOMTOM_A_SPKR_DRV2_DBG_PA			(0x143)
+#define TOMTOM_A_SPKR_DRV2_DBG_PA__POR				(0x18)
+#define TOMTOM_A_SPKR_DRV2_DBG_PWRSTG			(0x144)
+#define TOMTOM_A_SPKR_DRV2_DBG_PWRSTG__POR				(0x00)
+#define TOMTOM_A_SPKR_DRV2_BIAS_LDO			(0x145)
+#define TOMTOM_A_SPKR_DRV2_BIAS_LDO__POR				(0x45)
+#define TOMTOM_A_SPKR_DRV2_BIAS_INT			(0x146)
+#define TOMTOM_A_SPKR_DRV2_BIAS_INT__POR				(0xA5)
+#define TOMTOM_A_SPKR_DRV2_BIAS_PA			(0x147)
+#define TOMTOM_A_SPKR_DRV2_BIAS_PA__POR				(0x55)
+#define TOMTOM_A_SPKR_DRV2_STATUS_OCP			(0x148)
+#define TOMTOM_A_SPKR_DRV2_STATUS_OCP__POR				(0x00)
+#define TOMTOM_A_SPKR_DRV2_STATUS_PA			(0x149)
+#define TOMTOM_A_SPKR_DRV2_STATUS_PA__POR				(0x00)
+#define TOMTOM_A_MBHC_INSERT_DETECT			(0x14A)
+#define TOMTOM_A_MBHC_INSERT_DETECT__POR				(0x00)
+#define TOMTOM_A_MBHC_INSERT_DET_STATUS			(0x14B)
+#define TOMTOM_A_MBHC_INSERT_DET_STATUS__POR				(0x00)
+#define TOMTOM_A_TX_COM_BIAS			(0x14C)
+#define TOMTOM_A_TX_COM_BIAS__POR				(0xF0)
+#define TOMTOM_A_MBHC_INSERT_DETECT2			(0x14D)
+#define TOMTOM_A_MBHC_INSERT_DETECT2__POR				(0xD0)
+#define TOMTOM_A_MBHC_SCALING_MUX_1			(0x14E)
+#define TOMTOM_A_MBHC_SCALING_MUX_1__POR				(0x00)
+#define TOMTOM_A_MBHC_SCALING_MUX_2			(0x14F)
+#define TOMTOM_A_MBHC_SCALING_MUX_2__POR				(0x80)
+#define TOMTOM_A_MAD_ANA_CTRL			(0x150)
+#define TOMTOM_A_MAD_ANA_CTRL__POR				(0xF1)
+#define TOMTOM_A_TX_SUP_SWITCH_CTRL_1			(0x151)
+#define TOMTOM_A_TX_SUP_SWITCH_CTRL_1__POR				(0x00)
+#define TOMTOM_A_TX_SUP_SWITCH_CTRL_2			(0x152)
+#define TOMTOM_A_TX_SUP_SWITCH_CTRL_2__POR				(0x80)
+#define TOMTOM_A_TX_1_GAIN			(0x153)
+#define TOMTOM_A_TX_1_GAIN__POR				(0x02)
+#define TOMTOM_A_TX_1_2_TEST_EN			(0x154)
+#define TOMTOM_A_TX_1_2_TEST_EN__POR				(0xCC)
+#define TOMTOM_A_TX_2_GAIN			(0x155)
+#define TOMTOM_A_TX_2_GAIN__POR				(0x02)
+#define TOMTOM_A_TX_1_2_ADC_IB			(0x156)
+#define TOMTOM_A_TX_1_2_ADC_IB__POR				(0x44)
+#define TOMTOM_A_TX_1_2_ATEST_REFCTRL			(0x157)
+#define TOMTOM_A_TX_1_2_ATEST_REFCTRL__POR				(0x00)
+#define TOMTOM_A_TX_1_2_TEST_CTL			(0x158)
+#define TOMTOM_A_TX_1_2_TEST_CTL__POR				(0x38)
+#define TOMTOM_A_TX_1_2_TEST_BLOCK_EN			(0x159)
+#define TOMTOM_A_TX_1_2_TEST_BLOCK_EN__POR				(0xFC)
+#define TOMTOM_A_TX_1_2_TXFE_CLKDIV			(0x15A)
+#define TOMTOM_A_TX_1_2_TXFE_CLKDIV__POR				(0x55)
+#define TOMTOM_A_TX_1_2_SAR_ERR_CH1			(0x15B)
+#define TOMTOM_A_TX_1_2_SAR_ERR_CH1__POR				(0x00)
+#define TOMTOM_A_TX_1_2_SAR_ERR_CH2			(0x15C)
+#define TOMTOM_A_TX_1_2_SAR_ERR_CH2__POR				(0x00)
+#define TOMTOM_A_TX_3_GAIN			(0x15D)
+#define TOMTOM_A_TX_3_GAIN__POR				(0x02)
+#define TOMTOM_A_TX_3_4_TEST_EN			(0x15E)
+#define TOMTOM_A_TX_3_4_TEST_EN__POR				(0xCC)
+#define TOMTOM_A_TX_4_GAIN			(0x15F)
+#define TOMTOM_A_TX_4_GAIN__POR				(0x02)
+#define TOMTOM_A_TX_3_4_ADC_IB			(0x160)
+#define TOMTOM_A_TX_3_4_ADC_IB__POR				(0x44)
+#define TOMTOM_A_TX_3_4_ATEST_REFCTRL			(0x161)
+#define TOMTOM_A_TX_3_4_ATEST_REFCTRL__POR				(0x00)
+#define TOMTOM_A_TX_3_4_TEST_CTL			(0x162)
+#define TOMTOM_A_TX_3_4_TEST_CTL__POR				(0x38)
+#define TOMTOM_A_TX_3_4_TEST_BLOCK_EN			(0x163)
+#define TOMTOM_A_TX_3_4_TEST_BLOCK_EN__POR				(0xFC)
+#define TOMTOM_A_TX_3_4_TXFE_CKDIV			(0x164)
+#define TOMTOM_A_TX_3_4_TXFE_CKDIV__POR				(0x55)
+#define TOMTOM_A_TX_3_4_SAR_ERR_CH3			(0x165)
+#define TOMTOM_A_TX_3_4_SAR_ERR_CH3__POR				(0x00)
+#define TOMTOM_A_TX_3_4_SAR_ERR_CH4			(0x166)
+#define TOMTOM_A_TX_3_4_SAR_ERR_CH4__POR				(0x00)
+#define TOMTOM_A_TX_5_GAIN			(0x167)
+#define TOMTOM_A_TX_5_GAIN__POR				(0x02)
+#define TOMTOM_A_TX_5_6_TEST_EN			(0x168)
+#define TOMTOM_A_TX_5_6_TEST_EN__POR				(0xCC)
+#define TOMTOM_A_TX_6_GAIN			(0x169)
+#define TOMTOM_A_TX_6_GAIN__POR				(0x02)
+#define TOMTOM_A_TX_5_6_ADC_IB			(0x16A)
+#define TOMTOM_A_TX_5_6_ADC_IB__POR				(0x44)
+#define TOMTOM_A_TX_5_6_ATEST_REFCTRL			(0x16B)
+#define TOMTOM_A_TX_5_6_ATEST_REFCTRL__POR				(0x00)
+#define TOMTOM_A_TX_5_6_TEST_CTL			(0x16C)
+#define TOMTOM_A_TX_5_6_TEST_CTL__POR				(0x38)
+#define TOMTOM_A_TX_5_6_TEST_BLOCK_EN			(0x16D)
+#define TOMTOM_A_TX_5_6_TEST_BLOCK_EN__POR				(0xFC)
+#define TOMTOM_A_TX_5_6_TXFE_CKDIV			(0x16E)
+#define TOMTOM_A_TX_5_6_TXFE_CKDIV__POR				(0x55)
+#define TOMTOM_A_TX_5_6_SAR_ERR_CH5			(0x16F)
+#define TOMTOM_A_TX_5_6_SAR_ERR_CH5__POR				(0x00)
+#define TOMTOM_A_TX_5_6_SAR_ERR_CH6			(0x170)
+#define TOMTOM_A_TX_5_6_SAR_ERR_CH6__POR				(0x00)
+#define TOMTOM_A_TX_7_MBHC_EN			(0x171)
+#define TOMTOM_A_TX_7_MBHC_EN__POR				(0x0C)
+#define TOMTOM_A_TX_7_MBHC_ATEST_REFCTRL			(0x172)
+#define TOMTOM_A_TX_7_MBHC_ATEST_REFCTRL__POR				(0x00)
+#define TOMTOM_A_TX_7_MBHC_ADC			(0x173)
+#define TOMTOM_A_TX_7_MBHC_ADC__POR				(0x44)
+#define TOMTOM_A_TX_7_MBHC_TEST_CTL			(0x174)
+#define TOMTOM_A_TX_7_MBHC_TEST_CTL__POR				(0x38)
+#define TOMTOM_A_TX_7_MBHC_SAR_ERR			(0x175)
+#define TOMTOM_A_TX_7_MBHC_SAR_ERR__POR				(0x00)
+#define TOMTOM_A_TX_7_TXFE_CLKDIV			(0x176)
+#define TOMTOM_A_TX_7_TXFE_CLKDIV__POR				(0x8B)
+#define TOMTOM_A_RCO_CTRL			(0x177)
+#define TOMTOM_A_RCO_CTRL__POR				(0x00)
+#define TOMTOM_A_RCO_CALIBRATION_CTRL1			(0x178)
+#define TOMTOM_A_RCO_CALIBRATION_CTRL1__POR				(0x00)
+#define TOMTOM_A_RCO_CALIBRATION_CTRL2			(0x179)
+#define TOMTOM_A_RCO_CALIBRATION_CTRL2__POR				(0x00)
+#define TOMTOM_A_RCO_CALIBRATION_CTRL3			(0x17A)
+#define TOMTOM_A_RCO_CALIBRATION_CTRL3__POR				(0x00)
+#define TOMTOM_A_RCO_TEST_CTRL			(0x17B)
+#define TOMTOM_A_RCO_TEST_CTRL__POR				(0x00)
+#define TOMTOM_A_RCO_CALIBRATION_RESULT1			(0x17C)
+#define TOMTOM_A_RCO_CALIBRATION_RESULT1__POR				(0x00)
+#define TOMTOM_A_RCO_CALIBRATION_RESULT2			(0x17D)
+#define TOMTOM_A_RCO_CALIBRATION_RESULT2__POR				(0x00)
+#define TOMTOM_A_BUCK_MODE_1			(0x181)
+#define TOMTOM_A_BUCK_MODE_1__POR				(0x21)
+#define TOMTOM_A_BUCK_MODE_2			(0x182)
+#define TOMTOM_A_BUCK_MODE_2__POR				(0xFF)
+#define TOMTOM_A_BUCK_MODE_3			(0x183)
+#define TOMTOM_A_BUCK_MODE_3__POR				(0xCE)
+#define TOMTOM_A_BUCK_MODE_4			(0x184)
+#define TOMTOM_A_BUCK_MODE_4__POR				(0x3A)
+#define TOMTOM_A_BUCK_MODE_5			(0x185)
+#define TOMTOM_A_BUCK_MODE_5__POR				(0x00)
+#define TOMTOM_A_BUCK_CTRL_VCL_1			(0x186)
+#define TOMTOM_A_BUCK_CTRL_VCL_1__POR				(0x08)
+#define TOMTOM_A_BUCK_CTRL_VCL_2			(0x187)
+#define TOMTOM_A_BUCK_CTRL_VCL_2__POR				(0xA3)
+#define TOMTOM_A_BUCK_CTRL_VCL_3			(0x188)
+#define TOMTOM_A_BUCK_CTRL_VCL_3__POR				(0x82)
+#define TOMTOM_A_BUCK_CTRL_CCL_1			(0x189)
+#define TOMTOM_A_BUCK_CTRL_CCL_1__POR				(0x5B)
+#define TOMTOM_A_BUCK_CTRL_CCL_2			(0x18A)
+#define TOMTOM_A_BUCK_CTRL_CCL_2__POR				(0xDC)
+#define TOMTOM_A_BUCK_CTRL_CCL_3			(0x18B)
+#define TOMTOM_A_BUCK_CTRL_CCL_3__POR				(0x6A)
+#define TOMTOM_A_BUCK_CTRL_CCL_4			(0x18C)
+#define TOMTOM_A_BUCK_CTRL_CCL_4__POR				(0x51)
+#define TOMTOM_A_BUCK_CTRL_PWM_DRVR_1			(0x18D)
+#define TOMTOM_A_BUCK_CTRL_PWM_DRVR_1__POR				(0x50)
+#define TOMTOM_A_BUCK_CTRL_PWM_DRVR_2			(0x18E)
+#define TOMTOM_A_BUCK_CTRL_PWM_DRVR_2__POR				(0x64)
+#define TOMTOM_A_BUCK_CTRL_PWM_DRVR_3			(0x18F)
+#define TOMTOM_A_BUCK_CTRL_PWM_DRVR_3__POR				(0x77)
+#define TOMTOM_A_BUCK_TMUX_A_D			(0x190)
+#define TOMTOM_A_BUCK_TMUX_A_D__POR				(0x00)
+#define TOMTOM_A_NCP_BUCKREF			(0x191)
+#define TOMTOM_A_NCP_BUCKREF__POR				(0x00)
+#define TOMTOM_A_NCP_EN			(0x192)
+#define TOMTOM_A_NCP_EN__POR				(0xFE)
+#define TOMTOM_A_NCP_CLK			(0x193)
+#define TOMTOM_A_NCP_CLK__POR				(0x94)
+#define TOMTOM_A_NCP_STATIC			(0x194)
+#define TOMTOM_A_NCP_STATIC__POR				(0x28)
+#define TOMTOM_A_NCP_VTH_LOW			(0x195)
+#define TOMTOM_A_NCP_VTH_LOW__POR				(0x88)
+#define TOMTOM_A_NCP_VTH_HIGH			(0x196)
+#define TOMTOM_A_NCP_VTH_HIGH__POR				(0xA0)
+#define TOMTOM_A_NCP_ATEST			(0x197)
+#define TOMTOM_A_NCP_ATEST__POR				(0x00)
+#define TOMTOM_A_NCP_DTEST			(0x198)
+#define TOMTOM_A_NCP_DTEST__POR				(0x10)
+#define TOMTOM_A_NCP_DLY1			(0x199)
+#define TOMTOM_A_NCP_DLY1__POR				(0x06)
+#define TOMTOM_A_NCP_DLY2			(0x19A)
+#define TOMTOM_A_NCP_DLY2__POR				(0x06)
+#define TOMTOM_A_RX_AUX_SW_CTL			(0x19B)
+#define TOMTOM_A_RX_AUX_SW_CTL__POR				(0x00)
+#define TOMTOM_A_RX_PA_AUX_IN_CONN			(0x19C)
+#define TOMTOM_A_RX_PA_AUX_IN_CONN__POR				(0x00)
+#define TOMTOM_A_RX_COM_TIMER_DIV			(0x19E)
+#define TOMTOM_A_RX_COM_TIMER_DIV__POR				(0xE8)
+#define TOMTOM_A_RX_COM_OCP_CTL			(0x19F)
+#define TOMTOM_A_RX_COM_OCP_CTL__POR				(0x1F)
+#define TOMTOM_A_RX_COM_OCP_COUNT			(0x1A0)
+#define TOMTOM_A_RX_COM_OCP_COUNT__POR				(0x77)
+#define TOMTOM_A_RX_COM_DAC_CTL			(0x1A1)
+#define TOMTOM_A_RX_COM_DAC_CTL__POR				(0x00)
+#define TOMTOM_A_RX_COM_BIAS			(0x1A2)
+#define TOMTOM_A_RX_COM_BIAS__POR				(0x20)
+#define TOMTOM_A_RX_HPH_AUTO_CHOP			(0x1A4)
+#define TOMTOM_A_RX_HPH_AUTO_CHOP__POR				(0x38)
+#define TOMTOM_A_RX_HPH_CHOP_CTL			(0x1A5)
+#define TOMTOM_A_RX_HPH_CHOP_CTL__POR				(0xA4)
+#define TOMTOM_A_RX_HPH_BIAS_PA			(0x1A6)
+#define TOMTOM_A_RX_HPH_BIAS_PA__POR				(0x7A)
+#define TOMTOM_A_RX_HPH_BIAS_LDO			(0x1A7)
+#define TOMTOM_A_RX_HPH_BIAS_LDO__POR				(0x87)
+#define TOMTOM_A_RX_HPH_BIAS_CNP			(0x1A8)
+#define TOMTOM_A_RX_HPH_BIAS_CNP__POR				(0x8A)
+#define TOMTOM_A_RX_HPH_BIAS_WG_OCP			(0x1A9)
+#define TOMTOM_A_RX_HPH_BIAS_WG_OCP__POR				(0x2A)
+#define TOMTOM_A_RX_HPH_OCP_CTL			(0x1AA)
+#define TOMTOM_A_RX_HPH_OCP_CTL__POR				(0x69)
+#define TOMTOM_A_RX_HPH_CNP_EN			(0x1AB)
+#define TOMTOM_A_RX_HPH_CNP_EN__POR				(0x80)
+#define TOMTOM_A_RX_HPH_CNP_WG_CTL			(0x1AC)
+#define TOMTOM_A_RX_HPH_CNP_WG_CTL__POR				(0xDA)
+#define TOMTOM_A_RX_HPH_CNP_WG_TIME			(0x1AD)
+#define TOMTOM_A_RX_HPH_CNP_WG_TIME__POR				(0x15)
+#define TOMTOM_A_RX_HPH_L_GAIN			(0x1AE)
+#define TOMTOM_A_RX_HPH_L_GAIN__POR				(0xC0)
+#define TOMTOM_A_RX_HPH_L_TEST			(0x1AF)
+#define TOMTOM_A_RX_HPH_L_TEST__POR				(0x02)
+#define TOMTOM_A_RX_HPH_L_PA_CTL			(0x1B0)
+#define TOMTOM_A_RX_HPH_L_PA_CTL__POR				(0x42)
+#define TOMTOM_A_RX_HPH_L_DAC_CTL			(0x1B1)
+#define TOMTOM_A_RX_HPH_L_DAC_CTL__POR				(0x00)
+#define TOMTOM_A_RX_HPH_L_ATEST			(0x1B2)
+#define TOMTOM_A_RX_HPH_L_ATEST__POR				(0x00)
+#define TOMTOM_A_RX_HPH_L_STATUS			(0x1B3)
+#define TOMTOM_A_RX_HPH_L_STATUS__POR				(0x00)
+#define TOMTOM_A_RX_HPH_R_GAIN			(0x1B4)
+#define TOMTOM_A_RX_HPH_R_GAIN__POR				(0x00)
+#define TOMTOM_A_RX_HPH_R_TEST			(0x1B5)
+#define TOMTOM_A_RX_HPH_R_TEST__POR				(0x02)
+#define TOMTOM_A_RX_HPH_R_PA_CTL			(0x1B6)
+#define TOMTOM_A_RX_HPH_R_PA_CTL__POR				(0x42)
+#define TOMTOM_A_RX_HPH_R_DAC_CTL			(0x1B7)
+#define TOMTOM_A_RX_HPH_R_DAC_CTL__POR				(0x00)
+#define TOMTOM_A_RX_HPH_R_ATEST			(0x1B8)
+#define TOMTOM_A_RX_HPH_R_ATEST__POR				(0x00)
+#define TOMTOM_A_RX_HPH_R_STATUS			(0x1B9)
+#define TOMTOM_A_RX_HPH_R_STATUS__POR				(0x00)
+#define TOMTOM_A_RX_EAR_BIAS_PA			(0x1BA)
+#define TOMTOM_A_RX_EAR_BIAS_PA__POR				(0x76)
+#define TOMTOM_A_RX_EAR_BIAS_CMBUFF			(0x1BB)
+#define TOMTOM_A_RX_EAR_BIAS_CMBUFF__POR				(0xA0)
+#define TOMTOM_A_RX_EAR_EN			(0x1BC)
+#define TOMTOM_A_RX_EAR_EN__POR				(0x00)
+#define TOMTOM_A_RX_EAR_GAIN			(0x1BD)
+#define TOMTOM_A_RX_EAR_GAIN__POR				(0x02)
+#define TOMTOM_A_RX_EAR_CMBUFF			(0x1BE)
+#define TOMTOM_A_RX_EAR_CMBUFF__POR				(0x05)
+#define TOMTOM_A_RX_EAR_ICTL			(0x1BF)
+#define TOMTOM_A_RX_EAR_ICTL__POR				(0x40)
+#define TOMTOM_A_RX_EAR_CCOMP			(0x1C0)
+#define TOMTOM_A_RX_EAR_CCOMP__POR				(0x08)
+#define TOMTOM_A_RX_EAR_VCM			(0x1C1)
+#define TOMTOM_A_RX_EAR_VCM__POR				(0x03)
+#define TOMTOM_A_RX_EAR_CNP			(0x1C2)
+#define TOMTOM_A_RX_EAR_CNP__POR				(0xC0)
+#define TOMTOM_A_RX_EAR_DAC_CTL_ATEST			(0x1C3)
+#define TOMTOM_A_RX_EAR_DAC_CTL_ATEST__POR				(0x00)
+#define TOMTOM_A_RX_EAR_STATUS			(0x1C5)
+#define TOMTOM_A_RX_EAR_STATUS__POR				(0x04)
+#define TOMTOM_A_RX_LINE_BIAS_PA			(0x1C6)
+#define TOMTOM_A_RX_LINE_BIAS_PA__POR				(0x78)
+#define TOMTOM_A_RX_BUCK_BIAS1			(0x1C7)
+#define TOMTOM_A_RX_BUCK_BIAS1__POR				(0x42)
+#define TOMTOM_A_RX_BUCK_BIAS2			(0x1C8)
+#define TOMTOM_A_RX_BUCK_BIAS2__POR				(0x84)
+#define TOMTOM_A_RX_LINE_COM			(0x1C9)
+#define TOMTOM_A_RX_LINE_COM__POR				(0x80)
+#define TOMTOM_A_RX_LINE_CNP_EN			(0x1CA)
+#define TOMTOM_A_RX_LINE_CNP_EN__POR				(0x00)
+#define TOMTOM_A_RX_LINE_CNP_WG_CTL			(0x1CB)
+#define TOMTOM_A_RX_LINE_CNP_WG_CTL__POR				(0x00)
+#define TOMTOM_A_RX_LINE_CNP_WG_TIME			(0x1CC)
+#define TOMTOM_A_RX_LINE_CNP_WG_TIME__POR				(0x04)
+#define TOMTOM_A_RX_LINE_1_GAIN			(0x1CD)
+#define TOMTOM_A_RX_LINE_1_GAIN__POR				(0x00)
+#define TOMTOM_A_RX_LINE_1_TEST			(0x1CE)
+#define TOMTOM_A_RX_LINE_1_TEST__POR				(0x02)
+#define TOMTOM_A_RX_LINE_1_DAC_CTL			(0x1CF)
+#define TOMTOM_A_RX_LINE_1_DAC_CTL__POR				(0x00)
+#define TOMTOM_A_RX_LINE_1_STATUS			(0x1D0)
+#define TOMTOM_A_RX_LINE_1_STATUS__POR				(0x00)
+#define TOMTOM_A_RX_LINE_2_GAIN			(0x1D1)
+#define TOMTOM_A_RX_LINE_2_GAIN__POR				(0x00)
+#define TOMTOM_A_RX_LINE_2_TEST			(0x1D2)
+#define TOMTOM_A_RX_LINE_2_TEST__POR				(0x02)
+#define TOMTOM_A_RX_LINE_2_DAC_CTL			(0x1D3)
+#define TOMTOM_A_RX_LINE_2_DAC_CTL__POR				(0x00)
+#define TOMTOM_A_RX_LINE_2_STATUS			(0x1D4)
+#define TOMTOM_A_RX_LINE_2_STATUS__POR				(0x00)
+#define TOMTOM_A_RX_LINE_3_GAIN			(0x1D5)
+#define TOMTOM_A_RX_LINE_3_GAIN__POR				(0x00)
+#define TOMTOM_A_RX_LINE_3_TEST			(0x1D6)
+#define TOMTOM_A_RX_LINE_3_TEST__POR				(0x02)
+#define TOMTOM_A_RX_LINE_3_DAC_CTL			(0x1D7)
+#define TOMTOM_A_RX_LINE_3_DAC_CTL__POR				(0x00)
+#define TOMTOM_A_RX_LINE_3_STATUS			(0x1D8)
+#define TOMTOM_A_RX_LINE_3_STATUS__POR				(0x00)
+#define TOMTOM_A_RX_LINE_4_GAIN			(0x1D9)
+#define TOMTOM_A_RX_LINE_4_GAIN__POR				(0x00)
+#define TOMTOM_A_RX_LINE_4_TEST			(0x1DA)
+#define TOMTOM_A_RX_LINE_4_TEST__POR				(0x02)
+#define TOMTOM_A_RX_LINE_4_DAC_CTL			(0x1DB)
+#define TOMTOM_A_RX_LINE_4_DAC_CTL__POR				(0x00)
+#define TOMTOM_A_RX_LINE_4_STATUS			(0x1DC)
+#define TOMTOM_A_RX_LINE_4_STATUS__POR				(0x00)
+#define TOMTOM_A_RX_LINE_CNP_DBG			(0x1DD)
+#define TOMTOM_A_RX_LINE_CNP_DBG__POR				(0x00)
+#define TOMTOM_A_SPKR_DRV1_EN			(0x1DF)
+#define TOMTOM_A_SPKR_DRV1_EN__POR				(0x6F)
+#define TOMTOM_A_SPKR_DRV1_GAIN			(0x1E0)
+#define TOMTOM_A_SPKR_DRV1_GAIN__POR				(0x00)
+#define TOMTOM_A_SPKR_DRV1_DAC_CTL			(0x1E1)
+#define TOMTOM_A_SPKR_DRV1_DAC_CTL__POR				(0x04)
+#define TOMTOM_A_SPKR_DRV1_OCP_CTL			(0x1E2)
+#define TOMTOM_A_SPKR_DRV1_OCP_CTL__POR				(0x97)
+#define TOMTOM_A_SPKR_DRV1_CLIP_DET			(0x1E3)
+#define TOMTOM_A_SPKR_DRV1_CLIP_DET__POR				(0x01)
+#define TOMTOM_A_SPKR_DRV1_IEC			(0x1E4)
+#define TOMTOM_A_SPKR_DRV1_IEC__POR				(0x00)
+#define TOMTOM_A_SPKR_DRV1_DBG_DAC			(0x1E5)
+#define TOMTOM_A_SPKR_DRV1_DBG_DAC__POR				(0x05)
+#define TOMTOM_A_SPKR_DRV1_DBG_PA			(0x1E6)
+#define TOMTOM_A_SPKR_DRV1_DBG_PA__POR				(0x18)
+#define TOMTOM_A_SPKR_DRV1_DBG_PWRSTG			(0x1E7)
+#define TOMTOM_A_SPKR_DRV1_DBG_PWRSTG__POR				(0x00)
+#define TOMTOM_A_SPKR_DRV1_BIAS_LDO			(0x1E8)
+#define TOMTOM_A_SPKR_DRV1_BIAS_LDO__POR				(0x45)
+#define TOMTOM_A_SPKR_DRV1_BIAS_INT			(0x1E9)
+#define TOMTOM_A_SPKR_DRV1_BIAS_INT__POR				(0xA5)
+#define TOMTOM_A_SPKR_DRV1_BIAS_PA			(0x1EA)
+#define TOMTOM_A_SPKR_DRV1_BIAS_PA__POR				(0x55)
+#define TOMTOM_A_SPKR_DRV1_STATUS_OCP			(0x1EB)
+#define TOMTOM_A_SPKR_DRV1_STATUS_OCP__POR				(0x00)
+#define TOMTOM_A_SPKR_DRV1_STATUS_PA			(0x1EC)
+#define TOMTOM_A_SPKR_DRV1_STATUS_PA__POR				(0x00)
+#define TOMTOM_A_SPKR1_PROT_EN			(0x1ED)
+#define TOMTOM_A_SPKR1_PROT_EN__POR				(0x00)
+#define TOMTOM_A_SPKR1_PROT_ADC_TEST_EN			(0x1EE)
+#define TOMTOM_A_SPKR1_PROT_ADC_TEST_EN__POR				(0x44)
+#define TOMTOM_A_SPKR1_PROT_ATEST			(0x1EF)
+#define TOMTOM_A_SPKR1_PROT_ATEST__POR				(0x00)
+#define TOMTOM_A_SPKR1_PROT_LDO_CTRL			(0x1F0)
+#define TOMTOM_A_SPKR1_PROT_LDO_CTRL__POR				(0x00)
+#define TOMTOM_A_SPKR1_PROT_ISENSE_CTRL			(0x1F1)
+#define TOMTOM_A_SPKR1_PROT_ISENSE_CTRL__POR				(0x00)
+#define TOMTOM_A_SPKR1_PROT_VSENSE_CTRL			(0x1F2)
+#define TOMTOM_A_SPKR1_PROT_VSENSE_CTRL__POR				(0x00)
+#define TOMTOM_A_SPKR2_PROT_EN			(0x1F3)
+#define TOMTOM_A_SPKR2_PROT_EN__POR				(0x00)
+#define TOMTOM_A_SPKR2_PROT_ADC_TEST_EN			(0x1F4)
+#define TOMTOM_A_SPKR2_PROT_ADC_TEST_EN__POR				(0x44)
+#define TOMTOM_A_SPKR2_PROT_ATEST			(0x1F5)
+#define TOMTOM_A_SPKR2_PROT_ATEST__POR				(0x00)
+#define TOMTOM_A_SPKR2_PROT_LDO_CTRL			(0x1F6)
+#define TOMTOM_A_SPKR2_PROT_LDO_CTRL__POR				(0x00)
+#define TOMTOM_A_SPKR2_PROT_ISENSE_CTRL			(0x1F7)
+#define TOMTOM_A_SPKR2_PROT_ISENSE_CTRL__POR				(0x00)
+#define TOMTOM_A_SPKR2_PROT_VSENSE_CTRL			(0x1F8)
+#define TOMTOM_A_SPKR2_PROT_VSENSE_CTRL__POR				(0x00)
+#define TOMTOM_A_MBHC_HPH			(0x1FE)
+#define TOMTOM_A_MBHC_HPH__POR				(0x44)
+#define TOMTOM_A_CDC_ANC1_B1_CTL			(0x200)
+#define TOMTOM_A_CDC_ANC1_B1_CTL__POR				(0x00)
+#define TOMTOM_A_CDC_ANC2_B1_CTL			(0x280)
+#define TOMTOM_A_CDC_ANC2_B1_CTL__POR				(0x00)
+#define TOMTOM_A_CDC_ANC1_SHIFT			(0x201)
+#define TOMTOM_A_CDC_ANC1_SHIFT__POR				(0x00)
+#define TOMTOM_A_CDC_ANC2_SHIFT			(0x281)
+#define TOMTOM_A_CDC_ANC2_SHIFT__POR				(0x00)
+#define TOMTOM_A_CDC_ANC1_IIR_B1_CTL			(0x202)
+#define TOMTOM_A_CDC_ANC1_IIR_B1_CTL__POR				(0x00)
+#define TOMTOM_A_CDC_ANC2_IIR_B1_CTL			(0x282)
+#define TOMTOM_A_CDC_ANC2_IIR_B1_CTL__POR				(0x00)
+#define TOMTOM_A_CDC_ANC1_IIR_B2_CTL			(0x203)
+#define TOMTOM_A_CDC_ANC1_IIR_B2_CTL__POR				(0x00)
+#define TOMTOM_A_CDC_ANC2_IIR_B2_CTL			(0x283)
+#define TOMTOM_A_CDC_ANC2_IIR_B2_CTL__POR				(0x00)
+#define TOMTOM_A_CDC_ANC1_IIR_B3_CTL			(0x204)
+#define TOMTOM_A_CDC_ANC1_IIR_B3_CTL__POR				(0x00)
+#define TOMTOM_A_CDC_ANC2_IIR_B3_CTL			(0x284)
+#define TOMTOM_A_CDC_ANC2_IIR_B3_CTL__POR				(0x00)
+#define TOMTOM_A_CDC_ANC1_LPF_B1_CTL			(0x206)
+#define TOMTOM_A_CDC_ANC1_LPF_B1_CTL__POR				(0x00)
+#define TOMTOM_A_CDC_ANC2_LPF_B1_CTL			(0x286)
+#define TOMTOM_A_CDC_ANC2_LPF_B1_CTL__POR				(0x00)
+#define TOMTOM_A_CDC_ANC1_LPF_B2_CTL			(0x207)
+#define TOMTOM_A_CDC_ANC1_LPF_B2_CTL__POR				(0x00)
+#define TOMTOM_A_CDC_ANC2_LPF_B2_CTL			(0x287)
+#define TOMTOM_A_CDC_ANC2_LPF_B2_CTL__POR				(0x00)
+#define TOMTOM_A_CDC_ANC1_SPARE			(0x209)
+#define TOMTOM_A_CDC_ANC1_SPARE__POR				(0x00)
+#define TOMTOM_A_CDC_ANC2_SPARE			(0x289)
+#define TOMTOM_A_CDC_ANC2_SPARE__POR				(0x00)
+#define TOMTOM_A_CDC_ANC1_SMLPF_CTL			(0x20A)
+#define TOMTOM_A_CDC_ANC1_SMLPF_CTL__POR				(0x00)
+#define TOMTOM_A_CDC_ANC2_SMLPF_CTL			(0x28A)
+#define TOMTOM_A_CDC_ANC2_SMLPF_CTL__POR				(0x00)
+#define TOMTOM_A_CDC_ANC1_DCFLT_CTL			(0x20B)
+#define TOMTOM_A_CDC_ANC1_DCFLT_CTL__POR				(0x00)
+#define TOMTOM_A_CDC_ANC2_DCFLT_CTL			(0x28B)
+#define TOMTOM_A_CDC_ANC2_DCFLT_CTL__POR				(0x00)
+#define TOMTOM_A_CDC_ANC1_GAIN_CTL			(0x20C)
+#define TOMTOM_A_CDC_ANC1_GAIN_CTL__POR				(0x00)
+#define TOMTOM_A_CDC_ANC2_GAIN_CTL			(0x28C)
+#define TOMTOM_A_CDC_ANC2_GAIN_CTL__POR				(0x00)
+#define TOMTOM_A_CDC_ANC1_B2_CTL			(0x20D)
+#define TOMTOM_A_CDC_ANC1_B2_CTL__POR				(0x00)
+#define TOMTOM_A_CDC_ANC2_B2_CTL			(0x28D)
+#define TOMTOM_A_CDC_ANC2_B2_CTL__POR				(0x00)
+#define TOMTOM_A_CDC_TX1_VOL_CTL_TIMER			(0x220)
+#define TOMTOM_A_CDC_TX1_VOL_CTL_TIMER__POR				(0x00)
+#define TOMTOM_A_CDC_TX2_VOL_CTL_TIMER			(0x228)
+#define TOMTOM_A_CDC_TX2_VOL_CTL_TIMER__POR				(0x00)
+#define TOMTOM_A_CDC_TX3_VOL_CTL_TIMER			(0x230)
+#define TOMTOM_A_CDC_TX3_VOL_CTL_TIMER__POR				(0x00)
+#define TOMTOM_A_CDC_TX4_VOL_CTL_TIMER			(0x238)
+#define TOMTOM_A_CDC_TX4_VOL_CTL_TIMER__POR				(0x00)
+#define TOMTOM_A_CDC_TX5_VOL_CTL_TIMER			(0x240)
+#define TOMTOM_A_CDC_TX5_VOL_CTL_TIMER__POR				(0x00)
+#define TOMTOM_A_CDC_TX6_VOL_CTL_TIMER			(0x248)
+#define TOMTOM_A_CDC_TX6_VOL_CTL_TIMER__POR				(0x00)
+#define TOMTOM_A_CDC_TX7_VOL_CTL_TIMER			(0x250)
+#define TOMTOM_A_CDC_TX7_VOL_CTL_TIMER__POR				(0x00)
+#define TOMTOM_A_CDC_TX8_VOL_CTL_TIMER			(0x258)
+#define TOMTOM_A_CDC_TX8_VOL_CTL_TIMER__POR				(0x00)
+#define TOMTOM_A_CDC_TX9_VOL_CTL_TIMER			(0x260)
+#define TOMTOM_A_CDC_TX9_VOL_CTL_TIMER__POR				(0x00)
+#define TOMTOM_A_CDC_TX10_VOL_CTL_TIMER			(0x268)
+#define TOMTOM_A_CDC_TX10_VOL_CTL_TIMER__POR				(0x00)
+#define TOMTOM_A_CDC_TX1_VOL_CTL_GAIN			(0x221)
+#define TOMTOM_A_CDC_TX1_VOL_CTL_GAIN__POR				(0x00)
+#define TOMTOM_A_CDC_TX2_VOL_CTL_GAIN			(0x229)
+#define TOMTOM_A_CDC_TX2_VOL_CTL_GAIN__POR				(0x00)
+#define TOMTOM_A_CDC_TX3_VOL_CTL_GAIN			(0x231)
+#define TOMTOM_A_CDC_TX3_VOL_CTL_GAIN__POR				(0x00)
+#define TOMTOM_A_CDC_TX4_VOL_CTL_GAIN			(0x239)
+#define TOMTOM_A_CDC_TX4_VOL_CTL_GAIN__POR				(0x00)
+#define TOMTOM_A_CDC_TX5_VOL_CTL_GAIN			(0x241)
+#define TOMTOM_A_CDC_TX5_VOL_CTL_GAIN__POR				(0x00)
+#define TOMTOM_A_CDC_TX6_VOL_CTL_GAIN			(0x249)
+#define TOMTOM_A_CDC_TX6_VOL_CTL_GAIN__POR				(0x00)
+#define TOMTOM_A_CDC_TX7_VOL_CTL_GAIN			(0x251)
+#define TOMTOM_A_CDC_TX7_VOL_CTL_GAIN__POR				(0x00)
+#define TOMTOM_A_CDC_TX8_VOL_CTL_GAIN			(0x259)
+#define TOMTOM_A_CDC_TX8_VOL_CTL_GAIN__POR				(0x00)
+#define TOMTOM_A_CDC_TX9_VOL_CTL_GAIN			(0x261)
+#define TOMTOM_A_CDC_TX9_VOL_CTL_GAIN__POR				(0x00)
+#define TOMTOM_A_CDC_TX10_VOL_CTL_GAIN			(0x269)
+#define TOMTOM_A_CDC_TX10_VOL_CTL_GAIN__POR				(0x00)
+#define TOMTOM_A_CDC_TX1_VOL_CTL_CFG			(0x222)
+#define TOMTOM_A_CDC_TX1_VOL_CTL_CFG__POR				(0x00)
+#define TOMTOM_A_CDC_TX2_VOL_CTL_CFG			(0x22A)
+#define TOMTOM_A_CDC_TX2_VOL_CTL_CFG__POR				(0x00)
+#define TOMTOM_A_CDC_TX3_VOL_CTL_CFG			(0x232)
+#define TOMTOM_A_CDC_TX3_VOL_CTL_CFG__POR				(0x00)
+#define TOMTOM_A_CDC_TX4_VOL_CTL_CFG			(0x23A)
+#define TOMTOM_A_CDC_TX4_VOL_CTL_CFG__POR				(0x00)
+#define TOMTOM_A_CDC_TX5_VOL_CTL_CFG			(0x242)
+#define TOMTOM_A_CDC_TX5_VOL_CTL_CFG__POR				(0x00)
+#define TOMTOM_A_CDC_TX6_VOL_CTL_CFG			(0x24A)
+#define TOMTOM_A_CDC_TX6_VOL_CTL_CFG__POR				(0x00)
+#define TOMTOM_A_CDC_TX7_VOL_CTL_CFG			(0x252)
+#define TOMTOM_A_CDC_TX7_VOL_CTL_CFG__POR				(0x00)
+#define TOMTOM_A_CDC_TX8_VOL_CTL_CFG			(0x25A)
+#define TOMTOM_A_CDC_TX8_VOL_CTL_CFG__POR				(0x00)
+#define TOMTOM_A_CDC_TX9_VOL_CTL_CFG			(0x262)
+#define TOMTOM_A_CDC_TX9_VOL_CTL_CFG__POR				(0x00)
+#define TOMTOM_A_CDC_TX10_VOL_CTL_CFG			(0x26A)
+#define TOMTOM_A_CDC_TX10_VOL_CTL_CFG__POR				(0x00)
+#define TOMTOM_A_CDC_TX1_MUX_CTL			(0x223)
+#define TOMTOM_A_CDC_TX1_MUX_CTL__POR				(0x48)
+#define TOMTOM_A_CDC_TX2_MUX_CTL			(0x22B)
+#define TOMTOM_A_CDC_TX2_MUX_CTL__POR				(0x48)
+#define TOMTOM_A_CDC_TX3_MUX_CTL			(0x233)
+#define TOMTOM_A_CDC_TX3_MUX_CTL__POR				(0x48)
+#define TOMTOM_A_CDC_TX4_MUX_CTL			(0x23B)
+#define TOMTOM_A_CDC_TX4_MUX_CTL__POR				(0x48)
+#define TOMTOM_A_CDC_TX5_MUX_CTL			(0x243)
+#define TOMTOM_A_CDC_TX5_MUX_CTL__POR				(0x48)
+#define TOMTOM_A_CDC_TX6_MUX_CTL			(0x24B)
+#define TOMTOM_A_CDC_TX6_MUX_CTL__POR				(0x48)
+#define TOMTOM_A_CDC_TX7_MUX_CTL			(0x253)
+#define TOMTOM_A_CDC_TX7_MUX_CTL__POR				(0x48)
+#define TOMTOM_A_CDC_TX8_MUX_CTL			(0x25B)
+#define TOMTOM_A_CDC_TX8_MUX_CTL__POR				(0x48)
+#define TOMTOM_A_CDC_TX9_MUX_CTL			(0x263)
+#define TOMTOM_A_CDC_TX9_MUX_CTL__POR				(0x48)
+#define TOMTOM_A_CDC_TX10_MUX_CTL			(0x26B)
+#define TOMTOM_A_CDC_TX10_MUX_CTL__POR				(0x48)
+#define TOMTOM_A_CDC_TX1_CLK_FS_CTL			(0x224)
+#define TOMTOM_A_CDC_TX1_CLK_FS_CTL__POR				(0x03)
+#define TOMTOM_A_CDC_TX2_CLK_FS_CTL			(0x22C)
+#define TOMTOM_A_CDC_TX2_CLK_FS_CTL__POR				(0x03)
+#define TOMTOM_A_CDC_TX3_CLK_FS_CTL			(0x234)
+#define TOMTOM_A_CDC_TX3_CLK_FS_CTL__POR				(0x03)
+#define TOMTOM_A_CDC_TX4_CLK_FS_CTL			(0x23C)
+#define TOMTOM_A_CDC_TX4_CLK_FS_CTL__POR				(0x03)
+#define TOMTOM_A_CDC_TX5_CLK_FS_CTL			(0x244)
+#define TOMTOM_A_CDC_TX5_CLK_FS_CTL__POR				(0x03)
+#define TOMTOM_A_CDC_TX6_CLK_FS_CTL			(0x24C)
+#define TOMTOM_A_CDC_TX6_CLK_FS_CTL__POR				(0x03)
+#define TOMTOM_A_CDC_TX7_CLK_FS_CTL			(0x254)
+#define TOMTOM_A_CDC_TX7_CLK_FS_CTL__POR				(0x03)
+#define TOMTOM_A_CDC_TX8_CLK_FS_CTL			(0x25C)
+#define TOMTOM_A_CDC_TX8_CLK_FS_CTL__POR				(0x03)
+#define TOMTOM_A_CDC_TX9_CLK_FS_CTL			(0x264)
+#define TOMTOM_A_CDC_TX9_CLK_FS_CTL__POR				(0x03)
+#define TOMTOM_A_CDC_TX10_CLK_FS_CTL			(0x26C)
+#define TOMTOM_A_CDC_TX10_CLK_FS_CTL__POR				(0x03)
+#define TOMTOM_A_CDC_TX1_DMIC_CTL			(0x225)
+#define TOMTOM_A_CDC_TX1_DMIC_CTL__POR				(0x00)
+#define TOMTOM_A_CDC_TX2_DMIC_CTL			(0x22D)
+#define TOMTOM_A_CDC_TX2_DMIC_CTL__POR				(0x00)
+#define TOMTOM_A_CDC_TX3_DMIC_CTL			(0x235)
+#define TOMTOM_A_CDC_TX3_DMIC_CTL__POR				(0x00)
+#define TOMTOM_A_CDC_TX4_DMIC_CTL			(0x23D)
+#define TOMTOM_A_CDC_TX4_DMIC_CTL__POR				(0x00)
+#define TOMTOM_A_CDC_TX5_DMIC_CTL			(0x245)
+#define TOMTOM_A_CDC_TX5_DMIC_CTL__POR				(0x00)
+#define TOMTOM_A_CDC_TX6_DMIC_CTL			(0x24D)
+#define TOMTOM_A_CDC_TX6_DMIC_CTL__POR				(0x00)
+#define TOMTOM_A_CDC_TX7_DMIC_CTL			(0x255)
+#define TOMTOM_A_CDC_TX7_DMIC_CTL__POR				(0x00)
+#define TOMTOM_A_CDC_TX8_DMIC_CTL			(0x25D)
+#define TOMTOM_A_CDC_TX8_DMIC_CTL__POR				(0x00)
+#define TOMTOM_A_CDC_TX9_DMIC_CTL			(0x265)
+#define TOMTOM_A_CDC_TX9_DMIC_CTL__POR				(0x00)
+#define TOMTOM_A_CDC_TX10_DMIC_CTL			(0x26D)
+#define TOMTOM_A_CDC_TX10_DMIC_CTL__POR				(0x00)
+#define TOMTOM_A_CDC_SPKR_CLIPDET_VAL0			(0x270)
+#define TOMTOM_A_CDC_SPKR_CLIPDET_VAL0__POR				(0x00)
+#define TOMTOM_A_CDC_SPKR_CLIPDET_VAL1			(0x271)
+#define TOMTOM_A_CDC_SPKR_CLIPDET_VAL1__POR				(0x00)
+#define TOMTOM_A_CDC_SPKR_CLIPDET_VAL2			(0x272)
+#define TOMTOM_A_CDC_SPKR_CLIPDET_VAL2__POR				(0x00)
+#define TOMTOM_A_CDC_SPKR_CLIPDET_VAL3			(0x273)
+#define TOMTOM_A_CDC_SPKR_CLIPDET_VAL3__POR				(0x00)
+#define TOMTOM_A_CDC_SPKR_CLIPDET_VAL4			(0x274)
+#define TOMTOM_A_CDC_SPKR_CLIPDET_VAL4__POR				(0x00)
+#define TOMTOM_A_CDC_SPKR_CLIPDET_VAL5			(0x275)
+#define TOMTOM_A_CDC_SPKR_CLIPDET_VAL5__POR				(0x00)
+#define TOMTOM_A_CDC_SPKR_CLIPDET_VAL6			(0x276)
+#define TOMTOM_A_CDC_SPKR_CLIPDET_VAL6__POR				(0x00)
+#define TOMTOM_A_CDC_SPKR_CLIPDET_VAL7			(0x277)
+#define TOMTOM_A_CDC_SPKR_CLIPDET_VAL7__POR				(0x00)
+#define TOMTOM_A_CDC_DEBUG_B1_CTL			(0x278)
+#define TOMTOM_A_CDC_DEBUG_B1_CTL__POR				(0x00)
+#define TOMTOM_A_CDC_DEBUG_B2_CTL			(0x279)
+#define TOMTOM_A_CDC_DEBUG_B2_CTL__POR				(0x00)
+#define TOMTOM_A_CDC_DEBUG_B3_CTL			(0x27A)
+#define TOMTOM_A_CDC_DEBUG_B3_CTL__POR				(0x00)
+#define TOMTOM_A_CDC_DEBUG_B4_CTL			(0x27B)
+#define TOMTOM_A_CDC_DEBUG_B4_CTL__POR				(0x00)
+#define TOMTOM_A_CDC_DEBUG_B5_CTL			(0x27C)
+#define TOMTOM_A_CDC_DEBUG_B5_CTL__POR				(0x00)
+#define TOMTOM_A_CDC_DEBUG_B6_CTL			(0x27D)
+#define TOMTOM_A_CDC_DEBUG_B6_CTL__POR				(0x00)
+#define TOMTOM_A_CDC_DEBUG_B7_CTL			(0x27E)
+#define TOMTOM_A_CDC_DEBUG_B7_CTL__POR				(0x00)
+#define TOMTOM_A_CDC_SRC1_PDA_CFG			(0x2A0)
+#define TOMTOM_A_CDC_SRC1_PDA_CFG__POR				(0x00)
+#define TOMTOM_A_CDC_SRC2_PDA_CFG			(0x2A8)
+#define TOMTOM_A_CDC_SRC2_PDA_CFG__POR				(0x00)
+#define TOMTOM_A_CDC_SRC1_FS_CTL			(0x2A1)
+#define TOMTOM_A_CDC_SRC1_FS_CTL__POR				(0x1B)
+#define TOMTOM_A_CDC_SRC2_FS_CTL			(0x2A9)
+#define TOMTOM_A_CDC_SRC2_FS_CTL__POR				(0x1B)
+#define TOMTOM_A_CDC_RX1_B1_CTL			(0x2B0)
+#define TOMTOM_A_CDC_RX1_B1_CTL__POR				(0x00)
+#define TOMTOM_A_CDC_RX2_B1_CTL			(0x2B8)
+#define TOMTOM_A_CDC_RX2_B1_CTL__POR				(0x00)
+#define TOMTOM_A_CDC_RX3_B1_CTL			(0x2C0)
+#define TOMTOM_A_CDC_RX3_B1_CTL__POR				(0x00)
+#define TOMTOM_A_CDC_RX4_B1_CTL			(0x2C8)
+#define TOMTOM_A_CDC_RX4_B1_CTL__POR				(0x00)
+#define TOMTOM_A_CDC_RX5_B1_CTL			(0x2D0)
+#define TOMTOM_A_CDC_RX5_B1_CTL__POR				(0x00)
+#define TOMTOM_A_CDC_RX6_B1_CTL			(0x2D8)
+#define TOMTOM_A_CDC_RX6_B1_CTL__POR				(0x00)
+#define TOMTOM_A_CDC_RX7_B1_CTL			(0x2E0)
+#define TOMTOM_A_CDC_RX7_B1_CTL__POR				(0x00)
+#define TOMTOM_A_CDC_RX1_B2_CTL			(0x2B1)
+#define TOMTOM_A_CDC_RX1_B2_CTL__POR				(0x00)
+#define TOMTOM_A_CDC_RX2_B2_CTL			(0x2B9)
+#define TOMTOM_A_CDC_RX2_B2_CTL__POR				(0x00)
+#define TOMTOM_A_CDC_RX3_B2_CTL			(0x2C1)
+#define TOMTOM_A_CDC_RX3_B2_CTL__POR				(0x00)
+#define TOMTOM_A_CDC_RX4_B2_CTL			(0x2C9)
+#define TOMTOM_A_CDC_RX4_B2_CTL__POR				(0x00)
+#define TOMTOM_A_CDC_RX5_B2_CTL			(0x2D1)
+#define TOMTOM_A_CDC_RX5_B2_CTL__POR				(0x00)
+#define TOMTOM_A_CDC_RX6_B2_CTL			(0x2D9)
+#define TOMTOM_A_CDC_RX6_B2_CTL__POR				(0x00)
+#define TOMTOM_A_CDC_RX7_B2_CTL			(0x2E1)
+#define TOMTOM_A_CDC_RX7_B2_CTL__POR				(0x00)
+#define TOMTOM_A_CDC_RX1_B3_CTL			(0x2B2)
+#define TOMTOM_A_CDC_RX1_B3_CTL__POR				(0x00)
+#define TOMTOM_A_CDC_RX2_B3_CTL			(0x2BA)
+#define TOMTOM_A_CDC_RX2_B3_CTL__POR				(0x00)
+#define TOMTOM_A_CDC_RX3_B3_CTL			(0x2C2)
+#define TOMTOM_A_CDC_RX3_B3_CTL__POR				(0x00)
+#define TOMTOM_A_CDC_RX4_B3_CTL			(0x2CA)
+#define TOMTOM_A_CDC_RX4_B3_CTL__POR				(0x00)
+#define TOMTOM_A_CDC_RX5_B3_CTL			(0x2D2)
+#define TOMTOM_A_CDC_RX5_B3_CTL__POR				(0x00)
+#define TOMTOM_A_CDC_RX6_B3_CTL			(0x2DA)
+#define TOMTOM_A_CDC_RX6_B3_CTL__POR				(0x00)
+#define TOMTOM_A_CDC_RX7_B3_CTL			(0x2E2)
+#define TOMTOM_A_CDC_RX7_B3_CTL__POR				(0x00)
+#define TOMTOM_A_CDC_RX1_B4_CTL			(0x2B3)
+#define TOMTOM_A_CDC_RX1_B4_CTL__POR				(0x0B)
+#define TOMTOM_A_CDC_RX2_B4_CTL			(0x2BB)
+#define TOMTOM_A_CDC_RX2_B4_CTL__POR				(0x0B)
+#define TOMTOM_A_CDC_RX3_B4_CTL			(0x2C3)
+#define TOMTOM_A_CDC_RX3_B4_CTL__POR				(0x0B)
+#define TOMTOM_A_CDC_RX4_B4_CTL			(0x2CB)
+#define TOMTOM_A_CDC_RX4_B4_CTL__POR				(0x0B)
+#define TOMTOM_A_CDC_RX5_B4_CTL			(0x2D3)
+#define TOMTOM_A_CDC_RX5_B4_CTL__POR				(0x0B)
+#define TOMTOM_A_CDC_RX6_B4_CTL			(0x2DB)
+#define TOMTOM_A_CDC_RX6_B4_CTL__POR				(0x0B)
+#define TOMTOM_A_CDC_RX7_B4_CTL			(0x2E3)
+#define TOMTOM_A_CDC_RX7_B4_CTL__POR				(0x0B)
+#define TOMTOM_A_CDC_RX1_B5_CTL			(0x2B4)
+#define TOMTOM_A_CDC_RX1_B5_CTL__POR				(0x78)
+#define TOMTOM_A_CDC_RX2_B5_CTL			(0x2BC)
+#define TOMTOM_A_CDC_RX2_B5_CTL__POR				(0x78)
+#define TOMTOM_A_CDC_RX3_B5_CTL			(0x2C4)
+#define TOMTOM_A_CDC_RX3_B5_CTL__POR				(0x78)
+#define TOMTOM_A_CDC_RX4_B5_CTL			(0x2CC)
+#define TOMTOM_A_CDC_RX4_B5_CTL__POR				(0x78)
+#define TOMTOM_A_CDC_RX5_B5_CTL			(0x2D4)
+#define TOMTOM_A_CDC_RX5_B5_CTL__POR				(0x78)
+#define TOMTOM_A_CDC_RX6_B5_CTL			(0x2DC)
+#define TOMTOM_A_CDC_RX6_B5_CTL__POR				(0x78)
+#define TOMTOM_A_CDC_RX7_B5_CTL			(0x2E4)
+#define TOMTOM_A_CDC_RX7_B5_CTL__POR				(0x78)
+#define TOMTOM_A_CDC_RX1_B6_CTL			(0x2B5)
+#define TOMTOM_A_CDC_RX1_B6_CTL__POR				(0x80)
+#define TOMTOM_A_CDC_RX2_B6_CTL			(0x2BD)
+#define TOMTOM_A_CDC_RX2_B6_CTL__POR				(0x80)
+#define TOMTOM_A_CDC_RX3_B6_CTL			(0x2C5)
+#define TOMTOM_A_CDC_RX3_B6_CTL__POR				(0x80)
+#define TOMTOM_A_CDC_RX4_B6_CTL			(0x2CD)
+#define TOMTOM_A_CDC_RX4_B6_CTL__POR				(0x80)
+#define TOMTOM_A_CDC_RX5_B6_CTL			(0x2D5)
+#define TOMTOM_A_CDC_RX5_B6_CTL__POR				(0x80)
+#define TOMTOM_A_CDC_RX6_B6_CTL			(0x2DD)
+#define TOMTOM_A_CDC_RX6_B6_CTL__POR				(0x80)
+#define TOMTOM_A_CDC_RX7_B6_CTL			(0x2E5)
+#define TOMTOM_A_CDC_RX7_B6_CTL__POR				(0x80)
+#define TOMTOM_A_CDC_RX1_VOL_CTL_B1_CTL			(0x2B6)
+#define TOMTOM_A_CDC_RX1_VOL_CTL_B1_CTL__POR				(0x00)
+#define TOMTOM_A_CDC_RX2_VOL_CTL_B1_CTL			(0x2BE)
+#define TOMTOM_A_CDC_RX2_VOL_CTL_B1_CTL__POR				(0x00)
+#define TOMTOM_A_CDC_RX3_VOL_CTL_B1_CTL			(0x2C6)
+#define TOMTOM_A_CDC_RX3_VOL_CTL_B1_CTL__POR				(0x00)
+#define TOMTOM_A_CDC_RX4_VOL_CTL_B1_CTL			(0x2CE)
+#define TOMTOM_A_CDC_RX4_VOL_CTL_B1_CTL__POR				(0x00)
+#define TOMTOM_A_CDC_RX5_VOL_CTL_B1_CTL			(0x2D6)
+#define TOMTOM_A_CDC_RX5_VOL_CTL_B1_CTL__POR				(0x00)
+#define TOMTOM_A_CDC_RX6_VOL_CTL_B1_CTL			(0x2DE)
+#define TOMTOM_A_CDC_RX6_VOL_CTL_B1_CTL__POR				(0x00)
+#define TOMTOM_A_CDC_RX7_VOL_CTL_B1_CTL			(0x2E6)
+#define TOMTOM_A_CDC_RX7_VOL_CTL_B1_CTL__POR				(0x00)
+#define TOMTOM_A_CDC_RX1_VOL_CTL_B2_CTL			(0x2B7)
+#define TOMTOM_A_CDC_RX1_VOL_CTL_B2_CTL__POR				(0x00)
+#define TOMTOM_A_CDC_RX2_VOL_CTL_B2_CTL			(0x2BF)
+#define TOMTOM_A_CDC_RX2_VOL_CTL_B2_CTL__POR				(0x00)
+#define TOMTOM_A_CDC_RX3_VOL_CTL_B2_CTL			(0x2C7)
+#define TOMTOM_A_CDC_RX3_VOL_CTL_B2_CTL__POR				(0x00)
+#define TOMTOM_A_CDC_RX4_VOL_CTL_B2_CTL			(0x2CF)
+#define TOMTOM_A_CDC_RX4_VOL_CTL_B2_CTL__POR				(0x00)
+#define TOMTOM_A_CDC_RX5_VOL_CTL_B2_CTL			(0x2D7)
+#define TOMTOM_A_CDC_RX5_VOL_CTL_B2_CTL__POR				(0x00)
+#define TOMTOM_A_CDC_RX6_VOL_CTL_B2_CTL			(0x2DF)
+#define TOMTOM_A_CDC_RX6_VOL_CTL_B2_CTL__POR				(0x00)
+#define TOMTOM_A_CDC_RX7_VOL_CTL_B2_CTL			(0x2E7)
+#define TOMTOM_A_CDC_RX7_VOL_CTL_B2_CTL__POR				(0x00)
+#define TOMTOM_A_CDC_VBAT_CFG			(0x2E8)
+#define TOMTOM_A_CDC_VBAT_CFG__POR				(0x1A)
+#define TOMTOM_A_CDC_VBAT_ADC_CAL1			(0x2E9)
+#define TOMTOM_A_CDC_VBAT_ADC_CAL1__POR				(0x00)
+#define TOMTOM_A_CDC_VBAT_ADC_CAL2			(0x2EA)
+#define TOMTOM_A_CDC_VBAT_ADC_CAL2__POR				(0x00)
+#define TOMTOM_A_CDC_VBAT_ADC_CAL3			(0x2EB)
+#define TOMTOM_A_CDC_VBAT_ADC_CAL3__POR				(0x04)
+#define TOMTOM_A_CDC_VBAT_PK_EST1			(0x2EC)
+#define TOMTOM_A_CDC_VBAT_PK_EST1__POR				(0xE0)
+#define TOMTOM_A_CDC_VBAT_PK_EST2			(0x2ED)
+#define TOMTOM_A_CDC_VBAT_PK_EST2__POR				(0x01)
+#define TOMTOM_A_CDC_VBAT_PK_EST3			(0x2EE)
+#define TOMTOM_A_CDC_VBAT_PK_EST3__POR				(0x40)
+#define TOMTOM_A_CDC_VBAT_RF_PROC1			(0x2EF)
+#define TOMTOM_A_CDC_VBAT_RF_PROC1__POR				(0x2A)
+#define TOMTOM_A_CDC_VBAT_RF_PROC2			(0x2F0)
+#define TOMTOM_A_CDC_VBAT_RF_PROC2__POR				(0x86)
+#define TOMTOM_A_CDC_VBAT_TAC1			(0x2F1)
+#define TOMTOM_A_CDC_VBAT_TAC1__POR				(0x70)
+#define TOMTOM_A_CDC_VBAT_TAC2			(0x2F2)
+#define TOMTOM_A_CDC_VBAT_TAC2__POR				(0x18)
+#define TOMTOM_A_CDC_VBAT_TAC3			(0x2F3)
+#define TOMTOM_A_CDC_VBAT_TAC3__POR				(0x18)
+#define TOMTOM_A_CDC_VBAT_TAC4			(0x2F4)
+#define TOMTOM_A_CDC_VBAT_TAC4__POR				(0x03)
+#define TOMTOM_A_CDC_VBAT_GAIN_UPD1			(0x2F5)
+#define TOMTOM_A_CDC_VBAT_GAIN_UPD1__POR				(0x01)
+#define TOMTOM_A_CDC_VBAT_GAIN_UPD2			(0x2F6)
+#define TOMTOM_A_CDC_VBAT_GAIN_UPD2__POR				(0x00)
+#define TOMTOM_A_CDC_VBAT_GAIN_UPD3			(0x2F7)
+#define TOMTOM_A_CDC_VBAT_GAIN_UPD3__POR				(0x64)
+#define TOMTOM_A_CDC_VBAT_GAIN_UPD4			(0x2F8)
+#define TOMTOM_A_CDC_VBAT_GAIN_UPD4__POR				(0x01)
+#define TOMTOM_A_CDC_VBAT_DEBUG1			(0x2F9)
+#define TOMTOM_A_CDC_VBAT_DEBUG1__POR				(0x00)
+#define TOMTOM_A_CDC_VBAT_GAIN_UPD_MON			(0x2FA)
+#define TOMTOM_A_CDC_VBAT_GAIN_UPD_MON__POR				(0x00)
+#define TOMTOM_A_CDC_VBAT_GAIN_MON_VAL			(0x2FB)
+#define TOMTOM_A_CDC_VBAT_GAIN_MON_VAL__POR				(0x00)
+#define TOMTOM_A_CDC_CLK_ANC_RESET_CTL			(0x300)
+#define TOMTOM_A_CDC_CLK_ANC_RESET_CTL__POR				(0x00)
+#define TOMTOM_A_CDC_CLK_RX_RESET_CTL			(0x301)
+#define TOMTOM_A_CDC_CLK_RX_RESET_CTL__POR				(0x00)
+#define TOMTOM_A_CDC_CLK_TX_RESET_B1_CTL			(0x302)
+#define TOMTOM_A_CDC_CLK_TX_RESET_B1_CTL__POR				(0x00)
+#define TOMTOM_A_CDC_CLK_TX_RESET_B2_CTL			(0x303)
+#define TOMTOM_A_CDC_CLK_TX_RESET_B2_CTL__POR				(0x00)
+#define TOMTOM_A_CDC_CLK_RX_I2S_CTL			(0x306)
+#define TOMTOM_A_CDC_CLK_RX_I2S_CTL__POR				(0x03)
+#define TOMTOM_A_CDC_CLK_TX_I2S_CTL			(0x307)
+#define TOMTOM_A_CDC_CLK_TX_I2S_CTL__POR				(0x03)
+#define TOMTOM_A_CDC_CLK_OTHR_RESET_B1_CTL			(0x308)
+#define TOMTOM_A_CDC_CLK_OTHR_RESET_B1_CTL__POR				(0x00)
+#define TOMTOM_A_CDC_CLK_OTHR_RESET_B2_CTL			(0x309)
+#define TOMTOM_A_CDC_CLK_OTHR_RESET_B2_CTL__POR				(0x00)
+#define TOMTOM_A_CDC_CLK_TX_CLK_EN_B1_CTL			(0x30A)
+#define TOMTOM_A_CDC_CLK_TX_CLK_EN_B1_CTL__POR				(0x00)
+#define TOMTOM_A_CDC_CLK_TX_CLK_EN_B2_CTL			(0x30B)
+#define TOMTOM_A_CDC_CLK_TX_CLK_EN_B2_CTL__POR				(0x00)
+#define TOMTOM_A_CDC_CLK_OTHR_CTL			(0x30C)
+#define TOMTOM_A_CDC_CLK_OTHR_CTL__POR				(0x00)
+#define TOMTOM_A_CDC_CLK_ANC_CLK_EN_CTL			(0x30E)
+#define TOMTOM_A_CDC_CLK_ANC_CLK_EN_CTL__POR				(0x00)
+#define TOMTOM_A_CDC_CLK_RX_B1_CTL			(0x30F)
+#define TOMTOM_A_CDC_CLK_RX_B1_CTL__POR				(0x00)
+#define TOMTOM_A_CDC_CLK_RX_B2_CTL			(0x310)
+#define TOMTOM_A_CDC_CLK_RX_B2_CTL__POR				(0x00)
+#define TOMTOM_A_CDC_CLK_MCLK_CTL			(0x311)
+#define TOMTOM_A_CDC_CLK_MCLK_CTL__POR				(0x00)
+#define TOMTOM_A_CDC_CLK_PDM_CTL			(0x312)
+#define TOMTOM_A_CDC_CLK_PDM_CTL__POR				(0x00)
+#define TOMTOM_A_CDC_CLK_SD_CTL			(0x313)
+#define TOMTOM_A_CDC_CLK_SD_CTL__POR				(0x00)
+#define TOMTOM_A_CDC_CLSH_B1_CTL			(0x320)
+#define TOMTOM_A_CDC_CLSH_B1_CTL__POR				(0xE4)
+#define TOMTOM_A_CDC_CLSH_B2_CTL			(0x321)
+#define TOMTOM_A_CDC_CLSH_B2_CTL__POR				(0x00)
+#define TOMTOM_A_CDC_CLSH_B3_CTL			(0x322)
+#define TOMTOM_A_CDC_CLSH_B3_CTL__POR				(0x00)
+#define TOMTOM_A_CDC_CLSH_BUCK_NCP_VARS			(0x323)
+#define TOMTOM_A_CDC_CLSH_BUCK_NCP_VARS__POR				(0x00)
+#define TOMTOM_A_CDC_CLSH_IDLE_HPH_THSD			(0x324)
+#define TOMTOM_A_CDC_CLSH_IDLE_HPH_THSD__POR				(0x12)
+#define TOMTOM_A_CDC_CLSH_IDLE_EAR_THSD			(0x325)
+#define TOMTOM_A_CDC_CLSH_IDLE_EAR_THSD__POR				(0x0C)
+#define TOMTOM_A_CDC_CLSH_FCLKONLY_HPH_THSD			(0x326)
+#define TOMTOM_A_CDC_CLSH_FCLKONLY_HPH_THSD__POR			(0x18)
+#define TOMTOM_A_CDC_CLSH_FCLKONLY_EAR_THSD			(0x327)
+#define TOMTOM_A_CDC_CLSH_FCLKONLY_EAR_THSD__POR			(0x23)
+#define TOMTOM_A_CDC_CLSH_K_ADDR			(0x328)
+#define TOMTOM_A_CDC_CLSH_K_ADDR__POR				(0x00)
+#define TOMTOM_A_CDC_CLSH_K_DATA			(0x329)
+#define TOMTOM_A_CDC_CLSH_K_DATA__POR				(0xA4)
+#define TOMTOM_A_CDC_CLSH_I_PA_FACT_HPH_L			(0x32A)
+#define TOMTOM_A_CDC_CLSH_I_PA_FACT_HPH_L__POR				(0xD7)
+#define TOMTOM_A_CDC_CLSH_I_PA_FACT_HPH_U			(0x32B)
+#define TOMTOM_A_CDC_CLSH_I_PA_FACT_HPH_U__POR				(0x05)
+#define TOMTOM_A_CDC_CLSH_I_PA_FACT_EAR_L			(0x32C)
+#define TOMTOM_A_CDC_CLSH_I_PA_FACT_EAR_L__POR				(0x60)
+#define TOMTOM_A_CDC_CLSH_I_PA_FACT_EAR_U			(0x32D)
+#define TOMTOM_A_CDC_CLSH_I_PA_FACT_EAR_U__POR				(0x09)
+#define TOMTOM_A_CDC_CLSH_V_PA_HD_EAR			(0x32E)
+#define TOMTOM_A_CDC_CLSH_V_PA_HD_EAR__POR				(0x00)
+#define TOMTOM_A_CDC_CLSH_V_PA_HD_HPH			(0x32F)
+#define TOMTOM_A_CDC_CLSH_V_PA_HD_HPH__POR				(0x00)
+#define TOMTOM_A_CDC_CLSH_V_PA_MIN_EAR			(0x330)
+#define TOMTOM_A_CDC_CLSH_V_PA_MIN_EAR__POR				(0x00)
+#define TOMTOM_A_CDC_CLSH_V_PA_MIN_HPH			(0x331)
+#define TOMTOM_A_CDC_CLSH_V_PA_MIN_HPH__POR				(0x00)
+#define TOMTOM_A_CDC_IIR1_GAIN_B1_CTL			(0x340)
+#define TOMTOM_A_CDC_IIR1_GAIN_B1_CTL__POR				(0x00)
+#define TOMTOM_A_CDC_IIR2_GAIN_B1_CTL			(0x350)
+#define TOMTOM_A_CDC_IIR2_GAIN_B1_CTL__POR				(0x00)
+#define TOMTOM_A_CDC_IIR1_GAIN_B2_CTL			(0x341)
+#define TOMTOM_A_CDC_IIR1_GAIN_B2_CTL__POR				(0x00)
+#define TOMTOM_A_CDC_IIR2_GAIN_B2_CTL			(0x351)
+#define TOMTOM_A_CDC_IIR2_GAIN_B2_CTL__POR				(0x00)
+#define TOMTOM_A_CDC_IIR1_GAIN_B3_CTL			(0x342)
+#define TOMTOM_A_CDC_IIR1_GAIN_B3_CTL__POR				(0x00)
+#define TOMTOM_A_CDC_IIR2_GAIN_B3_CTL			(0x352)
+#define TOMTOM_A_CDC_IIR2_GAIN_B3_CTL__POR				(0x00)
+#define TOMTOM_A_CDC_IIR1_GAIN_B4_CTL			(0x343)
+#define TOMTOM_A_CDC_IIR1_GAIN_B4_CTL__POR				(0x00)
+#define TOMTOM_A_CDC_IIR2_GAIN_B4_CTL			(0x353)
+#define TOMTOM_A_CDC_IIR2_GAIN_B4_CTL__POR				(0x00)
+#define TOMTOM_A_CDC_IIR1_GAIN_B5_CTL			(0x344)
+#define TOMTOM_A_CDC_IIR1_GAIN_B5_CTL__POR				(0x00)
+#define TOMTOM_A_CDC_IIR2_GAIN_B5_CTL			(0x354)
+#define TOMTOM_A_CDC_IIR2_GAIN_B5_CTL__POR				(0x00)
+#define TOMTOM_A_CDC_IIR1_GAIN_B6_CTL			(0x345)
+#define TOMTOM_A_CDC_IIR1_GAIN_B6_CTL__POR				(0x00)
+#define TOMTOM_A_CDC_IIR2_GAIN_B6_CTL			(0x355)
+#define TOMTOM_A_CDC_IIR2_GAIN_B6_CTL__POR				(0x00)
+#define TOMTOM_A_CDC_IIR1_GAIN_B7_CTL			(0x346)
+#define TOMTOM_A_CDC_IIR1_GAIN_B7_CTL__POR				(0x00)
+#define TOMTOM_A_CDC_IIR2_GAIN_B7_CTL			(0x356)
+#define TOMTOM_A_CDC_IIR2_GAIN_B7_CTL__POR				(0x00)
+#define TOMTOM_A_CDC_IIR1_GAIN_B8_CTL			(0x347)
+#define TOMTOM_A_CDC_IIR1_GAIN_B8_CTL__POR				(0x00)
+#define TOMTOM_A_CDC_IIR2_GAIN_B8_CTL			(0x357)
+#define TOMTOM_A_CDC_IIR2_GAIN_B8_CTL__POR				(0x00)
+#define TOMTOM_A_CDC_IIR1_CTL			(0x348)
+#define TOMTOM_A_CDC_IIR1_CTL__POR				(0x40)
+#define TOMTOM_A_CDC_IIR2_CTL			(0x358)
+#define TOMTOM_A_CDC_IIR2_CTL__POR				(0x40)
+#define TOMTOM_A_CDC_IIR1_GAIN_TIMER_CTL			(0x349)
+#define TOMTOM_A_CDC_IIR1_GAIN_TIMER_CTL__POR				(0x00)
+#define TOMTOM_A_CDC_IIR2_GAIN_TIMER_CTL			(0x359)
+#define TOMTOM_A_CDC_IIR2_GAIN_TIMER_CTL__POR				(0x00)
+#define TOMTOM_A_CDC_IIR1_COEF_B1_CTL			(0x34A)
+#define TOMTOM_A_CDC_IIR1_COEF_B1_CTL__POR				(0x00)
+#define TOMTOM_A_CDC_IIR2_COEF_B1_CTL			(0x35A)
+#define TOMTOM_A_CDC_IIR2_COEF_B1_CTL__POR				(0x00)
+#define TOMTOM_A_CDC_IIR1_COEF_B2_CTL			(0x34B)
+#define TOMTOM_A_CDC_IIR1_COEF_B2_CTL__POR				(0x00)
+#define TOMTOM_A_CDC_IIR2_COEF_B2_CTL			(0x35B)
+#define TOMTOM_A_CDC_IIR2_COEF_B2_CTL__POR				(0x00)
+#define TOMTOM_A_CDC_TOP_GAIN_UPDATE			(0x360)
+#define TOMTOM_A_CDC_TOP_GAIN_UPDATE__POR				(0x00)
+#define TOMTOM_A_CDC_PA_RAMP_B1_CTL			(0x361)
+#define TOMTOM_A_CDC_PA_RAMP_B1_CTL__POR				(0x00)
+#define TOMTOM_A_CDC_PA_RAMP_B2_CTL			(0x362)
+#define TOMTOM_A_CDC_PA_RAMP_B2_CTL__POR				(0x00)
+#define TOMTOM_A_CDC_PA_RAMP_B3_CTL			(0x363)
+#define TOMTOM_A_CDC_PA_RAMP_B3_CTL__POR				(0x00)
+#define TOMTOM_A_CDC_PA_RAMP_B4_CTL			(0x364)
+#define TOMTOM_A_CDC_PA_RAMP_B4_CTL__POR				(0x00)
+#define TOMTOM_A_CDC_SPKR_CLIPDET_B1_CTL			(0x365)
+#define TOMTOM_A_CDC_SPKR_CLIPDET_B1_CTL__POR				(0x00)
+#define TOMTOM_A_CDC_SPKR2_CLIPDET_B1_CTL			(0x366)
+#define TOMTOM_A_CDC_SPKR2_CLIPDET_B1_CTL__POR				(0x00)
+#define TOMTOM_A_CDC_COMP0_B1_CTL			(0x368)
+#define TOMTOM_A_CDC_COMP0_B1_CTL__POR				(0x30)
+#define TOMTOM_A_CDC_COMP1_B1_CTL			(0x370)
+#define TOMTOM_A_CDC_COMP1_B1_CTL__POR				(0x30)
+#define TOMTOM_A_CDC_COMP2_B1_CTL			(0x378)
+#define TOMTOM_A_CDC_COMP2_B1_CTL__POR				(0x30)
+#define TOMTOM_A_CDC_COMP0_B2_CTL			(0x369)
+#define TOMTOM_A_CDC_COMP0_B2_CTL__POR				(0xB5)
+#define TOMTOM_A_CDC_COMP1_B2_CTL			(0x371)
+#define TOMTOM_A_CDC_COMP1_B2_CTL__POR				(0xB5)
+#define TOMTOM_A_CDC_COMP2_B2_CTL			(0x379)
+#define TOMTOM_A_CDC_COMP2_B2_CTL__POR				(0xB5)
+#define TOMTOM_A_CDC_COMP0_B3_CTL			(0x36A)
+#define TOMTOM_A_CDC_COMP0_B3_CTL__POR				(0x28)
+#define TOMTOM_A_CDC_COMP1_B3_CTL			(0x372)
+#define TOMTOM_A_CDC_COMP1_B3_CTL__POR				(0x28)
+#define TOMTOM_A_CDC_COMP2_B3_CTL			(0x37A)
+#define TOMTOM_A_CDC_COMP2_B3_CTL__POR				(0x28)
+#define TOMTOM_A_CDC_COMP0_B4_CTL			(0x36B)
+#define TOMTOM_A_CDC_COMP0_B4_CTL__POR				(0x37)
+#define TOMTOM_A_CDC_COMP1_B4_CTL			(0x373)
+#define TOMTOM_A_CDC_COMP1_B4_CTL__POR				(0x37)
+#define TOMTOM_A_CDC_COMP2_B4_CTL			(0x37B)
+#define TOMTOM_A_CDC_COMP2_B4_CTL__POR				(0x37)
+#define TOMTOM_A_CDC_COMP0_B5_CTL			(0x36C)
+#define TOMTOM_A_CDC_COMP0_B5_CTL__POR				(0x7F)
+#define TOMTOM_A_CDC_COMP1_B5_CTL			(0x374)
+#define TOMTOM_A_CDC_COMP1_B5_CTL__POR				(0x7F)
+#define TOMTOM_A_CDC_COMP2_B5_CTL			(0x37C)
+#define TOMTOM_A_CDC_COMP2_B5_CTL__POR				(0x7F)
+#define TOMTOM_A_CDC_COMP0_B6_CTL			(0x36D)
+#define TOMTOM_A_CDC_COMP0_B6_CTL__POR				(0x00)
+#define TOMTOM_A_CDC_COMP1_B6_CTL			(0x375)
+#define TOMTOM_A_CDC_COMP1_B6_CTL__POR				(0x00)
+#define TOMTOM_A_CDC_COMP2_B6_CTL			(0x37D)
+#define TOMTOM_A_CDC_COMP2_B6_CTL__POR				(0x00)
+#define TOMTOM_A_CDC_COMP0_SHUT_DOWN_STATUS			(0x36E)
+#define TOMTOM_A_CDC_COMP0_SHUT_DOWN_STATUS__POR			(0x03)
+#define TOMTOM_A_CDC_COMP1_SHUT_DOWN_STATUS			(0x376)
+#define TOMTOM_A_CDC_COMP1_SHUT_DOWN_STATUS__POR			(0x03)
+#define TOMTOM_A_CDC_COMP2_SHUT_DOWN_STATUS			(0x37E)
+#define TOMTOM_A_CDC_COMP2_SHUT_DOWN_STATUS__POR			(0x03)
+#define TOMTOM_A_CDC_COMP0_FS_CFG			(0x36F)
+#define TOMTOM_A_CDC_COMP0_FS_CFG__POR				(0x03)
+#define TOMTOM_A_CDC_COMP1_FS_CFG			(0x377)
+#define TOMTOM_A_CDC_COMP1_FS_CFG__POR				(0x03)
+#define TOMTOM_A_CDC_COMP2_FS_CFG			(0x37F)
+#define TOMTOM_A_CDC_COMP2_FS_CFG__POR				(0x03)
+#define TOMTOM_A_CDC_CONN_RX1_B1_CTL			(0x380)
+#define TOMTOM_A_CDC_CONN_RX1_B1_CTL__POR				(0x00)
+#define TOMTOM_A_CDC_CONN_RX1_B2_CTL			(0x381)
+#define TOMTOM_A_CDC_CONN_RX1_B2_CTL__POR				(0x00)
+#define TOMTOM_A_CDC_CONN_RX1_B3_CTL			(0x382)
+#define TOMTOM_A_CDC_CONN_RX1_B3_CTL__POR				(0x00)
+#define TOMTOM_A_CDC_CONN_RX2_B1_CTL			(0x383)
+#define TOMTOM_A_CDC_CONN_RX2_B1_CTL__POR				(0x00)
+#define TOMTOM_A_CDC_CONN_RX2_B2_CTL			(0x384)
+#define TOMTOM_A_CDC_CONN_RX2_B2_CTL__POR				(0x00)
+#define TOMTOM_A_CDC_CONN_RX2_B3_CTL			(0x385)
+#define TOMTOM_A_CDC_CONN_RX2_B3_CTL__POR				(0x00)
+#define TOMTOM_A_CDC_CONN_RX3_B1_CTL			(0x386)
+#define TOMTOM_A_CDC_CONN_RX3_B1_CTL__POR				(0x00)
+#define TOMTOM_A_CDC_CONN_RX3_B2_CTL			(0x387)
+#define TOMTOM_A_CDC_CONN_RX3_B2_CTL__POR				(0x00)
+#define TOMTOM_A_CDC_CONN_RX4_B1_CTL			(0x388)
+#define TOMTOM_A_CDC_CONN_RX4_B1_CTL__POR				(0x00)
+#define TOMTOM_A_CDC_CONN_RX4_B2_CTL			(0x389)
+#define TOMTOM_A_CDC_CONN_RX4_B2_CTL__POR				(0x00)
+#define TOMTOM_A_CDC_CONN_RX5_B1_CTL			(0x38A)
+#define TOMTOM_A_CDC_CONN_RX5_B1_CTL__POR				(0x00)
+#define TOMTOM_A_CDC_CONN_RX5_B2_CTL			(0x38B)
+#define TOMTOM_A_CDC_CONN_RX5_B2_CTL__POR				(0x00)
+#define TOMTOM_A_CDC_CONN_RX6_B1_CTL			(0x38C)
+#define TOMTOM_A_CDC_CONN_RX6_B1_CTL__POR				(0x00)
+#define TOMTOM_A_CDC_CONN_RX6_B2_CTL			(0x38D)
+#define TOMTOM_A_CDC_CONN_RX6_B2_CTL__POR				(0x00)
+#define TOMTOM_A_CDC_CONN_RX7_B1_CTL			(0x38E)
+#define TOMTOM_A_CDC_CONN_RX7_B1_CTL__POR				(0x00)
+#define TOMTOM_A_CDC_CONN_RX7_B2_CTL			(0x38F)
+#define TOMTOM_A_CDC_CONN_RX7_B2_CTL__POR				(0x00)
+#define TOMTOM_A_CDC_CONN_RX7_B3_CTL			(0x390)
+#define TOMTOM_A_CDC_CONN_RX7_B3_CTL__POR				(0x00)
+#define TOMTOM_A_CDC_CONN_ANC_B1_CTL			(0x391)
+#define TOMTOM_A_CDC_CONN_ANC_B1_CTL__POR				(0x00)
+#define TOMTOM_A_CDC_CONN_ANC_B2_CTL			(0x392)
+#define TOMTOM_A_CDC_CONN_ANC_B2_CTL__POR				(0x00)
+#define TOMTOM_A_CDC_CONN_TX_B1_CTL			(0x393)
+#define TOMTOM_A_CDC_CONN_TX_B1_CTL__POR				(0x00)
+#define TOMTOM_A_CDC_CONN_TX_B2_CTL			(0x394)
+#define TOMTOM_A_CDC_CONN_TX_B2_CTL__POR				(0x00)
+#define TOMTOM_A_CDC_CONN_TX_B3_CTL			(0x395)
+#define TOMTOM_A_CDC_CONN_TX_B3_CTL__POR				(0x00)
+#define TOMTOM_A_CDC_CONN_TX_B4_CTL			(0x396)
+#define TOMTOM_A_CDC_CONN_TX_B4_CTL__POR				(0x00)
+#define TOMTOM_A_CDC_CONN_EQ1_B1_CTL			(0x397)
+#define TOMTOM_A_CDC_CONN_EQ1_B1_CTL__POR				(0x00)
+#define TOMTOM_A_CDC_CONN_EQ1_B2_CTL			(0x398)
+#define TOMTOM_A_CDC_CONN_EQ1_B2_CTL__POR				(0x00)
+#define TOMTOM_A_CDC_CONN_EQ1_B3_CTL			(0x399)
+#define TOMTOM_A_CDC_CONN_EQ1_B3_CTL__POR				(0x00)
+#define TOMTOM_A_CDC_CONN_EQ1_B4_CTL			(0x39A)
+#define TOMTOM_A_CDC_CONN_EQ1_B4_CTL__POR				(0x00)
+#define TOMTOM_A_CDC_CONN_EQ2_B1_CTL			(0x39B)
+#define TOMTOM_A_CDC_CONN_EQ2_B1_CTL__POR				(0x00)
+#define TOMTOM_A_CDC_CONN_EQ2_B2_CTL			(0x39C)
+#define TOMTOM_A_CDC_CONN_EQ2_B2_CTL__POR				(0x00)
+#define TOMTOM_A_CDC_CONN_EQ2_B3_CTL			(0x39D)
+#define TOMTOM_A_CDC_CONN_EQ2_B3_CTL__POR				(0x00)
+#define TOMTOM_A_CDC_CONN_EQ2_B4_CTL			(0x39E)
+#define TOMTOM_A_CDC_CONN_EQ2_B4_CTL__POR				(0x00)
+#define TOMTOM_A_CDC_CONN_SRC1_B1_CTL			(0x39F)
+#define TOMTOM_A_CDC_CONN_SRC1_B1_CTL__POR				(0x00)
+#define TOMTOM_A_CDC_CONN_SRC1_B2_CTL			(0x3A0)
+#define TOMTOM_A_CDC_CONN_SRC1_B2_CTL__POR				(0x00)
+#define TOMTOM_A_CDC_CONN_SRC2_B1_CTL			(0x3A1)
+#define TOMTOM_A_CDC_CONN_SRC2_B1_CTL__POR				(0x00)
+#define TOMTOM_A_CDC_CONN_SRC2_B2_CTL			(0x3A2)
+#define TOMTOM_A_CDC_CONN_SRC2_B2_CTL__POR				(0x00)
+#define TOMTOM_A_CDC_CONN_TX_SB_B1_CTL			(0x3A3)
+#define TOMTOM_A_CDC_CONN_TX_SB_B1_CTL__POR				(0x00)
+#define TOMTOM_A_CDC_CONN_TX_SB_B2_CTL			(0x3A4)
+#define TOMTOM_A_CDC_CONN_TX_SB_B2_CTL__POR				(0x00)
+#define TOMTOM_A_CDC_CONN_TX_SB_B3_CTL			(0x3A5)
+#define TOMTOM_A_CDC_CONN_TX_SB_B3_CTL__POR				(0x00)
+#define TOMTOM_A_CDC_CONN_TX_SB_B4_CTL			(0x3A6)
+#define TOMTOM_A_CDC_CONN_TX_SB_B4_CTL__POR				(0x00)
+#define TOMTOM_A_CDC_CONN_TX_SB_B5_CTL			(0x3A7)
+#define TOMTOM_A_CDC_CONN_TX_SB_B5_CTL__POR				(0x00)
+#define TOMTOM_A_CDC_CONN_TX_SB_B6_CTL			(0x3A8)
+#define TOMTOM_A_CDC_CONN_TX_SB_B6_CTL__POR				(0x00)
+#define TOMTOM_A_CDC_CONN_TX_SB_B7_CTL			(0x3A9)
+#define TOMTOM_A_CDC_CONN_TX_SB_B7_CTL__POR				(0x00)
+#define TOMTOM_A_CDC_CONN_TX_SB_B8_CTL			(0x3AA)
+#define TOMTOM_A_CDC_CONN_TX_SB_B8_CTL__POR				(0x00)
+#define TOMTOM_A_CDC_CONN_TX_SB_B9_CTL			(0x3AB)
+#define TOMTOM_A_CDC_CONN_TX_SB_B9_CTL__POR				(0x00)
+#define TOMTOM_A_CDC_CONN_TX_SB_B10_CTL			(0x3AC)
+#define TOMTOM_A_CDC_CONN_TX_SB_B10_CTL__POR				(0x00)
+#define TOMTOM_A_CDC_CONN_TX_SB_B11_CTL			(0x3AD)
+#define TOMTOM_A_CDC_CONN_TX_SB_B11_CTL__POR				(0x00)
+#define TOMTOM_A_CDC_CONN_RX_SB_B1_CTL			(0x3AE)
+#define TOMTOM_A_CDC_CONN_RX_SB_B1_CTL__POR				(0x00)
+#define TOMTOM_A_CDC_CONN_RX_SB_B2_CTL			(0x3AF)
+#define TOMTOM_A_CDC_CONN_RX_SB_B2_CTL__POR				(0x00)
+#define TOMTOM_A_CDC_CONN_CLSH_CTL			(0x3B0)
+#define TOMTOM_A_CDC_CONN_CLSH_CTL__POR				(0x00)
+#define TOMTOM_A_CDC_CONN_MISC			(0x3B1)
+#define TOMTOM_A_CDC_CONN_MISC__POR				(0x01)
+#define TOMTOM_A_CDC_CONN_RX8_B1_CTL			(0x3B3)
+#define TOMTOM_A_CDC_CONN_RX8_B1_CTL__POR				(0x00)
+#define TOMTOM_A_CDC_CLIP_ADJ_SPKR_B1_CTL		(0x3B4)
+#define TOMTOM_A_CDC_CLIP_ADJ_SPKR_B1_CTL__POR				(0x81)
+#define TOMTOM_A_CDC_CLIP_ADJ_SPKR_CLIP_LEVEL_ADJUST	(0x3B5)
+#define TOMTOM_A_CDC_CLIP_ADJ_SPKR_CLIP_LEVEL_ADJUST__POR		(0x00)
+#define TOMTOM_A_CDC_CLIP_ADJ_SPKR_MIN_CLIP_THRESHOLD	(0x3B6)
+#define TOMTOM_A_CDC_CLIP_ADJ_SPKR_MIN_CLIP_THRESHOLD__POR		(0xFF)
+#define TOMTOM_A_CDC_CLIP_ADJ_SPKR_THRESHOLD_STATUS	(0x3B7)
+#define TOMTOM_A_CDC_CLIP_ADJ_SPKR_THRESHOLD_STATUS__POR		(0x00)
+#define TOMTOM_A_CDC_CLIP_ADJ_SPKR_SAMPLE_MARK		(0x3B8)
+#define TOMTOM_A_CDC_CLIP_ADJ_SPKR_SAMPLE_MARK__POR			(0x04)
+#define TOMTOM_A_CDC_CLIP_ADJ_SPKR_BOOST_GATING		(0x3B9)
+#define TOMTOM_A_CDC_CLIP_ADJ_SPKR_BOOST_GATING__POR			(0x04)
+#define TOMTOM_A_CDC_CLIP_ADJ_SPKR2_B1_CTL		(0x3BA)
+#define TOMTOM_A_CDC_CLIP_ADJ_SPKR2_B1_CTL__POR				(0x81)
+#define TOMTOM_A_CDC_CLIP_ADJ_SPKR2_CLIP_LEVEL_ADJUST	(0x3BB)
+#define TOMTOM_A_CDC_CLIP_ADJ_SPKR2_CLIP_LEVEL_ADJUST__POR		(0x00)
+#define TOMTOM_A_CDC_CLIP_ADJ_SPKR2_MIN_CLIP_THRESHOLD	(0x3BC)
+#define TOMTOM_A_CDC_CLIP_ADJ_SPKR2_MIN_CLIP_THRESHOLD__POR		(0xFF)
+#define TOMTOM_A_CDC_CLIP_ADJ_SPKR2_THRESHOLD_STATUS	(0x3BD)
+#define TOMTOM_A_CDC_CLIP_ADJ_SPKR2_THRESHOLD_STATUS__POR		(0x00)
+#define TOMTOM_A_CDC_CLIP_ADJ_SPKR2_SAMPLE_MARK		(0x3BE)
+#define TOMTOM_A_CDC_CLIP_ADJ_SPKR2_SAMPLE_MARK__POR			(0x04)
+#define TOMTOM_A_CDC_CLIP_ADJ_SPKR2_BOOST_GATING	(0x3BF)
+#define TOMTOM_A_CDC_CLIP_ADJ_SPKR2_BOOST_GATING__POR			(0x04)
+#define TOMTOM_A_CDC_MBHC_EN_CTL			(0x3C0)
+#define TOMTOM_A_CDC_MBHC_EN_CTL__POR				(0x00)
+#define TOMTOM_A_CDC_MBHC_FIR_B1_CFG			(0x3C1)
+#define TOMTOM_A_CDC_MBHC_FIR_B1_CFG__POR				(0x00)
+#define TOMTOM_A_CDC_MBHC_FIR_B2_CFG			(0x3C2)
+#define TOMTOM_A_CDC_MBHC_FIR_B2_CFG__POR				(0x06)
+#define TOMTOM_A_CDC_MBHC_TIMER_B1_CTL			(0x3C3)
+#define TOMTOM_A_CDC_MBHC_TIMER_B1_CTL__POR				(0x03)
+#define TOMTOM_A_CDC_MBHC_TIMER_B2_CTL			(0x3C4)
+#define TOMTOM_A_CDC_MBHC_TIMER_B2_CTL__POR				(0x09)
+#define TOMTOM_A_CDC_MBHC_TIMER_B3_CTL			(0x3C5)
+#define TOMTOM_A_CDC_MBHC_TIMER_B3_CTL__POR				(0x1E)
+#define TOMTOM_A_CDC_MBHC_TIMER_B4_CTL			(0x3C6)
+#define TOMTOM_A_CDC_MBHC_TIMER_B4_CTL__POR				(0x45)
+#define TOMTOM_A_CDC_MBHC_TIMER_B5_CTL			(0x3C7)
+#define TOMTOM_A_CDC_MBHC_TIMER_B5_CTL__POR				(0x04)
+#define TOMTOM_A_CDC_MBHC_TIMER_B6_CTL			(0x3C8)
+#define TOMTOM_A_CDC_MBHC_TIMER_B6_CTL__POR				(0x78)
+#define TOMTOM_A_CDC_MBHC_B1_STATUS			(0x3C9)
+#define TOMTOM_A_CDC_MBHC_B1_STATUS__POR				(0x00)
+#define TOMTOM_A_CDC_MBHC_B2_STATUS			(0x3CA)
+#define TOMTOM_A_CDC_MBHC_B2_STATUS__POR				(0x00)
+#define TOMTOM_A_CDC_MBHC_B3_STATUS			(0x3CB)
+#define TOMTOM_A_CDC_MBHC_B3_STATUS__POR				(0x00)
+#define TOMTOM_A_CDC_MBHC_B4_STATUS			(0x3CC)
+#define TOMTOM_A_CDC_MBHC_B4_STATUS__POR				(0x00)
+#define TOMTOM_A_CDC_MBHC_B5_STATUS			(0x3CD)
+#define TOMTOM_A_CDC_MBHC_B5_STATUS__POR				(0x00)
+#define TOMTOM_A_CDC_MBHC_B1_CTL			(0x3CE)
+#define TOMTOM_A_CDC_MBHC_B1_CTL__POR				(0xC0)
+#define TOMTOM_A_CDC_MBHC_B2_CTL			(0x3CF)
+#define TOMTOM_A_CDC_MBHC_B2_CTL__POR				(0x5D)
+#define TOMTOM_A_CDC_MBHC_VOLT_B1_CTL			(0x3D0)
+#define TOMTOM_A_CDC_MBHC_VOLT_B1_CTL__POR				(0x00)
+#define TOMTOM_A_CDC_MBHC_VOLT_B2_CTL			(0x3D1)
+#define TOMTOM_A_CDC_MBHC_VOLT_B2_CTL__POR				(0x00)
+#define TOMTOM_A_CDC_MBHC_VOLT_B3_CTL			(0x3D2)
+#define TOMTOM_A_CDC_MBHC_VOLT_B3_CTL__POR				(0x00)
+#define TOMTOM_A_CDC_MBHC_VOLT_B4_CTL			(0x3D3)
+#define TOMTOM_A_CDC_MBHC_VOLT_B4_CTL__POR				(0x00)
+#define TOMTOM_A_CDC_MBHC_VOLT_B5_CTL			(0x3D4)
+#define TOMTOM_A_CDC_MBHC_VOLT_B5_CTL__POR				(0x00)
+#define TOMTOM_A_CDC_MBHC_VOLT_B6_CTL			(0x3D5)
+#define TOMTOM_A_CDC_MBHC_VOLT_B6_CTL__POR				(0x00)
+#define TOMTOM_A_CDC_MBHC_VOLT_B7_CTL			(0x3D6)
+#define TOMTOM_A_CDC_MBHC_VOLT_B7_CTL__POR				(0xFF)
+#define TOMTOM_A_CDC_MBHC_VOLT_B8_CTL			(0x3D7)
+#define TOMTOM_A_CDC_MBHC_VOLT_B8_CTL__POR				(0x07)
+#define TOMTOM_A_CDC_MBHC_VOLT_B9_CTL			(0x3D8)
+#define TOMTOM_A_CDC_MBHC_VOLT_B9_CTL__POR				(0xFF)
+#define TOMTOM_A_CDC_MBHC_VOLT_B10_CTL			(0x3D9)
+#define TOMTOM_A_CDC_MBHC_VOLT_B10_CTL__POR				(0x7F)
+#define TOMTOM_A_CDC_MBHC_VOLT_B11_CTL			(0x3DA)
+#define TOMTOM_A_CDC_MBHC_VOLT_B11_CTL__POR				(0x00)
+#define TOMTOM_A_CDC_MBHC_VOLT_B12_CTL			(0x3DB)
+#define TOMTOM_A_CDC_MBHC_VOLT_B12_CTL__POR				(0x80)
+#define TOMTOM_A_CDC_MBHC_CLK_CTL			(0x3DC)
+#define TOMTOM_A_CDC_MBHC_CLK_CTL__POR				(0x00)
+#define TOMTOM_A_CDC_MBHC_INT_CTL			(0x3DD)
+#define TOMTOM_A_CDC_MBHC_INT_CTL__POR				(0x00)
+#define TOMTOM_A_CDC_MBHC_DEBUG_CTL			(0x3DE)
+#define TOMTOM_A_CDC_MBHC_DEBUG_CTL__POR				(0x00)
+#define TOMTOM_A_CDC_MBHC_SPARE			(0x3DF)
+#define TOMTOM_A_CDC_MBHC_SPARE__POR				(0x00)
+#define TOMTOM_A_CDC_RX8_B1_CTL			(0x3E0)
+#define TOMTOM_A_CDC_RX8_B1_CTL__POR				(0x00)
+#define TOMTOM_A_CDC_RX8_B2_CTL			(0x3E1)
+#define TOMTOM_A_CDC_RX8_B2_CTL__POR				(0x00)
+#define TOMTOM_A_CDC_RX8_B3_CTL			(0x3E2)
+#define TOMTOM_A_CDC_RX8_B3_CTL__POR				(0x00)
+#define TOMTOM_A_CDC_RX8_B4_CTL			(0x3E3)
+#define TOMTOM_A_CDC_RX8_B4_CTL__POR				(0x0B)
+#define TOMTOM_A_CDC_RX8_B5_CTL			(0x3E4)
+#define TOMTOM_A_CDC_RX8_B5_CTL__POR				(0x78)
+#define TOMTOM_A_CDC_RX8_B6_CTL			(0x3E5)
+#define TOMTOM_A_CDC_RX8_B6_CTL__POR				(0x80)
+#define TOMTOM_A_CDC_RX8_VOL_CTL_B1_CTL			(0x3E6)
+#define TOMTOM_A_CDC_RX8_VOL_CTL_B1_CTL__POR				(0x00)
+#define TOMTOM_A_CDC_RX8_VOL_CTL_B2_CTL			(0x3E7)
+#define TOMTOM_A_CDC_RX8_VOL_CTL_B2_CTL__POR				(0x00)
+#define TOMTOM_A_CDC_SPKR2_CLIPDET_VAL0			(0x3E8)
+#define TOMTOM_A_CDC_SPKR2_CLIPDET_VAL0__POR				(0x00)
+#define TOMTOM_A_CDC_SPKR2_CLIPDET_VAL1			(0x3E9)
+#define TOMTOM_A_CDC_SPKR2_CLIPDET_VAL1__POR				(0x00)
+#define TOMTOM_A_CDC_SPKR2_CLIPDET_VAL2			(0x3EA)
+#define TOMTOM_A_CDC_SPKR2_CLIPDET_VAL2__POR				(0x00)
+#define TOMTOM_A_CDC_SPKR2_CLIPDET_VAL3			(0x3EB)
+#define TOMTOM_A_CDC_SPKR2_CLIPDET_VAL3__POR				(0x00)
+#define TOMTOM_A_CDC_SPKR2_CLIPDET_VAL4			(0x3EC)
+#define TOMTOM_A_CDC_SPKR2_CLIPDET_VAL4__POR				(0x00)
+#define TOMTOM_A_CDC_SPKR2_CLIPDET_VAL5			(0x3ED)
+#define TOMTOM_A_CDC_SPKR2_CLIPDET_VAL5__POR				(0x00)
+#define TOMTOM_A_CDC_SPKR2_CLIPDET_VAL6			(0x3EE)
+#define TOMTOM_A_CDC_SPKR2_CLIPDET_VAL6__POR				(0x00)
+#define TOMTOM_A_CDC_SPKR2_CLIPDET_VAL7			(0x3EF)
+#define TOMTOM_A_CDC_SPKR2_CLIPDET_VAL7__POR				(0x00)
+#define TOMTOM_A_CDC_BOOST_MODE_CTL			(0x3F0)
+#define TOMTOM_A_CDC_BOOST_MODE_CTL__POR				(0x00)
+#define TOMTOM_A_CDC_BOOST_THRESHOLD			(0x3F1)
+#define TOMTOM_A_CDC_BOOST_THRESHOLD__POR				(0x02)
+#define TOMTOM_A_CDC_BOOST_TAP_SEL			(0x3F2)
+#define TOMTOM_A_CDC_BOOST_TAP_SEL__POR				(0x00)
+#define TOMTOM_A_CDC_BOOST_HOLD_TIME			(0x3F3)
+#define TOMTOM_A_CDC_BOOST_HOLD_TIME__POR				(0x02)
+#define TOMTOM_A_CDC_BOOST_TRGR_EN			(0x3F4)
+#define TOMTOM_A_CDC_BOOST_TRGR_EN__POR				(0x00)
+
+/* SLIMBUS Slave Registers */
+#define TOMTOM_SLIM_PGD_PORT_INT_EN0                     (0x30)
+#define TOMTOM_SLIM_PGD_PORT_INT_STATUS_RX_0             (0x34)
+#define TOMTOM_SLIM_PGD_PORT_INT_STATUS_RX_1             (0x35)
+#define TOMTOM_SLIM_PGD_PORT_INT_STATUS_TX_0             (0x36)
+#define TOMTOM_SLIM_PGD_PORT_INT_STATUS_TX_1             (0x37)
+#define TOMTOM_SLIM_PGD_PORT_INT_CLR_RX_0                (0x38)
+#define TOMTOM_SLIM_PGD_PORT_INT_CLR_RX_1                (0x39)
+#define TOMTOM_SLIM_PGD_PORT_INT_CLR_TX_0                (0x3A)
+#define TOMTOM_SLIM_PGD_PORT_INT_CLR_TX_1                (0x3B)
+#define TOMTOM_SLIM_PGD_PORT_INT_RX_SOURCE0		(0x60)
+#define TOMTOM_SLIM_PGD_PORT_INT_TX_SOURCE0		(0x70)
+
+/* Macros for Packing Register Writes into a U32 */
+#define TOMTOM_PACKED_REG_SIZE sizeof(u32)
+
+#define TOMTOM_CODEC_PACK_ENTRY(reg, mask, val) (((val) & 0xff)|\
+	(((mask) & 0xff) << 8)|(((reg) & 0xffff) << 16))
+#define TOMTOM_CODEC_UNPACK_ENTRY(packed, reg, mask, val) \
+	do { \
+		((reg) = (((packed) >> 16) & 0xffff)); \
+		((mask) = (((packed) >> 8) & 0xff)); \
+		((val) = ((packed) & 0xff)); \
+	} while (0)
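
As an editorial illustration (not part of the patch), the two macros above
round-trip a register write through a packed u32: val in bits 7:0, mask in
bits 15:8, reg in bits 31:16. A minimal sketch with invented names:

	static void tomtom_pack_demo(void)
	{
		u16 reg;
		u8 mask, val;
		u32 packed = TOMTOM_CODEC_PACK_ENTRY(TOMTOM_A_CDC_CLSH_B1_CTL,
						     0xFF, 0xE4);

		TOMTOM_CODEC_UNPACK_ENTRY(packed, reg, mask, val);
		/* now reg == 0x320, mask == 0xFF, val == 0xE4 */
	}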
+
+#define TOMTOM_SB_PGD_PORT_TX_BASE    0x50
+#define TOMTOM_SB_PGD_PORT_RX_BASE    0x40
+#define WCD9330_MAX_REGISTER 0x3FF
+extern const u8 tomtom_reg_readable[WCD9330_MAX_REGISTER + 1];
+#endif
diff -Nruw linux-4.4.115-fbx/include/linux/mfd/wcd9xxx./wcd9xxx-irq.h linux-4.4.115-fbx/include/linux/mfd/wcd9xxx/wcd9xxx-irq.h
--- linux-4.4.115-fbx/include/linux/mfd/wcd9xxx./wcd9xxx-irq.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/include/linux/mfd/wcd9xxx/wcd9xxx-irq.h	2019-01-22 16:16:28.299289837 +0100
@@ -0,0 +1,32 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/types.h>
+#include <linux/mfd/wcd9xxx/core.h>
+
+#ifndef __MFD_WCD9XXX_IRQ_H
+#define __MFD_WCD9XXX_IRQ_H
+bool wcd9xxx_lock_sleep(struct wcd9xxx_core_resource *);
+void wcd9xxx_unlock_sleep(struct wcd9xxx_core_resource *);
+void wcd9xxx_nested_irq_lock(struct wcd9xxx_core_resource *);
+void wcd9xxx_nested_irq_unlock(struct wcd9xxx_core_resource *);
+int wcd9xxx_request_irq(struct wcd9xxx_core_resource *, int,
+			irq_handler_t, const char *, void *);
+
+void wcd9xxx_free_irq(struct wcd9xxx_core_resource *, int, void*);
+void wcd9xxx_enable_irq(struct wcd9xxx_core_resource *, int);
+void wcd9xxx_disable_irq(struct wcd9xxx_core_resource *, int);
+void wcd9xxx_disable_irq_sync(struct wcd9xxx_core_resource *, int);
+
+int wcd9xxx_irq_init(struct wcd9xxx_core_resource *);
+void wcd9xxx_irq_exit(struct wcd9xxx_core_resource *);
+#endif
diff -Nruw linux-4.4.115-fbx/include/linux/mfd/wcd9xxx./wcd9xxx-slimslave.h linux-4.4.115-fbx/include/linux/mfd/wcd9xxx/wcd9xxx-slimslave.h
--- linux-4.4.115-fbx/include/linux/mfd/wcd9xxx./wcd9xxx-slimslave.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/include/linux/mfd/wcd9xxx/wcd9xxx-slimslave.h	2019-01-22 16:16:28.299289837 +0100
@@ -0,0 +1,119 @@
+/* Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __WCD9310_SLIMSLAVE_H_
+#define __WCD9310_SLIMSLAVE_H_
+
+#include <linux/slimbus/slimbus.h>
+#include <linux/mfd/wcd9xxx/core.h>
+
+
+/*
+ *  The client is expected to give port ids in the range of
+ *  1-10 for pre-Taiko Tx ports and 1-16 for Taiko,
+ *  1-7 for pre-Taiko Rx ports and 1-13 for Taiko
+ *  (see the per-chip maximums below); we need to add an
+ *  offset to get the absolute slave port id before
+ *  configuring the HW.
+ */
+#define TABLA_SB_PGD_MAX_NUMBER_OF_TX_SLAVE_DEV_PORTS 10
+#define TAIKO_SB_PGD_MAX_NUMBER_OF_TX_SLAVE_DEV_PORTS 16
+
+#define SLIM_MAX_TX_PORTS TAIKO_SB_PGD_MAX_NUMBER_OF_TX_SLAVE_DEV_PORTS
+
+#define TABLA_SB_PGD_OFFSET_OF_RX_SLAVE_DEV_PORTS \
+	TABLA_SB_PGD_MAX_NUMBER_OF_TX_SLAVE_DEV_PORTS
+#define TAIKO_SB_PGD_OFFSET_OF_RX_SLAVE_DEV_PORTS \
+	TAIKO_SB_PGD_MAX_NUMBER_OF_TX_SLAVE_DEV_PORTS
+
+#define TABLA_SB_PGD_MAX_NUMBER_OF_RX_SLAVE_DEV_PORTS 7
+#define TAIKO_SB_PGD_MAX_NUMBER_OF_RX_SLAVE_DEV_PORTS 13
+
+#define SLIM_MAX_RX_PORTS TAIKO_SB_PGD_MAX_NUMBER_OF_RX_SLAVE_DEV_PORTS
+
+#define SLIM_MAX_REG_ADDR (0x180 + 4 * (SLIM_MAX_RX_PORTS))
+
+#define TABLA_SB_PGD_RX_PORT_MULTI_CHANNEL_0_START_PORT_ID \
+	TABLA_SB_PGD_OFFSET_OF_RX_SLAVE_DEV_PORTS
+#define TAIKO_SB_PGD_RX_PORT_MULTI_CHANNEL_0_START_PORT_ID \
+	TAIKO_SB_PGD_OFFSET_OF_RX_SLAVE_DEV_PORTS
+
+#define TABLA_SB_PGD_RX_PORT_MULTI_CHANNEL_0_END_PORT_ID 16
+#define TAIKO_SB_PGD_RX_PORT_MULTI_CHANNEL_0_END_PORT_ID 31
+
+#define TABLA_SB_PGD_TX_PORT_MULTI_CHANNEL_1_END_PORT_ID 9
+#define TAIKO_SB_PGD_TX_PORT_MULTI_CHANNEL_1_END_PORT_ID 15
+
+/* The details below are taken from the SLIMBUS slave SWI. */
+#define SB_PGD_PORT_BASE 0x000
+
+#define SB_PGD_PORT_CFG_BYTE_ADDR(offset, port_num) \
+		(SB_PGD_PORT_BASE + offset + (1 * port_num))
+
+#define SB_PGD_TX_PORT_MULTI_CHANNEL_0(port_num) \
+		(SB_PGD_PORT_BASE + 0x100 + 4*port_num)
+#define SB_PGD_TX_PORT_MULTI_CHANNEL_0_START_PORT_ID   0
+#define SB_PGD_TX_PORT_MULTI_CHANNEL_0_END_PORT_ID     7
+
+#define SB_PGD_TX_PORT_MULTI_CHANNEL_1(port_num) \
+		(SB_PGD_PORT_BASE + 0x101 + 4*port_num)
+#define SB_PGD_TX_PORT_MULTI_CHANNEL_1_START_PORT_ID   8
+
+#define SB_PGD_RX_PORT_MULTI_CHANNEL_0(offset, port_num) \
+		(SB_PGD_PORT_BASE + offset + (4 * port_num))
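
As a worked check of the address arithmetic above (editorial note):
SB_PGD_TX_PORT_MULTI_CHANNEL_0(3) resolves to 0x000 + 0x100 + 4*3 = 0x10C,
and SB_PGD_PORT_CFG_BYTE_ADDR(0x40, 3) to 0x40 + 3 = 0x43.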
+
+/* Slave port water mark level
+ * (0: 6 bytes, 1: 9 bytes, 2: 12 bytes, 3: 15 bytes)
+ */
+#define SLAVE_PORT_WATER_MARK_6BYTES  0
+#define SLAVE_PORT_WATER_MARK_9BYTES  1
+#define SLAVE_PORT_WATER_MARK_12BYTES 2
+#define SLAVE_PORT_WATER_MARK_15BYTES 3
+#define SLAVE_PORT_WATER_MARK_SHIFT 1
+#define SLAVE_PORT_ENABLE           1
+#define SLAVE_PORT_DISABLE          0
+#define WATER_MARK_VAL \
+	((SLAVE_PORT_WATER_MARK_12BYTES << SLAVE_PORT_WATER_MARK_SHIFT) | \
+	 (SLAVE_PORT_ENABLE))
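
Editorial note: the default WATER_MARK_VAL above evaluates to
(SLAVE_PORT_WATER_MARK_12BYTES << 1) | SLAVE_PORT_ENABLE = (2 << 1) | 1 =
0x05, i.e. a 12-byte watermark with the port-enable bit set.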
+#define BASE_CH_NUM 128
+
+
+int wcd9xxx_init_slimslave(struct wcd9xxx *wcd9xxx,
+			   u8 wcd9xxx_pgd_la,
+			   unsigned int tx_num, unsigned int *tx_slot,
+			   unsigned int rx_num, unsigned int *rx_slot);
+
+int wcd9xxx_deinit_slimslave(struct wcd9xxx *wcd9xxx);
+
+int wcd9xxx_cfg_slim_sch_rx(struct wcd9xxx *wcd9xxx,
+			    struct list_head *wcd9xxx_ch_list,
+			    unsigned int rate, unsigned int bit_width,
+			    u16 *grph);
+int wcd9xxx_cfg_slim_sch_tx(struct wcd9xxx *wcd9xxx,
+			    struct list_head *wcd9xxx_ch_list,
+			    unsigned int rate, unsigned int bit_width,
+				u16 *grph);
+int wcd9xxx_close_slim_sch_rx(struct wcd9xxx *wcd9xxx,
+			      struct list_head *wcd9xxx_ch_list, u16 grph);
+int wcd9xxx_close_slim_sch_tx(struct wcd9xxx *wcd9xxx,
+			      struct list_head *wcd9xxx_ch_list, u16 grph);
+int wcd9xxx_get_channel(struct wcd9xxx *wcd9xxx,
+			unsigned int *rx_ch,
+			unsigned int *tx_ch);
+int wcd9xxx_get_slave_port(unsigned int ch_num);
+int wcd9xxx_disconnect_port(struct wcd9xxx *wcd9xxx,
+			    struct list_head *wcd9xxx_ch_list, u16 grph);
+int wcd9xxx_rx_vport_validation(u32 port_id,
+				struct list_head *codec_dai_list);
+int wcd9xxx_tx_vport_validation(u32 vtable, u32 port_id,
+				struct wcd9xxx_codec_dai_data *codec_dai,
+				u32 num_codec_dais);
+#endif /* __WCD9310_SLIMSLAVE_H_ */
diff -Nruw linux-4.4.115-fbx/include/linux/mfd/wcd9xxx./wcd9xxx-utils.h linux-4.4.115-fbx/include/linux/mfd/wcd9xxx/wcd9xxx-utils.h
--- linux-4.4.115-fbx/include/linux/mfd/wcd9xxx./wcd9xxx-utils.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/include/linux/mfd/wcd9xxx/wcd9xxx-utils.h	2019-01-22 16:16:28.299289837 +0100
@@ -0,0 +1,141 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __WCD9XXX_UTILS_H__
+#define __WCD9XXX_UTILS_H__
+
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/regmap.h>
+#include <linux/mfd/wcd9xxx/pdata.h>
+#include <linux/mfd/wcd9xxx/core.h>
+
+struct wcd9xxx_pdata *wcd9xxx_populate_dt_data(struct device *dev);
+int wcd9xxx_bringup(struct device *dev);
+int wcd9xxx_bringdown(struct device *dev);
+struct regmap *wcd9xxx_regmap_init(struct device *,
+				   const struct regmap_config *);
+int wcd9xxx_reset(struct device *dev);
+int wcd9xxx_reset_low(struct device *dev);
+int wcd9xxx_get_codec_info(struct device *dev);
+
+typedef int (*codec_bringup_fn)(struct wcd9xxx *);
+typedef int (*codec_bringdown_fn)(struct wcd9xxx *);
+typedef int (*codec_type_fn)(struct wcd9xxx *,
+			     struct wcd9xxx_codec_type *);
+
+#ifdef CONFIG_WCD934X_CODEC
+extern int wcd934x_bringup(struct wcd9xxx *wcd9xxx);
+extern int wcd934x_bringdown(struct wcd9xxx *wcd9xxx);
+extern int wcd934x_get_codec_info(struct wcd9xxx *,
+				  struct wcd9xxx_codec_type *);
+#endif
+
+#ifdef CONFIG_WCD9335_CODEC
+extern int wcd9335_bringup(struct wcd9xxx *wcd9xxx);
+extern int wcd9335_bringdown(struct wcd9xxx *wcd9xxx);
+extern int wcd9335_get_codec_info(struct wcd9xxx *,
+				  struct wcd9xxx_codec_type *);
+#endif
+
+#ifdef CONFIG_WCD9330_CODEC
+extern int wcd9330_bringup(struct wcd9xxx *wcd9xxx);
+extern int wcd9330_bringdown(struct wcd9xxx *wcd9xxx);
+extern int wcd9330_get_codec_info(struct wcd9xxx *,
+				  struct wcd9xxx_codec_type *);
+#endif
+
+static inline codec_bringdown_fn wcd9xxx_bringdown_fn(int type)
+{
+	codec_bringdown_fn cdc_bdown_fn;
+
+	switch (type) {
+#ifdef CONFIG_WCD934X_CODEC
+	case WCD934X:
+		cdc_bdown_fn = wcd934x_bringdown;
+		break;
+#endif
+#ifdef CONFIG_WCD9335_CODEC
+	case WCD9335:
+		cdc_bdown_fn = wcd9335_bringdown;
+		break;
+#endif
+#ifdef CONFIG_WCD9330_CODEC
+	case WCD9330:
+		cdc_bdown_fn = wcd9330_bringdown;
+		break;
+#endif
+	default:
+		cdc_bdown_fn = NULL;
+		break;
+	}
+
+	return cdc_bdown_fn;
+}
+
+static inline codec_bringup_fn wcd9xxx_bringup_fn(int type)
+{
+	codec_bringup_fn cdc_bup_fn;
+
+	switch (type) {
+#ifdef CONFIG_WCD934X_CODEC
+	case WCD934X:
+		cdc_bup_fn = wcd934x_bringup;
+		break;
+#endif
+#ifdef CONFIG_WCD9335_CODEC
+	case WCD9335:
+		cdc_bup_fn = wcd9335_bringup;
+		break;
+#endif
+#ifdef CONFIG_WCD9330_CODEC
+	case WCD9330:
+		cdc_bup_fn = wcd9330_bringup;
+		break;
+#endif
+	default:
+		cdc_bup_fn = NULL;
+		break;
+	}
+
+	return cdc_bup_fn;
+}
+
+static inline codec_type_fn wcd9xxx_get_codec_info_fn(int type)
+{
+	codec_type_fn cdc_type_fn;
+
+	switch (type) {
+#ifdef CONFIG_WCD934X_CODEC
+	case WCD934X:
+		cdc_type_fn = wcd934x_get_codec_info;
+		break;
+#endif
+#ifdef CONFIG_WCD9335_CODEC
+	case WCD9335:
+		cdc_type_fn = wcd9335_get_codec_info;
+		break;
+#endif
+#ifdef CONFIG_WCD9330_CODEC
+	case WCD9330:
+		cdc_type_fn = wcd9330_get_codec_info;
+		break;
+#endif
+	default:
+		cdc_type_fn = NULL;
+		break;
+	}
+
+	return cdc_type_fn;
+}
+#endif
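
A brief editorial sketch of how the dispatch helpers above are meant to be
used ('type' stands for whatever WCD93xx identifier the caller holds; the
function name is invented):

	static int demo_codec_power_up(struct wcd9xxx *wcd9xxx, int type)
	{
		codec_bringup_fn bringup = wcd9xxx_bringup_fn(type);

		return bringup ? bringup(wcd9xxx) : -ENODEV;
	}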
+
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/include/linux/mmc/ring_buffer.h	2019-01-22 16:16:28.311289946 +0100
@@ -0,0 +1,55 @@
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __MMC_RING_BUFFER__
+#define __MMC_RING_BUFFER__
+
+#include <linux/mmc/card.h>
+#include <linux/smp.h>
+
+#include "core.h"
+
+#define MMC_TRACE_RBUF_SZ_ORDER	2	/* 2^2 pages */
+#define MMC_TRACE_RBUF_SZ	(PAGE_SIZE * (1 << MMC_TRACE_RBUF_SZ_ORDER))
+#define MMC_TRACE_EVENT_SZ	256
+#define MMC_TRACE_RBUF_NUM_EVENTS	(MMC_TRACE_RBUF_SZ / MMC_TRACE_EVENT_SZ)
+
+struct mmc_host;
+struct mmc_trace_buffer {
+	int	wr_idx;
+	bool stop_tracing;
+	spinlock_t trace_lock;
+	char *data;
+};
+
+#ifdef CONFIG_MMC_RING_BUFFER
+void mmc_stop_tracing(struct mmc_host *mmc);
+void mmc_trace_write(struct mmc_host *mmc, const char *fmt, ...);
+void mmc_trace_init(struct mmc_host *mmc);
+void mmc_trace_free(struct mmc_host *mmc);
+void mmc_dump_trace_buffer(struct mmc_host *mmc, struct seq_file *s);
+#else
+static inline void mmc_stop_tracing(struct mmc_host *mmc) {}
+static inline void mmc_trace_write(struct mmc_host *mmc,
+		const char *fmt, ...) {}
+static inline void mmc_trace_init(struct mmc_host *mmc) {}
+static inline void mmc_trace_free(struct mmc_host *mmc) {}
+static inline void mmc_dump_trace_buffer(struct mmc_host *mmc,
+		struct seq_file *s) {}
+#endif
+
+#define MMC_TRACE(mmc, fmt, ...) \
+		mmc_trace_write(mmc, fmt, ##__VA_ARGS__)
+
+#endif /* __MMC_RING_BUFFER__ */
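
Editorial sketch (not part of the patch) of how a fixed-slot ring like the
one declared above is typically written; the function name is invented and
the real writer lives in the driver, not in this header:

	static void demo_rb_write(struct mmc_trace_buffer *buf, const char *msg)
	{
		unsigned long flags;
		int idx;

		if (buf->stop_tracing)
			return;

		spin_lock_irqsave(&buf->trace_lock, flags);
		idx = buf->wr_idx++ % MMC_TRACE_RBUF_NUM_EVENTS;	/* wrap */
		spin_unlock_irqrestore(&buf->trace_lock, flags);

		/* each event owns a fixed MMC_TRACE_EVENT_SZ slot in data */
		scnprintf(buf->data + idx * MMC_TRACE_EVENT_SZ,
			  MMC_TRACE_EVENT_SZ, "%s", msg);
	}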
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/include/linux/msm_audio_ion.h	2019-01-22 16:16:28.315289982 +0100
@@ -0,0 +1,61 @@
+/*
+ * Copyright (c) 2013-2015, 2017 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _LINUX_MSM_AUDIO_ION_H
+#define _LINUX_MSM_AUDIO_ION_H
+#include <sound/q6asm-v2.h>
+#include <sound/pcm.h>
+#include <linux/msm_ion.h>
+
+enum {
+	HLOS_TO_ADSP = 1,
+	ADSP_TO_HLOS,
+};
+
+#define VMID_CP_ADSP_SHARED 33
+
+int msm_audio_ion_alloc(const char *name, struct ion_client **client,
+			struct ion_handle **handle, size_t bufsz,
+			ion_phys_addr_t *paddr, size_t *pa_len, void **vaddr);
+
+int msm_audio_ion_import(const char *name, struct ion_client **client,
+			struct ion_handle **handle, int fd,
+			unsigned long *ionflag, size_t bufsz,
+			ion_phys_addr_t *paddr, size_t *pa_len, void **vaddr);
+
+int msm_audio_ion_free(struct ion_client *client, struct ion_handle *handle);
+int msm_audio_ion_mmap(struct audio_buffer *substream,
+		       struct vm_area_struct *vma);
+
+bool msm_audio_ion_is_smmu_available(void);
+int msm_audio_ion_cache_operations(struct audio_buffer *abuff, int cache_op);
+
+struct ion_client *msm_audio_ion_client_create(const char *name);
+void msm_audio_ion_client_destroy(struct ion_client *client);
+int msm_audio_ion_import_legacy(const char *name, struct ion_client *client,
+			struct ion_handle **handle, int fd,
+			unsigned long *ionflag, size_t bufsz,
+			ion_phys_addr_t *paddr, size_t *pa_len, void **vaddr);
+int msm_audio_ion_free_legacy(struct ion_client *client,
+			struct ion_handle *handle);
+u32 msm_audio_populate_upper_32_bits(ion_phys_addr_t pa);
+
+int msm_audio_ion_phys_assign(const char *name, struct ion_client **client,
+			      struct ion_handle **handle,
+			      int fd, ion_phys_addr_t *paddr,
+			      size_t *pa_len, u8 assign_type);
+int msm_audio_ion_phys_free(struct ion_client *client,
+			    struct ion_handle *handle,
+			    ion_phys_addr_t *paddr,
+			    size_t *pa_len, u8 assign_type);
+#endif /* _LINUX_MSM_AUDIO_ION_H */
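
A hedged usage sketch of the alloc/free pair declared above (buffer name and
size invented):

	static int demo_audio_alloc(void)
	{
		struct ion_client *client;
		struct ion_handle *handle;
		ion_phys_addr_t paddr;
		size_t pa_len;
		void *vaddr;
		int rc;

		rc = msm_audio_ion_alloc("demo_audio_buf", &client, &handle,
					 4096, &paddr, &pa_len, &vaddr);
		if (rc)
			return rc;

		/* vaddr is for CPU access, paddr is handed to the DSP */

		return msm_audio_ion_free(client, handle);
	}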
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/include/linux/msm_bcl.h	2019-01-22 16:16:28.315289982 +0100
@@ -0,0 +1,104 @@
+/*
+ * Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MSM_BCL_H
+#define __MSM_BCL_H
+
+#define BCL_NAME_MAX_LEN 20
+
+enum bcl_trip_type {
+	BCL_HIGH_TRIP,
+	BCL_LOW_TRIP,
+	BCL_TRIP_MAX,
+};
+
+enum bcl_param {
+	BCL_PARAM_VOLTAGE,
+	BCL_PARAM_CURRENT,
+	BCL_PARAM_MAX,
+};
+
+struct bcl_threshold {
+	int                     trip_value;
+	enum bcl_trip_type      type;
+	void                    *trip_data;
+	void (*trip_notify)     (enum bcl_trip_type, int, void *);
+};
+struct bcl_param_data;
+struct bcl_driver_ops {
+	int (*read)             (int *);
+	int (*set_high_trip)    (int);
+	int (*get_high_trip)    (int *);
+	int (*set_low_trip)     (int);
+	int (*get_low_trip)     (int *);
+	int (*disable)          (void);
+	int (*enable)           (void);
+	int (*notify)           (struct bcl_param_data *, int,
+					enum bcl_trip_type);
+};
+
+struct bcl_param_data {
+	char                    name[BCL_NAME_MAX_LEN];
+	struct device           device;
+	struct bcl_driver_ops   *ops;
+	int                     high_trip;
+	int                     low_trip;
+	int                     last_read_val;
+	bool                    registered;
+	struct kobj_attribute   val_attr;
+	struct kobj_attribute   high_trip_attr;
+	struct kobj_attribute   low_trip_attr;
+	struct attribute_group  bcl_attr_gp;
+	struct bcl_threshold    *thresh[BCL_TRIP_MAX];
+};
+
+#ifdef CONFIG_MSM_BCL_CTL
+struct bcl_param_data *msm_bcl_register_param(enum bcl_param,
+	struct bcl_driver_ops *, char *);
+int msm_bcl_unregister_param(struct bcl_param_data *);
+int msm_bcl_enable(void);
+int msm_bcl_disable(void);
+int msm_bcl_set_threshold(enum bcl_param, enum bcl_trip_type,
+	struct bcl_threshold *);
+int msm_bcl_read(enum bcl_param, int *);
+#else
+static inline struct bcl_param_data *msm_bcl_register_param(
+	enum bcl_param param_type, struct bcl_driver_ops *ops, char *name)
+{
+	return NULL;
+}
+static inline int msm_bcl_unregister_param(struct bcl_param_data *data)
+{
+	return -ENOSYS;
+}
+static inline int msm_bcl_enable(void)
+{
+	return -ENOSYS;
+}
+static inline int msm_bcl_disable(void)
+{
+	return -ENOSYS;
+}
+static inline int msm_bcl_set_threshold(enum bcl_param param_type,
+	enum bcl_trip_type type,
+	struct bcl_threshold *inp_thresh)
+{
+	return -ENOSYS;
+}
+static inline int msm_bcl_read(enum bcl_param param_type, int *vbat_value)
+{
+	return -ENOSYS;
+}
+#endif
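
Editorial sketch of arming a trip point with this API (threshold value and
names invented):

	static void demo_bcl_notify(enum bcl_trip_type type, int value,
				    void *data)
	{
		pr_info("BCL trip %d crossed at %d\n", type, value);
	}

	static struct bcl_threshold demo_low_vbat = {
		.trip_value  = 3200,		/* placeholder, e.g. mV */
		.type        = BCL_LOW_TRIP,
		.trip_notify = demo_bcl_notify,
	};

	/* msm_bcl_set_threshold(BCL_PARAM_VOLTAGE, BCL_LOW_TRIP,
	 *			 &demo_low_vbat);
	 * msm_bcl_enable(); */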
+
+#endif /*__MSM_BCL_H*/
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/include/linux/msm-bus-board.h	2019-01-22 16:16:28.315289982 +0100
@@ -0,0 +1,198 @@
+/* Copyright (c) 2010-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __ASM_ARCH_MSM_BUS_BOARD_H
+#define __ASM_ARCH_MSM_BUS_BOARD_H
+
+#include <linux/types.h>
+#include <linux/input.h>
+
+enum context {
+	DUAL_CTX,
+	ACTIVE_CTX,
+	NUM_CTX
+};
+
+struct msm_bus_fabric_registration {
+	unsigned int id;
+	const char *name;
+	struct msm_bus_node_info *info;
+	unsigned int len;
+	int ahb;
+	const char *fabclk[NUM_CTX];
+	const char *iface_clk;
+	unsigned int offset;
+	unsigned int haltid;
+	unsigned int rpm_enabled;
+	unsigned int nmasters;
+	unsigned int nslaves;
+	unsigned int ntieredslaves;
+	bool il_flag;
+	const struct msm_bus_board_algorithm *board_algo;
+	int hw_sel;
+	void *hw_data;
+	uint32_t qos_freq;
+	uint32_t qos_baseoffset;
+	u64 nr_lim_thresh;
+	uint32_t eff_fact;
+	uint32_t qos_delta;
+	bool virt;
+};
+
+struct msm_bus_device_node_registration {
+	struct msm_bus_node_device_type *info;
+	unsigned int num_devices;
+	bool virt;
+};
+
+enum msm_bus_bw_tier_type {
+	MSM_BUS_BW_TIER1 = 1,
+	MSM_BUS_BW_TIER2,
+	MSM_BUS_BW_COUNT,
+	MSM_BUS_BW_SIZE = 0x7FFFFFFF,
+};
+
+struct msm_bus_halt_vector {
+	uint32_t haltval;
+	uint32_t haltmask;
+};
+
+extern struct msm_bus_fabric_registration msm_bus_apps_fabric_pdata;
+extern struct msm_bus_fabric_registration msm_bus_sys_fabric_pdata;
+extern struct msm_bus_fabric_registration msm_bus_mm_fabric_pdata;
+extern struct msm_bus_fabric_registration msm_bus_sys_fpb_pdata;
+extern struct msm_bus_fabric_registration msm_bus_cpss_fpb_pdata;
+extern struct msm_bus_fabric_registration msm_bus_def_fab_pdata;
+
+extern struct msm_bus_fabric_registration msm_bus_8960_apps_fabric_pdata;
+extern struct msm_bus_fabric_registration msm_bus_8960_sys_fabric_pdata;
+extern struct msm_bus_fabric_registration msm_bus_8960_mm_fabric_pdata;
+extern struct msm_bus_fabric_registration msm_bus_8960_sg_mm_fabric_pdata;
+extern struct msm_bus_fabric_registration msm_bus_8960_sys_fpb_pdata;
+extern struct msm_bus_fabric_registration msm_bus_8960_cpss_fpb_pdata;
+
+extern struct msm_bus_fabric_registration msm_bus_8064_apps_fabric_pdata;
+extern struct msm_bus_fabric_registration msm_bus_8064_sys_fabric_pdata;
+extern struct msm_bus_fabric_registration msm_bus_8064_mm_fabric_pdata;
+extern struct msm_bus_fabric_registration msm_bus_8064_sys_fpb_pdata;
+extern struct msm_bus_fabric_registration msm_bus_8064_cpss_fpb_pdata;
+
+extern struct msm_bus_fabric_registration msm_bus_9615_sys_fabric_pdata;
+extern struct msm_bus_fabric_registration msm_bus_9615_def_fab_pdata;
+
+extern struct msm_bus_fabric_registration msm_bus_8930_apps_fabric_pdata;
+extern struct msm_bus_fabric_registration msm_bus_8930_sys_fabric_pdata;
+extern struct msm_bus_fabric_registration msm_bus_8930_mm_fabric_pdata;
+extern struct msm_bus_fabric_registration msm_bus_8930_sys_fpb_pdata;
+extern struct msm_bus_fabric_registration msm_bus_8930_cpss_fpb_pdata;
+
+extern struct msm_bus_fabric_registration msm_bus_8974_sys_noc_pdata;
+extern struct msm_bus_fabric_registration msm_bus_8974_mmss_noc_pdata;
+extern struct msm_bus_fabric_registration msm_bus_8974_bimc_pdata;
+extern struct msm_bus_fabric_registration msm_bus_8974_ocmem_noc_pdata;
+extern struct msm_bus_fabric_registration msm_bus_8974_periph_noc_pdata;
+extern struct msm_bus_fabric_registration msm_bus_8974_config_noc_pdata;
+extern struct msm_bus_fabric_registration msm_bus_8974_ocmem_vnoc_pdata;
+
+extern struct msm_bus_fabric_registration msm_bus_9625_sys_noc_pdata;
+extern struct msm_bus_fabric_registration msm_bus_9625_bimc_pdata;
+extern struct msm_bus_fabric_registration msm_bus_9625_periph_noc_pdata;
+extern struct msm_bus_fabric_registration msm_bus_9625_config_noc_pdata;
+
+extern int msm_bus_device_match_adhoc(struct device *dev, void *id);
+
+void msm_bus_rpm_set_mt_mask(void);
+int msm_bus_board_rpm_get_il_ids(uint16_t *id);
+int msm_bus_board_get_iid(int id);
+
+#define NFAB_MSM8226 6
+#define NFAB_MSM8610 5
+
+/*
+ * These macros specify the convention followed for allocating
+ * ids to fabrics, masters and slaves for 8x60.
+ *
+ * A node can be identified as a master/slave/fabric by using
+ * these ids.
+ */
+#define FABRIC_ID_KEY 1024
+#define SLAVE_ID_KEY ((FABRIC_ID_KEY) >> 1)
+#define MAX_FAB_KEY 7168  /* OR(All fabric ids) */
+#define INT_NODE_START 10000
+
+#define GET_FABID(id) ((id) & MAX_FAB_KEY)
+
+#define NODE_ID(id) ((id) & (FABRIC_ID_KEY - 1))
+#define IS_SLAVE(id) ((NODE_ID(id)) >= SLAVE_ID_KEY ? 1 : 0)
+#define CHECK_ID(iid, id) (((iid & id) != id) ? -ENXIO : iid)
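
Worked example of the convention (editorial note): with FABRIC_ID_KEY = 1024
and SLAVE_ID_KEY = 512, an id of 1537 gives NODE_ID(1537) = 1537 & 1023 =
513, so IS_SLAVE(1537) = 1, while GET_FABID(1537) = 1537 & 7168 = 1024.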
+
+/*
+ * The following macros are used to format the data for port halt
+ * and unhalt requests.
+ */
+#define MSM_BUS_CLK_HALT 0x1
+#define MSM_BUS_CLK_HALT_MASK 0x1
+#define MSM_BUS_CLK_HALT_FIELDSIZE 0x1
+#define MSM_BUS_CLK_UNHALT 0x0
+
+#define MSM_BUS_MASTER_SHIFT(master, fieldsize) \
+	((master) * (fieldsize))
+
+#define MSM_BUS_SET_BITFIELD(word, fieldmask, fieldvalue) \
+	{	\
+		(word) &= ~(fieldmask);	\
+		(word) |= (fieldvalue);	\
+	}
+
+
+#define MSM_BUS_MASTER_HALT(u32haltmask, u32haltval, master) \
+	MSM_BUS_SET_BITFIELD(u32haltmask, \
+		MSM_BUS_CLK_HALT_MASK<<MSM_BUS_MASTER_SHIFT((master),\
+		MSM_BUS_CLK_HALT_FIELDSIZE), \
+		MSM_BUS_CLK_HALT_MASK<<MSM_BUS_MASTER_SHIFT((master),\
+		MSM_BUS_CLK_HALT_FIELDSIZE))\
+	MSM_BUS_SET_BITFIELD(u32haltval, \
+		MSM_BUS_CLK_HALT_MASK<<MSM_BUS_MASTER_SHIFT((master),\
+		MSM_BUS_CLK_HALT_FIELDSIZE), \
+		MSM_BUS_CLK_HALT<<MSM_BUS_MASTER_SHIFT((master),\
+		MSM_BUS_CLK_HALT_FIELDSIZE))
+
+#define MSM_BUS_MASTER_UNHALT(u32haltmask, u32haltval, master) \
+	MSM_BUS_SET_BITFIELD(u32haltmask, \
+		MSM_BUS_CLK_HALT_MASK<<MSM_BUS_MASTER_SHIFT((master),\
+		MSM_BUS_CLK_HALT_FIELDSIZE), \
+		MSM_BUS_CLK_HALT_MASK<<MSM_BUS_MASTER_SHIFT((master),\
+		MSM_BUS_CLK_HALT_FIELDSIZE))\
+	MSM_BUS_SET_BITFIELD(u32haltval, \
+		MSM_BUS_CLK_HALT_MASK<<MSM_BUS_MASTER_SHIFT((master),\
+		MSM_BUS_CLK_HALT_FIELDSIZE), \
+		MSM_BUS_CLK_UNHALT<<MSM_BUS_MASTER_SHIFT((master),\
+		MSM_BUS_CLK_HALT_FIELDSIZE))
+
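Editorial note: for a one-bit field, MSM_BUS_MASTER_HALT(mask, val, 2) sets
bit 2 (master 2, shift = 2 * 1) in both the halt mask and the halt value,
while MSM_BUS_MASTER_UNHALT sets the same mask bit but writes the value bit
as MSM_BUS_CLK_UNHALT (0).
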
+#define RPM_BUS_SLAVE_REQ	0x766c7362
+#define RPM_BUS_MASTER_REQ	0x73616d62
+
+enum msm_bus_rpm_slave_field_type {
+	RPM_SLAVE_FIELD_BW = 0x00007762,
+};
+
+enum msm_bus_rpm_mas_field_type {
+	RPM_MASTER_FIELD_BW =		0x00007762,
+	RPM_MASTER_FIELD_BW_T0 =	0x30747762,
+	RPM_MASTER_FIELD_BW_T1 =	0x31747762,
+	RPM_MASTER_FIELD_BW_T2 =	0x32747762,
+};
+
+#include <dt-bindings/msm/msm-bus-ids.h>
+
+
+#endif /*__ASM_ARCH_MSM_BUS_BOARD_H */
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/include/linux/msm-bus.h	2019-01-22 16:16:28.315289982 +0100
@@ -0,0 +1,213 @@
+/* Copyright (c) 2010-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _ARCH_ARM_MACH_MSM_BUS_H
+#define _ARCH_ARM_MACH_MSM_BUS_H
+
+#include <linux/types.h>
+#include <linux/input.h>
+#include <linux/platform_device.h>
+
+/*
+ * Macros for clients to convert their data to ib and ab
+ * Ws : Time window over which to transfer the data in SECONDS
+ * Bs : Size of the data block in bytes
+ * Per : Recurrence period
+ * Tb : Throughput bandwidth to prevent stalling
+ * R  : Ratio of actual bandwidth used to Tb
+ * Ib : Instantaneous bandwidth
+ * Ab : Arbitrated bandwidth
+ *
+ * IB_RECURRBLOCK and AB_RECURRBLOCK:
+ * These are used if the requirement is to transfer a
+ * recurring block of data over a known time window.
+ *
+ * IB_THROUGHPUTBW and AB_THROUGHPUTBW:
+ * These are used for CPU style masters. Here the requirement
+ * is to have minimum throughput bandwidth available to avoid
+ * stalling.
+ */
+#define IB_RECURRBLOCK(Ws, Bs) ((Ws) == 0 ? 0 : ((Bs)/(Ws)))
+#define AB_RECURRBLOCK(Bs, Per) ((Per) == 0 ? 0 : ((Bs)/(Per)))
+#define IB_THROUGHPUTBW(Tb) (Tb)
+#define AB_THROUGHPUTBW(Tb, R) ((Tb) * (R))
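
Editorial worked example: moving a 64 MB block within a 1 s window yields
IB_RECURRBLOCK(1, 64 << 20) = 64 MB/s of instantaneous bandwidth; with a 1 s
recurrence period, AB_RECURRBLOCK(64 << 20, 1) gives the same 64 MB/s
arbitrated figure.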
+
+struct msm_bus_vectors {
+	int src; /* Master */
+	int dst; /* Slave */
+	uint64_t ab; /* Arbitrated bandwidth */
+	uint64_t ib; /* Instantaneous bandwidth */
+};
+
+struct msm_bus_paths {
+	int num_paths;
+	struct msm_bus_vectors *vectors;
+};
+
+struct msm_bus_scale_pdata {
+	struct msm_bus_paths *usecase;
+	int num_usecases;
+	const char *name;
+	/*
+	 * If the active_only flag is set to 1, the BW request is applied
+	 * only when at least one CPU is active (powered on). If the flag
+	 * is set to 0, then the BW request is always applied irrespective
+	 * of the CPU state.
+	 */
+	unsigned int active_only;
+};
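
Putting these structures together, an editorial sketch (ids and sizes
invented, not part of the patch) of a single-path vote:

	static struct msm_bus_vectors demo_vectors[] = {
		{
			.src = 1,	/* master id, placeholder */
			.dst = 512,	/* slave id, placeholder */
			.ab  = AB_RECURRBLOCK(64 << 20, 1),	/* 64 MB/s */
			.ib  = IB_RECURRBLOCK(1, 64 << 20),	/* 64 MB/s */
		},
	};

	static struct msm_bus_paths demo_usecase[] = {
		{ .num_paths = 1, .vectors = demo_vectors },
	};

	static struct msm_bus_scale_pdata demo_pdata = {
		.usecase      = demo_usecase,
		.num_usecases = 1,
		.name         = "demo-client",
		.active_only  = 0,
	};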
+
+struct msm_bus_client_handle {
+	char *name;
+	int mas;
+	int slv;
+	int first_hop;
+	struct device *mas_dev;
+	u64 cur_act_ib;
+	u64 cur_act_ab;
+	u64 cur_slp_ib;
+	u64 cur_slp_ab;
+	bool active_only;
+};
+
+/* Scaling APIs */
+
+/*
+ * This function returns a handle to the client. This should be used to
+ * call msm_bus_scale_client_update_request.
+ * The function returns 0 if the bus driver is unable to register a client.
+ */
+
+#if (defined(CONFIG_QCOM_BUS_SCALING) || defined(CONFIG_QCOM_BUS_TOPOLOGY_ADHOC))
+int __init msm_bus_fabric_init_driver(void);
+uint32_t msm_bus_scale_register_client(struct msm_bus_scale_pdata *pdata);
+int msm_bus_scale_client_update_request(uint32_t cl, unsigned int index);
+void msm_bus_scale_unregister_client(uint32_t cl);
+int msm_bus_scale_client_update_context(uint32_t cl, bool active_only,
+							unsigned int ctx_idx);
+
+struct msm_bus_client_handle*
+msm_bus_scale_register(uint32_t mas, uint32_t slv, char *name,
+							bool active_only);
+void msm_bus_scale_unregister(struct msm_bus_client_handle *cl);
+int msm_bus_scale_update_bw(struct msm_bus_client_handle *cl, u64 ab, u64 ib);
+int msm_bus_scale_update_bw_context(struct msm_bus_client_handle *cl,
+		u64 act_ab, u64 act_ib, u64 slp_ib, u64 slp_ab);
+/* AXI Port configuration APIs */
+int msm_bus_axi_porthalt(int master_port);
+int msm_bus_axi_portunhalt(int master_port);
+
+#else
+static inline int __init msm_bus_fabric_init_driver(void) { return 0; }
+static struct msm_bus_client_handle dummy_cl;
+
+static inline uint32_t
+msm_bus_scale_register_client(struct msm_bus_scale_pdata *pdata)
+{
+	return 1;
+}
+
+static inline int
+msm_bus_scale_client_update_request(uint32_t cl, unsigned int index)
+{
+	return 0;
+}
+
+static inline int
+msm_bus_scale_client_update_context(uint32_t cl, bool active_only,
+							unsigned int ctx_idx)
+{
+	return 0;
+}
+
+static inline void
+msm_bus_scale_unregister_client(uint32_t cl)
+{
+}
+
+static inline int msm_bus_axi_porthalt(int master_port)
+{
+	return 0;
+}
+
+static inline int msm_bus_axi_portunhalt(int master_port)
+{
+	return 0;
+}
+
+static inline struct msm_bus_client_handle*
+msm_bus_scale_register(uint32_t mas, uint32_t slv, char *name,
+							bool active_only)
+{
+	return &dummy_cl;
+}
+
+static inline void msm_bus_scale_unregister(struct msm_bus_client_handle *cl)
+{
+}
+
+static inline int
+msm_bus_scale_update_bw(struct msm_bus_client_handle *cl, u64 ab, u64 ib)
+{
+	return 0;
+}
+
+static inline int
+msm_bus_scale_update_bw_context(struct msm_bus_client_handle *cl, u64 act_ab,
+				u64 act_ib, u64 slp_ib, u64 slp_ab)
+{
+	return 0;
+}
+
+#endif
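
An editorial sketch of the client lifecycle the registration comment above
describes (usecase indices invented; the pdata would normally come from
msm_bus_cl_get_pdata() or static board data):

	static void demo_bus_vote(struct msm_bus_scale_pdata *pdata)
	{
		uint32_t cl = msm_bus_scale_register_client(pdata);

		if (!cl)
			return;		/* bus driver could not register us */

		msm_bus_scale_client_update_request(cl, 1); /* vote usecase 1 */
		/* ... bandwidth-hungry work ... */
		msm_bus_scale_client_update_request(cl, 0); /* back to idle */
		msm_bus_scale_unregister_client(cl);
	}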
+
+#if defined(CONFIG_OF) && defined(CONFIG_QCOM_BUS_SCALING)
+struct msm_bus_scale_pdata *msm_bus_pdata_from_node(
+		struct platform_device *pdev, struct device_node *of_node);
+struct msm_bus_scale_pdata *msm_bus_cl_get_pdata(struct platform_device *pdev);
+void msm_bus_cl_clear_pdata(struct msm_bus_scale_pdata *pdata);
+#else
+static inline struct msm_bus_scale_pdata
+*msm_bus_cl_get_pdata(struct platform_device *pdev)
+{
+	return NULL;
+}
+
+static inline struct msm_bus_scale_pdata *msm_bus_pdata_from_node(
+		struct platform_device *pdev, struct device_node *of_node)
+{
+	return NULL;
+}
+
+static inline void msm_bus_cl_clear_pdata(struct msm_bus_scale_pdata *pdata)
+{
+}
+#endif
+
+#ifdef CONFIG_DEBUG_BUS_VOTER
+int msm_bus_floor_vote_context(const char *name, u64 floor_hz,
+						bool active_only);
+int msm_bus_floor_vote(const char *name, u64 floor_hz);
+#else
+static inline int msm_bus_floor_vote(const char *name, u64 floor_hz)
+{
+	return -EINVAL;
+}
+
+static inline int msm_bus_floor_vote_context(const char *name, u64 floor_hz,
+						bool active_only)
+{
+	return -EINVAL;
+}
+#endif /* CONFIG_DEBUG_BUS_VOTER */
+#endif /*_ARCH_ARM_MACH_MSM_BUS_H*/
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/include/linux/msm_bus_rules.h	2019-01-22 16:16:28.315289982 +0100
@@ -0,0 +1,63 @@
+/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _ARCH_ARM_MACH_MSM_BUS_RULES_H
+#define _ARCH_ARM_MACH_MSM_BUS_RULES_H
+
+#include <linux/types.h>
+#include <linux/list.h>
+#include <linux/notifier.h>
+#include <dt-bindings/msm/msm-bus-rule-ops.h>
+
+#define MAX_NODES		(5)
+
+struct rule_update_path_info {
+	u32 id;
+	u64 ab;
+	u64 ib;
+	u64 clk;
+	bool added;
+	struct list_head link;
+};
+
+struct rule_apply_rcm_info {
+	u32 id;
+	u64 lim_bw;
+	int throttle;
+	bool after_clk_commit;
+	struct list_head link;
+};
+
+struct bus_rule_type {
+	int num_src;
+	int *src_id;
+	int src_field;
+	int op;
+	u64 thresh;
+	int num_dst;
+	int *dst_node;
+	u64 dst_bw;
+	int mode;
+	void *client_data;
+};
+
+void msm_rule_register(int num_rules, struct bus_rule_type *rule,
+				struct notifier_block *nb);
+void msm_rule_unregister(int num_rules, struct bus_rule_type *rule,
+						struct notifier_block *nb);
+bool msm_rule_update(struct bus_rule_type *old_rule,
+				struct bus_rule_type *new_rule,
+				struct notifier_block *nb);
+void msm_rule_evaluate_rules(int node);
+void print_rules_buf(char *buf, int count);
+bool msm_rule_are_rules_registered(void);
+#endif /* _ARCH_ARM_MACH_MSM_BUS_RULES_H */
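
Editorial sketch of registering one rule (ids and threshold invented; the
FLD_*/OP_*/THROTTLE_* values are assumed to come from the included
dt-bindings/msm/msm-bus-rule-ops.h):

	static int demo_src[] = { 1 };		/* watched node, placeholder */
	static int demo_dst[] = { 512 };	/* throttled node, placeholder */

	static struct bus_rule_type demo_rule = {
		.num_src   = 1,
		.src_id    = demo_src,
		.src_field = FLD_AB,		/* watch arbitrated bandwidth */
		.op        = OP_GT,		/* trip when above threshold */
		.thresh    = 1000000000ULL,	/* ~1 GB/s, placeholder */
		.num_dst   = 1,
		.dst_node  = demo_dst,
		.dst_bw    = 500000000ULL,	/* cap at ~0.5 GB/s */
		.mode      = THROTTLE_ON,
	};

	/* msm_rule_register(1, &demo_rule, &demo_notifier_block); */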
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/include/linux/msm-core-interface.h	2019-01-22 16:16:28.315289982 +0100
@@ -0,0 +1,13 @@
+/* Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <uapi/linux/msm-core-interface.h>
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/include/linux/msm_dma_iommu_mapping.h	2019-01-22 16:16:28.315289982 +0100
@@ -0,0 +1,101 @@
+/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _LINUX_MSM_DMA_IOMMU_MAPPING_H
+#define _LINUX_MSM_DMA_IOMMU_MAPPING_H
+
+#include <linux/device.h>
+#include <linux/dma-buf.h>
+#include <linux/scatterlist.h>
+#include <linux/dma-mapping.h>
+
+#ifdef CONFIG_IOMMU_API
+/*
+ * This function does not take a reference to the dma_buf. Clients are
+ * expected to hold a reference to the dma_buf until they are done with
+ * mapping and unmapping.
+ */
+int msm_dma_map_sg_attrs(struct device *dev, struct scatterlist *sg, int nents,
+		   enum dma_data_direction dir, struct dma_buf *dma_buf,
+		   struct dma_attrs *attrs);
+
+static inline int msm_dma_map_sg_lazy(struct device *dev,
+			       struct scatterlist *sg, int nents,
+			       enum dma_data_direction dir,
+			       struct dma_buf *dma_buf)
+{
+	return msm_dma_map_sg_attrs(dev, sg, nents, dir, dma_buf, NULL);
+}
+
+static inline int msm_dma_map_sg(struct device *dev, struct scatterlist *sg,
+				  int nents, enum dma_data_direction dir,
+				  struct dma_buf *dma_buf)
+{
+	DEFINE_DMA_ATTRS(attrs);
+
+	init_dma_attrs(&attrs);
+	dma_set_attr(DMA_ATTR_NO_DELAYED_UNMAP, &attrs);
+	return msm_dma_map_sg_attrs(dev, sg, nents, dir, dma_buf, &attrs);
+}
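
Editorial note: msm_dma_map_sg() is thus the eager variant of
msm_dma_map_sg_lazy(); the DMA_ATTR_NO_DELAYED_UNMAP attribute, as its name
suggests, makes msm_dma_unmap_sg() tear the mapping down immediately rather
than deferring the unmap until the dma_buf itself is freed.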
+
+void msm_dma_unmap_sg(struct device *dev, struct scatterlist *sgl, int nents,
+		      enum dma_data_direction dir, struct dma_buf *dma_buf);
+
+int msm_dma_unmap_all_for_dev(struct device *dev);
+
+/*
+ * Below is private function only to be called by framework (ION) and not by
+ * clients.
+ */
+void msm_dma_buf_freed(void *buffer);
+
+#else /*CONFIG_IOMMU_API*/
+
+static inline int msm_dma_map_sg_attrs(struct device *dev,
+			struct scatterlist *sg, int nents,
+			enum dma_data_direction dir, struct dma_buf *dma_buf,
+			struct dma_attrs *attr)
+{
+	return -EINVAL;
+}
+
+static inline int msm_dma_map_sg_lazy(struct device *dev,
+			       struct scatterlist *sg, int nents,
+			       enum dma_data_direction dir,
+			       struct dma_buf *dma_buf)
+{
+	return -EINVAL;
+}
+
+static inline int msm_dma_map_sg(struct device *dev, struct scatterlist *sg,
+				  int nents, enum dma_data_direction dir,
+				  struct dma_buf *dma_buf)
+{
+	return -EINVAL;
+}
+
+static inline void msm_dma_unmap_sg(struct device *dev,
+					struct scatterlist *sgl, int nents,
+					enum dma_data_direction dir,
+					struct dma_buf *dma_buf)
+{
+}
+
+static inline int msm_dma_unmap_all_for_dev(struct device *dev)
+{
+	return 0;
+}
+
+static inline void msm_dma_buf_freed(void *buffer) {}
+#endif /*CONFIG_IOMMU_API*/
+
+#endif
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/include/linux/msm_ext_display.h	2019-01-22 16:16:28.315289982 +0100
@@ -0,0 +1,167 @@
+/* include/linux/msm_ext_display.h
+ *
+ * Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#ifndef _MSM_EXT_DISPLAY_H_
+#define _MSM_EXT_DISPLAY_H_
+
+#include <linux/device.h>
+#include <linux/platform_device.h>
+
+#define AUDIO_ACK_SET_ENABLE BIT(5)
+#define AUDIO_ACK_ENABLE BIT(4)
+#define AUDIO_ACK_CONNECT BIT(0)
+
+/**
+ *  Flags to be used with the HPD operation of the external display
+ *  interface:
+ *  MSM_EXT_DISP_HPD_AUDIO: audio will be routed to external display
+ *  MSM_EXT_DISP_HPD_VIDEO: video will be routed to external display
+ *  MSM_EXT_DISP_HPD_ASYNC_AUDIO: do not wait for the audio notification after wakeup
+ *  MSM_EXT_DISP_HPD_ASYNC_VIDEO: do not wait for the video notification after wakeup
+ */
+#define MSM_EXT_DISP_HPD_AUDIO BIT(0)
+#define MSM_EXT_DISP_HPD_VIDEO BIT(1)
+#define MSM_EXT_DISP_HPD_ASYNC_AUDIO BIT(2)
+#define MSM_EXT_DISP_HPD_ASYNC_VIDEO BIT(3)
+
+/**
+ * struct ext_disp_cable_notify - cable notify handler structure
+ * @link: a link for the linked list
+ * @status: current status of HDMI/DP cable connection
+ * @hpd_notify: callback function to provide cable status
+ */
+struct ext_disp_cable_notify {
+	struct list_head link;
+	int status;
+	void (*hpd_notify)(struct ext_disp_cable_notify *h);
+};
+
+struct msm_ext_disp_audio_edid_blk {
+	u8 *audio_data_blk;
+	unsigned int audio_data_blk_size; /* in bytes */
+	u8 *spk_alloc_data_blk;
+	unsigned int spk_alloc_data_blk_size; /* in bytes */
+};
+
+struct msm_ext_disp_audio_setup_params {
+	u32 sample_rate_hz;
+	u32 num_of_channels;
+	u32 channel_allocation;
+	u32 level_shift;
+	bool down_mix;
+	u32 sample_present;
+};
+
+/**
+ * External display identifier used by the audio driver to determine
+ * which interface it is interacting with.
+ */
+enum msm_ext_disp_type {
+	EXT_DISPLAY_TYPE_HDMI,
+	EXT_DISPLAY_TYPE_DP,
+	EXT_DISPLAY_TYPE_MAX
+};
+
+/**
+ * External display cable state used by the display interface to indicate
+ * connect/disconnect of the interface.
+ */
+enum msm_ext_disp_cable_state {
+	EXT_DISPLAY_CABLE_DISCONNECT,
+	EXT_DISPLAY_CABLE_CONNECT,
+	EXT_DISPLAY_CABLE_STATE_MAX
+};
+
+/**
+ * External display power state used by the display interface to indicate
+ * power on/off of the interface.
+ */
+enum msm_ext_disp_power_state {
+	EXT_DISPLAY_POWER_OFF,
+	EXT_DISPLAY_POWER_ON,
+	EXT_DISPLAY_POWER_MAX
+};
+
+/**
+ * struct msm_ext_disp_intf_ops - operations exposed to display interface
+ * @hpd: updates external display interface state
+ * @notify: acknowledgment to power on or off
+ * @ack: acknowledgment of the audio state
+ */
+struct msm_ext_disp_intf_ops {
+	int (*hpd)(struct platform_device *pdev,
+			enum msm_ext_disp_type type,
+			enum msm_ext_disp_cable_state state,
+			u32 flags);
+	int (*notify)(struct platform_device *pdev,
+			enum msm_ext_disp_cable_state state);
+	int (*ack)(struct platform_device *pdev, u32 ack);
+};
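+
+/*
+ * Example: a sketch of a cable-connect notification through the ops above,
+ * assuming "ops" and "pdev" (both hypothetical here) were obtained during
+ * registration:
+ *
+ *	ops->hpd(pdev, EXT_DISPLAY_TYPE_HDMI, EXT_DISPLAY_CABLE_CONNECT,
+ *		 MSM_EXT_DISP_HPD_AUDIO | MSM_EXT_DISP_HPD_VIDEO);
+ */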
+
+/**
+ * struct msm_ext_disp_audio_codec_ops - operations exposed to audio codec
+ * @audio_info_setup: configure audio on interface
+ * @get_audio_edid_blk: retrieve audio edid block
+ * @cable_status: cable connected/disconnected
+ * @get_intf_id: id of connected interface
+ * @teardown_done: audio teardown is complete
+ * @acknowledge: acknowledge audio status
+ * @codec_ready: notify when codec is ready
+ */
+struct msm_ext_disp_audio_codec_ops {
+	int (*audio_info_setup)(struct platform_device *pdev,
+		struct msm_ext_disp_audio_setup_params *params);
+	int (*get_audio_edid_blk)(struct platform_device *pdev,
+		struct msm_ext_disp_audio_edid_blk *blk);
+	int (*cable_status)(struct platform_device *pdev, u32 vote);
+	int (*get_intf_id)(struct platform_device *pdev);
+	void (*teardown_done)(struct platform_device *pdev);
+	int (*acknowledge)(struct platform_device *pdev, u32 ack);
+	void (*codec_ready)(struct platform_device *pdev);
+};
+
+/*
+ * struct msm_ext_disp_init_data - data needed to register the display interface
+ * @type: external display type
+ * @kobj: kobject of the external display interface
+ * @intf_ops: external display interface operations
+ * @codec_ops: audio codec operations
+ * @pdev: platform device of the external display interface
+ */
+struct msm_ext_disp_init_data {
+	enum msm_ext_disp_type type;
+	struct kobject *kobj;
+	struct msm_ext_disp_intf_ops intf_ops;
+	struct msm_ext_disp_audio_codec_ops codec_ops;
+	struct platform_device *pdev;
+};
+
+/*
+ * msm_ext_disp_register_audio_codec() - audio codec registration
+ * @pdev: platform device pointer
+ * @ops: audio codec operations
+ */
+int msm_ext_disp_register_audio_codec(struct platform_device *pdev,
+		struct msm_ext_disp_audio_codec_ops *ops);
+
+/*
+ * msm_hdmi_register_audio_codec() - wrapper for hdmi audio codec registration
+ * @pdev: platform device pointer
+ * @ops: audio codec operations
+ */
+int msm_hdmi_register_audio_codec(struct platform_device *pdev,
+	struct msm_ext_disp_audio_codec_ops *ops);
+
+/*
+ * msm_ext_disp_register_intf() - display interface registration
+ * @pdev: platform device pointer
+ * @init_data: data needed to register the display interface
+ */
+int msm_ext_disp_register_intf(struct platform_device *pdev,
+		struct msm_ext_disp_init_data *init_data);
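+
+/*
+ * Example: a minimal registration sketch; "my_intf_ops", "my_codec_ops" and
+ * the platform devices are hypothetical and assumed to be filled in
+ * elsewhere:
+ *
+ *	struct msm_ext_disp_init_data init_data = {
+ *		.type = EXT_DISPLAY_TYPE_DP,
+ *		.intf_ops = my_intf_ops,
+ *		.codec_ops = my_codec_ops,
+ *		.pdev = intf_pdev,
+ *	};
+ *
+ *	if (msm_ext_disp_register_intf(ext_disp_pdev, &init_data))
+ *		(handle the error)
+ */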
+
+#endif /*_MSM_EXT_DISPLAY_H_*/
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/include/linux/msm_ion.h	2019-01-22 16:16:28.315289982 +0100
@@ -0,0 +1,6 @@
+#ifndef __LINUX_MSM_ION_H__
+#define __LINUX_MSM_ION_H__
+
+#include "../../drivers/staging/android/ion/msm/msm_ion.h"
+
+#endif /* __LINUX_MSM_ION_H__ */
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/include/linux/msm_mhi.h	2019-01-22 16:16:28.315289982 +0100
@@ -0,0 +1,421 @@
+/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef MSM_MHI_H
+#define MSM_MHI_H
+#include <linux/types.h>
+#include <linux/device.h>
+#include <linux/scatterlist.h>
+
+#define MHI_DMA_MASK       0xFFFFFFFFFFULL
+#define MHI_MAX_MTU        0xFFFF
+
+struct mhi_client_config;
+struct mhi_device_ctxt;
+
+enum MHI_CLIENT_CHANNEL {
+	MHI_CLIENT_LOOPBACK_OUT = 0,
+	MHI_CLIENT_LOOPBACK_IN = 1,
+	MHI_CLIENT_SAHARA_OUT = 2,
+	MHI_CLIENT_SAHARA_IN = 3,
+	MHI_CLIENT_DIAG_OUT = 4,
+	MHI_CLIENT_DIAG_IN = 5,
+	MHI_CLIENT_SSR_OUT = 6,
+	MHI_CLIENT_SSR_IN = 7,
+	MHI_CLIENT_QDSS_OUT = 8,
+	MHI_CLIENT_QDSS_IN = 9,
+	MHI_CLIENT_EFS_OUT = 10,
+	MHI_CLIENT_EFS_IN = 11,
+	MHI_CLIENT_MBIM_OUT = 12,
+	MHI_CLIENT_MBIM_IN = 13,
+	MHI_CLIENT_QMI_OUT = 14,
+	MHI_CLIENT_QMI_IN = 15,
+	MHI_CLIENT_IP_CTRL_0_OUT = 16,
+	MHI_CLIENT_IP_CTRL_0_IN = 17,
+	MHI_CLIENT_IP_CTRL_1_OUT = 18,
+	MHI_CLIENT_IP_CTRL_1_IN = 19,
+	MHI_CLIENT_DCI_OUT = 20,
+	MHI_CLIENT_DCI_IN = 21,
+	MHI_CLIENT_TF_OUT = 22,
+	MHI_CLIENT_TF_IN = 23,
+	MHI_CLIENT_BL_OUT = 24,
+	MHI_CLIENT_BL_IN = 25,
+	MHI_CLIENT_DUN_OUT = 32,
+	MHI_CLIENT_DUN_IN = 33,
+	MHI_CLIENT_IPC_ROUTER_OUT = 34,
+	MHI_CLIENT_IPC_ROUTER_IN = 35,
+	MHI_CLIENT_IP_SW_1_OUT = 36,
+	MHI_CLIENT_IP_SW_1_IN = 37,
+	MHI_CLIENT_IP_SW_2_OUT = 38,
+	MHI_CLIENT_IP_SW_2_IN = 39,
+	MHI_CLIENT_IP_SW_3_OUT = 40,
+	MHI_CLIENT_IP_SW_3_IN = 41,
+	MHI_CLIENT_CSVT_OUT = 42,
+	MHI_CLIENT_CSVT_IN = 43,
+	MHI_CLIENT_SMCT_OUT = 44,
+	MHI_CLIENT_SMCT_IN = 45,
+	MHI_CLIENT_IP_SW_4_OUT = 46,
+	MHI_CLIENT_IP_SW_4_IN = 47,
+	MHI_CLIENT_RESERVED_1_LOWER = 48,
+	MHI_CLIENT_RESERVED_1_UPPER = 99,
+	MHI_CLIENT_IP_HW_0_OUT = 100,
+	MHI_CLIENT_IP_HW_0_IN = 101,
+	MHI_CLIENT_IP_HW_ADPL_IN = 102,
+	MHI_CLIENT_RESERVED_2_LOWER = 103,
+	MHI_CLIENT_RESERVED_2_UPPER = 127,
+	MHI_MAX_CHANNELS = 103
+};
+
+enum MHI_CB_REASON {
+	MHI_CB_XFER,
+	MHI_CB_MHI_DISABLED,
+	MHI_CB_MHI_ENABLED,
+	MHI_CB_MHI_SHUTDOWN,
+	MHI_CB_SYS_ERROR,
+	MHI_CB_RDDM,
+	MHI_CB_MHI_PROBED,
+};
+
+enum MHI_FLAGS {
+	MHI_EOB = 0x100,
+	MHI_EOT = 0x200,
+	MHI_CHAIN = 0x1,
+	MHI_FLAGS_reserved = 0x80000000,
+};
+
+struct mhi_result {
+	void *user_data;
+	void *buf_addr;
+	size_t bytes_xferd;
+	int transaction_status;
+	enum MHI_FLAGS flags;
+};
+
+struct mhi_cb_info {
+	struct mhi_result *result;
+	enum MHI_CB_REASON cb_reason;
+	u32 chan;
+};
+
+struct mhi_client_info_t {
+	enum MHI_CLIENT_CHANNEL chan;
+	const struct device *dev;
+	const char *node_name;
+	void (*mhi_client_cb)(struct mhi_cb_info *);
+	bool pre_allocate;
+	size_t max_payload;
+	void *user_data;
+};
+
+struct mhi_client_handle {
+	u32 dev_id;
+	u32 domain;
+	u32 bus;
+	u32 slot;
+	bool enabled;
+	struct mhi_client_config *client_config;
+};
+
+struct __packed bhi_vec_entry {
+	u64 phys_addr;
+	u64 size;
+};
+
+/**
+ * struct mhi_device - IO resources for MHI
+ * @dev: device node, points to the of_node
+ * @pci_dev: pci device node
+ * @resources: bar memory space and IRQ resources
+ * @support_rddm: this device supports ramdump collection
+ * @rddm_size: size of ramdump buffer in bytes to allocate
+ * @pm_runtime_get: fp for bus master's rpm pm_runtime_get
+ * @pm_runtime_put_noidle: fp for bus master's rpm pm_runtime_put_noidle
+ * @status_cb: fp for MHI status change notifications
+ * @mhi_dev_ctxt: private data for host
+ */
+struct mhi_device {
+	struct device *dev;
+	struct pci_dev *pci_dev;
+	struct resource resources[2];
+	bool support_rddm;
+	size_t rddm_size;
+	int (*pm_runtime_get)(struct pci_dev *pci_dev);
+	void (*pm_runtime_put_noidle)(struct pci_dev *pci_dev);
+	void (*status_cb)(enum MHI_CB_REASON, void *priv);
+	struct mhi_device_ctxt *mhi_dev_ctxt;
+};
+
+enum mhi_dev_ctrl {
+	MHI_DEV_CTRL_INIT,
+	MHI_DEV_CTRL_DE_INIT,
+	MHI_DEV_CTRL_SUSPEND,
+	MHI_DEV_CTRL_RESUME,
+	MHI_DEV_CTRL_POWER_OFF,
+	MHI_DEV_CTRL_POWER_ON,
+	MHI_DEV_CTRL_TRIGGER_RDDM,
+	MHI_DEV_CTRL_RDDM,
+	MHI_DEV_CTRL_RDDM_KERNEL_PANIC,
+	MHI_DEV_CTRL_NOTIFY_LINK_ERROR,
+	MHI_DEV_CTRL_MAXCMD,
+};
+
+enum mhi_rddm_segment {
+	MHI_RDDM_FW_SEGMENT,
+	MHI_RDDM_RD_SEGMENT,
+};
+
+#if defined(CONFIG_MSM_MHI)
+/**
+ * mhi_is_device_ready - Check if MHI is ready to register clients
+ *
+ * @dev: device node that points to DT node
+ * @node_name: device tree node that links MHI node
+ *
+ * @Return true if ready
+ */
+bool mhi_is_device_ready(const struct device * const dev,
+			 const char *node_name);
+
+/**
+ * mhi_register_device - register hardware resources with MHI
+ *
+ * @mhi_device: resources to be used
+ * @node_name: DT node name
+ * @user_data: cb data for client
+ * @Return 0 on success
+ */
+int mhi_register_device(struct mhi_device *mhi_device, const char *node_name,
+			void *user_data);
+
+/**
+ * mhi_register_channel - Client must call this function to obtain a handle for
+ *			  any MHI operations
+ *
+ *  @client_handle:  Handle populated by MHI, opaque to client
+ *  @client_info:    Channel/device information provided by the client,
+ *                   to which the handle maps.
+ *
+ * @Return errno
+ */
+int mhi_register_channel(struct mhi_client_handle **client_handle,
+			 struct mhi_client_info_t *client_info);
+
+/**
+ * mhi_pm_control_device - power management control api
+ * @mhi_device: registered device structure
+ * @ctrl: specific command
+ * @Return 0 on success
+ */
+int mhi_pm_control_device(struct mhi_device *mhi_device,
+			  enum mhi_dev_ctrl ctrl);
+
+/**
+ * mhi_xfer_rddm - transfer rddm segment to bus master
+ * @mhi_device: registered device structure
+ * @seg: rddm segment type to transfer
+ * @sg_list: filled with a scatterlist pointing to the segments
+ * @Return: # of segments, 0 if no segment available
+ */
+int mhi_xfer_rddm(struct mhi_device *mhi_device, enum mhi_rddm_segment seg,
+		  struct scatterlist **sg_list);
+
+/**
+ * mhi_deregister_channel - de-register callbacks from MHI
+ *
+ * @client_handle: Handle populated by MHI, opaque to client
+ *
+ * @Return errno
+ */
+int mhi_deregister_channel(struct mhi_client_handle *client_handle);
+
+/**
+ * mhi_open_channel - Client must call this function to open a channel
+ *
+ * @client_handle:  Handle populated by MHI, opaque to client
+ *
+ *  Not thread safe, caller must ensure concurrency protection.
+ *
+ * @Return errno
+ */
+int mhi_open_channel(struct mhi_client_handle *client_handle);
+
+/**
+ * mhi_queue_xfer - Client called function to add a buffer to MHI channel
+ *
+ *  @client_handle  Pointer to client handle previously obtained from
+ *                  mhi_open_channel
+ *  @buf            Pointer to client buffer
+ *  @buf_len        Length of the client buffer
+ *  @mhi_flags      MHI_FLAGS bitmask; MHI_CHAIN sets the chain bit on this
+ *                  buffer and MHI_EOB makes it trigger the EOB interrupt
+ *
+ *  NOTE:
+ *  Not thread safe, caller must ensure concurrency protection.
+ *  User buffer must be physically contiguous.
+ *
+ * @Return errno
+ */
+int mhi_queue_xfer(struct mhi_client_handle *client_handle, void *buf,
+		   size_t buf_len, enum MHI_FLAGS mhi_flags);
+
+/**
+ * mhi_close_channel - Client can request channel to be closed and handle freed
+ *
+ *  @client_handle  Pointer to the handle to be released, previously
+ *                  obtained from mhi_open_channel
+ *
+ *  Not thread safe, caller must ensure concurrency protection.
+ */
+void mhi_close_channel(struct mhi_client_handle *client_handle);
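+
+/*
+ * Example: a minimal client lifecycle sketch for the channel APIs above;
+ * "my_cb", "my_buf" and "my_buf_len" are hypothetical:
+ *
+ *	struct mhi_client_handle *handle;
+ *	struct mhi_client_info_t info = {
+ *		.chan = MHI_CLIENT_LOOPBACK_OUT,
+ *		.mhi_client_cb = my_cb,
+ *	};
+ *
+ *	if (!mhi_register_channel(&handle, &info)) {
+ *		mhi_open_channel(handle);
+ *		mhi_queue_xfer(handle, my_buf, my_buf_len, MHI_EOT);
+ *		(... wait for MHI_CB_XFER via my_cb ...)
+ *		mhi_close_channel(handle);
+ *	}
+ */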
+
+/**
+ * mhi_get_free_desc - Get the number of free descriptors on a channel.
+ *  @client_handle  Pointer to client handle previously obtained from
+ *                      mhi_open_channel.
+ *
+ * This API returns a snapshot of the available descriptors on the given
+ * channel.
+ *
+ * @Return  non-negative on success
+ */
+int mhi_get_free_desc(struct mhi_client_handle *client_handle);
+
+/*
+ * mhi_poll_inbound - Poll a buffer from MHI channel
+ * @client_handle  Pointer to client handle previously obtained from
+ *                      mhi_open_channel.
+ * @result         Result structure to be populated with buffer info,
+ *                 if available
+ *
+ * Clients may asynchronously poll an inbound channel for descriptors
+ * which have been populated. This API is used by the client to receive
+ * data from the device after a callback notification has been received.
+ *
+ *  Not thread safe, caller must ensure concurrency protection.
+ *
+ * @Return  non-negative on success
+ */
+int mhi_poll_inbound(struct mhi_client_handle *client_handle,
+		     struct mhi_result *result);
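+
+/*
+ * Example: a sketch of draining an inbound channel after an MHI_CB_XFER
+ * callback; "handle" is assumed to come from mhi_register_channel(), and
+ * the empty-queue convention shown (NULL buf_addr) is an assumption:
+ *
+ *	struct mhi_result result;
+ *
+ *	while (!mhi_poll_inbound(handle, &result) && result.buf_addr) {
+ *		(consume result.bytes_xferd bytes at result.buf_addr)
+ *	}
+ */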
+
+/**
+ * mhi_get_max_desc - Get the maximum number of descriptors
+ *			supported on the channel.
+ * @client_handle  Pointer to client handle previously obtained from
+ *                      mhi_open_channel.
+ * @Return  non-negative on success
+ */
+int mhi_get_max_desc(struct mhi_client_handle *client_handle);
+
+/* The following APIs are meant to be used by the rmnet interface only */
+int mhi_set_lpm(struct mhi_client_handle *client_handle, bool enable_lpm);
+int mhi_get_epid(struct mhi_client_handle *mhi_handle);
+struct mhi_result *mhi_poll(struct mhi_client_handle *client_handle);
+void mhi_mask_irq(struct mhi_client_handle *client_handle);
+void mhi_unmask_irq(struct mhi_client_handle *client_handle);
+
+#else
+static inline bool mhi_is_device_ready(const struct device * const dev,
+				       const char *node_name)
+{
+	return false;
+}
+
+static inline int mhi_register_device(struct mhi_device *mhi_device,
+				      const char *node_name, void *user_data)
+{
+	return -EINVAL;
+}
+
+static inline int mhi_register_channel(struct mhi_client_handle **client_handle,
+				       struct mhi_client_info_t *client_info)
+{
+	return -EINVAL;
+}
+
+static inline int mhi_pm_control_device(struct mhi_device *mhi_device,
+					enum mhi_dev_ctrl ctrl)
+{
+	return -EINVAL;
+}
+
+static inline int mhi_xfer_rddm(struct mhi_device *mhi_device,
+				enum mhi_rddm_segment seg,
+				struct scatterlist **sg_list)
+{
+	return -EINVAL;
+}
+
+static inline int mhi_deregister_channel(struct mhi_client_handle
+					 *client_handle)
+{
+	return -EINVAL;
+}
+
+static inline int mhi_open_channel(struct mhi_client_handle *client_handle)
+{
+	return -EINVAL;
+}
+
+static inline int mhi_queue_xfer(struct mhi_client_handle *client_handle,
+				 void *buf, size_t buf_len,
+				 enum MHI_FLAGS mhi_flags)
+{
+	return -EINVAL;
+}
+
+static inline void mhi_close_channel(struct mhi_client_handle *client_handle)
+{
+}
+
+static inline int mhi_get_free_desc(struct mhi_client_handle *client_handle)
+{
+	return -EINVAL;
+}
+
+static inline int mhi_poll_inbound(struct mhi_client_handle *client_handle,
+				   struct mhi_result *result)
+{
+	return -EINVAL;
+}
+
+static inline int mhi_get_max_desc(struct mhi_client_handle *client_handle)
+{
+	return -EINVAL;
+}
+
+static inline int mhi_set_lpm(struct mhi_client_handle *client_handle,
+			      bool enable_lpm)
+{
+	return -EINVAL;
+}
+
+static inline int mhi_get_epid(struct mhi_client_handle *mhi_handle)
+{
+	return -EINVAL;
+}
+
+static inline struct mhi_result *mhi_poll(struct mhi_client_handle
+					  *client_handle)
+{
+	return NULL;
+}
+
+static inline void mhi_mask_irq(struct mhi_client_handle *client_handle)
+{
+}
+
+static inline void mhi_unmask_irq(struct mhi_client_handle *client_handle)
+{
+}
+
+#endif
+#endif
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/include/linux/msm_pcie.h	2019-01-22 16:16:28.315289982 +0100
@@ -0,0 +1,217 @@
+/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MSM_PCIE_H
+#define __MSM_PCIE_H
+
+#include <linux/types.h>
+#include <linux/pci.h>
+
+enum msm_pcie_config {
+	MSM_PCIE_CONFIG_INVALID = 0,
+	MSM_PCIE_CONFIG_NO_CFG_RESTORE = 0x1,
+	MSM_PCIE_CONFIG_LINKDOWN = 0x2,
+	MSM_PCIE_CONFIG_NO_RECOVERY = 0x4,
+};
+
+enum msm_pcie_pm_opt {
+	MSM_PCIE_SUSPEND,
+	MSM_PCIE_RESUME,
+	MSM_PCIE_DISABLE_PC,
+	MSM_PCIE_ENABLE_PC,
+};
+
+enum msm_pcie_event {
+	MSM_PCIE_EVENT_INVALID = 0,
+	MSM_PCIE_EVENT_LINKDOWN = 0x1,
+	MSM_PCIE_EVENT_LINKUP = 0x2,
+	MSM_PCIE_EVENT_WAKEUP = 0x4,
+};
+
+enum msm_pcie_trigger {
+	MSM_PCIE_TRIGGER_CALLBACK,
+	MSM_PCIE_TRIGGER_COMPLETION,
+};
+
+struct msm_pcie_notify {
+	enum msm_pcie_event event;
+	void *user;
+	void *data;
+	u32 options;
+};
+
+struct msm_pcie_register_event {
+	u32 events;
+	void *user;
+	enum msm_pcie_trigger mode;
+	void (*callback)(struct msm_pcie_notify *notify);
+	struct msm_pcie_notify notify;
+	struct completion *completion;
+	u32 options;
+};
+
+#ifdef CONFIG_PCI_MSM
+/**
+ * msm_pcie_pm_control - control the power state of a PCIe link.
+ * @pm_opt:	power management operation
+ * @busnr:	bus number of PCIe endpoint
+ * @user:	handle of the caller
+ * @data:	private data from the caller
+ * @options:	options for pm control
+ *
+ * This function gives PCIe endpoint device drivers the control to change
+ * the power state of a PCIe link for their device.
+ *
+ * Return: 0 on success, negative value on error
+ */
+int msm_pcie_pm_control(enum msm_pcie_pm_opt pm_opt, u32 busnr, void *user,
+			void *data, u32 options);
+
+/**
+ * msm_pcie_register_event - register an event with PCIe bus driver.
+ * @reg:	event structure
+ *
+ * This function gives PCIe endpoint device drivers an option to register
+ * events with PCIe bus driver.
+ *
+ * Return: 0 on success, negative value on error
+ */
+int msm_pcie_register_event(struct msm_pcie_register_event *reg);
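+
+/*
+ * Example: a sketch of registering for link-down notifications; the
+ * callback "my_linkdown_cb" and the endpoint device pointer "ep_pdev"
+ * are hypothetical:
+ *
+ *	struct msm_pcie_register_event event = {
+ *		.events = MSM_PCIE_EVENT_LINKDOWN,
+ *		.user = ep_pdev,
+ *		.mode = MSM_PCIE_TRIGGER_CALLBACK,
+ *		.callback = my_linkdown_cb,
+ *	};
+ *
+ *	if (msm_pcie_register_event(&event))
+ *		(handle the error)
+ */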
+
+/**
+ * msm_pcie_deregister_event - deregister an event with PCIe bus driver.
+ * @reg:	event structure
+ *
+ * This function gives PCIe endpoint device drivers an option to deregister
+ * events with PCIe bus driver.
+ *
+ * Return: 0 on success, negative value on error
+ */
+int msm_pcie_deregister_event(struct msm_pcie_register_event *reg);
+
+/**
+ * msm_pcie_recover_config - recover config space.
+ * @dev:	pci device structure
+ *
+ * This function recovers the config space of both RC and Endpoint.
+ *
+ * Return: 0 on success, negative value on error
+ */
+int msm_pcie_recover_config(struct pci_dev *dev);
+
+/**
+ * msm_pcie_enumerate - enumerate Endpoints.
+ * @rc_idx:	RC that Endpoints connect to.
+ *
+ * This function enumerates Endpoints connected to RC.
+ *
+ * Return: 0 on success, negative value on error
+ */
+int msm_pcie_enumerate(u32 rc_idx);
+
+/**
+ * msm_pcie_shadow_control - control the shadowing of PCIe config space.
+ * @dev:	pci device structure
+ * @enable:	shadowing should be enabled or disabled
+ *
+ * This function gives PCIe endpoint device drivers the control to enable
+ * or disable the shadowing of PCIe config space.
+ *
+ * Return: 0 on success, negative value on error
+ */
+int msm_pcie_shadow_control(struct pci_dev *dev, bool enable);
+
+/**
+ * msm_pcie_debug_info - run a PCIe specific debug testcase.
+ * @dev:	pci device structure
+ * @option:	specifies which PCIe debug testcase to execute
+ * @base:	PCIe specific range
+ * @offset:	offset of destination register
+ * @mask:	mask the bit(s) of destination register
+ * @value:	value to be written to destination register
+ *
+ * This function gives PCIe endpoint device drivers the control to
+ * run a debug testcase.
+ *
+ * Return: 0 on success, negative value on error
+ */
+int msm_pcie_debug_info(struct pci_dev *dev, u32 option, u32 base,
+			u32 offset, u32 mask, u32 value);
+
+/**
+ * msm_pcie_configure_sid - calculates the SID for a PCIe endpoint.
+ * @dev:	device structure
+ * @sid:	the calculated SID
+ * @domain:	the domain number of the Root Complex
+ *
+ * This function calculates the SID for a PCIe endpoint device.
+ *
+ * Return: 0 on success, negative value on error
+ */
+int msm_pcie_configure_sid(struct device *dev, u32 *sid,
+			int *domain);
+#else /* !CONFIG_PCI_MSM */
+static inline int msm_pcie_pm_control(enum msm_pcie_pm_opt pm_opt, u32 busnr,
+			void *user, void *data, u32 options)
+{
+	return -ENODEV;
+}
+
+static inline int msm_pcie_register_event(struct msm_pcie_register_event *reg)
+{
+	return -ENODEV;
+}
+
+static inline int msm_pcie_deregister_event(struct msm_pcie_register_event *reg)
+{
+	return -ENODEV;
+}
+
+static inline int msm_pcie_recover_config(struct pci_dev *dev)
+{
+	return -ENODEV;
+}
+
+static inline int msm_pcie_enumerate(u32 rc_idx)
+{
+	return -ENODEV;
+}
+
+static inline int msm_pcie_shadow_control(struct pci_dev *dev, bool enable)
+{
+	return -ENODEV;
+}
+
+static inline int msm_pcie_debug_info(struct pci_dev *dev, u32 option, u32 base,
+			u32 offset, u32 mask, u32 value)
+{
+	return -ENODEV;
+}
+
+static inline int msm_pcie_configure_sid(struct device *dev, u32 *sid,
+			int *domain)
+{
+	return -ENODEV;
+}
+#endif /* CONFIG_PCI_MSM */
+
+#endif /* __MSM_PCIE_H */
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/include/linux/msm_remote_spinlock.h	2019-01-22 16:16:28.315289982 +0100
@@ -0,0 +1,80 @@
+/* Copyright (c) 2009, 2011, 2013-2015 The Linux Foundation.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+/*
+ * Part of this code is based on the standard ARM spinlock
+ * implementation (asm/spinlock.h) found in the 2.6.29 kernel.
+ */
+
+#ifndef __ASM__ARCH_QC_REMOTE_SPINLOCK_H
+#define __ASM__ARCH_QC_REMOTE_SPINLOCK_H
+
+#include <linux/io.h>
+#include <linux/types.h>
+
+#define REMOTE_SPINLOCK_NUM_PID 128
+#define REMOTE_SPINLOCK_TID_START REMOTE_SPINLOCK_NUM_PID
+
+/* Remote spinlock definitions. */
+
+typedef struct {
+	volatile uint32_t lock;
+} raw_remote_spinlock_t;
+
+typedef raw_remote_spinlock_t *_remote_spinlock_t;
+
+#define remote_spinlock_id_t const char *
+
+#if defined(CONFIG_REMOTE_SPINLOCK_MSM)
+int _remote_spin_lock_init(remote_spinlock_id_t, _remote_spinlock_t *lock);
+void _remote_spin_release_all(uint32_t pid);
+void _remote_spin_lock(_remote_spinlock_t *lock);
+void _remote_spin_unlock(_remote_spinlock_t *lock);
+int _remote_spin_trylock(_remote_spinlock_t *lock);
+int _remote_spin_release(_remote_spinlock_t *lock, uint32_t pid);
+int _remote_spin_owner(_remote_spinlock_t *lock);
+void _remote_spin_lock_rlock_id(_remote_spinlock_t *lock, uint32_t tid);
+void _remote_spin_unlock_rlock(_remote_spinlock_t *lock);
+int _remote_spin_get_hw_spinlocks_element(_remote_spinlock_t *lock);
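+
+/*
+ * Example: a minimal lock/unlock sketch for the API above; the lock id
+ * "my_lock_id" is a hypothetical platform-defined name:
+ *
+ *	_remote_spinlock_t lock;
+ *
+ *	if (!_remote_spin_lock_init("my_lock_id", &lock)) {
+ *		_remote_spin_lock(&lock);
+ *		(critical section shared with the remote processor)
+ *		_remote_spin_unlock(&lock);
+ *	}
+ */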
+#else
+static inline
+int _remote_spin_lock_init(remote_spinlock_id_t id, _remote_spinlock_t *lock)
+{
+	return -EINVAL;
+}
+static inline void _remote_spin_release_all(uint32_t pid) {}
+static inline void _remote_spin_lock(_remote_spinlock_t *lock) {}
+static inline void _remote_spin_unlock(_remote_spinlock_t *lock) {}
+static inline int _remote_spin_trylock(_remote_spinlock_t *lock)
+{
+	return -ENODEV;
+}
+static inline int _remote_spin_release(_remote_spinlock_t *lock, uint32_t pid)
+{
+	return -ENODEV;
+}
+static inline int _remote_spin_owner(_remote_spinlock_t *lock)
+{
+	return -ENODEV;
+}
+static inline void _remote_spin_lock_rlock_id(_remote_spinlock_t *lock,
+					      uint32_t tid) {}
+static inline void _remote_spin_unlock_rlock(_remote_spinlock_t *lock) {}
+static inline int _remote_spin_get_hw_spinlocks_element(
+		_remote_spinlock_t *lock)
+{
+	return -ENODEV;
+}
+#endif
+#endif /* __ASM__ARCH_QC_REMOTE_SPINLOCK_H */
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/include/linux/msm_rtb.h	2019-01-22 16:16:28.315289982 +0100
@@ -0,0 +1,84 @@
+/*
+ * Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#ifndef __MSM_RTB_H__
+#define __MSM_RTB_H__
+
+/*
+ * These numbers are used from the kernel command line and sysfs
+ * to control filtering. Remove items from here with extreme caution.
+ */
+enum logk_event_type {
+	LOGK_NONE = 0,
+	LOGK_READL = 1,
+	LOGK_WRITEL = 2,
+	LOGK_LOGBUF = 3,
+	LOGK_HOTPLUG = 4,
+	LOGK_CTXID = 5,
+	LOGK_TIMESTAMP = 6,
+	LOGK_L2CPREAD = 7,
+	LOGK_L2CPWRITE = 8,
+	LOGK_IRQ = 9,
+};
+
+#define LOGTYPE_NOPC 0x80
+
+struct msm_rtb_platform_data {
+	unsigned int size;
+};
+
+#if defined(CONFIG_QCOM_RTB)
+/*
+ * returns 1 if data was logged, 0 otherwise
+ */
+int uncached_logk_pc(enum logk_event_type log_type, void *caller,
+				void *data);
+
+/*
+ * returns 1 if data was logged, 0 otherwise
+ */
+int uncached_logk(enum logk_event_type log_type, void *data);
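+
+/*
+ * Example: a one-line sketch of logging a register write into the RTB;
+ * "io_addr" is a hypothetical value the caller wants recorded:
+ *
+ *	uncached_logk(LOGK_WRITEL, (void *)io_addr);
+ */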
+
+#define ETB_WAYPOINT  do { \
+				BRANCH_TO_NEXT_ISTR; \
+				nop(); \
+				BRANCH_TO_NEXT_ISTR; \
+				nop(); \
+			} while (0)
+
+#define BRANCH_TO_NEXT_ISTR  asm volatile("b .+4\n" : : : "memory")
+/*
+ * both the mb and the isb are needed to ensure enough waypoints for
+ * etb tracing
+ */
+#define LOG_BARRIER	do { \
+				mb(); \
+				isb();\
+			 } while (0)
+#else
+
+static inline int uncached_logk_pc(enum logk_event_type log_type,
+					void *caller,
+					void *data) { return 0; }
+
+static inline int uncached_logk(enum logk_event_type log_type,
+					void *data) { return 0; }
+
+#define ETB_WAYPOINT
+#define BRANCH_TO_NEXT_ISTR
+/*
+ * Due to a GCC bug, we need to have a nop here in order to prevent an extra
+ * read from being generated after the write.
+ */
+#define LOG_BARRIER		nop()
+#endif
+#endif
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/include/linux/msm-sps.h	2019-01-22 16:16:28.315289982 +0100
@@ -0,0 +1,1635 @@
+/* Copyright (c) 2011-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/* Smart-Peripheral-Switch (SPS) API. */
+
+#ifndef _SPS_H_
+#define _SPS_H_
+
+#include <linux/types.h>	/* u32 */
+
+#if defined(CONFIG_PHYS_ADDR_T_64BIT) || defined(CONFIG_ARM_LPAE)
+
+/* Returns the upper 4 bits of a 36-bit physical address */
+#define SPS_GET_UPPER_ADDR(addr) ((addr & 0xF00000000ULL) >> 32)
+
+/* Returns a 36-bit physical address from a 32-bit address and a
+ * flags word */
+#define DESC_FULL_ADDR(flags, addr) ((((phys_addr_t)flags & 0xF) << 32) | addr)
+
+/* Returns a flags word carrying the flags and the upper 4 address bits,
+ * built from the flags and a 36-bit physical address */
+#define DESC_FLAG_WORD(flags, addr) (((addr & 0xF00000000ULL) >> 32) | flags)
+
+#else
+
+#define SPS_GET_UPPER_ADDR(addr) (0)
+#define DESC_FULL_ADDR(flags, addr) (addr)
+#define DESC_FLAG_WORD(flags, addr) (flags)
+
+#endif
+
+/* Returns the upper 4 bits of a 36-bit physical address from a
+ * flags word */
+#define DESC_UPPER_ADDR(flags) ((flags & 0xF))
+
+/* Returns the lower 32 bits of a 36-bit physical address */
+#define SPS_GET_LOWER_ADDR(addr) ((u32)(addr & 0xFFFFFFFF))
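+
+/*
+ * Example: a round-trip sketch with the macros above; "pa" is a
+ * hypothetical 36-bit phys_addr_t:
+ *
+ *	u32 lo = SPS_GET_LOWER_ADDR(pa);
+ *	u32 flags = DESC_FLAG_WORD(SPS_IOVEC_FLAG_EOT, pa);
+ *	phys_addr_t full = DESC_FULL_ADDR(flags, lo);
+ *
+ * after which "full" equals the original "pa".
+ */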
+
+/* SPS device handle indicating use of system memory */
+#define SPS_DEV_HANDLE_MEM       (~0x0ul>>1)
+
+/* SPS device handle indicating use of BAM-DMA */
+
+/* SPS device handle invalid value */
+#define SPS_DEV_HANDLE_INVALID   ((unsigned long)0)
+
+/* BAM invalid IRQ value */
+#define SPS_IRQ_INVALID          0
+
+/* Invalid address value */
+#define SPS_ADDR_INVALID      ((unsigned long)0xDEADBEEF)
+
+/* Invalid peripheral device enumeration class */
+#define SPS_CLASS_INVALID     ((unsigned long)-1)
+
+/*
+ * This value specifies different configurations for an SPS connection.
+ * A non-default value instructs the SPS driver to search for the configuration
+ * in the fixed connection mapping table.
+ */
+#define SPS_CONFIG_DEFAULT       0
+
+/*
+ * This value instructs the SPS driver to use the default BAM-DMA channel
+ * threshold
+ */
+#define SPS_DMA_THRESHOLD_DEFAULT   0
+
+/* Flag bits supported by SPS hardware for struct sps_iovec */
+#define SPS_IOVEC_FLAG_INT  0x8000  /* Generate interrupt */
+#define SPS_IOVEC_FLAG_EOT  0x4000  /* Generate end-of-transfer indication */
+#define SPS_IOVEC_FLAG_EOB  0x2000  /* Generate end-of-block indication */
+#define SPS_IOVEC_FLAG_NWD  0x1000  /* notify when done */
+#define SPS_IOVEC_FLAG_CMD  0x0800  /* command descriptor */
+#define SPS_IOVEC_FLAG_LOCK  0x0400  /* pipe lock */
+#define SPS_IOVEC_FLAG_UNLOCK  0x0200  /* pipe unlock */
+#define SPS_IOVEC_FLAG_IMME 0x0100  /* immediate command descriptor */
+#define SPS_IOVEC_FLAG_NO_SUBMIT 0x0020  /* Do not submit descriptor to HW */
+#define SPS_IOVEC_FLAG_DEFAULT   0x0010  /* Use driver default */
+
+/* Maximum descriptor/iovec size */
+#define SPS_IOVEC_MAX_SIZE   (32 * 1024 - 1)  /* 32K-1 bytes due to HW limit */
+
+/* BAM device options flags */
+
+/*
+ * BAM will be configured and enabled at boot.  Otherwise, BAM will be
+ * configured and enabled when first pipe connect occurs.
+ */
+#define SPS_BAM_OPT_ENABLE_AT_BOOT  1UL
+/* BAM IRQ is disabled */
+#define SPS_BAM_OPT_IRQ_DISABLED    (1UL << 1)
+/* BAM peripheral is a BAM-DMA */
+#define SPS_BAM_OPT_BAMDMA          (1UL << 2)
+/* BAM IRQ is registered for apps wakeup */
+#define SPS_BAM_OPT_IRQ_WAKEUP      (1UL << 3)
+/* Ignore external block pipe reset */
+#define SPS_BAM_NO_EXT_P_RST        (1UL << 4)
+/* Don't enable local clock gating */
+#define SPS_BAM_NO_LOCAL_CLK_GATING (1UL << 5)
+/* Don't enable writeback cancel*/
+#define SPS_BAM_CANCEL_WB           (1UL << 6)
+/* BAM uses SMMU */
+#define SPS_BAM_SMMU_EN             (1UL << 9)
+/* Confirm resource status before access BAM*/
+#define SPS_BAM_RES_CONFIRM         (1UL << 7)
+/* Hold memory for BAM DMUX */
+#define SPS_BAM_HOLD_MEM            (1UL << 8)
+/* Use cached write pointer */
+#define SPS_BAM_CACHED_WP           (1UL << 10)
+
+/* BAM device management flags */
+
+/* BAM global device control is managed remotely */
+#define SPS_BAM_MGR_DEVICE_REMOTE   1UL
+/* BAM device supports multiple execution environments */
+#define SPS_BAM_MGR_MULTI_EE        (1UL << 1)
+/* BAM pipes are *not* allocated locally */
+#define SPS_BAM_MGR_PIPE_NO_ALLOC   (1UL << 2)
+/* BAM pipes are *not* configured locally */
+#define SPS_BAM_MGR_PIPE_NO_CONFIG  (1UL << 3)
+/* BAM pipes are *not* controlled locally */
+#define SPS_BAM_MGR_PIPE_NO_CTRL    (1UL << 4)
+/* "Globbed" management properties */
+#define SPS_BAM_MGR_NONE            \
+	(SPS_BAM_MGR_DEVICE_REMOTE | SPS_BAM_MGR_PIPE_NO_ALLOC | \
+	 SPS_BAM_MGR_PIPE_NO_CONFIG | SPS_BAM_MGR_PIPE_NO_CTRL)
+#define SPS_BAM_MGR_LOCAL           0
+#define SPS_BAM_MGR_LOCAL_SHARED    SPS_BAM_MGR_MULTI_EE
+#define SPS_BAM_MGR_REMOTE_SHARED   \
+	(SPS_BAM_MGR_DEVICE_REMOTE | SPS_BAM_MGR_MULTI_EE | \
+	 SPS_BAM_MGR_PIPE_NO_ALLOC)
+#define SPS_BAM_MGR_ACCESS_MASK     SPS_BAM_MGR_NONE
+
+/*
+ * BAM security configuration
+ */
+#define SPS_BAM_NUM_EES             4
+#define SPS_BAM_SEC_DO_NOT_CONFIG   0
+#define SPS_BAM_SEC_DO_CONFIG       0x0A434553
+
+/* BAM pipe selection */
+#define SPS_BAM_PIPE(n)             (1UL << (n))
+
+/* This enum specifies the operational mode for an SPS connection */
+enum sps_mode {
+	SPS_MODE_SRC = 0,  /* end point is the source (producer) */
+	SPS_MODE_DEST,	   /* end point is the destination (consumer) */
+};
+
+
+/*
+ * This enum is a set of bit flag options for SPS connection.
+ * The enums should be OR'd together to create the option set
+ * for the SPS connection.
+ */
+enum sps_option {
+	/*
+	 * Options to enable specific SPS hardware interrupts.
+	 * These bit flags are also used to indicate interrupt source
+	 * for the SPS_EVENT_IRQ event.
+	 */
+	SPS_O_DESC_DONE = 0x00000001,  /* Descriptor processed */
+	SPS_O_INACTIVE  = 0x00000002,  /* Inactivity timeout */
+	SPS_O_WAKEUP    = 0x00000004,  /* Peripheral wake up */
+	SPS_O_OUT_OF_DESC = 0x00000008,/* Out of descriptors */
+	SPS_O_ERROR     = 0x00000010,  /* Error */
+	SPS_O_EOT       = 0x00000020,  /* End-of-transfer */
+	SPS_O_RST_ERROR = 0x00000040,  /* Pipe reset unsuccessful error */
+	SPS_O_HRESP_ERROR = 0x00000080,/* Erroneous HResponse by AHB master */
+
+	/* Options to enable hardware features */
+	SPS_O_STREAMING = 0x00010000,  /* Enable streaming mode (no EOT) */
+	/* Use MTI/SETPEND instead of BAM interrupt */
+	SPS_O_IRQ_MTI   = 0x00020000,
+	/* NWD bit written with EOT for BAM2BAM producer pipe */
+	SPS_O_WRITE_NWD   = 0x00040000,
+	/* EOT set after pipe SW offset advanced */
+	SPS_O_LATE_EOT   = 0x00080000,
+
+	/* Options to enable software features */
+	/* Do not disable a pipe during disconnection */
+	SPS_O_NO_DISABLE      = 0x00800000,
+	/* Transfer operation should be polled */
+	SPS_O_POLL      = 0x01000000,
+	/* Disable queuing of transfer events for the connection end point */
+	SPS_O_NO_Q      = 0x02000000,
+	SPS_O_FLOWOFF   = 0x04000000,  /* Graceful halt */
+	/* SPS_O_WAKEUP will be disabled after triggered */
+	SPS_O_WAKEUP_IS_ONESHOT = 0x08000000,
+	/**
+	 * Client must read each descriptor from the FIFO
+	 * using sps_get_iovec()
+	 */
+	SPS_O_ACK_TRANSFERS = 0x10000000,
+	/* Connection is automatically enabled */
+	SPS_O_AUTO_ENABLE = 0x20000000,
+	/* DISABLE endpoint synchronization for config/enable/disable */
+	SPS_O_NO_EP_SYNC = 0x40000000,
+	/* Allow partial polling during IRQ mode */
+	SPS_O_HYBRID = 0x80000000,
+};
+
+/**
+ * This enum specifies BAM DMA channel priority.  Clients should use
+ * SPS_DMA_PRI_DEFAULT unless a specific priority is required.
+ */
+enum sps_dma_priority {
+	SPS_DMA_PRI_DEFAULT = 0,
+	SPS_DMA_PRI_LOW,
+	SPS_DMA_PRI_MED,
+	SPS_DMA_PRI_HIGH,
+};
+
+/*
+ * This enum specifies the ownership of a connection resource.
+ * Remote or shared ownership is only possible/meaningful on the processor
+ * that controls resource.
+ */
+enum sps_owner {
+	SPS_OWNER_LOCAL = 0x1,	/* Resource is owned by local processor */
+	SPS_OWNER_REMOTE = 0x2,	/* Resource is owned by a satellite processor */
+};
+
+/* This enum indicates the event associated with a client event trigger */
+enum sps_event {
+	SPS_EVENT_INVALID = 0,
+
+	SPS_EVENT_EOT,		/* End-of-transfer */
+	SPS_EVENT_DESC_DONE,	/* Descriptor processed */
+	SPS_EVENT_OUT_OF_DESC,	/* Out of descriptors */
+	SPS_EVENT_WAKEUP,	/* Peripheral wake up */
+	SPS_EVENT_FLOWOFF,	/* Graceful halt (idle) */
+	SPS_EVENT_INACTIVE,	/* Inactivity timeout */
+	SPS_EVENT_ERROR,	/* Error */
+	SPS_EVENT_RST_ERROR,    /* Pipe Reset unsuccessful */
+	SPS_EVENT_HRESP_ERROR,  /* Errorneous Hresponse by AHB Master*/
+	SPS_EVENT_MAX,
+};
+
+/*
+ * This enum specifies the event trigger mode and is an argument for the
+ * sps_register_event() function.
+ */
+enum sps_trigger {
+	/* Trigger with payload for callback */
+	SPS_TRIGGER_CALLBACK = 0,
+	/* Trigger without payload for wait or poll */
+	SPS_TRIGGER_WAIT,
+};
+
+/*
+ * This enum indicates the desired halting mechanism and is an argument for the
+ * sps_flow_off() function
+ */
+enum sps_flow_off {
+	SPS_FLOWOFF_FORCED = 0,	/* Force hardware into halt state */
+	/* Allow hardware to empty pipe before halting */
+	SPS_FLOWOFF_GRACEFUL,
+};
+
+/*
+ * This enum indicates the target memory heap and is an argument for the
+ * sps_mem_alloc() function.
+ */
+enum sps_mem {
+	SPS_MEM_LOCAL = 0,  /* SPS subsystem local (pipe) memory */
+	SPS_MEM_UC,	    /* Microcontroller (ARM7) local memory */
+};
+
+/*
+ * This enum indicates a timer control operation and is an argument for the
+ * sps_timer_ctrl() function.
+ */
+enum sps_timer_op {
+	SPS_TIMER_OP_CONFIG = 0,
+	SPS_TIMER_OP_RESET,
+/*   SPS_TIMER_OP_START,   Not supported by hardware yet */
+/*   SPS_TIMER_OP_STOP,    Not supported by hardware yet */
+	SPS_TIMER_OP_READ,
+};
+
+/*
+ * This enum indicates the inactivity timer operating mode and is an
+ * argument for the sps_timer_ctrl() function.
+ */
+enum sps_timer_mode {
+	SPS_TIMER_MODE_ONESHOT = 0,
+/*   SPS_TIMER_MODE_PERIODIC,    Not supported by hardware yet */
+};
+
+/* This enum indicates the cases when callback the user of BAM */
+enum sps_callback_case {
+	SPS_CALLBACK_BAM_ERROR_IRQ = 1,     /* BAM ERROR IRQ */
+	SPS_CALLBACK_BAM_HRESP_ERR_IRQ,	    /* Erroneous HResponse */
+	SPS_CALLBACK_BAM_TIMER_IRQ,	    /* Inactivity timer */
+	SPS_CALLBACK_BAM_RES_REQ,	    /* Request resource */
+	SPS_CALLBACK_BAM_RES_REL,	    /* Release resource */
+	SPS_CALLBACK_BAM_POLL,	            /* To poll each pipe */
+};
+
+/*
+ * This enum indicates the command type in a command element
+ */
+enum sps_command_type {
+	SPS_WRITE_COMMAND = 0,
+	SPS_READ_COMMAND,
+};
+
+/**
+ * struct msm_sps_platform_data - SPS Platform specific data.
+ * @bamdma_restricted_pipes - Bitmask of pipes restricted from local use.
+ *
+ */
+struct msm_sps_platform_data {
+	u32 bamdma_restricted_pipes;
+};
+
+/**
+ * This data type corresponds to the native I/O vector (BAM descriptor)
+ * supported by SPS hardware
+ *
+ * @addr - Buffer physical address.
+ * @size - Buffer size in bytes.
+ * @flags -Flag bitmask (see SPS_IOVEC_FLAG_ #defines).
+ *
+ */
+struct sps_iovec {
+	u32 addr;
+	u32 size:16;
+	u32 flags:16;
+};
+
+/**
+ * This data type corresponds to the native Command Element
+ * supported by SPS hardware
+ *
+ * @addr - register address.
+ * @command - command type.
+ * @data - for write command: content to be written into peripheral register.
+ *         for read command: dest addr to write peripheral register value to.
+ * @mask - register mask.
+ * @reserved - for future usage.
+ *
+ */
+struct sps_command_element {
+	u32 addr:24;
+	u32 command:8;
+	u32 data;
+	u32 mask;
+	u32 reserved;
+};
+
+/*
+ * BAM device's security configuration
+ */
+struct sps_bam_pipe_sec_config_props {
+	u32 pipe_mask;
+	u32 vmid;
+};
+
+struct sps_bam_sec_config_props {
+	/* Per-EE configuration - This is a pipe bit mask for each EE */
+	struct sps_bam_pipe_sec_config_props ees[SPS_BAM_NUM_EES];
+};
+
+/**
+ * This struct defines a BAM device. The client must memset() this struct to
+ * zero before writing device information.  Fields left at zero instruct
+ * the SPS driver to use general defaults or hardware/BIOS supplied values.
+ *
+ *
+ * @options - See SPS_BAM_OPT_* bit flag.
+ * @phys_addr - BAM base physical address (not peripheral address).
+ * @virt_addr - BAM base virtual address.
+ * @virt_size - For virtual mapping.
+ * @irq - IRQ enum for use in ISR vector install.
+ * @num_pipes - number of pipes. Can be read from hardware.
+ * @summing_threshold - BAM event threshold.
+ *
+ * @periph_class - Peripheral device enumeration class.
+ * @periph_dev_id - Peripheral global device ID.
+ * @periph_phys_addr - Peripheral base physical address, for BAM-DMA only.
+ * @periph_virt_addr - Peripheral base virtual address.
+ * @periph_virt_size - Size for virtual mapping.
+ *
+ * @callback - callback function for BAM user.
+ * @user - pointer to user data.
+ *
+ * @event_threshold - Pipe event threshold.
+ * @desc_size - Size (bytes) of descriptor FIFO.
+ * @data_size - Size (bytes) of data FIFO.
+ * @desc_mem_id - Heap ID for default descriptor FIFO allocations.
+ * @data_mem_id - Heap ID for default data FIFO allocations.
+ *
+ * @manage - BAM device management flags (see SPS_BAM_MGR_*).
+ * @restricted_pipes - Bitmask of pipes restricted from local use.
+ * @ee - Local execution environment index.
+ *
+ * @irq_gen_addr - MTI interrupt generation address. This configuration only
+ * applies to BAM rev 1 and 2 hardware. MTIs are only supported on BAMs when
+ * global config is controlled by a remote processor.
+ * NOTE: This address must correspond to the MTI associated with the "irq" IRQ
+ * enum specified above.
+ *
+ * @sec_config - must be set to SPS_BAM_SEC_DO_CONFIG to perform BAM security
+ * configuration.  Only the processor that manages the BAM is allowed to
+ * perform the configuration. The global (top-level) BAM interrupt will be
+ * assigned to the EE of the processor that manages the BAM.
+ *
+ * @p_sec_config_props - BAM device's security configuration
+ *
+ */
+struct sps_bam_props {
+
+	/* BAM device properties. */
+
+	u32 options;
+	phys_addr_t phys_addr;
+	void *virt_addr;
+	u32 virt_size;
+	u32 irq;
+	u32 num_pipes;
+	u32 summing_threshold;
+
+	/* Peripheral device properties */
+
+	u32 periph_class;
+	u32 periph_dev_id;
+	phys_addr_t periph_phys_addr;
+	void *periph_virt_addr;
+	u32 periph_virt_size;
+
+	/* Connection pipe parameter defaults. */
+
+	u32 event_threshold;
+	u32 desc_size;
+	u32 data_size;
+	u32 desc_mem_id;
+	u32 data_mem_id;
+
+	/* Feedback to BAM user */
+	void (*callback)(enum sps_callback_case, void *);
+	void *user;
+
+	/* Security properties */
+
+	u32 manage;
+	u32 restricted_pipes;
+	u32 ee;
+
+	/* Log Level property */
+	u32 ipc_loglevel;
+
+	/* BAM MTI interrupt generation */
+
+	u32 irq_gen_addr;
+
+	/* Security configuration properties */
+
+	u32 sec_config;
+	struct sps_bam_sec_config_props *p_sec_config_props;
+
+	/* Logging control */
+
+	bool constrained_logging;
+	u32 logging_number;
+};
+
+/**
+ *  This struct specifies memory buffer properties.
+ *
+ * @base - Buffer virtual address.
+ * @phys_base - Buffer physical address.
+ * @iova - Buffer IO virtual address, used when an SMMU is enabled.
+ * @size - Specifies buffer size (or maximum size).
+ * @min_size - If non-zero, specifies buffer minimum size.
+ *
+ */
+struct sps_mem_buffer {
+	void *base;
+	phys_addr_t phys_base;
+	unsigned long iova;
+	u32 size;
+	u32 min_size;
+};
+
+/**
+ * This struct defines a connection's end point and is used as the argument
+ * for the sps_connect(), sps_get_config(), and sps_set_config() functions.
+ * For system mode pipe, use SPS_DEV_HANDLE_MEM for the end point that
+ * corresponds to system memory.
+ *
+ * The client can force SPS to reserve a specific pipe on a BAM.
+ * If the pipe is in use, the sps_connect/set_config() will fail.
+ *
+ * @source - Source BAM.
+ * @source_iova - Source BAM IO virtual address, when an SMMU is in use.
+ * @src_pipe_index - BAM pipe index, 0 to 30.
+ * @destination - Destination BAM.
+ * @dest_iova - Destination BAM IO virtual address, when an SMMU is in use.
+ * @dest_pipe_index - BAM pipe index, 0 to 30.
+ *
+ * @mode - specifies which end (source or destination) of the connection will
+ * be controlled/referenced by the client.
+ *
+ * @config - This value is for future use and should be set to
+ * SPS_CONFIG_DEFAULT or left as default from sps_get_config().
+ *
+ * @options - OR'd connection end point options (see SPS_O defines).
+ *
+ * WARNING: The memory provided should be physically contiguous and non-cached.
+ * The user can use one of the following:
+ * 1. sps_alloc_mem() - allocated from pipe-memory.
+ * 2. dma_alloc_coherent() - allocate coherent DMA memory.
+ * 3. dma_map_single() - for using memory allocated by kmalloc().
+ *
+ * @desc - Descriptor FIFO.
+ * @data - Data FIFO (BAM-to-BAM mode only).
+ *
+ * @event_thresh - Pipe event threshold or derivative.
+ * @lock_group - The lock group this pipe belongs to.
+ *
+ * @sps_reserved - Reserved word - client must not modify.
+ *
+ */
+struct sps_connect {
+	unsigned long source;
+	unsigned long source_iova;
+	u32 src_pipe_index;
+	unsigned long destination;
+	unsigned long dest_iova;
+	u32 dest_pipe_index;
+
+	enum sps_mode mode;
+
+	u32 config;
+
+	enum sps_option options;
+
+	struct sps_mem_buffer desc;
+	struct sps_mem_buffer data;
+
+	u32 event_thresh;
+
+	u32 lock_group;
+
+	/* SETPEND/MTI interrupt generation parameters */
+
+	u32 irq_gen_addr;
+	u32 irq_gen_data;
+
+	u32 sps_reserved;
+
+};
+
+/**
+ * This struct defines a satellite connection's end point.  The client of the
+ * SPS driver on the satellite processor must call sps_get_config() to
+ * initialize a struct sps_connect, then copy the values from the struct
+ * sps_satellite to the struct sps_connect before making the sps_connect()
+ * call to the satellite SPS driver.
+ *
+ */
+struct sps_satellite {
+	/**
+	 * These values must be copied to either the source or destination
+	 * corresponding values in the connect struct.
+	 */
+	phys_addr_t dev;
+	u32 pipe_index;
+
+	/**
+	 * These values must be copied to the corresponding values in the
+	 * connect struct
+	 */
+	u32 config;
+	enum sps_option options;
+
+};
+
+/**
+ * This struct defines parameters for allocation of a BAM DMA channel. The
+ * client must memset() this struct to zero before writing allocation
+ * information.  A value of zero for uninitialized values will instruct
+ * the SPS driver to use defaults or "don't care".
+ *
+ * @dev - Associated BAM device handle, or SPS_DEV_HANDLE_DMA.
+ *
+ * @src_owner - Source owner processor ID.
+ * @dest_owner - Destination owner processor ID.
+ *
+ */
+struct sps_alloc_dma_chan {
+	unsigned long dev;
+
+	/* BAM DMA channel configuration parameters */
+
+	u32 threshold;
+	enum sps_dma_priority priority;
+
+	/**
+	 * Owner IDs are global host processor identifiers used by the system
+	 * SROT when establishing execution environments.
+	 */
+	u32 src_owner;
+	u32 dest_owner;
+
+};
+
+/**
+ * This struct defines parameters for an allocated BAM DMA channel.
+ *
+ * @dev - BAM DMA device handle.
+ * @dest_pipe_index - Destination/input/write pipe index.
+ * @src_pipe_index - Source/output/read pipe index.
+ *
+ */
+struct sps_dma_chan {
+	unsigned long dev;
+	u32 dest_pipe_index;
+	u32 src_pipe_index;
+};
+
+/**
+ * This struct is an argument passed payload when triggering a callback event
+ * object registered for an SPS connection end point.
+ *
+ * @user - Pointer registered with sps_register_event().
+ *
+ * @event_id - Which event.
+ *
+ * @iovec - The associated I/O vector. If the end point is a system-mode
+ * producer, the size will reflect the actual number of bytes written to the
+ * buffer by the pipe. NOTE: If this I/O vector was part of a set submitted to
+ * sps_transfer(), then the vector array itself will be updated with all of
+ * the actual counts.
+ *
+ * @user - Pointer registered with the transfer.
+ *
+ */
+struct sps_event_notify {
+	void *user;
+
+	enum sps_event event_id;
+
+	/* Data associated with the event */
+
+	union {
+		/* Data for SPS_EVENT_IRQ */
+		struct {
+			u32 mask;
+		} irq;
+
+		/* Data for SPS_EVENT_EOT or SPS_EVENT_DESC_DONE */
+
+		struct {
+			struct sps_iovec iovec;
+			void *user;
+		} transfer;
+
+		/* Data for SPS_EVENT_ERROR */
+
+		struct {
+			u32 status;
+		} err;
+
+	} data;
+};
+
+/**
+ * This struct defines event registration parameters and is used as the
+ * argument for the sps_register_event() function.
+ *
+ * @options - Event options that will trigger the event object.
+ * @mode - Event trigger mode.
+ *
+ * @xfer_done - a pointer to a completion object. NULL if not in use.
+ *
+ * @callback - a callback to call on completion. NULL if not in use.
+ *
+ * @user - User pointer that will be provided in event callback data.
+ *
+ */
+struct sps_register_event {
+	enum sps_option options;
+	enum sps_trigger mode;
+	struct completion *xfer_done;
+	void (*callback)(struct sps_event_notify *notify);
+	void *user;
+};
+
+/**
+ * This struct defines a system memory transfer's parameters and is used as the
+ * argument for the sps_transfer() function.
+ *
+ * @iovec_phys - Physical address of I/O vectors buffer.
+ * @iovec - Pointer to I/O vectors buffer.
+ * @iovec_count - Number of I/O vectors.
+ * @user - User pointer passed in callback event.
+ *
+ */
+struct sps_transfer {
+	phys_addr_t iovec_phys;
+	struct sps_iovec *iovec;
+	u32 iovec_count;
+	void *user;
+};
+
+/**
+ * This struct defines timer control operation parameters and is used as an
+ * argument for the sps_timer_ctrl() function.
+ *
+ * @op - Timer control operation.
+ * @timeout_msec - Inactivity timeout (msec).
+ *
+ */
+struct sps_timer_ctrl {
+	enum sps_timer_op op;
+
+	/**
+	 * The following configuration parameters must be set when the timer
+	 * control operation is SPS_TIMER_OP_CONFIG.
+	 */
+	enum sps_timer_mode mode;
+	u32 timeout_msec;
+};
+
+/**
+ * This struct defines a timer control operation result and is used as an
+ * argument for the sps_timer_ctrl() function.
+ */
+struct sps_timer_result {
+	u32 current_timer;
+};
+
+
+/*----------------------------------------------------------------------------
+ * Functions specific to sps interface
+ * -------------------------------------------------------------------------*/
+struct sps_pipe;	/* Forward declaration */
+
+#ifdef CONFIG_SPS
+/**
+ * Register a BAM device
+ *
+ * This function registers a BAM device with the SPS driver. For each
+ * peripheral that includes a BAM, the peripheral driver must register
+ * the BAM with the SPS driver.
+ *
+ * The peripheral driver must remain attached to the SPS driver until the
+ * BAM is deregistered; otherwise, the system may attempt to unload the
+ * SPS driver and the BAM registrations would be lost.
+ *
+ * @bam_props - Pointer to struct for BAM device properties.
+ *
+ * @dev_handle - Device handle will be written to this location (output).
+ *
+ * @return 0 on success, negative value on error
+ *
+ */
+int sps_register_bam_device(const struct sps_bam_props *bam_props,
+			    unsigned long *dev_handle);
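+
+/*
+ * Example: a minimal registration sketch; "bam_pa" and "bam_irq" are
+ * hypothetical values taken from platform resources:
+ *
+ *	struct sps_bam_props props;
+ *	unsigned long bam_handle;
+ *
+ *	memset(&props, 0, sizeof(props));
+ *	props.phys_addr = bam_pa;
+ *	props.irq = bam_irq;
+ *	props.manage = SPS_BAM_MGR_LOCAL;
+ *
+ *	if (sps_register_bam_device(&props, &bam_handle))
+ *		(handle the error)
+ */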
+
+/**
+ * Deregister a BAM device
+ *
+ * This function deregisters a BAM device from the SPS driver. The peripheral
+ * driver should deregister a BAM when the peripheral driver is shut down or
+ * when BAM use should be disabled.
+ *
+ * A BAM cannot be deregistered if any of its pipes is in an active connection.
+ *
+ * When all BAMs have been deregistered, the system is free to unload the
+ * SPS driver.
+ *
+ * @dev_handle - BAM device handle.
+ *
+ * @return 0 on success, negative value on error
+ *
+ */
+int sps_deregister_bam_device(unsigned long dev_handle);
+
+/**
+ * Allocate client state context
+ *
+ * This function allocates and initializes a client state context struct.
+ *
+ * @return pointer to client state context
+ *
+ */
+struct sps_pipe *sps_alloc_endpoint(void);
+
+/**
+ * Free client state context
+ *
+ * This function de-initializes and frees a client state context struct.
+ *
+ * @h - client context for SPS connection end point
+ *
+ * @return 0 on success, negative value on error
+ *
+ */
+int sps_free_endpoint(struct sps_pipe *h);
+
+/**
+ * Get the configuration parameters for an SPS connection end point
+ *
+ * This function retrieves the configuration parameters for an SPS connection
+ * end point.
+ * This function may be called before the end point is connected (before
+ * sps_connect is called). This allows the client to specify parameters before
+ * the connection is established.
+ *
+ * The client must call this function to fill its struct sps_connect
+ * before modifying values and passing the struct to sps_set_config().
+ *
+ * @h - client context for SPS connection end point
+ *
+ * @config - Pointer to buffer for the end point's configuration parameters.
+ * Must not be NULL.
+ *
+ * @return 0 on success, negative value on error
+ *
+ */
+int sps_get_config(struct sps_pipe *h, struct sps_connect *config);
+
+/**
+ * Allocate memory from the SPS Pipe-Memory.
+ *
+ * @h - client context for SPS connection end point
+ *
+ * @mem - memory type - N/A.
+ *
+ * @mem_buffer - Pointer to struct for allocated memory properties.
+ *
+ * @return 0 on success, negative value on error
+ *
+ */
+int sps_alloc_mem(struct sps_pipe *h, enum sps_mem mem,
+		  struct sps_mem_buffer *mem_buffer);
+
+/**
+ * Free memory from the SPS Pipe-Memory.
+ *
+ * @h - client context for SPS connection end point
+ *
+ * @mem_buffer - Pointer to struct for allocated memory properties.
+ *
+ * @return 0 on success, negative value on error
+ *
+ */
+int sps_free_mem(struct sps_pipe *h, struct sps_mem_buffer *mem_buffer);
+
+/**
+ * Connect an SPS connection end point
+ *
+ * This function creates a connection between two SPS peripherals or between
+ * an SPS peripheral and the local host processor (via system memory, end
+ * point SPS_DEV_HANDLE_MEM). Establishing the connection includes
+ * initialization of the SPS hardware and allocation of any other connection
+ * resources (buffer memory, etc.).
+ *
+ * This function requires the client to specify both the source and
+ * destination end points of the SPS connection. However, the handle
+ * returned applies only to the end point of the connection that the client
+ * controls. The end point under control must be specified by the
+ * enum sps_mode mode argument, either SPS_MODE_SRC, SPS_MODE_DEST, or
+ * SPS_MODE_CTL. Note that SPS_MODE_CTL is only supported for I/O
+ * accelerator connections, and only a limited set of control operations are
+ * allowed (TBD).
+ *
+ * For a connection involving system memory
+ * (SPS_DEV_HANDLE_MEM), the peripheral end point must be
+ * specified. For example, SPS_MODE_SRC must be specified for a
+ * BAM-to-system connection, since the BAM pipe is the data
+ * producer.
+ *
+ * For a specific peripheral-to-peripheral connection, there may be more than
+ * one required configuration. For example, there might be high-performance
+ * and low-power configurations for a connection between the two peripherals.
+ * The config argument allows the client to specify different configurations,
+ * which may require different system resource allocations and hardware
+ * initialization.
+ *
+ * A client is allowed to create one and only one connection for its
+ * struct sps_pipe. The handle is used to identify the connection end point
+ * in subsequent SPS driver calls. A specific connection source or
+ * destination end point can be associated with one and only one
+ * struct sps_pipe.
+ *
+ * The client must establish an open device handle to the SPS. To do so, the
+ * client must attach to the SPS driver and open the SPS device by calling
+ * the following functions.
+ *
+ * @h - client context for SPS connection end point
+ *
+ * @connect - Pointer to connection parameters
+ *
+ * @return 0 on success, negative value on error
+ *
+ */
+int sps_connect(struct sps_pipe *h, struct sps_connect *connect);
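+
+/*
+ * Example: the get-config/modify/connect sequence sketched for a
+ * BAM-to-system-memory pipe (the BAM is the producer, so SPS_MODE_SRC);
+ * "bam_handle" and the pipe index are hypothetical, and descriptor FIFO
+ * sizing is omitted:
+ *
+ *	struct sps_pipe *pipe = sps_alloc_endpoint();
+ *	struct sps_connect connect;
+ *
+ *	sps_get_config(pipe, &connect);
+ *	connect.source = bam_handle;
+ *	connect.src_pipe_index = 1;
+ *	connect.destination = SPS_DEV_HANDLE_MEM;
+ *	connect.mode = SPS_MODE_SRC;
+ *	connect.options = SPS_O_EOT;
+ *	sps_alloc_mem(pipe, SPS_MEM_LOCAL, &connect.desc);
+ *	if (sps_connect(pipe, &connect))
+ *		(handle the error)
+ */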
+
+/**
+ * Disconnect an SPS connection end point
+ *
+ * This function disconnects an SPS connection end point.
+ * The SPS hardware associated with that end point will be disabled.
+ * For a connection involving system memory (SPS_DEV_HANDLE_MEM), all
+ * connection resources are deallocated. For a peripheral-to-peripheral
+ * connection, the resources associated with the connection will not be
+ * deallocated until both end points are closed.
+ *
+ * The client must call sps_connect() for the handle before calling
+ * this function.
+ *
+ * @h - client context for SPS connection end point
+ *
+ * @return 0 on success, negative value on error
+ *
+ */
+int sps_disconnect(struct sps_pipe *h);
+
+/**
+ * Register an event object for an SPS connection end point
+ *
+ * This function registers a callback event object for an SPS connection
+ * end point. The registered event object will be triggered for the set of
+ * events specified in reg->options that are enabled for the end point.
+ *
+ * There can only be one registered event object for each event. If an event
+ * object is already registered for an event, it will be replaced. If
+ * reg->event handle is NULL, then any registered event object for the
+ * event will be deregistered. Option bits in reg->options not associated
+ * with events are ignored.
+ *
+ * The client must call sps_connect() for the handle before calling
+ * this function.
+ *
+ * @h - client context for SPS connection end point
+ *
+ * @reg - Pointer to event registration parameters
+ *
+ * @return 0 on success, negative value on error
+ *
+ */
+int sps_register_event(struct sps_pipe *h, struct sps_register_event *reg);
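+
+/*
+ * Illustrative sketch (assumed usage): registering a callback for EOT
+ * events. The struct sps_register_event field names (options, mode,
+ * callback) and the SPS_TRIGGER_CALLBACK enumerator are assumptions
+ * based on the definitions earlier in this header:
+ *
+ *	static void xfer_done(struct sps_event_notify *notify)
+ *	{
+ *		pr_debug("transfer complete\n");
+ *	}
+ *
+ *	struct sps_register_event reg = {
+ *		.options = SPS_O_EOT,
+ *		.mode = SPS_TRIGGER_CALLBACK,
+ *		.callback = xfer_done,
+ *	};
+ *	sps_register_event(pipe, &reg);
+ */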
+
+/**
+ * Perform a single DMA transfer on an SPS connection end point
+ *
+ * This function submits a DMA transfer request consisting of a single buffer
+ * for an SPS connection end point associated with a peripheral-to/from-memory
+ * connection. The request will be submitted immediately to hardware if the
+ * hardware is idle (data flow off, no other pending transfers). Otherwise, it
+ * will be queued for later handling in the SPS driver work loop.
+ *
+ * The data buffer must be DMA ready. The client is responsible for ensuring
+ * physically contiguous memory, cache maintenance, and memory barriers. For
+ * more information, see Appendix A.
+ *
+ * The client must not modify the data buffer until the completion indication is
+ * received.
+ *
+ * This function cannot be used if transfer queuing is disabled (see option
+ * SPS_O_NO_Q). The client must set the SPS_O_EOT option to receive a callback
+ * event trigger when the transfer is complete. The SPS driver will ensure the
+ * appropriate flags in the I/O vectors are set to generate the completion
+ * indication.
+ *
+ * The return value from this function may indicate that an error occurred.
+ * Possible causes include invalid arguments.
+ *
+ * @h - client context for SPS connection end point
+ *
+ * @addr - Physical address of buffer to transfer.
+ *
+ * WARNING: The memory provided should be physically contiguous and
+ * non-cached.
+ *
+ * The user can use one of the following:
+ * 1. sps_alloc_mem() - allocate from pipe-memory.
+ * 2. dma_alloc_coherent() - allocate coherent DMA memory.
+ * 3. dma_map_single() - for memory allocated by kmalloc().
+ *
+ * @size - Size in bytes of buffer to transfer
+ *
+ * @user - User pointer that will be returned to the user as part of the
+ *  event payload
+ *
+ * @flags - I/O vector flags for this transfer
+ *
+ * @return 0 on success, negative value on error
+ *
+ */
+int sps_transfer_one(struct sps_pipe *h, phys_addr_t addr, u32 size,
+		     void *user, u32 flags);
+
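+/*
+ * Illustrative sketch (assumed usage): submitting one DMA-ready buffer.
+ * dma_alloc_coherent() satisfies the contiguous, non-cached requirement
+ * noted above; SPS_IOVEC_FLAG_EOT is assumed to be the EOT descriptor
+ * flag defined earlier in this header. The buffer must be left
+ * untouched until the EOT event arrives:
+ *
+ *	dma_addr_t phys;
+ *	void *buf = dma_alloc_coherent(dev, size, &phys, GFP_KERNEL);
+ *
+ *	if (!buf)
+ *		return -ENOMEM;
+ *	rc = sps_transfer_one(pipe, phys, size, NULL, SPS_IOVEC_FLAG_EOT);
+ */
+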
+/**
+ * Read event queue for an SPS connection end point
+ *
+ * This function reads event queue for an SPS connection end point.
+ *
+ * @h - client context for SPS connection end point
+ *
+ * @event - pointer to client's event data buffer
+ *
+ * @return 0 on success, negative value on error
+ *
+ */
+int sps_get_event(struct sps_pipe *h, struct sps_event_notify *event);
+
+/**
+ * Get processed I/O vector (completed transfers)
+ *
+ * This function fetches the next processed I/O vector.
+ *
+ * @h - client context for SPS connection end point
+ *
+ * @iovec - Pointer to I/O vector struct (output).
+ * This struct will be zeroed if there are no more processed I/O vectors.
+ *
+ * @return 0 on success, negative value on error
+ *
+ */
+int sps_get_iovec(struct sps_pipe *h, struct sps_iovec *iovec);
+
+/**
+ * Enable an SPS connection end point
+ *
+ * This function enables an SPS connection end point.
+ *
+ * @h - client context for SPS connection end point
+ *
+ * @return 0 on success, negative value on error
+ *
+ */
+int sps_flow_on(struct sps_pipe *h);
+
+/**
+ * Disable an SPS connection end point
+ *
+ * This function disables an SPS connection end point.
+ *
+ * @h - client context for SPS connection end point
+ *
+ * @mode - Desired mode for disabling pipe data flow
+ *
+ * @return 0 on success, negative value on error
+ *
+ */
+int sps_flow_off(struct sps_pipe *h, enum sps_flow_off mode);
+
+/**
+ * Perform a Multiple DMA transfer on an SPS connection end point
+ *
+ * This function submits a DMA transfer request for an SPS connection end point
+ * associated with a peripheral-to/from-memory connection. The request will be
+ * submitted immediately to hardware if the hardware is idle (data flow off, no
+ * other pending transfers). Otherwise, it will be queued for later handling in
+ * the SPS driver work loop.
+ *
+ * The data buffers referenced by the I/O vectors must be DMA ready.
+ * The client is responsible for ensuring physically contiguous memory,
+ * any cache maintenance, and memory barriers. For more information,
+ * see Appendix A.
+ *
+ * The I/O vectors must specify physical addresses for the referenced buffers.
+ *
+ * The client must not modify the data buffers referenced by I/O vectors until
+ * the completion indication is received.
+ *
+ * If transfer queuing is disabled (see option SPS_O_NO_Q), the client is
+ * responsible for setting the appropriate flags in the I/O vectors to generate
+ * the completion indication. Also, the client is responsible for enabling the
+ * appropriate connection callback event options for completion indication (see
+ * sps_connect(), sps_set_config()).
+ *
+ * If transfer queuing is enabled, the client must set the SPS_O_EOT option to
+ * receive a callback event trigger when the transfer is complete. The SPS
+ * driver will ensure the appropriate flags in the I/O vectors are set to
+ * generate the completion indication. The client must not set any flags in the
+ * I/O vectors, as this may cause the SPS driver to become out of sync with the
+ * hardware.
+ *
+ * The return value from this function may indicate that an error occurred.
+ * Possible causes include invalid arguments. If transfer queuing is disabled,
+ * an error will occur if the pipe is already processing a transfer.
+ *
+ * @h - client context for SPS connection end point
+ *
+ * @transfer - Pointer to transfer parameter struct
+ *
+ * @return 0 on success, negative value on error
+ *
+ */
+int sps_transfer(struct sps_pipe *h, struct sps_transfer *transfer);
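+
+/*
+ * Illustrative sketch (assumed usage): a two-buffer transfer with
+ * queuing enabled, so descriptor flags are left clear for the driver
+ * to fill in. The struct sps_transfer field names (iovec, iovec_count)
+ * are assumptions based on the definitions earlier in this header:
+ *
+ *	struct sps_iovec iov[2] = {
+ *		{ .addr = phys0, .size = len0, .flags = 0 },
+ *		{ .addr = phys1, .size = len1, .flags = 0 },
+ *	};
+ *	struct sps_transfer xfer = {
+ *		.iovec = iov,
+ *		.iovec_count = 2,
+ *	};
+ *	rc = sps_transfer(pipe, &xfer);
+ */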
+
+/**
+ * Determine whether an SPS connection end point FIFO is empty
+ *
+ * This function returns the empty state of an SPS connection end point.
+ *
+ * @h - client context for SPS connection end point
+ *
+ * @empty - pointer to client's empty status word (boolean)
+ *
+ * @return 0 on success, negative value on error
+ *
+ */
+int sps_is_pipe_empty(struct sps_pipe *h, u32 *empty);
+
+/**
+ * Reset an SPS BAM device
+ *
+ * This function resets an SPS BAM device.
+ *
+ * @dev - device handle for the BAM
+ *
+ * @return 0 on success, negative value on error
+ *
+ */
+int sps_device_reset(unsigned long dev);
+
+/**
+ * Set the configuration parameters for an SPS connection end point
+ *
+ * This function sets the configuration parameters for an SPS connection
+ * end point. This function may be called before the end point is connected
+ * (before sps_connect is called). This allows the client to specify
+ * parameters before the connection is established. The client is allowed
+ * to pre-allocate resources and override driver defaults.
+ *
+ * The client must call sps_get_config() to fill its struct sps_connect
+ * before modifying values and passing the struct to this function.
+ * Only those parameters that differ from the current configuration will
+ * be processed.
+ *
+ * @h - client context for SPS connection end point
+ *
+ * @config - Pointer to the end point's new configuration parameters.
+ *
+ * @return 0 on success, negative value on error
+ *
+ */
+int sps_set_config(struct sps_pipe *h, struct sps_connect *config);
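+
+/*
+ * Illustrative sketch (assumed usage) of the required get/modify/set
+ * sequence; the options field name is an assumption:
+ *
+ *	struct sps_connect config;
+ *
+ *	sps_get_config(pipe, &config);
+ *	config.options |= SPS_O_NO_Q;
+ *	sps_set_config(pipe, &config);
+ */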
+
+/**
+ * Set ownership of an SPS connection end point
+ *
+ * This function sets the ownership of an SPS connection end point to
+ * either local (default) or non-local. This function is used to
+ * retrieve the struct sps_connect data that must be used by a
+ * satellite processor when calling sps_connect().
+ *
+ * Non-local ownership is only possible/meaningful on the processor
+ * that controls resource allocations (apps processor). Setting ownership
+ * to non-local on a satellite processor will fail.
+ *
+ * Setting ownership from non-local to local will succeed only if the
+ * owning satellite processor has properly brought the end point to
+ * an idle condition.
+ *
+ * This function will succeed if the connection end point is already in
+ * the specified ownership state.
+ *
+ * @h - client context for SPS connection end point
+ *
+ * @owner - New ownership of the connection end point
+ *
+ * @connect - Pointer to buffer for satellite processor connect data.
+ *  Can be NULL to avoid retrieving the connect data. Will be ignored
+ *  if the end point ownership is set to local.
+ *
+ * @return 0 on success, negative value on error
+ *
+ */
+int sps_set_owner(struct sps_pipe *h, enum sps_owner owner,
+		  struct sps_satellite *connect);
+
+#ifdef CONFIG_SPS_SUPPORT_BAMDMA
+/**
+ * Allocate a BAM DMA channel
+ *
+ * This function allocates a BAM DMA channel. A "BAM DMA" is a special
+ * DMA peripheral with a BAM front end. The DMA peripheral acts as a conduit
+ * for data to flow into a consumer pipe and then out of a producer pipe.
+ * Its primary purpose is to serve as a path for interprocessor
+ * communication that allows each processor to control and protect its
+ * own memory space.
+ *
+ * @alloc - Pointer to struct for BAM DMA channel allocation properties.
+ *
+ * @chan - Allocated channel information will be written to this
+ *  location (output).
+ *
+ * @return 0 on success, negative value on error
+ *
+ */
+int sps_alloc_dma_chan(const struct sps_alloc_dma_chan *alloc,
+		       struct sps_dma_chan *chan);
+
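+/*
+ * Illustrative sketch (assumed usage): allocating a channel on the
+ * BAM-DMA for later sps_connect() calls. The field names of both
+ * structs are assumptions based on the definitions earlier in this
+ * header:
+ *
+ *	struct sps_alloc_dma_chan alloc = {
+ *		.dev = sps_dma_get_bam_handle(),
+ *	};
+ *	struct sps_dma_chan chan;
+ *
+ *	if (sps_alloc_dma_chan(&alloc, &chan))
+ *		return -ENODEV;
+ */
+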
+/**
+ * Free a BAM DMA channel
+ *
+ * This function frees a BAM DMA channel.
+ *
+ * @chan - Pointer to information for channel to free
+ *
+ * @return 0 on success, negative value on error
+ *
+ */
+int sps_free_dma_chan(struct sps_dma_chan *chan);
+
+/**
+ * Get the BAM handle for BAM-DMA.
+ *
+ * The BAM handle should be used as the source/destination in sps_connect().
+ *
+ * @return handle on success, zero on error
+ *
+ */
+unsigned long sps_dma_get_bam_handle(void);
+
+/**
+ * Free the BAM handle for BAM-DMA.
+ *
+ */
+void sps_dma_free_bam_handle(unsigned long h);
+#else
+static inline int sps_alloc_dma_chan(const struct sps_alloc_dma_chan *alloc,
+		       struct sps_dma_chan *chan)
+{
+	return -EPERM;
+}
+
+static inline int sps_free_dma_chan(struct sps_dma_chan *chan)
+{
+	return -EPERM;
+}
+
+static inline unsigned long sps_dma_get_bam_handle(void)
+{
+	return 0;
+}
+
+static inline void sps_dma_free_bam_handle(unsigned long h)
+{
+}
+#endif
+
+/**
+ * Get number of free transfer entries for an SPS connection end point
+ *
+ * This function returns the number of free transfer entries for an
+ * SPS connection end point.
+ *
+ * @h - client context for SPS connection end point
+ *
+ * @count - pointer to count status
+ *
+ * @return 0 on success, negative value on error
+ *
+ */
+int sps_get_free_count(struct sps_pipe *h, u32 *count);
+
+/**
+ * Perform timer control
+ *
+ * This function performs timer control operations.
+ *
+ * @h - client context for SPS connection end point
+ *
+ * @timer_ctrl - Pointer to timer control specification
+ *
+ * @timer_result - Pointer to buffer for timer operation result.
+ *  This argument can be NULL if no result is expected for the operation.
+ *  If non-NULL, the current timer value will always be provided.
+ *
+ * @return 0 on success, negative value on error
+ *
+ */
+int sps_timer_ctrl(struct sps_pipe *h,
+		   struct sps_timer_ctrl *timer_ctrl,
+		   struct sps_timer_result *timer_result);
+
+/**
+ * Find the handle of a BAM device based on the physical address
+ *
+ * This function finds a BAM device in the BAM registration list that
+ * matches the specified physical address, and returns its handle.
+ *
+ * @phys_addr - physical address of the BAM
+ *
+ * @handle - device handle of the BAM (output)
+ *
+ * @return 0 on success, negative value on error
+ *
+ */
+int sps_phy2h(phys_addr_t phys_addr, unsigned long *handle);
+
+/**
+ * Setup desc/data FIFO for bam-to-bam connection
+ *
+ * @mem_buffer - Pointer to struct for allocated memory properties.
+ *
+ * @addr - address of FIFO
+ *
+ * @size - FIFO size
+ *
+ * @use_offset - use address offset instead of absolute address
+ *
+ * @return 0 on success, negative value on error
+ *
+ */
+int sps_setup_bam2bam_fifo(struct sps_mem_buffer *mem_buffer,
+		  u32 addr, u32 size, int use_offset);
+
+/**
+ * Get the number of unused descriptors in the descriptor FIFO
+ * of a pipe
+ *
+ * @h - client context for SPS connection end point
+ *
+ * @desc_num - number of unused descriptors
+ *
+ * @return 0 on success, negative value on error
+ *
+ */
+int sps_get_unused_desc_num(struct sps_pipe *h, u32 *desc_num);
+
+/**
+ * Get the debug info of BAM registers and descriptor FIFOs
+ *
+ * @dev - BAM device handle
+ *
+ * @option - debugging option
+ *
+ * @para - parameter used for an option (such as pipe combination)
+ *
+ * @tb_sel - testbus selection
+ *
+ * @desc_sel - selection of descriptors
+ *
+ * @return 0 on success, negative value on error
+ *
+ */
+int sps_get_bam_debug_info(unsigned long dev, u32 option, u32 para,
+		u32 tb_sel, u32 desc_sel);
+
+/**
+ * Vote for or relinquish BAM DMA clock
+ *
+ * @clk_on - to turn on or turn off the clock
+ *
+ * @return 0 on success, negative value on error
+ *
+ */
+int sps_ctrl_bam_dma_clk(bool clk_on);
+
+/*
+ * sps_pipe_reset - reset a pipe of a BAM.
+ * @dev:	BAM device handle
+ * @pipe:	pipe index
+ *
+ * This function resets a pipe of a BAM.
+ *
+ * Return: 0 on success, negative value on error
+ */
+int sps_pipe_reset(unsigned long dev, u32 pipe);
+
+/*
+ * sps_pipe_disable - disable a pipe of a BAM.
+ * @dev:	BAM device handle
+ * @pipe:	pipe index
+ *
+ * This function disables a pipe of a BAM.
+ *
+ * Return: 0 on success, negative value on error
+ */
+int sps_pipe_disable(unsigned long dev, u32 pipe);
+
+/*
+ * sps_pipe_pending_desc - checking pending descriptor.
+ * @dev:	BAM device handle
+ * @pipe:	pipe index
+ * @pending:	indicate if there is any pending descriptor.
+ *
+ * This function checks if a pipe of a BAM has any pending descriptor.
+ *
+ * Return: 0 on success, negative value on error
+ */
+int sps_pipe_pending_desc(unsigned long dev, u32 pipe, bool *pending);
+
+/*
+ * sps_bam_process_irq - process IRQ of a BAM.
+ * @dev:	BAM device handle
+ *
+ * This function processes any pending IRQ of a BAM.
+ *
+ * Return: 0 on success, negative value on error
+ */
+int sps_bam_process_irq(unsigned long dev);
+
+/*
+ * sps_get_bam_addr - get address info of a BAM.
+ * @dev:	BAM device handle
+ * @base:	beginning address
+ * @size:	address range size
+ *
+ * This function returns the address info of a BAM.
+ *
+ * Return: 0 on success, negative value on error
+ */
+int sps_get_bam_addr(unsigned long dev, phys_addr_t *base,
+				u32 *size);
+
+/*
+ * sps_pipe_inject_zlt - inject a ZLT with EOT.
+ * @dev:	BAM device handle
+ * @pipe_index:	pipe index
+ *
+ * This function injects a ZLT with EOT for a pipe of a BAM.
+ *
+ * Return: 0 on success, negative value on error
+ */
+int sps_pipe_inject_zlt(unsigned long dev, u32 pipe_index);
+#else
+static inline int sps_register_bam_device(const struct sps_bam_props
+			*bam_props, unsigned long *dev_handle)
+{
+	return -EPERM;
+}
+
+static inline int sps_deregister_bam_device(unsigned long dev_handle)
+{
+	return -EPERM;
+}
+
+static inline struct sps_pipe *sps_alloc_endpoint(void)
+{
+	return NULL;
+}
+
+static inline int sps_free_endpoint(struct sps_pipe *h)
+{
+	return -EPERM;
+}
+
+static inline int sps_get_config(struct sps_pipe *h, struct sps_connect *config)
+{
+	return -EPERM;
+}
+
+static inline int sps_alloc_mem(struct sps_pipe *h, enum sps_mem mem,
+		  struct sps_mem_buffer *mem_buffer)
+{
+	return -EPERM;
+}
+
+static inline int sps_free_mem(struct sps_pipe *h,
+				struct sps_mem_buffer *mem_buffer)
+{
+	return -EPERM;
+}
+
+static inline int sps_connect(struct sps_pipe *h, struct sps_connect *connect)
+{
+	return -EPERM;
+}
+
+static inline int sps_disconnect(struct sps_pipe *h)
+{
+	return -EPERM;
+}
+
+static inline int sps_register_event(struct sps_pipe *h,
+					struct sps_register_event *reg)
+{
+	return -EPERM;
+}
+
+static inline int sps_transfer_one(struct sps_pipe *h, phys_addr_t addr,
+					u32 size, void *user, u32 flags)
+{
+	return -EPERM;
+}
+
+static inline int sps_get_event(struct sps_pipe *h,
+				struct sps_event_notify *event)
+{
+	return -EPERM;
+}
+
+static inline int sps_get_iovec(struct sps_pipe *h, struct sps_iovec *iovec)
+{
+	return -EPERM;
+}
+
+static inline int sps_flow_on(struct sps_pipe *h)
+{
+	return -EPERM;
+}
+
+static inline int sps_flow_off(struct sps_pipe *h, enum sps_flow_off mode)
+{
+	return -EPERM;
+}
+
+static inline int sps_transfer(struct sps_pipe *h,
+				struct sps_transfer *transfer)
+{
+	return -EPERM;
+}
+
+static inline int sps_is_pipe_empty(struct sps_pipe *h, u32 *empty)
+{
+	return -EPERM;
+}
+
+static inline int sps_device_reset(unsigned long dev)
+{
+	return -EPERM;
+}
+
+static inline int sps_set_config(struct sps_pipe *h, struct sps_connect *config)
+{
+	return -EPERM;
+}
+
+static inline int sps_set_owner(struct sps_pipe *h, enum sps_owner owner,
+		  struct sps_satellite *connect)
+{
+	return -EPERM;
+}
+
+static inline int sps_get_free_count(struct sps_pipe *h, u32 *count)
+{
+	return -EPERM;
+}
+
+static inline int sps_alloc_dma_chan(const struct sps_alloc_dma_chan *alloc,
+		       struct sps_dma_chan *chan)
+{
+	return -EPERM;
+}
+
+static inline int sps_free_dma_chan(struct sps_dma_chan *chan)
+{
+	return -EPERM;
+}
+
+static inline unsigned long sps_dma_get_bam_handle(void)
+{
+	return 0;
+}
+
+static inline void sps_dma_free_bam_handle(unsigned long h)
+{
+}
+
+static inline int sps_timer_ctrl(struct sps_pipe *h,
+		   struct sps_timer_ctrl *timer_ctrl,
+		   struct sps_timer_result *timer_result)
+{
+	return -EPERM;
+}
+
+static inline int sps_phy2h(phys_addr_t phys_addr, unsigned long *handle)
+{
+	return -EPERM;
+}
+
+static inline int sps_setup_bam2bam_fifo(struct sps_mem_buffer *mem_buffer,
+		  u32 addr, u32 size, int use_offset)
+{
+	return -EPERM;
+}
+
+static inline int sps_get_unused_desc_num(struct sps_pipe *h, u32 *desc_num)
+{
+	return -EPERM;
+}
+
+static inline int sps_get_bam_debug_info(unsigned long dev, u32 option,
+		u32 para, u32 tb_sel, u32 desc_sel)
+{
+	return -EPERM;
+}
+
+static inline int sps_ctrl_bam_dma_clk(bool clk_on)
+{
+	return -EPERM;
+}
+
+static inline int sps_pipe_reset(unsigned long dev, u32 pipe)
+{
+	return -EPERM;
+}
+
+static inline int sps_pipe_disable(unsigned long dev, u32 pipe)
+{
+	return -EPERM;
+}
+
+static inline int sps_pipe_pending_desc(unsigned long dev, u32 pipe,
+					bool *pending)
+{
+	return -EPERM;
+}
+
+static inline int sps_bam_process_irq(unsigned long dev)
+{
+	return -EPERM;
+}
+
+static inline int sps_get_bam_addr(unsigned long dev, phys_addr_t *base,
+				u32 *size)
+{
+	return -EPERM;
+}
+
+static inline int sps_pipe_inject_zlt(unsigned long dev, u32 pipe_index)
+{
+	return -EPERM;
+}
+#endif
+
+#endif /* _SPS_H_ */
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/include/linux/msm_thermal.h	2019-01-22 16:16:28.315289982 +0100
@@ -0,0 +1,349 @@
+/*
+ * Copyright (c) 2012-2016,2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MSM_THERMAL_H
+#define __MSM_THERMAL_H
+
+#include <linux/thermal.h>
+
+#define MAX_THRESHOLD  2
+#define TSENS_NAME_MAX 20
+#define MONITOR_ALL_TSENS -1
+#define HOTPLUG_DEVICE "hotplug"
+#define CPU0_DEVICE     "cpu0"
+#define CPU1_DEVICE     "cpu1"
+#define CPU2_DEVICE     "cpu2"
+#define CPU3_DEVICE     "cpu3"
+#define CPU4_DEVICE     "cpu4"
+#define CPU5_DEVICE     "cpu5"
+#define CPU6_DEVICE     "cpu6"
+#define CPU7_DEVICE     "cpu7"
+#define CPUFREQ_MAX_NO_MITIGATION     UINT_MAX
+#define CPUFREQ_MIN_NO_MITIGATION     0
+#define HOTPLUG_NO_MITIGATION(_mask)  cpumask_clear(_mask)
+
+#define IS_HI_THRESHOLD_SET(_val) ((_val) & 1)
+#define IS_LOW_THRESHOLD_SET(_val) ((_val) & 2)
+
+struct msm_thermal_data {
+	struct platform_device *pdev;
+	uint32_t sensor_id;
+	uint32_t poll_ms;
+	int32_t limit_temp_degC;
+	int32_t temp_hysteresis_degC;
+	uint32_t bootup_freq_step;
+	uint32_t bootup_freq_control_mask;
+	int32_t core_limit_temp_degC;
+	int32_t core_temp_hysteresis_degC;
+	int32_t hotplug_temp_degC;
+	int32_t hotplug_temp_hysteresis_degC;
+	uint32_t core_control_mask;
+	uint32_t freq_mitig_temp_degc;
+	uint32_t freq_mitig_temp_hysteresis_degc;
+	uint32_t freq_mitig_control_mask;
+	uint32_t freq_limit;
+	int32_t vdd_rstr_temp_degC;
+	int32_t vdd_rstr_temp_hyst_degC;
+	int32_t vdd_rstr_sensor_id;
+	int32_t vdd_mx_min;
+	int32_t vdd_cx_min;
+	int32_t psm_temp_degC;
+	int32_t psm_temp_hyst_degC;
+	int32_t ocr_temp_degC;
+	int32_t ocr_temp_hyst_degC;
+	uint32_t ocr_sensor_id;
+	int32_t phase_rpm_resource_type;
+	int32_t phase_rpm_resource_id;
+	int32_t gfx_phase_warm_temp_degC;
+	int32_t gfx_phase_warm_temp_hyst_degC;
+	int32_t gfx_phase_hot_temp_degC;
+	int32_t gfx_phase_hot_temp_hyst_degC;
+	int32_t gfx_sensor;
+	int32_t gfx_phase_request_key;
+	int32_t cx_phase_hot_temp_degC;
+	int32_t cx_phase_hot_temp_hyst_degC;
+	int32_t cx_phase_request_key;
+	int32_t vdd_mx_temp_degC;
+	int32_t vdd_mx_temp_hyst_degC;
+	int32_t vdd_mx_sensor_id;
+	int32_t therm_reset_temp_degC;
+};
+
+enum sensor_id_type {
+	THERM_ZONE_ID,
+	THERM_TSENS_ID,
+	THERM_ID_MAX_NR,
+};
+
+struct threshold_info;
+struct therm_threshold {
+	int32_t                     sensor_id;
+	enum sensor_id_type         id_type;
+	struct sensor_threshold     threshold[MAX_THRESHOLD];
+	int32_t                     trip_triggered;
+	void (*notify)(struct therm_threshold *);
+	struct threshold_info       *parent;
+	int32_t                     cur_state;
+};
+
+struct threshold_info {
+	uint32_t                     thresh_ct;
+	bool                         thresh_triggered;
+	struct list_head             list_ptr;
+	struct therm_threshold       *thresh_list;
+};
+
+enum device_req_type {
+	DEVICE_REQ_NONE = -1,
+	HOTPLUG_MITIGATION_REQ,
+	CPUFREQ_MITIGATION_REQ,
+	DEVICE_REQ_MAX,
+};
+
+/**
+ * For a frequency mitigation request, a client interested in only one
+ * of max_freq or min_freq must still set the other field to its default
+ * value in the request (see the usage sketch after union device_request
+ * below). Default values for the request structure variables are:
+ *   max_freq = UINT_MAX;
+ *   min_freq = 0;
+ *   offline_mask = CPU_MASK_NONE;
+ */
+struct cpufreq_request {
+	uint32_t                     max_freq;
+	uint32_t                     min_freq;
+};
+
+union device_request {
+	struct cpufreq_request       freq;
+	cpumask_t                    offline_mask;
+};
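+
+/*
+ * Illustrative sketch (assumed usage): a frequency request capping only
+ * the maximum frequency, with min_freq left at its documented default
+ * (the 1200000 kHz cap is hypothetical):
+ *
+ *	union device_request req;
+ *
+ *	req.freq.max_freq = 1200000;
+ *	req.freq.min_freq = CPUFREQ_MIN_NO_MITIGATION;
+ */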
+
+struct device_clnt_data;
+struct device_manager_data {
+	char                         device_name[TSENS_NAME_MAX];
+	union device_request         active_req;
+	struct list_head             client_list;
+	struct list_head             dev_ptr;
+	struct mutex                 clnt_lock;
+	int (*request_validate)(struct device_clnt_data *,
+			union device_request *,
+			enum device_req_type);
+	int (*update)(struct device_manager_data *);
+	void                         *data;
+};
+
+struct device_clnt_data {
+	struct device_manager_data   *dev_mgr;
+	bool                         req_active;
+	union device_request         request;
+	struct list_head             clnt_ptr;
+	void (*callback)(struct device_clnt_data *,
+			union device_request *req, void *);
+	void                         *usr_data;
+};
+
+#ifdef CONFIG_THERMAL_MONITOR
+extern int msm_thermal_ioctl_init(void);
+extern void msm_thermal_ioctl_cleanup(void);
+extern int msm_thermal_init(struct msm_thermal_data *pdata);
+extern int msm_thermal_device_init(void);
+extern int msm_thermal_set_frequency(uint32_t cpu, uint32_t freq,
+	bool is_max);
+extern int msm_thermal_set_cluster_freq(uint32_t cluster, uint32_t freq,
+	bool is_max);
+extern int msm_thermal_get_freq_plan_size(uint32_t cluster,
+	unsigned int *table_len);
+extern int msm_thermal_get_cluster_freq_plan(uint32_t cluster,
+	unsigned int *table_ptr);
+extern int msm_thermal_get_cluster_voltage_plan(uint32_t cluster,
+	uint32_t *table_ptr);
+/**
+ * sensor_mgr_init_threshold - Initialize thresholds data structure for
+ *                             sensor(s) with high and low thresholds and
+ *                             threshold callback.
+ *
+ * @thresh_inp: Client threshold data structure.
+ * @sensor_id: Sensor h/w ID to be monitored. Use MONITOR_ALL_TSENS
+ *             to monitor all temperature sensors.
+ *
+ * @high_temp: Trigger threshold value for sensor_id or all sensors.
+ * @low_temp: Clear threshold value for sensor_id or all sensors.
+ * @callback: Callback pointer for threshold notification.
+ *
+ * Returns which threshold is set on success, or a negative error number
+ * on failure. The IS_HI_THRESHOLD_SET/IS_LOW_THRESHOLD_SET macros can be
+ * used to determine which threshold is being set.
+ */
+extern int sensor_mgr_init_threshold(struct threshold_info *thresh_inp,
+				int sensor_id, int32_t high_temp,
+				int32_t low_temp,
+				void (*callback)(struct therm_threshold *));
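+
+/*
+ * Illustrative sketch (assumed usage): monitoring one sensor with a
+ * hypothetical 95 degC trigger and 90 degC clear threshold:
+ *
+ *	static struct threshold_info thresh;
+ *
+ *	static void thresh_notify(struct therm_threshold *tt)
+ *	{
+ *		pr_info("sensor %d tripped\n", tt->sensor_id);
+ *	}
+ *
+ *	ret = sensor_mgr_init_threshold(&thresh, sensor_id, 95, 90,
+ *					thresh_notify);
+ *	if (ret >= 0 && IS_HI_THRESHOLD_SET(ret))
+ *		...
+ */
+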
+/**
+ * sensor_mgr_convert_id_and_set_threshold - Convert each sensor h/w ID
+ *                                           to its sensor zone ID and
+ *                                           set thermal thresholds for
+ *                                           the sensors listed in the
+ *                                           threshold info.
+ *
+ * @thresh_inp: Client threshold data structure.
+ *
+ * Returns zero on success, negative error number on failure.
+ */
+extern int sensor_mgr_convert_id_and_set_threshold(
+				struct threshold_info *thresh_inp);
+/**
+ * sensor_mgr_set_threshold - Set thermal threshold trips for a sensor.
+ *
+ * @zone_id: Thermal zone ID for the sensor.
+ * @threshold: threshold info for the sensor.
+ *
+ * Returns zero on success, negative error number on failure.
+ */
+extern int sensor_mgr_set_threshold(uint32_t zone_id,
+				struct sensor_threshold *threshold);
+/**
+ * sensor_mgr_remove_threshold - Cancel threshold notification and
+ *                               remove the threshold from the sensor
+ *                               manager threshold list.
+ *
+ * @thresh_inp: The threshold info which needs to be removed.
+ */
+extern void sensor_mgr_remove_threshold(struct threshold_info *thresh_inp);
+/**
+ * devmgr_register_mitigation_client - Register for a device and
+ *                                     get a handle for mitigation.
+ * @dev: Client device structure.
+ * @device_name: Name of the mitigation device the client wants to
+ *               mitigate.
+ * @callback: Optional callback pointer for device change notification;
+ *            pass NULL if not needed.
+ *
+ * Returns a client handle structure for that device on success; on
+ * failure, returns NULL or an IS_ERR() pointer containing the error
+ * number.
+ */
+extern struct device_clnt_data *devmgr_register_mitigation_client(
+				struct device *dev,
+				const char *device_name,
+				void (*callback)(struct device_clnt_data *,
+				union device_request *, void *));
+/**
+ * devmgr_client_request_mitigation - Set a valid mitigation for a
+ *                                    registered device.
+ * @clnt: Client handle for the device.
+ * @type: Type of device request (see enum device_req_type above).
+ * @req:  Valid mitigation request.
+ *
+ * Returns zero on a successful mitigation update, or a negative error
+ * number.
+ */
+extern int devmgr_client_request_mitigation(struct device_clnt_data *clnt,
+						enum device_req_type type,
+						union device_request *req);
+/**
+ * devmgr_unregister_mitigation_client - Unregister mitigation device
+ * @dev: Client device structure.
+ * @clnt: Client handle for device.
+ */
+extern void devmgr_unregister_mitigation_client(
+					struct device *dev,
+					struct device_clnt_data *clnt);
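+
+/*
+ * Illustrative sketch (assumed usage): the full mitigation-client
+ * lifecycle against the hotplug device, requesting that CPU2 be kept
+ * offline:
+ *
+ *	struct device_clnt_data *clnt;
+ *	union device_request req;
+ *
+ *	clnt = devmgr_register_mitigation_client(dev, HOTPLUG_DEVICE,
+ *						 NULL);
+ *	if (IS_ERR_OR_NULL(clnt))
+ *		return -ENODEV;
+ *	cpumask_clear(&req.offline_mask);
+ *	cpumask_set_cpu(2, &req.offline_mask);
+ *	devmgr_client_request_mitigation(clnt, HOTPLUG_MITIGATION_REQ,
+ *					 &req);
+ *	...
+ *	devmgr_unregister_mitigation_client(dev, clnt);
+ */
+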
+#ifdef CONFIG_QCOM_THERMAL_LIMITS_DCVS
+extern int msm_lmh_dcvsh_sw_notify(int cpu);
+#else
+static inline int msm_lmh_dcvsh_sw_notify(int cpu)
+{
+	return -ENODEV;
+}
+#endif
+
+#else
+static inline int msm_thermal_init(struct msm_thermal_data *pdata)
+{
+	return -ENOSYS;
+}
+static inline int msm_thermal_device_init(void)
+{
+	return -ENOSYS;
+}
+static inline int msm_thermal_set_frequency(uint32_t cpu, uint32_t freq,
+	bool is_max)
+{
+	return -ENOSYS;
+}
+static inline int msm_thermal_set_cluster_freq(uint32_t cluster, uint32_t freq,
+	bool is_max)
+{
+	return -ENOSYS;
+}
+static inline int msm_thermal_get_freq_plan_size(uint32_t cluster,
+	unsigned int *table_len)
+{
+	return -ENOSYS;
+}
+static inline int msm_thermal_get_cluster_freq_plan(uint32_t cluster,
+	unsigned int *table_ptr)
+{
+	return -ENOSYS;
+}
+static inline int msm_thermal_get_cluster_voltage_plan(uint32_t cluster,
+	uint32_t *table_ptr)
+{
+	return -ENOSYS;
+}
+static inline int sensor_mgr_init_threshold(struct threshold_info *thresh_inp,
+				int sensor_id, int32_t high_temp,
+				int32_t low_temp,
+				void (*callback)(struct therm_threshold *))
+{
+	return -ENOSYS;
+}
+static inline int sensor_mgr_convert_id_and_set_threshold(
+			struct threshold_info *thresh_inp)
+{
+	return -ENOSYS;
+}
+static inline int sensor_mgr_set_threshold(uint32_t zone_id,
+			struct sensor_threshold *threshold)
+{
+	return -ENOSYS;
+}
+static inline void sensor_mgr_remove_threshold(
+				struct threshold_info *thresh_inp)
+{
+}
+static inline struct device_clnt_data *devmgr_register_mitigation_client(
+				struct device *dev,
+				const char *device_name,
+				void (*callback)(struct device_clnt_data *,
+				union device_request *, void *))
+{
+	return NULL;
+}
+static inline int devmgr_client_request_mitigation(
+					struct device_clnt_data *clnt,
+					enum device_req_type type,
+					union device_request *req)
+{
+	return -ENOSYS;
+}
+static inline void devmgr_unregister_mitigation_client(
+					struct device *dev,
+					struct device_clnt_data *clnt)
+{
+}
+static inline int msm_lmh_dcvsh_sw_notify(int cpu)
+{
+	return -ENODEV;
+}
+#endif
+
+#endif /* __MSM_THERMAL_H */
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/include/linux/msm_tsens.h	2019-01-22 16:16:28.315289982 +0100
@@ -0,0 +1,139 @@
+/*
+ * Copyright (c) 2011-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+/*
+ * Qualcomm TSENS Header file
+ *
+ */
+
+#ifndef __MSM_TSENS_H
+#define __MSM_TSENS_H
+
+#define TSENS_MAX_SENSORS		11
+#define TSENS_MTC_ZONE_LOG_SIZE		6
+#define TSENS_NUM_MTC_ZONES_SUPPORT	3
+#define TSENS_ZONEMASK_PARAMS		3
+#define TSENS_ZONELOG_PARAMS		1
+#define TSENS_MTC_ZONE_HISTORY_SIZE	3
+
+struct tsens_device {
+	uint32_t			sensor_num;
+};
+
+#if defined(CONFIG_THERMAL_TSENS8974)
+/**
+ * tsens_is_ready() - Clients can use this API to check whether the TSENS
+ *		device is ready before requesting temperature reads.
+ * @return:	Returns true if the device is ready, else returns
+ *		-EPROBE_DEFER so clients can check back after a delay.
+ */
+int tsens_is_ready(void);
+/**
+ * tsens_tm_init_driver() - Early initialization for clients to read
+ *		TSENS temperature.
+ */
+int __init tsens_tm_init_driver(void);
+/**
+ * tsens_get_hw_id_mapping() - Map a client's software sensor ID to the
+ *		physical TSENS sensor. In cases where there are multiple
+ *		controllers, the sensor ID is used to map the client's
+ *		software ID to the physical HW sensor used by the driver.
+ * @sensor_sw_id:	Client ID.
+ * @sensor_hw_num:	Output for the physical HW sensor number that the
+ *			driver maps the client ID to.
+ * @return:	If the device is not present returns -EPROBE_DEFER
+ *		for clients to check back after a time duration.
+ *		0 on success else error code on error.
+ */
+int tsens_get_hw_id_mapping(int sensor_sw_id, int *sensor_hw_num);
+/**
+ * tsens_get_max_sensor_num() - Get the total number of active TSENS sensors.
+ *		The total number received by the client is across multiple
+ *		TSENS controllers if present.
+ * @tsens_num_sensors: Output for the total number of sensors.
+ */
+int tsens_get_max_sensor_num(uint32_t *tsens_num_sensors);
+/**
+ * tsens_set_mtc_zone_sw_mask() - Mask the MTC threshold level of a zone.
+ *		SW can force the MTC to stop issuing throttling commands that
+ *		correspond to each MTC threshold level by writing the
+ *		corresponding bit in register at any time.
+ * @zone: Zone ID.
+ * @th1_enable : Value corresponding to the threshold level.
+ * @th2_enable : Value corresponding to the threshold level.
+ */
+int tsens_set_mtc_zone_sw_mask(unsigned int zone, unsigned int th1_enable,
+				unsigned int th2_enable);
+/**
+ * tsens_get_mtc_zone_log() - Get the log of last 6 commands sent to pulse
+ *		swallower of a zone.
+ * zone: Zone ID
+ * @zone_log: Log commands result to be stored.
+ */
+int tsens_get_mtc_zone_log(unsigned int zone, void *zone_log);
+/**
+ * tsens_mtc_reset_history_counter() - Reset history of throttling commands
+ *		sent to pulse swallower. Tsens controller issues clock
+ *		throttling commands to Pulse swallower to perform HW
+ *		based clock throttling. Reset the history counter of a zone.
+ * @zone: Zone ID.
+ */
+int tsens_mtc_reset_history_counter(unsigned int zone);
+/**
+ * tsens_get_mtc_zone_history() - Get the history of throttling commands sent
+ *		to pulse swallower. Tsens controller issues clock throttling
+ *		commands to Pulse swallower to perform HW based clock
+ *		throttling.
+ * @zone: Zone ID
+ * @zone_hist: Commands history result to be stored.
+ */
+int tsens_get_mtc_zone_history(unsigned int zone, void *zone_hist);
+/**
+ * tsens_get_temp() - Obtain the TSENS temperature for the respective sensor.
+ *
+ * @dev:	Sensor number for which client wants the TSENS temperature
+ *		reading. The ID passed by the sensor could be the sensor ID
+ *		which the driver translates to internally to read the
+ *		respective physical HW sensor from the controller.
+ * @temp:	temperature result to be stored.
+ * @return:	If the device is not present returns -EPROBE_DEFER
+ *		for clients to check back after a time duration.
+ *		0 on success else error code on error.
+ */
+int tsens_get_temp(struct tsens_device *dev, int *temp);
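+
+/*
+ * Illustrative sketch (assumed usage): deferring until TSENS is ready,
+ * then reading sensor 0:
+ *
+ *	struct tsens_device tdev = { .sensor_num = 0 };
+ *	int temp, rc;
+ *
+ *	rc = tsens_is_ready();
+ *	if (rc < 0)
+ *		return rc;
+ *	rc = tsens_get_temp(&tdev, &temp);
+ */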
+#else
+static inline int tsens_is_ready(void)
+{ return -ENXIO; }
+static inline int __init tsens_tm_init_driver(void)
+{ return -ENXIO; }
+static inline int tsens_get_hw_id_mapping(
+				int sensor_sw_id, int *sensor_hw_num)
+{ return -ENXIO; }
+static inline int tsens_get_max_sensor_num(uint32_t *tsens_num_sensors)
+{ return -ENXIO; }
+static inline int tsens_set_mtc_zone_sw_mask(unsigned int zone,
+				unsigned int th1_enable,
+				unsigned int th2_enable)
+{ return -ENXIO; }
+static inline int tsens_get_mtc_zone_log(unsigned int zone, void *zone_log)
+{ return -ENXIO; }
+static inline int tsens_mtc_reset_history_counter(unsigned int zone)
+{ return -ENXIO; }
+static inline int tsens_get_temp(struct tsens_device *dev,
+						int *temp)
+{ return -ENXIO; }
+static inline int tsens_get_mtc_zone_history(unsigned int zone, void *zone_hist)
+{ return -ENXIO; }
+#endif
+
+#endif /* __MSM_TSENS_H */
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/include/linux/netfilter/xt_qtaguid.h	2019-01-22 16:16:28.327290091 +0100
@@ -0,0 +1,14 @@
+#ifndef _XT_QTAGUID_MATCH_H
+#define _XT_QTAGUID_MATCH_H
+
+/* For now we just replace the xt_owner.
+ * FIXME: make iptables aware of qtaguid. */
+#include <linux/netfilter/xt_owner.h>
+
+#define XT_QTAGUID_UID    XT_OWNER_UID
+#define XT_QTAGUID_GID    XT_OWNER_GID
+#define XT_QTAGUID_SOCKET XT_OWNER_SOCKET
+#define xt_qtaguid_match_info xt_owner_match_info
+
+int qtaguid_untag(struct socket *sock, bool kernel);
+#endif /* _XT_QTAGUID_MATCH_H */
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/include/linux/nfcinfo.h	2019-01-22 16:16:28.327290091 +0100
@@ -0,0 +1,6 @@
+#ifndef _NFCINFO_H
+#define _NFCINFO_H
+
+#include <uapi/linux/nfc/nfcinfo.h>
+
+#endif
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/include/linux/of_batterydata.h	2019-01-22 16:16:28.331290127 +0100
@@ -0,0 +1,64 @@
+/* Copyright (c) 2013-2014, 2016 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/err.h>
+#include <linux/of.h>
+#include <linux/batterydata-lib.h>
+
+#ifdef CONFIG_OF_BATTERYDATA
+/**
+ * of_batterydata_read_data() - Populate battery data from the device tree
+ * @container_node: pointer to the battery-data container device node
+ *		containing the profile nodes.
+ * @batt_data: pointer to an allocated bms_battery_data structure that the
+ *		loaded profile will be written to.
+ * @batt_id_uv: ADC voltage of the battery id line used to differentiate
+ *		between different battery profiles. If there are multiple
+ *		battery data in the device tree, the one with the closest
+ *		battery id resistance will be automatically loaded.
+ *
+ * This routine loads the closest match battery data from device tree based on
+ * the battery id reading. Then, it will try to load all the relevant data from
+ * the device tree battery data profile.
+ *
+ * If any of the lookup table pointers are NULL, this routine will skip trying
+ * to read them.
+ */
+int of_batterydata_read_data(struct device_node *container_node,
+				struct bms_battery_data *batt_data,
+				int batt_id_uv);
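+
+/*
+ * Illustrative sketch (assumed usage); the "qcom,battery-data" phandle
+ * property name is hypothetical and board-specific:
+ *
+ *	struct device_node *node;
+ *	struct bms_battery_data batt_data;
+ *
+ *	node = of_parse_phandle(pdev->dev.of_node, "qcom,battery-data", 0);
+ *	if (node)
+ *		rc = of_batterydata_read_data(node, &batt_data, batt_id_uv);
+ */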
+/**
+ * of_batterydata_get_best_profile() - Find matching battery data device node
+ * @batterydata_container_node: pointer to the battery-data container device
+ *		node containing the profile nodes.
+ * @batt_id_kohm: Battery ID in KOhms for which we want to find the profile.
+ * @batt_type: Battery type which we want to force load the profile.
+ *
+ * This routine returns a device_node pointer to the closest match battery data
+ * from device tree based on the battery id reading.
+ */
+struct device_node *of_batterydata_get_best_profile(
+		struct device_node *batterydata_container_node,
+		int batt_id_kohm, const char *batt_type);
+#else
+static inline int of_batterydata_read_data(struct device_node *container_node,
+				struct bms_battery_data *batt_data,
+				int batt_id_uv)
+{
+	return -ENXIO;
+}
+static inline struct device_node *of_batterydata_get_best_profile(
+		struct device_node *batterydata_container_node,
+		int batt_id_kohm, const char *batt_type)
+{
+	return ERR_PTR(-ENXIO);
+}
+#endif /* CONFIG_OF_BATTERYDATA */
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/include/linux/of_slimbus.h	2019-01-22 16:16:28.331290127 +0100
@@ -0,0 +1,34 @@
+/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/slimbus/slimbus.h>
+#include <linux/of_irq.h>
+
+#ifdef CONFIG_OF_SLIMBUS
+/*
+ * of_register_slim_devices() - Register devices in the SLIMbus Device Tree
+ * @ctrl: slim_controller which devices should be registered to.
+ *
+ * This routine scans the SLIMbus Device Tree, allocating resources and
+ * creating slim_devices according to the SLIMbus Device Tree
+ * hierarchy. Details of this hierarchy can be found in
+ * Documentation/devicetree/bindings/slimbus. This routine is normally
+ * called from the probe routine of the driver registering as a
+ * slim_controller.
+ */
+extern int of_register_slim_devices(struct slim_controller *ctrl);
+#else
+static inline int of_register_slim_devices(struct slim_controller *ctrl)
+{
+	return 0;
+}
+#endif /* CONFIG_OF_SLIMBUS */
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/include/linux/pfk.h	2019-01-22 16:16:28.339290199 +0100
@@ -0,0 +1,57 @@
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef PFK_H_
+#define PFK_H_
+
+#include <linux/bio.h>
+
+struct ice_crypto_setting;
+
+#ifdef CONFIG_PFK
+
+int pfk_load_key_start(const struct bio *bio,
+		struct ice_crypto_setting *ice_setting, bool *is_pfe,
+		bool async);
+int pfk_load_key_end(const struct bio *bio, bool *is_pfe);
+int pfk_remove_key(const unsigned char *key, size_t key_size);
+bool pfk_allow_merge_bio(const struct bio *bio1, const struct bio *bio2);
+void pfk_clear_on_reset(void);
+
+#else
+static inline int pfk_load_key_start(const struct bio *bio,
+	struct ice_crypto_setting *ice_setting, bool *is_pfe, bool async)
+{
+	return -ENODEV;
+}
+
+static inline int pfk_load_key_end(const struct bio *bio, bool *is_pfe)
+{
+	return -ENODEV;
+}
+
+static inline int pfk_remove_key(const unsigned char *key, size_t key_size)
+{
+	return -ENODEV;
+}
+
+static inline bool pfk_allow_merge_bio(const struct bio *bio1,
+		const struct bio *bio2)
+{
+	return true;
+}
+
+static inline void pfk_clear_on_reset(void)
+{}
+
+#endif /* CONFIG_PFK */
+
+#endif /* PFK_H_ */
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/include/linux/pft.h	2019-01-22 16:16:28.339290199 +0100
@@ -0,0 +1,97 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef PFT_H_
+#define PFT_H_
+
+#include <linux/types.h>
+#include <linux/fs.h>
+#include <linux/bio.h>
+
+#ifdef CONFIG_PFT
+
+/* dm-req-crypt API */
+int pft_get_key_index(struct bio *bio, u32 *key_index,
+		      bool *is_encrypted, bool *is_inplace);
+
+/* block layer API */
+bool pft_allow_merge_bio(struct bio *bio1, struct bio *bio2);
+
+/* --- security hooks, called from SELinux --- */
+int pft_inode_create(struct inode *dir, struct dentry *dentry, umode_t mode);
+
+int pft_inode_post_create(struct inode *dir, struct dentry *dentry,
+			  umode_t mode);
+
+int pft_file_open(struct file *filp, const struct cred *cred);
+
+int pft_file_permission(struct file *file, int mask);
+
+int pft_file_close(struct file *filp);
+
+int pft_inode_unlink(struct inode *dir, struct dentry *dentry);
+
+int pft_inode_mknod(struct inode *dir, struct dentry *dentry, umode_t mode,
+		    dev_t dev);
+
+int pft_inode_rename(struct inode *inode, struct dentry *dentry,
+		     struct inode *new_inode, struct dentry *new_dentry);
+
+int pft_inode_set_xattr(struct dentry *dentry, const char *name,
+			const void *value, size_t size, int flags);
+
+
+#else
+static inline int pft_get_key_index(struct bio *bio, u32 *key_index,
+				    bool *is_encrypted, bool *is_inplace)
+{ return -ENODEV; }
+
+static inline bool pft_allow_merge_bio(struct bio *bio1, struct bio *bio2)
+{ return true; }
+
+static inline int pft_file_permission(struct file *file, int mask)
+{ return 0; }
+
+static inline int pft_inode_create(
+	struct inode *dir, struct dentry *dentry, umode_t mode)
+{ return 0; }
+
+static inline int pft_inode_post_create(
+	struct inode *dir, struct dentry *dentry, umode_t mode)
+{ return 0; }
+
+static inline int pft_file_open(struct file *filp, const struct cred *cred)
+{ return 0; }
+
+static inline int pft_file_close(struct file *filp)
+{ return 0; }
+
+static inline int pft_inode_unlink(struct inode *dir, struct dentry *dentry)
+{ return 0; }
+
+static inline int pft_inode_mknod(struct inode *dir, struct dentry *dentry,
+				  umode_t mode, dev_t dev)
+{ return 0; }
+
+static inline int pft_inode_rename(struct inode *inode, struct dentry *dentry,
+		     struct inode *new_inode, struct dentry *new_dentry)
+{ return 0; }
+
+static inline int pft_inode_set_xattr(struct dentry *dentry, const char *name,
+				      const void *value, size_t size,
+				      int flags)
+{ return 0; }
+
+#endif /* CONFIG_PFT */
+
+#endif /* PFT_H_ */
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/include/linux/platform_data/msm_serial_hs.h	2019-01-22 16:16:28.351290308 +0100
@@ -0,0 +1,63 @@
+/*
+ * Copyright (C) 2008 Google, Inc.
+ * Copyright (C) 2010-2014, The Linux Foundation. All rights reserved.
+ * Author: Nick Pelly <npelly@google.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __ASM_ARCH_MSM_SERIAL_HS_H
+#define __ASM_ARCH_MSM_SERIAL_HS_H
+
+#include <linux/serial_core.h>
+
+/**
+ * struct msm_serial_hs_platform_data - platform device data
+ *					for msm hsuart device
+ * @wakeup_irq : IRQ line to be configured as Wakeup source.
+ * @inject_rx_on_wakeup : Set to true if a specific character is to be
+ * injected on wakeup
+ * @rx_to_inject : Character to be inserted on wakeup
+ * @gpio_config : Configure gpios that are used for uart communication
+ * @userid : User-defined number to be used to enumerate device as tty<userid>
+ * @uart_tx_gpio: GPIO number for UART Tx Line.
+ * @uart_rx_gpio: GPIO number for UART Rx Line.
+ * @uart_cts_gpio: GPIO number for UART CTS Line.
+ * @uart_rfr_gpio: GPIO number for UART RFR Line.
+ * @bam_tx_ep_pipe_index : BAM TX Endpoint Pipe Index for HSUART
+ * @bam_rx_ep_pipe_index : BAM RX Endpoint Pipe Index for HSUART
+ * @no_suspend_delay : Flag controlling whether the system suspends
+ * immediately
+ * @obs: Flag to enable out of band sleep feature support
+ */
+struct msm_serial_hs_platform_data {
+	int wakeup_irq;  /* wakeup irq */
+	bool inject_rx_on_wakeup;
+	u8 rx_to_inject;
+	int (*gpio_config)(int);
+	int userid;
+
+	int uart_tx_gpio;
+	int uart_rx_gpio;
+	int uart_cts_gpio;
+	int uart_rfr_gpio;
+	unsigned bam_tx_ep_pipe_index;
+	unsigned bam_rx_ep_pipe_index;
+	bool no_suspend_delay;
+	bool obs;
+};
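+
+/*
+ * Illustrative sketch (assumed usage): minimal board platform data that
+ * injects a character on wakeup; all values are hypothetical:
+ *
+ *	static struct msm_serial_hs_platform_data uart_pdata = {
+ *		.wakeup_irq = 42,
+ *		.inject_rx_on_wakeup = true,
+ *		.rx_to_inject = 0xfd,
+ *		.userid = 1,
+ *	};
+ */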
+
+/* return true when tx is empty */
+unsigned int msm_hs_tx_empty(struct uart_port *uport);
+int msm_hs_request_clock_off(struct uart_port *uport);
+int msm_hs_request_clock_on(struct uart_port *uport);
+struct uart_port *msm_hs_get_uart_port(int port_index);
+void msm_hs_set_mctrl(struct uart_port *uport,
+				    unsigned int mctrl);
+#endif
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/include/linux/platform_data/qcom_crypto_device.h	2019-01-22 16:16:28.351290308 +0100
@@ -0,0 +1,24 @@
+/* Copyright (c) 2011, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __QCOM_CRYPTO_DEVICE__H
+#define __QCOM_CRYPTO_DEVICE__H
+
+struct msm_ce_hw_support {
+	uint32_t ce_shared;
+	uint32_t shared_ce_resource;
+	uint32_t hw_key_support;
+	uint32_t sha_hmac;
+	void *bus_scale_table;
+};
+
+#endif /* __QCOM_CRYPTO_DEVICE__H */
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/include/linux/pmic-voter.h	2019-01-22 16:16:28.355290344 +0100
@@ -0,0 +1,50 @@
+/* Copyright (c) 2015-2016 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __PMIC_VOTER_H
+#define __PMIC_VOTER_H
+
+#include <linux/mutex.h>
+
+struct votable;
+
+enum votable_type {
+	VOTE_MIN,
+	VOTE_MAX,
+	VOTE_SET_ANY,
+	NUM_VOTABLE_TYPES,
+};
+
+bool is_client_vote_enabled(struct votable *votable, const char *client_str);
+bool is_client_vote_enabled_locked(struct votable *votable,
+							const char *client_str);
+int get_client_vote(struct votable *votable, const char *client_str);
+int get_client_vote_locked(struct votable *votable, const char *client_str);
+int get_effective_result(struct votable *votable);
+int get_effective_result_locked(struct votable *votable);
+const char *get_effective_client(struct votable *votable);
+const char *get_effective_client_locked(struct votable *votable);
+int vote(struct votable *votable, const char *client_str, bool state, int val);
+int rerun_election(struct votable *votable);
+struct votable *find_votable(const char *name);
+struct votable *create_votable(const char *name,
+				int votable_type,
+				int (*callback)(struct votable *votable,
+						void *data,
+						int effective_result,
+						const char *effective_client),
+				void *data);
+void destroy_votable(struct votable *votable);
+void lock_votable(struct votable *votable);
+void unlock_votable(struct votable *votable);
+
+#endif /* __PMIC_VOTER_H */
diff -Nruw linux-4.4.115-fbx/include/linux/power/qcom./apm.h linux-4.4.115-fbx/include/linux/power/qcom/apm.h
--- linux-4.4.115-fbx/include/linux/power/qcom./apm.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/include/linux/power/qcom/apm.h	2019-01-22 16:16:28.359290380 +0100
@@ -0,0 +1,48 @@
+/*
+ * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __LINUX_POWER_QCOM_APM_H__
+#define __LINUX_POWER_QCOM_APM_H__
+
+#include <linux/device.h>
+#include <linux/err.h>
+
+/**
+ * enum msm_apm_supply - supported power rails to supply memory arrays
+ * %MSM_APM_SUPPLY_APCC:	to enable selection of VDD_APCC rail as supply
+ * %MSM_APM_SUPPLY_MX:		to enable selection of VDD_MX rail as supply
+ */
+enum msm_apm_supply {
+	MSM_APM_SUPPLY_APCC,
+	MSM_APM_SUPPLY_MX,
+};
+
+/* Handle used to identify an APM controller device  */
+struct msm_apm_ctrl_dev;
+
+#ifdef CONFIG_MSM_APM
+struct msm_apm_ctrl_dev *msm_apm_ctrl_dev_get(struct device *dev);
+int msm_apm_set_supply(struct msm_apm_ctrl_dev *ctrl_dev,
+		       enum msm_apm_supply supply);
+int msm_apm_get_supply(struct msm_apm_ctrl_dev *ctrl_dev);
+
+#else
+static inline struct msm_apm_ctrl_dev *msm_apm_ctrl_dev_get(struct device *dev)
+{ return ERR_PTR(-EPERM); }
+static inline int msm_apm_set_supply(struct msm_apm_ctrl_dev *ctrl_dev,
+		       enum msm_apm_supply supply)
+{ return -EPERM; }
+static inline int msm_apm_get_supply(struct msm_apm_ctrl_dev *ctrl_dev)
+{ return -EPERM; }
+#endif /* CONFIG_MSM_APM */
+#endif /* __LINUX_POWER_QCOM_APM_H__ */
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/include/linux/qcom_iommu.h	2019-01-22 16:16:28.363290417 +0100
@@ -0,0 +1,429 @@
+/* Copyright (c) 2010-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef MSM_IOMMU_H
+#define MSM_IOMMU_H
+
+#include <linux/interrupt.h>
+#include <linux/clk.h>
+#include <linux/list.h>
+#include <linux/regulator/consumer.h>
+#include <linux/platform_device.h>
+#include <linux/idr.h>
+#include <soc/qcom/socinfo.h>
+
+extern pgprot_t     pgprot_kernel;
+extern struct bus_type msm_iommu_sec_bus_type;
+extern struct bus_type *msm_iommu_non_sec_bus_type;
+extern struct iommu_access_ops iommu_access_ops_v0;
+extern struct iommu_access_ops iommu_access_ops_v1;
+
+/* Domain attributes */
+#define MSM_IOMMU_DOMAIN_PT_CACHEABLE	0x1
+#define MSM_IOMMU_DOMAIN_PT_SECURE	0x2
+
+/* Mask for the cache policy attribute */
+#define MSM_IOMMU_CP_MASK		0x03
+
+/* Maximum number of Machine IDs that we are allowing to be mapped to the same
+ * context bank. The number of MIDs mapped to the same CB does not affect
+ * performance, but there is a practical limit on how many distinct MIDs may
+ * be present. These mappings are typically determined at design time and are
+ * not expected to change at run time.
+ */
+#define MAX_NUM_MIDS	32
+
+/* Maximum number of SMT entries allowed by the system */
+#define MAX_NUM_SMR	128
+
+#define MAX_NUM_BFB_REGS	32
+
+/**
+ * struct msm_iommu_dev - a single IOMMU hardware instance
+ * @name:	Human-readable name given to this IOMMU HW instance
+ * @ncb:	Number of context banks present on this IOMMU HW instance
+ */
+struct msm_iommu_dev {
+	const char *name;
+	int ncb;
+	int ttbr_split;
+};
+
+/**
+ * struct msm_iommu_ctx_dev - an IOMMU context bank instance
+ * @name:	Human-readable name given to this context bank
+ * @num:	Index of this context bank within the hardware
+ * @mids:	List of Machine IDs that are to be mapped into this context
+ *		bank, terminated by -1. The MID is a set of signals on the
+ *		AXI bus that identifies the function associated with a
+ *		specific memory request. (See the ARM spec.)
+ */
+struct msm_iommu_ctx_dev {
+	const char *name;
+	int num;
+	int mids[MAX_NUM_MIDS];
+};
+
+/**
+ * struct msm_iommu_bfb_settings - a set of IOMMU BFB tuning parameters
+ * @regs:	An array of register offsets to configure
+ * @data:	Values to write to the corresponding registers
+ * @length:	Number of valid entries in the offset/value arrays
+ */
+struct msm_iommu_bfb_settings {
+	unsigned int regs[MAX_NUM_BFB_REGS];
+	unsigned int data[MAX_NUM_BFB_REGS];
+	int length;
+};
+
+/**
+ * struct msm_iommu_drvdata - A single IOMMU hardware instance
+ * @base:	IOMMU config port base address (VA)
+ * @phys_base:	IOMMU physical base address.
+ * @glb_base:	IOMMU config port base address for global register space (VA)
+ * @cb_base:	Context bank register space base address (VA)
+ * @smmu_local_base: SMMU local register space base address (VA)
+ * @vbif_base:	VBIF register space base address (VA)
+ * @ncb:	The number of contexts on this IOMMU
+ * @ttbr_split:	TTBR0/TTBR1 split configuration
+ * @clk:	The bus clock for this IOMMU hardware instance
+ * @pclk:	The clock for the IOMMU bus interconnect
+ * @aclk:	Alternate core clock for this IOMMU core, if any
+ * @aiclk:	Alternate interface clock for this IOMMU core, if any
+ * @name:	Human-readable name of this IOMMU device
+ * @gdsc:	Regulator needed to power this HW block (v2 only)
+ * @alt_gdsc:	Additional regulator needed to power this HW block, if any
+ * @bfb_settings: Optional BFB performance tuning parameters
+ * @sec_id:	Secure identifier used for secure (SCM) calls on this IOMMU
+ * @dev:	Struct device this hardware instance is tied to
+ * @list:	List head to link all iommus together
+ * @clk_reg_virt: Optional clock register virtual address.
+ * @halt_enabled: Set to 1 if IOMMU halt is supported in the IOMMU, 0 otherwise.
+ * @ctx_attach_count: Count of how many contexts are attached.
+ * @bus_client:	Bus client needed to vote for bus bandwidth.
+ * @needs_rem_spinlock: 1 if remote spinlock is needed, 0 otherwise
+ * @powered_on:	Powered status of the IOMMU. 0 means powered off.
+ * @model:	IOMMU model, one of enum model_id
+ * @asid_idr:	IDR used to manage ASID allocation for this IOMMU
+ *
+ * A msm_iommu_drvdata holds the global driver data about a single piece
+ * of an IOMMU hardware instance.
+ */
+struct msm_iommu_drvdata {
+	void __iomem *base;
+	phys_addr_t phys_base;
+	void __iomem *glb_base;
+	void __iomem *cb_base;
+	void __iomem *smmu_local_base;
+	void __iomem *vbif_base;
+	int ncb;
+	int ttbr_split;
+	struct clk *clk;
+	struct clk *pclk;
+	struct clk *aclk;
+	struct clk *aiclk;
+	const char *name;
+	struct regulator *gdsc;
+	struct regulator *alt_gdsc;
+	struct msm_iommu_bfb_settings *bfb_settings;
+	int sec_id;
+	struct device *dev;
+	struct list_head list;
+	void __iomem *clk_reg_virt;
+	int halt_enabled;
+	unsigned int ctx_attach_count;
+	unsigned int bus_client;
+	int needs_rem_spinlock;
+	int powered_on;
+	unsigned int model;
+	struct idr asid_idr;
+};
+
+/**
+ * struct iommu_access_ops - Callbacks for accessing IOMMU
+ * @iommu_power_on:     Turn on power to unit
+ * @iommu_power_off:    Turn off power to unit
+ * @iommu_bus_vote:     Vote for bus bandwidth
+ * @iommu_clk_on:       Turn on clks to unit
+ * @iommu_clk_off:      Turn off clks to unit
+ * @iommu_lock_initialize: Initialize the remote lock
+ * @iommu_lock_acquire: Acquire any locks needed
+ * @iommu_lock_release: Release locks needed
+ */
+struct iommu_access_ops {
+	int (*iommu_power_on)(struct msm_iommu_drvdata *);
+	void (*iommu_power_off)(struct msm_iommu_drvdata *);
+	int (*iommu_bus_vote)(struct msm_iommu_drvdata *drvdata,
+			      unsigned int vote);
+	int (*iommu_clk_on)(struct msm_iommu_drvdata *);
+	void (*iommu_clk_off)(struct msm_iommu_drvdata *);
+	void * (*iommu_lock_initialize)(void);
+	void (*iommu_lock_acquire)(unsigned int need_extra_lock);
+	void (*iommu_lock_release)(unsigned int need_extra_lock);
+};
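+
+/*
+ * Usage sketch (illustrative only): the call pattern implied by the ops
+ * above when touching IOMMU registers directly. The drvdata pointer and
+ * error handling are placeholders.
+ *
+ * Example:
+ *
+ *	struct iommu_access_ops *ops = msm_get_iommu_access_ops();
+ *
+ *	if (ops->iommu_power_on(drvdata))
+ *		return -EIO;
+ *	if (ops->iommu_clk_on(drvdata)) {
+ *		ops->iommu_power_off(drvdata);
+ *		return -EIO;
+ *	}
+ *	... access IOMMU registers here ...
+ *	ops->iommu_clk_off(drvdata);
+ *	ops->iommu_power_off(drvdata);
+ */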
+
+void msm_iommu_add_drv(struct msm_iommu_drvdata *drv);
+void msm_iommu_remove_drv(struct msm_iommu_drvdata *drv);
+void program_iommu_bfb_settings(void __iomem *base,
+			const struct msm_iommu_bfb_settings *bfb_settings);
+void iommu_halt(const struct msm_iommu_drvdata *iommu_drvdata);
+void iommu_resume(const struct msm_iommu_drvdata *iommu_drvdata);
+
+/**
+ * struct msm_iommu_ctx_drvdata - an IOMMU context bank instance
+ * @num:		Hardware context number of this context
+ * @pdev:		Platform device associated with this HW instance
+ * @attached_elm:	List element for domains to track which devices are
+ *			attached to them
+ * @attached_domain:	Domain currently attached to this context (if any)
+ * @name:		Human-readable name of this context device
+ * @sids:		List of Stream IDs mapped to this context
+ * @nsid:		Number of Stream IDs mapped to this context
+ * @secure_context:	true if this is a secure context programmed by
+ *			the secure environment, false otherwise
+ * @asid:		ASID used with this context.
+ * @attach_count:	Number of times this context has been attached.
+ * @sid_mask:		Stream ID masks paired with the entries in @sids
+ * @n_sid_mask:		Number of valid entries in @sid_mask
+ * @report_error_on_fault: true if the error is returned to the master
+ * @prefetch_depth:	Prefetch depth to program for this context
+ * @dynamic:		true if any dynamic domain is ever attached to this CB
+ *
+ * A msm_iommu_ctx_drvdata holds the driver data for a single context bank
+ * within each IOMMU hardware instance
+ */
+struct msm_iommu_ctx_drvdata {
+	int num;
+	struct platform_device *pdev;
+	struct list_head attached_elm;
+	struct iommu_domain *attached_domain;
+	const char *name;
+	u32 sids[MAX_NUM_SMR];
+	unsigned int nsid;
+	unsigned int secure_context;
+	int asid;
+	int attach_count;
+	u32 sid_mask[MAX_NUM_SMR];
+	unsigned int n_sid_mask;
+	bool report_error_on_fault;
+	unsigned int prefetch_depth;
+	bool dynamic;
+};
+
+enum dump_reg {
+	DUMP_REG_FIRST,
+	DUMP_REG_FAR0 = DUMP_REG_FIRST,
+	DUMP_REG_FAR1,
+	DUMP_REG_PAR0,
+	DUMP_REG_PAR1,
+	DUMP_REG_FSR,
+	DUMP_REG_FSYNR0,
+	DUMP_REG_FSYNR1,
+	DUMP_REG_TTBR0_0,
+	DUMP_REG_TTBR0_1,
+	DUMP_REG_TTBR1_0,
+	DUMP_REG_TTBR1_1,
+	DUMP_REG_SCTLR,
+	DUMP_REG_ACTLR,
+	DUMP_REG_PRRR,
+	DUMP_REG_MAIR0 = DUMP_REG_PRRR,
+	DUMP_REG_NMRR,
+	DUMP_REG_MAIR1 = DUMP_REG_NMRR,
+	DUMP_REG_CBAR_N,
+	DUMP_REG_CBFRSYNRA_N,
+	MAX_DUMP_REGS,
+};
+
+enum dump_reg_type {
+	DRT_CTX_REG,
+	DRT_GLOBAL_REG,
+	DRT_GLOBAL_REG_N,
+};
+
+enum model_id {
+	QSMMUv1 = 1,
+	QSMMUv2,
+	MMU_500 = 500,
+	MAX_MODEL,
+};
+
+struct dump_regs_tbl_entry {
+	/*
+	 * To keep things context-bank-agnostic, we only store the
+	 * register offset in `reg_offset'
+	 */
+	unsigned int reg_offset;
+	const char *name;
+	int must_be_present;
+	enum dump_reg_type dump_reg_type;
+};
+extern struct dump_regs_tbl_entry dump_regs_tbl[MAX_DUMP_REGS];
+
+#define COMBINE_DUMP_REG(upper, lower) (((u64) upper << 32) | lower)
+
+struct msm_iommu_context_reg {
+	uint32_t val;
+	bool valid;
+};
+
+void print_ctx_regs(struct msm_iommu_context_reg regs[]);
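+
+/*
+ * Usage sketch (illustrative only): rebuilding a 64-bit register from a
+ * context register dump, assuming the _0 entry holds the low word and the
+ * _1 entry the high word.
+ *
+ * Example:
+ *
+ *	struct msm_iommu_context_reg regs[MAX_DUMP_REGS];
+ *	u64 ttbr0;
+ *
+ *	... fill regs ...
+ *	if (regs[DUMP_REG_TTBR0_0].valid && regs[DUMP_REG_TTBR0_1].valid)
+ *		ttbr0 = COMBINE_DUMP_REG(regs[DUMP_REG_TTBR0_1].val,
+ *					 regs[DUMP_REG_TTBR0_0].val);
+ */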
+
+/*
+ * Interrupt handlers for the IOMMU fault interrupts. Hooking these
+ * interrupts is not yet supported in the API, but the handlers print an
+ * error message and dump useful IOMMU registers.
+ */
+irqreturn_t msm_iommu_global_fault_handler(int irq, void *dev_id);
+irqreturn_t msm_iommu_fault_handler(int irq, void *dev_id);
+irqreturn_t msm_iommu_fault_handler_v2(int irq, void *dev_id);
+irqreturn_t msm_iommu_secure_fault_handler_v2(int irq, void *dev_id);
+
+enum {
+	PROC_APPS,
+	PROC_GPU,
+	PROC_MAX
+};
+
+/* Exposed so that the kgsl iommu driver can use the same structure to
+ * communicate the addresses of the flag and turn variables to the GPU.
+ */
+struct remote_iommu_petersons_spinlock {
+	uint32_t flag[PROC_MAX];
+	uint32_t turn;
+};
+
+#ifdef CONFIG_MSM_IOMMU
+void *msm_iommu_lock_initialize(void);
+void msm_iommu_mutex_lock(void);
+void msm_iommu_mutex_unlock(void);
+void msm_set_iommu_access_ops(struct iommu_access_ops *ops);
+struct iommu_access_ops *msm_get_iommu_access_ops(void);
+#else
+static inline void *msm_iommu_lock_initialize(void)
+{
+	return NULL;
+}
+static inline void msm_iommu_mutex_lock(void) { }
+static inline void msm_iommu_mutex_unlock(void) { }
+static inline void msm_set_iommu_access_ops(struct iommu_access_ops *ops)
+{
+}
+static inline struct iommu_access_ops *msm_get_iommu_access_ops(void)
+{
+	return NULL;
+}
+#endif
+
+#ifdef CONFIG_MSM_IOMMU_SYNC
+void msm_iommu_remote_p0_spin_lock(unsigned int need_lock);
+void msm_iommu_remote_p0_spin_unlock(unsigned int need_lock);
+
+#define msm_iommu_remote_lock_init() _msm_iommu_remote_spin_lock_init()
+#define msm_iommu_remote_spin_lock(need_lock) \
+				msm_iommu_remote_p0_spin_lock(need_lock)
+#define msm_iommu_remote_spin_unlock(need_lock) \
+				msm_iommu_remote_p0_spin_unlock(need_lock)
+#else
+#define msm_iommu_remote_lock_init()
+#define msm_iommu_remote_spin_lock(need_lock)
+#define msm_iommu_remote_spin_unlock(need_lock)
+#endif
+
+#ifdef CONFIG_MSM_IOMMU
+/*
+ * Look up an IOMMU context device by its context name. Returns NULL if
+ * none is found. Useful for testing and for drivers that do not yet fully
+ * describe their IOMMU resources in their platform devices.
+ */
+struct device *msm_iommu_get_ctx(const char *ctx_name);
+struct bus_type *msm_iommu_get_bus(struct device *dev);
+int msm_iommu_bus_register(void);
+void msm_access_control(void);
+#else
+static inline struct device *msm_iommu_get_ctx(const char *ctx_name)
+{
+	return NULL;
+}
+
+static inline struct bus_type *msm_iommu_get_bus(struct device *dev)
+{
+	return &platform_bus_type;
+}
+
+/* No-op stub so callers build when CONFIG_MSM_IOMMU is disabled */
+static inline int msm_iommu_bus_register(void)
+{
+	return 0;
+}
+
+static inline void msm_access_control(void)
+{
+}
+#endif
+
+/*
+ * Helpers for programming the global registers of an IOMMU securely.
+ * These should only be used on IOMMUs for which kernel programming
+ * of the global registers is not possible.
+ */
+void msm_iommu_sec_set_access_ops(struct iommu_access_ops *access_ops);
+int msm_iommu_sec_program_iommu(struct msm_iommu_drvdata *drvdata,
+				struct msm_iommu_ctx_drvdata *ctx_drvdata);
+int is_vfe_secure(void);
+
+#ifdef CONFIG_MSM_IOMMU_V0
+static inline int msm_soc_version_supports_iommu_v0(void)
+{
+	static int soc_supports_v0 = -1;
+#ifdef CONFIG_OF
+	struct device_node *node;
+#endif
+
+	if (soc_supports_v0 != -1)
+		return soc_supports_v0;
+
+#ifdef CONFIG_OF
+	/* Prefer device tree detection when a v0 SMMU node is present */
+	node = of_find_compatible_node(NULL, NULL, "qcom,msm-smmu-v0");
+	if (node) {
+		soc_supports_v0 = 1;
+		of_node_put(node);
+		return 1;
+	}
+#endif
+	/* Otherwise fall back to SoC version checks */
+	if (cpu_is_msm8960() &&
+	    SOCINFO_VERSION_MAJOR(socinfo_get_version()) < 2) {
+		soc_supports_v0 = 0;
+		return 0;
+	}
+
+	if (cpu_is_msm8x60() &&
+	    (SOCINFO_VERSION_MAJOR(socinfo_get_version()) != 2 ||
+	    SOCINFO_VERSION_MINOR(socinfo_get_version()) < 1))	{
+		soc_supports_v0 = 0;
+		return 0;
+	}
+
+	soc_supports_v0 = 1;
+	return 1;
+}
+#else
+static inline int msm_soc_version_supports_iommu_v0(void)
+{
+	return 0;
+}
+#endif
+
+int msm_iommu_get_scm_call_avail(void);
+void msm_iommu_check_scm_call_avail(void);
+
+u32 msm_iommu_get_mair0(void);
+u32 msm_iommu_get_mair1(void);
+u32 msm_iommu_get_prrr(void);
+u32 msm_iommu_get_nmrr(void);
+
+/* events for notifiers passed to msm_iommu_register_notify */
+#define TLB_SYNC_TIMEOUT 1
+
+#ifdef CONFIG_MSM_IOMMU_V1
+void msm_iommu_register_notify(struct notifier_block *nb);
+#else
+static inline void msm_iommu_register_notify(struct notifier_block *nb)
+{
+}
+#endif
+
+#endif
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/include/linux/qcrypto.h	2019-01-22 16:16:28.363290417 +0100
@@ -0,0 +1,65 @@
+/* Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DRIVERS_CRYPTO_MSM_QCRYPTO_H_
+#define _DRIVERS_CRYPTO_MSM_QCRYPTO_H_
+
+#include <linux/crypto.h>
+#include <crypto/hash.h>
+
+#define QCRYPTO_CTX_KEY_MASK		0x000000ff
+#define QCRYPTO_CTX_USE_HW_KEY		0x00000001
+#define QCRYPTO_CTX_USE_PIPE_KEY	0x00000002
+
+#define QCRYPTO_CTX_XTS_MASK		0x0000ff00
+#define QCRYPTO_CTX_XTS_DU_SIZE_512B	0x00000100
+#define QCRYPTO_CTX_XTS_DU_SIZE_1KB	0x00000200
+
+
+int qcrypto_cipher_set_device(struct ablkcipher_request *req, unsigned int dev);
+int qcrypto_ahash_set_device(struct ahash_request *req, unsigned int dev);
+/*int qcrypto_aead_set_device(struct aead_request *req, unsigned int dev);*/
+
+int qcrypto_cipher_set_flag(struct ablkcipher_request *req, unsigned int flags);
+int qcrypto_ahash_set_flag(struct ahash_request *req, unsigned int flags);
+/*int qcrypto_aead_set_flag(struct aead_request *req, unsigned int flags);*/
+
+int qcrypto_cipher_clear_flag(struct ablkcipher_request *req,
+							unsigned int flags);
+int qcrypto_ahash_clear_flag(struct ahash_request *req, unsigned int flags);
+/*int qcrypto_aead_clear_flag(struct aead_request *req, unsigned int flags);*/
+
+struct crypto_engine_entry {
+	u32 hw_instance;
+	u32 ce_device;
+	int shared;
+};
+
+int qcrypto_get_num_engines(void);
+void qcrypto_get_engine_list(size_t num_engines,
+				struct crypto_engine_entry *arr);
+int qcrypto_cipher_set_device_hw(struct ablkcipher_request *req,
+				unsigned int fde_pfe,
+				unsigned int hw_inst);
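+
+/*
+ * Usage sketch (illustrative only): enumerating the available crypto
+ * engines. Allocation failure handling is abbreviated.
+ *
+ * Example:
+ *
+ *	int n = qcrypto_get_num_engines();
+ *	struct crypto_engine_entry *list;
+ *
+ *	if (n > 0) {
+ *		list = kcalloc(n, sizeof(*list), GFP_KERNEL);
+ *		if (list) {
+ *			qcrypto_get_engine_list(n, list);
+ *			... pick an engine, e.g. list[0].hw_instance ...
+ *			kfree(list);
+ *		}
+ *	}
+ */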
+
+
+struct qcrypto_func_set {
+	int (*cipher_set)(struct ablkcipher_request *req,
+			unsigned int fde_pfe,
+			unsigned hw_inst);
+	int (*cipher_flag)(struct ablkcipher_request *req, unsigned int flags);
+	int (*get_num_engines)(void);
+	void (*get_engine_list)(size_t num_engines,
+				struct crypto_engine_entry *arr);
+};
+
+#endif /* _DRIVERS_CRYPTO_MSM_QCRYPTO_H_ */
diff -Nruw linux-4.4.115-fbx/include/linux/qdsp6v2./apr.h linux-4.4.115-fbx/include/linux/qdsp6v2/apr.h
--- linux-4.4.115-fbx/include/linux/qdsp6v2./apr.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/include/linux/qdsp6v2/apr.h	2019-10-29 09:26:25.473221087 +0100
@@ -0,0 +1,194 @@
+/* Copyright (c) 2010-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef __APR_H_
+#define __APR_H_
+
+#include <linux/mutex.h>
+#include <soc/qcom/subsystem_notif.h>
+
+enum apr_subsys_state {
+	APR_SUBSYS_DOWN,
+	APR_SUBSYS_UP,
+	APR_SUBSYS_LOADED,
+};
+
+struct apr_q6 {
+	void *pil;
+	atomic_t q6_state;
+	atomic_t modem_state;
+	struct mutex lock;
+};
+
+struct apr_hdr {
+	uint16_t hdr_field;
+	uint16_t pkt_size;
+	uint8_t src_svc;
+	uint8_t src_domain;
+	uint16_t src_port;
+	uint8_t dest_svc;
+	uint8_t dest_domain;
+	uint16_t dest_port;
+	uint32_t token;
+	uint32_t opcode;
+};
+
+#define APR_HDR_LEN(hdr_len) ((hdr_len)/4)
+#define APR_PKT_SIZE(hdr_len, payload_len) ((hdr_len) + (payload_len))
+#define APR_HDR_FIELD(msg_type, hdr_len, ver)\
+	(((msg_type & 0x3) << 8) | ((hdr_len & 0xF) << 4) | (ver & 0xF))
+
+#define APR_HDR_SIZE sizeof(struct apr_hdr)
+
+/* Version */
+#define APR_PKT_VER		0x0
+
+/* Command and Response Types */
+#define APR_MSG_TYPE_EVENT	0x0
+#define APR_MSG_TYPE_CMD_RSP	0x1
+#define APR_MSG_TYPE_SEQ_CMD	0x2
+#define APR_MSG_TYPE_NSEQ_CMD	0x3
+#define APR_MSG_TYPE_MAX	0x04
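+
+/*
+ * Usage sketch (illustrative only): filling an apr_hdr with the helper
+ * macros above for a sequential command carrying a 16-byte payload. The
+ * opcode, token and port values are placeholders.
+ *
+ * Example:
+ *
+ *	struct apr_hdr hdr;
+ *
+ *	hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+ *				      APR_HDR_LEN(APR_HDR_SIZE),
+ *				      APR_PKT_VER);
+ *	hdr.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE, 16);
+ *	hdr.src_port = 0;
+ *	hdr.dest_port = 0;
+ *	hdr.token = 0;
+ *	hdr.opcode = 0;
+ */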
+
+/* APR Basic Response Message */
+#define APR_BASIC_RSP_RESULT 0x000110E8
+#define APR_RSP_ACCEPTED     0x000100BE
+
+/* Domain IDs */
+#define APR_DOMAIN_SIM	0x1
+#define APR_DOMAIN_PC		0x2
+#define APR_DOMAIN_MODEM	0x3
+#define APR_DOMAIN_ADSP	0x4
+#define APR_DOMAIN_APPS	0x5
+#define APR_DOMAIN_MAX	0x6
+
+/* ADSP service IDs */
+#define APR_SVC_TEST_CLIENT     0x2
+#define APR_SVC_ADSP_CORE	0x3
+#define APR_SVC_AFE		0x4
+#define APR_SVC_VSM		0x5
+#define APR_SVC_VPM		0x6
+#define APR_SVC_ASM		0x7
+#define APR_SVC_ADM		0x8
+#define APR_SVC_ADSP_MVM	0x09
+#define APR_SVC_ADSP_CVS	0x0A
+#define APR_SVC_ADSP_CVP	0x0B
+#define APR_SVC_USM		0x0C
+#define APR_SVC_LSM		0x0D
+#define APR_SVC_VIDC		0x16
+#define APR_SVC_MAX		0x17
+
+/* Modem Service IDs */
+#define APR_SVC_MVS		0x3
+#define APR_SVC_MVM		0x4
+#define APR_SVC_CVS		0x5
+#define APR_SVC_CVP		0x6
+#define APR_SVC_SRD		0x7
+
+/* APR Port IDs */
+#define APR_MAX_PORTS		0x80
+
+#define APR_NAME_MAX		0x40
+
+#define RESET_EVENTS		0x000130D7
+
+#define LPASS_RESTART_EVENT	0x1000
+#define LPASS_RESTART_READY	0x1001
+
+struct apr_client_data {
+	uint16_t reset_event;
+	uint16_t reset_proc;
+	uint16_t payload_size;
+	uint16_t hdr_len;
+	uint16_t msg_type;
+	uint16_t src;
+	uint16_t dest_svc;
+	uint16_t src_port;
+	uint16_t dest_port;
+	uint32_t token;
+	uint32_t opcode;
+	void *payload;
+};
+
+typedef int32_t (*apr_fn)(struct apr_client_data *data, void *priv);
+
+struct apr_svc {
+	uint16_t id;
+	uint16_t dest_id;
+	uint16_t client_id;
+	uint16_t dest_domain;
+	uint8_t rvd;
+	uint8_t port_cnt;
+	uint8_t svc_cnt;
+	uint8_t need_reset;
+	apr_fn port_fn[APR_MAX_PORTS];
+	void *port_priv[APR_MAX_PORTS];
+	apr_fn fn;
+	void *priv;
+	struct mutex m_lock;
+	spinlock_t w_lock;
+	uint8_t pkt_owner;
+#ifdef CONFIG_MSM_QDSP6_APRV2_VM
+	uint16_t vm_dest_svc;
+	uint32_t vm_handle;
+#endif
+};
+
+struct apr_client {
+	uint8_t id;
+	uint8_t svc_cnt;
+	uint8_t rvd;
+	struct mutex m_lock;
+	struct apr_svc_ch_dev *handle;
+	struct apr_svc svc[APR_SVC_MAX];
+};
+
+struct apr_rx_intents {
+	int num_of_intents;
+	uint32_t size;
+};
+
+struct apr_pkt_cfg {
+	uint8_t pkt_owner;
+	struct apr_rx_intents intents;
+};
+
+int apr_load_adsp_image(void);
+struct apr_client *apr_get_client(int dest_id, int client_id);
+int apr_wait_for_device_up(int dest_id);
+int apr_get_svc(const char *svc_name, int dest_id, int *client_id,
+		int *svc_idx, int *svc_id);
+void apr_cb_func(void *buf, int len, void *priv);
+struct apr_svc *apr_register(char *dest, char *svc_name, apr_fn svc_fn,
+					uint32_t src_port, void *priv);
+inline int apr_fill_hdr(void *handle, uint32_t *buf, uint16_t src_port,
+			uint16_t msg_type, uint16_t dest_port,
+			uint32_t token, uint32_t opcode, uint16_t len);
+
+int apr_send_pkt(void *handle, uint32_t *buf);
+int apr_deregister(void *handle);
+void subsys_notif_register(char *client_name, int domain,
+			   struct notifier_block *nb);
+int apr_get_dest_id(char *dest);
+uint16_t apr_get_data_src(struct apr_hdr *hdr);
+void change_q6_state(int state);
+void q6audio_dsp_not_responding(void);
+void apr_reset(void *handle);
+enum apr_subsys_state apr_get_subsys_state(void);
+enum apr_subsys_state apr_get_modem_state(void);
+void apr_set_modem_state(enum apr_subsys_state state);
+enum apr_subsys_state apr_get_q6_state(void);
+int apr_set_q6_state(enum apr_subsys_state state);
+void apr_set_subsys_state(void);
+const char *apr_get_lpass_subsys_name(void);
+uint16_t apr_get_reset_domain(uint16_t proc);
+#endif
diff -Nruw linux-4.4.115-fbx/include/linux/qdsp6v2./apr_tal.h linux-4.4.115-fbx/include/linux/qdsp6v2/apr_tal.h
--- linux-4.4.115-fbx/include/linux/qdsp6v2./apr_tal.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/include/linux/qdsp6v2/apr_tal.h	2019-10-29 09:26:25.473221087 +0100
@@ -0,0 +1,107 @@
+/* Copyright (c) 2010-2011, 2016-2017 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef __APR_TAL_H_
+#define __APR_TAL_H_
+
+#include <linux/kernel.h>
+#include <linux/kthread.h>
+#include <linux/uaccess.h>
+
+/* APR Client IDs */
+#define APR_CLIENT_AUDIO	0x0
+#define APR_CLIENT_VOICE	0x1
+#define APR_CLIENT_MAX		0x2
+
+#define APR_DL_SMD    0
+#define APR_DL_MAX    1
+
+#define APR_DEST_MODEM 0
+#define APR_DEST_QDSP6 1
+#define APR_DEST_MAX   2
+
+#if defined(CONFIG_MSM_QDSP6_APRV2_GLINK) || \
+	defined(CONFIG_MSM_QDSP6_APRV3_GLINK)
+#define APR_MAX_BUF			512
+#else
+#define APR_MAX_BUF			8092
+#endif
+
+#define APR_DEFAULT_NUM_OF_INTENTS 20
+
+#define APR_OPEN_TIMEOUT_MS 5000
+
+enum {
+	/* If the client sets pkt_owner to APR_PKT_OWNER_DRIVER, the APR
+	 * driver allocates a buffer for each and every Tx transmission,
+	 * copies the user packet into it, passes the buffer to the
+	 * underlying link layer, and frees it upon notification from the
+	 * link layer that the packet has been consumed.
+	 */
+	APR_PKT_OWNER_DRIVER,
+	/* If the client sets pkt_owner to APR_PKT_OWNER_CLIENT, APR passes
+	 * the user packet memory address directly to the underlying link
+	 * layer. In this case it is the client's responsibility to keep
+	 * the packet intact until notified that it has been consumed.
+	 */
+	APR_PKT_OWNER_CLIENT,
+};
+
+struct apr_pkt_priv {
+	/* This property is only applicable for APR over Glink.
+	 * It is ignored in APR over SMD cases.
+	 */
+	uint8_t pkt_owner;
+};
+
+typedef void (*apr_svc_cb_fn)(void *buf, int len, void *priv);
+struct apr_svc_ch_dev *apr_tal_open(uint32_t svc, uint32_t dest,
+			uint32_t dl, apr_svc_cb_fn func, void *priv);
+int apr_tal_write(struct apr_svc_ch_dev *apr_ch, void *data,
+		struct apr_pkt_priv *pkt_priv, int len);
+int apr_tal_close(struct apr_svc_ch_dev *apr_ch);
+int apr_tal_rx_intents_config(struct apr_svc_ch_dev *apr_ch,
+		int num_of_intents, uint32_t size);
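+
+/*
+ * Usage sketch (illustrative only): opening an audio channel to the aDSP
+ * over SMD and writing one packet. The callback, packet buffer and length
+ * are placeholders; pkt_priv is only meaningful for Glink transports.
+ *
+ * Example:
+ *
+ *	struct apr_svc_ch_dev *ch;
+ *	struct apr_pkt_priv pp = { .pkt_owner = APR_PKT_OWNER_DRIVER };
+ *
+ *	ch = apr_tal_open(APR_CLIENT_AUDIO, APR_DEST_QDSP6, APR_DL_SMD,
+ *			  my_cb, NULL);
+ *	if (ch) {
+ *		apr_tal_write(ch, pkt, &pp, pkt_len);
+ *		apr_tal_close(ch);
+ *	}
+ */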
+
+
+#if defined(CONFIG_MSM_QDSP6_APRV2_GLINK) || \
+	 defined(CONFIG_MSM_QDSP6_APRV3_GLINK)
+struct apr_svc_ch_dev {
+	void               *handle;
+	spinlock_t         w_lock;
+	spinlock_t         r_lock;
+	struct mutex       m_lock;
+	apr_svc_cb_fn      func;
+	wait_queue_head_t  wait;
+	void               *priv;
+	unsigned           channel_state;
+	bool               if_remote_intent_ready;
+};
+#else
+struct apr_svc_ch_dev {
+	struct smd_channel *ch;
+	spinlock_t         lock;
+	spinlock_t         w_lock;
+	struct mutex       m_lock;
+	apr_svc_cb_fn      func;
+	char               data[APR_MAX_BUF];
+	wait_queue_head_t  wait;
+	void               *priv;
+	uint32_t           smd_state;
+	wait_queue_head_t  dest;
+	uint32_t           dest_state;
+};
+#endif
+
+#endif
diff -Nruw linux-4.4.115-fbx/include/linux/qdsp6v2./apr_us.h linux-4.4.115-fbx/include/linux/qdsp6v2/apr_us.h
--- linux-4.4.115-fbx/include/linux/qdsp6v2./apr_us.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/include/linux/qdsp6v2/apr_us.h	2019-01-22 16:16:28.363290417 +0100
@@ -0,0 +1,193 @@
+/* Copyright (c) 2011-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef __APR_US_H__
+#define __APR_US_H__
+
+#include <linux/qdsp6v2/apr.h>
+
+/* ======================================================================= */
+/*  Session Level commands */
+
+#define USM_SESSION_CMD_RUN				0x00012306
+struct usm_stream_cmd_run {
+	struct apr_hdr hdr;
+	u32            flags;
+	u32            msw_ts;
+	u32            lsw_ts;
+} __packed;
+
+/* Stream level commands */
+#define USM_STREAM_CMD_OPEN_READ			0x00012309
+struct usm_stream_cmd_open_read {
+	struct apr_hdr hdr;
+	u32            uMode;
+	u32            src_endpoint;
+	u32            pre_proc_top;
+	u32            format;
+} __packed;
+
+#define USM_STREAM_CMD_OPEN_WRITE			0x00011271
+struct usm_stream_cmd_open_write {
+	struct apr_hdr hdr;
+	u32            format;
+} __packed;
+
+
+#define USM_STREAM_CMD_CLOSE				0x0001230A
+
+#define USM_STREAM_CMD_SET_PARAM			0x00012731
+struct usm_stream_cmd_set_param {
+	struct apr_hdr hdr;
+	u32            buf_addr_lsw;
+	u32            buf_addr_msw;
+	u32            mem_map_handle;
+	u32            buf_size;
+	u32            module_id;
+	u32            param_id;
+} __packed;
+
+#define USM_STREAM_CMD_GET_PARAM			0x00012732
+struct usm_stream_cmd_get_param {
+	struct apr_hdr hdr;
+	u32            buf_addr_lsw;
+	u32            buf_addr_msw;
+	u32            mem_map_handle;
+	u32            buf_size;
+	u32            module_id;
+	u32            param_id;
+} __packed;
+
+/* Encoder configuration definitions */
+#define USM_STREAM_CMD_SET_ENC_PARAM			0x0001230B
+/* Decoder configuration definitions */
+#define USM_DATA_CMD_MEDIA_FORMAT_UPDATE		0x00011272
+
+/* Encoder/decoder configuration block */
+#define USM_PARAM_ID_ENCDEC_ENC_CFG_BLK			0x0001230D
+
+/* Max number of statically allocated ports */
+#define USM_MAX_PORT_NUMBER 8
+
+/* Max size of statically allocated transparent data (bytes) */
+#define USM_MAX_CFG_DATA_SIZE 100
+
+/* Parameter structures used in the USM_STREAM_CMD_SET_ENC_PARAM command */
+/* common declarations */
+struct usm_cfg_common {
+	u16 ch_cfg;
+	u16 bits_per_sample;
+	u32 sample_rate;
+	u32 dev_id;
+	u8 data_map[USM_MAX_PORT_NUMBER];
+} __packed;
+
+struct us_encdec_cfg {
+	u32 format_id;
+	struct usm_cfg_common cfg_common;
+	u16 params_size;
+	u8 *params;
+} __packed;
+
+/* Start/stop US signal detection */
+#define USM_SESSION_CMD_SIGNAL_DETECT_MODE		0x00012719
+
+struct usm_session_cmd_detect_info {
+	struct apr_hdr hdr;
+	u32 detect_mode;
+	u32 skip_interval;
+	u32 algorithm_cfg_size;
+} __packed;
+
+/* US signal detection result */
+#define USM_SESSION_EVENT_SIGNAL_DETECT_RESULT		0x00012720
+
+/* ======================================================================= */
+/*  Session Level commands */
+#define USM_CMD_SHARED_MEM_MAP_REGION		0x00012728
+struct usm_cmd_memory_map_region {
+	struct apr_hdr hdr;
+	u16            mempool_id;
+	u16            num_regions;
+	u32            flags;
+	u32            shm_addr_lsw;
+	u32            shm_addr_msw;
+	u32            mem_size_bytes;
+} __packed;
+
+#define USM_CMDRSP_SHARED_MEM_MAP_REGION	0x00012729
+struct usm_cmdrsp_memory_map_region {
+	u32            mem_map_handle;
+} __packed;
+
+#define USM_CMD_SHARED_MEM_UNMAP_REGION         0x0001272A
+struct usm_cmd_memory_unmap_region {
+	struct apr_hdr hdr;
+	u32            mem_map_handle;
+} __packed;
+
+#define USM_DATA_CMD_READ			0x00012724
+struct usm_stream_cmd_read {
+	struct apr_hdr hdr;
+	u32            buf_addr_lsw;
+	u32            buf_addr_msw;
+	u32            mem_map_handle;
+	u32            buf_size;
+	u32            seq_id;
+	u32            counter;
+} __packed;
+
+#define USM_DATA_EVENT_READ_DONE		0x00012725
+
+#define USM_DATA_CMD_WRITE			0x00012726
+struct usm_stream_cmd_write {
+	struct apr_hdr hdr;
+	u32            buf_addr_lsw;
+	u32            buf_addr_msw;
+	u32            mem_map_handle;
+	u32            buf_size;
+	u32            seq_id;
+	u32            res0;
+	u32            res1;
+	u32            res2;
+} __packed;
+
+#define USM_DATA_EVENT_WRITE_DONE		0x00012727
+
+struct usm_stream_media_format_update {
+	struct apr_hdr hdr;
+	u32 format_id;
+	/* <cfg_size> = sizeof(usm_cfg_common)+|transp_data| */
+	u32 cfg_size;
+	struct usm_cfg_common cfg_common;
+	/* Transparent configuration data for specific encoder */
+	u8  transp_data[USM_MAX_CFG_DATA_SIZE];
+} __packed;
+
+struct usm_encode_cfg_blk {
+	u32 frames_per_buf;
+	u32 format_id;
+	/* <cfg_size> = sizeof(usm_cfg_common)+|transp_data| */
+	u32 cfg_size;
+	struct usm_cfg_common cfg_common;
+	/* Transparent configuration data for specific encoder */
+	u8  transp_data[USM_MAX_CFG_DATA_SIZE];
+} __packed;
+
+struct usm_stream_cmd_encdec_cfg_blk {
+	struct apr_hdr hdr;
+	u32 param_id;
+	u32 param_size;
+	struct usm_encode_cfg_blk enc_blk;
+} __packed;
+
+#endif /* __APR_US_H__ */
diff -Nruw linux-4.4.115-fbx/include/linux/qdsp6v2./audio_notifier.h linux-4.4.115-fbx/include/linux/qdsp6v2/audio_notifier.h
--- linux-4.4.115-fbx/include/linux/qdsp6v2./audio_notifier.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/include/linux/qdsp6v2/audio_notifier.h	2019-10-29 09:26:25.473221087 +0100
@@ -0,0 +1,105 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __AUDIO_NOTIFIER_H_
+#define __AUDIO_NOTIFIER_H_
+
+/* State of the notifier domain */
+enum {
+	AUDIO_NOTIFIER_SERVICE_DOWN,
+	AUDIO_NOTIFIER_SERVICE_UP
+};
+
+/* Service order determines connection priority
+ * Highest number connected first
+ */
+enum {
+	AUDIO_NOTIFIER_SSR_SERVICE,
+	AUDIO_NOTIFIER_PDR_SERVICE,
+	AUDIO_NOTIFIER_MAX_SERVICES
+};
+
+enum {
+	AUDIO_NOTIFIER_ADSP_DOMAIN,
+	AUDIO_NOTIFIER_MODEM_DOMAIN,
+	AUDIO_NOTIFIER_MAX_DOMAINS
+};
+
+/* Structure populated in void *data of nb function
+ * callback used for audio_notifier_register
+ */
+struct audio_notifier_cb_data {
+	int service;
+	int domain;
+};
+
+#ifdef CONFIG_MSM_QDSP6_NOTIFIER
+
+/*
+ * Use audio_notifier_register to register any audio
+ * clients who need to be notified of a remote process.
+ * This API will determine and register the client with
+ * the best available subsystem (SSR or PDR) for that
+ * domain (ADSP or modem). When an event is sent from that
+ * domain, the notifier block callback function will be called.
+ *
+ * client_name - A unique user name defined by the client.
+ *	If the same name is used for multiple calls, each will
+ *	be tracked & called back separately and a single call
+ *	to deregister will delete them all.
+ * domain - Domain the client wants to get events from.
+ *	AUDIO_NOTIFIER_ADSP_DOMAIN
+ *	AUDIO_NOTIFIER_MODEM_DOMAIN
+ * *nb - Pointer to a notifier block. Provide a callback function
+ *	to be notified of an event on that domain.
+ *
+ *      nb_func(struct notifier_block *this, unsigned long opcode, void *data)
+ *		this - pointer to own nb
+ *		opcode - event from registered domain
+ *			AUDIO_NOTIFIER_SERVICE_DOWN
+ *			AUDIO_NOTIFIER_SERVICE_UP
+ *		*data - pointer to struct audio_notifier_cb_data
+ *
+ * Returns:	Success: 0
+ *		Error: negative error code
+ */
+int audio_notifier_register(char *client_name, int domain,
+			    struct notifier_block *nb);
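+
+/*
+ * Usage sketch (illustrative only): a client callback matching the
+ * signature documented above, registered for aDSP events. The client name
+ * and the down-handling are placeholders.
+ *
+ * Example:
+ *
+ *	static int my_audio_notify(struct notifier_block *this,
+ *				   unsigned long opcode, void *data)
+ *	{
+ *		struct audio_notifier_cb_data *cb = data;
+ *
+ *		if (opcode == AUDIO_NOTIFIER_SERVICE_DOWN)
+ *			... quiesce; cb->service and cb->domain say who ...
+ *		return NOTIFY_OK;
+ *	}
+ *
+ *	static struct notifier_block my_nb = {
+ *		.notifier_call = my_audio_notify,
+ *	};
+ *
+ *	audio_notifier_register("my_client", AUDIO_NOTIFIER_ADSP_DOMAIN,
+ *				&my_nb);
+ */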
+
+/*
+ * Use audio_notifier_deregister to deregister the clients from
+ * all domains registered using audio_notifier_register that
+ * match the client name.
+ *
+ * client_name - Unique user name used in audio_notifier_register.
+ * Returns:	Success: 0
+ *		Error: negative error code
+ */
+int audio_notifier_deregister(char *client_name);
+
+#else
+
+static inline int audio_notifier_register(char *client_name, int domain,
+					  struct notifier_block *nb)
+{
+	return -ENODEV;
+}
+
+static inline int audio_notifier_deregister(char *client_name)
+{
+	return 0;
+}
+
+#endif /* CONFIG_MSM_QDSP6_NOTIFIER */
+
+#endif
diff -Nruw linux-4.4.115-fbx/include/linux/qdsp6v2./audio_pdr.h linux-4.4.115-fbx/include/linux/qdsp6v2/audio_pdr.h
--- linux-4.4.115-fbx/include/linux/qdsp6v2./audio_pdr.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/include/linux/qdsp6v2/audio_pdr.h	2019-01-22 16:16:28.363290417 +0100
@@ -0,0 +1,101 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __AUDIO_PDR_H_
+#define __AUDIO_PDR_H_
+
+enum {
+	AUDIO_PDR_DOMAIN_ADSP,
+	AUDIO_PDR_DOMAIN_MAX
+};
+
+enum {
+	AUDIO_PDR_FRAMEWORK_DOWN,
+	AUDIO_PDR_FRAMEWORK_UP
+};
+
+#ifdef CONFIG_MSM_QDSP6_PDR
+
+/*
+ * Use audio_pdr_register to register with the PDR subsystem. This
+ * should be done before module late init; otherwise notification
+ * of the AUDIO_PDR_FRAMEWORK_UP event cannot be guaranteed.
+ *
+ * *nb - Pointer to a notifier block. Provide a callback function
+ *       to be notified once the PDR framework has been initialized.
+ *       The callback will receive either the AUDIO_PDR_FRAMEWORK_DOWN
+ *       or AUDIO_PDR_FRAMEWORK_UP event depending on the state of
+ *       the PDR framework.
+ *
+ * Returns: Success: 0
+ *          Failure: Error code
+ */
+int audio_pdr_register(struct notifier_block *nb);
+
+/*
+ * Use audio_pdr_service_register to register with a PDR service.
+ * This function should be called after the nb callback registered
+ * with audio_pdr_register has been called back with the
+ * AUDIO_PDR_FRAMEWORK_UP event.
+ *
+ * domain_id - Domain to use, example: AUDIO_PDR_DOMAIN_ADSP
+ * *nb - Pointer to a notifier block. Provide a callback function
+ *       that will be notified of the state of the domain
+ *       requested. The events received by the callback are
+ *       defined in service-notifier.h.
+ * *curr_state - Filled with the current state of the service.
+ *
+ * Returns: Success: Client handle
+ *          Failure: Pointer error code
+ */
+void *audio_pdr_service_register(int domain_id,
+				 struct notifier_block *nb, int *curr_state);
+
+/*
+ * Use audio_pdr_service_deregister to deregister from a PDR
+ * service that was registered using the audio_pdr_service_register
+ * API.
+ *
+ * *service_handle - Service handle returned by audio_pdr_service_register
+ * *nb - Pointer to the notifier block. Used in the call to
+ *       audio_pdr_service_register.
+ *
+ * Returns: Success: 0
+ *          Failure: Error code
+ */
+int audio_pdr_service_deregister(void *service_handle,
+				 struct notifier_block *nb);
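+
+/*
+ * Usage sketch (illustrative only) of the two-step flow implied above:
+ * register for framework state first, then register the service once the
+ * framework reports up. The service notifier block and handle storage are
+ * placeholders.
+ *
+ * Example:
+ *
+ *	static int pdr_fw_cb(struct notifier_block *this,
+ *			     unsigned long opcode, void *data)
+ *	{
+ *		int state;
+ *
+ *		if (opcode == AUDIO_PDR_FRAMEWORK_UP)
+ *			my_handle = audio_pdr_service_register(
+ *					AUDIO_PDR_DOMAIN_ADSP,
+ *					&my_service_nb, &state);
+ *		return NOTIFY_OK;
+ *	}
+ *
+ *	static struct notifier_block fw_nb = { .notifier_call = pdr_fw_cb };
+ *
+ *	audio_pdr_register(&fw_nb);
+ */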
+
+#else
+
+static inline int audio_pdr_register(struct notifier_block *nb)
+{
+	return -ENODEV;
+}
+
+
+static inline void *audio_pdr_service_register(int domain_id,
+					       struct notifier_block *nb,
+					       int *curr_state)
+{
+	return NULL;
+}
+
+static inline int audio_pdr_service_deregister(void *service_handle,
+					       struct notifier_block *nb)
+{
+	return 0;
+}
+
+#endif /* CONFIG_MSM_QDSP6_PDR */
+
+#endif
diff -Nruw linux-4.4.115-fbx/include/linux/qdsp6v2./audio_ssr.h linux-4.4.115-fbx/include/linux/qdsp6v2/audio_ssr.h
--- linux-4.4.115-fbx/include/linux/qdsp6v2./audio_ssr.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/include/linux/qdsp6v2/audio_ssr.h	2019-01-22 16:16:28.363290417 +0100
@@ -0,0 +1,78 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __AUDIO_SSR_H_
+#define __AUDIO_SSR_H_
+
+enum {
+	AUDIO_SSR_DOMAIN_ADSP,
+	AUDIO_SSR_DOMAIN_MODEM,
+	AUDIO_SSR_DOMAIN_MAX
+};
+
+#ifdef CONFIG_MSM_QDSP6_SSR
+
+/*
+ * Use audio_ssr_register to register with the SSR subsystem
+ *
+ * domain_id - Service to use, example: AUDIO_SSR_DOMAIN_ADSP
+ * *nb - Pointer to a notifier block. Provide a callback function
+ *       to be notified of an event for that service. The event codes
+ *       passed to the callback are defined in subsystem_notif.h.
+ *
+ * Returns: Success: Client handle
+ *          Failure: Pointer error code
+ */
+void *audio_ssr_register(int domain_id, struct notifier_block *nb);
+
+/*
+ * Use audio_ssr_deregister to deregister from the SSR subsystem
+ *
+ * handle - Handle received from audio_ssr_register
+ * *nb - Pointer to the notifier block used in the call to
+ *       audio_ssr_register.
+ *
+ * Returns: Success: 0
+ *          Failure: Error code
+ */
+int audio_ssr_deregister(void *handle, struct notifier_block *nb);
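+
+/*
+ * Usage sketch (illustrative only): registering for aDSP SSR events and
+ * deregistering on teardown. The notifier block is a placeholder.
+ *
+ * Example:
+ *
+ *	void *h = audio_ssr_register(AUDIO_SSR_DOMAIN_ADSP, &my_nb);
+ *
+ *	if (!IS_ERR_OR_NULL(h)) {
+ *		...
+ *		audio_ssr_deregister(h, &my_nb);
+ *	}
+ */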
+
+
+/*
+ * Use audio_ssr_send_nmi to force a RAM dump on an ADSP
+ * down event.
+ *
+ * *ssr_cb_data - *data received from the notifier callback
+ */
+void audio_ssr_send_nmi(void *ssr_cb_data);
+
+#else
+
+static inline void *audio_ssr_register(int domain_id,
+				       struct notifier_block *nb)
+{
+	return NULL;
+}
+
+static inline int audio_ssr_deregister(void *handle, struct notifier_block *nb)
+{
+	return 0;
+}
+
+static inline void audio_ssr_send_nmi(void *ssr_cb_data)
+{
+}
+
+#endif /* CONFIG_MSM_QDSP6_SSR */
+
+#endif
diff -Nruw linux-4.4.115-fbx/include/linux/qdsp6v2./dsp_debug.h linux-4.4.115-fbx/include/linux/qdsp6v2/dsp_debug.h
--- linux-4.4.115-fbx/include/linux/qdsp6v2./dsp_debug.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/include/linux/qdsp6v2/dsp_debug.h	2019-01-22 16:16:28.363290417 +0100
@@ -0,0 +1,22 @@
+/* Copyright (c) 2010, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef __DSP_DEBUG_H_
+#define __DSP_DEBUG_H_
+
+typedef int (*dsp_state_cb)(int state);
+int dsp_debug_register(dsp_state_cb ptr);
+
+#define DSP_STATE_CRASHED         0x0
+#define DSP_STATE_CRASH_DUMP_DONE 0x1
+
+#endif
diff -Nruw linux-4.4.115-fbx/include/linux/qdsp6v2./rtac.h linux-4.4.115-fbx/include/linux/qdsp6v2/rtac.h
--- linux-4.4.115-fbx/include/linux/qdsp6v2./rtac.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/include/linux/qdsp6v2/rtac.h	2019-01-22 16:16:28.363290417 +0100
@@ -0,0 +1,100 @@
+/* Copyright (c) 2011, 2013-2015, 2017, The Linux Foundation. All rights
+ * reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __RTAC_H__
+#define __RTAC_H__
+
+#include <sound/apr_audio-v2.h>
+
+/* Voice Modes */
+#define RTAC_CVP		0
+#define RTAC_CVS		1
+#define RTAC_VOICE_MODES	2
+
+#define RTAC_MAX_ACTIVE_DEVICES		4
+#define RTAC_MAX_ACTIVE_POPP		8
+
+#define DEFAULT_APP_TYPE	0x00011130
+
+enum {
+	ADM_RTAC_CAL,
+	ASM_RTAC_CAL,
+	VOICE_RTAC_CAL,
+	AFE_RTAC_CAL,
+	MAX_RTAC_BLOCKS
+};
+
+struct rtac_cal_mem_map_data {
+	uint32_t		map_size;
+	uint32_t		map_handle;
+	struct ion_client	*ion_client;
+	struct ion_handle	*ion_handle;
+};
+
+struct rtac_cal_data {
+	size_t			size;
+	void			*kvaddr;
+	phys_addr_t		paddr;
+};
+
+struct rtac_cal_block_data {
+	struct rtac_cal_mem_map_data	map_data;
+	struct rtac_cal_data		cal_data;
+};
+
+struct rtac_popp_data {
+	uint32_t	popp;
+	uint32_t	popp_topology;
+	uint32_t	app_type;
+};
+
+struct rtac_adm_data {
+	uint32_t		topology_id;
+	uint32_t		afe_topology;
+	uint32_t		afe_port;
+	uint32_t		copp;
+	uint32_t		num_of_popp;
+	uint32_t		app_type;
+	uint32_t		acdb_dev_id;
+	struct rtac_popp_data	popp[RTAC_MAX_ACTIVE_POPP];
+};
+
+struct rtac_adm {
+	uint32_t			num_of_dev;
+	struct rtac_adm_data		device[RTAC_MAX_ACTIVE_DEVICES];
+};
+
+void rtac_add_adm_device(u32 port_id, u32 copp_id, u32 path_id, u32 popp_id,
+			u32 app_type, u32 acdb_dev_id);
+void rtac_remove_adm_device(u32 port_id, u32 copp_id);
+void rtac_remove_popp_from_adm_devices(u32 popp_id);
+void rtac_add_voice(u32 cvs_handle, u32 cvp_handle, u32 rx_afe_port,
+	u32 tx_afe_port, u32 rx_acdb_id, u32 tx_acdb_id, u32 session_id);
+void rtac_remove_voice(u32 cvs_handle);
+void rtac_set_adm_handle(void *handle);
+bool rtac_make_adm_callback(uint32_t *payload, u32 payload_size);
+void rtac_copy_adm_payload_to_user(void *payload, u32 payload_size);
+void rtac_set_asm_handle(u32 session_id, void *handle);
+bool rtac_make_asm_callback(u32 session_id, uint32_t *payload,
+	u32 payload_size);
+void rtac_copy_asm_payload_to_user(void *payload, u32 payload_size);
+void rtac_set_voice_handle(u32 mode, void *handle);
+bool rtac_make_voice_callback(u32 mode, uint32_t *payload, u32 payload_size);
+void rtac_copy_voice_payload_to_user(void *payload, u32 payload_size);
+int rtac_clear_mapping(uint32_t cal_type);
+bool rtac_make_afe_callback(uint32_t *payload, u32 payload_size);
+void rtac_set_afe_handle(void *handle);
+void get_rtac_adm_data(struct rtac_adm *adm_data);
+void rtac_update_afe_topology(u32 port_id);
+#endif
diff -Nruw linux-4.4.115-fbx/include/linux/qdsp6v2./usf.h linux-4.4.115-fbx/include/linux/qdsp6v2/usf.h
--- linux-4.4.115-fbx/include/linux/qdsp6v2./usf.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/include/linux/qdsp6v2/usf.h	2019-01-22 16:16:28.363290417 +0100
@@ -0,0 +1,298 @@
+/* Copyright (c) 2011-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __USF_H__
+#define __USF_H__
+
+#include <linux/types.h>
+#include <linux/ioctl.h>
+
+#define USF_IOCTL_MAGIC 'U'
+
+#define US_SET_TX_INFO   _IOW(USF_IOCTL_MAGIC, 0, \
+				struct us_tx_info_type)
+#define US_START_TX      _IO(USF_IOCTL_MAGIC, 1)
+#define US_GET_TX_UPDATE _IOWR(USF_IOCTL_MAGIC, 2, \
+				struct us_tx_update_info_type)
+#define US_SET_RX_INFO   _IOW(USF_IOCTL_MAGIC, 3, \
+				struct us_rx_info_type)
+#define US_SET_RX_UPDATE _IOWR(USF_IOCTL_MAGIC, 4, \
+				struct us_rx_update_info_type)
+#define US_START_RX      _IO(USF_IOCTL_MAGIC, 5)
+
+#define US_STOP_TX      _IO(USF_IOCTL_MAGIC, 6)
+#define US_STOP_RX      _IO(USF_IOCTL_MAGIC, 7)
+
+#define US_SET_DETECTION _IOWR(USF_IOCTL_MAGIC, 8, \
+				struct us_detect_info_type)
+
+#define US_GET_VERSION  _IOWR(USF_IOCTL_MAGIC, 9, \
+				struct us_version_info_type)
+
+#define US_SET_TX_STREAM_PARAM   _IOW(USF_IOCTL_MAGIC, 10, \
+				struct us_stream_param_type)
+#define US_GET_TX_STREAM_PARAM  _IOWR(USF_IOCTL_MAGIC, 11, \
+				struct us_stream_param_type)
+#define US_SET_RX_STREAM_PARAM   _IOW(USF_IOCTL_MAGIC, 12, \
+				struct us_stream_param_type)
+#define US_GET_RX_STREAM_PARAM  _IOWR(USF_IOCTL_MAGIC, 13, \
+				struct us_stream_param_type)
+
+/* Special timeout values */
+#define USF_NO_WAIT_TIMEOUT	0x00000000
+/* Infinite (wait forever) */
+#define USF_INFINITIVE_TIMEOUT	0xffffffff
+/* Default value, used by the driver */
+#define USF_DEFAULT_TIMEOUT	0xfffffffe
+
+/* US detection place (HW|FW) */
+enum us_detect_place_enum {
+/* US is detected in HW */
+	US_DETECT_HW,
+/* US is detected in FW */
+	US_DETECT_FW
+};
+
+/* US detection mode */
+enum us_detect_mode_enum {
+/* US detection is disabled */
+	US_DETECT_DISABLED_MODE,
+/* US detection is enabled in continuous mode */
+	US_DETECT_CONTINUE_MODE,
+/* US detection is enabled in one shot mode */
+	US_DETECT_SHOT_MODE
+};
+
+/* Encoder (TX), decoder (RX) supported US data formats */
+#define USF_POINT_EPOS_FORMAT	0
+#define USF_RAW_FORMAT		1
+
+/* Indexes of event types, produced by the calculators */
+#define USF_TSC_EVENT_IND      0
+#define USF_TSC_PTR_EVENT_IND  1
+#define USF_MOUSE_EVENT_IND    2
+#define USF_KEYBOARD_EVENT_IND 3
+#define USF_TSC_EXT_EVENT_IND  4
+#define USF_MAX_EVENT_IND      5
+
+/* Types of events, produced by the calculators */
+#define USF_NO_EVENT 0
+#define USF_TSC_EVENT      (1 << USF_TSC_EVENT_IND)
+#define USF_TSC_PTR_EVENT  (1 << USF_TSC_PTR_EVENT_IND)
+#define USF_MOUSE_EVENT    (1 << USF_MOUSE_EVENT_IND)
+#define USF_KEYBOARD_EVENT (1 << USF_KEYBOARD_EVENT_IND)
+#define USF_TSC_EXT_EVENT  (1 << USF_TSC_EXT_EVENT_IND)
+#define USF_ALL_EVENTS         (USF_TSC_EVENT |\
+				USF_TSC_PTR_EVENT |\
+				USF_MOUSE_EVENT |\
+				USF_KEYBOARD_EVENT |\
+				USF_TSC_EXT_EVENT)
+
+/* min, max array dimension */
+#define MIN_MAX_DIM 2
+
+/* coordinates (x,y,z) array dimension */
+#define COORDINATES_DIM 3
+
+/* tilts (x,y) array dimension */
+#define TILTS_DIM 2
+
+/* Max size of the client name */
+#define USF_MAX_CLIENT_NAME_SIZE	20
+
+/* Max number of the ports (mics/speakers) */
+#define USF_MAX_PORT_NUM                8
+
+/* Info structure common for TX and RX */
+struct us_xx_info_type {
+/* Input:  general info */
+/* Name of the client - event calculator */
+	const char __user *client_name;
+/* Selected device identification, accepted in the kernel's CAD */
+	uint32_t dev_id;
+/* 0 - point_epos type; (e.g. 1 - gr_mmrd) */
+	uint32_t stream_format;
+/* Required sample rate in Hz */
+	uint32_t sample_rate;
+/* Size of a buffer (bytes) for US data transfer between the module and USF */
+	uint32_t buf_size;
+/* Number of the buffers for the US data transfer */
+	uint16_t buf_num;
+/* Number of the microphones (TX) or speakers(RX) */
+	uint16_t port_cnt;
+/* Microphones(TX) or speakers(RX) indexes in their enumeration */
+	uint8_t  port_id[USF_MAX_PORT_NUM];
+/* Bits per sample 16 or 32 */
+	uint16_t bits_per_sample;
+/* Input:  Transparent info for encoder in the LPASS */
+/* Parameters data size in bytes */
+	uint16_t params_data_size;
+/* Pointer to the parameters */
+	uint8_t __user *params_data;
+/* Max size of buffer for get and set parameter */
+	uint32_t max_get_set_param_buf_size;
+};
+
+struct us_input_info_type {
+	/* Touch screen dimensions: min & max; for input module */
+	int tsc_x_dim[MIN_MAX_DIM];
+	int tsc_y_dim[MIN_MAX_DIM];
+	int tsc_z_dim[MIN_MAX_DIM];
+	/* Touch screen tilt dimensions: min & max; for input module */
+	int tsc_x_tilt[MIN_MAX_DIM];
+	int tsc_y_tilt[MIN_MAX_DIM];
+	/* Touch screen pressure limits: min & max; for input module */
+	int tsc_pressure[MIN_MAX_DIM];
+	/* The requested buttons bitmap */
+	uint16_t req_buttons_bitmap;
+	/* Bitmap of types of events (USF_X_EVENT), produced by calculator */
+	uint16_t event_types;
+	/* Bitmap of types of events from devs, conflicting with USF */
+	uint16_t conflicting_event_types;
+};
+
+struct us_tx_info_type {
+	/* Common info */
+	struct us_xx_info_type us_xx_info;
+	/* Info specific for TX*/
+	struct us_input_info_type input_info;
+};
+
+struct us_rx_info_type {
+	/* Common info */
+	struct us_xx_info_type us_xx_info;
+	/* Info specific for RX*/
+};
+
+struct point_event_type {
+/* Pen coordinates (x, y, z) in units, defined by <coordinates_type>  */
+	int coordinates[COORDINATES_DIM];
+	/* Inclinations {x;y} in transparent units */
+	int inclinations[TILTS_DIM];
+/* [0-1023] (10bits); 0 - pen up */
+	uint32_t pressure;
+/* Bitmap for button state. 1 - down, 0 - up */
+	uint16_t buttons_state_bitmap;
+};
+
+/* Mouse buttons, supported by USF */
+#define USF_BUTTON_LEFT_MASK   1
+#define USF_BUTTON_MIDDLE_MASK 2
+#define USF_BUTTON_RIGHT_MASK  4
+struct mouse_event_type {
+/* The mouse relative movement (dX, dY, dZ) */
+	int rels[COORDINATES_DIM];
+/* Bitmap of mouse buttons states: 1 - down, 0 - up; */
+	uint16_t buttons_states;
+};
+
+struct key_event_type {
+/* Calculated MS key - see input.h. */
+	uint32_t key;
+/* Keyboard's key state: 1 - down, 0 - up; */
+	uint8_t key_state;
+};
+
+struct usf_event_type {
+/* Event sequence number */
+	uint32_t seq_num;
+/* Event generation system time */
+	uint32_t timestamp;
+/* Destination input event type index (e.g. touch screen, mouse, key) */
+	uint16_t event_type_ind;
+	union {
+		struct point_event_type point_event;
+		struct mouse_event_type mouse_event;
+		struct key_event_type   key_event;
+	} event_data;
+};
+
+struct us_tx_update_info_type {
+/* Input  general: */
+/* Number of calculated events */
+	uint16_t event_counter;
+/* Calculated events or NULL */
+	struct usf_event_type __user *event;
+/* Pointer (read index) to the end of available region */
+/* in the shared US data memory */
+	uint32_t free_region;
+/* Time (sec) to wait for data or special values: */
+/* USF_NO_WAIT_TIMEOUT, USF_INFINITIVE_TIMEOUT, USF_DEFAULT_TIMEOUT */
+	uint32_t timeout;
+/* Events (from conflicting devs) to be disabled/enabled */
+	uint16_t event_filters;
+
+/* Input  transparent data: */
+/* Parameters size */
+	uint16_t params_data_size;
+/* Pointer to the parameters */
+	uint8_t __user *params_data;
+/* Output parameters: */
+/* Pointer (write index) to the end of ready US data region */
+/* in the shared memory */
+	uint32_t ready_region;
+};
+
+struct us_rx_update_info_type {
+/* Input  general: */
+/* Pointer (write index) to the end of ready US data region */
+/* in the shared memory */
+	uint32_t ready_region;
+/* Input  transparent data: */
+/* Parameters size */
+	uint16_t params_data_size;
+/* Pointer to the parameters */
+	uint8_t __user *params_data;
+/* Output parameters: */
+/* Pointer (read index) to the end of available region */
+/* in the shared US data memory */
+	uint32_t free_region;
+};
+
+struct us_detect_info_type {
+/* US detection place (HW|FW) */
+/* NA in the Active and OFF states */
+	enum us_detect_place_enum us_detector;
+/* US detection mode */
+	enum us_detect_mode_enum  us_detect_mode;
+/* US data dropped during this time (msec) */
+	uint32_t skip_time;
+/* Transparent data size */
+	uint16_t params_data_size;
+/* Pointer to the transparent data */
+	uint8_t __user *params_data;
+/* Time (sec) to wait for US presence event */
+	uint32_t detect_timeout;
+/* Out parameter: US presence */
+	bool is_us;
+};
+
+struct us_version_info_type {
+/* Size of memory for the version string */
+	uint16_t buf_size;
+/* Pointer to the memory for the version string */
+	char __user *pbuf;
+};
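+
+/*
+ * Usage sketch (illustrative only): querying the USF version from user
+ * space. The device node path is an assumption and may differ per target.
+ *
+ * Example:
+ *
+ *	char ver[16];
+ *	struct us_version_info_type info = {
+ *		.buf_size = sizeof(ver),
+ *		.pbuf = ver,
+ *	};
+ *	int fd = open("/dev/usf1", O_RDWR);	(hypothetical node name)
+ *
+ *	if (fd >= 0 && !ioctl(fd, US_GET_VERSION, &info))
+ *		printf("USF version: %s\n", ver);
+ */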
+
+struct us_stream_param_type {
+/* Id of module */
+	uint32_t module_id;
+/* Id of parameter */
+	uint32_t param_id;
+/* Size of memory of the parameter buffer */
+	uint32_t buf_size;
+/* Pointer to the memory of the parameter buffer */
+	uint8_t __user *pbuf;
+};
+
+#endif /* __USF_H__ */
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/include/linux/qmi_encdec.h	2019-01-22 16:16:28.363290417 +0100
@@ -0,0 +1,184 @@
+/* Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _QMI_ENCDEC_H_
+#define _QMI_ENCDEC_H_
+
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/mm.h>
+#include <linux/list.h>
+#include <linux/socket.h>
+#include <linux/gfp.h>
+
+#define QMI_REQUEST_CONTROL_FLAG 0x00
+#define QMI_RESPONSE_CONTROL_FLAG 0x02
+#define QMI_INDICATION_CONTROL_FLAG 0x04
+#define QMI_HEADER_SIZE 7
+
+/**
+ * elem_type - Enum to identify the data type of elements in a data
+ *             structure.
+ */
+enum elem_type {
+	QMI_OPT_FLAG = 1,
+	QMI_DATA_LEN,
+	QMI_UNSIGNED_1_BYTE,
+	QMI_UNSIGNED_2_BYTE,
+	QMI_UNSIGNED_4_BYTE,
+	QMI_UNSIGNED_8_BYTE,
+	QMI_SIGNED_2_BYTE_ENUM,
+	QMI_SIGNED_4_BYTE_ENUM,
+	QMI_STRUCT,
+	QMI_STRING,
+	QMI_EOTI,
+};
+
+/**
+ * array_type - Enum to identify whether an element in a data structure is
+ *              an array and, if so, whether it is a static-length or a
+ *              variable-length array.
+ */
+enum array_type {
+	NO_ARRAY = 0,
+	STATIC_ARRAY = 1,
+	VAR_LEN_ARRAY = 2,
+};
+
+/**
+ * elem_info - Data structure to specify information about an element
+ *               in a data structure. An array of this data structure
+ *               can be used to specify info about a complex data
+ *               structure to be encoded/decoded.
+ *
+ * @data_type: Data type of this element.
+ * @elem_len: Array length of this element, if an array.
+ * @elem_size: Size of a single instance of this data type.
+ * @is_array: Array type of this element.
+ * @tlv_type: QMI message specific type to identify which element
+ *            is present in an incoming message.
+ * @offset: To identify the address of the first instance of this
+ *          element in the data structure.
+ * @ei_array: Array to provide information about the nested structure
+ *            within a data structure to be encoded/decoded.
+ */
+struct elem_info {
+	enum elem_type data_type;
+	uint32_t elem_len;
+	uint32_t elem_size;
+	enum array_type is_array;
+	uint8_t tlv_type;
+	uint32_t offset;
+	struct elem_info *ei_array;
+};
+
+/**
+ * msg_desc - Describes the main/outer structure to be
+ *		  encoded/decoded.
+ *
+ * @msg_id: Message ID of the QMI message.
+ * @max_msg_len: Maximum possible length of the QMI message.
+ * @ei_array: Array to provide information about a data structure.
+ */
+struct msg_desc {
+	uint16_t msg_id;
+	int max_msg_len;
+	struct elem_info *ei_array;
+};
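+
+/*
+ * Usage sketch (illustrative only): describing a one-field request
+ * structure for encode/decode. The message ID and TLV type are
+ * placeholders; max_msg_len is 7 here (1 type byte + 2 length bytes +
+ * 4 value bytes).
+ *
+ * Example:
+ *
+ *	struct example_req {
+ *		uint32_t value;
+ *	};
+ *
+ *	static struct elem_info example_req_ei[] = {
+ *		{
+ *			.data_type = QMI_UNSIGNED_4_BYTE,
+ *			.elem_len  = 1,
+ *			.elem_size = sizeof(uint32_t),
+ *			.is_array  = NO_ARRAY,
+ *			.tlv_type  = 0x01,
+ *			.offset    = offsetof(struct example_req, value),
+ *		},
+ *		{ .data_type = QMI_EOTI },
+ *	};
+ *
+ *	static struct msg_desc example_req_desc = {
+ *		.msg_id = 0x0001,
+ *		.max_msg_len = 7,
+ *		.ei_array = example_req_ei,
+ *	};
+ *
+ *	len = qmi_kernel_encode(&example_req_desc, buf, sizeof(buf), &req);
+ */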
+
+struct qmi_header {
+	unsigned char cntl_flag;
+	uint16_t txn_id;
+	uint16_t msg_id;
+	uint16_t msg_len;
+} __attribute__((__packed__));
+
+static inline void encode_qmi_header(unsigned char *buf,
+			unsigned char cntl_flag, uint16_t txn_id,
+			uint16_t msg_id, uint16_t msg_len)
+{
+	struct qmi_header *hdr = (struct qmi_header *)buf;
+
+	hdr->cntl_flag = cntl_flag;
+	hdr->txn_id = txn_id;
+	hdr->msg_id = msg_id;
+	hdr->msg_len = msg_len;
+}
+
+static inline void decode_qmi_header(unsigned char *buf,
+			unsigned char *cntl_flag, uint16_t *txn_id,
+			uint16_t *msg_id, uint16_t *msg_len)
+{
+	struct qmi_header *hdr = (struct qmi_header *)buf;
+
+	*cntl_flag = hdr->cntl_flag;
+	*txn_id = hdr->txn_id;
+	*msg_id = hdr->msg_id;
+	*msg_len = hdr->msg_len;
+}
+
+#ifdef CONFIG_QMI_ENCDEC
+/**
+ * qmi_kernel_encode() - Encode to QMI message wire format
+ * @desc: Pointer to structure descriptor.
+ * @out_buf: Buffer to hold the encoded QMI message.
+ * @out_buf_len: Length of the out buffer.
+ * @in_c_struct: C Structure to be encoded.
+ *
+ * @return: size of encoded message on success, < 0 on error.
+ */
+int qmi_kernel_encode(struct msg_desc *desc,
+		      void *out_buf, uint32_t out_buf_len,
+		      void *in_c_struct);
+
+/**
+ * qmi_kernel_decode() - Decode to C Structure format
+ * @desc: Pointer to structure descriptor.
+ * @out_c_struct: Buffer to hold the decoded C structure.
+ * @in_buf: Buffer containing the QMI message to be decoded.
+ * @in_buf_len: Length of the incoming QMI message.
+ *
+ * @return: 0 on success, < 0 on error.
+ */
+int qmi_kernel_decode(struct msg_desc *desc, void *out_c_struct,
+		      void *in_buf, uint32_t in_buf_len);
+
+/**
+ * qmi_verify_max_msg_len() - Verify the maximum length of a QMI message
+ * @desc: Pointer to structure descriptor.
+ *
+ * @return: true if the maximum message length embedded in structure
+ *          descriptor matches the calculated value, else false.
+ */
+bool qmi_verify_max_msg_len(struct msg_desc *desc);
+
+#else
+static inline int qmi_kernel_encode(struct msg_desc *desc,
+				    void *out_buf, uint32_t out_buf_len,
+				    void *in_c_struct)
+{
+	return -EOPNOTSUPP;
+}
+
+static inline int qmi_kernel_decode(struct msg_desc *desc,
+				    void *out_c_struct,
+				    void *in_buf, uint32_t in_buf_len)
+{
+	return -EOPNOTSUPP;
+}
+
+static inline bool qmi_verify_max_msg_len(struct msg_desc *desc)
+{
+	return false;
+}
+#endif
+
+#endif
diff -Nruw linux-4.4.115-fbx/include/linux/qpnp./pin.h linux-4.4.115-fbx/include/linux/qpnp/pin.h
--- linux-4.4.115-fbx/include/linux/qpnp./pin.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/include/linux/qpnp/pin.h	2019-01-22 16:16:28.363290417 +0100
@@ -0,0 +1,226 @@
+/* Copyright (c) 2012, 2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/* Mode select */
+#define QPNP_PIN_MODE_DIG_IN			0
+#define QPNP_PIN_MODE_DIG_OUT			1
+#define QPNP_PIN_MODE_DIG_IN_OUT		2
+#define QPNP_PIN_MODE_ANA_PASS_THRU		3
+#define QPNP_PIN_MODE_BIDIR			3
+#define QPNP_PIN_MODE_AIN			4
+#define QPNP_PIN_MODE_AOUT			5
+#define QPNP_PIN_MODE_SINK			6
+
+/* Invert source select (GPIO, MPP) */
+#define QPNP_PIN_INVERT_DISABLE			0
+#define QPNP_PIN_INVERT_ENABLE			1
+
+/* Output type (GPIO) */
+#define QPNP_PIN_OUT_BUF_CMOS			0
+#define QPNP_PIN_OUT_BUF_OPEN_DRAIN_NMOS	1
+#define QPNP_PIN_OUT_BUF_OPEN_DRAIN_PMOS	2
+#define QPNP_PIN_OUT_BUF_NO_DRIVE		3
+
+/* Voltage select (GPIO, MPP) */
+#define QPNP_PIN_VIN0				0
+#define QPNP_PIN_VIN1				1
+#define QPNP_PIN_VIN2				2
+#define QPNP_PIN_VIN3				3
+#define QPNP_PIN_VIN4				4
+#define QPNP_PIN_VIN5				5
+#define QPNP_PIN_VIN6				6
+#define QPNP_PIN_VIN7				7
+
+/* Pull Up Values (GPIO) */
+#define QPNP_PIN_GPIO_PULL_UP_30		0
+#define QPNP_PIN_GPIO_PULL_UP_1P5		1
+#define QPNP_PIN_GPIO_PULL_UP_31P5		2
+#define QPNP_PIN_GPIO_PULL_UP_1P5_30		3
+#define QPNP_PIN_GPIO_PULL_DN			4
+#define QPNP_PIN_GPIO_PULL_NO			5
+
+/* Pull Up Values (MPP) */
+#define QPNP_PIN_MPP_PULL_UP_0P6KOHM		0
+#define QPNP_PIN_MPP_PULL_UP_OPEN		1
+#define QPNP_PIN_MPP_PULL_UP_10KOHM		2
+#define QPNP_PIN_MPP_PULL_UP_30KOHM		3
+
+/* Out Strength (GPIO) */
+#define QPNP_PIN_OUT_STRENGTH_LOW		1
+#define QPNP_PIN_OUT_STRENGTH_MED		2
+#define QPNP_PIN_OUT_STRENGTH_HIGH		3
+
+/* Digital-in CTL (GPIO/MPP) */
+#define QPNP_PIN_DIG_IN_CTL_DTEST1		1
+#define QPNP_PIN_DIG_IN_CTL_DTEST2		2
+#define QPNP_PIN_DIG_IN_CTL_DTEST3		3
+#define QPNP_PIN_DIG_IN_CTL_DTEST4		4
+
+/* Source Select (GPIO) / Enable Select (MPP) */
+#define QPNP_PIN_SEL_FUNC_CONSTANT		0
+#define QPNP_PIN_SEL_FUNC_PAIRED		1
+#define QPNP_PIN_SEL_FUNC_1			2
+#define QPNP_PIN_SEL_FUNC_2			3
+#define QPNP_PIN_SEL_DTEST1			4
+#define QPNP_PIN_SEL_DTEST2			5
+#define QPNP_PIN_SEL_DTEST3			6
+#define QPNP_PIN_SEL_DTEST4			7
+
+/* Source Select for GPIO_LV/GPIO_MV only */
+#define QPNP_PIN_LV_MV_SEL_FUNC_CONSTANT	0
+#define QPNP_PIN_LV_MV_SEL_FUNC_PAIRED		1
+#define QPNP_PIN_LV_MV_SEL_FUNC_1		2
+#define QPNP_PIN_LV_MV_SEL_FUNC_2		3
+#define QPNP_PIN_LV_MV_SEL_FUNC_3		4
+#define QPNP_PIN_LV_MV_SEL_FUNC_4		5
+#define QPNP_PIN_LV_MV_SEL_DTEST1		6
+#define QPNP_PIN_LV_MV_SEL_DTEST2		7
+#define QPNP_PIN_LV_MV_SEL_DTEST3		8
+#define QPNP_PIN_LV_MV_SEL_DTEST4		9
+
+/* Master enable (GPIO, MPP) */
+#define QPNP_PIN_MASTER_DISABLE			0
+#define QPNP_PIN_MASTER_ENABLE			1
+
+/* Analog Output (MPP) */
+#define QPNP_PIN_AOUT_1V25			0
+#define QPNP_PIN_AOUT_0V625			1
+#define QPNP_PIN_AOUT_0V3125			2
+#define QPNP_PIN_AOUT_MPP			3
+#define QPNP_PIN_AOUT_ABUS1			4
+#define QPNP_PIN_AOUT_ABUS2			5
+#define QPNP_PIN_AOUT_ABUS3			6
+#define QPNP_PIN_AOUT_ABUS4			7
+
+/* Analog Input (MPP) */
+#define QPNP_PIN_AIN_AMUX_CH5			0
+#define QPNP_PIN_AIN_AMUX_CH6			1
+#define QPNP_PIN_AIN_AMUX_CH7			2
+#define QPNP_PIN_AIN_AMUX_CH8			3
+#define QPNP_PIN_AIN_AMUX_ABUS1			4
+#define QPNP_PIN_AIN_AMUX_ABUS2			5
+#define QPNP_PIN_AIN_AMUX_ABUS3			6
+#define QPNP_PIN_AIN_AMUX_ABUS4			7
+
+/* Current Sink (MPP) */
+#define QPNP_PIN_CS_OUT_5MA			0
+#define QPNP_PIN_CS_OUT_10MA			1
+#define QPNP_PIN_CS_OUT_15MA			2
+#define QPNP_PIN_CS_OUT_20MA			3
+#define QPNP_PIN_CS_OUT_25MA			4
+#define QPNP_PIN_CS_OUT_30MA			5
+#define QPNP_PIN_CS_OUT_35MA			6
+#define QPNP_PIN_CS_OUT_40MA			7
+
+/* ANALOG PASS SEL (GPIO LV/MV) */
+#define QPNP_PIN_APASS_SEL_ATEST1		0
+#define QPNP_PIN_APASS_SEL_ATEST2		1
+#define QPNP_PIN_APASS_SEL_ATEST3		2
+#define QPNP_PIN_APASS_SEL_ATEST4		3
+
+/**
+ * struct qpnp_pin_cfg - structure to specify pin configuration values
+ * @mode:		indicates whether the pin should be input, output, or
+ *			both for gpios. mpp pins also support bidirectional,
+ *			analog in, analog out and current sink. This value
+ *			should be of type QPNP_PIN_MODE_*.
+ * @output_type:	indicates pin should be configured as CMOS or open
+ *			drain. Should be of the type QPNP_PIN_OUT_BUF_*. This
+ *			setting applies for gpios only.
+ * @invert:		Invert the signal of the line -
+ *			QPNP_PIN_INVERT_DISABLE or QPNP_PIN_INVERT_ENABLE.
+ * @pull:		This parameter should be programmed to different values
+ *			depending on whether it's GPIO or MPP.
+ *			For GPIO, it indicates whether a pull up or pull down
+ *			should be applied. If a pullup is required the
+ *			current strength needs to be specified.
+ *			Current values of 30uA, 1.5uA, 31.5uA, 1.5uA with 30uA
+ *			boost are supported. This value should be one of
+ *			the QPNP_PIN_GPIO_PULL_*. Note that the hardware ignores
+ *			this configuration if the GPIO is not set to input or
+ *			output open-drain mode.
+ *			For MPP, it indicates whether a pullup should be
+ *			applied for bidirectional mode only. The hardware
+ *			ignores the configuration when operating in other modes.
+ *			This value should be one of the QPNP_PIN_MPP_PULL_*.
+ * @vin_sel:		specifies the voltage level when the output is set to 1.
+ *			For an input gpio, it specifies the voltage level at which
+ *			the input is interpreted as a logical 1.
+ * @out_strength:	the amount of current supplied for an output gpio,
+ *			should be of the type QPNP_PIN_OUT_STRENGTH_*.
+ * @src_sel:		select alternate function for the pin. Certain pins
+ *			can be paired (shorted) with each other. Some pins
+ *			can act as alternate functions. In the context of
+ *			gpio, this acts as a source select. For mpps,
+ *			this is an enable select.
+ *			This parameter should be of type QPNP_PIN_SEL_*.
+ * @master_en:		QPNP_PIN_MASTER_ENABLE = Enable features within the
+ *			pin block based on configurations.
+ *			QPNP_PIN_MASTER_DISABLE = Completely disable the pin
+ *			block and let the pin float with high impedance
+ *			regardless of other settings.
+ * @aout_ref:		Set the analog output reference. This parameter should
+ *			be of type QPNP_PIN_AOUT_*. This parameter only applies
+ *			to mpp pins.
+ * @ain_route:		Set the source for analog input. This parameter
+ *			should be of type QPNP_PIN_AIN_*. This parameter only
+ *			applies to mpp pins.
+ * @cs_out:		Set the amount of current to sink, in mA. This
+ *			parameter should be of type QPNP_PIN_CS_OUT_*. This
+ *			parameter only applies to mpp pins.
+ * @apass_sel:		Set the ATEST line to which the signal is
+ *			routed. The parameter should be of type
+ *			QPNP_PIN_APASS_SEL_*. This parameter only
+ *			applies to GPIO LV/MV pins.
+ * @dtest_sel:		Select the DTEST line to which the signal is
+ *			routed. The parameter should be of type
+ *			QPNP_PIN_DIG_IN_CTL_*. The parameter applies
+ *			to both gpio and mpp pins.
+ */
+struct qpnp_pin_cfg {
+	int mode;
+	int output_type;
+	int invert;
+	int pull;
+	int vin_sel;
+	int out_strength;
+	int src_sel;
+	int master_en;
+	int aout_ref;
+	int ain_route;
+	int cs_out;
+	int apass_sel;
+	int dtest_sel;
+};
+
+/**
+ * qpnp_pin_config - Apply pin configuration for Linux gpio
+ * @gpio: Linux gpio number to configure.
+ * @param: parameters to configure.
+ *
+ * This routine takes a Linux gpio number that corresponds with a
+ * PMIC pin and applies the configuration specified in 'param'.
+ * This gpio number can be ascertained by of_get_gpio_flags() or
+ * the qpnp_pin_map() API.
+ */
+int qpnp_pin_config(int gpio, struct qpnp_pin_cfg *param);
+
+/**
+ * qpnp_pin_map - Obtain Linux GPIO number from device spec
+ * @name: Name assigned by the 'label' binding for the primary node.
+ * @pmic_pin: PMIC pin number to lookup.
+ *
+ * This routine is used in legacy configurations that do not support
+ * Device Tree. If you are using Device Tree, you should not use this.
+ * For such cases, use of_get_gpio() or friends instead.
+ */
+int qpnp_pin_map(const char *name, uint32_t pmic_pin);
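+
+/*
+ * Illustrative configuration of a PMIC GPIO as a CMOS digital output.
+ * The label and pin number passed to qpnp_pin_map() are placeholders;
+ * Device Tree platforms would use of_get_gpio_flags() instead:
+ *
+ *	struct qpnp_pin_cfg cfg = {
+ *		.mode = QPNP_PIN_MODE_DIG_OUT,
+ *		.output_type = QPNP_PIN_OUT_BUF_CMOS,
+ *		.invert = QPNP_PIN_INVERT_DISABLE,
+ *		.pull = QPNP_PIN_GPIO_PULL_NO,
+ *		.vin_sel = QPNP_PIN_VIN2,
+ *		.out_strength = QPNP_PIN_OUT_STRENGTH_LOW,
+ *		.src_sel = QPNP_PIN_SEL_FUNC_CONSTANT,
+ *		.master_en = QPNP_PIN_MASTER_ENABLE,
+ *	};
+ *	int gpio = qpnp_pin_map("pm8xxx-gpio", 3);
+ *
+ *	if (gpio >= 0)
+ *		rc = qpnp_pin_config(gpio, &cfg);
+ */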
diff -Nruw linux-4.4.115-fbx/include/linux/qpnp./pwm.h linux-4.4.115-fbx/include/linux/qpnp/pwm.h
--- linux-4.4.115-fbx/include/linux/qpnp./pwm.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/include/linux/qpnp/pwm.h	2019-01-22 16:16:28.363290417 +0100
@@ -0,0 +1,217 @@
+/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __QPNP_PWM_H__
+#define __QPNP_PWM_H__
+
+#include <linux/pwm.h>
+
+/* usec: 19.2M, n=6, m=0, pre=2 */
+#define PM_PWM_PERIOD_MIN			7
+/* 1K, n=9, m=7, pre=6 */
+#define PM_PWM_PERIOD_MAX			(384 * USEC_PER_SEC)
+#define PM_PWM_LUT_RAMP_STEP_TIME_MAX		499
+#define PM_PWM_MAX_PAUSE_CNT			8191
+/*
+ * Formula from HSID,
+ * pause_time (hi/lo) = (pause_code - 1)*(duty_ms)
+ */
+#define PM_PWM_LUT_PAUSE_MAX \
+	((PM_PWM_MAX_PAUSE_CNT - 1) * PM_PWM_LUT_RAMP_STEP_TIME_MAX) /* ms */
+
+/* Flags for Look Up Table */
+#define PM_PWM_LUT_LOOP			0x01
+#define PM_PWM_LUT_RAMP_UP		0x02
+#define PM_PWM_LUT_REVERSE		0x04
+#define PM_PWM_LUT_PAUSE_HI_EN		0x08
+#define PM_PWM_LUT_PAUSE_LO_EN		0x10
+
+#define PM_PWM_LUT_NO_TABLE		0x20
+#define PM_PWM_LUT_USE_RAW_VALUE	0x40
+
+/*
+ * PWM frequency/period control
+ *
+ * PWM Frequency = ClockFrequency / (N * T)
+ *   or
+ * PWM Period = Clock Period * (N * T)
+ *   where
+ * N = 2^9 or 2^6 for 9-bit or 6-bit PWM size
+ * T = Pre-divide * 2^m, m = 0..7 (exponent)
+ */
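+
+/*
+ * Worked example of the formula above: with the 19.2MHz clock, 6-bit
+ * PWM size (N = 64), pre-divide of 5 and exponent m = 0, T = 5 * 2^0 = 5,
+ * so PWM Frequency = 19,200,000 / (64 * 5) = 60kHz, i.e. a period of
+ * about 16.7us.
+ */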
+
+/*
+ * enum pm_pwm_size - PWM bit mode selection
+ * %PM_PWM_SIZE_6BIT - Select 6 bit mode; 64 levels
+ * %PM_PWM_SIZE_9BIT - Select 9 bit mode; 512 levels
+ */
+enum pm_pwm_size {
+	PM_PWM_SIZE_6BIT =	6,
+	PM_PWM_SIZE_9BIT =	9,
+};
+
+/*
+ * enum pm_pwm_clk - PWM clock selection
+ * %PM_PWM_CLK_1KHZ - 1KHz clock
+ * %PM_PWM_CLK_32KHZ - 32KHz clock
+ * %PM_PWM_CLK_19P2MHZ - 19.2MHz clock
+ * Note: Here 1KHz = 1024Hz
+ */
+enum pm_pwm_clk {
+	PM_PWM_CLK_1KHZ,
+	PM_PWM_CLK_32KHZ,
+	PM_PWM_CLK_19P2MHZ,
+};
+
+/* PWM pre-divider selection */
+enum pm_pwm_pre_div {
+	PM_PWM_PDIV_2,
+	PM_PWM_PDIV_3,
+	PM_PWM_PDIV_5,
+	PM_PWM_PDIV_6,
+};
+
+/*
+ * struct pwm_period_config - PWM period configuration
+ * @pwm_size: enum pm_pwm_size
+ * @clk: enum pm_pwm_clk
+ * @pre_div: enum pm_pwm_pre_div
+ * @pre_div_exp: exponent of 2 as part of pre-divider: 0..7
+ */
+struct pwm_period_config {
+	enum pm_pwm_size	pwm_size;
+	enum pm_pwm_clk		clk;
+	enum pm_pwm_pre_div	pre_div;
+	int			pre_div_exp;
+};
+
+/*
+ * struct pwm_duty_cycles - PWM duty cycle info
+ * @duty_pcts - pointer to an array of duty percentages for a pwm period
+ * @num_duty_pcts - total entries in the duty_pcts array
+ * @duty_ms - duty cycle time in ms
+ * @start_idx - start index in the LUT
+ */
+struct pwm_duty_cycles {
+	int *duty_pcts;
+	int num_duty_pcts;
+	int duty_ms;
+	int start_idx;
+};
+
+/*
+ * enum pm_pwm_mode - PWM mode selection
+ * %PM_PWM_MODE_PWM - Select PWM mode
+ * %PM_PWM_MODE_LPG - Select LPG mode
+ */
+enum pm_pwm_mode {
+	PM_PWM_MODE_PWM,
+	PM_PWM_MODE_LPG,
+};
+
+/*
+ * lut_params: Lookup table (LUT) parameters
+ * @start_idx: start index in lookup table from 0 to MAX-1
+ * @idx_len: number of LUT entries to use, starting at start_idx
+ * @lut_pause_lo: pause time in milliseconds at the low index
+ * @lut_pause_hi: pause time in milliseconds at the high index
+ * @ramp_step_ms: time in milliseconds before loading the next LUT pattern
+ * @flags: control flags
+ */
+struct lut_params {
+	int start_idx;
+	int idx_len;
+	int lut_pause_hi;
+	int lut_pause_lo;
+	int ramp_step_ms;
+	int flags;
+};
+
+#if IS_ENABLED(CONFIG_PWM_QPNP)
+int pwm_config_period(struct pwm_device *pwm,
+			     struct pwm_period_config *pwm_p);
+
+int pwm_config_pwm_value(struct pwm_device *pwm, int pwm_value);
+
+
+int pwm_change_mode(struct pwm_device *pwm, enum pm_pwm_mode mode);
+
+
+int pwm_lut_config(struct pwm_device *pwm, int period_us,
+		int duty_pct[], struct lut_params lut_params);
+
+/*
+ * Support microsecond-level duty cycle and period configuration.
+ */
+int pwm_config_us(struct pwm_device *pwm,
+		int duty_us, int period_us);
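+
+/*
+ * Illustrative usage, assuming 'pwm' came from the standard
+ * pwm_request() path. The duty table and LUT parameters below are
+ * placeholders: a 25% duty cycle at a 1ms period, then a looping
+ * ramp driven from the lookup table in LPG mode:
+ *
+ *	int duty_pcts[] = { 0, 25, 50, 75, 100 };
+ *	struct lut_params lp = {
+ *		.start_idx = 0,
+ *		.idx_len = 5,
+ *		.ramp_step_ms = 20,
+ *		.flags = PM_PWM_LUT_LOOP | PM_PWM_LUT_RAMP_UP,
+ *	};
+ *
+ *	rc = pwm_config_us(pwm, 250, 1000);
+ *	rc = pwm_lut_config(pwm, 1000, duty_pcts, lp);
+ *	rc = pwm_change_mode(pwm, PM_PWM_MODE_LPG);
+ */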
+
+#else
+static inline int pwm_config_period(struct pwm_device *pwm,
+			     struct pwm_period_config *pwm_p)
+{
+	return -EINVAL;
+}
+
+static inline int pwm_config_pwm_value(struct pwm_device *pwm, int pwm_value)
+{
+	return -EINVAL;
+}
+
+static inline int pwm_change_mode(struct pwm_device *pwm, enum pm_pwm_mode mode)
+{
+	return -EINVAL;
+}
+
+static inline int pwm_lut_config(struct pwm_device *pwm, int period_us,
+		int duty_pct[], struct lut_params lut_params)
+{
+	return -EINVAL;
+}
+
+static inline int pwm_config_us(struct pwm_device *pwm,
+		int duty_us, int period_us)
+{
+	return -EINVAL;
+}
+#endif
+
+/* Standard APIs supported */
+/*
+ * pwm_request - request a PWM device
+ * @pwm_id: PWM id or channel
+ * @label: the label to identify the user
+ */
+
+/*
+ * pwm_free - free a PWM device
+ * @pwm: the PWM device
+ */
+
+/*
+ * pwm_config - change a PWM device configuration
+ * @pwm: the PWM device
+ * @period_ns: period in nanosecond
+ * @duty_ns: duty cycle in nanosecond
+ */
+
+/*
+ * pwm_enable - start a PWM output toggling
+ * @pwm: the PWM device
+ */
+
+/*
+ * pwm_disable - stop a PWM output toggling
+ * @pwm: the PWM device
+ */
+
+#endif /* __QPNP_PWM_H__ */
diff -Nruw linux-4.4.115-fbx/include/linux/qpnp./qpnp-adc.h linux-4.4.115-fbx/include/linux/qpnp/qpnp-adc.h
--- linux-4.4.115-fbx/include/linux/qpnp./qpnp-adc.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/include/linux/qpnp/qpnp-adc.h	2019-01-22 16:16:28.367290453 +0100
@@ -0,0 +1,2290 @@
+/*
+ * Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+/*
+ * Qualcomm PMIC QPNP ADC driver header file
+ *
+ */
+
+#ifndef __QPNP_ADC_H
+#define __QPNP_ADC_H
+
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/qpnp/qpnp-revid.h>
+#include <linux/regulator/consumer.h>
+/**
+ * enum qpnp_vadc_channels - QPNP AMUX arbiter channels
+ */
+enum qpnp_vadc_channels {
+	USBIN = 0,
+	DCIN,
+	VCHG_SNS,
+	SPARE1_03,
+	USB_ID_MV,
+	VCOIN,
+	VBAT_SNS,
+	VSYS,
+	DIE_TEMP,
+	REF_625MV,
+	REF_125V,
+	CHG_TEMP,
+	SPARE1,
+	SPARE2,
+	GND_REF,
+	VDD_VADC,
+	P_MUX1_1_1,
+	P_MUX2_1_1,
+	P_MUX3_1_1,
+	P_MUX4_1_1,
+	P_MUX5_1_1,
+	P_MUX6_1_1,
+	P_MUX7_1_1,
+	P_MUX8_1_1,
+	P_MUX9_1_1,
+	P_MUX10_1_1,
+	P_MUX11_1_1,
+	P_MUX12_1_1,
+	P_MUX13_1_1,
+	P_MUX14_1_1,
+	P_MUX15_1_1,
+	P_MUX16_1_1,
+	P_MUX1_1_3,
+	P_MUX2_1_3,
+	P_MUX3_1_3,
+	P_MUX4_1_3,
+	P_MUX5_1_3,
+	P_MUX6_1_3,
+	P_MUX7_1_3,
+	P_MUX8_1_3,
+	P_MUX9_1_3,
+	P_MUX10_1_3,
+	P_MUX11_1_3,
+	P_MUX12_1_3,
+	P_MUX13_1_3,
+	P_MUX14_1_3,
+	P_MUX15_1_3,
+	P_MUX16_1_3,
+	LR_MUX1_BATT_THERM,
+	LR_MUX2_BAT_ID,
+	LR_MUX3_XO_THERM,
+	LR_MUX4_AMUX_THM1,
+	LR_MUX5_AMUX_THM2,
+	LR_MUX6_AMUX_THM3,
+	LR_MUX7_HW_ID,
+	LR_MUX8_AMUX_THM4,
+	LR_MUX9_AMUX_THM5,
+	LR_MUX10_USB_ID_LV,
+	AMUX_PU1,
+	AMUX_PU2,
+	LR_MUX3_BUF_XO_THERM_BUF,
+	LR_MUX1_PU1_BAT_THERM = 112,
+	LR_MUX2_PU1_BAT_ID = 113,
+	LR_MUX3_PU1_XO_THERM = 114,
+	LR_MUX4_PU1_AMUX_THM1 = 115,
+	LR_MUX5_PU1_AMUX_THM2 = 116,
+	LR_MUX6_PU1_AMUX_THM3 = 117,
+	LR_MUX7_PU1_AMUX_HW_ID = 118,
+	LR_MUX8_PU1_AMUX_THM4 = 119,
+	LR_MUX9_PU1_AMUX_THM5 = 120,
+	LR_MUX10_PU1_AMUX_USB_ID_LV = 121,
+	LR_MUX3_BUF_PU1_XO_THERM_BUF = 124,
+	LR_MUX1_PU2_BAT_THERM = 176,
+	LR_MUX2_PU2_BAT_ID = 177,
+	LR_MUX3_PU2_XO_THERM = 178,
+	LR_MUX4_PU2_AMUX_THM1 = 179,
+	LR_MUX5_PU2_AMUX_THM2 = 180,
+	LR_MUX6_PU2_AMUX_THM3 = 181,
+	LR_MUX7_PU2_AMUX_HW_ID = 182,
+	LR_MUX8_PU2_AMUX_THM4 = 183,
+	LR_MUX9_PU2_AMUX_THM5 = 184,
+	LR_MUX10_PU2_AMUX_USB_ID_LV = 185,
+	LR_MUX3_BUF_PU2_XO_THERM_BUF = 188,
+	LR_MUX1_PU1_PU2_BAT_THERM = 240,
+	LR_MUX2_PU1_PU2_BAT_ID = 241,
+	LR_MUX3_PU1_PU2_XO_THERM = 242,
+	LR_MUX4_PU1_PU2_AMUX_THM1 = 243,
+	LR_MUX5_PU1_PU2_AMUX_THM2 = 244,
+	LR_MUX6_PU1_PU2_AMUX_THM3 = 245,
+	LR_MUX7_PU1_PU2_AMUX_HW_ID = 246,
+	LR_MUX8_PU1_PU2_AMUX_THM4 = 247,
+	LR_MUX9_PU1_PU2_AMUX_THM5 = 248,
+	LR_MUX10_PU1_PU2_AMUX_USB_ID_LV = 249,
+	LR_MUX3_BUF_PU1_PU2_XO_THERM_BUF = 252,
+	ALL_OFF = 255,
+	ADC_MAX_NUM = 0xffff,
+
+	/* Channel listing for refreshed VADC in hex format */
+	VADC_VREF_GND = 0,
+	VADC_CALIB_VREF_1P25 = 1,
+	VADC_CALIB_VREF = 2,
+	VADC_CALIB_VREF_1_DIV_3 = 0x82,
+	VADC_VPH_PWR = 0x83,
+	VADC_VBAT_SNS = 0x84,
+	VADC_VCOIN = 0x85,
+	VADC_DIE_TEMP = 6,
+	VADC_CHG_TEMP = 7,
+	VADC_USB_IN = 8,
+	VADC_IREG_FB = 9,
+	/* External input connection */
+	VADC_BAT_THERM = 0xa,
+	VADC_BAT_ID = 0xb,
+	VADC_XO_THERM = 0xc,
+	VADC_AMUX_THM1 = 0xd,
+	VADC_AMUX_THM2 = 0xe,
+	VADC_AMUX_THM3 = 0xf,
+	VADC_AMUX_THM4 = 0x10,
+	VADC_AMUX_THM5 = 0x11,
+	VADC_AMUX1_GPIO = 0x12,
+	VADC_AMUX2_GPIO = 0x13,
+	VADC_AMUX3_GPIO = 0x14,
+	VADC_AMUX4_GPIO = 0x15,
+	VADC_AMUX5_GPIO = 0x16,
+	VADC_AMUX6_GPIO = 0x17,
+	VADC_AMUX7_GPIO = 0x18,
+	VADC_AMUX8_GPIO = 0x19,
+	VADC_ATEST1 = 0x1a,
+	VADC_ATEST2 = 0x1b,
+	VADC_ATEST3 = 0x1c,
+	VADC_ATEST4 = 0x1d,
+	VADC_OFF = 0xff,
+	/* PU1 is 30K pull up */
+	VADC_BAT_THERM_PU1 = 0x2a,
+	VADC_BAT_ID_PU1 = 0x2b,
+	VADC_XO_THERM_PU1 = 0x2c,
+	VADC_AMUX_THM1_PU1 = 0x2d,
+	VADC_AMUX_THM2_PU1 = 0x2e,
+	VADC_AMUX_THM3_PU1 = 0x2f,
+	VADC_AMUX_THM4_PU1 = 0x30,
+	VADC_AMUX_THM5_PU1 = 0x31,
+	VADC_AMUX1_GPIO_PU1 = 0x32,
+	VADC_AMUX2_GPIO_PU1 = 0x33,
+	VADC_AMUX3_GPIO_PU1 = 0x34,
+	VADC_AMUX4_GPIO_PU1 = 0x35,
+	VADC_AMUX5_GPIO_PU1 = 0x36,
+	VADC_AMUX6_GPIO_PU1 = 0x37,
+	VADC_AMUX7_GPIO_PU1 = 0x38,
+	VADC_AMUX8_GPIO_PU1 = 0x39,
+	/* PU2 is 100K pull up */
+	VADC_BAT_THERM_PU2 = 0x4a,
+	VADC_BAT_ID_PU2 = 0x4b,
+	VADC_XO_THERM_PU2 = 0x4c,
+	VADC_AMUX_THM1_PU2 = 0x4d,
+	VADC_AMUX_THM2_PU2 = 0x4e,
+	VADC_AMUX_THM3_PU2 = 0x4f,
+	VADC_AMUX_THM4_PU2 = 0x50,
+	VADC_AMUX_THM5_PU2 = 0x51,
+	VADC_AMUX1_GPIO_PU2 = 0x52,
+	VADC_AMUX2_GPIO_PU2 = 0x53,
+	VADC_AMUX3_GPIO_PU2 = 0x54,
+	VADC_AMUX4_GPIO_PU2 = 0x55,
+	VADC_AMUX5_GPIO_PU2 = 0x56,
+	VADC_AMUX6_GPIO_PU2 = 0x57,
+	VADC_AMUX7_GPIO_PU2 = 0x58,
+	VADC_AMUX8_GPIO_PU2 = 0x59,
+	/* PU3 is 400K pull up */
+	VADC_BAT_THERM_PU3 = 0x6a,
+	VADC_BAT_ID_PU3 = 0x6b,
+	VADC_XO_THERM_PU3 = 0x6c,
+	VADC_AMUX_THM1_PU3 = 0x6d,
+	VADC_AMUX_THM2_PU3 = 0x6e,
+	VADC_AMUX_THM3_PU3 = 0x6f,
+	VADC_AMUX_THM4_PU3 = 0x70,
+	VADC_AMUX_THM5_PU3 = 0x71,
+	VADC_AMUX1_GPIO_PU3 = 0x72,
+	VADC_AMUX2_GPIO_PU3 = 0x73,
+	VADC_AMUX3_GPIO_PU3 = 0x74,
+	VADC_AMUX4_GPIO_PU3 = 0x75,
+	VADC_AMUX5_GPIO_PU3 = 0x76,
+	VADC_AMUX6_GPIO_PU3 = 0x77,
+	VADC_AMUX7_GPIO_PU3 = 0x78,
+	VADC_AMUX8_GPIO_PU3 = 0x79,
+	/* External input connection with 1/3 div */
+	VADC_AMUX1_GPIO_DIV_3 = 0x92,
+	VADC_AMUX2_GPIO_DIV_3 = 0x93,
+	VADC_AMUX3_GPIO_DIV_3 = 0x94,
+	VADC_AMUX4_GPIO_DIV_3 = 0x95,
+	VADC_AMUX5_GPIO_DIV_3 = 0x96,
+	VADC_AMUX6_GPIO_DIV_3 = 0x97,
+	VADC_AMUX7_GPIO_DIV_3 = 0x98,
+	VADC_AMUX8_GPIO_DIV_3 = 0x99,
+	VADC_ATEST1_DIV_3 = 0x9a,
+	VADC_ATEST2_DIV_3 = 0x9b,
+	VADC_ATEST3_DIV_3 = 0x9c,
+	VADC_ATEST4_DIV_3 = 0x9d,
+	VADC_REFRESH_MAX_NUM = 0xffff,
+};
+
+/**
+ * enum qpnp_iadc_channels - QPNP IADC channel list
+ */
+enum qpnp_iadc_channels {
+	INTERNAL_RSENSE = 0,
+	EXTERNAL_RSENSE,
+	ALT_LEAD_PAIR,
+	GAIN_CALIBRATION_17P857MV,
+	OFFSET_CALIBRATION_SHORT_CADC_LEADS,
+	OFFSET_CALIBRATION_CSP_CSN,
+	OFFSET_CALIBRATION_CSP2_CSN2,
+	IADC_MUX_NUM,
+};
+
+#define QPNP_ADC_625_UV	625000
+#define QPNP_ADC_HWMON_NAME_LENGTH				64
+#define QPNP_MAX_PROP_NAME_LEN					32
+#define QPNP_THERMALNODE_NAME_LENGTH                            25
+#define QPNP_ADC_1P25_UV					1250000
+
+/* Structure device for qpnp vadc */
+struct qpnp_vadc_chip;
+
+/* Structure device for qpnp iadc */
+struct qpnp_iadc_chip;
+
+/* Structure device for qpnp adc tm */
+struct qpnp_adc_tm_chip;
+
+/**
+ * enum qpnp_adc_clk_type - Clock rate supported.
+ * %CLK_TYPE1: 2P4MHZ
+ * %CLK_TYPE2: 4P8MHZ
+ * %CLK_TYPE3: 9P6MHZ
+ * %CLK_TYPE4: 19P2MHZ
+ * %CLK_NONE: Do not use this Clk type.
+ *
+ * The Clock rate is specific to each channel of the QPNP ADC arbiter.
+ */
+enum qpnp_adc_clk_type {
+	CLK_TYPE1 = 0,
+	CLK_TYPE2,
+	CLK_TYPE3,
+	CLK_TYPE4,
+	CLK_NONE,
+};
+
+/**
+ * enum qpnp_adc_decimation_type - Sampling rate supported.
+ * %DECIMATION_TYPE1: 512
+ * %DECIMATION_TYPE2: 1K
+ * %DECIMATION_TYPE3: 2K
+ * %DECIMATION_TYPE4: 4K
+ * %DECIMATION_NONE: Do not use this Sampling type.
+ *
+ * The Sampling rate is specific to each channel of the QPNP ADC arbiter.
+ */
+enum qpnp_adc_decimation_type {
+	DECIMATION_TYPE1 = 0,
+	DECIMATION_TYPE2,
+	DECIMATION_TYPE3,
+	DECIMATION_TYPE4,
+	DECIMATION_NONE = 0xff,
+
+	ADC_HC_DEC_RATIO_256 = 0,
+	ADC_HC_DEC_RATIO_512 = 1,
+	ADC_HC_DEC_RATIO_1024 = 2,
+	ADC_HC_DEC_RATIO_NONE = 0xff,
+};
+
+/**
+ * enum qpnp_adc_calib_type - QPNP ADC Calibration type.
+ * %ADC_CALIB_ABSOLUTE: Use 625mV and 1.25V reference channels.
+ * %ADC_CALIB_RATIOMETRIC: Use reference Voltage/GND.
+ * %ADC_CALIB_CONFIG_NONE: Do not use this calibration type.
+ *
+ * enum qpnp_adc_cal_sel - Selects the calibration type that is applied
+ *			   on the corresponding channel measurement after
+ *			   the ADC data is read.
+ * %ADC_HC_NO_CAL :	To obtain raw, uncalibrated data on qpnp-vadc-hc type.
+ * %ADC_HC_RATIO_CAL :	Applies ratiometric calibration. Note the calibration
+ *			values stored in the CAL peripheral for VADC_VREF and
+ *			VREF_1P25 already have GND_REF value removed. Used
+ *			only with qpnp-vadc-hc type of VADC.
+ * %ADC_HC_ABS_CAL :	Applies absolute calibration. Note the calibration
+ *			values stored in the CAL peripheral for VADC_VREF and
+ *			VREF_1P25 already have GND_REF value removed. Used
+ *			only with qpnp-vadc-hc type of VADC.
+ *
+ * Use the input reference voltage depending on the calibration type
+ * to calculate the offset and gain parameters. The calibration is
+ * specific to each channel of the QPNP ADC.
+ */
+enum qpnp_adc_calib_type {
+	CALIB_ABSOLUTE = 0,
+	CALIB_RATIOMETRIC,
+	CALIB_NONE,
+
+	ADC_HC_NO_CAL = 0,
+	ADC_HC_RATIO_CAL = 1,
+	ADC_HC_ABS_CAL = 2,
+	ADC_HC_CAL_SEL_NONE,
+};
+
+/**
+ * enum qpnp_adc_channel_scaling_param - pre-scaling AMUX ratio.
+ * %CHAN_PATH_SCALING0: ratio of {1, 1}
+ * %CHAN_PATH_SCALING1: ratio of {1, 3}
+ * %CHAN_PATH_SCALING2: ratio of {1, 4}
+ * %CHAN_PATH_SCALING3: ratio of {1, 6}
+ * %CHAN_PATH_SCALING4: ratio of {1, 20}
+ * %CHAN_PATH_SCALING5: ratio of {1, 8}
+ * %CHAN_PATH_SCALING6: ratio of {10, 81}. The actual ratio is (1/8.1).
+ * %CHAN_PATH_SCALING7: ratio of {1, 10}
+ * %CHAN_PATH_NONE: Do not use this pre-scaling ratio type.
+ *
+ * The pre-scaling is applied for signals to be within the voltage range
+ * of the ADC.
+ */
+enum qpnp_adc_channel_scaling_param {
+	PATH_SCALING0 = 0,
+	PATH_SCALING1,
+	PATH_SCALING2,
+	PATH_SCALING3,
+	PATH_SCALING4,
+	PATH_SCALING5,
+	PATH_SCALING6,
+	PATH_SCALING7,
+	PATH_SCALING_NONE,
+};
+
+/**
+ * enum qpnp_adc_scale_fn_type - Scaling function for pm8941 pre calibrated
+ *				   digital data relative to ADC reference.
+ * %SCALE_DEFAULT: Default scaling to convert raw adc code to voltage (uV).
+ * %SCALE_BATT_THERM: Conversion to temperature(decidegC) based on btm
+ *			parameters.
+ * %SCALE_THERM_100K_PULLUP: Returns temperature in degC.
+ *				 Uses a mapping table with 100K pullup.
+ * %SCALE_PMIC_THERM: Returns result in millidegrees Centigrade.
+ * %SCALE_XOTHERM: Returns XO thermistor reading in degrees Centigrade.
+ * %SCALE_THERM_150K_PULLUP: Returns temperature in degC.
+ *				 Uses a mapping table with 150K pullup.
+ * %SCALE_QRD_BATT_THERM: Conversion to temperature(decidegC) based on
+ *			btm parameters.
+ * %SCALE_QRD_SKUAA_BATT_THERM: Conversion to temperature(decidegC) based on
+ *          btm parameters for SKUAA.
+ * %SCALE_SMB_BATT_THERM: Conversion to temperature(decidegC) based on
+ *          btm parameters for SMB.
+ * %SCALE_QRD_SKUG_BATT_THERM: Conversion to temperature(decidegC) based on
+ *          btm parameters for SKUG.
+ * %SCALE_QRD_SKUH_BATT_THERM: Conversion to temperature(decidegC) based on
+ *          btm parameters for SKUH
+ * %SCALE_QRD_SKUT1_BATT_THERM: Conversion to temperature(decidegC) based on
+ *          btm parameters for SKUT1
+ * %SCALE_PMI_CHG_TEMP: Conversion for PMI CHG temp
+ * %SCALE_NONE: Do not use this scaling type.
+ */
+enum qpnp_adc_scale_fn_type {
+	SCALE_DEFAULT = 0,
+	SCALE_BATT_THERM,
+	SCALE_THERM_100K_PULLUP,
+	SCALE_PMIC_THERM,
+	SCALE_XOTHERM,
+	SCALE_THERM_150K_PULLUP,
+	SCALE_QRD_BATT_THERM,
+	SCALE_QRD_SKUAA_BATT_THERM,
+	SCALE_SMB_BATT_THERM,
+	SCALE_QRD_SKUG_BATT_THERM,
+	SCALE_QRD_SKUH_BATT_THERM,
+	SCALE_NCP_03WF683_THERM,
+	SCALE_QRD_SKUT1_BATT_THERM,
+	SCALE_PMI_CHG_TEMP = 16,
+	SCALE_NONE,
+};
+
+/**
+ * enum qpnp_adc_tm_rscale_fn_type - Scaling function used to convert the
+ *	channels input voltage/temperature to corresponding ADC code that is
+ *	applied for thresholds. Check the corresponding channels scaling to
+ *	determine the appropriate temperature/voltage units that are passed
+ *	to the scaling function. Example battery follows the power supply
+ *	to the scaling function. For example, the battery client follows the
+ *	power supply framework, which needs its units in decidegreesC, so it
+ *	passes decidegreesC. PA_THERM clients pass the temperature in degC.
+ *	adc_tm_rscale_fn[].
+ */
+enum qpnp_adc_tm_rscale_fn_type {
+	SCALE_R_VBATT = 0,
+	SCALE_RBATT_THERM,
+	SCALE_R_USB_ID,
+	SCALE_RPMIC_THERM,
+	SCALE_R_SMB_BATT_THERM,
+	SCALE_R_ABSOLUTE,
+	SCALE_QRD_SKUH_RBATT_THERM,
+	SCALE_QRD_SKUT1_RBATT_THERM,
+	SCALE_RSCALE_NONE,
+};
+
+/**
+ * enum qpnp_vadc_rscale_fn_type - Scaling function used to convert the
+ *	channels input voltage/temperature to corresponding ADC code that is
+ *	applied for thresholds. Check the corresponding channels scaling to
+ *	determine the appropriate temperature/voltage units that are passed
+ *	to the scaling function. The order below should match the one in the
+ *	driver for qpnp_adc_scale_fn[].
+ */
+enum qpnp_vadc_rscale_fn_type {
+	SCALE_RVADC_ABSOLUTE = 0,
+	SCALE_RVADC_SCALE_NONE,
+};
+
+/**
+ * enum qpnp_adc_fast_avg_ctl - Provides ability to obtain single result
+ *		from the ADC that is an average of multiple measurement
+ *		samples. Select number of samples for use in fast
+ *		average mode (i.e. 2 ^ value).
+ * %ADC_FAST_AVG_SAMPLE_1:   0x0 = 1
+ * %ADC_FAST_AVG_SAMPLE_2:   0x1 = 2
+ * %ADC_FAST_AVG_SAMPLE_4:   0x2 = 4
+ * %ADC_FAST_AVG_SAMPLE_8:   0x3 = 8
+ * %ADC_FAST_AVG_SAMPLE_16:  0x4 = 16
+ * %ADC_FAST_AVG_SAMPLE_32:  0x5 = 32
+ * %ADC_FAST_AVG_SAMPLE_64:  0x6 = 64
+ * %ADC_FAST_AVG_SAMPLE_128: 0x7 = 128
+ * %ADC_FAST_AVG_SAMPLE_256: 0x8 = 256
+ * %ADC_FAST_AVG_SAMPLE_512: 0x9 = 512
+ */
+enum qpnp_adc_fast_avg_ctl {
+	ADC_FAST_AVG_SAMPLE_1 = 0,
+	ADC_FAST_AVG_SAMPLE_2,
+	ADC_FAST_AVG_SAMPLE_4,
+	ADC_FAST_AVG_SAMPLE_8,
+	ADC_FAST_AVG_SAMPLE_16,
+	ADC_FAST_AVG_SAMPLE_32,
+	ADC_FAST_AVG_SAMPLE_64,
+	ADC_FAST_AVG_SAMPLE_128,
+	ADC_FAST_AVG_SAMPLE_256,
+	ADC_FAST_AVG_SAMPLE_512,
+	ADC_FAST_AVG_SAMPLE_NONE,
+};
+
+/**
+ * enum qpnp_adc_hw_settle_time - Time between AMUX getting configured and
+ *		the ADC starting conversion. Delay = 100us * value for
+ *		value < 11 and 2ms * (value - 10) otherwise.
+ * %ADC_CHANNEL_HW_SETTLE_DELAY_0US:   0us
+ * %ADC_CHANNEL_HW_SETTLE_DELAY_100US: 100us
+ * %ADC_CHANNEL_HW_SETTLE_DELAY_200US: 200us
+ * %ADC_CHANNEL_HW_SETTLE_DELAY_300US: 300us
+ * %ADC_CHANNEL_HW_SETTLE_DELAY_400US: 400us
+ * %ADC_CHANNEL_HW_SETTLE_DELAY_500US: 500us
+ * %ADC_CHANNEL_HW_SETTLE_DELAY_600US: 600us
+ * %ADC_CHANNEL_HW_SETTLE_DELAY_700US: 700us
+ * %ADC_CHANNEL_HW_SETTLE_DELAY_800US: 800us
+ * %ADC_CHANNEL_HW_SETTLE_DELAY_900US: 900us
+ * %ADC_CHANNEL_HW_SETTLE_DELAY_1MS:   1ms
+ * %ADC_CHANNEL_HW_SETTLE_DELAY_2MS:   2ms
+ * %ADC_CHANNEL_HW_SETTLE_DELAY_4MS:   4ms
+ * %ADC_CHANNEL_HW_SETTLE_DELAY_6MS:   6ms
+ * %ADC_CHANNEL_HW_SETTLE_DELAY_8MS:   8ms
+ * %ADC_CHANNEL_HW_SETTLE_DELAY_10MS:  10ms
+ * %ADC_CHANNEL_HW_SETTLE_NONE
+ */
+enum qpnp_adc_hw_settle_time {
+	ADC_CHANNEL_HW_SETTLE_DELAY_0US = 0,
+	ADC_CHANNEL_HW_SETTLE_DELAY_100US,
+	ADC_CHANNEL_HW_SETTLE_DELAY_200US,
+	ADC_CHANNEL_HW_SETTLE_DELAY_300US,
+	ADC_CHANNEL_HW_SETTLE_DELAY_400US,
+	ADC_CHANNEL_HW_SETTLE_DELAY_500US,
+	ADC_CHANNEL_HW_SETTLE_DELAY_600US,
+	ADC_CHANNEL_HW_SETTLE_DELAY_700US,
+	ADC_CHANNEL_HW_SETTLE_DELAY_800US,
+	ADC_CHANNEL_HW_SETTLE_DELAY_900US,
+	ADC_CHANNEL_HW_SETTLE_DELAY_1MS,
+	ADC_CHANNEL_HW_SETTLE_DELAY_2MS,
+	ADC_CHANNEL_HW_SETTLE_DELAY_4MS,
+	ADC_CHANNEL_HW_SETTLE_DELAY_6MS,
+	ADC_CHANNEL_HW_SETTLE_DELAY_8MS,
+	ADC_CHANNEL_HW_SETTLE_DELAY_10MS,
+	ADC_CHANNEL_HW_SETTLE_NONE,
+};
+
+/**
+ * enum qpnp_vadc_dec_ratio_sel - Selects the decimation ratio of the ADC.
+ *				  Supported values are 256, 512 and 1024.
+ */
+enum qpnp_vadc_dec_ratio_sel {
+	ADC_DEC_RATIO_256 = 0,
+	ADC_DEC_RATIO_512,
+	ADC_DEC_RATIO_1024,
+	ADC_DEC_RATIO_NONE,
+};
+
+/**
+ * enum qpnp_adc_cal_sel - Selects the calibration type that is applied
+ *			   on the corresponding channel measurement after
+ *			   the ADC data is read.
+ * %ADC_NO_CAL :	To obtain raw, uncalibrated data.
+ * %ADC_RATIO_CAL :	Applies ratiometric calibration. Note the calibration
+ *			values stored in the CAL peripheral for VADC_VREF and
+ *			VREF_1P25 already have GND_REF value removed.
+ * %ADC_ABS_CAL :	Applies absolute calibration. Note the calibration
+ *			values stored in the CAL peripheral for VADC_VREF and
+ *			VREF_1P25 already have GND_REF value removed.
+ */
+
+/**
+ * enum qpnp_adc_cal_val - Selects if the calibration values applied
+ *			    are the ones when collected on a timer interval
+ *			    or if an immediate calibration needs to be forced.
+ * %ADC_TIMER_CAL : Uses calibration value collected on the timer interval.
+ * %ADC_NEW_CAL : Forces an immediate calibration. Use only when necessary
+ *		  since it forces 3 calibration measurements in addition to
+ *		  the channel measurement. For most measurement, using
+ *		  calibration based on the timer interval is sufficient.
+ */
+enum qpnp_adc_cal_val {
+	ADC_TIMER_CAL = 0,
+	ADC_NEW_CAL,
+	ADC_CAL_VAL_NONE,
+};
+
+/**
+ * enum qpnp_vadc_mode_sel - Selects the basic mode of operation.
+ *		- The normal mode is used for single measurement.
+ *		- The Conversion sequencer is used to trigger an
+ *		  ADC read when a HW trigger is selected.
+ *		- The measurement interval performs a single or
+ *		  continuous measurement at a specified interval/delay.
+ * %ADC_OP_NORMAL_MODE : Normal mode used for single measurement.
+ * %ADC_OP_CONVERSION_SEQUENCER : Conversion sequencer used to trigger
+ *		  an ADC read on a HW supported trigger.
+ *		  Refer to enum qpnp_vadc_trigger for
+ *		  supported HW triggers.
+ * %ADC_OP_MEASUREMENT_INTERVAL : The measurement interval performs a
+ *		  single or continuous measurement after a specified delay.
+ *		  For delay look at qpnp_adc_meas_timer.
+ */
+enum qpnp_vadc_mode_sel {
+	ADC_OP_NORMAL_MODE = 0,
+	ADC_OP_CONVERSION_SEQUENCER,
+	ADC_OP_MEASUREMENT_INTERVAL,
+	ADC_OP_MODE_NONE,
+};
+
+/**
+ * enum qpnp_vadc_trigger - Select the HW trigger to be used while
+ *		measuring the ADC reading.
+ * %ADC_GSM_PA_ON : GSM power amplifier on.
+ * %ADC_TX_GTR_THRES : Transmit power greater than threshold.
+ * %ADC_CAMERA_FLASH_RAMP : Flash ramp up done.
+ * %ADC_DTEST : DTEST.
+ */
+enum qpnp_vadc_trigger {
+	ADC_GSM_PA_ON = 0,
+	ADC_TX_GTR_THRES,
+	ADC_CAMERA_FLASH_RAMP,
+	ADC_DTEST,
+	ADC_SEQ_NONE,
+};
+
+/**
+ * enum qpnp_vadc_conv_seq_timeout - Select delay (0 to 15ms) from
+ *		conversion request to triggering conversion sequencer
+ *		hold off time.
+ */
+enum qpnp_vadc_conv_seq_timeout {
+	ADC_CONV_SEQ_TIMEOUT_0MS = 0,
+	ADC_CONV_SEQ_TIMEOUT_1MS,
+	ADC_CONV_SEQ_TIMEOUT_2MS,
+	ADC_CONV_SEQ_TIMEOUT_3MS,
+	ADC_CONV_SEQ_TIMEOUT_4MS,
+	ADC_CONV_SEQ_TIMEOUT_5MS,
+	ADC_CONV_SEQ_TIMEOUT_6MS,
+	ADC_CONV_SEQ_TIMEOUT_7MS,
+	ADC_CONV_SEQ_TIMEOUT_8MS,
+	ADC_CONV_SEQ_TIMEOUT_9MS,
+	ADC_CONV_SEQ_TIMEOUT_10MS,
+	ADC_CONV_SEQ_TIMEOUT_11MS,
+	ADC_CONV_SEQ_TIMEOUT_12MS,
+	ADC_CONV_SEQ_TIMEOUT_13MS,
+	ADC_CONV_SEQ_TIMEOUT_14MS,
+	ADC_CONV_SEQ_TIMEOUT_15MS,
+	ADC_CONV_SEQ_TIMEOUT_NONE,
+};
+
+/**
+ * enum qpnp_adc_conv_seq_holdoff - Select delay from conversion
+ *		trigger signal (i.e. adc_conv_seq_trig) transition
+ *		to ADC enable. Delay = 25us * (value + 1).
+ */
+enum qpnp_adc_conv_seq_holdoff {
+	ADC_SEQ_HOLD_25US = 0,
+	ADC_SEQ_HOLD_50US,
+	ADC_SEQ_HOLD_75US,
+	ADC_SEQ_HOLD_100US,
+	ADC_SEQ_HOLD_125US,
+	ADC_SEQ_HOLD_150US,
+	ADC_SEQ_HOLD_175US,
+	ADC_SEQ_HOLD_200US,
+	ADC_SEQ_HOLD_225US,
+	ADC_SEQ_HOLD_250US,
+	ADC_SEQ_HOLD_275US,
+	ADC_SEQ_HOLD_300US,
+	ADC_SEQ_HOLD_325US,
+	ADC_SEQ_HOLD_350US,
+	ADC_SEQ_HOLD_375US,
+	ADC_SEQ_HOLD_400US,
+	ADC_SEQ_HOLD_NONE,
+};
+
+/**
+ * enum qpnp_adc_conv_seq_state - Conversion sequencer operating state
+ * %ADC_CONV_SEQ_IDLE : Sequencer is in idle.
+ * %ADC_CONV_TRIG_RISE : Waiting for rising edge trigger.
+ * %ADC_CONV_TRIG_HOLDOFF : Waiting for rising trigger hold off time.
+ * %ADC_CONV_MEAS_RISE : Measuring selected ADC signal.
+ * %ADC_CONV_TRIG_FALL : Waiting for falling trigger edge.
+ * %ADC_CONV_FALL_HOLDOFF : Waiting for falling trigger hold off time.
+ * %ADC_CONV_MEAS_FALL : Measuring selected ADC signal.
+ * %ADC_CONV_ERROR : Aberrant Hardware problem.
+ */
+enum qpnp_adc_conv_seq_state {
+	ADC_CONV_SEQ_IDLE = 0,
+	ADC_CONV_TRIG_RISE,
+	ADC_CONV_TRIG_HOLDOFF,
+	ADC_CONV_MEAS_RISE,
+	ADC_CONV_TRIG_FALL,
+	ADC_CONV_FALL_HOLDOFF,
+	ADC_CONV_MEAS_FALL,
+	ADC_CONV_ERROR,
+	ADC_CONV_NONE,
+};
+
+/**
+ * enum qpnp_adc_meas_timer_1 - Selects the measurement interval time.
+ *		If value = 0, use 0ms, else use (2^(value + 4)) / 32768 seconds.
+ * The timer period is used by the USB_ID. Do not set a polling rate
+ * greater than 1 second on PMIC 2.0. The max polling rate on the PMIC 2.0
+ * appears to be limited to 1 second.
+ * %ADC_MEAS1_INTERVAL_0MS : 0ms
+ * %ADC_MEAS1_INTERVAL_1P0MS : 1ms
+ * %ADC_MEAS1_INTERVAL_2P0MS : 2ms
+ * %ADC_MEAS1_INTERVAL_3P9MS : 3.9ms
+ * %ADC_MEAS1_INTERVAL_7P8MS : 7.8ms
+ * %ADC_MEAS1_INTERVAL_15P6MS : 15.6ms
+ * %ADC_MEAS1_INTERVAL_31P3MS : 31.3ms
+ * %ADC_MEAS1_INTERVAL_62P5MS : 62.5ms
+ * %ADC_MEAS1_INTERVAL_125MS : 125ms
+ * %ADC_MEAS1_INTERVAL_250MS : 250ms
+ * %ADC_MEAS1_INTERVAL_500MS : 500ms
+ * %ADC_MEAS1_INTERVAL_1S : 1 second
+ * %ADC_MEAS1_INTERVAL_2S : 2 seconds
+ * %ADC_MEAS1_INTERVAL_4S : 4 seconds
+ * %ADC_MEAS1_INTERVAL_8S : 8 seconds
+ * %ADC_MEAS1_INTERVAL_16S : 16 seconds
+ */
+enum qpnp_adc_meas_timer_1 {
+	ADC_MEAS1_INTERVAL_0MS = 0,
+	ADC_MEAS1_INTERVAL_1P0MS,
+	ADC_MEAS1_INTERVAL_2P0MS,
+	ADC_MEAS1_INTERVAL_3P9MS,
+	ADC_MEAS1_INTERVAL_7P8MS,
+	ADC_MEAS1_INTERVAL_15P6MS,
+	ADC_MEAS1_INTERVAL_31P3MS,
+	ADC_MEAS1_INTERVAL_62P5MS,
+	ADC_MEAS1_INTERVAL_125MS,
+	ADC_MEAS1_INTERVAL_250MS,
+	ADC_MEAS1_INTERVAL_500MS,
+	ADC_MEAS1_INTERVAL_1S,
+	ADC_MEAS1_INTERVAL_2S,
+	ADC_MEAS1_INTERVAL_4S,
+	ADC_MEAS1_INTERVAL_8S,
+	ADC_MEAS1_INTERVAL_16S,
+	ADC_MEAS1_INTERVAL_NONE,
+};
+
+/**
+ * enum qpnp_adc_meas_timer_2 - Selects the measurement interval time.
+ *		If value = 0, use 0ms, else use (2^(value + 4)) / 32768 seconds.
+ * The timer period is used by the batt_therm. Do not set a polling rate
+ * greater than 1 second on PMIC 2.0. The max polling rate on the PMIC 2.0
+ * appears to be limited to 1 second.
+ * %ADC_MEAS2_INTERVAL_0MS : 0ms
+ * %ADC_MEAS2_INTERVAL_100MS : 100ms
+ * %ADC_MEAS2_INTERVAL_200MS : 200ms
+ * %ADC_MEAS2_INTERVAL_300MS : 300ms
+ * %ADC_MEAS2_INTERVAL_400MS : 400ms
+ * %ADC_MEAS2_INTERVAL_500MS : 500ms
+ * %ADC_MEAS2_INTERVAL_600MS : 600ms
+ * %ADC_MEAS2_INTERVAL_700MS : 700ms
+ * %ADC_MEAS2_INTERVAL_800MS : 800ms
+ * %ADC_MEAS2_INTERVAL_900MS : 900ms
+ * %ADC_MEAS2_INTERVAL_1S : 1 second
+ * %ADC_MEAS2_INTERVAL_1P1S : 1.1 seconds
+ * %ADC_MEAS2_INTERVAL_1P2S : 1.2 seconds
+ * %ADC_MEAS2_INTERVAL_1P3S : 1.3 seconds
+ * %ADC_MEAS2_INTERVAL_1P4S : 1.4 seconds
+ * %ADC_MEAS2_INTERVAL_1P5S : 1.5 seconds
+ */
+enum qpnp_adc_meas_timer_2 {
+	ADC_MEAS2_INTERVAL_0MS = 0,
+	ADC_MEAS2_INTERVAL_100MS,
+	ADC_MEAS2_INTERVAL_200MS,
+	ADC_MEAS2_INTERVAL_300MS,
+	ADC_MEAS2_INTERVAL_400MS,
+	ADC_MEAS2_INTERVAL_500MS,
+	ADC_MEAS2_INTERVAL_600MS,
+	ADC_MEAS2_INTERVAL_700MS,
+	ADC_MEAS2_INTERVAL_800MS,
+	ADC_MEAS2_INTERVAL_900MS,
+	ADC_MEAS2_INTERVAL_1S,
+	ADC_MEAS2_INTERVAL_1P1S,
+	ADC_MEAS2_INTERVAL_1P2S,
+	ADC_MEAS2_INTERVAL_1P3S,
+	ADC_MEAS2_INTERVAL_1P4S,
+	ADC_MEAS2_INTERVAL_1P5S,
+	ADC_MEAS2_INTERVAL_NONE,
+};
+
+/**
+ * enum qpnp_adc_meas_timer_3 - Selects the measurement interval time.
+ *		If value = 0, use 0ms, else use (2^(value + 4)) / 32768 seconds.
+ * Do not set a polling rate greater than 1 second on PMIC 2.0.
+ * The max polling rate on the PMIC 2.0 appears to be limited to 1 second.
+ * %ADC_MEAS3_INTERVAL_0S : 0s
+ * %ADC_MEAS3_INTERVAL_1S : 1 second
+ * %ADC_MEAS3_INTERVAL_2S : 2 seconds
+ * %ADC_MEAS3_INTERVAL_3S : 3 seconds
+ * %ADC_MEAS3_INTERVAL_4S : 4 seconds
+ * %ADC_MEAS3_INTERVAL_5S : 5 seconds
+ * %ADC_MEAS3_INTERVAL_6S : 6 seconds
+ * %ADC_MEAS3_INTERVAL_7S : 7 seconds
+ * %ADC_MEAS3_INTERVAL_8S : 8 seconds
+ * %ADC_MEAS3_INTERVAL_9S : 9 seconds
+ * %ADC_MEAS3_INTERVAL_10S : 10 seconds
+ * %ADC_MEAS3_INTERVAL_11S : 11 seconds
+ * %ADC_MEAS3_INTERVAL_12S : 12 seconds
+ * %ADC_MEAS3_INTERVAL_13S : 13 seconds
+ * %ADC_MEAS3_INTERVAL_14S : 14 seconds
+ * %ADC_MEAS3_INTERVAL_15S : 15 seconds
+ */
+enum qpnp_adc_meas_timer_3 {
+	ADC_MEAS3_INTERVAL_0S = 0,
+	ADC_MEAS3_INTERVAL_1S,
+	ADC_MEAS3_INTERVAL_2S,
+	ADC_MEAS3_INTERVAL_3S,
+	ADC_MEAS3_INTERVAL_4S,
+	ADC_MEAS3_INTERVAL_5S,
+	ADC_MEAS3_INTERVAL_6S,
+	ADC_MEAS3_INTERVAL_7S,
+	ADC_MEAS3_INTERVAL_8S,
+	ADC_MEAS3_INTERVAL_9S,
+	ADC_MEAS3_INTERVAL_10S,
+	ADC_MEAS3_INTERVAL_11S,
+	ADC_MEAS3_INTERVAL_12S,
+	ADC_MEAS3_INTERVAL_13S,
+	ADC_MEAS3_INTERVAL_14S,
+	ADC_MEAS3_INTERVAL_15S,
+	ADC_MEAS3_INTERVAL_NONE,
+};
+
+/**
+ * enum qpnp_adc_meas_timer_select - Selects the timer for which
+ *	the appropriate polling frequency is set.
+ * %ADC_MEAS_TIMER_SELECT1 - Select this timer for a 1 second measurement
+ *				polling interval.
+ * %ADC_MEAS_TIMER_SELECT2 - Select this timer for 500ms measurement interval.
+ * %ADC_MEAS_TIMER_SELECT3 - Select this timer for 5 second interval.
+ */
+enum qpnp_adc_meas_timer_select {
+	ADC_MEAS_TIMER_SELECT1 = 0,
+	ADC_MEAS_TIMER_SELECT2,
+	ADC_MEAS_TIMER_SELECT3,
+	ADC_MEAS_TIMER_NUM,
+};
+
+/**
+ * enum qpnp_adc_meas_interval_op_ctl - Select operating mode.
+ * %ADC_MEAS_INTERVAL_OP_SINGLE : Conduct single measurement at specified time
+ *			delay.
+ * %ADC_MEAS_INTERVAL_OP_CONTINUOUS : Make measurements at measurement interval
+ *			times.
+ */
+enum qpnp_adc_meas_interval_op_ctl {
+	ADC_MEAS_INTERVAL_OP_SINGLE = 0,
+	ADC_MEAS_INTERVAL_OP_CONTINUOUS,
+	ADC_MEAS_INTERVAL_OP_NONE,
+};
+
+/**
+ * Channel selection registers for each of the configurable measurements
+ * Channel allotment is set at device configuration for a channel.
+ * The USB_ID, BATT_THERM, PMIC_THERM and VBAT channels are used by the
+ * kernel space USB, Battery and IADC drivers.
+ * The other 3 channels are configurable for use by userspace clients.
+ */
+enum qpnp_adc_tm_channel_select	{
+	QPNP_ADC_TM_M0_ADC_CH_SEL_CTL = 0x48,
+	QPNP_ADC_TM_M1_ADC_CH_SEL_CTL = 0x68,
+	QPNP_ADC_TM_M2_ADC_CH_SEL_CTL = 0x70,
+	QPNP_ADC_TM_M3_ADC_CH_SEL_CTL = 0x78,
+	QPNP_ADC_TM_M4_ADC_CH_SEL_CTL = 0x80,
+	QPNP_ADC_TM_M5_ADC_CH_SEL_CTL = 0x88,
+	QPNP_ADC_TM_M6_ADC_CH_SEL_CTL = 0x90,
+	QPNP_ADC_TM_M7_ADC_CH_SEL_CTL = 0x98,
+	QPNP_ADC_TM_CH_SELECT_NONE
+};
+
+/**
+ * Channel index for the corresponding index to qpnp_adc_tm_channel_select
+ */
+enum qpnp_adc_tm_channel_num {
+	QPNP_ADC_TM_CHAN0 = 0,
+	QPNP_ADC_TM_CHAN1,
+	QPNP_ADC_TM_CHAN2,
+	QPNP_ADC_TM_CHAN3,
+	QPNP_ADC_TM_CHAN4,
+	QPNP_ADC_TM_CHAN5,
+	QPNP_ADC_TM_CHAN6,
+	QPNP_ADC_TM_CHAN7,
+	QPNP_ADC_TM_CHAN_NONE
+};
+
+enum qpnp_comp_scheme_type {
+	COMP_ID_GF = 0,
+	COMP_ID_SMIC,
+	COMP_ID_TSMC,
+	COMP_ID_NUM,
+};
+
+/**
+ * struct qpnp_adc_tm_config - Represent ADC Thermal Monitor configuration.
+ * @channel: ADC channel for which thermal monitoring is requested.
+ * @adc_code: The pre-calibrated digital output of a given ADC relative to the
+ *		ADC reference.
+ * @high_thr_temp: Temperature at which high threshold notification is required.
+ * @low_thr_temp: Temperature at which low threshold notification is required.
+ * @low_thr_voltage : Low threshold voltage ADC code used for reverse
+ *			calibration.
+ * @high_thr_voltage: High threshold voltage ADC code used for reverse
+ *			calibration.
+ */
+struct qpnp_adc_tm_config {
+	int	channel;
+	int	adc_code;
+	int	high_thr_temp;
+	int	low_thr_temp;
+	int64_t	high_thr_voltage;
+	int64_t	low_thr_voltage;
+};
+
+/**
+ * enum qpnp_adc_tm_trip_type - Type for setting high/low temperature/voltage.
+ * %ADC_TM_TRIP_HIGH_WARM: Setting high temperature. Note that high temperature
+ *			corresponds to low voltage. Driver handles this case
+ *			appropriately to set the high/low voltage
+ *			thresholds.
+ * %ADC_TM_TRIP_LOW_COOL: Setting low temperature.
+ */
+enum qpnp_adc_tm_trip_type {
+	ADC_TM_TRIP_HIGH_WARM = 0,
+	ADC_TM_TRIP_LOW_COOL,
+	ADC_TM_TRIP_NUM,
+};
+
+#define ADC_TM_WRITABLE_TRIPS_MASK ((1 << ADC_TM_TRIP_NUM) - 1)
+
+/**
+ * enum qpnp_tm_state - This lets the client know whether the threshold
+ *		that was crossed was high/low.
+ * %ADC_TM_HIGH_STATE: Client is notified of crossing the requested high
+ *			voltage threshold.
+ * %ADC_TM_COOL_STATE: Client is notified of crossing the requested cool
+ *			temperature threshold.
+ * %ADC_TM_LOW_STATE: Client is notified of crossing the requested low
+ *			voltage threshold.
+ * %ADC_TM_WARM_STATE: Client is notified of crossing the requested high
+ *			temperature threshold.
+ */
+enum qpnp_tm_state {
+	ADC_TM_HIGH_STATE = 0,
+	ADC_TM_COOL_STATE = ADC_TM_HIGH_STATE,
+	ADC_TM_LOW_STATE,
+	ADC_TM_WARM_STATE = ADC_TM_LOW_STATE,
+	ADC_TM_STATE_NUM,
+};
+
+/**
+ * enum qpnp_state_request - Request to enable/disable the corresponding
+ *			high/low voltage/temperature thresholds.
+ * %ADC_TM_HIGH_THR_ENABLE: Enable the high voltage threshold.
+ * %ADC_TM_COOL_THR_ENABLE: Enable the cool temperature threshold.
+ * %ADC_TM_LOW_THR_ENABLE: Enable the low voltage/temperature threshold.
+ * %ADC_TM_WARM_THR_ENABLE: Enable the warm temperature threshold.
+ * %ADC_TM_HIGH_LOW_THR_ENABLE: Enable both the high and low
+ *				voltage/temperature thresholds.
+ * %ADC_TM_HIGH_THR_DISABLE: Disable the high voltage/temperature threshold.
+ * %ADC_TM_COOL_THR_DISABLE: Disable the cool temperature threshold.
+ * %ADC_TM_LOW_THR_DISABLE: Disable the low voltage/temperature threshold.
+ * %ADC_TM_WARM_THR_DISABLE: Disable the warm temperature threshold.
+ * %ADC_TM_HIGH_LOW_THR_DISABLE: Disable both the high and low
+ *				voltage/temperature thresholds.
+ */
+enum qpnp_state_request {
+	ADC_TM_HIGH_THR_ENABLE = 0,
+	ADC_TM_COOL_THR_ENABLE = ADC_TM_HIGH_THR_ENABLE,
+	ADC_TM_LOW_THR_ENABLE,
+	ADC_TM_WARM_THR_ENABLE = ADC_TM_LOW_THR_ENABLE,
+	ADC_TM_HIGH_LOW_THR_ENABLE,
+	ADC_TM_HIGH_THR_DISABLE,
+	ADC_TM_COOL_THR_DISABLE = ADC_TM_HIGH_THR_DISABLE,
+	ADC_TM_LOW_THR_DISABLE,
+	ADC_TM_WARM_THR_DISABLE = ADC_TM_LOW_THR_DISABLE,
+	ADC_TM_HIGH_LOW_THR_DISABLE,
+	ADC_TM_THR_NUM,
+};
+
+/**
+ * struct qpnp_adc_tm_btm_param - Represent Battery temperature threshold
+ *				monitoring configuration.
+ * @high_temp: High temperature threshold for which notification is requested.
+ * @low_temp: Low temperature threshold for which notification is requested.
+ * @high_thr: High voltage threshold for which notification is requested.
+ * @low_thr: Low voltage threshold for which notification is requested.
+ * @adc_tm_hc: Represents the refreshed BTM register design.
+ * @channel: ADC channel on which the monitoring is requested.
+ * @state_request: Enable/disable the corresponding high and low temperature
+ *		thresholds.
+ * @timer_interval: Select polling rate from qpnp_adc_meas_timer_1 type.
+ * @timer_interval2: Select polling rate from qpnp_adc_meas_timer_2 type.
+ * @timer_interval3: Select polling rate from qpnp_adc_meas_timer_3 type.
+ * @btm_ctx: A client context pointer passed back in the notification.
+ * @threshold_notification: Notification callback invoked once a threshold
+ *		is crossed.
+ * Units used for high/low temperature and voltage notification depend
+ * on the client's usage. Check the rscaling function for the
+ * appropriate channel nodes:
+ * @Batt therm clients pass temperature units in decidegrees Centigrade.
+ * @USB_ID inputs the voltage units in millivolts.
+ * @PA_THERM inputs the units in degC.
+ * @PMIC_THERM inputs the units in millidegC.
+ */
+struct qpnp_adc_tm_btm_param {
+	int32_t					high_temp;
+	int32_t					low_temp;
+	int32_t					high_thr;
+	int32_t					low_thr;
+	int32_t					gain_num;
+	int32_t					gain_den;
+	bool					adc_tm_hc;
+	enum qpnp_vadc_channels			channel;
+	enum qpnp_state_request			state_request;
+	enum qpnp_adc_meas_timer_1		timer_interval;
+	enum qpnp_adc_meas_timer_2		timer_interval2;
+	enum qpnp_adc_meas_timer_3		timer_interval3;
+	void					*btm_ctx;
+	void	(*threshold_notification)(enum qpnp_tm_state state,
+						void *ctx);
+};
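+
+/*
+ * Illustrative battery-therm monitoring request. The limits and the
+ * callback are placeholders; batt_therm temperatures are passed in
+ * decidegC per the notes above, so high_temp = 450 means 45.0 degC:
+ *
+ *	static void my_batt_notify(enum qpnp_tm_state state, void *ctx)
+ *	{
+ *		if (state == ADC_TM_WARM_STATE)
+ *			pr_info("battery crossed the warm threshold\n");
+ *	}
+ *
+ *	struct qpnp_adc_tm_btm_param param = {
+ *		.high_temp = 450,
+ *		.low_temp = 0,
+ *		.channel = LR_MUX1_BATT_THERM,
+ *		.state_request = ADC_TM_HIGH_LOW_THR_ENABLE,
+ *		.timer_interval = ADC_MEAS1_INTERVAL_1S,
+ *		.threshold_notification = my_batt_notify,
+ *	};
+ */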
+
+/**
+ * struct qpnp_vadc_linear_graph - Represent ADC characteristics.
+ * @dy: Numerator slope to calculate the gain.
+ * @dx: Denominator slope to calculate the gain.
+ * @adc_vref: A/D word of the voltage reference used for the channel.
+ * @adc_gnd: A/D word of the ground reference used for the channel.
+ *
+ * Each ADC device has different offset and gain parameters which are computed
+ * to calibrate the device.
+ */
+struct qpnp_vadc_linear_graph {
+	int64_t dy;
+	int64_t dx;
+	int64_t adc_vref;
+	int64_t adc_gnd;
+};
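+
+/*
+ * Sketch of how such a graph is typically applied (compare
+ * qpnp_adc_scale_default() below): dy holds the code span measured
+ * between the two calibration points and dx the corresponding known
+ * voltage span in uV, so
+ *
+ *	voltage_uv = (adc_code - adc_gnd) * dx / dy;
+ */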
+
+/**
+ * struct qpnp_vadc_map_pt - Map the graph representation for ADC channel
+ * @x: Represent the ADC digitized code.
+ * @y: Represent the physical data which can be temperature, voltage,
+ *     resistance.
+ */
+struct qpnp_vadc_map_pt {
+	int32_t x;
+	int32_t y;
+};
+
+/**
+ * struct qpnp_vadc_scaling_ratio - Represent scaling ratio for adc input.
+ * @num: Numerator scaling parameter.
+ * @den: Denominator scaling parameter.
+ */
+struct qpnp_vadc_scaling_ratio {
+	int32_t num;
+	int32_t den;
+};
+
+/**
+ * struct qpnp_adc_properties - Represent the ADC properties.
+ * @adc_vdd_reference: Reference voltage for QPNP ADC.
+ * @bitresolution: ADC bit resolution for QPNP ADC.
+ * @bipolar: Polarity for QPNP ADC.
+ * @adc_hc: Set when using the HC variant of the ADC controller.
+ */
+struct qpnp_adc_properties {
+	uint32_t	adc_vdd_reference;
+	uint32_t	bitresolution;
+	bool		bipolar;
+	bool		adc_hc;
+};
+
+/**
+ * struct qpnp_vadc_chan_properties - Represent channel properties of the ADC.
+ * @offset_gain_numerator: The inverse numerator of the gain applied to the
+ *			   input channel.
+ * @offset_gain_denominator: The inverse denominator of the gain applied to the
+ *			     input channel.
+ * @high_thr: High threshold voltage that is requested to be set.
+ * @low_thr: Low threshold voltage that is requested to be set.
+ * @timer_select: Chosen from one of the 3 timers to set the polling rate for
+ *		  the VADC_BTM channel.
+ * @meas_interval1: Polling rate to set for timer 1.
+ * @meas_interval2: Polling rate to set for timer 2.
+ * @tm_channel_select: BTM channel number for the 5 VADC_BTM channels.
+ * @state_request: User can select either enable or disable high/low or both
+ * activation levels based on the qpnp_state_request type.
+ * @adc_graph: ADC graph for the channel of struct type qpnp_adc_linear_graph.
+ */
+struct qpnp_vadc_chan_properties {
+	uint32_t			offset_gain_numerator;
+	uint32_t			offset_gain_denominator;
+	uint32_t				high_thr;
+	uint32_t				low_thr;
+	enum qpnp_adc_meas_timer_select		timer_select;
+	enum qpnp_adc_meas_timer_1		meas_interval1;
+	enum qpnp_adc_meas_timer_2		meas_interval2;
+	enum qpnp_adc_tm_channel_select		tm_channel_select;
+	enum qpnp_state_request			state_request;
+	enum qpnp_adc_calib_type		calib_type;
+	struct qpnp_vadc_linear_graph	adc_graph[ADC_HC_CAL_SEL_NONE];
+};
+
+/**
+ * struct qpnp_vadc_result - Represent the result of the QPNP ADC.
+ * @chan: The channel number of the requested conversion.
+ * @adc_code: The pre-calibrated digital output of a given ADC relative to
+ *	      the ADC reference.
+ * @measurement: In units specific to a given ADC; most ADCs use a reference
+ *		 voltage but some use a reference current. The measurement
+ *		 here is a number relative to the reference of a given ADC.
+ * @physical: The data meaningful for each individual channel whether it is
+ *	      voltage, current, temperature, etc.
+ *	      All voltage units are represented in micro - volts.
+ *	      -Battery temperature units are represented as 0.1 DegC.
+ *	      -PA Therm temperature units are represented as DegC.
+ *	      -PMIC Die temperature units are represented as 0.001 DegC.
+ */
+struct qpnp_vadc_result {
+	uint32_t	chan;
+	int32_t		adc_code;
+	int64_t		measurement;
+	int64_t		physical;
+};
+
+/**
+ * struct qpnp_adc_amux - AMUX properties for individual channel
+ * @name: Channel string name.
+ * @channel_num: Channel in integer used from qpnp_adc_channels.
+ * @chan_path_prescaling: Channel scaling performed on the input signal.
+ * @adc_decimation: Sampling rate desired for the channel.
+ * @adc_scale_fn: Scaling function to convert to the data meaningful for
+ *		 each individual channel whether it is voltage, current,
+ *		 temperature, etc and compensates the channel properties.
+ */
+struct qpnp_adc_amux {
+	char					*name;
+	enum qpnp_vadc_channels			channel_num;
+	enum qpnp_adc_channel_scaling_param	chan_path_prescaling;
+	enum qpnp_adc_decimation_type		adc_decimation;
+	enum qpnp_adc_scale_fn_type		adc_scale_fn;
+	enum qpnp_adc_fast_avg_ctl		fast_avg_setup;
+	enum qpnp_adc_hw_settle_time		hw_settle_time;
+	enum qpnp_adc_calib_type		calib_type;
+	enum qpnp_adc_cal_val			cal_val;
+};
+
+/*
+ * Pre-scaling (AMUX) ratios applied to the input signal, indexed by
+ * enum qpnp_adc_channel_scaling_param.
+ */
+static const struct qpnp_vadc_scaling_ratio qpnp_vadc_amux_scaling_ratio[] = {
+	{1, 1},
+	{1, 3},
+	{1, 4},
+	{1, 6},
+	{1, 20},
+	{1, 8},
+	{10, 81},
+	{1, 10}
+};
+
+/**
+ * struct qpnp_vadc_scale_fn - Scaling function prototype
+ * @chan: Function pointer to one of the scaling functions
+ *	which takes the adc properties, channel properties,
+ *	and returns the physical result
+ */
+struct qpnp_vadc_scale_fn {
+	int32_t (*chan)(struct qpnp_vadc_chip *, int32_t,
+		const struct qpnp_adc_properties *,
+		const struct qpnp_vadc_chan_properties *,
+		struct qpnp_vadc_result *);
+};
+
+/**
+ * struct qpnp_adc_tm_reverse_scale_fn - Reverse scaling prototype
+ * @chan: Function pointer to one of the scaling functions
+ *	which takes the adc properties, channel properties,
+ *	and returns the physical result
+ */
+struct qpnp_adc_tm_reverse_scale_fn {
+	int32_t (*chan)(struct qpnp_vadc_chip *,
+		struct qpnp_adc_tm_btm_param *,
+		uint32_t *, uint32_t *);
+};
+
+/**
+ * struct qpnp_vadc_rscale_fn - Scaling function prototype
+ * @chan: Function pointer to one of the scaling functions
+ *	which takes the adc properties, channel properties,
+ *	and returns the physical result
+ */
+struct qpnp_vadc_rscale_fn {
+	int32_t (*chan)(struct qpnp_vadc_chip *,
+		const struct qpnp_vadc_chan_properties *,
+		struct qpnp_adc_tm_btm_param *,
+		uint32_t *, uint32_t *);
+};
+
+/**
+ * struct qpnp_iadc_calib - IADC channel calibration structure.
+ * @channel - Channel for which the historical offset and gain is
+ *	      calculated. Available channels are internal rsense,
+ *	      external rsense and alternate lead pairs.
+ * @offset_raw - raw Offset value for the channel.
+ * @gain_raw - raw Gain of the channel.
+ * @ideal_offset_uv - ideal offset value for the channel.
+ * @ideal_gain_nv - ideal gain for the channel.
+ * @offset_uv - converted value of offset in uV.
+ * @gain_uv - converted value of gain in uV.
+ */
+struct qpnp_iadc_calib {
+	enum qpnp_iadc_channels		channel;
+	uint16_t			offset_raw;
+	uint16_t			gain_raw;
+	uint32_t			ideal_offset_uv;
+	uint32_t			ideal_gain_nv;
+	uint32_t			offset_uv;
+	uint32_t			gain_uv;
+};
+
+/**
+ * struct qpnp_iadc_result - IADC read result structure.
+ * @result_uv - Result of ADC in uV.
+ * @result_ua - Result of ADC in uA.
+ */
+struct qpnp_iadc_result {
+	int32_t				result_uv;
+	int32_t				result_ua;
+};
+
+/**
+ * struct qpnp_adc_drv - QPNP ADC device structure.
+ * @pdev - platform device for the ADC peripheral.
+ * @regmap - regmap handle used to access the peripheral registers.
+ * @offset - base offset for the ADC peripheral.
+ * @adc_prop - ADC properties specific to the ADC peripheral.
+ * @amux_prop - AMUX properties representing the ADC peripheral.
+ * @adc_channels - ADC channel properties for the ADC peripheral.
+ * @adc_irq_eoc - End of Conversion IRQ.
+ * @adc_irq_fifo_not_empty - Conversion sequencer request written
+ *			to FIFO when not empty.
+ * @adc_irq_conv_seq_timeout - Conversion sequencer trigger timeout.
+ * @adc_high_thr_irq - Output higher than high threshold set for measurement.
+ * @adc_low_thr_irq - Output lower than low threshold set for measurement.
+ * @adc_lock - ADC lock for access to the peripheral.
+ * @adc_rslt_completion - ADC result notification after interrupt
+ *			  is received.
+ * @calib - Internal rsens calibration values for gain and offset.
+ */
+struct qpnp_adc_drv {
+	struct platform_device		*pdev;
+	struct regmap			*regmap;
+	uint8_t				slave;
+	uint16_t			offset;
+	struct qpnp_adc_properties	*adc_prop;
+	struct qpnp_adc_amux_properties	*amux_prop;
+	struct qpnp_adc_amux		*adc_channels;
+	int				adc_irq_eoc;
+	int				adc_irq_fifo_not_empty;
+	int				adc_irq_conv_seq_timeout;
+	int				adc_high_thr_irq;
+	int				adc_low_thr_irq;
+	struct mutex			adc_lock;
+	struct completion		adc_rslt_completion;
+	struct qpnp_iadc_calib		calib;
+	struct regulator		*hkadc_ldo;
+	struct regulator		*hkadc_ldo_ok;
+	bool				adc_hc;
+};
+
+/**
+ * struct qpnp_adc_amux_properties - QPNP VADC amux channel property.
+ * @amux_channel - Refer to the qpnp_vadc_channel list.
+ * @decimation - Sampling rate supported for the channel.
+ * @mode_sel - The basic mode of operation.
+ * @hw_settle_time - The time between AMUX being configured and the
+ *			start of conversion.
+ * @fast_avg_setup - Ability to provide single result from the ADC
+ *			that is an average of multiple measurements.
+ * @trigger_channel - HW trigger channel for conversion sequencer.
+ * @calib_type - Used to store the calibration type for the channel
+ *		 absolute/ratiometric.
+ * @cal_val - Used to determine if fresh calibration value or timer
+ *	      updated calibration value is to be used.
+ * @chan_prop - Represent the channel properties of the ADC.
+ */
+struct qpnp_adc_amux_properties {
+	uint32_t				amux_channel;
+	uint32_t				decimation;
+	uint32_t				mode_sel;
+	uint32_t				hw_settle_time;
+	uint32_t				fast_avg_setup;
+	enum qpnp_vadc_trigger			trigger_channel;
+	enum qpnp_adc_calib_type		calib_type;
+	enum qpnp_adc_cal_val			cal_val;
+	struct qpnp_vadc_chan_properties	chan_prop[0];
+};
+
+/* SW indexes for PMIC type and version used by QPNP VADC and IADC */
+#define QPNP_REV_ID_8941_3_1	1
+#define QPNP_REV_ID_8026_1_0	2
+#define QPNP_REV_ID_8026_2_0	3
+#define QPNP_REV_ID_8110_1_0	4
+#define QPNP_REV_ID_8026_2_1	5
+#define QPNP_REV_ID_8110_2_0	6
+#define QPNP_REV_ID_8026_2_2	7
+#define QPNP_REV_ID_8941_3_0	8
+#define QPNP_REV_ID_8941_2_0	9
+#define QPNP_REV_ID_8916_1_0	10
+#define QPNP_REV_ID_8916_1_1	11
+#define QPNP_REV_ID_8916_2_0	12
+#define QPNP_REV_ID_8909_1_0	13
+#define QPNP_REV_ID_8909_1_1	14
+#define QPNP_REV_ID_PM8950_1_0	16
+
+/* Public API */
+#if defined(CONFIG_SENSORS_QPNP_ADC_VOLTAGE)				\
+			|| defined(CONFIG_SENSORS_QPNP_ADC_VOLTAGE_MODULE)
+/**
+ * qpnp_vadc_read() - Performs ADC read on the channel.
+ * @dev:	Structure device for qpnp vadc
+ * @channel:	Input channel to perform the ADC read.
+ * @result:	Structure pointer of type qpnp_vadc_result
+ *		in which the ADC read results are stored.
+ */
+int32_t qpnp_vadc_read(struct qpnp_vadc_chip *dev,
+				enum qpnp_vadc_channels channel,
+				struct qpnp_vadc_result *result);
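+
+/*
+ * Illustrative call, assuming 'chip' was obtained at probe time
+ * (e.g. via qpnp_get_vadc()):
+ *
+ *	struct qpnp_vadc_result res;
+ *	int rc = qpnp_vadc_read(chip, VBAT_SNS, &res);
+ *
+ *	if (!rc)
+ *		pr_info("VBAT = %lld uV\n", res.physical);
+ */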
+
+/**
+ * qpnp_vadc_hc_read() - Performs ADC read on the channel.
+ *		It uses the refreshed VADC design from qpnp-vadc-hc.
+ * @dev:	Structure device for qpnp vadc
+ * @channel:	Input channel to perform the ADC read.
+ * @result:	Structure pointer of type qpnp_vadc_result
+ *		in which the ADC read results are stored.
+ */
+int32_t qpnp_vadc_hc_read(struct qpnp_vadc_chip *dev,
+				enum qpnp_vadc_channels channel,
+				struct qpnp_vadc_result *result);
+
+/**
+ * qpnp_vadc_conv_seq_request() - Performs ADC read on the conversion
+ *				sequencer channel.
+ * @dev:	Structure device for qpnp vadc
+ * @trigger_channel:	HW trigger for the conversion sequencer, of type
+ *		qpnp_vadc_trigger.
+ * @channel:	Input channel to perform the ADC read.
+ * @result:	Structure pointer of type qpnp_vadc_result
+ *		in which the ADC read results are stored.
+ */
+int32_t qpnp_vadc_conv_seq_request(struct qpnp_vadc_chip *dev,
+			enum qpnp_vadc_trigger trigger_channel,
+			enum qpnp_vadc_channels channel,
+			struct qpnp_vadc_result *result);
+
+/**
+ * qpnp_adc_get_devicetree_data() - Abstracts the ADC devicetree data.
+ * @pdev:	Platform device for the qpnp ADC.
+ * @adc_qpnp:	ADC driver structure to be populated from the device tree.
+ */
+int32_t qpnp_adc_get_devicetree_data(struct platform_device *pdev,
+					struct qpnp_adc_drv *adc_qpnp);
+
+/**
+ * qpnp_adc_scale_default() - Scales the pre-calibrated digital output
+ *		of an ADC to the ADC reference and compensates for the
+ *		gain and offset.
+ * @dev:	Structure device for qpnp vadc
+ * @adc_code:	pre-calibrated digital output of the ADC.
+ * @adc_prop:	adc properties of the qpnp adc such as bit resolution,
+ *		reference voltage.
+ * @chan_prop:	Individual channel properties to compensate the i/p scaling,
+ *		slope and offset.
+ * @chan_rslt:	Physical result to be stored.
+ */
+int32_t qpnp_adc_scale_default(struct qpnp_vadc_chip *dev,
+			int32_t adc_code,
+			const struct qpnp_adc_properties *adc_prop,
+			const struct qpnp_vadc_chan_properties *chan_prop,
+			struct qpnp_vadc_result *chan_rslt);
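
In essence, this family of scale_* helpers (declared here and below) applies
a linear code-to-voltage map derived from the calibration graph, then undoes
the channel's input pre-scaling. A hedged sketch of that arithmetic — the
graph fields and pre-scale ratio stand in for the driver's internal
calibration data, not its exact implementation; div64_s64() is from
<linux/math64.h>:

/* Hedged sketch of the linear scaling model described above. dy/dx come
 * from the calibration graph (code span / voltage span) and pre_num /
 * pre_den from the channel's input pre-scaler.
 */
static int64_t example_scale_code(int32_t adc_code, int64_t adc_gnd,
				  int64_t dy, int64_t dx,
				  int64_t pre_num, int64_t pre_den)
{
	int64_t uv;

	/* offset/gain compensation against the calibration graph */
	uv = div64_s64(((int64_t)adc_code - adc_gnd) * dx, dy);
	/* undo the channel's input pre-scaling */
	return div64_s64(uv * pre_den, pre_num);
}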
+/**
+ * qpnp_adc_scale_pmic_therm() - Scales the pre-calibrated digital output
+ *		of an ADC to the ADC reference and compensates for the
+ *		gain and offset. The AMUX output scales at 2mV/K; the
+ *		result returned is the temperature in millidegC.
+ * @dev:	Structure device for qpnp vadc
+ * @adc_code:	pre-calibrated digital output of the ADC.
+ * @adc_prop:	adc properties of the qpnp adc such as bit resolution,
+ *		reference voltage.
+ * @chan_prop:	Individual channel properties to compensate the i/p scaling,
+ *		slope and offset.
+ * @chan_rslt:	Physical result to be stored.
+ */
+int32_t qpnp_adc_scale_pmic_therm(struct qpnp_vadc_chip *dev,
+			int32_t adc_code,
+			const struct qpnp_adc_properties *adc_prop,
+			const struct qpnp_vadc_chan_properties *chan_prop,
+			struct qpnp_vadc_result *chan_rslt);
+/**
+ * qpnp_adc_scale_pmi_chg_temp() - Scales the pre-calibrated digital output
+ *		of an ADC to the ADC reference and compensates for the
+ *		gain and offset. The voltage measured by HKADC is related to
+ *		the junction temperature according to
+ *		Tj = -137.67 degC * (V_adc * 2) + 382.04 degC
+ * @dev:	Structure device for qpnp vadc
+ * @adc_code:	pre-calibrated digital output of the ADC.
+ * @adc_prop:	adc properties of the qpnp adc such as bit resolution,
+ *		reference voltage.
+ * @chan_prop:	Individual channel properties to compensate the i/p scaling,
+ *		slope and offset.
+ * @chan_rslt:	Physical result to be stored.
+ */
+int32_t qpnp_adc_scale_pmi_chg_temp(struct qpnp_vadc_chip *dev,
+			int32_t adc_code,
+			const struct qpnp_adc_properties *adc_prop,
+			const struct qpnp_vadc_chan_properties *chan_prop,
+			struct qpnp_vadc_result *chan_rslt);
+/**
+ * qpnp_adc_scale_batt_therm() - Scales the pre-calibrated digital output
+ *		of an ADC to the ADC reference and compensates for the
+ *		gain and offset. Returns the temperature in decidegC.
+ * @dev:	Structure device for qpnp vadc
+ * @adc_code:	pre-calibrated digital output of the ADC.
+ * @adc_prop:	adc properties of the pm8xxx adc such as bit resolution,
+ *		reference voltage.
+ * @chan_prop:	individual channel properties to compensate the i/p scaling,
+ *		slope and offset.
+ * @chan_rslt:	physical result to be stored.
+ */
+int32_t qpnp_adc_scale_batt_therm(struct qpnp_vadc_chip *dev,
+			int32_t adc_code,
+			const struct qpnp_adc_properties *adc_prop,
+			const struct qpnp_vadc_chan_properties *chan_prop,
+			struct qpnp_vadc_result *chan_rslt);
+/**
+ * qpnp_adc_scale_qrd_batt_therm() - Scales the pre-calibrated digital output
+ *		of an ADC to the ADC reference and compensates for the
+ *		gain and offset. Returns the temperature in decidegC.
+ * @dev:	Structure device for qpnp vadc
+ * @adc_code:	pre-calibrated digital output of the ADC.
+ * @adc_prop:	adc properties of the pm8xxx adc such as bit resolution,
+ *		reference voltage.
+ * @chan_prop:	individual channel properties to compensate the i/p scaling,
+ *		slope and offset.
+ * @chan_rslt:	physical result to be stored.
+ */
+int32_t qpnp_adc_scale_qrd_batt_therm(struct qpnp_vadc_chip *dev,
+			int32_t adc_code,
+			const struct qpnp_adc_properties *adc_prop,
+			const struct qpnp_vadc_chan_properties *chan_prop,
+			struct qpnp_vadc_result *chan_rslt);
+/**
+ * qpnp_adc_scale_qrd_skuaa_batt_therm() - Scales the pre-calibrated digital output
+ *		of an ADC to the ADC reference and compensates for the
+ *		gain and offset. Returns the temperature in decidegC.
+ * @dev:	Structure device for qpnp vadc
+ * @adc_code:	pre-calibrated digital output of the ADC.
+ * @adc_prop:	adc properties of the pm8xxx adc such as bit resolution,
+ *		reference voltage.
+ * @chan_prop:	individual channel properties to compensate the i/p scaling,
+ *		slope and offset.
+ * @chan_rslt:	physical result to be stored.
+ */
+int32_t qpnp_adc_scale_qrd_skuaa_batt_therm(struct qpnp_vadc_chip *dev,
+			int32_t adc_code,
+			const struct qpnp_adc_properties *adc_prop,
+			const struct qpnp_vadc_chan_properties *chan_prop,
+			struct qpnp_vadc_result *chan_rslt);
+/**
+ * qpnp_adc_scale_qrd_skug_batt_therm() - Scales the pre-calibrated digital output
+ *		of an ADC to the ADC reference and compensates for the
+ *		gain and offset. Returns the temperature in decidegC.
+ * @dev:	Structure device for qpnp vadc
+ * @adc_code:	pre-calibrated digital output of the ADC.
+ * @adc_prop:	adc properties of the pm8xxx adc such as bit resolution,
+ *		reference voltage.
+ * @chan_prop:	individual channel properties to compensate the i/p scaling,
+ *		slope and offset.
+ * @chan_rslt:	physical result to be stored.
+ */
+int32_t qpnp_adc_scale_qrd_skug_batt_therm(struct qpnp_vadc_chip *dev,
+			int32_t adc_code,
+			const struct qpnp_adc_properties *adc_prop,
+			const struct qpnp_vadc_chan_properties *chan_prop,
+			struct qpnp_vadc_result *chan_rslt);
+/**
+ * qpnp_adc_scale_qrd_skuh_batt_therm() - Scales the pre-calibrated digital output
+ *		of an ADC to the ADC reference and compensates for the
+ *		gain and offset. Returns the temperature in decidegC.
+ * @dev:	Structure device for qpnp vadc
+ * @adc_code:	pre-calibrated digital output of the ADC.
+ * @adc_prop:	adc properties of the pm8xxx adc such as bit resolution,
+ *		reference voltage.
+ * @chan_prop:	individual channel properties to compensate the i/p scaling,
+ *		slope and offset.
+ * @chan_rslt:	physical result to be stored.
+ */
+int32_t qpnp_adc_scale_qrd_skuh_batt_therm(struct qpnp_vadc_chip *dev,
+			int32_t adc_code,
+			const struct qpnp_adc_properties *adc_prop,
+			const struct qpnp_vadc_chan_properties *chan_prop,
+			struct qpnp_vadc_result *chan_rslt);
+/**
+ * qpnp_adc_scale_qrd_skut1_batt_therm() - Scales the pre-calibrated digital output
+ *		of an ADC to the ADC reference and compensates for the
+ *		gain and offset. Returns the temperature in decidegC.
+ * @dev:	Structure device for qpnp vadc
+ * @adc_code:	pre-calibrated digital output of the ADC.
+ * @adc_prop:	adc properties of the pm8xxx adc such as bit resolution,
+ *		reference voltage.
+ * @chan_prop:	individual channel properties to compensate the i/p scaling,
+ *		slope and offset.
+ * @chan_rslt:	physical result to be stored.
+ */
+int32_t qpnp_adc_scale_qrd_skut1_batt_therm(struct qpnp_vadc_chip *dev,
+			int32_t adc_code,
+			const struct qpnp_adc_properties *adc_prop,
+			const struct qpnp_vadc_chan_properties *chan_prop,
+			struct qpnp_vadc_result *chan_rslt);
+/**
+ * qpnp_adc_scale_smb_batt_therm() - Scales the pre-calibrated digital output
+ *		of an ADC to the ADC reference and compensates for the
+ *		gain and offset. Returns the temperature in decidegC.
+ * @dev:	Structure device for qpnp vadc
+ * @adc_code:	pre-calibrated digital output of the ADC.
+ * @adc_prop:	adc properties of the pm8xxx adc such as bit resolution,
+ *		reference voltage.
+ * @chan_prop:	individual channel properties to compensate the i/p scaling,
+ *		slope and offset.
+ * @chan_rslt:	physical result to be stored.
+ */
+int32_t qpnp_adc_scale_smb_batt_therm(struct qpnp_vadc_chip *dev,
+			int32_t adc_code,
+			const struct qpnp_adc_properties *adc_prop,
+			const struct qpnp_vadc_chan_properties *chan_prop,
+			struct qpnp_vadc_result *chan_rslt);
+/**
+ * qpnp_adc_scale_batt_id() - Scales the pre-calibrated digital output
+ *		of an ADC to the ADC reference and compensates for the
+ *		gain and offset.
+ * @dev:	Structure device for qpnp vadc
+ * @adc_code:	pre-calibrated digital output of the ADC.
+ * @adc_prop:	adc properties of the pm8xxx adc such as bit resolution,
+ *		reference voltage.
+ * @chan_prop:	individual channel properties to compensate the i/p scaling,
+ *		slope and offset.
+ * @chan_rslt:	physical result to be stored.
+ */
+int32_t qpnp_adc_scale_batt_id(struct qpnp_vadc_chip *dev, int32_t adc_code,
+			const struct qpnp_adc_properties *adc_prop,
+			const struct qpnp_vadc_chan_properties *chan_prop,
+			struct qpnp_vadc_result *chan_rslt);
+/**
+ * qpnp_adc_tdkntcg_therm() - Scales the pre-calibrated digital output
+ *		of an ADC to the ADC reference and compensates for the
+ *		gain and offset. Returns the temperature of the xo therm
+ *		in millidegC.
+ * @dev:	Structure device for qpnp vadc
+ * @adc_code:	pre-calibrated digital output of the ADC.
+ * @adc_prop:	adc properties of the pm8xxx adc such as bit resolution,
+ *		reference voltage.
+ * @chan_prop:	individual channel properties to compensate the i/p scaling,
+ *		slope and offset.
+ * @chan_rslt:	physical result to be stored.
+ */
+int32_t qpnp_adc_tdkntcg_therm(struct qpnp_vadc_chip *dev, int32_t adc_code,
+			const struct qpnp_adc_properties *adc_prop,
+			const struct qpnp_vadc_chan_properties *chan_prop,
+			struct qpnp_vadc_result *chan_rslt);
+/**
+ * qpnp_adc_scale_therm_pu1() - Scales the pre-calibrated digital output
+ *		of an ADC to the ADC reference and compensates for the
+ *		gain and offset. Returns the temperature of the therm in degC.
+ *		It uses a mapping table computed for a 150K pull-up.
+ *		Pull-up1 is an internal pull-up on the AMUX of 150K.
+ * @dev:	Structure device for qpnp vadc
+ * @adc_code:	pre-calibrated digital output of the ADC.
+ * @adc_prop:	adc properties of the pm8xxx adc such as bit resolution,
+ *		reference voltage.
+ * @chan_prop:	individual channel properties to compensate the i/p scaling,
+ *		slope and offset.
+ * @chan_rslt:	physical result to be stored.
+ */
+int32_t qpnp_adc_scale_therm_pu1(struct qpnp_vadc_chip *dev, int32_t adc_code,
+			const struct qpnp_adc_properties *adc_prop,
+			const struct qpnp_vadc_chan_properties *chan_prop,
+			struct qpnp_vadc_result *chan_rslt);
+/**
+ * qpnp_adc_scale_therm_pu2() - Scales the pre-calibrated digital output
+ *		of an ADC to the ADC reference and compensates for the
+ *		gain and offset. Returns the temperature of the therm in degC.
+ *		It uses a mapping table computed for a 100K pull-up.
+ *		Pull-up2 is an internal pull-up on the AMUX of 100K.
+ * @dev:	Structure device for qpnp vadc
+ * @adc_code:	pre-calibrated digital output of the ADC.
+ * @adc_prop:	adc properties of the pm8xxx adc such as bit resolution,
+ *		reference voltage.
+ * @chan_prop:	individual channel properties to compensate the i/p scaling,
+ *		slope and offset.
+ * @chan_rslt:	physical result to be stored.
+ */
+int32_t qpnp_adc_scale_therm_pu2(struct qpnp_vadc_chip *dev, int32_t adc_code,
+			const struct qpnp_adc_properties *adc_prop,
+			const struct qpnp_vadc_chan_properties *chan_prop,
+			struct qpnp_vadc_result *chan_rslt);
+/**
+ * qpnp_adc_scale_therm_ncp03() - Scales the pre-calibrated digital output
+ *		of an ADC to the ADC reference and compensates for the
+ *		gain and offset. Returns the temperature of the therm in degC.
+ *		It uses a mapping table computed for an NCP03WF683.
+ * @dev:	Structure device for qpnp vadc
+ * @adc_code:	pre-calibrated digital output of the ADC.
+ * @adc_prop:	adc properties of the pm8xxx adc such as bit resolution,
+ *		reference voltage.
+ * @chan_prop:	individual channel properties to compensate the i/p scaling,
+ *		slope and offset.
+ * @chan_rslt:	physical result to be stored.
+ */
+int32_t qpnp_adc_scale_therm_ncp03(struct qpnp_vadc_chip *dev, int32_t adc_code,
+			const struct qpnp_adc_properties *adc_prop,
+			const struct qpnp_vadc_chan_properties *chan_prop,
+			struct qpnp_vadc_result *chan_rslt);
+/**
+ * qpnp_get_vadc() - Clients need to register with the vadc using the
+ *		corresponding device instance they want to read the channels
+ *		from. Read the bindings document on how to pass the phandle
+ *		for the corresponding vadc driver to register with.
+ * @dev:	Clients device structure
+ * @name:	Corresponding client's DT parser name. Read the DT bindings
+ *		document on how to register with the vadc
+ * @struct qpnp_vadc_chip * - On success returns the vadc device structure
+ *		pointer that needs to be used during an ADC request.
+ */
+struct qpnp_vadc_chip *qpnp_get_vadc(struct device *dev, const char *name);
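
A typical client flow, sketched under the assumption that the client's DT
node references the VADC with a phandle named "batt"; VBAT_SNS stands for
whatever channel the client actually reads:

/* Minimal client sketch: resolve the VADC at probe time, then read one
 * channel. "batt" and VBAT_SNS are assumptions; the phandle name must
 * match the client's DT node. Needs <linux/err.h>.
 */
static int example_read_vbatt(struct platform_device *pdev, int64_t *uv)
{
	struct qpnp_vadc_chip *vadc;
	struct qpnp_vadc_result res;
	int rc;

	vadc = qpnp_get_vadc(&pdev->dev, "batt");
	if (IS_ERR(vadc))
		return PTR_ERR(vadc);	/* may be -EPROBE_DEFER */

	rc = qpnp_vadc_read(vadc, VBAT_SNS, &res);
	if (rc)
		return rc;

	*uv = res.physical;	/* scaled physical result */
	return 0;
}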
+/**
+ * qpnp_adc_tm_scaler() - Performs reverse calibration.
+ * @tm_config:	Thermal monitoring configuration.
+ * @adc_prop:	adc properties of the qpnp adc such as bit resolution and
+ *		reference voltage.
+ * @chan_prop:	Individual channel properties to compensate the i/p scaling,
+ *		slope and offset.
+ */
+static inline int32_t qpnp_adc_tm_scaler(struct qpnp_adc_tm_config *tm_config,
+			const struct qpnp_adc_properties *adc_prop,
+			const struct qpnp_vadc_chan_properties *chan_prop)
+{ return -ENXIO; }
+/**
+ * qpnp_get_vadc_gain_and_offset() - Obtains the VADC gain and offset
+ *		for absolute and ratiometric calibration.
+ * @dev:	Structure device for qpnp vadc
+ * @param:	The result in which the ADC offset and gain values are stored.
+ * @type:	The calibration type whether client needs the absolute or
+ *		ratiometric gain and offset values.
+ */
+int32_t qpnp_get_vadc_gain_and_offset(struct qpnp_vadc_chip *dev,
+			struct qpnp_vadc_linear_graph *param,
+			enum qpnp_adc_calib_type calib_type);
+/**
+ * qpnp_adc_scale_millidegc_pmic_voltage_thr() - Performs reverse calibration
+ *		on the low/high temperature threshold values passed by the
+ *		client. The function converts millidegC to a voltage threshold
+ *		and accounts for the corresponding channel's scaling (2mV/K).
+ * @dev:	Structure device for qpnp vadc
+ * @param:	The input parameters that contain the low/high temperature
+ *		values.
+ * @low_threshold: The low threshold value that needs to be updated with
+ *		the above calibrated voltage value.
+ * @high_threshold: The high threshold value that needs to be updated with
+ *		the above calibrated voltage value.
+ */
+int32_t qpnp_adc_scale_millidegc_pmic_voltage_thr(struct qpnp_vadc_chip *dev,
+		struct qpnp_adc_tm_btm_param *param,
+		uint32_t *low_threshold, uint32_t *high_threshold);
+/**
+ * qpnp_adc_btm_scaler() - Performs reverse calibration on the low/high
+ *		temperature threshold values passed by the client.
+ *		The function maps the temperature to voltage and applies
+ *		ratiometric calibration on the voltage values.
+ * @dev:	Structure device for qpnp vadc
+ * @param:	The input parameters that contain the low/high temperature
+ *		values.
+ * @low_threshold: The low threshold value that needs to be updated with
+ *		the above calibrated voltage value.
+ * @high_threshold: The high threshold value that needs to be updated with
+ *		the above calibrated voltage value.
+ */
+int32_t qpnp_adc_btm_scaler(struct qpnp_vadc_chip *dev,
+		struct qpnp_adc_tm_btm_param *param,
+		uint32_t *low_threshold, uint32_t *high_threshold);
+
+/**
+ * qpnp_adc_qrd_skuh_btm_scaler() - Performs reverse calibration on the low/high
+ *		temperature threshold values passed by the client.
+ *		The function maps the temperature to voltage and applies
+ *		ratiometric calibration on the voltage values for SKUH board.
+ * @dev:	Structure device for qpnp vadc
+ * @param:	The input parameters that contain the low/high temperature
+ *		values.
+ * @low_threshold: The low threshold value that needs to be updated with
+ *		the above calibrated voltage value.
+ * @high_threshold: The high threshold value that needs to be updated with
+ *		the above calibrated voltage value.
+ */
+int32_t qpnp_adc_qrd_skuh_btm_scaler(struct qpnp_vadc_chip *dev,
+		struct qpnp_adc_tm_btm_param *param,
+		uint32_t *low_threshold, uint32_t *high_threshold);
+/**
+ * qpnp_adc_qrd_skut1_btm_scaler() - Performs reverse calibration on the low/high
+ *		temperature threshold values passed by the client.
+ *		The function maps the temperature to voltage and applies
+ *		ratiometric calibration on the voltage values for SKUT1 board.
+ * @dev:	Structure device for qpnp vadc
+ * @param:	The input parameters that contain the low/high temperature
+ *		values.
+ * @low_threshold: The low threshold value that needs to be updated with
+ *		the above calibrated voltage value.
+ * @high_threshold: The high threshold value that needs to be updated with
+ *		the above calibrated voltage value.
+ */
+int32_t qpnp_adc_qrd_skut1_btm_scaler(struct qpnp_vadc_chip *dev,
+		struct qpnp_adc_tm_btm_param *param,
+		uint32_t *low_threshold, uint32_t *high_threshold);
+/**
+ * qpnp_adc_tm_scale_therm_voltage_pu2() - Performs reverse calibration
+ *		and converts the given temperature to a voltage on supported
+ *		thermistor channels using the 100k pull-up.
+ * @dev:	Structure device for qpnp vadc
+ * @adc_properties:	adc properties of the qpnp adc such as bit resolution,
+ *		reference voltage.
+ * @param:	The input temperature values.
+ */
+int32_t qpnp_adc_tm_scale_therm_voltage_pu2(struct qpnp_vadc_chip *dev,
+		const struct qpnp_adc_properties *adc_properties,
+				struct qpnp_adc_tm_config *param);
+/**
+ * qpnp_adc_tm_scale_voltage_therm_pu2() - Performs reverse calibration
+ *		and converts the given ADC code to temperature for
+ *		thermistor channels using 100k pull-up.
+ * @dev:	Structure device for qpnp vadc
+ * @adc_prop:	adc properties of the qpnp adc such as bit resolution,
+ *		reference voltage.
+ * @reg:	The input ADC code.
+ * @result:	The physical measurement temperature on the thermistor.
+ */
+int32_t qpnp_adc_tm_scale_voltage_therm_pu2(struct qpnp_vadc_chip *dev,
+			const struct qpnp_adc_properties *adc_prop,
+				uint32_t reg, int64_t *result);
+/**
+ * qpnp_adc_usb_scaler() - Performs reverse calibration on the low/high
+ *		voltage threshold values passed by the client.
+ *		The function applies ratiometric calibration on the
+ *		voltage values.
+ * @dev:	Structure device for qpnp vadc
+ * @param:	The input parameters that contain the low/high voltage
+ *		threshold values.
+ * @low_threshold: The low threshold value that needs to be updated with
+ *		the above calibrated voltage value.
+ * @high_threshold: The high threshold value that needs to be updated with
+ *		the above calibrated voltage value.
+ */
+int32_t qpnp_adc_usb_scaler(struct qpnp_vadc_chip *dev,
+		struct qpnp_adc_tm_btm_param *param,
+		uint32_t *low_threshold, uint32_t *high_threshold);
+/**
+ * qpnp_adc_vbatt_rscaler() - Performs reverse calibration on the low/high
+ *		voltage threshold values passed by the client.
+ *		The function applies ratiometric calibration on the
+ *		voltage values.
+ * @dev:	Structure device for qpnp vadc
+ * @param:	The input parameters that contain the low/high voltage
+ *		threshold values.
+ * @low_threshold: The low threshold value that needs to be updated with
+ *		the above calibrated voltage value.
+ * @high_threshold: The high threshold value that needs to be updated with
+ *		the above calibrated voltage value.
+ */
+int32_t qpnp_adc_vbatt_rscaler(struct qpnp_vadc_chip *dev,
+		struct qpnp_adc_tm_btm_param *param,
+		uint32_t *low_threshold, uint32_t *high_threshold);
+/**
+ * qpnp_vadc_absolute_rthr() - Performs reverse calibration on the low/high
+ *		voltage threshold values passed by the client.
+ *		The function applies absolute calibration on the
+ *		voltage values.
+ * @dev:	Structure device for qpnp vadc
+ * @chan_prop:	Individual channel properties to compensate the i/p scaling,
+ *		slope and offset.
+ * @param:	The input parameters that contain the low/high voltage
+ *		threshold values.
+ * @low_threshold: The low threshold value that needs to be updated with
+ *		the above calibrated voltage value.
+ * @high_threshold: The high threshold value that needs to be updated with
+ *		the above calibrated voltage value.
+ */
+int32_t qpnp_vadc_absolute_rthr(struct qpnp_vadc_chip *dev,
+		const struct qpnp_vadc_chan_properties *chan_prop,
+		struct qpnp_adc_tm_btm_param *param,
+		uint32_t *low_threshold, uint32_t *high_threshold);
+/**
+ * qpnp_adc_absolute_rthr() - Performs reverse calibration on the low/high
+ *		voltage threshold values passed by the client.
+ *		The function applies absolute calibration on the
+ *		voltage values.
+ * @dev:	Structure device for qpnp vadc
+ * @param:	The input parameters that contain the low/high voltage
+ *		threshold values.
+ * @low_threshold: The low threshold value that needs to be updated with
+ *		the above calibrated voltage value.
+ * @high_threshold: The high threshold value that needs to be updated with
+ *		the above calibrated voltage value.
+ */
+int32_t qpnp_adc_absolute_rthr(struct qpnp_vadc_chip *dev,
+		struct qpnp_adc_tm_btm_param *param,
+		uint32_t *low_threshold, uint32_t *high_threshold);
+/**
+ * qpnp_adc_smb_btm_rscaler() - Performs reverse calibration on the low/high
+ *		temperature threshold values passed by the client.
+ *		The function maps the temperature to voltage and applies
+ *		ratiometric calibration on the voltage values.
+ * @dev:	Structure device for qpnp vadc
+ * @param:	The input parameters that contain the low/high temperature
+ *		values.
+ * @low_threshold: The low threshold value that needs to be updated with
+ *		the above calibrated voltage value.
+ * @high_threshold: The high threshold value that needs to be updated with
+ *		the above calibrated voltage value.
+ */
+int32_t qpnp_adc_smb_btm_rscaler(struct qpnp_vadc_chip *dev,
+		struct qpnp_adc_tm_btm_param *param,
+		uint32_t *low_threshold, uint32_t *high_threshold);
+/**
+ * qpnp_vadc_iadc_sync_request() - Performs Voltage ADC read and
+ *		locks the peripheral. When performing a simultaneous
+ *		voltage and current request, the VADC peripheral is
+ *		prepared for conversion and the IADC sync conversion
+ *		is done from the IADC peripheral.
+ * @dev:	Structure device for qpnp vadc
+ * @channel:	Input channel to perform the voltage ADC read.
+ */
+int32_t qpnp_vadc_iadc_sync_request(struct qpnp_vadc_chip *dev,
+				enum qpnp_vadc_channels channel);
+/**
+ * qpnp_vadc_iadc_sync_complete_request() - Reads the ADC result and
+ *		unlocks the peripheral.
+ * @dev:	Structure device for qpnp vadc
+ * @result:	Structure pointer of type adc_chan_result
+ *		in which the ADC read results are stored.
+ */
+int32_t qpnp_vadc_iadc_sync_complete_request(struct qpnp_vadc_chip *dev,
+	enum qpnp_vadc_channels channel, struct qpnp_vadc_result *result);
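
These two calls are a lock/unlock pair: the request arms the VADC and holds
the peripheral so the IADC can trigger its synchronous conversion, and the
completion reads the result back and releases it. A sketch of the pairing
(the channel choice is an assumption):

/* Sketch of the paired usage; the peripheral stays locked between the
 * two calls while the IADC driver triggers its conversion.
 */
static int example_sync_vbatt(struct qpnp_vadc_chip *vadc, int64_t *uv)
{
	struct qpnp_vadc_result res;
	int rc;

	rc = qpnp_vadc_iadc_sync_request(vadc, VBAT_SNS);
	if (rc)
		return rc;
	/* ... IADC conversion runs here, driven by the IADC driver ... */
	rc = qpnp_vadc_iadc_sync_complete_request(vadc, VBAT_SNS, &res);
	if (!rc)
		*uv = res.physical;
	return rc;
}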
+/**
+ * qpnp_vbat_sns_comp_result() - Compensates vbatt readings based on temperature
+ * @dev:	Structure device for qpnp vadc
+ * @result:	Voltage in uV that needs compensation.
+ * @is_pon_ocv: Whether the reading is from a power on OCV or not
+ */
+int32_t qpnp_vbat_sns_comp_result(struct qpnp_vadc_chip *dev,
+					int64_t *result, bool is_pon_ocv);
+/**
+ * qpnp_adc_get_revid_version() - Obtain the PMIC number and revision.
+ * @dev:	Structure device node.
+ * returns the internally mapped PMIC number and revision id.
+ */
+int qpnp_adc_get_revid_version(struct device *dev);
+/**
+ * qpnp_vadc_channel_monitor() - Configures an ADC channel for kernel
+ *		clients to monitor for threshold detection. The driver
+ *		invokes the notification callback once the set thresholds
+ *		are crossed.
+ * @chip:	Structure device for qpnp vadc.
+ * @param:	Structure pointer of qpnp_adc_tm_btm_param type.
+ *		Clients pass the low/high temperature along with the threshold
+ *		notification callback.
+ */
+int32_t qpnp_vadc_channel_monitor(struct qpnp_vadc_chip *chip,
+					struct qpnp_adc_tm_btm_param *param);
+/**
+ * qpnp_vadc_end_channel_monitor() - Disables recurring measurement mode for
+ *		VADC_USR and disables the bank.
+ * @chip:	Device instance for the VADC.
+ */
+int32_t qpnp_vadc_end_channel_monitor(struct qpnp_vadc_chip *chip);
+/**
+ * qpnp_vadc_calib_vref() - Read calibration channel REF_125V/VDD_VADC
+ * @vadc:	Structure device for qpnp vadc
+ * @calib_type:	absolute or ratiometric calib type.
+ * @calib_data:	Stores the calibration channel adc code on return.
+ */
+int32_t qpnp_vadc_calib_vref(struct qpnp_vadc_chip *vadc,
+				enum qpnp_adc_calib_type calib_type,
+				int *calib_data);
+/**
+ * qpnp_vadc_calib_gnd() - Read calibration channel REF_625MV/GND_REF
+ * @vadc:	Structure device for qpnp vadc
+ * @calib_type:	absolute or ratiometric calib type.
+ * @calib_data:	Stores the calibration channel adc code on return.
+ */
+int32_t qpnp_vadc_calib_gnd(struct qpnp_vadc_chip *vadc,
+				enum qpnp_adc_calib_type calib_type,
+				int *calib_data);
+
+/**
+ * qpnp_adc_enable_voltage() - Enable LDO for HKADC
+ * @adc:	Structure for the qpnp ADC driver.
+ * returns the result of enabling the regulator interface.
+ */
+int32_t qpnp_adc_enable_voltage(struct qpnp_adc_drv *adc);
+
+/**
+ * qpnp_adc_disable_voltage() - Disable vote for HKADC LDO
+ * @adc:	Structure for the qpnp ADC driver.
+ */
+void qpnp_adc_disable_voltage(struct qpnp_adc_drv *adc);
+
+/**
+ * qpnp_adc_free_voltage_resource() - Releases the HKADC LDO regulator handles
+ * @adc:	Structure for the qpnp ADC driver.
+ */
+void qpnp_adc_free_voltage_resource(struct qpnp_adc_drv *adc);
+
+#else
+static inline int32_t qpnp_vadc_read(struct qpnp_vadc_chip *dev,
+				uint32_t channel,
+				struct qpnp_vadc_result *result)
+{ return -ENXIO; }
+static inline int32_t qpnp_vadc_hc_read(struct qpnp_vadc_chip *dev,
+				uint32_t channel,
+				struct qpnp_vadc_result *result)
+{ return -ENXIO; }
+static inline int32_t qpnp_vadc_conv_seq_request(struct qpnp_vadc_chip *dev,
+			enum qpnp_vadc_trigger trigger_channel,
+			enum qpnp_vadc_channels channel,
+			struct qpnp_vadc_result *result)
+{ return -ENXIO; }
+static inline int32_t qpnp_adc_scale_default(struct qpnp_vadc_chip *vadc,
+			int32_t adc_code,
+			const struct qpnp_adc_properties *adc_prop,
+			const struct qpnp_vadc_chan_properties *chan_prop,
+			struct qpnp_vadc_result *chan_rslt)
+{ return -ENXIO; }
+static inline int32_t qpnp_adc_scale_pmic_therm(struct qpnp_vadc_chip *vadc,
+			int32_t adc_code,
+			const struct qpnp_adc_properties *adc_prop,
+			const struct qpnp_vadc_chan_properties *chan_prop,
+			struct qpnp_vadc_result *chan_rslt)
+{ return -ENXIO; }
+static inline int32_t qpnp_adc_scale_pmi_chg_temp(struct qpnp_vadc_chip *vadc,
+			int32_t adc_code,
+			const struct qpnp_adc_properties *adc_prop,
+			const struct qpnp_vadc_chan_properties *chan_prop,
+			struct qpnp_vadc_result *chan_rslt)
+{ return -ENXIO; }
+static inline int32_t qpnp_adc_scale_batt_therm(struct qpnp_vadc_chip *vadc,
+			int32_t adc_code,
+			const struct qpnp_adc_properties *adc_prop,
+			const struct qpnp_vadc_chan_properties *chan_prop,
+			struct qpnp_vadc_result *chan_rslt)
+{ return -ENXIO; }
+static inline int32_t qpnp_adc_scale_qrd_batt_therm(
+			struct qpnp_vadc_chip *vadc, int32_t adc_code,
+			const struct qpnp_adc_properties *adc_prop,
+			const struct qpnp_vadc_chan_properties *chan_prop,
+			struct qpnp_vadc_result *chan_rslt)
+{ return -ENXIO; }
+static inline int32_t qpnp_adc_scale_qrd_skuaa_batt_therm(
+			struct qpnp_vadc_chip *vadc, int32_t adc_code,
+			const struct qpnp_adc_properties *adc_prop,
+			const struct qpnp_vadc_chan_properties *chan_prop,
+			struct qpnp_vadc_result *chan_rslt)
+{ return -ENXIO; }
+static inline int32_t qpnp_adc_scale_qrd_skug_batt_therm(
+			struct qpnp_vadc_chip *vadc, int32_t adc_code,
+			const struct qpnp_adc_properties *adc_prop,
+			const struct qpnp_vadc_chan_properties *chan_prop,
+			struct qpnp_vadc_result *chan_rslt)
+{ return -ENXIO; }
+static inline int32_t qpnp_adc_scale_qrd_skuh_batt_therm(
+			struct qpnp_vadc_chip *vdev, int32_t adc_code,
+			const struct qpnp_adc_properties *adc_prop,
+			const struct qpnp_vadc_chan_properties *chan_prop,
+			struct qpnp_vadc_result *chan_rslt)
+{ return -ENXIO; }
+static inline int32_t qpnp_adc_scale_qrd_skut1_batt_therm(
+			struct qpnp_vadc_chip *vdev, int32_t adc_code,
+			const struct qpnp_adc_properties *adc_prop,
+			const struct qpnp_vadc_chan_properties *chan_prop,
+			struct qpnp_vadc_result *chan_rslt)
+{ return -ENXIO; }
+static inline int32_t qpnp_adc_scale_smb_batt_therm(struct qpnp_vadc_chip *vadc,
+			int32_t adc_code,
+			const struct qpnp_adc_properties *adc_prop,
+			const struct qpnp_vadc_chan_properties *chan_prop,
+			struct qpnp_vadc_result *chan_rslt)
+{ return -ENXIO; }
+static inline int32_t qpnp_adc_scale_batt_id(struct qpnp_vadc_chip *vadc,
+			int32_t adc_code,
+			const struct qpnp_adc_properties *adc_prop,
+			const struct qpnp_vadc_chan_properties *chan_prop,
+			struct qpnp_vadc_result *chan_rslt)
+{ return -ENXIO; }
+static inline int32_t qpnp_adc_tdkntcg_therm(struct qpnp_vadc_chip *vadc,
+			int32_t adc_code,
+			const struct qpnp_adc_properties *adc_prop,
+			const struct qpnp_vadc_chan_properties *chan_prop,
+			struct qpnp_vadc_result *chan_rslt)
+{ return -ENXIO; }
+static inline int32_t qpnp_adc_scale_therm_pu1(struct qpnp_vadc_chip *vadc,
+			int32_t adc_code,
+			const struct qpnp_adc_properties *adc_prop,
+			const struct qpnp_vadc_chan_properties *chan_prop,
+			struct qpnp_vadc_result *chan_rslt)
+{ return -ENXIO; }
+static inline int32_t qpnp_adc_scale_therm_pu2(struct qpnp_vadc_chip *vadc,
+			int32_t adc_code,
+			const struct qpnp_adc_properties *adc_prop,
+			const struct qpnp_vadc_chan_properties *chan_prop,
+			struct qpnp_vadc_result *chan_rslt)
+{ return -ENXIO; }
+static inline int32_t qpnp_adc_scale_therm_ncp03(struct qpnp_vadc_chip *vadc,
+			int32_t adc_code,
+			const struct qpnp_adc_properties *adc_prop,
+			const struct qpnp_vadc_chan_properties *chan_prop,
+			struct qpnp_vadc_result *chan_rslt)
+{ return -ENXIO; }
+static inline struct qpnp_vadc_chip *qpnp_get_vadc(struct device *dev,
+							const char *name)
+{ return ERR_PTR(-ENXIO); }
+static inline int32_t qpnp_get_vadc_gain_and_offset(struct qpnp_vadc_chip *dev,
+			struct qpnp_vadc_linear_graph *param,
+			enum qpnp_adc_calib_type calib_type)
+{ return -ENXIO; }
+static inline int32_t qpnp_adc_usb_scaler(struct qpnp_vadc_chip *dev,
+		struct qpnp_adc_tm_btm_param *param,
+		uint32_t *low_threshold, uint32_t *high_threshold)
+{ return -ENXIO; }
+static inline int32_t qpnp_adc_vbatt_rscaler(struct qpnp_vadc_chip *dev,
+		struct qpnp_adc_tm_btm_param *param,
+		uint32_t *low_threshold, uint32_t *high_threshold)
+{ return -ENXIO; }
+static inline int32_t qpnp_vadc_absolute_rthr(struct qpnp_vadc_chip *dev,
+		const struct qpnp_vadc_chan_properties *chan_prop,
+		struct qpnp_adc_tm_btm_param *param,
+		uint32_t *low_threshold, uint32_t *high_threshold)
+{ return -ENXIO; }
+static inline int32_t qpnp_adc_absolute_rthr(struct qpnp_vadc_chip *dev,
+		struct qpnp_adc_tm_btm_param *param,
+		uint32_t *low_threshold, uint32_t *high_threshold)
+{ return -ENXIO; }
+static inline int32_t qpnp_adc_btm_scaler(struct qpnp_vadc_chip *dev,
+		struct qpnp_adc_tm_btm_param *param,
+		uint32_t *low_threshold, uint32_t *high_threshold)
+{ return -ENXIO; }
+static inline int32_t qpnp_adc_qrd_skuh_btm_scaler(struct qpnp_vadc_chip *dev,
+		struct qpnp_adc_tm_btm_param *param,
+		uint32_t *low_threshold, uint32_t *high_threshold)
+{ return -ENXIO; }
+static inline int32_t qpnp_adc_qrd_skut1_btm_scaler(struct qpnp_vadc_chip *dev,
+		struct qpnp_adc_tm_btm_param *param,
+		uint32_t *low_threshold, uint32_t *high_threshold)
+{ return -ENXIO; }
+static inline int32_t qpnp_adc_scale_millidegc_pmic_voltage_thr(
+		struct qpnp_vadc_chip *dev,
+		struct qpnp_adc_tm_btm_param *param,
+		uint32_t *low_threshold, uint32_t *high_threshold)
+{ return -ENXIO; }
+static inline int32_t qpnp_adc_tm_scale_therm_voltage_pu2(
+				struct qpnp_vadc_chip *dev,
+			const struct qpnp_adc_properties *adc_properties,
+				struct qpnp_adc_tm_config *param)
+{ return -ENXIO; }
+static inline int32_t qpnp_adc_tm_scale_voltage_therm_pu2(
+				struct qpnp_vadc_chip *dev,
+			const struct qpnp_adc_properties *adc_prop,
+			uint32_t reg, int64_t *result)
+{ return -ENXIO; }
+static inline int32_t qpnp_adc_smb_btm_rscaler(struct qpnp_vadc_chip *dev,
+		struct qpnp_adc_tm_btm_param *param,
+		uint32_t *low_threshold, uint32_t *high_threshold)
+{ return -ENXIO; }
+static inline int32_t qpnp_vadc_iadc_sync_request(struct qpnp_vadc_chip *dev,
+				enum qpnp_vadc_channels channel)
+{ return -ENXIO; }
+static inline int32_t qpnp_vadc_iadc_sync_complete_request(
+				struct qpnp_vadc_chip *dev,
+				enum qpnp_vadc_channels channel,
+				struct qpnp_vadc_result *result)
+{ return -ENXIO; }
+static inline int32_t qpnp_vbat_sns_comp_result(struct qpnp_vadc_chip *dev,
+					int64_t *result, bool is_pon_ocv)
+{ return -ENXIO; }
+static inline int qpnp_adc_get_revid_version(struct device *dev)
+{ return -ENXIO; }
+static inline int32_t qpnp_vadc_channel_monitor(struct qpnp_vadc_chip *chip,
+					struct qpnp_adc_tm_btm_param *param)
+{ return -ENXIO; }
+static inline int32_t qpnp_vadc_end_channel_monitor(struct qpnp_vadc_chip *chip)
+{ return -ENXIO; }
+static inline int32_t qpnp_vadc_calib_vref(struct qpnp_vadc_chip *vadc,
+					enum qpnp_adc_calib_type calib_type,
+					int *calib_data)
+{ return -ENXIO; }
+static inline int32_t qpnp_vadc_calib_gnd(struct qpnp_vadc_chip *vadc,
+					enum qpnp_adc_calib_type calib_type,
+					int *calib_data)
+{ return -ENXIO; }
+
+static inline int32_t qpnp_adc_enable_voltage(struct qpnp_adc_drv *adc)
+{ return -ENXIO; }
+
+static inline void qpnp_adc_disable_voltage(struct qpnp_adc_drv *adc)
+{ return; }
+
+static inline void qpnp_adc_free_voltage_resource(struct qpnp_adc_drv *adc)
+{ return; }
+
+static inline int32_t qpnp_adc_get_devicetree_data(
+		struct platform_device *pdev, struct qpnp_adc_drv *adc_qpnp)
+{ return -ENXIO; }
+
+#endif
+
+/* Public API */
+#if defined(CONFIG_SENSORS_QPNP_ADC_CURRENT)				\
+			|| defined(CONFIG_SENSORS_QPNP_ADC_CURRENT_MODULE)
+/**
+ * qpnp_iadc_read() - Performs ADC read on the current channel.
+ * @dev:	Structure device for qpnp iadc
+ * @channel:	Input channel to perform the ADC read.
+ * @result:	Current across rsense in mA.
+ * @return:	0 on success.
+ */
+int32_t qpnp_iadc_read(struct qpnp_iadc_chip *dev,
+				enum qpnp_iadc_channels channel,
+				struct qpnp_iadc_result *result);
+/**
+ * qpnp_iadc_get_rsense() - Reads the RDS resistance value from the
+ *			trim registers.
+ * @dev:	Structure device for qpnp iadc
+ * @rsense:	RDS resistance in nOhms.
+ * @return:	0 on success.
+ */
+int32_t qpnp_iadc_get_rsense(struct qpnp_iadc_chip *dev, int32_t *rsense);
+/**
+ * qpnp_iadc_get_gain_and_offset() - Performs gain calibration
+ *				over 17.8571mV and offset over selected
+ *				channel. Channel can be internal rsense,
+ *				external rsense, or the alternate lead pair.
+ * @dev:	Structure device for qpnp iadc
+ * @result:	result structure where the gain and offset is stored of
+ *		type qpnp_iadc_calib.
+ * @return:	0 on success.
+ */
+int32_t qpnp_iadc_get_gain_and_offset(struct qpnp_iadc_chip *dev,
+					struct qpnp_iadc_calib *result);
+/**
+ * qpnp_get_iadc() - Clients need to register with the iadc using the
+ *		corresponding device instance they want to read the channels
+ *		from. Read the bindings document on how to pass the phandle
+ *		for the corresponding iadc driver to register with.
+ * @dev:	Clients device structure
+ * @name:	Corresponding client's DT parser name. Read the DT bindings
+ *		document on how to register with the iadc
+ * @struct qpnp_iadc_chip * - On success returns the iadc device structure
+ *		pointer that is used every time the client makes an ADC request.
+ */
+struct qpnp_iadc_chip *qpnp_get_iadc(struct device *dev, const char *name);
+/**
+ * qpnp_iadc_vadc_sync_read() - Performs synchronous VADC and IADC read.
+ *		The api is to be used only by the BMS to perform
+ *		simultaneous VADC and IADC measurement for battery voltage
+ *		and current.
+ * @dev:	Structure device for qpnp iadc
+ * @i_channel:	Input battery current channel to perform the IADC read.
+ * @i_result:	Current across the rsense in mA.
+ * @v_channel:	Input battery voltage channel to perform VADC read.
+ * @v_result:	Voltage on the vbatt channel with units in mV.
+ * @return:	0 on success.
+ */
+int32_t qpnp_iadc_vadc_sync_read(struct qpnp_iadc_chip *dev,
+	enum qpnp_iadc_channels i_channel, struct qpnp_iadc_result *i_result,
+	enum qpnp_vadc_channels v_channel, struct qpnp_vadc_result *v_result);
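
A BMS-style sketch of one synchronous voltage/current sample; INTERNAL_RSENSE,
VBAT_SNS and the result field names are assumptions based on typical qcom 4.4
trees, not guarantees of this header:

/* Hedged sketch: one simultaneous battery voltage + current sample. */
static int example_sample_batt(struct qpnp_iadc_chip *iadc,
			       int32_t *ua, int64_t *uv)
{
	struct qpnp_iadc_result ires;
	struct qpnp_vadc_result vres;
	int rc;

	rc = qpnp_iadc_vadc_sync_read(iadc, INTERNAL_RSENSE, &ires,
				      VBAT_SNS, &vres);
	if (rc)
		return rc;

	*ua = ires.result_ua;	/* assumed field: compensated current */
	*uv = vres.physical;	/* scaled battery voltage */
	return 0;
}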
+/**
+ * qpnp_iadc_calibrate_for_trim() - Clients can use this API to re-calibrate
+ *		IADC. The offset and gain values are programmed in the trim
+ *		registers. The offset and the gain can be retrieved using
+ *		qpnp_iadc_get_gain_and_offset
+ * @dev:	Structure device for qpnp iadc
+ * @batfet_closed: batfet is opened or closed. The IADC chooses proper
+ *			channel (internal/external) based on batfet status
+ *			for calibration.
+ * @return:	0 on success.
+ */
+int32_t qpnp_iadc_calibrate_for_trim(struct qpnp_iadc_chip *dev,
+						bool batfet_closed);
+/**
+ * qpnp_iadc_comp_result() - Compensates the result of the current based on
+ *		the gain and offset coefficients and rsense parameters.
+ * @dev:	Structure device for qpnp iadc
+ * @result:	Current value to perform the compensation.
+ * @return:	0 on success.
+ */
+int32_t qpnp_iadc_comp_result(struct qpnp_iadc_chip *dev, int64_t *result);
+/**
+ * qpnp_iadc_skip_calibration() - Clients can use this API to ask the driver
+ *				to skip iadc calibrations
+ * @dev:	Structure device for qpnp iadc
+ * @return:	0 on success and -EPROBE_DEFER when probe for the device
+ *		has not occurred.
+ */
+int qpnp_iadc_skip_calibration(struct qpnp_iadc_chip *dev);
+/**
+ * qpnp_iadc_resume_calibration() - Clients can use this API to ask the driver
+ *				to resume iadc calibrations
+ * @dev:	Structure device for qpnp iadc
+ * @return:	0 on success and -EPROBE_DEFER when probe for the device
+ *		has not occurred.
+ */
+int qpnp_iadc_resume_calibration(struct qpnp_iadc_chip *dev);
+#else
+static inline int32_t qpnp_iadc_read(struct qpnp_iadc_chip *iadc,
+	enum qpnp_iadc_channels channel, struct qpnp_iadc_result *result)
+{ return -ENXIO; }
+static inline int32_t qpnp_iadc_get_rsense(struct qpnp_iadc_chip *iadc,
+							int32_t *rsense)
+{ return -ENXIO; }
+static inline int32_t qpnp_iadc_get_gain_and_offset(struct qpnp_iadc_chip *iadc,
+				struct qpnp_iadc_calib *result)
+{ return -ENXIO; }
+static inline struct qpnp_iadc_chip *qpnp_get_iadc(struct device *dev,
+							const char *name)
+{ return ERR_PTR(-ENXIO); }
+static inline int32_t qpnp_iadc_vadc_sync_read(struct qpnp_iadc_chip *iadc,
+	enum qpnp_iadc_channels i_channel, struct qpnp_iadc_result *i_result,
+	enum qpnp_vadc_channels v_channel, struct qpnp_vadc_result *v_result)
+{ return -ENXIO; }
+static inline int32_t qpnp_iadc_calibrate_for_trim(struct qpnp_iadc_chip *iadc,
+							bool batfet_closed)
+{ return -ENXIO; }
+static inline int32_t qpnp_iadc_comp_result(struct qpnp_iadc_chip *iadc,
+						int64_t *result)
+{ return -ENXIO; }
+static inline int qpnp_iadc_skip_calibration(struct qpnp_iadc_chip *iadc)
+{ return -ENXIO; }
+static inline int qpnp_iadc_resume_calibration(struct qpnp_iadc_chip *iadc)
+{ return -ENXIO; }
+#endif
+
+/* Public API */
+#if defined(CONFIG_THERMAL_QPNP_ADC_TM)				\
+			|| defined(CONFIG_THERMAL_QPNP_ADC_TM_MODULE)
+/**
+ * qpnp_adc_tm_usbid_configure() - Configures Channel 0 of VADC_BTM to
+ *		monitor the USB_ID channel using the 100k internal pull-up.
+ *		The USB driver registers the high/low voltage thresholds
+ *		along with a notification callback that is invoked once the
+ *		set thresholds are crossed.
+ * @param:	Structure pointer of qpnp_adc_tm_btm_param type.
+ *		Clients pass the low/high voltage along with the threshold
+ *		notification callback.
+ */
+int32_t qpnp_adc_tm_usbid_configure(struct qpnp_adc_tm_chip *chip,
+					struct qpnp_adc_tm_btm_param *param);
+/**
+ * qpnp_adc_tm_usbid_end() - Disables the monitoring of channel 0 that is
+ *		assigned for monitoring USB_ID. Disables the low/high
+ *		threshold activation for channel 0 as well.
+ * @chip:	Structure device for qpnp adc tm.
+ */
+int32_t qpnp_adc_tm_usbid_end(struct qpnp_adc_tm_chip *chip);
+/**
+ * qpnp_adc_tm_channel_measure() - Configures an ADC channel for kernel
+ *		clients to monitor for threshold detection. The driver
+ *		invokes the notification callback once the set thresholds
+ *		are crossed.
+ * @param:	Structure pointer of qpnp_adc_tm_btm_param type.
+ *		Clients pass the low/high temperature along with the threshold
+ *		notification callback.
+ */
+int32_t qpnp_adc_tm_channel_measure(struct qpnp_adc_tm_chip *chip,
+					struct qpnp_adc_tm_btm_param *param);
+/**
+ * qpnp_adc_tm_disable_chan_meas() - Disables the monitoring of a channel that
+ *		is assigned for monitoring kernel clients. Disables the low/high
+ *		threshold activation for the corresponding channel.
+ * @param:	Structure pointer of qpnp_adc_tm_btm_param type.
+ *		This is used to identify the channel for which the corresponding
+ *		channels high/low threshold notification will be disabled.
+ */
+int32_t qpnp_adc_tm_disable_chan_meas(struct qpnp_adc_tm_chip *chip,
+					struct qpnp_adc_tm_btm_param *param);
+/**
+ * qpnp_get_adc_tm() - Clients need to register with the adc_tm using the
+ *		corresponding device instance they want to read the channels
+ *		from. Read the bindings document on how to pass the phandle
+ *		for the corresponding adc_tm driver to register with.
+ * @dev:	Clients device structure
+ * @name:	Corresponding client's DT parser name. Read the DT bindings
+ *		document on how to register with the adc_tm
+ * @struct qpnp_adc_tm_chip * - On success returns the adc_tm device structure
+ *		pointer that needs to be used during an ADC TM request.
+ */
+struct qpnp_adc_tm_chip *qpnp_get_adc_tm(struct device *dev, const char *name);
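
Putting the TM API together, a hedged sketch of a client registering decidegC
thresholds on a thermistor channel; the btm_param field names, channel id and
enum values are assumed from typical qcom 4.4 trees:

/* Hypothetical threshold-monitor client; not a verbatim in-tree user. */
static void example_notify(enum qpnp_tm_state state, void *ctx)
{
	/* called with ADC_TM_HIGH_STATE or ADC_TM_LOW_STATE (assumed) */
}

static int example_monitor(struct qpnp_adc_tm_chip *tm)
{
	struct qpnp_adc_tm_btm_param param = {
		.low_temp	= 0,	/* decidegC thresholds */
		.high_temp	= 800,
		.channel	= LR_MUX1_BATT_THERM,	/* assumed channel */
		.state_request	= ADC_TM_HIGH_LOW_THR_ENABLE,
		.threshold_notification = example_notify,
	};

	return qpnp_adc_tm_channel_measure(tm, &param);
}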
+#else
+static inline int32_t qpnp_adc_tm_usbid_configure(
+			struct qpnp_adc_tm_chip *chip,
+			struct qpnp_adc_tm_btm_param *param)
+{ return -ENXIO; }
+static inline int32_t qpnp_adc_tm_usbid_end(struct qpnp_adc_tm_chip *chip)
+{ return -ENXIO; }
+static inline int32_t qpnp_adc_tm_channel_measure(
+					struct qpnp_adc_tm_chip *chip,
+					struct qpnp_adc_tm_btm_param *param)
+{ return -ENXIO; }
+static inline int32_t qpnp_adc_tm_disable_chan_meas(
+					struct qpnp_adc_tm_chip *chip,
+					struct qpnp_adc_tm_btm_param *param)
+{ return -ENXIO; }
+static inline struct qpnp_adc_tm_chip *qpnp_get_adc_tm(struct device *dev,
+							const char *name)
+{ return ERR_PTR(-ENXIO); }
+#endif
+
+#endif
diff -Nruw linux-4.4.115-fbx/include/linux/qpnp./qpnp-haptic.h linux-4.4.115-fbx/include/linux/qpnp/qpnp-haptic.h
--- linux-4.4.115-fbx/include/linux/qpnp./qpnp-haptic.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/include/linux/qpnp/qpnp-haptic.h	2019-01-22 16:16:28.367290453 +0100
@@ -0,0 +1,24 @@
+/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#ifndef __QPNP_HAPTIC_H
+#define __QPNP_HAPTIC_H
+
+/* interface for the other module to play different sequences */
+#ifdef CONFIG_QPNP_HAPTIC
+int qpnp_hap_play_byte(u8 data, bool on);
+#else
+static inline int qpnp_hap_play_byte(u8 data, bool on)
+{
+	return 0;
+}
+#endif
+#endif
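
A short usage sketch; the amplitude byte is an arbitrary assumed value and
the encoding of @data is driver-specific:

/* Sketch: stream one amplitude byte to the haptic engine, then stop.
 * Needs <linux/delay.h> for msleep(); 0x40 is an assumed sample level.
 */
static void example_buzz(void)
{
	if (qpnp_hap_play_byte(0x40, true))
		return;			/* engine busy or not available */
	msleep(20);			/* let the pattern play briefly */
	qpnp_hap_play_byte(0, false);	/* stop playback */
}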
diff -Nruw linux-4.4.115-fbx/include/linux/qpnp./qpnp-revid.h linux-4.4.115-fbx/include/linux/qpnp/qpnp-revid.h
--- linux-4.4.115-fbx/include/linux/qpnp./qpnp-revid.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/include/linux/qpnp/qpnp-revid.h	2019-01-22 16:16:28.367290453 +0100
@@ -0,0 +1,267 @@
+/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __QPNP_REVID
+#define __QPNP_REVID
+
+/* Common TYPE for all PMICs */
+#define PMIC_TYPE		0x51
+
+/* PM8941 */
+#define PM8941_SUBTYPE		0x01
+
+#define PM8941_V1P0_REV1	0x00
+#define PM8941_V1P0_REV2	0x00
+#define PM8941_V1P0_REV3	0x00
+#define PM8941_V1P0_REV4	0x01
+
+#define PM8941_V2P0_REV1	0x00
+#define PM8941_V2P0_REV2	0x00
+#define PM8941_V2P0_REV3	0x00
+#define PM8941_V2P0_REV4	0x01
+
+#define PM8941_V3P0_REV1	0x00
+#define PM8941_V3P0_REV2	0x00
+#define PM8941_V3P0_REV3	0x00
+#define PM8941_V3P0_REV4	0x03
+
+#define PM8941_V3P1_REV1	0x00
+#define PM8941_V3P1_REV2	0x00
+#define PM8941_V3P1_REV3	0x01
+#define PM8941_V3P1_REV4	0x03
+
+/* PM8841 */
+#define PM8841_SUBTYPE		0x02
+
+/* PM8019 */
+#define PM8019_SUBTYPE		0x03
+
+/* PM8226 */
+#define PM8226_SUBTYPE		0x04
+
+#define PM8226_V2P2_REV1	0x00
+#define PM8226_V2P2_REV2	0x00
+#define PM8226_V2P2_REV3	0x02
+#define PM8226_V2P2_REV4	0x02
+
+#define PM8226_V2P1_REV1	0x00
+#define PM8226_V2P1_REV2	0x00
+#define PM8226_V2P1_REV3	0x01
+#define PM8226_V2P1_REV4	0x02
+
+#define PM8226_V2P0_REV1	0x00
+#define PM8226_V2P0_REV2	0x00
+#define PM8226_V2P0_REV3	0x00
+#define PM8226_V2P0_REV4	0x02
+
+#define PM8226_V1P0_REV1	0x00
+#define PM8226_V1P0_REV2	0x00
+#define PM8226_V1P0_REV3	0x00
+#define PM8226_V1P0_REV4	0x00
+
+/* PM8110 */
+#define PM8110_SUBTYPE		0x05
+
+#define PM8110_V1P0_REV1	0x00
+#define PM8110_V1P0_REV2	0x00
+#define PM8110_V1P0_REV3	0x00
+#define PM8110_V1P0_REV4	0x01
+
+#define PM8110_V1P1_REV1	0x00
+#define PM8110_V1P1_REV2	0x01
+#define PM8110_V1P1_REV3	0x00
+#define PM8110_V1P1_REV4	0x01
+
+#define PM8110_V1P3_REV1	0x00
+#define PM8110_V1P3_REV2	0x03
+#define PM8110_V1P3_REV3	0x00
+#define PM8110_V1P3_REV4	0x01
+
+#define PM8110_V2P0_REV1	0x00
+#define PM8110_V2P0_REV2	0x00
+#define PM8110_V2P0_REV3	0x00
+#define PM8110_V2P0_REV4	0x02
+
+/* PMA8084 */
+#define PMA8084_SUBTYPE		0x06
+
+/* PMI8962 */
+#define PMI8962_SUBTYPE		0x07
+
+/* PMD9635 */
+#define PMD9635_SUBTYPE		0x08
+
+/* PM8994 */
+#define PM8994_SUBTYPE		0x09
+
+/* PMI8994 */
+#define PMI8994_TYPE		0x51
+#define PMI8994_SUBTYPE		0x0A
+
+#define PMI8994_V1P0_REV1	0x00
+#define PMI8994_V1P0_REV2	0x00
+#define PMI8994_V1P0_REV3	0x00
+#define PMI8994_V1P0_REV4	0x01
+
+#define PMI8994_V2P0_REV1	0x00
+#define PMI8994_V2P0_REV2	0x00
+#define PMI8994_V2P0_REV3	0x00
+#define PMI8994_V2P0_REV4	0x02
+
+/* PM8916 */
+#define PM8916_SUBTYPE		0x0B
+
+#define PM8916_V1P0_REV1	0x00
+#define PM8916_V1P0_REV2	0x00
+#define PM8916_V1P0_REV3	0x00
+#define PM8916_V1P0_REV4	0x01
+
+#define PM8916_V1P1_REV1	0x00
+#define PM8916_V1P1_REV2	0x00
+#define PM8916_V1P1_REV3	0x01
+#define PM8916_V1P1_REV4	0x01
+
+#define PM8916_V2P0_REV1	0x00
+#define PM8916_V2P0_REV2	0x00
+#define PM8916_V2P0_REV3	0x00
+#define PM8916_V2P0_REV4	0x02
+
+/* PM8004 */
+#define PM8004_SUBTYPE		0x0C
+
+/* PM8909 */
+#define PM8909_SUBTYPE		0x0D
+
+#define PM8909_V1P0_REV1	0x00
+#define PM8909_V1P0_REV2	0x00
+#define PM8909_V1P0_REV3	0x00
+#define PM8909_V1P0_REV4	0x01
+
+#define PM8909_V1P1_REV1	0x00
+#define PM8909_V1P1_REV2	0x00
+#define PM8909_V1P1_REV3	0x01
+#define PM8909_V1P1_REV4	0x01
+
+/* PM2433 */
+#define PM2433_SUBTYPE		0x0E
+
+/* PMD9655 */
+#define PMD9655_SUBTYPE		0x0F
+
+/* PM8950 */
+#define PM8950_SUBTYPE		0x10
+#define PM8950_V1P0_REV4	0x01
+
+#define PM8950_V2P0_REV4	0x02
+
+/* PMI8950 */
+#define PMI8950_SUBTYPE		0x11
+
+/* PMK8001 */
+#define PMK8001_SUBTYPE		0x12
+
+/* PMI8996 */
+#define PMI8996_SUBTYPE		0x13
+
+/* PM8998 */
+#define PM8998_SUBTYPE	0x14
+
+/* PMI8998 */
+#define PMI8998_SUBTYPE	0x15
+
+/* PM660 */
+#define PM660L_SUBTYPE	0x1A
+#define PM660_SUBTYPE	0x1B
+
+/* PMI8998 REV_ID */
+#define PMI8998_V1P0_REV1	0x00
+#define PMI8998_V1P0_REV2	0x00
+#define PMI8998_V1P0_REV3	0x00
+#define PMI8998_V1P0_REV4	0x01
+
+#define PMI8998_V1P1_REV1	0x00
+#define PMI8998_V1P1_REV2	0x00
+#define PMI8998_V1P1_REV3	0x01
+#define PMI8998_V1P1_REV4	0x01
+
+#define PMI8998_V2P0_REV1	0x00
+#define PMI8998_V2P0_REV2	0x00
+#define PMI8998_V2P0_REV3	0x00
+#define PMI8998_V2P0_REV4	0x02
+
+/* PM660 REV_ID */
+#define PM660_V1P0_REV1		0x00
+#define PM660_V1P0_REV2		0x00
+#define PM660_V1P0_REV3		0x00
+#define PM660_V1P0_REV4		0x01
+
+#define PM660_V1P1_REV1		0x00
+#define PM660_V1P1_REV2		0x00
+#define PM660_V1P1_REV3		0x01
+#define PM660_V1P1_REV4		0x01
+
+/* PM660L REV_ID */
+#define PM660L_V1P1_REV1	0x00
+#define PM660L_V1P1_REV2	0x00
+#define PM660L_V1P1_REV3	0x01
+#define PM660L_V1P1_REV4	0x01
+
+#define PM660L_V2P0_REV1	0x00
+#define PM660L_V2P0_REV2	0x00
+#define PM660L_V2P0_REV3	0x00
+#define PM660L_V2P0_REV4	0x02
+
+/* PMI8998 FAB_ID */
+#define PMI8998_FAB_ID_SMIC	0x11
+#define PMI8998_FAB_ID_GF	0x30
+
+/* PM660 FAB_ID */
+#define PM660_FAB_ID_GF		0x0
+#define PM660_FAB_ID_TSMC	0x2
+#define PM660_FAB_ID_MX		0x3
+
+/* PM8005 */
+#define PM8005_SUBTYPE		0x18
+
+/* PM8937 */
+#define PM8937_SUBTYPE		0x19
+
+/* PMI8937 */
+#define PMI8937_SUBTYPE		0x37
+
+/* SMB1381 */
+#define SMB1381_SUBTYPE		0x17
+
+/* SMB1355 */
+#define SMB1355_SUBTYPE		0x1C
+
+struct pmic_revid_data {
+	u8		rev1;
+	u8		rev2;
+	u8		rev3;
+	u8		rev4;
+	u8		pmic_type;
+	u8		pmic_subtype;
+	const char	*pmic_name;
+	int		fab_id;
+	int		tp_rev;
+};
+
+#ifdef CONFIG_QPNP_REVID
+struct pmic_revid_data *get_revid_data(struct device_node *dev_node);
+#else
+static inline
+struct pmic_revid_data *get_revid_data(struct device_node *dev_node)
+{
+	return NULL;
+}
+#endif
+#endif
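
A sketch of the intended consumption: resolve the revid node via a phandle
and branch on subtype and revision. The "qcom,pmic-revid" property name is
an assumption based on common DT usage:

/* Hedged sketch, assuming a "qcom,pmic-revid" phandle in the caller's
 * DT node. Needs <linux/of.h> and <linux/err.h>.
 */
static bool example_is_pmi8998_v1(struct device_node *node)
{
	struct device_node *revid_node;
	struct pmic_revid_data *rev;

	revid_node = of_parse_phandle(node, "qcom,pmic-revid", 0);
	if (!revid_node)
		return false;

	rev = get_revid_data(revid_node);
	of_node_put(revid_node);
	if (IS_ERR_OR_NULL(rev))
		return false;

	return rev->pmic_subtype == PMI8998_SUBTYPE &&
	       rev->rev4 == PMI8998_V1P0_REV4;
}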
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/include/linux/qpnp-misc.h	2019-01-22 16:16:28.363290417 +0100
@@ -0,0 +1,56 @@
+/* Copyright (c) 2013-2014, 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __QPNP_MISC_H
+#define __QPNP_MISC_H
+
+#include <linux/errno.h>
+
+#ifdef CONFIG_QPNP_MISC
+/**
+ * qpnp_misc_irqs_available - check if IRQs are available
+ *
+ * @consumer_dev: device struct
+ *
+ * This function returns true if the MISC interrupts are available
+ * based on a check in the MISC peripheral revision registers.
+ *
+ * Any consumer of this function needs to reference a MISC device phandle
+ * using the "qcom,misc-ref" property in their device tree node.
+ */
+
+int qpnp_misc_irqs_available(struct device *consumer_dev);
+
+/**
+ * qpnp_misc_read_reg - read register from misc device
+ *
+ * @node: device node pointer
+ * @addr: address offset in misc peripheral to be read
+ * @val: data read from register
+ *
+ * This function returns zero if reading the MISC register succeeds.
+ *
+ */
+
+int qpnp_misc_read_reg(struct device_node *node, u16 addr, u8 *val);
+#else
+static inline int qpnp_misc_irqs_available(struct device *consumer_dev)
+{
+	return 0;
+}
+static inline int qpnp_misc_read_reg(struct device_node *node, u16 addr,
+					u8 *val)
+{
+	return 0;
+}
+#endif
+#endif
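
A consumer sketch tying both helpers together; the register offset is an
arbitrary assumed example, and misc_node is the "qcom,misc-ref" phandle
target described above:

/* Hedged sketch: gate setup on MISC IRQ availability, then peek one
 * register. 0x09 is an assumed offset, not a documented register map.
 */
static int example_misc_setup(struct device *dev,
			      struct device_node *misc_node)
{
	u8 val;
	int rc;

	rc = qpnp_misc_irqs_available(dev);
	if (rc <= 0)
		return rc ? rc : -ENODEV;	/* unavailable or error */

	return qpnp_misc_read_reg(misc_node, 0x09, &val);
}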
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/include/linux/refcount.h	2019-10-29 09:26:25.477221126 +0100
@@ -0,0 +1,294 @@
+#ifndef _LINUX_REFCOUNT_H
+#define _LINUX_REFCOUNT_H
+
+/*
+ * Variant of atomic_t specialized for reference counts.
+ *
+ * The interface matches the atomic_t interface (to aid in porting) but only
+ * provides the few functions one should use for reference counting.
+ *
+ * It differs in that the counter saturates at UINT_MAX and will not move once
+ * there. This avoids wrapping the counter and causing 'spurious'
+ * use-after-free issues.
+ *
+ * Memory ordering rules are slightly relaxed wrt regular atomic_t functions
+ * and provide only what is strictly required for refcounts.
+ *
+ * The increments are fully relaxed; these will not provide ordering. The
+ * rationale is that whatever is used to obtain the object we're increasing the
+ * reference count on will provide the ordering. For locked data structures,
+ * it's the lock acquire; for RCU/lockless data structures it's the dependent
+ * load.
+ *
+ * Do note that inc_not_zero() provides a control dependency which will order
+ * future stores against the inc; this ensures we'll never modify the object
+ * if we did not in fact acquire a reference.
+ *
+ * The decrements will provide release order, such that all the prior loads and
+ * stores will be issued before; it also provides a control dependency, which
+ * will order us against the subsequent free().
+ *
+ * The control dependency is against the load of the cmpxchg (ll/sc) that
+ * succeeded. This means the stores aren't fully ordered, but this is fine
+ * because the 1->0 transition indicates no concurrency.
+ *
+ * Note that the allocator is responsible for ordering things between free()
+ * and alloc().
+ *
+ */
+
+#include <linux/atomic.h>
+#include <linux/bug.h>
+#include <linux/mutex.h>
+#include <linux/spinlock.h>
+
+#ifdef CONFIG_DEBUG_REFCOUNT
+#define REFCOUNT_WARN(cond, str) WARN_ON(cond)
+#define __refcount_check	__must_check
+#else
+#define REFCOUNT_WARN(cond, str) (void)(cond)
+#define __refcount_check
+#endif
+
+typedef struct refcount_struct {
+	atomic_t refs;
+} refcount_t;
+
+#define REFCOUNT_INIT(n)	{ .refs = ATOMIC_INIT(n), }
+
+static inline void refcount_set(refcount_t *r, unsigned int n)
+{
+	atomic_set(&r->refs, n);
+}
+
+static inline unsigned int refcount_read(const refcount_t *r)
+{
+	return atomic_read(&r->refs);
+}
+
+static inline __refcount_check
+bool refcount_add_not_zero(unsigned int i, refcount_t *r)
+{
+	unsigned int old, new, val = atomic_read(&r->refs);
+
+	for (;;) {
+		if (!val)
+			return false;
+
+		if (unlikely(val == UINT_MAX))
+			return true;
+
+		new = val + i;
+		if (new < val)
+			new = UINT_MAX;
+		old = atomic_cmpxchg_relaxed(&r->refs, val, new);
+		if (old == val)
+			break;
+
+		val = old;
+	}
+
+	REFCOUNT_WARN(new == UINT_MAX, "refcount_t: saturated; leaking memory.\n");
+
+	return true;
+}
+
+static inline void refcount_add(unsigned int i, refcount_t *r)
+{
+	REFCOUNT_WARN(!refcount_add_not_zero(i, r), "refcount_t: addition on 0; use-after-free.\n");
+}
+
+/*
+ * Similar to atomic_inc_not_zero(), will saturate at UINT_MAX and WARN.
+ *
+ * Provides no memory ordering, it is assumed the caller has guaranteed the
+ * object memory to be stable (RCU, etc.). It does provide a control dependency
+ * and thereby orders future stores. See the comment on top.
+ */
+static inline __refcount_check
+bool refcount_inc_not_zero(refcount_t *r)
+{
+	unsigned int old, new, val = atomic_read(&r->refs);
+
+	for (;;) {
+		new = val + 1;
+
+		if (!val)
+			return false;
+
+		if (unlikely(!new))
+			return true;
+
+		old = atomic_cmpxchg_relaxed(&r->refs, val, new);
+		if (old == val)
+			break;
+
+		val = old;
+	}
+
+	REFCOUNT_WARN(new == UINT_MAX, "refcount_t: saturated; leaking memory.\n");
+
+	return true;
+}
+
+/*
+ * Similar to atomic_inc(), will saturate at UINT_MAX and WARN.
+ *
+ * Provides no memory ordering, it is assumed the caller already has a
+ * reference on the object, will WARN when this is not so.
+ */
+static inline void refcount_inc(refcount_t *r)
+{
+	REFCOUNT_WARN(!refcount_inc_not_zero(r), "refcount_t: increment on 0; use-after-free.\n");
+}
+
+/*
+ * Similar to atomic_dec_and_test(), it will WARN on underflow and fail to
+ * decrement when saturated at UINT_MAX.
+ *
+ * Provides release memory ordering, such that prior loads and stores are done
+ * before, and provides a control dependency such that free() must come after.
+ * See the comment on top.
+ */
+static inline __refcount_check
+bool refcount_sub_and_test(unsigned int i, refcount_t *r)
+{
+	unsigned int old, new, val = atomic_read(&r->refs);
+
+	for (;;) {
+		if (unlikely(val == UINT_MAX))
+			return false;
+
+		new = val - i;
+		if (new > val) {
+			REFCOUNT_WARN(new > val, "refcount_t: underflow; use-after-free.\n");
+			return false;
+		}
+
+		old = atomic_cmpxchg_release(&r->refs, val, new);
+		if (old == val)
+			break;
+
+		val = old;
+	}
+
+	return !new;
+}
+
+static inline __refcount_check
+bool refcount_dec_and_test(refcount_t *r)
+{
+	return refcount_sub_and_test(1, r);
+}
+
+/*
+ * Similar to atomic_dec(), it will WARN on underflow and fail to decrement
+ * when saturated at UINT_MAX.
+ *
+ * Provides release memory ordering, such that prior loads and stores are done
+ * before.
+ */
+static inline
+void refcount_dec(refcount_t *r)
+{
+	REFCOUNT_WARN(refcount_dec_and_test(r), "refcount_t: decrement hit 0; leaking memory.\n");
+}
+
+/*
+ * No atomic_t counterpart, it attempts a 1 -> 0 transition and returns the
+ * success thereof.
+ *
+ * Like all decrement operations, it provides release memory order and provides
+ * a control dependency.
+ *
+ * It can be used like a try-delete operator; this explicit case is provided
+ * instead of a generic cmpxchg primitive because the latter would allow
+ * implementing unsafe operations.
+ */
+static inline __refcount_check
+bool refcount_dec_if_one(refcount_t *r)
+{
+	return atomic_cmpxchg_release(&r->refs, 1, 0) == 1;
+}
+
+/*
+ * No atomic_t counterpart, it decrements unless the value is 1, in which case
+ * it will return false.
+ *
+ * Was often done like: atomic_add_unless(&var, -1, 1)
+ */
+static inline __refcount_check
+bool refcount_dec_not_one(refcount_t *r)
+{
+	unsigned int old, new, val = atomic_read(&r->refs);
+
+	for (;;) {
+		if (unlikely(val == UINT_MAX))
+			return true;
+
+		if (val == 1)
+			return false;
+
+		new = val - 1;
+		if (new > val) {
+			REFCOUNT_WARN(new > val, "refcount_t: underflow; use-after-free.\n");
+			return true;
+		}
+
+		old = atomic_cmpxchg_release(&r->refs, val, new);
+		if (old == val)
+			break;
+
+		val = old;
+	}
+
+	return true;
+}
+
+/*
+ * Similar to atomic_dec_and_mutex_lock(), it will WARN on underflow and fail
+ * to decrement when saturated at UINT_MAX.
+ *
+ * Provides release memory ordering, such that prior loads and stores are done
+ * before, and provides a control dependency such that free() must come after.
+ * See the comment on top.
+ */
+static inline __refcount_check
+bool refcount_dec_and_mutex_lock(refcount_t *r, struct mutex *lock)
+{
+	if (refcount_dec_not_one(r))
+		return false;
+
+	mutex_lock(lock);
+	if (!refcount_dec_and_test(r)) {
+		mutex_unlock(lock);
+		return false;
+	}
+
+	return true;
+}
+
+/*
+ * Similar to atomic_dec_and_lock(), it will WARN on underflow and fail to
+ * decrement when saturated at UINT_MAX.
+ *
+ * Provides release memory ordering, such that prior loads and stores are done
+ * before, and provides a control dependency such that free() must come after.
+ * See the comment on top.
+ */
+static inline __refcount_check
+bool refcount_dec_and_lock(refcount_t *r, spinlock_t *lock)
+{
+	if (refcount_dec_not_one(r))
+		return false;
+
+	spin_lock(lock);
+	if (!refcount_dec_and_test(r)) {
+		spin_unlock(lock);
+		return false;
+	}
+
+	return true;
+}
+
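+/*
+ * Usage sketch (editor's illustration, not part of the original header):
+ * a minimal refcounted object, assuming a hypothetical struct foo that is
+ * freed with kfree() once the last reference is dropped.
+ *
+ *	struct foo {
+ *		refcount_t ref;
+ *	};
+ *
+ *	static void foo_get(struct foo *f)
+ *	{
+ *		refcount_inc(&f->ref);
+ *	}
+ *
+ *	static void foo_put(struct foo *f)
+ *	{
+ *		if (refcount_dec_and_test(&f->ref))
+ *			kfree(f);
+ *	}
+ */
+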
+#endif /* _LINUX_REFCOUNT_H */
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/include/linux/regulator/msm-ldo-regulator.h	2019-01-22 16:16:28.371290489 +0100
@@ -0,0 +1,32 @@
+/*
+ * Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MSM_LDO_REGULATOR_H__
+#define __MSM_LDO_REGULATOR_H__
+
+/**
+ * enum msm_ldo_supply_mode - operating modes supported by this regulator type.
+ * Use negative logic to ensure BHS mode is treated as the safe default by the
+ * regulator framework. This is necessary since LDO mode can only be enabled
+ * when several constraints are satisfied. Consumers of this regulator are
+ * expected to request changes in operating modes through the use of
+ * regulator_allow_bypass() passing in the desired LDO supply mode.
+ * %BHS_MODE:	to select BHS as operating mode
+ * %LDO_MODE:	to select LDO as operating mode
+ */
+enum msm_ldo_supply_mode {
+	BHS_MODE = false,
+	LDO_MODE = true,
+};
+
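+/*
+ * Usage sketch (editor's illustration): once the LDO-mode constraints are
+ * met, a consumer holding a hypothetical regulator handle "reg" would
+ * request the mode switch through the standard bypass API:
+ *
+ *	ret = regulator_allow_bypass(reg, LDO_MODE);
+ *	...
+ *	ret = regulator_allow_bypass(reg, BHS_MODE);	/* back to safe mode */
+ */
+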
+#endif /* __MSM_LDO_REGULATOR_H__ */
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/include/linux/regulator/proxy-consumer.h	2019-01-22 16:16:28.371290489 +0100
@@ -0,0 +1,41 @@
+/*
+ * Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _LINUX_REGULATOR_PROXY_CONSUMER_H_
+#define _LINUX_REGULATOR_PROXY_CONSUMER_H_
+
+#include <linux/device.h>
+#include <linux/of.h>
+
+struct proxy_consumer;
+
+#ifdef CONFIG_REGULATOR_PROXY_CONSUMER
+
+struct proxy_consumer *regulator_proxy_consumer_register(struct device *reg_dev,
+			struct device_node *reg_node);
+
+int regulator_proxy_consumer_unregister(struct proxy_consumer *consumer);
+
+#else
+
+static inline struct proxy_consumer *regulator_proxy_consumer_register(
+			struct device *reg_dev, struct device_node *reg_node)
+{ return NULL; }
+
+static inline int regulator_proxy_consumer_unregister(
+			struct proxy_consumer *consumer)
+{ return 0; }
+
+#endif
+
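+/*
+ * Usage sketch (editor's illustration): a regulator driver would typically
+ * register a proxy consumer for its device-tree node at probe time and drop
+ * it once real consumers have made their own requests:
+ *
+ *	struct proxy_consumer *pc;
+ *
+ *	pc = regulator_proxy_consumer_register(&pdev->dev, pdev->dev.of_node);
+ *	...
+ *	if (pc)
+ *		regulator_proxy_consumer_unregister(pc);
+ */
+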
+#endif
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/include/linux/regulator/qpnp-labibb-regulator.h	2019-01-22 16:16:28.371290489 +0100
@@ -0,0 +1,24 @@
+/* Copyright (c) 2017 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _QPNP_LABIBB_REGULATOR_H
+#define _QPNP_LABIBB_REGULATOR_H
+
+enum labibb_notify_event {
+	LAB_VREG_OK = 1,
+	LAB_VREG_NOT_OK,
+};
+
+int qpnp_labibb_notifier_register(struct notifier_block *nb);
+int qpnp_labibb_notifier_unregister(struct notifier_block *nb);
+
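+/*
+ * Usage sketch (editor's illustration), assuming a hypothetical client that
+ * wants to react to LAB rail state changes:
+ *
+ *	static int lab_vreg_cb(struct notifier_block *nb, unsigned long event,
+ *			       void *data)
+ *	{
+ *		if (event == LAB_VREG_NOT_OK)
+ *			;	/* handle rail failure */
+ *		return NOTIFY_OK;
+ *	}
+ *
+ *	static struct notifier_block lab_nb = { .notifier_call = lab_vreg_cb };
+ *
+ *	qpnp_labibb_notifier_register(&lab_nb);
+ */
+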
+#endif
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/include/linux/regulator/qpnp-regulator.h	2019-01-22 16:16:28.371290489 +0100
@@ -0,0 +1,197 @@
+/*
+ * Copyright (c) 2012-2013, 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __REGULATOR_QPNP_REGULATOR_H__
+#define __REGULATOR_QPNP_REGULATOR_H__
+
+#include <linux/regulator/machine.h>
+
+#define QPNP_REGULATOR_DRIVER_NAME "qcom,qpnp-regulator"
+
+/* Pin control enable input pins. */
+#define QPNP_REGULATOR_PIN_CTRL_ENABLE_NONE		0x00
+#define QPNP_REGULATOR_PIN_CTRL_ENABLE_EN0		0x01
+#define QPNP_REGULATOR_PIN_CTRL_ENABLE_EN1		0x02
+#define QPNP_REGULATOR_PIN_CTRL_ENABLE_EN2		0x04
+#define QPNP_REGULATOR_PIN_CTRL_ENABLE_EN3		0x08
+#define QPNP_REGULATOR_PIN_CTRL_ENABLE_HW_DEFAULT	0x10
+
+/* Pin control high power mode input pins. */
+#define QPNP_REGULATOR_PIN_CTRL_HPM_NONE		0x00
+#define QPNP_REGULATOR_PIN_CTRL_HPM_EN0			0x01
+#define QPNP_REGULATOR_PIN_CTRL_HPM_EN1			0x02
+#define QPNP_REGULATOR_PIN_CTRL_HPM_EN2			0x04
+#define QPNP_REGULATOR_PIN_CTRL_HPM_EN3			0x08
+#define QPNP_REGULATOR_PIN_CTRL_HPM_SLEEP_B		0x10
+#define QPNP_REGULATOR_PIN_CTRL_HPM_HW_DEFAULT		0x20
+
+/*
+ * Used with enable parameters to specify that hardware default register values
+ * should be left unaltered.
+ */
+#define QPNP_REGULATOR_DISABLE				0
+#define QPNP_REGULATOR_ENABLE				1
+#define QPNP_REGULATOR_USE_HW_DEFAULT			2
+
+/* Soft start strength of a voltage switch type regulator */
+enum qpnp_vs_soft_start_str {
+	QPNP_VS_SOFT_START_STR_0P05_UA,
+	QPNP_VS_SOFT_START_STR_0P25_UA,
+	QPNP_VS_SOFT_START_STR_0P55_UA,
+	QPNP_VS_SOFT_START_STR_0P75_UA,
+	QPNP_VS_SOFT_START_STR_HW_DEFAULT,
+};
+
+/* Current limit of a boost type regulator */
+enum qpnp_boost_current_limit {
+	QPNP_BOOST_CURRENT_LIMIT_300_MA,
+	QPNP_BOOST_CURRENT_LIMIT_600_MA,
+	QPNP_BOOST_CURRENT_LIMIT_900_MA,
+	QPNP_BOOST_CURRENT_LIMIT_1200_MA,
+	QPNP_BOOST_CURRENT_LIMIT_1500_MA,
+	QPNP_BOOST_CURRENT_LIMIT_1800_MA,
+	QPNP_BOOST_CURRENT_LIMIT_2100_MA,
+	QPNP_BOOST_CURRENT_LIMIT_2400_MA,
+	QPNP_BOOST_CURRENT_LIMIT_HW_DEFAULT,
+};
+
+/**
+ * struct qpnp_regulator_platform_data - qpnp-regulator initialization data
+ * @init_data:		regulator constraints
+ * @pull_down_enable:       1 = Enable output pull down resistor when the
+ *			        regulator is disabled
+ *			    0 = Disable pull down resistor
+ *			    QPNP_REGULATOR_USE_HW_DEFAULT = do not modify
+ *			        pull down state
+ * @pin_ctrl_enable:        Bit mask specifying which hardware pins should be
+ *				used to enable the regulator, if any
+ *			    Value should be an ORing of
+ *				QPNP_REGULATOR_PIN_CTRL_ENABLE_* constants.  If
+ *				the bit specified by
+ *				QPNP_REGULATOR_PIN_CTRL_ENABLE_HW_DEFAULT is
+ *				set, then pin control enable hardware registers
+ *				will not be modified.
+ * @pin_ctrl_hpm:           Bit mask specifying which hardware pins should be
+ *				used to force the regulator into high power
+ *				mode, if any
+ *			    Value should be an ORing of
+ *				QPNP_REGULATOR_PIN_CTRL_HPM_* constants.  If
+ *				the bit specified by
+ *				QPNP_REGULATOR_PIN_CTRL_HPM_HW_DEFAULT is
+ *				set, then pin control mode hardware registers
+ *				will not be modified.
+ * @system_load:            Load in uA present on regulator that is not captured
+ *				by any consumer request
+ * @enable_time:            Time in us to delay after enabling the regulator
+ * @ocp_enable:             1 = Allow over current protection (OCP) to be
+ *				enabled for voltage switch type regulators so
+ *				that they latch off automatically when over
+ *				current is detected.  OCP is enabled when in HPM
+ *				or auto mode.
+ *			    0 = Disable OCP
+ *			    QPNP_REGULATOR_USE_HW_DEFAULT = do not modify
+ *			        OCP state
+ * @ocp_irq:                IRQ number of the voltage switch OCP IRQ.  If
+ *				specified the voltage switch will be toggled off
+ *				and back on when OCP triggers in order to handle
+ *				high in-rush current.
+ * @ocp_max_retries:        Maximum number of times to try toggling a voltage
+ *				switch off and back on as a result of
+ *				consecutive over current events.
+ * @ocp_retry_delay_ms:     Time to delay in milliseconds between each
+ *				voltage switch toggle after an over current
+ *				event takes place.
+ * @boost_current_limit:    This parameter sets the current limit of boost type
+ *				regulators.  Its value should be one of
+ *				QPNP_BOOST_CURRENT_LIMIT_*.  If its value is
+ *				QPNP_BOOST_CURRENT_LIMIT_HW_DEFAULT, then the
+ *				boost current limit will be left at its default
+ *				hardware value.
+ * @soft_start_enable:      1 = Enable soft start for LDO and voltage switch
+ *				type regulators so that output voltage slowly
+ *				ramps up when the regulator is enabled
+ *			    0 = Disable soft start
+ *			    QPNP_REGULATOR_USE_HW_DEFAULT = do not modify
+ *			        soft start state
+ * @vs_soft_start_strength: This parameter sets the soft start strength for
+ *				voltage switch type regulators.  Its value
+ *				should be one of QPNP_VS_SOFT_START_STR_*.  If
+ *				its value is QPNP_VS_SOFT_START_STR_HW_DEFAULT,
+ *				then the soft start strength will be left at its
+ *				default hardware value.
+ * @auto_mode_enable:       1 = Enable automatic hardware selection of regulator
+ *				mode (HPM vs LPM).  Auto mode is not available
+ *				on boost type regulators
+ *			    0 = Disable auto mode selection
+ *			    QPNP_REGULATOR_USE_HW_DEFAULT = do not modify
+ *			        auto mode state
+ * @bypass_mode_enable:     1 = Enable bypass mode for an LDO type regulator so
+ *				that it acts like a switch and simply outputs
+ *				its input voltage
+ *			    0 = Do not enable bypass mode
+ *			    QPNP_REGULATOR_USE_HW_DEFAULT = do not modify
+ *			        bypass mode state
+ * @hpm_enable:             1 = Enable high power mode (HPM), also referred to
+ *				as NPM.  HPM consumes more ground current than
+ *				LPM, but it can source significantly higher load
+ *				current.  HPM is not available on boost type
+ *				regulators.  For voltage switch type regulators,
+ *				HPM implies that over current protection and
+ *				soft start are active all the time.  This
+ *				configuration can be overwritten by changing the
+ *				regulator's mode dynamically.
+ *			    0 = Do not enable HPM
+ *			    QPNP_REGULATOR_USE_HW_DEFAULT = do not modify
+ *			        HPM state
+ * @base_addr:              SPMI base address for the regulator peripheral
+ */
+struct qpnp_regulator_platform_data {
+	struct regulator_init_data		init_data;
+	int					pull_down_enable;
+	unsigned int				pin_ctrl_enable;
+	unsigned int				pin_ctrl_hpm;
+	int					system_load;
+	int					enable_time;
+	int					ocp_enable;
+	int					ocp_irq;
+	int					ocp_max_retries;
+	int					ocp_retry_delay_ms;
+	enum qpnp_boost_current_limit		boost_current_limit;
+	int					soft_start_enable;
+	enum qpnp_vs_soft_start_str		vs_soft_start_strength;
+	int					auto_mode_enable;
+	int					bypass_mode_enable;
+	int					hpm_enable;
+	u16					base_addr;
+};
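+
+/*
+ * Usage sketch (editor's illustration): board code describing a regulator
+ * that enables pull-down and leaves everything else at hardware defaults
+ * might look roughly like this (init_data elided):
+ *
+ *	static struct qpnp_regulator_platform_data pdata = {
+ *		.pull_down_enable	= QPNP_REGULATOR_ENABLE,
+ *		.ocp_enable		= QPNP_REGULATOR_USE_HW_DEFAULT,
+ *		.soft_start_enable	= QPNP_REGULATOR_USE_HW_DEFAULT,
+ *		.boost_current_limit	= QPNP_BOOST_CURRENT_LIMIT_HW_DEFAULT,
+ *		.vs_soft_start_strength	= QPNP_VS_SOFT_START_STR_HW_DEFAULT,
+ *	};
+ */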
+
+#ifdef CONFIG_REGULATOR_QPNP
+
+/**
+ * qpnp_regulator_init() - register spmi driver for qpnp-regulator
+ *
+ * This initialization function should be called in systems in which driver
+ * registration ordering must be controlled precisely.
+ */
+int __init qpnp_regulator_init(void);
+
+#else
+
+static inline int __init qpnp_regulator_init(void)
+{
+	return -ENODEV;
+}
+
+#endif /* CONFIG_REGULATOR_QPNP */
+
+#endif
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/include/linux/regulator/rpm-smd-regulator.h	2019-01-22 16:16:28.371290489 +0100
@@ -0,0 +1,132 @@
+/* Copyright (c) 2012-2013, 2015 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _LINUX_REGULATOR_RPM_SMD_H
+#define _LINUX_REGULATOR_RPM_SMD_H
+
+#include <linux/device.h>
+
+struct rpm_regulator;
+
+/**
+ * enum rpm_regulator_voltage_corner - possible voltage corner values
+ *
+ * These should be used in regulator_set_voltage() and
+ * rpm_regulator_set_voltage() calls for corner type regulators as if they had
+ * units of uV.
+ *
+ * Note: the meaning of corner values is set by the RPM.  It is possible that
+ * future platforms will utilize different corner values.  The values specified
+ * in this enum correspond to MSM8974 for PMIC PM8841 SMPS 2 (VDD_Dig).
+ */
+enum rpm_regulator_voltage_corner {
+	RPM_REGULATOR_CORNER_NONE = 1,
+	RPM_REGULATOR_CORNER_RETENTION,
+	RPM_REGULATOR_CORNER_SVS_KRAIT,
+	RPM_REGULATOR_CORNER_SVS_SOC,
+	RPM_REGULATOR_CORNER_NORMAL,
+	RPM_REGULATOR_CORNER_TURBO,
+	RPM_REGULATOR_CORNER_SUPER_TURBO,
+};
+
+/**
+ * enum rpm_regulator_voltage_level - possible voltage level values
+ *
+ * These should be used in regulator_set_voltage() and
+ * rpm_regulator_set_voltage() calls for level type regulators as if they had
+ * units of uV.
+ *
+ * Note: the meaning of level values is set by the RPM.
+ */
+enum rpm_regulator_voltage_level {
+	RPM_REGULATOR_LEVEL_NONE		= 0,
+	RPM_REGULATOR_LEVEL_RETENTION		= 16,
+	RPM_REGULATOR_LEVEL_RETENTION_PLUS	= 32,
+	RPM_REGULATOR_LEVEL_MIN_SVS		= 48,
+	RPM_REGULATOR_LEVEL_LOW_SVS		= 64,
+	RPM_REGULATOR_LEVEL_SVS			= 128,
+	RPM_REGULATOR_LEVEL_SVS_PLUS		= 192,
+	RPM_REGULATOR_LEVEL_NOM			= 256,
+	RPM_REGULATOR_LEVEL_NOM_PLUS		= 320,
+	RPM_REGULATOR_LEVEL_TURBO		= 384,
+	RPM_REGULATOR_LEVEL_BINNING		= 512,
+	RPM_REGULATOR_LEVEL_MAX			= 65535,
+};
+
+/**
+ * enum rpm_regulator_mode - control mode for LDO or SMPS type regulators
+ * %RPM_REGULATOR_MODE_AUTO:	For SMPS type regulators, use SMPS auto mode so
+ *				that the hardware can automatically switch
+ *				between PFM and PWM modes based on realtime
+ *				load.
+ *				LDO type regulators do not support this mode.
+ * %RPM_REGULATOR_MODE_IPEAK:	For SMPS type regulators, use aggregated
+ *				software current requests to determine
+ *				usage of PFM or PWM mode.
+ *				For LDO type regulators, use aggregated
+ *				software current requests to determine
+ *				usage of LPM or HPM mode.
+ * %RPM_REGULATOR_MODE_HPM:	For SMPS type regulators, force the
+ *				usage of PWM mode.
+ *				For LDO type regulators, force the
+ *				usage of HPM mode.
+ *
+ * These values should be used in calls to rpm_regulator_set_mode().
+ */
+enum rpm_regulator_mode {
+	RPM_REGULATOR_MODE_AUTO,
+	RPM_REGULATOR_MODE_IPEAK,
+	RPM_REGULATOR_MODE_HPM,
+};
+
+#ifdef CONFIG_REGULATOR_RPM_SMD
+
+struct rpm_regulator *rpm_regulator_get(struct device *dev, const char *supply);
+
+void rpm_regulator_put(struct rpm_regulator *regulator);
+
+int rpm_regulator_enable(struct rpm_regulator *regulator);
+
+int rpm_regulator_disable(struct rpm_regulator *regulator);
+
+int rpm_regulator_set_voltage(struct rpm_regulator *regulator, int min_uV,
+			      int max_uV);
+
+int rpm_regulator_set_mode(struct rpm_regulator *regulator,
+				enum rpm_regulator_mode mode);
+
+int __init rpm_smd_regulator_driver_init(void);
+
+#else
+
+static inline struct rpm_regulator *rpm_regulator_get(struct device *dev,
+					const char *supply) { return NULL; }
+
+static inline void rpm_regulator_put(struct rpm_regulator *regulator) { }
+
+static inline int rpm_regulator_enable(struct rpm_regulator *regulator)
+			{ return 0; }
+
+static inline int rpm_regulator_disable(struct rpm_regulator *regulator)
+			{ return 0; }
+
+static inline int rpm_regulator_set_voltage(struct rpm_regulator *regulator,
+					int min_uV, int max_uV) { return 0; }
+
+static inline int rpm_regulator_set_mode(struct rpm_regulator *regulator,
+				enum rpm_regulator_mode mode) { return 0; }
+
+static inline int __init rpm_smd_regulator_driver_init(void) { return 0; }
+
+#endif /* CONFIG_REGULATOR_RPM_SMD */
+
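+/*
+ * Usage sketch (editor's illustration), assuming a hypothetical corner-type
+ * supply named "vdd_dig"; corner values are passed to the voltage API as if
+ * they were microvolts, per the enum documentation above:
+ *
+ *	struct rpm_regulator *reg = rpm_regulator_get(dev, "vdd_dig");
+ *
+ *	if (!IS_ERR_OR_NULL(reg)) {
+ *		rpm_regulator_set_voltage(reg, RPM_REGULATOR_CORNER_SVS_SOC,
+ *					  RPM_REGULATOR_CORNER_SUPER_TURBO);
+ *		rpm_regulator_enable(reg);
+ *	}
+ */
+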
+#endif
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/include/linux/regulator/spm-regulator.h	2019-01-22 16:16:28.371290489 +0100
@@ -0,0 +1,25 @@
+/* Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _LINUX_REGULATOR_SPM_H
+#define _LINUX_REGULATOR_SPM_H
+
+#include <linux/err.h>
+#include <linux/init.h>
+
+#ifdef CONFIG_REGULATOR_SPM
+int __init spm_regulator_init(void);
+#else
+static inline int __init spm_regulator_init(void) { return -ENODEV; }
+#endif
+
+#endif
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/include/linux/regulator/stub-regulator.h	2019-01-22 16:16:28.371290489 +0100
@@ -0,0 +1,54 @@
+/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __STUB_REGULATOR_H__
+#define __STUB_REGULATOR_H__
+
+#include <linux/regulator/machine.h>
+
+#define STUB_REGULATOR_DRIVER_NAME "stub-regulator"
+
+/**
+ * struct stub_regulator_pdata - stub regulator device data
+ * @init_data:		regulator constraints
+ * @hpm_min_load:	minimum load in uA that will result in the regulator
+ *			being set to high power mode
+ * @system_uA:		current drawn from regulator not accounted for by any
+ *			regulator framework consumer
+ */
+struct stub_regulator_pdata {
+	struct regulator_init_data	init_data;
+	int				hpm_min_load;
+	int				system_uA;
+};
+
+#ifdef CONFIG_REGULATOR_STUB
+
+/**
+ * regulator_stub_init() - register platform driver for stub-regulator
+ *
+ * This initialization function should be called in systems in which driver
+ * registration ordering must be controlled precisely.
+ */
+
+int __init regulator_stub_init(void);
+
+#else
+
+static inline int __init regulator_stub_init(void)
+{
+	return -ENODEV;
+}
+
+#endif /* CONFIG_REGULATOR_STUB */
+
+#endif
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/include/linux/remote_spinlock.h	2019-01-22 16:16:28.371290489 +0100
@@ -0,0 +1,101 @@
+/* Copyright (c) 2008-2009, 2011, 2013-2015 The Linux Foundation.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef __LINUX_REMOTE_SPINLOCK_H
+#define __LINUX_REMOTE_SPINLOCK_H
+
+#include <linux/spinlock.h>
+#include <linux/msm_remote_spinlock.h>
+
+/* Grabbing a local spin lock before going for a remote lock has several
+ * advantages:
+ * 1. Get calls to preempt enable/disable and IRQ save/restore for free.
+ * 2. For UP kernel, there is no overhead.
+ * 3. Reduces the possibility of executing the remote spin lock code. This is
+ *    especially useful when the remote CPUs' mutual exclusion instructions
+ *    don't work with the local CPUs' instructions. In such cases, one has to
+ *    use software-based mutex algorithms (e.g. Lamport's bakery algorithm),
+ *    which can get expensive when the number of contending CPUs is high.
+ * 4. In the case of a software-based mutex algorithm, the execution time will
+ *    be smaller since the number of contending CPUs is reduced by having just
+ *    one contender for all the local CPUs.
+ * 5. Get most of the spin lock debug features for free.
+ * 6. The code will continue to work "gracefully" even when the remote spin
+ *    lock code is stubbed out for debug purposes or when there is no remote
+ *    CPU in some board/machine types.
+ */
+typedef struct {
+	spinlock_t local;
+	_remote_spinlock_t remote;
+} remote_spinlock_t;
+
+#define remote_spin_lock_init(lock, id) \
+	({ \
+		spin_lock_init(&((lock)->local)); \
+		_remote_spin_lock_init(id, &((lock)->remote)); \
+	})
+#define remote_spin_lock(lock) \
+	do { \
+		spin_lock(&((lock)->local)); \
+		_remote_spin_lock(&((lock)->remote)); \
+	} while (0)
+#define remote_spin_unlock(lock) \
+	do { \
+		_remote_spin_unlock(&((lock)->remote)); \
+		spin_unlock(&((lock)->local)); \
+	} while (0)
+#define remote_spin_lock_irqsave(lock, flags) \
+	do { \
+		spin_lock_irqsave(&((lock)->local), flags); \
+		_remote_spin_lock(&((lock)->remote)); \
+	} while (0)
+#define remote_spin_unlock_irqrestore(lock, flags) \
+	do { \
+		_remote_spin_unlock(&((lock)->remote)); \
+		spin_unlock_irqrestore(&((lock)->local), flags); \
+	} while (0)
+#define remote_spin_trylock(lock) \
+	({ \
+		spin_trylock(&((lock)->local)) \
+		? _remote_spin_trylock(&((lock)->remote)) \
+			? 1 \
+			: ({ spin_unlock(&((lock)->local)); 0; }) \
+		: 0; \
+	})
+#define remote_spin_trylock_irqsave(lock, flags) \
+	({ \
+		spin_trylock_irqsave(&((lock)->local), flags) \
+		? _remote_spin_trylock(&((lock)->remote)) \
+			? 1 \
+			: ({ spin_unlock_irqrestore(&((lock)->local), flags); \
+				0; }) \
+		: 0; \
+	})
+#define remote_spin_lock_rlock_id(lock, tid) \
+	_remote_spin_lock_rlock_id(&((lock)->remote), tid)
+
+#define remote_spin_unlock_rlock(lock) \
+	_remote_spin_unlock_rlock(&((lock)->remote))
+
+#define remote_spin_release(lock, pid) \
+	_remote_spin_release(&((lock)->remote), pid)
+
+#define remote_spin_release_all(pid) \
+	_remote_spin_release_all(pid)
+
+#define remote_spin_owner(lock) \
+	_remote_spin_owner(&((lock)->remote))
+
+#define remote_spin_get_hw_spinlocks_element(lock) \
+	_remote_spin_get_hw_spinlocks_element(&((lock)->remote))
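+
+/*
+ * Usage sketch (editor's illustration), assuming a lock shared with a remote
+ * processor under a hypothetical lock id "smem":
+ *
+ *	static remote_spinlock_t rlock;
+ *	unsigned long flags;
+ *
+ *	remote_spin_lock_init(&rlock, "smem");
+ *	remote_spin_lock_irqsave(&rlock, flags);
+ *	...	access the shared-memory data	...
+ *	remote_spin_unlock_irqrestore(&rlock, flags);
+ */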
+#endif
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/include/linux/restart_block.h	2019-01-22 16:16:28.375290525 +0100
@@ -0,0 +1,51 @@
+/*
+ * Common syscall restarting data
+ */
+#ifndef __LINUX_RESTART_BLOCK_H
+#define __LINUX_RESTART_BLOCK_H
+
+#include <linux/compiler.h>
+#include <linux/types.h>
+
+struct timespec;
+struct compat_timespec;
+struct pollfd;
+
+/*
+ * System call restart block.
+ */
+struct restart_block {
+	long (*fn)(struct restart_block *);
+	union {
+		/* For futex_wait and futex_wait_requeue_pi */
+		struct {
+			u32 __user *uaddr;
+			u32 val;
+			u32 flags;
+			u32 bitset;
+			u64 time;
+			u32 __user *uaddr2;
+		} futex;
+		/* For nanosleep */
+		struct {
+			clockid_t clockid;
+			struct timespec __user *rmtp;
+#ifdef CONFIG_COMPAT
+			struct compat_timespec __user *compat_rmtp;
+#endif
+			u64 expires;
+		} nanosleep;
+		/* For poll */
+		struct {
+			struct pollfd __user *ufds;
+			int nfds;
+			int has_timeout;
+			unsigned long tv_sec;
+			unsigned long tv_nsec;
+		} poll;
+	};
+};
+
+extern long do_no_restart_syscall(struct restart_block *parm);
+
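+/*
+ * Usage sketch (editor's illustration): an interrupted syscall stores its
+ * state in current->restart_block and returns -ERESTART_RESTARTBLOCK; the
+ * saved fn runs when userspace re-enters through restart_syscall(). The
+ * callback name below is hypothetical:
+ *
+ *	struct restart_block *restart = &current->restart_block;
+ *
+ *	restart->fn = my_nanosleep_restart;
+ *	restart->nanosleep.clockid = clockid;
+ *	restart->nanosleep.expires = expires;
+ *	return -ERESTART_RESTARTBLOCK;
+ */
+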
+#endif /* __LINUX_RESTART_BLOCK_H */
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/include/linux/rq_stats.h	2019-01-22 16:16:28.375290525 +0100
@@ -0,0 +1,31 @@
+/* Copyright (c) 2011,2013-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+struct rq_data {
+	unsigned int rq_avg;
+	unsigned long rq_poll_jiffies;
+	unsigned long def_timer_jiffies;
+	unsigned long rq_poll_last_jiffy;
+	unsigned long rq_poll_total_jiffies;
+	unsigned long def_timer_last_jiffy;
+	unsigned int hotplug_disabled;
+	int64_t def_start_time;
+	struct attribute_group *attr_group;
+	struct kobject *kobj;
+	struct work_struct def_timer_work;
+	int init;
+};
+
+extern spinlock_t rq_lock;
+extern struct rq_data rq_info;
+extern struct workqueue_struct *rq_wq;
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/include/linux/sched/core_ctl.h	2019-01-22 16:16:28.379290562 +0100
@@ -0,0 +1,27 @@
+/*
+ * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __CORE_CTL_H
+#define __CORE_CTL_H
+
+#ifdef CONFIG_SCHED_CORE_CTL
+void core_ctl_check(u64 wallclock);
+int core_ctl_set_boost(bool boost);
+#else
+static inline void core_ctl_check(u64 wallclock) {}
+static inline int core_ctl_set_boost(bool boost)
+{
+	return 0;
+}
+#endif
+#endif
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/include/linux/sched_energy.h	2019-01-22 16:16:28.379290562 +0100
@@ -0,0 +1,46 @@
+#ifndef _LINUX_SCHED_ENERGY_H
+#define _LINUX_SCHED_ENERGY_H
+
+#include <linux/sched.h>
+#include <linux/slab.h>
+
+/*
+ * There doesn't seem to be an NR_CPUS-style maximum number of sched domain
+ * levels, so here's an arbitrary constant for the moment.
+ *
+ * The levels alluded to here correspond to entries in struct
+ * sched_domain_topology_level that are meant to be populated by arch
+ * specific code (topology.c).
+ */
+#define NR_SD_LEVELS 8
+
+#define SD_LEVEL0   0
+#define SD_LEVEL1   1
+#define SD_LEVEL2   2
+#define SD_LEVEL3   3
+#define SD_LEVEL4   4
+#define SD_LEVEL5   5
+#define SD_LEVEL6   6
+#define SD_LEVEL7   7
+
+/*
+ * Convenience macro for iterating through said sd levels.
+ */
+#define for_each_possible_sd_level(level)		    \
+	for (level = 0; level < NR_SD_LEVELS; level++)
+
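+/*
+ * Usage sketch (editor's illustration): walking every level for one CPU,
+ * assuming CONFIG_SMP so that sge_array (declared below) is available:
+ *
+ *	int level;
+ *
+ *	for_each_possible_sd_level(level)
+ *		pr_info("cpu0 level %d sge %p\n", level, sge_array[0][level]);
+ */
+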
+extern bool sched_energy_aware;
+
+#ifdef CONFIG_SMP
+
+extern struct sched_group_energy *sge_array[NR_CPUS][NR_SD_LEVELS];
+
+void init_sched_energy_costs(void);
+
+#else
+
+#define init_sched_energy_costs() do { } while (0)
+
+#endif /* CONFIG_SMP */
+
+#endif
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/include/linux/sde_io_util.h	2019-01-22 16:16:28.383290598 +0100
@@ -0,0 +1,115 @@
+/* Copyright (c) 2012, 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __SDE_IO_UTIL_H__
+#define __SDE_IO_UTIL_H__
+
+#include <linux/gpio.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+#include <linux/i2c.h>
+#include <linux/types.h>
+
+#ifdef DEBUG
+#define DEV_DBG(fmt, args...)   pr_err(fmt, ##args)
+#else
+#define DEV_DBG(fmt, args...)   pr_debug(fmt, ##args)
+#endif
+#define DEV_INFO(fmt, args...)  pr_info(fmt, ##args)
+#define DEV_WARN(fmt, args...)  pr_warn(fmt, ##args)
+#define DEV_ERR(fmt, args...)   pr_err(fmt, ##args)
+
+struct dss_io_data {
+	u32 len;
+	void __iomem *base;
+};
+
+void dss_reg_w(struct dss_io_data *io, u32 offset, u32 value, u32 debug);
+u32 dss_reg_r(struct dss_io_data *io, u32 offset, u32 debug);
+void dss_reg_dump(void __iomem *base, u32 len, const char *prefix, u32 debug);
+
+#define DSS_REG_W_ND(io, offset, val)  dss_reg_w(io, offset, val, false)
+#define DSS_REG_W(io, offset, val)     dss_reg_w(io, offset, val, true)
+#define DSS_REG_R_ND(io, offset)       dss_reg_r(io, offset, false)
+#define DSS_REG_R(io, offset)          dss_reg_r(io, offset, true)
+
+enum dss_vreg_type {
+	DSS_REG_LDO,
+	DSS_REG_VS,
+};
+
+struct dss_vreg {
+	struct regulator *vreg; /* vreg handle */
+	char vreg_name[32];
+	int min_voltage;
+	int max_voltage;
+	int enable_load;
+	int disable_load;
+	int pre_on_sleep;
+	int post_on_sleep;
+	int pre_off_sleep;
+	int post_off_sleep;
+	bool lp_disable_allowed;
+	bool disabled;
+};
+
+struct dss_gpio {
+	unsigned int gpio;
+	unsigned int value;
+	char gpio_name[32];
+};
+
+enum dss_clk_type {
+	DSS_CLK_AHB, /* no set rate. rate controlled through rpm */
+	DSS_CLK_PCLK,
+	DSS_CLK_OTHER,
+};
+
+struct dss_clk {
+	struct clk *clk; /* clk handle */
+	char clk_name[32];
+	enum dss_clk_type type;
+	unsigned long rate;
+	unsigned long max_rate;
+};
+
+struct dss_module_power {
+	unsigned int num_vreg;
+	struct dss_vreg *vreg_config;
+	unsigned int num_gpio;
+	struct dss_gpio *gpio_config;
+	unsigned int num_clk;
+	struct dss_clk *clk_config;
+};
+
+int msm_dss_ioremap_byname(struct platform_device *pdev,
+	struct dss_io_data *io_data, const char *name);
+void msm_dss_iounmap(struct dss_io_data *io_data);
+
+int msm_dss_enable_gpio(struct dss_gpio *in_gpio, int num_gpio, int enable);
+int msm_dss_gpio_enable(struct dss_gpio *in_gpio, int num_gpio, int enable);
+
+int msm_dss_config_vreg(struct device *dev, struct dss_vreg *in_vreg,
+	int num_vreg, int config);
+int msm_dss_enable_vreg(struct dss_vreg *in_vreg, int num_vreg,	int enable);
+
+int msm_dss_get_clk(struct device *dev, struct dss_clk *clk_arry, int num_clk);
+void msm_dss_put_clk(struct dss_clk *clk_arry, int num_clk);
+int msm_dss_clk_set_rate(struct dss_clk *clk_arry, int num_clk);
+int msm_dss_enable_clk(struct dss_clk *clk_arry, int num_clk, int enable);
+
+int sde_i2c_byte_read(struct i2c_client *client, uint8_t slave_addr,
+		       uint8_t reg_offset, uint8_t *read_buf);
+int sde_i2c_byte_write(struct i2c_client *client, uint8_t slave_addr,
+			uint8_t reg_offset, uint8_t *value);
+
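+/*
+ * Usage sketch (editor's illustration); the resource name "ctrl_base" is
+ * hypothetical. DSS_REG_W passes the debug flag to dss_reg_w() while the
+ * _ND variants do not:
+ *
+ *	struct dss_io_data io;
+ *	u32 val;
+ *
+ *	if (!msm_dss_ioremap_byname(pdev, &io, "ctrl_base")) {
+ *		DSS_REG_W(&io, 0x0, 0x1);
+ *		val = DSS_REG_R_ND(&io, 0x4);
+ *		msm_dss_iounmap(&io);
+ *	}
+ */
+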
+#endif /* __SDE_IO_UTIL_H__ */
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/include/linux/seemp_instrumentation.h	2019-01-22 16:16:28.383290598 +0100
@@ -0,0 +1,156 @@
+/*
+ * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+*/
+#ifndef __SEEMP_LOGK_STUB__
+#define __SEEMP_LOGK_STUB__
+
+#ifdef CONFIG_SEEMP_CORE
+#include <linux/kernel.h>
+
+#define MAX_BUF_SIZE 188
+
+#define SEEMP_LOGK_API_SIZE sizeof(int)
+
+/* Write: api_id + skip encoding byte + params */
+#define SEEMP_LOGK_RECORD(api_id, format, ...) do {            \
+	*((int *)(buf - SEEMP_LOGK_API_SIZE)) = api_id;             \
+	snprintf(buf + 1, MAX_BUF_SIZE - 1, format, ##__VA_ARGS__); \
+} while (0)
+
+extern void *(*seemp_logk_kernel_begin)(char **buf);
+extern void (*seemp_logk_kernel_end)(void *blck);
+
+static inline void *seemp_setup_buf(char **buf)
+{
+	void *blck;
+
+	if (seemp_logk_kernel_begin && seemp_logk_kernel_end) {
+		blck = seemp_logk_kernel_begin(buf);
+		if (!*buf) {
+			seemp_logk_kernel_end(blck);
+			return NULL;
+		}
+	} else {
+		return NULL;
+	}
+	return blck;
+}
+/*
+ * NOTE: only sendto() is instrumented, since the send() syscall
+ * internally calls sendto() with 2 extra parameters
+ */
+static inline void seemp_logk_sendto(int fd, void __user *buff, size_t len,
+		unsigned flags, struct sockaddr __user *addr, int addr_len)
+{
+	char *buf = NULL;
+	void *blck = NULL;
+
+	/*sets up buf and blck correctly*/
+	blck = seemp_setup_buf(&buf);
+	if (!blck)
+		return;
+
+	/*fill the buf*/
+	SEEMP_LOGK_RECORD(SEEMP_API_kernel__sendto, "len=%u,fd=%d",
+			(unsigned int)len, fd);
+
+	seemp_logk_kernel_end(blck);
+}
+
+/*
+ * NOTE: only recvfrom() is instrumented, since the recv() syscall
+ * internally calls recvfrom() with 2 extra parameters
+ */
+static inline void seemp_logk_recvfrom(int fd, void __user *ubuf,
+		size_t size, unsigned flags, struct sockaddr __user *addr,
+		int __user *addr_len)
+{
+	char *buf = NULL;
+	void *blck = NULL;
+
+	/*sets up buf and blck correctly*/
+	blck = seemp_setup_buf(&buf);
+	if (!blck)
+		return;
+
+	/*fill the buf*/
+	SEEMP_LOGK_RECORD(SEEMP_API_kernel__recvfrom, "size=%u,fd=%d",
+			(unsigned int)size, fd);
+
+	seemp_logk_kernel_end(blck);
+}
+
+static inline void seemp_logk_oom_adjust_write(pid_t pid,
+					kuid_t uid, int oom_adj)
+{
+	char *buf = NULL;
+	void *blck = NULL;
+
+	/*sets up buf and blck correctly*/
+	blck = seemp_setup_buf(&buf);
+	if (!blck)
+		return;
+
+	/*fill the buf*/
+	SEEMP_LOGK_RECORD(SEEMP_API_kernel__oom_adjust_write,
+			 "app_uid=%d,app_pid=%d,oom_adj=%d",
+			uid.val, pid, oom_adj);
+
+	seemp_logk_kernel_end(blck);
+}
+
+static inline void seemp_logk_oom_score_adj_write(pid_t pid, kuid_t uid,
+					int oom_adj_score)
+{
+	char *buf = NULL;
+	void *blck = NULL;
+
+	/*sets up buf and blck correctly*/
+	blck = seemp_setup_buf(&buf);
+	if (!blck)
+		return;
+
+	/*fill the buf*/
+	snprintf(buf, MAX_BUF_SIZE,
+		"-1|kernel|oom_score_adj_write|app_uid=%d,app_pid=%d,oom_adj=%d|--end",
+		uid.val, pid, oom_adj_score);
+
+	seemp_logk_kernel_end(blck);
+}
+
+#else
+static inline void seemp_logk_sendto(int fd, void __user *buff,
+		size_t len, unsigned flags, struct sockaddr __user *addr,
+		int addr_len)
+{
+}
+
+static inline void seemp_logk_recvfrom
+		(int fd, void __user *ubuf, size_t size,
+		unsigned flags, struct sockaddr __user *addr,
+		int __user *addr_len)
+{
+}
+
+static inline void seemp_logk_oom_adjust_write
+		(pid_t pid, kuid_t uid, int oom_adj)
+{
+}
+
+static inline void seemp_logk_oom_score_adj_write
+		(pid_t pid, kuid_t uid, int oom_adj_score)
+{
+}
+#endif
+#endif
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/include/linux/show_mem_notifier.h	2019-01-22 16:16:28.387290634 +0100
@@ -0,0 +1,20 @@
+/*
+ * Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/notifier.h>
+
+int show_mem_notifier_register(struct notifier_block *nb);
+
+int show_mem_notifier_unregister(struct notifier_block *nb);
+
+void show_mem_call_notifiers(void);
diff -Nruw linux-4.4.115-fbx/include/linux/slimbus./slimbus.h linux-4.4.115-fbx/include/linux/slimbus/slimbus.h
--- linux-4.4.115-fbx/include/linux/slimbus./slimbus.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/include/linux/slimbus/slimbus.h	2019-01-22 16:16:28.387290634 +0100
@@ -0,0 +1,1216 @@
+/* Copyright (c) 2011-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _LINUX_SLIMBUS_H
+#define _LINUX_SLIMBUS_H
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/mutex.h>
+#include <linux/mod_devicetable.h>
+
+/* Interfaces between SLIMbus manager drivers and SLIMbus infrastructure. */
+
+extern struct bus_type slimbus_type;
+
+/* Standard values per SLIMbus spec needed by controllers and devices */
+#define SLIM_CL_PER_SUPERFRAME		6144
+#define SLIM_CL_PER_SUPERFRAME_DIV8	(SLIM_CL_PER_SUPERFRAME >> 3)
+#define SLIM_MAX_TXNS			256
+#define SLIM_MAX_CLK_GEAR		10
+#define SLIM_MIN_CLK_GEAR		1
+#define SLIM_CL_PER_SL			4
+#define SLIM_SL_PER_SUPERFRAME		(SLIM_CL_PER_SUPERFRAME >> 2)
+#define SLIM_FRM_SLOTS_PER_SUPERFRAME	16
+#define SLIM_GDE_SLOTS_PER_SUPERFRAME	2
+
+/*
+ * SLIMbus message types. Related to interpretation of message code.
+ * Values are defined in Table 32 (slimbus spec 1.01.01)
+ */
+#define SLIM_MSG_MT_CORE			0x0
+#define SLIM_MSG_MT_DEST_REFERRED_CLASS		0x1
+#define SLIM_MSG_MT_DEST_REFERRED_USER		0x2
+#define SLIM_MSG_MT_SRC_REFERRED_CLASS		0x5
+#define SLIM_MSG_MT_SRC_REFERRED_USER		0x6
+
+/*
+ * SLIMbus core type Message Codes.
+ * Values are defined in Table 65 (slimbus spec 1.01.01)
+ */
+/* Device management messages */
+#define SLIM_MSG_MC_REPORT_PRESENT               0x1
+#define SLIM_MSG_MC_ASSIGN_LOGICAL_ADDRESS       0x2
+#define SLIM_MSG_MC_RESET_DEVICE                 0x4
+#define SLIM_MSG_MC_CHANGE_LOGICAL_ADDRESS       0x8
+#define SLIM_MSG_MC_CHANGE_ARBITRATION_PRIORITY  0x9
+#define SLIM_MSG_MC_REQUEST_SELF_ANNOUNCEMENT    0xC
+#define SLIM_MSG_MC_REPORT_ABSENT                0xF
+
+/* Data channel management messages */
+#define SLIM_MSG_MC_CONNECT_SOURCE               0x10
+#define SLIM_MSG_MC_CONNECT_SINK                 0x11
+#define SLIM_MSG_MC_DISCONNECT_PORT              0x14
+#define SLIM_MSG_MC_CHANGE_CONTENT               0x18
+
+/* Information management messages */
+#define SLIM_MSG_MC_REQUEST_INFORMATION          0x20
+#define SLIM_MSG_MC_REQUEST_CLEAR_INFORMATION    0x21
+#define SLIM_MSG_MC_REPLY_INFORMATION            0x24
+#define SLIM_MSG_MC_CLEAR_INFORMATION            0x28
+#define SLIM_MSG_MC_REPORT_INFORMATION           0x29
+
+/* Reconfiguration messages */
+#define SLIM_MSG_MC_BEGIN_RECONFIGURATION        0x40
+#define SLIM_MSG_MC_NEXT_ACTIVE_FRAMER           0x44
+#define SLIM_MSG_MC_NEXT_SUBFRAME_MODE           0x45
+#define SLIM_MSG_MC_NEXT_CLOCK_GEAR              0x46
+#define SLIM_MSG_MC_NEXT_ROOT_FREQUENCY          0x47
+#define SLIM_MSG_MC_NEXT_PAUSE_CLOCK             0x4A
+#define SLIM_MSG_MC_NEXT_RESET_BUS               0x4B
+#define SLIM_MSG_MC_NEXT_SHUTDOWN_BUS            0x4C
+#define SLIM_MSG_MC_NEXT_DEFINE_CHANNEL          0x50
+#define SLIM_MSG_MC_NEXT_DEFINE_CONTENT          0x51
+#define SLIM_MSG_MC_NEXT_ACTIVATE_CHANNEL        0x54
+#define SLIM_MSG_MC_NEXT_DEACTIVATE_CHANNEL      0x55
+#define SLIM_MSG_MC_NEXT_REMOVE_CHANNEL          0x58
+#define SLIM_MSG_MC_RECONFIGURE_NOW              0x5F
+
+/*
+ * Clock pause flag to indicate that the reconfig message
+ * corresponds to clock pause sequence
+ */
+#define SLIM_MSG_CLK_PAUSE_SEQ_FLG		(1U << 8)
+
+/* Value management messages */
+#define SLIM_MSG_MC_REQUEST_VALUE                0x60
+#define SLIM_MSG_MC_REQUEST_CHANGE_VALUE         0x61
+#define SLIM_MSG_MC_REPLY_VALUE                  0x64
+#define SLIM_MSG_MC_CHANGE_VALUE                 0x68
+
+/* Clock pause values defined in Table 66 (slimbus spec 1.01.01) */
+#define SLIM_CLK_FAST				0
+#define SLIM_CLK_CONST_PHASE			1
+#define SLIM_CLK_UNSPECIFIED			2
+
+struct slim_controller;
+struct slim_device;
+
+/* Destination type Values defined in Table 33 (slimbus spec 1.01.01) */
+#define SLIM_MSG_DEST_LOGICALADDR	0
+#define SLIM_MSG_DEST_ENUMADDR		1
+#define	SLIM_MSG_DEST_BROADCAST		3
+
+/*
+ * @start_offset: Specifies starting offset in information/value element map
+ * @num_bytes: Can be 1, 2, 3, 4, 6, 8, 12, 16 per spec. This ensures that the
+ *	message will fit in the 40-byte message limit and the slicesize can be
+ *	compatible with values in table 21 (slimbus spec 1.01.01)
+ * @comp: Completion to indicate end of message-transfer. Used if client wishes
+ *	to use the API asynchronously.
+ */
+struct slim_ele_access {
+	u16			start_offset;
+	u8			num_bytes;
+	struct completion	*comp;
+};
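+
+/*
+ * Usage sketch (editor's illustration): a blocking 2-byte value-element
+ * access could fill the descriptor as follows (a NULL @comp selects the
+ * synchronous path; the offset is hypothetical):
+ *
+ *	struct slim_ele_access msg = {
+ *		.start_offset	= 0x400,
+ *		.num_bytes	= 2,
+ *		.comp		= NULL,
+ *	};
+ */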
+
+/*
+ * struct slim_framer - Represents Slimbus framer.
+ * Every controller may have multiple framers.
+ * The manager is responsible for framer hand-over.
+ * @e_addr: 6-byte elemental address of the framer.
+ * @rootfreq: Root frequency at which the framer can run. This is the maximum
+ *	frequency (clock gear 10 per the slimbus spec) at which the bus can
+ *	operate.
+ * @superfreq: Superframes per root frequency. Every superframe is 6144 cells
+ *	(bits) per the slimbus specification.
+ */
+struct slim_framer {
+	u8	e_addr[6];
+	int	rootfreq;
+	int	superfreq;
+};
+#define to_slim_framer(d) container_of(d, struct slim_framer, dev)
+
+/*
+ * struct slim_addrt: slimbus address used internally by the slimbus framework.
+ * @valid: True if the device is still present; false if the address can be
+ *	reused.
+ * @eaddr: 6-byte elemental address
+ * @laddr: It is possible that the controller will set a predefined logical
+ *	address rather than the one assigned by the framework (i.e. the logical
+ *	address may not be the same as the index into this table). This entry
+ *	stores the logical address value for this enumeration address.
+ */
+struct slim_addrt {
+	bool	valid;
+	u8	eaddr[6];
+	u8	laddr;
+};
+
+/*
+ * struct slim_val_inf: slimbus value/information element transaction
+ * @start_offset: Specifies starting offset in information/value element map
+ * @num_bytes: number of bytes to be read/written
+ * @wbuf: buffer if this transaction has 'write' component in it
+ * @rbuf: buffer if this transaction has 'read' component in it
+ */
+struct slim_val_inf {
+	u16 start_offset;
+	u8 num_bytes;
+	u8 *wbuf;
+	u8 *rbuf;
+};
+
+/*
+ * struct slim_msg_txn: Message to be sent by the controller.
+ * Linux framework uses this structure with drivers implementing controller.
+ * This structure has packet header, payload and buffer to be filled (if any)
+ * For the header information, refer to Table 34-36.
+ * @rl: Header field. Remaining length.
+ * @mt: Header field. Message type.
+ * @mc: Header field. LSB is message code for type mt. Framework will set MSB to
+ *	SLIM_MSG_CLK_PAUSE_SEQ_FLG in case "mc" in the reconfiguration sequence
+ *	is for pausing the clock.
+ * @dt: Header field. Destination type.
+ * @ec: Element size. Used for elemental access APIs.
+ * @len: Length of payload. (excludes ec)
+ * @tid: Transaction ID. Used for messages expecting response.
+ *	(e.g. relevant for mc = SLIM_MSG_MC_REQUEST_INFORMATION)
+ * @la: Logical address of the device this message is going to.
+ *	(Not used when destination type is broadcast.)
+ * @async: If this transaction is async
+ * @rbuf: Buffer to be populated by controller when response is received.
+ * @wbuf: Payload of the message. (e.g. channel number for DATA channel APIs)
+ * @comp: Completion structure. Used by controller to notify response.
+ *	(Field is relevant when tid is used)
+ */
+struct slim_msg_txn {
+	u8			rl;
+	u8			mt;
+	u16			mc;
+	u8			dt;
+	u16			ec;
+	u8			len;
+	u8			tid;
+	u8			la;
+	bool			async;
+	u8			*rbuf;
+	const u8		*wbuf;
+	struct completion	*comp;
+};
+
+/* Internal port state used by slimbus framework to manage data-ports */
+enum slim_port_state {
+	SLIM_P_FREE,
+	SLIM_P_UNCFG,
+	SLIM_P_CFG,
+};
+
+/*
+ * enum slim_port_req: Port type requested by the user through the port APIs.
+ * The user can request a default port, a half-duplex port, or a port to be
+ * used in a multi-channel configuration. Default indicates a simplex port.
+ */
+enum slim_port_req {
+	SLIM_REQ_DEFAULT,
+	SLIM_REQ_HALF_DUP,
+	SLIM_REQ_MULTI_CH,
+};
+
+/*
+ * enum slim_port_opts: Port options requested.
+ * User can request no configuration, packed data, and/or MSB aligned data port
+ */
+enum slim_port_opts {
+	SLIM_OPT_NONE = 0,
+	SLIM_OPT_NO_PACK = 1U,
+	SLIM_OPT_ALIGN_MSB = 1U << 1,
+};
+
+/* enum slim_port_flow: Port flow type (inbound/outbound). */
+enum slim_port_flow {
+	SLIM_SRC,
+	SLIM_SINK,
+};
+
+/* enum slim_port_err: Port errors */
+enum slim_port_err {
+	SLIM_P_INPROGRESS,
+	SLIM_P_OVERFLOW,
+	SLIM_P_UNDERFLOW,
+	SLIM_P_DISCONNECT,
+	SLIM_P_NOT_OWNED,
+};
+
+/*
+ * struct slim_port_cfg: Port config for the manager port
+ * @port_opts: port options (bit-map) for this port
+ * @watermark: watermark level set for this port
+ */
+struct slim_port_cfg {
+	u32 port_opts;
+	u32 watermark;
+};
+
+/*
+ * struct slim_port: Internal structure used by framework to manage ports
+ * @err: Port error if any for this port. Refer to enum above.
+ * @state: Port state. Refer to enum above.
+ * @req: Port request for this port.
+ * @cfg: Port configuration for this port.
+ * @flow: Flow type of this port.
+ * @ch: Channel association of this port.
+ * @xcomp: Completion to indicate error, data transfer done event.
+ * @ctrl: Controller to which this port belongs. This is useful to associate
+ *	port with the SW since port hardware interrupts may only contain port
+ *	information.
+ */
+struct slim_port {
+	enum slim_port_err	err;
+	enum slim_port_state	state;
+	enum slim_port_req	req;
+	struct slim_port_cfg	cfg;
+	enum slim_port_flow	flow;
+	struct slim_ch		*ch;
+	struct completion	*xcomp;
+	struct slim_controller	*ctrl;
+};
+
+/*
+ * enum slim_ch_state: Channel state of a channel.
+ * Channel transition happens from free-to-allocated-to-defined-to-pending-
+ * active-to-active.
+ * Once active, channel can be removed or suspended. Suspended channels are
+ * still scheduled, but data transfer doesn't happen.
+ * Removed channels are not deallocated until the dealloc_ch API is used.
+ * Deallocation resets the channel state back to free.
+ * Removed channels can be defined with different parameters.
+ */
+enum slim_ch_state {
+	SLIM_CH_FREE,
+	SLIM_CH_ALLOCATED,
+	SLIM_CH_DEFINED,
+	SLIM_CH_PENDING_ACTIVE,
+	SLIM_CH_ACTIVE,
+	SLIM_CH_SUSPENDED,
+	SLIM_CH_PENDING_REMOVAL,
+};
+
+/*
+ * enum slim_ch_proto: Channel protocol used by the channel.
+ * Hard Isochronous channel is not scheduled if current frequency doesn't allow
+ * the channel to be run without flow-control.
+ * Auto isochronous channel will be scheduled as hard-isochronous or push-pull
+ * depending on current bus frequency.
+ * Currently, push-pull, async, and extended channels are not supported.
+ * For more details, refer to the slimbus spec.
+ */
+enum slim_ch_proto {
+	SLIM_HARD_ISO,
+	SLIM_AUTO_ISO,
+	SLIM_PUSH,
+	SLIM_PULL,
+	SLIM_ASYNC_SMPLX,
+	SLIM_ASYNC_HALF_DUP,
+	SLIM_EXT_SMPLX,
+	SLIM_EXT_HALF_DUP,
+};
+
+/*
+ * enum slim_ch_rate: Most commonly used frequency rate families.
+ * Use 1HZ for push-pull transport.
+ * 4KHz and 11.025KHz are most commonly used in audio applications.
+ * Typically, slimbus runs at frequencies to support channels running at 4KHz
+ * and/or 11.025KHz isochronously.
+ */
+enum slim_ch_rate {
+	SLIM_RATE_1HZ,
+	SLIM_RATE_4000HZ,
+	SLIM_RATE_11025HZ,
+};
+
+/*
+ * enum slim_ch_coeff: Coefficient of a channel used internally by framework.
+ * Coefficient is applicable to channels running isochronously.
+ * Coefficient is calculated based on channel rate multiplier.
+ * (If the rate multiplier is a power of 2, it's a coeff.1 channel; otherwise
+ * it's a coeff.3 channel.)
+ */
+enum slim_ch_coeff {
+	SLIM_COEFF_1,
+	SLIM_COEFF_3,
+};
+
+/*
+ * enum slim_ch_control: Channel control.
+ * Activate will schedule channel and/or group of channels in the TDM frame.
+ * Suspend will keep the schedule but data-transfer won't happen.
+ * Remove will remove the channel/group from the TDM frame.
+ */
+enum slim_ch_control {
+	SLIM_CH_ACTIVATE,
+	SLIM_CH_SUSPEND,
+	SLIM_CH_REMOVE,
+};
+
+/* enum slim_ch_dataf: Data format per table 60 from slimbus spec 1.01.01 */
+enum slim_ch_dataf {
+	SLIM_CH_DATAF_NOT_DEFINED = 0,
+	SLIM_CH_DATAF_LPCM_AUDIO = 1,
+	SLIM_CH_DATAF_IEC61937_COMP_AUDIO = 2,
+	SLIM_CH_DATAF_PACKED_PDM_AUDIO = 3,
+};
+
+/* enum slim_ch_auxf: Auxiliary field format per table 59 from slimbus spec */
+enum slim_ch_auxf {
+	SLIM_CH_AUXF_NOT_APPLICABLE = 0,
+	SLIM_CH_AUXF_ZCUV_TUNNEL_IEC60958 = 1,
+	SLIM_CH_USER_DEFINED = 0xF,
+};
+
+/*
+ * struct slim_ch: Channel structure used externally by users of channel APIs.
+ * @prot: Desired slimbus protocol.
+ * @baser: Desired base rate. (Typical isochronous rates are 4KHz or 11.025KHz.)
+ * @dataf: Data format.
+ * @auxf: Auxiliary format.
+ * @ratem: Channel rate multiplier. (e.g. a 48KHz channel will have a 4KHz base
+ *	rate and 12 as the rate multiplier.)
+ * @sampleszbits: Sample size in bits.
+ */
+struct slim_ch {
+	enum slim_ch_proto	prot;
+	enum slim_ch_rate	baser;
+	enum slim_ch_dataf	dataf;
+	enum slim_ch_auxf	auxf;
+	u32			ratem;
+	u32			sampleszbits;
+};
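+
+/*
+ * Usage sketch (editor's illustration): the 48KHz LPCM case from the @ratem
+ * description above, expressed as a channel descriptor:
+ *
+ *	struct slim_ch ch = {
+ *		.prot		= SLIM_AUTO_ISO,
+ *		.baser		= SLIM_RATE_4000HZ,
+ *		.dataf		= SLIM_CH_DATAF_LPCM_AUDIO,
+ *		.auxf		= SLIM_CH_AUXF_NOT_APPLICABLE,
+ *		.ratem		= 12,	/* 4KHz x 12 = 48KHz */
+ *		.sampleszbits	= 16,	/* hypothetical sample size */
+ *	};
+ */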
+
+/*
+ * struct slim_ich: Internal channel structure used by slimbus framework.
+ * @prop: structure passed by the client.
+ * @coeff: Coefficient of this channel.
+ * @state: Current state of the channel.
+ * @nextgrp: If this channel is part of group, next channel in this group.
+ * @prrate: Presence rate of this channel (per table 62 of the spec)
+ * @offset: Offset of this channel in the superframe.
+ * @newoff: Used during scheduling to hold temporary new offset until the offset
+ *	is accepted/rejected by slimbus reconfiguration.
+ * @interval: Interval of this channel per superframe.
+ * @newintr: Used during scheduling to new interval temporarily.
+ * @seglen: Segment length of this channel.
+ * @rootexp: root exponent of this channel. Rate can be found using rootexp and
+ *	coefficient. Used during scheduling.
+ * @srch: Source port used by this channel.
+ * @sinkh: Sink ports used by this channel.
+ * @nsink: number of sink ports used by this channel.
+ * @chan: Channel number sent on hardware lines for this channel. May not be
+ *	equal to array-index into chans if client requested to use number beyond
+ *	channel-array for the controller.
+ * @ref: Reference count to keep track of how many clients (up to 2) are using
+ *	this channel.
+ * @def: Used to keep track of how many times the channel definition is sent
+ *	to hardware and this will decide if channel-remove can be sent for the
+ *	channel. Channel definition may be sent up to twice (once per producer
+ *	and once per consumer). Channel removal should be sent only once to
+ *	avoid clients getting underflow/overflow errors.
+ */
+struct slim_ich {
+	struct slim_ch		prop;
+	enum slim_ch_coeff	coeff;
+	enum slim_ch_state	state;
+	u16			nextgrp;
+	u32			prrate;
+	u32			offset;
+	u32			newoff;
+	u32			interval;
+	u32			newintr;
+	u32			seglen;
+	u8			rootexp;
+	u32			srch;
+	u32			*sinkh;
+	int			nsink;
+	u8			chan;
+	int			ref;
+	int			def;
+};
+
+/*
+ * struct slim_sched: Framework uses this structure internally for scheduling.
+ * @chc3: Array of all active coefficient 3 channels.
+ * @num_cc3: Number of active coefficient 3 channels.
+ * @chc1: Array of all active coefficient 1 channels.
+ * @num_cc1: Number of active coefficient 1 channels.
+ * @subfrmcode: Current subframe-code used by TDM. This is decided based on
+ *	requested message bandwidth and current channels scheduled.
+ * @usedslots: Slots used by all active channels.
+ * @msgsl: Slots used by message-bandwidth.
+ * @pending_msgsl: Used to store pending request of message bandwidth (in slots)
+ *	until the scheduling is accepted by reconfiguration.
+ * @m_reconf: This mutex is held until current reconfiguration (data channel
+ *	scheduling, message bandwidth reservation) is done. Message APIs can
+ *	use the bus concurrently when this mutex is held since elemental access
+ *	messages can be sent on the bus when reconfiguration is in progress.
+ * @slots: Used for debugging purposes to debug/verify current schedule in TDM.
+ */
+struct slim_sched {
+	struct slim_ich	**chc3;
+	int		num_cc3;
+	struct slim_ich	**chc1;
+	int		num_cc1;
+	u32		subfrmcode;
+	u32		usedslots;
+	u32		msgsl;
+	u32		pending_msgsl;
+	struct mutex	m_reconf;
+	u8		*slots;
+};
+
+/*
+ * enum slim_clk_state: Slimbus controller's clock state used internally for
+ *	maintaining current clock state.
+ * @SLIM_CLK_ACTIVE: Slimbus clock is active
+ * @SLIM_CLK_PAUSE_FAILED: Slimbus controller failed to go into clock pause.
+ *	Hardware-wise, this state is the same as active, but the controller
+ *	will wait on completion before transitioning to SLIM_CLK_ACTIVE in the
+ *	framework
+ * @SLIM_CLK_ENTERING_PAUSE: Slimbus clock pause sequence is being sent on the
+ *	bus. If this succeeds, state changes to SLIM_CLK_PAUSED. If the
+ *	transition fails, state changes to SLIM_CLK_PAUSE_FAILED
+ * @SLIM_CLK_PAUSED: Slimbus controller clock has paused.
+ */
+enum slim_clk_state {
+	SLIM_CLK_ACTIVE,
+	SLIM_CLK_ENTERING_PAUSE,
+	SLIM_CLK_PAUSE_FAILED,
+	SLIM_CLK_PAUSED,
+};
+
+/*
+ * struct slim_controller: Represents manager for a SlimBUS
+ *				(similar to 'master' on I2C)
+ * @dev: Device interface to this driver
+ * @nr: Board-specific number identifier for this controller/bus
+ * @list: Link with other slimbus controllers
+ * @name: Name for this controller
+ * @clkgear: Current clock gear in which this bus is running
+ * @min_cg: Minimum clock gear supported by this controller (default value: 1)
+ * @max_cg: Maximum clock gear supported by this controller (default value: 10)
+ * @clk_state: Controller's clock state from enum slim_clk_state
+ * @pause_comp: Signals completion of the clock pause sequence. This is useful
+ *	when a client initiates a slimbus transaction while the controller may
+ *	be entering clock pause.
+ * @a_framer: Active framer which is clocking the bus managed by this controller
+ * @m_ctrl: Mutex protecting controller data structures (ports, channels etc)
+ * @addrt: Logical address table
+ * @num_dev: Number of active slimbus slaves on this bus
+ * @devs: List of devices on this controller
+ * @wq: Workqueue per controller used to notify devices when they report present
+ * @txnt: Table of transactions having transaction ID
+ * @last_tid: size of the table txnt (can't grow beyond 256 since TID is 8-bits)
+ * @txn_lock: Lock protecting the transaction table (txnt)
+ * @ports: Ports associated with this controller
+ * @nports: Number of ports supported by the controller
+ * @chans: Channels associated with this controller
+ * @nchans: Number of channels supported
+ * @reserved: Reserved channels that controller wants to use internally
+ *		Clients will be assigned channel numbers after this number
+ * @sched: scheduler structure used by the controller
+ * @dev_released: completion used to signal when sysfs has released this
+ *	controller so that it can be deleted during shutdown
+ * @xfer_msg: Transfer a message on this controller (this can be a broadcast
+ *	control/status message like data channel setup, or a unicast message
+ *	like value element read/write).
+ * @set_laddr: Setup logical address at laddr for the slave with elemental
+ *	address e_addr. Drivers implementing controller will be expected to
+ *	send unicast message to this device with its logical address.
+ * @allocbw: Controller can override default reconfiguration and channel
+ *	scheduling algorithm.
+ * @get_laddr: Used when the controller needs to maintain a fixed logical
+ *	address table; get_laddr lets the controller supply the logical
+ *	address assignment itself.
+ * @wakeup: This function pointer implements controller-specific procedure
+ *	to wake it up from clock-pause. Framework will call this to bring
+ *	the controller out of clock pause.
+ * @alloc_port: Allocate a port and make it ready for data transfer. This is
+ *	called by framework to make sure controller can take necessary steps
+ *	to initialize its port
+ * @dealloc_port: Counter-part of alloc_port. This is called by framework so
+ *	that controller can free resources associated with this port
+ * @framer_handover: If this controller has multiple framers, this API will
+ *	be called to switch between framers if controller desires to change
+ *	the active framer.
+ * @port_xfer: Called to schedule a transfer on port pn. iobuf is physical
+ *	address and the buffer may have to be DMA friendly since data channels
+ *	will be using data from these buffers without SW intervention.
+ * @port_xfer_status: Called by framework when client calls get_xfer_status
+ *	API. Returns how much buffer is actually processed and the port
+ *	errors (e.g. overflow/underflow) if any.
+ * @xfer_user_msg: Send user message to specified logical address. Underlying
+ *	controller has to support sending user messages. Returns error if any.
+ * @xfer_bulk_wr: Send bulk of write messages to specified logical address.
+ *	Underlying controller has to support this. Typically useful to transfer
+ *	messages to download firmware, or messages where strict ordering for
+ *	slave is necessary.
+ */
+struct slim_controller {
+	struct device		dev;
+	unsigned int		nr;
+	struct list_head	list;
+	char			name[SLIMBUS_NAME_SIZE];
+	int			clkgear;
+	int			min_cg;
+	int			max_cg;
+	enum slim_clk_state	clk_state;
+	struct completion	pause_comp;
+	struct slim_framer	*a_framer;
+	struct mutex		m_ctrl;
+	struct slim_addrt	*addrt;
+	u8			num_dev;
+	struct list_head	devs;
+	struct workqueue_struct *wq;
+	struct slim_msg_txn	*txnt[SLIM_MAX_TXNS];
+	u8			last_tid;
+	spinlock_t		txn_lock;
+	struct slim_port	*ports;
+	int			nports;
+	struct slim_ich		*chans;
+	int			nchans;
+	u8			reserved;
+	struct slim_sched	sched;
+	struct completion	dev_released;
+	int			(*xfer_msg)(struct slim_controller *ctrl,
+				struct slim_msg_txn *txn);
+	int			(*set_laddr)(struct slim_controller *ctrl,
+				const u8 *ea, u8 elen, u8 laddr);
+	int			(*allocbw)(struct slim_device *sb,
+				int *subfrmc, int *clkgear);
+	int			(*get_laddr)(struct slim_controller *ctrl,
+				const u8 *ea, u8 elen, u8 *laddr);
+	int			(*wakeup)(struct slim_controller *ctrl);
+	int			(*alloc_port)(struct slim_controller *ctrl,
+				u8 port);
+	void			(*dealloc_port)(struct slim_controller *ctrl,
+				u8 port);
+	int			(*framer_handover)(struct slim_controller *ctrl,
+				struct slim_framer *new_framer);
+	int			(*port_xfer)(struct slim_controller *ctrl,
+				u8 pn, phys_addr_t iobuf, u32 len,
+				struct completion *comp);
+	enum slim_port_err	(*port_xfer_status)(struct slim_controller *ctrl,
+				u8 pn, phys_addr_t *done_buf, u32 *done_len);
+	int			(*xfer_user_msg)(struct slim_controller *ctrl,
+				u8 la, u8 mt, u8 mc,
+				struct slim_ele_access *msg, u8 *buf, u8 len);
+	int			(*xfer_bulk_wr)(struct slim_controller *ctrl,
+				u8 la, u8 mt, u8 mc, struct slim_val_inf msgs[],
+				int n, int (*comp_cb)(void *ctx, int err),
+				void *ctx);
+};
+#define to_slim_controller(d) container_of(d, struct slim_controller, dev)
+
+/*
+ * struct slim_driver: Manage Slimbus generic/slave device driver
+ * @probe: Binds this driver to a slimbus device.
+ * @remove: Unbinds this driver from the slimbus device.
+ * @shutdown: Standard shutdown callback used during powerdown/halt.
+ * @suspend: Standard suspend callback used during system suspend
+ * @resume: Standard resume callback used during system resume
+ * @device_up: This callback is called when the device reports present and
+ *		gets a logical address assigned to it
+ * @device_down: This callback is called when device reports absent, or the
+ *		bus goes down. Device will report present when bus is up and
+ *		device_up callback will be called again when that happens
+ * @reset_device: This callback is called after the framer is booted.
+ *		The driver should reset the device so that the device
+ *		reacquires sync and becomes operational.
+ * @driver: Slimbus device drivers should initialize name and owner field of
+ *	this structure
+ * @id_table: List of slimbus devices supported by this driver
+ */
+struct slim_driver {
+	int				(*probe)(struct slim_device *sldev);
+	int				(*remove)(struct slim_device *sldev);
+	void				(*shutdown)(struct slim_device *sldev);
+	int				(*suspend)(struct slim_device *sldev,
+					pm_message_t pmesg);
+	int				(*resume)(struct slim_device *sldev);
+	int				(*device_up)(struct slim_device *sldev);
+	int				(*device_down)
+						(struct slim_device *sldev);
+	int				(*reset_device)
+						(struct slim_device *sldev);
+
+	struct device_driver		driver;
+	const struct slim_device_id	*id_table;
+};
+#define to_slim_driver(d) container_of(d, struct slim_driver, driver)
+
+/*
+ * struct slim_pending_ch: List of pending channels used by framework.
+ * @chan: Channel number
+ * @pending: list of channels
+ */
+struct slim_pending_ch {
+	u8	chan;
+	struct	list_head pending;
+};
+
+/*
+ * Client/device handle (struct slim_device):
+ * ------------------------------------------
+ *  This is the client/device handle returned when a slimbus
+ *  device is registered with a controller. This structure can be provided
+ *  during register_board_info, or can be allocated using slim_add_device API.
+ *  Pointer to this structure is used by client-driver as a handle.
+ *  @dev: Driver model representation of the device.
+ *  @name: Name of driver to use with this device.
+ *  @e_addr: 6-byte elemental address of this device.
+ *  @driver: Device's driver. Pointer to access routines.
+ *  @ctrl: Slimbus controller managing the bus hosting this device.
+ *  @laddr: 1-byte Logical address of this device.
+ *  @reported: Flag to indicate whether this device reported present. The flag
+ *	is set when device reports present, and is reset when it reports
+ *	absent. This flag along with the notified flag below is used to call
+ *	device_up, or device_down callbacks for driver of this device.
+ *  @mark_define: List of channels pending definition/activation.
+ *  @mark_suspend: List of channels pending suspend.
+ *  @mark_removal: List of channels pending removal.
+ *  @notified: Flag to indicate whether this device has been notified. The
+ *	device may report present multiple times, but should be notified only
+ *	the first time it reports present.
+ *  @dev_list: List of devices on a controller
+ *  @wd: Work structure associated with workqueue for presence notification
+ *  @sldev_reconf: Mutex to protect the pending data-channel lists.
+ *  @pending_msgsl: Message bandwidth reservation request by this client in
+ *	slots that's pending reconfiguration.
+ *  @cur_msgsl: Message bandwidth reserved by this client in slots.
+ *  The three mark_* lists above are managed by the framework. They are
+ *  populated when the client calls a channel control API without the
+ *  reconfig-flag set, and are emptied when the reconfiguration is done by
+ *  this client.
+ */
+struct slim_device {
+	struct device		dev;
+	const char		*name;
+	u8			e_addr[6];
+	struct slim_driver	*driver;
+	struct slim_controller	*ctrl;
+	u8			laddr;
+	bool			reported;
+	struct list_head	mark_define;
+	struct list_head	mark_suspend;
+	struct list_head	mark_removal;
+	bool			notified;
+	struct list_head	dev_list;
+	struct work_struct	wd;
+	struct mutex		sldev_reconf;
+	u32			pending_msgsl;
+	u32			cur_msgsl;
+};
+#define to_slim_device(d) container_of(d, struct slim_device, dev)
+
+/*
+ * struct slim_boardinfo: Declare board info for Slimbus device bringup.
+ * @bus_num: Controller number (bus) on which this device will sit.
+ * @slim_slave: Device to be registered with slimbus.
+ */
+struct slim_boardinfo {
+	int			bus_num;
+	struct slim_device	*slim_slave;
+};
+
+/*
+ * slim_get_logical_addr: Return the logical address of a slimbus device.
+ * @sb: client handle requesting the address.
+ * @e_addr: Elemental address of the device.
+ * @e_len: Length of e_addr
+ * @laddr: output buffer to store the address
+ * context: can sleep
+ * -EINVAL is returned in case of invalid parameters, and -ENXIO is returned if
+ *  the device with this elemental address is not found.
+ */
+
+extern int slim_get_logical_addr(struct slim_device *sb, const u8 *e_addr,
+					u8 e_len, u8 *laddr);
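+
+/*
+ * Example: look up own logical address after enumeration. A minimal sketch;
+ * "sb" is assumed to be the client handle given to the driver's probe.
+ *
+ *	u8 laddr;
+ *	int ret = slim_get_logical_addr(sb, sb->e_addr, 6, &laddr);
+ *
+ * ret is -ENXIO until the device has reported present and has been assigned
+ * a logical address.
+ */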
+
+/* Unicast message APIs used by slimbus slave drivers */
+
+/*
+ * Message API access routines.
+ * @sb: client handle requesting elemental message reads, writes.
+ * @msg: Input structure for start-offset, number of bytes to read.
+ * @rbuf: data buffer to be filled with values read.
+ * @len: data buffer size
+ * @wbuf: data buffer containing value/information to be written
+ * context: can sleep
+ * Returns:
+ * -EINVAL: Invalid parameters
+ * -ETIMEDOUT: If the controller could not complete the request. This may
+ *  happen if the bus lines are not clocked, the controller is not powered on,
+ *  or the slave with the given address is not enumerated/responding.
+ */
+extern int slim_request_val_element(struct slim_device *sb,
+					struct slim_ele_access *msg, u8 *buf,
+					u8 len);
+extern int slim_request_inf_element(struct slim_device *sb,
+					struct slim_ele_access *msg, u8 *buf,
+					u8 len);
+extern int slim_change_val_element(struct slim_device *sb,
+					struct slim_ele_access *msg,
+					const u8 *buf, u8 len);
+extern int slim_clear_inf_element(struct slim_device *sb,
+					struct slim_ele_access *msg, u8 *buf,
+					u8 len);
+extern int slim_request_change_val_element(struct slim_device *sb,
+					struct slim_ele_access *msg, u8 *rbuf,
+					const u8 *wbuf, u8 len);
+extern int slim_request_clear_inf_element(struct slim_device *sb,
+					struct slim_ele_access *msg, u8 *rbuf,
+					const u8 *wbuf, u8 len);
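+
+/*
+ * Example: blocking 2-byte value-element read. A minimal sketch; the register
+ * offset is hypothetical, and the slim_ele_access layout (start_offset,
+ * num_bytes, comp) is assumed from its declaration earlier in this header.
+ *
+ *	u8 buf[2];
+ *	struct slim_ele_access msg = {
+ *		.start_offset = 0x100,
+ *		.num_bytes = 2,
+ *		.comp = NULL,
+ *	};
+ *	int ret = slim_request_val_element(sb, &msg, buf, 2);
+ *
+ * Leaving comp NULL is assumed to make the call block until the reply
+ * arrives or the request times out.
+ */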
+
+/*
+ * Broadcast message API:
+ * call this API directly with sbdev = NULL.
+ * For broadcast reads, make sure that buffers are big enough to incorporate
+ * replies from all logical addresses.
+ * Not all controllers support broadcast.
+ */
+extern int slim_xfer_msg(struct slim_controller *ctrl,
+			struct slim_device *sbdev, struct slim_ele_access *msg,
+			u16 mc, u8 *rbuf, const u8 *wbuf, u8 len);
+
+/*
+ * User message:
+ * slim_user_msg: Send user message that is interpreted by destination device
+ * @sb: Client handle sending the message
+ * @la: Destination device for this user message
+ * @mt: Message Type (Source-referred, or Destination-referred)
+ * @mc: Message Code
+ * @msg: Message structure (start offset, number of bytes) to be sent
+ * @buf: data buffer to be sent
+ * @len: data buffer size in bytes
+ */
+extern int slim_user_msg(struct slim_device *sb, u8 la, u8 mt, u8 mc,
+				struct slim_ele_access *msg, u8 *buf, u8 len);
+
+/*
+ * Queue bulk of message writes:
+ * slim_bulk_msg_write: Write bulk of messages (e.g. downloading FW)
+ * @sb: Client handle sending these messages; they are addressed to this
+ *	device
+ * @mt: Message Type
+ * @mc: Message Code
+ * @msgs: List of messages to be written in bulk
+ * @n: Number of messages in the list
+ * @comp_cb: Callback if client needs this to be non-blocking
+ * @ctx: Context for this callback
+ * If supported by the controller, this message list will be sent in bulk to
+ * the HW. If the client specifies this to be non-blocking, the callback will
+ * be called from atomic context.
+ */
+extern int slim_bulk_msg_write(struct slim_device *sb, u8 mt, u8 mc,
+			struct slim_val_inf msgs[], int n,
+			int (*comp_cb)(void *ctx, int err), void *ctx);
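+
+/*
+ * Example: two 1-byte writes queued as one bulk transfer. A minimal sketch;
+ * the message type/code values and offsets are hypothetical placeholders,
+ * and slim_val_inf is assumed to carry start_offset/num_bytes/wbuf as
+ * declared earlier in this header.
+ *
+ *	u8 v0 = 0x01, v1 = 0x02;
+ *	struct slim_val_inf msgs[] = {
+ *		{ .start_offset = 0x400, .num_bytes = 1, .wbuf = &v0 },
+ *		{ .start_offset = 0x404, .num_bytes = 1, .wbuf = &v1 },
+ *	};
+ *	ret = slim_bulk_msg_write(sb, mt, mc, msgs, 2, NULL, NULL);
+ *
+ * With comp_cb == NULL the call is expected to behave synchronously.
+ */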
+/* end of message apis */
+
+/* Port management for manager device APIs */
+
+/*
+ * slim_alloc_mgrports: Allocate port on manager side.
+ * @sb: device/client handle.
+ * @req: Port request type.
+ * @nports: Number of ports requested
+ * @rh: output buffer to store the port handles
+ * @hsz: size of buffer storing handles
+ * context: can sleep
+ * This port will typically be used by SW, e.g. a client driver that wants to
+ * receive data from an audio codec over a data channel.
+ * A port allocated using this API will be used to receive the data.
+ * If half-duplex ports are requested, two adjacent ports are allocated for
+ * 1 half-duplex port. So the handle-buffer size should be twice the number
+ * of half-duplex ports to be allocated.
+ * -EDQUOT is returned if all ports are in use.
+ */
+extern int slim_alloc_mgrports(struct slim_device *sb, enum slim_port_req req,
+				int nports, u32 *rh, int hsz);
+
+/* Deallocate the port(s) allocated using the API above */
+extern int slim_dealloc_mgrports(struct slim_device *sb, u32 *hdl, int hsz);
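+
+/*
+ * Example: allocate one half-duplex manager port. A minimal sketch;
+ * SLIM_REQ_HALF_DUP is assumed from the slim_port_req enum earlier in this
+ * header, and hsz is taken to be the handle-buffer size in bytes.
+ *
+ *	u32 ph[2];
+ *	ret = slim_alloc_mgrports(sb, SLIM_REQ_HALF_DUP, 1, ph, sizeof(ph));
+ *	...
+ *	slim_dealloc_mgrports(sb, ph, sizeof(ph));
+ *
+ * Two handles back the single half-duplex port, per the note above.
+ */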
+
+/*
+ * slim_config_mgrports: Configure manager side ports
+ * @sb: device/client handle.
+ * @ph: array of port handles for which this configuration is valid
+ * @nports: Number of ports in ph
+ * @cfg: configuration requested for port(s)
+ * Configure port settings if they are different than the default ones.
+ * Returns success if the config could be applied. Returns -EISCONN if the
+ * port is in use
+ */
+extern int slim_config_mgrports(struct slim_device *sb, u32 *ph, int nports,
+				struct slim_port_cfg *cfg);
+
+/*
+ * slim_port_xfer: Schedule buffer to be transferred/received using port-handle.
+ * @sb: client handle
+ * @ph: port-handle
+ * @iobuf: buffer to be transferred or populated
+ * @len: buffer size.
+ * @comp: completion signal to indicate transfer done or error.
+ * context: can sleep
+ * Returns number of bytes transferred/received if used synchronously.
+ * Will return 0 if used asynchronously.
+ * Client will call slim_port_get_xfer_status to get error and/or number of
+ * bytes transferred if used asynchronously.
+ */
+extern int slim_port_xfer(struct slim_device *sb, u32 ph, phys_addr_t iobuf,
+				u32 len, struct completion *comp);
+
+/*
+ * slim_port_get_xfer_status: Poll for port transfers, or get transfer status
+ *	after completion is done.
+ * @sb: client handle
+ * @ph: port-handle
+ * @done_buf: return pointer (iobuf from slim_port_xfer) which is processed.
+ * @done_len: Number of bytes transferred.
+ * This can be called once the port_xfer completion is signalled.
+ * The API will return port transfer error (underflow/overflow/disconnect)
+ * and/or done_len will reflect number of bytes transferred. Note that
+ * done_len may be valid even if port error (overflow/underflow) has happened.
+ * e.g. If the transfer was scheduled with a few bytes to be transferred and
+ * client has not supplied more data to be transferred, done_len will indicate
+ * number of bytes transferred with underflow error. To avoid frequent underflow
+ * errors, multiple transfers can be queued (e.g. ping-pong buffers) so that
+ * channel has data to be transferred even if client is not ready to transfer
+ * data all the time. done_buf will indicate address of the last buffer
+ * processed from the multiple transfers.
+ */
+extern enum slim_port_err slim_port_get_xfer_status(struct slim_device *sb,
+			u32 ph, phys_addr_t *done_buf, u32 *done_len);
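+
+/*
+ * Example: ping-pong buffering as suggested above. A minimal sketch; iobuf
+ * is assumed to be the physical address of a DMA-friendly buffer of len
+ * bytes, split in two halves queued back to back, and comp1/comp2 are
+ * initialized completions.
+ *
+ *	slim_port_xfer(sb, ph, iobuf, len / 2, &comp1);
+ *	slim_port_xfer(sb, ph, iobuf + len / 2, len / 2, &comp2);
+ *
+ *	wait_for_completion(&comp1);
+ *	err = slim_port_get_xfer_status(sb, ph, &done_buf, &done_len);
+ *
+ * The first half can now be refilled and requeued while the second is still
+ * in flight, avoiding underflow between transfers.
+ */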
+
+/*
+ * slim_connect_src: Connect source port to channel.
+ * @sb: client handle
+ * @srch: source handle to be connected to this channel
+ * @chanh: Channel with which the port needs to be associated.
+ * Per slimbus specification, a channel may have 1 source port.
+ * Channel specified in chanh needs to be allocated first.
+ * Returns -EALREADY if a source is already configured for this channel.
+ * Returns -ENOTCONN if the channel is not allocated.
+ * Returns -EINVAL if an invalid direction is specified for a non-manager
+ * port, or if the manager-side port number is out of bounds or the port is in
+ * an incorrect state.
+ */
+extern int slim_connect_src(struct slim_device *sb, u32 srch, u16 chanh);
+
+/*
+ * slim_connect_sink: Connect sink port(s) to channel.
+ * @sb: client handle
+ * @sinkh: sink handle(s) to be connected to this channel
+ * @nsink: number of sinks
+ * @chanh: Channel with which the ports need to be associated.
+ * Per slimbus specification, a channel may have multiple sink-ports.
+ * Channel specified in chanh needs to be allocated first.
+ * Returns -EALREADY if a sink is already configured for this channel.
+ * Returns -ENOTCONN if the channel is not allocated.
+ * Returns -EINVAL if invalid parameters are passed, if an invalid direction
+ * is specified for a non-manager port, or if the manager-side port number is
+ * out of bounds or the port is in an incorrect state.
+ */
+extern int slim_connect_sink(struct slim_device *sb, u32 *sinkh, int nsink,
+				u16 chanh);
+/*
+ * slim_disconnect_ports: Disconnect port(s) from channel
+ * @sb: client handle
+ * @ph: ports to be disconnected
+ * @nph: number of ports.
+ * Disconnects ports from a channel.
+ */
+extern int slim_disconnect_ports(struct slim_device *sb, u32 *ph, int nph);
+
+/*
+ * slim_get_slaveport: Get slave port handle
+ * @la: slave device logical address.
+ * @idx: port index at slave
+ * @rh: return handle
+ * @flw: Flow type (source or destination)
+ * This API only returns a slave port's representation as expected by slimbus
+ * driver. This port is not managed by the slimbus driver. Caller is expected
+ * to have visibility of this port since it's a device-port.
+ */
+extern int slim_get_slaveport(u8 la, int idx, u32 *rh, enum slim_port_flow flw);
+
+
+/* Channel functions. */
+
+/*
+ * slim_alloc_ch: Allocate a slimbus channel and return its handle.
+ * @sb: client handle.
+ * @chanh: return channel handle
+ * Slimbus channels are limited to 256 per specification.
+ * -EXFULL is returned if all channels are in use.
+ * Although slimbus specification supports 256 channels, a controller may not
+ * support that many channels.
+ */
+extern int slim_alloc_ch(struct slim_device *sb, u16 *chanh);
+
+/*
+ * slim_query_ch: Get a reference-counted handle for a channel number. Every
+ * channel is reference counted (one user as producer, the others as
+ * consumers).
+ * @sb: client handle
+ * @chan: slimbus channel number
+ * @chanh: return channel handle
+ * If the requested channel number is not in use, it is allocated and its
+ * reference count is set to one. If the channel was already allocated, this
+ * API returns a handle to that channel and increments the reference count.
+ * -EXFULL is returned if all channels are in use
+ */
+extern int slim_query_ch(struct slim_device *sb, u8 chan, u16 *chanh);
+/*
+ * slim_dealloc_ch: Deallocate channel allocated using the API above
+ * -EISCONN is returned if deallocation is attempted on a channel that has
+ *  not been removed first.
+ * -ENOTCONN is returned if deallocation is attempted on a channel that is
+ *  not allocated.
+ */
+extern int slim_dealloc_ch(struct slim_device *sb, u16 chanh);
+
+
+/*
+ * slim_define_ch: Define a channel. This API defines channel parameters for a
+ *	given channel.
+ * @sb: client handle.
+ * @prop: slim_ch structure with channel parameters desired to be used.
+ * @chanh: list of channels to be defined.
+ * @nchan: number of channels in a group (1 if grp is false)
+ * @grp: Are the channels grouped
+ * @grph: return group handle if grouping of channels is desired.
+ *	Channels can be grouped if multiple channels use the same parameters
+ *	(e.g. 5.1 audio has 6 channels with the same parameters; they can all
+ *	be grouped and given one handle, which avoids repeatedly calling the
+ *	API).
+ * -EISCONN is returned if channel is already used with different parameters.
+ * -ENXIO is returned if the channel is not yet allocated.
+ */
+extern int slim_define_ch(struct slim_device *sb, struct slim_ch *prop,
+				u16 *chanh, u8 nchan, bool grp, u16 *grph);
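+
+/*
+ * Example: define two 48 kHz, 16-bit channels as one group. A minimal
+ * sketch; the protocol and base-rate constants below are assumed names from
+ * the slim_ch enums earlier in this header.
+ *
+ *	u16 ch[2], grph;
+ *	struct slim_ch prop = {
+ *		.prot = SLIM_AUTO_ISO,
+ *		.baser = SLIM_RATE_4000HZ,
+ *		.ratem = 12,
+ *		.sampleszbits = 16,
+ *	};
+ *	slim_alloc_ch(sb, &ch[0]);
+ *	slim_alloc_ch(sb, &ch[1]);
+ *	ret = slim_define_ch(sb, &prop, ch, 2, true, &grph);
+ *
+ * A rate multiplier of 12 on a 4 kHz base rate yields 48 kHz.
+ */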
+
+/*
+ * slim_control_ch: Channel control API.
+ * @sb: client handle
+ * @grpchanh: group or channel handle to be controlled
+ * @chctrl: Control command (activate/suspend/remove)
+ * @commit: flag to indicate whether the control should take effect right-away.
+ * This API activates, removes or suspends a channel (or group of channels).
+ * grpchanh indicates the channel or group handle (returned by the define_ch
+ * API). Reconfiguration may be time-consuming since it can change all other
+ * active channel allocations on the bus, the clock gear used by the slimbus,
+ * and the control space width used for messaging.
+ * The commit flag makes sure that multiple channels can be
+ * activated/deactivated before reconfiguration is started.
+ * -EXFULL is returned if there is no space in TDM to reserve the bandwidth.
+ * -EISCONN/-ENOTCONN is returned if the channel is already connected or not
+ * yet defined.
+ * -EINVAL is returned if individual control of a grouped-channel is attempted.
+ */
+extern int slim_control_ch(struct slim_device *sb, u16 grpchanh,
+				enum slim_ch_control chctrl, bool commit);
+
+/*
+ * slim_get_ch_state: Channel state.
+ * This API returns the channel's state (active, suspended, inactive etc)
+ */
+extern enum slim_ch_state slim_get_ch_state(struct slim_device *sb,
+						u16 chanh);
+
+/*
+ * slim_reservemsg_bw: Request to reserve bandwidth for messages.
+ * @sb: client handle
+ * @bw_bps: message bandwidth in bits per second to be requested
+ * @commit: indicates whether the reconfiguration needs to be acted upon.
+ * This API call can be grouped with slim_control_ch API call with only one of
+ * the APIs specifying the commit flag to avoid reconfiguration being called too
+ * frequently. -EXFULL is returned if there is no space in TDM to reserve the
+ * bandwidth. -EBUSY is returned if reconfiguration is requested, but a request
+ * is already in progress.
+ */
+extern int slim_reservemsg_bw(struct slim_device *sb, u32 bw_bps, bool commit);
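+
+/*
+ * Example: group a channel activation with a message-bandwidth request so
+ * that only one reconfiguration runs. A minimal sketch; grph is a group
+ * handle from slim_define_ch and SLIM_CH_ACTIVATE is an assumed name from
+ * the slim_ch_control enum earlier in this header.
+ *
+ *	slim_control_ch(sb, grph, SLIM_CH_ACTIVATE, false);
+ *	ret = slim_reservemsg_bw(sb, 100000, true);
+ *
+ * Only the second call sets the commit flag, so both requests are applied in
+ * a single reconfiguration.
+ */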
+
+/*
+ * slim_reconfigure_now: Request reconfiguration now.
+ * @sb: client handle
+ * This API does what commit flag in other scheduling APIs do.
+ * -EXFULL is returned if there is no space in TDM to reserve the
+ * bandwidth. -EBUSY is returned if reconfiguration request is already in
+ * progress.
+ */
+extern int slim_reconfigure_now(struct slim_device *sb);
+
+/*
+ * slim_ctrl_clk_pause: Called by slimbus controller to request clock to be
+ *	paused or woken up out of clock pause
+ * @ctrl: controller requesting bus to be paused or woken up
+ * @wakeup: Wakeup this controller from clock pause.
+ * @restart: Restart time value per spec used for clock pause. This value
+ *	isn't used when controller is to be woken up.
+ * This API executes the clock pause reconfiguration sequence if wakeup is
+ * false; if wakeup is true, the controller's wakeup callback is called.
+ * Once paused, the Slimbus clock is idle and can be disabled by the
+ * controller later.
+ */
+extern int slim_ctrl_clk_pause(struct slim_controller *ctrl, bool wakeup,
+		u8 restart);
+
+/*
+ * slim_driver_register: Client driver registration with slimbus
+ * @drv: Client driver to be associated with client-device.
+ * This API will register the client driver with the slimbus
+ * It is called from the driver's module-init function.
+ */
+extern int slim_driver_register(struct slim_driver *drv);
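+
+/*
+ * Example: registration from a client's module-init. A minimal sketch; the
+ * driver name and callbacks are hypothetical.
+ *
+ *	static struct slim_driver codec_driver = {
+ *		.probe = codec_probe,
+ *		.remove = codec_remove,
+ *		.driver = {
+ *			.name = "example-codec",
+ *			.owner = THIS_MODULE,
+ *		},
+ *	};
+ *
+ *	return slim_driver_register(&codec_driver);
+ */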
+
+/*
+ * slim_driver_unregister: Undo effects of slim_driver_register
+ * @drv: Client driver to be unregistered
+ */
+extern void slim_driver_unregister(struct slim_driver *drv);
+
+/*
+ * slim_add_numbered_controller: Controller bring-up.
+ * @ctrl: Controller to be registered.
+ * A controller is registered with the framework using this API. ctrl->nr is the
+ * desired number with which slimbus framework registers the controller.
+ * Function will return -EBUSY if the number is in use.
+ */
+extern int slim_add_numbered_controller(struct slim_controller *ctrl);
+
+/*
+ * slim_del_controller: Controller tear-down.
+ * A controller added with the above API is torn down using this API.
+ */
+extern int slim_del_controller(struct slim_controller *ctrl);
+
+/*
+ * slim_add_device: Add a new device without registering board info.
+ * @ctrl: Controller to which this device is to be added.
+ * @sbdev: Device to be added.
+ * Called when the device doesn't have an explicit client-driver to be probed,
+ * or the client-driver is a module installed dynamically.
+ */
+extern int slim_add_device(struct slim_controller *ctrl,
+			struct slim_device *sbdev);
+
+/* slim_remove_device: Remove the effect of slim_add_device() */
+extern void slim_remove_device(struct slim_device *sbdev);
+
+/*
+ * slim_assign_laddr: Assign logical address to a device enumerated.
+ * @ctrl: Controller with which device is enumerated.
+ * @e_addr: 6-byte elemental address of the device.
+ * @e_len: buffer length for e_addr
+ * @laddr: Return logical address (if valid flag is false)
+ * @valid: true if laddr holds a valid address that controller wants to
+ *	set for this enumeration address. Otherwise framework sets index into
+ *	address table as logical address.
+ * Called by controller in response to REPORT_PRESENT. Framework will assign
+ * a logical address to this enumeration address.
+ * Function returns -EXFULL to indicate that all logical addresses are already
+ * taken.
+ */
+extern int slim_assign_laddr(struct slim_controller *ctrl, const u8 *e_addr,
+				u8 e_len, u8 *laddr, bool valid);
+
+/*
+ * slim_report_absent: Controller calls this function when a device
+ *	reports absent, OR when the device cannot be communicated with
+ * @sbdev: Device that cannot be reached, or that sent report absent
+ */
+void slim_report_absent(struct slim_device *sbdev);
+
+/*
+ * slim_framer_booted: This function is called by controller after the active
+ * framer has booted (using Bus Reset sequence, or after it has shutdown and has
+ * come back up). Components and devices on the bus may be in an undefined
+ * state; this function triggers their drivers to bring them back to the
+ * reset state so that they can acquire sync, report present and be
+ * operational again.
+ */
+void slim_framer_booted(struct slim_controller *ctrl);
+
+/*
+ * slim_msg_response: Deliver Message response received from a device to the
+ *	framework.
+ * @ctrl: Controller handle
+ * @reply: Reply received from the device
+ * @len: Length of the reply
+ * @tid: Transaction ID received with which framework can associate reply.
+ * Called by controller to inform framework about the response received.
+ * This keeps the API asynchronous; the controller driver doesn't need to
+ * manage a table of its own beyond the TID-to-buffer mapping maintained by
+ * the framework.
+ */
+extern void slim_msg_response(struct slim_controller *ctrl, u8 *reply, u8 tid,
+				u8 len);
+
+/*
+ * slim_busnum_to_ctrl: Map bus number to controller
+ * @busnum: Bus number
+ * Returns controller representing this bus number
+ */
+extern struct slim_controller *slim_busnum_to_ctrl(u32 busnum);
+
+/*
+ * slim_ctrl_add_boarddevs: Add devices registered by board-info
+ * @ctrl: Controller to which these devices are to be added.
+ * This API is called by the controller when it is up and running.
+ * If devices on a controller were registered before the controller itself,
+ * this makes sure that they get probed when the controller is up.
+ */
+extern void slim_ctrl_add_boarddevs(struct slim_controller *ctrl);
+
+extern const
+struct slim_device_id *slim_get_device_id(const struct slim_device *sdev);
+
+/*
+ * slim_register_board_info: Board-initialization routine.
+ * @info: List of all devices on all controllers present on the board.
+ * @n: number of entries.
+ * API enumerates respective devices on corresponding controller.
+ * Called from board-init function.
+ */
+#ifdef CONFIG_SLIMBUS
+extern int slim_register_board_info(struct slim_boardinfo const *info,
+					unsigned n);
+#else
+static inline int slim_register_board_info(struct slim_boardinfo const *info,
+					unsigned n)
+{
+	return 0;
+}
+#endif
+
+static inline void *slim_get_ctrldata(const struct slim_controller *dev)
+{
+	return dev_get_drvdata(&dev->dev);
+}
+
+static inline void slim_set_ctrldata(struct slim_controller *dev, void *data)
+{
+	dev_set_drvdata(&dev->dev, data);
+}
+
+static inline void *slim_get_devicedata(const struct slim_device *dev)
+{
+	return dev_get_drvdata(&dev->dev);
+}
+
+static inline void slim_set_clientdata(struct slim_device *dev, void *data)
+{
+	dev_set_drvdata(&dev->dev, data);
+}
+#endif /* _LINUX_SLIMBUS_H */
diff -Nruw linux-4.4.115-fbx/include/linux/soundwire./soundwire.h linux-4.4.115-fbx/include/linux/soundwire/soundwire.h
--- linux-4.4.115-fbx/include/linux/soundwire./soundwire.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/include/linux/soundwire/soundwire.h	2019-01-22 16:16:28.391290670 +0100
@@ -0,0 +1,312 @@
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _LINUX_SOUNDWIRE_H
+#define _LINUX_SOUNDWIRE_H
+#include <linux/device.h>
+#include <linux/mutex.h>
+#include <linux/mod_devicetable.h>
+
+extern struct bus_type soundwire_type;
+
+/* Soundwire supports max. of 8 channels per port */
+#define SWR_MAX_CHANNEL_NUM	8
+/* Soundwire supports max. of 14 ports on each device */
+#define SWR_MAX_DEV_PORT_NUM	14
+/* Maximum number of slave devices that a master can control */
+#define SWR_MAX_DEV_NUM		11
+/* Maximum number of ports on master so that it can accommodate all the port
+ * configurations of all devices
+ */
+#define SWR_MAX_MSTR_PORT_NUM	(SWR_MAX_DEV_NUM * SWR_MAX_DEV_PORT_NUM)
+
+/* Indicates soundwire devices group information */
+enum {
+	SWR_GROUP_NONE = 0,
+	SWR_GROUP_12 = 12,
+	SWR_GROUP_13 = 13,
+	SWR_BROADCAST = 15,
+};
+
+/*
+ * struct swr_port_info - represent soundwire frame shape
+ * @dev_id: logical device number of the soundwire slave device
+ * @port_en: flag indicates whether the port is enabled
+ * @port_id: logical port number of the soundwire slave device
+ * @offset1: sample offset indicating the offset of the channel
+ * from the start of the frame
+ * @offset2: channel offset indicating the offset between two channels
+ * @sinterval: sample interval indicates spacing from one sample
+ * event to the next
+ * @ch_en: channels in a port that need to be enabled
+ * @num_ch: number of channels enabled in a port
+ * @ch_rate: sampling rate of the channel with which data will be
+ * transferred
+ *
+ * Soundwire frame shape is created based on swr_port_info struct
+ * parameters.
+ */
+struct swr_port_info {
+	u8 dev_id;
+	u8 port_en;
+	u8 port_id;
+	u8 offset1;
+	u8 offset2;
+	u8 sinterval;
+	u8 ch_en;
+	u8 num_ch;
+	u32 ch_rate;
+};
+
+/*
+ * struct swr_params - represent transfer of data from soundwire slave
+ * to soundwire master
+ * @tid: transaction ID to track each transaction
+ * @dev_id: logical device number of the soundwire slave device
+ * @num_port: number of ports that need to be configured
+ * @port_id: array of logical port numbers of the soundwire slave device
+ * @num_ch: array of number of channels enabled
+ * @ch_rate: array of sampling rate of different channels that need to
+ * be configured
+ * @ch_en: array of channels mask for all the ports
+ */
+struct swr_params {
+	u8 tid;
+	u8 dev_id;
+	u8 num_port;
+	u8 port_id[SWR_MAX_DEV_PORT_NUM];
+	u8 num_ch[SWR_MAX_DEV_PORT_NUM];
+	u32 ch_rate[SWR_MAX_DEV_PORT_NUM];
+	u8 ch_en[SWR_MAX_DEV_PORT_NUM];
+};
+
+/*
+ * struct swr_reg - struct to handle soundwire slave register read/writes
+ * @tid: transaction id for reg read/writes
+ * @dev_id: logical device number of the soundwire slave device
+ * @regaddr: 16 bit regaddr of soundwire slave
+ * @buf: value to be written/read to/from regaddr
+ * @len: length of the buffer buf
+ */
+struct swr_reg {
+	u8  tid;
+	u8  dev_id;
+	u32 regaddr;
+	u32 *buf;
+	u32 len;
+};
+
+/*
+ * struct swr_master - Interface to the soundwire master controller
+ * @dev: device interface to this driver
+ * @list: link with other soundwire master controllers
+ * @bus_num: board/SoC specific identifier for a soundwire master
+ * @mlock: mutex protecting master data structures
+ * @devices: list of devices on this master
+ * @port: logical port numbers of the soundwire master. This array
+ * can hold the maximum number of master ports, which equals the
+ * number of slave devices multiplied by the number of ports in
+ * each slave device
+ * @port_txn: table of port config transactions with transaction id
+ * @reg_txn: table of register transactions with transaction id
+ * @last_tid: size of table port_txn (can't grow beyond 256 since
+ * tid is 8 bits)
+ * @num_port: number of active ports on soundwire master
+ * @num_dev: number of slave devices enumerated on this master
+ * @gr_sid: slave id used by the group for write operations
+ * @connect_port: callback for configuration of soundwire port(s)
+ * @disconnect_port: callback for disable of soundwire port(s)
+ * @read: callback for soundwire slave register read
+ * @write: callback for soundwire slave register write
+ * @bulk_write: callback for bulk write of soundwire slave registers
+ * @get_logical_dev_num: callback to get soundwire slave logical
+ * device number
+ * @slvdev_datapath_control: callback to enable/disable the slave
+ * device data path
+ * @remove_from_group: callback to remove the slave device from its
+ * group
+ */
+struct swr_master {
+	struct device dev;
+	struct list_head list;
+	unsigned int bus_num;
+	struct mutex mlock;
+	struct list_head devices;
+	struct swr_port_info port[SWR_MAX_MSTR_PORT_NUM];
+	struct swr_params **port_txn;
+	struct swr_reg **reg_txn;
+	u8 last_tid;
+	u8 num_port;
+	u8 num_dev;
+	u8 gr_sid;
+	int (*connect_port)(struct swr_master *mstr, struct swr_params *txn);
+	int (*disconnect_port)(struct swr_master *mstr, struct swr_params *txn);
+	int (*read)(struct swr_master *mstr, u8 dev_num, u16 reg_addr,
+			void *buf, u32 len);
+	int (*write)(struct swr_master *mstr, u8 dev_num, u16 reg_addr,
+			const void *buf);
+	int (*bulk_write)(struct swr_master *master, u8 dev_num, void *reg,
+			  const void *buf, size_t len);
+	int (*get_logical_dev_num)(struct swr_master *mstr, u64 dev_id,
+				u8 *dev_num);
+	void (*slvdev_datapath_control)(struct swr_master *mstr, bool enable);
+	bool (*remove_from_group)(struct swr_master *mstr);
+};
+
+static inline struct swr_master *to_swr_master(struct device *dev)
+{
+	return dev ? container_of(dev, struct swr_master, dev) : NULL;
+}
+
+/*
+ * struct swr_device - represent a soundwire slave device
+ * @name: name of the device, defined in the devicetree binding under the
+ * soundwire slave device node as the compatible field.
+ * @master: soundwire master managing the bus hosting this device
+ * @driver: Device's driver. Pointer to access routines
+ * @dev_list: list of devices on a controller
+ * @dev_num: logical device number of the soundwire slave device
+ * @dev: driver model representation of the device
+ * @addr: represents "ea-addr", which is the unique id of the soundwire slave
+ * device
+ * @group_id: group id supported by the slave device
+ */
+struct swr_device {
+	char name[SOUNDWIRE_NAME_SIZE];
+	struct swr_master *master;
+	struct swr_driver *driver;
+	struct list_head dev_list;
+	u8               dev_num;
+	struct device    dev;
+	unsigned long    addr;
+	u8 group_id;
+};
+
+static inline struct swr_device *to_swr_device(struct device *dev)
+{
+	return dev ? container_of(dev, struct swr_device, dev) : NULL;
+}
+
+/*
+ * struct swr_driver - Manage soundwire slave device driver
+ * @probe: binds this driver to soundwire device
+ * @remove: unbinds this driver from soundwire device
+ * @shutdown: standard shutdown callback used during power down/halt
+ * @suspend: standard suspend callback used during system suspend
+ * @resume: standard resume callback used during system resume
+ * @device_up: called when the device reports present and is ready
+ * @device_down: called when the device reports absent or the bus goes down
+ * @reset_device: called to reset the device after the master is (re)booted
+ * @driver: soundwire device drivers should initialize name and
+ * owner field of this structure
+ * @id_table: list of soundwire devices supported by this driver
+ */
+struct swr_driver {
+	int	(*probe)(struct swr_device *swr);
+	int	(*remove)(struct swr_device *swr);
+	void	(*shutdown)(struct swr_device *swr);
+	int	(*suspend)(struct swr_device *swr, pm_message_t pmesg);
+	int	(*resume)(struct swr_device *swr);
+	int	(*device_up)(struct swr_device *swr);
+	int	(*device_down)(struct swr_device *swr);
+	int	(*reset_device)(struct swr_device *swr);
+	struct device_driver		driver;
+	const struct swr_device_id	*id_table;
+};
+
+static inline struct swr_driver *to_swr_driver(struct device_driver *drv)
+{
+	return drv ? container_of(drv, struct swr_driver, driver) : NULL;
+}
+
+/*
+ * struct swr_boardinfo - Declare board info for soundwire device bringup
+ * @name: name to initialize swr_device.name
+ * @bus_num: identifies which soundwire master parents the soundwire
+ * slave_device
+ * @addr: represents "ea-addr" of soundwire slave device
+ * @of_node: pointer to OpenFirmware device node
+ * @swr_slave: device to be registered with soundwire
+ */
+struct swr_boardinfo {
+	char               name[SOUNDWIRE_NAME_SIZE];
+	int                bus_num;
+	u64		   addr;
+	struct device_node *of_node;
+	struct swr_device  *swr_slave;
+};
+
+static inline void *swr_get_ctrl_data(const struct swr_master *master)
+{
+	return master ? dev_get_drvdata(&master->dev) : NULL;
+}
+
+static inline void swr_set_ctrl_data(struct swr_master *master, void *data)
+{
+	dev_set_drvdata(&master->dev, data);
+}
+
+static inline void *swr_get_dev_data(const struct swr_device *dev)
+{
+	return dev ? dev_get_drvdata(&dev->dev) : NULL;
+}
+
+static inline void swr_set_dev_data(struct swr_device *dev, void *data)
+{
+	dev_set_drvdata(&dev->dev, data);
+}
+
+extern int swr_startup_devices(struct swr_device *swr_dev);
+
+extern struct swr_device *swr_new_device(struct swr_master *master,
+				struct swr_boardinfo const *info);
+
+extern int of_register_swr_devices(struct swr_master *master);
+
+extern void swr_port_response(struct swr_master *mstr, u8 tid);
+
+extern int swr_get_logical_dev_num(struct swr_device *dev, u64 dev_id,
+			u8 *dev_num);
+
+extern int swr_read(struct swr_device *dev, u8 dev_num, u16 reg_addr,
+			void *buf, u32 len);
+
+extern int swr_write(struct swr_device *dev, u8 dev_num, u16 reg_addr,
+			const void *buf);
+
+extern int swr_bulk_write(struct swr_device *dev, u8 dev_num, void *reg_addr,
+			  const void *buf, size_t len);
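+
+/*
+ * Example: single-register write followed by a read-back. A minimal sketch;
+ * the register address is a hypothetical placeholder and swr_dev is the
+ * slave handle given to the driver's probe.
+ *
+ *	u8 val = 0x01;
+ *	swr_write(swr_dev, swr_dev->dev_num, 0x46, &val);
+ *	swr_read(swr_dev, swr_dev->dev_num, 0x46, &val, 1);
+ */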
+
+extern int swr_connect_port(struct swr_device *dev, u8 *port_id, u8 num_port,
+				u8 *ch_mask, u32 *ch_rate, u8 *num_ch);
+
+extern int swr_disconnect_port(struct swr_device *dev,
+				u8 *port_id, u8 num_port);
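+
+/*
+ * Example: enable a stereo stream on one slave port, then tear it down. A
+ * minimal sketch; the port number and channel mask are hypothetical.
+ *
+ *	u8 port_id = 1, ch_mask = 0x3, num_ch = 2;
+ *	u32 ch_rate = 48000;
+ *
+ *	ret = swr_connect_port(swr_dev, &port_id, 1, &ch_mask, &ch_rate,
+ *			       &num_ch);
+ *	...
+ *	swr_disconnect_port(swr_dev, &port_id, 1);
+ */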
+
+extern int swr_set_device_group(struct swr_device *swr_dev, u8 id);
+
+extern int swr_driver_register(struct swr_driver *drv);
+
+extern void swr_driver_unregister(struct swr_driver *drv);
+
+extern int swr_add_device(struct swr_master *master,
+				struct swr_device *swrdev);
+extern void swr_remove_device(struct swr_device *swr);
+
+extern void swr_master_add_boarddevices(struct swr_master *master);
+
+extern void swr_unregister_master(struct swr_master *master);
+
+extern int swr_register_master(struct swr_master *master);
+
+extern int swr_device_up(struct swr_device *swr_dev);
+
+extern int swr_device_down(struct swr_device *swr_dev);
+
+extern int swr_reset_device(struct swr_device *swr_dev);
+
+extern int swr_slvdev_datapath_control(struct swr_device *swr_dev, u8 dev_num,
+				       bool enable);
+extern int swr_remove_from_group(struct swr_device *dev, u8 dev_num);
+
+extern void swr_remove_device(struct swr_device *swr_dev);
+#endif /* _LINUX_SOUNDWIRE_H */
diff -Nruw linux-4.4.115-fbx/include/linux/soundwire./swr-wcd.h linux-4.4.115-fbx/include/linux/soundwire/swr-wcd.h
--- linux-4.4.115-fbx/include/linux/soundwire./swr-wcd.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/include/linux/soundwire/swr-wcd.h	2019-01-22 16:16:28.391290670 +0100
@@ -0,0 +1,35 @@
+/* Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _LINUX_SWR_WCD_H
+#define _LINUX_SWR_WCD_H
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/device.h>
+#include <linux/bitops.h>
+
+enum {
+	SWR_CH_MAP,
+	SWR_DEVICE_DOWN,
+	SWR_DEVICE_UP,
+	SWR_SUBSYS_RESTART,
+	SWR_SET_NUM_RX_CH,
+};
+
+struct swr_mstr_port {
+	int num_port;
+	u8 *port;
+};
+
+extern int swrm_wcd_notify(struct platform_device *pdev, u32 id, void *data);
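+
+/*
+ * Example: a codec driver notifying the master around a subsystem restart.
+ * A minimal sketch; pdev is assumed to be the soundwire master's platform
+ * device.
+ *
+ *	swrm_wcd_notify(pdev, SWR_DEVICE_DOWN, NULL);
+ *	...
+ *	swrm_wcd_notify(pdev, SWR_DEVICE_UP, NULL);
+ */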
+
+#endif /* _LINUX_SWR_WCD_H */
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/include/linux/spi/qcom-spi.h	2019-01-22 16:16:28.395290706 +0100
@@ -0,0 +1,58 @@
+/* Copyright (c) 2014-2015 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+/*
+ * SPI driver for Qualcomm MSM platforms.
+ */
+
+/**
+ * msm_spi_platform_data: msm spi-controller's configuration data
+ *
+ * @max_clock_speed max spi clock speed
+ * @master_id master id number of the controller's wrapper (BLSP or GSBI).
+ *       When zero, clock path voting is disabled.
+ * @gpio_config pointer to function for configuring gpio
+ * @gpio_release pointer to function for releasing gpio pins
+ * @dma_config function pointer for configuring dma engine
+ * @pm_lat power management latency
+ * @infinite_mode use FIFO mode in infinite mode
+ * @ver_reg_exists if the version register exists
+ * @use_bam true if BAM is available
+ * @bam_consumer_pipe_index BAM consumer pipe
+ * @bam_producer_pipe_index BAM producer pipe
+ * @rt_priority true if RT thread
+ * @use_pinctrl true if pinctrl library is used
+ * @is_shared true when qup is shared between ee's
+ */
+struct msm_spi_platform_data {
+	u32 max_clock_speed;
+	u32  master_id;
+	u32 bus_width;
+	int (*gpio_config)(void);
+	void (*gpio_release)(void);
+	int (*dma_config)(void);
+	const char *rsl_id;
+	u32  pm_lat;
+	u32  infinite_mode;
+	bool ver_reg_exists;
+	bool use_bam;
+	bool slv_test;
+	u32  bam_consumer_pipe_index;
+	u32  bam_producer_pipe_index;
+	bool rt_priority;
+	bool use_pinctrl;
+	bool is_shared;
+	bool is_slv_ctrl;
+};
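+
+/*
+ * Example: board-file configuration for a BAM-capable controller. A minimal
+ * sketch; all values are hypothetical.
+ *
+ *	static struct msm_spi_platform_data spi_pdata = {
+ *		.max_clock_speed	 = 19200000,
+ *		.use_bam		 = true,
+ *		.bam_consumer_pipe_index = 12,
+ *		.bam_producer_pipe_index = 13,
+ *	};
+ */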
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/include/linux/stackdepot.h	2019-01-22 16:16:28.395290706 +0100
@@ -0,0 +1,32 @@
+/*
+ * A generic stack depot implementation
+ *
+ * Author: Alexander Potapenko <glider@google.com>
+ * Copyright (C) 2016 Google, Inc.
+ *
+ * Based on code by Dmitry Chernenkov.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _LINUX_STACKDEPOT_H
+#define _LINUX_STACKDEPOT_H
+
+typedef u32 depot_stack_handle_t;
+
+struct stack_trace;
+
+depot_stack_handle_t depot_save_stack(struct stack_trace *trace, gfp_t flags);
+
+void depot_fetch_stack(depot_stack_handle_t handle, struct stack_trace *trace);
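+
+/*
+ * Example: deduplicate a stack trace behind a 32-bit handle. A minimal
+ * sketch; save_stack_trace() is assumed from <linux/stacktrace.h>, and the
+ * stack_trace fields used here (max_entries, entries) come from that header.
+ *
+ *	unsigned long entries[16];
+ *	struct stack_trace trace = {
+ *		.max_entries = ARRAY_SIZE(entries),
+ *		.entries = entries,
+ *	};
+ *	depot_stack_handle_t handle;
+ *
+ *	save_stack_trace(&trace);
+ *	handle = depot_save_stack(&trace, GFP_KERNEL);
+ *	...
+ *	depot_fetch_stack(handle, &trace);
+ */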
+
+#endif
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/include/linux/switch.h	2019-01-22 16:16:28.403290779 +0100
@@ -0,0 +1,53 @@
+/*
+ *  Switch class driver
+ *
+ * Copyright (C) 2008 Google, Inc.
+ * Author: Mike Lockwood <lockwood@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+*/
+
+#ifndef __LINUX_SWITCH_H__
+#define __LINUX_SWITCH_H__
+
+struct switch_dev {
+	const char	*name;
+	struct device	*dev;
+	int		index;
+	int		state;
+
+	ssize_t	(*print_name)(struct switch_dev *sdev, char *buf);
+	ssize_t	(*print_state)(struct switch_dev *sdev, char *buf);
+};
+
+struct gpio_switch_platform_data {
+	const char *name;
+	unsigned 	gpio;
+
+	/* if NULL, switch_dev.name will be printed */
+	const char *name_on;
+	const char *name_off;
+	/* if NULL, "0" or "1" will be printed */
+	const char *state_on;
+	const char *state_off;
+};
+
+extern int switch_dev_register(struct switch_dev *sdev);
+extern void switch_dev_unregister(struct switch_dev *sdev);
+
+static inline int switch_get_state(struct switch_dev *sdev)
+{
+	return sdev->state;
+}
+
+extern void switch_set_state(struct switch_dev *sdev, int state);
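+
+/*
+ * Example: report headset insertion through the switch class. A minimal
+ * sketch; "h2w" is the conventional Android headset switch name.
+ *
+ *	static struct switch_dev headset_switch = {
+ *		.name = "h2w",
+ *	};
+ *
+ *	switch_dev_register(&headset_switch);
+ *	switch_set_state(&headset_switch, 1);
+ *
+ * State 1 reports "inserted"; 0 reports "removed".
+ */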
+
+#endif /* __LINUX_SWITCH_H__ */
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/include/linux/usb/audio-v3.h	2019-01-22 16:16:28.411290851 +0100
@@ -0,0 +1,172 @@
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * This file holds USB constants and structures defined
+ * by the USB Device Class Definition for Audio Devices in version 3.0.
+ * Comments below reference relevant sections of the documents contained
+ * in http://www.usb.org/developers/docs/devclass_docs/USB_Audio_v3.0.zip
+ */
+
+#ifndef __LINUX_USB_AUDIO_V3_H
+#define __LINUX_USB_AUDIO_V3_H
+
+#include <linux/types.h>
+
+#define UAC3_MIXER_UNIT_V3	0x05
+#define UAC3_FEATURE_UNIT_V3	0x07
+#define UAC3_CLOCK_SOURCE	0x0b
+
+#define BADD_MAXPSIZE_SYNC_MONO_16	0x0060
+#define BADD_MAXPSIZE_SYNC_MONO_24	0x0090
+#define BADD_MAXPSIZE_SYNC_STEREO_16	0x00c0
+#define BADD_MAXPSIZE_SYNC_STEREO_24	0x0120
+
+#define BADD_MAXPSIZE_ASYNC_MONO_16	0x0062
+#define BADD_MAXPSIZE_ASYNC_MONO_24	0x0093
+#define BADD_MAXPSIZE_ASYNC_STEREO_16	0x00c4
+#define BADD_MAXPSIZE_ASYNC_STEREO_24	0x0126
+
+#define BIT_RES_16_BIT		0x10
+#define BIT_RES_24_BIT		0x18
+
+#define SUBSLOTSIZE_16_BIT	0x02
+#define SUBSLOTSIZE_24_BIT	0x03
+
+#define BADD_SAMPLING_RATE	48000
+
+#define NUM_CHANNELS_MONO	1
+#define NUM_CHANNELS_STEREO	2
+#define BADD_CH_CONFIG_MONO	0
+#define BADD_CH_CONFIG_STEREO	3
+#define CLUSTER_ID_MONO		0x0001
+#define CLUSTER_ID_STEREO	0x0002
+
+#define FULL_ADC_PROFILE	0x01
+
+/* BADD Profile IDs */
+#define PROF_GENERIC_IO		0x20
+#define PROF_HEADPHONE		0x21
+#define PROF_SPEAKER		0x22
+#define PROF_MICROPHONE		0x23
+#define PROF_HEADSET		0x24
+#define PROF_HEADSET_ADAPTER	0x25
+#define PROF_SPEAKERPHONE	0x26
+
+/* BADD Entity IDs */
+#define BADD_OUT_TERM_ID_BAOF	0x03
+#define BADD_OUT_TERM_ID_BAIF	0x06
+#define BADD_IN_TERM_ID_BAOF	0x01
+#define BADD_IN_TERM_ID_BAIF	0x04
+#define BADD_FU_ID_BAOF		0x02
+#define BADD_FU_ID_BAIF		0x05
+#define BADD_CLOCK_SOURCE	0x09
+#define BADD_FU_ID_BAIOF	0x07
+#define BADD_MU_ID_BAIOF	0x08
+
+#define UAC_BIDIR_TERMINAL_HEADSET	0x0402
+#define UAC_BIDIR_TERMINAL_SPEAKERPHONE	0x0403
+
+#define NUM_BADD_DESCS		7
+
+struct uac3_input_terminal_descriptor {
+	__u8 bLength;
+	__u8 bDescriptorType;
+	__u8 bDescriptorSubtype;
+	__u8 bTerminalID;
+	__u16 wTerminalType;
+	__u8 bAssocTerminal;
+	__u8 bCSourceID;
+	__u32 bmControls;
+	__u16 wClusterDescrID;
+	__u16 wExTerminalDescrID;
+	__u16 wConnectorsDescrID;
+	__u16 wTerminalDescrStr;
+} __packed;
+
+#define UAC3_DT_INPUT_TERMINAL_SIZE	0x14
+
+extern struct uac3_input_terminal_descriptor badd_baif_in_term_desc;
+extern struct uac3_input_terminal_descriptor badd_baof_in_term_desc;
+
+struct uac3_output_terminal_descriptor {
+	__u8 bLength;
+	__u8 bDescriptorType;
+	__u8 bDescriptorSubtype;
+	__u8 bTerminalID;
+	__u16 wTerminalType;
+	__u8 bAssocTerminal;
+	__u8 bSourceID;
+	__u8 bCSourceID;
+	__u32 bmControls;
+	__u16 wExTerminalDescrID;
+	__u16 wConnectorsDescrID;
+	__u16 wTerminalDescrStr;
+} __packed;
+
+#define UAC3_DT_OUTPUT_TERMINAL_SIZE	0x13
+
+extern struct uac3_output_terminal_descriptor badd_baif_out_term_desc;
+extern struct uac3_output_terminal_descriptor badd_baof_out_term_desc;
+
+extern __u8 monoControls[];
+extern __u8 stereoControls[];
+extern __u8 badd_mu_src_ids[];
+
+struct uac3_mixer_unit_descriptor {
+	__u8 bLength;
+	__u8 bDescriptorType;
+	__u8 bDescriptorSubtype;
+	__u8 bUnitID;
+	__u8 bNrInPins;
+	__u8 *baSourceID;
+	__u16 wClusterDescrID;
+	__u8 bmMixerControls;
+	__u32 bmControls;
+	__u16 wMixerDescrStr;
+} __packed;
+
+#define UAC3_DT_MIXER_UNIT_SIZE		0x10
+
+extern struct uac3_mixer_unit_descriptor badd_baiof_mu_desc;
+
+struct uac3_feature_unit_descriptor {
+	__u8 bLength;
+	__u8 bDescriptorType;
+	__u8 bDescriptorSubtype;
+	__u8 bUnitID;
+	__u8 bSourceID;
+	__u8 *bmaControls;
+	__u16 wFeatureDescrStr;
+} __packed;
+
+extern struct uac3_feature_unit_descriptor badd_baif_fu_desc;
+extern struct uac3_feature_unit_descriptor badd_baof_fu_desc;
+extern struct uac3_feature_unit_descriptor badd_baiof_fu_desc;
+
+struct uac3_clock_source_descriptor {
+	__u8 bLength;
+	__u8 bDescriptorType;
+	__u8 bDescriptorSubtype;
+	__u8 bClockID;
+	__u8 bmAttributes;
+	__u32 bmControls;
+	__u8 bReferenceTerminal;
+	__u16 wClockSourceStr;
+} __packed;
+
+#define UAC3_DT_CLOCK_SRC_SIZE		0x0c
+
+extern struct uac3_clock_source_descriptor badd_clock_desc;
+
+extern void *badd_desc_list[];
+
+#endif /* __LINUX_USB_AUDIO_V3_H */
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/include/linux/usb/usbpd.h	2019-01-22 16:16:28.419290924 +0100
@@ -0,0 +1,159 @@
+/* Copyright (c) 2016, Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __LINUX_USB_USBPD_H
+#define __LINUX_USB_USBPD_H
+
+#include <linux/list.h>
+
+struct usbpd;
+
+/* Standard IDs */
+#define USBPD_SID			0xff00
+
+/* Structured VDM Command Type */
+enum usbpd_svdm_cmd_type {
+	SVDM_CMD_TYPE_INITIATOR,
+	SVDM_CMD_TYPE_RESP_ACK,
+	SVDM_CMD_TYPE_RESP_NAK,
+	SVDM_CMD_TYPE_RESP_BUSY,
+};
+
+/* Structured VDM Commands */
+#define USBPD_SVDM_DISCOVER_IDENTITY	0x1
+#define USBPD_SVDM_DISCOVER_SVIDS	0x2
+#define USBPD_SVDM_DISCOVER_MODES	0x3
+#define USBPD_SVDM_ENTER_MODE		0x4
+#define USBPD_SVDM_EXIT_MODE		0x5
+#define USBPD_SVDM_ATTENTION		0x6
+
+/*
+ * Implemented by client
+ */
+struct usbpd_svid_handler {
+	u16 svid;
+
+	/* Notified when VDM session established/reset; must be implemented */
+	void (*connect)(struct usbpd_svid_handler *hdlr);
+	void (*disconnect)(struct usbpd_svid_handler *hdlr);
+
+	/* Unstructured VDM */
+	void (*vdm_received)(struct usbpd_svid_handler *hdlr, u32 vdm_hdr,
+			const u32 *vdos, int num_vdos);
+
+	/* Structured VDM */
+	void (*svdm_received)(struct usbpd_svid_handler *hdlr, u8 cmd,
+			enum usbpd_svdm_cmd_type cmd_type, const u32 *vdos,
+			int num_vdos);
+
+	/* client should leave these blank; private members used by PD driver */
+	struct list_head entry;
+	bool discovered;
+};
+
+enum plug_orientation {
+	ORIENTATION_NONE,
+	ORIENTATION_CC1,
+	ORIENTATION_CC2,
+};
+
+#if IS_ENABLED(CONFIG_USB_PD_POLICY)
+/*
+ * Obtains an instance of usbpd from a DT phandle
+ */
+struct usbpd *devm_usbpd_get_by_phandle(struct device *dev,
+		const char *phandle);
+
+/*
+ * Called by client to handle specific SVID messages.
+ * Specify callback functions in the usbpd_svid_handler argument
+ */
+int usbpd_register_svid(struct usbpd *pd, struct usbpd_svid_handler *hdlr);
+
+void usbpd_unregister_svid(struct usbpd *pd, struct usbpd_svid_handler *hdlr);
+
+/*
+ * Transmit a VDM message.
+ */
+int usbpd_send_vdm(struct usbpd *pd, u32 vdm_hdr, const u32 *vdos,
+		int num_vdos);
+
+/*
+ * Transmit a Structured VDM message.
+ */
+int usbpd_send_svdm(struct usbpd *pd, u16 svid, u8 cmd,
+		enum usbpd_svdm_cmd_type cmd_type, int obj_pos,
+		const u32 *vdos, int num_vdos);
+
+/*
+ * Get current status of CC pin orientation.
+ *
+ * Return: ORIENTATION_CC1 or ORIENTATION_CC2 if attached,
+ *         otherwise ORIENTATION_NONE if not attached
+ */
+enum plug_orientation usbpd_get_plug_orientation(struct usbpd *pd);
+#else
+static inline struct usbpd *devm_usbpd_get_by_phandle(struct device *dev,
+		const char *phandle)
+{
+	return ERR_PTR(-ENODEV);
+}
+
+static inline int usbpd_register_svid(struct usbpd *pd,
+		struct usbpd_svid_handler *hdlr)
+{
+	return -EINVAL;
+}
+
+static inline void usbpd_unregister_svid(struct usbpd *pd,
+		struct usbpd_svid_handler *hdlr)
+{
+}
+
+static inline int usbpd_send_vdm(struct usbpd *pd, u32 vdm_hdr, const u32 *vdos,
+		int num_vdos)
+{
+	return -EINVAL;
+}
+
+static inline int usbpd_send_svdm(struct usbpd *pd, u16 svid, u8 cmd,
+		enum usbpd_svdm_cmd_type cmd_type, int obj_pos,
+		const u32 *vdos, int num_vdos)
+{
+	return -EINVAL;
+}
+
+static inline enum plug_orientation usbpd_get_plug_orientation(struct usbpd *pd)
+{
+	return ORIENTATION_NONE;
+}
+#endif /* IS_ENABLED(CONFIG_USB_PD_POLICY) */
+
+/*
+ * Additional helpers for Enter/Exit Mode commands
+ */
+
+static inline int usbpd_enter_mode(struct usbpd *pd, u16 svid, int mode,
+		const u32 *vdo)
+{
+	return usbpd_send_svdm(pd, svid, USBPD_SVDM_ENTER_MODE,
+			SVDM_CMD_TYPE_INITIATOR, mode, vdo, vdo ? 1 : 0);
+}
+
+static inline int usbpd_exit_mode(struct usbpd *pd, u16 svid, int mode,
+		const u32 *vdo)
+{
+	return usbpd_send_svdm(pd, svid, USBPD_SVDM_EXIT_MODE,
+			SVDM_CMD_TYPE_INITIATOR, mode, vdo, vdo ? 1 : 0);
+}
+
+#endif /* __LINUX_USB_USBPD_H */
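
A typical client (e.g. an alternate-mode driver) would use this interface
roughly as follows. This is a minimal sketch: the SVID value, the "usbpd"
phandle property name and the empty callback bodies are illustrative
assumptions, not part of this patch.

	#include <linux/err.h>
	#include <linux/platform_device.h>
	#include <linux/usb/usbpd.h>

	#define EXAMPLE_SVID	0xff01	/* illustrative vendor SVID */

	/* connect/disconnect must be implemented (see above) */
	static void example_connect(struct usbpd_svid_handler *hdlr)
	{
		/* VDM session is up; mode entry could start here */
	}

	static void example_disconnect(struct usbpd_svid_handler *hdlr)
	{
	}

	static struct usbpd_svid_handler example_handler = {
		.svid		= EXAMPLE_SVID,
		.connect	= example_connect,
		.disconnect	= example_disconnect,
	};

	static int example_probe(struct platform_device *pdev)
	{
		struct usbpd *pd;

		pd = devm_usbpd_get_by_phandle(&pdev->dev, "usbpd");
		if (IS_ERR(pd))
			return PTR_ERR(pd);

		return usbpd_register_svid(pd, &example_handler);
	}

Once connected, the client would typically call usbpd_enter_mode(pd,
EXAMPLE_SVID, 1, NULL) and usbpd_exit_mode() on the way out.
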
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/include/linux/wakelock.h	2019-01-22 16:16:28.423290960 +0100
@@ -0,0 +1,67 @@
+/* include/linux/wakelock.h
+ *
+ * Copyright (C) 2007-2012 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _LINUX_WAKELOCK_H
+#define _LINUX_WAKELOCK_H
+
+#include <linux/ktime.h>
+#include <linux/device.h>
+
+/* A wake_lock prevents the system from entering suspend or other low power
+ * states when active. If the type is set to WAKE_LOCK_SUSPEND, the wake_lock
+ * prevents a full system suspend.
+ */
+
+enum {
+	WAKE_LOCK_SUSPEND, /* Prevent suspend */
+	WAKE_LOCK_TYPE_COUNT
+};
+
+struct wake_lock {
+	struct wakeup_source ws;
+};
+
+static inline void wake_lock_init(struct wake_lock *lock, int type,
+				  const char *name)
+{
+	wakeup_source_init(&lock->ws, name);
+}
+
+static inline void wake_lock_destroy(struct wake_lock *lock)
+{
+	wakeup_source_trash(&lock->ws);
+}
+
+static inline void wake_lock(struct wake_lock *lock)
+{
+	__pm_stay_awake(&lock->ws);
+}
+
+static inline void wake_lock_timeout(struct wake_lock *lock, long timeout)
+{
+	__pm_wakeup_event(&lock->ws, jiffies_to_msecs(timeout));
+}
+
+static inline void wake_unlock(struct wake_lock *lock)
+{
+	__pm_relax(&lock->ws);
+}
+
+static inline int wake_lock_active(struct wake_lock *lock)
+{
+	return lock->ws.active;
+}
+
+#endif
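
This header is the legacy Android wakelock API expressed as a thin wrapper
over wakeup sources; note that the type argument of wake_lock_init() is
ignored, since only WAKE_LOCK_SUSPEND exists. A minimal usage sketch with
illustrative names:

	#include <linux/wakelock.h>

	static struct wake_lock example_wl;

	static int example_setup(void)
	{
		wake_lock_init(&example_wl, WAKE_LOCK_SUSPEND, "example");
		return 0;
	}

	static void example_rx_start(void)
	{
		/* block system suspend while the RX path is active */
		wake_lock(&example_wl);
	}

	static void example_rx_done(void)
	{
		wake_unlock(&example_wl);
	}
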
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/include/linux/wakeup_reason.h	2019-01-22 16:16:28.423290960 +0100
@@ -0,0 +1,32 @@
+/*
+ * include/linux/wakeup_reason.h
+ *
+ * Logs the reason which caused the kernel to resume
+ * from the suspend mode.
+ *
+ * Copyright (C) 2014 Google, Inc.
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _LINUX_WAKEUP_REASON_H
+#define _LINUX_WAKEUP_REASON_H
+
+#define MAX_SUSPEND_ABORT_LEN 256
+
+void log_wakeup_reason(int irq);
+int check_wakeup_reason(int irq);
+
+#ifdef CONFIG_SUSPEND
+void log_suspend_abort_reason(const char *fmt, ...);
+#else
+static inline void log_suspend_abort_reason(const char *fmt, ...) { }
+#endif
+
+#endif /* _LINUX_WAKEUP_REASON_H */
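
A suspend callback that must veto suspend can record why, so the reason is
visible in the wakeup statistics. A sketch, where the pending-work test is
hypothetical:

	#include <linux/wakeup_reason.h>

	static int example_suspend(struct device *dev)
	{
		if (example_has_pending_work(dev)) {	/* hypothetical */
			log_suspend_abort_reason("example: pending work");
			return -EBUSY;
		}
		return 0;
	}
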
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/include/linux/wcnss_wlan.h	2019-01-22 16:16:28.423290960 +0100
@@ -0,0 +1,166 @@
+/* Copyright (c) 2011-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _WCNSS_WLAN_H_
+#define _WCNSS_WLAN_H_
+
+#include <linux/device.h>
+#include <linux/sched.h>
+
+#define IRIS_REGULATORS		4
+#define PRONTO_REGULATORS	3
+
+enum wcnss_opcode {
+	WCNSS_WLAN_SWITCH_OFF = 0,
+	WCNSS_WLAN_SWITCH_ON,
+};
+
+enum wcnss_hw_type {
+	WCNSS_RIVA_HW = 0,
+	WCNSS_PRONTO_HW,
+};
+
+struct vregs_level {
+	int nominal_min;
+	int low_power_min;
+	int max_voltage;
+	int uA_load;
+};
+
+struct wcnss_wlan_config {
+	int	use_48mhz_xo;
+	int	is_pronto_vadc;
+	int	is_pronto_v3;
+	void __iomem	*msm_wcnss_base;
+	int	iris_id;
+	int	vbatt;
+	struct vregs_level pronto_vlevel[PRONTO_REGULATORS];
+	struct vregs_level iris_vlevel[IRIS_REGULATORS];
+};
+
+enum {
+	WCNSS_XO_48MHZ = 1,
+	WCNSS_XO_19MHZ,
+	WCNSS_XO_INVALID,
+};
+
+enum {
+	WCNSS_WLAN_DATA2,
+	WCNSS_WLAN_DATA1,
+	WCNSS_WLAN_DATA0,
+	WCNSS_WLAN_SET,
+	WCNSS_WLAN_CLK,
+	WCNSS_WLAN_MAX_GPIO,
+};
+
+#define WCNSS_VBATT_THRESHOLD           3500000
+#define WCNSS_VBATT_GUARD               20000
+#define WCNSS_VBATT_HIGH                3700000
+#define WCNSS_VBATT_LOW                 3300000
+#define WCNSS_VBATT_INITIAL             3000000
+#define WCNSS_WLAN_IRQ_INVALID -1
+#define HAVE_WCNSS_SUSPEND_RESUME_NOTIFY 1
+#define HAVE_WCNSS_RESET_INTR 1
+#define HAVE_WCNSS_CAL_DOWNLOAD 1
+#define HAVE_CBC_DONE 1
+#define HAVE_WCNSS_RX_BUFF_COUNT 1
+#define HAVE_WCNSS_SNOC_HIGH_FREQ_VOTING 1
+#define HAVE_WCNSS_5G_DISABLE 1
+#define WLAN_MAC_ADDR_SIZE (6)
+#define WLAN_RF_REG_ADDR_START_OFFSET	0x3
+#define WLAN_RF_REG_DATA_START_OFFSET	0xf
+#define WLAN_RF_READ_REG_CMD		0x3
+#define WLAN_RF_WRITE_REG_CMD		0x2
+#define WLAN_RF_READ_CMD_MASK		0x3fff
+#define WLAN_RF_CLK_WAIT_CYCLE		2
+#define WLAN_RF_PREPARE_CMD_DATA	5
+#define WLAN_RF_READ_DATA		6
+#define WLAN_RF_DATA_LEN		3
+#define WLAN_RF_DATA0_SHIFT		0
+#define WLAN_RF_DATA1_SHIFT		1
+#define WLAN_RF_DATA2_SHIFT		2
+#define PRONTO_PMU_OFFSET       0x1004
+#define WCNSS_PMU_CFG_GC_BUS_MUX_SEL_TOP   BIT(5)
+
+struct device *wcnss_wlan_get_device(void);
+void wcnss_get_monotonic_boottime(struct timespec *ts);
+struct resource *wcnss_wlan_get_memory_map(struct device *dev);
+int wcnss_wlan_get_dxe_tx_irq(struct device *dev);
+int wcnss_wlan_get_dxe_rx_irq(struct device *dev);
+void wcnss_wlan_register_pm_ops(struct device *dev,
+				const struct dev_pm_ops *pm_ops);
+void wcnss_wlan_unregister_pm_ops(struct device *dev,
+				const struct dev_pm_ops *pm_ops);
+void wcnss_register_thermal_mitigation(struct device *dev,
+				void (*tm_notify)(struct device *dev, int));
+void wcnss_unregister_thermal_mitigation(
+				void (*tm_notify)(struct device *dev, int));
+struct platform_device *wcnss_get_platform_device(void);
+struct wcnss_wlan_config *wcnss_get_wlan_config(void);
+void wcnss_set_iris_xo_mode(int iris_xo_mode_set);
+int wcnss_wlan_power(struct device *dev,
+				struct wcnss_wlan_config *cfg,
+				enum wcnss_opcode opcode,
+				int *iris_xo_mode_set);
+int wcnss_req_power_on_lock(char *driver_name);
+int wcnss_free_power_on_lock(char *driver_name);
+unsigned int wcnss_get_serial_number(void);
+int wcnss_get_wlan_mac_address(char mac_addr[WLAN_MAC_ADDR_SIZE]);
+void wcnss_allow_suspend(void);
+void wcnss_prevent_suspend(void);
+int wcnss_hardware_type(void);
+void *wcnss_prealloc_get(size_t size);
+int wcnss_prealloc_put(void *ptr);
+void wcnss_reset_fiq(bool clk_chk_en);
+void wcnss_suspend_notify(void);
+void wcnss_resume_notify(void);
+void wcnss_riva_log_debug_regs(void);
+void wcnss_pronto_log_debug_regs(void);
+int wcnss_is_hw_pronto_ver3(void);
+int wcnss_device_ready(void);
+bool wcnss_cbc_complete(void);
+int wcnss_device_is_shutdown(void);
+void wcnss_riva_dump_pmic_regs(void);
+int wcnss_xo_auto_detect_enabled(void);
+u32 wcnss_get_wlan_rx_buff_count(void);
+int wcnss_wlan_iris_xo_mode(void);
+int wcnss_wlan_dual_band_disabled(void);
+void wcnss_flush_work(struct work_struct *work);
+void wcnss_flush_delayed_work(struct delayed_work *dwork);
+void wcnss_init_work(struct work_struct *work, void *callbackptr);
+void wcnss_init_delayed_work(struct delayed_work *dwork, void *callbackptr);
+int wcnss_get_iris_name(char *iris_version);
+void wcnss_dump_stack(struct task_struct *task);
+void wcnss_snoc_vote(bool clk_chk_en);
+int wcnss_parse_voltage_regulator(struct wcnss_wlan_config *wlan_config,
+				  struct device *dev);
+
+#ifdef CONFIG_WCNSS_REGISTER_DUMP_ON_BITE
+void wcnss_log_debug_regs_on_bite(void);
+#else
+static inline void wcnss_log_debug_regs_on_bite(void)
+{
+}
+#endif
+int wcnss_set_wlan_unsafe_channel(
+				u16 *unsafe_ch_list, u16 ch_count);
+int wcnss_get_wlan_unsafe_channel(
+				u16 *unsafe_ch_list, u16 buffer_size,
+				u16 *ch_count);
+#define wcnss_wlan_get_drvdata(dev) dev_get_drvdata(dev)
+#define wcnss_wlan_set_drvdata(dev, data) dev_set_drvdata((dev), (data))
+/* WLAN driver uses these names */
+#define req_riva_power_on_lock(name) wcnss_req_power_on_lock(name)
+#define free_riva_power_on_lock(name) wcnss_free_power_on_lock(name)
+
+#endif /* _WCNSS_WLAN_H_ */
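
A WLAN driver would bring the WCNSS core up roughly as below. This is a
sketch only; the exact power-up ordering and error handling of the real
driver are not specified by this header.

	#include <linux/wcnss_wlan.h>

	static int example_wlan_power_up(void)
	{
		struct device *dev = wcnss_wlan_get_device();
		struct wcnss_wlan_config *cfg = wcnss_get_wlan_config();
		int xo_mode = WCNSS_XO_INVALID;
		int ret;

		if (!dev || !cfg)
			return -ENODEV;

		ret = wcnss_wlan_power(dev, cfg, WCNSS_WLAN_SWITCH_ON,
				       &xo_mode);
		if (ret)
			return ret;

		/* hold off suspend until firmware download completes */
		wcnss_prevent_suspend();
		return 0;
	}
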
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/include/linux/zcache.h	2019-01-22 16:16:28.427290996 +0100
@@ -0,0 +1,22 @@
+/*
+ * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#ifndef _LINUX_ZCACHE_H
+#define _LINUX_ZCACHE_H
+
+#ifdef CONFIG_ZCACHE
+extern u64 zcache_pages(void);
+#else
+static inline u64 zcache_pages(void) { return 0; }
+#endif
+
+#endif /* _LINUX_ZCACHE_H */
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/include/media/cec.h	2019-10-29 09:26:25.497221322 +0100
@@ -0,0 +1,417 @@
+/*
+ * cec - HDMI Consumer Electronics Control support header
+ *
+ * Copyright 2016 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef _MEDIA_CEC_H
+#define _MEDIA_CEC_H
+
+#include <linux/poll.h>
+#include <linux/fs.h>
+#include <linux/debugfs.h>
+#include <linux/device.h>
+#include <linux/cdev.h>
+#include <linux/kthread.h>
+#include <linux/timer.h>
+#include <linux/cec-funcs.h>
+#include <media/rc-core.h>
+#include <media/cec-notifier.h>
+
+/**
+ * struct cec_devnode - cec device node
+ * @dev:	cec device
+ * @cdev:	cec character device
+ * @minor:	device node minor number
+ * @registered:	the device was correctly registered
+ * @unregistered: the device was unregistered
+ * @lock:	lock to control access to the filehandle list
+ * @fhs:	the list of open filehandles (cec_fh)
+ *
+ * This structure represents a cec-related device node.
+ *
+ * The parent device (dev.parent) is a physical device. It must be set by
+ * core or device drivers before registering the node.
+ */
+struct cec_devnode {
+	/* sysfs */
+	struct device dev;
+	struct cdev cdev;
+
+	/* device info */
+	int minor;
+	bool registered;
+	bool unregistered;
+	struct list_head fhs;
+	struct mutex lock;
+};
+
+struct cec_adapter;
+struct cec_data;
+
+struct cec_data {
+	struct list_head list;
+	struct list_head xfer_list;
+	struct cec_adapter *adap;
+	struct cec_msg msg;
+	struct cec_fh *fh;
+	struct delayed_work work;
+	struct completion c;
+	u8 attempts;
+	bool blocking;
+	bool completed;
+};
+
+struct cec_msg_entry {
+	struct list_head	list;
+	struct cec_msg		msg;
+};
+
+#define CEC_NUM_EVENTS		CEC_EVENT_LOST_MSGS
+
+struct cec_fh {
+	struct list_head	list;
+	struct list_head	xfer_list;
+	struct cec_adapter	*adap;
+	u8			mode_initiator;
+	u8			mode_follower;
+
+	/* Events */
+	wait_queue_head_t	wait;
+	unsigned int		pending_events;
+	struct cec_event	events[CEC_NUM_EVENTS];
+	struct mutex		lock;
+	struct list_head	msgs; /* queued messages */
+	unsigned int		queued_msgs;
+};
+
+#define CEC_SIGNAL_FREE_TIME_RETRY		3
+#define CEC_SIGNAL_FREE_TIME_NEW_INITIATOR	5
+#define CEC_SIGNAL_FREE_TIME_NEXT_XFER		7
+
+/* The nominal data bit period is 2.4 ms */
+#define CEC_FREE_TIME_TO_USEC(ft)		((ft) * 2400)
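+/*
+ * Example: a transmit that follows a previous transfer must be preceded
+ * by a signal free time of CEC_SIGNAL_FREE_TIME_NEXT_XFER data bit
+ * periods, i.e. CEC_FREE_TIME_TO_USEC(7) = 7 * 2400 = 16800 us.
+ */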
+
+struct cec_adap_ops {
+	/* Low-level callbacks */
+	int (*adap_enable)(struct cec_adapter *adap, bool enable);
+	int (*adap_monitor_all_enable)(struct cec_adapter *adap, bool enable);
+	int (*adap_log_addr)(struct cec_adapter *adap, u8 logical_addr);
+	int (*adap_transmit)(struct cec_adapter *adap, u8 attempts,
+			     u32 signal_free_time, struct cec_msg *msg);
+	void (*adap_status)(struct cec_adapter *adap, struct seq_file *file);
+	void (*adap_free)(struct cec_adapter *adap);
+
+	/* High-level CEC message callback */
+	int (*received)(struct cec_adapter *adap, struct cec_msg *msg);
+};
+
+/*
+ * The minimum message length you can receive (excepting poll messages) is 2.
+ * With a transfer rate of at most 36 bytes per second this makes 18 messages
+ * per second worst case.
+ *
+ * We queue at most 3 seconds worth of received messages. The CEC specification
+ * requires that messages are replied to within a second, so 3 seconds should
+ * give more than enough margin. Since most messages are actually more than 2
+ * bytes, this is in practice a lot more than 3 seconds.
+ */
+#define CEC_MAX_MSG_RX_QUEUE_SZ		(18 * 3)
+
+/*
+ * The transmit queue is limited to 1 second worth of messages (worst case).
+ * Messages can be transmitted by userspace and kernel space. But for both it
+ * makes no sense to have a lot of messages queued up. One second seems
+ * reasonable.
+ */
+#define CEC_MAX_MSG_TX_QUEUE_SZ		(18 * 1)
+
+struct cec_adapter {
+	struct module *owner;
+	char name[32];
+	struct cec_devnode devnode;
+	struct mutex lock;
+	struct rc_dev *rc;
+
+	struct list_head transmit_queue;
+	unsigned int transmit_queue_sz;
+	struct list_head wait_queue;
+	struct cec_data *transmitting;
+
+	struct task_struct *kthread_config;
+	struct completion config_completion;
+
+	struct task_struct *kthread;
+	wait_queue_head_t kthread_waitq;
+	wait_queue_head_t waitq;
+
+	const struct cec_adap_ops *ops;
+	void *priv;
+	u32 capabilities;
+	u8 available_log_addrs;
+
+	u16 phys_addr;
+	bool needs_hpd;
+	bool is_configuring;
+	bool is_configured;
+	u8 last_initiator;
+	u32 monitor_all_cnt;
+	u32 follower_cnt;
+	struct cec_fh *cec_follower;
+	struct cec_fh *cec_initiator;
+	bool passthrough;
+	struct cec_log_addrs log_addrs;
+
+	u32 tx_timeouts;
+
+#ifdef CONFIG_MEDIA_CEC_RC
+	bool rc_repeating;
+	int rc_last_scancode;
+	u64 rc_last_keypress;
+#endif
+#ifdef CONFIG_CEC_NOTIFIER
+	struct cec_notifier *notifier;
+#endif
+
+	struct dentry *cec_dir;
+	struct dentry *status_file;
+
+	u16 phys_addrs[15];
+	u32 sequence;
+
+	char input_name[32];
+	char input_phys[32];
+	char input_drv[32];
+};
+
+static inline bool cec_has_log_addr(const struct cec_adapter *adap, u8 log_addr)
+{
+	return adap->log_addrs.log_addr_mask & (1 << log_addr);
+}
+
+static inline bool cec_is_sink(const struct cec_adapter *adap)
+{
+	return adap->phys_addr == 0;
+}
+
+/**
+ * cec_is_registered() - is the CEC adapter registered?
+ *
+ * @adap:	the CEC adapter, may be NULL.
+ *
+ * Return: true if the adapter is registered, false otherwise.
+ */
+static inline bool cec_is_registered(const struct cec_adapter *adap)
+{
+	return adap && adap->devnode.registered;
+}
+
+#define cec_phys_addr_exp(pa) \
+	((pa) >> 12), ((pa) >> 8) & 0xf, ((pa) >> 4) & 0xf, (pa) & 0xf
+
+struct edid;
+
+#if IS_REACHABLE(CONFIG_CEC_CORE)
+struct cec_adapter *cec_allocate_adapter(const struct cec_adap_ops *ops,
+		void *priv, const char *name, u32 caps, u8 available_las);
+int cec_register_adapter(struct cec_adapter *adap, struct device *parent);
+void cec_unregister_adapter(struct cec_adapter *adap);
+void cec_delete_adapter(struct cec_adapter *adap);
+
+int cec_s_log_addrs(struct cec_adapter *adap, struct cec_log_addrs *log_addrs,
+		    bool block);
+void cec_s_phys_addr(struct cec_adapter *adap, u16 phys_addr,
+		     bool block);
+void cec_s_phys_addr_from_edid(struct cec_adapter *adap,
+			       const struct edid *edid);
+int cec_transmit_msg(struct cec_adapter *adap, struct cec_msg *msg,
+		     bool block);
+
+/* Called by the adapter */
+void cec_transmit_done_ts(struct cec_adapter *adap, u8 status,
+			  u8 arb_lost_cnt, u8 nack_cnt, u8 low_drive_cnt,
+			  u8 error_cnt, ktime_t ts);
+
+static inline void cec_transmit_done(struct cec_adapter *adap, u8 status,
+				     u8 arb_lost_cnt, u8 nack_cnt,
+				     u8 low_drive_cnt, u8 error_cnt)
+{
+	cec_transmit_done_ts(adap, status, arb_lost_cnt, nack_cnt,
+			     low_drive_cnt, error_cnt, ktime_get());
+}
+/*
+ * Simplified version of cec_transmit_done() for hardware that doesn't retry
+ * failed transmits: there is always just one attempt, so the status alone
+ * is sufficient.
+ */
+void cec_transmit_attempt_done_ts(struct cec_adapter *adap,
+				  u8 status, ktime_t ts);
+
+static inline void cec_transmit_attempt_done(struct cec_adapter *adap,
+					     u8 status)
+{
+	cec_transmit_attempt_done_ts(adap, status, ktime_get());
+}
+
+void cec_received_msg_ts(struct cec_adapter *adap,
+			 struct cec_msg *msg, ktime_t ts);
+
+static inline void cec_received_msg(struct cec_adapter *adap,
+				    struct cec_msg *msg)
+{
+	cec_received_msg_ts(adap, msg, ktime_get());
+}
+
+/**
+ * cec_get_edid_phys_addr() - find and return the physical address
+ *
+ * @edid:	pointer to the EDID data
+ * @size:	size in bytes of the EDID data
+ * @offset:	If not %NULL then the location of the physical address
+ *		bytes in the EDID will be returned here. This is set to 0
+ *		if there is no physical address found.
+ *
+ * Return: the physical address or CEC_PHYS_ADDR_INVALID if there is none.
+ */
+u16 cec_get_edid_phys_addr(const u8 *edid, unsigned int size,
+			   unsigned int *offset);
+
+/**
+ * cec_set_edid_phys_addr() - find and set the physical address
+ *
+ * @edid:	pointer to the EDID data
+ * @size:	size in bytes of the EDID data
+ * @phys_addr:	the new physical address
+ *
+ * This function finds the location of the physical address in the EDID
+ * and fills in the given physical address and updates the checksum
+ * at the end of the EDID block. It does nothing if the EDID doesn't
+ * contain a physical address.
+ */
+void cec_set_edid_phys_addr(u8 *edid, unsigned int size, u16 phys_addr);
+
+/**
+ * cec_phys_addr_for_input() - calculate the PA for an input
+ *
+ * @phys_addr:	the physical address of the parent
+ * @input:	the number of the input port, must be between 1 and 15
+ *
+ * This function calculates a new physical address based on the input
+ * port number. For example:
+ *
+ * PA = 0.0.0.0 and input = 2 becomes 2.0.0.0
+ *
+ * PA = 3.0.0.0 and input = 1 becomes 3.1.0.0
+ *
+ * PA = 3.2.1.0 and input = 5 becomes 3.2.1.5
+ *
+ * PA = 3.2.1.3 and input = 5 becomes f.f.f.f since it maxed out the depth.
+ *
+ * Return: the new physical address or CEC_PHYS_ADDR_INVALID.
+ */
+u16 cec_phys_addr_for_input(u16 phys_addr, u8 input);
+
+/**
+ * cec_phys_addr_validate() - validate a physical address from an EDID
+ *
+ * @phys_addr:	the physical address to validate
+ * @parent:	if not %NULL, then this is filled with the parent's PA.
+ * @port:	if not %NULL, then this is filled with the input port.
+ *
+ * This validates a physical address as read from an EDID. If the
+ * PA is invalid (such as 1.0.1.0 since '0' is only allowed at the end),
+ * then it will return -EINVAL.
+ *
+ * The parent PA is passed into %parent and the input port is passed into
+ * %port. For example:
+ *
+ * PA = 0.0.0.0: has parent 0.0.0.0 and input port 0.
+ *
+ * PA = 1.0.0.0: has parent 0.0.0.0 and input port 1.
+ *
+ * PA = 3.2.0.0: has parent 3.0.0.0 and input port 2.
+ *
+ * PA = f.f.f.f: has parent f.f.f.f and input port 0.
+ *
+ * Return: 0 if the PA is valid, -EINVAL if not.
+ */
+int cec_phys_addr_validate(u16 phys_addr, u16 *parent, u16 *port);
+
+#else
+
+static inline int cec_register_adapter(struct cec_adapter *adap,
+				       struct device *parent)
+{
+	return 0;
+}
+
+static inline void cec_unregister_adapter(struct cec_adapter *adap)
+{
+}
+
+static inline void cec_delete_adapter(struct cec_adapter *adap)
+{
+}
+
+static inline void cec_s_phys_addr(struct cec_adapter *adap, u16 phys_addr,
+				   bool block)
+{
+}
+
+static inline void cec_s_phys_addr_from_edid(struct cec_adapter *adap,
+					     const struct edid *edid)
+{
+}
+
+static inline u16 cec_get_edid_phys_addr(const u8 *edid, unsigned int size,
+					 unsigned int *offset)
+{
+	if (offset)
+		*offset = 0;
+	return CEC_PHYS_ADDR_INVALID;
+}
+
+static inline void cec_set_edid_phys_addr(u8 *edid, unsigned int size,
+					  u16 phys_addr)
+{
+}
+
+static inline u16 cec_phys_addr_for_input(u16 phys_addr, u8 input)
+{
+	return CEC_PHYS_ADDR_INVALID;
+}
+
+static inline int cec_phys_addr_validate(u16 phys_addr, u16 *parent, u16 *port)
+{
+	return 0;
+}
+
+#endif
+
+/**
+ * cec_phys_addr_invalidate() - set the physical address to INVALID
+ *
+ * @adap:	the CEC adapter
+ *
+ * This is a simple helper function to invalidate the physical
+ * address.
+ */
+static inline void cec_phys_addr_invalidate(struct cec_adapter *adap)
+{
+	cec_s_phys_addr(adap, CEC_PHYS_ADDR_INVALID, false);
+}
+
+#endif /* _MEDIA_CEC_H */
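
A platform CEC driver built on this framework implements cec_adap_ops and
registers an adapter. A minimal sketch; the ops bodies, the capability set
and all names are illustrative:

	#include <linux/platform_device.h>
	#include <media/cec.h>

	static int ex_adap_enable(struct cec_adapter *adap, bool enable)
	{
		return 0;	/* power the CEC block up or down */
	}

	static int ex_adap_log_addr(struct cec_adapter *adap, u8 log_addr)
	{
		return 0;	/* program the claimed logical address */
	}

	static int ex_adap_transmit(struct cec_adapter *adap, u8 attempts,
				    u32 signal_free_time,
				    struct cec_msg *msg)
	{
		/* queue msg; call cec_transmit_done() from the IRQ path */
		return 0;
	}

	static const struct cec_adap_ops ex_cec_ops = {
		.adap_enable	= ex_adap_enable,
		.adap_log_addr	= ex_adap_log_addr,
		.adap_transmit	= ex_adap_transmit,
	};

	static int ex_cec_probe(struct platform_device *pdev)
	{
		struct cec_adapter *adap;
		int ret;

		adap = cec_allocate_adapter(&ex_cec_ops, NULL, "ex-cec",
					    CEC_CAP_LOG_ADDRS |
					    CEC_CAP_TRANSMIT, 1);
		if (IS_ERR(adap))
			return PTR_ERR(adap);

		ret = cec_register_adapter(adap, &pdev->dev);
		if (ret)
			cec_delete_adapter(adap);
		return ret;
	}
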
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/include/media/cec-notifier.h	2019-10-29 09:26:25.497221322 +0100
@@ -0,0 +1,148 @@
+/*
+ * cec-notifier.h - notify CEC drivers of physical address changes
+ *
+ * Copyright 2016 Russell King <rmk+kernel@arm.linux.org.uk>
+ * Copyright 2016-2017 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef LINUX_CEC_NOTIFIER_H
+#define LINUX_CEC_NOTIFIER_H
+
+#include <linux/types.h>
+#include <media/cec.h>
+
+struct device;
+struct edid;
+struct cec_adapter;
+struct cec_notifier;
+
+#if IS_REACHABLE(CONFIG_CEC_CORE) && IS_ENABLED(CONFIG_CEC_NOTIFIER)
+
+/**
+ * cec_notifier_get - find or create a new cec_notifier for the given device.
+ * @dev: device that sends the events.
+ *
+ * If a notifier for device @dev already exists, then increase the refcount
+ * and return that notifier.
+ *
+ * If it doesn't exist, then allocate a new notifier struct and return a
+ * pointer to that new struct.
+ *
+ * Return NULL if the memory could not be allocated.
+ */
+struct cec_notifier *cec_notifier_get(struct device *dev);
+
+/**
+ * cec_notifier_put - decrease refcount and delete when the refcount reaches 0.
+ * @n: notifier
+ */
+void cec_notifier_put(struct cec_notifier *n);
+
+/**
+ * cec_notifier_set_phys_addr - set a new physical address.
+ * @n: the CEC notifier
+ * @pa: the CEC physical address
+ *
+ * Set a new CEC physical address.
+ * Does nothing if @n == NULL.
+ */
+void cec_notifier_set_phys_addr(struct cec_notifier *n, u16 pa);
+
+/**
+ * cec_notifier_set_phys_addr_from_edid - set the PA parsed from the EDID.
+ * @n: the CEC notifier
+ * @edid: the struct edid pointer
+ *
+ * Parses the EDID to obtain the new CEC physical address and sets it.
+ * Does nothing if @n == NULL.
+ */
+void cec_notifier_set_phys_addr_from_edid(struct cec_notifier *n,
+					  const struct edid *edid);
+
+/**
+ * cec_notifier_register - register a callback with the notifier
+ * @n: the CEC notifier
+ * @adap: the CEC adapter, passed as argument to the callback function
+ * @callback: the callback function
+ */
+void cec_notifier_register(struct cec_notifier *n,
+			   struct cec_adapter *adap,
+			   void (*callback)(struct cec_adapter *adap, u16 pa));
+
+/**
+ * cec_notifier_unregister - unregister the callback from the notifier.
+ * @n: the CEC notifier
+ */
+void cec_notifier_unregister(struct cec_notifier *n);
+
+/**
+ * cec_register_cec_notifier - register the notifier with the cec adapter.
+ * @adap: the CEC adapter
+ * @notifier: the CEC notifier
+ */
+void cec_register_cec_notifier(struct cec_adapter *adap,
+			       struct cec_notifier *notifier);
+
+#else
+static inline struct cec_notifier *cec_notifier_get(struct device *dev)
+{
+	/* A non-NULL pointer is expected on success */
+	return (struct cec_notifier *)0xdeadfeed;
+}
+
+static inline void cec_notifier_put(struct cec_notifier *n)
+{
+}
+
+static inline void cec_notifier_set_phys_addr(struct cec_notifier *n, u16 pa)
+{
+}
+
+static inline void cec_notifier_set_phys_addr_from_edid(struct cec_notifier *n,
+							const struct edid *edid)
+{
+}
+
+static inline void cec_notifier_register(struct cec_notifier *n,
+			 struct cec_adapter *adap,
+			 void (*callback)(struct cec_adapter *adap, u16 pa))
+{
+}
+
+static inline void cec_notifier_unregister(struct cec_notifier *n)
+{
+}
+
+static inline void cec_register_cec_notifier(struct cec_adapter *adap,
+					     struct cec_notifier *notifier)
+{
+}
+#endif
+
+/**
+ * cec_notifier_phys_addr_invalidate() - set the physical address to INVALID
+ *
+ * @n: the CEC notifier
+ *
+ * This is a simple helper function to invalidate the physical
+ * address. Does nothing if @n == NULL.
+ */
+static inline void cec_notifier_phys_addr_invalidate(struct cec_notifier *n)
+{
+	cec_notifier_set_phys_addr(n, CEC_PHYS_ADDR_INVALID);
+}
+
+#endif
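
The notifier decouples the HDMI driver, which learns the physical address
from the EDID, from the CEC adapter driver. A sketch of the HDMI-side calls;
hdmi_dev and edid stand in for the driver's own state:

	#include <media/cec-notifier.h>

	/* in probe: */
	struct cec_notifier *n = cec_notifier_get(hdmi_dev);
	if (!n)
		return -ENOMEM;

	/* on hotplug or EDID change: */
	if (edid)
		cec_notifier_set_phys_addr_from_edid(n, edid);
	else
		cec_notifier_phys_addr_invalidate(n);

	/* in remove: */
	cec_notifier_put(n);
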
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/include/media/msm_vidc.h	2019-01-22 16:16:28.435291069 +0100
@@ -0,0 +1,125 @@
+/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _MSM_VIDC_H_
+#define _MSM_VIDC_H_
+
+#include <linux/poll.h>
+#include <linux/videodev2.h>
+#include <linux/types.h>
+#include <linux/msm_ion.h>
+#include <uapi/media/msm_vidc.h>
+
+#define HAL_BUFFER_MAX 0xb
+
+enum smem_type {
+	SMEM_ION,
+};
+
+enum smem_prop {
+	SMEM_CACHED,
+	SMEM_SECURE,
+};
+
+/* NOTE: if you change this enum you MUST update the
+ * "buffer-type-tz-usage-table" for any affected target
+ * in arch/arm/boot/dts/<arch>.dtsi
+ */
+enum hal_buffer {
+	HAL_BUFFER_NONE = 0x0,
+	HAL_BUFFER_INPUT = 0x1,
+	HAL_BUFFER_OUTPUT = 0x2,
+	HAL_BUFFER_OUTPUT2 = 0x4,
+	HAL_BUFFER_EXTRADATA_INPUT = 0x8,
+	HAL_BUFFER_EXTRADATA_OUTPUT = 0x10,
+	HAL_BUFFER_EXTRADATA_OUTPUT2 = 0x20,
+	HAL_BUFFER_INTERNAL_SCRATCH = 0x40,
+	HAL_BUFFER_INTERNAL_SCRATCH_1 = 0x80,
+	HAL_BUFFER_INTERNAL_SCRATCH_2 = 0x100,
+	HAL_BUFFER_INTERNAL_PERSIST = 0x200,
+	HAL_BUFFER_INTERNAL_PERSIST_1 = 0x400,
+	HAL_BUFFER_INTERNAL_CMD_QUEUE = 0x800,
+};
+
+struct dma_mapping_info {
+	struct device *dev;
+	struct dma_iommu_mapping *mapping;
+	struct sg_table *table;
+	struct dma_buf_attachment *attach;
+	struct dma_buf *buf;
+};
+
+struct msm_smem {
+	int mem_type;
+	size_t size;
+	void *kvaddr;
+	ion_phys_addr_t device_addr;
+	unsigned long flags;
+	void *smem_priv;
+	enum hal_buffer buffer_type;
+	struct dma_mapping_info mapping_info;
+	unsigned int offset;
+};
+
+enum smem_cache_ops {
+	SMEM_CACHE_CLEAN,
+	SMEM_CACHE_INVALIDATE,
+	SMEM_CACHE_CLEAN_INVALIDATE,
+};
+
+enum core_id {
+	MSM_VIDC_CORE_VENUS = 0,
+	MSM_VIDC_CORE_Q6,
+	MSM_VIDC_CORES_MAX,
+};
+
+enum session_type {
+	MSM_VIDC_ENCODER = 0,
+	MSM_VIDC_DECODER,
+	MSM_VIDC_UNKNOWN,
+	MSM_VIDC_MAX_DEVICES = MSM_VIDC_UNKNOWN,
+};
+
+union msm_v4l2_cmd {
+	struct v4l2_decoder_cmd dec;
+	struct v4l2_encoder_cmd enc;
+};
+
+void *msm_vidc_open(int core_id, int session_type);
+int msm_vidc_close(void *instance);
+int msm_vidc_suspend(int core_id);
+int msm_vidc_querycap(void *instance, struct v4l2_capability *cap);
+int msm_vidc_enum_fmt(void *instance, struct v4l2_fmtdesc *f);
+int msm_vidc_s_fmt(void *instance, struct v4l2_format *f);
+int msm_vidc_g_fmt(void *instance, struct v4l2_format *f);
+int msm_vidc_s_ctrl(void *instance, struct v4l2_control *a);
+int msm_vidc_s_ext_ctrl(void *instance, struct v4l2_ext_controls *a);
+int msm_vidc_g_ctrl(void *instance, struct v4l2_control *a);
+int msm_vidc_reqbufs(void *instance, struct v4l2_requestbuffers *b);
+int msm_vidc_prepare_buf(void *instance, struct v4l2_buffer *b);
+int msm_vidc_release_buffers(void *instance, int buffer_type);
+int msm_vidc_qbuf(void *instance, struct v4l2_buffer *b);
+int msm_vidc_dqbuf(void *instance, struct v4l2_buffer *b);
+int msm_vidc_streamon(void *instance, enum v4l2_buf_type i);
+int msm_vidc_query_ctrl(void *instance, struct v4l2_queryctrl *ctrl);
+int msm_vidc_query_ext_ctrl(void *instance, struct v4l2_query_ext_ctrl *ctrl);
+int msm_vidc_streamoff(void *instance, enum v4l2_buf_type i);
+int msm_vidc_comm_cmd(void *instance, union msm_v4l2_cmd *cmd);
+int msm_vidc_poll(void *instance, struct file *filp,
+		struct poll_table_struct *pt);
+int msm_vidc_subscribe_event(void *instance,
+		const struct v4l2_event_subscription *sub);
+int msm_vidc_unsubscribe_event(void *instance,
+		const struct v4l2_event_subscription *sub);
+int msm_vidc_dqevent(void *instance, struct v4l2_event *event);
+int msm_vidc_enum_framesizes(void *instance, struct v4l2_frmsizeenum *fsize);
+#endif
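
Session handling goes through the opaque instance pointer returned by
msm_vidc_open(). A sketch; treating a NULL return as failure is an
assumption about the open() convention:

	#include <media/msm_vidc.h>

	static void *example_open_decoder(void)
	{
		struct v4l2_capability cap;
		void *inst;

		inst = msm_vidc_open(MSM_VIDC_CORE_VENUS, MSM_VIDC_DECODER);
		if (!inst)
			return NULL;

		if (msm_vidc_querycap(inst, &cap)) {
			msm_vidc_close(inst);
			return NULL;
		}
		return inst;
	}
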
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/include/net/cnss.h	2019-10-29 09:26:25.509221439 +0100
@@ -0,0 +1,261 @@
+/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#ifndef _NET_CNSS_H_
+#define _NET_CNSS_H_
+
+#include <linux/device.h>
+#include <linux/skbuff.h>
+#include <linux/pci.h>
+#include <linux/mmc/sdio_func.h>
+
+#ifdef CONFIG_CNSS
+#define CNSS_MAX_FILE_NAME	20
+#define PINCTRL_SLEEP  0
+#define PINCTRL_ACTIVE 1
+
+enum cnss_bus_width_type {
+	CNSS_BUS_WIDTH_NONE,
+	CNSS_BUS_WIDTH_LOW,
+	CNSS_BUS_WIDTH_MEDIUM,
+	CNSS_BUS_WIDTH_HIGH
+};
+
+/* FW image files */
+struct cnss_fw_files {
+	char image_file[CNSS_MAX_FILE_NAME];
+	char board_data[CNSS_MAX_FILE_NAME];
+	char otp_data[CNSS_MAX_FILE_NAME];
+	char utf_file[CNSS_MAX_FILE_NAME];
+	char utf_board_data[CNSS_MAX_FILE_NAME];
+	char epping_file[CNSS_MAX_FILE_NAME];
+	char evicted_data[CNSS_MAX_FILE_NAME];
+};
+
+struct cnss_wlan_runtime_ops {
+	int (*runtime_suspend)(struct pci_dev *pdev);
+	int (*runtime_resume)(struct pci_dev *pdev);
+};
+
+struct cnss_wlan_driver {
+	char *name;
+	int  (*probe)(struct pci_dev *pdev, const struct pci_device_id *id);
+	void (*remove)(struct pci_dev *pdev);
+	int  (*reinit)(struct pci_dev *pdev, const struct pci_device_id *id);
+	void (*shutdown)(struct pci_dev *pdev);
+	void (*crash_shutdown)(struct pci_dev *pdev);
+	int  (*suspend)(struct pci_dev *pdev, pm_message_t state);
+	int  (*resume)(struct pci_dev *pdev);
+	void (*modem_status)(struct pci_dev *, int state);
+	struct cnss_wlan_runtime_ops *runtime_ops;
+	const struct pci_device_id *id_table;
+};
+
+/*
+ * codeseg_total_bytes: total bytes across all the code segment blocks
+ * num_codesegs: number of pages used
+ * codeseg_size: size of each segment; must be a power of 2 and a multiple
+ *		 of 4K
+ * codeseg_size_log2: log2(codeseg_size)
+ * codeseg_busaddr: physical addresses of the DMA-able memory; 4K aligned
+ */
+
+#define CODESWAP_MAX_CODESEGS 16
+struct codeswap_codeseg_info {
+	u32   codeseg_total_bytes;
+	u32   num_codesegs;
+	u32   codeseg_size;
+	u32   codeseg_size_log2;
+	void *codeseg_busaddr[CODESWAP_MAX_CODESEGS];
+};
+
+struct image_desc_info {
+	dma_addr_t fw_addr;
+	u32 fw_size;
+	dma_addr_t bdata_addr;
+	u32 bdata_size;
+};
+
+/* platform capabilities */
+enum cnss_platform_cap_flag {
+	CNSS_HAS_EXTERNAL_SWREG = 0x01,
+	CNSS_HAS_UART_ACCESS = 0x02,
+};
+
+struct cnss_platform_cap {
+	u32 cap_flag;
+};
+
+/* WLAN driver status */
+enum cnss_driver_status {
+	CNSS_UNINITIALIZED,
+	CNSS_INITIALIZED,
+	CNSS_LOAD_UNLOAD
+};
+
+enum cnss_runtime_request {
+	CNSS_PM_RUNTIME_GET,
+	CNSS_PM_RUNTIME_PUT,
+	CNSS_PM_RUNTIME_MARK_LAST_BUSY,
+	CNSS_PM_RUNTIME_RESUME,
+	CNSS_PM_RUNTIME_PUT_NOIDLE,
+	CNSS_PM_REQUEST_RESUME,
+	CNSS_PM_RUNTIME_PUT_AUTO,
+	CNSS_PM_GET_NORESUME,
+};
+
+extern int cnss_get_fw_image(struct image_desc_info *image_desc_info);
+extern void cnss_runtime_init(struct device *dev, int auto_delay);
+extern void cnss_runtime_exit(struct device *dev);
+extern void cnss_wlan_pci_link_down(void);
+extern int cnss_pcie_shadow_control(struct pci_dev *dev, bool enable);
+extern int cnss_wlan_register_driver(struct cnss_wlan_driver *driver);
+extern void cnss_wlan_unregister_driver(struct cnss_wlan_driver *driver);
+extern int cnss_get_fw_files(struct cnss_fw_files *pfw_files);
+extern int cnss_get_fw_files_for_target(struct cnss_fw_files *pfw_files,
+					u32 target_type, u32 target_version);
+extern void cnss_get_qca9377_fw_files(struct cnss_fw_files *pfw_files,
+					u32 size, u32 tufello_dual_fw);
+
+extern int cnss_request_bus_bandwidth(int bandwidth);
+
+#ifdef CONFIG_CNSS_SECURE_FW
+extern int cnss_get_sha_hash(const u8 *data, u32 data_len,
+					u8 *hash_idx, u8 *out);
+extern void *cnss_get_fw_ptr(void);
+#endif
+
+extern int cnss_get_codeswap_struct(struct codeswap_codeseg_info *swap_seg);
+extern int cnss_get_bmi_setup(void);
+
+#ifdef CONFIG_PCI_MSM
+extern int cnss_wlan_pm_control(bool vote);
+#endif
+extern void cnss_lock_pm_sem(void);
+extern void cnss_release_pm_sem(void);
+
+extern void cnss_request_pm_qos_type(int latency_type, u32 qos_val);
+extern void cnss_request_pm_qos(u32 qos_val);
+extern void cnss_remove_pm_qos(void);
+
+extern void cnss_pci_request_pm_qos_type(int latency_type, u32 qos_val);
+extern void cnss_pci_request_pm_qos(u32 qos_val);
+extern void cnss_pci_remove_pm_qos(void);
+
+extern void cnss_sdio_request_pm_qos_type(int latency_type, u32 qos_val);
+extern void cnss_sdio_request_pm_qos(u32 qos_val);
+extern void cnss_sdio_remove_pm_qos(void);
+
+extern int cnss_get_platform_cap(struct cnss_platform_cap *cap);
+extern void cnss_set_driver_status(enum cnss_driver_status driver_status);
+
+#ifndef CONFIG_WCNSS_MEM_PRE_ALLOC
+static inline int wcnss_pre_alloc_reset(void) { return 0; }
+#endif
+
+#if !defined(CONFIG_WCNSS_MEM_PRE_ALLOC) || !defined(CONFIG_SLUB_DEBUG)
+static inline void wcnss_prealloc_check_memory_leak(void) {}
+#endif
+
+extern int msm_pcie_enumerate(u32 rc_idx);
+extern int cnss_auto_suspend(void);
+extern int cnss_auto_resume(void);
+extern int cnss_prevent_auto_suspend(const char *caller_func);
+extern int cnss_allow_auto_suspend(const char *caller_func);
+extern int cnss_is_auto_suspend_allowed(const char *caller_func);
+
+extern int cnss_pm_runtime_request(struct device *dev,
+				   enum cnss_runtime_request request);
+#endif
+
+extern void cnss_pm_wake_lock_init(struct wakeup_source *ws, const char *name);
+extern void cnss_pm_wake_lock(struct wakeup_source *ws);
+
+extern void cnss_device_crashed(void);
+extern void cnss_device_self_recovery(void);
+extern void *cnss_get_virt_ramdump_mem(unsigned long *size);
+
+extern void cnss_schedule_recovery_work(void);
+extern int cnss_pcie_set_wlan_mac_address(const u8 *in, uint32_t len);
+extern u8 *cnss_get_wlan_mac_address(struct device *dev, uint32_t *num);
+extern int cnss_sdio_set_wlan_mac_address(const u8 *in, uint32_t len);
+
+enum cnss_cc_src {
+	CNSS_SOURCE_CORE,
+	CNSS_SOURCE_11D,
+	CNSS_SOURCE_USER
+};
+
+enum {
+	CNSS_RESET_SOC = 0,
+	CNSS_RESET_SUBSYS_COUPLED,
+	CNSS_RESET_LEVEL_MAX
+};
+extern int cnss_get_restart_level(void);
+
+struct cnss_sdio_wlan_driver {
+	const char *name;
+	const struct sdio_device_id *id_table;
+	int (*probe)(struct sdio_func *, const struct sdio_device_id *);
+	void (*remove)(struct sdio_func *);
+	int (*reinit)(struct sdio_func *, const struct sdio_device_id *);
+	void (*shutdown)(struct sdio_func *);
+	void (*crash_shutdown)(struct sdio_func *);
+	int (*suspend)(struct device *);
+	int (*resume)(struct device *);
+};
+
+extern int cnss_sdio_wlan_register_driver(
+	struct cnss_sdio_wlan_driver *driver);
+extern void cnss_sdio_wlan_unregister_driver(
+	struct cnss_sdio_wlan_driver *driver);
+
+typedef void (*oob_irq_handler_t)(void *dev_para);
+extern int cnss_wlan_query_oob_status(void);
+extern int cnss_wlan_register_oob_irq_handler(oob_irq_handler_t handler,
+	    void *pm_oob);
+extern int cnss_wlan_unregister_oob_irq_handler(void *pm_oob);
+
+
+extern void cnss_dump_stack(struct task_struct *task);
+extern u8 *cnss_common_get_wlan_mac_address(struct device *dev, uint32_t *num);
+extern void cnss_init_work(struct work_struct *work, work_func_t func);
+extern void cnss_flush_delayed_work(void *dwork);
+extern void cnss_flush_work(void *work);
+extern void cnss_pm_wake_lock_timeout(struct wakeup_source *ws, ulong msec);
+extern void cnss_pm_wake_lock_release(struct wakeup_source *ws);
+extern void cnss_pm_wake_lock_destroy(struct wakeup_source *ws);
+extern void cnss_get_monotonic_boottime(struct timespec *ts);
+extern void cnss_get_boottime(struct timespec *ts);
+extern void cnss_init_delayed_work(struct delayed_work *work,
+				   work_func_t func);
+extern int cnss_vendor_cmd_reply(struct sk_buff *skb);
+extern int cnss_set_cpus_allowed_ptr(struct task_struct *task, ulong cpu);
+extern int cnss_set_wlan_unsafe_channel(u16 *unsafe_ch_list, u16 ch_count);
+extern int cnss_get_wlan_unsafe_channel(u16 *unsafe_ch_list, u16 *ch_count,
+					u16 buf_len);
+extern int cnss_wlan_set_dfs_nol(const void *info, u16 info_len);
+extern int cnss_wlan_get_dfs_nol(void *info, u16 info_len);
+extern int cnss_common_request_bus_bandwidth(struct device *dev,
+					     int bandwidth);
+extern void cnss_common_device_crashed(struct device *dev);
+extern void cnss_common_device_self_recovery(struct device *dev);
+extern void *cnss_common_get_virt_ramdump_mem(struct device *dev,
+					      unsigned long *size);
+extern void cnss_common_schedule_recovery_work(struct device *dev);
+extern int cnss_common_set_wlan_mac_address(struct device *dev, const u8 *in,
+					    uint32_t len);
+extern u8 *cnss_common_get_wlan_mac_address(struct device *dev, uint32_t *num);
+extern int cnss_power_up(struct device *dev);
+extern int cnss_power_down(struct device *dev);
+extern int cnss_sdio_configure_spdt(bool state);
+extern void cnss_set_cc_source(enum cnss_cc_src cc_source);
+extern enum cnss_cc_src cnss_get_cc_source(void);
+#endif /* _NET_CNSS_H_ */
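
A PCIe WLAN driver registers through this platform layer instead of calling
pci_register_driver() directly, which lets the platform code drive reinit
and crash shutdown. A minimal sketch with illustrative names and the
id_table elided:

	#include <linux/module.h>
	#include <net/cnss.h>

	static int ex_probe(struct pci_dev *pdev,
			    const struct pci_device_id *id)
	{
		return 0;	/* normal device bring-up */
	}

	static void ex_remove(struct pci_dev *pdev)
	{
	}

	static struct cnss_wlan_driver ex_driver = {
		.name	= "ex_wlan",
		.probe	= ex_probe,
		.remove	= ex_remove,
		/* .id_table would list the supported PCIe IDs */
	};

	static int __init ex_init(void)
	{
		return cnss_wlan_register_driver(&ex_driver);
	}

	static void __exit ex_exit(void)
	{
		cnss_wlan_unregister_driver(&ex_driver);
	}

	module_init(ex_init);
	module_exit(ex_exit);
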
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/include/net/cnss_prealloc.h	2019-10-29 09:26:25.509221439 +0100
@@ -0,0 +1,23 @@
+/* Copyright (c) 2015, 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _NET_CNSS_PREALLOC_H_
+#define _NET_CNSS_PREALLOC_H_
+
+#define WCNSS_PRE_ALLOC_GET_THRESHOLD (4*1024)
+
+extern void *wcnss_prealloc_get(size_t size);
+extern int wcnss_prealloc_put(void *ptr);
+extern int wcnss_pre_alloc_reset(void);
+void wcnss_prealloc_check_memory_leak(void);
+
+#endif /* _NET_CNSS_PREALLOC_H_ */
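
The intended pattern is that allocations above the threshold come from the
preallocated pool. A sketch, assuming wcnss_prealloc_put() returns nonzero
when it owned the buffer:

	#include <linux/slab.h>
	#include <net/cnss_prealloc.h>

	static void *ex_alloc(size_t size)
	{
		if (size > WCNSS_PRE_ALLOC_GET_THRESHOLD)
			return wcnss_prealloc_get(size);
		return kmalloc(size, GFP_KERNEL);
	}

	static void ex_free(void *ptr, size_t size)
	{
		if (size > WCNSS_PRE_ALLOC_GET_THRESHOLD &&
		    wcnss_prealloc_put(ptr))
			return;
		kfree(ptr);
	}
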
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/include/net/cnss_utils.h	2019-01-22 16:16:28.451291214 +0100
@@ -0,0 +1,43 @@
+/* Copyright (c) 2017 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CNSS_UTILS_H_
+#define _CNSS_UTILS_H_
+
+enum cnss_utils_cc_src {
+	CNSS_UTILS_SOURCE_CORE,
+	CNSS_UTILS_SOURCE_11D,
+	CNSS_UTILS_SOURCE_USER
+};
+
+extern int cnss_utils_set_wlan_unsafe_channel(struct device *dev,
+					      u16 *unsafe_ch_list,
+					      u16 ch_count);
+extern int cnss_utils_get_wlan_unsafe_channel(struct device *dev,
+					      u16 *unsafe_ch_list,
+					      u16 *ch_count, u16 buf_len);
+extern int cnss_utils_wlan_set_dfs_nol(struct device *dev,
+				       const void *info, u16 info_len);
+extern int cnss_utils_wlan_get_dfs_nol(struct device *dev,
+				       void *info, u16 info_len);
+extern int cnss_utils_get_driver_load_cnt(struct device *dev);
+extern void cnss_utils_increment_driver_load_cnt(struct device *dev);
+extern int cnss_utils_set_wlan_mac_address(const u8 *in, uint32_t len);
+extern u8 *cnss_utils_get_wlan_mac_address(struct device *dev, uint32_t *num);
+extern int cnss_utils_set_wlan_derived_mac_address(const u8 *in, uint32_t len);
+extern u8 *cnss_utils_get_wlan_derived_mac_address(struct device *dev,
+							uint32_t *num);
+extern void cnss_utils_set_cc_source(struct device *dev,
+				     enum cnss_utils_cc_src cc_source);
+extern enum cnss_utils_cc_src cnss_utils_get_cc_source(struct device *dev);
+
+#endif
diff -Nruw linux-4.4.115-fbx/include/soc/qcom./boot_stats.h linux-4.4.115-fbx/include/soc/qcom/boot_stats.h
--- linux-4.4.115-fbx/include/soc/qcom./boot_stats.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/include/soc/qcom/boot_stats.h	2019-10-29 09:26:25.529221635 +0100
@@ -0,0 +1,46 @@
+/* Copyright (c) 2013-2014,2016 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifdef CONFIG_MSM_BOOT_STATS
+
+#define TIMER_KHZ 32768
+extern struct boot_stats __iomem *boot_stats;
+
+struct boot_stats {
+	uint32_t bootloader_start;
+	uint32_t bootloader_end;
+	uint32_t bootloader_display;
+	uint32_t bootloader_load_kernel;
+	uint32_t load_kernel_start;
+	uint32_t load_kernel_end;
+#ifdef CONFIG_MSM_BOOT_TIME_MARKER
+	uint32_t bootloader_early_domain_start;
+	uint32_t bootloader_checksum;
+#endif
+};
+
+int boot_stats_init(void);
+int boot_stats_exit(void);
+unsigned long long int msm_timer_get_sclk_ticks(void);
+#else
+static inline int boot_stats_init(void) { return 0; }
+static inline int boot_stats_exit(void) { return 0; }
+static inline unsigned long long int msm_timer_get_sclk_ticks(void)
+{
+	return 0;
+}
+#endif
+
+#ifdef CONFIG_MSM_BOOT_TIME_MARKER
+
+static inline int boot_marker_enabled(void) { return 1; }
+void place_marker(const char *name);
+#else
+static inline void place_marker(const char *name) { }
+static inline int boot_marker_enabled(void) { return 0; }
+#endif
diff -Nruw linux-4.4.115-fbx/include/soc/qcom./clock-alpha-pll.h linux-4.4.115-fbx/include/soc/qcom/clock-alpha-pll.h
--- linux-4.4.115-fbx/include/soc/qcom./clock-alpha-pll.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/include/soc/qcom/clock-alpha-pll.h	2019-01-22 16:16:28.491291576 +0100
@@ -0,0 +1,101 @@
+/*
+ * Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __ARCH_ARM_MACH_MSM_CLOCK_ALPHA_PLL_H
+#define __ARCH_ARM_MACH_MSM_CLOCK_ALPHA_PLL_H
+
+#include <linux/spinlock.h>
+#include <linux/clk/msm-clk-provider.h>
+
+struct alpha_pll_masks {
+	u32 lock_mask;		/* lock_det bit */
+	u32 active_mask;	/* active_flag in FSM mode */
+	u32 update_mask;	/* update bit for dynamic update */
+	u32 vco_mask;		/* vco_sel bits */
+	u32 vco_shift;
+	u32 alpha_en_mask;	/* alpha_en bit */
+	u32 output_mask;	/* pllout_* bits */
+	u32 post_div_mask;
+
+	u32 test_ctl_lo_mask;
+	u32 test_ctl_hi_mask;
+};
+
+struct alpha_pll_vco_tbl {
+	u32 vco_val;
+	unsigned long min_freq;
+	unsigned long max_freq;
+};
+
+#define VCO(a, b, c) { \
+	.vco_val = a,\
+	.min_freq = b,\
+	.max_freq = c,\
+}
+
+struct alpha_pll_clk {
+	struct alpha_pll_masks *masks;
+	void *const __iomem *base;
+	u32 offset;
+
+	/* if fsm_en_mask is set, config PLL to FSM mode */
+	u32 fsm_reg_offset;
+	u32 fsm_en_mask;
+
+	u32 enable_config;	/* bitmask of outputs to be enabled */
+	u32 post_div_config;	/* masked post divider setting */
+	u32 config_ctl_val;	/* config register init value */
+	u32 test_ctl_lo_val;	/* test control settings */
+	u32 test_ctl_hi_val;
+
+	struct alpha_pll_vco_tbl *vco_tbl;
+	u32 num_vco;
+	u32 current_vco_val;
+	bool inited;
+	bool slew;
+	bool no_prepared_reconfig;
+
+	/* some PLLs support dynamically updating their rate
+	 * without disabling the PLL first. Set this flag
+	 * to enable this support.
+	 */
+	bool dynamic_update;
+
+	/*
+	 * Some chipsets need the offline request bit to be
+	 * cleared on a second write to the register, even though
+	 * SW wants the bit to be set. Set this flag to indicate
+	 * that the workaround is required.
+	 */
+	bool offline_bit_workaround;
+	bool no_irq_dis;
+	bool is_fabia;
+	unsigned long min_supported_freq;
+	struct clk c;
+};
+
+static inline struct alpha_pll_clk *to_alpha_pll_clk(struct clk *c)
+{
+	return container_of(c, struct alpha_pll_clk, c);
+}
+
+extern void __init_alpha_pll(struct clk *c);
+extern struct clk_ops clk_ops_alpha_pll;
+extern struct clk_ops clk_ops_alpha_pll_hwfsm;
+extern struct clk_ops clk_ops_fixed_alpha_pll;
+extern struct clk_ops clk_ops_dyna_alpha_pll;
+extern struct clk_ops clk_ops_fixed_fabia_alpha_pll;
+extern struct clk_ops clk_ops_fabia_alpha_pll;
+
+#endif
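
A clock driver describes each alpha PLL with a mask set and a VCO table
built from the VCO() macro. A sketch; every mask, offset and frequency below
is illustrative, not real chip data:

	static void __iomem *virt_base;

	static struct alpha_pll_masks ex_masks = {
		.lock_mask	= BIT(31),
		.active_mask	= BIT(30),
		.vco_mask	= 0x3 << 20,
		.vco_shift	= 20,
		.alpha_en_mask	= BIT(24),
		.output_mask	= 0xf,
	};

	static struct alpha_pll_vco_tbl ex_vco[] = {
		VCO(0, 1000000000UL, 2000000000UL),
		VCO(1,  500000000UL, 1000000000UL),
	};

	static struct alpha_pll_clk ex_pll = {
		.masks		= &ex_masks,
		.offset		= 0x0,
		.vco_tbl	= ex_vco,
		.num_vco	= ARRAY_SIZE(ex_vco),
		.base		= &virt_base,
		.c = {
			.dbg_name = "ex_pll",
			.ops = &clk_ops_alpha_pll,
			CLK_INIT(ex_pll.c),
		},
	};
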
diff -Nruw linux-4.4.115-fbx/include/soc/qcom./clock-local2.h linux-4.4.115-fbx/include/soc/qcom/clock-local2.h
--- linux-4.4.115-fbx/include/soc/qcom./clock-local2.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/include/soc/qcom/clock-local2.h	2019-01-22 16:16:28.491291576 +0100
@@ -0,0 +1,284 @@
+/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __ARCH_ARM_MACH_MSM_CLOCK_LOCAL_2_H
+#define __ARCH_ARM_MACH_MSM_CLOCK_LOCAL_2_H
+
+#include <linux/spinlock.h>
+#include <linux/clk/msm-clk-provider.h>
+#include <linux/clk/msm-clk.h>
+
+/*
+ * Generic frequency-definition structs and macros
+ */
+
+/**
+ * @freq_hz: output rate
+ * @src_freq: source freq for dynamic pll. For fixed plls, set to 0.
+ * @src_clk: source clock for freq_hz
+ * @m_val: M value corresponding to freq_hz
+ * @n_val: N value corresponding to freq_hz
+ * @d_val: D value corresponding to freq_hz
+ * @div_src_val: Pre divider value and source selection mux index for freq_hz
+ * @sys_vdd: Voltage level required for freq_hz
+ */
+struct clk_freq_tbl {
+	unsigned long	freq_hz;
+	unsigned long	src_freq;
+	struct clk	*src_clk;
+	u32	m_val;
+	u32	n_val;
+	u32	d_val;
+	u32	div_src_val;
+	const unsigned	sys_vdd;
+};
+
+#define FREQ_END	(ULONG_MAX-1)
+#define F_END { .freq_hz = FREQ_END }
+#define	FIXED_CLK_SRC	0
+/*
+ * Generic clock-definition struct and macros
+ */
+/**
+ * struct rcg_clk - root clock generator
+ * @cmd_rcgr_reg: command register
+ * @set_rate: function to set frequency
+ * @freq_tbl: frequency table for this RCG
+ * @current_freq: current RCG frequency
+ * @c: generic clock data
+ * @non_local_children: set if RCG has at least one branch owned by a
+ *			 different EE
+ * @non_local_control_timeout: configurable RCG timeout needed when all RCG
+ *			 children can be controlled by an entity outside of
+ *			 HLOS
+ * @force_enable_rcgr: set if RCG needs to be force enabled/disabled during
+ * power sequence
+ * @base: pointer to base address of ioremapped registers.
+ */
+struct rcg_clk {
+	u32 cmd_rcgr_reg;
+
+	void   (*set_rate)(struct rcg_clk *, struct clk_freq_tbl *);
+
+	struct clk_freq_tbl *freq_tbl;
+	struct clk_freq_tbl *current_freq;
+	struct clk	c;
+
+	bool non_local_children;
+	int non_local_control_timeout;
+	bool force_enable_rcgr;
+	void *const __iomem *base;
+};
+
+static inline struct rcg_clk *to_rcg_clk(struct clk *clk)
+{
+	return container_of(clk, struct rcg_clk, c);
+}
+
+extern struct clk_freq_tbl rcg_dummy_freq;
+
+/**
+ * struct branch_clk - branch clock
+ * @set_rate: Set the frequency of this branch clock.
+ * @c: clk
+ * @cbcr_reg: branch control register
+ * @bcr_reg: block reset register
+ * @has_sibling: true if other branches are derived from this branch's source
+ * @cur_div: current branch divider value
+ * @max_div: maximum branch divider value (if zero, no divider exists)
+ * @halt_check: halt checking type
+ * @toggle_memory: toggle memory during enable/disable if true
+ * @no_halt_check_on_disable: When set, do not check status bit during
+ *			      clk_disable().
+ * @check_enable_bit: Check the enable bit to determine clock status
+ *			during handoff.
+ * @aggr_sibling_rates: Set if there are multiple branch clocks with rate
+ *			setting capability on the common RCG.
+ * @is_prepared: Set if clock's prepare count is greater than 0.
+ * @base: pointer to base address of ioremapped registers.
+ */
+struct branch_clk {
+	void   (*set_rate)(struct branch_clk *, struct clk_freq_tbl *);
+	struct clk c;
+	u32 cbcr_reg;
+	u32 bcr_reg;
+	int has_sibling;
+	u32 cur_div;
+	u32 max_div;
+	const u32 halt_check;
+	bool toggle_memory;
+	bool no_halt_check_on_disable;
+	bool check_enable_bit;
+	bool aggr_sibling_rates;
+	bool is_prepared;
+	void *const __iomem *base;
+};
+
+static inline struct branch_clk *to_branch_clk(struct clk *clk)
+{
+	return container_of(clk, struct branch_clk, c);
+}
+
+/**
+ * struct hw_ctl_clk - Clock structure to enable/disable dynamic clock gating
+ * @c: clk
+ * @cbcr_reg: branch control register
+ * @base: pointer to base address of ioremapped registers.
+ */
+struct hw_ctl_clk {
+	struct clk c;
+	u32 cbcr_reg;
+	void __iomem *const *base;
+};
+
+static inline struct hw_ctl_clk *to_hw_ctl_clk(struct clk *clk)
+{
+	return container_of(clk, struct hw_ctl_clk, c);
+}
+
+/**
+ * struct local_vote_clk - Voteable branch clock
+ * @c: clk
+ * @cbcr_reg: branch control register
+ * @vote_reg: voting register
+ * @en_mask: enable mask
+ * @halt_check: halt checking type
+ * @base: pointer to base address of ioremapped registers.
+ * An on/off switch with a rate derived from the parent.
+ */
+struct local_vote_clk {
+	struct clk c;
+	u32 cbcr_reg;
+	u32 vote_reg;
+	u32 bcr_reg;
+	u32 en_mask;
+	const u32 halt_check;
+	void * __iomem *base;
+};
+
+static inline struct local_vote_clk *to_local_vote_clk(struct clk *clk)
+{
+	return container_of(clk, struct local_vote_clk, c);
+}
+
+/**
+ * struct reset_clk - Reset clock
+ * @c: clk
+ * @reset_reg: block reset register
+ * @base: pointer to base address of ioremapped registers.
+ */
+struct reset_clk {
+	struct clk c;
+	u32 reset_reg;
+	void *__iomem *base;
+};
+
+static inline struct reset_clk *to_reset_clk(struct clk *clk)
+{
+	return container_of(clk, struct reset_clk, c);
+}
+/**
+ * struct measure_clk - for rate measurement debug use
+ * @sample_ticks: sample period in reference clock ticks
+ * @multiplier: measurement scale-up factor
+ * @divider: measurement scale-down factor
+ * @c: clk
+ */
+struct measure_clk {
+	u64 sample_ticks;
+	u32 multiplier;
+	u32 divider;
+
+	struct clk c;
+};
+
+struct measure_clk_data {
+	struct clk *cxo;
+	u32 plltest_reg;
+	u32 plltest_val;
+	u32 xo_div4_cbcr;
+	u32 ctl_reg;
+	u32 status_reg;
+	void *const __iomem *base;
+};
+
+static inline struct measure_clk *to_measure_clk(struct clk *clk)
+{
+	return container_of(clk, struct measure_clk, c);
+}
+
+/**
+ * struct gate_clk
+ * @c: clk
+ * @en_mask: ORed with @en_reg to enable gate clk
+ * @en_reg: register used to enable/disable gate clk
+ * @delay_us: delay (in microseconds) applied after toggling the gate
+ * @base: pointer to base address of ioremapped registers
+ */
+struct gate_clk {
+	struct clk c;
+	u32 en_mask;
+	u32 en_reg;
+	unsigned int delay_us;
+	void *const __iomem *base;
+};
+
+static inline struct gate_clk *to_gate_clk(struct clk *clk)
+{
+	return container_of(clk, struct gate_clk, c);
+}
+
+/*
+ * Generic set-rate implementations
+ */
+void set_rate_mnd(struct rcg_clk *clk, struct clk_freq_tbl *nf);
+void set_rate_hid(struct rcg_clk *clk, struct clk_freq_tbl *nf);
+
+/*
+ * Variables from the clock-local driver
+ */
+extern spinlock_t local_clock_reg_lock;
+
+extern struct clk_ops clk_ops_empty;
+extern struct clk_ops clk_ops_rcg;
+extern struct clk_ops clk_ops_rcg_mnd;
+extern struct clk_ops clk_ops_branch;
+extern struct clk_ops clk_ops_branch_hw_ctl;
+extern struct clk_ops clk_ops_vote;
+extern struct clk_ops clk_ops_rcg_hdmi;
+extern struct clk_ops clk_ops_rcg_edp;
+extern struct clk_ops clk_ops_rcg_dp;
+extern struct clk_ops clk_ops_byte;
+extern struct clk_ops clk_ops_pixel;
+extern struct clk_ops clk_ops_byte_multiparent;
+extern struct clk_ops clk_ops_pixel_multiparent;
+extern struct clk_ops clk_ops_edppixel;
+extern struct clk_ops clk_ops_gate;
+extern struct clk_ops clk_ops_rst;
+extern struct clk_mux_ops mux_reg_ops;
+extern struct mux_div_ops rcg_mux_div_ops;
+extern struct clk_div_ops postdiv_reg_ops;
+
+enum handoff pixel_rcg_handoff(struct clk *clk);
+enum handoff byte_rcg_handoff(struct clk *clk);
+unsigned long measure_get_rate(struct clk *c);
+
+/*
+ * Clock definition macros
+ */
+#define DEFINE_CLK_MEASURE(name) \
+	struct clk name = { \
+		.ops = &clk_ops_empty, \
+		.dbg_name = #name, \
+		CLK_INIT(name), \
+	};
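+
+/*
+ * Example (illustrative; the clock name is hypothetical):
+ *	DEFINE_CLK_MEASURE(apc0_m_clk);
+ * declares a bare struct clk usable only as a rate-measurement handle.
+ */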
+
+#endif /* __ARCH_ARM_MACH_MSM_CLOCK_LOCAL_2_H */
diff -Nruw linux-4.4.115/include/soc/qcom/clock-pll.h linux-4.4.115-fbx/include/soc/qcom/clock-pll.h
--- linux-4.4.115/include/soc/qcom/clock-pll.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/include/soc/qcom/clock-pll.h	2019-01-22 16:16:28.491291576 +0100
@@ -0,0 +1,231 @@
+/*
+ * Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __ARCH_ARM_MACH_MSM_CLOCK_PLL_H
+#define __ARCH_ARM_MACH_MSM_CLOCK_PLL_H
+
+#include <linux/clk/msm-clk-provider.h>
+
+/**
+ * struct pll_freq_tbl - generic PLL frequency definition
+ * @freq_hz: pll frequency in hz
+ * @l_val: pll l value
+ * @m_val: pll m value
+ * @n_val: pll n value
+ * @post_div_val: pll post divider value
+ * @pre_div_val: pll pre-divider value
+ * @vco_val: pll vco value
+ */
+struct pll_freq_tbl {
+	const u32 freq_hz;
+	const u32 l_val;
+	const u32 m_val;
+	const u32 n_val;
+	const u32 post_div_val;
+	const u32 pre_div_val;
+	const u32 vco_val;
+};
+
+/**
+ * struct pll_config_masks - PLL config masks struct
+ * @post_div_mask: mask for post divider bits location
+ * @pre_div_mask: mask for pre-divider bits location
+ * @vco_mask: mask for vco bits location
+ * @mn_en_mask: ORed with pll config register to enable the mn counter
+ * @main_output_mask: ORed with pll config register to enable the main output
+ * @early_output_mask: ORed with pll config register to enable the early output
+ * @apc_pdn_mask: ORed with pll config register to enable/disable APC PDN
+ * @lock_mask: Mask that indicates that the PLL has locked
+ */
+struct pll_config_masks {
+	u32 apc_pdn_mask;
+	u32 post_div_mask;
+	u32 pre_div_mask;
+	u32 vco_mask;
+	u32 mn_en_mask;
+	u32 main_output_mask;
+	u32 early_output_mask;
+	u32 lock_mask;
+};
+
+struct pll_config_vals {
+	u32 post_div_masked;
+	u32 pre_div_masked;
+	u32 config_ctl_val;
+	u32 config_ctl_hi_val;
+	u32 test_ctl_lo_val;
+	u32 test_ctl_hi_val;
+	u32 alpha_val;
+	bool enable_mn;
+};
+
+struct pll_spm_ctrl {
+	u32 offset;
+	u32 event_bit;
+	void __iomem *spm_base;
+};
+
+#define PLL_FREQ_END	(UINT_MAX-1)
+#define PLL_F_END { .freq_hz = PLL_FREQ_END }
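+
+/*
+ * Example frequency table (illustrative values, assuming a 19.2 MHz
+ * reference) terminated with PLL_F_END:
+ *	static struct pll_freq_tbl apcs_pll_freq[] = {
+ *		{ .freq_hz = 998400000, .l_val = 52, .m_val = 0, .n_val = 1 },
+ *		PLL_F_END
+ *	};
+ */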
+
+/**
+ * struct pll_vote_clk - phase locked loop (HW voteable)
+ * @soft_vote: soft voting variable for multiple PLL software instances
+ * @soft_vote_mask: soft voting mask for multiple PLL software instances
+ * @en_reg: enable register
+ * @en_mask: ORed with @en_reg to enable the clock
+ * @status_mask: ANDed with @status_reg to determine if PLL is active.
+ * @status_reg: status register
+ * @c: clock
+ */
+struct pll_vote_clk {
+	u32 *soft_vote;
+	u32 soft_vote_mask;
+	void __iomem *const en_reg;
+	u32 en_mask;
+	void __iomem *const status_reg;
+	u32 status_mask;
+
+	struct clk c;
+	void *const __iomem *base;
+};
+
+extern struct clk_ops clk_ops_pll_vote;
+extern struct clk_ops clk_ops_pll_acpu_vote;
+extern struct clk_ops clk_ops_pll_sleep_vote;
+
+/* Soft voting values */
+#define PLL_SOFT_VOTE_PRIMARY   BIT(0)
+#define PLL_SOFT_VOTE_ACPU      BIT(1)
+#define PLL_SOFT_VOTE_AUX       BIT(2)
+
+static inline struct pll_vote_clk *to_pll_vote_clk(struct clk *c)
+{
+	return container_of(c, struct pll_vote_clk, c);
+}
+
+/**
+ * struct pll_clk - phase locked loop
+ * @mode_reg: enable register
+ * @l_reg: l value register
+ * @m_reg: m value register
+ * @n_reg: n value register
+ * @alpha_reg: alpha value register (fractional part of the L value)
+ * @config_reg: configuration register; contains the MN divider enable,
+ *   pre-divider, post-divider and VCO configuration. Depending on the target,
+ *   this register is named either the configure register or the user_ctl
+ *   register.
+ * @config_ctl_reg: "expert" configuration register
+ * @config_ctl_hi_reg: upper 32 bits of the "expert" configuration register
+ * @status_reg: status register, contains the lock detection bit
+ * @init_test_ctl: initialize the test control register
+ * @pgm_test_ctl_enable: program the test_ctl register in the enable sequence
+ * @test_ctl_dbg: when false, the test control registers are configured.
+ * @masks: masks used for settings in config_reg
+ * @vals: configuration values to be written to PLL registers
+ * @freq_tbl: pll freq table
+ * @no_prepared_reconfig: Fail round_rate if pll is prepared
+ * @c: clk
+ * @base: pointer to base address of ioremapped registers.
+ */
+struct pll_clk {
+	void __iomem *const mode_reg;
+	void __iomem *const l_reg;
+	void __iomem *const m_reg;
+	void __iomem *const n_reg;
+	void __iomem *const alpha_reg;
+	void __iomem *const config_reg;
+	void __iomem *const config_ctl_reg;
+	void __iomem *const config_ctl_hi_reg;
+	void __iomem *const status_reg;
+	void __iomem *const alt_status_reg;
+	void __iomem *const test_ctl_lo_reg;
+	void __iomem *const test_ctl_hi_reg;
+
+	bool init_test_ctl;
+	bool pgm_test_ctl_enable;
+	bool test_ctl_dbg;
+
+	struct pll_config_masks masks;
+	struct pll_config_vals vals;
+	struct pll_freq_tbl *freq_tbl;
+
+	unsigned long src_rate;
+	unsigned long min_rate;
+	unsigned long max_rate;
+
+	bool inited;
+	bool no_prepared_reconfig;
+
+	struct pll_spm_ctrl spm_ctrl;
+	struct clk c;
+	void *const __iomem *base;
+};
+
+extern struct clk_ops clk_ops_local_pll;
+extern struct clk_ops clk_ops_sr2_pll;
+extern struct clk_ops clk_ops_variable_rate_pll;
+extern struct clk_ops clk_ops_variable_rate_pll_hwfsm;
+
+void __variable_rate_pll_init(struct clk *c);
+
+static inline struct pll_clk *to_pll_clk(struct clk *c)
+{
+	return container_of(c, struct pll_clk, c);
+}
+
+int sr_pll_clk_enable(struct clk *c);
+int sr_hpm_lp_pll_clk_enable(struct clk *c);
+
+struct pll_alt_config {
+	u32 val;
+	u32 mask;
+};
+
+struct pll_config {
+	u32 l;
+	u32 m;
+	u32 n;
+	u32 vco_val;
+	u32 vco_mask;
+	u32 pre_div_val;
+	u32 pre_div_mask;
+	u32 post_div_val;
+	u32 post_div_mask;
+	u32 mn_ena_val;
+	u32 mn_ena_mask;
+	u32 main_output_val;
+	u32 main_output_mask;
+	u32 aux_output_val;
+	u32 aux_output_mask;
+	u32 cfg_ctl_val;
+	/* SR2 PLL specific fields */
+	u32 add_factor_val;
+	u32 add_factor_mask;
+	struct pll_alt_config alt_cfg;
+};
+
+struct pll_config_regs {
+	void __iomem *l_reg;
+	void __iomem *m_reg;
+	void __iomem *n_reg;
+	void __iomem *config_reg;
+	void __iomem *config_alt_reg;
+	void __iomem *config_ctl_reg;
+	void __iomem *mode_reg;
+	void *const __iomem *base;
+};
+
+void configure_sr_pll(struct pll_config *config, struct pll_config_regs *regs,
+				u32 ena_fsm_mode);
+void configure_sr_hpm_lp_pll(struct pll_config *config,
+				struct pll_config_regs *, u32 ena_fsm_mode);
+#endif
diff -Nruw linux-4.4.115/include/soc/qcom/clock-rpm.h linux-4.4.115-fbx/include/soc/qcom/clock-rpm.h
--- linux-4.4.115/include/soc/qcom/clock-rpm.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/include/soc/qcom/clock-rpm.h	2019-01-22 16:16:28.491291576 +0100
@@ -0,0 +1,180 @@
+/* Copyright (c) 2010-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __ARCH_ARM_MACH_MSM_CLOCK_RPM_H
+#define __ARCH_ARM_MACH_MSM_CLOCK_RPM_H
+
+#include <linux/clk/msm-clk-provider.h>
+#include <soc/qcom/rpm-smd.h>
+
+#define RPM_SMD_KEY_RATE	0x007A484B
+#define RPM_SMD_KEY_ENABLE	0x62616E45
+#define RPM_SMD_KEY_STATE	0x54415453
+
+#define RPM_CLK_BUFFER_A_REQ			0x616B6C63
+#define RPM_KEY_SOFTWARE_ENABLE			0x6E657773
+#define RPM_KEY_PIN_CTRL_CLK_BUFFER_ENABLE_KEY	0x62636370
+
+struct clk_ops;
+struct clk_rpmrs_data;
+extern struct clk_ops clk_ops_rpm;
+extern struct clk_ops clk_ops_rpm_branch;
+
+struct rpm_clk {
+	int rpm_res_type;
+	int rpm_key;
+	int rpm_clk_id;
+	const int rpm_status_id;
+	bool active_only;
+	bool enabled;
+	bool branch; /* true: RPM only accepts 1 for ON and 0 for OFF */
+	struct clk_rpmrs_data *rpmrs_data;
+	struct rpm_clk *peer;
+	struct clk c;
+	uint32_t *last_active_set_vote;
+	uint32_t *last_sleep_set_vote;
+};
+
+static inline struct rpm_clk *to_rpm_clk(struct clk *clk)
+{
+	return container_of(clk, struct rpm_clk, c);
+}
+
+/*
+ * Enable RPM clock scaling on targets that expose an RPM resource for
+ * controlling it.
+ */
+int enable_rpm_scaling(void);
+
+int vote_bimc(struct rpm_clk *r, uint32_t value);
+
+extern struct clk_rpmrs_data clk_rpmrs_data_smd;
+
+/*
+ * A note on name##last_{active,sleep}_set_vote below:
+ * We track the last active and sleep set votes across both
+ * active-only and active+sleep set clocks. We use the same
+ * tracking variables for both clocks in order to keep both
+ * updated about the last vote irrespective of which clock
+ * actually made the request. This is the only way to allow
+ * optimizations that prevent duplicate requests from being sent
+ * to the RPM. Separate tracking does not work since it is not
+ * possible to know if the peer's last request was actually sent
+ * to the RPM.
+ */
+
+#define __DEFINE_CLK_RPM(name, active, type, r_id, stat_id, dep, key, \
+				rpmrsdata) \
+	static struct rpm_clk active; \
+	static uint32_t name##last_active_set_vote; \
+	static uint32_t name##last_sleep_set_vote; \
+	static struct rpm_clk name = { \
+		.rpm_res_type = (type), \
+		.rpm_clk_id = (r_id), \
+		.rpm_status_id = (stat_id), \
+		.rpm_key = (key), \
+		.peer = &active, \
+		.rpmrs_data = (rpmrsdata),\
+		.last_active_set_vote = &name##last_active_set_vote, \
+		.last_sleep_set_vote = &name##last_sleep_set_vote, \
+		.c = { \
+			.ops = &clk_ops_rpm, \
+			.dbg_name = #name, \
+			CLK_INIT(name.c), \
+			.depends = dep, \
+		}, \
+	}; \
+	static struct rpm_clk active = { \
+		.rpm_res_type = (type), \
+		.rpm_clk_id = (r_id), \
+		.rpm_status_id = (stat_id), \
+		.rpm_key = (key), \
+		.peer = &name, \
+		.active_only = true, \
+		.rpmrs_data = (rpmrsdata),\
+		.last_active_set_vote = &name##last_active_set_vote, \
+		.last_sleep_set_vote = &name##last_sleep_set_vote, \
+		.c = { \
+			.ops = &clk_ops_rpm, \
+			.dbg_name = #active, \
+			CLK_INIT(active.c), \
+			.depends = dep, \
+		}, \
+	};
+
+#define __DEFINE_CLK_RPM_BRANCH(name, active, type, r_id, stat_id, r, \
+					key, rpmrsdata) \
+	static struct rpm_clk active; \
+	static uint32_t name##last_active_set_vote; \
+	static uint32_t name##last_sleep_set_vote; \
+	static struct rpm_clk name = { \
+		.rpm_res_type = (type), \
+		.rpm_clk_id = (r_id), \
+		.rpm_status_id = (stat_id), \
+		.rpm_key = (key), \
+		.peer = &active, \
+		.branch = true, \
+		.rpmrs_data = (rpmrsdata),\
+		.last_active_set_vote = &name##last_active_set_vote, \
+		.last_sleep_set_vote = &name##last_sleep_set_vote, \
+		.c = { \
+			.ops = &clk_ops_rpm_branch, \
+			.dbg_name = #name, \
+			.rate = (r), \
+			CLK_INIT(name.c), \
+		}, \
+	}; \
+	static struct rpm_clk active = { \
+		.rpm_res_type = (type), \
+		.rpm_clk_id = (r_id), \
+		.rpm_status_id = (stat_id), \
+		.rpm_key = (key), \
+		.peer = &name, \
+		.active_only = true, \
+		.branch = true, \
+		.rpmrs_data = (rpmrsdata),\
+		.last_active_set_vote = &name##last_active_set_vote, \
+		.last_sleep_set_vote = &name##last_sleep_set_vote, \
+		.c = { \
+			.ops = &clk_ops_rpm_branch, \
+			.dbg_name = #active, \
+			.rate = (r), \
+			CLK_INIT(active.c), \
+		}, \
+	};
+
+#define DEFINE_CLK_RPM_SMD(name, active, type, r_id, dep) \
+	__DEFINE_CLK_RPM(name, active, type, r_id, 0, dep, \
+				RPM_SMD_KEY_RATE, &clk_rpmrs_data_smd)
+
+#define DEFINE_CLK_RPM_SMD_BRANCH(name, active, type, r_id, r) \
+	__DEFINE_CLK_RPM_BRANCH(name, active, type, r_id, 0, r, \
+				RPM_SMD_KEY_ENABLE, &clk_rpmrs_data_smd)
+
+#define DEFINE_CLK_RPM_SMD_QDSS(name, active, type, r_id) \
+	__DEFINE_CLK_RPM(name, active, type, r_id, \
+		0, 0, RPM_SMD_KEY_STATE, &clk_rpmrs_data_smd)
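+
+/*
+ * Example (illustrative; the resource type and ID macros are hypothetical):
+ *	DEFINE_CLK_RPM_SMD(cxo_clk_src, cxo_a_clk_src, RPM_MISC_CLK_TYPE,
+ *				CXO_ID, NULL);
+ * This defines the shared clock "cxo_clk_src" together with its
+ * active-set-only peer "cxo_a_clk_src".
+ */
+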
+/*
+ * The RPM XO buffer clock management code aggregates votes for pin-control mode
+ * and software mode separately. Software enable takes priority over pin
+ * control, and if the software-mode aggregation results in a 'disable', the
+ * buffer will be left in pin-control mode if a pin-control vote is in place.
+ */
+#define DEFINE_CLK_RPM_SMD_XO_BUFFER(name, active, r_id) \
+	__DEFINE_CLK_RPM_BRANCH(name, active, RPM_CLK_BUFFER_A_REQ, r_id, 0, \
+			1000, RPM_KEY_SOFTWARE_ENABLE, &clk_rpmrs_data_smd)
+
+#define DEFINE_CLK_RPM_SMD_XO_BUFFER_PINCTRL(name, active, r_id) \
+	__DEFINE_CLK_RPM_BRANCH(name, active, RPM_CLK_BUFFER_A_REQ, r_id, 0, \
+	1000, RPM_KEY_PIN_CTRL_CLK_BUFFER_ENABLE_KEY, &clk_rpmrs_data_smd)
+#endif
diff -Nruw linux-4.4.115/include/soc/qcom/clock-voter.h linux-4.4.115-fbx/include/soc/qcom/clock-voter.h
--- linux-4.4.115/include/soc/qcom/clock-voter.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/include/soc/qcom/clock-voter.h	2019-01-22 16:16:28.491291576 +0100
@@ -0,0 +1,51 @@
+/* Copyright (c) 2010-2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __ARCH_ARM_MACH_MSM_CLOCK_VOTER_H
+#define __ARCH_ARM_MACH_MSM_CLOCK_VOTER_H
+
+#include <linux/clk/msm-clk-provider.h>
+
+struct clk_ops;
+extern struct clk_ops clk_ops_voter;
+
+struct clk_voter {
+	int is_branch;
+	bool enabled;
+	struct clk c;
+};
+
+static inline struct clk_voter *to_clk_voter(struct clk *clk)
+{
+	return container_of(clk, struct clk_voter, c);
+}
+
+#define __DEFINE_CLK_VOTER(clk_name, _parent, _default_rate, _is_branch) \
+	struct clk_voter clk_name = { \
+		.is_branch = (_is_branch), \
+		.c = { \
+			.parent = _parent, \
+			.dbg_name = #clk_name, \
+			.ops = &clk_ops_voter, \
+			.rate = _default_rate, \
+			CLK_INIT(clk_name.c), \
+		}, \
+	}
+
+#define DEFINE_CLK_VOTER(clk_name, _parent, _default_rate) \
+	 __DEFINE_CLK_VOTER(clk_name, _parent, _default_rate, 0)
+
+#define DEFINE_CLK_BRANCH_VOTER(clk_name, _parent) \
+	 __DEFINE_CLK_VOTER(clk_name, _parent, 1000, 1)
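+
+/*
+ * Example (illustrative; the clock names are hypothetical):
+ *	DEFINE_CLK_BRANCH_VOTER(cxo_otg_clk, &cxo_clk_src.c);
+ * Multiple such voters may share one parent; the parent stays enabled as
+ * long as at least one voter is enabled.
+ */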
+
+#endif
diff -Nruw linux-4.4.115/include/soc/qcom/cx_ipeak.h linux-4.4.115-fbx/include/soc/qcom/cx_ipeak.h
--- linux-4.4.115/include/soc/qcom/cx_ipeak.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/include/soc/qcom/cx_ipeak.h	2019-01-22 16:16:28.491291576 +0100
@@ -0,0 +1,46 @@
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __SOC_COM_CX_IPEAK_H
+#define __SOC_COM_CX_IPEAK_H
+
+struct device_node;
+struct cx_ipeak_client;
+
+#ifndef CONFIG_QCOM_CX_IPEAK
+
+static inline struct cx_ipeak_client *cx_ipeak_register(
+		struct device_node *dev_node,
+		const char *client_name)
+{
+	return NULL;
+}
+
+static inline void cx_ipeak_unregister(struct cx_ipeak_client *client)
+{
+}
+
+static inline int cx_ipeak_update(struct cx_ipeak_client *ipeak_client,
+			bool vote)
+{
+	return 0;
+}
+#else
+
+struct cx_ipeak_client *cx_ipeak_register(struct device_node *dev_node,
+		const char *client_name);
+void cx_ipeak_unregister(struct cx_ipeak_client *client);
+int cx_ipeak_update(struct cx_ipeak_client *ipeak_client, bool vote);
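+
+/*
+ * Typical flow (illustrative; "my-client" is a placeholder name):
+ *	struct cx_ipeak_client *client;
+ *
+ *	client = cx_ipeak_register(dev->of_node, "my-client");
+ *	cx_ipeak_update(client, true);	vote before a peak-current burst
+ *	cx_ipeak_update(client, false);	withdraw the vote afterwards
+ */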
+
+#endif
+
+#endif /*__SOC_COM_CX_IPEAK_H*/
diff -Nruw linux-4.4.115/include/soc/qcom/event_timer.h linux-4.4.115-fbx/include/soc/qcom/event_timer.h
--- linux-4.4.115/include/soc/qcom/event_timer.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/include/soc/qcom/event_timer.h	2019-01-22 16:16:28.491291576 +0100
@@ -0,0 +1,80 @@
+/* Copyright (c) 2012, 2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __ARCH_ARM_MACH_MSM_EVENT_TIMER_H
+#define __ARCH_ARM_MACH_MSM_EVENT_TIMER_H
+
+#include <linux/hrtimer.h>
+
+struct event_timer_info;
+
+#ifdef CONFIG_MSM_EVENT_TIMER
+/**
+ * add_event_timer() : Add a wakeup event. Intended to be called
+ *                     by clients once. Returns a handle to be used
+ *                     for future transactions.
+ * @irq : Interrupt number to track affinity.
+ * @function : Callback function invoked when the event
+ *             timer expires.
+ * @data : Callback data provided by client.
+ */
+struct event_timer_info *add_event_timer(uint32_t irq,
+				void (*function)(void *), void *data);
+
+/** activate_event_timer() : Set the expiration time for an event in absolute
+ *                           ktime. This is a one-shot event timer; clients
+ *                           should call this again to set another expiration.
+ *  @event : Event handle.
+ *  @event_time : Event time in absolute ktime.
+ */
+void activate_event_timer(struct event_timer_info *event, ktime_t event_time);
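+
+/*
+ * Example (illustrative; "my_cb" and the IRQ number are hypothetical):
+ *	struct event_timer_info *et = add_event_timer(irq, my_cb, data);
+ *	activate_event_timer(et, ktime_add_ms(ktime_get(), 10));
+ * The timer is one-shot; re-arm it from the callback if needed.
+ */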
+
+/**
+ * deactivate_event_timer() : Deactivate an event timer.
+ * @event: event handle.
+ */
+void deactivate_event_timer(struct event_timer_info *event);
+
+/**
+ * destroy_event_timer() : Free the event info data structure allocated during
+ * add_event_timer().
+ * @event: event handle.
+ */
+void destroy_event_timer(struct event_timer_info *event);
+
+/**
+ * get_next_event_time() : Get the next wakeup event.
+ * @cpu : CPU to query.
+ *
+ * Returns a ktime value of the next expiring event.
+ */
+ktime_t get_next_event_time(int cpu);
+#else
+static inline void *add_event_timer(uint32_t irq, void (*function)(void *),
+						void *data)
+{
+	return NULL;
+}
+
+static inline void activate_event_timer(void *event, ktime_t event_time) {}
+
+static inline void  deactivate_event_timer(void *event) {}
+
+static inline void destroy_event_timer(void *event) {}
+
+static inline ktime_t get_next_event_time(int cpu)
+{
+	return ns_to_ktime(0);
+}
+
+#endif /* CONFIG_MSM_EVENT_TIMER */
+#endif /* __ARCH_ARM_MACH_MSM_EVENT_TIMER_H */
diff -Nruw linux-4.4.115/include/soc/qcom/glink.h linux-4.4.115-fbx/include/soc/qcom/glink.h
--- linux-4.4.115/include/soc/qcom/glink.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/include/soc/qcom/glink.h	2019-01-22 16:16:28.495291612 +0100
@@ -0,0 +1,458 @@
+/* Copyright (c) 2014-2015,2017 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#ifndef _SOC_QCOM_GLINK_H_
+#define _SOC_QCOM_GLINK_H_
+
+#include <linux/types.h>
+
+/* Maximum size (including null) for channel, edge, or transport names */
+#define GLINK_NAME_SIZE 32
+
+/* Maximum packet size for TX and RX */
+#define GLINK_MAX_PKT_SIZE SZ_1M
+
+/**
+ * G-Link Port State Notification Values
+ */
+enum {
+	GLINK_CONNECTED,
+	GLINK_LOCAL_DISCONNECTED,
+	GLINK_REMOTE_DISCONNECTED,
+};
+
+/**
+ * G-Link Open Options
+ *
+ * Used to define the glink_open_config::options field which is passed into
+ * glink_open().
+ */
+enum {
+	GLINK_OPT_INITIAL_XPORT = BIT(0),
+	GLINK_OPT_RX_INTENT_NOTIF = BIT(1),
+};
+
+/**
+ * Open configuration.
+ *
+ * priv:			Private data passed into user callbacks
+ * options:			Open option flags
+ * rx_intent_req_timeout_ms:	Timeout for requesting an RX intent, in
+ *			milliseconds; if set to 0, timeout is infinite
+ * notify_rx:			Receive notification function (required)
+ * notify_tx_done:		Transmit-done notification function (required)
+ * notify_state:		State-change notification (required)
+ * notify_rx_intent_req:	Receive intent request (optional)
+ * notify_rxv:			Receive notification function for vector buffers
+ *			(required if notify_rx is not provided)
+ * notify_sig:			Signal-change notification (optional)
+ * notify_rx_tracer_pkt:	Receive notification for tracer packet
+ * notify_remote_rx_intent:	Receive notification for remote-queued RX intent
+ *
+ * This structure is passed into the glink_open() call to set up
+ * configuration handles.  All unused fields should be set to 0.
+ *
+ * The structure is copied internally before the call to glink_open() returns.
+ */
+struct glink_open_config {
+	void *priv;
+	uint32_t options;
+
+	const char *transport;
+	const char *edge;
+	const char *name;
+	unsigned int rx_intent_req_timeout_ms;
+
+	void (*notify_rx)(void *handle, const void *priv, const void *pkt_priv,
+			const void *ptr, size_t size);
+	void (*notify_tx_done)(void *handle, const void *priv,
+			const void *pkt_priv, const void *ptr);
+	void (*notify_state)(void *handle, const void *priv, unsigned event);
+	bool (*notify_rx_intent_req)(void *handle, const void *priv,
+			size_t req_size);
+	void (*notify_rxv)(void *handle, const void *priv, const void *pkt_priv,
+			   void *iovec, size_t size,
+			   void * (*vbuf_provider)(void *iovec, size_t offset,
+						 size_t *size),
+			   void * (*pbuf_provider)(void *iovec, size_t offset,
+						 size_t *size));
+	void (*notify_rx_sigs)(void *handle, const void *priv,
+			uint32_t old_sigs, uint32_t new_sigs);
+	void (*notify_rx_abort)(void *handle, const void *priv,
+			const void *pkt_priv);
+	void (*notify_tx_abort)(void *handle, const void *priv,
+			const void *pkt_priv);
+	void (*notify_rx_tracer_pkt)(void *handle, const void *priv,
+			const void *pkt_priv, const void *ptr, size_t size);
+	void (*notify_remote_rx_intent)(void *handle, const void *priv,
+					size_t size);
+};
+
+enum glink_link_state {
+	GLINK_LINK_STATE_UP,
+	GLINK_LINK_STATE_DOWN,
+};
+
+/**
+ * Data structure containing information passed to the link state callback.
+ * transport:	String identifying the transport.
+ * edge:	String identifying the edge.
+ * link_state:	Link state (UP/DOWN).
+ */
+struct glink_link_state_cb_info {
+	const char *transport;
+	const char *edge;
+	enum glink_link_state link_state;
+};
+
+/**
+ * Data structure containing information for link state registration
+ * transport:	String identifying the transport.
+ * edge:	String identifying the edge.
+ * glink_link_state_notif_cb:	Callback function used to pass the event.
+ */
+struct glink_link_info {
+	const char *transport;
+	const char *edge;
+	void (*glink_link_state_notif_cb)(
+			struct glink_link_state_cb_info *cb_info,
+			void *priv);
+};
+
+enum tx_flags {
+	GLINK_TX_REQ_INTENT = 0x1,
+	GLINK_TX_SINGLE_THREADED = 0x2,
+	GLINK_TX_TRACER_PKT = 0x4,
+	GLINK_TX_ATOMIC = 0x8,
+};
+
+#ifdef CONFIG_MSM_GLINK
+/**
+ * Open GLINK channel.
+ *
+ * @cfg_ptr:	Open configuration structure (the structure is copied before
+ *		glink_open returns).  All unused fields should be zero-filled.
+ *
+ * This should not be called from link state callback context by clients.
+ * It is recommended that clients invoke this function from their own
+ * thread.
+ *
+ * Return:  Pointer to channel on success, PTR_ERR() with standard Linux
+ * error code on failure.
+ */
+void *glink_open(const struct glink_open_config *cfg_ptr);
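+
+/*
+ * Example (illustrative sketch; the transport/edge/name strings and the
+ * my_* callbacks are hypothetical):
+ *	struct glink_open_config cfg = {
+ *		.transport = "smem",
+ *		.edge = "mpss",
+ *		.name = "loopback",
+ *		.notify_rx = my_notify_rx,
+ *		.notify_tx_done = my_notify_tx_done,
+ *		.notify_state = my_notify_state,
+ *	};
+ *	void *handle = glink_open(&cfg);
+ *
+ *	if (IS_ERR_OR_NULL(handle))
+ *		return -ENODEV;
+ */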
+
+/**
+ * glink_close() - Close a previously opened channel.
+ *
+ * @handle:	handle to close
+ *
+ * Once the closing process has been completed, the GLINK_LOCAL_DISCONNECTED
+ * state event will be sent and the channel can be reopened.
+ *
+ * Return:  0 on success; -EINVAL for invalid handle, -EBUSY if close is
+ * already in progress, standard Linux error code otherwise.
+ */
+int glink_close(void *handle);
+
+/**
+ * glink_tx() - Transmit packet.
+ *
+ * @handle:	handle returned by glink_open()
+ * @pkt_priv:	opaque data value that will be returned to client with
+ *		notify_tx_done notification
+ * @data:	pointer to the data
+ * @size:	size of data
+ * @tx_flags:	Flags to specify transmit specific options
+ *
+ * Return:	-EINVAL for invalid handle; -EBUSY if channel isn't ready for
+ *		transmit operation (not fully opened); -EAGAIN if remote side
+ *		has not provided a receive intent that is big enough.
+ */
+int glink_tx(void *handle, void *pkt_priv, void *data, size_t size,
+							uint32_t tx_flags);
+
+/**
+ * glink_queue_rx_intent() - Register an intent to receive data.
+ *
+ * @handle:	handle returned by glink_open()
+ * @pkt_priv:	opaque data type that is returned when a packet is received
+ * @size:	maximum size of data to receive
+ *
+ * Return: 0 for success; standard Linux error code for failure case
+ */
+int glink_queue_rx_intent(void *handle, const void *pkt_priv, size_t size);
+
+/**
+ * glink_rx_intent_exists() - Check if an intent of size exists.
+ *
+ * @handle:	handle returned by glink_open()
+ * @size:	size of an intent to check or 0 for any intent
+ *
+ * Return:	true if an intent with a size greater than or equal to @size
+ *		exists, else false
+ */
+bool glink_rx_intent_exists(void *handle, size_t size);
+
+/**
+ * glink_rx_done() - Return receive buffer to remote side.
+ *
+ * @handle:	handle returned by glink_open()
+ * @ptr:	data pointer provided in the notify_rx() call
+ * @reuse:	if true, receive intent is re-used
+ *
+ * Return: 0 for success; standard Linux error code for failure case
+ */
+int glink_rx_done(void *handle, const void *ptr, bool reuse);
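+
+/*
+ * Typical receive flow (illustrative): queue an intent, consume the packet
+ * from the notify_rx() callback, then hand the buffer back:
+ *	glink_queue_rx_intent(handle, NULL, SZ_4K);
+ *	... notify_rx(handle, priv, pkt_priv, ptr, size) fires ...
+ *	glink_rx_done(handle, ptr, false);
+ */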
+
+/**
+ * glink_txv() - Transmit a packet in vector form.
+ *
+ * @handle:	handle returned by glink_open()
+ * @pkt_priv:	opaque data value that will be returned to client with
+ *		notify_tx_done notification
+ * @iovec:	pointer to the vector (must remain valid until notify_tx_done
+ *		notification)
+ * @size:	size of data/vector
+ * @vbuf_provider: Client provided helper function to iterate the vector
+ *		in physical address space
+ * @pbuf_provider: Client provided helper function to iterate the vector
+ *		in virtual address space
+ * @tx_flags:	Flags to specify transmit specific options
+ *
+ * Return: -EINVAL for invalid handle; -EBUSY if channel isn't ready for
+ *           transmit operation (not fully opened); -EAGAIN if remote side has
+ *           not provided a receive intent that is big enough.
+ */
+int glink_txv(void *handle, void *pkt_priv,
+	      void *iovec, size_t size,
+	      void * (*vbuf_provider)(void *iovec, size_t offset, size_t *size),
+	      void * (*pbuf_provider)(void *iovec, size_t offset, size_t *size),
+	      uint32_t tx_flags);
+
+/**
+ * glink_sigs_set() - Set the local signals for the GLINK channel
+ *
+ * @handle:	handle returned by glink_open()
+ * @sigs:	modified signal value
+ *
+ * Return: 0 for success; standard Linux error code for failure case
+ */
+int glink_sigs_set(void *handle, uint32_t sigs);
+
+/**
+ * glink_sigs_local_get() - Get the local signals for the GLINK channel
+ *
+ * @handle:	handle returned by glink_open()
+ * @sigs:	Pointer to hold the signals
+ *
+ * Return: 0 for success; standard Linux error code for failure case
+ */
+int glink_sigs_local_get(void *handle, uint32_t *sigs);
+
+/**
+ * glink_sigs_remote_get() - Get the Remote signals for the GLINK channel
+ *
+ * @handle:	handle returned by glink_open()
+ * @sigs:	Pointer to hold the signals
+ *
+ * Return: 0 for success; standard Linux error code for failure case
+ */
+int glink_sigs_remote_get(void *handle, uint32_t *sigs);
+
+/**
+ * glink_register_link_state_cb() - Register for link state notification
+ * @link_info:	Data structure containing the link identification and callback.
+ * @priv:	Private information to be passed with the callback.
+ *
+ * This function is used to register a notifier to receive updates about a
+ * link's/transport's state. This notifier must be registered before
+ * attempting to open a channel.
+ *
+ * Return: a reference to the notifier handle.
+ */
+void *glink_register_link_state_cb(struct glink_link_info *link_info,
+				   void *priv);
+
+/**
+ * glink_unregister_link_state_cb() - Unregister the link state notification
+ * @notif_handle:	Handle to be unregistered.
+ *
+ * This function is used to unregister a notifier to stop receiving the updates
+ * about a link's/transport's state.
+ */
+void glink_unregister_link_state_cb(void *notif_handle);
+
+/**
+ * glink_qos_latency() - Register the latency QoS requirement
+ * @handle:	Channel handle in which the latency is required.
+ * @latency_us:	Latency requirement in microseconds.
+ * @pkt_size:	Worst case packet size for which the latency is required.
+ *
+ * This function is used to register the latency requirement for a channel
+ * and ensures that the latency requirement for this channel is met without
+ * impacting the existing latency requirements of other channels.
+ *
+ * Return: 0 if QoS request is achievable, standard Linux error codes on error
+ */
+int glink_qos_latency(void *handle, unsigned long latency_us, size_t pkt_size);
+
+/**
+ * glink_qos_cancel() - Cancel or unregister the QoS request
+ * @handle:	Channel handle for which the QoS request is cancelled.
+ *
+ * This function is used to cancel/unregister the QoS requests for a channel.
+ *
+ * Return: 0 on success, standard Linux error codes on failure
+ */
+int glink_qos_cancel(void *handle);
+
+/**
+ * glink_qos_start() - Start of the transmission requiring QoS
+ * @handle:	Channel handle in which the transmit activity is performed.
+ *
+ * This function is called by clients to notify G-Link of the start of a
+ * transmission which requires a certain QoS. Clients must account for
+ * the QoS ramp time to ensure the QoS is met.
+ *
+ * Return: 0 on success, standard Linux error codes on failure
+ */
+int glink_qos_start(void *handle);
+
+/**
+ * glink_qos_get_ramp_time() - Get the QoS ramp time
+ * @handle:	Channel handle for which the QoS ramp time is required.
+ * @pkt_size:	Worst case packet size.
+ *
+ * This function is called by the clients to obtain the ramp time required
+ * to meet the QoS requirements.
+ *
+ * Return: QoS ramp time in microseconds
+ */
+unsigned long glink_qos_get_ramp_time(void *handle, size_t pkt_size);
+
+/**
+ * glink_start_rx_rt() - Vote for RT thread priority on RX.
+ * @handle:	Channel handle for which transaction are occurring.
+ *
+ * Return: 0 on success, standard Linux error codes on failure
+ */
+int glink_start_rx_rt(void *handle);
+
+/**
+ * glink_end_rx_rt() - Remove the vote for RT thread priority on RX.
+ * @handle:	Channel handle for which transaction are occurring.
+ *
+ * Return: 0 on success, standard Linux error codes on failure
+ */
+int glink_end_rx_rt(void *handle);
+
+#else /* CONFIG_MSM_GLINK */
+static inline void *glink_open(const struct glink_open_config *cfg_ptr)
+{
+	return NULL;
+}
+
+static inline int glink_close(void *handle)
+{
+	return -ENODEV;
+}
+
+static inline int glink_tx(void *handle, void *pkt_priv, void *data,
+					size_t size, uint32_t tx_flags)
+{
+	return -ENODEV;
+}
+
+static inline int glink_queue_rx_intent(void *handle, const void *pkt_priv,
+								size_t size)
+{
+	return -ENODEV;
+}
+
+static inline bool glink_rx_intent_exists(void *handle, size_t size)
+{
+	return false;
+}
+
+static inline int glink_rx_done(void *handle, const void *ptr, bool reuse)
+{
+	return -ENODEV;
+}
+
+static inline int glink_txv(void *handle, void *pkt_priv,
+	      void *iovec, size_t size,
+	      void * (*vbuf_provider)(void *iovec, size_t offset, size_t *size),
+	      void * (*pbuf_provider)(void *iovec, size_t offset, size_t *size),
+	      uint32_t tx_flags)
+{
+	return -ENODEV;
+}
+
+static inline int glink_sigs_set(void *handle, uint32_t sigs)
+{
+	return -ENODEV;
+}
+
+static inline int glink_sigs_local_get(void *handle, uint32_t *sigs)
+{
+	return -ENODEV;
+}
+
+static inline int glink_sigs_remote_get(void *handle, uint32_t *sigs)
+{
+	return -ENODEV;
+}
+
+static inline void *glink_register_link_state_cb(
+				struct glink_link_info *link_info, void *priv)
+{
+	return NULL;
+}
+
+static inline void glink_unregister_link_state_cb(void *notif_handle)
+{
+}
+
+static inline int glink_qos_latency(void *handle, unsigned long latency_us,
+				    size_t pkt_size)
+{
+	return -ENODEV;
+}
+
+static inline int glink_qos_cancel(void *handle)
+{
+	return -ENODEV;
+}
+
+static inline int glink_qos_start(void *handle)
+{
+	return -ENODEV;
+}
+
+static inline unsigned long glink_qos_get_ramp_time(void *handle,
+						    size_t pkt_size)
+{
+	return 0;
+}
+
+static inline int glink_start_rx_rt(void *handle)
+{
+	return -ENODEV;
+}
+
+static inline int glink_end_rx_rt(void *handle)
+{
+	return -ENODEV;
+}
+
+#endif /* CONFIG_MSM_GLINK */
+#endif /* _SOC_QCOM_GLINK_H_ */
diff -Nruw linux-4.4.115/include/soc/qcom/glink_rpm_xprt.h linux-4.4.115-fbx/include/soc/qcom/glink_rpm_xprt.h
--- linux-4.4.115/include/soc/qcom/glink_rpm_xprt.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/include/soc/qcom/glink_rpm_xprt.h	2019-01-22 16:16:28.495291612 +0100
@@ -0,0 +1,78 @@
+/* Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#ifndef _SOC_QCOM_GLINK_RPM_XPRT_H_
+#define _SOC_QCOM_GLINK_RPM_XPRT_H_
+
+#include <linux/types.h>
+
+#ifdef CONFIG_MSM_GLINK
+
+/**
+ * glink_rpm_rx_poll() - Poll and receive any available events
+ * @handle:	Channel handle in which this operation is performed.
+ *
+ * This function is used to poll and receive events and packets while the
+ * receive interrupt from RPM is disabled.
+ *
+ * Note that even if a value > 0 is returned, indicating that some events
+ * were processed, clients should only use the notification functions passed
+ * into glink_open() to determine if an entire packet has been received, since
+ * some events may be internal details that are not visible to clients.
+ *
+ * Return: 0 for no packets available; > 0 for events available; standard
+ * Linux error codes on failure.
+ */
+int glink_rpm_rx_poll(void *handle);
+
+/**
+ * glink_rpm_mask_rx_interrupt() - Mask or unmask the RPM receive interrupt
+ * @handle:	Channel handle in which this operation is performed.
+ * @mask:	Flag to mask or unmask the interrupt.
+ * @pstruct:	Pointer to any platform specific data.
+ *
+ * This function is used to mask or unmask the receive interrupt from RPM.
+ * "mask" set to true indicates masking the interrupt and when set to false
+ * indicates unmasking the interrupt.
+ *
+ * Return: 0 on success, standard Linux error codes on failure.
+ */
+int glink_rpm_mask_rx_interrupt(void *handle, bool mask, void *pstruct);
+
+/**
+ * glink_wait_link_down() - Return whether read/write indices in FIFO are all 0.
+ * @handle:	Channel handle in which this operation is performed.
+ *
+ * This function returns the status of the read/write indices in the FIFO.
+ *
+ * Return: 1 if the indices are all 0, 0 otherwise.
+ */
+int glink_wait_link_down(void *handle);
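+
+/*
+ * Illustrative polled-receive sequence while the RPM interrupt is masked:
+ *	glink_rpm_mask_rx_interrupt(handle, true, NULL);
+ *	while (glink_rpm_rx_poll(handle) > 0)
+ *		;
+ *	glink_rpm_mask_rx_interrupt(handle, false, NULL);
+ */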
+
+#else
+static inline int glink_rpm_rx_poll(void *handle)
+{
+	return -ENODEV;
+}
+
+static inline int glink_rpm_mask_rx_interrupt(void *handle, bool mask,
+		void *pstruct)
+{
+	return -ENODEV;
+}
+static inline int glink_wait_link_down(void *handle)
+{
+	return -ENODEV;
+}
+
+#endif /* CONFIG_MSM_GLINK */
+
+#endif /* _SOC_QCOM_GLINK_RPM_XPRT_H_ */
diff -Nruw linux-4.4.115/include/soc/qcom/hvc.h linux-4.4.115-fbx/include/soc/qcom/hvc.h
--- linux-4.4.115/include/soc/qcom/hvc.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/include/soc/qcom/hvc.h	2019-01-22 16:16:28.495291612 +0100
@@ -0,0 +1,58 @@
+/* Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MSM_HVC_H
+#define __MSM_HVC_H
+
+#ifdef CONFIG_ARM64
+#define HVC_FN_ARM_BASE				0xC0000000
+#define HVC_FN_CPU_BASE				0xC1000000
+#define HVC_FN_SIP_BASE				0xC2000000
+#define HVC_FN_OEM_BASE				0xC3000000
+#define HVC_FN_APP_BASE				0xF0000000
+#define HVC_FN_OS_BASE				0xF2000000
+#else
+#define HVC_FN_ARM_BASE				0x80000000
+#define HVC_FN_CPU_BASE				0x81000000
+#define HVC_FN_SIP_BASE				0x82000000
+#define HVC_FN_OEM_BASE				0x83000000
+#define HVC_FN_APP_BASE				0xB0000000
+#define HVC_FN_OS_BASE				0xB2000000
+#endif
+
+#define HVC_FN_ARM(n)				(HVC_FN_ARM_BASE + (n))
+#define HVC_FN_CPU(n)				(HVC_FN_CPU_BASE + (n))
+#define HVC_FN_SIP(n)				(HVC_FN_SIP_BASE + (n))
+#define HVC_FN_OEM(n)				(HVC_FN_OEM_BASE + (n))
+#define HVC_FN_APP(n)				(HVC_FN_APP_BASE + (n))
+#define HVC_FN_OS(n)				(HVC_FN_OS_BASE + (n))
+
+#define HVC_MAX_ARGS				6
+#define HVC_MAX_RETS				3
+#define HVC_MAX_EXTRA_ARGS			4
+
+struct hvc_desc {
+	u64 arg[HVC_MAX_ARGS];
+	u64 ret[HVC_MAX_RETS];
+};
+
+struct hvc_extra_args {
+	u64 arg[HVC_MAX_EXTRA_ARGS];
+};
+
+#ifdef CONFIG_MSM_HVC
+extern int hvc(u64 func_id, struct hvc_desc *desc);
+#else
+static inline int hvc(u64 func_id, struct hvc_desc *desc) { return 0; }
+#endif
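+
+/*
+ * Example (illustrative; the SIP function number 0x1 is hypothetical):
+ *	struct hvc_desc desc = { .arg = { val } };
+ *	int ret = hvc(HVC_FN_SIP(0x1), &desc);
+ *	u64 result = desc.ret[0];
+ */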
+
+#endif
diff -Nruw linux-4.4.115/include/soc/qcom/icnss.h linux-4.4.115-fbx/include/soc/qcom/icnss.h
--- linux-4.4.115/include/soc/qcom/icnss.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/include/soc/qcom/icnss.h	2019-10-29 09:26:25.529221635 +0100
@@ -0,0 +1,166 @@
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#ifndef _ICNSS_WLAN_H_
+#define _ICNSS_WLAN_H_
+
+#include <linux/interrupt.h>
+#include <linux/device.h>
+
+#define ICNSS_MAX_IRQ_REGISTRATIONS    12
+#define ICNSS_MAX_TIMESTAMP_LEN        32
+
+#ifndef ICNSS_API_WITH_DEV
+#define ICNSS_API_WITH_DEV
+#endif
+
+enum icnss_uevent {
+	ICNSS_UEVENT_FW_READY,
+	ICNSS_UEVENT_FW_CRASHED,
+	ICNSS_UEVENT_FW_DOWN,
+};
+
+enum cnss_cc_src {
+	CNSS_SOURCE_CORE,
+	CNSS_SOURCE_11D,
+	CNSS_SOURCE_USER
+};
+
+struct icnss_uevent_fw_down_data {
+	bool crashed;
+};
+
+struct icnss_uevent_data {
+	enum icnss_uevent uevent;
+	void *data;
+};
+
+struct icnss_driver_ops {
+	char *name;
+	unsigned long drv_state;
+	struct device_driver driver;
+	int (*probe)(struct device *dev);
+	void (*remove)(struct device *dev);
+	void (*shutdown)(struct device *dev);
+	int (*reinit)(struct device *dev);
+	void (*crash_shutdown)(void *pdev);
+	int (*pm_suspend)(struct device *dev);
+	int (*pm_resume)(struct device *dev);
+	int (*suspend_noirq)(struct device *dev);
+	int (*resume_noirq)(struct device *dev);
+	int (*uevent)(struct device *dev, struct icnss_uevent_data *uevent);
+};
+
+
+struct ce_tgt_pipe_cfg {
+	u32 pipe_num;
+	u32 pipe_dir;
+	u32 nentries;
+	u32 nbytes_max;
+	u32 flags;
+	u32 reserved;
+};
+
+struct ce_svc_pipe_cfg {
+	u32 service_id;
+	u32 pipe_dir;
+	u32 pipe_num;
+};
+
+struct icnss_shadow_reg_cfg {
+	u16 ce_id;
+	u16 reg_offset;
+};
+
+/* CE configuration to target */
+struct icnss_wlan_enable_cfg {
+	u32 num_ce_tgt_cfg;
+	struct ce_tgt_pipe_cfg *ce_tgt_cfg;
+	u32 num_ce_svc_pipe_cfg;
+	struct ce_svc_pipe_cfg *ce_svc_cfg;
+	u32 num_shadow_reg_cfg;
+	struct icnss_shadow_reg_cfg *shadow_reg_cfg;
+};
+
+/* driver modes */
+enum icnss_driver_mode {
+	ICNSS_MISSION,
+	ICNSS_FTM,
+	ICNSS_EPPING,
+	ICNSS_WALTEST,
+	ICNSS_OFF,
+	ICNSS_CCPM,
+	ICNSS_QVIT,
+};
+
+struct icnss_soc_info {
+	void __iomem *v_addr;
+	phys_addr_t p_addr;
+	uint32_t chip_id;
+	uint32_t chip_family;
+	uint32_t board_id;
+	uint32_t soc_id;
+	uint32_t fw_version;
+	char fw_build_timestamp[ICNSS_MAX_TIMESTAMP_LEN + 1];
+};
+
+#define icnss_register_driver(ops)		\
+	__icnss_register_driver(ops, THIS_MODULE, KBUILD_MODNAME)
+extern int __icnss_register_driver(struct icnss_driver_ops *ops,
+				   struct module *owner, const char *mod_name);
+
+extern int icnss_unregister_driver(struct icnss_driver_ops *ops);
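+
+/*
+ * Example (illustrative; the wlan_* callbacks are placeholders):
+ *	static struct icnss_driver_ops wlan_ops = {
+ *		.name = "wlan",
+ *		.probe = wlan_probe,
+ *		.remove = wlan_remove,
+ *	};
+ *
+ *	icnss_register_driver(&wlan_ops);
+ */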
+
+extern int icnss_wlan_enable(struct device *dev,
+			     struct icnss_wlan_enable_cfg *config,
+			     enum icnss_driver_mode mode,
+			     const char *host_version);
+extern int icnss_wlan_disable(struct device *dev, enum icnss_driver_mode mode);
+extern void icnss_enable_irq(struct device *dev, unsigned int ce_id);
+extern void icnss_disable_irq(struct device *dev, unsigned int ce_id);
+extern int icnss_get_soc_info(struct device *dev, struct icnss_soc_info *info);
+extern int icnss_ce_free_irq(struct device *dev, unsigned int ce_id, void *ctx);
+extern int icnss_ce_request_irq(struct device *dev, unsigned int ce_id,
+	irqreturn_t (*handler)(int, void *),
+	unsigned long flags, const char *name, void *ctx);
+extern int icnss_get_ce_id(struct device *dev, int irq);
+extern int icnss_set_fw_log_mode(struct device *dev, uint8_t fw_log_mode);
+extern int icnss_athdiag_read(struct device *dev, uint32_t offset,
+			      uint32_t mem_type, uint32_t data_len,
+			      uint8_t *output);
+extern int icnss_athdiag_write(struct device *dev, uint32_t offset,
+			       uint32_t mem_type, uint32_t data_len,
+			       uint8_t *input);
+extern int icnss_get_irq(struct device *dev, int ce_id);
+extern int icnss_power_on(struct device *dev);
+extern int icnss_power_off(struct device *dev);
+extern struct dma_iommu_mapping *icnss_smmu_get_mapping(struct device *dev);
+extern int icnss_smmu_map(struct device *dev, phys_addr_t paddr,
+			  uint32_t *iova_addr, size_t size);
+extern unsigned int icnss_socinfo_get_serial_number(struct device *dev);
+extern int icnss_set_wlan_unsafe_channel(u16 *unsafe_ch_list, u16 ch_count);
+extern int icnss_get_wlan_unsafe_channel(u16 *unsafe_ch_list, u16 *ch_count,
+					 u16 buf_len);
+extern int icnss_wlan_set_dfs_nol(const void *info, u16 info_len);
+extern int icnss_wlan_get_dfs_nol(void *info, u16 info_len);
+extern bool icnss_is_qmi_disable(struct device *dev);
+extern bool icnss_is_fw_ready(void);
+extern bool icnss_is_fw_down(void);
+extern int icnss_set_wlan_mac_address(const u8 *in, const uint32_t len);
+extern u8 *icnss_get_wlan_mac_address(struct device *dev, uint32_t *num);
+extern int icnss_trigger_recovery(struct device *dev);
+extern void cnss_set_cc_source(enum cnss_cc_src cc_source);
+extern enum cnss_cc_src cnss_get_cc_source(void);
+extern int icnss_get_driver_load_cnt(void);
+extern void icnss_increment_driver_load_cnt(void);
+extern void icnss_set_cc_source(enum cnss_cc_src cc_source);
+extern enum cnss_cc_src icnss_get_cc_source(void);
+#endif /* _ICNSS_WLAN_H_ */
diff -Nruw linux-4.4.115/include/soc/qcom/irq-helper.h linux-4.4.115-fbx/include/soc/qcom/irq-helper.h
--- linux-4.4.115/include/soc/qcom/irq-helper.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/include/soc/qcom/irq-helper.h	2019-01-22 16:16:28.495291612 +0100
@@ -0,0 +1,20 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __SOC_QCOM_IRQ_HELPER_H
+#define __SOC_QCOM_IRQ_HELPER_H
+
+int irq_blacklist_on(void);
+int irq_blacklist_off(void);
+
+#endif
+
diff -Nruw linux-4.4.115/include/soc/qcom/jtag.h linux-4.4.115-fbx/include/soc/qcom/jtag.h
--- linux-4.4.115/include/soc/qcom/jtag.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/include/soc/qcom/jtag.h	2019-01-22 16:16:28.495291612 +0100
@@ -0,0 +1,54 @@
+/* Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MACH_JTAG_H
+#define __MACH_JTAG_H
+
+#if defined(CONFIG_MSM_JTAG) || defined(CONFIG_MSM_JTAG_MM) || \
+	defined(CONFIG_MSM_JTAGV8)
+extern void msm_jtag_save_state(void);
+extern void msm_jtag_restore_state(void);
+extern void msm_jtag_etm_save_state(void);
+extern void msm_jtag_etm_restore_state(void);
+extern bool msm_jtag_fuse_apps_access_disabled(void);
+#else
+static inline void msm_jtag_save_state(void) {}
+static inline void msm_jtag_restore_state(void) {}
+static inline void msm_jtag_etm_save_state(void) {}
+static inline void msm_jtag_etm_restore_state(void) {}
+static inline bool msm_jtag_fuse_apps_access_disabled(void) { return false; }
+#endif
+#ifdef CONFIG_MSM_JTAGV8
+extern int msm_jtag_save_register(struct notifier_block *nb);
+extern int msm_jtag_save_unregister(struct notifier_block *nb);
+extern int msm_jtag_restore_register(struct notifier_block *nb);
+extern int msm_jtag_restore_unregister(struct notifier_block *nb);
+#else
+static inline int msm_jtag_save_register(struct notifier_block *nb)
+{
+	return 0;
+}
+static inline int msm_jtag_save_unregister(struct notifier_block *nb)
+{
+	return 0;
+}
+static inline int msm_jtag_restore_register(struct notifier_block *nb)
+{
+	return 0;
+}
+static inline int msm_jtag_restore_unregister(struct notifier_block *nb)
+{
+	return 0;
+}
+#endif
+
+#endif
diff -Nruw linux-4.4.115/include/soc/qcom/lpm-stats.h linux-4.4.115-fbx/include/soc/qcom/lpm-stats.h
--- linux-4.4.115/include/soc/qcom/lpm-stats.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/include/soc/qcom/lpm-stats.h	2019-01-22 16:16:28.495291612 +0100
@@ -0,0 +1,95 @@
+/*
+ * Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __ARCH_ARM_MACH_MSM_LPM_STATS_H
+#define __ARCH_ARM_MACH_MSM_LPM_STATS_H
+
+struct lpm_stats;
+
+#define MAX_STR_LEN 256
+
+struct lifo_stats {
+	uint32_t last_in;
+	uint32_t first_out;
+};
+
+struct lpm_stats {
+	char name[MAX_STR_LEN];
+	struct level_stats *time_stats;
+	uint32_t num_levels;
+	struct lifo_stats lifo;
+	struct lpm_stats *parent;
+	struct list_head sibling;
+	struct list_head child;
+	struct cpumask mask;
+	struct dentry *directory;
+	int64_t sleep_time;
+	bool is_cpu;
+};
+
+
+
+#ifdef CONFIG_MSM_IDLE_STATS
+struct lpm_stats *lpm_stats_config_level(const char *name,
+	const char **levels, int num_levels, struct lpm_stats *parent,
+	struct cpumask *mask);
+void lpm_stats_cluster_enter(struct lpm_stats *stats, uint32_t index);
+void lpm_stats_cluster_exit(struct lpm_stats *stats, uint32_t index,
+				bool success);
+void lpm_stats_cpu_enter(uint32_t index, uint64_t time);
+void lpm_stats_cpu_exit(uint32_t index, uint64_t time, bool success);
+void lpm_stats_suspend_enter(void);
+void lpm_stats_suspend_exit(void);
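+
+/*
+ * Typical instrumentation (illustrative; "L2" and the level names are
+ * placeholders):
+ *	stats = lpm_stats_config_level("L2", level_names, num_levels,
+ *					parent_stats, cpu_mask);
+ *	lpm_stats_cluster_enter(stats, idx);		on low-power entry
+ *	lpm_stats_cluster_exit(stats, idx, true);	on successful exit
+ */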
+#else
+static inline struct lpm_stats *lpm_stats_config_level(const char *name,
+	const char **levels, int num_levels, struct lpm_stats *parent,
+	struct cpumask *mask)
+{
+	return ERR_PTR(-ENODEV);
+}
+
+static inline void lpm_stats_cluster_enter(struct lpm_stats *stats,
+						uint32_t index)
+{
+	return;
+}
+
+static inline void lpm_stats_cluster_exit(struct lpm_stats *stats,
+					uint32_t index, bool success)
+{
+	return;
+}
+
+static inline void lpm_stats_cpu_enter(uint32_t index, uint64_t time)
+{
+	return;
+}
+
+static inline void lpm_stats_cpu_exit(uint32_t index, uint64_t time,
+							bool success)
+{
+	return;
+}
+
+static inline void lpm_stats_suspend_enter(void)
+{
+	return;
+}
+
+static inline void lpm_stats_suspend_exit(void)
+{
+	return;
+}
+#endif
+#endif  /* __ARCH_ARM_MACH_MSM_LPM_STATS_H */
diff -Nruw linux-4.4.115/include/soc/qcom/memory_dump.h linux-4.4.115-fbx/include/soc/qcom/memory_dump.h
--- linux-4.4.115/include/soc/qcom/memory_dump.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/include/soc/qcom/memory_dump.h	2019-01-22 16:16:28.495291612 +0100
@@ -0,0 +1,129 @@
+/* Copyright (c) 2012, 2014-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MSM_MEMORY_DUMP_H
+#define __MSM_MEMORY_DUMP_H
+
+#include <linux/types.h>
+
+enum dump_client_type {
+	MSM_CPU_CTXT = 0,
+	MSM_L1_CACHE,
+	MSM_L2_CACHE,
+	MSM_OCMEM,
+	MSM_TMC_ETFETB,
+	MSM_ETM0_REG,
+	MSM_ETM1_REG,
+	MSM_ETM2_REG,
+	MSM_ETM3_REG,
+	MSM_TMC0_REG, /* TMC_ETR */
+	MSM_TMC1_REG, /* TMC_ETF */
+	MSM_LOG_BUF,
+	MSM_LOG_BUF_FIRST_IDX,
+	MAX_NUM_CLIENTS,
+};
+
+struct msm_client_dump {
+	enum dump_client_type id;
+	unsigned long start_addr;
+	unsigned long end_addr;
+};
+
+#ifdef CONFIG_QCOM_MEMORY_DUMP
+extern int msm_dump_tbl_register(struct msm_client_dump *client_entry);
+#else
+static inline int msm_dump_tbl_register(struct msm_client_dump *entry)
+{
+	return -EIO;
+}
+#endif
+
+
+#if defined(CONFIG_QCOM_MEMORY_DUMP) || defined(CONFIG_QCOM_MEMORY_DUMP_V2)
+extern uint32_t msm_dump_table_version(void);
+#else
+static inline uint32_t msm_dump_table_version(void)
+{
+	return 0;
+}
+#endif
+
+#define MSM_DUMP_MAKE_VERSION(ma, mi)	(((ma) << 20) | (mi))
+#define MSM_DUMP_MAJOR(val)		((val) >> 20)
+#define MSM_DUMP_MINOR(val)		((val) & 0xFFFFF)
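+
+/* For instance, MSM_DUMP_MAKE_VERSION(1, 1) yields 0x100001: the major
+ * version lives in bits [31:20], the minor version in bits [19:0].
+ */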
+
+
+#define MAX_NUM_ENTRIES		0x120
+
+enum msm_dump_data_ids {
+	MSM_DUMP_DATA_CPU_CTX = 0x00,
+	MSM_DUMP_DATA_L1_INST_CACHE = 0x60,
+	MSM_DUMP_DATA_L1_DATA_CACHE = 0x80,
+	MSM_DUMP_DATA_ETM_REG = 0xA0,
+	MSM_DUMP_DATA_L2_CACHE = 0xC0,
+	MSM_DUMP_DATA_L3_CACHE = 0xD0,
+	MSM_DUMP_DATA_OCMEM = 0xE0,
+	MSM_DUMP_DATA_CNSS_WLAN = 0xE1,
+	MSM_DUMP_DATA_WIGIG = 0xE2,
+	MSM_DUMP_DATA_PMIC = 0xE4,
+	MSM_DUMP_DATA_DBGUI_REG = 0xE5,
+	MSM_DUMP_DATA_DCC_REG = 0xE6,
+	MSM_DUMP_DATA_DCC_SRAM = 0xE7,
+	MSM_DUMP_DATA_MISC = 0xE8,
+	MSM_DUMP_DATA_VSENSE = 0xE9,
+	MSM_DUMP_DATA_RPM = 0xEA,
+	MSM_DUMP_DATA_SCANDUMP = 0xEB,
+	MSM_DUMP_DATA_TMC_ETF = 0xF0,
+	MSM_DUMP_DATA_TMC_REG = 0x100,
+	MSM_DUMP_DATA_LOG_BUF = 0x110,
+	MSM_DUMP_DATA_LOG_BUF_FIRST_IDX = 0x111,
+	MSM_DUMP_DATA_MAX = MAX_NUM_ENTRIES,
+};
+
+enum msm_dump_table_ids {
+	MSM_DUMP_TABLE_APPS,
+	MSM_DUMP_TABLE_MAX = MAX_NUM_ENTRIES,
+};
+
+enum msm_dump_type {
+	MSM_DUMP_TYPE_DATA,
+	MSM_DUMP_TYPE_TABLE,
+};
+
+struct msm_dump_data {
+	uint32_t version;
+	uint32_t magic;
+	char name[32];
+	uint64_t addr;
+	uint64_t len;
+	uint32_t reserved;
+};
+
+struct msm_dump_entry {
+	uint32_t id;
+	char name[32];
+	uint32_t type;
+	uint64_t addr;
+};
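+
+/*
+ * Example registration (illustrative; the dump buffer is hypothetical):
+ *	static struct msm_dump_data dump_data;
+ *	struct msm_dump_entry entry = {
+ *		.id = MSM_DUMP_DATA_MISC,
+ *		.addr = virt_to_phys(&dump_data),
+ *	};
+ *
+ *	dump_data.addr = buf_phys;
+ *	dump_data.len = buf_len;
+ *	msm_dump_data_register(MSM_DUMP_TABLE_APPS, &entry);
+ */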
+
+#ifdef CONFIG_QCOM_MEMORY_DUMP_V2
+extern int msm_dump_data_register(enum msm_dump_table_ids id,
+				  struct msm_dump_entry *entry);
+#else
+static inline int msm_dump_data_register(enum msm_dump_table_ids id,
+					 struct msm_dump_entry *entry)
+{
+	return -ENOSYS;
+}
+#endif
+
+#endif
diff -Nruw linux-4.4.115/include/soc/qcom/minidump.h linux-4.4.115-fbx/include/soc/qcom/minidump.h
--- linux-4.4.115/include/soc/qcom/minidump.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/include/soc/qcom/minidump.h	2019-01-22 16:16:28.495291612 +0100
@@ -0,0 +1,54 @@
+/* Copyright (c) 2017 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MINIDUMP_H
+#define __MINIDUMP_H
+
+#define MAX_NAME_LENGTH		16
+/* md_region - Minidump table entry
+ * @name:	Entry name, Minidump will dump the binary with this name.
+ * @id:		Entry ID, used only for SDI dumps.
+ * @virt_addr:	Virtual address of the entry.
+ * @phys_addr:	Physical address of the entry to dump.
+ * @size:	Number of bytes to dump from the entry's address;
+ *		it should be 4-byte aligned.
+ */
+struct md_region {
+	char	name[MAX_NAME_LENGTH];
+	u32	id;
+	u64	virt_addr;
+	u64	phys_addr;
+	u64	size;
+};
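+
+/*
+ * Example (illustrative; "KLOGBUF" and the log buffer are placeholders):
+ *	struct md_region entry = {
+ *		.name = "KLOGBUF",
+ *		.virt_addr = (u64)(uintptr_t)log_buf,
+ *		.phys_addr = virt_to_phys(log_buf),
+ *		.size = log_buf_len,
+ *	};
+ *
+ *	msm_minidump_add_region(&entry);
+ */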
+
+/* Register an entry in the Minidump table
+ * Returns:
+ *	Zero on successful addition
+ *	Negative error number on failure
+ */
+#ifdef CONFIG_QCOM_MINIDUMP
+extern int msm_minidump_add_region(const struct md_region *entry);
+/* Set to true if the minidump table is initialized */
+extern bool minidump_enabled;
+extern void dump_stack_minidump(u64 sp);
+#else
+static inline int msm_minidump_add_region(const struct md_region *entry)
+{
+	/* Return quietly, if minidump is not supported */
+	return 0;
+}
+
+static inline void dump_stack_minidump(u64 sp) {}
+#endif
+
+
+#endif
diff -Nruw linux-4.4.115/include/soc/qcom/msm-clock-controller.h linux-4.4.115-fbx/include/soc/qcom/msm-clock-controller.h
--- linux-4.4.115/include/soc/qcom/msm-clock-controller.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/include/soc/qcom/msm-clock-controller.h	2019-01-22 16:16:28.495291612 +0100
@@ -0,0 +1,144 @@
+/*
+ * Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __ARCH_ARM_MSM_CLOCK_CONTROLLER_H
+#define __ARCH_ARM_MSM_CLOCK_CONTROLLER_H
+
+#include <linux/list.h>
+#include <linux/clkdev.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+
+#define dt_err(np, fmt, ...) \
+	pr_err("%s: " fmt, np->name, ##__VA_ARGS__)
+#define dt_prop_err(np, str, fmt, ...) \
+	dt_err(np, "%s: " fmt, str, ##__VA_ARGS__)
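+
+/*
+ * e.g. dt_prop_err(np, "clock-names", "missing property\n") logs
+ * "<node name>: clock-names: missing property".
+ */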
+
+/**
+ * struct msmclk_parser
+ * @compatible
+ *      Matches the compatible property from devicetree.
+ * @parsedt
+ *      Constructs and returns an instance of the appropriate object
+ *      based on the data from devicetree.
+ */
+struct msmclk_parser {
+	struct list_head list;
+	char *compatible;
+	void * (*parsedt)(struct device *dev, struct device_node *of);
+};
+
+#define MSMCLK_PARSER(fn, str, id) \
+static struct msmclk_parser _msmclk_##fn##id = {		\
+	.list = LIST_HEAD_INIT(_msmclk_##fn##id.list),		\
+	.compatible = str,					\
+	.parsedt = fn,						\
+};								\
+static int __init _msmclk_init_##fn##id(void)			\
+{								\
+	msmclk_parser_register(&_msmclk_##fn##id);		\
+	return 0;						\
+}								\
+early_initcall(_msmclk_init_##fn##id)
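+
+/*
+ * Example (illustrative sketch; "fixed_clk_dt_parser" and its compatible
+ * string are hypothetical):
+ *
+ *	static void *fixed_clk_dt_parser(struct device *dev,
+ *					 struct device_node *np)
+ *	{
+ *		...parse np and return the new clock object...
+ *	}
+ *	MSMCLK_PARSER(fixed_clk_dt_parser, "qcom,fixed-clk", 0);
+ */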
+
+/*
+ * struct msmclk_data
+ * @base
+ *      ioremapped region for sub_devices
+ * @list
+ *	tracks all registered driver instances
+ * @htable
+ *	tracks all registered child clocks
+ * @clk_tbl
+ *      array of clk_lookup to be registered with the clock framework
+ */
+#define HASHTABLE_SIZE 200
+struct msmclk_data {
+	void __iomem *base;
+	struct device *dev;
+	struct list_head list;
+	struct hlist_head htable[HASHTABLE_SIZE];
+	struct clk_lookup *clk_tbl;
+	int clk_tbl_size;
+	int max_clk_tbl_size;
+};
+
+#if defined(CONFIG_MSM_CLK_CONTROLLER_V2)
+
+/* Utility functions */
+int of_property_count_phandles(struct device_node *np, char *propname);
+int of_property_read_phandle_index(struct device_node *np, char *propname,
+					int index, phandle *p);
+void *msmclk_generic_clk_init(struct device *dev, struct device_node *np,
+				struct clk *c);
+
+/*
+ * msmclk_parser_register
+ *      Registers a parser which will be matched with a node from dt
+ *      according to the compatible string.
+ */
+void msmclk_parser_register(struct msmclk_parser *);
+
+/*
+ * msmclk_parse_phandle
+ *      On hashtable miss, the corresponding entry will be retrieved from
+ *      devicetree, and added to the hashtable.
+ */
+void *msmclk_parse_phandle(struct device *dev, phandle key);
+/*
+ * msmclk_lookup_phandle
+ *	Straightforward hashtable lookup
+ */
+void *msmclk_lookup_phandle(struct device *dev, phandle key);
+
+int __init msmclk_init(void);
+#else
+
+static inline int of_property_count_phandles(struct device_node *np,
+			char *propname)
+{
+	return 0;
+}
+
+static inline int of_property_read_phandle_index(struct device_node *np,
+			char *propname, int index, phandle *p)
+{
+	return 0;
+}
+
+static inline void *msmclk_generic_clk_init(struct device *dev,
+				struct device_node *np, struct clk *c)
+{
+	return ERR_PTR(-EINVAL);
+}
+
+static inline void msmclk_parser_register(struct msmclk_parser *p) {}
+
+static inline void *msmclk_parse_phandle(struct device *dev, phandle key)
+{
+	return ERR_PTR(-EINVAL);
+}
+
+static inline void *msmclk_lookup_phandle(struct device *dev, phandle key)
+{
+	return ERR_PTR(-EINVAL);
+}
+
+static inline int __init msmclk_init(void)
+{
+	return 0;
+}
+
+#endif /* CONFIG_MSM_CLK_CONTROLLER_V2 */
+#endif /* __ARCH_ARM_MSM_CLOCK_CONTROLLER_H */
diff -Nruw linux-4.4.115/include/soc/qcom/msm-core.h linux-4.4.115-fbx/include/soc/qcom/msm-core.h
--- linux-4.4.115/include/soc/qcom/msm-core.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/include/soc/qcom/msm-core.h	2019-01-22 16:16:28.495291612 +0100
@@ -0,0 +1,24 @@
+/*
+ * Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __ARCH_ARM_MACH_MSM_CORE_H
+#define __ARCH_ARM_MACH_MSM_CORE_H
+#ifdef CONFIG_APSS_CORE_EA
+void set_cpu_throttled(struct cpumask *mask, bool throttling);
+struct blocking_notifier_head *get_power_update_notifier(void);
+#else
+static inline void set_cpu_throttled(struct cpumask *mask, bool throttling) {}
+static inline struct blocking_notifier_head *get_power_update_notifier(void)
+{
+	return NULL;
+}
+#endif
+#endif
+
diff -Nruw linux-4.4.115/include/soc/qcom/msm_qmi_interface.h linux-4.4.115-fbx/include/soc/qcom/msm_qmi_interface.h
--- linux-4.4.115/include/soc/qcom/msm_qmi_interface.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/include/soc/qcom/msm_qmi_interface.h	2019-01-22 16:16:28.495291612 +0100
@@ -0,0 +1,502 @@
+/* Copyright (c) 2012-2015, 2017 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _MSM_QMI_INTERFACE_H_
+#define _MSM_QMI_INTERFACE_H_
+
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/mm.h>
+#include <linux/list.h>
+#include <linux/socket.h>
+#include <linux/gfp.h>
+#include <linux/qmi_encdec.h>
+#include <linux/workqueue.h>
+
+#define QMI_COMMON_TLV_TYPE 0
+
+enum qmi_event_type {
+	QMI_RECV_MSG = 1,
+	QMI_SERVER_ARRIVE,
+	QMI_SERVER_EXIT,
+};
+
+/**
+ * struct qmi_handle - QMI Handle Data Structure
+ * @handle_hash: Hash Table Node in which this handle is present.
+ * @src_port: Pointer to port used for message exchange.
+ * @ctl_port: Pointer to port used for out-of-band event exchange.
+ * @handle_type: Type of handle(Service/Client).
+ * @next_txn_id: Transaction ID of the next outgoing request.
+ * @handle_wq: Workqueue to handle any handle-specific events.
+ * @handle_lock: Lock to protect access to elements in the handle.
+ * @notify_lock: Lock to protect and generate notification atomically.
+ * @notify: Function to notify the handle owner of an event.
+ * @notify_priv: Private info to be passed during the notification.
+ * @handle_reset: Flag to hold the reset state of the handle.
+ * @reset_waitq: Wait queue to wait for any reset events.
+ * @ctl_work: Work to handle the out-of-band events for this handle.
+ * @dest_info: Destination to which this handle is connected.
+ * @dest_service_id: Service ID of the service the client is connected to.
+ * @txn_list: List of transactions waiting for the response.
+ * @ind_cb: Function to notify the handle owner of an indication message.
+ * @ind_cb_priv: Private info to be passed during an indication notification.
+ * @resume_tx_work: Work to resume the tx when the transport is not busy.
+ * @pending_txn_list: List of requests pending tx due to busy transport.
+ * @conn_list: List of connections handled by the service.
+ * @svc_ops_options: Service specific operations and options.
+ */
+struct qmi_handle {
+	struct hlist_node handle_hash;
+	void *src_port;
+	void *ctl_port;
+	unsigned handle_type;
+	uint16_t next_txn_id;
+	struct workqueue_struct *handle_wq;
+	struct mutex handle_lock;
+	spinlock_t notify_lock;
+	void (*notify)(struct qmi_handle *handle, enum qmi_event_type event,
+			void *notify_priv);
+	void *notify_priv;
+	int handle_reset;
+	wait_queue_head_t reset_waitq;
+	struct delayed_work ctl_work;
+
+	/* Client specific elements */
+	void *dest_info;
+	uint32_t dest_service_id;
+	struct list_head txn_list;
+	void (*ind_cb)(struct qmi_handle *handle,
+			unsigned int msg_id, void *msg,
+			unsigned int msg_len, void *ind_cb_priv);
+	void *ind_cb_priv;
+	struct delayed_work resume_tx_work;
+	struct list_head pending_txn_list;
+
+	/* Service specific elements */
+	struct list_head conn_list;
+	struct qmi_svc_ops_options *svc_ops_options;
+};
+
+enum qmi_result_type_v01 {
+	/* To force a 32 bit signed enum. Do not change or use. */
+	QMI_RESULT_TYPE_MIN_ENUM_VAL_V01 = INT_MIN,
+	QMI_RESULT_SUCCESS_V01 = 0,
+	QMI_RESULT_FAILURE_V01 = 1,
+	QMI_RESULT_TYPE_MAX_ENUM_VAL_V01 = INT_MAX,
+};
+
+enum qmi_error_type_v01 {
+	/* To force a 32 bit signed enum. Do not change or use. */
+	QMI_ERR_TYPE_MIN_ENUM_VAL_V01 = INT_MIN,
+	QMI_ERR_NONE_V01 = 0x0000,
+	QMI_ERR_MALFORMED_MSG_V01 = 0x0001,
+	QMI_ERR_NO_MEMORY_V01 = 0x0002,
+	QMI_ERR_INTERNAL_V01 = 0x0003,
+	QMI_ERR_CLIENT_IDS_EXHAUSTED_V01 = 0x0005,
+	QMI_ERR_INVALID_ID_V01 = 0x0029,
+	QMI_ERR_ENCODING_V01 = 0x003A,
+	QMI_ERR_DISABLED_V01 = 0x0045,
+	QMI_ERR_INCOMPATIBLE_STATE_V01 = 0x005A,
+	QMI_ERR_NOT_SUPPORTED_V01 = 0x005E,
+	QMI_ERR_TYPE_MAX_ENUM_VAL_V01 = INT_MAX,
+};
+
+struct qmi_response_type_v01 {
+	enum qmi_result_type_v01 result;
+	enum qmi_error_type_v01 error;
+};
+
+/**
+ * qmi_svc_ops_options - Operations and options to be specified when
+ *                       a service registers.
+ * @version: Version field to identify the ops_options structure.
+ * @service_id: Service ID of the service.
+ * @service_vers: Version to identify the client-service compatibility.
+ * @service_ins: Instance ID registered by the service.
+ * @connect_cb: Callback when a new client connects with the service.
+ * @disconnect_cb: Callback when the client exits the connection.
+ * @req_desc_cb: Callback to get request structure and its descriptor
+ *               for a message id.
+ * @req_cb: Callback to process the request.
+ */
+struct qmi_svc_ops_options {
+	unsigned version;
+	uint32_t service_id;
+	uint32_t service_vers;
+	uint32_t service_ins;
+	int (*connect_cb)(struct qmi_handle *handle,
+			  void *conn_handle);
+	int (*disconnect_cb)(struct qmi_handle *handle,
+			     void *conn_handle);
+	int (*req_desc_cb)(unsigned int msg_id,
+			   struct msg_desc **req_desc);
+	int (*req_cb)(struct qmi_handle *handle,
+		      void *conn_handle,
+		      void *req_handle,
+		      unsigned int msg_id,
+		      void *req);
+};
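+
+/*
+ * A service typically fills this structure once and hands it to
+ * qmi_svc_register() (illustrative sketch; the IDs and callbacks are
+ * hypothetical):
+ *
+ *	static struct qmi_svc_ops_options my_svc_ops = {
+ *		.version = 1,
+ *		.service_id = MY_SERVICE_ID,
+ *		.service_vers = 1,
+ *		.service_ins = 0,
+ *		.connect_cb = my_connect_cb,
+ *		.disconnect_cb = my_disconnect_cb,
+ *		.req_desc_cb = my_req_desc_cb,
+ *		.req_cb = my_req_cb,
+ *	};
+ *	rc = qmi_svc_register(handle, &my_svc_ops);
+ */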
+
+#ifdef CONFIG_MSM_QMI_INTERFACE
+
+/* Element info array describing common qmi response structure */
+extern struct elem_info qmi_response_type_v01_ei[];
+#define get_qmi_response_type_v01_ei() qmi_response_type_v01_ei
+
+/**
+ * qmi_handle_create() - Create a QMI handle
+ * @notify: Callback to notify events on the handle created.
+ * @notify_priv: Private information to be passed along with the notification.
+ *
+ * @return: Valid QMI handle on success, NULL on error.
+ */
+struct qmi_handle *qmi_handle_create(
+	void (*notify)(struct qmi_handle *handle,
+		       enum qmi_event_type event, void *notify_priv),
+	void *notify_priv);
+
+/**
+ * qmi_handle_destroy() - Destroy the QMI handle
+ * @handle: QMI handle to be destroyed.
+ *
+ * @return: 0 on success, < 0 on error.
+ */
+int qmi_handle_destroy(struct qmi_handle *handle);
+
+/**
+ * qmi_register_ind_cb() - Register the indication callback function
+ * @handle: QMI handle with which the function is registered.
+ * @ind_cb: Callback function to be registered.
+ * @ind_cb_priv: Private data to be passed with the indication callback.
+ *
+ * @return: 0 on success, < 0 on error.
+ */
+int qmi_register_ind_cb(struct qmi_handle *handle,
+	void (*ind_cb)(struct qmi_handle *handle,
+		       unsigned int msg_id, void *msg,
+		       unsigned int msg_len, void *ind_cb_priv),
+	void *ind_cb_priv);
+
+/**
+ * qmi_send_req_wait() - Send a synchronous QMI request
+ * @handle: QMI handle through which the QMI request is sent.
+ * @req_desc: Structure describing the request data structure.
+ * @req: Buffer containing the request data structure.
+ * @req_len: Length of the request data structure.
+ * @resp_desc: Structure describing the response data structure.
+ * @resp: Buffer to hold the response data structure.
+ * @resp_len: Length of the response data structure.
+ * @timeout_ms: Timeout before a response is received.
+ *
+ * @return: 0 on success, < 0 on error.
+ */
+int qmi_send_req_wait(struct qmi_handle *handle,
+		      struct msg_desc *req_desc,
+		      void *req, unsigned int req_len,
+		      struct msg_desc *resp_desc,
+		      void *resp, unsigned int resp_len,
+		      unsigned long timeout_ms);
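+
+/*
+ * Typical synchronous client sequence (illustrative sketch; the service
+ * and message IDs, descriptors and buffers are hypothetical):
+ *
+ *	handle = qmi_handle_create(my_notify_cb, NULL);
+ *	rc = qmi_connect_to_service(handle, MY_SERVICE_ID,
+ *				    MY_SERVICE_VERS, MY_SERVICE_INS);
+ *	rc = qmi_send_req_wait(handle, &req_desc, &req, sizeof(req),
+ *			       &resp_desc, &resp, sizeof(resp), 5000);
+ *	qmi_handle_destroy(handle);
+ */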
+
+/**
+ * qmi_send_req_nowait() - Send an asynchronous QMI request
+ * @handle: QMI handle through which the QMI request is sent.
+ * @req_desc: Structure describing the request data structure.
+ * @req: Buffer containing the request data structure.
+ * @req_len: Length of the request data structure.
+ * @resp_desc: Structure describing the response data structure.
+ * @resp: Buffer to hold the response data structure.
+ * @resp_len: Length of the response data structure.
+ * @resp_cb: Callback function to be invoked when the response arrives.
+ * @resp_cb_data: Private information to be passed along with the callback.
+ *
+ * @return: 0 on success, < 0 on error.
+ */
+int qmi_send_req_nowait(struct qmi_handle *handle,
+			struct msg_desc *req_desc,
+			void *req, unsigned int req_len,
+			struct msg_desc *resp_desc,
+			void *resp, unsigned int resp_len,
+			void (*resp_cb)(struct qmi_handle *handle,
+					unsigned int msg_id, void *msg,
+					void *resp_cb_data,
+					int stat),
+			void *resp_cb_data);
+
+/**
+ * qmi_recv_msg() - Receive the QMI message
+ * @handle: Handle for which the QMI message has to be received.
+ *
+ * @return: 0 on success, < 0 on error.
+ */
+int qmi_recv_msg(struct qmi_handle *handle);
+
+/**
+ * qmi_connect_to_service() - Connect the QMI handle with a QMI service
+ * @handle: QMI handle to be connected with the QMI service.
+ * @service_id: Service id to identify the QMI service.
+ * @service_vers: Version to identify the compatibility.
+ * @service_ins: Instance id to identify the instance of the QMI service.
+ *
+ * @return: 0 on success, < 0 on error.
+ */
+int qmi_connect_to_service(struct qmi_handle *handle,
+			   uint32_t service_id,
+			   uint32_t service_vers,
+			   uint32_t service_ins);
+
+/**
+ * qmi_svc_event_notifier_register() - Register a notifier block to receive
+ *                                     events regarding a QMI service
+ * @service_id: Service ID to identify the QMI service.
+ * @service_vers: Version to identify the compatibility.
+ * @service_ins: Instance ID to identify the instance of the QMI service.
+ * @nb: Notifier block used to receive the event.
+ *
+ * @return: 0 if successfully registered, < 0 on error.
+ */
+int qmi_svc_event_notifier_register(uint32_t service_id,
+				    uint32_t service_vers,
+				    uint32_t service_ins,
+				    struct notifier_block *nb);
+
+/**
+ * qmi_svc_event_notifier_unregister() - Unregister service event
+ *                                       notifier block
+ * @service_id: Service ID to identify the QMI service.
+ * @service_vers: Version to identify the compatibility.
+ * @service_ins: Instance ID to identify the instance of the QMI service.
+ * @nb: Notifier block registered to receive the events.
+ *
+ * @return: 0 if successfully registered, < 0 on error.
+ */
+int qmi_svc_event_notifier_unregister(uint32_t service_id,
+				      uint32_t service_vers,
+				      uint32_t service_ins,
+				      struct notifier_block *nb);
+
+/**
+ * qmi_svc_register() - Register a QMI service with a QMI handle
+ * @handle: QMI handle on which the service has to be registered.
+ * @ops_options: Service specific operations and options.
+ *
+ * @return: 0 if successfully registered, < 0 on error.
+ */
+int qmi_svc_register(struct qmi_handle *handle,
+		     void *ops_options);
+
+/**
+ * qmi_send_resp() - Send response to a request
+ * @handle: QMI handle from which the response is sent.
+ * @clnt: Client to which the response is sent.
+ * @req_handle: Request for which the response is sent.
+ * @resp_desc: Descriptor explaining the response structure.
+ * @resp: Pointer to the response structure.
+ * @resp_len: Length of the response structure.
+ *
+ * @return: 0 on success, < 0 on error.
+ */
+int qmi_send_resp(struct qmi_handle *handle,
+		  void *conn_handle,
+		  void *req_handle,
+		  struct msg_desc *resp_desc,
+		  void *resp,
+		  unsigned int resp_len);
+
+/**
+ * qmi_send_resp_from_cb() - Send response to a request from request_cb
+ * @handle: QMI handle from which the response is sent.
+ * @clnt: Client to which the response is sent.
+ * @req_handle: Request for which the response is sent.
+ * @resp_desc: Descriptor explaining the response structure.
+ * @resp: Pointer to the response structure.
+ * @resp_len: Length of the response structure.
+ *
+ * @return: 0 on success, < 0 on error.
+ */
+int qmi_send_resp_from_cb(struct qmi_handle *handle,
+			  void *conn_handle,
+			  void *req_handle,
+			  struct msg_desc *resp_desc,
+			  void *resp,
+			  unsigned int resp_len);
+
+/**
+ * qmi_send_ind() - Send unsolicited event/indication to a client
+ * @handle: QMI handle from which the indication is sent.
+ * @clnt: Client to which the indication is sent.
+ * @ind_desc: Descriptor explaining the indication structure.
+ * @ind: Pointer to the indication structure.
+ * @ind_len: Length of the indication structure.
+ *
+ * @return: 0 on success, < 0 on error.
+ */
+int qmi_send_ind(struct qmi_handle *handle,
+		 void *conn_handle,
+		 struct msg_desc *ind_desc,
+		 void *ind,
+		 unsigned int ind_len);
+
+/**
+ * qmi_send_ind_from_cb() - Send indication to a client from registration_cb
+ * @handle: QMI handle from which the indication is sent.
+ * @clnt: Client to which the indication is sent.
+ * @ind_desc: Descriptor explaining the indication structure.
+ * @ind: Pointer to the indication structure.
+ * @ind_len: Length of the indication structure.
+ *
+ * @return: 0 on success, < 0 on error.
+ */
+int qmi_send_ind_from_cb(struct qmi_handle *handle,
+			 void *conn_handle,
+			 struct msg_desc *ind_desc,
+			 void *ind,
+			 unsigned int ind_len);
+
+/**
+ * qmi_svc_unregister() - Unregister the service from a QMI handle
+ * @handle: QMI handle from which the service has to be unregistered.
+ *
+ * return: 0 on success, < 0 on error.
+ */
+int qmi_svc_unregister(struct qmi_handle *handle);
+
+#else
+
+#define get_qmi_response_type_v01_ei() NULL
+
+static inline struct qmi_handle *qmi_handle_create(
+	void (*notify)(struct qmi_handle *handle,
+		       enum qmi_event_type event, void *notify_priv),
+	void *notify_priv)
+{
+	return NULL;
+}
+
+static inline int qmi_handle_destroy(struct qmi_handle *handle)
+{
+	return -ENODEV;
+}
+
+static inline int qmi_register_ind_cb(struct qmi_handle *handle,
+	void (*ind_cb)(struct qmi_handle *handle,
+		       unsigned int msg_id, void *msg,
+		       unsigned int msg_len, void *ind_cb_priv),
+	void *ind_cb_priv)
+{
+	return -ENODEV;
+}
+
+static inline int qmi_send_req_wait(struct qmi_handle *handle,
+				    struct msg_desc *req_desc,
+				    void *req, unsigned int req_len,
+				    struct msg_desc *resp_desc,
+				    void *resp, unsigned int resp_len,
+				    unsigned long timeout_ms)
+{
+	return -ENODEV;
+}
+
+static inline int qmi_send_req_nowait(struct qmi_handle *handle,
+				struct msg_desc *req_desc,
+				void *req, unsigned int req_len,
+				struct msg_desc *resp_desc,
+				void *resp, unsigned int resp_len,
+				void (*resp_cb)(struct qmi_handle *handle,
+						unsigned int msg_id, void *msg,
+						void *resp_cb_data,
+						int stat),
+				void *resp_cb_data)
+{
+	return -ENODEV;
+}
+
+static inline int qmi_recv_msg(struct qmi_handle *handle)
+{
+	return -ENODEV;
+}
+
+static inline int qmi_connect_to_service(struct qmi_handle *handle,
+					 uint32_t service_id,
+					 uint32_t service_vers,
+					 uint32_t service_ins)
+{
+	return -ENODEV;
+}
+
+static inline int qmi_svc_event_notifier_register(uint32_t service_id,
+						  uint32_t service_vers,
+						  uint32_t service_ins,
+						  struct notifier_block *nb)
+{
+	return -ENODEV;
+}
+
+static inline int qmi_svc_event_notifier_unregister(uint32_t service_id,
+						    uint32_t service_vers,
+						    uint32_t service_ins,
+						    struct notifier_block *nb)
+{
+	return -ENODEV;
+}
+
+static inline int qmi_svc_register(struct qmi_handle *handle,
+				   void *ops_options)
+{
+	return -ENODEV;
+}
+
+static inline int qmi_send_resp(struct qmi_handle *handle,
+				void *conn_handle,
+				void *req_handle,
+				struct msg_desc *resp_desc,
+				void *resp,
+				unsigned int resp_len)
+{
+	return -ENODEV;
+}
+
+static inline int qmi_send_resp_from_cb(struct qmi_handle *handle,
+					void *conn_handle,
+					void *req_handle,
+					struct msg_desc *resp_desc,
+					void *resp,
+					unsigned int resp_len)
+{
+	return -ENODEV;
+}
+
+static inline int qmi_send_ind(struct qmi_handle *handle,
+			       void *conn_handle,
+			       struct msg_desc *ind_desc,
+			       void *ind,
+			       unsigned int ind_len)
+{
+	return -ENODEV;
+}
+
+static inline int qmi_send_ind_from_cb(struct qmi_handle *handle,
+				       void *conn_handle,
+				       struct msg_desc *ind_desc,
+				       void *ind,
+				       unsigned int ind_len)
+{
+	return -ENODEV;
+}
+
+static inline int qmi_svc_unregister(struct qmi_handle *handle)
+{
+	return -ENODEV;
+}
+
+#endif
+
+#endif
diff -Nruw linux-4.4.115/include/soc/qcom/msm_tz_smmu.h linux-4.4.115-fbx/include/soc/qcom/msm_tz_smmu.h
--- linux-4.4.115/include/soc/qcom/msm_tz_smmu.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/include/soc/qcom/msm_tz_smmu.h	2019-01-22 16:16:28.495291612 +0100
@@ -0,0 +1,82 @@
+/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MSM_TZ_SMMU_H__
+#define __MSM_TZ_SMMU_H__
+
+#include <linux/device.h>
+
+enum tz_smmu_device_id {
+	TZ_DEVICE_START = 0,
+	TZ_DEVICE_VIDEO = 0,
+	TZ_DEVICE_MDSS,
+	TZ_DEVICE_LPASS,
+	TZ_DEVICE_MDSS_BOOT,
+	TZ_DEVICE_USB1_HS,
+	TZ_DEVICE_OCMEM,
+	TZ_DEVICE_LPASS_CORE,
+	TZ_DEVICE_VPU,
+	TZ_DEVICE_COPSS_SMMU,
+	TZ_DEVICE_USB3_0,
+	TZ_DEVICE_USB3_1,
+	TZ_DEVICE_PCIE_0,
+	TZ_DEVICE_PCIE_1,
+	TZ_DEVICE_BCSS,
+	TZ_DEVICE_VCAP,
+	TZ_DEVICE_PCIE20,
+	TZ_DEVICE_IPA,
+	TZ_DEVICE_APPS,
+	TZ_DEVICE_GPU,
+	TZ_DEVICE_UFS,
+	TZ_DEVICE_ICE,
+	TZ_DEVICE_ROT,
+	TZ_DEVICE_VFE,
+	TZ_DEVICE_ANOC0,
+	TZ_DEVICE_ANOC1,
+	TZ_DEVICE_ANOC2,
+	TZ_DEVICE_CPP,
+	TZ_DEVICE_JPEG,
+	TZ_DEVICE_MAX,
+};
+
+#ifdef CONFIG_MSM_TZ_SMMU
+
+int msm_tz_smmu_atos_start(struct device *dev, int cb_num);
+int msm_tz_smmu_atos_end(struct device *dev, int cb_num);
+enum tz_smmu_device_id msm_dev_to_device_id(struct device *dev);
+void msm_tz_set_cb_format(enum tz_smmu_device_id sec_id, int cbndx);
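+
+/*
+ * Example (illustrative sketch): bracket address translation operations
+ * on a secure context bank with an ATOS start/end pair.
+ *
+ *	msm_tz_smmu_atos_start(dev, cb_num);
+ *	... perform address translation on context bank cb_num ...
+ *	msm_tz_smmu_atos_end(dev, cb_num);
+ */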
+
+#else
+
+static inline int msm_tz_smmu_atos_start(struct device *dev, int cb_num)
+{
+	return 0;
+}
+
+static inline int msm_tz_smmu_atos_end(struct device *dev, int cb_num)
+{
+	return 0;
+}
+
+static inline enum tz_smmu_device_id msm_dev_to_device_id(struct device *dev)
+{
+	return -EINVAL;
+}
+
+static inline void msm_tz_set_cb_format(enum tz_smmu_device_id sec_id,
+					int cbndx)
+{
+}
+
+#endif /* CONFIG_MSM_TZ_SMMU */
+
+#endif /* __MSM_TZ_SMMU_H__ */
diff -Nruw linux-4.4.115/include/soc/qcom/pm.h linux-4.4.115-fbx/include/soc/qcom/pm.h
--- linux-4.4.115/include/soc/qcom/pm.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/include/soc/qcom/pm.h	2019-01-22 16:16:28.495291612 +0100
@@ -0,0 +1,214 @@
+/*
+ * Copyright (C) 2007 Google, Inc.
+ * Copyright (c) 2009-2016, The Linux Foundation. All rights reserved.
+ * Author: San Mehat <san@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __ARCH_ARM_MACH_MSM_PM_H
+#define __ARCH_ARM_MACH_MSM_PM_H
+
+#include <linux/types.h>
+#include <linux/cpuidle.h>
+#include <asm/smp_plat.h>
+#include <asm/barrier.h>
+#include <dt-bindings/msm/pm.h>
+
+#if !defined(CONFIG_SMP)
+#define msm_secondary_startup NULL
+#elif defined(CONFIG_CPU_V7)
+#define msm_secondary_startup secondary_startup
+#else
+#define msm_secondary_startup secondary_holding_pen
+#endif
+
+enum msm_pm_sleep_mode {
+	MSM_PM_SLEEP_MODE_WAIT_FOR_INTERRUPT,
+	MSM_PM_SLEEP_MODE_RETENTION,
+	MSM_PM_SLEEP_MODE_POWER_COLLAPSE_STANDALONE,
+	MSM_PM_SLEEP_MODE_POWER_COLLAPSE,
+	MSM_PM_SLEEP_MODE_FASTPC,
+	MSM_PM_SLEEP_MODE_POWER_COLLAPSE_SUSPEND,
+	MSM_PM_SLEEP_MODE_NR,
+	MSM_PM_SLEEP_MODE_NOT_SELECTED,
+};
+
+enum msm_pm_l2_scm_flag {
+	MSM_SCM_L2_ON = 0,
+	MSM_SCM_L2_OFF = 1,
+	MSM_SCM_L2_GDHS = 3,
+	MSM_SCM_L3_PC_OFF = 4,
+};
+
+#define MSM_PM_MODE(cpu, mode_nr)  ((cpu) * MSM_PM_SLEEP_MODE_NR + (mode_nr))
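+
+/*
+ * e.g. MSM_PM_MODE(1, MSM_PM_SLEEP_MODE_RETENTION) yields the index of
+ * CPU 1's retention slot in a flat per-CPU, per-mode array.
+ */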
+
+struct msm_pm_time_params {
+	uint32_t latency_us;
+	uint32_t sleep_us;
+	uint32_t next_event_us;
+	uint32_t modified_time_us;
+};
+
+struct msm_pm_sleep_status_data {
+	void *base_addr;
+	uint32_t mask;
+};
+
+struct latency_level {
+	int affinity_level;
+	int reset_level;
+	const char *level_name;
+};
+
+/**
+ * lpm_cpu_pre_pc_cb(): API to get the L2 flag to pass to TZ
+ *
+ * @cpu: cpuid of the CPU going down.
+ *
+ * Returns the L2 flush flag enum that is passed down to TZ during power
+ * collapse.
+ */
+enum msm_pm_l2_scm_flag lpm_cpu_pre_pc_cb(unsigned int cpu);
+
+/**
+ * msm_pm_sleep_mode_allow() - API to determine if sleep mode is allowed.
+ * @cpu:	CPU on which to check for the sleep mode.
+ * @mode:	Sleep Mode to check for.
+ * @idle:	Idle or Suspend Sleep Mode.
+ *
+ * Helper function to determine if an Idle or Suspend
+ * sleep mode is allowed for a specific CPU.
+ *
+ * Return: 1 for allowed; 0 if not allowed.
+ */
+int msm_pm_sleep_mode_allow(unsigned int cpu, unsigned int mode, bool idle);
+
+/**
+ * msm_pm_sleep_mode_supported() - API to determine if sleep mode is
+ * supported.
+ * @cpu:	CPU on which to check for the sleep mode.
+ * @mode:	Sleep Mode to check for.
+ * @idle:	Idle or Suspend Sleep Mode.
+ *
+ * Helper function to determine if an Idle or Suspend
+ * sleep mode is allowed and enabled for a specific CPU.
+ *
+ * Return: 1 for supported; 0 if not supported.
+ */
+int msm_pm_sleep_mode_supported(unsigned int cpu, unsigned int mode,
+				bool idle);
+
+struct msm_pm_cpr_ops {
+	void (*cpr_suspend)(void);
+	void (*cpr_resume)(void);
+};
+
+void __init msm_pm_set_tz_retention_flag(unsigned int flag);
+void msm_pm_enable_retention(bool enable);
+bool msm_pm_retention_enabled(void);
+bool msm_cpu_pm_enter_sleep(enum msm_pm_sleep_mode mode, bool from_idle);
+static inline void msm_arch_idle(void)
+{
+	mb();
+	wfi();
+}
+
+#ifdef CONFIG_MSM_PM
+
+void msm_pm_set_rpm_wakeup_irq(unsigned int irq);
+int msm_pm_wait_cpu_shutdown(unsigned int cpu);
+int __init msm_pm_sleep_status_init(void);
+void lpm_cpu_hotplug_enter(unsigned int cpu);
+s32 msm_cpuidle_get_deep_idle_latency(void);
+int msm_pm_collapse(unsigned long unused);
+
+/**
+ * lpm_get_latency() - API to get latency for a low power mode
+ * @latency_level:	pointer to structure with below elements
+ * affinity_level: The level (CPU/L2/CCI etc.) for which the
+ *	latency is required.
+ *	LPM_AFF_LVL_CPU : CPU level
+ *	LPM_AFF_LVL_L2  : L2 level
+ *	LPM_AFF_LVL_CCI : CCI level
+ * reset_level: Can be passed "LPM_RESET_LVL_GDHS" for
+ *	low power mode with control logic power collapse or
+ *	"LPM_RESET_LVL_PC" for low power mode with control and
+ *	memory logic power collapse or "LPM_RESET_LVL_RET" for
+ *	retention mode.
+ * level_name: Pointer to the cluster name for which the latency
+ *	is required or NULL if the minimum value out of all the
+ *	clusters is to be returned. For CPU level, the name of the
+ *	L2 cluster to be passed. For CCI it has no effect.
+ * @latency:	address to get the latency value.
+ *
+ * latency value will be for the particular cluster or the minimum
+ * value out of all the clusters at the particular affinity_level
+ * and reset_level.
+ *
+ * Return: 0 for success; Error number for failure.
+ */
+int lpm_get_latency(struct latency_level *level, uint32_t *latency);
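+
+/*
+ * Example (illustrative sketch; the cluster name "perf" is hypothetical):
+ *
+ *	struct latency_level lvl = {
+ *		.affinity_level = LPM_AFF_LVL_L2,
+ *		.reset_level = LPM_RESET_LVL_PC,
+ *		.level_name = "perf",
+ *	};
+ *	uint32_t latency;
+ *
+ *	if (!lpm_get_latency(&lvl, &latency))
+ *		pr_debug("L2 PC latency: %u us\n", latency);
+ */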
+
+#else
+static inline void msm_pm_set_rpm_wakeup_irq(unsigned int irq) {}
+static inline int msm_pm_wait_cpu_shutdown(unsigned int cpu) { return 0; }
+static inline int msm_pm_sleep_status_init(void) { return 0; }
+
+static inline void lpm_cpu_hotplug_enter(unsigned int cpu)
+{
+	msm_arch_idle();
+}
+
+static inline s32 msm_cpuidle_get_deep_idle_latency(void) { return 0; }
+#define msm_pm_collapse NULL
+
+static inline int lpm_get_latency(struct latency_level *level,
+						uint32_t *latency)
+{
+	return 0;
+}
+#endif
+
+#ifdef CONFIG_HOTPLUG_CPU
+int msm_platform_secondary_init(unsigned int cpu);
+#else
+static inline int msm_platform_secondary_init(unsigned int cpu) { return 0; }
+#endif
+
+enum msm_pm_time_stats_id {
+	MSM_PM_STAT_REQUESTED_IDLE = 0,
+	MSM_PM_STAT_IDLE_SPIN,
+	MSM_PM_STAT_IDLE_WFI,
+	MSM_PM_STAT_RETENTION,
+	MSM_PM_STAT_IDLE_STANDALONE_POWER_COLLAPSE,
+	MSM_PM_STAT_IDLE_FAILED_STANDALONE_POWER_COLLAPSE,
+	MSM_PM_STAT_IDLE_POWER_COLLAPSE,
+	MSM_PM_STAT_IDLE_FAILED_POWER_COLLAPSE,
+	MSM_PM_STAT_SUSPEND,
+	MSM_PM_STAT_FAILED_SUSPEND,
+	MSM_PM_STAT_NOT_IDLE,
+	MSM_PM_STAT_COUNT
+};
+
+#ifdef CONFIG_MSM_IDLE_STATS
+void msm_pm_add_stats(enum msm_pm_time_stats_id *enable_stats, int size);
+void msm_pm_add_stat(enum msm_pm_time_stats_id id, int64_t t);
+void msm_pm_l2_add_stat(uint32_t id, int64_t t);
+#else
+static inline void msm_pm_add_stats(enum msm_pm_time_stats_id *enable_stats,
+		int size) {}
+static inline void msm_pm_add_stat(enum msm_pm_time_stats_id id, int64_t t) {}
+static inline void msm_pm_l2_add_stat(uint32_t id, int64_t t) {}
+#endif
+
+void msm_pm_set_cpr_ops(struct msm_pm_cpr_ops *ops);
+extern dma_addr_t msm_pc_debug_counters_phys;
+#endif  /* __ARCH_ARM_MACH_MSM_PM_H */
diff -Nruw linux-4.4.115/include/soc/qcom/qseecomi.h linux-4.4.115-fbx/include/soc/qcom/qseecomi.h
--- linux-4.4.115/include/soc/qcom/qseecomi.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/include/soc/qcom/qseecomi.h	2019-01-22 16:16:28.495291612 +0100
@@ -0,0 +1,731 @@
+/*
+ * Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __QSEECOMI_H_
+#define __QSEECOMI_H_
+
+#include <linux/qseecom.h>
+
+#define QSEECOM_KEY_ID_SIZE   32
+
+#define QSEOS_RESULT_FAIL_SEND_CMD_NO_THREAD  -19   /* 0xFFFFFFED */
+#define QSEOS_RESULT_FAIL_UNSUPPORTED_CE_PIPE -63
+#define QSEOS_RESULT_FAIL_KS_OP               -64
+#define QSEOS_RESULT_FAIL_KEY_ID_EXISTS       -65
+#define QSEOS_RESULT_FAIL_MAX_KEYS            -66
+#define QSEOS_RESULT_FAIL_SAVE_KS             -67
+#define QSEOS_RESULT_FAIL_LOAD_KS             -68
+#define QSEOS_RESULT_FAIL_KS_ALREADY_DONE     -69
+#define QSEOS_RESULT_FAIL_KEY_ID_DNE          -70
+#define QSEOS_RESULT_FAIL_INCORRECT_PSWD      -71
+#define QSEOS_RESULT_FAIL_MAX_ATTEMPT         -72
+#define QSEOS_RESULT_FAIL_PENDING_OPERATION   -73
+
+enum qseecom_command_scm_resp_type {
+	QSEOS_APP_ID = 0xEE01,
+	QSEOS_LISTENER_ID
+};
+
+enum qseecom_qceos_cmd_id {
+	QSEOS_APP_START_COMMAND      = 0x01,
+	QSEOS_APP_SHUTDOWN_COMMAND,
+	QSEOS_APP_LOOKUP_COMMAND,
+	QSEOS_REGISTER_LISTENER,
+	QSEOS_DEREGISTER_LISTENER,
+	QSEOS_CLIENT_SEND_DATA_COMMAND,
+	QSEOS_LISTENER_DATA_RSP_COMMAND,
+	QSEOS_LOAD_EXTERNAL_ELF_COMMAND,
+	QSEOS_UNLOAD_EXTERNAL_ELF_COMMAND,
+	QSEOS_GET_APP_STATE_COMMAND,
+	QSEOS_LOAD_SERV_IMAGE_COMMAND,
+	QSEOS_UNLOAD_SERV_IMAGE_COMMAND,
+	QSEOS_APP_REGION_NOTIFICATION,
+	QSEOS_REGISTER_LOG_BUF_COMMAND,
+	QSEOS_RPMB_PROVISION_KEY_COMMAND,
+	QSEOS_RPMB_ERASE_COMMAND,
+	QSEOS_GENERATE_KEY  = 0x11,
+	QSEOS_DELETE_KEY,
+	QSEOS_MAX_KEY_COUNT,
+	QSEOS_SET_KEY,
+	QSEOS_UPDATE_KEY_USERINFO,
+	QSEOS_TEE_OPEN_SESSION,
+	QSEOS_TEE_INVOKE_COMMAND,
+	QSEOS_TEE_INVOKE_MODFD_COMMAND = QSEOS_TEE_INVOKE_COMMAND,
+	QSEOS_TEE_CLOSE_SESSION,
+	QSEOS_TEE_REQUEST_CANCELLATION,
+	QSEOS_CONTINUE_BLOCKED_REQ_COMMAND,
+	QSEOS_RPMB_CHECK_PROV_STATUS_COMMAND = 0x1B,
+	QSEOS_CLIENT_SEND_DATA_COMMAND_WHITELIST = 0x1C,
+	QSEOS_TEE_OPEN_SESSION_WHITELIST = 0x1D,
+	QSEOS_TEE_INVOKE_COMMAND_WHITELIST = 0x1E,
+	QSEOS_LISTENER_DATA_RSP_COMMAND_WHITELIST = 0x1F,
+	QSEOS_FSM_LTEOTA_REQ_CMD = 0x109,
+	QSEOS_FSM_LTEOTA_REQ_RSP_CMD = 0x110,
+	QSEOS_FSM_IKE_REQ_CMD = 0x203,
+	QSEOS_FSM_IKE_REQ_RSP_CMD = 0x204,
+	QSEOS_FSM_OEM_FUSE_WRITE_ROW = 0x301,
+	QSEOS_FSM_OEM_FUSE_READ_ROW = 0x302,
+	QSEOS_FSM_ENCFS_REQ_CMD = 0x403,
+	QSEOS_FSM_ENCFS_REQ_RSP_CMD = 0x404,
+
+	QSEOS_CMD_MAX     = 0xEFFFFFFF
+};
+
+enum qseecom_qceos_cmd_status {
+	QSEOS_RESULT_SUCCESS = 0,
+	QSEOS_RESULT_INCOMPLETE,
+	QSEOS_RESULT_BLOCKED_ON_LISTENER,
+	QSEOS_RESULT_FAILURE  = 0xFFFFFFFF
+};
+
+enum qseecom_pipe_type {
+	QSEOS_PIPE_ENC = 0x1,
+	QSEOS_PIPE_ENC_XTS = 0x2,
+	QSEOS_PIPE_AUTH = 0x4,
+	QSEOS_PIPE_ENUM_FILL = 0x7FFFFFFF
+};
+
+/* QSEE Reentrancy support phase */
+enum qseecom_qsee_reentrancy_phase {
+	QSEE_REENTRANCY_PHASE_0 = 0,
+	QSEE_REENTRANCY_PHASE_1,
+	QSEE_REENTRANCY_PHASE_2,
+	QSEE_REENTRANCY_PHASE_3,
+	QSEE_REENTRANCY_PHASE_MAX = 0xFF
+};
+
+__packed  struct qsee_apps_region_info_ireq {
+	uint32_t qsee_cmd_id;
+	uint32_t addr;
+	uint32_t size;
+};
+
+__packed  struct qsee_apps_region_info_64bit_ireq {
+	uint32_t qsee_cmd_id;
+	uint64_t addr;
+	uint32_t size;
+};
+
+__packed struct qseecom_check_app_ireq {
+	uint32_t qsee_cmd_id;
+	char     app_name[MAX_APP_NAME_SIZE];
+};
+
+__packed struct qseecom_load_app_ireq {
+	uint32_t qsee_cmd_id;
+	uint32_t mdt_len;		/* Length of the mdt file */
+	uint32_t img_len;		/* Length of .bxx and .mdt files */
+	uint32_t phy_addr;		/* phy addr of the start of image */
+	char     app_name[MAX_APP_NAME_SIZE];	/* application name*/
+};
+
+__packed struct qseecom_load_app_64bit_ireq {
+	uint32_t qsee_cmd_id;
+	uint32_t mdt_len;
+	uint32_t img_len;
+	uint64_t phy_addr;
+	char     app_name[MAX_APP_NAME_SIZE];
+};
+
+__packed struct qseecom_unload_app_ireq {
+	uint32_t qsee_cmd_id;
+	uint32_t  app_id;
+};
+
+__packed struct qseecom_load_lib_image_ireq {
+	uint32_t qsee_cmd_id;
+	uint32_t mdt_len;
+	uint32_t img_len;
+	uint32_t phy_addr;
+};
+
+__packed struct qseecom_load_lib_image_64bit_ireq {
+	uint32_t qsee_cmd_id;
+	uint32_t mdt_len;
+	uint32_t img_len;
+	uint64_t phy_addr;
+};
+
+__packed struct qseecom_unload_lib_image_ireq {
+	uint32_t qsee_cmd_id;
+};
+
+__packed struct qseecom_register_listener_ireq {
+	uint32_t qsee_cmd_id;
+	uint32_t listener_id;
+	uint32_t sb_ptr;
+	uint32_t sb_len;
+};
+
+__packed struct qseecom_register_listener_64bit_ireq {
+	uint32_t qsee_cmd_id;
+	uint32_t listener_id;
+	uint64_t sb_ptr;
+	uint32_t sb_len;
+};
+
+__packed struct qseecom_unregister_listener_ireq {
+	uint32_t qsee_cmd_id;
+	uint32_t  listener_id;
+};
+
+__packed struct qseecom_client_send_data_ireq {
+	uint32_t qsee_cmd_id;
+	uint32_t app_id;
+	uint32_t req_ptr;
+	uint32_t req_len;
+	uint32_t rsp_ptr;	/* First 4 bytes should be the return status */
+	uint32_t rsp_len;
+	uint32_t sglistinfo_ptr;
+	uint32_t sglistinfo_len;
+};
+
+__packed struct qseecom_client_send_data_64bit_ireq {
+	uint32_t qsee_cmd_id;
+	uint32_t app_id;
+	uint64_t req_ptr;
+	uint32_t req_len;
+	uint64_t rsp_ptr;
+	uint32_t rsp_len;
+	uint64_t sglistinfo_ptr;
+	uint32_t sglistinfo_len;
+};
+
+__packed struct qseecom_reg_log_buf_ireq {
+	uint32_t qsee_cmd_id;
+	uint32_t phy_addr;
+	uint32_t len;
+};
+
+__packed struct qseecom_reg_log_buf_64bit_ireq {
+	uint32_t qsee_cmd_id;
+	uint64_t phy_addr;
+	uint32_t len;
+};
+
+/* send_data resp */
+__packed struct qseecom_client_listener_data_irsp {
+	uint32_t qsee_cmd_id;
+	uint32_t listener_id;
+	uint32_t status;
+	uint32_t sglistinfo_ptr;
+	uint32_t sglistinfo_len;
+};
+
+__packed struct qseecom_client_listener_data_64bit_irsp {
+	uint32_t qsee_cmd_id;
+	uint32_t listener_id;
+	uint32_t status;
+	uint64_t sglistinfo_ptr;
+	uint32_t sglistinfo_len;
+};
+
+/*
+ * struct qseecom_command_scm_resp - qseecom response buffer
+ * @result: Result of the command, from enum qseecom_qceos_cmd_status.
+ * @resp_type: Type of the response, from enum qseecom_command_scm_resp_type.
+ * @data: Response data (e.g. app ID or listener ID) for @resp_type.
+ */
+__packed struct qseecom_command_scm_resp {
+	uint32_t result;
+	enum qseecom_command_scm_resp_type resp_type;
+	unsigned int data;
+};
+
+struct qseecom_rpmb_provision_key {
+	uint32_t key_type;
+};
+
+__packed struct qseecom_client_send_service_ireq {
+	uint32_t qsee_cmd_id;
+	uint32_t key_type; /* in */
+	unsigned int req_len; /* in */
+	uint32_t rsp_ptr; /* in/out */
+	unsigned int rsp_len; /* in/out */
+};
+
+__packed struct qseecom_client_send_service_64bit_ireq {
+	uint32_t qsee_cmd_id;
+	uint32_t key_type;
+	unsigned int req_len;
+	uint64_t rsp_ptr;
+	unsigned int rsp_len;
+};
+
+__packed struct qseecom_key_generate_ireq {
+	uint32_t qsee_command_id;
+	uint32_t flags;
+	uint8_t key_id[QSEECOM_KEY_ID_SIZE];
+	uint8_t hash32[QSEECOM_HASH_SIZE];
+};
+
+__packed struct qseecom_key_select_ireq {
+	uint32_t qsee_command_id;
+	uint32_t ce;
+	uint32_t pipe;
+	uint32_t pipe_type;
+	uint32_t flags;
+	uint8_t key_id[QSEECOM_KEY_ID_SIZE];
+	uint8_t hash32[QSEECOM_HASH_SIZE];
+};
+
+__packed struct qseecom_key_delete_ireq {
+	uint32_t qsee_command_id;
+	uint32_t flags;
+	uint8_t key_id[QSEECOM_KEY_ID_SIZE];
+	uint8_t hash32[QSEECOM_HASH_SIZE];
+
+};
+
+__packed struct qseecom_key_userinfo_update_ireq {
+	uint32_t qsee_command_id;
+	uint32_t flags;
+	uint8_t key_id[QSEECOM_KEY_ID_SIZE];
+	uint8_t current_hash32[QSEECOM_HASH_SIZE];
+	uint8_t new_hash32[QSEECOM_HASH_SIZE];
+};
+
+__packed struct qseecom_key_max_count_query_ireq {
+	uint32_t flags;
+};
+
+__packed struct qseecom_key_max_count_query_irsp {
+	uint32_t max_key_count;
+};
+
+__packed struct qseecom_qteec_ireq {
+	uint32_t    qsee_cmd_id;
+	uint32_t    app_id;
+	uint32_t    req_ptr;
+	uint32_t    req_len;
+	uint32_t    resp_ptr;
+	uint32_t    resp_len;
+	uint32_t    sglistinfo_ptr;
+	uint32_t    sglistinfo_len;
+};
+
+__packed struct qseecom_qteec_64bit_ireq {
+	uint32_t    qsee_cmd_id;
+	uint32_t    app_id;
+	uint64_t    req_ptr;
+	uint32_t    req_len;
+	uint64_t    resp_ptr;
+	uint32_t    resp_len;
+	uint64_t    sglistinfo_ptr;
+	uint32_t    sglistinfo_len;
+};
+
+__packed struct qseecom_client_send_fsm_key_req {
+	uint32_t qsee_cmd_id;
+	uint32_t req_ptr;
+	uint32_t req_len;
+	uint32_t rsp_ptr;
+	uint32_t rsp_len;
+};
+
+__packed struct qseecom_continue_blocked_request_ireq {
+	uint32_t qsee_cmd_id;
+	uint32_t app_or_session_id; /*legacy: app_id; smcinvoke: session_id*/
+};
+
+
+/**********      ARMV8 SMC INTERFACE TZ MACRO     *******************/
+
+#define TZ_SVC_APP_MGR                   1     /* Application management */
+#define TZ_SVC_LISTENER                  2     /* Listener service management */
+#define TZ_SVC_EXTERNAL                  3     /* External image loading */
+#define TZ_SVC_RPMB                      4     /* RPMB */
+#define TZ_SVC_KEYSTORE                  5     /* Keystore management */
+#define TZ_SVC_ES                        16    /* Enterprise Security */
+#define TZ_SVC_MDTP                      18    /* Mobile Device Theft */
+
+/*----------------------------------------------------------------------------
+ * Owning Entity IDs (defined by ARM SMC doc)
+ * -------------------------------------------------------------------------*/
+#define TZ_OWNER_ARM                     0     /** ARM Architecture call ID */
+#define TZ_OWNER_CPU                     1     /** CPU service call ID */
+#define TZ_OWNER_SIP                     2     /** SIP service call ID */
+#define TZ_OWNER_OEM                     3     /** OEM service call ID */
+#define TZ_OWNER_STD                     4     /** Standard service call ID */
+
+/** Values 5-47 are reserved for future use */
+
+/** Trusted Application call IDs */
+#define TZ_OWNER_TZ_APPS                 48
+#define TZ_OWNER_TZ_APPS_RESERVED        49
+/** Trusted OS Call IDs */
+#define TZ_OWNER_QSEE_OS                 50
+#define TZ_OWNER_MOBI_OS                 51
+#define TZ_OWNER_OS_RESERVED_3           52
+#define TZ_OWNER_OS_RESERVED_4           53
+#define TZ_OWNER_OS_RESERVED_5           54
+#define TZ_OWNER_OS_RESERVED_6           55
+#define TZ_OWNER_OS_RESERVED_7           56
+#define TZ_OWNER_OS_RESERVED_8           57
+#define TZ_OWNER_OS_RESERVED_9           58
+#define TZ_OWNER_OS_RESERVED_10          59
+#define TZ_OWNER_OS_RESERVED_11          60
+#define TZ_OWNER_OS_RESERVED_12          61
+#define TZ_OWNER_OS_RESERVED_13          62
+#define TZ_OWNER_OS_RESERVED_14          63
+
+#define TZ_SVC_INFO                      6     /* Misc. information services */
+
+/** Trusted Application call groups */
+#define TZ_SVC_APP_ID_PLACEHOLDER        0     /* SVC bits will contain App ID */
+
+/** General helper macro to create a bitmask from bits low to high. */
+#define TZ_MASK_BITS(h, l)     ((0xffffffff >> (32 - ((h - l) + 1))) << l)
+
+/**
+   Macro used to define an SMC ID based on the owner ID,
+   service ID, and function number.
+*/
+#define TZ_SYSCALL_CREATE_SMC_ID(o, s, f) \
+	((uint32_t)((((o & 0x3f) << 24) | (s & 0xff) << 8) | (f & 0xff)))
+
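+/*
+ * Worked example: TZ_OS_APP_START_ID defined below expands to
+ * TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_QSEE_OS, TZ_SVC_APP_MGR, 0x01),
+ * i.e. (50 << 24) | (1 << 8) | 0x01 = 0x32000101.
+ */
+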
+#define TZ_SYSCALL_PARAM_NARGS_MASK  TZ_MASK_BITS(3, 0)
+#define TZ_SYSCALL_PARAM_TYPE_MASK   TZ_MASK_BITS(1, 0)
+
+#define TZ_SYSCALL_CREATE_PARAM_ID(nargs, p1, p2, p3, \
+	p4, p5, p6, p7, p8, p9, p10) \
+	((nargs&TZ_SYSCALL_PARAM_NARGS_MASK)+ \
+	((p1&TZ_SYSCALL_PARAM_TYPE_MASK)<<4)+ \
+	((p2&TZ_SYSCALL_PARAM_TYPE_MASK)<<6)+ \
+	((p3&TZ_SYSCALL_PARAM_TYPE_MASK)<<8)+ \
+	((p4&TZ_SYSCALL_PARAM_TYPE_MASK)<<10)+ \
+	((p5&TZ_SYSCALL_PARAM_TYPE_MASK)<<12)+ \
+	((p6&TZ_SYSCALL_PARAM_TYPE_MASK)<<14)+ \
+	((p7&TZ_SYSCALL_PARAM_TYPE_MASK)<<16)+ \
+	((p8&TZ_SYSCALL_PARAM_TYPE_MASK)<<18)+ \
+	((p9&TZ_SYSCALL_PARAM_TYPE_MASK)<<20)+ \
+	((p10&TZ_SYSCALL_PARAM_TYPE_MASK)<<22))
+
+/**
+   Macros used to create the Parameter ID associated with the syscall
+ */
+#define TZ_SYSCALL_CREATE_PARAM_ID_0 0
+#define TZ_SYSCALL_CREATE_PARAM_ID_1(p1) \
+	TZ_SYSCALL_CREATE_PARAM_ID(1, p1, 0, 0, 0, 0, 0, 0, 0, 0, 0)
+#define TZ_SYSCALL_CREATE_PARAM_ID_2(p1, p2) \
+	TZ_SYSCALL_CREATE_PARAM_ID(2, p1, p2, 0, 0, 0, 0, 0, 0, 0, 0)
+#define TZ_SYSCALL_CREATE_PARAM_ID_3(p1, p2, p3) \
+	TZ_SYSCALL_CREATE_PARAM_ID(3, p1, p2, p3, 0, 0, 0, 0, 0, 0, 0)
+#define TZ_SYSCALL_CREATE_PARAM_ID_4(p1, p2, p3, p4) \
+	TZ_SYSCALL_CREATE_PARAM_ID(4, p1, p2, p3, p4, 0, 0, 0, 0, 0, 0)
+#define TZ_SYSCALL_CREATE_PARAM_ID_5(p1, p2, p3, p4, p5) \
+	TZ_SYSCALL_CREATE_PARAM_ID(5, p1, p2, p3, p4, p5, 0, 0, 0, 0, 0)
+#define TZ_SYSCALL_CREATE_PARAM_ID_6(p1, p2, p3, p4, p5, p6) \
+	TZ_SYSCALL_CREATE_PARAM_ID(6, p1, p2, p3, p4, p5, p6, 0, 0, 0, 0)
+#define TZ_SYSCALL_CREATE_PARAM_ID_7(p1, p2, p3, p4, p5, p6, p7) \
+	TZ_SYSCALL_CREATE_PARAM_ID(7, p1, p2, p3, p4, p5, p6, p7, 0, 0, 0)
+#define TZ_SYSCALL_CREATE_PARAM_ID_8(p1, p2, p3, p4, p5, p6, p7, p8) \
+	TZ_SYSCALL_CREATE_PARAM_ID(8, p1, p2, p3, p4, p5, p6, p7, p8, 0, 0)
+#define TZ_SYSCALL_CREATE_PARAM_ID_9(p1, p2, p3, p4, p5, p6, p7, p8, p9) \
+	TZ_SYSCALL_CREATE_PARAM_ID(9, p1, p2, p3, p4, p5, p6, p7, p8, p9, 0)
+#define TZ_SYSCALL_CREATE_PARAM_ID_10(p1, p2, p3, p4, p5, p6, p7, p8, p9, p10) \
+	TZ_SYSCALL_CREATE_PARAM_ID(10, p1, p2, p3, p4, p5, p6, p7, p8, p9, p10)
+
+/**
+   Macro used to obtain the Parameter ID associated with the syscall
+ */
+#define TZ_SYSCALL_GET_PARAM_ID(CMD_ID)        CMD_ID ## _PARAM_ID
+
+/** Helper macro to extract the owning entity from the SMC ID. */
+#define TZ_SYSCALL_OWNER_ID(r0)   ((r0 & TZ_MASK_BITS(29, 24)) >> 24)
+
+/** Helper macro for checking whether an owning entity is of type trusted OS. */
+#define IS_OWNER_TRUSTED_OS(owner_id) \
+			(((owner_id >= 50) && (owner_id <= 63)) ? 1:0)
+
+#define TZ_SYSCALL_PARAM_TYPE_VAL              0x0     /** type of value */
+#define TZ_SYSCALL_PARAM_TYPE_BUF_RO           0x1     /** type of buffer read-only */
+#define TZ_SYSCALL_PARAM_TYPE_BUF_RW           0x2     /** type of buffer read-write */
+
+#define TZ_OS_APP_START_ID \
+	TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_QSEE_OS, TZ_SVC_APP_MGR, 0x01)
+
+#define TZ_OS_APP_START_ID_PARAM_ID \
+	TZ_SYSCALL_CREATE_PARAM_ID_3( \
+	TZ_SYSCALL_PARAM_TYPE_VAL, TZ_SYSCALL_PARAM_TYPE_VAL, \
+	TZ_SYSCALL_PARAM_TYPE_VAL)
+
+#define TZ_OS_APP_SHUTDOWN_ID \
+	TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_QSEE_OS, TZ_SVC_APP_MGR, 0x02)
+
+#define TZ_OS_APP_SHUTDOWN_ID_PARAM_ID \
+	TZ_SYSCALL_CREATE_PARAM_ID_1(TZ_SYSCALL_PARAM_TYPE_VAL)
+
+#define TZ_OS_APP_LOOKUP_ID \
+	TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_QSEE_OS, TZ_SVC_APP_MGR, 0x03)
+
+#define TZ_OS_APP_LOOKUP_ID_PARAM_ID \
+	TZ_SYSCALL_CREATE_PARAM_ID_2( \
+	TZ_SYSCALL_PARAM_TYPE_BUF_RW, TZ_SYSCALL_PARAM_TYPE_VAL)
+
+#define TZ_OS_APP_GET_STATE_ID \
+	TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_QSEE_OS, TZ_SVC_APP_MGR, 0x04)
+
+#define TZ_OS_APP_GET_STATE_ID_PARAM_ID \
+	TZ_SYSCALL_CREATE_PARAM_ID_1(TZ_SYSCALL_PARAM_TYPE_VAL)
+
+#define TZ_OS_APP_REGION_NOTIFICATION_ID \
+	TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_QSEE_OS, TZ_SVC_APP_MGR, 0x05)
+
+#define TZ_OS_APP_REGION_NOTIFICATION_ID_PARAM_ID \
+	TZ_SYSCALL_CREATE_PARAM_ID_2( \
+	TZ_SYSCALL_PARAM_TYPE_BUF_RW, TZ_SYSCALL_PARAM_TYPE_VAL)
+
+#define TZ_OS_REGISTER_LOG_BUFFER_ID \
+	TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_QSEE_OS, TZ_SVC_APP_MGR, 0x06)
+
+#define TZ_OS_REGISTER_LOG_BUFFER_ID_PARAM_ID \
+	TZ_SYSCALL_CREATE_PARAM_ID_2( \
+	TZ_SYSCALL_PARAM_TYPE_BUF_RW, TZ_SYSCALL_PARAM_TYPE_VAL)
+
+#define TZ_OS_LOAD_SERVICES_IMAGE_ID \
+	TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_QSEE_OS, TZ_SVC_APP_MGR, 0x07)
+
+#define TZ_OS_LOAD_SERVICES_IMAGE_ID_PARAM_ID \
+	TZ_SYSCALL_CREATE_PARAM_ID_3( \
+	TZ_SYSCALL_PARAM_TYPE_VAL, TZ_SYSCALL_PARAM_TYPE_VAL, \
+	TZ_SYSCALL_PARAM_TYPE_VAL)
+
+#define TZ_OS_UNLOAD_SERVICES_IMAGE_ID \
+	TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_QSEE_OS, TZ_SVC_APP_MGR, 0x08)
+
+#define TZ_OS_UNLOAD_SERVICES_IMAGE_ID_PARAM_ID \
+	TZ_SYSCALL_CREATE_PARAM_ID_0
+
+#define TZ_OS_REGISTER_LISTENER_ID \
+	TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_QSEE_OS, TZ_SVC_LISTENER, 0x01)
+
+#define TZ_OS_REGISTER_LISTENER_SMCINVOKE_ID \
+	TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_QSEE_OS, TZ_SVC_LISTENER, 0x06)
+
+#define TZ_OS_REGISTER_LISTENER_ID_PARAM_ID \
+	TZ_SYSCALL_CREATE_PARAM_ID_3( \
+	TZ_SYSCALL_PARAM_TYPE_VAL, TZ_SYSCALL_PARAM_TYPE_BUF_RW, \
+	TZ_SYSCALL_PARAM_TYPE_VAL)
+
+#define TZ_OS_DEREGISTER_LISTENER_ID \
+	TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_QSEE_OS, TZ_SVC_LISTENER, 0x02)
+
+#define TZ_OS_DEREGISTER_LISTENER_ID_PARAM_ID \
+	TZ_SYSCALL_CREATE_PARAM_ID_1(TZ_SYSCALL_PARAM_TYPE_VAL)
+
+#define TZ_OS_LISTENER_RESPONSE_HANDLER_ID \
+	TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_QSEE_OS, TZ_SVC_LISTENER, 0x03)
+
+#define TZ_OS_LISTENER_RESPONSE_HANDLER_ID_PARAM_ID \
+	TZ_SYSCALL_CREATE_PARAM_ID_2( \
+	TZ_SYSCALL_PARAM_TYPE_VAL, TZ_SYSCALL_PARAM_TYPE_VAL)
+
+#define TZ_OS_LOAD_EXTERNAL_IMAGE_ID \
+	TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_QSEE_OS, TZ_SVC_EXTERNAL, 0x01)
+
+#define TZ_OS_LOAD_EXTERNAL_IMAGE_ID_PARAM_ID \
+	TZ_SYSCALL_CREATE_PARAM_ID_3( \
+	TZ_SYSCALL_PARAM_TYPE_VAL, TZ_SYSCALL_PARAM_TYPE_VAL, \
+	TZ_SYSCALL_PARAM_TYPE_VAL)
+
+#define TZ_APP_QSAPP_SEND_DATA_ID \
+	TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_TZ_APPS, \
+	TZ_SVC_APP_ID_PLACEHOLDER, 0x01)
+
+
+#define TZ_APP_QSAPP_SEND_DATA_ID_PARAM_ID \
+	TZ_SYSCALL_CREATE_PARAM_ID_5( \
+	TZ_SYSCALL_PARAM_TYPE_VAL, TZ_SYSCALL_PARAM_TYPE_BUF_RW, \
+	TZ_SYSCALL_PARAM_TYPE_VAL, TZ_SYSCALL_PARAM_TYPE_BUF_RW, \
+	TZ_SYSCALL_PARAM_TYPE_VAL)
+
+#define TZ_OS_UNLOAD_EXTERNAL_IMAGE_ID \
+	TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_QSEE_OS, TZ_SVC_EXTERNAL, 0x02)
+
+#define TZ_OS_UNLOAD_EXTERNAL_IMAGE_ID_PARAM_ID \
+	TZ_SYSCALL_CREATE_PARAM_ID_0
+
+#define TZ_INFO_IS_SVC_AVAILABLE_ID \
+	TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_SIP, TZ_SVC_INFO, 0x01)
+
+#define TZ_INFO_IS_SVC_AVAILABLE_ID_PARAM_ID \
+	TZ_SYSCALL_CREATE_PARAM_ID_1(TZ_SYSCALL_PARAM_TYPE_VAL)
+
+#define TZ_INFO_GET_FEATURE_VERSION_ID \
+	TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_SIP, TZ_SVC_INFO, 0x03)
+
+#define TZ_INFO_GET_FEATURE_VERSION_ID_PARAM_ID \
+	TZ_SYSCALL_CREATE_PARAM_ID_1(TZ_SYSCALL_PARAM_TYPE_VAL)
+
+#define TZ_OS_RPMB_PROVISION_KEY_ID \
+	TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_QSEE_OS, TZ_SVC_RPMB, 0x01)
+
+#define TZ_OS_RPMB_PROVISION_KEY_ID_PARAM_ID \
+	TZ_SYSCALL_CREATE_PARAM_ID_1(TZ_SYSCALL_PARAM_TYPE_VAL)
+
+#define TZ_OS_RPMB_ERASE_ID \
+	TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_QSEE_OS, TZ_SVC_RPMB, 0x02)
+
+#define TZ_OS_RPMB_ERASE_ID_PARAM_ID \
+	TZ_SYSCALL_CREATE_PARAM_ID_0
+
+#define TZ_OS_RPMB_CHECK_PROV_STATUS_ID \
+	TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_QSEE_OS, TZ_SVC_RPMB, 0x03)
+
+#define TZ_OS_RPMB_CHECK_PROV_STATUS_ID_PARAM_ID \
+	TZ_SYSCALL_CREATE_PARAM_ID_0
+
+#define TZ_OS_KS_GEN_KEY_ID \
+	TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_QSEE_OS, TZ_SVC_KEYSTORE, 0x01)
+
+#define TZ_OS_KS_GEN_KEY_ID_PARAM_ID \
+	TZ_SYSCALL_CREATE_PARAM_ID_2( \
+	TZ_SYSCALL_PARAM_TYPE_BUF_RW, TZ_SYSCALL_PARAM_TYPE_VAL)
+
+#define TZ_OS_KS_DEL_KEY_ID \
+	TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_QSEE_OS, TZ_SVC_KEYSTORE, 0x02)
+
+#define TZ_OS_KS_DEL_KEY_ID_PARAM_ID \
+	TZ_SYSCALL_CREATE_PARAM_ID_2( \
+	TZ_SYSCALL_PARAM_TYPE_BUF_RW, TZ_SYSCALL_PARAM_TYPE_VAL)
+
+#define TZ_OS_KS_GET_MAX_KEYS_ID \
+	TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_QSEE_OS, TZ_SVC_KEYSTORE, 0x03)
+
+#define TZ_OS_KS_GET_MAX_KEYS_ID_PARAM_ID \
+	TZ_SYSCALL_CREATE_PARAM_ID_1(TZ_SYSCALL_PARAM_TYPE_VAL)
+
+#define TZ_OS_KS_SET_PIPE_KEY_ID \
+	TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_QSEE_OS, TZ_SVC_KEYSTORE, 0x04)
+
+#define TZ_OS_KS_SET_PIPE_KEY_ID_PARAM_ID \
+	TZ_SYSCALL_CREATE_PARAM_ID_2( \
+	TZ_SYSCALL_PARAM_TYPE_BUF_RW, TZ_SYSCALL_PARAM_TYPE_VAL)
+
+#define TZ_OS_KS_UPDATE_KEY_ID \
+	TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_QSEE_OS, TZ_SVC_KEYSTORE, 0x05)
+
+#define TZ_OS_KS_UPDATE_KEY_ID_PARAM_ID \
+	TZ_SYSCALL_CREATE_PARAM_ID_2( \
+	TZ_SYSCALL_PARAM_TYPE_BUF_RW, TZ_SYSCALL_PARAM_TYPE_VAL)
+
+#define TZ_ES_SAVE_PARTITION_HASH_ID \
+	TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_SIP, TZ_SVC_ES, 0x01)
+
+#define TZ_ES_SAVE_PARTITION_HASH_ID_PARAM_ID \
+	TZ_SYSCALL_CREATE_PARAM_ID_3( \
+	TZ_SYSCALL_PARAM_TYPE_VAL, TZ_SYSCALL_PARAM_TYPE_BUF_RW, \
+	TZ_SYSCALL_PARAM_TYPE_VAL)
+
+#define TZ_APP_GPAPP_OPEN_SESSION_ID					\
+	TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_TZ_APPS,			\
+	TZ_SVC_APP_ID_PLACEHOLDER, 0x02)
+
+#define TZ_APP_GPAPP_OPEN_SESSION_ID_PARAM_ID				\
+	TZ_SYSCALL_CREATE_PARAM_ID_5(					\
+	TZ_SYSCALL_PARAM_TYPE_VAL, TZ_SYSCALL_PARAM_TYPE_BUF_RW,	\
+	TZ_SYSCALL_PARAM_TYPE_VAL, TZ_SYSCALL_PARAM_TYPE_BUF_RW,	\
+	TZ_SYSCALL_PARAM_TYPE_VAL)
+
+#define TZ_APP_GPAPP_CLOSE_SESSION_ID					\
+	TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_TZ_APPS,			\
+	TZ_SVC_APP_ID_PLACEHOLDER, 0x03)
+
+#define TZ_APP_GPAPP_CLOSE_SESSION_ID_PARAM_ID				\
+	TZ_SYSCALL_CREATE_PARAM_ID_5(					\
+	TZ_SYSCALL_PARAM_TYPE_VAL, TZ_SYSCALL_PARAM_TYPE_BUF_RW,	\
+	TZ_SYSCALL_PARAM_TYPE_VAL, TZ_SYSCALL_PARAM_TYPE_BUF_RW,	\
+	TZ_SYSCALL_PARAM_TYPE_VAL)
+
+#define TZ_APP_GPAPP_INVOKE_COMMAND_ID					\
+	TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_TZ_APPS,			\
+	TZ_SVC_APP_ID_PLACEHOLDER, 0x04)
+
+#define TZ_APP_GPAPP_INVOKE_COMMAND_ID_PARAM_ID				\
+	TZ_SYSCALL_CREATE_PARAM_ID_5(					\
+	TZ_SYSCALL_PARAM_TYPE_VAL, TZ_SYSCALL_PARAM_TYPE_BUF_RW,	\
+	TZ_SYSCALL_PARAM_TYPE_VAL, TZ_SYSCALL_PARAM_TYPE_BUF_RW,	\
+	TZ_SYSCALL_PARAM_TYPE_VAL)
+
+#define TZ_APP_GPAPP_REQUEST_CANCELLATION_ID				\
+	TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_TZ_APPS,			\
+	TZ_SVC_APP_ID_PLACEHOLDER, 0x05)
+
+#define TZ_APP_GPAPP_REQUEST_CANCELLATION_ID_PARAM_ID			\
+	TZ_SYSCALL_CREATE_PARAM_ID_5(					\
+	TZ_SYSCALL_PARAM_TYPE_VAL, TZ_SYSCALL_PARAM_TYPE_BUF_RW,	\
+	TZ_SYSCALL_PARAM_TYPE_VAL, TZ_SYSCALL_PARAM_TYPE_BUF_RW,	\
+	TZ_SYSCALL_PARAM_TYPE_VAL)
+
+#define TZ_MDTP_CIPHER_DIP_ID \
+	TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_SIP, TZ_SVC_MDTP, 0x1)
+
+#define TZ_MDTP_CIPHER_DIP_ID_PARAM_ID \
+	TZ_SYSCALL_CREATE_PARAM_ID_5( \
+	TZ_SYSCALL_PARAM_TYPE_BUF_RO, TZ_SYSCALL_PARAM_TYPE_VAL, \
+	TZ_SYSCALL_PARAM_TYPE_BUF_RW, TZ_SYSCALL_PARAM_TYPE_VAL, \
+	TZ_SYSCALL_PARAM_TYPE_VAL)
+
+#define TZ_OS_CONTINUE_BLOCKED_REQUEST_ID \
+	TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_QSEE_OS, TZ_SVC_LISTENER, 0x04)
+
+#define TZ_OS_CONTINUE_BLOCKED_REQUEST_SMCINVOKE_ID \
+	TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_QSEE_OS, TZ_SVC_LISTENER, 0x07)
+
+#define TZ_OS_CONTINUE_BLOCKED_REQUEST_ID_PARAM_ID \
+	TZ_SYSCALL_CREATE_PARAM_ID_1(TZ_SYSCALL_PARAM_TYPE_VAL)
+
+#define TZ_APP_QSAPP_SEND_DATA_WITH_WHITELIST_ID \
+	TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_TZ_APPS, \
+	TZ_SVC_APP_ID_PLACEHOLDER, 0x06)
+
+#define TZ_APP_QSAPP_SEND_DATA_WITH_WHITELIST_ID_PARAM_ID \
+	TZ_SYSCALL_CREATE_PARAM_ID_7( \
+	TZ_SYSCALL_PARAM_TYPE_VAL, TZ_SYSCALL_PARAM_TYPE_BUF_RW, \
+	TZ_SYSCALL_PARAM_TYPE_VAL, TZ_SYSCALL_PARAM_TYPE_BUF_RW, \
+	TZ_SYSCALL_PARAM_TYPE_VAL, TZ_SYSCALL_PARAM_TYPE_BUF_RW, \
+	TZ_SYSCALL_PARAM_TYPE_VAL)
+
+#define TZ_APP_GPAPP_OPEN_SESSION_WITH_WHITELIST_ID			\
+	TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_TZ_APPS,			\
+	TZ_SVC_APP_ID_PLACEHOLDER, 0x07)
+
+#define TZ_APP_GPAPP_OPEN_SESSION_WITH_WHITELIST_ID_PARAM_ID		\
+	TZ_SYSCALL_CREATE_PARAM_ID_7(					\
+	TZ_SYSCALL_PARAM_TYPE_VAL, TZ_SYSCALL_PARAM_TYPE_BUF_RW,	\
+	TZ_SYSCALL_PARAM_TYPE_VAL, TZ_SYSCALL_PARAM_TYPE_BUF_RW,	\
+	TZ_SYSCALL_PARAM_TYPE_VAL, TZ_SYSCALL_PARAM_TYPE_BUF_RW,	\
+	TZ_SYSCALL_PARAM_TYPE_VAL)
+
+#define TZ_APP_GPAPP_INVOKE_COMMAND_WITH_WHITELIST_ID			\
+	TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_TZ_APPS,			\
+	TZ_SVC_APP_ID_PLACEHOLDER, 0x09)
+
+#define TZ_APP_GPAPP_INVOKE_COMMAND_WITH_WHITELIST_ID_PARAM_ID		\
+	TZ_SYSCALL_CREATE_PARAM_ID_7(					\
+	TZ_SYSCALL_PARAM_TYPE_VAL, TZ_SYSCALL_PARAM_TYPE_BUF_RW,	\
+	TZ_SYSCALL_PARAM_TYPE_VAL, TZ_SYSCALL_PARAM_TYPE_BUF_RW,	\
+	TZ_SYSCALL_PARAM_TYPE_VAL, TZ_SYSCALL_PARAM_TYPE_BUF_RW,	\
+	TZ_SYSCALL_PARAM_TYPE_VAL)
+
+#define TZ_OS_LISTENER_RESPONSE_HANDLER_WITH_WHITELIST_ID \
+	TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_QSEE_OS, TZ_SVC_LISTENER, 0x05)
+
+#define TZ_OS_LISTENER_RESPONSE_HANDLER_WITH_WHITELIST_PARAM_ID \
+	TZ_SYSCALL_CREATE_PARAM_ID_4( \
+	TZ_SYSCALL_PARAM_TYPE_VAL, TZ_SYSCALL_PARAM_TYPE_VAL, \
+	TZ_SYSCALL_PARAM_TYPE_BUF_RW, TZ_SYSCALL_PARAM_TYPE_VAL)
+
+#endif /* __QSEECOMI_H_ */
diff -Nruw linux-4.4.115/include/soc/qcom/ramdump.h linux-4.4.115-fbx/include/soc/qcom/ramdump.h
--- linux-4.4.115/include/soc/qcom/ramdump.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/include/soc/qcom/ramdump.h	2019-01-22 16:16:28.495291612 +0100
@@ -0,0 +1,59 @@
+/* Copyright (c) 2011-2014, 2017 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _RAMDUMP_HEADER
+#define _RAMDUMP_HEADER
+
+struct device;
+
+struct ramdump_segment {
+	char *name;
+	unsigned long address;
+	void *v_address;
+	unsigned long size;
+};
+
+#ifdef CONFIG_MSM_SUBSYSTEM_RESTART
+extern void *create_ramdump_device(const char *dev_name, struct device *parent);
+extern void destroy_ramdump_device(void *dev);
+extern int do_ramdump(void *handle, struct ramdump_segment *segments,
+		int nsegments);
+extern int do_elf_ramdump(void *handle, struct ramdump_segment *segments,
+		int nsegments);
+extern int do_minidump(void *handle, struct ramdump_segment *segments,
+		       int nsegments);
+
+#else
+static inline void *create_ramdump_device(const char *dev_name,
+		struct device *parent)
+{
+	return NULL;
+}
+
+static inline void destroy_ramdump_device(void *dev)
+{
+}
+
+static inline int do_ramdump(void *handle, struct ramdump_segment *segments,
+		int nsegments)
+{
+	return -ENODEV;
+}
+
+static inline int do_elf_ramdump(void *handle, struct ramdump_segment *segments,
+		int nsegments)
+{
+	return -ENODEV;
+}
+
+static inline int do_minidump(void *handle, struct ramdump_segment *segments,
+		int nsegments)
+{
+	return -ENODEV;
+}
+#endif /* CONFIG_MSM_SUBSYSTEM_RESTART */
+
+#endif
diff -Nruw linux-4.4.115/include/soc/qcom/restart.h linux-4.4.115-fbx/include/soc/qcom/restart.h
--- linux-4.4.115/include/soc/qcom/restart.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/include/soc/qcom/restart.h	2019-01-22 16:16:28.495291612 +0100
@@ -0,0 +1,24 @@
+/* Copyright (c) 2011-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _ASM_ARCH_MSM_RESTART_H_
+#define _ASM_ARCH_MSM_RESTART_H_
+
+#define RESTART_NORMAL 0x0
+#define RESTART_DLOAD  0x1
+
+void msm_set_restart_mode(int mode);
+extern int pmic_reset_irq;
+
+#endif
+
diff -Nruw linux-4.4.115/include/soc/qcom/rpm-notifier.h linux-4.4.115-fbx/include/soc/qcom/rpm-notifier.h
--- linux-4.4.115/include/soc/qcom/rpm-notifier.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/include/soc/qcom/rpm-notifier.h	2019-01-22 16:16:28.495291612 +0100
@@ -0,0 +1,63 @@
+/* Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef __ARCH_ARM_MACH_MSM_RPM_NOTIF_H
+#define __ARCH_ARM_MACH_MSM_RPM_NOTIF_H
+
+struct msm_rpm_notifier_data {
+	uint32_t rsc_type;
+	uint32_t rsc_id;
+	uint32_t key;
+	uint32_t size;
+	uint8_t *value;
+};
+/**
+ * msm_rpm_register_notifier - Register for sleep set notifications
+ *
+ * @nb - notifier block to register
+ *
+ * return 0 on success, errno on failure.
+ */
+int msm_rpm_register_notifier(struct notifier_block *nb);
+
+/**
+ * msm_rpm_unregister_notifier - Unregister previously registered notifications
+ *
+ * @nb - notifier block to unregister
+ *
+ * return 0 on success, errno on failure.
+ */
+int msm_rpm_unregister_notifier(struct notifier_block *nb);
+
+/**
+ * msm_rpm_enter_sleep - Notify RPM driver to prepare for entering sleep
+ *
+ * @print - flag to enable printing the contents of the sleep buffer.
+ * @cpumask - cpumask of the next cpu to wake up
+ *
+ * return 0 on success, errno on failure.
+ */
+int msm_rpm_enter_sleep(bool print, const struct cpumask *cpumask);
+
+/**
+ * msm_rpm_exit_sleep - Notify RPM driver about resuming from power collapse
+ */
+void msm_rpm_exit_sleep(void);
+
+/**
+ * msm_rpm_waiting_for_ack - Indicate if there is an RPM message
+ *				pending acknowledgement.
+ * returns true for pending messages and false otherwise
+ */
+bool msm_rpm_waiting_for_ack(void);
+
+#endif /*__ARCH_ARM_MACH_MSM_RPM_NOTIF_H */
diff -Nruw linux-4.4.115/include/soc/qcom/rpm-smd.h linux-4.4.115-fbx/include/soc/qcom/rpm-smd.h
--- linux-4.4.115/include/soc/qcom/rpm-smd.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/include/soc/qcom/rpm-smd.h	2019-01-22 16:16:28.495291612 +0100
@@ -0,0 +1,309 @@
+/* Copyright (c) 2012, 2014-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __ARCH_ARM_MACH_MSM_RPM_SMD_H
+#define __ARCH_ARM_MACH_MSM_RPM_SMD_H
+
+/**
+ * enum msm_rpm_set - RPM enumerations for sleep/active set
+ * %MSM_RPM_CTX_ACTIVE_SET: Set resource parameters for active mode.
+ * %MSM_RPM_CTX_SLEEP_SET: Set resource parameters for sleep.
+ */
+enum msm_rpm_set {
+	MSM_RPM_CTX_ACTIVE_SET,
+	MSM_RPM_CTX_SLEEP_SET,
+};
+
+struct msm_rpm_request;
+
+struct msm_rpm_kvp {
+	uint32_t key;
+	uint32_t length;
+	uint8_t *data;
+};
+#ifdef CONFIG_MSM_RPM_SMD
+/**
+ * msm_rpm_create_request() - Creates a parent element to identify the
+ * resource on the RPM that stores the KVPs for different fields modified
+ * for a hardware resource
+ *
+ * @set: if the device is setting the active/sleep set parameter
+ * for the resource
+ * @rsc_type: unsigned 32 bit integer that identifies the type of the resource
+ * @rsc_id: unsigned 32 bit that uniquely identifies a resource within a type
+ * @num_elements: number of key-value pairs associated with the resource
+ *
+ * returns pointer to a msm_rpm_request on success, NULL on error
+ */
+struct msm_rpm_request *msm_rpm_create_request(
+		enum msm_rpm_set set, uint32_t rsc_type,
+		uint32_t rsc_id, int num_elements);
+
+/**
+ * msm_rpm_create_request_noirq() - Creates a parent element to identify the
+ * resource on the RPM that stores the KVPs for different fields modified
+ * for a hardware resource. This function is similar to msm_rpm_create_request
+ * except that it has to be called with interrupts masked.
+ *
+ * @set: if the device is setting the active/sleep set parameter
+ * for the resource
+ * @rsc_type: unsigned 32 bit integer that identifies the type of the resource
+ * @rsc_id: unsigned 32 bit that uniquely identifies a resource within a type
+ * @num_elements: number of key-value pairs associated with the resource
+ *
+ * returns pointer to a msm_rpm_request on success, NULL on error
+ */
+struct msm_rpm_request *msm_rpm_create_request_noirq(
+		enum msm_rpm_set set, uint32_t rsc_type,
+		uint32_t rsc_id, int num_elements);
+
+/**
+ * msm_rpm_add_kvp_data() - Adds a key-value pair to an existing RPM resource.
+ *
+ * @handle: RPM resource handle to which the data should be appended
+ * @key:  unsigned integer identifying the parameter modified
+ * @data: byte array that contains the value corresponding to key.
+ * @size:   size of data in bytes.
+ *
+ * returns 0 on success or errno
+ */
+int msm_rpm_add_kvp_data(struct msm_rpm_request *handle,
+		uint32_t key, const uint8_t *data, int size);
+
+/**
+ * msm_rpm_add_kvp_data_noirq() - Adds a key-value pair to an existing RPM
+ * resource. This function is similar to msm_rpm_add_kvp_data except that it
+ * has to be called with interrupts masked.
+ *
+ * @handle: RPM resource handle to which the data should be appended
+ * @key:  unsigned integer identifying the parameter modified
+ * @data: byte array that contains the value corresponding to key.
+ * @size:   size of data in bytes.
+ *
+ * returns 0 on success or errno
+ */
+int msm_rpm_add_kvp_data_noirq(struct msm_rpm_request *handle,
+		uint32_t key, const uint8_t *data, int size);
+
+/**
+ * msm_rpm_free_request() - Clean up the RPM request handle created with
+ * msm_rpm_create_request.
+ *
+ * @handle: RPM resource handle to be cleared.
+ */
+void msm_rpm_free_request(struct msm_rpm_request *handle);
+
+/**
+ * msm_rpm_send_request() - Send the RPM messages using SMD. The function
+ * assigns a message id before sending the data out to the RPM. RPM hardware
+ * uses the message id to acknowledge the messages.
+ *
+ * @handle: pointer to the msm_rpm_request for the resource being modified.
+ *
+ * returns non-zero message id on success and zero on a failed transaction.
+ * The drivers use message id to wait for ACK from RPM.
+ */
+int msm_rpm_send_request(struct msm_rpm_request *handle);
+
+/**
+ * msm_rpm_send_request_noack() - Send the RPM messages using SMD. The function
+ * assigns a message id before sending the data out to the RPM. RPM hardware
+ * uses the message id to acknowledge the messages, but this API does not wait
+ * on the ACK for this message id and it does not add the message id to the wait
+ * list.
+ *
+ * @handle: pointer to the msm_rpm_request for the resource being modified.
+ *
+ * returns NULL on success and an ERR_PTR-encoded errno on a failed transaction.
+ */
+void *msm_rpm_send_request_noack(struct msm_rpm_request *handle);
+
+/**
+ * msm_rpm_send_request_noirq() - Send the RPM messages using SMD. The
+ * function assigns a message id before sending the data out to the RPM.
+ * RPM hardware uses the message id to acknowledge the messages. This function
+ * is similar to msm_rpm_send_request except that it has to be called with
+ * interrupts masked.
+ *
+ * @handle: pointer to the msm_rpm_request for the resource being modified.
+ *
+ * returns non-zero message id on success and zero on a failed transaction.
+ * The drivers use message id to wait for ACK from RPM.
+ */
+int msm_rpm_send_request_noirq(struct msm_rpm_request *handle);
+
+/**
+ * msm_rpm_wait_for_ack() - A blocking call that waits for acknowledgment of
+ * a message from RPM.
+ *
+ * @msg_id: the return from msm_rpm_send_request
+ *
+ * returns 0 on success or errno
+ */
+int msm_rpm_wait_for_ack(uint32_t msg_id);
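+
+/*
+ * Example of the typical request flow (an illustrative sketch only: the
+ * resource type, resource id and key below are made-up placeholders, not
+ * real RPM resources):
+ *
+ *	struct msm_rpm_request *req;
+ *	uint32_t val = 1;
+ *	int msg_id;
+ *
+ *	req = msm_rpm_create_request(MSM_RPM_CTX_ACTIVE_SET, 0x12345678,
+ *				     0, 1);
+ *	if (!req)
+ *		return -ENOMEM;
+ *	msm_rpm_add_kvp_data(req, 0x1, (const uint8_t *)&val, sizeof(val));
+ *	msg_id = msm_rpm_send_request(req);
+ *	if (msg_id)
+ *		msm_rpm_wait_for_ack(msg_id);
+ *	msm_rpm_free_request(req);
+ */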
+
+/**
+ * msm_rpm_wait_for_ack_noirq() - A blocking call that waits for acknowledgment
+ * of a message from RPM. This function is similar to msm_rpm_wait_for_ack
+ * except that it has to be called with interrupts masked.
+ *
+ * @msg_id: the return from msm_rpm_send_request
+ *
+ * returns 0 on success or errno
+ */
+int msm_rpm_wait_for_ack_noirq(uint32_t msg_id);
+
+/**
+ * msm_rpm_send_message() - Wrapper function for clients to send data given an
+ * array of key value pairs.
+ *
+ * @set: if the device is setting the active/sleep set parameter
+ * for the resource
+ * @rsc_type: unsigned 32 bit integer that identifies the type of the resource
+ * @rsc_id: unsigned 32 bit that uniquely identifies a resource within a type
+ * @kvp: array of KVP data.
+ * @nelems: number of key-value pairs associated with the message.
+ *
+ * returns  0 on success and errno on failure.
+ */
+int msm_rpm_send_message(enum msm_rpm_set set, uint32_t rsc_type,
+		uint32_t rsc_id, struct msm_rpm_kvp *kvp, int nelems);
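+
+/*
+ * Example: the same update as the flow sketched above, collapsed into a
+ * single call through this wrapper (the resource and key ids are again
+ * placeholders):
+ *
+ *	uint32_t val = 1;
+ *	struct msm_rpm_kvp kvp = {
+ *		.key = 0x1,
+ *		.length = sizeof(val),
+ *		.data = (uint8_t *)&val,
+ *	};
+ *
+ *	msm_rpm_send_message(MSM_RPM_CTX_ACTIVE_SET, 0x12345678, 0,
+ *			     &kvp, 1);
+ */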
+
+/**
+ * msm_rpm_send_message_noack() - Wrapper function for clients to send data
+ * given an array of key value pairs without waiting for ack.
+ *
+ * @set: if the device is setting the active/sleep set parameter
+ * for the resource
+ * @rsc_type: unsigned 32 bit integer that identifies the type of the resource
+ * @rsc_id: unsigned 32 bit that uniquely identifies a resource within a type
+ * @kvp: array of KVP data.
+ * @nelems: number of key-value pairs associated with the message.
+ *
+ * returns NULL on success and ERR_PTR(errno) on failure.
+ */
+void *msm_rpm_send_message_noack(enum msm_rpm_set set, uint32_t rsc_type,
+		uint32_t rsc_id, struct msm_rpm_kvp *kvp, int nelems);
+
+/**
+ * msm_rpm_send_message_noirq() - Wrapper function for clients to send data
+ * given an array of key value pairs. This function is similar to the
+ * msm_rpm_send_message() except that it has to be called with interrupts
+ * disabled. Clients should choose the irq version when possible for system
+ * performance.
+ *
+ * @set: if the device is setting the active/sleep set parameter
+ * for the resource
+ * @rsc_type: unsigned 32 bit integer that identifies the type of the resource
+ * @rsc_id: unsigned 32 bit that uniquely identifies a resource within a type
+ * @kvp: array of KVP data.
+ * @nelems: number of key-value pairs associated with the message.
+ *
+ * returns  0 on success and errno on failure.
+ */
+int msm_rpm_send_message_noirq(enum msm_rpm_set set, uint32_t rsc_type,
+		uint32_t rsc_id, struct msm_rpm_kvp *kvp, int nelems);
+
+/**
+ * msm_rpm_driver_init() - Initialization function that registers an RPM
+ * platform driver.
+ *
+ * returns 0 on success.
+ */
+int __init msm_rpm_driver_init(void);
+
+#else
+
+static inline struct msm_rpm_request *msm_rpm_create_request(
+		enum msm_rpm_set set, uint32_t rsc_type,
+		uint32_t rsc_id, int num_elements)
+{
+	return NULL;
+}
+
+static inline struct msm_rpm_request *msm_rpm_create_request_noirq(
+		enum msm_rpm_set set, uint32_t rsc_type,
+		uint32_t rsc_id, int num_elements)
+{
+	return NULL;
+}
+static inline int msm_rpm_add_kvp_data(struct msm_rpm_request *handle,
+		uint32_t key, const uint8_t *data, int count)
+{
+	return 0;
+}
+static inline int msm_rpm_add_kvp_data_noirq(
+		struct msm_rpm_request *handle, uint32_t key,
+		const uint8_t *data, int count)
+{
+	return 0;
+}
+
+static inline void msm_rpm_free_request(struct msm_rpm_request *handle)
+{
+	return;
+}
+
+static inline int msm_rpm_send_request(struct msm_rpm_request *handle)
+{
+	return 0;
+}
+
+static inline int msm_rpm_send_request_noirq(struct msm_rpm_request *handle)
+{
+	return 0;
+}
+
+static inline void *msm_rpm_send_request_noack(struct msm_rpm_request *handle)
+{
+	return NULL;
+}
+
+static inline int msm_rpm_send_message(enum msm_rpm_set set, uint32_t rsc_type,
+		uint32_t rsc_id, struct msm_rpm_kvp *kvp, int nelems)
+{
+	return 0;
+}
+
+static inline int msm_rpm_send_message_noirq(enum msm_rpm_set set,
+		uint32_t rsc_type, uint32_t rsc_id, struct msm_rpm_kvp *kvp,
+		int nelems)
+{
+	return 0;
+}
+
+static inline void *msm_rpm_send_message_noack(enum msm_rpm_set set,
+		uint32_t rsc_type, uint32_t rsc_id, struct msm_rpm_kvp *kvp,
+		int nelems)
+{
+	return NULL;
+}
+
+static inline int msm_rpm_wait_for_ack(uint32_t msg_id)
+{
+	return 0;
+}
+static inline int msm_rpm_wait_for_ack_noirq(uint32_t msg_id)
+{
+	return 0;
+}
+
+static inline int __init msm_rpm_driver_init(void)
+{
+	return 0;
+}
+#endif
+#endif /*__ARCH_ARM_MACH_MSM_RPM_SMD_H*/
diff -Nruw linux-4.4.115/include/soc/qcom/scm-boot.h linux-4.4.115-fbx/include/soc/qcom/scm-boot.h
--- linux-4.4.115/include/soc/qcom/scm-boot.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/include/soc/qcom/scm-boot.h	2019-01-22 16:16:28.495291612 +0100
@@ -0,0 +1,65 @@
+/* Copyright (c) 2010, 2012, 2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#ifndef __MACH_SCM_BOOT_H
+#define __MACH_SCM_BOOT_H
+
+#define SCM_BOOT_ADDR			0x1
+#define SCM_FLAG_COLDBOOT_CPU1		0x01
+#define SCM_FLAG_COLDBOOT_CPU2		0x08
+#define SCM_FLAG_COLDBOOT_CPU3		0x20
+#define SCM_FLAG_WARMBOOT_CPU1		0x02
+#define SCM_FLAG_WARMBOOT_CPU0		0x04
+#define SCM_FLAG_WARMBOOT_CPU2		0x10
+#define SCM_FLAG_WARMBOOT_CPU3		0x40
+
+/* Multicluster Variants */
+#define SCM_BOOT_ADDR_MC		0x11
+#define SCM_FLAG_COLDBOOT_MC		0x02
+#define SCM_FLAG_WARMBOOT_MC		0x04
+
+#ifdef CONFIG_ARM64
+#define SCM_FLAG_HLOS			0x01
+#else
+#define SCM_FLAG_HLOS			0x0
+#endif
+
+#ifdef CONFIG_QCOM_SCM
+int scm_set_boot_addr(phys_addr_t addr, unsigned int flags);
+int scm_set_boot_addr_mc(phys_addr_t addr, u32 aff0,
+		u32 aff1, u32 aff2, u32 flags);
+int scm_set_warm_boot_addr_mc_for_all(phys_addr_t addr);
+int scm_is_mc_boot_available(void);
+#else
+static inline int scm_set_boot_addr(phys_addr_t addr, unsigned int flags)
+{
+	WARN_ONCE(1, "CONFIG_QCOM_SCM disabled, SCM call will fail silently\n");
+	return 0;
+}
+static inline int scm_set_boot_addr_mc(phys_addr_t addr, u32 aff0,
+		u32 aff1, u32 aff2, u32 flags)
+{
+	WARN_ONCE(1, "CONFIG_QCOM_SCM disabled, SCM call will fail silently\n");
+	return 0;
+}
+static inline int scm_set_warm_boot_addr_mc_for_all(phys_addr_t addr)
+{
+	WARN_ONCE(1, "CONFIG_QCOM_SCM disabled, SCM call will fail silently\n");
+	return 0;
+}
+static inline int scm_is_mc_boot_available(void)
+{
+	WARN_ONCE(1, "CONFIG_QCOM_SCM disabled, SCM call will fail silently\n");
+	return 0;
+}
+#endif
+
+#endif
diff -Nruw linux-4.4.115/include/soc/qcom/scm.h linux-4.4.115-fbx/include/soc/qcom/scm.h
--- linux-4.4.115/include/soc/qcom/scm.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/include/soc/qcom/scm.h	2019-01-22 16:16:28.495291612 +0100
@@ -0,0 +1,238 @@
+/* Copyright (c) 2010-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#ifndef __MACH_SCM_H
+#define __MACH_SCM_H
+
+#define SCM_SVC_BOOT			0x1
+#define SCM_SVC_PIL			0x2
+#define SCM_SVC_UTIL			0x3
+#define SCM_SVC_TZ			0x4
+#define SCM_SVC_IO			0x5
+#define SCM_SVC_INFO			0x6
+#define SCM_SVC_SSD			0x7
+#define SCM_SVC_FUSE			0x8
+#define SCM_SVC_PWR			0x9
+#define SCM_SVC_MP			0xC
+#define SCM_SVC_DCVS			0xD
+#define SCM_SVC_ES			0x10
+#define SCM_SVC_HDCP			0x11
+#define SCM_SVC_MDTP			0x12
+#define SCM_SVC_LMH			0x13
+#define SCM_SVC_SMMU_PROGRAM		0x15
+#define SCM_SVC_QDSS			0x16
+#define SCM_SVC_TZSCHEDULER		0xFC
+#define SCM_SVC_BW			0xFD
+
+#define SCM_FUSE_READ			0x7
+#define SCM_CMD_HDCP			0x01
+
+/* SCM Features */
+#define SCM_SVC_SEC_CAMERA		0xD
+
+#define DEFINE_SCM_BUFFER(__n) \
+static char __n[PAGE_SIZE] __aligned(PAGE_SIZE);
+
+#define SCM_BUFFER_SIZE(__buf)	sizeof(__buf)
+
+#define SCM_BUFFER_PHYS(__buf)	virt_to_phys(__buf)
+
+#define SCM_SIP_FNID(s, c) (((((s) & 0xFF) << 8) | ((c) & 0xFF)) | 0x02000000)
+#define SCM_QSEEOS_FNID(s, c) (((((s) & 0xFF) << 8) | ((c) & 0xFF)) | \
+			      0x32000000)
+#define SCM_SVC_ID(s) (((s) & 0xFF00) >> 8)
+
+#define MAX_SCM_ARGS 10
+#define MAX_SCM_RETS 3
+
+enum scm_arg_types {
+	SCM_VAL,
+	SCM_RO,
+	SCM_RW,
+	SCM_BUFVAL,
+};
+
+#define SCM_ARGS_IMPL(num, a, b, c, d, e, f, g, h, i, j, ...) (\
+			(((a) & 0xff) << 4) | \
+			(((b) & 0xff) << 6) | \
+			(((c) & 0xff) << 8) | \
+			(((d) & 0xff) << 10) | \
+			(((e) & 0xff) << 12) | \
+			(((f) & 0xff) << 14) | \
+			(((g) & 0xff) << 16) | \
+			(((h) & 0xff) << 18) | \
+			(((i) & 0xff) << 20) | \
+			(((j) & 0xff) << 22) | \
+			(num & 0xffff))
+
+#define SCM_ARGS(...) SCM_ARGS_IMPL(__VA_ARGS__, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
+
+/**
+ * struct scm_desc
+ * @arginfo: Metadata describing the arguments in args[]
+ * @args: The array of arguments for the secure syscall
+ * @ret: The values returned by the secure syscall
+ * @extra_arg_buf: The buffer containing extra arguments
+ *		   (that don't fit in available registers)
+ * @x5: The 4th argument to the secure syscall, or the physical address of
+ *	extra_arg_buf
+ */
+struct scm_desc {
+	u32 arginfo;
+	u64 args[MAX_SCM_ARGS];
+	u64 ret[MAX_SCM_RETS];
+
+	/* private */
+	void *extra_arg_buf;
+	u64 x5;
+};
+
+#if defined(CONFIG_QCOM_SCM) || defined(CONFIG_QCOM_SCM_QCPE)
+extern int scm_call(u32 svc_id, u32 cmd_id, const void *cmd_buf, size_t cmd_len,
+		void *resp_buf, size_t resp_len);
+
+extern int scm_call2(u32 cmd_id, struct scm_desc *desc);
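+
+/*
+ * Example of a minimal scm_call2() invocation (sketch only; the command
+ * number 0x1 is a placeholder). SCM_SIP_FNID() packs the service and
+ * command ids into the function id, and arginfo describes a single
+ * value argument:
+ *
+ *	struct scm_desc desc = {0};
+ *	u64 result;
+ *	int ret;
+ *
+ *	desc.args[0] = 0;
+ *	desc.arginfo = SCM_ARGS(1);
+ *	ret = scm_call2(SCM_SIP_FNID(SCM_SVC_BOOT, 0x1), &desc);
+ *	if (!ret)
+ *		result = desc.ret[0];
+ */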
+
+extern int scm_call2_atomic(u32 cmd_id, struct scm_desc *desc);
+
+extern int scm_call_noalloc(u32 svc_id, u32 cmd_id, const void *cmd_buf,
+		size_t cmd_len, void *resp_buf, size_t resp_len,
+		void *scm_buf, size_t scm_buf_size);
+
+
+extern s32 scm_call_atomic1(u32 svc, u32 cmd, u32 arg1);
+extern s32 scm_call_atomic1_1(u32 svc, u32 cmd, u32 arg1, u32 *ret1);
+extern s32 scm_call_atomic2(u32 svc, u32 cmd, u32 arg1, u32 arg2);
+extern s32 scm_call_atomic3(u32 svc, u32 cmd, u32 arg1, u32 arg2, u32 arg3);
+extern s32 scm_call_atomic4_3(u32 svc, u32 cmd, u32 arg1, u32 arg2, u32 arg3,
+		u32 arg4, u32 *ret1, u32 *ret2);
+extern s32 scm_call_atomic5_3(u32 svc, u32 cmd, u32 arg1, u32 arg2, u32 arg3,
+		u32 arg4, u32 arg5, u32 *ret1, u32 *ret2, u32 *ret3);
+
+#define SCM_VERSION(major, minor) (((major) << 16) | ((minor) & 0xFF))
+
+extern u32 scm_get_version(void);
+extern int scm_is_call_available(u32 svc_id, u32 cmd_id);
+extern int scm_get_feat_version(u32 feat, u64 *scm_ret);
+extern bool is_scm_armv8(void);
+extern int scm_restore_sec_cfg(u32 device_id, u32 spare, u64 *scm_ret);
+extern u32 scm_io_read(phys_addr_t address);
+extern int scm_io_write(phys_addr_t address, u32 val);
+extern bool scm_is_secure_device(void);
+
+#define SCM_HDCP_MAX_REG 5
+
+struct scm_hdcp_req {
+	u32 addr;
+	u32 val;
+};
+
+extern struct mutex scm_lmh_lock;
+
+#else
+
+static inline int scm_call(u32 svc_id, u32 cmd_id, const void *cmd_buf,
+		size_t cmd_len, void *resp_buf, size_t resp_len)
+{
+	return 0;
+}
+
+static inline int scm_call2(u32 cmd_id, struct scm_desc *desc)
+{
+	return 0;
+}
+
+static inline int scm_call2_atomic(u32 cmd_id, struct scm_desc *desc)
+{
+	return 0;
+}
+
+static inline int scm_call_noalloc(u32 svc_id, u32 cmd_id,
+		const void *cmd_buf, size_t cmd_len, void *resp_buf,
+		size_t resp_len, void *scm_buf, size_t scm_buf_size)
+{
+	return 0;
+}
+
+static inline s32 scm_call_atomic1(u32 svc, u32 cmd, u32 arg1)
+{
+	return 0;
+}
+
+static inline s32 scm_call_atomic1_1(u32 svc, u32 cmd, u32 arg1, u32 *ret1)
+{
+	return 0;
+}
+
+static inline s32 scm_call_atomic2(u32 svc, u32 cmd, u32 arg1, u32 arg2)
+{
+	return 0;
+}
+
+static inline s32 scm_call_atomic3(u32 svc, u32 cmd, u32 arg1, u32 arg2,
+		u32 arg3)
+{
+	return 0;
+}
+
+static inline s32 scm_call_atomic4_3(u32 svc, u32 cmd, u32 arg1, u32 arg2,
+		u32 arg3, u32 arg4, u32 *ret1, u32 *ret2)
+{
+	return 0;
+}
+
+static inline s32 scm_call_atomic5_3(u32 svc, u32 cmd, u32 arg1, u32 arg2,
+	u32 arg3, u32 arg4, u32 arg5, u32 *ret1, u32 *ret2, u32 *ret3)
+{
+	return 0;
+}
+
+static inline u32 scm_get_version(void)
+{
+	return 0;
+}
+
+static inline int scm_is_call_available(u32 svc_id, u32 cmd_id)
+{
+	return 0;
+}
+
+static inline int scm_get_feat_version(u32 feat, u64 *scm_ret)
+{
+	return 0;
+}
+
+static inline bool is_scm_armv8(void)
+{
+	return true;
+}
+
+static inline int scm_restore_sec_cfg(u32 device_id, u32 spare, u64 *scm_ret)
+{
+	return 0;
+}
+
+static inline u32 scm_io_read(phys_addr_t address)
+{
+	return 0;
+}
+
+static inline int scm_io_write(phys_addr_t address, u32 val)
+{
+	return 0;
+}
+
+static inline bool scm_is_secure_device(void)
+{
+	return false;
+}
+#endif
+#endif
diff -Nruw linux-4.4.115/include/soc/qcom/secure_buffer.h linux-4.4.115-fbx/include/soc/qcom/secure_buffer.h
--- linux-4.4.115/include/soc/qcom/secure_buffer.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/include/soc/qcom/secure_buffer.h	2019-01-22 16:16:28.495291612 +0100
@@ -0,0 +1,94 @@
+/*
+ * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __MSM_SECURE_BUFFER_H__
+#define __MSM_SECURE_BUFFER_H__
+
+#include <linux/scatterlist.h>
+
+/*
+ * If you add a secure VMID here, make sure you also update
+ * msm_secure_vmid_to_string().
+ * Keep VMID_LAST as the last entry in the enum: ION uses VMID_LAST
+ * to size the list it creates.
+ */
+enum vmid {
+	VMID_HLOS = 0x3,
+	VMID_CP_TOUCH = 0x8,
+	VMID_CP_BITSTREAM = 0x9,
+	VMID_CP_PIXEL = 0xA,
+	VMID_CP_NON_PIXEL = 0xB,
+	VMID_CP_CAMERA = 0xD,
+	VMID_HLOS_FREE = 0xE,
+	VMID_MSS_MSA = 0xF,
+	VMID_MSS_NONMSA = 0x10,
+	VMID_CP_SEC_DISPLAY = 0x11,
+	VMID_CP_APP = 0x12,
+	VMID_WLAN = 0x18,
+	VMID_WLAN_CE = 0x19,
+	VMID_CP_CAMERA_PREVIEW = 0x1D,
+	VMID_CP_SPSS_SP_SHARED = 0x22,
+	VMID_LAST,
+	VMID_INVAL = -1
+};
+
+#define PERM_READ                       0x4
+#define PERM_WRITE                      0x2
+#define PERM_EXEC			0x1
+
+#ifdef CONFIG_MSM_SECURE_BUFFER
+int msm_secure_table(struct sg_table *table);
+int msm_unsecure_table(struct sg_table *table);
+int hyp_assign_table(struct sg_table *table,
+			u32 *source_vm_list, int source_nelems,
+			int *dest_vmids, int *dest_perms,
+			int dest_nelems);
+extern int hyp_assign_phys(phys_addr_t addr, u64 size,
+			u32 *source_vmlist, int source_nelems,
+			int *dest_vmids, int *dest_perms, int dest_nelems);
+bool msm_secure_v2_is_supported(void);
+const char *msm_secure_vmid_to_string(int secure_vmid);
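+
+/*
+ * Example: reassigning a physical region from HLOS to a content-protection
+ * VMID (sketch only; the destination VMID and permissions depend on the
+ * use case, and paddr/size are assumed to describe a valid region):
+ *
+ *	u32 src_vm[] = {VMID_HLOS};
+ *	int dst_vm[] = {VMID_CP_PIXEL};
+ *	int dst_perm[] = {PERM_READ | PERM_WRITE};
+ *	int ret;
+ *
+ *	ret = hyp_assign_phys(paddr, size, src_vm, 1, dst_vm, dst_perm, 1);
+ */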
+#else
+static inline int msm_secure_table(struct sg_table *table)
+{
+	return -ENOSYS;
+}
+static inline int msm_unsecure_table(struct sg_table *table)
+{
+	return -ENOSYS;
+}
+static inline int hyp_assign_table(struct sg_table *table,
+			u32 *source_vm_list, int source_nelems,
+			int *dest_vmids, int *dest_perms,
+			int dest_nelems)
+{
+	return -ENOSYS;
+}
+static inline int hyp_assign_phys(phys_addr_t addr, u64 size,
+			u32 *source_vmlist, int source_nelems,
+			int *dest_vmids, int *dest_perms, int dest_nelems)
+{
+	return -ENOSYS;
+}
+
+static inline bool msm_secure_v2_is_supported(void)
+{
+	return false;
+}
+static inline const char *msm_secure_vmid_to_string(int secure_vmid)
+{
+	return "N/A";
+}
+#endif
+#endif
diff -Nruw linux-4.4.115/include/soc/qcom/service-locator.h linux-4.4.115-fbx/include/soc/qcom/service-locator.h
--- linux-4.4.115/include/soc/qcom/service-locator.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/include/soc/qcom/service-locator.h	2019-01-22 16:16:28.495291612 +0100
@@ -0,0 +1,95 @@
+/*
+ * Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * Process Domain Service Locator API header
+ */
+
+#ifndef _SERVICE_LOCATOR_H
+#define _SERVICE_LOCATOR_H
+
+#define QMI_SERVREG_LOC_NAME_LENGTH_V01 64
+#define QMI_SERVREG_LOC_LIST_LENGTH_V01 32
+
+/*
+ * @name: The full process domain path for a process domain which provides
+ *	  a particular service
+ * @instance_id: The QMI instance id corresponding to the root process
+ *		 domain which is responsible for notifications for this
+ *		 process domain
+ * @service_data_valid: Indicates if service_data field has valid data
+ * @service_data: Optional service data provided by the service locator
+ */
+struct servreg_loc_entry_v01 {
+	char name[QMI_SERVREG_LOC_NAME_LENGTH_V01 + 1];
+	uint32_t instance_id;
+	uint8_t service_data_valid;
+	uint32_t service_data;
+};
+
+/*
+ * @client_name:   Name of the client calling the api
+ * @service_name:  Name of the service for which the list of process domains
+ *		   is requested
+ * @total_domains: Length of the process domain list
+ * @db_rev_count:  Process domain list database revision number
+ * @domain_list:   List of process domains providing the service
+ */
+struct pd_qmi_client_data {
+	char client_name[QMI_SERVREG_LOC_NAME_LENGTH_V01 + 1];
+	char service_name[QMI_SERVREG_LOC_NAME_LENGTH_V01 + 1];
+	int total_domains;
+	int db_rev_count;
+	struct servreg_loc_entry_v01 *domain_list;
+};
+
+enum service_locator_state {
+	LOCATOR_DOWN = 0x0F,
+	LOCATOR_UP = 0x1F,
+};
+
+#if defined(CONFIG_MSM_SERVICE_LOCATOR)
+/*
+ * Use this api to request information regarding the process domains on
+ * which a particular service runs. The client name, the service name
+ * and notifier block pointer need to be provided by client calling the api.
+ * The total domains, db revision and the domain list will be filled in
+ * by the service locator.
+ * Returns 0 on success; a negative value on failure.
+ */
+int get_service_location(char *client_name, char *service_name,
+		struct notifier_block *locator_nb);
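+
+/*
+ * Example (sketch only; the client and service names are placeholders,
+ * and my_locator_cb is assumed to be the client's callback):
+ *
+ *	static struct notifier_block my_locator_nb = {
+ *		.notifier_call = my_locator_cb,
+ *	};
+ *
+ *	ret = get_service_location("my_client", "avs/audio",
+ *				   &my_locator_nb);
+ */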
+
+/*
+ * Use this api to request information regarding the subsystem the process
+ * domain runs on.
+ * @pd_path: The name field from inside the servreg_loc_entry that one
+ *	     gets back using the get_service_location() api.
+ * @subsys:  Buffer that is filled in with the subsystem name on success.
+ * Returns 0 on success; otherwise a value < 0 if no valid subsystem is found.
+ */
+int find_subsys(const char *pd_path, char *subsys);
+
+#else
+
+static inline int get_service_location(char *client_name,
+		char *service_name, struct notifier_block *locator_nb)
+{
+	return -ENODEV;
+}
+
+static inline int find_subsys(const char *pd_path, char *subsys)
+{
+	return 0;
+}
+
+#endif /* CONFIG_MSM_SERVICE_LOCATOR */
+
+#endif
diff -Nruw linux-4.4.115/include/soc/qcom/service-notifier.h linux-4.4.115-fbx/include/soc/qcom/service-notifier.h
--- linux-4.4.115/include/soc/qcom/service-notifier.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/include/soc/qcom/service-notifier.h	2019-01-22 16:16:28.495291612 +0100
@@ -0,0 +1,83 @@
+/*
+ * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * Process Domain Service Notifier API header
+ */
+
+#ifndef _SERVICE_NOTIFIER_H
+#define _SERVICE_NOTIFIER_H
+
+enum qmi_servreg_notif_service_state_enum_type_v01 {
+	QMI_SERVREG_NOTIF_SERVICE_STATE_ENUM_TYPE_MIN_VAL_V01 = INT_MIN,
+	QMI_SERVREG_NOTIF_SERVICE_STATE_ENUM_TYPE_MAX_VAL_V01 = INT_MAX,
+	SERVREG_NOTIF_SERVICE_STATE_DOWN_V01 = 0x0FFFFFFF,
+	SERVREG_NOTIF_SERVICE_STATE_UP_V01 = 0x1FFFFFFF,
+	SERVREG_NOTIF_SERVICE_STATE_UNINIT_V01 = 0x7FFFFFFF,
+};
+
+enum pd_subsys_state {
+	ROOT_PD_DOWN,
+	ROOT_PD_UP,
+	ROOT_PD_ERR_FATAL,
+	ROOT_PD_WDOG_BITE,
+	ROOT_PD_SHUTDOWN,
+	USER_PD_STATE_CHANGE,
+};
+#if defined(CONFIG_MSM_SERVICE_NOTIFIER)
+
+/* service_notif_register_notifier() - Register a notifier for a service
+ * On success, it returns back a handle. It takes the following arguments:
+ * service_path: Individual service identifier path for which a client
+ *		registers for notifications.
+ * instance_id: Instance id specific to a subsystem.
+ * nb: Notifier block with the callback for service events.
+ * curr_state: Current state of the service, returned during the
+ *		 registration process.
+ */
+void *service_notif_register_notifier(const char *service_path, int instance_id,
+				struct notifier_block *nb, int *curr_state);
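+
+/*
+ * Example (sketch only; the service path, instance id and notifier
+ * block are placeholders):
+ *
+ *	int curr_state;
+ *	void *handle;
+ *
+ *	handle = service_notif_register_notifier("msm/modem/wlan_pd",
+ *			0x1, &my_service_nb, &curr_state);
+ *	if (IS_ERR(handle))
+ *		return PTR_ERR(handle);
+ */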
+
+/* service_notif_unregister_notifier() - Unregister a notifier for a service.
+ * service_notif_handle - The notifier handler that was provided by the
+ *			  service_notif_register_notifier function when the
+ *			  client registered for notifications.
+ * nb - The notifier block that was previously used during the registration.
+ */
+int service_notif_unregister_notifier(void *service_notif_handle,
+					struct notifier_block *nb);
+
+int service_notif_pd_restart(const char *service_path, int instance_id);
+
+#else
+
+static inline void *service_notif_register_notifier(const char *service_path,
+				int instance_id, struct notifier_block *nb,
+				int *curr_state)
+{
+	return ERR_PTR(-ENODEV);
+}
+
+static inline int service_notif_unregister_notifier(void *service_notif_handle,
+					struct notifier_block *nb)
+{
+	return -ENODEV;
+}
+
+static inline int service_notif_pd_restart(const char *service_path,
+						int instance_id)
+{
+	return -ENODEV;
+}
+
+#endif /* CONFIG_MSM_SERVICE_NOTIFIER */
+
+#endif
diff -Nruw linux-4.4.115/include/soc/qcom/smd.h linux-4.4.115-fbx/include/soc/qcom/smd.h
--- linux-4.4.115/include/soc/qcom/smd.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/include/soc/qcom/smd.h	2019-01-22 16:16:28.495291612 +0100
@@ -0,0 +1,381 @@
+/* include/soc/qcom/smd.h
+ *
+ * Copyright (C) 2007 Google, Inc.
+ * Copyright (c) 2009-2014, The Linux Foundation. All rights reserved.
+ * Author: Brian Swetland <swetland@google.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __ASM_ARCH_MSM_SMD_H
+#define __ASM_ARCH_MSM_SMD_H
+
+#include <linux/io.h>
+
+#include <soc/qcom/smem.h>
+
+typedef struct smd_channel smd_channel_t;
+struct cpumask;
+
+#define SMD_MAX_CH_NAME_LEN 20 /* includes null char at end */
+
+#define SMD_EVENT_DATA 1
+#define SMD_EVENT_OPEN 2
+#define SMD_EVENT_CLOSE 3
+#define SMD_EVENT_STATUS 4
+#define SMD_EVENT_REOPEN_READY 5
+
+/*
+ * SMD Processor ID's.
+ *
+ * For all processors that have both SMSM and SMD clients,
+ * the SMSM Processor ID and the SMD Processor ID will
+ * be the same.  In cases where a processor only supports
+ * SMD, the entry will only exist in this enum.
+ */
+enum {
+	SMD_APPS = SMEM_APPS,
+	SMD_MODEM = SMEM_MODEM,
+	SMD_Q6 = SMEM_Q6,
+	SMD_DSPS = SMEM_DSPS,
+	SMD_TZ = SMEM_DSPS,
+	SMD_WCNSS = SMEM_WCNSS,
+	SMD_MODEM_Q6_FW = SMEM_MODEM_Q6_FW,
+	SMD_RPM = SMEM_RPM,
+	NUM_SMD_SUBSYSTEMS,
+};
+
+enum {
+	SMD_APPS_MODEM = 0,
+	SMD_APPS_QDSP,
+	SMD_MODEM_QDSP,
+	SMD_APPS_DSPS,
+	SMD_MODEM_DSPS,
+	SMD_QDSP_DSPS,
+	SMD_APPS_WCNSS,
+	SMD_MODEM_WCNSS,
+	SMD_QDSP_WCNSS,
+	SMD_DSPS_WCNSS,
+	SMD_APPS_Q6FW,
+	SMD_MODEM_Q6FW,
+	SMD_QDSP_Q6FW,
+	SMD_DSPS_Q6FW,
+	SMD_WCNSS_Q6FW,
+	SMD_APPS_RPM,
+	SMD_MODEM_RPM,
+	SMD_QDSP_RPM,
+	SMD_WCNSS_RPM,
+	SMD_TZ_RPM,
+	SMD_NUM_TYPE,
+
+};
+
+#ifdef CONFIG_MSM_SMD
+int smd_close(smd_channel_t *ch);
+
+/* passing a null pointer for data reads and discards */
+int smd_read(smd_channel_t *ch, void *data, int len);
+int smd_read_from_cb(smd_channel_t *ch, void *data, int len);
+
+/* Write to stream channels may do a partial write and return
+** the length actually written.
+** Write to packet channels will never do a partial write --
+** it will return the requested length written or an error.
+*/
+int smd_write(smd_channel_t *ch, const void *data, int len);
+
+int smd_write_avail(smd_channel_t *ch);
+int smd_read_avail(smd_channel_t *ch);
+
+/* Returns the total size of the current packet being read.
+** Returns 0 if no packets available or a stream channel.
+*/
+int smd_cur_packet_size(smd_channel_t *ch);
+
+/* these are used to get and set the IF sigs of a channel.
+ * DTR and RTS can be set; DSR, CTS, CD and RI can be read.
+ */
+int smd_tiocmget(smd_channel_t *ch);
+int smd_tiocmset(smd_channel_t *ch, unsigned int set, unsigned int clear);
+int
+smd_tiocmset_from_cb(smd_channel_t *ch, unsigned int set, unsigned int clear);
+int smd_named_open_on_edge(const char *name, uint32_t edge, smd_channel_t **_ch,
+			   void *priv, void (*notify)(void *, unsigned));
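+
+/*
+ * Example (sketch only; the channel name is a placeholder): open a
+ * channel to the modem edge and react to events from the notify
+ * callback, deferring any heavy work out of the callback itself:
+ *
+ *	static void my_notify(void *priv, unsigned event)
+ *	{
+ *		if (event == SMD_EVENT_DATA)
+ *			schedule_work(&my_read_work);
+ *	}
+ *
+ *	ret = smd_named_open_on_edge("DATA1", SMD_APPS_MODEM, &ch,
+ *				     NULL, my_notify);
+ */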
+
+/* Tells the other end of the smd channel that this end wants to receive
+ * interrupts when the written data is read.  Read interrupts should only
+ * be enabled when there is no space left in the buffer to write to, thus
+ * the interrupt acts as notification that space may be available.  If the
+ * other side does not support enabling/disabling interrupts on demand,
+ * then this function has no effect if called.
+ */
+void smd_enable_read_intr(smd_channel_t *ch);
+
+/* Tells the other end of the smd channel that this end does not want
+ * interrupts when written data is read.  The interrupts should be
+ * disabled by default.  If the other side does not support enabling/
+ * disabling interrupts on demand, then this function has no effect if
+ * called.
+ */
+void smd_disable_read_intr(smd_channel_t *ch);
+
+/**
+ * Enable/disable receive interrupts for the remote processor used by a
+ * particular channel.
+ * @ch:      open channel handle to use for the edge
+ * @mask:    1 = mask interrupts; 0 = unmask interrupts
+ * @cpumask: cpumask for the next cpu scheduled to be woken up
+ * @returns: 0 for success; < 0 for failure
+ *
+ * Note that this enables/disables all interrupts from the remote subsystem for
+ * all channels.  As such, it should be used with care and only for specific
+ * use cases such as power-collapse sequencing.
+ */
+int smd_mask_receive_interrupt(smd_channel_t *ch, bool mask,
+		const struct cpumask *cpumask);
+
+/* Starts a packet transaction.  The size of the packet may exceed the total
+ * size of the smd ring buffer.
+ *
+ * @ch: channel to write the packet to
+ * @len: total length of the packet
+ *
+ * Returns:
+ *      0 - success
+ *      -ENODEV - invalid smd channel
+ *      -EACCES - non-packet channel specified
+ *      -EINVAL - invalid length
+ *      -EBUSY - transaction already in progress
+ *      -EAGAIN - not enough memory in ring buffer to start transaction
+ *      -EPERM - unable to successfully start transaction due to write error
+ */
+int smd_write_start(smd_channel_t *ch, int len);
+
+/* Writes a segment of the packet for a packet transaction.
+ *
+ * @ch: channel to write packet to
+ * @data: buffer of data to write
+ * @len: length of data buffer
+ *
+ * Returns:
+ *      number of bytes written
+ *      -ENODEV - invalid smd channel
+ *      -EINVAL - invalid length
+ *      -ENOEXEC - transaction not started
+ */
+int smd_write_segment(smd_channel_t *ch, const void *data, int len);
+
+/* Completes a packet transaction.  Do not call from interrupt context.
+ *
+ * @ch: channel to complete transaction on
+ *
+ * Returns:
+ *      0 - success
+ *      -ENODEV - invalid smd channel
+ *      -E2BIG - some amount of the packet has not yet been written
+ */
+int smd_write_end(smd_channel_t *ch);
+
+/**
+ * smd_write_segment_avail() - available write space for packet transactions
+ * @ch: channel to write packet to
+ * @returns: number of bytes available to write to, or -ENODEV for invalid ch
+ *
+ * This is a version of smd_write_avail() intended for use with packet
+ * transactions.  This version correctly accounts for any internal reserved
+ * space at all stages of the transaction.
+ */
+int smd_write_segment_avail(smd_channel_t *ch);
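+
+/*
+ * Example: sending one packet of len bytes from buf as a transaction
+ * (sketch only; a real client must also block or back off when no
+ * write space is available instead of spinning):
+ *
+ *	int written = 0, avail;
+ *
+ *	if (smd_write_start(ch, len))
+ *		return -EIO;
+ *	while (written < len) {
+ *		avail = smd_write_segment_avail(ch);
+ *		if (avail <= 0)
+ *			continue;
+ *		written += smd_write_segment(ch, buf + written,
+ *				min(avail, len - written));
+ *	}
+ *	return smd_write_end(ch);
+ */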
+
+/*
+ * Returns a pointer to the subsystem name or NULL if no
+ * subsystem name is available.
+ *
+ * @type - Edge definition
+ */
+const char *smd_edge_to_subsystem(uint32_t type);
+
+/*
+ * Returns a pointer to the subsystem name given the
+ * remote processor ID.
+ *
+ * @pid     Remote processor ID
+ * @returns Pointer to subsystem name or NULL if not found
+ */
+const char *smd_pid_to_subsystem(uint32_t pid);
+
+/*
+ * Checks to see if a new packet has arrived on the channel.  Only to be
+ * called with interrupts disabled.
+ *
+ * @ch: channel to check if a packet has arrived
+ *
+ * Returns:
+ *      0 - packet not available
+ *      1 - packet available
+ *      -EINVAL - NULL parameter or non-packet based channel provided
+ */
+int smd_is_pkt_avail(smd_channel_t *ch);
+
+/*
+ * SMD initialization function that registers an SMD platform driver.
+ *
+ * Returns 0 on successful driver registration.
+ */
+int __init msm_smd_init(void);
+
+/**
+ * smd_remote_ss_to_edge() - return edge type from remote ss type
+ * @name:	remote subsystem name
+ *
+ * Returns the edge type connected between the local subsystem(APPS)
+ * and remote subsystem @name.
+ */
+int smd_remote_ss_to_edge(const char *name);
+
+/**
+ * smd_edge_to_pil_str - Returns the PIL string used to load the remote side of
+ *			the indicated edge.
+ *
+ * @type - Edge definition
+ * @returns - The PIL string to load the remote side of @type or NULL if the
+ *		PIL string does not exist.
+ */
+const char *smd_edge_to_pil_str(uint32_t type);
+
+#else
+
+static inline int smd_close(smd_channel_t *ch)
+{
+	return -ENODEV;
+}
+
+static inline int smd_read(smd_channel_t *ch, void *data, int len)
+{
+	return -ENODEV;
+}
+
+static inline int smd_read_from_cb(smd_channel_t *ch, void *data, int len)
+{
+	return -ENODEV;
+}
+
+static inline int smd_write(smd_channel_t *ch, const void *data, int len)
+{
+	return -ENODEV;
+}
+
+static inline int smd_write_avail(smd_channel_t *ch)
+{
+	return -ENODEV;
+}
+
+static inline int smd_read_avail(smd_channel_t *ch)
+{
+	return -ENODEV;
+}
+
+static inline int smd_cur_packet_size(smd_channel_t *ch)
+{
+	return -ENODEV;
+}
+
+static inline int smd_tiocmget(smd_channel_t *ch)
+{
+	return -ENODEV;
+}
+
+static inline int
+smd_tiocmset(smd_channel_t *ch, unsigned int set, unsigned int clear)
+{
+	return -ENODEV;
+}
+
+static inline int
+smd_tiocmset_from_cb(smd_channel_t *ch, unsigned int set, unsigned int clear)
+{
+	return -ENODEV;
+}
+
+static inline int
+smd_named_open_on_edge(const char *name, uint32_t edge, smd_channel_t **_ch,
+			   void *priv, void (*notify)(void *, unsigned))
+{
+	return -ENODEV;
+}
+
+static inline void smd_enable_read_intr(smd_channel_t *ch)
+{
+}
+
+static inline void smd_disable_read_intr(smd_channel_t *ch)
+{
+}
+
+static inline int smd_mask_receive_interrupt(smd_channel_t *ch, bool mask,
+		const struct cpumask *cpumask)
+{
+	return -ENODEV;
+}
+
+static inline int smd_write_start(smd_channel_t *ch, int len)
+{
+	return -ENODEV;
+}
+
+static inline int
+smd_write_segment(smd_channel_t *ch, const void *data, int len)
+{
+	return -ENODEV;
+}
+
+static inline int smd_write_end(smd_channel_t *ch)
+{
+	return -ENODEV;
+}
+
+static inline int smd_write_segment_avail(smd_channel_t *ch)
+{
+	return -ENODEV;
+}
+
+static inline const char *smd_edge_to_subsystem(uint32_t type)
+{
+	return NULL;
+}
+
+static inline const char *smd_pid_to_subsystem(uint32_t pid)
+{
+	return NULL;
+}
+
+static inline int smd_is_pkt_avail(smd_channel_t *ch)
+{
+	return -ENODEV;
+}
+
+static inline int __init msm_smd_init(void)
+{
+	return 0;
+}
+
+static inline int smd_remote_ss_to_edge(const char *name)
+{
+	return -EINVAL;
+}
+
+static inline const char *smd_edge_to_pil_str(uint32_t type)
+{
+	return NULL;
+}
+#endif
+
+#endif
diff -Nruw linux-4.4.115/include/soc/qcom/smem.h linux-4.4.115-fbx/include/soc/qcom/smem.h
--- linux-4.4.115/include/soc/qcom/smem.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/include/soc/qcom/smem.h	2019-01-22 16:16:28.495291612 +0100
@@ -0,0 +1,248 @@
+/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _ARCH_ARM_MACH_MSM_SMEM_H_
+#define _ARCH_ARM_MACH_MSM_SMEM_H_
+
+#include <linux/types.h>
+
+enum {
+	SMEM_APPS,
+	SMEM_MODEM,
+	SMEM_Q6,
+	SMEM_DSPS,
+	SMEM_WCNSS,
+	SMEM_MODEM_Q6_FW,
+	SMEM_CDSP = SMEM_MODEM_Q6_FW,
+	SMEM_RPM,
+	SMEM_TZ,
+	SMEM_SPSS,
+	SMEM_HYP,
+	NUM_SMEM_SUBSYSTEMS,
+};
+
+/*
+ * Flag options for the XXX_to_proc() API
+ *
+ * SMEM_ITEM_CACHED_FLAG - Indicates this operation should use cacheable SMEM
+ *
+ * SMEM_ANY_HOST_FLAG - Indicates this operation should not apply to smem items
+ *                      which are limited to a specific host pairing.  Will
+ *                      cause this operation to ignore the to_proc parameter.
+ */
+#define SMEM_ITEM_CACHED_FLAG 1
+#define SMEM_ANY_HOST_FLAG 2
+
+#define SMEM_NUM_SMD_STREAM_CHANNELS        64
+
+/**
+ * OVERFLOW_ADD_UNSIGNED() - check for unsigned overflow
+ *
+ * @type: type to check for overflow
+ * @a: left value to use
+ * @b: right value to use
+ * @returns: true if a + b will result in overflow; false otherwise
+ */
+#define OVERFLOW_ADD_UNSIGNED(type, a, b) \
+	(((type)~0 - (a)) < (b) ? true : false)
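+
+/*
+ * Example: OVERFLOW_ADD_UNSIGNED(uint32_t, 0xFFFFFFFF, 1) is true
+ * because the sum wraps around, while
+ * OVERFLOW_ADD_UNSIGNED(uint32_t, 1, 2) is false.
+ */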
+
+enum {
+	/* fixed items */
+	SMEM_PROC_COMM = 0,
+	SMEM_HEAP_INFO,
+	SMEM_ALLOCATION_TABLE,
+	SMEM_VERSION_INFO,
+	SMEM_HW_RESET_DETECT,
+	SMEM_AARM_WARM_BOOT,
+	SMEM_DIAG_ERR_MESSAGE,
+	SMEM_SPINLOCK_ARRAY,
+	SMEM_MEMORY_BARRIER_LOCATION,
+	SMEM_FIXED_ITEM_LAST = SMEM_MEMORY_BARRIER_LOCATION,
+
+	/* dynamic items */
+	SMEM_AARM_PARTITION_TABLE,
+	SMEM_AARM_BAD_BLOCK_TABLE,
+	SMEM_ERR_CRASH_LOG_ADSP,
+	SMEM_WM_UUID,
+	SMEM_CHANNEL_ALLOC_TBL,
+	SMEM_SMD_BASE_ID,
+	SMEM_SMEM_LOG_IDX = SMEM_SMD_BASE_ID + SMEM_NUM_SMD_STREAM_CHANNELS,
+	SMEM_SMEM_LOG_EVENTS,
+	SMEM_XBL_LOADER_CORE_INFO,
+	SMEM_SMEM_STATIC_LOG_EVENTS,
+	SMEM_CHARGER_BATTERY_INFO = SMEM_SMEM_STATIC_LOG_EVENTS,
+	SMEM_SMEM_SLOW_CLOCK_SYNC,
+	SMEM_WLAN_CONFIG = SMEM_SMEM_SLOW_CLOCK_SYNC,
+	SMEM_SMEM_SLOW_CLOCK_VALUE,
+	SMEM_BIO_LED_BUF,
+	SMEM_SMSM_SHARED_STATE,
+	SMEM_SMSM_INT_INFO,
+	SMEM_SMSM_SLEEP_DELAY,
+	SMEM_SMSM_LIMIT_SLEEP,
+	SMEM_SLEEP_POWER_COLLAPSE_DISABLED,
+	SMEM_KEYPAD_KEYS_PRESSED,
+	SMEM_KEYPAD_STATE_UPDATED,
+	SMEM_KEYPAD_STATE_IDX,
+	SMEM_GPIO_INT,
+	SMEM_SMP2P_CDSP_BASE,
+	SMEM_SMD_PROFILES = SMEM_SMP2P_CDSP_BASE + 8,
+	SMEM_TSSC_BUSY,
+	SMEM_HS_SUSPEND_FILTER_INFO,
+	SMEM_BATT_INFO,
+	SMEM_APPS_BOOT_MODE,
+	SMEM_VERSION_FIRST,
+	SMEM_VERSION_SMD = SMEM_VERSION_FIRST,
+	SMEM_VERSION_LAST = SMEM_VERSION_FIRST + 24,
+	SMEM_OSS_RRCASN1_BUF1,
+	SMEM_OSS_RRCASN1_BUF2,
+	SMEM_ID_VENDOR0,
+	SMEM_ID_VENDOR1,
+	SMEM_ID_VENDOR2,
+	SMEM_HW_SW_BUILD_ID,
+	SMEM_SMD_BASE_ID_2,
+	SMEM_SMD_FIFO_BASE_ID_2 = SMEM_SMD_BASE_ID_2 +
+						SMEM_NUM_SMD_STREAM_CHANNELS,
+	SMEM_CHANNEL_ALLOC_TBL_2 = SMEM_SMD_FIFO_BASE_ID_2 +
+						SMEM_NUM_SMD_STREAM_CHANNELS,
+	SMEM_I2C_MUTEX = SMEM_CHANNEL_ALLOC_TBL_2 +
+						SMEM_NUM_SMD_STREAM_CHANNELS,
+	SMEM_SCLK_CONVERSION,
+	SMEM_SMD_SMSM_INTR_MUX,
+	SMEM_SMSM_CPU_INTR_MASK,
+	SMEM_APPS_DEM_SLAVE_DATA,
+	SMEM_QDSP6_DEM_SLAVE_DATA,
+	SMEM_VSENSE_DATA,
+	SMEM_CLKREGIM_SOURCES,
+	SMEM_SMD_FIFO_BASE_ID,
+	SMEM_USABLE_RAM_PARTITION_TABLE = SMEM_SMD_FIFO_BASE_ID +
+						SMEM_NUM_SMD_STREAM_CHANNELS,
+	SMEM_POWER_ON_STATUS_INFO,
+	SMEM_DAL_AREA,
+	SMEM_SMEM_LOG_POWER_IDX,
+	SMEM_SMEM_LOG_POWER_WRAP,
+	SMEM_SMEM_LOG_POWER_EVENTS,
+	SMEM_ERR_CRASH_LOG,
+	SMEM_ERR_F3_TRACE_LOG,
+	SMEM_SMD_BRIDGE_ALLOC_TABLE,
+	SMEM_SMDLITE_TABLE,
+	SMEM_SD_IMG_UPGRADE_STATUS,
+	SMEM_SEFS_INFO,
+	SMEM_RESET_LOG,
+	SMEM_RESET_LOG_SYMBOLS,
+	SMEM_MODEM_SW_BUILD_ID,
+	SMEM_SMEM_LOG_MPROC_WRAP,
+	SMEM_BOOT_INFO_FOR_APPS,
+	SMEM_SMSM_SIZE_INFO,
+	SMEM_SMD_LOOPBACK_REGISTER,
+	SMEM_SSR_REASON_MSS0,
+	SMEM_SSR_REASON_WCNSS0,
+	SMEM_SSR_REASON_LPASS0,
+	SMEM_SSR_REASON_DSPS0,
+	SMEM_SSR_REASON_VCODEC0,
+	SMEM_SMP2P_APPS_BASE = 427,
+	SMEM_SMP2P_MODEM_BASE = SMEM_SMP2P_APPS_BASE + 8,    /* 435 */
+	SMEM_SMP2P_AUDIO_BASE = SMEM_SMP2P_MODEM_BASE + 8,   /* 443 */
+	SMEM_SMP2P_WIRLESS_BASE = SMEM_SMP2P_AUDIO_BASE + 8, /* 451 */
+	SMEM_SMP2P_POWER_BASE = SMEM_SMP2P_WIRLESS_BASE + 8, /* 459 */
+	SMEM_FLASH_DEVICE_INFO = SMEM_SMP2P_POWER_BASE + 8,  /* 467 */
+	SMEM_BAM_PIPE_MEMORY,     /* 468 */
+	SMEM_IMAGE_VERSION_TABLE, /* 469 */
+	SMEM_LC_DEBUGGER, /* 470 */
+	SMEM_FLASH_NAND_DEV_INFO, /* 471 */
+	SMEM_A2_BAM_DESCRIPTOR_FIFO, /* 472 */
+	SMEM_CPR_CONFIG, /* 473 */
+	SMEM_CLOCK_INFO, /* 474 */
+	SMEM_IPC_FIFO, /* 475 */
+	SMEM_RF_EEPROM_DATA, /* 476 */
+	SMEM_COEX_MDM_WCN, /* 477 */
+	SMEM_GLINK_NATIVE_XPRT_DESCRIPTOR, /* 478 */
+	SMEM_GLINK_NATIVE_XPRT_FIFO_0, /* 479 */
+	SMEM_GLINK_NATIVE_XPRT_FIFO_1, /* 480 */
+	SMEM_SMP2P_SENSOR_BASE, /* 481 */
+	SMEM_SMP2P_TZ_BASE = SMEM_SMP2P_SENSOR_BASE + 8, /* 489 */
+	SMEM_IPA_FILTER_TABLE = SMEM_SMP2P_TZ_BASE + 8, /* 497 */
+	SMEM_NUM_ITEMS, /* 498 */
+};
+
+#ifdef CONFIG_MSM_SMEM
+void *smem_alloc(unsigned id, unsigned size_in, unsigned to_proc,
+								unsigned flags);
+void *smem_find(unsigned id, unsigned size_in, unsigned to_proc,
+								unsigned flags);
+void *smem_get_entry(unsigned id, unsigned *size, unsigned to_proc,
+								unsigned flags);
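+
+/*
+ * Example (sketch only; assumes the item is shared with the modem):
+ * allocate a small SMEM item, or look it up if it already exists:
+ *
+ *	uint32_t *mode;
+ *
+ *	mode = smem_alloc(SMEM_APPS_BOOT_MODE, sizeof(*mode),
+ *			  SMEM_MODEM, 0);
+ *	if (!mode)
+ *		return -ENOMEM;
+ */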
+
+/**
+ * smem_get_entry_no_rlock - Get existing item without using remote spinlock
+ *
+ * @id:       ID of SMEM item
+ * @size_out: Pointer to size variable for storing the result
+ * @to_proc:  SMEM host that shares the item with apps
+ * @flags:    Item attribute flags
+ * @returns:  Pointer to SMEM item or NULL if it doesn't exist
+ *
+ * This function does not lock the remote spinlock and should only be used in
+ * failure-recover cases such as retrieving the subsystem failure reason during
+ * subsystem restart.
+ */
+void *smem_get_entry_no_rlock(unsigned id, unsigned *size_out, unsigned to_proc,
+								unsigned flags);
+
+/**
+ * smem_virt_to_phys() - Convert SMEM address to physical address.
+ *
+ * @smem_address: Virtual address returned by smem_alloc()
+ * @returns: Physical address (or NULL if there is a failure)
+ *
+ * This function should only be used if an SMEM item needs to be handed
+ * off to a DMA engine.
+ */
+phys_addr_t smem_virt_to_phys(void *smem_address);
+
+/**
+ * SMEM initialization function that registers an SMEM platform driver.
+ *
+ * @returns: 0 on successful driver registration.
+ */
+int __init msm_smem_init(void);
+
+#else
+static inline void *smem_alloc(unsigned id, unsigned size_in, unsigned to_proc,
+								unsigned flags)
+{
+	return NULL;
+}
+static inline void *smem_find(unsigned id, unsigned size_in,
+					unsigned to_proc, unsigned flags)
+{
+	return NULL;
+}
+static inline void *smem_get_entry(unsigned id, unsigned *size,
+					unsigned to_proc, unsigned flags)
+{
+	return NULL;
+}
+static inline void *smem_get_entry_no_rlock(unsigned id, unsigned *size_out,
+					unsigned to_proc, unsigned flags)
+{
+	return NULL;
+}
+static inline phys_addr_t smem_virt_to_phys(void *smem_address)
+{
+	return (phys_addr_t) NULL;
+}
+static inline int __init msm_smem_init(void)
+{
+	return 0;
+}
+#endif /* CONFIG_MSM_SMEM  */
+#endif /* _ARCH_ARM_MACH_MSM_SMEM_H_ */
diff -Nruw linux-4.4.115/include/soc/qcom/smem_log.h linux-4.4.115-fbx/include/soc/qcom/smem_log.h
--- linux-4.4.115/include/soc/qcom/smem_log.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/include/soc/qcom/smem_log.h	2019-01-22 16:16:28.495291612 +0100
@@ -0,0 +1,72 @@
+/* Copyright (c) 2008-2009, 2012-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/types.h>
+
+/* Event identifier format (see the composition example below):
+ * bit  31-28 is processor ID 8 => apps, 4 => Q6, 0 => modem
+ * bits 27-16 are subsystem id (event base)
+ * bits 15-0  are event id
+ */
+
+#define PROC                            0xF0000000
+#define SUB                             0x0FFF0000
+#define ID                              0x0000FFFF
+
+#define SMEM_LOG_PROC_ID_MODEM          0x00000000
+#define SMEM_LOG_PROC_ID_Q6             0x40000000
+#define SMEM_LOG_PROC_ID_APPS           0x80000000
+#define SMEM_LOG_PROC_ID_WCNSS          0xC0000000
+
+#define SMEM_LOG_CONT                   0x10000000
+
+#define SMEM_LOG_SMEM_EVENT_BASE        0x00020000
+#define SMEM_LOG_ERROR_EVENT_BASE       0x00060000
+#define SMEM_LOG_IPC_ROUTER_EVENT_BASE  0x000D0000
+#define SMEM_LOG_QMI_CCI_EVENT_BASE     0x000E0000
+#define SMEM_LOG_QMI_CSI_EVENT_BASE     0x000F0000
+#define ERR_ERROR_FATAL                 (SMEM_LOG_ERROR_EVENT_BASE + 1)
+#define ERR_ERROR_FATAL_TASK            (SMEM_LOG_ERROR_EVENT_BASE + 2)
+#define SMEM_LOG_EVENT_CB               (SMEM_LOG_SMEM_EVENT_BASE +  0)
+#define SMEM_LOG_EVENT_START            (SMEM_LOG_SMEM_EVENT_BASE +  1)
+#define SMEM_LOG_EVENT_INIT             (SMEM_LOG_SMEM_EVENT_BASE +  2)
+#define SMEM_LOG_EVENT_RUNNING          (SMEM_LOG_SMEM_EVENT_BASE +  3)
+#define SMEM_LOG_EVENT_STOP             (SMEM_LOG_SMEM_EVENT_BASE +  4)
+#define SMEM_LOG_EVENT_RESTART          (SMEM_LOG_SMEM_EVENT_BASE +  5)
+#define SMEM_LOG_EVENT_SS               (SMEM_LOG_SMEM_EVENT_BASE +  6)
+#define SMEM_LOG_EVENT_READ             (SMEM_LOG_SMEM_EVENT_BASE +  7)
+#define SMEM_LOG_EVENT_WRITE            (SMEM_LOG_SMEM_EVENT_BASE +  8)
+#define SMEM_LOG_EVENT_SIGS1            (SMEM_LOG_SMEM_EVENT_BASE +  9)
+#define SMEM_LOG_EVENT_SIGS2            (SMEM_LOG_SMEM_EVENT_BASE + 10)
+#define SMEM_LOG_EVENT_WRITE_DM         (SMEM_LOG_SMEM_EVENT_BASE + 11)
+#define SMEM_LOG_EVENT_READ_DM          (SMEM_LOG_SMEM_EVENT_BASE + 12)
+#define SMEM_LOG_EVENT_SKIP_DM          (SMEM_LOG_SMEM_EVENT_BASE + 13)
+#define SMEM_LOG_EVENT_STOP_DM          (SMEM_LOG_SMEM_EVENT_BASE + 14)
+#define SMEM_LOG_EVENT_ISR              (SMEM_LOG_SMEM_EVENT_BASE + 15)
+#define SMEM_LOG_EVENT_TASK             (SMEM_LOG_SMEM_EVENT_BASE + 16)
+#define SMEM_LOG_EVENT_RS               (SMEM_LOG_SMEM_EVENT_BASE + 17)
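+
+/*
+ * Composition example: an apps-side "read" event identifier is
+ * SMEM_LOG_PROC_ID_APPS | SMEM_LOG_EVENT_READ, i.e.
+ * 0x80000000 | (0x00020000 + 7) = 0x80020007.
+ */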
+
+#ifdef CONFIG_MSM_SMEM_LOGGING
+void smem_log_event(uint32_t id, uint32_t data1, uint32_t data2,
+		    uint32_t data3);
+void smem_log_event6(uint32_t id, uint32_t data1, uint32_t data2,
+		     uint32_t data3, uint32_t data4, uint32_t data5,
+		     uint32_t data6);
+#else
+static inline void smem_log_event(uint32_t id, uint32_t data1, uint32_t data2,
+		    uint32_t data3) { }
+static inline void smem_log_event6(uint32_t id, uint32_t data1, uint32_t data2,
+		     uint32_t data3, uint32_t data4, uint32_t data5,
+		     uint32_t data6) { }
+#endif
+
diff -Nruw linux-4.4.115/include/soc/qcom/smsm.h linux-4.4.115-fbx/include/soc/qcom/smsm.h
--- linux-4.4.115/include/soc/qcom/smsm.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/include/soc/qcom/smsm.h	2019-01-22 16:16:28.495291612 +0100
@@ -0,0 +1,147 @@
+/* Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _ARCH_ARM_MACH_MSM_SMSM_H_
+#define _ARCH_ARM_MACH_MSM_SMSM_H_
+
+#include <soc/qcom/smem.h>
+
+enum {
+	SMSM_APPS_STATE,
+	SMSM_MODEM_STATE,
+	SMSM_Q6_STATE,
+	SMSM_APPS_DEM,
+	SMSM_WCNSS_STATE = SMSM_APPS_DEM,
+	SMSM_MODEM_DEM,
+	SMSM_DSPS_STATE = SMSM_MODEM_DEM,
+	SMSM_Q6_DEM,
+	SMSM_POWER_MASTER_DEM,
+	SMSM_TIME_MASTER_DEM,
+};
+extern uint32_t SMSM_NUM_ENTRIES;
+
+/*
+ * Ordered by when processors adopted the SMSM protocol.  May not be 1-to-1
+ * with SMEM PIDs, despite initial expectations.
+ */
+enum {
+	SMSM_APPS = SMEM_APPS,
+	SMSM_MODEM = SMEM_MODEM,
+	SMSM_Q6 = SMEM_Q6,
+	SMSM_WCNSS,
+	SMSM_DSPS,
+};
+extern uint32_t SMSM_NUM_HOSTS;
+
+#define SMSM_INIT              0x00000001
+#define SMSM_SMDINIT           0x00000008
+#define SMSM_RPCINIT           0x00000020
+#define SMSM_RESET             0x00000040
+#define SMSM_TIMEWAIT          0x00000400
+#define SMSM_TIMEINIT          0x00000800
+#define SMSM_PROC_AWAKE        0x00001000
+#define SMSM_SMD_LOOPBACK      0x00800000
+
+#define SMSM_USB_PLUG_UNPLUG    0x00002000
+
+#define SMSM_A2_POWER_CONTROL  0x00000002
+#define SMSM_A2_POWER_CONTROL_ACK  0x00000800
+
+#ifdef CONFIG_MSM_SMD
+int smsm_change_state(uint32_t smsm_entry,
+		      uint32_t clear_mask, uint32_t set_mask);
+
+/*
+ * Changes the global interrupt mask.  The set and clear masks are re-applied
+ * every time the global interrupt mask is updated for callback registration
+ * and de-registration.
+ *
+ * The clear mask is applied first, so if a bit is set to 1 in both the clear
+ * mask and the set mask, the result will be that the interrupt is set.
+ *
+ * @smsm_entry  SMSM entry to change
+ * @clear_mask  1 = clear bit, 0 = no-op
+ * @set_mask    1 = set bit, 0 = no-op
+ *
+ * @returns 0 for success, < 0 for error
+ */
+int smsm_change_intr_mask(uint32_t smsm_entry,
+			  uint32_t clear_mask, uint32_t set_mask);
+int smsm_get_intr_mask(uint32_t smsm_entry, uint32_t *intr_mask);
+uint32_t smsm_get_state(uint32_t smsm_entry);
+int smsm_state_cb_register(uint32_t smsm_entry, uint32_t mask,
+	void (*notify)(void *, uint32_t old_state, uint32_t new_state),
+	void *data);
+int smsm_state_cb_deregister(uint32_t smsm_entry, uint32_t mask,
+	void (*notify)(void *, uint32_t, uint32_t), void *data);
+
+#else
+static inline int smsm_change_state(uint32_t smsm_entry,
+		      uint32_t clear_mask, uint32_t set_mask)
+{
+	return -ENODEV;
+}
+
+/*
+ * Changes the global interrupt mask.  The set and clear masks are re-applied
+ * every time the global interrupt mask is updated for callback registration
+ * and de-registration.
+ *
+ * The clear mask is applied first, so if a bit is set to 1 in both the clear
+ * mask and the set mask, the result will be that the interrupt is set.
+ *
+ * @smsm_entry  SMSM entry to change
+ * @clear_mask  1 = clear bit, 0 = no-op
+ * @set_mask    1 = set bit, 0 = no-op
+ *
+ * @returns 0 for success, < 0 for error
+ */
+static inline int smsm_change_intr_mask(uint32_t smsm_entry,
+			  uint32_t clear_mask, uint32_t set_mask)
+{
+	return -ENODEV;
+}
+
+static inline int smsm_get_intr_mask(uint32_t smsm_entry, uint32_t *intr_mask)
+{
+	return -ENODEV;
+}
+static inline uint32_t smsm_get_state(uint32_t smsm_entry)
+{
+	return 0;
+}
+static inline int smsm_state_cb_register(uint32_t smsm_entry, uint32_t mask,
+	void (*notify)(void *, uint32_t old_state, uint32_t new_state),
+	void *data)
+{
+	return -ENODEV;
+}
+static inline int smsm_state_cb_deregister(uint32_t smsm_entry, uint32_t mask,
+	void (*notify)(void *, uint32_t, uint32_t), void *data)
+{
+	return -ENODEV;
+}
+static inline void smsm_reset_modem(unsigned mode)
+{
+}
+static inline void smsm_reset_modem_cont(void)
+{
+}
+static inline void smd_sleep_exit(void)
+{
+}
+static inline int smsm_check_for_modem_crash(void)
+{
+	return -ENODEV;
+}
+#endif
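+
+/*
+ * Usage sketch (illustrative only; the entry/bit choice is arbitrary and
+ * assumes the same clear-then-set ordering documented for
+ * smsm_change_intr_mask()): raise SMSM_PROC_AWAKE in the apps entry,
+ * then clear it again.
+ */
+static inline int smsm_example_toggle_awake(void)
+{
+	int ret;
+
+	/* empty clear_mask, SMSM_PROC_AWAKE in set_mask: sets the bit */
+	ret = smsm_change_state(SMSM_APPS_STATE, 0, SMSM_PROC_AWAKE);
+	if (ret < 0)
+		return ret;
+	/* SMSM_PROC_AWAKE in clear_mask, empty set_mask: clears the bit */
+	return smsm_change_state(SMSM_APPS_STATE, SMSM_PROC_AWAKE, 0);
+}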
+#endif
diff -Nruw linux-4.4.115/include/soc/qcom/socinfo.h linux-4.4.115-fbx/include/soc/qcom/socinfo.h
--- linux-4.4.115/include/soc/qcom/socinfo.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/include/soc/qcom/socinfo.h	2019-10-29 09:26:25.529221635 +0100
@@ -0,0 +1,253 @@
+/*
+ * Copyright (c) 2009-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _ARCH_ARM_MACH_MSM_SOCINFO_H_
+#define _ARCH_ARM_MACH_MSM_SOCINFO_H_
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/of_fdt.h>
+#include <linux/of.h>
+
+#include <asm/cputype.h>
+/*
+ * SOC version type with major number in the upper 16 bits and minor
+ * number in the lower 16 bits.  For example:
+ *   1.0 -> 0x00010000
+ *   2.3 -> 0x00020003
+ */
+#define SOCINFO_VERSION_MAJOR(ver) (((ver) & 0xffff0000) >> 16)
+#define SOCINFO_VERSION_MINOR(ver) ((ver) & 0x0000ffff)
+#define SOCINFO_VERSION(maj, min)  ((((maj) & 0xffff) << 16)|((min) & 0xffff))
+
+#ifdef CONFIG_OF
+#define of_board_is_cdp()	of_machine_is_compatible("qcom,cdp")
+#define of_board_is_sim()	of_machine_is_compatible("qcom,sim")
+#define of_board_is_rumi()	of_machine_is_compatible("qcom,rumi")
+#define of_board_is_fluid()	of_machine_is_compatible("qcom,fluid")
+#define of_board_is_liquid()	of_machine_is_compatible("qcom,liquid")
+#define of_board_is_dragonboard()	\
+	of_machine_is_compatible("qcom,dragonboard")
+#define of_board_is_mtp()	of_machine_is_compatible("qcom,mtp")
+#define of_board_is_qrd()	of_machine_is_compatible("qcom,qrd")
+#define of_board_is_xpm()	of_machine_is_compatible("qcom,xpm")
+#define of_board_is_skuf()	of_machine_is_compatible("qcom,skuf")
+#define of_board_is_sbc()	of_machine_is_compatible("qcom,sbc")
+
+#define machine_is_msm8974()	of_machine_is_compatible("qcom,msm8974")
+#define machine_is_msm9625()	of_machine_is_compatible("qcom,msm9625")
+#define machine_is_msm8610()	of_machine_is_compatible("qcom,msm8610")
+#define machine_is_msm8226()	of_machine_is_compatible("qcom,msm8226")
+#define machine_is_apq8074()	of_machine_is_compatible("qcom,apq8074")
+#define machine_is_msm8926()	of_machine_is_compatible("qcom,msm8926")
+
+#define early_machine_is_msm8610()	\
+	of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,msm8610")
+#define early_machine_is_msm8909()	\
+	of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,msm8909")
+#define early_machine_is_msm8916()	\
+	of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,msm8916")
+#define early_machine_is_msm8936()	\
+	of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,msm8936")
+#define early_machine_is_msm8939()	\
+	of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,msm8939")
+#define early_machine_is_apq8084()	\
+	of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,apq8084")
+#define early_machine_is_mdm9630()	\
+	of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,mdm9630")
+#define early_machine_is_msmzirc()	\
+	of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,msmzirc")
+#define early_machine_is_fsm9900()	\
+	of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,fsm9900")
+#define early_machine_is_msm8994()	\
+	of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,msm8994")
+#define early_machine_is_msm8992()	\
+	of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,msm8992")
+#define early_machine_is_fsm9010()	\
+	of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,fsm9010")
+#define early_machine_is_msm8976()	\
+	of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,msm8976")
+#define early_machine_is_msmtellurium()	\
+	of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,msmtellurium")
+#define early_machine_is_msm8996()	\
+	of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,msm8996")
+#define early_machine_is_msm8929()	\
+	of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,msm8929")
+#define early_machine_is_msm8998()	\
+	of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,msm8998")
+#define early_machine_is_apq8098()	\
+	of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,apq8098")
+#define early_machine_is_msmhamster()	\
+	of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,msmhamster")
+#define early_machine_is_sdm660()	\
+	of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,sdm660")
+#define early_machine_is_sda660()	\
+	of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,sda660")
+#define early_machine_is_sdm636()	\
+	of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,sdm636")
+#define early_machine_is_sda636()	\
+	of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,sda636")
+#define early_machine_is_sdm658()	\
+	of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,sdm658")
+#define early_machine_is_sda658()	\
+	of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,sda658")
+#define early_machine_is_sdm630()	\
+	of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,sdm630")
+#define early_machine_is_sda630()	\
+	of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,sda630")
+#else
+#define of_board_is_sim()		0
+#define of_board_is_rumi()		0
+#define of_board_is_fluid()		0
+#define of_board_is_liquid()		0
+#define of_board_is_dragonboard()	0
+#define of_board_is_cdp()		0
+#define of_board_is_mtp()		0
+#define of_board_is_qrd()		0
+#define of_board_is_xpm()		0
+#define of_board_is_skuf()		0
+#define of_board_is_sbc()		0
+
+#define machine_is_msm8974()		0
+#define machine_is_msm9625()		0
+#define machine_is_msm8610()		0
+#define machine_is_msm8226()		0
+#define machine_is_apq8074()		0
+#define machine_is_msm8926()		0
+
+#define early_machine_is_msm8610()	0
+#define early_machine_is_msm8909()	0
+#define early_machine_is_msm8916()	0
+#define early_machine_is_msm8936()	0
+#define early_machine_is_msm8939()	0
+#define early_machine_is_apq8084()	0
+#define early_machine_is_mdm9630()	0
+#define early_machine_is_msmzirc()	0
+#define early_machine_is_fsm9900()	0
+#define early_machine_is_msm8994()	0
+#define early_machine_is_msm8992()	0
+#define early_machine_is_fsm9010()	0
+#define early_machine_is_msmtellurium()	0
+#define early_machine_is_msm8996()	0
+#define early_machine_is_msm8976()	0
+#define early_machine_is_msm8929()	0
+#define early_machine_is_msm8998()	0
+#define early_machine_is_apq8098()	0
+#define early_machine_is_msmhamster()	0
+#define early_machine_is_sdm660()	0
+#define early_machine_is_sda660()	0
+#define early_machine_is_sdm636()	0
+#define early_machine_is_sda636()	0
+#define early_machine_is_sdm658()	0
+#define early_machine_is_sda658()	0
+#define early_machine_is_sdm630()	0
+#define early_machine_is_sda630()	0
+#endif
+
+#define PLATFORM_SUBTYPE_MDM	1
+#define PLATFORM_SUBTYPE_INTERPOSERV3 2
+#define PLATFORM_SUBTYPE_SGLTE	6
+
+enum msm_cpu {
+	MSM_CPU_UNKNOWN = 0,
+	MSM_CPU_7X01,
+	MSM_CPU_7X25,
+	MSM_CPU_7X27,
+	MSM_CPU_8X50,
+	MSM_CPU_8X50A,
+	MSM_CPU_7X30,
+	MSM_CPU_8X55,
+	MSM_CPU_8X60,
+	MSM_CPU_8960,
+	MSM_CPU_8960AB,
+	MSM_CPU_7X27A,
+	FSM_CPU_9XXX,
+	MSM_CPU_7X25A,
+	MSM_CPU_7X25AA,
+	MSM_CPU_7X25AB,
+	MSM_CPU_8064,
+	MSM_CPU_8064AB,
+	MSM_CPU_8064AA,
+	MSM_CPU_8930,
+	MSM_CPU_8930AA,
+	MSM_CPU_8930AB,
+	MSM_CPU_7X27AA,
+	MSM_CPU_9615,
+	MSM_CPU_8974,
+	MSM_CPU_8974PRO_AA,
+	MSM_CPU_8974PRO_AB,
+	MSM_CPU_8974PRO_AC,
+	MSM_CPU_8627,
+	MSM_CPU_8625,
+	MSM_CPU_9625,
+	MSM_CPU_8909,
+	MSM_CPU_8916,
+	MSM_CPU_8936,
+	MSM_CPU_8939,
+	MSM_CPU_8226,
+	MSM_CPU_8610,
+	MSM_CPU_8625Q,
+	MSM_CPU_8084,
+	MSM_CPU_9630,
+	FSM_CPU_9900,
+	MSM_CPU_ZIRC,
+	MSM_CPU_8994,
+	MSM_CPU_8992,
+	FSM_CPU_9010,
+	MSM_CPU_TELLURIUM,
+	MSM_CPU_8996,
+	MSM_CPU_8976,
+	MSM_CPU_8929,
+	MSM_CPU_8998,
+	MSM_CPU_HAMSTER,
+	MSM_CPU_660,
+	MSM_CPU_630,
+	MSM_CPU_636,
+};
+
+struct msm_soc_info {
+	enum msm_cpu generic_soc_type;
+	char *soc_id_string;
+};
+
+enum pmic_model {
+	PMIC_MODEL_PM8058	= 13,
+	PMIC_MODEL_PM8028	= 14,
+	PMIC_MODEL_PM8901	= 15,
+	PMIC_MODEL_PM8027	= 16,
+	PMIC_MODEL_ISL_9519	= 17,
+	PMIC_MODEL_PM8921	= 18,
+	PMIC_MODEL_PM8018	= 19,
+	PMIC_MODEL_PM8015	= 20,
+	PMIC_MODEL_PM8014	= 21,
+	PMIC_MODEL_PM8821	= 22,
+	PMIC_MODEL_PM8038	= 23,
+	PMIC_MODEL_PM8922	= 24,
+	PMIC_MODEL_PM8917	= 25,
+	PMIC_MODEL_UNKNOWN	= 0xFFFFFFFF
+};
+
+enum msm_cpu socinfo_get_msm_cpu(void);
+uint32_t socinfo_get_id(void);
+uint32_t socinfo_get_version(void);
+uint32_t socinfo_get_raw_id(void);
+char *socinfo_get_build_id(void);
+uint32_t socinfo_get_platform_type(void);
+uint32_t socinfo_get_platform_subtype(void);
+uint32_t socinfo_get_platform_version(void);
+uint32_t socinfo_get_serial_number(void);
+enum pmic_model socinfo_get_pmic_model(void);
+uint32_t socinfo_get_pmic_die_revision(void);
+int __init socinfo_init(void) __must_check;
+
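+/*
+ * Usage sketch (illustrative only): the packed version built by
+ * SOCINFO_VERSION(2, 3) is 0x00020003, so the macros above recover
+ * major 2 and minor 3.
+ */
+static inline void socinfo_example_log_version(void)
+{
+	uint32_t ver = socinfo_get_version();
+
+	pr_info("soc id %u, version %u.%u\n", socinfo_get_id(),
+		SOCINFO_VERSION_MAJOR(ver), SOCINFO_VERSION_MINOR(ver));
+}
+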
+#endif
diff -Nruw linux-4.4.115/include/soc/qcom/spcom.h linux-4.4.115-fbx/include/soc/qcom/spcom.h
--- linux-4.4.115/include/soc/qcom/spcom.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/include/soc/qcom/spcom.h	2019-01-22 16:16:28.495291612 +0100
@@ -0,0 +1,223 @@
+/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _SPCOM_H_
+#define _SPCOM_H_
+
+#include <linux/types.h>	/* uint32_t, bool */
+
+/**
+ * @brief - Secure Processor Communication API
+ *
+ * This API should be used by Linux kernel drivers; a similar API is
+ * provided to user space applications via the spcomlib.h API file.
+ * Sending a request and receiving its response is synchronous, one
+ * request at a time.
+ * The API is based on a client/server model and resembles the
+ * TrustZone QSEECOM API.
+ * In most cases, the Secure Processor side has the servers and the
+ * HLOS side has the clients; a request is initiated by the client and
+ * answered by the server.
+ */
+
+/*===========================================================================*/
+/*                           defines, enums, types                           */
+/*===========================================================================*/
+
+/* Maximum size (including null) for channel names - match glink */
+#define SPCOM_CHANNEL_NAME_SIZE		32
+
+/**
+ * Request buffer size.
+ * Any large data (a multiple of 4 KB) is provided via a temp buffer in DDR.
+ * The request shall provide the temp buffer's physical address (4 KB aligned).
+ * Maximum request/response size of 268 is used to accommodate APDU size.
+ * From kernel spcom driver perspective a PAGE_SIZE of 4K
+ * is the actual maximum size for a single read/write file operation.
+ */
+#define SPCOM_MAX_REQUEST_SIZE		268
+#define SPCOM_MAX_RESPONSE_SIZE		268
+
+/**
+ * Abstract spcom handle.
+ * The actual struct definition is internal to the spcom driver.
+ */
+struct spcom_client; /* Forward declaration */
+struct spcom_server; /* Forward declaration */
+
+/**
+ * Client registration info
+ *
+ * @ch_name:	glink logical channel name
+ * @notify_ssr_cb: callback invoked when the remote SP side resets
+ *      (powers down).  This is likely to happen due to a remote
+ *      subsystem restart (SSR).  A NULL callback means no notification
+ *      is required.  Upon the SSR callback, the user should
+ *      unregister, poll for link-up and then register again.
+ */
+struct spcom_client_info {
+	const char *ch_name;
+	void (*notify_ssr_cb)(void);
+};
+
+/**
+ * Server registration info
+ *
+ * @ch_name:	glink logical channel name
+ * @notify_ssr_cb: callback invoked when the remote SP side resets
+ *      (powers down).  This is likely to happen due to a remote
+ *      subsystem restart (SSR).  A NULL callback means no notification
+ *      is required.  Upon the SSR callback, the user should
+ *      unregister, poll for link-up and then register again.
+ */
+struct spcom_service_info {
+	const char *ch_name;
+	void (*notify_ssr_cb)(void);
+};
+
+/*===========================================================================*/
+/*                           General API                                     */
+/*===========================================================================*/
+
+/**
+ * spcom_is_sp_subsystem_link_up() - check if SPSS link is up.
+ *
+ * return: true if link is up, false if link is down.
+ */
+bool spcom_is_sp_subsystem_link_up(void);
+
+/*===========================================================================*/
+/*                           Client Send Message                             */
+/*===========================================================================*/
+/**
+ * spcom_register_client() - register client for channel
+ *
+ * Only one client/server can register on each side of a channel.
+ * The server on the remote side is expected to be running and connected,
+ * so the connection is expected to complete within the provided timeout.
+ * A handle is returned even if the timeout expires;
+ * use spcom_client_is_server_connected() to check for a full connection.
+ *
+ * @info:	Client configuration info (input).
+ *
+ * return: client handle on success, NULL on failure.
+ */
+struct spcom_client *spcom_register_client(struct spcom_client_info *info);
+
+/**
+ * spcom_unregister_client() - unregister client for channel
+ *
+ * @client:	Client Handle.
+ *
+ * return: 0 on success, negative error code on failure (see errno.h)
+ */
+int spcom_unregister_client(struct spcom_client *client);
+
+/**
+ * spcom_client_send_message_sync() - Send a synchronous request and response
+ *
+ * @client:	a pointer to spcom client
+ * @req_ptr:	a pointer to the request C struct representation
+ * @req_size:	size of the request C struct
+ * @resp_ptr:	a pointer to the response C struct representation
+ * @resp_size:  size of the response C struct
+ * @timeout_msec: Timeout in msec between command and response, 0=no timeout.
+ *
+ * return: number of rx bytes on success, negative value on failure.
+ */
+int spcom_client_send_message_sync(struct spcom_client	*client,
+				   void			*req_ptr,
+				   uint32_t		req_size,
+				   void			*resp_ptr,
+				   uint32_t		resp_size,
+				   uint32_t		timeout_msec);
+
+/**
+ * spcom_client_is_server_connected() - Check if remote server connected.
+ *
+ * This API checks that the logical channel is fully connected between
+ * the client and the server.
+ * Normally, the server should be up first and connect first.
+ *
+ * @client:	a pointer to spcom client
+ *
+ * return: true if server connected, false otherwise.
+ */
+bool spcom_client_is_server_connected(struct spcom_client *client);
+
+/*===========================================================================*/
+/*                           Service                                         */
+/*===========================================================================*/
+
+/**
+ * spcom_register_service() - register server for channel
+ *
+ * Only one client/server can register on each side of a channel.
+ *
+ * @info:	Server configuration info (input).
+ *
+ * return: server handle on success, NULL on failure.
+ */
+struct spcom_server *spcom_register_service(struct spcom_service_info *info);
+
+/**
+ * spcom_unregister_service() - unregister server for channel
+ *
+ * @server:	server Handle.
+ *
+ * return: 0 on success, negative error code on failure (see errno.h)
+ */
+int spcom_unregister_service(struct spcom_server *server);
+
+/**
+ * spcom_server_get_next_request_size() - get the size of the
+ * next request
+ *
+ * This API MUST be called before calling spcom_server_wait_for_request().
+ * The server should allocate the relevant buffer size.
+ *
+ * @server:	a pointer to spcom server
+ *
+ * return: size of request in bytes on success, negative value on failure.
+ */
+int spcom_server_get_next_request_size(struct spcom_server *server);
+
+/**
+ * spcom_server_wait_for_request() - server wait for request
+ *
+ * @server:     a pointer to spcom server
+ * @req_ptr:	a pointer to the request buffer
+ * @req_size:	size of the buffer provided.
+ * The server should provide a buffer of at least the size
+ * returned by spcom_server_get_next_request_size() and up to
+ * SPCOM_MAX_REQUEST_SIZE.
+ *
+ * return: size of request on success, negative value on failure (see errno.h)
+ */
+int spcom_server_wait_for_request(struct spcom_server	*server,
+				  void			*req_ptr,
+				  uint32_t		req_size);
+
+/**
+ * spcom_server_send_response() - Send the response to a request
+ *
+ * @server:	a pointer to spcom server
+ * @resp_ptr:	a pointer to the response C struct representation
+ * @resp_size:  size of the response C struct
+ *
+ * return: sent data size on success, negative value on failure (see errno.h)
+ */
+int spcom_server_send_response(struct spcom_server	*server,
+			       void			*resp_ptr,
+			       uint32_t		resp_size);
+
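+/*
+ * Client-side usage sketch (illustrative only; the channel name
+ * "example_app" and the 16-byte request/response buffers are assumptions,
+ * and <linux/errno.h> is assumed to be available in the including file).
+ */
+static inline int spcom_example_ping(void)
+{
+	struct spcom_client_info info = {
+		.ch_name = "example_app",
+		.notify_ssr_cb = NULL,	/* no SSR notification needed */
+	};
+	struct spcom_client *client;
+	char req[16] = "ping", resp[16] = { 0 };
+	int ret;
+
+	client = spcom_register_client(&info);
+	if (!client)
+		return -ENODEV;
+
+	/* a handle is returned even on timeout, so check the link */
+	if (!spcom_client_is_server_connected(client)) {
+		spcom_unregister_client(client);
+		return -ENOTCONN;
+	}
+
+	/* returns the number of rx bytes, negative value on failure */
+	ret = spcom_client_send_message_sync(client, req, sizeof(req),
+					     resp, sizeof(resp), 1000);
+	spcom_unregister_client(client);
+	return ret;
+}
+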
+#endif /* _SPCOM_H_ */
diff -Nruw linux-4.4.115/include/soc/qcom/spm.h linux-4.4.115-fbx/include/soc/qcom/spm.h
--- linux-4.4.115/include/soc/qcom/spm.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/include/soc/qcom/spm.h	2019-01-22 16:16:28.495291612 +0100
@@ -0,0 +1,148 @@
+/* Copyright (c) 2010-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __ARCH_ARM_MACH_MSM_SPM_H
+#define __ARCH_ARM_MACH_MSM_SPM_H
+
+enum {
+	MSM_SPM_MODE_DISABLED,
+	MSM_SPM_MODE_CLOCK_GATING,
+	MSM_SPM_MODE_RETENTION,
+	MSM_SPM_MODE_GDHS,
+	MSM_SPM_MODE_POWER_COLLAPSE,
+	MSM_SPM_MODE_STANDALONE_POWER_COLLAPSE,
+	MSM_SPM_MODE_FASTPC,
+	MSM_SPM_MODE_NR
+};
+
+enum msm_spm_avs_irq {
+	MSM_SPM_AVS_IRQ_MIN,
+	MSM_SPM_AVS_IRQ_MAX,
+};
+
+struct msm_spm_device;
+struct device_node;
+
+#if defined(CONFIG_MSM_SPM)
+
+int msm_spm_set_low_power_mode(unsigned int mode, bool notify_rpm);
+int msm_spm_probe_done(void);
+int msm_spm_set_vdd(unsigned int cpu, unsigned int vlevel);
+int msm_spm_get_vdd(unsigned int cpu);
+int msm_spm_turn_on_cpu_rail(struct device_node *l2ccc_node,
+		unsigned int val, int cpu, int vctl_offset);
+struct msm_spm_device *msm_spm_get_device_by_name(const char *name);
+int msm_spm_config_low_power_mode(struct msm_spm_device *dev,
+		unsigned int mode, bool notify_rpm);
+int msm_spm_device_init(void);
+bool msm_spm_is_mode_avail(unsigned int mode);
+void msm_spm_dump_regs(unsigned int cpu);
+int msm_spm_is_avs_enabled(unsigned int cpu);
+int msm_spm_avs_enable(unsigned int cpu);
+int msm_spm_avs_disable(unsigned int cpu);
+int msm_spm_avs_set_limit(unsigned int cpu, uint32_t min_lvl,
+		uint32_t max_lvl);
+int msm_spm_avs_enable_irq(unsigned int cpu, enum msm_spm_avs_irq irq);
+int msm_spm_avs_disable_irq(unsigned int cpu, enum msm_spm_avs_irq irq);
+int msm_spm_avs_clear_irq(unsigned int cpu, enum msm_spm_avs_irq irq);
+
+#if defined(CONFIG_MSM_L2_SPM)
+
+/* Public functions */
+
+int msm_spm_apcs_set_phase(int cpu, unsigned int phase_cnt);
+int msm_spm_enable_fts_lpm(int cpu, uint32_t mode);
+
+#else
+
+static inline int msm_spm_apcs_set_phase(int cpu, unsigned int phase_cnt)
+{
+	return -ENOSYS;
+}
+
+static inline int msm_spm_enable_fts_lpm(int cpu, uint32_t mode)
+{
+	return -ENOSYS;
+}
+#endif /* defined(CONFIG_MSM_L2_SPM) */
+#else /* defined(CONFIG_MSM_SPM) */
+static inline int msm_spm_set_low_power_mode(unsigned int mode, bool notify_rpm)
+{
+	return -ENOSYS;
+}
+
+static inline int msm_spm_probe_done(void)
+{
+	return -ENOSYS;
+}
+
+static inline int msm_spm_set_vdd(unsigned int cpu, unsigned int vlevel)
+{
+	return -ENOSYS;
+}
+
+static inline int msm_spm_get_vdd(unsigned int cpu)
+{
+	return 0;
+}
+
+static inline int msm_spm_turn_on_cpu_rail(struct device_node *l2ccc_node,
+		unsigned int val, int cpu, int vctl_offset)
+{
+	return -ENOSYS;
+}
+
+static inline int msm_spm_device_init(void)
+{
+	return -ENOSYS;
+}
+
+static inline void msm_spm_dump_regs(unsigned int cpu)
+{
+	return;
+}
+
+static inline int msm_spm_config_low_power_mode(struct msm_spm_device *dev,
+		unsigned int mode, bool notify_rpm)
+{
+	return -ENODEV;
+}
+static inline
+struct msm_spm_device *msm_spm_get_device_by_name(const char *name)
+{
+	return NULL;
+}
+
+static inline bool msm_spm_is_mode_avail(unsigned int mode)
+{
+	return false;
+}
+
+static inline int msm_spm_avs_enable_irq(unsigned int cpu,
+		enum msm_spm_avs_irq irq)
+{
+	return -ENOSYS;
+}
+
+static inline int msm_spm_avs_disable_irq(unsigned int cpu,
+		enum msm_spm_avs_irq irq)
+{
+	return -ENOSYS;
+}
+
+static inline int msm_spm_avs_clear_irq(unsigned int cpu,
+		enum msm_spm_avs_irq irq)
+{
+	return -ENOSYS;
+}
+
+#endif  /* defined (CONFIG_MSM_SPM) */
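+
+/*
+ * Usage sketch (illustrative only; the mode choice is arbitrary and
+ * <linux/errno.h> is assumed to be available in the including file):
+ * enter standalone power collapse if the SPM supports it.
+ */
+static inline int msm_spm_example_enter_pc(void)
+{
+	if (!msm_spm_is_mode_avail(MSM_SPM_MODE_STANDALONE_POWER_COLLAPSE))
+		return -ENOSYS;
+
+	/* notify_rpm = false: the RPM is not told about this low power mode */
+	return msm_spm_set_low_power_mode(
+			MSM_SPM_MODE_STANDALONE_POWER_COLLAPSE, false);
+}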
+#endif  /* __ARCH_ARM_MACH_MSM_SPM_H */
diff -Nruw linux-4.4.115/include/soc/qcom/subsystem_notif.h linux-4.4.115-fbx/include/soc/qcom/subsystem_notif.h
--- linux-4.4.115/include/soc/qcom/subsystem_notif.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/include/soc/qcom/subsystem_notif.h	2019-01-22 16:16:28.495291612 +0100
@@ -0,0 +1,87 @@
+/* Copyright (c) 2011, 2013 - 2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ *
+ * Subsystem restart notifier API header
+ *
+ */
+
+#ifndef _SUBSYS_NOTIFIER_H
+#define _SUBSYS_NOTIFIER_H
+
+#include <linux/notifier.h>
+
+enum subsys_notif_type {
+	SUBSYS_BEFORE_SHUTDOWN,
+	SUBSYS_AFTER_SHUTDOWN,
+	SUBSYS_BEFORE_POWERUP,
+	SUBSYS_AFTER_POWERUP,
+	SUBSYS_RAMDUMP_NOTIFICATION,
+	SUBSYS_POWERUP_FAILURE,
+	SUBSYS_PROXY_VOTE,
+	SUBSYS_PROXY_UNVOTE,
+	SUBSYS_SOC_RESET,
+	SUBSYS_NOTIF_TYPE_COUNT
+};
+
+#if defined(CONFIG_MSM_SUBSYSTEM_RESTART)
+/* Use the subsys_notif_register_notifier API to register for notifications for
+ * a particular subsystem. This API will return a handle that can be used to
+ * un-reg for notifications using the subsys_notif_unregister_notifier API by
+ * passing in that handle as an argument.
+ *
+ * On receiving a notification, the second (unsigned long) argument of the
+ * notifier callback will contain the notification type, and the third (void *)
+ * argument will contain the handle that was returned by
+ * subsys_notif_register_notifier.
+ */
+void *subsys_notif_register_notifier(
+			const char *subsys_name, struct notifier_block *nb);
+int subsys_notif_unregister_notifier(void *subsys_handle,
+				struct notifier_block *nb);
+
+/* Use the subsys_notif_add_subsys API to initialize the notifier chains for
+ * a particular subsystem. This API will return a handle that can be used to
+ * queue notifications using the subsys_notif_queue_notification API by passing
+ * in that handle as an argument.
+ */
+void *subsys_notif_add_subsys(const char *);
+int subsys_notif_queue_notification(void *subsys_handle,
+					enum subsys_notif_type notif_type,
+					void *data);
+#else
+
+static inline void *subsys_notif_register_notifier(
+			const char *subsys_name, struct notifier_block *nb)
+{
+	return NULL;
+}
+
+static inline int subsys_notif_unregister_notifier(void *subsys_handle,
+					struct notifier_block *nb)
+{
+	return 0;
+}
+
+static inline void *subsys_notif_add_subsys(const char *subsys_name)
+{
+	return NULL;
+}
+
+static inline int subsys_notif_queue_notification(void *subsys_handle,
+					enum subsys_notif_type notif_type,
+					void *data)
+{
+	return 0;
+}
+#endif /* CONFIG_MSM_SUBSYSTEM_RESTART */
+
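+/*
+ * Usage sketch (illustrative only; the subsystem name "modem" and the
+ * callback actions are placeholders).
+ */
+static inline int example_ssr_nb_cb(struct notifier_block *nb,
+				    unsigned long code, void *data)
+{
+	switch (code) {
+	case SUBSYS_BEFORE_SHUTDOWN:
+		/* quiesce traffic towards the subsystem */
+		break;
+	case SUBSYS_AFTER_POWERUP:
+		/* re-establish communication */
+		break;
+	}
+	return NOTIFY_DONE;
+}
+
+/*
+ * Registration would then look like:
+ *	static struct notifier_block nb = {
+ *		.notifier_call = example_ssr_nb_cb,
+ *	};
+ *	void *handle = subsys_notif_register_notifier("modem", &nb);
+ */
+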
+#endif
diff -Nruw linux-4.4.115/include/soc/qcom/subsystem_restart.h linux-4.4.115-fbx/include/soc/qcom/subsystem_restart.h
--- linux-4.4.115/include/soc/qcom/subsystem_restart.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/include/soc/qcom/subsystem_restart.h	2019-01-22 16:16:28.495291612 +0100
@@ -0,0 +1,204 @@
+/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __SUBSYS_RESTART_H
+#define __SUBSYS_RESTART_H
+
+#include <linux/spinlock.h>
+#include <linux/interrupt.h>
+
+struct subsys_device;
+extern struct bus_type subsys_bus_type;
+
+enum {
+	RESET_SOC = 0,
+	RESET_SUBSYS_COUPLED,
+	RESET_LEVEL_MAX
+};
+
+enum crash_status {
+	CRASH_STATUS_NO_CRASH = 0,
+	CRASH_STATUS_ERR_FATAL,
+	CRASH_STATUS_WDOG_BITE,
+};
+
+struct device;
+struct module;
+
+/**
+ * struct subsys_desc - subsystem descriptor
+ * @name: name of subsystem
+ * @fw_name: firmware name
+ * @depends_on: subsystem this subsystem depends on to operate
+ * @dev: parent device
+ * @owner: module the descriptor belongs to
+ * @shutdown: Stop a subsystem
+ * @powerup: Start a subsystem
+ * @crash_shutdown: Shutdown a subsystem when the system crashes (can't sleep)
+ * @ramdump: Collect a ramdump of the subsystem
+ * @free_memory: Free the memory associated with this subsystem
+ * @is_not_loadable: Indicate if subsystem firmware is not loadable via pil
+ * framework
+ * @no_auth: Set if subsystem does not rely on PIL to authenticate and bring
+ * it out of reset
+ * @ssctl_instance_id: Instance id used to connect with SSCTL service
+ * @sysmon_pid:	pdev id that sysmon is probed with for the subsystem
+ * @sysmon_shutdown_ret: Return value for the call to sysmon_send_shutdown
+ * @system_debug: If "set", triggers a device restart when the
+ * subsystem's wdog bite handler is invoked.
+ * @ignore_ssr_failure: SSR failures are usually fatal and results in panic. If
+ * set will ignore failure.
+ * @edge: GLINK logical name of the subsystem
+ */
+struct subsys_desc {
+	const char *name;
+	char fw_name[256];
+	const char *depends_on;
+	struct device *dev;
+	struct module *owner;
+
+	int (*shutdown)(const struct subsys_desc *desc, bool force_stop);
+	int (*powerup)(const struct subsys_desc *desc);
+	void (*crash_shutdown)(const struct subsys_desc *desc);
+	int (*ramdump)(int, const struct subsys_desc *desc);
+	void (*free_memory)(const struct subsys_desc *desc);
+	irqreturn_t (*err_fatal_handler) (int irq, void *dev_id);
+	irqreturn_t (*stop_ack_handler) (int irq, void *dev_id);
+	irqreturn_t (*wdog_bite_handler) (int irq, void *dev_id);
+	irqreturn_t (*generic_handler)(int irq, void *dev_id);
+	int is_not_loadable;
+	int err_fatal_gpio;
+	unsigned int err_fatal_irq;
+	unsigned int err_ready_irq;
+	unsigned int stop_ack_irq;
+	unsigned int wdog_bite_irq;
+	unsigned int generic_irq;
+	int force_stop_gpio;
+	int ramdump_disable_gpio;
+	int shutdown_ack_gpio;
+	int ramdump_disable;
+	bool no_auth;
+	bool pil_mss_memsetup;
+	int ssctl_instance_id;
+	u32 sysmon_pid;
+	int sysmon_shutdown_ret;
+	bool system_debug;
+	bool ignore_ssr_failure;
+	const char *edge;
+};
+
+/**
+ * struct notif_data - additional notif information
+ * @crashed: indicates if subsystem has crashed due to wdog bite or err fatal
+ * @enable_ramdump: ramdumps disabled if set to 0
+ * @enable_mini_ramdumps: enable flag for minimized critical-memory-only
+ * ramdumps
+ * @no_auth: set if subsystem does not use PIL to bring it out of reset
+ * @pdev: subsystem platform device pointer
+ */
+struct notif_data {
+	enum crash_status crashed;
+	int enable_ramdump;
+	int enable_mini_ramdumps;
+	bool no_auth;
+	struct platform_device *pdev;
+};
+
+#if defined(CONFIG_MSM_SUBSYSTEM_RESTART)
+
+extern int subsys_get_restart_level(struct subsys_device *dev);
+extern int subsystem_restart_dev(struct subsys_device *dev);
+extern int subsystem_restart(const char *name);
+extern int subsystem_crashed(const char *name);
+
+extern void *subsystem_get(const char *name);
+extern void *subsystem_get_with_fwname(const char *name, const char *fw_name);
+extern int subsystem_set_fwname(const char *name, const char *fw_name);
+extern void subsystem_put(void *subsystem);
+
+extern struct subsys_device *subsys_register(struct subsys_desc *desc);
+extern void subsys_unregister(struct subsys_device *dev);
+
+extern void subsys_default_online(struct subsys_device *dev);
+extern void subsys_set_crash_status(struct subsys_device *dev,
+					enum crash_status crashed);
+extern enum crash_status subsys_get_crash_status(struct subsys_device *dev);
+extern void subsys_set_error(struct subsys_device *dev, const char *error_msg);
+void notify_proxy_vote(struct device *device);
+void notify_proxy_unvote(struct device *device);
+void complete_err_ready(struct subsys_device *subsys);
+extern int wait_for_shutdown_ack(struct subsys_desc *desc);
+#else
+
+static inline int subsys_get_restart_level(struct subsys_device *dev)
+{
+	return 0;
+}
+
+static inline int subsystem_restart_dev(struct subsys_device *dev)
+{
+	return 0;
+}
+
+static inline int subsystem_restart(const char *name)
+{
+	return 0;
+}
+
+static inline int subsystem_crashed(const char *name)
+{
+	return 0;
+}
+
+static inline void *subsystem_get(const char *name)
+{
+	return NULL;
+}
+
+static inline void *subsystem_get_with_fwname(const char *name,
+				const char *fw_name) {
+	return NULL;
+}
+
+static inline int subsystem_set_fwname(const char *name,
+				const char *fw_name) {
+	return 0;
+}
+
+static inline void subsystem_put(void *subsystem) { }
+
+static inline
+struct subsys_device *subsys_register(struct subsys_desc *desc)
+{
+	return NULL;
+}
+
+static inline void subsys_unregister(struct subsys_device *dev) { }
+
+static inline void subsys_default_online(struct subsys_device *dev) { }
+static inline void subsys_set_crash_status(struct subsys_device *dev,
+						enum crash_status crashed) { }
+static inline
+enum crash_status subsys_get_crash_status(struct subsys_device *dev)
+{
+	return CRASH_STATUS_NO_CRASH;
+}
+static inline void notify_proxy_vote(struct device *device) { }
+static inline void notify_proxy_unvote(struct device *device) { }
+static inline int wait_for_shutdown_ack(struct subsys_desc *desc)
+{
+	return -ENOSYS;
+}
+#endif /* CONFIG_MSM_SUBSYSTEM_RESTART */
+
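+/*
+ * Usage sketch (illustrative only; the name and the empty callbacks are
+ * placeholders for real power sequencing code).
+ */
+static inline int example_subsys_shutdown(const struct subsys_desc *desc,
+					  bool force_stop)
+{
+	return 0;	/* stop the remote core here */
+}
+
+static inline int example_subsys_powerup(const struct subsys_desc *desc)
+{
+	return 0;	/* bring the remote core out of reset here */
+}
+
+static inline struct subsys_device *example_subsys_register(struct device *dev)
+{
+	static struct subsys_desc desc = {
+		.name = "example",
+		.shutdown = example_subsys_shutdown,
+		.powerup = example_subsys_powerup,
+	};
+
+	desc.dev = dev;
+	return subsys_register(&desc);
+}
+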
+#endif
diff -Nruw linux-4.4.115/include/soc/qcom/sysmon.h linux-4.4.115-fbx/include/soc/qcom/sysmon.h
--- linux-4.4.115/include/soc/qcom/sysmon.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/include/soc/qcom/sysmon.h	2019-01-22 16:16:28.495291612 +0100
@@ -0,0 +1,127 @@
+/*
+ * Copyright (c) 2011-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __MSM_SYSMON_H
+#define __MSM_SYSMON_H
+
+#include <soc/qcom/smd.h>
+#include <soc/qcom/subsystem_notif.h>
+#include <soc/qcom/subsystem_restart.h>
+
+/**
+ * enum subsys_id - Destination subsystems for events.
+ */
+enum subsys_id {
+	/* SMD subsystems */
+	SYSMON_SS_MODEM     = SMD_APPS_MODEM,
+	SYSMON_SS_LPASS     = SMD_APPS_QDSP,
+	SYSMON_SS_WCNSS     = SMD_APPS_WCNSS,
+	SYSMON_SS_DSPS      = SMD_APPS_DSPS,
+	SYSMON_SS_Q6FW      = SMD_APPS_Q6FW,
+
+	/* Non-SMD subsystems */
+	SYSMON_SS_EXT_MODEM = SMD_NUM_TYPE,
+	SYSMON_NUM_SS
+};
+
+/**
+ * enum ssctl_ssr_event_enum_type - Subsystem notification type.
+ */
+enum ssctl_ssr_event_enum_type {
+	SSCTL_SSR_EVENT_ENUM_TYPE_MIN_ENUM_VAL = -2147483647,
+	SSCTL_SSR_EVENT_BEFORE_POWERUP = 0,
+	SSCTL_SSR_EVENT_AFTER_POWERUP = 1,
+	SSCTL_SSR_EVENT_BEFORE_SHUTDOWN = 2,
+	SSCTL_SSR_EVENT_AFTER_SHUTDOWN = 3,
+	SSCTL_SSR_EVENT_ENUM_TYPE_MAX_ENUM_VAL = 2147483647
+};
+
+/**
+ * enum ssctl_ssr_event_driven_enum_type - Subsystem shutdown type.
+ */
+enum ssctl_ssr_event_driven_enum_type {
+	SSCTL_SSR_EVENT_DRIVEN_ENUM_TYPE_MIN_ENUM_VAL = -2147483647,
+	SSCTL_SSR_EVENT_FORCED = 0,
+	SSCTL_SSR_EVENT_GRACEFUL = 1,
+	SSCTL_SSR_EVENT_DRIVEN_ENUM_TYPE_MAX_ENUM_VAL = 2147483647
+};
+
+#if defined(CONFIG_MSM_SYSMON_COMM) || defined(CONFIG_MSM_SYSMON_GLINK_COMM)
+extern int sysmon_send_event(struct subsys_desc *dest_desc,
+			struct subsys_desc *event_desc,
+			enum subsys_notif_type notif);
+extern int sysmon_send_event_no_qmi(struct subsys_desc *dest_desc,
+				struct subsys_desc *event_desc,
+				enum subsys_notif_type notif);
+extern int sysmon_get_reason(struct subsys_desc *dest_desc, char *buf,
+				size_t len);
+extern int sysmon_get_reason_no_qmi(struct subsys_desc *dest_desc,
+				char *buf, size_t len);
+extern int sysmon_send_shutdown(struct subsys_desc *dest_desc);
+extern int sysmon_send_shutdown_no_qmi(struct subsys_desc *dest_desc);
+extern int sysmon_notifier_register(struct subsys_desc *desc);
+extern void sysmon_notifier_unregister(struct subsys_desc *desc);
+#else
+static inline int sysmon_send_event(struct subsys_desc *dest_desc,
+					struct subsys_desc *event_desc,
+					enum subsys_notif_type notif)
+{
+	return 0;
+}
+static inline int sysmon_send_event_no_qmi(struct subsys_desc *dest_desc,
+						struct subsys_desc *event_desc,
+						enum subsys_notif_type notif)
+{
+	return 0;
+}
+static inline int sysmon_get_reason(struct subsys_desc *dest_desc,
+					char *buf, size_t len)
+{
+	return 0;
+}
+static inline int sysmon_get_reason_no_qmi(struct subsys_desc *dest_desc,
+						char *buf, size_t len)
+{
+	return 0;
+}
+static inline int sysmon_send_shutdown(struct subsys_desc *dest_desc)
+{
+	return 0;
+}
+static inline int sysmon_send_shutdown_no_qmi(struct subsys_desc *dest_desc)
+{
+	return 0;
+}
+static inline int sysmon_notifier_register(struct subsys_desc *desc)
+{
+	return 0;
+}
+static inline void sysmon_notifier_unregister(struct subsys_desc *desc)
+{
+}
+#endif
+
+#if defined(CONFIG_MSM_SYSMON_GLINK_COMM)
+extern int sysmon_glink_register(struct subsys_desc *desc);
+extern void sysmon_glink_unregister(struct subsys_desc *desc);
+#else
+static inline int sysmon_glink_register(struct subsys_desc *desc)
+{
+	return 0;
+}
+static inline void sysmon_glink_unregister(struct subsys_desc *desc)
+{
+}
+#endif
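+
+/*
+ * Usage sketch (illustrative only; falling back to the non-QMI path on
+ * failure is an assumption, not a documented requirement): tell one
+ * subsystem that another is about to shut down.
+ */
+static inline int sysmon_example_notify(struct subsys_desc *dest,
+					struct subsys_desc *event)
+{
+	int ret;
+
+	ret = sysmon_send_event(dest, event, SUBSYS_BEFORE_SHUTDOWN);
+	if (ret)
+		ret = sysmon_send_event_no_qmi(dest, event,
+					       SUBSYS_BEFORE_SHUTDOWN);
+	return ret;
+}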
+#endif
diff -Nruw linux-4.4.115/include/soc/qcom/tracer_pkt.h linux-4.4.115-fbx/include/soc/qcom/tracer_pkt.h
--- linux-4.4.115/include/soc/qcom/tracer_pkt.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/include/soc/qcom/tracer_pkt.h	2019-01-22 16:16:28.495291612 +0100
@@ -0,0 +1,130 @@
+/* Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#ifndef _TRACER_PKT_H_
+#define _TRACER_PKT_H_
+
+#include <linux/err.h>
+#include <linux/types.h>
+
+#ifdef CONFIG_TRACER_PKT
+
+/**
+ * tracer_pkt_init() - initialize the tracer packet
+ * @data:		Pointer to the buffer to be initialized with a tracer
+ *			packet.
+ * @data_len:		Length of the buffer.
+ * @client_event_cfg:	Client-specific event configuration mask.
+ * @glink_event_cfg:	G-Link-specific event configuration mask.
+ * @pkt_priv:		Private/Cookie information to be added to the tracer
+ *			packet.
+ * @pkt_priv_len:	Length of the private data.
+ *
+ * This function is used to initialize a buffer with the tracer packet header.
+ * The tracer packet header includes the data as passed by the elements in the
+ * parameters.
+ *
+ * Return: 0 on success, standard Linux error codes on failure.
+ */
+int tracer_pkt_init(void *data, size_t data_len,
+		    uint16_t client_event_cfg, uint32_t glink_event_cfg,
+		    void *pkt_priv, size_t pkt_priv_len);
+
+/**
+ * tracer_pkt_set_event_cfg() - set the event configuration mask in the tracer
+ *				packet
+ * @data:		Pointer to the buffer to be initialized with event
+ *			configuration mask.
+ * @client_event_cfg:	Client-specific event configuration mask.
+ * @glink_event_cfg:	G-Link-specific event configuration mask.
+ *
+ * This function is used to initialize a buffer with the event configuration
+ * mask as passed by the elements in the parameters.
+ *
+ * Return: 0 on success, standard Linux error codes on failure.
+ */
+int tracer_pkt_set_event_cfg(void *data, uint16_t client_event_cfg,
+			     uint32_t glink_event_cfg);
+
+/**
+ * tracer_pkt_log_event() - log an event specific to the tracer packet
+ * @data:	Pointer to the buffer containing tracer packet.
+ * @event_id:	Event ID to be logged.
+ *
+ * This function is used to log an event specific to the tracer packet.
+ * The event is logged either into the tracer packet itself or a different
+ * tracing mechanism as configured.
+ *
+ * Return: 0 on success, standard Linux error codes on failure.
+ */
+int tracer_pkt_log_event(void *data, uint32_t event_id);
+
+/**
+ * tracer_pkt_calc_hex_dump_size() - calculate the hex dump size of a tracer
+ *				     packet
+ * @data:	Pointer to the buffer containing tracer packet.
+ * @data_len:	Length of the tracer packet buffer.
+ *
+ * This function is used to calculate the length of the buffer required to
+ * hold the hex dump of the tracer packet.
+ *
+ * Return: the size in bytes needed for the hex dump on success, standard
+ * Linux error codes on failure.
+ */
+size_t tracer_pkt_calc_hex_dump_size(void *data, size_t data_len);
+
+/**
+ * tracer_pkt_hex_dump() - hex dump the tracer packet into a buffer
+ * @buf:	Buffer to contain the hex dump of the tracer packet.
+ * @buf_len:	Length of the hex dump buffer.
+ * @data:	Buffer containing the tracer packet.
+ * @data_len:	Length of the buffer containing the tracer packet.
+ *
+ * This function is used to dump the contents of the tracer packet into
+ * a buffer in a specific hexadecimal format. The hex dump buffer can then
+ * be dumped through debugfs.
+ *
+ * Return: 0 on success, standard Linux error codes on failure.
+ */
+int tracer_pkt_hex_dump(void *buf, size_t buf_len, void *data, size_t data_len);
+
+#else
+
+static inline int tracer_pkt_init(void *data, size_t data_len,
+		    uint16_t client_event_cfg, uint32_t glink_event_cfg,
+		    void *pkt_priv, size_t pkt_priv_len)
+{
+	return -EOPNOTSUPP;
+}
+
+static inline int tracer_pkt_set_event_cfg(void *data,
+					   uint16_t client_event_cfg,
+					   uint32_t glink_event_cfg)
+{
+	return -EOPNOTSUPP;
+}
+
+static inline int tracer_pkt_log_event(void *data, uint32_t event_id)
+{
+	return -EOPNOTSUPP;
+}
+
+static inline size_t tracer_pkt_calc_hex_dump_size(void *data, size_t data_len)
+{
+	return -EOPNOTSUPP;
+}
+
+static inline int tracer_pkt_hex_dump(void *buf, size_t buf_len, void *data,
+				      size_t data_len)
+{
+	return -EOPNOTSUPP;
+}
+
+#endif /* CONFIG_TRACER_PKT */
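+
+/*
+ * Usage sketch (illustrative only; the event configuration masks and the
+ * event id are placeholders): initialise a caller-provided buffer as a
+ * tracer packet, then log one event into it.
+ */
+static inline int tracer_pkt_example(void *buf, size_t len)
+{
+	int ret;
+
+	ret = tracer_pkt_init(buf, len, 0x1, 0x1, NULL, 0);
+	if (ret)
+		return ret;
+	return tracer_pkt_log_event(buf, 42);
+}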
+#endif /* _TRACER_PKT_H_ */
diff -Nruw linux-4.4.115/include/soc/qcom/watchdog.h linux-4.4.115-fbx/include/soc/qcom/watchdog.h
--- linux-4.4.115/include/soc/qcom/watchdog.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/include/soc/qcom/watchdog.h	2019-01-22 16:16:28.495291612 +0100
@@ -0,0 +1,29 @@
+/* Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _ASM_ARCH_MSM_WATCHDOG_H_
+#define _ASM_ARCH_MSM_WATCHDOG_H_
+
+#ifdef CONFIG_QCOM_FORCE_WDOG_BITE_ON_PANIC
+#define WDOG_BITE_ON_PANIC 1
+#else
+#define WDOG_BITE_ON_PANIC 0
+#endif
+
+#ifdef CONFIG_QCOM_WATCHDOG_V2
+void msm_trigger_wdog_bite(void);
+#else
+static inline void msm_trigger_wdog_bite(void) { }
+#endif
+
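+/*
+ * Usage sketch (illustrative only): a panic path can force a watchdog
+ * bite, so the post-mortem machinery collects state, when the kernel is
+ * configured for it.
+ */
+static inline void example_panic_wdog_bite(void)
+{
+	if (WDOG_BITE_ON_PANIC)
+		msm_trigger_wdog_bite();
+}
+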
+#endif
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/include/sound/adsp_err.h	2019-01-22 16:16:28.499291648 +0100
@@ -0,0 +1,21 @@
+/*
+ * Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __ADSP_ERR__
+#define __ADSP_ERR__
+
+int adsp_err_get_lnx_err_code(u32 adsp_error);
+
+char *adsp_err_get_err_str(u32 adsp_error);
+
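+/*
+ * Usage sketch (illustrative only; assumes <linux/types.h> and the printk
+ * helpers are available in the including file): map an ADSP status word
+ * to a Linux errno and a printable string.
+ */
+static inline int adsp_err_example(u32 status)
+{
+	if (!status)
+		return 0;
+	pr_err("adsp error: %s\n", adsp_err_get_err_str(status));
+	return adsp_err_get_lnx_err_code(status);
+}
+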
+#endif
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/include/sound/apr_audio.h	2019-01-22 16:16:28.499291648 +0100
@@ -0,0 +1,1929 @@
+/*
+ *
+ * Copyright (c) 2010-2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _APR_AUDIO_H_
+#define _APR_AUDIO_H_
+
+/* ASM opcodes without APR payloads */
+#include <linux/qdsp6v2/apr.h>
+
+/*
+ * Audio Front End (AFE)
+ */
+
+/* Port ID. Update afe_get_port_index when a new port is added here. */
+#define PRIMARY_I2S_RX 0		/* index = 0 */
+#define PRIMARY_I2S_TX 1		/* index = 1 */
+#define PCM_RX 2			/* index = 2 */
+#define PCM_TX 3			/* index = 3 */
+#define SECONDARY_I2S_RX 4		/* index = 4 */
+#define SECONDARY_I2S_TX 5		/* index = 5 */
+#define MI2S_RX 6			/* index = 6 */
+#define MI2S_TX 7			/* index = 7 */
+#define HDMI_RX 8			/* index = 8 */
+#define RSVD_2 9			/* index = 9 */
+#define RSVD_3 10			/* index = 10 */
+#define DIGI_MIC_TX 11			/* index = 11 */
+#define VOICE_RECORD_RX 0x8003		/* index = 12 */
+#define VOICE_RECORD_TX 0x8004		/* index = 13 */
+#define VOICE_PLAYBACK_TX 0x8005	/* index = 14 */
+
+/* Slimbus Multi channel port id pool  */
+#define SLIMBUS_0_RX		0x4000		/* index = 15 */
+#define SLIMBUS_0_TX		0x4001		/* index = 16 */
+#define SLIMBUS_1_RX		0x4002		/* index = 17 */
+#define SLIMBUS_1_TX		0x4003		/* index = 18 */
+#define SLIMBUS_2_RX		0x4004
+#define SLIMBUS_2_TX		0x4005
+#define SLIMBUS_3_RX		0x4006
+#define SLIMBUS_3_TX		0x4007
+#define SLIMBUS_4_RX		0x4008
+#define SLIMBUS_4_TX		0x4009		/* index = 24 */
+
+#define INT_BT_SCO_RX 0x3000		/* index = 25 */
+#define INT_BT_SCO_TX 0x3001		/* index = 26 */
+#define INT_BT_A2DP_RX 0x3002		/* index = 27 */
+#define INT_FM_RX 0x3004		/* index = 28 */
+#define INT_FM_TX 0x3005		/* index = 29 */
+#define RT_PROXY_PORT_001_RX	0x2000    /* index = 30 */
+#define RT_PROXY_PORT_001_TX	0x2001    /* index = 31 */
+#define SECONDARY_PCM_RX 12			/* index = 32 */
+#define SECONDARY_PCM_TX 13			/* index = 33 */
+#define PSEUDOPORT_01           0x8001    /* index = 34 */
+
+#define AFE_PORT_INVALID 0xFFFF
+#define SLIMBUS_EXTPROC_RX AFE_PORT_INVALID
+
+#define AFE_PORT_CMD_START 0x000100ca
+
+#define AFE_EVENT_RTPORT_START 0
+#define AFE_EVENT_RTPORT_STOP 1
+#define AFE_EVENT_RTPORT_LOW_WM 2
+#define AFE_EVENT_RTPORT_HI_WM 3
+
+struct afe_port_start_command {
+	struct apr_hdr hdr;
+	u16 port_id;
+	u16 gain;		/* Q13 */
+	u32 sample_rate;	/* 8, 16, 48 kHz */
+} __attribute__ ((packed));
+
+#define AFE_PORT_CMD_STOP 0x000100cb
+struct afe_port_stop_command {
+	struct apr_hdr hdr;
+	u16 port_id;
+	u16 reserved;
+} __attribute__ ((packed));
+
+#define AFE_PORT_CMD_APPLY_GAIN 0x000100cc
+struct afe_port_gain_command {
+	struct apr_hdr hdr;
+	u16 port_id;
+	u16	gain;/* Q13 */
+} __attribute__ ((packed));
+
+#define AFE_PORT_CMD_SIDETONE_CTL 0x000100cd
+struct afe_port_sidetone_command {
+	struct apr_hdr hdr;
+	u16 rx_port_id;		/* Primary i2s tx = 1 */
+				/* PCM tx = 3 */
+				/* Secondary i2s tx = 5 */
+				/* Mi2s tx = 7 */
+				/* Digital mic tx = 11 */
+	u16 tx_port_id;		/* Primary i2s rx = 0 */
+				/* PCM rx = 2 */
+				/* Secondary i2s rx = 4 */
+				/* Mi2S rx = 6 */
+				/* HDMI rx = 8 */
+	u16 gain;		/* Q13 */
+	u16 enable;		/* 1 = enable, 0 = disable */
+} __attribute__ ((packed));
+
+#define AFE_PORT_CMD_LOOPBACK 0x000100ce
+struct afe_loopback_command {
+	struct apr_hdr hdr;
+	u16 tx_port_id;		/* Primary i2s rx = 0 */
+				/* PCM rx = 2 */
+				/* Secondary i2s rx = 4 */
+				/* Mi2S rx = 6 */
+				/* HDMI rx = 8 */
+	u16 rx_port_id;		/* Primary i2s tx = 1 */
+				/* PCM tx = 3 */
+				/* Secondary i2s tx = 5 */
+				/* Mi2s tx = 7 */
+				/* Digital mic tx = 11 */
+	u16 mode;		/* Default -1, DSP will convert
+					the tx to rx format */
+	u16 enable;		/* 1 = enable, 0 = disable */
+} __attribute__ ((packed));
+
+#define AFE_PSEUDOPORT_CMD_START 0x000100cf
+struct afe_pseudoport_start_command {
+	struct apr_hdr hdr;
+	u16 port_id;		/* Pseudo Port 1 = 0x8000 */
+				/* Pseudo Port 2 = 0x8001 */
+				/* Pseudo Port 3 = 0x8002 */
+	u16 timing;		/* FTRT = 0 , AVTimer = 1, */
+} __attribute__ ((packed));
+
+#define AFE_PSEUDOPORT_CMD_STOP 0x000100d0
+struct afe_pseudoport_stop_command {
+	struct apr_hdr hdr;
+	u16 port_id;		/* Pseudo Port 1 = 0x8000 */
+				/* Pseudo Port 2 = 0x8001 */
+				/* Pseudo Port 3 = 0x8002 */
+	u16 reserved;
+} __attribute__ ((packed));
+
+#define AFE_CMD_GET_ACTIVE_PORTS 0x000100d1
+
+
+#define AFE_CMD_GET_ACTIVE_HANDLES_FOR_PORT 0x000100d2
+struct afe_get_active_handles_command {
+	struct apr_hdr hdr;
+	u16 port_id;
+	u16 reserved;
+} __attribute__ ((packed));
+
+/*
+ * Opcode for AFE to start DTMF.
+ */
+#define AFE_PORTS_CMD_DTMF_CTL	0x00010102
+
+/** DTMF payload.*/
+struct afe_dtmf_generation_command {
+	struct apr_hdr hdr;
+
+	/*
+	 * Duration of the DTMF tone in ms.
+	 * -1      -> continuous,
+	 *  0      -> disable
+	 */
+	int64_t                   duration_in_ms;
+
+	/*
+	 * The DTMF high tone frequency.
+	 */
+	uint16_t                  high_freq;
+
+	/*
+	 * The DTMF low tone frequency.
+	 */
+	uint16_t                  low_freq;
+
+	/*
+	 * The DTMF volume setting
+	 */
+	uint16_t                  gain;
+
+	/*
+	 * The number of ports to enable/disable on.
+	 */
+	uint16_t                  num_ports;
+
+	/*
+	 * The destination ports array.
+	 * For DTMF on multiple ports, port_ids needs to
+	 * be populated num_ports times.
+	 */
+	uint16_t                  port_ids;
+
+	/*
+	 * Padding for 32-bit alignment of the APR packet.
+	 */
+	uint16_t                  reserved;
+} __packed;
+
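+/*
+ * Fill-in sketch for the payload above (illustrative only; the port and
+ * gain values are placeholders): a continuous DTMF digit "1", i.e. a
+ * 697 Hz low tone plus a 1209 Hz high tone, on a single port.
+ */
+static inline void afe_dtmf_example(struct afe_dtmf_generation_command *cmd)
+{
+	cmd->duration_in_ms = -1;	/* continuous until disabled */
+	cmd->high_freq = 1209;
+	cmd->low_freq = 697;
+	cmd->gain = 0;			/* placeholder gain */
+	cmd->num_ports = 1;
+	cmd->port_ids = SLIMBUS_0_RX;
+	cmd->reserved = 0;
+}
+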
+#define AFE_PCM_CFG_MODE_PCM			0x0
+#define AFE_PCM_CFG_MODE_AUX			0x1
+#define AFE_PCM_CFG_SYNC_EXT			0x0
+#define AFE_PCM_CFG_SYNC_INT			0x1
+#define AFE_PCM_CFG_FRM_8BPF			0x0
+#define AFE_PCM_CFG_FRM_16BPF			0x1
+#define AFE_PCM_CFG_FRM_32BPF			0x2
+#define AFE_PCM_CFG_FRM_64BPF			0x3
+#define AFE_PCM_CFG_FRM_128BPF			0x4
+#define AFE_PCM_CFG_FRM_256BPF			0x5
+#define AFE_PCM_CFG_QUANT_ALAW_NOPAD		0x0
+#define AFE_PCM_CFG_QUANT_MULAW_NOPAD		0x1
+#define AFE_PCM_CFG_QUANT_LINEAR_NOPAD		0x2
+#define AFE_PCM_CFG_QUANT_ALAW_PAD		0x3
+#define AFE_PCM_CFG_QUANT_MULAW_PAD		0x4
+#define AFE_PCM_CFG_QUANT_LINEAR_PAD		0x5
+#define AFE_PCM_CFG_CDATAOE_MASTER		0x0
+#define AFE_PCM_CFG_CDATAOE_SHARE		0x1
+
+struct afe_port_pcm_cfg {
+	u16	mode;	/* PCM (short sync) = 0, AUXPCM (long sync) = 1 */
+	u16	sync;	/* external = 0 , internal = 1 */
+	u16	frame;	/* 8 bpf = 0 */
+			/* 16 bpf = 1 */
+			/* 32 bpf = 2 */
+			/* 64 bpf = 3 */
+			/* 128 bpf = 4 */
+			/* 256 bpf = 5 */
+	u16     quant;
+	u16	slot;	/* Slot for PCM stream , 0 - 31 */
+	u16	data;	/* 0, PCM block is the only master */
+			/* 1, PCM block shares the data out signal */
+			/*    with another master */
+	u16	reserved;
+} __attribute__ ((packed));
+
+enum {
+	AFE_I2S_SD0 = 1,
+	AFE_I2S_SD1,
+	AFE_I2S_SD2,
+	AFE_I2S_SD3,
+	AFE_I2S_QUAD01,
+	AFE_I2S_QUAD23,
+	AFE_I2S_6CHS,
+	AFE_I2S_8CHS,
+};
+
+#define AFE_MI2S_MONO 0
+#define AFE_MI2S_STEREO 3
+#define AFE_MI2S_4CHANNELS 4
+#define AFE_MI2S_6CHANNELS 6
+#define AFE_MI2S_8CHANNELS 8
+
+struct afe_port_mi2s_cfg {
+	u16	bitwidth;	/* 16,24,32 */
+	u16	line;		/* Called ChannelMode in documentation */
+				/* i2s_sd0 = 1 */
+				/* i2s_sd1 = 2 */
+				/* i2s_sd2 = 3 */
+				/* i2s_sd3 = 4 */
+				/* i2s_quad01 = 5 */
+				/* i2s_quad23 = 6 */
+				/* i2s_6chs = 7 */
+				/* i2s_8chs = 8 */
+	u16	channel;	/* Called MonoStereo in documentation */
+				/* i2s mono = 0 */
+				/* i2s mono right = 1 */
+				/* i2s mono left = 2 */
+				/* i2s stereo = 3 */
+	u16	ws;		/* 0, word select signal from external source */
+				/* 1, word select signal from internal source */
+	u16	format;	/* don't touch this field if it is not for */
+				/* AFE_PORT_CMD_I2S_CONFIG opcode */
+} __attribute__ ((packed));
+
+struct afe_port_hdmi_cfg {
+	u16	bitwidth;	/* 16,24,32 */
+	u16	channel_mode;	/* HDMI Stereo = 0 */
+				/* HDMI_3Point1 (4-ch) = 1 */
+				/* HDMI_5Point1 (6-ch) = 2 */
+				/* HDMI_6Point1 (8-ch) = 3 */
+	u16	data_type;	/* HDMI_Linear = 0 */
+				/* HDMI_non_Linear = 1 */
+} __attribute__ ((packed));
+
+
+struct afe_port_hdmi_multi_ch_cfg {
+	u16	data_type;		/* HDMI_Linear = 0 */
+					/* HDMI_non_Linear = 1 */
+	u16	channel_allocation;	/* The default is 0 (Stereo) */
+	u16	reserved;		/* must be set to 0 */
+} __packed;
+
+
+/* Slimbus Device Ids */
+#define AFE_SLIMBUS_DEVICE_1		0x0
+#define AFE_SLIMBUS_DEVICE_2		0x1
+#define AFE_PORT_MAX_AUDIO_CHAN_CNT	16
+
+struct afe_port_slimbus_cfg {
+	u16	slimbus_dev_id;		/* SLIMBUS Device id.*/
+
+	u16	slave_dev_pgd_la;	/* Slave ported generic device
+					* logical address.
+					*/
+	u16	slave_dev_intfdev_la;	/* Slave interface device logical
+					* address.
+					*/
+	u16	bit_width;		/**  bit width of the samples, 16, 24.*/
+
+	u16	data_format;		/** data format.*/
+
+	u16	num_channels;		/** Number of channels.*/
+
+	/** Slave port mapping for respective channels.*/
+	u16	slave_port_mapping[AFE_PORT_MAX_AUDIO_CHAN_CNT];
+
+	u16	reserved;
+} __packed;
+
+struct afe_port_slimbus_sch_cfg {
+	u16	slimbus_dev_id;		/* SLIMBUS Device id.*/
+	u16	bit_width;		/**  bit width of the samples, 16, 24.*/
+	u16	data_format;		/** data format.*/
+	u16	num_channels;		/** Number of channels.*/
+	u16	reserved;
+	/** Slave channel  mapping for respective channels.*/
+	u8	slave_ch_mapping[8];
+} __packed;
+
+struct afe_port_rtproxy_cfg {
+	u16	bitwidth;	/* 16,24,32 */
+	u16	interleaved;    /* interleaved = 1 */
+				/* Noninterleaved = 0 */
+	u16	frame_sz;	/* 5ms buffers = 160bytes */
+	u16	jitter;		/* 10ms of jitter = 320 */
+	u16	lw_mark;	/* Low watermark in bytes for triggering event*/
+	u16	hw_mark;	/* High watermark bytes for triggering event*/
+	u16	rsvd;
+	int	num_ch;		/* 1 to 8 */
+} __packed;
+
+struct afe_port_pseudo_cfg {
+	u16 bit_width;
+	u16 num_channels;
+	u16 data_format;
+	u16 timing_mode;
+	u16 reserved;
+} __packed;
+
+#define AFE_PORT_AUDIO_IF_CONFIG 0x000100d3
+#define AFE_PORT_AUDIO_SLIM_SCH_CONFIG 0x000100e4
+#define AFE_PORT_MULTI_CHAN_HDMI_AUDIO_IF_CONFIG	0x000100D9
+#define AFE_PORT_CMD_I2S_CONFIG	0x000100E7
+
+union afe_port_config {
+	struct afe_port_pcm_cfg           pcm;
+	struct afe_port_mi2s_cfg          mi2s;
+	struct afe_port_hdmi_cfg          hdmi;
+	struct afe_port_hdmi_multi_ch_cfg hdmi_multi_ch;
+	struct afe_port_slimbus_cfg	  slimbus;
+	struct afe_port_slimbus_sch_cfg	  slim_sch;
+	struct afe_port_rtproxy_cfg       rtproxy;
+	struct afe_port_pseudo_cfg        pseudo;
+} __attribute__((packed));
+
+struct afe_audioif_config_command {
+	struct apr_hdr hdr;
+	u16 port_id;
+	union afe_port_config port;
+} __attribute__ ((packed));
+
+#define AFE_TEST_CODEC_LOOPBACK_CTL 0x000100d5
+struct afe_codec_loopback_command {
+	u16	port_inf;	/* Primary i2s = 0 */
+				/* PCM = 2 */
+				/* Secondary i2s = 4 */
+				/* Mi2s = 6 */
+	u16	enable;		/* 0, disable. 1, enable */
+} __attribute__ ((packed));
+
+
+#define AFE_PARAM_ID_SIDETONE_GAIN	0x00010300
+struct afe_param_sidetone_gain {
+	u16 gain;
+	u16 reserved;
+} __attribute__ ((packed));
+
+#define AFE_PARAM_ID_SAMPLING_RATE	0x00010301
+struct afe_param_sampling_rate {
+	u32 sampling_rate;
+} __attribute__ ((packed));
+
+
+#define AFE_PARAM_ID_CHANNELS		0x00010302
+struct afe_param_channels {
+	u16 channels;
+	u16 reserved;
+} __attribute__ ((packed));
+
+
+#define AFE_PARAM_ID_LOOPBACK_GAIN	0x00010303
+struct afe_param_loopback_gain {
+	u16 gain;
+	u16 reserved;
+} __attribute__ ((packed));
+
+/* Parameter ID used to configure and enable/disable the loopback path. The
+ * difference with respect to the existing API, AFE_PORT_CMD_LOOPBACK, is that
+ * it allows Rx port to be configured as source port in loopback path. Port-id
+ * in AFE_PORT_CMD_SET_PARAM cmd is the source port, which can be a Tx or Rx port.
+ * In addition, we can configure the type of routing mode to handle different
+ * use cases.
+*/
+enum {
+	/* Regular loopback from source to destination port */
+	LB_MODE_DEFAULT = 1,
+	/* Sidetone feed from Tx source to Rx destination port */
+	LB_MODE_SIDETONE,
+	/* Echo canceller reference, voice + audio + DTMF */
+	LB_MODE_EC_REF_VOICE_AUDIO,
+	/* Echo canceller reference, voice alone */
+	LB_MODE_EC_REF_VOICE
+};
+
+#define AFE_PARAM_ID_LOOPBACK_CONFIG 0x0001020B
+#define AFE_API_VERSION_LOOPBACK_CONFIG 0x1
+struct afe_param_loopback_cfg {
+	/* Minor version used for tracking the version of the configuration
+	 * interface.
+	 */
+	uint32_t loopback_cfg_minor_version;
+
+	/* Destination Port Id. */
+	uint16_t dst_port_id;
+
+	/* Specifies data path type from src to dest port. Supported values:
+	 * LB_MODE_DEFAULT
+	 * LB_MODE_SIDETONE
+	 * LB_MODE_EC_REF_VOICE_AUDIO
+	 * LB_MODE_EC_REF_VOICE
+	 */
+	uint16_t routing_mode;
+
+	/* Specifies whether to enable (1) or disable (0) an AFE loopback. */
+	uint16_t enable;
+
+	/* Reserved for 32-bit alignment. This field must be set to 0. */
+	uint16_t reserved;
+} __packed;
+
+#define AFE_MODULE_ID_PORT_INFO		0x00010200
+/* Module ID for the loopback-related parameters. */
+#define AFE_MODULE_LOOPBACK           0x00010205
+struct afe_param_payload_base {
+	u32 module_id;
+	u32 param_id;
+	u16 param_size;
+	u16 reserved;
+} __packed;
+
+struct afe_param_payload {
+	struct afe_param_payload_base base;
+	union {
+		struct afe_param_sidetone_gain sidetone_gain;
+		struct afe_param_sampling_rate sampling_rate;
+		struct afe_param_channels      channels;
+		struct afe_param_loopback_gain loopback_gain;
+		struct afe_param_loopback_cfg loopback_cfg;
+	} __attribute__((packed)) param;
+} __attribute__ ((packed));
+
+#define AFE_PORT_CMD_SET_PARAM		0x000100dc
+
+struct afe_port_cmd_set_param {
+	struct apr_hdr hdr;
+	u16 port_id;
+	u16 payload_size;
+	u32 payload_address;
+	struct afe_param_payload payload;
+} __attribute__ ((packed));
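+
+/*
+ * Usage sketch (illustrative only, compiled out): enabling a sidetone
+ * loopback through AFE_PORT_CMD_SET_PARAM with the AFE_MODULE_LOOPBACK
+ * parameters defined above. The helper name is hypothetical and the
+ * APR header setup is omitted.
+ */
+#if 0
+static void example_enable_sidetone(struct afe_port_cmd_set_param *cmd,
+				    u16 src_port_id, u16 dst_port_id)
+{
+	struct afe_param_loopback_cfg *cfg =
+			&cmd->payload.param.loopback_cfg;
+
+	cmd->port_id = src_port_id;	/* source may be a Tx or Rx port */
+	cmd->payload_size = sizeof(struct afe_param_payload_base) +
+			    sizeof(*cfg);
+	cmd->payload_address = 0;	/* payload carried in band */
+
+	cmd->payload.base.module_id = AFE_MODULE_LOOPBACK;
+	cmd->payload.base.param_id = AFE_PARAM_ID_LOOPBACK_CONFIG;
+	cmd->payload.base.param_size = sizeof(*cfg);
+	cmd->payload.base.reserved = 0;
+
+	cfg->loopback_cfg_minor_version = AFE_API_VERSION_LOOPBACK_CONFIG;
+	cfg->dst_port_id = dst_port_id;
+	cfg->routing_mode = LB_MODE_SIDETONE;
+	cfg->enable = 1;
+	cfg->reserved = 0;
+}
+#endif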
+
+struct afe_port_cmd_set_param_no_payload {
+	struct apr_hdr hdr;
+	u16 port_id;
+	u16 payload_size;
+	u32 payload_address;
+} __packed;
+
+#define AFE_EVENT_GET_ACTIVE_PORTS 0x00010100
+struct afe_get_active_ports_rsp {
+	u16	num_ports;
+	u16	port_id;
+} __attribute__ ((packed));
+
+
+#define AFE_EVENT_GET_ACTIVE_HANDLES 0x00010102
+struct afe_get_active_handles_rsp {
+	u16	port_id;
+	u16	num_handles;
+	u16	mode;		/* 0, voice rx */
+				/* 1, voice tx */
+				/* 2, audio rx */
+				/* 3, audio tx */
+	u16	handle;
+} __attribute__ ((packed));
+
+#define AFE_SERVICE_CMD_MEMORY_MAP 0x000100DE
+struct afe_cmd_memory_map {
+	struct apr_hdr hdr;
+	u32 phy_addr;
+	u32 mem_sz;
+	u16 mem_id;
+	u16 rsvd;
+} __packed;
+
+#define AFE_SERVICE_CMD_MEMORY_UNMAP 0x000100DF
+struct afe_cmd_memory_unmap {
+	struct apr_hdr hdr;
+	u32 phy_addr;
+} __packed;
+
+#define AFE_SERVICE_CMD_REG_RTPORT 0x000100E0
+struct afe_cmd_reg_rtport {
+	struct apr_hdr hdr;
+	u16 port_id;
+	u16 rsvd;
+} __packed;
+
+#define AFE_SERVICE_CMD_UNREG_RTPORT 0x000100E1
+struct afe_cmd_unreg_rtport {
+	struct apr_hdr hdr;
+	u16 port_id;
+	u16 rsvd;
+} __packed;
+
+#define AFE_SERVICE_CMD_RTPORT_WR 0x000100E2
+struct afe_cmd_rtport_wr {
+	struct apr_hdr hdr;
+	u16 port_id;
+	u16 rsvd;
+	u32 buf_addr;
+	u32 bytes_avail;
+} __packed;
+
+#define AFE_SERVICE_CMD_RTPORT_RD 0x000100E3
+struct afe_cmd_rtport_rd {
+	struct apr_hdr hdr;
+	u16 port_id;
+	u16 rsvd;
+	u32 buf_addr;
+	u32 bytes_avail;
+} __packed;
+
+#define AFE_EVENT_RT_PROXY_PORT_STATUS 0x00010105
+
+#define ADM_MAX_COPPS 5
+
+#define ADM_SERVICE_CMD_GET_COPP_HANDLES                 0x00010300
+struct adm_get_copp_handles_command {
+	struct apr_hdr hdr;
+} __attribute__ ((packed));
+
+#define ADM_CMD_MATRIX_MAP_ROUTINGS                      0x00010301
+struct adm_routings_session {
+	u16 id;
+	u16 num_copps;
+	u16 copp_id[ADM_MAX_COPPS+1]; /* padding if num_copps is odd */
+} __packed;
+
+struct adm_routings_command {
+	struct apr_hdr hdr;
+	u32 path; /* 0 = Rx, 1 = Tx */
+	u32 num_sessions;
+	struct adm_routings_session session[8];
+} __attribute__ ((packed));
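+
+/*
+ * Usage sketch (illustrative only, compiled out): routing one ASM
+ * session to two COPPs on the Rx path. The helper name is hypothetical;
+ * APR header fields other than the opcode are omitted.
+ */
+#if 0
+static void example_map_session(struct adm_routings_command *route,
+				u16 session_id, u16 copp_a, u16 copp_b)
+{
+	route->hdr.opcode = ADM_CMD_MATRIX_MAP_ROUTINGS;
+	route->path = 0;			/* 0 = Rx */
+	route->num_sessions = 1;
+	route->session[0].id = session_id;
+	route->session[0].num_copps = 2;
+	route->session[0].copp_id[0] = copp_a;
+	route->session[0].copp_id[1] = copp_b;
+}
+#endif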
+
+
+#define ADM_CMD_MATRIX_RAMP_GAINS                        0x00010302
+struct adm_ramp_gain {
+	struct apr_hdr hdr;
+	u16 session_id;
+	u16 copp_id;
+	u16 initial_gain;
+	u16 gain_increment;
+	u16 ramp_duration;
+	u16 reserved;
+} __attribute__ ((packed));
+
+struct adm_ramp_gains_command {
+	struct apr_hdr hdr;
+	u32 id;
+	u32 num_gains;
+	struct adm_ramp_gain gains[ADM_MAX_COPPS];
+} __attribute__ ((packed));
+
+
+#define ADM_CMD_COPP_OPEN                                0x00010304
+struct adm_copp_open_command {
+	struct apr_hdr hdr;
+	u16 flags;
+	u16 mode; /* 1-RX, 2-Live TX, 3-Non Live TX */
+	u16 endpoint_id1;
+	u16 endpoint_id2;
+	u32 topology_id;
+	u16 channel_config;
+	u16 reserved;
+	u32 rate;
+} __attribute__ ((packed));
+
+#define ADM_CMD_COPP_CLOSE                               0x00010305
+
+#define ADM_CMD_MULTI_CHANNEL_COPP_OPEN                  0x00010310
+#define ADM_CMD_MULTI_CHANNEL_COPP_OPEN_V3               0x00010333
+struct adm_multi_ch_copp_open_command {
+	struct apr_hdr hdr;
+	u16 flags;
+	u16 mode; /* 1-RX, 2-Live TX, 3-Non Live TX */
+	u16 endpoint_id1;
+	u16 endpoint_id2;
+	u32 topology_id;
+	u16 channel_config;
+	u16 reserved;
+	u32 rate;
+	u8 dev_channel_mapping[8];
+} __packed;
+
+struct adm_multi_channel_copp_open_v3 {
+	struct apr_hdr hdr;
+	u16 flags;
+	u16 mode;
+	u16 endpoint_id1;
+	u16 endpoint_id2;
+	u32 topology_id;
+	u16 channel_config;
+	u16 bit_width;
+	u32 rate;
+	u8  dev_channel_mapping[8];
+};
+#define ADM_CMD_MEMORY_MAP				0x00010C30
+struct adm_cmd_memory_map{
+	struct apr_hdr	hdr;
+	u32		buf_add;
+	u32		buf_size;
+	u16		mempool_id;
+	u16		reserved;
+} __attribute__((packed));
+
+#define ADM_CMD_MEMORY_UNMAP				0x00010C31
+struct adm_cmd_memory_unmap{
+	struct apr_hdr	hdr;
+	u32		buf_add;
+} __attribute__((packed));
+
+#define ADM_CMD_MEMORY_MAP_REGIONS			0x00010C47
+struct adm_memory_map_regions{
+	u32		phys;
+	u32		buf_size;
+} __attribute__((packed));
+
+struct adm_cmd_memory_map_regions{
+	struct apr_hdr	hdr;
+	u16		mempool_id;
+	u16		nregions;
+} __attribute__((packed));
+
+#define ADM_CMD_MEMORY_UNMAP_REGIONS			0x00010C48
+struct adm_memory_unmap_regions{
+	u32		phys;
+} __attribute__((packed));
+
+struct adm_cmd_memory_unmap_regions{
+	struct apr_hdr	hdr;
+	u16		nregions;
+	u16		reserved;
+} __attribute__((packed));
+
+#define DEFAULT_COPP_TOPOLOGY				0x00010be3
+#define DEFAULT_POPP_TOPOLOGY				0x00010be4
+#define VPM_TX_SM_ECNS_COPP_TOPOLOGY			0x00010F71
+#define VPM_TX_DM_FLUENCE_COPP_TOPOLOGY			0x00010F72
+#define VPM_TX_QMIC_FLUENCE_COPP_TOPOLOGY		0x00010F75
+
+#define LOWLATENCY_POPP_TOPOLOGY			0x00010C68
+#define LOWLATENCY_COPP_TOPOLOGY			0x00010312
+#define PCM_BITS_PER_SAMPLE				16
+
+#define ASM_OPEN_WRITE_PERF_MODE_BIT			(1<<28)
+#define ASM_OPEN_READ_PERF_MODE_BIT			(1<<29)
+#define ADM_MULTI_CH_COPP_OPEN_PERF_MODE_BIT		(1<<13)
+
+
+#define ASM_MAX_EQ_BANDS 12
+
+struct asm_eq_band {
+	u32 band_idx; /* The band index, 0 .. 11 */
+	u32 filter_type; /* Filter band type */
+	u32 center_freq_hz; /* Filter band center frequency */
+	u32 filter_gain; /* Filter band initial gain (dB) */
+			/* Range is +12 dB to -12 dB with 1dB increments. */
+	u32 q_factor;
+} __attribute__ ((packed));
+
+struct asm_equalizer_params {
+	u32 enable;
+	u32 num_bands;
+	struct asm_eq_band eq_bands[ASM_MAX_EQ_BANDS];
+} __attribute__ ((packed));
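+
+/*
+ * Usage sketch (illustrative only, compiled out): a single-band
+ * equalizer configuration. The helper name is hypothetical and the
+ * filter_type value is an assumption; the gain stays within the
+ * -12 dB to +12 dB range noted below.
+ */
+#if 0
+static void example_eq_one_band(struct asm_equalizer_params *eq)
+{
+	memset(eq, 0, sizeof(*eq));
+	eq->enable = 1;
+	eq->num_bands = 1;
+	eq->eq_bands[0].band_idx = 0;
+	eq->eq_bands[0].filter_type = 0;	/* assumed filter type */
+	eq->eq_bands[0].center_freq_hz = 1000;
+	eq->eq_bands[0].filter_gain = 3;	/* +3 dB */
+	eq->eq_bands[0].q_factor = 0;
+}
+#endif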
+
+struct asm_master_gain_params {
+	u16 master_gain;
+	u16 padding;
+} __attribute__ ((packed));
+
+struct asm_lrchannel_gain_params {
+	u16 left_gain;
+	u16 right_gain;
+} __attribute__ ((packed));
+
+struct asm_mute_params {
+	u32 muteflag;
+} __attribute__ ((packed));
+
+struct asm_softvolume_params {
+	u32 period;
+	u32 step;
+	u32 rampingcurve;
+} __attribute__ ((packed));
+
+struct asm_softpause_params {
+	u32 enable;
+	u32 period;
+	u32 step;
+	u32 rampingcurve;
+} __packed;
+
+struct asm_pp_param_data_hdr {
+	u32 module_id;
+	u32 param_id;
+	u16 param_size;
+	u16 reserved;
+} __attribute__ ((packed));
+
+struct asm_pp_params_command {
+	struct apr_hdr	hdr;
+	u32    *payload;
+	u32	payload_size;
+	struct  asm_pp_param_data_hdr params;
+} __attribute__ ((packed));
+
+#define EQUALIZER_MODULE_ID		0x00010c27
+#define EQUALIZER_PARAM_ID		0x00010c28
+
+#define VOLUME_CONTROL_MODULE_ID	0x00010bfe
+#define MASTER_GAIN_PARAM_ID		0x00010bff
+#define L_R_CHANNEL_GAIN_PARAM_ID	0x00010c00
+#define MUTE_CONFIG_PARAM_ID 0x00010c01
+#define SOFT_PAUSE_PARAM_ID 0x00010D6A
+#define SOFT_VOLUME_PARAM_ID 0x00010C29
+
+#define IIR_FILTER_ENABLE_PARAM_ID 0x00010c03
+#define IIR_FILTER_PREGAIN_PARAM_ID 0x00010c04
+#define IIR_FILTER_CONFIG_PARAM_ID 0x00010c05
+
+#define MBADRC_MODULE_ID 0x00010c06
+#define MBADRC_ENABLE_PARAM_ID 0x00010c07
+#define MBADRC_CONFIG_PARAM_ID 0x00010c08
+
+
+#define ADM_CMD_SET_PARAMS                               0x00010306
+#define ADM_CMD_GET_PARAMS                               0x0001030B
+#define ADM_CMDRSP_GET_PARAMS                            0x0001030C
+struct adm_set_params_command {
+	struct apr_hdr		hdr;
+	u32			payload;
+	u32			payload_size;
+} __attribute__ ((packed));
+
+
+#define ADM_CMD_TAP_COPP_PCM                             0x00010307
+struct adm_tap_copp_pcm_command {
+	struct apr_hdr hdr;
+} __attribute__ ((packed));
+
+
+/* QDSP6 to Client messages */
+#define ADM_SERVICE_CMDRSP_GET_COPP_HANDLES              0x00010308
+struct adm_get_copp_handles_respond {
+	struct apr_hdr hdr;
+	u32 handles;
+	u32 copp_id;
+} __attribute__ ((packed));
+
+#define ADM_CMDRSP_COPP_OPEN                             0x0001030A
+struct adm_copp_open_respond {
+	u32 status;
+	u16 copp_id;
+	u16 reserved;
+} __attribute__ ((packed));
+
+#define ADM_CMDRSP_MULTI_CHANNEL_COPP_OPEN               0x00010311
+#define ADM_CMDRSP_MULTI_CHANNEL_COPP_OPEN_V3            0x00010334
+
+
+#define ASM_STREAM_PRIORITY_NORMAL	0
+#define ASM_STREAM_PRIORITY_LOW		1
+#define ASM_STREAM_PRIORITY_HIGH	2
+#define ASM_STREAM_PRIORITY_RESERVED	3
+
+#define ASM_END_POINT_DEVICE_MATRIX	0
+#define ASM_END_POINT_STREAM		1
+
+#define AAC_ENC_MODE_AAC_LC            0x02
+#define AAC_ENC_MODE_AAC_P             0x05
+#define AAC_ENC_MODE_EAAC_P            0x1D
+
+#define ASM_STREAM_CMD_CLOSE                             0x00010BCD
+#define ASM_STREAM_CMD_FLUSH                             0x00010BCE
+#define ASM_STREAM_CMD_SET_PP_PARAMS                     0x00010BCF
+#define ASM_STREAM_CMD_GET_PP_PARAMS                     0x00010BD0
+#define ASM_STREAM_CMDRSP_GET_PP_PARAMS                  0x00010BD1
+#define ASM_SESSION_CMD_PAUSE                            0x00010BD3
+#define ASM_SESSION_CMD_GET_SESSION_TIME                 0x00010BD4
+#define ASM_DATA_CMD_EOS                                 0x00010BDB
+#define ASM_DATA_EVENT_EOS                               0x00010BDD
+
+#define ASM_SERVICE_CMD_GET_STREAM_HANDLES               0x00010C0B
+#define ASM_STREAM_CMD_FLUSH_READBUFS                    0x00010C09
+
+#define ASM_SESSION_EVENT_RX_UNDERFLOW			 0x00010C17
+#define ASM_SESSION_EVENT_TX_OVERFLOW			 0x00010C18
+#define ASM_SERVICE_CMD_GET_WALLCLOCK_TIME               0x00010C19
+#define ASM_DATA_CMDRSP_EOS                              0x00010C1C
+
+/* ASM Data structures */
+
+/* common declarations */
+struct asm_pcm_cfg {
+	u16 ch_cfg;
+	u16 bits_per_sample;
+	u32 sample_rate;
+	u16 is_signed;
+	u16 interleaved;
+};
+
+#define PCM_CHANNEL_NULL 0
+
+/* Front left channel. */
+#define PCM_CHANNEL_FL    1
+
+/* Front right channel. */
+#define PCM_CHANNEL_FR    2
+
+/* Front center channel. */
+#define PCM_CHANNEL_FC    3
+
+/* Left surround channel.*/
+#define PCM_CHANNEL_LS   4
+
+/* Right surround channel.*/
+#define PCM_CHANNEL_RS   5
+
+/* Low frequency effect channel. */
+#define PCM_CHANNEL_LFE  6
+
+/* Center surround channel; Rear center channel. */
+#define PCM_CHANNEL_CS   7
+
+/* Left back channel; Rear left channel. */
+#define PCM_CHANNEL_LB   8
+
+/* Right back channel; Rear right channel. */
+#define PCM_CHANNEL_RB   9
+
+/* Top surround channel. */
+#define PCM_CHANNEL_TS   10
+
+/* Center vertical height channel.*/
+#define PCM_CHANNEL_CVH  11
+
+/* Mono surround channel.*/
+#define PCM_CHANNEL_MS   12
+
+/* Front left of center. */
+#define PCM_CHANNEL_FLC  13
+
+/* Front right of center. */
+#define PCM_CHANNEL_FRC  14
+
+/* Rear left of center. */
+#define PCM_CHANNEL_RLC  15
+
+/* Rear right of center. */
+#define PCM_CHANNEL_RRC  16
+
+#define PCM_FORMAT_MAX_NUM_CHANNEL  8
+
+/* Maximum number of channels supported
+ * in ASM_ENCDEC_DEC_CHAN_MAP command
+ */
+#define MAX_CHAN_MAP_CHANNELS 16
+/*
+ *  Multiple-channel PCM decoder format block structure used in the
+ *  #ASM_STREAM_CMD_OPEN_WRITE command.
+ *  The data must be in little-endian format.
+ */
+struct asm_multi_channel_pcm_fmt_blk {
+
+	u16 num_channels;	/*
+				 * Number of channels.
+				 * Supported values: 1 to 8
+				 */
+
+	u16 bits_per_sample;	/*
+				 * Number of bits per sample per channel.
+				 * Supported values: 16, 24 When used for
+				 * playback, the client must send 24-bit
+				 * samples packed in 32-bit words. The
+				 * 24-bit samples must be placed in the most
+				 * significant 24 bits of the 32-bit word. When
+				 * used for recording, the aDSP sends 24-bit
+				 * samples packed in 32-bit words. The 24-bit
+				 * samples are placed in the most significant
+				 * 24 bits of the 32-bit word.
+				 */
+
+	u32 sample_rate;	/*
+				 * Number of samples per second
+				 * (in Hertz). Supported values:
+				 * 2000 to 48000
+				 */
+
+	u16 is_signed;		/*
+				 * Flag that indicates the samples
+				 * are signed (1).
+				 */
+
+	u16 is_interleaved;	/*
+				 * Flag that indicates whether the channels are
+				 * de-interleaved (0) or interleaved (1).
+				 * Interleaved format means corresponding
+				 * samples from the left and right channels are
+				 * interleaved within the buffer.
+				 * De-interleaved format means samples from
+				 * each channel are contiguous in the buffer.
+				 * The samples from one channel immediately
+				 * follow those of the previous channel.
+				 */
+
+	u8 channel_mapping[8];	/*
+				 * Supported values:
+				 * PCM_CHANNEL_NULL, PCM_CHANNEL_FL,
+				 * PCM_CHANNEL_FR, PCM_CHANNEL_FC,
+				 * PCM_CHANNEL_LS, PCM_CHANNEL_RS,
+				 * PCM_CHANNEL_LFE, PCM_CHANNEL_CS,
+				 * PCM_CHANNEL_LB, PCM_CHANNEL_RB,
+				 * PCM_CHANNEL_TS, PCM_CHANNEL_CVH,
+				 * PCM_CHANNEL_MS, PCM_CHANNEL_FLC,
+				 * PCM_CHANNEL_FRC, PCM_CHANNEL_RLC,
+				 * PCM_CHANNEL_RRC.
+				 * Each element i of the array describes
+				 * channel i inside the buffer, where
+				 * i < num_channels. An unused channel is
+				 * set to zero.
+				 */
+};
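+
+/*
+ * Usage sketch (illustrative only, compiled out): filling the
+ * multi-channel PCM format block for interleaved 16-bit stereo at
+ * 48 kHz with the channel macros defined above. The helper name is
+ * hypothetical.
+ */
+#if 0
+static void example_stereo_fmt_blk(struct asm_multi_channel_pcm_fmt_blk *fmt)
+{
+	memset(fmt, 0, sizeof(*fmt));	/* unused channels = PCM_CHANNEL_NULL */
+	fmt->num_channels = 2;
+	fmt->bits_per_sample = 16;
+	fmt->sample_rate = 48000;
+	fmt->is_signed = 1;
+	fmt->is_interleaved = 1;
+	fmt->channel_mapping[0] = PCM_CHANNEL_FL;
+	fmt->channel_mapping[1] = PCM_CHANNEL_FR;
+}
+#endif
+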
+struct asm_dts_enc_cfg {
+	uint32_t	sample_rate;
+	/*
+	* Sample rate at which the input is to be encoded.
+	* Supported values:
+	* 44100 -- encode at 44.1 kHz
+	* 48000 -- encode at 48 kHz
+	*/
+
+	uint32_t	num_channels;
+	/*
+	* Number of channels for multi-channel encoding.
+	* Supported values: 1 to 6
+	*/
+
+	uint8_t		channel_mapping[6];
+	/*
+	* Channel array of size 6. Each element i of the array describes
+	* channel i inside the buffer, where i < num_channels. An unused
+	* channel is set to zero. Only the first num_channels elements
+	* are valid.
+
+	* Supported values:
+	* - # PCM_CHANNEL_L
+	* - # PCM_CHANNEL_R
+	* - # PCM_CHANNEL_C
+	* - # PCM_CHANNEL_LS
+	* - # PCM_CHANNEL_RS
+	* - # PCM_CHANNEL_LFE
+	*/
+
+};
+struct asm_adpcm_cfg {
+	u16 ch_cfg;
+	u16 bits_per_sample;
+	u32 sample_rate;
+	u32 block_size;
+};
+
+struct asm_yadpcm_cfg {
+	u16 ch_cfg;
+	u16 bits_per_sample;
+	u32 sample_rate;
+};
+
+struct asm_midi_cfg {
+	u32 nMode;
+};
+
+struct asm_wma_cfg {
+	u16 format_tag;
+	u16 ch_cfg;
+	u32 sample_rate;
+	u32 avg_bytes_per_sec;
+	u16 block_align;
+	u16 valid_bits_per_sample;
+	u32 ch_mask;
+	u16 encode_opt;
+	u16 adv_encode_opt;
+	u32 adv_encode_opt2;
+	u32 drc_peak_ref;
+	u32 drc_peak_target;
+	u32 drc_ave_ref;
+	u32 drc_ave_target;
+};
+
+struct asm_wmapro_cfg {
+	u16 format_tag;
+	u16 ch_cfg;
+	u32 sample_rate;
+	u32 avg_bytes_per_sec;
+	u16 block_align;
+	u16 valid_bits_per_sample;
+	u32 ch_mask;
+	u16 encode_opt;
+	u16 adv_encode_opt;
+	u32 adv_encode_opt2;
+	u32 drc_peak_ref;
+	u32 drc_peak_target;
+	u32 drc_ave_ref;
+	u32 drc_ave_target;
+};
+
+struct asm_aac_cfg {
+	u16 format;
+	u16 aot;
+	u16 ep_config;
+	u16 section_data_resilience;
+	u16 scalefactor_data_resilience;
+	u16 spectral_data_resilience;
+	u16 ch_cfg;
+	u16 reserved;
+	u32 sample_rate;
+};
+
+struct asm_amrwbplus_cfg {
+	u32  size_bytes;
+	u32  version;
+	u32  num_channels;
+	u32  amr_band_mode;
+	u32  amr_dtx_mode;
+	u32  amr_frame_fmt;
+	u32  amr_lsf_idx;
+};
+
+struct asm_flac_cfg {
+	u16 stream_info_present;
+	u16 min_blk_size;
+	u16 max_blk_size;
+	u16 ch_cfg;
+	u16 sample_size;
+	u16 sample_rate;
+	u16 md5_sum;
+	u32 ext_sample_rate;
+	u32 min_frame_size;
+	u32 max_frame_size;
+};
+
+struct asm_vorbis_cfg {
+	u32 ch_cfg;
+	u32 bit_rate;
+	u32 min_bit_rate;
+	u32 max_bit_rate;
+	u16 bit_depth_pcm_sample;
+	u16 bit_stream_format;
+};
+
+struct asm_aac_read_cfg {
+	u32 bitrate;
+	u32 enc_mode;
+	u16 format;
+	u16 ch_cfg;
+	u32 sample_rate;
+};
+
+struct asm_amrnb_read_cfg {
+	u16 mode;
+	u16 dtx_mode;
+};
+
+struct asm_amrwb_read_cfg {
+	u16 mode;
+	u16 dtx_mode;
+};
+
+struct asm_evrc_read_cfg {
+	u16 max_rate;
+	u16 min_rate;
+	u16 rate_modulation_cmd;
+	u16 reserved;
+};
+
+struct asm_qcelp13_read_cfg {
+	u16 max_rate;
+	u16 min_rate;
+	u16 reduced_rate_level;
+	u16 rate_modulation_cmd;
+};
+
+struct asm_sbc_read_cfg {
+	u32 subband;
+	u32 block_len;
+	u32 ch_mode;
+	u32 alloc_method;
+	u32 bit_rate;
+	u32 sample_rate;
+};
+
+struct asm_sbc_bitrate {
+	u32 bitrate;
+};
+
+struct asm_immed_decode {
+	u32 mode;
+};
+
+struct asm_sbr_ps {
+	u32 enable;
+};
+
+struct asm_dual_mono {
+	u16 sce_left;
+	u16 sce_right;
+};
+
+struct asm_dec_chan_map {
+	u32 num_channels;			  /* Number of decoder output
+						   * channels. A value of 0
+						   * indicates native channel
+						   * mapping, which is valid
+						   * only for NT mode. This
+						   * means the output of the
+						   * decoder is to be preserved
+						   * as is.
+						   */
+
+	u8 channel_mapping[MAX_CHAN_MAP_CHANNELS];/* Channel array of size
+						   * num_channels; it can grow
+						   * up to MAX_CHAN_MAP_CHANNELS.
+						   * Element i of the array
+						   * describes channel i inside
+						   * the decoder output buffer.
+						   * Valid channel mapping
+						   * values are to be present at
+						   * the beginning of the array.
+						   * All remaining elements of
+						   * the array are to be filled
+						   * with PCM_CHANNEL_NULL.
+						   */
+};
+
+struct asm_encode_cfg_blk {
+	u32 frames_per_buf;
+	u32 format_id;
+	u32 cfg_size;
+	union {
+		struct asm_pcm_cfg          pcm;
+		struct asm_aac_read_cfg     aac;
+		struct asm_amrnb_read_cfg   amrnb;
+		struct asm_evrc_read_cfg    evrc;
+		struct asm_qcelp13_read_cfg qcelp13;
+		struct asm_sbc_read_cfg     sbc;
+		struct asm_amrwb_read_cfg   amrwb;
+		struct asm_multi_channel_pcm_fmt_blk      mpcm;
+		struct asm_dts_enc_cfg      dts;
+	} __attribute__((packed)) cfg;
+};
+
+struct asm_frame_meta_info {
+	u32 offset_to_frame;
+	u32 frame_size;
+	u32 encoded_pcm_samples;
+	u32 msw_ts;
+	u32 lsw_ts;
+	u32 nflags;
+};
+
+/* Stream level commands */
+#define ASM_STREAM_CMD_OPEN_READ                         0x00010BCB
+#define ASM_STREAM_CMD_OPEN_READ_V2_1                    0x00010DB2
+struct asm_stream_cmd_open_read {
+	struct apr_hdr hdr;
+	u32            uMode;
+	u32            src_endpoint;
+	u32            pre_proc_top;
+	u32            format;
+} __attribute__((packed));
+
+struct asm_stream_cmd_open_read_v2_1 {
+	struct apr_hdr hdr;
+	u32            uMode;
+	u32            src_endpoint;
+	u32            pre_proc_top;
+	u32            format;
+	u16            bits_per_sample;
+	u16            reserved;
+} __packed;
+
+/* Supported formats */
+#define LINEAR_PCM   0x00010BE5
+#define DTMF         0x00010BE6
+#define ADPCM        0x00010BE7
+#define YADPCM       0x00010BE8
+#define MP3          0x00010BE9
+#define MPEG4_AAC    0x00010BEA
+#define AMRNB_FS     0x00010BEB
+#define AMRWB_FS     0x00010BEC
+#define V13K_FS      0x00010BED
+#define EVRC_FS      0x00010BEE
+#define EVRCB_FS     0x00010BEF
+#define EVRCWB_FS    0x00010BF0
+#define MIDI         0x00010BF1
+#define SBC          0x00010BF2
+#define WMA_V10PRO   0x00010BF3
+#define WMA_V9       0x00010BF4
+#define AMR_WB_PLUS  0x00010BF5
+#define AC3_DECODER  0x00010BF6
+#define EAC3_DECODER 0x00010C3C
+#define DTS	0x00010D88
+#define DTS_LBR	0x00010DBB
+#define MP2          0x00010DBE
+#define ATRAC	0x00010D89
+#define MAT	0x00010D8A
+#define G711_ALAW_FS 0x00010BF7
+#define G711_MLAW_FS 0x00010BF8
+#define G711_PCM_FS  0x00010BF9
+#define MPEG4_MULTI_AAC 0x00010D86
+#define US_POINT_EPOS_FORMAT 0x00012310
+#define US_RAW_FORMAT        0x0001127C
+#define US_PROX_FORMAT       0x0001272B
+#define MULTI_CHANNEL_PCM    0x00010C66
+
+#define ASM_ENCDEC_SBCRATE         0x00010C13
+#define ASM_ENCDEC_IMMDIATE_DECODE 0x00010C14
+#define ASM_ENCDEC_CFG_BLK         0x00010C2C
+
+#define ASM_STREAM_CMD_OPEN_READ_COMPRESSED               0x00010D95
+struct asm_stream_cmd_open_read_compressed {
+	struct apr_hdr hdr;
+	u32            uMode;
+	u32            frame_per_buf;
+} __packed;
+
+#define ASM_STREAM_CMD_OPEN_WRITE                        0x00010BCA
+#define ASM_STREAM_CMD_OPEN_WRITE_V2_1                   0x00010DB1
+struct asm_stream_cmd_open_write {
+	struct apr_hdr hdr;
+	u32            uMode;
+	u16            sink_endpoint;
+	u16            stream_handle;
+	u32            post_proc_top;
+	u32            format;
+} __attribute__((packed));
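+
+/*
+ * Usage sketch (illustrative only, compiled out): requesting a
+ * low-latency write stream by setting the perf-mode bit (defined
+ * earlier in this header) in the open command's mode word. The helper
+ * name is hypothetical; APR header fields other than the opcode are
+ * omitted.
+ */
+#if 0
+static void example_open_write_ll(struct asm_stream_cmd_open_write *cmd,
+				  u32 format)
+{
+	cmd->hdr.opcode = ASM_STREAM_CMD_OPEN_WRITE;
+	cmd->uMode = ASM_OPEN_WRITE_PERF_MODE_BIT;	/* bit 28 */
+	cmd->sink_endpoint = ASM_END_POINT_DEVICE_MATRIX;
+	cmd->stream_handle = 0;
+	cmd->post_proc_top = DEFAULT_POPP_TOPOLOGY;
+	cmd->format = format;
+}
+#endif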
+
+#define IEC_61937_MASK	0x00000001
+#define IEC_60958_MASK	0x00000002
+
+#define ASM_STREAM_CMD_OPEN_WRITE_COMPRESSED	0x00010D84
+struct asm_stream_cmd_open_write_compressed {
+	struct apr_hdr hdr;
+	u32	flags;
+	u32	format;
+} __packed;
+#define ASM_STREAM_CMD_OPEN_TRANSCODE_LOOPBACK     0x00010DBA
+struct asm_stream_cmd_open_transcode_loopback {
+	struct apr_hdr hdr;
+	uint32_t	mode_flags;
+	/*
+	* All bits are reserved. Clients must set them to zero.
+	*/
+
+	uint32_t	src_format_id;
+	/*
+	* Specifies the media format of the input audio stream.
+
+	* Supported values:
+	* - #ASM_MEDIA_FMT_LINEAR_PCM
+	* - #ASM_MEDIA_FMT_MULTI_CHANNEL_PCM
+	*/
+
+	uint32_t	sink_format_id;
+	/*
+	* Specifies the media format of the output stream.
+
+	* Supported values:
+	* - #ASM_MEDIA_FMT_LINEAR_PCM
+	* - #ASM_MEDIA_FMT_MULTI_CHANNEL_PCM
+	* - #ASM_MEDIA_FMT_DTS
+	*/
+
+	uint32_t	audproc_topo_id;
+	/*
+	* Postprocessing topology ID, which specifies the topology (order of
+	* processing) of postprocessing algorithms.
+
+	* Supported values:
+	* - #ASM_STREAM_POSTPROC_TOPO_ID_DEFAULT
+	* - #ASM_STREAM_POSTPROC_TOPO_ID_PEAKMETER
+	* - #ASM_STREAM_POSTPROC_TOPO_ID_NONE
+	* - #ASM_STREAM_POSTPROC_TOPO_ID_MCH_PEAK_VOL
+	*/
+
+	uint16_t	src_endpoint_type;
+	/*
+	* Specifies the source endpoint that provides the input samples.
+
+	* Supported values:
+	* - 0 -- Tx device matrix or stream router
+	* (gateway to the hardware ports)
+	* - All other values are reserved
+
+	* Clients must set this field to zero. Otherwise, an error is returned.
+	*/
+
+	uint16_t	sink_endpoint_type;
+	/*
+	* Specifies the sink endpoint type.
+
+	* Supported values:
+	* - 0 -- Rx device matrix or stream router
+	* (gateway to the hardware ports)
+	* - All other values are reserved
+
+	* Clients must set this field to zero. Otherwise, an error is returned.
+	*/
+
+	uint16_t	bits_per_sample;
+	/*
+	* Number of bits per sample processed by the ASM modules.
+	* Supported values: 16, 24
+	*/
+
+	uint16_t	reserved;
+	/*
+	* This field must be set to zero.
+	*/
+} __packed;
+
+/*
+* ID of the DTS mix LFE channel to front channels parameter in the
+* #ASM_STREAM_CMD_SET_ENCDEC_PARAM command.
+* asm_dts_generic_param_t
+* ASM_PARAM_ID_DTS_MIX_LFE_TO_FRONT
+*/
+#define ASM_PARAM_ID_DTS_MIX_LFE_TO_FRONT                          0x00010DB6
+
+/*
+* ID of the DTS DRC ratio parameter in the
+* #ASM_STREAM_CMD_SET_ENCDEC_PARAM command.
+* asm_dts_generic_param_t
+* ASM_PARAM_ID_DTS_DRC_RATIO
+*/
+#define ASM_PARAM_ID_DTS_DRC_RATIO                                   0x00010DB7
+
+/*
+* ID of the DTS enable dialog normalization parameter in the
+* #ASM_STREAM_CMD_SET_ENCDEC_PARAM command.
+
+* asm_dts_generic_param_t
+* ASM_PARAM_ID_DTS_ENABLE_DIALNORM
+*/
+#define ASM_PARAM_ID_DTS_ENABLE_DIALNORM                             0x00010DB8
+
+/*
+* ID of the DTS enable parse REV2AUX parameter in the
+* #ASM_STREAM_CMD_SET_ENCDEC_PARAM command.
+* asm_dts_generic_param_t
+* ASM_PARAM_ID_DTS_ENABLE_PARSE_REV2AUX
+*/
+#define ASM_PARAM_ID_DTS_ENABLE_PARSE_REV2AUX                         0x00010DB9
+
+struct asm_dts_generic_param {
+	int32_t		generic_parameter;
+	/*
+	* #ASM_PARAM_ID_DTS_MIX_LFE_TO_FRONT:
+	* - if enabled, mixes LFE channel to front
+	* while downmixing (if necessary)
+	* - Supported values: 1-> enable, 0-> disable
+	* - Default: disabled
+
+	* #ASM_PARAM_ID_DTS_DRC_RATIO:
+	* - percentage of DRC ratio.
+	* - Supported values: 0-100
+	* - Default: 0, DRC is disabled.
+
+	* #ASM_PARAM_ID_DTS_ENABLE_DIALNORM:
+	* - flag to enable dialog normalization post processing.
+	* - Supported values: 1-> enable, 0-> disable.
+	* - Default: enabled.
+
+	* #ASM_PARAM_ID_DTS_ENABLE_PARSE_REV2AUX:
+	* - flag to enable parsing of rev2aux chunk in the bitstream.
+	* This chunk contains broadcast metadata.
+	* - Supported values: 1-> enable, 0-> disable.
+	* - Default: disabled.
+	*/
+};
+
+struct asm_stream_cmd_dts_dec_param {
+	struct apr_hdr hdr;
+	u32            param_id;
+	u32            param_size;
+	struct asm_dts_generic_param generic_param;
+} __packed;
+
+
+#define ASM_STREAM_CMD_OPEN_READWRITE                    0x00010BCC
+
+struct asm_stream_cmd_open_read_write {
+	struct apr_hdr     hdr;
+	u32                uMode;
+	u32                post_proc_top;
+	u32                write_format;
+	u32                read_format;
+} __attribute__((packed));
+
+#define ASM_STREAM_CMD_OPEN_LOOPBACK	0x00010D6E
+struct asm_stream_cmd_open_loopback {
+	struct apr_hdr         hdr;
+	u32                    mode_flags;
+/* Mode flags.
+ * Bit 0-31: reserved; client should set these bits to 0
+ */
+	u16                    src_endpointype;
+	/* Endpoint type. 0 = Tx Matrix */
+	u16                    sink_endpointype;
+	/* Endpoint type. 0 = Rx Matrix */
+	u32                    postprocopo_id;
+/* Postprocessor topology ID. Specifies the topology of
+ * postprocessing algorithms.
+ */
+} __packed;
+
+#define ADM_CMD_CONNECT_AFE_PORT 0x00010320
+#define ADM_CMD_DISCONNECT_AFE_PORT 0x00010321
+
+struct adm_cmd_connect_afe_port {
+	struct apr_hdr     hdr;
+	u8	mode; /*mode represent the interface is for RX or TX*/
+	u8	session_id; /*ASM session ID*/
+	u16	afe_port_id;
+} __packed;
+
+#define ADM_CMD_CONNECT_AFE_PORT_V2 0x00010332
+
+struct adm_cmd_connect_afe_port_v2 {
+	struct apr_hdr     hdr;
+	u8	mode; /*mode represent the interface is for RX or TX*/
+	u8	session_id; /*ASM session ID*/
+	u16	afe_port_id;
+	u32	num_channels;
+	u32	sampling_rate;
+} __packed;
+
+#define ASM_STREAM_CMD_SET_ENCDEC_PARAM                  0x00010C10
+#define ASM_STREAM_CMD_GET_ENCDEC_PARAM                  0x00010C11
+#define ASM_ENCDEC_CFG_BLK_ID				 0x00010C2C
+#define ASM_ENABLE_SBR_PS				 0x00010C63
+#define ASM_CONFIGURE_DUAL_MONO			 0x00010C64
+struct asm_stream_cmd_encdec_cfg_blk{
+	struct apr_hdr              hdr;
+	u32                         param_id;
+	u32                         param_size;
+	struct asm_encode_cfg_blk   enc_blk;
+} __attribute__((packed));
+
+struct asm_stream_cmd_encdec_sbc_bitrate{
+	struct apr_hdr hdr;
+	u32            param_id;
+	struct asm_sbc_bitrate      sbc_bitrate;
+} __attribute__((packed));
+
+struct asm_stream_cmd_encdec_immed_decode{
+	struct apr_hdr hdr;
+	u32            param_id;
+	u32            param_size;
+	struct asm_immed_decode dec;
+} __attribute__((packed));
+
+struct asm_stream_cmd_encdec_sbr{
+	struct apr_hdr hdr;
+	u32            param_id;
+	u32            param_size;
+	struct asm_sbr_ps sbr_ps;
+} __attribute__((packed));
+
+struct asm_stream_cmd_encdec_dualmono {
+	struct apr_hdr hdr;
+	u32            param_id;
+	u32            param_size;
+	struct asm_dual_mono channel_map;
+} __packed;
+
+#define ASM_PARAM_ID_AAC_STEREO_MIX_COEFF_SELECTION_FLAG        0x00010DD8
+
+/* Structure for AAC decoder stereo coefficient setting. */
+
+struct asm_aac_stereo_mix_coeff_selection_param {
+	struct apr_hdr				hdr;
+	u32					param_id;
+	u32					param_size;
+	u32					aac_stereo_mix_coeff_flag;
+} __packed;
+
+#define ASM_ENCDEC_DEC_CHAN_MAP				 0x00010D82
+struct asm_stream_cmd_encdec_channelmap {
+	struct apr_hdr hdr;
+	u32            param_id;
+	u32            param_size;
+	struct asm_dec_chan_map chan_map;
+} __packed;
+
+#define ASM_STREAM_CMD_ADJUST_SAMPLES                    0x00010C0A
+struct asm_stream_cmd_adjust_samples{
+	struct apr_hdr hdr;
+	u16            nsamples;
+	u16            reserved;
+} __attribute__((packed));
+
+#define ASM_STREAM_CMD_TAP_POPP_PCM                      0x00010BF9
+struct asm_stream_cmd_tap_popp_pcm{
+	struct apr_hdr hdr;
+	u16            enable;
+	u16            reserved;
+	u32            module_id;
+} __attribute__((packed));
+
+/*  Session Level commands */
+#define ASM_SESSION_CMD_MEMORY_MAP			0x00010C32
+struct asm_stream_cmd_memory_map{
+	struct apr_hdr	hdr;
+	u32		buf_add;
+	u32		buf_size;
+	u16		mempool_id;
+	u16		reserved;
+} __attribute__((packed));
+
+#define ASM_SESSION_CMD_MEMORY_UNMAP			0x00010C33
+struct asm_stream_cmd_memory_unmap{
+	struct apr_hdr	hdr;
+	u32		buf_add;
+} __attribute__((packed));
+
+#define ASM_SESSION_CMD_MEMORY_MAP_REGIONS		0x00010C45
+struct asm_memory_map_regions{
+	u32		phys;
+	u32		buf_size;
+} __attribute__((packed));
+
+struct asm_stream_cmd_memory_map_regions{
+	struct apr_hdr	hdr;
+	u16		mempool_id;
+	u16		nregions;
+} __attribute__((packed));
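+
+/*
+ * Usage sketch (illustrative only, compiled out): the map-regions
+ * command header is immediately followed in the same buffer by
+ * nregions asm_memory_map_regions entries, so the payload is laid out
+ * manually. The helper name and the contiguous-buffer assumption are
+ * hypothetical.
+ */
+#if 0
+static void example_fill_map_regions(void *buf, u32 phys, u32 region_size,
+				     u16 nregions)
+{
+	struct asm_stream_cmd_memory_map_regions *cmd = buf;
+	struct asm_memory_map_regions *region =
+			(struct asm_memory_map_regions *)(cmd + 1);
+	u16 i;
+
+	cmd->hdr.opcode = ASM_SESSION_CMD_MEMORY_MAP_REGIONS;
+	cmd->mempool_id = 0;
+	cmd->nregions = nregions;
+	for (i = 0; i < nregions; i++) {
+		region[i].phys = phys + i * region_size;
+		region[i].buf_size = region_size;
+	}
+}
+#endif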
+
+#define ASM_SESSION_CMD_MEMORY_UNMAP_REGIONS		0x00010C46
+struct asm_memory_unmap_regions{
+	u32		phys;
+} __attribute__((packed));
+
+struct asm_stream_cmd_memory_unmap_regions{
+	struct apr_hdr	hdr;
+	u16		nregions;
+	u16		reserved;
+} __attribute__((packed));
+
+#define ASM_SESSION_CMD_RUN                              0x00010BD2
+struct asm_stream_cmd_run{
+	struct apr_hdr hdr;
+	u32            flags;
+	u32            msw_ts;
+	u32            lsw_ts;
+} __attribute__((packed));
+
+/* Session level events */
+#define ASM_SESSION_CMD_REGISTER_FOR_RX_UNDERFLOW_EVENTS 0x00010BD5
+struct asm_stream_cmd_reg_rx_underflow_event{
+	struct apr_hdr hdr;
+	u16            enable;
+	u16            reserved;
+} __attribute__((packed));
+
+#define ASM_SESSION_CMD_REGISTER_FOR_TX_OVERFLOW_EVENTS  0x00010BD6
+struct asm_stream_cmd_reg_tx_overflow_event{
+	struct apr_hdr hdr;
+	u16            enable;
+	u16            reserved;
+} __attribute__((packed));
+
+/* Data Path commands */
+#define ASM_DATA_CMD_WRITE                               0x00010BD9
+struct asm_stream_cmd_write{
+	struct apr_hdr     hdr;
+	u32	buf_add;
+	u32	avail_bytes;
+	u32	uid;
+	u32	msw_ts;
+	u32	lsw_ts;
+	u32	uflags;
+} __attribute__((packed));
+
+#define ASM_DATA_CMD_READ                                0x00010BDA
+struct asm_stream_cmd_read{
+	struct apr_hdr     hdr;
+	u32	buf_add;
+	u32	buf_size;
+	u32	uid;
+} __attribute__((packed));
+
+#define ASM_DATA_CMD_READ_COMPRESSED                     0x00010DBF
+struct asm_stream_cmd_read_compressed {
+	struct apr_hdr     hdr;
+	u32	buf_add;
+	u32	buf_size;
+	u32	uid;
+} __packed;
+
+#define ASM_DATA_CMD_MEDIA_FORMAT_UPDATE                 0x00010BDC
+#define ASM_DATA_EVENT_ENC_SR_CM_NOTIFY                  0x00010BDE
+struct asm_stream_media_format_update{
+	struct apr_hdr hdr;
+	u32            format;
+	u32            cfg_size;
+	union {
+		struct asm_pcm_cfg         pcm_cfg;
+		struct asm_adpcm_cfg       adpcm_cfg;
+		struct asm_yadpcm_cfg      yadpcm_cfg;
+		struct asm_midi_cfg        midi_cfg;
+		struct asm_wma_cfg         wma_cfg;
+		struct asm_wmapro_cfg      wmapro_cfg;
+		struct asm_aac_cfg         aac_cfg;
+		struct asm_flac_cfg        flac_cfg;
+		struct asm_vorbis_cfg      vorbis_cfg;
+		struct asm_multi_channel_pcm_fmt_blk multi_ch_pcm_cfg;
+		struct asm_amrwbplus_cfg   amrwbplus_cfg;
+	} __attribute__((packed)) write_cfg;
+} __attribute__((packed));
+
+
+/* Command Responses */
+#define ASM_STREAM_CMDRSP_GET_ENCDEC_PARAM               0x00010C12
+struct asm_stream_cmdrsp_get_readwrite_param{
+	struct apr_hdr hdr;
+	u32            status;
+	u32            param_id;
+	u16            param_size;
+	u16            padding;
+	union {
+		struct asm_sbc_bitrate      sbc_bitrate;
+		struct asm_immed_decode aac_dec;
+	} __attribute__((packed)) read_write_cfg;
+} __attribute__((packed));
+
+
+#define ASM_SESSION_CMDRSP_GET_SESSION_TIME              0x00010BD8
+struct asm_stream_cmdrsp_get_session_time{
+	struct apr_hdr hdr;
+	u32            status;
+	u32            msw_ts;
+	u32            lsw_ts;
+} __attribute__((packed));
+
+#define ASM_DATA_EVENT_WRITE_DONE                        0x00010BDF
+struct asm_data_event_write_done{
+	u32	buf_add;
+	u32            status;
+} __attribute__((packed));
+
+#define ASM_DATA_EVENT_READ_DONE                         0x00010BE0
+struct asm_data_event_read_done{
+	u32            status;
+	u32            buffer_add;
+	u32            enc_frame_size;
+	u32            offset;
+	u32            msw_ts;
+	u32            lsw_ts;
+	u32            flags;
+	u32            num_frames;
+	u32            id;
+} __attribute__((packed));
+
+#define ASM_DATA_EVENT_READ_COMPRESSED_DONE              0x00010DC0
+struct asm_data_event_read_compressed_done {
+	u32            status;
+	u32            buffer_add;
+	u32            enc_frame_size;
+	u32            offset;
+	u32            msw_ts;
+	u32            lsw_ts;
+	u32            flags;
+	u32            num_frames;
+	u32            id;
+} __packed;
+
+#define ASM_DATA_EVENT_SR_CM_CHANGE_NOTIFY               0x00010C65
+struct asm_data_event_sr_cm_change_notify {
+	u32            sample_rate;
+	u16            no_of_channels;
+	u16            reserved;
+	u8             channel_map[8];
+} __packed;
+
+/* service level events */
+
+#define ASM_SERVICE_CMDRSP_GET_STREAM_HANDLES            0x00010C1B
+struct asm_svc_cmdrsp_get_strm_handles{
+	struct apr_hdr hdr;
+	u32            num_handles;
+	u32            stream_handles;
+} __attribute__((packed));
+
+
+#define ASM_SERVICE_CMDRSP_GET_WALLCLOCK_TIME            0x00010C1A
+struct asm_svc_cmdrsp_get_wallclock_time{
+	struct apr_hdr hdr;
+	u32            status;
+	u32            msw_ts;
+	u32            lsw_ts;
+} __attribute__((packed));
+
+/*
+ * Error codes
+ */
+#define ADSP_EOK          0x00000000 /* Success / completed / no errors. */
+#define ADSP_EFAILED      0x00000001 /* General failure. */
+#define ADSP_EBADPARAM    0x00000002 /* Bad operation parameter(s). */
+#define ADSP_EUNSUPPORTED 0x00000003 /* Unsupported routine/operation. */
+#define ADSP_EVERSION     0x00000004 /* Unsupported version. */
+#define ADSP_EUNEXPECTED  0x00000005 /* Unexpected problem encountered. */
+#define ADSP_EPANIC       0x00000006 /* Unhandled problem occurred. */
+#define ADSP_ENORESOURCE  0x00000007 /* Unable to allocate resource(s). */
+#define ADSP_EHANDLE      0x00000008 /* Invalid handle. */
+#define ADSP_EALREADY     0x00000009 /* Operation is already processed. */
+#define ADSP_ENOTREADY    0x0000000A /* Operation not ready to be processed*/
+#define ADSP_EPENDING     0x0000000B /* Operation is pending completion*/
+#define ADSP_EBUSY        0x0000000C /* Operation could not be accepted or
+					 processed. */
+#define ADSP_EABORTED     0x0000000D /* Operation aborted due to an error. */
+#define ADSP_EPREEMPTED   0x0000000E /* Operation preempted by higher priority*/
+#define ADSP_ECONTINUE    0x0000000F /* Operation requests intervention
+					to complete. */
+#define ADSP_EIMMEDIATE   0x00000010 /* Operation requests immediate
+					intervention to complete. */
+#define ADSP_ENOTIMPL     0x00000011 /* Operation is not implemented. */
+#define ADSP_ENEEDMORE    0x00000012 /* Operation needs more data or resources*/
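+
+/*
+ * Usage sketch (illustrative only, compiled out): one possible mapping
+ * of aDSP status codes to kernel error numbers. The helper name and
+ * the errno choices are assumptions, not part of the aDSP API.
+ */
+#if 0
+static int example_adsp_err_to_errno(u32 status)
+{
+	switch (status) {
+	case ADSP_EOK:		return 0;
+	case ADSP_EBADPARAM:	return -EINVAL;
+	case ADSP_EUNSUPPORTED:
+	case ADSP_ENOTIMPL:	return -EOPNOTSUPP;
+	case ADSP_ENORESOURCE:	return -ENOMEM;
+	case ADSP_EBUSY:	return -EBUSY;
+	case ADSP_EALREADY:	return -EALREADY;
+	default:		return -EIO;
+	}
+}
+#endif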
+
+/* SRS TRUMEDIA GUIDS */
+#define SRS_TRUMEDIA_TOPOLOGY_ID    0x00010D90
+#define SRS_TRUMEDIA_MODULE_ID      0x10005010
+#define SRS_TRUMEDIA_PARAMS         0x10005011
+#define SRS_TRUMEDIA_PARAMS_WOWHD   0x10005012
+#define SRS_TRUMEDIA_PARAMS_CSHP    0x10005013
+#define SRS_TRUMEDIA_PARAMS_HPF     0x10005014
+#define SRS_TRUMEDIA_PARAMS_PEQ     0x10005015
+#define SRS_TRUMEDIA_PARAMS_HL      0x10005016
+
+/* SRS STUDIO SOUND 3D GUIDS */
+#define SRS_SS3D_TOPOLOGY_ID        0x00010720
+#define SRS_SS3D_MODULE_ID          0x10005020
+#define SRS_SS3D_PARAMS             0x10005021
+#define SRS_SS3D_PARAMS_CTRL        0x10005022
+#define SRS_SS3D_PARAMS_FILTER      0x10005023
+
+/* SRS ALSA CMD MASKS */
+#define SRS_CMD_UPLOAD              0x7FFF0000
+#define SRS_PARAM_INDEX_MASK        0x80000000
+#define SRS_PARAM_OFFSET_MASK       0x3FFF0000
+#define SRS_PARAM_VALUE_MASK        0x0000FFFF
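+
+/*
+ * Usage sketch (illustrative only, compiled out): unpacking an SRS
+ * ALSA control word with the masks above; the shift amounts follow
+ * from the mask positions. The helper name is hypothetical.
+ */
+#if 0
+static void example_srs_unpack(u32 word, int *is_index, u32 *offset,
+			       u16 *value)
+{
+	*is_index = !!(word & SRS_PARAM_INDEX_MASK);
+	*offset = (word & SRS_PARAM_OFFSET_MASK) >> 16;
+	*value = word & SRS_PARAM_VALUE_MASK;
+}
+#endif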
+
+/* SRS TRUMEDIA start */
+#define SRS_ID_GLOBAL               0x00000001
+#define SRS_ID_WOWHD                0x00000002
+#define SRS_ID_CSHP                 0x00000003
+#define SRS_ID_HPF                  0x00000004
+#define SRS_ID_PEQ                  0x00000005
+#define SRS_ID_HL                   0x00000006
+
+struct srs_trumedia_params_GLOBAL {
+	uint8_t                  v1;
+	uint8_t                  v2;
+	uint8_t                  v3;
+	uint8_t                  v4;
+	uint8_t                  v5;
+	uint8_t                  v6;
+	uint8_t                  v7;
+	uint8_t                  v8;
+} __packed;
+
+struct srs_trumedia_params_WOWHD {
+	uint32_t				v1;
+	uint16_t				v2;
+	uint16_t				v3;
+	uint16_t				v4;
+	uint16_t				v5;
+	uint16_t				v6;
+	uint16_t				v7;
+	uint16_t				v8;
+	uint16_t				v____A1;
+	uint32_t				v9;
+	uint16_t				v10;
+	uint16_t				v11;
+	uint32_t				v12[16];
+} __packed;
+
+struct srs_trumedia_params_CSHP {
+	uint32_t				v1;
+	uint16_t				v2;
+	uint16_t				v3;
+	uint16_t				v4;
+	uint16_t				v5;
+	uint16_t				v6;
+	uint16_t				v____A1;
+	uint32_t				v7;
+	uint16_t				v8;
+	uint16_t				v9;
+	uint32_t				v10[16];
+} __packed;
+
+struct srs_trumedia_params_HPF {
+	uint32_t				v1;
+	uint32_t				v2[26];
+} __packed;
+
+struct srs_trumedia_params_PEQ {
+	uint32_t				v1;
+	uint16_t				v2;
+	uint16_t				v3;
+	uint16_t				v4;
+	uint16_t				v____A1;
+	uint32_t				v5[26];
+	uint32_t				v6[26];
+} __packed;
+
+struct srs_trumedia_params_HL {
+	uint16_t				v1;
+	uint16_t				v2;
+	uint16_t				v3;
+	uint16_t				v____A1;
+	int32_t					v4;
+	uint32_t				v5;
+	uint16_t				v6;
+	uint16_t				v____A2;
+	uint32_t				v7;
+} __packed;
+
+struct srs_trumedia_params {
+	struct srs_trumedia_params_GLOBAL	global;
+	struct srs_trumedia_params_WOWHD	wowhd;
+	struct srs_trumedia_params_CSHP		cshp;
+	struct srs_trumedia_params_HPF		hpf;
+	struct srs_trumedia_params_PEQ		peq;
+	struct srs_trumedia_params_HL		hl;
+} __packed;
+
+int srs_trumedia_open(int port_id, int srs_tech_id, void *srs_params);
+/* SRS TruMedia end */
+
+/* SRS Studio Sound 3D start */
+#define SRS_ID_SS3D_GLOBAL	0x00000001
+#define SRS_ID_SS3D_CTRL	0x00000002
+#define SRS_ID_SS3D_FILTER	0x00000003
+
+struct srs_SS3D_params_GLOBAL {
+	uint8_t                  v1;
+	uint8_t                  v2;
+	uint8_t                  v3;
+	uint8_t                  v4;
+	uint8_t                  v5;
+	uint8_t                  v6;
+	uint8_t                  v7;
+	uint8_t                  v8;
+} __packed;
+
+struct srs_SS3D_ctrl_params {
+	uint8_t				v[236];
+} __packed;
+
+struct srs_SS3D_filter_params {
+	uint8_t				v[28 + 2752];
+} __packed;
+
+struct srs_SS3D_params {
+	struct srs_SS3D_params_GLOBAL   global;
+	struct srs_SS3D_ctrl_params     ss3d;
+	struct srs_SS3D_filter_params   ss3d_f;
+} __packed;
+
+int srs_ss3d_open(int port_id, int srs_tech_id, void *srs_params);
+/* SRS Studio Sound 3D end */
+#endif /*_APR_AUDIO_H_*/
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/include/sound/apr_audio-v2.h	2019-10-29 09:26:25.533221674 +0100
@@ -0,0 +1,11429 @@
+/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+*
+* This program is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License version 2 and
+* only version 2 as published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+* GNU General Public License for more details.
+*/
+
+
+#ifndef _APR_AUDIO_V2_H_
+#define _APR_AUDIO_V2_H_
+
+#include <linux/qdsp6v2/apr.h>
+#include <linux/msm_audio.h>
+
+/* size of header needed for passing data out of band */
+#define APR_CMD_OB_HDR_SZ  12
+
+/* size of header needed for getting data */
+#define APR_CMD_GET_HDR_SZ 16
+
+struct param_outband {
+	size_t       size;
+	void        *kvaddr;
+	phys_addr_t  paddr;
+};
+
+#define ADSP_ADM_VERSION    0x00070000
+
+#define ADM_CMD_SHARED_MEM_MAP_REGIONS    0x00010322
+#define ADM_CMDRSP_SHARED_MEM_MAP_REGIONS 0x00010323
+#define ADM_CMD_SHARED_MEM_UNMAP_REGIONS 0x00010324
+
+#define ADM_CMD_MATRIX_MAP_ROUTINGS_V5 0x00010325
+#define ADM_CMD_STREAM_DEVICE_MAP_ROUTINGS_V5 0x0001033D
+/* Enumeration for an audio Rx matrix ID.*/
+#define ADM_MATRIX_ID_AUDIO_RX              0
+
+#define ADM_MATRIX_ID_AUDIO_TX              1
+
+#define ADM_MATRIX_ID_COMPRESSED_AUDIO_RX   2
+
+#define ADM_MATRIX_ID_COMPRESSED_AUDIO_TX   3
+
+#define ADM_MATRIX_ID_LISTEN_TX             4
+/* Enumeration for an audio Tx matrix ID.*/
+#define ADM_MATRIX_ID_AUDIOX              1
+
+#define ADM_MAX_COPPS 5
+
+/* make sure this matches with msm_audio_calibration */
+#define SP_V2_NUM_MAX_SPKR 2
+
+/*  Session map node structure.
+*	Immediately following this structure are num_copps
+*	entries of COPP IDs. The COPP IDs are 16 bits, so
+*	there might be a padding 16-bit field if num_copps
+*	is odd.
+*/
+struct adm_session_map_node_v5 {
+	u16                  session_id;
+/* Handle of the ASM session to be routed. Supported values: 1
+* to 8.
+*/
+
+
+	u16                  num_copps;
+	/* Number of COPPs to which this session is to be routed.
+			Supported values: 0 < num_copps <= ADM_MAX_COPPS.
+	*/
+} __packed;
+
+/*  Payload of the #ADM_CMD_MATRIX_MAP_ROUTINGS_V5 command.
+*	Immediately following this structure are num_sessions of the session map
+*	node payload (adm_session_map_node_v5).
+*/
+
+struct adm_cmd_matrix_map_routings_v5 {
+	struct apr_hdr	hdr;
+
+	u32                  matrix_id;
+/* Specifies whether the matrix ID is Audio Rx (0) or Audio Tx
+* (1). Use the ADM_MATRIX_ID_AUDIO_RX or ADM_MATRIX_ID_AUDIOX
+* macros to set this field.
+*/
+	u32                  num_sessions;
+	/* Number of sessions being updated by this command (optional).*/
+} __packed;
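+
+/*
+ * Usage sketch (illustrative only, compiled out): sizing the variable
+ * payload of ADM_CMD_MATRIX_MAP_ROUTINGS_V5. Each session map node is
+ * followed by its 16-bit COPP IDs, padded to a 32-bit boundary when
+ * num_copps is odd. The helper assumes, for simplicity, that every
+ * session routes to the same number of COPPs.
+ */
+#if 0
+static u32 example_map_routings_v5_size(u32 num_sessions, u16 num_copps)
+{
+	u32 copp_bytes = ((num_copps + 1U) & ~1U) * sizeof(u16);
+
+	return sizeof(struct adm_cmd_matrix_map_routings_v5) +
+	       num_sessions * (sizeof(struct adm_session_map_node_v5) +
+			       copp_bytes);
+}
+#endif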
+
+/* This command allows a client to open a COPP/Voice Proc Tx module
+*	and set up the device session: Matrix -> COPP -> AFE on the Rx
+*	path and AFE -> COPP -> Matrix on the Tx path. This enables PCM
+*	data to be transferred to/from the endpoint (AFEPortID).
+*
+*	@return
+*	#ADM_CMDRSP_DEVICE_OPEN_V5 with the resulting status and
+*	COPP ID.
+*/
+#define ADM_CMD_DEVICE_OPEN_V5                          0x00010326
+
+/* This command allows a client to open a COPP/Voice Proc module the
+*	same way as ADM_CMD_DEVICE_OPEN_V5, but with support for multiple
+*	endpoint2 channels.
+*
+*	@return
+*	#ADM_CMDRSP_DEVICE_OPEN_V6 with the resulting status and
+*	COPP ID.
+*/
+#define ADM_CMD_DEVICE_OPEN_V6                      0x00010356
+
+/* This command allows a client to open a COPP/Voice Proc module the
+*	same way as ADM_CMD_DEVICE_OPEN_V6, but with support for an
+*	arbitrary channel configuration.
+*
+*	@return
+*	#ADM_CMDRSP_DEVICE_OPEN_V8 with the resulting status and
+*	COPP ID.
+*/
+#define ADM_CMD_DEVICE_OPEN_V8                      0x0001036A
+
+/* Definition for a low latency stream session. */
+#define ADM_LOW_LATENCY_DEVICE_SESSION			0x2000
+
+/* Definition for an ultra low latency stream session. */
+#define ADM_ULTRA_LOW_LATENCY_DEVICE_SESSION		0x4000
+
+/* Definition for an ultra low latency stream session with post-processing. */
+#define ADM_ULL_POST_PROCESSING_DEVICE_SESSION		0x8000
+
+/* Definition for a legacy device session. */
+#define ADM_LEGACY_DEVICE_SESSION                                      0
+
+/* Indicates that endpoint_id_2 is to be ignored.*/
+#define ADM_CMD_COPP_OPEN_END_POINT_ID_2_IGNORE				0xFFFF
+
+#define ADM_CMD_COPP_OPEN_MODE_OF_OPERATION_RX_PATH_COPP		 1
+
+#define ADM_CMD_COPP_OPEN_MODE_OF_OPERATIONX_PATH_LIVE_COPP		 2
+
+#define ADM_CMD_COPP_OPEN_MODE_OF_OPERATIONX_PATH_NON_LIVE_COPP	 3
+
+/* Indicates that an audio COPP is to send/receive a mono PCM
+ * stream to/from
+ *	END_POINT_ID_1.
+ */
+#define ADM_CMD_COPP_OPEN_CHANNEL_CONFIG_MONO		1
+
+/* Indicates that an audio COPP is to send/receive a
+ *	stereo PCM stream to/from END_POINT_ID_1.
+ */
+#define ADM_CMD_COPP_OPEN_CHANNEL_CONFIG_STEREO		2
+
+/* Sample rate is 8000 Hz.*/
+#define ADM_CMD_COPP_OPEN_SAMPLE_RATE_8K 8000
+
+/* Sample rate is 16000 Hz.*/
+#define ADM_CMD_COPP_OPEN_SAMPLE_RATE_16K 16000
+
+/* Sample rate is 48000 Hz.*/
+#define ADM_CMD_COPP_OPEN_SAMPLE_RATE_48K 48000
+
+/* Definition for a COPP live input flag bitmask.*/
+#define ADM_BIT_MASK_COPP_LIVE_INPUT_FLAG (0x0001U)
+
+/* Definition for a COPP live shift value bitmask.*/
+#define ADM_SHIFT_COPP_LIVE_INPUT_FLAG	 0
+
+/* Definition for the COPP ID bitmask.*/
+#define ADM_BIT_MASK_COPP_ID  (0x0000FFFFUL)
+
+/* Definition for the COPP ID shift value.*/
+#define ADM_SHIFT_COPP_ID	0
+
+/* Definition for the service ID bitmask.*/
+#define ADM_BIT_MASK_SERVICE_ID  (0x00FF0000UL)
+
+/* Definition for the service ID shift value.*/
+#define ADM_SHIFT_SERVICE_ID	16
+
+/* Definition for the domain ID bitmask.*/
+#define ADM_BIT_MASK_DOMAIN_ID    (0xFF000000UL)
+
+/* Definition for the domain ID shift value.*/
+#define ADM_SHIFT_DOMAIN_ID	24
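+
+/*
+ * Usage sketch (illustrative only, compiled out): decoding a packed
+ * COPP handle with the masks and shifts above. The helper name is
+ * hypothetical.
+ */
+#if 0
+static void example_decode_copp_handle(u32 handle, u16 *copp_id,
+				       u8 *service_id, u8 *domain_id)
+{
+	*copp_id = (handle & ADM_BIT_MASK_COPP_ID) >> ADM_SHIFT_COPP_ID;
+	*service_id = (handle & ADM_BIT_MASK_SERVICE_ID) >>
+			ADM_SHIFT_SERVICE_ID;
+	*domain_id = (handle & ADM_BIT_MASK_DOMAIN_ID) >>
+			ADM_SHIFT_DOMAIN_ID;
+}
+#endif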
+
+/*  ADM device open command payload of the
+	#ADM_CMD_DEVICE_OPEN_V5 command.
+*/
+struct adm_cmd_device_open_v5 {
+	struct apr_hdr		hdr;
+	u16                  flags;
+/* Reserved for future use. Clients must set this field
+ * to zero.
+ */
+
+	u16                  mode_of_operation;
+/* Specifies whether the COPP must be opened on the Tx or Rx
+ * path. Use the ADM_CMD_COPP_OPEN_MODE_OF_OPERATION_* macros for
+ * supported values and interpretation.
+ * Supported values:
+ * - 0x1 -- Rx path COPP
+ * - 0x2 -- Tx path live COPP
+ * - 0x3 -- Tx path nonlive COPP
+ * Live connections cause sample discarding in the Tx device
+ * matrix if the destination output ports do not pull them
+ * fast enough. Nonlive connections queue the samples
+ * indefinitely.
+ */
+
+	u16                  endpoint_id_1;
+/* Logical and physical endpoint ID of the audio path.
+ * If the ID is a voice processor Tx block, it receives near
+ * samples.	Supported values: Any pseudoport, AFE Rx port,
+ * or AFE Tx port For a list of valid IDs, refer to
+ * @xhyperref{Q4,[Q4]}.
+ * Q4 = Hexagon Multimedia: AFE Interface Specification
+ */
+
+	u16                  endpoint_id_2;
+/* Logical and physical endpoint ID 2 for a voice processor
+ * Tx block.
+ * This is not applicable to audio COPP.
+ * Supported values:
+ * - AFE Rx port
+ * - 0xFFFF -- Endpoint 2 is unavailable and the voice
+ * processor Tx
+ * block ignores this endpoint
+ * When the voice processor Tx block is created on the audio
+ * record path,
+ * it can receive far-end samples from an AFE Rx port if the
+ * voice call
+ * is active. The ID of the AFE port is provided in this
+ * field.
+ * For a list of valid IDs, refer @xhyperref{Q4,[Q4]}.
+ */
+
+	u32                  topology_id;
+	/* Audio COPP topology ID; 32-bit GUID. */
+
+	u16                  dev_num_channel;
+/* Number of channels the audio COPP sends to/receives from
+ * the endpoint.
+ * Supported values: 1 to 8.
+ * The value is ignored for the voice processor Tx block,
+ * where channel
+ * configuration is derived from the topology ID.
+ */
+
+	u16                  bit_width;
+/* Bit width (in bits) that the audio COPP sends to/receives
+ * from the
+ * endpoint. The value is ignored for the voice processing
+ * Tx block,
+ * where the PCM width is 16 bits.
+ */
+
+	u32                  sample_rate;
+/* Sampling rate at which the audio COPP/voice processor
+ * Tx block
+ * interfaces with the endpoint.
+ * Supported values for voice processor Tx: 8000, 16000,
+ * 48000 Hz
+ * Supported values for audio COPP: >0 and <=192 kHz
+ */
+
+	u8                   dev_channel_mapping[8];
+/* Array of channel mapping of buffers that the audio COPP
+ * sends to the endpoint. Element i of the array describes
+ * channel i inside the buffer, where 0 <= i < dev_num_channel.
+ * This value is relevant only for an audio Rx COPP.
+ * For the voice processor block and Tx audio block, this field
+ * is set to zero and is ignored.
+ */
+} __packed;
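+
+/*
+ * Usage sketch (illustrative only, compiled out): opening a stereo
+ * 48 kHz Rx COPP with ADM_CMD_DEVICE_OPEN_V5 using the macros above.
+ * The helper name is hypothetical; APR header fields other than the
+ * opcode are omitted, and the channel-map values 1 and 2 correspond
+ * to front-left and front-right.
+ */
+#if 0
+static void example_open_rx_copp_v5(struct adm_cmd_device_open_v5 *cmd,
+				    u16 afe_port_id, u32 topology_id)
+{
+	memset(cmd, 0, sizeof(*cmd));
+	cmd->hdr.opcode = ADM_CMD_DEVICE_OPEN_V5;
+	cmd->flags = ADM_LEGACY_DEVICE_SESSION;
+	cmd->mode_of_operation =
+		ADM_CMD_COPP_OPEN_MODE_OF_OPERATION_RX_PATH_COPP;
+	cmd->endpoint_id_1 = afe_port_id;
+	cmd->endpoint_id_2 = ADM_CMD_COPP_OPEN_END_POINT_ID_2_IGNORE;
+	cmd->topology_id = topology_id;
+	cmd->dev_num_channel = 2;
+	cmd->bit_width = 16;
+	cmd->sample_rate = ADM_CMD_COPP_OPEN_SAMPLE_RATE_48K;
+	cmd->dev_channel_mapping[0] = 1;	/* front left */
+	cmd->dev_channel_mapping[1] = 2;	/* front right */
+}
+#endif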
+
+/*  ADM device open command payload of the
+ *  #ADM_CMD_DEVICE_OPEN_V6 command.
+ */
+struct adm_cmd_device_open_v6 {
+	struct apr_hdr		hdr;
+	u16                  flags;
+/* Reserved for future use. Clients must set this field
+ * to zero.
+ */
+
+	u16                  mode_of_operation;
+/* Specifies whether the COPP must be opened on the Tx or Rx
+ * path. Use the ADM_CMD_COPP_OPEN_MODE_OF_OPERATION_* macros for
+ * supported values and interpretation.
+ * Supported values:
+ * - 0x1 -- Rx path COPP
+ * - 0x2 -- Tx path live COPP
+ * - 0x3 -- Tx path nonlive COPP
+ * Live connections cause sample discarding in the Tx device
+ * matrix if the destination output ports do not pull them
+ * fast enough. Nonlive connections queue the samples
+ * indefinitely.
+ */
+
+	u16                  endpoint_id_1;
+/* Logical and physical endpoint ID of the audio path.
+ * If the ID is a voice processor Tx block, it receives near
+ * samples.	Supported values: Any pseudoport, AFE Rx port,
+ * or AFE Tx port. For a list of valid IDs, refer to
+ * @xhyperref{Q4,[Q4]}.
+ * Q4 = Hexagon Multimedia: AFE Interface Specification
+ */
+
+	u16                  endpoint_id_2;
+/* Logical and physical endpoint ID 2 for a voice processor
+ * Tx block.
+ * This is not applicable to audio COPP.
+ * Supported values:
+ * - AFE Rx port
+ * - 0xFFFF -- Endpoint 2 is unavailable and the voice
+ * processor Tx
+ * block ignores this endpoint
+ * When the voice processor Tx block is created on the audio
+ * record path,
+ * it can receive far-end samples from an AFE Rx port if the
+ * voice call
+ * is active. The ID of the AFE port is provided in this
+ * field.
+ * For a list of valid IDs, refer @xhyperref{Q4,[Q4]}.
+ */
+
+	u32                  topology_id;
+/* Audio COPP topology ID; 32-bit GUID. */
+
+	u16                  dev_num_channel;
+/* Number of channels the audio COPP sends to/receives from
+ * the endpoint.
+ * Supported values: 1 to 8.
+ * The value is ignored for the voice processor Tx block,
+ * where channel
+ * configuration is derived from the topology ID.
+ */
+
+	u16                  bit_width;
+/* Bit width (in bits) that the audio COPP sends to/receives
+ * from the
+ * endpoint. The value is ignored for the voice processing
+ * Tx block,
+ * where the PCM width is 16 bits.
+ */
+
+	u32                  sample_rate;
+/* Sampling rate at which the audio COPP/voice processor
+ * Tx block
+ * interfaces with the endpoint.
+ * Supported values for voice processor Tx: 8000, 16000,
+ * 48000 Hz
+ * Supported values for audio COPP: >0 and <=192 kHz
+ */
+
+	u8                   dev_channel_mapping[8];
+/* Array of channel mapping of buffers that the audio COPP
+ * sends to the endpoint. Element i of the array describes
+ * channel i inside the buffer, where 0 <= i < dev_num_channel.
+ * This value is relevant only for an audio Rx COPP.
+ * For the voice processor block and Tx audio block, this field
+ * is set to zero and is ignored.
+ */
+
+	u16                  dev_num_channel_eid2;
+/* Number of channels the voice processor block sends
+ * to/receives from the endpoint2.
+ * Supported values: 1 to 8.
+ * The value is ignored for audio COPP or if endpoint_id_2 is
+ * set to 0xFFFF.
+ */
+
+	u16                  bit_width_eid2;
+/* Bit width (in bits) that the voice processor sends
+ * to/receives from the endpoint2.
+ * Supported values: 16 and 24.
+ * The value is ignored for audio COPP or if endpoint_id_2 is
+ * set to 0xFFFF.
+ */
+
+	u32                  sample_rate_eid2;
+/* Sampling rate at which the voice processor Tx block
+ * interfaces with the endpoint2.
+ * Supported values for Tx voice processor: >0 and <=384 kHz
+ * The value is ignored for audio COPP or if endpoint_id_2 is
+ * set to 0xFFFF.
+ */
+
+	u8                   dev_channel_mapping_eid2[8];
+/* Array of channel mapping of buffers that the voice processor
+ * sends to the endpoint. Element i of the array describes
+ * channel i inside the buffer, where 0 <= i < dev_num_channel.
+ * This value is relevant only for the Tx voice processor.
+ * The values are ignored for audio COPP or if endpoint_id_2 is
+ * set to 0xFFFF.
+ */
+} __packed;
+
+/*  ADM device open command payload of the
+*   #ADM_CMD_DEVICE_OPEN_V8 command.
+*/
+struct adm_cmd_device_open_v8 {
+	struct apr_hdr       hdr;
+	u16                  flags;
+/* Bit 11 of the flags field enables bit-width native mode: when set,
+*  the matrix mixer runs at the stream's native bit width for this
+*  device session.
+*
+*  Bit 12 enables channel native mode: when set, the matrix mixer runs
+*  with the stream's native channel configuration for this device
+*  session.
+*
+*  All other bits are reserved; clients must set them to 0.
+*/
+	u16                  mode_of_operation;
+/* Specifies whether the COPP must be opened on the Tx or Rx
+ * path. Use the ADM_CMD_COPP_OPEN_MODE_OF_OPERATION_* macros for
+ * supported values and interpretation.
+ * Supported values:
+ * - 0x1 -- Rx path COPP
+ * - 0x2 -- Tx path live COPP
+ * - 0x3 -- Tx path nonlive COPP
+ * Live connections cause sample discarding in the Tx device
+ * matrix if the destination output ports do not pull them
+ * fast enough. Nonlive connections queue the samples
+ * indefinitely.
+ */
+	u32                  topology_id;
+	/* Audio COPP topology ID; 32-bit GUID. */
+
+
+	u16                  endpoint_id_1;
+/* Logical and physical endpoint ID of the audio path.
+ * If the ID is a voice processor Tx block, it receives near
+ * samples.	Supported values: Any pseudoport, AFE Rx port,
+ * or AFE Tx port. For a list of valid IDs, refer to
+ * @xhyperref{Q4,[Q4]}.
+ * Q4 = Hexagon Multimedia: AFE Interface Specification
+ */
+
+	u16                  endpoint_id_2;
+/* Logical and physical endpoint ID 2 for a voice processor
+ * Tx block.
+ * This is not applicable to audio COPP.
+ * Supported values:
+ * - AFE Rx port
+ * - 0xFFFF -- Endpoint 2 is unavailable and the voice
+ * processor Tx
+ * block ignores this endpoint
+ * When the voice processor Tx block is created on the audio
+ * record path,
+ * it can receive far-end samples from an AFE Rx port if the
+ * voice call
+ * is active. The ID of the AFE port is provided in this
+ * field.
+ * For a list of valid IDs, refer @xhyperref{Q4,[Q4]}.
+ */
+
+/*
+ * Logical and physical endpoint ID of the audio path.
+ * This indicates the AFE Rx port in ADM loopback use cases.
+ * In all other use cases this should be set to 0xFFFF.
+ */
+	u16                  endpoint_id_3;
+	u16                  reserved;
+
+	u16                  dev_num_channel;
+/* Number of channels the audio COPP sends to/receives from
+ * the endpoint.
+ * Supported values: 1 to 32.
+ * The value is ignored for the voice processor Tx block, where
+ * the channel configuration is derived from the topology ID.
+ */
+
+	u16                  bit_width;
+/* Bit width (in bits) that the audio COPP sends to/receives
+ * from the endpoint. The value is ignored for the voice
+ * processing Tx block, where the PCM width is 16 bits.
+ */
+
+	u32                  sample_rate;
+/* Sampling rate at which the audio COPP/voice processor Tx
+ * block interfaces with the endpoint.
+ * Supported values for voice processor Tx: 8000, 16000,
+ * 48000 Hz
+ * Supported values for audio COPP: >0 and <=192 kHz
+ */
+
+	u8                   dev_channel_mapping[32];
+/* Array of channel mapping of buffers that the audio COPP
+ * sends to the endpoint. Channel[i] describes channel i inside
+ * the buffer, where 0 <= i < dev_num_channel.
+ * This value is relevant only for an audio Rx COPP.
+ * For the voice processor block and Tx audio block, this field
+ * is set to zero and is ignored.
+ */
+
+	u16                  dev_num_channel_eid2;
+/* Number of channels the audio COPP sends to/receives from
+ * endpoint 2.
+ * Supported values: 1 to 32.
+ * The value is ignored for the voice processor Tx block, where
+ * the channel configuration is derived from the topology ID.
+ */
+
+	u16                  bit_width_eid2;
+/* Bit width (in bits) that the audio COPP sends to/receives
+ * from endpoint 2. The value is ignored for the voice
+ * processing Tx block, where the PCM width is 16 bits.
+ */
+
+	u32                  sample_rate_eid2;
+/* Sampling rate at which the audio COPP/voice processor Tx
+ * block interfaces with endpoint 2.
+ * Supported values for voice processor Tx: 8000, 16000,
+ * 48000 Hz
+ * Supported values for audio COPP: >0 and <=192 kHz
+ */
+
+	u8                   dev_channel_mapping_eid2[32];
+/* Array of channel mapping of buffers that the audio COPP
+ * sends to endpoint 2. Channel[i] describes channel i inside
+ * the buffer, where 0 <= i < dev_num_channel_eid2.
+ * This value is relevant only for an audio Rx COPP.
+ * For the voice processor block and Tx audio block, this field
+ * is set to zero and is ignored.
+ */
+
+	u16                  dev_num_channel_eid3;
+/* Number of channels the audio COPP sends to/receives from
+ * endpoint 3.
+ * Supported values: 1 to 32.
+ * The value is ignored for the voice processor Tx block, where
+ * the channel configuration is derived from the topology ID.
+ */
+
+	u16                  bit_width_eid3;
+/* Bit width (in bits) that the audio COPP sends to/receives
+ * from endpoint 3. The value is ignored for the voice
+ * processing Tx block, where the PCM width is 16 bits.
+ */
+
+	u32                  sample_rate_eid3;
+/* Sampling rate at which the audio COPP/voice processor Tx
+ * block interfaces with endpoint 3.
+ * Supported values for voice processor Tx: 8000, 16000,
+ * 48000 Hz
+ * Supported values for audio COPP: >0 and <=192 kHz
+ */
+
+	u8                   dev_channel_mapping_eid3[32];
+/* Array of channel mapping of buffers that the audio COPP
+ * sends to endpoint 3. Channel[i] describes channel i inside
+ * the buffer, where 0 <= i < dev_num_channel_eid3.
+ * This value is relevant only for an audio Rx COPP.
+ * For the voice processor block and Tx audio block, this field
+ * is set to zero and is ignored.
+ */
+} __packed;
+
+/*
+ *	This command allows the client to close a COPP and disconnect
+ *	the device session.
+ */
+#define ADM_CMD_DEVICE_CLOSE_V5                         0x00010327
+
+/* Sets one or more parameters to a COPP. */
+#define ADM_CMD_SET_PP_PARAMS_V5                        0x00010328
+
+/* Payload of the #ADM_CMD_SET_PP_PARAMS_V5 command.
+ * If the payload_addr_lsw and payload_addr_msw fields are NULL,
+ * a series of adm_param_data_v5 structures immediately follows,
+ * whose total size is payload_size bytes.
+ */
+struct adm_cmd_set_pp_params_v5 {
+	struct apr_hdr hdr;
+	u32		payload_addr_lsw;
+	/* LSW of parameter data payload address.*/
+	u32		payload_addr_msw;
+	/* MSW of parameter data payload address.*/
+
+	u32		mem_map_handle;
+/* Memory map handle returned by the
+ * ADM_CMD_SHARED_MEM_MAP_REGIONS command.
+ * If mem_map_handle is zero, the message is in the payload.
+ */
+
+	u32		payload_size;
+/* Size in bytes of the variable payload accompanying this
+ * message or in shared memory. This is used for parsing the
+ * parameter payload.
+ */
+} __packed;
+
+/* Payload format for COPP parameter data.
+ * Immediately following this structure are param_size bytes
+ * of parameter data.
+ */
+struct adm_param_data_v5 {
+	u32                  module_id;
+	/* Unique ID of the module. */
+	u32                  param_id;
+	/* Unique ID of the parameter. */
+	u16                  param_size;
+	/* Data size of the param_id/module_id combination.
+	This value is a
+		multiple of 4 bytes. */
+	u16                  reserved;
+	/* Reserved for future enhancements.
+	 * This field must be set to zero.
+	 */
+} __packed;
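+
+/*
+ * Illustrative sketch (editor's addition, not part of the original
+ * API): in-band layout of an #ADM_CMD_SET_PP_PARAMS_V5 message.
+ * With mem_map_handle and both payload address words set to 0, the
+ * parameter data travels inside the APR packet itself, immediately
+ * after the command header, as one or more adm_param_data_v5 blocks
+ * plus their data. MY_MODULE_ID, MY_PARAM_ID and the u32 value are
+ * placeholders, not real IDs.
+ *
+ *	struct {
+ *		struct adm_cmd_set_pp_params_v5 cmd;
+ *		struct adm_param_data_v5 pd;
+ *		u32 value;
+ *	} __packed msg;
+ *
+ *	memset(&msg, 0, sizeof(msg));
+ *	msg.cmd.payload_addr_lsw = 0;	// in-band: no shared memory
+ *	msg.cmd.payload_addr_msw = 0;
+ *	msg.cmd.mem_map_handle = 0;	// zero => data is in the payload
+ *	msg.cmd.payload_size = sizeof(msg.pd) + sizeof(msg.value);
+ *	msg.pd.module_id = MY_MODULE_ID;
+ *	msg.pd.param_id = MY_PARAM_ID;
+ *	msg.pd.param_size = sizeof(msg.value);	// multiple of 4 bytes
+ */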
+
+
+struct param_data_v6 {
+	/* Unique ID of the module. */
+	u32		module_id;
+	/* Unique ID of the instance. */
+	u16		instance_id;
+	/* Reserved for future enhancements.
+	 * This field must be set to zero.
+	 */
+	u16		reserved;
+	/* Unique ID of the parameter. */
+	u32		param_id;
+	/* Data size of the param_id/module_id combination.
+	 * This value is a
+	 * multiple of 4 bytes.
+	 */
+	u32		param_size;
+} __packed;
+
+/* The ADM_CMD_SET_MTMX_STRTR_DEV_PARAMS_V1 command is used to set
+ * calibration data on the ADSP matrix mixer; its payload is of
+ * type struct adm_cmd_set_mtmx_params_v1.
+ *
+ * ADM_CMD_GET_MTMX_STRTR_DEV_PARAMS_V1 can be used to get the
+ * calibration data from the ADSP matrix mixer, and
+ * ADM_CMDRSP_GET_MTMX_STRTR_DEV_PARAMS_V1 is the response to
+ * ADM_CMD_GET_MTMX_STRTR_DEV_PARAMS_V1.
+ */
+#define ADM_CMD_SET_MTMX_STRTR_DEV_PARAMS_V1	0x00010367
+#define ADM_CMD_GET_MTMX_STRTR_DEV_PARAMS_V1	0x00010368
+#define ADM_CMDRSP_GET_MTMX_STRTR_DEV_PARAMS_V1	0x00010369
+
+/* Payload of the #ADM_CMD_SET_MTMX_STRTR_DEV_PARAMS_V1 command.
+ * If the payload_addr_lsw and payload_addr_msw fields are NULL,
+ * a series of struct param_data_v6 structures immediately
+ * follows, whose total size is payload_size bytes.
+ */
+struct adm_cmd_set_mtmx_params_v1 {
+	struct apr_hdr	hdr;
+	/* LSW of parameter data payload address.*/
+	u32		payload_addr_lsw;
+
+	/* MSW of parameter data payload address.*/
+	u32		payload_addr_msw;
+
+	/* Memory map handle returned by the
+	 * ADM_CMD_SHARED_MEM_MAP_REGIONS command.
+	 * If mem_map_handle is zero, the message is in the payload.
+	 */
+	u32		mem_map_handle;
+
+	/* Size in bytes of the variable payload accompanying this
+	 * message or in shared memory. This is used for parsing
+	 * the parameter payload.
+	 */
+	u32		payload_size;
+
+	/* COPP ID/Device ID */
+	u16		copp_id;
+
+	/* For alignment, must be set to 0 */
+	u16		reserved;
+} __packed;
+
+struct enable_param_v6 {
+	/*
+	 * Specifies whether the audio processing module is enabled.
+	 * This is a generic parameter used to configure or query the
+	 * state of any audio processing module.
+	 */
+	struct param_data_v6		param;
+
+	/* @values 0 : Disable 1: Enable */
+	uint32_t			enable;
+} __packed;
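+
+/*
+ * Illustrative sketch (editor's addition): filling enable_param_v6 to
+ * enable one instance of a module. Unlike adm_param_data_v5,
+ * param_data_v6 carries an instance_id, so the same module_id can be
+ * addressed per instance. MY_MODULE_ID and MY_ENABLE_PARAM_ID are
+ * placeholders, not real IDs.
+ *
+ *	struct enable_param_v6 ep;
+ *
+ *	memset(&ep, 0, sizeof(ep));
+ *	ep.param.module_id = MY_MODULE_ID;
+ *	ep.param.instance_id = 0;		// first instance
+ *	ep.param.param_id = MY_ENABLE_PARAM_ID;
+ *	ep.param.param_size = sizeof(ep.enable);
+ *	ep.enable = 1;				// 0: disable, 1: enable
+ */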
+
+/* Defined in ADSP as VOICE_MODULE_TX_STREAM_LIMITER but
+ * used for RX stream limiter on matrix input to ADM.
+ */
+#define ADM_MTMX_MODULE_STREAM_LIMITER  0x00010F15
+
+#define ASM_STREAM_CMD_REGISTER_PP_EVENTS 0x00013213
+#define ASM_STREAM_PP_EVENT 0x00013214
+#define ASM_STREAM_CMD_REGISTER_IEC_61937_FMT_UPDATE 0x13333
+#define ASM_IEC_61937_MEDIA_FMT_EVENT 0x13334
+
+#define DSP_STREAM_CMD "ADSP Stream Cmd"
+#define DSP_STREAM_CALLBACK "ADSP Stream Callback Event"
+#define DSP_STREAM_CALLBACK_QUEUE_SIZE 1024
+
+struct dsp_stream_callback_list {
+	struct list_head list;
+	struct msm_adsp_event_data event;
+};
+
+struct dsp_stream_callback_prtd {
+	uint16_t event_count;
+	struct list_head event_queue;
+	spinlock_t prtd_spin_lock;
+};
+
+/* set customized mixing on matrix mixer */
+#define ADM_CMD_SET_PSPD_MTMX_STRTR_PARAMS_V5                        0x00010344
+struct adm_cmd_set_pspd_mtmx_strtr_params_v5 {
+	struct apr_hdr hdr;
+	/* LSW of parameter data payload address.*/
+	u32		payload_addr_lsw;
+	/* MSW of parameter data payload address.*/
+	u32		payload_addr_msw;
+	/* Memory map handle returned by the
+	 * ADM_CMD_SHARED_MEM_MAP_REGIONS command.
+	 * If mem_map_handle is zero, the message is in the payload.
+	 */
+	u32		mem_map_handle;
+	/* Size in bytes of the variable payload accompanying this
+	 * message or in shared memory. This is used for parsing the
+	 * parameter payload.
+	 */
+	u32		payload_size;
+	u16		direction;
+	u16		sessionid;
+	u16		deviceid;
+	u16		reserved;
+} __packed;
+
+/* Defined specifically for in-band use, includes params */
+struct adm_cmd_set_pp_params_inband_v5 {
+	struct apr_hdr hdr;
+	/* LSW of parameter data payload address.*/
+	u32             payload_addr_lsw;
+	/* MSW of parameter data payload address.*/
+	u32             payload_addr_msw;
+	/* Memory map handle returned by the
+	 * ADM_CMD_SHARED_MEM_MAP_REGIONS command.
+	 * If mem_map_handle is zero, the message is in the payload.
+	 */
+	u32             mem_map_handle;
+	/* Size in bytes of the variable payload accompanying this
+	 * message or in shared memory. This is used for parsing the
+	 * parameter payload.
+	 */
+	u32             payload_size;
+	/* Parameters passed for in band payload */
+	struct adm_param_data_v5        params;
+} __packed;
+
+/* Returns the status and COPP ID to an #ADM_CMD_DEVICE_OPEN_V5 command.
+ */
+#define ADM_CMDRSP_DEVICE_OPEN_V5                      0x00010329
+
+/* Payload of the #ADM_CMDRSP_DEVICE_OPEN_V5 message, which
+ * returns the status and COPP ID to an #ADM_CMD_DEVICE_OPEN_V5
+ * command.
+ */
+struct adm_cmd_rsp_device_open_v5 {
+	u32                  status;
+	/* Status message (error code).*/
+
+	u16                  copp_id;
+	/* COPP ID. Supported values: 0 <= copp_id < ADM_MAX_COPPS */
+
+	u16                  reserved;
+	/* Reserved. This field must be set to zero.*/
+} __packed;
+
+/* Returns the status and COPP ID to an #ADM_CMD_DEVICE_OPEN_V6 command.
+ */
+#define ADM_CMDRSP_DEVICE_OPEN_V6                      0x00010357
+
+/* Returns the status and COPP ID to an #ADM_CMD_DEVICE_OPEN_V8 command.
+ */
+#define ADM_CMDRSP_DEVICE_OPEN_V8                      0x0001036B
+
+/* The payload of the #ADM_CMDRSP_DEVICE_OPEN_V6 message, which
+ * returns the status and COPP ID to an #ADM_CMD_DEVICE_OPEN_V6
+ * command, is exactly the same as that of
+ * #ADM_CMDRSP_DEVICE_OPEN_V5.
+ */
+
+/* This command allows a query of one COPP parameter. */
+#define ADM_CMD_GET_PP_PARAMS_V5                                0x0001032A
+
+/* Payload of an #ADM_CMD_GET_PP_PARAMS_V5 command. */
+struct adm_cmd_get_pp_params_v5 {
+	struct apr_hdr hdr;
+	u32                  data_payload_addr_lsw;
+	/* LSW of parameter data payload address.*/
+
+	u32                  data_payload_addr_msw;
+	/* MSW of parameter data payload address.*/
+
+	/* If the mem_map_handle is non zero,
+	 * on ACK, the ParamData payloads begin at
+	 * the address specified (out-of-band).
+	 */
+
+	u32                  mem_map_handle;
+	/* Memory map handle returned
+	 * by ADM_CMD_SHARED_MEM_MAP_REGIONS command.
+	 * If the mem_map_handle is 0, it implies that
+	 * the ACK's payload will contain the ParamData (in-band).
+	 */
+
+	u32                  module_id;
+	/* Unique ID of the module. */
+
+	u32                  param_id;
+	/* Unique ID of the parameter. */
+
+	u16                  param_max_size;
+	/* Maximum data size of the parameter ID/module ID
+	 * combination. This field is a multiple of 4 bytes.
+	 */
+	u16                  reserved;
+	/* Reserved for future enhancements.
+	 * This field must be set to zero.
+	 */
+} __packed;
+
+/* Returns parameter values
+ *	in response to an #ADM_CMD_GET_PP_PARAMS_V5 command.
+ */
+#define ADM_CMDRSP_GET_PP_PARAMS_V5		0x0001032B
+
+/* Payload of the #ADM_CMDRSP_GET_PP_PARAMS_V5 message, which
+ * returns parameter values in response to an
+ * #ADM_CMD_GET_PP_PARAMS_V5 command.
+ * Immediately following this structure is the adm_param_data_v5
+ * structure containing the pre/postprocessing parameter data.
+ * For an in-band scenario, the variable payload depends on the
+ * size of the parameter.
+ */
+struct adm_cmd_rsp_get_pp_params_v5 {
+	u32                  status;
+	/* Status message (error code).*/
+} __packed;
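+
+/*
+ * Illustrative note (editor's addition): for an in-band query, the
+ * client sends #ADM_CMD_GET_PP_PARAMS_V5 with mem_map_handle and
+ * both data payload address words set to 0. The ACK payload then
+ * carries, after the status word above, an adm_param_data_v5 header
+ * followed by up to param_max_size bytes of parameter data:
+ *
+ *	struct adm_cmd_rsp_get_pp_params_v5 status;  // 0 on success
+ *	struct adm_param_data_v5 pd;                 // echoed IDs/size
+ *	u8 data[];                                   // pd.param_size bytes
+ */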
+
+/* Structure for holding soft stepping volume parameters. */
+
+/*
+ * Payload of the #ASM_PARAM_ID_SOFT_VOL_STEPPING_PARAMETERS
+ * parameters used by the Volume Control module.
+ */
+
+struct audproc_softvolume_params {
+	u32 period;
+	u32 step;
+	u32 rampingcurve;
+} __packed;
+
+/*
+ * ID of the Media Format Converter (MFC) module.
+ * This module supports the following parameter IDs:
+ * #AUDPROC_PARAM_ID_MFC_OUTPUT_MEDIA_FORMAT
+ * #AUDPROC_CHMIXER_PARAM_ID_COEFF
+ */
+#define AUDPROC_MODULE_ID_MFC                               0x00010912
+
+/* ID of the Output Media Format parameters used by
+ * AUDPROC_MODULE_ID_MFC.
+ */
+#define AUDPROC_PARAM_ID_MFC_OUTPUT_MEDIA_FORMAT            0x00010913
+
+/* ID of the Channel Mixer module, which is used to configure
+ * channel-mixer related parameters.
+ * This module supports the AUDPROC_CHMIXER_PARAM_ID_COEFF parameter ID.
+ */
+#define AUDPROC_MODULE_ID_CHMIXER                           0x00010341
+
+/* ID of the Coefficient parameter used by AUDPROC_MODULE_ID_CHMIXER
+ * to configure the channel mixer weighting coefficients.
+ */
+#define AUDPROC_CHMIXER_PARAM_ID_COEFF                      0x00010342
+
+/* Payload of the per-session, per-device parameter data of the
+ *   #ADM_CMD_SET_PSPD_MTMX_STRTR_PARAMS_V5 command or
+ *   #ADM_CMD_SET_PSPD_MTMX_STRTR_PARAMS_V6 command.
+ * Immediately following this structure are param_size bytes of parameter
+ *   data. The structure and size depend on the module_id/param_id pair.
+ */
+struct adm_pspd_param_data_t {
+	uint32_t module_id;
+	uint32_t param_id;
+	uint16_t param_size;
+	uint16_t reserved;
+} __packed;
+
+struct audproc_mfc_output_media_fmt {
+	struct adm_cmd_set_pp_params_v5 params;
+	struct adm_param_data_v5 data;
+	uint32_t sampling_rate;
+	uint16_t bits_per_sample;
+	uint16_t num_channels;
+	uint16_t channel_type[8];
+} __packed;
+
+struct audproc_volume_ctrl_master_gain {
+	struct adm_cmd_set_pp_params_v5 params;
+	struct adm_param_data_v5 data;
+	/* Linear gain in Q13 format. */
+	uint16_t                  master_gain;
+	/* Clients must set this field to zero. */
+	uint16_t                  reserved;
+} __packed;
+
+struct audproc_soft_step_volume_params {
+	struct adm_cmd_set_pp_params_v5 params;
+	struct adm_param_data_v5 data;
+/*
+ * Period in milliseconds.
+ * Supported values: 0 to 15000
+ */
+	uint32_t                  period;
+/*
+ * Step in microseconds.
+ * Supported values: 0 to 15000000
+ */
+	uint32_t                  step;
+/*
+ * Ramping curve type.
+ * Supported values:
+ * - #AUDPROC_PARAM_SVC_RAMPINGCURVE_LINEAR
+ * - #AUDPROC_PARAM_SVC_RAMPINGCURVE_EXP
+ * - #AUDPROC_PARAM_SVC_RAMPINGCURVE_LOG
+ */
+	uint32_t                  ramping_curve;
+} __packed;
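+
+/*
+ * Illustrative sketch (editor's addition): a 200 ms linear volume
+ * ramp evaluated in 1 ms steps, using the value ranges documented
+ * above (period in milliseconds, step in microseconds). "vol" is a
+ * struct audproc_soft_step_volume_params instance.
+ *
+ *	vol.period = 200;		// 0..15000 ms
+ *	vol.step = 1000;		// 0..15000000 us
+ *	vol.ramping_curve = AUDPROC_PARAM_SVC_RAMPINGCURVE_LINEAR;
+ */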
+
+struct audproc_enable_param_t {
+	struct adm_cmd_set_pp_params_inband_v5 pp_params;
+	/*
+	 * Specifies whether the audio processing module is enabled.
+	 * This is a generic parameter used to configure or query the
+	 * state of any audio processing module.
+	 *
+	 * @values 0 : Disable 1 : Enable
+	 */
+	uint32_t                  enable;
+};
+
+/*
+ * Allows a client to control the gains on various session-to-COPP paths.
+ */
+#define ADM_CMD_MATRIX_RAMP_GAINS_V5                                 0x0001032C
+
+/*
+ * Allows a client to control the gains on various session-to-COPP paths.
+ * Supports a maximum of 32 channels.
+ */
+#define ADM_CMD_MATRIX_RAMP_GAINS_V7                                 0x0001036C
+
+/* Indicates that the target gain in the
+ *	current adm_session_copp_gain_v5
+ *	structure is to be applied to all
+ *	the session-to-COPP paths that exist for
+ *	the specified session.
+ */
+#define ADM_CMD_MATRIX_RAMP_GAINS_COPP_ID_ALL_CONNECTED_COPPS     0xFFFF
+
+/* Indicates that the target gain is to be applied
+ * immediately to the specified session-to-COPP path,
+ * without ramping.
+ */
+#define ADM_CMD_MATRIX_RAMP_GAINS_RAMP_DURATION_IMMEDIATE         0x0000
+
+/* Enumeration for a linear ramping curve.*/
+#define ADM_CMD_MATRIX_RAMP_GAINS_RAMP_CURVE_LINEAR               0x0000
+
+/* Payload of the #ADM_CMD_MATRIX_RAMP_GAINS_V5 command.
+ * Immediately following this structure are num_gains instances
+ * of the adm_session_copp_gain_v5 structure.
+ */
+struct adm_cmd_matrix_ramp_gains_v5 {
+	u32                  matrix_id;
+/* Specifies whether the matrix ID is Audio Rx (0) or Audio Tx (1).
+ * Use the ADM_MATRIX_ID_AUDIO_RX or ADM_MATRIX_ID_AUDIOX
+ * macros to set this field.
+ */
+
+	u16                  num_gains;
+	/* Number of gains being applied. */
+
+	u16                  reserved_for_align;
+	/* Reserved. This field must be set to zero.*/
+} __packed;
+
+/*  Session-to-COPP path gain structure, used by the
+ *	#ADM_CMD_MATRIX_RAMP_GAINS_V5 command.
+ *	This structure specifies the target
+ *	gain (per channel) that must be applied
+ *	to a particular session-to-COPP path in
+ *	the audio matrix. The structure can
+ *	also be used to apply the gain globally
+ *	to all session-to-COPP paths that
+ *	exist for the given session.
+ *	The aDSP uses device channel mapping to
+ *	determine which channel gains to
+ *	use from this command. For example,
+ *	if the device is configured as stereo,
+ *	the aDSP uses only target_gain_ch_1 and
+ *	target_gain_ch_2, and it ignores
+ *	the others.
+ */
+struct adm_session_copp_gain_v5 {
+	u16                  session_id;
+/* Handle of the ASM session.
+ *	Supported values: 1 to 8.
+ */
+
+	u16                  copp_id;
+/* Handle of the COPP. Gain will be applied on the Session ID
+ * COPP ID path.
+ */
+
+	u16                  ramp_duration;
+/* Duration (in milliseconds) of the ramp over which target
+ * gains are to be applied. Use
+ * #ADM_CMD_MATRIX_RAMP_GAINS_RAMP_DURATION_IMMEDIATE
+ * to indicate that the gain must be applied immediately.
+ */
+
+	u16                  step_duration;
+/* Duration (in milliseconds) of each step in the ramp.
+ * This parameter is ignored if ramp_duration is equal to
+ * #ADM_CMD_MATRIX_RAMP_GAINS_RAMP_DURATION_IMMEDIATE.
+ * Supported value: 1
+ */
+
+	u16                  ramp_curve;
+/* Type of ramping curve.
+ * Supported value: #ADM_CMD_MATRIX_RAMP_GAINS_RAMP_CURVE_LINEAR
+ */
+
+	u16                  reserved_for_align;
+	/* Reserved. This field must be set to zero. */
+
+	u16                  target_gain_ch_1;
+	/* Target linear gain for channel 1 in Q13 format. */
+
+	u16                  target_gain_ch_2;
+	/* Target linear gain for channel 2 in Q13 format. */
+
+	u16                  target_gain_ch_3;
+	/* Target linear gain for channel 3 in Q13 format. */
+
+	u16                  target_gain_ch_4;
+	/* Target linear gain for channel 4 in Q13 format. */
+
+	u16                  target_gain_ch_5;
+	/* Target linear gain for channel 5 in Q13 format. */
+
+	u16                  target_gain_ch_6;
+	/* Target linear gain for channel 6 in Q13 format. */
+
+	u16                  target_gain_ch_7;
+	/* Target linear gain for channel 7 in Q13 format. */
+
+	u16                  target_gain_ch_8;
+	/* Target linear gain for channel 8 in Q13 format. */
+} __packed;
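+
+/*
+ * Worked example (editor's addition): gains are linear values in
+ * Q13 format, so unity is 1.0 * 2^13 = 0x2000 and half amplitude
+ * (about -6 dB) is 0x1000. A 500 ms linear stereo fade to -6 dB on
+ * one session-to-COPP path ("g" is an adm_session_copp_gain_v5,
+ * my_copp_id is a placeholder):
+ *
+ *	g.session_id = 1;
+ *	g.copp_id = my_copp_id;	// or ..._ALL_CONNECTED_COPPS
+ *	g.ramp_duration = 500;	// ms
+ *	g.step_duration = 1;	// only supported value
+ *	g.ramp_curve = ADM_CMD_MATRIX_RAMP_GAINS_RAMP_CURVE_LINEAR;
+ *	g.target_gain_ch_1 = 0x1000;
+ *	g.target_gain_ch_2 = 0x1000;
+ */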
+
+/* Payload of the #ADM_CMD_MATRIX_RAMP_GAINS_V7 command.
+ * Immediately following this structure are num_gains instances
+ * of the adm_session_copp_gain_v7 structure.
+ */
+struct adm_cmd_matrix_ramp_gains_v7 {
+	struct apr_hdr       hdr;
+	u32                  matrix_id;
+/* Specifies whether the matrix ID is Audio Rx (0) or Audio Tx (1).
+ * Use the ADM_MATRIX_ID_AUDIO_RX or ADM_MATRIX_ID_AUDIOX
+ * macros to set this field.
+ */
+
+	u16                  num_gains;
+	/* Number of gains being applied. */
+
+	u16                  reserved_for_align;
+	/* Reserved. This field must be set to zero.*/
+} __packed;
+
+/*  Session-to-COPP path gain structure, used by the
+ *	#ADM_CMD_MATRIX_RAMP_GAINS_V7 command.
+ *	This structure specifies the target
+ *	gain (per channel) that must be applied
+ *	to a particular session-to-COPP path in
+ *	the audio matrix. The structure can
+ *	also be used to apply the gain globally
+ *	to all session-to-COPP paths that
+ *	exist for the given session.
+ *	The aDSP uses device channel mapping to
+ *	determine which channel gains to
+ *	use from this command. For example,
+ *	if the device is configured as stereo,
+ *	the aDSP uses only target_gain_ch_1 and
+ *	target_gain_ch_2, and it ignores
+ *	the others.
+ */
+struct adm_session_copp_gain_v7 {
+	u16                  session_id;
+/* Handle of the ASM session.
+ *	Supported values: 1 to 8.
+ */
+
+	u16                  copp_id;
+/* Handle of the COPP. Gain will be applied on the Session ID
+ * COPP ID path.
+ */
+
+	u16                  ramp_duration;
+/* Duration (in milliseconds) of the ramp over which target
+ * gains are to be applied. Use
+ * #ADM_CMD_MATRIX_RAMP_GAINS_RAMP_DURATION_IMMEDIATE
+ * to indicate that the gain must be applied immediately.
+ */
+
+	u16                  step_duration;
+/* Duration (in milliseconds) of each step in the ramp.
+ * This parameter is ignored if ramp_duration is equal to
+ * #ADM_CMD_MATRIX_RAMP_GAINS_RAMP_DURATION_IMMEDIATE.
+ * Supported value: 1
+ */
+
+	u16                  ramp_curve;
+/* Type of ramping curve.
+ * Supported value: #ADM_CMD_MATRIX_RAMP_GAINS_RAMP_CURVE_LINEAR
+ */
+
+	u16                  stream_type;
+/* Type of stream.
+ * Supported values: #STREAM_TYPE_ASM, #STREAM_TYPE_LSM
+ */
+	u16                  num_channels;
+/* Number of channels on which gain needs to be applied.
+ * Supported value: 1 to 32.
+ */
+	u16                  reserved_for_align;
+	/* Reserved. This field must be set to zero. */
+} __packed;
+
+/* Allows to set mute/unmute on various session-to-COPP paths.
+ *	For every session-to-COPP path (stream-device interconnection),
+ *	mute/unmute can be set individually on the output channels.
+ */
+#define ADM_CMD_MATRIX_MUTE_V5                                0x0001032D
+
+/* Allows to set mute/unmute on various session-to-COPP paths.
+ *	For every session-to-COPP path (stream-device interconnection),
+ *	mute/unmute can be set individually on the output channels.
+ */
+#define ADM_CMD_MATRIX_MUTE_V7                                0x0001036D
+
+/* Indicates that mute/unmute in the current
+ * adm_session_copp_mute_v5 structure is to be applied to all
+ * the session-to-COPP paths that exist for the specified
+ * session.
+ */
+#define ADM_CMD_MATRIX_MUTE_COPP_ID_ALL_CONNECTED_COPPS     0xFFFF
+
+/*  Payload of the #ADM_CMD_MATRIX_MUTE_V5 command*/
+struct adm_cmd_matrix_mute_v5 {
+	u32                  matrix_id;
+/* Specifies whether the matrix ID is Audio Rx (0) or Audio Tx (1).
+ * Use the ADM_MATRIX_ID_AUDIO_RX or  ADM_MATRIX_ID_AUDIOX
+ * macros to set this field.
+ */
+
+	u16                  session_id;
+/* Handle of the ASM session.
+ * Supported values: 1 to 8.
+ */
+
+	u16                  copp_id;
+/* Handle of the COPP.
+ * Use ADM_CMD_MATRIX_MUTE_COPP_ID_ALL_CONNECTED_COPPS
+ * to indicate that mute/unmute must be applied to
+ * all the COPPs connected to session_id.
+ * Supported values:
+ * - 0xFFFF -- Apply mute/unmute to all connected COPPs
+ * - Other values -- Valid COPP ID
+ */
+
+	u8                  mute_flag_ch_1;
+	/* Mute flag for channel 1 is set to unmute (0) or mute (1). */
+
+	u8                  mute_flag_ch_2;
+	/* Mute flag for channel 2 is set to unmute (0) or mute (1). */
+
+	u8                  mute_flag_ch_3;
+	/* Mute flag for channel 3 is set to unmute (0) or mute (1). */
+
+	u8                  mute_flag_ch_4;
+	/* Mute flag for channel 4 is set to unmute (0) or mute (1). */
+
+	u8                  mute_flag_ch_5;
+	/* Mute flag for channel 5 is set to unmute (0) or mute (1). */
+
+	u8                  mute_flag_ch_6;
+	/* Mute flag for channel 6 is set to unmute (0) or mute (1). */
+
+	u8                  mute_flag_ch_7;
+	/* Mute flag for channel 7 is set to unmute (0) or mute (1). */
+
+	u8                  mute_flag_ch_8;
+	/* Mute flag for channel 8 is set to unmute (0) or mute (1). */
+
+	u16                 ramp_duration;
+/* Period (in milliseconds) over which the soft mute/unmute will be
+ * applied.
+ * Supported values: 0 (Default) to 0xFFFF
+ * The default of 0 means mute/unmute will be applied immediately.
+ */
+
+	u16                 reserved_for_align;
+	/* Clients must set this field to zero.*/
+} __packed;
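+
+/*
+ * Illustrative sketch (editor's addition): muting both channels of a
+ * stereo path on every COPP connected to a session, with a short
+ * 10 ms soft ramp to avoid clicks ("m" is an adm_cmd_matrix_mute_v5):
+ *
+ *	m.matrix_id = 0;	// Audio Rx matrix
+ *	m.session_id = 1;
+ *	m.copp_id = ADM_CMD_MATRIX_MUTE_COPP_ID_ALL_CONNECTED_COPPS;
+ *	m.mute_flag_ch_1 = 1;	// 1 = mute, 0 = unmute
+ *	m.mute_flag_ch_2 = 1;
+ *	m.ramp_duration = 10;	// 0 = apply immediately
+ */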
+
+/*  Payload of the #ADM_CMD_MATRIX_MUTE_V7 command*/
+struct adm_cmd_matrix_mute_v7 {
+	struct apr_hdr       hdr;
+	u32                  matrix_id;
+/* Specifies whether the matrix ID is Audio Rx (0) or Audio Tx (1).
+ * Use the ADM_MATRIX_ID_AUDIO_RX or  ADM_MATRIX_ID_AUDIOX
+ * macros to set this field.
+ */
+
+	u16                  session_id;
+/* Handle of the ASM session.
+ * Supported values: 1 to 8.
+ */
+
+	u16                  copp_id;
+/* Handle of the COPP.
+ * Use ADM_CMD_MATRIX_MUTE_COPP_ID_ALL_CONNECTED_COPPS
+ * to indicate that mute/unmute must be applied to
+ * all the COPPs connected to session_id.
+ * Supported values:
+ * - 0xFFFF -- Apply mute/unmute to all connected COPPs
+ * - Other values -- Valid COPP ID
+ */
+
+	u16                  ramp_duration;
+/* Duration (in milliseconds) of the ramp over which target
+ * gains are to be applied. Use
+ * #ADM_CMD_MATRIX_RAMP_GAINS_RAMP_DURATION_IMMEDIATE
+ * to indicate that the gain must be applied immediately.
+ */
+
+	u16                  stream_type;
+/* Specifies whether the stream is connected on the ASM or LSM.
+ * Supported value: 1
+ */
+	u16                  num_channels;
+/* Number of channels on which mute/unmute needs to be applied.
+ * Supported values: 1 to 32
+ */
+} __packed;
+
+#define ASM_PARAM_ID_AAC_STEREO_MIX_COEFF_SELECTION_FLAG_V2 (0x00010DD8)
+
+struct asm_aac_stereo_mix_coeff_selection_param_v2 {
+	struct apr_hdr          hdr;
+	u32                     param_id;
+	u32                     param_size;
+	u32                     aac_stereo_mix_coeff_flag;
+} __packed;
+
+/* Allows a client to connect the desired stream to
+ * the desired AFE port through the stream router.
+ *
+ * This command allows the client to connect a specified session
+ * to a specified AFE port. It is used only for compressed
+ * streams opened using the #ASM_STREAM_CMD_OPEN_WRITE_COMPRESSED
+ * or #ASM_STREAM_CMD_OPEN_READ_COMPRESSED command.
+ *
+ * @prerequisites
+ * Session ID and AFE Port ID must be valid.
+ * #ASM_STREAM_CMD_OPEN_WRITE_COMPRESSED or
+ * #ASM_STREAM_CMD_OPEN_READ_COMPRESSED
+ * must have been called on this session.
+ */
+
+#define ADM_CMD_CONNECT_AFE_PORT_V5	0x0001032E
+#define ADM_CMD_DISCONNECT_AFE_PORT_V5	0x0001032F
+/* Enumeration for the Rx stream router ID.*/
+#define ADM_STRTR_ID_RX                    0
+/* Enumeration for the Tx stream router ID.*/
+#define ADM_STRTR_IDX                    1
+
+/*  Payload of the #ADM_CMD_CONNECT_AFE_PORT_V5 command.*/
+struct adm_cmd_connect_afe_port_v5 {
+	struct apr_hdr     hdr;
+	u8                  mode;
+/* ID of the stream router (RX/TX). Use the
+ * ADM_STRTR_ID_RX or ADM_STRTR_IDX macros
+ * to set this field.
+ */
+
+	u8                  session_id;
+	/* Session ID of the stream to connect */
+
+	u16                 afe_port_id;
+	/* Port ID of the AFE port to connect to.*/
+	u32                 num_channels;
+/* Number of device channels.
+ * Supported values: 2 (Audio Sample Packet),
+ * 8 (HBR Audio Stream Sample Packet)
+ */
+
+	u32                 sampling_rate;
+/* Device sampling rate.
+ * Supported values: Any
+ */
+} __packed;
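+
+/*
+ * Illustrative sketch (editor's addition): routing a compressed
+ * session to an AFE port via the Rx stream router. The session is
+ * assumed to have been opened with
+ * ASM_STREAM_CMD_OPEN_WRITE_COMPRESSED; HDMI_RX (defined below) is
+ * used purely as an example port.
+ *
+ *	c.mode = ADM_STRTR_ID_RX;
+ *	c.session_id = 1;
+ *	c.afe_port_id = HDMI_RX;
+ *	c.num_channels = 2;		// Audio Sample Packet
+ *	c.sampling_rate = 48000;
+ */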
+
+
+/* adsp_adm_api.h */
+
+
+/* Port ID. Update afe_get_port_index
+ * when a new port is added here.
+ */
+#define PRIMARY_I2S_RX 0
+#define PRIMARY_I2S_TX 1
+#define SECONDARY_I2S_RX 4
+#define SECONDARY_I2S_TX 5
+#define MI2S_RX 6
+#define MI2S_TX 7
+#define HDMI_RX 8
+#define RSVD_2 9
+#define RSVD_3 10
+#define DIGI_MIC_TX 11
+#define VOICE2_PLAYBACK_TX 0x8002
+#define VOICE_RECORD_RX 0x8003
+#define VOICE_RECORD_TX 0x8004
+#define VOICE_PLAYBACK_TX 0x8005
+
+/* Slimbus Multi channel port id pool  */
+#define SLIMBUS_0_RX		0x4000
+#define SLIMBUS_0_TX		0x4001
+#define SLIMBUS_1_RX		0x4002
+#define SLIMBUS_1_TX		0x4003
+#define SLIMBUS_2_RX		0x4004
+#define SLIMBUS_2_TX		0x4005
+#define SLIMBUS_3_RX		0x4006
+#define SLIMBUS_3_TX		0x4007
+#define SLIMBUS_4_RX		0x4008
+#define SLIMBUS_4_TX		0x4009
+#define SLIMBUS_5_RX		0x400a
+#define SLIMBUS_5_TX		0x400b
+#define SLIMBUS_6_RX		0x400c
+#define SLIMBUS_6_TX		0x400d
+#define SLIMBUS_7_RX		0x400e
+#define SLIMBUS_7_TX		0x400f
+#define SLIMBUS_8_RX		0x4010
+#define SLIMBUS_8_TX		0x4011
+#define SLIMBUS_PORT_LAST	SLIMBUS_8_TX
+#define INT_BT_SCO_RX 0x3000
+#define INT_BT_SCO_TX 0x3001
+#define INT_BT_A2DP_RX 0x3002
+#define INT_FM_RX 0x3004
+#define INT_FM_TX 0x3005
+#define RT_PROXY_PORT_001_RX	0x2000
+#define RT_PROXY_PORT_001_TX	0x2001
+#define DISPLAY_PORT_RX	0x6020
+
+#define AFE_PORT_INVALID 0xFFFF
+#define SLIMBUS_INVALID AFE_PORT_INVALID
+
+#define AFE_PORT_CMD_START 0x000100ca
+
+#define AFE_EVENT_RTPORT_START 0
+#define AFE_EVENT_RTPORT_STOP 1
+#define AFE_EVENT_RTPORT_LOW_WM 2
+#define AFE_EVENT_RTPORT_HI_WM 3
+
+#define ADSP_AFE_VERSION    0x00200000
+
+/* Size of the range of port IDs for the audio interface. */
+#define  AFE_PORT_ID_AUDIO_IF_PORT_RANGE_SIZE	0xF
+
+/* Size of the range of port IDs for internal BT-FM ports. */
+#define AFE_PORT_ID_INTERNAL_BT_FM_RANGE_SIZE	0x6
+
+/* Size of the range of port IDs for SLIMbus multichannel
+ * ports.
+ */
+#define AFE_PORT_ID_SLIMBUS_RANGE_SIZE	0xA
+
+/* Size of the range of port IDs for real-time proxy ports. */
+#define  AFE_PORT_ID_RT_PROXY_PORT_RANGE_SIZE	0x2
+
+/* Size of the range of port IDs for pseudoports. */
+#define AFE_PORT_ID_PSEUDOPORT_RANGE_SIZE	0x5
+
+/* Start of the range of port IDs for the audio interface. */
+#define  AFE_PORT_ID_AUDIO_IF_PORT_RANGE_START	0x1000
+
+/* End of the range of port IDs for the audio interface. */
+#define  AFE_PORT_ID_AUDIO_IF_PORT_RANGE_END \
+	(AFE_PORT_ID_AUDIO_IF_PORT_RANGE_START +\
+	AFE_PORT_ID_AUDIO_IF_PORT_RANGE_SIZE - 1)
+
+/* Start of the range of port IDs for real-time proxy ports. */
+#define  AFE_PORT_ID_RT_PROXY_PORT_RANGE_START	0x2000
+
+/* End of the range of port IDs for real-time proxy ports. */
+#define  AFE_PORT_ID_RT_PROXY_PORT_RANGE_END \
+	(AFE_PORT_ID_RT_PROXY_PORT_RANGE_START +\
+	AFE_PORT_ID_RT_PROXY_PORT_RANGE_SIZE-1)
+
+/* Start of the range of port IDs for internal BT-FM devices. */
+#define AFE_PORT_ID_INTERNAL_BT_FM_RANGE_START	0x3000
+
+/* End of the range of port IDs for internal BT-FM devices. */
+#define AFE_PORT_ID_INTERNAL_BT_FM_RANGE_END \
+	(AFE_PORT_ID_INTERNAL_BT_FM_RANGE_START +\
+	AFE_PORT_ID_INTERNAL_BT_FM_RANGE_SIZE-1)
+
+/*	Start of the range of port IDs for SLIMbus devices. */
+#define AFE_PORT_ID_SLIMBUS_RANGE_START	0x4000
+
+/*	End of the range of port IDs for SLIMbus devices. */
+#define AFE_PORT_ID_SLIMBUS_RANGE_END \
+	(AFE_PORT_ID_SLIMBUS_RANGE_START +\
+	AFE_PORT_ID_SLIMBUS_RANGE_SIZE-1)
+
+/* Start of the range of port IDs for pseudoports. */
+#define AFE_PORT_ID_PSEUDOPORT_RANGE_START	0x8001
+
+/* End of the range of port IDs for pseudoports.  */
+#define AFE_PORT_ID_PSEUDOPORT_RANGE_END \
+	(AFE_PORT_ID_PSEUDOPORT_RANGE_START +\
+	AFE_PORT_ID_PSEUDOPORT_RANGE_SIZE-1)
+
+/* Start of the range of port IDs for TDM devices. */
+#define AFE_PORT_ID_TDM_PORT_RANGE_START	0x9000
+
+/* End of the range of port IDs for TDM devices. */
+#define AFE_PORT_ID_TDM_PORT_RANGE_END \
+	(AFE_PORT_ID_TDM_PORT_RANGE_START+0x40-1)
+
+/* Size of the range of port IDs for TDM ports. */
+#define AFE_PORT_ID_TDM_PORT_RANGE_SIZE \
+	(AFE_PORT_ID_TDM_PORT_RANGE_END - \
+	AFE_PORT_ID_TDM_PORT_RANGE_START+1)
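+
+/*
+ * Worked example (editor's addition): the range macros above expand
+ * to plain arithmetic, e.g.
+ *
+ *	AFE_PORT_ID_AUDIO_IF_PORT_RANGE_END
+ *		= 0x1000 + 0xF - 1    = 0x100E
+ *	AFE_PORT_ID_TDM_PORT_RANGE_END
+ *		= 0x9000 + 0x40 - 1   = 0x903F
+ *	AFE_PORT_ID_TDM_PORT_RANGE_SIZE
+ *		= 0x903F - 0x9000 + 1 = 0x40 (64 port IDs)
+ */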
+
+#define AFE_PORT_ID_PRIMARY_MI2S_RX         0x1000
+#define AFE_PORT_ID_PRIMARY_MI2S_TX         0x1001
+#define AFE_PORT_ID_SECONDARY_MI2S_RX       0x1002
+#define AFE_PORT_ID_SECONDARY_MI2S_TX       0x1003
+#define AFE_PORT_ID_TERTIARY_MI2S_RX        0x1004
+#define AFE_PORT_ID_TERTIARY_MI2S_TX        0x1005
+#define AFE_PORT_ID_QUATERNARY_MI2S_RX      0x1006
+#define AFE_PORT_ID_QUATERNARY_MI2S_TX      0x1007
+#define AUDIO_PORT_ID_I2S_RX                0x1008
+#define AFE_PORT_ID_DIGITAL_MIC_TX          0x1009
+#define AFE_PORT_ID_PRIMARY_PCM_RX          0x100A
+#define AFE_PORT_ID_PRIMARY_PCM_TX          0x100B
+#define AFE_PORT_ID_SECONDARY_PCM_RX        0x100C
+#define AFE_PORT_ID_SECONDARY_PCM_TX        0x100D
+#define AFE_PORT_ID_MULTICHAN_HDMI_RX       0x100E
+#define AFE_PORT_ID_SECONDARY_MI2S_RX_SD1   0x1010
+#define AFE_PORT_ID_TERTIARY_PCM_RX         0x1012
+#define AFE_PORT_ID_TERTIARY_PCM_TX         0x1013
+#define AFE_PORT_ID_QUATERNARY_PCM_RX       0x1014
+#define AFE_PORT_ID_QUATERNARY_PCM_TX       0x1015
+#define AFE_PORT_ID_QUINARY_MI2S_RX         0x1016
+#define AFE_PORT_ID_QUINARY_MI2S_TX         0x1017
+/* ID of the senary MI2S Rx port. */
+#define AFE_PORT_ID_SENARY_MI2S_RX          0x1018
+/* ID of the senary MI2S Tx port. */
+#define AFE_PORT_ID_SENARY_MI2S_TX          0x1019
+/* ID of the Internal 0 MI2S Rx port */
+#define AFE_PORT_ID_INT0_MI2S_RX                 0x102E
+/* ID of the Internal 0 MI2S Tx port */
+#define AFE_PORT_ID_INT0_MI2S_TX                 0x102F
+/* ID of the Internal 1 MI2S Rx port */
+#define AFE_PORT_ID_INT1_MI2S_RX                 0x1030
+/* ID of the Internal 1 MI2S Tx port */
+#define AFE_PORT_ID_INT1_MI2S_TX                 0x1031
+/* ID of the Internal 2 MI2S Rx port */
+#define AFE_PORT_ID_INT2_MI2S_RX                 0x1032
+/* ID of the Internal 2 MI2S Tx port */
+#define AFE_PORT_ID_INT2_MI2S_TX                 0x1033
+/* ID of the Internal 3 MI2S Rx port */
+#define AFE_PORT_ID_INT3_MI2S_RX                 0x1034
+/* ID of the Internal 3 MI2S Tx port */
+#define AFE_PORT_ID_INT3_MI2S_TX                 0x1035
+/* ID of the Internal 4 MI2S Rx port */
+#define AFE_PORT_ID_INT4_MI2S_RX                 0x1036
+/* ID of the Internal 4 MI2S Tx port */
+#define AFE_PORT_ID_INT4_MI2S_TX                 0x1037
+/* ID of the Internal 5 MI2S Rx port */
+#define AFE_PORT_ID_INT5_MI2S_RX                 0x1038
+/* ID of the Internal 5 MI2S Tx port */
+#define AFE_PORT_ID_INT5_MI2S_TX                 0x1039
+/* ID of the Internal 6 MI2S Rx port */
+#define AFE_PORT_ID_INT6_MI2S_RX                 0x103A
+/* ID of the Internal 6 MI2S Tx port */
+#define AFE_PORT_ID_INT6_MI2S_TX                 0x103B
+#define AFE_PORT_ID_SPDIF_RX                0x5000
+#define  AFE_PORT_ID_RT_PROXY_PORT_001_RX   0x2000
+#define  AFE_PORT_ID_RT_PROXY_PORT_001_TX   0x2001
+#define AFE_PORT_ID_INTERNAL_BT_SCO_RX      0x3000
+#define AFE_PORT_ID_INTERNAL_BT_SCO_TX      0x3001
+#define AFE_PORT_ID_INTERNAL_BT_A2DP_RX     0x3002
+#define AFE_PORT_ID_INTERNAL_FM_RX          0x3004
+#define AFE_PORT_ID_INTERNAL_FM_TX          0x3005
+/* SLIMbus Rx port on channel 0. */
+#define AFE_PORT_ID_SLIMBUS_MULTI_CHAN_0_RX      0x4000
+/* SLIMbus Tx port on channel 0. */
+#define AFE_PORT_ID_SLIMBUS_MULTI_CHAN_0_TX      0x4001
+/* SLIMbus Rx port on channel 1. */
+#define AFE_PORT_ID_SLIMBUS_MULTI_CHAN_1_RX      0x4002
+/* SLIMbus Tx port on channel 1. */
+#define AFE_PORT_ID_SLIMBUS_MULTI_CHAN_1_TX      0x4003
+/* SLIMbus Rx port on channel 2. */
+#define AFE_PORT_ID_SLIMBUS_MULTI_CHAN_2_RX      0x4004
+/* SLIMbus Tx port on channel 2. */
+#define AFE_PORT_ID_SLIMBUS_MULTI_CHAN_2_TX      0x4005
+/* SLIMbus Rx port on channel 3. */
+#define AFE_PORT_ID_SLIMBUS_MULTI_CHAN_3_RX      0x4006
+/* SLIMbus Tx port on channel 3. */
+#define AFE_PORT_ID_SLIMBUS_MULTI_CHAN_3_TX      0x4007
+/* SLIMbus Rx port on channel 4. */
+#define AFE_PORT_ID_SLIMBUS_MULTI_CHAN_4_RX      0x4008
+/* SLIMbus Tx port on channel 4. */
+#define AFE_PORT_ID_SLIMBUS_MULTI_CHAN_4_TX      0x4009
+/* SLIMbus Rx port on channel 5. */
+#define AFE_PORT_ID_SLIMBUS_MULTI_CHAN_5_RX      0x400a
+/* SLIMbus Tx port on channel 5. */
+#define AFE_PORT_ID_SLIMBUS_MULTI_CHAN_5_TX      0x400b
+/* SLIMbus Rx port on channel 6. */
+#define AFE_PORT_ID_SLIMBUS_MULTI_CHAN_6_RX      0x400c
+/* SLIMbus Tx port on channel 6. */
+#define AFE_PORT_ID_SLIMBUS_MULTI_CHAN_6_TX      0x400d
+/* SLIMbus Rx port on channel 7. */
+#define AFE_PORT_ID_SLIMBUS_MULTI_CHAN_7_RX      0x400e
+/* SLIMbus Tx port on channel 7. */
+#define AFE_PORT_ID_SLIMBUS_MULTI_CHAN_7_TX      0x400f
+/* SLIMbus Rx port on channel 8. */
+#define AFE_PORT_ID_SLIMBUS_MULTI_CHAN_8_RX      0x4010
+/* SLIMbus Tx port on channel 8. */
+#define AFE_PORT_ID_SLIMBUS_MULTI_CHAN_8_TX      0x4011
+/* AFE Rx port for audio over Display port */
+#define AFE_PORT_ID_HDMI_OVER_DP_RX              0x6020
+/*USB AFE port */
+#define AFE_PORT_ID_USB_RX                       0x7000
+#define AFE_PORT_ID_USB_TX                       0x7001
+
+/* Generic pseudoport 1. */
+#define AFE_PORT_ID_PSEUDOPORT_01      0x8001
+/* Generic pseudoport 2. */
+#define AFE_PORT_ID_PSEUDOPORT_02      0x8002
+
+/* @xreflabel{hdr:AfePortIdPrimaryAuxPcmTx}
+ * Primary Aux PCM Tx port ID.
+ */
+#define AFE_PORT_ID_PRIMARY_PCM_TX      0x100B
+/* Pseudoport that corresponds to the voice Rx path.
+ * For recording, the voice Rx path samples are written to this
+ * port and consumed by the audio path.
+ */
+
+#define AFE_PORT_ID_VOICE_RECORD_RX	0x8003
+
+/* Pseudoport that corresponds to the voice Tx path.
+ * For recording, the voice Tx path samples are written to this
+ * port and consumed by the audio path.
+ */
+
+#define AFE_PORT_ID_VOICE_RECORD_TX	0x8004
+/* Pseudoport that corresponds to in-call voice delivery samples.
+ * During in-call audio delivery, the audio path delivers samples
+ * to this port from where the voice path delivers them on the
+ * Rx path.
+ */
+#define AFE_PORT_ID_VOICE2_PLAYBACK_TX  0x8002
+#define AFE_PORT_ID_VOICE_PLAYBACK_TX   0x8005
+
+#define AFE_PORT_ID_PRIMARY_TDM_RX \
+	(AFE_PORT_ID_TDM_PORT_RANGE_START + 0x00)
+#define AFE_PORT_ID_PRIMARY_TDM_RX_1 \
+	(AFE_PORT_ID_PRIMARY_TDM_RX + 0x02)
+#define AFE_PORT_ID_PRIMARY_TDM_RX_2 \
+	(AFE_PORT_ID_PRIMARY_TDM_RX + 0x04)
+#define AFE_PORT_ID_PRIMARY_TDM_RX_3 \
+	(AFE_PORT_ID_PRIMARY_TDM_RX + 0x06)
+#define AFE_PORT_ID_PRIMARY_TDM_RX_4 \
+	(AFE_PORT_ID_PRIMARY_TDM_RX + 0x08)
+#define AFE_PORT_ID_PRIMARY_TDM_RX_5 \
+	(AFE_PORT_ID_PRIMARY_TDM_RX + 0x0A)
+#define AFE_PORT_ID_PRIMARY_TDM_RX_6 \
+	(AFE_PORT_ID_PRIMARY_TDM_RX + 0x0C)
+#define AFE_PORT_ID_PRIMARY_TDM_RX_7 \
+	(AFE_PORT_ID_PRIMARY_TDM_RX + 0x0E)
+
+#define AFE_PORT_ID_PRIMARY_TDM_TX \
+	(AFE_PORT_ID_TDM_PORT_RANGE_START + 0x01)
+#define AFE_PORT_ID_PRIMARY_TDM_TX_1 \
+	(AFE_PORT_ID_PRIMARY_TDM_TX + 0x02)
+#define AFE_PORT_ID_PRIMARY_TDM_TX_2 \
+	(AFE_PORT_ID_PRIMARY_TDM_TX + 0x04)
+#define AFE_PORT_ID_PRIMARY_TDM_TX_3 \
+	(AFE_PORT_ID_PRIMARY_TDM_TX + 0x06)
+#define AFE_PORT_ID_PRIMARY_TDM_TX_4 \
+	(AFE_PORT_ID_PRIMARY_TDM_TX + 0x08)
+#define AFE_PORT_ID_PRIMARY_TDM_TX_5 \
+	(AFE_PORT_ID_PRIMARY_TDM_TX + 0x0A)
+#define AFE_PORT_ID_PRIMARY_TDM_TX_6 \
+	(AFE_PORT_ID_PRIMARY_TDM_TX + 0x0C)
+#define AFE_PORT_ID_PRIMARY_TDM_TX_7 \
+	(AFE_PORT_ID_PRIMARY_TDM_TX + 0x0E)
+
+#define AFE_PORT_ID_SECONDARY_TDM_RX \
+	(AFE_PORT_ID_TDM_PORT_RANGE_START + 0x10)
+#define AFE_PORT_ID_SECONDARY_TDM_RX_1 \
+	(AFE_PORT_ID_SECONDARY_TDM_RX + 0x02)
+#define AFE_PORT_ID_SECONDARY_TDM_RX_2 \
+	(AFE_PORT_ID_SECONDARY_TDM_RX + 0x04)
+#define AFE_PORT_ID_SECONDARY_TDM_RX_3 \
+	(AFE_PORT_ID_SECONDARY_TDM_RX + 0x06)
+#define AFE_PORT_ID_SECONDARY_TDM_RX_4 \
+	(AFE_PORT_ID_SECONDARY_TDM_RX + 0x08)
+#define AFE_PORT_ID_SECONDARY_TDM_RX_5 \
+	(AFE_PORT_ID_SECONDARY_TDM_RX + 0x0A)
+#define AFE_PORT_ID_SECONDARY_TDM_RX_6 \
+	(AFE_PORT_ID_SECONDARY_TDM_RX + 0x0C)
+#define AFE_PORT_ID_SECONDARY_TDM_RX_7 \
+	(AFE_PORT_ID_SECONDARY_TDM_RX + 0x0E)
+
+#define AFE_PORT_ID_SECONDARY_TDM_TX \
+	(AFE_PORT_ID_TDM_PORT_RANGE_START + 0x11)
+#define AFE_PORT_ID_SECONDARY_TDM_TX_1 \
+	(AFE_PORT_ID_SECONDARY_TDM_TX + 0x02)
+#define AFE_PORT_ID_SECONDARY_TDM_TX_2 \
+	(AFE_PORT_ID_SECONDARY_TDM_TX + 0x04)
+#define AFE_PORT_ID_SECONDARY_TDM_TX_3 \
+	(AFE_PORT_ID_SECONDARY_TDM_TX + 0x06)
+#define AFE_PORT_ID_SECONDARY_TDM_TX_4 \
+	(AFE_PORT_ID_SECONDARY_TDM_TX + 0x08)
+#define AFE_PORT_ID_SECONDARY_TDM_TX_5 \
+	(AFE_PORT_ID_SECONDARY_TDM_TX + 0x0A)
+#define AFE_PORT_ID_SECONDARY_TDM_TX_6 \
+	(AFE_PORT_ID_SECONDARY_TDM_TX + 0x0C)
+#define AFE_PORT_ID_SECONDARY_TDM_TX_7 \
+	(AFE_PORT_ID_SECONDARY_TDM_TX + 0x0E)
+
+#define AFE_PORT_ID_TERTIARY_TDM_RX \
+	(AFE_PORT_ID_TDM_PORT_RANGE_START + 0x20)
+#define AFE_PORT_ID_TERTIARY_TDM_RX_1 \
+	(AFE_PORT_ID_TERTIARY_TDM_RX + 0x02)
+#define AFE_PORT_ID_TERTIARY_TDM_RX_2 \
+	(AFE_PORT_ID_TERTIARY_TDM_RX + 0x04)
+#define AFE_PORT_ID_TERTIARY_TDM_RX_3 \
+	(AFE_PORT_ID_TERTIARY_TDM_RX + 0x06)
+#define AFE_PORT_ID_TERTIARY_TDM_RX_4 \
+	(AFE_PORT_ID_TERTIARY_TDM_RX + 0x08)
+#define AFE_PORT_ID_TERTIARY_TDM_RX_5 \
+	(AFE_PORT_ID_TERTIARY_TDM_RX + 0x0A)
+#define AFE_PORT_ID_TERTIARY_TDM_RX_6 \
+	(AFE_PORT_ID_TERTIARY_TDM_RX + 0x0C)
+#define AFE_PORT_ID_TERTIARY_TDM_RX_7 \
+	(AFE_PORT_ID_TERTIARY_TDM_RX + 0x0E)
+
+#define AFE_PORT_ID_TERTIARY_TDM_TX \
+	(AFE_PORT_ID_TDM_PORT_RANGE_START + 0x21)
+#define AFE_PORT_ID_TERTIARY_TDM_TX_1 \
+	(AFE_PORT_ID_TERTIARY_TDM_TX + 0x02)
+#define AFE_PORT_ID_TERTIARY_TDM_TX_2 \
+	(AFE_PORT_ID_TERTIARY_TDM_TX + 0x04)
+#define AFE_PORT_ID_TERTIARY_TDM_TX_3 \
+	(AFE_PORT_ID_TERTIARY_TDM_TX + 0x06)
+#define AFE_PORT_ID_TERTIARY_TDM_TX_4 \
+	(AFE_PORT_ID_TERTIARY_TDM_TX + 0x08)
+#define AFE_PORT_ID_TERTIARY_TDM_TX_5 \
+	(AFE_PORT_ID_TERTIARY_TDM_TX + 0x0A)
+#define AFE_PORT_ID_TERTIARY_TDM_TX_6 \
+	(AFE_PORT_ID_TERTIARY_TDM_TX + 0x0C)
+#define AFE_PORT_ID_TERTIARY_TDM_TX_7 \
+	(AFE_PORT_ID_TERTIARY_TDM_TX + 0x0E)
+
+#define AFE_PORT_ID_QUATERNARY_TDM_RX \
+	(AFE_PORT_ID_TDM_PORT_RANGE_START + 0x30)
+#define AFE_PORT_ID_QUATERNARY_TDM_RX_1 \
+	(AFE_PORT_ID_QUATERNARY_TDM_RX + 0x02)
+#define AFE_PORT_ID_QUATERNARY_TDM_RX_2 \
+	(AFE_PORT_ID_QUATERNARY_TDM_RX + 0x04)
+#define AFE_PORT_ID_QUATERNARY_TDM_RX_3 \
+	(AFE_PORT_ID_QUATERNARY_TDM_RX + 0x06)
+#define AFE_PORT_ID_QUATERNARY_TDM_RX_4 \
+	(AFE_PORT_ID_QUATERNARY_TDM_RX + 0x08)
+#define AFE_PORT_ID_QUATERNARY_TDM_RX_5 \
+	(AFE_PORT_ID_QUATERNARY_TDM_RX + 0x0A)
+#define AFE_PORT_ID_QUATERNARY_TDM_RX_6 \
+	(AFE_PORT_ID_QUATERNARY_TDM_RX + 0x0C)
+#define AFE_PORT_ID_QUATERNARY_TDM_RX_7 \
+	(AFE_PORT_ID_QUATERNARY_TDM_RX + 0x0E)
+
+#define AFE_PORT_ID_QUATERNARY_TDM_TX \
+	(AFE_PORT_ID_TDM_PORT_RANGE_START + 0x31)
+#define AFE_PORT_ID_QUATERNARY_TDM_TX_1 \
+	(AFE_PORT_ID_QUATERNARY_TDM_TX + 0x02)
+#define AFE_PORT_ID_QUATERNARY_TDM_TX_2 \
+	(AFE_PORT_ID_QUATERNARY_TDM_TX + 0x04)
+#define AFE_PORT_ID_QUATERNARY_TDM_TX_3 \
+	(AFE_PORT_ID_QUATERNARY_TDM_TX + 0x06)
+#define AFE_PORT_ID_QUATERNARY_TDM_TX_4 \
+	(AFE_PORT_ID_QUATERNARY_TDM_TX + 0x08)
+#define AFE_PORT_ID_QUATERNARY_TDM_TX_5 \
+	(AFE_PORT_ID_QUATERNARY_TDM_TX + 0x0A)
+#define AFE_PORT_ID_QUATERNARY_TDM_TX_6 \
+	(AFE_PORT_ID_QUATERNARY_TDM_TX + 0x0C)
+#define AFE_PORT_ID_QUATERNARY_TDM_TX_7 \
+	(AFE_PORT_ID_QUATERNARY_TDM_TX + 0x0E)
+
+#define AFE_PORT_ID_INVALID             0xFFFF
+
+#define AAC_ENC_MODE_AAC_LC 0x02
+#define AAC_ENC_MODE_AAC_P 0x05
+#define AAC_ENC_MODE_EAAC_P 0x1D
+
+#define AFE_PSEUDOPORT_CMD_START 0x000100cf
+struct afe_pseudoport_start_command {
+	struct apr_hdr hdr;
+	u16 port_id;		/* Pseudo Port 1 = 0x8000 */
+				/* Pseudo Port 2 = 0x8001 */
+				/* Pseudo Port 3 = 0x8002 */
+	u16 timing;		/* FTRT = 0 , AVTimer = 1, */
+} __packed;
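+
+/*
+ * Illustrative sketch (editor's addition): starting pseudoport 2 in
+ * AVTimer mode, per the field comments above (APR header fields
+ * beyond the opcode are omitted here):
+ *
+ *	struct afe_pseudoport_start_command cmd;
+ *
+ *	memset(&cmd, 0, sizeof(cmd));
+ *	cmd.hdr.opcode = AFE_PSEUDOPORT_CMD_START;
+ *	cmd.port_id = 0x8001;	// Pseudo Port 2
+ *	cmd.timing = 1;		// 0 = FTRT, 1 = AVTimer
+ */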
+
+#define AFE_PSEUDOPORT_CMD_STOP 0x000100d0
+struct afe_pseudoport_stop_command {
+	struct apr_hdr hdr;
+	u16 port_id;		/* Pseudo Port 1 = 0x8000 */
+				/* Pseudo Port 2 = 0x8001 */
+				/* Pseudo Port 3 = 0x8002 */
+	u16 reserved;
+} __packed;
+
+
+#define AFE_MODULE_SIDETONE_IIR_FILTER	0x00010202
+#define AFE_PARAM_ID_ENABLE	0x00010203
+
+/* Payload of the #AFE_PARAM_ID_ENABLE parameter, which enables
+ * or disables any module.
+ * The fixed size of this structure is four bytes.
+ */
+
+struct afe_mod_enable_param {
+	u16                  enable;
+	/* Enables (1) or disables (0) the module. */
+
+	u16                  reserved;
+	/* This field must be set to zero. */
+} __packed;
+
+/* ID of the configuration parameter used by the
+ * #AFE_MODULE_SIDETONE_IIR_FILTER module.
+ */
+#define AFE_PARAM_ID_SIDETONE_IIR_FILTER_CONFIG	0x00010204
+#define MAX_SIDETONE_IIR_DATA_SIZE 224
+#define MAX_NO_IIR_FILTER_STAGE    10
+
+struct afe_sidetone_iir_filter_config_params {
+	u16                  num_biquad_stages;
+/* Number of stages.
+ * Supported values: Minimum of 5 and maximum of 10
+ */
+
+	u16                  pregain;
+/* Pregain for the compensating filter response.
+ * Supported values: Any number in Q13 format
+ */
+	uint8_t   iir_config[MAX_SIDETONE_IIR_DATA_SIZE];
+} __packed;
+
+#define AFE_MODULE_LOOPBACK	0x00010205
+#define AFE_PARAM_ID_LOOPBACK_GAIN_PER_PATH	0x00010206
+
+/* Payload of the #AFE_PARAM_ID_LOOPBACK_GAIN_PER_PATH parameter,
+ * which gets/sets loopback gain of a port to an Rx port.
+ * The Tx port ID of the loopback is part of the set_param command.
+ */
+
+/* Payload of the #AFE_PORT_CMD_SET_PARAM_V2 command, which
+ * conveys configuration/calibration settings for the AFE port.
+ */
+struct afe_port_cmd_set_param_v2 {
+	u16 port_id;
+/* Port interface and direction (Rx or Tx) to start. */
+
+	u16 payload_size;
+/* Actual size of the payload in bytes.
+ * This is used for parsing the parameter payload.
+ * Supported values: > 0
+ */
+
+	u32 payload_address_lsw;
+/* LSW of the 64-bit payload address.
+ * The address should be 32-byte, 4 KB aligned and must be
+ * contiguous memory.
+ */
+
+	u32 payload_address_msw;
+/* MSW of the 64-bit payload address.
+ * In the case of a 32-bit shared memory address, this field
+ * must be set to zero.
+ * In the case of a 36-bit shared memory address, bit 4 to
+ * bit 31 must be set to zero.
+ * The address should be 32-byte, 4 KB aligned and must be
+ * contiguous memory.
+ */
+
+	u32 mem_map_handle;
+/* Memory map handle returned by the
+ * AFE_SERVICE_CMD_SHARED_MEM_MAP_REGIONS command.
+ * Supported values:
+ * - NULL -- The parameter data is in-band.
+ * - Non-NULL -- The parameter data is out-of-band; this is a
+ *   pointer to the physical address of the payload data in
+ *   shared memory.
+ * An optional field is available if the parameter data is
+ * in-band: afe_param_data_v2 param_data[...].
+ * For detailed payload content, see the
+ * afe_port_param_data_v2 structure.
+ */
+} __packed;
+
+#define AFE_PORT_CMD_SET_PARAM_V2	0x000100EF
+
+struct afe_port_param_data_v2 {
+	u32 module_id;
+/* ID of the module to be configured.
+ * Supported values: Valid module ID
+ */
+
+	u32 param_id;
+/* ID of the parameter corresponding to the supported parameters
+ * for the module ID.
+ * Supported values: Valid parameter ID
+ */
+
+	u16 param_size;
+/* Actual size of the data for the module_id/param_id pair.
+ * The size is a multiple of four bytes.
+ * Supported values: > 0
+ */
+
+	u16 reserved;
+/* This field must be set to zero. */
+} __packed;
+
+struct afe_loopback_gain_per_path_param {
+	struct apr_hdr	hdr;
+	struct afe_port_cmd_set_param_v2 param;
+	struct afe_port_param_data_v2    pdata;
+	u16                  rx_port_id;
+/* Rx port of the loopback. */
+
+	u16                  gain;
+/* Loopback gain per path of the port.
+ * Supported values: Any number in Q13 format
+ */
+} __packed;
+
+/* Parameter ID used to configure and enable/disable the
+ * loopback path. The difference with respect to the existing
+ * API, AFE_PORT_CMD_LOOPBACK, is that it allows an Rx port to be
+ * configured as the source port of the loopback path. The port_id
+ * in the AFE_PORT_CMD_SET_PARAM command is the source port, which
+ * can be a Tx or Rx port. In addition, the type of routing mode
+ * can be configured to handle different use cases.
+ */
+#define AFE_PARAM_ID_LOOPBACK_CONFIG	0x0001020B
+#define AFE_API_VERSION_LOOPBACK_CONFIG	0x1
+
+enum afe_loopback_routing_mode {
+	LB_MODE_DEFAULT = 1,
+	/* Regular loopback from source to destination port */
+	LB_MODE_SIDETONE,
+	/* Sidetone feed from Tx source to Rx destination port */
+	LB_MODE_EC_REF_VOICE_AUDIO,
+	/* Echo canceller reference, voice + audio + DTMF */
+	LB_MODE_EC_REF_VOICE
+	/* Echo canceller reference, voice alone */
+} __packed;
+
+/* Payload of the #AFE_PARAM_ID_LOOPBACK_CONFIG parameter,
+ * which enables/disables one AFE loopback.
+ */
+struct afe_loopback_cfg_v1 {
+	struct apr_hdr	hdr;
+	struct afe_port_cmd_set_param_v2 param;
+	struct afe_port_param_data_v2    pdata;
+	u32		loopback_cfg_minor_version;
+/* Minor version used for tracking the version of the RMC module
+ * configuration interface.
+ * Supported values: #AFE_API_VERSION_LOOPBACK_CONFIG
+ */
+	u16                  dst_port_id;
+	/* Destination Port Id. */
+	u16                  routing_mode;
+/* Specifies the data path type from the source to the
+ * destination port.
+ * Supported values:
+ * #LB_MODE_DEFAULT
+ * #LB_MODE_SIDETONE
+ * #LB_MODE_EC_REF_VOICE_AUDIO
+ * #LB_MODE_EC_REF_VOICE
+ */
+
+	u16                  enable;
+/* Specifies whether to enable (1) or
+ * disable (0) an AFE loopback.
+ */
+	u16                  reserved;
+/* Reserved for 32-bit alignment. This field must be set to 0.
+ */
+
+} __packed;
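+
+/*
+ * Illustrative sketch (editor's addition): enabling a sidetone
+ * loopback to an Rx destination port. The source port ID travels in
+ * the embedded set-param header; only the loopback-specific fields
+ * are shown, and SLIMBUS_0_RX is used purely as an example.
+ *
+ *	cfg.loopback_cfg_minor_version = AFE_API_VERSION_LOOPBACK_CONFIG;
+ *	cfg.dst_port_id = SLIMBUS_0_RX;
+ *	cfg.routing_mode = LB_MODE_SIDETONE;
+ *	cfg.enable = 1;
+ *	cfg.reserved = 0;
+ */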
+
+struct afe_loopback_sidetone_gain {
+	u16                  rx_port_id;
+	u16                  gain;
+} __packed;
+
+struct loopback_cfg_data {
+	u32                  loopback_cfg_minor_version;
+/* Minor version used for tracking the version of the RMC module
+ * configuration interface.
+ * Supported values: #AFE_API_VERSION_LOOPBACK_CONFIG
+ */
+	u16                  dst_port_id;
+	/* Destination Port Id. */
+	u16                  routing_mode;
+/* Specifies the data path type from the source to the
+ * destination port.
+ * Supported values:
+ * #LB_MODE_DEFAULT
+ * #LB_MODE_SIDETONE
+ * #LB_MODE_EC_REF_VOICE_AUDIO
+ * #LB_MODE_EC_REF_VOICE
+ */
+
+	u16                  enable;
+/* Specifies whether to enable (1) or
+ * disable (0) an AFE loopback.
+ */
+	u16                  reserved;
+/* Reserved for 32-bit alignment. This field must be set to 0.
+ */
+} __packed;
+
+struct afe_st_loopback_cfg_v1 {
+	struct apr_hdr                    hdr;
+	struct afe_port_cmd_set_param_v2  param;
+	struct afe_port_param_data_v2     gain_pdata;
+	struct afe_loopback_sidetone_gain gain_data;
+	struct afe_port_param_data_v2     cfg_pdata;
+	struct loopback_cfg_data          cfg_data;
+} __packed;
+
+struct afe_loopback_iir_cfg_v2 {
+	struct apr_hdr                          hdr;
+	struct afe_port_cmd_set_param_v2        param;
+	struct afe_port_param_data_v2           st_iir_enable_pdata;
+	struct afe_mod_enable_param             st_iir_mode_enable_data;
+	struct afe_port_param_data_v2           st_iir_filter_config_pdata;
+	struct afe_sidetone_iir_filter_config_params st_iir_filter_config_data;
+} __packed;
+#define AFE_MODULE_SPEAKER_PROTECTION	0x00010209
+#define AFE_PARAM_ID_SPKR_PROT_CONFIG	0x0001020a
+#define AFE_API_VERSION_SPKR_PROT_CONFIG	0x1
+#define AFE_SPKR_PROT_EXCURSIONF_LEN	512
+struct afe_spkr_prot_cfg_param_v1 {
+	u32       spkr_prot_minor_version;
+/*
+ * Minor version used for tracking the version of the
+ * speaker protection module configuration interface.
+ * Supported values: #AFE_API_VERSION_SPKR_PROT_CONFIG
+ */
+
+	int16_t        win_size;
+/* Analysis and synthesis window size (nWinSize).
+ * Supported values: 1024, 512, 256 samples
+ */
+
+	int16_t        margin;
+/* Allowable margin for excursion prediction, in L16Q15 format.
+ * This is a control parameter to allow for overestimation of
+ * peak excursion.
+ */
+
+	int16_t        spkr_exc_limit;
+/* Speaker excursion limit, in L16Q15 format. */
+
+	int16_t        spkr_resonance_freq;
+/* Resonance frequency of the speaker; used to define a
+ * frequency range for signal modification.
+ * Supported values: 0 to 2000 Hz
+ */
+
+	int16_t        limhresh;
+/* Threshold of the hard limiter; used to prevent overshooting
+ * beyond a signal level that was set by the limiter prior to
+ * speaker protection.
+ * Supported values: 0 to 32767
+ */
+
+	int16_t        hpf_cut_off_freq;
+/* High pass filter cutoff frequency.
+ * Supported values: 100, 200, 300 Hz
+ */
+
+	int16_t        hpf_enable;
+/* Specifies whether the high pass filter is enabled (0) or
+ * disabled (1).
+ */
+
+	int16_t        reserved;
+/* This field must be set to zero. */
+
+	int32_t        amp_gain;
+/* Amplifier gain in L32Q15 format.
+ * This is the RMS voltage at the loudspeaker when a 0 dBFS tone
+ * is played in the digital domain.
+ */
+
+	int16_t        excursionf[AFE_SPKR_PROT_EXCURSIONF_LEN];
+/* Array of the excursion transfer function.
+ * The peak excursion of the loudspeaker diaphragm is measured
+ * in millimeters for a 1 Vrms sine tone at all FFT bin
+ * frequencies.
+ * Supported values: Q15 format
+ */
+} __packed;
+
+
+#define AFE_SERVICE_CMD_REGISTER_RT_PORT_DRIVER	0x000100E0
+
+/*  Payload of the #AFE_SERVICE_CMD_REGISTER_RT_PORT_DRIVER
+ * command, which registers a real-time port driver
+ * with the AFE service.
+ */
+struct afe_service_cmd_register_rt_port_driver {
+	struct apr_hdr hdr;
+	u16                  port_id;
+/* Port ID with which the real-time driver exchanges data
+ * (registers for events).
+ * Supported values: #AFE_PORT_ID_RT_PROXY_PORT_RANGE_START to
+ * #AFE_PORT_ID_RT_PROXY_PORT_RANGE_END
+ */
+
+	u16                  reserved;
+	/* This field must be set to zero. */
+} __packed;
+
+#define AFE_SERVICE_CMD_UNREGISTER_RT_PORT_DRIVER	0x000100E1
+
+/*  Payload of the #AFE_SERVICE_CMD_UNREGISTER_RT_PORT_DRIVER
+ * command, which unregisters a real-time port driver from
+ * the AFE service.
+ */
+struct afe_service_cmd_unregister_rt_port_driver {
+	struct apr_hdr hdr;
+	u16                  port_id;
+/* Port ID from which the real-time
+ * driver unregisters for events.
+ * Supported values: #AFE_PORT_ID_RT_PROXY_PORT_RANGE_START to
+ * #AFE_PORT_ID_RT_PROXY_PORT_RANGE_END
+ */
+
+	u16                  reserved;
+	/* This field must be set to zero.	*/
+} __packed;
+
+#define AFE_EVENT_RT_PROXY_PORT_STATUS	0x00010105
+#define AFE_EVENTYPE_RT_PROXY_PORT_START	0
+#define AFE_EVENTYPE_RT_PROXY_PORT_STOP	1
+#define AFE_EVENTYPE_RT_PROXY_PORT_LOW_WATER_MARK	2
+#define AFE_EVENTYPE_RT_PROXY_PORT_HIGH_WATER_MARK	3
+#define AFE_EVENTYPE_RT_PROXY_PORT_INVALID	0xFFFF
+
+/*  Payload of the #AFE_EVENT_RT_PROXY_PORT_STATUS
+ * message, which sends an event from the AFE service
+ * to a registered client.
+ */
+struct afe_event_rt_proxy_port_status {
+	u16                  port_id;
+/* Port ID to which the event is sent.
+ * Supported values: #AFE_PORT_ID_RT_PROXY_PORT_RANGE_START to
+ * #AFE_PORT_ID_RT_PROXY_PORT_RANGE_END
+ */
+
+	u16                  eventype;
+/* Type of event.
+ * Supported values:
+ * - #AFE_EVENTYPE_RT_PROXY_PORT_START
+ * - #AFE_EVENTYPE_RT_PROXY_PORT_STOP
+ * - #AFE_EVENTYPE_RT_PROXY_PORT_LOW_WATER_MARK
+ * - #AFE_EVENTYPE_RT_PROXY_PORT_HIGH_WATER_MARK
+ */
+} __packed;
+
+#define AFE_PORT_DATA_CMD_RT_PROXY_PORT_WRITE_V2 0x000100ED
+
+struct afe_port_data_cmd_rt_proxy_port_write_v2 {
+	struct apr_hdr hdr;
+	u16                  port_id;
+/* Tx (mic) proxy port ID with which the real-time
+ * driver exchanges data.
+ * Supported values: #AFE_PORT_ID_RT_PROXY_PORT_RANGE_START to
+ * #AFE_PORT_ID_RT_PROXY_PORT_RANGE_END
+ */
+
+	u16                  reserved;
+	/* This field must be set to zero. */
+
+	u32                  buffer_address_lsw;
+/* LSW Address of the buffer containing the
+ * data from the real-time source
+ * device on a client.
+ */
+
+	u32                  buffer_address_msw;
+/* MSW Address of the buffer containing the
+ * data from the real-time source
+ * device on a client.
+ */
+
+	u32					mem_map_handle;
+/* A memory map handle encapsulating shared memory
+ * attributes is returned if
+ * AFE_SERVICE_CMD_SHARED_MEM_MAP_REGIONS
+ * command is successful.
+ * Supported Values:
+ * - Any 32 bit value
+ */
+
+	u32                  available_bytes;
+/* Number of valid bytes available in the buffer (including all
+ * channels: number of bytes per channel =
+ * availableBytes / numChannels).
+ * Supported values: > 0
+ *
+ * This field must be equal to the frame
+ * size specified in the #AFE_PORT_AUDIO_IF_CONFIG
+ * command that was sent to configure this
+ * port.
+ */
+} __packed;
+
+#define AFE_PORT_DATA_CMD_RT_PROXY_PORT_READ_V2	0x000100EE
+
+/*  Payload of the
+ * #AFE_PORT_DATA_CMD_RT_PROXY_PORT_READ_V2 command, which
+ * delivers an empty buffer to the AFE service. On
+ * acknowledgment, data is filled in the buffer.
+ */
+struct afe_port_data_cmd_rt_proxy_port_read_v2 {
+	struct apr_hdr hdr;
+	u16                  port_id;
+/* Rx proxy port ID with which the real-time
+ * driver exchanges data.
+ * Supported values: #AFE_PORT_ID_RT_PROXY_PORT_RANGE_START to
+ * #AFE_PORT_ID_RT_PROXY_PORT_RANGE_END
+ * (This must be an Rx (speaker) port.)
+ */
+
+	u16                  reserved;
+	/* This field must be set to zero. */
+
+	u32                  buffer_address_lsw;
+/* LSW Address of the buffer containing the data sent from the AFE
+ * service to a real-time sink device on the client.
+ */
+
+
+	u32                  buffer_address_msw;
+/* MSW Address of the buffer containing the data sent from the AFE
+ * service to a real-time sink device on the client.
+ */
+
+	u32                  mem_map_handle;
+/* A memory map handle encapsulating shared memory attributes,
+ * returned if the AFE_SERVICE_CMD_SHARED_MEM_MAP_REGIONS command is
+ * successful.
+ * Supported Values:
+ * - Any 32 bit value
+ */
+
+	u32                  available_bytes;
+/* Number of valid bytes available in the buffer (including all
+ * channels).
+ * Supported values: > 0
+ * This field must be equal to the frame size specified in the
+ * #AFE_PORT_AUDIO_IF_CONFIG command that was sent to configure
+ * this port.
+ */
+} __packed;
+
+/* This module ID is used to configure devices such as I2S, PCM,
+ * HDMI, SLIMbus, etc. This module supports the following parameter IDs:
+ * - #AFE_PARAM_ID_I2S_CONFIG
+ * - #AFE_PARAM_ID_PCM_CONFIG
+ * - #AFE_PARAM_ID_DIGI_MIC_CONFIG
+ * - #AFE_PARAM_ID_HDMI_CONFIG
+ * - #AFE_PARAM_ID_INTERNAL_BT_FM_CONFIG
+ * - #AFE_PARAM_ID_SLIMBUS_CONFIG
+ * - #AFE_PARAM_ID_RT_PROXY_CONFIG
+ */
+
+#define AFE_MODULE_AUDIO_DEV_INTERFACE    0x0001020C
+#define AFE_PORT_SAMPLE_RATE_8K           8000
+#define AFE_PORT_SAMPLE_RATE_16K          16000
+#define AFE_PORT_SAMPLE_RATE_48K          48000
+#define AFE_PORT_SAMPLE_RATE_96K          96000
+#define AFE_PORT_SAMPLE_RATE_176P4K       176400
+#define AFE_PORT_SAMPLE_RATE_192K         192000
+#define AFE_PORT_SAMPLE_RATE_352P8K       352800
+#define AFE_LINEAR_PCM_DATA				0x0
+#define AFE_NON_LINEAR_DATA				0x1
+#define AFE_LINEAR_PCM_DATA_PACKED_60958 0x2
+#define AFE_NON_LINEAR_DATA_PACKED_60958 0x3
+#define AFE_GENERIC_COMPRESSED           0x8
+
+/* This param id is used to configure the I2S interface */
+#define AFE_PARAM_ID_I2S_CONFIG	0x0001020D
+#define AFE_API_VERSION_I2S_CONFIG	0x1
+/*	Enumerations for setting the I2S configuration channel_mode
+ * parameter to serial data wires 0 through 3 (SD0 to SD3).
+ */
+#define AFE_PORT_I2S_SD0                     0x1
+#define AFE_PORT_I2S_SD1                     0x2
+#define AFE_PORT_I2S_SD2                     0x3
+#define AFE_PORT_I2S_SD3                     0x4
+#define AFE_PORT_I2S_QUAD01                  0x5
+#define AFE_PORT_I2S_QUAD23                  0x6
+#define AFE_PORT_I2S_6CHS                    0x7
+#define AFE_PORT_I2S_8CHS                    0x8
+#define AFE_PORT_I2S_MONO                    0x0
+#define AFE_PORT_I2S_STEREO                  0x1
+#define AFE_PORT_CONFIG_I2S_WS_SRC_EXTERNAL  0x0
+#define AFE_PORT_CONFIG_I2S_WS_SRC_INTERNAL  0x1
+
+/*  Payload of the #AFE_PARAM_ID_I2S_CONFIG command's I2S
+ * configuration parameter.
+ */
+struct afe_param_id_i2s_cfg {
+	u32                  i2s_cfg_minor_version;
+/* Minor version used for tracking the version of the I2S
+ * configuration interface.
+ * Supported values: #AFE_API_VERSION_I2S_CONFIG
+ */
+
+	u16                  bit_width;
+/* Bit width of the sample.
+ * Supported values: 16, 24
+ */
+
+	u16                  channel_mode;
+/* I2S lines and multichannel operation.
+ * Supported values:
+ * - #AFE_PORT_I2S_SD0
+ * - #AFE_PORT_I2S_SD1
+ * - #AFE_PORT_I2S_SD2
+ * - #AFE_PORT_I2S_SD3
+ * - #AFE_PORT_I2S_QUAD01
+ * - #AFE_PORT_I2S_QUAD23
+ * - #AFE_PORT_I2S_6CHS
+ * - #AFE_PORT_I2S_8CHS
+ */
+
+	u16                  mono_stereo;
+/* Specifies mono or stereo. This applies only when
+ * a single I2S line is used.
+ * Supported values:
+ * - #AFE_PORT_I2S_MONO
+ * - #AFE_PORT_I2S_STEREO
+ */
+
+	u16                  ws_src;
+/* Word select source: internal or external.
+ * Supported values:
+ * - #AFE_PORT_CONFIG_I2S_WS_SRC_EXTERNAL
+ * - #AFE_PORT_CONFIG_I2S_WS_SRC_INTERNAL
+ */
+
+	u32                  sample_rate;
+/* Sampling rate of the port.
+ * Supported values:
+ * - #AFE_PORT_SAMPLE_RATE_8K
+ * - #AFE_PORT_SAMPLE_RATE_16K
+ * - #AFE_PORT_SAMPLE_RATE_48K
+ * - #AFE_PORT_SAMPLE_RATE_96K
+ * - #AFE_PORT_SAMPLE_RATE_192K
+ */
+
+	u16                  data_format;
+/* Data format.
+ * Supported values:
+ * - #AFE_LINEAR_PCM_DATA
+ * - #AFE_NON_LINEAR_DATA
+ * - #AFE_LINEAR_PCM_DATA_PACKED_60958
+ * - #AFE_NON_LINEAR_DATA_PACKED_60958
+ */
+	u16                  reserved;
+	/* This field must be set to zero. */
+} __packed;
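+
+/* Illustrative sketch (not part of the original API): filling the
+ * structure above for 16-bit stereo at 48 kHz over a single I2S data
+ * line (SD1) with an internally generated word select. The helper is
+ * hypothetical.
+ */
+static inline void example_fill_i2s_cfg(struct afe_param_id_i2s_cfg *cfg)
+{
+	cfg->i2s_cfg_minor_version = AFE_API_VERSION_I2S_CONFIG;
+	cfg->bit_width    = 16;
+	cfg->channel_mode = AFE_PORT_I2S_SD1;	/* one serial data line */
+	cfg->mono_stereo  = AFE_PORT_I2S_STEREO; /* only for single line */
+	cfg->ws_src       = AFE_PORT_CONFIG_I2S_WS_SRC_INTERNAL;
+	cfg->sample_rate  = AFE_PORT_SAMPLE_RATE_48K;
+	cfg->data_format  = AFE_LINEAR_PCM_DATA;
+	cfg->reserved     = 0;
+}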
+
+/*
+ * These param ids are used to configure the SPDIF interface
+ */
+
+#define AFE_API_VERSION_SPDIF_CONFIG 0x1
+#define AFE_API_VERSION_SPDIF_CH_STATUS_CONFIG 0x1
+#define AFE_API_VERSION_SPDIF_CLK_CONFIG 0x1
+#define AFE_CH_STATUS_A 1
+#define AFE_CH_STATUS_B 2
+
+#define AFE_PARAM_ID_SPDIF_CONFIG 0x00010244
+#define AFE_PARAM_ID_CH_STATUS_CONFIG 0x00010245
+#define AFE_PARAM_ID_SPDIF_CLK_CONFIG 0x00010246
+
+#define AFE_PORT_CLK_ROOT_LPAPLL 0x3
+#define AFE_PORT_CLK_ROOT_LPAQ6PLL   0x4
+
+struct afe_param_id_spdif_cfg {
+/* Minor version used for tracking the version of the SPDIF
+ * configuration interface.
+ * Supported values: #AFE_API_VERSION_SPDIF_CONFIG
+ */
+	u32	spdif_cfg_minor_version;
+
+/* Sampling rate of the port.
+ * Supported values:
+ * - #AFE_PORT_SAMPLE_RATE_22_05K
+ * - #AFE_PORT_SAMPLE_RATE_32K
+ * - #AFE_PORT_SAMPLE_RATE_44_1K
+ * - #AFE_PORT_SAMPLE_RATE_48K
+ * - #AFE_PORT_SAMPLE_RATE_96K
+ * - #AFE_PORT_SAMPLE_RATE_176_4K
+ * - #AFE_PORT_SAMPLE_RATE_192K
+ */
+	u32	sample_rate;
+
+/* data format
+ * Supported values:
+ * - #AFE_LINEAR_PCM_DATA
+ * - #AFE_NON_LINEAR_DATA
+ */
+	u16	data_format;
+/* Number of channels supported by the port:
+ * 1 for PCM, 2 for the compressed case.
+ */
+	u16	num_channels;
+/* Bit width of the sample.
+ * Supported values: 16, 24
+ */
+	u16	bit_width;
+/* This field must be set to zero. */
+	u16	reserved;
+} __packed;
+
+struct afe_param_id_spdif_ch_status_cfg {
+	u32 ch_status_cfg_minor_version;
+/* Minor version used for tracking the version of channel
+ * status configuration. Current supported version is 1
+ */
+
+	u32 status_type;
+/* Indicate if the channel status is for channel A or B
+ * Supported values:
+ * - #AFE_CH_STATUS_A
+ * - #AFE_CH_STATUS_B
+ */
+
+	u8 status_bits[24];
+/* Channel status: 192 bits per channel.
+ * Byte ordering as defined by IEC60958-3
+ */
+
+	u8 status_mask[24];
+/* Channel status mask; status bits whose mask bit is 1 are applied.
+ * Byte ordering as defined by IEC60958-3
+ */
+} __packed;
+
+struct afe_param_id_spdif_clk_cfg {
+	u32 clk_cfg_minor_version;
+/* Minor version used for tracking the version of SPDIF
+ * interface clock configuration. Current supported version
+ * is 1
+ */
+
+	u32 clk_value;
+/* Specifies the clock frequency in Hz to set.
+ * Supported values:
+ * 0 - Disable the clock
+ * 2 (biphase) * 32 (60958 subframe size) * sampling rate * 2
+ * (channels A and B)
+ * (See the sketch after this structure.)
+ */
+
+	u32 clk_root;
+/* Specifies SPDIF root clk source
+ * Supported Values:
+ * - #AFE_PORT_CLK_ROOT_LPAPLL
+ * - #AFE_PORT_CLK_ROOT_LPAQ6PLL
+ */
+} __packed;
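+
+/* Illustrative sketch (not part of the original API): deriving the
+ * clk_value documented above from the biphase formula. For 48 kHz this
+ * yields 2 * 32 * 48000 * 2 = 6144000 Hz. The helper is hypothetical.
+ */
+static inline u32 example_spdif_clk_hz(u32 sample_rate)
+{
+	/* 2 (biphase) * 32 (60958 subframe size) * rate * 2 (ch A and B) */
+	return 2 * 32 * sample_rate * 2;
+}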
+
+struct afe_spdif_clk_config_command {
+	struct apr_hdr                    hdr;
+	struct afe_port_cmd_set_param_v2  param;
+	struct afe_port_param_data_v2     pdata;
+	struct afe_param_id_spdif_clk_cfg clk_cfg;
+} __packed;
+
+struct afe_spdif_chstatus_config_command {
+	struct apr_hdr                    hdr;
+	struct afe_port_cmd_set_param_v2  param;
+	struct afe_port_param_data_v2     pdata;
+	struct afe_param_id_spdif_ch_status_cfg ch_status;
+} __packed;
+
+struct afe_spdif_port_config {
+	struct afe_param_id_spdif_cfg            cfg;
+	struct afe_param_id_spdif_ch_status_cfg  ch_status;
+} __packed;
+
+#define AFE_PARAM_ID_PCM_CONFIG        0x0001020E
+#define AFE_API_VERSION_PCM_CONFIG	0x1
+/* Enumeration for the auxiliary PCM synchronization signal
+ * provided by an external source.
+ */
+
+#define AFE_PORT_PCM_SYNC_SRC_EXTERNAL 0x0
+/*	Enumeration for the auxiliary PCM synchronization signal
+ * provided by an internal source.
+ */
+#define AFE_PORT_PCM_SYNC_SRC_INTERNAL  0x1
+/*	Enumeration for the PCM configuration aux_mode parameter,
+ * which configures the auxiliary PCM interface to use
+ * short synchronization.
+ */
+#define AFE_PORT_PCM_AUX_MODE_PCM  0x0
+/*
+ * Enumeration for the PCM configuration aux_mode parameter,
+ * which configures the auxiliary PCM interface to use long
+ * synchronization.
+ */
+#define AFE_PORT_PCM_AUX_MODE_AUX    0x1
+/*
+ * Enumeration for setting the PCM configuration frame to 8.
+ */
+#define AFE_PORT_PCM_BITS_PER_FRAME_8  0x0
+/*
+ * Enumeration for setting the PCM configuration frame to 16.
+ */
+#define AFE_PORT_PCM_BITS_PER_FRAME_16   0x1
+
+/*	Enumeration for setting the PCM configuration frame to 32.*/
+#define AFE_PORT_PCM_BITS_PER_FRAME_32 0x2
+
+/*	Enumeration for setting the PCM configuration frame to 64.*/
+#define AFE_PORT_PCM_BITS_PER_FRAME_64   0x3
+
+/*	Enumeration for setting the PCM configuration frame to 128.*/
+#define AFE_PORT_PCM_BITS_PER_FRAME_128 0x4
+
+/*	Enumeration for setting the PCM configuration frame to 256.*/
+#define AFE_PORT_PCM_BITS_PER_FRAME_256 0x5
+
+/*	Enumeration for setting the PCM configuration
+ * quantype parameter to A-law with no padding.
+ */
+#define AFE_PORT_PCM_ALAW_NOPADDING 0x0
+
+/* Enumeration for setting the PCM configuration quantype
+ * parameter to mu-law with no padding.
+ */
+#define AFE_PORT_PCM_MULAW_NOPADDING 0x1
+/*	Enumeration for setting the PCM configuration quantype
+ * parameter to linear with no padding.
+ */
+#define AFE_PORT_PCM_LINEAR_NOPADDING 0x2
+/*	Enumeration for setting the PCM configuration quantype
+ * parameter to A-law with padding.
+ */
+#define AFE_PORT_PCM_ALAW_PADDING  0x3
+/*	Enumeration for setting the PCM configuration quantype
+ * parameter to mu-law with padding.
+ */
+#define AFE_PORT_PCM_MULAW_PADDING 0x4
+/*	Enumeration for setting the PCM configuration quantype
+ * parameter to linear with padding.
+ */
+#define AFE_PORT_PCM_LINEAR_PADDING 0x5
+/*	Enumeration for disabling the PCM configuration
+ * ctrl_data_out_enable parameter.
+ * The PCM block is the only master.
+ */
+#define AFE_PORT_PCM_CTRL_DATA_OE_DISABLE 0x0
+/*
+ * Enumeration for enabling the PCM configuration
+ * ctrl_data_out_enable parameter. The PCM block shares
+ * the signal with other masters.
+ */
+#define AFE_PORT_PCM_CTRL_DATA_OE_ENABLE  0x1
+
+/*  Payload of the #AFE_PARAM_ID_PCM_CONFIG command's
+ * PCM configuration parameter.
+ */
+
+struct afe_param_id_pcm_cfg {
+	u32                  pcm_cfg_minor_version;
+/* Minor version used for tracking the version of the AUX PCM
+ * configuration interface.
+ * Supported values: #AFE_API_VERSION_PCM_CONFIG
+ */
+
+	u16                  aux_mode;
+/* PCM synchronization setting.
+ * Supported values:
+ * - #AFE_PORT_PCM_AUX_MODE_PCM
+ * - #AFE_PORT_PCM_AUX_MODE_AUX
+ */
+
+	u16                  sync_src;
+/* Synchronization source.
+ * Supported values:
+ * - #AFE_PORT_PCM_SYNC_SRC_EXTERNAL
+ * - #AFE_PORT_PCM_SYNC_SRC_INTERNAL
+ */
+
+	u16                  frame_setting;
+/* Number of bits per frame.
+ * Supported values:
+ * - #AFE_PORT_PCM_BITS_PER_FRAME_8
+ * - #AFE_PORT_PCM_BITS_PER_FRAME_16
+ * - #AFE_PORT_PCM_BITS_PER_FRAME_32
+ * - #AFE_PORT_PCM_BITS_PER_FRAME_64
+ * - #AFE_PORT_PCM_BITS_PER_FRAME_128
+ * - #AFE_PORT_PCM_BITS_PER_FRAME_256
+ */
+
+	u16                  quantype;
+/* PCM quantization type.
+ * Supported values:
+ * - #AFE_PORT_PCM_ALAW_NOPADDING
+ * - #AFE_PORT_PCM_MULAW_NOPADDING
+ * - #AFE_PORT_PCM_LINEAR_NOPADDING
+ * - #AFE_PORT_PCM_ALAW_PADDING
+ * - #AFE_PORT_PCM_MULAW_PADDING
+ * - #AFE_PORT_PCM_LINEAR_PADDING
+ */
+
+	u16                  ctrl_data_out_enable;
+/* Specifies whether the PCM block shares the data-out signal
+ * (output enable) with other masters.
+ * Supported values:
+ * - #AFE_PORT_PCM_CTRL_DATA_OE_DISABLE
+ * - #AFE_PORT_PCM_CTRL_DATA_OE_ENABLE
+ */
+	u16                  reserved;
+	/* This field must be set to zero. */
+
+	u32                  sample_rate;
+/* Sampling rate of the port.
+ * Supported values:
+ * - #AFE_PORT_SAMPLE_RATE_8K
+ * - #AFE_PORT_SAMPLE_RATE_16K
+ */
+
+	u16                  bit_width;
+/* Bit width of the sample.
+ * Supported values: 16
+ */
+
+	u16                  num_channels;
+/* Number of channels.
+ * Supported values: 1 to 4
+ */
+
+	u16                  slot_number_mapping[4];
+/* Specifies the slot number for each channel in a multichannel
+ * scenario.
+ * Supported values: 1 to 32
+ */
+} __packed;
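+
+/* Illustrative sketch (not part of the original API): filling the
+ * structure above for a 16 kHz, 16-bit, mono auxiliary PCM link with
+ * internal sync and 256 bits per frame. The helper is hypothetical and
+ * the zeroing of unused slot entries is an assumption.
+ */
+static inline void example_fill_pcm_cfg(struct afe_param_id_pcm_cfg *cfg)
+{
+	cfg->pcm_cfg_minor_version = AFE_API_VERSION_PCM_CONFIG;
+	cfg->aux_mode      = AFE_PORT_PCM_AUX_MODE_PCM;
+	cfg->sync_src      = AFE_PORT_PCM_SYNC_SRC_INTERNAL;
+	cfg->frame_setting = AFE_PORT_PCM_BITS_PER_FRAME_256;
+	cfg->quantype      = AFE_PORT_PCM_LINEAR_NOPADDING;
+	cfg->ctrl_data_out_enable = AFE_PORT_PCM_CTRL_DATA_OE_DISABLE;
+	cfg->reserved      = 0;
+	cfg->sample_rate   = AFE_PORT_SAMPLE_RATE_16K;
+	cfg->bit_width     = 16;
+	cfg->num_channels  = 1;
+	cfg->slot_number_mapping[0] = 1;	/* channel 0 in slot 1 */
+	cfg->slot_number_mapping[1] = 0;	/* unused for mono */
+	cfg->slot_number_mapping[2] = 0;
+	cfg->slot_number_mapping[3] = 0;
+}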
+
+/*
+ * This param id is used to configure the DIGI MIC interface
+ */
+#define AFE_PARAM_ID_DIGI_MIC_CONFIG	0x0001020F
+/*  This version information is used to handle future additions to
+ *  the config interface in a backward-compatible manner.
+ */
+#define AFE_API_VERSION_DIGI_MIC_CONFIG 0x1
+
+/* Enumeration for setting the digital mic configuration
+ * channel_mode parameter to left 0.
+ */
+
+#define AFE_PORT_DIGI_MIC_MODE_LEFT0  0x1
+
+/* Enumeration for setting the digital mic configuration
+ * channel_mode parameter to right 0.
+ */
+
+
+#define AFE_PORT_DIGI_MIC_MODE_RIGHT0  0x2
+
+/* Enumeration for setting the digital mic configuration
+ * channel_mode parameter to left 1.
+ */
+
+#define AFE_PORT_DIGI_MIC_MODE_LEFT1  0x3
+
+/* Enumeration for setting the digital mic configuration
+ * channel_mode parameter to right 1.
+ */
+
+#define AFE_PORT_DIGI_MIC_MODE_RIGHT1 0x4
+
+/* Enumeration for setting the digital mic configuration
+ * channel_mode parameter to stereo 0.
+ */
+#define AFE_PORT_DIGI_MIC_MODE_STEREO0  0x5
+
+/* Enumeration for setting the digital mic configuration
+ * channel_mode parameter to stereo 1.
+ */
+
+
+#define AFE_PORT_DIGI_MIC_MODE_STEREO1    0x6
+
+/* Enumeration for setting the digital mic configuration
+ * channel_mode parameter to quad.
+ */
+
+#define AFE_PORT_DIGI_MIC_MODE_QUAD     0x7
+
+/*  Payload of the #AFE_PARAM_ID_DIGI_MIC_CONFIG command's
+ * DIGI MIC configuration parameter.
+ */
+struct afe_param_id_digi_mic_cfg {
+	u32                  digi_mic_cfg_minor_version;
+/* Minor version used for tracking the version of the DIGI Mic
+ * configuration interface.
+ * Supported values: #AFE_API_VERSION_DIGI_MIC_CONFIG
+ */
+
+	u16                  bit_width;
+/* Bit width of the sample.
+ * Supported values: 16
+ */
+
+	u16                  channel_mode;
+/* Digital mic and multichannel operation.
+ * Supported values:
+ * - #AFE_PORT_DIGI_MIC_MODE_LEFT0
+ * - #AFE_PORT_DIGI_MIC_MODE_RIGHT0
+ * - #AFE_PORT_DIGI_MIC_MODE_LEFT1
+ * - #AFE_PORT_DIGI_MIC_MODE_RIGHT1
+ * - #AFE_PORT_DIGI_MIC_MODE_STEREO0
+ * - #AFE_PORT_DIGI_MIC_MODE_STEREO1
+ * - #AFE_PORT_DIGI_MIC_MODE_QUAD
+ */
+
+	u32                  sample_rate;
+/* Sampling rate of the port.
+ * Supported values:
+ * - #AFE_PORT_SAMPLE_RATE_8K
+ * - #AFE_PORT_SAMPLE_RATE_16K
+ * - #AFE_PORT_SAMPLE_RATE_48K
+ */
+} __packed;
+
+/*
+ * This param id is used to configure the HDMI interface
+ */
+#define AFE_PARAM_ID_HDMI_CONFIG     0x00010210
+
+/*  This version information is used to handle future additions to
+ *  the config interface in a backward-compatible manner.
+ */
+#define AFE_API_VERSION_HDMI_CONFIG 0x1
+
+/* Payload of the #AFE_PARAM_ID_HDMI_CONFIG command,
+ * which configures a multichannel HDMI audio interface.
+ */
+struct afe_param_id_hdmi_multi_chan_audio_cfg {
+	u32                  hdmi_cfg_minor_version;
+/* Minor version used for tracking the version of the HDMI
+ * configuration interface.
+ * Supported values: #AFE_API_VERSION_HDMI_CONFIG
+ */
+
+	u16                  datatype;
+/* Data type.
+ * Supported values:
+ * - #AFE_LINEAR_PCM_DATA
+ * - #AFE_NON_LINEAR_DATA
+ * - #AFE_LINEAR_PCM_DATA_PACKED_60958
+ * - #AFE_NON_LINEAR_DATA_PACKED_60958
+ */
+
+	u16                  channel_allocation;
+/* HDMI channel allocation information for programming an HDMI
+ * frame. The default is 0 (Stereo).
+ *
+ * This information is defined in the HDMI standard, CEA 861-D.
+ * The number of channels is also inferred from this parameter.
+ */
+
+
+	u32                  sample_rate;
+/* Sampling rate of the port.
+ * Supported values:
+ * - #AFE_PORT_SAMPLE_RATE_8K
+ * - #AFE_PORT_SAMPLE_RATE_16K
+ * - #AFE_PORT_SAMPLE_RATE_48K
+ * - #AFE_PORT_SAMPLE_RATE_96K
+ * - 22050, 44100, 176400 for compressed streams
+ */
+
+	u16                  bit_width;
+/* Bit width of the sample.
+ * Supported values: 16, 24
+ */
+	u16                  reserved;
+	/* This field must be set to zero. */
+} __packed;
+
+/*
+ * This param id is used to configure the BT or FM (RIVA) interface
+ */
+#define AFE_PARAM_ID_INTERNAL_BT_FM_CONFIG  0x00010211
+
+/*  This version information is used to handle future additions to
+ *  the config interface in a backward-compatible manner.
+ */
+#define AFE_API_VERSION_INTERNAL_BT_FM_CONFIG	0x1
+
+/*  Payload of the #AFE_PARAM_ID_INTERNAL_BT_FM_CONFIG
+ * command's BT voice/BT audio/FM configuration parameter.
+ */
+struct afe_param_id_internal_bt_fm_cfg {
+	u32                  bt_fm_cfg_minor_version;
+/* Minor version used for tracking the version of the BT and FM
+ * configuration interface.
+ * Supported values: #AFE_API_VERSION_INTERNAL_BT_FM_CONFIG
+ */
+
+	u16                  num_channels;
+/* Number of channels.
+ * Supported values: 1 to 2
+ */
+
+	u16                  bit_width;
+/* Bit width of the sample.
+ * Supported values: 16
+ */
+
+	u32                  sample_rate;
+/* Sampling rate of the port.
+ * Supported values:
+ * - #AFE_PORT_SAMPLE_RATE_8K (only for BTSCO)
+ * - #AFE_PORT_SAMPLE_RATE_16K (only for BTSCO)
+ * - #AFE_PORT_SAMPLE_RATE_48K (FM and A2DP)
+ */
+} __packed;
+
+/* This param id is used to configure the SLIMbus interface using
+ * the shared channel approach.
+ */
+
+
+#define AFE_PARAM_ID_SLIMBUS_CONFIG    0x00010212
+
+/*  This version information is used to handle future additions to
+ *  the config interface in a backward-compatible manner.
+ */
+#define AFE_API_VERSION_SLIMBUS_CONFIG 0x1
+
+/*	Enumeration for setting SLIMbus device ID 1.
+*/
+#define AFE_SLIMBUS_DEVICE_1           0x0
+
+/*	Enumeration for setting SLIMbus device ID 2.
+*/
+#define AFE_SLIMBUS_DEVICE_2          0x1
+
+/*	Enumeration for setting the SLIMbus data formats.
+*/
+#define AFE_SB_DATA_FORMAT_NOT_INDICATED 0x0
+
+/* Enumeration for setting the maximum number of streams per
+ * device.
+ */
+
+#define AFE_PORT_MAX_AUDIO_CHAN_CNT	0x8
+
+#define AFE_PORT_MAX_AUDIO_CHAN_CNT_V2	0x20
+
+/* Payload of the #AFE_PARAM_ID_SLIMBUS_CONFIG parameter, used to
+ * configure the SLIMbus port.
+ */
+
+struct afe_param_id_slimbus_cfg {
+	u32                  sb_cfg_minor_version;
+/* Minor version used for tracking the version of the SLIMBUS
+ * configuration interface.
+ * Supported values: #AFE_API_VERSION_SLIMBUS_CONFIG
+ */
+
+	u16                  slimbus_dev_id;
+/* SLIMbus hardware device ID, which is required to handle
+ * multiple SLIMbus hardware blocks.
+ * Supported values:
+ * - #AFE_SLIMBUS_DEVICE_1
+ * - #AFE_SLIMBUS_DEVICE_2
+ */
+
+
+	u16                  bit_width;
+/* Bit width of the sample.
+ * Supported values: 16, 24
+ */
+
+	u16                  data_format;
+/* Data format supported by the SLIMbus hardware. The default is
+ * 0 (#AFE_SB_DATA_FORMAT_NOT_INDICATED), which indicates the
+ * hardware does not perform any format conversions before the data
+ * transfer.
+ */
+
+
+	u16                  num_channels;
+/* Number of channels.
+ * Supported values: 1 to #AFE_PORT_MAX_AUDIO_CHAN_CNT
+ */
+
+	u8  shared_ch_mapping[AFE_PORT_MAX_AUDIO_CHAN_CNT];
+/* Mapping of shared channel IDs (128 to 255) to which the
+ * master port is to be connected.
+ * Shared_channel_mapping[i] represents the shared channel assigned
+ * for audio channel i in multichannel audio data.
+ */
+
+	u32              sample_rate;
+/* Sampling rate of the port.
+ * Supported values:
+ * - #AFE_PORT_SAMPLE_RATE_8K
+ * - #AFE_PORT_SAMPLE_RATE_16K
+ * - #AFE_PORT_SAMPLE_RATE_48K
+ * - #AFE_PORT_SAMPLE_RATE_96K
+ * - #AFE_PORT_SAMPLE_RATE_192K
+ */
+} __packed;
+
+
+/* ID of the parameter used to configure USB audio device
+ * parameters. It should be used with
+ * AFE_MODULE_AUDIO_DEV_INTERFACE
+ */
+#define AFE_PARAM_ID_USB_AUDIO_DEV_PARAMS    0x000102A5
+
+
+/* ID of the parameter used to set the endianness value for the
+ * USB audio device. It should be used with
+ * AFE_MODULE_AUDIO_DEV_INTERFACE
+ */
+#define AFE_PARAM_ID_USB_AUDIO_DEV_LPCM_FMT 0x000102AA
+
+/* Minor version used for tracking USB audio configuration */
+#define AFE_API_MINIOR_VERSION_USB_AUDIO_CONFIG 0x1
+
+/* Payload of the AFE_PARAM_ID_USB_AUDIO_DEV_PARAMS parameter used by
+ * AFE_MODULE_AUDIO_DEV_INTERFACE.
+ */
+struct afe_param_id_usb_audio_dev_params {
+/* Minor version used for tracking USB audio device parameter.
+ * Supported values: AFE_API_MINIOR_VERSION_USB_AUDIO_CONFIG
+ */
+	u32                  cfg_minor_version;
+/* Token of the actual end USB audio device */
+	u32                  dev_token;
+} __packed;
+
+struct afe_param_id_usb_audio_dev_lpcm_fmt {
+/* Minor version used for tracking USB audio device parameter.
+ * Supported values: AFE_API_MINIOR_VERSION_USB_AUDIO_CONFIG
+ */
+	u32                  cfg_minor_version;
+/* Endianness of actual end USB audio device */
+	u32                  endian;
+} __packed;
+
+/* ID of the parameter used to configure the USB audio interface.
+ * It should be used with AFE_MODULE_AUDIO_DEV_INTERFACE
+ */
+#define AFE_PARAM_ID_USB_AUDIO_CONFIG    0x000102A4
+
+/* Payload of the AFE_PARAM_ID_USB_AUDIO_CONFIG parameter used by
+ * AFE_MODULE_AUDIO_DEV_INTERFACE.
+ */
+struct afe_param_id_usb_audio_cfg {
+/* Minor version used for tracking USB audio device configuration.
+ * Supported values: AFE_API_MINIOR_VERSION_USB_AUDIO_CONFIG
+ */
+	u32                  cfg_minor_version;
+/* Sampling rate of the port.
+ * Supported values:
+ * - AFE_PORT_SAMPLE_RATE_8K
+ * - AFE_PORT_SAMPLE_RATE_11025
+ * - AFE_PORT_SAMPLE_RATE_12K
+ * - AFE_PORT_SAMPLE_RATE_16K
+ * - AFE_PORT_SAMPLE_RATE_22050
+ * - AFE_PORT_SAMPLE_RATE_24K
+ * - AFE_PORT_SAMPLE_RATE_32K
+ * - AFE_PORT_SAMPLE_RATE_44P1K
+ * - AFE_PORT_SAMPLE_RATE_48K
+ * - AFE_PORT_SAMPLE_RATE_96K
+ * - AFE_PORT_SAMPLE_RATE_192K
+ */
+	u32                  sample_rate;
+/* Bit width of the sample.
+ * Supported values: 16, 24
+ */
+	u16                  bit_width;
+/* Number of channels.
+ * Supported values: 1 and 2
+ */
+	u16                  num_channels;
+/* Data format supported by the USB. The supported value is
+ * 0 (#AFE_USB_AUDIO_DATA_FORMAT_LINEAR_PCM).
+ */
+	u16                  data_format;
+/* This field must be set to zero. */
+	u16                  reserved;
+/* Device token of the actual end USB audio device */
+	u32                  dev_token;
+/* endianness of this interface */
+	u32                   endian;
+} __packed;
+
+struct afe_usb_audio_dev_param_command {
+	struct apr_hdr hdr;
+	struct afe_port_cmd_set_param_v2 param;
+	struct afe_port_param_data_v2    pdata;
+	union {
+		struct afe_param_id_usb_audio_dev_params usb_dev;
+		struct afe_param_id_usb_audio_dev_lpcm_fmt lpcm_fmt;
+	};
+} __packed;
+
+/*
+ * This param id is used to configure the Real Time Proxy interface.
+ */
+#define AFE_PARAM_ID_RT_PROXY_CONFIG 0x00010213
+
+/*  This version information is used to handle future additions to
+ *  the config interface in a backward-compatible manner.
+ */
+#define AFE_API_VERSION_RT_PROXY_CONFIG 0x1
+
+/*  Payload of the #AFE_PARAM_ID_RT_PROXY_CONFIG
+ * command (real-time proxy port configuration parameter).
+ */
+struct afe_param_id_rt_proxy_port_cfg {
+	u32                  rt_proxy_cfg_minor_version;
+/* Minor version used for tracking the version of rt-proxy
+ * config interface.
+ */
+
+	u16                  bit_width;
+/* Bit width of the sample.
+ * Supported values: 16
+ */
+
+	u16                  interleaved;
+/* Specifies whether the data exchanged between the AFE
+ * interface and real-time port is interleaved.
+ * Supported values:
+ * - 0 -- Non-interleaved (samples from each channel are
+ *   contiguous in the buffer)
+ * - 1 -- Interleaved (corresponding samples from each input
+ *   channel are interleaved within the buffer)
+ */
+
+
+	u16                  frame_size;
+/* Size of the frames that are used for PCM exchanges with this
+ * port.
+ * Supported values: > 0, in bytes
+ * For example, a 5 ms buffer of 16-bit, 16 kHz stereo samples is
+ * 5 ms * 16 samples/ms * 2 bytes/sample * 2 channels = 320 bytes.
+ * (See the sketch after this structure.)
+ */
+	u16                  jitter_allowance;
+/* Configures the amount of jitter that the port will allow.
+ * Supported values: > 0
+ * For example, if +/-10 ms of jitter is anticipated in the timing
+ * of sending frames to the port, and the configuration is 16 kHz
+ * mono with 16-bit samples, this field is 10 ms * 16 samples/ms * 2
+ * bytes/sample = 320.
+ */
+
+	u16                  low_water_mark;
+/* Low watermark in bytes (including all channels).
+ * Supported values:
+ * - 0 -- Do not send any low watermark events
+ * - > 0 -- Low watermark for triggering an event
+ * If the number of bytes in an internal circular buffer is lower
+ * than this low_water_mark parameter, a LOW_WATER_MARK event is
+ * sent to applications (via the #AFE_EVENT_RT_PROXY_PORT_STATUS
+ * event).
+ * Use of watermark events is optional for debugging purposes.
+ */
+
+	u16                  high_water_mark;
+/* High watermark in bytes (including all channels).
+ * Supported values:
+ * - 0 -- Do not send any high watermark events
+ * - > 0 -- High watermark for triggering an event
+ * If the number of bytes in an internal circular buffer exceeds
+ * TOTAL_CIRC_BUF_SIZE minus high_water_mark, a high watermark event
+ * is sent to applications (via the #AFE_EVENT_RT_PROXY_PORT_STATUS
+ * event).
+ * The use of watermark events is optional and for debugging
+ * purposes.
+ */
+
+
+	u32                  sample_rate;
+/* Sampling rate of the port.
+ * Supported values:
+ * - #AFE_PORT_SAMPLE_RATE_8K
+ * - #AFE_PORT_SAMPLE_RATE_16K
+ * - #AFE_PORT_SAMPLE_RATE_48K
+ */
+
+	u16                  num_channels;
+/* Number of channels.
+ * Supported values: 1 to #AFE_PORT_MAX_AUDIO_CHAN_CNT
+ */
+
+	u16                  reserved;
+	/* For 32 bit alignment. */
+} __packed;
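+
+/* Illustrative sketch (not part of the original API): deriving the
+ * frame_size documented above. For 5 ms of 16-bit, 16 kHz stereo this
+ * gives 80 * 2 * 2 = 320 bytes. The helper is hypothetical.
+ */
+static inline u32 example_rt_proxy_frame_bytes(u32 sample_rate,
+					       u32 bytes_per_sample,
+					       u32 num_channels,
+					       u32 duration_ms)
+{
+	return (sample_rate / 1000) * duration_ms *
+		bytes_per_sample * num_channels;
+}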
+
+
+/* This param id is used to configure the Pseudoport interface */
+
+#define AFE_PARAM_ID_PSEUDO_PORT_CONFIG	0x00010219
+
+/* Version information used to handle future additions to the configuration
+ * interface (for backward compatibility).
+ */
+#define AFE_API_VERSION_PSEUDO_PORT_CONFIG                          0x1
+
+/* Enumeration for setting the timing_mode parameter to faster than real
+ * time.
+ */
+#define AFE_PSEUDOPORT_TIMING_MODE_FTRT                             0x0
+
+/* Enumeration for setting the timing_mode parameter to real time using
+ * timers.
+ */
+#define AFE_PSEUDOPORT_TIMING_MODE_TIMER                            0x1
+
+/* Payload of the AFE_PARAM_ID_PSEUDO_PORT_CONFIG parameter used by
+    AFE_MODULE_AUDIO_DEV_INTERFACE.
+*/
+struct afe_param_id_pseudo_port_cfg {
+	u32                  pseud_port_cfg_minor_version;
+	/*
+	 * Minor version used for tracking the version of the pseudoport
+	 * configuration interface.
+	 */
+
+	u16                  bit_width;
+	/* Bit width of the sample.
+	 * Supported values: 16, 24
+	 */
+
+	u16                  num_channels;
+	/* Number of channels.
+	 * Supported values: 1 to 8
+	 */
+
+	u16                  data_format;
+	/* Non-linear data format supported by the pseudoport (for future use).
+	 * Supported values: #AFE_LINEAR_PCM_DATA
+	 */
+
+	u16                  timing_mode;
+	/* Indicates whether the pseudoport synchronizes to the clock or
+	 * operates faster than real time.
+	 * Supported values:
+	 * - #AFE_PSEUDOPORT_TIMING_MODE_FTRT
+	 * - #AFE_PSEUDOPORT_TIMING_MODE_TIMER
+	 */
+
+	u32                  sample_rate;
+	/* Sample rate at which the pseudoport will run.
+	 * Supported values:
+	 * - #AFE_PORT_SAMPLE_RATE_8K
+	 * - #AFE_PORT_SAMPLE_RATE_32K
+	 * - #AFE_PORT_SAMPLE_RATE_48K
+	 * - #AFE_PORT_SAMPLE_RATE_96K
+	 * - #AFE_PORT_SAMPLE_RATE_192K
+	 */
+} __packed;
+
+#define AFE_PARAM_ID_TDM_CONFIG		0x0001029D
+
+#define AFE_API_VERSION_TDM_CONFIG              1
+
+#define AFE_PORT_TDM_SHORT_SYNC_BIT_MODE        0
+#define AFE_PORT_TDM_LONG_SYNC_MODE             1
+#define AFE_PORT_TDM_SHORT_SYNC_SLOT_MODE       2
+
+#define AFE_PORT_TDM_SYNC_SRC_EXTERNAL          0
+#define AFE_PORT_TDM_SYNC_SRC_INTERNAL          1
+
+#define AFE_PORT_TDM_CTRL_DATA_OE_DISABLE       0
+#define AFE_PORT_TDM_CTRL_DATA_OE_ENABLE        1
+
+#define AFE_PORT_TDM_SYNC_NORMAL                0
+#define AFE_PORT_TDM_SYNC_INVERT                1
+
+#define AFE_PORT_TDM_DATA_DELAY_0_BCLK_CYCLE    0
+#define AFE_PORT_TDM_DATA_DELAY_1_BCLK_CYCLE    1
+#define AFE_PORT_TDM_DATA_DELAY_2_BCLK_CYCLE    2
+
+/* Payload of the AFE_PARAM_ID_TDM_CONFIG parameter used by
+    AFE_MODULE_AUDIO_DEV_INTERFACE.
+*/
+struct afe_param_id_tdm_cfg {
+	u32	tdm_cfg_minor_version;
+	/**< Minor version used to track TDM configuration.
+	@values #AFE_API_VERSION_TDM_CONFIG */
+
+	u32	num_channels;
+	/**< Number of enabled slots for TDM frame.
+	@values 1 to 8 */
+
+	u32	sample_rate;
+	/**< Sampling rate of the port.
+	@values
+	- #AFE_PORT_SAMPLE_RATE_8K
+	- #AFE_PORT_SAMPLE_RATE_16K
+	- #AFE_PORT_SAMPLE_RATE_24K
+	- #AFE_PORT_SAMPLE_RATE_32K
+	- #AFE_PORT_SAMPLE_RATE_48K
+	- #AFE_PORT_SAMPLE_RATE_176P4K
+	- #AFE_PORT_SAMPLE_RATE_352P8K
+	*/
+
+	u32	bit_width;
+	/**< Bit width of the sample.
+	* @values 16, 24, 32
+	*/
+
+	u16	data_format;
+	/**< Data format: linear, compressed, generic compressed.
+	@values
+	- #AFE_LINEAR_PCM_DATA
+	- #AFE_NON_LINEAR_DATA
+	- #AFE_GENERIC_COMPRESSED
+	*/
+
+	u16	sync_mode;
+	/**< TDM synchronization setting.
+	@values (short, long, slot) sync mode
+	- #AFE_PORT_TDM_SHORT_SYNC_BIT_MODE
+	- #AFE_PORT_TDM_LONG_SYNC_MODE
+	- #AFE_PORT_TDM_SHORT_SYNC_SLOT_MODE
+	*/
+
+	u16	sync_src;
+	/**< Synchronization source.
+	@values
+	- #AFE_PORT_TDM_SYNC_SRC_EXTERNAL
+	- #AFE_PORT_TDM_SYNC_SRC_INTERNAL */
+
+	u16	nslots_per_frame;
+	/**< Number of slots per frame. Typical: 1, 2, 4, 8, 16, 32.
+	@values 1 - 32 */
+
+	u16	ctrl_data_out_enable;
+	/**< Specifies whether the TDM block shares the data-out signal
+	(output enable) with other masters.
+	@values
+	- #AFE_PORT_TDM_CTRL_DATA_OE_DISABLE
+	- #AFE_PORT_TDM_CTRL_DATA_OE_ENABLE */
+
+	u16	ctrl_invert_sync_pulse;
+	/**< Specifies whether to invert the sync or not.
+	@values
+	- #AFE_PORT_TDM_SYNC_NORMAL
+	- #AFE_PORT_TDM_SYNC_INVERT */
+
+	u16	ctrl_sync_data_delay;
+	/**< Specifies the number of bit clocks to delay data with respect
+	to the sync edge.
+	@values
+	- #AFE_PORT_TDM_DATA_DELAY_0_BCLK_CYCLE
+	- #AFE_PORT_TDM_DATA_DELAY_1_BCLK_CYCLE
+	- #AFE_PORT_TDM_DATA_DELAY_2_BCLK_CYCLE */
+
+	u16	slot_width;
+	/**< Slot width of the slot in a TDM frame. The constraint
+	slot_width >= bit_width must be satisfied.
+	@values 16, 24, 32 */
+
+	u32	slot_mask;
+	/**< Position of active slots. When a bit is set, the
+	corresponding slot is active.
+	The number of active slots can be inferred from the number of
+	bits set in the mask. Only 8 individual bits can be enabled.
+	Bits 0 to 31 correspond to slots 0 to 31.
+	(See the sketch after this structure.)
+	@values 1 to 2^32 - 1 */
+} __packed;
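+
+/* Illustrative sketch (not part of the original API): building the
+ * slot_mask documented above by enabling the first num_slots slot
+ * positions, e.g. 4 slots -> 0x0000000F. The caller must respect the
+ * limit of 8 active slots noted above. The helper is hypothetical.
+ */
+static inline u32 example_tdm_slot_mask(u32 num_slots)
+{
+	return (num_slots >= 32) ? 0xFFFFFFFFU : ((1U << num_slots) - 1);
+}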
+
+/** ID of the Time Division Multiplexing (TDM) module,
+	which is used for configuring the AFE TDM.
+
+	This module supports the following parameter IDs:
+	- #AFE_PORT_TDM_SLOT_CONFIG
+
+	To configure the TDM interface, the client must use the
+	#AFE_PORT_CMD_SET_PARAM command, and fill the module ID with the
+	respective parameter IDs as listed above.
+*/
+
+#define AFE_MODULE_TDM		0x0001028A
+
+/** ID of the parameter used by #AFE_MODULE_TDM to configure
+	the TDM slot mapping. #AFE_PORT_CMD_SET_PARAM can use this parameter ID.
+*/
+#define AFE_PARAM_ID_PORT_SLOT_MAPPING_CONFIG	0x00010297
+
+/** Version information used to handle future additions to slot mapping
+	configuration (for backward compatibility).
+*/
+#define AFE_API_VERSION_SLOT_MAPPING_CONFIG	0x1
+
+/** Version information used to handle future additions to the slot
+*	mapping configuration that supports 32 channels.
+*/
+#define AFE_API_VERSION_SLOT_MAPPING_CONFIG_V2	0x2
+
+/** Data align type  */
+#define AFE_SLOT_MAPPING_DATA_ALIGN_MSB		0
+#define AFE_SLOT_MAPPING_DATA_ALIGN_LSB		1
+
+#define AFE_SLOT_MAPPING_OFFSET_INVALID		0xFFFF
+
+/* Payload of the AFE_PARAM_ID_PORT_SLOT_MAPPING_CONFIG
+	command's TDM configuration parameter.
+*/
+struct afe_param_id_slot_mapping_cfg {
+	u32	minor_version;
+	/**< Minor version used for tracking TDM slot configuration.
+	  @values #AFE_API_VERSION_SLOT_MAPPING_CONFIG */
+
+	u16	num_channel;
+	/**< Number of channels of the audio sample.
+	@values 1, 2, 4, 6, 8 */
+
+	u16	bitwidth;
+	/**< Slot bit width for each channel
+	@values 16, 24, 32 */
+
+	u32	data_align_type;
+	/**< Indicates how data is packed from slot_offset when the slot
+	bit width is 32 and the sample bit width is 24.
+	@values
+	#AFE_SLOT_MAPPING_DATA_ALIGN_MSB
+	#AFE_SLOT_MAPPING_DATA_ALIGN_LSB  */
+
+	u16	offset[AFE_PORT_MAX_AUDIO_CHAN_CNT];
+	/**< Array of slot mapping start offsets, in bytes, for this frame.
+	Bytes are counted from 0. Offset 0 maps to the first byte
+	in or out of the digital serial data line this sub-frame belongs to.
+	The slot_offset[] setting is per channel.
+	The maximum number of channels supported is 8.
+	Valid offset values must be placed contiguously starting at index 0.
+	Set unused array entries to AFE_SLOT_MAPPING_OFFSET_INVALID.
+	If "slot_bitwidth_per_channel" is 32 and "sample_bitwidth" is 24,
+	"data_align_type" indicates how the 24-bit sample data is aligned
+	within the 32-bit per-channel slot width.
+	(See the sketch after this structure.)
+	@values, in bytes */
+} __packed;
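+
+/* Illustrative sketch (not part of the original API): offsets for a
+ * 2-channel frame with 32-bit (4-byte) slots packed back to back, with
+ * unused entries marked invalid as required above. The helper is
+ * hypothetical.
+ */
+static inline void example_fill_slot_offsets(
+	struct afe_param_id_slot_mapping_cfg *cfg)
+{
+	int i;
+
+	cfg->offset[0] = 0;	/* channel 0 starts at byte 0 */
+	cfg->offset[1] = 4;	/* channel 1 starts at byte 4 */
+	for (i = 2; i < AFE_PORT_MAX_AUDIO_CHAN_CNT; i++)
+		cfg->offset[i] = AFE_SLOT_MAPPING_OFFSET_INVALID;
+}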
+
+/* Payload of the AFE_PARAM_ID_PORT_SLOT_MAPPING_CONFIG_V2
+*  command's TDM configuration parameter.
+*/
+struct afe_param_id_slot_mapping_cfg_v2 {
+	u32	minor_version;
+	/**< Minor version used for tracking TDM slot configuration.
+	 * @values #AFE_API_VERSION_SLOT_MAPPING_CONFIG_V2
+	 */
+
+	u16	num_channel;
+	/**< Number of channels of the audio sample.
+	* @values 1, 2, 4, 6, 8, 16, 32
+	*/
+
+	u16	bitwidth;
+	/**< Slot bit width for each channel
+	* @values 16, 24, 32
+	*/
+
+	u32	data_align_type;
+	/**< Indicates how data is packed from slot_offset when the slot
+	* bit width is 32 and the sample bit width is 24.
+	* @values
+	* #AFE_SLOT_MAPPING_DATA_ALIGN_MSB
+	* #AFE_SLOT_MAPPING_DATA_ALIGN_LSB
+	*/
+
+	u16	offset[AFE_PORT_MAX_AUDIO_CHAN_CNT_V2];
+	/**< Array of slot mapping start offsets, in bytes, for this frame.
+	* Bytes are counted from 0. Offset 0 maps to the first byte
+	* in or out of the digital serial data line this sub-frame
+	* belongs to.
+	* The slot_offset[] setting is per channel.
+	* The maximum number of channels supported is 32.
+	* Valid offset values must be placed contiguously starting at
+	* index 0.
+	* Set unused array entries to AFE_SLOT_MAPPING_OFFSET_INVALID.
+	* If "slot_bitwidth_per_channel" is 32 and "sample_bitwidth" is 24,
+	* "data_align_type" indicates how the 24-bit sample data is
+	* aligned within the 32-bit per-channel slot width.
+	* @values, in bytes
+	*/
+} __packed;
+
+/** ID of the parameter used by #AFE_MODULE_TDM to configure
+    the customer TDM header. #AFE_PORT_CMD_SET_PARAM can use this parameter ID.
+*/
+#define AFE_PARAM_ID_CUSTOM_TDM_HEADER_CONFIG		0x00010298
+
+/** Version information used to handle future additions to custom TDM header
+	configuration (for backward compatibility).
+*/
+#define AFE_API_VERSION_CUSTOM_TDM_HEADER_CONFIG	0x1
+
+#define AFE_CUSTOM_TDM_HEADER_TYPE_INVALID		0x0
+#define AFE_CUSTOM_TDM_HEADER_TYPE_DEFAULT		0x1
+#define AFE_CUSTOM_TDM_HEADER_TYPE_ENTERTAINMENT_MOST	0x2
+
+#define AFE_CUSTOM_TDM_HEADER_MAX_CNT	0x8
+
+/** Payload of the AFE_PARAM_ID_CUSTOM_TDM_HEADER_CONFIG parameter ID
+*/
+struct afe_param_id_custom_tdm_header_cfg {
+	u32	minor_version;
+	/**< Minor version used for tracking custom TDM header configuration.
+	@values #AFE_API_VERSION_CUSTOM_TDM_HEADER_CONFIG */
+
+	u16	start_offset;
+	/**< The slot mapping start offset, in bytes, of this sub-frame.
+	Bytes are counted from 0. Offset 0 maps to the first byte in or out of
+	the digital serial data line this sub-frame belongs to.
+	@values, in bytes,
+	supported values are 0, 4, 8 */
+
+	u16	header_width;
+	/**< Header width per frame that follows:
+	2 bytes for the MOST/TDM case.
+	@values, in bytes,
+	supported value is 2 */
+
+	u16	header_type;
+	/**< Indicates what kind of custom TDM header it is.
+	@values #AFE_CUSTOM_TDM_HEADER_TYPE_INVALID = 0
+	#AFE_CUSTOM_TDM_HEADER_TYPE_DEFAULT = 1  (for AAN channel per MOST)
+	#AFE_CUSTOM_TDM_HEADER_TYPE_ENTERTAINMENT_MOST = 2
+	(for entertainment channel, which will overwrite
+	AFE_API_VERSION_TDM_SAD_HEADER_TYPE_DEFAULT per MOST) */
+
+	u16	num_frame_repeat;
+	/**< Number of headers that follow.
+	@values, supported value is 8 */
+	u16	header[AFE_CUSTOM_TDM_HEADER_MAX_CNT];
+	/**< The SAD header for the MOST/TDM case follows as payload, as
+	below. The size of the following SAD header in bytes is
+	num_of_frame_repeat * header_width_per_frame,
+	which is 2 * 8 = 16 bytes here.
+	The supported payload format is in uint16_t, as below:
+	uint16_t header0; SyncHi 0x3C Info[4] - CodecType -> 0x3C00
+	uint16_t header1; SyncLo 0xB2 Info[5] - SampleWidth -> 0xB218
+	uint16_t header2; DTCP Info     Info[6] - unused -> 0x0
+	uint16_t header3; Extension Info[7] - ASAD-Value -> 0xC0
+	uint16_t header4; Reserved Info[0] - Num of bytes following  -> 0x7
+	uint16_t header5; Reserved Info[1] - Media Type -> 0x0
+	uint16_t header6; Reserved Info[2] - Bitrate[kbps] - High Byte -> 0x0
+	uint16_t header7; Reserved Info[3] - Bitrate[kbps] - Low  Byte -> 0x0 */
+} __packed;
+
+struct afe_slot_mapping_config_command {
+	struct apr_hdr	hdr;
+	struct afe_port_cmd_set_param_v2	param;
+	struct afe_port_param_data_v2	pdata;
+	struct afe_param_id_slot_mapping_cfg	slot_mapping;
+} __packed;
+
+struct afe_custom_tdm_header_config_command {
+	struct apr_hdr	hdr;
+	struct afe_port_cmd_set_param_v2	param;
+	struct afe_port_param_data_v2	pdata;
+	struct afe_param_id_custom_tdm_header_cfg	custom_tdm_header;
+} __packed;
+
+struct afe_tdm_port_config {
+	struct afe_param_id_tdm_cfg				tdm;
+	struct afe_param_id_slot_mapping_cfg		slot_mapping;
+	struct afe_param_id_slot_mapping_cfg_v2		slot_mapping_v2;
+	struct afe_param_id_custom_tdm_header_cfg	custom_tdm_header;
+} __packed;
+
+#define AFE_PARAM_ID_DEVICE_HW_DELAY     0x00010243
+#define AFE_API_VERSION_DEVICE_HW_DELAY  0x1
+
+struct afe_param_id_device_hw_delay_cfg {
+	uint32_t    device_hw_delay_minor_version;
+	uint32_t    delay_in_us;
+} __packed;
+
+#define AFE_PARAM_ID_SET_TOPOLOGY    0x0001025A
+#define AFE_API_VERSION_TOPOLOGY_V1 0x1
+
+struct afe_param_id_set_topology_cfg {
+	/*
+	 * Minor version used for tracking afe topology id configuration.
+	 * @values #AFE_API_VERSION_TOPOLOGY_V1
+	 */
+	u32		minor_version;
+	/*
+	 * Id of the topology for the afe session.
+	 * @values Any valid AFE topology ID
+	 */
+	u32		topology_id;
+} __packed;
+
+
+/*
+ * Generic encoder module ID.
+ * This module supports the following parameter IDs:
+ * #AVS_ENCODER_PARAM_ID_ENC_FMT_ID (cannot be set at runtime)
+ * #AVS_ENCODER_PARAM_ID_ENC_CFG_BLK (may be set at runtime)
+ * #AVS_ENCODER_PARAM_ID_ENC_BITRATE (may be set at runtime)
+ * #AVS_ENCODER_PARAM_ID_PACKETIZER_ID (cannot be set at runtime)
+ * Opcode - AVS_MODULE_ID_ENCODER
+ * The AFE command AFE_PORT_CMD_SET_PARAM_V2 supports this module ID.
+ */
+#define AFE_MODULE_ID_ENCODER        0x00013229
+
+/* Macro for defining the packetizer ID: COP. */
+#define AFE_MODULE_ID_PACKETIZER_COP 0x0001322A
+
+/*
+ * Packetizer type parameter for the #AVS_MODULE_ID_ENCODER module.
+ * This parameter cannot be set at runtime.
+ */
+#define AFE_ENCODER_PARAM_ID_PACKETIZER_ID 0x0001322E
+
+/*
+ * Encoder config block parameter for the #AVS_MODULE_ID_ENCODER module.
+ * This parameter may be set at runtime.
+ */
+#define AFE_ENCODER_PARAM_ID_ENC_CFG_BLK 0x0001322C
+
+/*
+ * Encoder format ID parameter for the #AVS_MODULE_ID_ENCODER module.
+ * This parameter cannot be set at runtime.
+ */
+#define AFE_ENCODER_PARAM_ID_ENC_FMT_ID         0x0001322B
+
+/*
+ * Data format for compressed data
+ * transmitted/received over SLIMbus lines.
+ */
+#define AFE_SB_DATA_FORMAT_GENERIC_COMPRESSED    0x3
+
+/*
+ * ID for the AFE port module. This will be used to define port properties.
+ * This module supports the following parameter IDs:
+ * #AFE_PARAM_ID_PORT_MEDIA_TYPE
+ * To configure the port property, the client must use the
+ * #AFE_PORT_CMD_SET_PARAM_V2 command,
+ * and fill the module ID with the respective parameter IDs as listed above.
+ * @apr_hdr_fields
+ * Opcode -- AFE_MODULE_PORT
+ */
+#define AFE_MODULE_PORT                          0x000102a6
+
+/*
+ * ID of the parameter used by #AFE_MODULE_PORT to set the port media type.
+ * This parameter ID is currently supported using the
+ * #AFE_PORT_CMD_SET_PARAM_V2 command.
+ */
+#define AFE_PARAM_ID_PORT_MEDIA_TYPE              0x000102a7
+
+/*
+ * Macros for defining the "data_format" field in the
+ * #AFE_PARAM_ID_PORT_MEDIA_TYPE
+ */
+#define AFE_PORT_DATA_FORMAT_PCM                  0x0
+#define AFE_PORT_DATA_FORMAT_GENERIC_COMPRESSED   0x1
+
+/*
+ * Macro for defining the "minor_version" field in the
+ * #AFE_PARAM_ID_PORT_MEDIA_TYPE
+ */
+#define AFE_API_VERSION_PORT_MEDIA_TYPE           0x1
+
+#define ASM_MEDIA_FMT_NONE                        0x0
+
+/*
+ * Media format ID for SBC encode configuration
+ * (see asm_sbc_enc_cfg_t).
+ */
+#define ASM_MEDIA_FMT_SBC                         0x00010BF2
+
+/* SBC channel Mono mode.*/
+#define ASM_MEDIA_FMT_SBC_CHANNEL_MODE_MONO                     1
+
+/* SBC channel Stereo mode. */
+#define ASM_MEDIA_FMT_SBC_CHANNEL_MODE_STEREO                   2
+
+/* SBC channel Dual Mono mode. */
+#define ASM_MEDIA_FMT_SBC_CHANNEL_MODE_DUAL_MONO                8
+
+/* SBC channel Joint Stereo mode. */
+#define ASM_MEDIA_FMT_SBC_CHANNEL_MODE_JOINT_STEREO             9
+
+/* SBC bit allocation method = loudness. */
+#define ASM_MEDIA_FMT_SBC_ALLOCATION_METHOD_LOUDNESS            0
+
+/* SBC bit allocation method = SNR. */
+#define ASM_MEDIA_FMT_SBC_ALLOCATION_METHOD_SNR                 1
+
+
+/*
+ * Payload of the SBC encoder configuration parameters in the
+ * #ASM_MEDIA_FMT_SBC media format.
+ */
+struct asm_sbc_enc_cfg_t {
+	/*
+	 * Number of subbands.
+	 * @values 4, 8
+	 */
+	uint32_t    num_subbands;
+
+	/*
+	 * Size of the encoded block in samples.
+	 * @values 4, 8, 12, 16
+	 */
+	uint32_t    blk_len;
+
+	/*
+	 * Mode used to allocate bits between channels.
+	 * @values
+	 * 0 (Native mode)
+	 * #ASM_MEDIA_FMT_SBC_CHANNEL_MODE_MONO
+	 * #ASM_MEDIA_FMT_SBC_CHANNEL_MODE_STEREO
+	 * #ASM_MEDIA_FMT_SBC_CHANNEL_MODE_DUAL_MONO
+	 * #ASM_MEDIA_FMT_SBC_CHANNEL_MODE_JOINT_STEREO
+	 * Native mode indicates that encoding must be performed with the number
+	 * of channels at the input.
+	 * If postprocessing outputs one-channel data, Mono mode is used. If
+	 * postprocessing outputs two-channel data, Stereo mode is used.
+	 * The number of channels must not change during encoding.
+	 */
+	uint32_t    channel_mode;
+
+	/*
+	 * Encoder bit allocation method.
+	 * @values
+	 * #ASM_MEDIA_FMT_SBC_ALLOCATION_METHOD_LOUDNESS
+	 * #ASM_MEDIA_FMT_SBC_ALLOCATION_METHOD_SNR
+	 */
+	uint32_t    alloc_method;
+
+	/*
+	 * Number of encoded bits per second.
+	 * @values
+	 * Mono channel -- Maximum of 320 kbps
+	 * Stereo channel -- Maximum of 512 kbps
+	 */
+	uint32_t    bit_rate;
+
+	/*
+	 * Number of samples per second.
+	 * @values 0 (Native mode), 16000, 32000, 44100, 48000 Hz
+	 * Native mode indicates that encoding must be performed with the
+	 * sampling rate at the input.
+	 * The sampling rate must not change during encoding.
+	 */
+	uint32_t    sample_rate;
+};
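+
+/* Illustrative sketch (not part of the original API): one plausible SBC
+ * encoder configuration for 44.1 kHz joint-stereo audio. The chosen
+ * values are examples, not mandated defaults; the helper is
+ * hypothetical.
+ */
+static inline void example_fill_sbc_enc_cfg(struct asm_sbc_enc_cfg_t *cfg)
+{
+	cfg->num_subbands = 8;
+	cfg->blk_len      = 16;
+	cfg->channel_mode = ASM_MEDIA_FMT_SBC_CHANNEL_MODE_JOINT_STEREO;
+	cfg->alloc_method = ASM_MEDIA_FMT_SBC_ALLOCATION_METHOD_LOUDNESS;
+	cfg->bit_rate     = 320000;	/* bits per second, under the cap */
+	cfg->sample_rate  = 44100;
+}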
+
+#define ASM_MEDIA_FMT_AAC_AOT_LC            2
+#define ASM_MEDIA_FMT_AAC_AOT_SBR           5
+#define ASM_MEDIA_FMT_AAC_AOT_PS            29
+#define ASM_MEDIA_FMT_AAC_FORMAT_FLAG_ADTS  0
+#define ASM_MEDIA_FMT_AAC_FORMAT_FLAG_RAW   3
+
+struct asm_aac_enc_cfg_v2_t {
+
+	/* Encoding rate in bits per second.*/
+	uint32_t     bit_rate;
+
+	/*
+	 * Encoding mode.
+	 * Supported values:
+	 * #ASM_MEDIA_FMT_AAC_AOT_LC
+	 * #ASM_MEDIA_FMT_AAC_AOT_SBR
+	 * #ASM_MEDIA_FMT_AAC_AOT_PS
+	 */
+	uint32_t     enc_mode;
+
+	/*
+	 * AAC format flag.
+	 * Supported values:
+	 * #ASM_MEDIA_FMT_AAC_FORMAT_FLAG_ADTS
+	 * #ASM_MEDIA_FMT_AAC_FORMAT_FLAG_RAW
+	 */
+	uint16_t     aac_fmt_flag;
+
+	/*
+	 * Number of channels to encode.
+	 * Supported values:
+	 * 0 - Native mode
+	 * 1 - Mono
+	 * 2 - Stereo
+	 * Other values are not supported.
+	 * @note1hang The eAAC+ encoder mode supports only stereo.
+	 * Native mode indicates that encoding must be performed with the
+	 * number of channels at the input.
+	 * The number of channels must not change during encoding.
+	 */
+	uint16_t     channel_cfg;
+
+	/*
+	 * Number of samples per second.
+	 * Supported values: 0 -- Native mode.
+	 * Native mode indicates that encoding must be performed with the
+	 * sampling rate at the input.
+	 * The sampling rate must not change during encoding.
+	 */
+	uint32_t     sample_rate;
+} __packed;
+
+/* FMT ID for apt-X Classic */
+#define ASM_MEDIA_FMT_APTX 0x000131ff
+
+/* FMT ID for apt-X HD */
+#define ASM_MEDIA_FMT_APTX_HD 0x00013200
+
+#define PCM_CHANNEL_L         1
+#define PCM_CHANNEL_R         2
+#define PCM_CHANNEL_C         3
+
+struct asm_custom_enc_cfg_aptx_t {
+	uint32_t    sample_rate;
+	/* Mono or stereo */
+	uint16_t    num_channels;
+	uint16_t    reserved;
+	/* num_ch == 1, then PCM_CHANNEL_C,
+	 * num_ch == 2, then {PCM_CHANNEL_L, PCM_CHANNEL_R}
+	 */
+	uint8_t     channel_mapping[8];
+	uint32_t    custom_size;
+} __packed;
+
+struct afe_enc_fmt_id_param_t {
+	/*
+	 * Supported values:
+	 *  #ASM_MEDIA_FMT_SBC
+	 *  #ASM_MEDIA_FMT_AAC_V2
+	 * Any OpenDSP supported values
+	 */
+	uint32_t    fmt_id;
+} __packed;
+
+struct afe_port_media_type_t {
+	/*
+	 * Minor version
+	 * @values #AFE_API_VERSION_PORT_MEDIA_TYPE.
+	 */
+	uint32_t    minor_version;
+
+	/*
+	 * Sampling rate of the port.
+	 * @values
+	 * #AFE_PORT_SAMPLE_RATE_8K
+	 * #AFE_PORT_SAMPLE_RATE_11_025K
+	 * #AFE_PORT_SAMPLE_RATE_12K
+	 * #AFE_PORT_SAMPLE_RATE_16K
+	 * #AFE_PORT_SAMPLE_RATE_22_05K
+	 * #AFE_PORT_SAMPLE_RATE_24K
+	 * #AFE_PORT_SAMPLE_RATE_32K
+	 * #AFE_PORT_SAMPLE_RATE_44_1K
+	 * #AFE_PORT_SAMPLE_RATE_48K
+	 * #AFE_PORT_SAMPLE_RATE_88_2K
+	 * #AFE_PORT_SAMPLE_RATE_96K
+	 * #AFE_PORT_SAMPLE_RATE_176_4K
+	 * #AFE_PORT_SAMPLE_RATE_192K
+	 * #AFE_PORT_SAMPLE_RATE_352_8K
+	 * #AFE_PORT_SAMPLE_RATE_384K
+	 */
+	uint32_t    sample_rate;
+
+	/*
+	 * Bit width of the sample.
+	 * @values 16, 24
+	 */
+	uint16_t    bit_width;
+
+	/*
+	 * Number of channels.
+	 * @values 1 to #AFE_PORT_MAX_AUDIO_CHAN_CNT
+	 */
+	uint16_t    num_channels;
+
+	/*
+	 * Data format supported by this port.
+	 * If the port media type and device media type are different,
+	 * it signifies an encoding/decoding use case.
+	 * @values
+	 * #AFE_PORT_DATA_FORMAT_PCM
+	 * #AFE_PORT_DATA_FORMAT_GENERIC_COMPRESSED
+	 */
+	uint16_t   data_format;
+
+	/* This field must be set to zero. */
+	uint16_t   reserved;
+} __packed;
+
+union afe_enc_config_data {
+	struct asm_sbc_enc_cfg_t sbc_config;
+	struct asm_aac_enc_cfg_v2_t aac_config;
+	struct asm_custom_enc_cfg_aptx_t  aptx_config;
+};
+
+struct afe_enc_config {
+	u32 format;
+	union afe_enc_config_data data;
+};
+
+struct afe_enc_cfg_blk_param_t {
+	uint32_t enc_cfg_blk_size;
+	/*
+	 * Size of the encoder configuration block that follows this member.
+	 */
+	union afe_enc_config_data enc_blk_config;
+};
+
+/*
+ * Payload of the AVS_ENCODER_PARAM_ID_PACKETIZER_ID parameter.
+ */
+struct avs_enc_packetizer_id_param_t {
+	/*
+	 * Supported values:
+	 * #AVS_MODULE_ID_PACKETIZER_COP
+	 * Any OpenDSP supported values
+	 */
+	uint32_t enc_packetizer_id;
+};
+
+union afe_port_config {
+	struct afe_param_id_pcm_cfg               pcm;
+	struct afe_param_id_i2s_cfg               i2s;
+	struct afe_param_id_hdmi_multi_chan_audio_cfg hdmi_multi_ch;
+	struct afe_param_id_slimbus_cfg           slim_sch;
+	struct afe_param_id_rt_proxy_port_cfg     rtproxy;
+	struct afe_param_id_internal_bt_fm_cfg    int_bt_fm;
+	struct afe_param_id_pseudo_port_cfg       pseudo_port;
+	struct afe_param_id_device_hw_delay_cfg   hw_delay;
+	struct afe_param_id_spdif_cfg             spdif;
+	struct afe_param_id_set_topology_cfg      topology;
+	struct afe_param_id_tdm_cfg               tdm;
+	struct afe_param_id_usb_audio_cfg         usb_audio;
+	struct afe_enc_fmt_id_param_t             enc_fmt;
+	struct afe_port_media_type_t              media_type;
+	struct afe_enc_cfg_blk_param_t            enc_blk_param;
+	struct avs_enc_packetizer_id_param_t      enc_pkt_id_param;
+} __packed;
+
+struct afe_audioif_config_command_no_payload {
+	struct apr_hdr			hdr;
+	struct afe_port_cmd_set_param_v2 param;
+} __packed;
+
+struct afe_audioif_config_command {
+	struct apr_hdr			hdr;
+	struct afe_port_cmd_set_param_v2 param;
+	struct afe_port_param_data_v2    pdata;
+	union afe_port_config            port;
+} __packed;
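+
+/* Illustrative sketch (not part of the original API): the union member
+ * used in afe_audioif_config_command.port must match the param_id
+ * placed in pdata. Field names follow the afe_port_param_data_v2
+ * definition earlier in this header; the helper is hypothetical.
+ */
+static inline void example_prepare_i2s_command(
+	struct afe_audioif_config_command *cmd)
+{
+	cmd->pdata.module_id  = AFE_MODULE_AUDIO_DEV_INTERFACE;
+	cmd->pdata.param_id   = AFE_PARAM_ID_I2S_CONFIG;
+	cmd->pdata.param_size = sizeof(cmd->port.i2s);
+	example_fill_i2s_cfg(&cmd->port.i2s);	/* defined above */
+}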
+
+#define AFE_PORT_CMD_DEVICE_START 0x000100E5
+
+/*  Payload of the #AFE_PORT_CMD_DEVICE_START. */
+struct afe_port_cmd_device_start {
+	struct apr_hdr hdr;
+	u16                  port_id;
+/* Port interface and direction (Rx or Tx) to start. An even
+ * number represents the Rx direction, and an odd number represents
+ * the Tx direction.
+ */
+
+
+	u16                  reserved;
+/* Reserved for 32-bit alignment. This field must be set to 0.*/
+
+} __packed;
+
+#define AFE_PORT_CMD_DEVICE_STOP  0x000100E6
+
+/*  Payload of the #AFE_PORT_CMD_DEVICE_STOP.
+*/
+struct afe_port_cmd_device_stop {
+	struct apr_hdr hdr;
+	u16                  port_id;
+/* Port interface and direction (Rx or Tx) to stop. An even
+ * number represents the Rx direction, and an odd number represents
+ * the Tx direction.
+ */
+
+	u16                  reserved;
+/* Reserved for 32-bit alignment. This field must be set to 0.*/
+} __packed;
+
+#define AFE_SERVICE_CMD_SHARED_MEM_MAP_REGIONS 0x000100EA
+
+/*  Memory map regions command payload used by the
+ * #AFE_SERVICE_CMD_SHARED_MEM_MAP_REGIONS command.
+ * This structure allows clients to map multiple shared memory
+ * regions in a single command. Following this structure are
+ * num_regions of afe_service_shared_map_region_payload.
+ */
+struct afe_service_cmd_shared_mem_map_regions {
+	struct apr_hdr hdr;
+	u16                  mem_pool_id;
+/* Type of memory on which this memory region is mapped.
+ * Supported values:
+ * - #ADSP_MEMORY_MAP_EBI_POOL
+ * - #ADSP_MEMORY_MAP_SMI_POOL
+ * - #ADSP_MEMORY_MAP_SHMEM8_4K_POOL
+ * - Other values are reserved
+ *
+ * The memory pool ID implicitly defines the characteristics of the
+ * memory. Characteristics may include alignment type, permissions,
+ * etc.
+ *
+ * ADSP_MEMORY_MAP_EBI_POOL is External Buffer Interface type memory
+ * ADSP_MEMORY_MAP_SMI_POOL is Shared Memory Interface type memory
+ * ADSP_MEMORY_MAP_SHMEM8_4K_POOL is shared memory, byte
+ * addressable, and 4 KB aligned.
+ */
+
+
+	u16                  num_regions;
+/* Number of regions to map.
+ * Supported values:
+ * - Any value greater than zero
+ */
+
+	u32                  property_flag;
+/* Configures one common property for all the regions in the
+ * payload.
+ *
+ * Supported values: 0x00000000 to 0x00000001
+ *
+ * b0 - Bit 0 indicates physical or virtual mapping:
+ *
+ * 0 -- The shared memory address provided in
+ * afe_service_shared_map_region_payload is a physical address. The
+ * shared memory needs to be mapped (hardware TLB entry), and a
+ * software entry needs to be added for internal bookkeeping.
+ *
+ * 1 -- The shared memory address provided in
+ * afe_service_shared_map_region_payload is a virtual address. The
+ * shared memory must not be mapped (since the hardware TLB entry is
+ * already available), but a software entry needs to be added for
+ * internal bookkeeping. This can be useful when two services within
+ * the ADSP communicate via APR; they can then communicate directly
+ * via the virtual address instead of the physical address. The
+ * virtual regions must be contiguous, and num_regions must be 1 in
+ * this case.
+ *
+ * b31-b1 - Reserved bits; must be set to zero.
+ */
+
+
+} __packed;
+/*  Map region payload used by the
+ * afe_service_cmd_shared_mem_map_regions structure.
+ */
+struct afe_service_shared_map_region_payload {
+	u32                  shm_addr_lsw;
+/* Least significant word of the starting address of the memory
+ * region to map. It must be contiguous memory, and it must be 4 KB
+ * aligned.
+ * Supported values: - Any 32 bit value
+ */
+
+
+	u32                  shm_addr_msw;
+/* Most significant word of the starting address of the memory
+ * region to map.
+ *
+ * Supported values:
+ * - For a 32 bit shared memory address, this field must be set to
+ *   zero.
+ * - For a 36 bit shared memory address, bit 31 to bit 4 must be set
+ *   to zero.
+ * - For a 64 bit shared memory address, any 32 bit value.
+ */
+
+
+	u32                  mem_size_bytes;
+/* Number of bytes in the region. The aDSP will always map the
+ * regions as virtual contiguous memory, but the memory size must be
+ * in multiples of 4 KB to avoid gaps in the virtually contiguous
+ * mapped memory.
+ *
+ * Supported values: - multiples of 4KB
+ */
+
+} __packed;
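+
+/* Illustrative sketch (not part of the original API): splitting a
+ * 64-bit physical address into the shm_addr_lsw/shm_addr_msw pair
+ * expected by this payload. The helper is hypothetical.
+ */
+static inline void example_fill_map_region(
+	struct afe_service_shared_map_region_payload *pl,
+	u64 phys_addr, u32 size_bytes)
+{
+	pl->shm_addr_lsw   = (u32)(phys_addr & 0xFFFFFFFF);
+	pl->shm_addr_msw   = (u32)(phys_addr >> 32);
+	pl->mem_size_bytes = size_bytes;	/* multiple of 4 KB */
+}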
+
+#define AFE_SERVICE_CMDRSP_SHARED_MEM_MAP_REGIONS 0x000100EB
+struct afe_service_cmdrsp_shared_mem_map_regions {
+	u32                  mem_map_handle;
+/* A memory map handle encapsulating shared memory attributes is
+ * returned if the AFE_SERVICE_CMD_SHARED_MEM_MAP_REGIONS command is
+ * successful. In the case of failure, a generic APR error response
+ * is returned to the client.
+ *
+ * Supported Values: - Any 32 bit value
+ */
+
+} __packed;
+#define AFE_SERVICE_CMD_SHARED_MEM_UNMAP_REGIONS 0x000100EC
+/* Memory unmap regions command payload used by the
+ * #AFE_SERVICE_CMD_SHARED_MEM_UNMAP_REGIONS
+ *
+ * This structure allows clients to unmap multiple shared memory
+ * regions in a single command.
+ */
+
+
+struct afe_service_cmd_shared_mem_unmap_regions {
+	struct apr_hdr hdr;
+	u32                  mem_map_handle;
+/* memory map handle returned by
+ * AFE_SERVICE_CMD_SHARED_MEM_MAP_REGIONS commands
+ *
+ * Supported Values:
+ * - Any 32 bit value
+ */
+} __packed;
+
+#define  AFE_PORT_CMD_GET_PARAM_V2 0x000100F0
+
+/*  Payload of the #AFE_PORT_CMD_GET_PARAM_V2 command,
+ * which queries for one post/preprocessing parameter of a
+ * stream.
+ */
+struct afe_port_cmd_get_param_v2 {
+	u16 port_id;
+/* Port interface and direction (Rx or Tx) being queried. */
+
+	u16 payload_size;
+/* Maximum data size of the parameter ID/module ID combination.
+ * This is a multiple of four bytes
+ * Supported values: > 0
+ */
+
+	u32 payload_address_lsw;
+/* LSW of the 64 bit payload address. The address should be
+ * 32-byte and 4 KB aligned and must be contiguous memory.
+ */
+
+
+	u32 payload_address_msw;
+/* MSW of the 64 bit payload address. In the case of a 32-bit shared
+ * memory address, this field must be set to zero. In the case of a
+ * 36-bit shared memory address, bit 4 to bit 31 must be set to
+ * zero. The address should be 32-byte and 4 KB aligned and must be
+ * contiguous memory.
+ */
+
+	u32 mem_map_handle;
+/* Memory map handle returned by the
+ * AFE_SERVICE_CMD_SHARED_MEM_MAP_REGIONS command.
+ * Supported values:
+ * - NULL -- The parameter data is in-band (in the message).
+ * - Non-NULL -- The parameter data is out-of-band; this is a
+ *   pointer to the physical address in shared memory of the
+ *   payload data.
+ * For detailed payload content, see the afe_port_param_data_v2
+ * structure.
+ */
+
+
+	u32 module_id;
+/* ID of the module to be queried.
+ * Supported values: Valid module ID
+ */
+
+	u32 param_id;
+/* ID of the parameter to be queried.
+ * Supported values: Valid parameter ID
+ */
+} __packed;
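+
+/* Illustrative sketch (an assumption): choosing in-band vs.
+ * out-of-band delivery for AFE_PORT_CMD_GET_PARAM_V2. The local
+ * variable names are hypothetical.
+ *
+ *   struct afe_port_cmd_get_param_v2 q = {0};
+ *
+ *   q.port_id        = port_id;
+ *   q.payload_size   = max_param_size;
+ *   q.module_id      = module_id;
+ *   q.param_id       = param_id;
+ *   q.mem_map_handle = 0;
+ *
+ * With mem_map_handle = 0 the response parameter data arrives
+ * in-band after AFE_PORT_CMDRSP_GET_PARAM_V2; for out-of-band, set
+ * mem_map_handle to the handle from
+ * AFE_SERVICE_CMDRSP_SHARED_MEM_MAP_REGIONS and fill
+ * payload_address_lsw/msw with the mapped physical address.
+ */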
+
+#define AFE_PORT_CMDRSP_GET_PARAM_V2 0x00010106
+
+/* Payload of the #AFE_PORT_CMDRSP_GET_PARAM_V2 message, which
+ * responds to an #AFE_PORT_CMD_GET_PARAM_V2 command.
+ *
+ * Immediately following this structure is the parameter structure
+ * (afe_port_param_data_v2) containing the response (acknowledgment)
+ * parameter payload. This payload is included for an in-band
+ * scenario. For an address/shared memory-based get parameter, this
+ * payload is not needed.
+ */
+
+
+struct afe_port_cmdrsp_get_param_v2 {
+	u32                  status;
+} __packed;
+
+#define AFE_PARAM_ID_LPASS_CORE_SHARED_CLOCK_CONFIG	0x0001028C
+#define AFE_API_VERSION_LPASS_CORE_SHARED_CLK_CONFIG	0x1
+/*
+ * Payload of the AFE_PARAM_ID_LPASS_CORE_SHARED_CLOCK_CONFIG parameter used by
+ * AFE_MODULE_AUDIO_DEV_INTERFACE.
+ */
+struct afe_param_id_lpass_core_shared_clk_cfg {
+	u32	lpass_core_shared_clk_cfg_minor_version;
+/*
+ * Minor version used for lpass core shared clock configuration
+ * Supported value: AFE_API_VERSION_LPASS_CORE_SHARED_CLK_CONFIG
+ */
+	u32	enable;
+/*
+ * Specifies whether the lpass core shared clock is
+ * enabled (1) or disabled (0).
+ */
+} __packed;
+
+struct afe_lpass_core_shared_clk_config_command {
+	struct apr_hdr		   hdr;
+	struct afe_port_cmd_set_param_v2 param;
+	struct afe_port_param_data_v2    pdata;
+	struct afe_param_id_lpass_core_shared_clk_cfg clk_cfg;
+} __packed;
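+
+/* Illustrative sketch (an assumption): enabling the LPASS core
+ * shared clock with the command above. The APR header and the
+ * set-param addressing fields are omitted, and the
+ * afe_port_param_data_v2 field names are assumed from their use
+ * elsewhere in this file.
+ *
+ *   struct afe_lpass_core_shared_clk_config_command cmd = {{0}};
+ *
+ *   cmd.pdata.module_id  = AFE_MODULE_AUDIO_DEV_INTERFACE;
+ *   cmd.pdata.param_id   = AFE_PARAM_ID_LPASS_CORE_SHARED_CLOCK_CONFIG;
+ *   cmd.pdata.param_size = sizeof(cmd.clk_cfg);
+ *   cmd.clk_cfg.lpass_core_shared_clk_cfg_minor_version =
+ *       AFE_API_VERSION_LPASS_CORE_SHARED_CLK_CONFIG;
+ *   cmd.clk_cfg.enable   = 1;
+ */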
+
+/* adsp_afe_service_commands.h */
+
+#define ADSP_MEMORY_MAP_EBI_POOL      0
+
+#define ADSP_MEMORY_MAP_SMI_POOL      1
+#define ADSP_MEMORY_MAP_IMEM_POOL      2
+#define ADSP_MEMORY_MAP_SHMEM8_4K_POOL      3
+/* Definition of the virtual memory flag. */
+#define ADSP_MEMORY_MAP_VIRTUAL_MEMORY 1
+
+/* Definition of the physical memory flag. */
+#define ADSP_MEMORY_MAP_PHYSICAL_MEMORY 0
+
+#define NULL_POPP_TOPOLOGY				0x00010C68
+#define NULL_COPP_TOPOLOGY				0x00010312
+#define DEFAULT_COPP_TOPOLOGY				0x00010314
+#define DEFAULT_POPP_TOPOLOGY				0x00010BE4
+#define COMPRESSED_PASSTHROUGH_DEFAULT_TOPOLOGY         0x0001076B
+#define COMPRESSED_PASSTHROUGH_NONE_TOPOLOGY            0x00010774
+#define VPM_TX_SM_ECNS_COPP_TOPOLOGY			0x00010F71
+#define VPM_TX_DM_FLUENCE_COPP_TOPOLOGY			0x00010F72
+#define VPM_TX_QMIC_FLUENCE_COPP_TOPOLOGY		0x00010F75
+#define VPM_TX_DM_RFECNS_COPP_TOPOLOGY			0x00010F86
+#define ADM_CMD_COPP_OPEN_TOPOLOGY_ID_DTS_HPX		0x10015002
+#define ADM_CMD_COPP_OPEN_TOPOLOGY_ID_AUDIOSPHERE	0x10028000
+
+/* Memory map regions command payload used by the
+ * #ASM_CMD_SHARED_MEM_MAP_REGIONS and #ADM_CMD_SHARED_MEM_MAP_REGIONS
+ * commands.
+ *
+ * This structure allows clients to map multiple shared memory
+ * regions in a single command. Following this structure are
+ * num_regions of avs_shared_map_region_payload.
+ */
+
+
+struct avs_cmd_shared_mem_map_regions {
+	struct apr_hdr hdr;
+	u16                  mem_pool_id;
+/* Type of memory on which this memory region is mapped.
+ *
+ * Supported values:
+ * - #ADSP_MEMORY_MAP_EBI_POOL
+ * - #ADSP_MEMORY_MAP_SMI_POOL
+ * - #ADSP_MEMORY_MAP_IMEM_POOL (unsupported)
+ * - #ADSP_MEMORY_MAP_SHMEM8_4K_POOL
+ * - Other values are reserved
+ *
+ * The memory ID implicitly defines the characteristics of the
+ * memory. Characteristics may include alignment type, permissions,
+ * etc.
+ *
+ * SHMEM8_4K is shared memory, byte addressable, and 4 KB aligned.
+ */
+
+
+	u16                  num_regions;
+	/* Number of regions to map.*/
+
+	u32                  property_flag;
+/* Configures one common property for all the regions in the
+ * payload. No two regions in the same memory map regions command
+ * can have different properties. Supported values: 0x00000000 to
+ * 0x00000001.
+ *
+ * Bit 0 indicates physical or virtual mapping:
+ *
+ * - 0 -- The shared memory address provided in
+ * avs_shared_map_region_payload is a physical address. The shared
+ * memory needs to be mapped (a hardware TLB entry), and a software
+ * entry needs to be added for internal bookkeeping.
+ *
+ * - 1 -- The shared memory address provided in
+ * avs_shared_map_region_payload is a virtual address. The shared
+ * memory must not be mapped (since the hardware TLB entry is
+ * already available), but a software entry needs to be added for
+ * internal bookkeeping. This can be useful if two services within
+ * the aDSP are communicating via APR; they can then communicate
+ * directly via the virtual address instead of the physical address.
+ * The virtual regions must be contiguous.
+ *
+ * Bits 31 to 1 are reserved and must be set to zero.
+ */
+
+} __packed;
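+
+/* Illustrative sketch (an assumption): header fields for mapping one
+ * physically addressed region. With num_regions = 1, a single
+ * avs_shared_map_region_payload follows in the same buffer,
+ * immediately after this structure.
+ *
+ *   struct avs_cmd_shared_mem_map_regions m = {{0}};
+ *
+ *   m.mem_pool_id   = ADSP_MEMORY_MAP_SHMEM8_4K_POOL;
+ *   m.num_regions   = 1;
+ *   m.property_flag = ADSP_MEMORY_MAP_PHYSICAL_MEMORY;
+ */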
+
+struct avs_shared_map_region_payload {
+	u32                  shm_addr_lsw;
+/* least significant word of shared memory address of the memory
+ * region to map. It must be contiguous memory, and it must be 4 KB
+ * aligned.
+ */
+
+	u32                  shm_addr_msw;
+/* most significant word of the shared memory address of the memory
+ * region to map. For a 32 bit shared memory address, this field must
+ * be set to zero. For a 36 bit shared memory address, bit 31 to bit 4
+ * must be set to zero.
+ */
+
+	u32                  mem_size_bytes;
+/* Number of bytes in the region.
+ *
+ * The aDSP will always map the regions as virtual contiguous
+ * memory, but the memory size must be in multiples of 4 KB to avoid
+ * gaps in the virtually contiguous mapped memory.
+ */
+
+} __packed;
+
+struct avs_cmd_shared_mem_unmap_regions {
+	struct apr_hdr       hdr;
+	u32                  mem_map_handle;
+/* memory map handle returned by the ASM_CMD_SHARED_MEM_MAP_REGIONS
+ * or ADM_CMD_SHARED_MEM_MAP_REGIONS commands
+ */
+
+} __packed;
+
+/* Memory map command response payload used by the
+ * #ASM_CMDRSP_SHARED_MEM_MAP_REGIONS and
+ * #ADM_CMDRSP_SHARED_MEM_MAP_REGIONS commands.
+ */
+
+
+struct avs_cmdrsp_shared_mem_map_regions {
+	u32                  mem_map_handle;
+/* A memory map handle encapsulating shared memory attributes is
+ * returned.
+ */
+
+} __packed;
+
+/* adsp_audio_memmap_api.h */
+
+/* ASM related data structures */
+struct asm_wma_cfg {
+	u16 format_tag;
+	u16 ch_cfg;
+	u32 sample_rate;
+	u32 avg_bytes_per_sec;
+	u16 block_align;
+	u16 valid_bits_per_sample;
+	u32 ch_mask;
+	u16 encode_opt;
+	u16 adv_encode_opt;
+	u32 adv_encode_opt2;
+	u32 drc_peak_ref;
+	u32 drc_peak_target;
+	u32 drc_ave_ref;
+	u32 drc_ave_target;
+} __packed;
+
+struct asm_wmapro_cfg {
+	u16 format_tag;
+	u16 ch_cfg;
+	u32 sample_rate;
+	u32 avg_bytes_per_sec;
+	u16 block_align;
+	u16 valid_bits_per_sample;
+	u32 ch_mask;
+	u16 encode_opt;
+	u16 adv_encode_opt;
+	u32 adv_encode_opt2;
+	u32 drc_peak_ref;
+	u32 drc_peak_target;
+	u32 drc_ave_ref;
+	u32 drc_ave_target;
+} __packed;
+
+struct asm_aac_cfg {
+	u16 format;
+	u16 aot;
+	u16 ep_config;
+	u16 section_data_resilience;
+	u16 scalefactor_data_resilience;
+	u16 spectral_data_resilience;
+	u16 ch_cfg;
+	u16 reserved;
+	u32 sample_rate;
+} __packed;
+
+struct asm_amrwbplus_cfg {
+	u32  size_bytes;
+	u32  version;
+	u32  num_channels;
+	u32  amr_band_mode;
+	u32  amr_dtx_mode;
+	u32  amr_frame_fmt;
+	u32  amr_lsf_idx;
+} __packed;
+
+struct asm_flac_cfg {
+	u32 sample_rate;
+	u32 ext_sample_rate;
+	u32 min_frame_size;
+	u32 max_frame_size;
+	u16 stream_info_present;
+	u16 min_blk_size;
+	u16 max_blk_size;
+	u16 ch_cfg;
+	u16 sample_size;
+	u16 md5_sum;
+};
+
+struct asm_alac_cfg {
+	u32 frame_length;
+	u8 compatible_version;
+	u8 bit_depth;
+	u8 pb;
+	u8 mb;
+	u8 kb;
+	u8 num_channels;
+	u16 max_run;
+	u32 max_frame_bytes;
+	u32 avg_bit_rate;
+	u32 sample_rate;
+	u32 channel_layout_tag;
+};
+
+struct asm_g711_dec_cfg {
+	u32 sample_rate;
+};
+
+struct asm_vorbis_cfg {
+	u32 bit_stream_fmt;
+};
+
+struct asm_ape_cfg {
+	u16 compatible_version;
+	u16 compression_level;
+	u32 format_flags;
+	u32 blocks_per_frame;
+	u32 final_frame_blocks;
+	u32 total_frames;
+	u16 bits_per_sample;
+	u16 num_channels;
+	u32 sample_rate;
+	u32 seek_table_present;
+};
+
+struct asm_dsd_cfg {
+	u16 num_version;
+	u16 is_bitwise_big_endian;
+	u16 dsd_channel_block_size;
+	u16 num_channels;
+	u8  channel_mapping[8];
+	u32 dsd_data_rate;
+};
+
+struct asm_softpause_params {
+	u32 enable;
+	u32 period;
+	u32 step;
+	u32 rampingcurve;
+} __packed;
+
+struct asm_softvolume_params {
+	u32 period;
+	u32 step;
+	u32 rampingcurve;
+} __packed;
+
+struct asm_stream_pan_ctrl_params {
+	uint16_t num_output_channels;
+	uint16_t num_input_channels;
+	uint16_t output_channel_map[8];
+	uint16_t input_channel_map[8];
+	uint32_t gain[64];
+} __packed;
+
+struct adm_matrix_ramp_gains_params {
+	uint16_t session_id;
+	uint16_t be_id;
+	uint16_t num_gains;
+	uint16_t path;
+	uint16_t channels;
+	uint16_t gain_value[32];
+} __packed;
+
+struct adm_matrix_mute_params {
+	uint16_t session_id;
+	uint16_t be_id;
+	uint16_t channels;
+	uint16_t path;
+	uint8_t mute_flag[32];
+} __packed;
+
+#define ASM_END_POINT_DEVICE_MATRIX     0
+
+#define PCM_CHANNEL_NULL 0
+
+/* Front left channel. */
+#define PCM_CHANNEL_FL    1
+
+/* Front right channel. */
+#define PCM_CHANNEL_FR    2
+
+/* Front center channel. */
+#define PCM_CHANNEL_FC    3
+
+/* Left surround channel.*/
+#define PCM_CHANNEL_LS   4
+
+/* Right surround channel.*/
+#define PCM_CHANNEL_RS   5
+
+/* Low frequency effect channel. */
+#define PCM_CHANNEL_LFE  6
+
+/* Center surround channel; Rear center channel. */
+#define PCM_CHANNEL_CS   7
+
+/* Left back channel; Rear left channel. */
+#define PCM_CHANNEL_LB   8
+
+/* Right back channel; Rear right channel. */
+#define PCM_CHANNEL_RB   9
+
+/* Top surround channel. */
+#define PCM_CHANNELS   10
+
+/* Center vertical height channel.*/
+#define PCM_CHANNEL_CVH  11
+
+/* Mono surround channel.*/
+#define PCM_CHANNEL_MS   12
+
+/* Front left of center. */
+#define PCM_CHANNEL_FLC  13
+
+/* Front right of center. */
+#define PCM_CHANNEL_FRC  14
+
+/* Rear left of center. */
+#define PCM_CHANNEL_RLC  15
+
+/* Rear right of center. */
+#define PCM_CHANNEL_RRC  16
+
+/* Second low frequency channel. */
+#define PCM_CHANNEL_LFE2 17
+
+/* Side left channel. */
+#define PCM_CHANNEL_SL   18
+
+/* Side right channel. */
+#define PCM_CHANNEL_SR   19
+
+/* Top front left channel. */
+#define PCM_CHANNEL_TFL  20
+
+/* Left vertical height channel. */
+#define PCM_CHANNEL_LVH  20
+
+/* Top front right channel. */
+#define PCM_CHANNEL_TFR  21
+
+/* Right vertical height channel. */
+#define PCM_CHANNEL_RVH  21
+
+/* Top center channel. */
+#define PCM_CHANNEL_TC   22
+
+/* Top back left channel. */
+#define PCM_CHANNEL_TBL  23
+
+/* Top back right channel. */
+#define PCM_CHANNEL_TBR  24
+
+/* Top side left channel. */
+#define PCM_CHANNEL_TSL  25
+
+/* Top side right channel. */
+#define PCM_CHANNEL_TSR  26
+
+/* Top back center channel. */
+#define PCM_CHANNEL_TBC  27
+
+/* Bottom front center channel. */
+#define PCM_CHANNEL_BFC  28
+
+/* Bottom front left channel. */
+#define PCM_CHANNEL_BFL  29
+
+/* Bottom front right channel. */
+#define PCM_CHANNEL_BFR  30
+
+/* Left wide channel. */
+#define PCM_CHANNEL_LW   31
+
+/* Right wide channel. */
+#define PCM_CHANNEL_RW   32
+
+/* Left side direct channel. */
+#define PCM_CHANNEL_LSD  33
+
+/* Right side direct channel. */
+#define PCM_CHANNEL_RSD  34
+
+#define PCM_FORMAT_MAX_NUM_CHANNEL 8
+
+#define PCM_FORMAT_MAX_NUM_CHANNEL_V2  32
+
+#define ASM_MEDIA_FMT_MULTI_CHANNEL_PCM_V2 0x00010DA5
+
+#define ASM_MEDIA_FMT_MULTI_CHANNEL_PCM_V3 0x00010DDC
+
+#define ASM_MEDIA_FMT_MULTI_CHANNEL_PCM_V4 0x0001320C
+
+#define ASM_MEDIA_FMT_MULTI_CHANNEL_PCM_V5 0x00013222
+
+#define ASM_MEDIA_FMT_EVRCB_FS 0x00010BEF
+
+#define ASM_MEDIA_FMT_EVRCWB_FS 0x00010BF0
+
+#define ASM_MEDIA_FMT_GENERIC_COMPRESSED  0x00013212
+
+#define ASM_MAX_EQ_BANDS 12
+
+#define ASM_DATA_CMD_MEDIA_FMT_UPDATE_V2 0x00010D98
+
+struct asm_data_cmd_media_fmt_update_v2 {
+	u32                    fmt_blk_size;
+	/* Media format block size in bytes.*/
+}  __packed;
+
+struct asm_generic_compressed_fmt_blk_t {
+	struct apr_hdr hdr;
+	struct asm_data_cmd_media_fmt_update_v2 fmt_blk;
+
+	/*
+	 * Channel mapping array of bitstream output.
+	 * Channel[i] mapping describes channel i inside the buffer, where
+	 * i < num_channels. All valid used channels must be
+	 * present at the beginning of the array.
+	 */
+	uint8_t channel_mapping[8];
+
+	/*
+	 * Number of channels of the incoming bitstream.
+	 * Supported values: 1,2,3,4,5,6,7,8
+	 */
+	uint16_t num_channels;
+
+	/*
+	 * Nominal bits per sample value of the incoming bitstream.
+	 * Supported values: 16, 32
+	 */
+	uint16_t bits_per_sample;
+
+	/*
+	 * Nominal sampling rate of the incoming bitstream.
+	 * Supported values: 8000, 11025, 16000, 22050, 24000, 32000,
+	 *                   44100, 48000, 88200, 96000, 176400, 192000,
+	 *                   352800, 384000
+	 */
+	uint32_t sampling_rate;
+
+} __packed;
+
+
+/* Command to send the sample rate and channel count for IEC61937
+ * (compressed) or IEC60958 (PCM) streams. Both audio standards use
+ * the same format block and are used for HDMI or SPDIF.
+ */
+#define ASM_DATA_CMD_IEC_60958_MEDIA_FMT        0x0001321E
+
+struct asm_iec_compressed_fmt_blk_t {
+	struct apr_hdr hdr;
+
+	/*
+	 * Nominal sampling rate of the incoming bitstream.
+	 * Supported values: 8000, 11025, 16000, 22050, 24000, 32000,
+	 *                   44100, 48000, 88200, 96000, 176400, 192000,
+	 *                   352800, 384000
+	 */
+	uint32_t sampling_rate;
+
+	/*
+	 * Number of channels of the incoming bitstream.
+	 * Supported values: 1,2,3,4,5,6,7,8
+	 */
+	uint32_t num_channels;
+
+} __packed;
+
+struct asm_multi_channel_pcm_fmt_blk_v2 {
+	struct apr_hdr hdr;
+	struct asm_data_cmd_media_fmt_update_v2 fmt_blk;
+
+	u16  num_channels;
+	/* Number of channels. Supported values: 1 to 8 */
+	u16  bits_per_sample;
+/* Number of bits per sample per channel.
+ * Supported values: 16, 24
+ *
+ * When used for playback, the client must send 24-bit samples
+ * packed in 32-bit words. The 24-bit samples must be placed in the
+ * most significant 24 bits of the 32-bit word. When used for
+ * recording, the aDSP sends 24-bit samples packed in 32-bit words.
+ * The 24-bit samples are placed in the most significant 24 bits of
+ * the 32-bit word.
+ */
+
+
+	u32  sample_rate;
+/* Number of samples per second (in Hertz).
+ * Supported values: 2000 to 48000
+ */
+
+	u16  is_signed;
+	/* Flag that indicates the samples are signed (1). */
+
+	u16  reserved;
+	/* Reserved field for 32 bit alignment. Must be set to zero. */
+
+	u8   channel_mapping[8];
+/* Channel array of size 8.
+ * Supported values:
+ * - #PCM_CHANNEL_L
+ * - #PCM_CHANNEL_R
+ * - #PCM_CHANNEL_C
+ * - #PCM_CHANNEL_LS
+ * - #PCM_CHANNEL_RS
+ * - #PCM_CHANNEL_LFE
+ * - #PCM_CHANNEL_CS
+ * - #PCM_CHANNEL_LB
+ * - #PCM_CHANNEL_RB
+ * - #PCM_CHANNELS
+ * - #PCM_CHANNEL_CVH
+ * - #PCM_CHANNEL_MS
+ * - #PCM_CHANNEL_FLC
+ * - #PCM_CHANNEL_FRC
+ * - #PCM_CHANNEL_RLC
+ * - #PCM_CHANNEL_RRC
+ *
+ * Each element i of the array describes channel i inside the
+ * buffer, where 0 <= i < num_channels. An unused channel is set to
+ * zero.
+ */
+} __packed;
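+
+/* Illustrative sketch (an assumption): a 48 kHz, 16 bit stereo
+ * format block using the channel macros defined above; unused
+ * channel_mapping entries stay zero.
+ *
+ *   struct asm_multi_channel_pcm_fmt_blk_v2 f = {{0}};
+ *
+ *   f.num_channels       = 2;
+ *   f.bits_per_sample    = 16;
+ *   f.sample_rate        = 48000;
+ *   f.is_signed          = 1;
+ *   f.channel_mapping[0] = PCM_CHANNEL_FL;
+ *   f.channel_mapping[1] = PCM_CHANNEL_FR;
+ */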
+
+struct asm_multi_channel_pcm_fmt_blk_v3 {
+	uint16_t                num_channels;
+/*
+ * Number of channels
+ * Supported values: 1 to 8
+ */
+
+	uint16_t                bits_per_sample;
+/*
+ * Number of bits per sample per channel
+ * Supported values: 16, 24
+ */
+
+	uint32_t                sample_rate;
+/*
+ * Number of samples per second
+ * Supported values: 2000 to 48000, 96000, 192000 Hz
+ */
+
+	uint16_t                is_signed;
+/* Flag that indicates that PCM samples are signed (1) */
+
+	uint16_t                sample_word_size;
+/*
+ * Size in bits of the word that holds a sample of a channel.
+ * Supported values: 12, 24, 32
+ */
+
+	uint8_t                 channel_mapping[8];
+/*
+ * Each element, i, in the array describes channel i inside the buffer where
+ * 0 <= i < num_channels. Unused channels are set to 0.
+ */
+} __packed;
+
+struct asm_multi_channel_pcm_fmt_blk_v4 {
+	uint16_t                num_channels;
+/*
+ * Number of channels
+ * Supported values: 1 to 8
+ */
+
+	uint16_t                bits_per_sample;
+/*
+ * Number of bits per sample per channel
+ * Supported values: 16, 24, 32
+ */
+
+	uint32_t                sample_rate;
+/*
+ * Number of samples per second
+ * Supported values: 2000 to 48000, 96000, 192000 Hz
+ */
+
+	uint16_t                is_signed;
+/* Flag that indicates that PCM samples are signed (1) */
+
+	uint16_t                sample_word_size;
+/*
+ * Size in bits of the word that holds a sample of a channel.
+ * Supported values: 12, 24, 32
+ */
+
+	uint8_t                 channel_mapping[8];
+/*
+ * Each element, i, in the array describes channel i inside the buffer where
+ * 0 <= i < num_channels. Unused channels are set to 0.
+ */
+	uint16_t                endianness;
+/*
+ * Flag to indicate the endianness of the pcm sample
+ * Supported values: 0 - Little endian (all other formats)
+ *                   1 - Big endian (AIFF)
+ */
+	uint16_t                mode;
+/*
+ * Mode to provide additional info about the pcm input data.
+ * Supported values: 0 - Default QFs (Q15 for 16b, Q23 for packed 24b,
+ *                       Q31 for unpacked 24b or 32b)
+ *                  15 - for 16 bit
+ *                  23 - for 24b packed or 8.24 format
+ *                  31 - for 24b unpacked or 32bit
+ */
+} __packed;
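+
+/* Illustrative note (an assumption): for little-endian 24 bit
+ * samples carried unpacked in 32 bit words, the fields above
+ * combine as:
+ *
+ *   f.bits_per_sample  = 24;
+ *   f.sample_word_size = 32;
+ *   f.endianness       = 0;
+ *   f.mode             = 0;
+ *
+ * mode = 0 selects the default Q factor, which for unpacked 24 bit
+ * data is Q31 per the description above.
+ */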
+
+struct asm_multi_channel_pcm_fmt_blk_v5 {
+	uint16_t                num_channels;
+/*
+ * Number of channels
+ * Supported values: 1 to 32
+ */
+
+	uint16_t                bits_per_sample;
+/*
+ * Number of bits per sample per channel
+ * Supported values: 16, 24, 32
+ */
+
+	uint32_t                sample_rate;
+/*
+ * Number of samples per second
+ * Supported values: 2000 to 48000, 96000, 192000 Hz
+ */
+
+	uint16_t                is_signed;
+/* Flag that indicates that PCM samples are signed (1) */
+
+	uint16_t                sample_word_size;
+/*
+ * Size in bits of the word that holds a sample of a channel.
+ * Supported values: 12, 24, 32
+ */
+	uint16_t                endianness;
+/*
+ * Flag to indicate the endianness of the pcm sample
+ * Supported values: 0 - Little endian (all other formats)
+ *                   1 - Big endian (AIFF)
+ */
+	uint16_t                mode;
+/*
+ * Mode to provide additional info about the pcm input data.
+ * Supported values: 0 - Default QFs (Q15 for 16b, Q23 for packed 24b,
+ *                       Q31 for unpacked 24b or 32b)
+ *                  15 - for 16 bit
+ *                  23 - for 24b packed or 8.24 format
+ *                  31 - for 24b unpacked or 32bit
+ */
+
+	uint8_t                 channel_mapping[PCM_FORMAT_MAX_NUM_CHANNEL_V2];
+/*
+ * Each element, i, in the array describes channel i inside the buffer where
+ * 0 <= i < num_channels. Unused channels are set to 0.
+ */
+} __packed;
+
+/*
+ * Payload of the multichannel PCM configuration parameters in
+ * the ASM_MEDIA_FMT_MULTI_CHANNEL_PCM_V3 media format.
+ */
+struct asm_multi_channel_pcm_fmt_blk_param_v3 {
+	struct apr_hdr hdr;
+	struct asm_data_cmd_media_fmt_update_v2 fmt_blk;
+	struct asm_multi_channel_pcm_fmt_blk_v3 param;
+} __packed;
+
+/*
+ * Payload of the multichannel PCM configuration parameters in
+ * the ASM_MEDIA_FMT_MULTI_CHANNEL_PCM_V4 media format.
+ */
+struct asm_multi_channel_pcm_fmt_blk_param_v4 {
+	struct apr_hdr hdr;
+	struct asm_data_cmd_media_fmt_update_v2 fmt_blk;
+	struct asm_multi_channel_pcm_fmt_blk_v4 param;
+} __packed;
+
+/*
+ * Payload of the multichannel PCM configuration parameters in
+ * the ASM_MEDIA_FMT_MULTI_CHANNEL_PCM_V5 media format.
+ */
+struct asm_multi_channel_pcm_fmt_blk_param_v5 {
+	struct apr_hdr hdr;
+	struct asm_data_cmd_media_fmt_update_v2 fmt_blk;
+	struct asm_multi_channel_pcm_fmt_blk_v5 param;
+} __packed;
+
+struct asm_stream_cmd_set_encdec_param {
+	u32                  param_id;
+	/* ID of the parameter. */
+
+	u32                  param_size;
+/* Data size of this parameter, in bytes. The size is a multiple
+ * of 4 bytes.
+ */
+
+} __packed;
+
+struct asm_enc_cfg_blk_param_v2 {
+	u32                  frames_per_buf;
+/* Number of encoded frames to pack into each buffer.
+ *
+ * Note: this is only guidance information for the aDSP. The
+ * number of encoded frames put into each buffer (specified by the
+ * client) is less than or equal to this number.
+ */
+
+	u32                  enc_cfg_blk_size;
+/* Size in bytes of the encoder configuration block that follows
+ * this member.
+ */
+
+} __packed;
+
+/* @brief Dolby Digital Plus end point configuration structure
+ */
+struct asm_dec_ddp_endp_param_v2 {
+	struct apr_hdr hdr;
+	struct asm_stream_cmd_set_encdec_param  encdec;
+	int endp_param_value;
+} __packed;
+
+/*
+ * Payload of the multichannel PCM encoder configuration parameters in
+ * the ASM_MEDIA_FMT_MULTI_CHANNEL_PCM_V5 media format.
+ */
+struct asm_multi_channel_pcm_enc_cfg_v5 {
+	struct apr_hdr hdr;
+	struct asm_stream_cmd_set_encdec_param encdec;
+	struct asm_enc_cfg_blk_param_v2 encblk;
+	uint16_t num_channels;
+	/*
+	 * Number of PCM channels.
+	 * @values
+	 * - 0 -- Native mode
+	 * - 1 to 8 -- Number of channels
+	 * Native mode indicates that encoding must be performed with the number
+	 * of channels at the input.
+	 */
+	uint16_t  bits_per_sample;
+	/*
+	 * Number of bits per sample per channel.
+	 * @values 16, 24
+	 */
+	uint32_t  sample_rate;
+	/*
+	 * Number of samples per second.
+	 * @values 0, 8000 to 48000 Hz
+	 * A value of 0 indicates the native sampling rate. Encoding is
+	 * performed at the input sampling rate.
+	 */
+	uint16_t  is_signed;
+	/*
+	 * Flag that indicates the PCM samples are signed (1). Currently, only
+	 * signed PCM samples are supported.
+	 */
+	uint16_t    sample_word_size;
+	/*
+	 * The size in bits of the word that holds a sample of a channel.
+	 * @values 16, 24, 32
+	 * 16-bit samples are always placed in 16-bit words:
+	 * sample_word_size = 16.
+	 * 24-bit samples can be placed in 32-bit words or in consecutive
+	 * 24-bit words.
+	 * - If sample_word_size = 32, 24-bit samples are placed in the
+	 * most significant 24 bits of a 32-bit word.
+	 * - If sample_word_size = 24, 24-bit samples are placed in
+	 * 24-bit words.
+	 */
+	uint16_t                endianness;
+	/*
+	 * Flag to indicate the endianness of the pcm sample
+	 * Supported values: 0 - Little endian (all other formats)
+	 *                   1 - Big endian (AIFF)
+	 */
+	uint16_t                mode;
+	/*
+	 * Mode to provide additional info about the pcm input data.
+	 * Supported values: 0 - Default QFs (Q15 for 16b, Q23 for packed 24b,
+	 *                       Q31 for unpacked 24b or 32b)
+	 *                  15 - for 16 bit
+	 *                  23 - for 24b packed or 8.24 format
+	 *                  31 - for 24b unpacked or 32bit
+	 */
+	uint8_t   channel_mapping[PCM_FORMAT_MAX_NUM_CHANNEL_V2];
+	/*
+	 * Channel mapping array expected at the encoder output.
+	 * Channel[i] mapping describes channel i inside the buffer, where
+	 * 0 <= i < num_channels. All valid used channels must be present at
+	 * the beginning of the array.
+	 * If Native mode is set for the channels, this field is ignored.
+	 * @values See Section @xref{dox:PcmChannelDefs}
+	 */
+} __packed;
+
+/*
+ * Payload of the multichannel PCM encoder configuration parameters in
+ * the ASM_MEDIA_FMT_MULTI_CHANNEL_PCM_V4 media format.
+ */
+
+struct asm_multi_channel_pcm_enc_cfg_v4 {
+	struct apr_hdr hdr;
+	struct asm_stream_cmd_set_encdec_param encdec;
+	struct asm_enc_cfg_blk_param_v2 encblk;
+	uint16_t num_channels;
+	/*
+	 * Number of PCM channels.
+	 * @values
+	 * - 0 -- Native mode
+	 * - 1 to 8 -- Number of channels
+	 * Native mode indicates that encoding must be performed with the number
+	 * of channels at the input.
+	 */
+	uint16_t  bits_per_sample;
+	/*
+	 * Number of bits per sample per channel.
+	 * @values 16, 24
+	 */
+	uint32_t  sample_rate;
+	/*
+	 * Number of samples per second.
+	 * @values 0, 8000 to 48000 Hz
+	 * A value of 0 indicates the native sampling rate. Encoding is
+	 * performed at the input sampling rate.
+	 */
+	uint16_t  is_signed;
+	/*
+	 * Flag that indicates the PCM samples are signed (1). Currently, only
+	 * signed PCM samples are supported.
+	 */
+	uint16_t    sample_word_size;
+	/*
+	 * The size in bits of the word that holds a sample of a channel.
+	 * @values 16, 24, 32
+	 * 16-bit samples are always placed in 16-bit words:
+	 * sample_word_size = 16.
+	 * 24-bit samples can be placed in 32-bit words or in consecutive
+	 * 24-bit words.
+	 * - If sample_word_size = 32, 24-bit samples are placed in the
+	 * most significant 24 bits of a 32-bit word.
+	 * - If sample_word_size = 24, 24-bit samples are placed in
+	 * 24-bit words.
+	 */
+	uint8_t   channel_mapping[8];
+	/*
+	 * Channel mapping array expected at the encoder output.
+	 * Channel[i] mapping describes channel i inside the buffer, where
+	 * 0 <= i < num_channels. All valid used channels must be present at
+	 * the beginning of the array.
+	 * If Native mode is set for the channels, this field is ignored.
+	 * @values See Section @xref{dox:PcmChannelDefs}
+	 */
+	uint16_t                endianness;
+	/*
+	 * Flag to indicate the endianness of the pcm sample
+	 * Supported values: 0 - Little endian (all other formats)
+	 *                   1 - Big endian (AIFF)
+	 */
+	uint16_t                mode;
+	/*
+	 * Mode to provide additional info about the pcm input data.
+	 * Supported values: 0 - Default QFs (Q15 for 16b, Q23 for packed 24b,
+	 *                       Q31 for unpacked 24b or 32b)
+	 *                  15 - for 16 bit
+	 *                  23 - for 24b packed or 8.24 format
+	 *                  31 - for 24b unpacked or 32bit
+	 */
+} __packed;
+
+/*
+ * Payload of the multichannel PCM encoder configuration parameters in
+ * the ASM_MEDIA_FMT_MULTI_CHANNEL_PCM_V3 media format.
+ */
+
+struct asm_multi_channel_pcm_enc_cfg_v3 {
+	struct apr_hdr hdr;
+	struct asm_stream_cmd_set_encdec_param encdec;
+	struct asm_enc_cfg_blk_param_v2 encblk;
+	uint16_t num_channels;
+	/*
+	 * Number of PCM channels.
+	 * @values
+	 * - 0 -- Native mode
+	 * - 1 to 8 -- Number of channels
+	 * Native mode indicates that encoding must be performed with the number
+	 * of channels at the input.
+	 */
+	uint16_t  bits_per_sample;
+	/*
+	 * Number of bits per sample per channel.
+	 * @values 16, 24
+	 */
+	uint32_t  sample_rate;
+	/*
+	 * Number of samples per second.
+	 * @values 0, 8000 to 48000 Hz
+	 * A value of 0 indicates the native sampling rate. Encoding is
+	 * performed at the input sampling rate.
+	 */
+	uint16_t  is_signed;
+	/*
+	 * Flag that indicates the PCM samples are signed (1). Currently, only
+	 * signed PCM samples are supported.
+	 */
+	uint16_t    sample_word_size;
+	/*
+	 * The size in bits of the word that holds a sample of a channel.
+	 * @values 16, 24, 32
+	 * 16-bit samples are always placed in 16-bit words:
+	 * sample_word_size = 16.
+	 * 24-bit samples can be placed in 32-bit words or in consecutive
+	 * 24-bit words.
+	 * - If sample_word_size = 32, 24-bit samples are placed in the
+	 * most significant 24 bits of a 32-bit word.
+	 * - If sample_word_size = 24, 24-bit samples are placed in
+	 * 24-bit words.
+	 */
+	uint8_t   channel_mapping[8];
+	/*
+	 * Channel mapping array expected at the encoder output.
+	 * Channel[i] mapping describes channel i inside the buffer, where
+	 * 0 <= i < num_channels. All valid used channels must be present at
+	 * the beginning of the array.
+	 * If Native mode is set for the channels, this field is ignored.
+	 * @values See Section @xref{dox:PcmChannelDefs}
+	 */
+};
+
+/* @brief Multichannel PCM encoder configuration structure used
+ * in the #ASM_PARAM_ID_ENCDEC_ENC_CFG_BLK_V2 command.
+ */
+
+struct asm_multi_channel_pcm_enc_cfg_v2 {
+	struct apr_hdr hdr;
+	struct asm_stream_cmd_set_encdec_param  encdec;
+	struct asm_enc_cfg_blk_param_v2	encblk;
+	uint16_t  num_channels;
+/*< Number of PCM channels.
+ *
+ * Supported values:
+ * - 0 -- Native mode
+ * - 1 to 8 -- Number of channels
+ * Native mode indicates that encoding must be performed with the
+ * number of channels at the input.
+ */
+
+	uint16_t  bits_per_sample;
+/*< Number of bits per sample per channel.
+ * Supported values: 16, 24
+ */
+
+	uint32_t  sample_rate;
+/*< Number of samples per second (in Hertz).
+ *
+ * Supported values: 0, 8000 to 48000 A value of 0 indicates the
+ * native sampling rate. Encoding is performed at the input sampling
+ * rate.
+ */
+
+	uint16_t  is_signed;
+/*< Specifies whether the samples are signed (1). Currently,
+ * only signed samples are supported.
+ */
+
+	uint16_t  reserved;
+/*< Reserved field for 32 bit alignment. Must be set to zero. */
+
+
+	uint8_t   channel_mapping[8];
+} __packed;
+
+#define ASM_MEDIA_FMT_MP3 0x00010BE9
+#define ASM_MEDIA_FMT_AAC_V2 0x00010DA6
+
+/* @xreflabel
+ * {hdr:AsmMediaFmtDolbyAac} Media format ID for the
+ * Dolby AAC decoder. This format ID is to be used if the client wants
+ * to use the Dolby AAC decoder to decode MPEG2 and MPEG4 AAC
+ * contents.
+ */
+
+#define ASM_MEDIA_FMT_DOLBY_AAC 0x00010D86
+
+/* Enumeration for the audio data transport stream AAC format. */
+#define ASM_MEDIA_FMT_AAC_FORMAT_FLAG_ADTS 0
+
+/* Enumeration for low overhead audio stream AAC format. */
+#define ASM_MEDIA_FMT_AAC_FORMAT_FLAG_LOAS                      1
+
+/* Enumeration for the audio data interchange format
+ * AAC format.
+ */
+#define ASM_MEDIA_FMT_AAC_FORMAT_FLAG_ADIF   2
+
+/* Enumeration for the raw AAC format. */
+#define ASM_MEDIA_FMT_AAC_FORMAT_FLAG_RAW    3
+
+/* Enumeration for the AAC LATM format. */
+#define ASM_MEDIA_FMT_AAC_FORMAT_FLAG_LATM   4
+
+#define ASM_MEDIA_FMT_AAC_AOT_LC             2
+#define ASM_MEDIA_FMT_AAC_AOT_SBR            5
+#define ASM_MEDIA_FMT_AAC_AOT_PS             29
+#define ASM_MEDIA_FMT_AAC_AOT_BSAC           22
+
+struct asm_aac_fmt_blk_v2 {
+	struct apr_hdr hdr;
+	struct asm_data_cmd_media_fmt_update_v2 fmt_blk;
+
+	u16          aac_fmt_flag;
+/* Bitstream format option.
+ * Supported values:
+ * - #ASM_MEDIA_FMT_AAC_FORMAT_FLAG_ADTS
+ * - #ASM_MEDIA_FMT_AAC_FORMAT_FLAG_LOAS
+ * - #ASM_MEDIA_FMT_AAC_FORMAT_FLAG_ADIF
+ * - #ASM_MEDIA_FMT_AAC_FORMAT_FLAG_RAW
+ */
+
+	u16          audio_objype;
+/* Audio Object Type (AOT) present in the AAC stream.
+ * Supported values:
+ * - #ASM_MEDIA_FMT_AAC_AOT_LC
+ * - #ASM_MEDIA_FMT_AAC_AOT_SBR
+ * - #ASM_MEDIA_FMT_AAC_AOT_BSAC
+ * - #ASM_MEDIA_FMT_AAC_AOT_PS
+ * - Otherwise -- Not supported
+ */
+
+	u16          channel_config;
+/* Number of channels present in the AAC stream.
+ * Supported values:
+ * - 1 -- Mono
+ * - 2 -- Stereo
+ * - 6 -- 5.1 content
+ */
+
+	u16          total_size_of_PCE_bits;
+/* Total size of the program config element (PCE), in bits; greater
+ * than or equal to zero.
+ * - In the case of RAW formats and channel config = 0 (PCE), the
+ * client can send the bit stream containing the PCE immediately
+ * following this structure (in-band).
+ * - This number does not include the bits used for 32 bit alignment.
+ * - If zero, the PCE info is assumed to be available in the audio
+ * bit stream and not in-band.
+ */
+
+	u32          sample_rate;
+/* Number of samples per second (in Hertz).
+ *
+ * Supported values: 8000, 11025, 12000, 16000, 22050, 24000, 32000,
+ * 44100, 48000
+ *
+ * This field must be equal to the sample rate of the AAC-LC
+ * decoder's output.
+ * - For MP4 or 3GP containers, this is indicated by the
+ * samplingFrequencyIndex field in the AudioSpecificConfig element.
+ * - For ADTS format, this is indicated by the
+ * samplingFrequencyIndex in the ADTS fixed header.
+ * - For ADIF format, this is indicated by the
+ * samplingFrequencyIndex in the program_config_element present in
+ * the ADIF header.
+ */
+
+} __packed;
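+
+/* Illustrative sketch (an assumption): a stereo AAC-LC ADTS format
+ * block at 44100 Hz. total_size_of_PCE_bits is zero because, per the
+ * description above, the configuration then travels in the bit
+ * stream (here, the ADTS header) rather than in-band.
+ *
+ *   struct asm_aac_fmt_blk_v2 a = {{0}};
+ *
+ *   a.aac_fmt_flag           = ASM_MEDIA_FMT_AAC_FORMAT_FLAG_ADTS;
+ *   a.audio_objype           = ASM_MEDIA_FMT_AAC_AOT_LC;
+ *   a.channel_config         = 2;
+ *   a.total_size_of_PCE_bits = 0;
+ *   a.sample_rate            = 44100;
+ */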
+
+struct asm_aac_enc_cfg_v2 {
+	struct apr_hdr hdr;
+	struct asm_stream_cmd_set_encdec_param  encdec;
+	struct asm_enc_cfg_blk_param_v2	encblk;
+
+	u32          bit_rate;
+	/* Encoding rate in bits per second. */
+	u32          enc_mode;
+/* Encoding mode.
+ * Supported values:
+ * - #ASM_MEDIA_FMT_AAC_AOT_LC
+ * - #ASM_MEDIA_FMT_AAC_AOT_SBR
+ * - #ASM_MEDIA_FMT_AAC_AOT_PS
+ */
+	u16          aac_fmt_flag;
+/* AAC format flag.
+ * Supported values:
+ * - #ASM_MEDIA_FMT_AAC_FORMAT_FLAG_ADTS
+ * - #ASM_MEDIA_FMT_AAC_FORMAT_FLAG_RAW
+ */
+	u16          channel_cfg;
+/* Number of channels to encode.
+ * Supported values:
+ * - 0 -- Native mode
+ * - 1 -- Mono
+ * - 2 -- Stereo
+ * - Other values are not supported.
+ * Note: the eAAC+ encoder mode supports only stereo.
+ * Native mode indicates that encoding must be performed with the
+ * number of channels at the input.
+ * The number of channels must not change during encoding.
+ */
+
+	u32          sample_rate;
+/* Number of samples per second.
+ * Supported values:
+ * - 0 -- Native mode
+ * - Other supported sampling rates
+ * Native mode indicates that encoding must be performed with the
+ * sampling rate at the input.
+ * The sampling rate must not change during encoding.
+ */
+
+} __packed;
+
+#define ASM_MEDIA_FMT_G711_ALAW_FS 0x00010BF7
+#define ASM_MEDIA_FMT_G711_MLAW_FS 0x00010C2E
+
+struct asm_g711_enc_cfg_v2 {
+	struct apr_hdr hdr;
+	struct asm_stream_cmd_set_encdec_param encdec;
+	struct asm_enc_cfg_blk_param_v2 encblk;
+
+	u32          sample_rate;
+/*
+ * Number of samples per second.
+ * Supported values: 8000, 16000 Hz
+ */
+
+} __packed;
+
+struct asm_vorbis_fmt_blk_v2 {
+	struct apr_hdr hdr;
+	struct asm_data_cmd_media_fmt_update_v2 fmtblk;
+	u32          bit_stream_fmt;
+/* Bit stream format.
+ * Supported values:
+ * - 0 -- Raw bitstream
+ * - 1 -- Transcoded bitstream
+ *
+ * A transcoded bitstream contains the size of the frame as the
+ * first word in each frame.
+ */
+
+} __packed;
+
+struct asm_flac_fmt_blk_v2 {
+	struct apr_hdr hdr;
+	struct asm_data_cmd_media_fmt_update_v2 fmtblk;
+
+	u16 is_stream_info_present;
+/* Specifies whether stream information is present in the FLAC format
+ * block.
+ *
+ * Supported values:
+ * - 0 -- Stream information is not present in this message
+ * - 1 -- Stream information is present in this message
+ *
+ * When set to 1, the FLAC bitstream was successfully parsed by the
+ * client, and other fields in the FLAC format block can be read by the
+ * decoder to get metadata stream information.
+ */
+
+	u16 num_channels;
+/* Number of channels for decoding.
+ * Supported values: 1 to 2
+ */
+
+	u16 min_blk_size;
+/* Minimum block size (in samples) used in the stream. It must be less
+ * than or equal to max_blk_size.
+ */
+
+	u16 max_blk_size;
+/* Maximum block size (in samples) used in the stream. If the
+ * minimum block size equals the maximum block size, a fixed block
+ * size stream is implied.
+ */
+
+	u16 md5_sum[8];
+/* MD5 signature array of the unencoded audio data. This allows the
+ * decoder to determine if an error exists in the audio data, even when
+ * the error does not result in an invalid bitstream.
+ */
+
+	u32 sample_rate;
+/* Number of samples per second.
+ * Supported values: 8000 to 48000 Hz
+ */
+
+	u32 min_frame_size;
+/* Minimum frame size used in the stream.
+ * Supported values:
+ * - > 0 bytes
+ * - 0 -- The value is unknown
+ */
+
+	u32 max_frame_size;
+/* Maximum frame size used in the stream.
+ * Supported values:
+ * - > 0 bytes
+ * - 0 -- The value is unknown
+ */
+
+	u16 sample_size;
+/* Bits per sample. Supported values: 8, 16 */
+
+	u16 reserved;
+/* Clients must set this field to zero. */
+
+} __packed;
+
+struct asm_alac_fmt_blk_v2 {
+	struct apr_hdr hdr;
+	struct asm_data_cmd_media_fmt_update_v2 fmtblk;
+
+	u32 frame_length;
+	u8 compatible_version;
+	u8 bit_depth;
+	u8 pb;
+	u8 mb;
+	u8 kb;
+	u8 num_channels;
+	u16 max_run;
+	u32 max_frame_bytes;
+	u32 avg_bit_rate;
+	u32 sample_rate;
+	u32 channel_layout_tag;
+
+} __packed;
+
+struct asm_g711_dec_fmt_blk_v2 {
+	struct apr_hdr hdr;
+	struct asm_data_cmd_media_fmt_update_v2 fmtblk;
+	u32 sample_rate;
+} __packed;
+
+struct asm_ape_fmt_blk_v2 {
+	struct apr_hdr hdr;
+	struct asm_data_cmd_media_fmt_update_v2 fmtblk;
+
+	u16 compatible_version;
+	u16 compression_level;
+	u32 format_flags;
+	u32 blocks_per_frame;
+	u32 final_frame_blocks;
+	u32 total_frames;
+	u16 bits_per_sample;
+	u16 num_channels;
+	u32 sample_rate;
+	u32 seek_table_present;
+
+} __packed;
+
+struct asm_dsd_fmt_blk_v2 {
+	struct apr_hdr hdr;
+	struct asm_data_cmd_media_fmt_update_v2 fmtblk;
+
+	u16 num_version;
+	u16 is_bitwise_big_endian;
+	u16 dsd_channel_block_size;
+	u16 num_channels;
+	u8  channel_mapping[8];
+	u32 dsd_data_rate;
+
+} __packed;
+
+#define ASM_MEDIA_FMT_AMRNB_FS                  0x00010BEB
+
+/* Enumeration for 4.75 kbps AMR-NB Encoding mode. */
+#define ASM_MEDIA_FMT_AMRNB_FS_ENCODE_MODE_MR475                0
+
+/* Enumeration for 5.15 kbps AMR-NB Encoding mode. */
+#define ASM_MEDIA_FMT_AMRNB_FS_ENCODE_MODE_MR515                1
+
+/* Enumeration for 5.90 kbps AMR-NB Encoding mode. */
+#define ASM_MEDIA_FMT_AMRNB_FS_ENCODE_MODE_MMR59                2
+
+/* Enumeration for 6.70 kbps AMR-NB Encoding mode. */
+#define ASM_MEDIA_FMT_AMRNB_FS_ENCODE_MODE_MMR67                3
+
+/* Enumeration for 7.40 kbps AMR-NB Encoding mode. */
+#define ASM_MEDIA_FMT_AMRNB_FS_ENCODE_MODE_MMR74                4
+
+/* Enumeration for 7.95 kbps AMR-NB Encoding mode. */
+#define ASM_MEDIA_FMT_AMRNB_FS_ENCODE_MODE_MMR795               5
+
+/* Enumeration for 10.20 kbps AMR-NB Encoding mode. */
+#define ASM_MEDIA_FMT_AMRNB_FS_ENCODE_MODE_MMR102               6
+
+/* Enumeration for 12.20 kbps AMR-NB Encoding mode. */
+#define ASM_MEDIA_FMT_AMRNB_FS_ENCODE_MODE_MMR122               7
+
+/* Enumeration for AMR-NB Discontinuous Transmission mode off. */
+#define ASM_MEDIA_FMT_AMRNB_FS_DTX_MODE_OFF                     0
+
+/* Enumeration for AMR-NB DTX mode VAD1. */
+#define ASM_MEDIA_FMT_AMRNB_FS_DTX_MODE_VAD1                    1
+
+/* Enumeration for AMR-NB DTX mode VAD2. */
+#define ASM_MEDIA_FMT_AMRNB_FS_DTX_MODE_VAD2                    2
+
+/* Enumeration for AMR-NB DTX mode auto. */
+#define ASM_MEDIA_FMT_AMRNB_FS_DTX_MODE_AUTO                    3
+
+struct asm_amrnb_enc_cfg {
+	struct apr_hdr hdr;
+	struct asm_stream_cmd_set_encdec_param  encdec;
+	struct asm_enc_cfg_blk_param_v2	encblk;
+
+	u16          enc_mode;
+/* AMR-NB encoding rate.
+ * Supported values:
+ * Use the ASM_MEDIA_FMT_AMRNB_FS_ENCODE_MODE_*
+ * macros
+ */
+
+	u16          dtx_mode;
+/* Specifies whether DTX mode is disabled or enabled.
+ * Supported values:
+ * - #ASM_MEDIA_FMT_AMRNB_FS_DTX_MODE_OFF
+ * - #ASM_MEDIA_FMT_AMRNB_FS_DTX_MODE_VAD1
+ */
+} __packed;
+
+#define ASM_MEDIA_FMT_AMRWB_FS                  0x00010BEC
+
+/* Enumeration for 6.6 kbps AMR-WB Encoding mode. */
+#define ASM_MEDIA_FMT_AMRWB_FS_ENCODE_MODE_MR66                 0
+
+/* Enumeration for 8.85 kbps AMR-WB Encoding mode. */
+#define ASM_MEDIA_FMT_AMRWB_FS_ENCODE_MODE_MR885                1
+
+/* Enumeration for 12.65 kbps AMR-WB Encoding mode. */
+#define ASM_MEDIA_FMT_AMRWB_FS_ENCODE_MODE_MR1265               2
+
+/* Enumeration for 14.25 kbps AMR-WB Encoding mode. */
+#define ASM_MEDIA_FMT_AMRWB_FS_ENCODE_MODE_MR1425               3
+
+/* Enumeration for 15.85 kbps AMR-WB Encoding mode. */
+#define ASM_MEDIA_FMT_AMRWB_FS_ENCODE_MODE_MR1585               4
+
+/* Enumeration for 18.25 kbps AMR-WB Encoding mode. */
+#define ASM_MEDIA_FMT_AMRWB_FS_ENCODE_MODE_MR1825               5
+
+/* Enumeration for 19.85 kbps AMR-WB Encoding mode. */
+#define ASM_MEDIA_FMT_AMRWB_FS_ENCODE_MODE_MR1985               6
+
+/* Enumeration for 23.05 kbps AMR-WB Encoding mode. */
+#define ASM_MEDIA_FMT_AMRWB_FS_ENCODE_MODE_MR2305               7
+
+/* Enumeration for 23.85 kbps AMR-WB Encoding mode. */
+#define ASM_MEDIA_FMT_AMRWB_FS_ENCODE_MODE_MR2385               8
+
+struct asm_amrwb_enc_cfg {
+	struct apr_hdr hdr;
+	struct asm_stream_cmd_set_encdec_param  encdec;
+	struct asm_enc_cfg_blk_param_v2	encblk;
+
+	u16          enc_mode;
+/* AMR-WB encoding rate.
+ * Supported values:
+ * Use the ASM_MEDIA_FMT_AMRWB_FS_ENCODE_MODE_*
+ * macros
+ */
+
+	u16          dtx_mode;
+/* Specifies whether DTX mode is disabled or enabled.
+ * Supported values:
+ * - #ASM_MEDIA_FMT_AMRNB_FS_DTX_MODE_OFF
+ * - #ASM_MEDIA_FMT_AMRNB_FS_DTX_MODE_VAD1
+ */
+} __packed;
+
+#define ASM_MEDIA_FMT_V13K_FS                      0x00010BED
+
+/* Enumeration for 14.4 kbps V13K Encoding mode. */
+#define ASM_MEDIA_FMT_V13K_FS_ENCODE_MODE_MR1440                0
+
+/* Enumeration for 12.2 kbps V13K Encoding mode. */
+#define ASM_MEDIA_FMT_V13K_FS_ENCODE_MODE_MR1220                1
+
+/* Enumeration for 11.2 kbps V13K Encoding mode. */
+#define ASM_MEDIA_FMT_V13K_FS_ENCODE_MODE_MR1120                2
+
+/* Enumeration for 9.0 kbps V13K Encoding mode. */
+#define ASM_MEDIA_FMT_V13K_FS_ENCODE_MODE_MR90                  3
+
+/* Enumeration for 7.2 kbps V13K Encoding mode. */
+#define ASM_MEDIA_FMT_V13K_FS_ENCODE_MODE_MR720                 4
+
+/* Enumeration for 1/8 vocoder rate.*/
+#define ASM_MEDIA_FMT_VOC_ONE_EIGHTH_RATE          1
+
+/* Enumeration for 1/4 vocoder rate. */
+#define ASM_MEDIA_FMT_VOC_ONE_FOURTH_RATE       2
+
+/* Enumeration for 1/2 vocoder rate. */
+#define ASM_MEDIA_FMT_VOC_HALF_RATE             3
+
+/* Enumeration for full vocoder rate. */
+#define ASM_MEDIA_FMT_VOC_FULL_RATE             4
+
+struct asm_v13k_enc_cfg {
+	struct apr_hdr hdr;
+	struct asm_stream_cmd_set_encdec_param  encdec;
+	struct asm_enc_cfg_blk_param_v2	encblk;
+	u16          max_rate;
+/* Maximum allowed encoder frame rate.
+ * Supported values:
+ * - #ASM_MEDIA_FMT_VOC_ONE_EIGHTH_RATE
+ * - #ASM_MEDIA_FMT_VOC_ONE_FOURTH_RATE
+ * - #ASM_MEDIA_FMT_VOC_HALF_RATE
+ * - #ASM_MEDIA_FMT_VOC_FULL_RATE
+ */
+
+	u16          min_rate;
+/* Minimum allowed encoder frame rate.
+ * Supported values:
+ * - #ASM_MEDIA_FMT_VOC_ONE_EIGHTH_RATE
+ * - #ASM_MEDIA_FMT_VOC_ONE_FOURTH_RATE
+ * - #ASM_MEDIA_FMT_VOC_HALF_RATE
+ * - #ASM_MEDIA_FMT_VOC_FULL_RATE
+ */
+
+	u16          reduced_rate_cmd;
+/* Reduced rate command, used to change
+ * the average bitrate of the V13K
+ * vocoder.
+ * Supported values:
+ * - #ASM_MEDIA_FMT_V13K_FS_ENCODE_MODE_MR1440 (Default)
+ * - #ASM_MEDIA_FMT_V13K_FS_ENCODE_MODE_MR1220
+ * - #ASM_MEDIA_FMT_V13K_FS_ENCODE_MODE_MR1120
+ * - #ASM_MEDIA_FMT_V13K_FS_ENCODE_MODE_MR90
+ * - #ASM_MEDIA_FMT_V13K_FS_ENCODE_MODE_MR720
+ */
+
+	u16          rate_mod_cmd;
+/* Rate modulation command. Default = 0.
+ * - If bit 0 = 1, rate control is enabled.
+ * - If bit 1 = 1, the maximum number of consecutive full rate
+ * frames is limited to the number supplied in bits 2 to 10.
+ * - If bit 1 = 0, the minimum number of non-full rate frames in
+ * between two full rate frames is forced to the number supplied in
+ * bits 2 to 10.
+ * In both cases, if necessary, half rate is used to substitute full
+ * rate.
+ * - Bits 15 to 10 are reserved and must all be set to zero.
+ */
+
+} __packed;
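+
+/* Illustrative sketch (an assumption): packing rate_mod_cmd per the
+ * bit layout above -- bit 0 enables rate control, bit 1 = 1 makes
+ * bits 2 to 10 an upper bound on consecutive full rate frames. The
+ * variable max_full_rate_run is hypothetical.
+ *
+ *   u16 max_full_rate_run = 4;
+ *   u16 rate_mod_cmd = (1u << 0) | (1u << 1) |
+ *                      ((max_full_rate_run & 0x1ffu) << 2);
+ */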
+
+#define ASM_MEDIA_FMT_EVRC_FS                   0x00010BEE
+
+/*  EVRC encoder configuration structure used in the
+ * #ASM_PARAM_ID_ENCDEC_ENC_CFG_BLK_V2 command.
+ */
+struct asm_evrc_enc_cfg {
+	struct apr_hdr hdr;
+	struct asm_stream_cmd_set_encdec_param  encdec;
+	struct asm_enc_cfg_blk_param_v2	encblk;
+	u16          max_rate;
+/* Maximum allowed encoder frame rate.
+ * Supported values:
+ * - #ASM_MEDIA_FMT_VOC_ONE_EIGHTH_RATE
+ * - #ASM_MEDIA_FMT_VOC_ONE_FOURTH_RATE
+ * - #ASM_MEDIA_FMT_VOC_HALF_RATE
+ * - #ASM_MEDIA_FMT_VOC_FULL_RATE
+ */
+
+	u16          min_rate;
+/* Minimum allowed encoder frame rate.
+ * Supported values:
+ * - #ASM_MEDIA_FMT_VOC_ONE_EIGHTH_RATE
+ * - #ASM_MEDIA_FMT_VOC_ONE_FOURTH_RATE
+ * - #ASM_MEDIA_FMT_VOC_HALF_RATE
+ * - #ASM_MEDIA_FMT_VOC_FULL_RATE
+ */
+
+	u16          rate_mod_cmd;
+/* Rate modulation command. Default: 0.
+ * - If bit 0=1, rate control is enabled.
+ * - If bit 1=1, the maximum number of consecutive full rate frames
+ * is limited with numbers supplied in bits 2 to 10.
+ *
+ * - If bit 1=0, the minimum number of non-full rate frames in
+ * between two full rate frames is forced to the number supplied in
+ * bits 2 to 10. In both cases, if necessary, half rate is used to
+ * substitute full rate.
+ *
+ * - Bits 15 to 10 are reserved and must all be set to zero.
+ */
+
+	u16          reserved;
+	/* Reserved. Clients must set this field to zero. */
+} __packed;
+
+#define ASM_MEDIA_FMT_WMA_V10PRO_V2                0x00010DA7
+
+struct asm_wmaprov10_fmt_blk_v2 {
+	struct apr_hdr hdr;
+	struct asm_data_cmd_media_fmt_update_v2 fmtblk;
+
+	u16          fmtag;
+/* WMA format type.
+ * Supported values:
+ * - 0x162 -- WMA 9 Pro
+ * - 0x163 -- WMA 9 Pro Lossless
+ * - 0x166 -- WMA 10 Pro
+ * - 0x167 -- WMA 10 Pro Lossless
+ */
+
+	u16          num_channels;
+/* Number of channels encoded in the input stream.
+ * Supported values: 1 to 8
+ */
+
+	u32          sample_rate;
+/* Number of samples per second (in Hertz).
+ * Supported values: 11025, 16000, 22050, 32000, 44100, 48000,
+ * 88200, 96000
+ */
+
+	u32          avg_bytes_per_sec;
+/* Bitrate expressed as the average bytes per second.
+ * Supported values: 2000 to 96000
+ */
+
+	u16          blk_align;
+/* Size of the bitstream packet size in bytes. WMA Pro files
+ * have a payload of one block per bitstream packet.
+ * Supported values: <= 13376
+ */
+
+	u16          bits_per_sample;
+/* Number of bits per sample in the encoded WMA stream.
+ * Supported values: 16, 24
+ */
+
+	u32          channel_mask;
+/* Bit-packed double word (32-bits) that indicates the
+ * recommended speaker positions for each source channel.
+ */
+
+	u16          enc_options;
+/* Bit-packed word with values that indicate whether certain
+ * features of the bitstream are used.
+ * Supported values:
+ * - 0x0001 -- ENCOPT3_PURE_LOSSLESS
+ * - 0x0006 -- ENCOPT3_FRM_SIZE_MOD
+ * - 0x0038 -- ENCOPT3_SUBFRM_DIV
+ * - 0x0040 -- ENCOPT3_WRITE_FRAMESIZE_IN_HDR
+ * - 0x0080 -- ENCOPT3_GENERATE_DRC_PARAMS
+ * - 0x0100 -- ENCOPT3_RTMBITS
+ */
+
+
+	u16          usAdvancedEncodeOpt;
+	/* Advanced encoding option.  */
+
+	u32          advanced_enc_options2;
+	/* Advanced encoding option 2. */
+
+} __packed;
+
+#define ASM_MEDIA_FMT_WMA_V9_V2                    0x00010DA8
+struct asm_wmastdv9_fmt_blk_v2 {
+	struct apr_hdr hdr;
+	struct asm_data_cmd_media_fmt_update_v2 fmtblk;
+	u16          fmtag;
+/* WMA format tag.
+ * Supported values: 0x161 (WMA 9 standard)
+ */
+
+	u16          num_channels;
+/* Number of channels in the stream.
+ * Supported values: 1, 2
+ */
+
+	u32          sample_rate;
+/* Number of samples per second (in Hertz).
+ * Supported values: 48000
+ */
+
+	u32          avg_bytes_per_sec;
+	/* Bitrate expressed as the average bytes per second. */
+
+	u16          blk_align;
+/* Block align. All WMA files with a maximum packet size of
+ * 13376 are supported.
+ */
+
+
+	u16          bits_per_sample;
+/* Number of bits per sample in the output.
+ * Supported values: 16
+ */
+
+	u32          channel_mask;
+/* Channel mask.
+ * Supported values:
+ * - 3 -- Stereo (front left/front right)
+ * - 4 -- Mono (center)
+ */
+
+	u16          enc_options;
+	/* Options used during encoding. */
+
+	u16          reserved;
+
+} __packed;
+
+#define ASM_MEDIA_FMT_WMA_V8                    0x00010D91
+
+struct asm_wmastdv8_enc_cfg {
+	struct apr_hdr hdr;
+	struct asm_stream_cmd_set_encdec_param  encdec;
+	struct asm_enc_cfg_blk_param_v2	encblk;
+	u32          bit_rate;
+	/* Encoding rate in bits per second. */
+
+	u32          sample_rate;
+/* Number of samples per second.
+ *
+ * Supported values:
+ * - 0 -- Native mode
+ * - Other Supported values are 22050, 32000, 44100, and 48000.
+ *
+ * Native mode indicates that encoding must be performed with the
+ * sampling rate at the input.
+ * The sampling rate must not change during encoding.
+ */
+
+	u16          channel_cfg;
+/* Number of channels to encode.
+ * Supported values:
+ * - 0 -- Native mode
+ * - 1 -- Mono
+ * - 2 -- Stereo
+ * - Other values are not supported.
+ *
+ * Native mode indicates that encoding must be performed with the
+ * number of channels at the input.
+ * The number of channels must not change during encoding.
+ */
+
+	u16          reserved;
+	/* Reserved. Clients must set this field to zero.*/
+} __packed;
+
+#define ASM_MEDIA_FMT_AMR_WB_PLUS_V2               0x00010DA9
+
+struct asm_amrwbplus_fmt_blk_v2 {
+	struct apr_hdr hdr;
+	struct asm_data_cmd_media_fmt_update_v2 fmtblk;
+	u32          amr_frame_fmt;
+/* AMR frame format.
+ * Supported values:
+ * - 6 -- Transport Interface Format (TIF)
+ * - Any other value -- File storage format (FSF)
+ *
+ * TIF stream contains 2-byte header for each frame within the
+ * superframe. FSF stream contains one 2-byte header per superframe.
+ */
+
+} __packed;
+
+#define ASM_MEDIA_FMT_AC3                    0x00010DEE
+#define ASM_MEDIA_FMT_EAC3                   0x00010DEF
+#define ASM_MEDIA_FMT_DTS                    0x00010D88
+#define ASM_MEDIA_FMT_MP2                    0x00010DE9
+#define ASM_MEDIA_FMT_FLAC                   0x00010C16
+#define ASM_MEDIA_FMT_ALAC                   0x00012F31
+#define ASM_MEDIA_FMT_VORBIS                 0x00010C15
+#define ASM_MEDIA_FMT_APE                    0x00012F32
+#define ASM_MEDIA_FMT_DSD                    0x00012F3E
+#define ASM_MEDIA_FMT_TRUEHD                 0x00013215
+/* 0x0 is used for the format ID since the aDSP dynamically
+ * determines the format encapsulated in the IEC61937 (compressed)
+ * or IEC60958 (PCM) packets.
+ */
+#define ASM_MEDIA_FMT_IEC                    0x00000000
+
+/* Media format ID for adaptive transform acoustic coding. This
+ * ID is used by the #ASM_STREAM_CMD_OPEN_WRITE_COMPRESSED command
+ * only.
+ */
+
+#define ASM_MEDIA_FMT_ATRAC                  0x00010D89
+
+/* Media format ID for metadata-enhanced audio transmission.
+ * This ID is used by the #ASM_STREAM_CMD_OPEN_WRITE_COMPRESSED
+ * command only.
+ */
+
+#define ASM_MEDIA_FMT_MAT                    0x00010D8A
+
+/*  adsp_media_fmt.h */
+
+#define ASM_DATA_CMD_WRITE_V2 0x00010DAB
+
+struct asm_data_cmd_write_v2 {
+	struct apr_hdr hdr;
+	u32                  buf_addr_lsw;
+/* The 64 bit address msw-lsw should be a valid, mapped address.
+ * 64 bit address should be a multiple of 32 bytes
+ */
+
+	u32                  buf_addr_msw;
+/* The 64 bit address msw-lsw should be a valid, mapped address.
+ * The 64 bit address should be a multiple of 32 bytes.
+ * - Address of the buffer containing the data to be decoded.
+ * The buffer should be aligned to a 32 byte boundary.
+ * - In the case of a 32 bit shared memory address, the msw field
+ * must be set to zero.
+ * - In the case of a 36 bit shared memory address, bit 31 to bit 4
+ * of the msw must be set to zero.
+ */
+	u32                  mem_map_handle;
+/* memory map handle returned by DSP through
+ * ASM_CMD_SHARED_MEM_MAP_REGIONS command
+ */
+	u32                  buf_size;
+/* Number of valid bytes available in the buffer for decoding. The
+ * first byte starts at buf_addr.
+ */
+
+	u32                  seq_id;
+	/* Optional buffer sequence ID. */
+
+	u32                  timestamp_lsw;
+/* Lower 32 bits of the 64-bit session time in microseconds of the
+ * first buffer sample.
+ */
+
+	u32                  timestamp_msw;
+/* Upper 32 bits of the 64-bit session time in microseconds of the
+ * first buffer sample.
+ */
+
+	u32                  flags;
+/* Bitfield of flags.
+ * Supported values for bit 31:
+ * - 1 -- Valid timestamp.
+ * - 0 -- Invalid timestamp.
+ * - Use #ASM_BIT_MASKIMESTAMP_VALID_FLAG as the bitmask and
+ * #ASM_SHIFTIMESTAMP_VALID_FLAG as the shift value to set this bit.
+ * Supported values for bit 30:
+ * - 1 -- Last buffer.
+ * - 0 -- Not the last buffer.
+ *
+ * Supported values for bit 29:
+ * - 1 -- Continue the timestamp from the previous buffer.
+ * - 0 -- Timestamp of the current buffer is not related
+ * to the timestamp of the previous buffer.
+ * - Use #ASM_BIT_MASKS_CONTINUE_FLAG and #ASM_SHIFTS_CONTINUE_FLAG
+ * to set this bit.
+ *
+ * Supported values for bit 4:
+ * - 1 -- End of the frame.
+ * - 0 -- Not the end of frame, or this information is not known.
+ * - Use #ASM_BIT_MASK_EOF_FLAG as the bitmask and #ASM_SHIFT_EOF_FLAG
+ * as the shift value to set this bit.
+ *
+ * All other bits are reserved and must be set to 0.
+ *
+ * If bit 31=0 and bit 29=1: The timestamp of the first sample in
+ * this buffer continues from the timestamp of the last sample in
+ * the previous buffer. If there is no previous buffer (i.e., this
+ * is the first buffer sent after opening the stream or after a
+ * flush operation), or if the previous buffer does not have a valid
+ * timestamp, the samples in the current buffer also do not have a
+ * valid timestamp. They are played out as soon as possible.
+ *
+ *
+ * If bit 31=0 and bit 29=0: No timestamp is associated with the
+ * first sample in this buffer. The samples are played out as soon
+ * as possible.
+ *
+ *
+ * If bit 31=1 and bit 29 is ignored: The timestamp specified in
+ * this payload is honored.
+ *
+ *
+ * If bit 30=0: Not the last buffer in the stream. This is useful
+ * in removing trailing samples.
+ *
+ *
+ * For bit 4: The client can set this flag for every buffer sent in
+ * which the last byte is the end of a frame. If this flag is set,
+ * the buffer can contain data from multiple frames, but it should
+ * always end at a frame boundary. Restrictions allow the aDSP to
+ * detect an end of frame without requiring additional processing.
+ */
+
+} __packed;
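+
+/* Illustrative sketch (an assumption): composing the flags word for
+ * a buffer with a valid timestamp that ends on a frame boundary,
+ * using the bit positions described above rather than the mask/shift
+ * macros (which are defined elsewhere in this file). ts_us is a
+ * hypothetical 64 bit session time in microseconds.
+ *
+ *   struct asm_data_cmd_write_v2 w = {{0}};
+ *
+ *   w.timestamp_lsw = lower_32_bits(ts_us);
+ *   w.timestamp_msw = upper_32_bits(ts_us);
+ *   w.flags         = (1u << 31) | (1u << 4);
+ */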
+
+#define ASM_DATA_CMD_READ_V2 0x00010DAC
+
+struct asm_data_cmd_read_v2 {
+	struct apr_hdr       hdr;
+	u32                  buf_addr_lsw;
+/* the 64 bit address msw-lsw should be a valid mapped address
+ * and should be a multiple of 32 bytes
+ */
+
+
+	u32                  buf_addr_msw;
+/* the 64 bit address msw-lsw should be a valid mapped address
+ * and should be a multiple of 32 bytes.
+ * - Address of the buffer where the DSP puts the encoded data,
+ * potentially at an offset specified by the uOffset field in the
+ * ASM_DATA_EVENT_READ_DONE structure. The buffer should be aligned
+ * to a 32 byte boundary.
+ * - In the case of a 32 bit shared memory address, the msw field
+ * must be set to zero.
+ * - In the case of a 36 bit shared memory address, bit 31 to bit 4
+ * of the msw must be set to zero.
+ */
+	u32                  mem_map_handle;
+/* memory map handle returned by DSP through
+ * ASM_CMD_SHARED_MEM_MAP_REGIONS command.
+ */
+
+	u32                  buf_size;
+/* Number of bytes available for the aDSP to write. The aDSP
+ * starts writing from buf_addr.
+ */
+
+	u32                  seq_id;
+	/* Optional buffer sequence ID. */
+} __packed;
+
+#define ASM_DATA_CMD_EOS               0x00010BDB
+#define ASM_DATA_EVENT_RENDERED_EOS    0x00010C1C
+#define ASM_DATA_EVENT_EOS             0x00010BDD
+
+#define ASM_DATA_EVENT_WRITE_DONE_V2 0x00010D99
+struct asm_data_event_write_done_v2 {
+	u32                  buf_addr_lsw;
+	/* lsw of the 64 bit address */
+	u32                  buf_addr_msw;
+	/* msw of the 64 bit address. Address given by the client in
+	 * the ASM_DATA_CMD_WRITE_V2 command.
+	 */
+	u32                  mem_map_handle;
+	/* memory map handle in the ASM_DATA_CMD_WRITE_V2  */
+
+	u32                  status;
+/* Status message (error code) that indicates whether the
+ * referenced buffer has been successfully consumed.
+ * Supported values: Refer to @xhyperref{Q3,[Q3]}
+ */
+} __packed;
+
+#define ASM_DATA_EVENT_READ_DONE_V2 0x00010D9A
+
+/* Definition of the frame metadata flag bitmask.*/
+#define ASM_BIT_MASK_FRAME_METADATA_FLAG (0x40000000UL)
+
+/* Definition of the frame metadata flag shift value. */
+#define ASM_SHIFT_FRAME_METADATA_FLAG 30
+
+struct asm_data_event_read_done_v2 {
+	u32                  status;
+/* Status message (error code).
+ * Supported values: Refer to @xhyperref{Q3,[Q3]}
+ */
+
+u32                  buf_addr_lsw;
+/* 64 bit address msw-lsw is a valid, mapped address. 64 bit
+ * address is a multiple of 32 bytes.
+ */
+
+u32                  buf_addr_msw;
+/* 64 bit address msw-lsw is a valid, mapped address. 64 bit
+* address is a multiple of 32 bytes.
+*
+* -Same address provided by the client in ASM_DATA_CMD_READ_V2
+* -In the case of 32 bit Shared memory address, msw field is set to
+* zero.
+* -In the case of 36 bit shared memory address, bit 31 to bit 4
+* -of msw is set to zero.
+*/
+
+u32                  mem_map_handle;
+/* memory map handle in the ASM_DATA_CMD_READ_V2  */
+
+u32                  enc_framesotal_size;
+/* Total size of the encoded frames in bytes.
+ * Supported values: >0
+ */
+
+u32                  offset;
+/* Offset (from buf_addr) to the first byte of the first encoded
+ * frame. All encoded frames are consecutive, starting from this
+ * offset.
+ * Supported values: > 0
+ */
+
+u32                  timestamp_lsw;
+/* Lower 32 bits of the 64-bit session time in microseconds of
+ * the first sample in the buffer. If bit 5 of the mode_flags field of
+ * ASM_STREAM_CMD_OPEN_READ_V2 is 1, the 64 bit timestamp is the
+ * absolute capture time; otherwise, it is the relative session time.
+ * The absolute timestamp doesn't reset unless the system is reset.
+ */
+
+
+u32                  timestamp_msw;
+/* Upper 32 bits of the 64-bit session time in microseconds of
+ * the first sample in the buffer.
+ */
+
+
+u32                  flags;
+/* Bitfield of flags. Bit 30 indicates whether frame metadata is
+ * present. If frame metadata is present, num_frames consecutive
+ * instances of @xhyperref{hdr:FrameMetaData,Frame metadata} start
+ * at the buffer address.
+ * Supported values for bit 31:
+ * - 1 -- Timestamp is valid.
+ * - 0 -- Timestamp is invalid.
+ * - Use #ASM_BIT_MASKIMESTAMP_VALID_FLAG and
+ * #ASM_SHIFTIMESTAMP_VALID_FLAG to set this bit.
+ *
+ * Supported values for bit 30:
+ * - 1 -- Frame metadata is present.
+ * - 0 -- Frame metadata is absent.
+ * - Use #ASM_BIT_MASK_FRAME_METADATA_FLAG and
+ * #ASM_SHIFT_FRAME_METADATA_FLAG to set this bit.
+ *
+ * All other bits are reserved; the aDSP sets them to 0.
+ */
+
+u32                  num_frames;
+/* Number of encoded frames in the buffer. */
+
+u32                  seq_id;
+/* Optional buffer sequence ID.	*/
+} __packed;
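+
+/* Illustrative sketch (not part of the original header): reassembling
+ * the split 64-bit session timestamp carried by the event above. The
+ * helper name is hypothetical.
+ */
+static inline u64 asm_example_read_done_timestamp(
+		const struct asm_data_event_read_done_v2 *ev)
+{
+	return ((u64)ev->timestamp_msw << 32) | ev->timestamp_lsw;
+}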
+
+struct asm_data_read_buf_metadata_v2 {
+	u32          offset;
+/* Offset from buf_addr in #ASM_DATA_EVENT_READ_DONE_PAYLOAD to
+ * the frame associated with this metadata.
+ * Supported values: > 0
+ */
+
+u32          frm_size;
+/* Size of the encoded frame in bytes.
+ * Supported values: > 0
+ */
+
+u32          num_encoded_pcm_samples;
+/* Number of encoded PCM samples (per channel) in the frame
+ * associated with this metadata.
+ * Supported values: > 0
+ */
+
+u32          timestamp_lsw;
+/* Lower 32 bits of the 64-bit session time in microseconds of the
+ * first sample for this frame.
+ * If bit 5 of the mode_flags field of ASM_STREAM_CMD_OPEN_READ_V2 is
+ * 1, the 64 bit timestamp is the absolute capture time; otherwise, it
+ * is the relative session time. The absolute timestamp doesn't reset
+ * unless the system is reset.
+ */
+
+
+u32          timestamp_msw;
+/* Upper 32 bits of the 64-bit session time in microseconds of the
+ * first sample for this frame.
+ */
+
+u32          flags;
+/* Frame flags.
+ * Supported values for bit 31:
+ * - 1 -- Time stamp is valid
+ * - 0 -- Time stamp is not valid
+ * - All other bits are reserved; the aDSP sets them to 0.
+*/
+} __packed;
+
+/* Notifies the client of a change in the data sampling rate or
+ * channel mode. This event is raised by the decoder service. The
+ * event is enabled through the mode flags of
+ * #ASM_STREAM_CMD_OPEN_WRITE_V2 or
+ * #ASM_STREAM_CMD_OPEN_READWRITE_V2. It is raised when the decoder
+ * detects a change in the output sampling frequency or in the
+ * number/positioning of output channels, or when it decodes the
+ * first frame. The new sampling frequency or the new channel
+ * configuration is communicated back to the client asynchronously.
+ */
+
+#define ASM_DATA_EVENT_SR_CM_CHANGE_NOTIFY 0x00010C65
+
+/* Payload of the #ASM_DATA_EVENT_SR_CM_CHANGE_NOTIFY event.
+ * This event is raised when both of the following conditions are true:
+ * - The event is enabled through the mode_flags of
+ *   #ASM_STREAM_CMD_OPEN_WRITE_V2 or
+ *   #ASM_STREAM_CMD_OPEN_READWRITE_V2.
+ * - The decoder detects a change in either the output sampling
+ *   frequency or the number/positioning of output channels, or it is
+ *   the first frame decoded.
+ * This event is not raised (even if enabled) if the decoder is MIDI.
+ */
+
+
+struct asm_data_event_sr_cm_change_notify {
+	u32                  sample_rate;
+/* New sampling rate (in Hertz) after detecting a change in the
+ * bitstream.
+ * Supported values: 2000 to 48000
+ */
+
+	u16                  num_channels;
+/* New number of channels after detecting a change in the
+ * bitstream.
+ * Supported values: 1 to 8
+ */
+
+
+	u16                  reserved;
+	/* Reserved for future use. This field must be set to 0.*/
+
+	u8                   channel_mapping[8];
+	/* New channel mapping, one speaker-position entry per channel. */
+
+} __packed;
+
+/* Notifies the client of a data sampling rate or channel mode
+ * change. This event is raised by the encoder service.
+ * This event is raised when:
+ * - Native mode encoding was requested in the encoder
+ * configuration (i.e., the channel count was 0, the sample rate
+ * was 0, or both were 0).
+ *
+ * - The input data frame at the encoder is the first one, or the
+ * sampling rate/channel mode is different from the previous input
+ * data frame.
+ *
+ */
+#define ASM_DATA_EVENT_ENC_SR_CM_CHANGE_NOTIFY 0x00010BDE
+
+struct asm_data_event_enc_sr_cm_change_notify {
+	u32                  sample_rate;
+/* New sampling rate (in Hertz) after detecting a change in the
+ * input data.
+ * Supported values: 2000 to 48000
+ */
+
+
+	u16                  num_channels;
+/* New number of channels after detecting a change in the input
+ * data. Supported values: 1 to 8
+ */
+
+
+	u16                  bits_per_sample;
+/* New bits per sample after detecting a change in the input
+ * data.
+ * Supported values: 16, 24
+ */
+
+
+	u8                   channel_mapping[8];
+	/* New channel mapping, one speaker-position entry per channel. */
+
+} __packed;
+#define ASM_DATA_CMD_IEC_60958_FRAME_RATE 0x00010D87
+
+
+/* Payload of the #ASM_DATA_CMD_IEC_60958_FRAME_RATE command,
+ * which is used to indicate the IEC 60958 frame rate of a given
+ * packetized audio stream.
+ */
+
+struct asm_data_cmd_iec_60958_frame_rate {
+	u32                  frame_rate;
+/* IEC 60958 frame rate of the incoming IEC 61937 packetized stream.
+ * Supported values: Any valid frame rate
+ */
+} __packed;
+
+/* adsp_asm_data_commands.h*/
+/* Definition of the stream ID bitmask.*/
+#define ASM_BIT_MASK_STREAM_ID                 (0x000000FFUL)
+
+/* Definition of the stream ID shift value.*/
+#define ASM_SHIFT_STREAM_ID                    0
+
+/* Definition of the session ID bitmask.*/
+#define ASM_BIT_MASK_SESSION_ID                (0x0000FF00UL)
+
+/* Definition of the session ID shift value.*/
+#define ASM_SHIFT_SESSION_ID                   8
+
+/* Definition of the service ID bitmask.*/
+#define ASM_BIT_MASK_SERVICE_ID                (0x00FF0000UL)
+
+/* Definition of the service ID shift value.*/
+#define ASM_SHIFT_SERVICE_ID                   16
+
+/* Definition of the domain ID bitmask.*/
+#define ASM_BIT_MASK_DOMAIN_ID                (0xFF000000UL)
+
+/* Definition of the domain ID shift value.*/
+#define ASM_SHIFT_DOMAIN_ID                    24
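+
+/* Illustrative sketch (not part of the original header): packing the
+ * four ID subfields into a single 32-bit word with the masks and
+ * shifts above. The helper name is hypothetical.
+ */
+static inline u32 asm_example_pack_ids(u32 domain, u32 service,
+				       u32 session, u32 stream)
+{
+	return ((domain << ASM_SHIFT_DOMAIN_ID) & ASM_BIT_MASK_DOMAIN_ID) |
+	       ((service << ASM_SHIFT_SERVICE_ID) & ASM_BIT_MASK_SERVICE_ID) |
+	       ((session << ASM_SHIFT_SESSION_ID) & ASM_BIT_MASK_SESSION_ID) |
+	       ((stream << ASM_SHIFT_STREAM_ID) & ASM_BIT_MASK_STREAM_ID);
+}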
+
+#define ASM_CMD_SHARED_MEM_MAP_REGIONS               0x00010D92
+#define ASM_CMDRSP_SHARED_MEM_MAP_REGIONS     0x00010D93
+#define ASM_CMD_SHARED_MEM_UNMAP_REGIONS              0x00010D94
+
+/* adsp_asm_service_commands.h */
+
+#define ASM_MAX_SESSION_ID  (15)
+
+/* Maximum number of sessions.*/
+#define ASM_MAX_NUM_SESSIONS                ASM_MAX_SESSION_ID
+
+/* Maximum number of streams per session.*/
+#define ASM_MAX_STREAMS_PER_SESSION (8)
+#define ASM_SESSION_CMD_RUN_V2                   0x00010DAA
+#define ASM_SESSION_CMD_RUN_STARTIME_RUN_IMMEDIATE  0
+#define ASM_SESSION_CMD_RUN_STARTIME_RUN_AT_ABSOLUTEIME 1
+#define ASM_SESSION_CMD_RUN_STARTIME_RUN_AT_RELATIVEIME 2
+#define ASM_SESSION_CMD_RUN_STARTIME_RUN_WITH_DELAY     3
+
+/* Bitmask used to specify the start time for the
+ * ASM_SESSION_CMD_RUN_V2 command.
+ */
+#define ASM_BIT_MASK_RUN_STARTIME                 (0x00000003UL)
+
+/* Bit shift value used to specify the start time for the
+ * ASM_SESSION_CMD_RUN_V2 command.
+ */
+#define ASM_SHIFT_RUN_STARTIME 0
+struct asm_session_cmd_run_v2 {
+	struct apr_hdr hdr;
+	u32                  flags;
+/* Specifies whether to run immediately, at a specific rendering
+ * time, or with a specified delay. Run with delay is useful for
+ * delaying the start of an ASM loopback opened through
+ * ASM_STREAM_CMD_OPEN_LOOPBACK_V2. Use #ASM_BIT_MASK_RUN_STARTIME
+ * and #ASM_SHIFT_RUN_STARTIME to set this 2-bit flag.
+ *
+ * Bits 0 and 1 can take one of four possible values:
+ *
+ * - #ASM_SESSION_CMD_RUN_STARTIME_RUN_IMMEDIATE
+ * - #ASM_SESSION_CMD_RUN_STARTIME_RUN_AT_ABSOLUTEIME
+ * - #ASM_SESSION_CMD_RUN_STARTIME_RUN_AT_RELATIVEIME
+ * - #ASM_SESSION_CMD_RUN_STARTIME_RUN_WITH_DELAY
+ *
+ * All other bits are reserved; clients must set them to zero.
+ */
+
+	u32                  time_lsw;
+/* Lower 32 bits of the time in microseconds used to align the
+ * session origin time. When bits 0-1 of flags are
+ * ASM_SESSION_CMD_RUN_STARTIME_RUN_WITH_DELAY, time_lsw is the lsw
+ * of the delay in microseconds; the maximum value of the 64 bit
+ * delay is 150 ms.
+ */
+
+	u32                  time_msw;
+/* Upper 32 bits of the time in microseconds used to align the
+ * session origin time. When bits 0-1 of flags are
+ * ASM_SESSION_CMD_RUN_STARTIME_RUN_WITH_DELAY, time_msw is the msw
+ * of the delay in microseconds; the maximum value of the 64 bit
+ * delay is 150 ms.
+ */
+
+} __packed;
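+
+/* Illustrative sketch (not part of the original header): filling the
+ * command above to start a session after a delay. APR header setup is
+ * omitted, the helper name is hypothetical, and delay_us is assumed
+ * to respect the documented 150 ms cap.
+ */
+static inline void asm_example_fill_run_with_delay(
+		struct asm_session_cmd_run_v2 *run, u64 delay_us)
+{
+	run->flags = (ASM_SESSION_CMD_RUN_STARTIME_RUN_WITH_DELAY <<
+		      ASM_SHIFT_RUN_STARTIME) & ASM_BIT_MASK_RUN_STARTIME;
+	run->time_lsw = (u32)delay_us;		/* lower 32 bits of delay */
+	run->time_msw = (u32)(delay_us >> 32);	/* upper 32 bits of delay */
+}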
+
+#define ASM_SESSION_CMD_PAUSE 0x00010BD3
+#define ASM_SESSION_CMD_SUSPEND 0x00010DEC
+#define ASM_SESSION_CMD_GET_SESSIONTIME_V3 0x00010D9D
+#define ASM_SESSION_CMD_REGISTER_FOR_RX_UNDERFLOW_EVENTS 0x00010BD5
+
+struct asm_session_cmd_rgstr_rx_underflow {
+	struct apr_hdr hdr;
+	u16                  enable_flag;
+/* Specifies whether a client is to receive events when an Rx
+ * session underflows.
+ * Supported values:
+ * - 0 -- Do not send underflow events
+ * - 1 -- Send underflow events
+ */
+	u16                  reserved;
+	/* Reserved. This field must be set to zero.*/
+} __packed;
+
+#define ASM_SESSION_CMD_REGISTER_FORX_OVERFLOW_EVENTS 0x00010BD6
+
+struct asm_session_cmd_regx_overflow {
+	struct apr_hdr hdr;
+	u16                  enable_flag;
+/* Specifies whether a client is to receive events when a Tx
+* session overflows.
+ * Supported values:
+ * - 0 -- Do not send overflow events
+ * - 1 -- Send overflow events
+ */
+
+	u16                  reserved;
+	/* Reserved. This field must be set to zero.*/
+} __packed;
+
+#define ASM_SESSION_EVENT_RX_UNDERFLOW        0x00010C17
+#define ASM_SESSION_EVENTX_OVERFLOW           0x00010C18
+#define ASM_SESSION_CMDRSP_GET_SESSIONTIME_V3 0x00010D9E
+
+struct asm_session_cmdrsp_get_sessiontime_v3 {
+	u32                  status;
+	/* Status message (error code).
+	* Supported values: Refer to @xhyperref{Q3,[Q3]}
+	*/
+
+	u32                  sessiontime_lsw;
+	/* Lower 32 bits of the current session time in microseconds.*/
+
+	u32                  sessiontime_msw;
+	/* Upper 32 bits of the current session time in microseconds.*/
+
+	u32                  absolutetime_lsw;
+/* Lower 32 bits, in microseconds, of the absolute time at which
+ * the sample corresponding to the above session time gets rendered
+ * to hardware. This absolute time may be slightly in the future or
+ * past.
+ */
+
+
+	u32                  absolutetime_msw;
+/* Upper 32 bits, in microseconds, of the absolute time at which
+ * the sample corresponding to the above session time gets rendered
+ * to hardware. This absolute time may be slightly in the future or
+ * past.
+ */
+
+} __packed;
+
+#define ASM_SESSION_CMD_ADJUST_SESSION_CLOCK_V2     0x00010D9F
+
+struct asm_session_cmd_adjust_session_clock_v2 {
+	struct apr_hdr hdr;
+	u32                  adjustime_lsw;
+/* Lower 32 bits of the signed 64-bit quantity that specifies the
+ * adjustment time in microseconds to the session clock.
+ *
+ * Positive values indicate advancement of the session clock.
+ * Negative values indicate delay of the session clock.
+ */
+
+
+	u32                  adjustime_msw;
+/* Upper 32 bits of the signed 64-bit quantity that specifies
+ * the adjustment time in microseconds to the session clock.
+ * Positive values indicate advancement of the session clock.
+ * Negative values indicate delay of the session clock.
+ */
+
+} __packed;
+
+#define ASM_SESSION_CMDRSP_ADJUST_SESSION_CLOCK_V2    0x00010DA0
+
+struct asm_session_cmdrsp_adjust_session_clock_v2 {
+	u32                  status;
+/* Status message (error code).
+ * Supported values: Refer to @xhyperref{Q3,[Q3]}
+ * An error means the session clock is not adjusted. In this case,
+ * the next two fields are irrelevant.
+ */
+
+
+	u32                  actual_adjustime_lsw;
+/* Lower 32 bits of the signed 64-bit quantity that specifies
+ * the actual adjustment in microseconds performed by the aDSP.
+ * A positive value indicates advancement of the session clock. A
+ * negative value indicates delay of the session clock.
+ */
+
+
+	u32                  actual_adjustime_msw;
+/* Upper 32 bits of the signed 64-bit quantity that specifies
+ * the actual adjustment in microseconds performed by the aDSP.
+ * A positive value indicates advancement of the session clock. A
+ * negative value indicates delay of the session clock.
+ */
+
+
+	u32                  cmd_latency_lsw;
+/* Lower 32 bits of the unsigned 64-bit quantity that specifies
+ * the amount of time in microseconds taken to perform the session
+ * clock adjustment.
+ */
+
+
+	u32                  cmd_latency_msw;
+/* Upper 32 bits of the unsigned 64-bit quantity that specifies
+ * the amount of time in microseconds taken to perform the session
+ * clock adjustment.
+ */
+
+} __packed;
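+
+/* Illustrative sketch (not part of the original header): the actual
+ * adjustment above is a signed 64-bit quantity split across two u32
+ * fields; this shows how a client could reassemble it. The helper
+ * name is hypothetical.
+ */
+static inline s64 asm_example_actual_adjust_us(
+	const struct asm_session_cmdrsp_adjust_session_clock_v2 *rsp)
+{
+	return (s64)(((u64)rsp->actual_adjustime_msw << 32) |
+		     rsp->actual_adjustime_lsw);
+}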
+
+#define ASM_SESSION_CMD_GET_PATH_DELAY_V2	 0x00010DAF
+#define ASM_SESSION_CMDRSP_GET_PATH_DELAY_V2 0x00010DB0
+
+struct asm_session_cmdrsp_get_path_delay_v2 {
+	u32                  status;
+/* Status message (error code). Whether this get delay operation
+ * is successful or not. Delay value is valid only if status is
+ * success.
+ * Supported values: Refer to @xhyperref{Q5,[Q5]}
+ */
+
+	u32                  audio_delay_lsw;
+	/* Lower 32 bits of the aDSP delay in microseconds. */
+
+	u32                  audio_delay_msw;
+	/* Upper 32 bits of the aDSP delay in microseconds. */
+
+} __packed;
+
+/* adsp_asm_session_command.h*/
+#define ASM_STREAM_CMD_OPEN_WRITE_V3       0x00010DB3
+
+#define ASM_LOW_LATENCY_STREAM_SESSION				0x10000000
+
+#define ASM_ULTRA_LOW_LATENCY_STREAM_SESSION			0x20000000
+
+#define ASM_ULL_POST_PROCESSING_STREAM_SESSION			0x40000000
+
+#define ASM_LEGACY_STREAM_SESSION                                      0
+
+
+struct asm_stream_cmd_open_write_v3 {
+	struct apr_hdr			hdr;
+	uint32_t                    mode_flags;
+/* Mode flags that configure the stream to notify the client
+ * whenever it detects an SR/CM change at the input to its POPP.
+ * Supported values for bits 0 to 1:
+ * - Reserved; clients must set them to zero.
+ * Supported values for bit 2:
+ * - 0 -- SR/CM change notification event is disabled.
+ * - 1 -- SR/CM change notification event is enabled.
+ * - Use #ASM_BIT_MASK_SR_CM_CHANGE_NOTIFY_FLAG and
+ * #ASM_SHIFT_SR_CM_CHANGE_NOTIFY_FLAG to set or get this bit.
+ *
+ * Supported values for bit 31:
+ * - 0 -- Stream to be opened in non-Gapless mode.
+ * - 1 -- Stream to be opened in Gapless mode. In Gapless mode,
+ * successive streams must be opened with same session ID but
+ * different stream IDs.
+ *
+ * - Use #ASM_BIT_MASK_GAPLESS_MODE_FLAG and
+ * #ASM_SHIFT_GAPLESS_MODE_FLAG to set or get this bit.
+ *
+ *
+ * @note1hang MIDI and DTMF streams cannot be opened in Gapless mode.
+ */
+
+	uint16_t                    sink_endpointype;
+/*< Sink point type.
+ * Supported values:
+ * - 0 -- Device matrix
+ * - Other values are reserved.
+ *
+ * The device matrix is the gateway to the hardware ports.
+ */
+
+	uint16_t                    bits_per_sample;
+/*< Number of bits per sample processed by ASM modules.
+ * Supported values: 16 and 24 bits per sample
+ */
+
+	uint32_t                    postprocopo_id;
+/*< Specifies the topology (order of processing) of
+ * postprocessing algorithms. <i>None</i> means no postprocessing.
+ * Supported values:
+ * - #ASM_STREAM_POSTPROCOPO_ID_DEFAULT
+ * - #ASM_STREAM_POSTPROCOPO_ID_MCH_PEAK_VOL
+ * - #ASM_STREAM_POSTPROCOPO_ID_NONE
+ *
+ * This field can also be enabled through SetParams flags.
+ */
+
+	uint32_t                    dec_fmt_id;
+/*< Configuration ID of the decoder media format.
+ *
+ * Supported values:
+ * - #ASM_MEDIA_FMT_MULTI_CHANNEL_PCM_V2
+ * - #ASM_MEDIA_FMT_ADPCM
+ * - #ASM_MEDIA_FMT_MP3
+ * - #ASM_MEDIA_FMT_AAC_V2
+ * - #ASM_MEDIA_FMT_DOLBY_AAC
+ * - #ASM_MEDIA_FMT_AMRNB_FS
+ * - #ASM_MEDIA_FMT_AMRWB_FS
+ * - #ASM_MEDIA_FMT_AMR_WB_PLUS_V2
+ * - #ASM_MEDIA_FMT_V13K_FS
+ * - #ASM_MEDIA_FMT_EVRC_FS
+ * - #ASM_MEDIA_FMT_EVRCB_FS
+ * - #ASM_MEDIA_FMT_EVRCWB_FS
+ * - #ASM_MEDIA_FMT_SBC
+ * - #ASM_MEDIA_FMT_WMA_V10PRO_V2
+ * - #ASM_MEDIA_FMT_WMA_V9_V2
+ * - #ASM_MEDIA_FMT_AC3
+ * - #ASM_MEDIA_FMT_EAC3
+ * - #ASM_MEDIA_FMT_G711_ALAW_FS
+ * - #ASM_MEDIA_FMT_G711_MLAW_FS
+ * - #ASM_MEDIA_FMT_G729A_FS
+ * - #ASM_MEDIA_FMT_FR_FS
+ * - #ASM_MEDIA_FMT_VORBIS
+ * - #ASM_MEDIA_FMT_FLAC
+ * - #ASM_MEDIA_FMT_ALAC
+ * - #ASM_MEDIA_FMT_APE
+ * - #ASM_MEDIA_FMT_EXAMPLE
+ */
+} __packed;
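+
+/* Illustrative sketch (not part of the original header): requesting a
+ * gapless open via bit 31 of mode_flags, using the gapless-mode
+ * macros referenced in the field description above (defined elsewhere
+ * in this header). The helper name is hypothetical.
+ */
+static inline void asm_example_set_gapless(uint32_t *mode_flags)
+{
+	*mode_flags |= (1U << ASM_SHIFT_GAPLESS_MODE_FLAG) &
+		       ASM_BIT_MASK_GAPLESS_MODE_FLAG;
+}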
+
+#define ASM_STREAM_CMD_OPEN_PULL_MODE_WRITE    0x00010DD9
+
+/* Bitmask for the stream_perf_mode subfield. */
+#define ASM_BIT_MASK_STREAM_PERF_FLAG_PULL_MODE_WRITE 0xE0000000UL
+
+/* Bit shift for the stream_perf_mode subfield. */
+#define ASM_SHIFT_STREAM_PERF_FLAG_PULL_MODE_WRITE 29
+
+#define ASM_STREAM_CMD_OPEN_PUSH_MODE_READ  0x00010DDA
+
+#define ASM_BIT_MASK_STREAM_PERF_FLAG_PUSH_MODE_READ 0xE0000000UL
+
+#define ASM_SHIFT_STREAM_PERF_FLAG_PUSH_MODE_READ 29
+
+#define ASM_DATA_EVENT_WATERMARK 0x00010DDB
+
+struct asm_shared_position_buffer {
+	volatile uint32_t               frame_counter;
+/* Counter used to handle interprocessor synchronization issues.
+ * When frame_counter is 0, the index, wall_clock_us_lsw, and
+ * wall_clock_us_msw fields are invalid.
+ * Supported values: >= 0.
+ */
+
+	volatile uint32_t               index;
+/* Index in bytes from where the aDSP is reading/writing.
+ * Supported values: 0 to circular buffer size - 1
+ */
+
+	volatile uint32_t               wall_clock_us_lsw;
+/* Lower 32 bits of the 64-bit wall clock time in microseconds when the
+ * index was updated.
+ * Supported values: >= 0
+ */
+
+	volatile uint32_t               wall_clock_us_msw;
+/* Upper 32 bits of the 64-bit wall clock time in microseconds when
+ * the index was updated.
+ * Supported values: >= 0
+ */
+} __packed;
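+
+/* Illustrative sketch (not part of the original header): taking a
+ * consistent snapshot of the shared position buffer. The aDSP updates
+ * these fields concurrently, so a client re-reads frame_counter to
+ * detect a torn read; a zero counter means no valid data has been
+ * published yet. The helper name is hypothetical.
+ */
+static inline int asm_example_read_position(
+		const struct asm_shared_position_buffer *pos,
+		u32 *index, u64 *wall_clock_us)
+{
+	u32 before = pos->frame_counter;
+
+	if (before == 0)
+		return -1;	/* fields not yet valid */
+	*index = pos->index;
+	*wall_clock_us = ((u64)pos->wall_clock_us_msw << 32) |
+			 pos->wall_clock_us_lsw;
+	/* A changed counter means the fields were updated mid-read. */
+	return (pos->frame_counter == before) ? 0 : -1;
+}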
+
+struct asm_shared_watermark_level {
+	uint32_t                watermark_level_bytes;
+} __packed;
+
+struct asm_stream_cmd_open_shared_io {
+	struct apr_hdr          hdr;
+	uint32_t                mode_flags;
+	uint16_t                endpoint_type;
+	uint16_t                topo_bits_per_sample;
+	uint32_t                topo_id;
+	uint32_t                fmt_id;
+	uint32_t                shared_pos_buf_phy_addr_lsw;
+	uint32_t                shared_pos_buf_phy_addr_msw;
+	uint16_t                shared_pos_buf_mem_pool_id;
+	uint16_t                shared_pos_buf_num_regions;
+	uint32_t                shared_pos_buf_property_flag;
+	uint32_t                shared_circ_buf_start_phy_addr_lsw;
+	uint32_t                shared_circ_buf_start_phy_addr_msw;
+	uint32_t                shared_circ_buf_size;
+	uint16_t                shared_circ_buf_mem_pool_id;
+	uint16_t                shared_circ_buf_num_regions;
+	uint32_t                shared_circ_buf_property_flag;
+	uint32_t                num_watermark_levels;
+	struct asm_multi_channel_pcm_fmt_blk_v3         fmt;
+	struct avs_shared_map_region_payload            map_region_pos_buf;
+	struct avs_shared_map_region_payload            map_region_circ_buf;
+	/* Flexible array: num_watermark_levels entries follow. */
+	struct asm_shared_watermark_level watermark[0];
+} __packed;
+
+#define ASM_STREAM_CMD_OPEN_READ_V3                 0x00010DB4
+
+/* Definition of the timestamp type flag bitmask */
+#define ASM_BIT_MASKIMESTAMPYPE_FLAG        (0x00000020UL)
+
+/* Definition of the timestamp type flag shift value. */
+#define ASM_SHIFTIMESTAMPYPE_FLAG 5
+
+/* Relative timestamp is identified by this value.*/
+#define ASM_RELATIVEIMESTAMP      0
+
+/* Absolute timestamp is identified by this value.*/
+#define ASM_ABSOLUTEIMESTAMP      1
+
+/* Bit value for Low Latency Tx stream subfield */
+#define ASM_LOW_LATENCY_TX_STREAM_SESSION			1
+
+/* Bit shift for the stream_perf_mode subfield. */
+#define ASM_SHIFT_STREAM_PERF_MODE_FLAG_IN_OPEN_READ              29
+
+struct asm_stream_cmd_open_read_v3 {
+	struct apr_hdr hdr;
+	u32                    mode_flags;
+/* Mode flags that indicate whether meta information per encoded
+ * frame is to be provided.
+ * Supported values for bit 4:
+ *
+ * - 0 -- Return data buffer contains all encoded frames only; it
+ * does not contain frame metadata.
+ *
+ * - 1 -- Return data buffer contains an array of metadata and
+ * encoded frames.
+ *
+ * - Use #ASM_BIT_MASK_META_INFO_FLAG as the bitmask and
+ * #ASM_SHIFT_META_INFO_FLAG as the shift value for this bit.
+ *
+ *
+ * Supported values for bit 5:
+ *
+ * - ASM_RELATIVEIMESTAMP -- ASM_DATA_EVENT_READ_DONE_V2 will have a
+ *   relative time-stamp.
+ * - ASM_ABSOLUTEIMESTAMP -- ASM_DATA_EVENT_READ_DONE_V2 will have an
+ *   absolute time-stamp.
+ *
+ * - Use #ASM_BIT_MASKIMESTAMPYPE_FLAG as the bitmask and
+ * #ASM_SHIFTIMESTAMPYPE_FLAG as the shift value for this bit.
+ *
+ * All other bits are reserved; clients must set them to zero.
+ */
+
+	u32                    src_endpointype;
+/* Specifies the endpoint providing the input samples.
+ * Supported values:
+ * - 0 -- Device matrix
+ * - All other values are reserved; clients must set them to zero.
+ * Otherwise, an error is returned.
+ * The device matrix is the gateway from the tunneled Tx ports.
+ */
+
+	u32                    preprocopo_id;
+/* Specifies the topology (order of processing) of preprocessing
+ * algorithms. <i>None</i> means no preprocessing.
+ * Supported values:
+ * - #ASM_STREAM_PREPROCOPO_ID_DEFAULT
+ * - #ASM_STREAM_PREPROCOPO_ID_NONE
+ *
+ * This field can also be enabled through SetParams flags.
+ */
+
+	u32                    enc_cfg_id;
+/* Media configuration ID for encoded output.
+ * Supported values:
+ * - #ASM_MEDIA_FMT_MULTI_CHANNEL_PCM_V2
+ * - #ASM_MEDIA_FMT_AAC_V2
+ * - #ASM_MEDIA_FMT_AMRNB_FS
+ * - #ASM_MEDIA_FMT_AMRWB_FS
+ * - #ASM_MEDIA_FMT_V13K_FS
+ * - #ASM_MEDIA_FMT_EVRC_FS
+ * - #ASM_MEDIA_FMT_EVRCB_FS
+ * - #ASM_MEDIA_FMT_EVRCWB_FS
+ * - #ASM_MEDIA_FMT_SBC
+ * - #ASM_MEDIA_FMT_G711_ALAW_FS
+ * - #ASM_MEDIA_FMT_G711_MLAW_FS
+ * - #ASM_MEDIA_FMT_G729A_FS
+ * - #ASM_MEDIA_FMT_EXAMPLE
+ * - #ASM_MEDIA_FMT_WMA_V8
+ */
+
+	u16                    bits_per_sample;
+/* Number of bits per sample processed by ASM modules.
+ * Supported values: 16 and 24 bits per sample
+ */
+
+	u16                    reserved;
+/* Reserved for future use. This field must be set to zero.*/
+} __packed;
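+
+/* Illustrative sketch (not part of the original header): selecting
+ * absolute timestamps for READ_DONE events via bit 5 of mode_flags,
+ * using the timestamp-type macros defined earlier. The helper name is
+ * hypothetical.
+ */
+static inline void asm_example_request_absolute_ts(u32 *mode_flags)
+{
+	*mode_flags &= ~ASM_BIT_MASKIMESTAMPYPE_FLAG;
+	*mode_flags |= (ASM_ABSOLUTEIMESTAMP << ASM_SHIFTIMESTAMPYPE_FLAG) &
+		       ASM_BIT_MASKIMESTAMPYPE_FLAG;
+}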
+
+#define ASM_POPP_OUTPUT_SR_NATIVE_RATE                                  0
+
+/* Enumeration for the maximum sampling rate at the POPP output.*/
+#define ASM_POPP_OUTPUT_SR_MAX_RATE             48000
+
+#define ASM_STREAM_CMD_OPEN_READWRITE_V2        0x00010D8D
+
+struct asm_stream_cmd_open_readwrite_v2 {
+	struct apr_hdr         hdr;
+	u32                    mode_flags;
+/* Mode flags.
+ * Supported values for bit 2:
+ * - 0 -- SR/CM change notification event is disabled.
+ * - 1 -- SR/CM change notification event is enabled. Use
+ * #ASM_BIT_MASK_SR_CM_CHANGE_NOTIFY_FLAG and
+ * #ASM_SHIFT_SR_CM_CHANGE_NOTIFY_FLAG to set or
+ * get this flag.
+ *
+ * Supported values for bit 4:
+ * - 0 -- Return read data buffer contains all encoded frames only; it
+ * does not contain frame metadata.
+ * - 1 -- Return read data buffer contains an array of metadata and
+ * encoded frames.
+ *
+ * All other bits are reserved; clients must set them to zero.
+ */
+
+	u32                    postprocopo_id;
+/* Specifies the topology (order of processing) of postprocessing
+ * algorithms. <i>None</i> means no postprocessing.
+ *
+ * Supported values:
+ * - #ASM_STREAM_POSTPROCOPO_ID_DEFAULT
+ * - #ASM_STREAM_POSTPROCOPO_ID_MCH_PEAK_VOL
+ * - #ASM_STREAM_POSTPROCOPO_ID_NONE
+ */
+
+	u32                    dec_fmt_id;
+/* Specifies the media type of the input data. PCM indicates that
+ * no decoding must be performed, e.g., this is an NT encoder
+ * session.
+ * Supported values:
+ * - #ASM_MEDIA_FMT_MULTI_CHANNEL_PCM_V2
+ * - #ASM_MEDIA_FMT_ADPCM
+ * - #ASM_MEDIA_FMT_MP3
+ * - #ASM_MEDIA_FMT_AAC_V2
+ * - #ASM_MEDIA_FMT_DOLBY_AAC
+ * - #ASM_MEDIA_FMT_AMRNB_FS
+ * - #ASM_MEDIA_FMT_AMRWB_FS
+ * - #ASM_MEDIA_FMT_V13K_FS
+ * - #ASM_MEDIA_FMT_EVRC_FS
+ * - #ASM_MEDIA_FMT_EVRCB_FS
+ * - #ASM_MEDIA_FMT_EVRCWB_FS
+ * - #ASM_MEDIA_FMT_SBC
+ * - #ASM_MEDIA_FMT_WMA_V10PRO_V2
+ * - #ASM_MEDIA_FMT_WMA_V9_V2
+ * - #ASM_MEDIA_FMT_AMR_WB_PLUS_V2
+ * - #ASM_MEDIA_FMT_AC3
+ * - #ASM_MEDIA_FMT_G711_ALAW_FS
+ * - #ASM_MEDIA_FMT_G711_MLAW_FS
+ * - #ASM_MEDIA_FMT_G729A_FS
+ * - #ASM_MEDIA_FMT_EXAMPLE
+ */
+
+	u32                    enc_cfg_id;
+/* Specifies the media type for the output of the stream. PCM
+ * indicates that no encoding must be performed, e.g., this is an NT
+ * decoder session.
+ * Supported values:
+ * - #ASM_MEDIA_FMT_MULTI_CHANNEL_PCM_V2
+ * - #ASM_MEDIA_FMT_AAC_V2
+ * - #ASM_MEDIA_FMT_AMRNB_FS
+ * - #ASM_MEDIA_FMT_AMRWB_FS
+ * - #ASM_MEDIA_FMT_V13K_FS
+ * - #ASM_MEDIA_FMT_EVRC_FS
+ * - #ASM_MEDIA_FMT_EVRCB_FS
+ * - #ASM_MEDIA_FMT_EVRCWB_FS
+ * - #ASM_MEDIA_FMT_SBC
+ * - #ASM_MEDIA_FMT_G711_ALAW_FS
+ * - #ASM_MEDIA_FMT_G711_MLAW_FS
+ * - #ASM_MEDIA_FMT_G729A_FS
+ * - #ASM_MEDIA_FMT_EXAMPLE
+ * - #ASM_MEDIA_FMT_WMA_V8
+ */
+
+	u16                    bits_per_sample;
+/* Number of bits per sample processed by ASM modules.
+ * Supported values: 16 and 24 bits per sample
+ */
+
+	u16                    reserved;
+/* Reserved for future use. This field must be set to zero.*/
+
+} __packed;
+
+#define ASM_STREAM_CMD_OPEN_LOOPBACK_V2 0x00010D8E
+struct asm_stream_cmd_open_loopback_v2 {
+	struct apr_hdr         hdr;
+	u32                    mode_flags;
+/* Mode flags.
+ * Bits 0-31 are reserved; clients must set them to 0.
+ */
+	u16                    src_endpointype;
+	/* Endpoint type. 0 = Tx Matrix */
+	u16                    sink_endpointype;
+	/* Endpoint type. 0 = Rx Matrix */
+	u32                    postprocopo_id;
+/* Postprocessor topology ID. Specifies the topology of
+ * postprocessing algorithms.
+ */
+
+	u16                    bits_per_sample;
+/* The number of bits per sample processed by ASM modules
+ * Supported values: 16 and 24 bits per sample
+ */
+	u16                    reserved;
+/* Reserved for future use. This field must be set to zero. */
+} __packed;
+
+
+#define ASM_STREAM_CMD_OPEN_TRANSCODE_LOOPBACK    0x00010DBA
+
+/* Bitmask for the stream's Performance mode. */
+#define ASM_BIT_MASK_STREAM_PERF_MODE_FLAG_IN_OPEN_TRANSCODE_LOOPBACK \
+	(0x70000000UL)
+
+/* Bit shift for the stream's Performance mode. */
+#define ASM_SHIFT_STREAM_PERF_MODE_FLAG_IN_OPEN_TRANSCODE_LOOPBACK    28
+
+/* Bitmask for the decoder converter enable flag. */
+#define ASM_BIT_MASK_DECODER_CONVERTER_FLAG    (0x00000078UL)
+
+/* Shift value for the decoder converter enable flag. */
+#define ASM_SHIFT_DECODER_CONVERTER_FLAG                              3
+
+/* Converter mode is None (Default). */
+#define ASM_CONVERTER_MODE_NONE                                       0
+
+/* Converter mode is DDP-to-DD. */
+#define ASM_DDP_DD_CONVERTER_MODE                                     1
+
+/*  Identifies a special converter mode where the source and sink
+ *  formats are the same but postprocessing must be applied; hence, a
+ *  Decode @rarrow Re-encode is necessary.
+ */
+#define ASM_POST_PROCESS_CONVERTER_MODE                               2
+
+
+struct asm_stream_cmd_open_transcode_loopback_t {
+	struct apr_hdr         hdr;
+	u32                    mode_flags;
+/* Mode Flags specifies the performance mode in which this stream
+ * is to be opened.
+ * Supported values (for bits 30 to 28, the stream_perf_mode flag):
+ *
+ * #ASM_LEGACY_STREAM_SESSION -- This mode ensures backward
+ *       compatibility to the original behavior
+ *       of ASM_STREAM_CMD_OPEN_TRANSCODE_LOOPBACK
+ *
+ * #ASM_LOW_LATENCY_STREAM_SESSION -- Opens a loopback session by using
+ *  shortened buffers in low latency POPP
+ *  - Recommendation: Do not enable high latency algorithms. They might
+ *    negate the benefits of opening a low latency stream, and they
+ *    might also suffer quality degradation from unexpected jitter.
+ *  - This Low Latency mode is supported only for PCM In and PCM Out
+ *    loopbacks. An error is returned if Low Latency mode is opened for
+ *    other transcode loopback modes.
+ *  - To configure this subfield, use
+ *     ASM_BIT_MASK_STREAM_PERF_MODE_FLAG_IN_OPEN_TRANSCODE_LOOPBACK and
+ *     ASM_SHIFT_STREAM_PERF_MODE_FLAG_IN_OPEN_TRANSCODE_LOOPBACK.
+ *
+ * Supported values (for bits 6 to 3, decoder-converter compatibility):
+ * #ASM_CONVERTER_MODE_NONE (0x0) -- Default
+ * #ASM_DDP_DD_CONVERTER_MODE (0x1)
+ * #ASM_POST_PROCESS_CONVERTER_MODE (0x2)
+ * 0x3-0xF -- Reserved for future use
+ * - Use #ASM_BIT_MASK_DECODER_CONVERTER_FLAG and
+ *        ASM_SHIFT_DECODER_CONVERTER_FLAG to set this bit
+ * All other bits are reserved; clients must set them to 0.
+ */
+
+	u32                    src_format_id;
+/* Specifies the media format of the input audio stream.
+ *
+ * Supported values
+ * - #ASM_MEDIA_FMT_MULTI_CHANNEL_PCM_V2
+ * - #ASM_MEDIA_FMT_MULTI_CHANNEL_PCM_V3
+ * - #ASM_MEDIA_FMT_DTS
+ * - #ASM_MEDIA_FMT_EAC3_DEC
+ * - #ASM_MEDIA_FMT_EAC3
+ * - #ASM_MEDIA_FMT_AC3_DEC
+ * - #ASM_MEDIA_FMT_AC3
+ */
+	u32                    sink_format_id;
+/* Specifies the media format of the output stream.
+ *
+ * Supported values
+ * - #ASM_MEDIA_FMT_MULTI_CHANNEL_PCM_V2
+ * - #ASM_MEDIA_FMT_MULTI_CHANNEL_PCM_V3
+ * - #ASM_MEDIA_FMT_DTS (not supported in Low Latency mode)
+ * - #ASM_MEDIA_FMT_EAC3_DEC (not supported in Low Latency mode)
+ * - #ASM_MEDIA_FMT_EAC3 (not supported in Low Latency mode)
+ * - #ASM_MEDIA_FMT_AC3_DEC (not supported in Low Latency mode)
+ * - #ASM_MEDIA_FMT_AC3 (not supported in Low Latency mode)
+ */
+
+	u32                    audproc_topo_id;
+/* Postprocessing topology ID, which specifies the topology (order of
+ *        processing) of postprocessing algorithms.
+ *
+ * Supported values
+ *    - #ASM_STREAM_POSTPROC_TOPO_ID_DEFAULT
+ *    - #ASM_STREAM_POSTPROC_TOPO_ID_PEAKMETER
+ *    - #ASM_STREAM_POSTPROC_TOPO_ID_MCH_PEAK_VOL
+ *    - #ASM_STREAM_POSTPROC_TOPO_ID_NONE
+ *  Topologies can be added through #ASM_CMD_ADD_TOPOLOGIES.
+ *  This field is ignored for the Converter mode, in which no
+ *  postprocessing is performed.
+ */
+
+	u16                    src_endpoint_type;
+/* Specifies the source endpoint that provides the input samples.
+ *
+ * Supported values
+ *  - 0 -- Tx device matrix or stream router (gateway to the hardware
+ *    ports)
+ *  - All other values are reserved
+ *  Clients must set this field to 0. Otherwise, an error is returned.
+ */
+
+	u16                    sink_endpoint_type;
+/*  Specifies the sink endpoint type.
+ *
+ *  Supported values
+ *  - 0 -- Rx device matrix or stream router (gateway to the hardware
+ *    ports)
+ *  - All other values are reserved
+ *   Clients must set this field to 0. Otherwise, an error is returned.
+ */
+
+	u16                    bits_per_sample;
+/*   Number of bits per sample processed by the ASM modules.
+ *   Supported values 16, 24
+ */
+
+	u16                    reserved;
+/*   This field must be set to 0.
+ */
+} __packed;
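+
+/* Illustrative sketch (not part of the original header): encoding the
+ * DDP-to-DD converter mode into bits 6 to 3 of the mode_flags word
+ * described above. The helper name is hypothetical.
+ */
+static inline u32 asm_example_converter_flags(u32 mode_flags)
+{
+	mode_flags &= ~ASM_BIT_MASK_DECODER_CONVERTER_FLAG;
+	mode_flags |= (ASM_DDP_DD_CONVERTER_MODE <<
+		       ASM_SHIFT_DECODER_CONVERTER_FLAG) &
+		      ASM_BIT_MASK_DECODER_CONVERTER_FLAG;
+	return mode_flags;
+}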
+
+
+#define ASM_STREAM_CMD_CLOSE             0x00010BCD
+#define ASM_STREAM_CMD_FLUSH             0x00010BCE
+
+
+#define ASM_STREAM_CMD_FLUSH_READBUFS   0x00010C09
+#define ASM_STREAM_CMD_SET_PP_PARAMS_V2 0x00010DA1
+
+struct asm_stream_cmd_set_pp_params_v2 {
+	u32                  data_payload_addr_lsw;
+/* LSW of parameter data payload address. Supported values: any. */
+	u32                  data_payload_addr_msw;
+/* MSW of the parameter data payload address. Supported values: any.
+ * - Must be set to zero for in-band data.
+ * - In the case of a 32 bit shared memory address, the msw field must
+ *   be set to zero.
+ * - In the case of a 36 bit shared memory address, bits 31 to 4 of
+ *   the msw must be set to zero.
+ */
+	u32                  mem_map_handle;
+/* Supported values: any.
+ * Memory map handle returned by the DSP through the
+ * ASM_CMD_SHARED_MEM_MAP_REGIONS command.
+ * If mmhandle is NULL, the ParamData payloads are within the
+ * message payload (in-band).
+ * If mmhandle is non-NULL, the ParamData payloads begin at the
+ * address specified in the address msw and lsw (out-of-band).
+ */
+
+	u32                  data_payload_size;
+/* Size in bytes of the variable payload accompanying the message,
+ * or in shared memory. This field is used for parsing the parameter
+ * payload.
+ */
+
+} __packed;
+
+
+struct asm_stream_param_data_v2 {
+	u32                  module_id;
+	/* Unique module ID. */
+
+	u32                  param_id;
+	/* Unique parameter ID. */
+
+	u16                  param_size;
+/* Data size of the param_id/module_id combination. This is
+ * a multiple of 4 bytes.
+ */
+
+	u16                  reserved;
+/* Reserved for future enhancements. This field must be set to
+ * zero.
+ */
+
+} __packed;
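+
+/* Illustrative sketch (not part of the original header): layout of an
+ * in-band SET_PP_PARAMS_V2 payload. For in-band data the payload
+ * address fields and mem_map_handle are zero and the parameter data
+ * follows the command header directly. The type and helper names are
+ * hypothetical, and the single u32 value stands in for any 4-byte
+ * parameter payload.
+ */
+struct asm_example_inband_pp_param {
+	struct asm_stream_cmd_set_pp_params_v2 cmd;
+	struct asm_stream_param_data_v2 data;
+	u32 value;			/* example parameter payload */
+} __packed;
+
+static inline void asm_example_fill_inband(
+		struct asm_example_inband_pp_param *p,
+		u32 module_id, u32 param_id, u32 value)
+{
+	p->cmd.data_payload_addr_lsw = 0;	/* in-band */
+	p->cmd.data_payload_addr_msw = 0;
+	p->cmd.mem_map_handle = 0;		/* NULL handle => in-band */
+	p->cmd.data_payload_size = sizeof(p->data) + sizeof(p->value);
+	p->data.module_id = module_id;
+	p->data.param_id = param_id;
+	p->data.param_size = sizeof(p->value);
+	p->data.reserved = 0;
+	p->value = value;
+}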
+
+#define ASM_STREAM_CMD_GET_PP_PARAMS_V2		0x00010DA2
+
+struct asm_stream_cmd_get_pp_params_v2 {
+	u32                  data_payload_addr_lsw;
+	/* LSW of the parameter data payload address. */
+	u32                  data_payload_addr_msw;
+/* MSW of the parameter data payload address.
+ * - Size of the shared memory, if specified, shall be large enough
+ * to contain the whole ParamData payload, including Module ID,
+ * Param ID, Param Size, and Param Values
+ * - Must be set to zero for in-band data
+ * - In the case of 32 bit Shared memory address, msw field must be
+ * set to zero.
+ * - In the case of 36 bit shared memory address, bit 31 to bit 4 of
+ * msw must be set to zero.
+ */
+
+	u32                  mem_map_handle;
+/* Supported values: any.
+ * Memory map handle returned by the DSP through the
+ * ASM_CMD_SHARED_MEM_MAP_REGIONS command.
+ * If mmhandle is NULL, the ParamData payloads in the ACK are within
+ * the message payload (in-band).
+ * If mmhandle is non-NULL, the ParamData payloads in the ACK begin at
+ * the address specified in the address msw and lsw (out-of-band).
+ */
+
+	u32                  module_id;
+	/* Unique module ID. */
+
+	u32                  param_id;
+	/* Unique parameter ID. */
+
+	u16                  param_max_size;
+/* Maximum data size of the module_id/param_id combination. This
+ * is a multiple of 4 bytes.
+ */
+
+
+	u16                  reserved;
+/* Reserved for backward compatibility. Clients must set this
+* field to zero.
+*/
+
+} __packed;
+
+#define ASM_STREAM_CMD_SET_ENCDEC_PARAM 0x00010C10
+
+#define ASM_STREAM_CMD_SET_ENCDEC_PARAM_V2     0x00013218
+
+struct asm_stream_cmd_set_encdec_param_v2 {
+	u16                  service_id;
+	/* 0 - ASM_ENCODER_SVC; 1 - ASM_DECODER_SVC */
+
+	u16                  reserved;
+
+	u32                  param_id;
+	/* ID of the parameter. */
+
+	u32                  param_size;
+	/*
+	 * Data size of this parameter, in bytes. The size is a multiple
+	 * of 4 bytes.
+	 */
+} __packed;
+
+#define ASM_STREAM_CMD_REGISTER_ENCDEC_EVENTS  0x00013219
+
+#define ASM_STREAM_CMD_ENCDEC_EVENTS           0x0001321A
+
+#define AVS_PARAM_ID_RTIC_SHARED_MEMORY_ADDR   0x00013237
+
+struct avs_rtic_shared_mem_addr {
+	struct apr_hdr hdr;
+	struct asm_stream_cmd_set_encdec_param_v2  encdec;
+	u32                 shm_buf_addr_lsw;
+	/* Lower 32 bit of the RTIC shared memory */
+
+	u32                 shm_buf_addr_msw;
+	/* Upper 32 bit of the RTIC shared memory */
+
+	u32                 buf_size;
+	/* Size of buffer */
+
+	u16                 shm_buf_mem_pool_id;
+	/* ADSP_MEMORY_MAP_SHMEM8_4K_POOL */
+
+	u16                 shm_buf_num_regions;
+	/* number of regions to map */
+
+	u32                 shm_buf_flag;
+	/* buffer property flag */
+
+	struct avs_shared_map_region_payload map_region;
+	/* memory map region*/
+} __packed;
+
+#define AVS_PARAM_ID_RTIC_EVENT_ACK           0x00013238
+
+struct avs_param_rtic_event_ack {
+	struct apr_hdr hdr;
+	struct asm_stream_cmd_set_encdec_param_v2  encdec;
+} __packed;
+
+#define ASM_PARAM_ID_ENCDEC_BITRATE     0x00010C13
+
+struct asm_bitrate_param {
+	u32                  bitrate;
+/* Maximum supported bitrate. Only the AAC encoder is supported.*/
+
+} __packed;
+
+#define ASM_PARAM_ID_ENCDEC_ENC_CFG_BLK_V2 0x00010DA3
+#define ASM_PARAM_ID_AAC_SBR_PS_FLAG		 0x00010C63
+
+/* Flag to turn off both SBR and PS processing, if they are
+ * present in the bitstream.
+ */
+
+#define ASM_AAC_SBR_OFF_PS_OFF (2)
+
+/* Flag to turn on SBR but turn off PS processing,if they are
+ * present in the bitstream.
+ */
+
+#define ASM_AAC_SBR_ON_PS_OFF  (1)
+
+/* Flag to turn on both SBR and PS processing, if they are
+ * present in the bitstream (default behavior).
+ */
+
+
+#define ASM_AAC_SBR_ON_PS_ON   (0)
+
+/* Structure for an AAC SBR PS processing flag. */
+
+/*  Payload of the #ASM_PARAM_ID_AAC_SBR_PS_FLAG parameter in the
+ * #ASM_STREAM_CMD_SET_ENCDEC_PARAM command.
+ */
+struct asm_aac_sbr_ps_flag_param {
+	struct apr_hdr hdr;
+	struct asm_stream_cmd_set_encdec_param  encdec;
+	struct asm_enc_cfg_blk_param_v2	encblk;
+
+	u32                  sbr_ps_flag;
+/* Control parameter to enable or disable SBR/PS processing in
+ * the AAC bitstream. Use the following macros to set this field:
+ * - #ASM_AAC_SBR_OFF_PS_OFF -- Turn off both SBR and PS
+ * processing, if they are present in the bitstream.
+ * - #ASM_AAC_SBR_ON_PS_OFF -- Turn on SBR processing, but not PS
+ * processing, if they are present in the bitstream.
+ * - #ASM_AAC_SBR_ON_PS_ON -- Turn on both SBR and PS processing,
+ * if they are present in the bitstream (default behavior).
+ * - All other values are invalid.
+ * Changes are applied to the next decoded frame.
+ */
+} __packed;
+
+#define ASM_PARAM_ID_AAC_DUAL_MONO_MAPPING                      0x00010C64
+
+/*	First single channel element in a dual mono bitstream.*/
+#define ASM_AAC_DUAL_MONO_MAP_SCE_1                                 (1)
+
+/*	Second single channel element in a dual mono bitstream.*/
+#define ASM_AAC_DUAL_MONO_MAP_SCE_2                                 (2)
+
+/* Structure for AAC decoder dual mono channel mapping. */
+
+
+struct asm_aac_dual_mono_mapping_param {
+	struct apr_hdr							hdr;
+	struct asm_stream_cmd_set_encdec_param	encdec;
+	u16    left_channel_sce;
+	u16    right_channel_sce;
+
+} __packed;
+
+#define ASM_STREAM_CMDRSP_GET_PP_PARAMS_V2 0x00010DA4
+
+struct asm_stream_cmdrsp_get_pp_params_v2 {
+	u32                  status;
+} __packed;
+
+#define ASM_PARAM_ID_AC3_KARAOKE_MODE 0x00010D73
+
+/* Enumeration for both vocals in a karaoke stream.*/
+#define AC3_KARAOKE_MODE_NO_VOCAL     (0)
+
+/* Enumeration for only the left vocal in a karaoke stream.*/
+#define AC3_KARAOKE_MODE_LEFT_VOCAL   (1)
+
+/* Enumeration for only the right vocal in a karaoke stream.*/
+#define AC3_KARAOKE_MODE_RIGHT_VOCAL (2)
+
+/* Enumeration for both vocal channels in a karaoke stream.*/
+#define AC3_KARAOKE_MODE_BOTH_VOCAL             (3)
+#define ASM_PARAM_ID_AC3_DRC_MODE               0x00010D74
+/* Enumeration for the Custom Analog mode.*/
+#define AC3_DRC_MODE_CUSTOM_ANALOG              (0)
+
+/* Enumeration for the Custom Digital mode.*/
+#define AC3_DRC_MODE_CUSTOM_DIGITAL             (1)
+/* Enumeration for the Line Out mode (light compression).*/
+#define AC3_DRC_MODE_LINE_OUT  (2)
+
+/* Enumeration for the RF remodulation mode (heavy compression).*/
+#define AC3_DRC_MODE_RF_REMOD                         (3)
+#define ASM_PARAM_ID_AC3_DUAL_MONO_MODE               0x00010D75
+
+/* Enumeration for playing dual mono in stereo mode.*/
+#define AC3_DUAL_MONO_MODE_STEREO                     (0)
+
+/* Enumeration for playing left mono.*/
+#define AC3_DUAL_MONO_MODE_LEFT_MONO                  (1)
+
+/* Enumeration for playing right mono.*/
+#define AC3_DUAL_MONO_MODE_RIGHT_MONO                 (2)
+
+/* Enumeration for mixing both dual mono channels and playing them.*/
+#define AC3_DUAL_MONO_MODE_MIXED_MONO        (3)
+#define ASM_PARAM_ID_AC3_STEREO_DOWNMIX_MODE 0x00010D76
+
+/* Enumeration for using the Downmix mode indicated in the bitstream. */
+
+#define AC3_STEREO_DOWNMIX_MODE_AUTO_DETECT  (0)
+
+/* Enumeration for Surround Compatible mode (preserves the
+ * surround information).
+ */
+
+#define AC3_STEREO_DOWNMIX_MODE_LT_RT        (1)
+/* Enumeration for Mono Compatible mode (if the output is to be
+ * further downmixed to mono).
+ */
+
+#define AC3_STEREO_DOWNMIX_MODE_LO_RO (2)
+
+/* ID of the AC3 PCM scale factor parameter in the
+ * #ASM_STREAM_CMD_SET_ENCDEC_PARAM command.
+ */
+#define ASM_PARAM_ID_AC3_PCM_SCALEFACTOR 0x00010D78
+
+/* ID of the AC3 DRC boost scale factor parameter in the
+ * #ASM_STREAM_CMD_SET_ENCDEC_PARAM command.
+ */
+#define ASM_PARAM_ID_AC3_DRC_BOOST_SCALEFACTOR 0x00010D79
+
+/* ID of the AC3 DRC cut scale factor parameter in the
+ * #ASM_STREAM_CMD_SET_ENCDEC_PARAM command.
+ */
+#define ASM_PARAM_ID_AC3_DRC_CUT_SCALEFACTOR 0x00010D7A
+
+/* Structure for AC3 Generic Parameter. */
+
+/*  Payload of the AC3 parameters in the
+ * #ASM_STREAM_CMD_SET_ENCDEC_PARAM command.
+ */
+struct asm_ac3_generic_param {
+	struct apr_hdr hdr;
+	struct asm_stream_cmd_set_encdec_param  encdec;
+	struct asm_enc_cfg_blk_param_v2	encblk;
+	u32                  generic_parameter;
+/* AC3 generic parameter. Select from one of the following
+ * possible values.
+ *
+ * For #ASM_PARAM_ID_AC3_KARAOKE_MODE, supported values are:
+ * - AC3_KARAOKE_MODE_NO_VOCAL
+ * - AC3_KARAOKE_MODE_LEFT_VOCAL
+ * - AC3_KARAOKE_MODE_RIGHT_VOCAL
+ * - AC3_KARAOKE_MODE_BOTH_VOCAL
+ *
+ * For #ASM_PARAM_ID_AC3_DRC_MODE, supported values are:
+ * - AC3_DRC_MODE_CUSTOM_ANALOG
+ * - AC3_DRC_MODE_CUSTOM_DIGITAL
+ * - AC3_DRC_MODE_LINE_OUT
+ * - AC3_DRC_MODE_RF_REMOD
+ *
+ * For #ASM_PARAM_ID_AC3_DUAL_MONO_MODE, supported values are:
+ * - AC3_DUAL_MONO_MODE_STEREO
+ * - AC3_DUAL_MONO_MODE_LEFT_MONO
+ * - AC3_DUAL_MONO_MODE_RIGHT_MONO
+ * - AC3_DUAL_MONO_MODE_MIXED_MONO
+ *
+ * For #ASM_PARAM_ID_AC3_STEREO_DOWNMIX_MODE, supported values are:
+ * - AC3_STEREO_DOWNMIX_MODE_AUTO_DETECT
+ * - AC3_STEREO_DOWNMIX_MODE_LT_RT
+ * - AC3_STEREO_DOWNMIX_MODE_LO_RO
+ *
+ * For #ASM_PARAM_ID_AC3_PCM_SCALEFACTOR, supported values are
+ * 0 to 1 in Q31 format.
+ *
+ * For #ASM_PARAM_ID_AC3_DRC_BOOST_SCALEFACTOR, supported values are
+ * 0 to 1 in Q31 format.
+ *
+ * For #ASM_PARAM_ID_AC3_DRC_CUT_SCALEFACTOR, supported values are
+ * 0 to 1 in Q31 format.
+ */
+} __packed;
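+
+/* Illustrative sketch (not part of the original header): the three AC3
+ * scale-factor parameters take values of 0 to 1 in Q31 format. This
+ * hypothetical helper converts a permille (0..1000) setting to Q31.
+ */
+static inline u32 asm_example_q31_from_permille(u32 permille)
+{
+	/* 1.0 in Q31 is 0x7FFFFFFF; scale linearly. */
+	return (u32)(((u64)0x7FFFFFFF * permille) / 1000);
+}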
+
+/* Enumeration for Raw mode (no downmixing), which specifies
+ * that all channels in the bitstream are to be played out as is
+ * without any downmixing. (Default)
+ */
+
+#define WMAPRO_CHANNEL_MASK_RAW (-1)
+
+/* Enumeration for setting the channel mask to 0. The 7.1 mode
+ * (Home Theater) is assigned.
+ */
+
+
+#define WMAPRO_CHANNEL_MASK_ZERO 0x0000
+
+/* Speaker layout mask for one channel (Home Theater, mono).
+ * - Speaker front center
+ */
+#define WMAPRO_CHANNEL_MASK_1_C 0x0004
+
+/* Speaker layout mask for two channels (Home Theater, stereo).
+ * - Speaker front left
+ * - Speaker front right
+ */
+#define WMAPRO_CHANNEL_MASK_2_L_R 0x0003
+
+/* Speaker layout mask for three channels (Home Theater).
+ * - Speaker front left
+ * - Speaker front right
+ * - Speaker front center
+ */
+#define WMAPRO_CHANNEL_MASK_3_L_C_R 0x0007
+
+/* Speaker layout mask for two channels (stereo).
+ * - Speaker back left
+ * - Speaker back right
+ */
+#define WMAPRO_CHANNEL_MASK_2_Bl_Br  0x0030
+
+/* Speaker layout mask for four channels.
+ * - Speaker front left
+ * - Speaker front right
+ * - Speaker back left
+ * - Speaker back right
+*/
+#define WMAPRO_CHANNEL_MASK_4_L_R_Bl_Br 0x0033
+
+/* Speaker layout mask for four channels (Home Theater).
+ * - Speaker front left
+ * - Speaker front right
+ * - Speaker front center
+ * - Speaker back center
+*/
+#define WMAPRO_CHANNEL_MASK_4_L_R_C_Bc_HT 0x0107
+/* Speaker layout mask for five channels.
+ * - Speaker front left
+ * - Speaker front right
+ * - Speaker front center
+ * - Speaker back left
+ * - Speaker back right
+ */
+#define WMAPRO_CHANNEL_MASK_5_L_C_R_Bl_Br  0x0037
+
+/* Speaker layout mask for five channels (5 mode, Home Theater).
+ * - Speaker front left
+ * - Speaker front right
+ * - Speaker front center
+ * - Speaker side left
+ * - Speaker side right
+ */
+#define WMAPRO_CHANNEL_MASK_5_L_C_R_Sl_Sr_HT   0x0607
+/* Speaker layout mask for six channels (5.1 mode).
+ * - Speaker front left
+ * - Speaker front right
+ * - Speaker front center
+ * - Speaker low frequency
+ * - Speaker back left
+ * - Speaker back right
+ */
+#define WMAPRO_CHANNEL_MASK_5DOT1_L_C_R_Bl_Br_SLF  0x003F
+/* Speaker layout mask for six channels (5.1 mode, Home Theater).
+ * - Speaker front left
+ * - Speaker front right
+ * - Speaker front center
+ * - Speaker low frequency
+ * - Speaker side left
+ * - Speaker side right
+ */
+#define WMAPRO_CHANNEL_MASK_5DOT1_L_C_R_Sl_Sr_SLF_HT  0x060F
+/* Speaker layout mask for six channels (5.1 mode, no LFE).
+ * - Speaker front left
+ * - Speaker front right
+ * - Speaker front center
+ * - Speaker back left
+ * - Speaker back right
+ * - Speaker back center
+ */
+#define WMAPRO_CHANNEL_MASK_5DOT1_L_C_R_Bl_Br_Bc  0x0137
+/* Speaker layout mask for six channels (5.1 mode, Home Theater,
+  * no LFE).
+  * - Speaker front left
+  * - Speaker front right
+  * - Speaker front center
+  * - Speaker back center
+  * - Speaker side left
+  * - Speaker side right
+ */
+#define WMAPRO_CHANNEL_MASK_5DOT1_L_C_R_Sl_Sr_Bc_HT   0x0707
+
+/* Speaker layout mask for seven channels (6.1 mode).
+ * - Speaker front left
+ * - Speaker front right
+ * - Speaker front center
+ * - Speaker low frequency
+ * - Speaker back left
+ * - Speaker back right
+ * - Speaker back center
+ */
+#define WMAPRO_CHANNEL_MASK_6DOT1_L_C_R_Bl_Br_Bc_SLF   0x013F
+
+/* Speaker layout mask for seven channels (6.1 mode, Home
+  * Theater).
+  * - Speaker front left
+  * - Speaker front right
+  * - Speaker front center
+  * - Speaker low frequency
+  * - Speaker back center
+  * - Speaker side left
+  * - Speaker side right
+*/
+#define WMAPRO_CHANNEL_MASK_6DOT1_L_C_R_Sl_Sr_Bc_SLF_HT 0x070F
+
+/* Speaker layout mask for seven channels (6.1 mode, no LFE).
+ * - Speaker front left
+ * - Speaker front right
+ * - Speaker front center
+ * - Speaker back left
+ * - Speaker back right
+ * - Speaker front left of center
+ * - Speaker front right of center
+*/
+#define WMAPRO_CHANNEL_MASK_6DOT1_L_C_R_Bl_Br_SFLOC_SFROC   0x00F7
+
+/* Speaker layout mask for seven channels (6.1 mode, Home
+ * Theater, no LFE).
+ * - Speaker front left
+ * - Speaker front right
+ * - Speaker front center
+ * - Speaker side left
+ * - Speaker side right
+ * - Speaker front left of center
+ * - Speaker front right of center
+*/
+#define WMAPRO_CHANNEL_MASK_6DOT1_L_C_R_Sl_Sr_SFLOC_SFROC_HT 0x0637
+
+/* Speaker layout mask for eight channels (7.1 mode).
+ * - Speaker front left
+ * - Speaker front right
+ * - Speaker front center
+ * - Speaker back left
+ * - Speaker back right
+ * - Speaker low frequency
+ * - Speaker front left of center
+ * - Speaker front right of center
+ */
+#define WMAPRO_CHANNEL_MASK_7DOT1_L_C_R_Bl_Br_SLF_SFLOC_SFROC \
+					0x00FF
+
+/* Speaker layout mask for eight channels (7.1 mode, Home Theater).
+ * - Speaker front left
+ * - Speaker front right
+ * - Speaker front center
+ * - Speaker side left
+ * - Speaker side right
+ * - Speaker low frequency
+ * - Speaker front left of center
+ * - Speaker front right of center
+ *
+*/
+#define WMAPRO_CHANNEL_MASK_7DOT1_L_C_R_Sl_Sr_SLF_SFLOC_SFROC_HT \
+					0x063F
+
+#define ASM_PARAM_ID_DEC_OUTPUT_CHAN_MAP  0x00010D82
+
+/* Maximum number of decoder output channels.*/
+#define MAX_CHAN_MAP_CHANNELS  16
+
+#define MAX_CHAN_MAP_CHANNELS_V2 32
+
+/* Structure for decoder output channel mapping. */
+
+/* Payload of the #ASM_PARAM_ID_DEC_OUTPUT_CHAN_MAP parameter in the
+ * #ASM_STREAM_CMD_SET_ENCDEC_PARAM command.
+ */
+struct asm_dec_out_chan_map_param {
+	struct apr_hdr hdr;
+	struct asm_stream_cmd_set_encdec_param  encdec;
+	u32                 num_channels;
+/* Number of decoder output channels.
+ * Supported values: 0 to #MAX_CHAN_MAP_CHANNELS
+ *
+ * A value of 0 indicates native channel mapping, which is valid
+ * only for NT mode. This means the output of the decoder is to be
+ * preserved as is.
+ */
+	u8                  channel_mapping[MAX_CHAN_MAP_CHANNELS];
+} __packed;
+
+/* Payload of the #ASM_PARAM_ID_DEC_OUTPUT_CHAN_MAP parameter in the
+ * #ASM_STREAM_CMD_SET_ENCDEC_PARAM command.
+ */
+struct asm_dec_out_chan_map_param_v2 {
+	struct apr_hdr hdr;
+	struct asm_stream_cmd_set_encdec_param  encdec;
+	u32                 num_channels;
+/* Number of decoder output channels.
+ * Supported values: 0 to #MAX_CHAN_MAP_CHANNELS_V2
+ *
+ * A value of 0 indicates native channel mapping, which is valid
+ * only for NT mode. This means the output of the decoder is to be
+ * preserved as is.
+ */
+	u8                  channel_mapping[MAX_CHAN_MAP_CHANNELS_V2];
+} __packed;
+
+#define ASM_STREAM_CMD_OPEN_WRITE_COMPRESSED  0x00010D84
+
+/* Bitmask for the IEC 61937 enable flag.*/
+#define ASM_BIT_MASK_IEC_61937_STREAM_FLAG   (0x00000001UL)
+
+/* Shift value for the IEC 61937 enable flag.*/
+#define ASM_SHIFT_IEC_61937_STREAM_FLAG  0
+
+/* Bitmask for the IEC 60958 enable flag.*/
+#define ASM_BIT_MASK_IEC_60958_STREAM_FLAG   (0x00000002UL)
+
+/* Shift value for the IEC 60958 enable flag.*/
+#define ASM_SHIFT_IEC_60958_STREAM_FLAG   1
+
+/* Payload format for the open write compressed command. */
+
+/* Payload format for the #ASM_STREAM_CMD_OPEN_WRITE_COMPRESSED
+ * command, which opens a stream for a given session ID and stream ID
+ * to be rendered in the compressed format.
+ */
+
+struct asm_stream_cmd_open_write_compressed {
+	struct apr_hdr hdr;
+	u32                    flags;
+/* Mode flags that configure the stream for a specific format.
+ * Supported values:
+ * - Bit 0 -- IEC 61937 compatibility
+ *   - 0 -- Stream is not in IEC 61937 format
+ *   - 1 -- Stream is in IEC 61937 format
+ * - Bit 1 -- IEC 60958 compatibility
+ *   - 0 -- Stream is not in IEC 60958 format
+ *   - 1 -- Stream is in IEC 60958 format
+ * - Bits 2 to 31 -- 0 (Reserved)
+ *
+ * For the same stream, bit 0 cannot be 0 while bit 1 is 1: a
+ * compressed stream cannot have IEC 60958
+ * packetization applied without IEC 61937 packetization.
+ * @note1hang Currently, IEC 60958 packetized input streams are not
+ * supported.
+ */
+
+
+	u32                    fmt_id;
+/* Specifies the media type of the HDMI stream to be opened.
+ * Supported values:
+ * - #ASM_MEDIA_FMT_AC3
+ * - #ASM_MEDIA_FMT_EAC3
+ * - #ASM_MEDIA_FMT_DTS
+ * - #ASM_MEDIA_FMT_ATRAC
+ * - #ASM_MEDIA_FMT_MAT
+ *
+ * @note1hang This field must be set to a valid media type even if
+ * IEC 61937 packetization is not performed by the aDSP.
+ */
+
+} __packed;
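+
+/* Illustrative sketch (not part of the original header): marking a
+ * compressed stream as IEC 61937 packetized via bit 0 of the flags
+ * word described above. The helper name is hypothetical.
+ */
+static inline void asm_example_mark_iec_61937(u32 *flags)
+{
+	*flags |= (1U << ASM_SHIFT_IEC_61937_STREAM_FLAG) &
+		  ASM_BIT_MASK_IEC_61937_STREAM_FLAG;
+}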
+
+
+/*
+    Indicates the number of samples per channel to be removed from the
+    beginning of the stream.
+*/
+#define ASM_DATA_CMD_REMOVE_INITIAL_SILENCE 0x00010D67
+/*
+    Indicates the number of samples per channel to be removed from
+    the end of the stream.
+*/
+#define ASM_DATA_CMD_REMOVE_TRAILING_SILENCE 0x00010D68
+struct asm_data_cmd_remove_silence {
+	struct apr_hdr hdr;
+	u32	num_samples_to_remove;
+	/**< Number of samples per channel to be removed.
+
+	   @values 0 to (2@sscr{32}-1) */
+} __packed;
+
+#define ASM_STREAM_CMD_OPEN_READ_COMPRESSED                        0x00010D95
+
+struct asm_stream_cmd_open_read_compressed {
+	struct apr_hdr hdr;
+	u32                    mode_flags;
+/* Mode flags that indicate whether meta information per encoded
+ * frame is to be provided.
+ * Supported values for bit 4:
+ * - 0 -- Return data buffer contains all encoded frames only; it does
+ *      not contain frame metadata.
+ * - 1 -- Return data buffer contains an array of metadata and encoded
+ *      frames.
+ * - Use #ASM_BIT_MASK_META_INFO_FLAG to set the bitmask and
+ * #ASM_SHIFT_META_INFO_FLAG to set the shift value for this bit.
+ * All other bits are reserved; clients must set them to zero.
+ */
+
+	u32                    frames_per_buf;
+/* Indicates the number of frames that need to be returned per
+ * read buffer.
+ * Supported values: should be greater than 0
+ */
+
+} __packed;
+
+/* adsp_asm_stream_commands.h*/
+
+
+/* adsp_asm_api.h (no changes)*/
+#define ASM_STREAM_POSTPROCOPO_ID_DEFAULT \
+								0x00010BE4
+#define ASM_STREAM_POSTPROCOPO_ID_PEAKMETER \
+								0x00010D83
+#define ASM_STREAM_POSTPROCOPO_ID_NONE \
+								0x00010C68
+#define ASM_STREAM_POSTPROCOPO_ID_MCH_PEAK_VOL \
+								0x00010D8B
+#define ASM_STREAM_PREPROCOPO_ID_DEFAULT \
+			ASM_STREAM_POSTPROCOPO_ID_DEFAULT
+#define ASM_STREAM_PREPROCOPO_ID_NONE \
+			ASM_STREAM_POSTPROCOPO_ID_NONE
+#define ADM_CMD_COPP_OPENOPOLOGY_ID_NONE_AUDIO_COPP \
+			0x00010312
+#define ADM_CMD_COPP_OPENOPOLOGY_ID_SPEAKER_MONO_AUDIO_COPP \
+								0x00010313
+#define ADM_CMD_COPP_OPENOPOLOGY_ID_SPEAKER_STEREO_AUDIO_COPP \
+								0x00010314
+#define ADM_CMD_COPP_OPENOPOLOGY_ID_SPEAKER_STEREO_IIR_AUDIO_COPP\
+								0x00010704
+#define ADM_CMD_COPP_OPENOPOLOGY_ID_SPEAKER_MONO_AUDIO_COPP_MBDRCV2\
+								0x0001070D
+#define ADM_CMD_COPP_OPENOPOLOGY_ID_SPEAKER_STEREO_AUDIO_COPP_MBDRCV2\
+								0x0001070E
+#define ADM_CMD_COPP_OPENOPOLOGY_ID_SPEAKER_STEREO_IIR_AUDIO_COPP_MBDRCV2\
+								0x0001070F
+#define ADM_CMD_COPP_OPENOPOLOGY_ID_SPEAKER_STEREO_AUDIO_COPP_MBDRC_V3 \
+								0x11000000
+#define ADM_CMD_COPP_OPENOPOLOGY_ID_SPEAKER_MCH_PEAK_VOL \
+								0x0001031B
+#define ADM_CMD_COPP_OPENOPOLOGY_ID_MIC_MONO_AUDIO_COPP  0x00010315
+#define ADM_CMD_COPP_OPENOPOLOGY_ID_MIC_STEREO_AUDIO_COPP 0x00010316
+#define AUDPROC_COPPOPOLOGY_ID_MCHAN_IIR_AUDIO           0x00010715
+#define ADM_CMD_COPP_OPENOPOLOGY_ID_DEFAULT_AUDIO_COPP   0x00010BE3
+#define ADM_CMD_COPP_OPENOPOLOGY_ID_PEAKMETER_AUDIO_COPP 0x00010317
+#define AUDPROC_MODULE_ID_AIG   0x00010716
+#define AUDPROC_PARAM_ID_AIG_ENABLE		0x00010717
+#define AUDPROC_PARAM_ID_AIG_CONFIG		0x00010718
+
+struct Audio_AigParam {
+	uint16_t	mode;
+/*< Mode word for enabling AIG/SIG mode.
+ * Byte offset: 0
+ */
+	int16_t		staticGainL16Q12;
+/*< Static input gain when aigMode is set to 1.
+ * Byte offset: 2
+ */
+	int16_t		initialGainDBL16Q7;
+/*< Initial value, in dB (Q7), from which the adaptive gain update
+ * starts.
+ * Byte offset: 4
+ */
+	int16_t		idealRMSDBL16Q7;
+/*< Average RMS level, in Q8.7, that AIG attempts to achieve.
+ * Byte offset: 6
+ */
+	int32_t		noiseGateL32;
+/* Threshold below which the signal is treated as noise by AIG.
+ * Byte offset: 8
+ */
+	int32_t		minGainL32Q15;
+/* Minimum gain, in Q16.15, that AIG can provide.
+ * Byte offset: 12
+ */
+	int32_t		maxGainL32Q15;
+/* Maximum gain, in Q16.15, that AIG can provide.
+ * Byte offset: 16
+ */
+	uint32_t		gainAtRtUL32Q31;
+/* Attack/release time, in Q1.31, for the AIG update.
+ * Byte offset: 20
+ */
+	uint32_t		longGainAtRtUL32Q31;
+/* Long attack/release time, in Q1.31, while updating gain for
+ * noise/silence.
+ * Byte offset: 24
+ */
+
+	uint32_t		rmsTavUL32Q32;
+/* RMS smoothing time constant, in Q0.32, used for the long-term RMS
+ * estimate.
+ * Byte offset: 28
+ */
+
+	uint32_t		gainUpdateStartTimMsUL32Q0;
+/* Waiting time, in Q32.0, before AIG starts to apply the adaptive
+ * gain update.
+ * Byte offset: 32
+ */
+
+} __packed;
+
+
+#define ADM_MODULE_ID_EANS                            0x00010C4A
+#define ADM_PARAM_ID_EANS_ENABLE                      0x00010C4B
+#define ADM_PARAM_ID_EANS_PARAMS                      0x00010C4C
+
+struct adm_eans_enable {
+
+	uint32_t                  enable_flag;
+/*< Specifies whether EANS is disabled (0) or enabled
+ * (nonzero).
+ * This is supported only for sampling rates of 8, 12, 16, 24, 32,
+ * and 48 kHz. It is not supported for sampling rates of 11.025,
+ * 22.05, or 44.1 kHz.
+ */
+
+} __packed;
+
+
+struct adm_eans_params {
+	int16_t                         eans_mode;
+/*< Mode word for enabling/disabling submodules.
+ * Byte offset: 0
+ */
+
+	int16_t                         eans_input_gain;
+/*< Q2.13 input gain to the EANS module.
+ * Byte offset: 2
+ */
+
+	int16_t                         eans_output_gain;
+/*< Q2.13 output gain to the EANS module.
+ * Byte offset: 4
+ */
+
+	int16_t                         eansarget_ns;
+/*< Target noise suppression level in dB.
+ * Byte offset: 6
+ */
+
+	int16_t                         eans_s_alpha;
+/*< Q3.12 over-subtraction factor for stationary noise
+ * suppression.
+ * Byte offset: 8
+ */
+
+	int16_t                         eans_n_alpha;
+/*< Q3.12 over-subtraction factor for nonstationary noise
+ * suppression.
+ * Byte offset: 10
+ */
+
+	int16_t                         eans_n_alphamax;
+/*< Q3.12 maximum over-subtraction factor for nonstationary
+ * noise suppression.
+ * Byte offset: 12
+ */
+	int16_t                         eans_e_alpha;
+/*< Q15 scaling factor for excess noise suppression.
+ * Byte offset: 14
+ */
+
+	int16_t                         eans_ns_snrmax;
+/*< Upper boundary in dB for SNR estimation.
+ * Byte offset: 16
+ */
+
+	int16_t                         eans_sns_block;
+/*< Quarter block size for stationary noise suppression.
+ * Byte offset: 18
+ */
+
+	int16_t                         eans_ns_i;
+/*< Initialization block size for noise suppression.
+ * Byte offset: 20
+ */
+	int16_t                         eans_np_scale;
+/*< Power scale factor for nonstationary noise update.
+ * Byte offset: 22
+ */
+
+	int16_t                         eans_n_lambda;
+/*< Smoothing factor for higher level nonstationary noise
+ * update.
+ * Byte offset: 24
+ */
+
+	int16_t                         eans_n_lambdaf;
+/*< Medium averaging factor for noise update.
+ * Byte offset: 26
+ */
+
+	int16_t                         eans_gs_bias;
+/*< Bias factor in dB for gain calculation.
+ * Byte offset: 28
+ */
+
+	int16_t                         eans_gs_max;
+/*< SNR lower boundary in dB for aggressive gain calculation.
+ * Byte offset: 30
+ */
+
+	int16_t                         eans_s_alpha_hb;
+/*< Q3.12 over-subtraction factor for high-band stationary
+ * noise suppression.
+ * Byte offset: 32
+ */
+
+	int16_t                         eans_n_alphamax_hb;
+/*< Q3.12 maximum over-subtraction factor for high-band
+ * nonstationary noise suppression.
+ * Byte offset: 34
+ */
+
+	int16_t                         eans_e_alpha_hb;
+/*< Q15 scaling factor for high-band excess noise suppression.
+ * Byte offset: 36
+ */
+
+	int16_t                         eans_n_lambda0;
+/*< Smoothing factor for nonstationary noise update during
+ * speech activity.
+ * Byte offset: 38
+ */
+
+	int16_t                         thresh;
+/*< Threshold for generating a binary VAD decision.
+ * Byte offset: 40
+ */
+
+	int16_t                         pwr_scale;
+/*< Indirect lower boundary of the noise level estimate.
+ * Byte offset: 42
+ */
+
+	int16_t                         hangover_max;
+/*< Avoids mid-speech clipping and reliably detects weak speech
+ * bursts at the end of speech activity.
+ * Byte offset: 44
+ */
+
+	int16_t                         alpha_snr;
+/*< Controls responsiveness of the VAD.
+ * Byte offset: 46
+ */
+
+	int16_t                         snr_diff_max;
+/*< Maximum SNR difference. Decreasing this parameter value may
+ * help in making correct decisions during abrupt changes; however,
+ * decreasing too much may increase false alarms during long
+ * pauses/silences.
+ * Byte offset: 48
+ */
+
+	int16_t                         snr_diff_min;
+/*< Minimum SNR difference. Decreasing this parameter value may
+ * help in making correct decisions during abrupt changes; however,
+ * decreasing too much may increase false alarms during long
+ * pauses/silences.
+ * Byte offset: 50
+ */
+
+	int16_t                         init_length;
+/*< Defines the number of frames for which a noise level
+ * estimate is set to a fixed value.
+ * Byte offset: 52
+ */
+
+	int16_t                         max_val;
+/*< Defines the upper limit of the noise level.
+ * Byte offset: 54
+ */
+
+	int16_t                         init_bound;
+/*< Defines the initial bounding value for the noise level
+ * estimate. This is used during the initial segment defined by the
+ * init_length parameter.
+ * Byte offset: 56
+ */
+
+	int16_t                         reset_bound;
+/*< Reset boundary for noise tracking.
+ * Byte offset: 58
+ */
+
+	int16_t                         avar_scale;
+/*< Defines the bias factor in noise estimation.
+ * Byte offset: 60
+ */
+
+	int16_t                         sub_nc;
+/*< Defines the window length for noise estimation.
+ * Byte offset: 62
+ */
+
+	int16_t                         spow_min;
+/*< Defines the minimum signal power required to update the
+ * boundaries for the noise floor estimate.
+ * Byte offset: 64
+ */
+
+	int16_t                         eans_gs_fast;
+/*< Fast smoothing factor for postprocessor gain.
+ * Byte offset: 66
+ */
+
+	int16_t                         eans_gs_med;
+/*< Medium smoothing factor for postprocessor gain.
+ * Byte offset: 68
+ */
+
+	int16_t                         eans_gs_slow;
+/*< Slow smoothing factor for postprocessor gain.
+ * Byte offset: 70
+ */
+
+	int16_t                         eans_swb_salpha;
+/*< Q3.12 super wideband aggressiveness factor for stationary
+ * noise suppression.
+ * Byte offset: 72
+ */
+
+	int16_t                         eans_swb_nalpha;
+/*< Q3.12 super wideband aggressiveness factor for
+ * nonstationary noise suppression.
+ * Byte offset: 74
+ */
+} __packed;
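+
+/*
+ * Illustrative compile-time check (not part of the original interface):
+ * the byte offsets above end at 74 for an int16_t field, so the packed
+ * payload should be 76 bytes. Assumes a C11 compiler.
+ */
+_Static_assert(sizeof(struct adm_eans_params) == 76,
+	"adm_eans_params must match its documented 76-byte layout");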
+#define ADM_MODULE_IDX_MIC_GAIN_CTRL   0x00010C35
+
+/* @addtogroup audio_pp_param_ids
+ * ID of the Tx mic gain control parameter used by the
+ * #ADM_MODULE_IDX_MIC_GAIN_CTRL module.
+ * @messagepayload
+ * @structure{admx_mic_gain}
+ * @tablespace
+ * @inputtable{Audio_Postproc_ADM_PARAM_IDX_MIC_GAIN.tex}
+ */
+#define ADM_PARAM_IDX_MIC_GAIN       0x00010C36
+
+/* Structure for a Tx mic gain parameter for the mic gain
+ * control module.
+ */
+
+
+/* @brief Payload of the #ADM_PARAM_IDX_MIC_GAIN parameter in the
+ * Tx Mic Gain Control module.
+ */
+struct admx_mic_gain {
+	uint16_t                  tx_mic_gain;
+	/*< Linear gain in Q13 format. */
+
+	uint16_t                  reserved;
+	/*< Clients must set this field to zero. */
+} __packed;
+
+struct adm_set_mic_gain_params {
+	struct adm_cmd_set_pp_params_v5 params;
+	struct adm_param_data_v5 data;
+	struct admx_mic_gain mic_gain_data;
+} __packed;
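+
+/*
+ * Usage sketch for the Q13 gain fields above (illustrative only; this
+ * helper is not part of the interface). In Q13, unity gain is
+ * 1 << 13 == 0x2000, so half gain, for example, is 0x1000.
+ */
+static inline void example_fill_mic_gain(struct admx_mic_gain *g)
+{
+	g->tx_mic_gain = 0x2000;	/* unity gain in Q13 */
+	g->reserved = 0;		/* must be zero, per the field comment */
+}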
+
+/* end_addtogroup audio_pp_param_ids */
+
+/* @ingroup audio_pp_module_ids
+ * ID of the Rx Codec Gain Control module.
+ *
+ * This module supports the following parameter ID:
+ * - #ADM_PARAM_ID_RX_CODEC_GAIN
+ */
+#define ADM_MODULE_ID_RX_CODEC_GAIN_CTRL       0x00010C37
+
+/* @addtogroup audio_pp_param_ids
+ * ID of the Rx codec gain control parameter used by the
+ * #ADM_MODULE_ID_RX_CODEC_GAIN_CTRL module.
+ *
+ * @messagepayload
+ * @structure{adm_rx_codec_gain}
+ * @tablespace
+ * @inputtable{Audio_Postproc_ADM_PARAM_ID_RX_CODEC_GAIN.tex}
+*/
+#define ADM_PARAM_ID_RX_CODEC_GAIN   0x00010C38
+
+/* Structure for the Rx common codec gain control module. */
+
+
+/* @brief Payload of the #ADM_PARAM_ID_RX_CODEC_GAIN parameter
+ * in the Rx Codec Gain Control module.
+ */
+
+
+struct adm_rx_codec_gain {
+	uint16_t                  rx_codec_gain;
+	/*< Linear gain in Q13 format. */
+
+	uint16_t                  reserved;
+	/*< Clients must set this field to zero.*/
+} __packed;
+
+/* end_addtogroup audio_pp_param_ids */
+
+/* @ingroup audio_pp_module_ids
+ * ID of the HPF Tuning Filter module on the Tx path.
+ * This module supports the following parameter IDs:
+ * - #ADM_PARAM_ID_HPF_IIRX_FILTER_ENABLE_CONFIG
+ * - #ADM_PARAM_ID_HPF_IIRX_FILTER_PRE_GAIN
+ * - #ADM_PARAM_ID_HPF_IIRX_FILTER_CONFIG_PARAMS
+ */
+#define ADM_MODULE_ID_HPF_IIRX_FILTER    0x00010C3D
+
+/* @addtogroup audio_pp_param_ids */
+/* ID of the Tx HPF IIR filter enable parameter used by the
+ * #ADM_MODULE_ID_HPF_IIRX_FILTER module.
+ * @parspace Message payload
+ * @structure{adm_hpfx_iir_filter_enable_cfg}
+ * @tablespace
+ * @inputtable{Audio_Postproc_ADM_PARAM_ID_HPF_IIRX_FILTER_ENABLE_CONFIG.tex}
+ */
+#define ADM_PARAM_ID_HPF_IIRX_FILTER_ENABLE_CONFIG   0x00010C3E
+
+/* ID of the Tx HPF IIR filter pregain parameter used by the
+ * #ADM_MODULE_ID_HPF_IIRX_FILTER module.
+ * @parspace Message payload
+ * @structure{adm_hpfx_iir_filter_pre_gain}
+ * @tablespace
+ * @inputtable{Audio_Postproc_ADM_PARAM_ID_HPF_IIRX_FILTER_PRE_GAIN.tex}
+ */
+#define ADM_PARAM_ID_HPF_IIRX_FILTER_PRE_GAIN   0x00010C3F
+
+/* ID of the Tx HPF IIR filter configuration parameters used by the
+ * #ADM_MODULE_ID_HPF_IIRX_FILTER module.
+ * @parspace Message payload
+ * @structure{adm_hpfx_iir_filter_cfg_params}
+ * @tablespace
+ * @inputtable{Audio_Postproc_ADM_PARAM_ID_HPF_IIRX_FILTER_CONFIG_PARAMS.tex}
+ */
+#define ADM_PARAM_ID_HPF_IIRX_FILTER_CONFIG_PARAMS  0x00010C40
+
+/* Structure for enabling a configuration parameter for
+ * the HPF IIR tuning filter module on the Tx path.
+ */
+
+/* @brief Payload of the #ADM_PARAM_ID_HPF_IIRX_FILTER_ENABLE_CONFIG
+ * parameter in the Tx path HPF Tuning Filter module.
+ */
+struct adm_hpfx_iir_filter_enable_cfg {
+	uint32_t                  enable_flag;
+/*< Specifies whether the HPF tuning filter is disabled (0) or
+ * enabled (nonzero).
+ */
+} __packed;
+
+
+/* Structure for the pregain parameter for the HPF
+ * IIR tuning filter module on the Tx path.
+ */
+
+
+/* @brief Payload of the #ADM_PARAM_ID_HPF_IIRX_FILTER_PRE_GAIN parameter
+ * in the Tx path HPF Tuning Filter module.
+ */
+struct adm_hpfx_iir_filter_pre_gain {
+	uint16_t                  pre_gain;
+	/*< Linear gain in Q13 format. */
+
+	uint16_t                  reserved;
+	/*< Clients must set this field to zero.*/
+} __packed;
+
+
+/* Structure for the configuration parameter for the
+ * HPF IIR tuning filter module on the Tx path.
+ */
+
+
+/* @brief Payload of the #ADM_PARAM_ID_HPF_IIRX_FILTER_CONFIG_PARAMS
+ * parameters in the Tx path HPF Tuning Filter module. \n
+ * \n
+ * This structure is followed by tuning filter coefficients as follows: \n
+ * - Sequence of int32_t FilterCoeffs.
+ * Each band has five coefficients, each in int32_t format in the order of
+ * b0, b1, b2, a1, a2.
+ * - Sequence of int16_t NumShiftFactor.
+ * One int16_t per band. The numerator shift factor is related to the Q
+ * factor of the filter coefficients.
+ * - Sequence of uint16_t PanSetting.
+ * One uint16_t for each band to indicate application of the filter to
+ * left (0), right (1), or both (2) channels.
+ */
+struct adm_hpfx_iir_filter_cfg_params {
+	uint16_t                  num_biquad_stages;
+/*< Number of bands.
+ * Supported values: 0 to 20
+ */
+
+	uint16_t                  reserved;
+	/*< Clients must set this field to zero.*/
+} __packed;
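+
+/*
+ * Illustrative helper (not part of the interface): size in bytes of the
+ * variable coefficient payload that follows
+ * adm_hpfx_iir_filter_cfg_params, per the layout documented above --
+ * five int32_t coefficients, one int16_t numerator shift factor, and
+ * one uint16_t pan setting per band.
+ */
+static inline uint32_t example_hpf_iir_payload_size(uint16_t num_bands)
+{
+	return num_bands * (5 * sizeof(int32_t) +
+			sizeof(int16_t) + sizeof(uint16_t));
+}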
+
+/* end_addtogroup audio_pp_module_ids */
+
+/* @addtogroup audio_pp_module_ids */
+/* ID of the Tx path IIR Tuning Filter module.
+ *	This module supports the following parameter IDs:
+ *	- #ADM_PARAM_IDX_IIR_FILTER_ENABLE_CONFIG
+ *	- #ADM_PARAM_IDX_IIR_FILTER_PRE_GAIN
+ *	- #ADM_PARAM_IDX_IIR_FILTER_CONFIG_PARAMS
+ */
+#define ADM_MODULE_IDX_IIR_FILTER 0x00010C41
+
+/* ID of the Rx path IIR Tuning Filter module for the left channel.
+ *	The parameter IDs of the IIR tuning filter module
+ *	(#ASM_MODULE_ID_IIRUNING_FILTER) are used for the left IIR Rx tuning
+ *	filter.
+ *
+ * Pan parameters are not required for this per-channel IIR filter; the pan
+ * parameters are ignored by this module.
+ */
+#define ADM_MODULE_ID_LEFT_IIRUNING_FILTER      0x00010705
+
+/* ID of the Rx path IIR Tuning Filter module for the right
+ * channel.
+ * The parameter IDs of the IIR tuning filter module
+ * (#ASM_MODULE_ID_IIRUNING_FILTER) are used for the right IIR Rx
+ * tuning filter.
+ *
+ * Pan parameters are not required for this per-channel IIR filter;
+ * the pan parameters are ignored by this module.
+ */
+#define ADM_MODULE_ID_RIGHT_IIRUNING_FILTER    0x00010706
+
+/* end_addtogroup audio_pp_module_ids */
+
+/* @addtogroup audio_pp_param_ids */
+
+/* ID of the Tx IIR filter enable parameter used by the
+ * #ADM_MODULE_IDX_IIR_FILTER module.
+ * @parspace Message payload
+ * @structure{admx_iir_filter_enable_cfg}
+ * @tablespace
+ * @inputtable{Audio_Postproc_ADM_PARAM_IDX_IIR_FILTER_ENABLE_CONFIG.tex}
+ */
+#define ADM_PARAM_IDX_IIR_FILTER_ENABLE_CONFIG   0x00010C42
+
+/* ID of the Tx IIR filter pregain parameter used by the
+ * #ADM_MODULE_IDX_IIR_FILTER module.
+ * @parspace Message payload
+ * @structure{admx_iir_filter_pre_gain}
+ * @tablespace
+ * @inputtable{Audio_Postproc_ADM_PARAM_IDX_IIR_FILTER_PRE_GAIN.tex}
+ */
+#define ADM_PARAM_IDX_IIR_FILTER_PRE_GAIN    0x00010C43
+
+/* ID of the Tx IIR filter configuration parameters used by the
+ * #ADM_MODULE_IDX_IIR_FILTER module.
+ * @parspace Message payload
+ * @structure{admx_iir_filter_cfg_params}
+ * @tablespace
+ * @inputtable{Audio_Postproc_ADM_PARAM_IDX_IIR_FILTER_CONFIG_PARAMS.tex}
+ */
+#define ADM_PARAM_IDX_IIR_FILTER_CONFIG_PARAMS     0x00010C44
+
+/* Structure for enabling the configuration parameter for the
+ * IIR filter module on the Tx path.
+ */
+
+/* @brief Payload of the #ADM_PARAM_IDX_IIR_FILTER_ENABLE_CONFIG
+ * parameter in the Tx Path IIR Tuning Filter module.
+ */
+
+struct admx_iir_filter_enable_cfg {
+	uint32_t                  enable_flag;
+/*< Specifies whether the IIR tuning filter is disabled (0) or
+ * enabled (nonzero).
+ */
+
+} __packed;
+
+
+/* Structure for the pregain parameter for the
+ * IIR filter module on the Tx path.
+ */
+
+
+/* @brief Payload of the #ADM_PARAM_IDX_IIR_FILTER_PRE_GAIN
+ * parameter in the Tx Path IIR Tuning Filter module.
+ */
+
+struct admx_iir_filter_pre_gain {
+	uint16_t                  pre_gain;
+	/*< Linear gain in Q13 format. */
+
+	uint16_t                  reserved;
+	/*< Clients must set this field to zero.*/
+} __packed;
+
+
+/* Structure for the configuration parameter for the
+ * IIR filter module on the Tx path.
+ */
+
+
+/* @brief Payload of the #ADM_PARAM_IDX_IIR_FILTER_CONFIG_PARAMS
+ * parameter in the Tx Path IIR Tuning Filter module. \n
+ *	\n
+ * This structure is followed by the HPF IIR filter coefficients on
+ * the Tx path as follows: \n
+ * - Sequence of int32_t ulFilterCoeffs. Each band has five
+ * coefficients, each in int32_t format in the order of b0, b1, b2,
+ * a1, a2.
+ * - Sequence of int16_t sNumShiftFactor. One int16_t per band. The
+ * numerator shift factor is related to the Q factor of the filter
+ * coefficients.
+ * - Sequence of uint16_t usPanSetting. One uint16_t for each band
+ * to indicate if the filter is applied to left (0), right (1), or
+ * both (2) channels.
+ */
+struct admx_iir_filter_cfg_params {
+	uint16_t                  num_biquad_stages;
+/*< Number of bands.
+ * Supported values: 0 to 20
+ */
+
+	uint16_t                  reserved;
+	/*< Clients must set this field to zero.*/
+} __packed;
+
+/* end_addtogroup audio_pp_module_ids */
+
+/* @ingroup audio_pp_module_ids
+ *	ID of the QEnsemble module.
+ *	This module supports the following parameter IDs:
+ *	- #ADM_PARAM_ID_QENSEMBLE_ENABLE
+ *	- #ADM_PARAM_ID_QENSEMBLE_BACKGAIN
+ *	- #ADM_PARAM_ID_QENSEMBLE_SET_NEW_ANGLE
+ */
+#define ADM_MODULE_ID_QENSEMBLE    0x00010C59
+
+/* @addtogroup audio_pp_param_ids */
+/* ID of the QEnsemble enable parameter used by the
+ * #ADM_MODULE_ID_QENSEMBLE module.
+ * @messagepayload
+ * @structure{adm_qensemble_enable}
+ * @tablespace
+ * @inputtable{Audio_Postproc_ADM_PARAM_ID_QENSEMBLE_ENABLE.tex}
+ */
+#define ADM_PARAM_ID_QENSEMBLE_ENABLE   0x00010C60
+
+/* ID of the QEnsemble back gain parameter used by the
+ * #ADM_MODULE_ID_QENSEMBLE module.
+ * @messagepayload
+ * @structure{adm_qensemble_param_backgain}
+ * @tablespace
+ * @inputtable{Audio_Postproc_ADM_PARAM_ID_QENSEMBLE_BACKGAIN.tex}
+ */
+#define ADM_PARAM_ID_QENSEMBLE_BACKGAIN   0x00010C61
+
+/* ID of the QEnsemble new angle parameter used by the
+ * #ADM_MODULE_ID_QENSEMBLE module.
+ * @messagepayload
+ * @structure{adm_qensemble_param_set_new_angle}
+ * @tablespace
+ * @inputtable{Audio_Postproc_ADM_PARAM_ID_QENSEMBLE_SET_NEW_ANGLE.tex}
+ */
+#define ADM_PARAM_ID_QENSEMBLE_SET_NEW_ANGLE    0x00010C62
+
+/* Structure for enabling the configuration parameter for the
+ * QEnsemble module.
+ */
+
+
+/* @brief Payload of the #ADM_PARAM_ID_QENSEMBLE_ENABLE
+ * parameter used by the QEnsemble module.
+ */
+struct adm_qensemble_enable {
+	uint32_t                  enable_flag;
+/*< Specifies whether the QEnsemble module is disabled (0) or enabled
+ * (nonzero).
+ */
+} __packed;
+
+
+/* Structure for the background gain for the QEnsemble module. */
+
+
+/* @brief Payload of the #ADM_PARAM_ID_QENSEMBLE_BACKGAIN
+ * parameter used by
+ * the QEnsemble module.
+ */
+struct adm_qensemble_param_backgain {
+	int16_t                  back_gain;
+/*< Linear gain in Q15 format.
+ * Supported values: 0 to 32767
+ */
+
+	uint16_t                 reserved;
+	/*< Clients must set this field to zero.*/
+} __packed;
+/* Structure for setting a new angle for the QEnsemble module. */
+
+
+/* @brief Payload of the #ADM_PARAM_ID_QENSEMBLE_SET_NEW_ANGLE
+ * parameter used
+ * by the QEnsemble module.
+ */
+struct adm_qensemble_param_set_new_angle {
+	int16_t                    new_angle;
+/*< New angle in degrees.
+ * Supported values: 0 to 359
+ */
+
+	int16_t                    time_ms;
+/*< Transition time in milliseconds to set the new angle.
+ * Supported values: 0 to 32767
+ */
+} __packed;
+
+
+#define ADM_CMD_GET_PP_TOPO_MODULE_LIST				0x00010349
+#define ADM_CMDRSP_GET_PP_TOPO_MODULE_LIST			0x00010350
+#define AUDPROC_PARAM_ID_ENABLE					0x00010904
+/*
+ * Payload of the ADM_CMD_GET_PP_TOPO_MODULE_LIST command.
+ */
+struct adm_cmd_get_pp_topo_module_list_t {
+	struct apr_hdr hdr;
+	/* Lower 32 bits of the 64-bit parameter data payload address. */
+	uint32_t                  data_payload_addr_lsw;
+	/*
+	 * Upper 32 bits of the 64-bit parameter data payload address.
+	 *
+	 * The size of the shared memory, if specified, must be large enough to
+	 * contain the entire parameter data payload, including the module ID,
+	 * parameter ID, parameter size, and parameter values.
+	 */
+	uint32_t                  data_payload_addr_msw;
+	/*
+	 *  Unique identifier for an address.
+	 *
+	 * This memory map handle is returned by the aDSP through the
+	 * #ADM_CMD_SHARED_MEM_MAP_REGIONS command.
+	 *
+	 * @values
+	 * - Non-NULL -- On acknowledgment, the parameter data payloads begin at
+	 * the address specified (out-of-band)
+	 * - NULL -- The acknowledgment's payload contains the parameter data
+	 * (in-band) @tablebulletend
+	 */
+	uint32_t                  mem_map_handle;
+	/*
+	 * Maximum data size of the list of modules. This
+	 * field is a multiple of 4 bytes.
+	 */
+	uint16_t                  param_max_size;
+	/* This field must be set to zero. */
+	uint16_t                  reserved;
+} __packed;
+
+/*
+ * Payload of the ADM_CMDRSP_GET_PP_TOPO_MODULE_LIST message, which returns
+ * module ids in response to an ADM_CMD_GET_PP_TOPO_MODULE_LIST command.
+ * Immediately following this structure is the acknowledgement <b>module id
+ * data variable payload</b> containing the pre/postprocessing module id
+ * values. For an in-band scenario, the variable payload depends on the size
+ * of the parameter.
+ */
+struct adm_cmd_rsp_get_pp_topo_module_list_t {
+	/* Status message (error code). */
+	uint32_t                  status;
+} __packed;
+
+struct audproc_topology_module_id_info_t {
+	uint32_t	num_modules;
+} __packed;
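+
+/*
+ * Parsing sketch for an in-band ADM_CMDRSP_GET_PP_TOPO_MODULE_LIST
+ * acknowledgment (illustrative only). It assumes, as the structures
+ * above suggest, that the status word is followed by a module-count
+ * header and then num_modules uint32_t module IDs.
+ */
+static inline int example_parse_topo_module_list(const uint8_t *payload,
+						 void (*cb)(uint32_t id))
+{
+	const struct adm_cmd_rsp_get_pp_topo_module_list_t *rsp =
+					(const void *)payload;
+	const struct audproc_topology_module_id_info_t *info =
+					(const void *)(payload + sizeof(*rsp));
+	const uint32_t *ids =
+		(const void *)(payload + sizeof(*rsp) + sizeof(*info));
+	uint32_t i;
+
+	if (rsp->status)
+		return -1;	/* aDSP reported an error */
+	for (i = 0; i < info->num_modules; i++)
+		cb(ids[i]);
+	return 0;
+}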
+
+/* end_addtogroup audio_pp_module_ids */
+
+/* @ingroup audio_pp_module_ids
+ * ID of the Volume Control module pre/postprocessing block.
+ * This module supports the following parameter IDs:
+ * - #ASM_PARAM_ID_VOL_CTRL_MASTER_GAIN
+ * - #ASM_PARAM_ID_VOL_CTRL_LR_CHANNEL_GAIN
+ * - #ASM_PARAM_ID_VOL_CTRL_MUTE_CONFIG
+ * - #ASM_PARAM_ID_SOFT_VOL_STEPPING_PARAMETERS
+ * - #ASM_PARAM_ID_SOFT_PAUSE_PARAMETERS
+ * - #ASM_PARAM_ID_MULTICHANNEL_GAIN
+ * - #ASM_PARAM_ID_MULTICHANNEL_MUTE
+ */
+#define ASM_MODULE_ID_VOL_CTRL   0x00010BFE
+#define ASM_MODULE_ID_VOL_CTRL2  0x00010910
+#define AUDPROC_MODULE_ID_VOL_CTRL ASM_MODULE_ID_VOL_CTRL
+
+/* @addtogroup audio_pp_param_ids */
+/* ID of the master gain parameter used by the #ASM_MODULE_ID_VOL_CTRL
+ * module.
+ * @messagepayload
+ * @structure{asm_volume_ctrl_master_gain}
+ * @tablespace
+ * @inputtable{Audio_Postproc_ASM_PARAM_ID_VOL_CTRL_MASTER_GAIN.tex}
+ */
+#define ASM_PARAM_ID_VOL_CTRL_MASTER_GAIN    0x00010BFF
+#define AUDPROC_PARAM_ID_VOL_CTRL_MASTER_GAIN ASM_PARAM_ID_VOL_CTRL_MASTER_GAIN
+
+/* ID of the left/right channel gain parameter used by the
+ * #ASM_MODULE_ID_VOL_CTRL module.
+ * @messagepayload
+ * @structure{asm_volume_ctrl_lr_chan_gain}
+ * @tablespace
+ * @inputtable{Audio_Postproc_ASM_PARAM_ID_MULTICHANNEL_GAIN.tex}
+ */
+#define ASM_PARAM_ID_VOL_CTRL_LR_CHANNEL_GAIN     0x00010C00
+
+/* ID of the mute configuration parameter used by the
+ * #ASM_MODULE_ID_VOL_CTRL module.
+ * @messagepayload
+ * @structure{asm_volume_ctrl_mute_config}
+ * @tablespace
+ * @inputtable{Audio_Postproc_ASM_PARAM_ID_VOL_CTRL_MUTE_CONFIG.tex}
+ */
+#define ASM_PARAM_ID_VOL_CTRL_MUTE_CONFIG   0x00010C01
+
+/* ID of the soft stepping volume parameters used by the
+ * #ASM_MODULE_ID_VOL_CTRL module.
+ * @messagepayload
+ * @structure{asm_soft_step_volume_params}
+ * @tablespace
+ * @inputtable{Audio_Postproc_ASM_PARAM_ID_SOFT_VOL_STEPPING_PARAMETERS.tex}
+ */
+#define ASM_PARAM_ID_SOFT_VOL_STEPPING_PARAMETERS  0x00010C29
+#define AUDPROC_PARAM_ID_SOFT_VOL_STEPPING_PARAMETERS\
+			ASM_PARAM_ID_SOFT_VOL_STEPPING_PARAMETERS
+
+/* ID of the soft pause parameters used by the #ASM_MODULE_ID_VOL_CTRL
+ * module.
+ */
+#define ASM_PARAM_ID_SOFT_PAUSE_PARAMETERS   0x00010D6A
+
+/* ID of the multiple-channel volume control parameters used by the
+ * #ASM_MODULE_ID_VOL_CTRL module.
+ */
+#define ASM_PARAM_ID_MULTICHANNEL_GAIN  0x00010713
+
+/* ID of the multiple-channel mute configuration parameters used by the
+ * #ASM_MODULE_ID_VOL_CTRL module.
+ */
+
+#define ASM_PARAM_ID_MULTICHANNEL_MUTE  0x00010714
+
+/* Structure for the master gain parameter for a volume control
+ * module.
+ */
+
+
+/* @brief Payload of the #ASM_PARAM_ID_VOL_CTRL_MASTER_GAIN
+ * parameter used by the Volume Control module.
+ */
+
+
+
+struct asm_volume_ctrl_master_gain {
+	struct apr_hdr	hdr;
+	struct asm_stream_cmd_set_pp_params_v2 param;
+	struct asm_stream_param_data_v2 data;
+	uint16_t                  master_gain;
+	/*< Linear gain in Q13 format. */
+
+	uint16_t                  reserved;
+	/*< Clients must set this field to zero. */
+} __packed;
+
+
+struct asm_volume_ctrl_lr_chan_gain {
+	struct apr_hdr	hdr;
+	struct asm_stream_cmd_set_pp_params_v2 param;
+	struct asm_stream_param_data_v2 data;
+
+	uint16_t                  l_chan_gain;
+	/*< Linear gain in Q13 format for the left channel. */
+
+	uint16_t                  r_chan_gain;
+	/*< Linear gain in Q13 format for the right channel.*/
+} __packed;
+
+struct audproc_chmixer_param_coeff {
+	uint32_t index;
+	uint16_t num_output_channels;
+	uint16_t num_input_channels;
+} __packed;
+
+
+/* ID of the Multichannel Volume Control parameters used by
+ * AUDPROC_MODULE_ID_VOL_CTRL.
+ */
+#define AUDPROC_PARAM_ID_MULTICHANNEL_GAIN                          0x00010713
+
+/* Payload of the AUDPROC_PARAM_ID_MULTICHANNEL_GAIN channel type/gain
+ * pairs used by the Volume Control module.
+ * This structure immediately follows the
+ * audproc_volume_ctrl_multichannel_gain_t structure.
+ */
+struct audproc_volume_ctrl_channel_type_gain_pair {
+	uint8_t channel_type;
+	/* Channel type for which the gain setting is to be applied. */
+
+	uint8_t reserved1;
+	uint8_t reserved2;
+	uint8_t reserved3;
+
+	uint32_t gain;
+	/* Gain value for this channel in Q28 format. */
+} __packed;
+
+/* Payload of the AUDPROC_PARAM_ID_MULTICHANNEL_GAIN parameters used by
+ * the Volume Control module.
+ */
+struct audproc_volume_ctrl_multichannel_gain {
+	uint32_t num_channels;
+	/* Number of channels for which gain configuration is provided. Any
+	 * channels present in the data for which gain configuration is not
+	 * provided are set to unity gain.
+	 */
+
+	struct audproc_volume_ctrl_channel_type_gain_pair *gain_data;
+	/* Array of channel type/gain pairs. */
+} __packed;
+
+/* Structure for the mute configuration parameter for a
+ * volume control module.
+ */
+
+
+/* @brief Payload of the #ASM_PARAM_ID_VOL_CTRL_MUTE_CONFIG
+ * parameter used by the Volume Control module.
+ */
+
+
+struct asm_volume_ctrl_mute_config {
+	struct apr_hdr	hdr;
+	struct asm_stream_cmd_set_pp_params_v2 param;
+	struct asm_stream_param_data_v2 data;
+	uint32_t                  mute_flag;
+/*< Specifies whether mute is disabled (0) or enabled (nonzero).*/
+
+} __packed;
+
+/*
+ * Linear ramping curve for soft stepping volume/pause.
+ */
+#define ASM_PARAM_SVC_RAMPINGCURVE_LINEAR  0
+
+/*
+ * Exponential ramping curve.
+ */
+#define ASM_PARAM_SVC_RAMPINGCURVE_EXP    1
+
+/*
+ * Logarithmic ramping curve.
+ */
+#define ASM_PARAM_SVC_RAMPINGCURVE_LOG    2
+
+/* Structure for holding soft stepping volume parameters. */
+
+
+/*  Payload of the #ASM_PARAM_ID_SOFT_VOL_STEPPING_PARAMETERS
+ * parameters used by the Volume Control module.
+ */
+struct asm_soft_step_volume_params {
+	struct apr_hdr	hdr;
+	struct asm_stream_cmd_set_pp_params_v2 param;
+	struct asm_stream_param_data_v2 data;
+	uint32_t                  period;
+/*< Period in milliseconds.
+ * Supported values: 0 to 15000
+ */
+
+	uint32_t                  step;
+/*< Step in microseconds.
+ * Supported values: 0 to 15000000
+ */
+
+	uint32_t                  ramping_curve;
+/*< Ramping curve type.
+ * Supported values:
+ * - #ASM_PARAM_SVC_RAMPINGCURVE_LINEAR
+ * - #ASM_PARAM_SVC_RAMPINGCURVE_EXP
+ * - #ASM_PARAM_SVC_RAMPINGCURVE_LOG
+ */
+} __packed;
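+
+/*
+ * Usage sketch (illustrative; the values are examples, not
+ * recommendations): a 20 ms linear soft-step ramp. Note that period is
+ * expressed in milliseconds while step is in microseconds, per the
+ * field comments above.
+ */
+static inline void
+example_fill_soft_step(struct asm_soft_step_volume_params *v)
+{
+	v->period = 20;		/* 20 ms ramp */
+	v->step = 1000;		/* 1 ms per step */
+	v->ramping_curve = ASM_PARAM_SVC_RAMPINGCURVE_LINEAR;
+}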
+
+
+/* Structure for holding soft pause parameters. */
+
+
+/* Payload of the #ASM_PARAM_ID_SOFT_PAUSE_PARAMETERS
+ * parameters used by the Volume Control module.
+ */
+
+
+struct asm_soft_pause_params {
+	struct apr_hdr	hdr;
+	struct asm_stream_cmd_set_pp_params_v2 param;
+	struct asm_stream_param_data_v2 data;
+	uint32_t                  enable_flag;
+/*< Specifies whether soft pause is disabled (0) or enabled
+ * (nonzero).
+ */
+
+
+
+	uint32_t                  period;
+/*< Period in milliseconds.
+ * Supported values: 0 to 15000
+ */
+
+	uint32_t                  step;
+/*< Step in microseconds.
+ * Supported values: 0 to 15000000
+ */
+
+	uint32_t                  ramping_curve;
+/*< Ramping curve.
+ * Supported values:
+ * - #ASM_PARAM_SVC_RAMPINGCURVE_LINEAR
+ * - #ASM_PARAM_SVC_RAMPINGCURVE_EXP
+ * - #ASM_PARAM_SVC_RAMPINGCURVE_LOG
+ */
+} __packed;
+
+
+/* Maximum number of channels.*/
+#define VOLUME_CONTROL_MAX_CHANNELS                       8
+
+/* Structure for holding one channel type - gain pair. */
+
+
+/* Payload of the #ASM_PARAM_ID_MULTICHANNEL_GAIN channel
+ * type/gain pairs used by the Volume Control module. \n \n This
+ * structure immediately follows the
+ * asm_volume_ctrl_multichannel_gain structure.
+ */
+
+
+struct asm_volume_ctrl_channeltype_gain_pair {
+	uint8_t                   channeltype;
+	/*
+	 * Channel type for which the gain setting is to be applied.
+	 * Supported values:
+	 * - #PCM_CHANNEL_L
+	 * - #PCM_CHANNEL_R
+	 * - #PCM_CHANNEL_C
+	 * - #PCM_CHANNEL_LS
+	 * - #PCM_CHANNEL_RS
+	 * - #PCM_CHANNEL_LFE
+	 * - #PCM_CHANNEL_CS
+	 * - #PCM_CHANNEL_LB
+	 * - #PCM_CHANNEL_RB
+	 * - #PCM_CHANNELS
+	 * - #PCM_CHANNEL_CVH
+	 * - #PCM_CHANNEL_MS
+	 * - #PCM_CHANNEL_FLC
+	 * - #PCM_CHANNEL_FRC
+	 * - #PCM_CHANNEL_RLC
+	 * - #PCM_CHANNEL_RRC
+	 */
+
+	uint8_t                   reserved1;
+	/* Clients must set this field to zero. */
+
+	uint8_t                   reserved2;
+	/* Clients must set this field to zero. */
+
+	uint8_t                   reserved3;
+	/* Clients must set this field to zero. */
+
+	uint32_t                  gain;
+	/*
+	 * Gain value for this channel in Q28 format.
+	 * Supported values: Any
+	 */
+} __packed;
+
+
+/* Structure for the multichannel gain command */
+
+
+/* Payload of the #ASM_PARAM_ID_MULTICHANNEL_GAIN
+ * parameters used by the Volume Control module.
+ */
+
+
+struct asm_volume_ctrl_multichannel_gain {
+	struct apr_hdr	hdr;
+	struct asm_stream_cmd_set_pp_params_v2 param;
+	struct asm_stream_param_data_v2 data;
+	uint32_t                  num_channels;
+	/*
+	 * Number of channels for which gain values are provided. Any
+	 * channels present in the data for which gain is not provided are
+	 * set to unity gain.
+	 * Supported values: 1 to 8
+	 */
+
+	struct asm_volume_ctrl_channeltype_gain_pair
+		gain_data[VOLUME_CONTROL_MAX_CHANNELS];
+	/* Array of channel type/gain pairs.*/
+} __packed;
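+
+/*
+ * Usage sketch (illustrative only): a stereo gain update with unity
+ * gain on the left channel and half gain (about -6 dB) on the right.
+ * In Q28, unity gain is 1 << 28. PCM_CHANNEL_L/PCM_CHANNEL_R are the
+ * channel types referenced in the comments above; the caller is also
+ * expected to zero the reserved fields of each pair.
+ */
+static inline void
+example_fill_stereo_gain(struct asm_volume_ctrl_multichannel_gain *mg)
+{
+	mg->num_channels = 2;
+	mg->gain_data[0].channeltype = PCM_CHANNEL_L;
+	mg->gain_data[0].gain = 1u << 28;	/* unity in Q28 */
+	mg->gain_data[1].channeltype = PCM_CHANNEL_R;
+	mg->gain_data[1].gain = 1u << 27;	/* half gain in Q28 */
+}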
+
+
+/* Structure for holding one channel type - mute pair. */
+
+
+/* Payload of the #ASM_PARAM_ID_MULTICHANNEL_MUTE channel
+ * type/mute setting pairs used by the Volume Control module. \n \n
+ * This structure immediately follows the
+ * asm_volume_ctrl_multichannel_mute structure.
+ */
+
+
+struct asm_volume_ctrl_channelype_mute_pair {
+	struct apr_hdr	hdr;
+	struct asm_stream_cmd_set_pp_params_v2 param;
+	struct asm_stream_param_data_v2 data;
+	uint8_t                   channelype;
+/*< Channel type for which the mute setting is to be applied.
+ * Supported values:
+ * - #PCM_CHANNEL_L
+ * - #PCM_CHANNEL_R
+ * - #PCM_CHANNEL_C
+ * - #PCM_CHANNEL_LS
+ * - #PCM_CHANNEL_RS
+ * - #PCM_CHANNEL_LFE
+ * - #PCM_CHANNEL_CS
+ * - #PCM_CHANNEL_LB
+ * - #PCM_CHANNEL_RB
+ * - #PCM_CHANNELS
+ * - #PCM_CHANNEL_CVH
+ * - #PCM_CHANNEL_MS
+ * - #PCM_CHANNEL_FLC
+ * - #PCM_CHANNEL_FRC
+ * - #PCM_CHANNEL_RLC
+ * - #PCM_CHANNEL_RRC
+ */
+
+	uint8_t                   reserved1;
+	/*< Clients must set this field to zero. */
+
+	uint8_t                   reserved2;
+	/*< Clients must set this field to zero. */
+
+	uint8_t                   reserved3;
+	/*< Clients must set this field to zero. */
+
+	uint32_t                  mute;
+/*< Mute setting for this channel.
+ * Supported values:
+ * - 0 = Unmute
+ * - Nonzero = Mute
+ */
+} __packed;
+
+
+/* Structure for the multichannel mute command */
+
+
+/* @brief Payload of the #ASM_PARAM_ID_MULTICHANNEL_MUTE
+ * parameters used by the Volume Control module.
+ */
+
+
+struct asm_volume_ctrl_multichannel_mute {
+	struct apr_hdr	hdr;
+	struct asm_stream_cmd_set_pp_params_v2 param;
+	struct asm_stream_param_data_v2 data;
+	uint32_t                  num_channels;
+/*< Number of channels for which mute configuration is
+ * provided. Any channels present in the data for which mute
+ * configuration is not provided are set to unmute.
+ * Supported values: 1 to 8
+ */
+
+	struct asm_volume_ctrl_channelype_mute_pair
+				mute_data[VOLUME_CONTROL_MAX_CHANNELS];
+	/*< Array of channel type/mute setting pairs.*/
+} __packed;
+/* end_addtogroup audio_pp_param_ids */
+
+/* audio_pp_module_ids
+ * ID of the IIR Tuning Filter module.
+ * This module supports the following parameter IDs:
+ * - #ASM_PARAM_ID_IIRUNING_FILTER_ENABLE_CONFIG
+ * - #ASM_PARAM_ID_IIRUNING_FILTER_PRE_GAIN
+ * - #ASM_PARAM_ID_IIRUNING_FILTER_CONFIG_PARAMS
+ */
+#define ASM_MODULE_ID_IIRUNING_FILTER   0x00010C02
+
+/* @addtogroup audio_pp_param_ids */
+/* ID of the IIR tuning filter enable parameter used by the
+ * #ASM_MODULE_ID_IIRUNING_FILTER module.
+ * @messagepayload
+ * @structure{asm_iiruning_filter_enable}
+ * @tablespace
+ * @inputtable{Audio_Postproc_ASM_PARAM_ID_IIRUNING_FILTER_ENABLE_CONFIG.tex}
+ */
+#define ASM_PARAM_ID_IIRUNING_FILTER_ENABLE_CONFIG   0x00010C03
+
+/* ID of the IIR tuning filter pregain parameter used by the
+ * #ASM_MODULE_ID_IIRUNING_FILTER module.
+ */
+#define ASM_PARAM_ID_IIRUNING_FILTER_PRE_GAIN  0x00010C04
+
+/* ID of the IIR tuning filter configuration parameters used by the
+ * #ASM_MODULE_ID_IIRUNING_FILTER module.
+ */
+#define ASM_PARAM_ID_IIRUNING_FILTER_CONFIG_PARAMS  0x00010C05
+
+/* Structure for an enable configuration parameter for an
+ * IIR tuning filter module.
+ */
+
+
+/* @brief Payload of the #ASM_PARAM_ID_IIRUNING_FILTER_ENABLE_CONFIG
+ * parameter used by the IIR Tuning Filter module.
+ */
+struct asm_iiruning_filter_enable {
+	uint32_t                  enable_flag;
+/*< Specifies whether the IIR tuning filter is disabled (0) or
+ * enabled (1).
+ */
+} __packed;
+
+/* Structure for the pregain parameter for an IIR tuning filter module. */
+
+
+/* Payload of the #ASM_PARAM_ID_IIRUNING_FILTER_PRE_GAIN
+ * parameters used by the IIR Tuning Filter module.
+ */
+struct asm_iiruning_filter_pregain {
+	uint16_t                  pregain;
+	/*< Linear gain in Q13 format. */
+
+	uint16_t                  reserved;
+	/*< Clients must set this field to zero.*/
+} __packed;
+
+/* Structure for the configuration parameter for an IIR tuning filter
+ * module.
+ */
+
+
+/* @brief Payload of the #ASM_PARAM_ID_IIRUNING_FILTER_CONFIG_PARAMS
+ * parameters used by the IIR Tuning Filter module. \n
+ * \n
+ * This structure is followed by the IIR filter coefficients: \n
+ * - Sequence of int32_t FilterCoeffs \n
+ * Five coefficients for each band. Each coefficient is in int32_t format, in
+ * the order of b0, b1, b2, a1, a2.
+ * - Sequence of int16_t NumShiftFactor \n
+ * One int16_t per band. The numerator shift factor is related to the Q
+ * factor of the filter coefficients.
+ * - Sequence of uint16_t PanSetting \n
+ * One uint16_t per band, indicating if the filter is applied to left (0),
+ * right (1), or both (2) channels.
+ */
+struct asm_iir_filter_config_params {
+	uint16_t                  num_biquad_stages;
+/*< Number of bands.
+ * Supported values: 0 to 20
+ */
+
+	uint16_t                  reserved;
+	/*< Clients must set this field to zero.*/
+} __packed;
+
+/* audio_pp_module_ids
+ * ID of the Multiband Dynamic Range Control (MBDRC) module on the Tx/Rx
+ * paths.
+ * This module supports the following parameter IDs:
+ * - #ASM_PARAM_ID_MBDRC_ENABLE
+ * - #ASM_PARAM_ID_MBDRC_CONFIG_PARAMS
+ */
+#define ASM_MODULE_ID_MBDRC   0x00010C06
+
+/* audio_pp_param_ids */
+/* ID of the MBDRC enable parameter used by the #ASM_MODULE_ID_MBDRC module.
+ * @messagepayload
+ * @structure{asm_mbdrc_enable}
+ * @tablespace
+ * @inputtable{Audio_Postproc_ASM_PARAM_ID_MBDRC_ENABLE.tex}
+ */
+#define ASM_PARAM_ID_MBDRC_ENABLE   0x00010C07
+
+/* ID of the MBDRC configuration parameters used by the
+ * #ASM_MODULE_ID_MBDRC module.
+ * @messagepayload
+ * @structure{asm_mbdrc_config_params}
+ * @tablespace
+ * @inputtable{Audio_Postproc_ASM_PARAM_ID_MBDRC_CONFIG_PARAMS.tex}
+ *
+ * @parspace Sub-band DRC configuration parameters
+ * @structure{asm_subband_drc_config_params}
+ * @tablespace
+ * @inputtable{Audio_Postproc_ASM_PARAM_ID_MBDRC_CONFIG_PARAMS_subband_DRC.tex}
+ *
+ * @keep{6}
+ * To obtain legacy ADRC from MBDRC, use the calibration tool to:
+ *
+ * - Enable MBDRC (EnableFlag = TRUE)
+ * - Set number of bands to 1 (uiNumBands = 1)
+ * - Enable the first MBDRC band (DrcMode[0] = DRC_ENABLED = 1)
+ * - Clear the first band mute flag (MuteFlag[0] = 0)
+ * - Set the first band makeup gain to unity (compMakeUpGain[0] = 0x2000)
+ * - Use the legacy ADRC parameters to calibrate the rest of the MBDRC
+ * parameters.
+ */
+#define ASM_PARAM_ID_MBDRC_CONFIG_PARAMS  0x00010C08
+
+/* end_addtogroup audio_pp_param_ids */
+
+/* audio_pp_module_ids
+ * ID of the MMBDRC module version 2 pre/postprocessing block.
+ * This module differs from the original MBDRC (#ASM_MODULE_ID_MBDRC) in
+ * the length of the filters used in each sub-band.
+ * This module supports the following parameter ID:
+ * - #ASM_PARAM_ID_MBDRC_CONFIG_PARAMS_IMPROVED_FILTBANK_V2
+ */
+#define ASM_MODULE_ID_MBDRCV2                                0x0001070B
+
+/* @addtogroup audio_pp_param_ids */
+/* ID of the configuration parameters used by the
+ * #ASM_MODULE_ID_MBDRCV2 module for the improved filter structure
+ * of the MBDRC v2 pre/postprocessing block.
+ * The update to this configuration structure from the original
+ * MBDRC is the number of filter coefficients in the filter
+ * structure. The sequence is as follows:
+ * - 1 band = 0 FIR coefficient + 1 mute flag + uint16_t padding
+ * - 2 bands = 141 FIR coefficients + 2 mute flags + uint16_t padding
+ * - 3 bands = 141+81 FIR coefficients + 3 mute flags + uint16_t padding
+ * - 4 bands = 141+81+61 FIR coefficients + 4 mute flags + uint16_t
+ * padding
+ * - 5 bands = 141+81+61+61 FIR coefficients + 5 mute flags +
+ * uint16_t padding
+ *	This block uses the same parameter structure as
+ *	#ASM_PARAM_ID_MBDRC_CONFIG_PARAMS.
+ */
+#define ASM_PARAM_ID_MBDRC_CONFIG_PARAMS_IMPROVED_FILTBANK_V2 \
+								0x0001070C
+
+#define ASM_MODULE_ID_MBDRCV3					0x0001090B
+/*
+ * ID of the MMBDRC module version 3 pre/postprocessing block.
+ * This module differs from MBDRCv2 (#ASM_MODULE_ID_MBDRCV2) in
+ * that it supports both 16- and 24-bit data.
+ * This module supports the following parameter IDs:
+ * - #ASM_PARAM_ID_MBDRC_ENABLE
+ * - #ASM_PARAM_ID_MBDRC_CONFIG_PARAMS
+ * - #ASM_PARAM_ID_MBDRC_CONFIG_PARAMS_V3
+ * - #ASM_PARAM_ID_MBDRC_FILTER_XOVER_FREQS
+ */
+
+/* Structure for the enable parameter for an MBDRC module. */
+
+
+/* Payload of the #ASM_PARAM_ID_MBDRC_ENABLE parameter used by the
+ * MBDRC module.
+ */
+struct asm_mbdrc_enable {
+	uint32_t                  enable_flag;
+/*< Specifies whether MBDRC is disabled (0) or enabled (nonzero).*/
+} __packed;
+
+/* Structure for the configuration parameters for an MBDRC module. */
+
+
+/* Payload of the #ASM_PARAM_ID_MBDRC_CONFIG_PARAMS
+ * parameters used by the MBDRC module. \n \n Following this
+ * structure is the payload for sub-band DRC configuration
+ * parameters (asm_subband_drc_config_params). This sub-band
+ * structure must be repeated for each band.
+ */
+
+
+struct asm_mbdrc_config_params {
+	uint16_t                  num_bands;
+/*< Number of bands.
+ * Supported values: 1 to 5
+ */
+
+	int16_t                   limiterhreshold;
+/*< Threshold in decibels for the limiter output.
+ * Supported values: -72 to 18 \n
+ * Recommended value: 3994 (-0.22 dB in Q3.12 format)
+ */
+
+	int16_t                   limiter_makeup_gain;
+/*< Makeup gain in decibels for the limiter output.
+ * Supported values: -42 to 42 \n
+ * Recommended value: 256 (0 dB in Q7.8 format)
+ */
+
+	int16_t                   limiter_gc;
+/*< Limiter gain recovery coefficient.
+ * Supported values: 0.5 to 0.99 \n
+ * Recommended value: 32440 (0.99 in Q15 format)
+ */
+
+	int16_t                   limiter_delay;
+/*< Limiter delay in samples.
+ * Supported values: 0 to 10 \n
+ * Recommended value: 262 (0.008 samples in Q15 format)
+ */
+
+	int16_t                   limiter_max_wait;
+/*< Maximum limiter waiting time in samples.
+ * Supported values: 0 to 10 \n
+ * Recommended value: 262 (0.008 samples in Q15 format)
+ */
+} __packed;
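+
+/*
+ * Sketch of the "legacy ADRC from MBDRC" recipe described above
+ * (illustrative only). The calibration-tool names EnableFlag and
+ * uiNumBands appear to map onto these structures as shown; DrcMode[0],
+ * MuteFlag[0] and compMakeUpGain[0] live in the per-band
+ * asm_subband_drc_config_params payload and its trailing
+ * coefficient/mute-flag sequence, described further below.
+ */
+static inline void example_legacy_adrc(struct asm_mbdrc_enable *en,
+				       struct asm_mbdrc_config_params *cfg)
+{
+	en->enable_flag = 1;	/* EnableFlag = TRUE */
+	cfg->num_bands = 1;	/* uiNumBands = 1 */
+}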
+
+/* DRC configuration structure for each sub-band of an MBDRC module. */
+
+
+/* Payload of the #ASM_PARAM_ID_MBDRC_CONFIG_PARAMS DRC
+ * configuration parameters for each sub-band in the MBDRC module.
+ * After this DRC structure is configured for valid bands, the next
+ * MBDRC setparams expects the sequence of sub-band MBDRC filter
+ * coefficients (the length depends on the number of bands) plus the
+ * mute flag for that band plus uint16_t padding.
+ *
+ * @keep{10}
+ * The filter coefficient and mute flag are of type int16_t:
+ * - FIR coefficient = int16_t firFilter
+ * - Mute flag = int16_t fMuteFlag
+ *
+ * The sequence is as follows:
+ * - 1 band = 0 FIR coefficient + 1 mute flag + uint16_t padding
+ * - 2 bands = 97 FIR coefficients + 2 mute flags + uint16_t padding
+ * - 3 bands = 97+33 FIR coefficients + 3 mute flags + uint16_t padding
+ * - 4 bands = 97+33+33 FIR coefficients + 4 mute flags + uint16_t padding
+ * - 5 bands = 97+33+33+33 FIR coefficients + 5 mute flags + uint16_t padding
+ *
+ * For improved filterbank, the sequence is as follows:
+ * - 1 band = 0 FIR coefficient + 1 mute flag + uint16_t padding
+ * - 2 bands = 141 FIR coefficients + 2 mute flags + uint16_t padding
+ * - 3 bands = 141+81 FIR coefficients + 3 mute flags + uint16_t padding
+ * - 4 bands = 141+81+61 FIR coefficients + 4 mute flags + uint16_t padding
+ * - 5 bands = 141+81+61+61 FIR coefficients + 5 mute flags + uint16_t padding
+ */
+struct asm_subband_drc_config_params {
+	int16_t                   drc_stereo_linked_flag;
+/*< Specifies whether all stereo channels have the same applied
+ * dynamics (1) or if they process their dynamics independently (0).
+ * Supported values:
+ * - 0 -- Not linked
+ * - 1 -- Linked
+ */
+
+	int16_t                   drc_mode;
+/*< Specifies whether DRC mode is bypassed for sub-bands.
+ * Supported values:
+ * - 0 -- Disabled
+ * - 1 -- Enabled
+ */
+
+	int16_t                   drc_down_sample_level;
+/*< DRC down sample level.
+ * Supported values: @ge 1
+ */
+
+	int16_t                   drc_delay;
+/*< DRC delay in samples.
+ * Supported values: 0 to 1200
+ */
+
+	uint16_t                  drc_rmsime_avg_const;
+/*< RMS signal energy time-averaging constant.
+ * Supported values: 0 to 2^16-1
+ */
+
+	uint16_t                  drc_makeup_gain;
+/*< DRC makeup gain in decibels.
+ * Supported values: 258 to 64917
+ */
+	/* Down expander settings */
+	int16_t                   down_expdrhreshold;
+/*< Down expander threshold.
+ * Supported Q7 format values: 1320 to up_cmpsrhreshold
+ */
+
+	int16_t                   down_expdr_slope;
+/*< Down expander slope.
+ * Supported Q8 format values: -32768 to 0.
+ */
+
+	uint32_t                  down_expdr_attack;
+/*< Down expander attack constant.
+ * Supported Q31 format values: 196844 to 2^31.
+ */
+
+	uint32_t                  down_expdr_release;
+/*< Down expander release constant.
+ * Supported Q31 format values: 19685 to 2^31
+ */
+
+	uint16_t                  down_expdr_hysteresis;
+/*< Down expander hysteresis constant.
+ * Supported Q14 format values: 1 to 32690
+ */
+
+	uint16_t                  reserved;
+	/*< Clients must set this field to zero. */
+
+	int32_t                   down_expdr_min_gain_db;
+/*< Down expander minimum gain.
+ * Supported Q23 format values: -805306368 to 0.
+ */
+
+	/* Up compressor settings */
+
+	int16_t                   up_cmpsrhreshold;
+/*< Up compressor threshold.
+ * Supported Q7 format values: down_expdrhreshold to
+ * down_cmpsrhreshold.
+ */
+
+	uint16_t                  up_cmpsr_slope;
+/*< Up compressor slope.
+ * Supported Q16 format values: 0 to 64881.
+ */
+
+	uint32_t                  up_cmpsr_attack;
+/*< Up compressor attack constant.
+ * Supported Q31 format values: 196844 to 2^31.
+ */
+
+	uint32_t                  up_cmpsr_release;
+/*< Up compressor release constant.
+ * Supported Q31 format values: 19685 to 2^31.
+ */
+
+	uint16_t                  up_cmpsr_hysteresis;
+/*< Up compressor hysteresis constant.
+  * Supported Q14 format values: 1 to 32690.
+  */
+
+	/* Down compressor settings */
+
+	int16_t                   down_cmpsrhreshold;
+/*< Down compressor threshold.
+ * Supported Q7 format values: up_cmpsrhreshold to 11560.
+ */
+
+	uint16_t                  down_cmpsr_slope;
+/*< Down compressor slope.
+ * Supported Q16 format values: 0 to 64881.
+ */
+
+	uint16_t                  reserved1;
+/*< Clients must set this field to zero. */
+
+	uint32_t                  down_cmpsr_attack;
+/*< Down compressor attack constant.
+ * Supported Q31 format values: 196844 to 2^31.
+ */
+
+	uint32_t                  down_cmpsr_release;
+/*< Down compressor release constant.
+ * Supported Q31 format values: 19685 to 2^31.
+ */
+
+	uint16_t                  down_cmpsr_hysteresis;
+/*< Down compressor hysteresis constant.
+ * Supported Q14 values: 1 to 32690.
+ */
+
+	uint16_t                  reserved2;
+/*< Clients must set this field to zero.*/
+} __packed;
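+
+/*
+ * Illustrative helper (not part of the interface): number of trailing
+ * int16_t words -- FIR coefficients, per-band mute flags, and one
+ * uint16_t pad -- that follow the per-band DRC structures for the
+ * standard filterbank sequence documented above.
+ */
+static inline uint32_t example_mbdrc_tail_words(uint16_t num_bands)
+{
+	static const uint16_t fir[5] = {
+		0, 97, 97 + 33, 97 + 33 + 33, 97 + 33 + 33 + 33
+	};
+
+	if (num_bands < 1 || num_bands > 5)
+		return 0;
+	return fir[num_bands - 1] + num_bands + 1; /* coeffs + mutes + pad */
+}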
+
+#define ASM_MODULE_ID_EQUALIZER            0x00010C27
+#define ASM_PARAM_ID_EQUALIZER_PARAMETERS  0x00010C28
+
+#define ASM_MAX_EQ_BANDS 12
+
+struct asm_eq_per_band_params {
+	uint32_t                  band_idx;
+/*< Band index.
+ * Supported values: 0 to 11
+ */
+
+	uint32_t                  filterype;
+/*< Type of filter.
+ * Supported values:
+ * - #ASM_PARAM_EQYPE_NONE
+ * - #ASM_PARAM_EQ_BASS_BOOST
+ * - #ASM_PARAM_EQ_BASS_CUT
+ * - #ASM_PARAM_EQREBLE_BOOST
+ * - #ASM_PARAM_EQREBLE_CUT
+ * - #ASM_PARAM_EQ_BAND_BOOST
+ * - #ASM_PARAM_EQ_BAND_CUT
+ */
+
+	uint32_t                  center_freq_hz;
+	/*< Filter band center frequency in Hertz. */
+
+	int32_t                   filter_gain;
+/*< Filter band initial gain.
+ * Supported values: +12 to -12 dB in 1 dB increments
+ */
+
+	int32_t                   q_factor;
+/*< Filter band quality factor expressed as a Q8 number, i.e., a
+ * fixed-point number with q factor of 8. For example, 3000/(2^8).
+ */
+} __packed;
+
+struct asm_eq_params {
+	struct apr_hdr	hdr;
+	struct asm_stream_cmd_set_pp_params_v2 param;
+	struct asm_stream_param_data_v2 data;
+	uint32_t                  enable_flag;
+/*< Specifies whether the equalizer module is disabled (0) or enabled
+ * (nonzero).
+ */
+
+	uint32_t                  num_bands;
+/*< Number of bands.
+ * Supported values: 1 to 12
+ */
+	struct asm_eq_per_band_params eq_bands[ASM_MAX_EQ_BANDS];
+} __packed;
+
+/* No equalizer effect. */
+#define ASM_PARAM_EQYPE_NONE      0
+
+/* Bass boost equalizer effect. */
+#define ASM_PARAM_EQ_BASS_BOOST     1
+
+/* Bass cut equalizer effect. */
+#define ASM_PARAM_EQ_BASS_CUT       2
+
+/* Treble boost equalizer effect. */
+#define ASM_PARAM_EQREBLE_BOOST   3
+
+/* Treble cut equalizer effect. */
+#define ASM_PARAM_EQREBLE_CUT     4
+
+/* Band boost equalizer effect. */
+#define ASM_PARAM_EQ_BAND_BOOST     5
+
+/* Band cut equalizer effect. */
+#define ASM_PARAM_EQ_BAND_CUT       6
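+
+/*
+ * Usage sketch (illustrative values only): one +3 dB band-boost EQ band
+ * centered at 1 kHz. Per the q_factor comment above, the quality factor
+ * is a Q8 number, so 256 represents 1.0.
+ */
+static inline void example_fill_eq_band(struct asm_eq_per_band_params *b)
+{
+	b->band_idx = 0;
+	b->filterype = ASM_PARAM_EQ_BAND_BOOST;
+	b->center_freq_hz = 1000;	/* 1 kHz */
+	b->filter_gain = 3;		/* +3 dB */
+	b->q_factor = 256;		/* 1.0 in Q8 */
+}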
+
+/* Get & set params */
+#define VSS_ICOMMON_CMD_SET_PARAM_V2	0x0001133D
+#define VSS_ICOMMON_CMD_GET_PARAM_V2	0x0001133E
+#define VSS_ICOMMON_RSP_GET_PARAM	0x00011008
+
+/** ID of the Bass Boost module.
+    This module supports the following parameter IDs:
+    - #AUDPROC_PARAM_ID_BASS_BOOST_ENABLE
+    - #AUDPROC_PARAM_ID_BASS_BOOST_MODE
+    - #AUDPROC_PARAM_ID_BASS_BOOST_STRENGTH
+*/
+#define AUDPROC_MODULE_ID_BASS_BOOST                             0x000108A1
+/** ID of the Bass Boost enable parameter used by
+    AUDPROC_MODULE_ID_BASS_BOOST.
+*/
+#define AUDPROC_PARAM_ID_BASS_BOOST_ENABLE                       0x000108A2
+/** ID of the Bass Boost mode parameter used by
+    AUDPROC_MODULE_ID_BASS_BOOST.
+*/
+#define AUDPROC_PARAM_ID_BASS_BOOST_MODE                         0x000108A3
+/** ID of the Bass Boost strength parameter used by
+    AUDPROC_MODULE_ID_BASS_BOOST.
+*/
+#define AUDPROC_PARAM_ID_BASS_BOOST_STRENGTH                     0x000108A4
+
+/** ID of the PBE module.
+    This module supports the following parameter IDs:
+    - #AUDPROC_PARAM_ID_PBE_ENABLE
+    - #AUDPROC_PARAM_ID_PBE_PARAM_CONFIG
+*/
+#define AUDPROC_MODULE_ID_PBE                                    0x00010C2A
+/** ID of the PBE enable parameter used by
+    AUDPROC_MODULE_ID_PBE.
+*/
+#define AUDPROC_PARAM_ID_PBE_ENABLE                              0x00010C2B
+/** ID of the PBE configuration parameter used by
+    AUDPROC_MODULE_ID_PBE.
+*/
+#define AUDPROC_PARAM_ID_PBE_PARAM_CONFIG                        0x00010C49
+
+/** ID of the Virtualizer module. This module supports the
+    following parameter IDs:
+    - #AUDPROC_PARAM_ID_VIRTUALIZER_ENABLE
+    - #AUDPROC_PARAM_ID_VIRTUALIZER_STRENGTH
+    - #AUDPROC_PARAM_ID_VIRTUALIZER_OUT_TYPE
+    - #AUDPROC_PARAM_ID_VIRTUALIZER_GAIN_ADJUST
+*/
+#define AUDPROC_MODULE_ID_VIRTUALIZER                            0x000108A5
+/** ID of the Virtualizer enable parameter used by
+    AUDPROC_MODULE_ID_VIRTUALIZER.
+*/
+#define AUDPROC_PARAM_ID_VIRTUALIZER_ENABLE                      0x000108A6
+/** ID of the Virtualizer strength parameter used by
+    AUDPROC_MODULE_ID_VIRTUALIZER.
+*/
+#define AUDPROC_PARAM_ID_VIRTUALIZER_STRENGTH                    0x000108A7
+/** ID of the Virtualizer out type parameter used by
+    AUDPROC_MODULE_ID_VIRTUALIZER.
+*/
+#define AUDPROC_PARAM_ID_VIRTUALIZER_OUT_TYPE                    0x000108A8
+/** ID of the Virtualizer gain adjust parameter used by
+    AUDPROC_MODULE_ID_VIRTUALIZER.
+*/
+#define AUDPROC_PARAM_ID_VIRTUALIZER_GAIN_ADJUST                 0x000108A9
+
+/** ID of the Reverb module. This module supports the following
+    parameter IDs:
+    - #AUDPROC_PARAM_ID_REVERB_ENABLE
+    - #AUDPROC_PARAM_ID_REVERB_MODE
+    - #AUDPROC_PARAM_ID_REVERB_PRESET
+    - #AUDPROC_PARAM_ID_REVERB_WET_MIX
+    - #AUDPROC_PARAM_ID_REVERB_GAIN_ADJUST
+    - #AUDPROC_PARAM_ID_REVERB_ROOM_LEVEL
+    - #AUDPROC_PARAM_ID_REVERB_ROOM_HF_LEVEL
+    - #AUDPROC_PARAM_ID_REVERB_DECAY_TIME
+    - #AUDPROC_PARAM_ID_REVERB_DECAY_HF_RATIO
+    - #AUDPROC_PARAM_ID_REVERB_REFLECTIONS_LEVEL
+    - #AUDPROC_PARAM_ID_REVERB_REFLECTIONS_DELAY
+    - #AUDPROC_PARAM_ID_REVERB_LEVEL
+    - #AUDPROC_PARAM_ID_REVERB_DELAY
+    - #AUDPROC_PARAM_ID_REVERB_DIFFUSION
+    - #AUDPROC_PARAM_ID_REVERB_DENSITY
+*/
+#define AUDPROC_MODULE_ID_REVERB                          0x000108AA
+/** ID of the Reverb enable parameter used by
+    AUDPROC_MODULE_ID_REVERB.
+*/
+#define AUDPROC_PARAM_ID_REVERB_ENABLE                    0x000108AB
+/** ID of the Reverb mode parameter used by
+    AUDPROC_MODULE_ID_REVERB.
+*/
+#define AUDPROC_PARAM_ID_REVERB_MODE                      0x000108AC
+/** ID of the Reverb preset parameter used by
+    AUDPROC_MODULE_ID_REVERB.
+*/
+#define AUDPROC_PARAM_ID_REVERB_PRESET                    0x000108AD
+/** ID of the Reverb wet mix parameter used by
+    AUDPROC_MODULE_ID_REVERB.
+*/
+#define AUDPROC_PARAM_ID_REVERB_WET_MIX                   0x000108AE
+/** ID of the Reverb gain adjust parameter used by
+    AUDPROC_MODULE_ID_REVERB.
+*/
+#define AUDPROC_PARAM_ID_REVERB_GAIN_ADJUST               0x000108AF
+/** ID of the Reverb room level parameter used by
+    AUDPROC_MODULE_ID_REVERB.
+*/
+#define AUDPROC_PARAM_ID_REVERB_ROOM_LEVEL                0x000108B0
+/** ID of the Reverb room hf level parameter used by
+    AUDPROC_MODULE_ID_REVERB.
+*/
+#define AUDPROC_PARAM_ID_REVERB_ROOM_HF_LEVEL             0x000108B1
+/** ID of the Reverb decay time parameter used by
+    AUDPROC_MODULE_ID_REVERB.
+*/
+#define AUDPROC_PARAM_ID_REVERB_DECAY_TIME                0x000108B2
+/** ID of the Reverb decay hf ratio parameter used by
+    AUDPROC_MODULE_ID_REVERB.
+*/
+#define AUDPROC_PARAM_ID_REVERB_DECAY_HF_RATIO            0x000108B3
+/** ID of the Reverb reflections level parameter used by
+    AUDPROC_MODULE_ID_REVERB.
+*/
+#define AUDPROC_PARAM_ID_REVERB_REFLECTIONS_LEVEL         0x000108B4
+/** ID of the Reverb reflections delay parameter used by
+    AUDPROC_MODULE_ID_REVERB.
+*/
+#define AUDPROC_PARAM_ID_REVERB_REFLECTIONS_DELAY         0x000108B5
+/** ID of the Reverb level parameter used by
+    AUDPROC_MODULE_ID_REVERB.
+*/
+#define AUDPROC_PARAM_ID_REVERB_LEVEL                      0x000108B6
+/** ID of the Reverb delay parameter used by
+    AUDPROC_MODULE_ID_REVERB.
+*/
+#define AUDPROC_PARAM_ID_REVERB_DELAY                      0x000108B7
+/** ID of the Reverb diffusion parameter used by
+    AUDPROC_MODULE_ID_REVERB.
+*/
+#define AUDPROC_PARAM_ID_REVERB_DIFFUSION                  0x000108B8
+/** ID of the Reverb density parameter used by
+    AUDPROC_MODULE_ID_REVERB.
+*/
+#define AUDPROC_PARAM_ID_REVERB_DENSITY                    0x000108B9
+
+/** ID of the Popless Equalizer module. This module supports the
+    following parameter IDs:
+    - #AUDPROC_PARAM_ID_EQ_ENABLE
+    - #AUDPROC_PARAM_ID_EQ_CONFIG
+    - #AUDPROC_PARAM_ID_EQ_NUM_BANDS
+    - #AUDPROC_PARAM_ID_EQ_BAND_LEVELS
+    - #AUDPROC_PARAM_ID_EQ_BAND_LEVEL_RANGE
+    - #AUDPROC_PARAM_ID_EQ_BAND_FREQS
+    - #AUDPROC_PARAM_ID_EQ_SINGLE_BAND_FREQ_RANGE
+    - #AUDPROC_PARAM_ID_EQ_SINGLE_BAND_FREQ
+    - #AUDPROC_PARAM_ID_EQ_BAND_INDEX
+    - #AUDPROC_PARAM_ID_EQ_PRESET_ID
+    - #AUDPROC_PARAM_ID_EQ_NUM_PRESETS
+    - #AUDPROC_PARAM_ID_EQ_PRESET_NAME
+*/
+#define AUDPROC_MODULE_ID_POPLESS_EQUALIZER                    0x000108BA
+/** ID of the Popless Equalizer enable parameter used by
+    AUDPROC_MODULE_ID_POPLESS_EQUALIZER.
+*/
+#define AUDPROC_PARAM_ID_EQ_ENABLE                             0x000108BB
+/** ID of the Popless Equalizer config parameter used by
+    AUDPROC_MODULE_ID_POPLESS_EQUALIZER.
+*/
+#define AUDPROC_PARAM_ID_EQ_CONFIG                             0x000108BC
+/** ID of the Popless Equalizer number of bands parameter used
+    by AUDPROC_MODULE_ID_POPLESS_EQUALIZER. This param ID is
+    used for get param only.
+*/
+#define AUDPROC_PARAM_ID_EQ_NUM_BANDS                          0x000108BD
+/** ID of the Popless Equalizer band levels parameter used by
+    AUDPROC_MODULE_ID_POPLESS_EQUALIZER. This param ID is
+    used for get param only.
+*/
+#define AUDPROC_PARAM_ID_EQ_BAND_LEVELS                        0x000108BE
+/** ID of the Popless Equalizer band level range parameter used
+    by AUDPROC_MODULE_ID_POPLESS_EQUALIZER. This param ID is
+    used for get param only.
+*/
+#define AUDPROC_PARAM_ID_EQ_BAND_LEVEL_RANGE                   0x000108BF
+/** ID of the Popless Equalizer band frequencies parameter used
+    by AUDPROC_MODULE_ID_POPLESS_EQUALIZER. This param ID is
+    used for get param only.
+*/
+#define AUDPROC_PARAM_ID_EQ_BAND_FREQS                         0x000108C0
+/** ID of the Popless Equalizer single band frequency range
+    parameter used by AUDPROC_MODULE_ID_POPLESS_EQUALIZER.
+    This param ID is used for get param only.
+*/
+#define AUDPROC_PARAM_ID_EQ_SINGLE_BAND_FREQ_RANGE             0x000108C1
+/** ID of the Popless Equalizer single band frequency parameter
+    used by AUDPROC_MODULE_ID_POPLESS_EQUALIZER. This param ID
+    is used for set param only.
+*/
+#define AUDPROC_PARAM_ID_EQ_SINGLE_BAND_FREQ                   0x000108C2
+/** ID of the Popless Equalizer band index parameter used by
+    AUDPROC_MODULE_ID_POPLESS_EQUALIZER.
+*/
+#define AUDPROC_PARAM_ID_EQ_BAND_INDEX                         0x000108C3
+/** ID of the Popless Equalizer preset id parameter used by
+    AUDPROC_MODULE_ID_POPLESS_EQUALIZER. This param ID is used
+    for get param only.
+*/
+#define AUDPROC_PARAM_ID_EQ_PRESET_ID                          0x000108C4
+/** ID of the Popless Equalizer number of presets parameter used
+    by AUDPROC_MODULE_ID_POPLESS_EQUALIZER. This param ID is used
+    for get param only.
+*/
+#define AUDPROC_PARAM_ID_EQ_NUM_PRESETS                        0x000108C5
+/** ID of the Popless Equalizer preset name parameter used by
+    AUDPROC_MODULE_ID_POPLESS_EQUALIZER. This param ID is used
+    for get param only.
+*/
+#define AUDPROC_PARAM_ID_EQ_PRESET_NAME                        0x000108C6
+
+/* Set Q6 topologies */
+#define ASM_CMD_ADD_TOPOLOGIES				0x00010DBE
+#define ADM_CMD_ADD_TOPOLOGIES				0x00010335
+#define AFE_CMD_ADD_TOPOLOGIES				0x000100f8
+/* structure used for both ioctls */
+struct cmd_set_topologies {
+	struct apr_hdr hdr;
+	u32		payload_addr_lsw;
+	/* LSW of parameter data payload address.*/
+	u32		payload_addr_msw;
+	/* MSW of parameter data payload address.*/
+	u32		mem_map_handle;
+	/* Memory map handle returned by mem map command */
+	u32		payload_size;
+	/* Size in bytes of the variable payload in shared memory */
+} __packed;
+
+/* This module represents the Rx processing of feedback speaker protection.
+ * It contains the excursion control, thermal protection, and analog clip
+ * manager features.
+ * This module ID supports the following parameter IDs:
+ * - AFE_PARAM_ID_FBSP_MODE_RX_CFG
+ * - AFE_PARAM_ID_FBSP_PTONE_RAMP_CFG
+ */
+
+#define AFE_MODULE_FB_SPKR_PROT_RX 0x0001021C
+#define AFE_MODULE_FB_SPKR_PROT_V2_RX 0x0001025F
+
+#define AFE_PARAM_ID_FBSP_MODE_RX_CFG 0x0001021D
+#define AFE_PARAM_ID_FBSP_PTONE_RAMP_CFG 0x00010260
+
+struct asm_fbsp_mode_rx_cfg {
+	uint32_t minor_version;
+	uint32_t mode;
+} __packed;
+
+/* This module represents the VI processing of feedback speaker protection.
+ * It receives Vsens and Isens from the codec and generates the
+ * parameters needed by Rx processing.
+ * This module ID supports the following parameter IDs:
+ * - AFE_PARAM_ID_SPKR_CALIB_VI_PROC_CFG
+ * - AFE_PARAM_ID_CALIB_RES_CFG
+ * - AFE_PARAM_ID_FEEDBACK_PATH_CFG
+ */
+
+#define AFE_MODULE_FB_SPKR_PROT_VI_PROC 0x00010226
+#define AFE_MODULE_FB_SPKR_PROT_VI_PROC_V2 0x0001026A
+
+#define AFE_PARAM_ID_SPKR_CALIB_VI_PROC_CFG 0x0001022A
+#define AFE_PARAM_ID_SPKR_CALIB_VI_PROC_CFG_V2  0x0001026B
+
+struct asm_spkr_calib_vi_proc_cfg {
+	uint32_t minor_version;
+	uint32_t operation_mode;
+	uint32_t r0_t0_selection_flag[SP_V2_NUM_MAX_SPKR];
+	int32_t r0_cali_q24[SP_V2_NUM_MAX_SPKR];
+	int16_t	t0_cali_q6[SP_V2_NUM_MAX_SPKR];
+	uint32_t quick_calib_flag;
+} __packed;
+
+#define AFE_PARAM_ID_CALIB_RES_CFG 0x0001022B
+#define AFE_PARAM_ID_CALIB_RES_CFG_V2 0x0001026E
+
+struct asm_calib_res_cfg {
+	uint32_t minor_version;
+	int32_t	r0_cali_q24[SP_V2_NUM_MAX_SPKR];
+	uint32_t th_vi_ca_state;
+} __packed;
+
+#define AFE_PARAM_ID_FEEDBACK_PATH_CFG 0x0001022C
+#define AFE_MODULE_FEEDBACK 0x00010257
+
+struct asm_feedback_path_cfg {
+	uint32_t minor_version;
+	int32_t	dst_portid;
+	int32_t	num_channels;
+	int32_t	chan_info[4];
+} __packed;
+
+#define AFE_PARAM_ID_MODE_VI_PROC_CFG 0x00010227
+
+struct asm_mode_vi_proc_cfg {
+	uint32_t minor_version;
+	uint32_t cal_mode;
+} __packed;
+
+#define AFE_MODULE_SPEAKER_PROTECTION_V2_TH_VI	0x0001026A
+#define AFE_PARAM_ID_SP_V2_TH_VI_MODE_CFG	0x0001026B
+#define AFE_PARAM_ID_SP_V2_TH_VI_FTM_CFG	0x0001029F
+#define AFE_PARAM_ID_SP_V2_TH_VI_FTM_PARAMS	0x000102A0
+
+struct afe_sp_th_vi_mode_cfg {
+	uint32_t minor_version;
+	uint32_t operation_mode;
+	/*
+	 * Operation mode of thermal VI module.
+	 *   0 -- Normal Running mode
+	 *   1 -- Calibration mode
+	 *   2 -- FTM mode
+	 */
+	uint32_t r0t0_selection_flag[SP_V2_NUM_MAX_SPKR];
+	/*
+	 * Specifies which set of R0, T0 values the algorithm will use.
+	 * This field is valid only in Normal mode (operation_mode = 0).
+	 * 0 -- Use calibrated R0, T0 value
+	 * 1 -- Use safe R0, T0 value
+	 */
+	int32_t r0_cali_q24[SP_V2_NUM_MAX_SPKR];
+	/*
+	 * Calibration point resistance per device. This field is valid
+	 * only in Normal mode (operation_mode = 0).
+	 * values 33554432 to 1073741824 Ohms (in Q24 format)
+	 */
+	int16_t t0_cali_q6[SP_V2_NUM_MAX_SPKR];
+	/*
+	 * Calibration point temperature per device. This field is valid
+	 * in both Normal mode and Calibration mode.
+	 * values -1920 to 5120 degrees C (in Q6 format)
+	 */
+	uint32_t quick_calib_flag;
+	/*
+	 * Indicates whether calibration is to be done in quick mode or not.
+	 * This field is valid only in Calibration mode (operation_mode = 1).
+	 * 0 -- Disabled
+	 * 1 -- Enabled
+	 */
+} __packed;
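+
+/*
+ * Editor's note with an illustrative sketch: the Q24/Q6 fields above are
+ * plain fixed-point values, i.e. the physical quantity scaled by 2^24 or
+ * 2^6. Hypothetical helpers converting integer milliohms and millidegrees
+ * Celsius (fractional part truncated):
+ *
+ *	static s32 milliohms_to_q24(s64 milliohms)
+ *	{
+ *		return (s32)div_s64(milliohms << 24, 1000);
+ *	}
+ *
+ *	static s16 millidegc_to_q6(s64 millidegc)
+ *	{
+ *		return (s16)div_s64(millidegc << 6, 1000);
+ *	}
+ *
+ * E.g. 8 ohms -> 8 << 24 = 134217728 (within the documented 2..64 ohm
+ * range) and 25 degC -> 25 << 6 = 1600.
+ */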
+
+struct afe_sp_th_vi_ftm_cfg {
+	uint32_t minor_version;
+	uint32_t wait_time_ms[SP_V2_NUM_MAX_SPKR];
+	/*
+	 * Wait time to heat up speaker before collecting statistics
+	 * for ftm mode in ms.
+	 * values 0 to 4294967295 ms
+	 */
+	uint32_t ftm_time_ms[SP_V2_NUM_MAX_SPKR];
+	/*
+	 * duration for which FTM statistics are collected in ms.
+	 * values 0 to 2000 ms
+	 */
+} __packed;
+
+struct afe_sp_th_vi_ftm_params {
+	uint32_t minor_version;
+	int32_t dc_res_q24[SP_V2_NUM_MAX_SPKR];
+	/*
+	 * DC resistance value in q24 format
+	 * values 0 to 2147483647 Ohms (in Q24 format)
+	 */
+	int32_t temp_q22[SP_V2_NUM_MAX_SPKR];
+	/*
+	 * temperature value in q22 format
+	 * values -125829120 to 2147483647 degC (in Q22 format)
+	 */
+	uint32_t status[SP_V2_NUM_MAX_SPKR];
+	/*
+	 * FTM packet status
+	 * 0 - Incorrect operation mode. This status is returned
+	 *     when GET_PARAM is called in non-FTM mode.
+	 * 1 - Inactive mode -- Port is not yet started.
+	 * 2 - Wait state. wait_time_ms has not yet elapsed.
+	 * 3 - In progress state. ftm_time_ms has not yet elapsed.
+	 * 4 - Success.
+	 * 5 - Failed.
+	 */
+} __packed;
+
+struct afe_sp_th_vi_get_param {
+	struct apr_hdr hdr;
+	struct afe_port_cmd_get_param_v2 get_param;
+	struct afe_port_param_data_v2 pdata;
+	struct afe_sp_th_vi_ftm_params param;
+} __packed;
+
+struct afe_sp_th_vi_get_param_resp {
+	uint32_t status;
+	struct afe_port_param_data_v2 pdata;
+	struct afe_sp_th_vi_ftm_params param;
+} __packed;
+
+
+#define AFE_MODULE_SPEAKER_PROTECTION_V2_EX_VI	0x0001026F
+#define AFE_PARAM_ID_SP_V2_EX_VI_MODE_CFG	0x000102A1
+#define AFE_PARAM_ID_SP_V2_EX_VI_FTM_CFG	0x000102A2
+#define AFE_PARAM_ID_SP_V2_EX_VI_FTM_PARAMS	0x000102A3
+
+struct afe_sp_ex_vi_mode_cfg {
+	uint32_t minor_version;
+	uint32_t operation_mode;
+	/*
+	 * Operation mode of Excursion VI module.
+	 * 0 - Normal Running mode
+	 * 2 - FTM mode
+	 */
+} __packed;
+
+struct afe_sp_ex_vi_ftm_cfg {
+	uint32_t minor_version;
+	uint32_t wait_time_ms[SP_V2_NUM_MAX_SPKR];
+	/*
+	 * Wait time to heat up speaker before collecting statistics
+	 * for ftm mode in ms.
+	 * values 0 to 4294967295 ms
+	 */
+	uint32_t ftm_time_ms[SP_V2_NUM_MAX_SPKR];
+	/*
+	 * duration for which FTM statistics are collected in ms.
+	 * values 0 to 2000 ms
+	 */
+} __packed;
+
+struct afe_sp_ex_vi_ftm_params {
+	uint32_t minor_version;
+	int32_t freq_q20[SP_V2_NUM_MAX_SPKR];
+	/*
+	 * Resonance frequency in q20 format
+	 * values 0 to 2147483647 Hz (in Q20 format)
+	 */
+	int32_t resis_q24[SP_V2_NUM_MAX_SPKR];
+	/*
+	 * Mechanical resistance in q24 format
+	 * values 0 to 2147483647 Ohms (in Q24 format)
+	 */
+	int32_t qmct_q24[SP_V2_NUM_MAX_SPKR];
+	/*
+	 * Mechanical Qfactor in q24 format
+	 * values 0 to 2147483647 (in Q24 format)
+	 */
+	uint32_t status[SP_V2_NUM_MAX_SPKR];
+	/*
+	 * FTM packet status
+	 * 0 - Incorrect operation mode. This status is returned
+	 *     when GET_PARAM is called in non-FTM mode.
+	 * 1 - Inactive mode -- Port is not yet started.
+	 * 2 - Wait state. wait_time_ms has not yet elapsed.
+	 * 3 - In progress state. ftm_time_ms has not yet elapsed.
+	 * 4 - Success.
+	 * 5 - Failed.
+	 */
+} __packed;
+
+struct afe_sp_ex_vi_get_param {
+	struct apr_hdr hdr;
+	struct afe_port_cmd_get_param_v2 get_param;
+	struct afe_port_param_data_v2 pdata;
+	struct afe_sp_ex_vi_ftm_params param;
+} __packed;
+
+struct afe_sp_ex_vi_get_param_resp {
+	uint32_t status;
+	struct afe_port_param_data_v2 pdata;
+	struct afe_sp_ex_vi_ftm_params param;
+} __packed;
+
+union afe_spkr_prot_config {
+	struct asm_fbsp_mode_rx_cfg mode_rx_cfg;
+	struct asm_spkr_calib_vi_proc_cfg vi_proc_cfg;
+	struct asm_feedback_path_cfg feedback_path_cfg;
+	struct asm_mode_vi_proc_cfg mode_vi_proc_cfg;
+	struct afe_sp_th_vi_mode_cfg th_vi_mode_cfg;
+	struct afe_sp_th_vi_ftm_cfg th_vi_ftm_cfg;
+	struct afe_sp_ex_vi_mode_cfg ex_vi_mode_cfg;
+	struct afe_sp_ex_vi_ftm_cfg ex_vi_ftm_cfg;
+} __packed;
+
+struct afe_spkr_prot_config_command {
+	struct apr_hdr hdr;
+	struct afe_port_cmd_set_param_v2 param;
+	struct afe_port_param_data_v2 pdata;
+	union afe_spkr_prot_config prot_config;
+} __packed;
+
+struct afe_spkr_prot_get_vi_calib {
+	struct apr_hdr hdr;
+	struct afe_port_cmd_get_param_v2 get_param;
+	struct afe_port_param_data_v2 pdata;
+	struct asm_calib_res_cfg res_cfg;
+} __packed;
+
+struct afe_spkr_prot_calib_get_resp {
+	uint32_t status;
+	struct afe_port_param_data_v2 pdata;
+	struct asm_calib_res_cfg res_cfg;
+} __packed;
+
+
+/* SRS TruMedia start */
+/* topology */
+#define SRS_TRUMEDIA_TOPOLOGY_ID			0x00010D90
+/* module */
+#define SRS_TRUMEDIA_MODULE_ID				0x10005010
+/* parameters */
+#define SRS_TRUMEDIA_PARAMS				0x10005011
+#define SRS_TRUMEDIA_PARAMS_WOWHD			0x10005012
+#define SRS_TRUMEDIA_PARAMS_CSHP			0x10005013
+#define SRS_TRUMEDIA_PARAMS_HPF				0x10005014
+#define SRS_TRUMEDIA_PARAMS_AEQ				0x10005015
+#define SRS_TRUMEDIA_PARAMS_HL				0x10005016
+#define SRS_TRUMEDIA_PARAMS_GEQ				0x10005017
+
+#define SRS_ID_GLOBAL	0x00000001
+#define SRS_ID_WOWHD	0x00000002
+#define SRS_ID_CSHP	0x00000003
+#define SRS_ID_HPF	0x00000004
+#define SRS_ID_AEQ	0x00000005
+#define SRS_ID_HL	0x00000006
+#define SRS_ID_GEQ	0x00000007
+
+#define SRS_CMD_UPLOAD		0x7FFF0000
+#define SRS_PARAM_OFFSET_MASK	0x3FFF0000
+#define SRS_PARAM_VALUE_MASK	0x0000FFFF
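+
+/*
+ * Editor's illustrative sketch (an assumption, not confirmed by this
+ * header): given the mask layout above, a packed SRS parameter word
+ * presumably carries an offset in the bits covered by
+ * SRS_PARAM_OFFSET_MASK and a 16-bit value in the low bits, e.g.:
+ *
+ *	static u32 srs_pack_param(u32 offset, u16 value)
+ *	{
+ *		return ((offset << 16) & SRS_PARAM_OFFSET_MASK) |
+ *		       (value & SRS_PARAM_VALUE_MASK);
+ *	}
+ */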
+
+struct srs_trumedia_params_GLOBAL {
+	uint8_t                  v1;
+	uint8_t                  v2;
+	uint8_t                  v3;
+	uint8_t                  v4;
+	uint8_t                  v5;
+	uint8_t                  v6;
+	uint8_t                  v7;
+	uint8_t                  v8;
+	uint16_t                 v9;
+} __packed;
+
+struct srs_trumedia_params_WOWHD {
+	uint32_t				v1;
+	uint16_t				v2;
+	uint16_t				v3;
+	uint16_t				v4;
+	uint16_t				v5;
+	uint16_t				v6;
+	uint16_t				v7;
+	uint16_t				v8;
+	uint16_t				v____A1;
+	uint32_t				v9;
+	uint16_t				v10;
+	uint16_t				v11;
+	uint32_t				v12[16];
+	uint32_t	v13[16];
+	uint32_t	v14[16];
+	uint32_t	v15[16];
+	uint32_t	v16;
+	uint16_t	v17;
+	uint16_t	v18;
+} __packed;
+
+struct srs_trumedia_params_CSHP {
+	uint32_t		v1;
+	uint16_t		v2;
+	uint16_t		v3;
+	uint16_t		v4;
+	uint16_t		v5;
+	uint16_t		v6;
+	uint16_t		v____A1;
+	uint32_t		v7;
+	uint16_t		v8;
+	uint16_t		v9;
+	uint32_t		v10[16];
+} __packed;
+
+struct srs_trumedia_params_HPF {
+	uint32_t		v1;
+	uint32_t		v2[26];
+} __packed;
+
+struct srs_trumedia_params_AEQ {
+	uint32_t		v1;
+	uint16_t		v2;
+	uint16_t		v3;
+	uint16_t		v4;
+	uint16_t		v____A1;
+	uint32_t	v5[74];
+	uint32_t	v6[74];
+	uint16_t	v7[2048];
+} __packed;
+
+struct srs_trumedia_params_HL {
+	uint16_t		v1;
+	uint16_t		v2;
+	uint16_t		v3;
+	uint16_t		v____A1;
+	int32_t			v4;
+	uint32_t		v5;
+	uint16_t		v6;
+	uint16_t		v____A2;
+	uint32_t		v7;
+} __packed;
+
+struct srs_trumedia_params_GEQ {
+	int16_t		v1[10];
+} __packed;
+
+struct srs_trumedia_params {
+	struct srs_trumedia_params_GLOBAL	global;
+	struct srs_trumedia_params_WOWHD	wowhd;
+	struct srs_trumedia_params_CSHP		cshp;
+	struct srs_trumedia_params_HPF		hpf;
+	struct srs_trumedia_params_AEQ		aeq;
+	struct srs_trumedia_params_HL		hl;
+	struct srs_trumedia_params_GEQ		geq;
+} __packed;
+/* SRS TruMedia end */
+
+#define ASM_STREAM_POSTPROC_TOPO_ID_SA_PLUS 0x1000FFFF
+/* DTS Eagle */
+#define AUDPROC_MODULE_ID_DTS_HPX_PREMIX 0x0001077C
+#define AUDPROC_MODULE_ID_DTS_HPX_POSTMIX 0x0001077B
+#define ASM_STREAM_POSTPROC_TOPO_ID_DTS_HPX 0x00010DED
+#define ASM_STREAM_POSTPROC_TOPO_ID_HPX_PLUS  0x10015000
+#define ASM_STREAM_POSTPROC_TOPO_ID_HPX_MASTER  0x10015001
+struct asm_dts_eagle_param {
+	struct apr_hdr	hdr;
+	struct asm_stream_cmd_set_pp_params_v2 param;
+	struct asm_stream_param_data_v2 data;
+} __packed;
+
+struct asm_dts_eagle_param_get {
+	struct apr_hdr	hdr;
+	struct asm_stream_cmd_get_pp_params_v2 param;
+} __packed;
+
+/* Opcode to set BT address and license for aptx decoder */
+#define APTX_DECODER_BT_ADDRESS 0x00013201
+#define APTX_CLASSIC_DEC_LICENSE_ID 0x00013202
+
+struct aptx_dec_bt_addr_cfg {
+	uint32_t lap;
+	uint32_t uap;
+	uint32_t nap;
+} __packed;
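+
+/*
+ * Editor's illustrative sketch: splitting a 48-bit Bluetooth device
+ * address into the standard NAP (upper 16 bits), UAP (next 8 bits), and
+ * LAP (lower 24 bits) fields of the structure above. The helper name is
+ * hypothetical.
+ *
+ *	static void example_fill_bt_addr(struct aptx_dec_bt_addr_cfg *cfg,
+ *					 u64 bd_addr)
+ *	{
+ *		cfg->lap = (u32)(bd_addr & 0xFFFFFF);
+ *		cfg->uap = (u32)((bd_addr >> 24) & 0xFF);
+ *		cfg->nap = (u32)((bd_addr >> 32) & 0xFFFF);
+ *	}
+ */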
+
+struct aptx_dec_bt_dev_addr {
+	struct apr_hdr hdr;
+	struct asm_stream_cmd_set_encdec_param encdec;
+	struct aptx_dec_bt_addr_cfg bt_addr_cfg;
+} __packed;
+
+struct asm_aptx_dec_fmt_blk_v2 {
+	struct apr_hdr hdr;
+	struct asm_data_cmd_media_fmt_update_v2 fmtblk;
+	u32     sample_rate;
+/* Number of samples per second.
+ * Supported values: 44100 and 48000 Hz
+ */
+} __packed;
+
+/* LSM Specific */
+#define VW_FEAT_DIM					(39)
+
+#define APRV2_IDS_SERVICE_ID_ADSP_LSM_V			(0xD)
+#define APRV2_IDS_DOMAIN_ID_ADSP_V			(0x4)
+#define APRV2_IDS_DOMAIN_ID_APPS_V			(0x5)
+
+#define LSM_SESSION_CMD_SHARED_MEM_MAP_REGIONS		(0x00012A7F)
+#define LSM_SESSION_CMDRSP_SHARED_MEM_MAP_REGIONS	(0x00012A80)
+#define LSM_SESSION_CMD_SHARED_MEM_UNMAP_REGIONS	(0x00012A81)
+#define LSM_SESSION_CMD_OPEN_TX				(0x00012A82)
+#define LSM_SESSION_CMD_CLOSE_TX			(0x00012A88)
+#define LSM_SESSION_CMD_SET_PARAMS			(0x00012A83)
+#define LSM_SESSION_CMD_SET_PARAMS_V2			(0x00012A8F)
+#define LSM_SESSION_CMD_REGISTER_SOUND_MODEL		(0x00012A84)
+#define LSM_SESSION_CMD_DEREGISTER_SOUND_MODEL		(0x00012A85)
+#define LSM_SESSION_CMD_START				(0x00012A86)
+#define LSM_SESSION_CMD_STOP				(0x00012A87)
+#define LSM_SESSION_CMD_EOB				(0x00012A89)
+#define LSM_SESSION_CMD_READ				(0x00012A8A)
+#define LSM_SESSION_CMD_OPEN_TX_V2			(0x00012A8B)
+#define LSM_CMD_ADD_TOPOLOGIES				(0x00012A8C)
+
+#define LSM_SESSION_EVENT_DETECTION_STATUS		(0x00012B00)
+#define LSM_SESSION_EVENT_DETECTION_STATUS_V2		(0x00012B01)
+#define LSM_DATA_EVENT_READ_DONE			(0x00012B02)
+#define LSM_DATA_EVENT_STATUS				(0x00012B03)
+#define LSM_SESSION_EVENT_DETECTION_STATUS_V3		(0x00012B04)
+
+#define LSM_MODULE_ID_VOICE_WAKEUP			(0x00012C00)
+#define LSM_PARAM_ID_ENDPOINT_DETECT_THRESHOLD		(0x00012C01)
+#define LSM_PARAM_ID_OPERATION_MODE			(0x00012C02)
+#define LSM_PARAM_ID_GAIN				(0x00012C03)
+#define LSM_PARAM_ID_CONNECT_TO_PORT			(0x00012C04)
+#define LSM_PARAM_ID_FEATURE_COMPENSATION_DATA		(0x00012C07)
+#define LSM_PARAM_ID_MIN_CONFIDENCE_LEVELS		(0x00012C07)
+#define LSM_MODULE_ID_LAB				(0x00012C08)
+#define LSM_PARAM_ID_LAB_ENABLE				(0x00012C09)
+#define LSM_PARAM_ID_LAB_CONFIG				(0x00012C0A)
+#define LSM_MODULE_ID_FRAMEWORK				(0x00012C0E)
+#define LSM_PARAM_ID_SWMAD_CFG				(0x00012C18)
+#define LSM_PARAM_ID_SWMAD_MODEL			(0x00012C19)
+#define LSM_PARAM_ID_SWMAD_ENABLE			(0x00012C1A)
+#define LSM_PARAM_ID_POLLING_ENABLE			(0x00012C1B)
+#define LSM_PARAM_ID_MEDIA_FMT				(0x00012C1E)
+#define LSM_PARAM_ID_FWK_MODE_CONFIG			(0x00012C27)
+
+/* HW MAD specific */
+#define AFE_MODULE_HW_MAD				(0x00010230)
+#define AFE_PARAM_ID_HW_MAD_CFG				(0x00010231)
+#define AFE_PARAM_ID_HW_MAD_CTRL			(0x00010232)
+#define AFE_PARAM_ID_SLIMBUS_SLAVE_PORT_CFG		(0x00010233)
+
+/* SW MAD specific */
+#define AFE_MODULE_SW_MAD				(0x0001022D)
+#define AFE_PARAM_ID_SW_MAD_CFG				(0x0001022E)
+#define AFE_PARAM_ID_SVM_MODEL				(0x0001022F)
+
+/* Commands/Params to pass the codec/slimbus data to DSP */
+#define AFE_SVC_CMD_SET_PARAM				(0x000100f3)
+#define AFE_MODULE_CDC_DEV_CFG				(0x00010234)
+#define AFE_PARAM_ID_CDC_SLIMBUS_SLAVE_CFG		(0x00010235)
+#define AFE_PARAM_ID_CDC_REG_CFG			(0x00010236)
+#define AFE_PARAM_ID_CDC_REG_CFG_INIT			(0x00010237)
+#define AFE_PARAM_ID_CDC_REG_PAGE_CFG                   (0x00010296)
+
+#define AFE_MAX_CDC_REGISTERS_TO_CONFIG			(20)
+
+/* AANC Port Config Specific */
+#define AFE_PARAM_ID_AANC_PORT_CONFIG			(0x00010215)
+#define AFE_API_VERSION_AANC_PORT_CONFIG		(0x1)
+#define AANC_TX_MIC_UNUSED				(0)
+#define AANC_TX_VOICE_MIC				(1)
+#define AANC_TX_ERROR_MIC				(2)
+#define AANC_TX_NOISE_MIC				(3)
+#define AFE_PORT_MAX_CHANNEL_CNT			(8)
+#define AFE_MODULE_AANC					(0x00010214)
+#define AFE_PARAM_ID_CDC_AANC_VERSION			(0x0001023A)
+#define AFE_API_VERSION_CDC_AANC_VERSION		(0x1)
+#define AANC_HW_BLOCK_VERSION_1				(1)
+#define AANC_HW_BLOCK_VERSION_2				(2)
+
+/* Clip bank selection */
+#define AFE_API_VERSION_CLIP_BANK_SEL_CFG 0x1
+#define AFE_CLIP_MAX_BANKS		4
+#define AFE_PARAM_ID_CLIP_BANK_SEL_CFG 0x00010242
+
+struct afe_param_aanc_port_cfg {
+	/* Minor version used for tracking the version of the module's
+	 * source port configuration.
+	 */
+	uint32_t aanc_port_cfg_minor_version;
+
+	/* Sampling rate of the source Tx port. 8 kHz - 192 kHz. */
+	uint32_t tx_port_sample_rate;
+
+	/* Channel mapping for the Tx port signal carrying Noise (X),
+	 * Error (E), and Voice (V) signals.
+	 */
+	uint8_t tx_port_channel_map[AFE_PORT_MAX_CHANNEL_CNT];
+
+	/* Number of channels on the source Tx port. */
+	uint16_t tx_port_num_channels;
+
+	/* Port ID of the Rx path reference signal. */
+	uint16_t rx_path_ref_port_id;
+
+	/* Sampling rate of the reference port. 8 kHz - 192 kHz. */
+	uint32_t ref_port_sample_rate;
+} __packed;
+
+struct afe_param_id_cdc_aanc_version {
+	/* Minor version used for tracking the version of the module's
+	 * HW version.
+	 */
+	uint32_t cdc_aanc_minor_version;
+
+	/* HW version. */
+	uint32_t aanc_hw_version;
+} __packed;
+
+struct afe_param_id_clip_bank_sel {
+	/* Minor version used for tracking the version of the module's
+	 * HW version.
+	 */
+	uint32_t minor_version;
+
+	/* Number of banks to be read */
+	uint32_t num_banks;
+
+	uint32_t bank_map[AFE_CLIP_MAX_BANKS];
+} __packed;
+
+/* ERROR CODES */
+/* Success. The operation completed with no errors. */
+#define ADSP_EOK          0x00000000
+/* General failure. */
+#define ADSP_EFAILED      0x00000001
+/* Bad operation parameter. */
+#define ADSP_EBADPARAM    0x00000002
+/* Unsupported routine or operation. */
+#define ADSP_EUNSUPPORTED 0x00000003
+/* Unsupported version. */
+#define ADSP_EVERSION     0x00000004
+/* Unexpected problem encountered. */
+#define ADSP_EUNEXPECTED  0x00000005
+/* Unhandled problem occurred. */
+#define ADSP_EPANIC       0x00000006
+/* Unable to allocate resource. */
+#define ADSP_ENORESOURCE  0x00000007
+/* Invalid handle. */
+#define ADSP_EHANDLE      0x00000008
+/* Operation is already processed. */
+#define ADSP_EALREADY     0x00000009
+/* Operation is not ready to be processed. */
+#define ADSP_ENOTREADY    0x0000000A
+/* Operation is pending completion. */
+#define ADSP_EPENDING     0x0000000B
+/* Operation could not be accepted or processed. */
+#define ADSP_EBUSY        0x0000000C
+/* Operation aborted due to an error. */
+#define ADSP_EABORTED     0x0000000D
+/* Operation preempted by a higher priority. */
+#define ADSP_EPREEMPTED   0x0000000E
+/* Operation requests intervention to complete. */
+#define ADSP_ECONTINUE    0x0000000F
+/* Operation requests immediate intervention to complete. */
+#define ADSP_EIMMEDIATE   0x00000010
+/* Operation is not implemented. */
+#define ADSP_ENOTIMPL     0x00000011
+/* Operation needs more data or resources. */
+#define ADSP_ENEEDMORE    0x00000012
+/* Operation does not have memory. */
+#define ADSP_ENOMEMORY    0x00000014
+/* Item does not exist. */
+#define ADSP_ENOTEXIST    0x00000015
+/* Max count for adsp error code sent to HLOS*/
+#define ADSP_ERR_MAX      (ADSP_ENOTEXIST + 1)
+/* Operation is finished. */
+#define ADSP_ETERMINATED    0x00011174
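+
+/*
+ * Editor's illustrative sketch: mapping these status codes to strings for
+ * logging. Note the values are not fully contiguous (0x13 is unused, and
+ * ADSP_ETERMINATED lies outside the 0..ADSP_ERR_MAX range), so a switch is
+ * safer than a flat lookup table. The helper name is hypothetical.
+ *
+ *	static const char *adsp_err_str(u32 code)
+ *	{
+ *		switch (code) {
+ *		case ADSP_EOK:          return "ADSP_EOK";
+ *		case ADSP_EFAILED:      return "ADSP_EFAILED";
+ *		case ADSP_EBADPARAM:    return "ADSP_EBADPARAM";
+ *		case ADSP_ENOMEMORY:    return "ADSP_ENOMEMORY";
+ *		case ADSP_ENOTEXIST:    return "ADSP_ENOTEXIST";
+ *		case ADSP_ETERMINATED:  return "ADSP_ETERMINATED";
+ *		default:                return "ADSP_ERR_UNKNOWN";
+ *		}
+ *	}
+ *
+ * (The remaining codes follow the same pattern and are elided here.)
+ */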
+
+
+/* LPASS clock for I2S Interface */
+
+/* Supported OSR clock values */
+#define Q6AFE_LPASS_OSR_CLK_12_P288_MHZ		0xBB8000
+#define Q6AFE_LPASS_OSR_CLK_11_P2896_MHZ		0xAC4400
+#define Q6AFE_LPASS_OSR_CLK_9_P600_MHZ		0x927C00
+#define Q6AFE_LPASS_OSR_CLK_8_P192_MHZ		0x7D0000
+#define Q6AFE_LPASS_OSR_CLK_6_P144_MHZ		0x5DC000
+#define Q6AFE_LPASS_OSR_CLK_4_P096_MHZ		0x3E8000
+#define Q6AFE_LPASS_OSR_CLK_3_P072_MHZ		0x2EE000
+#define Q6AFE_LPASS_OSR_CLK_2_P048_MHZ		0x1F4000
+#define Q6AFE_LPASS_OSR_CLK_1_P536_MHZ		0x177000
+#define Q6AFE_LPASS_OSR_CLK_1_P024_MHZ		 0xFA000
+#define Q6AFE_LPASS_OSR_CLK_768_kHZ		 0xBB800
+#define Q6AFE_LPASS_OSR_CLK_512_kHZ		 0x7D000
+#define Q6AFE_LPASS_OSR_CLK_DISABLE		     0x0
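+
+/*
+ * Editor's note: the numeric value of each clock macro above is simply the
+ * frequency in hertz, e.g. 0xBB8000 == 12288000 (12.288 MHz) and
+ * 0x177000 == 1536000 (1.536 MHz). A sketch of a compile-time sanity check
+ * (placed inside any function):
+ *
+ *	BUILD_BUG_ON(Q6AFE_LPASS_OSR_CLK_12_P288_MHZ != 12288000);
+ *	BUILD_BUG_ON(Q6AFE_LPASS_OSR_CLK_1_P536_MHZ != 1536000);
+ */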
+
+/* Supported Bit clock values */
+#define Q6AFE_LPASS_IBIT_CLK_12_P288_MHZ	0xBB8000
+#define Q6AFE_LPASS_IBIT_CLK_11_P2896_MHZ	0xAC4400
+#define Q6AFE_LPASS_IBIT_CLK_8_P192_MHZ		0x7D0000
+#define Q6AFE_LPASS_IBIT_CLK_6_P144_MHZ		0x5DC000
+#define Q6AFE_LPASS_IBIT_CLK_4_P096_MHZ		0x3E8000
+#define Q6AFE_LPASS_IBIT_CLK_3_P072_MHZ		0x2EE000
+#define Q6AFE_LPASS_IBIT_CLK_2_P8224_MHZ		0x2b1100
+#define Q6AFE_LPASS_IBIT_CLK_2_P048_MHZ		0x1F4000
+#define Q6AFE_LPASS_IBIT_CLK_1_P536_MHZ		0x177000
+#define Q6AFE_LPASS_IBIT_CLK_1_P4112_MHZ		0x158880
+#define Q6AFE_LPASS_IBIT_CLK_1_P024_MHZ		 0xFA000
+#define Q6AFE_LPASS_IBIT_CLK_768_KHZ		 0xBB800
+#define Q6AFE_LPASS_IBIT_CLK_512_KHZ		 0x7D000
+#define Q6AFE_LPASS_IBIT_CLK_256_KHZ		 0x3E800
+#define Q6AFE_LPASS_IBIT_CLK_DISABLE		     0x0
+
+/* Supported LPASS CLK sources */
+#define Q6AFE_LPASS_CLK_SRC_EXTERNAL 0
+#define Q6AFE_LPASS_CLK_SRC_INTERNAL 1
+
+/* Supported LPASS CLK root*/
+#define Q6AFE_LPASS_CLK_ROOT_DEFAULT 0
+
+enum afe_lpass_clk_mode {
+	Q6AFE_LPASS_MODE_BOTH_INVALID,
+	Q6AFE_LPASS_MODE_CLK1_VALID,
+	Q6AFE_LPASS_MODE_CLK2_VALID,
+	Q6AFE_LPASS_MODE_BOTH_VALID,
+} __packed;
+
+/* Clock ID Enumeration Define. */
+/* Clock ID for Primary I2S IBIT */
+#define Q6AFE_LPASS_CLK_ID_PRI_MI2S_IBIT                          0x100
+/* Clock ID for Primary I2S EBIT */
+#define Q6AFE_LPASS_CLK_ID_PRI_MI2S_EBIT                          0x101
+/* Clock ID for Secondary I2S IBIT */
+#define Q6AFE_LPASS_CLK_ID_SEC_MI2S_IBIT                          0x102
+/* Clock ID for Secondary I2S EBIT */
+#define Q6AFE_LPASS_CLK_ID_SEC_MI2S_EBIT                          0x103
+/* Clock ID for Tertiary I2S IBIT */
+#define Q6AFE_LPASS_CLK_ID_TER_MI2S_IBIT                          0x104
+/* Clock ID for Tertiary I2S EBIT */
+#define Q6AFE_LPASS_CLK_ID_TER_MI2S_EBIT                          0x105
+/* Clock ID for Quaternary I2S IBIT */
+#define Q6AFE_LPASS_CLK_ID_QUAD_MI2S_IBIT                         0x106
+/* Clock ID for Quaternary I2S EBIT */
+#define Q6AFE_LPASS_CLK_ID_QUAD_MI2S_EBIT                         0x107
+/* Clock ID for Speaker I2S IBIT */
+#define Q6AFE_LPASS_CLK_ID_SPEAKER_I2S_IBIT                       0x108
+/* Clock ID for Speaker I2S EBIT */
+#define Q6AFE_LPASS_CLK_ID_SPEAKER_I2S_EBIT                       0x109
+/* Clock ID for Speaker I2S OSR */
+#define Q6AFE_LPASS_CLK_ID_SPEAKER_I2S_OSR                        0x10A
+
+/* Clock ID for QUINARY  I2S IBIT */
+#define Q6AFE_LPASS_CLK_ID_QUI_MI2S_IBIT			0x10B
+/* Clock ID for QUINARY  I2S EBIT */
+#define Q6AFE_LPASS_CLK_ID_QUI_MI2S_EBIT			0x10C
+/* Clock ID for SENARY  I2S IBIT */
+#define Q6AFE_LPASS_CLK_ID_SEN_MI2S_IBIT			0x10D
+/* Clock ID for SENARY  I2S EBIT */
+#define Q6AFE_LPASS_CLK_ID_SEN_MI2S_EBIT			0x10E
+/* Clock ID for INT0 I2S IBIT  */
+#define Q6AFE_LPASS_CLK_ID_INT0_MI2S_IBIT                       0x10F
+/* Clock ID for INT1 I2S IBIT  */
+#define Q6AFE_LPASS_CLK_ID_INT1_MI2S_IBIT                       0x110
+/* Clock ID for INT2 I2S IBIT  */
+#define Q6AFE_LPASS_CLK_ID_INT2_MI2S_IBIT                       0x111
+/* Clock ID for INT3 I2S IBIT  */
+#define Q6AFE_LPASS_CLK_ID_INT3_MI2S_IBIT                       0x112
+/* Clock ID for INT4 I2S IBIT  */
+#define Q6AFE_LPASS_CLK_ID_INT4_MI2S_IBIT                       0x113
+/* Clock ID for INT5 I2S IBIT  */
+#define Q6AFE_LPASS_CLK_ID_INT5_MI2S_IBIT                       0x114
+/* Clock ID for INT6 I2S IBIT  */
+#define Q6AFE_LPASS_CLK_ID_INT6_MI2S_IBIT                       0x115
+
+/* Clock ID for Primary PCM IBIT */
+#define Q6AFE_LPASS_CLK_ID_PRI_PCM_IBIT                           0x200
+/* Clock ID for Primary PCM EBIT */
+#define Q6AFE_LPASS_CLK_ID_PRI_PCM_EBIT                           0x201
+/* Clock ID for Secondary PCM IBIT */
+#define Q6AFE_LPASS_CLK_ID_SEC_PCM_IBIT                           0x202
+/* Clock ID for Secondary PCM EBIT */
+#define Q6AFE_LPASS_CLK_ID_SEC_PCM_EBIT                           0x203
+/* Clock ID for Tertiary PCM IBIT */
+#define Q6AFE_LPASS_CLK_ID_TER_PCM_IBIT                           0x204
+/* Clock ID for Tertiary PCM EBIT */
+#define Q6AFE_LPASS_CLK_ID_TER_PCM_EBIT                           0x205
+/* Clock ID for Quaternary PCM IBIT */
+#define Q6AFE_LPASS_CLK_ID_QUAD_PCM_IBIT                          0x206
+/* Clock ID for Quaternary PCM EBIT */
+#define Q6AFE_LPASS_CLK_ID_QUAD_PCM_EBIT                          0x207
+
+/** Clock ID for Primary TDM IBIT */
+#define Q6AFE_LPASS_CLK_ID_PRI_TDM_IBIT                           0x200
+/** Clock ID for Primary TDM EBIT */
+#define Q6AFE_LPASS_CLK_ID_PRI_TDM_EBIT                           0x201
+/** Clock ID for Secondary TDM IBIT */
+#define Q6AFE_LPASS_CLK_ID_SEC_TDM_IBIT                           0x202
+/** Clock ID for Secondary TDM EBIT */
+#define Q6AFE_LPASS_CLK_ID_SEC_TDM_EBIT                           0x203
+/** Clock ID for Tertiary TDM IBIT */
+#define Q6AFE_LPASS_CLK_ID_TER_TDM_IBIT                           0x204
+/** Clock ID for Tertiary TDM EBIT */
+#define Q6AFE_LPASS_CLK_ID_TER_TDM_EBIT                           0x205
+/** Clock ID for Quaternary TDM IBIT */
+#define Q6AFE_LPASS_CLK_ID_QUAD_TDM_IBIT                          0x206
+/** Clock ID for Quaternary TDM EBIT */
+#define Q6AFE_LPASS_CLK_ID_QUAD_TDM_EBIT                          0x207
+
+/* Clock ID for MCLK1 */
+#define Q6AFE_LPASS_CLK_ID_MCLK_1                                 0x300
+/* Clock ID for MCLK2 */
+#define Q6AFE_LPASS_CLK_ID_MCLK_2                                 0x301
+/* Clock ID for MCLK3 */
+#define Q6AFE_LPASS_CLK_ID_MCLK_3                                 0x302
+/* Clock ID for MCLK4 */
+#define Q6AFE_LPASS_CLK_ID_MCLK_4                                 0x304
+/* Clock ID for Internal Digital Codec Core */
+#define Q6AFE_LPASS_CLK_ID_INTERNAL_DIGITAL_CODEC_CORE            0x303
+/* Clock ID for INT MCLK0 */
+#define Q6AFE_LPASS_CLK_ID_INT_MCLK_0                             0x305
+/* Clock ID for INT MCLK1 */
+#define Q6AFE_LPASS_CLK_ID_INT_MCLK_1                             0x306
+/*
+ * Clock ID for Soundwire NPL.
+ * This clock is used to enable the NPL clock for the internal Soundwire bus.
+ */
+#define AFE_CLOCK_SET_CLOCK_ID_SWR_NPL_CLK                         0x307
+
+/* Clock ID for AHB HDMI input */
+#define Q6AFE_LPASS_CLK_ID_AHB_HDMI_INPUT                         0x400
+
+/* Clock ID for SPDIF core */
+#define Q6AFE_LPASS_CLK_ID_SPDIF_CORE                             0x500
+
+
+/* Clock attribute for invalid use (reserved for internal usage) */
+#define Q6AFE_LPASS_CLK_ATTRIBUTE_INVALID		0x0
+/* Clock attribute for no couple case */
+#define Q6AFE_LPASS_CLK_ATTRIBUTE_COUPLE_NO		0x1
+/* Clock attribute for dividend couple case */
+#define Q6AFE_LPASS_CLK_ATTRIBUTE_COUPLE_DIVIDEND	0x2
+/* Clock attribute for divisor couple case */
+#define Q6AFE_LPASS_CLK_ATTRIBUTE_COUPLE_DIVISOR	0x3
+/* Clock attribute for invert and no couple case */
+#define Q6AFE_LPASS_CLK_ATTRIBUTE_INVERT_COUPLE_NO	0x4
+/* Clock set API version */
+#define Q6AFE_LPASS_CLK_CONFIG_API_VERSION		0x1
+
+struct afe_clk_set {
+	/*
+	 * Minor version used for tracking clock set.
+	 *	@values #AFE_API_VERSION_CLOCK_SET
+	 */
+	uint32_t clk_set_minor_version;
+
+	/*
+	 * Clock ID
+	 *	@values
+	 *	- 0x100 to 0x10A - MSM8996
+	 *	- 0x200 to 0x207 - MSM8996
+	 *	- 0x300 to 0x302 - MSM8996 @tablebulletend
+	 */
+	uint32_t clk_id;
+
+	/*
+	 * Clock frequency  (in Hertz) to be set.
+	 *	@values
+	 *	- >= 0 for clock frequency to set @tablebulletend
+	 */
+	uint32_t clk_freq_in_hz;
+
+	/* Used to specify the divider relationship between two coupled
+	 *	clocks, if needed.
+	 *	Set to Q6AFE_LPASS_CLK_ATTRIBUTE_COUPLE_NO for clocks with
+	 *	no divider relationship.
+	 *	@values
+	 *	- #Q6AFE_LPASS_CLK_ATTRIBUTE_COUPLE_NO
+	 *	- #Q6AFE_LPASS_CLK_ATTRIBUTE_COUPLE_DIVIDEND
+	 *	- #Q6AFE_LPASS_CLK_ATTRIBUTE_COUPLE_DIVISOR @tablebulletend
+	 */
+	uint16_t clk_attri;
+
+	/*
+	 * Specifies the root clock source.
+	 *	Currently, only Q6AFE_LPASS_CLK_ROOT_DEFAULT is valid
+	 *	@values
+	 *	- 0 @tablebulletend
+	 */
+	uint16_t clk_root;
+
+	/*
+	 * Flag to enable or disable the clock.
+	 *	"clk_freq_in_hz", "clk_attri", and "clk_root"
+	 *	are ignored when the clock is disabled.
+	 *	@values
+	 *	- 0 -- Disabled
+	 *	- 1 -- Enabled  @tablebulletend
+	 */
+	uint32_t enable;
+};
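+
+/*
+ * Editor's illustrative sketch: enabling the primary MI2S internal bit
+ * clock at 1.536 MHz through this structure. The function name is
+ * hypothetical; the command wrapping is shown by
+ * struct afe_lpass_clk_config_command_v2 further below.
+ *
+ *	static void example_set_pri_mi2s_bclk(struct afe_clk_set *cfg)
+ *	{
+ *		cfg->clk_set_minor_version =
+ *			Q6AFE_LPASS_CLK_CONFIG_API_VERSION;
+ *		cfg->clk_id = Q6AFE_LPASS_CLK_ID_PRI_MI2S_IBIT;
+ *		cfg->clk_freq_in_hz = 1536000;
+ *		cfg->clk_attri = Q6AFE_LPASS_CLK_ATTRIBUTE_COUPLE_NO;
+ *		cfg->clk_root = Q6AFE_LPASS_CLK_ROOT_DEFAULT;
+ *		cfg->enable = 1;
+ *	}
+ */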
+
+struct afe_clk_cfg {
+/* Minor version used for tracking the version of the I2S
+ * configuration interface.
+ * Supported values: #AFE_API_VERSION_I2S_CONFIG
+ */
+	u32                  i2s_cfg_minor_version;
+
+/* clk value 1 in MHz. */
+	u32                  clk_val1;
+
+/* clk value 2 in MHz. */
+	u32                  clk_val2;
+
+/* clk_src
+ * #Q6AFE_LPASS_CLK_SRC_EXTERNAL
+ * #Q6AFE_LPASS_CLK_SRC_INTERNAL
+ */
+
+	u16                  clk_src;
+
+/* clk_root - 0 for default */
+	u16                  clk_root;
+
+/* clk_set_mode
+ * #Q6AFE_LPASS_MODE_BOTH_INVALID
+ * #Q6AFE_LPASS_MODE_CLK1_VALID
+ * #Q6AFE_LPASS_MODE_CLK2_VALID
+ * #Q6AFE_LPASS_MODE_BOTH_VALID
+ */
+	u16                  clk_set_mode;
+
+/* This field must be set to zero. */
+	u16                  reserved;
+} __packed;
+
+/* This param id is used to configure I2S clk */
+#define AFE_PARAM_ID_LPAIF_CLK_CONFIG	0x00010238
+#define AFE_MODULE_CLOCK_SET		0x0001028F
+#define AFE_PARAM_ID_CLOCK_SET		0x00010290
+
+struct afe_lpass_clk_config_command {
+	struct apr_hdr			 hdr;
+	struct afe_port_cmd_set_param_v2 param;
+	struct afe_port_param_data_v2    pdata;
+	struct afe_clk_cfg clk_cfg;
+} __packed;
+
+enum afe_lpass_digital_clk_src {
+	Q6AFE_LPASS_DIGITAL_ROOT_INVALID,
+	Q6AFE_LPASS_DIGITAL_ROOT_PRI_MI2S_OSR,
+	Q6AFE_LPASS_DIGITAL_ROOT_SEC_MI2S_OSR,
+	Q6AFE_LPASS_DIGITAL_ROOT_TER_MI2S_OSR,
+	Q6AFE_LPASS_DIGITAL_ROOT_QUAD_MI2S_OSR,
+	Q6AFE_LPASS_DIGITAL_ROOT_CDC_ROOT_CLK,
+} __packed;
+
+/* This param id is used to configure internal clk */
+#define AFE_PARAM_ID_INTERNAL_DIGIATL_CDC_CLK_CONFIG	0x00010239
+
+struct afe_digital_clk_cfg {
+/* Minor version used for tracking the version of the I2S
+ * configuration interface.
+ * Supported values: #AFE_API_VERSION_I2S_CONFIG
+ */
+	u32                  i2s_cfg_minor_version;
+
+/* clk value in MHz. */
+	u32                  clk_val;
+
+/*	INVALID
+ *	PRI_MI2S_OSR
+ *	SEC_MI2S_OSR
+ *	TER_MI2S_OSR
+ *	QUAD_MI2S_OSR
+ *	DIGT_CDC_ROOT
+ */
+	u16                  clk_root;
+
+/* This field must be set to zero. */
+	u16                  reserved;
+} __packed;
+
+
+struct afe_lpass_digital_clk_config_command {
+	struct apr_hdr			 hdr;
+	struct afe_port_cmd_set_param_v2 param;
+	struct afe_port_param_data_v2    pdata;
+	struct afe_digital_clk_cfg clk_cfg;
+} __packed;
+
+/*
+ * Opcode for AFE to start DTMF.
+ */
+#define AFE_PORTS_CMD_DTMF_CTL	0x00010102
+
+/** DTMF payload. */
+struct afe_dtmf_generation_command {
+	struct apr_hdr hdr;
+
+	/*
+	 * Duration of the DTMF tone in ms.
+	 * -1      -> continuous,
+	 *  0      -> disable
+	 */
+	int64_t                   duration_in_ms;
+
+	/*
+	 * The DTMF high tone frequency.
+	 */
+	uint16_t                  high_freq;
+
+	/*
+	 * The DTMF low tone frequency.
+	 */
+	uint16_t                  low_freq;
+
+	/*
+	 * The DTMF volume setting
+	 */
+	uint16_t                  gain;
+
+	/*
+	 * The number of ports on which to enable/disable the tone.
+	 */
+	uint16_t                  num_ports;
+
+	/*
+	 * Array of destination ports.
+	 * For DTMF on multiple ports, port_ids needs to
+	 * be populated num_ports times.
+	 */
+	uint16_t                  port_ids;
+
+	/*
+	 * Padding for 32-bit alignment of the APR packet.
+	 */
+	uint16_t                  reserved;
+} __packed;
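+
+/*
+ * Editor's illustrative sketch: generating the standard DTMF tone for
+ * digit '5' (row 770 Hz, column 1336 Hz) for one second on a single
+ * port. Field names are from the structure above; the gain and port ID
+ * values are placeholders, and the apr_hdr setup is elided.
+ *
+ *	cmd.hdr.opcode = AFE_PORTS_CMD_DTMF_CTL;
+ *	cmd.duration_in_ms = 1000;
+ *	cmd.high_freq = 1336;
+ *	cmd.low_freq = 770;
+ *	cmd.gain = 0x2000;		(hypothetical volume setting)
+ *	cmd.num_ports = 1;
+ *	cmd.port_ids = EXAMPLE_PORT_ID;	(placeholder, not a real macro)
+ */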
+
+enum afe_config_type {
+	AFE_SLIMBUS_SLAVE_PORT_CONFIG,
+	AFE_SLIMBUS_SLAVE_CONFIG,
+	AFE_CDC_REGISTERS_CONFIG,
+	AFE_AANC_VERSION,
+	AFE_CDC_CLIP_REGISTERS_CONFIG,
+	AFE_CLIP_BANK_SEL,
+	AFE_CDC_REGISTER_PAGE_CONFIG,
+	AFE_MAX_CONFIG_TYPES,
+};
+
+struct afe_param_slimbus_slave_port_cfg {
+	uint32_t minor_version;
+	uint16_t slimbus_dev_id;
+	uint16_t slave_dev_pgd_la;
+	uint16_t slave_dev_intfdev_la;
+	uint16_t bit_width;
+	uint16_t data_format;
+	uint16_t num_channels;
+	uint16_t slave_port_mapping[AFE_PORT_MAX_AUDIO_CHAN_CNT];
+} __packed;
+
+struct afe_param_cdc_slimbus_slave_cfg {
+	uint32_t minor_version;
+	uint32_t device_enum_addr_lsw;
+	uint32_t device_enum_addr_msw;
+	uint16_t tx_slave_port_offset;
+	uint16_t rx_slave_port_offset;
+} __packed;
+
+struct afe_param_cdc_reg_cfg {
+	uint32_t minor_version;
+	uint32_t reg_logical_addr;
+	uint32_t reg_field_type;
+	uint32_t reg_field_bit_mask;
+	uint16_t reg_bit_width;
+	uint16_t reg_offset_scale;
+} __packed;
+
+#define AFE_API_VERSION_CDC_REG_PAGE_CFG   1
+
+enum {
+	AFE_CDC_REG_PAGE_ASSIGN_PROC_ID_0 = 0,
+	AFE_CDC_REG_PAGE_ASSIGN_PROC_ID_1,
+	AFE_CDC_REG_PAGE_ASSIGN_PROC_ID_2,
+	AFE_CDC_REG_PAGE_ASSIGN_PROC_ID_3,
+};
+
+struct afe_param_cdc_reg_page_cfg {
+	uint32_t minor_version;
+	uint32_t enable;
+	uint32_t proc_id;
+} __packed;
+
+struct afe_param_cdc_reg_cfg_data {
+	uint32_t num_registers;
+	struct afe_param_cdc_reg_cfg *reg_data;
+} __packed;
+
+struct afe_svc_cmd_set_param {
+	uint32_t payload_size;
+	uint32_t payload_address_lsw;
+	uint32_t payload_address_msw;
+	uint32_t mem_map_handle;
+} __packed;
+
+struct afe_svc_param_data {
+	uint32_t module_id;
+	uint32_t param_id;
+	uint16_t param_size;
+	uint16_t reserved;
+} __packed;
+
+struct afe_param_hw_mad_ctrl {
+	uint32_t minor_version;
+	uint16_t mad_type;
+	uint16_t mad_enable;
+} __packed;
+
+struct afe_cmd_hw_mad_ctrl {
+	struct apr_hdr hdr;
+	struct afe_port_cmd_set_param_v2 param;
+	struct afe_port_param_data_v2 pdata;
+	struct afe_param_hw_mad_ctrl payload;
+} __packed;
+
+struct afe_cmd_hw_mad_slimbus_slave_port_cfg {
+	struct apr_hdr hdr;
+	struct afe_port_cmd_set_param_v2 param;
+	struct afe_port_param_data_v2 pdata;
+	struct afe_param_slimbus_slave_port_cfg sb_port_cfg;
+} __packed;
+
+struct afe_cmd_sw_mad_enable {
+	struct apr_hdr hdr;
+	struct afe_port_cmd_set_param_v2 param;
+	struct afe_port_param_data_v2 pdata;
+} __packed;
+
+struct afe_param_cdc_reg_cfg_payload {
+	struct afe_svc_param_data     common;
+	struct afe_param_cdc_reg_cfg  reg_cfg;
+} __packed;
+
+struct afe_lpass_clk_config_command_v2 {
+	struct apr_hdr			hdr;
+	struct afe_svc_cmd_set_param	param;
+	struct afe_svc_param_data	pdata;
+	struct afe_clk_set		clk_cfg;
+} __packed;
+
+/*
+ * reg_data's size can be up to AFE_MAX_CDC_REGISTERS_TO_CONFIG
+ */
+struct afe_svc_cmd_cdc_reg_cfg {
+	struct apr_hdr hdr;
+	struct afe_svc_cmd_set_param param;
+	struct afe_param_cdc_reg_cfg_payload reg_data[0];
+} __packed;
+
+struct afe_svc_cmd_init_cdc_reg_cfg {
+	struct apr_hdr hdr;
+	struct afe_svc_cmd_set_param param;
+	struct afe_port_param_data_v2 init;
+} __packed;
+
+struct afe_svc_cmd_sb_slave_cfg {
+	struct apr_hdr hdr;
+	struct afe_svc_cmd_set_param param;
+	struct afe_port_param_data_v2 pdata;
+	struct afe_param_cdc_slimbus_slave_cfg sb_slave_cfg;
+} __packed;
+
+struct afe_svc_cmd_cdc_reg_page_cfg {
+	struct apr_hdr hdr;
+	struct afe_svc_cmd_set_param param;
+	struct afe_port_param_data_v2 pdata;
+	struct afe_param_cdc_reg_page_cfg cdc_reg_page_cfg;
+} __packed;
+
+struct afe_svc_cmd_cdc_aanc_version {
+	struct apr_hdr hdr;
+	struct afe_svc_cmd_set_param param;
+	struct afe_port_param_data_v2 pdata;
+	struct afe_param_id_cdc_aanc_version version;
+} __packed;
+
+struct afe_port_cmd_set_aanc_param {
+	struct apr_hdr hdr;
+	struct afe_port_cmd_set_param_v2 param;
+	struct afe_port_param_data_v2 pdata;
+	union {
+		struct afe_param_aanc_port_cfg aanc_port_cfg;
+		struct afe_mod_enable_param    mod_enable;
+	} __packed data;
+} __packed;
+
+struct afe_port_cmd_set_aanc_acdb_table {
+	struct apr_hdr hdr;
+	struct afe_port_cmd_set_param_v2 param;
+} __packed;
+
+/* Dolby DAP topology */
+#define DOLBY_ADM_COPP_TOPOLOGY_ID	0x0001033B
+#define DS2_ADM_COPP_TOPOLOGY_ID	0x1301033B
+
+/* RMS value from DSP */
+#define RMS_MODULEID_APPI_PASSTHRU  0x10009011
+#define RMS_PARAM_FIRST_SAMPLE 0x10009012
+#define RMS_PAYLOAD_LEN 4
+
+/* Customized mixing in matrix mixer */
+#define MTMX_MODULE_ID_DEFAULT_CHMIXER  0x00010341
+#define DEFAULT_CHMIXER_PARAM_ID_COEFF  0x00010342
+#define CUSTOM_STEREO_PAYLOAD_SIZE	9
+#define CUSTOM_STEREO_CMD_PARAM_SIZE	24
+#define CUSTOM_STEREO_NUM_OUT_CH	0x0002
+#define CUSTOM_STEREO_NUM_IN_CH		0x0002
+#define CUSTOM_STEREO_INDEX_PARAM	0x0002
+#define Q14_GAIN_ZERO_POINT_FIVE	0x2000
+#define Q14_GAIN_UNITY			0x4000
+
+struct afe_svc_cmd_set_clip_bank_selection {
+	struct apr_hdr hdr;
+	struct afe_svc_cmd_set_param param;
+	struct afe_port_param_data_v2 pdata;
+	struct afe_param_id_clip_bank_sel bank_sel;
+} __packed;
+
+/* Ultrasound supported formats */
+#define US_POINT_EPOS_FORMAT_V2 0x0001272D
+#define US_RAW_FORMAT_V2        0x0001272C
+#define US_PROX_FORMAT_V4       0x0001273B
+#define US_RAW_SYNC_FORMAT      0x0001272F
+#define US_GES_SYNC_FORMAT      0x00012730
+
+#define AFE_MODULE_GROUP_DEVICE	0x00010254
+#define AFE_PARAM_ID_GROUP_DEVICE_CFG	0x00010255
+#define AFE_PARAM_ID_GROUP_DEVICE_ENABLE 0x00010256
+#define AFE_GROUP_DEVICE_ID_SECONDARY_MI2S_RX	0x1102
+
+/* Payload of the #AFE_PARAM_ID_GROUP_DEVICE_CFG
+ * parameter, which configures a maximum of 8 AFE ports
+ * into a group.
+ * The fixed size of this structure is twenty-four bytes.
+ */
+struct afe_group_device_group_cfg {
+	u32 minor_version;
+	u16 group_id;
+	u16 num_channels;
+	u16 port_id[8];
+} __packed;
+
+#define AFE_GROUP_DEVICE_ID_PRIMARY_TDM_RX \
+	(AFE_PORT_ID_PRIMARY_TDM_RX + 0x100)
+#define AFE_GROUP_DEVICE_ID_PRIMARY_TDM_TX \
+	(AFE_PORT_ID_PRIMARY_TDM_TX + 0x100)
+#define AFE_GROUP_DEVICE_ID_SECONDARY_TDM_RX \
+	(AFE_PORT_ID_SECONDARY_TDM_RX + 0x100)
+#define AFE_GROUP_DEVICE_ID_SECONDARY_TDM_TX \
+	(AFE_PORT_ID_SECONDARY_TDM_TX + 0x100)
+#define AFE_GROUP_DEVICE_ID_TERTIARY_TDM_RX \
+	(AFE_PORT_ID_TERTIARY_TDM_RX + 0x100)
+#define AFE_GROUP_DEVICE_ID_TERTIARY_TDM_TX \
+	(AFE_PORT_ID_TERTIARY_TDM_TX + 0x100)
+#define AFE_GROUP_DEVICE_ID_QUATERNARY_TDM_RX \
+	(AFE_PORT_ID_QUATERNARY_TDM_RX + 0x100)
+#define AFE_GROUP_DEVICE_ID_QUATERNARY_TDM_TX \
+	(AFE_PORT_ID_QUATERNARY_TDM_TX + 0x100)
+
+/** ID of the parameter used by #AFE_MODULE_GROUP_DEVICE to configure the
+	group device. #AFE_SVC_CMD_SET_PARAM can use this parameter ID.
+
+	Requirements:
+	- Configure the group before the member ports in the group are
+	configured and started.
+	- Enable the group only after it is configured.
+	- Stop all member ports in the group before disabling the group.
+*/
+#define AFE_PARAM_ID_GROUP_DEVICE_TDM_CONFIG	0x0001029E
+
+/** Version information used to handle future additions to
+	AFE_PARAM_ID_GROUP_DEVICE_TDM_CONFIG processing (for backward compatibility).
+ */
+#define AFE_API_VERSION_GROUP_DEVICE_TDM_CONFIG	0x1
+
+/** Number of AFE ports in group device  */
+#define AFE_GROUP_DEVICE_NUM_PORTS					8
+
+/* Payload of the AFE_PARAM_ID_GROUP_DEVICE_TDM_CONFIG parameter ID
+ * used by AFE_MODULE_GROUP_DEVICE.
+ */
+struct afe_param_id_group_device_tdm_cfg {
+	u32	group_device_cfg_minor_version;
+	/**< Minor version used to track group device configuration.
+	@values #AFE_API_VERSION_GROUP_DEVICE_TDM_CONFIG */
+
+	u16	group_id;
+	/**< ID for the group device.
+	@values
+	- #AFE_GROUP_DEVICE_ID_PRIMARY_TDM_RX
+	- #AFE_GROUP_DEVICE_ID_PRIMARY_TDM_TX
+	- #AFE_GROUP_DEVICE_ID_SECONDARY_TDM_RX
+	- #AFE_GROUP_DEVICE_ID_SECONDARY_TDM_TX
+	- #AFE_GROUP_DEVICE_ID_TERTIARY_TDM_RX
+	- #AFE_GROUP_DEVICE_ID_TERTIARY_TDM_TX
+	- #AFE_GROUP_DEVICE_ID_QUATERNARY_TDM_RX
+	- #AFE_GROUP_DEVICE_ID_QUATERNARY_TDM_TX */
+
+	u16	reserved;
+	/**< This field must be set to 0. */
+
+	u16	port_id[AFE_GROUP_DEVICE_NUM_PORTS];
+	/**< Array of member port IDs of this group.
+	@values
+	- #AFE_PORT_ID_PRIMARY_TDM_RX
+	- #AFE_PORT_ID_PRIMARY_TDM_RX_1
+	- #AFE_PORT_ID_PRIMARY_TDM_RX_2
+	- #AFE_PORT_ID_PRIMARY_TDM_RX_3
+	- #AFE_PORT_ID_PRIMARY_TDM_RX_4
+	- #AFE_PORT_ID_PRIMARY_TDM_RX_5
+	- #AFE_PORT_ID_PRIMARY_TDM_RX_6
+	- #AFE_PORT_ID_PRIMARY_TDM_RX_7
+
+	- #AFE_PORT_ID_PRIMARY_TDM_TX
+	- #AFE_PORT_ID_PRIMARY_TDM_TX_1
+	- #AFE_PORT_ID_PRIMARY_TDM_TX_2
+	- #AFE_PORT_ID_PRIMARY_TDM_TX_3
+	- #AFE_PORT_ID_PRIMARY_TDM_TX_4
+	- #AFE_PORT_ID_PRIMARY_TDM_TX_5
+	- #AFE_PORT_ID_PRIMARY_TDM_TX_6
+	- #AFE_PORT_ID_PRIMARY_TDM_TX_7
+
+	- #AFE_PORT_ID_SECONDARY_TDM_RX
+	- #AFE_PORT_ID_SECONDARY_TDM_RX_1
+	- #AFE_PORT_ID_SECONDARY_TDM_RX_2
+	- #AFE_PORT_ID_SECONDARY_TDM_RX_3
+	- #AFE_PORT_ID_SECONDARY_TDM_RX_4
+	- #AFE_PORT_ID_SECONDARY_TDM_RX_5
+	- #AFE_PORT_ID_SECONDARY_TDM_RX_6
+	- #AFE_PORT_ID_SECONDARY_TDM_RX_7
+
+	- #AFE_PORT_ID_SECONDARY_TDM_TX
+	- #AFE_PORT_ID_SECONDARY_TDM_TX_1
+	- #AFE_PORT_ID_SECONDARY_TDM_TX_2
+	- #AFE_PORT_ID_SECONDARY_TDM_TX_3
+	- #AFE_PORT_ID_SECONDARY_TDM_TX_4
+	- #AFE_PORT_ID_SECONDARY_TDM_TX_5
+	- #AFE_PORT_ID_SECONDARY_TDM_TX_6
+	- #AFE_PORT_ID_SECONDARY_TDM_TX_7
+
+	- #AFE_PORT_ID_TERTIARY_TDM_RX
+	- #AFE_PORT_ID_TERTIARY_TDM_RX_1
+	- #AFE_PORT_ID_TERTIARY_TDM_RX_2
+	- #AFE_PORT_ID_TERTIARY_TDM_RX_3
+	- #AFE_PORT_ID_TERTIARY_TDM_RX_4
+	- #AFE_PORT_ID_TERTIARY_TDM_RX_5
+	- #AFE_PORT_ID_TERTIARY_TDM_RX_6
+	- #AFE_PORT_ID_TERTIARY_TDM_RX_7
+
+	- #AFE_PORT_ID_TERTIARY_TDM_TX
+	- #AFE_PORT_ID_TERTIARY_TDM_TX_1
+	- #AFE_PORT_ID_TERTIARY_TDM_TX_2
+	- #AFE_PORT_ID_TERTIARY_TDM_TX_3
+	- #AFE_PORT_ID_TERTIARY_TDM_TX_4
+	- #AFE_PORT_ID_TERTIARY_TDM_TX_5
+	- #AFE_PORT_ID_TERTIARY_TDM_TX_6
+	- #AFE_PORT_ID_TERTIARY_TDM_TX_7
+
+	- #AFE_PORT_ID_QUATERNARY_TDM_RX
+	- #AFE_PORT_ID_QUATERNARY_TDM_RX_1
+	- #AFE_PORT_ID_QUATERNARY_TDM_RX_2
+	- #AFE_PORT_ID_QUATERNARY_TDM_RX_3
+	- #AFE_PORT_ID_QUATERNARY_TDM_RX_4
+	- #AFE_PORT_ID_QUATERNARY_TDM_RX_5
+	- #AFE_PORT_ID_QUATERNARY_TDM_RX_6
+	- #AFE_PORT_ID_QUATERNARY_TDM_RX_7
+
+	- #AFE_PORT_ID_QUATERNARY_TDM_TX
+	- #AFE_PORT_ID_QUATERNARY_TDM_TX_1
+	- #AFE_PORT_ID_QUATERNARY_TDM_TX_2
+	- #AFE_PORT_ID_QUATERNARY_TDM_TX_3
+	- #AFE_PORT_ID_QUATERNARY_TDM_TX_4
+	- #AFE_PORT_ID_QUATERNARY_TDM_TX_5
+	- #AFE_PORT_ID_QUATERNARY_TDM_TX_6
+	- #AFE_PORT_ID_QUATERNARY_TDM_TX_7
+	@tablebulletend */
+
+	u32	num_channels;
+	/**< Number of enabled slots for TDM frame.
+	@values 1 to 8 */
+
+	u32	sample_rate;
+	/**< Sampling rate of the port.
+	@values
+	- #AFE_PORT_SAMPLE_RATE_8K
+	- #AFE_PORT_SAMPLE_RATE_16K
+	- #AFE_PORT_SAMPLE_RATE_24K
+	- #AFE_PORT_SAMPLE_RATE_32K
+	- #AFE_PORT_SAMPLE_RATE_48K @tablebulletend */
+
+	u32	bit_width;
+	/**< Bit width of the sample.
+	@values 16, 24, (32) */
+
+	u16	nslots_per_frame;
+	/**< Number of slots per frame. Typical values: 1, 2, 4, 8, 16, 32.
+	@values 1 - 32 */
+
+	u16	slot_width;
+	/**< Width of each slot in a TDM frame; (slot_width >= bit_width)
+	must be satisfied.
+	@values 16, 24, 32 */
+
+	u32	slot_mask;
+	/**< Bit mask of active slots. When a bit is set, the corresponding
+	slot is active.
+	The number of active slots can be inferred from the number of bits
+	set in the mask. At most 8 individual bits can be enabled.
+	Bits 0..31 correspond to slots 0..31.
+	@values 1 to 2^32 - 1 */
+} __packed;
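+
+/*
+ * Editor's illustrative sketch: a group of four 16-bit slots in an
+ * 8-slot, 48 kHz TDM frame on the primary Rx group. The first four
+ * slots are active, so slot_mask has bits 0..3 set. The sample_rate
+ * literal assumes AFE_PORT_SAMPLE_RATE_48K == 48000 (defined elsewhere).
+ *
+ *	cfg.group_device_cfg_minor_version =
+ *		AFE_API_VERSION_GROUP_DEVICE_TDM_CONFIG;
+ *	cfg.group_id = AFE_GROUP_DEVICE_ID_PRIMARY_TDM_RX;
+ *	cfg.num_channels = 4;
+ *	cfg.sample_rate = 48000;
+ *	cfg.bit_width = 16;
+ *	cfg.nslots_per_frame = 8;
+ *	cfg.slot_width = 16;
+ *	cfg.slot_mask = 0x0F;		(slots 0, 1, 2, 3 active)
+ */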
+
+/* Payload of the #AFE_PARAM_ID_GROUP_DEVICE_ENABLE
+ * parameter, which enables or disables any module.
+ * The fixed size of this structure is four bytes.
+ */
+
+struct afe_group_device_enable {
+	u16 group_id;
+	/* valid value is AFE_GROUP_DEVICE_ID_SECONDARY_MI2S_RX */
+	u16 enable;
+	/* Enables (1) or disables (0) the module. */
+} __packed;
+
+union afe_port_group_config {
+	struct afe_group_device_group_cfg group_cfg;
+	struct afe_group_device_enable group_enable;
+	struct afe_param_id_group_device_tdm_cfg tdm_cfg;
+} __packed;
+
+struct afe_port_group_create {
+	struct apr_hdr hdr;
+	struct afe_svc_cmd_set_param param;
+	struct afe_port_param_data_v2 pdata;
+	union afe_port_group_config data;
+} __packed;
+
+/* ID of the parameter used by #AFE_MODULE_AUDIO_DEV_INTERFACE to specify
+ * the timing statistics of the corresponding device interface.
+ * Clients can periodically query the device timing statistics to help
+ * adjust the PLL based on the drift value. The get param command must be
+ * sent to the AFE port ID corresponding to the device interface.
+ *
+ * This parameter ID supports the following get param commands:
+ * #AFE_PORT_CMD_GET_PARAM_V2 and
+ * #AFE_PORT_CMD_GET_PARAM_V3.
+ */
+#define AFE_PARAM_ID_DEV_TIMING_STATS           0x000102AD
+
+/* Version information used to handle future additions to AFE device
+ * interface timing statistics (for backward compatibility).
+ */
+#define AFE_API_VERSION_DEV_TIMING_STATS        0x1
+
+/* Enumeration for specifying a sink (Rx) device */
+#define AFE_SINK_DEVICE                         0x0
+
+/* Enumeration for specifying a source (Tx) device */
+#define AFE_SOURCE_DEVICE                       0x1
+
+/* Enumeration for specifying the drift reference is of type AV Timer */
+#define AFE_REF_TIMER_TYPE_AVTIMER              0x0
+
+/* Message payload structure for the
+ * AFE_PARAM_ID_DEV_TIMING_STATS parameter.
+ */
+struct afe_param_id_dev_timing_stats {
+	/* Minor version used to track the version of device interface timing
+	 * statistics. Currently, the supported version is 1.
+	 * @values #AFE_API_VERSION_DEV_TIMING_STATS
+	 */
+	u32       minor_version;
+
+	/* Indicates the device interface direction as either
+	 * source (Tx) or sink (Rx).
+	 * @values
+	 * #AFE_SINK_DEVICE
+	 * #AFE_SOURCE_DEVICE
+	 */
+	u16        device_direction;
+
+	/* Reference timer for drift accumulation and time stamp information.
+	 * @values
+	 * #AFE_REF_TIMER_TYPE_AVTIMER @tablebulletend
+	 */
+	u16        reference_timer;
+
+	/*
+	 * Flag to indicate if resync is required on the client side for
+	 * drift correction. Flag is set to TRUE for the first get_param
+	 * response after device interface starts. This flag value can be
+	 * used by client to identify if device interface restart has
+	 * happened and if any re-sync is required at their end for drift
+	 * correction.
+	 * @values
+	 * 0: FALSE (Resync not required)
+	 * 1: TRUE (Resync required) @tablebulletend
+	 */
+	u32        resync_flag;
+
+	/* Accumulated drift value in microseconds. This value is updated
+	 * every 100 ms.
+	 * A positive drift value indicates the AV timer is running faster
+	 * than the device; a negative drift value indicates the AV timer
+	 * is running slower than the device.
+	 * @values Any valid int32 number
+	 */
+	s32         acc_drift_value;
+
+	/* Lower 32 bits of the 64-bit absolute timestamp of the reference
+	 * timer in microseconds.
+	 * This timestamp corresponds to the time when the drift values
+	 * are accumulated, every 100 ms.
+	 * @values Any valid uint32 number
+	 */
+	u32        ref_timer_abs_ts_lsw;
+
+	/* Upper 32 bits of the 64-bit absolute timestamp of the reference
+	 * timer in microseconds.
+	 * This timestamp corresponds to the time when the drift values
+	 * are accumulated, every 100 ms.
+	 * @values Any valid uint32 number
+	 */
+	u32        ref_timer_abs_ts_msw;
+} __packed;
+
+struct afe_av_dev_drift_get_param {
+	struct apr_hdr hdr;
+	struct afe_port_cmd_get_param_v2 get_param;
+	struct afe_port_param_data_v2 pdata;
+	struct afe_param_id_dev_timing_stats timing_stats;
+} __packed;
+
+struct afe_av_dev_drift_get_param_resp {
+	uint32_t status;
+	struct afe_port_param_data_v2 pdata;
+	struct afe_param_id_dev_timing_stats timing_stats;
+} __packed;
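+
+/*
+ * Editor's illustrative sketch: recombining the split 64-bit reference
+ * timer timestamp from a timing-stats response and interpreting the
+ * drift. Variable names are hypothetical.
+ *
+ *	struct afe_param_id_dev_timing_stats *ts = &resp->timing_stats;
+ *	u64 abs_us = ((u64)ts->ref_timer_abs_ts_msw << 32) |
+ *		     ts->ref_timer_abs_ts_lsw;
+ *	s32 drift_us = ts->acc_drift_value;
+ *
+ * drift_us > 0: the AV timer runs faster than the device;
+ * drift_us < 0: the AV timer runs slower than the device.
+ */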
+
+/* Command for Matrix or Stream Router */
+#define ASM_SESSION_CMD_SET_MTMX_STRTR_PARAMS_V2    0x00010DCE
+/* Module for AVSYNC */
+#define ASM_SESSION_MTMX_STRTR_MODULE_ID_AVSYNC    0x00010DC6
+
+/* Parameter used by #ASM_SESSION_MTMX_STRTR_MODULE_ID_AVSYNC to specify the
+ * render window start value. This parameter is supported only for a Set
+ * command (not a Get command) in the Rx direction
+ * (#ASM_SESSION_CMD_SET_MTMX_STRTR_PARAMS_V2).
+ * Render window start is a value (session time minus timestamp, or ST-TS)
+ * below which frames are held, and after which frames are immediately
+ * rendered.
+ */
+#define ASM_SESSION_MTMX_STRTR_PARAM_RENDER_WINDOW_START_V2 0x00010DD1
+
+/* Parameter used by #ASM_SESSION_MTMX_STRTR_MODULE_ID_AVSYNC to specify the
+ * render window end value. This parameter is supported only for a Set
+ * command (not a Get command) in the Rx direction
+ * (#ASM_SESSION_CMD_SET_MTMX_STRTR_PARAMS_V2). Render window end is a value
+ * (session time minus timestamp) above which frames are dropped, and below
+ * which frames are immediately rendered.
+ */
+#define ASM_SESSION_MTMX_STRTR_PARAM_RENDER_WINDOW_END_V2   0x00010DD2
+
+/* Generic payload of the window parameters in the
+ * #ASM_SESSION_MTMX_STRTR_MODULE_ID_AVSYNC module.
+ * This payload is supported only for a Set command
+ * (not a Get command) on the Rx path.
+ */
+struct asm_session_mtmx_strtr_param_window_v2_t {
+	u32    window_lsw;
+	/* Lower 32 bits of the render window start value. */
+
+	u32    window_msw;
+	/* Upper 32 bits of the render window start value.
+	 *
+	 * The 64-bit number formed by window_lsw and window_msw specifies a
+	 * signed 64-bit window value in microseconds. The sign extension is
+	 * necessary. This value is used by the following parameter IDs:
+	 * #ASM_SESSION_MTMX_STRTR_PARAM_RENDER_WINDOW_START_V2
+	 * #ASM_SESSION_MTMX_STRTR_PARAM_RENDER_WINDOW_END_V2
+	 * #ASM_SESSION_MTMX_STRTR_PARAM_STAT_WINDOW_START_V2
+	 * #ASM_SESSION_MTMX_STRTR_PARAM_STAT_WINDOW_END_V2
+	 * The value depends on which parameter ID is used.
+	 * The aDSP honors the windows at a granularity of 1 ms.
+	 */
+};
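+
+/*
+ * Editor's illustrative sketch: splitting a signed render window start of
+ * -40 ms into the two 32-bit fields above. Sign extension is preserved by
+ * the arithmetic shift on the signed 64-bit value.
+ *
+ *	s64 window_us = -40000;
+ *	win.window_lsw = (u32)window_us;
+ *	win.window_msw = (u32)(window_us >> 32);
+ */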
+
+struct asm_session_cmd_set_mtmx_strstr_params_v2 {
+	uint32_t                  data_payload_addr_lsw;
+	/* Lower 32 bits of the 64-bit data payload address. */
+
+	uint32_t                  data_payload_addr_msw;
+	/* Upper 32 bits of the 64-bit data payload address.
+	 * If the address is not sent (NULL), the message is in the payload.
+	 * If the address is sent (non-NULL), the parameter data payloads
+	 * begin at the specified address.
+	 */
+
+	uint32_t                  mem_map_handle;
+	/* Unique identifier for an address. This memory map handle is returned
+	 * by the aDSP through the #ASM_CMD_SHARED_MEM_MAP_REGIONS command.
+	 * values
+	 * - NULL -- Parameter data payloads are within the message payload
+	 * (in-band).
+	 * - Non-NULL -- Parameter data payloads begin at the address specified
+	 * in the data_payload_addr_lsw and data_payload_addr_msw fields
+	 * (out-of-band).
+	 */
+
+	uint32_t                  data_payload_size;
+	/* Actual size of the variable payload accompanying the message, or in
+	 * shared memory. This field is used for parsing the parameter payload.
+	 * values > 0 bytes
+	 */
+
+	uint32_t                  direction;
+	/* Direction of the entity (matrix mixer or stream router) on which
+	 * the parameter is to be set.
+	 * values
+	 * - 0 -- Rx (for Rx stream router or Rx matrix mixer)
+	 * - 1 -- Tx (for Tx stream router or Tx matrix mixer)
+	 */
+};
+
+/* Parameter used by #ASM_SESSION_MTMX_STRTR_MODULE_ID_AVSYNC which allows
+ * the audio client to choose the rendering decision that the audio DSP
+ * should use.
+ */
+#define ASM_SESSION_MTMX_STRTR_PARAM_RENDER_MODE_CMD  0x00012F0D
+
+/* Indicates that rendering decision will be based on default rate
+ * (session clock based rendering, device driven).
+ * 1. The default session clock based rendering is inherently driven
+ *    by the timing of the device.
+ * 2. After the initial decision is made (first buffer after a run
+ *    command), subsequent data rendering decisions are made with
+ *    respect to the rate at which the device is rendering, thus deriving
+ *    its timing from the device.
+ * 3. While this decision making is simple, it has some inherent limitations
+ *    (mentioned in the next section).
+ * 4. If this API is not set, the session clock based rendering will be assumed
+ *    and this will ensure that the DSP is backward compatible.
+ */
+#define ASM_SESSION_MTMX_STRTR_PARAM_RENDER_DEFAULT 0
+
+/* Indicates that rendering decision will be based on local clock rate.
+ * 1. In the DSP loopback/client loopback use cases (frame based
+ *    inputs), the incoming data into audio DSP is time-stamped at the
+ *    local clock rate (STC).
+ * 2. This TS rate may match the incoming data rate or maybe different
+ *    from the incoming data rate.
+ * 3. Regardless, the data will be time-stamped with local STC and
+ *    therefore, the client is recommended to set this mode for these
+ *    use cases. This method is inherently more robust to sequencing
+ *    (AFE Start/Stop) and device switches, among other benefits.
+ * 4. This API will inform the DSP to compare every incoming buffer TS
+ *    against local STC.
+ * 5. DSP will continue to honor render windows APIs, as before.
+ */
+#define ASM_SESSION_MTMX_STRTR_PARAM_RENDER_LOCAL_STC 1
+
+/* Structure for rendering decision parameter */
+struct asm_session_mtmx_strtr_param_render_mode_t {
+	/* Specifies the type of rendering decision the audio DSP should use.
+	 *
+	 * @values
+	 * - #ASM_SESSION_MTMX_STRTR_PARAM_RENDER_DEFAULT
+	 * - #ASM_SESSION_MTMX_STRTR_PARAM_RENDER_LOCAL_STC
+	 */
+	u32                  flags;
+} __packed;
+
+/* Parameter used by #ASM_SESSION_MTMX_STRTR_MODULE_ID_AVSYNC which allows the
+ * audio client to specify the clock recovery mechanism that the audio DSP
+ * should use.
+ */
+
+#define ASM_SESSION_MTMX_STRTR_PARAM_CLK_REC_CMD 0x00012F0E
+
+/* Indicates that default clock recovery will be used (no clock recovery).
+ * If the client wishes that no clock recovery be done, the client can
+ * choose this. This means that no attempt will made by the DSP to try and
+ * match the rates of the input and output audio.
+ */
+#define ASM_SESSION_MTMX_STRTR_PARAM_CLK_REC_NONE 0
+
+/* Indicates that independent clock recovery needs to be used.
+ * 1. In the DSP loopback/client loopback use cases (frame based inputs),
+ *    the client should choose the independent clock recovery option.
+ * 2. This basically de-couples the audio and video from knowing each others
+ *    clock sources and lets the audio DSP independently rate match the input
+ *    and output rates.
+ * 3. After drift detection, the drift correction is achieved by either pulling
+ *    the PLLs (if applicable) or by stream to device rate matching
+ *    (for PCM use cases) by comparing drift with respect to STC.
+ * 4. For passthrough use cases, since the PLL pulling is the only option,
+ *    a best effort will be made.
+ *    If PLL pulling is not possible / available, the rendering will be
+ *    done without rate matching.
+ */
+#define ASM_SESSION_MTMX_STRTR_PARAM_CLK_REC_AUTO 1
+
+/* Payload of the #ASM_SESSION_MTMX_STRTR_PARAM_CLK_REC_CMD parameter.
+ */
+struct asm_session_mtmx_strtr_param_clk_rec_t {
+	/* Specifies the type of clock recovery that the audio DSP should
+	 * use for rate matching.
+	 */
+
+	/* @values
+	 * #ASM_SESSION_MTMX_STRTR_PARAM_CLK_REC_NONE
+	 * #ASM_SESSION_MTMX_STRTR_PARAM_CLK_REC_AUTO
+	 */
+	u32                  flags;
+} __packed;
+
+
+/* Parameter used by #ASM_SESSION_MTMX_STRTR_MODULE_ID_AVSYNC to
+ * realize smoother adjustment of the audio session clock for a specified
+ * session. The desired audio session clock adjustment (in microseconds) is
+ * specified using the command #ASM_SESSION_CMD_ADJUST_SESSION_CLOCK_V2.
+ * Delaying or advancing the session clock is implemented by inserting
+ * interpolated audio samples or dropping audio samples, respectively, in
+ * the playback path.
+ * Also, this parameter has to be configured before the audio session is
+ * put into the RUN state to avoid cold-start latency/glitches in the
+ * playback.
+ */
+
+#define ASM_SESSION_MTMX_PARAM_ADJUST_SESSION_TIME_CTL         0x00013217
+
+struct asm_session_mtmx_param_adjust_session_time_ctl_t {
+	/* Specifies whether the module is enabled or not
+	 * @values
+	 * 0 -- disabled
+	 * 1 -- enabled
+	 */
+	u32                 enable;
+};
+
+union asm_session_mtmx_strtr_param_config {
+	struct asm_session_mtmx_strtr_param_window_v2_t window_param;
+	struct asm_session_mtmx_strtr_param_render_mode_t render_param;
+	struct asm_session_mtmx_strtr_param_clk_rec_t clk_rec_param;
+	struct asm_session_mtmx_param_adjust_session_time_ctl_t adj_time_param;
+} __packed;
+
+struct asm_mtmx_strtr_params {
+	struct apr_hdr  hdr;
+	struct asm_session_cmd_set_mtmx_strstr_params_v2 param;
+	struct asm_stream_param_data_v2 data;
+	union asm_session_mtmx_strtr_param_config config;
+} __packed;
+
+#define ASM_SESSION_CMD_GET_MTMX_STRTR_PARAMS_V2 0x00010DCF
+#define ASM_SESSION_CMDRSP_GET_MTMX_STRTR_PARAMS_V2 0x00010DD0
+
+#define ASM_SESSION_MTMX_STRTR_PARAM_SESSION_TIME_V3 0x00012F0B
+#define ASM_SESSION_MTMX_STRTR_PARAM_STIME_TSTMP_FLG_BMASK (0x80000000UL)
+
+struct asm_session_cmd_get_mtmx_strstr_params_v2 {
+	uint32_t                  data_payload_addr_lsw;
+	/* Lower 32 bits of the 64-bit data payload address. */
+
+	uint32_t                  data_payload_addr_msw;
+	/*
+	 * Upper 32 bits of the 64-bit data payload address.
+	 * If the address is not sent (NULL), the message is in the payload.
+	 * If the address is sent (non-NULL), the parameter data payloads
+	 * begin at the specified address.
+	 */
+
+	uint32_t                  mem_map_handle;
+	/*
+	 * Unique identifier for an address. This memory map handle is returned
+	 * by the aDSP through the #ASM_CMD_SHARED_MEM_MAP_REGIONS command.
+	 * values
+	 * - NULL -- Parameter data payloads are within the message payload
+	 * (in-band).
+	 * - Non-NULL -- Parameter data payloads begin at the address specified
+	 * in the data_payload_addr_lsw and data_payload_addr_msw fields
+	 * (out-of-band).
+	 */
+	uint32_t                  direction;
+	/*
+	 * Direction of the entity (matrix mixer or stream router) on which
+	 * the parameter is to be set.
+	 * values
+	 * - 0 -- Rx (for Rx stream router or Rx matrix mixer)
+	 * - 1 -- Tx (for Tx stream router or Tx matrix mixer)
+	 */
+	uint32_t                  module_id;
+	/* Unique module ID. */
+
+	uint32_t                  param_id;
+	/* Unique parameter ID. */
+
+	uint32_t                  param_max_size;
+};
+
+struct asm_session_mtmx_strtr_param_session_time_v3_t {
+	uint32_t                  session_time_lsw;
+	/* Lower 32 bits of the current session time in microseconds */
+
+	uint32_t                  session_time_msw;
+	/*
+	 * Upper 32 bits of the current session time in microseconds.
+	 * The 64-bit number formed by session_time_lsw and session_time_msw
+	 * is treated as signed.
+	 */
+
+	uint32_t                  absolute_time_lsw;
+	/*
+	 * Lower 32 bits of the 64-bit absolute time in microseconds.
+	 * This is the time when the sample corresponding to the
+	 * session_time_lsw is rendered to the hardware. This absolute
+	 * time can be slightly in the future or past.
+	 */
+
+	uint32_t                  absolute_time_msw;
+	/*
+	 * Upper 32 bits of the 64-bit absolute time in microseconds.
+	 * This is the time when the sample corresponding to the
+	 * session_time_msw is rendered to hardware. This absolute
+	 * time can be slightly in the future or past. The 64-bit number
+	 * formed by absolute_time_lsw and absolute_time_msw is treated as
+	 * unsigned.
+	 */
+
+	uint32_t                  time_stamp_lsw;
+	/* Lower 32 bits of the last processed timestamp in microseconds */
+
+	uint32_t                  time_stamp_msw;
+	/*
+	 * Upper 32 bits of the last processed timestamp in microseconds.
+	 * The 64-bit number formed by time_stamp_lsw and time_stamp_msw
+	 * is treated as unsigned.
+	 */
+
+	uint32_t                  flags;
+	/*
+	 * Keeps track of any additional flags needed.
+	 * @values{for bit 31}
+	 * - 0 -- Uninitialized/invalid
+	 * - 1 -- Valid
+	 * All other bits are reserved; clients must set them to zero.
+	 */
+};
+
+union asm_session_mtmx_strtr_data_type {
+	struct asm_session_mtmx_strtr_param_session_time_v3_t session_time;
+};
+
+struct asm_mtmx_strtr_get_params {
+	struct apr_hdr hdr;
+	struct asm_session_cmd_get_mtmx_strstr_params_v2 param_info;
+} __packed;
+
+struct asm_mtmx_strtr_get_params_cmdrsp {
+	uint32_t err_code;
+	struct asm_stream_param_data_v2 param_info;
+	union asm_session_mtmx_strtr_data_type param_data;
+} __packed;
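+
+/*
+ * Editor's illustrative sketch: consuming a session-time-v3 response.
+ * Bit 31 of flags (see
+ * ASM_SESSION_MTMX_STRTR_PARAM_STIME_TSTMP_FLG_BMASK) indicates whether
+ * the timestamp fields are valid. Variable names are hypothetical.
+ *
+ *	struct asm_session_mtmx_strtr_param_session_time_v3_t *t =
+ *		&rsp->param_data.session_time;
+ *	s64 session_us = ((s64)t->session_time_msw << 32) |
+ *			 t->session_time_lsw;
+ *	bool ts_valid = t->flags &
+ *		ASM_SESSION_MTMX_STRTR_PARAM_STIME_TSTMP_FLG_BMASK;
+ */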
+
+#define AUDPROC_MODULE_ID_RESAMPLER 0x00010719
+
+enum {
+	LEGACY_PCM = 0,
+	COMPRESSED_PASSTHROUGH,
+	COMPRESSED_PASSTHROUGH_CONVERT,
+	COMPRESSED_PASSTHROUGH_DSD,
+	LISTEN,
+	COMPRESSED_PASSTHROUGH_GEN,
+	COMPRESSED_PASSTHROUGH_IEC61937
+};
+
+#define AUDPROC_MODULE_ID_COMPRESSED_MUTE                0x00010770
+#define AUDPROC_PARAM_ID_COMPRESSED_MUTE                 0x00010771
+
+struct adm_set_compressed_device_mute {
+	struct adm_cmd_set_pp_params_v5 command;
+	struct adm_param_data_v5 params;
+	u32    mute_on;
+} __packed;
+
+#define AUDPROC_MODULE_ID_COMPRESSED_LATENCY             0x0001076E
+#define AUDPROC_PARAM_ID_COMPRESSED_LATENCY              0x0001076F
+
+struct adm_set_compressed_device_latency {
+	struct adm_cmd_set_pp_params_v5 command;
+	struct adm_param_data_v5 params;
+	u32    latency;
+} __packed;
+
+#define VOICEPROC_MODULE_ID_GENERIC_TX                      0x00010EF6
+#define VOICEPROC_PARAM_ID_FLUENCE_SOUNDFOCUS               0x00010E37
+#define VOICEPROC_PARAM_ID_FLUENCE_SOURCETRACKING           0x00010E38
+#define MAX_SECTORS                                         8
+#define MAX_NOISE_SOURCE_INDICATORS                         3
+#define MAX_POLAR_ACTIVITY_INDICATORS                       360
+
+struct sound_focus_param {
+	uint16_t start_angle[MAX_SECTORS];
+	uint8_t enable[MAX_SECTORS];
+	uint16_t gain_step;
+} __packed;
+
+struct source_tracking_param {
+	uint8_t vad[MAX_SECTORS];
+	uint16_t doa_speech;
+	uint16_t doa_noise[MAX_NOISE_SOURCE_INDICATORS];
+	uint8_t polar_activity[MAX_POLAR_ACTIVITY_INDICATORS];
+} __packed;
+
+struct adm_param_fluence_soundfocus_t {
+	uint16_t start_angles[MAX_SECTORS];
+	uint8_t enables[MAX_SECTORS];
+	uint16_t gain_step;
+	uint16_t reserved;
+} __packed;
+
+struct adm_set_fluence_soundfocus_param {
+	struct adm_cmd_set_pp_params_v5 params;
+	struct adm_param_data_v5 data;
+	struct adm_param_fluence_soundfocus_t soundfocus_data;
+} __packed;
+
+struct adm_param_fluence_sourcetracking_t {
+	uint8_t vad[MAX_SECTORS];
+	uint16_t doa_speech;
+	uint16_t doa_noise[MAX_NOISE_SOURCE_INDICATORS];
+	uint8_t polar_activity[MAX_POLAR_ACTIVITY_INDICATORS];
+} __packed;
+
+#define AUDPROC_MODULE_ID_AUDIOSPHERE               0x00010916
+#define AUDPROC_PARAM_ID_AUDIOSPHERE_ENABLE         0x00010917
+#define AUDPROC_PARAM_ID_AUDIOSPHERE_STRENGTH       0x00010918
+#define AUDPROC_PARAM_ID_AUDIOSPHERE_CONFIG_MODE    0x00010919
+
+#define AUDPROC_PARAM_ID_AUDIOSPHERE_COEFFS_STEREO_INPUT         0x0001091A
+#define AUDPROC_PARAM_ID_AUDIOSPHERE_COEFFS_MULTICHANNEL_INPUT   0x0001091B
+#define AUDPROC_PARAM_ID_AUDIOSPHERE_DESIGN_STEREO_INPUT         0x0001091C
+#define AUDPROC_PARAM_ID_AUDIOSPHERE_DESIGN_MULTICHANNEL_INPUT   0x0001091D
+
+#define AUDPROC_PARAM_ID_AUDIOSPHERE_OPERATING_INPUT_MEDIA_INFO  0x0001091E
+
+#define AUDPROC_MODULE_ID_VOICE_TX_SECNS   0x10027059
+#define AUDPROC_PARAM_IDX_SEC_PRIMARY_MIC_CH 0x10014444
+
+struct admx_sec_primary_mic_ch {
+	uint16_t version;
+	uint16_t reserved;
+	uint16_t sec_primary_mic_ch;
+	uint16_t reserved1;
+} __packed;
+
+
+struct adm_set_sec_primary_ch_params {
+	struct adm_cmd_set_pp_params_v5 params;
+	struct adm_param_data_v5 data;
+	struct admx_sec_primary_mic_ch sec_primary_mic_ch_data;
+} __packed;
+#endif /*_APR_AUDIO_V2_H_ */
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/include/sound/audio_calibration.h	2019-01-22 16:16:28.503291684 +0100
@@ -0,0 +1,40 @@
+/* Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef _AUDIO_CALIBRATION_H
+#define _AUDIO_CALIBRATION_H
+
+#include <linux/msm_audio_calibration.h>
+
+/* Used by the driver in the buffer_number field to notify the client
+ * to update all blocks, for example when freeing all memory. */
+#define ALL_CAL_BLOCKS		-1
+
+
+struct audio_cal_callbacks {
+	int (*alloc) (int32_t cal_type, size_t data_size, void *data);
+	int (*dealloc) (int32_t cal_type, size_t data_size, void *data);
+	int (*pre_cal) (int32_t cal_type, size_t data_size, void *data);
+	int (*set_cal) (int32_t cal_type, size_t data_size, void *data);
+	int (*get_cal) (int32_t cal_type, size_t data_size, void *data);
+	int (*post_cal) (int32_t cal_type, size_t data_size, void *data);
+};
+
+struct audio_cal_reg {
+	int32_t				cal_type;
+	struct audio_cal_callbacks	callbacks;
+};
+
+int audio_cal_register(int num_cal_types, struct audio_cal_reg *reg_data);
+int audio_cal_deregister(int num_cal_types, struct audio_cal_reg *reg_data);
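+
+/*
+ * Usage sketch (illustrative; MY_CAL_TYPE and my_set_cal are
+ * hypothetical): a client owning one calibration type registers a
+ * callback table once and deregisters it on teardown:
+ *
+ *	static struct audio_cal_reg reg = {
+ *		.cal_type = MY_CAL_TYPE,
+ *		.callbacks = { .set_cal = my_set_cal },
+ *	};
+ *	ret = audio_cal_register(1, &reg);
+ *	...
+ *	audio_cal_deregister(1, &reg);
+ */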
+
+#endif
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/include/sound/audio_cal_utils.h	2019-01-22 16:16:28.503291684 +0100
@@ -0,0 +1,102 @@
+/* Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef _AUDIO_CAL_UTILS_H
+#define _AUDIO_CAL_UTILS_H
+
+#include <linux/msm_ion.h>
+#include <linux/msm_audio_ion.h>
+#include <linux/msm_audio_calibration.h>
+#include "audio_calibration.h"
+
+struct cal_data {
+	size_t		size;
+	void		*kvaddr;
+	phys_addr_t	paddr;
+};
+
+struct mem_map_data {
+	size_t			map_size;
+	int32_t			q6map_handle;
+	int32_t			ion_map_handle;
+	struct ion_client	*ion_client;
+	struct ion_handle	*ion_handle;
+};
+
+struct cal_block_data {
+	size_t			client_info_size;
+	void			*client_info;
+	void			*cal_info;
+	struct list_head	list;
+	struct cal_data		cal_data;
+	struct mem_map_data	map_data;
+	int32_t			buffer_number;
+};
+
+struct cal_util_callbacks {
+	int (*map_cal)
+		(int32_t cal_type, struct cal_block_data *cal_block);
+	int (*unmap_cal)
+		(int32_t cal_type, struct cal_block_data *cal_block);
+	bool (*match_block)
+		(struct cal_block_data *cal_block, void *user_data);
+};
+
+struct cal_type_info {
+	struct audio_cal_reg		reg;
+	struct cal_util_callbacks	cal_util_callbacks;
+};
+
+struct cal_type_data {
+	struct cal_type_info		info;
+	struct mutex			lock;
+	struct list_head		cal_blocks;
+};
+
+
+/* to register & deregister with the cal util driver */
+int cal_utils_create_cal_types(int num_cal_types,
+			struct cal_type_data **cal_type,
+			struct cal_type_info *info);
+void cal_utils_destroy_cal_types(int num_cal_types,
+			struct cal_type_data **cal_type);
+
+/* common functions for callbacks */
+int cal_utils_alloc_cal(size_t data_size, void *data,
+			struct cal_type_data *cal_type,
+			size_t client_info_size, void *client_info);
+int cal_utils_dealloc_cal(size_t data_size, void *data,
+			struct cal_type_data *cal_type);
+int cal_utils_set_cal(size_t data_size, void *data,
+			struct cal_type_data *cal_type,
+			size_t client_info_size, void *client_info);
+
+/* used for SSR (subsystem restart) */
+void cal_utils_clear_cal_block_q6maps(int num_cal_types,
+					struct cal_type_data **cal_type);
+
+
+/* common matching functions used to add blocks */
+bool cal_utils_match_buf_num(struct cal_block_data *cal_block,
+					void *user_data);
+
+/* common matching functions to find cal blocks */
+struct cal_block_data *cal_utils_get_only_cal_block(
+			struct cal_type_data *cal_type);
+
+/* Size of calibration specific data */
+size_t get_cal_info_size(int32_t cal_type);
+size_t get_user_cal_type_size(int32_t cal_type);
+
+/* Version of the cal type */
+int32_t cal_utils_get_cal_type_version(void *cal_type_data);
+#endif
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/include/sound/audio_slimslave.h	2019-01-22 16:16:28.503291684 +0100
@@ -0,0 +1,18 @@
+#ifndef __AUDIO_SLIMSLAVE_H__
+#define __AUDIO_SLIMSLAVE_H__
+
+#include <linux/types.h>
+#include <linux/ioctl.h>
+
+#define AUDIO_SLIMSLAVE_IOCTL_NAME "audio_slimslave"
+#define AUDIO_SLIMSLAVE_MAGIC 'S'
+
+#define AUDIO_SLIMSLAVE_IOCTL_UNVOTE	_IO(AUDIO_SLIMSLAVE_MAGIC, 0x00)
+#define AUDIO_SLIMSLAVE_IOCTL_VOTE	_IO(AUDIO_SLIMSLAVE_MAGIC, 0x01)
+
+enum {
+	AUDIO_SLIMSLAVE_UNVOTE,
+	AUDIO_SLIMSLAVE_VOTE
+};
+
+#endif
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/include/sound/cpe_cmi.h	2019-01-22 16:16:28.503291684 +0100
@@ -0,0 +1,492 @@
+/*
+ * Copyright (c) 2014-2016, Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __CPE_CMI_H__
+#define __CPE_CMI_H__
+
+#include <linux/types.h>
+
+#define CPE_AFE_PORT_1_TX 1
+#define CPE_AFE_PORT_3_TX 3
+#define CPE_AFE_PORT_ID_2_OUT 0x02
+#define CMI_INBAND_MESSAGE_SIZE 127
+
+/*
+ * Multiple MAD types can be supported at once.
+ * These values can be OR'ed together to form the set of
+ * supported MAD types.
+ */
+#define MAD_TYPE_AUDIO (1 << 0)
+#define MAD_TYPE_BEACON (1 << 1)
+#define MAD_TYPE_ULTRASND (1 << 2)
+
+/* Core service command opcodes */
+#define CPE_CORE_SVC_CMD_SHARED_MEM_ALLOC	(0x3001)
+#define CPE_CORE_SVC_CMDRSP_SHARED_MEM_ALLOC	(0x3002)
+#define CPE_CORE_SVC_CMD_SHARED_MEM_DEALLOC	(0x3003)
+#define CPE_CORE_SVC_CMD_DRAM_ACCESS_REQ	(0x3004)
+#define CPE_CORE_SVC_EVENT_SYSTEM_BOOT		(0x3005)
+/* core service command opcodes for WCD9335 */
+#define CPE_CORE_SVC_CMD_CFG_CLK_PLAN		(0x3006)
+#define CPE_CORE_SVC_CMD_CLK_FREQ_REQUEST	(0x3007)
+
+#define CPE_BOOT_SUCCESS 0x00
+#define CPE_BOOT_FAILED 0x01
+
+#define CPE_CORE_VERSION_SYSTEM_BOOT_EVENT 0x01
+
+/* LSM Service command opcodes */
+#define CPE_LSM_SESSION_CMD_OPEN_TX		(0x2000)
+#define CPE_LSM_SESSION_CMD_SET_PARAMS		(0x2001)
+#define CPE_LSM_SESSION_CMD_REGISTER_SOUND_MODEL (0x2002)
+#define CPE_LSM_SESSION_CMD_DEREGISTER_SOUND_MODEL (0x2003)
+#define CPE_LSM_SESSION_CMD_START		(0x2004)
+#define CPE_LSM_SESSION_CMD_STOP		(0x2005)
+#define CPE_LSM_SESSION_EVENT_DETECTION_STATUS_V2 (0x2006)
+#define CPE_LSM_SESSION_CMD_CLOSE_TX		(0x2007)
+#define CPE_LSM_SESSION_CMD_SHARED_MEM_ALLOC	(0x2008)
+#define CPE_LSM_SESSION_CMDRSP_SHARED_MEM_ALLOC (0x2009)
+#define CPE_LSM_SESSION_CMD_SHARED_MEM_DEALLOC	(0x200A)
+#define CPE_LSM_SESSION_CMD_TX_BUFF_OUTPUT_CONFIG (0x200f)
+#define CPE_LSM_SESSION_CMD_OPEN_TX_V2		(0x200D)
+#define CPE_LSM_SESSION_CMD_SET_PARAMS_V2	(0x200E)
+
+/* LSM Service module and param IDs */
+#define CPE_LSM_MODULE_ID_VOICE_WAKEUP		(0x00012C00)
+#define CPE_LSM_MODULE_ID_VOICE_WAKEUP_V2	(0x00012C0D)
+#define CPE_LSM_MODULE_FRAMEWORK		(0x00012C0E)
+
+#define CPE_LSM_PARAM_ID_ENDPOINT_DETECT_THRESHOLD (0x00012C01)
+#define CPE_LSM_PARAM_ID_OPERATION_MODE		(0x00012C02)
+#define CPE_LSM_PARAM_ID_GAIN			(0x00012C03)
+#define CPE_LSM_PARAM_ID_CONNECT_TO_PORT	(0x00012C04)
+#define CPE_LSM_PARAM_ID_MIN_CONFIDENCE_LEVELS	(0x00012C07)
+
+/* LSM LAB command opcodes */
+#define CPE_LSM_SESSION_CMD_EOB		0x0000200B
+#define CPE_LSM_MODULE_ID_LAB		0x00012C08
+/* used to enable/disable LAB */
+#define CPE_LSM_PARAM_ID_LAB_ENABLE	0x00012C09
+/* used for the latency (T) in the LAB config of the DSP internal buffer */
+#define CPE_LSM_PARAM_ID_LAB_CONFIG	0x00012C0A
+#define CPE_LSM_PARAM_ID_REGISTER_SOUND_MODEL	(0x00012C14)
+#define CPE_LSM_PARAM_ID_DEREGISTER_SOUND_MODEL	(0x00012C15)
+#define CPE_LSM_PARAM_ID_MEDIA_FMT		(0x00012C1E)
+
+/* AFE Service command opcodes */
+#define CPE_AFE_PORT_CMD_START			(0x1001)
+#define CPE_AFE_PORT_CMD_STOP			(0x1002)
+#define CPE_AFE_PORT_CMD_SUSPEND		(0x1003)
+#define CPE_AFE_PORT_CMD_RESUME			(0x1004)
+#define CPE_AFE_PORT_CMD_SHARED_MEM_ALLOC	(0x1005)
+#define CPE_AFE_PORT_CMDRSP_SHARED_MEM_ALLOC	(0x1006)
+#define CPE_AFE_PORT_CMD_SHARED_MEM_DEALLOC	(0x1007)
+#define CPE_AFE_PORT_CMD_GENERIC_CONFIG		(0x1008)
+#define CPE_AFE_SVC_CMD_LAB_MODE		(0x1009)
+
+/* AFE Service module and param IDs */
+#define CPE_AFE_CMD_SET_PARAM			(0x1000)
+#define CPE_AFE_MODULE_ID_SW_MAD		(0x0001022D)
+#define CPE_AFE_PARAM_ID_SW_MAD_CFG		(0x0001022E)
+#define CPE_AFE_PARAM_ID_SVM_MODEL		(0x0001022F)
+
+#define CPE_AFE_MODULE_HW_MAD			(0x00010230)
+#define CPE_AFE_PARAM_ID_HW_MAD_CTL		(0x00010232)
+#define CPE_AFE_PARAM_ID_HW_MAD_CFG		(0x00010231)
+
+#define CPE_AFE_MODULE_AUDIO_DEV_INTERFACE	(0x0001020C)
+#define CPE_AFE_PARAM_ID_GENERIC_PORT_CONFIG	(0x00010253)
+
+#define CPE_CMI_BASIC_RSP_OPCODE	(0x0001)
+#define CPE_HDR_MAX_PLD_SIZE	(0x7F)
+
+#define CMI_OBM_FLAG_IN_BAND	0
+#define CMI_OBM_FLAG_OUT_BAND	1
+
+#define CMI_SHMEM_ALLOC_FAILED 0xff
+
+/*
+ * Future service IDs must be added just
+ * before CMI_CPE_SERVICE_ID_MAX
+ */
+enum {
+	CMI_CPE_SERVICE_ID_MIN = 0,
+	CMI_CPE_CORE_SERVICE_ID,
+	CMI_CPE_AFE_SERVICE_ID,
+	CMI_CPE_LSM_SERVICE_ID,
+	CMI_CPE_SERVICE_ID_MAX,
+};
+
+#define CPE_LSM_SESSION_ID_MAX 2
+
+#define IS_VALID_SESSION_ID(s_id) \
+	(s_id <= CPE_LSM_SESSION_ID_MAX)
+
+#define IS_VALID_SERVICE_ID(s_id) \
+	(s_id > CMI_CPE_SERVICE_ID_MIN && \
+	 s_id < CMI_CPE_SERVICE_ID_MAX)
+
+#define IS_VALID_PLD_SIZE(p_size) \
+	(p_size <= CPE_HDR_MAX_PLD_SIZE)
+
+#define CMI_HDR_SET_OPCODE(hdr, cmd) (hdr->opcode = cmd)
+
+
+#define CMI_HDR_SET(hdr_info, mask, shift, value) \
+		(hdr_info = (((hdr_info) & ~(mask)) | \
+			((value << shift) & mask)))
+
+#define SVC_ID_SHIFT 4
+#define SVC_ID_MASK (0x07 << SVC_ID_SHIFT)
+
+#define SESSION_ID_SHIFT 0
+#define SESSION_ID_MASK (0x0F << SESSION_ID_SHIFT)
+
+#define PAYLD_SIZE_SHIFT 0
+#define PAYLD_SIZE_MASK (0x7F << PAYLD_SIZE_SHIFT)
+
+#define OBM_FLAG_SHIFT 7
+#define OBM_FLAG_MASK (1 << OBM_FLAG_SHIFT)
+
+#define VERSION_SHIFT 7
+#define VERSION_MASK (1 << VERSION_SHIFT)
+
+#define CMI_HDR_SET_SERVICE(hdr, s_id) \
+		CMI_HDR_SET(hdr->hdr_info, SVC_ID_MASK,\
+			    SVC_ID_SHIFT, s_id)
+#define CMI_HDR_GET_SERVICE(hdr) \
+		((hdr->hdr_info >> SVC_ID_SHIFT) & \
+			(SVC_ID_MASK >> SVC_ID_SHIFT))
+
+
+#define CMI_HDR_SET_SESSION(hdr, s_id) \
+		CMI_HDR_SET(hdr->hdr_info, SESSION_ID_MASK,\
+			    SESSION_ID_SHIFT, s_id)
+
+#define CMI_HDR_GET_SESSION_ID(hdr) \
+		((hdr->hdr_info >> SESSION_ID_SHIFT) & \
+			 (SESSION_ID_MASK >> SESSION_ID_SHIFT))
+
+#define CMI_GET_HEADER(msg)	((struct cmi_hdr *)(msg))
+#define CMI_GET_PAYLOAD(msg)	((void *)(CMI_GET_HEADER(msg) + 1))
+#define CMI_GET_OPCODE(msg)	(CMI_GET_HEADER(msg)->opcode)
+
+#define CMI_HDR_SET_VERSION(hdr, ver) \
+		CMI_HDR_SET(hdr->hdr_info, VERSION_MASK, \
+				VERSION_SHIFT, ver)
+
+#define CMI_HDR_SET_PAYLOAD_SIZE(hdr, p_size) \
+		CMI_HDR_SET(hdr->pld_info, PAYLD_SIZE_MASK, \
+			    PAYLD_SIZE_SHIFT, p_size)
+
+#define CMI_HDR_GET_PAYLOAD_SIZE(hdr) \
+		((hdr->pld_info >> PAYLD_SIZE_SHIFT) & \
+			(PAYLD_SIZE_MASK >> PAYLD_SIZE_SHIFT))
+
+#define CMI_HDR_SET_OBM(hdr, obm_flag) \
+		CMI_HDR_SET(hdr->pld_info, OBM_FLAG_MASK, \
+			    OBM_FLAG_SHIFT, obm_flag)
+
+#define CMI_HDR_GET_OBM_FLAG(hdr) \
+	((hdr->pld_info >> OBM_FLAG_SHIFT) & \
+		(OBM_FLAG_MASK >> OBM_FLAG_SHIFT))
+
+struct cmi_hdr {
+	/*
+	 * bits 0:3 - session id
+	 * bits 4:6 - service id
+	 * bit 7    - version flag
+	 */
+	u8 hdr_info;
+
+	/*
+	 * bits 0:6 - payload size for an in-band message,
+	 *            or the OBM message size for an out-of-band message
+	 * bit 7    - OBM (out-of-band message) flag
+	 */
+	u8 pld_info;
+
+	/* 16 bit command opcode */
+	u16 opcode;
+} __packed;
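+
+/*
+ * Usage sketch (illustrative only; payload_size is a placeholder for
+ * the in-band payload length): given a struct cmi_hdr *hdr, an in-band
+ * LSM command header can be packed with the macros above:
+ *
+ *	CMI_HDR_SET_SERVICE(hdr, CMI_CPE_LSM_SERVICE_ID);
+ *	CMI_HDR_SET_SESSION(hdr, 1);
+ *	CMI_HDR_SET_VERSION(hdr, 0);
+ *	CMI_HDR_SET_OBM(hdr, CMI_OBM_FLAG_IN_BAND);
+ *	CMI_HDR_SET_PAYLOAD_SIZE(hdr, payload_size);
+ *	CMI_HDR_SET_OPCODE(hdr, CPE_LSM_SESSION_CMD_START);
+ */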
+
+union cpe_addr {
+	u64 msw_lsw;
+	void *kvaddr;
+} __packed;
+
+struct cmi_obm {
+	u32 version;
+	u32 size;
+	union cpe_addr data_ptr;
+	u32 mem_handle;
+} __packed;
+
+struct cmi_obm_msg {
+	struct cmi_hdr hdr;
+	struct cmi_obm pld;
+} __packed;
+
+struct cmi_core_svc_event_system_boot {
+	u8 status;
+	u8 version;
+	u16 sfr_buff_size;
+	u32 sfr_buff_address;
+} __packed;
+
+struct cmi_core_svc_cmd_shared_mem_alloc {
+	u32 size;
+} __packed;
+
+struct cmi_core_svc_cmdrsp_shared_mem_alloc {
+	u32 addr;
+} __packed;
+
+struct cmi_core_svc_cmd_clk_freq_request {
+	u32 clk_freq;
+} __packed;
+
+struct cmi_msg_transport {
+	u32 size;
+	u32 addr;
+} __packed;
+
+struct cmi_basic_rsp_result {
+	u8 status;
+} __packed;
+
+struct cpe_lsm_cmd_open_tx {
+	struct cmi_hdr	hdr;
+	u16 app_id;
+	u16 reserved;
+	u32 sampling_rate;
+} __packed;
+
+struct cpe_lsm_cmd_open_tx_v2 {
+	struct cmi_hdr hdr;
+	u32 topology_id;
+} __packed;
+
+struct cpe_cmd_shmem_alloc {
+	struct cmi_hdr hdr;
+	u32 size;
+} __packed;
+
+struct cpe_cmdrsp_shmem_alloc {
+	struct cmi_hdr hdr;
+	u32 addr;
+} __packed;
+
+struct cpe_cmd_shmem_dealloc {
+	struct cmi_hdr	hdr;
+	u32 addr;
+} __packed;
+
+struct cpe_lsm_event_detect_v2 {
+	struct cmi_hdr	hdr;
+	u8 detection_status;
+	u8 size;
+	u8 payload[0];
+} __packed;
+
+struct cpe_lsm_psize_res {
+	u16 param_size;
+	u16 reserved;
+} __packed;
+
+union cpe_lsm_param_size {
+	u32 param_size;
+	struct cpe_lsm_psize_res sr;
+} __packed;
+
+struct cpe_param_data {
+	u32 module_id;
+	u32 param_id;
+	union cpe_lsm_param_size p_size;
+} __packed;
+
+struct cpe_lsm_param_epd_thres {
+	struct cmi_hdr hdr;
+	struct cpe_param_data param;
+	u32 minor_version;
+	u32 epd_begin;
+	u32 epd_end;
+} __packed;
+
+struct cpe_lsm_param_gain {
+	struct cmi_hdr hdr;
+	struct cpe_param_data param;
+	u32 minor_version;
+	u16 gain;
+	u16 reserved;
+} __packed;
+
+struct cpe_afe_hw_mad_ctrl {
+	struct cpe_param_data param;
+	u32 minor_version;
+	u16 mad_type;
+	u16 mad_enable;
+} __packed;
+
+struct cpe_afe_port_cfg {
+	struct cpe_param_data param;
+	u32 minor_version;
+	u16 bit_width;
+	u16 num_channels;
+	u32 sample_rate;
+} __packed;
+
+struct cpe_afe_cmd_port_cfg {
+	struct cmi_hdr hdr;
+	u8 bit_width;
+	u8 num_channels;
+	u16 buffer_size;
+	u32 sample_rate;
+} __packed;
+
+struct cpe_afe_params {
+	struct cmi_hdr hdr;
+	struct cpe_afe_hw_mad_ctrl hw_mad_ctrl;
+	struct cpe_afe_port_cfg port_cfg;
+} __packed;
+
+struct cpe_afe_svc_cmd_mode {
+	struct cmi_hdr hdr;
+	u8 mode;
+} __packed;
+
+struct cpe_lsm_param_opmode {
+	struct cmi_hdr hdr;
+	struct cpe_param_data param;
+	u32 minor_version;
+	u16 mode;
+	u16 reserved;
+} __packed;
+
+struct cpe_lsm_param_connectport {
+	struct cmi_hdr hdr;
+	struct cpe_param_data param;
+	u32 minor_version;
+	u16 afe_port_id;
+	u16 reserved;
+} __packed;
+
+/*
+ * This cannot be sent to the CPE as is;
+ * the conf_levels must be appended dynamically.
+ */
+struct cpe_lsm_conf_level {
+	struct cmi_hdr hdr;
+	struct cpe_param_data param;
+	u8 num_active_models;
+} __packed;
+
+struct cpe_lsm_output_format_cfg {
+	struct cmi_hdr hdr;
+	u8 format;
+	u8 packing;
+	u8 data_path_events;
+} __packed;
+
+struct cpe_lsm_lab_enable {
+	struct cpe_param_data param;
+	u16 enable;
+	u16 reserved;
+} __packed;
+
+struct cpe_lsm_control_lab {
+	struct cmi_hdr hdr;
+	struct cpe_lsm_lab_enable lab_enable;
+} __packed;
+
+struct cpe_lsm_lab_config {
+	struct cpe_param_data param;
+	u32 minor_ver;
+	u32 latency;
+} __packed;
+
+struct cpe_lsm_lab_latency_config {
+	struct cmi_hdr hdr;
+	struct cpe_lsm_lab_config latency_cfg;
+} __packed;
+
+struct cpe_lsm_media_fmt_param {
+	struct cmi_hdr hdr;
+	struct cpe_param_data param;
+	u32 minor_version;
+	u32 sample_rate;
+	u16 num_channels;
+	u16 bit_width;
+} __packed;
+
+
+#define CPE_PARAM_LSM_LAB_LATENCY_SIZE (\
+				sizeof(struct cpe_lsm_lab_latency_config) - \
+				sizeof(struct cmi_hdr))
+#define PARAM_SIZE_LSM_LATENCY_SIZE (\
+					sizeof(struct cpe_lsm_lab_config) - \
+					sizeof(struct cpe_param_data))
+#define CPE_PARAM_SIZE_LSM_LAB_CONTROL (\
+				sizeof(struct cpe_lsm_control_lab) - \
+				sizeof(struct cmi_hdr))
+#define PARAM_SIZE_LSM_CONTROL_SIZE (sizeof(struct cpe_lsm_lab_enable) - \
+					sizeof(struct cpe_param_data))
+#define PARAM_SIZE_AFE_HW_MAD_CTRL (sizeof(struct cpe_afe_hw_mad_ctrl) - \
+				sizeof(struct cpe_param_data))
+#define PARAM_SIZE_AFE_PORT_CFG (sizeof(struct cpe_afe_port_cfg) - \
+				 sizeof(struct cpe_param_data))
+#define CPE_AFE_PARAM_PAYLOAD_SIZE (sizeof(struct cpe_afe_params) - \
+				sizeof(struct cmi_hdr))
+
+#define OPEN_CMD_PAYLOAD_SIZE (sizeof(struct cpe_lsm_cmd_open_tx) - \
+			       sizeof(struct cmi_hdr))
+#define OPEN_V2_CMD_PAYLOAD_SIZE (sizeof(struct cpe_lsm_cmd_open_tx_v2) - \
+			       sizeof(struct cmi_hdr))
+#define SHMEM_ALLOC_CMD_PLD_SIZE (sizeof(struct cpe_cmd_shmem_alloc) - \
+				      sizeof(struct cmi_hdr))
+
+#define SHMEM_DEALLOC_CMD_PLD_SIZE (sizeof(struct cpe_cmd_shmem_dealloc) - \
+				      sizeof(struct cmi_hdr))
+#define OUT_FMT_CFG_CMD_PAYLOAD_SIZE ( \
+		sizeof(struct cpe_lsm_output_format_cfg) - \
+		sizeof(struct cmi_hdr))
+
+#define CPE_AFE_CMD_PORT_CFG_PAYLOAD_SIZE \
+		(sizeof(struct cpe_afe_cmd_port_cfg) - \
+		 sizeof(struct cmi_hdr))
+
+#define CPE_AFE_CMD_MODE_PAYLOAD_SIZE \
+		(sizeof(struct cpe_afe_svc_cmd_mode) - \
+		 sizeof(struct cmi_hdr))
+#define CPE_CMD_EPD_THRES_PLD_SIZE (sizeof(struct cpe_lsm_param_epd_thres) - \
+				    sizeof(struct cmi_hdr))
+#define CPE_EPD_THRES_PARAM_SIZE ((CPE_CMD_EPD_THRES_PLD_SIZE) - \
+				  sizeof(struct cpe_param_data))
+#define CPE_CMD_OPMODE_PLD_SIZE (sizeof(struct cpe_lsm_param_opmode) - \
+				 sizeof(struct cmi_hdr))
+#define CPE_OPMODE_PARAM_SIZE ((CPE_CMD_OPMODE_PLD_SIZE) -\
+			       sizeof(struct cpe_param_data))
+#define CPE_CMD_CONNECTPORT_PLD_SIZE \
+	(sizeof(struct cpe_lsm_param_connectport) - \
+	 sizeof(struct cmi_hdr))
+#define CPE_CONNECTPORT_PARAM_SIZE ((CPE_CMD_CONNECTPORT_PLD_SIZE) - \
+				    sizeof(struct cpe_param_data))
+#define CPE_CMD_GAIN_PLD_SIZE (sizeof(struct cpe_lsm_param_gain) - \
+			       sizeof(struct cmi_hdr))
+#define CPE_GAIN_PARAM_SIZE ((CPE_CMD_GAIN_PLD_SIZE) - \
+			     sizeof(struct cpe_param_data))
+#define CPE_MEDIA_FMT_PLD_SIZE (sizeof(struct cpe_lsm_media_fmt_param) - \
+				sizeof(struct cmi_hdr))
+#define CPE_MEDIA_FMT_PARAM_SIZE ((CPE_MEDIA_FMT_PLD_SIZE) - \
+				  sizeof(struct cpe_param_data))
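+
+/*
+ * Note on the *_PLD_SIZE/*_PARAM_SIZE macros above: a CMI payload size
+ * is the full message size minus the CMI header, and a parameter size
+ * additionally subtracts the embedded cpe_param_data descriptor.
+ */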
+#endif /* __CPE_CMI_H__ */
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/include/sound/cpe_core.h	2019-01-22 16:16:28.503291684 +0100
@@ -0,0 +1,179 @@
+/*
+ * Copyright (c) 2013-2017, Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __CPE_CORE_H__
+#define __CPE_CORE_H__
+
+#include <linux/types.h>
+#include <linux/wait.h>
+#include <linux/dma-mapping.h>
+#include <sound/lsm_params.h>
+
+enum {
+	CMD_INIT_STATE = 0,
+	CMD_SENT,
+	CMD_RESP_RCVD,
+};
+
+enum wcd_cpe_event {
+	WCD_CPE_PRE_ENABLE = 1,
+	WCD_CPE_POST_ENABLE,
+	WCD_CPE_PRE_DISABLE,
+	WCD_CPE_POST_DISABLE,
+};
+
+struct wcd_cpe_afe_port_cfg {
+	u8 port_id;
+	u16 bit_width;
+	u16 num_channels;
+	u32 sample_rate;
+};
+
+struct lsm_out_fmt_cfg {
+	u8 format;
+	u8 pack_mode;
+	u8 data_path_events;
+	u8 transfer_mode;
+};
+
+struct lsm_hw_params {
+	u32 sample_rate;
+	u16 num_chs;
+	u16 bit_width;
+};
+
+struct cpe_lsm_session {
+	/* sound model related */
+	void *snd_model_data;
+	u8 *conf_levels;
+	void *cmi_reg_handle;
+
+	/* Client's private data */
+	void *priv_d;
+
+	void (*event_cb) (void *priv_data,
+			  u8 detect_status,
+			  u8 size, u8 *payload);
+
+	struct completion cmd_comp;
+	struct wcd_cpe_afe_port_cfg afe_port_cfg;
+	struct wcd_cpe_afe_port_cfg afe_out_port_cfg;
+	struct mutex lsm_lock;
+
+	u32 snd_model_size;
+	u32 lsm_mem_handle;
+	u16 cmd_err_code;
+	u8 id;
+	u8 num_confidence_levels;
+	u16 afe_out_port_id;
+	struct task_struct *lsm_lab_thread;
+	bool started;
+
+	u32 lab_enable;
+	struct lsm_out_fmt_cfg out_fmt_cfg;
+
+	bool is_topology_used;
+};
+
+struct wcd_cpe_afe_ops {
+	int (*afe_set_params) (void *core_handle,
+			       struct wcd_cpe_afe_port_cfg *cfg,
+			       bool afe_mad_ctl);
+
+	int (*afe_port_start) (void *core_handle,
+			       struct wcd_cpe_afe_port_cfg *cfg);
+
+	int (*afe_port_stop) (void *core_handle,
+			       struct wcd_cpe_afe_port_cfg *cfg);
+
+	int (*afe_port_suspend) (void *core_handle,
+			       struct wcd_cpe_afe_port_cfg *cfg);
+
+	int (*afe_port_resume) (void *core_handle,
+			       struct wcd_cpe_afe_port_cfg *cfg);
+
+	int (*afe_port_cmd_cfg)(void *core_handle,
+				struct wcd_cpe_afe_port_cfg *cfg);
+};
+
+struct wcd_cpe_lsm_ops {
+
+	struct cpe_lsm_session *(*lsm_alloc_session)
+			(void *core_handle, void *lsm_priv_d,
+			 void (*event_cb) (void *priv_data,
+					   u8 detect_status,
+					   u8 size, u8 *payload));
+
+	int (*lsm_dealloc_session)
+		(void *core_handle, struct cpe_lsm_session *);
+
+	int (*lsm_open_tx) (void *core_handle,
+			    struct cpe_lsm_session *, u16, u16);
+
+	int (*lsm_close_tx) (void *core_handle,
+			     struct cpe_lsm_session *);
+
+	int (*lsm_shmem_alloc) (void *core_handle,
+				struct cpe_lsm_session *, u32 size);
+
+	int (*lsm_shmem_dealloc) (void *core_handle,
+				  struct cpe_lsm_session *);
+
+	int (*lsm_register_snd_model) (void *core_handle,
+				       struct cpe_lsm_session *,
+				       enum lsm_detection_mode, bool);
+
+	int (*lsm_deregister_snd_model) (void *core_handle,
+					 struct cpe_lsm_session *);
+
+	int (*lsm_get_afe_out_port_id)(void *core_handle,
+			       struct cpe_lsm_session *session);
+
+	int (*lsm_start) (void *core_handle,
+			  struct cpe_lsm_session *);
+
+	int (*lsm_stop) (void *core_handle,
+			 struct cpe_lsm_session *);
+
+	int (*lsm_lab_control)(void *core_handle,
+			       struct cpe_lsm_session *session,
+			       bool enable);
+
+	int (*lab_ch_setup)(void *core_handle,
+				   struct cpe_lsm_session *session,
+				   enum wcd_cpe_event event);
+
+	int (*lsm_set_data) (void *core_handle,
+			struct cpe_lsm_session *session,
+			enum lsm_detection_mode detect_mode,
+			bool detect_failure);
+	int (*lsm_set_fmt_cfg)(void *core_handle,
+			struct cpe_lsm_session *session);
+	int (*lsm_set_one_param)(void *core_handle,
+			struct cpe_lsm_session *session,
+			struct lsm_params_info *p_info,
+			void *data, uint32_t param_type);
+	void (*lsm_get_snd_model_offset)
+		(void *core_handle, struct cpe_lsm_session *,
+		 size_t *offset);
+	int (*lsm_set_media_fmt_params)(void *core_handle,
+				       struct cpe_lsm_session *session,
+				       struct lsm_hw_params *param);
+	int (*lsm_set_port)(void *core_handle,
+			    struct cpe_lsm_session *session, void *data);
+};
+
+int wcd_cpe_get_lsm_ops(struct wcd_cpe_lsm_ops *);
+int wcd_cpe_get_afe_ops(struct wcd_cpe_afe_ops *);
+void *wcd_cpe_get_core_handle(struct snd_soc_codec *);
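+
+/*
+ * Usage sketch (illustrative only; codec, priv and event_cb are
+ * placeholders): a codec driver resolves the ops tables once and then
+ * drives sessions through them:
+ *
+ *	struct wcd_cpe_lsm_ops lsm_ops;
+ *	void *core = wcd_cpe_get_core_handle(codec);
+ *
+ *	if (!wcd_cpe_get_lsm_ops(&lsm_ops))
+ *		session = lsm_ops.lsm_alloc_session(core, priv, event_cb);
+ */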
+#endif
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/include/sound/cpe_err.h	2019-01-22 16:16:28.503291684 +0100
@@ -0,0 +1,166 @@
+/*
+ * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __CPE_ERR__
+#define __CPE_ERR__
+
+#include <linux/errno.h>
+
+/* ERROR CODES */
+/* Success. The operation completed with no errors. */
+#define CPE_EOK          0x00000000
+/* General failure. */
+#define CPE_EFAILED      0x00000001
+/* Bad operation parameter. */
+#define CPE_EBADPARAM    0x00000002
+/* Unsupported routine or operation. */
+#define CPE_EUNSUPPORTED 0x00000003
+/* Unsupported version. */
+#define CPE_EVERSION     0x00000004
+/* Unexpected problem encountered. */
+#define CPE_EUNEXPECTED  0x00000005
+/* Unhandled problem occurred. */
+#define CPE_EPANIC       0x00000006
+/* Unable to allocate resource. */
+#define CPE_ENORESOURCE  0x00000007
+/* Invalid handle. */
+#define CPE_EHANDLE      0x00000008
+/* Operation is already processed. */
+#define CPE_EALREADY     0x00000009
+/* Operation is not ready to be processed. */
+#define CPE_ENOTREADY    0x0000000A
+/* Operation is pending completion. */
+#define CPE_EPENDING     0x0000000B
+/* Operation could not be accepted or processed. */
+#define CPE_EBUSY        0x0000000C
+/* Operation aborted due to an error. */
+#define CPE_EABORTED     0x0000000D
+/* Operation preempted by a higher priority. */
+#define CPE_EPREEMPTED   0x0000000E
+/* Operation requests intervention to complete. */
+#define CPE_ECONTINUE    0x0000000F
+/* Operation requests immediate intervention to complete. */
+#define CPE_EIMMEDIATE   0x00000010
+/* Operation is not implemented. */
+#define CPE_ENOTIMPL     0x00000011
+/* Operation needs more data or resources. */
+#define CPE_ENEEDMORE    0x00000012
+/* Operation does not have memory. */
+#define CPE_ENOMEMORY    0x00000014
+/* Item does not exist. */
+#define CPE_ENOTEXIST    0x00000015
+/* Operation is finished. */
+#define CPE_ETERMINATED  0x00000016
+/* Max count for aDSP error codes sent to the HLOS */
+#define CPE_ERR_MAX      (CPE_ETERMINATED + 1)
+
+
+/* ERROR STRING */
+/* Success. The operation completed with no errors. */
+#define CPE_EOK_STR          "CPE_EOK"
+/* General failure. */
+#define CPE_EFAILED_STR      "CPE_EFAILED"
+/* Bad operation parameter. */
+#define CPE_EBADPARAM_STR    "CPE_EBADPARAM"
+/* Unsupported routine or operation. */
+#define CPE_EUNSUPPORTED_STR "CPE_EUNSUPPORTED"
+/* Unsupported version. */
+#define CPE_EVERSION_STR     "CPE_EVERSION"
+/* Unexpected problem encountered. */
+#define CPE_EUNEXPECTED_STR  "CPE_EUNEXPECTED"
+/* Unhandled problem occurred. */
+#define CPE_EPANIC_STR       "CPE_EPANIC"
+/* Unable to allocate resource. */
+#define CPE_ENORESOURCE_STR  "CPE_ENORESOURCE"
+/* Invalid handle. */
+#define CPE_EHANDLE_STR      "CPE_EHANDLE"
+/* Operation is already processed. */
+#define CPE_EALREADY_STR     "CPE_EALREADY"
+/* Operation is not ready to be processed. */
+#define CPE_ENOTREADY_STR    "CPE_ENOTREADY"
+/* Operation is pending completion. */
+#define CPE_EPENDING_STR     "CPE_EPENDING"
+/* Operation could not be accepted or processed. */
+#define CPE_EBUSY_STR        "CPE_EBUSY"
+/* Operation aborted due to an error. */
+#define CPE_EABORTED_STR     "CPE_EABORTED"
+/* Operation preempted by a higher priority. */
+#define CPE_EPREEMPTED_STR   "CPE_EPREEMPTED"
+/* Operation requests intervention to complete. */
+#define CPE_ECONTINUE_STR    "CPE_ECONTINUE"
+/* Operation requests immediate intervention to complete. */
+#define CPE_EIMMEDIATE_STR   "CPE_EIMMEDIATE"
+/* Operation is not implemented. */
+#define CPE_ENOTIMPL_STR     "CPE_ENOTIMPL"
+/* Operation needs more data or resources. */
+#define CPE_ENEEDMORE_STR    "CPE_ENEEDMORE"
+/* Operation does not have memory. */
+#define CPE_ENOMEMORY_STR    "CPE_ENOMEMORY"
+/* Item does not exist. */
+#define CPE_ENOTEXIST_STR    "CPE_ENOTEXIST"
+/* Operation is finished. */
+#define CPE_ETERMINATED_STR  "CPE_ETERMINATED"
+/* Unexpected error code. */
+#define CPE_ERR_MAX_STR      "CPE_ERR_MAX"
+
+
+struct cpe_err_code {
+	int     lnx_err_code;
+	char    *cpe_err_str;
+};
+
+
+static struct cpe_err_code cpe_err_code_info[CPE_ERR_MAX+1] = {
+	{ 0, CPE_EOK_STR},
+	{ -ENOTRECOVERABLE, CPE_EFAILED_STR},
+	{ -EINVAL, CPE_EBADPARAM_STR},
+	{ -ENOSYS, CPE_EUNSUPPORTED_STR},
+	{ -ENOPROTOOPT, CPE_EVERSION_STR},
+	{ -ENOTRECOVERABLE, CPE_EUNEXPECTED_STR},
+	{ -ENOTRECOVERABLE, CPE_EPANIC_STR},
+	{ -ENOSPC, CPE_ENORESOURCE_STR},
+	{ -EBADR, CPE_EHANDLE_STR},
+	{ -EALREADY, CPE_EALREADY_STR},
+	{ -EPERM, CPE_ENOTREADY_STR},
+	{ -EINPROGRESS, CPE_EPENDING_STR},
+	{ -EBUSY, CPE_EBUSY_STR},
+	{ -ECANCELED, CPE_EABORTED_STR},
+	{ -EAGAIN, CPE_EPREEMPTED_STR},
+	{ -EAGAIN, CPE_ECONTINUE_STR},
+	{ -EAGAIN, CPE_EIMMEDIATE_STR},
+	{ -EAGAIN, CPE_ENOTIMPL_STR},
+	{ -ENODATA, CPE_ENEEDMORE_STR},
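+	/* 0x13 is not assigned a CPE error code above; this placeholder
+	 * keeps the table indexed directly by the error code value.
+	 */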
+	{ -EADV, CPE_ERR_MAX_STR},
+	{ -ENOMEM, CPE_ENOMEMORY_STR},
+	{ -ENODEV, CPE_ENOTEXIST_STR},
+	{ -EADV, CPE_ETERMINATED_STR},
+	{ -EADV, CPE_ERR_MAX_STR},
+};
+
+static inline int cpe_err_get_lnx_err_code(u32 cpe_error)
+{
+	if (cpe_error > CPE_ERR_MAX)
+		return cpe_err_code_info[CPE_ERR_MAX].lnx_err_code;
+	else
+		return cpe_err_code_info[cpe_error].lnx_err_code;
+}
+
+static inline char *cpe_err_get_err_str(u32 cpe_error)
+{
+	if (cpe_error > CPE_ERR_MAX)
+		return cpe_err_code_info[CPE_ERR_MAX].cpe_err_str;
+	else
+		return cpe_err_code_info[cpe_error].cpe_err_str;
+}
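+
+/*
+ * Usage sketch (illustrative only): drivers map a status byte returned
+ * by the CPE into a Linux error code and a printable name:
+ *
+ *	ret = cpe_err_get_lnx_err_code(rsp->status);
+ *	pr_err("cmd failed: %s\n", cpe_err_get_err_str(rsp->status));
+ *
+ * where rsp is a struct cmi_basic_rsp_result pointer.
+ */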
+
+#endif
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/include/sound/msm-audio-effects-q6-v2.h	2019-01-22 16:16:28.507291721 +0100
@@ -0,0 +1,55 @@
+/*
+ * Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _MSM_AUDIO_EFFECTS_H
+#define _MSM_AUDIO_EFFECTS_H
+
+#include <sound/audio_effects.h>
+
+#define MAX_PP_PARAMS_SZ   128
+
+bool msm_audio_effects_is_effmodule_supp_in_top(int effect_module,
+						int topology);
+
+int msm_audio_effects_enable_extn(struct audio_client *ac,
+			     struct msm_nt_eff_all_config *effects,
+			     bool flag);
+
+int msm_audio_effects_reverb_handler(struct audio_client *ac,
+				     struct reverb_params *reverb,
+				     long *values);
+
+int msm_audio_effects_bass_boost_handler(struct audio_client *ac,
+					struct bass_boost_params *bass_boost,
+					long *values);
+
+int msm_audio_effects_pbe_handler(struct audio_client *ac,
+					struct pbe_params *pbe,
+					long *values);
+
+int msm_audio_effects_virtualizer_handler(struct audio_client *ac,
+				struct virtualizer_params *virtualizer,
+				long *values);
+
+int msm_audio_effects_popless_eq_handler(struct audio_client *ac,
+					 struct eq_params *eq,
+					 long *values);
+
+int msm_audio_effects_volume_handler(struct audio_client *ac,
+				     struct soft_volume_params *vol,
+				     long *values);
+
+int msm_audio_effects_volume_handler_v2(struct audio_client *ac,
+					struct soft_volume_params *vol,
+					long *values, int instance);
+#endif /*_MSM_AUDIO_EFFECTS_H*/
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/include/sound/msm-dai-q6-v2.h	2019-01-22 16:16:28.507291721 +0100
@@ -0,0 +1,92 @@
+/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MSM_DAI_Q6_PDATA_H__
+
+#define __MSM_DAI_Q6_PDATA_H__
+
+#define MSM_MI2S_SD0 (1 << 0)
+#define MSM_MI2S_SD1 (1 << 1)
+#define MSM_MI2S_SD2 (1 << 2)
+#define MSM_MI2S_SD3 (1 << 3)
+#define MSM_MI2S_CAP_RX 0
+#define MSM_MI2S_CAP_TX 1
+
+#define MSM_PRIM_MI2S 0
+#define MSM_SEC_MI2S  1
+#define MSM_TERT_MI2S 2
+#define MSM_QUAT_MI2S  3
+#define MSM_SEC_MI2S_SD1  4
+#define MSM_QUIN_MI2S  5
+#define MSM_SENARY_MI2S  6
+#define MSM_INT0_MI2S  7
+#define MSM_INT1_MI2S  8
+#define MSM_INT2_MI2S  9
+#define MSM_INT3_MI2S  10
+#define MSM_INT4_MI2S  11
+#define MSM_INT5_MI2S  12
+#define MSM_INT6_MI2S  13
+#define MSM_MI2S_MIN MSM_PRIM_MI2S
+#define MSM_MI2S_MAX MSM_INT6_MI2S
+
+struct msm_dai_auxpcm_config {
+	u16 mode;
+	u16 sync;
+	u16 frame;
+	u16 quant;
+	u16 num_slots;
+	u16 *slot_mapping;
+	u16 data;
+	u32 pcm_clk_rate;
+};
+
+struct msm_dai_auxpcm_pdata {
+	struct msm_dai_auxpcm_config mode_8k;
+	struct msm_dai_auxpcm_config mode_16k;
+};
+
+struct msm_mi2s_pdata {
+	u16 rx_sd_lines;
+	u16 tx_sd_lines;
+	u16 intf_id;
+};
+
+struct msm_i2s_data {
+	u32 capability; /* RX or TX */
+	u16 sd_lines;
+};
+
+struct msm_dai_tdm_group_config {
+	u16 group_id;
+	u16 num_ports;
+	u16 *port_id;
+	u32 clk_rate;
+};
+
+struct msm_dai_tdm_config {
+	u16 sync_mode;
+	u16 sync_src;
+	u16 data_out;
+	u16 invert_sync;
+	u16 data_delay;
+	u32 data_align;
+	u16 header_start_offset;
+	u16 header_width;
+	u16 header_num_frame_repeat;
+};
+
+struct msm_dai_tdm_pdata {
+	struct msm_dai_tdm_group_config group_config;
+	struct msm_dai_tdm_config config;
+};
+
+#endif
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/include/sound/msm-slim-dma.h	2019-01-22 16:16:28.507291721 +0100
@@ -0,0 +1,44 @@
+/*
+ * Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef _MSM_SLIMBUS_DMA_H
+#define _MSM_SLIMBUS_DMA_H
+
+#include <linux/slimbus/slimbus.h>
+
+/*
+ * struct msm_slim_dma_data - DMA data for slimbus data transfer
+ *
+ * @sdev: Handle to the slim_device instance associated with the
+ *	  data transfer.
+ * @ph:	Port handle for the slimbus ports.
+ * @dai_channel_ctl: callback function into the CPU dai driver
+ *		     to setup the data path.
+ *
+ * This structure is used to share the slimbus port handles and
+ * other data path setup related handles with other drivers.
+ */
+struct msm_slim_dma_data {
+
+	/* Handle to slimbus device */
+	struct slim_device *sdev;
+
+	/* Port Handle */
+	u32 ph;
+
+	/* Callback for data channel control */
+	int (*dai_channel_ctl) (struct msm_slim_dma_data *dma_data,
+				struct snd_soc_dai *dai, bool enable);
+};
+
+#endif
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/include/sound/q6adm-v2.h	2019-10-29 09:26:25.533221674 +0100
@@ -0,0 +1,200 @@
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#ifndef __Q6_ADM_V2_H__
+#define __Q6_ADM_V2_H__
+
+
+#define ADM_PATH_PLAYBACK 0x1
+#define ADM_PATH_LIVE_REC 0x2
+#define ADM_PATH_NONLIVE_REC 0x3
+#define ADM_PATH_COMPRESSED_RX 0x5
+#define ADM_PATH_COMPRESSED_TX 0x6
+#include <linux/qdsp6v2/rtac.h>
+#include <sound/q6afe-v2.h>
+#include <sound/q6audio-v2.h>
+
+#define MAX_MODULES_IN_TOPO 16
+#define ADM_GET_TOPO_MODULE_LIST_LENGTH\
+		((MAX_MODULES_IN_TOPO + 1) * sizeof(uint32_t))
+#define AUD_PROC_BLOCK_SIZE	4096
+#define AUD_VOL_BLOCK_SIZE	4096
+#define AUDIO_RX_CALIBRATION_SIZE	(AUD_PROC_BLOCK_SIZE + \
+						AUD_VOL_BLOCK_SIZE)
+enum {
+	ADM_CUSTOM_TOP_CAL = 0,
+	ADM_AUDPROC_CAL,
+	ADM_LSM_AUDPROC_CAL,
+	ADM_AUDVOL_CAL,
+	ADM_RTAC_INFO_CAL,
+	ADM_RTAC_APR_CAL,
+	ADM_SRS_TRUMEDIA,
+	ADM_RTAC_AUDVOL_CAL,
+	ADM_MAX_CAL_TYPES
+};
+
+enum {
+	ADM_MEM_MAP_INDEX_SOURCE_TRACKING = ADM_MAX_CAL_TYPES,
+	ADM_MEM_MAP_INDEX_MAX
+};
+
+enum {
+	ADM_CLIENT_ID_DEFAULT = 0,
+	ADM_CLIENT_ID_SOURCE_TRACKING,
+	ADM_CLIENT_ID_MAX,
+};
+
+/* ENUM for adm_status & route_status */
+enum adm_status_flags {
+	ADM_STATUS_CALIBRATION_REQUIRED = 0,
+	ADM_STATUS_LIMITER,
+	ADM_STATUS_MAX,
+};
+
+#define MAX_COPPS_PER_PORT 0x8
+#define ADM_MAX_CHANNELS 8
+
+/* multiple COPPs per stream */
+struct route_payload {
+	unsigned int copp_idx[MAX_COPPS_PER_PORT];
+	unsigned int port_id[MAX_COPPS_PER_PORT];
+	int app_type[MAX_COPPS_PER_PORT];
+	int acdb_dev_id[MAX_COPPS_PER_PORT];
+	int sample_rate[MAX_COPPS_PER_PORT];
+	unsigned long route_status[MAX_COPPS_PER_PORT];
+	unsigned short num_copps;
+	unsigned int session_id;
+};
+
+struct default_chmixer_param_id_coeff {
+	uint32_t index;
+	uint16_t num_output_channels;
+	uint16_t num_input_channels;
+};
+
+struct msm_pcm_channel_mixer {
+	int output_channel;
+	int input_channels[ADM_MAX_CHANNELS];
+	bool enable;
+	int rule;
+	int channel_weight[ADM_MAX_CHANNELS][ADM_MAX_CHANNELS];
+};
+
+int srs_trumedia_open(int port_id, int copp_idx, __s32 srs_tech_id,
+		      void *srs_params);
+
+int adm_dts_eagle_set(int port_id, int copp_idx, int param_id,
+		      void *data, uint32_t size);
+
+int adm_dts_eagle_get(int port_id, int copp_idx, int param_id,
+		      void *data, uint32_t size);
+
+void adm_copp_mfc_cfg(int port_id, int copp_idx, int dst_sample_rate);
+
+int adm_get_params(int port_id, int copp_idx, uint32_t module_id,
+		   uint32_t param_id, uint32_t params_length, char *params);
+
+int adm_send_params_v5(int port_id, int copp_idx, char *params,
+			      uint32_t params_length);
+
+int adm_dolby_dap_send_params(int port_id, int copp_idx, char *params,
+			      uint32_t params_length);
+
+int adm_open(int port, int path, int rate, int mode, int topology,
+			   int perf_mode, uint16_t bits_per_sample,
+			   int app_type, int acdbdev_id);
+
+int adm_map_rtac_block(struct rtac_cal_block_data *cal_block);
+
+int adm_unmap_rtac_block(uint32_t *mem_map_handle);
+
+int adm_close(int port, int topology, int perf_mode);
+
+int adm_matrix_map(int path, struct route_payload payload_map,
+		   int perf_mode, uint32_t passthr_mode);
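+
+/*
+ * Typical call flow (sketch, not a contract): a routing driver opens a
+ * COPP with adm_open(), attaches the stream sessions to it with
+ * adm_matrix_map(), and tears the COPP down again with adm_close().
+ */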
+
+int adm_connect_afe_port(int mode, int session_id, int port_id);
+
+void adm_ec_ref_rx_id(int  port_id);
+
+void adm_num_ec_ref_rx_chans(int num_chans);
+
+void adm_ec_ref_rx_bit_width(int bit_width);
+
+void adm_ec_ref_rx_sampling_rate(int sampling_rate);
+
+int adm_get_lowlatency_copp_id(int port_id);
+
+int adm_set_multi_ch_map(char *channel_map, int path);
+
+int adm_get_multi_ch_map(char *channel_map, int path);
+
+int adm_validate_and_get_port_index(int port_id);
+
+int adm_get_default_copp_idx(int port_id);
+
+int adm_get_topology_for_port_from_copp_id(int port_id, int copp_id);
+
+int adm_get_topology_for_port_copp_idx(int port_id, int copp_idx);
+
+int adm_get_indexes_from_copp_id(int copp_id, int *port_idx, int *copp_idx);
+
+int adm_set_pspd_matrix_params(int port_id, int copp_idx,
+				unsigned int session_id,
+				char *params, uint32_t params_length);
+
+int adm_set_downmix_params(int port_id, int copp_idx,
+				unsigned int session_id, char *params,
+				uint32_t params_length);
+
+int adm_get_pp_topo_module_list(int port_id, int copp_idx, int32_t param_length,
+				char *params);
+
+int adm_set_volume(int port_id, int copp_idx, int volume);
+
+int adm_set_softvolume(int port_id, int copp_idx,
+		       struct audproc_softvolume_params *softvol_param);
+
+int adm_set_mic_gain(int port_id, int copp_idx, int volume);
+
+int adm_send_set_multichannel_ec_primary_mic_ch(int port_id, int copp_idx,
+				int primary_mic_ch);
+
+int adm_param_enable(int port_id, int copp_idx, int module_id,  int enable);
+
+int adm_send_calibration(int port_id, int copp_idx, int path, int perf_mode,
+			 int cal_type, char *params, int size);
+
+int adm_set_wait_parameters(int port_id, int copp_idx);
+
+int adm_reset_wait_parameters(int port_id, int copp_idx);
+
+int adm_wait_timeout(int port_id, int copp_idx, int wait_time);
+
+int adm_store_cal_data(int port_id, int copp_idx, int path, int perf_mode,
+		       int cal_type, char *params, int *size);
+
+int adm_send_compressed_device_mute(int port_id, int copp_idx, bool mute_on);
+
+int adm_send_compressed_device_latency(int port_id, int copp_idx, int latency);
+int adm_set_sound_focus(int port_id, int copp_idx,
+			struct sound_focus_param soundFocusData);
+int adm_get_sound_focus(int port_id, int copp_idx,
+			struct sound_focus_param *soundFocusData);
+int adm_get_source_tracking(int port_id, int copp_idx,
+			    struct source_tracking_param *sourceTrackingData);
+int adm_swap_speaker_channels(int port_id, int copp_idx, int sample_rate,
+				bool spk_swap);
+int adm_programable_channel_mixer(int port_id, int copp_idx, int session_id,
+			int session_type,
+			struct msm_pcm_channel_mixer *ch_mixer,
+			int channel_index);
+#endif /* __Q6_ADM_V2_H__ */
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/include/sound/q6afe-v2.h	2019-01-22 16:16:28.507291721 +0100
@@ -0,0 +1,371 @@
+/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#ifndef __Q6AFE_V2_H__
+#define __Q6AFE_V2_H__
+#include <sound/apr_audio-v2.h>
+#include <linux/qdsp6v2/rtac.h>
+
+#define IN			0x000
+#define OUT			0x001
+#define MSM_AFE_MONO        0
+#define MSM_AFE_CH_STEREO   1
+#define MSM_AFE_MONO_RIGHT  1
+#define MSM_AFE_MONO_LEFT   2
+#define MSM_AFE_STEREO      3
+#define MSM_AFE_4CHANNELS   4
+#define MSM_AFE_6CHANNELS   6
+#define MSM_AFE_8CHANNELS   8
+
+#define MSM_AFE_I2S_FORMAT_LPCM		0
+#define MSM_AFE_I2S_FORMAT_COMPR		1
+#define MSM_AFE_I2S_FORMAT_IEC60958_LPCM	2
+#define MSM_AFE_I2S_FORMAT_IEC60958_COMPR	3
+
+#define MSM_AFE_PORT_TYPE_RX 0
+#define MSM_AFE_PORT_TYPE_TX 1
+
+#define RT_PROXY_DAI_001_RX	0xE0
+#define RT_PROXY_DAI_001_TX	0xF0
+#define RT_PROXY_DAI_002_RX	0xF1
+#define RT_PROXY_DAI_002_TX	0xE1
+#define VIRTUAL_ID_TO_PORTID(val) ((val & 0xF) | 0x2000)
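+/*
+ * e.g. VIRTUAL_ID_TO_PORTID(RT_PROXY_DAI_001_RX) evaluates to 0x2000
+ * and VIRTUAL_ID_TO_PORTID(RT_PROXY_DAI_002_TX) to 0x2001: the low
+ * nibble selects the proxy port within the 0x2000 port-id range.
+ */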
+
+#define AFE_CLK_VERSION_V1    1
+#define AFE_CLK_VERSION_V2    2
+
+typedef int (*routing_cb)(int port);
+
+enum {
+	/* IDX 0->4 */
+	IDX_PRIMARY_I2S_RX,
+	IDX_PRIMARY_I2S_TX,
+	IDX_AFE_PORT_ID_PRIMARY_PCM_RX,
+	IDX_AFE_PORT_ID_PRIMARY_PCM_TX,
+	IDX_SECONDARY_I2S_RX,
+	/* IDX 5->9 */
+	IDX_SECONDARY_I2S_TX,
+	IDX_MI2S_RX,
+	IDX_MI2S_TX,
+	IDX_HDMI_RX,
+	IDX_RSVD_2,
+	/* IDX 10->14 */
+	IDX_RSVD_3,
+	IDX_DIGI_MIC_TX,
+	IDX_VOICE_RECORD_RX,
+	IDX_VOICE_RECORD_TX,
+	IDX_VOICE_PLAYBACK_TX,
+	/* IDX 15->19 */
+	IDX_SLIMBUS_0_RX,
+	IDX_SLIMBUS_0_TX,
+	IDX_SLIMBUS_1_RX,
+	IDX_SLIMBUS_1_TX,
+	IDX_SLIMBUS_2_RX,
+	/* IDX 20->24 */
+	IDX_SLIMBUS_2_TX,
+	IDX_SLIMBUS_3_RX,
+	IDX_SLIMBUS_3_TX,
+	IDX_SLIMBUS_4_RX,
+	IDX_SLIMBUS_4_TX,
+	/* IDX 25->29 */
+	IDX_SLIMBUS_5_RX,
+	IDX_SLIMBUS_5_TX,
+	IDX_INT_BT_SCO_RX,
+	IDX_INT_BT_SCO_TX,
+	IDX_INT_BT_A2DP_RX,
+	/* IDX 30->34 */
+	IDX_INT_FM_RX,
+	IDX_INT_FM_TX,
+	IDX_RT_PROXY_PORT_001_RX,
+	IDX_RT_PROXY_PORT_001_TX,
+	IDX_AFE_PORT_ID_QUATERNARY_MI2S_RX,
+	/* IDX 35->39 */
+	IDX_AFE_PORT_ID_QUATERNARY_MI2S_TX,
+	IDX_AFE_PORT_ID_SECONDARY_MI2S_RX,
+	IDX_AFE_PORT_ID_SECONDARY_MI2S_TX,
+	IDX_AFE_PORT_ID_TERTIARY_MI2S_RX,
+	IDX_AFE_PORT_ID_TERTIARY_MI2S_TX,
+	/* IDX 40->44 */
+	IDX_AFE_PORT_ID_PRIMARY_MI2S_RX,
+	IDX_AFE_PORT_ID_PRIMARY_MI2S_TX,
+	IDX_AFE_PORT_ID_SECONDARY_PCM_RX,
+	IDX_AFE_PORT_ID_SECONDARY_PCM_TX,
+	IDX_VOICE2_PLAYBACK_TX,
+	/* IDX 45->49 */
+	IDX_SLIMBUS_6_RX,
+	IDX_SLIMBUS_6_TX,
+	IDX_SPDIF_RX,
+	IDX_GLOBAL_CFG,
+	IDX_AUDIO_PORT_ID_I2S_RX,
+	/* IDX 50->53 */
+	IDX_AFE_PORT_ID_SECONDARY_MI2S_RX_SD1,
+	IDX_AFE_PORT_ID_QUINARY_MI2S_RX,
+	IDX_AFE_PORT_ID_QUINARY_MI2S_TX,
+	IDX_AFE_PORT_ID_SENARY_MI2S_TX,
+	/* IDX 54->117 */
+	IDX_AFE_PORT_ID_PRIMARY_TDM_RX_0,
+	IDX_AFE_PORT_ID_PRIMARY_TDM_TX_0,
+	IDX_AFE_PORT_ID_PRIMARY_TDM_RX_1,
+	IDX_AFE_PORT_ID_PRIMARY_TDM_TX_1,
+	IDX_AFE_PORT_ID_PRIMARY_TDM_RX_2,
+	IDX_AFE_PORT_ID_PRIMARY_TDM_TX_2,
+	IDX_AFE_PORT_ID_PRIMARY_TDM_RX_3,
+	IDX_AFE_PORT_ID_PRIMARY_TDM_TX_3,
+	IDX_AFE_PORT_ID_PRIMARY_TDM_RX_4,
+	IDX_AFE_PORT_ID_PRIMARY_TDM_TX_4,
+	IDX_AFE_PORT_ID_PRIMARY_TDM_RX_5,
+	IDX_AFE_PORT_ID_PRIMARY_TDM_TX_5,
+	IDX_AFE_PORT_ID_PRIMARY_TDM_RX_6,
+	IDX_AFE_PORT_ID_PRIMARY_TDM_TX_6,
+	IDX_AFE_PORT_ID_PRIMARY_TDM_RX_7,
+	IDX_AFE_PORT_ID_PRIMARY_TDM_TX_7,
+	IDX_AFE_PORT_ID_SECONDARY_TDM_RX_0,
+	IDX_AFE_PORT_ID_SECONDARY_TDM_TX_0,
+	IDX_AFE_PORT_ID_SECONDARY_TDM_RX_1,
+	IDX_AFE_PORT_ID_SECONDARY_TDM_TX_1,
+	IDX_AFE_PORT_ID_SECONDARY_TDM_RX_2,
+	IDX_AFE_PORT_ID_SECONDARY_TDM_TX_2,
+	IDX_AFE_PORT_ID_SECONDARY_TDM_RX_3,
+	IDX_AFE_PORT_ID_SECONDARY_TDM_TX_3,
+	IDX_AFE_PORT_ID_SECONDARY_TDM_RX_4,
+	IDX_AFE_PORT_ID_SECONDARY_TDM_TX_4,
+	IDX_AFE_PORT_ID_SECONDARY_TDM_RX_5,
+	IDX_AFE_PORT_ID_SECONDARY_TDM_TX_5,
+	IDX_AFE_PORT_ID_SECONDARY_TDM_RX_6,
+	IDX_AFE_PORT_ID_SECONDARY_TDM_TX_6,
+	IDX_AFE_PORT_ID_SECONDARY_TDM_RX_7,
+	IDX_AFE_PORT_ID_SECONDARY_TDM_TX_7,
+	IDX_AFE_PORT_ID_TERTIARY_TDM_RX_0,
+	IDX_AFE_PORT_ID_TERTIARY_TDM_TX_0,
+	IDX_AFE_PORT_ID_TERTIARY_TDM_RX_1,
+	IDX_AFE_PORT_ID_TERTIARY_TDM_TX_1,
+	IDX_AFE_PORT_ID_TERTIARY_TDM_RX_2,
+	IDX_AFE_PORT_ID_TERTIARY_TDM_TX_2,
+	IDX_AFE_PORT_ID_TERTIARY_TDM_RX_3,
+	IDX_AFE_PORT_ID_TERTIARY_TDM_TX_3,
+	IDX_AFE_PORT_ID_TERTIARY_TDM_RX_4,
+	IDX_AFE_PORT_ID_TERTIARY_TDM_TX_4,
+	IDX_AFE_PORT_ID_TERTIARY_TDM_RX_5,
+	IDX_AFE_PORT_ID_TERTIARY_TDM_TX_5,
+	IDX_AFE_PORT_ID_TERTIARY_TDM_RX_6,
+	IDX_AFE_PORT_ID_TERTIARY_TDM_TX_6,
+	IDX_AFE_PORT_ID_TERTIARY_TDM_RX_7,
+	IDX_AFE_PORT_ID_TERTIARY_TDM_TX_7,
+	IDX_AFE_PORT_ID_QUATERNARY_TDM_RX_0,
+	IDX_AFE_PORT_ID_QUATERNARY_TDM_TX_0,
+	IDX_AFE_PORT_ID_QUATERNARY_TDM_RX_1,
+	IDX_AFE_PORT_ID_QUATERNARY_TDM_TX_1,
+	IDX_AFE_PORT_ID_QUATERNARY_TDM_RX_2,
+	IDX_AFE_PORT_ID_QUATERNARY_TDM_TX_2,
+	IDX_AFE_PORT_ID_QUATERNARY_TDM_RX_3,
+	IDX_AFE_PORT_ID_QUATERNARY_TDM_TX_3,
+	IDX_AFE_PORT_ID_QUATERNARY_TDM_RX_4,
+	IDX_AFE_PORT_ID_QUATERNARY_TDM_TX_4,
+	IDX_AFE_PORT_ID_QUATERNARY_TDM_RX_5,
+	IDX_AFE_PORT_ID_QUATERNARY_TDM_TX_5,
+	IDX_AFE_PORT_ID_QUATERNARY_TDM_RX_6,
+	IDX_AFE_PORT_ID_QUATERNARY_TDM_TX_6,
+	IDX_AFE_PORT_ID_QUATERNARY_TDM_RX_7,
+	IDX_AFE_PORT_ID_QUATERNARY_TDM_TX_7,
+	/* IDX 118->121 */
+	IDX_SLIMBUS_7_RX,
+	IDX_SLIMBUS_7_TX,
+	IDX_SLIMBUS_8_RX,
+	IDX_SLIMBUS_8_TX,
+	/* IDX 122-> 123 */
+	IDX_AFE_PORT_ID_USB_RX,
+	IDX_AFE_PORT_ID_USB_TX,
+	/* IDX 124 */
+	IDX_DISPLAY_PORT_RX,
+	/* IDX 125-> 128 */
+	IDX_AFE_PORT_ID_TERTIARY_PCM_RX,
+	IDX_AFE_PORT_ID_TERTIARY_PCM_TX,
+	IDX_AFE_PORT_ID_QUATERNARY_PCM_RX,
+	IDX_AFE_PORT_ID_QUATERNARY_PCM_TX,
+	/* IDX 129-> 142 */
+	IDX_AFE_PORT_ID_INT0_MI2S_RX,
+	IDX_AFE_PORT_ID_INT0_MI2S_TX,
+	IDX_AFE_PORT_ID_INT1_MI2S_RX,
+	IDX_AFE_PORT_ID_INT1_MI2S_TX,
+	IDX_AFE_PORT_ID_INT2_MI2S_RX,
+	IDX_AFE_PORT_ID_INT2_MI2S_TX,
+	IDX_AFE_PORT_ID_INT3_MI2S_RX,
+	IDX_AFE_PORT_ID_INT3_MI2S_TX,
+	IDX_AFE_PORT_ID_INT4_MI2S_RX,
+	IDX_AFE_PORT_ID_INT4_MI2S_TX,
+	IDX_AFE_PORT_ID_INT5_MI2S_RX,
+	IDX_AFE_PORT_ID_INT5_MI2S_TX,
+	IDX_AFE_PORT_ID_INT6_MI2S_RX,
+	IDX_AFE_PORT_ID_INT6_MI2S_TX,
+	AFE_MAX_PORTS
+};
+
+enum afe_mad_type {
+	MAD_HW_NONE = 0x00,
+	MAD_HW_AUDIO = 0x01,
+	MAD_HW_BEACON = 0x02,
+	MAD_HW_ULTRASOUND = 0x04,
+	MAD_SW_AUDIO = 0x05,
+};
+
+enum afe_cal_mode {
+	AFE_CAL_MODE_DEFAULT = 0x00,
+	AFE_CAL_MODE_NONE,
+};
+
+struct afe_audio_buffer {
+	dma_addr_t phys;
+	void       *data;
+	uint32_t   used;
+	uint32_t   size;	/* size of buffer */
+	uint32_t   actual_size; /* actual number of bytes read by DSP */
+	struct      ion_handle *handle;
+	struct      ion_client *client;
+};
+
+struct afe_audio_port_data {
+	struct afe_audio_buffer *buf;
+	uint32_t	    max_buf_cnt;
+	uint32_t	    dsp_buf;
+	uint32_t	    cpu_buf;
+	struct list_head    mem_map_handle;
+	uint32_t	    tmp_hdl;
+	/* read or write locks */
+	struct mutex	    lock;
+	spinlock_t	    dsp_lock;
+};
+
+struct afe_audio_client {
+	atomic_t	       cmd_state;
+	/* Relative or absolute TS */
+	uint32_t	       time_flag;
+	void		       *priv;
+	uint64_t	       time_stamp;
+	struct mutex	       cmd_lock;
+	/* idx 1: out port, idx 0: in port */
+	struct afe_audio_port_data port[2];
+	wait_queue_head_t      cmd_wait;
+	uint32_t               mem_map_handle;
+};
+
+struct aanc_data {
+	bool aanc_active;
+	uint16_t aanc_rx_port;
+	uint16_t aanc_tx_port;
+	uint32_t aanc_rx_port_sample_rate;
+	uint32_t aanc_tx_port_sample_rate;
+};
+
+int afe_open(u16 port_id, union afe_port_config *afe_config, int rate);
+int afe_close(int port_id);
+int afe_loopback(u16 enable, u16 rx_port, u16 tx_port);
+int afe_sidetone_enable(u16 tx_port_id, u16 rx_port_id, bool enable);
+int afe_loopback_gain(u16 port_id, u16 volume);
+int afe_validate_port(u16 port_id);
+int afe_get_port_index(u16 port_id);
+int afe_get_topology(int port_id);
+int afe_start_pseudo_port(u16 port_id);
+int afe_stop_pseudo_port(u16 port_id);
+uint32_t afe_req_mmap_handle(struct afe_audio_client *ac);
+int afe_memory_map(phys_addr_t dma_addr_p, u32 dma_buf_sz,
+		struct afe_audio_client *ac);
+int afe_cmd_memory_map(phys_addr_t dma_addr_p, u32 dma_buf_sz);
+int afe_cmd_memory_map_nowait(int port_id, phys_addr_t dma_addr_p,
+			u32 dma_buf_sz);
+int afe_cmd_memory_unmap(u32 dma_addr_p);
+int afe_cmd_memory_unmap_nowait(u32 dma_addr_p);
+void afe_set_dtmf_gen_rx_portid(u16 rx_port_id, int set);
+int afe_dtmf_generate_rx(int64_t duration_in_ms,
+			 uint16_t high_freq,
+			 uint16_t low_freq, uint16_t gain);
+int afe_register_get_events(u16 port_id,
+		void (*cb) (uint32_t opcode,
+		uint32_t token, uint32_t *payload, void *priv),
+		void *private_data);
+int afe_unregister_get_events(u16 port_id);
+int afe_rt_proxy_port_write(phys_addr_t buf_addr_p,
+			u32 mem_map_handle, int bytes);
+int afe_rt_proxy_port_read(phys_addr_t buf_addr_p,
+			u32 mem_map_handle, int bytes);
+void afe_set_cal_mode(u16 port_id, enum afe_cal_mode afe_cal_mode);
+int afe_port_start(u16 port_id, union afe_port_config *afe_config,
+	u32 rate);
+int afe_port_start_v2(u16 port_id, union afe_port_config *afe_config,
+		      u32 rate, u16 afe_in_channels, u16 afe_in_bit_width,
+		      struct afe_enc_config *enc_config);
+int afe_spk_prot_feed_back_cfg(int src_port, int dst_port,
+	int l_ch, int r_ch, u32 enable);
+int afe_spk_prot_get_calib_data(struct afe_spkr_prot_get_vi_calib *calib);
+int afe_port_stop_nowait(int port_id);
+int afe_apply_gain(u16 port_id, u16 gain);
+int afe_q6_interface_prepare(void);
+int afe_get_port_type(u16 port_id);
+int q6afe_audio_client_buf_alloc_contiguous(unsigned int dir,
+			struct afe_audio_client *ac,
+			unsigned int bufsz,
+			unsigned int bufcnt);
+struct afe_audio_client *q6afe_audio_client_alloc(void *priv);
+int q6afe_audio_client_buf_free_contiguous(unsigned int dir,
+			struct afe_audio_client *ac);
+void q6afe_audio_client_free(struct afe_audio_client *ac);
+/* if port_id is virtual, convert it to the physical port id;
+ * if port_id is already physical, return it unchanged
+ */
+int afe_convert_virtual_to_portid(u16 port_id);
+
+int afe_pseudo_port_start_nowait(u16 port_id);
+int afe_pseudo_port_stop_nowait(u16 port_id);
+int afe_set_lpass_clock(u16 port_id, struct afe_clk_cfg *cfg);
+int afe_set_lpass_clock_v2(u16 port_id, struct afe_clk_set *cfg);
+int afe_set_lpass_clk_cfg(int index, struct afe_clk_set *cfg);
+int afe_set_digital_codec_core_clock(u16 port_id,
+			struct afe_digital_clk_cfg *cfg);
+int afe_set_lpass_internal_digital_codec_clock(u16 port_id,
+				struct afe_digital_clk_cfg *cfg);
+int afe_enable_lpass_core_shared_clock(u16 port_id, u32 enable);
+
+int q6afe_check_osr_clk_freq(u32 freq);
+
+int afe_send_spdif_clk_cfg(struct afe_param_id_spdif_clk_cfg *cfg,
+		u16 port_id);
+int afe_send_spdif_ch_status_cfg(struct afe_param_id_spdif_ch_status_cfg
+		*ch_status_cfg,	u16 port_id);
+
+int afe_spdif_port_start(u16 port_id, struct afe_spdif_port_config *spdif_port,
+		u32 rate);
+
+int afe_turn_onoff_hw_mad(u16 mad_type, u16 mad_enable);
+int afe_port_set_mad_type(u16 port_id, enum afe_mad_type mad_type);
+enum afe_mad_type afe_port_get_mad_type(u16 port_id);
+int afe_set_config(enum afe_config_type config_type, void *config_data,
+		   int arg);
+void afe_clear_config(enum afe_config_type config);
+bool afe_has_config(enum afe_config_type config);
+
+void afe_set_aanc_info(struct aanc_data *aanc_info);
+int afe_port_group_set_param(u16 group_id,
+	union afe_port_group_config *afe_group_config);
+int afe_port_group_enable(u16 group_id,
+	union afe_port_group_config *afe_group_config, u16 enable);
+int afe_unmap_rtac_block(uint32_t *mem_map_handle);
+int afe_map_rtac_block(struct rtac_cal_block_data *cal_block);
+int afe_send_slot_mapping_cfg(
+	struct afe_param_id_slot_mapping_cfg *slot_mapping_cfg,
+	u16 port_id);
+int afe_send_custom_tdm_header_cfg(
+	struct afe_param_id_custom_tdm_header_cfg *custom_tdm_header_cfg,
+	u16 port_id);
+int afe_tdm_port_start(u16 port_id, struct afe_tdm_port_config *tdm_port,
+		       u32 rate, u16 num_groups);
+void afe_set_routing_callback(routing_cb cb);
+int afe_get_av_dev_drift(struct afe_param_id_dev_timing_stats *timing_stats,
+		u16 port);
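+
+/*
+ * Illustrative sketch (an assumption, not part of the original patch):
+ * bringing an AFE port up and down with the API above might look like
+ *
+ *	union afe_port_config cfg;
+ *
+ *	memset(&cfg, 0, sizeof(cfg));
+ *	... fill the interface-specific member of cfg ...
+ *	ret = afe_port_start(port_id, &cfg, 48000);
+ *	...
+ *	ret = afe_port_stop_nowait(port_id);
+ */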
+#endif /* __Q6AFE_V2_H__ */
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/include/sound/q6asm-v2.h	2019-10-29 09:26:25.533221674 +0100
@@ -0,0 +1,696 @@
+/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#ifndef __Q6_ASM_V2_H__
+#define __Q6_ASM_V2_H__
+
+#include <linux/qdsp6v2/apr.h>
+#include <linux/qdsp6v2/rtac.h>
+#include <sound/apr_audio-v2.h>
+#include <linux/list.h>
+#include <linux/msm_ion.h>
+
+#define IN                      0x000
+#define OUT                     0x001
+#define CH_MODE_MONO            0x001
+#define CH_MODE_STEREO          0x002
+
+#define FORMAT_LINEAR_PCM   0x0000
+#define FORMAT_DTMF         0x0001
+#define FORMAT_ADPCM	    0x0002
+#define FORMAT_YADPCM       0x0003
+#define FORMAT_MP3          0x0004
+#define FORMAT_MPEG4_AAC    0x0005
+#define FORMAT_AMRNB	    0x0006
+#define FORMAT_AMRWB	    0x0007
+#define FORMAT_V13K	    0x0008
+#define FORMAT_EVRC	    0x0009
+#define FORMAT_EVRCB	    0x000a
+#define FORMAT_EVRCWB	    0x000b
+#define FORMAT_MIDI	    0x000c
+#define FORMAT_SBC	    0x000d
+#define FORMAT_WMA_V10PRO   0x000e
+#define FORMAT_WMA_V9	    0x000f
+#define FORMAT_AMR_WB_PLUS  0x0010
+#define FORMAT_MPEG4_MULTI_AAC 0x0011
+#define FORMAT_MULTI_CHANNEL_LINEAR_PCM 0x0012
+#define FORMAT_AC3          0x0013
+#define FORMAT_EAC3         0x0014
+#define FORMAT_MP2          0x0015
+#define FORMAT_FLAC         0x0016
+#define FORMAT_ALAC         0x0017
+#define FORMAT_VORBIS       0x0018
+#define FORMAT_APE          0x0019
+#define FORMAT_G711_ALAW_FS 0x001a
+#define FORMAT_G711_MLAW_FS 0x001b
+#define FORMAT_DTS          0x001c
+#define FORMAT_DSD          0x001d
+#define FORMAT_APTX         0x001e
+#define FORMAT_GEN_COMPR    0x001f
+#define FORMAT_TRUEHD       0x0020
+#define FORMAT_IEC61937     0x0021
+
+#define ENCDEC_SBCBITRATE   0x0001
+#define ENCDEC_IMMEDIATE_DECODE 0x0002
+#define ENCDEC_CFG_BLK          0x0003
+
+#define CMD_PAUSE          0x0001
+#define CMD_FLUSH          0x0002
+#define CMD_EOS            0x0003
+#define CMD_CLOSE          0x0004
+#define CMD_OUT_FLUSH      0x0005
+#define CMD_SUSPEND        0x0006
+
+/* bits 0-1 represent the stream priority */
+#define STREAM_PRIORITY_NORMAL	0x0000
+#define STREAM_PRIORITY_LOW	0x0001
+#define STREAM_PRIORITY_HIGH	0x0002
+
+/* bit 4 enables metadata in the encoded data buffer */
+#define BUFFER_META_ENABLE	0x0010
+
+/*
+ * Bit 5 selects the timestamp mode:
+ *   0 -- ASM_DATA_EVENT_READ_DONE carries a relative time-stamp
+ *   1 -- ASM_DATA_EVENT_READ_DONE carries an absolute time-stamp
+ */
+#define ABSOLUTE_TIMESTAMP_ENABLE  0x0020
+
+/* Enable Sample_Rate/Channel_Mode notification event from Decoder */
+#define SR_CM_NOTIFY_ENABLE	0x0004
+
+#define TUN_WRITE_IO_MODE 0x0008 /* tunnel write mode */
+#define TUN_READ_IO_MODE  0x0004 /* tunnel read mode */
+#define SYNC_IO_MODE	0x0001
+#define ASYNC_IO_MODE	0x0002
+#define COMPRESSED_IO	0x0040
+#define COMPRESSED_STREAM_IO	0x0080
+#define NT_MODE        0x0400
+
+#define NO_TIMESTAMP    0xFF00
+#define SET_TIMESTAMP   0x0000
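+
+/*
+ * Illustrative sketch (an assumption, not part of the original patch):
+ * a writer that does not stamp its buffers passes NO_TIMESTAMP in the
+ * flags argument of q6asm_write(), e.g.
+ *
+ *	ret = q6asm_write(ac, len, 0, 0, NO_TIMESTAMP);
+ */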
+
+#define SOFT_PAUSE_ENABLE	1
+#define SOFT_PAUSE_DISABLE	0
+
+#define ASM_ACTIVE_STREAMS_ALLOWED	0x9
+/* Control session is used for mapping calibration memory */
+#define ASM_CONTROL_SESSION	(ASM_ACTIVE_STREAMS_ALLOWED + 1)
+
+#define ASM_SHIFT_GAPLESS_MODE_FLAG	31
+#define ASM_SHIFT_LAST_BUFFER_FLAG	30
+
+#define ASM_LITTLE_ENDIAN 0
+#define ASM_BIG_ENDIAN 1
+
+/* PCM media format versions */
+enum {
+	PCM_MEDIA_FORMAT_V2 = 0,
+	PCM_MEDIA_FORMAT_V3,
+	PCM_MEDIA_FORMAT_V4,
+};
+
+/* PCM format modes in DSP */
+enum {
+	DEFAULT_QF = 0,
+	Q15 = 15,
+	Q23 = 23,
+	Q31 = 31,
+};
+
+/* Indices into the ASM_DATA_EVENT_READ_DONE payload (32-bit words) */
+#define READDONE_IDX_STATUS 0
+#define READDONE_IDX_BUFADD_LSW 1
+#define READDONE_IDX_BUFADD_MSW 2
+#define READDONE_IDX_MEMMAP_HDL 3
+#define READDONE_IDX_SIZE 4
+#define READDONE_IDX_OFFSET 5
+#define READDONE_IDX_LSW_TS 6
+#define READDONE_IDX_MSW_TS 7
+#define READDONE_IDX_FLAGS 8
+#define READDONE_IDX_NUMFRAMES 9
+#define READDONE_IDX_SEQ_ID 10
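+
+/*
+ * Illustrative sketch (an assumption, not part of the original patch):
+ * an app_cb can decode an ASM_DATA_EVENT_READ_DONE payload with the
+ * indices above, e.g.
+ *
+ *	uint32_t bytes = payload[READDONE_IDX_SIZE];
+ *	uint32_t offset = payload[READDONE_IDX_OFFSET];
+ *	uint64_t ts = ((uint64_t)payload[READDONE_IDX_MSW_TS] << 32) |
+ *			payload[READDONE_IDX_LSW_TS];
+ */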
+
+#define SOFT_PAUSE_PERIOD       30   /* ramp up/down for 30ms    */
+#define SOFT_PAUSE_STEP         0 /* step value of 0, in ms or us */
+enum {
+	SOFT_PAUSE_CURVE_LINEAR = 0,
+	SOFT_PAUSE_CURVE_EXP,
+	SOFT_PAUSE_CURVE_LOG,
+};
+
+#define SOFT_VOLUME_PERIOD       30   /* ramp up/down for 30ms    */
+#define SOFT_VOLUME_STEP         0 /* step value of 0, in ms or us */
+enum {
+	SOFT_VOLUME_CURVE_LINEAR = 0,
+	SOFT_VOLUME_CURVE_EXP,
+	SOFT_VOLUME_CURVE_LOG,
+};
+
+#define SOFT_VOLUME_INSTANCE_1	1
+#define SOFT_VOLUME_INSTANCE_2	2
+
+typedef void (*app_cb)(uint32_t opcode, uint32_t token,
+			uint32_t *payload, void *priv);
+
+struct audio_buffer {
+	dma_addr_t phys;
+	void       *data;
+	uint32_t   used;
+	uint32_t   size; /* size of buffer */
+	uint32_t   actual_size; /* actual number of bytes read by DSP */
+	struct ion_handle *handle;
+	struct ion_client *client;
+};
+
+struct audio_aio_write_param {
+	phys_addr_t   paddr;
+	uint32_t      len;
+	uint32_t      uid;
+	uint32_t      lsw_ts;
+	uint32_t      msw_ts;
+	uint32_t      flags;
+	uint32_t      metadata_len;
+	uint32_t      last_buffer;
+};
+
+struct audio_aio_read_param {
+	phys_addr_t   paddr;
+	uint32_t      len;
+	uint32_t      uid;
+	uint32_t      flags;/*meta data flags*/
+};
+
+struct audio_port_data {
+	struct audio_buffer *buf;
+	uint32_t	    max_buf_cnt;
+	uint32_t	    dsp_buf;
+	uint32_t	    cpu_buf;
+	struct list_head    mem_map_handle;
+	uint32_t	    tmp_hdl;
+	/* read or write locks */
+	struct mutex	    lock;
+	spinlock_t	    dsp_lock;
+};
+
+struct shared_io_config {
+	uint32_t format;
+	uint16_t bits_per_sample;
+	uint32_t rate;
+	uint32_t channels;
+	uint16_t sample_word_size;
+	uint32_t bufsz;
+	uint32_t bufcnt;
+};
+
+struct audio_client {
+	int                    session;
+	app_cb		       cb;
+	atomic_t	       cmd_state;
+	atomic_t	       cmd_state_pp;
+	/* Relative or absolute TS */
+	atomic_t	       time_flag;
+	atomic_t	       nowait_cmd_cnt;
+	atomic_t               mem_state;
+	void		       *priv;
+	uint32_t               io_mode;
+	uint64_t	       time_stamp;
+	struct apr_svc         *apr;
+	struct apr_svc         *mmap_apr;
+	struct apr_svc         *apr2;
+	struct mutex	       cmd_lock;
+	/* index 1: out port, index 0: in port */
+	struct audio_port_data port[2];
+	wait_queue_head_t      cmd_wait;
+	wait_queue_head_t      time_wait;
+	wait_queue_head_t      mem_wait;
+	int                    perf_mode;
+	int                    stream_id;
+	struct device *dev;
+	int		       topology;
+	int		       app_type;
+	/* audio cache operations function pointer */
+	int (*fptr_cache_ops)(struct audio_buffer *abuff, int cache_op);
+	atomic_t               unmap_cb_success;
+	atomic_t               reset;
+	/* holds latest DSP pipeline delay */
+	uint32_t               path_delay;
+	/* shared io */
+	struct audio_buffer shared_pos_buf;
+	struct shared_io_config config;
+};
+
+void q6asm_audio_client_free(struct audio_client *ac);
+
+struct audio_client *q6asm_audio_client_alloc(app_cb cb, void *priv);
+
+struct audio_client *q6asm_get_audio_client(int session_id);
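+
+/*
+ * Illustrative usage sketch (an assumption, not part of the original
+ * patch): a typical PCM playback setup built from this API:
+ *
+ *	struct audio_client *ac = q6asm_audio_client_alloc(event_cb, prtd);
+ *
+ *	if (!ac)
+ *		return -ENOMEM;
+ *	ret = q6asm_set_io_mode(ac, ASYNC_IO_MODE);
+ *	ret = q6asm_open_write_v2(ac, FORMAT_LINEAR_PCM, 16);
+ *	ret = q6asm_audio_client_buf_alloc_contiguous(IN, ac, bufsz, bufcnt);
+ *	...
+ *	q6asm_audio_client_free(ac);
+ *
+ * where event_cb (an app_cb) and prtd are caller-provided.
+ */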
+
+int q6asm_audio_client_buf_alloc(unsigned int dir /* 1:Out, 0:In */,
+				struct audio_client *ac,
+				unsigned int bufsz,
+				uint32_t bufcnt);
+int q6asm_audio_client_buf_alloc_contiguous(unsigned int dir /* 1:Out, 0:In */,
+				struct audio_client *ac,
+				unsigned int bufsz,
+				unsigned int bufcnt);
+
+int q6asm_audio_client_buf_free_contiguous(unsigned int dir,
+			struct audio_client *ac);
+
+int q6asm_open_read(struct audio_client *ac, uint32_t format
+		/*, uint16_t bits_per_sample*/);
+
+int q6asm_open_read_v2(struct audio_client *ac, uint32_t format,
+			uint16_t bits_per_sample);
+
+int q6asm_open_read_v3(struct audio_client *ac, uint32_t format,
+		       uint16_t bits_per_sample);
+
+int q6asm_open_read_v4(struct audio_client *ac, uint32_t format,
+		       uint16_t bits_per_sample, bool ts_mode);
+
+int q6asm_open_write(struct audio_client *ac, uint32_t format
+		/*, uint16_t bits_per_sample*/);
+
+int q6asm_open_write_v2(struct audio_client *ac, uint32_t format,
+			uint16_t bits_per_sample);
+
+int q6asm_open_shared_io(struct audio_client *ac,
+			 struct shared_io_config *c, int dir);
+
+int q6asm_open_write_v3(struct audio_client *ac, uint32_t format,
+			uint16_t bits_per_sample);
+
+int q6asm_open_write_v4(struct audio_client *ac, uint32_t format,
+			uint16_t bits_per_sample);
+
+int q6asm_stream_open_write_v2(struct audio_client *ac, uint32_t format,
+			       uint16_t bits_per_sample, int32_t stream_id,
+			       bool is_gapless_mode);
+
+int q6asm_stream_open_write_v3(struct audio_client *ac, uint32_t format,
+			       uint16_t bits_per_sample, int32_t stream_id,
+			       bool is_gapless_mode);
+
+int q6asm_stream_open_write_v4(struct audio_client *ac, uint32_t format,
+			       uint16_t bits_per_sample, int32_t stream_id,
+			       bool is_gapless_mode);
+
+int q6asm_open_write_compressed(struct audio_client *ac, uint32_t format,
+				uint32_t passthrough_flag);
+
+int q6asm_open_read_write(struct audio_client *ac,
+			uint32_t rd_format,
+			uint32_t wr_format);
+
+int q6asm_open_read_write_v2(struct audio_client *ac, uint32_t rd_format,
+			     uint32_t wr_format, bool is_meta_data_mode,
+			     uint32_t bits_per_sample, bool overwrite_topology,
+			     int topology);
+
+int q6asm_open_loopback_v2(struct audio_client *ac,
+			   uint16_t bits_per_sample);
+
+int q6asm_open_transcode_loopback(struct audio_client *ac,
+			   uint16_t bits_per_sample, uint32_t source_format,
+			   uint32_t sink_format);
+
+int q6asm_write(struct audio_client *ac, uint32_t len, uint32_t msw_ts,
+				uint32_t lsw_ts, uint32_t flags);
+int q6asm_write_nolock(struct audio_client *ac, uint32_t len, uint32_t msw_ts,
+				uint32_t lsw_ts, uint32_t flags);
+
+int q6asm_async_write(struct audio_client *ac,
+					  struct audio_aio_write_param *param);
+
+int q6asm_async_read(struct audio_client *ac,
+					  struct audio_aio_read_param *param);
+
+int q6asm_read(struct audio_client *ac);
+int q6asm_read_v2(struct audio_client *ac, uint32_t len);
+int q6asm_read_nolock(struct audio_client *ac);
+
+int q6asm_memory_map(struct audio_client *ac, phys_addr_t buf_add,
+			int dir, uint32_t bufsz, uint32_t bufcnt);
+
+int q6asm_memory_unmap(struct audio_client *ac, phys_addr_t buf_add,
+							int dir);
+
+struct audio_buffer *q6asm_shared_io_buf(struct audio_client *ac, int dir);
+
+int q6asm_shared_io_free(struct audio_client *ac, int dir);
+
+int q6asm_get_shared_pos(struct audio_client *ac, uint32_t *si, uint32_t *msw,
+			 uint32_t *lsw);
+
+int q6asm_map_rtac_block(struct rtac_cal_block_data *cal_block);
+
+int q6asm_unmap_rtac_block(uint32_t *mem_map_handle);
+
+int q6asm_send_cal(struct audio_client *ac);
+
+int q6asm_run(struct audio_client *ac, uint32_t flags,
+		uint32_t msw_ts, uint32_t lsw_ts);
+
+int q6asm_run_nowait(struct audio_client *ac, uint32_t flags,
+		uint32_t msw_ts, uint32_t lsw_ts);
+
+int q6asm_stream_run_nowait(struct audio_client *ac, uint32_t flags,
+		uint32_t msw_ts, uint32_t lsw_ts, uint32_t stream_id);
+
+int q6asm_reg_tx_overflow(struct audio_client *ac, uint16_t enable);
+
+int q6asm_reg_rx_underflow(struct audio_client *ac, uint16_t enable);
+
+int q6asm_cmd(struct audio_client *ac, int cmd);
+
+int q6asm_stream_cmd(struct audio_client *ac, int cmd, uint32_t stream_id);
+
+int q6asm_cmd_nowait(struct audio_client *ac, int cmd);
+
+int q6asm_stream_cmd_nowait(struct audio_client *ac, int cmd,
+			    uint32_t stream_id);
+
+void *q6asm_is_cpu_buf_avail(int dir, struct audio_client *ac,
+				uint32_t *size, uint32_t *idx);
+
+int q6asm_cpu_buf_release(int dir, struct audio_client *ac);
+
+void *q6asm_is_cpu_buf_avail_nolock(int dir, struct audio_client *ac,
+					uint32_t *size, uint32_t *idx);
+
+int q6asm_is_dsp_buf_avail(int dir, struct audio_client *ac);
+
+/* File format specific configurations to be added below */
+
+int q6asm_enc_cfg_blk_aac(struct audio_client *ac,
+			uint32_t frames_per_buf,
+			uint32_t sample_rate, uint32_t channels,
+			uint32_t bit_rate,
+			uint32_t mode, uint32_t format);
+
+int q6asm_enc_cfg_blk_g711(struct audio_client *ac,
+			uint32_t frames_per_buf,
+			uint32_t sample_rate);
+
+int q6asm_enc_cfg_blk_pcm(struct audio_client *ac,
+			uint32_t rate, uint32_t channels);
+
+int q6asm_enc_cfg_blk_pcm_v2(struct audio_client *ac,
+			uint32_t rate, uint32_t channels,
+			uint16_t bits_per_sample,
+			bool use_default_chmap, bool use_back_flavor,
+			u8 *channel_map);
+
+int q6asm_enc_cfg_blk_pcm_v3(struct audio_client *ac,
+			     uint32_t rate, uint32_t channels,
+			     uint16_t bits_per_sample, bool use_default_chmap,
+			     bool use_back_flavor, u8 *channel_map,
+			     uint16_t sample_word_size);
+
+int q6asm_enc_cfg_blk_pcm_v4(struct audio_client *ac,
+			     uint32_t rate, uint32_t channels,
+			     uint16_t bits_per_sample, bool use_default_chmap,
+			     bool use_back_flavor, u8 *channel_map,
+			     uint16_t sample_word_size, uint16_t endianness,
+			     uint16_t mode);
+
+int q6asm_enc_cfg_blk_pcm_format_support(struct audio_client *ac,
+			uint32_t rate, uint32_t channels,
+			uint16_t bits_per_sample);
+
+int q6asm_enc_cfg_blk_pcm_format_support_v3(struct audio_client *ac,
+					    uint32_t rate, uint32_t channels,
+					    uint16_t bits_per_sample,
+					    uint16_t sample_word_size);
+
+int q6asm_enc_cfg_blk_pcm_format_support_v4(struct audio_client *ac,
+					    uint32_t rate, uint32_t channels,
+					    uint16_t bits_per_sample,
+					    uint16_t sample_word_size,
+					    uint16_t endianness,
+					    uint16_t mode);
+
+int q6asm_set_encdec_chan_map(struct audio_client *ac,
+		uint32_t num_channels);
+
+int q6asm_enc_cfg_blk_pcm_native(struct audio_client *ac,
+			uint32_t rate, uint32_t channels);
+
+int q6asm_enable_sbrps(struct audio_client *ac,
+			uint32_t sbr_ps);
+
+int q6asm_cfg_dual_mono_aac(struct audio_client *ac,
+			uint16_t sce_left, uint16_t sce_right);
+
+int q6asm_cfg_aac_sel_mix_coef(struct audio_client *ac, uint32_t mix_coeff);
+
+int q6asm_enc_cfg_blk_qcelp(struct audio_client *ac, uint32_t frames_per_buf,
+		uint16_t min_rate, uint16_t max_rate,
+		uint16_t reduced_rate_level, uint16_t rate_modulation_cmd);
+
+int q6asm_enc_cfg_blk_evrc(struct audio_client *ac, uint32_t frames_per_buf,
+		uint16_t min_rate, uint16_t max_rate,
+		uint16_t rate_modulation_cmd);
+
+int q6asm_enc_cfg_blk_amrnb(struct audio_client *ac, uint32_t frames_per_buf,
+		uint16_t band_mode, uint16_t dtx_enable);
+
+int q6asm_enc_cfg_blk_amrwb(struct audio_client *ac, uint32_t frames_per_buf,
+		uint16_t band_mode, uint16_t dtx_enable);
+
+int q6asm_media_format_block_pcm(struct audio_client *ac,
+			uint32_t rate, uint32_t channels);
+
+int q6asm_media_format_block_pcm_format_support(struct audio_client *ac,
+			uint32_t rate, uint32_t channels,
+			uint16_t bits_per_sample);
+
+int q6asm_media_format_block_pcm_format_support_v2(struct audio_client *ac,
+				uint32_t rate, uint32_t channels,
+				uint16_t bits_per_sample, int stream_id,
+				bool use_default_chmap, char *channel_map);
+
+int q6asm_media_format_block_pcm_format_support_v3(struct audio_client *ac,
+						   uint32_t rate,
+						   uint32_t channels,
+						   uint16_t bits_per_sample,
+						   int stream_id,
+						   bool use_default_chmap,
+						   char *channel_map,
+						   uint16_t sample_word_size);
+
+int q6asm_media_format_block_pcm_format_support_v4(struct audio_client *ac,
+						   uint32_t rate,
+						   uint32_t channels,
+						   uint16_t bits_per_sample,
+						   int stream_id,
+						   bool use_default_chmap,
+						   char *channel_map,
+						   uint16_t sample_word_size,
+						   uint16_t endianness,
+						   uint16_t mode);
+
+int q6asm_media_format_block_multi_ch_pcm(struct audio_client *ac,
+			uint32_t rate, uint32_t channels,
+			bool use_default_chmap, char *channel_map);
+
+int q6asm_media_format_block_multi_ch_pcm_v2(
+			struct audio_client *ac,
+			uint32_t rate, uint32_t channels,
+			bool use_default_chmap, char *channel_map,
+			uint16_t bits_per_sample);
+int q6asm_media_format_block_gen_compr(
+			struct audio_client *ac,
+			uint32_t rate, uint32_t channels,
+			bool use_default_chmap, char *channel_map,
+			uint16_t bits_per_sample);
+
+int q6asm_media_format_block_iec(
+			struct audio_client *ac,
+			uint32_t rate, uint32_t channels);
+
+int q6asm_media_format_block_multi_ch_pcm_v3(struct audio_client *ac,
+					     uint32_t rate, uint32_t channels,
+					     bool use_default_chmap,
+					     char *channel_map,
+					     uint16_t bits_per_sample,
+					     uint16_t sample_word_size);
+
+int q6asm_media_format_block_multi_ch_pcm_v4(struct audio_client *ac,
+					     uint32_t rate, uint32_t channels,
+					     bool use_default_chmap,
+					     char *channel_map,
+					     uint16_t bits_per_sample,
+					     uint16_t sample_word_size,
+					     uint16_t endianness,
+					     uint16_t mode);
+
+int q6asm_media_format_block_aac(struct audio_client *ac,
+			struct asm_aac_cfg *cfg);
+
+int q6asm_stream_media_format_block_aac(struct audio_client *ac,
+			struct asm_aac_cfg *cfg, int stream_id);
+
+int q6asm_media_format_block_multi_aac(struct audio_client *ac,
+			struct asm_aac_cfg *cfg);
+
+int q6asm_media_format_block_wma(struct audio_client *ac,
+			void *cfg, int stream_id);
+
+int q6asm_media_format_block_wmapro(struct audio_client *ac,
+			void *cfg, int stream_id);
+
+int q6asm_media_format_block_amrwbplus(struct audio_client *ac,
+			struct asm_amrwbplus_cfg *cfg);
+
+int q6asm_stream_media_format_block_flac(struct audio_client *ac,
+			struct asm_flac_cfg *cfg, int stream_id);
+
+int q6asm_media_format_block_alac(struct audio_client *ac,
+			struct asm_alac_cfg *cfg, int stream_id);
+
+int q6asm_media_format_block_g711(struct audio_client *ac,
+			struct asm_g711_dec_cfg *cfg, int stream_id);
+
+int q6asm_stream_media_format_block_vorbis(struct audio_client *ac,
+			struct asm_vorbis_cfg *cfg, int stream_id);
+
+int q6asm_media_format_block_ape(struct audio_client *ac,
+			struct asm_ape_cfg *cfg, int stream_id);
+
+int q6asm_media_format_block_dsd(struct audio_client *ac,
+			struct asm_dsd_cfg *cfg, int stream_id);
+
+int q6asm_stream_media_format_block_aptx_dec(struct audio_client *ac,
+						uint32_t sr, int stream_id);
+
+int q6asm_ds1_set_endp_params(struct audio_client *ac,
+				int param_id, int param_value);
+
+/* Send stream based end params */
+int q6asm_ds1_set_stream_endp_params(struct audio_client *ac, int param_id,
+				     int param_value, int stream_id);
+
+/* PP specific */
+int q6asm_equalizer(struct audio_client *ac, void *eq);
+
+/* Send Volume Command */
+int q6asm_set_volume(struct audio_client *ac, int volume);
+
+/* Send Volume Command */
+int q6asm_set_volume_v2(struct audio_client *ac, int volume, int instance);
+
+/* DTS Eagle Params */
+int q6asm_dts_eagle_set(struct audio_client *ac, int param_id, uint32_t size,
+			void *data, struct param_outband *po, int m_id);
+int q6asm_dts_eagle_get(struct audio_client *ac, int param_id, uint32_t size,
+			void *data, struct param_outband *po, int m_id);
+
+/* Send aptx decoder BT address */
+int q6asm_set_aptx_dec_bt_addr(struct audio_client *ac,
+				struct aptx_dec_bt_addr_cfg *cfg);
+
+/* Set SoftPause Params */
+int q6asm_set_softpause(struct audio_client *ac,
+			struct asm_softpause_params *param);
+
+/* Set Softvolume Params */
+int q6asm_set_softvolume(struct audio_client *ac,
+			struct asm_softvolume_params *param);
+
+/* Set Softvolume Params */
+int q6asm_set_softvolume_v2(struct audio_client *ac,
+			    struct asm_softvolume_params *param, int instance);
+
+/* Set panning and MFC params */
+int q6asm_set_mfc_panning_params(struct audio_client *ac,
+				 struct asm_stream_pan_ctrl_params *pan_param);
+
+/* Set vol gain pair */
+int q6asm_set_vol_ctrl_gain_pair(struct audio_client *ac,
+				 struct asm_stream_pan_ctrl_params *pan_param);
+
+/* Send left-right channel gain */
+int q6asm_set_lrgain(struct audio_client *ac, int left_gain, int right_gain);
+
+/* Send multi channel gain */
+int q6asm_set_multich_gain(struct audio_client *ac, uint32_t channels,
+			   uint32_t *gains, uint8_t *ch_map, bool use_default);
+
+/* Enable Mute/unmute flag */
+int q6asm_set_mute(struct audio_client *ac, int muteflag);
+
+int q6asm_get_session_time(struct audio_client *ac, uint64_t *tstamp);
+
+int q6asm_get_session_time_legacy(struct audio_client *ac, uint64_t *tstamp);
+
+int q6asm_send_audio_effects_params(struct audio_client *ac, char *params,
+				    uint32_t params_length);
+
+int q6asm_send_stream_cmd(struct audio_client *ac,
+			  struct msm_adsp_event_data *data);
+
+int q6asm_audio_map_shm_fd(struct audio_client *ac, struct ion_client **client,
+			   struct ion_handle **handle, int fd);
+
+int q6asm_send_rtic_event_ack(struct audio_client *ac,
+			      void *param, uint32_t params_length);
+
+/* The client can set the IO mode to either AIO or SIO mode */
+int q6asm_set_io_mode(struct audio_client *ac, uint32_t mode);
+
+/* Get Service ID for APR communication */
+int q6asm_get_apr_service_id(int session_id);
+
+/* Common format block without any payload */
+int q6asm_media_format_block(struct audio_client *ac, uint32_t format);
+
+/* Send the meta data to remove initial and trailing silence */
+int q6asm_send_meta_data(struct audio_client *ac, uint32_t initial_samples,
+		uint32_t trailing_samples);
+
+/* Send the stream meta data to remove initial and trailing silence */
+int q6asm_stream_send_meta_data(struct audio_client *ac, uint32_t stream_id,
+		uint32_t initial_samples, uint32_t trailing_samples);
+
+int q6asm_get_asm_topology(int session_id);
+int q6asm_get_asm_app_type(int session_id);
+
+int q6asm_send_mtmx_strtr_window(struct audio_client *ac,
+		struct asm_session_mtmx_strtr_param_window_v2_t *window_param,
+		uint32_t param_id);
+
+/* Configure DSP render mode */
+int q6asm_send_mtmx_strtr_render_mode(struct audio_client *ac,
+		uint32_t render_mode);
+
+/* Configure DSP clock recovery mode */
+int q6asm_send_mtmx_strtr_clk_rec_mode(struct audio_client *ac,
+		uint32_t clk_rec_mode);
+
+/* Enable adjust session clock in DSP */
+int q6asm_send_mtmx_strtr_enable_adjust_session_clock(struct audio_client *ac,
+		bool enable);
+
+/* Retrieve the current DSP path delay */
+int q6asm_get_path_delay(struct audio_client *ac);
+
+/* Helper functions to retrieve data from token */
+uint8_t q6asm_get_buf_index_from_token(uint32_t token);
+uint8_t q6asm_get_stream_id_from_token(uint32_t token);
+
+/* Adjust session clock in DSP */
+int q6asm_adjust_session_clock(struct audio_client *ac,
+		uint32_t adjust_time_lsw,
+		uint32_t adjust_time_msw);
+#endif /* __Q6_ASM_V2_H__ */
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/include/sound/q6audio-v2.h	2019-01-22 16:16:28.507291721 +0100
@@ -0,0 +1,36 @@
+/* Copyright (c) 2012-2013, 2015 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _Q6_AUDIO_H_
+#define _Q6_AUDIO_H_
+
+#include <linux/qdsp6v2/apr.h>
+
+enum {
+	LEGACY_PCM_MODE = 0,
+	LOW_LATENCY_PCM_MODE,
+	ULTRA_LOW_LATENCY_PCM_MODE,
+	ULL_POST_PROCESSING_PCM_MODE,
+};
+
+int q6audio_get_port_index(u16 port_id);
+
+int q6audio_convert_virtual_to_portid(u16 port_id);
+
+int q6audio_validate_port(u16 port_id);
+
+int q6audio_is_digital_pcm_interface(u16 port_id);
+
+int q6audio_get_port_id(u16 port_id);
+
+#endif
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/include/sound/q6core.h	2019-10-29 09:26:25.533221674 +0100
@@ -0,0 +1,170 @@
+/* Copyright (c) 2012-2016, 2017 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __Q6CORE_H__
+#define __Q6CORE_H__
+#include <linux/qdsp6v2/apr.h>
+
+#define AVCS_CMD_ADSP_EVENT_GET_STATE		0x0001290C
+#define AVCS_CMDRSP_ADSP_EVENT_GET_STATE	0x0001290D
+
+bool q6core_is_adsp_ready(void);
+int q6core_add_remove_pool_pages(phys_addr_t buf_add, uint32_t bufsz,
+			uint32_t mempool_id, bool add_pages);
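+
+/*
+ * Illustrative sketch (an assumption, not part of the original patch):
+ * callers typically gate APR traffic on DSP readiness, e.g.
+ *
+ *	if (!q6core_is_adsp_ready())
+ *		return -ENODEV;
+ */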
+
+#define ADSP_CMD_SET_DTS_EAGLE_DATA_ID 0x00012919
+#define DTS_EAGLE_LICENSE_ID           0x00028346
+struct adsp_dts_eagle {
+	struct apr_hdr hdr;
+	uint32_t id;
+	uint32_t overwrite;
+	uint32_t size;
+	char data[];
+};
+int core_dts_eagle_set(int size, char *data);
+int core_dts_eagle_get(int id, int size, char *data);
+
+#define ADSP_CMD_SET_DOLBY_MANUFACTURER_ID 0x00012918
+
+struct adsp_dolby_manufacturer_id {
+	struct apr_hdr hdr;
+	int manufacturer_id;
+};
+
+uint32_t core_set_dolby_manufacturer_id(int manufacturer_id);
+
+/*
+ * Dolby Surround1 Module License ID. This ID is used as an identifier
+ * for the DS1 license via the ADSP generic license mechanism.
+ * Please refer to AVCS_CMD_SET_LICENSE for more details.
+ */
+#define DOLBY_DS1_LICENSE_ID	0x00000001
+
+#define AVCS_CMD_SET_LICENSE	0x00012919
+struct avcs_cmd_set_license {
+	struct apr_hdr hdr;
+	uint32_t id; /**< A unique ID used to refer to this license */
+	uint32_t overwrite;
+	/**< 0 = do not overwrite an existing license with this id.
+	 *   1 = overwrite an existing license with this id. */
+	uint32_t size;
+	/**< Size in bytes of the license data following this header. */
+	/* uint8_t data[]: license data and padding follow this structure;
+	 * the total packet size must be a multiple of 4 bytes. */
+};
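+
+/*
+ * Illustrative sketch (an assumption, not part of the original patch):
+ * since the total packet must stay a multiple of 4 bytes, a sender
+ * would size the command as
+ *
+ *	pkt_size = sizeof(struct avcs_cmd_set_license) +
+ *		   ALIGN(license_size, 4);
+ */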
+
+#define AVCS_CMD_GET_LICENSE_VALIDATION_RESULT        0x0001291A
+struct avcs_cmd_get_license_validation_result {
+	struct apr_hdr hdr;
+	uint32_t id; /**< A unique ID used to refer to this license */
+};
+
+#define AVCS_CMDRSP_GET_LICENSE_VALIDATION_RESULT        0x0001291B
+struct avcs_cmdrsp_get_license_validation_result {
+	uint32_t result;
+	/* ADSP_EOK if the license validation result was successfully
+	 * retrieved.
+	 * ADSP_ENOTEXIST if there is no license with the given id.
+	 * ADSP_ENOTIMPL if there is no validation function for a license
+	 * with this id.
+	 */
+	uint32_t size;
+	/* Length in bytes of the result that follows this structure. */
+};
+
+/*
+ * Set Q6 topologies: registers custom topologies in the aDSP for
+ * use in audio, voice, AFE and LSM.
+ */
+
+
+#define AVCS_CMD_SHARED_MEM_MAP_REGIONS                             0x00012924
+#define AVCS_CMDRSP_SHARED_MEM_MAP_REGIONS                          0x00012925
+#define AVCS_CMD_SHARED_MEM_UNMAP_REGIONS                           0x00012926
+
+
+#define AVCS_CMD_REGISTER_TOPOLOGIES                                0x00012923
+
+/* The payload for the AVCS_CMD_REGISTER_TOPOLOGIES command */
+struct avcs_cmd_register_topologies {
+	struct apr_hdr hdr;
+	uint32_t                  payload_addr_lsw;
+	/* Lower 32 bits of the topology buffer address. */
+
+	uint32_t                  payload_addr_msw;
+	/* Upper 32 bits of the topology buffer address. */
+
+	uint32_t                  mem_map_handle;
+	/* Unique identifier for an address.
+	 * -This memory map handle is returned by the aDSP through the
+	 * memory map command.
+	 * -NULL mem_map_handle is interpreted as in-band parameter
+	 * passing.
+	 * -Client has the flexibility to choose in-band or out-of-band.
+	 * -Out-of-band is recommended in this case.
+	 */
+
+	uint32_t                  payload_size;
+	/* Size in bytes of the valid data in the topology buffer. */
+} __packed;
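+
+/*
+ * Illustrative sketch (an assumption, not part of the original patch):
+ * out-of-band registration points the command at a previously mapped
+ * topology buffer, e.g.
+ *
+ *	reg.payload_addr_lsw = lower_32_bits(topo_paddr);
+ *	reg.payload_addr_msw = upper_32_bits(topo_paddr);
+ *	reg.mem_map_handle = mem_map_handle;
+ *	reg.payload_size = topo_size;
+ *
+ * where topo_paddr, mem_map_handle and topo_size come from the
+ * caller's earlier memory map command.
+ */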
+
+
+#define AVCS_CMD_DEREGISTER_TOPOLOGIES                                0x0001292a
+
+/* The payload for the AVCS_CMD_DEREGISTER_TOPOLOGIES command */
+struct avcs_cmd_deregister_topologies {
+	struct apr_hdr hdr;
+	uint32_t                  payload_addr_lsw;
+	/* Lower 32 bits of the topology buffer address. */
+
+	uint32_t                  payload_addr_msw;
+	/* Upper 32 bits of the topology buffer address. */
+
+	uint32_t                  mem_map_handle;
+	/* Unique identifier for an address.
+	 * -This memory map handle is returned by the aDSP through the
+	 * memory map command.
+	 * -NULL mem_map_handle is interpreted as in-band parameter
+	 * passing.
+	 * -Client has the flexibility to choose in-band or out-of-band.
+	 * -Out-of-band is recommended in this case.
+	 */
+
+	uint32_t                  payload_size;
+	/* Size in bytes of the valid data in the topology buffer. */
+
+	uint32_t                  mode;
+	/* 1: Deregister selected topologies
+	 * 2: Deregister all topologies
+	 */
+} __packed;
+
+#define AVCS_MODE_DEREGISTER_ALL_CUSTOM_TOPOLOGIES	2
+
+
+int32_t core_set_license(uint32_t key, uint32_t module_id);
+int32_t core_get_license_status(uint32_t module_id);
+
+#define ADSP_MEMORY_MAP_HLOS_PHYSPOOL 4
+#define AVCS_CMD_ADD_POOL_PAGES 0x0001292E
+#define AVCS_CMD_REMOVE_POOL_PAGES 0x0001292F
+
+struct avs_mem_assign_region {
+	struct apr_hdr       hdr;
+	u32                  pool_id;
+	u32                  size;
+	u32                  addr_lsw;
+	u32                  addr_msw;
+} __packed;
+
+#endif /* __Q6CORE_H__ */
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/include/sound/q6lsm.h	2019-10-29 09:26:25.533221674 +0100
@@ -0,0 +1,338 @@
+/*
+ * Copyright (c) 2013-2017, Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#ifndef __Q6LSM_H__
+#define __Q6LSM_H__
+
+#include <linux/list.h>
+#include <linux/msm_ion.h>
+#include <sound/apr_audio-v2.h>
+#include <sound/lsm_params.h>
+#include <linux/qdsp6v2/apr.h>
+
+#define MAX_NUM_CONFIDENCE 20
+
+#define ADM_LSM_PORT_ID 0xADCB
+
+#define LSM_MAX_NUM_CHANNELS 8
+
+typedef void (*lsm_app_cb)(uint32_t opcode, uint32_t token,
+		       uint32_t *payload, void *priv);
+
+struct lsm_sound_model {
+	dma_addr_t      phys;
+	void		*data;
+	size_t		size; /* size of buffer */
+	uint32_t	actual_size; /* actual number of bytes read by DSP */
+	struct ion_handle *handle;
+	struct ion_client *client;
+	uint32_t	mem_map_handle;
+};
+
+struct snd_lsm_event_status_v2 {
+	uint16_t status;
+	uint16_t payload_size;
+	uint8_t  confidence_value[0];
+};
+
+struct lsm_lab_buffer {
+	dma_addr_t phys;
+	void *data;
+	size_t size;
+	struct ion_handle *handle;
+	struct ion_client *client;
+	uint32_t mem_map_handle;
+};
+
+struct lsm_hw_params {
+	u16 sample_rate;
+	u16 sample_size;
+	u32 buf_sz;
+	u32 period_count;
+	u16 num_chs;
+};
+
+struct lsm_client {
+	int		session;
+	lsm_app_cb	cb;
+	atomic_t	cmd_state;
+	void		*priv;
+	struct apr_svc  *apr;
+	struct apr_svc  *mmap_apr;
+	struct mutex    cmd_lock;
+	struct lsm_sound_model sound_model;
+	wait_queue_head_t cmd_wait;
+	uint32_t	cmd_err_code;
+	uint16_t	mode;
+	uint16_t	connect_to_port;
+	uint8_t		num_confidence_levels;
+	uint8_t		*confidence_levels;
+	bool		opened;
+	bool		started;
+	dma_addr_t	lsm_cal_phy_addr;
+	uint32_t	lsm_cal_size;
+	uint32_t	app_id;
+	bool		lab_enable;
+	bool		lab_started;
+	struct lsm_lab_buffer *lab_buffer;
+	struct lsm_hw_params hw_params;
+	bool		use_topology;
+	int		session_state;
+	bool		poll_enable;
+	int		perf_mode;
+	uint32_t	event_mode;
+};
+
+struct lsm_stream_cmd_open_tx {
+	struct apr_hdr  hdr;
+	uint16_t	app_id;
+	uint16_t	reserved;
+	uint32_t	sampling_rate;
+} __packed;
+
+struct lsm_stream_cmd_open_tx_v2 {
+	struct apr_hdr hdr;
+	uint32_t	topology_id;
+} __packed;
+
+struct lsm_custom_topologies {
+	struct apr_hdr hdr;
+	uint32_t data_payload_addr_lsw;
+	uint32_t data_payload_addr_msw;
+	uint32_t mem_map_handle;
+	uint32_t buffer_size;
+} __packed;
+
+struct lsm_param_size_reserved {
+	uint16_t param_size;
+	uint16_t reserved;
+} __packed;
+
+union lsm_param_size {
+	uint32_t param_size;
+	struct lsm_param_size_reserved sr;
+} __packed;
+
+struct lsm_param_payload_common {
+	uint32_t	module_id;
+	uint32_t	param_id;
+	union lsm_param_size p_size;
+} __packed;
+
+struct lsm_param_op_mode {
+	struct lsm_param_payload_common common;
+	uint32_t	minor_version;
+	uint16_t	mode;
+	uint16_t	reserved;
+} __packed;
+
+struct lsm_param_connect_to_port {
+	struct lsm_param_payload_common common;
+	uint32_t	minor_version;
+	/* AFE port id that receives voice wake up data */
+	uint16_t	port_id;
+	uint16_t	reserved;
+} __packed;
+
+struct lsm_param_poll_enable {
+	struct lsm_param_payload_common common;
+	uint32_t	minor_version;
+	/* indicates to voice wakeup whether HW MAD/SW polling is enabled */
+	uint32_t	polling_enable;
+} __packed;
+
+struct lsm_param_fwk_mode_cfg {
+	struct lsm_param_payload_common common;
+	uint32_t	minor_version;
+	uint32_t	mode;
+} __packed;
+
+struct lsm_param_media_fmt {
+	struct lsm_param_payload_common common;
+	uint32_t	minor_version;
+	uint32_t	sample_rate;
+	uint16_t	num_channels;
+	uint16_t	bit_width;
+	uint8_t		channel_mapping[LSM_MAX_NUM_CHANNELS];
+} __packed;
+
+/*
+ * This param cannot be sent in this form alone.
+ * The actual confidence level values need to be
+ * appended to this param payload.
+ */
+struct lsm_param_min_confidence_levels {
+	struct lsm_param_payload_common common;
+	uint8_t		num_confidence_levels;
+} __packed;
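+
+/*
+ * Illustrative sketch (an assumption, not part of the original patch):
+ * with num_confidence_levels values appended, the effective payload
+ * size becomes
+ *
+ *	sizeof(struct lsm_param_min_confidence_levels) +
+ *		num_confidence_levels
+ *
+ * padded as required by the transport.
+ */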
+
+struct lsm_set_params_hdr {
+	uint32_t	data_payload_size;
+	uint32_t	data_payload_addr_lsw;
+	uint32_t	data_payload_addr_msw;
+	uint32_t	mem_map_handle;
+} __packed;
+
+struct lsm_cmd_set_params {
+	struct apr_hdr  msg_hdr;
+	struct lsm_set_params_hdr param_hdr;
+} __packed;
+
+struct lsm_cmd_set_params_conf {
+	struct apr_hdr  msg_hdr;
+	struct lsm_set_params_hdr params_hdr;
+	struct lsm_param_min_confidence_levels	conf_payload;
+} __packed;
+
+struct lsm_cmd_set_params_opmode {
+	struct apr_hdr  msg_hdr;
+	struct lsm_set_params_hdr params_hdr;
+	struct lsm_param_op_mode op_mode;
+} __packed;
+
+struct lsm_cmd_set_connectport {
+	struct apr_hdr msg_hdr;
+	struct lsm_set_params_hdr params_hdr;
+	struct lsm_param_connect_to_port connect_to_port;
+} __packed;
+
+struct lsm_cmd_poll_enable {
+	struct apr_hdr  msg_hdr;
+	struct lsm_set_params_hdr params_hdr;
+	struct lsm_param_poll_enable poll_enable;
+} __packed;
+
+struct lsm_param_epd_thres {
+	struct lsm_param_payload_common common;
+	uint32_t	minor_version;
+	uint32_t	epd_begin;
+	uint32_t	epd_end;
+} __packed;
+
+struct lsm_cmd_set_epd_threshold {
+	struct apr_hdr msg_hdr;
+	struct lsm_set_params_hdr param_hdr;
+	struct lsm_param_epd_thres epd_thres;
+} __packed;
+
+struct lsm_param_gain {
+	struct lsm_param_payload_common common;
+	uint32_t	minor_version;
+	uint16_t	gain;
+	uint16_t	reserved;
+} __packed;
+
+struct lsm_cmd_set_gain {
+	struct apr_hdr msg_hdr;
+	struct lsm_set_params_hdr param_hdr;
+	struct lsm_param_gain lsm_gain;
+} __packed;
+
+struct lsm_cmd_reg_snd_model {
+	struct apr_hdr	hdr;
+	uint32_t	model_size;
+	uint32_t	model_addr_lsw;
+	uint32_t	model_addr_msw;
+	uint32_t	mem_map_handle;
+} __packed;
+
+struct lsm_lab_enable {
+	struct lsm_param_payload_common common;
+	uint16_t enable;
+	uint16_t reserved;
+} __packed;
+
+struct lsm_params_lab_enable {
+	struct apr_hdr msg_hdr;
+	struct lsm_set_params_hdr params_hdr;
+	struct lsm_lab_enable lab_enable;
+} __packed;
+
+struct lsm_lab_config {
+	struct lsm_param_payload_common common;
+	uint32_t minor_version;
+	uint32_t wake_up_latency_ms;
+} __packed;
+
+
+struct lsm_params_lab_config {
+	struct apr_hdr  msg_hdr;
+	struct lsm_set_params_hdr params_hdr;
+	struct lsm_lab_config lab_config;
+} __packed;
+
+struct lsm_cmd_read {
+	struct apr_hdr hdr;
+	uint32_t buf_addr_lsw;
+	uint32_t buf_addr_msw;
+	uint32_t mem_map_handle;
+	uint32_t buf_size;
+} __packed;
+
+struct lsm_cmd_read_done {
+	struct apr_hdr hdr;
+	uint32_t status;
+	uint32_t buf_addr_lsw;
+	uint32_t buf_addr_msw;
+	uint32_t mem_map_handle;
+	uint32_t total_size;
+	uint32_t offset;
+	uint32_t timestamp_lsw;
+	uint32_t timestamp_msw;
+	uint32_t flags;
+} __packed;
+
+struct lsm_cmd_set_fwk_mode_cfg {
+	struct apr_hdr  msg_hdr;
+	struct lsm_set_params_hdr params_hdr;
+	struct lsm_param_fwk_mode_cfg fwk_mode_cfg;
+} __packed;
+
+struct lsm_cmd_set_media_fmt {
+	struct apr_hdr  msg_hdr;
+	struct lsm_set_params_hdr params_hdr;
+	struct lsm_param_media_fmt media_fmt;
+} __packed;
+
+
+struct lsm_client *q6lsm_client_alloc(lsm_app_cb cb, void *priv);
+void q6lsm_client_free(struct lsm_client *client);
+int q6lsm_open(struct lsm_client *client, uint16_t app_id);
+int q6lsm_start(struct lsm_client *client, bool wait);
+int q6lsm_stop(struct lsm_client *client, bool wait);
+int q6lsm_snd_model_buf_alloc(struct lsm_client *client, size_t len,
+			      bool allocate_module_data);
+int q6lsm_snd_model_buf_free(struct lsm_client *client);
+int q6lsm_close(struct lsm_client *client);
+int q6lsm_register_sound_model(struct lsm_client *client,
+			       enum lsm_detection_mode mode,
+			       bool detectfailure);
+int q6lsm_set_data(struct lsm_client *client,
+		   enum lsm_detection_mode mode,
+		   bool detectfailure);
+int q6lsm_deregister_sound_model(struct lsm_client *client);
+void set_lsm_port(int lsm_port);
+int get_lsm_port(void);
+int q6lsm_lab_control(struct lsm_client *client, u32 enable);
+int q6lsm_stop_lab(struct lsm_client *client);
+int q6lsm_read(struct lsm_client *client, struct lsm_cmd_read *read);
+int q6lsm_lab_buffer_alloc(struct lsm_client *client, bool alloc);
+int q6lsm_set_one_param(struct lsm_client *client,
+			struct lsm_params_info *p_info, void *data,
+			uint32_t param_type);
+void q6lsm_sm_set_param_data(struct lsm_client *client,
+		struct lsm_params_info *p_info,
+		size_t *offset);
+int q6lsm_set_port_connected(struct lsm_client *client);
+int q6lsm_set_fwk_mode_cfg(struct lsm_client *client, uint32_t event_mode);
+int q6lsm_set_media_fmt_params(struct lsm_client *client);
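+
+/*
+ * Illustrative sketch (an assumption, not part of the original patch):
+ * a minimal listen session built from this API:
+ *
+ *	struct lsm_client *client = q6lsm_client_alloc(lsm_cb, prtd);
+ *
+ *	ret = q6lsm_open(client, app_id);
+ *	ret = q6lsm_register_sound_model(client, mode, false);
+ *	ret = q6lsm_start(client, true);
+ *	...
+ *	ret = q6lsm_stop(client, true);
+ *	ret = q6lsm_deregister_sound_model(client);
+ *	ret = q6lsm_close(client);
+ *	q6lsm_client_free(client);
+ *
+ * where lsm_cb, prtd, app_id and mode (an enum lsm_detection_mode)
+ * are caller-provided.
+ */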
+#endif /* __Q6LSM_H__ */
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/include/sound/voice_params.h	2019-01-22 16:16:28.511291757 +0100
@@ -0,0 +1,14 @@
+#ifndef __VOICE_PARAMS_H__
+#define __VOICE_PARAMS_H__
+
+#include <linux/types.h>
+#include <sound/asound.h>
+
+enum voice_lch_mode {
+	VOICE_LCH_START = 1,
+	VOICE_LCH_STOP
+};
+
+#define SNDRV_VOICE_IOCTL_LCH _IOW('U', 0x00, enum voice_lch_mode)
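+
+/*
+ * Illustrative userspace sketch (an assumption, not part of the
+ * original patch):
+ *
+ *	enum voice_lch_mode mode = VOICE_LCH_START;
+ *
+ *	ret = ioctl(fd, SNDRV_VOICE_IOCTL_LCH, &mode);
+ */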
+
+#endif
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/include/sound/voice_svc.h	2019-01-22 16:16:28.515291793 +0100
@@ -0,0 +1,47 @@
+#ifndef __VOICE_SVC_H__
+#define __VOICE_SVC_H__
+
+#include <linux/types.h>
+#include <linux/ioctl.h>
+
+#define VOICE_SVC_DRIVER_NAME "voice_svc"
+
+#define VOICE_SVC_MVM_STR "MVM"
+#define VOICE_SVC_CVS_STR "CVS"
+#define MAX_APR_SERVICE_NAME_LEN  64
+
+#define MSG_REGISTER 0x1
+#define MSG_REQUEST  0x2
+#define MSG_RESPONSE 0x3
+
+struct voice_svc_write_msg {
+	__u32 msg_type;
+	__u8 payload[0];
+};
+
+struct voice_svc_register {
+	char svc_name[MAX_APR_SERVICE_NAME_LEN];
+	__u32 src_port;
+	__u8 reg_flag;
+};
+
+struct voice_svc_cmd_response {
+	__u32 src_port;
+	__u32 dest_port;
+	__u32 token;
+	__u32 opcode;
+	__u32 payload_size;
+	__u8 payload[0];
+};
+
+struct voice_svc_cmd_request {
+	char svc_name[MAX_APR_SERVICE_NAME_LEN];
+	__u32 src_port;
+	__u32 dest_port;
+	__u32 token;
+	__u32 opcode;
+	__u32 payload_size;
+	__u8 payload[0];
+};
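+
+/*
+ * Illustrative userspace sketch (an assumption, not part of the
+ * original patch): a write() to the voice_svc device carries a
+ * voice_svc_write_msg header whose payload is selected by msg_type,
+ * e.g. for registration:
+ *
+ *	char buf[sizeof(struct voice_svc_write_msg) +
+ *		 sizeof(struct voice_svc_register)] = { 0 };
+ *	struct voice_svc_write_msg *msg = (void *)buf;
+ *	struct voice_svc_register *reg = (void *)msg->payload;
+ *
+ *	msg->msg_type = MSG_REGISTER;
+ *	strncpy(reg->svc_name, VOICE_SVC_MVM_STR,
+ *		sizeof(reg->svc_name) - 1);
+ *	reg->reg_flag = 1;
+ *	nwritten = write(fd, buf, sizeof(buf));
+ */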
+
+#endif
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/include/sound/wcd-dsp-mgr.h	2019-01-22 16:16:28.515291793 +0100
@@ -0,0 +1,136 @@
+/*
+ * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __WCD_DSP_MGR_H__
+#define __WCD_DSP_MGR_H__
+
+#include <linux/types.h>
+
+/*
+ * These enums correspond to the component types
+ * that wcd-dsp-manager driver will use. The order
+ * of the enums specifies the order in which the
+ * manager driver will perform the sequencing.
+ * Changing this will cause the sequencing order
+ * to be changed as well.
+ */
+enum wdsp_cmpnt_type {
+	/* Component to control the DSP */
+	WDSP_CMPNT_CONTROL = 0,
+	/* Component to perform data transfer to/from DSP */
+	WDSP_CMPNT_TRANSPORT,
+	/* Component that performs high level IPC */
+	WDSP_CMPNT_IPC,
+
+	WDSP_CMPNT_TYPE_MAX,
+};
+
+enum wdsp_event_type {
+	/* Initialization related */
+	WDSP_EVENT_POST_INIT,
+
+	/* Image download related */
+	WDSP_EVENT_PRE_DLOAD_CODE,
+	WDSP_EVENT_DLOAD_SECTION,
+	WDSP_EVENT_POST_DLOAD_CODE,
+	WDSP_EVENT_PRE_DLOAD_DATA,
+	WDSP_EVENT_POST_DLOAD_DATA,
+	WDSP_EVENT_DLOAD_FAILED,
+
+	WDSP_EVENT_READ_SECTION,
+
+	/* DSP boot related */
+	WDSP_EVENT_PRE_BOOTUP,
+	WDSP_EVENT_DO_BOOT,
+	WDSP_EVENT_POST_BOOTUP,
+	WDSP_EVENT_PRE_SHUTDOWN,
+	WDSP_EVENT_DO_SHUTDOWN,
+	WDSP_EVENT_POST_SHUTDOWN,
+
+	/* IRQ handling related */
+	WDSP_EVENT_IPC1_INTR,
+
+	/* Suspend/Resume related */
+	WDSP_EVENT_SUSPEND,
+	WDSP_EVENT_RESUME,
+};
+
+enum wdsp_signal {
+	/* Hardware generated interrupts signalled to manager */
+	WDSP_IPC1_INTR,
+	WDSP_ERR_INTR,
+
+	/* Other signals */
+	WDSP_CDC_DOWN_SIGNAL,
+	WDSP_CDC_UP_SIGNAL,
+};
+
+/*
+ * wdsp_cmpnt_ops: ops/function callbacks for components
+ * @init: called by manager driver, component is expected
+ *	  to initialize itself in this callback
+ * @deinit: called by manager driver, component should
+ *	    de-initialize itself in this callback
+ * @event_handler: Event handler for each component, called
+ *		   by the manager as per sequence
+ */
+struct wdsp_cmpnt_ops {
+	int (*init)(struct device *, void *priv_data);
+	int (*deinit)(struct device *, void *priv_data);
+	int (*event_handler)(struct device *, void *priv_data,
+			     enum wdsp_event_type, void *data);
+};
+
+struct wdsp_img_section {
+	u32 addr;
+	size_t size;
+	u8 *data;
+};
+
+struct wdsp_err_signal_arg {
+	bool mem_dumps_enabled;
+	u32 remote_start_addr;
+	size_t dump_size;
+};
+
+/*
+ * wdsp_mgr_ops: ops/function callbacks for the manager driver
+ * @register_cmpnt_ops: components will use this to register
+ *			their own ops to manager driver
+ * @get_dev_for_cmpnt: components can use this to get handle
+ *		       to struct device * of any other component
+ * @signal_handler: callback to notify manager driver that signal
+ *		    has occurred. Cannot be called from interrupt
+ *		    context as this can sleep
+ * @vote_for_dsp: notifies manager that dsp should be booted up
+ * @suspend: notifies manager that one component wants to suspend.
+ *	     Manager will make sure to suspend all components in order
+ * @resume: notifies manager that one component wants to resume.
+ *	    Manager will make sure to resume all components in order
+ */
+
+struct wdsp_mgr_ops {
+	int (*register_cmpnt_ops)(struct device *wdsp_dev,
+				  struct device *cdev,
+				  void *priv_data,
+				  struct wdsp_cmpnt_ops *ops);
+	struct device *(*get_dev_for_cmpnt)(struct device *wdsp_dev,
+					    enum wdsp_cmpnt_type type);
+	int (*signal_handler)(struct device *wdsp_dev,
+			      enum wdsp_signal signal, void *arg);
+	int (*vote_for_dsp)(struct device *wdsp_dev, bool vote);
+	int (*suspend)(struct device *wdsp_dev);
+	int (*resume)(struct device *wdsp_dev);
+};
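+
+/*
+ * Illustrative sketch (an assumption, not part of the original patch):
+ * a transport component could register itself with the manager via
+ *
+ *	static struct wdsp_cmpnt_ops trans_ops = {
+ *		.init = trans_init,
+ *		.deinit = trans_deinit,
+ *		.event_handler = trans_event_handler,
+ *	};
+ *
+ *	ret = mgr_ops->register_cmpnt_ops(wdsp_dev, &pdev->dev,
+ *					  trans_priv, &trans_ops);
+ *
+ * where the trans_* callbacks, wdsp_dev and trans_priv are
+ * caller-provided.
+ */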
+
+#endif /* end of __WCD_DSP_MGR_H__ */
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/include/sound/wcd-spi.h	2019-01-22 16:16:28.515291793 +0100
@@ -0,0 +1,57 @@
+/*
+ * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __WCD_SPI_H__
+#define __WCD_SPI_H__
+
+#include <linux/types.h>
+
+struct spi_device;
+
+struct wcd_spi_msg {
+	/*
+	 * Caller's buffer pointer that holds data to
+	 * be transmitted in case of data_write and
+	 * data to be copied to in case of data_read.
+	 */
+	void *data;
+
+	/* Length of data to write/read */
+	size_t len;
+
+	/*
+	 * Address in remote memory to write to
+	 * or read from.
+	 */
+	u32 remote_addr;
+
+	/* Bitmask of flags, currently unused */
+	u32 flags;
+};
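+
+/*
+ * Illustrative sketch (an assumption, not part of the original patch):
+ * writing one image section to codec memory might look like
+ *
+ *	struct wcd_spi_msg msg = {
+ *		.data = sec->data,
+ *		.len = sec->size,
+ *		.remote_addr = sec->addr,
+ *	};
+ *
+ *	ret = wcd_spi_data_write(spi, &msg);
+ *
+ * where spi and sec (e.g. a struct wdsp_img_section) are
+ * caller-provided.
+ */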
+
+#ifdef CONFIG_SND_SOC_WCD_SPI
+
+int wcd_spi_data_write(struct spi_device *spi, struct wcd_spi_msg *msg);
+int wcd_spi_data_read(struct spi_device *spi, struct wcd_spi_msg *msg);
+
+#else
+
+static inline int wcd_spi_data_write(struct spi_device *spi,
+				     struct wcd_spi_msg *msg)
+{
+	return -ENODEV;
+}
+
+static inline int wcd_spi_data_read(struct spi_device *spi,
+				    struct wcd_spi_msg *msg)
+{
+	return -ENODEV;
+}
+
+#endif /* End of CONFIG_SND_SOC_WCD_SPI */
+
+#endif /* End of __WCD_SPI_H__ */
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/include/trace/events/almk.h	2019-01-22 16:16:28.519291829 +0100
@@ -0,0 +1,84 @@
+/* Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM almk
+
+#if !defined(_TRACE_EVENT_ALMK_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_EVENT_ALMK_H
+
+#include <linux/tracepoint.h>
+#include <linux/types.h>
+
+TRACE_EVENT(almk_vmpressure,
+
+	TP_PROTO(unsigned long pressure,
+		int other_free,
+		int other_file),
+
+	TP_ARGS(pressure, other_free, other_file),
+
+	TP_STRUCT__entry(
+		__field(unsigned long, pressure)
+		__field(int, other_free)
+		__field(int, other_file)
+	),
+
+	TP_fast_assign(
+		__entry->pressure	= pressure;
+		__entry->other_free	= other_free;
+		__entry->other_file	= other_file;
+	),
+
+	TP_printk("%lu, %d, %d",
+			__entry->pressure, __entry->other_free,
+			__entry->other_file)
+);
+
+TRACE_EVENT(almk_shrink,
+
+	TP_PROTO(int tsize,
+		 int vmp,
+		 int other_free,
+		 int other_file,
+		 short adj),
+
+	TP_ARGS(tsize, vmp, other_free, other_file, adj),
+
+	TP_STRUCT__entry(
+		__field(int, tsize)
+		__field(int, vmp)
+		__field(int, other_free)
+		__field(int, other_file)
+		__field(short, adj)
+	),
+
+	TP_fast_assign(
+		__entry->tsize		= tsize;
+		__entry->vmp		= vmp;
+		__entry->other_free     = other_free;
+		__entry->other_file     = other_file;
+		__entry->adj		= adj;
+	),
+
+	TP_printk("%d, %d, %d, %d, %d",
+		__entry->tsize,
+		__entry->vmp,
+		__entry->other_free,
+		__entry->other_file,
+		__entry->adj)
+);
+
+#endif /* _TRACE_EVENT_ALMK_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/include/trace/events/android_fs.h	2019-01-22 16:16:28.519291829 +0100
@@ -0,0 +1,65 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM android_fs
+
+#if !defined(_TRACE_ANDROID_FS_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_ANDROID_FS_H
+
+#include <linux/tracepoint.h>
+#include <trace/events/android_fs_template.h>
+
+DEFINE_EVENT(android_fs_data_start_template, android_fs_dataread_start,
+	TP_PROTO(struct inode *inode, loff_t offset, int bytes,
+		 pid_t pid, char *pathname, char *command),
+	TP_ARGS(inode, offset, bytes, pid, pathname, command));
+
+DEFINE_EVENT(android_fs_data_end_template, android_fs_dataread_end,
+	TP_PROTO(struct inode *inode, loff_t offset, int bytes),
+	TP_ARGS(inode, offset, bytes));
+
+DEFINE_EVENT(android_fs_data_start_template, android_fs_datawrite_start,
+	TP_PROTO(struct inode *inode, loff_t offset, int bytes,
+		 pid_t pid, char *pathname, char *command),
+	TP_ARGS(inode, offset, bytes, pid, pathname, command));
+
+DEFINE_EVENT(android_fs_data_end_template, android_fs_datawrite_end,
+	TP_PROTO(struct inode *inode, loff_t offset, int bytes),
+	     TP_ARGS(inode, offset, bytes));
+
+#endif /* _TRACE_ANDROID_FS_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
+
+#ifndef ANDROID_FSTRACE_GET_PATHNAME
+#define ANDROID_FSTRACE_GET_PATHNAME
+
+/* Sizes an on-stack array, so be careful when sizing this up! */
+#define MAX_TRACE_PATHBUF_LEN	256
+
+static inline char *
+android_fstrace_get_pathname(char *buf, int buflen, struct inode *inode)
+{
+	char *path;
+	struct dentry *d;
+
+	/*
+	 * d_obtain_alias() will either iput() if it locates an existing
+	 * dentry or transfer the reference to the new dentry created.
+	 * So get an extra reference here.
+	 */
+	ihold(inode);
+	d = d_obtain_alias(inode);
+	if (likely(!IS_ERR(d))) {
+		path = dentry_path_raw(d, buf, buflen);
+		if (unlikely(IS_ERR(path))) {
+			strcpy(buf, "ERROR");
+			path = buf;
+		}
+		dput(d);
+	} else {
+		strcpy(buf, "ERROR");
+		path = buf;
+	}
+	return path;
+}
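+
+/*
+ * Illustrative sketch (an assumption, not part of the original patch):
+ * a filesystem read path would typically emit the start/end pair as
+ *
+ *	char *path, pathbuf[MAX_TRACE_PATHBUF_LEN];
+ *
+ *	path = android_fstrace_get_pathname(pathbuf,
+ *					    MAX_TRACE_PATHBUF_LEN, inode);
+ *	trace_android_fs_dataread_start(inode, pos, count,
+ *					current->pid, path, current->comm);
+ *	...
+ *	trace_android_fs_dataread_end(inode, pos, count);
+ */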
+#endif
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/include/trace/events/android_fs_template.h	2019-01-22 16:16:28.519291829 +0100
@@ -0,0 +1,64 @@
+#if !defined(_TRACE_ANDROID_FS_TEMPLATE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_ANDROID_FS_TEMPLATE_H
+
+#include <linux/tracepoint.h>
+
+DECLARE_EVENT_CLASS(android_fs_data_start_template,
+	TP_PROTO(struct inode *inode, loff_t offset, int bytes,
+		 pid_t pid, char *pathname, char *command),
+	TP_ARGS(inode, offset, bytes, pid, pathname, command),
+	TP_STRUCT__entry(
+		__string(pathbuf, pathname);
+		__field(loff_t,	offset);
+		__field(int,	bytes);
+		__field(loff_t,	i_size);
+		__string(cmdline, command);
+		__field(pid_t,	pid);
+		__field(ino_t,	ino);
+	),
+	TP_fast_assign(
+		{
+			/*
+			 * Replace the spaces in filenames and cmdlines
+			 * because this screws up the tooling that parses
+			 * the traces.
+			 */
+			__assign_str(pathbuf, pathname);
+			(void)strreplace(__get_str(pathbuf), ' ', '_');
+			__entry->offset		= offset;
+			__entry->bytes		= bytes;
+			__entry->i_size		= i_size_read(inode);
+			__assign_str(cmdline, command);
+			(void)strreplace(__get_str(cmdline), ' ', '_');
+			__entry->pid		= pid;
+			__entry->ino		= inode->i_ino;
+		}
+	),
+	TP_printk("entry_name %s, offset %llu, bytes %d, cmdline %s,"
+		  " pid %d, i_size %llu, ino %lu",
+		  __get_str(pathbuf), __entry->offset, __entry->bytes,
+		  __get_str(cmdline), __entry->pid, __entry->i_size,
+		  (unsigned long) __entry->ino)
+);
+
+DECLARE_EVENT_CLASS(android_fs_data_end_template,
+	TP_PROTO(struct inode *inode, loff_t offset, int bytes),
+	TP_ARGS(inode, offset, bytes),
+	TP_STRUCT__entry(
+		__field(ino_t,	ino);
+		__field(loff_t,	offset);
+		__field(int,	bytes);
+	),
+	TP_fast_assign(
+		{
+			__entry->ino		= inode->i_ino;
+			__entry->offset		= offset;
+			__entry->bytes		= bytes;
+		}
+	),
+	TP_printk("ino %lu, offset %llu, bytes %d",
+		  (unsigned long) __entry->ino,
+		  __entry->offset, __entry->bytes)
+);
+
+#endif /* _TRACE_ANDROID_FS_TEMPLATE_H */
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/include/trace/events/cpufreq_interactive.h	2019-01-22 16:16:28.519291829 +0100
@@ -0,0 +1,148 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM cpufreq_interactive
+
+#if !defined(_TRACE_CPUFREQ_INTERACTIVE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_CPUFREQ_INTERACTIVE_H
+
+#include <linux/tracepoint.h>
+
+DECLARE_EVENT_CLASS(set,
+	TP_PROTO(u32 cpu_id, unsigned long targfreq,
+	         unsigned long actualfreq),
+	TP_ARGS(cpu_id, targfreq, actualfreq),
+
+	TP_STRUCT__entry(
+	    __field(          u32, cpu_id    )
+	    __field(unsigned long, targfreq   )
+	    __field(unsigned long, actualfreq )
+	   ),
+
+	TP_fast_assign(
+	    __entry->cpu_id = (u32) cpu_id;
+	    __entry->targfreq = targfreq;
+	    __entry->actualfreq = actualfreq;
+	),
+
+	TP_printk("cpu=%u targ=%lu actual=%lu",
+	      __entry->cpu_id, __entry->targfreq,
+	      __entry->actualfreq)
+);
+
+DEFINE_EVENT(set, cpufreq_interactive_setspeed,
+	TP_PROTO(u32 cpu_id, unsigned long targfreq,
+	     unsigned long actualfreq),
+	TP_ARGS(cpu_id, targfreq, actualfreq)
+);
+
+DECLARE_EVENT_CLASS(loadeval,
+	    TP_PROTO(unsigned long cpu_id, unsigned long load,
+		     unsigned long curtarg, unsigned long curactual,
+		     unsigned long newtarg),
+		    TP_ARGS(cpu_id, load, curtarg, curactual, newtarg),
+
+	    TP_STRUCT__entry(
+		    __field(unsigned long, cpu_id    )
+		    __field(unsigned long, load      )
+		    __field(unsigned long, curtarg   )
+		    __field(unsigned long, curactual )
+		    __field(unsigned long, newtarg   )
+	    ),
+
+	    TP_fast_assign(
+		    __entry->cpu_id = cpu_id;
+		    __entry->load = load;
+		    __entry->curtarg = curtarg;
+		    __entry->curactual = curactual;
+		    __entry->newtarg = newtarg;
+	    ),
+
+	    TP_printk("cpu=%lu load=%lu cur=%lu actual=%lu targ=%lu",
+		      __entry->cpu_id, __entry->load, __entry->curtarg,
+		      __entry->curactual, __entry->newtarg)
+);
+
+DEFINE_EVENT(loadeval, cpufreq_interactive_target,
+	    TP_PROTO(unsigned long cpu_id, unsigned long load,
+		     unsigned long curtarg, unsigned long curactual,
+		     unsigned long newtarg),
+	    TP_ARGS(cpu_id, load, curtarg, curactual, newtarg)
+);
+
+DEFINE_EVENT(loadeval, cpufreq_interactive_already,
+	    TP_PROTO(unsigned long cpu_id, unsigned long load,
+		     unsigned long curtarg, unsigned long curactual,
+		     unsigned long newtarg),
+	    TP_ARGS(cpu_id, load, curtarg, curactual, newtarg)
+);
+
+DEFINE_EVENT(loadeval, cpufreq_interactive_notyet,
+	    TP_PROTO(unsigned long cpu_id, unsigned long load,
+		     unsigned long curtarg, unsigned long curactual,
+		     unsigned long newtarg),
+	    TP_ARGS(cpu_id, load, curtarg, curactual, newtarg)
+);
+
+TRACE_EVENT(cpufreq_interactive_boost,
+	    TP_PROTO(const char *s),
+	    TP_ARGS(s),
+	    TP_STRUCT__entry(
+		    __string(s, s)
+	    ),
+	    TP_fast_assign(
+		    __assign_str(s, s);
+	    ),
+	    TP_printk("%s", __get_str(s))
+);
+
+TRACE_EVENT(cpufreq_interactive_unboost,
+	    TP_PROTO(const char *s),
+	    TP_ARGS(s),
+	    TP_STRUCT__entry(
+		    __string(s, s)
+	    ),
+	    TP_fast_assign(
+		    __assign_str(s, s);
+	    ),
+	    TP_printk("%s", __get_str(s))
+);
+
+TRACE_EVENT(cpufreq_interactive_load_change,
+	    TP_PROTO(unsigned long cpu_id),
+	    TP_ARGS(cpu_id),
+	    TP_STRUCT__entry(
+		__field(unsigned long, cpu_id)
+	    ),
+	    TP_fast_assign(
+		__entry->cpu_id = cpu_id;
+	    ),
+	    TP_printk("re-evaluate for cpu=%lu", __entry->cpu_id)
+);
+
+TRACE_EVENT(cpufreq_interactive_cpuload,
+	    TP_PROTO(unsigned long cpu_id, unsigned int load,
+		     unsigned int new_task_pct, unsigned int prev,
+		     unsigned int predicted),
+	    TP_ARGS(cpu_id, load, new_task_pct, prev, predicted),
+	    TP_STRUCT__entry(
+		__field(unsigned long, cpu_id)
+		__field(unsigned int, load)
+		__field(unsigned int, new_task_pct)
+		__field(unsigned int, prev)
+		__field(unsigned int, predicted)
+	    ),
+	    TP_fast_assign(
+		__entry->cpu_id = cpu_id;
+		__entry->load = load;
+		__entry->new_task_pct = new_task_pct;
+		__entry->prev = prev;
+		__entry->predicted = predicted;
+	    ),
+	    TP_printk("cpu=%lu load=%u new_task_pct=%u prev=%u predicted=%u",
+		      __entry->cpu_id, __entry->load, __entry->new_task_pct,
+		      __entry->prev, __entry->predicted)
+);
+
+#endif /* _TRACE_CPUFREQ_INTERACTIVE_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
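
Each TRACE_EVENT/DEFINE_EVENT above yields a callable trace_<name>() hook. A hedged sketch of how a governor might emit the setspeed event; the wrapper function is illustrative, and exactly one .c file must define CREATE_TRACE_POINTS before including the header so the tracepoints are instantiated:

    #define CREATE_TRACE_POINTS	/* in exactly one compilation unit */
    #include <trace/events/cpufreq_interactive.h>

    /* Illustrative call site: report a requested vs. granted frequency. */
    static void example_report_setspeed(u32 cpu, unsigned long target,
    				    unsigned long actual)
    {
    	trace_cpufreq_interactive_setspeed(cpu, target, actual);
    }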
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/include/trace/events/exception.h	2019-01-22 16:16:28.519291829 +0100
@@ -0,0 +1,124 @@
+/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM exception
+
+#if !defined(_TRACE_EXCEPTION_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_EXCEPTION_H
+
+#include <linux/tracepoint.h>
+
+struct task_struct;
+
+TRACE_EVENT(user_fault,
+
+	TP_PROTO(struct task_struct *tsk, unsigned long addr, unsigned int fsr),
+
+	TP_ARGS(tsk, addr, fsr),
+
+	TP_STRUCT__entry(
+		__string(task_name, tsk->comm)
+		__field(unsigned long, addr)
+		__field(unsigned int, fsr)
+	),
+
+	TP_fast_assign(
+		__assign_str(task_name, tsk->comm);
+		__entry->addr	= addr;
+		__entry->fsr	= fsr;
+	),
+
+	TP_printk("task_name:%s addr:%lu, fsr:%u", __get_str(task_name),
+		__entry->addr, __entry->fsr)
+);
+
+
+struct pt_regs;
+
+TRACE_EVENT(undef_instr,
+
+	TP_PROTO(struct pt_regs *regs, void *prog_cnt),
+
+	TP_ARGS(regs, prog_cnt),
+
+	TP_STRUCT__entry(
+		__field(void *, prog_cnt)
+		__field(struct pt_regs *, regs)
+	),
+
+	TP_fast_assign(
+		__entry->regs		= regs;
+		__entry->prog_cnt	= prog_cnt;
+	),
+
+	TP_printk("pc:%p", __entry->prog_cnt)
+);
+
+TRACE_EVENT(unhandled_abort,
+
+	TP_PROTO(struct pt_regs *regs, unsigned long addr, unsigned int fsr),
+
+	TP_ARGS(regs, addr, fsr),
+
+	TP_STRUCT__entry(
+		__field(struct pt_regs *, regs)
+		__field(unsigned long, addr)
+		__field(unsigned int, fsr)
+	),
+
+	TP_fast_assign(
+		__entry->regs	= regs;
+		__entry->addr	= addr;
+		__entry->fsr	= fsr;
+	),
+
+	TP_printk("addr:%lu, fsr:%u", __entry->addr, __entry->fsr)
+);
+
+TRACE_EVENT(kernel_panic,
+
+	TP_PROTO(long dummy),
+
+	TP_ARGS(dummy),
+
+	TP_STRUCT__entry(
+		__field(long, dummy)
+	),
+
+	TP_fast_assign(
+		__entry->dummy	= dummy;
+	),
+
+	TP_printk("dummy:%ld", __entry->dummy)
+);
+
+TRACE_EVENT(kernel_panic_late,
+
+	TP_PROTO(long dummy),
+
+	TP_ARGS(dummy),
+
+	TP_STRUCT__entry(
+		__field(long, dummy)
+	),
+
+	TP_fast_assign(
+		__entry->dummy	= dummy;
+	),
+
+	TP_printk("dummy:%ld", __entry->dummy)
+);
+
+#endif
+
+#include <trace/define_trace.h>
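
A hedged sketch of a user_fault call site; the wrapper is illustrative, while the argument types follow the TP_PROTO above:

    #include <trace/events/exception.h>

    /* Illustrative: an arch fault handler reporting a user-space fault. */
    static void example_report_user_fault(struct task_struct *tsk,
    				      unsigned long addr, unsigned int fsr)
    {
    	trace_user_fault(tsk, addr, fsr);
    }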
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/include/trace/events/mmc.h	2019-01-22 16:16:28.523291866 +0100
@@ -0,0 +1,238 @@
+/*
+ * Copyright (C) 2013 Google, Inc.
+ * Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM mmc
+
+#if !defined(_TRACE_MMC_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_MMC_H
+
+#include <linux/tracepoint.h>
+#include <linux/mmc/mmc.h>
+#include <linux/mmc/core.h>
+
+/*
+ * Unconditional logging of mmc block erase operations,
+ * including cmd, address, size
+ */
+DECLARE_EVENT_CLASS(mmc_blk_erase_class,
+	TP_PROTO(unsigned int cmd, unsigned int addr, unsigned int size),
+	TP_ARGS(cmd, addr, size),
+	TP_STRUCT__entry(
+		__field(unsigned int, cmd)
+		__field(unsigned int, addr)
+		__field(unsigned int, size)
+	),
+	TP_fast_assign(
+		__entry->cmd = cmd;
+		__entry->addr = addr;
+		__entry->size = size;
+	),
+	TP_printk("cmd=%u,addr=0x%08x,size=0x%08x",
+		  __entry->cmd, __entry->addr, __entry->size)
+);
+
+DEFINE_EVENT(mmc_blk_erase_class, mmc_blk_erase_start,
+	TP_PROTO(unsigned int cmd, unsigned int addr, unsigned int size),
+	TP_ARGS(cmd, addr, size));
+
+DEFINE_EVENT(mmc_blk_erase_class, mmc_blk_erase_end,
+	TP_PROTO(unsigned int cmd, unsigned int addr, unsigned int size),
+	TP_ARGS(cmd, addr, size));
+
+/*
+ * Logging of start of read or write mmc block operation,
+ * including cmd, address, size
+ */
+DECLARE_EVENT_CLASS(mmc_blk_rw_class,
+	TP_PROTO(unsigned int cmd, unsigned int addr, struct mmc_data *data),
+	TP_ARGS(cmd, addr, data),
+	TP_STRUCT__entry(
+		__field(unsigned int, cmd)
+		__field(unsigned int, addr)
+		__field(unsigned int, size)
+	),
+	TP_fast_assign(
+		__entry->cmd = cmd;
+		__entry->addr = addr;
+		__entry->size = data->blocks;
+	),
+	TP_printk("cmd=%u,addr=0x%08x,size=0x%08x",
+		  __entry->cmd, __entry->addr, __entry->size)
+);
+
+DEFINE_EVENT_CONDITION(mmc_blk_rw_class, mmc_blk_rw_start,
+	TP_PROTO(unsigned int cmd, unsigned int addr, struct mmc_data *data),
+	TP_ARGS(cmd, addr, data),
+	TP_CONDITION(((cmd == MMC_READ_MULTIPLE_BLOCK) ||
+		      (cmd == MMC_WRITE_MULTIPLE_BLOCK)) &&
+		      data));
+
+DEFINE_EVENT_CONDITION(mmc_blk_rw_class, mmc_blk_rw_end,
+	TP_PROTO(unsigned int cmd, unsigned int addr, struct mmc_data *data),
+	TP_ARGS(cmd, addr, data),
+	TP_CONDITION(((cmd == MMC_READ_MULTIPLE_BLOCK) ||
+		      (cmd == MMC_WRITE_MULTIPLE_BLOCK)) &&
+		      data));
+
+TRACE_EVENT(mmc_cmd_rw_start,
+	TP_PROTO(unsigned int cmd, unsigned int arg, unsigned int flags),
+	TP_ARGS(cmd, arg, flags),
+	TP_STRUCT__entry(
+		__field(unsigned int, cmd)
+		__field(unsigned int, arg)
+		__field(unsigned int, flags)
+	),
+	TP_fast_assign(
+		__entry->cmd = cmd;
+		__entry->arg = arg;
+		__entry->flags = flags;
+	),
+	TP_printk("cmd=%u,arg=0x%08x,flags=0x%08x",
+		  __entry->cmd, __entry->arg, __entry->flags)
+);
+
+TRACE_EVENT(mmc_cmd_rw_end,
+	TP_PROTO(unsigned int cmd, unsigned int status, unsigned int resp),
+	TP_ARGS(cmd, status, resp),
+	TP_STRUCT__entry(
+		__field(unsigned int, cmd)
+		__field(unsigned int, status)
+		__field(unsigned int, resp)
+	),
+	TP_fast_assign(
+		__entry->cmd = cmd;
+		__entry->status = status;
+		__entry->resp = resp;
+	),
+	TP_printk("cmd=%u,int_status=0x%08x,response=0x%08x",
+		  __entry->cmd, __entry->status, __entry->resp)
+);
+
+TRACE_EVENT(mmc_data_rw_end,
+	TP_PROTO(unsigned int cmd, unsigned int status),
+	TP_ARGS(cmd, status),
+	TP_STRUCT__entry(
+		__field(unsigned int, cmd)
+		__field(unsigned int, status)
+	),
+	TP_fast_assign(
+		__entry->cmd = cmd;
+		__entry->status = status;
+	),
+	TP_printk("cmd=%u,int_status=0x%08x",
+		  __entry->cmd, __entry->status)
+);
+
+DECLARE_EVENT_CLASS(mmc_adma_class,
+	TP_PROTO(unsigned int cmd, unsigned int len),
+	TP_ARGS(cmd, len),
+	TP_STRUCT__entry(
+		__field(unsigned int, cmd)
+		__field(unsigned int, len)
+	),
+	TP_fast_assign(
+		__entry->cmd = cmd;
+		__entry->len = len;
+	),
+	TP_printk("cmd=%u,sg_len=0x%08x", __entry->cmd, __entry->len)
+);
+
+DEFINE_EVENT(mmc_adma_class, mmc_adma_table_pre,
+	TP_PROTO(unsigned int cmd, unsigned int len),
+	TP_ARGS(cmd, len));
+
+DEFINE_EVENT(mmc_adma_class, mmc_adma_table_post,
+	TP_PROTO(unsigned int cmd, unsigned int len),
+	TP_ARGS(cmd, len));
+
+TRACE_EVENT(mmc_clk,
+	TP_PROTO(char *print_info),
+
+	TP_ARGS(print_info),
+
+	TP_STRUCT__entry(
+		__string(print_info, print_info)
+	),
+
+	TP_fast_assign(
+		__assign_str(print_info, print_info);
+	),
+
+	TP_printk("%s",
+		__get_str(print_info)
+	)
+);
+
+DECLARE_EVENT_CLASS(mmc_pm_template,
+	TP_PROTO(const char *dev_name, int err, s64 usecs),
+
+	TP_ARGS(dev_name, err, usecs),
+
+	TP_STRUCT__entry(
+		__field(s64, usecs)
+		__field(int, err)
+		__string(dev_name, dev_name)
+	),
+
+	TP_fast_assign(
+		__entry->usecs = usecs;
+		__entry->err = err;
+		__assign_str(dev_name, dev_name);
+	),
+
+	TP_printk(
+		"took %lld usecs, %s err %d",
+		__entry->usecs,
+		__get_str(dev_name),
+		__entry->err
+	)
+);
+
+DEFINE_EVENT(mmc_pm_template, mmc_runtime_suspend,
+	     TP_PROTO(const char *dev_name, int err, s64 usecs),
+	     TP_ARGS(dev_name, err, usecs));
+
+DEFINE_EVENT(mmc_pm_template, mmc_runtime_resume,
+	     TP_PROTO(const char *dev_name, int err, s64 usecs),
+	     TP_ARGS(dev_name, err, usecs));
+
+DEFINE_EVENT(mmc_pm_template, mmc_suspend,
+	     TP_PROTO(const char *dev_name, int err, s64 usecs),
+	     TP_ARGS(dev_name, err, usecs));
+
+DEFINE_EVENT(mmc_pm_template, mmc_resume,
+	     TP_PROTO(const char *dev_name, int err, s64 usecs),
+	     TP_ARGS(dev_name, err, usecs));
+
+DEFINE_EVENT(mmc_pm_template, sdhci_msm_suspend,
+	     TP_PROTO(const char *dev_name, int err, s64 usecs),
+	     TP_ARGS(dev_name, err, usecs));
+
+DEFINE_EVENT(mmc_pm_template, sdhci_msm_resume,
+	     TP_PROTO(const char *dev_name, int err, s64 usecs),
+	     TP_ARGS(dev_name, err, usecs));
+
+DEFINE_EVENT(mmc_pm_template, sdhci_msm_runtime_suspend,
+	     TP_PROTO(const char *dev_name, int err, s64 usecs),
+	     TP_ARGS(dev_name, err, usecs));
+
+DEFINE_EVENT(mmc_pm_template, sdhci_msm_runtime_resume,
+	     TP_PROTO(const char *dev_name, int err, s64 usecs),
+	     TP_ARGS(dev_name, err, usecs));
+#endif /* if !defined(_TRACE_MMC_H) || defined(TRACE_HEADER_MULTI_READ) */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
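
The rw events above are defined with TP_CONDITION, so a call site may invoke them unconditionally and the tracepoint itself discards anything that is not a multi-block read or write carrying data. A hedged, illustrative sketch:

    #include <trace/events/mmc.h>

    static void example_start_request(unsigned int cmd, unsigned int addr,
    				  struct mmc_data *data)
    {
    	/* Filtered by TP_CONDITION unless cmd is a multi-block
    	 * read/write and data is non-NULL.
    	 */
    	trace_mmc_blk_rw_start(cmd, addr, data);
    }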
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/include/trace/events/msm_vidc.h	2019-01-22 16:16:28.523291866 +0100
@@ -0,0 +1,315 @@
+/* Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM msm_vidc
+
+#if !defined(_TRACE_MSM_VIDC_H_) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_MSM_VIDC_H_
+#include <linux/types.h>
+#include <linux/tracepoint.h>
+
+DECLARE_EVENT_CLASS(msm_v4l2_vidc,
+
+	TP_PROTO(char *dummy),
+
+	TP_ARGS(dummy),
+
+	TP_STRUCT__entry(
+		__field(char *, dummy)
+	),
+
+	TP_fast_assign(
+		__entry->dummy = dummy;
+	),
+
+	TP_printk("%s", __entry->dummy)
+);
+
+DEFINE_EVENT(msm_v4l2_vidc, msm_v4l2_vidc_open_start,
+
+	TP_PROTO(char *dummy),
+
+	TP_ARGS(dummy)
+);
+
+DEFINE_EVENT(msm_v4l2_vidc, msm_v4l2_vidc_open_end,
+
+	TP_PROTO(char *dummy),
+
+	TP_ARGS(dummy)
+);
+
+DEFINE_EVENT(msm_v4l2_vidc, msm_v4l2_vidc_close_start,
+
+	TP_PROTO(char *dummy),
+
+	TP_ARGS(dummy)
+);
+
+DEFINE_EVENT(msm_v4l2_vidc, msm_v4l2_vidc_close_end,
+
+	TP_PROTO(char *dummy),
+
+	TP_ARGS(dummy)
+);
+
+DEFINE_EVENT(msm_v4l2_vidc, msm_v4l2_vidc_fw_load_start,
+
+	TP_PROTO(char *dummy),
+
+	TP_ARGS(dummy)
+);
+
+DEFINE_EVENT(msm_v4l2_vidc, msm_v4l2_vidc_fw_load_end,
+
+	TP_PROTO(char *dummy),
+
+	TP_ARGS(dummy)
+);
+
+DECLARE_EVENT_CLASS(msm_vidc_common,
+
+	TP_PROTO(void *instp, int old_state, int new_state),
+
+	TP_ARGS(instp, old_state, new_state),
+
+	TP_STRUCT__entry(
+		__field(void *, instp)
+		__field(int, old_state)
+		__field(int, new_state)
+	),
+
+	TP_fast_assign(
+		__entry->instp = instp;
+		__entry->old_state = old_state;
+		__entry->new_state = new_state;
+	),
+
+	TP_printk("Moved inst: %p from 0x%x to 0x%x",
+		__entry->instp,
+		__entry->old_state,
+		__entry->new_state)
+);
+
+DEFINE_EVENT(msm_vidc_common, msm_vidc_common_state_change,
+
+	TP_PROTO(void *instp, int old_state, int new_state),
+
+	TP_ARGS(instp, old_state, new_state)
+);
+
+DECLARE_EVENT_CLASS(venus_hfi_var,
+
+	TP_PROTO(u32 cp_start, u32 cp_size,
+		u32 cp_nonpixel_start, u32 cp_nonpixel_size),
+
+	TP_ARGS(cp_start, cp_size, cp_nonpixel_start, cp_nonpixel_size),
+
+	TP_STRUCT__entry(
+		__field(u32, cp_start)
+		__field(u32, cp_size)
+		__field(u32, cp_nonpixel_start)
+		__field(u32, cp_nonpixel_size)
+	),
+
+	TP_fast_assign(
+		__entry->cp_start = cp_start;
+		__entry->cp_size = cp_size;
+		__entry->cp_nonpixel_start = cp_nonpixel_start;
+		__entry->cp_nonpixel_size = cp_nonpixel_size;
+	),
+
+	TP_printk(
+		"TZBSP_MEM_PROTECT_VIDEO_VAR done, cp_start : 0x%x, cp_size : 0x%x, cp_nonpixel_start : 0x%x, cp_nonpixel_size : 0x%x",
+		__entry->cp_start,
+		__entry->cp_size,
+		__entry->cp_nonpixel_start,
+		__entry->cp_nonpixel_size)
+);
+
+DEFINE_EVENT(venus_hfi_var, venus_hfi_var_done,
+
+	TP_PROTO(u32 cp_start, u32 cp_size,
+		u32 cp_nonpixel_start, u32 cp_nonpixel_size),
+
+	TP_ARGS(cp_start, cp_size, cp_nonpixel_start, cp_nonpixel_size)
+);
+
+DECLARE_EVENT_CLASS(msm_v4l2_vidc_buffer_events,
+
+	TP_PROTO(char *event_type, u32 device_addr, int64_t timestamp,
+		u32 alloc_len, u32 filled_len, u32 offset),
+
+	TP_ARGS(event_type, device_addr, timestamp, alloc_len,
+		filled_len, offset),
+
+	TP_STRUCT__entry(
+		__field(char *, event_type)
+		__field(u32, device_addr)
+		__field(int64_t, timestamp)
+		__field(u32, alloc_len)
+		__field(u32, filled_len)
+		__field(u32, offset)
+	),
+
+	TP_fast_assign(
+		__entry->event_type = event_type;
+		__entry->device_addr = device_addr;
+		__entry->timestamp = timestamp;
+		__entry->alloc_len = alloc_len;
+		__entry->filled_len = filled_len;
+		__entry->offset = offset;
+	),
+
+	TP_printk(
+		"%s, device_addr : 0x%x, timestamp : %lld, alloc_len : 0x%x, filled_len : 0x%x, offset : 0x%x",
+		__entry->event_type,
+		__entry->device_addr,
+		__entry->timestamp,
+		__entry->alloc_len,
+		__entry->filled_len,
+		__entry->offset)
+);
+
+DEFINE_EVENT(msm_v4l2_vidc_buffer_events, msm_v4l2_vidc_buffer_event_start,
+
+	TP_PROTO(char *event_type, u32 device_addr, int64_t timestamp,
+		u32 alloc_len, u32 filled_len, u32 offset),
+
+	TP_ARGS(event_type, device_addr, timestamp, alloc_len,
+		filled_len, offset)
+);
+
+DEFINE_EVENT(msm_v4l2_vidc_buffer_events, msm_v4l2_vidc_buffer_event_end,
+
+	TP_PROTO(char *event_type, u32 device_addr, int64_t timestamp,
+		u32 alloc_len, u32 filled_len, u32 offset),
+
+	TP_ARGS(event_type, device_addr, timestamp, alloc_len,
+		filled_len, offset)
+);
+
+DECLARE_EVENT_CLASS(msm_smem_buffer_ion_ops,
+
+	TP_PROTO(char *buffer_op, u32 buffer_type, u32 heap_mask,
+		size_t size, u32 align, u32 flags, int map_kernel),
+
+	TP_ARGS(buffer_op, buffer_type, heap_mask, size, align,
+		flags, map_kernel),
+
+	TP_STRUCT__entry(
+		__field(char *, buffer_op)
+		__field(u32, buffer_type)
+		__field(u32, heap_mask)
+		__field(u32, size)
+		__field(u32, align)
+		__field(u32, flags)
+		__field(int, map_kernel)
+	),
+
+	TP_fast_assign(
+		__entry->buffer_op = buffer_op;
+		__entry->buffer_type = buffer_type;
+		__entry->heap_mask = heap_mask;
+		__entry->size = size;
+		__entry->align = align;
+		__entry->flags = flags;
+		__entry->map_kernel = map_kernel;
+	),
+
+	TP_printk(
+		"%s, buffer_type : 0x%x, heap_mask : 0x%x, size : 0x%x, align : 0x%x, flags : 0x%x, map_kernel : %d",
+		__entry->buffer_op,
+		__entry->buffer_type,
+		__entry->heap_mask,
+		__entry->size,
+		__entry->align,
+		__entry->flags,
+		__entry->map_kernel)
+);
+
+DEFINE_EVENT(msm_smem_buffer_ion_ops, msm_smem_buffer_ion_op_start,
+
+	TP_PROTO(char *buffer_op, u32 buffer_type, u32 heap_mask,
+		size_t size, u32 align, u32 flags, int map_kernel),
+
+	TP_ARGS(buffer_op, buffer_type, heap_mask, size, align,
+		flags, map_kernel)
+);
+
+DEFINE_EVENT(msm_smem_buffer_ion_ops, msm_smem_buffer_ion_op_end,
+
+	TP_PROTO(char *buffer_op, u32 buffer_type, u32 heap_mask,
+		size_t size, u32 align, u32 flags, int map_kernel),
+
+	TP_ARGS(buffer_op, buffer_type, heap_mask, size, align,
+		flags, map_kernel)
+);
+
+DECLARE_EVENT_CLASS(msm_smem_buffer_iommu_ops,
+
+	TP_PROTO(char *buffer_op, int domain_num, int partition_num,
+		unsigned long align, unsigned long iova,
+		unsigned long buffer_size),
+
+	TP_ARGS(buffer_op, domain_num, partition_num, align, iova, buffer_size),
+
+	TP_STRUCT__entry(
+		__field(char *, buffer_op)
+		__field(int, domain_num)
+		__field(int, partition_num)
+		__field(unsigned long, align)
+		__field(unsigned long, iova)
+		__field(unsigned long, buffer_size)
+	),
+
+	TP_fast_assign(
+		__entry->buffer_op = buffer_op;
+		__entry->domain_num = domain_num;
+		__entry->partition_num = partition_num;
+		__entry->align = align;
+		__entry->iova = iova;
+		__entry->buffer_size = buffer_size;
+	),
+
+	TP_printk(
+		"%s, domain : %d, partition : %d, align : %lx, iova : 0x%lx, buffer_size=%lx",
+		__entry->buffer_op,
+		__entry->domain_num,
+		__entry->partition_num,
+		__entry->align,
+		__entry->iova,
+		__entry->buffer_size)
+);
+
+DEFINE_EVENT(msm_smem_buffer_iommu_ops, msm_smem_buffer_iommu_op_start,
+
+	TP_PROTO(char *buffer_op, int domain_num, int partition_num,
+		unsigned long align, unsigned long iova,
+		unsigned long buffer_size),
+
+	TP_ARGS(buffer_op, domain_num, partition_num, align, iova, buffer_size)
+);
+
+DEFINE_EVENT(msm_smem_buffer_iommu_ops, msm_smem_buffer_iommu_op_end,
+
+	TP_PROTO(char *buffer_op, int domain_num, int partition_num,
+		unsigned long align, unsigned long iova,
+		unsigned long buffer_size),
+
+	TP_ARGS(buffer_op, domain_num, partition_num, align, iova, buffer_size)
+);
+
+#endif
+
+#include <trace/define_trace.h>
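
Note that the msm_v4l2_vidc class records only the raw pointer (__field(char *, dummy)), so callers should pass string literals or other storage that outlives the ring-buffer read. A hedged, illustrative sketch of a paired call site:

    #include <trace/events/msm_vidc.h>

    static void example_open(void)
    {
    	trace_msm_v4l2_vidc_open_start("msm_v4l2_open: start");
    	/* ... session setup ... */
    	trace_msm_v4l2_vidc_open_end("msm_v4l2_open: end");
    }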
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/include/trace/events/scm.h	2019-01-22 16:16:28.523291866 +0100
@@ -0,0 +1,68 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM scm
+
+#if !defined(_TRACE_SCM_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_SCM_H
+#include <linux/types.h>
+#include <linux/tracepoint.h>
+#include <soc/qcom/scm.h>
+
+TRACE_EVENT(scm_call_start,
+
+	TP_PROTO(u64 x0, struct scm_desc *p),
+
+	TP_ARGS(x0, p),
+
+	TP_STRUCT__entry(
+		__field(u64, x0)
+		__field(u32, arginfo)
+		__array(u64, args, MAX_SCM_ARGS)
+		__field(u64, x5)
+	),
+
+	TP_fast_assign(
+		__entry->x0		= x0;
+		__entry->arginfo	= p->arginfo;
+		memcpy(__entry->args, p->args, sizeof(__entry->args));
+		__entry->x5		= p->x5;
+	),
+
+	TP_printk("func id=%#llx (args: %#x, %#llx, %#llx, %#llx, %#llx)",
+		__entry->x0, __entry->arginfo, __entry->args[0],
+		__entry->args[1], __entry->args[2], __entry->x5)
+);
+
+
+TRACE_EVENT(scm_call_end,
+
+	TP_PROTO(struct scm_desc *p),
+
+	TP_ARGS(p),
+
+	TP_STRUCT__entry(
+		__array(u64, ret, MAX_SCM_RETS)
+	),
+
+	TP_fast_assign(
+		memcpy(__entry->ret, p->ret, sizeof(__entry->ret));
+	),
+
+	TP_printk("ret: %#llx, %#llx, %#llx",
+		__entry->ret[0], __entry->ret[1], __entry->ret[2])
+);
+#endif /* _TRACE_SCM_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
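
A hedged sketch of bracketing a firmware call with the two events; the wrapper is illustrative, and struct scm_desc comes from <soc/qcom/scm.h> as included by the header above:

    #include <trace/events/scm.h>

    static int example_scm_call(u64 x0, struct scm_desc *desc)
    {
    	int ret;

    	trace_scm_call_start(x0, desc);
    	ret = 0;	/* ... actual secure-monitor call ... */
    	trace_scm_call_end(desc);
    	return ret;
    }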
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/include/trace/events/trace_msm_bus.h	2019-01-22 16:16:28.523291866 +0100
@@ -0,0 +1,275 @@
+/* Copyright (c) 2014-2015, 2017,  The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM msm_bus
+
+#if !defined(_TRACE_MSM_BUS_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_MSM_BUS_H
+
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(bus_update_request,
+
+	TP_PROTO(int sec, int nsec, const char *name, int src, int dest,
+		unsigned long long ab, unsigned long long ib),
+
+	TP_ARGS(sec, nsec, name, src, dest, ab, ib),
+
+	TP_STRUCT__entry(
+		__field(int, sec)
+		__field(int, nsec)
+		__string(name, name)
+		__field(int, src)
+		__field(int, dest)
+		__field(u64, ab)
+		__field(u64, ib)
+	),
+
+	TP_fast_assign(
+		__entry->sec = sec;
+		__entry->nsec = nsec;
+		__assign_str(name, name);
+		__entry->src = src;
+		__entry->dest = dest;
+		__entry->ab = ab;
+		__entry->ib = ib;
+	),
+
+	TP_printk("time= %u.%09u name=%s src=%d dest=%d ab=%llu ib=%llu",
+		__entry->sec,
+		__entry->nsec,
+		__get_str(name),
+		__entry->src,
+		__entry->dest,
+		(unsigned long long)__entry->ab,
+		(unsigned long long)__entry->ib)
+);
+
+TRACE_EVENT(bus_update_request_end,
+
+	TP_PROTO(const char *name),
+
+	TP_ARGS(name),
+
+	TP_STRUCT__entry(
+		__string(name, name)
+	),
+
+	TP_fast_assign(
+		__assign_str(name, name);
+	),
+
+	TP_printk("client-name=%s", __get_str(name))
+);
+
+TRACE_EVENT(bus_max_votes,
+
+	TP_PROTO(int sec, int nsec, const char *bus_name, const char *ctx,
+		const char *bw_type_name, unsigned long long bw,
+		const char *cl_name),
+
+	TP_ARGS(sec, nsec, bus_name, ctx, bw_type_name, bw, cl_name),
+
+	TP_STRUCT__entry(
+		__field(int, sec)
+		__field(int, nsec)
+		__string(bus_name, bus_name)
+		__string(ctx, ctx)
+		__string(bw_type_name, bw_type_name)
+		__field(u64, bw)
+		__string(cl_name, cl_name)
+	),
+
+	TP_fast_assign(
+		__entry->sec = sec;
+		__entry->nsec = nsec;
+		__assign_str(bus_name, bus_name);
+		__assign_str(ctx, ctx);
+		__assign_str(bw_type_name, bw_type_name);
+		__entry->bw = bw;
+		__assign_str(cl_name, cl_name);
+	),
+
+	TP_printk("time= %u.%09u %s: %s max_%s: %llu: client-name: %s",
+		__entry->sec,
+		__entry->nsec,
+		__get_str(bus_name),
+		__get_str(ctx),
+		__get_str(bw_type_name),
+		(unsigned long long)__entry->bw,
+		__get_str(cl_name))
+);
+
+TRACE_EVENT(bus_bimc_config_limiter,
+
+	TP_PROTO(int mas_id, unsigned long long cur_lim_bw),
+
+	TP_ARGS(mas_id, cur_lim_bw),
+
+	TP_STRUCT__entry(
+		__field(int, mas_id)
+		__field(u64, cur_lim_bw)
+	),
+
+	TP_fast_assign(
+		__entry->mas_id = mas_id;
+		__entry->cur_lim_bw = cur_lim_bw;
+	),
+
+	TP_printk("Master=%d cur_lim_bw=%llu",
+		__entry->mas_id,
+		(unsigned long long)__entry->cur_lim_bw)
+);
+
+TRACE_EVENT(bus_avail_bw,
+
+	TP_PROTO(unsigned long long cur_bimc_bw, unsigned long long cur_mdp_bw),
+
+	TP_ARGS(cur_bimc_bw, cur_mdp_bw),
+
+	TP_STRUCT__entry(
+		__field(u64, cur_bimc_bw)
+		__field(u64, cur_mdp_bw)
+	),
+
+	TP_fast_assign(
+		__entry->cur_bimc_bw = cur_bimc_bw;
+		__entry->cur_mdp_bw = cur_mdp_bw;
+	),
+
+	TP_printk("cur_bimc_bw = %llu cur_mdp_bw = %llu",
+		(unsigned long long)__entry->cur_bimc_bw,
+		(unsigned long long)__entry->cur_mdp_bw)
+);
+
+TRACE_EVENT(bus_rules_matches,
+
+	TP_PROTO(int node_id, int rule_id, unsigned long long node_ab,
+		unsigned long long node_ib, unsigned long long node_clk),
+
+	TP_ARGS(node_id, rule_id, node_ab, node_ib, node_clk),
+
+	TP_STRUCT__entry(
+		__field(int, node_id)
+		__field(int, rule_id)
+		__field(u64, node_ab)
+		__field(u64, node_ib)
+		__field(u64, node_clk)
+	),
+
+	TP_fast_assign(
+		__entry->node_id = node_id;
+		__entry->rule_id = rule_id;
+		__entry->node_ab = node_ab;
+		__entry->node_ib = node_ib;
+		__entry->node_clk = node_clk;
+	),
+
+	TP_printk("Rule match node%d rule%d node-ab%llu:ib%llu:clk%llu",
+		__entry->node_id, __entry->rule_id,
+		(unsigned long long)__entry->node_ab,
+		(unsigned long long)__entry->node_ib,
+		(unsigned long long)__entry->node_clk)
+);
+
+TRACE_EVENT(bus_bke_params,
+
+	TP_PROTO(u32 gc, u32 gp, u32 thl, u32 thm, u32 thh),
+
+	TP_ARGS(gc, gp, thl, thm, thh),
+
+	TP_STRUCT__entry(
+		__field(u32, gc)
+		__field(u32, gp)
+		__field(u32, thl)
+		__field(u32, thm)
+		__field(u32, thh)
+	),
+
+	TP_fast_assign(
+		__entry->gc = gc;
+		__entry->gp = gp;
+		__entry->thl = thl;
+		__entry->thm = thm;
+		__entry->thh = thh;
+	),
+
+	TP_printk("BKE Params GC=0x%x GP=0x%x THL=0x%x THM=0x%x THH=0x%x",
+		__entry->gc, __entry->gp, __entry->thl, __entry->thm,
+			__entry->thh)
+);
+
+TRACE_EVENT(bus_client_status,
+
+	TP_PROTO(const char *name, int src, int dest,
+		unsigned long long ab, unsigned long long ib, int active_only),
+
+	TP_ARGS(name, src, dest, ab, ib, active_only),
+
+	TP_STRUCT__entry(
+		__string(name, name)
+		__field(int, src)
+		__field(int, dest)
+		__field(u64, ab)
+		__field(u64, ib)
+		__field(int, active_only)
+	),
+
+	TP_fast_assign(
+		__assign_str(name, name);
+		__entry->src = src;
+		__entry->dest = dest;
+		__entry->ab = ab;
+		__entry->ib = ib;
+		__entry->active_only = active_only;
+	),
+
+	TP_printk("name=%s src=%d dest=%d ab=%llu ib=%llu active_only=%d",
+		__get_str(name),
+		__entry->src,
+		__entry->dest,
+		(unsigned long long)__entry->ab,
+		(unsigned long long)__entry->ib,
+		__entry->active_only)
+);
+
+TRACE_EVENT(bus_agg_bw,
+
+	TP_PROTO(unsigned int node_id, int rpm_id, int ctx_set,
+		unsigned long long agg_ab),
+
+	TP_ARGS(node_id, rpm_id, ctx_set, agg_ab),
+
+	TP_STRUCT__entry(
+		__field(unsigned int, node_id)
+		__field(int, rpm_id)
+		__field(int, ctx_set)
+		__field(u64, agg_ab)
+	),
+
+	TP_fast_assign(
+		__entry->node_id = node_id;
+		__entry->rpm_id = rpm_id;
+		__entry->ctx_set = ctx_set;
+		__entry->agg_ab = agg_ab;
+	),
+
+	TP_printk("node_id:%u rpm_id:%d rpm_ctx:%d agg_ab:%llu",
+		__entry->node_id,
+		__entry->rpm_id,
+		__entry->ctx_set,
+		(unsigned long long)__entry->agg_ab)
+);
+#endif
+#define TRACE_INCLUDE_FILE trace_msm_bus
+#include <trace/define_trace.h>
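
A hedged sketch of emitting a bandwidth-vote pair; the wrapper and the zero timestamps are illustrative:

    #include <trace/events/trace_msm_bus.h>

    static void example_commit_vote(const char *client, int src, int dest,
    				unsigned long long ab, unsigned long long ib)
    {
    	trace_bus_update_request(0, 0, client, src, dest, ab, ib);
    	/* ... apply the vote to the fabric ... */
    	trace_bus_update_request_end(client);
    }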
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/include/trace/events/trace_msm_core.h	2019-01-22 16:16:28.523291866 +0100
@@ -0,0 +1,103 @@
+/* Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM msm_core
+
+#if !defined(_TRACE_MSM_CORE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_MSM_CORE_H
+
+#include <linux/tracepoint.h>
+#include <linux/thermal.h>
+
+TRACE_EVENT(cpu_stats,
+
+	TP_PROTO(unsigned int cpu, long temp,
+	uint64_t min_power, uint64_t max_power),
+
+	TP_ARGS(cpu, temp, min_power, max_power),
+
+	TP_STRUCT__entry(
+		__field(unsigned int, cpu)
+		__field(long, temp)
+		__field(uint64_t, min_power)
+		__field(uint64_t, max_power)
+	),
+
+	TP_fast_assign(
+		__entry->cpu = cpu;
+		__entry->temp = temp;
+		__entry->min_power = min_power;
+		__entry->max_power = max_power;
+	),
+
+	TP_printk("Cpu%d: temp:%ld power@minfreq:%llu power@maxfreq:%llu",
+		__entry->cpu, __entry->temp, __entry->min_power,
+		__entry->max_power)
+);
+
+TRACE_EVENT(temp_threshold,
+
+	TP_PROTO(unsigned int cpu, long temp,
+		long hi_thresh, long low_thresh),
+
+	TP_ARGS(cpu, temp, hi_thresh, low_thresh),
+
+	TP_STRUCT__entry(
+		__field(unsigned int, cpu)
+		__field(long, temp)
+		__field(long, hi_thresh)
+		__field(long, low_thresh)
+	),
+
+	TP_fast_assign(
+		__entry->cpu = cpu;
+		__entry->temp = temp;
+		__entry->hi_thresh = hi_thresh;
+		__entry->low_thresh = low_thresh;
+	),
+
+	TP_printk("Cpu%d: temp:%ld hi_thresh:%ld low_thresh:%ld",
+		__entry->cpu, __entry->temp, __entry->hi_thresh,
+		__entry->low_thresh)
+);
+
+TRACE_EVENT(temp_notification,
+
+	TP_PROTO(unsigned int sensor_id, enum thermal_trip_type type,
+		int temp, int prev_temp),
+
+	TP_ARGS(sensor_id, type, temp, prev_temp),
+
+	TP_STRUCT__entry(
+		__field(unsigned int, sensor_id)
+		__field(enum thermal_trip_type, type)
+		__field(int, temp)
+		__field(int, prev_temp)
+	),
+
+	TP_fast_assign(
+		__entry->sensor_id = sensor_id;
+		__entry->type = type;
+		__entry->temp = temp;
+		__entry->prev_temp = prev_temp;
+	),
+
+	TP_printk("Sensor_id%d: %s threshold triggered temp:%d(previous:%d)",
+		__entry->sensor_id,
+		__entry->type == THERMAL_TRIP_CONFIGURABLE_HI ? "High" : "Low",
+		__entry->temp, __entry->prev_temp)
+);
+
+#endif
+#define TRACE_INCLUDE_FILE trace_msm_core
+#include <trace/define_trace.h>
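
A hedged, illustrative sketch of a cpu_stats call site; the power numbers are placeholders in whatever units the driver reports:

    #include <trace/events/trace_msm_core.h>

    static void example_report_power(unsigned int cpu, long temp)
    {
    	/* power at the min and max frequency points, driver units */
    	trace_cpu_stats(cpu, temp, 100ULL, 2000ULL);
    }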
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/include/trace/events/trace_msm_low_power.h	2019-01-22 16:16:28.523291866 +0100
@@ -0,0 +1,273 @@
+/* Copyright (c) 2012, 2014-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM msm_low_power
+
+#if !defined(_TRACE_MSM_LOW_POWER_H_) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_MSM_LOW_POWER_H_
+
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(cpu_power_select,
+
+	TP_PROTO(int index, u32 sleep_us, u32 latency, u32 next_event_us),
+
+	TP_ARGS(index, sleep_us, latency, next_event_us),
+
+	TP_STRUCT__entry(
+		__field(int, index)
+		__field(u32, sleep_us)
+		__field(u32, latency)
+		__field(u32, next_event_us)
+	),
+
+	TP_fast_assign(
+		__entry->index = index;
+		__entry->sleep_us = sleep_us;
+		__entry->latency = latency;
+		__entry->next_event_us = next_event_us;
+	),
+
+	TP_printk("idx:%d sleep_time:%u latency:%u next_event:%u",
+		__entry->index, __entry->sleep_us, __entry->latency,
+		__entry->next_event_us)
+);
+
+TRACE_EVENT(cpu_pred_select,
+
+	TP_PROTO(u32 predtype, u64 predicted, u32 tmr_time),
+
+	TP_ARGS(predtype, predicted, tmr_time),
+
+	TP_STRUCT__entry(
+		__field(u32, predtype)
+		__field(u64, predicted)
+		__field(u32, tmr_time)
+	),
+
+	TP_fast_assign(
+		__entry->predtype = predtype;
+		__entry->predicted = predicted;
+		__entry->tmr_time = tmr_time;
+	),
+
+	TP_printk("pred:%u time:%lu tmr_time:%u",
+		__entry->predtype, (unsigned long)__entry->predicted,
+		__entry->tmr_time)
+);
+
+TRACE_EVENT(cpu_pred_hist,
+
+	TP_PROTO(int idx, u32 resi, u32 sample, u32 tmr),
+
+	TP_ARGS(idx, resi, sample, tmr),
+
+	TP_STRUCT__entry(
+		__field(int, idx)
+		__field(u32, resi)
+		__field(u32, sample)
+		__field(u32, tmr)
+	),
+
+	TP_fast_assign(
+		__entry->idx = idx;
+		__entry->resi = resi;
+		__entry->sample = sample;
+		__entry->tmr = tmr;
+	),
+
+	TP_printk("idx:%d resi:%u sample:%u tmr:%u",
+		__entry->idx, __entry->resi,
+		__entry->sample, __entry->tmr)
+);
+
+TRACE_EVENT(cpu_idle_enter,
+
+	TP_PROTO(int index),
+
+	TP_ARGS(index),
+
+	TP_STRUCT__entry(
+		__field(int, index)
+	),
+
+	TP_fast_assign(
+		__entry->index = index;
+	),
+
+	TP_printk("idx:%d",
+		__entry->index)
+);
+
+TRACE_EVENT(cpu_idle_exit,
+
+	TP_PROTO(int index, bool success),
+
+	TP_ARGS(index, success),
+
+	TP_STRUCT__entry(
+		__field(int, index)
+		__field(bool, success)
+	),
+
+	TP_fast_assign(
+		__entry->index = index;
+		__entry->success = success;
+	),
+
+	TP_printk("idx:%d success:%d",
+		__entry->index,
+		__entry->success)
+);
+
+TRACE_EVENT(cluster_enter,
+
+	TP_PROTO(const char *name, int index, unsigned long sync_cpus,
+		unsigned long child_cpus, bool from_idle),
+
+	TP_ARGS(name, index, sync_cpus, child_cpus, from_idle),
+
+	TP_STRUCT__entry(
+		__field(const char *, name)
+		__field(int, index)
+		__field(unsigned long, sync_cpus)
+		__field(unsigned long, child_cpus)
+		__field(bool, from_idle)
+	),
+
+	TP_fast_assign(
+		__entry->name = name;
+		__entry->index = index;
+		__entry->sync_cpus = sync_cpus;
+		__entry->child_cpus = child_cpus;
+		__entry->from_idle = from_idle;
+	),
+
+	TP_printk("cluster_name:%s idx:%d sync:0x%lx child:0x%lx idle:%d",
+		__entry->name,
+		__entry->index,
+		__entry->sync_cpus,
+		__entry->child_cpus,
+		__entry->from_idle)
+);
+
+TRACE_EVENT(cluster_exit,
+
+	TP_PROTO(const char *name, int index, unsigned long sync_cpus,
+		unsigned long child_cpus, bool from_idle),
+
+	TP_ARGS(name, index, sync_cpus, child_cpus, from_idle),
+
+	TP_STRUCT__entry(
+		__field(const char *, name)
+		__field(int, index)
+		__field(unsigned long, sync_cpus)
+		__field(unsigned long, child_cpus)
+		__field(bool, from_idle)
+	),
+
+	TP_fast_assign(
+		__entry->name = name;
+		__entry->index = index;
+		__entry->sync_cpus = sync_cpus;
+		__entry->child_cpus = child_cpus;
+		__entry->from_idle = from_idle;
+	),
+
+	TP_printk("cluster_name:%s idx:%d sync:0x%lx child:0x%lx idle:%d",
+		__entry->name,
+		__entry->index,
+		__entry->sync_cpus,
+		__entry->child_cpus,
+		__entry->from_idle)
+);
+
+TRACE_EVENT(cluster_pred_select,
+
+	TP_PROTO(const char *name, int index, u32 sleep_us,
+				u32 latency, int pred, u32 pred_us),
+
+	TP_ARGS(name, index, sleep_us, latency, pred, pred_us),
+
+	TP_STRUCT__entry(
+		__field(const char *, name)
+		__field(int, index)
+		__field(u32, sleep_us)
+		__field(u32, latency)
+		__field(int, pred)
+		__field(u32, pred_us)
+	),
+
+	TP_fast_assign(
+		__entry->name = name;
+		__entry->index = index;
+		__entry->sleep_us = sleep_us;
+		__entry->latency = latency;
+		__entry->pred = pred;
+		__entry->pred_us = pred_us;
+	),
+
+	TP_printk("name:%s idx:%d sleep_time:%u latency:%u pred:%d pred_us:%u",
+		__entry->name, __entry->index, __entry->sleep_us,
+		__entry->latency, __entry->pred, __entry->pred_us)
+);
+
+TRACE_EVENT(cluster_pred_hist,
+
+	TP_PROTO(const char *name, int idx, u32 resi,
+					u32 sample, u32 tmr),
+
+	TP_ARGS(name, idx, resi, sample, tmr),
+
+	TP_STRUCT__entry(
+		__field(const char *, name)
+		__field(int, idx)
+		__field(u32, resi)
+		__field(u32, sample)
+		__field(u32, tmr)
+	),
+
+	TP_fast_assign(
+		__entry->name = name;
+		__entry->idx = idx;
+		__entry->resi = resi;
+		__entry->sample = sample;
+		__entry->tmr = tmr;
+	),
+
+	TP_printk("name:%s idx:%d resi:%u sample:%u tmr:%u",
+		__entry->name, __entry->idx, __entry->resi,
+		__entry->sample, __entry->tmr)
+);
+
+TRACE_EVENT(pre_pc_cb,
+
+	TP_PROTO(int tzflag),
+
+	TP_ARGS(tzflag),
+
+	TP_STRUCT__entry(
+		__field(int, tzflag)
+	),
+
+	TP_fast_assign(
+		__entry->tzflag = tzflag;
+	),
+
+	TP_printk("tzflag:%d",
+		__entry->tzflag
+	)
+);
+#endif
+#define TRACE_INCLUDE_FILE trace_msm_low_power
+#include <trace/define_trace.h>
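
A hedged sketch of pairing the idle enter/exit events around a low-power state; the wrapper is illustrative:

    #include <trace/events/trace_msm_low_power.h>

    static void example_enter_state(int idx)
    {
    	trace_cpu_idle_enter(idx);
    	/* ... wfi / power collapse ... */
    	trace_cpu_idle_exit(idx, true);	/* true = state was entered */
    }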
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/include/trace/events/trace_msm_pil_event.h	2019-01-22 16:16:28.527291902 +0100
@@ -0,0 +1,88 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM msm_pil_event
+
+#if !defined(_TRACE_MSM_PIL_EVENT_H_) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_MSM_PIL_EVENT_H_
+
+#include <linux/tracepoint.h>
+#include <../drivers/soc/qcom/peripheral-loader.h>
+
+TRACE_EVENT(pil_event,
+
+	TP_PROTO(const char *event_name, struct pil_desc *desc),
+
+	TP_ARGS(event_name, desc),
+
+	TP_STRUCT__entry(
+		__string(event_name, event_name)
+		__string(fw_name, desc->fw_name)
+	),
+
+	TP_fast_assign(
+		__assign_str(event_name, event_name);
+		__assign_str(fw_name, desc->fw_name);
+	),
+
+	TP_printk("event_name=%s fw_name=%s",
+		__get_str(event_name),
+		__get_str(fw_name))
+);
+
+TRACE_EVENT(pil_notif,
+
+	TP_PROTO(const char *event_name, unsigned long code,
+	const char *fw_name),
+
+	TP_ARGS(event_name, code, fw_name),
+
+	TP_STRUCT__entry(
+		__string(event_name, event_name)
+		__field(unsigned long, code)
+		__string(fw_name, fw_name)
+	),
+
+	TP_fast_assign(
+		__assign_str(event_name, event_name);
+		__entry->code = code;
+		__assign_str(fw_name, fw_name);
+	),
+
+	TP_printk("event_name=%s code=%lu fw=%s",
+		__get_str(event_name),
+		__entry->code,
+		__get_str(fw_name))
+);
+
+TRACE_EVENT(pil_func,
+
+	TP_PROTO(const char *func_name),
+
+	TP_ARGS(func_name),
+
+	TP_STRUCT__entry(
+		__string(func_name, func_name)
+	),
+
+	TP_fast_assign(
+		__assign_str(func_name, func_name);
+	),
+
+	TP_printk("func_name=%s",
+		__get_str(func_name))
+);
+
+#endif
+#define TRACE_INCLUDE_FILE trace_msm_pil_event
+#include <trace/define_trace.h>
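
A hedged sketch of a PIL call site; the event string is illustrative, and struct pil_desc comes from the peripheral-loader header included above:

    #include <trace/events/trace_msm_pil_event.h>

    static void example_before_reset(struct pil_desc *desc)
    {
    	trace_pil_func(__func__);
    	trace_pil_event("before_reset", desc);
    }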
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/include/trace/events/trace_rpm_smd.h	2019-01-22 16:16:28.527291902 +0100
@@ -0,0 +1,111 @@
+/* Copyright (c) 2012, 2014-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM rpm_smd
+
+#if !defined(_TRACE_RPM_SMD_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_RPM_SMD_H
+
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(rpm_smd_ack_recvd,
+
+	TP_PROTO(unsigned int irq, unsigned int msg_id, int errno),
+
+	TP_ARGS(irq, msg_id, errno),
+
+	TP_STRUCT__entry(
+		__field(int, irq)
+		__field(int, msg_id)
+		__field(int, errno)
+	),
+
+	TP_fast_assign(
+		__entry->irq = irq;
+		__entry->msg_id = msg_id;
+		__entry->errno = errno;
+	),
+
+	TP_printk("ctx:%s msg_id:%d errno:%08x",
+		__entry->irq ? "noslp" : "sleep",
+		__entry->msg_id,
+		__entry->errno)
+);
+
+TRACE_EVENT(rpm_smd_interrupt_notify,
+
+	TP_PROTO(char *dummy),
+
+	TP_ARGS(dummy),
+
+	TP_STRUCT__entry(
+		__field(char *, dummy)
+	),
+
+	TP_fast_assign(
+		__entry->dummy = dummy;
+	),
+
+	TP_printk("%s", __entry->dummy)
+);
+
+DECLARE_EVENT_CLASS(rpm_send_msg,
+
+	TP_PROTO(unsigned int msg_id, unsigned int rsc_type,
+		unsigned int rsc_id),
+
+	TP_ARGS(msg_id, rsc_type, rsc_id),
+
+	TP_STRUCT__entry(
+		__field(u32, msg_id)
+		__field(u32, rsc_type)
+		__field(u32, rsc_id)
+		__array(char, name, 5)
+	),
+
+	TP_fast_assign(
+		__entry->msg_id = msg_id;
+		__entry->name[4] = 0;
+		__entry->rsc_type = rsc_type;
+		__entry->rsc_id = rsc_id;
+		memcpy(__entry->name, &rsc_type, sizeof(uint32_t));
+
+	),
+
+	TP_printk("msg_id:%d, rsc_type:0x%08x(%s), rsc_id:0x%08x",
+			__entry->msg_id,
+			__entry->rsc_type, __entry->name,
+			__entry->rsc_id)
+);
+
+DEFINE_EVENT(rpm_send_msg, rpm_smd_sleep_set,
+	TP_PROTO(unsigned int msg_id, unsigned int rsc_type,
+		unsigned int rsc_id),
+	TP_ARGS(msg_id, rsc_type, rsc_id)
+);
+
+DEFINE_EVENT(rpm_send_msg, rpm_smd_send_sleep_set,
+	TP_PROTO(unsigned int msg_id, unsigned int rsc_type,
+		unsigned int rsc_id),
+	TP_ARGS(msg_id, rsc_type, rsc_id)
+);
+
+DEFINE_EVENT(rpm_send_msg, rpm_smd_send_active_set,
+	TP_PROTO(unsigned int msg_id, unsigned int rsc_type,
+		unsigned int rsc_id),
+	TP_ARGS(msg_id, rsc_type, rsc_id)
+);
+
+#endif
+#define TRACE_INCLUDE_FILE trace_rpm_smd
+#include <trace/define_trace.h>
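
A hedged, illustrative sketch of logging an active-set send; note the class treats rsc_type as a four-character code and prints it both as hex and as text:

    #include <trace/events/trace_rpm_smd.h>

    static void example_send_active(unsigned int msg_id, unsigned int rsc_type,
    				unsigned int rsc_id)
    {
    	trace_rpm_smd_send_active_set(msg_id, rsc_type, rsc_id);
    }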
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/include/trace/events/ufs.h	2019-01-22 16:16:28.527291902 +0100
@@ -0,0 +1,238 @@
+/*
+ * Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM ufs
+
+#if !defined(_TRACE_UFS_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_UFS_H
+
+#include <linux/tracepoint.h>
+
+DECLARE_EVENT_CLASS(ufshcd_state_change_template,
+	TP_PROTO(const char *dev_name, int state),
+
+	TP_ARGS(dev_name, state),
+
+	TP_STRUCT__entry(
+		__string(dev_name, dev_name)
+		__field(int, state)
+	),
+
+	TP_fast_assign(
+		__assign_str(dev_name, dev_name);
+		__entry->state = state;
+	),
+
+	TP_printk("%s: state changed to %s",
+		__get_str(dev_name), __entry->state ? "ON" : "OFF")
+);
+
+DEFINE_EVENT_PRINT(ufshcd_state_change_template, ufshcd_clk_gating,
+	TP_PROTO(const char *dev_name, int state),
+	TP_ARGS(dev_name, state),
+	TP_printk("%s: state changed to %s", __get_str(dev_name),
+		__print_symbolic(__entry->state,
+				{ CLKS_OFF, "CLKS_OFF" },
+				{ CLKS_ON, "CLKS_ON" },
+				{ REQ_CLKS_OFF, "REQ_CLKS_OFF" },
+				{ REQ_CLKS_ON, "REQ_CLKS_ON" }))
+);
+
+DEFINE_EVENT_PRINT(ufshcd_state_change_template, ufshcd_hibern8_on_idle,
+	TP_PROTO(const char *dev_name, int state),
+	TP_ARGS(dev_name, state),
+	TP_printk("%s: state changed to %s", __get_str(dev_name),
+		__print_symbolic(__entry->state,
+			{ HIBERN8_ENTERED, "HIBERN8_ENTER" },
+			{ HIBERN8_EXITED, "HIBERN8_EXIT" },
+			{ REQ_HIBERN8_ENTER, "REQ_HIBERN8_ENTER" },
+			{ REQ_HIBERN8_EXIT, "REQ_HIBERN8_EXIT" }))
+);
+
+DEFINE_EVENT(ufshcd_state_change_template, ufshcd_auto_bkops_state,
+	TP_PROTO(const char *dev_name, int state),
+	TP_ARGS(dev_name, state));
+
+TRACE_EVENT(ufshcd_clk_scaling,
+
+	TP_PROTO(const char *dev_name, const char *state, const char *clk,
+		u32 prev_state, u32 curr_state),
+
+	TP_ARGS(dev_name, state, clk, prev_state, curr_state),
+
+	TP_STRUCT__entry(
+		__string(dev_name, dev_name)
+		__string(state, state)
+		__string(clk, clk)
+		__field(u32, prev_state)
+		__field(u32, curr_state)
+	),
+
+	TP_fast_assign(
+		__assign_str(dev_name, dev_name);
+		__assign_str(state, state);
+		__assign_str(clk, clk);
+		__entry->prev_state = prev_state;
+		__entry->curr_state = curr_state;
+	),
+
+	TP_printk("%s: %s %s from %u to %u Hz",
+		__get_str(dev_name), __get_str(state), __get_str(clk),
+		__entry->prev_state, __entry->curr_state)
+);
+
+DECLARE_EVENT_CLASS(ufshcd_profiling_template,
+	TP_PROTO(const char *dev_name, const char *profile_info, s64 time_us,
+		 int err),
+
+	TP_ARGS(dev_name, profile_info, time_us, err),
+
+	TP_STRUCT__entry(
+		__string(dev_name, dev_name)
+		__string(profile_info, profile_info)
+		__field(s64, time_us)
+		__field(int, err)
+	),
+
+	TP_fast_assign(
+		__assign_str(dev_name, dev_name);
+		__assign_str(profile_info, profile_info);
+		__entry->time_us = time_us;
+		__entry->err = err;
+	),
+
+	TP_printk("%s: %s: took %lld usecs, err %d",
+		__get_str(dev_name), __get_str(profile_info),
+		__entry->time_us, __entry->err)
+);
+
+DEFINE_EVENT(ufshcd_profiling_template, ufshcd_profile_hibern8,
+	TP_PROTO(const char *dev_name, const char *profile_info, s64 time_us,
+		 int err),
+	TP_ARGS(dev_name, profile_info, time_us, err));
+
+DEFINE_EVENT(ufshcd_profiling_template, ufshcd_profile_clk_gating,
+	TP_PROTO(const char *dev_name, const char *profile_info, s64 time_us,
+		 int err),
+	TP_ARGS(dev_name, profile_info, time_us, err));
+
+DEFINE_EVENT(ufshcd_profiling_template, ufshcd_profile_clk_scaling,
+	TP_PROTO(const char *dev_name, const char *profile_info, s64 time_us,
+		 int err),
+	TP_ARGS(dev_name, profile_info, time_us, err));
+
+DECLARE_EVENT_CLASS(ufshcd_template,
+	TP_PROTO(const char *dev_name, int err, s64 usecs,
+		 int dev_state, int link_state),
+
+	TP_ARGS(dev_name, err, usecs, dev_state, link_state),
+
+	TP_STRUCT__entry(
+		__field(s64, usecs)
+		__field(int, err)
+		__string(dev_name, dev_name)
+		__field(int, dev_state)
+		__field(int, link_state)
+	),
+
+	TP_fast_assign(
+		__entry->usecs = usecs;
+		__entry->err = err;
+		__assign_str(dev_name, dev_name);
+		__entry->dev_state = dev_state;
+		__entry->link_state = link_state;
+	),
+
+	TP_printk(
+		"%s: took %lld usecs, dev_state: %s, link_state: %s, err %d",
+		__get_str(dev_name),
+		__entry->usecs,
+		__print_symbolic(__entry->dev_state,
+			{ UFS_ACTIVE_PWR_MODE, "ACTIVE" },
+			{ UFS_SLEEP_PWR_MODE, "SLEEP" },
+			{ UFS_POWERDOWN_PWR_MODE, "POWERDOWN" }),
+		__print_symbolic(__entry->link_state,
+			{ UIC_LINK_OFF_STATE, "LINK_OFF" },
+			{ UIC_LINK_ACTIVE_STATE, "LINK_ACTIVE" },
+			{ UIC_LINK_HIBERN8_STATE, "LINK_HIBERN8" }),
+		__entry->err
+	)
+);
+
+DEFINE_EVENT(ufshcd_template, ufshcd_system_suspend,
+	TP_PROTO(const char *dev_name, int err, s64 usecs,
+		int dev_state, int link_state),
+	TP_ARGS(dev_name, err, usecs, dev_state, link_state));
+
+DEFINE_EVENT(ufshcd_template, ufshcd_system_resume,
+	TP_PROTO(const char *dev_name, int err, s64 usecs,
+		int dev_state, int link_state),
+	TP_ARGS(dev_name, err, usecs, dev_state, link_state));
+
+DEFINE_EVENT(ufshcd_template, ufshcd_runtime_suspend,
+	TP_PROTO(const char *dev_name, int err, s64 usecs,
+		int dev_state, int link_state),
+	TP_ARGS(dev_name, err, usecs, dev_state, link_state));
+
+DEFINE_EVENT(ufshcd_template, ufshcd_runtime_resume,
+	TP_PROTO(const char *dev_name, int err, s64 usecs,
+		int dev_state, int link_state),
+	TP_ARGS(dev_name, err, usecs, dev_state, link_state));
+
+DEFINE_EVENT(ufshcd_template, ufshcd_init,
+	TP_PROTO(const char *dev_name, int err, s64 usecs,
+		int dev_state, int link_state),
+	TP_ARGS(dev_name, err, usecs, dev_state, link_state));
+
+TRACE_EVENT(ufshcd_command,
+	TP_PROTO(const char *dev_name, const char *str, unsigned int tag,
+			u32 doorbell, int transfer_len, u32 intr, u64 lba,
+			u8 opcode),
+
+	TP_ARGS(dev_name, str, tag, doorbell, transfer_len, intr, lba, opcode),
+
+	TP_STRUCT__entry(
+		__string(dev_name, dev_name)
+		__string(str, str)
+		__field(unsigned int, tag)
+		__field(u32, doorbell)
+		__field(int, transfer_len)
+		__field(u32, intr)
+		__field(u64, lba)
+		__field(u8, opcode)
+	),
+
+	TP_fast_assign(
+		__assign_str(dev_name, dev_name);
+		__assign_str(str, str);
+		__entry->tag = tag;
+		__entry->doorbell = doorbell;
+		__entry->transfer_len = transfer_len;
+		__entry->intr = intr;
+		__entry->lba = lba;
+		__entry->opcode = opcode;
+	),
+
+	TP_printk(
+		"%s: %s: tag: %u, DB: 0x%x, size: %d, IS: %u, LBA: %llu, opcode: 0x%x",
+		__get_str(str), __get_str(dev_name), __entry->tag,
+		__entry->doorbell, __entry->transfer_len,
+		__entry->intr, __entry->lba, (u32)__entry->opcode
+	)
+);
+
+#endif /* if !defined(_TRACE_UFS_H) || defined(TRACE_HEADER_MULTI_READ) */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
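
A hedged sketch of instrumenting command submission; the "send" label and the wrapper are illustrative, while the parameters follow the TP_PROTO above:

    #include <trace/events/ufs.h>

    static void example_send_command(const char *dev_name, unsigned int tag,
    				 u32 doorbell, int len, u32 intr,
    				 u64 lba, u8 opcode)
    {
    	trace_ufshcd_command(dev_name, "send", tag, doorbell, len,
    			     intr, lba, opcode);
    }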
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/include/trace/trace_thermal.h	2019-01-22 16:16:28.527291902 +0100
@@ -0,0 +1,430 @@
+/*
+ * Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM thermal
+
+#if !defined(_TRACE_THERMAL_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_THERMAL_H
+
+#include <linux/tracepoint.h>
+
+#ifdef TRACE_MSM_LMH
+DECLARE_EVENT_CLASS(msm_lmh_print_sensor_reading,
+
+	TP_PROTO(const char *sensor_name, unsigned int intensity),
+
+	TP_ARGS(
+		sensor_name, intensity
+	),
+
+	TP_STRUCT__entry(
+		__string(_name, sensor_name)
+		__field(unsigned int, reading)
+	),
+
+	TP_fast_assign(
+		__assign_str(_name, sensor_name);
+		__entry->reading = intensity;
+	),
+
+	TP_printk(
+		"Sensor:[%s] throttling intensity:%u", __get_str(_name),
+		__entry->reading
+	)
+);
+
+DECLARE_EVENT_CLASS(msm_lmh_print_event,
+
+	TP_PROTO(const char *event_name),
+
+	TP_ARGS(
+		event_name
+	),
+
+	TP_STRUCT__entry(
+		__string(_name,	event_name)
+	),
+
+	TP_fast_assign(
+		__assign_str(_name, event_name);
+	),
+
+	TP_printk(
+		"Event:[%s]", __get_str(_name)
+	)
+);
+
+DEFINE_EVENT(msm_lmh_print_sensor_reading, lmh_sensor_interrupt,
+
+	TP_PROTO(const char *sensor_name, unsigned int intensity),
+
+	TP_ARGS(sensor_name, intensity)
+);
+
+DEFINE_EVENT(msm_lmh_print_sensor_reading, lmh_sensor_reading,
+
+	TP_PROTO(const char *sensor_name, unsigned int intensity),
+
+	TP_ARGS(sensor_name, intensity)
+);
+
+DEFINE_EVENT(msm_lmh_print_event, lmh_event_call,
+
+	TP_PROTO(const char *event_name),
+
+	TP_ARGS(event_name)
+);
+
+TRACE_EVENT(lmh_debug_data,
+	TP_PROTO(const char *pre_data, uint32_t *data_buf, uint32_t buffer_len),
+
+	TP_ARGS(
+		pre_data, data_buf, buffer_len
+	),
+
+	TP_STRUCT__entry(
+		__string(_data, pre_data)
+		__field(u32, _buffer_len)
+		__dynamic_array(u32, _buffer, buffer_len)
+	),
+
+	TP_fast_assign(
+		__assign_str(_data, pre_data);
+		__entry->_buffer_len = buffer_len * sizeof(uint32_t);
+		memcpy(__get_dynamic_array(_buffer), data_buf,
+			buffer_len * sizeof(uint32_t));
+	),
+
+	TP_printk("%s:\t %s",
+		__get_str(_data), __print_hex(__get_dynamic_array(_buffer),
+			__entry->_buffer_len)
+	)
+);
+
+#elif defined(LMH_DCVS_TRACE)
+DECLARE_EVENT_CLASS(msm_lmh_dcvs_print,
+
+	TP_PROTO(int cpu, long max_freq),
+
+	TP_ARGS(
+		cpu, max_freq
+	),
+
+	TP_STRUCT__entry(
+		__field(int, cpu)
+		__field(long, max_freq)
+	),
+
+	TP_fast_assign(
+		__entry->cpu = cpu;
+		__entry->max_freq = max_freq;
+	),
+
+	TP_printk(
+		"cpu:%d max frequency:%ld",
+		__entry->cpu, __entry->max_freq
+	)
+);
+
+DEFINE_EVENT(msm_lmh_dcvs_print, lmh_dcvs_freq,
+
+	TP_PROTO(int cpu, long max_freq),
+
+	TP_ARGS(cpu, max_freq)
+);
+
+#elif defined(TRACE_MSM_THERMAL)
+
+DECLARE_EVENT_CLASS(msm_thermal_post_core_ctl,
+
+	TP_PROTO(unsigned int cpu, unsigned int online),
+
+	TP_ARGS(cpu, online),
+
+	TP_STRUCT__entry(
+		__field(unsigned int, cpu)
+		__field(unsigned int, online)
+	),
+
+	TP_fast_assign(
+		__entry->cpu = cpu;
+		__entry->online = online;
+	),
+
+	TP_printk("device=cpu%u online=%u",
+		 __entry->cpu,  __entry->online)
+);
+DECLARE_EVENT_CLASS(msm_thermal_pre_core_ctl,
+
+	TP_PROTO(unsigned int cpu),
+
+	TP_ARGS(cpu),
+
+	TP_STRUCT__entry(
+		__field(unsigned int, cpu)
+	),
+
+	TP_fast_assign(
+		__entry->cpu = cpu;
+	),
+
+	TP_printk("device=cpu%u", __entry->cpu)
+);
+
+DEFINE_EVENT(msm_thermal_pre_core_ctl, thermal_pre_core_offline,
+
+	TP_PROTO(unsigned int cpu),
+
+	TP_ARGS(cpu)
+);
+
+DEFINE_EVENT(msm_thermal_post_core_ctl, thermal_post_core_offline,
+
+	TP_PROTO(unsigned int cpu, unsigned int online),
+
+	TP_ARGS(cpu, online)
+);
+
+DEFINE_EVENT(msm_thermal_pre_core_ctl, thermal_pre_core_online,
+
+	TP_PROTO(unsigned int cpu),
+
+	TP_ARGS(cpu)
+);
+
+DEFINE_EVENT(msm_thermal_post_core_ctl, thermal_post_core_online,
+
+	TP_PROTO(unsigned int cpu, unsigned int online),
+
+	TP_ARGS(cpu, online)
+);
+
+DECLARE_EVENT_CLASS(msm_thermal_freq_mit,
+
+	TP_PROTO(unsigned int cpu, unsigned int max_freq,
+		unsigned int min_freq),
+
+	TP_ARGS(cpu, max_freq, min_freq),
+
+	TP_STRUCT__entry(
+		__field(unsigned int, cpu)
+		__field(unsigned int, max_freq)
+		__field(unsigned int, min_freq)
+	),
+
+	TP_fast_assign(
+		__entry->cpu = cpu;
+		__entry->max_freq = max_freq;
+		__entry->min_freq = min_freq;
+	),
+
+	TP_printk("device=cpu%u max_frequency=%u min_frequency=%u",
+			 __entry->cpu, __entry->max_freq,
+			 __entry->min_freq)
+);
+
+DEFINE_EVENT(msm_thermal_freq_mit, thermal_pre_frequency_mit,
+
+	TP_PROTO(unsigned int cpu, unsigned int max_freq,
+		unsigned int min_freq),
+
+	TP_ARGS(cpu, max_freq, min_freq)
+);
+
+DEFINE_EVENT(msm_thermal_freq_mit, thermal_post_frequency_mit,
+
+	TP_PROTO(unsigned int cpu, unsigned int max_freq,
+		unsigned int min_freq),
+
+	TP_ARGS(cpu, max_freq, min_freq)
+);
+
+#elif defined(_BCL_SW_TRACE) || defined(_BCL_HW_TRACE)
+
+DECLARE_EVENT_CLASS(msm_bcl_print_reading,
+
+	TP_PROTO(const char *sensor_name, long value),
+
+	TP_ARGS(
+		sensor_name, value
+	),
+
+	TP_STRUCT__entry(
+		__string(_name, sensor_name)
+		__field(long, reading)
+	),
+
+	TP_fast_assign(
+		__assign_str(_name, sensor_name);
+		__entry->reading = value;
+	),
+
+	TP_printk(
+		"%s:[%ld]", __get_str(_name), __entry->reading
+	)
+);
+
+DECLARE_EVENT_CLASS(msm_bcl_print_event,
+
+	TP_PROTO(const char *event_name),
+
+	TP_ARGS(
+		event_name
+	),
+
+	TP_STRUCT__entry(
+		__string(_name,	event_name)
+	),
+
+	TP_fast_assign(
+		__assign_str(_name, event_name);
+	),
+
+	TP_printk(
+		"Event:[%s]", __get_str(_name)
+	)
+);
+
+#ifdef _BCL_HW_TRACE
+DECLARE_EVENT_CLASS(msm_bcl_print_reg,
+
+	TP_PROTO(const char *sensor_name, unsigned int address,
+			unsigned int value),
+
+	TP_ARGS(
+		sensor_name, address, value
+	),
+
+	TP_STRUCT__entry(
+		__string(_name, sensor_name)
+		__field(unsigned int, _address)
+		__field(unsigned int, _value)
+	),
+
+	TP_fast_assign(
+		__assign_str(_name, sensor_name);
+		__entry->_address = address;
+		__entry->_value = value;
+	),
+
+	TP_printk(
+		"%s: address 0x%x: data 0x%02x", __get_str(_name),
+		__entry->_address, __entry->_value
+	)
+);
+
+DEFINE_EVENT(msm_bcl_print_reading, bcl_hw_sensor_reading,
+
+	TP_PROTO(const char *sensor_name, long intensity),
+
+	TP_ARGS(sensor_name, intensity)
+);
+
+DEFINE_EVENT(msm_bcl_print_reg, bcl_hw_reg_access,
+
+	TP_PROTO(const char *op_name, unsigned int address, unsigned int value),
+
+	TP_ARGS(op_name, address, value)
+);
+
+DEFINE_EVENT(msm_bcl_print_reading, bcl_hw_mitigation,
+
+	TP_PROTO(const char *sensor_name, long intensity),
+
+	TP_ARGS(sensor_name, intensity)
+);
+
+DEFINE_EVENT(msm_bcl_print_event, bcl_hw_mitigation_event,
+
+	TP_PROTO(const char *event_name),
+
+	TP_ARGS(event_name)
+);
+
+DEFINE_EVENT(msm_bcl_print_reading, bcl_hw_state_event,
+
+	TP_PROTO(const char *sensor_name, long intensity),
+
+	TP_ARGS(sensor_name, intensity)
+);
+
+DEFINE_EVENT(msm_bcl_print_event, bcl_hw_event,
+
+	TP_PROTO(const char *event_name),
+
+	TP_ARGS(event_name)
+);
+#elif defined(_BCL_SW_TRACE)
+DEFINE_EVENT(msm_bcl_print_reading, bcl_sw_mitigation,
+
+	TP_PROTO(const char *sensor_name, long intensity),
+
+	TP_ARGS(sensor_name, intensity)
+);
+
+DEFINE_EVENT(msm_bcl_print_event, bcl_sw_mitigation_event,
+
+	TP_PROTO(const char *event_name),
+
+	TP_ARGS(event_name)
+);
+#endif /* _BCL_HW_TRACE */
+#else
+DECLARE_EVENT_CLASS(tsens,
+
+	TP_PROTO(unsigned long temp, unsigned int sensor),
+
+	TP_ARGS(temp, sensor),
+
+	TP_STRUCT__entry(
+		__field(unsigned long, temp)
+		__field(unsigned int, sensor)
+	),
+
+	TP_fast_assign(
+		__entry->temp = temp;
+		__entry->sensor = sensor;
+	),
+
+	TP_printk("temp=%lu sensor=tsens_tz_sensor%u",
+				__entry->temp, __entry->sensor)
+);
+
+DEFINE_EVENT(tsens, tsens_read,
+
+	TP_PROTO(unsigned long temp, unsigned int sensor),
+
+	TP_ARGS(temp, sensor)
+);
+
+DEFINE_EVENT(tsens, tsens_threshold_hit,
+
+	TP_PROTO(unsigned long temp, unsigned int sensor),
+
+	TP_ARGS(temp, sensor)
+);
+
+DEFINE_EVENT(tsens, tsens_threshold_clear,
+
+	TP_PROTO(unsigned long temp, unsigned int sensor),
+
+	TP_ARGS(temp, sensor)
+);
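+
+/*
+ * Usage sketch (illustrative; sensor_hw_id is a placeholder name): the
+ * tsens driver would emit these as
+ *
+ *	trace_tsens_read(temp, sensor_hw_id);
+ *	trace_tsens_threshold_hit(temp, sensor_hw_id);
+ */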
+#endif
+#endif
+#undef TRACE_INCLUDE_PATH
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_FILE trace_thermal
+#include <trace/define_trace.h>
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/include/uapi/drm/msm_drm_pp.h	2019-01-22 16:16:28.531291938 +0100
@@ -0,0 +1,82 @@
+#ifndef _MSM_DRM_PP_H_
+#define _MSM_DRM_PP_H_
+
+#include <drm/drm.h>
+
+/**
+ * struct drm_msm_pcc_coeff - PCC coefficient structure for each color
+ *                            component.
+ * @c: constant coefficient.
+ * @r: red coefficient.
+ * @g: green coefficient.
+ * @b: blue coefficient.
+ * @rg: red green coefficient.
+ * @gb: green blue coefficient.
+ * @rb: red blue coefficient.
+ * @rgb: red green blue coefficient.
+ */
+struct drm_msm_pcc_coeff {
+	__u32 c;
+	__u32 r;
+	__u32 g;
+	__u32 b;
+	__u32 rg;
+	__u32 gb;
+	__u32 rb;
+	__u32 rgb;
+};
+
+/**
+ * struct drm_msm_pcc - PCC feature structure
+ * @flags: for customizing operations
+ * @r: red coefficients.
+ * @g: green coefficients.
+ * @b: blue coefficients.
+ */
+struct drm_msm_pcc {
+	__u64 flags;
+	struct drm_msm_pcc_coeff r;
+	struct drm_msm_pcc_coeff g;
+	struct drm_msm_pcc_coeff b;
+};
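+
+/*
+ * Usage sketch: an identity (pass-through) PCC setup keeps only the
+ * diagonal terms. PCC_UNITY is a placeholder for the hardware's
+ * fixed-point unity value, which is device specific:
+ *
+ *	struct drm_msm_pcc pcc = { .flags = 0 };
+ *
+ *	pcc.r.r = PCC_UNITY;
+ *	pcc.g.g = PCC_UNITY;
+ *	pcc.b.b = PCC_UNITY;
+ */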
+
+/* struct drm_msm_pa_vlut - picture adjustment vLUT structure
+ * @flags: for customizing vlut operation
+ * @val: vLUT values
+ */
+#define PA_VLUT_SIZE 256
+struct drm_msm_pa_vlut {
+	__u64 flags;
+	__u32 val[PA_VLUT_SIZE];
+};
+
+/* struct drm_msm_memcol - Memory color feature structure.
+ *                         Skin, sky, foliage features are supported.
+ * @prot_flags: Bit mask for enabling protection feature.
+ * @color_adjust_p0: Adjustment curve.
+ * @color_adjust_p1: Adjustment curve.
+ * @color_adjust_p2: Adjustment curve.
+ * @blend_gain: Blend gain weighting from other PA features.
+ * @sat_hold: Saturation hold value.
+ * @val_hold: Value hold info.
+ * @hue_region: Hue qualifier.
+ * @sat_region: Saturation qualifier.
+ * @val_region: Value qualifier.
+ */
+#define DRM_MSM_MEMCOL
+struct drm_msm_memcol {
+	__u64 prot_flags;
+	__u32 color_adjust_p0;
+	__u32 color_adjust_p1;
+	__u32 color_adjust_p2;
+	__u32 blend_gain;
+	__u32 sat_hold;
+	__u32 val_hold;
+	__u32 hue_region;
+	__u32 sat_region;
+	__u32 val_region;
+};
+
+#endif /* _MSM_DRM_PP_H_ */
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/include/uapi/drm/sde_drm.h	2019-01-22 16:16:28.535291974 +0100
@@ -0,0 +1,350 @@
+#ifndef _SDE_DRM_H_
+#define _SDE_DRM_H_
+
+/* Total number of supported color planes */
+#define SDE_MAX_PLANES  4
+
+/* Total number of parameterized detail enhancer mapping curves */
+#define SDE_MAX_DE_CURVES 3
+
+/* Y/RGB and UV filter configuration */
+#define FILTER_EDGE_DIRECTED_2D		0x0
+#define FILTER_CIRCULAR_2D		0x1
+#define FILTER_SEPARABLE_1D		0x2
+#define FILTER_BILINEAR			0x3
+
+/* Alpha filters */
+#define FILTER_ALPHA_DROP_REPEAT	0x0
+#define FILTER_ALPHA_BILINEAR		0x1
+#define FILTER_ALPHA_2D			0x3
+
+/* Blend filters */
+#define FILTER_BLEND_CIRCULAR_2D	0x0
+#define FILTER_BLEND_SEPARABLE_1D	0x1
+
+/* LUT configuration flags */
+#define SCALER_LUT_SWAP			0x1
+#define SCALER_LUT_DIR_WR		0x2
+#define SCALER_LUT_Y_CIR_WR		0x4
+#define SCALER_LUT_UV_CIR_WR		0x8
+#define SCALER_LUT_Y_SEP_WR		0x10
+#define SCALER_LUT_UV_SEP_WR		0x20
+
+/**
+ * Blend operations for "blend_op" property
+ *
+ * @SDE_DRM_BLEND_OP_NOT_DEFINED:   No blend operation defined for the layer.
+ * @SDE_DRM_BLEND_OP_OPAQUE:        Apply a constant blend operation. The layer
+ *                                  appears opaque if the fg plane alpha is
+ *                                  0xff.
+ * @SDE_DRM_BLEND_OP_PREMULTIPLIED: Apply source over blend rule. Layer already
+ *                                  has alpha pre-multiplication done. If the fg
+ *                                  plane alpha is less than 0xff, apply
+ *                                  modulation as well. This operation is
+ *                                  intended for layers with an alpha channel.
+ * @SDE_DRM_BLEND_OP_COVERAGE:      Apply source over blend rule. Layer is not
+ *                                  alpha pre-multiplied. Apply
+ *                                  pre-multiplication. If fg plane alpha is
+ *                                  less than 0xff, apply modulation as well.
+ * @SDE_DRM_BLEND_OP_MAX:           Used to track maximum blend operation
+ *                                  possible by mdp.
+ */
+#define SDE_DRM_BLEND_OP_NOT_DEFINED    0
+#define SDE_DRM_BLEND_OP_OPAQUE         1
+#define SDE_DRM_BLEND_OP_PREMULTIPLIED  2
+#define SDE_DRM_BLEND_OP_COVERAGE       3
+#define SDE_DRM_BLEND_OP_MAX            4
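+
+/*
+ * Usage sketch (assumes a libdrm atomic request and an already-resolved
+ * "blend_op" plane property id):
+ *
+ *	drmModeAtomicAddProperty(req, plane_id, blend_op_prop_id,
+ *				 SDE_DRM_BLEND_OP_PREMULTIPLIED);
+ */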
+
+/**
+ * Bit masks for "src_config" property
+ * construct bitmask via (1UL << SDE_DRM_<flag>)
+ */
+#define SDE_DRM_DEINTERLACE         0   /* Specifies interlaced input */
+
+/* DRM bitmasks are restricted to 0..63 */
+#define SDE_DRM_BITMASK_COUNT       64
+
+/**
+ * Framebuffer modes for "fb_translation_mode" PLANE property
+ *
+ * @SDE_DRM_FB_NON_SEC:          IOMMU configuration for this framebuffer mode
+ *                               is non-secure domain and requires
+ *                               both stage I and stage II translations when
+ *                               this buffer is accessed by the display HW.
+ *                               This is the default mode of all framebuffers.
+ * @SDE_DRM_FB_SEC:              IOMMU configuration for this framebuffer mode
+ *                               is secure domain and requires
+ *                               both stage I and stage II translations when
+ *                               this buffer is accessed by the display HW.
+ * @SDE_DRM_FB_NON_SEC_DIR_TRANS: IOMMU configuration for this framebuffer mode
+ *                               is non-secure domain and requires
+ *                               only stage II translation when
+ *                               this buffer is accessed by the display HW.
+ * @SDE_DRM_FB_SEC_DIR_TRANS:    IOMMU configuration for this framebuffer mode
+ *                               is secure domain and requires
+ *                               only stage II translation when
+ *                               this buffer is accessed by the display HW.
+ */
+
+#define SDE_DRM_FB_NON_SEC              0
+#define SDE_DRM_FB_SEC                  1
+#define SDE_DRM_FB_NON_SEC_DIR_TRANS    2
+#define SDE_DRM_FB_SEC_DIR_TRANS        3
+
+/**
+ * Secure levels for the "security_level" CRTC property, which specifies
+ * what plane types can be attached to this CRTC. The plane component
+ * derives the plane type based on the FB_MODE.
+ * @SDE_DRM_SEC_NON_SEC:  Both secure and non-secure plane types can be
+ *                        attached to this CRTC. This is the default state
+ *                        of the CRTC.
+ * @SDE_DRM_SEC_ONLY:     Only secure planes can be added to this CRTC. If a
+ *                        CRTC is instructed to be in this mode it follows
+ *                        platform-dependent restrictions.
+ */
+#define SDE_DRM_SEC_NON_SEC            0
+#define SDE_DRM_SEC_ONLY               1
+
+/**
+ * struct sde_drm_pix_ext_v1 - version 1 of pixel ext structure
+ * @num_ext_pxls_lr: Number of total horizontal pixels
+ * @num_ext_pxls_tb: Number of total vertical lines
+ * @left_ftch:       Number of extra pixels to overfetch from left
+ * @right_ftch:      Number of extra pixels to overfetch from right
+ * @top_ftch:        Number of extra lines to overfetch from top
+ * @btm_ftch:        Number of extra lines to overfetch from bottom
+ * @left_rpt:        Number of extra pixels to repeat from left
+ * @right_rpt:       Number of extra pixels to repeat from right
+ * @top_rpt:         Number of extra lines to repeat from top
+ * @btm_rpt:         Number of extra lines to repeat from bottom
+ */
+struct sde_drm_pix_ext_v1 {
+	/*
+	 * Number of extension pixels in the left, right, top and bottom
+	 * directions for all color components.
+	 */
+	int32_t num_ext_pxls_lr[SDE_MAX_PLANES];
+	int32_t num_ext_pxls_tb[SDE_MAX_PLANES];
+
+	/*
+	 * Number of pixels that need to be overfetched from the source
+	 * image in the left, right, top and bottom directions for scaling.
+	 */
+	int32_t left_ftch[SDE_MAX_PLANES];
+	int32_t right_ftch[SDE_MAX_PLANES];
+	int32_t top_ftch[SDE_MAX_PLANES];
+	int32_t btm_ftch[SDE_MAX_PLANES];
+	/*
+	 * Number of pixels that need to be repeated in the left, right,
+	 * top and bottom directions for scaling.
+	 */
+	int32_t left_rpt[SDE_MAX_PLANES];
+	int32_t right_rpt[SDE_MAX_PLANES];
+	int32_t top_rpt[SDE_MAX_PLANES];
+	int32_t btm_rpt[SDE_MAX_PLANES];
+};
+
+/**
+ * struct sde_drm_scaler_v1 - version 1 of struct sde_drm_scaler
+ * @lr:            Pixel extension settings for left/right
+ * @tb:            Pixel extension settings for top/bottom
+ * @init_phase_x:  Initial scaler phase values for x
+ * @phase_step_x:  Phase step values for x
+ * @init_phase_y:  Initial scaler phase values for y
+ * @phase_step_y:  Phase step values for y
+ * @horz_filter:   Horizontal filter array
+ * @vert_filter:   Vertical filter array
+ */
+struct sde_drm_scaler_v1 {
+	/*
+	 * Pix ext settings
+	 */
+	struct sde_drm_pix_ext_v1 pe;
+	/*
+	 * Phase settings
+	 */
+	int32_t init_phase_x[SDE_MAX_PLANES];
+	int32_t phase_step_x[SDE_MAX_PLANES];
+	int32_t init_phase_y[SDE_MAX_PLANES];
+	int32_t phase_step_y[SDE_MAX_PLANES];
+
+	/*
+	 * Filter type to be used for scaling in horizontal and vertical
+	 * directions
+	 */
+	uint32_t horz_filter[SDE_MAX_PLANES];
+	uint32_t vert_filter[SDE_MAX_PLANES];
+};
+
+/**
+ * struct sde_drm_de_v1 - version 1 of detail enhancer structure
+ * @enable:         Enables/disables detail enhancer
+ * @sharpen_level1: Sharpening strength for noise
+ * @sharpen_level2: Sharpening strength for context
+ * @clip:           Clip coefficient
+ * @limit:          Detail enhancer limit factor
+ * @thr_quiet:      Quiet zone threshold
+ * @thr_dieout:     Die-out zone threshold
+ * @thr_low:        Linear zone left threshold
+ * @thr_high:       Linear zone right threshold
+ * @prec_shift:     Detail enhancer precision
+ * @adjust_a:       Mapping curves A coefficients
+ * @adjust_b:       Mapping curves B coefficients
+ * @adjust_c:       Mapping curves C coefficients
+ */
+struct sde_drm_de_v1 {
+	uint32_t enable;
+	int16_t sharpen_level1;
+	int16_t sharpen_level2;
+	uint16_t clip;
+	uint16_t limit;
+	uint16_t thr_quiet;
+	uint16_t thr_dieout;
+	uint16_t thr_low;
+	uint16_t thr_high;
+	uint16_t prec_shift;
+	int16_t adjust_a[SDE_MAX_DE_CURVES];
+	int16_t adjust_b[SDE_MAX_DE_CURVES];
+	int16_t adjust_c[SDE_MAX_DE_CURVES];
+};
+
+/**
+ * struct sde_drm_scaler_v2 - version 2 of struct sde_drm_scaler
+ * @enable:            Scaler enable
+ * @dir_en:            Detail enhancer enable
+ * @pe:                Pixel extension settings
+ * @horz_decimate:     Horizontal decimation factor
+ * @vert_decimate:     Vertical decimation factor
+ * @init_phase_x:      Initial scaler phase values for x
+ * @phase_step_x:      Phase step values for x
+ * @init_phase_y:      Initial scaler phase values for y
+ * @phase_step_y:      Phase step values for y
+ * @preload_x:         Horizontal preload value
+ * @preload_y:         Vertical preload value
+ * @src_width:         Source width
+ * @src_height:        Source height
+ * @dst_width:         Destination width
+ * @dst_height:        Destination height
+ * @y_rgb_filter_cfg:  Y/RGB plane filter configuration
+ * @uv_filter_cfg:     UV plane filter configuration
+ * @alpha_filter_cfg:  Alpha filter configuration
+ * @blend_cfg:         Selection of blend coefficients
+ * @lut_flag:          LUT configuration flags
+ * @dir_lut_idx:       2d 4x4 LUT index
+ * @y_rgb_cir_lut_idx: Y/RGB circular LUT index
+ * @uv_cir_lut_idx:    UV circular LUT index
+ * @y_rgb_sep_lut_idx: Y/RGB separable LUT index
+ * @uv_sep_lut_idx:    UV separable LUT index
+ * @de:                Detail enhancer settings
+ */
+struct sde_drm_scaler_v2 {
+	/*
+	 * General definitions
+	 */
+	uint32_t enable;
+	uint32_t dir_en;
+
+	/*
+	 * Pix ext settings
+	 */
+	struct sde_drm_pix_ext_v1 pe;
+
+	/*
+	 * Decimation settings
+	 */
+	uint32_t horz_decimate;
+	uint32_t vert_decimate;
+
+	/*
+	 * Phase settings
+	 */
+	int32_t init_phase_x[SDE_MAX_PLANES];
+	int32_t phase_step_x[SDE_MAX_PLANES];
+	int32_t init_phase_y[SDE_MAX_PLANES];
+	int32_t phase_step_y[SDE_MAX_PLANES];
+
+	uint32_t preload_x[SDE_MAX_PLANES];
+	uint32_t preload_y[SDE_MAX_PLANES];
+	uint32_t src_width[SDE_MAX_PLANES];
+	uint32_t src_height[SDE_MAX_PLANES];
+
+	uint32_t dst_width;
+	uint32_t dst_height;
+
+	uint32_t y_rgb_filter_cfg;
+	uint32_t uv_filter_cfg;
+	uint32_t alpha_filter_cfg;
+	uint32_t blend_cfg;
+
+	uint32_t lut_flag;
+	uint32_t dir_lut_idx;
+
+	/* for Y(RGB) and UV planes */
+	uint32_t y_rgb_cir_lut_idx;
+	uint32_t uv_cir_lut_idx;
+	uint32_t y_rgb_sep_lut_idx;
+	uint32_t uv_sep_lut_idx;
+
+	/*
+	 * Detail enhancer settings
+	 */
+	struct sde_drm_de_v1 de;
+};
+
+/*
+ * Define constants for struct sde_drm_csc
+ */
+#define SDE_CSC_MATRIX_COEFF_SIZE   9
+#define SDE_CSC_CLAMP_SIZE          6
+#define SDE_CSC_BIAS_SIZE           3
+
+/**
+ * struct sde_drm_csc_v1 - version 1 of struct sde_drm_csc
+ * @ctm_coeff:          Matrix coefficients, in S31.32 format
+ * @pre_bias:           Pre-bias array values
+ * @post_bias:          Post-bias array values
+ * @pre_clamp:          Pre-clamp array values
+ * @post_clamp:         Post-clamp array values
+ */
+struct sde_drm_csc_v1 {
+	int64_t ctm_coeff[SDE_CSC_MATRIX_COEFF_SIZE];
+	uint32_t pre_bias[SDE_CSC_BIAS_SIZE];
+	uint32_t post_bias[SDE_CSC_BIAS_SIZE];
+	uint32_t pre_clamp[SDE_CSC_CLAMP_SIZE];
+	uint32_t post_clamp[SDE_CSC_CLAMP_SIZE];
+};
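+
+/*
+ * Encoding sketch for the S31.32 fixed-point @ctm_coeff entries (csc is a
+ * placeholder struct sde_drm_csc_v1 variable): the low 32 bits hold the
+ * fraction, so
+ *
+ *	csc.ctm_coeff[0] = 1LL << 32;		// 1.0: identity R coefficient
+ *	csc.ctm_coeff[1] = 1LL << 31;		// 0.5
+ *	csc.ctm_coeff[2] = -(1LL << 31);	// -0.5
+ */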
+
+/* Writeback Config version definition */
+#define SDE_DRM_WB_CFG		0x1
+
+/* SDE_DRM_WB_CONFIG_FLAGS - Writeback configuration flags */
+#define SDE_DRM_WB_CFG_FLAGS_CONNECTED	(1<<0)
+
+/**
+ * struct sde_drm_wb_cfg - Writeback configuration structure
+ * @flags:		see SDE_DRM_WB_CONFIG_FLAGS
+ * @connector_id:	writeback connector identifier
+ * @count_modes:	Count of modes pointed to by @modes
+ * @modes:		Pointer to an array of struct drm_mode_modeinfo,
+ *			passed as a 64-bit value
+ */
+struct sde_drm_wb_cfg {
+	uint32_t flags;
+	uint32_t connector_id;
+	uint32_t count_modes;
+	uint64_t modes;
+};
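+
+/*
+ * Usage sketch (conn_id and mode are placeholders): @modes carries a
+ * userspace pointer in a 64-bit field, so it is typically filled as
+ *
+ *	struct drm_mode_modeinfo mode;
+ *	struct sde_drm_wb_cfg cfg = {
+ *		.flags = SDE_DRM_WB_CFG_FLAGS_CONNECTED,
+ *		.connector_id = conn_id,
+ *		.count_modes = 1,
+ *		.modes = (uint64_t)(uintptr_t)&mode,
+ *	};
+ */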
+
+/**
+ * Define extended power modes supported by the SDE connectors.
+ */
+#define SDE_MODE_DPMS_ON       0
+#define SDE_MODE_DPMS_LP1      1
+#define SDE_MODE_DPMS_LP2      2
+#define SDE_MODE_DPMS_STANDBY  3
+#define SDE_MODE_DPMS_SUSPEND  4
+#define SDE_MODE_DPMS_OFF      5
+
+#endif /* _SDE_DRM_H_ */
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/include/uapi/linux/android_pmem.h	2019-01-22 16:16:28.535291974 +0100
@@ -0,0 +1,169 @@
+/* include/linux/android_pmem.h
+ *
+ * Copyright (C) 2007 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _ANDROID_PMEM_H_
+#define _ANDROID_PMEM_H_
+
+#include <linux/fs.h>
+
+#define PMEM_KERNEL_TEST_MAGIC 0xc0
+#define PMEM_KERNEL_TEST_NOMINAL_TEST_IOCTL \
+	_IO(PMEM_KERNEL_TEST_MAGIC, 1)
+#define PMEM_KERNEL_TEST_ADVERSARIAL_TEST_IOCTL \
+	_IO(PMEM_KERNEL_TEST_MAGIC, 2)
+#define PMEM_KERNEL_TEST_HUGE_ALLOCATION_TEST_IOCTL \
+	_IO(PMEM_KERNEL_TEST_MAGIC, 3)
+#define PMEM_KERNEL_TEST_FREE_UNALLOCATED_TEST_IOCTL \
+	_IO(PMEM_KERNEL_TEST_MAGIC, 4)
+#define PMEM_KERNEL_TEST_LARGE_REGION_NUMBER_TEST_IOCTL \
+	_IO(PMEM_KERNEL_TEST_MAGIC, 5)
+
+#define PMEM_IOCTL_MAGIC 'p'
+#define PMEM_GET_PHYS		_IOW(PMEM_IOCTL_MAGIC, 1, unsigned int)
+#define PMEM_MAP		_IOW(PMEM_IOCTL_MAGIC, 2, unsigned int)
+#define PMEM_GET_SIZE		_IOW(PMEM_IOCTL_MAGIC, 3, unsigned int)
+#define PMEM_UNMAP		_IOW(PMEM_IOCTL_MAGIC, 4, unsigned int)
+/* This ioctl allocates pmem space backing the file; it fails if the file
+ * already has an allocation. Pass the length as the argument to the ioctl.
+ */
+#define PMEM_ALLOCATE		_IOW(PMEM_IOCTL_MAGIC, 5, unsigned int)
+/* This connects one pmem file to another; pass the file that is already
+ * backed in memory as the argument to the ioctl.
+ */
+#define PMEM_CONNECT		_IOW(PMEM_IOCTL_MAGIC, 6, unsigned int)
+/* Returns the total size of the pmem region this ioctl is sent to, as a
+ * pmem_region struct (with offset set to 0).
+ */
+#define PMEM_GET_TOTAL_SIZE	_IOW(PMEM_IOCTL_MAGIC, 7, unsigned int)
+/* Revokes gpu registers and resets the gpu.  Pass a pointer to the
+ * start of the mapped gpu regs (the vaddr returned by mmap) as the argument.
+ */
+#define HW3D_REVOKE_GPU		_IOW(PMEM_IOCTL_MAGIC, 8, unsigned int)
+#define HW3D_GRANT_GPU		_IOW(PMEM_IOCTL_MAGIC, 9, unsigned int)
+#define HW3D_WAIT_FOR_INTERRUPT	_IOW(PMEM_IOCTL_MAGIC, 10, unsigned int)
+
+#define PMEM_CLEAN_INV_CACHES	_IOW(PMEM_IOCTL_MAGIC, 11, unsigned int)
+#define PMEM_CLEAN_CACHES	_IOW(PMEM_IOCTL_MAGIC, 12, unsigned int)
+#define PMEM_INV_CACHES		_IOW(PMEM_IOCTL_MAGIC, 13, unsigned int)
+
+#define PMEM_GET_FREE_SPACE	_IOW(PMEM_IOCTL_MAGIC, 14, unsigned int)
+#define PMEM_ALLOCATE_ALIGNED	_IOW(PMEM_IOCTL_MAGIC, 15, unsigned int)
+struct pmem_region {
+	unsigned long offset;
+	unsigned long len;
+};
+
+struct pmem_addr {
+	unsigned long vaddr;
+	unsigned long offset;
+	unsigned long length;
+};
+
+struct pmem_freespace {
+	unsigned long total;
+	unsigned long largest;
+};
+
+struct pmem_allocation {
+	unsigned long size;
+	unsigned int align;
+};
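+
+/*
+ * Usage sketch (fd is a placeholder for an open pmem device file
+ * descriptor): querying the size of the backing allocation looks like
+ *
+ *	struct pmem_region region;
+ *	int err = ioctl(fd, PMEM_GET_SIZE, &region);
+ *	... on success, region.len holds the size ...
+ */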
+
+#ifdef __KERNEL__
+int get_pmem_file(unsigned int fd, unsigned long *start, unsigned long *vstart,
+		  unsigned long *end, struct file **filp);
+int get_pmem_fd(int fd, unsigned long *start, unsigned long *end);
+int get_pmem_user_addr(struct file *file, unsigned long *start,
+		       unsigned long *end);
+void put_pmem_file(struct file *file);
+void put_pmem_fd(int fd);
+void flush_pmem_fd(int fd, unsigned long start, unsigned long len);
+void flush_pmem_file(struct file *file, unsigned long start, unsigned long len);
+int pmem_cache_maint(struct file *file, unsigned int cmd,
+		struct pmem_addr *pmem_addr);
+
+enum pmem_allocator_type {
+	/* Zero is a default in platform PMEM structures in the board files,
+	 * when the "allocator_type" structure element is not explicitly
+	 * defined
+	 */
+	PMEM_ALLOCATORTYPE_BITMAP = 0, /* forced to be zero here */
+	PMEM_ALLOCATORTYPE_SYSTEM,
+
+	PMEM_ALLOCATORTYPE_ALLORNOTHING,
+	PMEM_ALLOCATORTYPE_BUDDYBESTFIT,
+
+	PMEM_ALLOCATORTYPE_MAX,
+};
+
+/* kernel api names for board specific data structures */
+#define PMEM_KERNEL_EBI1_DATA_NAME "pmem_kernel_ebi1"
+#define PMEM_KERNEL_SMI_DATA_NAME "pmem_kernel_smi"
+
+struct android_pmem_platform_data {
+	const char *name;
+	/* size of memory region */
+	unsigned long size;
+
+	enum pmem_allocator_type allocator_type;
+	/* treated as a 'hidden' variable in the board files. Can be
+	 * set, but default is the system init value of 0 which becomes a
+	 * quantum of 4K pages.
+	 */
+	unsigned int quantum;
+
+	/* Set to indicate that maps of this region should be cached. If a
+	 * mix of cached and uncached is desired, set this and open the
+	 * device with O_SYNC to get an uncached region. */
+	unsigned cached;
+	/* The MSM7k has bits to enable a write buffer in the bus controller */
+	unsigned buffered;
+	/* which memory type (i.e. SMI, EBI1) this PMEM device is backed by */
+	unsigned memory_type;
+	/*
+	 * function to be called when the number of allocations goes from
+	 * 0 -> 1
+	 */
+	int (*request_region)(void *);
+	/*
+	 * function to be called when the number of allocations goes from
+	 * 1 -> 0
+	 */
+	int (*release_region)(void *);
+	/*
+	 * function to be called upon pmem registration
+	 */
+	void *(*setup_region)(void);
+	/*
+	 * indicates that this region should be mapped/unmapped as needed
+	 */
+	int map_on_demand;
+	/*
+	 * indicates this pmem may be reused via fmem
+	 */
+	int reusable;
+};
+
+int pmem_setup(struct android_pmem_platform_data *pdata,
+	       long (*ioctl)(struct file *, unsigned int, unsigned long),
+	       int (*release)(struct inode *, struct file *));
+
+int pmem_remap(struct pmem_region *region, struct file *file,
+	       unsigned operation);
+#endif /* __KERNEL__ */
+
+#endif /* _ANDROID_PMEM_H_ */
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/include/uapi/linux/ashmem.h	2019-01-22 16:16:26.719275529 +0100
@@ -0,0 +1,48 @@
+/*
+ * drivers/staging/android/uapi/ashmem.h
+ *
+ * Copyright 2008 Google Inc.
+ * Author: Robert Love
+ *
+ * This file is dual licensed.  It may be redistributed and/or modified
+ * under the terms of the Apache 2.0 License OR version 2 of the GNU
+ * General Public License.
+ */
+
+#ifndef _UAPI_LINUX_ASHMEM_H
+#define _UAPI_LINUX_ASHMEM_H
+
+#include <linux/ioctl.h>
+#include <linux/types.h>
+
+#define ASHMEM_NAME_LEN		256
+
+#define ASHMEM_NAME_DEF		"dev/ashmem"
+
+/* Return values from ASHMEM_PIN: Was the mapping purged while unpinned? */
+#define ASHMEM_NOT_PURGED	0
+#define ASHMEM_WAS_PURGED	1
+
+/* Return values from ASHMEM_GET_PIN_STATUS: Is the mapping pinned? */
+#define ASHMEM_IS_UNPINNED	0
+#define ASHMEM_IS_PINNED	1
+
+struct ashmem_pin {
+	__u32 offset;	/* offset into region, in bytes, page-aligned */
+	__u32 len;	/* length forward from offset, in bytes, page-aligned */
+};
+
+#define __ASHMEMIOC		0x77
+
+#define ASHMEM_SET_NAME		_IOW(__ASHMEMIOC, 1, char[ASHMEM_NAME_LEN])
+#define ASHMEM_GET_NAME		_IOR(__ASHMEMIOC, 2, char[ASHMEM_NAME_LEN])
+#define ASHMEM_SET_SIZE		_IOW(__ASHMEMIOC, 3, size_t)
+#define ASHMEM_GET_SIZE		_IO(__ASHMEMIOC, 4)
+#define ASHMEM_SET_PROT_MASK	_IOW(__ASHMEMIOC, 5, unsigned long)
+#define ASHMEM_GET_PROT_MASK	_IO(__ASHMEMIOC, 6)
+#define ASHMEM_PIN		_IOW(__ASHMEMIOC, 7, struct ashmem_pin)
+#define ASHMEM_UNPIN		_IOW(__ASHMEMIOC, 8, struct ashmem_pin)
+#define ASHMEM_GET_PIN_STATUS	_IO(__ASHMEMIOC, 9)
+#define ASHMEM_PURGE_ALL_CACHES	_IO(__ASHMEMIOC, 10)
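+
+/*
+ * Usage sketch of the typical ashmem flow (len is a placeholder):
+ *
+ *	int fd = open("/dev/ashmem", O_RDWR);
+ *
+ *	ioctl(fd, ASHMEM_SET_NAME, "my-region");
+ *	ioctl(fd, ASHMEM_SET_SIZE, len);
+ *	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
+ */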
+
+#endif	/* _UAPI_LINUX_ASHMEM_H */
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/include/uapi/linux/avtimer.h	2019-01-22 16:16:28.539292010 +0100
@@ -0,0 +1,10 @@
+#ifndef _UAPI_AVTIMER_H
+#define _UAPI_AVTIMER_H
+
+#include <linux/ioctl.h>
+
+#define MAJOR_NUM 100
+
+#define IOCTL_GET_AVTIMER_TICK _IOR(MAJOR_NUM, 0, uint64_t)
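+
+/*
+ * Usage sketch (the device node path is an assumption):
+ *
+ *	uint64_t ticks;
+ *	int fd = open("/dev/avtimer", O_RDONLY);
+ *
+ *	ioctl(fd, IOCTL_GET_AVTIMER_TICK, &ticks);
+ */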
+
+#endif
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/include/uapi/linux/batterydata-interface.h	2019-01-22 16:16:28.539292010 +0100
@@ -0,0 +1,30 @@
+#ifndef __BATTERYDATA_LIB_H__
+#define __BATTERYDATA_LIB_H__
+
+#include <linux/ioctl.h>
+
+/**
+ * struct battery_params - Battery profile data to be exchanged.
+ * @soc:	SOC (state of charge) of the battery
+ * @ocv_uv:	OCV (open circuit voltage) of the battery
+ * @rbatt_sf:	RBATT scaling factor
+ * @batt_temp:	Battery temperature in deci-degree.
+ * @slope:	Slope of the OCV-SOC curve.
+ * @fcc_mah:	FCC (full charge capacity) of the battery.
+ */
+struct battery_params {
+	int soc;
+	int ocv_uv;
+	int rbatt_sf;
+	int batt_temp;
+	int slope;
+	int fcc_mah;
+};
+
+/*  IOCTLs to query battery profile data */
+#define BPIOCXSOC	_IOWR('B', 0x01, struct battery_params) /* SOC */
+#define BPIOCXRBATT	_IOWR('B', 0x02, struct battery_params) /* RBATT SF */
+#define BPIOCXSLOPE	_IOWR('B', 0x03, struct battery_params) /* SLOPE */
+#define BPIOCXFCC	_IOWR('B', 0x04, struct battery_params) /* FCC */
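+
+/*
+ * Usage sketch (fd, ocv and temp are placeholders): fill in the lookup
+ * inputs, issue the ioctl, then read back the result, e.g. for SOC:
+ *
+ *	struct battery_params bp = { .ocv_uv = ocv, .batt_temp = temp };
+ *	int err = ioctl(fd, BPIOCXSOC, &bp);
+ *	... on success, bp.soc holds the computed state of charge ...
+ */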
+
+#endif
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/include/uapi/linux/cec-funcs.h	2019-01-22 16:16:28.539292010 +0100
@@ -0,0 +1,1969 @@
+/*
+ * cec - HDMI Consumer Electronics Control message functions
+ *
+ * Copyright 2016 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * Alternatively you can redistribute this file under the terms of the
+ * BSD license as stated below:
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ * 3. The names of its contributors may not be used to endorse or promote
+ *    products derived from this software without specific prior written
+ *    permission.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef _CEC_UAPI_FUNCS_H
+#define _CEC_UAPI_FUNCS_H
+
+#include <linux/cec.h>
+
+/* One Touch Play Feature */
+static inline void cec_msg_active_source(struct cec_msg *msg, __u16 phys_addr)
+{
+	msg->len = 4;
+	msg->msg[0] |= 0xf; /* broadcast */
+	msg->msg[1] = CEC_MSG_ACTIVE_SOURCE;
+	msg->msg[2] = phys_addr >> 8;
+	msg->msg[3] = phys_addr & 0xff;
+}
+
+static inline void cec_ops_active_source(const struct cec_msg *msg,
+					 __u16 *phys_addr)
+{
+	*phys_addr = (msg->msg[2] << 8) | msg->msg[3];
+}
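+
+/*
+ * Usage sketch: composing and then parsing an Active Source message for
+ * physical address 1.0.0.0:
+ *
+ *	struct cec_msg msg = {};
+ *	__u16 pa;
+ *
+ *	cec_msg_active_source(&msg, 0x1000);
+ *	cec_ops_active_source(&msg, &pa);	// pa == 0x1000
+ */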
+
+static inline void cec_msg_image_view_on(struct cec_msg *msg)
+{
+	msg->len = 2;
+	msg->msg[1] = CEC_MSG_IMAGE_VIEW_ON;
+}
+
+static inline void cec_msg_text_view_on(struct cec_msg *msg)
+{
+	msg->len = 2;
+	msg->msg[1] = CEC_MSG_TEXT_VIEW_ON;
+}
+
+
+/* Routing Control Feature */
+static inline void cec_msg_inactive_source(struct cec_msg *msg,
+					   __u16 phys_addr)
+{
+	msg->len = 4;
+	msg->msg[1] = CEC_MSG_INACTIVE_SOURCE;
+	msg->msg[2] = phys_addr >> 8;
+	msg->msg[3] = phys_addr & 0xff;
+}
+
+static inline void cec_ops_inactive_source(const struct cec_msg *msg,
+					   __u16 *phys_addr)
+{
+	*phys_addr = (msg->msg[2] << 8) | msg->msg[3];
+}
+
+static inline void cec_msg_request_active_source(struct cec_msg *msg,
+						 bool reply)
+{
+	msg->len = 2;
+	msg->msg[0] |= 0xf; /* broadcast */
+	msg->msg[1] = CEC_MSG_REQUEST_ACTIVE_SOURCE;
+	msg->reply = reply ? CEC_MSG_ACTIVE_SOURCE : 0;
+}
+
+static inline void cec_msg_routing_information(struct cec_msg *msg,
+					       __u16 phys_addr)
+{
+	msg->len = 4;
+	msg->msg[0] |= 0xf; /* broadcast */
+	msg->msg[1] = CEC_MSG_ROUTING_INFORMATION;
+	msg->msg[2] = phys_addr >> 8;
+	msg->msg[3] = phys_addr & 0xff;
+}
+
+static inline void cec_ops_routing_information(const struct cec_msg *msg,
+					       __u16 *phys_addr)
+{
+	*phys_addr = (msg->msg[2] << 8) | msg->msg[3];
+}
+
+static inline void cec_msg_routing_change(struct cec_msg *msg,
+					  bool reply,
+					  __u16 orig_phys_addr,
+					  __u16 new_phys_addr)
+{
+	msg->len = 6;
+	msg->msg[0] |= 0xf; /* broadcast */
+	msg->msg[1] = CEC_MSG_ROUTING_CHANGE;
+	msg->msg[2] = orig_phys_addr >> 8;
+	msg->msg[3] = orig_phys_addr & 0xff;
+	msg->msg[4] = new_phys_addr >> 8;
+	msg->msg[5] = new_phys_addr & 0xff;
+	msg->reply = reply ? CEC_MSG_ROUTING_INFORMATION : 0;
+}
+
+static inline void cec_ops_routing_change(const struct cec_msg *msg,
+					  __u16 *orig_phys_addr,
+					  __u16 *new_phys_addr)
+{
+	*orig_phys_addr = (msg->msg[2] << 8) | msg->msg[3];
+	*new_phys_addr = (msg->msg[4] << 8) | msg->msg[5];
+}
+
+static inline void cec_msg_set_stream_path(struct cec_msg *msg, __u16 phys_addr)
+{
+	msg->len = 4;
+	msg->msg[0] |= 0xf; /* broadcast */
+	msg->msg[1] = CEC_MSG_SET_STREAM_PATH;
+	msg->msg[2] = phys_addr >> 8;
+	msg->msg[3] = phys_addr & 0xff;
+}
+
+static inline void cec_ops_set_stream_path(const struct cec_msg *msg,
+					   __u16 *phys_addr)
+{
+	*phys_addr = (msg->msg[2] << 8) | msg->msg[3];
+}
+
+
+/* Standby Feature */
+static inline void cec_msg_standby(struct cec_msg *msg)
+{
+	msg->len = 2;
+	msg->msg[1] = CEC_MSG_STANDBY;
+}
+
+
+/* One Touch Record Feature */
+static inline void cec_msg_record_off(struct cec_msg *msg, bool reply)
+{
+	msg->len = 2;
+	msg->msg[1] = CEC_MSG_RECORD_OFF;
+	msg->reply = reply ? CEC_MSG_RECORD_STATUS : 0;
+}
+
+struct cec_op_arib_data {
+	__u16 transport_id;
+	__u16 service_id;
+	__u16 orig_network_id;
+};
+
+struct cec_op_atsc_data {
+	__u16 transport_id;
+	__u16 program_number;
+};
+
+struct cec_op_dvb_data {
+	__u16 transport_id;
+	__u16 service_id;
+	__u16 orig_network_id;
+};
+
+struct cec_op_channel_data {
+	__u8 channel_number_fmt;
+	__u16 major;
+	__u16 minor;
+};
+
+struct cec_op_digital_service_id {
+	__u8 service_id_method;
+	__u8 dig_bcast_system;
+	union {
+		struct cec_op_arib_data arib;
+		struct cec_op_atsc_data atsc;
+		struct cec_op_dvb_data dvb;
+		struct cec_op_channel_data channel;
+	};
+};
+
+struct cec_op_record_src {
+	__u8 type;
+	union {
+		struct cec_op_digital_service_id digital;
+		struct {
+			__u8 ana_bcast_type;
+			__u16 ana_freq;
+			__u8 bcast_system;
+		} analog;
+		struct {
+			__u8 plug;
+		} ext_plug;
+		struct {
+			__u16 phys_addr;
+		} ext_phys_addr;
+	};
+};
+
+static inline void cec_set_digital_service_id(__u8 *msg,
+	      const struct cec_op_digital_service_id *digital)
+{
+	*msg++ = (digital->service_id_method << 7) | digital->dig_bcast_system;
+	if (digital->service_id_method == CEC_OP_SERVICE_ID_METHOD_BY_CHANNEL) {
+		*msg++ = (digital->channel.channel_number_fmt << 2) |
+			 (digital->channel.major >> 8);
+		*msg++ = digital->channel.major & 0xff;
+		*msg++ = digital->channel.minor >> 8;
+		*msg++ = digital->channel.minor & 0xff;
+		*msg++ = 0;
+		*msg++ = 0;
+		return;
+	}
+	switch (digital->dig_bcast_system) {
+	case CEC_OP_DIG_SERVICE_BCAST_SYSTEM_ATSC_GEN:
+	case CEC_OP_DIG_SERVICE_BCAST_SYSTEM_ATSC_CABLE:
+	case CEC_OP_DIG_SERVICE_BCAST_SYSTEM_ATSC_SAT:
+	case CEC_OP_DIG_SERVICE_BCAST_SYSTEM_ATSC_T:
+		*msg++ = digital->atsc.transport_id >> 8;
+		*msg++ = digital->atsc.transport_id & 0xff;
+		*msg++ = digital->atsc.program_number >> 8;
+		*msg++ = digital->atsc.program_number & 0xff;
+		*msg++ = 0;
+		*msg++ = 0;
+		break;
+	default:
+		*msg++ = digital->dvb.transport_id >> 8;
+		*msg++ = digital->dvb.transport_id & 0xff;
+		*msg++ = digital->dvb.service_id >> 8;
+		*msg++ = digital->dvb.service_id & 0xff;
+		*msg++ = digital->dvb.orig_network_id >> 8;
+		*msg++ = digital->dvb.orig_network_id & 0xff;
+		break;
+	}
+}
+
+static inline void cec_get_digital_service_id(const __u8 *msg,
+	      struct cec_op_digital_service_id *digital)
+{
+	digital->service_id_method = msg[0] >> 7;
+	digital->dig_bcast_system = msg[0] & 0x7f;
+	if (digital->service_id_method == CEC_OP_SERVICE_ID_METHOD_BY_CHANNEL) {
+		digital->channel.channel_number_fmt = msg[1] >> 2;
+		digital->channel.major = ((msg[1] & 3) << 8) | msg[2];
+		digital->channel.minor = (msg[3] << 8) | msg[4];
+		return;
+	}
+	digital->dvb.transport_id = (msg[1] << 8) | msg[2];
+	digital->dvb.service_id = (msg[3] << 8) | msg[4];
+	digital->dvb.orig_network_id = (msg[5] << 8) | msg[6];
+}
+
+static inline void cec_msg_record_on_own(struct cec_msg *msg)
+{
+	msg->len = 3;
+	msg->msg[1] = CEC_MSG_RECORD_ON;
+	msg->msg[2] = CEC_OP_RECORD_SRC_OWN;
+}
+
+static inline void cec_msg_record_on_digital(struct cec_msg *msg,
+			     const struct cec_op_digital_service_id *digital)
+{
+	msg->len = 10;
+	msg->msg[1] = CEC_MSG_RECORD_ON;
+	msg->msg[2] = CEC_OP_RECORD_SRC_DIGITAL;
+	cec_set_digital_service_id(msg->msg + 3, digital);
+}
+
+static inline void cec_msg_record_on_analog(struct cec_msg *msg,
+					    __u8 ana_bcast_type,
+					    __u16 ana_freq,
+					    __u8 bcast_system)
+{
+	msg->len = 7;
+	msg->msg[1] = CEC_MSG_RECORD_ON;
+	msg->msg[2] = CEC_OP_RECORD_SRC_ANALOG;
+	msg->msg[3] = ana_bcast_type;
+	msg->msg[4] = ana_freq >> 8;
+	msg->msg[5] = ana_freq & 0xff;
+	msg->msg[6] = bcast_system;
+}
+
+static inline void cec_msg_record_on_plug(struct cec_msg *msg,
+					  __u8 plug)
+{
+	msg->len = 4;
+	msg->msg[1] = CEC_MSG_RECORD_ON;
+	msg->msg[2] = CEC_OP_RECORD_SRC_EXT_PLUG;
+	msg->msg[3] = plug;
+}
+
+static inline void cec_msg_record_on_phys_addr(struct cec_msg *msg,
+					       __u16 phys_addr)
+{
+	msg->len = 5;
+	msg->msg[1] = CEC_MSG_RECORD_ON;
+	msg->msg[2] = CEC_OP_RECORD_SRC_EXT_PHYS_ADDR;
+	msg->msg[3] = phys_addr >> 8;
+	msg->msg[4] = phys_addr & 0xff;
+}
+
+static inline void cec_msg_record_on(struct cec_msg *msg,
+				     bool reply,
+				     const struct cec_op_record_src *rec_src)
+{
+	switch (rec_src->type) {
+	case CEC_OP_RECORD_SRC_OWN:
+		cec_msg_record_on_own(msg);
+		break;
+	case CEC_OP_RECORD_SRC_DIGITAL:
+		cec_msg_record_on_digital(msg, &rec_src->digital);
+		break;
+	case CEC_OP_RECORD_SRC_ANALOG:
+		cec_msg_record_on_analog(msg,
+					 rec_src->analog.ana_bcast_type,
+					 rec_src->analog.ana_freq,
+					 rec_src->analog.bcast_system);
+		break;
+	case CEC_OP_RECORD_SRC_EXT_PLUG:
+		cec_msg_record_on_plug(msg, rec_src->ext_plug.plug);
+		break;
+	case CEC_OP_RECORD_SRC_EXT_PHYS_ADDR:
+		cec_msg_record_on_phys_addr(msg,
+					    rec_src->ext_phys_addr.phys_addr);
+		break;
+	}
+	msg->reply = reply ? CEC_MSG_RECORD_STATUS : 0;
+}
+
+static inline void cec_ops_record_on(const struct cec_msg *msg,
+				     struct cec_op_record_src *rec_src)
+{
+	rec_src->type = msg->msg[2];
+	switch (rec_src->type) {
+	case CEC_OP_RECORD_SRC_OWN:
+		break;
+	case CEC_OP_RECORD_SRC_DIGITAL:
+		cec_get_digital_service_id(msg->msg + 3, &rec_src->digital);
+		break;
+	case CEC_OP_RECORD_SRC_ANALOG:
+		rec_src->analog.ana_bcast_type = msg->msg[3];
+		rec_src->analog.ana_freq =
+			(msg->msg[4] << 8) | msg->msg[5];
+		rec_src->analog.bcast_system = msg->msg[6];
+		break;
+	case CEC_OP_RECORD_SRC_EXT_PLUG:
+		rec_src->ext_plug.plug = msg->msg[3];
+		break;
+	case CEC_OP_RECORD_SRC_EXT_PHYS_ADDR:
+		rec_src->ext_phys_addr.phys_addr =
+			(msg->msg[3] << 8) | msg->msg[4];
+		break;
+	}
+}
+
+static inline void cec_msg_record_status(struct cec_msg *msg, __u8 rec_status)
+{
+	msg->len = 3;
+	msg->msg[1] = CEC_MSG_RECORD_STATUS;
+	msg->msg[2] = rec_status;
+}
+
+static inline void cec_ops_record_status(const struct cec_msg *msg,
+					 __u8 *rec_status)
+{
+	*rec_status = msg->msg[2];
+}
+
+static inline void cec_msg_record_tv_screen(struct cec_msg *msg,
+					    bool reply)
+{
+	msg->len = 2;
+	msg->msg[1] = CEC_MSG_RECORD_TV_SCREEN;
+	msg->reply = reply ? CEC_MSG_RECORD_ON : 0;
+}
+
+
+/* Timer Programming Feature */
+static inline void cec_msg_timer_status(struct cec_msg *msg,
+					__u8 timer_overlap_warning,
+					__u8 media_info,
+					__u8 prog_info,
+					__u8 prog_error,
+					__u8 duration_hr,
+					__u8 duration_min)
+{
+	msg->len = 3;
+	msg->msg[1] = CEC_MSG_TIMER_STATUS;
+	msg->msg[2] = (timer_overlap_warning << 7) |
+		(media_info << 5) |
+		(prog_info ? 0x10 : 0) |
+		(prog_info ? prog_info : prog_error);
+	if (prog_info == CEC_OP_PROG_INFO_NOT_ENOUGH_SPACE ||
+	    prog_info == CEC_OP_PROG_INFO_MIGHT_NOT_BE_ENOUGH_SPACE ||
+	    prog_error == CEC_OP_PROG_ERROR_DUPLICATE) {
+		msg->len += 2;
+		msg->msg[3] = ((duration_hr / 10) << 4) | (duration_hr % 10);
+		msg->msg[4] = ((duration_min / 10) << 4) | (duration_min % 10);
+	}
+}
+
+static inline void cec_ops_timer_status(const struct cec_msg *msg,
+					__u8 *timer_overlap_warning,
+					__u8 *media_info,
+					__u8 *prog_info,
+					__u8 *prog_error,
+					__u8 *duration_hr,
+					__u8 *duration_min)
+{
+	*timer_overlap_warning = msg->msg[2] >> 7;
+	*media_info = (msg->msg[2] >> 5) & 3;
+	if (msg->msg[2] & 0x10) {
+		*prog_info = msg->msg[2] & 0xf;
+		*prog_error = 0;
+	} else {
+		*prog_info = 0;
+		*prog_error = msg->msg[2] & 0xf;
+	}
+	if (*prog_info == CEC_OP_PROG_INFO_NOT_ENOUGH_SPACE ||
+	    *prog_info == CEC_OP_PROG_INFO_MIGHT_NOT_BE_ENOUGH_SPACE ||
+	    *prog_error == CEC_OP_PROG_ERROR_DUPLICATE) {
+		*duration_hr = (msg->msg[3] >> 4) * 10 + (msg->msg[3] & 0xf);
+		*duration_min = (msg->msg[4] >> 4) * 10 + (msg->msg[4] & 0xf);
+	} else {
+		*duration_hr = *duration_min = 0;
+	}
+}
+
+static inline void cec_msg_timer_cleared_status(struct cec_msg *msg,
+						__u8 timer_cleared_status)
+{
+	msg->len = 3;
+	msg->msg[1] = CEC_MSG_TIMER_CLEARED_STATUS;
+	msg->msg[2] = timer_cleared_status;
+}
+
+static inline void cec_ops_timer_cleared_status(const struct cec_msg *msg,
+						__u8 *timer_cleared_status)
+{
+	*timer_cleared_status = msg->msg[2];
+}
+
+static inline void cec_msg_clear_analogue_timer(struct cec_msg *msg,
+						bool reply,
+						__u8 day,
+						__u8 month,
+						__u8 start_hr,
+						__u8 start_min,
+						__u8 duration_hr,
+						__u8 duration_min,
+						__u8 recording_seq,
+						__u8 ana_bcast_type,
+						__u16 ana_freq,
+						__u8 bcast_system)
+{
+	msg->len = 13;
+	msg->msg[1] = CEC_MSG_CLEAR_ANALOGUE_TIMER;
+	msg->msg[2] = day;
+	msg->msg[3] = month;
+	/* Hours and minutes are in BCD format */
+	msg->msg[4] = ((start_hr / 10) << 4) | (start_hr % 10);
+	msg->msg[5] = ((start_min / 10) << 4) | (start_min % 10);
+	msg->msg[6] = ((duration_hr / 10) << 4) | (duration_hr % 10);
+	msg->msg[7] = ((duration_min / 10) << 4) | (duration_min % 10);
+	msg->msg[8] = recording_seq;
+	msg->msg[9] = ana_bcast_type;
+	msg->msg[10] = ana_freq >> 8;
+	msg->msg[11] = ana_freq & 0xff;
+	msg->msg[12] = bcast_system;
+	msg->reply = reply ? CEC_MSG_TIMER_CLEARED_STATUS : 0;
+}
+
+static inline void cec_ops_clear_analogue_timer(const struct cec_msg *msg,
+						__u8 *day,
+						__u8 *month,
+						__u8 *start_hr,
+						__u8 *start_min,
+						__u8 *duration_hr,
+						__u8 *duration_min,
+						__u8 *recording_seq,
+						__u8 *ana_bcast_type,
+						__u16 *ana_freq,
+						__u8 *bcast_system)
+{
+	*day = msg->msg[2];
+	*month = msg->msg[3];
+	/* Hours and minutes are in BCD format */
+	*start_hr = (msg->msg[4] >> 4) * 10 + (msg->msg[4] & 0xf);
+	*start_min = (msg->msg[5] >> 4) * 10 + (msg->msg[5] & 0xf);
+	*duration_hr = (msg->msg[6] >> 4) * 10 + (msg->msg[6] & 0xf);
+	*duration_min = (msg->msg[7] >> 4) * 10 + (msg->msg[7] & 0xf);
+	*recording_seq = msg->msg[8];
+	*ana_bcast_type = msg->msg[9];
+	*ana_freq = (msg->msg[10] << 8) | msg->msg[11];
+	*bcast_system = msg->msg[12];
+}
+
+static inline void cec_msg_clear_digital_timer(struct cec_msg *msg,
+				bool reply,
+				__u8 day,
+				__u8 month,
+				__u8 start_hr,
+				__u8 start_min,
+				__u8 duration_hr,
+				__u8 duration_min,
+				__u8 recording_seq,
+				const struct cec_op_digital_service_id *digital)
+{
+	msg->len = 16;
+	msg->reply = reply ? CEC_MSG_TIMER_CLEARED_STATUS : 0;
+	msg->msg[1] = CEC_MSG_CLEAR_DIGITAL_TIMER;
+	msg->msg[2] = day;
+	msg->msg[3] = month;
+	/* Hours and minutes are in BCD format */
+	msg->msg[4] = ((start_hr / 10) << 4) | (start_hr % 10);
+	msg->msg[5] = ((start_min / 10) << 4) | (start_min % 10);
+	msg->msg[6] = ((duration_hr / 10) << 4) | (duration_hr % 10);
+	msg->msg[7] = ((duration_min / 10) << 4) | (duration_min % 10);
+	msg->msg[8] = recording_seq;
+	cec_set_digital_service_id(msg->msg + 9, digital);
+}
+
+static inline void cec_ops_clear_digital_timer(const struct cec_msg *msg,
+				__u8 *day,
+				__u8 *month,
+				__u8 *start_hr,
+				__u8 *start_min,
+				__u8 *duration_hr,
+				__u8 *duration_min,
+				__u8 *recording_seq,
+				struct cec_op_digital_service_id *digital)
+{
+	*day = msg->msg[2];
+	*month = msg->msg[3];
+	/* Hours and minutes are in BCD format */
+	*start_hr = (msg->msg[4] >> 4) * 10 + (msg->msg[4] & 0xf);
+	*start_min = (msg->msg[5] >> 4) * 10 + (msg->msg[5] & 0xf);
+	*duration_hr = (msg->msg[6] >> 4) * 10 + (msg->msg[6] & 0xf);
+	*duration_min = (msg->msg[7] >> 4) * 10 + (msg->msg[7] & 0xf);
+	*recording_seq = msg->msg[8];
+	cec_get_digital_service_id(msg->msg + 9, digital);
+}
+
+static inline void cec_msg_clear_ext_timer(struct cec_msg *msg,
+					   bool reply,
+					   __u8 day,
+					   __u8 month,
+					   __u8 start_hr,
+					   __u8 start_min,
+					   __u8 duration_hr,
+					   __u8 duration_min,
+					   __u8 recording_seq,
+					   __u8 ext_src_spec,
+					   __u8 plug,
+					   __u16 phys_addr)
+{
+	msg->len = 13;
+	msg->msg[1] = CEC_MSG_CLEAR_EXT_TIMER;
+	msg->msg[2] = day;
+	msg->msg[3] = month;
+	/* Hours and minutes are in BCD format */
+	msg->msg[4] = ((start_hr / 10) << 4) | (start_hr % 10);
+	msg->msg[5] = ((start_min / 10) << 4) | (start_min % 10);
+	msg->msg[6] = ((duration_hr / 10) << 4) | (duration_hr % 10);
+	msg->msg[7] = ((duration_min / 10) << 4) | (duration_min % 10);
+	msg->msg[8] = recording_seq;
+	msg->msg[9] = ext_src_spec;
+	msg->msg[10] = plug;
+	msg->msg[11] = phys_addr >> 8;
+	msg->msg[12] = phys_addr & 0xff;
+	msg->reply = reply ? CEC_MSG_TIMER_CLEARED_STATUS : 0;
+}
+
+static inline void cec_ops_clear_ext_timer(const struct cec_msg *msg,
+					   __u8 *day,
+					   __u8 *month,
+					   __u8 *start_hr,
+					   __u8 *start_min,
+					   __u8 *duration_hr,
+					   __u8 *duration_min,
+					   __u8 *recording_seq,
+					   __u8 *ext_src_spec,
+					   __u8 *plug,
+					   __u16 *phys_addr)
+{
+	*day = msg->msg[2];
+	*month = msg->msg[3];
+	/* Hours and minutes are in BCD format */
+	*start_hr = (msg->msg[4] >> 4) * 10 + (msg->msg[4] & 0xf);
+	*start_min = (msg->msg[5] >> 4) * 10 + (msg->msg[5] & 0xf);
+	*duration_hr = (msg->msg[6] >> 4) * 10 + (msg->msg[6] & 0xf);
+	*duration_min = (msg->msg[7] >> 4) * 10 + (msg->msg[7] & 0xf);
+	*recording_seq = msg->msg[8];
+	*ext_src_spec = msg->msg[9];
+	*plug = msg->msg[10];
+	*phys_addr = (msg->msg[11] << 8) | msg->msg[12];
+}
+
+static inline void cec_msg_set_analogue_timer(struct cec_msg *msg,
+					      bool reply,
+					      __u8 day,
+					      __u8 month,
+					      __u8 start_hr,
+					      __u8 start_min,
+					      __u8 duration_hr,
+					      __u8 duration_min,
+					      __u8 recording_seq,
+					      __u8 ana_bcast_type,
+					      __u16 ana_freq,
+					      __u8 bcast_system)
+{
+	msg->len = 13;
+	msg->msg[1] = CEC_MSG_SET_ANALOGUE_TIMER;
+	msg->msg[2] = day;
+	msg->msg[3] = month;
+	/* Hours and minutes are in BCD format */
+	msg->msg[4] = ((start_hr / 10) << 4) | (start_hr % 10);
+	msg->msg[5] = ((start_min / 10) << 4) | (start_min % 10);
+	msg->msg[6] = ((duration_hr / 10) << 4) | (duration_hr % 10);
+	msg->msg[7] = ((duration_min / 10) << 4) | (duration_min % 10);
+	msg->msg[8] = recording_seq;
+	msg->msg[9] = ana_bcast_type;
+	msg->msg[10] = ana_freq >> 8;
+	msg->msg[11] = ana_freq & 0xff;
+	msg->msg[12] = bcast_system;
+	msg->reply = reply ? CEC_MSG_TIMER_STATUS : 0;
+}
+
+static inline void cec_ops_set_analogue_timer(const struct cec_msg *msg,
+					      __u8 *day,
+					      __u8 *month,
+					      __u8 *start_hr,
+					      __u8 *start_min,
+					      __u8 *duration_hr,
+					      __u8 *duration_min,
+					      __u8 *recording_seq,
+					      __u8 *ana_bcast_type,
+					      __u16 *ana_freq,
+					      __u8 *bcast_system)
+{
+	*day = msg->msg[2];
+	*month = msg->msg[3];
+	/* Hours and minutes are in BCD format */
+	*start_hr = (msg->msg[4] >> 4) * 10 + (msg->msg[4] & 0xf);
+	*start_min = (msg->msg[5] >> 4) * 10 + (msg->msg[5] & 0xf);
+	*duration_hr = (msg->msg[6] >> 4) * 10 + (msg->msg[6] & 0xf);
+	*duration_min = (msg->msg[7] >> 4) * 10 + (msg->msg[7] & 0xf);
+	*recording_seq = msg->msg[8];
+	*ana_bcast_type = msg->msg[9];
+	*ana_freq = (msg->msg[10] << 8) | msg->msg[11];
+	*bcast_system = msg->msg[12];
+}
+
+static inline void cec_msg_set_digital_timer(struct cec_msg *msg,
+			bool reply,
+			__u8 day,
+			__u8 month,
+			__u8 start_hr,
+			__u8 start_min,
+			__u8 duration_hr,
+			__u8 duration_min,
+			__u8 recording_seq,
+			const struct cec_op_digital_service_id *digital)
+{
+	msg->len = 16;
+	msg->reply = reply ? CEC_MSG_TIMER_STATUS : 0;
+	msg->msg[1] = CEC_MSG_SET_DIGITAL_TIMER;
+	msg->msg[2] = day;
+	msg->msg[3] = month;
+	/* Hours and minutes are in BCD format */
+	msg->msg[4] = ((start_hr / 10) << 4) | (start_hr % 10);
+	msg->msg[5] = ((start_min / 10) << 4) | (start_min % 10);
+	msg->msg[6] = ((duration_hr / 10) << 4) | (duration_hr % 10);
+	msg->msg[7] = ((duration_min / 10) << 4) | (duration_min % 10);
+	msg->msg[8] = recording_seq;
+	cec_set_digital_service_id(msg->msg + 9, digital);
+}
+
+static inline void cec_ops_set_digital_timer(const struct cec_msg *msg,
+			__u8 *day,
+			__u8 *month,
+			__u8 *start_hr,
+			__u8 *start_min,
+			__u8 *duration_hr,
+			__u8 *duration_min,
+			__u8 *recording_seq,
+			struct cec_op_digital_service_id *digital)
+{
+	*day = msg->msg[2];
+	*month = msg->msg[3];
+	/* Hours and minutes are in BCD format */
+	*start_hr = (msg->msg[4] >> 4) * 10 + (msg->msg[4] & 0xf);
+	*start_min = (msg->msg[5] >> 4) * 10 + (msg->msg[5] & 0xf);
+	*duration_hr = (msg->msg[6] >> 4) * 10 + (msg->msg[6] & 0xf);
+	*duration_min = (msg->msg[7] >> 4) * 10 + (msg->msg[7] & 0xf);
+	*recording_seq = msg->msg[8];
+	cec_get_digital_service_id(msg->msg + 9, digital);
+}
+
+static inline void cec_msg_set_ext_timer(struct cec_msg *msg,
+					 bool reply,
+					 __u8 day,
+					 __u8 month,
+					 __u8 start_hr,
+					 __u8 start_min,
+					 __u8 duration_hr,
+					 __u8 duration_min,
+					 __u8 recording_seq,
+					 __u8 ext_src_spec,
+					 __u8 plug,
+					 __u16 phys_addr)
+{
+	msg->len = 13;
+	msg->msg[1] = CEC_MSG_SET_EXT_TIMER;
+	msg->msg[2] = day;
+	msg->msg[3] = month;
+	/* Hours and minutes are in BCD format */
+	msg->msg[4] = ((start_hr / 10) << 4) | (start_hr % 10);
+	msg->msg[5] = ((start_min / 10) << 4) | (start_min % 10);
+	msg->msg[6] = ((duration_hr / 10) << 4) | (duration_hr % 10);
+	msg->msg[7] = ((duration_min / 10) << 4) | (duration_min % 10);
+	msg->msg[8] = recording_seq;
+	msg->msg[9] = ext_src_spec;
+	msg->msg[10] = plug;
+	msg->msg[11] = phys_addr >> 8;
+	msg->msg[12] = phys_addr & 0xff;
+	msg->reply = reply ? CEC_MSG_TIMER_STATUS : 0;
+}
+
+static inline void cec_ops_set_ext_timer(const struct cec_msg *msg,
+					 __u8 *day,
+					 __u8 *month,
+					 __u8 *start_hr,
+					 __u8 *start_min,
+					 __u8 *duration_hr,
+					 __u8 *duration_min,
+					 __u8 *recording_seq,
+					 __u8 *ext_src_spec,
+					 __u8 *plug,
+					 __u16 *phys_addr)
+{
+	*day = msg->msg[2];
+	*month = msg->msg[3];
+	/* Hours and minutes are in BCD format */
+	*start_hr = (msg->msg[4] >> 4) * 10 + (msg->msg[4] & 0xf);
+	*start_min = (msg->msg[5] >> 4) * 10 + (msg->msg[5] & 0xf);
+	*duration_hr = (msg->msg[6] >> 4) * 10 + (msg->msg[6] & 0xf);
+	*duration_min = (msg->msg[7] >> 4) * 10 + (msg->msg[7] & 0xf);
+	*recording_seq = msg->msg[8];
+	*ext_src_spec = msg->msg[9];
+	*plug = msg->msg[10];
+	*phys_addr = (msg->msg[11] << 8) | msg->msg[12];
+}
+
+static inline void cec_msg_set_timer_program_title(struct cec_msg *msg,
+						   const char *prog_title)
+{
+	unsigned int len = strlen(prog_title);
+
+	if (len > 14)
+		len = 14;
+	msg->len = 2 + len;
+	msg->msg[1] = CEC_MSG_SET_TIMER_PROGRAM_TITLE;
+	memcpy(msg->msg + 2, prog_title, len);
+}
+
+static inline void cec_ops_set_timer_program_title(const struct cec_msg *msg,
+						   char *prog_title)
+{
+	unsigned int len = msg->len > 2 ? msg->len - 2 : 0;
+
+	if (len > 14)
+		len = 14;
+	memcpy(prog_title, msg->msg + 2, len);
+	prog_title[len] = '\0';
+}
+
+/* System Information Feature */
+static inline void cec_msg_cec_version(struct cec_msg *msg, __u8 cec_version)
+{
+	msg->len = 3;
+	msg->msg[1] = CEC_MSG_CEC_VERSION;
+	msg->msg[2] = cec_version;
+}
+
+static inline void cec_ops_cec_version(const struct cec_msg *msg,
+				       __u8 *cec_version)
+{
+	*cec_version = msg->msg[2];
+}
+
+static inline void cec_msg_get_cec_version(struct cec_msg *msg,
+					   bool reply)
+{
+	msg->len = 2;
+	msg->msg[1] = CEC_MSG_GET_CEC_VERSION;
+	msg->reply = reply ? CEC_MSG_CEC_VERSION : 0;
+}
+
+static inline void cec_msg_report_physical_addr(struct cec_msg *msg,
+					__u16 phys_addr, __u8 prim_devtype)
+{
+	msg->len = 5;
+	msg->msg[0] |= 0xf; /* broadcast */
+	msg->msg[1] = CEC_MSG_REPORT_PHYSICAL_ADDR;
+	msg->msg[2] = phys_addr >> 8;
+	msg->msg[3] = phys_addr & 0xff;
+	msg->msg[4] = prim_devtype;
+}
+
+static inline void cec_ops_report_physical_addr(const struct cec_msg *msg,
+					__u16 *phys_addr, __u8 *prim_devtype)
+{
+	*phys_addr = (msg->msg[2] << 8) | msg->msg[3];
+	*prim_devtype = msg->msg[4];
+}
+
+static inline void cec_msg_give_physical_addr(struct cec_msg *msg,
+					      bool reply)
+{
+	msg->len = 2;
+	msg->msg[1] = CEC_MSG_GIVE_PHYSICAL_ADDR;
+	msg->reply = reply ? CEC_MSG_REPORT_PHYSICAL_ADDR : 0;
+}
+
+static inline void cec_msg_set_menu_language(struct cec_msg *msg,
+					     const char *language)
+{
+	msg->len = 5;
+	msg->msg[0] |= 0xf; /* broadcast */
+	msg->msg[1] = CEC_MSG_SET_MENU_LANGUAGE;
+	memcpy(msg->msg + 2, language, 3);
+}
+
+static inline void cec_ops_set_menu_language(const struct cec_msg *msg,
+					     char *language)
+{
+	memcpy(language, msg->msg + 2, 3);
+	language[3] = '\0';
+}
+
+static inline void cec_msg_get_menu_language(struct cec_msg *msg,
+					     bool reply)
+{
+	msg->len = 2;
+	msg->msg[1] = CEC_MSG_GET_MENU_LANGUAGE;
+	msg->reply = reply ? CEC_MSG_SET_MENU_LANGUAGE : 0;
+}
+
+/*
+ * Assumes a single RC Profile byte and a single Device Features byte,
+ * i.e. no extended features are supported by this helper function.
+ *
+ * As of CEC 2.0 no extended features are defined, should those be added
+ * in the future, then this function needs to be adapted or a new function
+ * should be added.
+ */
+static inline void cec_msg_report_features(struct cec_msg *msg,
+				__u8 cec_version, __u8 all_device_types,
+				__u8 rc_profile, __u8 dev_features)
+{
+	msg->len = 6;
+	msg->msg[0] |= 0xf; /* broadcast */
+	msg->msg[1] = CEC_MSG_REPORT_FEATURES;
+	msg->msg[2] = cec_version;
+	msg->msg[3] = all_device_types;
+	msg->msg[4] = rc_profile;
+	msg->msg[5] = dev_features;
+}
+
+static inline void cec_ops_report_features(const struct cec_msg *msg,
+			__u8 *cec_version, __u8 *all_device_types,
+			const __u8 **rc_profile, const __u8 **dev_features)
+{
+	const __u8 *p = &msg->msg[4];
+
+	*cec_version = msg->msg[2];
+	*all_device_types = msg->msg[3];
+	*rc_profile = p;
+	while (p < &msg->msg[14] && (*p & CEC_OP_FEAT_EXT))
+		p++;
+	if (!(*p & CEC_OP_FEAT_EXT)) {
+		*dev_features = p + 1;
+		while (p < &msg->msg[15] && (*p & CEC_OP_FEAT_EXT))
+			p++;
+	}
+	if (*p & CEC_OP_FEAT_EXT)
+		*rc_profile = *dev_features = NULL;
+}
+
+static inline void cec_msg_give_features(struct cec_msg *msg,
+					 bool reply)
+{
+	msg->len = 2;
+	msg->msg[1] = CEC_MSG_GIVE_FEATURES;
+	msg->reply = reply ? CEC_MSG_REPORT_FEATURES : 0;
+}
+
+/* Deck Control Feature */
+static inline void cec_msg_deck_control(struct cec_msg *msg,
+					__u8 deck_control_mode)
+{
+	msg->len = 3;
+	msg->msg[1] = CEC_MSG_DECK_CONTROL;
+	msg->msg[2] = deck_control_mode;
+}
+
+static inline void cec_ops_deck_control(const struct cec_msg *msg,
+					__u8 *deck_control_mode)
+{
+	*deck_control_mode = msg->msg[2];
+}
+
+static inline void cec_msg_deck_status(struct cec_msg *msg,
+				       __u8 deck_info)
+{
+	msg->len = 3;
+	msg->msg[1] = CEC_MSG_DECK_STATUS;
+	msg->msg[2] = deck_info;
+}
+
+static inline void cec_ops_deck_status(const struct cec_msg *msg,
+				       __u8 *deck_info)
+{
+	*deck_info = msg->msg[2];
+}
+
+static inline void cec_msg_give_deck_status(struct cec_msg *msg,
+					    bool reply,
+					    __u8 status_req)
+{
+	msg->len = 3;
+	msg->msg[1] = CEC_MSG_GIVE_DECK_STATUS;
+	msg->msg[2] = status_req;
+	msg->reply = reply ? CEC_MSG_DECK_STATUS : 0;
+}
+
+static inline void cec_ops_give_deck_status(const struct cec_msg *msg,
+					    __u8 *status_req)
+{
+	*status_req = msg->msg[2];
+}
+
+static inline void cec_msg_play(struct cec_msg *msg,
+				__u8 play_mode)
+{
+	msg->len = 3;
+	msg->msg[1] = CEC_MSG_PLAY;
+	msg->msg[2] = play_mode;
+}
+
+static inline void cec_ops_play(const struct cec_msg *msg,
+				__u8 *play_mode)
+{
+	*play_mode = msg->msg[2];
+}
+
+
+/* Tuner Control Feature */
+struct cec_op_tuner_device_info {
+	__u8 rec_flag;
+	__u8 tuner_display_info;
+	bool is_analog;
+	union {
+		struct cec_op_digital_service_id digital;
+		struct {
+			__u8 ana_bcast_type;
+			__u16 ana_freq;
+			__u8 bcast_system;
+		} analog;
+	};
+};
+
+static inline void cec_msg_tuner_device_status_analog(struct cec_msg *msg,
+						      __u8 rec_flag,
+						      __u8 tuner_display_info,
+						      __u8 ana_bcast_type,
+						      __u16 ana_freq,
+						      __u8 bcast_system)
+{
+	msg->len = 7;
+	msg->msg[1] = CEC_MSG_TUNER_DEVICE_STATUS;
+	msg->msg[2] = (rec_flag << 7) | tuner_display_info;
+	msg->msg[3] = ana_bcast_type;
+	msg->msg[4] = ana_freq >> 8;
+	msg->msg[5] = ana_freq & 0xff;
+	msg->msg[6] = bcast_system;
+}
+
+static inline void cec_msg_tuner_device_status_digital(struct cec_msg *msg,
+		   __u8 rec_flag, __u8 tuner_display_info,
+		   const struct cec_op_digital_service_id *digital)
+{
+	msg->len = 10;
+	msg->msg[1] = CEC_MSG_TUNER_DEVICE_STATUS;
+	msg->msg[2] = (rec_flag << 7) | tuner_display_info;
+	cec_set_digital_service_id(msg->msg + 3, digital);
+}
+
+static inline void cec_msg_tuner_device_status(struct cec_msg *msg,
+			const struct cec_op_tuner_device_info *tuner_dev_info)
+{
+	if (tuner_dev_info->is_analog)
+		cec_msg_tuner_device_status_analog(msg,
+			tuner_dev_info->rec_flag,
+			tuner_dev_info->tuner_display_info,
+			tuner_dev_info->analog.ana_bcast_type,
+			tuner_dev_info->analog.ana_freq,
+			tuner_dev_info->analog.bcast_system);
+	else
+		cec_msg_tuner_device_status_digital(msg,
+			tuner_dev_info->rec_flag,
+			tuner_dev_info->tuner_display_info,
+			&tuner_dev_info->digital);
+}
+
+static inline void cec_ops_tuner_device_status(const struct cec_msg *msg,
+				struct cec_op_tuner_device_info *tuner_dev_info)
+{
+	tuner_dev_info->is_analog = msg->len < 10;
+	tuner_dev_info->rec_flag = msg->msg[2] >> 7;
+	tuner_dev_info->tuner_display_info = msg->msg[2] & 0x7f;
+	if (tuner_dev_info->is_analog) {
+		tuner_dev_info->analog.ana_bcast_type = msg->msg[3];
+		tuner_dev_info->analog.ana_freq = (msg->msg[4] << 8) | msg->msg[5];
+		tuner_dev_info->analog.bcast_system = msg->msg[6];
+		return;
+	}
+	cec_get_digital_service_id(msg->msg + 3, &tuner_dev_info->digital);
+}
+
+static inline void cec_msg_give_tuner_device_status(struct cec_msg *msg,
+						    bool reply,
+						    __u8 status_req)
+{
+	msg->len = 3;
+	msg->msg[1] = CEC_MSG_GIVE_TUNER_DEVICE_STATUS;
+	msg->msg[2] = status_req;
+	msg->reply = reply ? CEC_MSG_TUNER_DEVICE_STATUS : 0;
+}
+
+static inline void cec_ops_give_tuner_device_status(const struct cec_msg *msg,
+						    __u8 *status_req)
+{
+	*status_req = msg->msg[2];
+}
+
+static inline void cec_msg_select_analogue_service(struct cec_msg *msg,
+						   __u8 ana_bcast_type,
+						   __u16 ana_freq,
+						   __u8 bcast_system)
+{
+	msg->len = 6;
+	msg->msg[1] = CEC_MSG_SELECT_ANALOGUE_SERVICE;
+	msg->msg[2] = ana_bcast_type;
+	msg->msg[3] = ana_freq >> 8;
+	msg->msg[4] = ana_freq & 0xff;
+	msg->msg[5] = bcast_system;
+}
+
+static inline void cec_ops_select_analogue_service(const struct cec_msg *msg,
+						   __u8 *ana_bcast_type,
+						   __u16 *ana_freq,
+						   __u8 *bcast_system)
+{
+	*ana_bcast_type = msg->msg[2];
+	*ana_freq = (msg->msg[3] << 8) | msg->msg[4];
+	*bcast_system = msg->msg[5];
+}
+
+static inline void cec_msg_select_digital_service(struct cec_msg *msg,
+				const struct cec_op_digital_service_id *digital)
+{
+	msg->len = 9;
+	msg->msg[1] = CEC_MSG_SELECT_DIGITAL_SERVICE;
+	cec_set_digital_service_id(msg->msg + 2, digital);
+}
+
+static inline void cec_ops_select_digital_service(const struct cec_msg *msg,
+				struct cec_op_digital_service_id *digital)
+{
+	cec_get_digital_service_id(msg->msg + 2, digital);
+}
+
+static inline void cec_msg_tuner_step_decrement(struct cec_msg *msg)
+{
+	msg->len = 2;
+	msg->msg[1] = CEC_MSG_TUNER_STEP_DECREMENT;
+}
+
+static inline void cec_msg_tuner_step_increment(struct cec_msg *msg)
+{
+	msg->len = 2;
+	msg->msg[1] = CEC_MSG_TUNER_STEP_INCREMENT;
+}
+
+
+/* Vendor Specific Commands Feature */
+static inline void cec_msg_device_vendor_id(struct cec_msg *msg, __u32 vendor_id)
+{
+	msg->len = 5;
+	msg->msg[0] |= 0xf; /* broadcast */
+	msg->msg[1] = CEC_MSG_DEVICE_VENDOR_ID;
+	msg->msg[2] = vendor_id >> 16;
+	msg->msg[3] = (vendor_id >> 8) & 0xff;
+	msg->msg[4] = vendor_id & 0xff;
+}
+
+static inline void cec_ops_device_vendor_id(const struct cec_msg *msg,
+					    __u32 *vendor_id)
+{
+	*vendor_id = (msg->msg[2] << 16) | (msg->msg[3] << 8) | msg->msg[4];
+}
+
+static inline void cec_msg_give_device_vendor_id(struct cec_msg *msg,
+						 bool reply)
+{
+	msg->len = 2;
+	msg->msg[1] = CEC_MSG_GIVE_DEVICE_VENDOR_ID;
+	msg->reply = reply ? CEC_MSG_DEVICE_VENDOR_ID : 0;
+}
+
+static inline void cec_msg_vendor_command(struct cec_msg *msg,
+					  __u8 size, const __u8 *vendor_cmd)
+{
+	if (size > 14)
+		size = 14;
+	msg->len = 2 + size;
+	msg->msg[1] = CEC_MSG_VENDOR_COMMAND;
+	memcpy(msg->msg + 2, vendor_cmd, size);
+}
+
+static inline void cec_ops_vendor_command(const struct cec_msg *msg,
+					  __u8 *size,
+					  const __u8 **vendor_cmd)
+{
+	*size = msg->len - 2;
+
+	if (*size > 14)
+		*size = 14;
+	*vendor_cmd = msg->msg + 2;
+}
+
+static inline void cec_msg_vendor_command_with_id(struct cec_msg *msg,
+						  __u32 vendor_id, __u8 size,
+						  const __u8 *vendor_cmd)
+{
+	if (size > 11)
+		size = 11;
+	msg->len = 5 + size;
+	msg->msg[1] = CEC_MSG_VENDOR_COMMAND_WITH_ID;
+	msg->msg[2] = vendor_id >> 16;
+	msg->msg[3] = (vendor_id >> 8) & 0xff;
+	msg->msg[4] = vendor_id & 0xff;
+	memcpy(msg->msg + 5, vendor_cmd, size);
+}
+
+static inline void cec_ops_vendor_command_with_id(const struct cec_msg *msg,
+						  __u32 *vendor_id,  __u8 *size,
+						  const __u8 **vendor_cmd)
+{
+	*size = msg->len - 5;
+
+	if (*size > 11)
+		*size = 11;
+	*vendor_id = (msg->msg[2] << 16) | (msg->msg[3] << 8) | msg->msg[4];
+	*vendor_cmd = msg->msg + 5;
+}
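+
+/*
+ * Illustrative sketch, not part of this header: sending a short vendor
+ * command tagged with a vendor ID (the ID and payload below are made up;
+ * msg is assumed to have been set up with cec_msg_init()):
+ *
+ *	static const __u8 cmd[3] = { 0x01, 0x02, 0x03 };
+ *
+ *	cec_msg_vendor_command_with_id(&msg, 0x123456, 3, cmd);
+ */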
+
+static inline void cec_msg_vendor_remote_button_down(struct cec_msg *msg,
+						     __u8 size,
+						     const __u8 *rc_code)
+{
+	if (size > 14)
+		size = 14;
+	msg->len = 2 + size;
+	msg->msg[1] = CEC_MSG_VENDOR_REMOTE_BUTTON_DOWN;
+	memcpy(msg->msg + 2, rc_code, size);
+}
+
+static inline void cec_ops_vendor_remote_button_down(const struct cec_msg *msg,
+						     __u8 *size,
+						     const __u8 **rc_code)
+{
+	*size = msg->len - 2;
+
+	if (*size > 14)
+		*size = 14;
+	*rc_code = msg->msg + 2;
+}
+
+static inline void cec_msg_vendor_remote_button_up(struct cec_msg *msg)
+{
+	msg->len = 2;
+	msg->msg[1] = CEC_MSG_VENDOR_REMOTE_BUTTON_UP;
+}
+
+
+/* OSD Display Feature */
+static inline void cec_msg_set_osd_string(struct cec_msg *msg,
+					  __u8 disp_ctl,
+					  const char *osd)
+{
+	unsigned int len = strlen(osd);
+
+	if (len > 13)
+		len = 13;
+	msg->len = 3 + len;
+	msg->msg[1] = CEC_MSG_SET_OSD_STRING;
+	msg->msg[2] = disp_ctl;
+	memcpy(msg->msg + 3, osd, len);
+}
+
+static inline void cec_ops_set_osd_string(const struct cec_msg *msg,
+					  __u8 *disp_ctl,
+					  char *osd)
+{
+	unsigned int len = msg->len > 3 ? msg->len - 3 : 0;
+
+	*disp_ctl = msg->msg[2];
+	if (len > 13)
+		len = 13;
+	memcpy(osd, msg->msg + 3, len);
+	osd[len] = '\0';
+}
+
+
+/* Device OSD Transfer Feature */
+static inline void cec_msg_set_osd_name(struct cec_msg *msg, const char *name)
+{
+	unsigned int len = strlen(name);
+
+	if (len > 14)
+		len = 14;
+	msg->len = 2 + len;
+	msg->msg[1] = CEC_MSG_SET_OSD_NAME;
+	memcpy(msg->msg + 2, name, len);
+}
+
+static inline void cec_ops_set_osd_name(const struct cec_msg *msg,
+					char *name)
+{
+	unsigned int len = msg->len > 2 ? msg->len - 2 : 0;
+
+	if (len > 14)
+		len = 14;
+	memcpy(name, msg->msg + 2, len);
+	name[len] = '\0';
+}
+
+static inline void cec_msg_give_osd_name(struct cec_msg *msg,
+					 bool reply)
+{
+	msg->len = 2;
+	msg->msg[1] = CEC_MSG_GIVE_OSD_NAME;
+	msg->reply = reply ? CEC_MSG_SET_OSD_NAME : 0;
+}
+
+
+/* Device Menu Control Feature */
+static inline void cec_msg_menu_status(struct cec_msg *msg,
+				       __u8 menu_state)
+{
+	msg->len = 3;
+	msg->msg[1] = CEC_MSG_MENU_STATUS;
+	msg->msg[2] = menu_state;
+}
+
+static inline void cec_ops_menu_status(const struct cec_msg *msg,
+				       __u8 *menu_state)
+{
+	*menu_state = msg->msg[2];
+}
+
+static inline void cec_msg_menu_request(struct cec_msg *msg,
+					bool reply,
+					__u8 menu_req)
+{
+	msg->len = 3;
+	msg->msg[1] = CEC_MSG_MENU_REQUEST;
+	msg->msg[2] = menu_req;
+	msg->reply = reply ? CEC_MSG_MENU_STATUS : 0;
+}
+
+static inline void cec_ops_menu_request(const struct cec_msg *msg,
+					__u8 *menu_req)
+{
+	*menu_req = msg->msg[2];
+}
+
+struct cec_op_ui_command {
+	__u8 ui_cmd;
+	bool has_opt_arg;
+	union {
+		struct cec_op_channel_data channel_identifier;
+		__u8 ui_broadcast_type;
+		__u8 ui_sound_presentation_control;
+		__u8 play_mode;
+		__u8 ui_function_media;
+		__u8 ui_function_select_av_input;
+		__u8 ui_function_select_audio_input;
+	};
+};
+
+static inline void cec_msg_user_control_pressed(struct cec_msg *msg,
+					const struct cec_op_ui_command *ui_cmd)
+{
+	msg->len = 3;
+	msg->msg[1] = CEC_MSG_USER_CONTROL_PRESSED;
+	msg->msg[2] = ui_cmd->ui_cmd;
+	if (!ui_cmd->has_opt_arg)
+		return;
+	switch (ui_cmd->ui_cmd) {
+	case 0x56:
+	case 0x57:
+	case 0x60:
+	case 0x68:
+	case 0x69:
+	case 0x6a:
+		/* The optional operand is one byte for all these ui commands */
+		msg->len++;
+		msg->msg[3] = ui_cmd->play_mode;
+		break;
+	case 0x67:
+		msg->len += 4;
+		msg->msg[3] = (ui_cmd->channel_identifier.channel_number_fmt << 2) |
+			      (ui_cmd->channel_identifier.major >> 8);
+		msg->msg[4] = ui_cmd->channel_identifier.major & 0xff;
+		msg->msg[5] = ui_cmd->channel_identifier.minor >> 8;
+		msg->msg[6] = ui_cmd->channel_identifier.minor & 0xff;
+		break;
+	}
+}
+
+static inline void cec_ops_user_control_pressed(const struct cec_msg *msg,
+						struct cec_op_ui_command *ui_cmd)
+{
+	ui_cmd->ui_cmd = msg->msg[2];
+	ui_cmd->has_opt_arg = false;
+	if (msg->len == 3)
+		return;
+	switch (ui_cmd->ui_cmd) {
+	case 0x56:
+	case 0x57:
+	case 0x60:
+	case 0x68:
+	case 0x69:
+	case 0x6a:
+		/* The optional operand is one byte for all these ui commands */
+		ui_cmd->play_mode = msg->msg[3];
+		ui_cmd->has_opt_arg = true;
+		break;
+	case 0x67:
+		if (msg->len < 7)
+			break;
+		ui_cmd->has_opt_arg = true;
+		ui_cmd->channel_identifier.channel_number_fmt = msg->msg[3] >> 2;
+		ui_cmd->channel_identifier.major = ((msg->msg[3] & 3) << 8) | msg->msg[4];
+		ui_cmd->channel_identifier.minor = (msg->msg[5] << 8) | msg->msg[6];
+		break;
+	}
+}
+
+static inline void cec_msg_user_control_released(struct cec_msg *msg)
+{
+	msg->len = 2;
+	msg->msg[1] = CEC_MSG_USER_CONTROL_RELEASED;
+}
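+
+/*
+ * Illustrative sketch, not part of this header: pressing and releasing the
+ * Play Function UI command (0x60, which carries a play mode operand in the
+ * switch above). msg is assumed to have been set up with cec_msg_init():
+ *
+ *	struct cec_op_ui_command ui_cmd = {
+ *		.ui_cmd = 0x60,
+ *		.has_opt_arg = true,
+ *		.play_mode = CEC_OP_PLAY_MODE_PLAY_FWD,
+ *	};
+ *
+ *	cec_msg_user_control_pressed(&msg, &ui_cmd);
+ *
+ * and, after transmitting that message:
+ *
+ *	cec_msg_user_control_released(&msg);
+ */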
+
+/* Remote Control Passthrough Feature */
+
+/* Power Status Feature */
+static inline void cec_msg_report_power_status(struct cec_msg *msg,
+					       __u8 pwr_state)
+{
+	msg->len = 3;
+	msg->msg[1] = CEC_MSG_REPORT_POWER_STATUS;
+	msg->msg[2] = pwr_state;
+}
+
+static inline void cec_ops_report_power_status(const struct cec_msg *msg,
+					       __u8 *pwr_state)
+{
+	*pwr_state = msg->msg[2];
+}
+
+static inline void cec_msg_give_device_power_status(struct cec_msg *msg,
+						    bool reply)
+{
+	msg->len = 2;
+	msg->msg[1] = CEC_MSG_GIVE_DEVICE_POWER_STATUS;
+	msg->reply = reply ? CEC_MSG_REPORT_POWER_STATUS : 0;
+}
+
+/* General Protocol Messages */
+static inline void cec_msg_feature_abort(struct cec_msg *msg,
+					 __u8 abort_msg, __u8 reason)
+{
+	msg->len = 4;
+	msg->msg[1] = CEC_MSG_FEATURE_ABORT;
+	msg->msg[2] = abort_msg;
+	msg->msg[3] = reason;
+}
+
+static inline void cec_ops_feature_abort(const struct cec_msg *msg,
+					 __u8 *abort_msg, __u8 *reason)
+{
+	*abort_msg = msg->msg[2];
+	*reason = msg->msg[3];
+}
+
+/* This changes the current message into a feature abort message */
+static inline void cec_msg_reply_feature_abort(struct cec_msg *msg, __u8 reason)
+{
+	cec_msg_set_reply_to(msg, msg);
+	msg->len = 4;
+	msg->msg[2] = msg->msg[1];
+	msg->msg[3] = reason;
+	msg->msg[1] = CEC_MSG_FEATURE_ABORT;
+}
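+
+/*
+ * Illustrative sketch, not part of this header: a userspace follower that
+ * receives an opcode it does not implement can rewrite the received message
+ * into the abort reply in place and send it back (fd is an open filehandle
+ * on the CEC device):
+ *
+ *	cec_msg_reply_feature_abort(&msg, CEC_OP_ABORT_UNRECOGNIZED_OP);
+ *	ioctl(fd, CEC_TRANSMIT, &msg);
+ */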
+
+static inline void cec_msg_abort(struct cec_msg *msg)
+{
+	msg->len = 2;
+	msg->msg[1] = CEC_MSG_ABORT;
+}
+
+
+/* System Audio Control Feature */
+static inline void cec_msg_report_audio_status(struct cec_msg *msg,
+					       __u8 aud_mute_status,
+					       __u8 aud_vol_status)
+{
+	msg->len = 3;
+	msg->msg[1] = CEC_MSG_REPORT_AUDIO_STATUS;
+	msg->msg[2] = (aud_mute_status << 7) | (aud_vol_status & 0x7f);
+}
+
+static inline void cec_ops_report_audio_status(const struct cec_msg *msg,
+					       __u8 *aud_mute_status,
+					       __u8 *aud_vol_status)
+{
+	*aud_mute_status = msg->msg[2] >> 7;
+	*aud_vol_status = msg->msg[2] & 0x7f;
+}
+
+static inline void cec_msg_give_audio_status(struct cec_msg *msg,
+					     bool reply)
+{
+	msg->len = 2;
+	msg->msg[1] = CEC_MSG_GIVE_AUDIO_STATUS;
+	msg->reply = reply ? CEC_MSG_REPORT_AUDIO_STATUS : 0;
+}
+
+static inline void cec_msg_set_system_audio_mode(struct cec_msg *msg,
+						 __u8 sys_aud_status)
+{
+	msg->len = 3;
+	msg->msg[1] = CEC_MSG_SET_SYSTEM_AUDIO_MODE;
+	msg->msg[2] = sys_aud_status;
+}
+
+static inline void cec_ops_set_system_audio_mode(const struct cec_msg *msg,
+						 __u8 *sys_aud_status)
+{
+	*sys_aud_status = msg->msg[2];
+}
+
+static inline void cec_msg_system_audio_mode_request(struct cec_msg *msg,
+						     bool reply,
+						     __u16 phys_addr)
+{
+	msg->len = phys_addr == 0xffff ? 2 : 4;
+	msg->msg[1] = CEC_MSG_SYSTEM_AUDIO_MODE_REQUEST;
+	msg->msg[2] = phys_addr >> 8;
+	msg->msg[3] = phys_addr & 0xff;
+	msg->reply = reply ? CEC_MSG_SET_SYSTEM_AUDIO_MODE : 0;
+}
+
+static inline void cec_ops_system_audio_mode_request(const struct cec_msg *msg,
+						     __u16 *phys_addr)
+{
+	if (msg->len < 4)
+		*phys_addr = 0xffff;
+	else
+		*phys_addr = (msg->msg[2] << 8) | msg->msg[3];
+}
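+
+/*
+ * Illustrative sketch, not part of this header: an audio system requesting
+ * system audio mode for the source at phys_addr (assumed known to the
+ * caller); passing CEC_PHYS_ADDR_INVALID instead sends the two-byte form
+ * that ends the mode, as encoded above:
+ *
+ *	cec_msg_init(&msg, CEC_LOG_ADDR_AUDIOSYSTEM, CEC_LOG_ADDR_TV);
+ *	cec_msg_system_audio_mode_request(&msg, true, phys_addr);
+ */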
+
+static inline void cec_msg_system_audio_mode_status(struct cec_msg *msg,
+						    __u8 sys_aud_status)
+{
+	msg->len = 3;
+	msg->msg[1] = CEC_MSG_SYSTEM_AUDIO_MODE_STATUS;
+	msg->msg[2] = sys_aud_status;
+}
+
+static inline void cec_ops_system_audio_mode_status(const struct cec_msg *msg,
+						    __u8 *sys_aud_status)
+{
+	*sys_aud_status = msg->msg[2];
+}
+
+static inline void cec_msg_give_system_audio_mode_status(struct cec_msg *msg,
+							 bool reply)
+{
+	msg->len = 2;
+	msg->msg[1] = CEC_MSG_GIVE_SYSTEM_AUDIO_MODE_STATUS;
+	msg->reply = reply ? CEC_MSG_SYSTEM_AUDIO_MODE_STATUS : 0;
+}
+
+static inline void cec_msg_report_short_audio_descriptor(struct cec_msg *msg,
+					__u8 num_descriptors,
+					const __u32 *descriptors)
+{
+	unsigned int i;
+
+	if (num_descriptors > 4)
+		num_descriptors = 4;
+	msg->len = 2 + num_descriptors * 3;
+	msg->msg[1] = CEC_MSG_REPORT_SHORT_AUDIO_DESCRIPTOR;
+	for (i = 0; i < num_descriptors; i++) {
+		msg->msg[2 + i * 3] = (descriptors[i] >> 16) & 0xff;
+		msg->msg[3 + i * 3] = (descriptors[i] >> 8) & 0xff;
+		msg->msg[4 + i * 3] = descriptors[i] & 0xff;
+	}
+}
+
+static inline void cec_ops_report_short_audio_descriptor(const struct cec_msg *msg,
+							 __u8 *num_descriptors,
+							 __u32 *descriptors)
+{
+	unsigned int i;
+
+	*num_descriptors = (msg->len - 2) / 3;
+	if (*num_descriptors > 4)
+		*num_descriptors = 4;
+	for (i = 0; i < *num_descriptors; i++)
+		descriptors[i] = (msg->msg[2 + i * 3] << 16) |
+			(msg->msg[3 + i * 3] << 8) |
+			msg->msg[4 + i * 3];
+}
+
+static inline void cec_msg_request_short_audio_descriptor(struct cec_msg *msg,
+					bool reply,
+					__u8 num_descriptors,
+					const __u8 *audio_format_id,
+					const __u8 *audio_format_code)
+{
+	unsigned int i;
+
+	if (num_descriptors > 4)
+		num_descriptors = 4;
+	msg->len = 2 + num_descriptors;
+	msg->msg[1] = CEC_MSG_REQUEST_SHORT_AUDIO_DESCRIPTOR;
+	msg->reply = reply ? CEC_MSG_REPORT_SHORT_AUDIO_DESCRIPTOR : 0;
+	for (i = 0; i < num_descriptors; i++)
+		msg->msg[2 + i] = (audio_format_id[i] << 6) |
+				  (audio_format_code[i] & 0x3f);
+}
+
+static inline void cec_ops_request_short_audio_descriptor(const struct cec_msg *msg,
+					__u8 *num_descriptors,
+					__u8 *audio_format_id,
+					__u8 *audio_format_code)
+{
+	unsigned int i;
+
+	*num_descriptors = msg->len - 2;
+	if (*num_descriptors > 4)
+		*num_descriptors = 4;
+	for (i = 0; i < *num_descriptors; i++) {
+		audio_format_id[i] = msg->msg[2 + i] >> 6;
+		audio_format_code[i] = msg->msg[2 + i] & 0x3f;
+	}
+}
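+
+/*
+ * Illustrative sketch, not part of this header: requesting one short audio
+ * descriptor and unpacking the reply, relying on the reply handling of
+ * CEC_TRANSMIT (fd is an open filehandle on the CEC device; the format
+ * code below is a made-up CEA-861 value):
+ *
+ *	__u8 id = CEC_OP_AUD_FMT_ID_CEA861;
+ *	__u8 code = 2;
+ *	__u8 num;
+ *	__u32 descriptors[4];
+ *
+ *	cec_msg_init(&msg, CEC_LOG_ADDR_TV, CEC_LOG_ADDR_AUDIOSYSTEM);
+ *	cec_msg_request_short_audio_descriptor(&msg, true, 1, &id, &code);
+ *	if (ioctl(fd, CEC_TRANSMIT, &msg) == 0 && msg.reply)
+ *		cec_ops_report_short_audio_descriptor(&msg, &num, descriptors);
+ */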
+
+
+/* Audio Rate Control Feature */
+static inline void cec_msg_set_audio_rate(struct cec_msg *msg,
+					  __u8 audio_rate)
+{
+	msg->len = 3;
+	msg->msg[1] = CEC_MSG_SET_AUDIO_RATE;
+	msg->msg[2] = audio_rate;
+}
+
+static inline void cec_ops_set_audio_rate(const struct cec_msg *msg,
+					  __u8 *audio_rate)
+{
+	*audio_rate = msg->msg[2];
+}
+
+
+/* Audio Return Channel Control Feature */
+static inline void cec_msg_report_arc_initiated(struct cec_msg *msg)
+{
+	msg->len = 2;
+	msg->msg[1] = CEC_MSG_REPORT_ARC_INITIATED;
+}
+
+static inline void cec_msg_initiate_arc(struct cec_msg *msg,
+					bool reply)
+{
+	msg->len = 2;
+	msg->msg[1] = CEC_MSG_INITIATE_ARC;
+	msg->reply = reply ? CEC_MSG_REPORT_ARC_INITIATED : 0;
+}
+
+static inline void cec_msg_request_arc_initiation(struct cec_msg *msg,
+						  bool reply)
+{
+	msg->len = 2;
+	msg->msg[1] = CEC_MSG_REQUEST_ARC_INITIATION;
+	msg->reply = reply ? CEC_MSG_INITIATE_ARC : 0;
+}
+
+static inline void cec_msg_report_arc_terminated(struct cec_msg *msg)
+{
+	msg->len = 2;
+	msg->msg[1] = CEC_MSG_REPORT_ARC_TERMINATED;
+}
+
+static inline void cec_msg_terminate_arc(struct cec_msg *msg,
+					 bool reply)
+{
+	msg->len = 2;
+	msg->msg[1] = CEC_MSG_TERMINATE_ARC;
+	msg->reply = reply ? CEC_MSG_REPORT_ARC_TERMINATED : 0;
+}
+
+static inline void cec_msg_request_arc_termination(struct cec_msg *msg,
+						   bool reply)
+{
+	msg->len = 2;
+	msg->msg[1] = CEC_MSG_REQUEST_ARC_TERMINATION;
+	msg->reply = reply ? CEC_MSG_TERMINATE_ARC : 0;
+}
+
+
+/* Dynamic Audio Lipsync Feature */
+/* Only for CEC 2.0 and up */
+static inline void cec_msg_report_current_latency(struct cec_msg *msg,
+						  __u16 phys_addr,
+						  __u8 video_latency,
+						  __u8 low_latency_mode,
+						  __u8 audio_out_compensated,
+						  __u8 audio_out_delay)
+{
+	msg->len = 6;
+	msg->msg[0] |= 0xf; /* broadcast */
+	msg->msg[1] = CEC_MSG_REPORT_CURRENT_LATENCY;
+	msg->msg[2] = phys_addr >> 8;
+	msg->msg[3] = phys_addr & 0xff;
+	msg->msg[4] = video_latency;
+	msg->msg[5] = (low_latency_mode << 2) | audio_out_compensated;
+	if (audio_out_compensated == 3)
+		msg->msg[msg->len++] = audio_out_delay;
+}
+
+static inline void cec_ops_report_current_latency(const struct cec_msg *msg,
+						  __u16 *phys_addr,
+						  __u8 *video_latency,
+						  __u8 *low_latency_mode,
+						  __u8 *audio_out_compensated,
+						  __u8 *audio_out_delay)
+{
+	*phys_addr = (msg->msg[2] << 8) | msg->msg[3];
+	*video_latency = msg->msg[4];
+	*low_latency_mode = (msg->msg[5] >> 2) & 1;
+	*audio_out_compensated = msg->msg[5] & 3;
+	if (*audio_out_compensated == 3 && msg->len >= 7)
+		*audio_out_delay = msg->msg[6];
+	else
+		*audio_out_delay = 0;
+}
+
+static inline void cec_msg_request_current_latency(struct cec_msg *msg,
+						   bool reply,
+						   __u16 phys_addr)
+{
+	msg->len = 4;
+	msg->msg[0] |= 0xf; /* broadcast */
+	msg->msg[1] = CEC_MSG_REQUEST_CURRENT_LATENCY;
+	msg->msg[2] = phys_addr >> 8;
+	msg->msg[3] = phys_addr & 0xff;
+	msg->reply = reply ? CEC_MSG_REPORT_CURRENT_LATENCY : 0;
+}
+
+static inline void cec_ops_request_current_latency(const struct cec_msg *msg,
+						   __u16 *phys_addr)
+{
+	*phys_addr = (msg->msg[2] << 8) | msg->msg[3];
+}
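+
+/*
+ * Illustrative sketch, not part of this header: a sink broadcasting its
+ * current latency. The audio_out_delay operand is only appended when
+ * audio_out_compensated is "partially compensated" (3), as encoded above;
+ * phys_addr and video_latency are assumed known to the caller:
+ *
+ *	cec_msg_init(&msg, CEC_LOG_ADDR_TV, CEC_LOG_ADDR_BROADCAST);
+ *	cec_msg_report_current_latency(&msg, phys_addr, video_latency,
+ *				       CEC_OP_LOW_LATENCY_MODE_OFF,
+ *				       CEC_OP_AUD_OUT_COMPENSATED_NO_DELAY,
+ *				       0);
+ */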
+
+
+/* Capability Discovery and Control Feature */
+static inline void cec_msg_cdc_hec_inquire_state(struct cec_msg *msg,
+						 __u16 phys_addr1,
+						 __u16 phys_addr2)
+{
+	msg->len = 9;
+	msg->msg[0] |= 0xf; /* broadcast */
+	msg->msg[1] = CEC_MSG_CDC_MESSAGE;
+	/* msg[2] and msg[3] (phys_addr) are filled in by the CEC framework */
+	msg->msg[4] = CEC_MSG_CDC_HEC_INQUIRE_STATE;
+	msg->msg[5] = phys_addr1 >> 8;
+	msg->msg[6] = phys_addr1 & 0xff;
+	msg->msg[7] = phys_addr2 >> 8;
+	msg->msg[8] = phys_addr2 & 0xff;
+}
+
+static inline void cec_ops_cdc_hec_inquire_state(const struct cec_msg *msg,
+						 __u16 *phys_addr,
+						 __u16 *phys_addr1,
+						 __u16 *phys_addr2)
+{
+	*phys_addr = (msg->msg[2] << 8) | msg->msg[3];
+	*phys_addr1 = (msg->msg[5] << 8) | msg->msg[6];
+	*phys_addr2 = (msg->msg[7] << 8) | msg->msg[8];
+}
+
+static inline void cec_msg_cdc_hec_report_state(struct cec_msg *msg,
+						__u16 target_phys_addr,
+						__u8 hec_func_state,
+						__u8 host_func_state,
+						__u8 enc_func_state,
+						__u8 cdc_errcode,
+						__u8 has_field,
+						__u16 hec_field)
+{
+	msg->len = has_field ? 10 : 8;
+	msg->msg[0] |= 0xf; /* broadcast */
+	msg->msg[1] = CEC_MSG_CDC_MESSAGE;
+	/* msg[2] and msg[3] (phys_addr) are filled in by the CEC framework */
+	msg->msg[4] = CEC_MSG_CDC_HEC_REPORT_STATE;
+	msg->msg[5] = target_phys_addr >> 8;
+	msg->msg[6] = target_phys_addr & 0xff;
+	msg->msg[7] = (hec_func_state << 6) |
+		      (host_func_state << 4) |
+		      (enc_func_state << 2) |
+		      cdc_errcode;
+	if (has_field) {
+		msg->msg[8] = hec_field >> 8;
+		msg->msg[9] = hec_field & 0xff;
+	}
+}
+
+static inline void cec_ops_cdc_hec_report_state(const struct cec_msg *msg,
+						__u16 *phys_addr,
+						__u16 *target_phys_addr,
+						__u8 *hec_func_state,
+						__u8 *host_func_state,
+						__u8 *enc_func_state,
+						__u8 *cdc_errcode,
+						__u8 *has_field,
+						__u16 *hec_field)
+{
+	*phys_addr = (msg->msg[2] << 8) | msg->msg[3];
+	*target_phys_addr = (msg->msg[5] << 8) | msg->msg[6];
+	*hec_func_state = msg->msg[7] >> 6;
+	*host_func_state = (msg->msg[7] >> 4) & 3;
+	*enc_func_state = (msg->msg[7] >> 2) & 3;
+	*cdc_errcode = msg->msg[7] & 3;
+	*has_field = msg->len >= 10;
+	*hec_field = *has_field ? ((msg->msg[8] << 8) | msg->msg[9]) : 0;
+}
+
+static inline void cec_msg_cdc_hec_set_state(struct cec_msg *msg,
+					     __u16 phys_addr1,
+					     __u16 phys_addr2,
+					     __u8 hec_set_state,
+					     __u16 phys_addr3,
+					     __u16 phys_addr4,
+					     __u16 phys_addr5)
+{
+	msg->len = 10;
+	msg->msg[0] |= 0xf; /* broadcast */
+	msg->msg[1] = CEC_MSG_CDC_MESSAGE;
+	/* msg[2] and msg[3] (phys_addr) are filled in by the CEC framework */
+	msg->msg[4] = CEC_MSG_CDC_HEC_SET_STATE;
+	msg->msg[5] = phys_addr1 >> 8;
+	msg->msg[6] = phys_addr1 & 0xff;
+	msg->msg[7] = phys_addr2 >> 8;
+	msg->msg[8] = phys_addr2 & 0xff;
+	msg->msg[9] = hec_set_state;
+	if (phys_addr3 != CEC_PHYS_ADDR_INVALID) {
+		msg->msg[msg->len++] = phys_addr3 >> 8;
+		msg->msg[msg->len++] = phys_addr3 & 0xff;
+		if (phys_addr4 != CEC_PHYS_ADDR_INVALID) {
+			msg->msg[msg->len++] = phys_addr4 >> 8;
+			msg->msg[msg->len++] = phys_addr4 & 0xff;
+			if (phys_addr5 != CEC_PHYS_ADDR_INVALID) {
+				msg->msg[msg->len++] = phys_addr5 >> 8;
+				msg->msg[msg->len++] = phys_addr5 & 0xff;
+			}
+		}
+	}
+}
+
+static inline void cec_ops_cdc_hec_set_state(const struct cec_msg *msg,
+					     __u16 *phys_addr,
+					     __u16 *phys_addr1,
+					     __u16 *phys_addr2,
+					     __u8 *hec_set_state,
+					     __u16 *phys_addr3,
+					     __u16 *phys_addr4,
+					     __u16 *phys_addr5)
+{
+	*phys_addr = (msg->msg[2] << 8) | msg->msg[3];
+	*phys_addr1 = (msg->msg[5] << 8) | msg->msg[6];
+	*phys_addr2 = (msg->msg[7] << 8) | msg->msg[8];
+	*hec_set_state = msg->msg[9];
+	*phys_addr3 = *phys_addr4 = *phys_addr5 = CEC_PHYS_ADDR_INVALID;
+	if (msg->len >= 12)
+		*phys_addr3 = (msg->msg[10] << 8) | msg->msg[11];
+	if (msg->len >= 14)
+		*phys_addr4 = (msg->msg[12] << 8) | msg->msg[13];
+	if (msg->len >= 16)
+		*phys_addr5 = (msg->msg[14] << 8) | msg->msg[15];
+}
+
+static inline void cec_msg_cdc_hec_set_state_adjacent(struct cec_msg *msg,
+						      __u16 phys_addr1,
+						      __u8 hec_set_state)
+{
+	msg->len = 8;
+	msg->msg[0] |= 0xf; /* broadcast */
+	msg->msg[1] = CEC_MSG_CDC_MESSAGE;
+	/* msg[2] and msg[3] (phys_addr) are filled in by the CEC framework */
+	msg->msg[4] = CEC_MSG_CDC_HEC_SET_STATE_ADJACENT;
+	msg->msg[5] = phys_addr1 >> 8;
+	msg->msg[6] = phys_addr1 & 0xff;
+	msg->msg[7] = hec_set_state;
+}
+
+static inline void cec_ops_cdc_hec_set_state_adjacent(const struct cec_msg *msg,
+						      __u16 *phys_addr,
+						      __u16 *phys_addr1,
+						      __u8 *hec_set_state)
+{
+	*phys_addr = (msg->msg[2] << 8) | msg->msg[3];
+	*phys_addr1 = (msg->msg[5] << 8) | msg->msg[6];
+	*hec_set_state = msg->msg[7];
+}
+
+static inline void cec_msg_cdc_hec_request_deactivation(struct cec_msg *msg,
+							__u16 phys_addr1,
+							__u16 phys_addr2,
+							__u16 phys_addr3)
+{
+	msg->len = 11;
+	msg->msg[0] |= 0xf; /* broadcast */
+	msg->msg[1] = CEC_MSG_CDC_MESSAGE;
+	/* msg[2] and msg[3] (phys_addr) are filled in by the CEC framework */
+	msg->msg[4] = CEC_MSG_CDC_HEC_REQUEST_DEACTIVATION;
+	msg->msg[5] = phys_addr1 >> 8;
+	msg->msg[6] = phys_addr1 & 0xff;
+	msg->msg[7] = phys_addr2 >> 8;
+	msg->msg[8] = phys_addr2 & 0xff;
+	msg->msg[9] = phys_addr3 >> 8;
+	msg->msg[10] = phys_addr3 & 0xff;
+}
+
+static inline void cec_ops_cdc_hec_request_deactivation(const struct cec_msg *msg,
+							__u16 *phys_addr,
+							__u16 *phys_addr1,
+							__u16 *phys_addr2,
+							__u16 *phys_addr3)
+{
+	*phys_addr = (msg->msg[2] << 8) | msg->msg[3];
+	*phys_addr1 = (msg->msg[5] << 8) | msg->msg[6];
+	*phys_addr2 = (msg->msg[7] << 8) | msg->msg[8];
+	*phys_addr3 = (msg->msg[9] << 8) | msg->msg[10];
+}
+
+static inline void cec_msg_cdc_hec_notify_alive(struct cec_msg *msg)
+{
+	msg->len = 5;
+	msg->msg[0] |= 0xf; /* broadcast */
+	msg->msg[1] = CEC_MSG_CDC_MESSAGE;
+	/* msg[2] and msg[3] (phys_addr) are filled in by the CEC framework */
+	msg->msg[4] = CEC_MSG_CDC_HEC_NOTIFY_ALIVE;
+}
+
+static inline void cec_ops_cdc_hec_notify_alive(const struct cec_msg *msg,
+						__u16 *phys_addr)
+{
+	*phys_addr = (msg->msg[2] << 8) | msg->msg[3];
+}
+
+static inline void cec_msg_cdc_hec_discover(struct cec_msg *msg)
+{
+	msg->len = 5;
+	msg->msg[0] |= 0xf; /* broadcast */
+	msg->msg[1] = CEC_MSG_CDC_MESSAGE;
+	/* msg[2] and msg[3] (phys_addr) are filled in by the CEC framework */
+	msg->msg[4] = CEC_MSG_CDC_HEC_DISCOVER;
+}
+
+static inline void cec_ops_cdc_hec_discover(const struct cec_msg *msg,
+					    __u16 *phys_addr)
+{
+	*phys_addr = (msg->msg[2] << 8) | msg->msg[3];
+}
+
+static inline void cec_msg_cdc_hpd_set_state(struct cec_msg *msg,
+					     __u8 input_port,
+					     __u8 hpd_state)
+{
+	msg->len = 6;
+	msg->msg[0] |= 0xf; /* broadcast */
+	msg->msg[1] = CEC_MSG_CDC_MESSAGE;
+	/* msg[2] and msg[3] (phys_addr) are filled in by the CEC framework */
+	msg->msg[4] = CEC_MSG_CDC_HPD_SET_STATE;
+	msg->msg[5] = (input_port << 4) | hpd_state;
+}
+
+static inline void cec_ops_cdc_hpd_set_state(const struct cec_msg *msg,
+					    __u16 *phys_addr,
+					    __u8 *input_port,
+					    __u8 *hpd_state)
+{
+	*phys_addr = (msg->msg[2] << 8) | msg->msg[3];
+	*input_port = msg->msg[5] >> 4;
+	*hpd_state = msg->msg[5] & 0xf;
+}
+
+static inline void cec_msg_cdc_hpd_report_state(struct cec_msg *msg,
+						__u8 hpd_state,
+						__u8 hpd_error)
+{
+	msg->len = 6;
+	msg->msg[0] |= 0xf; /* broadcast */
+	msg->msg[1] = CEC_MSG_CDC_MESSAGE;
+	/* msg[2] and msg[3] (phys_addr) are filled in by the CEC framework */
+	msg->msg[4] = CEC_MSG_CDC_HPD_REPORT_STATE;
+	msg->msg[5] = (hpd_state << 4) | hpd_error;
+}
+
+static inline void cec_ops_cdc_hpd_report_state(const struct cec_msg *msg,
+						__u16 *phys_addr,
+						__u8 *hpd_state,
+						__u8 *hpd_error)
+{
+	*phys_addr = (msg->msg[2] << 8) | msg->msg[3];
+	*hpd_state = msg->msg[5] >> 4;
+	*hpd_error = msg->msg[5] & 0xf;
+}
+
+#endif
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/include/uapi/linux/cec.h	2019-10-29 09:26:25.541221752 +0100
@@ -0,0 +1,1070 @@
+/*
+ * cec - HDMI Consumer Electronics Control public header
+ *
+ * Copyright 2016 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * Alternatively you can redistribute this file under the terms of the
+ * BSD license as stated below:
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ * 3. The names of its contributors may not be used to endorse or promote
+ *    products derived from this software without specific prior written
+ *    permission.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef _CEC_UAPI_H
+#define _CEC_UAPI_H
+
+#include <linux/types.h>
+
+#define CEC_MAX_MSG_SIZE	16
+
+/**
+ * struct cec_msg - CEC message structure.
+ * @tx_ts:	Timestamp in nanoseconds using CLOCK_MONOTONIC. Set by the
+ *		driver when the message transmission has finished.
+ * @rx_ts:	Timestamp in nanoseconds using CLOCK_MONOTONIC. Set by the
+ *		driver when the message was received.
+ * @len:	Length in bytes of the message.
+ * @timeout:	The timeout (in ms) that is used to timeout CEC_RECEIVE.
+ *		Set to 0 if you want to wait forever. This timeout can also be
+ *		used with CEC_TRANSMIT as the timeout for waiting for a reply.
+ *		If 0, then it will use a 1 second timeout instead of waiting
+ *		forever as is done with CEC_RECEIVE.
+ * @sequence:	The framework assigns a sequence number to messages that are
+ *		sent. This can be used to track replies to previously sent
+ *		messages.
+ * @flags:	Set to 0.
+ * @msg:	The message payload.
+ * @reply:	This field is ignored with CEC_RECEIVE and is only used by
+ *		CEC_TRANSMIT. If non-zero, then wait for a reply with this
+ *		opcode. Set to CEC_MSG_FEATURE_ABORT if you want to wait for
+ *		a possible ABORT reply. If there was an error when sending the
+ *		msg or FeatureAbort was returned, then reply is set to 0.
+ *		If reply is non-zero upon return, then len/msg are set to
+ *		the received message.
+ *		If reply is zero upon return and status has the
+ *		CEC_TX_STATUS_FEATURE_ABORT bit set, then len/msg are set to
+ *		the received feature abort message.
+ *		If reply is zero upon return and status has the
+ *		CEC_TX_STATUS_MAX_RETRIES bit set, then no reply was seen at
+ *		all. If reply is non-zero for CEC_TRANSMIT and the message is a
+ *		broadcast, then -EINVAL is returned.
+ *		If reply is non-zero, then timeout is set to 1000 (the required
+ *		maximum response time).
+ * @rx_status:	The message receive status bits. Set by the driver.
+ * @tx_status:	The message transmit status bits. Set by the driver.
+ * @tx_arb_lost_cnt: The number of 'Arbitration Lost' events. Set by the driver.
+ * @tx_nack_cnt: The number of 'Not Acknowledged' events. Set by the driver.
+ * @tx_low_drive_cnt: The number of 'Low Drive Detected' events. Set by the
+ *		driver.
+ * @tx_error_cnt: The number of 'Error' events. Set by the driver.
+ */
+struct cec_msg {
+	__u64 tx_ts;
+	__u64 rx_ts;
+	__u32 len;
+	__u32 timeout;
+	__u32 sequence;
+	__u32 flags;
+	__u8 msg[CEC_MAX_MSG_SIZE];
+	__u8 reply;
+	__u8 rx_status;
+	__u8 tx_status;
+	__u8 tx_arb_lost_cnt;
+	__u8 tx_nack_cnt;
+	__u8 tx_low_drive_cnt;
+	__u8 tx_error_cnt;
+};
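+
+/*
+ * Illustrative sketch, not part of this header: the reply mechanism
+ * described above, using the message helpers from cec-funcs.h on an open
+ * userspace filehandle fd of the CEC device:
+ *
+ *	struct cec_msg msg;
+ *	__u8 pwr;
+ *
+ *	cec_msg_init(&msg, CEC_LOG_ADDR_PLAYBACK_1, CEC_LOG_ADDR_TV);
+ *	cec_msg_give_device_power_status(&msg, true);
+ *	if (ioctl(fd, CEC_TRANSMIT, &msg) == 0 && msg.reply)
+ *		cec_ops_report_power_status(&msg, &pwr);
+ */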
+
+/**
+ * cec_msg_initiator - return the initiator's logical address.
+ * @msg:	the message structure
+ */
+static inline __u8 cec_msg_initiator(const struct cec_msg *msg)
+{
+	return msg->msg[0] >> 4;
+}
+
+/**
+ * cec_msg_destination - return the destination's logical address.
+ * @msg:	the message structure
+ */
+static inline __u8 cec_msg_destination(const struct cec_msg *msg)
+{
+	return msg->msg[0] & 0xf;
+}
+
+/**
+ * cec_msg_opcode - return the opcode of the message, -1 for poll
+ * @msg:	the message structure
+ */
+static inline int cec_msg_opcode(const struct cec_msg *msg)
+{
+	return msg->len > 1 ? msg->msg[1] : -1;
+}
+
+/**
+ * cec_msg_is_broadcast - return true if this is a broadcast message.
+ * @msg:	the message structure
+ */
+static inline bool cec_msg_is_broadcast(const struct cec_msg *msg)
+{
+	return (msg->msg[0] & 0xf) == 0xf;
+}
+
+/**
+ * cec_msg_init - initialize the message structure.
+ * @msg:	the message structure
+ * @initiator:	the logical address of the initiator
+ * @destination: the logical address of the destination (0xf for broadcast)
+ *
+ * The whole structure is zeroed, the len field is set to 1 (i.e. a poll
+ * message) and the initiator and destination are filled in.
+ */
+static inline void cec_msg_init(struct cec_msg *msg,
+				__u8 initiator, __u8 destination)
+{
+	memset(msg, 0, sizeof(*msg));
+	msg->msg[0] = (initiator << 4) | destination;
+	msg->len = 1;
+}
+
+/**
+ * cec_msg_set_reply_to - fill in destination/initiator in a reply message.
+ * @msg:	the message structure for the reply
+ * @orig:	the original message structure
+ *
+ * Set the msg destination to the orig initiator and the msg initiator to the
+ * orig destination. Note that msg and orig may be the same pointer, in which
+ * case the change is done in place.
+ */
+static inline void cec_msg_set_reply_to(struct cec_msg *msg,
+					struct cec_msg *orig)
+{
+	/* The destination becomes the initiator and vice versa */
+	msg->msg[0] = (cec_msg_destination(orig) << 4) |
+		      cec_msg_initiator(orig);
+	msg->reply = msg->timeout = 0;
+}
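+
+/*
+ * Illustrative sketch, not part of this header: a follower can reuse the
+ * received message as the reply buffer, since msg and orig may be the same:
+ *
+ *	if (cec_msg_opcode(&msg) == CEC_MSG_GIVE_DEVICE_POWER_STATUS) {
+ *		cec_msg_set_reply_to(&msg, &msg);
+ *		cec_msg_report_power_status(&msg, CEC_OP_POWER_STATUS_ON);
+ *	}
+ */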
+
+/* cec_msg flags field */
+#define CEC_MSG_FL_REPLY_TO_FOLLOWERS	(1 << 0)
+
+/* cec_msg tx/rx_status field */
+#define CEC_TX_STATUS_OK		(1 << 0)
+#define CEC_TX_STATUS_ARB_LOST		(1 << 1)
+#define CEC_TX_STATUS_NACK		(1 << 2)
+#define CEC_TX_STATUS_LOW_DRIVE		(1 << 3)
+#define CEC_TX_STATUS_ERROR		(1 << 4)
+#define CEC_TX_STATUS_MAX_RETRIES	(1 << 5)
+#define CEC_TX_STATUS_ABORTED		(1 << 6)
+#define CEC_TX_STATUS_TIMEOUT		(1 << 7)
+
+#define CEC_RX_STATUS_OK		(1 << 0)
+#define CEC_RX_STATUS_TIMEOUT		(1 << 1)
+#define CEC_RX_STATUS_FEATURE_ABORT	(1 << 2)
+#define CEC_RX_STATUS_ABORTED		(1 << 3)
+
+static inline bool cec_msg_status_is_ok(const struct cec_msg *msg)
+{
+	if (msg->tx_status && !(msg->tx_status & CEC_TX_STATUS_OK))
+		return false;
+	if (msg->rx_status && !(msg->rx_status & CEC_RX_STATUS_OK))
+		return false;
+	if (!msg->tx_status && !msg->rx_status)
+		return false;
+	return !(msg->rx_status & CEC_RX_STATUS_FEATURE_ABORT);
+}
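+
+/*
+ * Illustrative sketch, not part of this header: after CEC_TRANSMIT, the
+ * status bits distinguish a <Feature Abort> from a message that was never
+ * acknowledged:
+ *
+ *	if (!cec_msg_status_is_ok(&msg) &&
+ *	    (msg.rx_status & CEC_RX_STATUS_FEATURE_ABORT)) {
+ *		__u8 abort_msg, reason;
+ *
+ *		cec_ops_feature_abort(&msg, &abort_msg, &reason);
+ *	}
+ */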
+
+#define CEC_LOG_ADDR_INVALID		0xff
+#define CEC_PHYS_ADDR_INVALID		0xffff
+
+/*
+ * The maximum number of logical addresses one device can be assigned to.
+ * The CEC 2.0 spec allows for only 2 logical addresses at the moment. The
+ * Analog Devices CEC hardware supports 3. So let's go wild and go for 4.
+ */
+#define CEC_MAX_LOG_ADDRS 4
+
+/* The logical addresses defined by CEC 2.0 */
+#define CEC_LOG_ADDR_TV			0
+#define CEC_LOG_ADDR_RECORD_1		1
+#define CEC_LOG_ADDR_RECORD_2		2
+#define CEC_LOG_ADDR_TUNER_1		3
+#define CEC_LOG_ADDR_PLAYBACK_1		4
+#define CEC_LOG_ADDR_AUDIOSYSTEM	5
+#define CEC_LOG_ADDR_TUNER_2		6
+#define CEC_LOG_ADDR_TUNER_3		7
+#define CEC_LOG_ADDR_PLAYBACK_2		8
+#define CEC_LOG_ADDR_RECORD_3		9
+#define CEC_LOG_ADDR_TUNER_4		10
+#define CEC_LOG_ADDR_PLAYBACK_3		11
+#define CEC_LOG_ADDR_BACKUP_1		12
+#define CEC_LOG_ADDR_BACKUP_2		13
+#define CEC_LOG_ADDR_SPECIFIC		14
+#define CEC_LOG_ADDR_UNREGISTERED	15 /* as initiator address */
+#define CEC_LOG_ADDR_BROADCAST		15 /* as destination address */
+
+/* The logical address types that the CEC device wants to claim */
+#define CEC_LOG_ADDR_TYPE_TV		0
+#define CEC_LOG_ADDR_TYPE_RECORD	1
+#define CEC_LOG_ADDR_TYPE_TUNER		2
+#define CEC_LOG_ADDR_TYPE_PLAYBACK	3
+#define CEC_LOG_ADDR_TYPE_AUDIOSYSTEM	4
+#define CEC_LOG_ADDR_TYPE_SPECIFIC	5
+#define CEC_LOG_ADDR_TYPE_UNREGISTERED	6
+/*
+ * Switches should use UNREGISTERED.
+ * Processors should use SPECIFIC.
+ */
+
+#define CEC_LOG_ADDR_MASK_TV		(1 << CEC_LOG_ADDR_TV)
+#define CEC_LOG_ADDR_MASK_RECORD	((1 << CEC_LOG_ADDR_RECORD_1) | \
+					 (1 << CEC_LOG_ADDR_RECORD_2) | \
+					 (1 << CEC_LOG_ADDR_RECORD_3))
+#define CEC_LOG_ADDR_MASK_TUNER		((1 << CEC_LOG_ADDR_TUNER_1) | \
+					 (1 << CEC_LOG_ADDR_TUNER_2) | \
+					 (1 << CEC_LOG_ADDR_TUNER_3) | \
+					 (1 << CEC_LOG_ADDR_TUNER_4))
+#define CEC_LOG_ADDR_MASK_PLAYBACK	((1 << CEC_LOG_ADDR_PLAYBACK_1) | \
+					 (1 << CEC_LOG_ADDR_PLAYBACK_2) | \
+					 (1 << CEC_LOG_ADDR_PLAYBACK_3))
+#define CEC_LOG_ADDR_MASK_AUDIOSYSTEM	(1 << CEC_LOG_ADDR_AUDIOSYSTEM)
+#define CEC_LOG_ADDR_MASK_BACKUP	((1 << CEC_LOG_ADDR_BACKUP_1) | \
+					 (1 << CEC_LOG_ADDR_BACKUP_2))
+#define CEC_LOG_ADDR_MASK_SPECIFIC	(1 << CEC_LOG_ADDR_SPECIFIC)
+#define CEC_LOG_ADDR_MASK_UNREGISTERED	(1 << CEC_LOG_ADDR_UNREGISTERED)
+
+static inline bool cec_has_tv(__u16 log_addr_mask)
+{
+	return log_addr_mask & CEC_LOG_ADDR_MASK_TV;
+}
+
+static inline bool cec_has_record(__u16 log_addr_mask)
+{
+	return log_addr_mask & CEC_LOG_ADDR_MASK_RECORD;
+}
+
+static inline bool cec_has_tuner(__u16 log_addr_mask)
+{
+	return log_addr_mask & CEC_LOG_ADDR_MASK_TUNER;
+}
+
+static inline bool cec_has_playback(__u16 log_addr_mask)
+{
+	return log_addr_mask & CEC_LOG_ADDR_MASK_PLAYBACK;
+}
+
+static inline bool cec_has_audiosystem(__u16 log_addr_mask)
+{
+	return log_addr_mask & CEC_LOG_ADDR_MASK_AUDIOSYSTEM;
+}
+
+static inline bool cec_has_backup(__u16 log_addr_mask)
+{
+	return log_addr_mask & CEC_LOG_ADDR_MASK_BACKUP;
+}
+
+static inline bool cec_has_specific(__u16 log_addr_mask)
+{
+	return log_addr_mask & CEC_LOG_ADDR_MASK_SPECIFIC;
+}
+
+static inline bool cec_is_unregistered(__u16 log_addr_mask)
+{
+	return log_addr_mask & CEC_LOG_ADDR_MASK_UNREGISTERED;
+}
+
+static inline bool cec_is_unconfigured(__u16 log_addr_mask)
+{
+	return log_addr_mask == 0;
+}
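+
+/*
+ * Illustrative sketch, not part of this header: inspecting the mask
+ * returned by CEC_ADAP_G_LOG_ADDRS (enable_playback_features() is a
+ * hypothetical application callback):
+ *
+ *	struct cec_log_addrs laddrs;
+ *
+ *	ioctl(fd, CEC_ADAP_G_LOG_ADDRS, &laddrs);
+ *	if (!cec_is_unconfigured(laddrs.log_addr_mask) &&
+ *	    cec_has_playback(laddrs.log_addr_mask))
+ *		enable_playback_features();
+ */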
+
+/*
+ * Use this if there is no vendor ID or if the vendor ID should be
+ * disabled (the vendor ID is configured via struct cec_log_addrs)
+ */
+#define CEC_VENDOR_ID_NONE		0xffffffff
+
+/* The message handling modes */
+/* Modes for initiator */
+#define CEC_MODE_NO_INITIATOR		(0x0 << 0)
+#define CEC_MODE_INITIATOR		(0x1 << 0)
+#define CEC_MODE_EXCL_INITIATOR		(0x2 << 0)
+#define CEC_MODE_INITIATOR_MSK		0x0f
+
+/* Modes for follower */
+#define CEC_MODE_NO_FOLLOWER		(0x0 << 4)
+#define CEC_MODE_FOLLOWER		(0x1 << 4)
+#define CEC_MODE_EXCL_FOLLOWER		(0x2 << 4)
+#define CEC_MODE_EXCL_FOLLOWER_PASSTHRU	(0x3 << 4)
+#define CEC_MODE_MONITOR		(0xe << 4)
+#define CEC_MODE_MONITOR_ALL		(0xf << 4)
+#define CEC_MODE_FOLLOWER_MSK		0xf0
+
+/* Userspace has to configure the physical address */
+#define CEC_CAP_PHYS_ADDR	(1 << 0)
+/* Userspace has to configure the logical addresses */
+#define CEC_CAP_LOG_ADDRS	(1 << 1)
+/* Userspace can transmit messages (and thus become follower as well) */
+#define CEC_CAP_TRANSMIT	(1 << 2)
+/*
+ * Passthrough all messages instead of processing them.
+ */
+#define CEC_CAP_PASSTHROUGH	(1 << 3)
+/* Supports remote control */
+#define CEC_CAP_RC		(1 << 4)
+/* Hardware can monitor all messages, not just directed and broadcast. */
+#define CEC_CAP_MONITOR_ALL	(1 << 5)
+/* Hardware can use CEC only if the HDMI HPD pin is high. */
+#define CEC_CAP_NEEDS_HPD	(1 << 6)
+
+/**
+ * struct cec_caps - CEC capabilities structure.
+ * @driver: name of the CEC device driver.
+ * @name: name of the CEC device. @driver + @name must be unique.
+ * @available_log_addrs: number of available logical addresses.
+ * @capabilities: capabilities of the CEC adapter.
+ * @version: version of the CEC adapter framework.
+ */
+struct cec_caps {
+	char driver[32];
+	char name[32];
+	__u32 available_log_addrs;
+	__u32 capabilities;
+	__u32 version;
+};
+
+/**
+ * struct cec_log_addrs - CEC logical addresses structure.
+ * @log_addr: the claimed logical addresses. Set by the driver.
+ * @log_addr_mask: current logical address mask. Set by the driver.
+ * @cec_version: the CEC version that the adapter should implement. Set by the
+ *	caller.
+ * @num_log_addrs: how many logical addresses should be claimed. Set by the
+ *	caller.
+ * @vendor_id: the vendor ID of the device. Set by the caller.
+ * @flags: flags.
+ * @osd_name: the OSD name of the device. Set by the caller.
+ * @primary_device_type: the primary device type for each logical address.
+ *	Set by the caller.
+ * @log_addr_type: the logical address types. Set by the caller.
+ * @all_device_types: CEC 2.0: all device types represented by the logical
+ *	address. Set by the caller.
+ * @features:	CEC 2.0: The logical address features. Set by the caller.
+ */
+struct cec_log_addrs {
+	__u8 log_addr[CEC_MAX_LOG_ADDRS];
+	__u16 log_addr_mask;
+	__u8 cec_version;
+	__u8 num_log_addrs;
+	__u32 vendor_id;
+	__u32 flags;
+	char osd_name[15];
+	__u8 primary_device_type[CEC_MAX_LOG_ADDRS];
+	__u8 log_addr_type[CEC_MAX_LOG_ADDRS];
+
+	/* CEC 2.0 */
+	__u8 all_device_types[CEC_MAX_LOG_ADDRS];
+	__u8 features[CEC_MAX_LOG_ADDRS][12];
+};
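+
+/*
+ * Illustrative sketch, not part of this header: configuring the adapter as
+ * a CEC 2.0 playback device (fields not set are left zeroed; the OSD name
+ * is arbitrary):
+ *
+ *	struct cec_log_addrs laddrs = {};
+ *
+ *	laddrs.cec_version = CEC_OP_CEC_VERSION_2_0;
+ *	laddrs.num_log_addrs = 1;
+ *	laddrs.vendor_id = CEC_VENDOR_ID_NONE;
+ *	strcpy(laddrs.osd_name, "Playback");
+ *	laddrs.primary_device_type[0] = CEC_OP_PRIM_DEVTYPE_PLAYBACK;
+ *	laddrs.log_addr_type[0] = CEC_LOG_ADDR_TYPE_PLAYBACK;
+ *	laddrs.all_device_types[0] = CEC_OP_ALL_DEVTYPE_PLAYBACK;
+ *	ioctl(fd, CEC_ADAP_S_LOG_ADDRS, &laddrs);
+ */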
+
+/* Allow a fallback to unregistered */
+#define CEC_LOG_ADDRS_FL_ALLOW_UNREG_FALLBACK	(1 << 0)
+/* Passthrough RC messages to the input subsystem */
+#define CEC_LOG_ADDRS_FL_ALLOW_RC_PASSTHRU	(1 << 1)
+/* CDC-Only device: supports only CDC messages */
+#define CEC_LOG_ADDRS_FL_CDC_ONLY		(1 << 2)
+
+/* Events */
+
+/* Event that occurs when the adapter state changes */
+#define CEC_EVENT_STATE_CHANGE		1
+/*
+ * This event is sent when messages are lost because the application
+ * didn't empty the message queue in time
+ */
+#define CEC_EVENT_LOST_MSGS		2
+
+#define CEC_EVENT_FL_INITIAL_STATE	(1 << 0)
+
+/**
+ * struct cec_event_state_change - used when the CEC adapter changes state.
+ * @phys_addr: the current physical address
+ * @log_addr_mask: the current logical address mask
+ */
+struct cec_event_state_change {
+	__u16 phys_addr;
+	__u16 log_addr_mask;
+};
+
+/**
+ * struct cec_event_lost_msgs - tells you how many messages were lost.
+ * @lost_msgs: how many messages were lost.
+ */
+struct cec_event_lost_msgs {
+	__u32 lost_msgs;
+};
+
+/**
+ * struct cec_event - CEC event structure
+ * @ts: the timestamp of when the event was sent.
+ * @event: the event.
+ * @flags: event flags.
+ * @state_change: the event payload for CEC_EVENT_STATE_CHANGE.
+ * @lost_msgs: the event payload for CEC_EVENT_LOST_MSGS.
+ * @raw: array to pad the union.
+ */
+struct cec_event {
+	__u64 ts;
+	__u32 event;
+	__u32 flags;
+	union {
+		struct cec_event_state_change state_change;
+		struct cec_event_lost_msgs lost_msgs;
+		__u32 raw[16];
+	};
+};
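+
+/*
+ * Illustrative sketch, not part of this header: dequeuing an event with
+ * CEC_DQEVENT (handle_phys_addr() is a hypothetical application callback):
+ *
+ *	struct cec_event ev;
+ *
+ *	if (ioctl(fd, CEC_DQEVENT, &ev) == 0 &&
+ *	    ev.event == CEC_EVENT_STATE_CHANGE)
+ *		handle_phys_addr(ev.state_change.phys_addr);
+ */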
+
+/* ioctls */
+
+/* Adapter capabilities */
+#define CEC_ADAP_G_CAPS		_IOWR('a',  0, struct cec_caps)
+
+/*
+ * phys_addr is either 0 (if this is the CEC root device)
+ * or a valid physical address obtained from the sink's EDID
+ * as read by this CEC device (if this is a source device)
+ * or a physical address obtained and modified from a sink
+ * EDID and used for a sink CEC device.
+ * If nothing is connected, then phys_addr is 0xffff.
+ * See HDMI 1.4b, section 8.7 (Physical Address).
+ *
+ * The CEC_ADAP_S_PHYS_ADDR ioctl may not be available if that is handled
+ * internally.
+ */
+#define CEC_ADAP_G_PHYS_ADDR	_IOR('a',  1, __u16)
+#define CEC_ADAP_S_PHYS_ADDR	_IOW('a',  2, __u16)
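+
+/*
+ * Illustrative sketch, not part of this header: reading the current
+ * physical address (on_connected() is a hypothetical application callback):
+ *
+ *	__u16 pa;
+ *
+ *	if (ioctl(fd, CEC_ADAP_G_PHYS_ADDR, &pa) == 0 &&
+ *	    pa != CEC_PHYS_ADDR_INVALID)
+ *		on_connected(pa);
+ */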
+
+/*
+ * Configure the CEC adapter. It sets the device type and which
+ * logical types it will try to claim. It will return which
+ * logical addresses it could actually claim.
+ * An error is returned if the adapter is disabled or if there
+ * is no physical address assigned.
+ */
+
+#define CEC_ADAP_G_LOG_ADDRS	_IOR('a',  3, struct cec_log_addrs)
+#define CEC_ADAP_S_LOG_ADDRS	_IOWR('a',  4, struct cec_log_addrs)
+
+/* Transmit/receive a CEC command */
+#define CEC_TRANSMIT		_IOWR('a',  5, struct cec_msg)
+#define CEC_RECEIVE		_IOWR('a',  6, struct cec_msg)
+
+/* Dequeue CEC events */
+#define CEC_DQEVENT		_IOWR('a',  7, struct cec_event)
+
+/*
+ * Get and set the message handling mode for this filehandle.
+ */
+#define CEC_G_MODE		_IOR('a',  8, __u32)
+#define CEC_S_MODE		_IOW('a',  9, __u32)
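+
+/*
+ * Illustrative sketch, not part of this header: becoming a follower while
+ * keeping the default initiator mode:
+ *
+ *	__u32 mode = CEC_MODE_INITIATOR | CEC_MODE_FOLLOWER;
+ *
+ *	ioctl(fd, CEC_S_MODE, &mode);
+ */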
+
+/*
+ * The remainder of this header defines all CEC messages and operands.
+ * The format matters since the cec-ctl utility parses it to generate
+ * code implementing all these messages.
+ *
+ * Comments ending with 'Feature' group messages for each feature.
+ * If messages are part of multiple features, then the "Has also"
+ * comment is used to list the previously defined messages that are
+ * supported by the feature.
+ *
+ * Before operands are defined a comment is added that gives the
+ * name of the operand and in brackets the variable name of the
+ * corresponding argument in the cec-funcs.h function.
+ */
+
+/* Messages */
+
+/* One Touch Play Feature */
+#define CEC_MSG_ACTIVE_SOURCE				0x82
+#define CEC_MSG_IMAGE_VIEW_ON				0x04
+#define CEC_MSG_TEXT_VIEW_ON				0x0d
+
+
+/* Routing Control Feature */
+
+/*
+ * Has also:
+ *	CEC_MSG_ACTIVE_SOURCE
+ */
+
+#define CEC_MSG_INACTIVE_SOURCE				0x9d
+#define CEC_MSG_REQUEST_ACTIVE_SOURCE			0x85
+#define CEC_MSG_ROUTING_CHANGE				0x80
+#define CEC_MSG_ROUTING_INFORMATION			0x81
+#define CEC_MSG_SET_STREAM_PATH				0x86
+
+
+/* Standby Feature */
+#define CEC_MSG_STANDBY					0x36
+
+
+/* One Touch Record Feature */
+#define CEC_MSG_RECORD_OFF				0x0b
+#define CEC_MSG_RECORD_ON				0x09
+/* Record Source Type Operand (rec_src_type) */
+#define CEC_OP_RECORD_SRC_OWN				1
+#define CEC_OP_RECORD_SRC_DIGITAL			2
+#define CEC_OP_RECORD_SRC_ANALOG			3
+#define CEC_OP_RECORD_SRC_EXT_PLUG			4
+#define CEC_OP_RECORD_SRC_EXT_PHYS_ADDR			5
+/* Service Identification Method Operand (service_id_method) */
+#define CEC_OP_SERVICE_ID_METHOD_BY_DIG_ID		0
+#define CEC_OP_SERVICE_ID_METHOD_BY_CHANNEL		1
+/* Digital Service Broadcast System Operand (dig_bcast_system) */
+#define CEC_OP_DIG_SERVICE_BCAST_SYSTEM_ARIB_GEN	0x00
+#define CEC_OP_DIG_SERVICE_BCAST_SYSTEM_ATSC_GEN	0x01
+#define CEC_OP_DIG_SERVICE_BCAST_SYSTEM_DVB_GEN		0x02
+#define CEC_OP_DIG_SERVICE_BCAST_SYSTEM_ARIB_BS		0x08
+#define CEC_OP_DIG_SERVICE_BCAST_SYSTEM_ARIB_CS		0x09
+#define CEC_OP_DIG_SERVICE_BCAST_SYSTEM_ARIB_T		0x0a
+#define CEC_OP_DIG_SERVICE_BCAST_SYSTEM_ATSC_CABLE	0x10
+#define CEC_OP_DIG_SERVICE_BCAST_SYSTEM_ATSC_SAT	0x11
+#define CEC_OP_DIG_SERVICE_BCAST_SYSTEM_ATSC_T		0x12
+#define CEC_OP_DIG_SERVICE_BCAST_SYSTEM_DVB_C		0x18
+#define CEC_OP_DIG_SERVICE_BCAST_SYSTEM_DVB_S		0x19
+#define CEC_OP_DIG_SERVICE_BCAST_SYSTEM_DVB_S2		0x1a
+#define CEC_OP_DIG_SERVICE_BCAST_SYSTEM_DVB_T		0x1b
+/* Analogue Broadcast Type Operand (ana_bcast_type) */
+#define CEC_OP_ANA_BCAST_TYPE_CABLE			0
+#define CEC_OP_ANA_BCAST_TYPE_SATELLITE			1
+#define CEC_OP_ANA_BCAST_TYPE_TERRESTRIAL		2
+/* Broadcast System Operand (bcast_system) */
+#define CEC_OP_BCAST_SYSTEM_PAL_BG			0x00
+#define CEC_OP_BCAST_SYSTEM_SECAM_LQ			0x01 /* SECAM L' */
+#define CEC_OP_BCAST_SYSTEM_PAL_M			0x02
+#define CEC_OP_BCAST_SYSTEM_NTSC_M			0x03
+#define CEC_OP_BCAST_SYSTEM_PAL_I			0x04
+#define CEC_OP_BCAST_SYSTEM_SECAM_DK			0x05
+#define CEC_OP_BCAST_SYSTEM_SECAM_BG			0x06
+#define CEC_OP_BCAST_SYSTEM_SECAM_L			0x07
+#define CEC_OP_BCAST_SYSTEM_PAL_DK			0x08
+#define CEC_OP_BCAST_SYSTEM_OTHER			0x1f
+/* Channel Number Format Operand (channel_number_fmt) */
+#define CEC_OP_CHANNEL_NUMBER_FMT_1_PART		0x01
+#define CEC_OP_CHANNEL_NUMBER_FMT_2_PART		0x02
+
+#define CEC_MSG_RECORD_STATUS				0x0a
+/* Record Status Operand (rec_status) */
+#define CEC_OP_RECORD_STATUS_CUR_SRC			0x01
+#define CEC_OP_RECORD_STATUS_DIG_SERVICE		0x02
+#define CEC_OP_RECORD_STATUS_ANA_SERVICE		0x03
+#define CEC_OP_RECORD_STATUS_EXT_INPUT			0x04
+#define CEC_OP_RECORD_STATUS_NO_DIG_SERVICE		0x05
+#define CEC_OP_RECORD_STATUS_NO_ANA_SERVICE		0x06
+#define CEC_OP_RECORD_STATUS_NO_SERVICE			0x07
+#define CEC_OP_RECORD_STATUS_INVALID_EXT_PLUG		0x09
+#define CEC_OP_RECORD_STATUS_INVALID_EXT_PHYS_ADDR	0x0a
+#define CEC_OP_RECORD_STATUS_UNSUP_CA			0x0b
+#define CEC_OP_RECORD_STATUS_NO_CA_ENTITLEMENTS		0x0c
+#define CEC_OP_RECORD_STATUS_CANT_COPY_SRC		0x0d
+#define CEC_OP_RECORD_STATUS_NO_MORE_COPIES		0x0e
+#define CEC_OP_RECORD_STATUS_NO_MEDIA			0x10
+#define CEC_OP_RECORD_STATUS_PLAYING			0x11
+#define CEC_OP_RECORD_STATUS_ALREADY_RECORDING		0x12
+#define CEC_OP_RECORD_STATUS_MEDIA_PROT			0x13
+#define CEC_OP_RECORD_STATUS_NO_SIGNAL			0x14
+#define CEC_OP_RECORD_STATUS_MEDIA_PROBLEM		0x15
+#define CEC_OP_RECORD_STATUS_NO_SPACE			0x16
+#define CEC_OP_RECORD_STATUS_PARENTAL_LOCK		0x17
+#define CEC_OP_RECORD_STATUS_TERMINATED_OK		0x1a
+#define CEC_OP_RECORD_STATUS_ALREADY_TERM		0x1b
+#define CEC_OP_RECORD_STATUS_OTHER			0x1f
+
+#define CEC_MSG_RECORD_TV_SCREEN			0x0f
+
+
+/* Timer Programming Feature */
+#define CEC_MSG_CLEAR_ANALOGUE_TIMER			0x33
+/* Recording Sequence Operand (recording_seq) */
+#define CEC_OP_REC_SEQ_SUNDAY				0x01
+#define CEC_OP_REC_SEQ_MONDAY				0x02
+#define CEC_OP_REC_SEQ_TUESDAY				0x04
+#define CEC_OP_REC_SEQ_WEDNESDAY			0x08
+#define CEC_OP_REC_SEQ_THURSDAY				0x10
+#define CEC_OP_REC_SEQ_FRIDAY				0x20
+#define CEC_OP_REC_SEQ_SATERDAY				0x40
+#define CEC_OP_REC_SEQ_ONCE_ONLY			0x00
+
+#define CEC_MSG_CLEAR_DIGITAL_TIMER			0x99
+
+#define CEC_MSG_CLEAR_EXT_TIMER				0xa1
+/* External Source Specifier Operand (ext_src_spec) */
+#define CEC_OP_EXT_SRC_PLUG				0x04
+#define CEC_OP_EXT_SRC_PHYS_ADDR			0x05
+
+#define CEC_MSG_SET_ANALOGUE_TIMER			0x34
+#define CEC_MSG_SET_DIGITAL_TIMER			0x97
+#define CEC_MSG_SET_EXT_TIMER				0xa2
+
+#define CEC_MSG_SET_TIMER_PROGRAM_TITLE			0x67
+#define CEC_MSG_TIMER_CLEARED_STATUS			0x43
+/* Timer Cleared Status Data Operand (timer_cleared_status) */
+#define CEC_OP_TIMER_CLR_STAT_RECORDING			0x00
+#define CEC_OP_TIMER_CLR_STAT_NO_MATCHING		0x01
+#define CEC_OP_TIMER_CLR_STAT_NO_INFO			0x02
+#define CEC_OP_TIMER_CLR_STAT_CLEARED			0x80
+
+#define CEC_MSG_TIMER_STATUS				0x35
+/* Timer Overlap Warning Operand (timer_overlap_warning) */
+#define CEC_OP_TIMER_OVERLAP_WARNING_NO_OVERLAP		0
+#define CEC_OP_TIMER_OVERLAP_WARNING_OVERLAP		1
+/* Media Info Operand (media_info) */
+#define CEC_OP_MEDIA_INFO_UNPROT_MEDIA			0
+#define CEC_OP_MEDIA_INFO_PROT_MEDIA			1
+#define CEC_OP_MEDIA_INFO_NO_MEDIA			2
+/* Programmed Indicator Operand (prog_indicator) */
+#define CEC_OP_PROG_IND_NOT_PROGRAMMED			0
+#define CEC_OP_PROG_IND_PROGRAMMED			1
+/* Programmed Info Operand (prog_info) */
+#define CEC_OP_PROG_INFO_ENOUGH_SPACE			0x08
+#define CEC_OP_PROG_INFO_NOT_ENOUGH_SPACE		0x09
+#define CEC_OP_PROG_INFO_MIGHT_NOT_BE_ENOUGH_SPACE	0x0b
+#define CEC_OP_PROG_INFO_NONE_AVAILABLE			0x0a
+/* Not Programmed Error Info Operand (prog_error) */
+#define CEC_OP_PROG_ERROR_NO_FREE_TIMER			0x01
+#define CEC_OP_PROG_ERROR_DATE_OUT_OF_RANGE		0x02
+#define CEC_OP_PROG_ERROR_REC_SEQ_ERROR			0x03
+#define CEC_OP_PROG_ERROR_INV_EXT_PLUG			0x04
+#define CEC_OP_PROG_ERROR_INV_EXT_PHYS_ADDR		0x05
+#define CEC_OP_PROG_ERROR_CA_UNSUPP			0x06
+#define CEC_OP_PROG_ERROR_INSUF_CA_ENTITLEMENTS		0x07
+#define CEC_OP_PROG_ERROR_RESOLUTION_UNSUPP		0x08
+#define CEC_OP_PROG_ERROR_PARENTAL_LOCK			0x09
+#define CEC_OP_PROG_ERROR_CLOCK_FAILURE			0x0a
+#define CEC_OP_PROG_ERROR_DUPLICATE			0x0e
+
+
+/* System Information Feature */
+#define CEC_MSG_CEC_VERSION				0x9e
+/* CEC Version Operand (cec_version) */
+#define CEC_OP_CEC_VERSION_1_3A				4
+#define CEC_OP_CEC_VERSION_1_4				5
+#define CEC_OP_CEC_VERSION_2_0				6
+
+#define CEC_MSG_GET_CEC_VERSION				0x9f
+#define CEC_MSG_GIVE_PHYSICAL_ADDR			0x83
+#define CEC_MSG_GET_MENU_LANGUAGE			0x91
+#define CEC_MSG_REPORT_PHYSICAL_ADDR			0x84
+/* Primary Device Type Operand (prim_devtype) */
+#define CEC_OP_PRIM_DEVTYPE_TV				0
+#define CEC_OP_PRIM_DEVTYPE_RECORD			1
+#define CEC_OP_PRIM_DEVTYPE_TUNER			3
+#define CEC_OP_PRIM_DEVTYPE_PLAYBACK			4
+#define CEC_OP_PRIM_DEVTYPE_AUDIOSYSTEM			5
+#define CEC_OP_PRIM_DEVTYPE_SWITCH			6
+#define CEC_OP_PRIM_DEVTYPE_PROCESSOR			7
+
+#define CEC_MSG_SET_MENU_LANGUAGE			0x32
+#define CEC_MSG_REPORT_FEATURES				0xa6	/* HDMI 2.0 */
+/* All Device Types Operand (all_device_types) */
+#define CEC_OP_ALL_DEVTYPE_TV				0x80
+#define CEC_OP_ALL_DEVTYPE_RECORD			0x40
+#define CEC_OP_ALL_DEVTYPE_TUNER			0x20
+#define CEC_OP_ALL_DEVTYPE_PLAYBACK			0x10
+#define CEC_OP_ALL_DEVTYPE_AUDIOSYSTEM			0x08
+#define CEC_OP_ALL_DEVTYPE_SWITCH			0x04
+/*
+ * And if you are wondering what happened to PROCESSOR devices: those should
+ * be mapped to a SWITCH.
+ */
+
+/* Valid for RC Profile and Device Feature operands */
+#define CEC_OP_FEAT_EXT					0x80	/* Extension bit */
+/* RC Profile Operand (rc_profile) */
+#define CEC_OP_FEAT_RC_TV_PROFILE_NONE			0x00
+#define CEC_OP_FEAT_RC_TV_PROFILE_1			0x02
+#define CEC_OP_FEAT_RC_TV_PROFILE_2			0x06
+#define CEC_OP_FEAT_RC_TV_PROFILE_3			0x0a
+#define CEC_OP_FEAT_RC_TV_PROFILE_4			0x0e
+#define CEC_OP_FEAT_RC_SRC_HAS_DEV_ROOT_MENU		0x50
+#define CEC_OP_FEAT_RC_SRC_HAS_DEV_SETUP_MENU		0x48
+#define CEC_OP_FEAT_RC_SRC_HAS_CONTENTS_MENU		0x44
+#define CEC_OP_FEAT_RC_SRC_HAS_MEDIA_TOP_MENU		0x42
+#define CEC_OP_FEAT_RC_SRC_HAS_MEDIA_CONTEXT_MENU	0x41
+/* Device Feature Operand (dev_features) */
+#define CEC_OP_FEAT_DEV_HAS_RECORD_TV_SCREEN		0x40
+#define CEC_OP_FEAT_DEV_HAS_SET_OSD_STRING		0x20
+#define CEC_OP_FEAT_DEV_HAS_DECK_CONTROL		0x10
+#define CEC_OP_FEAT_DEV_HAS_SET_AUDIO_RATE		0x08
+#define CEC_OP_FEAT_DEV_SINK_HAS_ARC_TX			0x04
+#define CEC_OP_FEAT_DEV_SOURCE_HAS_ARC_RX		0x02
+
+#define CEC_MSG_GIVE_FEATURES				0xa5	/* HDMI 2.0 */
+
+
+/* Deck Control Feature */
+#define CEC_MSG_DECK_CONTROL				0x42
+/* Deck Control Mode Operand (deck_control_mode) */
+#define CEC_OP_DECK_CTL_MODE_SKIP_FWD			1
+#define CEC_OP_DECK_CTL_MODE_SKIP_REV			2
+#define CEC_OP_DECK_CTL_MODE_STOP			3
+#define CEC_OP_DECK_CTL_MODE_EJECT			4
+
+#define CEC_MSG_DECK_STATUS				0x1b
+/* Deck Info Operand (deck_info) */
+#define CEC_OP_DECK_INFO_PLAY				0x11
+#define CEC_OP_DECK_INFO_RECORD				0x12
+#define CEC_OP_DECK_INFO_PLAY_REV			0x13
+#define CEC_OP_DECK_INFO_STILL				0x14
+#define CEC_OP_DECK_INFO_SLOW				0x15
+#define CEC_OP_DECK_INFO_SLOW_REV			0x16
+#define CEC_OP_DECK_INFO_FAST_FWD			0x17
+#define CEC_OP_DECK_INFO_FAST_REV			0x18
+#define CEC_OP_DECK_INFO_NO_MEDIA			0x19
+#define CEC_OP_DECK_INFO_STOP				0x1a
+#define CEC_OP_DECK_INFO_SKIP_FWD			0x1b
+#define CEC_OP_DECK_INFO_SKIP_REV			0x1c
+#define CEC_OP_DECK_INFO_INDEX_SEARCH_FWD		0x1d
+#define CEC_OP_DECK_INFO_INDEX_SEARCH_REV		0x1e
+#define CEC_OP_DECK_INFO_OTHER				0x1f
+
+#define CEC_MSG_GIVE_DECK_STATUS			0x1a
+/* Status Request Operand (status_req) */
+#define CEC_OP_STATUS_REQ_ON				1
+#define CEC_OP_STATUS_REQ_OFF				2
+#define CEC_OP_STATUS_REQ_ONCE				3
+
+#define CEC_MSG_PLAY					0x41
+/* Play Mode Operand (play_mode) */
+#define CEC_OP_PLAY_MODE_PLAY_FWD			0x24
+#define CEC_OP_PLAY_MODE_PLAY_REV			0x20
+#define CEC_OP_PLAY_MODE_PLAY_STILL			0x25
+#define CEC_OP_PLAY_MODE_PLAY_FAST_FWD_MIN		0x05
+#define CEC_OP_PLAY_MODE_PLAY_FAST_FWD_MED		0x06
+#define CEC_OP_PLAY_MODE_PLAY_FAST_FWD_MAX		0x07
+#define CEC_OP_PLAY_MODE_PLAY_FAST_REV_MIN		0x09
+#define CEC_OP_PLAY_MODE_PLAY_FAST_REV_MED		0x0a
+#define CEC_OP_PLAY_MODE_PLAY_FAST_REV_MAX		0x0b
+#define CEC_OP_PLAY_MODE_PLAY_SLOW_FWD_MIN		0x15
+#define CEC_OP_PLAY_MODE_PLAY_SLOW_FWD_MED		0x16
+#define CEC_OP_PLAY_MODE_PLAY_SLOW_FWD_MAX		0x17
+#define CEC_OP_PLAY_MODE_PLAY_SLOW_REV_MIN		0x19
+#define CEC_OP_PLAY_MODE_PLAY_SLOW_REV_MED		0x1a
+#define CEC_OP_PLAY_MODE_PLAY_SLOW_REV_MAX		0x1b
+
+
+/* Tuner Control Feature */
+#define CEC_MSG_GIVE_TUNER_DEVICE_STATUS		0x08
+#define CEC_MSG_SELECT_ANALOGUE_SERVICE			0x92
+#define CEC_MSG_SELECT_DIGITAL_SERVICE			0x93
+#define CEC_MSG_TUNER_DEVICE_STATUS			0x07
+/* Recording Flag Operand (rec_flag) */
+#define CEC_OP_REC_FLAG_USED				0
+#define CEC_OP_REC_FLAG_NOT_USED			1
+/* Tuner Display Info Operand (tuner_display_info) */
+#define CEC_OP_TUNER_DISPLAY_INFO_DIGITAL		0
+#define CEC_OP_TUNER_DISPLAY_INFO_NONE			1
+#define CEC_OP_TUNER_DISPLAY_INFO_ANALOGUE		2
+
+#define CEC_MSG_TUNER_STEP_DECREMENT			0x06
+#define CEC_MSG_TUNER_STEP_INCREMENT			0x05
+
+
+/* Vendor Specific Commands Feature */
+
+/*
+ * Has also:
+ *	CEC_MSG_CEC_VERSION
+ *	CEC_MSG_GET_CEC_VERSION
+ */
+#define CEC_MSG_DEVICE_VENDOR_ID			0x87
+#define CEC_MSG_GIVE_DEVICE_VENDOR_ID			0x8c
+#define CEC_MSG_VENDOR_COMMAND				0x89
+#define CEC_MSG_VENDOR_COMMAND_WITH_ID			0xa0
+#define CEC_MSG_VENDOR_REMOTE_BUTTON_DOWN		0x8a
+#define CEC_MSG_VENDOR_REMOTE_BUTTON_UP			0x8b
+
+
+/* OSD Display Feature */
+#define CEC_MSG_SET_OSD_STRING				0x64
+/* Display Control Operand (disp_ctl) */
+#define CEC_OP_DISP_CTL_DEFAULT				0x00
+#define CEC_OP_DISP_CTL_UNTIL_CLEARED			0x40
+#define CEC_OP_DISP_CTL_CLEAR				0x80
+
+
+/* Device OSD Transfer Feature */
+#define CEC_MSG_GIVE_OSD_NAME				0x46
+#define CEC_MSG_SET_OSD_NAME				0x47
+
+
+/* Device Menu Control Feature */
+#define CEC_MSG_MENU_REQUEST				0x8d
+/* Menu Request Type Operand (menu_req) */
+#define CEC_OP_MENU_REQUEST_ACTIVATE			0x00
+#define CEC_OP_MENU_REQUEST_DEACTIVATE			0x01
+#define CEC_OP_MENU_REQUEST_QUERY			0x02
+
+#define CEC_MSG_MENU_STATUS				0x8e
+/* Menu State Operand (menu_state) */
+#define CEC_OP_MENU_STATE_ACTIVATED			0x00
+#define CEC_OP_MENU_STATE_DEACTIVATED			0x01
+
+#define CEC_MSG_USER_CONTROL_PRESSED			0x44
+/* UI Broadcast Type Operand (ui_bcast_type) */
+#define CEC_OP_UI_BCAST_TYPE_TOGGLE_ALL			0x00
+#define CEC_OP_UI_BCAST_TYPE_TOGGLE_DIG_ANA		0x01
+#define CEC_OP_UI_BCAST_TYPE_ANALOGUE			0x10
+#define CEC_OP_UI_BCAST_TYPE_ANALOGUE_T			0x20
+#define CEC_OP_UI_BCAST_TYPE_ANALOGUE_CABLE		0x30
+#define CEC_OP_UI_BCAST_TYPE_ANALOGUE_SAT		0x40
+#define CEC_OP_UI_BCAST_TYPE_DIGITAL			0x50
+#define CEC_OP_UI_BCAST_TYPE_DIGITAL_T			0x60
+#define CEC_OP_UI_BCAST_TYPE_DIGITAL_CABLE		0x70
+#define CEC_OP_UI_BCAST_TYPE_DIGITAL_SAT		0x80
+#define CEC_OP_UI_BCAST_TYPE_DIGITAL_COM_SAT		0x90
+#define CEC_OP_UI_BCAST_TYPE_DIGITAL_COM_SAT2		0x91
+#define CEC_OP_UI_BCAST_TYPE_IP				0xa0
+/* UI Sound Presentation Control Operand (ui_snd_pres_ctl) */
+#define CEC_OP_UI_SND_PRES_CTL_DUAL_MONO		0x10
+#define CEC_OP_UI_SND_PRES_CTL_KARAOKE			0x20
+#define CEC_OP_UI_SND_PRES_CTL_DOWNMIX			0x80
+#define CEC_OP_UI_SND_PRES_CTL_REVERB			0x90
+#define CEC_OP_UI_SND_PRES_CTL_EQUALIZER		0xa0
+#define CEC_OP_UI_SND_PRES_CTL_BASS_UP			0xb1
+#define CEC_OP_UI_SND_PRES_CTL_BASS_NEUTRAL		0xb2
+#define CEC_OP_UI_SND_PRES_CTL_BASS_DOWN		0xb3
+#define CEC_OP_UI_SND_PRES_CTL_TREBLE_UP		0xc1
+#define CEC_OP_UI_SND_PRES_CTL_TREBLE_NEUTRAL		0xc2
+#define CEC_OP_UI_SND_PRES_CTL_TREBLE_DOWN		0xc3
+
+#define CEC_MSG_USER_CONTROL_RELEASED			0x45
+
+
+/* Remote Control Passthrough Feature */
+
+/*
+ * Has also:
+ *	CEC_MSG_USER_CONTROL_PRESSED
+ *	CEC_MSG_USER_CONTROL_RELEASED
+ */
+
+
+/* Power Status Feature */
+#define CEC_MSG_GIVE_DEVICE_POWER_STATUS		0x8f
+#define CEC_MSG_REPORT_POWER_STATUS			0x90
+/* Power Status Operand (pwr_state) */
+#define CEC_OP_POWER_STATUS_ON				0
+#define CEC_OP_POWER_STATUS_STANDBY			1
+#define CEC_OP_POWER_STATUS_TO_ON			2
+#define CEC_OP_POWER_STATUS_TO_STANDBY			3
+
+
+/* General Protocol Messages */
+#define CEC_MSG_FEATURE_ABORT				0x00
+/* Abort Reason Operand (reason) */
+#define CEC_OP_ABORT_UNRECOGNIZED_OP			0
+#define CEC_OP_ABORT_INCORRECT_MODE			1
+#define CEC_OP_ABORT_NO_SOURCE				2
+#define CEC_OP_ABORT_INVALID_OP				3
+#define CEC_OP_ABORT_REFUSED				4
+#define CEC_OP_ABORT_UNDETERMINED			5
+
+#define CEC_MSG_ABORT					0xff
+
+
+/* System Audio Control Feature */
+
+/*
+ * Has also:
+ *	CEC_MSG_USER_CONTROL_PRESSED
+ *	CEC_MSG_USER_CONTROL_RELEASED
+ */
+#define CEC_MSG_GIVE_AUDIO_STATUS			0x71
+#define CEC_MSG_GIVE_SYSTEM_AUDIO_MODE_STATUS		0x7d
+#define CEC_MSG_REPORT_AUDIO_STATUS			0x7a
+/* Audio Mute Status Operand (aud_mute_status) */
+#define CEC_OP_AUD_MUTE_STATUS_OFF			0
+#define CEC_OP_AUD_MUTE_STATUS_ON			1
+
+#define CEC_MSG_REPORT_SHORT_AUDIO_DESCRIPTOR		0xa3
+#define CEC_MSG_REQUEST_SHORT_AUDIO_DESCRIPTOR		0xa4
+#define CEC_MSG_SET_SYSTEM_AUDIO_MODE			0x72
+/* System Audio Status Operand (sys_aud_status) */
+#define CEC_OP_SYS_AUD_STATUS_OFF			0
+#define CEC_OP_SYS_AUD_STATUS_ON			1
+
+#define CEC_MSG_SYSTEM_AUDIO_MODE_REQUEST		0x70
+#define CEC_MSG_SYSTEM_AUDIO_MODE_STATUS		0x7e
+/* Audio Format ID Operand (audio_format_id) */
+#define CEC_OP_AUD_FMT_ID_CEA861			0
+#define CEC_OP_AUD_FMT_ID_CEA861_CXT			1
+
+
+/* Audio Rate Control Feature */
+#define CEC_MSG_SET_AUDIO_RATE				0x9a
+/* Audio Rate Operand (audio_rate) */
+#define CEC_OP_AUD_RATE_OFF				0
+#define CEC_OP_AUD_RATE_WIDE_STD			1
+#define CEC_OP_AUD_RATE_WIDE_FAST			2
+#define CEC_OP_AUD_RATE_WIDE_SLOW			3
+#define CEC_OP_AUD_RATE_NARROW_STD			4
+#define CEC_OP_AUD_RATE_NARROW_FAST			5
+#define CEC_OP_AUD_RATE_NARROW_SLOW			6
+
+
+/* Audio Return Channel Control Feature */
+#define CEC_MSG_INITIATE_ARC				0xc0
+#define CEC_MSG_REPORT_ARC_INITIATED			0xc1
+#define CEC_MSG_REPORT_ARC_TERMINATED			0xc2
+#define CEC_MSG_REQUEST_ARC_INITIATION			0xc3
+#define CEC_MSG_REQUEST_ARC_TERMINATION			0xc4
+#define CEC_MSG_TERMINATE_ARC				0xc5
+
+
+/* Dynamic Audio Lipsync Feature */
+/* Only for CEC 2.0 and up */
+#define CEC_MSG_REQUEST_CURRENT_LATENCY			0xa7
+#define CEC_MSG_REPORT_CURRENT_LATENCY			0xa8
+/* Low Latency Mode Operand (low_latency_mode) */
+#define CEC_OP_LOW_LATENCY_MODE_OFF			0
+#define CEC_OP_LOW_LATENCY_MODE_ON			1
+/* Audio Output Compensated Operand (audio_out_compensated) */
+#define CEC_OP_AUD_OUT_COMPENSATED_NA			0
+#define CEC_OP_AUD_OUT_COMPENSATED_DELAY		1
+#define CEC_OP_AUD_OUT_COMPENSATED_NO_DELAY		2
+#define CEC_OP_AUD_OUT_COMPENSATED_PARTIAL_DELAY	3
+
+
+/* Capability Discovery and Control Feature */
+#define CEC_MSG_CDC_MESSAGE				0xf8
+/* Ethernet-over-HDMI: nobody ever does this... */
+#define CEC_MSG_CDC_HEC_INQUIRE_STATE			0x00
+#define CEC_MSG_CDC_HEC_REPORT_STATE			0x01
+/* HEC Functionality State Operand (hec_func_state) */
+#define CEC_OP_HEC_FUNC_STATE_NOT_SUPPORTED		0
+#define CEC_OP_HEC_FUNC_STATE_INACTIVE			1
+#define CEC_OP_HEC_FUNC_STATE_ACTIVE			2
+#define CEC_OP_HEC_FUNC_STATE_ACTIVATION_FIELD		3
+/* Host Functionality State Operand (host_func_state) */
+#define CEC_OP_HOST_FUNC_STATE_NOT_SUPPORTED		0
+#define CEC_OP_HOST_FUNC_STATE_INACTIVE			1
+#define CEC_OP_HOST_FUNC_STATE_ACTIVE			2
+/* ENC Functionality State Operand (enc_func_state) */
+#define CEC_OP_ENC_FUNC_STATE_EXT_CON_NOT_SUPPORTED	0
+#define CEC_OP_ENC_FUNC_STATE_EXT_CON_INACTIVE		1
+#define CEC_OP_ENC_FUNC_STATE_EXT_CON_ACTIVE		2
+/* CDC Error Code Operand (cdc_errcode) */
+#define CEC_OP_CDC_ERROR_CODE_NONE			0
+#define CEC_OP_CDC_ERROR_CODE_CAP_UNSUPPORTED		1
+#define CEC_OP_CDC_ERROR_CODE_WRONG_STATE		2
+#define CEC_OP_CDC_ERROR_CODE_OTHER			3
+/* HEC Support Operand (hec_support) */
+#define CEC_OP_HEC_SUPPORT_NO				0
+#define CEC_OP_HEC_SUPPORT_YES				1
+/* HEC Activation Operand (hec_activation) */
+#define CEC_OP_HEC_ACTIVATION_ON			0
+#define CEC_OP_HEC_ACTIVATION_OFF			1
+
+#define CEC_MSG_CDC_HEC_SET_STATE_ADJACENT		0x02
+#define CEC_MSG_CDC_HEC_SET_STATE			0x03
+/* HEC Set State Operand (hec_set_state) */
+#define CEC_OP_HEC_SET_STATE_DEACTIVATE			0
+#define CEC_OP_HEC_SET_STATE_ACTIVATE			1
+
+#define CEC_MSG_CDC_HEC_REQUEST_DEACTIVATION		0x04
+#define CEC_MSG_CDC_HEC_NOTIFY_ALIVE			0x05
+#define CEC_MSG_CDC_HEC_DISCOVER			0x06
+/* Hotplug Detect messages */
+#define CEC_MSG_CDC_HPD_SET_STATE			0x10
+/* HPD State Operand (hpd_state) */
+#define CEC_OP_HPD_STATE_CP_EDID_DISABLE		0
+#define CEC_OP_HPD_STATE_CP_EDID_ENABLE			1
+#define CEC_OP_HPD_STATE_CP_EDID_DISABLE_ENABLE		2
+#define CEC_OP_HPD_STATE_EDID_DISABLE			3
+#define CEC_OP_HPD_STATE_EDID_ENABLE			4
+#define CEC_OP_HPD_STATE_EDID_DISABLE_ENABLE		5
+#define CEC_MSG_CDC_HPD_REPORT_STATE			0x11
+/* HPD Error Code Operand (hpd_error) */
+#define CEC_OP_HPD_ERROR_NONE				0
+#define CEC_OP_HPD_ERROR_INITIATOR_NOT_CAPABLE		1
+#define CEC_OP_HPD_ERROR_INITIATOR_WRONG_STATE		2
+#define CEC_OP_HPD_ERROR_OTHER				3
+#define CEC_OP_HPD_ERROR_NONE_NO_VIDEO			4
+
+/* End of Messages */
+
+/* Helper functions to identify the 'special' CEC devices */
+
+static inline bool cec_is_2nd_tv(const struct cec_log_addrs *las)
+{
+	/*
+	 * It is a second TV if the logical address is 14 or 15 and the
+	 * primary device type is a TV.
+	 */
+	return las->num_log_addrs &&
+	       las->log_addr[0] >= CEC_LOG_ADDR_SPECIFIC &&
+	       las->primary_device_type[0] == CEC_OP_PRIM_DEVTYPE_TV;
+}
+
+static inline bool cec_is_processor(const struct cec_log_addrs *las)
+{
+	/*
+	 * It is a processor if the logical address is 12-15 and the
+	 * primary device type is a Processor.
+	 */
+	return las->num_log_addrs &&
+	       las->log_addr[0] >= CEC_LOG_ADDR_BACKUP_1 &&
+	       las->primary_device_type[0] == CEC_OP_PRIM_DEVTYPE_PROCESSOR;
+}
+
+static inline bool cec_is_switch(const struct cec_log_addrs *las)
+{
+	/*
+	 * It is a switch if the logical address is 15 and the
+	 * primary device type is a Switch and the CDC-Only flag is not set.
+	 */
+	return las->num_log_addrs == 1 &&
+	       las->log_addr[0] == CEC_LOG_ADDR_UNREGISTERED &&
+	       las->primary_device_type[0] == CEC_OP_PRIM_DEVTYPE_SWITCH &&
+	       !(las->flags & CEC_LOG_ADDRS_FL_CDC_ONLY);
+}
+
+static inline bool cec_is_cdc_only(const struct cec_log_addrs *las)
+{
+	/*
+	 * It is a CDC-only device if the logical address is 15 and the
+	 * primary device type is a Switch and the CDC-Only flag is set.
+	 */
+	return las->num_log_addrs == 1 &&
+	       las->log_addr[0] == CEC_LOG_ADDR_UNREGISTERED &&
+	       las->primary_device_type[0] == CEC_OP_PRIM_DEVTYPE_SWITCH &&
+	       (las->flags & CEC_LOG_ADDRS_FL_CDC_ONLY);
+}
+
+#endif
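[Editorial illustration, not part of the patch] As a sketch of how the opcodes above appear on the wire: a CEC frame starts with a header block carrying the initiator and destination logical addresses, then the opcode and operands. The cec_hdr() helper and the transport used to send the frame are assumptions; only the opcode macro comes from this header.

#include <stddef.h>
#include <stdint.h>

/* header block: initiator in the high nibble, destination in the low one */
static inline uint8_t cec_hdr(uint8_t initiator, uint8_t destination)
{
	return (uint8_t)((initiator << 4) | (destination & 0x0f));
}

/* Build a 2-byte "Give Device Power Status" frame; the addressed device
 * is expected to answer with CEC_MSG_REPORT_POWER_STATUS. */
static size_t build_give_power_status(uint8_t *frame, uint8_t init, uint8_t dest)
{
	frame[0] = cec_hdr(init, dest);
	frame[1] = CEC_MSG_GIVE_DEVICE_POWER_STATUS;	/* 0x8f */
	return 2;
}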
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/include/uapi/linux/coresight-stm.h	2019-01-22 16:16:28.543292047 +0100
@@ -0,0 +1,21 @@
+#ifndef __UAPI_CORESIGHT_STM_H_
+#define __UAPI_CORESIGHT_STM_H_
+
+enum {
+	OST_ENTITY_NONE			= 0x00,
+	OST_ENTITY_FTRACE_EVENTS	= 0x01,
+	OST_ENTITY_TRACE_PRINTK		= 0x02,
+	OST_ENTITY_TRACE_MARKER		= 0x04,
+	OST_ENTITY_DEV_NODE		= 0x08,
+	OST_ENTITY_DIAG			= 0xEE,
+	OST_ENTITY_QVIEW		= 0xFE,
+	OST_ENTITY_MAX			= 0xFF,
+};
+
+enum {
+	STM_OPTION_NONE			= 0x0,
+	STM_OPTION_TIMESTAMPED		= 0x08,
+	STM_OPTION_GUARANTEED		= 0x80,
+};
+
+#endif
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/include/uapi/linux/epm_adc.h	2019-01-22 16:16:28.547292083 +0100
@@ -0,0 +1,215 @@
+#ifndef _UAPI_EPM_ADC_H
+#define _UAPI_EPM_ADC_H
+
+struct epm_chan_request {
+	/* EPM ADC device index. 0 - ADC1, 1 - ADC2 */
+	uint32_t device_idx;
+	/* Channel number within the EPM ADC device  */
+	uint32_t channel_idx;
+	/* The data meaningful for each individual channel, whether it is
+	 * voltage, current, etc. */
+	int32_t physical;
+};
+
+struct epm_psoc_init_resp {
+	uint8_t	cmd;
+	uint8_t	version;
+	uint8_t	compatible_ver;
+	uint8_t	firm_ver[3];
+	uint8_t	num_dev;
+	uint8_t	num_channel;
+};
+
+struct epm_psoc_channel_configure {
+	uint8_t		cmd;
+	uint8_t		device_num;
+	uint32_t	channel_num;
+};
+
+struct epm_psoc_set_avg {
+	uint8_t	cmd;
+	uint8_t	avg_period;
+	uint8_t	return_code;
+};
+
+struct epm_psoc_get_data {
+	uint8_t		cmd;
+	uint8_t		dev_num;
+	uint8_t		chan_num;
+	uint32_t	timestamp_resp_value;
+	int16_t		reading_raw;
+	int32_t		reading_value;
+};
+
+struct epm_psoc_get_buffered_data {
+	uint8_t		cmd;
+	uint8_t		dev_num;
+	uint8_t		status_mask;
+	uint8_t		chan_idx;
+	uint32_t	chan_mask;
+	uint32_t	timestamp_start;
+	uint32_t	timestamp_end;
+	uint8_t		buff_data[48];
+};
+
+struct epm_psoc_system_time_stamp {
+	uint8_t		cmd;
+	uint32_t	timestamp;
+};
+
+struct epm_psoc_set_channel {
+	uint8_t		cmd;
+	uint8_t		dev_num;
+	uint32_t	channel_mask;
+};
+
+struct result_buffer {
+	uint32_t	channel;
+	uint32_t	avg_buffer_sample;
+	uint32_t	result;
+};
+
+struct epm_psoc_get_avg_buffered_switch_data {
+	uint8_t			cmd;
+	uint8_t			status;
+	uint32_t		timestamp_start;
+	uint32_t		channel_mask;
+	uint8_t			avg_data[54];
+	struct result_buffer	data[54];
+};
+
+struct epm_psoc_set_channel_switch {
+	uint8_t		cmd;
+	uint8_t		dev;
+	uint32_t	delay;
+};
+
+struct epm_psoc_set_vadc {
+	uint8_t		cmd;
+	uint8_t		vadc_dev;
+	uint32_t	vadc_voltage;
+};
+
+struct epm_chan_properties {
+	uint32_t resistorvalue;
+	uint32_t gain;
+};
+
+struct epm_marker_level {
+	uint8_t		level;
+};
+
+struct epm_gpio_buffer_request {
+	uint8_t		cmd;
+	uint8_t		bitmask_monitor_pin;
+	uint8_t		status;
+};
+
+struct epm_get_gpio_buffer_resp {
+	uint8_t		cmd;
+	uint8_t		status;
+	uint8_t		bitmask_monitor_pin;
+	uint32_t	timestamp;
+};
+
+struct epm_get_high_res_avg_data {
+	uint8_t		cmd;
+	uint8_t		status;
+	uint32_t	channel_mask;
+	uint32_t	timestamp;
+	uint8_t		buf_data[54];
+};
+
+struct epm_generic_request {
+	uint8_t		buf[64];
+};
+
+
+#define EPM_ADC_IOCTL_CODE		0x91
+
+#define EPM_ADC_REQUEST		_IOWR(EPM_ADC_IOCTL_CODE, 1,	\
+					struct epm_chan_request)
+
+#define EPM_ADC_INIT		_IOR(EPM_ADC_IOCTL_CODE, 2,	\
+					     uint32_t)
+
+#define EPM_ADC_DEINIT		_IOR(EPM_ADC_IOCTL_CODE, 3,	\
+					     uint32_t)
+
+#define EPM_MARKER1_REQUEST	_IOR(EPM_ADC_IOCTL_CODE, 90,	\
+						uint32_t)
+
+
+#define EPM_MARKER1_RELEASE	_IOR(EPM_ADC_IOCTL_CODE, 91,	\
+						uint32_t)
+
+#define EPM_MARKER1_SET_LEVEL	_IOWR(EPM_ADC_IOCTL_CODE, 92,	\
+						uint32_t)
+
+#define EPM_MARKER2_REQUEST	_IOR(EPM_ADC_IOCTL_CODE, 93,	\
+						uint32_t)
+
+#define EPM_MARKER2_SET_LEVEL	_IOWR(EPM_ADC_IOCTL_CODE, 94,	\
+						uint32_t)
+
+#define EPM_MARKER2_RELEASE	_IOR(EPM_ADC_IOCTL_CODE, 95,	\
+						uint32_t)
+
+#define EPM_PSOC_ADC_INIT		_IOWR(EPM_ADC_IOCTL_CODE, 4, \
+					struct epm_psoc_init_resp)
+
+#define EPM_PSOC_ADC_CHANNEL_ENABLE	_IOWR(EPM_ADC_IOCTL_CODE, 5, \
+					struct epm_psoc_channel_configure)
+
+#define EPM_PSOC_ADC_CHANNEL_DISABLE	_IOWR(EPM_ADC_IOCTL_CODE, 6, \
+					struct epm_psoc_channel_configure)
+
+#define EPM_PSOC_ADC_SET_AVERAGING	_IOWR(EPM_ADC_IOCTL_CODE, 7, \
+					struct epm_psoc_set_avg)
+
+#define EPM_PSOC_ADC_GET_LAST_MEASUREMENT	_IOWR(EPM_ADC_IOCTL_CODE, 8, \
+						struct epm_psoc_get_data)
+
+#define EPM_PSOC_ADC_GET_BUFFERED_DATA		_IOWR(EPM_ADC_IOCTL_CODE, 9, \
+					struct epm_psoc_get_buffered_data)
+
+#define EPM_PSOC_ADC_GET_SYSTEM_TIMESTAMP	_IOWR(EPM_ADC_IOCTL_CODE, 10, \
+					struct epm_psoc_system_time_stamp)
+
+#define EPM_PSOC_ADC_SET_SYSTEM_TIMESTAMP	_IOWR(EPM_ADC_IOCTL_CODE, 11, \
+					struct epm_psoc_system_time_stamp)
+
+#define EPM_PSOC_ADC_GET_AVERAGE_DATA		_IOWR(EPM_ADC_IOCTL_CODE, 12, \
+				struct epm_psoc_get_avg_buffered_switch_data)
+
+#define EPM_PSOC_SET_CHANNEL_SWITCH		_IOWR(EPM_ADC_IOCTL_CODE, 13, \
+					struct epm_psoc_set_channel_switch)
+
+#define EPM_PSOC_CLEAR_BUFFER			_IOWR(EPM_ADC_IOCTL_CODE, 14, \
+						uint32_t)
+
+#define EPM_PSOC_ADC_SET_VADC_REFERENCE		_IOWR(EPM_ADC_IOCTL_CODE, 15, \
+						struct epm_psoc_set_vadc)
+
+#define EPM_PSOC_ADC_DEINIT		_IOWR(EPM_ADC_IOCTL_CODE, 16,	\
+							     uint32_t)
+
+#define EPM_PSOC_GPIO_BUFFER_REQUEST	_IOWR(EPM_ADC_IOCTL_CODE, 17,	\
+					struct epm_gpio_buffer_request)
+
+#define EPM_PSOC_GET_GPIO_BUFFER_DATA	_IOWR(EPM_ADC_IOCTL_CODE, 18,	\
+					struct epm_get_gpio_buffer_resp)
+
+#define EPM_PSOC_PAUSE_CONVERSION_REQUEST _IOWR(EPM_ADC_IOCTL_CODE, 19,	\
+								uint32_t)
+
+#define EPM_PSOC_UNPAUSE_CONVERSION_REQUEST _IOWR(EPM_ADC_IOCTL_CODE, 20, \
+								uint32_t)
+
+#define EPM_PSOC_16_BIT_AVERAGED_REQUEST	_IOWR(EPM_ADC_IOCTL_CODE, 21, \
+					struct epm_get_high_res_avg_data)
+
+#define EPM_PSOC_GENERIC_REQUEST		_IOWR(EPM_ADC_IOCTL_CODE, 22, \
+					struct epm_generic_request)
+
+#endif /* _UAPI_EPM_ADC_H */
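[Editorial illustration, not part of the patch] A minimal usage sketch for the request ioctl above; the "/dev/epm_adc" node name is an assumption, while the ioctl and structure come from this header.

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/epm_adc.h>	/* this header, once installed */

int epm_read_channel_example(void)
{
	/* ask for channel 3 of ADC1; the kernel fills in .physical */
	struct epm_chan_request req = {
		.device_idx  = 0,
		.channel_idx = 3,
	};
	int fd = open("/dev/epm_adc", O_RDWR);	/* assumed node name */

	if (fd < 0)
		return -1;
	if (ioctl(fd, EPM_ADC_REQUEST, &req) == 0)
		printf("physical reading: %d\n", (int)req.physical);
	close(fd);
	return 0;
}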
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/include/uapi/linux/esoc_ctrl.h	2019-01-22 16:16:28.547292083 +0100
@@ -0,0 +1,76 @@
+#ifndef _UAPI_ESOC_CTRL_H_
+#define _UAPI_ESOC_CTRL_H_
+
+#define ESOC_CODE		0xCC
+
+#define ESOC_CMD_EXE		_IOW(ESOC_CODE, 1, unsigned int)
+#define ESOC_WAIT_FOR_REQ	_IOR(ESOC_CODE, 2, unsigned int)
+#define ESOC_NOTIFY		_IOW(ESOC_CODE, 3, unsigned int)
+#define ESOC_GET_STATUS		_IOR(ESOC_CODE, 4, unsigned int)
+#define ESOC_GET_ERR_FATAL	_IOR(ESOC_CODE, 5, unsigned int)
+#define ESOC_WAIT_FOR_CRASH	_IOR(ESOC_CODE, 6, unsigned int)
+#define ESOC_REG_REQ_ENG	_IO(ESOC_CODE, 7)
+#define ESOC_REG_CMD_ENG	_IO(ESOC_CODE, 8)
+
+/* Link types for communication with external SoCs */
+#define HSIC		"HSIC"
+#define HSICPCIe	"HSIC+PCIe"
+#define PCIe		"PCIe"
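+/* the self-referencing define below lets userspace probe for
+ * shutdown-request support with #ifdef ESOC_REQ_SEND_SHUTDOWN */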
+#define ESOC_REQ_SEND_SHUTDOWN	ESOC_REQ_SEND_SHUTDOWN
+
+enum esoc_evt {
+	ESOC_RUN_STATE = 0x1,
+	ESOC_UNEXPECTED_RESET,
+	ESOC_ERR_FATAL,
+	ESOC_IN_DEBUG,
+	ESOC_REQ_ENG_ON,
+	ESOC_REQ_ENG_OFF,
+	ESOC_CMD_ENG_ON,
+	ESOC_CMD_ENG_OFF,
+	ESOC_INVALID_STATE,
+};
+
+enum esoc_cmd {
+	ESOC_PWR_ON = 1,
+	ESOC_PWR_OFF,
+	ESOC_FORCE_PWR_OFF,
+	ESOC_RESET,
+	ESOC_PREPARE_DEBUG,
+	ESOC_EXE_DEBUG,
+	ESOC_EXIT_DEBUG,
+};
+
+enum esoc_notify {
+	ESOC_IMG_XFER_DONE = 1,
+	ESOC_BOOT_DONE,
+	ESOC_BOOT_FAIL,
+	ESOC_IMG_XFER_RETRY,
+	ESOC_IMG_XFER_FAIL,
+	ESOC_UPGRADE_AVAILABLE,
+	ESOC_DEBUG_DONE,
+	ESOC_DEBUG_FAIL,
+	ESOC_PRIMARY_CRASH,
+	ESOC_PRIMARY_REBOOT,
+};
+
+enum esoc_req {
+	ESOC_REQ_IMG = 1,
+	ESOC_REQ_DEBUG,
+	ESOC_REQ_SHUTDOWN,
+	ESOC_REQ_SEND_SHUTDOWN,
+};
+
+#ifdef __KERNEL__
+/**
+ * struct esoc_handle: Handle for clients of esoc
+ * @name: name of the external soc.
+ * @link: link of external soc.
+ * @id: id of external soc.
+ */
+struct esoc_handle {
+	const char *name;
+	const char *link;
+	unsigned int id;
+};
+#endif
+#endif
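[Editorial illustration, not part of the patch] A sketch of a request-engine loop built on the ioctls above. The node name, and the convention that ESOC_WAIT_FOR_REQ returns an esoc_req code through its argument, are assumptions.

#include <fcntl.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/esoc_ctrl.h>

static int esoc_req_loop(const char *node)
{
	unsigned int req, note;
	int fd = open(node, O_RDWR);

	if (fd < 0)
		return -1;
	if (ioctl(fd, ESOC_REG_REQ_ENG) < 0) {	/* become the request engine */
		close(fd);
		return -1;
	}
	while (ioctl(fd, ESOC_WAIT_FOR_REQ, &req) == 0) {
		if (req == ESOC_REQ_IMG) {
			/* ...transfer the boot image here, then acknowledge */
			note = ESOC_IMG_XFER_DONE;
			ioctl(fd, ESOC_NOTIFY, &note);
		}
	}
	close(fd);
	return 0;
}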
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/include/uapi/linux/exfat_user.h	2019-01-25 20:32:47.251752438 +0100
@@ -0,0 +1,47 @@
+/*
+ * exfat_user.h for exfat
+ * Created by <nschichan@freebox.fr> on Fri Aug 23 15:31:08 2013
+ */
+
+#ifndef __EXFAT_USER_H
+# define __EXFAT_USER_H
+
+struct exfat_fragment {
+	uint32_t	fcluster_start;
+	uint32_t	dcluster_start;
+	uint32_t	nr_clusters;
+	uint64_t	sector_start;
+};
+
+struct exfat_fragment_head {
+	uint32_t		fcluster_start;
+	uint32_t		nr_fragments;
+	uint32_t		sector_size;
+	uint32_t		cluster_size;
+	struct exfat_fragment	fragments[0];
+};
+
+struct exfat_bitmap_data {
+	uint32_t		start_cluster;
+	uint32_t		nr_clusters;
+	uint64_t		sector_start;
+	uint64_t		nr_sectors;
+};
+
+struct exfat_bitmap_head {
+	uint32_t			start_cluster;
+	uint32_t			nr_entries;
+	struct exfat_bitmap_data	entries[0];
+};
+
+struct exfat_dirent_head {
+	uint32_t offset;
+	uint32_t nr_entries;
+	uint8_t entries[0];
+};
+
+#define EXFAT_IOCGETFRAGMENTS	_IOR('X', 0x01, struct exfat_fragment_head)
+#define EXFAT_IOCGETBITMAP	_IOR('X', 0x02, struct exfat_bitmap_head)
+#define EXFAT_IOCGETDIRENTS	_IOR('X', 0x03, struct exfat_dirent_head)
+
+#endif /* !__EXFAT_USER_H */
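[Editorial illustration, not part of the patch] The *_head structures above end in a zero-length array, so a caller allocates the head plus room for the entries it wants back and lets the ioctl fill them in. A sketch; the convention that nr_fragments carries the capacity on input is an assumption.

#include <stdint.h>
#include <stdlib.h>
#include <sys/ioctl.h>

struct exfat_fragment_head *
exfat_get_fragments_example(int fd, uint32_t fcluster, uint32_t nr)
{
	struct exfat_fragment_head *head;

	head = calloc(1, sizeof(*head) + nr * sizeof(struct exfat_fragment));
	if (!head)
		return NULL;
	head->fcluster_start = fcluster;	/* where to start mapping */
	head->nr_fragments = nr;		/* capacity of fragments[] */
	if (ioctl(fd, EXFAT_IOCGETFRAGMENTS, head) < 0) {
		free(head);
		return NULL;
	}
	return head;
}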
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/include/uapi/linux/fbxatm.h	2019-01-22 16:16:28.547292083 +0100
@@ -0,0 +1,159 @@
+/*
+ * Generic fbxatm definition, exported to userspace
+ */
+#ifndef LINUX_FBXATM_H_
+#define LINUX_FBXATM_H_
+
+#include <linux/types.h>
+#include <linux/if.h>
+
+#define FBXATM_IOCTL_MAGIC		0xd3
+
+/* allow userspace usage without up to date kernel headers */
+#ifndef PF_FBXATM
+#define PF_FBXATM			32
+#define AF_FBXATM			PF_FBXATM
+#endif
+
+struct fbxatm_vcc_id {
+	int				dev_idx;
+	__u32				vpi;
+	__u32				vci;
+};
+
+enum fbxatm_vcc_user {
+	FBXATM_VCC_USER_NONE = 0,
+	FBXATM_VCC_USER_2684,
+	FBXATM_VCC_USER_PPPOA,
+};
+
+enum fbxatm_vcc_traffic_class {
+	FBXATM_VCC_TC_UBR_NO_PCR = 0,
+	FBXATM_VCC_TC_UBR,
+};
+
+struct fbxatm_vcc_qos {
+	__u32				traffic_class;
+	__u32				max_sdu;
+	__u32				max_buffered_pkt;
+	__u32				priority;
+	__u32				rx_priority;
+};
+
+
+/*
+ * VCC related
+ */
+struct fbxatm_vcc_params {
+	/* ADD/DEL/GET */
+	struct fbxatm_vcc_id		id;
+
+	/* ADD/GET */
+	struct fbxatm_vcc_qos		qos;
+
+	/* GET */
+	enum fbxatm_vcc_user		user;
+};
+
+#define FBXATM_IOCADD		_IOW(FBXATM_IOCTL_MAGIC,	1,	\
+					struct fbxatm_vcc_params)
+
+#define FBXATM_IOCDEL		_IOR(FBXATM_IOCTL_MAGIC,	2,	\
+					struct fbxatm_vcc_params)
+
+#define FBXATM_IOCGET		_IOWR(FBXATM_IOCTL_MAGIC,	3,	\
+					struct fbxatm_vcc_params)
+
+
+struct fbxatm_vcc_drop_params {
+	struct fbxatm_vcc_id		id;
+	unsigned int			drop_count;
+};
+
+#define FBXATM_IOCDROP		_IOWR(FBXATM_IOCTL_MAGIC,	5,	\
+					struct fbxatm_vcc_drop_params)
+
+/*
+ * OAM related
+ */
+enum fbxatm_oam_ping_type {
+	FBXATM_OAM_PING_SEG_F4	= 0,
+	FBXATM_OAM_PING_SEG_F5,
+	FBXATM_OAM_PING_E2E_F4,
+	FBXATM_OAM_PING_E2E_F5,
+};
+
+struct fbxatm_oam_ping_req {
+	/* only dev_idx for F4 */
+	struct fbxatm_vcc_id		id;
+
+	__u8				llid[16];
+	enum fbxatm_oam_ping_type	type;
+};
+
+#define FBXATM_IOCOAMPING	_IOWR(FBXATM_IOCTL_MAGIC,	10,	\
+				      struct fbxatm_oam_ping_req)
+
+
+/*
+ * PPPOA related
+ */
+enum fbxatm_pppoa_encap {
+	FBXATM_EPPPOA_AUTODETECT = 0,
+	FBXATM_EPPPOA_VCMUX,
+	FBXATM_EPPPOA_LLC,
+};
+
+struct fbxatm_pppoa_vcc_params {
+	struct fbxatm_vcc_id		id;
+	__u32				encap;
+	__u32				cur_encap;
+};
+
+#define FBXATM_PPPOA_IOCADD	_IOW(FBXATM_IOCTL_MAGIC,	20,	\
+					struct fbxatm_pppoa_vcc_params)
+
+#define FBXATM_PPPOA_IOCDEL	_IOW(FBXATM_IOCTL_MAGIC,	21,	\
+					struct fbxatm_pppoa_vcc_params)
+
+#define FBXATM_PPPOA_IOCGET	_IOWR(FBXATM_IOCTL_MAGIC,	22,	\
+					struct fbxatm_pppoa_vcc_params)
+
+
+
+/*
+ * 2684 related
+ */
+enum fbxatm_2684_encap {
+	FBXATM_E2684_VCMUX = 0,
+	FBXATM_E2684_LLC,
+};
+
+enum fbxatm_2684_payload {
+	FBXATM_P2684_BRIDGE = 0,
+	FBXATM_P2684_ROUTED,
+};
+
+#define FBXATM_2684_MAX_VCC		8
+
+struct fbxatm_2684_vcc_params {
+	struct fbxatm_vcc_id		id_list[FBXATM_2684_MAX_VCC];
+	size_t				id_count;
+
+	__u32				encap;
+	__u32				payload;
+	char				dev_name[IFNAMSIZ];
+	__u8				perm_addr[6];
+};
+
+
+#define FBXATM_2684_IOCADD	_IOW(FBXATM_IOCTL_MAGIC,	30,	\
+					struct fbxatm_2684_vcc_params)
+
+#define FBXATM_2684_IOCDEL	_IOW(FBXATM_IOCTL_MAGIC,	31,	\
+					struct fbxatm_2684_vcc_params)
+
+#define FBXATM_2684_IOCGET	_IOWR(FBXATM_IOCTL_MAGIC,	32,	\
+					struct fbxatm_2684_vcc_params)
+
+#endif /* LINUX_FBXATM_H_ */
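[Editorial illustration, not part of the patch] A sketch of an end-to-end F5 OAM loopback request using the definitions above; how the control file descriptor is obtained is left out, and the VPI/VCI values are examples.

#include <string.h>
#include <sys/ioctl.h>

static int fbxatm_oam_ping_example(int ctl_fd)
{
	struct fbxatm_oam_ping_req req;

	memset(&req, 0, sizeof(req));
	req.id.dev_idx = 0;			/* first fbxatm device */
	req.id.vpi = 8;
	req.id.vci = 35;
	req.type = FBXATM_OAM_PING_E2E_F5;	/* llid stays zeroed for F5 */
	return ioctl(ctl_fd, FBXATM_IOCOAMPING, &req);
}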
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/include/uapi/linux/fips_status.h	2019-01-22 16:16:28.547292083 +0100
@@ -0,0 +1,33 @@
+#ifndef _UAPI_FIPS_STATUS__H
+#define _UAPI_FIPS_STATUS__H
+
+#include <linux/types.h>
+#include <linux/ioctl.h>
+
+/**
+* fips_status: global FIPS140-2 status
+* @FIPS140_STATUS_NA:
+*					Not a FIPS140-2 compliant build.
+*					The flag status won't
+*					change throughout
+*					the lifetime
+* @FIPS140_STATUS_PASS_CRYPTO:
+*					KAT self tests have passed.
+* @FIPS140_STATUS_QCRYPTO_ALLOWED:
+*					Integrity test has passed.
+* @FIPS140_STATUS_PASS:
+*					All tests have passed and the build
+*					is in FIPS140-2 mode
+* @FIPS140_STATUS_FAIL:
+*					One of the tests has failed.
+*					This will block all requests
+*					to crypto modules
+*/
+enum fips_status {
+		FIPS140_STATUS_NA				= 0,
+		FIPS140_STATUS_PASS_CRYPTO		= 1,
+		FIPS140_STATUS_QCRYPTO_ALLOWED	= 2,
+		FIPS140_STATUS_PASS				= 3,
+		FIPS140_STATUS_FAIL				= 0xFF
+};
+#endif /* _UAPI_FIPS_STATUS__H */
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/include/uapi/linux/habmm.h	2019-10-29 09:26:25.545221791 +0100
@@ -0,0 +1,158 @@
+#ifndef HABMM_H
+#define HABMM_H
+
+#include <linux/types.h>
+
+struct hab_send {
+	__u64 data;
+	__s32 vcid;
+	__u32 sizebytes;
+	__u32 flags;
+};
+
+struct hab_recv {
+	__u64 data;
+	__s32 vcid;
+	__u32 sizebytes;
+	__u32 flags;
+};
+
+struct hab_open {
+	__s32 vcid;
+	__u32 mmid;
+	__u32 timeout;
+	__u32 flags;
+};
+
+struct hab_close {
+	__s32 vcid;
+	__u32 flags;
+};
+
+struct hab_export {
+	__u64 buffer;
+	__s32 vcid;
+	__u32 sizebytes;
+	__u32 exportid;
+	__u32 flags;
+};
+
+struct hab_import {
+	__u64 index;
+	__u64 kva;
+	__s32 vcid;
+	__u32 sizebytes;
+	__u32 exportid;
+	__u32 flags;
+};
+
+struct hab_unexport {
+	__s32 vcid;
+	__u32 exportid;
+	__u32 flags;
+};
+
+struct hab_unimport {
+	__s32 vcid;
+	__u32 exportid;
+	__u64 kva;
+	__u32 flags;
+};
+
+#define HAB_IOC_TYPE 0x0A
+#define HAB_MAX_MSG_SIZEBYTES 0x1000
+#define HAB_MAX_EXPORT_SIZE 0x8000000
+
+#define HAB_MMID_CREATE(major, minor) ((major&0xFFFF) | ((minor&0xFF)<<16))
+
+#define MM_AUD_START	100
+#define MM_AUD_1	101
+#define MM_AUD_2	102
+#define MM_AUD_3	103
+#define MM_AUD_4	104
+#define MM_AUD_END	105
+
+#define MM_CAM_START	200
+#define MM_CAM_1	201
+#define MM_CAM_2	202
+#define MM_CAM_END	203
+
+#define MM_DISP_START	300
+#define MM_DISP_1	301
+#define MM_DISP_2	302
+#define MM_DISP_3	303
+#define MM_DISP_4	304
+#define MM_DISP_5	305
+#define MM_DISP_END	306
+
+#define MM_GFX_START	400
+#define MM_GFX		401
+#define MM_GFX_END	402
+
+#define MM_VID_START	500
+#define MM_VID		501
+#define MM_VID_END	502
+
+#define MM_MISC_START	600
+#define MM_MISC		601
+#define MM_MISC_END	602
+
+#define MM_QCPE_START	700
+#define MM_QCPE_VM1	701
+#define MM_QCPE_VM2	702
+#define MM_QCPE_VM3	703
+#define MM_QCPE_VM4	704
+#define MM_QCPE_END	705
+
+#define MM_CLK_START	800
+#define MM_CLK_VM1	801
+#define MM_CLK_VM2	802
+#define MM_CLK_END	803
+
+#define MM_ID_MAX	804
+
+#define HABMM_SOCKET_OPEN_FLAGS_SINGLE_BE_SINGLE_FE        0x00000000
+#define HABMM_SOCKET_OPEN_FLAGS_SINGLE_BE_SINGLE_DOMU      0x00000001
+#define HABMM_SOCKET_OPEN_FLAGS_SINGLE_BE_MULTI_DOMUS      0x00000002
+
+#define HABMM_SOCKET_SEND_FLAGS_NON_BLOCKING 0x00000001
+
+/*
+ * Collect cross-VM stats: the client provides a stat-buffer large enough to
+ * hold 2 sets of a 2-uint64_t pair to collect seconds and nano-seconds at the
+ * beginning of the stat-buffer. Stats are collected when the stat-buffer
+ * leaves VM1, then enters VM2.
+ */
+#define HABMM_SOCKET_SEND_FLAGS_XING_VM_STAT 0x00000002
+
+#define HABMM_SOCKET_RECV_FLAGS_NON_BLOCKING 0x00000001
+
+#define HABMM_EXP_MEM_TYPE_DMA 0x00000001
+
+#define HABMM_IMPORT_FLAGS_CACHED 0x00000001
+
+#define IOCTL_HAB_SEND \
+	_IOW(HAB_IOC_TYPE, 0x2, struct hab_send)
+
+#define IOCTL_HAB_RECV \
+	_IOWR(HAB_IOC_TYPE, 0x3, struct hab_recv)
+
+#define IOCTL_HAB_VC_OPEN \
+	_IOWR(HAB_IOC_TYPE, 0x4, struct hab_open)
+
+#define IOCTL_HAB_VC_CLOSE \
+	_IOW(HAB_IOC_TYPE, 0x5, struct hab_close)
+
+#define IOCTL_HAB_VC_EXPORT \
+	_IOWR(HAB_IOC_TYPE, 0x6, struct hab_export)
+
+#define IOCTL_HAB_VC_IMPORT \
+	_IOWR(HAB_IOC_TYPE, 0x7, struct hab_import)
+
+#define IOCTL_HAB_VC_UNEXPORT \
+	_IOW(HAB_IOC_TYPE, 0x8, struct hab_unexport)
+
+#define IOCTL_HAB_VC_UNIMPORT \
+	_IOW(HAB_IOC_TYPE, 0x9, struct hab_unimport)
+
+#endif /* HABMM_H */
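[Editorial illustration, not part of the patch] A sketch of opening a virtual channel and sending a buffer over it. The "/dev/hab" node name and the unit of .timeout are assumptions; the MMID composition and the structures come from this header.

#include <fcntl.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/habmm.h>

static int hab_send_example(const void *buf, uint32_t len)
{
	struct hab_open open_arg = {
		.mmid = HAB_MMID_CREATE(MM_DISP_1, 0),
		.timeout = 1000,		/* assumed to be milliseconds */
		.flags = HABMM_SOCKET_OPEN_FLAGS_SINGLE_BE_SINGLE_FE,
	};
	struct hab_send send_arg;
	int fd = open("/dev/hab", O_RDWR);	/* assumed node name */

	if (fd < 0 || ioctl(fd, IOCTL_HAB_VC_OPEN, &open_arg) < 0)
		return -1;
	send_arg.vcid = open_arg.vcid;		/* filled in by the open */
	send_arg.data = (uint64_t)(uintptr_t)buf;
	send_arg.sizebytes = len;
	send_arg.flags = 0;
	return ioctl(fd, IOCTL_HAB_SEND, &send_arg);
}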
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/include/uapi/linux/hbtp_input.h	2019-01-22 16:16:28.547292083 +0100
@@ -0,0 +1,82 @@
+#ifndef _UAPI_HBTP_INPUT_H
+#define _UAPI_HBTP_INPUT_H
+
+#include <linux/input.h>
+
+#define HBTP_MAX_FINGER		20
+#define HBTP_ABS_MT_FIRST	ABS_MT_TOUCH_MAJOR
+#define HBTP_ABS_MT_LAST	ABS_MT_TOOL_Y
+#define MAX_ROI_SIZE		144
+#define MAX_ACCEL_SIZE		128
+
+#define HBTP_EVENT_TYPE_DISPLAY	"EVENT_TYPE=HBTP_DISPLAY"
+
+struct hbtp_input_touch {
+	bool active;
+	__s32 tool;
+	__s32 x;
+	__s32 y;
+	__s32 pressure;
+	__s32 major;
+	__s32 minor;
+	__s32 orientation;
+};
+
+struct hbtp_sensor_data {
+	__s16 accelBuffer[MAX_ACCEL_SIZE];
+	__s16 ROI[MAX_ROI_SIZE];
+};
+
+struct hbtp_input_mt {
+	__s32 num_touches;
+	struct hbtp_input_touch touches[HBTP_MAX_FINGER];
+	struct timeval time_val;
+};
+
+struct hbtp_input_absinfo {
+	bool  active;
+	__u16 code;
+	__s32 minimum;
+	__s32 maximum;
+};
+
+enum hbtp_afe_power_cmd {
+	HBTP_AFE_POWER_ON,
+	HBTP_AFE_POWER_OFF,
+};
+
+struct hbtp_input_key {
+	__u32 code;
+	__s32 value;
+};
+
+enum hbtp_afe_signal {
+	HBTP_AFE_SIGNAL_ON_RESUME,
+	HBTP_AFE_SIGNAL_ON_SUSPEND,
+};
+
+enum hbtp_afe_power_ctrl {
+	HBTP_AFE_POWER_ENABLE_SYNC,
+	HBTP_AFE_POWER_ENABLE_SYNC_SIGNAL,
+};
+
+
+/* ioctl */
+#define HBTP_INPUT_IOCTL_BASE	'T'
+#define HBTP_SET_ABSPARAM	_IOW(HBTP_INPUT_IOCTL_BASE, 201, \
+					struct hbtp_input_absinfo *)
+#define HBTP_SET_TOUCHDATA	_IOW(HBTP_INPUT_IOCTL_BASE, 202, \
+					struct hbtp_input_mt)
+#define HBTP_SET_POWERSTATE	_IOW(HBTP_INPUT_IOCTL_BASE, 203, \
+					enum hbtp_afe_power_cmd)
+#define HBTP_SET_KEYDATA	_IOW(HBTP_INPUT_IOCTL_BASE, 204, \
+					struct hbtp_input_key)
+#define HBTP_SET_SYNCSIGNAL	_IOW(HBTP_INPUT_IOCTL_BASE, 205, \
+					enum hbtp_afe_signal)
+#define HBTP_SET_POWER_CTRL	_IOW(HBTP_INPUT_IOCTL_BASE, 206, \
+					enum hbtp_afe_power_ctrl)
+#define HBTP_SET_SENSORDATA	_IOW(HBTP_INPUT_IOCTL_BASE, 207, \
+					struct hbtp_sensor_data)
+
+#endif	/* _UAPI_HBTP_INPUT_H */
+
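[Editorial illustration, not part of the patch] A sketch of injecting one active touch through HBTP_SET_TOUCHDATA; obtaining the file descriptor and the chosen pressure value are assumptions.

#include <stdbool.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/time.h>

static int hbtp_report_touch_example(int fd, int x, int y)
{
	struct hbtp_input_mt mt;

	memset(&mt, 0, sizeof(mt));
	mt.num_touches = 1;
	mt.touches[0].active = true;
	mt.touches[0].x = x;
	mt.touches[0].y = y;
	mt.touches[0].pressure = 50;	/* arbitrary example value */
	gettimeofday(&mt.time_val, NULL);
	return ioctl(fd, HBTP_SET_TOUCHDATA, &mt);
}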
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/include/uapi/linux/hbtp_vm.h	2019-01-22 16:16:28.547292083 +0100
@@ -0,0 +1,27 @@
+#ifndef _HBTP_VM_H
+#define _HBTP_VM_H
+
+#include <linux/input.h>
+
+struct hbtp_vm_click {
+	int x;
+	int y;
+	int mask;
+};
+
+#define HBTP_VM_BUTTON_LEFT  0x00000001
+#define HBTP_VM_BUTTON_RIGHT 0x00000002
+#define HBTP_VM_BUTTON_DOWN  0x10000000
+#define HBTP_VM_BUTTON_UP    0x20000000
+
+/*
+ * ioctls
+ *
+ * Note: HBTP_VM_SET_TOUCHDATA and HBTP_VM_SEND_CLICK are defined against
+ * HBTP_INPUT_IOCTL_BASE and struct hbtp_input_mt, so <linux/hbtp_input.h>
+ * must be included before they can be used.
+ */
+#define HBTP_VM_IOCTL_BASE  'V'
+#define HBTP_VM_ENABLE	        _IO(HBTP_VM_IOCTL_BASE, 200)
+#define HBTP_VM_DISABLE	        _IO(HBTP_VM_IOCTL_BASE, 201)
+#define HBTP_VM_SET_TOUCHDATA	_IOW(HBTP_INPUT_IOCTL_BASE, 202, \
+					struct hbtp_input_mt)
+#define HBTP_VM_SEND_CLICK      _IOW(HBTP_INPUT_IOCTL_BASE, 203, \
+					struct hbtp_vm_click)
+
+#endif	/* _HBTP_VM_H */
+
diff -Nruw linux-4.4.115-fbx/include/uapi/linux/hdmi-cec./hdmi-cec.h linux-4.4.115-fbx/include/uapi/linux/hdmi-cec/hdmi-cec.h
--- linux-4.4.115-fbx/include/uapi/linux/hdmi-cec./hdmi-cec.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/include/uapi/linux/hdmi-cec/hdmi-cec.h	2019-05-31 20:58:54.922934968 +0200
@@ -0,0 +1,153 @@
+#ifndef __UAPI_HDMI_CEC_H
+#define __UAPI_HDMI_CEC_H
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+
+/* Common defines for HDMI CEC */
+#define CEC_BCAST_ADDR		(0x0f)
+#define CEC_ADDR_MAX		CEC_BCAST_ADDR
+
+#define CEC_MAX_MSG_LEN		(16)	/* 16 blocks */
+
+enum cec_rx_msg_flags {
+	/*
+	 * an ACK was received for this message
+	 */
+	CEC_RX_F_ACKED			= (1 << 0),
+
+	/*
+	 * message was fully received
+	 */
+	CEC_RX_F_COMPLETE		= (1 << 1),
+};
+
+/**
+ * struct cec_rx_msg - user-space exposed cec message cookie
+ * @data:	cec message payload
+ * @len:	cec message length
+ * @valid:	0 for invalid message
+ * @flags:	flag field (cec_rx_msg_flags)
+ */
+struct cec_rx_msg {
+	__u8	data[CEC_MAX_MSG_LEN];
+	__u8	len;
+	__u8	valid;
+	__u8	flags;
+
+} __attribute__((packed));
+
+enum cec_tx_status_flags {
+	/*
+	 * message was nacked at some point
+	 */
+	CEC_TX_F_NACK			= (1 << 0),
+
+	/*
+	 * abort sending because the total time allowed to send has elapsed
+	 */
+	CEC_TX_F_TIMEOUT		= (1 << 1),
+
+	/*
+	 * abort sending because the maximum number of retries was reached
+	 */
+	CEC_TX_F_MAX_RETRIES		= (1 << 2),
+
+	/*
+	 * abort sending because of arbitration loss
+	 */
+	CEC_TX_F_ARBITRATION_LOST	= (1 << 3),
+
+	/*
+	 * message failed for other reason
+	 */
+	CEC_TX_F_UNKNOWN_ERROR		= (1 << 7),
+};
+
+/**
+ * struct cec_tx_msg - user-space exposed cec message cookie
+ * @expire_ms:	how long to keep trying to send the message (milliseconds)
+ * @data:	cec message payload
+ * @len:	cec message length
+ * @success:	0 => message was sent, non-zero => failed to send message
+ * @flags:	flag field (cec_tx_status_flags)
+ * @tries:	number of attempts made to send the message
+ */
+struct cec_tx_msg {
+	__u16	expire_ms;
+	__u8	data[CEC_MAX_MSG_LEN];
+	__u8	len;
+	__u8	success;
+	__u8	flags;
+	__u8	tries;
+} __attribute__((packed));
+
+struct cec_tx_status {
+	__u8	sent;
+	__u8	success;
+	__u8	flags;
+	__u8	tries;
+} __attribute__((packed));
+
+#define DETACH_CFG_F_WAKEUP		(1 << 0)
+
+struct cec_detached_config {
+	__u8	phys_addr_valid;
+	__u8	phys_addr[2];
+	__u8	flags;
+} __attribute__((packed));
+
+/* Counters */
+
+/**
+ * struct cec_rx_counters - cec adapter RX counters
+ */
+struct cec_rx_counters {
+	__u8	pkts;
+	__u8	filtered_pkts;
+	__u8	valid_pkts;
+	__u8	rx_queue_full;
+	__u8	late_ack;
+	__u8	error;
+	__u8	rx_timeout_abort;
+	__u8	rx_throttled;
+};
+
+/**
+ * struct cec_tx_counters - cec adapter TX counters
+ */
+struct cec_tx_counters {
+	__u8	done;
+	__u8	fail;
+	__u8	timeout;
+	__u8	arb_loss;
+	__u8	bad_ack_timings;
+	__u8	tx_miss_early;
+	__u8	tx_miss_late;
+};
+
+/**
+ * struct cec_counters - tx and rx cec counters
+ * @rx:	struct cec_rx_counters
+ * @tx: struct cec_tx_counters
+ */
+struct cec_counters {
+	struct cec_rx_counters	rx;
+	struct cec_tx_counters	tx;
+};
+
+/**
+ * enum cec_rx_mode - cec adapter rx mode
+ * @CEC_RX_MODE_DISABLED:	RX path is disabled (default)
+ * @CEC_RX_MODE_DEFAULT:	accept only unicast traffic
+ * @CEC_RX_MODE_ACCEPT_ALL:	accept all incoming RX traffic (sniffing mode)
+ * @CEC_RX_MODE_MAX:		sentinel
+ */
+enum cec_rx_mode {
+	CEC_RX_MODE_DISABLED = 0,
+	CEC_RX_MODE_DEFAULT,
+	CEC_RX_MODE_ACCEPT_ALL,
+	CEC_RX_MODE_MAX
+};
+
+#endif /* __UAPI_HDMI_CEC_H */
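[Editorial illustration, not part of the patch] A sketch of filling the TX cookie defined above; how the cookie is then handed to the adapter (e.g. via a character-device interface) is outside this header and assumed.

#include <string.h>

static void cec_tx_msg_fill_example(struct cec_tx_msg *msg,
				    const __u8 *payload, __u8 len)
{
	memset(msg, 0, sizeof(*msg));
	msg->expire_ms = 500;	/* stop retrying after half a second */
	msg->len = len <= CEC_MAX_MSG_LEN ? len : CEC_MAX_MSG_LEN;
	memcpy(msg->data, payload, msg->len);
	/* after transmission, check msg->success, msg->flags and msg->tries */
}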
diff -Nruw linux-4.4.115-fbx/include/uapi/linux/hdmi-cec./Kbuild linux-4.4.115-fbx/include/uapi/linux/hdmi-cec/Kbuild
--- linux-4.4.115-fbx/include/uapi/linux/hdmi-cec./Kbuild	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/include/uapi/linux/hdmi-cec/Kbuild	2019-05-31 20:58:54.922934968 +0200
@@ -0,0 +1 @@
+header-y	+= hdmi-cec.h dev.h
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/include/uapi/linux/if_pppolac.h	2019-01-22 16:16:28.551292119 +0100
@@ -0,0 +1,33 @@
+/* include/uapi/linux/if_pppolac.h
+ *
+ * Header for PPP on L2TP Access Concentrator / PPPoLAC Socket (RFC 2661)
+ *
+ * Copyright (C) 2009 Google, Inc.
+ * Author: Chia-chi Yeh <chiachi@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _UAPI_LINUX_IF_PPPOLAC_H
+#define _UAPI_LINUX_IF_PPPOLAC_H
+
+#include <linux/socket.h>
+#include <linux/types.h>
+
+struct sockaddr_pppolac {
+	sa_family_t	sa_family;	/* AF_PPPOX */
+	unsigned int	sa_protocol;	/* PX_PROTO_OLAC */
+	int		udp_socket;
+	struct __attribute__((packed)) {
+		__u16	tunnel, session;
+	} local, remote;
+} __attribute__((packed));
+
+#endif /* _UAPI_LINUX_IF_PPPOLAC_H */
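[Editorial illustration, not part of the patch] A connect sketch for this address family. AF_PPPOX and PX_PROTO_OLAC are assumed to come from <linux/if_pppox.h> in this tree; whether the tunnel/session IDs must already be in network byte order is an assumption, hedged here with htons().

#include <arpa/inet.h>
#include <stdint.h>
#include <sys/socket.h>
#include <linux/if_pppox.h>

static int pppolac_connect_example(int ppp_fd, int udp_fd,
				   uint16_t loc_tunnel, uint16_t loc_session,
				   uint16_t rem_tunnel, uint16_t rem_session)
{
	struct sockaddr_pppolac addr = {
		.sa_family = AF_PPPOX,
		.sa_protocol = PX_PROTO_OLAC,
		.udp_socket = udp_fd,		/* established L2TP/UDP socket */
		.local = { htons(loc_tunnel), htons(loc_session) },
		.remote = { htons(rem_tunnel), htons(rem_session) },
	};

	return connect(ppp_fd, (struct sockaddr *)&addr, sizeof(addr));
}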
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/include/uapi/linux/if_pppopns.h	2019-01-22 16:16:28.551292119 +0100
@@ -0,0 +1,32 @@
+/* include/uapi/linux/if_pppopns.h
+ *
+ * Header for PPP on PPTP Network Server / PPPoPNS Socket (RFC 2637)
+ *
+ * Copyright (C) 2009 Google, Inc.
+ * Author: Chia-chi Yeh <chiachi@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _UAPI_LINUX_IF_PPPOPNS_H
+#define _UAPI_LINUX_IF_PPPOPNS_H
+
+#include <linux/socket.h>
+#include <linux/types.h>
+
+struct sockaddr_pppopns {
+	sa_family_t	sa_family;	/* AF_PPPOX */
+	unsigned int	sa_protocol;	/* PX_PROTO_OPNS */
+	int		tcp_socket;
+	__u16		local;
+	__u16		remote;
+} __attribute__((packed));
+
+#endif /* _UAPI_LINUX_IF_PPPOPNS_H */
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/include/uapi/linux/ion.h	2019-01-22 16:16:26.719275529 +0100
@@ -0,0 +1,203 @@
+/*
+ * drivers/staging/android/uapi/ion.h
+ *
+ * Copyright (C) 2011 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _UAPI_LINUX_ION_H
+#define _UAPI_LINUX_ION_H
+
+#include <linux/ioctl.h>
+#include <linux/types.h>
+
+typedef int ion_user_handle_t;
+
+/**
+ * enum ion_heap_types - list of all possible types of heaps
+ * @ION_HEAP_TYPE_SYSTEM:	 memory allocated via vmalloc
+ * @ION_HEAP_TYPE_SYSTEM_CONTIG: memory allocated via kmalloc
+ * @ION_HEAP_TYPE_CARVEOUT:	 memory allocated from a prereserved
+ *				 carveout heap, allocations are physically
+ *				 contiguous
+ * @ION_HEAP_TYPE_DMA:		 memory allocated via DMA API
+ * @ION_NUM_HEAPS:		 helper for iterating over heaps, a bit mask
+ *				 is used to identify the heaps, so only 32
+ *				 total heap types are supported
+ */
+enum ion_heap_type {
+	ION_HEAP_TYPE_SYSTEM,
+	ION_HEAP_TYPE_SYSTEM_CONTIG,
+	ION_HEAP_TYPE_CARVEOUT,
+	ION_HEAP_TYPE_CHUNK,
+	ION_HEAP_TYPE_DMA,
+	ION_HEAP_TYPE_CUSTOM, /*
+			       * must be last so device specific heaps always
+			       * are at the end of this enum
+			       */
+	ION_NUM_HEAPS = 16,
+};
+
+#define ION_HEAP_SYSTEM_MASK		(1 << ION_HEAP_TYPE_SYSTEM)
+#define ION_HEAP_SYSTEM_CONTIG_MASK	(1 << ION_HEAP_TYPE_SYSTEM_CONTIG)
+#define ION_HEAP_CARVEOUT_MASK		(1 << ION_HEAP_TYPE_CARVEOUT)
+#define ION_HEAP_TYPE_DMA_MASK		(1 << ION_HEAP_TYPE_DMA)
+
+#define ION_NUM_HEAP_IDS		(sizeof(unsigned int) * 8)
+
+/**
+ * allocation flags - the lower 16 bits are used by core ion, the upper 16
+ * bits are reserved for use by the heaps themselves.
+ */
+#define ION_FLAG_CACHED 1		/*
+					 * mappings of this buffer should be
+					 * cached, ion will do cache
+					 * maintenance when the buffer is
+					 * mapped for dma
+					 */
+#define ION_FLAG_CACHED_NEEDS_SYNC 2	/*
+					 * mappings of this buffer will be
+					 * created at mmap time; if this is
+					 * set, caches must be managed
+					 * manually
+					 */
+
+/**
+ * DOC: Ion Userspace API
+ *
+ * create a client by opening /dev/ion
+ * most operations are handled via the following ioctls
+ *
+ */
+
+/**
+ * struct ion_allocation_data - metadata passed from userspace for allocations
+ * @len:		size of the allocation
+ * @align:		required alignment of the allocation
+ * @heap_id_mask:	mask of heap ids to allocate from
+ * @flags:		flags passed to heap
+ * @handle:		pointer that will be populated with a cookie to use to
+ *			refer to this allocation
+ *
+ * Provided by userspace as an argument to the ioctl
+ */
+struct ion_allocation_data {
+	size_t len;
+	size_t align;
+	unsigned int heap_id_mask;
+	unsigned int flags;
+	ion_user_handle_t handle;
+};
+
+/**
+ * struct ion_fd_data - metadata passed to/from userspace for a handle/fd pair
+ * @handle:	a handle
+ * @fd:		a file descriptor representing that handle
+ *
+ * For ION_IOC_SHARE or ION_IOC_MAP userspace populates the handle field with
+ * the handle returned from ion alloc, and the kernel returns the file
+ * descriptor to share or map in the fd field.  For ION_IOC_IMPORT, userspace
+ * provides the file descriptor and the kernel returns the handle.
+ */
+struct ion_fd_data {
+	ion_user_handle_t handle;
+	int fd;
+};
+
+/**
+ * struct ion_handle_data - a handle passed to/from the kernel
+ * @handle:	a handle
+ */
+struct ion_handle_data {
+	ion_user_handle_t handle;
+};
+
+/**
+ * struct ion_custom_data - metadata passed to/from userspace for a custom ioctl
+ * @cmd:	the custom ioctl function to call
+ * @arg:	additional data to pass to the custom ioctl, typically a user
+ *		pointer to a predefined structure
+ *
+ * This works just like the regular cmd and arg fields of an ioctl.
+ */
+struct ion_custom_data {
+	unsigned int cmd;
+	unsigned long arg;
+};
+
+#define ION_IOC_MAGIC		'I'
+
+/**
+ * DOC: ION_IOC_ALLOC - allocate memory
+ *
+ * Takes an ion_allocation_data struct and returns it with the handle field
+ * populated with the opaque handle for the allocation.
+ */
+#define ION_IOC_ALLOC		_IOWR(ION_IOC_MAGIC, 0, \
+				      struct ion_allocation_data)
+
+/**
+ * DOC: ION_IOC_FREE - free memory
+ *
+ * Takes an ion_handle_data struct and frees the handle.
+ */
+#define ION_IOC_FREE		_IOWR(ION_IOC_MAGIC, 1, struct ion_handle_data)
+
+/**
+ * DOC: ION_IOC_MAP - get a file descriptor to mmap
+ *
+ * Takes an ion_fd_data struct with the handle field populated with a valid
+ * opaque handle.  Returns the struct with the fd field set to a file
+ * descriptor open in the current address space.  This file descriptor
+ * can then be used as an argument to mmap.
+ */
+#define ION_IOC_MAP		_IOWR(ION_IOC_MAGIC, 2, struct ion_fd_data)
+
+/**
+ * DOC: ION_IOC_SHARE - creates a file descriptor to use to share an allocation
+ *
+ * Takes an ion_fd_data struct with the handle field populated with a valid
+ * opaque handle.  Returns the struct with the fd field set to a file
+ * descriptor open in the current address space.  This file descriptor
+ * can then be passed to another process.  The corresponding opaque handle can
+ * be retrieved via ION_IOC_IMPORT.
+ */
+#define ION_IOC_SHARE		_IOWR(ION_IOC_MAGIC, 4, struct ion_fd_data)
+
+/**
+ * DOC: ION_IOC_IMPORT - imports a shared file descriptor
+ *
+ * Takes an ion_fd_data struct with the fd field populated with a valid file
+ * descriptor obtained from ION_IOC_SHARE and returns the struct with the handle
+ * field set to the corresponding opaque handle.
+ */
+#define ION_IOC_IMPORT		_IOWR(ION_IOC_MAGIC, 5, struct ion_fd_data)
+
+/**
+ * DOC: ION_IOC_SYNC - syncs a shared file descriptor to memory
+ *
+ * Deprecated in favor of using the dma_buf APIs correctly (syncing
+ * will happen automatically when the buffer is mapped to a device).
+ * If necessary it should be used after touching a cached buffer from the
+ * cpu; this will make the buffer in memory coherent.
+ */
+#define ION_IOC_SYNC		_IOWR(ION_IOC_MAGIC, 7, struct ion_fd_data)
+
+/**
+ * DOC: ION_IOC_CUSTOM - call architecture specific ion ioctl
+ *
+ * Takes the argument of the architecture specific ioctl to call and
+ * passes appropriate userdata for that ioctl
+ */
+#define ION_IOC_CUSTOM		_IOWR(ION_IOC_MAGIC, 6, struct ion_custom_data)
+
+#endif /* _UAPI_LINUX_ION_H */
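[Editorial illustration, not part of the patch] Tying the DOC sections together, a sketch of the alloc, share and mmap flow; the client fd is assumed to come from opening /dev/ion as described above.

#include <sys/ioctl.h>
#include <sys/mman.h>

static void *ion_alloc_and_map_example(int ion_fd, size_t len)
{
	struct ion_allocation_data alloc = {
		.len = len,
		.align = 4096,
		.heap_id_mask = ION_HEAP_SYSTEM_MASK,
		.flags = ION_FLAG_CACHED,
	};
	struct ion_fd_data share;
	struct ion_handle_data to_free;
	void *ptr = MAP_FAILED;

	if (ioctl(ion_fd, ION_IOC_ALLOC, &alloc) < 0)
		return NULL;
	share.handle = alloc.handle;
	if (ioctl(ion_fd, ION_IOC_SHARE, &share) == 0)
		ptr = mmap(NULL, len, PROT_READ | PROT_WRITE,
			   MAP_SHARED, share.fd, 0);
	to_free.handle = alloc.handle;	/* the shared fd keeps the buffer alive */
	ioctl(ion_fd, ION_IOC_FREE, &to_free);
	return ptr == MAP_FAILED ? NULL : ptr;
}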
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/include/uapi/linux/ion_test.h	2019-01-22 16:16:26.719275529 +0100
@@ -0,0 +1,70 @@
+/*
+ * drivers/staging/android/uapi/ion_test.h
+ *
+ * Copyright (C) 2011 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _UAPI_LINUX_ION_TEST_H
+#define _UAPI_LINUX_ION_TEST_H
+
+#include <linux/ioctl.h>
+#include <linux/types.h>
+
+/**
+ * struct ion_test_rw_data - metadata passed to the kernel to read handle
+ * @ptr:	a pointer to an area at least as large as size
+ * @offset:	offset into the ion buffer to start reading
+ * @size:	size to read or write
+ * @write:	1 to write, 0 to read
+ */
+struct ion_test_rw_data {
+	__u64 ptr;
+	__u64 offset;
+	__u64 size;
+	int write;
+	int __padding;
+};
+
+#define ION_IOC_MAGIC		'I'
+
+/**
+ * DOC: ION_IOC_TEST_SET_FD - attach a dma buf to the test driver
+ *
+ * Attaches a dma buf fd to the test driver.  Passing a second fd or -1 will
+ * release the first fd.
+ */
+#define ION_IOC_TEST_SET_FD \
+			_IO(ION_IOC_MAGIC, 0xf0)
+
+/**
+ * DOC: ION_IOC_TEST_DMA_MAPPING - read or write memory from a handle as DMA
+ *
+ * Reads or writes the memory from a handle using an uncached mapping.  Can be
+ * used by unit tests to emulate a DMA engine as close as possible.  Only
+ * expected to be used for debugging and testing, may not always be available.
+ */
+#define ION_IOC_TEST_DMA_MAPPING \
+			_IOW(ION_IOC_MAGIC, 0xf1, struct ion_test_rw_data)
+
+/**
+ * DOC: ION_IOC_TEST_KERNEL_MAPPING - read or write memory from a handle
+ *
+ * Reads or writes the memory from a handle using a kernel mapping.  Can be
+ * used by unit tests to test heap map_kernel functions.  Only expected to be
+ * used for debugging and testing, may not always be available.
+ */
+#define ION_IOC_TEST_KERNEL_MAPPING \
+			_IOW(ION_IOC_MAGIC, 0xf2, struct ion_test_rw_data)
+
+
+#endif /* _UAPI_LINUX_ION_TEST_H */
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/include/uapi/linux/ipa_qmi_service_v01.h	2019-01-22 16:16:28.555292155 +0100
@@ -0,0 +1,2000 @@
+/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/*
+ * This header file defines the types and structures that were defined in
+ * ipa. It contains the constant values defined, enums, structures,
+ * messages, and service message IDs (in that order). Structures that were
+ * defined in the IDL as messages contain mandatory elements, optional
+ * elements, a combination of mandatory and optional elements (mandatory
+ * always come before optionals in the structure), or nothing (null message).
+ *
+ * An optional element in a message is preceded by a uint8_t value that must be
+ * set to true if the element is going to be included. When decoding a received
+ * message, the uint8_t values will be set to true or false by the decode
+ * routine, and should be checked before accessing the values that they
+ * correspond to.
+ *
+ * Variable sized arrays are defined as static sized arrays with an unsigned
+ * integer (32 bit) preceding it that must be set to the number of elements
+ * in the array that are valid. For example:
+ *
+ * uint32_t test_opaque_len;
+ * uint8_t test_opaque[16];
+ *
+ * If only 4 elements are added to test_opaque[] then test_opaque_len must be
+ * set to 4 before sending the message.  When decoding, the _len value is set
+ * by the decode routine and should be checked so that the correct number of
+ * elements in the array will be accessed.
+ */
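+
+/*
+ * Illustration of the convention above (hypothetical client code, not part
+ * of this interface): to include the optional platform_type element of
+ * struct ipa_init_modem_driver_req_msg_v01, a sender would write
+ *
+ *	struct ipa_init_modem_driver_req_msg_v01 req = { 0 };
+ *
+ *	req.platform_type_valid = 1;	// element is included
+ *	req.platform_type = QMI_IPA_PLATFORM_TYPE_MSM_ANDROID_V01;
+ *
+ * and a receiver must test req.platform_type_valid before reading
+ * req.platform_type.
+ */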
+#ifndef IPA_QMI_SERVICE_V01_H
+#define IPA_QMI_SERVICE_V01_H
+
+#define QMI_IPA_IPFLTR_NUM_IHL_RANGE_16_EQNS_V01 2
+#define QMI_IPA_IPFLTR_NUM_MEQ_32_EQNS_V01 2
+#define QMI_IPA_IPFLTR_NUM_IHL_MEQ_32_EQNS_V01 2
+#define QMI_IPA_IPFLTR_NUM_MEQ_128_EQNS_V01 2
+#define QMI_IPA_MAX_FILTERS_V01 64
+#define QMI_IPA_MAX_FILTERS_EX_V01 128
+#define QMI_IPA_MAX_PIPES_V01 20
+#define QMI_IPA_MAX_APN_V01 8
+#define QMI_IPA_MAX_PER_CLIENTS_V01 64
+/* Currently the maximum we can use is only 1, but for scalability purposes
+ * the maximum value is kept at 8.
+ */
+#define QMI_IPA_MAX_CLIENT_DST_PIPES_V01 8
+#define QMI_IPA_MAX_UL_FIREWALL_RULES_V01 64
+
+#define IPA_INT_MAX	((int)(~0U>>1))
+#define IPA_INT_MIN	(-IPA_INT_MAX - 1)
+
+/* IPA definition as msm_qmi_interface.h */
+
+enum ipa_qmi_result_type_v01 {
+	/* To force a 32 bit signed enum. Do not change or use */
+	IPA_QMI_RESULT_TYPE_MIN_ENUM_VAL_V01 = IPA_INT_MIN,
+	IPA_QMI_RESULT_SUCCESS_V01 = 0,
+	IPA_QMI_RESULT_FAILURE_V01 = 1,
+	IPA_QMI_RESULT_TYPE_MAX_ENUM_VAL_V01 = IPA_INT_MAX,
+};
+
+enum ipa_qmi_error_type_v01 {
+	/* To force a 32 bit signed enum. Do not change or use */
+	IPA_QMI_ERROR_TYPE_MIN_ENUM_VAL_V01 = IPA_INT_MIN,
+	IPA_QMI_ERR_NONE_V01 = 0x0000,
+	IPA_QMI_ERR_MALFORMED_MSG_V01 = 0x0001,
+	IPA_QMI_ERR_NO_MEMORY_V01 = 0x0002,
+	IPA_QMI_ERR_INTERNAL_V01 = 0x0003,
+	IPA_QMI_ERR_CLIENT_IDS_EXHAUSTED_V01 = 0x0005,
+	IPA_QMI_ERR_INVALID_ID_V01 = 0x0029,
+	IPA_QMI_ERR_ENCODING_V01 = 0x003A,
+	IPA_QMI_ERR_INCOMPATIBLE_STATE_V01 = 0x005A,
+	IPA_QMI_ERR_NOT_SUPPORTED_V01 = 0x005E,
+	IPA_QMI_ERROR_TYPE_MAX_ENUM_VAL_V01 = IPA_INT_MAX,
+};
+
+struct ipa_qmi_response_type_v01 {
+	enum ipa_qmi_result_type_v01 result;
+	enum ipa_qmi_error_type_v01 error;
+};
+
+enum ipa_platform_type_enum_v01 {
+	IPA_PLATFORM_TYPE_ENUM_MIN_ENUM_VAL_V01 =
+	-2147483647, /* To force a 32 bit signed enum.  Do not change or use */
+	QMI_IPA_PLATFORM_TYPE_INVALID_V01 = 0,
+	/*  Invalid platform identifier */
+	QMI_IPA_PLATFORM_TYPE_TN_V01 = 1,
+	/*  Platform identifier -	Data card device */
+	QMI_IPA_PLATFORM_TYPE_LE_V01 = 2,
+	/*  Platform identifier -	Data router device */
+	QMI_IPA_PLATFORM_TYPE_MSM_ANDROID_V01 = 3,
+	/*  Platform identifier -	MSM device with Android HLOS */
+	QMI_IPA_PLATFORM_TYPE_MSM_WINDOWS_V01 = 4,
+	/*  Platform identifier -	MSM device with Windows HLOS */
+	QMI_IPA_PLATFORM_TYPE_MSM_QNX_V01 = 5,
+	/*  Platform identifier -	MSM device with QNX HLOS */
+	IPA_PLATFORM_TYPE_ENUM_MAX_ENUM_VAL_V01 = 2147483647
+	/* To force a 32 bit signed enum.  Do not change or use */
+};
+
+struct ipa_hdr_tbl_info_type_v01 {
+	uint32_t modem_offset_start;
+	/*	Offset from the start of IPA Shared memory from which
+	 *	modem driver may insert header table entries.
+	 */
+	uint32_t modem_offset_end;
+	/*	Offset from the start of IPA shared mem beyond which modem
+	 *	driver shall not insert header table entries. The space
+	 *	available for the modem driver shall include the
+	 *	modem_offset_start and modem_offset_end.
+	 */
+};  /* Type */
+
+struct ipa_route_tbl_info_type_v01 {
+	uint32_t route_tbl_start_addr;
+	/*	Identifies the start of the routing table. Denotes the offset
+	 *	from the start of the IPA Shared Mem
+	 */
+
+	uint32_t num_indices;
+	/*	Number of indices (starting from 0) that is being allocated to
+	 *	the modem. The number indicated here is also included in the
+	 *	allocation. The value of num_indices shall not exceed 31
+	 *	(5 bits used to specify the routing table index), unless there
+	 *	is a change in the hardware.
+	 */
+};  /* Type */
+
+struct ipa_modem_mem_info_type_v01 {
+
+	uint32_t block_start_addr;
+	/*	Identifies the start of the memory block allocated for the
+	 *	modem. Denotes the offset from the start of the IPA Shared Mem
+	 */
+
+	uint32_t size;
+	/*	Size of the block allocated for the modem driver */
+};  /* Type */
+
+struct ipa_hdr_proc_ctx_tbl_info_type_v01 {
+
+	uint32_t modem_offset_start;
+	/*  Offset from the start of IPA shared memory from which the modem
+	 *	driver may insert header processing context table entries.
+	 */
+
+	uint32_t modem_offset_end;
+	/*  Offset from the start of IPA shared memory beyond which the modem
+	 *	driver may not insert header proc table entries. The space
+	 *	available for the modem driver includes modem_offset_start and
+	 *	modem_offset_end.
+	 */
+};  /* Type */
+
+struct ipa_zip_tbl_info_type_v01 {
+
+	uint32_t modem_offset_start;
+	/*  Offset from the start of IPA shared memory from which the modem
+	 *	driver may insert compression/decompression command entries.
+	 */
+
+	uint32_t modem_offset_end;
+	/*  Offset from the start of IPA shared memory beyond which the modem
+	 *	driver may not insert compression/decompression command entries.
+	 *	The space available for the modem driver includes
+	 *  modem_offset_start and modem_offset_end.
+	 */
+};  /* Type */
+
+/**
+ * Request Message; Requests the modem IPA driver
+ * to perform initialization
+ */
+struct ipa_init_modem_driver_req_msg_v01 {
+
+	/* Optional */
+	/*  Platform info */
+	uint8_t platform_type_valid;  /**< Must be set to true if platform_type
+	is being passed */
+	enum ipa_platform_type_enum_v01 platform_type;
+	/*   Provides information about the platform (e.g. TN/MN/LE/MSM, etc.) */
+
+	/* Optional */
+	/*  Header table info */
+	uint8_t hdr_tbl_info_valid;
+	/* Must be set to true if hdr_tbl_info is being passed */
+	struct ipa_hdr_tbl_info_type_v01 hdr_tbl_info;
+	/*	Provides information about the header table */
+
+	/* Optional */
+	/*  IPV4 Routing table info */
+	uint8_t v4_route_tbl_info_valid;
+	/* Must be set to true if v4_route_tbl_info is being passed */
+	struct ipa_route_tbl_info_type_v01 v4_route_tbl_info;
+	/*	Provides information about the IPV4 routing table */
+
+	/* Optional */
+	/*  IPV6 Routing table info */
+	uint8_t v6_route_tbl_info_valid;  /**< Must be set to true if
+	v6_route_tbl_info is being passed */
+	struct ipa_route_tbl_info_type_v01 v6_route_tbl_info;
+	/*	Provides information about the IPV6 routing table */
+
+	/* Optional */
+	/*  IPV4 Filter table start address */
+	uint8_t v4_filter_tbl_start_addr_valid;  /**< Must be set to true
+	if v4_filter_tbl_start_addr is being passed */
+	uint32_t v4_filter_tbl_start_addr;
+	/*	Provides information about the starting address of IPV4 filter
+	 *	table in IPAv2 or non-hashable IPv4 filter table in IPAv3.
+	 *	Denotes the offset from the start of the IPA Shared Mem
+	 */
+
+	/* Optional */
+	/* IPV6 Filter table start address */
+	uint8_t v6_filter_tbl_start_addr_valid;
+	/* Must be set to true if v6_filter_tbl_start_addr is being passed */
+	uint32_t v6_filter_tbl_start_addr;
+	/*	Provides information about the starting address of IPV6 filter
+	 *	table in IPAv2 or non-hashable IPv6 filter table in IPAv3.
+	 *	Denotes the offset from the start of the IPA Shared Mem
+	 */
+
+	/* Optional */
+	/*  Modem memory block */
+	uint8_t modem_mem_info_valid;
+	/* Must be set to true if modem_mem_info is being passed */
+	struct ipa_modem_mem_info_type_v01 modem_mem_info;
+	/*  Provides information about the start address and the size of
+	 *	the memory block that is being allocated to the modem driver.
+	 *	Denotes the physical address
+	 */
+
+	/* Optional */
+	/*  Destination end point for control commands from modem */
+	uint8_t ctrl_comm_dest_end_pt_valid;  /**< Must be set to true if
+	ctrl_comm_dest_end_pt is being passed */
+	uint32_t ctrl_comm_dest_end_pt;
+	/*  Provides information about the destination end point on the
+	 *	application processor to which the modem driver can send
+	 *	control commands. The value of this parameter cannot exceed
+	 *	19 since IPA only supports 20 end points.
+	 */
+
+	/* Optional */
+	/*  Modem Bootup Information */
+	uint8_t is_ssr_bootup_valid;  /**< Must be set to true if
+	is_ssr_bootup is being passed */
+	uint8_t is_ssr_bootup;
+	/*	Specifies whether the modem is booting up after a modem only
+	 *	sub-system restart or not. This will let the modem driver
+	 *	know that it doesn't have to reinitialize some of the HW
+	 *	blocks because IPA has not been reset since the previous
+	 *	initialization.
+	 */
+
+	/* Optional */
+	/*  Header Processing Context Table Information */
+	uint8_t hdr_proc_ctx_tbl_info_valid;
+	/* Must be set to true if hdr_proc_ctx_tbl_info is being passed */
+	struct ipa_hdr_proc_ctx_tbl_info_type_v01 hdr_proc_ctx_tbl_info;
+	/* Provides information about the header processing context table. */
+
+	/* Optional */
+	/*  Compression Decompression Table Information */
+	uint8_t zip_tbl_info_valid;
+	/* Must be set to true if zip_tbl_info is being passed */
+	struct ipa_zip_tbl_info_type_v01 zip_tbl_info;
+	/* Provides information about the zip table. */
+
+	/* Optional */
+	/*  IPv4 Hashable Routing Table Information */
+	/** Must be set to true if v4_hash_route_tbl_info is being passed */
+	uint8_t v4_hash_route_tbl_info_valid;
+	struct ipa_route_tbl_info_type_v01 v4_hash_route_tbl_info;
+
+	/* Optional */
+	/*  IPv6 Hashable Routing Table Information */
+	/** Must be set to true if v6_hash_route_tbl_info is being passed */
+	uint8_t v6_hash_route_tbl_info_valid;
+	struct ipa_route_tbl_info_type_v01 v6_hash_route_tbl_info;
+
+	/* Optional */
+	/*  IPv4 Hashable Filter Table Start Address */
+	/** Must be set to true if v4_hash_filter_tbl_start_addr
+	    is being passed */
+	uint8_t v4_hash_filter_tbl_start_addr_valid;
+	uint32_t v4_hash_filter_tbl_start_addr;
+	/**	Identifies the starting address of the IPv4 hashable filter
+	    table in IPAv3 onwards. Denotes the offset from the start of
+		the IPA shared memory.
+	*/
+
+	/* Optional */
+	/*  IPv6 Hashable Filter Table Start Address */
+	/** Must be set to true if v6_hash_filter_tbl_start_addr
+	    is being passed */
+	uint8_t v6_hash_filter_tbl_start_addr_valid;
+	uint32_t v6_hash_filter_tbl_start_addr;
+	/**	Identifies the starting address of the IPv6 hashable filter
+	    table in IPAv3 onwards. Denotes the offset from the start of
+		the IPA shared memory.
+	*/
+};  /* Message */
+
+/* Response Message; Response to the modem IPA driver initialization request */
+struct ipa_init_modem_driver_resp_msg_v01 {
+	/* Mandatory */
+	/*  Result Code */
+	struct ipa_qmi_response_type_v01 resp;
+	/* Standard response type. */
+
+	/* Optional */
+	/* Destination end point for control commands from master driver */
+	uint8_t ctrl_comm_dest_end_pt_valid;
+	/* Must be set to true if ctrl_comm_dest_ep is being passed */
+	uint32_t ctrl_comm_dest_end_pt;
+	/*	Provides information about the destination end point on the
+	 *	modem processor to which the master driver can send control
+	 *	commands. The value of this parameter cannot exceed 19 since
+	 *	IPA only supports 20 end points. This field is looked at only
+	 *	if the result in TLV RESULT_CODE is	QMI_RESULT_SUCCESS
+	 */
+
+	/* Optional */
+	/*  Default end point */
+	uint8_t default_end_pt_valid;
+	/* Must be set to true if default_end_pt is being passed */
+	uint32_t default_end_pt;
+	/*  Provides information about the default end point. The master
+	 *	driver may or may not set the register in the hardware with
+	 *	this value. The value of this parameter cannot exceed 19
+	 *	since IPA only supports 20 end points. This field is looked
+	 *	at only if the result in TLV RESULT_CODE is QMI_RESULT_SUCCESS
+	 */
+
+	/* Optional */
+	/*  Modem Driver Initialization Pending */
+	uint8_t modem_driver_init_pending_valid;
+	/* Must be set to true if modem_driver_init_pending is being passed */
+	uint8_t modem_driver_init_pending;
+	/*
+	 * Identifies if second level message handshake is needed
+	 *	between drivers to indicate when IPA HWP loading is completed.
+	 *	If this is set by modem driver, AP driver will need to wait
+	 *	for a INIT_MODEM_DRIVER_CMPLT message before communicating with
+	 *	IPA HWP.
+	 */
+};  /* Message */
+
+/*
+ * Request Message; Request from Modem IPA driver to indicate
+ *	modem driver init completion
+ */
+struct ipa_init_modem_driver_cmplt_req_msg_v01 {
+	/* Mandatory */
+	/*  Modem Driver init complete status; */
+	uint8_t status;
+	/*
+	 * Specifies whether the modem driver initialization is complete
+	 *	including the micro controller image loading.
+	 */
+};  /* Message */
+
+/*
+ * Response Message; Request from Modem IPA driver to indicate
+ *	modem driver init completion
+ */
+struct ipa_init_modem_driver_cmplt_resp_msg_v01 {
+	/* Mandatory */
+	/*  Result Code */
+	struct ipa_qmi_response_type_v01 resp;
+	/**< Standard response type. */
+};  /* Message */
+
+/*	Request Message; This is the message that is exchanged between the
+ *	control point and the service in order to register for indications.
+ */
+struct ipa_indication_reg_req_msg_v01 {
+	/* Optional */
+	/*  Master driver initialization completion */
+	uint8_t master_driver_init_complete_valid;
+	/* Must be set to true if master_driver_init_complete is being passed */
+	uint8_t master_driver_init_complete;
+	/*  If set to TRUE, this field indicates that the client is
+	 *	interested in getting indications about the completion
+	 *	of the initialization sequence of the master driver.
+	 *	Setting this field in the request message makes sense
+	 *	only when the QMI_IPA_INDICATION_REGISTER_REQ is being
+	 *	originated from the modem driver
+	 */
+
+	/* Optional */
+	/*  Data Usage Quota Reached */
+	uint8_t data_usage_quota_reached_valid;
+	/*  Must be set to true if data_usage_quota_reached is being passed */
+	uint8_t data_usage_quota_reached;
+	/*  If set to TRUE, this field indicates that the client wants to
+	 *  receive indications about reaching the data usage quota that
+	 *  previously set via QMI_IPA_SET_DATA_USAGE_QUOTA. Setting this field
+	 *  in the request message makes sense only when the
+	 *  QMI_IPA_INDICATION_REGISTER_REQ is being originated from the Master
+	 *  driver
+	 */
+};  /* Message */
+
+
+/* Response Message; This is the message that is exchanged between the
+ *	control point and the service in order to register for indications.
+ */
+struct ipa_indication_reg_resp_msg_v01 {
+	/* Mandatory */
+	/*  Result Code */
+	struct ipa_qmi_response_type_v01 resp;
+	/* Standard response type. */
+};  /* Message */
+
+
+/*	Indication Message; Indication sent to the Modem IPA driver from
+ *	master IPA driver about initialization being complete.
+ */
+struct ipa_master_driver_init_complt_ind_msg_v01 {
+	/* Mandatory */
+	/*  Master driver initialization completion status */
+	struct ipa_qmi_response_type_v01 master_driver_init_status;
+	/*	Indicates the status of initialization. If everything went
+	 *	as expected, this field is set to SUCCESS. ERROR is set
+	 *	otherwise. Extended error info may be used to convey
+	 *	additional information about the error
+	 */
+};  /* Message */
+
+struct ipa_ipfltr_range_eq_16_type_v01 {
+	uint8_t offset;
+	/*	Specifies the offset from the IHL (Internet Header length) */
+
+	uint16_t range_low;
+	/*	Specifies the lower bound of the range */
+
+	uint16_t range_high;
+	/*	Specifies the upper bound of the range */
+};  /* Type */
+
+struct ipa_ipfltr_mask_eq_32_type_v01 {
+	uint8_t offset;
+	/*	Specifies the offset either from IHL or from the start of
+	 *	the IP packet. This depends on the equation that this structure
+	 *	is used in.
+	 */
+
+	uint32_t mask;
+	/*	Specifies the mask that has to be used in the comparison.
+	 *	The field is ANDed with the mask and compared against the value.
+	 */
+
+	uint32_t value;
+	/*	Specifies the 32 bit value that is used in the comparison. */
+};  /* Type */
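+
+/*
+ * Illustration (editor's note, not part of the QMI interface): a masked
+ * equality equation matches when
+ *
+ *	(field & mask) == value
+ *
+ * e.g. mask = 0x000000ff and value = 0x00000006 match any 32 bit word
+ * whose low byte equals 6.
+ */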
+
+struct ipa_ipfltr_eq_16_type_v01 {
+	uint8_t offset;
+	/*  Specifies the offset into the packet */
+
+	uint16_t value;
+	/* Specifies the 16 bit value that should be used in the comparison. */
+};  /* Type */
+
+struct ipa_ipfltr_eq_32_type_v01 {
+	uint8_t offset;
+	/* Specifies the offset into the packet */
+
+	uint32_t value;
+	/* Specifies the 32 bit value that should be used in the comparison. */
+};  /* Type */
+
+struct ipa_ipfltr_mask_eq_128_type_v01 {
+	uint8_t offset;
+	/* Specifies the offset into the packet */
+
+	uint8_t mask[16];
+	/*  Specifies the mask that has to be used in the comparison.
+	 *	The field is ANDed with the mask and compared against the value.
+	 */
+
+	uint8_t value[16];
+	/* Specifies the 128 bit value that should be used in the comparison. */
+};  /* Type */
+
+
+struct ipa_filter_rule_type_v01 {
+	uint16_t rule_eq_bitmap;
+	/* 16-bit Bitmask to indicate how many eqs are valid in this rule */
+
+	uint8_t tos_eq_present;
+	/* Specifies if a type of service check rule is present */
+
+	uint8_t tos_eq;
+	/* The value to check against the type of service (ipv4) field */
+
+	uint8_t protocol_eq_present;
+	/* Specifies if a protocol check rule is present */
+
+	uint8_t protocol_eq;
+	/* The value to check against the protocol field */
+
+	uint8_t num_ihl_offset_range_16;
+	/*  The number of 16 bit range check rules at the location
+	 *	determined by the IP header length plus a given offset
+	 *	in this rule. See the definition of
+	 *	ipa_ipfltr_range_eq_16_type_v01 for details. The value of
+	 *	this field cannot exceed
+	 *	QMI_IPA_IPFLTR_NUM_IHL_RANGE_16_EQNS_V01, which is set to 2
+	 */
+
+	struct ipa_ipfltr_range_eq_16_type_v01
+		ihl_offset_range_16[QMI_IPA_IPFLTR_NUM_IHL_RANGE_16_EQNS_V01];
+	/*	Array of the registered IP header length offset 16 bit range
+	 *	check rules.
+	 */
+
+	uint8_t num_offset_meq_32;
+	/*  The number of 32 bit masked comparison rules present
+	 *  in this rule
+	 */
+
+	struct ipa_ipfltr_mask_eq_32_type_v01
+		offset_meq_32[QMI_IPA_IPFLTR_NUM_MEQ_32_EQNS_V01];
+	/*  An array of all the possible 32bit masked comparison rules
+	 *	in this rule
+	 */
+
+	uint8_t tc_eq_present;
+	/*  Specifies if the traffic class rule is present in this rule */
+
+	uint8_t tc_eq;
+	/* The value against which the IPv6 traffic class field has to
+	 * be checked
+	 */
+
+	uint8_t flow_eq_present;
+	/* Specifies if the "flow equals" rule is present in this rule */
+
+	uint32_t flow_eq;
+	/* The value against which the IPV6 flow field has to be checked */
+
+	uint8_t ihl_offset_eq_16_present;
+	/*	Specifies if there is a 16 bit comparison required at the
+	 *	location in the packet determined by "Internet Header length
+	 *	+ specified offset"
+	 */
+
+	struct ipa_ipfltr_eq_16_type_v01 ihl_offset_eq_16;
+	/* The 16 bit comparison equation */
+
+	uint8_t ihl_offset_eq_32_present;
+	/*	Specifies if there is a 32 bit comparison required at the
+	 *	location in the packet determined by "Internet Header length
+	 *	+ specified offset"
+	 */
+
+	struct ipa_ipfltr_eq_32_type_v01 ihl_offset_eq_32;
+	/*	The 32 bit comparison equation */
+
+	uint8_t num_ihl_offset_meq_32;
+	/*	The number of 32 bit masked comparison equations in this
+	 *	rule. The location in the packet to be compared is
+	 *	determined by the IP header length + the given offset
+	 */
+
+	struct ipa_ipfltr_mask_eq_32_type_v01
+		ihl_offset_meq_32[QMI_IPA_IPFLTR_NUM_IHL_MEQ_32_EQNS_V01];
+	/* Array of 32 bit masked comparison equations. */
+
+	uint8_t num_offset_meq_128;
+	/*	The number of 128 bit comparison equations in this rule */
+
+	struct ipa_ipfltr_mask_eq_128_type_v01
+		offset_meq_128[QMI_IPA_IPFLTR_NUM_MEQ_128_EQNS_V01];
+	/*	Array of 128 bit comparison equations. The location in the
+	 *	packet is determined by the specified offset
+	 */
+
+	uint8_t metadata_meq32_present;
+	/*  Boolean indicating if the 32 bit masked comparison equation
+	 *	is present or not. Comparison is done against the metadata
+	 *	in IPA. Metadata can either be extracted from the packet
+	 *	header or from the "metadata" register.
+	 */
+
+	struct ipa_ipfltr_mask_eq_32_type_v01
+			metadata_meq32;
+	/* The metadata 32 bit masked comparison equation */
+
+	uint8_t ipv4_frag_eq_present;
+	/* Specifies if the IPv4 Fragment equation is present in this rule */
+};  /* Type */
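+
+/*
+ * Illustrative sketch (editor's note; example values only, not part of
+ * the QMI interface): building a rule that matches TCP (protocol 6)
+ * packets with destination port 80, using one 16 bit IHL-offset range
+ * equation. The TCP destination port sits at offset 2 from the end of
+ * the IP header, i.e. IHL offset 2.
+ *
+ *	struct ipa_filter_rule_type_v01 rule = { 0 };
+ *
+ *	rule.protocol_eq_present = 1;
+ *	rule.protocol_eq = 6;				// IPPROTO_TCP
+ *	rule.num_ihl_offset_range_16 = 1;
+ *	rule.ihl_offset_range_16[0].offset = 2;		// TCP dst port
+ *	rule.ihl_offset_range_16[0].range_low = 80;
+ *	rule.ihl_offset_range_16[0].range_high = 80;
+ *
+ * rule_eq_bitmap must additionally carry the bits for these equations;
+ * the exact bit layout is agreed with the peer and is not defined in
+ * this header.
+ */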
+
+
+enum ipa_ip_type_enum_v01 {
+	IPA_IP_TYPE_ENUM_MIN_ENUM_VAL_V01 = -2147483647,
+	/* To force a 32 bit signed enum.  Do not change or use*/
+	QMI_IPA_IP_TYPE_INVALID_V01 = 0,
+	/*  Invalid IP type identifier */
+	QMI_IPA_IP_TYPE_V4_V01 = 1,
+	/*  IP V4 type */
+	QMI_IPA_IP_TYPE_V6_V01 = 2,
+	/*  IP V6 type */
+	QMI_IPA_IP_TYPE_V4V6_V01 = 3,
+	/*  Applies to both IP types */
+	IPA_IP_TYPE_ENUM_MAX_ENUM_VAL_V01 = 2147483647
+	/* To force a 32 bit signed enum.  Do not change or use*/
+};
+
+
+enum ipa_filter_action_enum_v01 {
+	IPA_FILTER_ACTION_ENUM_MIN_ENUM_VAL_V01 = -2147483647,
+	/* To force a 32 bit signed enum. Do not change or use */
+	QMI_IPA_FILTER_ACTION_INVALID_V01 = 0,
+	/*  Invalid action on filter hit */
+	QMI_IPA_FILTER_ACTION_SRC_NAT_V01 = 1,
+	/*  Pass packet to NAT block for Source NAT */
+	QMI_IPA_FILTER_ACTION_DST_NAT_V01 = 2,
+	/*  Pass packet to NAT block for Destination NAT */
+	QMI_IPA_FILTER_ACTION_ROUTING_V01 = 3,
+	/*  Pass packet to Routing block */
+	QMI_IPA_FILTER_ACTION_EXCEPTION_V01 = 4,
+	/*  Treat packet as exception and send to exception pipe */
+	IPA_FILTER_ACTION_ENUM_MAX_ENUM_VAL_V01 = 2147483647
+	/* To force a 32 bit signed enum.  Do not change or use*/
+};
+
+struct ipa_filter_spec_type_v01 {
+	uint32_t filter_spec_identifier;
+	/*	This field is used to identify a filter spec in the list
+	 *	of filter specs being sent from the client. This field
+	 *	is applicable only in the filter install request and response.
+	 */
+
+	enum ipa_ip_type_enum_v01 ip_type;
+	/*	This field identifies the IP type for which this rule is
+	 *	applicable. The driver needs to identify the filter table
+	 *	(V6 or V4) and this field is essential for that
+	 */
+
+	struct ipa_filter_rule_type_v01 filter_rule;
+	/*	This field specifies the rules in the filter spec. These rules
+	 *	are the ones that are matched against fields in the packet.
+	 */
+
+	enum ipa_filter_action_enum_v01 filter_action;
+	/*	This field specifies the action to be taken when a filter match
+	 *	occurs. The remote side should install this information into the
+	 *	hardware along with the filter equations.
+	 */
+
+	uint8_t is_routing_table_index_valid;
+	/*	Specifies whether the routing table index is present or not.
+	 *	If the action is "QMI_IPA_FILTER_ACTION_EXCEPTION", this
+	 *	parameter need not be provided.
+	 */
+
+	uint32_t route_table_index;
+	/*	This is the index in the routing table that should be used
+	 *	to route the packets if the filter rule is hit
+	 */
+
+	uint8_t is_mux_id_valid;
+	/*	Specifies whether the mux_id is valid */
+
+	uint32_t mux_id;
+	/*	This field identifies the QMAP MUX ID. As a part of QMAP
+	 *	protocol, several data calls may be multiplexed over the
+	 *	same physical transport channel. This identifier is used to
+	 *	identify one such data call. The maximum value for this
+	 *	identifier is 255.
+	 */
+};  /* Type */
+
+struct ipa_filter_spec_ex_type_v01 {
+	enum ipa_ip_type_enum_v01 ip_type;
+	/*	This field identifies the IP type for which this rule is
+	 *	applicable. The driver needs to identify the filter table
+	 *	(V6 or V4) and this field is essential for that
+	 */
+
+	struct ipa_filter_rule_type_v01 filter_rule;
+	/*	This field specifies the rules in the filter spec. These rules
+	 *	are the ones that are matched against fields in the packet.
+	 */
+
+	enum ipa_filter_action_enum_v01 filter_action;
+	/*	This field specifies the action to be taken when a filter match
+	 *	occurs. The remote side should install this information into the
+	 *	hardware along with the filter equations.
+	 */
+
+	uint8_t is_routing_table_index_valid;
+	/*	Specifies whether the routing table index is present or not.
+	 *	If the action is "QMI_IPA_FILTER_ACTION_EXCEPTION", this
+	 *	parameter need not be provided.
+	 */
+
+	uint32_t route_table_index;
+	/*	This is the index in the routing table that should be used
+	 *	to route the packets if the filter rule is hit
+	 */
+
+	uint8_t is_mux_id_valid;
+	/*	Specifies whether the mux_id is valid */
+
+	uint32_t mux_id;
+	/*	This field identifies the QMAP MUX ID. As a part of QMAP
+	 *	protocol, several data calls may be multiplexed over the
+	 *	same physical transport channel. This identifier is used to
+	 *	identify one such data call. The maximum value for this
+	 *	identifier is 255.
+	 */
+
+	uint32_t rule_id;
+	/* Rule ID of the given filter. The rule ID is populated in the
+	 * rule header when installing the rule in IPA.
+	 */
+
+	uint8_t is_rule_hashable;
+	/* Specifies whether the given rule is hashable. */
+};  /* Type */
+
+
+/*  Request Message; This is the message that is exchanged between the
+ *	control point and the service in order to request the installation
+ *	of filtering rules in the hardware block by the remote side.
+ */
+struct ipa_install_fltr_rule_req_msg_v01 {
+	/* Optional */
+	/*  Filter specification list to be installed in the hardware */
+	uint8_t filter_spec_list_valid;
+	/* Must be set to true if filter_spec_list is being passed */
+	uint32_t filter_spec_list_len;
+	/* Must be set to # of elements in filter_spec_list */
+	struct ipa_filter_spec_type_v01
+		filter_spec_list[QMI_IPA_MAX_FILTERS_V01];
+	/*	This structure defines the list of filters that have
+	 *		to be installed in the hardware. The driver installing
+	 *		these rules shall do so in the same order as specified
+	 *		in this list.
+	 */
+
+	/* Optional */
+	/*  Pipe index to install rule */
+	uint8_t source_pipe_index_valid;
+	/* Must be set to true if source_pipe_index is being passed */
+	uint32_t source_pipe_index;
+	/*	This is the source pipe on which the filter rule is to be
+	 *	installed. The requester may not always know the pipe
+	 *	indices. If not specified, the receiver shall install
+	 *	this rule on all the pipes that it controls through
+	 *	which data may be fed into IPA.
+	 */
+
+	/* Optional */
+	/*  Total number of IPv4 filters in the filter spec list */
+	uint8_t num_ipv4_filters_valid;
+	/* Must be set to true if num_ipv4_filters is being passed */
+	uint32_t num_ipv4_filters;
+	/*   Number of IPv4 rules included in filter spec list */
+
+	/* Optional */
+	/*  Total number of IPv6 filters in the filter spec list */
+	uint8_t num_ipv6_filters_valid;
+	/* Must be set to true if num_ipv6_filters is being passed */
+	uint32_t num_ipv6_filters;
+	/* Number of IPv6 rules included in filter spec list */
+
+	/* Optional */
+	/*  List of XLAT filter indices in the filter spec list */
+	uint8_t xlat_filter_indices_list_valid;
+	/* Must be set to true if xlat_filter_indices_list
+	 * is being passed
+	 */
+	uint32_t xlat_filter_indices_list_len;
+	/* Must be set to # of elements in xlat_filter_indices_list */
+	uint32_t xlat_filter_indices_list[QMI_IPA_MAX_FILTERS_V01];
+	/* List of XLAT filter indices. Filter rules at specified indices
+	 * will need to be modified by the receiver if the PDN is XLAT
+	 * before installing them on the associated IPA consumer pipe.
+	 */
+
+	/* Optional */
+	/*  Extended Filter Specification  */
+	uint8_t filter_spec_ex_list_valid;
+	/* Must be set to true if filter_spec_ex_list is being passed */
+	uint32_t filter_spec_ex_list_len;
+	/* Must be set to # of elements in filter_spec_ex_list */
+	struct ipa_filter_spec_ex_type_v01
+		filter_spec_ex_list[QMI_IPA_MAX_FILTERS_V01];
+	/*
+	 * List of filter specifications of filters that must be installed in
+	 *	the IPAv3.x hardware.
+	 *	The driver installing these rules must do so in the same
+	 *	order as specified in this list.
+	 */
+};  /* Message */
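+
+/*
+ * Illustrative sketch (editor's note; example values only): installing a
+ * single IPv4 routing filter through the list-style TLVs of this request.
+ * The filter_rule equations would be filled in as sketched above.
+ *
+ *	struct ipa_install_fltr_rule_req_msg_v01 req = { 0 };
+ *
+ *	req.filter_spec_list_valid = 1;
+ *	req.filter_spec_list_len = 1;
+ *	req.filter_spec_list[0].filter_spec_identifier = 1;
+ *	req.filter_spec_list[0].ip_type = QMI_IPA_IP_TYPE_V4_V01;
+ *	req.filter_spec_list[0].filter_action =
+ *			QMI_IPA_FILTER_ACTION_ROUTING_V01;
+ *	req.filter_spec_list[0].is_routing_table_index_valid = 1;
+ *	req.filter_spec_list[0].route_table_index = 0;
+ *	req.num_ipv4_filters_valid = 1;
+ *	req.num_ipv4_filters = 1;
+ */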
+
+struct ipa_filter_rule_identifier_to_handle_map_v01 {
+	uint32_t filter_spec_identifier;
+	/*	This field is used to identify a filter spec in the list of
+	 *	filter specs being sent from the client. This field is
+	 *	applicable only in the filter install request and response.
+	 */
+	uint32_t filter_handle;
+	/*  This field is used to identify a rule in any subsequent message.
+	 *	This is a value that is provided by the server to the control
+	 *	point
+	 */
+};  /* Type */
+
+/* Response Message; This is the message that is exchanged between the
+ * control point and the service in order to request the
+ * installation of filtering rules in the hardware block by
+ * the remote side.
+ */
+struct ipa_install_fltr_rule_resp_msg_v01 {
+	/* Mandatory */
+	/*  Result Code */
+	struct ipa_qmi_response_type_v01 resp;
+	/*	Standard response type. Contains the following data members:
+	 *	- qmi_result_type -- QMI_RESULT_SUCCESS or QMI_RESULT_FAILURE
+	 *	- qmi_error_type  -- Error code. Possible error code values are
+	 *	described in the error codes section of each message definition.
+	 */
+
+	/* Optional */
+	/*  Filter Handle List */
+	uint8_t filter_handle_list_valid;
+	/* Must be set to true if filter_handle_list is being passed */
+	uint32_t filter_handle_list_len;
+	/* Must be set to # of elements in filter_handle_list */
+	struct ipa_filter_rule_identifier_to_handle_map_v01
+		filter_handle_list[QMI_IPA_MAX_FILTERS_V01];
+	/*
+	 * List of handles returned to the control point. Each handle is
+	 *	mapped to the rule identifier that was specified in the
+	 *	request message. Any further reference to the rule is done
+	 *	using the filter handle.
+	 */
+
+	/* Optional */
+	/*  Rule id List */
+	uint8_t rule_id_valid;
+	/* Must be set to true if rule_id is being passed */
+	uint32_t rule_id_len;
+	/* Must be set to # of elements in rule_id */
+	uint32_t rule_id[QMI_IPA_MAX_FILTERS_V01];
+	/*
+	 * List of rule ids returned to the control point.
+	 *	Any further reference to the rule is done using the
+	 *	filter rule id specified in this list.
+	 */
+};  /* Message */
+
+struct ipa_filter_handle_to_index_map_v01 {
+	uint32_t filter_handle;
+	/*	This is a handle that was given to the remote client that
+	 *	requested the rule addition.
+	 */
+	uint32_t filter_index;
+	/*	This index denotes the location in a filter table, where the
+	 *	filter rule has been installed. The maximum value of this
+	 *	field is 64.
+	 */
+};  /* Type */
+
+/* Request Message; This is the message that is exchanged between the
+ * control point and the service in order to notify the remote driver
+ * of the installation of the filter rule supplied earlier by the
+ * remote driver.
+ */
+struct ipa_fltr_installed_notif_req_msg_v01 {
+	/*	Mandatory	*/
+	/*  Pipe index	*/
+	uint32_t source_pipe_index;
+	/*	This is the source pipe on which the filter rule has been
+	 *	installed or was attempted to be installed
+	 */
+
+	/* Mandatory */
+	/*  Installation Status */
+	enum ipa_qmi_result_type_v01 install_status;
+	/*	This is the status of installation. If this indicates
+	 *	SUCCESS, other optional fields carry additional
+	 *	information
+	 */
+
+	/* Mandatory */
+	/*  List of Filter Indices */
+	uint32_t filter_index_list_len;
+	/* Must be set to # of elements in filter_index_list */
+	struct ipa_filter_handle_to_index_map_v01
+		filter_index_list[QMI_IPA_MAX_FILTERS_V01];
+	/*
+	 * Provides the list of filter indices and the corresponding
+	 *	filter handle. If install_status indicates a
+	 *	failure, the filter indices must be set to a reserved
+	 *	index (255).
+	 */
+
+	/* Optional */
+	/*  Embedded pipe index */
+	uint8_t embedded_pipe_index_valid;
+	/* Must be set to true if embedded_pipe_index is being passed */
+	uint32_t embedded_pipe_index;
+	/*	This index denotes the embedded pipe number on which a call to
+	 *	the same PDN has been made. If this field is set, it denotes
+	 *	that this is a use case where PDN sharing is happening. The
+	 *	embedded pipe is used to send data from the embedded client
+	 *	in the device
+	 */
+
+	/* Optional */
+	/*  Retain Header Configuration */
+	uint8_t retain_header_valid;
+	/* Must be set to true if retain_header is being passed */
+	uint8_t retain_header;
+	/*	This field indicates if the driver installing the rule has
+	 *	turned on the "retain header" bit. If this is true, the
+	 *	header that is removed by IPA is reinserted after the
+	 *	packet processing is completed.
+	 */
+
+	/* Optional */
+	/*  Embedded call Mux Id */
+	uint8_t embedded_call_mux_id_valid;
+	/* Must be set to true if embedded_call_mux_id is being passed */
+	uint32_t embedded_call_mux_id;
+	/*	This identifies one of the many calls that have been originated
+	 *	on the embedded pipe. This is how we identify the PDN gateway
+	 *	to which traffic from the source pipe has to flow.
+	 */
+
+	/* Optional */
+	/*  Total number of IPv4 filters in the filter index list */
+	uint8_t num_ipv4_filters_valid;
+	/* Must be set to true if num_ipv4_filters is being passed */
+	uint32_t num_ipv4_filters;
+	/* Number of IPv4 rules included in filter index list */
+
+	/* Optional */
+	/*  Total number of IPv6 filters in the filter index list */
+	uint8_t num_ipv6_filters_valid;
+	/* Must be set to true if num_ipv6_filters is being passed */
+	uint32_t num_ipv6_filters;
+	/* Number of IPv6 rules included in filter index list */
+
+	/* Optional */
+	/*  Start index on IPv4 filters installed on source pipe */
+	uint8_t start_ipv4_filter_idx_valid;
+	/* Must be set to true if start_ipv4_filter_idx is being passed */
+	uint32_t start_ipv4_filter_idx;
+	/* Start index of IPv4 rules in filter index list */
+
+	/* Optional */
+	/*  Start index on IPv6 filters installed on source pipe */
+	uint8_t start_ipv6_filter_idx_valid;
+	/* Must be set to true if start_ipv6_filter_idx is being passed */
+	uint32_t start_ipv6_filter_idx;
+	/* Start index of IPv6 rules in filter index list */
+
+	/* Optional */
+	/*  List of Rule Ids */
+	uint8_t rule_id_valid;
+	/* Must be set to true if rule_id is being passed */
+	uint32_t rule_id_len;
+	/* Must be set to # of elements in rule_id */
+	uint32_t rule_id[QMI_IPA_MAX_FILTERS_V01];
+	/*
+	 * Provides the list of Rule Ids of rules added in IPA on the given
+	 *	source pipe index. If the install_status TLV indicates a
+	 *	failure, the Rule Ids in this list must be set to a reserved
+	 *	index (255).
+	 */
+
+	/* Optional */
+	/*	List of destination pipe IDs. */
+	uint8_t dst_pipe_id_valid;
+	/* Must be set to true if dst_pipe_id is being passed. */
+	uint32_t dst_pipe_id_len;
+	/* Must be set to # of elements in dst_pipe_id. */
+	uint32_t dst_pipe_id[QMI_IPA_MAX_CLIENT_DST_PIPES_V01];
+	/* Provides the list of destination pipe IDs for a source pipe. */
+
+};  /* Message */
+
+/* Response Message; This is the message that is exchanged between the
+ * control point and the service in order to notify the remote driver
+ * of the installation of the filter rule supplied earlier by the
+ * remote driver.
+ */
+struct ipa_fltr_installed_notif_resp_msg_v01 {
+	/* Mandatory */
+	/*  Result Code */
+	struct ipa_qmi_response_type_v01 resp;
+	/*	Standard response type */
+};  /* Message */
+
+/* Request Message; Notifies the remote driver of the need to clear the data
+ * path to prevent the IPA from being blocked at the head of the processing
+ * pipeline
+ */
+struct ipa_enable_force_clear_datapath_req_msg_v01 {
+	/* Mandatory */
+	/*  Pipe Mask */
+	uint32_t source_pipe_bitmask;
+	/* Set of consumer (source) pipes that must be clear of
+	 * active data transfers.
+	 */
+
+	/* Mandatory */
+	/* Request ID */
+	uint32_t request_id;
+	/* Identifies the ID of the request that is sent to the server.
+	 * The same request ID is used in the message to remove the force_clear
+	 * request. The server is expected to keep track of the request ID and
+	 * the source_pipe_bitmask so that it can revert as needed
+	 */
+
+	/* Optional */
+	/*  Source Throttle State */
+	uint8_t throttle_source_valid;
+	/* Must be set to true if throttle_source is being passed */
+	uint8_t throttle_source;
+	/*  Specifies whether the server is to throttle the data from
+	 *	these consumer (source) pipes after clearing the existing
+	 *	data present in the IPA that was pulled from these pipes.
+	 *	The server is expected to put all the source pipes in the
+	 *	source_pipe_bitmask in the same state
+	 */
+};  /* Message */
+
+/* Response Message; Notifies the remote driver of the need to clear the
+ * data path to prevent the IPA from being blocked at the head of the
+ * processing pipeline
+ */
+struct ipa_enable_force_clear_datapath_resp_msg_v01 {
+	/* Mandatory */
+	/*  Result Code */
+	struct ipa_qmi_response_type_v01 resp;
+	/* Standard response type */
+};  /* Message */
+
+/* Request Message; Notifies the remote driver that the forceful clearing
+ * of the data path can be lifted
+ */
+struct ipa_disable_force_clear_datapath_req_msg_v01 {
+	/* Mandatory */
+	/* Request ID */
+	uint32_t request_id;
+	/* Identifies the request that was sent to the server to
+	 * forcibly clear the data path. This request simply undoes
+	 * the operation done in that request
+	 */
+};  /* Message */
+
+/* Response Message; Notifies the remote driver that the forceful clearing
+ * of the data path can be lifted
+ */
+struct ipa_disable_force_clear_datapath_resp_msg_v01 {
+	/* Mandatory */
+	/*  Result Code */
+	struct ipa_qmi_response_type_v01 resp;
+	/* Standard response type */
+};  /* Message */
+
+enum ipa_peripheral_speed_enum_v01 {
+	IPA_PERIPHERAL_SPEED_ENUM_MIN_ENUM_VAL_V01 = -2147483647,
+	/* To force a 32 bit signed enum.  Do not change or use */
+	QMI_IPA_PER_USB_FS_V01 = 1,
+	/*  Full-speed USB connection */
+	QMI_IPA_PER_USB_HS_V01 = 2,
+	/*  High-speed USB connection */
+	QMI_IPA_PER_USB_SS_V01 = 3,
+	/*  Super-speed USB connection */
+	QMI_IPA_PER_WLAN_V01 = 4,
+	/*  WLAN connection */
+	IPA_PERIPHERAL_SPEED_ENUM_MAX_ENUM_VAL_V01 = 2147483647
+	/* To force a 32 bit signed enum.  Do not change or use*/
+};
+
+enum ipa_pipe_mode_enum_v01 {
+	IPA_PIPE_MODE_ENUM_MIN_ENUM_VAL_V01 = -2147483647,
+	/* To force a 32 bit signed enum.  Do not change or use */
+	QMI_IPA_PIPE_MODE_HW_V01 = 1,
+	/*  Pipe is connected with a hardware block */
+	QMI_IPA_PIPE_MODE_SW_V01 = 2,
+	/*  Pipe is controlled by the software */
+	IPA_PIPE_MODE_ENUM_MAX_ENUM_VAL_V01 = 2147483647
+	/* To force a 32 bit signed enum.  Do not change or use */
+};
+
+enum ipa_peripheral_type_enum_v01 {
+	IPA_PERIPHERAL_TYPE_ENUM_MIN_ENUM_VAL_V01 = -2147483647,
+	/* To force a 32 bit signed enum.  Do not change or use */
+	QMI_IPA_PERIPHERAL_USB_V01 = 1,
+	/*  Specifies a USB peripheral */
+	QMI_IPA_PERIPHERAL_HSIC_V01 = 2,
+	/*  Specifies an HSIC peripheral */
+	QMI_IPA_PERIPHERAL_PCIE_V01 = 3,
+	/*  Specifies a PCIe	peripheral */
+	IPA_PERIPHERAL_TYPE_ENUM_MAX_ENUM_VAL_V01 = 2147483647
+	/* To force a 32 bit signed enum.  Do not change or use */
+};
+
+struct ipa_config_req_msg_v01 {
+	/* Optional */
+	/*  Peripheral Type */
+	uint8_t peripheral_type_valid;
+	/* Must be set to true if peripheral_type is being passed */
+	enum ipa_peripheral_type_enum_v01 peripheral_type;
+	/* Informs the remote driver about the peripheral for
+	 * which this configuration information is relevant. Values:
+	 *	- QMI_IPA_PERIPHERAL_USB (1) -- Specifies a USB peripheral
+	 *	- QMI_IPA_PERIPHERAL_HSIC(2) -- Specifies an HSIC peripheral
+	 *	- QMI_IPA_PERIPHERAL_PCIE(3) -- Specifies a PCIe peripheral
+	 */
+
+	/* Optional */
+	/*  HW Deaggregation Support */
+	uint8_t hw_deaggr_supported_valid;
+	/* Must be set to true if hw_deaggr_supported is being passed */
+	uint8_t hw_deaggr_supported;
+	/* Informs the remote driver whether the local IPA driver
+	 * allows de-aggregation to be performed in the hardware
+	 */
+
+	/* Optional */
+	/*  Maximum Aggregation Frame Size */
+	uint8_t max_aggr_frame_size_valid;
+	/* Must be set to true if max_aggr_frame_size is being passed */
+	uint32_t max_aggr_frame_size;
+	/* Specifies the maximum size of the aggregated frame that
+	 * the remote driver can expect from this execution environment
+	 *	- Valid range: 128 bytes to 32768 bytes
+	 */
+
+	/* Optional */
+	/*  IPA Ingress Pipe Mode */
+	uint8_t ipa_ingress_pipe_mode_valid;
+	/* Must be set to true if ipa_ingress_pipe_mode is being passed */
+
+	enum ipa_pipe_mode_enum_v01 ipa_ingress_pipe_mode;
+	/* Indicates to the remote driver if the ingress pipe into the
+	 *	IPA is in direct connection with another hardware block or
+	 *	if the producer of data to this ingress pipe is a software
+	 *  module. Values:
+	 *	-QMI_IPA_PIPE_MODE_HW(1) --Pipe is connected with hardware block
+	 *	-QMI_IPA_PIPE_MODE_SW(2) --Pipe is controlled by the software
+	 */
+
+	/* Optional */
+	/*  Peripheral Speed Info */
+	uint8_t peripheral_speed_info_valid;
+	/* Must be set to true if peripheral_speed_info is being passed */
+
+	enum ipa_peripheral_speed_enum_v01 peripheral_speed_info;
+	/* Indicates the speed that the peripheral connected to the IPA supports
+	 * Values:
+	 *	- QMI_IPA_PER_USB_FS (1) --  Full-speed USB connection
+	 *	- QMI_IPA_PER_USB_HS (2) --  High-speed USB connection
+	 *	- QMI_IPA_PER_USB_SS (3) --  Super-speed USB connection
+	 *  - QMI_IPA_PER_WLAN   (4) --  WLAN connection
+	 */
+
+	/* Optional */
+	/*  Downlink Accumulation Time limit */
+	uint8_t dl_accumulation_time_limit_valid;
+	/* Must be set to true if dl_accumulation_time_limit is being passed */
+	uint32_t dl_accumulation_time_limit;
+	/* Informs the remote driver about the time for which data
+	 * is accumulated in the downlink direction before it is pushed into the
+	 * IPA (downlink is with respect to the WWAN air interface)
+	 * - Units: milliseconds
+	 * - Maximum value: 255
+	 */
+
+	/* Optional */
+	/*  Downlink Accumulation Packet limit */
+	uint8_t dl_accumulation_pkt_limit_valid;
+	/* Must be set to true if dl_accumulation_pkt_limit is being passed */
+	uint32_t dl_accumulation_pkt_limit;
+	/* Informs the remote driver about the number of packets
+	 * that are to be accumulated in the downlink direction before they
+	 * are pushed into the IPA - Maximum value: 1023
+	 */
+
+	/* Optional */
+	/*  Downlink Accumulation Byte Limit */
+	uint8_t dl_accumulation_byte_limit_valid;
+	/* Must be set to true if dl_accumulation_byte_limit is being passed */
+	uint32_t dl_accumulation_byte_limit;
+	/* Informs the remote driver about the number of bytes
+	 * that are to be accumulated in the downlink direction before they
+	 * are pushed into the IPA - Maximum value: TBD
+	 */
+
+	/* Optional */
+	/*  Uplink Accumulation Time Limit */
+	uint8_t ul_accumulation_time_limit_valid;
+	/* Must be set to true if ul_accumulation_time_limit is being passed */
+	uint32_t ul_accumulation_time_limit;
+	/* Informs the remote driver about the time for which data
+	 * is to be accumulated in the uplink direction before it is pushed into
+	 * the IPA (uplink is with respect to the WWAN air interface).
+	 * - Units: milliseconds
+	 * - Maximum value: 255
+	 */
+
+	/* Optional */
+	/*  HW Control Flags */
+	uint8_t hw_control_flags_valid;
+	/* Must be set to true if hw_control_flags is being passed */
+	uint32_t hw_control_flags;
+	/* Informs the remote driver about the hardware control flags:
+	 *	- Bit 0: IPA_HW_FLAG_HALT_SYSTEM_ON_NON_TERMINAL_FAILURE --
+	 *	Indicates to the hardware that it must not continue with
+	 *	any subsequent operation even if the failure is not terminal
+	 *	- Bit 1: IPA_HW_FLAG_NO_REPORT_MHI_CHANNEL_ERROR --
+	 *	Indicates to the hardware that it is not required to report
+	 *	channel errors to the host.
+	 *	- Bit 2: IPA_HW_FLAG_NO_REPORT_MHI_CHANNEL_WAKE_UP --
+	 *	Indicates to the hardware that it is not required to generate
+	 *	wake-up events to the host.
+	 *	- Bit 4: IPA_HW_FLAG_WORK_OVER_DDR --
+	 *	Indicates to the hardware that it is accessing addresses in
+	 *  the DDR and not over PCIe
+	 *	- Bit 5: IPA_HW_FLAG_INTERRUPT_MODE_CTRL_FLAG --
+	 *	Indicates whether the device must
+	 *	raise an event to let the host know that it is going into an
+	 *	interrupt mode (no longer polling for data/buffer availability)
+	 */
+
+	/* Optional */
+	/*  Uplink MSI Event Threshold */
+	uint8_t ul_msi_event_threshold_valid;
+	/* Must be set to true if ul_msi_event_threshold is being passed */
+	uint32_t ul_msi_event_threshold;
+	/* Informs the remote driver about the threshold that will
+	 * cause an interrupt (MSI) to be fired to the host. This ensures
+	 * that the remote driver does not accumulate an excessive number of
+	 * events before firing an interrupt.
+	 * This threshold is applicable for data moved in the UL direction.
+	 * - Maximum value: 65535
+	 */
+
+	/* Optional */
+	/*  Downlink MSI Event Threshold */
+	uint8_t dl_msi_event_threshold_valid;
+	/* Must be set to true if dl_msi_event_threshold is being passed */
+	uint32_t dl_msi_event_threshold;
+	/* Informs the remote driver about the threshold that will
+	 * cause an interrupt (MSI) to be fired to the host. This ensures
+	 * that the remote driver does not accumulate an excessive number of
+	 * events before firing an interrupt.
+	 * This threshold is applicable for data that is moved in the
+	 * DL direction - Maximum value: 65535
+	 */
+
+	/* Optional */
+	/*  Uplink Fifo Size */
+	uint8_t ul_fifo_size_valid;
+	/* Must be set to true if ul_fifo_size is being passed */
+	uint32_t ul_fifo_size;
+	/*
+	 * Informs the remote driver about the total uplink xDCI
+	 *	buffer size that holds the complete aggregated frame,
+	 *	or the BAM data FIFO size of the peripheral channel/pipe
+	 *	(in bytes). This TLV deprecates the max_aggr_frame_size
+	 *	TLV from version 1.9 onward; max_aggr_frame_size is ignored
+	 *	in the presence of this TLV.
+	 */
+
+	/* Optional */
+	/*  Downlink Fifo Size */
+	uint8_t dl_fifo_size_valid;
+	/* Must be set to true if dl_fifo_size is being passed */
+	uint32_t dl_fifo_size;
+	/*
+	 * Informs the remote driver about the total downlink xDCI buffering
+	 *	capacity or the BAM data FIFO size of the peripheral
+	 *	channel/pipe (in bytes). dl_fifo_size = n * dl_buf_size.
+	 *	This deprecates the max_aggr_frame_size field; if this
+	 *	value is set, max_aggr_frame_size is ignored.
+	 */
+
+	/* Optional */
+	/*  Downlink Buffer Size */
+	uint8_t dl_buf_size_valid;
+	/* Must be set to true if dl_buf_size is being passed */
+	uint32_t dl_buf_size;
+	/*  Informs the remote driver about the single xDCI buffer size,
+	 *  in bytes. This is applicable only in GSI mode.
+	 */
+};  /* Message */
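+
+/*
+ * Illustrative sketch (editor's note): composing hw_control_flags from
+ * the bit positions documented above. The IPA_HW_FLAG_* names appear in
+ * the comment only and are not defined by this header, so plain shifts
+ * are used here.
+ *
+ *	struct ipa_config_req_msg_v01 cfg = { 0 };
+ *
+ *	cfg.hw_control_flags_valid = 1;
+ *	cfg.hw_control_flags = (1 << 0)		// halt on non-terminal failure
+ *			     | (1 << 4);	// work over DDR, not PCIe
+ */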
+
+/* Response Message; Notifies the remote driver of the configuration
+ * information
+ */
+struct ipa_config_resp_msg_v01 {
+	/* Mandatory */
+	/*  Result Code */
+	struct ipa_qmi_response_type_v01 resp;
+	/* Standard response type. */
+}; /* Message */
+
+enum ipa_stats_type_enum_v01 {
+	IPA_STATS_TYPE_ENUM_MIN_ENUM_VAL_V01 = -2147483647,
+	/* To force a 32 bit signed enum.  Do not change or use */
+	QMI_IPA_STATS_TYPE_INVALID_V01 = 0,
+	/* Invalid stats type identifier */
+	QMI_IPA_STATS_TYPE_PIPE_V01 = 1,
+	/* Pipe stats type */
+	QMI_IPA_STATS_TYPE_FILTER_RULES_V01 = 2,
+	/* Filter rule stats type */
+	IPA_STATS_TYPE_ENUM_MAX_ENUM_VAL_V01 = 2147483647
+	/* To force a 32 bit signed enum.  Do not change or use */
+};
+
+struct ipa_pipe_stats_info_type_v01 {
+	uint32_t pipe_index;
+	/* Pipe index for statistics to be retrieved. */
+
+	uint64_t num_ipv4_packets;
+	/* Accumulated number of IPv4 packets over this pipe. */
+
+	uint64_t num_ipv4_bytes;
+	/* Accumulated number of IPv4 bytes over this pipe. */
+
+	uint64_t num_ipv6_packets;
+	/* Accumulated number of IPv6 packets over this pipe. */
+
+	uint64_t num_ipv6_bytes;
+	/* Accumulated number of IPv6 bytes over this pipe. */
+};
+
+struct ipa_stats_type_filter_rule_v01 {
+	uint32_t filter_rule_index;
+	/* Filter rule index for statistics to be retrieved. */
+
+	uint64_t num_packets;
+	/* Accumulated number of packets over this filter rule. */
+};
+
+/* Request Message; Retrieve the data statistics collected on modem
+ * IPA driver.
+ */
+struct ipa_get_data_stats_req_msg_v01 {
+	/* Mandatory */
+	/*  Stats Type  */
+	enum ipa_stats_type_enum_v01 ipa_stats_type;
+	/* Indicates the type of statistics to be retrieved. */
+
+	/* Optional */
+	/* Reset Statistics */
+	uint8_t reset_stats_valid;
+	/* Must be set to true if reset_stats is being passed */
+	uint8_t reset_stats;
+	/* Option to reset the specific type of data statistics
+	 * currently collected.
+	 */
+};  /* Message */
+
+/* Response Message; Retrieve the data statistics collected
+ * on modem IPA driver.
+ */
+struct ipa_get_data_stats_resp_msg_v01 {
+	/* Mandatory */
+	/*  Result Code */
+	struct ipa_qmi_response_type_v01 resp;
+	/* Standard response type. */
+
+	/* Optional */
+	/*  Stats Type  */
+	uint8_t ipa_stats_type_valid;
+	/* Must be set to true if ipa_stats_type is passed */
+	enum ipa_stats_type_enum_v01 ipa_stats_type;
+	/* Indicates the type of statistics that are retrieved. */
+
+	/* Optional */
+	/*  Uplink Source Pipe Statistics List */
+	uint8_t ul_src_pipe_stats_list_valid;
+	/* Must be set to true if ul_src_pipe_stats_list is being passed */
+	uint32_t ul_src_pipe_stats_list_len;
+	/* Must be set to # of elements in ul_src_pipe_stats_list */
+	struct ipa_pipe_stats_info_type_v01
+		ul_src_pipe_stats_list[QMI_IPA_MAX_PIPES_V01];
+	/* List of all Uplink pipe statistics that are retrieved. */
+
+	/* Optional */
+	/*  Downlink Destination Pipe Statistics List */
+	uint8_t dl_dst_pipe_stats_list_valid;
+	/* Must be set to true if dl_dst_pipe_stats_list is being passed */
+	uint32_t dl_dst_pipe_stats_list_len;
+	/* Must be set to # of elements in dl_dst_pipe_stats_list */
+	struct ipa_pipe_stats_info_type_v01
+		dl_dst_pipe_stats_list[QMI_IPA_MAX_PIPES_V01];
+	/* List of all Downlink pipe statistics that are retrieved. */
+
+	/* Optional */
+	/*  Downlink Filter Rule Stats List */
+	uint8_t dl_filter_rule_stats_list_valid;
+	/* Must be set to true if dl_filter_rule_stats_list is being passed */
+	uint32_t dl_filter_rule_stats_list_len;
+	/* Must be set to # of elements in dl_filter_rule_stats_list */
+	struct ipa_stats_type_filter_rule_v01
+		dl_filter_rule_stats_list[QMI_IPA_MAX_FILTERS_V01];
+	/* List of all Downlink filter rule statistics retrieved. */
+};  /* Message */
+
+struct ipa_apn_data_stats_info_type_v01 {
+	uint32_t mux_id;
+	/* Indicates the MUX ID associated with the APN for which the data
+	 * usage statistics are queried
+	 */
+
+	uint64_t num_ul_packets;
+	/* Accumulated number of uplink packets corresponding to
+	 * this Mux ID
+	 */
+
+	uint64_t num_ul_bytes;
+	/* Accumulated number of uplink bytes corresponding to
+	 * this Mux ID
+	 */
+
+	uint64_t num_dl_packets;
+	/* Accumulated number of downlink packets corresponding
+	 * to this Mux ID
+	 */
+
+	uint64_t num_dl_bytes;
+	/* Accumulated number of downlink bytes corresponding to
+	 * this Mux ID
+	 */
+};  /* Type */
+
+/* Request Message; Retrieve the APN data statistics collected from modem */
+struct ipa_get_apn_data_stats_req_msg_v01 {
+	/* Optional */
+	/*  Mux ID List */
+	uint8_t mux_id_list_valid;
+	/* Must be set to true if mux_id_list is being passed */
+	uint32_t mux_id_list_len;
+	/* Must be set to # of elements in mux_id_list */
+	uint32_t mux_id_list[QMI_IPA_MAX_APN_V01];
+	/* The list of MUX IDs associated with APNs for which the data usage
+	 * statistics are being retrieved
+	 */
+};  /* Message */
+
+/* Response Message; Retrieve the APN data statistics collected from modem */
+struct ipa_get_apn_data_stats_resp_msg_v01 {
+	/* Mandatory */
+	/*  Result Code */
+	struct ipa_qmi_response_type_v01 resp;
+	/* Standard response type.*/
+
+	/* Optional */
+	/* APN Data Statistics List */
+	uint8_t apn_data_stats_list_valid;
+	/* Must be set to true if apn_data_stats_list is being passed */
+	uint32_t apn_data_stats_list_len;
+	/* Must be set to # of elements in apn_data_stats_list */
+	struct ipa_apn_data_stats_info_type_v01
+		apn_data_stats_list[QMI_IPA_MAX_APN_V01];
+	/* List of APN data retrieved as requested per mux_id.
+	 * For now, monitoring of only one APN is supported on the modem
+	 * driver. This is a list to allow support for more APNs in the
+	 * future.
+	 */
+};  /* Message */
+
+struct ipa_data_usage_quota_info_type_v01 {
+	uint32_t mux_id;
+	/* Indicates the MUX ID associated with the APN for which the data usage
+	 * quota needs to be set
+	 */
+
+	uint64_t num_Mbytes;
+	/* Quota value, in megabytes, to be set on the APN associated
+	 * with this MUX ID.
+	 */
+};  /* Type */
+
+/* Request Message; Master driver sets a data usage quota value on
+ * modem driver
+ */
+struct ipa_set_data_usage_quota_req_msg_v01 {
+	/* Optional */
+	/* APN Quota List */
+	uint8_t apn_quota_list_valid;
+	/* Must be set to true if apn_quota_list is being passed */
+	uint32_t apn_quota_list_len;
+	/* Must be set to # of elements in apn_quota_list */
+	struct ipa_data_usage_quota_info_type_v01
+		apn_quota_list[QMI_IPA_MAX_APN_V01];
+	/* The list of APNs on which a data usage quota is to be set on the
+	 * modem driver. For now, monitoring of only one APN is supported;
+	 * this is a list to allow support for more APNs in the future.
+	 */
+};  /* Message */
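+
+/*
+ * Illustrative sketch (editor's note; example values only): setting a
+ * 100 MB quota on the APN bound to QMAP mux_id 1.
+ *
+ *	struct ipa_set_data_usage_quota_req_msg_v01 req = { 0 };
+ *
+ *	req.apn_quota_list_valid = 1;
+ *	req.apn_quota_list_len = 1;
+ *	req.apn_quota_list[0].mux_id = 1;
+ *	req.apn_quota_list[0].num_Mbytes = 100;
+ */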
+
+/* Response Message; Master driver sets a data usage quota on the
+ * modem driver.
+ */
+struct ipa_set_data_usage_quota_resp_msg_v01 {
+	/* Mandatory */
+	/* Result Code */
+	struct ipa_qmi_response_type_v01 resp;
+	/* Standard response type.*/
+};  /* Message */
+
+/* Indication Message; Modem driver sends this indication to master
+ * driver when the data usage quota is reached
+ */
+struct ipa_data_usage_quota_reached_ind_msg_v01 {
+	/* Mandatory */
+	/*  APN Quota List */
+	struct ipa_data_usage_quota_info_type_v01 apn;
+	/* Indicates the APN for which the previously set quota has been
+	 * reached. For now, monitoring of only one APN is supported on the
+	 * modem driver.
+	 */
+};  /* Message */
+
+/* Request Message; Master driver requests the modem driver to terminate
+ * the current data usage quota monitoring session.
+ */
+struct ipa_stop_data_usage_quota_req_msg_v01 {
+	/* This element is a placeholder to prevent the declaration of
+	 *  an empty struct. DO NOT USE THIS FIELD UNDER ANY CIRCUMSTANCE
+	 */
+	char __placeholder;
+};  /* Message */
+
+/* Response Message; Master driver requests the modem driver to terminate
+ * the current quota monitoring session.
+ */
+struct ipa_stop_data_usage_quota_resp_msg_v01 {
+	/* Mandatory */
+	/*  Result Code */
+	struct ipa_qmi_response_type_v01 resp;
+	/* Standard response type. */
+};  /* Message */
+
+/* Request Message; Requests the installation of filtering rules in
+ * the hardware block on the remote side.
+ */
+struct ipa_install_fltr_rule_req_ex_msg_v01 {
+
+	/* Optional */
+	/*  Extended Filter Specification  */
+	uint8_t filter_spec_ex_list_valid;
+	uint32_t filter_spec_ex_list_len;
+	struct ipa_filter_spec_ex_type_v01
+		filter_spec_ex_list[QMI_IPA_MAX_FILTERS_EX_V01];
+	/* List of filter specifications of filters that must be installed
+	 * in the IPAv3.x hardware. The driver installing these rules must
+	 * do so in the same order as specified in this list.
+	 */
+
+	/* Optional */
+	/* Pipe Index to Install Rule */
+	uint8_t source_pipe_index_valid;
+	uint32_t source_pipe_index;
+	/* Pipe index to install the filter rule. The requester may not
+	 * always know the pipe indices. If not specified, the receiver
+	 * must install this rule on all pipes that it controls, through
+	 * which data may be fed into the IPA.
+	 */
+
+	/* Optional */
+	/* Total Number of IPv4 Filters in the Filter Spec List */
+	uint8_t num_ipv4_filters_valid;
+	uint32_t num_ipv4_filters;
+	/* Number of IPv4 rules included in the filter specification list. */
+
+	/* Optional */
+	/* Total Number of IPv6 Filters in the Filter Spec List */
+	uint8_t num_ipv6_filters_valid;
+	uint32_t num_ipv6_filters;
+	/* Number of IPv6 rules included in the filter specification list. */
+
+	/* Optional */
+	/* List of XLAT Filter Indices in the Filter Spec List */
+	uint8_t xlat_filter_indices_list_valid;
+	uint32_t xlat_filter_indices_list_len;
+	uint32_t xlat_filter_indices_list[QMI_IPA_MAX_FILTERS_EX_V01];
+	/* List of XLAT filter indices. Filter rules at specified indices
+	 * must be modified by the receiver if the PDN is XLAT before
+	 * installing them on the associated IPA consumer pipe.
+	 */
+};  /* Message */
+
+/* Response Message; Requests installation of filtering rules in the hardware
+ * block on the remote side.
+ */
+struct ipa_install_fltr_rule_resp_ex_msg_v01 {
+	/* Mandatory */
+	/* Result Code */
+	struct ipa_qmi_response_type_v01 resp;
+	/* Standard response type. Contains the following data members:
+	 * - qmi_result_type -- QMI_RESULT_SUCCESS or QMI_RESULT_FAILURE
+	 * - qmi_error_type  -- Error code. Possible error code values are
+	 *   described in the error codes section of each message definition.
+	 */
+
+	/* Optional */
+	/* Rule ID List */
+	uint8_t rule_id_valid;
+	uint32_t rule_id_len;
+	uint32_t rule_id[QMI_IPA_MAX_FILTERS_EX_V01];
+	/* List of rule IDs returned to the control point. Any further
+	 * reference to the rule is done using the filter rule ID specified
+	 * in this list.
+	 */
+};  /* Message */
+
+/*
+ * Request Message; Requests the modem IPA driver to enable or
+ * disable collection of per client statistics.
+ */
+struct ipa_enable_per_client_stats_req_msg_v01 {
+
+	/* Mandatory */
+	/* Collect statistics per client */
+	uint8_t enable_per_client_stats;
+	/*
+	 * Indicates whether to start or stop collecting
+	 * per client statistics.
+	 */
+};  /* Message */
+
+/*
+ * Response Message; Requests the modem IPA driver to enable or disable
+ * collection of per client statistics.
+ */
+struct ipa_enable_per_client_stats_resp_msg_v01 {
+
+	/* Mandatory */
+	/*  Result Code */
+	struct ipa_qmi_response_type_v01 resp;
+	/* Standard response type. */
+};  /* Message */
+
+struct ipa_per_client_stats_info_type_v01 {
+
+	uint32_t client_id;
+	/*
+	 * ID of the client on the APPS processor side for which the modem
+	 * processor needs to send uplink/downlink statistics.
+	 */
+
+	uint32_t src_pipe_id;
+	/*
+	 * IPA consumer pipe on which the client on the APPS side sends
+	 * uplink data to the modem.
+	 */
+
+	uint64_t num_ul_ipv4_bytes;
+	/*
+	 * Accumulated number of uplink IPv4 bytes for a client.
+	 */
+
+	uint64_t num_ul_ipv6_bytes;
+	/*
+	 * Accumulated number of uplink IPv6 bytes for a client.
+	 */
+
+	uint64_t num_dl_ipv4_bytes;
+	/*
+	 * Accumulated number of downlink IPv4 bytes for a client.
+	 */
+
+	uint64_t num_dl_ipv6_bytes;
+	/*
+	 * Accumulated number of downlink IPv6 bytes for a client.
+	 */
+
+	uint32_t num_ul_ipv4_pkts;
+	/*
+	 * Accumulated number of uplink IPv4 packets for a client.
+	 */
+
+	uint32_t num_ul_ipv6_pkts;
+	/*
+	 * Accumulated number of uplink IPv6 packets for a client.
+	 */
+
+	uint32_t num_dl_ipv4_pkts;
+	/*
+	 * Accumulated number of downlink IPv4 packets for a client.
+	 */
+
+	uint32_t num_dl_ipv6_pkts;
+	/*
+	 * Accumulated number of downlink IPv6 packets for a client.
+	 */
+};  /* Type */
+
+/*
+ * Request Message; Requests the modem IPA driver to provide statistics
+ * for a given client.
+ */
+struct ipa_get_stats_per_client_req_msg_v01 {
+
+	/* Mandatory */
+	/*  Client id */
+	uint32_t client_id;
+	/*
+	 * ID of the client on the APPS processor side for which the modem
+	 * processor needs to send uplink/downlink statistics. If the client
+	 * ID is specified as 0xffffffff, Q6 sends the stats for all the
+	 * clients of the specified source pipe.
+	 */
+
+	/* Mandatory */
+	/*  Source pipe id */
+	uint32_t src_pipe_id;
+	/*
+	 * IPA consumer pipe on which the client on the APPS side sends
+	 * uplink data to the modem. In the future, this implementation can
+	 * be extended to accept 0xffffffff as the source pipe ID, in which
+	 * case Q6 would send the stats of all clients across all the
+	 * different tethered pipes.
+	 */
+
+	/* Optional */
+	/*  Reset client statistics. */
+	uint8_t reset_stats_valid;
+	/* Must be set to true if reset_stats is being passed. */
+	uint8_t reset_stats;
+	/*
+	 * Option to reset the statistics currently collected by modem for this
+	 * particular client.
+	 */
+};  /* Message */
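+
+/*
+ * Illustrative sketch (editor's note; the pipe index is an example
+ * value): querying statistics for every client on one source pipe via
+ * the documented 0xffffffff wildcard client ID.
+ *
+ *	struct ipa_get_stats_per_client_req_msg_v01 req = { 0 };
+ *
+ *	req.client_id = 0xffffffff;	// all clients of this pipe
+ *	req.src_pipe_id = 5;		// example consumer pipe index
+ */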
+
+/*
+ * Response Message; Requests the modem IPA driver to provide statistics
+ * for a given client.
+ */
+struct ipa_get_stats_per_client_resp_msg_v01 {
+
+	/* Mandatory */
+	/*  Result Code */
+	struct ipa_qmi_response_type_v01 resp;
+	/* Standard response type. */
+
+	/* Optional */
+	/*  Per clients Statistics List */
+	uint8_t per_client_stats_list_valid;
+	/* Must be set to true if per_client_stats_list is being passed. */
+	uint32_t per_client_stats_list_len;
+	/* Must be set to # of elements in per_client_stats_list. */
+	struct ipa_per_client_stats_info_type_v01
+		per_client_stats_list[QMI_IPA_MAX_PER_CLIENTS_V01];
+	/*
+	 * List of all per client statistics that are retrieved.
+	 */
+};  /* Message */
+
+struct ipa_ul_firewall_rule_type_v01 {
+
+	enum ipa_ip_type_enum_v01 ip_type;
+	/*
+	 * IP type for which this rule is applicable.
+	 * The driver must identify the filter table (v6 or v4), and this
+	 * field is essential for that. Values:
+	 * - QMI_IPA_IP_TYPE_INVALID (0) --  Invalid IP type identifier
+	 * - QMI_IPA_IP_TYPE_V4 (1) --  IPv4 type
+	 * - QMI_IPA_IP_TYPE_V6 (2) --  IPv6 type
+	 */
+
+	struct ipa_filter_rule_type_v01 filter_rule;
+	/*
+	 * Rules in the filter specification. These rules are the
+	 * ones that are matched against fields in the packet.
+	 * Currently we only send IPv6 whitelist rules to Q6.
+	 */
+};  /* Type */
+
+/*
+ * Request Message; Requests the remote IPA driver to install uplink
+ * firewall rules.
+ */
+struct ipa_configure_ul_firewall_rules_req_msg_v01 {
+
+	/* Optional */
+	/*  Uplink Firewall Specification  */
+	uint32_t firewall_rules_list_len;
+	/* Must be set to # of elements in firewall_rules_list. */
+	struct ipa_ul_firewall_rule_type_v01
+		firewall_rules_list[QMI_IPA_MAX_UL_FIREWALL_RULES_V01];
+	/*
+	 * List of uplink firewall specifications of filters that must be
+	 * installed.
+	 */
+
+	uint32_t mux_id;
+	/*
+	 * QMAP Mux ID. As a part of the QMAP protocol,
+	 * several data calls may be multiplexed over the same physical
+	 * transport channel. This identifier is used to identify one
+	 * such data call. The maximum value for this identifier is 255.
+	 */
+
+	/* Optional */
+	uint8_t disable_valid;
+	/* Must be set to true if disable is being passed. */
+	uint8_t disable;
+	/*
+	 * Indicates whether uplink firewall needs to be enabled or disabled.
+	 */
+
+	/* Optional */
+	uint8_t are_blacklist_filters_valid;
+	/* Must be set to true if are_blacklist_filters is being passed. */
+	uint8_t are_blacklist_filters;
+	/*
+	 * Indicates whether the filters received as part of this message
+	 * are blacklist filters, i.e., uplink packets matching these rules
+	 * are to be dropped.
+	 */
+};  /* Message */
+
+/*
+ * Response Message; Requests the remote IPA driver to install
+ * uplink firewall rules.
+ */
+struct ipa_configure_ul_firewall_rules_resp_msg_v01 {
+
+	/* Mandatory */
+	/* Result Code */
+	struct ipa_qmi_response_type_v01 resp;
+	/*
+	 * Standard response type. Contains the following data members:
+	 * qmi_result_type -- QMI_RESULT_SUCCESS or QMI_RESULT_FAILURE
+	 * qmi_error_type  -- Error code. Possible error code values are
+	 * described in the error codes section of each message definition.
+	 */
+};  /* Message */
+
+enum ipa_ul_firewall_status_enum_v01 {
+	IPA_UL_FIREWALL_STATUS_ENUM_MIN_ENUM_VAL_V01 = -2147483647,
+	/* To force a 32 bit signed enum.  Do not change or use*/
+	QMI_IPA_UL_FIREWALL_STATUS_SUCCESS_V01 = 0,
+	/* Indicates that the uplink firewall rules
+	 * are configured successfully.
+	 */
+	QMI_IPA_UL_FIREWALL_STATUS_FAILURE_V01 = 1,
+	/* Indicates that the uplink firewall rules
+	 * are not configured successfully.
+	 */
+	IPA_UL_FIREWALL_STATUS_ENUM_MAX_ENUM_VAL_V01 = 2147483647
+	/* To force a 32 bit signed enum.  Do not change or use*/
+};
+
+struct ipa_ul_firewall_config_result_type_v01 {
+
+	enum ipa_ul_firewall_status_enum_v01 is_success;
+	/*
+	 * Indicates whether the uplink firewall rules are configured
+	 * successfully.
+	 */
+
+	uint32_t mux_id;
+	/*
+	 * QMAP Mux ID. As a part of the QMAP protocol,
+	 * several data calls may be multiplexed over the same physical
+	 * transport channel. This identifier is used to identify one
+	 * such data call. The maximum value for this identifier is 255.
+	 */
+};
+
+/*
+ * Indication Message; Reports the result of configuring the uplink
+ * firewall rules.
+ */
+struct ipa_configure_ul_firewall_rules_ind_msg_v01 {
+
+	struct ipa_ul_firewall_config_result_type_v01 result;
+};  /* Message */
+
+
+/* Service Message Definitions */
+#define QMI_IPA_INDICATION_REGISTER_REQ_V01 0x0020
+#define QMI_IPA_INDICATION_REGISTER_RESP_V01 0x0020
+#define QMI_IPA_INIT_MODEM_DRIVER_REQ_V01 0x0021
+#define QMI_IPA_INIT_MODEM_DRIVER_RESP_V01 0x0021
+#define QMI_IPA_MASTER_DRIVER_INIT_COMPLETE_IND_V01 0x0022
+#define QMI_IPA_INSTALL_FILTER_RULE_REQ_V01 0x0023
+#define QMI_IPA_INSTALL_FILTER_RULE_RESP_V01 0x0023
+#define QMI_IPA_FILTER_INSTALLED_NOTIF_REQ_V01 0x0024
+#define QMI_IPA_FILTER_INSTALLED_NOTIF_RESP_V01 0x0024
+#define QMI_IPA_ENABLE_FORCE_CLEAR_DATAPATH_REQ_V01 0x0025
+#define QMI_IPA_ENABLE_FORCE_CLEAR_DATAPATH_RESP_V01 0x0025
+#define QMI_IPA_DISABLE_FORCE_CLEAR_DATAPATH_REQ_V01 0x0026
+#define QMI_IPA_DISABLE_FORCE_CLEAR_DATAPATH_RESP_V01 0x0026
+#define QMI_IPA_CONFIG_REQ_V01 0x0027
+#define QMI_IPA_CONFIG_RESP_V01 0x0027
+#define QMI_IPA_DISABLE_LINK_LOW_PWR_STATE_REQ_V01 0x0028
+#define QMI_IPA_DISABLE_LINK_LOW_PWR_STATE_RESP_V01 0x0028
+#define QMI_IPA_ENABLE_LINK_LOW_PWR_STATE_REQ_V01 0x0029
+#define QMI_IPA_ENABLE_LINK_LOW_PWR_STATE_RESP_V01 0x0029
+#define QMI_IPA_GET_DATA_STATS_REQ_V01 0x0030
+#define QMI_IPA_GET_DATA_STATS_RESP_V01 0x0030
+#define QMI_IPA_GET_APN_DATA_STATS_REQ_V01 0x0031
+#define QMI_IPA_GET_APN_DATA_STATS_RESP_V01 0x0031
+#define QMI_IPA_SET_DATA_USAGE_QUOTA_REQ_V01 0x0032
+#define QMI_IPA_SET_DATA_USAGE_QUOTA_RESP_V01 0x0032
+#define QMI_IPA_DATA_USAGE_QUOTA_REACHED_IND_V01 0x0033
+#define QMI_IPA_STOP_DATA_USAGE_QUOTA_REQ_V01 0x0034
+#define QMI_IPA_STOP_DATA_USAGE_QUOTA_RESP_V01 0x0034
+#define QMI_IPA_INIT_MODEM_DRIVER_CMPLT_REQ_V01 0x0035
+#define QMI_IPA_INIT_MODEM_DRIVER_CMPLT_RESP_V01 0x0035
+#define QMI_IPA_INSTALL_FILTER_RULE_EX_REQ_V01 0x0037
+#define QMI_IPA_INSTALL_FILTER_RULE_EX_RESP_V01 0x0037
+#define QMI_IPA_ENABLE_PER_CLIENT_STATS_REQ_V01 0x0038
+#define QMI_IPA_ENABLE_PER_CLIENT_STATS_RESP_V01 0x0038
+#define QMI_IPA_GET_STATS_PER_CLIENT_REQ_V01 0x0039
+#define QMI_IPA_GET_STATS_PER_CLIENT_RESP_V01 0x0039
+#define QMI_IPA_INSTALL_UL_FIREWALL_RULES_REQ_V01 0x003A
+#define QMI_IPA_INSTALL_UL_FIREWALL_RULES_RESP_V01 0x003A
+#define QMI_IPA_INSTALL_UL_FIREWALL_RULES_IND_V01 0x003A
+
+/* Maximum message lengths */
+#define QMI_IPA_INIT_MODEM_DRIVER_REQ_MAX_MSG_LEN_V01 134
+#define QMI_IPA_INIT_MODEM_DRIVER_RESP_MAX_MSG_LEN_V01 25
+#define QMI_IPA_INDICATION_REGISTER_REQ_MAX_MSG_LEN_V01 8
+#define QMI_IPA_INDICATION_REGISTER_RESP_MAX_MSG_LEN_V01 7
+#define QMI_IPA_INSTALL_FILTER_RULE_REQ_MAX_MSG_LEN_V01 22369
+#define QMI_IPA_INSTALL_FILTER_RULE_RESP_MAX_MSG_LEN_V01 783
+#define QMI_IPA_FILTER_INSTALLED_NOTIF_REQ_MAX_MSG_LEN_V01 870
+#define QMI_IPA_FILTER_INSTALLED_NOTIF_RESP_MAX_MSG_LEN_V01 7
+#define QMI_IPA_MASTER_DRIVER_INIT_COMPLETE_IND_MAX_MSG_LEN_V01 7
+#define QMI_IPA_DATA_USAGE_QUOTA_REACHED_IND_MAX_MSG_LEN_V01 15
+
+
+#define QMI_IPA_ENABLE_FORCE_CLEAR_DATAPATH_REQ_MAX_MSG_LEN_V01 18
+#define QMI_IPA_DISABLE_FORCE_CLEAR_DATAPATH_REQ_MAX_MSG_LEN_V01 7
+#define QMI_IPA_ENABLE_FORCE_CLEAR_DATAPATH_RESP_MAX_MSG_LEN_V01 7
+#define QMI_IPA_DISABLE_FORCE_CLEAR_DATAPATH_RESP_MAX_MSG_LEN_V01 7
+
+
+#define QMI_IPA_CONFIG_REQ_MAX_MSG_LEN_V01 102
+#define QMI_IPA_CONFIG_RESP_MAX_MSG_LEN_V01 7
+#define QMI_IPA_DISABLE_LINK_LOW_PWR_STATE_REQ_MAX_MSG_LEN_V01 18
+#define QMI_IPA_DISABLE_LINK_LOW_PWR_STATE_RESP_MAX_MSG_LEN_V01 7
+#define QMI_IPA_ENABLE_LINK_LOW_PWR_STATE_REQ_MAX_MSG_LEN_V01 7
+#define QMI_IPA_ENABLE_LINK_LOW_PWR_STATE_RESP_MAX_MSG_LEN_V01 7
+#define QMI_IPA_GET_DATA_STATS_REQ_MAX_MSG_LEN_V01 11
+#define QMI_IPA_GET_DATA_STATS_RESP_MAX_MSG_LEN_V01 2234
+#define QMI_IPA_GET_APN_DATA_STATS_REQ_MAX_MSG_LEN_V01 36
+#define QMI_IPA_GET_APN_DATA_STATS_RESP_MAX_MSG_LEN_V01 299
+#define QMI_IPA_SET_DATA_USAGE_QUOTA_REQ_MAX_MSG_LEN_V01 100
+#define QMI_IPA_SET_DATA_USAGE_QUOTA_RESP_MAX_MSG_LEN_V01 7
+#define QMI_IPA_STOP_DATA_USAGE_QUOTA_REQ_MAX_MSG_LEN_V01 0
+#define QMI_IPA_STOP_DATA_USAGE_QUOTA_RESP_MAX_MSG_LEN_V01 7
+
+#define QMI_IPA_INIT_MODEM_DRIVER_CMPLT_REQ_MAX_MSG_LEN_V01 4
+#define QMI_IPA_INIT_MODEM_DRIVER_CMPLT_RESP_MAX_MSG_LEN_V01 7
+
+#define QMI_IPA_INSTALL_FILTER_RULE_EX_REQ_MAX_MSG_LEN_V01 22685
+#define QMI_IPA_INSTALL_FILTER_RULE_EX_RESP_MAX_MSG_LEN_V01 523
+
+#define QMI_IPA_ENABLE_PER_CLIENT_STATS_REQ_MAX_MSG_LEN_V01 4
+#define QMI_IPA_ENABLE_PER_CLIENT_STATS_RESP_MAX_MSG_LEN_V01 7
+
+#define QMI_IPA_GET_STATS_PER_CLIENT_REQ_MAX_MSG_LEN_V01 18
+#define QMI_IPA_GET_STATS_PER_CLIENT_RESP_MAX_MSG_LEN_V01 3595
+
+#define QMI_IPA_INSTALL_UL_FIREWALL_RULES_REQ_MAX_MSG_LEN_V01 9875
+#define QMI_IPA_INSTALL_UL_FIREWALL_RULES_RESP_MAX_MSG_LEN_V01 7
+#define QMI_IPA_INSTALL_UL_FIREWALL_RULES_IND_MAX_MSG_LEN_V01 11
+/* Service Object Accessor */
+
+#endif /* IPA_QMI_SERVICE_V01_H */
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/include/uapi/linux/kcov.h	2019-01-22 16:16:28.559292192 +0100
@@ -0,0 +1,34 @@
+#ifndef _LINUX_KCOV_IOCTLS_H
+#define _LINUX_KCOV_IOCTLS_H
+
+#include <linux/types.h>
+
+#define KCOV_INIT_TRACE			_IOR('c', 1, unsigned long)
+#define KCOV_ENABLE			_IO('c', 100)
+#define KCOV_DISABLE			_IO('c', 101)
+
+enum {
+	/*
+	 * Tracing coverage collection mode.
+	 * Covered PCs are collected in a per-task buffer.
+	 * In newer KCOV versions the mode is chosen by calling
+	 * ioctl(fd, KCOV_ENABLE, mode). In older versions the mode argument
+	 * was required to be 0 in such a call, so for reasons of backward
+	 * compatibility the value KCOV_TRACE_PC was chosen to be 0.
+	 */
+	KCOV_TRACE_PC = 0,
+	/* Collecting comparison operands mode. */
+	KCOV_TRACE_CMP = 1,
+};
+
+/*
+ * The format for the types of collected comparisons.
+ *
+ * Bit 0 shows whether one of the arguments is a compile-time constant.
+ * Bits 1 & 2 contain log2 of the argument size, up to 8 bytes.
+ */
+#define KCOV_CMP_CONST          (1 << 0)
+#define KCOV_CMP_SIZE(n)        ((n) << 1)
+#define KCOV_CMP_MASK           KCOV_CMP_SIZE(3)
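+
+/*
+ * Typical user-space usage, as a sketch (editor's note; the debugfs path
+ * and COVER_SIZE are conventional, not mandated by this header):
+ *
+ *	int fd = open("/sys/kernel/debug/kcov", O_RDWR);
+ *	ioctl(fd, KCOV_INIT_TRACE, COVER_SIZE);
+ *	unsigned long *cover = mmap(NULL,
+ *			COVER_SIZE * sizeof(unsigned long),
+ *			PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
+ *	ioctl(fd, KCOV_ENABLE, KCOV_TRACE_PC);
+ *	__atomic_store_n(&cover[0], 0, __ATOMIC_RELAXED);
+ *	// ... run the syscall under test; cover[0] then holds the
+ *	// number of collected PCs, cover[1..] the PCs themselves ...
+ *	ioctl(fd, KCOV_DISABLE, 0);
+ */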
+
+#endif /* _LINUX_KCOV_IOCTLS_H */
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/include/uapi/linux/maxim_sti.h	2019-01-22 16:16:28.559292192 +0100
@@ -0,0 +1,330 @@
+/* drivers/input/touchscreen/maxim_sti.c
+ *
+ * Maxim SmartTouch Imager Touchscreen Driver
+ *
+ * Copyright (c)2013 Maxim Integrated Products, Inc.
+ * Copyright (C) 2013, NVIDIA Corporation.  All Rights Reserved.
+ * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __MAXIM_STI_H__
+#define __MAXIM_STI_H__
+
+#ifndef __KERNEL__
+#include <stdlib.h>
+#include "genetlink.h"
+#endif
+
+#define XSTR(s)               STR(s)
+#define STR(s)                #s
+
+#define DRV_VER_MAJOR         1
+#define DRV_VER_MINOR         1
+
+#define DRIVER_VERSION_STR    XSTR(DRV_VER_MAJOR) "." XSTR(DRV_VER_MINOR)
+#define DRIVER_VERSION_NUM    ((DRV_VER_MAJOR << 8) | DRV_VER_MINOR)
+
+#define DRIVER_VERSION        DRIVER_VERSION_STR
+#define DRIVER_RELEASE        "April 29, 2015"
+#define DRIVER_PROTOCOL       0x0102
+
+/****************************************************************************\
+* Netlink: common kernel/user space macros                                   *
+\****************************************************************************/
+
+#define NL_BUF_SIZE  30720
+
+#define NL_ATTR_FIRST(nptr) \
+	((struct nlattr *)((void *)nptr + NLMSG_HDRLEN + GENL_HDRLEN))
+#define NL_ATTR_LAST(nptr) \
+	((struct nlattr *)((void *)nptr + \
+			NLMSG_ALIGN(((struct nlmsghdr *)nptr)->nlmsg_len)))
+#define NL_SIZE(nptr)   NLMSG_ALIGN(((struct nlmsghdr *)nptr)->nlmsg_len)
+#define NL_TYPE(nptr)              (((struct nlmsghdr *)nptr)->nlmsg_type)
+#define NL_SEQ(nptr)               (((struct nlmsghdr *)nptr)->nlmsg_seq)
+#define NL_OK(nptr)              (NL_TYPE(nptr) >= NLMSG_MIN_TYPE)
+#define NL_ATTR_VAL(aptr, type)  ((type *)((void *)aptr + NLA_HDRLEN))
+#define NL_ATTR_NEXT(aptr) \
+	((struct nlattr *)((void *)aptr + \
+			NLA_ALIGN(((struct nlattr *)aptr)->nla_len)))
+#define GENL_CMP(name1, name2)  strncmp(name1, name2, GENL_NAMSIZ)
+#define GENL_COPY(name1, name2) strlcpy(name1, name2, GENL_NAMSIZ)
+#define GENL_CHK(name)          (strlen(name) > (GENL_NAMSIZ - 1))
+#define MSG_TYPE(nptr)          NL_ATTR_FIRST(nptr)->nla_type
+#define MSG_PAYLOAD(nptr)       NL_ATTR_VAL(NL_ATTR_FIRST(nptr), void)
+
+/****************************************************************************\
+* Netlink: common kernel/user space inline functions                         *
+\****************************************************************************/
+
+static inline void
+nl_msg_init(void *buf, __u16 family_id, __u32 sequence, __u8 dst)
+{
+	struct nlmsghdr    *nlh = (struct nlmsghdr *)buf;
+	struct genlmsghdr  *genl = (struct genlmsghdr *)(buf + NLMSG_HDRLEN);
+
+	memset(buf, 0, NLMSG_HDRLEN + GENL_HDRLEN);
+	nlh->nlmsg_type = family_id;
+	nlh->nlmsg_flags = NLM_F_REQUEST;
+	nlh->nlmsg_seq = sequence;
+	nlh->nlmsg_len = NLMSG_HDRLEN + GENL_HDRLEN;
+	genl->cmd = dst;
+}
+
+static inline void
+*nl_alloc_attr(void *buf, __u16 type, __u16 len)
+{
+	struct nlmsghdr  *nlh = (struct nlmsghdr *)buf;
+	struct nlattr    *attr = NL_ATTR_LAST(nlh);
+
+	if ((NL_SIZE(buf) + NLMSG_ALIGN(NLA_HDRLEN + len)) > NL_BUF_SIZE)
+		return NULL;
+
+	attr->nla_type = type;
+	attr->nla_len = NLA_HDRLEN + len;
+	nlh->nlmsg_len += NLMSG_ALIGN(attr->nla_len);
+	return NL_ATTR_VAL(attr, void);
+}
+
+static inline int
+nl_add_attr(void *buf, __u16 type, void *ptr, __u16 len)
+{
+	void  *a_ptr;
+
+	a_ptr = nl_alloc_attr(buf, type, len);
+	if (a_ptr == NULL)
+		return -EPERM;
+	memcpy(a_ptr, ptr, len);
+	return 0;
+}
+
+/****************************************************************************\
+* Netlink: multicast groups enum and name strings                            *
+\****************************************************************************/
+
+enum {
+	MC_DRIVER,
+	MC_FUSION,
+	MC_EVENT_BROADCAST,
+	MC_GROUPS,
+};
+
+#define MC_DRIVER_NAME     "driver"
+#define MC_FUSION_NAME     "fusion"
+#define MC_EVENT_BROADCAST_NAME  "event_broadcast"
+
+#define NL_FAMILY_VERSION  1
+
+#define TF_FAMILY_NAME     "touch_fusion"
+
+/****************************************************************************\
+* Netlink: common parameter and message definitions                          *
+\****************************************************************************/
+
+enum {
+	DR_STATE_BASIC,
+	DR_STATE_ACTIVE,
+	DR_STATE_SUSPEND,
+	DR_STATE_RESUME,
+	DR_STATE_FAULT,
+};
+
+enum {
+	DR_INPUT_FINGER,
+	DR_INPUT_STYLUS,
+	DR_INPUT_ERASER,
+};
+
+enum {
+	DR_IRQ_FALLING_EDGE,
+	DR_IRQ_RISING_EDGE,
+};
+
+enum {
+	DR_ADD_MC_GROUP,
+	DR_ECHO_REQUEST,
+	DR_CHIP_READ,
+	DR_CHIP_WRITE,
+	DR_CHIP_RESET,
+	DR_GET_IRQLINE,
+	DR_DELAY,
+	DR_CHIP_ACCESS_METHOD,
+	DR_CONFIG_IRQ,
+	DR_CONFIG_INPUT,
+	DR_CONFIG_WATCHDOG,
+	DR_DECONFIG,
+	DR_INPUT,
+	DR_RESUME_ACK,
+	DR_LEGACY_FWDL,
+	DR_LEGACY_ACCELERATION,
+	DR_HANDSHAKE,
+	DR_CONFIG_FW,
+	DR_IDLE,
+	DR_SYSFS_ACK,
+	DR_TF_STATUS,
+};
+
+struct __attribute__ ((__packed__)) dr_add_mc_group {
+	__u8  number;
+	char  name[GENL_NAMSIZ];
+};
+
+struct __attribute__ ((__packed__)) dr_echo_request {
+	__u32  cookie;
+};
+
+struct __attribute__ ((__packed__)) dr_chip_read {
+	__u16  address;
+	__u16  length;
+};
+
+struct __attribute__ ((__packed__)) dr_chip_write {
+	__u16  address;
+	__u16  length;
+	__u8   data[0];
+};
+
+struct __attribute__ ((__packed__)) dr_chip_reset {
+	__u8  state;
+};
+
+struct __attribute__ ((__packed__)) dr_delay {
+	__u32  period;
+};
+
+struct __attribute__ ((__packed__)) dr_chip_access_method {
+	__u8  method;
+};
+
+#define MAX_IRQ_PARAMS  37
+struct __attribute__ ((__packed__)) dr_config_irq {
+	__u8   irq_method;
+	__u8   irq_edge;
+	__u8   irq_params;
+	__u16  irq_param[MAX_IRQ_PARAMS];
+};
+
+struct __attribute__ ((__packed__)) dr_config_input {
+	__u16  x_range;
+	__u16  y_range;
+};
+
+struct __attribute__ ((__packed__)) dr_config_watchdog {
+	__u32  pid;
+};
+
+struct __attribute__ ((__packed__)) dr_input_event {
+	__u8   id;
+	__u8   tool_type;
+	__u16  x;
+	__u16  y;
+	__u8   z;
+};
+
+#define MAX_INPUT_EVENTS  10
+struct __attribute__ ((__packed__)) dr_input {
+	struct dr_input_event  event[MAX_INPUT_EVENTS];
+	__u8                   events;
+};
+
+struct __attribute__ ((__packed__)) dr_legacy_acceleration {
+	__u8  enable;
+};
+
+struct __attribute__ ((__packed__)) dr_handshake {
+	__u16 tf_ver;
+	__u16 chip_id;
+};
+
+#define  DR_SYSFS_UPDATE_NONE     0x0000
+#define  DR_SYSFS_UPDATE_BIT_GLOVE    0
+#define  DR_SYSFS_UPDATE_BIT_CHARGER  1
+#define  DR_SYSFS_UPDATE_BIT_LCD_FPS  2
+
+#define  DR_SYSFS_ACK_GLOVE       0x5A5A5A5A
+#define  DR_SYSFS_ACK_CHARGER     0xA5A5A5A5
+#define  DR_SYSFS_ACK_LCD_FPS     0xC3C3C3C3
+
+enum {
+	DR_NO_CHARGER,
+	DR_WIRED_CHARGER,
+	DR_WIRELESS_CHARGER,
+};
+
+struct __attribute__ ((__packed__)) dr_sysfs_ack {
+	__u32 type;
+};
+
+struct __attribute__ ((__packed__)) dr_config_fw {
+	__u16 fw_ver;
+	__u16 fw_protocol;
+};
+
+struct __attribute__ ((__packed__)) dr_idle {
+	__u8  idle;
+};
+
+#define  TF_STATUS_DEFAULT_LOADED (1 << 0)
+#define  TF_STATUS_BUSY (1 << 1)
+
+struct __attribute__ ((__packed__)) dr_tf_status {
+	__u32 tf_status;
+};
+
+enum {
+	FU_ECHO_RESPONSE,
+	FU_CHIP_READ_RESULT,
+	FU_IRQLINE_STATUS,
+	FU_ASYNC_DATA,
+	FU_RESUME,
+	FU_HANDSHAKE_RESPONSE,
+	FU_SYSFS_INFO,
+};
+
+struct __attribute__ ((__packed__)) fu_echo_response {
+	__u32  cookie;
+	__u8   driver_state;
+};
+
+struct __attribute__ ((__packed__)) fu_chip_read_result {
+	__u16  address;
+	__u16  length;
+	__u8   data[0];
+};
+
+struct __attribute__ ((__packed__)) fu_irqline_status {
+	__u8  status;
+};
+
+struct __attribute__ ((__packed__)) fu_async_data {
+	__u16  address;
+	__u16  length;
+	__u16  status;
+	__u8   data[0];
+};
+
+struct __attribute__ ((__packed__)) fu_handshake_response {
+	__u16  driver_ver;
+	__u16  panel_id;
+	__u16  driver_protocol;
+};
+
+struct __attribute__ ((__packed__)) fu_sysfs_info {
+	__u8   type;
+	__u16  glove_value;
+	__u16  charger_value;
+	__u16  lcd_fps_value;
+};
+
+#endif
+
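
Taken together, nl_msg_init() and nl_add_attr() let user space build a complete genetlink message in a NL_BUF_SIZE buffer. Below is a minimal sketch under stated assumptions: the family id would really be resolved via a CTRL_CMD_GETFAMILY lookup on "touch_fusion", and MC_DRIVER / DR_ECHO_REQUEST are used purely as illustrative command and attribute values.

#include <string.h>	/* memset/memcpy used by the inline helpers */
#include <errno.h>	/* EPERM used by nl_add_attr() */
#include <stdio.h>
#include <linux/netlink.h>
#include <linux/genetlink.h>
#include <linux/maxim_sti.h>	/* pulls in a local "genetlink.h" when !__KERNEL__ */

int main(void)
{
	static char buf[NL_BUF_SIZE];
	struct dr_echo_request echo = { .cookie = 0x12345678 };
	__u16 family_id = 0x1234;	/* assumed: resolve via CTRL_CMD_GETFAMILY */

	/* netlink + genetlink header; the genl cmd selects the destination */
	nl_msg_init(buf, family_id, 1 /* sequence */, MC_DRIVER);

	/* one attribute carrying the echo payload */
	if (nl_add_attr(buf, DR_ECHO_REQUEST, &echo, sizeof(echo)) < 0)
		return 1;

	printf("attr type %u, total %u bytes\n",
	       (unsigned)MSG_TYPE(buf), (unsigned)NL_SIZE(buf));
	return 0;
}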
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/include/uapi/linux/mdss_rotator.h	2019-01-22 16:16:28.559292192 +0100
@@ -0,0 +1,144 @@
+#ifndef _UAPI_MDSS_ROTATOR_H_
+#define _UAPI_MDSS_ROTATOR_H_
+
+#include <linux/msm_mdp_ext.h>
+
+#define MDSS_ROTATOR_IOCTL_MAGIC 'w'
+
+/* open a rotation session */
+#define MDSS_ROTATION_OPEN \
+	_IOWR(MDSS_ROTATOR_IOCTL_MAGIC, 1, struct mdp_rotation_config *)
+
+/* change the rotation session configuration */
+#define MDSS_ROTATION_CONFIG \
+	_IOWR(MDSS_ROTATOR_IOCTL_MAGIC, 2, struct mdp_rotation_config *)
+
+/* queue the rotation request */
+#define MDSS_ROTATION_REQUEST \
+	_IOWR(MDSS_ROTATOR_IOCTL_MAGIC, 3, struct mdp_rotation_request *)
+
+/* close a rotation session with the specified rotation session ID */
+#define MDSS_ROTATION_CLOSE	_IOW(MDSS_ROTATOR_IOCTL_MAGIC, 4, unsigned int)
+
+/**********************************************************************
+Rotation request flag
+**********************************************************************/
+/* no rotation flag, i.e. color space conversion */
+#define MDP_ROTATION_NOP	0x01
+
+/* left/right flip */
+#define MDP_ROTATION_FLIP_LR	0x02
+
+/* up/down flip */
+#define MDP_ROTATION_FLIP_UD	0x04
+
+/* rotate 90 degrees */
+#define MDP_ROTATION_90		0x08
+
+/* rotate 180 degrees */
+#define MDP_ROTATION_180	(MDP_ROTATION_FLIP_LR | MDP_ROTATION_FLIP_UD)
+
+/* rotate 270 degrees */
+#define MDP_ROTATION_270	(MDP_ROTATION_90 | MDP_ROTATION_180)
+
+/* format is interlaced */
+#define MDP_ROTATION_DEINTERLACE 0x10
+
+/* enable bwc */
+#define MDP_ROTATION_BWC_EN	0x40
+
+/* secure data */
+#define MDP_ROTATION_SECURE	0x80
+
+/**********************************************************************
+Rotation commit flag
+**********************************************************************/
+/* Flag indicating that the rotation request should be validated */
+#define MDSS_ROTATION_REQUEST_VALIDATE	0x01
+
+#define MDP_ROTATION_REQUEST_VERSION_1_0	0x00010000
+
+/*
+ * The client can let the driver allocate any available hardware
+ * resource by passing this particular hw resource id.
+ */
+#define MDSS_ROTATION_HW_ANY	0xFFFFFFFF
+
+/**********************************************************************
+Configuration structures
+**********************************************************************/
+struct mdp_rotation_buf_info {
+	uint32_t width;
+	uint32_t height;
+	uint32_t format;
+	struct mult_factor comp_ratio;
+};
+
+struct mdp_rotation_config {
+	uint32_t	version;
+	uint32_t	session_id;
+	struct mdp_rotation_buf_info	input;
+	struct mdp_rotation_buf_info	output;
+	uint32_t	frame_rate;
+	uint32_t	flags;
+	uint32_t	reserved[6];
+};
+
+struct mdp_rotation_item {
+	/* rotation request flag */
+	uint32_t	flags;
+
+	/* Source crop rectangle */
+	struct mdp_rect	src_rect;
+
+	/* Destination rectangle */
+	struct mdp_rect	dst_rect;
+
+	/* Input buffer for the request */
+	struct mdp_layer_buffer	input;
+
+	/* The output buffer for the request */
+	struct mdp_layer_buffer	output;
+
+	/*
+	 * DMA pipe selection for this request by client:
+	 * 0: DMA pipe 0
+	 * 1: DMA pipe 1
+	 * or MDSS_ROTATION_HW_ANY if client wants
+	 * driver to allocate any that is available
+	 */
+	uint32_t	pipe_idx;
+
+	/*
+	 * Write-back block selection for this request by client:
+	 * 0: Write-back block 0
+	 * 1: Write-back block 1
+	 * or MDSS_ROTATION_HW_ANY if client wants
+	 * driver to allocate any that is available
+	 */
+	uint32_t	wb_idx;
+
+	/* Which session ID is this request scheduled on */
+	uint32_t	session_id;
+
+	/* 32-bit values reserved for future use */
+	uint32_t	reserved[6];
+};
+
+struct mdp_rotation_request {
+	/* 32-bit version of the request structure */
+	uint32_t	version;
+
+	uint32_t	flags;
+
+	/* Number of rotation request items in the list */
+	uint32_t	count;
+
+	/* Pointer to a list of rotation request items */
+	struct mdp_rotation_item __user	*list;
+
+	/* 32-bit values reserved for future use */
+	uint32_t	reserved[6];
+};
+
+#endif /*_UAPI_MDSS_ROTATOR_H_*/
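
The ioctls above imply a simple session lifecycle: MDSS_ROTATION_OPEN creates a session, MDSS_ROTATION_CONFIG retunes it, MDSS_ROTATION_REQUEST queues work, MDSS_ROTATION_CLOSE tears it down. A minimal sketch follows, assuming a /dev/mdss_rotator node, that the driver fills in session_id on open, and that close takes a pointer to the session id; the pixel formats are left unset here.

#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/mdss_rotator.h>

int main(void)
{
	struct mdp_rotation_config cfg;
	int fd = open("/dev/mdss_rotator", O_RDWR);	/* assumed node name */

	if (fd < 0)
		return 1;

	memset(&cfg, 0, sizeof(cfg));
	cfg.version = MDP_ROTATION_REQUEST_VERSION_1_0;
	cfg.input.width   = 1920;
	cfg.input.height  = 1080;
	cfg.output.width  = 1080;	/* 90-degree rotation swaps dimensions */
	cfg.output.height = 1920;
	cfg.frame_rate = 30;
	cfg.flags = MDP_ROTATION_90;
	/* cfg.input.format / cfg.output.format: pick a real MDP pixel
	 * format for the target hardware; left at 0 in this sketch */

	if (ioctl(fd, MDSS_ROTATION_OPEN, &cfg) < 0)
		return 1;
	/* driver is assumed to fill cfg.session_id on success */

	/* ... build an mdp_rotation_request and queue it with
	 * MDSS_ROTATION_REQUEST; setting MDSS_ROTATION_REQUEST_VALIDATE
	 * in the request flags dry-runs it without committing ... */

	ioctl(fd, MDSS_ROTATION_CLOSE, &cfg.session_id);
	close(fd);
	return 0;
}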
diff -Nruw linux-4.4.115-fbx/include/uapi/linux/mfd./Kbuild linux-4.4.115-fbx/include/uapi/linux/mfd/Kbuild
--- linux-4.4.115-fbx/include/uapi/linux/mfd./Kbuild	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/include/uapi/linux/mfd/Kbuild	2019-01-22 16:16:28.563292228 +0100
@@ -0,0 +1,2 @@
+header-y += msm-adie-codec.h
+header-y += wcd9xxx/
diff -Nruw linux-4.4.115-fbx/include/uapi/linux/mfd./msm-adie-codec.h linux-4.4.115-fbx/include/uapi/linux/mfd/msm-adie-codec.h
--- linux-4.4.115-fbx/include/uapi/linux/mfd./msm-adie-codec.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/include/uapi/linux/mfd/msm-adie-codec.h	2019-01-22 16:16:28.563292228 +0100
@@ -0,0 +1,146 @@
+#ifndef __UAPI_MFD_MSM_ADIE_CODEC_H
+#define __UAPI_MFD_MSM_ADIE_CODEC_H
+
+#include <linux/types.h>
+
+/* Value representing an entry */
+#define ADIE_CODEC_ACTION_ENTRY       0x1
+/* Value representing a delay wait */
+#define ADIE_CODEC_ACTION_DELAY_WAIT      0x2
+/* Value representing a stage reached */
+#define ADIE_CODEC_ACTION_STAGE_REACHED   0x3
+
+/* This value is the state after the client sets the path */
+#define ADIE_CODEC_PATH_OFF                                        0x0050
+
+/* State to which the client asks the driver to proceed, where it can
+ * set up the clocks and 0-fill PCM buffers
+ */
+#define ADIE_CODEC_DIGITAL_READY                                   0x0100
+
+/* State to which the client asks the driver to proceed, where it can
+ * start sending data after the internal steady-state delay
+ */
+#define ADIE_CODEC_DIGITAL_ANALOG_READY                            0x1000
+
+
+/* Client asks adie to switch off the analog portion of the
+ * internal codec after the use of this path.
+ */
+#define ADIE_CODEC_ANALOG_OFF                                      0x0750
+
+
+/* Client asks adie to switch off the digital portion of the
+ * internal codec, after switching off the analog portion.
+ *
+ * 0-fill PCM may or may not be sent at this point.
+ */
+#define ADIE_CODEC_DIGITAL_OFF                                     0x0600
+
+/* State in which the client asks the driver to write the default
+ * values to the registers */
+#define ADIE_CODEC_FLASH_IMAGE 					   0x0001
+
+/* Path type */
+#define ADIE_CODEC_RX 0
+#define ADIE_CODEC_TX 1
+#define ADIE_CODEC_LB 3
+#define ADIE_CODEC_MAX 4
+
+#define ADIE_CODEC_PACK_ENTRY(reg, mask, val) \
+	((val) | ((mask) << 8) | ((reg) << 16))
+
+#define ADIE_CODEC_UNPACK_ENTRY(packed, reg, mask, val) \
+	do { \
+		((reg) = (((packed) >> 16) & (0xff))); \
+		((mask) = (((packed) >> 8) & (0xff))); \
+		((val) = ((packed) & (0xff))); \
+	} while (0)
+
+struct adie_codec_action_unit {
+	u32 type;
+	u32 action;
+};
+
+struct adie_codec_hwsetting_entry {
+	struct adie_codec_action_unit *actions;
+	u32 action_sz;
+	u32 freq_plan;
+	u32 osr;
+	/* u32  VolMask;
+	 * u32  SidetoneMask;
+	 */
+};
+
+struct adie_codec_dev_profile {
+	u32 path_type; /* ADIE_CODEC_RX, TX or LB */
+	u32 setting_sz;
+	struct adie_codec_hwsetting_entry *settings;
+};
+
+struct adie_codec_register {
+	u8 reg;
+	u8 mask;
+	u8 val;
+};
+
+struct adie_codec_register_image {
+	struct adie_codec_register *regs;
+	u32 img_sz;
+};
+
+struct adie_codec_path;
+
+struct adie_codec_anc_data {
+	u32 size;
+	u32 writes[];
+};
+
+struct adie_codec_operations {
+	int	 codec_id;
+	int (*codec_open) (struct adie_codec_dev_profile *profile,
+				struct adie_codec_path **path_pptr);
+	int (*codec_close) (struct adie_codec_path *path_ptr);
+	int (*codec_setpath) (struct adie_codec_path *path_ptr,
+				u32 freq_plan, u32 osr);
+	int (*codec_proceed_stage) (struct adie_codec_path *path_ptr,
+					u32 state);
+	u32 (*codec_freq_supported) (struct adie_codec_dev_profile *profile,
+					u32 requested_freq);
+	int (*codec_enable_sidetone) (struct adie_codec_path *rx_path_ptr,
+					u32 enable);
+	int (*codec_enable_anc) (struct adie_codec_path *rx_path_ptr,
+		u32 enable, struct adie_codec_anc_data *calibration_writes);
+	int (*codec_set_device_digital_volume) (
+					struct adie_codec_path *path_ptr,
+					u32 num_channels,
+					u32 vol_percentage);
+
+	int (*codec_set_device_analog_volume) (struct adie_codec_path *path_ptr,
+						u32 num_channels,
+						u32 volume);
+	int (*codec_set_master_mode) (struct adie_codec_path *path_ptr,
+					u8 master);
+};
+
+int adie_codec_register_codec_operations(
+				const struct adie_codec_operations *codec_ops);
+int adie_codec_open(struct adie_codec_dev_profile *profile,
+	struct adie_codec_path **path_pptr);
+int adie_codec_setpath(struct adie_codec_path *path_ptr,
+	u32 freq_plan, u32 osr);
+int adie_codec_proceed_stage(struct adie_codec_path *path_ptr, u32 state);
+int adie_codec_close(struct adie_codec_path *path_ptr);
+u32 adie_codec_freq_supported(struct adie_codec_dev_profile *profile,
+							u32 requested_freq);
+int adie_codec_enable_sidetone(struct adie_codec_path *rx_path_ptr, u32 enable);
+int adie_codec_enable_anc(struct adie_codec_path *rx_path_ptr, u32 enable,
+	struct adie_codec_anc_data *calibration_writes);
+int adie_codec_set_device_digital_volume(struct adie_codec_path *path_ptr,
+		u32 num_channels, u32 vol_percentage /* in percentage */);
+
+int adie_codec_set_device_analog_volume(struct adie_codec_path *path_ptr,
+		u32 num_channels, u32 volume /* in percentage */);
+
+int adie_codec_set_master_mode(struct adie_codec_path *path_ptr, u8 master);
+#endif
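
The pack/unpack macros and the profile structures are meant to combine as in the following kernel-side sketch (the header uses the in-kernel u8/u32 types, so kernel context is assumed; the register address, delay value, freq_plan and osr numbers are illustrative only, not taken from any board file).

#include <linux/kernel.h>		/* ARRAY_SIZE() */
#include <linux/mfd/msm-adie-codec.h>

static struct adie_codec_action_unit example_actions[] = {
	/* write 0x1C to register 0x42 with all mask bits set */
	{ ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x42, 0xFF, 0x1C) },
	/* wait before the next action (delay units per driver convention) */
	{ ADIE_CODEC_ACTION_DELAY_WAIT, 10000 },
	/* mark the point where the digital path is ready */
	{ ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_READY },
};

static struct adie_codec_hwsetting_entry example_setting = {
	.actions   = example_actions,
	.action_sz = ARRAY_SIZE(example_actions),
	.freq_plan = 48000,	/* illustrative sample-rate plan */
	.osr       = 256,	/* illustrative oversampling ratio */
};

static struct adie_codec_dev_profile example_profile __maybe_unused = {
	.path_type  = ADIE_CODEC_RX,
	.setting_sz = 1,
	.settings   = &example_setting,
};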
diff -Nruw linux-4.4.115-fbx/include/uapi/linux/mfd./wcd9xxx/Kbuild linux-4.4.115-fbx/include/uapi/linux/mfd/wcd9xxx/Kbuild
--- linux-4.4.115-fbx/include/uapi/linux/mfd./wcd9xxx/Kbuild	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/include/uapi/linux/mfd/wcd9xxx/Kbuild	2019-01-22 16:16:28.563292228 +0100
@@ -0,0 +1,2 @@
+header-y += wcd9xxx_registers.h
+header-y += wcd9320_registers.h
diff -Nruw linux-4.4.115-fbx/include/uapi/linux/mfd./wcd9xxx/wcd9320_registers.h linux-4.4.115-fbx/include/uapi/linux/mfd/wcd9xxx/wcd9320_registers.h
--- linux-4.4.115-fbx/include/uapi/linux/mfd./wcd9xxx/wcd9320_registers.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/include/uapi/linux/mfd/wcd9xxx/wcd9320_registers.h	2019-01-22 16:16:28.563292228 +0100
@@ -0,0 +1,1399 @@
+#ifndef WCD9320_REGISTERS_H
+#define WCD9320_REGISTERS_H
+
+#include <linux/mfd/wcd9xxx/wcd9xxx_registers.h>
+
+#define TAIKO_A_CHIP_CTL			WCD9XXX_A_CHIP_CTL
+#define TAIKO_A_CHIP_CTL__POR			WCD9XXX_A_CHIP_CTL__POR
+#define TAIKO_A_CHIP_STATUS			WCD9XXX_A_CHIP_STATUS
+#define TAIKO_A_CHIP_STATUS__POR		WCD9XXX_A_CHIP_STATUS__POR
+#define TAIKO_A_CHIP_ID_BYTE_0			WCD9XXX_A_CHIP_ID_BYTE_0
+#define TAIKO_A_CHIP_ID_BYTE_0__POR		WCD9XXX_A_CHIP_ID_BYTE_0__POR
+#define TAIKO_A_CHIP_ID_BYTE_1			WCD9XXX_A_CHIP_ID_BYTE_1
+#define TAIKO_A_CHIP_ID_BYTE_1__POR		WCD9XXX_A_CHIP_ID_BYTE_1__POR
+#define TAIKO_A_CHIP_ID_BYTE_2			WCD9XXX_A_CHIP_ID_BYTE_2
+#define TAIKO_A_CHIP_ID_BYTE_2__POR		WCD9XXX_A_CHIP_ID_BYTE_2__POR
+#define TAIKO_A_CHIP_ID_BYTE_3			WCD9XXX_A_CHIP_ID_BYTE_3
+#define TAIKO_A_CHIP_ID_BYTE_3__POR		WCD9XXX_A_CHIP_ID_BYTE_3__POR
+#define TAIKO_A_CHIP_VERSION			WCD9XXX_A_CHIP_VERSION
+#define TAIKO_A_CHIP_VERSION__POR		WCD9XXX_A_CHIP_VERSION__POR
+#define TAIKO_A_SB_VERSION			WCD9XXX_A_SB_VERSION
+#define TAIKO_A_SB_VERSION__POR			WCD9XXX_A_SB_VERSION__POR
+#define TAIKO_A_SLAVE_ID_1			WCD9XXX_A_SLAVE_ID_1
+#define TAIKO_A_SLAVE_ID_1__POR			WCD9XXX_A_SLAVE_ID_1__POR
+#define TAIKO_A_SLAVE_ID_2			WCD9XXX_A_SLAVE_ID_2
+#define TAIKO_A_SLAVE_ID_2__POR			WCD9XXX_A_SLAVE_ID_2__POR
+#define TAIKO_A_SLAVE_ID_3			WCD9XXX_A_SLAVE_ID_3
+#define TAIKO_A_SLAVE_ID_3__POR			WCD9XXX_A_SLAVE_ID_3__POR
+#define TAIKO_A_PIN_CTL_OE0			(0x010)
+#define TAIKO_A_PIN_CTL_OE0__POR				(0x00)
+#define TAIKO_A_PIN_CTL_OE1			(0x011)
+#define TAIKO_A_PIN_CTL_OE1__POR				(0x00)
+#define TAIKO_A_PIN_CTL_DATA0			(0x012)
+#define TAIKO_A_PIN_CTL_DATA0__POR				(0x00)
+#define TAIKO_A_PIN_CTL_DATA1			(0x013)
+#define TAIKO_A_PIN_CTL_DATA1__POR				(0x00)
+#define TAIKO_A_HDRIVE_GENERIC			(0x018)
+#define TAIKO_A_HDRIVE_GENERIC__POR				(0x00)
+#define TAIKO_A_HDRIVE_OVERRIDE			(0x019)
+#define TAIKO_A_HDRIVE_OVERRIDE__POR				(0x08)
+#define TAIKO_A_ANA_CSR_WAIT_STATE			(0x020)
+#define TAIKO_A_ANA_CSR_WAIT_STATE__POR				(0x44)
+#define TAIKO_A_PROCESS_MONITOR_CTL0			(0x040)
+#define TAIKO_A_PROCESS_MONITOR_CTL0__POR				(0x80)
+#define TAIKO_A_PROCESS_MONITOR_CTL1			(0x041)
+#define TAIKO_A_PROCESS_MONITOR_CTL1__POR				(0x00)
+#define TAIKO_A_PROCESS_MONITOR_CTL2			(0x042)
+#define TAIKO_A_PROCESS_MONITOR_CTL2__POR				(0x00)
+#define TAIKO_A_PROCESS_MONITOR_CTL3			(0x043)
+#define TAIKO_A_PROCESS_MONITOR_CTL3__POR				(0x01)
+#define TAIKO_A_QFUSE_CTL			(0x048)
+#define TAIKO_A_QFUSE_CTL__POR				(0x00)
+#define TAIKO_A_QFUSE_STATUS			(0x049)
+#define TAIKO_A_QFUSE_STATUS__POR				(0x00)
+#define TAIKO_A_QFUSE_DATA_OUT0			(0x04A)
+#define TAIKO_A_QFUSE_DATA_OUT0__POR				(0x00)
+#define TAIKO_A_QFUSE_DATA_OUT1			(0x04B)
+#define TAIKO_A_QFUSE_DATA_OUT1__POR				(0x00)
+#define TAIKO_A_QFUSE_DATA_OUT2			(0x04C)
+#define TAIKO_A_QFUSE_DATA_OUT2__POR				(0x00)
+#define TAIKO_A_QFUSE_DATA_OUT3			(0x04D)
+#define TAIKO_A_QFUSE_DATA_OUT3__POR				(0x00)
+#define TAIKO_A_QFUSE_DATA_OUT4			(0x04E)
+#define TAIKO_A_QFUSE_DATA_OUT4__POR				(0x00)
+#define TAIKO_A_QFUSE_DATA_OUT5			(0x04F)
+#define TAIKO_A_QFUSE_DATA_OUT5__POR				(0x00)
+#define TAIKO_A_QFUSE_DATA_OUT6			(0x050)
+#define TAIKO_A_QFUSE_DATA_OUT6__POR				(0x00)
+#define TAIKO_A_QFUSE_DATA_OUT7			(0x051)
+#define TAIKO_A_QFUSE_DATA_OUT7__POR				(0x00)
+#define TAIKO_A_CDC_CTL				WCD9XXX_A_CDC_CTL
+#define TAIKO_A_CDC_CTL__POR			WCD9XXX_A_CDC_CTL__POR
+#define TAIKO_A_LEAKAGE_CTL			WCD9XXX_A_LEAKAGE_CTL
+#define TAIKO_A_LEAKAGE_CTL__POR		WCD9XXX_A_LEAKAGE_CTL__POR
+#define TAIKO_A_INTR_MODE			(0x090)
+#define TAIKO_A_INTR_MODE__POR				(0x00)
+#define TAIKO_A_INTR_MASK0			(0x094)
+#define TAIKO_A_INTR_MASK0__POR				(0xFF)
+#define TAIKO_A_INTR_MASK1			(0x095)
+#define TAIKO_A_INTR_MASK1__POR				(0xFF)
+#define TAIKO_A_INTR_MASK2			(0x096)
+#define TAIKO_A_INTR_MASK2__POR				(0x3F)
+#define TAIKO_A_INTR_MASK3			(0x097)
+#define TAIKO_A_INTR_MASK3__POR				(0x3F)
+#define TAIKO_A_INTR_STATUS0			(0x098)
+#define TAIKO_A_INTR_STATUS0__POR				(0x00)
+#define TAIKO_A_INTR_STATUS1			(0x099)
+#define TAIKO_A_INTR_STATUS1__POR				(0x00)
+#define TAIKO_A_INTR_STATUS2			(0x09A)
+#define TAIKO_A_INTR_STATUS2__POR				(0x00)
+#define TAIKO_A_INTR_STATUS3			(0x09B)
+#define TAIKO_A_INTR_STATUS3__POR				(0x00)
+#define TAIKO_A_INTR_CLEAR0			(0x09C)
+#define TAIKO_A_INTR_CLEAR0__POR				(0x00)
+#define TAIKO_A_INTR_CLEAR1			(0x09D)
+#define TAIKO_A_INTR_CLEAR1__POR				(0x00)
+#define TAIKO_A_INTR_CLEAR2			(0x09E)
+#define TAIKO_A_INTR_CLEAR2__POR				(0x00)
+#define TAIKO_A_INTR_CLEAR3			(0x09F)
+#define TAIKO_A_INTR_CLEAR3__POR				(0x00)
+#define TAIKO_A_INTR_LEVEL0			(0x0A0)
+#define TAIKO_A_INTR_LEVEL0__POR				(0x01)
+#define TAIKO_A_INTR_LEVEL1			(0x0A1)
+#define TAIKO_A_INTR_LEVEL1__POR				(0x00)
+#define TAIKO_A_INTR_LEVEL2			(0x0A2)
+#define TAIKO_A_INTR_LEVEL2__POR				(0x00)
+#define TAIKO_A_INTR_LEVEL3			(0x0A3)
+#define TAIKO_A_INTR_LEVEL3__POR				(0x00)
+#define TAIKO_A_INTR_TEST0			(0x0A4)
+#define TAIKO_A_INTR_TEST0__POR				(0x00)
+#define TAIKO_A_INTR_TEST1			(0x0A5)
+#define TAIKO_A_INTR_TEST1__POR				(0x00)
+#define TAIKO_A_INTR_TEST2			(0x0A6)
+#define TAIKO_A_INTR_TEST2__POR				(0x00)
+#define TAIKO_A_INTR_TEST3			(0x0A7)
+#define TAIKO_A_INTR_TEST3__POR				(0x00)
+#define TAIKO_A_INTR_SET0			(0x0A8)
+#define TAIKO_A_INTR_SET0__POR				(0x00)
+#define TAIKO_A_INTR_SET1			(0x0A9)
+#define TAIKO_A_INTR_SET1__POR				(0x00)
+#define TAIKO_A_INTR_SET2			(0x0AA)
+#define TAIKO_A_INTR_SET2__POR				(0x00)
+#define TAIKO_A_INTR_SET3			(0x0AB)
+#define TAIKO_A_INTR_SET3__POR				(0x00)
+#define TAIKO_A_INTR_DESTN0			(0x0AC)
+#define TAIKO_A_INTR_DESTN0__POR				(0x00)
+#define TAIKO_A_INTR_DESTN1			(0x0AD)
+#define TAIKO_A_INTR_DESTN1__POR				(0x00)
+#define TAIKO_A_INTR_DESTN2			(0x0AE)
+#define TAIKO_A_INTR_DESTN2__POR				(0x00)
+#define TAIKO_A_INTR_DESTN3			(0x0AF)
+#define TAIKO_A_INTR_DESTN3__POR				(0x00)
+#define TAIKO_A_CDC_TX_I2S_SCK_MODE			(0x0C0)
+#define TAIKO_A_CDC_TX_I2S_SCK_MODE__POR				(0x00)
+#define TAIKO_A_CDC_TX_I2S_WS_MODE			(0x0C1)
+#define TAIKO_A_CDC_TX_I2S_WS_MODE__POR				(0x00)
+#define TAIKO_A_CDC_DMIC_DATA0_MODE			(0x0C4)
+#define TAIKO_A_CDC_DMIC_DATA0_MODE__POR				(0x00)
+#define TAIKO_A_CDC_DMIC_CLK0_MODE			(0x0C5)
+#define TAIKO_A_CDC_DMIC_CLK0_MODE__POR				(0x00)
+#define TAIKO_A_CDC_DMIC_DATA1_MODE			(0x0C6)
+#define TAIKO_A_CDC_DMIC_DATA1_MODE__POR				(0x00)
+#define TAIKO_A_CDC_DMIC_CLK1_MODE			(0x0C7)
+#define TAIKO_A_CDC_DMIC_CLK1_MODE__POR				(0x00)
+#define TAIKO_A_CDC_RX_I2S_SCK_MODE			(0x0C8)
+#define TAIKO_A_CDC_RX_I2S_SCK_MODE__POR				(0x00)
+#define TAIKO_A_CDC_RX_I2S_WS_MODE			(0x0C9)
+#define TAIKO_A_CDC_RX_I2S_WS_MODE__POR				(0x00)
+#define TAIKO_A_CDC_DMIC_DATA2_MODE			(0x0CA)
+#define TAIKO_A_CDC_DMIC_DATA2_MODE__POR				(0x00)
+#define TAIKO_A_CDC_DMIC_CLK2_MODE			(0x0CB)
+#define TAIKO_A_CDC_DMIC_CLK2_MODE__POR				(0x00)
+#define TAIKO_A_CDC_INTR1_MODE			(0x0CC)
+#define TAIKO_A_CDC_INTR1_MODE__POR				(0x00)
+#define TAIKO_A_CDC_SB_NRZ_SEL_MODE			(0x0CD)
+#define TAIKO_A_CDC_SB_NRZ_SEL_MODE__POR				(0x00)
+#define TAIKO_A_CDC_INTR2_MODE			(0x0CE)
+#define TAIKO_A_CDC_INTR2_MODE__POR				(0x00)
+#define TAIKO_A_CDC_RF_PA_ON_MODE			(0x0CF)
+#define TAIKO_A_CDC_RF_PA_ON_MODE__POR				(0x00)
+#define TAIKO_A_BIAS_REF_CTL			(0x100)
+#define TAIKO_A_BIAS_REF_CTL__POR				(0x1C)
+#define TAIKO_A_BIAS_CENTRAL_BG_CTL			(0x101)
+#define TAIKO_A_BIAS_CENTRAL_BG_CTL__POR				(0x50)
+#define TAIKO_A_BIAS_PRECHRG_CTL			(0x102)
+#define TAIKO_A_BIAS_PRECHRG_CTL__POR				(0x07)
+#define TAIKO_A_BIAS_CURR_CTL_1			(0x103)
+#define TAIKO_A_BIAS_CURR_CTL_1__POR				(0x52)
+#define TAIKO_A_BIAS_CURR_CTL_2			(0x104)
+#define TAIKO_A_BIAS_CURR_CTL_2__POR				(0x00)
+#define TAIKO_A_BIAS_OSC_BG_CTL			(0x105)
+#define TAIKO_A_BIAS_OSC_BG_CTL__POR				(0x16)
+#define TAIKO_A_CLK_BUFF_EN1			(0x108)
+#define TAIKO_A_CLK_BUFF_EN1__POR				(0x04)
+#define TAIKO_A_CLK_BUFF_EN2			(0x109)
+#define TAIKO_A_CLK_BUFF_EN2__POR				(0x02)
+#define TAIKO_A_LDO_H_MODE_1			(0x110)
+#define TAIKO_A_LDO_H_MODE_1__POR				(0x65)
+#define TAIKO_A_LDO_H_MODE_2			(0x111)
+#define TAIKO_A_LDO_H_MODE_2__POR				(0xA8)
+#define TAIKO_A_LDO_H_LOOP_CTL			(0x112)
+#define TAIKO_A_LDO_H_LOOP_CTL__POR				(0x6B)
+#define TAIKO_A_LDO_H_COMP_1			(0x113)
+#define TAIKO_A_LDO_H_COMP_1__POR				(0x84)
+#define TAIKO_A_LDO_H_COMP_2			(0x114)
+#define TAIKO_A_LDO_H_COMP_2__POR				(0xE0)
+#define TAIKO_A_LDO_H_BIAS_1			(0x115)
+#define TAIKO_A_LDO_H_BIAS_1__POR				(0x6D)
+#define TAIKO_A_LDO_H_BIAS_2			(0x116)
+#define TAIKO_A_LDO_H_BIAS_2__POR				(0xA5)
+#define TAIKO_A_LDO_H_BIAS_3			(0x117)
+#define TAIKO_A_LDO_H_BIAS_3__POR				(0x60)
+#define TAIKO_A_VBAT_CLK			(0x118)
+#define TAIKO_A_VBAT_CLK__POR				(0x03)
+#define TAIKO_A_VBAT_LOOP			(0x119)
+#define TAIKO_A_VBAT_LOOP__POR				(0x02)
+#define TAIKO_A_VBAT_REF			(0x11A)
+#define TAIKO_A_VBAT_REF__POR				(0x20)
+#define TAIKO_A_VBAT_ADC_TEST			(0x11B)
+#define TAIKO_A_VBAT_ADC_TEST__POR				(0x00)
+#define TAIKO_A_VBAT_FE			(0x11C)
+#define TAIKO_A_VBAT_FE__POR				(0x48)
+#define TAIKO_A_VBAT_BIAS_1			(0x11D)
+#define TAIKO_A_VBAT_BIAS_1__POR				(0x03)
+#define TAIKO_A_VBAT_BIAS_2			(0x11E)
+#define TAIKO_A_VBAT_BIAS_2__POR				(0x00)
+#define TAIKO_A_VBAT_ADC_DATA_MSB			(0x11F)
+#define TAIKO_A_VBAT_ADC_DATA_MSB__POR				(0x00)
+#define TAIKO_A_VBAT_ADC_DATA_LSB			(0x120)
+#define TAIKO_A_VBAT_ADC_DATA_LSB__POR				(0x00)
+#define TAIKO_A_MICB_CFILT_1_CTL			(0x128)
+#define TAIKO_A_MICB_CFILT_1_CTL__POR				(0x40)
+#define TAIKO_A_MICB_CFILT_1_VAL			(0x129)
+#define TAIKO_A_MICB_CFILT_1_VAL__POR				(0x80)
+#define TAIKO_A_MICB_CFILT_1_PRECHRG			(0x12A)
+#define TAIKO_A_MICB_CFILT_1_PRECHRG__POR				(0x38)
+#define TAIKO_A_MICB_1_CTL			(0x12B)
+#define TAIKO_A_MICB_1_CTL__POR				(0x16)
+#define TAIKO_A_MICB_1_INT_RBIAS			(0x12C)
+#define TAIKO_A_MICB_1_INT_RBIAS__POR				(0x24)
+#define TAIKO_A_MICB_1_MBHC			(0x12D)
+#define TAIKO_A_MICB_1_MBHC__POR				(0x01)
+#define TAIKO_A_MICB_CFILT_2_CTL			(0x12E)
+#define TAIKO_A_MICB_CFILT_2_CTL__POR				(0x40)
+#define TAIKO_A_MICB_CFILT_2_VAL			(0x12F)
+#define TAIKO_A_MICB_CFILT_2_VAL__POR				(0x80)
+#define TAIKO_A_MICB_CFILT_2_PRECHRG			(0x130)
+#define TAIKO_A_MICB_CFILT_2_PRECHRG__POR				(0x38)
+#define TAIKO_A_MICB_2_CTL			(0x131)
+#define TAIKO_A_MICB_2_CTL__POR				(0x16)
+#define TAIKO_A_MICB_2_INT_RBIAS			(0x132)
+#define TAIKO_A_MICB_2_INT_RBIAS__POR				(0x24)
+#define TAIKO_A_MICB_2_MBHC			(0x133)
+#define TAIKO_A_MICB_2_MBHC__POR				(0x02)
+#define TAIKO_A_MICB_CFILT_3_CTL			(0x134)
+#define TAIKO_A_MICB_CFILT_3_CTL__POR				(0x40)
+#define TAIKO_A_MICB_CFILT_3_VAL			(0x135)
+#define TAIKO_A_MICB_CFILT_3_VAL__POR				(0x80)
+#define TAIKO_A_MICB_CFILT_3_PRECHRG			(0x136)
+#define TAIKO_A_MICB_CFILT_3_PRECHRG__POR				(0x38)
+#define TAIKO_A_MICB_3_CTL			(0x137)
+#define TAIKO_A_MICB_3_CTL__POR				(0x16)
+#define TAIKO_A_MICB_3_INT_RBIAS			(0x138)
+#define TAIKO_A_MICB_3_INT_RBIAS__POR				(0x24)
+#define TAIKO_A_MICB_3_MBHC			(0x139)
+#define TAIKO_A_MICB_3_MBHC__POR				(0x00)
+#define TAIKO_A_MICB_4_CTL			(0x13D)
+#define TAIKO_A_MICB_4_CTL__POR				(0x16)
+#define TAIKO_A_MICB_4_INT_RBIAS			(0x13E)
+#define TAIKO_A_MICB_4_INT_RBIAS__POR				(0x24)
+#define TAIKO_A_MICB_4_MBHC			(0x13F)
+#define TAIKO_A_MICB_4_MBHC__POR				(0x01)
+#define TAIKO_A_MBHC_INSERT_DETECT			(0x14A)
+#define TAIKO_A_MBHC_INSERT_DETECT__POR				(0x00)
+#define TAIKO_A_MBHC_INSERT_DET_STATUS			(0x14B)
+#define TAIKO_A_MBHC_INSERT_DET_STATUS__POR				(0x00)
+#define TAIKO_A_TX_COM_BIAS			(0x14C)
+#define TAIKO_A_TX_COM_BIAS__POR				(0xF0)
+#define TAIKO_A_MBHC_SCALING_MUX_1			(0x14E)
+#define TAIKO_A_MBHC_SCALING_MUX_1__POR				(0x00)
+#define TAIKO_A_MBHC_SCALING_MUX_2			(0x14F)
+#define TAIKO_A_MBHC_SCALING_MUX_2__POR				(0x80)
+#define TAIKO_A_MAD_ANA_CTRL			(0x150)
+#define TAIKO_A_MAD_ANA_CTRL__POR				(0xF1)
+#define TAIKO_A_TX_SUP_SWITCH_CTRL_1			(0x151)
+#define TAIKO_A_TX_SUP_SWITCH_CTRL_1__POR				(0x00)
+#define TAIKO_A_TX_SUP_SWITCH_CTRL_2			(0x152)
+#define TAIKO_A_TX_SUP_SWITCH_CTRL_2__POR				(0x80)
+#define TAIKO_A_TX_1_2_EN			(0x153)
+#define TAIKO_A_TX_1_2_EN__POR				(0x00)
+#define TAIKO_A_TX_1_2_TEST_EN			(0x154)
+#define TAIKO_A_TX_1_2_TEST_EN__POR				(0xCC)
+#define TAIKO_A_TX_1_2_ADC_CH1			(0x155)
+#define TAIKO_A_TX_1_2_ADC_CH1__POR				(0x44)
+#define TAIKO_A_TX_1_2_ADC_CH2			(0x156)
+#define TAIKO_A_TX_1_2_ADC_CH2__POR				(0x44)
+#define TAIKO_A_TX_1_2_ATEST_REFCTRL			(0x157)
+#define TAIKO_A_TX_1_2_ATEST_REFCTRL__POR				(0x00)
+#define TAIKO_A_TX_1_2_TEST_CTL			(0x158)
+#define TAIKO_A_TX_1_2_TEST_CTL__POR				(0x38)
+#define TAIKO_A_TX_1_2_TEST_BLOCK_EN			(0x159)
+#define TAIKO_A_TX_1_2_TEST_BLOCK_EN__POR				(0xFC)
+#define TAIKO_A_TX_1_2_TXFE_CLKDIV			(0x15A)
+#define TAIKO_A_TX_1_2_TXFE_CLKDIV__POR				(0x55)
+#define TAIKO_A_TX_1_2_SAR_ERR_CH1			(0x15B)
+#define TAIKO_A_TX_1_2_SAR_ERR_CH1__POR				(0x00)
+#define TAIKO_A_TX_1_2_SAR_ERR_CH2			(0x15C)
+#define TAIKO_A_TX_1_2_SAR_ERR_CH2__POR				(0x00)
+#define TAIKO_A_TX_3_4_EN			(0x15D)
+#define TAIKO_A_TX_3_4_EN__POR				(0x00)
+#define TAIKO_A_TX_3_4_TEST_EN			(0x15E)
+#define TAIKO_A_TX_3_4_TEST_EN__POR				(0xCC)
+#define TAIKO_A_TX_3_4_ADC_CH3			(0x15F)
+#define TAIKO_A_TX_3_4_ADC_CH3__POR				(0x44)
+#define TAIKO_A_TX_3_4_ADC_CH4			(0x160)
+#define TAIKO_A_TX_3_4_ADC_CH4__POR				(0x44)
+#define TAIKO_A_TX_3_4_ATEST_REFCTRL			(0x161)
+#define TAIKO_A_TX_3_4_ATEST_REFCTRL__POR				(0x00)
+#define TAIKO_A_TX_3_4_TEST_CTL			(0x162)
+#define TAIKO_A_TX_3_4_TEST_CTL__POR				(0x38)
+#define TAIKO_A_TX_3_4_TEST_BLOCK_EN			(0x163)
+#define TAIKO_A_TX_3_4_TEST_BLOCK_EN__POR				(0xFC)
+#define TAIKO_A_TX_3_4_TXFE_CKDIV			(0x164)
+#define TAIKO_A_TX_3_4_TXFE_CKDIV__POR				(0x55)
+#define TAIKO_A_TX_3_4_SAR_ERR_CH3			(0x165)
+#define TAIKO_A_TX_3_4_SAR_ERR_CH3__POR				(0x00)
+#define TAIKO_A_TX_3_4_SAR_ERR_CH4			(0x166)
+#define TAIKO_A_TX_3_4_SAR_ERR_CH4__POR				(0x00)
+#define TAIKO_A_TX_5_6_EN			(0x167)
+#define TAIKO_A_TX_5_6_EN__POR				(0x11)
+#define TAIKO_A_TX_5_6_TEST_EN			(0x168)
+#define TAIKO_A_TX_5_6_TEST_EN__POR				(0xCC)
+#define TAIKO_A_TX_5_6_ADC_CH5			(0x169)
+#define TAIKO_A_TX_5_6_ADC_CH5__POR				(0x44)
+#define TAIKO_A_TX_5_6_ADC_CH6			(0x16A)
+#define TAIKO_A_TX_5_6_ADC_CH6__POR				(0x44)
+#define TAIKO_A_TX_5_6_ATEST_REFCTRL			(0x16B)
+#define TAIKO_A_TX_5_6_ATEST_REFCTRL__POR				(0x00)
+#define TAIKO_A_TX_5_6_TEST_CTL			(0x16C)
+#define TAIKO_A_TX_5_6_TEST_CTL__POR				(0x38)
+#define TAIKO_A_TX_5_6_TEST_BLOCK_EN			(0x16D)
+#define TAIKO_A_TX_5_6_TEST_BLOCK_EN__POR				(0xFC)
+#define TAIKO_A_TX_5_6_TXFE_CKDIV			(0x16E)
+#define TAIKO_A_TX_5_6_TXFE_CKDIV__POR				(0x55)
+#define TAIKO_A_TX_5_6_SAR_ERR_CH5			(0x16F)
+#define TAIKO_A_TX_5_6_SAR_ERR_CH5__POR				(0x00)
+#define TAIKO_A_TX_5_6_SAR_ERR_CH6			(0x170)
+#define TAIKO_A_TX_5_6_SAR_ERR_CH6__POR				(0x00)
+#define TAIKO_A_TX_7_MBHC_EN			(0x171)
+#define TAIKO_A_TX_7_MBHC_EN__POR				(0x0C)
+#define TAIKO_A_TX_7_MBHC_ATEST_REFCTRL			(0x172)
+#define TAIKO_A_TX_7_MBHC_ATEST_REFCTRL__POR				(0x00)
+#define TAIKO_A_TX_7_MBHC_ADC			(0x173)
+#define TAIKO_A_TX_7_MBHC_ADC__POR				(0x44)
+#define TAIKO_A_TX_7_MBHC_TEST_CTL			(0x174)
+#define TAIKO_A_TX_7_MBHC_TEST_CTL__POR				(0x38)
+#define TAIKO_A_TX_7_MBHC_SAR_ERR			(0x175)
+#define TAIKO_A_TX_7_MBHC_SAR_ERR__POR				(0x00)
+#define TAIKO_A_TX_7_TXFE_CLKDIV			(0x176)
+#define TAIKO_A_TX_7_TXFE_CLKDIV__POR				(0x0B)
+#define TAIKO_A_BUCK_MODE_1			(0x181)
+#define TAIKO_A_BUCK_MODE_1__POR				(0x21)
+#define TAIKO_A_BUCK_MODE_2			(0x182)
+#define TAIKO_A_BUCK_MODE_2__POR				(0xFF)
+#define TAIKO_A_BUCK_MODE_3			(0x183)
+#define TAIKO_A_BUCK_MODE_3__POR				(0xCC)
+#define TAIKO_A_BUCK_MODE_4			(0x184)
+#define TAIKO_A_BUCK_MODE_4__POR				(0x3A)
+#define TAIKO_A_BUCK_MODE_5			(0x185)
+#define TAIKO_A_BUCK_MODE_5__POR				(0x00)
+#define TAIKO_A_BUCK_CTRL_VCL_1			(0x186)
+#define TAIKO_A_BUCK_CTRL_VCL_1__POR				(0x48)
+#define TAIKO_A_BUCK_CTRL_VCL_2			(0x187)
+#define TAIKO_A_BUCK_CTRL_VCL_2__POR				(0xA3)
+#define TAIKO_A_BUCK_CTRL_VCL_3			(0x188)
+#define TAIKO_A_BUCK_CTRL_VCL_3__POR				(0x82)
+#define TAIKO_A_BUCK_CTRL_CCL_1			(0x189)
+#define TAIKO_A_BUCK_CTRL_CCL_1__POR				(0xAB)
+#define TAIKO_A_BUCK_CTRL_CCL_2			(0x18A)
+#define TAIKO_A_BUCK_CTRL_CCL_2__POR				(0xDC)
+#define TAIKO_A_BUCK_CTRL_CCL_3			(0x18B)
+#define TAIKO_A_BUCK_CTRL_CCL_3__POR				(0x6A)
+#define TAIKO_A_BUCK_CTRL_CCL_4			(0x18C)
+#define TAIKO_A_BUCK_CTRL_CCL_4__POR				(0x58)
+#define TAIKO_A_BUCK_CTRL_PWM_DRVR_1			(0x18D)
+#define TAIKO_A_BUCK_CTRL_PWM_DRVR_1__POR				(0x50)
+#define TAIKO_A_BUCK_CTRL_PWM_DRVR_2			(0x18E)
+#define TAIKO_A_BUCK_CTRL_PWM_DRVR_2__POR				(0x64)
+#define TAIKO_A_BUCK_CTRL_PWM_DRVR_3			(0x18F)
+#define TAIKO_A_BUCK_CTRL_PWM_DRVR_3__POR				(0x77)
+#define TAIKO_A_BUCK_TMUX_A_D			(0x190)
+#define TAIKO_A_BUCK_TMUX_A_D__POR				(0x00)
+#define TAIKO_A_NCP_BUCKREF			(0x191)
+#define TAIKO_A_NCP_BUCKREF__POR				(0x00)
+#define TAIKO_A_NCP_EN			(0x192)
+#define TAIKO_A_NCP_EN__POR				(0xFE)
+#define TAIKO_A_NCP_CLK			(0x193)
+#define TAIKO_A_NCP_CLK__POR				(0x94)
+#define TAIKO_A_NCP_STATIC			(0x194)
+#define TAIKO_A_NCP_STATIC__POR				(0x28)
+#define TAIKO_A_NCP_VTH_LOW			(0x195)
+#define TAIKO_A_NCP_VTH_LOW__POR				(0x88)
+#define TAIKO_A_NCP_VTH_HIGH			(0x196)
+#define TAIKO_A_NCP_VTH_HIGH__POR				(0xA0)
+#define TAIKO_A_NCP_ATEST			(0x197)
+#define TAIKO_A_NCP_ATEST__POR				(0x00)
+#define TAIKO_A_NCP_DTEST			(0x198)
+#define TAIKO_A_NCP_DTEST__POR				(0x00)
+#define TAIKO_A_NCP_DLY1			(0x199)
+#define TAIKO_A_NCP_DLY1__POR				(0x06)
+#define TAIKO_A_NCP_DLY2			(0x19A)
+#define TAIKO_A_NCP_DLY2__POR				(0x06)
+#define TAIKO_A_RX_AUX_SW_CTL			(0x19B)
+#define TAIKO_A_RX_AUX_SW_CTL__POR				(0x00)
+#define TAIKO_A_RX_PA_AUX_IN_CONN			(0x19C)
+#define TAIKO_A_RX_PA_AUX_IN_CONN__POR				(0x00)
+#define TAIKO_A_RX_COM_TIMER_DIV			(0x19E)
+#define TAIKO_A_RX_COM_TIMER_DIV__POR				(0xE8)
+#define TAIKO_A_RX_COM_OCP_CTL			(0x19F)
+#define TAIKO_A_RX_COM_OCP_CTL__POR				(0x1F)
+#define TAIKO_A_RX_COM_OCP_COUNT			(0x1A0)
+#define TAIKO_A_RX_COM_OCP_COUNT__POR				(0x77)
+#define TAIKO_A_RX_COM_DAC_CTL			(0x1A1)
+#define TAIKO_A_RX_COM_DAC_CTL__POR				(0x00)
+#define TAIKO_A_RX_COM_BIAS			(0x1A2)
+#define TAIKO_A_RX_COM_BIAS__POR				(0x00)
+#define TAIKO_A_RX_HPH_AUTO_CHOP			(0x1A4)
+#define TAIKO_A_RX_HPH_AUTO_CHOP__POR				(0x38)
+#define TAIKO_A_RX_HPH_CHOP_CTL			(0x1A5)
+#define TAIKO_A_RX_HPH_CHOP_CTL__POR				(0xB4)
+#define TAIKO_A_RX_HPH_BIAS_PA			(0x1A6)
+#define TAIKO_A_RX_HPH_BIAS_PA__POR				(0xAA)
+#define TAIKO_A_RX_HPH_BIAS_LDO			(0x1A7)
+#define TAIKO_A_RX_HPH_BIAS_LDO__POR				(0x87)
+#define TAIKO_A_RX_HPH_BIAS_CNP			(0x1A8)
+#define TAIKO_A_RX_HPH_BIAS_CNP__POR				(0x8A)
+#define TAIKO_A_RX_HPH_BIAS_WG_OCP			(0x1A9)
+#define TAIKO_A_RX_HPH_BIAS_WG_OCP__POR				(0x2A)
+#define TAIKO_A_RX_HPH_OCP_CTL			(0x1AA)
+#define TAIKO_A_RX_HPH_OCP_CTL__POR				(0x68)
+#define TAIKO_A_RX_HPH_CNP_EN			(0x1AB)
+#define TAIKO_A_RX_HPH_CNP_EN__POR				(0x80)
+#define TAIKO_A_RX_HPH_CNP_WG_CTL			(0x1AC)
+#define TAIKO_A_RX_HPH_CNP_WG_CTL__POR				(0xDE)
+#define TAIKO_A_RX_HPH_CNP_WG_TIME			(0x1AD)
+#define TAIKO_A_RX_HPH_CNP_WG_TIME__POR				(0x2A)
+#define TAIKO_A_RX_HPH_L_GAIN			(0x1AE)
+#define TAIKO_A_RX_HPH_L_GAIN__POR				(0x00)
+#define TAIKO_A_RX_HPH_L_TEST			(0x1AF)
+#define TAIKO_A_RX_HPH_L_TEST__POR				(0x00)
+#define TAIKO_A_RX_HPH_L_PA_CTL			(0x1B0)
+#define TAIKO_A_RX_HPH_L_PA_CTL__POR				(0x40)
+#define TAIKO_A_RX_HPH_L_DAC_CTL			(0x1B1)
+#define TAIKO_A_RX_HPH_L_DAC_CTL__POR				(0x00)
+#define TAIKO_A_RX_HPH_L_ATEST			(0x1B2)
+#define TAIKO_A_RX_HPH_L_ATEST__POR				(0x00)
+#define TAIKO_A_RX_HPH_L_STATUS			(0x1B3)
+#define TAIKO_A_RX_HPH_L_STATUS__POR				(0x00)
+#define TAIKO_A_RX_HPH_R_GAIN			(0x1B4)
+#define TAIKO_A_RX_HPH_R_GAIN__POR				(0x00)
+#define TAIKO_A_RX_HPH_R_TEST			(0x1B5)
+#define TAIKO_A_RX_HPH_R_TEST__POR				(0x00)
+#define TAIKO_A_RX_HPH_R_PA_CTL			(0x1B6)
+#define TAIKO_A_RX_HPH_R_PA_CTL__POR				(0x40)
+#define TAIKO_A_RX_HPH_R_DAC_CTL			(0x1B7)
+#define TAIKO_A_RX_HPH_R_DAC_CTL__POR				(0x00)
+#define TAIKO_A_RX_HPH_R_ATEST			(0x1B8)
+#define TAIKO_A_RX_HPH_R_ATEST__POR				(0x00)
+#define TAIKO_A_RX_HPH_R_STATUS			(0x1B9)
+#define TAIKO_A_RX_HPH_R_STATUS__POR				(0x00)
+#define TAIKO_A_RX_EAR_BIAS_PA			(0x1BA)
+#define TAIKO_A_RX_EAR_BIAS_PA__POR				(0xA6)
+#define TAIKO_A_RX_EAR_BIAS_CMBUFF			(0x1BB)
+#define TAIKO_A_RX_EAR_BIAS_CMBUFF__POR				(0xA0)
+#define TAIKO_A_RX_EAR_EN			(0x1BC)
+#define TAIKO_A_RX_EAR_EN__POR				(0x00)
+#define TAIKO_A_RX_EAR_GAIN			(0x1BD)
+#define TAIKO_A_RX_EAR_GAIN__POR				(0x02)
+#define TAIKO_A_RX_EAR_CMBUFF			(0x1BE)
+#define TAIKO_A_RX_EAR_CMBUFF__POR				(0x04)
+#define TAIKO_A_RX_EAR_ICTL			(0x1BF)
+#define TAIKO_A_RX_EAR_ICTL__POR				(0x40)
+#define TAIKO_A_RX_EAR_CCOMP			(0x1C0)
+#define TAIKO_A_RX_EAR_CCOMP__POR				(0x08)
+#define TAIKO_A_RX_EAR_VCM			(0x1C1)
+#define TAIKO_A_RX_EAR_VCM__POR				(0x03)
+#define TAIKO_A_RX_EAR_CNP			(0x1C2)
+#define TAIKO_A_RX_EAR_CNP__POR				(0xF2)
+#define TAIKO_A_RX_EAR_DAC_CTL_ATEST			(0x1C3)
+#define TAIKO_A_RX_EAR_DAC_CTL_ATEST__POR				(0x00)
+#define TAIKO_A_RX_EAR_STATUS			(0x1C5)
+#define TAIKO_A_RX_EAR_STATUS__POR				(0x04)
+#define TAIKO_A_RX_LINE_BIAS_PA			(0x1C6)
+#define TAIKO_A_RX_LINE_BIAS_PA__POR				(0xA8)
+#define TAIKO_A_RX_BUCK_BIAS1			(0x1C7)
+#define TAIKO_A_RX_BUCK_BIAS1__POR				(0x42)
+#define TAIKO_A_RX_BUCK_BIAS2			(0x1C8)
+#define TAIKO_A_RX_BUCK_BIAS2__POR				(0x84)
+#define TAIKO_A_RX_LINE_COM			(0x1C9)
+#define TAIKO_A_RX_LINE_COM__POR				(0x80)
+#define TAIKO_A_RX_LINE_CNP_EN			(0x1CA)
+#define TAIKO_A_RX_LINE_CNP_EN__POR				(0x00)
+#define TAIKO_A_RX_LINE_CNP_WG_CTL			(0x1CB)
+#define TAIKO_A_RX_LINE_CNP_WG_CTL__POR				(0x00)
+#define TAIKO_A_RX_LINE_CNP_WG_TIME			(0x1CC)
+#define TAIKO_A_RX_LINE_CNP_WG_TIME__POR				(0x04)
+#define TAIKO_A_RX_LINE_1_GAIN			(0x1CD)
+#define TAIKO_A_RX_LINE_1_GAIN__POR				(0x00)
+#define TAIKO_A_RX_LINE_1_TEST			(0x1CE)
+#define TAIKO_A_RX_LINE_1_TEST__POR				(0x00)
+#define TAIKO_A_RX_LINE_1_DAC_CTL			(0x1CF)
+#define TAIKO_A_RX_LINE_1_DAC_CTL__POR				(0x00)
+#define TAIKO_A_RX_LINE_1_STATUS			(0x1D0)
+#define TAIKO_A_RX_LINE_1_STATUS__POR				(0x00)
+#define TAIKO_A_RX_LINE_2_GAIN			(0x1D1)
+#define TAIKO_A_RX_LINE_2_GAIN__POR				(0x00)
+#define TAIKO_A_RX_LINE_2_TEST			(0x1D2)
+#define TAIKO_A_RX_LINE_2_TEST__POR				(0x00)
+#define TAIKO_A_RX_LINE_2_DAC_CTL			(0x1D3)
+#define TAIKO_A_RX_LINE_2_DAC_CTL__POR				(0x00)
+#define TAIKO_A_RX_LINE_2_STATUS			(0x1D4)
+#define TAIKO_A_RX_LINE_2_STATUS__POR				(0x00)
+#define TAIKO_A_RX_LINE_3_GAIN			(0x1D5)
+#define TAIKO_A_RX_LINE_3_GAIN__POR				(0x00)
+#define TAIKO_A_RX_LINE_3_TEST			(0x1D6)
+#define TAIKO_A_RX_LINE_3_TEST__POR				(0x00)
+#define TAIKO_A_RX_LINE_3_DAC_CTL			(0x1D7)
+#define TAIKO_A_RX_LINE_3_DAC_CTL__POR				(0x00)
+#define TAIKO_A_RX_LINE_3_STATUS			(0x1D8)
+#define TAIKO_A_RX_LINE_3_STATUS__POR				(0x00)
+#define TAIKO_A_RX_LINE_4_GAIN			(0x1D9)
+#define TAIKO_A_RX_LINE_4_GAIN__POR				(0x00)
+#define TAIKO_A_RX_LINE_4_TEST			(0x1DA)
+#define TAIKO_A_RX_LINE_4_TEST__POR				(0x00)
+#define TAIKO_A_RX_LINE_4_DAC_CTL			(0x1DB)
+#define TAIKO_A_RX_LINE_4_DAC_CTL__POR				(0x00)
+#define TAIKO_A_RX_LINE_4_STATUS			(0x1DC)
+#define TAIKO_A_RX_LINE_4_STATUS__POR				(0x00)
+#define TAIKO_A_RX_LINE_CNP_DBG			(0x1DD)
+#define TAIKO_A_RX_LINE_CNP_DBG__POR				(0x00)
+#define TAIKO_A_SPKR_DRV_EN			(0x1DF)
+#define TAIKO_A_SPKR_DRV_EN__POR				(0x6F)
+#define TAIKO_A_SPKR_DRV_GAIN			(0x1E0)
+#define TAIKO_A_SPKR_DRV_GAIN__POR				(0x00)
+#define TAIKO_A_SPKR_DRV_DAC_CTL			(0x1E1)
+#define TAIKO_A_SPKR_DRV_DAC_CTL__POR				(0x04)
+#define TAIKO_A_SPKR_DRV_OCP_CTL			(0x1E2)
+#define TAIKO_A_SPKR_DRV_OCP_CTL__POR				(0x98)
+#define TAIKO_A_SPKR_DRV_CLIP_DET			(0x1E3)
+#define TAIKO_A_SPKR_DRV_CLIP_DET__POR				(0x48)
+#define TAIKO_A_SPKR_DRV_IEC			(0x1E4)
+#define TAIKO_A_SPKR_DRV_IEC__POR				(0x20)
+#define TAIKO_A_SPKR_DRV_DBG_DAC			(0x1E5)
+#define TAIKO_A_SPKR_DRV_DBG_DAC__POR				(0x05)
+#define TAIKO_A_SPKR_DRV_DBG_PA			(0x1E6)
+#define TAIKO_A_SPKR_DRV_DBG_PA__POR				(0x18)
+#define TAIKO_A_SPKR_DRV_DBG_PWRSTG			(0x1E7)
+#define TAIKO_A_SPKR_DRV_DBG_PWRSTG__POR				(0x00)
+#define TAIKO_A_SPKR_DRV_BIAS_LDO			(0x1E8)
+#define TAIKO_A_SPKR_DRV_BIAS_LDO__POR				(0x45)
+#define TAIKO_A_SPKR_DRV_BIAS_INT			(0x1E9)
+#define TAIKO_A_SPKR_DRV_BIAS_INT__POR				(0xA5)
+#define TAIKO_A_SPKR_DRV_BIAS_PA			(0x1EA)
+#define TAIKO_A_SPKR_DRV_BIAS_PA__POR				(0x55)
+#define TAIKO_A_SPKR_DRV_STATUS_OCP			(0x1EB)
+#define TAIKO_A_SPKR_DRV_STATUS_OCP__POR				(0x00)
+#define TAIKO_A_SPKR_DRV_STATUS_PA			(0x1EC)
+#define TAIKO_A_SPKR_DRV_STATUS_PA__POR				(0x00)
+#define TAIKO_A_SPKR_PROT_EN			(0x1ED)
+#define TAIKO_A_SPKR_PROT_EN__POR				(0x00)
+#define TAIKO_A_SPKR_PROT_ADC_EN			(0x1EE)
+#define TAIKO_A_SPKR_PROT_ADC_EN__POR				(0x44)
+#define TAIKO_A_SPKR_PROT_ISENSE_BIAS			(0x1EF)
+#define TAIKO_A_SPKR_PROT_ISENSE_BIAS__POR				(0x44)
+#define TAIKO_A_SPKR_PROT_VSENSE_BIAS			(0x1F0)
+#define TAIKO_A_SPKR_PROT_VSENSE_BIAS__POR				(0x44)
+#define TAIKO_A_SPKR_PROT_ADC_ATEST_REFCTRL			(0x1F1)
+#define TAIKO_A_SPKR_PROT_ADC_ATEST_REFCTRL__POR			(0x00)
+#define TAIKO_A_SPKR_PROT_ADC_TEST_CTL			(0x1F2)
+#define TAIKO_A_SPKR_PROT_ADC_TEST_CTL__POR				(0x38)
+#define TAIKO_A_SPKR_PROT_TEST_BLOCK_EN			(0x1F3)
+#define TAIKO_A_SPKR_PROT_TEST_BLOCK_EN__POR				(0xFC)
+#define TAIKO_A_SPKR_PROT_ATEST			(0x1F4)
+#define TAIKO_A_SPKR_PROT_ATEST__POR				(0x00)
+#define TAIKO_A_SPKR_PROT_V_SAR_ERR			(0x1F5)
+#define TAIKO_A_SPKR_PROT_V_SAR_ERR__POR				(0x00)
+#define TAIKO_A_SPKR_PROT_I_SAR_ERR			(0x1F6)
+#define TAIKO_A_SPKR_PROT_I_SAR_ERR__POR				(0x00)
+#define TAIKO_A_SPKR_PROT_LDO_CTRL			(0x1F7)
+#define TAIKO_A_SPKR_PROT_LDO_CTRL__POR				(0x00)
+#define TAIKO_A_SPKR_PROT_ISENSE_CTRL			(0x1F8)
+#define TAIKO_A_SPKR_PROT_ISENSE_CTRL__POR				(0x00)
+#define TAIKO_A_SPKR_PROT_VSENSE_CTRL			(0x1F9)
+#define TAIKO_A_SPKR_PROT_VSENSE_CTRL__POR				(0x00)
+#define TAIKO_A_RC_OSC_FREQ			(0x1FA)
+#define TAIKO_A_RC_OSC_FREQ__POR				(0x46)
+#define TAIKO_A_RC_OSC_TEST			(0x1FB)
+#define TAIKO_A_RC_OSC_TEST__POR				(0x0A)
+#define TAIKO_A_RC_OSC_STATUS			(0x1FC)
+#define TAIKO_A_RC_OSC_STATUS__POR				(0x18)
+#define TAIKO_A_RC_OSC_TUNER			(0x1FD)
+#define TAIKO_A_RC_OSC_TUNER__POR				(0x00)
+#define TAIKO_A_MBHC_HPH			(0x1FE)
+#define TAIKO_A_MBHC_HPH__POR				(0x44)
+#define TAIKO_A_CDC_ANC1_B1_CTL			(0x200)
+#define TAIKO_A_CDC_ANC1_B1_CTL__POR				(0x00)
+#define TAIKO_A_CDC_ANC2_B1_CTL			(0x280)
+#define TAIKO_A_CDC_ANC2_B1_CTL__POR				(0x00)
+#define TAIKO_A_CDC_ANC1_SHIFT			(0x201)
+#define TAIKO_A_CDC_ANC1_SHIFT__POR				(0x00)
+#define TAIKO_A_CDC_ANC2_SHIFT			(0x281)
+#define TAIKO_A_CDC_ANC2_SHIFT__POR				(0x00)
+#define TAIKO_A_CDC_ANC1_IIR_B1_CTL			(0x202)
+#define TAIKO_A_CDC_ANC1_IIR_B1_CTL__POR				(0x00)
+#define TAIKO_A_CDC_ANC2_IIR_B1_CTL			(0x282)
+#define TAIKO_A_CDC_ANC2_IIR_B1_CTL__POR				(0x00)
+#define TAIKO_A_CDC_ANC1_IIR_B2_CTL			(0x203)
+#define TAIKO_A_CDC_ANC1_IIR_B2_CTL__POR				(0x00)
+#define TAIKO_A_CDC_ANC2_IIR_B2_CTL			(0x283)
+#define TAIKO_A_CDC_ANC2_IIR_B2_CTL__POR				(0x00)
+#define TAIKO_A_CDC_ANC1_IIR_B3_CTL			(0x204)
+#define TAIKO_A_CDC_ANC1_IIR_B3_CTL__POR				(0x00)
+#define TAIKO_A_CDC_ANC2_IIR_B3_CTL			(0x284)
+#define TAIKO_A_CDC_ANC2_IIR_B3_CTL__POR				(0x00)
+#define TAIKO_A_CDC_ANC1_LPF_B1_CTL			(0x206)
+#define TAIKO_A_CDC_ANC1_LPF_B1_CTL__POR				(0x00)
+#define TAIKO_A_CDC_ANC2_LPF_B1_CTL			(0x286)
+#define TAIKO_A_CDC_ANC2_LPF_B1_CTL__POR				(0x00)
+#define TAIKO_A_CDC_ANC1_LPF_B2_CTL			(0x207)
+#define TAIKO_A_CDC_ANC1_LPF_B2_CTL__POR				(0x00)
+#define TAIKO_A_CDC_ANC2_LPF_B2_CTL			(0x287)
+#define TAIKO_A_CDC_ANC2_LPF_B2_CTL__POR				(0x00)
+#define TAIKO_A_CDC_ANC1_SPARE			(0x209)
+#define TAIKO_A_CDC_ANC1_SPARE__POR				(0x00)
+#define TAIKO_A_CDC_ANC2_SPARE			(0x289)
+#define TAIKO_A_CDC_ANC2_SPARE__POR				(0x00)
+#define TAIKO_A_CDC_ANC1_SMLPF_CTL			(0x20A)
+#define TAIKO_A_CDC_ANC1_SMLPF_CTL__POR				(0x00)
+#define TAIKO_A_CDC_ANC2_SMLPF_CTL			(0x28A)
+#define TAIKO_A_CDC_ANC2_SMLPF_CTL__POR				(0x00)
+#define TAIKO_A_CDC_ANC1_DCFLT_CTL			(0x20B)
+#define TAIKO_A_CDC_ANC1_DCFLT_CTL__POR				(0x00)
+#define TAIKO_A_CDC_ANC2_DCFLT_CTL			(0x28B)
+#define TAIKO_A_CDC_ANC2_DCFLT_CTL__POR				(0x00)
+#define TAIKO_A_CDC_ANC1_GAIN_CTL			(0x20C)
+#define TAIKO_A_CDC_ANC1_GAIN_CTL__POR				(0x00)
+#define TAIKO_A_CDC_ANC2_GAIN_CTL			(0x28C)
+#define TAIKO_A_CDC_ANC2_GAIN_CTL__POR				(0x00)
+#define TAIKO_A_CDC_ANC1_B2_CTL			(0x20D)
+#define TAIKO_A_CDC_ANC1_B2_CTL__POR				(0x00)
+#define TAIKO_A_CDC_ANC2_B2_CTL			(0x28D)
+#define TAIKO_A_CDC_ANC2_B2_CTL__POR				(0x00)
+#define TAIKO_A_CDC_TX1_VOL_CTL_TIMER			(0x220)
+#define TAIKO_A_CDC_TX1_VOL_CTL_TIMER__POR				(0x00)
+#define TAIKO_A_CDC_TX2_VOL_CTL_TIMER			(0x228)
+#define TAIKO_A_CDC_TX2_VOL_CTL_TIMER__POR				(0x00)
+#define TAIKO_A_CDC_TX3_VOL_CTL_TIMER			(0x230)
+#define TAIKO_A_CDC_TX3_VOL_CTL_TIMER__POR				(0x00)
+#define TAIKO_A_CDC_TX4_VOL_CTL_TIMER			(0x238)
+#define TAIKO_A_CDC_TX4_VOL_CTL_TIMER__POR				(0x00)
+#define TAIKO_A_CDC_TX5_VOL_CTL_TIMER			(0x240)
+#define TAIKO_A_CDC_TX5_VOL_CTL_TIMER__POR				(0x00)
+#define TAIKO_A_CDC_TX6_VOL_CTL_TIMER			(0x248)
+#define TAIKO_A_CDC_TX6_VOL_CTL_TIMER__POR				(0x00)
+#define TAIKO_A_CDC_TX7_VOL_CTL_TIMER			(0x250)
+#define TAIKO_A_CDC_TX7_VOL_CTL_TIMER__POR				(0x00)
+#define TAIKO_A_CDC_TX8_VOL_CTL_TIMER			(0x258)
+#define TAIKO_A_CDC_TX8_VOL_CTL_TIMER__POR				(0x00)
+#define TAIKO_A_CDC_TX9_VOL_CTL_TIMER			(0x260)
+#define TAIKO_A_CDC_TX9_VOL_CTL_TIMER__POR				(0x00)
+#define TAIKO_A_CDC_TX10_VOL_CTL_TIMER			(0x268)
+#define TAIKO_A_CDC_TX10_VOL_CTL_TIMER__POR				(0x00)
+#define TAIKO_A_CDC_TX1_VOL_CTL_GAIN			(0x221)
+#define TAIKO_A_CDC_TX1_VOL_CTL_GAIN__POR				(0x00)
+#define TAIKO_A_CDC_TX2_VOL_CTL_GAIN			(0x229)
+#define TAIKO_A_CDC_TX2_VOL_CTL_GAIN__POR				(0x00)
+#define TAIKO_A_CDC_TX3_VOL_CTL_GAIN			(0x231)
+#define TAIKO_A_CDC_TX3_VOL_CTL_GAIN__POR				(0x00)
+#define TAIKO_A_CDC_TX4_VOL_CTL_GAIN			(0x239)
+#define TAIKO_A_CDC_TX4_VOL_CTL_GAIN__POR				(0x00)
+#define TAIKO_A_CDC_TX5_VOL_CTL_GAIN			(0x241)
+#define TAIKO_A_CDC_TX5_VOL_CTL_GAIN__POR				(0x00)
+#define TAIKO_A_CDC_TX6_VOL_CTL_GAIN			(0x249)
+#define TAIKO_A_CDC_TX6_VOL_CTL_GAIN__POR				(0x00)
+#define TAIKO_A_CDC_TX7_VOL_CTL_GAIN			(0x251)
+#define TAIKO_A_CDC_TX7_VOL_CTL_GAIN__POR				(0x00)
+#define TAIKO_A_CDC_TX8_VOL_CTL_GAIN			(0x259)
+#define TAIKO_A_CDC_TX8_VOL_CTL_GAIN__POR				(0x00)
+#define TAIKO_A_CDC_TX9_VOL_CTL_GAIN			(0x261)
+#define TAIKO_A_CDC_TX9_VOL_CTL_GAIN__POR				(0x00)
+#define TAIKO_A_CDC_TX10_VOL_CTL_GAIN			(0x269)
+#define TAIKO_A_CDC_TX10_VOL_CTL_GAIN__POR				(0x00)
+#define TAIKO_A_CDC_TX1_VOL_CTL_CFG			(0x222)
+#define TAIKO_A_CDC_TX1_VOL_CTL_CFG__POR				(0x00)
+#define TAIKO_A_CDC_TX2_VOL_CTL_CFG			(0x22A)
+#define TAIKO_A_CDC_TX2_VOL_CTL_CFG__POR				(0x00)
+#define TAIKO_A_CDC_TX3_VOL_CTL_CFG			(0x232)
+#define TAIKO_A_CDC_TX3_VOL_CTL_CFG__POR				(0x00)
+#define TAIKO_A_CDC_TX4_VOL_CTL_CFG			(0x23A)
+#define TAIKO_A_CDC_TX4_VOL_CTL_CFG__POR				(0x00)
+#define TAIKO_A_CDC_TX5_VOL_CTL_CFG			(0x242)
+#define TAIKO_A_CDC_TX5_VOL_CTL_CFG__POR				(0x00)
+#define TAIKO_A_CDC_TX6_VOL_CTL_CFG			(0x24A)
+#define TAIKO_A_CDC_TX6_VOL_CTL_CFG__POR				(0x00)
+#define TAIKO_A_CDC_TX7_VOL_CTL_CFG			(0x252)
+#define TAIKO_A_CDC_TX7_VOL_CTL_CFG__POR				(0x00)
+#define TAIKO_A_CDC_TX8_VOL_CTL_CFG			(0x25A)
+#define TAIKO_A_CDC_TX8_VOL_CTL_CFG__POR				(0x00)
+#define TAIKO_A_CDC_TX9_VOL_CTL_CFG			(0x262)
+#define TAIKO_A_CDC_TX9_VOL_CTL_CFG__POR				(0x00)
+#define TAIKO_A_CDC_TX10_VOL_CTL_CFG			(0x26A)
+#define TAIKO_A_CDC_TX10_VOL_CTL_CFG__POR				(0x00)
+#define TAIKO_A_CDC_TX1_MUX_CTL			(0x223)
+#define TAIKO_A_CDC_TX1_MUX_CTL__POR				(0x08)
+#define TAIKO_A_CDC_TX2_MUX_CTL			(0x22B)
+#define TAIKO_A_CDC_TX2_MUX_CTL__POR				(0x08)
+#define TAIKO_A_CDC_TX3_MUX_CTL			(0x233)
+#define TAIKO_A_CDC_TX3_MUX_CTL__POR				(0x08)
+#define TAIKO_A_CDC_TX4_MUX_CTL			(0x23B)
+#define TAIKO_A_CDC_TX4_MUX_CTL__POR				(0x08)
+#define TAIKO_A_CDC_TX5_MUX_CTL			(0x243)
+#define TAIKO_A_CDC_TX5_MUX_CTL__POR				(0x08)
+#define TAIKO_A_CDC_TX6_MUX_CTL			(0x24B)
+#define TAIKO_A_CDC_TX6_MUX_CTL__POR				(0x08)
+#define TAIKO_A_CDC_TX7_MUX_CTL			(0x253)
+#define TAIKO_A_CDC_TX7_MUX_CTL__POR				(0x08)
+#define TAIKO_A_CDC_TX8_MUX_CTL			(0x25B)
+#define TAIKO_A_CDC_TX8_MUX_CTL__POR				(0x08)
+#define TAIKO_A_CDC_TX9_MUX_CTL			(0x263)
+#define TAIKO_A_CDC_TX9_MUX_CTL__POR				(0x08)
+#define TAIKO_A_CDC_TX10_MUX_CTL			(0x26B)
+#define TAIKO_A_CDC_TX10_MUX_CTL__POR				(0x08)
+#define TAIKO_A_CDC_TX1_CLK_FS_CTL			(0x224)
+#define TAIKO_A_CDC_TX1_CLK_FS_CTL__POR				(0x03)
+#define TAIKO_A_CDC_TX2_CLK_FS_CTL			(0x22C)
+#define TAIKO_A_CDC_TX2_CLK_FS_CTL__POR				(0x03)
+#define TAIKO_A_CDC_TX3_CLK_FS_CTL			(0x234)
+#define TAIKO_A_CDC_TX3_CLK_FS_CTL__POR				(0x03)
+#define TAIKO_A_CDC_TX4_CLK_FS_CTL			(0x23C)
+#define TAIKO_A_CDC_TX4_CLK_FS_CTL__POR				(0x03)
+#define TAIKO_A_CDC_TX5_CLK_FS_CTL			(0x244)
+#define TAIKO_A_CDC_TX5_CLK_FS_CTL__POR				(0x03)
+#define TAIKO_A_CDC_TX6_CLK_FS_CTL			(0x24C)
+#define TAIKO_A_CDC_TX6_CLK_FS_CTL__POR				(0x03)
+#define TAIKO_A_CDC_TX7_CLK_FS_CTL			(0x254)
+#define TAIKO_A_CDC_TX7_CLK_FS_CTL__POR				(0x03)
+#define TAIKO_A_CDC_TX8_CLK_FS_CTL			(0x25C)
+#define TAIKO_A_CDC_TX8_CLK_FS_CTL__POR				(0x03)
+#define TAIKO_A_CDC_TX9_CLK_FS_CTL			(0x264)
+#define TAIKO_A_CDC_TX9_CLK_FS_CTL__POR				(0x03)
+#define TAIKO_A_CDC_TX10_CLK_FS_CTL			(0x26C)
+#define TAIKO_A_CDC_TX10_CLK_FS_CTL__POR				(0x03)
+#define TAIKO_A_CDC_TX1_DMIC_CTL			(0x225)
+#define TAIKO_A_CDC_TX1_DMIC_CTL__POR				(0x00)
+#define TAIKO_A_CDC_TX2_DMIC_CTL			(0x22D)
+#define TAIKO_A_CDC_TX2_DMIC_CTL__POR				(0x00)
+#define TAIKO_A_CDC_TX3_DMIC_CTL			(0x235)
+#define TAIKO_A_CDC_TX3_DMIC_CTL__POR				(0x00)
+#define TAIKO_A_CDC_TX4_DMIC_CTL			(0x23D)
+#define TAIKO_A_CDC_TX4_DMIC_CTL__POR				(0x00)
+#define TAIKO_A_CDC_TX5_DMIC_CTL			(0x245)
+#define TAIKO_A_CDC_TX5_DMIC_CTL__POR				(0x00)
+#define TAIKO_A_CDC_TX6_DMIC_CTL			(0x24D)
+#define TAIKO_A_CDC_TX6_DMIC_CTL__POR				(0x00)
+#define TAIKO_A_CDC_TX7_DMIC_CTL			(0x255)
+#define TAIKO_A_CDC_TX7_DMIC_CTL__POR				(0x00)
+#define TAIKO_A_CDC_TX8_DMIC_CTL			(0x25D)
+#define TAIKO_A_CDC_TX8_DMIC_CTL__POR				(0x00)
+#define TAIKO_A_CDC_TX9_DMIC_CTL			(0x265)
+#define TAIKO_A_CDC_TX9_DMIC_CTL__POR				(0x00)
+#define TAIKO_A_CDC_TX10_DMIC_CTL			(0x26D)
+#define TAIKO_A_CDC_TX10_DMIC_CTL__POR				(0x00)
+#define TAIKO_A_CDC_DEBUG_B1_CTL			(0x278)
+#define TAIKO_A_CDC_DEBUG_B1_CTL__POR				(0x00)
+#define TAIKO_A_CDC_DEBUG_B2_CTL			(0x279)
+#define TAIKO_A_CDC_DEBUG_B2_CTL__POR				(0x00)
+#define TAIKO_A_CDC_DEBUG_B3_CTL			(0x27A)
+#define TAIKO_A_CDC_DEBUG_B3_CTL__POR				(0x00)
+#define TAIKO_A_CDC_DEBUG_B4_CTL			(0x27B)
+#define TAIKO_A_CDC_DEBUG_B4_CTL__POR				(0x00)
+#define TAIKO_A_CDC_DEBUG_B5_CTL			(0x27C)
+#define TAIKO_A_CDC_DEBUG_B5_CTL__POR				(0x00)
+#define TAIKO_A_CDC_DEBUG_B6_CTL			(0x27D)
+#define TAIKO_A_CDC_DEBUG_B6_CTL__POR				(0x00)
+#define TAIKO_A_CDC_DEBUG_B7_CTL			(0x27E)
+#define TAIKO_A_CDC_DEBUG_B7_CTL__POR				(0x00)
+#define TAIKO_A_CDC_SRC1_PDA_CFG			(0x2A0)
+#define TAIKO_A_CDC_SRC1_PDA_CFG__POR				(0x00)
+#define TAIKO_A_CDC_SRC2_PDA_CFG			(0x2A8)
+#define TAIKO_A_CDC_SRC2_PDA_CFG__POR				(0x00)
+#define TAIKO_A_CDC_SRC1_FS_CTL			(0x2A1)
+#define TAIKO_A_CDC_SRC1_FS_CTL__POR				(0x1B)
+#define TAIKO_A_CDC_SRC2_FS_CTL			(0x2A9)
+#define TAIKO_A_CDC_SRC2_FS_CTL__POR				(0x1B)
+#define TAIKO_A_CDC_RX1_B1_CTL			(0x2B0)
+#define TAIKO_A_CDC_RX1_B1_CTL__POR				(0x00)
+#define TAIKO_A_CDC_RX2_B1_CTL			(0x2B8)
+#define TAIKO_A_CDC_RX2_B1_CTL__POR				(0x00)
+#define TAIKO_A_CDC_RX3_B1_CTL			(0x2C0)
+#define TAIKO_A_CDC_RX3_B1_CTL__POR				(0x00)
+#define TAIKO_A_CDC_RX4_B1_CTL			(0x2C8)
+#define TAIKO_A_CDC_RX4_B1_CTL__POR				(0x00)
+#define TAIKO_A_CDC_RX5_B1_CTL			(0x2D0)
+#define TAIKO_A_CDC_RX5_B1_CTL__POR				(0x00)
+#define TAIKO_A_CDC_RX6_B1_CTL			(0x2D8)
+#define TAIKO_A_CDC_RX6_B1_CTL__POR				(0x00)
+#define TAIKO_A_CDC_RX7_B1_CTL			(0x2E0)
+#define TAIKO_A_CDC_RX7_B1_CTL__POR				(0x00)
+#define TAIKO_A_CDC_RX1_B2_CTL			(0x2B1)
+#define TAIKO_A_CDC_RX1_B2_CTL__POR				(0x00)
+#define TAIKO_A_CDC_RX2_B2_CTL			(0x2B9)
+#define TAIKO_A_CDC_RX2_B2_CTL__POR				(0x00)
+#define TAIKO_A_CDC_RX3_B2_CTL			(0x2C1)
+#define TAIKO_A_CDC_RX3_B2_CTL__POR				(0x00)
+#define TAIKO_A_CDC_RX4_B2_CTL			(0x2C9)
+#define TAIKO_A_CDC_RX4_B2_CTL__POR				(0x00)
+#define TAIKO_A_CDC_RX5_B2_CTL			(0x2D1)
+#define TAIKO_A_CDC_RX5_B2_CTL__POR				(0x00)
+#define TAIKO_A_CDC_RX6_B2_CTL			(0x2D9)
+#define TAIKO_A_CDC_RX6_B2_CTL__POR				(0x00)
+#define TAIKO_A_CDC_RX7_B2_CTL			(0x2E1)
+#define TAIKO_A_CDC_RX7_B2_CTL__POR				(0x00)
+#define TAIKO_A_CDC_RX1_B3_CTL			(0x2B2)
+#define TAIKO_A_CDC_RX1_B3_CTL__POR				(0x00)
+#define TAIKO_A_CDC_RX2_B3_CTL			(0x2BA)
+#define TAIKO_A_CDC_RX2_B3_CTL__POR				(0x00)
+#define TAIKO_A_CDC_RX3_B3_CTL			(0x2C2)
+#define TAIKO_A_CDC_RX3_B3_CTL__POR				(0x00)
+#define TAIKO_A_CDC_RX4_B3_CTL			(0x2CA)
+#define TAIKO_A_CDC_RX4_B3_CTL__POR				(0x00)
+#define TAIKO_A_CDC_RX5_B3_CTL			(0x2D2)
+#define TAIKO_A_CDC_RX5_B3_CTL__POR				(0x00)
+#define TAIKO_A_CDC_RX6_B3_CTL			(0x2DA)
+#define TAIKO_A_CDC_RX6_B3_CTL__POR				(0x00)
+#define TAIKO_A_CDC_RX7_B3_CTL			(0x2E2)
+#define TAIKO_A_CDC_RX7_B3_CTL__POR				(0x00)
+#define TAIKO_A_CDC_RX1_B4_CTL			(0x2B3)
+#define TAIKO_A_CDC_RX1_B4_CTL__POR				(0x00)
+#define TAIKO_A_CDC_RX2_B4_CTL			(0x2BB)
+#define TAIKO_A_CDC_RX2_B4_CTL__POR				(0x00)
+#define TAIKO_A_CDC_RX3_B4_CTL			(0x2C3)
+#define TAIKO_A_CDC_RX3_B4_CTL__POR				(0x00)
+#define TAIKO_A_CDC_RX4_B4_CTL			(0x2CB)
+#define TAIKO_A_CDC_RX4_B4_CTL__POR				(0x00)
+#define TAIKO_A_CDC_RX5_B4_CTL			(0x2D3)
+#define TAIKO_A_CDC_RX5_B4_CTL__POR				(0x00)
+#define TAIKO_A_CDC_RX6_B4_CTL			(0x2DB)
+#define TAIKO_A_CDC_RX6_B4_CTL__POR				(0x00)
+#define TAIKO_A_CDC_RX7_B4_CTL			(0x2E3)
+#define TAIKO_A_CDC_RX7_B4_CTL__POR				(0x00)
+#define TAIKO_A_CDC_RX1_B5_CTL			(0x2B4)
+#define TAIKO_A_CDC_RX1_B5_CTL__POR				(0x78)
+#define TAIKO_A_CDC_RX2_B5_CTL			(0x2BC)
+#define TAIKO_A_CDC_RX2_B5_CTL__POR				(0x78)
+#define TAIKO_A_CDC_RX3_B5_CTL			(0x2C4)
+#define TAIKO_A_CDC_RX3_B5_CTL__POR				(0x78)
+#define TAIKO_A_CDC_RX4_B5_CTL			(0x2CC)
+#define TAIKO_A_CDC_RX4_B5_CTL__POR				(0x78)
+#define TAIKO_A_CDC_RX5_B5_CTL			(0x2D4)
+#define TAIKO_A_CDC_RX5_B5_CTL__POR				(0x78)
+#define TAIKO_A_CDC_RX6_B5_CTL			(0x2DC)
+#define TAIKO_A_CDC_RX6_B5_CTL__POR				(0x78)
+#define TAIKO_A_CDC_RX7_B5_CTL			(0x2E4)
+#define TAIKO_A_CDC_RX7_B5_CTL__POR				(0x78)
+#define TAIKO_A_CDC_RX1_B6_CTL			(0x2B5)
+#define TAIKO_A_CDC_RX1_B6_CTL__POR				(0x80)
+#define TAIKO_A_CDC_RX2_B6_CTL			(0x2BD)
+#define TAIKO_A_CDC_RX2_B6_CTL__POR				(0x80)
+#define TAIKO_A_CDC_RX3_B6_CTL			(0x2C5)
+#define TAIKO_A_CDC_RX3_B6_CTL__POR				(0x80)
+#define TAIKO_A_CDC_RX4_B6_CTL			(0x2CD)
+#define TAIKO_A_CDC_RX4_B6_CTL__POR				(0x80)
+#define TAIKO_A_CDC_RX5_B6_CTL			(0x2D5)
+#define TAIKO_A_CDC_RX5_B6_CTL__POR				(0x80)
+#define TAIKO_A_CDC_RX6_B6_CTL			(0x2DD)
+#define TAIKO_A_CDC_RX6_B6_CTL__POR				(0x80)
+#define TAIKO_A_CDC_RX7_B6_CTL			(0x2E5)
+#define TAIKO_A_CDC_RX7_B6_CTL__POR				(0x80)
+#define TAIKO_A_CDC_RX1_VOL_CTL_B1_CTL			(0x2B6)
+#define TAIKO_A_CDC_RX1_VOL_CTL_B1_CTL__POR				(0x00)
+#define TAIKO_A_CDC_RX2_VOL_CTL_B1_CTL			(0x2BE)
+#define TAIKO_A_CDC_RX2_VOL_CTL_B1_CTL__POR				(0x00)
+#define TAIKO_A_CDC_RX3_VOL_CTL_B1_CTL			(0x2C6)
+#define TAIKO_A_CDC_RX3_VOL_CTL_B1_CTL__POR				(0x00)
+#define TAIKO_A_CDC_RX4_VOL_CTL_B1_CTL			(0x2CE)
+#define TAIKO_A_CDC_RX4_VOL_CTL_B1_CTL__POR				(0x00)
+#define TAIKO_A_CDC_RX5_VOL_CTL_B1_CTL			(0x2D6)
+#define TAIKO_A_CDC_RX5_VOL_CTL_B1_CTL__POR				(0x00)
+#define TAIKO_A_CDC_RX6_VOL_CTL_B1_CTL			(0x2DE)
+#define TAIKO_A_CDC_RX6_VOL_CTL_B1_CTL__POR				(0x00)
+#define TAIKO_A_CDC_RX7_VOL_CTL_B1_CTL			(0x2E6)
+#define TAIKO_A_CDC_RX7_VOL_CTL_B1_CTL__POR				(0x00)
+#define TAIKO_A_CDC_RX1_VOL_CTL_B2_CTL			(0x2B7)
+#define TAIKO_A_CDC_RX1_VOL_CTL_B2_CTL__POR				(0x00)
+#define TAIKO_A_CDC_RX2_VOL_CTL_B2_CTL			(0x2BF)
+#define TAIKO_A_CDC_RX2_VOL_CTL_B2_CTL__POR				(0x00)
+#define TAIKO_A_CDC_RX3_VOL_CTL_B2_CTL			(0x2C7)
+#define TAIKO_A_CDC_RX3_VOL_CTL_B2_CTL__POR				(0x00)
+#define TAIKO_A_CDC_RX4_VOL_CTL_B2_CTL			(0x2CF)
+#define TAIKO_A_CDC_RX4_VOL_CTL_B2_CTL__POR				(0x00)
+#define TAIKO_A_CDC_RX5_VOL_CTL_B2_CTL			(0x2D7)
+#define TAIKO_A_CDC_RX5_VOL_CTL_B2_CTL__POR				(0x00)
+#define TAIKO_A_CDC_RX6_VOL_CTL_B2_CTL			(0x2DF)
+#define TAIKO_A_CDC_RX6_VOL_CTL_B2_CTL__POR				(0x00)
+#define TAIKO_A_CDC_RX7_VOL_CTL_B2_CTL			(0x2E7)
+#define TAIKO_A_CDC_RX7_VOL_CTL_B2_CTL__POR				(0x00)
+#define TAIKO_A_CDC_VBAT_CFG			(0x2E8)
+#define TAIKO_A_CDC_VBAT_CFG__POR				(0x1A)
+#define TAIKO_A_CDC_VBAT_ADC_CAL1			(0x2E9)
+#define TAIKO_A_CDC_VBAT_ADC_CAL1__POR				(0x00)
+#define TAIKO_A_CDC_VBAT_ADC_CAL2			(0x2EA)
+#define TAIKO_A_CDC_VBAT_ADC_CAL2__POR				(0x00)
+#define TAIKO_A_CDC_VBAT_ADC_CAL3			(0x2EB)
+#define TAIKO_A_CDC_VBAT_ADC_CAL3__POR				(0x04)
+#define TAIKO_A_CDC_VBAT_PK_EST1			(0x2EC)
+#define TAIKO_A_CDC_VBAT_PK_EST1__POR				(0xE0)
+#define TAIKO_A_CDC_VBAT_PK_EST2			(0x2ED)
+#define TAIKO_A_CDC_VBAT_PK_EST2__POR				(0x01)
+#define TAIKO_A_CDC_VBAT_PK_EST3			(0x2EE)
+#define TAIKO_A_CDC_VBAT_PK_EST3__POR				(0x40)
+#define TAIKO_A_CDC_VBAT_RF_PROC1			(0x2EF)
+#define TAIKO_A_CDC_VBAT_RF_PROC1__POR				(0x2A)
+#define TAIKO_A_CDC_VBAT_RF_PROC2			(0x2F0)
+#define TAIKO_A_CDC_VBAT_RF_PROC2__POR				(0x86)
+#define TAIKO_A_CDC_VBAT_TAC1			(0x2F1)
+#define TAIKO_A_CDC_VBAT_TAC1__POR				(0x70)
+#define TAIKO_A_CDC_VBAT_TAC2			(0x2F2)
+#define TAIKO_A_CDC_VBAT_TAC2__POR				(0x18)
+#define TAIKO_A_CDC_VBAT_TAC3			(0x2F3)
+#define TAIKO_A_CDC_VBAT_TAC3__POR				(0x18)
+#define TAIKO_A_CDC_VBAT_TAC4			(0x2F4)
+#define TAIKO_A_CDC_VBAT_TAC4__POR				(0x03)
+#define TAIKO_A_CDC_VBAT_GAIN_UPD1			(0x2F5)
+#define TAIKO_A_CDC_VBAT_GAIN_UPD1__POR				(0x01)
+#define TAIKO_A_CDC_VBAT_GAIN_UPD2			(0x2F6)
+#define TAIKO_A_CDC_VBAT_GAIN_UPD2__POR				(0x00)
+#define TAIKO_A_CDC_VBAT_GAIN_UPD3			(0x2F7)
+#define TAIKO_A_CDC_VBAT_GAIN_UPD3__POR				(0x64)
+#define TAIKO_A_CDC_VBAT_GAIN_UPD4			(0x2F8)
+#define TAIKO_A_CDC_VBAT_GAIN_UPD4__POR				(0x01)
+#define TAIKO_A_CDC_VBAT_DEBUG1			(0x2F9)
+#define TAIKO_A_CDC_VBAT_DEBUG1__POR				(0x00)
+#define TAIKO_A_CDC_CLK_ANC_RESET_CTL			(0x300)
+#define TAIKO_A_CDC_CLK_ANC_RESET_CTL__POR				(0x00)
+#define TAIKO_A_CDC_CLK_RX_RESET_CTL			(0x301)
+#define TAIKO_A_CDC_CLK_RX_RESET_CTL__POR				(0x00)
+#define TAIKO_A_CDC_CLK_TX_RESET_B1_CTL			(0x302)
+#define TAIKO_A_CDC_CLK_TX_RESET_B1_CTL__POR				(0x00)
+#define TAIKO_A_CDC_CLK_TX_RESET_B2_CTL			(0x303)
+#define TAIKO_A_CDC_CLK_TX_RESET_B2_CTL__POR				(0x00)
+#define TAIKO_A_CDC_CLK_DMIC_B1_CTL			(0x304)
+#define TAIKO_A_CDC_CLK_DMIC_B1_CTL__POR				(0x00)
+#define TAIKO_A_CDC_CLK_DMIC_B2_CTL			(0x305)
+#define TAIKO_A_CDC_CLK_DMIC_B2_CTL__POR				(0x00)
+#define TAIKO_A_CDC_CLK_RX_I2S_CTL			(0x306)
+#define TAIKO_A_CDC_CLK_RX_I2S_CTL__POR				(0x03)
+#define TAIKO_A_CDC_CLK_TX_I2S_CTL			(0x307)
+#define TAIKO_A_CDC_CLK_TX_I2S_CTL__POR				(0x03)
+#define TAIKO_A_CDC_CLK_OTHR_RESET_B1_CTL			(0x308)
+#define TAIKO_A_CDC_CLK_OTHR_RESET_B1_CTL__POR				(0x00)
+#define TAIKO_A_CDC_CLK_OTHR_RESET_B2_CTL			(0x309)
+#define TAIKO_A_CDC_CLK_OTHR_RESET_B2_CTL__POR				(0x00)
+#define TAIKO_A_CDC_CLK_TX_CLK_EN_B1_CTL			(0x30A)
+#define TAIKO_A_CDC_CLK_TX_CLK_EN_B1_CTL__POR				(0x00)
+#define TAIKO_A_CDC_CLK_TX_CLK_EN_B2_CTL			(0x30B)
+#define TAIKO_A_CDC_CLK_TX_CLK_EN_B2_CTL__POR				(0x00)
+#define TAIKO_A_CDC_CLK_OTHR_CTL			(0x30C)
+#define TAIKO_A_CDC_CLK_OTHR_CTL__POR				(0x00)
+#define TAIKO_A_CDC_CLK_RDAC_CLK_EN_CTL			(0x30D)
+#define TAIKO_A_CDC_CLK_RDAC_CLK_EN_CTL__POR				(0x00)
+#define TAIKO_A_CDC_CLK_ANC_CLK_EN_CTL			(0x30E)
+#define TAIKO_A_CDC_CLK_ANC_CLK_EN_CTL__POR				(0x00)
+#define TAIKO_A_CDC_CLK_RX_B1_CTL			(0x30F)
+#define TAIKO_A_CDC_CLK_RX_B1_CTL__POR				(0x00)
+#define TAIKO_A_CDC_CLK_RX_B2_CTL			(0x310)
+#define TAIKO_A_CDC_CLK_RX_B2_CTL__POR				(0x00)
+#define TAIKO_A_CDC_CLK_MCLK_CTL			(0x311)
+#define TAIKO_A_CDC_CLK_MCLK_CTL__POR				(0x00)
+#define TAIKO_A_CDC_CLK_PDM_CTL			(0x312)
+#define TAIKO_A_CDC_CLK_PDM_CTL__POR				(0x00)
+#define TAIKO_A_CDC_CLK_SD_CTL			(0x313)
+#define TAIKO_A_CDC_CLK_SD_CTL__POR				(0x00)
+#define TAIKO_A_CDC_CLK_POWER_CTL			(0x314)
+#define TAIKO_A_CDC_CLK_POWER_CTL__POR				(0x00)
+#define TAIKO_A_CDC_CLSH_B1_CTL			(0x320)
+#define TAIKO_A_CDC_CLSH_B1_CTL__POR				(0xE4)
+#define TAIKO_A_CDC_CLSH_B2_CTL			(0x321)
+#define TAIKO_A_CDC_CLSH_B2_CTL__POR				(0x00)
+#define TAIKO_A_CDC_CLSH_B3_CTL			(0x322)
+#define TAIKO_A_CDC_CLSH_B3_CTL__POR				(0x00)
+#define TAIKO_A_CDC_CLSH_BUCK_NCP_VARS			(0x323)
+#define TAIKO_A_CDC_CLSH_BUCK_NCP_VARS__POR				(0x00)
+#define TAIKO_A_CDC_CLSH_IDLE_HPH_THSD			(0x324)
+#define TAIKO_A_CDC_CLSH_IDLE_HPH_THSD__POR				(0x12)
+#define TAIKO_A_CDC_CLSH_IDLE_EAR_THSD			(0x325)
+#define TAIKO_A_CDC_CLSH_IDLE_EAR_THSD__POR				(0x0C)
+#define TAIKO_A_CDC_CLSH_FCLKONLY_HPH_THSD			(0x326)
+#define TAIKO_A_CDC_CLSH_FCLKONLY_HPH_THSD__POR				(0x18)
+#define TAIKO_A_CDC_CLSH_FCLKONLY_EAR_THSD			(0x327)
+#define TAIKO_A_CDC_CLSH_FCLKONLY_EAR_THSD__POR				(0x23)
+#define TAIKO_A_CDC_CLSH_K_ADDR			(0x328)
+#define TAIKO_A_CDC_CLSH_K_ADDR__POR				(0x00)
+#define TAIKO_A_CDC_CLSH_K_DATA			(0x329)
+#define TAIKO_A_CDC_CLSH_K_DATA__POR				(0xA4)
+#define TAIKO_A_CDC_CLSH_I_PA_FACT_HPH_L			(0x32A)
+#define TAIKO_A_CDC_CLSH_I_PA_FACT_HPH_L__POR				(0xD7)
+#define TAIKO_A_CDC_CLSH_I_PA_FACT_HPH_U			(0x32B)
+#define TAIKO_A_CDC_CLSH_I_PA_FACT_HPH_U__POR				(0x05)
+#define TAIKO_A_CDC_CLSH_I_PA_FACT_EAR_L			(0x32C)
+#define TAIKO_A_CDC_CLSH_I_PA_FACT_EAR_L__POR				(0x60)
+#define TAIKO_A_CDC_CLSH_I_PA_FACT_EAR_U			(0x32D)
+#define TAIKO_A_CDC_CLSH_I_PA_FACT_EAR_U__POR				(0x09)
+#define TAIKO_A_CDC_CLSH_V_PA_HD_EAR			(0x32E)
+#define TAIKO_A_CDC_CLSH_V_PA_HD_EAR__POR				(0x00)
+#define TAIKO_A_CDC_CLSH_V_PA_HD_HPH			(0x32F)
+#define TAIKO_A_CDC_CLSH_V_PA_HD_HPH__POR				(0x00)
+#define TAIKO_A_CDC_CLSH_V_PA_MIN_EAR			(0x330)
+#define TAIKO_A_CDC_CLSH_V_PA_MIN_EAR__POR				(0x00)
+#define TAIKO_A_CDC_CLSH_V_PA_MIN_HPH			(0x331)
+#define TAIKO_A_CDC_CLSH_V_PA_MIN_HPH__POR				(0x00)
+#define TAIKO_A_CDC_IIR1_GAIN_B1_CTL			(0x340)
+#define TAIKO_A_CDC_IIR1_GAIN_B1_CTL__POR				(0x00)
+#define TAIKO_A_CDC_IIR2_GAIN_B1_CTL			(0x350)
+#define TAIKO_A_CDC_IIR2_GAIN_B1_CTL__POR				(0x00)
+#define TAIKO_A_CDC_IIR1_GAIN_B2_CTL			(0x341)
+#define TAIKO_A_CDC_IIR1_GAIN_B2_CTL__POR				(0x00)
+#define TAIKO_A_CDC_IIR2_GAIN_B2_CTL			(0x351)
+#define TAIKO_A_CDC_IIR2_GAIN_B2_CTL__POR				(0x00)
+#define TAIKO_A_CDC_IIR1_GAIN_B3_CTL			(0x342)
+#define TAIKO_A_CDC_IIR1_GAIN_B3_CTL__POR				(0x00)
+#define TAIKO_A_CDC_IIR2_GAIN_B3_CTL			(0x352)
+#define TAIKO_A_CDC_IIR2_GAIN_B3_CTL__POR				(0x00)
+#define TAIKO_A_CDC_IIR1_GAIN_B4_CTL			(0x343)
+#define TAIKO_A_CDC_IIR1_GAIN_B4_CTL__POR				(0x00)
+#define TAIKO_A_CDC_IIR2_GAIN_B4_CTL			(0x353)
+#define TAIKO_A_CDC_IIR2_GAIN_B4_CTL__POR				(0x00)
+#define TAIKO_A_CDC_IIR1_GAIN_B5_CTL			(0x344)
+#define TAIKO_A_CDC_IIR1_GAIN_B5_CTL__POR				(0x00)
+#define TAIKO_A_CDC_IIR2_GAIN_B5_CTL			(0x354)
+#define TAIKO_A_CDC_IIR2_GAIN_B5_CTL__POR				(0x00)
+#define TAIKO_A_CDC_IIR1_GAIN_B6_CTL			(0x345)
+#define TAIKO_A_CDC_IIR1_GAIN_B6_CTL__POR				(0x00)
+#define TAIKO_A_CDC_IIR2_GAIN_B6_CTL			(0x355)
+#define TAIKO_A_CDC_IIR2_GAIN_B6_CTL__POR				(0x00)
+#define TAIKO_A_CDC_IIR1_GAIN_B7_CTL			(0x346)
+#define TAIKO_A_CDC_IIR1_GAIN_B7_CTL__POR				(0x00)
+#define TAIKO_A_CDC_IIR2_GAIN_B7_CTL			(0x356)
+#define TAIKO_A_CDC_IIR2_GAIN_B7_CTL__POR				(0x00)
+#define TAIKO_A_CDC_IIR1_GAIN_B8_CTL			(0x347)
+#define TAIKO_A_CDC_IIR1_GAIN_B8_CTL__POR				(0x00)
+#define TAIKO_A_CDC_IIR2_GAIN_B8_CTL			(0x357)
+#define TAIKO_A_CDC_IIR2_GAIN_B8_CTL__POR				(0x00)
+#define TAIKO_A_CDC_IIR1_CTL			(0x348)
+#define TAIKO_A_CDC_IIR1_CTL__POR				(0x40)
+#define TAIKO_A_CDC_IIR2_CTL			(0x358)
+#define TAIKO_A_CDC_IIR2_CTL__POR				(0x40)
+#define TAIKO_A_CDC_IIR1_GAIN_TIMER_CTL			(0x349)
+#define TAIKO_A_CDC_IIR1_GAIN_TIMER_CTL__POR				(0x00)
+#define TAIKO_A_CDC_IIR2_GAIN_TIMER_CTL			(0x359)
+#define TAIKO_A_CDC_IIR2_GAIN_TIMER_CTL__POR				(0x00)
+#define TAIKO_A_CDC_IIR1_COEF_B1_CTL			(0x34A)
+#define TAIKO_A_CDC_IIR1_COEF_B1_CTL__POR				(0x00)
+#define TAIKO_A_CDC_IIR2_COEF_B1_CTL			(0x35A)
+#define TAIKO_A_CDC_IIR2_COEF_B1_CTL__POR				(0x00)
+#define TAIKO_A_CDC_IIR1_COEF_B2_CTL			(0x34B)
+#define TAIKO_A_CDC_IIR1_COEF_B2_CTL__POR				(0x00)
+#define TAIKO_A_CDC_IIR2_COEF_B2_CTL			(0x35B)
+#define TAIKO_A_CDC_IIR2_COEF_B2_CTL__POR				(0x00)
+#define TAIKO_A_CDC_TOP_GAIN_UPDATE			(0x360)
+#define TAIKO_A_CDC_TOP_GAIN_UPDATE__POR				(0x00)
+#define TAIKO_A_CDC_COMP0_B1_CTL			(0x368)
+#define TAIKO_A_CDC_COMP0_B1_CTL__POR				(0x30)
+#define TAIKO_A_CDC_COMP1_B1_CTL			(0x370)
+#define TAIKO_A_CDC_COMP1_B1_CTL__POR				(0x30)
+#define TAIKO_A_CDC_COMP2_B1_CTL			(0x378)
+#define TAIKO_A_CDC_COMP2_B1_CTL__POR				(0x30)
+#define TAIKO_A_CDC_COMP0_B2_CTL			(0x369)
+#define TAIKO_A_CDC_COMP0_B2_CTL__POR				(0xB5)
+#define TAIKO_A_CDC_COMP1_B2_CTL			(0x371)
+#define TAIKO_A_CDC_COMP1_B2_CTL__POR				(0xB5)
+#define TAIKO_A_CDC_COMP2_B2_CTL			(0x379)
+#define TAIKO_A_CDC_COMP2_B2_CTL__POR				(0xB5)
+#define TAIKO_A_CDC_COMP0_B3_CTL			(0x36A)
+#define TAIKO_A_CDC_COMP0_B3_CTL__POR				(0x28)
+#define TAIKO_A_CDC_COMP1_B3_CTL			(0x372)
+#define TAIKO_A_CDC_COMP1_B3_CTL__POR				(0x28)
+#define TAIKO_A_CDC_COMP2_B3_CTL			(0x37A)
+#define TAIKO_A_CDC_COMP2_B3_CTL__POR				(0x28)
+#define TAIKO_A_CDC_COMP0_B4_CTL			(0x36B)
+#define TAIKO_A_CDC_COMP0_B4_CTL__POR				(0x3C)
+#define TAIKO_A_CDC_COMP1_B4_CTL			(0x373)
+#define TAIKO_A_CDC_COMP1_B4_CTL__POR				(0x3C)
+#define TAIKO_A_CDC_COMP2_B4_CTL			(0x37B)
+#define TAIKO_A_CDC_COMP2_B4_CTL__POR				(0x3C)
+#define TAIKO_A_CDC_COMP0_B5_CTL			(0x36C)
+#define TAIKO_A_CDC_COMP0_B5_CTL__POR				(0x1F)
+#define TAIKO_A_CDC_COMP1_B5_CTL			(0x374)
+#define TAIKO_A_CDC_COMP1_B5_CTL__POR				(0x1F)
+#define TAIKO_A_CDC_COMP2_B5_CTL			(0x37C)
+#define TAIKO_A_CDC_COMP2_B5_CTL__POR				(0x1F)
+#define TAIKO_A_CDC_COMP0_B6_CTL			(0x36D)
+#define TAIKO_A_CDC_COMP0_B6_CTL__POR				(0x00)
+#define TAIKO_A_CDC_COMP1_B6_CTL			(0x375)
+#define TAIKO_A_CDC_COMP1_B6_CTL__POR				(0x00)
+#define TAIKO_A_CDC_COMP2_B6_CTL			(0x37D)
+#define TAIKO_A_CDC_COMP2_B6_CTL__POR				(0x00)
+#define TAIKO_A_CDC_COMP0_SHUT_DOWN_STATUS			(0x36E)
+#define TAIKO_A_CDC_COMP0_SHUT_DOWN_STATUS__POR				(0x03)
+#define TAIKO_A_CDC_COMP1_SHUT_DOWN_STATUS			(0x376)
+#define TAIKO_A_CDC_COMP1_SHUT_DOWN_STATUS__POR				(0x03)
+#define TAIKO_A_CDC_COMP2_SHUT_DOWN_STATUS			(0x37E)
+#define TAIKO_A_CDC_COMP2_SHUT_DOWN_STATUS__POR				(0x03)
+#define TAIKO_A_CDC_COMP0_FS_CFG			(0x36F)
+#define TAIKO_A_CDC_COMP0_FS_CFG__POR				(0x03)
+#define TAIKO_A_CDC_COMP1_FS_CFG			(0x377)
+#define TAIKO_A_CDC_COMP1_FS_CFG__POR				(0x03)
+#define TAIKO_A_CDC_COMP2_FS_CFG			(0x37F)
+#define TAIKO_A_CDC_COMP2_FS_CFG__POR				(0x03)
+#define TAIKO_A_CDC_CONN_RX1_B1_CTL			(0x380)
+#define TAIKO_A_CDC_CONN_RX1_B1_CTL__POR				(0x00)
+#define TAIKO_A_CDC_CONN_RX1_B2_CTL			(0x381)
+#define TAIKO_A_CDC_CONN_RX1_B2_CTL__POR				(0x00)
+#define TAIKO_A_CDC_CONN_RX1_B3_CTL			(0x382)
+#define TAIKO_A_CDC_CONN_RX1_B3_CTL__POR				(0x00)
+#define TAIKO_A_CDC_CONN_RX2_B1_CTL			(0x383)
+#define TAIKO_A_CDC_CONN_RX2_B1_CTL__POR				(0x00)
+#define TAIKO_A_CDC_CONN_RX2_B2_CTL			(0x384)
+#define TAIKO_A_CDC_CONN_RX2_B2_CTL__POR				(0x00)
+#define TAIKO_A_CDC_CONN_RX2_B3_CTL			(0x385)
+#define TAIKO_A_CDC_CONN_RX2_B3_CTL__POR				(0x00)
+#define TAIKO_A_CDC_CONN_RX3_B1_CTL			(0x386)
+#define TAIKO_A_CDC_CONN_RX3_B1_CTL__POR				(0x00)
+#define TAIKO_A_CDC_CONN_RX3_B2_CTL			(0x387)
+#define TAIKO_A_CDC_CONN_RX3_B2_CTL__POR				(0x00)
+#define TAIKO_A_CDC_CONN_RX4_B1_CTL			(0x388)
+#define TAIKO_A_CDC_CONN_RX4_B1_CTL__POR				(0x00)
+#define TAIKO_A_CDC_CONN_RX4_B2_CTL			(0x389)
+#define TAIKO_A_CDC_CONN_RX4_B2_CTL__POR				(0x00)
+#define TAIKO_A_CDC_CONN_RX5_B1_CTL			(0x38A)
+#define TAIKO_A_CDC_CONN_RX5_B1_CTL__POR				(0x00)
+#define TAIKO_A_CDC_CONN_RX5_B2_CTL			(0x38B)
+#define TAIKO_A_CDC_CONN_RX5_B2_CTL__POR				(0x00)
+#define TAIKO_A_CDC_CONN_RX6_B1_CTL			(0x38C)
+#define TAIKO_A_CDC_CONN_RX6_B1_CTL__POR				(0x00)
+#define TAIKO_A_CDC_CONN_RX6_B2_CTL			(0x38D)
+#define TAIKO_A_CDC_CONN_RX6_B2_CTL__POR				(0x00)
+#define TAIKO_A_CDC_CONN_RX7_B1_CTL			(0x38E)
+#define TAIKO_A_CDC_CONN_RX7_B1_CTL__POR				(0x00)
+#define TAIKO_A_CDC_CONN_RX7_B2_CTL			(0x38F)
+#define TAIKO_A_CDC_CONN_RX7_B2_CTL__POR				(0x00)
+#define TAIKO_A_CDC_CONN_RX7_B3_CTL			(0x390)
+#define TAIKO_A_CDC_CONN_RX7_B3_CTL__POR				(0x00)
+#define TAIKO_A_CDC_CONN_ANC_B1_CTL			(0x391)
+#define TAIKO_A_CDC_CONN_ANC_B1_CTL__POR				(0x00)
+#define TAIKO_A_CDC_CONN_ANC_B2_CTL			(0x392)
+#define TAIKO_A_CDC_CONN_ANC_B2_CTL__POR				(0x00)
+#define TAIKO_A_CDC_CONN_TX_B1_CTL			(0x393)
+#define TAIKO_A_CDC_CONN_TX_B1_CTL__POR				(0x00)
+#define TAIKO_A_CDC_CONN_TX_B2_CTL			(0x394)
+#define TAIKO_A_CDC_CONN_TX_B2_CTL__POR				(0x00)
+#define TAIKO_A_CDC_CONN_TX_B3_CTL			(0x395)
+#define TAIKO_A_CDC_CONN_TX_B3_CTL__POR				(0x00)
+#define TAIKO_A_CDC_CONN_TX_B4_CTL			(0x396)
+#define TAIKO_A_CDC_CONN_TX_B4_CTL__POR				(0x00)
+#define TAIKO_A_CDC_CONN_EQ1_B1_CTL			(0x397)
+#define TAIKO_A_CDC_CONN_EQ1_B1_CTL__POR				(0x00)
+#define TAIKO_A_CDC_CONN_EQ1_B2_CTL			(0x398)
+#define TAIKO_A_CDC_CONN_EQ1_B2_CTL__POR				(0x00)
+#define TAIKO_A_CDC_CONN_EQ1_B3_CTL			(0x399)
+#define TAIKO_A_CDC_CONN_EQ1_B3_CTL__POR				(0x00)
+#define TAIKO_A_CDC_CONN_EQ1_B4_CTL			(0x39A)
+#define TAIKO_A_CDC_CONN_EQ1_B4_CTL__POR				(0x00)
+#define TAIKO_A_CDC_CONN_EQ2_B1_CTL			(0x39B)
+#define TAIKO_A_CDC_CONN_EQ2_B1_CTL__POR				(0x00)
+#define TAIKO_A_CDC_CONN_EQ2_B2_CTL			(0x39C)
+#define TAIKO_A_CDC_CONN_EQ2_B2_CTL__POR				(0x00)
+#define TAIKO_A_CDC_CONN_EQ2_B3_CTL			(0x39D)
+#define TAIKO_A_CDC_CONN_EQ2_B3_CTL__POR				(0x00)
+#define TAIKO_A_CDC_CONN_EQ2_B4_CTL			(0x39E)
+#define TAIKO_A_CDC_CONN_EQ2_B4_CTL__POR				(0x00)
+#define TAIKO_A_CDC_CONN_SRC1_B1_CTL			(0x39F)
+#define TAIKO_A_CDC_CONN_SRC1_B1_CTL__POR				(0x00)
+#define TAIKO_A_CDC_CONN_SRC1_B2_CTL			(0x3A0)
+#define TAIKO_A_CDC_CONN_SRC1_B2_CTL__POR				(0x00)
+#define TAIKO_A_CDC_CONN_SRC2_B1_CTL			(0x3A1)
+#define TAIKO_A_CDC_CONN_SRC2_B1_CTL__POR				(0x00)
+#define TAIKO_A_CDC_CONN_SRC2_B2_CTL			(0x3A2)
+#define TAIKO_A_CDC_CONN_SRC2_B2_CTL__POR				(0x00)
+#define TAIKO_A_CDC_CONN_TX_SB_B1_CTL			(0x3A3)
+#define TAIKO_A_CDC_CONN_TX_SB_B1_CTL__POR				(0x00)
+#define TAIKO_A_CDC_CONN_TX_SB_B2_CTL			(0x3A4)
+#define TAIKO_A_CDC_CONN_TX_SB_B2_CTL__POR				(0x00)
+#define TAIKO_A_CDC_CONN_TX_SB_B3_CTL			(0x3A5)
+#define TAIKO_A_CDC_CONN_TX_SB_B3_CTL__POR				(0x00)
+#define TAIKO_A_CDC_CONN_TX_SB_B4_CTL			(0x3A6)
+#define TAIKO_A_CDC_CONN_TX_SB_B4_CTL__POR				(0x00)
+#define TAIKO_A_CDC_CONN_TX_SB_B5_CTL			(0x3A7)
+#define TAIKO_A_CDC_CONN_TX_SB_B5_CTL__POR				(0x00)
+#define TAIKO_A_CDC_CONN_TX_SB_B6_CTL			(0x3A8)
+#define TAIKO_A_CDC_CONN_TX_SB_B6_CTL__POR				(0x00)
+#define TAIKO_A_CDC_CONN_TX_SB_B7_CTL			(0x3A9)
+#define TAIKO_A_CDC_CONN_TX_SB_B7_CTL__POR				(0x00)
+#define TAIKO_A_CDC_CONN_TX_SB_B8_CTL			(0x3AA)
+#define TAIKO_A_CDC_CONN_TX_SB_B8_CTL__POR				(0x00)
+#define TAIKO_A_CDC_CONN_TX_SB_B9_CTL			(0x3AB)
+#define TAIKO_A_CDC_CONN_TX_SB_B9_CTL__POR				(0x00)
+#define TAIKO_A_CDC_CONN_TX_SB_B10_CTL			(0x3AC)
+#define TAIKO_A_CDC_CONN_TX_SB_B10_CTL__POR				(0x00)
+#define TAIKO_A_CDC_CONN_TX_SB_B11_CTL			(0x3AD)
+#define TAIKO_A_CDC_CONN_TX_SB_B11_CTL__POR				(0x00)
+#define TAIKO_A_CDC_CONN_RX_SB_B1_CTL			(0x3AE)
+#define TAIKO_A_CDC_CONN_RX_SB_B1_CTL__POR				(0x00)
+#define TAIKO_A_CDC_CONN_RX_SB_B2_CTL			(0x3AF)
+#define TAIKO_A_CDC_CONN_RX_SB_B2_CTL__POR				(0x00)
+#define TAIKO_A_CDC_CONN_CLSH_CTL			(0x3B0)
+#define TAIKO_A_CDC_CONN_CLSH_CTL__POR				(0x00)
+#define TAIKO_A_CDC_CONN_MISC			(0x3B1)
+#define TAIKO_A_CDC_CONN_MISC__POR				(0x01)
+#define TAIKO_A_CDC_CONN_MAD			(0x3B2)
+#define TAIKO_A_CDC_CONN_MAD__POR				(0x01)
+#define TAIKO_A_CDC_MBHC_EN_CTL			(0x3C0)
+#define TAIKO_A_CDC_MBHC_EN_CTL__POR				(0x00)
+#define TAIKO_A_CDC_MBHC_FIR_B1_CFG			(0x3C1)
+#define TAIKO_A_CDC_MBHC_FIR_B1_CFG__POR				(0x00)
+#define TAIKO_A_CDC_MBHC_FIR_B2_CFG			(0x3C2)
+#define TAIKO_A_CDC_MBHC_FIR_B2_CFG__POR				(0x06)
+#define TAIKO_A_CDC_MBHC_TIMER_B1_CTL			(0x3C3)
+#define TAIKO_A_CDC_MBHC_TIMER_B1_CTL__POR				(0x03)
+#define TAIKO_A_CDC_MBHC_TIMER_B2_CTL			(0x3C4)
+#define TAIKO_A_CDC_MBHC_TIMER_B2_CTL__POR				(0x09)
+#define TAIKO_A_CDC_MBHC_TIMER_B3_CTL			(0x3C5)
+#define TAIKO_A_CDC_MBHC_TIMER_B3_CTL__POR				(0x1E)
+#define TAIKO_A_CDC_MBHC_TIMER_B4_CTL			(0x3C6)
+#define TAIKO_A_CDC_MBHC_TIMER_B4_CTL__POR				(0x45)
+#define TAIKO_A_CDC_MBHC_TIMER_B5_CTL			(0x3C7)
+#define TAIKO_A_CDC_MBHC_TIMER_B5_CTL__POR				(0x04)
+#define TAIKO_A_CDC_MBHC_TIMER_B6_CTL			(0x3C8)
+#define TAIKO_A_CDC_MBHC_TIMER_B6_CTL__POR				(0x78)
+#define TAIKO_A_CDC_MBHC_B1_STATUS			(0x3C9)
+#define TAIKO_A_CDC_MBHC_B1_STATUS__POR				(0x00)
+#define TAIKO_A_CDC_MBHC_B2_STATUS			(0x3CA)
+#define TAIKO_A_CDC_MBHC_B2_STATUS__POR				(0x00)
+#define TAIKO_A_CDC_MBHC_B3_STATUS			(0x3CB)
+#define TAIKO_A_CDC_MBHC_B3_STATUS__POR				(0x00)
+#define TAIKO_A_CDC_MBHC_B4_STATUS			(0x3CC)
+#define TAIKO_A_CDC_MBHC_B4_STATUS__POR				(0x00)
+#define TAIKO_A_CDC_MBHC_B5_STATUS			(0x3CD)
+#define TAIKO_A_CDC_MBHC_B5_STATUS__POR				(0x00)
+#define TAIKO_A_CDC_MBHC_B1_CTL			(0x3CE)
+#define TAIKO_A_CDC_MBHC_B1_CTL__POR				(0xC0)
+#define TAIKO_A_CDC_MBHC_B2_CTL			(0x3CF)
+#define TAIKO_A_CDC_MBHC_B2_CTL__POR				(0x5D)
+#define TAIKO_A_CDC_MBHC_VOLT_B1_CTL			(0x3D0)
+#define TAIKO_A_CDC_MBHC_VOLT_B1_CTL__POR				(0x00)
+#define TAIKO_A_CDC_MBHC_VOLT_B2_CTL			(0x3D1)
+#define TAIKO_A_CDC_MBHC_VOLT_B2_CTL__POR				(0x00)
+#define TAIKO_A_CDC_MBHC_VOLT_B3_CTL			(0x3D2)
+#define TAIKO_A_CDC_MBHC_VOLT_B3_CTL__POR				(0x00)
+#define TAIKO_A_CDC_MBHC_VOLT_B4_CTL			(0x3D3)
+#define TAIKO_A_CDC_MBHC_VOLT_B4_CTL__POR				(0x00)
+#define TAIKO_A_CDC_MBHC_VOLT_B5_CTL			(0x3D4)
+#define TAIKO_A_CDC_MBHC_VOLT_B5_CTL__POR				(0x00)
+#define TAIKO_A_CDC_MBHC_VOLT_B6_CTL			(0x3D5)
+#define TAIKO_A_CDC_MBHC_VOLT_B6_CTL__POR				(0x00)
+#define TAIKO_A_CDC_MBHC_VOLT_B7_CTL			(0x3D6)
+#define TAIKO_A_CDC_MBHC_VOLT_B7_CTL__POR				(0xFF)
+#define TAIKO_A_CDC_MBHC_VOLT_B8_CTL			(0x3D7)
+#define TAIKO_A_CDC_MBHC_VOLT_B8_CTL__POR				(0x07)
+#define TAIKO_A_CDC_MBHC_VOLT_B9_CTL			(0x3D8)
+#define TAIKO_A_CDC_MBHC_VOLT_B9_CTL__POR				(0xFF)
+#define TAIKO_A_CDC_MBHC_VOLT_B10_CTL			(0x3D9)
+#define TAIKO_A_CDC_MBHC_VOLT_B10_CTL__POR				(0x7F)
+#define TAIKO_A_CDC_MBHC_VOLT_B11_CTL			(0x3DA)
+#define TAIKO_A_CDC_MBHC_VOLT_B11_CTL__POR				(0x00)
+#define TAIKO_A_CDC_MBHC_VOLT_B12_CTL			(0x3DB)
+#define TAIKO_A_CDC_MBHC_VOLT_B12_CTL__POR				(0x80)
+#define TAIKO_A_CDC_MBHC_CLK_CTL			(0x3DC)
+#define TAIKO_A_CDC_MBHC_CLK_CTL__POR				(0x00)
+#define TAIKO_A_CDC_MBHC_INT_CTL			(0x3DD)
+#define TAIKO_A_CDC_MBHC_INT_CTL__POR				(0x00)
+#define TAIKO_A_CDC_MBHC_DEBUG_CTL			(0x3DE)
+#define TAIKO_A_CDC_MBHC_DEBUG_CTL__POR				(0x00)
+#define TAIKO_A_CDC_MBHC_SPARE			(0x3DF)
+#define TAIKO_A_CDC_MBHC_SPARE__POR				(0x00)
+#define TAIKO_A_CDC_MAD_MAIN_CTL_1			(0x3E0)
+#define TAIKO_A_CDC_MAD_MAIN_CTL_1__POR				(0x00)
+#define TAIKO_A_CDC_MAD_MAIN_CTL_2			(0x3E1)
+#define TAIKO_A_CDC_MAD_MAIN_CTL_2__POR				(0x00)
+#define TAIKO_A_CDC_MAD_AUDIO_CTL_1			(0x3E2)
+#define TAIKO_A_CDC_MAD_AUDIO_CTL_1__POR				(0x00)
+#define TAIKO_A_CDC_MAD_AUDIO_CTL_2			(0x3E3)
+#define TAIKO_A_CDC_MAD_AUDIO_CTL_2__POR				(0x00)
+#define TAIKO_A_CDC_MAD_AUDIO_CTL_3			(0x3E4)
+#define TAIKO_A_CDC_MAD_AUDIO_CTL_3__POR				(0x00)
+#define TAIKO_A_CDC_MAD_AUDIO_CTL_4			(0x3E5)
+#define TAIKO_A_CDC_MAD_AUDIO_CTL_4__POR				(0x00)
+#define TAIKO_A_CDC_MAD_AUDIO_CTL_5			(0x3E6)
+#define TAIKO_A_CDC_MAD_AUDIO_CTL_5__POR				(0x00)
+#define TAIKO_A_CDC_MAD_AUDIO_CTL_6			(0x3E7)
+#define TAIKO_A_CDC_MAD_AUDIO_CTL_6__POR				(0x00)
+#define TAIKO_A_CDC_MAD_AUDIO_CTL_7			(0x3E8)
+#define TAIKO_A_CDC_MAD_AUDIO_CTL_7__POR				(0x00)
+#define TAIKO_A_CDC_MAD_AUDIO_CTL_8			(0x3E9)
+#define TAIKO_A_CDC_MAD_AUDIO_CTL_8__POR				(0x00)
+#define TAIKO_A_CDC_MAD_AUDIO_IIR_CTL_PTR			(0x3EA)
+#define TAIKO_A_CDC_MAD_AUDIO_IIR_CTL_PTR__POR				(0x00)
+#define TAIKO_A_CDC_MAD_AUDIO_IIR_CTL_VAL			(0x3EB)
+#define TAIKO_A_CDC_MAD_AUDIO_IIR_CTL_VAL__POR				(0x40)
+#define TAIKO_A_CDC_MAD_ULTR_CTL_1			(0x3EC)
+#define TAIKO_A_CDC_MAD_ULTR_CTL_1__POR				(0x00)
+#define TAIKO_A_CDC_MAD_ULTR_CTL_2			(0x3ED)
+#define TAIKO_A_CDC_MAD_ULTR_CTL_2__POR				(0x00)
+#define TAIKO_A_CDC_MAD_ULTR_CTL_3			(0x3EE)
+#define TAIKO_A_CDC_MAD_ULTR_CTL_3__POR				(0x00)
+#define TAIKO_A_CDC_MAD_ULTR_CTL_4			(0x3EF)
+#define TAIKO_A_CDC_MAD_ULTR_CTL_4__POR				(0x00)
+#define TAIKO_A_CDC_MAD_ULTR_CTL_5			(0x3F0)
+#define TAIKO_A_CDC_MAD_ULTR_CTL_5__POR				(0x00)
+#define TAIKO_A_CDC_MAD_ULTR_CTL_6			(0x3F1)
+#define TAIKO_A_CDC_MAD_ULTR_CTL_6__POR				(0x00)
+#define TAIKO_A_CDC_MAD_ULTR_CTL_7			(0x3F2)
+#define TAIKO_A_CDC_MAD_ULTR_CTL_7__POR				(0x00)
+#define TAIKO_A_CDC_MAD_BEACON_CTL_1			(0x3F3)
+#define TAIKO_A_CDC_MAD_BEACON_CTL_1__POR				(0x00)
+#define TAIKO_A_CDC_MAD_BEACON_CTL_2			(0x3F4)
+#define TAIKO_A_CDC_MAD_BEACON_CTL_2__POR				(0x00)
+#define TAIKO_A_CDC_MAD_BEACON_CTL_3			(0x3F5)
+#define TAIKO_A_CDC_MAD_BEACON_CTL_3__POR				(0x00)
+#define TAIKO_A_CDC_MAD_BEACON_CTL_4			(0x3F6)
+#define TAIKO_A_CDC_MAD_BEACON_CTL_4__POR				(0x00)
+#define TAIKO_A_CDC_MAD_BEACON_CTL_5			(0x3F7)
+#define TAIKO_A_CDC_MAD_BEACON_CTL_5__POR				(0x00)
+#define TAIKO_A_CDC_MAD_BEACON_CTL_6			(0x3F8)
+#define TAIKO_A_CDC_MAD_BEACON_CTL_6__POR				(0x00)
+#define TAIKO_A_CDC_MAD_BEACON_CTL_7			(0x3F9)
+#define TAIKO_A_CDC_MAD_BEACON_CTL_7__POR				(0x00)
+#define TAIKO_A_CDC_MAD_BEACON_CTL_8			(0x3FA)
+#define TAIKO_A_CDC_MAD_BEACON_CTL_8__POR				(0x00)
+#define TAIKO_A_CDC_MAD_BEACON_IIR_CTL_PTR			(0x3FB)
+#define TAIKO_A_CDC_MAD_BEACON_IIR_CTL_PTR__POR				(0x00)
+#define TAIKO_A_CDC_MAD_BEACON_IIR_CTL_VAL			(0x3FC)
+#define TAIKO_A_CDC_MAD_BEACON_IIR_CTL_VAL__POR				(0x00)
+
+/* Taiko v2+ registers */
+#define TAIKO_A_CDC_TX_1_GAIN			(0x153)
+#define TAIKO_A_CDC_TX_1_GAIN__POR			(0x02)
+#define TAIKO_A_CDC_TX_2_GAIN			(0x155)
+#define TAIKO_A_CDC_TX_2_GAIN__POR			(0x02)
+#define TAIKO_A_CDC_TX_1_2_ADC_IB		(0x156)
+#define TAIKO_A_CDC_TX_1_2_ADC_IB__POR			(0x44)
+#define TAIKO_A_CDC_TX_3_GAIN			(0x15D)
+#define TAIKO_A_CDC_TX_3_GAIN__POR			(0x02)
+#define TAIKO_A_CDC_TX_4_GAIN			(0x15F)
+#define TAIKO_A_CDC_TX_4_GAIN__POR			(0x02)
+#define TAIKO_A_CDC_TX_3_4_ADC_IB		(0x160)
+#define TAIKO_A_CDC_TX_3_4_ADC_IB__POR			(0x44)
+#define TAIKO_A_CDC_TX_5_GAIN			(0x167)
+#define TAIKO_A_CDC_TX_5_GAIN__POR			(0x02)
+#define TAIKO_A_CDC_TX_6_GAIN			(0x169)
+#define TAIKO_A_CDC_TX_6_GAIN__POR			(0x02)
+#define TAIKO_A_CDC_TX_5_6_ADC_IB		(0x16A)
+#define TAIKO_A_CDC_TX_5_6_ADC_IB__POR			(0x44)
+#define TAIKO_A_CDC_SPKR_CLIPDET_VAL0		(0x270)
+#define TAIKO_A_CDC_SPKR_CLIPDET_VAL0__POR		(0x00)
+#define TAIKO_A_CDC_SPKR_CLIPDET_VAL1		(0x271)
+#define TAIKO_A_CDC_SPKR_CLIPDET_VAL1__POR		(0x00)
+#define TAIKO_A_CDC_SPKR_CLIPDET_VAL2		(0x272)
+#define TAIKO_A_CDC_SPKR_CLIPDET_VAL2__POR		(0x00)
+#define TAIKO_A_CDC_SPKR_CLIPDET_VAL3		(0x273)
+#define TAIKO_A_CDC_SPKR_CLIPDET_VAL3__POR		(0x00)
+#define TAIKO_A_CDC_SPKR_CLIPDET_VAL4		(0x274)
+#define TAIKO_A_CDC_SPKR_CLIPDET_VAL4__POR		(0x00)
+#define TAIKO_A_CDC_SPKR_CLIPDET_VAL5		(0x275)
+#define TAIKO_A_CDC_SPKR_CLIPDET_VAL5__POR		(0x00)
+#define TAIKO_A_CDC_SPKR_CLIPDET_VAL6		(0x276)
+#define TAIKO_A_CDC_SPKR_CLIPDET_VAL6__POR		(0x00)
+#define TAIKO_A_CDC_SPKR_CLIPDET_VAL7		(0x277)
+#define TAIKO_A_CDC_SPKR_CLIPDET_VAL7__POR		(0x00)
+#define TAIKO_A_CDC_VBAT_GAIN_UPD_MON		(0x2FA)
+#define TAIKO_A_CDC_VBAT_GAIN_UPD_MON__POR		(0x00)
+#define TAIKO_A_CDC_VBAT_GAIN_MON_VAL		(0x2FB)
+#define TAIKO_A_CDC_VBAT_GAIN_MON_VAL__POR		(0x00)
+#define TAIKO_A_CDC_PA_RAMP_B1_CTL		(0x361)
+#define TAIKO_A_CDC_PA_RAMP_B1_CTL__POR			(0x00)
+#define TAIKO_A_CDC_PA_RAMP_B2_CTL		(0x362)
+#define TAIKO_A_CDC_PA_RAMP_B2_CTL__POR			(0x00)
+#define TAIKO_A_CDC_PA_RAMP_B3_CTL		(0x363)
+#define TAIKO_A_CDC_PA_RAMP_B3_CTL__POR			(0x00)
+#define TAIKO_A_CDC_PA_RAMP_B4_CTL		(0x364)
+#define TAIKO_A_CDC_PA_RAMP_B4_CTL__POR			(0x00)
+#define TAIKO_A_CDC_SPKR_CLIPDET_B1_CTL		(0x365)
+#define TAIKO_A_CDC_SPKR_CLIPDET_B1_CTL__POR		(0x00)
+
+/* SLIMBUS Slave Registers */
+#define TAIKO_SLIM_PGD_PORT_INT_EN0                     (0x30)
+#define TAIKO_SLIM_PGD_PORT_INT_STATUS_RX_0             (0x34)
+#define TAIKO_SLIM_PGD_PORT_INT_STATUS_RX_1             (0x35)
+#define TAIKO_SLIM_PGD_PORT_INT_STATUS_TX_0             (0x36)
+#define TAIKO_SLIM_PGD_PORT_INT_STATUS_TX_1             (0x37)
+#define TAIKO_SLIM_PGD_PORT_INT_CLR_RX_0                (0x38)
+#define TAIKO_SLIM_PGD_PORT_INT_CLR_RX_1                (0x39)
+#define TAIKO_SLIM_PGD_PORT_INT_CLR_TX_0                (0x3A)
+#define TAIKO_SLIM_PGD_PORT_INT_CLR_TX_1                (0x3B)
+#define TAIKO_SLIM_PGD_PORT_INT_RX_SOURCE0		(0x60)
+#define TAIKO_SLIM_PGD_PORT_INT_TX_SOURCE0		(0x70)
+
+/* Macros for packing register writes into a u32 */
+#define TAIKO_PACKED_REG_SIZE sizeof(u32)
+
+#define TAIKO_CODEC_PACK_ENTRY(reg, mask, val) (((val) & 0xff)|\
+	(((mask) & 0xff) << 8)|(((reg) & 0xffff) << 16))
+
+#define TAIKO_CODEC_UNPACK_ENTRY(packed, reg, mask, val) \
+	do { \
+		((reg) = (((packed) >> 16) & 0xffff)); \
+		((mask) = (((packed) >> 8) & 0xff)); \
+		((val) = ((packed) & 0xff)); \
+	} while (0)
+
+#endif
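
The pack/unpack macros above place the 16-bit register address in bits
31:16 of a u32, the write mask in bits 15:8 and the value in bits 7:0.
A minimal round-trip sketch, assuming the usual kernel u32/u16/u8 types:

	u32 packed;
	u16 reg;
	u8 mask, val;

	/* pack a write of value 0x01, mask 0x01, to MCLK control (0x311) */
	packed = TAIKO_CODEC_PACK_ENTRY(TAIKO_A_CDC_CLK_MCLK_CTL, 0x01, 0x01);
	/* packed == 0x03110101 */

	TAIKO_CODEC_UNPACK_ENTRY(packed, reg, mask, val);
	/* reg == 0x311, mask == 0x01, val == 0x01 */
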
diff -Nruw linux-4.4.115/include/uapi/linux/mfd/wcd9xxx/wcd9xxx_registers.h linux-4.4.115-fbx/include/uapi/linux/mfd/wcd9xxx/wcd9xxx_registers.h
--- linux-4.4.115/include/uapi/linux/mfd/wcd9xxx/wcd9xxx_registers.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/include/uapi/linux/mfd/wcd9xxx/wcd9xxx_registers.h	2019-01-22 16:16:28.563292228 +0100
@@ -0,0 +1,361 @@
+#ifndef WCD9XXX_CODEC_DIGITAL_H
+#define WCD9XXX_CODEC_DIGITAL_H
+
+#define WCD9XXX_A_CHIP_CTL			(0x00)
+#define WCD9XXX_A_CHIP_CTL__POR			(0x00000000)
+#define WCD9XXX_A_CHIP_STATUS			(0x01)
+#define WCD9XXX_A_CHIP_STATUS__POR		(0x00000000)
+#define WCD9XXX_A_CHIP_ID_BYTE_0		(0x04)
+#define WCD9XXX_A_CHIP_ID_BYTE_0__POR		(0x00000000)
+#define WCD9XXX_A_CHIP_ID_BYTE_1		(0x05)
+#define WCD9XXX_A_CHIP_ID_BYTE_1__POR		(0x00000000)
+#define WCD9XXX_A_CHIP_ID_BYTE_2		(0x06)
+#define WCD9XXX_A_CHIP_ID_BYTE_2__POR		(0x00000000)
+#define WCD9XXX_A_CHIP_ID_BYTE_3		(0x07)
+#define WCD9XXX_A_CHIP_ID_BYTE_3__POR		(0x00000001)
+#define WCD9XXX_A_CHIP_VERSION			(0x08)
+#define WCD9XXX_A_CHIP_VERSION__POR		(0x00000020)
+#define WCD9XXX_A_SB_VERSION			(0x09)
+#define WCD9XXX_A_SB_VERSION__POR		(0x00000010)
+#define WCD9XXX_A_SLAVE_ID_1			(0x0C)
+#define WCD9XXX_A_SLAVE_ID_1__POR		(0x00000077)
+#define WCD9XXX_A_SLAVE_ID_2			(0x0D)
+#define WCD9XXX_A_SLAVE_ID_2__POR		(0x00000066)
+#define WCD9XXX_A_SLAVE_ID_3			(0x0E)
+#define WCD9XXX_A_SLAVE_ID_3__POR		(0x00000055)
+#define WCD9XXX_A_CDC_CTL			(0x80)
+#define WCD9XXX_A_CDC_CTL__POR			(0x00000000)
+#define WCD9XXX_A_LEAKAGE_CTL			(0x88)
+#define WCD9XXX_A_LEAKAGE_CTL__POR		(0x00000004)
+#define WCD9XXX_A_INTR_MODE			(0x90)
+#define WCD9XXX_A_INTR_MASK0			(0x94)
+#define WCD9XXX_A_INTR_STATUS0			(0x98)
+#define WCD9XXX_A_INTR_CLEAR0			(0x9C)
+#define WCD9XXX_A_INTR_LEVEL0			(0xA0)
+#define WCD9XXX_A_INTR_LEVEL1			(0xA1)
+#define WCD9XXX_A_INTR_LEVEL2			(0xA2)
+#define WCD9XXX_A_RX_HPH_CNP_EN			(0x1AB)
+#define WCD9XXX_A_RX_HPH_CNP_EN__POR		(0x80)
+#define WCD9XXX_A_BIAS_CENTRAL_BG_CTL		(0x101)
+#define WCD9XXX_A_BIAS_CENTRAL_BG_CTL__POR	(0x50)
+#define WCD9XXX_A_CLK_BUFF_EN1			(0x108)
+#define WCD9XXX_A_CLK_BUFF_EN1__POR		(0x04)
+#define WCD9XXX_A_CLK_BUFF_EN2			(0x109)
+#define WCD9XXX_A_CLK_BUFF_EN2__POR		(0x02)
+#define WCD9XXX_A_RX_COM_BIAS			(0x1A2)
+#define WCD9XXX_A_RX_COM_BIAS__POR		(0x00)
+#define WCD9XXX_A_RC_OSC_FREQ			(0x1FA)
+#define WCD9XXX_A_RC_OSC_FREQ__POR		(0x46)
+#define WCD9XXX_A_BIAS_OSC_BG_CTL		(0x105)
+#define WCD9XXX_A_BIAS_OSC_BG_CTL__POR		(0x16)
+#define WCD9XXX_A_RC_OSC_TEST			(0x1FB)
+#define WCD9XXX_A_RC_OSC_TEST__POR		(0x0A)
+#define WCD9XXX_A_CDC_CLK_MCLK_CTL		(0x311)
+#define WCD9XXX_A_CDC_CLK_MCLK_CTL__POR		(0x00)
+
+#define WCD9XXX_A_CDC_MBHC_EN_CTL		(0x3C0)
+#define WCD9XXX_A_CDC_MBHC_EN_CTL__POR		(0x00)
+#define WCD9XXX_A_CDC_MBHC_FIR_B1_CFG		(0x3C1)
+#define WCD9XXX_A_CDC_MBHC_FIR_B1_CFG__POR	(0x00)
+#define WCD9XXX_A_CDC_MBHC_FIR_B2_CFG		(0x3C2)
+#define WCD9XXX_A_CDC_MBHC_FIR_B2_CFG__POR	(0x06)
+#define WCD9XXX_A_CDC_MBHC_TIMER_B1_CTL		(0x3C3)
+#define WCD9XXX_A_CDC_MBHC_TIMER_B1_CTL__POR	(0x03)
+#define WCD9XXX_A_CDC_MBHC_TIMER_B2_CTL		(0x3C4)
+#define WCD9XXX_A_CDC_MBHC_TIMER_B2_CTL__POR	(0x09)
+#define WCD9XXX_A_CDC_MBHC_TIMER_B3_CTL		(0x3C5)
+#define WCD9XXX_A_CDC_MBHC_TIMER_B3_CTL__POR	(0x1E)
+#define WCD9XXX_A_CDC_MBHC_TIMER_B4_CTL		(0x3C6)
+#define WCD9XXX_A_CDC_MBHC_TIMER_B4_CTL__POR	(0x45)
+#define WCD9XXX_A_CDC_MBHC_TIMER_B5_CTL		(0x3C7)
+#define WCD9XXX_A_CDC_MBHC_TIMER_B5_CTL__POR	(0x04)
+#define WCD9XXX_A_CDC_MBHC_TIMER_B6_CTL		(0x3C8)
+#define WCD9XXX_A_CDC_MBHC_TIMER_B6_CTL__POR	(0x78)
+#define WCD9XXX_A_CDC_MBHC_B1_STATUS		(0x3C9)
+#define WCD9XXX_A_CDC_MBHC_B1_STATUS__POR	(0x00)
+#define WCD9XXX_A_CDC_MBHC_B2_STATUS		(0x3CA)
+#define WCD9XXX_A_CDC_MBHC_B2_STATUS__POR	(0x00)
+#define WCD9XXX_A_CDC_MBHC_B3_STATUS		(0x3CB)
+#define WCD9XXX_A_CDC_MBHC_B3_STATUS__POR	(0x00)
+#define WCD9XXX_A_CDC_MBHC_B4_STATUS		(0x3CC)
+#define WCD9XXX_A_CDC_MBHC_B4_STATUS__POR	(0x00)
+#define WCD9XXX_A_CDC_MBHC_B5_STATUS		(0x3CD)
+#define WCD9XXX_A_CDC_MBHC_B5_STATUS__POR	(0x00)
+#define WCD9XXX_A_CDC_MBHC_B1_CTL		(0x3CE)
+#define WCD9XXX_A_CDC_MBHC_B1_CTL__POR		(0xC0)
+#define WCD9XXX_A_CDC_MBHC_B2_CTL		(0x3CF)
+#define WCD9XXX_A_CDC_MBHC_B2_CTL__POR		(0x5D)
+#define WCD9XXX_A_CDC_MBHC_VOLT_B1_CTL		(0x3D0)
+#define WCD9XXX_A_CDC_MBHC_VOLT_B1_CTL__POR	(0x00)
+#define WCD9XXX_A_CDC_MBHC_VOLT_B2_CTL		(0x3D1)
+#define WCD9XXX_A_CDC_MBHC_VOLT_B2_CTL__POR	(0x00)
+#define WCD9XXX_A_CDC_MBHC_VOLT_B3_CTL		(0x3D2)
+#define WCD9XXX_A_CDC_MBHC_VOLT_B3_CTL__POR	(0x00)
+#define WCD9XXX_A_CDC_MBHC_VOLT_B4_CTL		(0x3D3)
+#define WCD9XXX_A_CDC_MBHC_VOLT_B4_CTL__POR	(0x00)
+#define WCD9XXX_A_CDC_MBHC_VOLT_B5_CTL		(0x3D4)
+#define WCD9XXX_A_CDC_MBHC_VOLT_B5_CTL__POR	(0x00)
+#define WCD9XXX_A_CDC_MBHC_VOLT_B6_CTL		(0x3D5)
+#define WCD9XXX_A_CDC_MBHC_VOLT_B6_CTL__POR	(0x00)
+#define WCD9XXX_A_CDC_MBHC_VOLT_B7_CTL		(0x3D6)
+#define WCD9XXX_A_CDC_MBHC_VOLT_B7_CTL__POR	(0xFF)
+#define WCD9XXX_A_CDC_MBHC_VOLT_B8_CTL		(0x3D7)
+#define WCD9XXX_A_CDC_MBHC_VOLT_B8_CTL__POR	(0x07)
+#define WCD9XXX_A_CDC_MBHC_VOLT_B9_CTL		(0x3D8)
+#define WCD9XXX_A_CDC_MBHC_VOLT_B9_CTL__POR	(0xFF)
+#define WCD9XXX_A_CDC_MBHC_VOLT_B10_CTL		(0x3D9)
+#define WCD9XXX_A_CDC_MBHC_VOLT_B10_CTL__POR	(0x7F)
+#define WCD9XXX_A_CDC_MBHC_VOLT_B11_CTL		(0x3DA)
+#define WCD9XXX_A_CDC_MBHC_VOLT_B11_CTL__POR	(0x00)
+#define WCD9XXX_A_CDC_MBHC_VOLT_B12_CTL		(0x3DB)
+#define WCD9XXX_A_CDC_MBHC_VOLT_B12_CTL__POR	(0x80)
+#define WCD9XXX_A_CDC_MBHC_CLK_CTL		(0x3DC)
+#define WCD9XXX_A_CDC_MBHC_CLK_CTL__POR		(0x00)
+#define WCD9XXX_A_CDC_MBHC_INT_CTL		(0x3DD)
+#define WCD9XXX_A_CDC_MBHC_INT_CTL__POR		(0x00)
+#define WCD9XXX_A_CDC_MBHC_DEBUG_CTL		(0x3DE)
+#define WCD9XXX_A_CDC_MBHC_DEBUG_CTL__POR	(0x00)
+#define WCD9XXX_A_CDC_MBHC_SPARE		(0x3DF)
+#define WCD9XXX_A_CDC_MBHC_SPARE__POR		(0x00)
+#define WCD9XXX_A_MBHC_SCALING_MUX_1		(0x14E)
+#define WCD9XXX_A_MBHC_SCALING_MUX_1__POR	(0x00)
+#define WCD9XXX_A_RX_HPH_OCP_CTL		(0x1AA)
+#define WCD9XXX_A_RX_HPH_OCP_CTL__POR		(0x68)
+#define WCD9XXX_A_MICB_1_CTL			(0x12B)
+#define WCD9XXX_A_MICB_1_CTL__POR		(0x16)
+#define WCD9XXX_A_MICB_1_INT_RBIAS		(0x12C)
+#define WCD9XXX_A_MICB_1_INT_RBIAS__POR		(0x24)
+#define WCD9XXX_A_MICB_1_MBHC			(0x12D)
+#define WCD9XXX_A_MICB_1_MBHC__POR		(0x01)
+#define WCD9XXX_A_MICB_CFILT_2_CTL		(0x12E)
+#define WCD9XXX_A_MICB_CFILT_2_CTL__POR		(0x40)
+#define WCD9XXX_A_MICB_CFILT_2_VAL		(0x12F)
+#define WCD9XXX_A_MICB_CFILT_2_VAL__POR		(0x80)
+#define WCD9XXX_A_MICB_CFILT_2_PRECHRG		(0x130)
+#define WCD9XXX_A_MICB_CFILT_2_PRECHRG__POR	(0x38)
+#define WCD9XXX_A_MICB_2_CTL			(0x131)
+#define WCD9XXX_A_MICB_2_CTL__POR		(0x16)
+#define WCD9XXX_A_MICB_2_INT_RBIAS		(0x132)
+#define WCD9XXX_A_MICB_2_INT_RBIAS__POR		(0x24)
+#define WCD9XXX_A_MICB_2_MBHC			(0x133)
+#define WCD9XXX_A_MICB_2_MBHC__POR		(0x02)
+#define WCD9XXX_A_MICB_CFILT_3_CTL		(0x134)
+#define WCD9XXX_A_MICB_CFILT_3_CTL__POR		(0x40)
+#define WCD9XXX_A_MICB_CFILT_3_VAL		(0x135)
+#define WCD9XXX_A_MICB_CFILT_3_VAL__POR		(0x80)
+#define WCD9XXX_A_MICB_CFILT_3_PRECHRG		(0x136)
+#define WCD9XXX_A_MICB_CFILT_3_PRECHRG__POR	(0x38)
+#define WCD9XXX_A_MICB_3_CTL			(0x137)
+#define WCD9XXX_A_MICB_3_CTL__POR		(0x16)
+#define WCD9XXX_A_MICB_3_INT_RBIAS		(0x138)
+#define WCD9XXX_A_MICB_3_INT_RBIAS__POR		(0x24)
+#define WCD9XXX_A_MICB_3_MBHC			(0x139)
+#define WCD9XXX_A_MICB_3_MBHC__POR		(0x00)
+#define WCD9XXX_A_MICB_4_CTL			(0x13D)
+#define WCD9XXX_A_MICB_4_CTL__POR		(0x16)
+#define WCD9XXX_A_MICB_4_INT_RBIAS		(0x13E)
+#define WCD9XXX_A_MICB_4_INT_RBIAS__POR		(0x24)
+#define WCD9XXX_A_MICB_4_MBHC			(0x13F)
+#define WCD9XXX_A_MICB_4_MBHC__POR		(0x01)
+#define WCD9XXX_A_MICB_CFILT_1_VAL		(0x129)
+#define WCD9XXX_A_MICB_CFILT_1_VAL__POR		(0x80)
+#define WCD9XXX_A_RX_HPH_L_STATUS		(0x1B3)
+#define WCD9XXX_A_RX_HPH_L_STATUS__POR		(0x00)
+#define WCD9XXX_A_MBHC_HPH			(0x1FE)
+#define WCD9XXX_A_MBHC_HPH__POR			(0x44)
+#define WCD9XXX_A_RX_HPH_CNP_WG_TIME		(0x1AD)
+#define WCD9XXX_A_RX_HPH_CNP_WG_TIME__POR	(0x2A)
+#define WCD9XXX_A_RX_HPH_R_DAC_CTL		(0x1B7)
+#define WCD9XXX_A_RX_HPH_R_DAC_CTL__POR		(0x00)
+#define WCD9XXX_A_RX_HPH_L_DAC_CTL		(0x1B1)
+#define WCD9XXX_A_RX_HPH_L_DAC_CTL__POR		(0x00)
+#define WCD9XXX_A_TX_7_MBHC_EN			(0x171)
+#define WCD9XXX_A_TX_7_MBHC_EN__POR		(0x0C)
+#define WCD9XXX_A_PIN_CTL_OE0			(0x010)
+#define WCD9XXX_A_PIN_CTL_OE0__POR		(0x00)
+#define WCD9XXX_A_PIN_CTL_OE1			(0x011)
+#define WCD9XXX_A_PIN_CTL_OE1__POR		(0x00)
+#define WCD9XXX_A_MICB_CFILT_1_CTL		(0x128)
+#define WCD9XXX_A_LDO_H_MODE_1			(0x110)
+#define WCD9XXX_A_LDO_H_MODE_1__POR		(0x65)
+#define WCD9XXX_A_MICB_CFILT_1_CTL__POR		(0x40)
+#define WCD9XXX_A_TX_7_MBHC_TEST_CTL		(0x174)
+#define WCD9XXX_A_TX_7_MBHC_TEST_CTL__POR	(0x38)
+#define WCD9XXX_A_MBHC_SCALING_MUX_2		(0x14F)
+#define WCD9XXX_A_MBHC_SCALING_MUX_2__POR	(0x80)
+#define WCD9XXX_A_TX_COM_BIAS			(0x14C)
+#define WCD9XXX_A_TX_COM_BIAS__POR		(0xF0)
+
+#define WCD9XXX_A_MBHC_INSERT_DETECT		(0x14A) /* TAIKO and later */
+#define WCD9XXX_A_MBHC_INSERT_DETECT__POR	(0x00)
+#define WCD9XXX_A_MBHC_INSERT_DET_STATUS	(0x14B) /* TAIKO and later */
+#define WCD9XXX_A_MBHC_INSERT_DET_STATUS__POR	(0x00)
+#define WCD9XXX_A_MAD_ANA_CTRL			(0x150)
+#define WCD9XXX_A_MAD_ANA_CTRL__POR		(0xF1)
+
+
+#define WCD9XXX_A_CDC_CLK_OTHR_CTL			(0x30C)
+#define WCD9XXX_A_CDC_CLK_OTHR_CTL__POR				(0x00)
+
+/* Class-H related common registers */
+#define WCD9XXX_A_BUCK_MODE_1			(0x181)
+#define WCD9XXX_A_BUCK_MODE_1__POR				(0x21)
+#define WCD9XXX_A_BUCK_MODE_2			(0x182)
+#define WCD9XXX_A_BUCK_MODE_2__POR				(0xFF)
+#define WCD9XXX_A_BUCK_MODE_3			(0x183)
+#define WCD9XXX_A_BUCK_MODE_3__POR				(0xCC)
+#define WCD9XXX_A_BUCK_MODE_4			(0x184)
+#define WCD9XXX_A_BUCK_MODE_4__POR				(0x3A)
+#define WCD9XXX_A_BUCK_MODE_5			(0x185)
+#define WCD9XXX_A_BUCK_MODE_5__POR				(0x00)
+#define WCD9XXX_A_BUCK_CTRL_VCL_1			(0x186)
+#define WCD9XXX_A_BUCK_CTRL_VCL_1__POR				(0x48)
+#define WCD9XXX_A_BUCK_CTRL_VCL_2			(0x187)
+#define WCD9XXX_A_BUCK_CTRL_VCL_2__POR				(0xA3)
+#define WCD9XXX_A_BUCK_CTRL_VCL_3			(0x188)
+#define WCD9XXX_A_BUCK_CTRL_VCL_3__POR				(0x82)
+#define WCD9XXX_A_BUCK_CTRL_CCL_1			(0x189)
+#define WCD9XXX_A_BUCK_CTRL_CCL_1__POR				(0xAB)
+#define WCD9XXX_A_BUCK_CTRL_CCL_2			(0x18A)
+#define WCD9XXX_A_BUCK_CTRL_CCL_2__POR				(0xDC)
+#define WCD9XXX_A_BUCK_CTRL_CCL_3			(0x18B)
+#define WCD9XXX_A_BUCK_CTRL_CCL_3__POR				(0x6A)
+#define WCD9XXX_A_BUCK_CTRL_CCL_4			(0x18C)
+#define WCD9XXX_A_BUCK_CTRL_CCL_4__POR				(0x58)
+#define WCD9XXX_A_BUCK_CTRL_PWM_DRVR_1			(0x18D)
+#define WCD9XXX_A_BUCK_CTRL_PWM_DRVR_1__POR				(0x50)
+#define WCD9XXX_A_BUCK_CTRL_PWM_DRVR_2			(0x18E)
+#define WCD9XXX_A_BUCK_CTRL_PWM_DRVR_2__POR				(0x64)
+#define WCD9XXX_A_BUCK_CTRL_PWM_DRVR_3			(0x18F)
+#define WCD9XXX_A_BUCK_CTRL_PWM_DRVR_3__POR				(0x77)
+#define WCD9XXX_A_BUCK_TMUX_A_D			(0x190)
+#define WCD9XXX_A_BUCK_TMUX_A_D__POR				(0x00)
+#define WCD9XXX_A_NCP_EN			(0x192)
+#define WCD9XXX_A_NCP_EN__POR				(0xFE)
+#define WCD9XXX_A_NCP_STATIC			(0x194)
+#define WCD9XXX_A_NCP_STATIC__POR				(0x28)
+#define WCD9XXX_A_NCP_BUCKREF			(0x191)
+#define WCD9XXX_A_NCP_BUCKREF__POR				(0x00)
+#define WCD9XXX_A_CDC_CLSH_B1_CTL			(0x320)
+#define WCD9XXX_A_CDC_CLSH_B1_CTL__POR				(0xE4)
+#define WCD9XXX_A_CDC_CLSH_B2_CTL			(0x321)
+#define WCD9XXX_A_CDC_CLSH_B2_CTL__POR				(0x00)
+#define WCD9XXX_A_CDC_CLSH_B3_CTL			(0x322)
+#define WCD9XXX_A_CDC_CLSH_B3_CTL__POR				(0x00)
+#define WCD9XXX_A_CDC_CLSH_BUCK_NCP_VARS			(0x323)
+#define WCD9XXX_A_CDC_CLSH_BUCK_NCP_VARS__POR			(0x00)
+#define WCD9XXX_A_CDC_CLSH_IDLE_HPH_THSD			(0x324)
+#define WCD9XXX_A_CDC_CLSH_IDLE_HPH_THSD__POR			(0x12)
+#define WCD9XXX_A_CDC_CLSH_IDLE_EAR_THSD			(0x325)
+#define WCD9XXX_A_CDC_CLSH_IDLE_EAR_THSD__POR			(0x0C)
+#define WCD9XXX_A_CDC_CLSH_FCLKONLY_HPH_THSD			(0x326)
+#define WCD9XXX_A_CDC_CLSH_FCLKONLY_HPH_THSD__POR		(0x18)
+#define WCD9XXX_A_CDC_CLSH_FCLKONLY_EAR_THSD			(0x327)
+#define WCD9XXX_A_CDC_CLSH_FCLKONLY_EAR_THSD__POR		(0x23)
+#define WCD9XXX_A_CDC_CLSH_K_ADDR			(0x328)
+#define WCD9XXX_A_CDC_CLSH_K_ADDR__POR				(0x00)
+#define WCD9XXX_A_CDC_CLSH_K_DATA			(0x329)
+#define WCD9XXX_A_CDC_CLSH_K_DATA__POR				(0xA4)
+#define WCD9XXX_A_CDC_CLSH_I_PA_FACT_HPH_L			(0x32A)
+#define WCD9XXX_A_CDC_CLSH_I_PA_FACT_HPH_L__POR				(0xD7)
+#define WCD9XXX_A_CDC_CLSH_I_PA_FACT_HPH_U			(0x32B)
+#define WCD9XXX_A_CDC_CLSH_I_PA_FACT_HPH_U__POR				(0x05)
+#define WCD9XXX_A_CDC_CLSH_I_PA_FACT_EAR_L			(0x32C)
+#define WCD9XXX_A_CDC_CLSH_I_PA_FACT_EAR_L__POR				(0x60)
+#define WCD9XXX_A_CDC_CLSH_I_PA_FACT_EAR_U			(0x32D)
+#define WCD9XXX_A_CDC_CLSH_I_PA_FACT_EAR_U__POR				(0x09)
+#define WCD9XXX_A_CDC_CLSH_V_PA_HD_EAR			(0x32E)
+#define WCD9XXX_A_CDC_CLSH_V_PA_HD_EAR__POR				(0x00)
+#define WCD9XXX_A_CDC_CLSH_V_PA_HD_HPH			(0x32F)
+#define WCD9XXX_A_CDC_CLSH_V_PA_HD_HPH__POR				(0x00)
+#define WCD9XXX_A_CDC_CLSH_V_PA_MIN_EAR			(0x330)
+#define WCD9XXX_A_CDC_CLSH_V_PA_MIN_EAR__POR				(0x00)
+#define WCD9XXX_A_CDC_CLSH_V_PA_MIN_HPH			(0x331)
+#define WCD9XXX_A_CDC_CLSH_V_PA_MIN_HPH__POR				(0x00)
+
+#define WCD9XXX_A_CDC_RX1_B6_CTL			(0x2B5)
+#define WCD9XXX_A_CDC_RX1_B6_CTL__POR				(0x80)
+#define WCD9XXX_A_CDC_RX2_B6_CTL			(0x2BD)
+#define WCD9XXX_A_CDC_RX2_B6_CTL__POR				(0x80)
+#define WCD9XXX_A_RX_HPH_L_GAIN				(0x1AE)
+#define WCD9XXX_A_RX_HPH_L_GAIN__POR				(0x00)
+#define WCD9XXX_A_RX_HPH_R_GAIN				(0x1B4)
+#define WCD9XXX_A_RX_HPH_R_GAIN__POR				(0x00)
+#define WCD9XXX_A_RX_HPH_CHOP_CTL			(0x1A5)
+#define WCD9XXX_A_RX_HPH_CHOP_CTL__POR				(0xB4)
+#define WCD9XXX_A_RX_HPH_BIAS_PA			(0x1A6)
+#define WCD9XXX_A_RX_HPH_BIAS_PA__POR				(0x7A)
+#define WCD9XXX_A_RX_HPH_L_TEST				(0x1AF)
+#define WCD9XXX_A_RX_HPH_L_TEST__POR				(0x00)
+#define WCD9XXX_A_RX_HPH_R_TEST				(0x1B5)
+#define WCD9XXX_A_RX_HPH_R_TEST__POR				(0x00)
+#define WCD9XXX_A_CDC_CLK_RX_B1_CTL			(0x30F)
+#define WCD9XXX_A_CDC_CLK_RX_B1_CTL__POR			(0x00)
+#define WCD9XXX_A_NCP_CLK				(0x193)
+#define WCD9XXX_A_NCP_CLK__POR					(0x94)
+#define WCD9XXX_A_RX_HPH_BIAS_WG_OCP			(0x1A9)
+#define WCD9XXX_A_RX_HPH_BIAS_WG_OCP__POR			(0x2A)
+#define WCD9XXX_A_RX_HPH_CNP_WG_CTL			(0x1AC)
+#define WCD9XXX_A_RX_HPH_CNP_WG_CTL__POR			(0xDE)
+#define WCD9XXX_A_RX_HPH_L_PA_CTL			(0x1B0)
+#define WCD9XXX_A_RX_HPH_L_PA_CTL__POR				(0x42)
+#define WCD9XXX_A_RX_HPH_R_PA_CTL			(0x1B6)
+#define WCD9XXX_A_RX_HPH_R_PA_CTL__POR				(0x42)
+#define WCD9XXX_A_CDC_CONN_RX2_B1_CTL			(0x383)
+#define WCD9XXX_A_CDC_CONN_RX2_B1_CTL__POR			(0x00)
+#define WCD9XXX_A_CDC_PA_RAMP_B1_CTL			(0x361)
+#define WCD9XXX_A_CDC_PA_RAMP_B1_CTL__POR			(0x00)
+#define WCD9XXX_A_CDC_PA_RAMP_B2_CTL			(0x362)
+#define WCD9XXX_A_CDC_PA_RAMP_B2_CTL__POR			(0x00)
+#define WCD9XXX_A_CDC_PA_RAMP_B3_CTL			(0x363)
+#define WCD9XXX_A_CDC_PA_RAMP_B3_CTL__POR			(0x00)
+#define WCD9XXX_A_CDC_PA_RAMP_B4_CTL			(0x364)
+#define WCD9XXX_A_CDC_PA_RAMP_B4_CTL__POR			(0x00)
+
+#define WCD9330_A_LEAKAGE_CTL				(0x03C)
+#define WCD9330_A_LEAKAGE_CTL__POR				(0x04)
+#define WCD9330_A_CDC_CTL				(0x034)
+#define WCD9330_A_CDC_CTL__POR					(0x00)
+
+/* Class-H registers for WCD9335 and later codecs */
+#define WCD9XXX_A_CDC_RX0_RX_PATH_CFG0			(0xB42)
+#define WCD9XXX_A_CDC_RX1_RX_PATH_CFG0			(0xB56)
+#define WCD9XXX_A_CDC_RX2_RX_PATH_CFG0			(0xB6A)
+#define WCD9XXX_A_CDC_CLSH_K1_MSB			(0xC08)
+#define WCD9XXX_A_CDC_CLSH_K1_LSB			(0xC09)
+#define WCD9XXX_A_ANA_RX_SUPPLIES			(0x608)
+#define WCD9XXX_A_ANA_HPH				(0x609)
+#define WCD9XXX_A_CDC_CLSH_CRC				(0xC01)
+#define WCD9XXX_FLYBACK_EN				(0x6A4)
+#define WCD9XXX_FLYBACK_VNEG_CTRL_1			(0x6A5)
+#define WCD9XXX_FLYBACK_VNEGDAC_CTRL_2			(0x6AF)
+#define WCD9XXX_RX_BIAS_FLYB_BUFF			(0x6C7)
+#define WCD9XXX_HPH_L_EN				(0x6D3)
+#define WCD9XXX_HPH_R_EN				(0x6D6)
+#define WCD9XXX_HPH_REFBUFF_UHQA_CTL			(0x6DD)
+#define WCD9XXX_CLASSH_CTRL_VCL_2                       (0x69B)
+#define WCD9XXX_CDC_CLSH_HPH_V_PA			(0xC04)
+#define WCD9XXX_CDC_RX0_RX_PATH_SEC0			(0xB49)
+#define WCD9XXX_CDC_RX1_RX_PATH_CTL			(0xB55)
+#define WCD9XXX_CDC_RX2_RX_PATH_CTL			(0xB69)
+#define WCD9XXX_CDC_CLK_RST_CTRL_MCLK_CONTROL		(0xD41)
+#define WCD9XXX_CLASSH_CTRL_CCL_1                       (0x69C)
+
+/* RX gain control registers for WCD9335 and later codecs */
+#define WCD9XXX_CDC_RX1_RX_VOL_CTL			(0xB59)
+#define WCD9XXX_CDC_RX1_RX_VOL_MIX_CTL			(0xB5C)
+#define WCD9XXX_CDC_RX1_RX_PATH_SEC1			(0xB5E)
+#define WCD9XXX_CDC_RX2_RX_VOL_CTL			(0xB6D)
+#define WCD9XXX_CDC_RX2_RX_VOL_MIX_CTL			(0xB70)
+#define WCD9XXX_CDC_RX2_RX_PATH_SEC1			(0xB72)
+
+/* Class-H registers for WCD934X and later codecs */
+#define WCD9XXX_HPH_CNP_WG_CTL                          (0x06cc)
+#define WCD9XXX_FLYBACK_VNEG_CTRL_4			(0x06a8)
+#define WCD9XXX_HPH_NEW_INT_PA_MISC2			(0x0738)
+#define WCD9XXX_RX_BIAS_HPH_LOWPOWER			(0x06bf)
+#define WCD9XXX_HPH_PA_CTL1				(0x06d1)
+#endif
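
Every __POR constant above is the register's power-on-reset default, i.e.
the value a register cache would be seeded with. A minimal sketch of how a
codec driver typically touches one of these registers through ASoC,
assuming codec is the driver's snd_soc_codec; the mask and value are
illustrative only:

	/* set bit 2 of the Class-H B1 control register, leave the rest */
	snd_soc_update_bits(codec, WCD9XXX_A_CDC_CLSH_B1_CTL, 0x04, 0x04);
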
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/include/uapi/linux/mhi.h	2019-01-22 16:16:28.563292228 +0100
@@ -0,0 +1,37 @@
+#ifndef _UAPI_MHI_H
+#define _UAPI_MHI_H
+
+#include <linux/types.h>
+#include <linux/ioctl.h>
+
+enum peripheral_ep_type {
+	DATA_EP_TYPE_RESERVED,
+	DATA_EP_TYPE_HSIC,
+	DATA_EP_TYPE_HSUSB,
+	DATA_EP_TYPE_PCIE,
+	DATA_EP_TYPE_EMBEDDED,
+	DATA_EP_TYPE_BAM_DMUX,
+};
+
+struct peripheral_ep_info {
+	enum peripheral_ep_type		ep_type;
+	__u32				peripheral_iface_id;
+};
+
+struct ipa_ep_pair {
+	__u32				cons_pipe_num;
+	__u32				prod_pipe_num;
+};
+
+struct ep_info {
+	struct peripheral_ep_info	ph_ep_info;
+	struct ipa_ep_pair		ipa_ep_pair;
+};
+
+#define MHI_UCI_IOCTL_MAGIC	'm'
+
+#define MHI_UCI_EP_LOOKUP _IOR(MHI_UCI_IOCTL_MAGIC, 2, struct ep_info)
+
+#endif /* _UAPI_MHI_H */
+
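
MHI_UCI_EP_LOOKUP fills in a caller-supplied struct ep_info. A
hypothetical userspace sketch; the device node path below is an
assumption, not something this header defines:

	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <linux/mhi.h>

	int main(void)
	{
		struct ep_info info;
		int fd = open("/dev/mhi_uci", O_RDONLY);	/* hypothetical node */

		if (fd < 0)
			return 1;
		if (ioctl(fd, MHI_UCI_EP_LOOKUP, &info) == 0)
			printf("ep_type=%d iface=%u cons=%u prod=%u\n",
			       info.ph_ep_info.ep_type,
			       info.ph_ep_info.peripheral_iface_id,
			       info.ipa_ep_pair.cons_pipe_num,
			       info.ipa_ep_pair.prod_pipe_num);
		close(fd);
		return 0;
	}
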
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/include/uapi/linux/mmc/core.h	2019-01-22 16:16:28.563292228 +0100
@@ -0,0 +1,36 @@
+#ifndef UAPI_MMC_CORE_H
+#define UAPI_MMC_CORE_H
+
+#define MMC_RSP_PRESENT	(1 << 0)
+#define MMC_RSP_136	(1 << 1)		/* 136 bit response */
+#define MMC_RSP_CRC	(1 << 2)		/* expect valid crc */
+#define MMC_RSP_BUSY	(1 << 3)		/* card may send busy */
+#define MMC_RSP_OPCODE	(1 << 4)		/* response contains opcode */
+
+#define MMC_CMD_MASK	(3 << 5)		/* non-SPI command type */
+#define MMC_CMD_AC	(0 << 5)
+#define MMC_CMD_ADTC	(1 << 5)
+#define MMC_CMD_BC	(2 << 5)
+#define MMC_CMD_BCR	(3 << 5)
+
+#define MMC_RSP_SPI_S1	(1 << 7)		/* one status byte */
+#define MMC_RSP_SPI_S2	(1 << 8)		/* second byte */
+#define MMC_RSP_SPI_B4	(1 << 9)		/* four data bytes */
+#define MMC_RSP_SPI_BUSY (1 << 10)		/* card may send busy */
+
+/*
+ * These are the native response types, and correspond to valid bit
+ * patterns of the above flags.  One additional valid pattern
+ * is all zeros, which means we don't expect a response.
+ */
+#define MMC_RSP_NONE	(0)
+#define MMC_RSP_R1	(MMC_RSP_PRESENT|MMC_RSP_CRC|MMC_RSP_OPCODE)
+#define MMC_RSP_R1B	(MMC_RSP_PRESENT|MMC_RSP_CRC|MMC_RSP_OPCODE|MMC_RSP_BUSY)
+#define MMC_RSP_R2	(MMC_RSP_PRESENT|MMC_RSP_136|MMC_RSP_CRC)
+#define MMC_RSP_R3	(MMC_RSP_PRESENT)
+#define MMC_RSP_R4	(MMC_RSP_PRESENT)
+#define MMC_RSP_R5	(MMC_RSP_PRESENT|MMC_RSP_CRC|MMC_RSP_OPCODE)
+#define MMC_RSP_R6	(MMC_RSP_PRESENT|MMC_RSP_CRC|MMC_RSP_OPCODE)
+#define MMC_RSP_R7	(MMC_RSP_PRESENT|MMC_RSP_CRC|MMC_RSP_OPCODE)
+
+#endif /* UAPI_MMC_CORE_H */
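
Each MMC_RSP_Rx value is just an OR of the flag bits above, so drivers
test individual bits rather than compare whole values; R2 is the only
native type with the 136-bit flag set. Worked out:

	MMC_RSP_R1 == (1 << 0) | (1 << 2) | (1 << 4) == 0x15
	MMC_RSP_R2 == (1 << 0) | (1 << 1) | (1 << 2) == 0x07
	MMC_RSP_R3 == (1 << 0)                       == 0x01

	/* e.g. decide whether a long (136-bit) response must be read back;
	 * read_long_response() is a placeholder for the host's handler */
	if (cmd_flags & MMC_RSP_136)
		read_long_response();
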
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/include/uapi/linux/mmc/mmc.h	2019-01-22 16:16:28.563292228 +0100
@@ -0,0 +1,68 @@
+#ifndef UAPI_MMC_MMC_H
+#define UAPI_MMC_MMC_H
+
+/* Standard MMC commands (4.1)           type  argument     response */
+   /* class 1 */
+#define MMC_GO_IDLE_STATE         0   /* bc                          */
+#define MMC_SEND_OP_COND          1   /* bcr  [31:0] OCR         R3  */
+#define MMC_ALL_SEND_CID          2   /* bcr                     R2  */
+#define MMC_SET_RELATIVE_ADDR     3   /* ac   [31:16] RCA        R1  */
+#define MMC_SET_DSR               4   /* bc   [31:16] RCA            */
+#define MMC_SLEEP_AWAKE		  5   /* ac   [31:16] RCA 15:flg R1b */
+#define MMC_SWITCH                6   /* ac   [31:0] See below   R1b */
+#define MMC_SELECT_CARD           7   /* ac   [31:16] RCA        R1  */
+#define MMC_SEND_EXT_CSD          8   /* adtc                    R1  */
+#define MMC_SEND_CSD              9   /* ac   [31:16] RCA        R2  */
+#define MMC_SEND_CID             10   /* ac   [31:16] RCA        R2  */
+#define MMC_READ_DAT_UNTIL_STOP  11   /* adtc [31:0] dadr        R1  */
+#define MMC_STOP_TRANSMISSION    12   /* ac                      R1b */
+#define MMC_SEND_STATUS          13   /* ac   [31:16] RCA        R1  */
+#define MMC_BUS_TEST_R           14   /* adtc                    R1  */
+#define MMC_GO_INACTIVE_STATE    15   /* ac   [31:16] RCA            */
+#define MMC_BUS_TEST_W           19   /* adtc                    R1  */
+#define MMC_SPI_READ_OCR         58   /* spi                  spi_R3 */
+#define MMC_SPI_CRC_ON_OFF       59   /* spi  [0:0] flag      spi_R1 */
+
+  /* class 2 */
+#define MMC_SET_BLOCKLEN         16   /* ac   [31:0] block len   R1  */
+#define MMC_READ_SINGLE_BLOCK    17   /* adtc [31:0] data addr   R1  */
+#define MMC_READ_MULTIPLE_BLOCK  18   /* adtc [31:0] data addr   R1  */
+#define MMC_SEND_TUNING_BLOCK    19   /* adtc                    R1  */
+#define MMC_SEND_TUNING_BLOCK_HS200	21   /* adtc                    R1  */
+#define MMC_SEND_TUNING_BLOCK_HS400      MMC_SEND_TUNING_BLOCK_HS200
+
+#define MMC_TUNING_BLK_PATTERN_4BIT_SIZE	 64
+#define MMC_TUNING_BLK_PATTERN_8BIT_SIZE	128
+
+  /* class 3 */
+#define MMC_WRITE_DAT_UNTIL_STOP 20   /* adtc [31:0] data addr   R1  */
+
+  /* class 4 */
+#define MMC_SET_BLOCK_COUNT      23   /* adtc [31:0] data addr   R1  */
+#define MMC_WRITE_BLOCK          24   /* adtc [31:0] data addr   R1  */
+#define MMC_WRITE_MULTIPLE_BLOCK 25   /* adtc                    R1  */
+#define MMC_PROGRAM_CID          26   /* adtc                    R1  */
+#define MMC_PROGRAM_CSD          27   /* adtc                    R1  */
+
+  /* class 6 */
+#define MMC_SET_WRITE_PROT       28   /* ac   [31:0] data addr   R1b */
+#define MMC_CLR_WRITE_PROT       29   /* ac   [31:0] data addr   R1b */
+#define MMC_SEND_WRITE_PROT      30   /* adtc [31:0] wpdata addr R1  */
+
+  /* class 5 */
+#define MMC_ERASE_GROUP_START    35   /* ac   [31:0] data addr   R1  */
+#define MMC_ERASE_GROUP_END      36   /* ac   [31:0] data addr   R1  */
+#define MMC_ERASE                38   /* ac                      R1b */
+
+  /* class 9 */
+#define MMC_FAST_IO              39   /* ac   <Complex>          R4  */
+#define MMC_GO_IRQ_STATE         40   /* bcr                     R5  */
+
+  /* class 7 */
+#define MMC_LOCK_UNLOCK          42   /* adtc                    R1b */
+
+  /* class 8 */
+#define MMC_APP_CMD              55   /* ac   [31:16] RCA        R1  */
+#define MMC_GEN_CMD              56   /* adtc [0] RD/WR          R1  */
+
+#endif /* UAPI_MMC_MMC_H */
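
When the kernel's MMC core issues one of these commands, the opcode is
paired with a response type and command class from core.h above. A hedged
sketch (struct mmc_command belongs to the kernel-internal MMC core, and
card_rca is a placeholder for the card's relative address):

	struct mmc_command cmd = {};

	cmd.opcode = MMC_SEND_STATUS;		/* CMD13, type "ac" per the table */
	cmd.arg = card_rca << 16;		/* [31:16] RCA */
	cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;	/* R1 response, addressed command */
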
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/include/uapi/linux/msm_adsp.h	2019-01-22 16:16:28.563292228 +0100
@@ -0,0 +1,77 @@
+/*
+ * Copyright (C) 2007 Google, Inc.
+ * Author: Iliyan Malchev <ibm@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef _UAPI_LINUX_MSM_ADSP_H
+#define _UAPI_LINUX_MSM_ADSP_H
+
+#include <linux/types.h>
+#include <linux/ioctl.h>
+
+#define ADSP_IOCTL_MAGIC 'q'
+
+/* ADSP_IOCTL_WRITE_COMMAND */
+struct adsp_command_t {
+	uint16_t queue;
+	uint32_t len;		/* bytes */
+	uint8_t *data;
+};
+
+/* ADSP_IOCTL_GET_EVENT */
+struct adsp_event_t {
+	uint16_t type;		/* 1 == event (RPC), 0 == message (adsp) */
+	uint32_t timeout_ms;	/* -1 for infinite, 0 for immediate return */
+	uint16_t msg_id;
+	uint16_t flags;		/* 1 == 16-bit event, 0 == 32-bit event */
+	uint32_t len;		/* size in, number of bytes out */
+	uint8_t *data;
+};
+
+#define ADSP_IOCTL_ENABLE \
+	_IOR(ADSP_IOCTL_MAGIC, 1, unsigned)
+
+#define ADSP_IOCTL_DISABLE \
+	_IOR(ADSP_IOCTL_MAGIC, 2, unsigned)
+
+#define ADSP_IOCTL_DISABLE_ACK \
+	_IOR(ADSP_IOCTL_MAGIC, 3, unsigned)
+
+#define ADSP_IOCTL_WRITE_COMMAND \
+	_IOR(ADSP_IOCTL_MAGIC, 4, struct adsp_command_t *)
+
+#define ADSP_IOCTL_GET_EVENT \
+	_IOWR(ADSP_IOCTL_MAGIC, 5, struct adsp_event_t *)
+
+#define ADSP_IOCTL_SET_CLKRATE \
+	_IOR(ADSP_IOCTL_MAGIC, 6, unsigned)
+
+#define ADSP_IOCTL_DISABLE_EVENT_RSP \
+	_IOR(ADSP_IOCTL_MAGIC, 10, unsigned)
+
+#define ADSP_IOCTL_REGISTER_PMEM \
+	_IOW(ADSP_IOCTL_MAGIC, 13, unsigned)
+
+#define ADSP_IOCTL_UNREGISTER_PMEM \
+	_IOW(ADSP_IOCTL_MAGIC, 14, unsigned)
+
+/* Cause any further GET_EVENT ioctls to fail (-ENODEV)
+ * until the device is closed and reopened.  Useful for
+ * terminating event dispatch threads.
+ */
+#define ADSP_IOCTL_ABORT_EVENT_READ \
+	_IOW(ADSP_IOCTL_MAGIC, 15, unsigned)
+
+#define ADSP_IOCTL_LINK_TASK \
+	_IOW(ADSP_IOCTL_MAGIC, 16, unsigned)
+
+#endif
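
ADSP_IOCTL_WRITE_COMMAND takes a struct adsp_command_t naming the target
queue plus a byte payload. A hypothetical userspace sketch; fd is an
already-opened adsp device node, and the queue id and payload bytes are
placeholders:

	uint8_t payload[8] = { 0 };	/* device-specific command bytes */
	struct adsp_command_t cmd = {
		.queue = 0,		/* placeholder queue id */
		.len = sizeof(payload),
		.data = payload,
	};

	if (ioctl(fd, ADSP_IOCTL_WRITE_COMMAND, &cmd) < 0)
		perror("ADSP_IOCTL_WRITE_COMMAND");
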
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/include/uapi/linux/msm_audio_aac.h	2019-01-22 16:16:28.563292228 +0100
@@ -0,0 +1,76 @@
+#ifndef _UAPI_MSM_AUDIO_AAC_H
+#define _UAPI_MSM_AUDIO_AAC_H
+
+#include <linux/msm_audio.h>
+
+#define AUDIO_SET_AAC_CONFIG  _IOW(AUDIO_IOCTL_MAGIC, \
+	(AUDIO_MAX_COMMON_IOCTL_NUM+0), struct msm_audio_aac_config)
+#define AUDIO_GET_AAC_CONFIG  _IOR(AUDIO_IOCTL_MAGIC, \
+	(AUDIO_MAX_COMMON_IOCTL_NUM+1), struct msm_audio_aac_config)
+
+#define AUDIO_SET_AAC_ENC_CONFIG  _IOW(AUDIO_IOCTL_MAGIC, \
+	(AUDIO_MAX_COMMON_IOCTL_NUM+3), struct msm_audio_aac_enc_config)
+
+#define AUDIO_GET_AAC_ENC_CONFIG  _IOR(AUDIO_IOCTL_MAGIC, \
+	(AUDIO_MAX_COMMON_IOCTL_NUM+4), struct msm_audio_aac_enc_config)
+
+#define AUDIO_SET_AAC_MIX_CONFIG  _IOR(AUDIO_IOCTL_MAGIC, \
+	(AUDIO_MAX_COMMON_IOCTL_NUM+5), uint32_t)
+
+#define AUDIO_AAC_FORMAT_ADTS		(-1)
+#define AUDIO_AAC_FORMAT_RAW		0x0000
+#define AUDIO_AAC_FORMAT_PSUEDO_RAW	0x0001
+#define AUDIO_AAC_FORMAT_LOAS		0x0002
+#define AUDIO_AAC_FORMAT_ADIF		0x0003
+
+#define AUDIO_AAC_OBJECT_LC		0x0002
+#define AUDIO_AAC_OBJECT_LTP		0x0004
+#define AUDIO_AAC_OBJECT_ERLC		0x0011
+#define AUDIO_AAC_OBJECT_BSAC		0x0016
+
+#define AUDIO_AAC_SEC_DATA_RES_ON       0x0001
+#define AUDIO_AAC_SEC_DATA_RES_OFF      0x0000
+
+#define AUDIO_AAC_SCA_DATA_RES_ON       0x0001
+#define AUDIO_AAC_SCA_DATA_RES_OFF      0x0000
+
+#define AUDIO_AAC_SPEC_DATA_RES_ON      0x0001
+#define AUDIO_AAC_SPEC_DATA_RES_OFF     0x0000
+
+#define AUDIO_AAC_SBR_ON_FLAG_ON	0x0001
+#define AUDIO_AAC_SBR_ON_FLAG_OFF	0x0000
+
+#define AUDIO_AAC_SBR_PS_ON_FLAG_ON	0x0001
+#define AUDIO_AAC_SBR_PS_ON_FLAG_OFF	0x0000
+
+/* Primary channel on both left and right channels */
+#define AUDIO_AAC_DUAL_MONO_PL_PR  0
+/* Secondary channel on both left and right channels */
+#define AUDIO_AAC_DUAL_MONO_SL_SR  1
+/* Secondary channel on left channel and primary on right channel */
+#define AUDIO_AAC_DUAL_MONO_SL_PR  2
+/* Primary channel on left channel and secondary on right channel */
+#define AUDIO_AAC_DUAL_MONO_PL_SR  3
+
+struct msm_audio_aac_config {
+	signed short format;
+	unsigned short audio_object;
+	unsigned short ep_config;	/* 0..3, used only when audio_object is ERLC */
+	unsigned short aac_section_data_resilience_flag;
+	unsigned short aac_scalefactor_data_resilience_flag;
+	unsigned short aac_spectral_data_resilience_flag;
+	unsigned short sbr_on_flag;
+	unsigned short sbr_ps_on_flag;
+	unsigned short dual_mono_mode;
+	unsigned short channel_configuration;
+	unsigned short sample_rate;
+};
+
+struct msm_audio_aac_enc_config {
+	uint32_t channels;
+	uint32_t sample_rate;
+	uint32_t bit_rate;
+	uint32_t stream_format;
+};
+
+#endif /* _UAPI_MSM_AUDIO_AAC_H */
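
These config ioctls follow a read-modify-write pattern: fetch the current
msm_audio_aac_config, adjust the fields of interest, write it back. A
hypothetical sketch, assuming fd is an already-opened audio device node:

	struct msm_audio_aac_config cfg;

	if (ioctl(fd, AUDIO_GET_AAC_CONFIG, &cfg) == 0) {
		cfg.format = AUDIO_AAC_FORMAT_ADTS;
		cfg.sbr_on_flag = AUDIO_AAC_SBR_ON_FLAG_ON;
		cfg.dual_mono_mode = AUDIO_AAC_DUAL_MONO_PL_PR;
		ioctl(fd, AUDIO_SET_AAC_CONFIG, &cfg);
	}

The same get/modify/set shape applies to the AC3, ALAC, AMR and APE
config ioctls in the headers that follow.
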
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/include/uapi/linux/msm_audio_ac3.h	2019-01-22 16:16:28.563292228 +0100
@@ -0,0 +1,41 @@
+#ifndef _UAPI_MSM_AUDIO_AC3_H
+#define _UAPI_MSM_AUDIO_AC3_H
+
+#include <linux/msm_audio.h>
+
+#define AUDIO_SET_AC3_CONFIG  _IOW(AUDIO_IOCTL_MAGIC, \
+	(AUDIO_MAX_COMMON_IOCTL_NUM+0), unsigned)
+#define AUDIO_GET_AC3_CONFIG  _IOR(AUDIO_IOCTL_MAGIC, \
+	(AUDIO_MAX_COMMON_IOCTL_NUM+1), unsigned)
+
+#define AUDAC3_DEF_WORDSIZE 0
+#define AUDAC3_DEF_USER_DOWNMIX_FLAG 0x0
+#define AUDAC3_DEF_USER_KARAOKE_FLAG 0x0
+#define AUDAC3_DEF_ERROR_CONCEALMENT 0
+#define AUDAC3_DEF_MAX_REPEAT_COUNT  0
+
+struct msm_audio_ac3_config {
+	unsigned short		numChans;
+	unsigned short		wordSize;
+	unsigned short		kCapableMode;
+	unsigned short		compMode;
+	unsigned short		outLfeOn;
+	unsigned short		outputMode;
+	unsigned short		stereoMode;
+	unsigned short		dualMonoMode;
+	unsigned short		fsCod;
+	unsigned short		pcmScaleFac;
+	unsigned short		dynRngScaleHi;
+	unsigned short		dynRngScaleLow;
+	unsigned short		user_downmix_flag;
+	unsigned short		user_karaoke_flag;
+	unsigned short		dm_address_high;
+	unsigned short		dm_address_low;
+	unsigned short		ko_address_high;
+	unsigned short		ko_address_low;
+	unsigned short		error_concealment;
+	unsigned short		max_rep_count;
+	unsigned short		channel_routing_mode[6];
+};
+
+#endif /* _UAPI_MSM_AUDIO_AC3_H */
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/include/uapi/linux/msm_audio_alac.h	2019-01-22 16:16:28.563292228 +0100
@@ -0,0 +1,24 @@
+#ifndef _UAPI_MSM_AUDIO_ALAC_H
+#define _UAPI_MSM_AUDIO_ALAC_H
+
+#include <linux/msm_audio.h>
+
+#define AUDIO_GET_ALAC_CONFIG  _IOR(AUDIO_IOCTL_MAGIC, \
+	  (AUDIO_MAX_COMMON_IOCTL_NUM+0), struct msm_audio_alac_config)
+#define AUDIO_SET_ALAC_CONFIG  _IOW(AUDIO_IOCTL_MAGIC, \
+	  (AUDIO_MAX_COMMON_IOCTL_NUM+1), struct msm_audio_alac_config)
+
+struct msm_audio_alac_config {
+	uint32_t frameLength;
+	uint8_t compatVersion;
+	uint8_t bitDepth;
+	uint8_t pb; /* currently unused */
+	uint8_t mb; /* currently unused */
+	uint8_t kb; /* currently unused */
+	uint8_t channelCount;
+	uint16_t maxRun; /* currently unused */
+	uint32_t maxSize;
+	uint32_t averageBitRate;
+	uint32_t sampleRate;
+	uint32_t channelLayout;
+};
+
+#endif /* _UAPI_MSM_AUDIO_ALAC_H */
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/include/uapi/linux/msm_audio_amrnb.h	2019-01-22 16:16:28.563292228 +0100
@@ -0,0 +1,33 @@
+#ifndef _UAPI_MSM_AUDIO_AMRNB_H
+#define _UAPI_MSM_AUDIO_AMRNB_H
+
+#include <linux/msm_audio.h>
+
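+/*
+ * Note: the _IOW/_IOR direction macros below are historically inverted
+ * (GET encodes as _IOW, SET as _IOR); they must stay that way, since
+ * changing the direction bits would change the ioctl numbers (ABI).
+ */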
+#define AUDIO_GET_AMRNB_ENC_CONFIG  _IOW(AUDIO_IOCTL_MAGIC, \
+	(AUDIO_MAX_COMMON_IOCTL_NUM+0), unsigned)
+#define AUDIO_SET_AMRNB_ENC_CONFIG  _IOR(AUDIO_IOCTL_MAGIC, \
+	(AUDIO_MAX_COMMON_IOCTL_NUM+1), unsigned)
+#define AUDIO_GET_AMRNB_ENC_CONFIG_V2  _IOW(AUDIO_IOCTL_MAGIC, \
+	(AUDIO_MAX_COMMON_IOCTL_NUM+2), \
+	struct msm_audio_amrnb_enc_config_v2)
+#define AUDIO_SET_AMRNB_ENC_CONFIG_V2  _IOR(AUDIO_IOCTL_MAGIC, \
+	(AUDIO_MAX_COMMON_IOCTL_NUM+3), \
+	struct msm_audio_amrnb_enc_config_v2)
+
+struct msm_audio_amrnb_enc_config {
+	unsigned short voicememoencweight1;
+	unsigned short voicememoencweight2;
+	unsigned short voicememoencweight3;
+	unsigned short voicememoencweight4;
+	unsigned short dtx_mode_enable;  /* 0xFFFF - enable, 0 - disable */
+	unsigned short test_mode_enable; /* 0xFFFF - enable, 0 - disable */
+	unsigned short enc_mode; /* 0-MR475, 1-MR515, 2-MR59, 3-MR67, 4-MR74,
+				    5-MR795, 6-MR102, 7-MR122 (default) */
+};
+
+struct msm_audio_amrnb_enc_config_v2 {
+	uint32_t band_mode;
+	uint32_t dtx_enable;
+	uint32_t frame_format;
+};
+#endif /* _UAPI_MSM_AUDIO_AMRNB_H */
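
A minimal sketch of configuring the v2 encoder follows. The band_mode scale is assumed to match the enc_mode comment above (0..7, with 7 = MR122), and the frame_format value is a placeholder; both are target-specific.

#include <sys/ioctl.h>
#include <linux/msm_audio_amrnb.h>

/* 'fd' is an already-open AMR-NB encoder session */
static int amrnb_set_mr122(int fd)
{
	struct msm_audio_amrnb_enc_config_v2 cfg = {
		.band_mode = 7,		/* assumed 0..7 scale, 7 = MR122 */
		.dtx_enable = 0,	/* no discontinuous transmission */
		.frame_format = 0,	/* placeholder, target-specific */
	};

	return ioctl(fd, AUDIO_SET_AMRNB_ENC_CONFIG_V2, &cfg);
}
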
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/include/uapi/linux/msm_audio_amrwb.h	2019-01-22 16:16:28.563292228 +0100
@@ -0,0 +1,18 @@
+#ifndef _UAPI_MSM_AUDIO_AMRWB_H
+#define _UAPI_MSM_AUDIO_AMRWB_H
+
+#include <linux/msm_audio.h>
+
+#define AUDIO_GET_AMRWB_ENC_CONFIG _IOW(AUDIO_IOCTL_MAGIC, \
+	(AUDIO_MAX_COMMON_IOCTL_NUM+0), \
+	struct msm_audio_amrwb_enc_config)
+#define AUDIO_SET_AMRWB_ENC_CONFIG  _IOR(AUDIO_IOCTL_MAGIC, \
+	(AUDIO_MAX_COMMON_IOCTL_NUM+1), \
+	struct msm_audio_amrwb_enc_config)
+
+struct msm_audio_amrwb_enc_config {
+	uint32_t band_mode;
+	uint32_t dtx_enable;
+	uint32_t frame_format;
+};
+#endif /* _UAPI_MSM_AUDIO_AMRWB_H */
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/include/uapi/linux/msm_audio_amrwbplus.h	2019-01-22 16:16:28.563292228 +0100
@@ -0,0 +1,18 @@
+#ifndef _UAPI_MSM_AUDIO_AMR_WB_PLUS_H
+#define _UAPI_MSM_AUDIO_AMR_WB_PLUS_H
+
+#define AUDIO_GET_AMRWBPLUS_CONFIG_V2  _IOR(AUDIO_IOCTL_MAGIC, \
+	(AUDIO_MAX_COMMON_IOCTL_NUM+2), struct msm_audio_amrwbplus_config_v2)
+#define AUDIO_SET_AMRWBPLUS_CONFIG_V2  _IOW(AUDIO_IOCTL_MAGIC, \
+	(AUDIO_MAX_COMMON_IOCTL_NUM+3), struct msm_audio_amrwbplus_config_v2)
+
+struct msm_audio_amrwbplus_config_v2 {
+	unsigned int size_bytes;
+	unsigned int version;
+	unsigned int num_channels;
+	unsigned int amr_band_mode;
+	unsigned int amr_dtx_mode;
+	unsigned int amr_frame_fmt;
+	unsigned int amr_lsf_idx;
+};
+#endif /* _UAPI_MSM_AUDIO_AMR_WB_PLUS_H */
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/include/uapi/linux/msm_audio_ape.h	2019-01-22 16:16:28.563292228 +0100
@@ -0,0 +1,25 @@
+/* The following structure has been taken
+ * from Monkey's Audio SDK with permission. */
+
+#ifndef _UAPI_MSM_AUDIO_APE_H
+#define _UAPI_MSM_AUDIO_APE_H
+
+#define AUDIO_GET_APE_CONFIG  _IOR(AUDIO_IOCTL_MAGIC, \
+	  (AUDIO_MAX_COMMON_IOCTL_NUM+0), struct msm_audio_ape_config)
+#define AUDIO_SET_APE_CONFIG  _IOW(AUDIO_IOCTL_MAGIC, \
+	  (AUDIO_MAX_COMMON_IOCTL_NUM+1), struct msm_audio_ape_config)
+
+struct msm_audio_ape_config {
+	uint16_t compatibleVersion;
+	uint16_t compressionLevel;
+	uint32_t formatFlags;
+	uint32_t blocksPerFrame;
+	uint32_t finalFrameBlocks;
+	uint32_t totalFrames;
+	uint16_t bitsPerSample;
+	uint16_t numChannels;
+	uint32_t sampleRate;
+	uint32_t seekTablePresent;
+};
+
+#endif /* _UAPI_MSM_AUDIO_APE_H */
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/include/uapi/linux/msm_audio_calibration.h	2019-10-29 09:26:25.545221791 +0100
@@ -0,0 +1,728 @@
+#ifndef _UAPI_MSM_AUDIO_CALIBRATION_H
+#define _UAPI_MSM_AUDIO_CALIBRATION_H
+
+#include <linux/types.h>
+#include <linux/ioctl.h>
+
+#define CAL_IOCTL_MAGIC 'a'
+
+#define AUDIO_ALLOCATE_CALIBRATION	_IOWR(CAL_IOCTL_MAGIC, \
+							200, void *)
+#define AUDIO_DEALLOCATE_CALIBRATION	_IOWR(CAL_IOCTL_MAGIC, \
+							201, void *)
+#define AUDIO_PREPARE_CALIBRATION	_IOWR(CAL_IOCTL_MAGIC, \
+							202, void *)
+#define AUDIO_SET_CALIBRATION		_IOWR(CAL_IOCTL_MAGIC, \
+							203, void *)
+#define AUDIO_GET_CALIBRATION		_IOWR(CAL_IOCTL_MAGIC, \
+							204, void *)
+#define AUDIO_POST_CALIBRATION		_IOWR(CAL_IOCTL_MAGIC, \
+							205, void *)
+
+/* For Real-Time Audio Calibration */
+#define AUDIO_GET_RTAC_ADM_INFO		_IOR(CAL_IOCTL_MAGIC, \
+							207, void *)
+#define AUDIO_GET_RTAC_VOICE_INFO	_IOR(CAL_IOCTL_MAGIC, \
+							208, void *)
+#define AUDIO_GET_RTAC_ADM_CAL		_IOWR(CAL_IOCTL_MAGIC, \
+							209, void *)
+#define AUDIO_SET_RTAC_ADM_CAL		_IOWR(CAL_IOCTL_MAGIC, \
+							210, void *)
+#define AUDIO_GET_RTAC_ASM_CAL		_IOWR(CAL_IOCTL_MAGIC, \
+							211, void *)
+#define AUDIO_SET_RTAC_ASM_CAL		_IOWR(CAL_IOCTL_MAGIC, \
+							212, void *)
+#define AUDIO_GET_RTAC_CVS_CAL		_IOWR(CAL_IOCTL_MAGIC, \
+							213, void *)
+#define AUDIO_SET_RTAC_CVS_CAL		_IOWR(CAL_IOCTL_MAGIC, \
+							214, void *)
+#define AUDIO_GET_RTAC_CVP_CAL		_IOWR(CAL_IOCTL_MAGIC, \
+							215, void *)
+#define AUDIO_SET_RTAC_CVP_CAL		_IOWR(CAL_IOCTL_MAGIC, \
+							216, void *)
+#define AUDIO_GET_RTAC_AFE_CAL		_IOWR(CAL_IOCTL_MAGIC, \
+							217, void *)
+#define AUDIO_SET_RTAC_AFE_CAL		_IOWR(CAL_IOCTL_MAGIC, \
+							218, void *)
+enum {
+	CVP_VOC_RX_TOPOLOGY_CAL_TYPE = 0,
+	CVP_VOC_TX_TOPOLOGY_CAL_TYPE,
+	CVP_VOCPROC_STATIC_CAL_TYPE,
+	CVP_VOCPROC_DYNAMIC_CAL_TYPE,
+	CVS_VOCSTRM_STATIC_CAL_TYPE,
+	CVP_VOCDEV_CFG_CAL_TYPE,
+	CVP_VOCPROC_STATIC_COL_CAL_TYPE,
+	CVP_VOCPROC_DYNAMIC_COL_CAL_TYPE,
+	CVS_VOCSTRM_STATIC_COL_CAL_TYPE,
+
+	ADM_TOPOLOGY_CAL_TYPE,
+	ADM_CUST_TOPOLOGY_CAL_TYPE,
+	ADM_AUDPROC_CAL_TYPE,
+	ADM_AUDVOL_CAL_TYPE,
+
+	ASM_TOPOLOGY_CAL_TYPE,
+	ASM_CUST_TOPOLOGY_CAL_TYPE,
+	ASM_AUDSTRM_CAL_TYPE,
+
+	AFE_COMMON_RX_CAL_TYPE,
+	AFE_COMMON_TX_CAL_TYPE,
+	AFE_ANC_CAL_TYPE,
+	AFE_AANC_CAL_TYPE,
+	AFE_FB_SPKR_PROT_CAL_TYPE,
+	AFE_HW_DELAY_CAL_TYPE,
+	AFE_SIDETONE_CAL_TYPE,
+	AFE_TOPOLOGY_CAL_TYPE,
+	AFE_CUST_TOPOLOGY_CAL_TYPE,
+
+	LSM_CUST_TOPOLOGY_CAL_TYPE,
+	LSM_TOPOLOGY_CAL_TYPE,
+	LSM_CAL_TYPE,
+
+	ADM_RTAC_INFO_CAL_TYPE,
+	VOICE_RTAC_INFO_CAL_TYPE,
+	ADM_RTAC_APR_CAL_TYPE,
+	ASM_RTAC_APR_CAL_TYPE,
+	VOICE_RTAC_APR_CAL_TYPE,
+
+	MAD_CAL_TYPE,
+	ULP_AFE_CAL_TYPE,
+	ULP_LSM_CAL_TYPE,
+
+	DTS_EAGLE_CAL_TYPE,
+	AUDIO_CORE_METAINFO_CAL_TYPE,
+	SRS_TRUMEDIA_CAL_TYPE,
+
+	CORE_CUSTOM_TOPOLOGIES_CAL_TYPE,
+	ADM_RTAC_AUDVOL_CAL_TYPE,
+
+	ULP_LSM_TOPOLOGY_ID_CAL_TYPE,
+	AFE_FB_SPKR_PROT_TH_VI_CAL_TYPE,
+	AFE_FB_SPKR_PROT_EX_VI_CAL_TYPE,
+	AFE_SIDETONE_IIR_CAL_TYPE,
+	AFE_LSM_TOPOLOGY_CAL_TYPE,
+	AFE_LSM_TX_CAL_TYPE,
+	ADM_LSM_TOPOLOGY_CAL_TYPE,
+	ADM_LSM_AUDPROC_CAL_TYPE,
+	MAX_CAL_TYPES,
+};
+
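+/*
+ * The defines below (most of them self-referencing) let userspace
+ * feature-test the newer cal types above with #ifdef.
+ */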
+#define AFE_FB_SPKR_PROT_TH_VI_CAL_TYPE AFE_FB_SPKR_PROT_TH_VI_CAL_TYPE
+#define AFE_FB_SPKR_PROT_EX_VI_CAL_TYPE AFE_FB_SPKR_PROT_EX_VI_CAL_TYPE
+
+#define AFE_SIDETONE_IIR_CAL_TYPE AFE_SIDETONE_IIR_CAL_TYPE
+#define AFE_LSM_TOPOLOGY_CAL_TYPE AFE_LSM_TOPOLOGY_CAL_TYPE
+#define AFE_LSM_TX_CAL_TYPE AFE_LSM_TX_CAL_TYPE
+#define ADM_LSM_TOPOLOGY_CAL_TYPE ADM_LSM_TOPOLOGY_CAL_TYPE
+#define ADM_LSM_AUDPROC_CAL_TYPE ADM_LSM_AUDPROC_CAL_TYPE
+#define LSM_CAL_TYPES
+
+enum {
+	VERSION_0_0,
+};
+
+enum {
+	PER_VOCODER_CAL_BIT_MASK = 0x10000,
+};
+
+#define MAX_IOCTL_CMD_SIZE	512
+
+/* common structures */
+
+struct audio_cal_header {
+	int32_t		data_size;
+	int32_t		version;
+	int32_t		cal_type;
+	int32_t		cal_type_size;
+};
+
+struct audio_cal_type_header {
+	int32_t		version;
+	int32_t		buffer_number;
+};
+
+struct audio_cal_data {
+	/* Size of the cal data at mem_handle allocation or at vaddr */
+	int32_t		cal_size;
+	/* mem_handle, if shared memory is used */
+	int32_t		mem_handle;
+	/* size of virtual memory if shared memory is not used */
+};
+
+
+/* AUDIO_ALLOCATE_CALIBRATION */
+struct audio_cal_type_alloc {
+	struct audio_cal_type_header	cal_hdr;
+	struct audio_cal_data		cal_data;
+};
+
+struct audio_cal_alloc {
+	struct audio_cal_header		hdr;
+	struct audio_cal_type_alloc	cal_type;
+};
+
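
To make the nested layout concrete, here is a hedged sketch of issuing AUDIO_ALLOCATE_CALIBRATION with these structures; the mem_handle is assumed to come from a shared-memory (ION) allocator, and 'cal_fd' is an already-open calibration device.

#include <string.h>
#include <sys/ioctl.h>
#include <linux/msm_audio_calibration.h>

static int alloc_adm_cal(int cal_fd, int mem_handle, int size)
{
	struct audio_cal_alloc alloc;

	memset(&alloc, 0, sizeof(alloc));
	alloc.hdr.data_size = sizeof(alloc);
	alloc.hdr.version = VERSION_0_0;
	alloc.hdr.cal_type = ADM_AUDPROC_CAL_TYPE;
	alloc.hdr.cal_type_size = sizeof(alloc.cal_type);
	alloc.cal_type.cal_hdr.version = VERSION_0_0;
	alloc.cal_type.cal_hdr.buffer_number = 0;
	alloc.cal_type.cal_data.cal_size = size;
	alloc.cal_type.cal_data.mem_handle = mem_handle;	/* assumed ION handle */

	return ioctl(cal_fd, AUDIO_ALLOCATE_CALIBRATION, &alloc);
}

The same hdr/cal_type/cal_data nesting repeats for the deallocate, prepare, set/get, and post commands that follow.
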
+
+/* AUDIO_DEALLOCATE_CALIBRATION */
+struct audio_cal_type_dealloc {
+	struct audio_cal_type_header	cal_hdr;
+	struct audio_cal_data		cal_data;
+};
+
+struct audio_cal_dealloc {
+	struct audio_cal_header		hdr;
+	struct audio_cal_type_dealloc	cal_type;
+};
+
+
+/* AUDIO_PREPARE_CALIBRATION */
+struct audio_cal_type_prepare {
+	struct audio_cal_type_header	cal_hdr;
+	struct audio_cal_data		cal_data;
+};
+
+struct audio_cal_prepare {
+	struct audio_cal_header		hdr;
+	struct audio_cal_type_prepare	cal_type;
+};
+
+
+/* AUDIO_POST_CALIBRATION */
+struct audio_cal_type_post {
+	struct audio_cal_type_header	cal_hdr;
+	struct audio_cal_data		cal_data;
+};
+
+struct audio_cal_post {
+	struct audio_cal_header		hdr;
+	struct audio_cal_type_post	cal_type;
+};
+
+/* AUDIO_CORE_METAINFO */
+
+struct audio_cal_info_metainfo {
+	uint32_t nKey;
+};
+
+/* Cal info types */
+enum {
+	RX_DEVICE,
+	TX_DEVICE,
+	MAX_PATH_TYPE
+};
+
+struct audio_cal_info_adm_top {
+	int32_t		topology;
+	int32_t		acdb_id;
+	/* RX_DEVICE or TX_DEVICE */
+	int32_t		path;
+	int32_t		app_type;
+	int32_t		sample_rate;
+};
+
+struct audio_cal_info_audproc {
+	int32_t		acdb_id;
+	/* RX_DEVICE or TX_DEVICE */
+	int32_t		path;
+	int32_t		app_type;
+	int32_t		sample_rate;
+};
+
+struct audio_cal_info_audvol {
+	int32_t		acdb_id;
+	/* RX_DEVICE or TX_DEVICE */
+	int32_t		path;
+	int32_t		app_type;
+	int32_t		vol_index;
+};
+
+struct audio_cal_info_afe {
+	int32_t		acdb_id;
+	/* RX_DEVICE or TX_DEVICE */
+	int32_t		path;
+	int32_t		sample_rate;
+};
+
+struct audio_cal_info_afe_top {
+	int32_t		topology;
+	int32_t		acdb_id;
+	/* RX_DEVICE or TX_DEVICE */
+	int32_t		path;
+	int32_t		sample_rate;
+};
+
+struct audio_cal_info_asm_top {
+	int32_t		topology;
+	int32_t		app_type;
+};
+
+struct audio_cal_info_audstrm {
+	int32_t		app_type;
+};
+
+struct audio_cal_info_aanc {
+	int32_t		acdb_id;
+};
+
+#define MAX_HW_DELAY_ENTRIES	25
+
+struct audio_cal_hw_delay_entry {
+	uint32_t sample_rate;
+	uint32_t delay_usec;
+};
+
+struct audio_cal_hw_delay_data {
+	uint32_t				num_entries;
+	struct audio_cal_hw_delay_entry		entry[MAX_HW_DELAY_ENTRIES];
+};
+
+struct audio_cal_info_hw_delay {
+	int32_t					acdb_id;
+	/* RX_DEVICE or TX_DEVICE */
+	int32_t					path;
+	int32_t					property_type;
+	struct audio_cal_hw_delay_data		data;
+};
+
+enum msm_spkr_prot_states {
+	MSM_SPKR_PROT_CALIBRATED,
+	MSM_SPKR_PROT_CALIBRATION_IN_PROGRESS,
+	MSM_SPKR_PROT_DISABLED,
+	MSM_SPKR_PROT_NOT_CALIBRATED,
+	MSM_SPKR_PROT_PRE_CALIBRATED,
+	MSM_SPKR_PROT_IN_FTM_MODE
+};
+#define MSM_SPKR_PROT_IN_FTM_MODE MSM_SPKR_PROT_IN_FTM_MODE
+
+enum msm_spkr_count {
+	SP_V2_SPKR_1,
+	SP_V2_SPKR_2,
+	SP_V2_NUM_MAX_SPKRS
+};
+
+struct audio_cal_info_spk_prot_cfg {
+	int32_t		r0[SP_V2_NUM_MAX_SPKRS];
+	int32_t		t0[SP_V2_NUM_MAX_SPKRS];
+	uint32_t	quick_calib_flag;
+	uint32_t	mode;
+	/*
+	 * 0 - Start spk prot
+	 * 1 - Start calib
+	 * 2 - Disable spk prot
+	 */
+};
+
+struct audio_cal_info_sp_th_vi_ftm_cfg {
+	uint32_t	wait_time[SP_V2_NUM_MAX_SPKRS];
+	uint32_t	ftm_time[SP_V2_NUM_MAX_SPKRS];
+	uint32_t	mode;
+	/*
+	 * 0 - normal running mode
+	 * 1 - Calibration
+	 * 2 - FTM mode
+	 */
+};
+
+struct audio_cal_info_sp_ex_vi_ftm_cfg {
+	uint32_t	wait_time[SP_V2_NUM_MAX_SPKRS];
+	uint32_t	ftm_time[SP_V2_NUM_MAX_SPKRS];
+	uint32_t	mode;
+	/*
+	 * 0 - normal running mode
+	 * 2 - FTM mode
+	 */
+};
+
+struct audio_cal_info_sp_ex_vi_param {
+	int32_t		freq_q20[SP_V2_NUM_MAX_SPKRS];
+	int32_t		resis_q24[SP_V2_NUM_MAX_SPKRS];
+	int32_t		qmct_q24[SP_V2_NUM_MAX_SPKRS];
+	int32_t		status[SP_V2_NUM_MAX_SPKRS];
+};
+
+struct audio_cal_info_sp_th_vi_param {
+	int32_t		r_dc_q24[SP_V2_NUM_MAX_SPKRS];
+	int32_t		temp_q22[SP_V2_NUM_MAX_SPKRS];
+	int32_t		status[SP_V2_NUM_MAX_SPKRS];
+};
+
+struct audio_cal_info_msm_spk_prot_status {
+	int32_t		r0[SP_V2_NUM_MAX_SPKRS];
+	int32_t		status;
+};
+
+struct audio_cal_info_sidetone {
+	uint16_t	enable;
+	uint16_t	gain;
+	int32_t		tx_acdb_id;
+	int32_t		rx_acdb_id;
+	int32_t		mid;
+	int32_t		pid;
+};
+
+#define MAX_SIDETONE_IIR_DATA_SIZE   224
+#define MAX_NO_IIR_FILTER_STAGE      10
+
+struct audio_cal_info_sidetone_iir {
+	uint16_t	iir_enable;
+	uint16_t	num_biquad_stages;
+	uint16_t	pregain;
+	int32_t	        tx_acdb_id;
+	int32_t	        rx_acdb_id;
+	int32_t	        mid;
+	int32_t	        pid;
+	uint8_t	        iir_config[MAX_SIDETONE_IIR_DATA_SIZE];
+};
+struct audio_cal_info_lsm_top {
+	int32_t		topology;
+	int32_t		acdb_id;
+	int32_t		app_type;
+};
+
+
+struct audio_cal_info_lsm {
+	int32_t		acdb_id;
+	/* RX_DEVICE or TX_DEVICE */
+	int32_t		path;
+	int32_t		app_type;
+};
+
+struct audio_cal_info_voc_top {
+	int32_t		topology;
+	int32_t		acdb_id;
+};
+
+struct audio_cal_info_vocproc {
+	int32_t		tx_acdb_id;
+	int32_t		rx_acdb_id;
+	int32_t		tx_sample_rate;
+	int32_t		rx_sample_rate;
+};
+
+enum {
+	DEFAULT_FEATURE_SET,
+	VOL_BOOST_FEATURE_SET,
+};
+
+struct audio_cal_info_vocvol {
+	int32_t		tx_acdb_id;
+	int32_t		rx_acdb_id;
+	/* DEFAULT_ or VOL_BOOST_FEATURE_SET */
+	int32_t		feature_set;
+};
+
+struct audio_cal_info_vocdev_cfg {
+	int32_t		tx_acdb_id;
+	int32_t		rx_acdb_id;
+};
+
+#define MAX_VOICE_COLUMNS	20
+
+union audio_cal_col_na {
+	uint8_t		val8;
+	uint16_t	val16;
+	uint32_t	val32;
+	uint64_t	val64;
+} __packed;
+
+struct audio_cal_col {
+	uint32_t		id;
+	uint32_t		type;
+	union audio_cal_col_na	na_value;
+} __packed;
+
+struct audio_cal_col_data {
+	uint32_t		num_columns;
+	struct audio_cal_col	column[MAX_VOICE_COLUMNS];
+} __packed;
+
+struct audio_cal_info_voc_col {
+	int32_t				table_id;
+	int32_t				tx_acdb_id;
+	int32_t				rx_acdb_id;
+	struct audio_cal_col_data	data;
+};
+
+/* AUDIO_SET_CALIBRATION and related commands */
+struct audio_cal_type_basic {
+	struct audio_cal_type_header	cal_hdr;
+	struct audio_cal_data		cal_data;
+};
+
+struct audio_cal_basic {
+	struct audio_cal_header		hdr;
+	struct audio_cal_type_basic	cal_type;
+};
+
+struct audio_cal_type_adm_top {
+	struct audio_cal_type_header	cal_hdr;
+	struct audio_cal_data		cal_data;
+	struct audio_cal_info_adm_top	cal_info;
+};
+
+struct audio_cal_adm_top {
+	struct audio_cal_header		hdr;
+	struct audio_cal_type_adm_top	cal_type;
+};
+
+struct audio_cal_type_metainfo {
+	struct audio_cal_type_header	cal_hdr;
+	struct audio_cal_data		cal_data;
+	struct audio_cal_info_metainfo	cal_info;
+};
+
+struct audio_core_metainfo {
+	struct audio_cal_header	  hdr;
+	struct audio_cal_type_metainfo cal_type;
+};
+
+struct audio_cal_type_audproc {
+	struct audio_cal_type_header	cal_hdr;
+	struct audio_cal_data		cal_data;
+	struct audio_cal_info_audproc	cal_info;
+};
+
+struct audio_cal_audproc {
+	struct audio_cal_header		hdr;
+	struct audio_cal_type_audproc	cal_type;
+};
+
+struct audio_cal_type_audvol {
+	struct audio_cal_type_header	cal_hdr;
+	struct audio_cal_data		cal_data;
+	struct audio_cal_info_audvol	cal_info;
+};
+
+struct audio_cal_audvol {
+	struct audio_cal_header		hdr;
+	struct audio_cal_type_audvol	cal_type;
+};
+
+struct audio_cal_type_asm_top {
+	struct audio_cal_type_header	cal_hdr;
+	struct audio_cal_data		cal_data;
+	struct audio_cal_info_asm_top	cal_info;
+};
+
+struct audio_cal_asm_top {
+	struct audio_cal_header		hdr;
+	struct audio_cal_type_asm_top	cal_type;
+};
+
+struct audio_cal_type_audstrm {
+	struct audio_cal_type_header	cal_hdr;
+	struct audio_cal_data		cal_data;
+	struct audio_cal_info_audstrm	cal_info;
+};
+
+struct audio_cal_audstrm {
+	struct audio_cal_header		hdr;
+	struct audio_cal_type_audstrm	cal_type;
+};
+
+struct audio_cal_type_afe {
+	struct audio_cal_type_header	cal_hdr;
+	struct audio_cal_data		cal_data;
+	struct audio_cal_info_afe	cal_info;
+};
+
+struct audio_cal_afe {
+	struct audio_cal_header		hdr;
+	struct audio_cal_type_afe	cal_type;
+};
+
+struct audio_cal_type_afe_top {
+	struct audio_cal_type_header	cal_hdr;
+	struct audio_cal_data		cal_data;
+	struct audio_cal_info_afe_top	cal_info;
+};
+
+struct audio_cal_afe_top {
+	struct audio_cal_header		hdr;
+	struct audio_cal_type_afe_top	cal_type;
+};
+
+struct audio_cal_type_aanc {
+	struct audio_cal_type_header	cal_hdr;
+	struct audio_cal_data		cal_data;
+	struct audio_cal_info_aanc	cal_info;
+};
+
+struct audio_cal_aanc {
+	struct audio_cal_header		hdr;
+	struct audio_cal_type_aanc	cal_type;
+};
+
+struct audio_cal_type_fb_spk_prot_cfg {
+	struct audio_cal_type_header		cal_hdr;
+	struct audio_cal_data			cal_data;
+	struct audio_cal_info_spk_prot_cfg	cal_info;
+};
+
+struct audio_cal_fb_spk_prot_cfg {
+	struct audio_cal_header			hdr;
+	struct audio_cal_type_fb_spk_prot_cfg	cal_type;
+};
+
+struct audio_cal_type_sp_th_vi_ftm_cfg {
+	struct audio_cal_type_header		cal_hdr;
+	struct audio_cal_data			cal_data;
+	struct audio_cal_info_sp_th_vi_ftm_cfg	cal_info;
+};
+
+struct audio_cal_sp_th_vi_ftm_cfg {
+	struct audio_cal_header			hdr;
+	struct audio_cal_type_sp_th_vi_ftm_cfg	cal_type;
+};
+
+struct audio_cal_type_sp_ex_vi_ftm_cfg {
+	struct audio_cal_type_header		cal_hdr;
+	struct audio_cal_data			cal_data;
+	struct audio_cal_info_sp_ex_vi_ftm_cfg	cal_info;
+};
+
+struct audio_cal_sp_ex_vi_ftm_cfg {
+	struct audio_cal_header			hdr;
+	struct audio_cal_type_sp_ex_vi_ftm_cfg	cal_type;
+};
+struct audio_cal_type_hw_delay {
+	struct audio_cal_type_header	cal_hdr;
+	struct audio_cal_data		cal_data;
+	struct audio_cal_info_hw_delay	cal_info;
+};
+
+struct audio_cal_hw_delay {
+	struct audio_cal_header		hdr;
+	struct audio_cal_type_hw_delay	cal_type;
+};
+
+struct audio_cal_type_sidetone {
+	struct audio_cal_type_header		cal_hdr;
+	struct audio_cal_data			cal_data;
+	struct audio_cal_info_sidetone		cal_info;
+};
+
+struct audio_cal_sidetone {
+	struct audio_cal_header			hdr;
+	struct audio_cal_type_sidetone		cal_type;
+};
+
+struct audio_cal_type_sidetone_iir {
+	struct audio_cal_type_header	   cal_hdr;
+	struct audio_cal_data		   cal_data;
+	struct audio_cal_info_sidetone_iir cal_info;
+};
+
+struct audio_cal_sidetone_iir {
+	struct audio_cal_header		   hdr;
+	struct audio_cal_type_sidetone_iir cal_type;
+};
+
+struct audio_cal_type_lsm_top {
+	struct audio_cal_type_header	cal_hdr;
+	struct audio_cal_data		cal_data;
+	struct audio_cal_info_lsm_top	cal_info;
+};
+
+struct audio_cal_lsm_top {
+	struct audio_cal_header		hdr;
+	struct audio_cal_type_lsm_top	cal_type;
+};
+
+struct audio_cal_type_lsm {
+	struct audio_cal_type_header	cal_hdr;
+	struct audio_cal_data		cal_data;
+	struct audio_cal_info_lsm	cal_info;
+};
+
+struct audio_cal_lsm {
+	struct audio_cal_header		hdr;
+	struct audio_cal_type_lsm	cal_type;
+};
+
+struct audio_cal_type_voc_top {
+	struct audio_cal_type_header	cal_hdr;
+	struct audio_cal_data		cal_data;
+	struct audio_cal_info_voc_top	cal_info;
+};
+
+struct audio_cal_voc_top {
+	struct audio_cal_header		hdr;
+	struct audio_cal_type_voc_top	cal_type;
+};
+
+struct audio_cal_type_vocproc {
+	struct audio_cal_type_header	cal_hdr;
+	struct audio_cal_data		cal_data;
+	struct audio_cal_info_vocproc	cal_info;
+};
+
+struct audio_cal_vocproc {
+	struct audio_cal_header		hdr;
+	struct audio_cal_type_vocproc	cal_type;
+};
+
+struct audio_cal_type_vocvol {
+	struct audio_cal_type_header	cal_hdr;
+	struct audio_cal_data		cal_data;
+	struct audio_cal_info_vocvol	cal_info;
+};
+
+struct audio_cal_vocvol {
+	struct audio_cal_header		hdr;
+	struct audio_cal_type_vocvol	cal_type;
+};
+
+struct audio_cal_type_vocdev_cfg {
+	struct audio_cal_type_header		cal_hdr;
+	struct audio_cal_data			cal_data;
+	struct audio_cal_info_vocdev_cfg	cal_info;
+};
+
+struct audio_cal_vocdev_cfg {
+	struct audio_cal_header			hdr;
+	struct audio_cal_type_vocdev_cfg	cal_type;
+};
+
+struct audio_cal_type_voc_col {
+	struct audio_cal_type_header	cal_hdr;
+	struct audio_cal_data		cal_data;
+	struct audio_cal_info_voc_col	cal_info;
+};
+
+struct audio_cal_voc_col {
+	struct audio_cal_header		hdr;
+	struct audio_cal_type_voc_col	cal_type;
+};
+
+/* AUDIO_GET_CALIBRATION */
+struct audio_cal_type_fb_spk_prot_status {
+	struct audio_cal_type_header			cal_hdr;
+	struct audio_cal_data				cal_data;
+	struct audio_cal_info_msm_spk_prot_status	cal_info;
+};
+
+struct audio_cal_fb_spk_prot_status {
+	struct audio_cal_header				hdr;
+	struct audio_cal_type_fb_spk_prot_status	cal_type;
+};
+
+struct audio_cal_type_sp_th_vi_param {
+	struct audio_cal_type_header			cal_hdr;
+	struct audio_cal_data				cal_data;
+	struct audio_cal_info_sp_th_vi_param		cal_info;
+};
+
+struct audio_cal_sp_th_vi_param {
+	struct audio_cal_header				hdr;
+	struct audio_cal_type_sp_th_vi_param		cal_type;
+};
+struct audio_cal_type_sp_ex_vi_param {
+	struct audio_cal_type_header			cal_hdr;
+	struct audio_cal_data				cal_data;
+	struct audio_cal_info_sp_ex_vi_param		cal_info;
+};
+
+struct audio_cal_sp_ex_vi_param {
+	struct audio_cal_header				hdr;
+	struct audio_cal_type_sp_ex_vi_param		cal_type;
+};
+#endif /* _UAPI_MSM_AUDIO_CALIBRATION_H */
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/include/uapi/linux/msm_audio_g711_dec.h	2019-01-22 16:16:28.563292228 +0100
@@ -0,0 +1,16 @@
+#ifndef _UAPI_MSM_AUDIO_G711_DEC_H
+#define _UAPI_MSM_AUDIO_G711_DEC_H
+
+#include <linux/msm_audio.h>
+
+struct msm_audio_g711_dec_config {
+	uint32_t sample_rate;
+};
+
+#define AUDIO_SET_G711_DEC_CONFIG  _IOW(AUDIO_IOCTL_MAGIC, \
+	(AUDIO_MAX_COMMON_IOCTL_NUM+0), struct msm_audio_g711_dec_config)
+
+#define AUDIO_GET_G711_DEC_CONFIG  _IOR(AUDIO_IOCTL_MAGIC, \
+	(AUDIO_MAX_COMMON_IOCTL_NUM+1), struct msm_audio_g711_dec_config)
+
+#endif /* _UAPI_MSM_AUDIO_G711_DEC_H */
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/include/uapi/linux/msm_audio_g711.h	2019-01-22 16:16:28.563292228 +0100
@@ -0,0 +1,17 @@
+#ifndef _UAPI_MSM_AUDIO_G711_H
+#define _UAPI_MSM_AUDIO_G711_H
+
+#include <linux/msm_audio.h>
+
+struct msm_audio_g711_enc_config {
+	uint32_t sample_rate;
+};
+
+#define AUDIO_SET_G711_ENC_CONFIG  _IOW(AUDIO_IOCTL_MAGIC, \
+	(AUDIO_MAX_COMMON_IOCTL_NUM+0), struct msm_audio_g711_enc_config)
+
+#define AUDIO_GET_G711_ENC_CONFIG  _IOR(AUDIO_IOCTL_MAGIC, \
+	(AUDIO_MAX_COMMON_IOCTL_NUM+1), struct msm_audio_g711_enc_config)
+
+
+#endif /* _UAPI_MSM_AUDIO_G711_H */
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/include/uapi/linux/msm_audio.h	2019-01-22 16:16:28.563292228 +0100
@@ -0,0 +1,474 @@
+/* include/linux/msm_audio.h
+ *
+ * Copyright (C) 2008 Google, Inc.
+ * Copyright (c) 2012, 2014 The Linux Foundation. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _UAPI_LINUX_MSM_AUDIO_H
+#define _UAPI_LINUX_MSM_AUDIO_H
+
+#include <linux/types.h>
+#include <linux/ioctl.h>
+
+/* PCM Audio */
+
+#define AUDIO_IOCTL_MAGIC 'a'
+
+#define AUDIO_START        _IOW(AUDIO_IOCTL_MAGIC, 0, unsigned)
+#define AUDIO_STOP         _IOW(AUDIO_IOCTL_MAGIC, 1, unsigned)
+#define AUDIO_FLUSH        _IOW(AUDIO_IOCTL_MAGIC, 2, unsigned)
+#define AUDIO_GET_CONFIG   _IOR(AUDIO_IOCTL_MAGIC, 3, \
+		struct msm_audio_config)
+#define AUDIO_SET_CONFIG   _IOW(AUDIO_IOCTL_MAGIC, 4, \
+		struct msm_audio_config)
+#define AUDIO_GET_STATS    _IOR(AUDIO_IOCTL_MAGIC, 5, \
+		struct msm_audio_stats)
+#define AUDIO_ENABLE_AUDPP _IOW(AUDIO_IOCTL_MAGIC, 6, unsigned)
+#define AUDIO_SET_ADRC     _IOW(AUDIO_IOCTL_MAGIC, 7, unsigned)
+#define AUDIO_SET_EQ       _IOW(AUDIO_IOCTL_MAGIC, 8, unsigned)
+#define AUDIO_SET_RX_IIR   _IOW(AUDIO_IOCTL_MAGIC, 9, unsigned)
+#define AUDIO_SET_VOLUME   _IOW(AUDIO_IOCTL_MAGIC, 10, unsigned)
+#define AUDIO_PAUSE        _IOW(AUDIO_IOCTL_MAGIC, 11, unsigned)
+#define AUDIO_PLAY_DTMF    _IOW(AUDIO_IOCTL_MAGIC, 12, unsigned)
+#define AUDIO_GET_EVENT    _IOR(AUDIO_IOCTL_MAGIC, 13, \
+		struct msm_audio_event)
+#define AUDIO_ABORT_GET_EVENT _IOW(AUDIO_IOCTL_MAGIC, 14, unsigned)
+#define AUDIO_REGISTER_PMEM _IOW(AUDIO_IOCTL_MAGIC, 15, unsigned)
+#define AUDIO_DEREGISTER_PMEM _IOW(AUDIO_IOCTL_MAGIC, 16, unsigned)
+#define AUDIO_ASYNC_WRITE _IOW(AUDIO_IOCTL_MAGIC, 17, \
+		struct msm_audio_aio_buf)
+#define AUDIO_ASYNC_READ _IOW(AUDIO_IOCTL_MAGIC, 18, \
+		struct msm_audio_aio_buf)
+#define AUDIO_SET_INCALL _IOW(AUDIO_IOCTL_MAGIC, 19, struct msm_voicerec_mode)
+#define AUDIO_GET_NUM_SND_DEVICE _IOR(AUDIO_IOCTL_MAGIC, 20, unsigned)
+#define AUDIO_GET_SND_DEVICES _IOWR(AUDIO_IOCTL_MAGIC, 21, \
+				struct msm_snd_device_list)
+#define AUDIO_ENABLE_SND_DEVICE _IOW(AUDIO_IOCTL_MAGIC, 22, unsigned)
+#define AUDIO_DISABLE_SND_DEVICE _IOW(AUDIO_IOCTL_MAGIC, 23, unsigned)
+#define AUDIO_ROUTE_STREAM _IOW(AUDIO_IOCTL_MAGIC, 24, \
+				struct msm_audio_route_config)
+#define AUDIO_GET_PCM_CONFIG _IOR(AUDIO_IOCTL_MAGIC, 30, unsigned)
+#define AUDIO_SET_PCM_CONFIG _IOW(AUDIO_IOCTL_MAGIC, 31, unsigned)
+#define AUDIO_SWITCH_DEVICE  _IOW(AUDIO_IOCTL_MAGIC, 32, unsigned)
+#define AUDIO_SET_MUTE       _IOW(AUDIO_IOCTL_MAGIC, 33, unsigned)
+#define AUDIO_UPDATE_ACDB    _IOW(AUDIO_IOCTL_MAGIC, 34, unsigned)
+#define AUDIO_START_VOICE    _IOW(AUDIO_IOCTL_MAGIC, 35, unsigned)
+#define AUDIO_STOP_VOICE     _IOW(AUDIO_IOCTL_MAGIC, 36, unsigned)
+#define AUDIO_REINIT_ACDB    _IOW(AUDIO_IOCTL_MAGIC, 39, unsigned)
+#define AUDIO_OUTPORT_FLUSH  _IOW(AUDIO_IOCTL_MAGIC, 40, unsigned short)
+#define AUDIO_SET_ERR_THRESHOLD_VALUE _IOW(AUDIO_IOCTL_MAGIC, 41, \
+					unsigned short)
+#define AUDIO_GET_BITSTREAM_ERROR_INFO _IOR(AUDIO_IOCTL_MAGIC, 42, \
+			       struct msm_audio_bitstream_error_info)
+
+#define AUDIO_SET_SRS_TRUMEDIA_PARAM _IOW(AUDIO_IOCTL_MAGIC, 43, unsigned)
+
+/* Qualcomm extensions */
+#define AUDIO_SET_STREAM_CONFIG   _IOW(AUDIO_IOCTL_MAGIC, 80, \
+				struct msm_audio_stream_config)
+#define AUDIO_GET_STREAM_CONFIG   _IOR(AUDIO_IOCTL_MAGIC, 81, \
+				struct msm_audio_stream_config)
+#define AUDIO_GET_SESSION_ID _IOR(AUDIO_IOCTL_MAGIC, 82, unsigned short)
+#define AUDIO_GET_STREAM_INFO   _IOR(AUDIO_IOCTL_MAGIC, 83, \
+			       struct msm_audio_bitstream_info)
+#define AUDIO_SET_PAN       _IOW(AUDIO_IOCTL_MAGIC, 84, unsigned)
+#define AUDIO_SET_QCONCERT_PLUS       _IOW(AUDIO_IOCTL_MAGIC, 85, unsigned)
+#define AUDIO_SET_MBADRC       _IOW(AUDIO_IOCTL_MAGIC, 86, unsigned)
+#define AUDIO_SET_VOLUME_PATH   _IOW(AUDIO_IOCTL_MAGIC, 87, \
+				     struct msm_vol_info)
+#define AUDIO_SET_MAX_VOL_ALL _IOW(AUDIO_IOCTL_MAGIC, 88, unsigned)
+#define AUDIO_ENABLE_AUDPRE  _IOW(AUDIO_IOCTL_MAGIC, 89, unsigned)
+#define AUDIO_SET_AGC        _IOW(AUDIO_IOCTL_MAGIC, 90, unsigned)
+#define AUDIO_SET_NS         _IOW(AUDIO_IOCTL_MAGIC, 91, unsigned)
+#define AUDIO_SET_TX_IIR     _IOW(AUDIO_IOCTL_MAGIC, 92, unsigned)
+#define AUDIO_GET_BUF_CFG    _IOW(AUDIO_IOCTL_MAGIC, 93, \
+					struct msm_audio_buf_cfg)
+#define AUDIO_SET_BUF_CFG    _IOW(AUDIO_IOCTL_MAGIC, 94, \
+					struct msm_audio_buf_cfg)
+#define AUDIO_SET_ACDB_BLK _IOW(AUDIO_IOCTL_MAGIC, 95,  \
+					struct msm_acdb_cmd_device)
+#define AUDIO_GET_ACDB_BLK _IOW(AUDIO_IOCTL_MAGIC, 96,  \
+					struct msm_acdb_cmd_device)
+
+#define AUDIO_REGISTER_ION _IOW(AUDIO_IOCTL_MAGIC, 97, \
+		struct msm_audio_ion_info)
+#define AUDIO_DEREGISTER_ION _IOW(AUDIO_IOCTL_MAGIC, 98, \
+		struct msm_audio_ion_info)
+#define AUDIO_SET_EFFECTS_CONFIG   _IOW(AUDIO_IOCTL_MAGIC, 99, \
+				struct msm_hwacc_effects_config)
+#define AUDIO_EFFECTS_SET_BUF_LEN _IOW(AUDIO_IOCTL_MAGIC, 100, \
+				struct msm_hwacc_buf_cfg)
+#define AUDIO_EFFECTS_GET_BUF_AVAIL _IOW(AUDIO_IOCTL_MAGIC, 101, \
+				struct msm_hwacc_buf_avail)
+#define AUDIO_EFFECTS_WRITE _IOW(AUDIO_IOCTL_MAGIC, 102, void *)
+#define AUDIO_EFFECTS_READ _IOWR(AUDIO_IOCTL_MAGIC, 103, void *)
+#define AUDIO_EFFECTS_SET_PP_PARAMS _IOW(AUDIO_IOCTL_MAGIC, 104, void *)
+
+#define AUDIO_PM_AWAKE      _IOW(AUDIO_IOCTL_MAGIC, 105, unsigned)
+#define AUDIO_PM_RELAX      _IOW(AUDIO_IOCTL_MAGIC, 106, unsigned)
+
+#define	AUDIO_MAX_COMMON_IOCTL_NUM	107
+
+
+#define HANDSET_MIC			0x01
+#define HANDSET_SPKR			0x02
+#define HEADSET_MIC			0x03
+#define HEADSET_SPKR_MONO		0x04
+#define HEADSET_SPKR_STEREO		0x05
+#define SPKR_PHONE_MIC			0x06
+#define SPKR_PHONE_MONO			0x07
+#define SPKR_PHONE_STEREO		0x08
+#define BT_SCO_MIC			0x09
+#define BT_SCO_SPKR			0x0A
+#define BT_A2DP_SPKR			0x0B
+#define TTY_HEADSET_MIC			0x0C
+#define TTY_HEADSET_SPKR		0x0D
+
+/* Default devices are not supported in a
+ * device-switching context; they are only
+ * supported for stream devices.
+ * DO NOT USE. */
+#define DEFAULT_TX			0x0E
+#define DEFAULT_RX			0x0F
+
+#define BT_A2DP_TX			0x10
+
+#define HEADSET_MONO_PLUS_SPKR_MONO_RX         0x11
+#define HEADSET_MONO_PLUS_SPKR_STEREO_RX       0x12
+#define HEADSET_STEREO_PLUS_SPKR_MONO_RX       0x13
+#define HEADSET_STEREO_PLUS_SPKR_STEREO_RX     0x14
+
+#define I2S_RX				0x20
+#define I2S_TX				0x21
+
+#define ADRC_ENABLE		0x0001
+#define EQUALIZER_ENABLE	0x0002
+#define IIR_ENABLE		0x0004
+#define QCONCERT_PLUS_ENABLE	0x0008
+#define MBADRC_ENABLE		0x0010
+#define SRS_ENABLE		0x0020
+#define SRS_DISABLE	0x0040
+
+#define AGC_ENABLE		0x0001
+#define NS_ENABLE		0x0002
+#define TX_IIR_ENABLE		0x0004
+#define FLUENCE_ENABLE		0x0008
+
+#define VOC_REC_UPLINK		0x00
+#define VOC_REC_DOWNLINK	0x01
+#define VOC_REC_BOTH		0x02
+
+struct msm_audio_config {
+	uint32_t buffer_size;
+	uint32_t buffer_count;
+	uint32_t channel_count;
+	uint32_t sample_rate;
+	uint32_t type;
+	uint32_t meta_field;
+	uint32_t bits;
+	uint32_t unused[3];
+};
+
+struct msm_audio_stream_config {
+	uint32_t buffer_size;
+	uint32_t buffer_count;
+};
+
+struct msm_audio_buf_cfg {
+	uint32_t meta_info_enable;
+	uint32_t frames_per_buf;
+};
+
+struct msm_audio_stats {
+	uint32_t byte_count;
+	uint32_t sample_count;
+	uint32_t unused[2];
+};
+
+struct msm_audio_ion_info {
+	int fd;
+	void *vaddr;
+};
+
+struct msm_audio_pmem_info {
+	int fd;
+	void *vaddr;
+};
+
+struct msm_audio_aio_buf {
+	void *buf_addr;
+	uint32_t buf_len;
+	uint32_t data_len;
+	void *private_data;
+	unsigned short mfield_sz; /* only meaningful when the data has a meta field */
+};
+
+/* Audio routing */
+
+#define SND_IOCTL_MAGIC 's'
+
+#define SND_MUTE_UNMUTED 0
+#define SND_MUTE_MUTED   1
+
+struct msm_mute_info {
+	uint32_t mute;
+	uint32_t path;
+};
+
+struct msm_vol_info {
+	uint32_t vol;
+	uint32_t path;
+};
+
+struct msm_voicerec_mode {
+	uint32_t rec_mode;
+};
+
+struct msm_snd_device_config {
+	uint32_t device;
+	uint32_t ear_mute;
+	uint32_t mic_mute;
+};
+
+#define SND_SET_DEVICE _IOW(SND_IOCTL_MAGIC, 2, struct msm_snd_device_config *)
+
+enum cad_device_path_type {
+	CAD_DEVICE_PATH_RX,	/* For Decoding session */
+	CAD_DEVICE_PATH_TX,	/* For Encoding session */
+	CAD_DEVICE_PATH_RX_TX,	/* For Voice call */
+	CAD_DEVICE_PATH_LB,	/* For loopback (FM Analog) */
+	CAD_DEVICE_PATH_MAX
+};
+
+struct cad_devices_type {
+	uint32_t rx_device;
+	uint32_t tx_device;
+	enum cad_device_path_type pathtype;
+};
+
+struct msm_cad_device_config {
+	struct cad_devices_type device;
+	uint32_t ear_mute;
+	uint32_t mic_mute;
+};
+
+#define CAD_SET_DEVICE _IOW(SND_IOCTL_MAGIC, 2, struct msm_cad_device_config *)
+
+#define SND_METHOD_VOICE 0
+#define SND_METHOD_MIDI 4
+
+struct msm_snd_volume_config {
+	uint32_t device;
+	uint32_t method;
+	uint32_t volume;
+};
+
+#define SND_SET_VOLUME _IOW(SND_IOCTL_MAGIC, 3, struct msm_snd_volume_config *)
+
+struct msm_cad_volume_config {
+	struct cad_devices_type device;
+	uint32_t method;
+	uint32_t volume;
+};
+
+#define CAD_SET_VOLUME _IOW(SND_IOCTL_MAGIC, 3, struct msm_cad_volume_config *)
+
+/* Returns the number of SND endpoints supported. */
+
+#define SND_GET_NUM_ENDPOINTS _IOR(SND_IOCTL_MAGIC, 4, unsigned *)
+
+struct msm_snd_endpoint {
+	int id; /* input and output */
+	char name[64]; /* output only */
+};
+
+/* Takes an index between 0 and one less than the number returned by
+ * SND_GET_NUM_ENDPOINTS, and returns the SND index and name of a
+ * SND endpoint.  On input, the .id field contains the number of the
+ * endpoint, and on exit it contains the SND index, while .name contains
+ * the description of the endpoint.
+ */
+
+#define SND_GET_ENDPOINT _IOWR(SND_IOCTL_MAGIC, 5, struct msm_snd_endpoint *)
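
The comment above amounts to a two-step enumeration protocol; a hedged sketch (an already-open device fd is assumed, errors mostly elided):

#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/msm_audio.h>

static void list_snd_endpoints(int fd)
{
	unsigned int i, count = 0;

	if (ioctl(fd, SND_GET_NUM_ENDPOINTS, &count) < 0)
		return;

	for (i = 0; i < count; i++) {
		struct msm_snd_endpoint ept = { .id = (int)i };

		/* on return, .id holds the SND index, .name the description */
		if (ioctl(fd, SND_GET_ENDPOINT, &ept) == 0)
			printf("%d: %s\n", ept.id, ept.name);
	}
}
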
+
+
+#define SND_AVC_CTL _IOW(SND_IOCTL_MAGIC, 6, unsigned *)
+#define SND_AGC_CTL _IOW(SND_IOCTL_MAGIC, 7, unsigned *)
+
+/* Returns the number of CAD endpoints supported. */
+
+#define CAD_GET_NUM_ENDPOINTS _IOR(SND_IOCTL_MAGIC, 4, unsigned *)
+
+struct msm_cad_endpoint {
+	int id; /* input and output */
+	char name[64]; /* output only */
+};
+
+/* Takes an index between 0 and one less than the number returned by
+ * CAD_GET_NUM_ENDPOINTS, and returns the CAD index and name of a
+ * CAD endpoint.  On input, the .id field contains the number of the
+ * endpoint, and on exit it contains the CAD index, while .name contains
+ * the description of the endpoint.
+ */
+
+#define CAD_GET_ENDPOINT _IOWR(SND_IOCTL_MAGIC, 5, struct msm_cad_endpoint *)
+
+struct msm_audio_pcm_config {
+	uint32_t pcm_feedback;	/* 0 - disable, >0 - enable */
+	uint32_t buffer_count;	/* Number of buffers to allocate */
+	uint32_t buffer_size;	/* Size of each buffer for capturing
+				   PCM samples */
+};
+
+#define AUDIO_EVENT_SUSPEND 0
+#define AUDIO_EVENT_RESUME 1
+#define AUDIO_EVENT_WRITE_DONE 2
+#define AUDIO_EVENT_READ_DONE   3
+#define AUDIO_EVENT_STREAM_INFO 4
+#define AUDIO_EVENT_BITSTREAM_ERROR_INFO 5
+
+#define AUDIO_CODEC_TYPE_MP3 0
+#define AUDIO_CODEC_TYPE_AAC 1
+
+struct msm_audio_bitstream_info {
+	uint32_t codec_type;
+	uint32_t chan_info;
+	uint32_t sample_rate;
+	uint32_t bit_stream_info;
+	uint32_t bit_rate;
+	uint32_t unused[3];
+};
+
+struct msm_audio_bitstream_error_info {
+	uint32_t dec_id;
+	uint32_t err_msg_indicator;
+	uint32_t err_type;
+};
+
+union msm_audio_event_payload {
+	struct msm_audio_aio_buf aio_buf;
+	struct msm_audio_bitstream_info stream_info;
+	struct msm_audio_bitstream_error_info error_info;
+	int reserved;
+};
+
+struct msm_audio_event {
+	int event_type;
+	int timeout_ms;
+	union msm_audio_event_payload event_payload;
+};
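
A sketch of the corresponding event loop follows. The timeout semantics (assumed to be milliseconds) are not spelled out in this header, and a real client would typically use AUDIO_ABORT_GET_EVENT from another thread to unblock a pending call.

#include <errno.h>
#include <sys/ioctl.h>
#include <linux/msm_audio.h>

static int wait_audio_event(int fd, struct msm_audio_event *ev)
{
	ev->timeout_ms = 1000;	/* assumed to be milliseconds */
	if (ioctl(fd, AUDIO_GET_EVENT, ev) < 0)
		return -errno;

	switch (ev->event_type) {
	case AUDIO_EVENT_WRITE_DONE:
	case AUDIO_EVENT_READ_DONE:
		/* buffer is back in ev->event_payload.aio_buf */
		break;
	case AUDIO_EVENT_STREAM_INFO:
		/* new format in ev->event_payload.stream_info */
		break;
	default:
		break;
	}
	return 0;
}
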
+
+#define MSM_SNDDEV_CAP_RX 0x1
+#define MSM_SNDDEV_CAP_TX 0x2
+#define MSM_SNDDEV_CAP_VOICE 0x4
+
+struct msm_snd_device_info {
+	uint32_t dev_id;
+	uint32_t dev_cap; /* bitmask describe capability of device */
+	char dev_name[64];
+};
+
+struct msm_snd_device_list {
+	uint32_t  num_dev; /* Indicate number of device info to be retrieved */
+	struct msm_snd_device_info *list;
+};
+
+struct msm_dtmf_config {
+	uint16_t path;
+	uint16_t dtmf_hi;
+	uint16_t dtmf_low;
+	uint16_t duration;
+	uint16_t tx_gain;
+	uint16_t rx_gain;
+	uint16_t mixing;
+};
+
+#define AUDIO_ROUTE_STREAM_VOICE_RX 0
+#define AUDIO_ROUTE_STREAM_VOICE_TX 1
+#define AUDIO_ROUTE_STREAM_PLAYBACK 2
+#define AUDIO_ROUTE_STREAM_REC      3
+
+struct msm_audio_route_config {
+	uint32_t stream_type;
+	uint32_t stream_id;
+	uint32_t dev_id;
+};
+
+#define AUDIO_MAX_EQ_BANDS 12
+
+struct msm_audio_eq_band {
+	uint16_t     band_idx; /* The band index, 0 .. 11 */
+	uint32_t     filter_type; /* Filter band type */
+	uint32_t     center_freq_hz; /* Filter band center frequency */
+	uint32_t     filter_gain; /* Filter band initial gain (dB) */
+			/* Range is +12 dB to -12 dB with 1dB increments. */
+	uint32_t     q_factor;
+} __attribute__ ((packed));
+
+struct msm_audio_eq_stream_config {
+	uint32_t	enable;
+	uint32_t	num_bands; /* Number of consecutive bands specified */
+	struct msm_audio_eq_band	eq_bands[AUDIO_MAX_EQ_BANDS];
+} __attribute__ ((packed));
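
To make the layout concrete, a sketch that fills a one-band configuration. The filter_type ids and the q_factor fixed-point format are not defined in this header, so those values are placeholders; gain is in whole dB over the -12..+12 range noted above.

#include <string.h>
#include <linux/msm_audio.h>

static void eq_one_band(struct msm_audio_eq_stream_config *eq)
{
	memset(eq, 0, sizeof(*eq));
	eq->enable = 1;
	eq->num_bands = 1;
	eq->eq_bands[0].band_idx = 0;
	eq->eq_bands[0].filter_type = 0;	/* placeholder id */
	eq->eq_bands[0].center_freq_hz = 100;
	eq->eq_bands[0].filter_gain = 6;	/* +6 dB */
	eq->eq_bands[0].q_factor = 256;		/* placeholder fixed-point Q */
}
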
+
+struct msm_acdb_cmd_device {
+	uint32_t     command_id;
+	uint32_t     device_id;
+	uint32_t     network_id;
+	uint32_t     sample_rate_id;      /* Actual sample rate value */
+	uint32_t     interface_id;        /* See interface id's above */
+	uint32_t     algorithm_block_id;  /* See enumerations above */
+	uint32_t     total_bytes;         /* Length in bytes used by buffer */
+	uint32_t     *phys_buf;           /* Physical Address of data */
+};
+
+struct msm_hwacc_data_config {
+	__u32 buf_size;
+	__u32 num_buf;
+	__u32 num_channels;
+	__u8 channel_map[8];
+	__u32 sample_rate;
+	__u32 bits_per_sample;
+};
+
+struct msm_hwacc_buf_cfg {
+	__u32 input_len;
+	__u32 output_len;
+};
+
+struct msm_hwacc_buf_avail {
+	__u32 input_num_avail;
+	__u32 output_num_avail;
+};
+
+struct msm_hwacc_effects_config {
+	struct msm_hwacc_data_config input;
+	struct msm_hwacc_data_config output;
+	struct msm_hwacc_buf_cfg buf_cfg;
+	__u32 meta_mode_enabled;
+	__u32 overwrite_topology;
+	__s32 topology;
+};
+
+#define ADSP_STREAM_PP_EVENT				0
+#define ADSP_STREAM_ENCDEC_EVENT			1
+#define ADSP_STREAM_IEC_61937_FMT_UPDATE_EVENT		2
+#define ADSP_STREAM_EVENT_MAX				3
+
+struct msm_adsp_event_data {
+	__u32 event_type;
+	__u32 payload_len;
+	__u8 payload[0];
+};
+
+#endif
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/include/uapi/linux/msm_audio_mvs.h	2019-01-22 16:16:28.563292228 +0100
@@ -0,0 +1,154 @@
+#ifndef _UAPI_MSM_AUDIO_MVS_H
+#define _UAPI_MSM_AUDIO_MVS_H
+
+#include <linux/msm_audio.h>
+
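+/*
+ * As in the AMR-NB header, the GET/SET direction macros below are
+ * historically inverted and are kept as-is for ABI stability.
+ */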
+#define AUDIO_GET_MVS_CONFIG _IOW(AUDIO_IOCTL_MAGIC, \
+	(AUDIO_MAX_COMMON_IOCTL_NUM + 0), unsigned)
+#define AUDIO_SET_MVS_CONFIG _IOR(AUDIO_IOCTL_MAGIC, \
+	(AUDIO_MAX_COMMON_IOCTL_NUM + 1), unsigned)
+
+/* MVS modes */
+#define MVS_MODE_IS733 0x1 /* QCELP 13K */
+#define MVS_MODE_IS127 0x2 /* EVRC-8k */
+#define MVS_MODE_4GV_NB 0x3 /* EVRC-B */
+#define MVS_MODE_4GV_WB 0x4 /* EVRC-WB */
+#define MVS_MODE_AMR 0x5
+#define MVS_MODE_EFR 0x6
+#define MVS_MODE_FR 0x7
+#define MVS_MODE_HR 0x8
+#define MVS_MODE_LINEAR_PCM 0x9
+#define MVS_MODE_G711 0xA
+#define MVS_MODE_PCM 0xC
+#define MVS_MODE_AMR_WB 0xD
+#define MVS_MODE_G729A 0xE
+#define MVS_MODE_G711A 0xF
+#define MVS_MODE_G722 0x10
+#define MVS_MODE_PCM_WB 0x12
+
+enum msm_audio_amr_mode {
+	MVS_AMR_MODE_0475, /* AMR 4.75 kbps */
+	MVS_AMR_MODE_0515, /* AMR 5.15 kbps */
+	MVS_AMR_MODE_0590, /* AMR 5.90 kbps */
+	MVS_AMR_MODE_0670, /* AMR 6.70 kbps */
+	MVS_AMR_MODE_0740, /* AMR 7.40 kbps */
+	MVS_AMR_MODE_0795, /* AMR 7.95 kbps */
+	MVS_AMR_MODE_1020, /* AMR 10.20 kbps */
+	MVS_AMR_MODE_1220, /* AMR 12.20 kbps */
+	MVS_AMR_MODE_0660, /* AMR-WB 6.60 kbps */
+	MVS_AMR_MODE_0885, /* AMR-WB 8.85 kbps */
+	MVS_AMR_MODE_1265, /* AMR-WB 12.65 kbps */
+	MVS_AMR_MODE_1425, /* AMR-WB 14.25 kbps */
+	MVS_AMR_MODE_1585, /* AMR-WB 15.85 kbps */
+	MVS_AMR_MODE_1825, /* AMR-WB 18.25 kbps */
+	MVS_AMR_MODE_1985, /* AMR-WB 19.85 kbps */
+	MVS_AMR_MODE_2305, /* AMR-WB 23.05 kbps */
+	MVS_AMR_MODE_2385, /* AMR-WB 23.85 kbps */
+	MVS_AMR_MODE_UNDEF
+};
+
+/* The MVS VOC rate type identifies the rate of a QCELP 13K (IS733),
+ * EVRC (IS127), 4GV, or 4GV-WB frame. */
+enum msm_audio_voc_rate {
+	MVS_VOC_0_RATE,		/* Blank frame */
+	MVS_VOC_8_RATE,		/* 1/8 rate */
+	MVS_VOC_4_RATE,		/* 1/4 rate */
+	MVS_VOC_2_RATE,		/* 1/2 rate */
+	MVS_VOC_1_RATE,		/* Full rate */
+	MVS_VOC_ERASURE,	/* Erasure frame */
+	MVS_VOC_RATE_MAX,
+	MVS_VOC_RATE_UNDEF = MVS_VOC_RATE_MAX
+};
+
+enum msm_audio_amr_frame_type {
+	MVS_AMR_SPEECH_GOOD,	      /* Good speech frame              */
+	MVS_AMR_SPEECH_DEGRADED,      /* Speech degraded                */
+	MVS_AMR_ONSET,		      /* Onset                          */
+	MVS_AMR_SPEECH_BAD,	      /* Corrupt speech frame (bad CRC) */
+	MVS_AMR_SID_FIRST,	      /* First silence descriptor       */
+	MVS_AMR_SID_UPDATE,	      /* Comfort noise frame            */
+	MVS_AMR_SID_BAD,	      /* Corrupt SID frame (bad CRC)    */
+	MVS_AMR_NO_DATA,	      /* Nothing to transmit            */
+	MVS_AMR_SPEECH_LOST	      /* Downlink speech lost           */
+};
+
+enum msm_audio_g711a_mode {
+	MVS_G711A_MODE_MULAW,
+	MVS_G711A_MODE_ALAW
+};
+
+enum msm_audio_g711_mode {
+	MVS_G711_MODE_MULAW,
+	MVS_G711_MODE_ALAW
+};
+
+enum mvs_g722_mode_type {
+	MVS_G722_MODE_01,
+	MVS_G722_MODE_02,
+	MVS_G722_MODE_03,
+	MVS_G722_MODE_MAX,
+	MVS_G722_MODE_UNDEF
+};
+
+enum msm_audio_g711a_frame_type {
+	MVS_G711A_SPEECH_GOOD,
+	MVS_G711A_SID,
+	MVS_G711A_NO_DATA,
+	MVS_G711A_ERASURE
+};
+
+enum msm_audio_g729a_frame_type {
+	MVS_G729A_NO_DATA,
+	MVS_G729A_SPEECH_GOOD,
+	MVS_G729A_SID,
+	MVS_G729A_ERASURE
+};
+
+struct min_max_rate {
+	uint32_t min_rate;
+	uint32_t max_rate;
+};
+
+struct msm_audio_mvs_config {
+	uint32_t mvs_mode;
+	uint32_t rate_type;
+	struct min_max_rate min_max_rate;
+	uint32_t dtx_mode;
+};
+
+#define MVS_MAX_VOC_PKT_SIZE 640
+
+struct gsm_header {
+	uint8_t bfi;
+	uint8_t sid;
+	uint8_t taf;
+	uint8_t ufi;
+};
+
+struct q6_msm_audio_mvs_frame {
+	union {
+		uint32_t frame_type;
+		uint32_t packet_rate;
+		struct gsm_header gsm_frame_type;
+	} header;
+	uint32_t len;
+	uint8_t voc_pkt[MVS_MAX_VOC_PKT_SIZE];
+
+};
+
+struct msm_audio_mvs_frame {
+	uint32_t frame_type;
+	uint32_t len;
+	uint8_t voc_pkt[MVS_MAX_VOC_PKT_SIZE];
+
+};
+
+#define Q5V2_MVS_MAX_VOC_PKT_SIZE 320
+
+struct q5v2_msm_audio_mvs_frame {
+	uint32_t frame_type;
+	uint32_t len;
+	uint8_t voc_pkt[Q5V2_MVS_MAX_VOC_PKT_SIZE];
+
+};
+#endif /* _UAPI_MSM_AUDIO_MVS_H */
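
A hedged sketch of configuring an MVS session for AMR at 12.2 kbps; 'fd' is an already-open MVS node, and the min/max rate fields are only meaningful for the CDMA vocoders.

#include <sys/ioctl.h>
#include <linux/msm_audio_mvs.h>

static int mvs_setup_amr(int fd)
{
	struct msm_audio_mvs_config cfg = {
		.mvs_mode = MVS_MODE_AMR,
		.rate_type = MVS_AMR_MODE_1220,	/* 12.2 kbps */
		.min_max_rate = { 0, 0 },	/* CDMA vocoders only */
		.dtx_mode = 0,
	};

	/* note the historically inverted direction macro (see above) */
	return ioctl(fd, AUDIO_SET_MVS_CONFIG, &cfg);
}
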
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/include/uapi/linux/msm_audio_qcp.h	2019-01-22 16:16:28.563292228 +0100
@@ -0,0 +1,37 @@
+#ifndef _UAPI_MSM_AUDIO_QCP_H
+#define _UAPI_MSM_AUDIO_QCP_H
+
+#include <linux/msm_audio.h>
+
+#define AUDIO_SET_QCELP_ENC_CONFIG  _IOW(AUDIO_IOCTL_MAGIC, \
+	0, struct msm_audio_qcelp_enc_config)
+
+#define AUDIO_GET_QCELP_ENC_CONFIG  _IOR(AUDIO_IOCTL_MAGIC, \
+	1, struct msm_audio_qcelp_enc_config)
+
+#define AUDIO_SET_EVRC_ENC_CONFIG  _IOW(AUDIO_IOCTL_MAGIC, \
+	2, struct msm_audio_evrc_enc_config)
+
+#define AUDIO_GET_EVRC_ENC_CONFIG  _IOR(AUDIO_IOCTL_MAGIC, \
+	3, struct msm_audio_evrc_enc_config)
+
+#define CDMA_RATE_BLANK		0x00
+#define CDMA_RATE_EIGHTH	0x01
+#define CDMA_RATE_QUARTER	0x02
+#define CDMA_RATE_HALF		0x03
+#define CDMA_RATE_FULL		0x04
+#define CDMA_RATE_ERASURE	0x05
+
+struct msm_audio_qcelp_enc_config {
+	uint32_t cdma_rate;
+	uint32_t min_bit_rate;
+	uint32_t max_bit_rate;
+};
+
+struct msm_audio_evrc_enc_config {
+	uint32_t cdma_rate;
+	uint32_t min_bit_rate;
+	uint32_t max_bit_rate;
+};
+
+#endif /* _UAPI_MSM_AUDIO_QCP_H */
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/include/uapi/linux/msm_audio_sbc.h	2019-01-22 16:16:28.563292228 +0100
@@ -0,0 +1,36 @@
+#ifndef _UAPI_MSM_AUDIO_SBC_H
+#define _UAPI_MSM_AUDIO_SBC_H
+
+#include <linux/msm_audio.h>
+
+#define AUDIO_SET_SBC_ENC_CONFIG  _IOW(AUDIO_IOCTL_MAGIC, \
+  (AUDIO_MAX_COMMON_IOCTL_NUM+0), struct msm_audio_sbc_enc_config)
+
+#define AUDIO_GET_SBC_ENC_CONFIG  _IOR(AUDIO_IOCTL_MAGIC, \
+  (AUDIO_MAX_COMMON_IOCTL_NUM+1), struct msm_audio_sbc_enc_config)
+
+#define AUDIO_SBC_BA_LOUDNESS		0x0
+#define AUDIO_SBC_BA_SNR		0x1
+
+#define AUDIO_SBC_MODE_MONO		0x0
+#define AUDIO_SBC_MODE_DUAL		0x1
+#define AUDIO_SBC_MODE_STEREO		0x2
+#define AUDIO_SBC_MODE_JSTEREO		0x3
+
+#define AUDIO_SBC_BANDS_8		0x1
+
+#define AUDIO_SBC_BLOCKS_4		0x0
+#define AUDIO_SBC_BLOCKS_8		0x1
+#define AUDIO_SBC_BLOCKS_12		0x2
+#define AUDIO_SBC_BLOCKS_16		0x3
+
+struct msm_audio_sbc_enc_config {
+	uint32_t channels;
+	uint32_t sample_rate;
+	uint32_t bit_allocation;
+	uint32_t number_of_subbands;
+	uint32_t number_of_blocks;
+	uint32_t bit_rate;
+	uint32_t mode;
+};
+#endif /* _UAPI_MSM_AUDIO_SBC_H */
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/include/uapi/linux/msm_audio_voicememo.h	2019-01-22 16:16:28.563292228 +0100
@@ -0,0 +1,66 @@
+#ifndef _UAPI_MSM_AUDIO_VOICEMEMO_H
+#define _UAPI_MSM_AUDIO_VOICEMEMO_H
+
+#include <linux/msm_audio.h>
+
+#define AUDIO_GET_VOICEMEMO_CONFIG  _IOW(AUDIO_IOCTL_MAGIC, \
+	(AUDIO_MAX_COMMON_IOCTL_NUM+0), unsigned)
+#define AUDIO_SET_VOICEMEMO_CONFIG  _IOR(AUDIO_IOCTL_MAGIC, \
+	(AUDIO_MAX_COMMON_IOCTL_NUM+1), unsigned)
+
+/* rec_type */
+enum rpc_voc_rec_dir_type {
+	RPC_VOC_REC_NONE,
+	RPC_VOC_REC_FORWARD,
+	RPC_VOC_REC_REVERSE,
+	RPC_VOC_REC_BOTH,
+	RPC_VOC_MAX_REC_TYPE
+};
+
+/* capability */
+enum rpc_voc_capability_type {
+	RPC_VOC_CAP_IS733 = 4,
+	RPC_VOC_CAP_IS127 = 8,
+	RPC_VOC_CAP_AMR = 64,
+	RPC_VOC_CAP_32BIT_DUMMY = 2147483647
+};
+
+/* Rate */
+enum rpc_voc_rate_type {
+	RPC_VOC_0_RATE = 0,
+	RPC_VOC_8_RATE,
+	RPC_VOC_4_RATE,
+	RPC_VOC_2_RATE,
+	RPC_VOC_1_RATE,
+	RPC_VOC_ERASURE,
+	RPC_VOC_ERR_RATE,
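+	/* The AMR rates below intentionally alias the CDMA rate values above */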
+	RPC_VOC_AMR_RATE_475 = 0,
+	RPC_VOC_AMR_RATE_515 = 1,
+	RPC_VOC_AMR_RATE_590 = 2,
+	RPC_VOC_AMR_RATE_670 = 3,
+	RPC_VOC_AMR_RATE_740 = 4,
+	RPC_VOC_AMR_RATE_795 = 5,
+	RPC_VOC_AMR_RATE_1020 = 6,
+	RPC_VOC_AMR_RATE_1220 = 7,
+};
+
+/* frame_format */
+enum rpc_voc_pb_len_rate_var_type {
+	RPC_VOC_PB_NATIVE_QCP = 3,
+	RPC_VOC_PB_AMR,
+	RPC_VOC_PB_EVB
+};
+
+struct msm_audio_voicememo_config {
+	uint32_t rec_type;
+	uint32_t rec_interval_ms;
+	uint32_t auto_stop_ms;
+	uint32_t capability;
+	uint32_t max_rate;
+	uint32_t min_rate;
+	uint32_t frame_format;
+	uint32_t dtx_enable;
+	uint32_t data_req_ms;
+};
+
+#endif /* _UAPI_MSM_AUDIO_VOICEMEMO_H */
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/include/uapi/linux/msm_audio_wma.h	2019-01-22 16:16:28.563292228 +0100
@@ -0,0 +1,33 @@
+#ifndef _UAPI_MSM_AUDIO_WMA_H
+#define _UAPI_MSM_AUDIO_WMA_H
+
+#define AUDIO_GET_WMA_CONFIG  _IOR(AUDIO_IOCTL_MAGIC, \
+	  (AUDIO_MAX_COMMON_IOCTL_NUM+0), unsigned)
+#define AUDIO_SET_WMA_CONFIG  _IOW(AUDIO_IOCTL_MAGIC, \
+	  (AUDIO_MAX_COMMON_IOCTL_NUM+1), unsigned)
+
+#define AUDIO_GET_WMA_CONFIG_V2  _IOR(AUDIO_IOCTL_MAGIC, \
+	  (AUDIO_MAX_COMMON_IOCTL_NUM+2), struct msm_audio_wma_config_v2)
+#define AUDIO_SET_WMA_CONFIG_V2  _IOW(AUDIO_IOCTL_MAGIC, \
+	  (AUDIO_MAX_COMMON_IOCTL_NUM+3), struct msm_audio_wma_config_v2)
+
+struct msm_audio_wma_config {
+	unsigned short 	armdatareqthr;
+	unsigned short 	channelsdecoded;
+	unsigned short 	wmabytespersec;
+	unsigned short	wmasamplingfreq;
+	unsigned short	wmaencoderopts;
+};
+
+struct msm_audio_wma_config_v2 {
+	unsigned short	format_tag;
+	unsigned short	numchannels;
+	uint32_t	samplingrate;
+	uint32_t	avgbytespersecond;
+	unsigned short	block_align;
+	unsigned short  validbitspersample;
+	uint32_t	channelmask;
+	unsigned short	encodeopt;
+};
+
+#endif /* _UAPI_MSM_AUDIO_WMA_H */
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/include/uapi/linux/msm_audio_wmapro.h	2019-01-22 16:16:28.563292228 +0100
@@ -0,0 +1,22 @@
+#ifndef _UAPI_MSM_AUDIO_WMAPRO_H
+#define _UAPI_MSM_AUDIO_WMAPRO_H
+
+#define AUDIO_GET_WMAPRO_CONFIG  _IOR(AUDIO_IOCTL_MAGIC, \
+	  (AUDIO_MAX_COMMON_IOCTL_NUM+0), struct msm_audio_wmapro_config)
+#define AUDIO_SET_WMAPRO_CONFIG  _IOW(AUDIO_IOCTL_MAGIC, \
+	  (AUDIO_MAX_COMMON_IOCTL_NUM+1), struct msm_audio_wmapro_config)
+
+struct msm_audio_wmapro_config {
+	unsigned short  armdatareqthr;
+	uint8_t         validbitspersample;
+	uint8_t         numchannels;
+	unsigned short  formattag;
+	uint32_t        samplingrate;
+	uint32_t        avgbytespersecond;
+	unsigned short  asfpacketlength;
+	uint32_t        channelmask;
+	unsigned short  encodeopt;
+	unsigned short  advancedencodeopt;
+	uint32_t        advancedencodeopt2;
+};
+#endif /* _UAPI_MSM_AUDIO_WMAPRO_H */
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/include/uapi/linux/msm-core-interface.h	2019-01-22 16:16:28.563292228 +0100
@@ -0,0 +1,29 @@
+#ifndef __MSM_CORE_LIB_H__
+#define __MSM_CORE_LIB_H__
+
+#include <linux/ioctl.h>
+
+#define TEMP_DATA_POINTS 13
+#define MAX_NUM_FREQ 200
+
+enum msm_core_ioctl_params {
+	MSM_CORE_LEAKAGE,
+	MSM_CORE_VOLTAGE,
+};
+
+#define MSM_CORE_MAGIC 0x9D
+
+struct sched_params {
+	uint32_t cpumask;
+	uint32_t cluster;
+	uint32_t power[TEMP_DATA_POINTS][MAX_NUM_FREQ];
+	uint32_t voltage[MAX_NUM_FREQ];
+	uint32_t freq[MAX_NUM_FREQ];
+};
+
+
+#define EA_LEAKAGE _IOWR(MSM_CORE_MAGIC, MSM_CORE_LEAKAGE,\
+						struct sched_params)
+#define EA_VOLT _IOWR(MSM_CORE_MAGIC, MSM_CORE_VOLTAGE,\
+						struct sched_params)
+#endif
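
A sketch of querying the per-frequency leakage table; the device node name is an assumption and error handling is minimal.

#include <fcntl.h>
#include <sys/ioctl.h>
#include <linux/msm-core-interface.h>

static int read_leakage(struct sched_params *sp)
{
	int fd = open("/dev/msm_core", O_RDWR);	/* assumed node name */

	if (fd < 0)
		return -1;
	sp->cpumask = 0x1;	/* CPU0 */
	sp->cluster = 0;
	return ioctl(fd, EA_LEAKAGE, sp);
}
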
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/include/uapi/linux/msm_dsps.h	2019-01-22 16:16:28.563292228 +0100
@@ -0,0 +1,16 @@
+#ifndef _UAPI_DSPS_H_
+#define _UAPI_DSPS_H_
+
+#include <linux/ioctl.h>
+
+#define DSPS_IOCTL_MAGIC 'd'
+
+#define DSPS_IOCTL_ON	_IO(DSPS_IOCTL_MAGIC, 1)
+#define DSPS_IOCTL_OFF	_IO(DSPS_IOCTL_MAGIC, 2)
+
+#define DSPS_IOCTL_READ_SLOW_TIMER _IOR(DSPS_IOCTL_MAGIC, 3, unsigned int*)
+#define DSPS_IOCTL_READ_FAST_TIMER _IOR(DSPS_IOCTL_MAGIC, 4, unsigned int*)
+
+#define DSPS_IOCTL_RESET _IO(DSPS_IOCTL_MAGIC, 5)
+
+#endif	/* _UAPI_DSPS_H_ */
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/include/uapi/linux/msm_ion.h	2019-01-22 16:16:26.719275529 +0100
@@ -0,0 +1,216 @@
+#ifndef _UAPI_MSM_ION_H
+#define _UAPI_MSM_ION_H
+
+#include "ion.h"
+
+#define ION_BIT(nr) (1UL << (nr))
+
+enum msm_ion_heap_types {
+	ION_HEAP_TYPE_MSM_START = ION_HEAP_TYPE_CUSTOM + 1,
+	ION_HEAP_TYPE_SECURE_DMA = ION_HEAP_TYPE_MSM_START,
+	ION_HEAP_TYPE_SYSTEM_SECURE,
+	ION_HEAP_TYPE_HYP_CMA,
+	/*
+	 * if you add a heap type here you should also add it to
+	 * heap_types_info[] in msm_ion.c
+	 */
+};
+
+/**
+ * These are the only ids that should be used for Ion heap ids.
+ * The ids listed are the order in which allocation will be attempted
+ * if specified. Don't swap the order of heap ids unless you know what
+ * you are doing!
+ * Ids are spaced on purpose to allow new ids to be inserted in between
+ * (for possible fallbacks).
+ */
+
+enum ion_heap_ids {
+	INVALID_HEAP_ID = -1,
+	ION_CP_MM_HEAP_ID = 8,
+	ION_SECURE_HEAP_ID = 9,
+	ION_SECURE_DISPLAY_HEAP_ID = 10,
+	ION_CP_MFC_HEAP_ID = 12,
+	ION_SPSS_HEAP_ID = 13, /* Secure Processor ION heap */
+	ION_CP_WB_HEAP_ID = 16, /* 8660 only */
+	ION_CAMERA_HEAP_ID = 20, /* 8660 only */
+	ION_SYSTEM_CONTIG_HEAP_ID = 21,
+	ION_ADSP_HEAP_ID = 22,
+	ION_PIL1_HEAP_ID = 23, /* Currently used for other PIL images */
+	ION_SF_HEAP_ID = 24,
+	ION_SYSTEM_HEAP_ID = 25,
+	ION_PIL2_HEAP_ID = 26, /* Currently used for modem firmware images */
+	ION_QSECOM_HEAP_ID = 27,
+	ION_AUDIO_HEAP_ID = 28,
+
+	ION_MM_FIRMWARE_HEAP_ID = 29,
+
+	ION_HEAP_ID_RESERVED = 31 /** Bit reserved for ION_FLAG_SECURE flag */
+};
+
+/*
+ * The IOMMU heap is deprecated! Here are some aliases for backwards
+ * compatibility:
+ */
+#define ION_IOMMU_HEAP_ID ION_SYSTEM_HEAP_ID
+#define ION_HEAP_TYPE_IOMMU ION_HEAP_TYPE_SYSTEM
+
+#define ION_SPSS_HEAP_ID ION_SPSS_HEAP_ID
+
+enum ion_fixed_position {
+	NOT_FIXED,
+	FIXED_LOW,
+	FIXED_MIDDLE,
+	FIXED_HIGH,
+};
+
+enum cp_mem_usage {
+	VIDEO_BITSTREAM = 0x1,
+	VIDEO_PIXEL = 0x2,
+	VIDEO_NONPIXEL = 0x3,
+	DISPLAY_SECURE_CP_USAGE = 0x4,
+	CAMERA_SECURE_CP_USAGE = 0x5,
+	MAX_USAGE = 0x6,
+	UNKNOWN = 0x7FFFFFFF,
+};
+
+/**
+ * Flags to be used when allocating from the secure heap for
+ * content protection
+ */
+#define ION_FLAG_CP_TOUCH		ION_BIT(17)
+#define ION_FLAG_CP_BITSTREAM		ION_BIT(18)
+#define ION_FLAG_CP_PIXEL		ION_BIT(19)
+#define ION_FLAG_CP_NON_PIXEL		ION_BIT(20)
+#define ION_FLAG_CP_CAMERA		ION_BIT(21)
+#define ION_FLAG_CP_HLOS		ION_BIT(22)
+#define ION_FLAG_CP_HLOS_FREE		ION_BIT(23)
+#define ION_FLAG_CP_SPSS_SP_SHARED	ION_BIT(24)
+#define ION_FLAG_CP_SEC_DISPLAY		ION_BIT(25)
+#define ION_FLAG_CP_APP			ION_BIT(26)
+#define ION_FLAG_CP_CAMERA_PREVIEW	ION_BIT(27)
+
+
+/**
+ * Flag to allow non-contiguous allocation of memory from the secure
+ * heap.
+ */
+#define ION_FLAG_ALLOW_NON_CONTIG	ION_BIT(24)
+
+/**
+ * Flag to use when allocating to indicate that a heap is secure.
+ */
+#define ION_FLAG_SECURE			ION_BIT(ION_HEAP_ID_RESERVED)
+
+/**
+ * Flag for clients to force contiguous memory allocation.
+ *
+ * Use of this flag is carefully monitored!
+ */
+#define ION_FLAG_FORCE_CONTIGUOUS	ION_BIT(30)
+
+/*
+ * Used in conjunction with heaps that pool memory to force an allocation
+ * to come from the page allocator directly instead of from the pool.
+ */
+#define ION_FLAG_POOL_FORCE_ALLOC	ION_BIT(16)
+
+/**
+ * Deprecated! Please use the corresponding ION_FLAG_*.
+ */
+#define ION_SECURE ION_FLAG_SECURE
+#define ION_FORCE_CONTIGUOUS ION_FLAG_FORCE_CONTIGUOUS
+
+/**
+ * Macro should be used with ion_heap_ids defined above.
+ */
+#define ION_HEAP(bit)			ION_BIT(bit)
+
+#define ION_ADSP_HEAP_NAME	"adsp"
+#define ION_SYSTEM_HEAP_NAME	"system"
+#define ION_VMALLOC_HEAP_NAME	ION_SYSTEM_HEAP_NAME
+#define ION_KMALLOC_HEAP_NAME	"kmalloc"
+#define ION_AUDIO_HEAP_NAME	"audio"
+#define ION_SF_HEAP_NAME	"sf"
+#define ION_MM_HEAP_NAME	"mm"
+#define ION_CAMERA_HEAP_NAME	"camera_preview"
+#define ION_IOMMU_HEAP_NAME	"iommu"
+#define ION_MFC_HEAP_NAME	"mfc"
+#define ION_SPSS_HEAP_NAME	"spss"
+#define ION_WB_HEAP_NAME	"wb"
+#define ION_MM_FIRMWARE_HEAP_NAME	"mm_fw"
+#define ION_PIL1_HEAP_NAME  "pil_1"
+#define ION_PIL2_HEAP_NAME  "pil_2"
+#define ION_QSECOM_HEAP_NAME	"qsecom"
+#define ION_SECURE_HEAP_NAME	"secure_heap"
+#define ION_SECURE_DISPLAY_HEAP_NAME "secure_display"
+
+#define ION_SET_CACHED(__cache)		(__cache | ION_FLAG_CACHED)
+#define ION_SET_UNCACHED(__cache)	(__cache & ~ION_FLAG_CACHED)
+
+#define ION_IS_CACHED(__flags)	((__flags) & ION_FLAG_CACHED)
+
+/* struct ion_flush_data - data passed to ion for flushing caches
+ *
+ * @handle:	handle with data to flush
+ * @fd:		fd to flush
+ * @vaddr:	userspace virtual address mapped with mmap
+ * @offset:	offset into the handle to flush
+ * @length:	length of handle to flush
+ *
+ * Performs cache operations on the handle. If p is the start address
+ * of the handle, p + offset through p + offset + length will have
+ * the cache operations performed.
+ */
+struct ion_flush_data {
+	ion_user_handle_t handle;
+	int fd;
+	void *vaddr;
+	unsigned int offset;
+	unsigned int length;
+};
+
+struct ion_prefetch_regions {
+	unsigned int vmid;
+	size_t __user *sizes;
+	unsigned int nr_sizes;
+};
+
+struct ion_prefetch_data {
+	int heap_id;
+	unsigned long len;
+	struct ion_prefetch_regions __user *regions;
+	unsigned int nr_regions;
+};
+
+#define ION_IOC_MSM_MAGIC 'M'
+
+/**
+ * DOC: ION_IOC_CLEAN_CACHES - clean the caches
+ *
+ * Clean the caches of the handle specified.
+ */
+#define ION_IOC_CLEAN_CACHES	_IOWR(ION_IOC_MSM_MAGIC, 0, \
+						struct ion_flush_data)
+/**
+ * DOC: ION_IOC_INV_CACHES - invalidate the caches
+ *
+ * Invalidate the caches of the handle specified.
+ */
+#define ION_IOC_INV_CACHES	_IOWR(ION_IOC_MSM_MAGIC, 1, \
+						struct ion_flush_data)
+/**
+ * DOC: ION_IOC_CLEAN_INV_CACHES - clean and invalidate the caches
+ *
+ * Clean and invalidate the caches of the handle specified.
+ */
+#define ION_IOC_CLEAN_INV_CACHES	_IOWR(ION_IOC_MSM_MAGIC, 2, \
+						struct ion_flush_data)
+
+#define ION_IOC_PREFETCH		_IOWR(ION_IOC_MSM_MAGIC, 3, \
+						struct ion_prefetch_data)
+
+#define ION_IOC_DRAIN			_IOWR(ION_IOC_MSM_MAGIC, 4, \
+						struct ion_prefetch_data)
+
+#endif
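
A sketch of cleaning caches for a shared ION buffer, per the ion_flush_data documentation above. It assumes the legacy /dev/ion client fd accepts these custom commands directly (as the msm driver's custom ioctl handler does) and that the buffer is mapped at 'vaddr'.

#include <string.h>
#include <sys/ioctl.h>
#include <linux/msm_ion.h>

static int ion_clean(int ion_fd, int buf_fd, void *vaddr, unsigned int len)
{
	struct ion_flush_data data;

	memset(&data, 0, sizeof(data));
	data.fd = buf_fd;	/* share fd for the buffer */
	data.vaddr = vaddr;	/* its userspace mapping */
	data.offset = 0;
	data.length = len;

	return ioctl(ion_fd, ION_IOC_CLEAN_CACHES, &data);
}
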
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/include/uapi/linux/msm_ipa.h	2019-10-29 09:26:25.545221791 +0100
@@ -0,0 +1,2049 @@
+#ifndef _UAPI_MSM_IPA_H_
+#define _UAPI_MSM_IPA_H_
+
+#ifndef __KERNEL__
+#include <stdint.h>
+#include <stddef.h>
+#include <sys/stat.h>
+#endif
+#include <linux/ioctl.h>
+#include <linux/types.h>
+#include <linux/if_ether.h>
+
+/**
+ * unique magic number of the IPA device
+ */
+#define IPA_IOC_MAGIC 0xCF
+
+/**
+ * IPA device full path
+ */
+#define IPA_DEV_NAME "/dev/ipa"
+
+/**
+ * IPA NAT table character device name
+ */
+#define IPA_NAT_DEV_NAME "ipaNatTable"
+
+/**
+ * IPA IPv6CT table character device name
+ */
+#define IPA_IPV6CT_DEV_NAME "ipaIpv6CTTable"
+
+/**
+ * name of the default routing tables for v4 and v6
+ */
+#define IPA_DFLT_RT_TBL_NAME "ipa_dflt_rt"
+
+/**
+ * commands supported by IPA driver
+ */
+#define IPA_IOCTL_ADD_HDR                       0
+#define IPA_IOCTL_DEL_HDR                       1
+#define IPA_IOCTL_ADD_RT_RULE                   2
+#define IPA_IOCTL_DEL_RT_RULE                   3
+#define IPA_IOCTL_ADD_FLT_RULE                  4
+#define IPA_IOCTL_DEL_FLT_RULE                  5
+#define IPA_IOCTL_COMMIT_HDR                    6
+#define IPA_IOCTL_RESET_HDR                     7
+#define IPA_IOCTL_COMMIT_RT                     8
+#define IPA_IOCTL_RESET_RT                      9
+#define IPA_IOCTL_COMMIT_FLT                    10
+#define IPA_IOCTL_RESET_FLT                     11
+#define IPA_IOCTL_DUMP                          12
+#define IPA_IOCTL_GET_RT_TBL                    13
+#define IPA_IOCTL_PUT_RT_TBL                    14
+#define IPA_IOCTL_COPY_HDR                      15
+#define IPA_IOCTL_QUERY_INTF                    16
+#define IPA_IOCTL_QUERY_INTF_TX_PROPS           17
+#define IPA_IOCTL_QUERY_INTF_RX_PROPS           18
+#define IPA_IOCTL_GET_HDR                       19
+#define IPA_IOCTL_PUT_HDR                       20
+#define IPA_IOCTL_SET_FLT                       21
+#define IPA_IOCTL_ALLOC_NAT_MEM                 22
+#define IPA_IOCTL_V4_INIT_NAT                   23
+#define IPA_IOCTL_TABLE_DMA_CMD                 24
+#define IPA_IOCTL_NAT_DMA                       IPA_IOCTL_TABLE_DMA_CMD
+#define IPA_IOCTL_INIT_IPV6CT_TABLE             25
+#define IPA_IOCTL_V4_DEL_NAT                    26
+#define IPA_IOCTL_PULL_MSG                      27
+#define IPA_IOCTL_GET_NAT_OFFSET                28
+#define IPA_IOCTL_RM_ADD_DEPENDENCY             29
+#define IPA_IOCTL_RM_DEL_DEPENDENCY             30
+#define IPA_IOCTL_GENERATE_FLT_EQ               31
+#define IPA_IOCTL_QUERY_INTF_EXT_PROPS          32
+#define IPA_IOCTL_QUERY_EP_MAPPING              33
+#define IPA_IOCTL_QUERY_RT_TBL_INDEX            34
+#define IPA_IOCTL_WRITE_QMAPID                  35
+#define IPA_IOCTL_MDFY_FLT_RULE                 36
+#define IPA_IOCTL_NOTIFY_WAN_UPSTREAM_ROUTE_ADD 37
+#define IPA_IOCTL_NOTIFY_WAN_UPSTREAM_ROUTE_DEL 38
+#define IPA_IOCTL_NOTIFY_WAN_EMBMS_CONNECTED    39
+#define IPA_IOCTL_ADD_HDR_PROC_CTX              40
+#define IPA_IOCTL_DEL_HDR_PROC_CTX              41
+#define IPA_IOCTL_MDFY_RT_RULE                  42
+#define IPA_IOCTL_ADD_RT_RULE_AFTER             43
+#define IPA_IOCTL_ADD_FLT_RULE_AFTER            44
+#define IPA_IOCTL_GET_HW_VERSION                45
+#define IPA_IOCTL_ADD_RT_RULE_EXT               46
+#define IPA_IOCTL_NAT_MODIFY_PDN                47
+#define IPA_IOCTL_ALLOC_NAT_TABLE               48
+#define IPA_IOCTL_ALLOC_IPV6CT_TABLE            49
+#define IPA_IOCTL_DEL_NAT_TABLE                 50
+#define IPA_IOCTL_DEL_IPV6CT_TABLE              51
+#define IPA_IOCTL_ADD_VLAN_IFACE                52
+#define IPA_IOCTL_DEL_VLAN_IFACE                53
+#define IPA_IOCTL_ADD_L2TP_VLAN_MAPPING         54
+#define IPA_IOCTL_DEL_L2TP_VLAN_MAPPING         55
+#define IPA_IOCTL_MAX                           56
+
+/**
+ * max size of the header to be inserted
+ */
+#define IPA_HDR_MAX_SIZE 64
+
+/**
+ * max size of the name of the resource (routing table, header)
+ */
+#define IPA_RESOURCE_NAME_MAX 32
+
+/**
+ * max number of interface properties
+ */
+#define IPA_NUM_PROPS_MAX 35
+
+/**
+ * size of the mac address
+ */
+#define IPA_MAC_ADDR_SIZE  6
+
+/**
+ * max number of mbim streams
+ */
+#define IPA_MBIM_MAX_STREAM_NUM 8
+
+/**
+ * length of an IPv6 gateway address, in 32-bit words
+ */
+#define IPA_WAN_MSG_IPv6_ADDR_GW_LEN 4
+
+/**
+ * max number of lan clients supported per device type
+ * for LAN stats via HW.
+ */
+#define IPA_MAX_NUM_HW_PATH_CLIENTS 16
+
+/**
+ * max number of destination pipes possible for a client.
+ */
+#define QMI_IPA_MAX_CLIENT_DST_PIPES 4
+
+/**
+ * the attributes of the rule (routing or filtering)
+ */
+#define IPA_FLT_TOS			(1ul << 0)
+#define IPA_FLT_PROTOCOL		(1ul << 1)
+#define IPA_FLT_SRC_ADDR		(1ul << 2)
+#define IPA_FLT_DST_ADDR		(1ul << 3)
+#define IPA_FLT_SRC_PORT_RANGE		(1ul << 4)
+#define IPA_FLT_DST_PORT_RANGE		(1ul << 5)
+#define IPA_FLT_TYPE			(1ul << 6)
+#define IPA_FLT_CODE			(1ul << 7)
+#define IPA_FLT_SPI			(1ul << 8)
+#define IPA_FLT_SRC_PORT		(1ul << 9)
+#define IPA_FLT_DST_PORT		(1ul << 10)
+#define IPA_FLT_TC			(1ul << 11)
+#define IPA_FLT_FLOW_LABEL		(1ul << 12)
+#define IPA_FLT_NEXT_HDR		(1ul << 13)
+#define IPA_FLT_META_DATA		(1ul << 14)
+#define IPA_FLT_FRAGMENT		(1ul << 15)
+#define IPA_FLT_TOS_MASKED		(1ul << 16)
+#define IPA_FLT_MAC_SRC_ADDR_ETHER_II	(1ul << 17)
+#define IPA_FLT_MAC_DST_ADDR_ETHER_II	(1ul << 18)
+#define IPA_FLT_MAC_SRC_ADDR_802_3	(1ul << 19)
+#define IPA_FLT_MAC_DST_ADDR_802_3	(1ul << 20)
+#define IPA_FLT_MAC_ETHER_TYPE		(1ul << 21)
+#define IPA_FLT_TCP_SYN			(1ul << 23)
+#define IPA_FLT_TCP_SYN_L2TP		(1ul << 24)
+#define IPA_FLT_L2TP_INNER_IP_TYPE  (1ul << 25)
+#define IPA_FLT_L2TP_INNER_IPV4_DST_ADDR (1ul << 26)
+
+/**
+ * maximal number of NAT PDNs in the PDN config table
+ */
+#define IPA_MAX_PDN_NUM 5
+
+/**
+ * enum ipa_client_type - names for the various IPA "clients"
+ * these are from the perspective of the clients, e.g.
+ * HSIC1_PROD means the HSIC client is the producer and IPA is the
+ * consumer
+ */
+enum ipa_client_type {
+	IPA_CLIENT_PROD,
+	IPA_CLIENT_HSIC1_PROD = IPA_CLIENT_PROD,
+	IPA_CLIENT_WLAN1_PROD,
+	IPA_CLIENT_HSIC2_PROD,
+	IPA_CLIENT_USB2_PROD,
+	IPA_CLIENT_HSIC3_PROD,
+	IPA_CLIENT_USB3_PROD,
+	IPA_CLIENT_HSIC4_PROD,
+	IPA_CLIENT_USB4_PROD,
+	IPA_CLIENT_HSIC5_PROD,
+	IPA_CLIENT_USB_PROD,
+	IPA_CLIENT_A5_WLAN_AMPDU_PROD,
+	IPA_CLIENT_A2_EMBEDDED_PROD,
+	IPA_CLIENT_A2_TETHERED_PROD,
+	IPA_CLIENT_APPS_LAN_PROD,
+	IPA_CLIENT_APPS_WAN_PROD,
+	IPA_CLIENT_APPS_LAN_WAN_PROD = IPA_CLIENT_APPS_WAN_PROD,
+	IPA_CLIENT_APPS_CMD_PROD,
+	IPA_CLIENT_ODU_PROD,
+	IPA_CLIENT_MHI_PROD,
+	IPA_CLIENT_Q6_LAN_PROD,
+	IPA_CLIENT_Q6_WAN_PROD,
+	IPA_CLIENT_Q6_CMD_PROD,
+	IPA_CLIENT_MEMCPY_DMA_SYNC_PROD,
+	IPA_CLIENT_MEMCPY_DMA_ASYNC_PROD,
+	IPA_CLIENT_Q6_DECOMP_PROD,
+	IPA_CLIENT_Q6_DECOMP2_PROD,
+	IPA_CLIENT_UC_USB_PROD,
+
+	/* Below PROD client type is only for test purpose */
+	IPA_CLIENT_TEST_PROD,
+	IPA_CLIENT_TEST1_PROD,
+	IPA_CLIENT_TEST2_PROD,
+	IPA_CLIENT_TEST3_PROD,
+	IPA_CLIENT_TEST4_PROD,
+
+	IPA_CLIENT_CONS,
+	IPA_CLIENT_HSIC1_CONS = IPA_CLIENT_CONS,
+	IPA_CLIENT_WLAN1_CONS,
+	IPA_CLIENT_HSIC2_CONS,
+	IPA_CLIENT_USB2_CONS,
+	IPA_CLIENT_WLAN2_CONS,
+	IPA_CLIENT_HSIC3_CONS,
+	IPA_CLIENT_USB3_CONS,
+	IPA_CLIENT_WLAN3_CONS,
+	IPA_CLIENT_HSIC4_CONS,
+	IPA_CLIENT_USB4_CONS,
+	IPA_CLIENT_WLAN4_CONS,
+	IPA_CLIENT_HSIC5_CONS,
+	IPA_CLIENT_USB_CONS,
+	IPA_CLIENT_USB_DPL_CONS,
+	IPA_CLIENT_A2_EMBEDDED_CONS,
+	IPA_CLIENT_A2_TETHERED_CONS,
+	IPA_CLIENT_A5_LAN_WAN_CONS,
+	IPA_CLIENT_APPS_LAN_CONS,
+	IPA_CLIENT_APPS_WAN_CONS,
+	IPA_CLIENT_ODU_EMB_CONS,
+	IPA_CLIENT_ODU_TETH_CONS,
+	IPA_CLIENT_MHI_CONS,
+	IPA_CLIENT_Q6_LAN_CONS,
+	IPA_CLIENT_Q6_WAN_CONS,
+	IPA_CLIENT_Q6_DUN_CONS,
+	IPA_CLIENT_MEMCPY_DMA_SYNC_CONS,
+	IPA_CLIENT_MEMCPY_DMA_ASYNC_CONS,
+	IPA_CLIENT_Q6_DECOMP_CONS,
+	IPA_CLIENT_Q6_DECOMP2_CONS,
+	IPA_CLIENT_Q6_LTE_WIFI_AGGR_CONS,
+	/* Below CONS client type is only for test purpose */
+	IPA_CLIENT_TEST_CONS,
+	IPA_CLIENT_TEST1_CONS,
+	IPA_CLIENT_TEST2_CONS,
+	IPA_CLIENT_TEST3_CONS,
+	IPA_CLIENT_TEST4_CONS,
+
+	IPA_CLIENT_MAX,
+};
+
+#define IPA_CLIENT_IS_APPS_CONS(client) \
+	((client) == IPA_CLIENT_APPS_LAN_CONS || \
+	(client) == IPA_CLIENT_APPS_WAN_CONS)
+
+#define IPA_CLIENT_IS_USB_CONS(client) \
+	((client) == IPA_CLIENT_USB_CONS || \
+	(client) == IPA_CLIENT_USB2_CONS || \
+	(client) == IPA_CLIENT_USB3_CONS || \
+	(client) == IPA_CLIENT_USB_DPL_CONS || \
+	(client) == IPA_CLIENT_USB4_CONS)
+
+#define IPA_CLIENT_IS_WLAN_CONS(client) \
+	((client) == IPA_CLIENT_WLAN1_CONS || \
+	(client) == IPA_CLIENT_WLAN2_CONS || \
+	(client) == IPA_CLIENT_WLAN3_CONS || \
+	(client) == IPA_CLIENT_WLAN4_CONS)
+
+#define IPA_CLIENT_IS_ODU_CONS(client) \
+	((client) == IPA_CLIENT_ODU_EMB_CONS || \
+	(client) == IPA_CLIENT_ODU_TETH_CONS)
+
+#define IPA_CLIENT_IS_Q6_CONS(client) \
+	((client) == IPA_CLIENT_Q6_LAN_CONS || \
+	(client) == IPA_CLIENT_Q6_WAN_CONS || \
+	(client) == IPA_CLIENT_Q6_DUN_CONS || \
+	(client) == IPA_CLIENT_Q6_DECOMP_CONS || \
+	(client) == IPA_CLIENT_Q6_DECOMP2_CONS || \
+	(client) == IPA_CLIENT_Q6_LTE_WIFI_AGGR_CONS)
+
+#define IPA_CLIENT_IS_Q6_PROD(client) \
+	((client) == IPA_CLIENT_Q6_LAN_PROD || \
+	(client) == IPA_CLIENT_Q6_WAN_PROD || \
+	(client) == IPA_CLIENT_Q6_CMD_PROD || \
+	(client) == IPA_CLIENT_Q6_DECOMP_PROD || \
+	(client) == IPA_CLIENT_Q6_DECOMP2_PROD)
+
+#define IPA_CLIENT_IS_Q6_NON_ZIP_CONS(client) \
+	((client) == IPA_CLIENT_Q6_LAN_CONS || \
+	(client) == IPA_CLIENT_Q6_WAN_CONS || \
+	(client) == IPA_CLIENT_Q6_DUN_CONS || \
+	(client) == IPA_CLIENT_Q6_LTE_WIFI_AGGR_CONS)
+
+#define IPA_CLIENT_IS_Q6_ZIP_CONS(client) \
+	((client) == IPA_CLIENT_Q6_DECOMP_CONS || \
+	(client) == IPA_CLIENT_Q6_DECOMP2_CONS)
+
+#define IPA_CLIENT_IS_Q6_NON_ZIP_PROD(client) \
+	((client) == IPA_CLIENT_Q6_LAN_PROD || \
+	(client) == IPA_CLIENT_Q6_WAN_PROD || \
+	(client) == IPA_CLIENT_Q6_CMD_PROD)
+
+#define IPA_CLIENT_IS_Q6_ZIP_PROD(client) \
+	((client) == IPA_CLIENT_Q6_DECOMP_PROD || \
+	(client) == IPA_CLIENT_Q6_DECOMP2_PROD)
+
+#define IPA_CLIENT_IS_MEMCPY_DMA_CONS(client) \
+	((client) == IPA_CLIENT_MEMCPY_DMA_SYNC_CONS || \
+	(client) == IPA_CLIENT_MEMCPY_DMA_ASYNC_CONS)
+
+#define IPA_CLIENT_IS_MEMCPY_DMA_PROD(client) \
+	((client) == IPA_CLIENT_MEMCPY_DMA_SYNC_PROD || \
+	(client) == IPA_CLIENT_MEMCPY_DMA_ASYNC_PROD)
+
+#define IPA_CLIENT_IS_MHI_CONS(client) \
+	((client) == IPA_CLIENT_MHI_CONS)
+
+#define IPA_CLIENT_IS_MHI(client) \
+	((client) == IPA_CLIENT_MHI_CONS || \
+	(client) == IPA_CLIENT_MHI_PROD)
+
+#define IPA_CLIENT_IS_TEST_PROD(client) \
+	((client) == IPA_CLIENT_TEST_PROD || \
+	(client) == IPA_CLIENT_TEST1_PROD || \
+	(client) == IPA_CLIENT_TEST2_PROD || \
+	(client) == IPA_CLIENT_TEST3_PROD || \
+	(client) == IPA_CLIENT_TEST4_PROD)
+
+#define IPA_CLIENT_IS_TEST_CONS(client) \
+	((client) == IPA_CLIENT_TEST_CONS || \
+	(client) == IPA_CLIENT_TEST1_CONS || \
+	(client) == IPA_CLIENT_TEST2_CONS || \
+	(client) == IPA_CLIENT_TEST3_CONS || \
+	(client) == IPA_CLIENT_TEST4_CONS)
+
+#define IPA_CLIENT_IS_TEST(client) \
+	(IPA_CLIENT_IS_TEST_PROD(client) || IPA_CLIENT_IS_TEST_CONS(client))
+
+/**
+ * enum ipa_ip_type - Address family: IPv4 or IPv6
+ */
+enum ipa_ip_type {
+	IPA_IP_v4,
+	IPA_IP_v6,
+	IPA_IP_MAX
+};
+
+/**
+ * enum ipa_rule_type - Type of routing or filtering rule
+ * Hashable: Rule will be located in the hashable tables
+ * Non_Hashable: Rule will be located in the non-hashable tables
+ */
+enum ipa_rule_type {
+	IPA_RULE_HASHABLE,
+	IPA_RULE_NON_HASHABLE,
+	IPA_RULE_TYPE_MAX
+};
+
+/**
+ * enum ipa_flt_action - action field of filtering rule
+ *
+ * Pass to routing: 5'd0
+ * Pass to source NAT: 5'd1
+ * Pass to destination NAT: 5'd2
+ * Pass to default output pipe (e.g., Apps or Modem): 5'd3
+ */
+enum ipa_flt_action {
+	IPA_PASS_TO_ROUTING,
+	IPA_PASS_TO_SRC_NAT,
+	IPA_PASS_TO_DST_NAT,
+	IPA_PASS_TO_EXCEPTION
+};
+
+/**
+ * enum ipa_wlan_event - Events for wlan client
+ *
+ * wlan client connect: New wlan client connected
+ * wlan client disconnect: wlan client disconnected
+ * wlan client power save: wlan client moved to power save
+ * wlan client normal: wlan client moved out of power save
+ * sw routing enable: ipa routing is disabled
+ * sw routing disable: ipa routing is enabled
+ * wlan ap connect: wlan AP(access point) is up
+ * wlan ap disconnect: wlan AP(access point) is down
+ * wlan sta connect: wlan STA(station) is up
+ * wlan sta disconnect: wlan STA(station) is down
+ * wlan client connect ex: new wlan client connected
+ * wlan scc switch: wlan interfaces in scc mode
+ * wlan mcc switch: wlan interfaces in mcc mode
+ * wlan wdi enable: wdi data path completed
+ * wlan wdi disable: wdi data path teardown
+ */
+enum ipa_wlan_event {
+	WLAN_CLIENT_CONNECT,
+	WLAN_CLIENT_DISCONNECT,
+	WLAN_CLIENT_POWER_SAVE_MODE,
+	WLAN_CLIENT_NORMAL_MODE,
+	SW_ROUTING_ENABLE,
+	SW_ROUTING_DISABLE,
+	WLAN_AP_CONNECT,
+	WLAN_AP_DISCONNECT,
+	WLAN_STA_CONNECT,
+	WLAN_STA_DISCONNECT,
+	WLAN_CLIENT_CONNECT_EX,
+	WLAN_SWITCH_TO_SCC,
+	WLAN_SWITCH_TO_MCC,
+	WLAN_WDI_ENABLE,
+	WLAN_WDI_DISABLE,
+	IPA_WLAN_EVENT_MAX
+};
+
+/**
+ * enum ipa_wan_event - Events for wan client
+ *
+ * wan default route add/del
+ * wan embms connect: New wan embms interface connected
+ */
+enum ipa_wan_event {
+	WAN_UPSTREAM_ROUTE_ADD = IPA_WLAN_EVENT_MAX,
+	WAN_UPSTREAM_ROUTE_DEL,
+	WAN_EMBMS_CONNECT,
+	WAN_XLAT_CONNECT,
+	IPA_WAN_EVENT_MAX
+};
+
+enum ipa_ecm_event {
+	ECM_CONNECT = IPA_WAN_EVENT_MAX,
+	ECM_DISCONNECT,
+	IPA_ECM_EVENT_MAX,
+};
+
+enum ipa_tethering_stats_event {
+	IPA_TETHERING_STATS_UPDATE_STATS = IPA_ECM_EVENT_MAX,
+	IPA_TETHERING_STATS_UPDATE_NETWORK_STATS,
+	IPA_TETHERING_STATS_EVENT_MAX,
+};
+
+enum ipa_quota_event {
+	IPA_QUOTA_REACH = IPA_TETHERING_STATS_EVENT_MAX,
+	IPA_QUOTA_EVENT_MAX,
+};
+
+enum ipa_ssr_event {
+	IPA_SSR_BEFORE_SHUTDOWN = IPA_QUOTA_EVENT_MAX,
+	IPA_SSR_AFTER_POWERUP,
+	IPA_SSR_EVENT_MAX
+};
+
+enum ipa_vlan_l2tp_event {
+	ADD_VLAN_IFACE = IPA_SSR_EVENT_MAX,
+	DEL_VLAN_IFACE,
+	ADD_L2TP_VLAN_MAPPING,
+	DEL_L2TP_VLAN_MAPPING,
+	IPA_VLAN_L2TP_EVENT_MAX,
+};
+
+enum ipa_per_client_stats_event {
+	IPA_PER_CLIENT_STATS_CONNECT_EVENT = IPA_VLAN_L2TP_EVENT_MAX,
+	IPA_PER_CLIENT_STATS_DISCONNECT_EVENT,
+	IPA_PER_CLIENT_STATS_EVENT_MAX,
+};
+
+#define IPA_EVENT_MAX_NUM ((int)IPA_PER_CLIENT_STATS_EVENT_MAX)
+#define IPA_EVENT_MAX ((int)IPA_EVENT_MAX_NUM)
+
+enum ipa_wlan_fw_ssr_event {
+	WLAN_FWR_SSR_BEFORE_SHUTDOWN = IPA_SSR_EVENT_MAX,
+	IPA_WLAN_FW_SSR_EVENT_MAX,
+#define IPA_WLAN_FW_SSR_EVENT_MAX IPA_WLAN_FW_SSR_EVENT_MAX
+};
+
+/**
+ * enum ipa_rm_resource_name - IPA RM clients identification names
+ *
+ * Add new mapping to ipa_rm_prod_index() / ipa_rm_cons_index()
+ * when adding new entry to this enum.
+ */
+enum ipa_rm_resource_name {
+	IPA_RM_RESOURCE_PROD = 0,
+	IPA_RM_RESOURCE_Q6_PROD = IPA_RM_RESOURCE_PROD,
+	IPA_RM_RESOURCE_USB_PROD,
+	IPA_RM_RESOURCE_USB_DPL_DUMMY_PROD,
+	IPA_RM_RESOURCE_HSIC_PROD,
+	IPA_RM_RESOURCE_STD_ECM_PROD,
+	IPA_RM_RESOURCE_RNDIS_PROD,
+	IPA_RM_RESOURCE_WWAN_0_PROD,
+	IPA_RM_RESOURCE_WLAN_PROD,
+	IPA_RM_RESOURCE_ODU_ADAPT_PROD,
+	IPA_RM_RESOURCE_MHI_PROD,
+	IPA_RM_RESOURCE_PROD_MAX,
+
+	IPA_RM_RESOURCE_Q6_CONS = IPA_RM_RESOURCE_PROD_MAX,
+	IPA_RM_RESOURCE_USB_CONS,
+	IPA_RM_RESOURCE_USB_DPL_CONS,
+	IPA_RM_RESOURCE_HSIC_CONS,
+	IPA_RM_RESOURCE_WLAN_CONS,
+	IPA_RM_RESOURCE_APPS_CONS,
+	IPA_RM_RESOURCE_ODU_ADAPT_CONS,
+	IPA_RM_RESOURCE_MHI_CONS,
+	IPA_RM_RESOURCE_MAX
+};
+
+/**
+ * enum ipa_hw_type - IPA hardware version type
+ * @IPA_HW_None: IPA hardware version not defined
+ * @IPA_HW_v1_0: IPA hardware version 1.0
+ * @IPA_HW_v1_1: IPA hardware version 1.1
+ * @IPA_HW_v2_0: IPA hardware version 2.0
+ * @IPA_HW_v2_1: IPA hardware version 2.1
+ * @IPA_HW_v2_5: IPA hardware version 2.5
+ * @IPA_HW_v2_6: IPA hardware version 2.6
+ * @IPA_HW_v2_6L: IPA hardware version 2.6L
+ * @IPA_HW_v3_0: IPA hardware version 3.0
+ * @IPA_HW_v3_1: IPA hardware version 3.1
+ * @IPA_HW_v3_5: IPA hardware version 3.5
+ * @IPA_HW_v3_5_1: IPA hardware version 3.5.1
+ * @IPA_HW_v4_0: IPA hardware version 4.0
+ */
+enum ipa_hw_type {
+	IPA_HW_None = 0,
+	IPA_HW_v1_0 = 1,
+	IPA_HW_v1_1 = 2,
+	IPA_HW_v2_0 = 3,
+	IPA_HW_v2_1 = 4,
+	IPA_HW_v2_5 = 5,
+	IPA_HW_v2_6 = IPA_HW_v2_5,
+	IPA_HW_v2_6L = 6,
+	IPA_HW_v3_0 = 10,
+	IPA_HW_v3_1 = 11,
+	IPA_HW_v3_5 = 12,
+	IPA_HW_v3_5_1 = 13,
+	IPA_HW_v4_0 = 14,
+};
+#define IPA_HW_MAX (IPA_HW_v4_0 + 1)
+
+#define IPA_HW_v4_0 IPA_HW_v4_0
+
+/**
+ * struct ipa_rule_attrib - attributes of a routing/filtering
+ * rule, all in LE
+ * @attrib_mask: what attributes are valid
+ * @src_port_lo: low port of src port range
+ * @src_port_hi: high port of src port range
+ * @dst_port_lo: low port of dst port range
+ * @dst_port_hi: high port of dst port range
+ * @type: ICMP/IGMP type
+ * @code: ICMP/IGMP code
+ * @spi: IPSec SPI
+ * @src_port: exact src port
+ * @dst_port: exact dst port
+ * @meta_data: meta-data val
+ * @meta_data_mask: meta-data mask
+ * @u.v4.tos: type of service
+ * @u.v4.protocol: protocol
+ * @u.v4.src_addr: src address value
+ * @u.v4.src_addr_mask: src address mask
+ * @u.v4.dst_addr: dst address value
+ * @u.v4.dst_addr_mask: dst address mask
+ * @u.v6.tc: traffic class
+ * @u.v6.flow_label: flow label
+ * @u.v6.next_hdr: next header
+ * @u.v6.src_addr: src address val
+ * @u.v6.src_addr_mask: src address mask
+ * @u.v6.dst_addr: dst address val
+ * @u.v6.dst_addr_mask: dst address mask
+ */
+struct ipa_rule_attrib {
+	uint32_t attrib_mask;
+	uint16_t src_port_lo;
+	uint16_t src_port_hi;
+	uint16_t dst_port_lo;
+	uint16_t dst_port_hi;
+	uint8_t type;
+	uint8_t code;
+	uint8_t tos_value;
+	uint8_t tos_mask;
+	uint32_t spi;
+	uint16_t src_port;
+	uint16_t dst_port;
+	uint32_t meta_data;
+	uint32_t meta_data_mask;
+	uint8_t src_mac_addr[ETH_ALEN];
+	uint8_t src_mac_addr_mask[ETH_ALEN];
+	uint8_t dst_mac_addr[ETH_ALEN];
+	uint8_t dst_mac_addr_mask[ETH_ALEN];
+	uint16_t ether_type;
+	union {
+		struct {
+			uint8_t tos;
+			uint8_t protocol;
+			uint32_t src_addr;
+			uint32_t src_addr_mask;
+			uint32_t dst_addr;
+			uint32_t dst_addr_mask;
+		} v4;
+		struct {
+			uint8_t tc;
+			uint32_t flow_label;
+			uint8_t next_hdr;
+			uint32_t src_addr[4];
+			uint32_t src_addr_mask[4];
+			uint32_t dst_addr[4];
+			uint32_t dst_addr_mask[4];
+		} v6;
+	} u;
+};
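+
+/*
+ * Illustrative sketch (not part of this header): attrib_mask selects which
+ * of the fields above are valid, using the IPA_FLT_* bits defined earlier.
+ * A hypothetical rule matching a destination IPv4 subnet and an exact
+ * destination port could be built like this:
+ *
+ *	struct ipa_rule_attrib attrib = { 0 };
+ *
+ *	attrib.attrib_mask = IPA_FLT_DST_ADDR | IPA_FLT_DST_PORT;
+ *	attrib.u.v4.dst_addr = 0xc0a80100;
+ *	attrib.u.v4.dst_addr_mask = 0xffffff00;
+ *	(192.168.1.0/24, assuming the driver's expected byte order)
+ *	attrib.dst_port = 5001;
+ */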
+
+/*! @brief The maximum number of Mask Equal 32 Eqns */
+#define IPA_IPFLTR_NUM_MEQ_32_EQNS 2
+
+/*! @brief The maximum number of IHL offset Mask Equal 32 Eqns */
+#define IPA_IPFLTR_NUM_IHL_MEQ_32_EQNS 2
+
+/*! @brief The maximum number of Mask Equal 128 Eqns */
+#define IPA_IPFLTR_NUM_MEQ_128_EQNS 2
+
+/*! @brief The maximum number of IHL offset Range Check 16 Eqns */
+#define IPA_IPFLTR_NUM_IHL_RANGE_16_EQNS 2
+
+/*! @brief Offset and 16 bit comparison equation */
+struct ipa_ipfltr_eq_16 {
+	int8_t offset;
+	uint16_t value;
+};
+
+/*! @brief Offset and 32 bit comparison equation */
+struct ipa_ipfltr_eq_32 {
+	int8_t offset;
+	uint32_t value;
+};
+
+/*! @brief Offset and 128 bit masked comparison equation */
+struct ipa_ipfltr_mask_eq_128 {
+	int8_t offset;
+	uint8_t mask[16];
+	uint8_t value[16];
+};
+
+/*! @brief Offset and 32 bit masked comparison equation */
+struct ipa_ipfltr_mask_eq_32 {
+	int8_t offset;
+	uint32_t mask;
+	uint32_t value;
+};
+
+/*! @brief Equation for identifying a range. Ranges are inclusive */
+struct ipa_ipfltr_range_eq_16 {
+	int8_t offset;
+	uint16_t range_low;
+	uint16_t range_high;
+};
+
+/*! @brief Rule equations which are set according to DS filter installation */
+struct ipa_ipfltri_rule_eq {
+	/*! 16-bit bitmask indicating which eqs are valid in this rule */
+	uint16_t rule_eq_bitmap;
+	/*! Specifies if a type of service check rule is present */
+	uint8_t tos_eq_present;
+	/*! The value to check against the type of service (ipv4) field */
+	uint8_t tos_eq;
+	/*! Specifies if a protocol check rule is present */
+	uint8_t protocol_eq_present;
+	/*! The value to check against the protocol (ipv4) field */
+	uint8_t protocol_eq;
+	/*! The number of ip header length offset 16 bit range check
+	 * rules in this rule */
+	uint8_t num_ihl_offset_range_16;
+	/*! An array of the registered ip header length offset 16 bit
+	 * range check rules */
+	struct ipa_ipfltr_range_eq_16
+		ihl_offset_range_16[IPA_IPFLTR_NUM_IHL_RANGE_16_EQNS];
+	/*! The number of mask equal 32 rules present in this rule */
+	uint8_t num_offset_meq_32;
+	/*! An array of all the possible mask equal 32 rules in this rule */
+	struct ipa_ipfltr_mask_eq_32
+		offset_meq_32[IPA_IPFLTR_NUM_MEQ_32_EQNS];
+	/*! Specifies if the traffic class rule is present in this rule */
+	uint8_t tc_eq_present;
+	/*! The value to check the traffic class (ipv6) field against */
+	uint8_t tc_eq;
+	/*! Specifies if the flow label rule is present in this rule */
+	uint8_t fl_eq_present;
+	/*! The value to check the flow label (ipv6) field against */
+	uint32_t fl_eq;
+	/*! Specifies if the ip header length offset 16 bit equation is
+	 * present in this rule */
+	uint8_t ihl_offset_eq_16_present;
+	/*! The ip header length offset 16 bit equation */
+	struct ipa_ipfltr_eq_16 ihl_offset_eq_16;
+	/*! Specifies if the ip header length offset 32 bit equation is
+	 * present in this rule */
+	uint8_t ihl_offset_eq_32_present;
+	/*! The ip header length offset 32 bit equation */
+	struct ipa_ipfltr_eq_32 ihl_offset_eq_32;
+	/*! The number of ip header length offset 32 bit mask equations in
+	 * this rule */
+	uint8_t num_ihl_offset_meq_32;
+	/*! The ip header length offset 32 bit mask equation */
+	struct ipa_ipfltr_mask_eq_32
+		ihl_offset_meq_32[IPA_IPFLTR_NUM_IHL_MEQ_32_EQNS];
+	/*! The number of offset 128 bit masked comparison equations in
+	 * this rule */
+	uint8_t num_offset_meq_128;
+	/*! An array of the offset 128 bit masked comparison equations */
+	struct ipa_ipfltr_mask_eq_128
+		offset_meq_128[IPA_IPFLTR_NUM_MEQ_128_EQNS];
+	/*! The metadata 32 bit masked comparison equation present or not */
+	/* Metadata based rules are added internally by IPA driver */
+	uint8_t metadata_meq32_present;
+	/*! The metadata 32 bit masked comparison equation */
+	struct ipa_ipfltr_mask_eq_32 metadata_meq32;
+	/*! Specifies if the Fragment equation is present in this rule */
+	uint8_t ipv4_frag_eq_present;
+};
+
+/**
+ * struct ipa_flt_rule - attributes of a filtering rule
+ * @retain_hdr: bool switch to instruct IPA core to add back to the packet
+ *  the header removed as part of header removal
+ * @to_uc: bool switch to pass packet to micro-controller
+ * @action: action field
+ * @rt_tbl_hdl: handle of table from "get"
+ * @attrib: attributes of the rule
+ * @eq_attrib: attributes of the rule in equation form (valid when
+ * eq_attrib_type is true)
+ * @rt_tbl_idx: index of RT table referred to by filter rule (valid when
+ * eq_attrib_type is true and non-exception action)
+ * @eq_attrib_type: true if equation level form used to specify attributes
+ * @max_prio: bool switch. Is this rule max priority? If so, on a rule hit
+ *  IPA will use this rule and will not look for other rules that may have
+ *  higher priority
+ * @hashable: bool switch. is this rule hashable or not?
+ *  ipa uses hashable rules to cache their hit results to be used in
+ *  consecutive packets
+ * @rule_id: rule_id to be assigned to the filter rule. In case client specifies
+ *  rule_id as 0 the driver will assign a new rule_id
+ * @set_metadata: bool switch. should metadata replacement at the NAT block
+ *  take place?
+ * @pdn_idx: if action is "pass to source/destination NAT" then a comparison
+ * against the PDN index in the matching PDN entry will take place as an
+ * additional condition for NAT hit.
+ */
+struct ipa_flt_rule {
+	uint8_t retain_hdr;
+	uint8_t to_uc;
+	enum ipa_flt_action action;
+	uint32_t rt_tbl_hdl;
+	struct ipa_rule_attrib attrib;
+	struct ipa_ipfltri_rule_eq eq_attrib;
+	uint32_t rt_tbl_idx;
+	uint8_t eq_attrib_type;
+	uint8_t max_prio;
+	uint8_t hashable;
+	uint16_t rule_id;
+	uint8_t set_metadata;
+	uint8_t pdn_idx;
+};
+
+/**
+ * enum ipa_hdr_l2_type - L2 header type
+ * IPA_HDR_L2_NONE: L2 header which isn't Ethernet II and isn't 802_3
+ * IPA_HDR_L2_ETHERNET_II: L2 header of type Ethernet II
+ * IPA_HDR_L2_802_3: L2 header of type 802_3
+ */
+enum ipa_hdr_l2_type {
+	IPA_HDR_L2_NONE,
+	IPA_HDR_L2_ETHERNET_II,
+	IPA_HDR_L2_802_3,
+	IPA_HDR_L2_MAX,
+};
+
+/**
+ * enum ipa_hdr_proc_type - Processing context type
+ * IPA_HDR_PROC_NONE: No processing context
+ * IPA_HDR_PROC_ETHII_TO_ETHII: Process Ethernet II to Ethernet II
+ * IPA_HDR_PROC_ETHII_TO_802_3: Process Ethernet II to 802_3
+ * IPA_HDR_PROC_802_3_TO_ETHII: Process 802_3 to Ethernet II
+ * IPA_HDR_PROC_802_3_TO_802_3: Process 802_3 to 802_3
+ */
+enum ipa_hdr_proc_type {
+	IPA_HDR_PROC_NONE,
+	IPA_HDR_PROC_ETHII_TO_ETHII,
+	IPA_HDR_PROC_ETHII_TO_802_3,
+	IPA_HDR_PROC_802_3_TO_ETHII,
+	IPA_HDR_PROC_802_3_TO_802_3,
+	IPA_HDR_PROC_MAX,
+};
+
+/**
+ * struct ipa_rt_rule - attributes of a routing rule
+ * @dst: dst "client"
+ * @hdr_hdl: handle to the dynamic header
+ *	it is not an index or an offset
+ * @hdr_proc_ctx_hdl: handle to header processing context. if it is provided
+ *	hdr_hdl shall be 0
+ * @attrib: attributes of the rule
+ * @max_prio: bool switch. Is this rule max priority? If so, on a rule hit
+ *  IPA will use this rule and will not look for other rules that may have
+ *  higher priority
+ * @hashable: bool switch. is this rule hashable or not?
+ *  ipa uses hashable rules to cache their hit results to be used in
+ *  consecutive packets
+ * @retain_hdr: bool switch to instruct IPA core to add back to the packet
+ *  the header removed as part of header removal
+ */
+struct ipa_rt_rule {
+	enum ipa_client_type dst;
+	uint32_t hdr_hdl;
+	uint32_t hdr_proc_ctx_hdl;
+	struct ipa_rule_attrib attrib;
+	uint8_t max_prio;
+	uint8_t hashable;
+	uint8_t retain_hdr;
+};
+
+/**
+ * struct ipa_hdr_add - header descriptor includes in and out
+ * parameters
+ * @name: name of the header
+ * @hdr: actual header to be inserted
+ * @hdr_len: size of above header
+ * @type: l2 header type
+ * @is_partial: header not fully specified
+ * @hdr_hdl: out parameter, handle to header, valid when status is 0
+ * @status:	out parameter, status of header add operation,
+ *		0 for success,
+ *		-1 for failure
+ * @is_eth2_ofst_valid: is eth2_ofst field valid?
+ * @eth2_ofst: offset to start of Ethernet-II/802.3 header
+ */
+struct ipa_hdr_add {
+	char name[IPA_RESOURCE_NAME_MAX];
+	uint8_t hdr[IPA_HDR_MAX_SIZE];
+	uint8_t hdr_len;
+	enum ipa_hdr_l2_type type;
+	uint8_t is_partial;
+	uint32_t hdr_hdl;
+	int status;
+	uint8_t is_eth2_ofst_valid;
+	uint16_t eth2_ofst;
+};
+
+/**
+ * struct ipa_ioc_add_hdr - header addition parameters (support
+ * multiple headers and commit)
+ * @commit: should headers be written to IPA HW also?
+ * @num_hdrs: num of headers that follow
+ * @ipa_hdr_add hdr:	all headers need to go here back to
+ *			back, no pointers
+ */
+struct ipa_ioc_add_hdr {
+	uint8_t commit;
+	uint8_t num_hdrs;
+	struct ipa_hdr_add hdr[0];
+};
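+
+/*
+ * Illustrative sketch (not part of this header): because hdr[] is a
+ * flexible payload, the caller allocates one contiguous block sized for
+ * the struct plus all descriptors. The values below are hypothetical;
+ * ipa_fd is a descriptor open on IPA_DEV_NAME.
+ *
+ *	int n = 2;
+ *	struct ipa_ioc_add_hdr *req;
+ *
+ *	req = calloc(1, sizeof(*req) + n * sizeof(struct ipa_hdr_add));
+ *	req->commit = 1;
+ *	req->num_hdrs = n;
+ *	(fill in req->hdr[0] and req->hdr[1], then:)
+ *	ioctl(ipa_fd, IPA_IOC_ADD_HDR, req);
+ */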
+
+/**
+ * struct ipa_hdr_proc_ctx_add - processing context descriptor includes
+ * in and out parameters
+ * @type: processing context type
+ * @hdr_hdl: in parameter, handle to header
+ * @proc_ctx_hdl: out parameter, handle to proc_ctx, valid when status is 0
+ * @status:	out parameter, status of header add operation,
+ *		0 for success,
+ *		-1 for failure
+ */
+struct ipa_hdr_proc_ctx_add {
+	enum ipa_hdr_proc_type type;
+	uint32_t hdr_hdl;
+	uint32_t proc_ctx_hdl;
+	int status;
+};
+
+/**
+ * struct ipa_ioc_add_hdr_proc_ctx - processing context addition parameters
+ * (supports multiple processing contexts and commit)
+ * @commit: should processing contexts be written to IPA HW also?
+ * @num_proc_ctxs: num of processing contexts that follow
+ * @proc_ctx:	all processing contexts need to go here back to
+ *			back, no pointers
+ */
+struct ipa_ioc_add_hdr_proc_ctx {
+	uint8_t commit;
+	uint8_t num_proc_ctxs;
+	struct ipa_hdr_proc_ctx_add proc_ctx[0];
+};
+
+/**
+ * struct ipa_ioc_copy_hdr - retrieve a copy of the specified
+ * header - caller can then derive the complete header
+ * @name: name of the header resource
+ * @hdr:	out parameter, contents of specified header,
+ *	valid only when ioctl return val is non-negative
+ * @hdr_len: out parameter, size of above header
+ *	valid only when ioctl return val is non-negative
+ * @type: l2 header type
+ *	valid only when ioctl return val is non-negative
+ * @is_partial:	out parameter, indicates whether specified header is partial
+ *		valid only when ioctl return val is non-negative
+ * @is_eth2_ofst_valid: is eth2_ofst field valid?
+ * @eth2_ofst: offset to start of Ethernet-II/802.3 header
+ */
+struct ipa_ioc_copy_hdr {
+	char name[IPA_RESOURCE_NAME_MAX];
+	uint8_t hdr[IPA_HDR_MAX_SIZE];
+	uint8_t hdr_len;
+	enum ipa_hdr_l2_type type;
+	uint8_t is_partial;
+	uint8_t is_eth2_ofst_valid;
+	uint16_t eth2_ofst;
+};
+
+/**
+ * struct ipa_ioc_get_hdr - header entry lookup parameters; if the lookup is
+ * successful, the caller must call put to release the reference count when done
+ * @name: name of the header resource
+ * @hdl:	out parameter, handle of header entry
+ *		valid only when ioctl return val is non-negative
+ */
+struct ipa_ioc_get_hdr {
+	char name[IPA_RESOURCE_NAME_MAX];
+	uint32_t hdl;
+};
+
+/**
+ * struct ipa_hdr_del - header descriptor includes in and out
+ * parameters
+ *
+ * @hdl: handle returned from header add operation
+ * @status:	out parameter, status of header remove operation,
+ *		0 for success,
+ *		-1 for failure
+ */
+struct ipa_hdr_del {
+	uint32_t hdl;
+	int status;
+};
+
+/**
+ * struct ipa_ioc_del_hdr - header deletion parameters (support
+ * multiple headers and commit)
+ * @commit: should headers be removed from IPA HW also?
+ * @num_hdls: num of headers being removed
+ * @ipa_hdr_del hdl: all handles need to go here back to back, no pointers
+ */
+struct ipa_ioc_del_hdr {
+	uint8_t commit;
+	uint8_t num_hdls;
+	struct ipa_hdr_del hdl[0];
+};
+
+/**
+ * struct ipa_hdr_proc_ctx_del - processing context descriptor includes
+ * in and out parameters
+ * @hdl: handle returned from processing context add operation
+ * @status:	out parameter, status of header remove operation,
+ *		0 for success,
+ *		-1 for failure
+ */
+struct ipa_hdr_proc_ctx_del {
+	uint32_t hdl;
+	int status;
+};
+
+/**
+ * struct ipa_ioc_del_hdr_proc_ctx - processing context deletion parameters
+ * (supports multiple contexts and commit)
+ * @commit: should processing contexts be removed from IPA HW also?
+ * @num_hdls: num of processing contexts being removed
+ * @ipa_hdr_proc_ctx_del hdl:	all handles need to go here back to back,
+ *				no pointers
+ */
+struct ipa_ioc_del_hdr_proc_ctx {
+	uint8_t commit;
+	uint8_t num_hdls;
+	struct ipa_hdr_proc_ctx_del hdl[0];
+};
+
+/**
+ * struct ipa_rt_rule_add - routing rule descriptor includes in
+ * and out parameters
+ * @rule: actual rule to be added
+ * @at_rear:	add at back of routing table, it is NOT possible to add rules at
+ *		the rear of the "default" routing tables
+ * @rt_rule_hdl: output parameter, handle to rule, valid when status is 0
+ * @status:	output parameter, status of routing rule add operation,
+ *		0 for success,
+ *		-1 for failure
+ */
+struct ipa_rt_rule_add {
+	struct ipa_rt_rule rule;
+	uint8_t at_rear;
+	uint32_t rt_rule_hdl;
+	int status;
+};
+
+/**
+ * struct ipa_ioc_add_rt_rule - routing rule addition parameters (supports
+ * multiple rules and commit);
+ *
+ * all rules MUST be added to same table
+ * @commit: should rules be written to IPA HW also?
+ * @ip: IP family of rule
+ * @rt_tbl_name: name of routing table resource
+ * @num_rules: number of routing rules that follow
+ * @ipa_rt_rule_add rules: all rules need to go back to back here, no pointers
+ */
+struct ipa_ioc_add_rt_rule {
+	uint8_t commit;
+	enum ipa_ip_type ip;
+	char rt_tbl_name[IPA_RESOURCE_NAME_MAX];
+	uint8_t num_rules;
+	struct ipa_rt_rule_add rules[0];
+};
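+
+/*
+ * Illustrative sketch (not part of this header): adding one routing rule to
+ * the default v4 table follows the same "back to back, no pointers" layout.
+ * ipa_fd is a hypothetical descriptor open on IPA_DEV_NAME. Note at_rear
+ * stays 0 since rules cannot be appended at the rear of a default table.
+ *
+ *	struct ipa_ioc_add_rt_rule *req;
+ *
+ *	req = calloc(1, sizeof(*req) + sizeof(struct ipa_rt_rule_add));
+ *	req->commit = 1;
+ *	req->ip = IPA_IP_v4;
+ *	strncpy(req->rt_tbl_name, IPA_DFLT_RT_TBL_NAME,
+ *		IPA_RESOURCE_NAME_MAX);
+ *	req->num_rules = 1;
+ *	req->rules[0].rule.dst = IPA_CLIENT_APPS_LAN_CONS;
+ *	req->rules[0].at_rear = 0;
+ *	ioctl(ipa_fd, IPA_IOC_ADD_RT_RULE, req);
+ */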
+
+/**
+ * struct ipa_ioc_add_rt_rule_after - routing rule addition after a specific
+ * rule parameters(supports multiple rules and commit);
+ *
+ * all rules MUST be added to same table
+ * @commit: should rules be written to IPA HW also?
+ * @ip: IP family of rule
+ * @rt_tbl_name: name of routing table resource
+ * @num_rules: number of routing rules that follow
+ * @add_after_hdl: the rules will be added after this specific rule
+ * @ipa_rt_rule_add rules: all rules need to go back to back here, no pointers
+ *			   at_rear field will be ignored when using this IOCTL
+ */
+struct ipa_ioc_add_rt_rule_after {
+	uint8_t commit;
+	enum ipa_ip_type ip;
+	char rt_tbl_name[IPA_RESOURCE_NAME_MAX];
+	uint8_t num_rules;
+	uint32_t add_after_hdl;
+	struct ipa_rt_rule_add rules[0];
+};
+
+/**
+ * struct ipa_rt_rule_mdfy - routing rule descriptor includes
+ * in and out parameters
+ * @rule: new rule to replace the existing one
+ * @rt_rule_hdl: handle of the rule to be modified
+ * @status:	output parameter, status of routing rule modify operation,
+ *		0 for success,
+ *		-1 for failure
+ *
+ */
+struct ipa_rt_rule_mdfy {
+	struct ipa_rt_rule rule;
+	uint32_t rt_rule_hdl;
+	int status;
+};
+
+/**
+ * struct ipa_ioc_mdfy_rt_rule - routing rule modify parameters (supports
+ * multiple rules and commit)
+ * @commit: should rules be written to IPA HW also?
+ * @ip: IP family of rule
+ * @num_rules: number of routing rules that follow
+ * @rules: all rules need to go back to back here, no pointers
+ */
+struct ipa_ioc_mdfy_rt_rule {
+	uint8_t commit;
+	enum ipa_ip_type ip;
+	uint8_t num_rules;
+	struct ipa_rt_rule_mdfy rules[0];
+};
+
+/**
+ * struct ipa_rt_rule_del - routing rule descriptor includes in
+ * and out parameters
+ * @hdl: handle returned from route rule add operation
+ * @status:	output parameter, status of route rule delete operation,
+ *		0 for success,
+ *		-1 for failure
+ */
+struct ipa_rt_rule_del {
+	uint32_t hdl;
+	int status;
+};
+
+/**
+ * struct ipa_rt_rule_add_ext - routing rule descriptor includes in
+ * and out parameters
+ * @rule: actual rule to be added
+ * @at_rear:	add at back of routing table, it is NOT possible to add rules at
+ *		the rear of the "default" routing tables
+ * @rt_rule_hdl: output parameter, handle to rule, valid when status is 0
+ * @status:	output parameter, status of routing rule add operation,
+ *		0 for success,
+ *		-1 for failure
+ * @rule_id: rule_id to be assigned to the routing rule. In case client
+ *  specifies rule_id as 0 the driver will assign a new rule_id
+ */
+struct ipa_rt_rule_add_ext {
+	struct ipa_rt_rule rule;
+	uint8_t at_rear;
+	uint32_t rt_rule_hdl;
+	int status;
+	uint16_t rule_id;
+};
+
+/**
+ * struct ipa_ioc_add_rt_rule_ext - routing rule addition parameters (supports
+ * multiple rules and commit with rule_id);
+ *
+ * all rules MUST be added to same table
+ * @commit: should rules be written to IPA HW also?
+ * @ip: IP family of rule
+ * @rt_tbl_name: name of routing table resource
+ * @num_rules: number of routing rules that follow
+ * @ipa_rt_rule_add_ext rules: all rules need to go back to back here,
+ *  no pointers
+ */
+struct ipa_ioc_add_rt_rule_ext {
+	uint8_t commit;
+	enum ipa_ip_type ip;
+	char rt_tbl_name[IPA_RESOURCE_NAME_MAX];
+	uint8_t num_rules;
+	struct ipa_rt_rule_add_ext rules[0];
+};
+
+/**
+ * struct ipa_ioc_del_rt_rule - routing rule deletion parameters (supports
+ * multiple headers and commit)
+ * @commit: should rules be removed from IPA HW also?
+ * @ip: IP family of rules
+ * @num_hdls: num of rules being removed
+ * @ipa_rt_rule_del hdl: all handles need to go back to back here, no pointers
+ */
+struct ipa_ioc_del_rt_rule {
+	uint8_t commit;
+	enum ipa_ip_type ip;
+	uint8_t num_hdls;
+	struct ipa_rt_rule_del hdl[0];
+};
+
+/**
+ * struct ipa_ioc_get_rt_tbl_indx - routing table index lookup parameters
+ * @ip: IP family of table
+ * @name: name of routing table resource
+ * @idx:	output parameter, routing table index, valid only when ioctl
+ *		return val is non-negative
+ */
+struct ipa_ioc_get_rt_tbl_indx {
+	enum ipa_ip_type ip;
+	char name[IPA_RESOURCE_NAME_MAX];
+	uint32_t idx;
+};
+
+/**
+ * struct ipa_flt_rule_add - filtering rule descriptor includes
+ * in and out parameters
+ * @rule: actual rule to be added
+ * @at_rear: add at back of filtering table?
+ * @flt_rule_hdl: out parameter, handle to rule, valid when status is 0
+ * @status:	output parameter, status of filtering rule add operation,
+ *		0 for success,
+ *		-1 for failure
+ *
+ */
+struct ipa_flt_rule_add {
+	struct ipa_flt_rule rule;
+	uint8_t at_rear;
+	uint32_t flt_rule_hdl;
+	int status;
+};
+
+/**
+ * struct ipa_ioc_add_flt_rule - filtering rule addition parameters (supports
+ * multiple rules and commit)
+ * all rules MUST be added to same table
+ * @commit: should rules be written to IPA HW also?
+ * @ip: IP family of rule
+ * @ep:	which "client's" pipe does this rule apply to?
+ *	valid only when global is 0
+ * @global: does this apply to global filter table of specific IP family
+ * @num_rules: number of filtering rules that follow
+ * @rules: all rules need to go back to back here, no pointers
+ */
+struct ipa_ioc_add_flt_rule {
+	uint8_t commit;
+	enum ipa_ip_type ip;
+	enum ipa_client_type ep;
+	uint8_t global;
+	uint8_t num_rules;
+	struct ipa_flt_rule_add rules[0];
+};
+
+/**
+ * struct ipa_ioc_add_flt_rule_after - filtering rule addition after specific
+ * rule parameters (supports multiple rules and commit)
+ * all rules MUST be added to same table
+ * @commit: should rules be written to IPA HW also?
+ * @ip: IP family of rule
+ * @ep:	which "client's" pipe does this rule apply to?
+ * @num_rules: number of filtering rules that follow
+ * @add_after_hdl: rules will be added after the rule with this handle
+ * @rules: all rules need to go back to back here, no pointers. at_rear field
+ *	   is ignored when using this IOCTL
+ */
+struct ipa_ioc_add_flt_rule_after {
+	uint8_t commit;
+	enum ipa_ip_type ip;
+	enum ipa_client_type ep;
+	uint8_t num_rules;
+	uint32_t add_after_hdl;
+	struct ipa_flt_rule_add rules[0];
+};
+
+/**
+ * struct ipa_flt_rule_mdfy - filtering rule descriptor includes
+ * in and out parameters
+ * @rule: new rule to replace the existing one
+ * @rule_hdl: handle of the rule to be modified
+ * @status:	output parameter, status of filtering rule modify operation,
+ *		0 for success,
+ *		-1 for failure
+ *
+ */
+struct ipa_flt_rule_mdfy {
+	struct ipa_flt_rule rule;
+	uint32_t rule_hdl;
+	int status;
+};
+
+/**
+ * struct ipa_ioc_mdfy_flt_rule - filtering rule modify parameters (supports
+ * multiple rules and commit)
+ * @commit: should rules be written to IPA HW also?
+ * @ip: IP family of rule
+ * @num_rules: number of filtering rules that follow
+ * @rules: all rules need to go back to back here, no pointers
+ */
+struct ipa_ioc_mdfy_flt_rule {
+	uint8_t commit;
+	enum ipa_ip_type ip;
+	uint8_t num_rules;
+	struct ipa_flt_rule_mdfy rules[0];
+};
+
+/**
+ * struct ipa_flt_rule_del - filtering rule descriptor includes
+ * in and out parameters
+ *
+ * @hdl: handle returned from filtering rule add operation
+ * @status:	output parameter, status of filtering rule delete operation,
+ *		0 for success,
+ *		-1 for failure
+ */
+struct ipa_flt_rule_del {
+	uint32_t hdl;
+	int status;
+};
+
+/**
+ * struct ipa_ioc_del_flt_rule - filtering rule deletion parameters (supports
+ * multiple headers and commit)
+ * @commit: should rules be removed from IPA HW also?
+ * @ip: IP family of rules
+ * @num_hdls: num of rules being removed
+ * @hdl: all handles need to go back to back here, no pointers
+ */
+struct ipa_ioc_del_flt_rule {
+	uint8_t commit;
+	enum ipa_ip_type ip;
+	uint8_t num_hdls;
+	struct ipa_flt_rule_del hdl[0];
+};
+
+/**
+ * struct ipa_ioc_get_rt_tbl - routing table lookup parameters; if the lookup
+ * is successful, the caller must call put to release the reference
+ * count when done
+ * @ip: IP family of table
+ * @name: name of routing table resource
+ * @hdl:	output parameter, handle of routing table, valid only when ioctl
+ *		return val is non-negative
+ */
+struct ipa_ioc_get_rt_tbl {
+	enum ipa_ip_type ip;
+	char name[IPA_RESOURCE_NAME_MAX];
+	uint32_t hdl;
+};
+
+/**
+ * struct ipa_ioc_query_intf - used to lookup number of tx and
+ * rx properties of interface
+ * @name: name of interface
+ * @num_tx_props:	output parameter, number of tx properties
+ *			valid only when ioctl return val is non-negative
+ * @num_rx_props:	output parameter, number of rx properties
+ *			valid only when ioctl return val is non-negative
+ * @num_ext_props:	output parameter, number of ext properties
+ *			valid only when ioctl return val is non-negative
+ * @excp_pipe:		exception packets of this interface should be
+ *			routed to this pipe
+ */
+struct ipa_ioc_query_intf {
+	char name[IPA_RESOURCE_NAME_MAX];
+	uint32_t num_tx_props;
+	uint32_t num_rx_props;
+	uint32_t num_ext_props;
+	enum ipa_client_type excp_pipe;
+};
+
+/**
+ * struct ipa_ioc_tx_intf_prop - interface tx property
+ * @ip: IP family of routing rule
+ * @attrib: routing rule
+ * @dst_pipe: routing output pipe
+ * @alt_dst_pipe: alternate routing output pipe
+ * @hdr_name: name of associated header if any, empty string when no header
+ * @hdr_l2_type: type of associated header if any, use NONE when no header
+ */
+struct ipa_ioc_tx_intf_prop {
+	enum ipa_ip_type ip;
+	struct ipa_rule_attrib attrib;
+	enum ipa_client_type dst_pipe;
+	enum ipa_client_type alt_dst_pipe;
+	char hdr_name[IPA_RESOURCE_NAME_MAX];
+	enum ipa_hdr_l2_type hdr_l2_type;
+};
+
+/**
+ * struct ipa_ioc_query_intf_tx_props - interface tx properties
+ * @name: name of interface
+ * @num_tx_props: number of TX properties
+ * @tx[0]: output parameter, the tx properties go here back to back
+ */
+struct ipa_ioc_query_intf_tx_props {
+	char name[IPA_RESOURCE_NAME_MAX];
+	uint32_t num_tx_props;
+	struct ipa_ioc_tx_intf_prop tx[0];
+};
+
+/**
+ * struct ipa_ioc_ext_intf_prop - interface extended property
+ * @ip: IP family of routing rule
+ * @eq_attrib: attributes of the rule in equation form
+ * @action: action field
+ * @rt_tbl_idx: index of RT table referred to by filter rule
+ * @mux_id: MUX_ID
+ * @filter_hdl: handle of filter (as specified by provider of filter rule)
+ * @is_xlat_rule: is this an xlat flt rule or not
+ * @rule_id: rule_id to be assigned to the rule
+ * @is_rule_hashable: is this rule hashable or not
+ */
+struct ipa_ioc_ext_intf_prop {
+	enum ipa_ip_type ip;
+	struct ipa_ipfltri_rule_eq eq_attrib;
+	enum ipa_flt_action action;
+	uint32_t rt_tbl_idx;
+	uint8_t mux_id;
+	uint32_t filter_hdl;
+	uint8_t is_xlat_rule;
+	uint32_t rule_id;
+	uint8_t is_rule_hashable;
+};
+
+/**
+ * struct ipa_ioc_query_intf_ext_props - interface ext properties
+ * @name: name of interface
+ * @num_ext_props: number of EXT properties
+ * @ext[0]: output parameter, the ext properties go here back to back
+ */
+struct ipa_ioc_query_intf_ext_props {
+	char name[IPA_RESOURCE_NAME_MAX];
+	uint32_t num_ext_props;
+	struct ipa_ioc_ext_intf_prop ext[0];
+};
+
+/**
+ * struct ipa_ioc_rx_intf_prop - interface rx property
+ * @ip: IP family of filtering rule
+ * @attrib: filtering rule
+ * @src_pipe: input pipe
+ * @hdr_l2_type: type of associated header if any, use NONE when no header
+ */
+struct ipa_ioc_rx_intf_prop {
+	enum ipa_ip_type ip;
+	struct ipa_rule_attrib attrib;
+	enum ipa_client_type src_pipe;
+	enum ipa_hdr_l2_type hdr_l2_type;
+};
+
+/**
+ * struct ipa_ioc_query_intf_rx_props - interface rx properties
+ * @name: name of interface
+ * @num_rx_props: number of RX properties
+ * @rx: output parameter, the rx properties go here back to back
+ */
+struct ipa_ioc_query_intf_rx_props {
+	char name[IPA_RESOURCE_NAME_MAX];
+	uint32_t num_rx_props;
+	struct ipa_ioc_rx_intf_prop rx[0];
+};
+
+/**
+ * struct ipa_ioc_nat_alloc_mem - nat table memory allocation
+ * properties
+ * @dev_name: input parameter, the name of table
+ * @size: input parameter, size of table in bytes
+ * @offset: output parameter, offset into page in case of system memory
+ */
+struct ipa_ioc_nat_alloc_mem {
+	char dev_name[IPA_RESOURCE_NAME_MAX];
+	size_t size;
+	off_t offset;
+};
+
+/**
+ * struct ipa_ioc_nat_ipv6ct_table_alloc - NAT/IPv6CT table memory allocation
+ * properties
+ * @size: input parameter, size of table in bytes
+ * @offset: output parameter, offset into page in case of system memory
+ */
+struct ipa_ioc_nat_ipv6ct_table_alloc {
+	size_t size;
+	off_t offset;
+};
+
+/**
+ * struct ipa_ioc_v4_nat_init - nat table initialization parameters
+ * @tbl_index: input parameter, index of the table
+ * @ipv4_rules_offset: input parameter, ipv4 rules address offset
+ * @expn_rules_offset: input parameter, ipv4 expansion rules address offset
+ * @index_offset: input parameter, index rules offset
+ * @index_expn_offset: input parameter, index expansion rules offset
+ * @table_entries: input parameter, ipv4 rules table number of entries
+ * @expn_table_entries: input parameter, ipv4 expansion rules table number of
+ *                      entries
+ * @ip_addr: input parameter, public ip address
+ */
+struct ipa_ioc_v4_nat_init {
+	uint8_t tbl_index;
+	uint32_t ipv4_rules_offset;
+	uint32_t expn_rules_offset;
+
+	uint32_t index_offset;
+	uint32_t index_expn_offset;
+
+	uint16_t table_entries;
+	uint16_t expn_table_entries;
+	uint32_t ip_addr;
+};
+
+/**
+ * struct ipa_ioc_ipv6ct_init - IPv6CT table initialization parameters
+ * @tbl_index: input parameter, index of the table
+ * @base_table_offset: input parameter, IPv6CT base table address offset
+ * @expn_table_offset: input parameter, IPv6CT expansion table address offset
+ * @table_entries: input parameter, IPv6CT table number of entries
+ * @expn_table_entries: input parameter, IPv6CT expansion table number of
+ *                      entries
+ */
+struct ipa_ioc_ipv6ct_init {
+	uint8_t tbl_index;
+	uint32_t base_table_offset;
+	uint32_t expn_table_offset;
+	uint16_t table_entries;
+	uint16_t expn_table_entries;
+};
+
+/**
+ * struct ipa_ioc_v4_nat_del - nat table delete parameter
+ * @table_index: input parameter, index of the table
+ * @public_ip_addr: input parameter, public ip address
+ */
+struct ipa_ioc_v4_nat_del {
+	uint8_t table_index;
+	uint32_t public_ip_addr;
+};
+
+/**
+ * struct ipa_ioc_nat_ipv6ct_table_del - NAT/IPv6CT table delete parameter
+ * @table_index: input parameter, index of the table
+ */
+struct ipa_ioc_nat_ipv6ct_table_del {
+	uint8_t table_index;
+};
+
+/**
+ * struct ipa_ioc_nat_dma_one - nat/ipv6ct dma command parameter
+ * @table_index: input parameter, index of the table
+ * @base_addr:	type of table, from which the base address of the table
+ *		can be inferred
+ * @offset: destination offset within the NAT table
+ * @data: data to be written.
+ */
+struct ipa_ioc_nat_dma_one {
+	uint8_t table_index;
+	uint8_t base_addr;
+
+	uint32_t offset;
+	uint16_t data;
+};
+
+/**
+ * struct ipa_ioc_nat_dma_cmd - To hold multiple nat/ipv6ct dma commands
+ * @entries: number of dma commands in use
+ * @dma: data pointer to the dma commands
+ */
+struct ipa_ioc_nat_dma_cmd {
+	uint8_t entries;
+	struct ipa_ioc_nat_dma_one dma[0];
+};
+
+/**
+ * struct ipa_ioc_nat_pdn_entry - PDN entry modification data
+ * @pdn_index: index of the entry in the PDN config table to be changed
+ * @public_ip: PDN's public ip
+ * @src_metadata: PDN's source NAT metadata for metadata replacement
+ * @dst_metadata: PDN's destination NAT metadata for metadata replacement
+ */
+struct ipa_ioc_nat_pdn_entry {
+	uint8_t pdn_index;
+	uint32_t public_ip;
+	uint32_t src_metadata;
+	uint32_t dst_metadata;
+};
+
+/**
+ * struct ipa_ioc_vlan_iface_info - add vlan interface
+ * @name: interface name
+ * @vlan_id: VLAN ID
+ */
+struct ipa_ioc_vlan_iface_info {
+	char name[IPA_RESOURCE_NAME_MAX];
+	uint8_t vlan_id;
+};
+
+/**
+ * struct ipa_ioc_l2tp_vlan_mapping_info - l2tp->vlan mapping info
+ * @iptype: l2tp tunnel IP type
+ * @l2tp_iface_name: l2tp interface name
+ * @l2tp_session_id: l2tp session id
+ * @vlan_iface_name: vlan interface name
+ */
+struct ipa_ioc_l2tp_vlan_mapping_info {
+	enum ipa_ip_type iptype;
+	char l2tp_iface_name[IPA_RESOURCE_NAME_MAX];
+	uint8_t l2tp_session_id;
+	char vlan_iface_name[IPA_RESOURCE_NAME_MAX];
+};
+
+/**
+ * struct ipa_msg_meta - Format of the message meta-data.
+ * @msg_type: the type of the message
+ * @rsvd: reserved bits for future use.
+ * @msg_len: the length of the message in bytes
+ *
+ * For push model:
+ * Client in user-space should issue a read on the device (/dev/ipa) with a
+ * sufficiently large buffer in a continuous loop; the call will block when
+ * there is no message to read. Upon return, the client can read the
+ * ipa_msg_meta from the start of the buffer to find out the type and length
+ * of the message.
+ * size of buffer supplied >= (size of largest message + size of metadata)
+ *
+ * For pull model:
+ * Client in user-space can also issue a pull msg IOCTL to device (/dev/ipa)
+ * with a payload containing space for the ipa_msg_meta and the message specific
+ * payload length.
+ * size of buffer supplied == (len of specific message + size of metadata)
+ */
+struct ipa_msg_meta {
+	uint8_t msg_type;
+	uint8_t rsvd;
+	uint16_t msg_len;
+};
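+
+/*
+ * Illustrative sketch (not part of this header) of the push model described
+ * above: ipa_fd and BUF_SZ are hypothetical; BUF_SZ must be at least the
+ * size of the largest message plus sizeof(struct ipa_msg_meta).
+ *
+ *	char buf[BUF_SZ];
+ *	struct ipa_msg_meta *meta;
+ *
+ *	for (;;) {
+ *		if (read(ipa_fd, buf, sizeof(buf)) < 0)
+ *			break;            (blocks until a message arrives)
+ *		meta = (struct ipa_msg_meta *)buf;
+ *		(a payload of meta->msg_len bytes follows meta;
+ *		 dispatch on meta->msg_type)
+ *	}
+ */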
+
+/**
+ * struct ipa_wlan_msg - To hold information about wlan client
+ * @name: name of the wlan interface
+ * @mac_addr: mac address of wlan client
+ *
+ * wlan drivers need to pass the name of the wlan iface and the mac address
+ * of the wlan client along with ipa_wlan_event, whenever a wlan client is
+ * connected/disconnected/moves to power save/comes out of power save
+ */
+struct ipa_wlan_msg {
+	char name[IPA_RESOURCE_NAME_MAX];
+	uint8_t mac_addr[IPA_MAC_ADDR_SIZE];
+};
+
+/**
+ * enum ipa_wlan_hdr_attrib_type - attribute type
+ * in wlan client header
+ *
+ * WLAN_HDR_ATTRIB_MAC_ADDR: attrib type mac address
+ * WLAN_HDR_ATTRIB_STA_ID: attrib type station id
+ */
+enum ipa_wlan_hdr_attrib_type {
+	WLAN_HDR_ATTRIB_MAC_ADDR,
+	WLAN_HDR_ATTRIB_STA_ID
+};
+
+/**
+ * struct ipa_wlan_hdr_attrib_val - header attribute value
+ * @attrib_type: type of attribute
+ * @offset: offset of attribute within header
+ * @u.mac_addr: mac address
+ * @u.sta_id: station id
+ */
+struct ipa_wlan_hdr_attrib_val {
+	enum ipa_wlan_hdr_attrib_type attrib_type;
+	uint8_t offset;
+	union {
+		uint8_t mac_addr[IPA_MAC_ADDR_SIZE];
+		uint8_t sta_id;
+	} u;
+};
+
+/**
+ * struct ipa_wlan_msg_ex - To hold information about wlan client
+ * @name: name of the wlan interface
+ * @num_of_attribs: number of attributes
+ * @attrib_val: holds attribute values
+ *
+ * wlan drivers need to pass the name of the wlan iface and the mac address
+ * of the wlan client or the station id along with ipa_wlan_event,
+ * whenever a wlan client is connected/disconnected/moves to
+ * power save/comes out of power save
+ */
+struct ipa_wlan_msg_ex {
+	char name[IPA_RESOURCE_NAME_MAX];
+	uint8_t num_of_attribs;
+	struct ipa_wlan_hdr_attrib_val attribs[0];
+};
+
+/**
+ * struct ipa_ecm_msg - To hold information about ecm client
+ * @name: name of the ecm interface
+ * @ifindex: interface index of the ecm interface
+ */
+struct ipa_ecm_msg {
+	char name[IPA_RESOURCE_NAME_MAX];
+	int ifindex;
+};
+
+/**
+ * struct ipa_wan_msg - To hold information about wan client
+ * @upstream_ifname: name of the upstream (wan) interface
+ * @tethered_ifname: name of the tethered interface
+ * @ip: IP address family of the gateway address
+ * @ipv4_addr_gw: IPv4 gateway address
+ * @ipv6_addr_gw: IPv6 gateway address
+ *
+ * CnE needs to pass the name of the default wan iface when
+ * connected/disconnected. CnE needs to pass the gw info in wlan AP+STA mode.
+ * netmgr needs to pass the name of the wan eMBMS iface when connected.
+ */
+struct ipa_wan_msg {
+	char upstream_ifname[IPA_RESOURCE_NAME_MAX];
+	char tethered_ifname[IPA_RESOURCE_NAME_MAX];
+	enum ipa_ip_type ip;
+	uint32_t ipv4_addr_gw;
+	uint32_t ipv6_addr_gw[IPA_WAN_MSG_IPv6_ADDR_GW_LEN];
+};
+
+/**
+ * struct ipa_ioc_rm_dependency - parameters for add/delete dependency
+ * @resource_name: name of dependent resource
+ * @depends_on_name: name of its dependency
+ */
+struct ipa_ioc_rm_dependency {
+	enum ipa_rm_resource_name resource_name;
+	enum ipa_rm_resource_name depends_on_name;
+};
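+
+/*
+ * Illustrative sketch (not part of this header): making the USB producer
+ * depend on the Q6 consumer via the RM dependency ioctl defined below.
+ * ipa_fd is a hypothetical descriptor open on IPA_DEV_NAME.
+ *
+ *	struct ipa_ioc_rm_dependency dep = {
+ *		.resource_name = IPA_RM_RESOURCE_USB_PROD,
+ *		.depends_on_name = IPA_RM_RESOURCE_Q6_CONS,
+ *	};
+ *	ioctl(ipa_fd, IPA_IOC_RM_ADD_DEPENDENCY, &dep);
+ */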
+
+/**
+ * struct ipa_ioc_generate_flt_eq - filter equation generation parameters
+ * @ip: IP family of the rule
+ * @attrib: rule attributes to be converted
+ * @eq_attrib: out parameter, the attributes in equation form
+ */
+struct ipa_ioc_generate_flt_eq {
+	enum ipa_ip_type ip;
+	struct ipa_rule_attrib attrib;
+	struct ipa_ipfltri_rule_eq eq_attrib;
+};
+
+/**
+ * struct ipa_ioc_write_qmapid - to write mux id to endpoint meta register
+ * @mux_id: mux id of wan
+ */
+struct ipa_ioc_write_qmapid {
+	enum ipa_client_type client;
+	uint8_t qmap_id;
+};
+
+enum ipacm_client_enum {
+	IPACM_CLIENT_USB = 1,
+	IPACM_CLIENT_WLAN,
+	IPACM_CLIENT_MAX
+};
+
+enum ipacm_per_client_device_type {
+	IPACM_CLIENT_DEVICE_TYPE_USB = 0,
+	IPACM_CLIENT_DEVICE_TYPE_WLAN = 1,
+	IPACM_CLIENT_DEVICE_TYPE_ETH = 2
+};
+
+/**
+ * max number of device types supported.
+ */
+#define IPACM_MAX_CLIENT_DEVICE_TYPES 3
+
+/**
+ * struct ipa_lan_client_msg - lan client information message
+ * @lanIface: name of the lan interface
+ * @mac: mac address of the client
+ */
+struct ipa_lan_client_msg {
+	char lanIface[IPA_RESOURCE_NAME_MAX];
+	uint8_t mac[IPA_MAC_ADDR_SIZE];
+};
+
+/**
+ * struct ipa_lan_client - lan client data
+ * @mac: MAC Address of the client.
+ * @client_idx: Client Index.
+ * @inited: Bool to indicate whether client info is set.
+ */
+struct ipa_lan_client {
+	uint8_t mac[IPA_MAC_ADDR_SIZE];
+	int8_t client_idx;
+	uint8_t inited;
+};
+
+/**
+ * struct ipa_tether_device_info - tether device info indicated from IPACM
+ * @ul_src_pipe: Source pipe of the lan client.
+ * @hdr_len: Header length of the client.
+ * @num_clients: Number of clients connected.
+ */
+struct ipa_tether_device_info {
+	int32_t ul_src_pipe;
+	uint8_t hdr_len;
+	uint32_t num_clients;
+	struct ipa_lan_client lan_client[IPA_MAX_NUM_HW_PATH_CLIENTS];
+};
+
+/**
+ *   actual IOCTLs supported by IPA driver
+ */
+#define IPA_IOC_ADD_HDR _IOWR(IPA_IOC_MAGIC, \
+					IPA_IOCTL_ADD_HDR, \
+					struct ipa_ioc_add_hdr *)
+#define IPA_IOC_DEL_HDR _IOWR(IPA_IOC_MAGIC, \
+					IPA_IOCTL_DEL_HDR, \
+					struct ipa_ioc_del_hdr *)
+#define IPA_IOC_ADD_RT_RULE _IOWR(IPA_IOC_MAGIC, \
+					IPA_IOCTL_ADD_RT_RULE, \
+					struct ipa_ioc_add_rt_rule *)
+#define IPA_IOC_ADD_RT_RULE_EXT _IOWR(IPA_IOC_MAGIC, \
+					IPA_IOCTL_ADD_RT_RULE_EXT, \
+					struct ipa_ioc_add_rt_rule_ext *)
+#define IPA_IOC_ADD_RT_RULE_AFTER _IOWR(IPA_IOC_MAGIC, \
+					IPA_IOCTL_ADD_RT_RULE_AFTER, \
+					struct ipa_ioc_add_rt_rule_after *)
+#define IPA_IOC_DEL_RT_RULE _IOWR(IPA_IOC_MAGIC, \
+					IPA_IOCTL_DEL_RT_RULE, \
+					struct ipa_ioc_del_rt_rule *)
+#define IPA_IOC_ADD_FLT_RULE _IOWR(IPA_IOC_MAGIC, \
+					IPA_IOCTL_ADD_FLT_RULE, \
+					struct ipa_ioc_add_flt_rule *)
+#define IPA_IOC_ADD_FLT_RULE_AFTER _IOWR(IPA_IOC_MAGIC, \
+					IPA_IOCTL_ADD_FLT_RULE_AFTER, \
+					struct ipa_ioc_add_flt_rule_after *)
+#define IPA_IOC_DEL_FLT_RULE _IOWR(IPA_IOC_MAGIC, \
+					IPA_IOCTL_DEL_FLT_RULE, \
+					struct ipa_ioc_del_flt_rule *)
+#define IPA_IOC_COMMIT_HDR _IO(IPA_IOC_MAGIC,\
+					IPA_IOCTL_COMMIT_HDR)
+#define IPA_IOC_RESET_HDR _IO(IPA_IOC_MAGIC,\
+					IPA_IOCTL_RESET_HDR)
+#define IPA_IOC_COMMIT_RT _IOW(IPA_IOC_MAGIC, \
+					IPA_IOCTL_COMMIT_RT, \
+					enum ipa_ip_type)
+#define IPA_IOC_RESET_RT _IOW(IPA_IOC_MAGIC, \
+					IPA_IOCTL_RESET_RT, \
+					enum ipa_ip_type)
+#define IPA_IOC_COMMIT_FLT _IOW(IPA_IOC_MAGIC, \
+					IPA_IOCTL_COMMIT_FLT, \
+					enum ipa_ip_type)
+#define IPA_IOC_RESET_FLT _IOW(IPA_IOC_MAGIC, \
+			IPA_IOCTL_RESET_FLT, \
+			enum ipa_ip_type)
+#define IPA_IOC_DUMP _IO(IPA_IOC_MAGIC, \
+			IPA_IOCTL_DUMP)
+#define IPA_IOC_GET_RT_TBL _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_GET_RT_TBL, \
+				struct ipa_ioc_get_rt_tbl *)
+#define IPA_IOC_PUT_RT_TBL _IOW(IPA_IOC_MAGIC, \
+				IPA_IOCTL_PUT_RT_TBL, \
+				uint32_t)
+#define IPA_IOC_COPY_HDR _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_COPY_HDR, \
+				struct ipa_ioc_copy_hdr *)
+#define IPA_IOC_QUERY_INTF _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_QUERY_INTF, \
+				struct ipa_ioc_query_intf *)
+#define IPA_IOC_QUERY_INTF_TX_PROPS _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_QUERY_INTF_TX_PROPS, \
+				struct ipa_ioc_query_intf_tx_props *)
+#define IPA_IOC_QUERY_INTF_RX_PROPS _IOWR(IPA_IOC_MAGIC, \
+					IPA_IOCTL_QUERY_INTF_RX_PROPS, \
+					struct ipa_ioc_query_intf_rx_props *)
+#define IPA_IOC_QUERY_INTF_EXT_PROPS _IOWR(IPA_IOC_MAGIC, \
+					IPA_IOCTL_QUERY_INTF_EXT_PROPS, \
+					struct ipa_ioc_query_intf_ext_props *)
+#define IPA_IOC_GET_HDR _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_GET_HDR, \
+				struct ipa_ioc_get_hdr *)
+#define IPA_IOC_PUT_HDR _IOW(IPA_IOC_MAGIC, \
+				IPA_IOCTL_PUT_HDR, \
+				uint32_t)
+#define IPA_IOC_ALLOC_NAT_MEM _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_ALLOC_NAT_MEM, \
+				struct ipa_ioc_nat_alloc_mem *)
+#define IPA_IOC_ALLOC_NAT_TABLE _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_ALLOC_NAT_TABLE, \
+				struct ipa_ioc_nat_ipv6ct_table_alloc *)
+#define IPA_IOC_ALLOC_IPV6CT_TABLE _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_ALLOC_IPV6CT_TABLE, \
+				struct ipa_ioc_nat_ipv6ct_table_alloc *)
+#define IPA_IOC_V4_INIT_NAT _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_V4_INIT_NAT, \
+				struct ipa_ioc_v4_nat_init *)
+#define IPA_IOC_INIT_IPV6CT_TABLE _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_INIT_IPV6CT_TABLE, \
+				struct ipa_ioc_ipv6ct_init *)
+#define IPA_IOC_NAT_DMA _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_NAT_DMA, \
+				struct ipa_ioc_nat_dma_cmd *)
+#define IPA_IOC_TABLE_DMA_CMD _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_TABLE_DMA_CMD, \
+				struct ipa_ioc_nat_dma_cmd *)
+#define IPA_IOC_V4_DEL_NAT _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_V4_DEL_NAT, \
+				struct ipa_ioc_v4_nat_del *)
+#define IPA_IOC_DEL_NAT_TABLE _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_DEL_NAT_TABLE, \
+				struct ipa_ioc_nat_ipv6ct_table_del *)
+#define IPA_IOC_DEL_IPV6CT_TABLE _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_DEL_IPV6CT_TABLE, \
+				struct ipa_ioc_nat_ipv6ct_table_del *)
+#define IPA_IOC_GET_NAT_OFFSET _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_GET_NAT_OFFSET, \
+				uint32_t *)
+#define IPA_IOC_NAT_MODIFY_PDN _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_NAT_MODIFY_PDN, \
+				struct ipa_ioc_nat_pdn_entry *)
+#define IPA_IOC_SET_FLT _IOW(IPA_IOC_MAGIC, \
+			IPA_IOCTL_SET_FLT, \
+			uint32_t)
+#define IPA_IOC_PULL_MSG _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_PULL_MSG, \
+				struct ipa_msg_meta *)
+#define IPA_IOC_RM_ADD_DEPENDENCY _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_RM_ADD_DEPENDENCY, \
+				struct ipa_ioc_rm_dependency *)
+#define IPA_IOC_RM_DEL_DEPENDENCY _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_RM_DEL_DEPENDENCY, \
+				struct ipa_ioc_rm_dependency *)
+#define IPA_IOC_GENERATE_FLT_EQ _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_GENERATE_FLT_EQ, \
+				struct ipa_ioc_generate_flt_eq *)
+#define IPA_IOC_QUERY_EP_MAPPING _IOR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_QUERY_EP_MAPPING, \
+				uint32_t)
+#define IPA_IOC_QUERY_RT_TBL_INDEX _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_QUERY_RT_TBL_INDEX, \
+				struct ipa_ioc_get_rt_tbl_indx *)
+#define IPA_IOC_WRITE_QMAPID _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_WRITE_QMAPID, \
+				struct ipa_ioc_write_qmapid *)
+#define IPA_IOC_MDFY_FLT_RULE _IOWR(IPA_IOC_MAGIC, \
+					IPA_IOCTL_MDFY_FLT_RULE, \
+					struct ipa_ioc_mdfy_flt_rule *)
+#define IPA_IOC_MDFY_RT_RULE _IOWR(IPA_IOC_MAGIC, \
+					IPA_IOCTL_MDFY_RT_RULE, \
+					struct ipa_ioc_mdfy_rt_rule *)
+
+#define IPA_IOC_NOTIFY_WAN_UPSTREAM_ROUTE_ADD _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_NOTIFY_WAN_UPSTREAM_ROUTE_ADD, \
+				struct ipa_wan_msg *)
+
+#define IPA_IOC_NOTIFY_WAN_UPSTREAM_ROUTE_DEL _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_NOTIFY_WAN_UPSTREAM_ROUTE_DEL, \
+				struct ipa_wan_msg *)
+#define IPA_IOC_NOTIFY_WAN_EMBMS_CONNECTED _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_NOTIFY_WAN_EMBMS_CONNECTED, \
+				struct ipa_wan_msg *)
+#define IPA_IOC_ADD_HDR_PROC_CTX _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_ADD_HDR_PROC_CTX, \
+				struct ipa_ioc_add_hdr_proc_ctx *)
+#define IPA_IOC_DEL_HDR_PROC_CTX _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_DEL_HDR_PROC_CTX, \
+				struct ipa_ioc_del_hdr_proc_ctx *)
+
+#define IPA_IOC_GET_HW_VERSION _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_GET_HW_VERSION, \
+				enum ipa_hw_type *)
+
+#define IPA_IOC_ADD_VLAN_IFACE _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_ADD_VLAN_IFACE, \
+				struct ipa_ioc_vlan_iface_info *)
+
+#define IPA_IOC_DEL_VLAN_IFACE _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_DEL_VLAN_IFACE, \
+				struct ipa_ioc_vlan_iface_info *)
+
+#define IPA_IOC_ADD_L2TP_VLAN_MAPPING _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_ADD_L2TP_VLAN_MAPPING, \
+				struct ipa_ioc_l2tp_vlan_mapping_info *)
+
+#define IPA_IOC_DEL_L2TP_VLAN_MAPPING _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_DEL_L2TP_VLAN_MAPPING, \
+				struct ipa_ioc_l2tp_vlan_mapping_info *)
+/*
+ * unique magic number of the Tethering bridge ioctls
+ */
+#define TETH_BRIDGE_IOC_MAGIC 0xCE
+
+/*
+ * Ioctls supported by Tethering bridge driver
+ */
+#define TETH_BRIDGE_IOCTL_SET_BRIDGE_MODE	0
+#define TETH_BRIDGE_IOCTL_SET_AGGR_PARAMS	1
+#define TETH_BRIDGE_IOCTL_GET_AGGR_PARAMS	2
+#define TETH_BRIDGE_IOCTL_GET_AGGR_CAPABILITIES	3
+#define TETH_BRIDGE_IOCTL_MAX			4
+
+/**
+ * enum teth_link_protocol_type - link protocol (IP / Ethernet)
+ */
+enum teth_link_protocol_type {
+	TETH_LINK_PROTOCOL_IP,
+	TETH_LINK_PROTOCOL_ETHERNET,
+	TETH_LINK_PROTOCOL_MAX,
+};
+
+/**
+ * enum teth_aggr_protocol_type - Aggregation protocol (MBIM / TLP)
+ */
+enum teth_aggr_protocol_type {
+	TETH_AGGR_PROTOCOL_NONE,
+	TETH_AGGR_PROTOCOL_MBIM,
+	TETH_AGGR_PROTOCOL_TLP,
+	TETH_AGGR_PROTOCOL_MAX,
+};
+
+/**
+ * struct teth_aggr_params_link - Aggregation parameters for uplink/downlink
+ * @aggr_prot:			Aggregation protocol (MBIM / TLP)
+ * @max_transfer_size_byte:	Maximum size of an aggregated packet in
+ *				bytes. Default value is 16*1024.
+ * @max_datagrams:		Maximum number of IP packets in an
+ *				aggregated packet. Default value is 16.
+ */
+struct teth_aggr_params_link {
+	enum teth_aggr_protocol_type aggr_prot;
+	uint32_t max_transfer_size_byte;
+	uint32_t max_datagrams;
+};
+
+/**
+ * struct teth_aggr_params - Aggregation parameters
+ * @ul:	Uplink parameters
+ * @dl: Downlink parameters
+ */
+struct teth_aggr_params {
+	struct teth_aggr_params_link ul;
+	struct teth_aggr_params_link dl;
+};
+
+/**
+ * struct teth_aggr_capabilities - Aggregation capabilities
+ * @num_protocols:		Number of protocols described in the array
+ * @prot_caps[]:		Array of aggregation capabilities per protocol
+ */
+struct teth_aggr_capabilities {
+	uint16_t num_protocols;
+	struct teth_aggr_params_link prot_caps[0];
+};
+
+/**
+ * struct teth_ioc_set_bridge_mode
+ * @link_protocol: link protocol (IP / Ethernet)
+ * @lcid: logical channel number
+ */
+struct teth_ioc_set_bridge_mode {
+	enum teth_link_protocol_type link_protocol;
+	uint16_t lcid;
+};
+
+/**
+ * struct teth_ioc_aggr_params
+ * @aggr_params: Aggregation parameters
+ * @lcid: logical channel number
+ */
+struct teth_ioc_aggr_params {
+	struct teth_aggr_params aggr_params;
+	uint16_t lcid;
+};
+
+#define TETH_BRIDGE_IOC_SET_BRIDGE_MODE _IOW(TETH_BRIDGE_IOC_MAGIC, \
+				TETH_BRIDGE_IOCTL_SET_BRIDGE_MODE, \
+				struct teth_ioc_set_bridge_mode *)
+#define TETH_BRIDGE_IOC_SET_AGGR_PARAMS _IOW(TETH_BRIDGE_IOC_MAGIC, \
+				TETH_BRIDGE_IOCTL_SET_AGGR_PARAMS, \
+				struct teth_ioc_aggr_params *)
+#define TETH_BRIDGE_IOC_GET_AGGR_PARAMS _IOR(TETH_BRIDGE_IOC_MAGIC, \
+				TETH_BRIDGE_IOCTL_GET_AGGR_PARAMS, \
+				struct teth_ioc_aggr_params *)
+#define TETH_BRIDGE_IOC_GET_AGGR_CAPABILITIES _IOWR(TETH_BRIDGE_IOC_MAGIC, \
+				TETH_BRIDGE_IOCTL_GET_AGGR_CAPABILITIES, \
+				struct teth_aggr_capabilities *)
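+
+/*
+ * Usage sketch (editorial, not part of the UAPI): program MBIM
+ * aggregation on both directions with the default sizes documented
+ * above.  Assumes an already-open tethering bridge device fd (node
+ * path not shown) and <sys/ioctl.h>.
+ */
+static int teth_example_set_aggr(int fd, uint16_t lcid)
+{
+	struct teth_aggr_params_link link = {
+		.aggr_prot = TETH_AGGR_PROTOCOL_MBIM,
+		.max_transfer_size_byte = 16 * 1024,
+		.max_datagrams = 16,
+	};
+	struct teth_ioc_aggr_params params = {
+		.aggr_params = { .ul = link, .dl = link },
+		.lcid = lcid,
+	};
+
+	return ioctl(fd, TETH_BRIDGE_IOC_SET_AGGR_PARAMS, &params);
+}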
+
+/*
+ * unique magic number of the ODU bridge ioctls
+ */
+#define ODU_BRIDGE_IOC_MAGIC 0xCD
+
+/*
+ * Ioctls supported by ODU bridge driver
+ */
+#define ODU_BRIDGE_IOCTL_SET_MODE	0
+#define ODU_BRIDGE_IOCTL_SET_LLV6_ADDR	1
+#define ODU_BRIDGE_IOCTL_MAX		2
+
+/**
+ * enum odu_bridge_mode - bridge mode
+ *			(ROUTER MODE / BRIDGE MODE)
+ */
+enum odu_bridge_mode {
+	ODU_BRIDGE_MODE_ROUTER,
+	ODU_BRIDGE_MODE_BRIDGE,
+	ODU_BRIDGE_MODE_MAX,
+};
+
+#define ODU_BRIDGE_IOC_SET_MODE _IOW(ODU_BRIDGE_IOC_MAGIC, \
+				ODU_BRIDGE_IOCTL_SET_MODE, \
+				enum odu_bridge_mode)
+
+#define ODU_BRIDGE_IOC_SET_LLV6_ADDR _IOW(ODU_BRIDGE_IOC_MAGIC, \
+				ODU_BRIDGE_IOCTL_SET_LLV6_ADDR, \
+				struct in6_addr *)
+
+#endif /* _UAPI_MSM_IPA_H_ */
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/include/uapi/linux/msm_ipc.h	2019-01-22 16:16:28.567292264 +0100
@@ -0,0 +1,91 @@
+#ifndef _UAPI_MSM_IPC_H_
+#define _UAPI_MSM_IPC_H_
+
+#include <linux/types.h>
+#include <linux/ioctl.h>
+
+struct msm_ipc_port_addr {
+	uint32_t node_id;
+	uint32_t port_id;
+};
+
+struct msm_ipc_port_name {
+	uint32_t service;
+	uint32_t instance;
+};
+
+struct msm_ipc_addr {
+	unsigned char  addrtype;
+	union {
+		struct msm_ipc_port_addr port_addr;
+		struct msm_ipc_port_name port_name;
+	} addr;
+};
+
+#define MSM_IPC_WAIT_FOREVER	(~0)  /* timeout for permanent subscription */
+
+/*
+ * Socket API
+ */
+
+#ifndef AF_MSM_IPC
+#define AF_MSM_IPC		27
+#endif
+
+#ifndef PF_MSM_IPC
+#define PF_MSM_IPC		AF_MSM_IPC
+#endif
+
+#define MSM_IPC_ADDR_NAME		1
+#define MSM_IPC_ADDR_ID			2
+
+struct sockaddr_msm_ipc {
+	unsigned short family;
+	struct msm_ipc_addr address;
+	unsigned char reserved;
+};
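+
+/*
+ * Usage sketch (editorial, not part of the UAPI): bind an IPC router
+ * socket to a service name.  Assumes <sys/socket.h>, <string.h> and
+ * <unistd.h>; the SOCK_DGRAM socket type and the service/instance
+ * values are assumptions, not mandated by this header.
+ */
+static int msm_ipc_example_bind(uint32_t service, uint32_t instance)
+{
+	struct sockaddr_msm_ipc addr;
+	int fd = socket(AF_MSM_IPC, SOCK_DGRAM, 0);
+
+	if (fd < 0)
+		return -1;
+	memset(&addr, 0, sizeof(addr));
+	addr.family = AF_MSM_IPC;
+	addr.address.addrtype = MSM_IPC_ADDR_NAME;
+	addr.address.addr.port_name.service = service;
+	addr.address.addr.port_name.instance = instance;
+	if (bind(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0) {
+		close(fd);
+		return -1;
+	}
+	return fd;
+}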
+
+struct config_sec_rules_args {
+	int num_group_info;
+	uint32_t service_id;
+	uint32_t instance_id;
+	unsigned reserved;
+	gid_t group_id[0];
+};
+
+#define IPC_ROUTER_IOCTL_MAGIC (0xC3)
+
+#define IPC_ROUTER_IOCTL_GET_VERSION \
+	_IOR(IPC_ROUTER_IOCTL_MAGIC, 0, unsigned int)
+
+#define IPC_ROUTER_IOCTL_GET_MTU \
+	_IOR(IPC_ROUTER_IOCTL_MAGIC, 1, unsigned int)
+
+#define IPC_ROUTER_IOCTL_LOOKUP_SERVER \
+	_IOWR(IPC_ROUTER_IOCTL_MAGIC, 2, struct sockaddr_msm_ipc)
+
+#define IPC_ROUTER_IOCTL_GET_CURR_PKT_SIZE \
+	_IOR(IPC_ROUTER_IOCTL_MAGIC, 3, unsigned int)
+
+#define IPC_ROUTER_IOCTL_BIND_CONTROL_PORT \
+	_IOR(IPC_ROUTER_IOCTL_MAGIC, 4, unsigned int)
+
+#define IPC_ROUTER_IOCTL_CONFIG_SEC_RULES \
+	_IOR(IPC_ROUTER_IOCTL_MAGIC, 5, struct config_sec_rules_args)
+
+struct msm_ipc_server_info {
+	uint32_t node_id;
+	uint32_t port_id;
+	uint32_t service;
+	uint32_t instance;
+};
+
+struct server_lookup_args {
+	struct msm_ipc_port_name port_name;
+	int num_entries_in_array;
+	int num_entries_found;
+	uint32_t lookup_mask;
+	struct msm_ipc_server_info srv_info[0];
+};
+
+#endif
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/include/uapi/linux/msm_kgsl.h	2019-10-29 09:26:25.549221830 +0100
@@ -0,0 +1,1600 @@
+#ifndef _UAPI_MSM_KGSL_H
+#define _UAPI_MSM_KGSL_H
+
+#include <linux/types.h>
+#include <linux/ioctl.h>
+
+/*
+ * The KGSL version has proven not to be very useful in userspace if features
+ * are cherry picked into other trees out of order so it is frozen as of 3.14.
+ * It is left here for backwards compatibility and as a reminder that
+ * software releases are never linear. Also, I like pie.
+ */
+
+#define KGSL_VERSION_MAJOR        3
+#define KGSL_VERSION_MINOR        14
+
+/*
+ * We have traditionally mixed context and issueibcmds / command batch flags
+ * together into a big flag stew. This worked fine until we started adding a
+ * lot more command batch flags and we started running out of bits. Turns out
+ * we have a bit of room in the context type / priority mask that we could use
+ * for command batches, but that means we need to split out the flags into two
+ * coherent sets.
+ *
+ * If any future definitions are for both context and cmdbatch add both defines
+ * and link the cmdbatch to the context define as we do below. Otherwise feel
+ * free to add exclusive bits to either set.
+ */
+
+/* --- context flags --- */
+#define KGSL_CONTEXT_SAVE_GMEM		0x00000001
+#define KGSL_CONTEXT_NO_GMEM_ALLOC	0x00000002
+/* This is a cmdbatch exclusive flag - use the CMDBATCH equivalent instead */
+#define KGSL_CONTEXT_SUBMIT_IB_LIST	0x00000004
+#define KGSL_CONTEXT_CTX_SWITCH		0x00000008
+#define KGSL_CONTEXT_PREAMBLE		0x00000010
+#define KGSL_CONTEXT_TRASH_STATE	0x00000020
+#define KGSL_CONTEXT_PER_CONTEXT_TS	0x00000040
+#define KGSL_CONTEXT_USER_GENERATED_TS	0x00000080
+/* This is a cmdbatch exclusive flag - use the CMDBATCH equivalent instead */
+#define KGSL_CONTEXT_END_OF_FRAME	0x00000100
+#define KGSL_CONTEXT_NO_FAULT_TOLERANCE 0x00000200
+/* This is a cmdbatch exclusive flag - use the CMDBATCH equivalent instead */
+#define KGSL_CONTEXT_SYNC               0x00000400
+#define KGSL_CONTEXT_PWR_CONSTRAINT     0x00000800
+#define KGSL_CONTEXT_PRIORITY_MASK      0x0000F000
+#define KGSL_CONTEXT_PRIORITY_SHIFT     12
+#define KGSL_CONTEXT_PRIORITY_UNDEF     0
+
+#define KGSL_CONTEXT_IFH_NOP            0x00010000
+#define KGSL_CONTEXT_SECURE             0x00020000
+#define KGSL_CONTEXT_NO_SNAPSHOT        0x00040000
+#define KGSL_CONTEXT_SPARSE             0x00080000
+
+#define KGSL_CONTEXT_PREEMPT_STYLE_MASK       0x0E000000
+#define KGSL_CONTEXT_PREEMPT_STYLE_SHIFT      25
+#define KGSL_CONTEXT_PREEMPT_STYLE_DEFAULT    0x0
+#define KGSL_CONTEXT_PREEMPT_STYLE_RINGBUFFER 0x1
+#define KGSL_CONTEXT_PREEMPT_STYLE_FINEGRAIN  0x2
+
+#define KGSL_CONTEXT_TYPE_MASK          0x01F00000
+#define KGSL_CONTEXT_TYPE_SHIFT         20
+#define KGSL_CONTEXT_TYPE_ANY		0
+#define KGSL_CONTEXT_TYPE_GL		1
+#define KGSL_CONTEXT_TYPE_CL		2
+#define KGSL_CONTEXT_TYPE_C2D		3
+#define KGSL_CONTEXT_TYPE_RS		4
+#define KGSL_CONTEXT_TYPE_UNKNOWN	0x1E
+
+#define KGSL_CONTEXT_INVALIDATE_ON_FAULT 0x10000000
+
+#define KGSL_CONTEXT_INVALID 0xffffffff
+
+/*
+ * --- command batch flags ---
+ * The bits that are linked to a KGSL_CONTEXT equivalent are either legacy
+ * definitions or bits that are valid for both contexts and cmdbatches.  To be
+ * safe the other 8 bits that are still available in the context field should be
+ * omitted here in case we need to share - the other bits are available for
+ * cmdbatch only flags as needed
+ */
+#define KGSL_CMDBATCH_MEMLIST		0x00000001
+#define KGSL_CMDBATCH_MARKER		0x00000002
+#define KGSL_CMDBATCH_SUBMIT_IB_LIST	KGSL_CONTEXT_SUBMIT_IB_LIST /* 0x004 */
+#define KGSL_CMDBATCH_CTX_SWITCH	KGSL_CONTEXT_CTX_SWITCH     /* 0x008 */
+#define KGSL_CMDBATCH_PROFILING		0x00000010
+/*
+ * KGSL_CMDBATCH_PROFILING must also be set for KGSL_CMDBATCH_PROFILING_KTIME
+ * to take effect, as the latter only affects the time data returned.
+ */
+#define KGSL_CMDBATCH_PROFILING_KTIME	0x00000020
+#define KGSL_CMDBATCH_END_OF_FRAME	KGSL_CONTEXT_END_OF_FRAME   /* 0x100 */
+#define KGSL_CMDBATCH_SYNC		KGSL_CONTEXT_SYNC           /* 0x400 */
+#define KGSL_CMDBATCH_PWR_CONSTRAINT	KGSL_CONTEXT_PWR_CONSTRAINT /* 0x800 */
+#define KGSL_CMDBATCH_SPARSE		0x1000 /* 0x1000 */
+
+/*
+ * Reserve bits [16:19] and bits [28:31] for possible bits shared between
+ * contexts and command batches.  Update this comment as new flags are added.
+ */
+
+/*
+ * gpu_command_object flags - these flags communicate the type of command or
+ * memory object being submitted for a GPU command
+ */
+
+/* Flags for GPU command objects */
+#define KGSL_CMDLIST_IB                  0x00000001U
+#define KGSL_CMDLIST_CTXTSWITCH_PREAMBLE 0x00000002U
+#define KGSL_CMDLIST_IB_PREAMBLE         0x00000004U
+
+/* Flags for GPU command memory objects */
+#define KGSL_OBJLIST_MEMOBJ  0x00000008U
+#define KGSL_OBJLIST_PROFILE 0x00000010U
+
+/* Flags for GPU command sync points */
+#define KGSL_CMD_SYNCPOINT_TYPE_TIMESTAMP 0
+#define KGSL_CMD_SYNCPOINT_TYPE_FENCE 1
+
+/* --- Memory allocation flags --- */
+
+/* General allocation hints */
+#define KGSL_MEMFLAGS_SECURE      0x00000008ULL
+#define KGSL_MEMFLAGS_GPUREADONLY 0x01000000U
+#define KGSL_MEMFLAGS_GPUWRITEONLY 0x02000000U
+#define KGSL_MEMFLAGS_FORCE_32BIT 0x100000000ULL
+
+/* Flag for binding all the virt range to single phys data */
+#define KGSL_SPARSE_BIND_MULTIPLE_TO_PHYS 0x400000000ULL
+#define KGSL_SPARSE_BIND 0x1ULL
+#define KGSL_SPARSE_UNBIND 0x2ULL
+
+/* Memory caching hints */
+#define KGSL_CACHEMODE_MASK       0x0C000000U
+#define KGSL_CACHEMODE_SHIFT 26
+
+#define KGSL_CACHEMODE_WRITECOMBINE 0
+#define KGSL_CACHEMODE_UNCACHED 1
+#define KGSL_CACHEMODE_WRITETHROUGH 2
+#define KGSL_CACHEMODE_WRITEBACK 3
+
+#define KGSL_MEMFLAGS_USE_CPU_MAP 0x10000000ULL
+#define KGSL_MEMFLAGS_SPARSE_PHYS 0x20000000ULL
+#define KGSL_MEMFLAGS_SPARSE_VIRT 0x40000000ULL
+
+/* Memory types for which allocations are made */
+#define KGSL_MEMTYPE_MASK		0x0000FF00
+#define KGSL_MEMTYPE_SHIFT		8
+
+#define KGSL_MEMTYPE_OBJECTANY			0
+#define KGSL_MEMTYPE_FRAMEBUFFER		1
+#define KGSL_MEMTYPE_RENDERBUFFER		2
+#define KGSL_MEMTYPE_ARRAYBUFFER		3
+#define KGSL_MEMTYPE_ELEMENTARRAYBUFFER		4
+#define KGSL_MEMTYPE_VERTEXARRAYBUFFER		5
+#define KGSL_MEMTYPE_TEXTURE			6
+#define KGSL_MEMTYPE_SURFACE			7
+#define KGSL_MEMTYPE_EGL_SURFACE		8
+#define KGSL_MEMTYPE_GL				9
+#define KGSL_MEMTYPE_CL				10
+#define KGSL_MEMTYPE_CL_BUFFER_MAP		11
+#define KGSL_MEMTYPE_CL_BUFFER_NOMAP		12
+#define KGSL_MEMTYPE_CL_IMAGE_MAP		13
+#define KGSL_MEMTYPE_CL_IMAGE_NOMAP		14
+#define KGSL_MEMTYPE_CL_KERNEL_STACK		15
+#define KGSL_MEMTYPE_COMMAND			16
+#define KGSL_MEMTYPE_2D				17
+#define KGSL_MEMTYPE_EGL_IMAGE			18
+#define KGSL_MEMTYPE_EGL_SHADOW			19
+#define KGSL_MEMTYPE_MULTISAMPLE		20
+#define KGSL_MEMTYPE_KERNEL			255
+
+/*
+ * Alignment hint, passed as the power of 2 exponent.
+ * i.e. 4k (2^12) would be 12, 64k (2^16) would be 16.
+ */
+#define KGSL_MEMALIGN_MASK		0x00FF0000
+#define KGSL_MEMALIGN_SHIFT		16
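+
+/*
+ * Example (editorial, the macro name is hypothetical): composing
+ * allocation flags from the fields above - a writeback-cached texture
+ * allocation aligned to 4k (2^12):
+ */
+#define KGSL_EXAMPLE_ALLOC_FLAGS \
+	((KGSL_MEMTYPE_TEXTURE << KGSL_MEMTYPE_SHIFT) |		\
+	 (KGSL_CACHEMODE_WRITEBACK << KGSL_CACHEMODE_SHIFT) |	\
+	 (12 << KGSL_MEMALIGN_SHIFT))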
+
+enum kgsl_user_mem_type {
+	KGSL_USER_MEM_TYPE_PMEM		= 0x00000000,
+	KGSL_USER_MEM_TYPE_ASHMEM	= 0x00000001,
+	KGSL_USER_MEM_TYPE_ADDR		= 0x00000002,
+	KGSL_USER_MEM_TYPE_ION		= 0x00000003,
+	/*
+	 * ION type is retained for backwards compatibility but Ion buffers are
+	 * dma-bufs so try to use that naming if we can
+	 */
+	KGSL_USER_MEM_TYPE_DMABUF       = 0x00000003,
+	KGSL_USER_MEM_TYPE_MAX		= 0x00000007,
+};
+#define KGSL_MEMFLAGS_USERMEM_MASK 0x000000e0
+#define KGSL_MEMFLAGS_USERMEM_SHIFT 5
+
+/*
+ * Unfortunately, enum kgsl_user_mem_type starts at 0 which does not
+ * leave a good value for allocated memory. In the flags we use
+ * 0 to indicate allocated memory and thus need to add 1 to the enum
+ * values.
+ */
+#define KGSL_USERMEM_FLAG(x) (((x) + 1) << KGSL_MEMFLAGS_USERMEM_SHIFT)
+
+#define KGSL_MEMFLAGS_NOT_USERMEM 0
+#define KGSL_MEMFLAGS_USERMEM_PMEM KGSL_USERMEM_FLAG(KGSL_USER_MEM_TYPE_PMEM)
+#define KGSL_MEMFLAGS_USERMEM_ASHMEM \
+		KGSL_USERMEM_FLAG(KGSL_USER_MEM_TYPE_ASHMEM)
+#define KGSL_MEMFLAGS_USERMEM_ADDR KGSL_USERMEM_FLAG(KGSL_USER_MEM_TYPE_ADDR)
+#define KGSL_MEMFLAGS_USERMEM_ION KGSL_USERMEM_FLAG(KGSL_USER_MEM_TYPE_ION)
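+
+/*
+ * Worked example (editorial): with the +1 encoding, an ION/dma-buf
+ * import is tagged as (KGSL_USER_MEM_TYPE_ION + 1) << 5 == 0x80, which
+ * fits inside KGSL_MEMFLAGS_USERMEM_MASK (0xe0); a field value of 0
+ * still means memory allocated by the driver itself.
+ */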
+
+/* --- generic KGSL flag values --- */
+
+#define KGSL_FLAGS_NORMALMODE  0x00000000
+#define KGSL_FLAGS_SAFEMODE    0x00000001
+#define KGSL_FLAGS_INITIALIZED0 0x00000002
+#define KGSL_FLAGS_INITIALIZED 0x00000004
+#define KGSL_FLAGS_STARTED     0x00000008
+#define KGSL_FLAGS_ACTIVE      0x00000010
+#define KGSL_FLAGS_RESERVED0   0x00000020
+#define KGSL_FLAGS_RESERVED1   0x00000040
+#define KGSL_FLAGS_RESERVED2   0x00000080
+#define KGSL_FLAGS_SOFT_RESET  0x00000100
+#define KGSL_FLAGS_PER_CONTEXT_TIMESTAMPS 0x00000200
+
+/* Server Side Sync Timeout in milliseconds */
+#define KGSL_SYNCOBJ_SERVER_TIMEOUT 2000
+
+/*
+ * Reset status values for context
+ */
+enum kgsl_ctx_reset_stat {
+	KGSL_CTX_STAT_NO_ERROR				= 0x00000000,
+	KGSL_CTX_STAT_GUILTY_CONTEXT_RESET_EXT		= 0x00000001,
+	KGSL_CTX_STAT_INNOCENT_CONTEXT_RESET_EXT	= 0x00000002,
+	KGSL_CTX_STAT_UNKNOWN_CONTEXT_RESET_EXT		= 0x00000003
+};
+
+#define KGSL_CONVERT_TO_MBPS(val) \
+	((val) * 1000 * 1000U)
+
+/* device id */
+enum kgsl_deviceid {
+	KGSL_DEVICE_3D0		= 0x00000000,
+	KGSL_DEVICE_MAX
+};
+
+struct kgsl_devinfo {
+	unsigned int device_id;
+	/*
+	 * chip revision id
+	 * coreid:8 majorrev:8 minorrev:8 patch:8
+	 */
+	unsigned int chip_id;
+	unsigned int mmu_enabled;
+	unsigned long gmem_gpubaseaddr;
+	/*
+	 * This field contains the adreno revision
+	 * number 200, 205, 220, etc...
+	 */
+	unsigned int gpu_id;
+	size_t gmem_sizebytes;
+};
+
+/*
+ * struct kgsl_devmemstore - this structure defines the region of memory
+ * that can be mmap()ed from this driver. The timestamp fields are volatile
+ * because they are written by the GPU
+ * @soptimestamp: Start of pipeline timestamp written by GPU before the
+ * commands in concern are processed
+ * @sbz: Unused, kept for 8 byte alignment
+ * @eoptimestamp: End of pipeline timestamp written by GPU after the
+ * commands in concern are processed
+ * @sbz2: Unused, kept for 8 byte alignment
+ * @preempted: Indicates if the context was preempted
+ * @sbz3: Unused, kept for 8 byte alignment
+ * @ref_wait_ts: Timestamp on which to generate interrupt, unused now.
+ * @sbz4: Unused, kept for 8 byte alignment
+ * @current_context: The current context the GPU is working on
+ * @sbz5: Unused, kept for 8 byte alignment
+ */
+struct kgsl_devmemstore {
+	volatile unsigned int soptimestamp;
+	unsigned int sbz;
+	volatile unsigned int eoptimestamp;
+	unsigned int sbz2;
+	volatile unsigned int preempted;
+	unsigned int sbz3;
+	volatile unsigned int ref_wait_ts;
+	unsigned int sbz4;
+	unsigned int current_context;
+	unsigned int sbz5;
+};
+
+#define KGSL_MEMSTORE_OFFSET(ctxt_id, field) \
+	((ctxt_id)*sizeof(struct kgsl_devmemstore) + \
+	 offsetof(struct kgsl_devmemstore, field))
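+
+/*
+ * Usage sketch (editorial, not part of the UAPI): read a context's
+ * retired timestamp straight from the memstore.  Assumes `base` points
+ * at the memstore mapping (mmap() offset and size come from the
+ * KGSL_PROP_DEVICE_SHADOW property) and <stddef.h> for offsetof().
+ */
+static unsigned int kgsl_example_read_eop(void *base, unsigned int ctxt_id)
+{
+	volatile unsigned int *ts = (volatile unsigned int *)((char *)base +
+		KGSL_MEMSTORE_OFFSET(ctxt_id, eoptimestamp));
+
+	return *ts;
+}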
+
+/* timestamp id*/
+enum kgsl_timestamp_type {
+	KGSL_TIMESTAMP_CONSUMED = 0x00000001, /* start-of-pipeline timestamp */
+	KGSL_TIMESTAMP_RETIRED  = 0x00000002, /* end-of-pipeline timestamp*/
+	KGSL_TIMESTAMP_QUEUED   = 0x00000003,
+};
+
+/* property types - used with kgsl_device_getproperty */
+#define KGSL_PROP_DEVICE_INFO		0x1
+#define KGSL_PROP_DEVICE_SHADOW		0x2
+#define KGSL_PROP_DEVICE_POWER		0x3
+#define KGSL_PROP_SHMEM			0x4
+#define KGSL_PROP_SHMEM_APERTURES	0x5
+#define KGSL_PROP_MMU_ENABLE		0x6
+#define KGSL_PROP_INTERRUPT_WAITS	0x7
+#define KGSL_PROP_VERSION		0x8
+#define KGSL_PROP_GPU_RESET_STAT	0x9
+#define KGSL_PROP_PWRCTRL		0xE
+#define KGSL_PROP_PWR_CONSTRAINT	0x12
+#define KGSL_PROP_UCHE_GMEM_VADDR	0x13
+#define KGSL_PROP_SP_GENERIC_MEM	0x14
+#define KGSL_PROP_UCODE_VERSION		0x15
+#define KGSL_PROP_GPMU_VERSION		0x16
+#define KGSL_PROP_HIGHEST_BANK_BIT	0x17
+#define KGSL_PROP_DEVICE_BITNESS	0x18
+#define KGSL_PROP_DEVICE_QDSS_STM	0x19
+#define KGSL_PROP_DEVICE_QTIMER	0x20
+#define KGSL_PROP_IB_TIMEOUT 0x21
+
+struct kgsl_shadowprop {
+	unsigned long gpuaddr;
+	size_t size;
+	unsigned int flags; /* contains KGSL_FLAGS_ values */
+};
+
+struct kgsl_qdss_stm_prop {
+	uint64_t gpuaddr;
+	uint64_t size;
+};
+
+struct kgsl_qtimer_prop {
+	uint64_t gpuaddr;
+	uint64_t size;
+};
+
+struct kgsl_version {
+	unsigned int drv_major;
+	unsigned int drv_minor;
+	unsigned int dev_major;
+	unsigned int dev_minor;
+};
+
+struct kgsl_sp_generic_mem {
+	uint64_t local;
+	uint64_t pvt;
+};
+
+struct kgsl_ucode_version {
+	unsigned int pfp;
+	unsigned int pm4;
+};
+
+struct kgsl_gpmu_version {
+	unsigned int major;
+	unsigned int minor;
+	unsigned int features;
+};
+
+/* Performance counter groups */
+
+#define KGSL_PERFCOUNTER_GROUP_CP 0x0
+#define KGSL_PERFCOUNTER_GROUP_RBBM 0x1
+#define KGSL_PERFCOUNTER_GROUP_PC 0x2
+#define KGSL_PERFCOUNTER_GROUP_VFD 0x3
+#define KGSL_PERFCOUNTER_GROUP_HLSQ 0x4
+#define KGSL_PERFCOUNTER_GROUP_VPC 0x5
+#define KGSL_PERFCOUNTER_GROUP_TSE 0x6
+#define KGSL_PERFCOUNTER_GROUP_RAS 0x7
+#define KGSL_PERFCOUNTER_GROUP_UCHE 0x8
+#define KGSL_PERFCOUNTER_GROUP_TP 0x9
+#define KGSL_PERFCOUNTER_GROUP_SP 0xA
+#define KGSL_PERFCOUNTER_GROUP_RB 0xB
+#define KGSL_PERFCOUNTER_GROUP_PWR 0xC
+#define KGSL_PERFCOUNTER_GROUP_VBIF 0xD
+#define KGSL_PERFCOUNTER_GROUP_VBIF_PWR 0xE
+#define KGSL_PERFCOUNTER_GROUP_MH 0xF
+#define KGSL_PERFCOUNTER_GROUP_PA_SU 0x10
+#define KGSL_PERFCOUNTER_GROUP_SQ 0x11
+#define KGSL_PERFCOUNTER_GROUP_SX 0x12
+#define KGSL_PERFCOUNTER_GROUP_TCF 0x13
+#define KGSL_PERFCOUNTER_GROUP_TCM 0x14
+#define KGSL_PERFCOUNTER_GROUP_TCR 0x15
+#define KGSL_PERFCOUNTER_GROUP_L2 0x16
+#define KGSL_PERFCOUNTER_GROUP_VSC 0x17
+#define KGSL_PERFCOUNTER_GROUP_CCU 0x18
+#define KGSL_PERFCOUNTER_GROUP_LRZ 0x19
+#define KGSL_PERFCOUNTER_GROUP_CMP 0x1A
+#define KGSL_PERFCOUNTER_GROUP_ALWAYSON 0x1B
+#define KGSL_PERFCOUNTER_GROUP_SP_PWR 0x1C
+#define KGSL_PERFCOUNTER_GROUP_TP_PWR 0x1D
+#define KGSL_PERFCOUNTER_GROUP_RB_PWR 0x1E
+#define KGSL_PERFCOUNTER_GROUP_CCU_PWR 0x1F
+#define KGSL_PERFCOUNTER_GROUP_UCHE_PWR 0x20
+#define KGSL_PERFCOUNTER_GROUP_CP_PWR 0x21
+#define KGSL_PERFCOUNTER_GROUP_GPMU_PWR 0x22
+#define KGSL_PERFCOUNTER_GROUP_ALWAYSON_PWR 0x23
+#define KGSL_PERFCOUNTER_GROUP_MAX 0x24
+
+#define KGSL_PERFCOUNTER_NOT_USED 0xFFFFFFFF
+#define KGSL_PERFCOUNTER_BROKEN 0xFFFFFFFE
+
+/* structure holds list of ibs */
+struct kgsl_ibdesc {
+	unsigned long gpuaddr;
+	unsigned long __pad;
+	size_t sizedwords;
+	unsigned int ctrl;
+};
+
+/**
+ * struct kgsl_cmdbatch_profiling_buffer
+ * @wall_clock_s: Ringbuffer submission time (seconds).
+ *                If KGSL_CMDBATCH_PROFILING_KTIME is set, time is provided
+ *                in kernel clocks, otherwise wall clock time is used.
+ * @wall_clock_ns: Ringbuffer submission time (nanoseconds).
+ *                 If KGSL_CMDBATCH_PROFILING_KTIME is set time is provided
+ *                 in kernel clocks, otherwise wall clock time is used.
+ * @gpu_ticks_queued: GPU ticks at ringbuffer submission
+ * @gpu_ticks_submitted: GPU ticks when starting cmdbatch execution
+ * @gpu_ticks_retired: GPU ticks when finishing cmdbatch execution
+ *
+ * This structure defines the profiling buffer used to measure cmdbatch
+ * execution time
+ */
+struct kgsl_cmdbatch_profiling_buffer {
+	uint64_t wall_clock_s;
+	uint64_t wall_clock_ns;
+	uint64_t gpu_ticks_queued;
+	uint64_t gpu_ticks_submitted;
+	uint64_t gpu_ticks_retired;
+};
+
+/* ioctls */
+#define KGSL_IOC_TYPE 0x09
+
+/* get misc info about the GPU
+   type should be a value from enum kgsl_property_type
+   value points to a structure that varies based on type
+   sizebytes is sizeof() that structure
+   for KGSL_PROP_DEVICE_INFO, use struct kgsl_devinfo
+   this structure contains hardware versioning info.
+   for KGSL_PROP_DEVICE_SHADOW, use struct kgsl_shadowprop
+   this is used to find mmap() offset and sizes for mapping
+   struct kgsl_devmemstore into userspace.
+*/
+struct kgsl_device_getproperty {
+	unsigned int type;
+	void __user *value;
+	size_t sizebytes;
+};
+
+#define IOCTL_KGSL_DEVICE_GETPROPERTY \
+	_IOWR(KGSL_IOC_TYPE, 0x2, struct kgsl_device_getproperty)
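+
+/*
+ * Usage sketch (editorial, not part of the UAPI): query the hardware
+ * info.  Assumes the device node is /dev/kgsl-3d0 and that <fcntl.h>,
+ * <unistd.h> and <sys/ioctl.h> are included.
+ */
+static int kgsl_example_get_devinfo(struct kgsl_devinfo *info)
+{
+	struct kgsl_device_getproperty prop = {
+		.type = KGSL_PROP_DEVICE_INFO,
+		.value = info,
+		.sizebytes = sizeof(*info),
+	};
+	int fd = open("/dev/kgsl-3d0", O_RDWR);
+	int ret;
+
+	if (fd < 0)
+		return -1;
+	ret = ioctl(fd, IOCTL_KGSL_DEVICE_GETPROPERTY, &prop);
+	close(fd);
+	return ret;
+}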
+
+/* IOCTL_KGSL_DEVICE_READ (0x3) - removed 03/2012
+ */
+
+/* block until the GPU has executed past a given timestamp
+ * timeout is in milliseconds.
+ */
+struct kgsl_device_waittimestamp {
+	unsigned int timestamp;
+	unsigned int timeout;
+};
+
+#define IOCTL_KGSL_DEVICE_WAITTIMESTAMP \
+	_IOW(KGSL_IOC_TYPE, 0x6, struct kgsl_device_waittimestamp)
+
+struct kgsl_device_waittimestamp_ctxtid {
+	unsigned int context_id;
+	unsigned int timestamp;
+	unsigned int timeout;
+};
+
+#define IOCTL_KGSL_DEVICE_WAITTIMESTAMP_CTXTID \
+	_IOW(KGSL_IOC_TYPE, 0x7, struct kgsl_device_waittimestamp_ctxtid)
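+
+/*
+ * Usage sketch (editorial, not part of the UAPI): block for up to one
+ * second until the given timestamp retires on a context.  Assumes
+ * <sys/ioctl.h>.
+ */
+static int kgsl_example_wait(int fd, unsigned int ctxt, unsigned int ts)
+{
+	struct kgsl_device_waittimestamp_ctxtid wait = {
+		.context_id = ctxt,
+		.timestamp = ts,
+		.timeout = 1000, /* milliseconds */
+	};
+
+	return ioctl(fd, IOCTL_KGSL_DEVICE_WAITTIMESTAMP_CTXTID, &wait);
+}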
+
+/* DEPRECATED: issue indirect commands to the GPU.
+ * drawctxt_id must have been created with IOCTL_KGSL_DRAWCTXT_CREATE
+ * ibaddr and sizedwords must specify a subset of a buffer created
+ * with IOCTL_KGSL_SHAREDMEM_FROM_PMEM
+ * flags may be a mask of KGSL_CONTEXT_ values
+ * timestamp is a returned counter value which can be passed to
+ * other ioctls to determine when the commands have been executed by
+ * the GPU.
+ *
+ * This function is deprecated - consider using IOCTL_KGSL_SUBMIT_COMMANDS
+ * instead
+ */
+struct kgsl_ringbuffer_issueibcmds {
+	unsigned int drawctxt_id;
+	unsigned long ibdesc_addr;
+	unsigned int numibs;
+	unsigned int timestamp; /*output param */
+	unsigned int flags;
+};
+
+#define IOCTL_KGSL_RINGBUFFER_ISSUEIBCMDS \
+	_IOWR(KGSL_IOC_TYPE, 0x10, struct kgsl_ringbuffer_issueibcmds)
+
+/* read the most recently executed timestamp value
+ * type should be a value from enum kgsl_timestamp_type
+ */
+struct kgsl_cmdstream_readtimestamp {
+	unsigned int type;
+	unsigned int timestamp; /*output param */
+};
+
+#define IOCTL_KGSL_CMDSTREAM_READTIMESTAMP_OLD \
+	_IOR(KGSL_IOC_TYPE, 0x11, struct kgsl_cmdstream_readtimestamp)
+
+#define IOCTL_KGSL_CMDSTREAM_READTIMESTAMP \
+	_IOWR(KGSL_IOC_TYPE, 0x11, struct kgsl_cmdstream_readtimestamp)
+
+/* free memory when the GPU reaches a given timestamp.
+ * gpuaddr specifies a memory region created by a
+ * IOCTL_KGSL_SHAREDMEM_FROM_PMEM call
+ * type should be a value from enum kgsl_timestamp_type
+ */
+struct kgsl_cmdstream_freememontimestamp {
+	unsigned long gpuaddr;
+	unsigned int type;
+	unsigned int timestamp;
+};
+
+#define IOCTL_KGSL_CMDSTREAM_FREEMEMONTIMESTAMP \
+	_IOW(KGSL_IOC_TYPE, 0x12, struct kgsl_cmdstream_freememontimestamp)
+
+/* Previous versions of this header had incorrectly defined
+   IOCTL_KGSL_CMDSTREAM_FREEMEMONTIMESTAMP as a read-only ioctl instead
+   of a write-only ioctl.  To ensure binary compatibility, the following
+   #define will be used to intercept the incorrect ioctl
+*/
+
+#define IOCTL_KGSL_CMDSTREAM_FREEMEMONTIMESTAMP_OLD \
+	_IOR(KGSL_IOC_TYPE, 0x12, struct kgsl_cmdstream_freememontimestamp)
+
+/* create a draw context, which is used to preserve GPU state.
+ * The flags field may contain a mask KGSL_CONTEXT_*  values
+ */
+struct kgsl_drawctxt_create {
+	unsigned int flags;
+	unsigned int drawctxt_id; /*output param */
+};
+
+#define IOCTL_KGSL_DRAWCTXT_CREATE \
+	_IOWR(KGSL_IOC_TYPE, 0x13, struct kgsl_drawctxt_create)
+
+/* destroy a draw context */
+struct kgsl_drawctxt_destroy {
+	unsigned int drawctxt_id;
+};
+
+#define IOCTL_KGSL_DRAWCTXT_DESTROY \
+	_IOW(KGSL_IOC_TYPE, 0x14, struct kgsl_drawctxt_destroy)
+
+/* add a block of pmem, fb, ashmem or user allocated address
+ * into the GPU address space */
+struct kgsl_map_user_mem {
+	int fd;
+	unsigned long gpuaddr;   /*output param */
+	size_t len;
+	size_t offset;
+	unsigned long hostptr;   /*input param */
+	enum kgsl_user_mem_type memtype;
+	unsigned int flags;
+};
+
+#define IOCTL_KGSL_MAP_USER_MEM \
+	_IOWR(KGSL_IOC_TYPE, 0x15, struct kgsl_map_user_mem)
+
+struct kgsl_cmdstream_readtimestamp_ctxtid {
+	unsigned int context_id;
+	unsigned int type;
+	unsigned int timestamp; /*output param */
+};
+
+#define IOCTL_KGSL_CMDSTREAM_READTIMESTAMP_CTXTID \
+	_IOWR(KGSL_IOC_TYPE, 0x16, struct kgsl_cmdstream_readtimestamp_ctxtid)
+
+struct kgsl_cmdstream_freememontimestamp_ctxtid {
+	unsigned int context_id;
+	unsigned long gpuaddr;
+	unsigned int type;
+	unsigned int timestamp;
+};
+
+#define IOCTL_KGSL_CMDSTREAM_FREEMEMONTIMESTAMP_CTXTID \
+	_IOW(KGSL_IOC_TYPE, 0x17, \
+	struct kgsl_cmdstream_freememontimestamp_ctxtid)
+
+/* add a block of pmem or fb into the GPU address space */
+struct kgsl_sharedmem_from_pmem {
+	int pmem_fd;
+	unsigned long gpuaddr;	/*output param */
+	unsigned int len;
+	unsigned int offset;
+};
+
+#define IOCTL_KGSL_SHAREDMEM_FROM_PMEM \
+	_IOWR(KGSL_IOC_TYPE, 0x20, struct kgsl_sharedmem_from_pmem)
+
+/* remove memory from the GPU's address space */
+struct kgsl_sharedmem_free {
+	unsigned long gpuaddr;
+};
+
+#define IOCTL_KGSL_SHAREDMEM_FREE \
+	_IOW(KGSL_IOC_TYPE, 0x21, struct kgsl_sharedmem_free)
+
+struct kgsl_cff_user_event {
+	unsigned char cff_opcode;
+	unsigned int op1;
+	unsigned int op2;
+	unsigned int op3;
+	unsigned int op4;
+	unsigned int op5;
+	unsigned int __pad[2];
+};
+
+#define IOCTL_KGSL_CFF_USER_EVENT \
+	_IOW(KGSL_IOC_TYPE, 0x31, struct kgsl_cff_user_event)
+
+struct kgsl_gmem_desc {
+	unsigned int x;
+	unsigned int y;
+	unsigned int width;
+	unsigned int height;
+	unsigned int pitch;
+};
+
+struct kgsl_buffer_desc {
+	void		*hostptr;
+	unsigned long	gpuaddr;
+	int		size;
+	unsigned int	format;
+	unsigned int	pitch;
+	unsigned int	enabled;
+};
+
+struct kgsl_bind_gmem_shadow {
+	unsigned int drawctxt_id;
+	struct kgsl_gmem_desc gmem_desc;
+	unsigned int shadow_x;
+	unsigned int shadow_y;
+	struct kgsl_buffer_desc shadow_buffer;
+	unsigned int buffer_id;
+};
+
+#define IOCTL_KGSL_DRAWCTXT_BIND_GMEM_SHADOW \
+	_IOW(KGSL_IOC_TYPE, 0x22, struct kgsl_bind_gmem_shadow)
+
+/* add a block of memory into the GPU address space */
+
+/*
+ * IOCTL_KGSL_SHAREDMEM_FROM_VMALLOC deprecated 09/2012
+ * use IOCTL_KGSL_GPUMEM_ALLOC instead
+ */
+
+struct kgsl_sharedmem_from_vmalloc {
+	unsigned long gpuaddr;	/*output param */
+	unsigned int hostptr;
+	unsigned int flags;
+};
+
+#define IOCTL_KGSL_SHAREDMEM_FROM_VMALLOC \
+	_IOWR(KGSL_IOC_TYPE, 0x23, struct kgsl_sharedmem_from_vmalloc)
+
+/*
+ * This is being deprecated in favor of IOCTL_KGSL_GPUMEM_CACHE_SYNC which
+ * supports both directions (flush and invalidate). This code will still
+ * work, but by definition it will do a flush of the cache which might not be
+ * what you want to have happen on a buffer following a GPU operation.  It is
+ * safer to go with IOCTL_KGSL_GPUMEM_CACHE_SYNC
+ */
+
+#define IOCTL_KGSL_SHAREDMEM_FLUSH_CACHE \
+	_IOW(KGSL_IOC_TYPE, 0x24, struct kgsl_sharedmem_free)
+
+struct kgsl_drawctxt_set_bin_base_offset {
+	unsigned int drawctxt_id;
+	unsigned int offset;
+};
+
+#define IOCTL_KGSL_DRAWCTXT_SET_BIN_BASE_OFFSET \
+	_IOW(KGSL_IOC_TYPE, 0x25, struct kgsl_drawctxt_set_bin_base_offset)
+
+enum kgsl_cmdwindow_type {
+	KGSL_CMDWINDOW_MIN     = 0x00000000,
+	KGSL_CMDWINDOW_2D      = 0x00000000,
+	KGSL_CMDWINDOW_3D      = 0x00000001, /* legacy */
+	KGSL_CMDWINDOW_MMU     = 0x00000002,
+	KGSL_CMDWINDOW_ARBITER = 0x000000FF,
+	KGSL_CMDWINDOW_MAX     = 0x000000FF,
+};
+
+/* write to the command window */
+struct kgsl_cmdwindow_write {
+	enum kgsl_cmdwindow_type target;
+	unsigned int addr;
+	unsigned int data;
+};
+
+#define IOCTL_KGSL_CMDWINDOW_WRITE \
+	_IOW(KGSL_IOC_TYPE, 0x2e, struct kgsl_cmdwindow_write)
+
+struct kgsl_gpumem_alloc {
+	unsigned long gpuaddr; /* output param */
+	size_t size;
+	unsigned int flags;
+};
+
+#define IOCTL_KGSL_GPUMEM_ALLOC \
+	_IOWR(KGSL_IOC_TYPE, 0x2f, struct kgsl_gpumem_alloc)
+
+struct kgsl_cff_syncmem {
+	unsigned long gpuaddr;
+	size_t len;
+	unsigned int __pad[2]; /* For future binary compatibility */
+};
+
+#define IOCTL_KGSL_CFF_SYNCMEM \
+	_IOW(KGSL_IOC_TYPE, 0x30, struct kgsl_cff_syncmem)
+
+/*
+ * A timestamp event allows the user space to register an action following an
+ * expired timestamp. Note IOCTL_KGSL_TIMESTAMP_EVENT has been redefined to
+ * _IOWR to support fences which need to return a fd for the priv parameter.
+ */
+
+struct kgsl_timestamp_event {
+	int type;                /* Type of event (see list below) */
+	unsigned int timestamp;  /* Timestamp to trigger event on */
+	unsigned int context_id; /* Context for the timestamp */
+	void __user *priv;	 /* Pointer to the event specific blob */
+	size_t len;              /* Size of the event specific blob */
+};
+
+#define IOCTL_KGSL_TIMESTAMP_EVENT_OLD \
+	_IOW(KGSL_IOC_TYPE, 0x31, struct kgsl_timestamp_event)
+
+/* A genlock timestamp event releases an existing lock on timestamp expire */
+
+#define KGSL_TIMESTAMP_EVENT_GENLOCK 1
+
+struct kgsl_timestamp_event_genlock {
+	int handle; /* Handle of the genlock lock to release */
+};
+
+/* A fence timestamp event releases an existing lock on timestamp expire */
+
+#define KGSL_TIMESTAMP_EVENT_FENCE 2
+
+struct kgsl_timestamp_event_fence {
+	int fence_fd; /* Fence to signal */
+};
+
+/*
+ * Set a property within the kernel.  Uses the same structure as
+ * IOCTL_KGSL_GETPROPERTY
+ */
+
+#define IOCTL_KGSL_SETPROPERTY \
+	_IOW(KGSL_IOC_TYPE, 0x32, struct kgsl_device_getproperty)
+
+#define IOCTL_KGSL_TIMESTAMP_EVENT \
+	_IOWR(KGSL_IOC_TYPE, 0x33, struct kgsl_timestamp_event)
+
+/**
+ * struct kgsl_gpumem_alloc_id - argument to IOCTL_KGSL_GPUMEM_ALLOC_ID
+ * @id: returned id value for this allocation.
+ * @flags: mask of KGSL_MEM* values requested and actual flags on return.
+ * @size: requested size of the allocation and actual size on return.
+ * @mmapsize: returned size to pass to mmap() which may be larger than 'size'
+ * @gpuaddr: returned GPU address for the allocation
+ *
+ * Allocate memory for access by the GPU. The flags and size fields are echoed
+ * back by the kernel, so that the caller can know if the request was
+ * adjusted.
+ *
+ * Supported flags:
+ * KGSL_MEMFLAGS_GPUREADONLY: the GPU will be unable to write to the buffer
+ * KGSL_MEMTYPE*: usage hint for debugging aid
+ * KGSL_MEMALIGN*: alignment hint, may be ignored or adjusted by the kernel.
+ * KGSL_MEMFLAGS_USE_CPU_MAP: If set on call and return, the returned GPU
+ * address will be 0. Calling mmap() will set the GPU address.
+ */
+struct kgsl_gpumem_alloc_id {
+	unsigned int id;
+	unsigned int flags;
+	size_t size;
+	size_t mmapsize;
+	unsigned long gpuaddr;
+/* private: reserved for future use*/
+	unsigned long __pad[2];
+};
+
+#define IOCTL_KGSL_GPUMEM_ALLOC_ID \
+	_IOWR(KGSL_IOC_TYPE, 0x34, struct kgsl_gpumem_alloc_id)
+
+/**
+ * struct kgsl_gpumem_free_id - argument to IOCTL_KGSL_GPUMEM_FREE_ID
+ * @id: GPU allocation id to free
+ *
+ * Free an allocation by id, in case a GPU address has not been assigned or
+ * is unknown. Freeing an allocation by id with this ioctl and freeing it by
+ * GPU address with IOCTL_KGSL_SHAREDMEM_FREE are equivalent.
+ */
+struct kgsl_gpumem_free_id {
+	unsigned int id;
+/* private: reserved for future use*/
+	unsigned int __pad;
+};
+
+#define IOCTL_KGSL_GPUMEM_FREE_ID \
+	_IOWR(KGSL_IOC_TYPE, 0x35, struct kgsl_gpumem_free_id)
+
+/**
+ * struct kgsl_gpumem_get_info - argument to IOCTL_KGSL_GPUMEM_GET_INFO
+ * @gpuaddr: GPU address to query. Also set on return.
+ * @id: GPU allocation id to query. Also set on return.
+ * @flags: returned mask of KGSL_MEM* values.
+ * @size: returned size of the allocation.
+ * @mmapsize: returned size to pass to mmap(), which may be larger than 'size'
+ * @useraddr: returned address of the userspace mapping for this buffer
+ *
+ * This ioctl allows querying of all user visible attributes of an existing
+ * allocation, by either the GPU address or the id returned by a previous
+ * call to IOCTL_KGSL_GPUMEM_ALLOC_ID. Legacy allocation ioctls may not
+ * return all attributes so this ioctl can be used to look them up if needed.
+ *
+ */
+struct kgsl_gpumem_get_info {
+	unsigned long gpuaddr;
+	unsigned int id;
+	unsigned int flags;
+	size_t size;
+	size_t mmapsize;
+	unsigned long useraddr;
+/* private: reserved for future use*/
+	unsigned long __pad[4];
+};
+
+#define IOCTL_KGSL_GPUMEM_GET_INFO\
+	_IOWR(KGSL_IOC_TYPE, 0x36, struct kgsl_gpumem_get_info)
+
+/**
+ * struct kgsl_gpumem_sync_cache - argument to IOCTL_KGSL_GPUMEM_SYNC_CACHE
+ * @gpuaddr: GPU address of the buffer to sync.
+ * @id: id of the buffer to sync. Either gpuaddr or id is sufficient.
+ * @op: a mask of KGSL_GPUMEM_CACHE_* values
+ * @offset: offset into the buffer
+ * @length: number of bytes starting from offset to perform
+ * the cache operation on
+ *
+ * Sync the L2 cache for memory headed to and from the GPU - this replaces
+ * KGSL_SHAREDMEM_FLUSH_CACHE since it can handle cache management for both
+ * directions
+ *
+ */
+struct kgsl_gpumem_sync_cache {
+	unsigned long gpuaddr;
+	unsigned int id;
+	unsigned int op;
+	size_t offset;
+	size_t length;
+};
+
+#define KGSL_GPUMEM_CACHE_CLEAN (1 << 0)
+#define KGSL_GPUMEM_CACHE_TO_GPU KGSL_GPUMEM_CACHE_CLEAN
+
+#define KGSL_GPUMEM_CACHE_INV (1 << 1)
+#define KGSL_GPUMEM_CACHE_FROM_GPU KGSL_GPUMEM_CACHE_INV
+
+#define KGSL_GPUMEM_CACHE_FLUSH \
+	(KGSL_GPUMEM_CACHE_CLEAN | KGSL_GPUMEM_CACHE_INV)
+
+/* Flag to ensure backwards compatibility of kgsl_gpumem_sync_cache struct */
+#define KGSL_GPUMEM_CACHE_RANGE (1 << 31U)
+
+#define IOCTL_KGSL_GPUMEM_SYNC_CACHE \
+	_IOW(KGSL_IOC_TYPE, 0x37, struct kgsl_gpumem_sync_cache)
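+
+/*
+ * Usage sketch (editorial, not part of the UAPI): clean a sub-range of
+ * a buffer the CPU has written before the GPU reads it.  Setting
+ * KGSL_GPUMEM_CACHE_RANGE marks offset/length as valid (assumption:
+ * without it the whole buffer is synced).  Assumes <sys/ioctl.h>.
+ */
+static int kgsl_example_clean_for_gpu(int fd, unsigned int id,
+				      size_t offset, size_t length)
+{
+	struct kgsl_gpumem_sync_cache sync = {
+		.id = id,
+		.op = KGSL_GPUMEM_CACHE_TO_GPU | KGSL_GPUMEM_CACHE_RANGE,
+		.offset = offset,
+		.length = length,
+	};
+
+	return ioctl(fd, IOCTL_KGSL_GPUMEM_SYNC_CACHE, &sync);
+}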
+
+/**
+ * struct kgsl_perfcounter_get - argument to IOCTL_KGSL_PERFCOUNTER_GET
+ * @groupid: Performance counter group ID
+ * @countable: Countable to select within the group
+ * @offset: Return offset of the reserved LO counter
+ * @offset_hi: Return offset of the reserved HI counter
+ *
+ * Get an available performance counter from a specified groupid.  The offset
+ * of the performance counter will be returned after successfully assigning
+ * the countable to the counter for the specified group.  An error will be
+ * returned and an offset of 0 if the groupid is invalid or there are no
+ * more counters left.  After successfully getting a perfcounter, the user
+ * must call kgsl_perfcounter_put(groupid, countable) when finished with
+ * the perfcounter to clear up perfcounter resources.
+ *
+ */
+struct kgsl_perfcounter_get {
+	unsigned int groupid;
+	unsigned int countable;
+	unsigned int offset;
+	unsigned int offset_hi;
+/* private: reserved for future use */
+	unsigned int __pad; /* For future binary compatibility */
+};
+
+#define IOCTL_KGSL_PERFCOUNTER_GET \
+	_IOWR(KGSL_IOC_TYPE, 0x38, struct kgsl_perfcounter_get)
+
+/**
+ * struct kgsl_perfcounter_put - argument to IOCTL_KGSL_PERFCOUNTER_PUT
+ * @groupid: Performance counter group ID
+ * @countable: Countable to release within the group
+ *
+ * Put an allocated performance counter to allow others to have access to the
+ * resource that was previously taken.  This is only to be called after
+ * successfully getting a performance counter from kgsl_perfcounter_get().
+ *
+ */
+struct kgsl_perfcounter_put {
+	unsigned int groupid;
+	unsigned int countable;
+/* private: reserved for future use */
+	unsigned int __pad[2]; /* For future binary compatibility */
+};
+
+#define IOCTL_KGSL_PERFCOUNTER_PUT \
+	_IOW(KGSL_IOC_TYPE, 0x39, struct kgsl_perfcounter_put)
+
+/**
+ * struct kgsl_perfcounter_query - argument to IOCTL_KGSL_PERFCOUNTER_QUERY
+ * @groupid: Performance counter group ID
+ * @countables: Return array of the currently active countables
+ * @count: Number of entries in the countables array
+ * @max_counters: Return total number of counters for the group ID
+ *
+ * Query the available performance counters given a groupid.  The array
+ * *countables is used to return the currently active countables for the
+ * counters.  The size of the array is passed in as count, so the kernel
+ * will write at most count entries for the group id.  The total number of
+ * available counters for the group ID is returned in max_counters.
+ * If the array or count passed in are invalid, then only the maximum
+ * number of counters will be returned; no data will be written to
+ * *countables.  If the groupid is invalid an error code will be returned.
+ *
+ */
+struct kgsl_perfcounter_query {
+	unsigned int groupid;
+	/* Array to return the current countable for up to size counters */
+	unsigned int __user *countables;
+	unsigned int count;
+	unsigned int max_counters;
+/* private: reserved for future use */
+	unsigned int __pad[2]; /* For future binary compatibility */
+};
+
+#define IOCTL_KGSL_PERFCOUNTER_QUERY \
+	_IOWR(KGSL_IOC_TYPE, 0x3A, struct kgsl_perfcounter_query)
+
+/**
+ * struct kgsl_perfcounter_read - argument to IOCTL_KGSL_PERFCOUNTER_READ
+ * @reads: Array of kgsl_perfcounter_read_group entries, each naming a
+ * groupid/countable pair and returning its value
+ * @count: Number of entries in the reads array
+ *
+ * Read the current value of a performance counter given by the groupid
+ * and countable of each entry in *reads.
+ *
+ */
+
+struct kgsl_perfcounter_read_group {
+	unsigned int groupid;
+	unsigned int countable;
+	unsigned long long value;
+};
+
+struct kgsl_perfcounter_read {
+	struct kgsl_perfcounter_read_group __user *reads;
+	unsigned int count;
+/* private: reserved for future use */
+	unsigned int __pad[2]; /* For future binary compatibility */
+};
+
+#define IOCTL_KGSL_PERFCOUNTER_READ \
+	_IOWR(KGSL_IOC_TYPE, 0x3B, struct kgsl_perfcounter_read)
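+
+/*
+ * Usage sketch (editorial, not part of the UAPI): reserve a countable,
+ * sample it once and release it.  The groupid/countable values are
+ * placeholders supplied by the caller; assumes <sys/ioctl.h>.
+ */
+static int kgsl_example_sample_counter(int fd, unsigned int groupid,
+				       unsigned int countable,
+				       unsigned long long *value)
+{
+	struct kgsl_perfcounter_get get = {
+		.groupid = groupid, .countable = countable,
+	};
+	struct kgsl_perfcounter_read_group grp = {
+		.groupid = groupid, .countable = countable,
+	};
+	struct kgsl_perfcounter_read read = { .reads = &grp, .count = 1 };
+	struct kgsl_perfcounter_put put = {
+		.groupid = groupid, .countable = countable,
+	};
+	int ret;
+
+	if (ioctl(fd, IOCTL_KGSL_PERFCOUNTER_GET, &get))
+		return -1;
+	ret = ioctl(fd, IOCTL_KGSL_PERFCOUNTER_READ, &read);
+	if (ret == 0)
+		*value = grp.value;
+	ioctl(fd, IOCTL_KGSL_PERFCOUNTER_PUT, &put);
+	return ret;
+}
+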
+/*
+ * struct kgsl_gpumem_sync_cache_bulk - argument to
+ * IOCTL_KGSL_GPUMEM_SYNC_CACHE_BULK
+ * @id_list: list of GPU buffer ids of the buffers to sync
+ * @count: number of GPU buffer ids in id_list
+ * @op: a mask of KGSL_GPUMEM_CACHE_* values
+ *
+ * Sync the cache for memory headed to and from the GPU. Certain
+ * optimizations can be made on the cache operation based on the total
+ * size of the working set of memory to be managed.
+ */
+struct kgsl_gpumem_sync_cache_bulk {
+	unsigned int __user *id_list;
+	unsigned int count;
+	unsigned int op;
+/* private: reserved for future use */
+	unsigned int __pad[2]; /* For future binary compatibility */
+};
+
+#define IOCTL_KGSL_GPUMEM_SYNC_CACHE_BULK \
+	_IOWR(KGSL_IOC_TYPE, 0x3C, struct kgsl_gpumem_sync_cache_bulk)
+
+/*
+ * struct kgsl_cmd_syncpoint_timestamp
+ * @context_id: ID of a KGSL context
+ * @timestamp: GPU timestamp
+ *
+ * This structure defines a syncpoint comprising a context/timestamp pair. A
+ * list of these may be passed by IOCTL_KGSL_SUBMIT_COMMANDS to define
+ * dependencies that must be met before the command can be submitted to the
+ * hardware
+ */
+struct kgsl_cmd_syncpoint_timestamp {
+	unsigned int context_id;
+	unsigned int timestamp;
+};
+
+struct kgsl_cmd_syncpoint_fence {
+	int fd;
+};
+
+/**
+ * struct kgsl_cmd_syncpoint - Define a sync point for a command batch
+ * @type: type of sync point defined here
+ * @priv: Pointer to the type specific buffer
+ * @size: Size of the type specific buffer
+ *
+ * This structure contains pointers defining a specific command sync point.
+ * The pointer and size should point to a type appropriate structure.
+ */
+struct kgsl_cmd_syncpoint {
+	int type;
+	void __user *priv;
+	size_t size;
+};
+
+/* Flag to indicate that the cmdlist may contain memlists */
+#define KGSL_IBDESC_MEMLIST 0x1
+
+/* Flag to point out the cmdbatch profiling buffer in the memlist */
+#define KGSL_IBDESC_PROFILING_BUFFER 0x2
+
+/**
+ * struct kgsl_submit_commands - Argument to IOCTL_KGSL_SUBMIT_COMMANDS
+ * @context_id: KGSL context ID that owns the commands
+ * @flags: Mask of KGSL_CMDBATCH_* flags for this submission
+ * @cmdlist: User pointer to a list of kgsl_ibdesc structures
+ * @numcmds: Number of commands listed in cmdlist
+ * @synclist: User pointer to a list of kgsl_cmd_syncpoint structures
+ * @numsyncs: Number of sync points listed in synclist
+ * @timestamp: On entry, a user defined timestamp; on exit, the timestamp
+ * assigned to the command batch
+ *
+ * This structure specifies a command to send to the GPU hardware.  This is
+ * similar to kgsl_issueibcmds except that it doesn't support the legacy way
+ * to submit IB lists and it adds sync points to block the IB until the
+ * dependencies are satisfied.  This entry point is the new and preferred way
+ * to submit commands to the GPU. The memory list can be used to specify all
+ * memory that is referenced in the current set of commands.
+ */
+
+struct kgsl_submit_commands {
+	unsigned int context_id;
+	unsigned int flags;
+	struct kgsl_ibdesc __user *cmdlist;
+	unsigned int numcmds;
+	struct kgsl_cmd_syncpoint __user *synclist;
+	unsigned int numsyncs;
+	unsigned int timestamp;
+/* private: reserved for future use */
+	unsigned int __pad[4];
+};
+
+#define IOCTL_KGSL_SUBMIT_COMMANDS \
+	_IOWR(KGSL_IOC_TYPE, 0x3D, struct kgsl_submit_commands)
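+
+/*
+ * Usage sketch (editorial, not part of the UAPI): submit one IB that
+ * waits on a sync fence fd.  The IB address/size are assumed to
+ * describe an already-built indirect buffer; assumes <sys/ioctl.h>.
+ */
+static int kgsl_example_submit(int fd, unsigned int ctxt,
+			       unsigned long ib_gpuaddr, size_t ib_dwords,
+			       int fence_fd, unsigned int *ts)
+{
+	struct kgsl_ibdesc ib = {
+		.gpuaddr = ib_gpuaddr,
+		.sizedwords = ib_dwords,
+	};
+	struct kgsl_cmd_syncpoint_fence fence = { .fd = fence_fd };
+	struct kgsl_cmd_syncpoint sync = {
+		.type = KGSL_CMD_SYNCPOINT_TYPE_FENCE,
+		.priv = &fence,
+		.size = sizeof(fence),
+	};
+	struct kgsl_submit_commands cmds = {
+		.context_id = ctxt,
+		.cmdlist = &ib,
+		.numcmds = 1,
+		.synclist = &sync,
+		.numsyncs = 1,
+	};
+
+	if (ioctl(fd, IOCTL_KGSL_SUBMIT_COMMANDS, &cmds))
+		return -1;
+	*ts = cmds.timestamp;
+	return 0;
+}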
+
+/**
+ * struct kgsl_device_constraint - device constraint argument
+ * @context_id: KGSL context ID
+ * @type: type of constraint i.e pwrlevel/none
+ * @data: constraint data
+ * @size: size of the constraint data
+ */
+struct kgsl_device_constraint {
+	unsigned int type;
+	unsigned int context_id;
+	void __user *data;
+	size_t size;
+};
+
+/* Constraint Type */
+#define KGSL_CONSTRAINT_NONE 0
+#define KGSL_CONSTRAINT_PWRLEVEL 1
+
+/* PWRLEVEL constraint level */
+/* set to min frequency */
+#define KGSL_CONSTRAINT_PWR_MIN    0
+/* set to max frequency */
+#define KGSL_CONSTRAINT_PWR_MAX    1
+
+struct kgsl_device_constraint_pwrlevel {
+	unsigned int level;
+};
+
+/**
+ * struct kgsl_syncsource_create - Argument to IOCTL_KGSL_SYNCSOURCE_CREATE
+ * @id: returned id for the syncsource that was created.
+ *
+ * This ioctl creates a userspace sync timeline.
+ */
+
+struct kgsl_syncsource_create {
+	unsigned int id;
+/* private: reserved for future use */
+	unsigned int __pad[3];
+};
+
+#define IOCTL_KGSL_SYNCSOURCE_CREATE \
+	_IOWR(KGSL_IOC_TYPE, 0x40, struct kgsl_syncsource_create)
+
+/**
+ * struct kgsl_syncsource_destroy - Argument to IOCTL_KGSL_SYNCSOURCE_DESTROY
+ * @id: syncsource id to destroy
+ *
+ * This ioctl destroys a userspace sync timeline.
+ */
+
+struct kgsl_syncsource_destroy {
+	unsigned int id;
+/* private: reserved for future use */
+	unsigned int __pad[3];
+};
+
+#define IOCTL_KGSL_SYNCSOURCE_DESTROY \
+	_IOWR(KGSL_IOC_TYPE, 0x41, struct kgsl_syncsource_destroy)
+
+/**
+ * struct kgsl_syncsource_create_fence - Argument to
+ *     IOCTL_KGSL_SYNCSOURCE_CREATE_FENCE
+ * @id: syncsource id
+ * @fence_fd: returned sync_fence fd
+ *
+ * Create a fence that may be signaled by userspace by calling
+ * IOCTL_KGSL_SYNCSOURCE_SIGNAL_FENCE. There are no order dependencies between
+ * these fences.
+ */
+struct kgsl_syncsource_create_fence {
+	unsigned int id;
+	int fence_fd;
+/* private: reserved for future use */
+	unsigned int __pad[4];
+};
+
+/**
+ * struct kgsl_syncsource_signal_fence - Argument to
+ *     IOCTL_KGSL_SYNCSOURCE_SIGNAL_FENCE
+ * @id: syncsource id
+ * @fence_fd: sync_fence fd to signal
+ *
+ * Signal a fence that was created by a IOCTL_KGSL_SYNCSOURCE_CREATE_FENCE
+ * call using the same syncsource id. This allows a fence to be shared
+ * to other processes but only signaled by the process owning the fd
+ * used to create the fence.
+ */
+#define IOCTL_KGSL_SYNCSOURCE_CREATE_FENCE \
+	_IOWR(KGSL_IOC_TYPE, 0x42, struct kgsl_syncsource_create_fence)
+
+struct kgsl_syncsource_signal_fence {
+	unsigned int id;
+	int fence_fd;
+/* private: reserved for future use */
+	unsigned int __pad[4];
+};
+
+#define IOCTL_KGSL_SYNCSOURCE_SIGNAL_FENCE \
+	_IOWR(KGSL_IOC_TYPE, 0x43, struct kgsl_syncsource_signal_fence)
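+
+/*
+ * Usage sketch (editorial, not part of the UAPI): create a syncsource,
+ * hand out one fence fd, then signal it.  Assumes <sys/ioctl.h>.
+ */
+static int kgsl_example_syncsource(int fd)
+{
+	struct kgsl_syncsource_create create = { 0 };
+	struct kgsl_syncsource_create_fence mkfence = { 0 };
+	struct kgsl_syncsource_signal_fence signal = { 0 };
+
+	if (ioctl(fd, IOCTL_KGSL_SYNCSOURCE_CREATE, &create))
+		return -1;
+	mkfence.id = create.id;
+	if (ioctl(fd, IOCTL_KGSL_SYNCSOURCE_CREATE_FENCE, &mkfence))
+		return -1;
+	/* mkfence.fence_fd may now be shared with a consumer */
+	signal.id = create.id;
+	signal.fence_fd = mkfence.fence_fd;
+	return ioctl(fd, IOCTL_KGSL_SYNCSOURCE_SIGNAL_FENCE, &signal);
+}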
+
+/**
+ * struct kgsl_cff_sync_gpuobj - Argument to IOCTL_KGSL_CFF_SYNC_GPUOBJ
+ * @offset: Offset into the GPU object to sync
+ * @length: Number of bytes to sync
+ * @id: ID of the GPU object to sync
+ */
+struct kgsl_cff_sync_gpuobj {
+	uint64_t offset;
+	uint64_t length;
+	unsigned int id;
+};
+
+#define IOCTL_KGSL_CFF_SYNC_GPUOBJ \
+	_IOW(KGSL_IOC_TYPE, 0x44, struct kgsl_cff_sync_gpuobj)
+
+/**
+ * struct kgsl_gpuobj_alloc - Argument to IOCTL_KGSL_GPUOBJ_ALLOC
+ * @size: Size in bytes of the object to allocate
+ * @flags: mask of KGSL_MEMFLAG_* bits
+ * @va_len: Size in bytes of the virtual region to allocate
+ * @mmapsize: Returns the mmap() size of the object
+ * @id: Returns the GPU object ID of the new object
+ * @metadata_len: Length of the metadata to copy from the user
+ * @metadata: Pointer to the user specified metadata to store for the object
+ */
+struct kgsl_gpuobj_alloc {
+	uint64_t size;
+	uint64_t flags;
+	uint64_t va_len;
+	uint64_t mmapsize;
+	unsigned int id;
+	unsigned int metadata_len;
+	uint64_t metadata;
+};
+
+/* Let the user know that this header supports the gpuobj metadata */
+#define KGSL_GPUOBJ_ALLOC_METADATA_MAX 64
+
+#define IOCTL_KGSL_GPUOBJ_ALLOC \
+	_IOWR(KGSL_IOC_TYPE, 0x45, struct kgsl_gpuobj_alloc)
+
+/**
+ * struct kgsl_gpuobj_free - Argument to IOCTL_KGSL_GPUOBJ_FREE
+ * @flags: Mask of: KGSL_GPUOBJ_FREE_ON_EVENT
+ * @priv: Pointer to the private object if KGSL_GPUOBJ_FREE_ON_EVENT is
+ * specified
+ * @id: ID of the GPU object to free
+ * @type: If KGSL_GPUOBJ_FREE_ON_EVENT is specified, the type of asynchronous
+ * event to free on
+ * @len: Length of the data passed in priv
+ */
+struct kgsl_gpuobj_free {
+	uint64_t flags;
+	uint64_t __user priv;
+	unsigned int id;
+	unsigned int type;
+	unsigned int len;
+};
+
+#define KGSL_GPUOBJ_FREE_ON_EVENT 1
+
+#define KGSL_GPU_EVENT_TIMESTAMP 1
+#define KGSL_GPU_EVENT_FENCE     2
+
+/**
+ * struct kgsl_gpu_event_timestamp - Specifies a timestamp event to free a GPU
+ * object on
+ * @context_id: ID of the timestamp event to wait for
+ * @timestamp: Timestamp of the timestamp event to wait for
+ */
+struct kgsl_gpu_event_timestamp {
+	unsigned int context_id;
+	unsigned int timestamp;
+};
+
+/**
+ * struct kgsl_gpu_event_fence - Specifies a fence ID to free a GPU object on
+ * @fd: File descriptor for the fence
+ */
+struct kgsl_gpu_event_fence {
+	int fd;
+};
+
+#define IOCTL_KGSL_GPUOBJ_FREE \
+	_IOW(KGSL_IOC_TYPE, 0x46, struct kgsl_gpuobj_free)
+
+/**
+ * struct kgsl_gpuobj_info - argument to IOCTL_KGSL_GPUOBJ_INFO
+ * @gpuaddr: GPU address of the object
+ * @flags: Current flags for the object
+ * @size: Size of the object
+ * @va_len: VA size of the object
+ * @va_addr: Virtual address of the object (if it is mapped)
+ * @id: GPU object ID of the object to query
+ */
+struct kgsl_gpuobj_info {
+	uint64_t gpuaddr;
+	uint64_t flags;
+	uint64_t size;
+	uint64_t va_len;
+	uint64_t va_addr;
+	unsigned int id;
+};
+
+#define IOCTL_KGSL_GPUOBJ_INFO \
+	_IOWR(KGSL_IOC_TYPE, 0x47, struct kgsl_gpuobj_info)
+
+/**
+ * struct kgsl_gpuobj_import - argument to IOCTL_KGSL_GPUOBJ_IMPORT
+ * @priv: Pointer to the private data for the import type
+ * @priv_len: Length of the private data
+ * @flags: Mask of KGSL_MEMFLAG_ flags
+ * @type: Type of the import (KGSL_USER_MEM_TYPE_*)
+ * @id: Returns the ID of the new GPU object
+ */
+struct kgsl_gpuobj_import {
+	uint64_t __user priv;
+	uint64_t priv_len;
+	uint64_t flags;
+	unsigned int type;
+	unsigned int id;
+};
+
+/**
+ * struct kgsl_gpuobj_import_dma_buf - import a dmabuf object
+ * @fd: File descriptor for the dma-buf object
+ */
+struct kgsl_gpuobj_import_dma_buf {
+	int fd;
+};
+
+/**
+ * struct kgsl_gpuobj_import_useraddr - import an object based on a useraddr
+ * @virtaddr: Virtual address of the object to import
+ */
+struct kgsl_gpuobj_import_useraddr {
+	uint64_t virtaddr;
+};
+
+#define IOCTL_KGSL_GPUOBJ_IMPORT \
+	_IOWR(KGSL_IOC_TYPE, 0x48, struct kgsl_gpuobj_import)
+
+/**
+ * struct kgsl_gpuobj_sync_obj - Individual GPU object to sync
+ * @offset: Offset within the GPU object to sync
+ * @length: Number of bytes to sync
+ * @id: ID of the GPU object to sync
+ * @op: Cache operation to execute
+ */
+
+struct kgsl_gpuobj_sync_obj {
+	uint64_t offset;
+	uint64_t length;
+	unsigned int id;
+	unsigned int op;
+};
+
+/**
+ * struct kgsl_gpuobj_sync - Argument for IOCTL_KGSL_GPUOBJ_SYNC
+ * @objs: Pointer to an array of kgsl_gpuobj_sync_obj structs
+ * @obj_len: Size of each item in the array
+ * @count: Number of items in the array
+ */
+
+struct kgsl_gpuobj_sync {
+	uint64_t __user objs;
+	unsigned int obj_len;
+	unsigned int count;
+};
+
+#define IOCTL_KGSL_GPUOBJ_SYNC \
+	_IOW(KGSL_IOC_TYPE, 0x49, struct kgsl_gpuobj_sync)
+
+/**
+ * struct kgsl_command_object - GPU command object
+ * @offset: GPU address offset of the object
+ * @gpuaddr: GPU address of the object
+ * @size: Size of the object
+ * @flags: Current flags for the object
+ * @id: GPU command object ID
+ */
+struct kgsl_command_object {
+	uint64_t offset;
+	uint64_t gpuaddr;
+	uint64_t size;
+	unsigned int flags;
+	unsigned int id;
+};
+
+/**
+ * struct kgsl_command_syncpoint - GPU syncpoint object
+ * @priv: Pointer to the type specific buffer
+ * @size: Size of the type specific buffer
+ * @type: type of sync point defined here
+ */
+struct kgsl_command_syncpoint {
+	uint64_t __user priv;
+	uint64_t size;
+	unsigned int type;
+};
+
+/**
+ * struct kgsl_gpu_command - Argument for IOCTL_KGSL_GPU_COMMAND
+ * @flags: Current flags for the object
+ * @cmdlist: List of kgsl_command_objects for submission
+ * @cmdsize: Size of the kgsl_command_object structure
+ * @numcmds: Number of kgsl_command_objects in the command list
+ * @objlist: List of kgsl_command_objects for tracking
+ * @objsize: Size of the kgsl_command_object structure
+ * @numobjs: Number of kgsl_command_objects in the object list
+ * @synclist: List of kgsl_command_syncpoints
+ * @syncsize: Size of the kgsl_command_syncpoint structure
+ * @numsyncs: Number of kgsl_command_syncpoints in the syncpoint list
+ * @context_id: Context ID submitting the kgsl_gpu_command
+ * @timestamp: Timestamp for the submitted commands
+ */
+struct kgsl_gpu_command {
+	uint64_t flags;
+	uint64_t __user cmdlist;
+	unsigned int cmdsize;
+	unsigned int numcmds;
+	uint64_t __user objlist;
+	unsigned int objsize;
+	unsigned int numobjs;
+	uint64_t __user synclist;
+	unsigned int syncsize;
+	unsigned int numsyncs;
+	unsigned int context_id;
+	unsigned int timestamp;
+};
+
+#define IOCTL_KGSL_GPU_COMMAND \
+	_IOWR(KGSL_IOC_TYPE, 0x4A, struct kgsl_gpu_command)
+
+/**
+ * struct kgsl_preemption_counters_query - argument to
+ * IOCTL_KGSL_PREEMPTIONCOUNTER_QUERY
+ * @counters: Return preemption counters array
+ * @size_user: Size allocated by userspace
+ * @size_priority_level: Size of preemption counters for each
+ * priority level
+ * @max_priority_level: Return max number of priority levels
+ *
+ * Query the available preemption counters. The array counters
+ * is used to return preemption counters. The size of the array
+ * is passed in so the kernel will only write at most size_user
+ * or max available preemption counters.  The total number of
+ * preemption counters is returned in max_priority_level. If the
+ * array or size passed in are invalid, then an error is
+ * returned back.
+ */
+struct kgsl_preemption_counters_query {
+	uint64_t __user counters;
+	unsigned int size_user;
+	unsigned int size_priority_level;
+	unsigned int max_priority_level;
+};
+
+#define IOCTL_KGSL_PREEMPTIONCOUNTER_QUERY \
+	_IOWR(KGSL_IOC_TYPE, 0x4B, struct kgsl_preemption_counters_query)
+
+/**
+ * struct kgsl_gpuobj_set_info - argument for IOCTL_KGSL_GPUOBJ_SET_INFO
+ * @flags: Flags to indicate which parameters to change
+ * @metadata:  If KGSL_GPUOBJ_SET_INFO_METADATA is set, a pointer to the new
+ * metadata
+ * @id: GPU memory object ID to change
+ * @metadata_len:  If KGSL_GPUOBJ_SET_INFO_METADATA is set, the length of the
+ * new metadata string
+ * @type: If KGSL_GPUOBJ_SET_INFO_TYPE is set, the new type of the memory object
+ */
+
+#define KGSL_GPUOBJ_SET_INFO_METADATA (1 << 0)
+#define KGSL_GPUOBJ_SET_INFO_TYPE (1 << 1)
+
+struct kgsl_gpuobj_set_info {
+	uint64_t flags;
+	uint64_t metadata;
+	unsigned int id;
+	unsigned int metadata_len;
+	unsigned int type;
+};
+
+#define IOCTL_KGSL_GPUOBJ_SET_INFO \
+	_IOW(KGSL_IOC_TYPE, 0x4C, struct kgsl_gpuobj_set_info)
+
+/**
+ * struct kgsl_sparse_phys_alloc - Argument for IOCTL_KGSL_SPARSE_PHYS_ALLOC
+ * @size: Size in bytes to back
+ * @pagesize: Pagesize alignment required
+ * @flags: Flags for this allocation
+ * @id: Returned ID for this allocation
+ */
+struct kgsl_sparse_phys_alloc {
+	uint64_t size;
+	uint64_t pagesize;
+	uint64_t flags;
+	unsigned int id;
+};
+
+#define IOCTL_KGSL_SPARSE_PHYS_ALLOC \
+	_IOWR(KGSL_IOC_TYPE, 0x50, struct kgsl_sparse_phys_alloc)
+
+/**
+ * struct kgsl_sparse_phys_free - Argument for IOCTL_KGSL_SPARSE_PHYS_FREE
+ * @id: ID to free
+ */
+struct kgsl_sparse_phys_free {
+	unsigned int id;
+};
+
+#define IOCTL_KGSL_SPARSE_PHYS_FREE \
+	_IOW(KGSL_IOC_TYPE, 0x51, struct kgsl_sparse_phys_free)
+
+/**
+ * struct kgsl_sparse_virt_alloc - Argument for IOCTL_KGSL_SPARSE_VIRT_ALLOC
+ * @size: Size in bytes to reserve
+ * @pagesize: Pagesize alignment required
+ * @flags: Flags for this allocation
+ * @id: Returned ID for this allocation
+ * @gpuaddr: Returned GPU address for this allocation
+ */
+struct kgsl_sparse_virt_alloc {
+	uint64_t size;
+	uint64_t pagesize;
+	uint64_t flags;
+	uint64_t gpuaddr;
+	unsigned int id;
+};
+
+#define IOCTL_KGSL_SPARSE_VIRT_ALLOC \
+	_IOWR(KGSL_IOC_TYPE, 0x52, struct kgsl_sparse_virt_alloc)
+
+/**
+ * struct kgsl_sparse_virt_free - Argument for IOCTL_KGSL_SPARSE_VIRT_FREE
+ * @id: ID to free
+ */
+struct kgsl_sparse_virt_free {
+	unsigned int id;
+};
+
+#define IOCTL_KGSL_SPARSE_VIRT_FREE \
+	_IOW(KGSL_IOC_TYPE, 0x53, struct kgsl_sparse_virt_free)
+
+/**
+ * struct kgsl_sparse_binding_object - Argument for kgsl_sparse_bind
+ * @virtoffset: Offset into the virtual ID
+ * @physoffset: Offset into the physical ID (bind only)
+ * @size: Size in bytes to reserve
+ * @flags: Flags for this kgsl_sparse_binding_object
+ * @id: Physical ID to bind (bind only)
+ */
+struct kgsl_sparse_binding_object {
+	uint64_t virtoffset;
+	uint64_t physoffset;
+	uint64_t size;
+	uint64_t flags;
+	unsigned int id;
+};
+
+/**
+ * struct kgsl_sparse_bind - Argument for IOCTL_KGSL_SPARSE_BIND
+ * @list: List of kgsl_sparse_binding_object structs to bind/unbind
+ * @id: Virtual ID to bind/unbind
+ * @size: Size of each kgsl_sparse_binding_object
+ * @count: Number of elements in list
+ */
+struct kgsl_sparse_bind {
+	uint64_t __user list;
+	unsigned int id;
+	unsigned int size;
+	unsigned int count;
+};
+
+#define IOCTL_KGSL_SPARSE_BIND \
+	_IOW(KGSL_IOC_TYPE, 0x54, struct kgsl_sparse_bind)
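+
+/*
+ * Illustrative usage sketch (not part of the original patch): backing a
+ * sparse virtual range with physical pages. SZ is a placeholder size
+ * aligned to the example 64KB page size; the binding-object flags (bind
+ * vs. unbind) are defined elsewhere and elided here, as is error handling.
+ *
+ *	struct kgsl_sparse_phys_alloc phys = { .size = SZ, .pagesize = 0x10000 };
+ *	struct kgsl_sparse_virt_alloc virt = { .size = SZ, .pagesize = 0x10000 };
+ *
+ *	ioctl(fd, IOCTL_KGSL_SPARSE_PHYS_ALLOC, &phys);
+ *	ioctl(fd, IOCTL_KGSL_SPARSE_VIRT_ALLOC, &virt);
+ *
+ *	struct kgsl_sparse_binding_object obj = {
+ *		.virtoffset = 0,
+ *		.physoffset = 0,
+ *		.size = SZ,
+ *		.id = phys.id,
+ *	};
+ *	struct kgsl_sparse_bind bind = {
+ *		.list = (uintptr_t)&obj,
+ *		.id = virt.id,
+ *		.size = sizeof(obj),
+ *		.count = 1,
+ *	};
+ *	ioctl(fd, IOCTL_KGSL_SPARSE_BIND, &bind);
+ */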
+
+/**
+ * struct kgsl_gpu_sparse_command - Argument for
+ * IOCTL_KGSL_GPU_SPARSE_COMMAND
+ * @flags: Current flags for the object
+ * @sparselist: List of kgsl_sparse_binding_object to bind/unbind
+ * @synclist: List of kgsl_command_syncpoints
+ * @sparsesize: Size of kgsl_sparse_binding_object
+ * @numsparse: Number of elements in list
+ * @syncsize: Size of each kgsl_command_syncpoint structure
+ * @numsyncs: Number of kgsl_command_syncpoints in syncpoint list
+ * @context_id: Context ID submitting the kgsl_gpu_sparse_command
+ * @timestamp: Timestamp for the submitted commands
+ * @id: Virtual ID to bind/unbind
+ */
+struct kgsl_gpu_sparse_command {
+	uint64_t flags;
+	uint64_t __user sparselist;
+	uint64_t __user synclist;
+	unsigned int sparsesize;
+	unsigned int numsparse;
+	unsigned int syncsize;
+	unsigned int numsyncs;
+	unsigned int context_id;
+	unsigned int timestamp;
+	unsigned int id;
+};
+
+#define IOCTL_KGSL_GPU_SPARSE_COMMAND \
+	_IOWR(KGSL_IOC_TYPE, 0x55, struct kgsl_gpu_sparse_command)
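+
+/*
+ * Illustrative note (not part of the original patch): unlike
+ * IOCTL_KGSL_SPARSE_BIND, this submission carries a context ID, a sync
+ * point list and a timestamp, which suggests the bind/unbind list is
+ * ordered against other GPU work on that context rather than applied
+ * immediately.
+ */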
+
+#endif /* _UAPI_MSM_KGSL_H */
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/include/uapi/linux/msm_mdp_ext.h	2019-01-22 16:16:28.567292264 +0100
@@ -0,0 +1,846 @@
+#ifndef _MSM_MDP_EXT_H_
+#define _MSM_MDP_EXT_H_
+
+#include <linux/msm_mdp.h>
+
+#define MDP_IOCTL_MAGIC 'S'
+/* atomic commit ioctl used for validate and commit request */
+#define MSMFB_ATOMIC_COMMIT	_IOWR(MDP_IOCTL_MAGIC, 128, void *)
+
+/*
+ * Ioctl for updating the layer position asynchronously. Initially, pipes
+ * should be configured with the MDP_LAYER_ASYNC flag set during the atomic
+ * commit, after which any number of position update calls can be made. This
+ * enables multiple position updates within a single vsync; the screen update
+ * happens only at vsync, and picks up the latest position.
+ *
+ * Limitations:
+ * - Currently supported only for video mode panels with single LM or dual LM
+ *   with source_split enabled.
+ * - Only position update is supported with no scaling/cropping.
+ * - Async layers should have unique z_order.
+ */
+#define MSMFB_ASYNC_POSITION_UPDATE _IOWR(MDP_IOCTL_MAGIC, 129, \
+					struct mdp_position_update)
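+
+/*
+ * Illustrative usage sketch (not part of the original patch): moving a
+ * layer between vsyncs, assuming "ndx" is the pipe index reserved by a
+ * previous MSMFB_ATOMIC_COMMIT with MDP_LAYER_ASYNC set and fb_fd is the
+ * framebuffer device fd. struct mdp_async_layer and struct
+ * mdp_position_update are defined later in this header.
+ *
+ *	struct mdp_async_layer layer = {
+ *		.pipe_ndx = ndx,
+ *		.src = { .x = 0, .y = 0 },
+ *		.dst = { .x = new_x, .y = new_y },
+ *	};
+ *	struct mdp_position_update update = {
+ *		.input_layers = &layer,
+ *		.input_layer_cnt = 1,
+ *	};
+ *
+ *	ioctl(fb_fd, MSMFB_ASYNC_POSITION_UPDATE, &update);
+ */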
+
+/*
+ * Ioctl for sending the config information.
+ * QSEED3 coefficient LUT tables are passed by the user space using this IOCTL.
+ */
+#define MSMFB_MDP_SET_CFG _IOW(MDP_IOCTL_MAGIC, 130, \
+					      struct mdp_set_cfg)
+
+/*
+ * Ioctl for setting the PLL PPM.
+ * PLL PPM is passed by the user space using this IOCTL.
+ */
+#define MSMFB_MDP_SET_PANEL_PPM _IOW(MDP_IOCTL_MAGIC, 131, int)
+
+/*
+ * To allow proper structure padding for 64bit/32bit target
+ */
+#ifdef __LP64__
+#define MDP_LAYER_COMMIT_V1_PAD 2
+#else
+#define MDP_LAYER_COMMIT_V1_PAD 3
+#endif
+
+/**********************************************************************
+LAYER FLAG CONFIGURATION
+**********************************************************************/
+/* left-right layer flip flag */
+#define MDP_LAYER_FLIP_LR		0x1
+
+/* up-down layer flip flag */
+#define MDP_LAYER_FLIP_UD		0x2
+
+/*
+ * This flag enables pixel extension for the current layer. Validate/commit
+ * call uses scale parameters when this flag is enabled.
+ */
+#define MDP_LAYER_ENABLE_PIXEL_EXT	0x4
+
+/* Flag indicates that layer is foreground layer */
+#define MDP_LAYER_FORGROUND		0x8
+
+/* Flag indicates that layer is associated with secure session */
+#define MDP_LAYER_SECURE_SESSION	0x10
+
+/*
+ * Flag indicates that layer is drawing solid fill. Validate/commit call
+ * does not expect buffer when this flag is enabled.
+ */
+#define MDP_LAYER_SOLID_FILL		0x20
+
+/* Layer format is deinterlace */
+#define MDP_LAYER_DEINTERLACE		0x40
+
+/* layer contains bandwidth compressed format data */
+#define MDP_LAYER_BWC			0x80
+
+/* layer is async position updatable */
+#define MDP_LAYER_ASYNC			0x100
+
+/* layer contains postprocessing configuration data */
+#define MDP_LAYER_PP			0x200
+
+/* Flag indicates that layer is associated with secure display session */
+#define MDP_LAYER_SECURE_DISPLAY_SESSION 0x400
+
+/* Flag enables QSEED3 scaling for the current layer */
+#define MDP_LAYER_ENABLE_QSEED3_SCALE   0x800
+
+/*
+ * layer will work in multirect mode, where a single hardware pipe
+ * fetches multiple rectangles
+ */
+#define MDP_LAYER_MULTIRECT_ENABLE		0x1000
+
+/*
+ * If this flag is present and multirect is enabled, multirect will work in
+ * parallel fetch mode, otherwise it will default to serial fetch mode.
+ */
+#define MDP_LAYER_MULTIRECT_PARALLEL_MODE	0x2000
+
+
+/* Flag indicates that layer is associated with secure camera session */
+#define MDP_LAYER_SECURE_CAMERA_SESSION		0x4000
+
+/**********************************************************************
+DESTINATION SCALER FLAG CONFIGURATION
+**********************************************************************/
+
+/* Enable/disable Destination scaler */
+#define MDP_DESTSCALER_ENABLE		0x1
+
+/*
+ * Indicating mdp_destination_scaler_data contains
+ * Scaling parameter update. Can be set anytime.
+ */
+#define MDP_DESTSCALER_SCALE_UPDATE	0x2
+
+/*
+ * Indicating mdp_destination_scaler_data contains
+ * Detail enhancement setting update. Can be set anytime.
+ */
+#define MDP_DESTSCALER_ENHANCER_UPDATE	0x4
+
+/*
+ * Indicating a partial update to panel ROI. ROI can be
+ * applied anytime when Destination scaler is enabled.
+ */
+#define MDP_DESTSCALER_ROI_ENABLE	0x8
+
+/**********************************************************************
+VALIDATE/COMMIT FLAG CONFIGURATION
+**********************************************************************/
+
+/*
+ * Client sets this flag to indicate that the call is to validate layers
+ * before commit. If this flag is not set then the driver treats
+ * MSMFB_ATOMIC_COMMIT as a commit request.
+ */
+#define MDP_VALIDATE_LAYER			0x01
+
+/*
+ * This flag is only valid for commit call. Commit behavior is synchronous
+ * when this flag is defined. It blocks current call till processing is
+ * complete. Behavior is asynchronous otherwise.
+ */
+#define MDP_COMMIT_WAIT_FOR_FINISH		0x02
+
+/*
+ * This flag is only valid for commit call and used for debugging purposes.
+ * It forces the driver to wait for sync fences.
+ */
+#define MDP_COMMIT_SYNC_FENCE_WAIT		0x04
+
+/* Flag to enable AVR(Adaptive variable refresh) feature. */
+#define MDP_COMMIT_AVR_EN			0x08
+
+/*
+ * Flag to select one shot mode when AVR feature is enabled.
+ * Default mode is continuous mode.
+ */
+#define MDP_COMMIT_AVR_ONE_SHOT_MODE		0x10
+
+/* Flag to indicate dual partial ROI update */
+#define MDP_COMMIT_PARTIAL_UPDATE_DUAL_ROI	0x20
+
+/* Flag to update brightness when commit */
+#define MDP_COMMIT_UPDATE_BRIGHTNESS		0x40
+
+/* Flag to enable concurrent writeback for the frame */
+#define MDP_COMMIT_CWB_EN 0x800
+
+/*
+ * Flag to select DSPP as the data point for CWB. If CWB
+ * is enabled without this flag, LM will be selected as data point.
+ */
+#define MDP_COMMIT_CWB_DSPP 0x1000
+
+/*
+ * Flag to indicate that rectangle number is being assigned
+ * by userspace in multi-rectangle mode
+ */
+#define MDP_COMMIT_RECT_NUM 0x2000
+
+#define MDP_COMMIT_VERSION_1_0		0x00010000
+
+#define OUT_LAYER_COLOR_SPACE
+
+/* From CEA.861.3 */
+#define MDP_HDR_EOTF_SMTPE_ST2084	0x2
+#define MDP_HDR_EOTF_HLG		0x3
+
+/* From Vesa DPv1.4 - Pixel Encoding - Table 2-120 */
+#define MDP_PIXEL_ENCODING_RGB		0x0
+#define MDP_PIXEL_ENCODING_YCBCR_444	0x1
+#define MDP_PIXEL_ENCODING_YCBCR_422	0x2
+#define MDP_PIXEL_ENCODING_YCBCR_420	0x3
+#define MDP_PIXEL_ENCODING_Y_ONLY	0x4
+#define MDP_PIXEL_ENCODING_RAW		0x5
+
+/* From Vesa DPv1.4 - Colorimetry Formats - Table 2-120 */
+/* RGB - used with MDP_DP_PIXEL_ENCODING_RGB */
+#define MDP_COLORIMETRY_RGB_SRGB		0x0
+#define MDP_COLORIMETRY_RGB_WIDE_FIXED_POINT	0x1
+#define MDP_COLORIMETRY_RGB_WIDE_FLOAT_POINT	0x2
+#define MDP_COLORIMETRY_RGB_ADOBE		0x3
+#define MDP_COLORIMETRY_RGB_DPI_P3		0x4
+#define MDP_COLORIMETRY_RGB_CUSTOM		0x5
+#define MDP_COLORIMETRY_RGB_ITU_R_BT_2020	0x6
+
+/* YUV - used with MDP_DP_PIXEL_ENCODING_YCBCR(444 or 422 or 420) */
+#define MDP_COLORIMETRY_YCBCR_ITU_R_BT_601		0x0
+#define MDP_COLORIMETRY_YCBCR_ITU_R_BT_709		0x1
+#define MDP_COLORIMETRY_YCBCR_XV_YCC_601		0x2
+#define MDP_COLORIMETRY_YCBCR_XV_YCC_709		0x3
+#define MDP_COLORIMETRY_YCBCR_S_YCC_601		0x4
+#define MDP_COLORIMETRY_YCBCR_ADOBE_YCC_601		0x5
+#define MDP_COLORIMETRY_YCBCR_ITU_R_BT_2020_YCBCR_CONST	0x6
+#define MDP_COLORIMETRY_YCBCR_ITU_R_BT_2020_YCBCR	0x7
+
+/* Dynamic Range - Table 2-120 */
+/* Full range */
+#define MDP_DYNAMIC_RANGE_VESA	0x0
+/* Limited range */
+#define MDP_DYNAMIC_RANGE_CEA	0x1
+
+/* Bits per component(bpc) for Pixel encoding format RGB from Table 2-120 */
+#define MDP_RGB_6_BPC	0x0
+#define MDP_RGB_8_BPC	0x1
+#define MDP_RGB_10_BPC	0x2
+#define MDP_RGB_12_BPC	0x3
+#define MDP_RGB_16_BPC	0x4
+
+/*
+ * Bits per component(bpc) for Pixel encoding format YCbCr444, YCbCr422,
+ * YCbCr420 and Y only
+ * from Table 2-120
+ */
+#define MDP_YUV_8_BPC	0x1
+#define MDP_YUV_10_BPC	0x2
+#define MDP_YUV_12_BPC	0x3
+#define MDP_YUV_16_BPC	0x4
+
+/* Bits per component(bpc) for Pixel encoding format RAW from Table 2-120 */
+#define MDP_RAW_6_BPC	0x1
+#define MDP_RAW_7_BPC	0x2
+#define MDP_RAW_8_BPC	0x3
+#define MDP_RAW_10_BPC	0x4
+#define MDP_RAW_12_BPC	0x5
+#define MDP_RAW_14_BPC	0x6
+#define MDP_RAW16_BPC	0x7
+
+/* Content Type - Table 2-120 */
+#define MDP_CONTENT_TYPE_NOT_DEFINED	0x0
+#define MDP_CONTENT_TYPE_GRAPHICS		0x1
+#define MDP_CONTENT_TYPE_PHOTO			0x2
+#define MDP_CONTENT_TYPE_VIDEO		0x3
+#define MDP_CONTENT_TYPE_GAME		0x4
+
+/**********************************************************************
+Configuration structures
+All parameters are input to the driver unless explicitly mentioned as
+output parameters.
+**********************************************************************/
+struct mdp_layer_plane {
+	/* DMA buffer file descriptor information. */
+	int fd;
+
+	/* Pixel offset in the dma buffer. */
+	uint32_t offset;
+
+	/* Number of bytes in one scan line including padding bytes. */
+	uint32_t stride;
+};
+
+struct mdp_layer_buffer {
+	/* layer width in pixels. */
+	uint32_t width;
+
+	/* layer height in pixels. */
+	uint32_t height;
+
+	/*
+	 * layer format in DRM-style fourcc, refer drm_fourcc.h for
+	 * standard formats
+	 */
+	uint32_t format;
+
+	/* plane to hold the fd, offset, etc for all color components */
+	struct mdp_layer_plane planes[MAX_PLANES];
+
+	/* valid planes count in layer planes list */
+	uint32_t plane_count;
+
+	/* compression ratio factor, value depends on the pixel format */
+	struct mult_factor comp_ratio;
+
+	/*
+	 * SyncFence associated with this buffer. It is used in two ways.
+	 *
+	 * 1. Driver waits to consume the buffer till producer signals in case
+	 * of primary and external display.
+	 *
+	 * 2. Writeback device uses buffer structure for output buffer where
+	 * driver is producer. However, client sends the fence with buffer to
+	 * indicate that consumer is still using the buffer and it is not ready
+	 * for new content.
+	 */
+	int	 fence;
+
+	/* 32bits reserved value for future usage. */
+	uint32_t reserved;
+};
+
+/*
+ * One layer holds the configuration for one pipe. If a client wants to stage
+ * a single layer on two pipes then it should send two different layers with
+ * relative (x,y) information. The client must send the same information
+ * during the validate and commit calls. The commit call may fail if the
+ * client sends different layer information attached to the same pipe during
+ * validate and commit. The device invalidates the pipe once it receives the
+ * vsync for that commit.
+ */
+struct mdp_input_layer {
+	/*
+	 * Flag to enable/disable properties for layer configuration. Refer
+	 * layer flag configuration section for all possible flags.
+	 */
+	uint32_t		flags;
+
+	/*
+	 * Pipe selection for this layer by client. Client provides the index
+	 * in validate and commit call. Device reserves the pipe once validate
+	 * is successful. Device only uses validated pipe during commit call.
+	 * If client sends different layer/pipe configuration in validate &
+	 * commit then commit may fail.
+	 */
+	uint32_t		pipe_ndx;
+
+	/*
+	 * Horizontal decimation value, this indicates the amount of pixels
+	 * dropped for each pixel that is fetched from a line. It does not
+	 * result in bandwidth reduction because pixels are still fetched from
+	 * memory but dropped internally by hardware.
+	 * The decimation amount is two to the power of the given value,
+	 * i.e. the value is log2 of the decimation factor:
+	 * 0: no decimation
+	 * 1: decimate by 2 (drop 1 pixel for each pixel fetched)
+	 * 2: decimate by 4 (drop 3 pixels for each pixel fetched)
+	 * 3: decimate by 8 (drop 7 pixels for each pixel fetched)
+	 * 4: decimate by 16 (drop 15 pixels for each pixel fetched)
+	 */
+	uint8_t			horz_deci;
+
+	/*
+	 * Vertical decimation value, this indicates the amount of lines
+	 * dropped for each line that is fetched from overlay. It saves
+	 * bandwidth because decimated pixels are not fetched.
+	 * The decimation amount is two to the power of the given value,
+	 * i.e. the value is log2 of the decimation factor:
+	 * 0: no decimation
+	 * 1: decimation by 2 (drop 1 line for each line fetched)
+	 * 2: decimation by 4 (drop 3 lines for each line fetched)
+	 * 3: decimation by 8 (drop 7 lines for each line fetched)
+	 * 4: decimation by 16 (drop 15 lines for each line fetched)
+	 */
+	uint8_t			vert_deci;
+
+	/*
+	 * Used to set plane opacity. The range can be from 0-255, where
+	 * 0 means completely transparent and 255 means fully opaque.
+	 */
+	uint8_t			alpha;
+
+	/*
+	 * Blending stage to occupy in display, if multiple layers are present,
+	 * highest z_order usually means the top most visible layer. The range
+	 * acceptable is from 0-7 to support blending up to 8 layers.
+	 */
+	uint16_t		z_order;
+
+	/*
+	 * Color used as color key for transparency. Any pixel in fetched
+	 * image matching this color will be transparent when blending.
+	 * The color should be in same format as the source image format.
+	 */
+	uint32_t		transp_mask;
+
+	/*
+	 * Solid color used to fill the overlay surface when no source
+	 * buffer is provided.
+	 */
+	uint32_t		bg_color;
+
+	/* blend operation defined in "mdss_mdp_blend_op" enum. */
+	enum mdss_mdp_blend_op		blend_op;
+
+	/* color space of the source */
+	enum mdp_color_space	color_space;
+
+	/*
+	 * Source crop rectangle, portion of image that will be fetched. This
+	 * should always be within boundaries of source image.
+	 */
+	struct mdp_rect		src_rect;
+
+	/*
+	 * Destination rectangle, the position and size of image on screen.
+	 * This should always be within panel boundaries.
+	 */
+	struct mdp_rect		dst_rect;
+
+	/* Scaling parameters. */
+	void __user	*scale;
+
+	/* Buffer attached with each layer. Device uses it for commit call. */
+	struct mdp_layer_buffer	buffer;
+
+	/*
+	 * Source side post processing configuration information for each
+	 * layer.
+	 */
+	void __user		*pp_info;
+
+	/*
+	 * This is an output parameter.
+	 *
+	 * Only for validate call. Frame buffer device sets error code
+	 * based on validate call failure scenario.
+	 */
+	int			error_code;
+
+	/*
+	 * For source pipes supporting multi-rectangle, this field identifies
+	 * the rectangle index of the source pipe.
+	 */
+	uint32_t		rect_num;
+
+	/* 32bits reserved value for future usage. */
+	uint32_t		reserved[5];
+};
+
+struct mdp_output_layer {
+	/*
+	 * Flag to enable/disable properties for layer configuration. Refer
+	 * layer flag config section for all possible flags.
+	 */
+	uint32_t			flags;
+
+	/*
+	 * Writeback destination selection for output. Client provides the index
+	 * in validate and commit call.
+	 */
+	uint32_t			writeback_ndx;
+
+	/* Buffer attached with output layer. Device uses it for commit call */
+	struct mdp_layer_buffer		buffer;
+
+	/* color space of the destination */
+	enum mdp_color_space		color_space;
+
+	/* 32bits reserved value for future usage. */
+	uint32_t			reserved[5];
+};
+
+/*
+ * Destination scaling info structure holds setup parameters for the
+ * upscaling setting in the destination scaling block.
+ */
+struct mdp_destination_scaler_data {
+	/*
+	 * Flag to switch between modes for the destination scaler. Please refer
+	 * to the destination scaler flag config for all possible settings.
+	 */
+	uint32_t			flags;
+
+	/*
+	 * Destination scaler selection index. Client provides the index in
+	 * validate and commit call.
+	 */
+	uint32_t			dest_scaler_ndx;
+
+	/*
+	 * LM width configuration per Destination scaling updates
+	 */
+	uint32_t			lm_width;
+
+	/*
+	 * LM height configuration per Destination scaling updates
+	 */
+	uint32_t			lm_height;
+
+	/*
+	 * The scaling parameters for all the mode except disable. For
+	 * disabling the scaler, there is no need to provide the scale.
+	 * A userspace pointer points to struct mdp_scale_data_v2.
+	 */
+	uint64_t	__user scale;
+
+	/*
+	 * Panel ROI is used when partial update is required in
+	 * current commit call.
+	 */
+	struct mdp_rect	panel_roi;
+};
+
+/*
+ * Commit structure holds layer stack send by client for validate and commit
+ * call. If layers are different between validate and commit call then commit
+ * call will also do validation. In such case, commit may fail.
+ */
+struct mdp_layer_commit_v1 {
+	/*
+	 * Flag to enable/disable properties for commit/validate call. Refer
+	 * validate/commit flag config section for all possible flags.
+	 */
+	uint32_t		flags;
+
+	/*
+	 * This is an output parameter.
+	 *
+	 * Frame buffer device provides release fence handle to client. It
+	 * triggers release fence when display hardware has consumed all the
+	 * buffers attached to this commit call and buffer is ready for reuse
+	 * for primary and external. For writeback case, it triggers it when
+	 * output buffer is ready for consumer.
+	 */
+	int			release_fence;
+
+	/*
+	 * Left_roi is optional configuration. Client configures it only when
+	 * partial update is enabled. It defines the "region of interest" on
+	 * left part of panel when it is split display. For non-split display,
+	 * it defines the "region of interest" on the panel.
+	 */
+	struct mdp_rect		left_roi;
+
+	/*
+	 * Right_roi is optional configuration. Client configures it only when
+	 * partial update is enabled. It defines the "region of interest" on
+	 * right part of panel for split display configuration. It is not
+	 * required for non-split display.
+	 */
+	struct mdp_rect		right_roi;
+
+	 /* Pointer to a list of input layers for composition. */
+	struct mdp_input_layer __user *input_layers;
+
+	/* Input layer count present in input list */
+	uint32_t		input_layer_cnt;
+
+	/*
+	 * Output layer for writeback display. It supports only one
+	 * layer as output layer. This is not required for primary
+	 * and external displays
+	 */
+	struct mdp_output_layer __user *output_layer;
+
+	/*
+	 * This is an output parameter.
+	 *
+	 * Frame buffer device provides retire fence handle if
+	 * COMMIT_RETIRE_FENCE flag is set in commit call. It triggers
+	 * retire fence when current layers are swapped with new layers
+	 * on display hardware. For video mode panel and writeback,
+	 * retire fence and release fences are triggered at the same
+	 * time while command mode panel triggers release fence first
+	 * (on pingpong done) and retire fence (on rdptr done)
+	 * after that.
+	 */
+	int			retire_fence;
+
+	/*
+	 * Scaler data and control for setting up destination scaler.
+	 * A userspace pointer that points to a list of
+	 * struct mdp_destination_scaler_data.
+	 */
+	void __user		*dest_scaler;
+
+	/*
+	 * Number of destination scaler data structures provided by userspace.
+	 */
+	uint32_t		dest_scaler_cnt;
+
+	/* Backlight level to be updated at display commit */
+	uint32_t		bl_level;
+
+	/* 32-bits reserved value for future usage. */
+	uint32_t		reserved[MDP_LAYER_COMMIT_V1_PAD];
+};
+
+/*
+ * mdp_layer_commit - argument for ioctl MSMFB_ATOMIC_COMMIT
+ */
+struct mdp_layer_commit {
+	/*
+	 * 32bit version indicates the commit structure selection
+	 * from union. Lower 16bits indicates the minor version while
+	 * higher 16bits indicates the major version. It selects the
+	 * commit structure based on major version selection. Minor version
+	 * indicates that reserved fields are in use.
+	 *
+	 * Current supported version is 1.0 (Major:1 Minor:0)
+	 */
+	uint32_t version;
+	union {
+		/* Layer commit/validate definition for V1 */
+		struct mdp_layer_commit_v1 commit_v1;
+	};
+};
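+
+/*
+ * Illustrative usage sketch (not part of the original patch): a minimal
+ * validate-then-commit sequence for a single input layer on framebuffer fd
+ * fb_fd. Layer setup, fence handling and error paths are elided.
+ *
+ *	struct mdp_input_layer layer = { ... };
+ *	struct mdp_layer_commit commit = {
+ *		.version = MDP_COMMIT_VERSION_1_0,
+ *	};
+ *
+ *	commit.commit_v1.flags = MDP_VALIDATE_LAYER;
+ *	commit.commit_v1.input_layers = &layer;
+ *	commit.commit_v1.input_layer_cnt = 1;
+ *	if (ioctl(fb_fd, MSMFB_ATOMIC_COMMIT, &commit) == 0) {
+ *		commit.commit_v1.flags = 0;
+ *		ioctl(fb_fd, MSMFB_ATOMIC_COMMIT, &commit);
+ *	}
+ */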
+
+struct mdp_point {
+	uint32_t x;
+	uint32_t y;
+};
+
+/*
+ * Async updatable layers. One layer holds configuration for one pipe.
+ */
+struct mdp_async_layer {
+	/*
+	 * Flag to enable/disable properties for layer configuration. Refer
+	 * layer flag config section for all possible flags.
+	 */
+	uint32_t flags;
+
+	/*
+	 * Pipe selection for this layer by client. Client provides the
+	 * pipe index that the device reserved during ATOMIC_COMMIT.
+	 */
+	uint32_t		pipe_ndx;
+
+	/* Source start x,y. */
+	struct mdp_point	src;
+
+	/* Destination start x,y. */
+	struct mdp_point	dst;
+
+	/*
+	 * This is an output parameter.
+	 *
+	 * Frame buffer device sets error code based on the failure.
+	 */
+	int			error_code;
+
+	uint32_t		reserved[3];
+};
+
+/*
+ * mdp_position_update - argument for ioctl MSMFB_ASYNC_POSITION_UPDATE
+ */
+struct mdp_position_update {
+	 /* Pointer to a list of async updatable input layers */
+	struct mdp_async_layer __user *input_layers;
+
+	/* Input layer count present in input list */
+	uint32_t input_layer_cnt;
+};
+
+#define MAX_DET_CURVES		3
+struct mdp_det_enhance_data {
+	uint32_t enable;
+	int16_t sharpen_level1;
+	int16_t sharpen_level2;
+	uint16_t clip;
+	uint16_t limit;
+	uint16_t thr_quiet;
+	uint16_t thr_dieout;
+	uint16_t thr_low;
+	uint16_t thr_high;
+	uint16_t prec_shift;
+	int16_t adjust_a[MAX_DET_CURVES];
+	int16_t adjust_b[MAX_DET_CURVES];
+	int16_t adjust_c[MAX_DET_CURVES];
+};
+
+/* Flags to enable Scaler and its sub components */
+#define ENABLE_SCALE			0x1
+#define ENABLE_DETAIL_ENHANCE		0x2
+#define ENABLE_DIRECTION_DETECTION	0x4
+
+/* LUT configuration flags */
+#define SCALER_LUT_SWAP			0x1
+#define SCALER_LUT_DIR_WR		0x2
+#define SCALER_LUT_Y_CIR_WR		0x4
+#define SCALER_LUT_UV_CIR_WR		0x8
+#define SCALER_LUT_Y_SEP_WR		0x10
+#define SCALER_LUT_UV_SEP_WR		0x20
+
+/* Y/RGB and UV filter configuration */
+#define FILTER_EDGE_DIRECTED_2D		0x0
+#define FILTER_CIRCULAR_2D		0x1
+#define FILTER_SEPARABLE_1D		0x2
+#define FILTER_BILINEAR			0x3
+
+/* Alpha filters */
+#define FILTER_ALPHA_DROP_REPEAT	0x0
+#define FILTER_ALPHA_BILINEAR		0x1
+
+/**
+ * struct mdp_scale_data_v2
+ * The driver uses this data structure to store all scaling parameters.
+ * It contains all pixel extension data, the QSEED3 filter configuration
+ * and the coefficient table indices.
+ */
+struct mdp_scale_data_v2 {
+	uint32_t enable;
+
+	/* Init phase values */
+	int32_t init_phase_x[MAX_PLANES];
+	int32_t phase_step_x[MAX_PLANES];
+	int32_t init_phase_y[MAX_PLANES];
+	int32_t phase_step_y[MAX_PLANES];
+
+	/* This should be set to total horizontal pixels:
+	 * left + right + width */
+	uint32_t num_ext_pxls_left[MAX_PLANES];
+
+	/* Unused param for backward compatibility */
+	uint32_t num_ext_pxls_right[MAX_PLANES];
+
+	/* This should be set to total vertical pixels:
+	 * top + bottom + height */
+	uint32_t num_ext_pxls_top[MAX_PLANES];
+
+	/* Unused param for backward compatibility */
+	uint32_t num_ext_pxls_btm[MAX_PLANES];
+
+	/* over fetch pixels */
+	int32_t left_ftch[MAX_PLANES];
+	int32_t left_rpt[MAX_PLANES];
+	int32_t right_ftch[MAX_PLANES];
+	int32_t right_rpt[MAX_PLANES];
+
+	/* Repeat pixels */
+	uint32_t top_rpt[MAX_PLANES];
+	uint32_t btm_rpt[MAX_PLANES];
+	uint32_t top_ftch[MAX_PLANES];
+	uint32_t btm_ftch[MAX_PLANES];
+
+	uint32_t roi_w[MAX_PLANES];
+
+	/* alpha plane can only be scaled using bilinear or pixel
+	 * repeat/drop, specify these for Y and UV planes only */
+	uint32_t preload_x[MAX_PLANES];
+	uint32_t preload_y[MAX_PLANES];
+	uint32_t src_width[MAX_PLANES];
+	uint32_t src_height[MAX_PLANES];
+
+	uint32_t dst_width;
+	uint32_t dst_height;
+
+	uint32_t y_rgb_filter_cfg;
+	uint32_t uv_filter_cfg;
+	uint32_t alpha_filter_cfg;
+	uint32_t blend_cfg;
+
+	uint32_t lut_flag;
+	uint32_t dir_lut_idx;
+
+	/* for Y(RGB) and UV planes */
+	uint32_t y_rgb_cir_lut_idx;
+	uint32_t uv_cir_lut_idx;
+	uint32_t y_rgb_sep_lut_idx;
+	uint32_t uv_sep_lut_idx;
+
+	struct mdp_det_enhance_data detail_enhance;
+
+	/* reserved value for future usage. */
+	uint64_t reserved[8];
+};
+
+/**
+ * struct mdp_scale_luts_info
+ * This struct pointer is received as payload in MSMFB_MDP_SET_CFG when the
+ * MDP_QSEED3_LUT_CFG flag is set
+ * @dir_lut:      Direction detection coefficients table
+ * @cir_lut:      Circular coefficients table
+ * @sep_lut:      Separable coefficients table
+ * @dir_lut_size: Size of direction coefficients table
+ * @cir_lut_size: Size of circular coefficients table
+ * @sep_lut_size: Size of separable coefficients table
+ */
+struct mdp_scale_luts_info {
+	uint64_t __user dir_lut;
+	uint64_t __user cir_lut;
+	uint64_t __user sep_lut;
+	uint32_t dir_lut_size;
+	uint32_t cir_lut_size;
+	uint32_t sep_lut_size;
+};
+
+#define MDP_QSEED3_LUT_CFG 0x1
+
+struct mdp_set_cfg {
+	uint64_t flags;
+	uint32_t len;
+	uint64_t __user payload;
+};
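+
+/*
+ * Illustrative usage sketch (not part of the original patch): loading the
+ * QSEED3 coefficient tables through MSMFB_MDP_SET_CFG. The table pointers
+ * and sizes (dir_tbl/dir_sz etc.) are placeholders assumed to come from
+ * the platform's scaler coefficient data.
+ *
+ *	struct mdp_scale_luts_info luts = {
+ *		.dir_lut = (uintptr_t)dir_tbl,
+ *		.cir_lut = (uintptr_t)cir_tbl,
+ *		.sep_lut = (uintptr_t)sep_tbl,
+ *		.dir_lut_size = dir_sz,
+ *		.cir_lut_size = cir_sz,
+ *		.sep_lut_size = sep_sz,
+ *	};
+ *	struct mdp_set_cfg cfg = {
+ *		.flags = MDP_QSEED3_LUT_CFG,
+ *		.len = sizeof(luts),
+ *		.payload = (uintptr_t)&luts,
+ *	};
+ *
+ *	ioctl(fb_fd, MSMFB_MDP_SET_CFG, &cfg);
+ */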
+
+#define HDR_PRIMARIES_COUNT 3
+
+#define MDP_HDR_STREAM
+
+struct mdp_hdr_stream {
+	uint32_t eotf;
+	uint32_t display_primaries_x[HDR_PRIMARIES_COUNT];
+	uint32_t display_primaries_y[HDR_PRIMARIES_COUNT];
+	uint32_t white_point_x;
+	uint32_t white_point_y;
+	uint32_t max_luminance;
+	uint32_t min_luminance;
+	uint32_t max_content_light_level;
+	uint32_t max_average_light_level;
+	/* DP related */
+	uint32_t pixel_encoding;
+	uint32_t colorimetry;
+	uint32_t range;
+	uint32_t bits_per_component;
+	uint32_t content_type;
+	uint32_t reserved[5];
+};
+
+/* HDR HDMI state takes the possible values 1, 2 and 4 respectively */
+#define MDP_HDR_ENABLE  (1 << 0)
+#define MDP_HDR_DISABLE (1 << 1)
+#define MDP_HDR_RESET   (1 << 2)
+
+/*
+ * HDR Control
+ * This encapsulates the HDR metadata as well as a state control
+ * for the HDR metadata as required by the HDMI spec to send the
+ * relevant metadata depending on the state of the HDR playback.
+ * hdr_state: Controls HDR state, takes values MDP_HDR_ENABLE, MDP_HDR_DISABLE
+ * and MDP_HDR_RESET.
+ * hdr_meta: Metadata sent by the userspace for the HDR clip.
+ */
+
+#define DRM_MSM_EXT_PANEL_HDR_CTRL
+struct mdp_hdr_stream_ctrl {
+	__u8 hdr_state;                   /* HDR state */
+	struct mdp_hdr_stream hdr_stream; /* HDR metadata */
+};
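+
+/*
+ * Illustrative usage sketch (not part of the original patch): signalling the
+ * start of HDR playback. The luminance values are placeholders taken from
+ * the clip's mastering metadata; how the struct reaches the driver (e.g.
+ * through the metadata ioctls) is outside the scope of this sketch.
+ *
+ *	struct mdp_hdr_stream_ctrl ctrl = {
+ *		.hdr_state = MDP_HDR_ENABLE,
+ *		.hdr_stream = {
+ *			.eotf = MDP_HDR_EOTF_SMTPE_ST2084,
+ *			.max_luminance = max_lum,
+ *			.min_luminance = min_lum,
+ *		},
+ *	};
+ */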
+
+#endif
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/include/uapi/linux/msm_mdp.h	2019-01-22 16:16:28.567292264 +0100
@@ -0,0 +1,1474 @@
+#ifndef _UAPI_MSM_MDP_H_
+#define _UAPI_MSM_MDP_H_
+
+#ifndef __KERNEL__
+#include <stdint.h>
+#else
+#include <linux/types.h>
+#endif
+#include <linux/fb.h>
+
+#define MSMFB_IOCTL_MAGIC 'm'
+#define MSMFB_GRP_DISP          _IOW(MSMFB_IOCTL_MAGIC, 1, unsigned int)
+#define MSMFB_BLIT              _IOW(MSMFB_IOCTL_MAGIC, 2, unsigned int)
+#define MSMFB_SUSPEND_SW_REFRESHER _IOW(MSMFB_IOCTL_MAGIC, 128, unsigned int)
+#define MSMFB_RESUME_SW_REFRESHER _IOW(MSMFB_IOCTL_MAGIC, 129, unsigned int)
+#define MSMFB_CURSOR _IOW(MSMFB_IOCTL_MAGIC, 130, struct fb_cursor)
+#define MSMFB_SET_LUT _IOW(MSMFB_IOCTL_MAGIC, 131, struct fb_cmap)
+#define MSMFB_HISTOGRAM _IOWR(MSMFB_IOCTL_MAGIC, 132, struct mdp_histogram_data)
+/* new ioctls for setting/getting the CCS matrix */
+#define MSMFB_GET_CCS_MATRIX  _IOWR(MSMFB_IOCTL_MAGIC, 133, struct mdp_ccs)
+#define MSMFB_SET_CCS_MATRIX  _IOW(MSMFB_IOCTL_MAGIC, 134, struct mdp_ccs)
+#define MSMFB_OVERLAY_SET       _IOWR(MSMFB_IOCTL_MAGIC, 135, \
+						struct mdp_overlay)
+#define MSMFB_OVERLAY_UNSET     _IOW(MSMFB_IOCTL_MAGIC, 136, unsigned int)
+
+#define MSMFB_OVERLAY_PLAY      _IOW(MSMFB_IOCTL_MAGIC, 137, \
+						struct msmfb_overlay_data)
+#define MSMFB_OVERLAY_QUEUE	MSMFB_OVERLAY_PLAY
+
+#define MSMFB_GET_PAGE_PROTECTION _IOR(MSMFB_IOCTL_MAGIC, 138, \
+					struct mdp_page_protection)
+#define MSMFB_SET_PAGE_PROTECTION _IOW(MSMFB_IOCTL_MAGIC, 139, \
+					struct mdp_page_protection)
+#define MSMFB_OVERLAY_GET      _IOR(MSMFB_IOCTL_MAGIC, 140, \
+						struct mdp_overlay)
+#define MSMFB_OVERLAY_PLAY_ENABLE     _IOW(MSMFB_IOCTL_MAGIC, 141, unsigned int)
+#define MSMFB_OVERLAY_BLT       _IOWR(MSMFB_IOCTL_MAGIC, 142, \
+						struct msmfb_overlay_blt)
+#define MSMFB_OVERLAY_BLT_OFFSET     _IOW(MSMFB_IOCTL_MAGIC, 143, unsigned int)
+#define MSMFB_HISTOGRAM_START	_IOR(MSMFB_IOCTL_MAGIC, 144, \
+						struct mdp_histogram_start_req)
+#define MSMFB_HISTOGRAM_STOP	_IOR(MSMFB_IOCTL_MAGIC, 145, unsigned int)
+#define MSMFB_NOTIFY_UPDATE	_IOWR(MSMFB_IOCTL_MAGIC, 146, unsigned int)
+
+#define MSMFB_OVERLAY_3D       _IOWR(MSMFB_IOCTL_MAGIC, 147, \
+						struct msmfb_overlay_3d)
+
+#define MSMFB_MIXER_INFO       _IOWR(MSMFB_IOCTL_MAGIC, 148, \
+						struct msmfb_mixer_info_req)
+#define MSMFB_OVERLAY_PLAY_WAIT _IOWR(MSMFB_IOCTL_MAGIC, 149, \
+						struct msmfb_overlay_data)
+#define MSMFB_WRITEBACK_INIT _IO(MSMFB_IOCTL_MAGIC, 150)
+#define MSMFB_WRITEBACK_START _IO(MSMFB_IOCTL_MAGIC, 151)
+#define MSMFB_WRITEBACK_STOP _IO(MSMFB_IOCTL_MAGIC, 152)
+#define MSMFB_WRITEBACK_QUEUE_BUFFER _IOW(MSMFB_IOCTL_MAGIC, 153, \
+						struct msmfb_data)
+#define MSMFB_WRITEBACK_DEQUEUE_BUFFER _IOW(MSMFB_IOCTL_MAGIC, 154, \
+						struct msmfb_data)
+#define MSMFB_WRITEBACK_TERMINATE _IO(MSMFB_IOCTL_MAGIC, 155)
+#define MSMFB_MDP_PP _IOWR(MSMFB_IOCTL_MAGIC, 156, struct msmfb_mdp_pp)
+#define MSMFB_OVERLAY_VSYNC_CTRL _IOW(MSMFB_IOCTL_MAGIC, 160, unsigned int)
+#define MSMFB_VSYNC_CTRL  _IOW(MSMFB_IOCTL_MAGIC, 161, unsigned int)
+#define MSMFB_BUFFER_SYNC  _IOW(MSMFB_IOCTL_MAGIC, 162, struct mdp_buf_sync)
+#define MSMFB_OVERLAY_COMMIT      _IO(MSMFB_IOCTL_MAGIC, 163)
+#define MSMFB_DISPLAY_COMMIT      _IOW(MSMFB_IOCTL_MAGIC, 164, \
+						struct mdp_display_commit)
+#define MSMFB_METADATA_SET  _IOW(MSMFB_IOCTL_MAGIC, 165, struct msmfb_metadata)
+#define MSMFB_METADATA_GET  _IOW(MSMFB_IOCTL_MAGIC, 166, struct msmfb_metadata)
+#define MSMFB_WRITEBACK_SET_MIRRORING_HINT _IOW(MSMFB_IOCTL_MAGIC, 167, \
+						unsigned int)
+#define MSMFB_ASYNC_BLIT              _IOW(MSMFB_IOCTL_MAGIC, 168, unsigned int)
+#define MSMFB_OVERLAY_PREPARE		_IOWR(MSMFB_IOCTL_MAGIC, 169, \
+						struct mdp_overlay_list)
+#define MSMFB_LPM_ENABLE	_IOWR(MSMFB_IOCTL_MAGIC, 170, unsigned int)
+#define MSMFB_MDP_PP_GET_FEATURE_VERSION _IOWR(MSMFB_IOCTL_MAGIC, 171, \
+					      struct mdp_pp_feature_version)
+
+#define FB_TYPE_3D_PANEL 0x10101010
+#define MDP_IMGTYPE2_START 0x10000
+#define MSMFB_DRIVER_VERSION	0xF9E8D701
+/* Maximum number of formats supported by MDP */
+#define MDP_IMGTYPE_END 0x100
+
+/* HW Revisions for different MDSS targets */
+#define MDSS_GET_MAJOR(rev)		((rev) >> 28)
+#define MDSS_GET_MINOR(rev)		(((rev) >> 16) & 0xFFF)
+#define MDSS_GET_STEP(rev)		((rev) & 0xFFFF)
+#define MDSS_GET_MAJOR_MINOR(rev)	((rev) >> 16)
+
+#define IS_MDSS_MAJOR_MINOR_SAME(rev1, rev2)	\
+	(MDSS_GET_MAJOR_MINOR((rev1)) == MDSS_GET_MAJOR_MINOR((rev2)))
+
+#define MDSS_MDP_REV(major, minor, step)	\
+	((((major) & 0x000F) << 28) |		\
+	 (((minor) & 0x0FFF) << 16) |		\
+	 ((step)   & 0xFFFF))
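+
+/*
+ * Illustrative example (not part of the original patch):
+ * MDSS_MDP_REV(1, 7, 0) packs to 0x10070000, so MDSS_GET_MAJOR() returns 1,
+ * MDSS_GET_MINOR() returns 7 and MDSS_GET_STEP() returns 0. Two revisions
+ * that differ only in step compare equal under IS_MDSS_MAJOR_MINOR_SAME().
+ */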
+
+#define MDSS_MDP_HW_REV_100	MDSS_MDP_REV(1, 0, 0) /* 8974 v1.0 */
+#define MDSS_MDP_HW_REV_101	MDSS_MDP_REV(1, 1, 0) /* 8x26 v1.0 */
+#define MDSS_MDP_HW_REV_101_1	MDSS_MDP_REV(1, 1, 1) /* 8x26 v2.0, 8926 v1.0 */
+#define MDSS_MDP_HW_REV_101_2	MDSS_MDP_REV(1, 1, 2) /* 8926 v2.0 */
+#define MDSS_MDP_HW_REV_102	MDSS_MDP_REV(1, 2, 0) /* 8974 v2.0 */
+#define MDSS_MDP_HW_REV_102_1	MDSS_MDP_REV(1, 2, 1) /* 8974 v3.0 (Pro) */
+#define MDSS_MDP_HW_REV_103	MDSS_MDP_REV(1, 3, 0) /* 8084 v1.0 */
+#define MDSS_MDP_HW_REV_103_1	MDSS_MDP_REV(1, 3, 1) /* 8084 v1.1 */
+#define MDSS_MDP_HW_REV_105	MDSS_MDP_REV(1, 5, 0) /* 8994 v1.0 */
+#define MDSS_MDP_HW_REV_106	MDSS_MDP_REV(1, 6, 0) /* 8916 v1.0 */
+#define MDSS_MDP_HW_REV_107	MDSS_MDP_REV(1, 7, 0) /* 8996 v1 */
+#define MDSS_MDP_HW_REV_107_1	MDSS_MDP_REV(1, 7, 1) /* 8996 v2 */
+#define MDSS_MDP_HW_REV_107_2	MDSS_MDP_REV(1, 7, 2) /* 8996 v3 */
+#define MDSS_MDP_HW_REV_108	MDSS_MDP_REV(1, 8, 0) /* 8939 v1.0 */
+#define MDSS_MDP_HW_REV_109	MDSS_MDP_REV(1, 9, 0) /* 8994 v2.0 */
+#define MDSS_MDP_HW_REV_110	MDSS_MDP_REV(1, 10, 0) /* 8992 v1.0 */
+#define MDSS_MDP_HW_REV_200	MDSS_MDP_REV(2, 0, 0) /* 8092 v1.0 */
+#define MDSS_MDP_HW_REV_112	MDSS_MDP_REV(1, 12, 0) /* 8952 v1.0 */
+#define MDSS_MDP_HW_REV_114	MDSS_MDP_REV(1, 14, 0) /* 8937 v1.0 */
+#define MDSS_MDP_HW_REV_115	MDSS_MDP_REV(1, 15, 0) /* msmgold */
+#define MDSS_MDP_HW_REV_116	MDSS_MDP_REV(1, 16, 0) /* msmtitanium */
+#define MDSS_MDP_HW_REV_300	MDSS_MDP_REV(3, 0, 0)  /* msm8998 */
+#define MDSS_MDP_HW_REV_301	MDSS_MDP_REV(3, 0, 1)  /* msm8998 v1.0 */
+#define MDSS_MDP_HW_REV_320	MDSS_MDP_REV(3, 2, 0)  /* sdm660 */
+#define MDSS_MDP_HW_REV_330	MDSS_MDP_REV(3, 3, 0)  /* sdm630 */
+
+enum {
+	NOTIFY_UPDATE_INIT,
+	NOTIFY_UPDATE_DEINIT,
+	NOTIFY_UPDATE_START,
+	NOTIFY_UPDATE_STOP,
+	NOTIFY_UPDATE_POWER_OFF,
+};
+
+enum {
+	NOTIFY_TYPE_NO_UPDATE,
+	NOTIFY_TYPE_SUSPEND,
+	NOTIFY_TYPE_UPDATE,
+	NOTIFY_TYPE_BL_UPDATE,
+	NOTIFY_TYPE_BL_AD_ATTEN_UPDATE,
+};
+
+enum {
+	MDP_RGB_565,      /* RGB 565 planar */
+	MDP_XRGB_8888,    /* RGB 888 padded */
+	MDP_Y_CBCR_H2V2,  /* Y and CbCr, pseudo planar w/ Cb in MSB */
+	MDP_Y_CBCR_H2V2_ADRENO,
+	MDP_ARGB_8888,    /* ARGB 888 */
+	MDP_RGB_888,      /* RGB 888 planar */
+	MDP_Y_CRCB_H2V2,  /* Y and CrCb, pseudo planar w/ Cr in MSB */
+	MDP_YCRYCB_H2V1,  /* YCrYCb interleave */
+	MDP_CBYCRY_H2V1,  /* CbYCrY interleave */
+	MDP_Y_CRCB_H2V1,  /* Y and CrCb, pseudo planar w/ Cr in MSB */
+	MDP_Y_CBCR_H2V1,  /* Y and CbCr, pseudo planar w/ Cb in MSB */
+	MDP_Y_CRCB_H1V2,
+	MDP_Y_CBCR_H1V2,
+	MDP_RGBA_8888,    /* RGBA 8888 */
+	MDP_BGRA_8888,	  /* BGRA 8888 */
+	MDP_RGBX_8888,	  /* RGBX 888 */
+	MDP_Y_CRCB_H2V2_TILE,  /* Y and CrCb, pseudo planar tile */
+	MDP_Y_CBCR_H2V2_TILE,  /* Y and CbCr, pseudo planar tile */
+	MDP_Y_CR_CB_H2V2,  /* Y, Cr and Cb, planar */
+	MDP_Y_CR_CB_GH2V2,  /* Y, Cr and Cb, planar aligned to Android YV12 */
+	MDP_Y_CB_CR_H2V2,  /* Y, Cb and Cr, planar */
+	MDP_Y_CRCB_H1V1,  /* Y and CrCb, pseudo planar w/ Cr in MSB */
+	MDP_Y_CBCR_H1V1,  /* Y and CbCr, pseudo planar w/ Cb in MSB */
+	MDP_YCRCB_H1V1,   /* YCrCb interleave */
+	MDP_YCBCR_H1V1,   /* YCbCr interleave */
+	MDP_BGR_565,      /* BGR 565 planar */
+	MDP_BGR_888,      /* BGR 888 */
+	MDP_Y_CBCR_H2V2_VENUS,
+	MDP_BGRX_8888,   /* BGRX 8888 */
+	MDP_RGBA_8888_TILE,	  /* RGBA 8888 in tile format */
+	MDP_ARGB_8888_TILE,	  /* ARGB 8888 in tile format */
+	MDP_ABGR_8888_TILE,	  /* ABGR 8888 in tile format */
+	MDP_BGRA_8888_TILE,	  /* BGRA 8888 in tile format */
+	MDP_RGBX_8888_TILE,	  /* RGBX 8888 in tile format */
+	MDP_XRGB_8888_TILE,	  /* XRGB 8888 in tile format */
+	MDP_XBGR_8888_TILE,	  /* XBGR 8888 in tile format */
+	MDP_BGRX_8888_TILE,	  /* BGRX 8888 in tile format */
+	MDP_YCBYCR_H2V1,  /* YCbYCr interleave */
+	MDP_RGB_565_TILE,	  /* RGB 565 in tile format */
+	MDP_BGR_565_TILE,	  /* BGR 565 in tile format */
+	MDP_ARGB_1555,	/*ARGB 1555*/
+	MDP_RGBA_5551,	/*RGBA 5551*/
+	MDP_ARGB_4444,	/*ARGB 4444*/
+	MDP_RGBA_4444,	/*RGBA 4444*/
+	MDP_RGB_565_UBWC,
+	MDP_RGBA_8888_UBWC,
+	MDP_Y_CBCR_H2V2_UBWC,
+	MDP_RGBX_8888_UBWC,
+	MDP_Y_CRCB_H2V2_VENUS,
+	MDP_IMGTYPE_LIMIT,
+	MDP_RGB_BORDERFILL,	/* border fill pipe */
+	MDP_XRGB_1555,
+	MDP_RGBX_5551,
+	MDP_XRGB_4444,
+	MDP_RGBX_4444,
+	MDP_ABGR_1555,
+	MDP_BGRA_5551,
+	MDP_XBGR_1555,
+	MDP_BGRX_5551,
+	MDP_ABGR_4444,
+	MDP_BGRA_4444,
+	MDP_XBGR_4444,
+	MDP_BGRX_4444,
+	MDP_ABGR_8888,
+	MDP_XBGR_8888,
+	MDP_RGBA_1010102,
+	MDP_ARGB_2101010,
+	MDP_RGBX_1010102,
+	MDP_XRGB_2101010,
+	MDP_BGRA_1010102,
+	MDP_ABGR_2101010,
+	MDP_BGRX_1010102,
+	MDP_XBGR_2101010,
+	MDP_RGBA_1010102_UBWC,
+	MDP_RGBX_1010102_UBWC,
+	MDP_Y_CBCR_H2V2_P010,
+	MDP_Y_CBCR_H2V2_TP10_UBWC,
+	MDP_CRYCBY_H2V1,  /* CrYCbY interleave */
+	MDP_IMGTYPE_LIMIT1 = MDP_IMGTYPE_END,
+	MDP_FB_FORMAT = MDP_IMGTYPE2_START,    /* framebuffer format */
+	MDP_IMGTYPE_LIMIT2 /* No valid image types after this enum */
+};
+
+#define MDP_CRYCBY_H2V1 MDP_CRYCBY_H2V1
+
+enum {
+	PMEM_IMG,
+	FB_IMG,
+};
+
+enum {
+	HSIC_HUE = 0,
+	HSIC_SAT,
+	HSIC_INT,
+	HSIC_CON,
+	NUM_HSIC_PARAM,
+};
+
+enum mdss_mdp_max_bw_mode {
+	MDSS_MAX_BW_LIMIT_DEFAULT = 0x1,
+	MDSS_MAX_BW_LIMIT_CAMERA = 0x2,
+	MDSS_MAX_BW_LIMIT_HFLIP = 0x4,
+	MDSS_MAX_BW_LIMIT_VFLIP = 0x8,
+};
+
+#define MDSS_MDP_ROT_ONLY		0x80
+#define MDSS_MDP_RIGHT_MIXER		0x100
+#define MDSS_MDP_DUAL_PIPE		0x200
+
+/* mdp_blit_req flag values */
+#define MDP_ROT_NOP 0
+#define MDP_FLIP_LR 0x1
+#define MDP_FLIP_UD 0x2
+#define MDP_ROT_90 0x4
+#define MDP_ROT_180 (MDP_FLIP_UD|MDP_FLIP_LR)
+#define MDP_ROT_270 (MDP_ROT_90|MDP_FLIP_UD|MDP_FLIP_LR)
+#define MDP_DITHER 0x8
+#define MDP_BLUR 0x10
+#define MDP_BLEND_FG_PREMULT 0x20000
+#define MDP_IS_FG 0x40000
+#define MDP_SOLID_FILL 0x00000020
+#define MDP_VPU_PIPE 0x00000040
+#define MDP_DEINTERLACE 0x80000000
+#define MDP_SHARPENING  0x40000000
+#define MDP_NO_DMA_BARRIER_START	0x20000000
+#define MDP_NO_DMA_BARRIER_END		0x10000000
+#define MDP_NO_BLIT			0x08000000
+#define MDP_BLIT_WITH_DMA_BARRIERS	0x000
+#define MDP_BLIT_WITH_NO_DMA_BARRIERS    \
+	(MDP_NO_DMA_BARRIER_START | MDP_NO_DMA_BARRIER_END)
+#define MDP_BLIT_SRC_GEM                0x04000000
+#define MDP_BLIT_DST_GEM                0x02000000
+#define MDP_BLIT_NON_CACHED		0x01000000
+#define MDP_OV_PIPE_SHARE		0x00800000
+#define MDP_DEINTERLACE_ODD		0x00400000
+#define MDP_OV_PLAY_NOWAIT		0x00200000
+#define MDP_SOURCE_ROTATED_90		0x00100000
+#define MDP_OVERLAY_PP_CFG_EN		0x00080000
+#define MDP_BACKEND_COMPOSITION		0x00040000
+#define MDP_BORDERFILL_SUPPORTED	0x00010000
+#define MDP_SECURE_OVERLAY_SESSION      0x00008000
+#define MDP_SECURE_DISPLAY_OVERLAY_SESSION	0x00002000
+#define MDP_OV_PIPE_FORCE_DMA		0x00004000
+#define MDP_MEMORY_ID_TYPE_FB		0x00001000
+#define MDP_BWC_EN			0x00000400
+#define MDP_DECIMATION_EN		0x00000800
+#define MDP_SMP_FORCE_ALLOC		0x00200000
+#define MDP_TRANSP_NOP 0xffffffff
+#define MDP_ALPHA_NOP 0xff
+
+#define MDP_FB_PAGE_PROTECTION_NONCACHED         (0)
+#define MDP_FB_PAGE_PROTECTION_WRITECOMBINE      (1)
+#define MDP_FB_PAGE_PROTECTION_WRITETHROUGHCACHE (2)
+#define MDP_FB_PAGE_PROTECTION_WRITEBACKCACHE    (3)
+#define MDP_FB_PAGE_PROTECTION_WRITEBACKWACACHE  (4)
+/* Sentinel: Don't use! */
+#define MDP_FB_PAGE_PROTECTION_INVALID           (5)
+/* Count of the number of MDP_FB_PAGE_PROTECTION_... values. */
+#define MDP_NUM_FB_PAGE_PROTECTION_VALUES        (5)
+
+#define MDP_DEEP_COLOR_YUV444    0x1
+#define MDP_DEEP_COLOR_RGB30B    0x2
+#define MDP_DEEP_COLOR_RGB36B    0x4
+#define MDP_DEEP_COLOR_RGB48B    0x8
+
+struct mdp_rect {
+	uint32_t x;
+	uint32_t y;
+	uint32_t w;
+	uint32_t h;
+};
+
+struct mdp_img {
+	uint32_t width;
+	uint32_t height;
+	uint32_t format;
+	uint32_t offset;
+	int memory_id;		/* the file descriptor */
+	uint32_t priv;
+};
+
+struct mult_factor {
+	uint32_t numer;
+	uint32_t denom;
+};
+
+/*
+ * {3x3} + {3} ccs matrix
+ */
+
+#define MDP_CCS_RGB2YUV 	0
+#define MDP_CCS_YUV2RGB 	1
+
+#define MDP_CCS_SIZE	9
+#define MDP_BV_SIZE	3
+
+struct mdp_ccs {
+	int direction;			/* MDP_CCS_RGB2YUV or YUV2RGB */
+	uint16_t ccs[MDP_CCS_SIZE];	/* 3x3 color coefficients */
+	uint16_t bv[MDP_BV_SIZE];	/* 1x3 bias vector */
+};
+
+struct mdp_csc {
+	int id;
+	uint32_t csc_mv[9];
+	uint32_t csc_pre_bv[3];
+	uint32_t csc_post_bv[3];
+	uint32_t csc_pre_lv[6];
+	uint32_t csc_post_lv[6];
+};
+
+/* The version of the mdp_blit_req structure so that
+ * user applications can selectively decide which functionality
+ * to include
+ */
+
+#define MDP_BLIT_REQ_VERSION 3
+
+struct color {
+	uint32_t r;
+	uint32_t g;
+	uint32_t b;
+	uint32_t alpha;
+};
+
+struct mdp_blit_req {
+	struct mdp_img src;
+	struct mdp_img dst;
+	struct mdp_rect src_rect;
+	struct mdp_rect dst_rect;
+	struct color const_color;
+	uint32_t alpha;
+	uint32_t transp_mask;
+	uint32_t flags;
+	int sharpening_strength;  /* -127 <--> 127, default 64 */
+	uint8_t color_space;
+	uint32_t fps;
+};
+
+struct mdp_blit_req_list {
+	uint32_t count;
+	struct mdp_blit_req req[];
+};
+
+#define MSMFB_DATA_VERSION 2
+
+struct msmfb_data {
+	uint32_t offset;
+	int memory_id;
+	int id;
+	uint32_t flags;
+	uint32_t priv;
+	uint32_t iova;
+};
+
+#define MSMFB_NEW_REQUEST -1
+
+struct msmfb_overlay_data {
+	uint32_t id;
+	struct msmfb_data data;
+	uint32_t version_key;
+	struct msmfb_data plane1_data;
+	struct msmfb_data plane2_data;
+	struct msmfb_data dst_data;
+};
+
+struct msmfb_img {
+	uint32_t width;
+	uint32_t height;
+	uint32_t format;
+};
+
+#define MSMFB_WRITEBACK_DEQUEUE_BLOCKING 0x1
+struct msmfb_writeback_data {
+	struct msmfb_data buf_info;
+	struct msmfb_img img;
+};
+
+#define MDP_PP_OPS_ENABLE 0x1
+#define MDP_PP_OPS_READ 0x2
+#define MDP_PP_OPS_WRITE 0x4
+#define MDP_PP_OPS_DISABLE 0x8
+#define MDP_PP_IGC_FLAG_ROM0	0x10
+#define MDP_PP_IGC_FLAG_ROM1	0x20
+
+
+#define MDSS_PP_DSPP_CFG	0x000
+#define MDSS_PP_SSPP_CFG	0x100
+#define MDSS_PP_LM_CFG	0x200
+#define MDSS_PP_WB_CFG	0x300
+
+#define MDSS_PP_ARG_MASK	0x3C00
+#define MDSS_PP_ARG_NUM		4
+#define MDSS_PP_ARG_SHIFT	10
+#define MDSS_PP_LOCATION_MASK	0x0300
+#define MDSS_PP_LOGICAL_MASK	0x00FF
+
+#define MDSS_PP_ADD_ARG(var, arg) ((var) | (0x1 << (MDSS_PP_ARG_SHIFT + (arg))))
+#define PP_ARG(x, var) ((var) & (0x1 << (MDSS_PP_ARG_SHIFT + (x))))
+#define PP_LOCAT(var) ((var) & MDSS_PP_LOCATION_MASK)
+#define PP_BLOCK(var) ((var) & MDSS_PP_LOGICAL_MASK)
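+
+/*
+ * Illustrative example (not part of the original patch): a post-processing
+ * block id combines a location, a logical block number and optional
+ * argument bits, e.g. for DSPP block 1 with argument bit 0 set:
+ *
+ *	uint32_t block = MDSS_PP_DSPP_CFG | 1;
+ *
+ *	block = MDSS_PP_ADD_ARG(block, 0);
+ *
+ * afterwards PP_LOCAT(block) == MDSS_PP_DSPP_CFG, PP_BLOCK(block) == 1
+ * and PP_ARG(0, block) is non-zero.
+ */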
+
+
+struct mdp_qseed_cfg {
+	uint32_t table_num;
+	uint32_t ops;
+	uint32_t len;
+	uint32_t *data;
+};
+
+struct mdp_sharp_cfg {
+	uint32_t flags;
+	uint32_t strength;
+	uint32_t edge_thr;
+	uint32_t smooth_thr;
+	uint32_t noise_thr;
+};
+
+struct mdp_qseed_cfg_data {
+	uint32_t block;
+	struct mdp_qseed_cfg qseed_data;
+};
+
+#define MDP_OVERLAY_PP_CSC_CFG         0x1
+#define MDP_OVERLAY_PP_QSEED_CFG       0x2
+#define MDP_OVERLAY_PP_PA_CFG          0x4
+#define MDP_OVERLAY_PP_IGC_CFG         0x8
+#define MDP_OVERLAY_PP_SHARP_CFG       0x10
+#define MDP_OVERLAY_PP_HIST_CFG        0x20
+#define MDP_OVERLAY_PP_HIST_LUT_CFG    0x40
+#define MDP_OVERLAY_PP_PA_V2_CFG       0x80
+#define MDP_OVERLAY_PP_PCC_CFG	       0x100
+
+#define MDP_CSC_FLAG_ENABLE	0x1
+#define MDP_CSC_FLAG_YUV_IN	0x2
+#define MDP_CSC_FLAG_YUV_OUT	0x4
+
+#define MDP_CSC_MATRIX_COEFF_SIZE	9
+#define MDP_CSC_CLAMP_SIZE		6
+#define MDP_CSC_BIAS_SIZE		3
+
+struct mdp_csc_cfg {
+	/* flags to enable CSC and toggle RGB/YUV input and output */
+	uint32_t flags;
+	uint32_t csc_mv[MDP_CSC_MATRIX_COEFF_SIZE];
+	uint32_t csc_pre_bv[MDP_CSC_BIAS_SIZE];
+	uint32_t csc_post_bv[MDP_CSC_BIAS_SIZE];
+	uint32_t csc_pre_lv[MDP_CSC_CLAMP_SIZE];
+	uint32_t csc_post_lv[MDP_CSC_CLAMP_SIZE];
+};
+
+struct mdp_csc_cfg_data {
+	uint32_t block;
+	struct mdp_csc_cfg csc_data;
+};
+
+struct mdp_pa_cfg {
+	uint32_t flags;
+	uint32_t hue_adj;
+	uint32_t sat_adj;
+	uint32_t val_adj;
+	uint32_t cont_adj;
+};
+
+struct mdp_pa_mem_col_cfg {
+	uint32_t color_adjust_p0;
+	uint32_t color_adjust_p1;
+	uint32_t hue_region;
+	uint32_t sat_region;
+	uint32_t val_region;
+};
+
+#define MDP_SIX_ZONE_LUT_SIZE		384
+
+/* PA Write/Read extension flags */
+#define MDP_PP_PA_HUE_ENABLE		0x10
+#define MDP_PP_PA_SAT_ENABLE		0x20
+#define MDP_PP_PA_VAL_ENABLE		0x40
+#define MDP_PP_PA_CONT_ENABLE		0x80
+#define MDP_PP_PA_SIX_ZONE_ENABLE	0x100
+#define MDP_PP_PA_SKIN_ENABLE		0x200
+#define MDP_PP_PA_SKY_ENABLE		0x400
+#define MDP_PP_PA_FOL_ENABLE		0x800
+
+/* PA masks */
+/* Masks used in PA v1_7 only */
+#define MDP_PP_PA_MEM_PROT_HUE_EN	0x1
+#define MDP_PP_PA_MEM_PROT_SAT_EN	0x2
+#define MDP_PP_PA_MEM_PROT_VAL_EN	0x4
+#define MDP_PP_PA_MEM_PROT_CONT_EN	0x8
+#define MDP_PP_PA_MEM_PROT_SIX_EN	0x10
+#define MDP_PP_PA_MEM_PROT_BLEND_EN	0x20
+/* Masks used in all PAv2 versions */
+#define MDP_PP_PA_HUE_MASK		0x1000
+#define MDP_PP_PA_SAT_MASK		0x2000
+#define MDP_PP_PA_VAL_MASK		0x4000
+#define MDP_PP_PA_CONT_MASK		0x8000
+#define MDP_PP_PA_SIX_ZONE_HUE_MASK	0x10000
+#define MDP_PP_PA_SIX_ZONE_SAT_MASK	0x20000
+#define MDP_PP_PA_SIX_ZONE_VAL_MASK	0x40000
+#define MDP_PP_PA_MEM_COL_SKIN_MASK	0x80000
+#define MDP_PP_PA_MEM_COL_SKY_MASK	0x100000
+#define MDP_PP_PA_MEM_COL_FOL_MASK	0x200000
+#define MDP_PP_PA_MEM_PROTECT_EN	0x400000
+#define MDP_PP_PA_SAT_ZERO_EXP_EN	0x800000
+
+/* Flags for setting PA saturation and value hold */
+#define MDP_PP_PA_LEFT_HOLD		0x1
+#define MDP_PP_PA_RIGHT_HOLD		0x2
+
+struct mdp_pa_v2_data {
+	/* Mask bits for PA features */
+	uint32_t flags;
+	uint32_t global_hue_adj;
+	uint32_t global_sat_adj;
+	uint32_t global_val_adj;
+	uint32_t global_cont_adj;
+	struct mdp_pa_mem_col_cfg skin_cfg;
+	struct mdp_pa_mem_col_cfg sky_cfg;
+	struct mdp_pa_mem_col_cfg fol_cfg;
+	uint32_t six_zone_len;
+	uint32_t six_zone_thresh;
+	uint32_t *six_zone_curve_p0;
+	uint32_t *six_zone_curve_p1;
+};
+
+struct mdp_pa_mem_col_data_v1_7 {
+	uint32_t color_adjust_p0;
+	uint32_t color_adjust_p1;
+	uint32_t color_adjust_p2;
+	uint32_t blend_gain;
+	uint8_t sat_hold;
+	uint8_t val_hold;
+	uint32_t hue_region;
+	uint32_t sat_region;
+	uint32_t val_region;
+};
+
+struct mdp_pa_data_v1_7 {
+	uint32_t mode;
+	uint32_t global_hue_adj;
+	uint32_t global_sat_adj;
+	uint32_t global_val_adj;
+	uint32_t global_cont_adj;
+	struct mdp_pa_mem_col_data_v1_7 skin_cfg;
+	struct mdp_pa_mem_col_data_v1_7 sky_cfg;
+	struct mdp_pa_mem_col_data_v1_7 fol_cfg;
+	uint32_t six_zone_thresh;
+	uint32_t six_zone_adj_p0;
+	uint32_t six_zone_adj_p1;
+	uint8_t six_zone_sat_hold;
+	uint8_t six_zone_val_hold;
+	uint32_t six_zone_len;
+	uint32_t *six_zone_curve_p0;
+	uint32_t *six_zone_curve_p1;
+};
+
+
+struct mdp_pa_v2_cfg_data {
+	uint32_t version;
+	uint32_t block;
+	uint32_t flags;
+	struct mdp_pa_v2_data pa_v2_data;
+	void *cfg_payload;
+};
+
+
+enum {
+	mdp_igc_rec601 = 1,
+	mdp_igc_rec709,
+	mdp_igc_srgb,
+	mdp_igc_custom,
+	mdp_igc_rec_max,
+};
+
+struct mdp_igc_lut_data {
+	uint32_t block;
+	uint32_t version;
+	uint32_t len, ops;
+	uint32_t *c0_c1_data;
+	uint32_t *c2_data;
+	void *cfg_payload;
+};
+
+struct mdp_igc_lut_data_v1_7 {
+	uint32_t table_fmt;
+	uint32_t len;
+	uint32_t *c0_c1_data;
+	uint32_t *c2_data;
+};
+
+struct mdp_igc_lut_data_payload {
+	uint32_t table_fmt;
+	uint32_t len;
+	uint64_t __user c0_c1_data;
+	uint64_t __user c2_data;
+	uint32_t strength;
+};
+
+struct mdp_histogram_cfg {
+	uint32_t ops;
+	uint32_t block;
+	uint8_t frame_cnt;
+	uint8_t bit_mask;
+	uint16_t num_bins;
+};
+
+struct mdp_hist_lut_data_v1_7 {
+	uint32_t len;
+	uint32_t *data;
+};
+
+struct mdp_hist_lut_data {
+	uint32_t block;
+	uint32_t version;
+	uint32_t hist_lut_first;
+	uint32_t ops;
+	uint32_t len;
+	uint32_t *data;
+	void *cfg_payload;
+};
+
+struct mdp_pcc_coeff {
+	uint32_t c, r, g, b, rr, gg, bb, rg, gb, rb, rgb_0, rgb_1;
+};
+
+struct mdp_pcc_coeff_v1_7 {
+	uint32_t c, r, g, b, rg, gb, rb, rgb;
+};
+
+struct mdp_pcc_data_v1_7 {
+	struct mdp_pcc_coeff_v1_7 r, g, b;
+};
+
+struct mdp_pcc_cfg_data {
+	uint32_t version;
+	uint32_t block;
+	uint32_t ops;
+	struct mdp_pcc_coeff r, g, b;
+	void *cfg_payload;
+};
+
+enum {
+	mdp_lut_igc,
+	mdp_lut_pgc,
+	mdp_lut_hist,
+	mdp_lut_rgb,
+	mdp_lut_max,
+};
+struct mdp_overlay_pp_params {
+	uint32_t config_ops;
+	struct mdp_csc_cfg csc_cfg;
+	struct mdp_qseed_cfg qseed_cfg[2];
+	struct mdp_pa_cfg pa_cfg;
+	struct mdp_pa_v2_data pa_v2_cfg;
+	struct mdp_igc_lut_data igc_cfg;
+	struct mdp_sharp_cfg sharp_cfg;
+	struct mdp_histogram_cfg hist_cfg;
+	struct mdp_hist_lut_data hist_lut_cfg;
+	/* PAv2 cfg data for PA 2.x versions */
+	struct mdp_pa_v2_cfg_data pa_v2_cfg_data;
+	struct mdp_pcc_cfg_data pcc_cfg_data;
+};
+
+/**
+ * enum mdss_mdp_blend_op - Different blend operations set by userspace
+ *
+ * @BLEND_OP_NOT_DEFINED:    No blend operation defined for the layer.
+ * @BLEND_OP_OPAQUE:         Apply a constant blend operation. The layer
+ *                           would appear opaque in case fg plane alpha is
+ *                           0xff.
+ * @BLEND_OP_PREMULTIPLIED:  Apply source over blend rule. Layer already has
+ *                           alpha pre-multiplication done. If fg plane alpha
+ *                           is less than 0xff, apply modulation as well. This
+ *                           operation is intended on layers having alpha
+ *                           channel.
+ * @BLEND_OP_COVERAGE:       Apply source over blend rule. Layer is not alpha
+ *                           pre-multiplied. Apply pre-multiplication. If fg
+ *                           plane alpha is less than 0xff, apply modulation as
+ *                           well.
+ * @BLEND_OP_MAX:            Used to track maximum blend operation possible by
+ *                           mdp.
+ */
+enum mdss_mdp_blend_op {
+	BLEND_OP_NOT_DEFINED = 0,
+	BLEND_OP_OPAQUE,
+	BLEND_OP_PREMULTIPLIED,
+	BLEND_OP_COVERAGE,
+	BLEND_OP_MAX,
+};
+
+#define DECIMATED_DIMENSION(dim, deci) (((dim) + ((1 << (deci)) - 1)) >> (deci))
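+/*
+ * Illustrative example (not part of the original patch):
+ * DECIMATED_DIMENSION(1080, 2) computes (1080 + 3) >> 2 = 270, i.e. the
+ * source dimension after decimating by 4, rounded up; with deci == 0 the
+ * dimension is unchanged.
+ */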
+#define MAX_PLANES	4
+struct mdp_scale_data {
+	uint8_t enable_pxl_ext;
+
+	int init_phase_x[MAX_PLANES];
+	int phase_step_x[MAX_PLANES];
+	int init_phase_y[MAX_PLANES];
+	int phase_step_y[MAX_PLANES];
+
+	int num_ext_pxls_left[MAX_PLANES];
+	int num_ext_pxls_right[MAX_PLANES];
+	int num_ext_pxls_top[MAX_PLANES];
+	int num_ext_pxls_btm[MAX_PLANES];
+
+	int left_ftch[MAX_PLANES];
+	int left_rpt[MAX_PLANES];
+	int right_ftch[MAX_PLANES];
+	int right_rpt[MAX_PLANES];
+
+	int top_rpt[MAX_PLANES];
+	int btm_rpt[MAX_PLANES];
+	int top_ftch[MAX_PLANES];
+	int btm_ftch[MAX_PLANES];
+
+	uint32_t roi_w[MAX_PLANES];
+};
+
+/**
+ * enum mdp_overlay_pipe_type - Different pipe type set by userspace
+ *
+ * @PIPE_TYPE_AUTO:    Not specified, pipe will be selected according to flags.
+ * @PIPE_TYPE_VIG:     VIG pipe.
+ * @PIPE_TYPE_RGB:     RGB pipe.
+ * @PIPE_TYPE_DMA:     DMA pipe.
+ * @PIPE_TYPE_CURSOR:  CURSOR pipe.
+ * @PIPE_TYPE_MAX:     Used to track maximum number of pipe type.
+ */
+enum mdp_overlay_pipe_type {
+	PIPE_TYPE_AUTO = 0,
+	PIPE_TYPE_VIG,
+	PIPE_TYPE_RGB,
+	PIPE_TYPE_DMA,
+	PIPE_TYPE_CURSOR,
+	PIPE_TYPE_MAX,
+};
+
+/**
+ * struct mdp_overlay - overlay surface structure
+ * @src:	Source image information (width, height, format).
+ * @src_rect:	Source crop rectangle, portion of image that will be fetched.
+ *		This should always be within boundaries of source image.
+ * @dst_rect:	Destination rectangle, the position and size of image on screen.
+ *		This should always be within panel boundaries.
+ * @z_order:	Blending stage to occupy in display, if multiple layers are
+ *		present, highest z_order usually means the top most visible
+ *		layer. The range acceptable is from 0-3 to support blending
+ *		up to 4 layers.
+ * @is_fg:	This flag is used to disable blending of any layers with z_order
+ *		less than this overlay. It means that any layers with z_order
+ *		less than this layer will not be blended and will be replaced
+ *		by the background border color.
+ * @alpha:	Used to set plane opacity. The range can be from 0-255, where
+ *		0 means completely transparent and 255 means fully opaque.
+ * @transp_mask: Color used as color key for transparency. Any pixel in fetched
+ *		image matching this color will be transparent when blending.
+ *		The color should be in same format as the source image format.
+ * @flags:	This is used to customize operation of overlay. See MDP flags
+ *		for more information.
+ * @pipe_type:  Used to specify the type of overlay pipe.
+ * @user_data:	DEPRECATED* Used to store user application specific information.
+ * @bg_color:	Solid color used to fill the overlay surface when no source
+ *		buffer is provided.
+ * @horz_deci:	Horizontal decimation value, this indicates the amount of pixels
+ *		dropped for each pixel that is fetched from a line. The value
+ *		given is log2 of the decimation amount.
+ *		0: no decimation
+ *		1: decimate by 2 (drop 1 pixel for each pixel fetched)
+ *		2: decimate by 4 (drop 3 pixels for each pixel fetched)
+ *		3: decimate by 8 (drop 7 pixels for each pixel fetched)
+ *		4: decimate by 16 (drop 15 pixels for each pixel fetched)
+ * @vert_deci:	Vertical decimation value, this indicates the amount of lines
+ *		dropped for each line that is fetched from overlay. The value
+ *		given is log2 of the decimation amount.
+ *		0: no decimation
+ *		1: decimation by 2 (drop 1 line for each line fetched)
+ *		2: decimation by 4 (drop 3 lines for each line fetched)
+ *		3: decimation by 8 (drop 7 lines for each line fetched)
+ *		4: decimation by 16 (drop 15 lines for each line fetched)
+ * @overlay_pp_cfg: Overlay post processing configuration, for more information
+ *		see struct mdp_overlay_pp_params.
+ * @priority:	Priority is returned by the driver when overlay is set for the
+ *		first time. It indicates the priority of the underlying pipe
+ *		serving the overlay. This priority can be used by user-space
+ *		in source split when pipes are re-used and shuffled around to
+ *		reduce fallbacks.
+ */
+struct mdp_overlay {
+	struct msmfb_img src;
+	struct mdp_rect src_rect;
+	struct mdp_rect dst_rect;
+	uint32_t z_order;	/* stage number */
+	uint32_t is_fg;		/* control alpha & transp */
+	uint32_t alpha;
+	uint32_t blend_op;
+	uint32_t transp_mask;
+	uint32_t flags;
+	uint32_t pipe_type;
+	uint32_t id;
+	uint8_t priority;
+	uint32_t user_data[6];
+	uint32_t bg_color;
+	uint8_t horz_deci;
+	uint8_t vert_deci;
+	struct mdp_overlay_pp_params overlay_pp_cfg;
+	struct mdp_scale_data scale;
+	uint8_t color_space;
+	uint32_t frame_rate;
+};
+
+struct msmfb_overlay_3d {
+	uint32_t is_3d;
+	uint32_t width;
+	uint32_t height;
+};
+
+
+struct msmfb_overlay_blt {
+	uint32_t enable;
+	uint32_t offset;
+	uint32_t width;
+	uint32_t height;
+	uint32_t bpp;
+};
+
+struct mdp_histogram {
+	uint32_t frame_cnt;
+	uint32_t bin_cnt;
+	uint32_t *r;
+	uint32_t *g;
+	uint32_t *b;
+};
+
+#define MISR_CRC_BATCH_SIZE 32
+enum {
+	DISPLAY_MISR_EDP,
+	DISPLAY_MISR_DSI0,
+	DISPLAY_MISR_DSI1,
+	DISPLAY_MISR_HDMI,
+	DISPLAY_MISR_LCDC,
+	DISPLAY_MISR_MDP,
+	DISPLAY_MISR_ATV,
+	DISPLAY_MISR_DSI_CMD,
+	DISPLAY_MISR_MAX
+};
+
+enum {
+	MISR_OP_NONE,
+	MISR_OP_SFM,
+	MISR_OP_MFM,
+	MISR_OP_BM,
+	MISR_OP_MAX
+};
+
+struct mdp_misr {
+	uint32_t block_id;
+	uint32_t frame_count;
+	uint32_t crc_op_mode;
+	uint32_t crc_value[MISR_CRC_BATCH_SIZE];
+};
+
+/*
+ * mdp_block_type defines the identifiers for pipes in MDP 4.3 and up.
+ *
+ * MDP_BLOCK_RESERVED is provided for backward compatibility and is
+ * deprecated. It corresponds to DMA_P, so MDP_BLOCK_DMA_P should be used
+ * instead.
+ *
+ * MDP_LOGICAL_BLOCK_DISP_0 identifies the display pipe which fb0 uses,
+ * same for the others.
+ */
+
+enum {
+	MDP_BLOCK_RESERVED = 0,
+	MDP_BLOCK_OVERLAY_0,
+	MDP_BLOCK_OVERLAY_1,
+	MDP_BLOCK_VG_1,
+	MDP_BLOCK_VG_2,
+	MDP_BLOCK_RGB_1,
+	MDP_BLOCK_RGB_2,
+	MDP_BLOCK_DMA_P,
+	MDP_BLOCK_DMA_S,
+	MDP_BLOCK_DMA_E,
+	MDP_BLOCK_OVERLAY_2,
+	MDP_LOGICAL_BLOCK_DISP_0 = 0x10,
+	MDP_LOGICAL_BLOCK_DISP_1,
+	MDP_LOGICAL_BLOCK_DISP_2,
+	MDP_BLOCK_MAX,
+};
+
+/*
+ * mdp_histogram_start_req is used to provide the parameters for a
+ * histogram start request
+ */
+
+struct mdp_histogram_start_req {
+	uint32_t block;
+	uint8_t frame_cnt;
+	uint8_t bit_mask;
+	uint16_t num_bins;
+};
+
+/*
+ * mdp_histogram_data is used to return the histogram data, once
+ * the histogram is done/stopped/cancelled
+ */
+
+struct mdp_histogram_data {
+	uint32_t block;
+	uint32_t bin_cnt;
+	uint32_t *c0;
+	uint32_t *c1;
+	uint32_t *c2;
+	uint32_t *extra_info;
+};
+
+
+#define GC_LUT_ENTRIES_V1_7	512
+
+struct mdp_ar_gc_lut_data {
+	uint32_t x_start;
+	uint32_t slope;
+	uint32_t offset;
+};
+
+#define MDP_PP_PGC_ROUNDING_ENABLE 0x10
+struct mdp_pgc_lut_data {
+	uint32_t version;
+	uint32_t block;
+	uint32_t flags;
+	uint8_t num_r_stages;
+	uint8_t num_g_stages;
+	uint8_t num_b_stages;
+	struct mdp_ar_gc_lut_data *r_data;
+	struct mdp_ar_gc_lut_data *g_data;
+	struct mdp_ar_gc_lut_data *b_data;
+	void *cfg_payload;
+};
+
+#define PGC_LUT_ENTRIES 1024
+struct mdp_pgc_lut_data_v1_7 {
+	uint32_t  len;
+	uint32_t  *c0_data;
+	uint32_t  *c1_data;
+	uint32_t  *c2_data;
+};
+
+/*
+ * mdp_rgb_lut_data is used to provide parameters for configuring the
+ * generic RGB LUT for gamma correction or other LUT update use cases.
+ */
+struct mdp_rgb_lut_data {
+	uint32_t flags;
+	uint32_t lut_type;
+	struct fb_cmap cmap;
+};
+
+enum {
+	mdp_rgb_lut_gc,
+	mdp_rgb_lut_hist,
+};
+
+struct mdp_lut_cfg_data {
+	uint32_t lut_type;
+	union {
+		struct mdp_igc_lut_data igc_lut_data;
+		struct mdp_pgc_lut_data pgc_lut_data;
+		struct mdp_hist_lut_data hist_lut_data;
+		struct mdp_rgb_lut_data rgb_lut_data;
+	} data;
+};
+
+struct mdp_bl_scale_data {
+	uint32_t min_lvl;
+	uint32_t scale;
+};
+
+struct mdp_pa_cfg_data {
+	uint32_t block;
+	struct mdp_pa_cfg pa_data;
+};
+
+#define MDP_DITHER_DATA_V1_7_SZ 16
+
+struct mdp_dither_data_v1_7 {
+	uint32_t g_y_depth;
+	uint32_t r_cr_depth;
+	uint32_t b_cb_depth;
+	uint32_t len;
+	uint32_t data[MDP_DITHER_DATA_V1_7_SZ];
+	uint32_t temporal_en;
+};
+
+struct mdp_pa_dither_data {
+	uint64_t data_flags;
+	uint32_t matrix_sz;
+	uint64_t __user matrix_data;
+	uint32_t strength;
+	uint32_t offset_en;
+};
+
+struct mdp_dither_cfg_data {
+	uint32_t version;
+	uint32_t block;
+	uint32_t flags;
+	uint32_t mode;
+	uint32_t g_y_depth;
+	uint32_t r_cr_depth;
+	uint32_t b_cb_depth;
+	void *cfg_payload;
+};
+
+#define MDP_GAMUT_TABLE_NUM		8
+#define MDP_GAMUT_TABLE_NUM_V1_7	4
+#define MDP_GAMUT_SCALE_OFF_TABLE_NUM	3
+#define MDP_GAMUT_TABLE_V1_7_SZ 1229
+#define MDP_GAMUT_SCALE_OFF_SZ 16
+#define MDP_GAMUT_TABLE_V1_7_COARSE_SZ 32
+
+struct mdp_gamut_cfg_data {
+	uint32_t block;
+	uint32_t flags;
+	uint32_t version;
+	/* v1 version specific params */
+	uint32_t gamut_first;
+	uint32_t tbl_size[MDP_GAMUT_TABLE_NUM];
+	uint16_t *r_tbl[MDP_GAMUT_TABLE_NUM];
+	uint16_t *g_tbl[MDP_GAMUT_TABLE_NUM];
+	uint16_t *b_tbl[MDP_GAMUT_TABLE_NUM];
+	/* params for newer versions of gamut */
+	void *cfg_payload;
+};
+
+enum {
+	mdp_gamut_fine_mode = 0x1,
+	mdp_gamut_coarse_mode,
+};
+
+struct mdp_gamut_data_v1_7 {
+	uint32_t mode;
+	uint32_t map_en;
+	uint32_t tbl_size[MDP_GAMUT_TABLE_NUM_V1_7];
+	uint32_t *c0_data[MDP_GAMUT_TABLE_NUM_V1_7];
+	uint32_t *c1_c2_data[MDP_GAMUT_TABLE_NUM_V1_7];
+	uint32_t  tbl_scale_off_sz[MDP_GAMUT_SCALE_OFF_TABLE_NUM];
+	uint32_t  *scale_off_data[MDP_GAMUT_SCALE_OFF_TABLE_NUM];
+};
+
+struct mdp_calib_config_data {
+	uint32_t ops;
+	uint32_t addr;
+	uint32_t data;
+};
+
+struct mdp_calib_config_buffer {
+	uint32_t ops;
+	uint32_t size;
+	uint32_t *buffer;
+};
+
+struct mdp_calib_dcm_state {
+	uint32_t ops;
+	uint32_t dcm_state;
+};
+
+enum {
+	DCM_UNINIT,
+	DCM_UNBLANK,
+	DCM_ENTER,
+	DCM_EXIT,
+	DCM_BLANK,
+	DTM_ENTER,
+	DTM_EXIT,
+};
+
+#define MDSS_PP_SPLIT_LEFT_ONLY		0x10000000
+#define MDSS_PP_SPLIT_RIGHT_ONLY	0x20000000
+#define MDSS_PP_SPLIT_MASK		0x30000000
+
+#define MDSS_MAX_BL_BRIGHTNESS 255
+#define AD_BL_LIN_LEN 256
+#define AD_BL_ATT_LUT_LEN 33
+
+#define MDSS_AD_MODE_AUTO_BL	0x0
+#define MDSS_AD_MODE_AUTO_STR	0x1
+#define MDSS_AD_MODE_TARG_STR	0x3
+#define MDSS_AD_MODE_MAN_STR	0x7
+#define MDSS_AD_MODE_CALIB	0xF
+
+#define MDP_PP_AD_INIT	0x10
+#define MDP_PP_AD_CFG	0x20
+
+struct mdss_ad_init {
+	uint32_t asym_lut[33];
+	uint32_t color_corr_lut[33];
+	uint8_t i_control[2];
+	uint16_t black_lvl;
+	uint16_t white_lvl;
+	uint8_t var;
+	uint8_t limit_ampl;
+	uint8_t i_dither;
+	uint8_t slope_max;
+	uint8_t slope_min;
+	uint8_t dither_ctl;
+	uint8_t format;
+	uint8_t auto_size;
+	uint16_t frame_w;
+	uint16_t frame_h;
+	uint8_t logo_v;
+	uint8_t logo_h;
+	uint32_t alpha;
+	uint32_t alpha_base;
+	uint32_t al_thresh;
+	uint32_t bl_lin_len;
+	uint32_t bl_att_len;
+	uint32_t *bl_lin;
+	uint32_t *bl_lin_inv;
+	uint32_t *bl_att_lut;
+};
+
+#define MDSS_AD_BL_CTRL_MODE_EN 1
+#define MDSS_AD_BL_CTRL_MODE_DIS 0
+struct mdss_ad_cfg {
+	uint32_t mode;
+	uint32_t al_calib_lut[33];
+	uint16_t backlight_min;
+	uint16_t backlight_max;
+	uint16_t backlight_scale;
+	uint16_t amb_light_min;
+	uint16_t filter[2];
+	uint16_t calib[4];
+	uint8_t strength_limit;
+	uint8_t t_filter_recursion;
+	uint16_t stab_itr;
+	uint32_t bl_ctrl_mode;
+};
+
+struct mdss_ad_bl_cfg {
+	uint32_t bl_min_delta;
+	uint32_t bl_low_limit;
+};
+
+/* ops uses standard MDP_PP_* flags */
+struct mdss_ad_init_cfg {
+	uint32_t ops;
+	union {
+		struct mdss_ad_init init;
+		struct mdss_ad_cfg cfg;
+	} params;
+};
+
+/* mode uses MDSS_AD_MODE_* flags */
+struct mdss_ad_input {
+	uint32_t mode;
+	union {
+		uint32_t amb_light;
+		uint32_t strength;
+		uint32_t calib_bl;
+	} in;
+	uint32_t output;
+};
+
+#define MDSS_CALIB_MODE_BL	0x1
+struct mdss_calib_cfg {
+	uint32_t ops;
+	uint32_t calib_mask;
+};
+
+enum {
+	mdp_op_pcc_cfg,
+	mdp_op_csc_cfg,
+	mdp_op_lut_cfg,
+	mdp_op_qseed_cfg,
+	mdp_bl_scale_cfg,
+	mdp_op_pa_cfg,
+	mdp_op_pa_v2_cfg,
+	mdp_op_dither_cfg,
+	mdp_op_gamut_cfg,
+	mdp_op_calib_cfg,
+	mdp_op_ad_cfg,
+	mdp_op_ad_input,
+	mdp_op_calib_mode,
+	mdp_op_calib_buffer,
+	mdp_op_calib_dcm_state,
+	mdp_op_max,
+	mdp_op_pa_dither_cfg,
+	mdp_op_ad_bl_cfg,
+	mdp_op_pp_max = 255,
+};
+#define mdp_op_pa_dither_cfg mdp_op_pa_dither_cfg
+#define mdp_op_pp_max mdp_op_pp_max
+
+#define mdp_op_ad_bl_cfg mdp_op_ad_bl_cfg
+
+enum {
+	WB_FORMAT_NV12,
+	WB_FORMAT_RGB_565,
+	WB_FORMAT_RGB_888,
+	WB_FORMAT_xRGB_8888,
+	WB_FORMAT_ARGB_8888,
+	WB_FORMAT_BGRA_8888,
+	WB_FORMAT_BGRX_8888,
+	WB_FORMAT_ARGB_8888_INPUT_ALPHA /* not yet supported */
+};
+
+struct msmfb_mdp_pp {
+	uint32_t op;
+	union {
+		struct mdp_pcc_cfg_data pcc_cfg_data;
+		struct mdp_csc_cfg_data csc_cfg_data;
+		struct mdp_lut_cfg_data lut_cfg_data;
+		struct mdp_qseed_cfg_data qseed_cfg_data;
+		struct mdp_bl_scale_data bl_scale_data;
+		struct mdp_pa_cfg_data pa_cfg_data;
+		struct mdp_pa_v2_cfg_data pa_v2_cfg_data;
+		struct mdp_dither_cfg_data dither_cfg_data;
+		struct mdp_gamut_cfg_data gamut_cfg_data;
+		struct mdp_calib_config_data calib_cfg;
+		struct mdss_ad_init_cfg ad_init_cfg;
+		struct mdss_calib_cfg mdss_calib_cfg;
+		struct mdss_ad_input ad_input;
+		struct mdp_calib_config_buffer calib_buffer;
+		struct mdp_calib_dcm_state calib_dcm;
+		struct mdss_ad_bl_cfg ad_bl_cfg;
+	} data;
+};
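/*
 * The @op field selects which union member the driver reads. A hedged
 * sketch for backlight scaling, assuming the MSMFB_MDP_PP ioctl declared
 * earlier in this header; the 0..65535 fixed-point scale convention is
 * an assumption, not taken from this header.
 */
#include <sys/ioctl.h>

static int set_bl_scale_half(int fb_fd)
{
	struct msmfb_mdp_pp pp = {
		.op = mdp_bl_scale_cfg,
		.data.bl_scale_data = {
			.min_lvl = 30,		/* never scale below this level */
			.scale   = 32768,	/* ~50% under the assumed convention */
		},
	};

	return ioctl(fb_fd, MSMFB_MDP_PP, &pp);
}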
+
+#define FB_METADATA_VIDEO_INFO_CODE_SUPPORT 1
+enum {
+	metadata_op_none,
+	metadata_op_base_blend,
+	metadata_op_frame_rate,
+	metadata_op_vic,
+	metadata_op_wb_format,
+	metadata_op_wb_secure,
+	metadata_op_get_caps,
+	metadata_op_crc,
+	metadata_op_get_ion_fd,
+	metadata_op_max
+};
+
+struct mdp_blend_cfg {
+	uint32_t is_premultiplied;
+};
+
+struct mdp_mixer_cfg {
+	uint32_t writeback_format;
+	uint32_t alpha;
+};
+
+struct mdss_hw_caps {
+	uint32_t mdp_rev;
+	uint8_t rgb_pipes;
+	uint8_t vig_pipes;
+	uint8_t dma_pipes;
+	uint8_t max_smp_cnt;
+	uint8_t smp_per_pipe;
+	uint32_t features;
+};
+
+struct msmfb_metadata {
+	uint32_t op;
+	uint32_t flags;
+	union {
+		struct mdp_misr misr_request;
+		struct mdp_blend_cfg blend_cfg;
+		struct mdp_mixer_cfg mixer_cfg;
+		uint32_t panel_frame_rate;
+		uint32_t video_info_code;
+		struct mdss_hw_caps caps;
+		uint8_t secure_en;
+		int fbmem_ionfd;
+	} data;
+};
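/*
 * The same op/union dispatch serves metadata queries. A sketch assuming
 * the MSMFB_METADATA_GET ioctl declared earlier in this header.
 */
#include <stdint.h>
#include <sys/ioctl.h>

static int get_panel_fps(int fb_fd, uint32_t *fps)
{
	struct msmfb_metadata meta = { .op = metadata_op_frame_rate };

	if (ioctl(fb_fd, MSMFB_METADATA_GET, &meta) < 0)
		return -1;
	*fps = meta.data.panel_frame_rate;
	return 0;
}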
+
+#define MDP_MAX_FENCE_FD	32
+#define MDP_BUF_SYNC_FLAG_WAIT	1
+#define MDP_BUF_SYNC_FLAG_RETIRE_FENCE	0x10
+
+struct mdp_buf_sync {
+	uint32_t flags;
+	uint32_t acq_fen_fd_cnt;
+	uint32_t session_id;
+	int *acq_fen_fd;
+	int *rel_fen_fd;
+	int *retire_fen_fd;
+};
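/*
 * Typical fence plumbing through mdp_buf_sync, as a hedged sketch: the
 * acquire fd is produced elsewhere (e.g. by the GPU, assumed here), and
 * the driver writes the release/retire fds back through the pointers.
 * Assumes the MSMFB_BUFFER_SYNC ioctl declared earlier in this header.
 */
#include <sys/ioctl.h>

static int sync_one_buffer(int fb_fd, int acquire_fd, int *rel, int *retire)
{
	struct mdp_buf_sync sync = {
		.flags          = MDP_BUF_SYNC_FLAG_RETIRE_FENCE,
		.acq_fen_fd_cnt = 1,
		.session_id     = 0,
		.acq_fen_fd     = &acquire_fd,
		.rel_fen_fd     = rel,		/* out: release fence */
		.retire_fen_fd  = retire,	/* out: retire fence */
	};

	return ioctl(fb_fd, MSMFB_BUFFER_SYNC, &sync);
}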
+
+struct mdp_async_blit_req_list {
+	struct mdp_buf_sync sync;
+	uint32_t count;
+	struct mdp_blit_req req[];
+};
+
+#define MDP_DISPLAY_COMMIT_OVERLAY	1
+
+struct mdp_display_commit {
+	uint32_t flags;
+	uint32_t wait_for_finish;
+	struct fb_var_screeninfo var;
+	/*
+	 * User-space must populate the ROIs according to the following rules:
+	 * 1. source split is enabled: l_roi = roi and r_roi = 0
+	 * 2. source split is disabled:
+	 *	2.1 split display: l_roi = l_roi and r_roi = r_roi
+	 *	2.2 non split display: l_roi = roi and r_roi = 0
+	 */
+	struct mdp_rect l_roi;
+	struct mdp_rect r_roi;
+};
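/*
 * The ROI rules above restated as code for the non-split case (rule
 * 2.2): the whole ROI goes in l_roi and r_roi stays zeroed. A sketch
 * assuming the MSMFB_DISPLAY_COMMIT ioctl declared earlier in this
 * header and a 1080x1920 panel.
 */
#include <sys/ioctl.h>

static int commit_full_frame(int fb_fd)
{
	struct mdp_display_commit commit = {
		.flags = MDP_DISPLAY_COMMIT_OVERLAY,
		.wait_for_finish = 1,
		.l_roi = { .x = 0, .y = 0, .w = 1080, .h = 1920 },
		/* r_roi left zeroed per rule 2.2 */
	};

	return ioctl(fb_fd, MSMFB_DISPLAY_COMMIT, &commit);
}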
+
+/**
+ * struct mdp_overlay_list - argument for ioctl MSMFB_OVERLAY_PREPARE
+ * @num_overlays:	Number of overlay layers as part of the frame.
+ * @overlay_list:	Pointer to a list of overlay structures identifying
+ *			the layers as part of the frame
+ * @flags:		Flags can be used to extend behavior.
+ * @processed_overlays:	Output parameter indicating how many pipes were
+ *			successful. If there are no errors this number should
+ *			match num_overlays. Otherwise it indicates the index of
+ *			the first overlay that could not be set.
+ */
+struct mdp_overlay_list {
+	uint32_t num_overlays;
+	struct mdp_overlay **overlay_list;
+	uint32_t flags;
+	uint32_t processed_overlays;
+};
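/*
 * Validating a frame with MSMFB_OVERLAY_PREPARE (named in the kerneldoc
 * above); a sketch in which the two mdp_overlay descriptors are assumed
 * to be populated by the caller. On failure, processed_overlays tells
 * how far validation got.
 */
#include <stdint.h>
#include <sys/ioctl.h>

static int prepare_frame(int fb_fd, struct mdp_overlay *layer0,
			 struct mdp_overlay *layer1, uint32_t *processed)
{
	struct mdp_overlay *layers[2] = { layer0, layer1 };
	struct mdp_overlay_list list = {
		.num_overlays = 2,
		.overlay_list = layers,
	};
	int ret = ioctl(fb_fd, MSMFB_OVERLAY_PREPARE, &list);

	*processed = list.processed_overlays;
	return ret;
}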
+
+struct mdp_page_protection {
+	uint32_t page_protection;
+};
+
+
+struct mdp_mixer_info {
+	int pndx;
+	int pnum;
+	int ptype;
+	int mixer_num;
+	int z_order;
+};
+
+#define MAX_PIPE_PER_MIXER  7
+
+struct msmfb_mixer_info_req {
+	int mixer_num;
+	int cnt;
+	struct mdp_mixer_info info[MAX_PIPE_PER_MIXER];
+};
+
+enum {
+	DISPLAY_SUBSYSTEM_ID,
+	ROTATOR_SUBSYSTEM_ID,
+};
+
+enum {
+	MDP_IOMMU_DOMAIN_CP,
+	MDP_IOMMU_DOMAIN_NS,
+};
+
+enum {
+	MDP_WRITEBACK_MIRROR_OFF,
+	MDP_WRITEBACK_MIRROR_ON,
+	MDP_WRITEBACK_MIRROR_PAUSE,
+	MDP_WRITEBACK_MIRROR_RESUME,
+};
+
+/*
+ * The enum values are continued below as preprocessor macro definitions
+ */
+enum mdp_color_space {
+	MDP_CSC_ITU_R_601,
+	MDP_CSC_ITU_R_601_FR,
+	MDP_CSC_ITU_R_709,
+};
+
+/*
+ * These definitions are a continuation of the mdp_color_space enum above
+ */
+#define MDP_CSC_ITU_R_2020	(MDP_CSC_ITU_R_709 + 1)
+#define MDP_CSC_ITU_R_2020_FR	(MDP_CSC_ITU_R_2020 + 1)
+
+enum {
+	mdp_igc_v1_7 = 1,
+	mdp_igc_vmax,
+	mdp_hist_lut_v1_7,
+	mdp_hist_lut_vmax,
+	mdp_pgc_v1_7,
+	mdp_pgc_vmax,
+	mdp_dither_v1_7,
+	mdp_dither_vmax,
+	mdp_gamut_v1_7,
+	mdp_gamut_vmax,
+	mdp_pa_v1_7,
+	mdp_pa_vmax,
+	mdp_pcc_v1_7,
+	mdp_pcc_vmax,
+	mdp_pp_legacy,
+	mdp_dither_pa_v1_7,
+	mdp_igc_v3,
+	mdp_pp_unknown = 255
+};
+
+#define mdp_dither_pa_v1_7 mdp_dither_pa_v1_7
+#define mdp_pp_unknown mdp_pp_unknown
+#define mdp_igc_v3 mdp_igc_v3
+
+/* PP Features */
+enum {
+	IGC = 1,
+	PCC,
+	GC,
+	PA,
+	GAMUT,
+	DITHER,
+	QSEED,
+	HIST_LUT,
+	HIST,
+	PP_FEATURE_MAX,
+	PA_DITHER,
+	PP_MAX_FEATURES = 25,
+};
+
+#define PA_DITHER PA_DITHER
+#define PP_MAX_FEATURES PP_MAX_FEATURES
+
+struct mdp_pp_feature_version {
+	uint32_t pp_feature;
+	uint32_t version_info;
+};
+#endif /*_UAPI_MSM_MDP_H_*/
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/include/uapi/linux/msm_pft.h	2019-01-22 16:16:28.567292264 +0100
@@ -0,0 +1,134 @@
+/* Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef MSM_PFT_H_
+#define MSM_PFT_H_
+
+#include <linux/types.h>
+
+/**
+ *  enum pft_command_opcode - PFT driver command ID
+ *
+ *  @PFT_CMD_OPCODE_SET_STATE -
+ *      command ID to set PFT driver state
+ *  @PFT_CMD_OPCODE_UPDATE_REG_APP_UID -
+ *      command ID to update the list of registered application
+ *      UIDs
+ *  @PFT_CMD_OPCODE_PERFORM_IN_PLACE_FILE_ENC -
+ *      command ID to perform in-place file encryption
+enum pft_command_opcode {
+	PFT_CMD_OPCODE_SET_STATE,
+	PFT_CMD_OPCODE_UPDATE_REG_APP_UID,
+	PFT_CMD_OPCODE_PERFORM_IN_PLACE_FILE_ENC,
+	/* */
+	PFT_CMD_OPCODE_MAX_COMMAND_INDEX
+};
+
+/**
+ * enum pft_state - PFT driver operational states
+ *
+ * @PFT_STATE_DEACTIVATED - driver is deactivated.
+ * @PFT_STATE_DEACTIVATING - driver is in the process of being deactivated.
+ * @PFT_STATE_KEY_REMOVED - driver is active but no encryption key is loaded.
+ * @PFT_STATE_REMOVING_KEY - driver is active, but the encryption key is being
+ *      removed.
+ * @PFT_STATE_KEY_LOADED - driver is active, and the encryption key is loaded
+ *      into the encryption block, so registered apps can perform file
+ *      operations on encrypted files.
+ */
+enum pft_state {
+	PFT_STATE_DEACTIVATED,
+	PFT_STATE_DEACTIVATING,
+	PFT_STATE_KEY_REMOVED,
+	PFT_STATE_REMOVING_KEY,
+	PFT_STATE_KEY_LOADED,
+	/* Internal */
+	PFT_STATE_MAX_INDEX
+};
+
+/**
+ * enum pft_command_response_code - PFT response on the previous
+ * command
+ *
+ * @PFT_CMD_RESP_SUCCESS - The command was properly processed
+ *      without an error.
+ * @PFT_CMD_RESP_GENERAL_ERROR -
+ *      Indicates an error that cannot be better described by a
+ *      more specific error below.
+ * @PFT_CMD_RESP_INVALID_COMMAND - Invalid or unsupported
+ *      command id.
+ * @PFT_CMD_RESP_INVALID_CMD_PARAMS - Invalid command
+ *	parameters.
+ * @PFT_CMD_RESP_INVALID_STATE - Invalid state.
+ * @PFT_CMD_RESP_ALREADY_IN_STATE - Indicates that
+ *      the new state is equal to the existing one.
+ * @PFT_CMD_RESP_INPLACE_FILE_IS_OPEN - Indicates
+ *      that the file that should be encrypted is already open
+ *      and cannot be encrypted in place.
+ * @PFT_CMD_RESP_ENT_FILES_CLOSING_FAILURE -
+ *	Indicates that PFT failed to close Enterprise files.
+ * @PFT_CMD_RESP_MAX_INDEX
+ */
+enum pft_command_response_code {
+	PFT_CMD_RESP_SUCCESS,
+	PFT_CMD_RESP_GENERAL_ERROR,
+	PFT_CMD_RESP_INVALID_COMMAND,
+	PFT_CMD_RESP_INVALID_CMD_PARAMS,
+	PFT_CMD_RESP_INVALID_STATE,
+	PFT_CMD_RESP_ALREADY_IN_STATE,
+	PFT_CMD_RESP_INPLACE_FILE_IS_OPEN,
+	PFT_CMD_RESP_ENT_FILES_CLOSING_FAILURE,
+	/* Internal */
+	PFT_CMD_RESP_MAX_INDEX
+};
+
+/**
+ * struct pft_command_response - response structure
+ *
+ * @command_id - see enum pft_command_opcode
+ * @error_code - see enum pft_command_response_code
+ */
+struct pft_command_response {
+	__u32 command_id;
+	__u32 error_code;
+};
+
+/**
+ * struct pft_command - pft command
+ *
+ * @opcode - see enum pft_command_opcode.
+ * @set_state.state - see enum pft_state.
+ * @update_app_list.items_count - number of items in the
+ *      registered applications list.
+ * @update_app_list.table - registered applications array
+ * @preform_in_place_file_enc.file_descriptor - file descriptor
+ *      of the open file to be encrypted in place.
+ */
+struct pft_command {
+	__u32 opcode;
+	union {
+		struct {
+			/* @see pft_state */
+			__u32 state;
+		} set_state;
+		struct {
+			__u32 items_count; /* number of items */
+			uid_t table[0]; /* array of UIDs */
+		} update_app_list;
+		struct {
+			__u32 file_descriptor;
+		} preform_in_place_file_enc;
+	};
+};
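/*
 * Building a command around the flexible UID table above: the payload
 * size depends on items_count, so the command is heap-allocated. How the
 * command reaches the driver is not defined in this header; the write()
 * to a pft_fd below is an assumption for illustration only.
 */
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/types.h>

static int pft_register_uids(int pft_fd, const uid_t *uids, __u32 count)
{
	size_t sz = sizeof(struct pft_command) + count * sizeof(uid_t);
	struct pft_command *cmd = calloc(1, sz);
	ssize_t written;

	if (!cmd)
		return -1;
	cmd->opcode = PFT_CMD_OPCODE_UPDATE_REG_APP_UID;
	cmd->update_app_list.items_count = count;
	memcpy(cmd->update_app_list.table, uids, count * sizeof(uid_t));
	written = write(pft_fd, cmd, sz);	/* transport is an assumption */
	free(cmd);
	return written == (ssize_t)sz ? 0 : -1;
}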
+
+#endif /* MSM_PFT_H_ */
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/include/uapi/linux/msm_rmnet.h	2019-01-22 16:16:28.567292264 +0100
@@ -0,0 +1,156 @@
+#ifndef _UAPI_MSM_RMNET_H_
+#define _UAPI_MSM_RMNET_H_
+
+/* Bitmap macros for RmNET driver operation mode. */
+#define RMNET_MODE_NONE     (0x00)
+#define RMNET_MODE_LLP_ETH  (0x01)
+#define RMNET_MODE_LLP_IP   (0x02)
+#define RMNET_MODE_QOS      (0x04)
+#define RMNET_MODE_MASK     (RMNET_MODE_LLP_ETH | \
+			     RMNET_MODE_LLP_IP  | \
+			     RMNET_MODE_QOS)
+
+#define RMNET_IS_MODE_QOS(mode)  \
+	((mode & RMNET_MODE_QOS) == RMNET_MODE_QOS)
+#define RMNET_IS_MODE_IP(mode)   \
+	((mode & RMNET_MODE_LLP_IP) == RMNET_MODE_LLP_IP)
+
+/* IOCTL command enum
+ * Values chosen to not conflict with other drivers in the ecosystem */
+enum rmnet_ioctl_cmds_e {
+	RMNET_IOCTL_SET_LLP_ETHERNET = 0x000089F1, /* Set Ethernet protocol  */
+	RMNET_IOCTL_SET_LLP_IP       = 0x000089F2, /* Set RAWIP protocol     */
+	RMNET_IOCTL_GET_LLP          = 0x000089F3, /* Get link protocol      */
+	RMNET_IOCTL_SET_QOS_ENABLE   = 0x000089F4, /* Set QoS header enabled */
+	RMNET_IOCTL_SET_QOS_DISABLE  = 0x000089F5, /* Set QoS header disabled*/
+	RMNET_IOCTL_GET_QOS          = 0x000089F6, /* Get QoS header state   */
+	RMNET_IOCTL_GET_OPMODE       = 0x000089F7, /* Get operation mode     */
+	RMNET_IOCTL_OPEN             = 0x000089F8, /* Open transport port    */
+	RMNET_IOCTL_CLOSE            = 0x000089F9, /* Close transport port   */
+	RMNET_IOCTL_FLOW_ENABLE      = 0x000089FA, /* Flow enable            */
+	RMNET_IOCTL_FLOW_DISABLE     = 0x000089FB, /* Flow disable           */
+	RMNET_IOCTL_FLOW_SET_HNDL    = 0x000089FC, /* Set flow handle        */
+	RMNET_IOCTL_EXTENDED         = 0x000089FD, /* Extended IOCTLs        */
+	RMNET_IOCTL_MAX
+};
+
+enum rmnet_ioctl_extended_cmds_e {
+/* RmNet Data Required IOCTLs */
+	RMNET_IOCTL_GET_SUPPORTED_FEATURES     = 0x0000,   /* Get features    */
+	RMNET_IOCTL_SET_MRU                    = 0x0001,   /* Set MRU         */
+	RMNET_IOCTL_GET_MRU                    = 0x0002,   /* Get MRU         */
+	RMNET_IOCTL_GET_EPID                   = 0x0003,   /* Get endpoint ID */
+	RMNET_IOCTL_GET_DRIVER_NAME            = 0x0004,   /* Get driver name */
+	RMNET_IOCTL_ADD_MUX_CHANNEL            = 0x0005,   /* Add MUX ID      */
+	RMNET_IOCTL_SET_EGRESS_DATA_FORMAT     = 0x0006,   /* Set EDF         */
+	RMNET_IOCTL_SET_INGRESS_DATA_FORMAT    = 0x0007,   /* Set IDF         */
+	RMNET_IOCTL_SET_AGGREGATION_COUNT      = 0x0008,   /* Set agg count   */
+	RMNET_IOCTL_GET_AGGREGATION_COUNT      = 0x0009,   /* Get agg count   */
+	RMNET_IOCTL_SET_AGGREGATION_SIZE       = 0x000A,   /* Set agg size    */
+	RMNET_IOCTL_GET_AGGREGATION_SIZE       = 0x000B,   /* Get agg size    */
+	RMNET_IOCTL_FLOW_CONTROL               = 0x000C,   /* Do flow control */
+	RMNET_IOCTL_GET_DFLT_CONTROL_CHANNEL   = 0x000D,   /* For legacy use  */
+	RMNET_IOCTL_GET_HWSW_MAP               = 0x000E,   /* Get HW/SW map   */
+	RMNET_IOCTL_SET_RX_HEADROOM            = 0x000F,   /* RX Headroom     */
+	RMNET_IOCTL_GET_EP_PAIR                = 0x0010,   /* Endpoint pair   */
+	RMNET_IOCTL_SET_QOS_VERSION            = 0x0011,   /* 8/6 byte QoS hdr*/
+	RMNET_IOCTL_GET_QOS_VERSION            = 0x0012,   /* 8/6 byte QoS hdr*/
+	RMNET_IOCTL_GET_SUPPORTED_QOS_MODES    = 0x0013,   /* Get QoS modes   */
+	RMNET_IOCTL_SET_SLEEP_STATE            = 0x0014,   /* Set sleep state */
+	RMNET_IOCTL_SET_XLAT_DEV_INFO          = 0x0015,   /* xlat dev name   */
+	RMNET_IOCTL_DEREGISTER_DEV             = 0x0016,   /* Dereg a net dev */
+	RMNET_IOCTL_GET_SG_SUPPORT             = 0x0017,   /* Query sg support*/
+	RMNET_IOCTL_EXTENDED_MAX               = 0x0018
+};
+
+/* Return values for the RMNET_IOCTL_GET_SUPPORTED_FEATURES IOCTL */
+#define RMNET_IOCTL_FEAT_NOTIFY_MUX_CHANNEL              (1<<0)
+#define RMNET_IOCTL_FEAT_SET_EGRESS_DATA_FORMAT          (1<<1)
+#define RMNET_IOCTL_FEAT_SET_INGRESS_DATA_FORMAT         (1<<2)
+#define RMNET_IOCTL_FEAT_SET_AGGREGATION_COUNT           (1<<3)
+#define RMNET_IOCTL_FEAT_GET_AGGREGATION_COUNT           (1<<4)
+#define RMNET_IOCTL_FEAT_SET_AGGREGATION_SIZE            (1<<5)
+#define RMNET_IOCTL_FEAT_GET_AGGREGATION_SIZE            (1<<6)
+#define RMNET_IOCTL_FEAT_FLOW_CONTROL                    (1<<7)
+#define RMNET_IOCTL_FEAT_GET_DFLT_CONTROL_CHANNEL        (1<<8)
+#define RMNET_IOCTL_FEAT_GET_HWSW_MAP                    (1<<9)
+
+/* Input values for the RMNET_IOCTL_SET_EGRESS_DATA_FORMAT IOCTL  */
+#define RMNET_IOCTL_EGRESS_FORMAT_MAP                  (1<<1)
+#define RMNET_IOCTL_EGRESS_FORMAT_AGGREGATION          (1<<2)
+#define RMNET_IOCTL_EGRESS_FORMAT_MUXING               (1<<3)
+#define RMNET_IOCTL_EGRESS_FORMAT_CHECKSUM             (1<<4)
+
+/* Input values for the RMNET_IOCTL_SET_INGRESS_DATA_FORMAT IOCTL */
+#define RMNET_IOCTL_INGRESS_FORMAT_MAP                 (1<<1)
+#define RMNET_IOCTL_INGRESS_FORMAT_DEAGGREGATION       (1<<2)
+#define RMNET_IOCTL_INGRESS_FORMAT_DEMUXING            (1<<3)
+#define RMNET_IOCTL_INGRESS_FORMAT_CHECKSUM            (1<<4)
+#define RMNET_IOCTL_INGRESS_FORMAT_AGG_DATA            (1<<5)
+
+/* User space may not have this defined. */
+#ifndef IFNAMSIZ
+#define IFNAMSIZ 16
+#endif
+
+struct rmnet_ioctl_extended_s {
+	uint32_t   extended_ioctl;
+	union {
+		uint32_t data; /* Generic data field for most extended IOCTLs */
+
+		/* Return values for
+		 *    RMNET_IOCTL_GET_DRIVER_NAME
+		 *    RMNET_IOCTL_GET_DFLT_CONTROL_CHANNEL */
+		int8_t if_name[IFNAMSIZ];
+
+		/* Input values for the RMNET_IOCTL_ADD_MUX_CHANNEL IOCTL */
+		struct {
+			uint32_t  mux_id;
+			int8_t    vchannel_name[IFNAMSIZ];
+		} rmnet_mux_val;
+
+		/* Input values for the RMNET_IOCTL_FLOW_CONTROL IOCTL */
+		struct {
+			uint8_t   flow_mode;
+			uint8_t   mux_id;
+		} flow_control_prop;
+
+		/* Return values for RMNET_IOCTL_GET_EP_PAIR */
+		struct {
+			uint32_t   consumer_pipe_num;
+			uint32_t   producer_pipe_num;
+		} ipa_ep_pair;
+
+		struct {
+			uint32_t __data; /* Placeholder for legacy data*/
+			uint32_t agg_size;
+			uint32_t agg_count;
+		} ingress_format;
+	} u;
+};
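/*
 * The extended IOCTLs sit in the device-private range (0x89Fx), so they
 * are conventionally issued against the net device through struct ifreq;
 * that plumbing, and the interface name, are assumptions here. Sketch:
 * query the MRU, which comes back in the generic u.data field.
 */
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>

static int rmnet_get_mru(const char *ifname, uint32_t *mru)
{
	struct rmnet_ioctl_extended_s ext = {
		.extended_ioctl = RMNET_IOCTL_GET_MRU,
	};
	struct ifreq ifr;
	int ret, fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return -1;
	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_ifru.ifru_data = (void *)&ext;
	ret = ioctl(fd, RMNET_IOCTL_EXTENDED, &ifr);
	if (!ret)
		*mru = ext.u.data;
	close(fd);
	return ret;
}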
+
+struct rmnet_ioctl_data_s {
+	union {
+		uint32_t	operation_mode;
+		uint32_t	tcm_handle;
+	} u;
+};
+
+#define RMNET_IOCTL_QOS_MODE_6   (1<<0)
+#define RMNET_IOCTL_QOS_MODE_8   (1<<1)
+
+/* QMI QoS header definition */
+#define QMI_QOS_HDR_S  __attribute((__packed__)) qmi_qos_hdr_s
+struct QMI_QOS_HDR_S {
+	unsigned char    version;
+	unsigned char    flags;
+	uint32_t         flow_id;
+};
+
+/* QMI QoS 8-byte header. */
+struct qmi_qos_hdr8_s {
+	struct QMI_QOS_HDR_S   hdr;
+	uint8_t                reserved[2];
+} __attribute((__packed__));
+
+#endif /* _UAPI_MSM_RMNET_H_ */
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/include/uapi/linux/msm_rotator.h	2019-01-22 16:16:28.567292264 +0100
@@ -0,0 +1,62 @@
+#ifndef _UAPI__MSM_ROTATOR_H__
+#define _UAPI__MSM_ROTATOR_H__
+
+#include <linux/types.h>
+#include <linux/msm_mdp.h>
+
+#define MSM_ROTATOR_IOCTL_MAGIC 'R'
+
+#define MSM_ROTATOR_IOCTL_START   \
+		_IOWR(MSM_ROTATOR_IOCTL_MAGIC, 1, struct msm_rotator_img_info)
+#define MSM_ROTATOR_IOCTL_ROTATE   \
+		_IOW(MSM_ROTATOR_IOCTL_MAGIC, 2, struct msm_rotator_data_info)
+#define MSM_ROTATOR_IOCTL_FINISH   \
+		_IOW(MSM_ROTATOR_IOCTL_MAGIC, 3, int)
+
+#define ROTATOR_VERSION_01	0xA5B4C301
+
+enum rotator_clk_type {
+	ROTATOR_CORE_CLK,
+	ROTATOR_PCLK,
+	ROTATOR_IMEM_CLK
+};
+
+struct msm_rotator_img_info {
+	unsigned int session_id;
+	struct msmfb_img  src;
+	struct msmfb_img  dst;
+	struct mdp_rect src_rect;
+	unsigned int    dst_x;
+	unsigned int    dst_y;
+	unsigned char   rotations;
+	int enable;
+	unsigned int	downscale_ratio;
+	unsigned int secure;
+};
+
+struct msm_rotator_data_info {
+	int session_id;
+	struct msmfb_data src;
+	struct msmfb_data dst;
+	unsigned int version_key;
+	struct msmfb_data src_chroma;
+	struct msmfb_data dst_chroma;
+};
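/*
 * The three rotator ioctls form a session: START fills in session_id,
 * ROTATE processes one buffer pair, FINISH tears the session down. A
 * sketch assuming a /dev/msm_rotator device node (node name assumed).
 */
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>

static int rotate_once(struct msm_rotator_img_info *info,
		       struct msm_rotator_data_info *data)
{
	int ret, fd = open("/dev/msm_rotator", O_RDWR);

	if (fd < 0)
		return -1;
	ret = ioctl(fd, MSM_ROTATOR_IOCTL_START, info);	/* fills info->session_id */
	if (!ret) {
		data->session_id = info->session_id;
		ret = ioctl(fd, MSM_ROTATOR_IOCTL_ROTATE, data);
		ioctl(fd, MSM_ROTATOR_IOCTL_FINISH, &data->session_id);
	}
	close(fd);
	return ret;
}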
+
+struct msm_rot_clocks {
+	const char *clk_name;
+	enum rotator_clk_type clk_type;
+	unsigned int clk_rate;
+};
+
+struct msm_rotator_platform_data {
+	unsigned int number_of_clocks;
+	unsigned int hardware_version_number;
+	struct msm_rot_clocks *rotator_clks;
+#ifdef CONFIG_MSM_BUS_SCALING
+	struct msm_bus_scale_pdata *bus_scale_table;
+#endif
+	char rot_iommu_split_domain;
+};
+#endif
+
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/include/uapi/linux/msm_thermal_ioctl.h	2019-01-22 16:16:28.567292264 +0100
@@ -0,0 +1,92 @@
+#ifndef _MSM_THERMAL_IOCTL_H
+#define _MSM_THERMAL_IOCTL_H
+
+#include <linux/ioctl.h>
+
+#define MSM_THERMAL_IOCTL_NAME "msm_thermal_query"
+#define MSM_IOCTL_FREQ_SIZE 16
+
+struct __attribute__((__packed__)) cpu_freq_arg {
+	uint32_t cpu_num;
+	uint32_t freq_req;
+};
+
+struct __attribute__((__packed__)) clock_plan_arg {
+	uint32_t cluster_num;
+	/*
+	** A value of zero for freq_table_len will fetch the length of the
+	** cluster frequency table. A non-zero value will fetch the frequency
+	** table contents.
+	*/
+	uint32_t freq_table_len;
+	/*
+	** For clusters with a frequency table length greater than
+	** MSM_IOCTL_FREQ_SIZE, the frequency table is fetched from the kernel
+	** in multiple sets or iterations. The set_idx variable indicates
+	** which set/part of the frequency table the user is requesting.
+	** The set index value starts from zero. A set index value of 'Z'
+	** will fetch MSM_IOCTL_FREQ_SIZE or the maximum available number of
+	** frequency values (if fewer than MSM_IOCTL_FREQ_SIZE remain)
+	** from the frequency table, starting from the index
+	** (Z * MSM_IOCTL_FREQ_SIZE).
+	** For example, in a device supporting 19 different frequencies, a set
+	** index value of 0 will fetch the first 16 (MSM_IOCTL_FREQ_SIZE)
+	** frequencies starting from the index 0, and a set value of 1 will
+	** fetch the remaining 3 frequencies starting from the index 16.
+	** A successful get will populate freq_table_len with the
+	** number of frequency table entries fetched.
+	*/
+	uint32_t set_idx;
+	unsigned int freq_table[MSM_IOCTL_FREQ_SIZE];
+};
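/*
 * The set_idx scheme above as a fetch loop; a sketch assuming the
 * MSM_THERMAL_GET_CLUSTER_FREQUENCY_PLAN ioctl defined at the end of
 * this header and an fd opened on the driver's query device node.
 */
#include <stdint.h>
#include <sys/ioctl.h>

static int fetch_freq_plan(int fd, uint32_t cluster,
			   unsigned int *out, uint32_t max)
{
	struct msm_thermal_ioctl arg = { .size = sizeof(arg) };
	uint32_t total, done = 0, set = 0;

	arg.clock_freq.cluster_num = cluster;
	arg.clock_freq.freq_table_len = 0;	/* zero: query the table length */
	if (ioctl(fd, MSM_THERMAL_GET_CLUSTER_FREQUENCY_PLAN, &arg) < 0)
		return -1;
	total = arg.clock_freq.freq_table_len;

	while (done < total && done < max) {
		uint32_t i, n;

		arg.clock_freq.freq_table_len = total;	/* non-zero: fetch contents */
		arg.clock_freq.set_idx = set++;
		if (ioctl(fd, MSM_THERMAL_GET_CLUSTER_FREQUENCY_PLAN, &arg) < 0)
			return -1;
		n = arg.clock_freq.freq_table_len;	/* entries in this set */
		for (i = 0; i < n && done < max; i++)
			out[done++] = arg.clock_freq.freq_table[i];
	}
	return (int)done;
}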
+
+struct __attribute__((__packed__)) voltage_plan_arg {
+	uint32_t cluster_num;
+	uint32_t voltage_table_len;
+	uint32_t set_idx;
+	uint32_t voltage_table[MSM_IOCTL_FREQ_SIZE];
+};
+
+struct __attribute__((__packed__)) msm_thermal_ioctl {
+	uint32_t size;
+	union {
+		struct cpu_freq_arg cpu_freq;
+		struct clock_plan_arg clock_freq;
+		struct voltage_plan_arg voltage;
+	};
+};
+
+enum {
+	/*Set CPU Frequency*/
+	MSM_SET_CPU_MAX_FREQ = 0x00,
+	MSM_SET_CPU_MIN_FREQ = 0x01,
+	/*Set cluster frequency*/
+	MSM_SET_CLUSTER_MAX_FREQ = 0x02,
+	MSM_SET_CLUSTER_MIN_FREQ = 0x03,
+	/*Get cluster frequency plan*/
+	MSM_GET_CLUSTER_FREQ_PLAN = 0x04,
+	/*Get cluster voltage plan */
+	MSM_GET_CLUSTER_VOLTAGE_PLAN = 0x05,
+	MSM_CMD_MAX_NR,
+};
+
+#define MSM_THERMAL_MAGIC_NUM 0xCA /*Unique magic number*/
+
+#define MSM_THERMAL_SET_CPU_MAX_FREQUENCY _IOW(MSM_THERMAL_MAGIC_NUM,\
+		MSM_SET_CPU_MAX_FREQ, struct msm_thermal_ioctl)
+
+#define MSM_THERMAL_SET_CPU_MIN_FREQUENCY _IOW(MSM_THERMAL_MAGIC_NUM,\
+		MSM_SET_CPU_MIN_FREQ, struct msm_thermal_ioctl)
+
+#define MSM_THERMAL_SET_CLUSTER_MAX_FREQUENCY _IOW(MSM_THERMAL_MAGIC_NUM,\
+		MSM_SET_CLUSTER_MAX_FREQ, struct msm_thermal_ioctl)
+
+#define MSM_THERMAL_SET_CLUSTER_MIN_FREQUENCY _IOW(MSM_THERMAL_MAGIC_NUM,\
+		MSM_SET_CLUSTER_MIN_FREQ, struct msm_thermal_ioctl)
+
+#define MSM_THERMAL_GET_CLUSTER_FREQUENCY_PLAN _IOR(MSM_THERMAL_MAGIC_NUM,\
+		MSM_GET_CLUSTER_FREQ_PLAN, struct msm_thermal_ioctl)
+
+#define MSM_THERMAL_GET_CLUSTER_VOLTAGE_PLAN _IOR(MSM_THERMAL_MAGIC_NUM,\
+		MSM_GET_CLUSTER_VOLTAGE_PLAN, struct msm_thermal_ioctl)
+#endif
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/include/uapi/linux/msm_vidc_dec.h	2019-01-22 16:16:28.567292264 +0100
@@ -0,0 +1,612 @@
+#ifndef _UAPI_MSM_VIDC_DEC_H_
+#define _UAPI_MSM_VIDC_DEC_H_
+
+#include <linux/types.h>
+#include <linux/ioctl.h>
+
+/* STATUS CODES */
+/* Base value for status codes */
+#define VDEC_S_BASE	0x40000000
+/* Success */
+#define VDEC_S_SUCCESS	(VDEC_S_BASE)
+/* General failure */
+#define VDEC_S_EFAIL	(VDEC_S_BASE + 1)
+/* Fatal irrecoverable failure. Need to tear down the session. */
+#define VDEC_S_EFATAL   (VDEC_S_BASE + 2)
+/* Error detected in the passed parameters */
+#define VDEC_S_EBADPARAM	(VDEC_S_BASE + 3)
+/* Command called in an invalid state. */
+#define VDEC_S_EINVALSTATE	(VDEC_S_BASE + 4)
+/* Insufficient OS resources - thread, memory etc. */
+#define VDEC_S_ENOSWRES	(VDEC_S_BASE + 5)
+/* Insufficient HW resources - core capacity maxed out. */
+#define VDEC_S_ENOHWRES	(VDEC_S_BASE + 6)
+/* Invalid command called */
+#define VDEC_S_EINVALCMD	(VDEC_S_BASE + 7)
+/* Command timeout. */
+#define VDEC_S_ETIMEOUT	(VDEC_S_BASE + 8)
+/* Prerequisite for the API is not met. */
+#define VDEC_S_ENOPREREQ	(VDEC_S_BASE + 9)
+/* Command queue is full. */
+#define VDEC_S_ECMDQFULL	(VDEC_S_BASE + 10)
+/* Command is not supported by this driver */
+#define VDEC_S_ENOTSUPP	(VDEC_S_BASE + 11)
+/* Command is not implemented by the driver. */
+#define VDEC_S_ENOTIMPL	(VDEC_S_BASE + 12)
+/* Driver is busy processing a command. */
+#define VDEC_S_BUSY	(VDEC_S_BASE + 13)
+#define VDEC_S_INPUT_BITSTREAM_ERR (VDEC_S_BASE + 14)
+
+#define VDEC_INTF_VER	1
+#define VDEC_MSG_BASE	0x0000000
+/*
+ * Codes to identify asynchronous message responses and events that the
+ * driver wants to communicate to the app.
+ */
+#define VDEC_MSG_INVALID	(VDEC_MSG_BASE + 0)
+#define VDEC_MSG_RESP_INPUT_BUFFER_DONE	(VDEC_MSG_BASE + 1)
+#define VDEC_MSG_RESP_OUTPUT_BUFFER_DONE	(VDEC_MSG_BASE + 2)
+#define VDEC_MSG_RESP_INPUT_FLUSHED	(VDEC_MSG_BASE + 3)
+#define VDEC_MSG_RESP_OUTPUT_FLUSHED	(VDEC_MSG_BASE + 4)
+#define VDEC_MSG_RESP_FLUSH_INPUT_DONE	(VDEC_MSG_BASE + 5)
+#define VDEC_MSG_RESP_FLUSH_OUTPUT_DONE	(VDEC_MSG_BASE + 6)
+#define VDEC_MSG_RESP_START_DONE	(VDEC_MSG_BASE + 7)
+#define VDEC_MSG_RESP_STOP_DONE	(VDEC_MSG_BASE + 8)
+#define VDEC_MSG_RESP_PAUSE_DONE	(VDEC_MSG_BASE + 9)
+#define VDEC_MSG_RESP_RESUME_DONE	(VDEC_MSG_BASE + 10)
+#define VDEC_MSG_RESP_RESOURCE_LOADED	(VDEC_MSG_BASE + 11)
+#define VDEC_EVT_RESOURCES_LOST	(VDEC_MSG_BASE + 12)
+#define VDEC_MSG_EVT_CONFIG_CHANGED	(VDEC_MSG_BASE + 13)
+#define VDEC_MSG_EVT_HW_ERROR	(VDEC_MSG_BASE + 14)
+#define VDEC_MSG_EVT_INFO_CONFIG_CHANGED	(VDEC_MSG_BASE + 15)
+#define VDEC_MSG_EVT_INFO_FIELD_DROPPED	(VDEC_MSG_BASE + 16)
+#define VDEC_MSG_EVT_HW_OVERLOAD	(VDEC_MSG_BASE + 17)
+#define VDEC_MSG_EVT_MAX_CLIENTS	(VDEC_MSG_BASE + 18)
+#define VDEC_MSG_EVT_HW_UNSUPPORTED	(VDEC_MSG_BASE + 19)
+
+/*Buffer flag bit masks.*/
+#define VDEC_BUFFERFLAG_EOS	0x00000001
+#define VDEC_BUFFERFLAG_DECODEONLY	0x00000004
+#define VDEC_BUFFERFLAG_DATACORRUPT	0x00000008
+#define VDEC_BUFFERFLAG_ENDOFFRAME	0x00000010
+#define VDEC_BUFFERFLAG_SYNCFRAME	0x00000020
+#define VDEC_BUFFERFLAG_EXTRADATA	0x00000040
+#define VDEC_BUFFERFLAG_CODECCONFIG	0x00000080
+
+/*Post-processing flag bit masks*/
+#define VDEC_EXTRADATA_NONE 0x001
+#define VDEC_EXTRADATA_QP 0x004
+#define VDEC_EXTRADATA_MB_ERROR_MAP 0x008
+#define VDEC_EXTRADATA_SEI 0x010
+#define VDEC_EXTRADATA_VUI 0x020
+#define VDEC_EXTRADATA_VC1 0x040
+
+#define VDEC_EXTRADATA_EXT_DATA          0x0800
+#define VDEC_EXTRADATA_USER_DATA         0x1000
+#define VDEC_EXTRADATA_EXT_BUFFER        0x2000
+
+#define VDEC_CMDBASE	0x800
+#define VDEC_CMD_SET_INTF_VERSION	(VDEC_CMDBASE)
+
+#define VDEC_IOCTL_MAGIC 'v'
+
+struct vdec_ioctl_msg {
+	void __user *in;
+	void __user *out;
+};
+
+/* CMD params: InputParam:enum vdec_codec
+   OutputParam: struct vdec_profile_level*/
+#define VDEC_IOCTL_GET_PROFILE_LEVEL_SUPPORTED \
+	_IOWR(VDEC_IOCTL_MAGIC, 0, struct vdec_ioctl_msg)
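/*
 * All decoder ioctls funnel through the in/out pointer pair above. A
 * sketch of the pattern (enum vdec_codec and struct vdec_profile_level
 * are defined further down in this header; vdec_fd is an fd on the
 * decoder device node, assumed to be /dev/msm_vidc_dec).
 */
#include <sys/ioctl.h>

static int query_h264_caps(int vdec_fd, struct vdec_profile_level *pl)
{
	enum vdec_codec codec = VDEC_CODECTYPE_H264;
	struct vdec_ioctl_msg msg = { .in = &codec, .out = pl };

	/* pl->profiles / pl->levels come back as VDEC_AVC* bitmasks */
	return ioctl(vdec_fd, VDEC_IOCTL_GET_PROFILE_LEVEL_SUPPORTED, &msg);
}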
+
+/*CMD params:InputParam: NULL
+  OutputParam: uint32_t(bitmask)*/
+#define VDEC_IOCTL_GET_INTERLACE_FORMAT \
+	_IOR(VDEC_IOCTL_MAGIC, 1, struct vdec_ioctl_msg)
+
+/* CMD params: InputParam:  enum vdec_codec
+   OutputParam: struct vdec_profile_level*/
+#define VDEC_IOCTL_GET_CURRENT_PROFILE_LEVEL \
+	_IOWR(VDEC_IOCTL_MAGIC, 2, struct vdec_ioctl_msg)
+
+/*CMD params: SET: InputParam: enum vdec_output_fromat  OutputParam: NULL
+  GET:  InputParam: NULL OutputParam: enum vdec_output_fromat*/
+#define VDEC_IOCTL_SET_OUTPUT_FORMAT \
+	_IOWR(VDEC_IOCTL_MAGIC, 3, struct vdec_ioctl_msg)
+#define VDEC_IOCTL_GET_OUTPUT_FORMAT \
+	_IOWR(VDEC_IOCTL_MAGIC, 4, struct vdec_ioctl_msg)
+
+/*CMD params: SET: InputParam: enum vdec_codec OutputParam: NULL
+  GET: InputParam: NULL OutputParam: enum vdec_codec*/
+#define VDEC_IOCTL_SET_CODEC \
+	_IOW(VDEC_IOCTL_MAGIC, 5, struct vdec_ioctl_msg)
+#define VDEC_IOCTL_GET_CODEC \
+	_IOR(VDEC_IOCTL_MAGIC, 6, struct vdec_ioctl_msg)
+
+/*CMD params: SET: InputParam: struct vdec_picsize outputparam: NULL
+ GET: InputParam: NULL outputparam: struct vdec_picsize*/
+#define VDEC_IOCTL_SET_PICRES \
+	_IOW(VDEC_IOCTL_MAGIC, 7, struct vdec_ioctl_msg)
+#define VDEC_IOCTL_GET_PICRES \
+	_IOR(VDEC_IOCTL_MAGIC, 8, struct vdec_ioctl_msg)
+
+#define VDEC_IOCTL_SET_EXTRADATA \
+	_IOW(VDEC_IOCTL_MAGIC, 9, struct vdec_ioctl_msg)
+#define VDEC_IOCTL_GET_EXTRADATA \
+	_IOR(VDEC_IOCTL_MAGIC, 10, struct vdec_ioctl_msg)
+
+#define VDEC_IOCTL_SET_SEQUENCE_HEADER \
+	_IOW(VDEC_IOCTL_MAGIC, 11, struct vdec_ioctl_msg)
+
+/* CMD params: SET: InputParam - vdec_allocatorproperty, OutputParam - NULL
+   GET: InputParam - NULL, OutputParam - vdec_allocatorproperty*/
+#define VDEC_IOCTL_SET_BUFFER_REQ \
+	_IOW(VDEC_IOCTL_MAGIC, 12, struct vdec_ioctl_msg)
+#define VDEC_IOCTL_GET_BUFFER_REQ \
+	_IOR(VDEC_IOCTL_MAGIC, 13, struct vdec_ioctl_msg)
+/* CMD params: InputParam - vdec_buffer, OutputParam - uint8_t** */
+#define VDEC_IOCTL_ALLOCATE_BUFFER \
+	_IOWR(VDEC_IOCTL_MAGIC, 14, struct vdec_ioctl_msg)
+/* CMD params: InputParam - uint8_t *, OutputParam - NULL.*/
+#define VDEC_IOCTL_FREE_BUFFER \
+	_IOW(VDEC_IOCTL_MAGIC, 15, struct vdec_ioctl_msg)
+
+/*CMD params: CMD: InputParam - struct vdec_setbuffer_cmd, OutputParam - NULL*/
+#define VDEC_IOCTL_SET_BUFFER \
+	_IOW(VDEC_IOCTL_MAGIC, 16, struct vdec_ioctl_msg)
+
+/* CMD params: InputParam - struct vdec_fillbuffer_cmd, OutputParam - NULL*/
+#define VDEC_IOCTL_FILL_OUTPUT_BUFFER \
+	_IOW(VDEC_IOCTL_MAGIC, 17, struct vdec_ioctl_msg)
+
+/*CMD params: InputParam - struct vdec_frameinfo , OutputParam - NULL*/
+#define VDEC_IOCTL_DECODE_FRAME \
+	_IOW(VDEC_IOCTL_MAGIC, 18, struct vdec_ioctl_msg)
+
+#define VDEC_IOCTL_LOAD_RESOURCES _IO(VDEC_IOCTL_MAGIC, 19)
+#define VDEC_IOCTL_CMD_START _IO(VDEC_IOCTL_MAGIC, 20)
+#define VDEC_IOCTL_CMD_STOP _IO(VDEC_IOCTL_MAGIC, 21)
+#define VDEC_IOCTL_CMD_PAUSE _IO(VDEC_IOCTL_MAGIC, 22)
+#define VDEC_IOCTL_CMD_RESUME _IO(VDEC_IOCTL_MAGIC, 23)
+
+/*CMD params: InputParam - enum vdec_bufferflush , OutputParam - NULL */
+#define VDEC_IOCTL_CMD_FLUSH _IOW(VDEC_IOCTL_MAGIC, 24, struct vdec_ioctl_msg)
+
+/* ========================================================
+ * IOCTLs for getting asynchronous notifications from the driver
+ * ======================================================== */
+
+/*IOCTL params: InputParam - NULL, OutputParam - struct vdec_msginfo*/
+#define VDEC_IOCTL_GET_NEXT_MSG \
+	_IOR(VDEC_IOCTL_MAGIC, 25, struct vdec_ioctl_msg)
+
+#define VDEC_IOCTL_STOP_NEXT_MSG _IO(VDEC_IOCTL_MAGIC, 26)
+
+#define VDEC_IOCTL_GET_NUMBER_INSTANCES \
+	_IOR(VDEC_IOCTL_MAGIC, 27, struct vdec_ioctl_msg)
+
+#define VDEC_IOCTL_SET_PICTURE_ORDER \
+	_IOW(VDEC_IOCTL_MAGIC, 28, struct vdec_ioctl_msg)
+
+#define VDEC_IOCTL_SET_FRAME_RATE \
+	_IOW(VDEC_IOCTL_MAGIC, 29, struct vdec_ioctl_msg)
+
+#define VDEC_IOCTL_SET_H264_MV_BUFFER \
+	_IOW(VDEC_IOCTL_MAGIC, 30, struct vdec_ioctl_msg)
+
+#define VDEC_IOCTL_FREE_H264_MV_BUFFER \
+	_IOW(VDEC_IOCTL_MAGIC, 31, struct vdec_ioctl_msg)
+
+#define VDEC_IOCTL_GET_MV_BUFFER_SIZE  \
+	_IOR(VDEC_IOCTL_MAGIC, 32, struct vdec_ioctl_msg)
+
+#define VDEC_IOCTL_SET_IDR_ONLY_DECODING \
+	_IO(VDEC_IOCTL_MAGIC, 33)
+
+#define VDEC_IOCTL_SET_CONT_ON_RECONFIG  \
+	_IO(VDEC_IOCTL_MAGIC, 34)
+
+#define VDEC_IOCTL_SET_DISABLE_DMX \
+	_IOW(VDEC_IOCTL_MAGIC, 35, struct vdec_ioctl_msg)
+
+#define VDEC_IOCTL_GET_DISABLE_DMX \
+	_IOR(VDEC_IOCTL_MAGIC, 36, struct vdec_ioctl_msg)
+
+#define VDEC_IOCTL_GET_DISABLE_DMX_SUPPORT \
+	_IOR(VDEC_IOCTL_MAGIC, 37, struct vdec_ioctl_msg)
+
+#define VDEC_IOCTL_SET_PERF_CLK \
+	_IOR(VDEC_IOCTL_MAGIC, 38, struct vdec_ioctl_msg)
+
+#define VDEC_IOCTL_SET_META_BUFFERS \
+	_IOW(VDEC_IOCTL_MAGIC, 39, struct vdec_ioctl_msg)
+
+#define VDEC_IOCTL_FREE_META_BUFFERS \
+	_IO(VDEC_IOCTL_MAGIC, 40)
+
+enum vdec_picture {
+	PICTURE_TYPE_I,
+	PICTURE_TYPE_P,
+	PICTURE_TYPE_B,
+	PICTURE_TYPE_BI,
+	PICTURE_TYPE_SKIP,
+	PICTURE_TYPE_IDR,
+	PICTURE_TYPE_UNKNOWN
+};
+
+enum vdec_buffer {
+	VDEC_BUFFER_TYPE_INPUT,
+	VDEC_BUFFER_TYPE_OUTPUT
+};
+
+struct vdec_allocatorproperty {
+	enum vdec_buffer buffer_type;
+	uint32_t mincount;
+	uint32_t maxcount;
+	uint32_t actualcount;
+	size_t buffer_size;
+	uint32_t alignment;
+	uint32_t buf_poolid;
+	size_t meta_buffer_size;
+};
+
+struct vdec_bufferpayload {
+	void __user *bufferaddr;
+	size_t buffer_len;
+	int pmem_fd;
+	size_t offset;
+	size_t mmaped_size;
+};
+
+struct vdec_setbuffer_cmd {
+	enum vdec_buffer buffer_type;
+	struct vdec_bufferpayload buffer;
+};
+
+struct vdec_fillbuffer_cmd {
+	struct vdec_bufferpayload buffer;
+	void *client_data;
+};
+
+enum vdec_bufferflush {
+	VDEC_FLUSH_TYPE_INPUT,
+	VDEC_FLUSH_TYPE_OUTPUT,
+	VDEC_FLUSH_TYPE_ALL
+};
+
+enum vdec_codec {
+	VDEC_CODECTYPE_H264 = 0x1,
+	VDEC_CODECTYPE_H263 = 0x2,
+	VDEC_CODECTYPE_MPEG4 = 0x3,
+	VDEC_CODECTYPE_DIVX_3 = 0x4,
+	VDEC_CODECTYPE_DIVX_4 = 0x5,
+	VDEC_CODECTYPE_DIVX_5 = 0x6,
+	VDEC_CODECTYPE_DIVX_6 = 0x7,
+	VDEC_CODECTYPE_XVID = 0x8,
+	VDEC_CODECTYPE_MPEG1 = 0x9,
+	VDEC_CODECTYPE_MPEG2 = 0xa,
+	VDEC_CODECTYPE_VC1 = 0xb,
+	VDEC_CODECTYPE_VC1_RCV = 0xc,
+	VDEC_CODECTYPE_HEVC = 0xd,
+	VDEC_CODECTYPE_MVC = 0xe,
+	VDEC_CODECTYPE_VP8 = 0xf,
+	VDEC_CODECTYPE_VP9 = 0x10,
+};
+
+enum vdec_mpeg2_profile {
+	VDEC_MPEG2ProfileSimple = 0x1,
+	VDEC_MPEG2ProfileMain = 0x2,
+	VDEC_MPEG2Profile422 = 0x4,
+	VDEC_MPEG2ProfileSNR = 0x8,
+	VDEC_MPEG2ProfileSpatial = 0x10,
+	VDEC_MPEG2ProfileHigh = 0x20,
+	VDEC_MPEG2ProfileKhronosExtensions = 0x6F000000,
+	VDEC_MPEG2ProfileVendorStartUnused = 0x7F000000,
+	VDEC_MPEG2ProfileMax = 0x7FFFFFFF
+};
+
+enum vdec_mpeg2_level {
+	VDEC_MPEG2LevelLL = 0x1,
+	VDEC_MPEG2LevelML = 0x2,
+	VDEC_MPEG2LevelH14 = 0x4,
+	VDEC_MPEG2LevelHL = 0x8,
+	VDEC_MPEG2LevelKhronosExtensions = 0x6F000000,
+	VDEC_MPEG2LevelVendorStartUnused = 0x7F000000,
+	VDEC_MPEG2LevelMax = 0x7FFFFFFF
+};
+
+enum vdec_mpeg4_profile {
+	VDEC_MPEG4ProfileSimple = 0x01,
+	VDEC_MPEG4ProfileSimpleScalable = 0x02,
+	VDEC_MPEG4ProfileCore = 0x04,
+	VDEC_MPEG4ProfileMain = 0x08,
+	VDEC_MPEG4ProfileNbit = 0x10,
+	VDEC_MPEG4ProfileScalableTexture = 0x20,
+	VDEC_MPEG4ProfileSimpleFace = 0x40,
+	VDEC_MPEG4ProfileSimpleFBA = 0x80,
+	VDEC_MPEG4ProfileBasicAnimated = 0x100,
+	VDEC_MPEG4ProfileHybrid = 0x200,
+	VDEC_MPEG4ProfileAdvancedRealTime = 0x400,
+	VDEC_MPEG4ProfileCoreScalable = 0x800,
+	VDEC_MPEG4ProfileAdvancedCoding = 0x1000,
+	VDEC_MPEG4ProfileAdvancedCore = 0x2000,
+	VDEC_MPEG4ProfileAdvancedScalable = 0x4000,
+	VDEC_MPEG4ProfileAdvancedSimple = 0x8000,
+	VDEC_MPEG4ProfileKhronosExtensions = 0x6F000000,
+	VDEC_MPEG4ProfileVendorStartUnused = 0x7F000000,
+	VDEC_MPEG4ProfileMax = 0x7FFFFFFF
+};
+
+enum vdec_mpeg4_level {
+	VDEC_MPEG4Level0 = 0x01,
+	VDEC_MPEG4Level0b = 0x02,
+	VDEC_MPEG4Level1 = 0x04,
+	VDEC_MPEG4Level2 = 0x08,
+	VDEC_MPEG4Level3 = 0x10,
+	VDEC_MPEG4Level4 = 0x20,
+	VDEC_MPEG4Level4a = 0x40,
+	VDEC_MPEG4Level5 = 0x80,
+	VDEC_MPEG4LevelKhronosExtensions = 0x6F000000,
+	VDEC_MPEG4LevelVendorStartUnused = 0x7F000000,
+	VDEC_MPEG4LevelMax = 0x7FFFFFFF
+};
+
+enum vdec_avc_profile {
+	VDEC_AVCProfileBaseline = 0x01,
+	VDEC_AVCProfileMain = 0x02,
+	VDEC_AVCProfileExtended = 0x04,
+	VDEC_AVCProfileHigh = 0x08,
+	VDEC_AVCProfileHigh10 = 0x10,
+	VDEC_AVCProfileHigh422 = 0x20,
+	VDEC_AVCProfileHigh444 = 0x40,
+	VDEC_AVCProfileKhronosExtensions = 0x6F000000,
+	VDEC_AVCProfileVendorStartUnused = 0x7F000000,
+	VDEC_AVCProfileMax = 0x7FFFFFFF
+};
+
+enum vdec_avc_level {
+	VDEC_AVCLevel1 = 0x01,
+	VDEC_AVCLevel1b = 0x02,
+	VDEC_AVCLevel11 = 0x04,
+	VDEC_AVCLevel12 = 0x08,
+	VDEC_AVCLevel13 = 0x10,
+	VDEC_AVCLevel2 = 0x20,
+	VDEC_AVCLevel21 = 0x40,
+	VDEC_AVCLevel22 = 0x80,
+	VDEC_AVCLevel3 = 0x100,
+	VDEC_AVCLevel31 = 0x200,
+	VDEC_AVCLevel32 = 0x400,
+	VDEC_AVCLevel4 = 0x800,
+	VDEC_AVCLevel41 = 0x1000,
+	VDEC_AVCLevel42 = 0x2000,
+	VDEC_AVCLevel5 = 0x4000,
+	VDEC_AVCLevel51 = 0x8000,
+	VDEC_AVCLevelKhronosExtensions = 0x6F000000,
+	VDEC_AVCLevelVendorStartUnused = 0x7F000000,
+	VDEC_AVCLevelMax = 0x7FFFFFFF
+};
+
+enum vdec_divx_profile {
+	VDEC_DIVXProfile_qMobile = 0x01,
+	VDEC_DIVXProfile_Mobile = 0x02,
+	VDEC_DIVXProfile_HD = 0x04,
+	VDEC_DIVXProfile_Handheld = 0x08,
+	VDEC_DIVXProfile_Portable = 0x10,
+	VDEC_DIVXProfile_HomeTheater = 0x20
+};
+
+enum vdec_xvid_profile {
+	VDEC_XVIDProfile_Simple = 0x1,
+	VDEC_XVIDProfile_Advanced_Realtime_Simple = 0x2,
+	VDEC_XVIDProfile_Advanced_Simple = 0x4
+};
+
+enum vdec_xvid_level {
+	VDEC_XVID_LEVEL_S_L0 = 0x1,
+	VDEC_XVID_LEVEL_S_L1 = 0x2,
+	VDEC_XVID_LEVEL_S_L2 = 0x4,
+	VDEC_XVID_LEVEL_S_L3 = 0x8,
+	VDEC_XVID_LEVEL_ARTS_L1 = 0x10,
+	VDEC_XVID_LEVEL_ARTS_L2 = 0x20,
+	VDEC_XVID_LEVEL_ARTS_L3 = 0x40,
+	VDEC_XVID_LEVEL_ARTS_L4 = 0x80,
+	VDEC_XVID_LEVEL_AS_L0 = 0x100,
+	VDEC_XVID_LEVEL_AS_L1 = 0x200,
+	VDEC_XVID_LEVEL_AS_L2 = 0x400,
+	VDEC_XVID_LEVEL_AS_L3 = 0x800,
+	VDEC_XVID_LEVEL_AS_L4 = 0x1000
+};
+
+enum vdec_h263profile {
+	VDEC_H263ProfileBaseline = 0x01,
+	VDEC_H263ProfileH320Coding = 0x02,
+	VDEC_H263ProfileBackwardCompatible = 0x04,
+	VDEC_H263ProfileISWV2 = 0x08,
+	VDEC_H263ProfileISWV3 = 0x10,
+	VDEC_H263ProfileHighCompression = 0x20,
+	VDEC_H263ProfileInternet = 0x40,
+	VDEC_H263ProfileInterlace = 0x80,
+	VDEC_H263ProfileHighLatency = 0x100,
+	VDEC_H263ProfileKhronosExtensions = 0x6F000000,
+	VDEC_H263ProfileVendorStartUnused = 0x7F000000,
+	VDEC_H263ProfileMax = 0x7FFFFFFF
+};
+
+enum vdec_h263level {
+	VDEC_H263Level10 = 0x01,
+	VDEC_H263Level20 = 0x02,
+	VDEC_H263Level30 = 0x04,
+	VDEC_H263Level40 = 0x08,
+	VDEC_H263Level45 = 0x10,
+	VDEC_H263Level50 = 0x20,
+	VDEC_H263Level60 = 0x40,
+	VDEC_H263Level70 = 0x80,
+	VDEC_H263LevelKhronosExtensions = 0x6F000000,
+	VDEC_H263LevelVendorStartUnused = 0x7F000000,
+	VDEC_H263LevelMax = 0x7FFFFFFF
+};
+
+enum vdec_wmv_format {
+	VDEC_WMVFormatUnused = 0x01,
+	VDEC_WMVFormat7 = 0x02,
+	VDEC_WMVFormat8 = 0x04,
+	VDEC_WMVFormat9 = 0x08,
+	VDEC_WMFFormatKhronosExtensions = 0x6F000000,
+	VDEC_WMFFormatVendorStartUnused = 0x7F000000,
+	VDEC_WMVFormatMax = 0x7FFFFFFF
+};
+
+enum vdec_vc1_profile {
+	VDEC_VC1ProfileSimple = 0x1,
+	VDEC_VC1ProfileMain = 0x2,
+	VDEC_VC1ProfileAdvanced = 0x4
+};
+
+enum vdec_vc1_level {
+	VDEC_VC1_LEVEL_S_Low = 0x1,
+	VDEC_VC1_LEVEL_S_Medium = 0x2,
+	VDEC_VC1_LEVEL_M_Low = 0x4,
+	VDEC_VC1_LEVEL_M_Medium = 0x8,
+	VDEC_VC1_LEVEL_M_High = 0x10,
+	VDEC_VC1_LEVEL_A_L0 = 0x20,
+	VDEC_VC1_LEVEL_A_L1 = 0x40,
+	VDEC_VC1_LEVEL_A_L2 = 0x80,
+	VDEC_VC1_LEVEL_A_L3 = 0x100,
+	VDEC_VC1_LEVEL_A_L4 = 0x200
+};
+
+struct vdec_profile_level {
+	uint32_t profiles;
+	uint32_t levels;
+};
+
+enum vdec_interlaced_format {
+	VDEC_InterlaceFrameProgressive = 0x1,
+	VDEC_InterlaceInterleaveFrameTopFieldFirst = 0x2,
+	VDEC_InterlaceInterleaveFrameBottomFieldFirst = 0x4
+};
+
+#define VDEC_YUV_FORMAT_NV12_TP10_UBWC \
+	VDEC_YUV_FORMAT_NV12_TP10_UBWC
+
+enum vdec_output_fromat {
+	VDEC_YUV_FORMAT_NV12 = 0x1,
+	VDEC_YUV_FORMAT_TILE_4x2 = 0x2,
+	VDEC_YUV_FORMAT_NV12_UBWC = 0x3,
+	VDEC_YUV_FORMAT_NV12_TP10_UBWC = 0x4
+};
+
+enum vdec_output_order {
+	VDEC_ORDER_DISPLAY = 0x1,
+	VDEC_ORDER_DECODE = 0x2
+};
+
+struct vdec_picsize {
+	uint32_t frame_width;
+	uint32_t frame_height;
+	uint32_t stride;
+	uint32_t scan_lines;
+};
+
+struct vdec_seqheader {
+	void __user *ptr_seqheader;
+	size_t seq_header_len;
+	int pmem_fd;
+	size_t pmem_offset;
+};
+
+struct vdec_mberror {
+	void __user *ptr_errormap;
+	size_t err_mapsize;
+};
+
+struct vdec_input_frameinfo {
+	void __user *bufferaddr;
+	size_t offset;
+	size_t datalen;
+	uint32_t flags;
+	int64_t timestamp;
+	void *client_data;
+	int pmem_fd;
+	size_t pmem_offset;
+	void __user *desc_addr;
+	uint32_t desc_size;
+};
+
+struct vdec_framesize {
+	uint32_t   left;
+	uint32_t   top;
+	uint32_t   right;
+	uint32_t   bottom;
+};
+
+struct vdec_aspectratioinfo {
+	uint32_t aspect_ratio;
+	uint32_t par_width;
+	uint32_t par_height;
+};
+
+struct vdec_sep_metadatainfo {
+	void __user *metabufaddr;
+	uint32_t size;
+	int fd;
+	int offset;
+	uint32_t buffer_size;
+};
+
+struct vdec_output_frameinfo {
+	void __user *bufferaddr;
+	size_t offset;
+	size_t len;
+	uint32_t flags;
+	int64_t time_stamp;
+	enum vdec_picture pic_type;
+	void *client_data;
+	void *input_frame_clientdata;
+	struct vdec_picsize picsize;
+	struct vdec_framesize framesize;
+	enum vdec_interlaced_format interlaced_format;
+	struct vdec_aspectratioinfo aspect_ratio_info;
+	struct vdec_sep_metadatainfo metadata_info;
+};
+
+union vdec_msgdata {
+	struct vdec_output_frameinfo output_frame;
+	void *input_frame_clientdata;
+};
+
+struct vdec_msginfo {
+	uint32_t status_code;
+	uint32_t msgcode;
+	union vdec_msgdata msgdata;
+	size_t msgdatasize;
+};
+
+struct vdec_framerate {
+	unsigned long fps_denominator;
+	unsigned long fps_numerator;
+};
+
+struct vdec_h264_mv {
+	size_t size;
+	int count;
+	int pmem_fd;
+	int offset;
+};
+
+struct vdec_mv_buff_size {
+	int width;
+	int height;
+	int size;
+	int alignment;
+};
+
+struct vdec_meta_buffers {
+	size_t size;
+	int count;
+	int pmem_fd;
+	int pmem_fd_iommu;
+	int offset;
+};
+
+#endif /* _UAPI_MSM_VIDC_DEC_H_ */
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/include/uapi/linux/msm_vidc_enc.h	2019-01-22 16:16:28.567292264 +0100
@@ -0,0 +1,700 @@
+#ifndef _UAPI_MSM_VIDC_ENC_H_
+#define _UAPI_MSM_VIDC_ENC_H_
+
+#include <linux/types.h>
+#include <linux/ioctl.h>
+
+/* STATUS CODES */
+/* Base value for status codes */
+#define VEN_S_BASE	0x00000000
+#define VEN_S_SUCCESS	(VEN_S_BASE)/* Success */
+#define VEN_S_EFAIL	(VEN_S_BASE+1)/* General failure */
+#define VEN_S_EFATAL	(VEN_S_BASE+2)/* Fatal irrecoverable failure*/
+#define VEN_S_EBADPARAM	(VEN_S_BASE+3)/* Error in passed parameters*/
+/*Command called in invalid state*/
+#define VEN_S_EINVALSTATE	(VEN_S_BASE+4)
+#define VEN_S_ENOSWRES	(VEN_S_BASE+5)/* Insufficient OS resources*/
+#define VEN_S_ENOHWRES	(VEN_S_BASE+6)/*Insufficient HW resources */
+#define VEN_S_EBUFFREQ	(VEN_S_BASE+7)/* Buffer requirements were not met*/
+#define VEN_S_EINVALCMD	(VEN_S_BASE+8)/* Invalid command called */
+#define VEN_S_ETIMEOUT	(VEN_S_BASE+9)/* Command timeout. */
+/*Re-attempt made on an API that does not support multiple invocations.*/
+#define VEN_S_ENOREATMPT	(VEN_S_BASE+10)
+#define VEN_S_ENOPREREQ	(VEN_S_BASE+11)/*Pre-requirement is not met for API*/
+#define VEN_S_ECMDQFULL	(VEN_S_BASE+12)/*Command queue is full*/
+#define VEN_S_ENOTSUPP	(VEN_S_BASE+13)/*Command not supported*/
+#define VEN_S_ENOTIMPL	(VEN_S_BASE+14)/*Command not implemented.*/
+#define VEN_S_ENOTPMEM	(VEN_S_BASE+15)/*Buffer is not from PMEM*/
+#define VEN_S_EFLUSHED	(VEN_S_BASE+16)/*returned buffer was flushed*/
+#define VEN_S_EINSUFBUF	(VEN_S_BASE+17)/*provided buffer size insufficient*/
+#define VEN_S_ESAMESTATE	(VEN_S_BASE+18)
+#define VEN_S_EINVALTRANS	(VEN_S_BASE+19)
+
+#define VEN_INTF_VER			 1
+
+/*Asynchronous messages from driver*/
+#define VEN_MSG_INDICATION	0
+#define VEN_MSG_INPUT_BUFFER_DONE	1
+#define VEN_MSG_OUTPUT_BUFFER_DONE	2
+#define VEN_MSG_NEED_OUTPUT_BUFFER	3
+#define VEN_MSG_FLUSH_INPUT_DONE	4
+#define VEN_MSG_FLUSH_OUPUT_DONE	5
+#define VEN_MSG_START	6
+#define VEN_MSG_STOP	7
+#define VEN_MSG_PAUSE	8
+#define VEN_MSG_RESUME	9
+#define VEN_MSG_STOP_READING_MSG	10
+#define VEN_MSG_LTRUSE_FAILED	    11
+#define VEN_MSG_HW_OVERLOAD	12
+#define VEN_MSG_MAX_CLIENTS	13
+
+
+/*Buffer flag bit masks*/
+#define VEN_BUFFLAG_EOS	0x00000001
+#define VEN_BUFFLAG_ENDOFFRAME	0x00000010
+#define VEN_BUFFLAG_SYNCFRAME	0x00000020
+#define VEN_BUFFLAG_EXTRADATA	0x00000040
+#define VEN_BUFFLAG_CODECCONFIG	0x00000080
+
+/*Post-processing flag bit masks*/
+#define VEN_EXTRADATA_NONE          0x001
+#define VEN_EXTRADATA_QCOMFILLER    0x002
+#define VEN_EXTRADATA_SLICEINFO     0x100
+#define VEN_EXTRADATA_LTRINFO       0x200
+#define VEN_EXTRADATA_MBINFO        0x400
+
+/*ENCODER CONFIGURATION CONSTANTS*/
+
+/*Encoded video frame types*/
+#define VEN_FRAME_TYPE_I	1/* I frame type */
+#define VEN_FRAME_TYPE_P	2/* P frame type */
+#define VEN_FRAME_TYPE_B	3/* B frame type */
+
+/*Video codec types*/
+#define VEN_CODEC_MPEG4	1/* MPEG4 Codec */
+#define VEN_CODEC_H264	2/* H.264 Codec */
+#define VEN_CODEC_H263	3/* H.263 Codec */
+
+/*Video codec profile types.*/
+#define VEN_PROFILE_MPEG4_SP      1/* 1 - MPEG4 SP profile      */
+#define VEN_PROFILE_MPEG4_ASP     2/* 2 - MPEG4 ASP profile     */
+#define VEN_PROFILE_H264_BASELINE 3/* 3 - H264 Baseline profile	*/
+#define VEN_PROFILE_H264_MAIN     4/* 4 - H264 Main profile     */
+#define VEN_PROFILE_H264_HIGH     5/* 5 - H264 High profile     */
+#define VEN_PROFILE_H263_BASELINE 6/* 6 - H263 Baseline profile */
+
+/*Video codec profile level types.*/
+#define VEN_LEVEL_MPEG4_0	 0x1/* MPEG4 Level 0  */
+#define VEN_LEVEL_MPEG4_1	 0x2/* MPEG4 Level 1  */
+#define VEN_LEVEL_MPEG4_2	 0x3/* MPEG4 Level 2  */
+#define VEN_LEVEL_MPEG4_3	 0x4/* MPEG4 Level 3  */
+#define VEN_LEVEL_MPEG4_4	 0x5/* MPEG4 Level 4  */
+#define VEN_LEVEL_MPEG4_5	 0x6/* MPEG4 Level 5  */
+#define VEN_LEVEL_MPEG4_3b	 0x7/* MPEG4 Level 3b */
+#define VEN_LEVEL_MPEG4_6	 0x8/* MPEG4 Level 6  */
+
+#define VEN_LEVEL_H264_1	 0x9/* H.264 Level 1   */
+#define VEN_LEVEL_H264_1b        0xA/* H.264 Level 1b  */
+#define VEN_LEVEL_H264_1p1	 0xB/* H.264 Level 1.1 */
+#define VEN_LEVEL_H264_1p2	 0xC/* H.264 Level 1.2 */
+#define VEN_LEVEL_H264_1p3	 0xD/* H.264 Level 1.3 */
+#define VEN_LEVEL_H264_2	 0xE/* H.264 Level 2   */
+#define VEN_LEVEL_H264_2p1	 0xF/* H.264 Level 2.1 */
+#define VEN_LEVEL_H264_2p2	0x10/* H.264 Level 2.2 */
+#define VEN_LEVEL_H264_3	0x11/* H.264 Level 3   */
+#define VEN_LEVEL_H264_3p1	0x12/* H.264 Level 3.1 */
+#define VEN_LEVEL_H264_3p2	0x13/* H.264 Level 3.2 */
+#define VEN_LEVEL_H264_4	0x14/* H.264 Level 4   */
+
+#define VEN_LEVEL_H263_10	0x15/* H.263 Level 10  */
+#define VEN_LEVEL_H263_20	0x16/* H.263 Level 20  */
+#define VEN_LEVEL_H263_30	0x17/* H.263 Level 30  */
+#define VEN_LEVEL_H263_40	0x18/* H.263 Level 40  */
+#define VEN_LEVEL_H263_45	0x19/* H.263 Level 45  */
+#define VEN_LEVEL_H263_50	0x1A/* H.263 Level 50  */
+#define VEN_LEVEL_H263_60	0x1B/* H.263 Level 60  */
+#define VEN_LEVEL_H263_70	0x1C/* H.263 Level 70  */
+
+/*Entropy coding model selection for H.264 encoder.*/
+#define VEN_ENTROPY_MODEL_CAVLC	1
+#define VEN_ENTROPY_MODEL_CABAC	2
+/*Cabac model number (0,1,2) for encoder.*/
+#define VEN_CABAC_MODEL_0	1/* CABAC Model 0. */
+#define VEN_CABAC_MODEL_1	2/* CABAC Model 1. */
+#define VEN_CABAC_MODEL_2	3/* CABAC Model 2. */
+
+/*Deblocking filter control type for encoder.*/
+#define VEN_DB_DISABLE	1/* 1 - Disable deblocking filter*/
+#define VEN_DB_ALL_BLKG_BNDRY	2/* 2 - All blocking boundary filtering*/
+#define VEN_DB_SKIP_SLICE_BNDRY	3/* 3 - Filtering except slice boundary*/
+
+/*Different methods of Multi slice selection.*/
+#define VEN_MSLICE_OFF	1
+#define VEN_MSLICE_CNT_MB	2 /*number of MBs per slice*/
+#define VEN_MSLICE_CNT_BYTE	3 /*number of bytes per slice*/
+#define VEN_MSLICE_GOB	4 /*Multi slice by GOB for H.263 only.*/
+
+/*Different modes for Rate Control.*/
+#define VEN_RC_OFF	1
+#define VEN_RC_VBR_VFR	2
+#define VEN_RC_VBR_CFR	3
+#define VEN_RC_CBR_VFR	4
+#define VEN_RC_CBR_CFR	5
+
+/*Different modes for flushing buffers*/
+#define VEN_FLUSH_INPUT	1
+#define VEN_FLUSH_OUTPUT	2
+#define VEN_FLUSH_ALL	3
+
+/*Different input formats for YUV data.*/
+#define VEN_INPUTFMT_NV12	1/* NV12 Linear */
+#define VEN_INPUTFMT_NV21	2/* NV21 Linear */
+#define VEN_INPUTFMT_NV12_16M2KA	3/* NV12 Linear */
+
+/*Different allowed rotation modes.*/
+#define VEN_ROTATION_0	1/* 0 degrees */
+#define VEN_ROTATION_90	2/* 90 degrees */
+#define VEN_ROTATION_180	3/* 180 degrees */
+#define VEN_ROTATION_270	4/* 270 degrees */
+
+/*IOCTL timeout values*/
+#define VEN_TIMEOUT_INFINITE	0xffffffff
+
+/*Different allowed intra refresh modes.*/
+#define VEN_IR_OFF	1
+#define VEN_IR_CYCLIC	2
+#define VEN_IR_RANDOM	3
+
+/*IOCTL BASE CODES. Not to be used directly by the client.*/
+/* Base value for ioctls that are not related to encoder configuration.*/
+#define VEN_IOCTLBASE_NENC	0x800
+/* Base value for encoder configuration ioctls*/
+#define VEN_IOCTLBASE_ENC	0x850
+
+struct venc_ioctl_msg {
+	void __user *in;
+	void __user *out;
+};
+
+/*NON ENCODER CONFIGURATION IOCTLs*/
+
+/*IOCTL params:SET: InputData - unsigned long, OutputData - NULL*/
+#define VEN_IOCTL_SET_INTF_VERSION \
+	_IOW(VEN_IOCTLBASE_NENC, 0, struct venc_ioctl_msg)
+
+/*IOCTL params:CMD: InputData - venc_timeout, OutputData - venc_msg*/
+#define VEN_IOCTL_CMD_READ_NEXT_MSG \
+	_IOWR(VEN_IOCTLBASE_NENC, 1, struct venc_ioctl_msg)
+
+/*IOCTL params:CMD: InputData - NULL, OutputData - NULL*/
+#define VEN_IOCTL_CMD_STOP_READ_MSG	_IO(VEN_IOCTLBASE_NENC, 2)
+
+/*IOCTL params:SET: InputData - venc_allocatorproperty, OutputData - NULL
+ GET: InputData - NULL, OutputData - venc_allocatorproperty*/
+#define VEN_IOCTL_SET_INPUT_BUFFER_REQ \
+	_IOW(VEN_IOCTLBASE_NENC, 3, struct venc_ioctl_msg)
+#define VEN_IOCTL_GET_INPUT_BUFFER_REQ \
+	_IOR(VEN_IOCTLBASE_NENC, 4, struct venc_ioctl_msg)
+
+/*IOCTL params:CMD: InputData - venc_bufferpayload, OutputData - NULL*/
+#define VEN_IOCTL_CMD_ALLOC_INPUT_BUFFER \
+	_IOW(VEN_IOCTLBASE_NENC, 5, struct venc_ioctl_msg)
+
+/*IOCTL params:CMD: InputData - venc_bufferpayload, OutputData - NULL*/
+#define VEN_IOCTL_SET_INPUT_BUFFER \
+	_IOW(VEN_IOCTLBASE_NENC, 6, struct venc_ioctl_msg)
+
+/*IOCTL params: CMD: InputData - venc_bufferpayload, OutputData - NULL*/
+#define VEN_IOCTL_CMD_FREE_INPUT_BUFFER \
+	_IOW(VEN_IOCTLBASE_NENC, 7, struct venc_ioctl_msg)
+
+/*IOCTL params:SET: InputData - venc_allocatorproperty, OutputData - NULL
+ GET: InputData - NULL, OutputData - venc_allocatorproperty*/
+#define VEN_IOCTL_SET_OUTPUT_BUFFER_REQ \
+	_IOW(VEN_IOCTLBASE_NENC, 8, struct venc_ioctl_msg)
+#define VEN_IOCTL_GET_OUTPUT_BUFFER_REQ \
+	_IOR(VEN_IOCTLBASE_NENC, 9, struct venc_ioctl_msg)
+
+/*IOCTL params:CMD: InputData - venc_bufferpayload, OutputData - NULL*/
+#define VEN_IOCTL_CMD_ALLOC_OUTPUT_BUFFER \
+	_IOW(VEN_IOCTLBASE_NENC, 10, struct venc_ioctl_msg)
+
+
+/*IOCTL params:CMD: InputData - venc_bufferpayload, OutputData - NULL*/
+#define VEN_IOCTL_SET_OUTPUT_BUFFER \
+	_IOW(VEN_IOCTLBASE_NENC, 11, struct venc_ioctl_msg)
+
+/*IOCTL params:CMD: InputData - venc_bufferpayload, OutputData - NULL.*/
+#define VEN_IOCTL_CMD_FREE_OUTPUT_BUFFER \
+	_IOW(VEN_IOCTLBASE_NENC, 12, struct venc_ioctl_msg)
+
+
+/* Asynchronous response message code: VEN_MSG_START */
+#define VEN_IOCTL_CMD_START	_IO(VEN_IOCTLBASE_NENC, 13)
+
+
+/*IOCTL params:CMD: InputData - venc_buffer, OutputData - NULL
+ Asynchronous response message code:VEN_MSG_INPUT_BUFFER_DONE*/
+#define VEN_IOCTL_CMD_ENCODE_FRAME \
+	_IOW(VEN_IOCTLBASE_NENC, 14, struct venc_ioctl_msg)
+
+
+/*IOCTL params:CMD: InputData - venc_buffer, OutputData - NULL
+ Asynchronous response message code:VEN_MSG_OUTPUT_BUFFER_DONE*/
+#define VEN_IOCTL_CMD_FILL_OUTPUT_BUFFER \
+	_IOW(VEN_IOCTLBASE_NENC, 15, struct venc_ioctl_msg)
+
+/*IOCTL params:CMD: InputData - venc_bufferflush, OutputData - NULL
+ * Asynchronous response message codes:VEN_MSG_FLUSH_INPUT_DONE /
+ * VEN_MSG_FLUSH_OUPUT_DONE*/
+#define VEN_IOCTL_CMD_FLUSH \
+	_IOW(VEN_IOCTLBASE_NENC, 16, struct venc_ioctl_msg)
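/*
 * Flush usage sketch; struct venc_bufferflush is assumed to be declared
 * later in this header with a single flush_mode field, per the parameter
 * note above. Completion is reported asynchronously through the
 * VEN_MSG_FLUSH_INPUT_DONE / VEN_MSG_FLUSH_OUPUT_DONE messages.
 */
#include <sys/ioctl.h>

static int flush_all(int venc_fd)
{
	struct venc_bufferflush flush = { .flush_mode = VEN_FLUSH_ALL };
	struct venc_ioctl_msg msg = { .in = &flush, .out = NULL };

	return ioctl(venc_fd, VEN_IOCTL_CMD_FLUSH, &msg);
}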
+
+
+/*Asynchronous response message code:VEN_MSG_PAUSE*/
+#define VEN_IOCTL_CMD_PAUSE	_IO(VEN_IOCTLBASE_NENC, 17)
+
+/*Asynchronous response message code:VEN_MSG_RESUME*/
+#define VEN_IOCTL_CMD_RESUME _IO(VEN_IOCTLBASE_NENC, 18)
+
+/* Asynchronous response message code:VEN_MSG_STOP*/
+#define VEN_IOCTL_CMD_STOP _IO(VEN_IOCTLBASE_NENC, 19)
+
+#define VEN_IOCTL_SET_RECON_BUFFER \
+	_IOW(VEN_IOCTLBASE_NENC, 20, struct venc_ioctl_msg)
+
+#define VEN_IOCTL_FREE_RECON_BUFFER \
+	_IOW(VEN_IOCTLBASE_NENC, 21, struct venc_ioctl_msg)
+
+#define VEN_IOCTL_GET_RECON_BUFFER_SIZE \
+	_IOW(VEN_IOCTLBASE_NENC, 22, struct venc_ioctl_msg)
+
+
+
+/*ENCODER PROPERTY CONFIGURATION & CAPABILITY IOCTLs*/
+
+/*IOCTL params:SET: InputData - venc_basecfg, OutputData - NULL
+ GET: InputData - NULL, OutputData - venc_basecfg*/
+#define VEN_IOCTL_SET_BASE_CFG \
+	_IOW(VEN_IOCTLBASE_ENC, 1, struct venc_ioctl_msg)
+#define VEN_IOCTL_GET_BASE_CFG \
+	_IOR(VEN_IOCTLBASE_ENC, 2, struct venc_ioctl_msg)
+
+/*IOCTL params:SET: InputData - venc_switch, OutputData - NULL
+  GET: InputData - NULL, OutputData - venc_switch*/
+#define VEN_IOCTL_SET_LIVE_MODE \
+	_IOW(VEN_IOCTLBASE_ENC, 3, struct venc_ioctl_msg)
+#define VEN_IOCTL_GET_LIVE_MODE \
+	_IOR(VEN_IOCTLBASE_ENC, 4, struct venc_ioctl_msg)
+
+
+/*IOCTL params:SET: InputData - venc_profile, OutputData - NULL
+  GET: InputData - NULL, OutputData - venc_profile*/
+#define VEN_IOCTL_SET_CODEC_PROFILE \
+	_IOW(VEN_IOCTLBASE_ENC, 5, struct venc_ioctl_msg)
+#define VEN_IOCTL_GET_CODEC_PROFILE \
+	_IOR(VEN_IOCTLBASE_ENC, 6, struct venc_ioctl_msg)
+
+
+/*IOCTL params:SET: InputData - ven_profilelevel, OutputData - NULL
+  GET: InputData - NULL, OutputData - ven_profilelevel*/
+#define VEN_IOCTL_SET_PROFILE_LEVEL \
+	_IOW(VEN_IOCTLBASE_ENC, 7, struct venc_ioctl_msg)
+
+#define VEN_IOCTL_GET_PROFILE_LEVEL \
+	_IOR(VEN_IOCTLBASE_ENC, 8, struct venc_ioctl_msg)
+
+/*IOCTL params:SET: InputData - venc_switch, OutputData - NULL
+ GET: InputData - NULL, OutputData - venc_switch*/
+#define VEN_IOCTL_SET_SHORT_HDR \
+	_IOW(VEN_IOCTLBASE_ENC, 9, struct venc_ioctl_msg)
+#define VEN_IOCTL_GET_SHORT_HDR \
+	_IOR(VEN_IOCTLBASE_ENC, 10, struct venc_ioctl_msg)
+
+
+/*IOCTL params: SET: InputData - venc_sessionqp, OutputData - NULL
+  GET: InputData - NULL, OutputData - venc_sessionqp*/
+#define VEN_IOCTL_SET_SESSION_QP \
+	_IOW(VEN_IOCTLBASE_ENC, 11, struct venc_ioctl_msg)
+#define VEN_IOCTL_GET_SESSION_QP \
+	_IOR(VEN_IOCTLBASE_ENC, 12, struct venc_ioctl_msg)
+
+
+/*IOCTL params:SET: InputData - venc_intraperiod, OutputData - NULL
+  GET: InputData - NULL, OutputData - venc_intraperiod*/
+#define VEN_IOCTL_SET_INTRA_PERIOD \
+	_IOW(VEN_IOCTLBASE_ENC, 13, struct venc_ioctl_msg)
+#define VEN_IOCTL_GET_INTRA_PERIOD \
+	_IOR(VEN_IOCTLBASE_ENC, 14, struct venc_ioctl_msg)
+
+
+/* Request an Iframe*/
+#define VEN_IOCTL_CMD_REQUEST_IFRAME _IO(VEN_IOCTLBASE_ENC, 15)
+
+/*IOCTL params:GET: InputData - NULL, OutputData - venc_capability*/
+#define VEN_IOCTL_GET_CAPABILITY \
+	_IOR(VEN_IOCTLBASE_ENC, 16, struct venc_ioctl_msg)
+
+
+/*IOCTL params:GET: InputData - NULL, OutputData - venc_seqheader*/
+#define VEN_IOCTL_GET_SEQUENCE_HDR \
+	_IOR(VEN_IOCTLBASE_ENC, 17, struct venc_ioctl_msg)
+
+/*IOCTL params:SET: InputData - venc_entropycfg, OutputData - NULL
+ GET: InputData - NULL, OutputData - venc_entropycfg*/
+#define VEN_IOCTL_SET_ENTROPY_CFG \
+	_IOW(VEN_IOCTLBASE_ENC, 18, struct venc_ioctl_msg)
+#define VEN_IOCTL_GET_ENTROPY_CFG \
+	_IOR(VEN_IOCTLBASE_ENC, 19, struct venc_ioctl_msg)
+
+/*IOCTL params:SET: InputData - venc_dbcfg, OutputData - NULL
+ GET: InputData - NULL, OutputData - venc_dbcfg*/
+#define VEN_IOCTL_SET_DEBLOCKING_CFG \
+	_IOW(VEN_IOCTLBASE_ENC, 20, struct venc_ioctl_msg)
+#define VEN_IOCTL_GET_DEBLOCKING_CFG \
+	_IOR(VEN_IOCTLBASE_ENC, 21, struct venc_ioctl_msg)
+
+
+/*IOCTL params:SET: InputData - venc_intrarefresh, OutputData - NULL
+  GET: InputData - NULL, OutputData - venc_intrarefresh*/
+#define VEN_IOCTL_SET_INTRA_REFRESH \
+	_IOW(VEN_IOCTLBASE_ENC, 22, struct venc_ioctl_msg)
+#define VEN_IOCTL_GET_INTRA_REFRESH \
+	_IOR(VEN_IOCTLBASE_ENC, 23, struct venc_ioctl_msg)
+
+
+/*IOCTL params:SET: InputData - venc_multiclicecfg, OutputData - NULL
+  GET: InputData - NULL, OutputData - venc_multiclicecfg*/
+#define VEN_IOCTL_SET_MULTI_SLICE_CFG \
+	_IOW(VEN_IOCTLBASE_ENC, 24, struct venc_ioctl_msg)
+#define VEN_IOCTL_GET_MULTI_SLICE_CFG \
+	_IOR(VEN_IOCTLBASE_ENC, 25, struct venc_ioctl_msg)
+
+/*IOCTL params:SET: InputData - venc_ratectrlcfg, OutputData - NULL
+ GET: InputData - NULL, OutputData - venc_ratectrlcfg*/
+#define VEN_IOCTL_SET_RATE_CTRL_CFG \
+	_IOW(VEN_IOCTLBASE_ENC, 26, struct venc_ioctl_msg)
+#define VEN_IOCTL_GET_RATE_CTRL_CFG \
+	_IOR(VEN_IOCTLBASE_ENC, 27, struct venc_ioctl_msg)
+
+
+/*IOCTL params:SET: InputData - venc_voptimingcfg, OutputData - NULL
+  GET: InputData - NULL, OutputData - venc_voptimingcfg*/
+#define VEN_IOCTL_SET_VOP_TIMING_CFG \
+	_IOW(VEN_IOCTLBASE_ENC, 28, struct venc_ioctl_msg)
+#define VEN_IOCTL_GET_VOP_TIMING_CFG \
+	_IOR(VEN_IOCTLBASE_ENC, 29, struct venc_ioctl_msg)
+
+
+/*IOCTL params:SET: InputData - venc_framerate, OutputData - NULL
+ GET: InputData - NULL, OutputData - venc_framerate*/
+#define VEN_IOCTL_SET_FRAME_RATE \
+	_IOW(VEN_IOCTLBASE_ENC, 30, struct venc_ioctl_msg)
+#define VEN_IOCTL_GET_FRAME_RATE \
+	_IOR(VEN_IOCTLBASE_ENC, 31, struct venc_ioctl_msg)
+
+
+/*IOCTL params:SET: InputData - venc_targetbitrate, OutputData - NULL
+ GET: InputData - NULL, OutputData - venc_targetbitrate*/
+#define VEN_IOCTL_SET_TARGET_BITRATE \
+	_IOW(VEN_IOCTLBASE_ENC, 32, struct venc_ioctl_msg)
+#define VEN_IOCTL_GET_TARGET_BITRATE \
+	_IOR(VEN_IOCTLBASE_ENC, 33, struct venc_ioctl_msg)
+
+
+/*IOCTL params:SET: InputData - venc_rotation, OutputData - NULL
+  GET: InputData - NULL, OutputData - venc_rotation*/
+#define VEN_IOCTL_SET_ROTATION \
+	_IOW(VEN_IOCTLBASE_ENC, 34, struct venc_ioctl_msg)
+#define VEN_IOCTL_GET_ROTATION \
+	_IOR(VEN_IOCTLBASE_ENC, 35, struct venc_ioctl_msg)
+
+
+/*IOCTL params:SET: InputData - venc_headerextension, OutputData - NULL
+ GET: InputData - NULL, OutputData - venc_headerextension*/
+#define VEN_IOCTL_SET_HEC \
+	_IOW(VEN_IOCTLBASE_ENC, 36, struct venc_ioctl_msg)
+#define VEN_IOCTL_GET_HEC \
+	_IOR(VEN_IOCTLBASE_ENC, 37, struct venc_ioctl_msg)
+
+/*IOCTL params:SET: InputData - venc_switch, OutputData - NULL
+ GET: InputData - NULL, OutputData - venc_switch*/
+#define VEN_IOCTL_SET_DATA_PARTITION \
+	_IOW(VEN_IOCTLBASE_ENC, 38, struct venc_ioctl_msg)
+#define VEN_IOCTL_GET_DATA_PARTITION \
+	_IOR(VEN_IOCTLBASE_ENC, 39, struct venc_ioctl_msg)
+
+/*IOCTL params:SET: InputData - venc_switch, OutputData - NULL
+ GET: InputData - NULL, OutputData - venc_switch*/
+#define VEN_IOCTL_SET_RVLC \
+	_IOW(VEN_IOCTLBASE_ENC, 40, struct venc_ioctl_msg)
+#define VEN_IOCTL_GET_RVLC \
+	_IOR(VEN_IOCTLBASE_ENC, 41, struct venc_ioctl_msg)
+
+
+/*IOCTL params:SET: InputData - venc_switch, OutputData - NULL
+ GET: InputData - NULL, OutputData - venc_switch*/
+#define VEN_IOCTL_SET_AC_PREDICTION \
+	_IOW(VEN_IOCTLBASE_ENC, 42, struct venc_ioctl_msg)
+#define VEN_IOCTL_GET_AC_PREDICTION \
+	_IOR(VEN_IOCTLBASE_ENC, 43, struct venc_ioctl_msg)
+
+
+/*IOCTL params:SET: InputData - venc_qprange, OutputData - NULL
+ GET: InputData - NULL, OutputData - venc_qprange*/
+#define VEN_IOCTL_SET_QP_RANGE \
+	_IOW(VEN_IOCTLBASE_ENC, 44, struct venc_ioctl_msg)
+#define VEN_IOCTL_GET_QP_RANGE \
+	_IOR(VEN_IOCTLBASE_ENC, 45, struct venc_ioctl_msg)
+
+#define VEN_IOCTL_GET_NUMBER_INSTANCES \
+	_IOR(VEN_IOCTLBASE_ENC, 46, struct venc_ioctl_msg)
+
+#define VEN_IOCTL_SET_METABUFFER_MODE \
+	_IOW(VEN_IOCTLBASE_ENC, 47, struct venc_ioctl_msg)
+
+
+/*IOCTL params:SET: InputData - unsigned int, OutputData - NULL.*/
+#define VEN_IOCTL_SET_EXTRADATA \
+	_IOW(VEN_IOCTLBASE_ENC, 48, struct venc_ioctl_msg)
+/*IOCTL params:GET: InputData - NULL, OutputData - unsigned int.*/
+#define VEN_IOCTL_GET_EXTRADATA \
+	_IOR(VEN_IOCTLBASE_ENC, 49, struct venc_ioctl_msg)
+
+/*IOCTL params:SET: InputData - NULL, OutputData - NULL.*/
+#define VEN_IOCTL_SET_SLICE_DELIVERY_MODE \
+	_IO(VEN_IOCTLBASE_ENC, 50)
+
+#define VEN_IOCTL_SET_H263_PLUSPTYPE \
+	_IOW(VEN_IOCTLBASE_ENC, 51, struct venc_ioctl_msg)
+
+/*IOCTL params:SET: InputData - venc_range, OutputData - NULL.*/
+#define VEN_IOCTL_SET_CAPABILITY_LTRCOUNT \
+	_IOW(VEN_IOCTLBASE_ENC, 52, struct venc_ioctl_msg)
+/*IOCTL params:GET: InputData - NULL, OutputData - venc_range.*/
+#define VEN_IOCTL_GET_CAPABILITY_LTRCOUNT \
+	_IOR(VEN_IOCTLBASE_ENC, 53, struct venc_ioctl_msg)
+
+/*IOCTL params:SET: InputData - venc_ltrmode, OutputData - NULL.*/
+#define VEN_IOCTL_SET_LTRMODE \
+	_IOW(VEN_IOCTLBASE_ENC, 54, struct venc_ioctl_msg)
+/*IOCTL params:GET: InputData - NULL, OutputData - venc_ltrmode.*/
+#define VEN_IOCTL_GET_LTRMODE \
+	_IOR(VEN_IOCTLBASE_ENC, 55, struct venc_ioctl_msg)
+
+/*IOCTL params:SET: InputData - venc_ltrcount, OutputData - NULL.*/
+#define VEN_IOCTL_SET_LTRCOUNT \
+	_IOW(VEN_IOCTLBASE_ENC, 56, struct venc_ioctl_msg)
+/*IOCTL params:GET: InputData - NULL, OutputData - venc_ltrcount.*/
+#define VEN_IOCTL_GET_LTRCOUNT \
+	_IOR(VEN_IOCTLBASE_ENC, 57, struct venc_ioctl_msg)
+
+/*IOCTL params:SET: InputData - venc_ltrperiod, OutputData - NULL.*/
+#define VEN_IOCTL_SET_LTRPERIOD \
+	_IOW(VEN_IOCTLBASE_ENC, 58, struct venc_ioctl_msg)
+/*IOCTL params:GET: InputData - NULL, OutputData - venc_ltrperiod.*/
+#define VEN_IOCTL_GET_LTRPERIOD \
+	_IOR(VEN_IOCTLBASE_ENC, 59, struct venc_ioctl_msg)
+
+/*IOCTL params:SET: InputData - venc_ltruse, OutputData - NULL.*/
+#define VEN_IOCTL_SET_LTRUSE \
+	_IOW(VEN_IOCTLBASE_ENC, 60, struct venc_ioctl_msg)
+/*IOCTL params:GET: InputData - NULL, OutputData - venc_ltruse.*/
+#define VEN_IOCTL_GET_LTRUSE \
+	_IOR(VEN_IOCTLBASE_ENC, 61, struct venc_ioctl_msg)
+
+/*IOCTL params:SET: InputData - venc_ltrmark, OutputData - NULL.*/
+#define VEN_IOCTL_SET_LTRMARK \
+	_IOW(VEN_IOCTLBASE_ENC, 62, struct venc_ioctl_msg)
+/*IOCTL params:GET: InputData - NULL, OutputData - venc_ltrmark.*/
+#define VEN_IOCTL_GET_LTRMARK \
+	_IOR(VEN_IOCTLBASE_ENC, 63, struct venc_ioctl_msg)
+
+/*IOCTL params:SET: InputData - unsigned int, OutputData - NULL*/
+#define VEN_IOCTL_SET_SPS_PPS_FOR_IDR \
+	_IOW(VEN_IOCTLBASE_ENC, 64, struct venc_ioctl_msg)
+
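+/*
+ * Illustrative sketch, not part of the ABI: driving a SET property from
+ * user space.  Assumes struct venc_ioctl_msg (defined earlier in this
+ * header) carries the usual {in, out} user pointers.
+ *
+ *	struct venc_basecfg cfg = { .input_width = 1280, .input_height = 720 };
+ *	struct venc_ioctl_msg msg = { .in = &cfg, .out = NULL };
+ *
+ *	if (ioctl(fd, VEN_IOCTL_SET_BASE_CFG, &msg) < 0)
+ *		perror("VEN_IOCTL_SET_BASE_CFG");
+ */
+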
+struct venc_range {
+	unsigned long	max;
+	unsigned long	min;
+	unsigned long	step_size;
+};
+
+struct venc_switch{
+	unsigned char	status;
+};
+
+struct venc_allocatorproperty{
+	unsigned long	 mincount;
+	unsigned long	 maxcount;
+	unsigned long	 actualcount;
+	unsigned long	 datasize;
+	unsigned long	 suffixsize;
+	unsigned long	 alignment;
+	unsigned long	 bufpoolid;
+};
+
+struct venc_bufferpayload{
+	unsigned char *pbuffer;
+	size_t	sz;
+	int	fd;
+	unsigned int	offset;
+	unsigned int	maped_size;
+	unsigned long	filled_len;
+};
+
+struct venc_buffer{
+ unsigned char *ptrbuffer;
+ unsigned long	sz;
+ unsigned long	len;
+ unsigned long	offset;
+ long long	timestamp;
+ unsigned long	flags;
+ void	*clientdata;
+};
+
+struct venc_basecfg{
+	unsigned long	input_width;
+	unsigned long	input_height;
+	unsigned long	dvs_width;
+	unsigned long	dvs_height;
+	unsigned long	codectype;
+	unsigned long	fps_num;
+	unsigned long	fps_den;
+	unsigned long	targetbitrate;
+	unsigned long	inputformat;
+};
+
+struct venc_profile{
+	unsigned long	profile;
+};
+struct ven_profilelevel{
+	unsigned long	level;
+};
+
+struct venc_sessionqp{
+	unsigned long	iframeqp;
+	unsigned long	pframqp;
+};
+
+struct venc_qprange{
+	unsigned long	maxqp;
+	unsigned long	minqp;
+};
+
+struct venc_plusptype {
+	unsigned long	plusptype_enable;
+};
+
+struct venc_intraperiod{
+	unsigned long	num_pframes;
+	unsigned long	num_bframes;
+};
+struct venc_seqheader{
+	unsigned char *hdrbufptr;
+	unsigned long	bufsize;
+	unsigned long	hdrlen;
+};
+
+struct venc_capability{
+	unsigned long	codec_types;
+	unsigned long	maxframe_width;
+	unsigned long	maxframe_height;
+	unsigned long	maxtarget_bitrate;
+	unsigned long	maxframe_rate;
+	unsigned long	input_formats;
+	unsigned char	dvs;
+};
+
+struct venc_entropycfg{
+	unsigned long	entropysel;
+	unsigned long	cabacmodel;
+};
+
+struct venc_dbcfg{
+	unsigned long	db_mode;
+	unsigned long	slicealpha_offset;
+	unsigned long	slicebeta_offset;
+};
+
+struct venc_intrarefresh{
+	unsigned long	irmode;
+	unsigned long	mbcount;
+};
+
+struct venc_multiclicecfg{
+	unsigned long	mslice_mode;
+	unsigned long	mslice_size;
+};
+
+struct venc_bufferflush{
+	unsigned long	flush_mode;
+};
+
+struct venc_ratectrlcfg{
+	unsigned long	rcmode;
+};
+
+struct	venc_voptimingcfg{
+	unsigned long	voptime_resolution;
+};
+struct venc_framerate{
+	unsigned long	fps_denominator;
+	unsigned long	fps_numerator;
+};
+
+struct venc_targetbitrate{
+	unsigned long	target_bitrate;
+};
+
+
+struct venc_rotation{
+	unsigned long	rotation;
+};
+
+struct venc_timeout{
+	 unsigned long	millisec;
+};
+
+struct venc_headerextension{
+	 unsigned long	header_extension;
+};
+
+struct venc_msg{
+	unsigned long	statuscode;
+	unsigned long	msgcode;
+	struct venc_buffer	buf;
+	unsigned long	msgdata_size;
+};
+
+struct venc_recon_addr{
+	unsigned char *pbuffer;
+	unsigned long buffer_size;
+	unsigned long pmem_fd;
+	unsigned long offset;
+};
+
+struct venc_recon_buff_size{
+	int width;
+	int height;
+	int size;
+	int alignment;
+};
+
+struct venc_ltrmode {
+	unsigned long   ltr_mode;
+};
+
+struct venc_ltrcount {
+	unsigned long   ltr_count;
+};
+
+struct venc_ltrperiod {
+	unsigned long   ltr_period;
+};
+
+struct venc_ltruse {
+	unsigned long   ltr_id;
+	unsigned long   ltr_frames;
+};
+
+#endif /* _UAPI_MSM_VIDC_ENC_H_ */
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/include/uapi/linux/net_map.h	2019-01-22 16:16:28.567292264 +0100
@@ -0,0 +1,30 @@
+#ifndef _NET_MAP_H_
+#define _NET_MAP_H_
+
+struct rmnet_map_header_s {
+#ifndef RMNET_USE_BIG_ENDIAN_STRUCTS
+	uint8_t  pad_len:6;
+	uint8_t  reserved_bit:1;
+	uint8_t  cd_bit:1;
+#else
+	uint8_t  cd_bit:1;
+	uint8_t  reserved_bit:1;
+	uint8_t  pad_len:6;
+#endif /* RMNET_USE_BIG_ENDIAN_STRUCTS */
+	uint8_t  mux_id;
+	uint16_t pkt_len;
+}  __aligned(1);
+
+#define RMNET_MAP_GET_MUX_ID(Y) (((struct rmnet_map_header_s *)Y->data)->mux_id)
+#define RMNET_MAP_GET_CD_BIT(Y) (((struct rmnet_map_header_s *)Y->data)->cd_bit)
+#define RMNET_MAP_GET_PAD(Y) (((struct rmnet_map_header_s *)Y->data)->pad_len)
+#define RMNET_MAP_GET_CMD_START(Y) ((struct rmnet_map_control_command_s *) \
+				  (Y->data + sizeof(struct rmnet_map_header_s)))
+#define RMNET_MAP_GET_LENGTH(Y) (ntohs( \
+			       ((struct rmnet_map_header_s *)Y->data)->pkt_len))
+
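+/*
+ * Illustrative sketch (assumptions: skb is a struct sk_buff * whose data
+ * pointer is positioned at a MAP header; struct rmnet_map_control_command_s
+ * is declared elsewhere):
+ *
+ *	uint8_t  mux = RMNET_MAP_GET_MUX_ID(skb);
+ *	uint16_t len = RMNET_MAP_GET_LENGTH(skb);
+ *	struct rmnet_map_control_command_s *cmd;
+ *
+ *	if (RMNET_MAP_GET_CD_BIT(skb))
+ *		cmd = RMNET_MAP_GET_CMD_START(skb);
+ */
+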
+#define RMNET_IP_VER_MASK 0xF0
+#define RMNET_IPV4        0x40
+#define RMNET_IPV6        0x60
+
+#endif /* _NET_MAP_H_ */
diff -Nruw linux-4.4.115-fbx/include/uapi/linux/nfc./Kbuild linux-4.4.115-fbx/include/uapi/linux/nfc/Kbuild
--- linux-4.4.115-fbx/include/uapi/linux/nfc./Kbuild	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/include/uapi/linux/nfc/Kbuild	2019-01-22 16:16:28.579292373 +0100
@@ -0,0 +1,2 @@
+#UAPI export list
+header-y += nfcinfo.h
diff -Nruw linux-4.4.115-fbx/include/uapi/linux/nfc./nfcinfo.h linux-4.4.115-fbx/include/uapi/linux/nfc/nfcinfo.h
--- linux-4.4.115-fbx/include/uapi/linux/nfc./nfcinfo.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/include/uapi/linux/nfc/nfcinfo.h	2019-01-22 16:16:28.579292373 +0100
@@ -0,0 +1,21 @@
+#ifndef _UAPI_NFCINFO_H_
+#define _UAPI_NFCINFO_H_
+
+#include <linux/ioctl.h>
+
+#define NFCC_MAGIC 0xE9
+#define NFCC_GET_INFO _IOW(NFCC_MAGIC, 0x09, unsigned int)
+
+struct nqx_devinfo {
+	unsigned char chip_type;
+	unsigned char rom_version;
+	unsigned char fw_major;
+	unsigned char fw_minor;
+};
+
+union nqx_uinfo {
+	unsigned int i;
+	struct nqx_devinfo info;
+};
+
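+/*
+ * Illustrative sketch (assumptions: the ioctl returns the packed device
+ * info as its return value, and the device node name varies by platform):
+ *
+ *	union nqx_uinfo u;
+ *
+ *	u.i = ioctl(fd, NFCC_GET_INFO, 0);
+ *	printf("chip %u fw %u.%u\n", u.info.chip_type,
+ *	       u.info.fw_major, u.info.fw_minor);
+ */
+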
+#endif
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/include/uapi/linux/oneshot_sync.h	2019-01-22 16:16:26.719275529 +0100
@@ -0,0 +1,49 @@
+#ifndef ONESHOT_SYNC_H
+#define ONESHOT_SYNC_H
+
+/**
+ * DOC: Oneshot sync Userspace API
+ *
+ * Opening a file descriptor from /dev/oneshot_sync creates a sync timeline
+ * for userspace signaled fences. Userspace may create new fences from a
+ * /dev/oneshot_sync file descriptor and then signal them by passing the fence
+ * file descriptor in an ioctl() call on the fd used to create the fence.
+ * Unlike most sync timelines, there is no ordering on a oneshot timeline.
+ * Each fence may be signaled in any order without affecting the state of other
+ * fences on the timeline.
+ */
+
+#define ONESHOT_SYNC_IOC_MAGIC '1'
+
+/**
+ * struct oneshot_sync_create_fence - argument to create fence ioctl
+ * @name: name of the new fence, to aid debugging.
+ * @fence_fd: returned sync_fence file descriptor
+ */
+struct oneshot_sync_create_fence {
+	char name[32];
+	int fence_fd;
+};
+
+/**
+ * DOC: ONESHOT_SYNC_IOC_CREATE_FENCE - create a userspace signaled fence
+ *
+ * Create a fence that may be signaled by userspace by calling
+ * ONESHOT_SYNC_IOC_SIGNAL_FENCE. There are no order dependencies between
+ * these fences, but otherwise they behave like normal sync fences.
+ * Argument is struct oneshot_sync_create_fence.
+ */
+#define ONESHOT_SYNC_IOC_CREATE_FENCE _IOWR(ONESHOT_SYNC_IOC_MAGIC, 1,\
+		struct oneshot_sync_create_fence)
+
+/**
+ * DOC: ONESHOT_SYNC_IOC_SIGNAL_FENCE - signal a fence
+ *
+ * Signal a fence that was created by a ONESHOT_SYNC_IOC_CREATE_FENCE
+ * call on the same file descriptor. This allows a fence to be shared
+ * to other processes but only signaled by the process owning the fd
+ * used to create the fence.  Argument is the fence file descriptor.
+ */
+#define ONESHOT_SYNC_IOC_SIGNAL_FENCE _IOWR(ONESHOT_SYNC_IOC_MAGIC, 2,\
+		int)
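+
+/*
+ * Illustrative sketch of the create/signal flow described above (error
+ * handling omitted):
+ *
+ *	int dev = open("/dev/oneshot_sync", O_RDWR);
+ *	struct oneshot_sync_create_fence create = { .name = "example" };
+ *
+ *	ioctl(dev, ONESHOT_SYNC_IOC_CREATE_FENCE, &create);
+ *	... hand create.fence_fd to a waiter ...
+ *	ioctl(dev, ONESHOT_SYNC_IOC_SIGNAL_FENCE, &create.fence_fd);
+ */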
+#endif
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/include/uapi/linux/prctl-private.h	2019-01-22 16:16:28.583292409 +0100
@@ -0,0 +1,10 @@
+#ifndef _LINUX_PRCTL_PRIVATE_H
+#define _LINUX_PRCTL_PRIVATE_H
+
+/*
+ * Freebox addition: set/get exec mode.
+ */
+#define PR_SET_EXEC_MODE	54
+#define PR_GET_EXEC_MODE	55
+
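+/*
+ * Illustrative sketch (the semantics of the mode value are defined by the
+ * Freebox kernel, not by this header):
+ *
+ *	prctl(PR_SET_EXEC_MODE, mode, 0, 0, 0);
+ *	mode = prctl(PR_GET_EXEC_MODE, 0, 0, 0, 0);
+ */
+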
+#endif /* ! _LINUX_PRCTL_PRIVATE_H */
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/include/uapi/linux/qbt1000.h	2019-01-22 16:16:28.583292409 +0100
@@ -0,0 +1,99 @@
+#ifndef _UAPI_QBT1000_H_
+#define _UAPI_QBT1000_H_
+
+#define MAX_NAME_SIZE					 32
+
+/*
+* enum qbt1000_commands -
+*      enumeration of command options
+* @QBT1000_LOAD_APP - cmd loads TZ app
+* @QBT1000_UNLOAD_APP - cmd unloads TZ app
+* @QBT1000_SEND_TZCMD - sends cmd to TZ app
+* @QBT1000_SET_FINGER_DETECT_KEY - sets the input key to send on finger detect
+* @QBT1000_CONFIGURE_POWER_KEY - enables/disables sending the power key on
+*	finger down events
+*/
+enum qbt1000_commands {
+	QBT1000_LOAD_APP = 100,
+	QBT1000_UNLOAD_APP = 101,
+	QBT1000_SEND_TZCMD = 102,
+	QBT1000_SET_FINGER_DETECT_KEY = 103,
+	QBT1000_CONFIGURE_POWER_KEY = 104
+};
+
+/*
+* enum qbt1000_fw_event -
+*      enumeration of firmware events
+* @FW_EVENT_FINGER_DOWN - finger down detected
+* @FW_EVENT_FINGER_UP - finger up detected
+* @FW_EVENT_CBGE_REQUIRED - an indication IPC from the firmware is pending
+*/
+enum qbt1000_fw_event {
+	FW_EVENT_FINGER_DOWN = 1,
+	FW_EVENT_FINGER_UP = 2,
+	FW_EVENT_CBGE_REQUIRED = 3,
+};
+
+/*
+* struct qbt1000_app -
+*      used to load and unload apps in TZ
+* @app_handle - qseecom handle for clients
+* @name - Name of secure app to load
+* @size - Size of requested buffer of secure app
+* @high_band_width - 1 - for high bandwidth usage
+*                    0 - for normal bandwidth usage
+*/
+struct qbt1000_app {
+	struct qseecom_handle **app_handle;
+	char name[MAX_NAME_SIZE];
+	uint32_t size;
+	uint8_t high_band_width;
+};
+
+/*
+* struct qbt1000_send_tz_cmd -
+*      used to send cmds to the TZ app
+* @app_handle - qseecom handle for clients
+* @req_buf - Buffer containing request for secure app
+* @req_buf_len - Length of request buffer
+* @rsp_buf - Buffer containing response from secure app
+* @rsp_buf_len - Length of response buffer
+*/
+struct qbt1000_send_tz_cmd {
+	struct qseecom_handle *app_handle;
+	uint8_t *req_buf;
+	uint32_t req_buf_len;
+	uint8_t *rsp_buf;
+	uint32_t rsp_buf_len;
+};
+
+/*
+* struct qbt1000_erie_event -
+*      used to receive events from Erie
+* @buf - Buffer containing event from Erie
+* @buf_len - Length of buffer
+*/
+struct qbt1000_erie_event {
+	uint8_t *buf;
+	uint32_t buf_len;
+};
+
+/*
+* struct qbt1000_set_finger_detect_key -
+*      used to configure the input key which is sent on finger down/up event
+* @key_code - Key code to send on finger down/up. 0 disables sending key events
+*/
+struct qbt1000_set_finger_detect_key {
+	unsigned int key_code;
+};
+
+/*
+* struct qbt1000_configure_power_key -
+*      used to configure whether the power key is sent on finger down
+* @enable - if non-zero, power key is sent on finger down
+*/
+struct qbt1000_configure_power_key {
+	unsigned int enable;
+};
+
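+/*
+ * Illustrative sketch (assumption: the driver's ioctl handler consumes
+ * these enum values directly as command codes):
+ *
+ *	struct qbt1000_set_finger_detect_key k = { .key_code = KEY_POWER };
+ *
+ *	ioctl(fd, QBT1000_SET_FINGER_DETECT_KEY, &k);
+ */
+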
+#endif /* _UAPI_QBT1000_H_ */
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/include/uapi/linux/qcedev.h	2019-01-22 16:16:28.583292409 +0100
@@ -0,0 +1,259 @@
+#ifndef _UAPI_QCEDEV__H
+#define _UAPI_QCEDEV__H
+
+#include <linux/types.h>
+#include <linux/ioctl.h>
+#include "fips_status.h"
+
+#define QCEDEV_MAX_SHA_BLOCK_SIZE	64
+#define QCEDEV_MAX_BEARER	31
+#define QCEDEV_MAX_KEY_SIZE	64
+#define QCEDEV_MAX_IV_SIZE	32
+
+#define QCEDEV_MAX_BUFFERS      16
+#define QCEDEV_MAX_SHA_DIGEST	32
+
+#define QCEDEV_USE_PMEM		1
+#define QCEDEV_NO_PMEM		0
+
+#define QCEDEV_AES_KEY_128	16
+#define QCEDEV_AES_KEY_192	24
+#define QCEDEV_AES_KEY_256	32
+/**
+* qcedev_oper_enum: Operation types
+* @QCEDEV_OPER_ENC:		Encrypt
+* @QCEDEV_OPER_DEC:		Decrypt
+* @QCEDEV_OPER_ENC_NO_KEY:	Encrypt. Do not need key to be specified by
+*				user. Key already set by an external processor.
+* @QCEDEV_OPER_DEC_NO_KEY:	Decrypt. Do not need the key to be specified by
+*				user. Key already set by an external processor.
+*/
+enum qcedev_oper_enum {
+	QCEDEV_OPER_DEC		= 0,
+	QCEDEV_OPER_ENC		= 1,
+	QCEDEV_OPER_DEC_NO_KEY	= 2,
+	QCEDEV_OPER_ENC_NO_KEY	= 3,
+	QCEDEV_OPER_LAST
+};
+
+/**
+* qcedev_cipher_alg_enum: Cipher algorithm types
+* @QCEDEV_ALG_DES:		DES
+* @QCEDEV_ALG_3DES:		3DES
+* @QCEDEV_ALG_AES:		AES
+*/
+enum qcedev_cipher_alg_enum {
+	QCEDEV_ALG_DES		= 0,
+	QCEDEV_ALG_3DES		= 1,
+	QCEDEV_ALG_AES		= 2,
+	QCEDEV_ALG_LAST
+};
+
+/**
+* qcedev_cipher_mode_enum: Cipher block modes (AES and DES)
+* @QCEDEV_AES_MODE_CBC:		CBC
+* @QCEDEV_AES_MODE_ECB:		ECB
+* @QCEDEV_AES_MODE_CTR:		CTR
+* @QCEDEV_AES_MODE_XTS:		XTS
+* @QCEDEV_AES_MODE_CCM:		CCM
+* @QCEDEV_DES_MODE_CBC:		CBC
+* @QCEDEV_DES_MODE_ECB:		ECB
+*/
+enum qcedev_cipher_mode_enum {
+	QCEDEV_AES_MODE_CBC	= 0,
+	QCEDEV_AES_MODE_ECB	= 1,
+	QCEDEV_AES_MODE_CTR	= 2,
+	QCEDEV_AES_MODE_XTS	= 3,
+	QCEDEV_AES_MODE_CCM	= 4,
+	QCEDEV_DES_MODE_CBC	= 5,
+	QCEDEV_DES_MODE_ECB	= 6,
+	QCEDEV_AES_DES_MODE_LAST
+};
+
+/**
+* enum qcedev_sha_alg_enum: Secure Hashing Algorithm
+* @QCEDEV_ALG_SHA1:		Digest returned: 20 bytes (160 bits)
+* @QCEDEV_ALG_SHA256:		Digest returned: 32 bytes (256 bit)
+* @QCEDEV_ALG_SHA1_HMAC:	HMAC returned 20 bytes (160 bits)
+* @QCEDEV_ALG_SHA256_HMAC:	HMAC returned 32 bytes (256 bit)
+* @QCEDEV_ALG_AES_CMAC:		Configurable MAC size
+*/
+enum qcedev_sha_alg_enum {
+	QCEDEV_ALG_SHA1		= 0,
+	QCEDEV_ALG_SHA256	= 1,
+	QCEDEV_ALG_SHA1_HMAC	= 2,
+	QCEDEV_ALG_SHA256_HMAC	= 3,
+	QCEDEV_ALG_AES_CMAC	= 4,
+	QCEDEV_ALG_SHA_ALG_LAST
+};
+
+/**
+* struct buf_info - Buffer information
+* @offset:			Offset from the base address of the buffer
+*				(Used when buffer is allocated using PMEM)
+* @vaddr:			Virtual buffer address pointer
+* @len:				Size of the buffer
+*/
+struct	buf_info {
+	union {
+		uint32_t	offset;
+		uint8_t		*vaddr;
+	};
+	uint32_t	len;
+};
+
+/**
+* struct qcedev_vbuf_info - Source and destination Buffer information
+* @src:				Array of buf_info for input/source
+* @dst:				Array of buf_info for output/destination
+*/
+struct	qcedev_vbuf_info {
+	struct buf_info	src[QCEDEV_MAX_BUFFERS];
+	struct buf_info	dst[QCEDEV_MAX_BUFFERS];
+};
+
+/**
+* struct qcedev_pmem_info - Stores PMEM buffer information
+* @fd_src:			Handle to /dev/adsp_pmem used to allocate
+*				memory for input/src buffer
+* @src:				Array of buf_info for input/source
+* @fd_dst:			Handle to /dev/adsp_pmem used to allocate
+*				memory for output/dst buffer
+* @dst:				Array of buf_info for output/destination
+* @pmem_src_offset:		The offset from input/src buffer
+*				(allocated by PMEM)
+*/
+struct	qcedev_pmem_info {
+	int		fd_src;
+	struct buf_info	src[QCEDEV_MAX_BUFFERS];
+	int		fd_dst;
+	struct buf_info	dst[QCEDEV_MAX_BUFFERS];
+};
+
+/**
+* struct qcedev_cipher_op_req - Holds the ciphering request information
+* @use_pmem (IN):	Flag to indicate if buffer source is PMEM
+*			QCEDEV_USE_PMEM/QCEDEV_NO_PMEM
+* @pmem (IN):		Stores PMEM buffer information.
+*			Refer struct qcedev_pmem_info
+* @vbuf (IN/OUT):	Stores Source and destination Buffer information
+*			Refer to struct qcedev_vbuf_info
+* @data_len (IN):	Total Length of input/src and output/dst in bytes
+* @in_place_op (IN):	Indicates whether the operation is inplace where
+*			source == destination
+*			When using PMEM allocated memory, must set this to 1
+* @enckey (IN):		128 bits of confidentiality key
+*			enckey[0] bit 127-120, enckey[1] bit 119-112,..
+*			enckey[15] bit 7-0
+* @encklen (IN):	Length of the encryption key (set to 128 bits/16
+*			bytes in the driver)
+* @iv (IN/OUT):		Initialisation vector data
+*			This is updated by the driver, incremented by
+*			number of blocks encrypted/decrypted.
+* @ivlen (IN):		Length of the IV
+* @byteoffset (IN):	Offset in the Cipher BLOCK (applicable and to be set
+*			for AES-128 CTR mode only)
+* @alg (IN):		Type of ciphering algorithm: AES/DES/3DES
+* @mode (IN):		Mode to use with the AES algorithm: ECB/CBC/CTR.
+*			Applicable only when using the AES algorithm
+* @op (IN):		Type of operation: QCEDEV_OPER_DEC/QCEDEV_OPER_ENC or
+*			QCEDEV_OPER_ENC_NO_KEY/QCEDEV_OPER_DEC_NO_KEY
+*
+* If use_pmem is set to 0, the driver assumes that memory was not allocated
+* via PMEM; the kernel will need to allocate memory, copy data in from the
+* user space buffers (data_src/data_dst), process it accordingly, and copy
+* the result back to the user space buffer
+*
+* If use_pmem is set to 1, the driver assumes that memory was allocated via
+* PMEM.
+* The kernel driver will use the fd_src to determine the kernel virtual address
+* base that maps to the user space virtual address base for the  buffer
+* allocated in user space.
+* The final input/src and output/dst buffer pointer will be determined
+* by adding the offsets to the kernel virtual addr.
+*
+* If use of a hardware key is supported on the target, the user can configure
+* the key parameters (encklen, enckey) to use the hardware key.
+* In order to use the hardware key, set encklen to 0 and set the enckey
+* data array to 0.
+*/
+struct	qcedev_cipher_op_req {
+	uint8_t				use_pmem;
+	union {
+		struct qcedev_pmem_info	pmem;
+		struct qcedev_vbuf_info	vbuf;
+	};
+	uint32_t			entries;
+	uint32_t			data_len;
+	uint8_t				in_place_op;
+	uint8_t				enckey[QCEDEV_MAX_KEY_SIZE];
+	uint32_t			encklen;
+	uint8_t				iv[QCEDEV_MAX_IV_SIZE];
+	uint32_t			ivlen;
+	uint32_t			byteoffset;
+	enum qcedev_cipher_alg_enum	alg;
+	enum qcedev_cipher_mode_enum	mode;
+	enum qcedev_oper_enum		op;
+};
+
+/**
+* struct qcedev_sha_op_req - Holds the hashing request information
+* @data (IN):			Array of pointers to the data to be hashed
+* @entries (IN):		Number of buf_info entries in the data array
+* @data_len (IN):		Length of data to be hashed
+* @digest (IN/OUT):		Returns the hashed data information
+* @diglen (OUT):		Size of the hashed/digest data
+* @authkey (IN):		Pointer to authentication key for HMAC
+* @authklen (IN):		Size of the authentication key
+* @alg (IN):			Secure Hash algorithm
+*/
+struct	qcedev_sha_op_req {
+	struct buf_info			data[QCEDEV_MAX_BUFFERS];
+	uint32_t			entries;
+	uint32_t			data_len;
+	uint8_t				digest[QCEDEV_MAX_SHA_DIGEST];
+	uint32_t			diglen;
+	uint8_t				*authkey;
+	uint32_t			authklen;
+	enum qcedev_sha_alg_enum	alg;
+};
+
+/**
+* struct qfips_verify_t - Holds data for FIPS Integrity test
+* @kernel_size  (IN):		Size of kernel Image
+* @kernel       (IN):		pointer to buffer containing the kernel Image
+*/
+struct qfips_verify_t {
+	unsigned kernel_size;
+	void *kernel;
+};
+
+struct file;
+extern long qcedev_ioctl(struct file *file,
+			unsigned cmd, unsigned long arg);
+
+#define QCEDEV_IOC_MAGIC	0x87
+
+#define QCEDEV_IOCTL_ENC_REQ		\
+	_IOWR(QCEDEV_IOC_MAGIC, 1, struct qcedev_cipher_op_req)
+#define QCEDEV_IOCTL_DEC_REQ		\
+	_IOWR(QCEDEV_IOC_MAGIC, 2, struct qcedev_cipher_op_req)
+#define QCEDEV_IOCTL_SHA_INIT_REQ	\
+	_IOWR(QCEDEV_IOC_MAGIC, 3, struct qcedev_sha_op_req)
+#define QCEDEV_IOCTL_SHA_UPDATE_REQ	\
+	_IOWR(QCEDEV_IOC_MAGIC, 4, struct qcedev_sha_op_req)
+#define QCEDEV_IOCTL_SHA_FINAL_REQ	\
+	_IOWR(QCEDEV_IOC_MAGIC, 5, struct qcedev_sha_op_req)
+#define QCEDEV_IOCTL_GET_SHA_REQ	\
+	_IOWR(QCEDEV_IOC_MAGIC, 6, struct qcedev_sha_op_req)
+#define QCEDEV_IOCTL_LOCK_CE	\
+	_IO(QCEDEV_IOC_MAGIC, 7)
+#define QCEDEV_IOCTL_UNLOCK_CE	\
+	_IO(QCEDEV_IOC_MAGIC, 8)
+#define QCEDEV_IOCTL_GET_CMAC_REQ	\
+	_IOWR(QCEDEV_IOC_MAGIC, 9, struct qcedev_sha_op_req)
+#define QCEDEV_IOCTL_UPDATE_FIPS_STATUS		\
+	_IOWR(QCEDEV_IOC_MAGIC, 10, enum fips_status)
+#define QCEDEV_IOCTL_QUERY_FIPS_STATUS	\
+	_IOR(QCEDEV_IOC_MAGIC, 11, enum fips_status)
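+
+/*
+ * Illustrative sketch of a non-PMEM, in-place AES-128-CBC encrypt request
+ * (error handling omitted; buf, len, key and iv are caller-supplied):
+ *
+ *	struct qcedev_cipher_op_req req = {0};
+ *
+ *	req.use_pmem = QCEDEV_NO_PMEM;
+ *	req.entries = 1;
+ *	req.vbuf.src[0].vaddr = buf;
+ *	req.vbuf.src[0].len = len;
+ *	req.vbuf.dst[0].vaddr = buf;
+ *	req.vbuf.dst[0].len = len;
+ *	req.data_len = len;
+ *	req.in_place_op = 1;
+ *	memcpy(req.enckey, key, QCEDEV_AES_KEY_128);
+ *	req.encklen = QCEDEV_AES_KEY_128;
+ *	memcpy(req.iv, iv, 16);
+ *	req.ivlen = 16;
+ *	req.alg = QCEDEV_ALG_AES;
+ *	req.mode = QCEDEV_AES_MODE_CBC;
+ *	req.op = QCEDEV_OPER_ENC;
+ *	ioctl(fd, QCEDEV_IOCTL_ENC_REQ, &req);
+ */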
+#endif /* _UAPI_QCEDEV__H */
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/include/uapi/linux/qcota.h	2019-01-22 16:16:28.583292409 +0100
@@ -0,0 +1,210 @@
+#ifndef _UAPI_QCOTA_H
+#define _UAPI_QCOTA_H
+
+#include <linux/types.h>
+#include <linux/ioctl.h>
+
+#define QCE_OTA_MAX_BEARER   31
+#define OTA_KEY_SIZE 16   /* 128 bits of keys. */
+
+enum qce_ota_dir_enum {
+	QCE_OTA_DIR_UPLINK   = 0,
+	QCE_OTA_DIR_DOWNLINK = 1,
+	QCE_OTA_DIR_LAST
+};
+
+enum qce_ota_algo_enum {
+	QCE_OTA_ALGO_KASUMI = 0,
+	QCE_OTA_ALGO_SNOW3G = 1,
+	QCE_OTA_ALGO_LAST
+};
+
+/**
+ * struct qce_f8_req - qce f8 request
+ * @data_in:	packets input data stream to be ciphered.
+ *		If NULL, streaming mode operation.
+ * @data_out:	ciphered packets output data.
+ * @data_len:	length of data_in and data_out in bytes.
+ * @count_c:	count-C, ciphering sequence number, 32 bit
+ * @bearer:	5 bit of radio bearer identifier.
+ * @ckey:	128 bits of confidentiality key,
+ *		ckey[0] bit 127-120, ckey[1] bit 119-112,.., ckey[15] bit 7-0.
+ * @direction:	uplink or downlink.
+ * @algorithm:	Kasumi, or Snow3G.
+ *
+ * If data_in is NULL, the engine will run in a special mode called
+ * key stream mode. In this special mode, the engine will generate
+ * key stream output for the number of bytes specified in the
+ * data_len, based on the input parameters of direction, algorithm,
+ * ckey, bearer, and count_c. data_len is restricted to
+ * a multiple of 16 bytes.  The application can then take the
+ * output stream, exclusive-OR it with the input data stream, and
+ * generate the final cipher data stream.
+ */
+struct qce_f8_req {
+	uint8_t  *data_in;
+	uint8_t  *data_out;
+	uint16_t  data_len;
+	uint32_t  count_c;
+	uint8_t   bearer;
+	uint8_t   ckey[OTA_KEY_SIZE];
+	enum qce_ota_dir_enum  direction;
+	enum qce_ota_algo_enum algorithm;
+};
+
+/**
+ * struct qce_f8_multi_pkt_req - qce f8 multiple packet request
+ *			Multiple packets with uniform size and
+ *			F8 ciphering parameters can be ciphered in a
+ *			single request.
+ *
+ * @num_pkt:		number of packets.
+ *
+ * @cipher_start:	ciphering starts offset within a packet.
+ *
+ * @cipher_size:	number of bytes to be ciphered within a packet.
+ *
+ * @qce_f8_req:		description of the packet and F8 parameters.
+ *			The following fields have special meaning for
+ *			multiple packet operation,
+ *
+ *	@data_len:	data_len indicates the length of a packet.
+ *
+ *	@data_in:	packets are concatenated together in a byte
+ *			stream started at data_in.
+ *
+ *	@data_out:	The returned ciphered output for multiple
+ *			packets.
+ *			Each packet ciphered output are concatenated
+ *			together into a byte stream started at data_out.
+ *			Note that each ciphered packet's output area
+ *			from offset 0 to cipher_start-1, and from offset
+ *			cipher_start+cipher_size to data_len-1, remains
+ *			unaltered from the packet input area.
+ *	@count_c:	count-C of the first packet, 32 bit.
+ *
+ *
+ *   In one request, multiple packets can be ciphered, and output to the
+ *   data_out stream.
+ *
+ *   Packet data are laid out contiguously in sequence in the data_in
+ *   and data_out areas. Every packet is of identical size.
+ *   If the PDU is not byte aligned, set the data_len value
+ *   to the rounded-up value of the packet size. E.g., for a PDU size
+ *   of 253 bits, set the packet size to 32 bytes. The next packet
+ *   starts on the next byte boundary.
+ *
+ *   For each packet, data from offset 0 to cipher_start
+ *   will be left unchanged and output to the data_out area.
+ *   This area of the packet can be for the RLC header, which is not
+ *   to be ciphered.
+ *
+ *   The ciphering of a packet starts from offset cipher_start, for
+ *   cipher_size bytes of data. Data starting from
+ *   offset cipher_start + cipher_size to the end of packet will be left
+ *   unchanged and output to the data_out area.
+ *
+ *   For each packet, the input arguments bearer, direction,
+ *   ckey, and algorithm have to be the same. count_c is the ciphering
+ *   sequence number of the first packet; the 2nd packet's sequence
+ *   number is assumed to be count_c + 1, the 3rd packet's is
+ *   count_c + 2, and so on.
+ *
+ */
+struct qce_f8_multi_pkt_req {
+	uint16_t    num_pkt;
+	uint16_t    cipher_start;
+	uint16_t    cipher_size;
+	struct qce_f8_req qce_f8_req;
+};
+
+/**
+ * struct qce_f8_varible_multi_pkt_req - qce f8 multiple packet request
+ *                      Multiple packets with variable size and
+ *                      F8 ciphering parameters can be ciphered in a
+ *                      single request.
+ *
+ * @num_pkt:            number of packets.
+ *
+ * @cipher_iov[]:       array of iov of packets to be ciphered.
+ *
+ *
+ * @qce_f8_req:         description of the packet and F8 parameters.
+ *                      The following fields have special meaning for
+ *                      multiple packet operation,
+ *
+ *      @data_len:      ignored.
+ *
+ *      @data_in:       ignored.
+ *
+ *      @data_out:      ignored.
+ *
+ *      @count_c:       count-C of the first packet, 32 bit.
+ *
+ *
+ *   In one request, multiple packets can be ciphered.
+ *
+ *   The i-th packet is defined by cipher_iov[i-1].
+ *   The ciphering of i-th packet starts from offset 0 of the PDU specified
+ *   by cipher_iov[i-1].addr, for cipher_iov[i-1].size bytes of data.
+ *   If the PDU is not byte aligned, set the cipher_iov[i-1].size value
+ *   to the rounded-up value of the packet size. E.g., for a PDU size
+ *   of 253 bits, set the packet size to 32 bytes.
+ *
+ *   Ciphering is done in place. That is, the ciphering
+ *   input and output data are both in cipher_iov[i-1].addr for the i-th
+ *   packet.
+ *
+ *   For each packet, the input arguments bearer, direction,
+ *   ckey, and algorithm have to be the same. count_c is the ciphering
+ *   sequence number of the first packet; the 2nd packet's sequence
+ *   number is assumed to be count_c + 1, the 3rd packet's is
+ *   count_c + 2, and so on.
+ */
+
+#define MAX_NUM_V_MULTI_PKT 20
+struct cipher_iov {
+	unsigned char  *addr;
+	unsigned short  size;
+};
+
+struct qce_f8_varible_multi_pkt_req {
+	unsigned short    num_pkt;
+	struct cipher_iov cipher_iov[MAX_NUM_V_MULTI_PKT];
+	struct qce_f8_req qce_f8_req;
+};
+
+/**
+ * struct qce_f9_req - qce f9 request
+ * @message:	message
+ * @msize:	message size in bytes (including the last partial byte).
+ * @last_bits:	valid bits in the last byte of message.
+ * @mac_i:	32 bit message authentication code, to be returned.
+ * @fresh:	random 32 bit number, one per user.
+ * @count_i:	32 bit count-I integrity sequence number.
+ * @direction:	uplink or downlink.
+ * @ikey:	128 bits of integrity key,
+ *		ikey[0] bit 127-120, ikey[1] bit 119-112,.., ikey[15] bit 7-0.
+ * @algorithm:  Kasumi, or Snow3G.
+ */
+struct qce_f9_req {
+	uint8_t   *message;
+	uint16_t   msize;
+	uint8_t    last_bits;
+	uint32_t   mac_i;
+	uint32_t   fresh;
+	uint32_t   count_i;
+	enum qce_ota_dir_enum direction;
+	uint8_t    ikey[OTA_KEY_SIZE];
+	enum qce_ota_algo_enum algorithm;
+};
+
+#define QCOTA_IOC_MAGIC     0x85
+
+#define QCOTA_F8_REQ _IOWR(QCOTA_IOC_MAGIC, 1, struct qce_f8_req)
+#define QCOTA_F8_MPKT_REQ _IOWR(QCOTA_IOC_MAGIC, 2, struct qce_f8_multi_pkt_req)
+#define QCOTA_F9_REQ _IOWR(QCOTA_IOC_MAGIC, 3, struct qce_f9_req)
+#define QCOTA_F8_V_MPKT_REQ _IOWR(QCOTA_IOC_MAGIC, 4,\
+				struct qce_f8_varible_multi_pkt_req)
+
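+/*
+ * Illustrative sketch of a single-packet F8 ciphering request (fd is an
+ * open handle on the qcota device node, whose name is driver-specific):
+ *
+ *	struct qce_f8_req req = {0};
+ *
+ *	req.data_in = in;
+ *	req.data_out = out;
+ *	req.data_len = len;
+ *	req.count_c = count_c;
+ *	req.bearer = bearer;
+ *	memcpy(req.ckey, ckey, OTA_KEY_SIZE);
+ *	req.direction = QCE_OTA_DIR_UPLINK;
+ *	req.algorithm = QCE_OTA_ALGO_SNOW3G;
+ *	ioctl(fd, QCOTA_F8_REQ, &req);
+ */
+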
+#endif /* _UAPI_QCOTA_H */
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/include/uapi/linux/qrng.h	2019-01-22 16:16:28.583292409 +0100
@@ -0,0 +1,12 @@
+#ifndef _UAPI_QRNG_H_
+#define _UAPI_QRNG_H_
+
+#include <linux/types.h>
+#include <linux/ioctl.h>
+
+#define QRNG_IOC_MAGIC    0x100
+
+#define QRNG_IOCTL_RESET_BUS_BANDWIDTH\
+	_IO(QRNG_IOC_MAGIC, 1)
+
+#endif /* _UAPI_QRNG_H_ */
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/include/uapi/linux/qseecom.h	2019-01-22 16:16:28.583292409 +0100
@@ -0,0 +1,386 @@
+#ifndef _UAPI_QSEECOM_H_
+#define _UAPI_QSEECOM_H_
+
+#include <linux/types.h>
+#include <linux/ioctl.h>
+
+#define MAX_ION_FD  4
+#define MAX_APP_NAME_SIZE  64
+#define QSEECOM_HASH_SIZE  32
+/*
+ * struct qseecom_register_listener_req -
+ *      for register listener ioctl request
+ * @listener_id - service id (shared between userspace and QSE)
+ * @ifd_data_fd - ion handle
+ * @virt_sb_base - shared buffer base in user space
+ * @sb_size - shared buffer size
+ */
+struct qseecom_register_listener_req {
+	uint32_t listener_id; /* in */
+	int32_t ifd_data_fd; /* in */
+	void *virt_sb_base; /* in */
+	uint32_t sb_size; /* in */
+};
+
+/*
+ * struct qseecom_send_cmd_req - for send command ioctl request
+ * @cmd_req_len - command buffer length
+ * @cmd_req_buf - command buffer
+ * @resp_len - response buffer length
+ * @resp_buf - response buffer
+ */
+struct qseecom_send_cmd_req {
+	void *cmd_req_buf; /* in */
+	unsigned int cmd_req_len; /* in */
+	void *resp_buf; /* in/out */
+	unsigned int resp_len; /* in/out */
+};
+
+/*
+ * struct qseecom_ion_fd_info - ion fd handle data information
+ * @fd - ion handle to some memory allocated in user space
+ * @cmd_buf_offset - command buffer offset
+ */
+struct qseecom_ion_fd_info {
+	int32_t fd;
+	uint32_t cmd_buf_offset;
+};
+/*
+ * struct qseecom_send_modfd_cmd_req - for send command ioctl request
+ * @cmd_req_len - command buffer length
+ * @cmd_req_buf - command buffer
+ * @resp_len - response buffer length
+ * @resp_buf - response buffer
+ * @ifd_data_fd - ion handle to memory allocated in user space
+ * @cmd_buf_offset - command buffer offset
+ */
+struct qseecom_send_modfd_cmd_req {
+	void *cmd_req_buf; /* in */
+	unsigned int cmd_req_len; /* in */
+	void *resp_buf; /* in/out */
+	unsigned int resp_len; /* in/out */
+	struct qseecom_ion_fd_info ifd_data[MAX_ION_FD];
+};
+
+/*
+ * struct qseecom_send_resp_req - signal to continue the send_cmd req.
+ * Used as a trigger from the HLOS service to notify QSEECOM that it is done
+ * with its operation and to provide the response so that QSEECOM can continue
+ * the incomplete command execution
+ * @resp_len - Length of the response
+ * @resp_buf - Response buffer where the response of the cmd should go.
+ */
+struct qseecom_send_resp_req {
+	void *resp_buf; /* in */
+	unsigned int resp_len; /* in */
+};
+
+/*
+ * struct qseecom_load_img_req - for sending image length information and
+ * ion file descriptor to the qseecom driver. ion file descriptor is used
+ * for retrieving the ion file handle and in turn the physical address of
+ * the image location.
+ * @mdt_len - Length of the .mdt file in bytes.
+ * @img_len - Length of the .mdt + .b00 +..+.bxx image files in bytes
+ * @ifd_data_fd - Ion file descriptor used when allocating memory.
+ * @img_name - Name of the image.
+ * @app_arch - Architecture of the image, i.e. 32bit or 64bit app
+ * @app_id - Returned app id
+ */
+struct qseecom_load_img_req {
+	uint32_t mdt_len; /* in */
+	uint32_t img_len; /* in */
+	int32_t  ifd_data_fd; /* in */
+	char	 img_name[MAX_APP_NAME_SIZE]; /* in */
+	uint32_t app_arch; /* in */
+	uint32_t app_id; /* out*/
+};
+
+struct qseecom_set_sb_mem_param_req {
+	int32_t ifd_data_fd; /* in */
+	void *virt_sb_base; /* in */
+	uint32_t sb_len; /* in */
+};
+
+/*
+ * struct qseecom_qseos_version_req - get qseos version
+ * @qseos_version - version number
+ */
+struct qseecom_qseos_version_req {
+	unsigned int qseos_version; /* in */
+};
+
+/*
+ * struct qseecom_qseos_app_load_query - verify if app is loaded in qsee
+ * @app_name[MAX_APP_NAME_SIZE]-  name of the app.
+ * @app_id - app id.
+ */
+struct qseecom_qseos_app_load_query {
+	char app_name[MAX_APP_NAME_SIZE]; /* in */
+	uint32_t app_id; /* out */
+	uint32_t app_arch;
+};
+
+struct qseecom_send_svc_cmd_req {
+	uint32_t cmd_id;
+	void *cmd_req_buf; /* in */
+	unsigned int cmd_req_len; /* in */
+	void *resp_buf; /* in/out */
+	unsigned int resp_len; /* in/out */
+};
+
+enum qseecom_key_management_usage_type {
+	QSEOS_KM_USAGE_DISK_ENCRYPTION = 0x01,
+	QSEOS_KM_USAGE_FILE_ENCRYPTION = 0x02,
+	QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION = 0x03,
+	QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION = 0x04,
+	QSEOS_KM_USAGE_MAX
+};
+
+struct qseecom_create_key_req {
+	unsigned char hash32[QSEECOM_HASH_SIZE];
+	enum qseecom_key_management_usage_type usage;
+};
+
+struct qseecom_wipe_key_req {
+	enum qseecom_key_management_usage_type usage;
+	int wipe_key_flag;/* 1->remove key from storage (along with clear key) */
+			  /* 0->do not remove from storage (clear key) */
+};
+
+struct qseecom_update_key_userinfo_req {
+	unsigned char current_hash32[QSEECOM_HASH_SIZE];
+	unsigned char new_hash32[QSEECOM_HASH_SIZE];
+	enum qseecom_key_management_usage_type usage;
+};
+
+#define SHA256_DIGEST_LENGTH	(256/8)
+/*
+ * struct qseecom_save_partition_hash_req
+ * @partition_id - partition id.
+ * @hash[SHA256_DIGEST_LENGTH] -  sha256 digest.
+ */
+struct qseecom_save_partition_hash_req {
+	int partition_id; /* in */
+	char digest[SHA256_DIGEST_LENGTH]; /* in */
+};
+
+/*
+ * struct qseecom_is_es_activated_req
+ * @is_activated - 1=true , 0=false
+ */
+struct qseecom_is_es_activated_req {
+	int is_activated; /* out */
+};
+
+/*
+ * struct qseecom_mdtp_cipher_dip_req
+ * @in_buf - input buffer
+ * @in_buf_size - input buffer size
+ * @out_buf - output buffer
+ * @out_buf_size - output buffer size
+ * @direction - 0=encrypt, 1=decrypt
+ */
+struct qseecom_mdtp_cipher_dip_req {
+	uint8_t *in_buf;
+	uint32_t in_buf_size;
+	uint8_t *out_buf;
+	uint32_t out_buf_size;
+	uint32_t direction;
+};
+
+enum qseecom_bandwidth_request_mode {
+	INACTIVE = 0,
+	LOW,
+	MEDIUM,
+	HIGH,
+};
+
+/*
+ * struct qseecom_send_modfd_listener_resp - for send listener response ioctl
+ * @resp_buf_ptr - response buffer
+ * @resp_len - response buffer length
+ * @ifd_data - ion fd info for memory allocated in user space
+ */
+struct qseecom_send_modfd_listener_resp {
+	void *resp_buf_ptr; /* in */
+	unsigned int resp_len; /* in */
+	struct qseecom_ion_fd_info ifd_data[MAX_ION_FD]; /* in */
+};
+
+struct qseecom_qteec_req {
+	void    *req_ptr;
+	uint32_t    req_len;
+	void    *resp_ptr;
+	uint32_t    resp_len;
+};
+
+struct qseecom_qteec_modfd_req {
+	void    *req_ptr;
+	uint32_t    req_len;
+	void    *resp_ptr;
+	uint32_t    resp_len;
+	struct qseecom_ion_fd_info ifd_data[MAX_ION_FD];
+};
+
+struct qseecom_sg_entry {
+	uint32_t phys_addr;
+	uint32_t len;
+};
+
+struct qseecom_sg_entry_64bit {
+	uint64_t phys_addr;
+	uint32_t len;
+} __attribute__ ((packed));
+
+/*
+ * sg list buf format version
+ * 1: Legacy format to support only 512 SG list entries
+ * 2: new format to support > 512 entries
+ */
+#define QSEECOM_SG_LIST_BUF_FORMAT_VERSION_1	1
+#define QSEECOM_SG_LIST_BUF_FORMAT_VERSION_2	2
+
+struct qseecom_sg_list_buf_hdr_64bit {
+	struct qseecom_sg_entry_64bit  blank_entry;	/* must be all 0 */
+	uint32_t version;		/* sg list buf format version */
+	uint64_t new_buf_phys_addr;	/* PA of new buffer */
+	uint32_t nents_total;		/* Total number of SG entries */
+} __attribute__ ((packed));
+
+#define QSEECOM_SG_LIST_BUF_HDR_SZ_64BIT	\
+			sizeof(struct qseecom_sg_list_buf_hdr_64bit)
+
+#define MAX_CE_PIPE_PAIR_PER_UNIT 3
+#define INVALID_CE_INFO_UNIT_NUM 0xffffffff
+
+#define CE_PIPE_PAIR_USE_TYPE_FDE 0
+#define CE_PIPE_PAIR_USE_TYPE_PFE 1
+
+struct qseecom_ce_pipe_entry {
+	int valid;
+	unsigned int ce_num;
+	unsigned int ce_pipe_pair;
+};
+
+#define MAX_CE_INFO_HANDLE_SIZE 32
+struct qseecom_ce_info_req {
+	unsigned char handle[MAX_CE_INFO_HANDLE_SIZE];
+	unsigned int usage;
+	unsigned int unit_num;
+	unsigned int num_ce_pipe_entries;
+	struct qseecom_ce_pipe_entry ce_pipe_entry[MAX_CE_PIPE_PAIR_PER_UNIT];
+};
+
+#define SG_ENTRY_SZ		sizeof(struct qseecom_sg_entry)
+#define SG_ENTRY_SZ_64BIT	sizeof(struct qseecom_sg_entry_64bit)
+
+struct file;
+
+extern long qseecom_ioctl(struct file *file,
+					unsigned cmd, unsigned long arg);
+
+#define QSEECOM_IOC_MAGIC    0x97
+
+
+#define QSEECOM_IOCTL_REGISTER_LISTENER_REQ \
+	_IOWR(QSEECOM_IOC_MAGIC, 1, struct qseecom_register_listener_req)
+
+#define QSEECOM_IOCTL_UNREGISTER_LISTENER_REQ \
+	_IO(QSEECOM_IOC_MAGIC, 2)
+
+#define QSEECOM_IOCTL_SEND_CMD_REQ \
+	_IOWR(QSEECOM_IOC_MAGIC, 3, struct qseecom_send_cmd_req)
+
+#define QSEECOM_IOCTL_SEND_MODFD_CMD_REQ \
+	_IOWR(QSEECOM_IOC_MAGIC, 4, struct qseecom_send_modfd_cmd_req)
+
+#define QSEECOM_IOCTL_RECEIVE_REQ \
+	_IO(QSEECOM_IOC_MAGIC, 5)
+
+#define QSEECOM_IOCTL_SEND_RESP_REQ \
+	_IO(QSEECOM_IOC_MAGIC, 6)
+
+#define QSEECOM_IOCTL_LOAD_APP_REQ \
+	_IOWR(QSEECOM_IOC_MAGIC, 7, struct qseecom_load_img_req)
+
+#define QSEECOM_IOCTL_SET_MEM_PARAM_REQ \
+	_IOWR(QSEECOM_IOC_MAGIC, 8, struct qseecom_set_sb_mem_param_req)
+
+#define QSEECOM_IOCTL_UNLOAD_APP_REQ \
+	_IO(QSEECOM_IOC_MAGIC, 9)
+
+#define QSEECOM_IOCTL_GET_QSEOS_VERSION_REQ \
+	_IOWR(QSEECOM_IOC_MAGIC, 10, struct qseecom_qseos_version_req)
+
+#define QSEECOM_IOCTL_PERF_ENABLE_REQ \
+	_IO(QSEECOM_IOC_MAGIC, 11)
+
+#define QSEECOM_IOCTL_PERF_DISABLE_REQ \
+	_IO(QSEECOM_IOC_MAGIC, 12)
+
+#define QSEECOM_IOCTL_LOAD_EXTERNAL_ELF_REQ \
+	_IOWR(QSEECOM_IOC_MAGIC, 13, struct qseecom_load_img_req)
+
+#define QSEECOM_IOCTL_UNLOAD_EXTERNAL_ELF_REQ \
+	_IO(QSEECOM_IOC_MAGIC, 14)
+
+#define QSEECOM_IOCTL_APP_LOADED_QUERY_REQ \
+	_IOWR(QSEECOM_IOC_MAGIC, 15, struct qseecom_qseos_app_load_query)
+
+#define QSEECOM_IOCTL_SEND_CMD_SERVICE_REQ \
+	_IOWR(QSEECOM_IOC_MAGIC, 16, struct qseecom_send_svc_cmd_req)
+
+#define QSEECOM_IOCTL_CREATE_KEY_REQ \
+	_IOWR(QSEECOM_IOC_MAGIC, 17, struct qseecom_create_key_req)
+
+#define QSEECOM_IOCTL_WIPE_KEY_REQ \
+	_IOWR(QSEECOM_IOC_MAGIC, 18, struct qseecom_wipe_key_req)
+
+#define QSEECOM_IOCTL_SAVE_PARTITION_HASH_REQ \
+	_IOWR(QSEECOM_IOC_MAGIC, 19, struct qseecom_save_partition_hash_req)
+
+#define QSEECOM_IOCTL_IS_ES_ACTIVATED_REQ \
+	_IOWR(QSEECOM_IOC_MAGIC, 20, struct qseecom_is_es_activated_req)
+
+#define QSEECOM_IOCTL_SEND_MODFD_RESP \
+	_IOWR(QSEECOM_IOC_MAGIC, 21, struct qseecom_send_modfd_listener_resp)
+
+#define QSEECOM_IOCTL_SET_BUS_SCALING_REQ \
+	_IOWR(QSEECOM_IOC_MAGIC, 23, int)
+
+#define QSEECOM_IOCTL_UPDATE_KEY_USER_INFO_REQ \
+	_IOWR(QSEECOM_IOC_MAGIC, 24, struct qseecom_update_key_userinfo_req)
+
+#define QSEECOM_QTEEC_IOCTL_OPEN_SESSION_REQ \
+	_IOWR(QSEECOM_IOC_MAGIC, 30, struct qseecom_qteec_modfd_req)
+
+#define QSEECOM_QTEEC_IOCTL_CLOSE_SESSION_REQ \
+	_IOWR(QSEECOM_IOC_MAGIC, 31, struct qseecom_qteec_req)
+
+#define QSEECOM_QTEEC_IOCTL_INVOKE_MODFD_CMD_REQ \
+	_IOWR(QSEECOM_IOC_MAGIC, 32, struct qseecom_qteec_modfd_req)
+
+#define QSEECOM_QTEEC_IOCTL_REQUEST_CANCELLATION_REQ \
+	_IOWR(QSEECOM_IOC_MAGIC, 33, struct qseecom_qteec_modfd_req)
+
+#define QSEECOM_IOCTL_MDTP_CIPHER_DIP_REQ \
+	_IOWR(QSEECOM_IOC_MAGIC, 34, struct qseecom_mdtp_cipher_dip_req)
+
+#define QSEECOM_IOCTL_SEND_MODFD_CMD_64_REQ \
+	_IOWR(QSEECOM_IOC_MAGIC, 35, struct qseecom_send_modfd_cmd_req)
+
+#define QSEECOM_IOCTL_SEND_MODFD_RESP_64 \
+	_IOWR(QSEECOM_IOC_MAGIC, 36, struct qseecom_send_modfd_listener_resp)
+
+#define QSEECOM_IOCTL_GET_CE_PIPE_INFO \
+	_IOWR(QSEECOM_IOC_MAGIC, 40, struct qseecom_ce_info_req)
+
+#define QSEECOM_IOCTL_FREE_CE_PIPE_INFO \
+	_IOWR(QSEECOM_IOC_MAGIC, 41, struct qseecom_ce_info_req)
+
+#define QSEECOM_IOCTL_QUERY_CE_PIPE_INFO \
+	_IOWR(QSEECOM_IOC_MAGIC, 42, struct qseecom_ce_info_req)
+
+
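+/*
+ * Illustrative sketch of a command round-trip to a loaded app (listener
+ * and shared-buffer setup elided; the buffer layout is app-specific):
+ *
+ *	struct qseecom_send_cmd_req req = {
+ *		.cmd_req_buf = cmd, .cmd_req_len = cmd_len,
+ *		.resp_buf = rsp, .resp_len = rsp_len,
+ *	};
+ *
+ *	ioctl(fd, QSEECOM_IOCTL_SEND_CMD_REQ, &req);
+ */
+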
+#endif /* _UAPI_QSEECOM_H_ */
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/include/uapi/linux/rmnet_data.h	2019-01-22 16:16:28.587292445 +0100
@@ -0,0 +1,264 @@
+/*
+ * Copyright (c) 2013-2015, 2017 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * RMNET Data configuration specification
+ */
+
+#ifndef _RMNET_DATA_H_
+#define _RMNET_DATA_H_
+
+/* ***************** Constants ********************************************** */
+#define RMNET_LOCAL_LOGICAL_ENDPOINT -1
+
+#define RMNET_EGRESS_FORMAT__RESERVED__         (1<<0)
+#define RMNET_EGRESS_FORMAT_MAP                 (1<<1)
+#define RMNET_EGRESS_FORMAT_AGGREGATION         (1<<2)
+#define RMNET_EGRESS_FORMAT_MUXING              (1<<3)
+#define RMNET_EGRESS_FORMAT_MAP_CKSUMV3         (1<<4)
+#define RMNET_EGRESS_FORMAT_MAP_CKSUMV4         (1<<5)
+
+#define RMNET_INGRESS_FIX_ETHERNET              (1<<0)
+#define RMNET_INGRESS_FORMAT_MAP                (1<<1)
+#define RMNET_INGRESS_FORMAT_DEAGGREGATION      (1<<2)
+#define RMNET_INGRESS_FORMAT_DEMUXING           (1<<3)
+#define RMNET_INGRESS_FORMAT_MAP_COMMANDS       (1<<4)
+#define RMNET_INGRESS_FORMAT_MAP_CKSUMV3        (1<<5)
+#define RMNET_INGRESS_FORMAT_MAP_CKSUMV4        (1<<6)
+
+/* ***************** Netlink API ******************************************** */
+#define RMNET_NETLINK_PROTO 31
+#define RMNET_MAX_STR_LEN  16
+#define RMNET_NL_DATA_MAX_LEN 64
+
+#define RMNET_NETLINK_MSG_COMMAND    0
+#define RMNET_NETLINK_MSG_RETURNCODE 1
+#define RMNET_NETLINK_MSG_RETURNDATA 2
+
+struct rmnet_nl_msg_s {
+	uint16_t reserved;
+	uint16_t message_type;
+	uint16_t reserved2:14;
+	uint16_t crd:2;
+	union {
+		uint16_t arg_length;
+		uint16_t return_code;
+	};
+	union {
+		uint8_t data[RMNET_NL_DATA_MAX_LEN];
+		struct {
+			uint8_t  dev[RMNET_MAX_STR_LEN];
+			uint32_t flags;
+			uint16_t agg_size;
+			uint16_t agg_count;
+			uint8_t  tail_spacing;
+		} data_format;
+		struct {
+			uint8_t dev[RMNET_MAX_STR_LEN];
+			int32_t ep_id;
+			uint8_t operating_mode;
+			uint8_t next_dev[RMNET_MAX_STR_LEN];
+		} local_ep_config;
+		struct {
+			uint32_t id;
+			uint8_t  vnd_name[RMNET_MAX_STR_LEN];
+		} vnd;
+		struct {
+			uint32_t id;
+			uint32_t map_flow_id;
+			uint32_t tc_flow_id;
+		} flow_control;
+	};
+};
+
+enum rmnet_netlink_message_types_e {
+	/*
+	 * RMNET_NETLINK_ASSOCIATE_NETWORK_DEVICE - Register RMNET data driver
+	 *                                          on a particular device.
+	 * Args: char[] dev_name: Null terminated ASCII string, max length: 15
+	 * Returns: status code
+	 */
+	RMNET_NETLINK_ASSOCIATE_NETWORK_DEVICE,
+
+	/*
+	 * RMNET_NETLINK_UNASSOCIATE_NETWORK_DEVICE - Unregister RMNET data
+	 *                                            driver on a particular
+	 *                                            device.
+	 * Args: char[] dev_name: Null terminated ASCII string, max length: 15
+	 * Returns: status code
+	 */
+	RMNET_NETLINK_UNASSOCIATE_NETWORK_DEVICE,
+
+	/*
+	 * RMNET_NETLINK_GET_NETWORK_DEVICE_ASSOCIATED - Get if RMNET data
+	 *                                            driver is registered on a
+	 *                                            particular device.
+	 * Args: char[] dev_name: Null terminated ASCII string, max length: 15
+	 * Returns: 1 if registered, 0 if not
+	 */
+	RMNET_NETLINK_GET_NETWORK_DEVICE_ASSOCIATED,
+
+	/*
+	 * RMNET_NETLINK_SET_LINK_EGRESS_DATA_FORMAT - Sets the egress data
+	 *                                             format for a particular
+	 *                                             link.
+	 * Args: uint32_t egress_flags
+	 *       char[] dev_name: Null terminated ASCII string, max length: 15
+	 * Returns: status code
+	 */
+	RMNET_NETLINK_SET_LINK_EGRESS_DATA_FORMAT,
+
+	/*
+	 * RMNET_NETLINK_GET_LINK_EGRESS_DATA_FORMAT - Gets the egress data
+	 *                                             format for a particular
+	 *                                             link.
+	 * Args: char[] dev_name: Null terminated ASCII string, max length: 15
+	 * Returns: 4 bytes of data: uint32_t egress_flags
+	 */
+	RMNET_NETLINK_GET_LINK_EGRESS_DATA_FORMAT,
+
+	/*
+	 * RMNET_NETLINK_SET_LINK_INGRESS_DATA_FORMAT - Sets the ingress data
+	 *                                              format for a particular
+	 *                                              link.
+	 * Args: uint32_t ingress_flags
+	 *       char[] dev_name: Null terminated ASCII string, max length: 15
+	 * Returns: status code
+	 */
+	RMNET_NETLINK_SET_LINK_INGRESS_DATA_FORMAT,
+
+	/*
+	 * RMNET_NETLINK_GET_LINK_INGRESS_DATA_FORMAT - Gets the ingress data
+	 *                                              format for a particular
+	 *                                              link.
+	 * Args: char[] dev_name: Null terminated ASCII string, max length: 15
+	 * Returns: 4 bytes of data: uint32_t ingress_flags
+	 */
+	RMNET_NETLINK_GET_LINK_INGRESS_DATA_FORMAT,
+
+	/*
+	 * RMNET_NETLINK_SET_LOGICAL_EP_CONFIG - Sets the logical endpoint
+	 *                                       configuration for a particular
+	 *                                       link.
+	 * Args: char[] dev_name: Null terminated ASCII string, max length: 15
+	 *     int32_t logical_ep_id, valid values are -1 through 31
+	 *     uint8_t rmnet_mode: one of none, vnd, bridged
+	 *     char[] egress_dev_name: Egress device if operating in bridge mode
+	 * Returns: status code
+	 */
+	RMNET_NETLINK_SET_LOGICAL_EP_CONFIG,
+
+	/*
+	 * RMNET_NETLINK_UNSET_LOGICAL_EP_CONFIG - Un-sets the logical endpoint
+	 *                                       configuration for a particular
+	 *                                       link.
+	 * Args: char[] dev_name: Null terminated ASCII string, max length: 15
+	 *       int32_t logical_ep_id, valid values are -1 through 31
+	 * Returns: status code
+	 */
+	RMNET_NETLINK_UNSET_LOGICAL_EP_CONFIG,
+
+	/*
+	 * RMNET_NETLINK_GET_LOGICAL_EP_CONFIG - Gets the logical endpoint
+	 *                                       configuration for a particular
+	 *                                       link.
+	 * Args: char[] dev_name: Null terminated ASCII string, max length: 15
+	 *        int32_t logical_ep_id, valid values are -1 through 31
+	 * Returns: uint8_t rmnet_mode: one of none, vnd, bridged
+	 * char[] egress_dev_name: Egress device
+	 */
+	RMNET_NETLINK_GET_LOGICAL_EP_CONFIG,
+
+	/*
+	 * RMNET_NETLINK_NEW_VND - Creates a new virtual network device node
+	 * Args: int32_t node number
+	 * Returns: status code
+	 */
+	RMNET_NETLINK_NEW_VND,
+
+	/*
+	 * RMNET_NETLINK_NEW_VND_WITH_PREFIX - Creates a new virtual network
+	 *                                     device node with the specified
+	 *                                     prefix for the device name
+	 * Args: int32_t node number
+	 *       char[] vnd_name - Use as prefix
+	 * Returns: status code
+	 */
+	RMNET_NETLINK_NEW_VND_WITH_PREFIX,
+
+	/*
+	 * RMNET_NETLINK_GET_VND_NAME - Gets the string name of a VND from ID
+	 * Args: int32_t node number
+	 * Returns: char[] vnd_name
+	 */
+	RMNET_NETLINK_GET_VND_NAME,
+
+	/*
+	 * RMNET_NETLINK_FREE_VND - Removes virtual network device node
+	 * Args: int32_t node number
+	 * Returns: status code
+	 */
+	RMNET_NETLINK_FREE_VND,
+
+	/*
+	 * RMNET_NETLINK_ADD_VND_TC_FLOW - Add flow control handle on VND
+	 * Args: int32_t node number
+	 *       uint32_t MAP Flow Handle
+	 *       uint32_t TC Flow Handle
+	 * Returns: status code
+	 */
+	RMNET_NETLINK_ADD_VND_TC_FLOW,
+
+	/*
+	 * RMNET_NETLINK_DEL_VND_TC_FLOW - Removes flow control handle on VND
+	 * Args: int32_t node number
+	 *       uint32_t MAP Flow Handle
+	 * Returns: status code
+	 */
+	RMNET_NETLINK_DEL_VND_TC_FLOW,
+
+	/*
+	 * RMNET_NETLINK_NEW_VND_WITH_NAME - Creates a new virtual network
+	 *                                   device node with the specified
+	 *                                   device name
+	 * Args: int32_t node number
+	 *       char[] vnd_name - Use as name
+	 * Returns: status code
+	 */
+	RMNET_NETLINK_NEW_VND_WITH_NAME
+};
+#define RMNET_NETLINK_NEW_VND_WITH_NAME RMNET_NETLINK_NEW_VND_WITH_NAME
+
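+/*
+ * Illustrative sketch: composing a RMNET_NETLINK_NEW_VND command for a
+ * socket of protocol RMNET_NETLINK_PROTO (netlink header plumbing elided):
+ *
+ *	struct rmnet_nl_msg_s msg = {0};
+ *	int32_t node = 0;
+ *
+ *	msg.message_type = RMNET_NETLINK_NEW_VND;
+ *	msg.crd = RMNET_NETLINK_MSG_COMMAND;
+ *	msg.arg_length = sizeof(node);
+ *	memcpy(msg.data, &node, sizeof(node));
+ *	... send msg in a netlink datagram and read back return_code ...
+ */
+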
+enum rmnet_config_endpoint_modes_e {
+	/* Pass the frame up the stack with no modifications to skb->dev      */
+	RMNET_EPMODE_NONE,
+	/* Replace skb->dev to a virtual rmnet device and pass up the stack   */
+	RMNET_EPMODE_VND,
+	/* Pass the frame directly to another device with dev_queue_xmit().   */
+	RMNET_EPMODE_BRIDGE,
+	/* Must be the last item in the list                                  */
+	RMNET_EPMODE_LENGTH
+};
+
+enum rmnet_config_return_codes_e {
+	RMNET_CONFIG_OK,
+	RMNET_CONFIG_UNKNOWN_MESSAGE,
+	RMNET_CONFIG_UNKNOWN_ERROR,
+	RMNET_CONFIG_NOMEM,
+	RMNET_CONFIG_DEVICE_IN_USE,
+	RMNET_CONFIG_INVALID_REQUEST,
+	RMNET_CONFIG_NO_SUCH_DEVICE,
+	RMNET_CONFIG_BAD_ARGUMENTS,
+	RMNET_CONFIG_BAD_EGRESS_DEVICE,
+	RMNET_CONFIG_TC_HANDLE_FULL
+};
+
+#endif /* _RMNET_DATA_H_ */
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/include/uapi/linux/rmnet.h	2019-01-22 16:16:28.587292445 +0100
@@ -0,0 +1,213 @@
+/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * RMNET Data configuration specification
+ */
+
+#ifndef _RMNET_DATA_H_
+#define _RMNET_DATA_H_
+
+/* Netlink API */
+#define RMNET_NETLINK_PROTO 31
+#define RMNET_MAX_STR_LEN  16
+#define RMNET_NL_DATA_MAX_LEN 64
+
+#define RMNET_NETLINK_MSG_COMMAND    0
+#define RMNET_NETLINK_MSG_RETURNCODE 1
+#define RMNET_NETLINK_MSG_RETURNDATA 2
+
+/* Constants */
+#define RMNET_EGRESS_FORMAT__RESERVED__         (1<<0)
+#define RMNET_EGRESS_FORMAT_MAP                 (1<<1)
+#define RMNET_EGRESS_FORMAT_AGGREGATION         (1<<2)
+#define RMNET_EGRESS_FORMAT_MUXING              (1<<3)
+#define RMNET_EGRESS_FORMAT_MAP_CKSUMV3         (1<<4)
+#define RMNET_EGRESS_FORMAT_MAP_CKSUMV4         (1<<5)
+
+#define RMNET_INGRESS_FIX_ETHERNET              (1<<0)
+#define RMNET_INGRESS_FORMAT_MAP                (1<<1)
+#define RMNET_INGRESS_FORMAT_DEAGGREGATION      (1<<2)
+#define RMNET_INGRESS_FORMAT_DEMUXING           (1<<3)
+#define RMNET_INGRESS_FORMAT_MAP_COMMANDS       (1<<4)
+#define RMNET_INGRESS_FORMAT_MAP_CKSUMV3        (1<<5)
+#define RMNET_INGRESS_FORMAT_MAP_CKSUMV4        (1<<6)
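+
+/*
+ * For example, a MAP ingress configuration with de-aggregation and v4
+ * checksum offload ORs the format flags together (illustrative sketch;
+ * the htonl() follows the __be32 annotation of the flags field below):
+ *
+ *	__be32 ingress_flags = htonl(RMNET_INGRESS_FORMAT_MAP |
+ *				     RMNET_INGRESS_FORMAT_DEAGGREGATION |
+ *				     RMNET_INGRESS_FORMAT_MAP_CKSUMV4);
+ */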
+
+struct rmnet_nl_msg_s {
+	__be16 reserved;
+	__be16 message_type;
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+	__u16   crd:2,
+		reserved2:14;
+#elif defined(__BIG_ENDIAN_BITFIELD)
+	__u16   reserved2:14,
+		crd:2;
+#endif
+	union {
+		__be16 arg_length;
+		__be16 return_code;
+	};
+	union {
+		__u8 data[RMNET_NL_DATA_MAX_LEN];
+		struct {
+			__u8   dev[RMNET_MAX_STR_LEN];
+			__be32 flags;
+			__be16 agg_size;
+			__be16 agg_count;
+			__u8   tail_spacing;
+		} data_format;
+		struct {
+			__u8  dev[RMNET_MAX_STR_LEN];
+			__be32 ep_id;
+			__u8  operating_mode;
+			__u8  next_dev[RMNET_MAX_STR_LEN];
+		} local_ep_config;
+		struct {
+			__be32 id;
+			__u8   vnd_name[RMNET_MAX_STR_LEN];
+		} vnd;
+	};
+};
+
+/* RMNET_NETLINK_ASSOCIATE_NETWORK_DEVICE - Register RMNET data driver
+ *                                          on a particular device.
+ * Args: char[] dev_name: Null terminated ASCII string, max length: 15
+ * Returns: status code
+ */
+#define RMNET_NETLINK_ASSOCIATE_NETWORK_DEVICE 0
+
+/* RMNET_NETLINK_UNASSOCIATE_NETWORK_DEVICE - Unregister RMNET data
+ *                                            driver on a particular
+ *                                            device.
+ * Args: char[] dev_name: Null terminated ASCII string, max length: 15
+ * Returns: status code
+ */
+#define RMNET_NETLINK_UNASSOCIATE_NETWORK_DEVICE 1
+
+/* RMNET_NETLINK_GET_NETWORK_DEVICE_ASSOCIATED - Query whether the RMNET
+ *                                            data driver is registered on
+ *                                            a particular device.
+ * Args: char[] dev_name: Null terminated ASCII string, max length: 15
+ * Returns: 1 if registered, 0 if not
+ */
+#define RMNET_NETLINK_GET_NETWORK_DEVICE_ASSOCIATED 2
+
+/* RMNET_NETLINK_SET_LINK_EGRESS_DATA_FORMAT - Sets the egress data
+ *                                             format for a particular
+ *                                             link.
+ * Args: __be32 egress_flags
+ *       char[] dev_name: Null terminated ASCII string, max length: 15
+ * Returns: status code
+ */
+#define RMNET_NETLINK_SET_LINK_EGRESS_DATA_FORMAT 3
+
+/* RMNET_NETLINK_GET_LINK_EGRESS_DATA_FORMAT - Gets the egress data
+ *                                             format for a particular
+ *                                             link.
+ * Args: char[] dev_name: Null terminated ASCII string, max length: 15
+ * Returns: 4 bytes of data: __be32 egress_flags
+ */
+#define RMNET_NETLINK_GET_LINK_EGRESS_DATA_FORMAT 4
+
+/* RMNET_NETLINK_SET_LINK_INGRESS_DATA_FORMAT - Sets the ingress data
+ *                                              format for a particular
+ *                                              link.
+ * Args: __be32 ingress_flags
+ *       char[] dev_name: Null terminated ASCII string, max length: 15
+ * Returns: status code
+ */
+#define RMNET_NETLINK_SET_LINK_INGRESS_DATA_FORMAT 5
+
+/* RMNET_NETLINK_GET_LINK_INGRESS_DATA_FORMAT - Gets the ingress data
+ *                                              format for a particular
+ *                                              link.
+ * Args: char[] dev_name: Null terminated ASCII string, max length: 15
+ * Returns: 4 bytes of data: __be32 ingress_flags
+ */
+#define RMNET_NETLINK_GET_LINK_INGRESS_DATA_FORMAT 6
+
+/* RMNET_NETLINK_SET_LOGICAL_EP_CONFIG - Sets the logical endpoint
+ *                                       configuration for a particular
+ *                                       link.
+ * Args: char[] dev_name: Null terminated ASCII string, max length: 15
+ *       __be32 logical_ep_id, valid values are -1 through 31
+ *       __u8 rmnet_mode: one of none, vnd, bridged
+ *       char[] egress_dev_name: Egress device if operating in bridge mode
+ * Returns: status code
+ */
+#define RMNET_NETLINK_SET_LOGICAL_EP_CONFIG 7
+
+/* RMNET_NETLINK_UNSET_LOGICAL_EP_CONFIG - Un-sets the logical endpoint
+ *                                       configuration for a particular
+ *                                       link.
+ * Args: char[] dev_name: Null terminated ASCII string, max length: 15
+ *       __be32 logical_ep_id, valid values are -1 through 31
+ * Returns: status code
+ */
+#define RMNET_NETLINK_UNSET_LOGICAL_EP_CONFIG 8
+
+/* RMNET_NETLINK_GET_LOGICAL_EP_CONFIG - Gets the logical endpoint
+ *                                       configuration for a particular
+ *                                       link.
+ * Args: char[] dev_name: Null terminated ASCII string, max length: 15
+ *       __be32 logical_ep_id, valid values are -1 through 31
+ * Returns: __u8 rmnet_mode: one of none, vnd, bridged
+ *          char[] egress_dev_name: Egress device
+ */
+#define RMNET_NETLINK_GET_LOGICAL_EP_CONFIG 9
+
+/* RMNET_NETLINK_NEW_VND - Creates a new virtual network device node
+ * Args: __be32 node number
+ * Returns: status code
+ */
+#define RMNET_NETLINK_NEW_VND 10
+
+/* RMNET_NETLINK_NEW_VND_WITH_PREFIX - Creates a new virtual network
+ *                                     device node with the specified
+ *                                     prefix for the device name
+ * Args: __be32 node number
+ *       char[] vnd_name - Use as prefix
+ * Returns: status code
+ */
+#define RMNET_NETLINK_NEW_VND_WITH_PREFIX 11
+
+/* RMNET_NETLINK_GET_VND_NAME - Gets the string name of a VND from ID
+ * Args: __be32 node number
+ * Returns: char[] vnd_name
+ */
+#define RMNET_NETLINK_GET_VND_NAME 12
+
+/* RMNET_NETLINK_FREE_VND - Removes virtual network device node
+ * Args: __be32 node number
+ * Returns: status code
+ */
+#define RMNET_NETLINK_FREE_VND 13
+
+/* Pass the frame up the stack with no modifications to skb->dev */
+#define RMNET_EPMODE_NONE 0
+/* Replace skb->dev to a virtual rmnet device and pass up the stack */
+#define RMNET_EPMODE_VND 1
+/* Pass the frame directly to another device with dev_queue_xmit(). */
+#define RMNET_EPMODE_BRIDGE 2
+/* Must be the last item in the list */
+#define RMNET_EPMODE_LENGTH 3
+
+#define RMNET_CONFIG_OK 0
+#define RMNET_CONFIG_UNKNOWN_MESSAGE 1
+#define RMNET_CONFIG_UNKNOWN_ERROR 2
+#define RMNET_CONFIG_NOMEM 3
+#define RMNET_CONFIG_DEVICE_IN_USE 4
+#define RMNET_CONFIG_INVALID_REQUEST 5
+#define RMNET_CONFIG_NO_SUCH_DEVICE 6
+#define RMNET_CONFIG_BAD_ARGUMENTS 7
+#define RMNET_CONFIG_BAD_EGRESS_DEVICE 8
+#define RMNET_CONFIG_TC_HANDLE_FULL 9
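+
+/*
+ * Illustrative usage sketch (not part of the ABI): create virtual
+ * device node 0 through the netlink channel declared above.  The
+ * crd/message_type handling shown here is an assumption, and
+ * #includes and error handling are omitted; the rmnet_data driver
+ * is authoritative for the actual message processing.
+ *
+ *	int fd = socket(AF_NETLINK, SOCK_RAW, RMNET_NETLINK_PROTO);
+ *	struct {
+ *		struct nlmsghdr hdr;
+ *		struct rmnet_nl_msg_s msg;
+ *	} req;
+ *
+ *	memset(&req, 0, sizeof(req));
+ *	req.hdr.nlmsg_len = sizeof(req);
+ *	req.msg.message_type = RMNET_NETLINK_NEW_VND;
+ *	req.msg.crd = RMNET_NETLINK_MSG_COMMAND;
+ *	req.msg.vnd.id = 0;	// node number
+ *	send(fd, &req, sizeof(req), 0);
+ */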
+
+#endif /* _RMNET_DATA_H_ */
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/include/uapi/linux/rmnet_ipa_fd_ioctl.h	2019-01-22 16:16:28.587292445 +0100
@@ -0,0 +1,253 @@
+/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _RMNET_IPA_FD_IOCTL_H
+#define _RMNET_IPA_FD_IOCTL_H
+
+#include <linux/ioctl.h>
+#include <linux/ipa_qmi_service_v01.h>
+#include <linux/msm_ipa.h>
+
+/**
+ * unique magic number of the IPA_WAN device
+ */
+#define WAN_IOC_MAGIC 0x69
+
+#define WAN_IOCTL_ADD_FLT_RULE		0
+#define WAN_IOCTL_ADD_FLT_INDEX		1
+#define WAN_IOCTL_VOTE_FOR_BW_MBPS	2
+#define WAN_IOCTL_POLL_TETHERING_STATS  3
+#define WAN_IOCTL_SET_DATA_QUOTA        4
+#define WAN_IOCTL_SET_TETHER_CLIENT_PIPE 5
+#define WAN_IOCTL_QUERY_TETHER_STATS     6
+#define WAN_IOCTL_RESET_TETHER_STATS     7
+#define WAN_IOCTL_QUERY_DL_FILTER_STATS  8
+#define WAN_IOCTL_ADD_FLT_RULE_EX        9
+#define WAN_IOCTL_QUERY_TETHER_STATS_ALL  10
+#define WAN_IOCTL_ADD_UL_FLT_RULE          11
+#define WAN_IOCTL_ENABLE_PER_CLIENT_STATS    12
+#define WAN_IOCTL_QUERY_PER_CLIENT_STATS     13
+#define WAN_IOCTL_SET_LAN_CLIENT_INFO        14
+#define WAN_IOCTL_CLEAR_LAN_CLIENT_INFO      15
+#define WAN_IOCTL_SEND_LAN_CLIENT_MSG        16
+
+/* User space may not have this defined. */
+#ifndef IFNAMSIZ
+#define IFNAMSIZ 16
+#endif
+
+/**
+ * struct wan_ioctl_poll_tethering_stats - structure used for
+ *                                         WAN_IOCTL_POLL_TETHERING_STATS IOCTL.
+ *
+ * @polling_interval_secs: Polling interval in seconds.
+ * @reset_stats:           Indicate whether to reset the stats (use 1) or not.
+ *
+ * The structure to be used by user space to request that the tethering
+ * stats be polled. Setting the interval to 0 indicates that polling
+ * should stop.
+ */
+struct wan_ioctl_poll_tethering_stats {
+	uint64_t polling_interval_secs;
+	uint8_t  reset_stats;
+};
+
+/**
+ * struct wan_ioctl_set_data_quota - structure used for
+ *                                   WAN_IOCTL_SET_DATA_QUOTA IOCTL.
+ *
+ * @interface_name:  Name of the interface on which to set the quota.
+ * @quota_mbytes:    Quota (in Mbytes) for the above interface.
+ * @set_quota:       Indicate whether to set the quota (use 1) or
+ *                   unset the quota.
+ *
+ * The structure to be used by user space to request that a quota be
+ * set on a specific interface (identified by its name).
+ */
+struct wan_ioctl_set_data_quota {
+	char     interface_name[IFNAMSIZ];
+	uint64_t quota_mbytes;
+	uint8_t  set_quota;
+};
+
+struct wan_ioctl_set_tether_client_pipe {
+	/* enum of tether interface */
+	enum ipacm_client_enum ipa_client;
+	uint8_t reset_client;
+	uint32_t ul_src_pipe_len;
+	uint32_t ul_src_pipe_list[QMI_IPA_MAX_PIPES_V01];
+	uint32_t dl_dst_pipe_len;
+	uint32_t dl_dst_pipe_list[QMI_IPA_MAX_PIPES_V01];
+};
+
+struct wan_ioctl_query_tether_stats {
+	/* Name of the upstream interface */
+	char upstreamIface[IFNAMSIZ];
+	/* Name of the tethered interface */
+	char tetherIface[IFNAMSIZ];
+	/* enum of tether interface */
+	enum ipacm_client_enum ipa_client;
+	uint64_t ipv4_tx_packets;
+	uint64_t ipv4_tx_bytes;
+	uint64_t ipv4_rx_packets;
+	uint64_t ipv4_rx_bytes;
+	uint64_t ipv6_tx_packets;
+	uint64_t ipv6_tx_bytes;
+	uint64_t ipv6_rx_packets;
+	uint64_t ipv6_rx_bytes;
+};
+
+struct wan_ioctl_query_tether_stats_all {
+	/* Name of the upstream interface */
+	char upstreamIface[IFNAMSIZ];
+	/* enum of tether interface */
+	enum ipacm_client_enum ipa_client;
+	uint8_t reset_stats;
+	uint64_t tx_bytes;
+	uint64_t rx_bytes;
+};
+
+struct wan_ioctl_reset_tether_stats {
+	/* Name of the upstream interface; not currently supported */
+	char upstreamIface[IFNAMSIZ];
+	/* Indicate whether to reset the stats (use 1) or not */
+	uint8_t reset_stats;
+};
+
+struct wan_ioctl_query_dl_filter_stats {
+	/* Indicate whether to reset the filter stats (use 1) or not */
+	uint8_t reset_stats;
+	/* Modem response QMI */
+	struct ipa_get_data_stats_resp_msg_v01 stats_resp;
+	/* index pointing to the first firewall rule */
+	uint32_t index;
+};
+
+struct wan_ioctl_send_lan_client_msg {
+	/* Lan client info. */
+	struct ipa_lan_client_msg lan_client;
+	/* Event to indicate whether client is
+	 * connected or disconnected.
+	 */
+	enum ipa_per_client_stats_event client_event;
+};
+
+struct wan_ioctl_lan_client_info {
+	/* Device type of the client. */
+	enum ipacm_per_client_device_type device_type;
+	/* MAC Address of the client. */
+	uint8_t mac[IPA_MAC_ADDR_SIZE];
+	/* Init client. */
+	uint8_t client_init;
+	/* Client Index */
+	int8_t client_idx;
+	/* Header length of the client. */
+	uint8_t hdr_len;
+	/* Source pipe of the lan client. */
+	enum ipa_client_type ul_src_pipe;
+};
+
+struct wan_ioctl_per_client_info {
+	/* MAC Address of the client. */
+	uint8_t mac[IPA_MAC_ADDR_SIZE];
+	/* Ipv4 UL traffic bytes. */
+	uint64_t ipv4_tx_bytes;
+	/* Ipv4 DL traffic bytes. */
+	uint64_t ipv4_rx_bytes;
+	/* Ipv6 UL traffic bytes. */
+	uint64_t ipv6_tx_bytes;
+	/* Ipv6 DL traffic bytes. */
+	uint64_t ipv6_rx_bytes;
+};
+
+struct wan_ioctl_query_per_client_stats {
+	/* Device type of the client. */
+	enum ipacm_per_client_device_type device_type;
+	/* Indicate whether to reset the stats (use 1) or not */
+	uint8_t reset_stats;
+	/* Indicates whether client is disconnected. */
+	uint8_t disconnect_clnt;
+	/* Number of clients. */
+	uint8_t num_clients;
+	/* Client information. */
+	struct wan_ioctl_per_client_info
+		client_info[IPA_MAX_NUM_HW_PATH_CLIENTS];
+};
+
+#define WAN_IOC_ADD_FLT_RULE _IOWR(WAN_IOC_MAGIC, \
+		WAN_IOCTL_ADD_FLT_RULE, \
+		struct ipa_install_fltr_rule_req_msg_v01 *)
+
+#define WAN_IOC_ADD_FLT_RULE_INDEX _IOWR(WAN_IOC_MAGIC, \
+		WAN_IOCTL_ADD_FLT_INDEX, \
+		struct ipa_fltr_installed_notif_req_msg_v01 *)
+
+#define WAN_IOC_VOTE_FOR_BW_MBPS _IOWR(WAN_IOC_MAGIC, \
+		WAN_IOCTL_VOTE_FOR_BW_MBPS, \
+		uint32_t *)
+
+#define WAN_IOC_POLL_TETHERING_STATS _IOWR(WAN_IOC_MAGIC, \
+		WAN_IOCTL_POLL_TETHERING_STATS, \
+		struct wan_ioctl_poll_tethering_stats *)
+
+#define WAN_IOC_SET_DATA_QUOTA _IOWR(WAN_IOC_MAGIC, \
+		WAN_IOCTL_SET_DATA_QUOTA, \
+		struct wan_ioctl_set_data_quota *)
+
+#define WAN_IOC_SET_TETHER_CLIENT_PIPE _IOWR(WAN_IOC_MAGIC, \
+		WAN_IOCTL_SET_TETHER_CLIENT_PIPE, \
+		struct wan_ioctl_set_tether_client_pipe *)
+
+#define WAN_IOC_QUERY_TETHER_STATS _IOWR(WAN_IOC_MAGIC, \
+		WAN_IOCTL_QUERY_TETHER_STATS, \
+		struct wan_ioctl_query_tether_stats *)
+
+#define WAN_IOC_RESET_TETHER_STATS _IOWR(WAN_IOC_MAGIC, \
+		WAN_IOCTL_RESET_TETHER_STATS, \
+		struct wan_ioctl_reset_tether_stats *)
+
+#define WAN_IOC_QUERY_DL_FILTER_STATS _IOWR(WAN_IOC_MAGIC, \
+		WAN_IOCTL_QUERY_DL_FILTER_STATS, \
+		struct wan_ioctl_query_dl_filter_stats *)
+
+#define WAN_IOC_ADD_FLT_RULE_EX _IOWR(WAN_IOC_MAGIC, \
+		WAN_IOCTL_ADD_FLT_RULE_EX, \
+		struct ipa_install_fltr_rule_req_ex_msg_v01 *)
+
+#define WAN_IOC_QUERY_TETHER_STATS_ALL _IOWR(WAN_IOC_MAGIC, \
+		WAN_IOCTL_QUERY_TETHER_STATS_ALL, \
+		struct wan_ioctl_query_tether_stats_all *)
+
+#define WAN_IOC_ADD_UL_FLT_RULE _IOWR(WAN_IOC_MAGIC, \
+		WAN_IOCTL_ADD_UL_FLT_RULE, \
+		struct ipa_configure_ul_firewall_rules_req_msg_v01 *)
+
+#define WAN_IOC_ENABLE_PER_CLIENT_STATS _IOWR(WAN_IOC_MAGIC, \
+		WAN_IOCTL_ENABLE_PER_CLIENT_STATS, \
+		bool *)
+
+#define WAN_IOC_QUERY_PER_CLIENT_STATS _IOWR(WAN_IOC_MAGIC, \
+		WAN_IOCTL_QUERY_PER_CLIENT_STATS, \
+		struct wan_ioctl_query_per_client_stats *)
+
+#define WAN_IOC_SET_LAN_CLIENT_INFO _IOWR(WAN_IOC_MAGIC, \
+			WAN_IOCTL_SET_LAN_CLIENT_INFO, \
+			struct wan_ioctl_lan_client_info *)
+
+#define WAN_IOC_SEND_LAN_CLIENT_MSG _IOWR(WAN_IOC_MAGIC, \
+				WAN_IOCTL_SEND_LAN_CLIENT_MSG, \
+				struct wan_ioctl_send_lan_client_msg *)
+
+#define WAN_IOC_CLEAR_LAN_CLIENT_INFO _IOWR(WAN_IOC_MAGIC, \
+			WAN_IOCTL_CLEAR_LAN_CLIENT_INFO, \
+			struct wan_ioctl_lan_client_info *)
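+
+/*
+ * Illustrative usage sketch: cap an interface at 500 MB with
+ * WAN_IOC_SET_DATA_QUOTA.  The device node name below is an assumption
+ * (it is created by the IPA WAN driver, not by this header), and
+ * "rmnet_data0" is just an example interface name.
+ *
+ *	struct wan_ioctl_set_data_quota quota = {
+ *		.quota_mbytes = 500,
+ *		.set_quota = 1,
+ *	};
+ *	snprintf(quota.interface_name, sizeof(quota.interface_name),
+ *		 "rmnet_data0");
+ *	int fd = open("/dev/wwan_ioctl", O_RDWR);	// assumed node
+ *	ioctl(fd, WAN_IOC_SET_DATA_QUOTA, &quota);
+ */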
+#endif /* _RMNET_IPA_FD_IOCTL_H */
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/include/uapi/linux/rtl8367c_ioctl.h	2019-01-22 16:16:28.587292445 +0100
@@ -0,0 +1,22 @@
+#ifndef LINUX_RTL8367C_IOCTL_H_
+#define LINUX_RTL8367C_IOCTL_H_
+
+#include <linux/types.h>
+
+enum {
+	RTL8367C_IOC_OP_REG_READ,
+	RTL8367C_IOC_OP_REG_WRITE,
+	RTL8367C_IOC_OP_SPI_READ,
+	RTL8367C_IOC_OP_SPI_WRITE,
+	RTL8367C_IOC_OP_RESET,
+};
+
+struct rtl8367c_pioctl {
+	__u32		bus;
+	__u32		cs;
+	__u16		offset;
+	__u16		len;
+	void __user     *buf_addr;
+};
+
+#endif /* LINUX_RTL8367C_IOCTL_H_ */
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/include/uapi/linux/seemp_api.h	2019-01-22 16:16:28.587292445 +0100
@@ -0,0 +1,395 @@
+#ifndef _SEEMP_API_H_
+#define _SEEMP_API_H_
+
+#define SEEMP_API_kernel__oom_adjust_write                                     0
+#define SEEMP_API_kernel__sendto                                               1
+#define SEEMP_API_kernel__recvfrom                                             2
+#define SEEMP_API_View__onTouchEvent                                           3
+#define SEEMP_API_View__onKeyDown                                              4
+#define SEEMP_API_View__onKeyUp                                                5
+#define SEEMP_API_View__onTrackBallEvent                                       6
+#define SEEMP_API_android_provider_Settings__get_ANDROID_ID_                   7
+#define SEEMP_API_TelephonyManager__getDeviceId                                8
+#define SEEMP_API_TelephonyManager__getLine1Number                             9
+#define SEEMP_API_Telephony__query                                            10
+#define SEEMP_API_CallerInfo__getCallerId                                     11
+#define SEEMP_API_CallerInfo__getCallerInfo                                   12
+#define SEEMP_API_ContentResolver__query                                      13
+#define SEEMP_API_AccountManagerService__getPassword                          14
+#define SEEMP_API_AccountManagerService__getUserData                          15
+#define SEEMP_API_AccountManagerService__addAccount                           16
+#define SEEMP_API_AccountManagerService__removeAccount                        17
+#define SEEMP_API_AccountManagerService__setPassword                          18
+#define SEEMP_API_AccountManagerService__clearPassword                        19
+#define SEEMP_API_AccountManagerService__setUserData                          20
+#define SEEMP_API_AccountManagerService__editProperties                       21
+#define SEEMP_API_AccountManager__getPassword                                 22
+#define SEEMP_API_AccountManager__getUserData                                 23
+#define SEEMP_API_AccountManager__addAccountExplicitly                        24
+#define SEEMP_API_AccountManager__removeAccount                               25
+#define SEEMP_API_AccountManager__setPassword                                 26
+#define SEEMP_API_AccountManager__clearPassword                               27
+#define SEEMP_API_AccountManager__setUserData                                 28
+#define SEEMP_API_AccountManager__addAccount                                  29
+#define SEEMP_API_AccountManager__editProperties                              30
+#define SEEMP_API_AccountManager__doWork                                      31
+#define SEEMP_API_Browser__getAllBookmarks                                    32
+#define SEEMP_API_Browser__getAllVisitedUrls                                  33
+#define SEEMP_API_Browser__getVisitedLike                                     34
+#define SEEMP_API_Browser__getVisitedHistory                                  35
+#define SEEMP_API_Browser__requestAllIcons                                    36
+#define SEEMP_API_ContentResolver__insert                                     37
+#define SEEMP_API_CalendarContract__insert                                    38
+#define SEEMP_API_CalendarContract__alarmExists                               39
+#define SEEMP_API_CalendarContract__findNextAlarmTime                         40
+#define SEEMP_API_CalendarContract__query                                     41
+#define SEEMP_API_LocationManager___requestLocationUpdates                    42
+#define SEEMP_API_LocationManager__addGpsStatusListener                       43
+#define SEEMP_API_LocationManager__addNmeaListener                            44
+#define SEEMP_API_LocationManager__addProximityAlert                          45
+#define SEEMP_API_LocationManager__getLastKnownLocation                       46
+#define SEEMP_API_LocationManager__requestLocationUpdates                     47
+#define SEEMP_API_LocationManager__sendExtraCommand                           48
+#define SEEMP_API_TelephonyManager__getCellLocation                           49
+#define SEEMP_API_TelephonyManager__getNeighboringCellInfo                    50
+#define SEEMP_API_GeolocationService__registerForLocationUpdates              51
+#define SEEMP_API_GeolocationService__setEnableGps                            52
+#define SEEMP_API_GeolocationService__start                                   53
+#define SEEMP_API_WebChromeClient__onGeolocationPermissionsShowPrompt         54
+#define SEEMP_API_WifiManager__getScanResults                                 55
+#define SEEMP_API_adB__enable                                                56
+#define SEEMP_API_adB__disable                                                57
+#define SEEMP_API_adB__startDiscovery                                         58
+#define SEEMP_API_adB__listenUsingInsecureRfcommWithServiceRecord             59
+#define SEEMP_API_adB__listenUsingSecureRfcommWithServiceRecord               60
+#define SEEMP_API_adB__getBondedDevices                                       61
+#define SEEMP_API_adB__getRemoteDevice                                        62
+#define SEEMP_API_adB__getState                                               63
+#define SEEMP_API_adB__getProfileConnectionState                              64
+#define SEEMP_API_Camera__takePicture                                         65
+#define SEEMP_API_Camera__setPreviewCallback                                  66
+#define SEEMP_API_Camera__setPreviewCallbackWithBuffer                        67
+#define SEEMP_API_Camera__setOneShotPreviewCallback                           68
+#define SEEMP_API_android_media_MediaRecorder__start                          69
+#define SEEMP_API_AudioRecord__startRecording                                 70
+#define SEEMP_API_AudioRecord__start                                          71
+#define SEEMP_API_SpeechRecognizer__startListening                            72
+#define SEEMP_API_at_SmsManager__sendDataMessage                              73
+#define SEEMP_API_at_SmsManager__sendMultipartTextMessage                     74
+#define SEEMP_API_at_SmsManager__sendTextMessage                              75
+#define SEEMP_API_at_gsm_SmsManager__sendDataMessage                          76
+#define SEEMP_API_at_gsm_SmsManager__sendMultipartTextMessage                 77
+#define SEEMP_API_at_gsm_SmsManager__sendTextMessage                          78
+#define SEEMP_API_at_SmsManager__copyMessageToIcc                             79
+#define SEEMP_API_at_SmsManager__deleteMessageFromIcc                         80
+#define SEEMP_API_at_SmsManager__updateMessageOnIcc                           81
+#define SEEMP_API_at_gsm_SmsManager__copyMessageToSim                         82
+#define SEEMP_API_at_gsm_SmsManager__deleteMessageFromSim                     83
+#define SEEMP_API_at_gsm_SmsManager__updateMessageOnSim                       84
+#define SEEMP_API_at_gsm_SmsManager__getAllMessagesFromSim                    85
+#define SEEMP_API_ContactsContract__getLookupUri                              86
+#define SEEMP_API_ContactsContract__lookupContact                             87
+#define SEEMP_API_ContactsContract__openContactPhotoInputStream               88
+#define SEEMP_API_ContactsContract__getContactLookupUri                       89
+#define SEEMP_API_PackageManagerService__installPackage                       90
+#define SEEMP_API_URL__openConnection                                         91
+#define SEEMP_API_URI__URI                                                    92
+#define SEEMP_API_HttpGet__HttpGet                                            93
+#define SEEMP_API_HttpPut__HttpPut                                            94
+#define SEEMP_API_HttpPost__HttpPost                                          95
+#define SEEMP_API_apS__get_ACCELEROMETER_ROTATION_                           96
+#define SEEMP_API_apS__get_USER_ROTATION_                                    97
+#define SEEMP_API_apS__get_ADB_ENABLED_                                      98
+#define SEEMP_API_apS__get_DEBUG_APP_                                        99
+#define SEEMP_API_apS__get_WAIT_FOR_DEBUGGER_                               100
+#define SEEMP_API_apS__get_AIRPLANE_MODE_ON_                                101
+#define SEEMP_API_apS__get_AIRPLANE_MODE_RADIOS_                            102
+#define SEEMP_API_apS__get_ALARM_ALERT_                                     103
+#define SEEMP_API_apS__get_NEXT_ALARM_FORMATTED_                            104
+#define SEEMP_API_apS__get_ALWAYS_FINISH_ACTIVITIES_                        105
+#define SEEMP_API_apS__get_LOGGING_ID_                                      106
+#define SEEMP_API_apS__get_ANIMATOR_DURATION_SCALE_                         107
+#define SEEMP_API_apS__get_WINDOW_ANIMATION_SCALE_                          108
+#define SEEMP_API_apS__get_FONT_SCALE_                                      109
+#define SEEMP_API_apS__get_SCREEN_BRIGHTNESS_                               110
+#define SEEMP_API_apS__get_SCREEN_BRIGHTNESS_MODE_                          111
+#define SEEMP_API_apS__get_SCREEN_BRIGHTNESS_MODE_AUTOMATIC_                112
+#define SEEMP_API_apS__get_SCREEN_BRIGHTNESS_MODE_MANUAL_                   113
+#define SEEMP_API_apS__get_SCREEN_OFF_TIMEOUT_                              114
+#define SEEMP_API_apS__get_DIM_SCREEN_                                      115
+#define SEEMP_API_apS__get_TRANSITION_ANIMATION_SCALE_                      116
+#define SEEMP_API_apS__get_STAY_ON_WHILE_PLUGGED_IN_                        117
+#define SEEMP_API_apS__get_WALLPAPER_ACTIVITY_                              118
+#define SEEMP_API_apS__get_SHOW_PROCESSES_                                  119
+#define SEEMP_API_apS__get_SHOW_WEB_SUGGESTIONS_                            120
+#define SEEMP_API_apS__get_SHOW_GTALK_SERVICE_STATUS_                       121
+#define SEEMP_API_apS__get_USE_GOOGLE_MAIL_                                 122
+#define SEEMP_API_apS__get_AUTO_TIME_                                       123
+#define SEEMP_API_apS__get_AUTO_TIME_ZONE_                                  124
+#define SEEMP_API_apS__get_DATE_FORMAT_                                     125
+#define SEEMP_API_apS__get_TIME_12_24_                                      126
+#define SEEMP_API_apS__get_BLUETOOTH_DISCOVERABILITY_                       127
+#define SEEMP_API_apS__get_BLUETOOTH_DISCOVERABILITY_TIMEOUT_               128
+#define SEEMP_API_apS__get_BLUETOOTH_ON_                                    129
+#define SEEMP_API_apS__get_DEVICE_PROVISIONED_                              130
+#define SEEMP_API_apS__get_SETUP_WIZARD_HAS_RUN_                            131
+#define SEEMP_API_apS__get_DTMF_TONE_WHEN_DIALING_                          132
+#define SEEMP_API_apS__get_END_BUTTON_BEHAVIOR_                             133
+#define SEEMP_API_apS__get_RINGTONE_                                        134
+#define SEEMP_API_apS__get_MODE_RINGER_                                     135
+#define SEEMP_API_apS__get_INSTALL_NON_MARKET_APPS_                         136
+#define SEEMP_API_apS__get_LOCATION_PROVIDERS_ALLOWED_                      137
+#define SEEMP_API_apS__get_LOCK_PATTERN_ENABLED_                            138
+#define SEEMP_API_apS__get_LOCK_PATTERN_TACTILE_FEEDBACK_ENABLED_           139
+#define SEEMP_API_apS__get_LOCK_PATTERN_VISIBLE_                            140
+#define SEEMP_API_apS__get_NETWORK_PREFERENCE_                              141
+#define SEEMP_API_apS__get_DATA_ROAMING_                                    142
+#define SEEMP_API_apS__get_HTTP_PROXY_                                      143
+#define SEEMP_API_apS__get_PARENTAL_CONTROL_ENABLED_                        144
+#define SEEMP_API_apS__get_PARENTAL_CONTROL_LAST_UPDATE_                    145
+#define SEEMP_API_apS__get_PARENTAL_CONTROL_REDIRECT_URL_                   146
+#define SEEMP_API_apS__get_RADIO_BLUETOOTH_                                 147
+#define SEEMP_API_apS__get_RADIO_CELL_                                      148
+#define SEEMP_API_apS__get_RADIO_NFC_                                       149
+#define SEEMP_API_apS__get_RADIO_WIFI_                                      150
+#define SEEMP_API_apS__get_SYS_PROP_SETTING_VERSION_                        151
+#define SEEMP_API_apS__get_SETTINGS_CLASSNAME_                              152
+#define SEEMP_API_apS__get_TEXT_AUTO_CAPS_                                  153
+#define SEEMP_API_apS__get_TEXT_AUTO_PUNCTUATE_                             154
+#define SEEMP_API_apS__get_TEXT_AUTO_REPLACE_                               155
+#define SEEMP_API_apS__get_TEXT_SHOW_PASSWORD_                              156
+#define SEEMP_API_apS__get_USB_MASS_STORAGE_ENABLED_                        157
+#define SEEMP_API_apS__get_VIBRATE_ON_                                      158
+#define SEEMP_API_apS__get_HAPTIC_FEEDBACK_ENABLED_                         159
+#define SEEMP_API_apS__get_VOLUME_ALARM_                                    160
+#define SEEMP_API_apS__get_VOLUME_BLUETOOTH_SCO_                            161
+#define SEEMP_API_apS__get_VOLUME_MUSIC_                                    162
+#define SEEMP_API_apS__get_VOLUME_NOTIFICATION_                             163
+#define SEEMP_API_apS__get_VOLUME_RING_                                     164
+#define SEEMP_API_apS__get_VOLUME_SYSTEM_                                   165
+#define SEEMP_API_apS__get_VOLUME_VOICE_                                    166
+#define SEEMP_API_apS__get_SOUND_EFFECTS_ENABLED_                           167
+#define SEEMP_API_apS__get_MODE_RINGER_STREAMS_AFFECTED_                    168
+#define SEEMP_API_apS__get_MUTE_STREAMS_AFFECTED_                           169
+#define SEEMP_API_apS__get_NOTIFICATION_SOUND_                              170
+#define SEEMP_API_apS__get_APPEND_FOR_LAST_AUDIBLE_                         171
+#define SEEMP_API_apS__get_WIFI_MAX_DHCP_RETRY_COUNT_                       172
+#define SEEMP_API_apS__get_WIFI_MOBILE_DATA_TRANSITION_WAKELOCK_TIMEOUT_MS_ 173
+#define SEEMP_API_apS__get_WIFI_NETWORKS_AVAILABLE_NOTIFICATION_ON_         174
+#define SEEMP_API_apS__get_WIFI_NETWORKS_AVAILABLE_REPEAT_DELAY_            175
+#define SEEMP_API_apS__get_WIFI_NUM_OPEN_NETWORKS_KEPT_                     176
+#define SEEMP_API_apS__get_WIFI_ON_                                         177
+#define SEEMP_API_apS__get_WIFI_SLEEP_POLICY_                               178
+#define SEEMP_API_apS__get_WIFI_SLEEP_POLICY_DEFAULT_                       179
+#define SEEMP_API_apS__get_WIFI_SLEEP_POLICY_NEVER_                         180
+#define SEEMP_API_apS__get_WIFI_SLEEP_POLICY_NEVER_WHILE_PLUGGED_           181
+#define SEEMP_API_apS__get_WIFI_STATIC_DNS1_                                182
+#define SEEMP_API_apS__get_WIFI_STATIC_DNS2_                                183
+#define SEEMP_API_apS__get_WIFI_STATIC_GATEWAY_                             184
+#define SEEMP_API_apS__get_WIFI_STATIC_IP_                                  185
+#define SEEMP_API_apS__get_WIFI_STATIC_NETMASK_                             186
+#define SEEMP_API_apS__get_WIFI_USE_STATIC_IP_                              187
+#define SEEMP_API_apS__get_WIFI_WATCHDOG_ACCEPTABLE_PACKET_LOSS_PERCENTAGE_ 188
+#define SEEMP_API_apS__get_WIFI_WATCHDOG_AP_COUNT_                          189
+#define SEEMP_API_apS__get_WIFI_WATCHDOG_BACKGROUND_CHECK_DELAY_MS_         190
+#define SEEMP_API_apS__get_WIFI_WATCHDOG_BACKGROUND_CHECK_ENABLED_          191
+#define SEEMP_API_apS__get_WIFI_WATCHDOG_BACKGROUND_CHECK_TIMEOUT_MS_       192
+#define SEEMP_API_apS__get_WIFI_WATCHDOG_INITIAL_IGNORED_PING_COUNT_        193
+#define SEEMP_API_apS__get_WIFI_WATCHDOG_MAX_AP_CHECKS_                     194
+#define SEEMP_API_apS__get_WIFI_WATCHDOG_ON_                                195
+#define SEEMP_API_apS__get_WIFI_WATCHDOG_PING_COUNT_                        196
+#define SEEMP_API_apS__get_WIFI_WATCHDOG_PING_DELAY_MS_                     197
+#define SEEMP_API_apS__get_WIFI_WATCHDOG_PING_TIMEOUT_MS_                   198
+#define SEEMP_API_apS__put_ACCELEROMETER_ROTATION_                          199
+#define SEEMP_API_apS__put_USER_ROTATION_                                   200
+#define SEEMP_API_apS__put_ADB_ENABLED_                                     201
+#define SEEMP_API_apS__put_DEBUG_APP_                                       202
+#define SEEMP_API_apS__put_WAIT_FOR_DEBUGGER_                               203
+#define SEEMP_API_apS__put_AIRPLANE_MODE_ON_                                204
+#define SEEMP_API_apS__put_AIRPLANE_MODE_RADIOS_                            205
+#define SEEMP_API_apS__put_ALARM_ALERT_                                     206
+#define SEEMP_API_apS__put_NEXT_ALARM_FORMATTED_                            207
+#define SEEMP_API_apS__put_ALWAYS_FINISH_ACTIVITIES_                        208
+#define SEEMP_API_apS__put_ANDROID_ID_                                      209
+#define SEEMP_API_apS__put_LOGGING_ID_                                      210
+#define SEEMP_API_apS__put_ANIMATOR_DURATION_SCALE_                         211
+#define SEEMP_API_apS__put_WINDOW_ANIMATION_SCALE_                          212
+#define SEEMP_API_apS__put_FONT_SCALE_                                      213
+#define SEEMP_API_apS__put_SCREEN_BRIGHTNESS_                               214
+#define SEEMP_API_apS__put_SCREEN_BRIGHTNESS_MODE_                          215
+#define SEEMP_API_apS__put_SCREEN_BRIGHTNESS_MODE_AUTOMATIC_                216
+#define SEEMP_API_apS__put_SCREEN_BRIGHTNESS_MODE_MANUAL_                   217
+#define SEEMP_API_apS__put_SCREEN_OFF_TIMEOUT_                              218
+#define SEEMP_API_apS__put_DIM_SCREEN_                                      219
+#define SEEMP_API_apS__put_TRANSITION_ANIMATION_SCALE_                      220
+#define SEEMP_API_apS__put_STAY_ON_WHILE_PLUGGED_IN_                        221
+#define SEEMP_API_apS__put_WALLPAPER_ACTIVITY_                              222
+#define SEEMP_API_apS__put_SHOW_PROCESSES_                                  223
+#define SEEMP_API_apS__put_SHOW_WEB_SUGGESTIONS_                            224
+#define SEEMP_API_apS__put_SHOW_GTALK_SERVICE_STATUS_                       225
+#define SEEMP_API_apS__put_USE_GOOGLE_MAIL_                                 226
+#define SEEMP_API_apS__put_AUTO_TIME_                                       227
+#define SEEMP_API_apS__put_AUTO_TIME_ZONE_                                  228
+#define SEEMP_API_apS__put_DATE_FORMAT_                                     229
+#define SEEMP_API_apS__put_TIME_12_24_                                      230
+#define SEEMP_API_apS__put_BLUETOOTH_DISCOVERABILITY_                       231
+#define SEEMP_API_apS__put_BLUETOOTH_DISCOVERABILITY_TIMEOUT_               232
+#define SEEMP_API_apS__put_BLUETOOTH_ON_                                    233
+#define SEEMP_API_apS__put_DEVICE_PROVISIONED_                              234
+#define SEEMP_API_apS__put_SETUP_WIZARD_HAS_RUN_                            235
+#define SEEMP_API_apS__put_DTMF_TONE_WHEN_DIALING_                          236
+#define SEEMP_API_apS__put_END_BUTTON_BEHAVIOR_                             237
+#define SEEMP_API_apS__put_RINGTONE_                                        238
+#define SEEMP_API_apS__put_MODE_RINGER_                                     239
+#define SEEMP_API_apS__put_INSTALL_NON_MARKET_APPS_                         240
+#define SEEMP_API_apS__put_LOCATION_PROVIDERS_ALLOWED_                      241
+#define SEEMP_API_apS__put_LOCK_PATTERN_ENABLED_                            242
+#define SEEMP_API_apS__put_LOCK_PATTERN_TACTILE_FEEDBACK_ENABLED_           243
+#define SEEMP_API_apS__put_LOCK_PATTERN_VISIBLE_                            244
+#define SEEMP_API_apS__put_NETWORK_PREFERENCE_                              245
+#define SEEMP_API_apS__put_DATA_ROAMING_                                    246
+#define SEEMP_API_apS__put_HTTP_PROXY_                                      247
+#define SEEMP_API_apS__put_PARENTAL_CONTROL_ENABLED_                        248
+#define SEEMP_API_apS__put_PARENTAL_CONTROL_LAST_UPDATE_                    249
+#define SEEMP_API_apS__put_PARENTAL_CONTROL_REDIRECT_URL_                   250
+#define SEEMP_API_apS__put_RADIO_BLUETOOTH_                                 251
+#define SEEMP_API_apS__put_RADIO_CELL_                                      252
+#define SEEMP_API_apS__put_RADIO_NFC_                                       253
+#define SEEMP_API_apS__put_RADIO_WIFI_                                      254
+#define SEEMP_API_apS__put_SYS_PROP_SETTING_VERSION_                        255
+#define SEEMP_API_apS__put_SETTINGS_CLASSNAME_                              256
+#define SEEMP_API_apS__put_TEXT_AUTO_CAPS_                                  257
+#define SEEMP_API_apS__put_TEXT_AUTO_PUNCTUATE_                             258
+#define SEEMP_API_apS__put_TEXT_AUTO_REPLACE_                               259
+#define SEEMP_API_apS__put_TEXT_SHOW_PASSWORD_                              260
+#define SEEMP_API_apS__put_USB_MASS_STORAGE_ENABLED_                        261
+#define SEEMP_API_apS__put_VIBRATE_ON_                                      262
+#define SEEMP_API_apS__put_HAPTIC_FEEDBACK_ENABLED_                         263
+#define SEEMP_API_apS__put_VOLUME_ALARM_                                    264
+#define SEEMP_API_apS__put_VOLUME_BLUETOOTH_SCO_                            265
+#define SEEMP_API_apS__put_VOLUME_MUSIC_                                    266
+#define SEEMP_API_apS__put_VOLUME_NOTIFICATION_                             267
+#define SEEMP_API_apS__put_VOLUME_RING_                                     268
+#define SEEMP_API_apS__put_VOLUME_SYSTEM_                                   269
+#define SEEMP_API_apS__put_VOLUME_VOICE_                                    270
+#define SEEMP_API_apS__put_SOUND_EFFECTS_ENABLED_                           271
+#define SEEMP_API_apS__put_MODE_RINGER_STREAMS_AFFECTED_                    272
+#define SEEMP_API_apS__put_MUTE_STREAMS_AFFECTED_                           273
+#define SEEMP_API_apS__put_NOTIFICATION_SOUND_                              274
+#define SEEMP_API_apS__put_APPEND_FOR_LAST_AUDIBLE_                         275
+#define SEEMP_API_apS__put_WIFI_MAX_DHCP_RETRY_COUNT_                       276
+#define SEEMP_API_apS__put_WIFI_MOBILE_DATA_TRANSITION_WAKELOCK_TIMEOUT_MS_ 277
+#define SEEMP_API_apS__put_WIFI_NETWORKS_AVAILABLE_NOTIFICATION_ON_         278
+#define SEEMP_API_apS__put_WIFI_NETWORKS_AVAILABLE_REPEAT_DELAY_            279
+#define SEEMP_API_apS__put_WIFI_NUM_OPEN_NETWORKS_KEPT_                     280
+#define SEEMP_API_apS__put_WIFI_ON_                                         281
+#define SEEMP_API_apS__put_WIFI_SLEEP_POLICY_                               282
+#define SEEMP_API_apS__put_WIFI_SLEEP_POLICY_DEFAULT_                       283
+#define SEEMP_API_apS__put_WIFI_SLEEP_POLICY_NEVER_                         284
+#define SEEMP_API_apS__put_WIFI_SLEEP_POLICY_NEVER_WHILE_PLUGGED_           285
+#define SEEMP_API_apS__put_WIFI_STATIC_DNS1_                                286
+#define SEEMP_API_apS__put_WIFI_STATIC_DNS2_                                287
+#define SEEMP_API_apS__put_WIFI_STATIC_GATEWAY_                             288
+#define SEEMP_API_apS__put_WIFI_STATIC_IP_                                  289
+#define SEEMP_API_apS__put_WIFI_STATIC_NETMASK_                             290
+#define SEEMP_API_apS__put_WIFI_USE_STATIC_IP_                              291
+#define SEEMP_API_apS__put_WIFI_WATCHDOG_ACCEPTABLE_PACKET_LOSS_PERCENTAGE_ 292
+#define SEEMP_API_apS__put_WIFI_WATCHDOG_AP_COUNT_                          293
+#define SEEMP_API_apS__put_WIFI_WATCHDOG_BACKGROUND_CHECK_DELAY_MS_         294
+#define SEEMP_API_apS__put_WIFI_WATCHDOG_BACKGROUND_CHECK_ENABLED_          295
+#define SEEMP_API_apS__put_WIFI_WATCHDOG_BACKGROUND_CHECK_TIMEOUT_MS_       296
+#define SEEMP_API_apS__put_WIFI_WATCHDOG_INITIAL_IGNORED_PING_COUNT_        297
+#define SEEMP_API_apS__put_WIFI_WATCHDOG_MAX_AP_CHECKS_                     298
+#define SEEMP_API_apS__put_WIFI_WATCHDOG_ON_                                299
+#define SEEMP_API_apS__put_WIFI_WATCHDOG_PING_COUNT_                        300
+#define SEEMP_API_apS__put_WIFI_WATCHDOG_PING_DELAY_MS_                     301
+#define SEEMP_API_apS__put_WIFI_WATCHDOG_PING_TIMEOUT_MS_                   302
+#define SEEMP_API_Poll__setCumulativeWifiRxMBytes                           303
+#define SEEMP_API_Poll__setInstantaneousWifiRxMBytes                        304
+#define SEEMP_API_Poll__setCumulativeWifiRxPackets                          305
+#define SEEMP_API_Poll__setInstantaneousWifiRxPackets                       306
+#define SEEMP_API_Poll__setCumulativeWifiTxMBytes                           307
+#define SEEMP_API_Poll__setInstantaneousWifiTxMBytes                        308
+#define SEEMP_API_Poll__setCumulativeWifiTxPackets                          309
+#define SEEMP_API_Poll__setInstantaneousWifiTxPackets                       310
+#define SEEMP_API_Poll__setCumulativeWifiRxTcpMBytes                        311
+#define SEEMP_API_Poll__setInstantaneousWifiRxTcpMBytes                     312
+#define SEEMP_API_Poll__setCumulativeWifiRxTcpPackets                       313
+#define SEEMP_API_Poll__setInstantaneousWifiRxTcpPackets                    314
+#define SEEMP_API_Poll__setCumulativeWifiRxUdpMBytes                        315
+#define SEEMP_API_Poll__setInstantaneousWifiRxUdpMBytes                     316
+#define SEEMP_API_Poll__setCumulativeWifiRxUdpPackets                       317
+#define SEEMP_API_Poll__setInstantaneousWifiRxUdpPackets                    318
+#define SEEMP_API_Poll__setCumulativeWifiRxOtherMBytes                      319
+#define SEEMP_API_Poll__setInstantaneousWifiRxOtherMBytes                   320
+#define SEEMP_API_Poll__setCumulativeWifiRxOtherPackets                     321
+#define SEEMP_API_Poll__setInstantaneousWifiRxOtherPackets                  322
+#define SEEMP_API_Poll__setCumulativeWifiTxTcpMBytes                        323
+#define SEEMP_API_Poll__setInstantaneousWifiTxTcpMBytes                     324
+#define SEEMP_API_Poll__setCumulativeWifiTxTcpPackets                       325
+#define SEEMP_API_Poll__setInstantaneousWifiTxTcpPackets                    326
+#define SEEMP_API_Poll__setCumulativeWifiTxUdpMBytes                        327
+#define SEEMP_API_Poll__setInstantaneousWifiTxUdpMBytes                     328
+#define SEEMP_API_Poll__setCumulativeWifiTxUdpPackets                       329
+#define SEEMP_API_Poll__setInstantaneousWifiTxUdpPackets                    330
+#define SEEMP_API_Poll__setCumulativeWifiTxOtherMBytes                      331
+#define SEEMP_API_Poll__setInstantaneousWifiTxOtherMBytes                   332
+#define SEEMP_API_Poll__setCumulativeWifiTxOtherPackets                     333
+#define SEEMP_API_Poll__setInstantaneousWifiTxOtherPackets                  334
+#define SEEMP_API_Poll__setCumulativeMobileRxMBytes                         335
+#define SEEMP_API_Poll__setInstantaneousMobileRxMBytes                      336
+#define SEEMP_API_Poll__setCumulativeMobileRxPackets                        337
+#define SEEMP_API_Poll__setInstantaneousMobileRxPackets                     338
+#define SEEMP_API_Poll__setCumulativeMobileTxMBytes                         339
+#define SEEMP_API_Poll__setInstantaneousMobileTxMBytes                      340
+#define SEEMP_API_Poll__setCumulativeMobileTxPackets                        341
+#define SEEMP_API_Poll__setInstantaneousMobileTxPackets                     342
+#define SEEMP_API_Poll__setCumulativeMobileRxTcpMBytes                      343
+#define SEEMP_API_Poll__setInstantaneousMobileRxTcpMBytes                   344
+#define SEEMP_API_Poll__setCumulativeMobileRxTcpPackets                     345
+#define SEEMP_API_Poll__setInstantaneousMobileRxTcpPackets                  346
+#define SEEMP_API_Poll__setCumulativeMobileRxUdpMBytes                      347
+#define SEEMP_API_Poll__setInstantaneousMobileRxUdpMBytes                   348
+#define SEEMP_API_Poll__setCumulativeMobileRxUdpPackets                     349
+#define SEEMP_API_Poll__setInstantaneousMobileRxUdpPackets                  350
+#define SEEMP_API_Poll__setCumulativeMobileRxOtherMBytes                    351
+#define SEEMP_API_Poll__setInstantaneousMobileRxOtherMBytes                 352
+#define SEEMP_API_Poll__setCumulativeMobileRxOtherPackets                   353
+#define SEEMP_API_Poll__setInstantaneousMobileRxOtherPackets                354
+#define SEEMP_API_Poll__setCumulativeMobileTxTcpMBytes                      355
+#define SEEMP_API_Poll__setInstantaneousMobileTxTcpMBytes                   356
+#define SEEMP_API_Poll__setCumulativeMobileTxTcpPackets                     357
+#define SEEMP_API_Poll__setInstantaneousMobileTxTcpPackets                  358
+#define SEEMP_API_Poll__setCumulativeMobileTxUdpMBytes                      359
+#define SEEMP_API_Poll__setInstantaneousMobileTxUdpMBytes                   360
+#define SEEMP_API_Poll__setCumulativeMobileTxUdpPackets                     361
+#define SEEMP_API_Poll__setInstantaneousMobileTxUdpPackets                  362
+#define SEEMP_API_Poll__setCumulativeMobileTxOtherMBytes                    363
+#define SEEMP_API_Poll__setInstantaneousMobileTxOtherMBytes                 364
+#define SEEMP_API_Poll__setCumulativeMobileTxOtherPackets                   365
+#define SEEMP_API_Poll__setInstantaneousMobileTxOtherPackets                366
+#define SEEMP_API_Poll__setNumSockets                                       367
+#define SEEMP_API_Poll__setNumTcpStateListen                                368
+#define SEEMP_API_Poll__setNumTcpStateEstablished                           369
+#define SEEMP_API_Poll__setNumLocalIp                                       370
+#define SEEMP_API_Poll__setNumLocalPort                                     371
+#define SEEMP_API_Poll__setNumRemoteIp                                      372
+#define SEEMP_API_Poll__setNumRemotePort                                    373
+#define SEEMP_API_Poll__setNumRemoteTuple                                   374
+#define SEEMP_API_Poll__setNumInode                                         375
+#define SEEMP_API_Instrumentation__startActivitySync                        376
+#define SEEMP_API_Instrumentation__execStartActivity                        377
+#define SEEMP_API_Instrumentation__execStartActivitiesAsUser                378
+#define SEEMP_API_Instrumentation__execStartActivityAsCaller                379
+#define SEEMP_API_Instrumentation__execStartActivityFromAppTask             380
+#define SEEMP_API_ah_SystemSensorManager__registerListenerImpl              381
+#define SEEMP_API_ah_SystemSensorManager__unregisterListenerImpl            382
+#define SEEMP_API_WindowManagerImpl__addView                                383
+#define SEEMP_API_WindowManagerImpl__updateViewLayout                       384
+#define SEEMP_API_ActivityManagerService__applyOomAdjLocked                 385
+#define SEEMP_API_ProcessRecord__makeActive                                 386
+#define SEEMP_API_ProcessRecord__makeInactive                               387
+#define SEEMP_API_TelephonyManager__getSimSerialNumber                      388
+#define SEEMP_API_TelephonyManager__getSubscriberId                         389
+
+#endif /* _SEEMP_API_H_*/
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/include/uapi/linux/seemp_param_id.h	2019-01-22 16:16:28.587292445 +0100
@@ -0,0 +1,95 @@
+#ifndef _PARAM_ID_H_
+#define _PARAM_ID_H_
+
+#define PARAM_ID_LEN 0
+#define PARAM_ID_OOM_ADJ 1
+#define PARAM_ID_APP_UID 2
+#define PARAM_ID_APP_PID 3
+#define PARAM_ID_VALUE 4
+#define PARAM_ID_SIZE 5
+#define PARAM_ID_FD 6
+#define PARAM_ID_RATE 7
+#define PARAM_ID_SENSOR 8
+#define PARAM_ID_WINDOW_TYPE 9
+#define PARAM_ID_WINDOW_FLAG 10
+#define NUM_PARAM_IDS 11
+
+#ifndef PROVIDE_PARAM_ID
+int param_id_index(const char *param, const char *end);
+const char *get_param_id_name(int id);
+#else
+int param_id_index(const char *param, const char *end)
+{
+	int id  = -1;
+	int len = ((end != NULL) ? (end - param) : (int)strlen(param));
+
+	if ((len == 3) && !memcmp(param, "len", 3))
+		id = 0;
+	else if ((len == 7) && !memcmp(param, "oom_adj", 7))
+		id = 1;
+	else if ((len == 7) && !memcmp(param, "app_uid", 7))
+		id = 2;
+	else if ((len == 7) && !memcmp(param, "app_pid", 7))
+		id = 3;
+	else if ((len == 5) && !memcmp(param, "value", 5))
+		id = 4;
+	else if ((len == 4) && !memcmp(param, "size", 4))
+		id = 5;
+	else if ((len == 2) && !memcmp(param, "fd", 2))
+		id = 6;
+	else if ((len == 4) && !memcmp(param, "rate", 4))
+		id = 7;
+	else if ((len == 6) && !memcmp(param, "sensor", 6))
+		id = 8;
+	else if ((len == 11) && !memcmp(param, "window_type", 11))
+		id = 9;
+	else if ((len == 11) && !memcmp(param, "window_flag", 11))
+		id = 10;
+
+	return id;
+}
+
+const char *get_param_id_name(int id)
+{
+	const char *name = "?";
+
+	switch (id) {
+	case 0:
+		name = "len";
+		break;
+	case 1:
+		name = "oom_adj";
+		break;
+	case 2:
+		name = "app_uid";
+		break;
+	case 3:
+		name = "app_pid";
+		break;
+	case 4:
+		name = "value";
+		break;
+	case 5:
+		name = "size";
+		break;
+	case 6:
+		name = "fd";
+		break;
+	case 7:
+		name = "rate";
+		break;
+	case 8:
+		name = "sensor";
+		break;
+	case 9:
+		name = "window_type";
+		break;
+	case 10:
+		name = "window_flag";
+		break;
+	}
+	return name;
+}
+#endif /* PROVIDE_PARAM_ID */
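+
+/*
+ * Exactly one translation unit should define PROVIDE_PARAM_ID before
+ * including this header so that the definitions above are emitted only
+ * once; that file must also supply memcmp() and strlen().  Sketch:
+ *
+ *	#define PROVIDE_PARAM_ID
+ *	#include <linux/seemp_param_id.h>
+ *
+ *	int id = param_id_index("oom_adj", NULL);	// PARAM_ID_OOM_ADJ
+ */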
+
+#endif /* _PARAM_ID_H_ */
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/include/uapi/linux/smcinvoke.h	2019-01-22 16:16:28.587292445 +0100
@@ -0,0 +1,45 @@
+#ifndef _UAPI_SMCINVOKE_H_
+#define _UAPI_SMCINVOKE_H_
+
+#include <linux/types.h>
+#include <linux/ioctl.h>
+
+#define SMCINVOKE_USERSPACE_OBJ_NULL	-1
+
+struct smcinvoke_buf {
+	uint64_t	addr;
+	uint64_t	size;
+};
+
+struct smcinvoke_obj {
+	int64_t		fd;
+	int64_t		reserved;
+};
+
+union smcinvoke_arg {
+	struct smcinvoke_buf	b;
+	struct smcinvoke_obj	o;
+};
+
+/*
+ * struct smcinvoke_cmd_req: This structure is transparently sent to the TEE
+ * @op - Operation to be performed
+ * @counts - number of arguments passed
+ * @result - result of the invoke operation
+ * @argsize - size of each argument
+ * @args - pointer to a buffer holding all arguments
+ */
+struct smcinvoke_cmd_req {
+	uint32_t	op;
+	uint32_t	counts;
+	int32_t		result;
+	uint32_t	argsize;
+	uint64_t __user args;
+};
+
+#define SMCINVOKE_IOC_MAGIC    0x98
+
+#define SMCINVOKE_IOCTL_INVOKE_REQ \
+	_IOWR(SMCINVOKE_IOC_MAGIC, 1, struct smcinvoke_cmd_req)
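+
+/*
+ * Illustrative invocation sketch.  The "/dev/smcinvoke" node name is an
+ * assumption; only the request layout comes from this header.
+ *
+ *	union smcinvoke_arg args[1];
+ *	args[0].b.addr = (uint64_t)(uintptr_t)payload;
+ *	args[0].b.size = payload_len;
+ *
+ *	struct smcinvoke_cmd_req req = {
+ *		.op = op,
+ *		.counts = counts,
+ *		.argsize = sizeof(union smcinvoke_arg),
+ *		.args = (uint64_t)(uintptr_t)args,
+ *	};
+ *	int fd = open("/dev/smcinvoke", O_RDWR);
+ *	int rc = ioctl(fd, SMCINVOKE_IOCTL_INVOKE_REQ, &req);
+ *	// on success, req.result carries the remote result code
+ */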
+
+#endif /* _UAPI_SMCINVOKE_H_ */
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/include/uapi/linux/sockev.h	2019-01-22 16:16:28.587292445 +0100
@@ -0,0 +1,31 @@
+#ifndef _SOCKEV_H_
+#define _SOCKEV_H_
+
+#include <linux/types.h>
+#include <linux/netlink.h>
+#include <linux/socket.h>
+
+enum sknetlink_groups {
+	SKNLGRP_UNICAST,
+	SKNLGRP_SOCKEV,
+	__SKNLGRP_MAX
+};
+
+#define SOCKEV_STR_MAX 32
+
+/*
+ * Socket operation messages
+ */
+
+struct sknlsockevmsg {
+	__u8 event[SOCKEV_STR_MAX];
+	__u32 pid; /* (struct task_struct*)->pid */
+	__u16 skfamily; /* (struct socket*)->sk->sk_family */
+	__u8 skstate; /* (struct socket*)->sk->sk_state */
+	__u8 skprotocol; /* (struct socket*)->sk->sk_protocol */
+	__u16 sktype; /* (struct socket*)->sk->sk_type */
+	__u64 skflags; /* (struct socket*)->sk->sk_flags */
+};
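+
+/*
+ * Listener sketch.  The netlink protocol number used by the sockev
+ * driver is not defined in this header, so NETLINK_SOCKEV below is an
+ * assumption; the multicast group comes from the enum above.
+ *
+ *	int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_SOCKEV);
+ *	int grp = SKNLGRP_SOCKEV;
+ *	setsockopt(fd, SOL_NETLINK, NETLINK_ADD_MEMBERSHIP,
+ *		   &grp, sizeof(grp));
+ *
+ *	struct {
+ *		struct nlmsghdr hdr;
+ *		struct sknlsockevmsg msg;
+ *	} ev;
+ *	recv(fd, &ev, sizeof(ev), 0);	// ev.msg.event names the operation
+ */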
+
+#endif /* _SOCKEV_H_ */
+
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/include/uapi/linux/spcom.h	2019-01-22 16:16:28.591292481 +0100
@@ -0,0 +1,119 @@
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _UAPI_SPCOM_H_
+#define _UAPI_SPCOM_H_
+
+#include <linux/types.h>	/* uint32_t, bool */
+#ifndef BIT
+	#define BIT(x) (1 << (x))
+#endif
+#ifndef PAGE_SIZE
+	#define PAGE_SIZE 4096
+#endif
+
+/**
+ * @brief - Secure Processor Communication interface to user space spcomlib.
+ *
+ * Data and control commands are sent with the write() file operation.
+ * Data is received with the read() file operation.
+ * The next request size is obtained with a read() file operation using
+ * the special size SPCOM_GET_NEXT_REQUEST_SIZE.
+ */
+
+/*
+ * Maximum number of channels between the Secure Processor and HLOS,
+ * including predefined channels, like "sp_kernel".
+ */
+#define SPCOM_MAX_CHANNELS	0x20
+
+/* Maximum size (including null) for channel names */
+#define SPCOM_CHANNEL_NAME_SIZE		32
+
+/*
+ * A file read(fd, buf, size) with this size hints to the kernel that
+ * user space wants to read the next request's size.
+ * This size is bigger than both SPCOM_MAX_REQUEST_SIZE and
+ * SPCOM_MAX_RESPONSE_SIZE, so it is not a valid data size.
+ */
+#define SPCOM_GET_NEXT_REQUEST_SIZE	(PAGE_SIZE-1)
+
+/* Command Id between spcomlib and spcom driver, on write() */
+enum spcom_cmd_id {
+	SPCOM_CMD_LOAD_APP	= 0x4C4F4144, /* "LOAD" = 0x4C4F4144 */
+	SPCOM_CMD_RESET_SP	= 0x52455354, /* "REST" = 0x52455354 */
+	SPCOM_CMD_SEND		= 0x53454E44, /* "SEND" = 0x53454E44 */
+	SPCOM_CMD_SEND_MODIFIED	= 0x534E444D, /* "SNDM" = 0x534E444D */
+	SPCOM_CMD_LOCK_ION_BUF  = 0x4C4F434B, /* "LOCK" = 0x4C4F434B */
+	SPCOM_CMD_UNLOCK_ION_BUF = 0x554C434B, /* "ULCK" = 0x554C434B */
+	SPCOM_CMD_FSSR		= 0x46535352, /* "FSSR" = 0x46535352 */
+	SPCOM_CMD_CREATE_CHANNEL = 0x43524554, /* "CRET" = 0x43524554 */
+};
+
+/*
+ * @note: Event types that are always implicitly polled:
+ * POLLERR=0x08 | POLLHUP=0x10 | POLLNVAL=0x20
+ * so bits 3, 4 and 5 can't be used.
+ */
+enum spcom_poll_events {
+	SPCOM_POLL_LINK_STATE	= BIT(1),
+	SPCOM_POLL_CH_CONNECT	= BIT(2),
+	SPCOM_POLL_READY_FLAG	= BIT(14), /* output */
+	SPCOM_POLL_WAIT_FLAG	= BIT(15), /* if set, wait for the event */
+};
+
+/* Common Command structure between User Space and spcom driver, on write() */
+struct spcom_user_command {
+	enum spcom_cmd_id cmd_id;
+	uint32_t arg;
+} __packed;
+
+/* Command structure between User Space and spcom driver, on write() */
+struct spcom_send_command {
+	enum spcom_cmd_id cmd_id;
+	uint32_t timeout_msec;
+	uint32_t buf_size;
+	char buf[0]; /* Variable buffer size - must be last field */
+} __packed;
+
+/* Command structure between userspace spcomlib and spcom driver, on write() */
+struct spcom_user_create_channel_command {
+	enum spcom_cmd_id cmd_id;
+	char ch_name[SPCOM_CHANNEL_NAME_SIZE];
+} __packed;
+
+/* maximum number of ION buffers for a send-modified command */
+#define SPCOM_MAX_ION_BUF 4
+
+struct spcom_ion_info {
+	int32_t fd; /* ION buffer file descriptor; set to -1 if invalid */
+	uint32_t buf_offset; /* virtual address offset in request/response */
+};
+
+/* Pass this FD to unlock all ION buffers for the specific channel */
+#define SPCOM_ION_FD_UNLOCK_ALL	0xFFFF
+
+struct spcom_ion_handle {
+	int32_t fd;		/* File Descriptor associated with the buffer */
+};
+
+/* Command structure between User Space and spcom driver, on write() */
+struct spcom_user_send_modified_command {
+	enum spcom_cmd_id cmd_id;
+	struct spcom_ion_info ion_info[SPCOM_MAX_ION_BUF];
+	uint32_t timeout_msec;
+	uint32_t buf_size;
+	char buf[0]; /* Variable buffer size - must be last field */
+} __packed;
+
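+/*
+ * Receive-flow sketch for a channel file descriptor, following the
+ * read() semantics described at the top of this file (how the channel
+ * fd is obtained is driver policy, not part of this header):
+ *
+ *	char buf[SPCOM_GET_NEXT_REQUEST_SIZE];
+ *	int next = read(fd, buf, SPCOM_GET_NEXT_REQUEST_SIZE);	// size hint
+ *	int got  = read(fd, buf, next);				// the request
+ */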
+
+#endif /* _UAPI_SPCOM_H_ */
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/include/uapi/linux/sw_sync.h	2019-01-22 16:16:26.719275529 +0100
@@ -0,0 +1,32 @@
+/*
+ * Copyright (C) 2012 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _UAPI_LINUX_SW_SYNC_H
+#define _UAPI_LINUX_SW_SYNC_H
+
+#include <linux/types.h>
+
+struct sw_sync_create_fence_data {
+	__u32	value;
+	char	name[32];
+	__s32	fence; /* fd of new fence */
+};
+
+#define SW_SYNC_IOC_MAGIC	'W'
+
+#define SW_SYNC_IOC_CREATE_FENCE	_IOWR(SW_SYNC_IOC_MAGIC, 0,\
+		struct sw_sync_create_fence_data)
+#define SW_SYNC_IOC_INC			_IOW(SW_SYNC_IOC_MAGIC, 1, __u32)
+
+#endif /* _UAPI_LINUX_SW_SYNC_H */
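A hedged sketch of driving a sw_sync timeline from userspace: create a fence that signals when the timeline reaches 1, then advance the timeline by 1. The /dev/sw_sync node name and the header install path are assumptions (on some builds the timeline is exposed via debugfs instead).

	#include <fcntl.h>
	#include <string.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <linux/sw_sync.h>              /* assumed install path */

	static int sw_sync_demo(void)
	{
		/* node name is an assumption */
		int timeline = open("/dev/sw_sync", O_RDWR);
		struct sw_sync_create_fence_data data;
		__u32 inc = 1;

		if (timeline < 0)
			return -1;

		memset(&data, 0, sizeof(data));
		data.value = 1;                     /* signal when timeline hits 1 */
		strncpy(data.name, "demo", sizeof(data.name) - 1);
		if (ioctl(timeline, SW_SYNC_IOC_CREATE_FENCE, &data) < 0) {
			close(timeline);
			return -1;
		}
		/* data.fence now holds the new fence fd */

		ioctl(timeline, SW_SYNC_IOC_INC, &inc); /* fence signals here */
		close(data.fence);
		close(timeline);
		return 0;
	}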
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/include/uapi/linux/sync.h	2019-01-22 16:16:26.719275529 +0100
@@ -0,0 +1,97 @@
+/*
+ * Copyright (C) 2012 Google, Inc.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _UAPI_LINUX_SYNC_H
+#define _UAPI_LINUX_SYNC_H
+
+#include <linux/ioctl.h>
+#include <linux/types.h>
+
+/**
+ * struct sync_merge_data - data passed to merge ioctl
+ * @fd2:	file descriptor of second fence
+ * @name:	name of new fence
+ * @fence:	returns the fd of the new fence to userspace
+ */
+struct sync_merge_data {
+	__s32	fd2; /* fd of second fence */
+	char	name[32]; /* name of new fence */
+	__s32	fence; /* fd of the newly created fence */
+};
+
+/**
+ * struct sync_pt_info - detailed sync_pt information
+ * @len:		length of sync_pt_info including any driver_data
+ * @obj_name:		name of parent sync_timeline
+ * @driver_name:	name of driver implementing the parent
+ * @status:		status of the sync_pt 0:active 1:signaled <0:error
+ * @timestamp_ns:	timestamp of status change in nanoseconds
+ * @driver_data:	any driver dependent data
+ */
+struct sync_pt_info {
+	__u32	len;
+	char	obj_name[32];
+	char	driver_name[32];
+	__s32	status;
+	__u64	timestamp_ns;
+
+	__u8	driver_data[0];
+};
+
+/**
+ * struct sync_fence_info_data - data returned from fence info ioctl
+ * @len:	ioctl caller writes the size of the buffer it is passing in;
+ *		ioctl returns the length of the sync_fence_info_data returned
+ *		to userspace, including pt_info.
+ * @name:	name of fence
+ * @status:	status of fence. 1: signaled 0:active <0:error
+ * @pt_info:	a sync_pt_info struct for every sync_pt in the fence
+ */
+struct sync_fence_info_data {
+	__u32	len;
+	char	name[32];
+	__s32	status;
+
+	__u8	pt_info[0];
+};
+
+#define SYNC_IOC_MAGIC		'>'
+
+/**
+ * DOC: SYNC_IOC_WAIT - wait for a fence to signal
+ *
+ * Pass the timeout in milliseconds.  Waits indefinitely if timeout < 0.
+ */
+#define SYNC_IOC_WAIT		_IOW(SYNC_IOC_MAGIC, 0, __s32)
+
+/**
+ * DOC: SYNC_IOC_MERGE - merge two fences
+ *
+ * Takes a struct sync_merge_data.  Creates a new fence containing copies of
+ * the sync_pts in both the calling fd and sync_merge_data.fd2.  Returns the
+ * new fence's fd in sync_merge_data.fence
+ */
+#define SYNC_IOC_MERGE		_IOWR(SYNC_IOC_MAGIC, 1, struct sync_merge_data)
+
+/**
+ * DOC: SYNC_IOC_FENCE_INFO - get detailed information on a fence
+ *
+ * Takes a struct sync_fence_info_data with extra space allocated for pt_info.
+ * Caller should write the size of the buffer into len.  On return, len is
+ * updated to reflect the total size of the sync_fence_info_data including
+ * pt_info.
+ *
+ * pt_info is a buffer containing sync_pt_infos for every sync_pt in the fence.
+ * To iterate over the sync_pt_infos, use the sync_pt_info.len field.
+ */
+#define SYNC_IOC_FENCE_INFO	_IOWR(SYNC_IOC_MAGIC, 2,\
+	struct sync_fence_info_data)
+
+#endif /* _UAPI_LINUX_SYNC_H */
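A sketch of the SYNC_IOC_FENCE_INFO iteration contract described above, assuming a valid fence fd; the 4 KiB buffer size is arbitrary, and the header install path is assumed.

	#include <stdio.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <linux/sync.h>                 /* assumed install path */

	static void dump_fence(int fence_fd)
	{
		__s32 timeout_ms = 1000;            /* <0 would wait indefinitely */
		unsigned long long raw[512] = { 0 }; /* 4 KiB, 8-byte aligned */
		struct sync_fence_info_data *info =
			(struct sync_fence_info_data *)raw;
		char *p, *end;

		ioctl(fence_fd, SYNC_IOC_WAIT, &timeout_ms);

		info->len = sizeof(raw);            /* in: buffer size, out: used size */
		if (ioctl(fence_fd, SYNC_IOC_FENCE_INFO, info) < 0)
			return;

		printf("fence %s status %d\n", info->name, info->status);
		p = (char *)info->pt_info;
		end = (char *)raw + info->len;
		while (p < end) {                   /* hop pt to pt via each pt's len */
			struct sync_pt_info *pt = (struct sync_pt_info *)p;
			printf("  %s/%s status=%d\n", pt->obj_name,
			       pt->driver_name, pt->status);
			p += pt->len;
		}
	}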
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/include/uapi/linux/usb/usb_ctrl_qti.h	2019-10-29 09:26:25.553221870 +0100
@@ -0,0 +1,41 @@
+#ifndef __UAPI_LINUX_USB_CTRL_QTI_H
+#define __UAPI_LINUX_USB_CTRL_QTI_H
+
+#include <linux/types.h>
+#include <linux/ioctl.h>
+
+#define MAX_QTI_PKT_SIZE 2048
+
+#define QTI_CTRL_IOCTL_MAGIC	'r'
+#define QTI_CTRL_GET_LINE_STATE	_IOR(QTI_CTRL_IOCTL_MAGIC, 2, int)
+#define QTI_CTRL_EP_LOOKUP _IOR(QTI_CTRL_IOCTL_MAGIC, 3, struct ep_info)
+#define QTI_CTRL_MODEM_OFFLINE _IO(QTI_CTRL_IOCTL_MAGIC, 4)
+#define QTI_CTRL_MODEM_ONLINE _IO(QTI_CTRL_IOCTL_MAGIC, 5)
+
+enum peripheral_ep_type {
+	DATA_EP_TYPE_RESERVED	= 0x0,
+	DATA_EP_TYPE_HSIC	= 0x1,
+	DATA_EP_TYPE_HSUSB	= 0x2,
+	DATA_EP_TYPE_PCIE	= 0x3,
+	DATA_EP_TYPE_EMBEDDED	= 0x4,
+	DATA_EP_TYPE_BAM_DMUX	= 0x5,
+};
+
+struct peripheral_ep_info {
+	enum peripheral_ep_type		ep_type;
+	__u32				peripheral_iface_id;
+};
+
+struct ipa_ep_pair {
+	__u32 cons_pipe_num;
+	__u32 prod_pipe_num;
+};
+
+struct ep_info {
+	struct peripheral_ep_info	ph_ep_info;
+	struct ipa_ep_pair		ipa_ep_pair;
+
+};
+
+#endif
+
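A hedged sketch of the endpoint-lookup flow these ioctls suggest. The /dev/rmnet_ctrl node name is only a guess at the QTI control device, and the header install path is assumed.

	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <unistd.h>
	#include <linux/usb/usb_ctrl_qti.h>     /* assumed install path */

	static int qti_ep_lookup_demo(void)
	{
		/* node name is a guess at the QTI control device */
		int fd = open("/dev/rmnet_ctrl", O_RDWR);
		struct ep_info ep;
		int line_state;

		if (fd < 0)
			return -1;
		if (ioctl(fd, QTI_CTRL_GET_LINE_STATE, &line_state) == 0)
			printf("line state: %d\n", line_state);
		if (ioctl(fd, QTI_CTRL_EP_LOOKUP, &ep) == 0)
			printf("ep type %d iface %u cons %u prod %u\n",
			       ep.ph_ep_info.ep_type,
			       ep.ph_ep_info.peripheral_iface_id,
			       ep.ipa_ep_pair.cons_pipe_num,
			       ep.ipa_ep_pair.prod_pipe_num);
		close(fd);
		return 0;
	}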
diff -Nruw linux-4.4.115-fbx/include/uapi/media./ais/Kbuild linux-4.4.115-fbx/include/uapi/media/ais/Kbuild
--- linux-4.4.115-fbx/include/uapi/media./ais/Kbuild	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/include/uapi/media/ais/Kbuild	2019-01-22 16:16:28.599292554 +0100
@@ -0,0 +1,7 @@
+header-y += msm_ais.h
+header-y += msm_ais_buf_mgr.h
+header-y += msm_ais_isp.h
+header-y += msm_ais_ispif.h
+header-y += msm_ais_sensor.h
+header-y += msm_ais_sensor_sdk.h
+header-y += msm_ais_mgr.h
diff -Nruw linux-4.4.115-fbx/include/uapi/media./ais/msm_ais_buf_mgr.h linux-4.4.115-fbx/include/uapi/media/ais/msm_ais_buf_mgr.h
--- linux-4.4.115-fbx/include/uapi/media./ais/msm_ais_buf_mgr.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/include/uapi/media/ais/msm_ais_buf_mgr.h	2019-01-22 16:16:28.599292554 +0100
@@ -0,0 +1,66 @@
+#ifndef __UAPI_MEDIA_MSM_AIS_BUF_MGR_H__
+#define __UAPI_MEDIA_MSM_AIS_BUF_MGR_H__
+
+#include <media/ais/msm_ais.h>
+
+enum msm_camera_buf_mngr_cmd {
+	MSM_CAMERA_BUF_MNGR_CONT_MAP,
+	MSM_CAMERA_BUF_MNGR_CONT_UNMAP,
+	MSM_CAMERA_BUF_MNGR_CONT_MAX,
+};
+
+enum msm_camera_buf_mngr_buf_type {
+	MSM_CAMERA_BUF_MNGR_BUF_PLANAR,
+	MSM_CAMERA_BUF_MNGR_BUF_USER,
+	MSM_CAMERA_BUF_MNGR_BUF_INVALID,
+};
+
+struct msm_buf_mngr_info {
+	uint32_t session_id;
+	uint32_t stream_id;
+	uint32_t frame_id;
+	struct timeval timestamp;
+	uint32_t index;
+	uint32_t reserved;
+	enum msm_camera_buf_mngr_buf_type type;
+	struct msm_camera_user_buf_cont_t user_buf;
+};
+
+struct msm_buf_mngr_main_cont_info {
+	uint32_t session_id;
+	uint32_t stream_id;
+	enum msm_camera_buf_mngr_cmd cmd;
+	uint32_t cnt;
+	int32_t cont_fd;
+};
+
+#define MSM_CAMERA_BUF_MNGR_IOCTL_ID_BASE 0
+#define MSM_CAMERA_BUF_MNGR_IOCTL_ID_GET_BUF_BY_IDX 1
+
+#define VIDIOC_MSM_BUF_MNGR_GET_BUF \
+	_IOWR('V', BASE_VIDIOC_PRIVATE + 33, struct msm_buf_mngr_info)
+
+#define VIDIOC_MSM_BUF_MNGR_PUT_BUF \
+	_IOWR('V', BASE_VIDIOC_PRIVATE + 34, struct msm_buf_mngr_info)
+
+#define VIDIOC_MSM_BUF_MNGR_BUF_DONE \
+	_IOWR('V', BASE_VIDIOC_PRIVATE + 35, struct msm_buf_mngr_info)
+
+#define VIDIOC_MSM_BUF_MNGR_CONT_CMD \
+	_IOWR('V', BASE_VIDIOC_PRIVATE + 36, struct msm_buf_mngr_main_cont_info)
+
+#define VIDIOC_MSM_BUF_MNGR_INIT \
+	_IOWR('V', BASE_VIDIOC_PRIVATE + 37, struct msm_buf_mngr_info)
+
+#define VIDIOC_MSM_BUF_MNGR_DEINIT \
+	_IOWR('V', BASE_VIDIOC_PRIVATE + 38, struct msm_buf_mngr_info)
+
+#define VIDIOC_MSM_BUF_MNGR_FLUSH \
+	_IOWR('V', BASE_VIDIOC_PRIVATE + 39, struct msm_buf_mngr_info)
+
+#define VIDIOC_MSM_BUF_MNGR_IOCTL_CMD \
+	_IOWR('V', BASE_VIDIOC_PRIVATE + 40, \
+	struct msm_camera_private_ioctl_arg)
+
+#endif
+
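A minimal sketch of the get/done/put round-trip the buffer-manager ioctls imply. buf_mgr_fd is assumed to be an open fd on the buffer-manager v4l2 subdev, and the session/stream ids come from elsewhere in the stack; this is illustrative, not a validated sequence.

	#include <stdint.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <media/ais/msm_ais_buf_mgr.h>  /* assumed install path */

	static int buf_roundtrip(int buf_mgr_fd, uint32_t session, uint32_t stream)
	{
		struct msm_buf_mngr_info info;

		memset(&info, 0, sizeof(info));
		info.session_id = session;
		info.stream_id = stream;
		info.type = MSM_CAMERA_BUF_MNGR_BUF_PLANAR;

		if (ioctl(buf_mgr_fd, VIDIOC_MSM_BUF_MNGR_GET_BUF, &info) < 0)
			return -1;
		/* ... fill the buffer selected by info.index ... */
		ioctl(buf_mgr_fd, VIDIOC_MSM_BUF_MNGR_BUF_DONE, &info);
		return ioctl(buf_mgr_fd, VIDIOC_MSM_BUF_MNGR_PUT_BUF, &info);
	}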
diff -Nruw linux-4.4.115-fbx/include/uapi/media./ais/msm_ais.h linux-4.4.115-fbx/include/uapi/media/ais/msm_ais.h
--- linux-4.4.115-fbx/include/uapi/media./ais/msm_ais.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/include/uapi/media/ais/msm_ais.h	2019-01-22 16:16:28.599292554 +0100
@@ -0,0 +1,231 @@
+#ifndef __UAPI_MSM_AIS__
+#define __UAPI_MSM_AIS__
+
+#include <linux/videodev2.h>
+#include <linux/types.h>
+#include <linux/ioctl.h>
+
+#define MSM_CAM_LOGSYNC_FILE_NAME "logsync"
+#define MSM_CAM_LOGSYNC_FILE_BASEDIR "camera"
+
+#define MSM_CAM_V4L2_IOCTL_NOTIFY \
+	_IOW('V', BASE_VIDIOC_PRIVATE + 30, struct msm_v4l2_event_data)
+
+#define MSM_CAM_V4L2_IOCTL_NOTIFY_META \
+	_IOW('V', BASE_VIDIOC_PRIVATE + 31, struct msm_v4l2_event_data)
+
+#define MSM_CAM_V4L2_IOCTL_CMD_ACK \
+	_IOW('V', BASE_VIDIOC_PRIVATE + 32, struct msm_v4l2_event_data)
+
+#define MSM_CAM_V4L2_IOCTL_NOTIFY_ERROR \
+	_IOW('V', BASE_VIDIOC_PRIVATE + 33, struct msm_v4l2_event_data)
+
+#define MSM_CAM_V4L2_IOCTL_NOTIFY_DEBUG \
+	_IOW('V', BASE_VIDIOC_PRIVATE + 34, struct msm_v4l2_event_data)
+
+#define MSM_CAM_V4L2_IOCTL_DAEMON_DISABLED \
+	_IOW('V', BASE_VIDIOC_PRIVATE + 35, struct msm_v4l2_event_data)
+
+#define QCAMERA_DEVICE_GROUP_ID	1
+#define QCAMERA_VNODE_GROUP_ID	2
+#define MSM_CAMERA_NAME			"msm_camera"
+#define MSM_CONFIGURATION_NAME	"msm_config"
+
+#define MSM_CAMERA_SUBDEV_UNKNOWN      0
+#define MSM_CAMERA_SUBDEV_CSIPHY       1
+#define MSM_CAMERA_SUBDEV_CSID         2
+#define MSM_CAMERA_SUBDEV_ISPIF        3
+#define MSM_CAMERA_SUBDEV_VFE          4
+#define MSM_CAMERA_SUBDEV_AXI          5
+#define MSM_CAMERA_SUBDEV_VPE          6
+#define MSM_CAMERA_SUBDEV_SENSOR       7
+#define MSM_CAMERA_SUBDEV_ACTUATOR     8
+#define MSM_CAMERA_SUBDEV_EEPROM       9
+#define MSM_CAMERA_SUBDEV_CPP          10
+#define MSM_CAMERA_SUBDEV_CCI          11
+#define MSM_CAMERA_SUBDEV_LED_FLASH    12
+#define MSM_CAMERA_SUBDEV_STROBE_FLASH 13
+#define MSM_CAMERA_SUBDEV_BUF_MNGR     14
+#define MSM_CAMERA_SUBDEV_SENSOR_INIT  15
+#define MSM_CAMERA_SUBDEV_OIS          16
+#define MSM_CAMERA_SUBDEV_FLASH        17
+#define MSM_CAMERA_SUBDEV_IR_LED       18
+#define MSM_CAMERA_SUBDEV_IR_CUT       19
+#define MSM_CAMERA_SUBDEV_EXT          20
+#define MSM_CAMERA_SUBDEV_AIS_MNGR     21
+
+#define MSM_MAX_CAMERA_SENSORS  5
+
+/* The macro below puts an upper limit on the number of buffers
+ * requested per stream. If data structure corruption produces an
+ * extremely large buffer count, we return an error to avoid
+ * integer overflow. Group processing can have at most 9 groups
+ * of 8 buffers each. This value may become configurable in the
+ * future.
+ */
+#define MSM_CAMERA_MAX_STREAM_BUF 72
+
+/* Max batch size of processing */
+#define MSM_CAMERA_MAX_USER_BUFF_CNT 16
+
+/* feature base */
+#define MSM_CAMERA_FEATURE_BASE     0x00010000
+#define MSM_CAMERA_FEATURE_SHUTDOWN (MSM_CAMERA_FEATURE_BASE + 1)
+
+#define MSM_CAMERA_STATUS_BASE      0x00020000
+#define MSM_CAMERA_STATUS_FAIL      (MSM_CAMERA_STATUS_BASE + 1)
+#define MSM_CAMERA_STATUS_SUCCESS   (MSM_CAMERA_STATUS_BASE + 2)
+
+/* event type */
+#define MSM_CAMERA_V4L2_EVENT_TYPE (V4L2_EVENT_PRIVATE_START + 0x00002000)
+
+/* event id */
+#define MSM_CAMERA_EVENT_MIN    0
+#define MSM_CAMERA_NEW_SESSION  (MSM_CAMERA_EVENT_MIN + 1)
+#define MSM_CAMERA_DEL_SESSION  (MSM_CAMERA_EVENT_MIN + 2)
+#define MSM_CAMERA_SET_PARM     (MSM_CAMERA_EVENT_MIN + 3)
+#define MSM_CAMERA_GET_PARM     (MSM_CAMERA_EVENT_MIN + 4)
+#define MSM_CAMERA_MAPPING_CFG  (MSM_CAMERA_EVENT_MIN + 5)
+#define MSM_CAMERA_MAPPING_SES  (MSM_CAMERA_EVENT_MIN + 6)
+#define MSM_CAMERA_MSM_NOTIFY   (MSM_CAMERA_EVENT_MIN + 7)
+#define MSM_CAMERA_EVENT_MAX    (MSM_CAMERA_EVENT_MIN + 8)
+
+/* data.command */
+#define MSM_CAMERA_PRIV_S_CROP			(V4L2_CID_PRIVATE_BASE + 1)
+#define MSM_CAMERA_PRIV_G_CROP			(V4L2_CID_PRIVATE_BASE + 2)
+#define MSM_CAMERA_PRIV_G_FMT			(V4L2_CID_PRIVATE_BASE + 3)
+#define MSM_CAMERA_PRIV_S_FMT			(V4L2_CID_PRIVATE_BASE + 4)
+#define MSM_CAMERA_PRIV_TRY_FMT			(V4L2_CID_PRIVATE_BASE + 5)
+#define MSM_CAMERA_PRIV_METADATA		(V4L2_CID_PRIVATE_BASE + 6)
+#define MSM_CAMERA_PRIV_QUERY_CAP		(V4L2_CID_PRIVATE_BASE + 7)
+#define MSM_CAMERA_PRIV_STREAM_ON		(V4L2_CID_PRIVATE_BASE + 8)
+#define MSM_CAMERA_PRIV_STREAM_OFF		(V4L2_CID_PRIVATE_BASE + 9)
+#define MSM_CAMERA_PRIV_NEW_STREAM		(V4L2_CID_PRIVATE_BASE + 10)
+#define MSM_CAMERA_PRIV_DEL_STREAM		(V4L2_CID_PRIVATE_BASE + 11)
+#define MSM_CAMERA_PRIV_SHUTDOWN		(V4L2_CID_PRIVATE_BASE + 12)
+#define MSM_CAMERA_PRIV_STREAM_INFO_SYNC \
+	(V4L2_CID_PRIVATE_BASE + 13)
+#define MSM_CAMERA_PRIV_G_SESSION_ID (V4L2_CID_PRIVATE_BASE + 14)
+#define MSM_CAMERA_PRIV_CMD_MAX  20
+
+/* data.status - success */
+#define MSM_CAMERA_CMD_SUCCESS      0x00000001
+#define MSM_CAMERA_BUF_MAP_SUCCESS  0x00000002
+
+/* data.status - error */
+#define MSM_CAMERA_ERR_EVT_BASE 0x00010000
+#define MSM_CAMERA_ERR_CMD_FAIL		(MSM_CAMERA_ERR_EVT_BASE + 1)
+#define MSM_CAMERA_ERR_MAPPING		(MSM_CAMERA_ERR_EVT_BASE + 2)
+#define MSM_CAMERA_ERR_DEVICE_BUSY	(MSM_CAMERA_ERR_EVT_BASE + 3)
+
+/* The msm_v4l2_event_data structure must fit in the
+ * v4l2_event.u.data field and therefore must not exceed
+ * 16 32-bit words (64 bytes).
+ */
+struct msm_v4l2_event_data {
+	/*word 0*/
+	unsigned int command;
+	/*word 1*/
+	unsigned int status;
+	/*word 2*/
+	unsigned int session_id;
+	/*word 3*/
+	unsigned int stream_id;
+	/*word 4*/
+	unsigned int map_op;
+	/*word 5*/
+	unsigned int map_buf_idx;
+	/*word 6*/
+	unsigned int notify;
+	/*word 7*/
+	unsigned int arg_value;
+	/*word 8*/
+	unsigned int ret_value;
+	/*word 9*/
+	unsigned int v4l2_event_type;
+	/*word 10*/
+	unsigned int v4l2_event_id;
+	/*word 11*/
+	unsigned int handle;
+	/*word 12*/
+	unsigned int nop6;
+	/*word 13*/
+	unsigned int nop7;
+	/*word 14*/
+	unsigned int nop8;
+	/*word 15*/
+	unsigned int nop9;
+};
+
+/* map to v4l2_format.fmt.raw_data */
+struct msm_v4l2_format_data {
+	enum v4l2_buf_type type;
+	unsigned int width;
+	unsigned int height;
+	unsigned int pixelformat; /* FOURCC */
+	unsigned char num_planes;
+	unsigned int plane_sizes[VIDEO_MAX_PLANES];
+};
+
+/* MSM four-character code (FOURCC) */
+#define msm_v4l2_fourcc(a, b, c, d)\
+	((__u32)(a) | ((__u32)(b) << 8) | ((__u32)(c) << 16) |\
+	((__u32)(d) << 24))
+
+/* Composite stats */
+#define MSM_V4L2_PIX_FMT_STATS_COMB v4l2_fourcc('S', 'T', 'C', 'M')
+/* AEC stats */
+#define MSM_V4L2_PIX_FMT_STATS_AE   v4l2_fourcc('S', 'T', 'A', 'E')
+/* AF stats */
+#define MSM_V4L2_PIX_FMT_STATS_AF   v4l2_fourcc('S', 'T', 'A', 'F')
+/* AWB stats */
+#define MSM_V4L2_PIX_FMT_STATS_AWB  v4l2_fourcc('S', 'T', 'W', 'B')
+/* IHIST stats */
+#define MSM_V4L2_PIX_FMT_STATS_IHST v4l2_fourcc('I', 'H', 'S', 'T')
+/* Column count stats */
+#define MSM_V4L2_PIX_FMT_STATS_CS   v4l2_fourcc('S', 'T', 'C', 'S')
+/* Row count stats */
+#define MSM_V4L2_PIX_FMT_STATS_RS   v4l2_fourcc('S', 'T', 'R', 'S')
+/* Bayer Grid stats */
+#define MSM_V4L2_PIX_FMT_STATS_BG   v4l2_fourcc('S', 'T', 'B', 'G')
+/* Bayer focus stats */
+#define MSM_V4L2_PIX_FMT_STATS_BF   v4l2_fourcc('S', 'T', 'B', 'F')
+/* Bayer hist stats */
+#define MSM_V4L2_PIX_FMT_STATS_BHST v4l2_fourcc('B', 'H', 'S', 'T')
+
+enum smmu_attach_mode {
+	NON_SECURE_MODE = 0x01,
+	SECURE_MODE = 0x02,
+	MAX_PROTECTION_MODE = 0x03,
+};
+
+struct msm_camera_smmu_attach_type {
+	enum smmu_attach_mode attach;
+};
+
+struct msm_camera_user_buf_cont_t {
+	unsigned int buf_cnt;
+	unsigned int buf_idx[MSM_CAMERA_MAX_USER_BUFF_CNT];
+};
+
+struct msm_camera_return_buf {
+	__u32 index;
+	__u32 reserved;
+};
+
+#define MSM_CAMERA_PRIV_IOCTL_ID_BASE 0
+#define MSM_CAMERA_PRIV_IOCTL_ID_RETURN_BUF 1
+
+struct msm_camera_private_ioctl_arg {
+	__u32 id;
+	__u32 size;
+	__u32 result;
+	__u32 reserved;
+	__u64 ioctl_ptr;
+};
+
+#define VIDIOC_MSM_CAMERA_PRIVATE_IOCTL_CMD \
+	_IOWR('V', BASE_VIDIOC_PRIVATE, struct msm_camera_private_ioctl_arg)
+
+#endif /* __UAPI_MSM_AIS__ */
+
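A sketch of subscribing to and draining one of the private events above. That NEW_SESSION is delivered on this particular node is an assumption, and real code would poll() for POLLPRI before VIDIOC_DQEVENT; the header install path is assumed.

	#include <string.h>
	#include <sys/ioctl.h>
	#include <linux/videodev2.h>
	#include <media/ais/msm_ais.h>          /* assumed install path */

	static int wait_new_session(int cam_fd)
	{
		struct v4l2_event_subscription sub;
		struct v4l2_event ev;
		struct msm_v4l2_event_data *data;

		memset(&sub, 0, sizeof(sub));
		sub.type = MSM_CAMERA_V4L2_EVENT_TYPE;
		sub.id = MSM_CAMERA_NEW_SESSION;
		if (ioctl(cam_fd, VIDIOC_SUBSCRIBE_EVENT, &sub) < 0)
			return -1;

		/* real code would poll(POLLPRI) before dequeueing */
		if (ioctl(cam_fd, VIDIOC_DQEVENT, &ev) < 0)
			return -1;

		data = (struct msm_v4l2_event_data *)ev.u.data;
		return (int)data->session_id;
	}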
diff -Nruw linux-4.4.115-fbx/include/uapi/media./ais/msm_ais_isp.h linux-4.4.115-fbx/include/uapi/media/ais/msm_ais_isp.h
--- linux-4.4.115-fbx/include/uapi/media./ais/msm_ais_isp.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/include/uapi/media/ais/msm_ais_isp.h	2019-10-29 09:26:25.553221870 +0100
@@ -0,0 +1,1108 @@
+#ifndef __UAPI_MSM_AIS_ISP__
+#define __UAPI_MSM_AIS_ISP__
+
+#include <linux/videodev2.h>
+
+#define MAX_PLANES_PER_STREAM 3
+#define MAX_NUM_STREAM 7
+
+#define ISP_VERSION_48        48
+#define ISP_VERSION_47        47
+#define ISP_VERSION_46        46
+#define ISP_VERSION_44        44
+#define ISP_VERSION_40        40
+#define ISP_VERSION_32        32
+#define ISP_NATIVE_BUF_BIT    (0x10000 << 0)
+#define ISP0_BIT              (0x10000 << 1)
+#define ISP1_BIT              (0x10000 << 2)
+#define ISP_META_CHANNEL_BIT  (0x10000 << 3)
+#define ISP_SCRATCH_BUF_BIT   (0x10000 << 4)
+#define ISP_OFFLINE_STATS_BIT (0x10000 << 5)
+#define ISP_SVHDR_IN_BIT      (0x10000 << 6) /* RDI hw stream for SVHDR */
+#define ISP_SVHDR_OUT_BIT     (0x10000 << 7) /* SVHDR output bufq stream*/
+
+#define ISP_STATS_STREAM_BIT  0x80000000
+
+#define INTERLACE_SUPPORT_OLD_REMOVED
+
+struct msm_vfe_cfg_cmd_list;
+
+enum ISP_START_PIXEL_PATTERN {
+	ISP_BAYER_RGRGRG,
+	ISP_BAYER_GRGRGR,
+	ISP_BAYER_BGBGBG,
+	ISP_BAYER_GBGBGB,
+	ISP_YUV_YCbYCr,
+	ISP_YUV_YCrYCb,
+	ISP_YUV_CbYCrY,
+	ISP_YUV_CrYCbY,
+	ISP_PIX_PATTERN_MAX
+};
+
+enum msm_vfe_plane_fmt {
+	Y_PLANE,
+	CB_PLANE,
+	CR_PLANE,
+	CRCB_PLANE,
+	CBCR_PLANE,
+	RAW_PLANE,
+	RDI_PLANE,
+	VFE_PLANE_FMT_MAX
+};
+
+enum msm_vfe_input_src {
+	VFE_PIX_0,
+	VFE_RAW_0,
+	VFE_RAW_1,
+	VFE_RAW_2,
+	VFE_SRC_MAX,
+};
+
+enum msm_vfe_axi_stream_src {
+	PIX_ENCODER,
+	PIX_VIEWFINDER,
+	PIX_VIDEO,
+	ARGB_RAW,
+	CAMIF_RAW,
+	IDEAL_RAW,
+	RDI_INTF_0,
+	RDI_INTF_1,
+	RDI_INTF_2,
+	VFE_AXI_SRC_MAX
+};
+
+enum msm_vfe_frame_skip_pattern {
+	NO_SKIP,
+	EVERY_2FRAME,
+	EVERY_3FRAME,
+	EVERY_4FRAME,
+	EVERY_5FRAME,
+	EVERY_6FRAME,
+	EVERY_7FRAME,
+	EVERY_8FRAME,
+	EVERY_16FRAME,
+	EVERY_32FRAME,
+	SKIP_ALL,
+	SKIP_RANGE,
+	MAX_SKIP,
+};
+
+/*
+ * Define an unused period. When this period is set, the stream is
+ * stopped (i.e. the pattern is 0). We don't track the current pattern
+ * separately; the period alone defines it: if the period equals this
+ * value the pattern is 0, otherwise it is 1.
+ */
+#define MSM_VFE_STREAM_STOP_PERIOD 15
+
+enum msm_isp_stats_type {
+	MSM_ISP_STATS_AEC,   /* legacy based AEC */
+	MSM_ISP_STATS_AF,    /* legacy based AF */
+	MSM_ISP_STATS_AWB,   /* legacy based AWB */
+	MSM_ISP_STATS_RS,    /* legacy based RS */
+	MSM_ISP_STATS_CS,    /* legacy based CS */
+	MSM_ISP_STATS_IHIST, /* legacy based HIST */
+	MSM_ISP_STATS_SKIN,  /* legacy based SKIN */
+	MSM_ISP_STATS_BG,    /* Bayer Grids */
+	MSM_ISP_STATS_BF,    /* Bayer Focus */
+	MSM_ISP_STATS_BE,    /* Bayer Exposure*/
+	MSM_ISP_STATS_BHIST, /* Bayer Hist */
+	MSM_ISP_STATS_BF_SCALE,  /* Bayer Focus scale */
+	MSM_ISP_STATS_HDR_BE,    /* HDR Bayer Exposure */
+	MSM_ISP_STATS_HDR_BHIST, /* HDR Bayer Hist */
+	MSM_ISP_STATS_AEC_BG,   /* AEC BG */
+	MSM_ISP_STATS_MAX    /* MAX */
+};
+
+/*
+ * @stats_type_mask: Stats type mask (enum msm_isp_stats_type).
+ * @stream_src_mask: Stream src mask (enum msm_vfe_axi_stream_src)
+ * @skip_mode: skip pattern; min/max are used only when skip mode is SKIP_RANGE
+ * @min_frame_id: minimum frame id (valid only if skip_mode = SKIP_RANGE)
+ * @max_frame_id: maximum frame id (valid only if skip_mode = SKIP_RANGE)
+ */
+struct msm_isp_sw_framskip {
+	uint32_t stats_type_mask;
+	uint32_t stream_src_mask;
+	enum msm_vfe_frame_skip_pattern skip_mode;
+	uint32_t min_frame_id;
+	uint32_t max_frame_id;
+};
+
+enum msm_vfe_testgen_color_pattern {
+	COLOR_BAR_8_COLOR,
+	UNICOLOR_WHITE,
+	UNICOLOR_YELLOW,
+	UNICOLOR_CYAN,
+	UNICOLOR_GREEN,
+	UNICOLOR_MAGENTA,
+	UNICOLOR_RED,
+	UNICOLOR_BLUE,
+	UNICOLOR_BLACK,
+	MAX_COLOR,
+};
+
+enum msm_vfe_camif_input {
+	CAMIF_DISABLED,
+	CAMIF_PAD_REG_INPUT,
+	CAMIF_MIDDI_INPUT,
+	CAMIF_MIPI_INPUT,
+};
+
+struct msm_vfe_fetch_engine_cfg {
+	uint32_t input_format;
+	uint32_t buf_width;
+	uint32_t buf_height;
+	uint32_t fetch_width;
+	uint32_t fetch_height;
+	uint32_t x_offset;
+	uint32_t y_offset;
+	uint32_t buf_stride;
+};
+
+enum msm_vfe_camif_output_format {
+	CAMIF_QCOM_RAW,
+	CAMIF_MIPI_RAW,
+	CAMIF_PLAIN_8,
+	CAMIF_PLAIN_16,
+	CAMIF_MAX_FORMAT,
+};
+
+/*
+ * Camif output general configuration
+ */
+struct msm_vfe_camif_subsample_cfg {
+	uint32_t irq_subsample_period;
+	uint32_t irq_subsample_pattern;
+	uint32_t sof_counter_step;
+	uint32_t pixel_skip;
+	uint32_t line_skip;
+	uint32_t first_line;
+	uint32_t last_line;
+	uint32_t first_pixel;
+	uint32_t last_pixel;
+	enum msm_vfe_camif_output_format output_format;
+};
+
+/*
+ * Camif frame and window configuration
+ */
+struct msm_vfe_camif_cfg {
+	uint32_t lines_per_frame;
+	uint32_t pixels_per_line;
+	uint32_t first_pixel;
+	uint32_t last_pixel;
+	uint32_t first_line;
+	uint32_t last_line;
+	uint32_t epoch_line0;
+	uint32_t epoch_line1;
+	uint32_t is_split;
+	uint32_t  vsync_edge;
+	uint32_t  hsync_edge;
+	uint32_t  sync_mode;
+	uint32_t  vfe_subsample_en;
+	uint32_t  bus_subsample_en;
+	uint32_t  vfe_output_en;
+	uint32_t  bus_output_en;
+	uint32_t  binning_enable;
+	uint32_t  irq_subsample_period;
+	uint32_t  misr_en;
+	uint32_t  irq_subsample_pattern;
+	uint32_t  frame_based_en;
+	uint32_t  frame_drop_Period;
+	uint32_t  frame_drop_pattern;
+	uint32_t  frame_drop_irq_en;
+	enum msm_vfe_camif_input camif_input;
+	struct msm_vfe_camif_subsample_cfg subsample_cfg;
+};
+
+struct msm_vfe_testgen_cfg {
+	uint32_t lines_per_frame;
+	uint32_t pixels_per_line;
+	uint32_t v_blank;
+	uint32_t h_blank;
+	enum ISP_START_PIXEL_PATTERN pixel_bayer_pattern;
+	uint32_t rotate_period;
+	enum msm_vfe_testgen_color_pattern color_bar_pattern;
+	uint32_t burst_num_frame;
+};
+
+enum msm_vfe_inputmux {
+	CAMIF,
+	TESTGEN,
+	EXTERNAL_READ,
+};
+
+enum msm_vfe_stats_composite_group {
+	STATS_COMPOSITE_GRP_NONE,
+	STATS_COMPOSITE_GRP_1,
+	STATS_COMPOSITE_GRP_2,
+	STATS_COMPOSITE_GRP_MAX,
+};
+
+enum msm_vfe_hvx_streaming_cmd {
+	HVX_DISABLE,
+	HVX_ONE_WAY,
+	HVX_ROUND_TRIP
+};
+
+struct msm_vfe_pix_cfg {
+	struct msm_vfe_camif_cfg camif_cfg;
+	struct msm_vfe_testgen_cfg testgen_cfg;
+	struct msm_vfe_fetch_engine_cfg fetch_engine_cfg;
+	enum msm_vfe_inputmux input_mux;
+	enum ISP_START_PIXEL_PATTERN pixel_pattern;
+	uint32_t input_format;
+	enum msm_vfe_hvx_streaming_cmd hvx_cmd;
+	uint32_t is_split;
+};
+
+struct msm_vfe_rdi_cfg {
+	uint8_t cid;
+	uint8_t frame_based;
+};
+
+struct msm_vfe_input_cfg {
+	union {
+		struct msm_vfe_pix_cfg pix_cfg;
+		struct msm_vfe_rdi_cfg rdi_cfg;
+	} d;
+	enum msm_vfe_input_src input_src;
+	uint32_t input_pix_clk;
+};
+
+struct msm_vfe_fetch_eng_start {
+	uint32_t session_id;
+	uint32_t stream_id;
+	uint32_t buf_idx;
+	uint8_t  offline_mode;
+	uint32_t fd;
+	uint32_t buf_addr;
+	uint32_t frame_id;
+};
+
+enum msm_vfe_fetch_eng_pass {
+	OFFLINE_FIRST_PASS,
+	OFFLINE_SECOND_PASS,
+	OFFLINE_MAX_PASS,
+};
+
+struct msm_vfe_fetch_eng_multi_pass_start {
+	uint32_t session_id;
+	uint32_t stream_id;
+	uint32_t buf_idx;
+	uint8_t  offline_mode;
+	uint32_t fd;
+	uint32_t buf_addr;
+	uint32_t frame_id;
+	uint32_t output_buf_idx;
+	uint32_t input_buf_offset;
+	enum msm_vfe_fetch_eng_pass  offline_pass;
+	uint32_t output_stream_id;
+};
+
+struct msm_vfe_axi_plane_cfg {
+	uint32_t output_width; /*Include padding*/
+	uint32_t output_height;
+	uint32_t output_stride;
+	uint32_t output_scan_lines;
+	uint32_t output_plane_format; /*Y/Cb/Cr/CbCr*/
+	uint32_t plane_addr_offset;
+	uint8_t csid_src; /*RDI 0-2*/
+	uint8_t rdi_cid;/*CID 1-16*/
+};
+
+enum msm_stream_memory_input_t {
+	MEMORY_INPUT_DISABLED,
+	MEMORY_INPUT_ENABLED
+};
+
+struct msm_vfe_axi_stream_request_cmd {
+	uint32_t session_id;
+	uint32_t stream_id;
+	uint32_t vt_enable;
+	uint32_t output_format;/*Planar/RAW/Misc*/
+	enum msm_vfe_axi_stream_src stream_src; /*CAMIF/IDEAL/RDIs*/
+	struct msm_vfe_axi_plane_cfg plane_cfg[MAX_PLANES_PER_STREAM];
+
+	uint32_t burst_count;
+	uint32_t hfr_mode;
+	uint8_t frame_base;
+
+	uint32_t init_frame_drop; /*MAX 31 Frames*/
+	enum msm_vfe_frame_skip_pattern frame_skip_pattern;
+	uint8_t buf_divert; /* if TRUE no vb2 buf done. */
+	/*Return values*/
+	uint32_t axi_stream_handle;
+	uint32_t controllable_output;
+	uint32_t burst_len;
+	/* Flag indicating memory input stream */
+	enum msm_stream_memory_input_t memory_input;
+};
+
+struct msm_vfe_axi_stream_release_cmd {
+	uint32_t stream_handle;
+};
+
+enum msm_vfe_axi_stream_cmd {
+	STOP_STREAM,
+	START_STREAM,
+	STOP_IMMEDIATELY,
+};
+
+enum msm_vfe_hw_state {
+	HW_STATE_NONE,
+	HW_STATE_SLEEP,
+	HW_STATE_AWAKE,
+};
+
+struct msm_vfe_axi_stream_cfg_cmd {
+	uint8_t num_streams;
+	uint32_t stream_handle[VFE_AXI_SRC_MAX];
+	enum msm_vfe_axi_stream_cmd cmd;
+	uint8_t sync_frame_id_src;
+	enum msm_vfe_hw_state hw_state;
+};
+
+enum msm_vfe_axi_stream_update_type {
+	ENABLE_STREAM_BUF_DIVERT,
+	DISABLE_STREAM_BUF_DIVERT,
+	UPDATE_STREAM_FRAMEDROP_PATTERN,
+	UPDATE_STREAM_STATS_FRAMEDROP_PATTERN,
+	UPDATE_STREAM_AXI_CONFIG,
+	UPDATE_STREAM_REQUEST_FRAMES,
+	UPDATE_STREAM_ADD_BUFQ,
+	UPDATE_STREAM_REMOVE_BUFQ,
+	UPDATE_STREAM_SW_FRAME_DROP,
+	UPDATE_STREAM_REQUEST_FRAMES_VER2,
+	UPDATE_STREAM_OFFLINE_AXI_CONFIG,
+};
+#define UPDATE_STREAM_REQUEST_FRAMES_VER2 UPDATE_STREAM_REQUEST_FRAMES_VER2
+
+enum msm_vfe_iommu_type {
+	IOMMU_ATTACH,
+	IOMMU_DETACH,
+};
+
+enum msm_vfe_buff_queue_id {
+	VFE_BUF_QUEUE_DEFAULT,
+	VFE_BUF_QUEUE_SHARED,
+	VFE_BUF_QUEUE_MAX,
+};
+
+struct msm_vfe_axi_stream_cfg_update_info {
+	uint32_t stream_handle;
+	uint32_t output_format;
+	uint32_t user_stream_id;
+	uint32_t frame_id;
+	enum msm_vfe_frame_skip_pattern skip_pattern;
+	struct msm_vfe_axi_plane_cfg plane_cfg[MAX_PLANES_PER_STREAM];
+	struct msm_isp_sw_framskip sw_skip_info;
+};
+
+struct msm_vfe_axi_stream_cfg_update_info_req_frm {
+	uint32_t stream_handle;
+	uint32_t user_stream_id;
+	uint32_t frame_id;
+	uint32_t buf_index;
+};
+
+struct msm_vfe_axi_halt_cmd {
+	uint32_t stop_camif;
+	uint32_t overflow_detected;
+	uint32_t blocking_halt;
+};
+
+struct msm_vfe_axi_reset_cmd {
+	uint32_t blocking;
+	uint32_t frame_id;
+};
+
+struct msm_vfe_axi_restart_cmd {
+	uint32_t enable_camif;
+};
+
+struct msm_vfe_axi_stream_update_cmd {
+	uint32_t num_streams;
+	enum msm_vfe_axi_stream_update_type update_type;
+	/*
+	 * For backward compatibility, ensure 1st member of any struct
+	 * in union below is uint32_t stream_handle.
+	 */
+	union {
+		struct msm_vfe_axi_stream_cfg_update_info
+					update_info[MSM_ISP_STATS_MAX];
+		struct msm_vfe_axi_stream_cfg_update_info_req_frm req_frm_ver2;
+	};
+};
+
+struct msm_vfe_smmu_attach_cmd {
+	uint32_t security_mode;
+	uint32_t iommu_attach_mode;
+};
+
+struct msm_vfe_stats_stream_request_cmd {
+	uint32_t session_id;
+	uint32_t stream_id;
+	enum msm_isp_stats_type stats_type;
+	uint32_t composite_flag;
+	uint32_t framedrop_pattern;
+	uint32_t init_frame_drop; /*MAX 31 Frames*/
+	uint32_t irq_subsample_pattern;
+	uint32_t buffer_offset;
+	uint32_t stream_handle;
+};
+
+struct msm_vfe_stats_stream_release_cmd {
+	uint32_t stream_handle;
+};
+struct msm_vfe_stats_stream_cfg_cmd {
+	uint8_t num_streams;
+	uint32_t stream_handle[MSM_ISP_STATS_MAX];
+	uint8_t enable;
+	uint32_t stats_burst_len;
+};
+
+enum msm_vfe_reg_cfg_type {
+	VFE_WRITE,
+	VFE_WRITE_MB,
+	VFE_READ,
+	VFE_CFG_MASK,
+	VFE_WRITE_DMI_16BIT,
+	VFE_WRITE_DMI_32BIT,
+	VFE_WRITE_DMI_64BIT,
+	VFE_READ_DMI_16BIT,
+	VFE_READ_DMI_32BIT,
+	VFE_READ_DMI_64BIT,
+	GET_MAX_CLK_RATE,
+	GET_CLK_RATES,
+	GET_ISP_ID,
+	VFE_HW_UPDATE_LOCK,
+	VFE_HW_UPDATE_UNLOCK,
+	SET_WM_UB_SIZE,
+	SET_UB_POLICY,
+};
+
+struct msm_vfe_cfg_cmd2 {
+	uint16_t num_cfg;
+	uint16_t cmd_len;
+	void __user *cfg_data;
+	void __user *cfg_cmd;
+};
+
+struct msm_vfe_cfg_cmd_list {
+	struct msm_vfe_cfg_cmd2      cfg_cmd;
+	struct msm_vfe_cfg_cmd_list *next;
+	uint32_t                     next_size;
+};
+
+struct msm_vfe_reg_rw_info {
+	uint32_t reg_offset;
+	uint32_t cmd_data_offset;
+	uint32_t len;
+};
+
+struct msm_vfe_reg_mask_info {
+	uint32_t reg_offset;
+	uint32_t mask;
+	uint32_t val;
+};
+
+struct msm_vfe_reg_dmi_info {
+	uint32_t hi_tbl_offset; /*Optional*/
+	uint32_t lo_tbl_offset; /*Required*/
+	uint32_t len;
+};
+
+struct msm_vfe_reg_cfg_cmd {
+	union {
+		struct msm_vfe_reg_rw_info rw_info;
+		struct msm_vfe_reg_mask_info mask_info;
+		struct msm_vfe_reg_dmi_info dmi_info;
+	} u;
+
+	enum msm_vfe_reg_cfg_type cmd_type;
+};
+
+enum vfe_sd_type {
+	VFE_SD_0 = 0,
+	VFE_SD_1,
+	VFE_SD_COMMON,
+	VFE_SD_MAX,
+};
+
+/* When changing the value below, check the SOF event_data size:
+ * V4L2 limits the event payload to 64 bytes.
+ */
+#define MS_NUM_SLAVE_MAX 1
+
+/* Use cases where two HW units need to be related or synced */
+enum msm_vfe_dual_hw_type {
+	DUAL_NONE = 0,
+	DUAL_HW_VFE_SPLIT = 1,
+	DUAL_HW_MASTER_SLAVE = 2,
+};
+
+/* Type of the 2 INTFs when used in Master-Slave mode */
+enum msm_vfe_dual_hw_ms_type {
+	MS_TYPE_NONE,
+	MS_TYPE_MASTER,
+	MS_TYPE_SLAVE,
+};
+
+struct msm_isp_set_dual_hw_ms_cmd {
+	uint8_t num_src;
+	/* Each session can be only one type, but multiple INTFs for a YUV cam */
+	enum msm_vfe_dual_hw_ms_type dual_hw_ms_type;
+	/* The primary intf is usually associated with preview.
+	 * Its SOF frame_id and timestamp are tracked and used
+	 * to calculate the delta.
+	 */
+	enum msm_vfe_input_src primary_intf;
+	/* The input_src array lists the other input INTFs that may be
+	 * Master/Slave. For these additional INTFs, frame_id and timestamp
+	 * are not saved; however, if they are slaves they still get their
+	 * frame_id from the Master.
+	 */
+	enum msm_vfe_input_src input_src[VFE_SRC_MAX];
+	uint32_t sof_delta_threshold; /* In milliseconds. Sent for Master */
+};
+
+enum msm_isp_buf_type {
+	ISP_PRIVATE_BUF,
+	ISP_SHARE_BUF,
+	MAX_ISP_BUF_TYPE,
+};
+
+struct msm_isp_unmap_buf_req {
+	uint32_t fd;
+};
+
+struct msm_isp_buf_request {
+	uint32_t vfe_id;
+	enum msm_vfe_axi_stream_src output_id;
+	uint32_t flags;
+	uint8_t num_buf;
+	uint32_t handle;
+	enum msm_isp_buf_type buf_type;
+};
+
+struct msm_isp_qbuf_plane {
+	uint32_t addr;
+	uint32_t offset;
+	uint32_t length;
+};
+
+struct msm_isp_qbuf_buffer {
+	struct msm_isp_qbuf_plane planes[MAX_PLANES_PER_STREAM];
+	uint32_t num_planes;
+};
+
+struct msm_isp_qbuf_info {
+	uint32_t handle;
+	int32_t buf_idx;
+	/*Only used for prepare buffer*/
+	struct msm_isp_qbuf_buffer buffer;
+	/*Only used for diverted buffer*/
+	uint32_t dirty_buf;
+};
+
+struct msm_isp_clk_rates {
+	uint32_t svs_rate;
+	uint32_t nominal_rate;
+	uint32_t high_rate;
+};
+
+struct msm_vfe_axi_src_state {
+	enum msm_vfe_input_src input_src;
+	uint32_t src_active;
+	uint32_t src_frame_id;
+};
+
+enum msm_isp_event_mask_index {
+	ISP_EVENT_MASK_INDEX_STATS_NOTIFY		= 0,
+	ISP_EVENT_MASK_INDEX_ERROR			= 1,
+	ISP_EVENT_MASK_INDEX_IOMMU_P_FAULT		= 2,
+	ISP_EVENT_MASK_INDEX_STREAM_UPDATE_DONE		= 3,
+	ISP_EVENT_MASK_INDEX_REG_UPDATE			= 4,
+	ISP_EVENT_MASK_INDEX_SOF			= 5,
+	ISP_EVENT_MASK_INDEX_BUF_DIVERT			= 6,
+	ISP_EVENT_MASK_INDEX_COMP_STATS_NOTIFY		= 7,
+	ISP_EVENT_MASK_INDEX_MASK_FE_READ_DONE		= 8,
+	ISP_EVENT_MASK_INDEX_BUF_DONE			= 9,
+	ISP_EVENT_MASK_INDEX_REG_UPDATE_MISSING		= 10,
+	ISP_EVENT_MASK_INDEX_PING_PONG_MISMATCH		= 11,
+	ISP_EVENT_MASK_INDEX_BUF_FATAL_ERROR		= 12,
+};
+
+
+#define ISP_EVENT_SUBS_MASK_NONE			0
+
+#define ISP_EVENT_SUBS_MASK_STATS_NOTIFY \
+			(1 << ISP_EVENT_MASK_INDEX_STATS_NOTIFY)
+
+#define ISP_EVENT_SUBS_MASK_ERROR \
+			(1 << ISP_EVENT_MASK_INDEX_ERROR)
+
+#define ISP_EVENT_SUBS_MASK_IOMMU_P_FAULT \
+			(1 << ISP_EVENT_MASK_INDEX_IOMMU_P_FAULT)
+
+#define ISP_EVENT_SUBS_MASK_STREAM_UPDATE_DONE \
+			(1 << ISP_EVENT_MASK_INDEX_STREAM_UPDATE_DONE)
+
+#define ISP_EVENT_SUBS_MASK_REG_UPDATE \
+			(1 << ISP_EVENT_MASK_INDEX_REG_UPDATE)
+
+#define ISP_EVENT_SUBS_MASK_SOF \
+			(1 << ISP_EVENT_MASK_INDEX_SOF)
+
+#define ISP_EVENT_SUBS_MASK_BUF_DIVERT \
+			(1 << ISP_EVENT_MASK_INDEX_BUF_DIVERT)
+
+#define ISP_EVENT_SUBS_MASK_COMP_STATS_NOTIFY \
+			(1 << ISP_EVENT_MASK_INDEX_COMP_STATS_NOTIFY)
+
+#define ISP_EVENT_SUBS_MASK_FE_READ_DONE \
+			(1 << ISP_EVENT_MASK_INDEX_MASK_FE_READ_DONE)
+
+#define ISP_EVENT_SUBS_MASK_BUF_DONE \
+			(1 << ISP_EVENT_MASK_INDEX_BUF_DONE)
+
+#define ISP_EVENT_SUBS_MASK_REG_UPDATE_MISSING \
+			(1 << ISP_EVENT_MASK_INDEX_REG_UPDATE_MISSING)
+
+#define ISP_EVENT_SUBS_MASK_PING_PONG_MISMATCH \
+			(1 << ISP_EVENT_MASK_INDEX_PING_PONG_MISMATCH)
+
+#define ISP_EVENT_SUBS_MASK_BUF_FATAL_ERROR \
+			(1 << ISP_EVENT_MASK_INDEX_BUF_FATAL_ERROR)
+
+enum msm_isp_event_idx {
+	ISP_REG_UPDATE        = 0,
+	ISP_EPOCH_0           = 1,
+	ISP_EPOCH_1           = 2,
+	ISP_START_ACK         = 3,
+	ISP_STOP_ACK          = 4,
+	ISP_IRQ_VIOLATION     = 5,
+	ISP_STATS_OVERFLOW    = 6,
+	ISP_BUF_DONE          = 7,
+	ISP_FE_RD_DONE        = 8,
+	ISP_IOMMU_P_FAULT     = 9,
+	ISP_ERROR             = 10,
+	ISP_HW_FATAL_ERROR      = 11,
+	ISP_PING_PONG_MISMATCH = 12,
+	ISP_REG_UPDATE_MISSING = 13,
+	ISP_BUF_FATAL_ERROR = 14,
+	ISP_EVENT_MAX         = 15
+};
+
+#define ISP_EVENT_OFFSET          8
+#define ISP_EVENT_BASE            (V4L2_EVENT_PRIVATE_START)
+#define ISP_BUF_EVENT_BASE        (ISP_EVENT_BASE + (1 << ISP_EVENT_OFFSET))
+#define ISP_STATS_EVENT_BASE      (ISP_EVENT_BASE + (2 << ISP_EVENT_OFFSET))
+#define ISP_CAMIF_EVENT_BASE      (ISP_EVENT_BASE + (3 << ISP_EVENT_OFFSET))
+#define ISP_STREAM_EVENT_BASE     (ISP_EVENT_BASE + (4 << ISP_EVENT_OFFSET))
+#define ISP_EVENT_REG_UPDATE      (ISP_EVENT_BASE + ISP_REG_UPDATE)
+#define ISP_EVENT_EPOCH_0         (ISP_EVENT_BASE + ISP_EPOCH_0)
+#define ISP_EVENT_EPOCH_1         (ISP_EVENT_BASE + ISP_EPOCH_1)
+#define ISP_EVENT_START_ACK       (ISP_EVENT_BASE + ISP_START_ACK)
+#define ISP_EVENT_STOP_ACK        (ISP_EVENT_BASE + ISP_STOP_ACK)
+#define ISP_EVENT_IRQ_VIOLATION   (ISP_EVENT_BASE + ISP_IRQ_VIOLATION)
+#define ISP_EVENT_STATS_OVERFLOW  (ISP_EVENT_BASE + ISP_STATS_OVERFLOW)
+#define ISP_EVENT_ERROR           (ISP_EVENT_BASE + ISP_ERROR)
+#define ISP_EVENT_SOF             (ISP_CAMIF_EVENT_BASE)
+#define ISP_EVENT_EOF             (ISP_CAMIF_EVENT_BASE + 1)
+#define ISP_EVENT_BUF_DONE        (ISP_EVENT_BASE + ISP_BUF_DONE)
+#define ISP_EVENT_BUF_DIVERT      (ISP_BUF_EVENT_BASE)
+#define ISP_EVENT_STATS_NOTIFY    (ISP_STATS_EVENT_BASE)
+#define ISP_EVENT_COMP_STATS_NOTIFY (ISP_EVENT_STATS_NOTIFY + MSM_ISP_STATS_MAX)
+#define ISP_EVENT_FE_READ_DONE    (ISP_EVENT_BASE + ISP_FE_RD_DONE)
+#define ISP_EVENT_IOMMU_P_FAULT   (ISP_EVENT_BASE + ISP_IOMMU_P_FAULT)
+#define ISP_EVENT_HW_FATAL_ERROR  (ISP_EVENT_BASE + ISP_HW_FATAL_ERROR)
+#define ISP_EVENT_PING_PONG_MISMATCH (ISP_EVENT_BASE + ISP_PING_PONG_MISMATCH)
+#define ISP_EVENT_REG_UPDATE_MISSING (ISP_EVENT_BASE + ISP_REG_UPDATE_MISSING)
+#define ISP_EVENT_BUF_FATAL_ERROR (ISP_EVENT_BASE + ISP_BUF_FATAL_ERROR)
+#define ISP_EVENT_STREAM_UPDATE_DONE   (ISP_STREAM_EVENT_BASE)
+
+/* Like msm_v4l2_event_data, the event structures below are carried
+ * in the v4l2_event.u.data field and must not exceed 64 bytes.
+ */
+
+struct msm_isp_buf_event {
+	uint32_t session_id;
+	uint32_t stream_id;
+	uint32_t handle;
+	uint32_t output_format;
+	int8_t buf_idx;
+	uint8_t field_type;
+};
+struct msm_isp_fetch_eng_event {
+	uint32_t session_id;
+	uint32_t stream_id;
+	uint32_t handle;
+	uint32_t fd;
+	int8_t buf_idx;
+	int8_t offline_mode;
+};
+struct msm_isp_stats_event {
+	uint32_t stats_mask;                        /* 4 bytes */
+	uint8_t stats_buf_idxs[MSM_ISP_STATS_MAX];  /* MSM_ISP_STATS_MAX bytes */
+};
+
+struct msm_isp_stream_ack {
+	uint32_t session_id;
+	uint32_t stream_id;
+	uint32_t handle;
+};
+
+enum msm_vfe_error_type {
+	ISP_ERROR_NONE,
+	ISP_ERROR_CAMIF,
+	ISP_ERROR_BUS_OVERFLOW,
+	ISP_ERROR_RETURN_EMPTY_BUFFER,
+	ISP_ERROR_FRAME_ID_MISMATCH,
+	ISP_ERROR_MAX,
+};
+
+struct msm_isp_error_info {
+	enum msm_vfe_error_type err_type;
+	uint32_t session_id;
+	uint32_t stream_id;
+	uint32_t stream_id_mask;
+};
+
+/* This structure reports delta between master and slave */
+struct msm_isp_ms_delta_info {
+	uint8_t num_delta_info;
+	uint32_t delta[MS_NUM_SLAVE_MAX];
+};
+
+/* This is sent in EPOCH irq */
+struct msm_isp_output_info {
+	uint8_t regs_not_updated;
+	/* mask with bufq_handle for regs not updated or return empty */
+	uint16_t output_err_mask;
+	/* mask with stream_idx for get_buf failed */
+	uint8_t stream_framedrop_mask;
+	/* mask with stats stream_idx for get_buf failed */
+	uint16_t stats_framedrop_mask;
+	/* delta between master and slave */
+};
+
+/* This structure is piggybacked with SOF event */
+struct msm_isp_sof_info {
+	uint8_t regs_not_updated;
+	/* mask with bufq_handle for regs not updated */
+	uint16_t reg_update_fail_mask;
+	/* mask with bufq_handle for get_buf failed */
+	uint32_t stream_get_buf_fail_mask;
+	/* mask with stats stream_idx for get_buf failed */
+	uint16_t stats_get_buf_fail_mask;
+	/* delta between master and slave */
+	struct msm_isp_ms_delta_info ms_delta_info;
+	/*
+	 * mask of AXI_SRCs in the paused state. In the PAUSED state
+	 * there is no buffer output, so this mask is used to report
+	 * drops.
+	 */
+	uint16_t axi_updating_mask;
+	/* extended mask with bufq_handle for regs not updated */
+	uint32_t reg_update_fail_mask_ext;
+};
+#define AXI_UPDATING_MASK 1
+#define REG_UPDATE_FAIL_MASK_EXT 1
+
+struct msm_isp_event_data {
+	/* Wall clock, except for buffer divert events,
+	 * which use the monotonic clock
+	 */
+	struct timeval timestamp;
+	/* Monotonic timestamp since bootup */
+	struct timeval mono_timestamp;
+	uint32_t frame_id;
+	union {
+		/* Sent for Stats_Done event */
+		struct msm_isp_stats_event stats;
+		/* Sent for Buf_Divert event */
+		struct msm_isp_buf_event buf_done;
+		/* Sent for offline fetch done event */
+		struct msm_isp_fetch_eng_event fetch_done;
+		/* Sent for Error_Event */
+		struct msm_isp_error_info error_info;
+		/*
+		 * This struct needs to be removed once
+		 * userspace switches to sof_info
+		 */
+		struct msm_isp_output_info output_info;
+		/* Sent for SOF event */
+		struct msm_isp_sof_info sof_info;
+	} u; /* union can have max 52 bytes */
+};
+
+enum msm_vfe_ahb_clk_vote {
+	MSM_ISP_CAMERA_AHB_SVS_VOTE = 1,
+	MSM_ISP_CAMERA_AHB_TURBO_VOTE = 2,
+	MSM_ISP_CAMERA_AHB_NOMINAL_VOTE = 3,
+	MSM_ISP_CAMERA_AHB_SUSPEND_VOTE = 4,
+};
+
+struct msm_isp_ahb_clk_cfg {
+	uint32_t vote;
+	uint32_t reserved[2];
+};
+
+#define V4L2_PIX_FMT_QBGGR8  v4l2_fourcc('Q', 'B', 'G', '8')
+#define V4L2_PIX_FMT_QGBRG8  v4l2_fourcc('Q', 'G', 'B', '8')
+#define V4L2_PIX_FMT_QGRBG8  v4l2_fourcc('Q', 'G', 'R', '8')
+#define V4L2_PIX_FMT_QRGGB8  v4l2_fourcc('Q', 'R', 'G', '8')
+#define V4L2_PIX_FMT_QBGGR10 v4l2_fourcc('Q', 'B', 'G', '0')
+#define V4L2_PIX_FMT_QGBRG10 v4l2_fourcc('Q', 'G', 'B', '0')
+#define V4L2_PIX_FMT_QGRBG10 v4l2_fourcc('Q', 'G', 'R', '0')
+#define V4L2_PIX_FMT_QRGGB10 v4l2_fourcc('Q', 'R', 'G', '0')
+#define V4L2_PIX_FMT_QBGGR12 v4l2_fourcc('Q', 'B', 'G', '2')
+#define V4L2_PIX_FMT_QGBRG12 v4l2_fourcc('Q', 'G', 'B', '2')
+#define V4L2_PIX_FMT_QGRBG12 v4l2_fourcc('Q', 'G', 'R', '2')
+#define V4L2_PIX_FMT_QRGGB12 v4l2_fourcc('Q', 'R', 'G', '2')
+#define V4L2_PIX_FMT_QBGGR14 v4l2_fourcc('Q', 'B', 'G', '4')
+#define V4L2_PIX_FMT_QGBRG14 v4l2_fourcc('Q', 'G', 'B', '4')
+#define V4L2_PIX_FMT_QGRBG14 v4l2_fourcc('Q', 'G', 'R', '4')
+#define V4L2_PIX_FMT_QRGGB14 v4l2_fourcc('Q', 'R', 'G', '4')
+#define V4L2_PIX_FMT_P16BGGR10 v4l2_fourcc('P', 'B', 'G', '0')
+#define V4L2_PIX_FMT_P16GBRG10 v4l2_fourcc('P', 'G', 'B', '0')
+#define V4L2_PIX_FMT_P16GRBG10 v4l2_fourcc('P', 'G', 'R', '0')
+#define V4L2_PIX_FMT_P16RGGB10 v4l2_fourcc('P', 'R', 'G', '0')
+#define V4L2_PIX_FMT_NV14 v4l2_fourcc('N', 'V', '1', '4')
+#define V4L2_PIX_FMT_NV41 v4l2_fourcc('N', 'V', '4', '1')
+#define V4L2_PIX_FMT_META v4l2_fourcc('Q', 'M', 'E', 'T')
+#define V4L2_PIX_FMT_META10 v4l2_fourcc('Q', 'M', '1', '0')
+#define V4L2_PIX_FMT_SBGGR14 v4l2_fourcc('B', 'G', '1', '4') /* 14 BGBG.GRGR. */
+#define V4L2_PIX_FMT_SGBRG14 v4l2_fourcc('G', 'B', '1', '4') /* 14 GBGB.RGRG. */
+#define V4L2_PIX_FMT_SGRBG14 v4l2_fourcc('B', 'A', '1', '4') /* 14 GRGR.BGBG. */
+#define V4L2_PIX_FMT_SRGGB14 v4l2_fourcc('R', 'G', '1', '4') /* 14 RGRG.GBGB. */
+
+
+enum msm_vfe_pixel_data_size {
+	VFE_PIXEL_DATA_SIZE_8BIT,
+	VFE_PIXEL_DATA_SIZE_10BIT,
+	VFE_PIXEL_DATA_SIZE_12BIT,
+	VFE_PIXEL_DATA_SIZE_14BIT,
+};
+
+struct msm_vfe_operation_cfg {
+	enum msm_vfe_camif_input camif_input;
+	enum msm_vfe_pixel_data_size dataSize;
+	enum msm_vfe_inputmux input_mux;
+	enum ISP_START_PIXEL_PATTERN pixel_pattern;
+	enum msm_vfe_hvx_streaming_cmd hvx_cmd;
+	uint8_t yuv_cosited;
+};
+
+struct msm_vfe_axi_output_plane_cfg {
+	uint8_t wmIndex;
+	enum msm_vfe_plane_fmt plane_fmt;
+	uint32_t image_qwords_per_line;
+	uint32_t image_height;
+	uint32_t output_stride;
+	uint32_t output_scan_lines;
+	uint32_t output_plane_format;
+	uint32_t frame_increment;
+};
+
+struct msm_vfe_axi_output_path_cfg {
+	uint8_t enable;
+
+	uint32_t format;
+	uint8_t  raw_data_size;
+	uint32_t burst_count;
+	struct msm_vfe_axi_output_plane_cfg plane_cfg[MAX_PLANES_PER_STREAM];
+
+	uint8_t frame_based;
+	uint32_t frame_group;
+	uint32_t frame_interval;
+
+	uint8_t framedrop_period;
+	uint32_t framedrop_pattern;
+
+	uint8_t rdi_cid;
+	uint8_t rdi_frameskip_en;
+	uint32_t rdi_frameskip_pattern;
+};
+
+struct msm_vfe_axi_output_cfg {
+	struct msm_vfe_axi_output_path_cfg output_path_cfg[VFE_AXI_SRC_MAX];
+};
+
+
+enum msm_isp_ioctl_cmd_code {
+	MSM_VFE_REG_CFG = BASE_VIDIOC_PRIVATE,
+
+	MSM_ISP_REQUEST_BUFQ,
+	MSM_ISP_RELEASE_BUFQ,
+	MSM_ISP_ENQUEUE_BUF,
+	MSM_ISP_DEQUEUE_BUF,
+
+	MSM_ISP_REQUEST_STREAM,
+	MSM_ISP_CFG_STREAM,
+	MSM_ISP_RELEASE_STREAM,
+	MSM_ISP_INPUT_CFG,
+	MSM_ISP_SET_SRC_STATE,
+	MSM_ISP_REQUEST_STATS_STREAM,
+	MSM_ISP_CFG_STATS_STREAM,
+	MSM_ISP_RELEASE_STATS_STREAM,
+	MSM_ISP_REG_UPDATE_CMD,
+	MSM_ISP_UPDATE_STREAM,
+	MSM_VFE_REG_LIST_CFG,
+
+	MSM_ISP_UPDATE_STATS_STREAM,
+	MSM_ISP_AXI_HALT,
+	MSM_ISP_AXI_RESET,
+	MSM_ISP_AXI_RESTART,
+	MSM_ISP_FETCH_ENG_START,
+	MSM_ISP_SET_DUAL_HW_MASTER_SLAVE,
+	MSM_ISP_MAP_BUF_START_FE,
+	MSM_ISP_FETCH_ENG_MULTI_PASS_START,
+	MSM_ISP_MAP_BUF_START_MULTI_PASS_FE,
+	MSM_ISP_CFG_HW_STATE,
+
+	MSM_ISP_SMMU_ATTACH,
+	MSM_ISP_UNMAP_BUF,
+
+	MSM_ISP_OPERATION_CFG,
+	MSM_ISP_CAMIF_CFG,
+	MSM_ISP_AXI_OUTPUT_CFG,
+	MSM_ISP_START,
+	MSM_ISP_STOP,
+};
+
+
+#define VIDIOC_MSM_VFE_REG_CFG \
+	_IOWR('V', MSM_VFE_REG_CFG, \
+		struct msm_vfe_cfg_cmd2)
+
+#define VIDIOC_MSM_ISP_REQUEST_BUFQ \
+	_IOWR('V', MSM_ISP_REQUEST_BUFQ, \
+		struct msm_isp_buf_request)
+
+#define VIDIOC_MSM_ISP_RELEASE_BUFQ \
+	_IOWR('V', MSM_ISP_RELEASE_BUFQ, \
+		struct msm_isp_buf_request)
+
+#define VIDIOC_MSM_ISP_ENQUEUE_BUF \
+	_IOWR('V', MSM_ISP_ENQUEUE_BUF, \
+		struct msm_isp_qbuf_info)
+
+#define VIDIOC_MSM_ISP_DEQUEUE_BUF \
+	_IOWR('V', MSM_ISP_DEQUEUE_BUF, \
+		struct msm_isp_qbuf_info)
+
+#define VIDIOC_MSM_ISP_REQUEST_STREAM \
+	_IOWR('V', MSM_ISP_REQUEST_STREAM, \
+		struct msm_vfe_axi_stream_request_cmd)
+
+#define VIDIOC_MSM_ISP_CFG_STREAM \
+	_IOWR('V', MSM_ISP_CFG_STREAM, \
+		struct msm_vfe_axi_stream_cfg_cmd)
+
+#define VIDIOC_MSM_ISP_RELEASE_STREAM \
+	_IOWR('V', MSM_ISP_RELEASE_STREAM, \
+		struct msm_vfe_axi_stream_release_cmd)
+
+#define VIDIOC_MSM_ISP_INPUT_CFG \
+	_IOWR('V', MSM_ISP_INPUT_CFG, \
+		struct msm_vfe_input_cfg)
+
+#define VIDIOC_MSM_ISP_SET_SRC_STATE \
+	_IOWR('V', MSM_ISP_SET_SRC_STATE, \
+		struct msm_vfe_axi_src_state)
+
+#define VIDIOC_MSM_ISP_REQUEST_STATS_STREAM \
+	_IOWR('V', MSM_ISP_REQUEST_STATS_STREAM, \
+		struct msm_vfe_stats_stream_request_cmd)
+
+#define VIDIOC_MSM_ISP_CFG_STATS_STREAM \
+	_IOWR('V', MSM_ISP_CFG_STATS_STREAM, \
+		struct msm_vfe_stats_stream_cfg_cmd)
+
+#define VIDIOC_MSM_ISP_RELEASE_STATS_STREAM \
+	_IOWR('V', MSM_ISP_RELEASE_STATS_STREAM, \
+		struct msm_vfe_stats_stream_release_cmd)
+
+#define VIDIOC_MSM_ISP_REG_UPDATE_CMD \
+	_IOWR('V', MSM_ISP_REG_UPDATE_CMD, \
+		enum msm_vfe_input_src)
+
+#define VIDIOC_MSM_ISP_UPDATE_STREAM \
+	_IOWR('V', MSM_ISP_UPDATE_STREAM, \
+		struct msm_vfe_axi_stream_update_cmd)
+
+#define VIDIOC_MSM_VFE_REG_LIST_CFG \
+	_IOWR('V', MSM_VFE_REG_LIST_CFG, \
+		struct msm_vfe_cfg_cmd_list)
+
+#define VIDIOC_MSM_ISP_SMMU_ATTACH \
+	_IOWR('V', MSM_ISP_SMMU_ATTACH, \
+		struct msm_vfe_smmu_attach_cmd)
+
+#define VIDIOC_MSM_ISP_UPDATE_STATS_STREAM \
+	_IOWR('V', MSM_ISP_UPDATE_STATS_STREAM, \
+		struct msm_vfe_axi_stream_update_cmd)
+
+#define VIDIOC_MSM_ISP_AXI_HALT \
+	_IOWR('V', MSM_ISP_AXI_HALT, \
+		struct msm_vfe_axi_halt_cmd)
+
+#define VIDIOC_MSM_ISP_AXI_RESET \
+	_IOWR('V', MSM_ISP_AXI_RESET, \
+		struct msm_vfe_axi_reset_cmd)
+
+#define VIDIOC_MSM_ISP_AXI_RESTART \
+	_IOWR('V', MSM_ISP_AXI_RESTART, \
+		struct msm_vfe_axi_restart_cmd)
+
+#define VIDIOC_MSM_ISP_FETCH_ENG_START \
+	_IOWR('V', MSM_ISP_FETCH_ENG_START, \
+		struct msm_vfe_fetch_eng_start)
+
+#define VIDIOC_MSM_ISP_SET_DUAL_HW_MASTER_SLAVE \
+	_IOWR('V', MSM_ISP_SET_DUAL_HW_MASTER_SLAVE, \
+		struct msm_isp_set_dual_hw_ms_cmd)
+
+#define VIDIOC_MSM_ISP_MAP_BUF_START_FE \
+	_IOWR('V', MSM_ISP_MAP_BUF_START_FE, \
+		struct msm_vfe_fetch_eng_start)
+
+#define VIDIOC_MSM_ISP_UNMAP_BUF \
+	_IOWR('V', MSM_ISP_UNMAP_BUF, \
+		struct msm_isp_unmap_buf_req)
+
+#define VIDIOC_MSM_ISP_AHB_CLK_CFG \
+	_IOWR('V', BASE_VIDIOC_PRIVATE+25, struct msm_isp_ahb_clk_cfg)
+
+#define VIDIOC_MSM_ISP_FETCH_ENG_MULTI_PASS_START \
+	_IOWR('V', MSM_ISP_FETCH_ENG_MULTI_PASS_START, \
+		struct msm_vfe_fetch_eng_multi_pass_start)
+
+#define VIDIOC_MSM_ISP_MAP_BUF_START_MULTI_PASS_FE \
+	_IOWR('V', MSM_ISP_MAP_BUF_START_MULTI_PASS_FE, \
+		struct msm_vfe_fetch_eng_multi_pass_start)
+
+#define VIDIOC_MSM_ISP_CFG_HW_STATE \
+	_IOWR('V', MSM_ISP_CFG_HW_STATE, \
+		struct msm_vfe_axi_stream_cfg_cmd)
+
+
+#define VIDIOC_MSM_ISP_OPERATION_CFG \
+	_IOWR('V', MSM_ISP_OPERATION_CFG, \
+		struct msm_vfe_operation_cfg)
+
+#define VIDIOC_MSM_ISP_AXI_OUTPUT_CFG \
+	_IOWR('V', MSM_ISP_AXI_OUTPUT_CFG, \
+		struct msm_vfe_axi_output_cfg)
+
+#define VIDIOC_MSM_ISP_CAMIF_CFG \
+	_IOWR('V', MSM_ISP_CAMIF_CFG, \
+		struct msm_vfe_camif_cfg)
+
+
+#endif /* __UAPI_MSM_AIS_ISP__ */
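A hedged sketch tying two of the ioctls above together: request a private buffer queue, then subscribe to SOF events. vfe_fd is assumed to be an open VFE subdev node; the field choices are illustrative, not a validated bring-up sequence, and the header install path is assumed.

	#include <string.h>
	#include <sys/ioctl.h>
	#include <linux/videodev2.h>
	#include <media/ais/msm_ais_isp.h>      /* assumed install path */

	static int isp_setup(int vfe_fd)
	{
		struct msm_isp_buf_request req;
		struct v4l2_event_subscription sub;

		memset(&req, 0, sizeof(req));
		req.num_buf = 4;                /* well under MSM_CAMERA_MAX_STREAM_BUF */
		req.buf_type = ISP_PRIVATE_BUF;
		if (ioctl(vfe_fd, VIDIOC_MSM_ISP_REQUEST_BUFQ, &req) < 0)
			return -1;
		/* req.handle now identifies the buffer queue */

		memset(&sub, 0, sizeof(sub));
		sub.type = ISP_EVENT_SOF;       /* start-of-frame notifications */
		if (ioctl(vfe_fd, VIDIOC_SUBSCRIBE_EVENT, &sub) < 0)
			return -1;
		return (int)req.handle;
	}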
diff -Nruw linux-4.4.115-fbx/include/uapi/media./ais/msm_ais_ispif.h linux-4.4.115-fbx/include/uapi/media/ais/msm_ais_ispif.h
--- linux-4.4.115-fbx/include/uapi/media./ais/msm_ais_ispif.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/include/uapi/media/ais/msm_ais_ispif.h	2019-10-29 09:26:25.553221870 +0100
@@ -0,0 +1,173 @@
+#ifndef UAPI_MSM_AIS_ISPIF_H
+#define UAPI_MSM_AIS_ISPIF_H
+
+#include <linux/types.h>
+#include <linux/ioctl.h>
+#include <linux/videodev2.h>
+
+#define CSID_VERSION_V20                      0x02000011
+#define CSID_VERSION_V22                      0x02001000
+#define CSID_VERSION_V30                      0x30000000
+#define CSID_VERSION_V3                       0x30000000
+#define CSID_VERSION_V35                      0x30050000
+
+enum msm_ispif_vfe_intf {
+	VFE0,
+	VFE1,
+	VFE_MAX
+};
+#define VFE0_MASK    (1 << VFE0)
+#define VFE1_MASK    (1 << VFE1)
+
+enum msm_ispif_intftype {
+	PIX0,
+	RDI0,
+	PIX1,
+	RDI1,
+	RDI2,
+	INTF_MAX
+};
+#define MAX_PARAM_ENTRIES (INTF_MAX * 2)
+#define MAX_CID_CH	8
+#define MAX_CID_CH_v2	4
+
+#define PIX0_MASK (1 << PIX0)
+#define PIX1_MASK (1 << PIX1)
+#define RDI0_MASK (1 << RDI0)
+#define RDI1_MASK (1 << RDI1)
+#define RDI2_MASK (1 << RDI2)
+
+
+enum msm_ispif_vc {
+	VC0,
+	VC1,
+	VC2,
+	VC3,
+	VC_MAX
+};
+
+enum msm_ispif_cid {
+	CID0,
+	CID1,
+	CID2,
+	CID3,
+	CID4,
+	CID5,
+	CID6,
+	CID7,
+	CID8,
+	CID9,
+	CID10,
+	CID11,
+	CID12,
+	CID13,
+	CID14,
+	CID15,
+	CID_MAX
+};
+
+enum msm_ispif_csid {
+	CSID0,
+	CSID1,
+	CSID2,
+	CSID3,
+	CSID_MAX
+};
+
+enum msm_ispif_pixel_odd_even {
+	PIX_EVEN,
+	PIX_ODD
+};
+
+enum msm_ispif_pixel_pack_mode {
+	PACK_BYTE,
+	PACK_PLAIN_PACK,
+	PACK_NV_P8,
+	PACK_NV_P16
+};
+
+struct msm_ispif_pack_cfg {
+	int pixel_swap_en;
+	enum msm_ispif_pixel_odd_even even_odd_sel;
+	enum msm_ispif_pixel_pack_mode pack_mode;
+};
+
+struct msm_ispif_params_entry {
+	enum msm_ispif_vfe_intf vfe_intf;
+	enum msm_ispif_intftype intftype;
+	enum msm_ispif_csid csid;
+	int num_cids;
+	enum msm_ispif_cid cids[MAX_CID_CH_v2];
+	uint8_t crop_enable;
+	uint16_t crop_start_pixel;
+	uint16_t crop_end_pixel;
+	uint8_t  rdi_frameskip_enable;
+	uint32_t rdi_framedrop_period;
+	uint32_t rdi_framedrop_pattern;
+};
+
+struct msm_ispif_param_data_ext {
+	uint32_t num;
+	struct msm_ispif_params_entry entries[MAX_PARAM_ENTRIES];
+	struct msm_ispif_pack_cfg pack_cfg[CID_MAX];
+};
+
+struct msm_ispif_param_data {
+	uint32_t num;
+	struct msm_ispif_params_entry entries[MAX_PARAM_ENTRIES];
+};
+
+struct msm_isp_info {
+	uint32_t max_resolution;
+	uint32_t id;
+	uint32_t ver;
+};
+
+struct msm_ispif_vfe_info {
+	int num_vfe;
+	struct msm_isp_info info[VFE_MAX];
+};
+
+enum ispif_cfg_type_t {
+	ISPIF_CLK_ENABLE,
+	ISPIF_CLK_DISABLE,
+	ISPIF_INIT,
+	ISPIF_RELEASE,
+	ISPIF_RESET,
+	ISPIF_CFG,
+	ISPIF_START_FRAME_BOUNDARY,
+	ISPIF_RESTART_FRAME_BOUNDARY,
+	ISPIF_STOP_FRAME_BOUNDARY,
+	ISPIF_STOP,
+	ISPIF_ENABLE_REG_DUMP,
+	ISPIF_SET_VFE_INFO,
+	ISPIF_CFG2
+};
+
+
+struct ispif_cfg_data_ext {
+	enum ispif_cfg_type_t cfg_type;
+	void __user *data;
+	uint32_t size;
+};
+
+struct ispif_cfg_data {
+	enum ispif_cfg_type_t cfg_type;
+	union {
+		int reg_dump;                        /* ISPIF_ENABLE_REG_DUMP */
+		uint32_t csid_version;               /* ISPIF_INIT */
+		struct msm_ispif_vfe_info vfe_info;  /* ISPIF_SET_VFE_INFO */
+		struct msm_ispif_param_data params;  /* CFG, START, STOP */
+	};
+};
+
+#define ISPIF_RDI_PACK_MODE_SUPPORT 1
+
+#define VIDIOC_MSM_ISPIF_CFG \
+	_IOWR('V', BASE_VIDIOC_PRIVATE, struct ispif_cfg_data)
+
+#define VIDIOC_MSM_ISPIF_CFG_EXT \
+	_IOWR('V', BASE_VIDIOC_PRIVATE+1, struct ispif_cfg_data_ext)
+
+#endif
+
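A sketch of the ISPIF init handshake suggested by the cfg_type enum: pass a CSID version with ISPIF_INIT through VIDIOC_MSM_ISPIF_CFG. The version choice, the fd provenance, and the header install path are assumptions.

	#include <string.h>
	#include <sys/ioctl.h>
	#include <media/ais/msm_ais_ispif.h>    /* assumed install path */

	static int ispif_init(int ispif_fd)
	{
		struct ispif_cfg_data cfg;

		memset(&cfg, 0, sizeof(cfg));
		cfg.cfg_type = ISPIF_INIT;
		cfg.csid_version = CSID_VERSION_V35; /* illustrative choice */
		return ioctl(ispif_fd, VIDIOC_MSM_ISPIF_CFG, &cfg);
	}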
diff -Nruw linux-4.4.115-fbx/include/uapi/media./ais/msm_ais_mgr.h linux-4.4.115-fbx/include/uapi/media/ais/msm_ais_mgr.h
--- linux-4.4.115-fbx/include/uapi/media./ais/msm_ais_mgr.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/include/uapi/media/ais/msm_ais_mgr.h	2019-10-29 09:26:25.553221870 +0100
@@ -0,0 +1,28 @@
+#ifndef __UAPI_MEDIA_MSM_AIS_MGR_H__
+#define __UAPI_MEDIA_MSM_AIS_MGR_H__
+
+#include <media/ais/msm_ais.h>
+
+enum clk_mgr_cfg_type_t {
+	AIS_CLK_ENABLE,
+	AIS_CLK_DISABLE,
+};
+
+#define AIS_CLK_ENABLE AIS_CLK_ENABLE
+#define AIS_CLK_DISABLE AIS_CLK_DISABLE
+
+struct clk_mgr_cfg_data_ext {
+	enum clk_mgr_cfg_type_t cfg_type;
+};
+
+struct clk_mgr_cfg_data {
+	enum clk_mgr_cfg_type_t cfg_type;
+};
+
+#define VIDIOC_MSM_AIS_CLK_CFG \
+	_IOWR('V', BASE_VIDIOC_PRIVATE, struct clk_mgr_cfg_data)
+
+#define VIDIOC_MSM_AIS_CLK_CFG_EXT \
+	_IOWR('V', BASE_VIDIOC_PRIVATE+1, struct clk_mgr_cfg_data_ext)
+
+#endif /* __UAPI_MEDIA_MSM_AIS_MGR_H__ */
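The clock-manager interface is a single-field command; a minimal sketch, assuming mgr_fd is an open AIS manager subdev and the header install path:

	#include <string.h>
	#include <sys/ioctl.h>
	#include <media/ais/msm_ais_mgr.h>      /* assumed install path */

	static int ais_clk_set(int mgr_fd, int enable)
	{
		struct clk_mgr_cfg_data cfg;

		memset(&cfg, 0, sizeof(cfg));
		cfg.cfg_type = enable ? AIS_CLK_ENABLE : AIS_CLK_DISABLE;
		return ioctl(mgr_fd, VIDIOC_MSM_AIS_CLK_CFG, &cfg);
	}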
diff -Nruw linux-4.4.115-fbx/include/uapi/media./ais/msm_ais_sensor.h linux-4.4.115-fbx/include/uapi/media/ais/msm_ais_sensor.h
--- linux-4.4.115-fbx/include/uapi/media./ais/msm_ais_sensor.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/include/uapi/media/ais/msm_ais_sensor.h	2019-10-29 09:26:25.553221870 +0100
@@ -0,0 +1,640 @@
+#ifndef __UAPI_LINUX_MSM_AIS_SENSOR_H
+#define __UAPI_LINUX_MSM_AIS_SENSOR_H
+
+#include <linux/v4l2-mediabus.h>
+#include <media/ais/msm_ais_sensor_sdk.h>
+
+#include <linux/types.h>
+#include <linux/i2c.h>
+
+#define I2C_SEQ_REG_SETTING_MAX   5
+
+#define MSM_SENSOR_MCLK_8HZ   8000000
+#define MSM_SENSOR_MCLK_16HZ  16000000
+#define MSM_SENSOR_MCLK_24HZ  24000000
+
+#define MAX_SENSOR_NAME 32
+#define MAX_ACTUATOR_AF_TOTAL_STEPS 1024
+
+#define MAX_OIS_MOD_NAME_SIZE 32
+#define MAX_OIS_NAME_SIZE 32
+#define MAX_OIS_REG_SETTINGS 800
+
+#define MOVE_NEAR 0
+#define MOVE_FAR  1
+
+#define MSM_ACTUATOR_MOVE_SIGNED_FAR -1
+#define MSM_ACTUATOR_MOVE_SIGNED_NEAR  1
+
+#define MAX_ACTUATOR_REGION  5
+
+#define MAX_EEPROM_NAME 32
+
+#define MAX_AF_ITERATIONS 3
+#define MAX_NUMBER_OF_STEPS 47
+#define MAX_REGULATOR 5
+
+#define MSM_V4L2_PIX_FMT_META v4l2_fourcc('M', 'E', 'T', 'A') /* META */
+#define MSM_V4L2_PIX_FMT_META10 v4l2_fourcc('M', 'E', '1', '0') /* META10 */
+#define MSM_V4L2_PIX_FMT_SBGGR14 v4l2_fourcc('B', 'G', '1', '4')
+	/* 14  BGBG.. GRGR.. */
+#define MSM_V4L2_PIX_FMT_SGBRG14 v4l2_fourcc('G', 'B', '1', '4')
+	/* 14  GBGB.. RGRG.. */
+#define MSM_V4L2_PIX_FMT_SGRBG14 v4l2_fourcc('B', 'A', '1', '4')
+	/* 14  GRGR.. BGBG.. */
+#define MSM_V4L2_PIX_FMT_SRGGB14 v4l2_fourcc('R', 'G', '1', '4')
+	/* 14  RGRG.. GBGB.. */
+
+enum flash_type {
+	LED_FLASH = 1,
+	STROBE_FLASH,
+	GPIO_FLASH
+};
+
+enum msm_sensor_resolution_t {
+	MSM_SENSOR_RES_FULL,
+	MSM_SENSOR_RES_QTR,
+	MSM_SENSOR_RES_2,
+	MSM_SENSOR_RES_3,
+	MSM_SENSOR_RES_4,
+	MSM_SENSOR_RES_5,
+	MSM_SENSOR_RES_6,
+	MSM_SENSOR_RES_7,
+	MSM_SENSOR_INVALID_RES,
+};
+
+enum msm_camera_stream_type_t {
+	MSM_CAMERA_STREAM_PREVIEW,
+	MSM_CAMERA_STREAM_SNAPSHOT,
+	MSM_CAMERA_STREAM_VIDEO,
+	MSM_CAMERA_STREAM_INVALID,
+};
+
+enum sensor_sub_module_t {
+	SUB_MODULE_SENSOR,
+	SUB_MODULE_CHROMATIX,
+	SUB_MODULE_ACTUATOR,
+	SUB_MODULE_EEPROM,
+	SUB_MODULE_LED_FLASH,
+	SUB_MODULE_STROBE_FLASH,
+	SUB_MODULE_CSID,
+	SUB_MODULE_CSID_3D,
+	SUB_MODULE_CSIPHY,
+	SUB_MODULE_CSIPHY_3D,
+	SUB_MODULE_OIS,
+	SUB_MODULE_EXT,
+	SUB_MODULE_IR_LED,
+	SUB_MODULE_IR_CUT,
+	SUB_MODULE_MAX,
+};
+
+enum {
+	MSM_CAMERA_EFFECT_MODE_OFF,
+	MSM_CAMERA_EFFECT_MODE_MONO,
+	MSM_CAMERA_EFFECT_MODE_NEGATIVE,
+	MSM_CAMERA_EFFECT_MODE_SOLARIZE,
+	MSM_CAMERA_EFFECT_MODE_SEPIA,
+	MSM_CAMERA_EFFECT_MODE_POSTERIZE,
+	MSM_CAMERA_EFFECT_MODE_WHITEBOARD,
+	MSM_CAMERA_EFFECT_MODE_BLACKBOARD,
+	MSM_CAMERA_EFFECT_MODE_AQUA,
+	MSM_CAMERA_EFFECT_MODE_EMBOSS,
+	MSM_CAMERA_EFFECT_MODE_SKETCH,
+	MSM_CAMERA_EFFECT_MODE_NEON,
+	MSM_CAMERA_EFFECT_MODE_MAX
+};
+
+enum {
+	MSM_CAMERA_WB_MODE_AUTO,
+	MSM_CAMERA_WB_MODE_CUSTOM,
+	MSM_CAMERA_WB_MODE_INCANDESCENT,
+	MSM_CAMERA_WB_MODE_FLUORESCENT,
+	MSM_CAMERA_WB_MODE_WARM_FLUORESCENT,
+	MSM_CAMERA_WB_MODE_DAYLIGHT,
+	MSM_CAMERA_WB_MODE_CLOUDY_DAYLIGHT,
+	MSM_CAMERA_WB_MODE_TWILIGHT,
+	MSM_CAMERA_WB_MODE_SHADE,
+	MSM_CAMERA_WB_MODE_OFF,
+	MSM_CAMERA_WB_MODE_MAX
+};
+
+enum {
+	MSM_CAMERA_SCENE_MODE_OFF,
+	MSM_CAMERA_SCENE_MODE_AUTO,
+	MSM_CAMERA_SCENE_MODE_LANDSCAPE,
+	MSM_CAMERA_SCENE_MODE_SNOW,
+	MSM_CAMERA_SCENE_MODE_BEACH,
+	MSM_CAMERA_SCENE_MODE_SUNSET,
+	MSM_CAMERA_SCENE_MODE_NIGHT,
+	MSM_CAMERA_SCENE_MODE_PORTRAIT,
+	MSM_CAMERA_SCENE_MODE_BACKLIGHT,
+	MSM_CAMERA_SCENE_MODE_SPORTS,
+	MSM_CAMERA_SCENE_MODE_ANTISHAKE,
+	MSM_CAMERA_SCENE_MODE_FLOWERS,
+	MSM_CAMERA_SCENE_MODE_CANDLELIGHT,
+	MSM_CAMERA_SCENE_MODE_FIREWORKS,
+	MSM_CAMERA_SCENE_MODE_PARTY,
+	MSM_CAMERA_SCENE_MODE_NIGHT_PORTRAIT,
+	MSM_CAMERA_SCENE_MODE_THEATRE,
+	MSM_CAMERA_SCENE_MODE_ACTION,
+	MSM_CAMERA_SCENE_MODE_AR,
+	MSM_CAMERA_SCENE_MODE_FACE_PRIORITY,
+	MSM_CAMERA_SCENE_MODE_BARCODE,
+	MSM_CAMERA_SCENE_MODE_HDR,
+	MSM_CAMERA_SCENE_MODE_MAX
+};
+
+enum csid_cfg_type_t {
+	CSID_INIT,
+	CSID_CFG,
+	CSID_UPDATE_CFG,
+	CSID_TESTMODE_CFG,
+	CSID_START,
+	CSID_STOP,
+	CSID_RELEASE,
+};
+
+enum csiphy_cfg_type_t {
+	CSIPHY_INIT,
+	CSIPHY_CFG,
+	CSIPHY_START,
+	CSIPHY_STOP,
+	CSIPHY_RELEASE,
+};
+
+enum camera_vreg_type {
+	VREG_TYPE_DEFAULT,
+	VREG_TYPE_CUSTOM,
+};
+
+enum sensor_af_t {
+	SENSOR_AF_FOCUSSED,
+	SENSOR_AF_NOT_FOCUSSED,
+};
+
+enum cci_i2c_master_t {
+	MASTER_0,
+	MASTER_1,
+	MASTER_MAX,
+};
+
+struct msm_sensor_event_data {
+	uint16_t sensor_slave_addr;
+};
+
+enum msm_sensor_event_mask_index {
+	SENSOR_EVENT_MASK_INDEX_SIGNAL_STATUS	= 2,
+};
+
+#define SENSOR_EVENT_SUBS_MASK_NONE			0
+
+#define SENSOR_EVENT_SUBS_MASK_SIGNAL_STATUS \
+			(1 << SENSOR_EVENT_MASK_INDEX_SIGNAL_STATUS)
+
+enum msm_sensor_event_idx {
+	SENSOR_SIGNAL_STATUS      = 2,
+	SENSOR_EVENT_MAX          = 15
+};
+
+#define SENSOR_EVENT_BASE            (V4L2_EVENT_PRIVATE_START)
+#define SENSOR_EVENT_SIGNAL_STATUS   (SENSOR_EVENT_BASE + SENSOR_SIGNAL_STATUS)
+
+struct msm_camera_i2c_array_write_config {
+	struct msm_camera_i2c_reg_setting conf_array;
+	uint16_t slave_addr;
+};
+
+struct msm_camera_i2c_read_config {
+	uint16_t slave_addr;
+	uint16_t reg_addr;
+	enum msm_camera_i2c_reg_addr_type addr_type;
+	enum msm_camera_i2c_data_type data_type;
+	uint16_t data;
+};
+
+struct msm_camera_csi2_params {
+	struct msm_camera_csid_params csid_params;
+	struct msm_camera_csiphy_params csiphy_params;
+	uint8_t csi_clk_scale_enable;
+};
+
+struct msm_camera_csi_lane_params {
+	uint16_t csi_lane_assign;
+	uint16_t csi_lane_mask;
+};
+
+struct csi_lane_params_t {
+	uint16_t csi_lane_assign;
+	uint8_t csi_lane_mask;
+	uint8_t csi_if;
+	int8_t csid_core[2];
+	uint8_t csi_phy_sel;
+};
+
+struct msm_sensor_info_t {
+	char     sensor_name[MAX_SENSOR_NAME];
+	uint32_t session_id;
+	int32_t  subdev_id[SUB_MODULE_MAX];
+	int32_t  subdev_intf[SUB_MODULE_MAX];
+	uint8_t  is_mount_angle_valid;
+	uint32_t sensor_mount_angle;
+	int modes_supported;
+	enum camb_position_t position;
+};
+
+struct camera_vreg_t {
+	const char *reg_name;
+	int min_voltage;
+	int max_voltage;
+	int op_mode;
+	uint32_t delay;
+	const char *custom_vreg_name;
+	enum camera_vreg_type type;
+};
+
+struct sensorb_cfg_data {
+	int cfgtype;
+	union {
+		struct msm_sensor_info_t      sensor_info;
+		struct msm_sensor_init_params sensor_init_params;
+		void                         *setting;
+		struct msm_sensor_i2c_sync_params sensor_i2c_sync_params;
+	} cfg;
+};
+
+struct csid_cfg_data {
+	enum csid_cfg_type_t cfgtype;
+	union {
+		uint32_t csid_version;
+		struct msm_camera_csid_params *csid_params;
+		struct msm_camera_csid_testmode_parms *csid_testmode_params;
+		uint32_t csid_cidmask;
+	} cfg;
+};
+
+struct csiphy_cfg_data {
+	enum csiphy_cfg_type_t cfgtype;
+	union {
+		struct msm_camera_csiphy_params *csiphy_params;
+		struct msm_camera_csi_lane_params *csi_lane_params;
+	} cfg;
+};
+
+enum eeprom_cfg_type_t {
+	CFG_EEPROM_GET_INFO,
+	CFG_EEPROM_GET_CAL_DATA,
+	CFG_EEPROM_READ_CAL_DATA,
+	CFG_EEPROM_WRITE_DATA,
+	CFG_EEPROM_GET_MM_INFO,
+	CFG_EEPROM_INIT,
+};
+
+struct eeprom_get_t {
+	uint32_t num_bytes;
+};
+
+struct eeprom_read_t {
+	uint8_t *dbuffer;
+	uint32_t num_bytes;
+};
+
+struct eeprom_write_t {
+	uint8_t *dbuffer;
+	uint32_t num_bytes;
+};
+
+struct eeprom_get_cmm_t {
+	uint32_t cmm_support;
+	uint32_t cmm_compression;
+	uint32_t cmm_size;
+};
+
+struct msm_eeprom_info_t {
+	struct msm_sensor_power_setting_array *power_setting_array;
+	enum i2c_freq_mode_t i2c_freq_mode;
+	struct msm_eeprom_memory_map_array *mem_map_array;
+};
+
+struct msm_ir_led_cfg_data_t {
+	enum msm_ir_led_cfg_type_t cfg_type;
+	int32_t pwm_duty_on_ns;
+	int32_t pwm_period_ns;
+};
+
+struct msm_ir_cut_cfg_data_t {
+	enum msm_ir_cut_cfg_type_t cfg_type;
+};
+
+struct msm_eeprom_cfg_data {
+	enum eeprom_cfg_type_t cfgtype;
+	uint8_t is_supported;
+	union {
+		char eeprom_name[MAX_SENSOR_NAME];
+		struct eeprom_get_t get_data;
+		struct eeprom_read_t read_data;
+		struct eeprom_write_t write_data;
+		struct eeprom_get_cmm_t get_cmm_data;
+		struct msm_eeprom_info_t eeprom_info;
+	} cfg;
+};
+
+enum msm_sensor_cfg_type_t {
+	CFG_SET_SLAVE_INFO,
+	CFG_SLAVE_READ_I2C,
+	CFG_WRITE_I2C_ARRAY,
+	CFG_SLAVE_WRITE_I2C_ARRAY,
+	CFG_WRITE_I2C_SEQ_ARRAY,
+	CFG_POWER_UP,
+	CFG_POWER_DOWN,
+	CFG_SET_STOP_STREAM_SETTING,
+	CFG_GET_SENSOR_INFO,
+	CFG_GET_SENSOR_INIT_PARAMS,
+	CFG_SET_INIT_SETTING,
+	CFG_SET_RESOLUTION,
+	CFG_SET_STOP_STREAM,
+	CFG_SET_START_STREAM,
+	CFG_SET_SATURATION,
+	CFG_SET_CONTRAST,
+	CFG_SET_SHARPNESS,
+	CFG_SET_ISO,
+	CFG_SET_EXPOSURE_COMPENSATION,
+	CFG_SET_ANTIBANDING,
+	CFG_SET_BESTSHOT_MODE,
+	CFG_SET_EFFECT,
+	CFG_SET_WHITE_BALANCE,
+	CFG_SET_AUTOFOCUS,
+	CFG_CANCEL_AUTOFOCUS,
+	CFG_SET_STREAM_TYPE,
+	CFG_SET_I2C_SYNC_PARAM,
+	CFG_WRITE_I2C_ARRAY_ASYNC,
+	CFG_WRITE_I2C_ARRAY_SYNC,
+	CFG_WRITE_I2C_ARRAY_SYNC_BLOCK,
+	CFG_CCI_POWER_UP,
+	CFG_CCI_POWER_DOWN,
+};
+
+#define CFG_CCI_POWER_UP CFG_CCI_POWER_UP
+#define CFG_CCI_POWER_DOWN CFG_CCI_POWER_DOWN
+
+enum msm_actuator_cfg_type_t {
+	CFG_GET_ACTUATOR_INFO,
+	CFG_SET_ACTUATOR_INFO,
+	CFG_SET_DEFAULT_FOCUS,
+	CFG_MOVE_FOCUS,
+	CFG_SET_POSITION,
+	CFG_ACTUATOR_POWERDOWN,
+	CFG_ACTUATOR_POWERUP,
+	CFG_ACTUATOR_INIT,
+};
+
+struct msm_ois_opcode {
+	uint32_t prog;
+	uint32_t coeff;
+	uint32_t pheripheral;
+	uint32_t memory;
+};
+
+enum msm_ois_cfg_type_t {
+	CFG_OIS_INIT,
+	CFG_OIS_POWERDOWN,
+	CFG_OIS_POWERUP,
+	CFG_OIS_CONTROL,
+	CFG_OIS_I2C_WRITE_SEQ_TABLE,
+};
+
+enum msm_ois_cfg_download_type_t {
+	CFG_OIS_DOWNLOAD,
+	CFG_OIS_DATA_CONFIG,
+};
+
+enum msm_ois_i2c_operation {
+	MSM_OIS_WRITE = 0,
+	MSM_OIS_POLL,
+};
+
+struct reg_settings_ois_t {
+	uint16_t reg_addr;
+	enum msm_camera_i2c_reg_addr_type addr_type;
+	uint32_t reg_data;
+	enum msm_camera_i2c_data_type data_type;
+	enum msm_ois_i2c_operation i2c_operation;
+	uint32_t delay;
+};
+
+struct msm_ois_params_t {
+	uint16_t data_size;
+	uint16_t setting_size;
+	uint32_t i2c_addr;
+	enum i2c_freq_mode_t i2c_freq_mode;
+	enum msm_camera_i2c_reg_addr_type i2c_addr_type;
+	enum msm_camera_i2c_data_type i2c_data_type;
+	struct reg_settings_ois_t *settings;
+};
+
+struct msm_ois_set_info_t {
+	struct msm_ois_params_t ois_params;
+};
+
+struct msm_actuator_move_params_t {
+	int8_t dir;
+	int8_t sign_dir;
+	int16_t dest_step_pos;
+	int32_t num_steps;
+	uint16_t curr_lens_pos;
+	struct damping_params_t *ringing_params;
+};
+
+struct msm_actuator_tuning_params_t {
+	int16_t initial_code;
+	uint16_t pwd_step;
+	uint16_t region_size;
+	uint32_t total_steps;
+	struct region_params_t *region_params;
+};
+
+struct park_lens_data_t {
+	uint32_t damping_step;
+	uint32_t damping_delay;
+	uint32_t hw_params;
+	uint32_t max_step;
+};
+
+struct msm_actuator_params_t {
+	enum actuator_type act_type;
+	uint8_t reg_tbl_size;
+	uint16_t data_size;
+	uint16_t init_setting_size;
+	uint32_t i2c_addr;
+	enum i2c_freq_mode_t i2c_freq_mode;
+	enum msm_camera_i2c_reg_addr_type i2c_addr_type;
+	enum msm_camera_i2c_data_type i2c_data_type;
+	struct msm_actuator_reg_params_t *reg_tbl_params;
+	struct reg_settings_t *init_settings;
+	struct park_lens_data_t park_lens;
+};
+
+struct msm_actuator_set_info_t {
+	struct msm_actuator_params_t actuator_params;
+	struct msm_actuator_tuning_params_t af_tuning_params;
+};
+
+struct msm_actuator_get_info_t {
+	uint32_t focal_length_num;
+	uint32_t focal_length_den;
+	uint32_t f_number_num;
+	uint32_t f_number_den;
+	uint32_t f_pix_num;
+	uint32_t f_pix_den;
+	uint32_t total_f_dist_num;
+	uint32_t total_f_dist_den;
+	uint32_t hor_view_angle_num;
+	uint32_t hor_view_angle_den;
+	uint32_t ver_view_angle_num;
+	uint32_t ver_view_angle_den;
+};
+
+enum af_camera_name {
+	ACTUATOR_MAIN_CAM_0,
+	ACTUATOR_MAIN_CAM_1,
+	ACTUATOR_MAIN_CAM_2,
+	ACTUATOR_MAIN_CAM_3,
+	ACTUATOR_MAIN_CAM_4,
+	ACTUATOR_MAIN_CAM_5,
+	ACTUATOR_WEB_CAM_0,
+	ACTUATOR_WEB_CAM_1,
+	ACTUATOR_WEB_CAM_2,
+};
+
+struct msm_ois_slave_info {
+	char ois_name[MAX_OIS_NAME_SIZE];
+	uint32_t i2c_addr;
+	struct msm_ois_opcode opcode;
+};
+struct msm_ois_cfg_data {
+	int cfgtype;
+	union {
+		struct msm_ois_set_info_t set_info;
+		struct msm_camera_i2c_seq_reg_setting *settings;
+	} cfg;
+};
+
+struct msm_ois_cfg_download_data {
+	int cfgtype;
+	struct msm_ois_slave_info slave_info;
+};
+
+struct msm_actuator_set_position_t {
+	uint16_t number_of_steps;
+	uint32_t hw_params;
+	uint16_t pos[MAX_NUMBER_OF_STEPS];
+	uint16_t delay[MAX_NUMBER_OF_STEPS];
+};
+
+struct msm_actuator_cfg_data {
+	int cfgtype;
+	uint8_t is_af_supported;
+	union {
+		struct msm_actuator_move_params_t move;
+		struct msm_actuator_set_info_t set_info;
+		struct msm_actuator_get_info_t get_info;
+		struct msm_actuator_set_position_t setpos;
+		enum af_camera_name cam_name;
+	} cfg;
+};
+
+enum msm_camera_led_config_t {
+	MSM_CAMERA_LED_OFF,
+	MSM_CAMERA_LED_LOW,
+	MSM_CAMERA_LED_HIGH,
+	MSM_CAMERA_LED_INIT,
+	MSM_CAMERA_LED_RELEASE,
+};
+
+struct msm_camera_led_cfg_t {
+	enum msm_camera_led_config_t cfgtype;
+	int32_t torch_current[MAX_LED_TRIGGERS];
+	int32_t flash_current[MAX_LED_TRIGGERS];
+	int32_t flash_duration[MAX_LED_TRIGGERS];
+};
+
+struct msm_flash_init_info_t {
+	enum msm_flash_driver_type flash_driver_type;
+	uint32_t slave_addr;
+	enum i2c_freq_mode_t i2c_freq_mode;
+	struct msm_sensor_power_setting_array *power_setting_array;
+	struct msm_camera_i2c_reg_setting_array *settings;
+};
+
+struct msm_flash_cfg_data_t {
+	enum msm_flash_cfg_type_t cfg_type;
+	int32_t flash_current[MAX_LED_TRIGGERS];
+	int32_t flash_duration[MAX_LED_TRIGGERS];
+	union {
+		struct msm_flash_init_info_t *flash_init_info;
+		struct msm_camera_i2c_reg_setting_array *settings;
+	} cfg;
+};
+
+/* sensor init structures and enums */
+enum msm_sensor_init_cfg_type_t {
+	CFG_SINIT_PROBE,
+	CFG_SINIT_PROBE_DONE,
+	CFG_SINIT_PROBE_WAIT_DONE,
+};
+
+struct sensor_init_cfg_data {
+	enum msm_sensor_init_cfg_type_t cfgtype;
+	struct msm_sensor_info_t        probed_info;
+	char                            entity_name[MAX_SENSOR_NAME];
+	union {
+		void *setting;
+	} cfg;
+};
+
+#define VIDIOC_MSM_SENSOR_CFG \
+	_IOWR('V', BASE_VIDIOC_PRIVATE + 1, struct sensorb_cfg_data)
+
+#define VIDIOC_MSM_SENSOR_RELEASE \
+	_IO('V', BASE_VIDIOC_PRIVATE + 2)
+
+#define VIDIOC_MSM_SENSOR_GET_SUBDEV_ID \
+	_IOWR('V', BASE_VIDIOC_PRIVATE + 3, uint32_t)
+
+#define VIDIOC_MSM_CSIPHY_IO_CFG \
+	_IOWR('V', BASE_VIDIOC_PRIVATE + 4, struct csiphy_cfg_data)
+
+#define VIDIOC_MSM_CSID_IO_CFG \
+	_IOWR('V', BASE_VIDIOC_PRIVATE + 5, struct csid_cfg_data)
+
+#define VIDIOC_MSM_ACTUATOR_CFG \
+	_IOWR('V', BASE_VIDIOC_PRIVATE + 6, struct msm_actuator_cfg_data)
+
+#define VIDIOC_MSM_FLASH_LED_DATA_CFG \
+	_IOWR('V', BASE_VIDIOC_PRIVATE + 7, struct msm_camera_led_cfg_t)
+
+#define VIDIOC_MSM_EEPROM_CFG \
+	_IOWR('V', BASE_VIDIOC_PRIVATE + 8, struct msm_eeprom_cfg_data)
+
+#define VIDIOC_MSM_SENSOR_GET_AF_STATUS \
+	_IOWR('V', BASE_VIDIOC_PRIVATE + 9, uint32_t)
+
+#define VIDIOC_MSM_SENSOR_INIT_CFG \
+	_IOWR('V', BASE_VIDIOC_PRIVATE + 10, struct sensor_init_cfg_data)
+
+#define VIDIOC_MSM_OIS_CFG \
+	_IOWR('V', BASE_VIDIOC_PRIVATE + 11, struct msm_ois_cfg_data)
+
+#define VIDIOC_MSM_FLASH_CFG \
+	_IOWR('V', BASE_VIDIOC_PRIVATE + 13, struct msm_flash_cfg_data_t)
+
+#define VIDIOC_MSM_OIS_CFG_DOWNLOAD \
+	_IOWR('V', BASE_VIDIOC_PRIVATE + 14, struct msm_ois_cfg_download_data)
+
+#define VIDIOC_MSM_IR_LED_CFG \
+	_IOWR('V', BASE_VIDIOC_PRIVATE + 15, struct msm_ir_led_cfg_data_t)
+
+#define VIDIOC_MSM_IR_CUT_CFG \
+	_IOWR('V', BASE_VIDIOC_PRIVATE + 16, struct msm_ir_cut_cfg_data_t)
+
+#endif /* __UAPI_LINUX_MSM_AIS_SENSOR_H */
+
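The VIDIOC_MSM_* definitions above are the whole userspace contract of the AIS sensor driver: each config ioctl carries a cfgtype discriminator plus a union payload. A minimal sketch of how a client would use it, assuming the header is installed as <media/ais/msm_ais_sensor.h>; the subdev node path is hypothetical, since the actual index depends on how the media controller enumerates the sensor subdev on the target:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <media/ais/msm_ais_sensor.h>

static int query_sensor_info(const char *subdev_path)
{
	struct sensorb_cfg_data cdata;
	int fd, rc;

	fd = open(subdev_path, O_RDWR); /* e.g. "/dev/v4l-subdev10" (hypothetical) */
	if (fd < 0)
		return -1;

	memset(&cdata, 0, sizeof(cdata));
	cdata.cfgtype = CFG_GET_SENSOR_INFO;

	/* on success the driver fills cfg.sensor_info */
	rc = ioctl(fd, VIDIOC_MSM_SENSOR_CFG, &cdata);
	if (!rc)
		printf("sensor %s, session %u\n",
		       cdata.cfg.sensor_info.sensor_name,
		       cdata.cfg.sensor_info.session_id);

	close(fd);
	return rc;
}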
diff -Nruw linux-4.4.115-fbx/include/uapi/media./ais/msm_ais_sensor_sdk.h linux-4.4.115-fbx/include/uapi/media/ais/msm_ais_sensor_sdk.h
--- linux-4.4.115-fbx/include/uapi/media./ais/msm_ais_sensor_sdk.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/include/uapi/media/ais/msm_ais_sensor_sdk.h	2019-01-22 16:16:28.599292554 +0100
@@ -0,0 +1,425 @@
+#ifndef __UAPI_LINUX_MSM_AIS_SENSOR_SDK_H
+#define __UAPI_LINUX_MSM_AIS_SENSOR_SDK_H
+
+#include <linux/videodev2.h>
+
+#define KVERSION 0x1
+
+#define MAX_POWER_CONFIG      12
+#define GPIO_OUT_LOW          (0 << 1)
+#define GPIO_OUT_HIGH         (1 << 1)
+#define CSI_EMBED_DATA        0x12
+#define CSI_RESERVED_DATA_0   0x13
+#define CSI_YUV422_8          0x1E
+#define CSI_RAW8              0x2A
+#define CSI_RAW10             0x2B
+#define CSI_RAW12             0x2C
+#define CSI_DECODE_6BIT         0
+#define CSI_DECODE_8BIT         1
+#define CSI_DECODE_10BIT        2
+#define CSI_DECODE_12BIT        3
+#define CSI_DECODE_DPCM_10_6_10 4
+#define CSI_DECODE_DPCM_10_8_10 5
+#define MAX_CID                 16
+#define I2C_SEQ_REG_DATA_MAX    1024
+#define I2C_REG_DATA_MAX       (8*1024)
+
+#define MSM_V4L2_PIX_FMT_META v4l2_fourcc('M', 'E', 'T', 'A') /* META */
+#define MSM_V4L2_PIX_FMT_SBGGR14 v4l2_fourcc('B', 'G', '1', '4')
+	/* 14  BGBG.. GRGR.. */
+#define MSM_V4L2_PIX_FMT_SGBRG14 v4l2_fourcc('G', 'B', '1', '4')
+	/* 14  GBGB.. RGRG.. */
+#define MSM_V4L2_PIX_FMT_SGRBG14 v4l2_fourcc('B', 'A', '1', '4')
+	/* 14  GRGR.. BGBG.. */
+#define MSM_V4L2_PIX_FMT_SRGGB14 v4l2_fourcc('R', 'G', '1', '4')
+	/* 14  RGRG.. GBGB.. */
+
+#define MAX_ACTUATOR_REG_TBL_SIZE 8
+#define MAX_ACTUATOR_REGION       5
+#define NUM_ACTUATOR_DIR          2
+#define MAX_ACTUATOR_SCENARIO     8
+#define MAX_ACT_MOD_NAME_SIZE     32
+#define MAX_ACT_NAME_SIZE         32
+#define MAX_ACTUATOR_INIT_SET     120
+#define MAX_I2C_REG_SET           12
+
+#define MAX_LED_TRIGGERS          3
+
+#define MSM_EEPROM_MEMORY_MAP_MAX_SIZE  80
+#define MSM_EEPROM_MAX_MEM_MAP_CNT      8
+
+enum msm_sensor_camera_id_t {
+	CAMERA_0,
+	CAMERA_1,
+	CAMERA_2,
+	CAMERA_3,
+	MAX_CAMERAS,
+};
+
+enum i2c_freq_mode_t {
+	I2C_STANDARD_MODE,
+	I2C_FAST_MODE,
+	I2C_CUSTOM_MODE,
+	I2C_FAST_PLUS_MODE,
+	I2C_MAX_MODES,
+};
+
+enum camb_position_t {
+	BACK_CAMERA_B,
+	FRONT_CAMERA_B,
+	AUX_CAMERA_B = 0x100,
+	INVALID_CAMERA_B,
+};
+
+enum msm_sensor_power_seq_type_t {
+	SENSOR_CLK,
+	SENSOR_GPIO,
+	SENSOR_VREG,
+	SENSOR_I2C_MUX,
+	SENSOR_I2C,
+};
+
+enum msm_camera_i2c_reg_addr_type {
+	MSM_CAMERA_I2C_BYTE_ADDR = 1,
+	MSM_CAMERA_I2C_WORD_ADDR,
+	MSM_CAMERA_I2C_3B_ADDR,
+	MSM_CAMERA_I2C_ADDR_TYPE_MAX,
+};
+
+enum msm_camera_i2c_data_type {
+	MSM_CAMERA_I2C_BYTE_DATA = 1,
+	MSM_CAMERA_I2C_WORD_DATA,
+	MSM_CAMERA_I2C_DWORD_DATA,
+	MSM_CAMERA_I2C_SET_BYTE_MASK,
+	MSM_CAMERA_I2C_UNSET_BYTE_MASK,
+	MSM_CAMERA_I2C_SET_WORD_MASK,
+	MSM_CAMERA_I2C_UNSET_WORD_MASK,
+	MSM_CAMERA_I2C_SET_BYTE_WRITE_MASK_DATA,
+	MSM_CAMERA_I2C_DATA_TYPE_MAX,
+};
+
+enum msm_sensor_power_seq_gpio_t {
+	SENSOR_GPIO_RESET,
+	SENSOR_GPIO_STANDBY,
+	SENSOR_GPIO_AF_PWDM,
+	SENSOR_GPIO_VIO,
+	SENSOR_GPIO_VANA,
+	SENSOR_GPIO_VDIG,
+	SENSOR_GPIO_VAF,
+	SENSOR_GPIO_FL_EN,
+	SENSOR_GPIO_FL_NOW,
+	SENSOR_GPIO_FL_RESET,
+	SENSOR_GPIO_CUSTOM1,
+	SENSOR_GPIO_CUSTOM2,
+	SENSOR_GPIO_MAX,
+};
+
+enum msm_ir_cut_filter_gpio_t {
+	IR_CUT_FILTER_GPIO_P = 0,
+	IR_CUT_FILTER_GPIO_M,
+	IR_CUT_FILTER_GPIO_MAX,
+};
+#define IR_CUT_FILTER_GPIO_P IR_CUT_FILTER_GPIO_P
+#define IR_CUT_FILTER_GPIO_M IR_CUT_FILTER_GPIO_M
+#define IR_CUT_FILTER_GPIO_MAX IR_CUT_FILTER_GPIO_MAX
+
+enum msm_camera_vreg_name_t {
+	CAM_VDIG,
+	CAM_VIO,
+	CAM_VANA,
+	CAM_VAF,
+	CAM_V_CUSTOM1,
+	CAM_V_CUSTOM2,
+	CAM_VREG_MAX,
+};
+
+enum msm_sensor_clk_type_t {
+	SENSOR_CAM_MCLK,
+	SENSOR_CAM_CLK,
+	SENSOR_CAM_CLK_MAX,
+};
+
+enum camerab_mode_t {
+	CAMERA_MODE_2D_B = (1<<0),
+	CAMERA_MODE_3D_B = (1<<1),
+	CAMERA_MODE_INVALID = (1<<2),
+};
+
+enum msm_actuator_data_type {
+	MSM_ACTUATOR_BYTE_DATA = 1,
+	MSM_ACTUATOR_WORD_DATA,
+};
+
+enum msm_actuator_addr_type {
+	MSM_ACTUATOR_BYTE_ADDR = 1,
+	MSM_ACTUATOR_WORD_ADDR,
+};
+
+enum msm_actuator_write_type {
+	MSM_ACTUATOR_WRITE_HW_DAMP,
+	MSM_ACTUATOR_WRITE_DAC,
+	MSM_ACTUATOR_WRITE,
+	MSM_ACTUATOR_WRITE_DIR_REG,
+	MSM_ACTUATOR_POLL,
+	MSM_ACTUATOR_READ_WRITE,
+};
+
+enum msm_actuator_i2c_operation {
+	MSM_ACT_WRITE = 0,
+	MSM_ACT_POLL,
+};
+
+enum actuator_type {
+	ACTUATOR_VCM,
+	ACTUATOR_PIEZO,
+	ACTUATOR_HVCM,
+	ACTUATOR_BIVCM,
+};
+
+enum msm_flash_driver_type {
+	FLASH_DRIVER_PMIC,
+	FLASH_DRIVER_I2C,
+	FLASH_DRIVER_GPIO,
+	FLASH_DRIVER_DEFAULT
+};
+
+enum msm_flash_cfg_type_t {
+	CFG_FLASH_INIT,
+	CFG_FLASH_RELEASE,
+	CFG_FLASH_OFF,
+	CFG_FLASH_LOW,
+	CFG_FLASH_HIGH,
+};
+
+enum msm_ir_led_cfg_type_t {
+	CFG_IR_LED_INIT = 0,
+	CFG_IR_LED_RELEASE,
+	CFG_IR_LED_OFF,
+	CFG_IR_LED_ON,
+};
+#define CFG_IR_LED_INIT CFG_IR_LED_INIT
+#define CFG_IR_LED_RELEASE CFG_IR_LED_RELEASE
+#define CFG_IR_LED_OFF CFG_IR_LED_OFF
+#define CFG_IR_LED_ON CFG_IR_LED_ON
+
+enum msm_ir_cut_cfg_type_t {
+	CFG_IR_CUT_INIT = 0,
+	CFG_IR_CUT_RELEASE,
+	CFG_IR_CUT_OFF,
+	CFG_IR_CUT_ON,
+};
+#define CFG_IR_CUT_INIT CFG_IR_CUT_INIT
+#define CFG_IR_CUT_RELEASE CFG_IR_CUT_RELEASE
+#define CFG_IR_CUT_OFF CFG_IR_CUT_OFF
+#define CFG_IR_CUT_ON CFG_IR_CUT_ON
+
+enum msm_sensor_output_format_t {
+	MSM_SENSOR_BAYER,
+	MSM_SENSOR_YCBCR,
+	MSM_SENSOR_META,
+};
+
+struct msm_sensor_power_setting {
+	enum msm_sensor_power_seq_type_t seq_type;
+	unsigned short seq_val;
+	long config_val;
+	unsigned short delay;
+	void *data[10];
+};
+
+struct msm_sensor_power_setting_array {
+	struct msm_sensor_power_setting  power_setting_a[MAX_POWER_CONFIG];
+	struct msm_sensor_power_setting *power_setting;
+	unsigned short size;
+	struct msm_sensor_power_setting  power_down_setting_a[MAX_POWER_CONFIG];
+	struct msm_sensor_power_setting *power_down_setting;
+	unsigned short size_down;
+};
+
+enum msm_camera_i2c_operation {
+	MSM_CAM_WRITE = 0,
+	MSM_CAM_POLL,
+	MSM_CAM_READ,
+};
+
+struct msm_sensor_i2c_sync_params {
+	unsigned int cid;
+	int csid;
+	unsigned short line;
+	unsigned short delay;
+};
+
+struct msm_camera_reg_settings_t {
+	uint16_t reg_addr;
+	enum msm_camera_i2c_reg_addr_type addr_type;
+	uint16_t reg_data;
+	enum msm_camera_i2c_data_type data_type;
+	enum msm_camera_i2c_operation i2c_operation;
+	uint16_t delay;
+};
+
+struct msm_eeprom_mem_map_t {
+	int slave_addr;
+	struct msm_camera_reg_settings_t
+		mem_settings[MSM_EEPROM_MEMORY_MAP_MAX_SIZE];
+	int memory_map_size;
+};
+
+struct msm_eeprom_memory_map_array {
+	struct msm_eeprom_mem_map_t memory_map[MSM_EEPROM_MAX_MEM_MAP_CNT];
+	uint32_t msm_size_of_max_mappings;
+};
+
+struct msm_sensor_init_params {
+	/* mask of modes supported: 2D, 3D */
+	int                 modes_supported;
+	/* sensor position: front, back */
+	enum camb_position_t position;
+	/* sensor mount angle */
+	unsigned int            sensor_mount_angle;
+};
+
+struct msm_sensor_id_info_t {
+	unsigned short sensor_id_reg_addr;
+	unsigned short sensor_id;
+	unsigned short sensor_id_mask;
+};
+
+struct msm_camera_sensor_gpio_intr_config {
+	int gpio_num;
+	uint32_t gpio_trigger;
+};
+
+struct msm_camera_sensor_slave_info {
+	char sensor_name[32];
+	char eeprom_name[32];
+	char actuator_name[32];
+	char ois_name[32];
+	char flash_name[32];
+	enum msm_sensor_camera_id_t camera_id;
+	unsigned short slave_addr;
+	enum i2c_freq_mode_t i2c_freq_mode;
+	enum msm_camera_i2c_reg_addr_type addr_type;
+	struct msm_sensor_id_info_t sensor_id_info;
+	struct msm_sensor_power_setting_array power_setting_array;
+	unsigned char  is_init_params_valid;
+	struct msm_sensor_init_params sensor_init_params;
+	enum msm_sensor_output_format_t output_format;
+	struct msm_camera_sensor_gpio_intr_config
+				gpio_intr_config;
+	unsigned int camera_sensor_device_id;
+};
+
+struct msm_camera_i2c_reg_array {
+	unsigned short reg_addr;
+	unsigned short reg_data;
+	unsigned int delay;
+};
+
+struct msm_camera_i2c_reg_setting {
+	struct msm_camera_i2c_reg_array *reg_setting;
+	unsigned short size;
+	enum msm_camera_i2c_reg_addr_type addr_type;
+	enum msm_camera_i2c_data_type data_type;
+	unsigned short delay;
+};
+
+struct msm_camera_csid_vc_cfg {
+	unsigned char cid;
+	unsigned char dt;
+	unsigned char decode_format;
+};
+
+struct msm_camera_csid_lut_params {
+	unsigned char num_cid;
+	struct msm_camera_csid_vc_cfg vc_cfg_a[MAX_CID];
+	struct msm_camera_csid_vc_cfg *vc_cfg[MAX_CID];
+};
+
+struct msm_camera_csid_params {
+	unsigned char lane_cnt;
+	unsigned short lane_assign;
+	unsigned char phy_sel;
+	unsigned int csi_clk;
+	struct msm_camera_csid_lut_params lut_params;
+	unsigned char csi_3p_sel;
+};
+
+struct msm_camera_csid_testmode_parms {
+	unsigned int num_bytes_per_line;
+	unsigned int num_lines;
+	unsigned int h_blanking_count;
+	unsigned int v_blanking_count;
+	unsigned int payload_mode;
+};
+
+struct msm_camera_csiphy_params {
+	unsigned char lane_cnt;
+	unsigned char settle_cnt;
+	unsigned short lane_mask;
+	unsigned char combo_mode;
+	unsigned char csid_core;
+	unsigned int csiphy_clk;
+	unsigned char csi_3phase;
+};
+
+struct msm_camera_i2c_seq_reg_array {
+	unsigned short reg_addr;
+	unsigned char reg_data[I2C_SEQ_REG_DATA_MAX];
+	unsigned short reg_data_size;
+};
+
+struct msm_camera_i2c_seq_reg_setting {
+	struct msm_camera_i2c_seq_reg_array *reg_setting;
+	unsigned short size;
+	enum msm_camera_i2c_reg_addr_type addr_type;
+	unsigned short delay;
+};
+
+struct msm_actuator_reg_params_t {
+	enum msm_actuator_write_type reg_write_type;
+	unsigned int hw_mask;
+	unsigned short reg_addr;
+	unsigned short hw_shift;
+	unsigned short data_shift;
+	unsigned short data_type;
+	unsigned short addr_type;
+	unsigned short reg_data;
+	unsigned short delay;
+};
+
+
+struct damping_params_t {
+	unsigned int damping_step;
+	unsigned int damping_delay;
+	unsigned int hw_params;
+};
+
+struct region_params_t {
+	/* [0] = ForwardDirection Macro boundary
+	 * [1] = ReverseDirection Inf boundary
+	 */
+	unsigned short step_bound[2];
+	unsigned short code_per_step;
+	/* qvalue for converting float type numbers to integer format */
+	unsigned int qvalue;
+};
+
+struct reg_settings_t {
+	unsigned short reg_addr;
+	enum msm_camera_i2c_reg_addr_type addr_type;
+	unsigned short reg_data;
+	enum msm_camera_i2c_data_type data_type;
+	enum msm_actuator_i2c_operation i2c_operation;
+	unsigned int delay;
+};
+
+struct msm_camera_i2c_reg_setting_array {
+	struct msm_camera_i2c_reg_array reg_setting_a[MAX_I2C_REG_SET];
+	unsigned short size;
+	enum msm_camera_i2c_reg_addr_type addr_type;
+	enum msm_camera_i2c_data_type data_type;
+	unsigned short delay;
+};
+
+#endif /* __UAPI_LINUX_MSM_AIS_SENSOR_SDK_H */
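The I2C structures in this header are consumed by the CFG_WRITE_I2C_ARRAY family of sensor ioctls: userspace describes a table of register writes plus its address/data width, and the driver walks the table. An illustrative sketch of such a table; the register addresses and values below are invented, not taken from any real sensor:

static struct msm_camera_i2c_reg_array init_regs[] = {
	{ .reg_addr = 0x0100, .reg_data = 0x00, .delay = 0 }, /* hypothetical: stream off */
	{ .reg_addr = 0x0202, .reg_data = 0x03, .delay = 0 }, /* hypothetical: exposure */
	{ .reg_addr = 0x0100, .reg_data = 0x01, .delay = 1 }, /* hypothetical: stream on */
};

static struct msm_camera_i2c_reg_setting init_setting = {
	.reg_setting = init_regs,
	.size        = sizeof(init_regs) / sizeof(init_regs[0]),
	.addr_type   = MSM_CAMERA_I2C_WORD_ADDR,
	.data_type   = MSM_CAMERA_I2C_BYTE_DATA,
	.delay       = 0, /* ms to wait after the whole table is written */
};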
diff -Nruw linux-4.4.115-fbx/include/uapi/media./Kbuild linux-4.4.115-fbx/include/uapi/media/Kbuild
--- linux-4.4.115-fbx/include/uapi/media./Kbuild	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/include/uapi/media/Kbuild	2019-01-22 16:16:28.599292554 +0100
@@ -0,0 +1,23 @@
+header-y += ais/
+header-y += msm_cam_sensor.h
+header-y += msm_camera.h
+header-y += msm_camsensor_sdk.h
+header-y += msm_fd.h
+header-y += msm_gemini.h
+header-y += msm_gestures.h
+header-y += msm_isp.h
+header-y += msm_jpeg.h
+header-y += msm_jpeg_dma.h
+header-y += msm_media_info.h
+header-y += msm_mercury.h
+header-y += msm_sde_rotator.h
+header-y += msm_vidc.h
+header-y += msm_vpu.h
+header-y += msmb_camera.h
+header-y += msmb_generic_buf_mgr.h
+header-y += msmb_isp.h
+header-y += msmb_ispif.h
+header-y += msmb_pproc.h
+header-y += radio-iris.h
+header-y += radio-iris-commands.h
+header-y += msm_ba.h
diff -Nruw linux-4.4.115-fbx/include/uapi/media./msm_ba.h linux-4.4.115-fbx/include/uapi/media/msm_ba.h
--- linux-4.4.115-fbx/include/uapi/media./msm_ba.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/include/uapi/media/msm_ba.h	2019-10-29 09:26:25.553221870 +0100
@@ -0,0 +1,45 @@
+/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __UAPI_MSM_BA_H__
+#define __UAPI_MSM_BA_H__
+
+#include <linux/videodev2.h>
+#include <linux/types.h>
+
+/* CSI control params */
+struct csi_ctrl_params {
+	uint32_t settle_count;
+	uint32_t lane_count;
+};
+
+/* Field info params */
+struct field_info_params {
+	bool even_field;
+	struct timeval field_ts;
+};
+
+/* private ioctl structure */
+struct msm_ba_v4l2_ioctl_t {
+	size_t len;
+	void __user *ptr;
+};
+
+/* ADV7481 private ioctls for CSI control params */
+#define VIDIOC_G_CSI_PARAMS \
+	_IOWR('V', BASE_VIDIOC_PRIVATE, struct msm_ba_v4l2_ioctl_t)
+/* ADV7481 private ioctls for field info query */
+#define VIDIOC_G_FIELD_INFO \
+	_IOWR('V', BASE_VIDIOC_PRIVATE + 40, struct msm_ba_v4l2_ioctl_t)
+
+#endif
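Both BA ioctls funnel their real payload through the msm_ba_v4l2_ioctl_t wrapper: userspace passes a length and a pointer, and the driver copies that many bytes in or out. A sketch of reading the CSI settle/lane counts, assuming fd is an already-open BA video node:

#include <sys/ioctl.h>
#include <media/msm_ba.h>

static int get_csi_params(int fd, struct csi_ctrl_params *out)
{
	struct msm_ba_v4l2_ioctl_t arg = {
		.len = sizeof(*out),
		.ptr = out,
	};

	/* the driver fills *out with settle_count and lane_count */
	return ioctl(fd, VIDIOC_G_CSI_PARAMS, &arg);
}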
diff -Nruw linux-4.4.115-fbx/include/uapi/media./msmb_camera.h linux-4.4.115-fbx/include/uapi/media/msmb_camera.h
--- linux-4.4.115-fbx/include/uapi/media./msmb_camera.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/include/uapi/media/msmb_camera.h	2019-10-29 09:26:25.557221909 +0100
@@ -0,0 +1,228 @@
+#ifndef __UAPI_LINUX_MSMB_CAMERA_H
+#define __UAPI_LINUX_MSMB_CAMERA_H
+
+#include <linux/videodev2.h>
+#include <linux/types.h>
+#include <linux/ioctl.h>
+
+#define MSM_CAM_LOGSYNC_FILE_NAME "logsync"
+#define MSM_CAM_LOGSYNC_FILE_BASEDIR "camera"
+
+#define MSM_CAM_V4L2_IOCTL_NOTIFY \
+	_IOW('V', BASE_VIDIOC_PRIVATE + 30, struct msm_v4l2_event_data)
+
+#define MSM_CAM_V4L2_IOCTL_NOTIFY_META \
+	_IOW('V', BASE_VIDIOC_PRIVATE + 31, struct msm_v4l2_event_data)
+
+#define MSM_CAM_V4L2_IOCTL_CMD_ACK \
+	_IOW('V', BASE_VIDIOC_PRIVATE + 32, struct msm_v4l2_event_data)
+
+#define MSM_CAM_V4L2_IOCTL_NOTIFY_ERROR \
+	_IOW('V', BASE_VIDIOC_PRIVATE + 33, struct msm_v4l2_event_data)
+
+#define MSM_CAM_V4L2_IOCTL_NOTIFY_DEBUG \
+	_IOW('V', BASE_VIDIOC_PRIVATE + 34, struct msm_v4l2_event_data)
+
+#define MSM_CAM_V4L2_IOCTL_DAEMON_DISABLED \
+	_IOW('V', BASE_VIDIOC_PRIVATE + 35, struct msm_v4l2_event_data)
+
+#define QCAMERA_DEVICE_GROUP_ID	1
+#define QCAMERA_VNODE_GROUP_ID	2
+#define MSM_CAMERA_NAME			"msm_camera"
+#define MSM_CONFIGURATION_NAME	"msm_config"
+
+#define MSM_CAMERA_SUBDEV_CSIPHY       0
+#define MSM_CAMERA_SUBDEV_CSID         1
+#define MSM_CAMERA_SUBDEV_ISPIF        2
+#define MSM_CAMERA_SUBDEV_VFE          3
+#define MSM_CAMERA_SUBDEV_AXI          4
+#define MSM_CAMERA_SUBDEV_VPE          5
+#define MSM_CAMERA_SUBDEV_SENSOR       6
+#define MSM_CAMERA_SUBDEV_ACTUATOR     7
+#define MSM_CAMERA_SUBDEV_EEPROM       8
+#define MSM_CAMERA_SUBDEV_CPP          9
+#define MSM_CAMERA_SUBDEV_CCI          10
+#define MSM_CAMERA_SUBDEV_LED_FLASH    11
+#define MSM_CAMERA_SUBDEV_STROBE_FLASH 12
+#define MSM_CAMERA_SUBDEV_BUF_MNGR     13
+#define MSM_CAMERA_SUBDEV_SENSOR_INIT  14
+#define MSM_CAMERA_SUBDEV_OIS          15
+#define MSM_CAMERA_SUBDEV_FLASH        16
+#define MSM_CAMERA_SUBDEV_IR_LED       17
+#define MSM_CAMERA_SUBDEV_IR_CUT       18
+#define MSM_CAMERA_SUBDEV_EXT          19
+#define MSM_CAMERA_SUBDEV_TOF          20
+#define MSM_CAMERA_SUBDEV_LASER_LED    21
+#define MSM_MAX_CAMERA_SENSORS  6
+
+/* The macro below puts an upper limit on the number of buffers
+ * requested per stream. If an extremely large count is requested
+ * due to data structure corruption, we return an error to avoid
+ * integer overflow. Group processing can have at most 9 groups
+ * of 8 bufs each, hence 72. This value may be made configurable
+ * in the future. */
+#define MSM_CAMERA_MAX_STREAM_BUF 72
+
+/* Max batch size of processing */
+#define MSM_CAMERA_MAX_USER_BUFF_CNT 16
+
+/* feature base */
+#define MSM_CAMERA_FEATURE_BASE     0x00010000
+#define MSM_CAMERA_FEATURE_SHUTDOWN (MSM_CAMERA_FEATURE_BASE + 1)
+
+#define MSM_CAMERA_STATUS_BASE      0x00020000
+#define MSM_CAMERA_STATUS_FAIL      (MSM_CAMERA_STATUS_BASE + 1)
+#define MSM_CAMERA_STATUS_SUCCESS   (MSM_CAMERA_STATUS_BASE + 2)
+
+/* event type */
+#define MSM_CAMERA_V4L2_EVENT_TYPE (V4L2_EVENT_PRIVATE_START + 0x00002000)
+
+/* event id */
+#define MSM_CAMERA_EVENT_MIN    0
+#define MSM_CAMERA_NEW_SESSION  (MSM_CAMERA_EVENT_MIN + 1)
+#define MSM_CAMERA_DEL_SESSION  (MSM_CAMERA_EVENT_MIN + 2)
+#define MSM_CAMERA_SET_PARM     (MSM_CAMERA_EVENT_MIN + 3)
+#define MSM_CAMERA_GET_PARM     (MSM_CAMERA_EVENT_MIN + 4)
+#define MSM_CAMERA_MAPPING_CFG  (MSM_CAMERA_EVENT_MIN + 5)
+#define MSM_CAMERA_MAPPING_SES  (MSM_CAMERA_EVENT_MIN + 6)
+#define MSM_CAMERA_MSM_NOTIFY   (MSM_CAMERA_EVENT_MIN + 7)
+#define MSM_CAMERA_EVENT_MAX    (MSM_CAMERA_EVENT_MIN + 8)
+
+/* data.command */
+#define MSM_CAMERA_PRIV_S_CROP			(V4L2_CID_PRIVATE_BASE + 1)
+#define MSM_CAMERA_PRIV_G_CROP			(V4L2_CID_PRIVATE_BASE + 2)
+#define MSM_CAMERA_PRIV_G_FMT			(V4L2_CID_PRIVATE_BASE + 3)
+#define MSM_CAMERA_PRIV_S_FMT			(V4L2_CID_PRIVATE_BASE + 4)
+#define MSM_CAMERA_PRIV_TRY_FMT			(V4L2_CID_PRIVATE_BASE + 5)
+#define MSM_CAMERA_PRIV_METADATA		(V4L2_CID_PRIVATE_BASE + 6)
+#define MSM_CAMERA_PRIV_QUERY_CAP		(V4L2_CID_PRIVATE_BASE + 7)
+#define MSM_CAMERA_PRIV_STREAM_ON		(V4L2_CID_PRIVATE_BASE + 8)
+#define MSM_CAMERA_PRIV_STREAM_OFF		(V4L2_CID_PRIVATE_BASE + 9)
+#define MSM_CAMERA_PRIV_NEW_STREAM		(V4L2_CID_PRIVATE_BASE + 10)
+#define MSM_CAMERA_PRIV_DEL_STREAM		(V4L2_CID_PRIVATE_BASE + 11)
+#define MSM_CAMERA_PRIV_SHUTDOWN		(V4L2_CID_PRIVATE_BASE + 12)
+#define MSM_CAMERA_PRIV_STREAM_INFO_SYNC \
+	(V4L2_CID_PRIVATE_BASE + 13)
+#define MSM_CAMERA_PRIV_G_SESSION_ID (V4L2_CID_PRIVATE_BASE + 14)
+#define MSM_CAMERA_PRIV_CMD_MAX  20
+
+/* data.status - success */
+#define MSM_CAMERA_CMD_SUCESS      0x00000001
+#define MSM_CAMERA_BUF_MAP_SUCESS  0x00000002
+
+/* data.status - error */
+#define MSM_CAMERA_ERR_EVT_BASE 0x00010000
+#define MSM_CAMERA_ERR_CMD_FAIL		(MSM_CAMERA_ERR_EVT_BASE + 1)
+#define MSM_CAMERA_ERR_MAPPING		(MSM_CAMERA_ERR_EVT_BASE + 2)
+#define MSM_CAMERA_ERR_DEVICE_BUSY	(MSM_CAMERA_ERR_EVT_BASE + 3)
+
+/* The msm_v4l2_event_data structure must match the layout of
+ * the v4l2_event.u.data field and must not exceed 16 elements
+ * (16 * 4 bytes = 64 bytes). */
+struct msm_v4l2_event_data {
+	/*word 0*/
+	unsigned int command;
+	/*word 1*/
+	unsigned int status;
+	/*word 2*/
+	unsigned int session_id;
+	/*word 3*/
+	unsigned int stream_id;
+	/*word 4*/
+	unsigned int map_op;
+	/*word 5*/
+	unsigned int map_buf_idx;
+	/*word 6*/
+	unsigned int notify;
+	/*word 7*/
+	unsigned int arg_value;
+	/*word 8*/
+	unsigned int ret_value;
+	/*word 9*/
+	unsigned int v4l2_event_type;
+	/*word 10*/
+	unsigned int v4l2_event_id;
+	/*word 11*/
+	unsigned int handle;
+	/*word 12*/
+	unsigned int nop6;
+	/*word 13*/
+	unsigned int nop7;
+	/*word 14*/
+	unsigned int nop8;
+	/*word 15*/
+	unsigned int nop9;
+};
+
+/* map to v4l2_format.fmt.raw_data */
+struct msm_v4l2_format_data {
+	enum v4l2_buf_type type;
+	unsigned int width;
+	unsigned int height;
+	unsigned int pixelformat; /* FOURCC */
+	unsigned char num_planes;
+	unsigned int plane_sizes[VIDEO_MAX_PLANES];
+};
+
+/*  MSM Four-character-code (FOURCC) */
+#define msm_v4l2_fourcc(a, b, c, d)\
+	((__u32)(a) | ((__u32)(b) << 8) | ((__u32)(c) << 16) |\
+	((__u32)(d) << 24))
+
+/* Composite stats */
+#define MSM_V4L2_PIX_FMT_STATS_COMB v4l2_fourcc('S', 'T', 'C', 'M')
+/* AEC stats */
+#define MSM_V4L2_PIX_FMT_STATS_AE   v4l2_fourcc('S', 'T', 'A', 'E')
+/* AF stats */
+#define MSM_V4L2_PIX_FMT_STATS_AF   v4l2_fourcc('S', 'T', 'A', 'F')
+/* AWB stats */
+#define MSM_V4L2_PIX_FMT_STATS_AWB  v4l2_fourcc('S', 'T', 'W', 'B')
+/* IHIST stats */
+#define MSM_V4L2_PIX_FMT_STATS_IHST v4l2_fourcc('I', 'H', 'S', 'T')
+/* Column count stats */
+#define MSM_V4L2_PIX_FMT_STATS_CS   v4l2_fourcc('S', 'T', 'C', 'S')
+/* Row count stats */
+#define MSM_V4L2_PIX_FMT_STATS_RS   v4l2_fourcc('S', 'T', 'R', 'S')
+/* Bayer Grid stats */
+#define MSM_V4L2_PIX_FMT_STATS_BG   v4l2_fourcc('S', 'T', 'B', 'G')
+/* Bayer focus stats */
+#define MSM_V4L2_PIX_FMT_STATS_BF   v4l2_fourcc('S', 'T', 'B', 'F')
+/* Bayer hist stats */
+#define MSM_V4L2_PIX_FMT_STATS_BHST v4l2_fourcc('B', 'H', 'S', 'T')
+
+enum smmu_attach_mode {
+	NON_SECURE_MODE = 0x01,
+	SECURE_MODE = 0x02,
+	MAX_PROTECTION_MODE = 0x03,
+};
+
+struct msm_camera_smmu_attach_type {
+	enum smmu_attach_mode attach;
+};
+
+struct msm_camera_user_buf_cont_t {
+	unsigned int buf_cnt;
+	unsigned int buf_idx[MSM_CAMERA_MAX_USER_BUFF_CNT];
+};
+
+struct msm_camera_return_buf {
+	__u32 index;
+	__u32 reserved;
+};
+
+#define MSM_CAMERA_PRIV_IOCTL_ID_BASE 0
+#define MSM_CAMERA_PRIV_IOCTL_ID_RETURN_BUF 1
+
+struct msm_camera_private_ioctl_arg {
+	__u32 id;
+	__u32 size;
+	__u32 result;
+	__u32 reserved;
+	__user __u64 ioctl_ptr;
+};
+
+#define VIDIOC_MSM_CAMERA_PRIVATE_IOCTL_CMD \
+	_IOWR('V', BASE_VIDIOC_PRIVATE, struct msm_camera_private_ioctl_arg)
+
+#endif
+
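Session and error notifications from this header travel over the standard V4L2 event queue; the 16-word msm_v4l2_event_data is simply overlaid on v4l2_event.u.data. A sketch of waiting for a new-session event, assuming fd is the open msm_config node:

#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>
#include <media/msmb_camera.h>

static int wait_new_session(int fd, unsigned int *session_id)
{
	struct v4l2_event_subscription sub;
	struct v4l2_event ev;
	struct msm_v4l2_event_data *data;

	memset(&sub, 0, sizeof(sub));
	sub.type = MSM_CAMERA_V4L2_EVENT_TYPE;
	sub.id = MSM_CAMERA_NEW_SESSION;
	if (ioctl(fd, VIDIOC_SUBSCRIBE_EVENT, &sub) < 0)
		return -1;

	/* blocks until an event arrives (fd opened without O_NONBLOCK) */
	if (ioctl(fd, VIDIOC_DQEVENT, &ev) < 0)
		return -1;

	data = (struct msm_v4l2_event_data *)ev.u.data;
	*session_id = data->session_id;
	return 0;
}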
diff -Nruw linux-4.4.115-fbx/include/uapi/media./msmb_generic_buf_mgr.h linux-4.4.115-fbx/include/uapi/media/msmb_generic_buf_mgr.h
--- linux-4.4.115-fbx/include/uapi/media./msmb_generic_buf_mgr.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/include/uapi/media/msmb_generic_buf_mgr.h	2019-10-29 09:26:25.557221909 +0100
@@ -0,0 +1,66 @@
+#ifndef __UAPI_MEDIA_MSMB_GENERIC_BUF_MGR_H__
+#define __UAPI_MEDIA_MSMB_GENERIC_BUF_MGR_H__
+
+#include <media/msmb_camera.h>
+
+enum msm_camera_buf_mngr_cmd {
+	MSM_CAMERA_BUF_MNGR_CONT_MAP,
+	MSM_CAMERA_BUF_MNGR_CONT_UNMAP,
+	MSM_CAMERA_BUF_MNGR_CONT_MAX,
+};
+
+enum msm_camera_buf_mngr_buf_type {
+	MSM_CAMERA_BUF_MNGR_BUF_PLANAR,
+	MSM_CAMERA_BUF_MNGR_BUF_USER,
+	MSM_CAMERA_BUF_MNGR_BUF_INVALID,
+};
+
+struct msm_buf_mngr_info {
+	uint32_t session_id;
+	uint32_t stream_id;
+	uint32_t frame_id;
+	struct timeval timestamp;
+	uint32_t index;
+	uint32_t reserved;
+	enum msm_camera_buf_mngr_buf_type type;
+	struct msm_camera_user_buf_cont_t user_buf;
+};
+
+struct msm_buf_mngr_main_cont_info {
+	uint32_t session_id;
+	uint32_t stream_id;
+	enum msm_camera_buf_mngr_cmd cmd;
+	uint32_t cnt;
+	int32_t cont_fd;
+};
+
+#define MSM_CAMERA_BUF_MNGR_IOCTL_ID_BASE 0
+#define MSM_CAMERA_BUF_MNGR_IOCTL_ID_GET_BUF_BY_IDX 1
+
+#define VIDIOC_MSM_BUF_MNGR_GET_BUF \
+	_IOWR('V', BASE_VIDIOC_PRIVATE + 33, struct msm_buf_mngr_info)
+
+#define VIDIOC_MSM_BUF_MNGR_PUT_BUF \
+	_IOWR('V', BASE_VIDIOC_PRIVATE + 34, struct msm_buf_mngr_info)
+
+#define VIDIOC_MSM_BUF_MNGR_BUF_DONE \
+	_IOWR('V', BASE_VIDIOC_PRIVATE + 35, struct msm_buf_mngr_info)
+
+#define VIDIOC_MSM_BUF_MNGR_CONT_CMD \
+	_IOWR('V', BASE_VIDIOC_PRIVATE + 36, struct msm_buf_mngr_main_cont_info)
+
+#define VIDIOC_MSM_BUF_MNGR_INIT \
+	_IOWR('V', BASE_VIDIOC_PRIVATE + 37, struct msm_buf_mngr_info)
+
+#define VIDIOC_MSM_BUF_MNGR_DEINIT \
+	_IOWR('V', BASE_VIDIOC_PRIVATE + 38, struct msm_buf_mngr_info)
+
+#define VIDIOC_MSM_BUF_MNGR_FLUSH \
+	_IOWR('V', BASE_VIDIOC_PRIVATE + 39, struct msm_buf_mngr_info)
+
+#define VIDIOC_MSM_BUF_MNGR_IOCTL_CMD \
+	_IOWR('V', BASE_VIDIOC_PRIVATE + 40, \
+	struct msm_camera_private_ioctl_arg)
+
+#endif
+
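The buffer manager ioctls implement a simple lease: GET_BUF hands out a free buffer index for a session/stream pair, and PUT_BUF (or BUF_DONE, which also signals the consumer) returns it. A sketch of that round trip, assuming fd is the buffer manager device node and the session and stream were registered elsewhere:

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <media/msmb_generic_buf_mgr.h>

static int with_buffer(int fd, uint32_t session_id, uint32_t stream_id)
{
	struct msm_buf_mngr_info info;

	memset(&info, 0, sizeof(info));
	info.session_id = session_id;
	info.stream_id  = stream_id;

	if (ioctl(fd, VIDIOC_MSM_BUF_MNGR_GET_BUF, &info) < 0)
		return -1;

	/* ... produce into the buffer identified by info.index ... */

	return ioctl(fd, VIDIOC_MSM_BUF_MNGR_PUT_BUF, &info);
}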
diff -Nruw linux-4.4.115-fbx/include/uapi/media./msmb_isp.h linux-4.4.115-fbx/include/uapi/media/msmb_isp.h
--- linux-4.4.115-fbx/include/uapi/media./msmb_isp.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/include/uapi/media/msmb_isp.h	2019-10-29 09:26:25.557221909 +0100
@@ -0,0 +1,1038 @@
+#ifndef __UAPI_MSMB_ISP__
+#define __UAPI_MSMB_ISP__
+
+#include <linux/videodev2.h>
+#include <media/msmb_camera.h>
+
+#define MAX_PLANES_PER_STREAM 3
+#define MAX_NUM_STREAM 7
+
+#define ISP_VERSION_48        48
+#define ISP_VERSION_47        47
+#define ISP_VERSION_46        46
+#define ISP_VERSION_44        44
+#define ISP_VERSION_40        40
+#define ISP_VERSION_32        32
+#define ISP_NATIVE_BUF_BIT    (0x10000 << 0)
+#define ISP0_BIT              (0x10000 << 1)
+#define ISP1_BIT              (0x10000 << 2)
+#define ISP_META_CHANNEL_BIT  (0x10000 << 3)
+#define ISP_SCRATCH_BUF_BIT   (0x10000 << 4)
+#define ISP_OFFLINE_STATS_BIT (0x10000 << 5)
+#define ISP_SVHDR_IN_BIT      (0x10000 << 6) /* RDI hw stream for SVHDR */
+#define ISP_SVHDR_OUT_BIT     (0x10000 << 7) /* SVHDR output bufq stream*/
+
+#define ISP_STATS_STREAM_BIT  0x80000000
+
+#define VFE_HW_LIMIT 1
+
+struct msm_vfe_cfg_cmd_list;
+
+enum ISP_START_PIXEL_PATTERN {
+	ISP_BAYER_RGRGRG,
+	ISP_BAYER_GRGRGR,
+	ISP_BAYER_BGBGBG,
+	ISP_BAYER_GBGBGB,
+	ISP_YUV_YCbYCr,
+	ISP_YUV_YCrYCb,
+	ISP_YUV_CbYCrY,
+	ISP_YUV_CrYCbY,
+	ISP_PIX_PATTERN_MAX
+};
+
+enum msm_vfe_plane_fmt {
+	Y_PLANE,
+	CB_PLANE,
+	CR_PLANE,
+	CRCB_PLANE,
+	CBCR_PLANE,
+	VFE_PLANE_FMT_MAX
+};
+
+enum msm_vfe_input_src {
+	VFE_PIX_0,
+	VFE_RAW_0,
+	VFE_RAW_1,
+	VFE_RAW_2,
+	VFE_SRC_MAX,
+};
+
+enum msm_vfe_axi_stream_src {
+	PIX_ENCODER,
+	PIX_VIEWFINDER,
+	PIX_VIDEO,
+	CAMIF_RAW,
+	IDEAL_RAW,
+	RDI_INTF_0,
+	RDI_INTF_1,
+	RDI_INTF_2,
+	VFE_AXI_SRC_MAX
+};
+
+enum msm_vfe_frame_skip_pattern {
+	NO_SKIP,
+	EVERY_2FRAME,
+	EVERY_3FRAME,
+	EVERY_4FRAME,
+	EVERY_5FRAME,
+	EVERY_6FRAME,
+	EVERY_7FRAME,
+	EVERY_8FRAME,
+	EVERY_16FRAME,
+	EVERY_32FRAME,
+	SKIP_ALL,
+	SKIP_RANGE,
+	MAX_SKIP,
+};
+
+/*
+ * Define an unused period. When this period is set, the stream is
+ * stopped (i.e. the pattern is 0). We don't track the current pattern
+ * separately; the period alone defines it: if the period equals this
+ * value the pattern is 0, otherwise it is 1.
+ */
+#define MSM_VFE_STREAM_STOP_PERIOD 15
+
+enum msm_isp_stats_type {
+	MSM_ISP_STATS_AEC,   /* legacy based AEC */
+	MSM_ISP_STATS_AF,    /* legacy based AF */
+	MSM_ISP_STATS_AWB,   /* legacy based AWB */
+	MSM_ISP_STATS_RS,    /* legacy based RS */
+	MSM_ISP_STATS_CS,    /* legacy based CS */
+	MSM_ISP_STATS_IHIST, /* legacy based HIST */
+	MSM_ISP_STATS_SKIN,  /* legacy based SKIN */
+	MSM_ISP_STATS_BG,    /* Bayer Grids */
+	MSM_ISP_STATS_BF,    /* Bayer Focus */
+	MSM_ISP_STATS_BE,    /* Bayer Exposure*/
+	MSM_ISP_STATS_BHIST, /* Bayer Hist */
+	MSM_ISP_STATS_BF_SCALE,  /* Bayer Focus scale */
+	MSM_ISP_STATS_HDR_BE,    /* HDR Bayer Exposure */
+	MSM_ISP_STATS_HDR_BHIST, /* HDR Bayer Hist */
+	MSM_ISP_STATS_AEC_BG,   /* AEC BG */
+	MSM_ISP_STATS_MAX    /* MAX */
+};
+
+/*
+ * @stats_type_mask: Stats type mask (enum msm_isp_stats_type).
+ * @stream_src_mask: Stream src mask (enum msm_vfe_axi_stream_src).
+ * @skip_mode: skip pattern; min/max below apply only for SKIP_RANGE
+ * @min_frame_id: minimum frame id (valid only if skip_mode = SKIP_RANGE)
+ * @max_frame_id: maximum frame id (valid only if skip_mode = SKIP_RANGE)
+ */
+struct msm_isp_sw_framskip {
+	uint32_t stats_type_mask;
+	uint32_t stream_src_mask;
+	enum msm_vfe_frame_skip_pattern skip_mode;
+	uint32_t min_frame_id;
+	uint32_t max_frame_id;
+};
+
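As the field comments above describe, a software frame skip is expressed as bit masks over the stats-type and stream-source enums plus an optional frame-id window. An illustrative fill, assuming this header is included; shifting by the enum value is the natural reading of the masks, and the 100..110 window is invented:

struct msm_isp_sw_framskip stats_skip = {
	.stats_type_mask = (1 << MSM_ISP_STATS_BG) | (1 << MSM_ISP_STATS_BHIST),
	.stream_src_mask = 0,          /* stats only, no AXI streams */
	.skip_mode       = SKIP_RANGE,
	.min_frame_id    = 100,        /* hypothetical window */
	.max_frame_id    = 110,
};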
+enum msm_vfe_testgen_color_pattern {
+	COLOR_BAR_8_COLOR,
+	UNICOLOR_WHITE,
+	UNICOLOR_YELLOW,
+	UNICOLOR_CYAN,
+	UNICOLOR_GREEN,
+	UNICOLOR_MAGENTA,
+	UNICOLOR_RED,
+	UNICOLOR_BLUE,
+	UNICOLOR_BLACK,
+	MAX_COLOR,
+};
+
+enum msm_vfe_camif_input {
+	CAMIF_DISABLED,
+	CAMIF_PAD_REG_INPUT,
+	CAMIF_MIDDI_INPUT,
+	CAMIF_MIPI_INPUT,
+};
+
+struct msm_vfe_fetch_engine_cfg {
+	uint32_t input_format;
+	uint32_t buf_width;
+	uint32_t buf_height;
+	uint32_t fetch_width;
+	uint32_t fetch_height;
+	uint32_t x_offset;
+	uint32_t y_offset;
+	uint32_t buf_stride;
+};
+
+enum msm_vfe_camif_output_format {
+	CAMIF_QCOM_RAW,
+	CAMIF_MIPI_RAW,
+	CAMIF_PLAIN_8,
+	CAMIF_PLAIN_16,
+	CAMIF_MAX_FORMAT,
+};
+
+/*
+ * Camif output general configuration
+ */
+struct msm_vfe_camif_subsample_cfg {
+	uint32_t irq_subsample_period;
+	uint32_t irq_subsample_pattern;
+	uint32_t sof_counter_step;
+	uint32_t pixel_skip;
+	uint32_t line_skip;
+	uint32_t first_line;
+	uint32_t last_line;
+	uint32_t first_pixel;
+	uint32_t last_pixel;
+	enum msm_vfe_camif_output_format output_format;
+};
+
+/*
+ * Camif frame and window configuration
+ */
+struct msm_vfe_camif_cfg {
+	uint32_t lines_per_frame;
+	uint32_t pixels_per_line;
+	uint32_t first_pixel;
+	uint32_t last_pixel;
+	uint32_t first_line;
+	uint32_t last_line;
+	uint32_t epoch_line0;
+	uint32_t epoch_line1;
+	uint32_t is_split;
+	enum msm_vfe_camif_input camif_input;
+	struct msm_vfe_camif_subsample_cfg subsample_cfg;
+};
+
+struct msm_vfe_testgen_cfg {
+	uint32_t lines_per_frame;
+	uint32_t pixels_per_line;
+	uint32_t v_blank;
+	uint32_t h_blank;
+	enum ISP_START_PIXEL_PATTERN pixel_bayer_pattern;
+	uint32_t rotate_period;
+	enum msm_vfe_testgen_color_pattern color_bar_pattern;
+	uint32_t burst_num_frame;
+};
+
+enum msm_vfe_inputmux {
+	CAMIF,
+	TESTGEN,
+	EXTERNAL_READ,
+};
+
+enum msm_vfe_stats_composite_group {
+	STATS_COMPOSITE_GRP_NONE,
+	STATS_COMPOSITE_GRP_1,
+	STATS_COMPOSITE_GRP_2,
+	STATS_COMPOSITE_GRP_MAX,
+};
+
+enum msm_vfe_hvx_streaming_cmd {
+	HVX_DISABLE,
+	HVX_ONE_WAY,
+	HVX_ROUND_TRIP
+};
+
+struct msm_vfe_pix_cfg {
+	struct msm_vfe_camif_cfg camif_cfg;
+	struct msm_vfe_testgen_cfg testgen_cfg;
+	struct msm_vfe_fetch_engine_cfg fetch_engine_cfg;
+	enum msm_vfe_inputmux input_mux;
+	enum ISP_START_PIXEL_PATTERN pixel_pattern;
+	uint32_t input_format;
+	enum msm_vfe_hvx_streaming_cmd hvx_cmd;
+	uint32_t is_split;
+};
+
+struct msm_vfe_rdi_cfg {
+	uint8_t cid;
+	uint8_t frame_based;
+};
+
+struct msm_vfe_input_cfg {
+	union {
+		struct msm_vfe_pix_cfg pix_cfg;
+		struct msm_vfe_rdi_cfg rdi_cfg;
+	} d;
+	enum msm_vfe_input_src input_src;
+	uint32_t input_pix_clk;
+};
+
+struct msm_vfe_fetch_eng_start {
+	uint32_t session_id;
+	uint32_t stream_id;
+	uint32_t buf_idx;
+	uint8_t  offline_mode;
+	uint32_t fd;
+	uint32_t buf_addr;
+	uint32_t frame_id;
+};
+
+enum msm_vfe_fetch_eng_pass {
+	OFFLINE_FIRST_PASS,
+	OFFLINE_SECOND_PASS,
+	OFFLINE_MAX_PASS,
+};
+
+struct msm_vfe_fetch_eng_multi_pass_start {
+	uint32_t session_id;
+	uint32_t stream_id;
+	uint32_t buf_idx;
+	uint8_t  offline_mode;
+	uint32_t fd;
+	uint32_t buf_addr;
+	uint32_t frame_id;
+	uint32_t output_buf_idx;
+	uint32_t input_buf_offset;
+	enum msm_vfe_fetch_eng_pass  offline_pass;
+	uint32_t output_stream_id;
+};
+
+struct msm_vfe_axi_plane_cfg {
+	uint32_t output_width; /*Include padding*/
+	uint32_t output_height;
+	uint32_t output_stride;
+	uint32_t output_scan_lines;
+	uint32_t output_plane_format; /*Y/Cb/Cr/CbCr*/
+	uint32_t plane_addr_offset;
+	uint8_t csid_src; /*RDI 0-2*/
+	uint8_t rdi_cid;/*CID 1-16*/
+};
+
+enum msm_stream_rdi_input_type {
+	MSM_CAMERA_RDI_MIN,
+	MSM_CAMERA_RDI_PDAF,
+	MSM_CAMERA_RDI_MAX,
+};
+
+struct msm_vfe_axi_stream_request_cmd {
+	uint32_t session_id;
+	uint32_t stream_id;
+	uint32_t vt_enable;
+	uint32_t output_format;/*Planar/RAW/Misc*/
+	enum msm_vfe_axi_stream_src stream_src; /*CAMIF/IDEAL/RDIs*/
+	struct msm_vfe_axi_plane_cfg plane_cfg[MAX_PLANES_PER_STREAM];
+
+	uint32_t burst_count;
+	uint32_t hfr_mode;
+	uint8_t frame_base;
+
+	uint32_t init_frame_drop; /*MAX 31 Frames*/
+	enum msm_vfe_frame_skip_pattern frame_skip_pattern;
+	uint8_t buf_divert; /* if TRUE no vb2 buf done. */
+	/*Return values*/
+	uint32_t axi_stream_handle;
+	uint32_t controllable_output;
+	uint32_t burst_len;
+	/* Flag indicating memory input stream */
+	enum msm_stream_rdi_input_type rdi_input_type;
+};
+
+struct msm_vfe_axi_stream_release_cmd {
+	uint32_t stream_handle;
+};
+
+enum msm_vfe_axi_stream_cmd {
+	STOP_STREAM,
+	START_STREAM,
+	STOP_IMMEDIATELY,
+};
+
+struct msm_vfe_axi_stream_cfg_cmd {
+	uint8_t num_streams;
+	uint32_t stream_handle[VFE_AXI_SRC_MAX];
+	enum msm_vfe_axi_stream_cmd cmd;
+	uint8_t sync_frame_id_src;
+};
+
+enum msm_vfe_axi_stream_update_type {
+	ENABLE_STREAM_BUF_DIVERT,
+	DISABLE_STREAM_BUF_DIVERT,
+	UPDATE_STREAM_FRAMEDROP_PATTERN,
+	UPDATE_STREAM_STATS_FRAMEDROP_PATTERN,
+	UPDATE_STREAM_AXI_CONFIG,
+	UPDATE_STREAM_REQUEST_FRAMES,
+	UPDATE_STREAM_ADD_BUFQ,
+	UPDATE_STREAM_REMOVE_BUFQ,
+	UPDATE_STREAM_SW_FRAME_DROP,
+	UPDATE_STREAM_REQUEST_FRAMES_VER2,
+	UPDATE_STREAM_OFFLINE_AXI_CONFIG,
+};
+#define UPDATE_STREAM_REQUEST_FRAMES_VER2 UPDATE_STREAM_REQUEST_FRAMES_VER2
+
+enum msm_vfe_iommu_type {
+	IOMMU_ATTACH,
+	IOMMU_DETACH,
+};
+
+enum msm_vfe_buff_queue_id {
+	VFE_BUF_QUEUE_DEFAULT,
+	VFE_BUF_QUEUE_SHARED,
+	VFE_BUF_QUEUE_MAX,
+};
+
+struct msm_vfe_axi_stream_cfg_update_info {
+	uint32_t stream_handle;
+	uint32_t output_format;
+	uint32_t user_stream_id;
+	uint32_t frame_id;
+	enum msm_vfe_frame_skip_pattern skip_pattern;
+	struct msm_vfe_axi_plane_cfg plane_cfg[MAX_PLANES_PER_STREAM];
+	struct msm_isp_sw_framskip sw_skip_info;
+};
+
+struct msm_vfe_axi_stream_cfg_update_info_req_frm {
+	uint32_t stream_handle;
+	uint32_t user_stream_id;
+	uint32_t frame_id;
+	uint32_t buf_index;
+};
+
+struct msm_vfe_axi_halt_cmd {
+	uint32_t stop_camif;
+	uint32_t overflow_detected;
+	uint32_t blocking_halt;
+};
+
+struct msm_vfe_axi_reset_cmd {
+	uint32_t blocking;
+	uint32_t frame_id;
+};
+
+struct msm_vfe_axi_restart_cmd {
+	uint32_t enable_camif;
+};
+
+struct msm_vfe_axi_stream_update_cmd {
+	uint32_t num_streams;
+	enum msm_vfe_axi_stream_update_type update_type;
+	/*
+	 * For backward compatibility, ensure 1st member of any struct
+	 * in union below is uint32_t stream_handle.
+	 */
+	union {
+		struct msm_vfe_axi_stream_cfg_update_info
+					update_info[MSM_ISP_STATS_MAX];
+		struct msm_vfe_axi_stream_cfg_update_info_req_frm req_frm_ver2;
+	};
+};
+
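The union comment above requires every member to begin with a uint32_t stream_handle so the driver can read it uniformly across update types. A sketch of requesting one frame on a controllable output stream via VIDIOC_MSM_ISP_UPDATE_STREAM (defined further down in this header); the handle and ids are placeholders obtained from the earlier stream-request ioctl:

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <media/msmb_isp.h>

static int request_one_frame(int vfe_fd, uint32_t stream_handle,
			     uint32_t user_stream_id, uint32_t frame_id)
{
	struct msm_vfe_axi_stream_update_cmd cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.num_streams = 1;
	cmd.update_type = UPDATE_STREAM_REQUEST_FRAMES_VER2;
	cmd.req_frm_ver2.stream_handle  = stream_handle;
	cmd.req_frm_ver2.user_stream_id = user_stream_id;
	cmd.req_frm_ver2.frame_id       = frame_id;
	cmd.req_frm_ver2.buf_index      = 0; /* placeholder */

	return ioctl(vfe_fd, VIDIOC_MSM_ISP_UPDATE_STREAM, &cmd);
}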
+struct msm_vfe_smmu_attach_cmd {
+	uint32_t security_mode;
+	uint32_t iommu_attach_mode;
+};
+
+struct msm_vfe_stats_stream_request_cmd {
+	uint32_t session_id;
+	uint32_t stream_id;
+	enum msm_isp_stats_type stats_type;
+	uint32_t composite_flag;
+	uint32_t framedrop_pattern;
+	uint32_t init_frame_drop; /*MAX 31 Frames*/
+	uint32_t irq_subsample_pattern;
+	uint32_t buffer_offset;
+	uint32_t stream_handle;
+};
+
+struct msm_vfe_stats_stream_release_cmd {
+	uint32_t stream_handle;
+};
+struct msm_vfe_stats_stream_cfg_cmd {
+	uint8_t num_streams;
+	uint32_t stream_handle[MSM_ISP_STATS_MAX];
+	uint8_t enable;
+	uint32_t stats_burst_len;
+};
+
+enum msm_vfe_reg_cfg_type {
+	VFE_WRITE,
+	VFE_WRITE_MB,
+	VFE_READ,
+	VFE_CFG_MASK,
+	VFE_WRITE_DMI_16BIT,
+	VFE_WRITE_DMI_32BIT,
+	VFE_WRITE_DMI_64BIT,
+	VFE_READ_DMI_16BIT,
+	VFE_READ_DMI_32BIT,
+	VFE_READ_DMI_64BIT,
+	GET_MAX_CLK_RATE,
+	GET_CLK_RATES,
+	GET_ISP_ID,
+	VFE_HW_UPDATE_LOCK,
+	VFE_HW_UPDATE_UNLOCK,
+	SET_WM_UB_SIZE,
+	SET_UB_POLICY,
+	GET_VFE_HW_LIMIT,
+};
+
+struct msm_vfe_cfg_cmd2 {
+	uint16_t num_cfg;
+	uint16_t cmd_len;
+	void __user *cfg_data;
+	void __user *cfg_cmd;
+};
+
+struct msm_vfe_cfg_cmd_list {
+	struct msm_vfe_cfg_cmd2      cfg_cmd;
+	struct msm_vfe_cfg_cmd_list *next;
+	uint32_t                     next_size;
+};
+
+struct msm_vfe_reg_rw_info {
+	uint32_t reg_offset;
+	uint32_t cmd_data_offset;
+	uint32_t len;
+};
+
+struct msm_vfe_reg_mask_info {
+	uint32_t reg_offset;
+	uint32_t mask;
+	uint32_t val;
+};
+
+struct msm_vfe_reg_dmi_info {
+	uint32_t hi_tbl_offset; /*Optional*/
+	uint32_t lo_tbl_offset; /*Required*/
+	uint32_t len;
+};
+
+struct msm_vfe_reg_cfg_cmd {
+	union {
+		struct msm_vfe_reg_rw_info rw_info;
+		struct msm_vfe_reg_mask_info mask_info;
+		struct msm_vfe_reg_dmi_info dmi_info;
+	} u;
+
+	enum msm_vfe_reg_cfg_type cmd_type;
+};
+
+enum vfe_sd_type {
+	VFE_SD_0 = 0,
+	VFE_SD_1,
+	VFE_SD_COMMON,
+	VFE_SD_MAX,
+};
+
+/* When changing the value below, check the SOF event_data size:
+ * V4L2 limits the event payload to 64 bytes */
+#define MS_NUM_SLAVE_MAX 1
+
+/* Usecases when 2 HW need to be related or synced */
+enum msm_vfe_dual_hw_type {
+	DUAL_NONE = 0,
+	DUAL_HW_VFE_SPLIT = 1,
+	DUAL_HW_MASTER_SLAVE = 2,
+};
+
+/* Type for 2 INTF when used in Master-Slave mode */
+enum msm_vfe_dual_hw_ms_type {
+	MS_TYPE_NONE,
+	MS_TYPE_MASTER,
+	MS_TYPE_SLAVE,
+};
+
+struct msm_isp_set_dual_hw_ms_cmd {
+	uint8_t num_src;
+	/* Each session can be only one type, but may use multiple intf
+	 * if it is a YUV cam */
+	enum msm_vfe_dual_hw_ms_type dual_hw_ms_type;
+	/* The primary intf is usually associated with preview.
+	 * Its SOF frame_id and timestamp are tracked
+	 * and used to calculate the delta */
+	enum msm_vfe_input_src primary_intf;
+	/* The input_src array lists the other input INTF that may be
+	 * Master/Slave. For these additional intf, frame_id and timestamp
+	 * are not saved; slaves still get their frame_id from the Master */
+	enum msm_vfe_input_src input_src[VFE_SRC_MAX];
+	uint32_t sof_delta_threshold; /* In milliseconds. Sent for Master */
+};
+
+enum msm_isp_buf_type {
+	ISP_PRIVATE_BUF,
+	ISP_SHARE_BUF,
+	MAX_ISP_BUF_TYPE,
+};
+
+struct msm_isp_unmap_buf_req {
+	uint32_t fd;
+};
+
+struct msm_isp_buf_request {
+	uint32_t session_id;
+	uint32_t stream_id;
+	uint8_t num_buf;
+	uint32_t handle;
+	enum msm_isp_buf_type buf_type;
+};
+
+struct msm_isp_buf_request_ver2 {
+	uint32_t session_id;
+	uint32_t stream_id;
+	uint8_t num_buf;
+	uint32_t handle;
+	enum msm_isp_buf_type buf_type;
+	enum smmu_attach_mode security_mode;
+	uint32_t reserved[4];
+};
+
+struct msm_isp_qbuf_plane {
+	uint32_t addr;
+	uint32_t offset;
+	uint32_t length;
+};
+
+struct msm_isp_qbuf_buffer {
+	struct msm_isp_qbuf_plane planes[MAX_PLANES_PER_STREAM];
+	uint32_t num_planes;
+};
+
+struct msm_isp_qbuf_info {
+	uint32_t handle;
+	int32_t buf_idx;
+	/*Only used for prepare buffer*/
+	struct msm_isp_qbuf_buffer buffer;
+	/*Only used for diverted buffer*/
+	uint32_t dirty_buf;
+};
+
+struct msm_isp_clk_rates {
+	uint32_t svs_rate;
+	uint32_t nominal_rate;
+	uint32_t high_rate;
+};
+
+struct msm_vfe_axi_src_state {
+	enum msm_vfe_input_src input_src;
+	uint32_t src_active;
+	uint32_t src_frame_id;
+};
+
+enum msm_isp_event_mask_index {
+	ISP_EVENT_MASK_INDEX_STATS_NOTIFY		= 0,
+	ISP_EVENT_MASK_INDEX_ERROR			= 1,
+	ISP_EVENT_MASK_INDEX_IOMMU_P_FAULT		= 2,
+	ISP_EVENT_MASK_INDEX_STREAM_UPDATE_DONE		= 3,
+	ISP_EVENT_MASK_INDEX_REG_UPDATE			= 4,
+	ISP_EVENT_MASK_INDEX_SOF			= 5,
+	ISP_EVENT_MASK_INDEX_BUF_DIVERT			= 6,
+	ISP_EVENT_MASK_INDEX_COMP_STATS_NOTIFY		= 7,
+	ISP_EVENT_MASK_INDEX_MASK_FE_READ_DONE		= 8,
+	ISP_EVENT_MASK_INDEX_BUF_DONE			= 9,
+	ISP_EVENT_MASK_INDEX_REG_UPDATE_MISSING		= 10,
+	ISP_EVENT_MASK_INDEX_PING_PONG_MISMATCH		= 11,
+	ISP_EVENT_MASK_INDEX_BUF_FATAL_ERROR		= 12,
+};
+
+
+#define ISP_EVENT_SUBS_MASK_NONE			0
+
+#define ISP_EVENT_SUBS_MASK_STATS_NOTIFY \
+			(1 << ISP_EVENT_MASK_INDEX_STATS_NOTIFY)
+
+#define ISP_EVENT_SUBS_MASK_ERROR \
+			(1 << ISP_EVENT_MASK_INDEX_ERROR)
+
+#define ISP_EVENT_SUBS_MASK_IOMMU_P_FAULT \
+			(1 << ISP_EVENT_MASK_INDEX_IOMMU_P_FAULT)
+
+#define ISP_EVENT_SUBS_MASK_STREAM_UPDATE_DONE \
+			(1 << ISP_EVENT_MASK_INDEX_STREAM_UPDATE_DONE)
+
+#define ISP_EVENT_SUBS_MASK_REG_UPDATE \
+			(1 << ISP_EVENT_MASK_INDEX_REG_UPDATE)
+
+#define ISP_EVENT_SUBS_MASK_SOF \
+			(1 << ISP_EVENT_MASK_INDEX_SOF)
+
+#define ISP_EVENT_SUBS_MASK_BUF_DIVERT \
+			(1 << ISP_EVENT_MASK_INDEX_BUF_DIVERT)
+
+#define ISP_EVENT_SUBS_MASK_COMP_STATS_NOTIFY \
+			(1 << ISP_EVENT_MASK_INDEX_COMP_STATS_NOTIFY)
+
+#define ISP_EVENT_SUBS_MASK_FE_READ_DONE \
+			(1 << ISP_EVENT_MASK_INDEX_MASK_FE_READ_DONE)
+
+#define ISP_EVENT_SUBS_MASK_BUF_DONE \
+			(1 << ISP_EVENT_MASK_INDEX_BUF_DONE)
+
+#define ISP_EVENT_SUBS_MASK_REG_UPDATE_MISSING \
+			(1 << ISP_EVENT_MASK_INDEX_REG_UPDATE_MISSING)
+
+#define ISP_EVENT_SUBS_MASK_PING_PONG_MISMATCH \
+			(1 << ISP_EVENT_MASK_INDEX_PING_PONG_MISMATCH)
+
+#define ISP_EVENT_SUBS_MASK_BUF_FATAL_ERROR \
+			(1 << ISP_EVENT_MASK_INDEX_BUF_FATAL_ERROR)
+
+enum msm_isp_event_idx {
+	ISP_REG_UPDATE        = 0,
+	ISP_EPOCH_0           = 1,
+	ISP_EPOCH_1           = 2,
+	ISP_START_ACK         = 3,
+	ISP_STOP_ACK          = 4,
+	ISP_IRQ_VIOLATION     = 5,
+	ISP_STATS_OVERFLOW    = 6,
+	ISP_BUF_DONE          = 7,
+	ISP_FE_RD_DONE        = 8,
+	ISP_IOMMU_P_FAULT     = 9,
+	ISP_ERROR             = 10,
+	ISP_HW_FATAL_ERROR      = 11,
+	ISP_PING_PONG_MISMATCH = 12,
+	ISP_REG_UPDATE_MISSING = 13,
+	ISP_BUF_FATAL_ERROR = 14,
+	ISP_EVENT_MAX         = 15
+};
+
+#define ISP_EVENT_OFFSET          8
+#define ISP_EVENT_BASE            (V4L2_EVENT_PRIVATE_START)
+#define ISP_BUF_EVENT_BASE        (ISP_EVENT_BASE + (1 << ISP_EVENT_OFFSET))
+#define ISP_STATS_EVENT_BASE      (ISP_EVENT_BASE + (2 << ISP_EVENT_OFFSET))
+#define ISP_CAMIF_EVENT_BASE      (ISP_EVENT_BASE + (3 << ISP_EVENT_OFFSET))
+#define ISP_STREAM_EVENT_BASE     (ISP_EVENT_BASE + (4 << ISP_EVENT_OFFSET))
+#define ISP_EVENT_REG_UPDATE      (ISP_EVENT_BASE + ISP_REG_UPDATE)
+#define ISP_EVENT_EPOCH_0         (ISP_EVENT_BASE + ISP_EPOCH_0)
+#define ISP_EVENT_EPOCH_1         (ISP_EVENT_BASE + ISP_EPOCH_1)
+#define ISP_EVENT_START_ACK       (ISP_EVENT_BASE + ISP_START_ACK)
+#define ISP_EVENT_STOP_ACK        (ISP_EVENT_BASE + ISP_STOP_ACK)
+#define ISP_EVENT_IRQ_VIOLATION   (ISP_EVENT_BASE + ISP_IRQ_VIOLATION)
+#define ISP_EVENT_STATS_OVERFLOW  (ISP_EVENT_BASE + ISP_STATS_OVERFLOW)
+#define ISP_EVENT_ERROR           (ISP_EVENT_BASE + ISP_ERROR)
+#define ISP_EVENT_SOF             (ISP_CAMIF_EVENT_BASE)
+#define ISP_EVENT_EOF             (ISP_CAMIF_EVENT_BASE + 1)
+#define ISP_EVENT_BUF_DONE        (ISP_EVENT_BASE + ISP_BUF_DONE)
+#define ISP_EVENT_BUF_DIVERT      (ISP_BUF_EVENT_BASE)
+#define ISP_EVENT_STATS_NOTIFY    (ISP_STATS_EVENT_BASE)
+#define ISP_EVENT_COMP_STATS_NOTIFY (ISP_EVENT_STATS_NOTIFY + MSM_ISP_STATS_MAX)
+#define ISP_EVENT_FE_READ_DONE    (ISP_EVENT_BASE + ISP_FE_RD_DONE)
+#define ISP_EVENT_IOMMU_P_FAULT   (ISP_EVENT_BASE + ISP_IOMMU_P_FAULT)
+#define ISP_EVENT_HW_FATAL_ERROR  (ISP_EVENT_BASE + ISP_HW_FATAL_ERROR)
+#define ISP_EVENT_PING_PONG_MISMATCH (ISP_EVENT_BASE + ISP_PING_PONG_MISMATCH)
+#define ISP_EVENT_REG_UPDATE_MISSING (ISP_EVENT_BASE + ISP_REG_UPDATE_MISSING)
+#define ISP_EVENT_BUF_FATAL_ERROR (ISP_EVENT_BASE + ISP_BUF_FATAL_ERROR)
+#define ISP_EVENT_STREAM_UPDATE_DONE   (ISP_STREAM_EVENT_BASE)
+
+/* The msm_isp_event_data structure below is delivered through
+ * the v4l2_event.u.data field, so
+ * it must not exceed 64 bytes. */
+
+struct msm_isp_buf_event {
+	uint32_t session_id;
+	uint32_t stream_id;
+	uint32_t handle;
+	uint32_t output_format;
+	int8_t buf_idx;
+};
+struct msm_isp_fetch_eng_event {
+	uint32_t session_id;
+	uint32_t stream_id;
+	uint32_t handle;
+	uint32_t fd;
+	int8_t buf_idx;
+	int8_t offline_mode;
+};
+struct msm_isp_stats_event {
+	uint32_t stats_mask;                        /* 4 bytes */
+	uint8_t stats_buf_idxs[MSM_ISP_STATS_MAX];  /* 15 bytes */
+	uint8_t pd_stats_idx;
+};
+
+struct msm_isp_stream_ack {
+	uint32_t session_id;
+	uint32_t stream_id;
+	uint32_t handle;
+};
+
+enum msm_vfe_error_type {
+	ISP_ERROR_NONE,
+	ISP_ERROR_CAMIF,
+	ISP_ERROR_BUS_OVERFLOW,
+	ISP_ERROR_RETURN_EMPTY_BUFFER,
+	ISP_ERROR_FRAME_ID_MISMATCH,
+	ISP_ERROR_MAX,
+};
+
+struct msm_isp_error_info {
+	enum msm_vfe_error_type err_type;
+	uint32_t session_id;
+	uint32_t stream_id;
+	uint32_t stream_id_mask;
+};
+
+/* This structure reports delta between master and slave */
+struct msm_isp_ms_delta_info {
+	uint8_t num_delta_info;
+	uint32_t delta[MS_NUM_SLAVE_MAX];
+};
+
+/* This is sent in EPOCH irq */
+struct msm_isp_output_info {
+	uint8_t regs_not_updated;
+	/* mask with bufq_handle for regs not updated or return empty */
+	uint16_t output_err_mask;
+	/* mask with stream_idx for get_buf failed */
+	uint8_t stream_framedrop_mask;
+	/* mask with stats stream_idx for get_buf failed */
+	uint16_t stats_framedrop_mask;
+};
+
+/* This structure is piggybacked with SOF event */
+struct msm_isp_sof_info {
+	uint8_t regs_not_updated;
+	/* mask with bufq_handle for regs not updated */
+	uint16_t reg_update_fail_mask;
+	/* mask with bufq_handle for get_buf failed */
+	uint32_t stream_get_buf_fail_mask;
+	/* mask with stats stream_idx for get_buf failed */
+	uint16_t stats_get_buf_fail_mask;
+	/* delta between master and slave */
+	struct msm_isp_ms_delta_info ms_delta_info;
+	/*
+	 * mask with AXI_SRC in paused state. In PAUSED
+	 * state there is no Buffer output. So this mask is used
+	 * to report drop.
+	 */
+	uint16_t axi_updating_mask;
+	/* extended mask with bufq_handle for regs not updated */
+	uint32_t reg_update_fail_mask_ext;
+};
+#define AXI_UPDATING_MASK 1
+#define REG_UPDATE_FAIL_MASK_EXT 1
+
+struct msm_isp_event_data {
+	/* Wall clock, except for buffer divert events,
+	 * which use the monotonic clock
+	 */
+	struct timeval timestamp;
+	/* Monotonic timestamp since bootup */
+	struct timeval mono_timestamp;
+	uint32_t frame_id;
+	union {
+		/* Sent for Stats_Done event */
+		struct msm_isp_stats_event stats;
+		/* Sent for Buf_Divert event */
+		struct msm_isp_buf_event buf_done;
+		/* Sent for offline fetch done event */
+		struct msm_isp_fetch_eng_event fetch_done;
+		/* Sent for Error_Event */
+		struct msm_isp_error_info error_info;
+		/*
+		 * This struct needs to be removed once
+		 * userspace switches to sof_info
+		 */
+		struct msm_isp_output_info output_info;
+		/* Sent for SOF event */
+		struct msm_isp_sof_info sof_info;
+	} u; /* union can have max 52 bytes */
+};
+
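Since the whole msm_isp_event_data structure rides in v4l2_event.u.data, the size comments above are hard constraints rather than style. One way a client could keep them honest is a compile-time check against the documented 52-byte union budget; these assertions are illustrative, not part of the patch:

#include <media/msmb_isp.h>

/* v4l2_event.u.data is 64 bytes; the union above is budgeted at 52 */
_Static_assert(sizeof(struct msm_isp_sof_info) <= 52,
	       "sof_info exceeds the documented union budget");
_Static_assert(sizeof(struct msm_isp_stats_event) <= 52,
	       "stats_event exceeds the documented union budget");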
+enum msm_vfe_ahb_clk_vote {
+	MSM_ISP_CAMERA_AHB_SVS_VOTE = 1,
+	MSM_ISP_CAMERA_AHB_TURBO_VOTE = 2,
+	MSM_ISP_CAMERA_AHB_NOMINAL_VOTE = 3,
+	MSM_ISP_CAMERA_AHB_SUSPEND_VOTE = 4,
+};
+
+struct msm_isp_ahb_clk_cfg {
+	uint32_t vote;
+	uint32_t reserved[2];
+};
+
+enum msm_vfe_dual_cam_sync_mode {
+	MSM_ISP_DUAL_CAM_ASYNC,
+	MSM_ISP_DUAL_CAM_SYNC,
+};
+
+struct msm_isp_dual_hw_master_slave_sync {
+	uint32_t sync_mode;
+	uint32_t reserved[2];
+};
+
+struct msm_vfe_dual_lpm_mode {
+	enum msm_vfe_axi_stream_src stream_src[VFE_AXI_SRC_MAX];
+	uint32_t num_src;
+	uint32_t lpm_mode;
+};
+#define V4L2_PIX_FMT_QBGGR8  v4l2_fourcc('Q', 'B', 'G', '8')
+#define V4L2_PIX_FMT_QGBRG8  v4l2_fourcc('Q', 'G', 'B', '8')
+#define V4L2_PIX_FMT_QGRBG8  v4l2_fourcc('Q', 'G', 'R', '8')
+#define V4L2_PIX_FMT_QRGGB8  v4l2_fourcc('Q', 'R', 'G', '8')
+#define V4L2_PIX_FMT_QBGGR10 v4l2_fourcc('Q', 'B', 'G', '0')
+#define V4L2_PIX_FMT_QGBRG10 v4l2_fourcc('Q', 'G', 'B', '0')
+#define V4L2_PIX_FMT_QGRBG10 v4l2_fourcc('Q', 'G', 'R', '0')
+#define V4L2_PIX_FMT_QRGGB10 v4l2_fourcc('Q', 'R', 'G', '0')
+#define V4L2_PIX_FMT_QBGGR12 v4l2_fourcc('Q', 'B', 'G', '2')
+#define V4L2_PIX_FMT_QGBRG12 v4l2_fourcc('Q', 'G', 'B', '2')
+#define V4L2_PIX_FMT_QGRBG12 v4l2_fourcc('Q', 'G', 'R', '2')
+#define V4L2_PIX_FMT_QRGGB12 v4l2_fourcc('Q', 'R', 'G', '2')
+#define V4L2_PIX_FMT_QBGGR14 v4l2_fourcc('Q', 'B', 'G', '4')
+#define V4L2_PIX_FMT_QGBRG14 v4l2_fourcc('Q', 'G', 'B', '4')
+#define V4L2_PIX_FMT_QGRBG14 v4l2_fourcc('Q', 'G', 'R', '4')
+#define V4L2_PIX_FMT_QRGGB14 v4l2_fourcc('Q', 'R', 'G', '4')
+#define V4L2_PIX_FMT_P16BGGR10 v4l2_fourcc('P', 'B', 'G', '0')
+#define V4L2_PIX_FMT_P16GBRG10 v4l2_fourcc('P', 'G', 'B', '0')
+#define V4L2_PIX_FMT_P16GRBG10 v4l2_fourcc('P', 'G', 'R', '0')
+#define V4L2_PIX_FMT_P16RGGB10 v4l2_fourcc('P', 'R', 'G', '0')
+#define V4L2_PIX_FMT_NV14 v4l2_fourcc('N', 'V', '1', '4')
+#define V4L2_PIX_FMT_NV41 v4l2_fourcc('N', 'V', '4', '1')
+#define V4L2_PIX_FMT_META v4l2_fourcc('Q', 'M', 'E', 'T')
+#define V4L2_PIX_FMT_META10 v4l2_fourcc('Q', 'M', '1', '0')
+#define V4L2_PIX_FMT_SBGGR14 v4l2_fourcc('B', 'G', '1', '4') /* 14 BGBG.GRGR.*/
+#define V4L2_PIX_FMT_SGBRG14 v4l2_fourcc('G', 'B', '1', '4') /* 14 GBGB.RGRG.*/
+#define V4L2_PIX_FMT_SGRBG14 v4l2_fourcc('B', 'A', '1', '4') /* 14 GRGR.BGBG.*/
+#define V4L2_PIX_FMT_SRGGB14 v4l2_fourcc('R', 'G', '1', '4') /* 14 RGRG.GBGB.*/
+
+enum msm_isp_ioctl_cmd_code {
+	MSM_VFE_REG_CFG = BASE_VIDIOC_PRIVATE,
+	MSM_ISP_REQUEST_BUF,
+	MSM_ISP_ENQUEUE_BUF,
+	MSM_ISP_RELEASE_BUF,
+	MSM_ISP_REQUEST_STREAM,
+	MSM_ISP_CFG_STREAM,
+	MSM_ISP_RELEASE_STREAM,
+	MSM_ISP_INPUT_CFG,
+	MSM_ISP_SET_SRC_STATE,
+	MSM_ISP_REQUEST_STATS_STREAM,
+	MSM_ISP_CFG_STATS_STREAM,
+	MSM_ISP_RELEASE_STATS_STREAM,
+	MSM_ISP_REG_UPDATE_CMD,
+	MSM_ISP_UPDATE_STREAM,
+	MSM_VFE_REG_LIST_CFG,
+	MSM_ISP_SMMU_ATTACH,
+	MSM_ISP_UPDATE_STATS_STREAM,
+	MSM_ISP_AXI_HALT,
+	MSM_ISP_AXI_RESET,
+	MSM_ISP_AXI_RESTART,
+	MSM_ISP_FETCH_ENG_START,
+	MSM_ISP_DEQUEUE_BUF,
+	MSM_ISP_SET_DUAL_HW_MASTER_SLAVE,
+	MSM_ISP_MAP_BUF_START_FE,
+	MSM_ISP_UNMAP_BUF,
+	MSM_ISP_AHB_CLK_CFG,
+	MSM_ISP_DUAL_HW_MASTER_SLAVE_SYNC,
+	MSM_ISP_FETCH_ENG_MULTI_PASS_START,
+	MSM_ISP_MAP_BUF_START_MULTI_PASS_FE,
+	MSM_ISP_REQUEST_BUF_VER2,
+	MSM_ISP_DUAL_HW_LPM_MODE,
+};
+
+#define VIDIOC_MSM_VFE_REG_CFG \
+	_IOWR('V', MSM_VFE_REG_CFG, \
+		struct msm_vfe_cfg_cmd2)
+
+#define VIDIOC_MSM_ISP_REQUEST_BUF \
+	_IOWR('V', MSM_ISP_REQUEST_BUF, \
+		struct msm_isp_buf_request)
+
+#define VIDIOC_MSM_ISP_ENQUEUE_BUF \
+	_IOWR('V', MSM_ISP_ENQUEUE_BUF, \
+		struct msm_isp_qbuf_info)
+
+#define VIDIOC_MSM_ISP_RELEASE_BUF \
+	_IOWR('V', MSM_ISP_RELEASE_BUF, \
+		struct msm_isp_buf_request)
+
+#define VIDIOC_MSM_ISP_REQUEST_STREAM \
+	_IOWR('V', MSM_ISP_REQUEST_STREAM, \
+		struct msm_vfe_axi_stream_request_cmd)
+
+#define VIDIOC_MSM_ISP_CFG_STREAM \
+	_IOWR('V', MSM_ISP_CFG_STREAM, \
+		struct msm_vfe_axi_stream_cfg_cmd)
+
+#define VIDIOC_MSM_ISP_RELEASE_STREAM \
+	_IOWR('V', MSM_ISP_RELEASE_STREAM, \
+		struct msm_vfe_axi_stream_release_cmd)
+
+#define VIDIOC_MSM_ISP_INPUT_CFG \
+	_IOWR('V', MSM_ISP_INPUT_CFG, \
+		struct msm_vfe_input_cfg)
+
+#define VIDIOC_MSM_ISP_SET_SRC_STATE \
+	_IOWR('V', MSM_ISP_SET_SRC_STATE, \
+		struct msm_vfe_axi_src_state)
+
+#define VIDIOC_MSM_ISP_REQUEST_STATS_STREAM \
+	_IOWR('V', MSM_ISP_REQUEST_STATS_STREAM, \
+		struct msm_vfe_stats_stream_request_cmd)
+
+#define VIDIOC_MSM_ISP_CFG_STATS_STREAM \
+	_IOWR('V', MSM_ISP_CFG_STATS_STREAM, \
+		struct msm_vfe_stats_stream_cfg_cmd)
+
+#define VIDIOC_MSM_ISP_RELEASE_STATS_STREAM \
+	_IOWR('V', MSM_ISP_RELEASE_STATS_STREAM, \
+		struct msm_vfe_stats_stream_release_cmd)
+
+#define VIDIOC_MSM_ISP_REG_UPDATE_CMD \
+	_IOWR('V', MSM_ISP_REG_UPDATE_CMD, \
+		enum msm_vfe_input_src)
+
+#define VIDIOC_MSM_ISP_UPDATE_STREAM \
+	_IOWR('V', MSM_ISP_UPDATE_STREAM, \
+		struct msm_vfe_axi_stream_update_cmd)
+
+#define VIDIOC_MSM_VFE_REG_LIST_CFG \
+	_IOWR('V', MSM_VFE_REG_LIST_CFG, \
+		struct msm_vfe_cfg_cmd_list)
+
+#define VIDIOC_MSM_ISP_SMMU_ATTACH \
+	_IOWR('V', MSM_ISP_SMMU_ATTACH, \
+		struct msm_vfe_smmu_attach_cmd)
+
+#define VIDIOC_MSM_ISP_UPDATE_STATS_STREAM \
+	_IOWR('V', MSM_ISP_UPDATE_STATS_STREAM, \
+		struct msm_vfe_axi_stream_update_cmd)
+
+#define VIDIOC_MSM_ISP_AXI_HALT \
+	_IOWR('V', MSM_ISP_AXI_HALT, \
+		struct msm_vfe_axi_halt_cmd)
+
+#define VIDIOC_MSM_ISP_AXI_RESET \
+	_IOWR('V', MSM_ISP_AXI_RESET, \
+		struct msm_vfe_axi_reset_cmd)
+
+#define VIDIOC_MSM_ISP_AXI_RESTART \
+	_IOWR('V', MSM_ISP_AXI_RESTART, \
+		struct msm_vfe_axi_restart_cmd)
+
+#define VIDIOC_MSM_ISP_FETCH_ENG_START \
+	_IOWR('V', MSM_ISP_FETCH_ENG_START, \
+		struct msm_vfe_fetch_eng_start)
+
+#define VIDIOC_MSM_ISP_DEQUEUE_BUF \
+	_IOWR('V', MSM_ISP_DEQUEUE_BUF, \
+		struct msm_isp_qbuf_info)
+
+#define VIDIOC_MSM_ISP_SET_DUAL_HW_MASTER_SLAVE \
+	_IOWR('V', MSM_ISP_SET_DUAL_HW_MASTER_SLAVE, \
+		struct msm_isp_set_dual_hw_ms_cmd)
+
+#define VIDIOC_MSM_ISP_MAP_BUF_START_FE \
+	_IOWR('V', MSM_ISP_MAP_BUF_START_FE, \
+		struct msm_vfe_fetch_eng_start)
+
+#define VIDIOC_MSM_ISP_UNMAP_BUF \
+	_IOWR('V', MSM_ISP_UNMAP_BUF, \
+		struct msm_isp_unmap_buf_req)
+
+#define VIDIOC_MSM_ISP_AHB_CLK_CFG \
+	_IOWR('V', MSM_ISP_AHB_CLK_CFG, struct msm_isp_ahb_clk_cfg)
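+
+/*
+ * Illustrative sketch (assumed usage): voting the camera AHB clock to
+ * the SVS level with the ioctl just defined above.
+ *
+ *	struct msm_isp_ahb_clk_cfg cfg = {
+ *		.vote = MSM_ISP_CAMERA_AHB_SVS_VOTE,
+ *	};
+ *	ioctl(vfe_fd, VIDIOC_MSM_ISP_AHB_CLK_CFG, &cfg);
+ */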
+
+#define VIDIOC_MSM_ISP_DUAL_HW_MASTER_SLAVE_SYNC \
+	_IOWR('V', MSM_ISP_DUAL_HW_MASTER_SLAVE_SYNC, \
+	struct msm_isp_dual_hw_master_slave_sync)
+
+#define VIDIOC_MSM_ISP_FETCH_ENG_MULTI_PASS_START \
+	_IOWR('V', MSM_ISP_FETCH_ENG_MULTI_PASS_START, \
+		struct msm_vfe_fetch_eng_multi_pass_start)
+
+#define VIDIOC_MSM_ISP_MAP_BUF_START_MULTI_PASS_FE \
+	_IOWR('V', MSM_ISP_MAP_BUF_START_MULTI_PASS_FE, \
+		struct msm_vfe_fetch_eng_multi_pass_start)
+
+#define VIDIOC_MSM_ISP_REQUEST_BUF_VER2 \
+	_IOWR('V', MSM_ISP_REQUEST_BUF_VER2, struct msm_isp_buf_request_ver2)
+
+#define VIDIOC_MSM_ISP_DUAL_HW_LPM_MODE \
+	_IOWR('V', MSM_ISP_DUAL_HW_LPM_MODE, \
+	struct msm_vfe_dual_lpm_mode)
+
+#endif /* __MSMB_ISP__ */
diff -Nruw linux-4.4.115-fbx/include/uapi/media/msmb_ispif.h linux-4.4.115-fbx/include/uapi/media/msmb_ispif.h
--- linux-4.4.115-fbx/include/uapi/media/msmb_ispif.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/include/uapi/media/msmb_ispif.h	2019-01-22 16:16:28.603292590 +0100
@@ -0,0 +1,179 @@
+#ifndef UAPI_MSMB_ISPIF_H
+#define UAPI_MSMB_ISPIF_H
+
+#include <linux/types.h>
+#include <linux/ioctl.h>
+#include <linux/videodev2.h>
+
+#define CSID_VERSION_V20                      0x02000011
+#define CSID_VERSION_V22                      0x02001000
+#define CSID_VERSION_V30                      0x30000000
+#define CSID_VERSION_V3                       0x30000000
+
+enum msm_ispif_vfe_intf {
+	VFE0,
+	VFE1,
+	VFE_MAX
+};
+#define VFE0_MASK    (1 << VFE0)
+#define VFE1_MASK    (1 << VFE1)
+
+enum msm_ispif_intftype {
+	PIX0,
+	RDI0,
+	PIX1,
+	RDI1,
+	RDI2,
+	INTF_MAX
+};
+#define MAX_PARAM_ENTRIES (INTF_MAX * 2)
+#define MAX_CID_CH	8
+#define MAX_CID_CH_PARAM_ENTRY	3
+
+#define PIX0_MASK (1 << PIX0)
+#define PIX1_MASK (1 << PIX1)
+#define RDI0_MASK (1 << RDI0)
+#define RDI1_MASK (1 << RDI1)
+#define RDI2_MASK (1 << RDI2)
+
+enum msm_ispif_vc {
+	VC0,
+	VC1,
+	VC2,
+	VC3,
+	VC_MAX
+};
+
+enum msm_ispif_cid {
+	CID0,
+	CID1,
+	CID2,
+	CID3,
+	CID4,
+	CID5,
+	CID6,
+	CID7,
+	CID8,
+	CID9,
+	CID10,
+	CID11,
+	CID12,
+	CID13,
+	CID14,
+	CID15,
+	CID_MAX
+};
+
+enum msm_ispif_csid {
+	CSID0,
+	CSID1,
+	CSID2,
+	CSID3,
+	CSID_MAX
+};
+
+enum msm_ispif_pixel_odd_even {
+	PIX_EVEN,
+	PIX_ODD
+};
+
+enum msm_ispif_pixel_pack_mode {
+	PACK_BYTE,
+	PACK_PLAIN_PACK,
+	PACK_NV_P8,
+	PACK_NV_P16
+};
+
+struct msm_ispif_pack_cfg {
+	int pixel_swap_en;
+	enum msm_ispif_pixel_odd_even even_odd_sel;
+	enum msm_ispif_pixel_pack_mode pack_mode;
+};
+
+struct msm_ispif_params_entry {
+	enum msm_ispif_vfe_intf vfe_intf;
+	enum msm_ispif_intftype intftype;
+	int num_cids;
+	enum msm_ispif_cid cids[MAX_CID_CH_PARAM_ENTRY];
+	enum msm_ispif_csid csid;
+	int crop_enable;
+	uint16_t crop_start_pixel;
+	uint16_t crop_end_pixel;
+};
+
+struct msm_ispif_right_param_entry {
+	enum msm_ispif_cid cids[MAX_CID_CH_PARAM_ENTRY];
+	enum msm_ispif_csid csid;
+};
+
+struct msm_ispif_param_data_ext {
+	uint32_t num;
+	struct msm_ispif_params_entry entries[MAX_PARAM_ENTRIES];
+	struct msm_ispif_pack_cfg pack_cfg[CID_MAX];
+	struct msm_ispif_right_param_entry right_entries[MAX_PARAM_ENTRIES];
+	uint32_t stereo_enable;
+	uint16_t line_width[VFE_MAX];
+};
+
+struct msm_ispif_param_data {
+	uint32_t num;
+	struct msm_ispif_params_entry entries[MAX_PARAM_ENTRIES];
+};
+
+struct msm_isp_info {
+	uint32_t max_resolution;
+	uint32_t id;
+	uint32_t ver;
+};
+
+struct msm_ispif_vfe_info {
+	int num_vfe;
+	struct msm_isp_info info[VFE_MAX];
+};
+
+enum ispif_cfg_type_t {
+	ISPIF_CLK_ENABLE,
+	ISPIF_CLK_DISABLE,
+	ISPIF_INIT,
+	ISPIF_CFG,
+	ISPIF_START_FRAME_BOUNDARY,
+	ISPIF_RESTART_FRAME_BOUNDARY,
+	ISPIF_STOP_FRAME_BOUNDARY,
+	ISPIF_STOP_IMMEDIATELY,
+	ISPIF_RELEASE,
+	ISPIF_ENABLE_REG_DUMP,
+	ISPIF_SET_VFE_INFO,
+	ISPIF_CFG2,
+	ISPIF_CFG_STEREO,
+};
+
+struct ispif_cfg_data {
+	enum ispif_cfg_type_t cfg_type;
+	union {
+		int reg_dump;                        /* ISPIF_ENABLE_REG_DUMP */
+		uint32_t csid_version;               /* ISPIF_INIT */
+		struct msm_ispif_vfe_info vfe_info;  /* ISPIF_SET_VFE_INFO */
+		struct msm_ispif_param_data params;  /* CFG, START, STOP */
+	};
+};
+
+struct ispif_cfg_data_ext {
+	enum ispif_cfg_type_t cfg_type;
+	void __user *data;
+	uint32_t size;
+};
+
+#define ISPIF_RDI_PACK_MODE_SUPPORT 1
+
+#define ISPIF_3D_SUPPORT 1
+
+#define ISPIF_LINE_WIDTH_SUPPORT 1
+
+#define VIDIOC_MSM_ISPIF_CFG \
+	_IOWR('V', BASE_VIDIOC_PRIVATE, struct ispif_cfg_data)
+
+#define VIDIOC_MSM_ISPIF_CFG_EXT \
+	_IOWR('V', BASE_VIDIOC_PRIVATE+1, struct ispif_cfg_data_ext)
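+
+/*
+ * Illustrative userspace sketch (assumed usage): initialise the ISPIF,
+ * then route one PIX path to VFE0.  ispif_fd is an open ISPIF subdev
+ * node; the anonymous union members are selected by cfg_type as the
+ * per-member comments above indicate.
+ *
+ *	struct ispif_cfg_data cfg = { .cfg_type = ISPIF_INIT };
+ *	cfg.csid_version = CSID_VERSION_V30;
+ *	ioctl(ispif_fd, VIDIOC_MSM_ISPIF_CFG, &cfg);
+ *
+ *	cfg.cfg_type = ISPIF_CFG;
+ *	cfg.params.num = 1;
+ *	cfg.params.entries[0].vfe_intf = VFE0;
+ *	cfg.params.entries[0].intftype = PIX0;
+ *	cfg.params.entries[0].num_cids = 1;
+ *	cfg.params.entries[0].cids[0] = CID0;
+ *	cfg.params.entries[0].csid = CSID0;
+ *	ioctl(ispif_fd, VIDIOC_MSM_ISPIF_CFG, &cfg);
+ */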
+
+#endif
+
diff -Nruw linux-4.4.115-fbx/include/uapi/media/msmb_pproc.h linux-4.4.115-fbx/include/uapi/media/msmb_pproc.h
--- linux-4.4.115-fbx/include/uapi/media/msmb_pproc.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/include/uapi/media/msmb_pproc.h	2019-01-22 16:16:28.603292590 +0100
@@ -0,0 +1,255 @@
+#ifndef __UAPI_MSMB_PPROC_H
+#define __UAPI_MSMB_PPROC_H
+
+#include <linux/videodev2.h>
+#include <linux/types.h>
+#include <media/msmb_generic_buf_mgr.h>
+
+/* Should be same as VIDEO_MAX_PLANES in videodev2.h */
+#define MAX_PLANES VIDEO_MAX_PLANES
+/* PARTIAL_FRAME_STRIPE_COUNT must be even */
+#define PARTIAL_FRAME_STRIPE_COUNT 4
+
+#define MAX_NUM_CPP_STRIPS 8
+#define MSM_CPP_MAX_NUM_PLANES 3
+#define MSM_CPP_MIN_FRAME_LENGTH 13
+#define MSM_CPP_MAX_FRAME_LENGTH 4096
+#define MSM_CPP_MAX_FW_NAME_LEN 32
+#define MAX_FREQ_TBL 10
+#define MSM_OUTPUT_BUF_CNT 8
+
+enum msm_cpp_frame_type {
+	MSM_CPP_OFFLINE_FRAME,
+	MSM_CPP_REALTIME_FRAME,
+};
+
+enum msm_vpe_frame_type {
+	MSM_VPE_OFFLINE_FRAME,
+	MSM_VPE_REALTIME_FRAME,
+};
+
+struct msm_cpp_buffer_info_t {
+	int32_t fd;
+	uint32_t index;
+	uint32_t offset;
+	uint8_t native_buff;
+	uint8_t processed_divert;
+	uint32_t identity;
+};
+
+struct msm_cpp_stream_buff_info_t {
+	uint32_t identity;
+	uint32_t num_buffs;
+	struct msm_cpp_buffer_info_t *buffer_info;
+};
+
+enum msm_cpp_batch_mode_t {
+	BATCH_MODE_NONE,
+	BATCH_MODE_VIDEO,
+	BATCH_MODE_PREVIEW
+};
+
+struct msm_cpp_batch_info_t {
+	enum msm_cpp_batch_mode_t  batch_mode;
+	uint32_t batch_size;
+	uint32_t intra_plane_offset[MAX_PLANES];
+	uint32_t pick_preview_idx;
+	uint32_t cont_idx;
+};
+
+struct msm_cpp_frame_info_t {
+	int32_t frame_id;
+	struct timeval timestamp;
+	uint32_t inst_id;
+	uint32_t identity;
+	uint32_t client_id;
+	enum msm_cpp_frame_type frame_type;
+	uint32_t num_strips;
+	uint32_t msg_len;
+	uint32_t *cpp_cmd_msg;
+	int src_fd;
+	int dst_fd;
+	struct timeval in_time, out_time;
+	void __user *cookie;
+	int32_t *status;
+	int32_t duplicate_output;
+	uint32_t duplicate_identity;
+	uint32_t feature_mask;
+	uint8_t we_disable;
+	struct msm_cpp_buffer_info_t input_buffer_info;
+	struct msm_cpp_buffer_info_t output_buffer_info[MSM_OUTPUT_BUF_CNT];
+	struct msm_cpp_buffer_info_t duplicate_buffer_info;
+	struct msm_cpp_buffer_info_t tnr_scratch_buffer_info[2];
+	uint32_t reserved;
+	uint8_t partial_frame_indicator;
+	/* The following fields are used only for the partial_frame type.
+	 * They apply only to offline frame processing, and only when the
+	 * payload is big enough that it must be split into partial frames.
+	 * If first_payload is set, the kernel acquires the output buffer;
+	 * the first payload must carry the last stripe, and buffer
+	 * addresses from 0 to last_stripe_index are updated.
+	 * The kernel updates the payload with msg_len and stripe_info,
+	 * sends the top level, then the plane level, then only the stripes
+	 * from first_stripe_index through last_stripe_index, and finally
+	 * sends the trailing flag at frame done.
+	 * If last_payload is set, the kernel queues the output buffer
+	 * back to the HAL.
+	 */
+	uint8_t first_payload;
+	uint8_t last_payload;
+	uint32_t first_stripe_index;
+	uint32_t last_stripe_index;
+	uint32_t stripe_info_offset;
+	uint32_t stripe_info;
+	struct msm_cpp_batch_info_t  batch_info;
+};
+
+struct msm_cpp_pop_stream_info_t {
+	int32_t frame_id;
+	uint32_t identity;
+};
+
+struct cpp_hw_info {
+	uint32_t cpp_hw_version;
+	uint32_t cpp_hw_caps;
+	unsigned long freq_tbl[MAX_FREQ_TBL];
+	uint32_t freq_tbl_count;
+};
+
+struct msm_vpe_frame_strip_info {
+	uint32_t src_w;
+	uint32_t src_h;
+	uint32_t dst_w;
+	uint32_t dst_h;
+	uint32_t src_x;
+	uint32_t src_y;
+	uint32_t phase_step_x;
+	uint32_t phase_step_y;
+	uint32_t phase_init_x;
+	uint32_t phase_init_y;
+};
+
+struct msm_vpe_buffer_info_t {
+	int32_t fd;
+	uint32_t index;
+	uint32_t offset;
+	uint8_t native_buff;
+	uint8_t processed_divert;
+};
+
+struct msm_vpe_stream_buff_info_t {
+	uint32_t identity;
+	uint32_t num_buffs;
+	struct msm_vpe_buffer_info_t *buffer_info;
+};
+
+struct msm_vpe_frame_info_t {
+	int32_t frame_id;
+	struct timeval timestamp;
+	uint32_t inst_id;
+	uint32_t identity;
+	uint32_t client_id;
+	enum msm_vpe_frame_type frame_type;
+	struct msm_vpe_frame_strip_info strip_info;
+	unsigned long src_fd;
+	unsigned long dst_fd;
+	struct ion_handle *src_ion_handle;
+	struct ion_handle *dest_ion_handle;
+	unsigned long src_phyaddr;
+	unsigned long dest_phyaddr;
+	unsigned long src_chroma_plane_offset;
+	unsigned long dest_chroma_plane_offset;
+	struct timeval in_time, out_time;
+	void *cookie;
+
+	struct msm_vpe_buffer_info_t input_buffer_info;
+	struct msm_vpe_buffer_info_t output_buffer_info;
+};
+
+struct msm_pproc_queue_buf_info {
+	struct msm_buf_mngr_info buff_mgr_info;
+	uint8_t is_buf_dirty;
+};
+
+struct msm_cpp_clock_settings_t {
+	unsigned long clock_rate;
+	uint64_t avg;
+	uint64_t inst;
+};
+
+#define VIDIOC_MSM_CPP_CFG \
+	_IOWR('V', BASE_VIDIOC_PRIVATE, struct msm_camera_v4l2_ioctl_t)
+
+#define VIDIOC_MSM_CPP_GET_EVENTPAYLOAD \
+	_IOWR('V', BASE_VIDIOC_PRIVATE + 1, struct msm_camera_v4l2_ioctl_t)
+
+#define VIDIOC_MSM_CPP_GET_INST_INFO \
+	_IOWR('V', BASE_VIDIOC_PRIVATE + 2, struct msm_camera_v4l2_ioctl_t)
+
+#define VIDIOC_MSM_CPP_LOAD_FIRMWARE \
+	_IOWR('V', BASE_VIDIOC_PRIVATE + 3, struct msm_camera_v4l2_ioctl_t)
+
+#define VIDIOC_MSM_CPP_GET_HW_INFO \
+	_IOWR('V', BASE_VIDIOC_PRIVATE + 4, struct msm_camera_v4l2_ioctl_t)
+
+#define VIDIOC_MSM_CPP_FLUSH_QUEUE \
+	_IOWR('V', BASE_VIDIOC_PRIVATE + 5, struct msm_camera_v4l2_ioctl_t)
+
+#define VIDIOC_MSM_CPP_ENQUEUE_STREAM_BUFF_INFO \
+	_IOWR('V', BASE_VIDIOC_PRIVATE + 6, struct msm_camera_v4l2_ioctl_t)
+
+#define VIDIOC_MSM_CPP_DEQUEUE_STREAM_BUFF_INFO \
+	_IOWR('V', BASE_VIDIOC_PRIVATE + 7, struct msm_camera_v4l2_ioctl_t)
+
+#define VIDIOC_MSM_VPE_CFG \
+	_IOWR('V', BASE_VIDIOC_PRIVATE + 8, struct msm_camera_v4l2_ioctl_t)
+
+#define VIDIOC_MSM_VPE_TRANSACTION_SETUP \
+	_IOWR('V', BASE_VIDIOC_PRIVATE + 9, struct msm_camera_v4l2_ioctl_t)
+
+#define VIDIOC_MSM_VPE_GET_EVENTPAYLOAD \
+	_IOWR('V', BASE_VIDIOC_PRIVATE + 10, struct msm_camera_v4l2_ioctl_t)
+
+#define VIDIOC_MSM_VPE_GET_INST_INFO \
+	_IOWR('V', BASE_VIDIOC_PRIVATE + 11, struct msm_camera_v4l2_ioctl_t)
+
+#define VIDIOC_MSM_VPE_ENQUEUE_STREAM_BUFF_INFO \
+	_IOWR('V', BASE_VIDIOC_PRIVATE + 12, struct msm_camera_v4l2_ioctl_t)
+
+#define VIDIOC_MSM_VPE_DEQUEUE_STREAM_BUFF_INFO \
+	_IOWR('V', BASE_VIDIOC_PRIVATE + 13, struct msm_camera_v4l2_ioctl_t)
+
+#define VIDIOC_MSM_CPP_QUEUE_BUF \
+	_IOWR('V', BASE_VIDIOC_PRIVATE + 14, struct msm_camera_v4l2_ioctl_t)
+
+#define VIDIOC_MSM_CPP_APPEND_STREAM_BUFF_INFO \
+	_IOWR('V', BASE_VIDIOC_PRIVATE + 15, struct msm_camera_v4l2_ioctl_t)
+
+#define VIDIOC_MSM_CPP_SET_CLOCK \
+	_IOWR('V', BASE_VIDIOC_PRIVATE + 16, struct msm_camera_v4l2_ioctl_t)
+
+#define VIDIOC_MSM_CPP_POP_STREAM_BUFFER \
+	_IOWR('V', BASE_VIDIOC_PRIVATE + 17, struct msm_camera_v4l2_ioctl_t)
+
+#define VIDIOC_MSM_CPP_IOMMU_ATTACH \
+	_IOWR('V', BASE_VIDIOC_PRIVATE + 18, struct msm_camera_v4l2_ioctl_t)
+
+#define VIDIOC_MSM_CPP_IOMMU_DETACH \
+	_IOWR('V', BASE_VIDIOC_PRIVATE + 19, struct msm_camera_v4l2_ioctl_t)
+
+#define VIDIOC_MSM_CPP_DELETE_STREAM_BUFF \
+	_IOWR('V', BASE_VIDIOC_PRIVATE + 20, struct msm_camera_v4l2_ioctl_t)
+
+#define V4L2_EVENT_CPP_FRAME_DONE  (V4L2_EVENT_PRIVATE_START + 0)
+#define V4L2_EVENT_VPE_FRAME_DONE  (V4L2_EVENT_PRIVATE_START + 1)
+
+struct msm_camera_v4l2_ioctl_t {
+	uint32_t id;
+	size_t len;
+	int32_t trans_code;
+	void __user *ioctl_ptr;
+};
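+
+/*
+ * Illustrative sketch (assumed usage; stream_identity and bufs are
+ * placeholders): every CPP/VPE ioctl above passes this wrapper, with
+ * ioctl_ptr pointing at the command-specific payload, e.g. for
+ * VIDIOC_MSM_CPP_ENQUEUE_STREAM_BUFF_INFO:
+ *
+ *	struct msm_cpp_stream_buff_info_t sbi = {
+ *		.identity = stream_identity,
+ *		.num_buffs = 2,
+ *		.buffer_info = bufs,
+ *	};
+ *	struct msm_camera_v4l2_ioctl_t arg = {
+ *		.len = sizeof(sbi),
+ *		.ioctl_ptr = &sbi,
+ *	};
+ *	ioctl(cpp_fd, VIDIOC_MSM_CPP_ENQUEUE_STREAM_BUFF_INFO, &arg);
+ */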
+
+#endif
+
diff -Nruw linux-4.4.115-fbx/include/uapi/media/msm_camera.h linux-4.4.115-fbx/include/uapi/media/msm_camera.h
--- linux-4.4.115-fbx/include/uapi/media/msm_camera.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/include/uapi/media/msm_camera.h	2019-01-22 16:16:28.599292554 +0100
@@ -0,0 +1,2228 @@
+/* Copyright (c) 2009-2012, 2014-2016, 2018 The Linux Foundation.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef __UAPI_MSM_CAMERA_H
+#define __UAPI_MSM_CAMERA_H
+
+#include <linux/videodev2.h>
+#include <linux/types.h>
+#include <linux/ioctl.h>
+
+#include <linux/msm_ion.h>
+
+#define BIT(nr)   (1UL << (nr))
+
+#define MSM_CAM_IOCTL_MAGIC 'm'
+
+#define MAX_SERVER_PAYLOAD_LENGTH 8192
+
+#define MSM_CAM_IOCTL_GET_SENSOR_INFO \
+	_IOR(MSM_CAM_IOCTL_MAGIC, 1, struct msm_camsensor_info *)
+
+#define MSM_CAM_IOCTL_REGISTER_PMEM \
+	_IOW(MSM_CAM_IOCTL_MAGIC, 2, struct msm_pmem_info *)
+
+#define MSM_CAM_IOCTL_UNREGISTER_PMEM \
+	_IOW(MSM_CAM_IOCTL_MAGIC, 3, unsigned)
+
+#define MSM_CAM_IOCTL_CTRL_COMMAND \
+	_IOW(MSM_CAM_IOCTL_MAGIC, 4, struct msm_ctrl_cmd *)
+
+#define MSM_CAM_IOCTL_CONFIG_VFE  \
+	_IOW(MSM_CAM_IOCTL_MAGIC, 5, struct msm_camera_vfe_cfg_cmd *)
+
+#define MSM_CAM_IOCTL_GET_STATS \
+	_IOR(MSM_CAM_IOCTL_MAGIC, 6, struct msm_camera_stats_event_ctrl *)
+
+#define MSM_CAM_IOCTL_GETFRAME \
+	_IOR(MSM_CAM_IOCTL_MAGIC, 7, struct msm_camera_get_frame *)
+
+#define MSM_CAM_IOCTL_ENABLE_VFE \
+	_IOW(MSM_CAM_IOCTL_MAGIC, 8, struct camera_enable_cmd *)
+
+#define MSM_CAM_IOCTL_CTRL_CMD_DONE \
+	_IOW(MSM_CAM_IOCTL_MAGIC, 9, struct camera_cmd *)
+
+#define MSM_CAM_IOCTL_CONFIG_CMD \
+	_IOW(MSM_CAM_IOCTL_MAGIC, 10, struct camera_cmd *)
+
+#define MSM_CAM_IOCTL_DISABLE_VFE \
+	_IOW(MSM_CAM_IOCTL_MAGIC, 11, struct camera_enable_cmd *)
+
+#define MSM_CAM_IOCTL_PAD_REG_RESET2 \
+	_IOW(MSM_CAM_IOCTL_MAGIC, 12, struct camera_enable_cmd *)
+
+#define MSM_CAM_IOCTL_VFE_APPS_RESET \
+	_IOW(MSM_CAM_IOCTL_MAGIC, 13, struct camera_enable_cmd *)
+
+#define MSM_CAM_IOCTL_RELEASE_FRAME_BUFFER \
+	_IOW(MSM_CAM_IOCTL_MAGIC, 14, struct camera_enable_cmd *)
+
+#define MSM_CAM_IOCTL_RELEASE_STATS_BUFFER \
+	_IOW(MSM_CAM_IOCTL_MAGIC, 15, struct msm_stats_buf *)
+
+#define MSM_CAM_IOCTL_AXI_CONFIG \
+	_IOW(MSM_CAM_IOCTL_MAGIC, 16, struct msm_camera_vfe_cfg_cmd *)
+
+#define MSM_CAM_IOCTL_GET_PICTURE \
+	_IOW(MSM_CAM_IOCTL_MAGIC, 17, struct msm_frame *)
+
+#define MSM_CAM_IOCTL_SET_CROP \
+	_IOW(MSM_CAM_IOCTL_MAGIC, 18, struct crop_info *)
+
+#define MSM_CAM_IOCTL_PICT_PP \
+	_IOW(MSM_CAM_IOCTL_MAGIC, 19, uint8_t *)
+
+#define MSM_CAM_IOCTL_PICT_PP_DONE \
+	_IOW(MSM_CAM_IOCTL_MAGIC, 20, struct msm_snapshot_pp_status *)
+
+#define MSM_CAM_IOCTL_SENSOR_IO_CFG \
+	_IOW(MSM_CAM_IOCTL_MAGIC, 21, struct sensor_cfg_data *)
+
+#define MSM_CAM_IOCTL_FLASH_LED_CFG \
+	_IOW(MSM_CAM_IOCTL_MAGIC, 22, unsigned *)
+
+#define MSM_CAM_IOCTL_UNBLOCK_POLL_FRAME \
+	_IO(MSM_CAM_IOCTL_MAGIC, 23)
+
+#define MSM_CAM_IOCTL_CTRL_COMMAND_2 \
+	_IOW(MSM_CAM_IOCTL_MAGIC, 24, struct msm_ctrl_cmd *)
+
+#define MSM_CAM_IOCTL_AF_CTRL \
+	_IOR(MSM_CAM_IOCTL_MAGIC, 25, struct msm_ctrl_cmt_t *)
+
+#define MSM_CAM_IOCTL_AF_CTRL_DONE \
+	_IOW(MSM_CAM_IOCTL_MAGIC, 26, struct msm_ctrl_cmt_t *)
+
+#define MSM_CAM_IOCTL_CONFIG_VPE \
+	_IOW(MSM_CAM_IOCTL_MAGIC, 27, struct msm_camera_vpe_cfg_cmd *)
+
+#define MSM_CAM_IOCTL_AXI_VPE_CONFIG \
+	_IOW(MSM_CAM_IOCTL_MAGIC, 28, struct msm_camera_vpe_cfg_cmd *)
+
+#define MSM_CAM_IOCTL_STROBE_FLASH_CFG \
+	_IOW(MSM_CAM_IOCTL_MAGIC, 29, uint32_t *)
+
+#define MSM_CAM_IOCTL_STROBE_FLASH_CHARGE \
+	_IOW(MSM_CAM_IOCTL_MAGIC, 30, uint32_t *)
+
+#define MSM_CAM_IOCTL_STROBE_FLASH_RELEASE \
+	_IO(MSM_CAM_IOCTL_MAGIC, 31)
+
+#define MSM_CAM_IOCTL_FLASH_CTRL \
+	_IOW(MSM_CAM_IOCTL_MAGIC, 32, struct flash_ctrl_data *)
+
+#define MSM_CAM_IOCTL_ERROR_CONFIG \
+	_IOW(MSM_CAM_IOCTL_MAGIC, 33, uint32_t *)
+
+#define MSM_CAM_IOCTL_ABORT_CAPTURE \
+	_IO(MSM_CAM_IOCTL_MAGIC, 34)
+
+#define MSM_CAM_IOCTL_SET_FD_ROI \
+	_IOW(MSM_CAM_IOCTL_MAGIC, 35, struct fd_roi_info *)
+
+#define MSM_CAM_IOCTL_GET_CAMERA_INFO \
+	_IOR(MSM_CAM_IOCTL_MAGIC, 36, struct msm_camera_info *)
+
+#define MSM_CAM_IOCTL_UNBLOCK_POLL_PIC_FRAME \
+	_IO(MSM_CAM_IOCTL_MAGIC, 37)
+
+#define MSM_CAM_IOCTL_RELEASE_PIC_BUFFER \
+	_IOW(MSM_CAM_IOCTL_MAGIC, 38, struct camera_enable_cmd *)
+
+#define MSM_CAM_IOCTL_PUT_ST_FRAME \
+	_IOW(MSM_CAM_IOCTL_MAGIC, 39, struct msm_camera_st_frame *)
+
+#define MSM_CAM_IOCTL_V4L2_EVT_NOTIFY \
+	_IOW(MSM_CAM_IOCTL_MAGIC, 40, struct v4l2_event_and_payload)
+
+#define MSM_CAM_IOCTL_SET_MEM_MAP_INFO \
+	_IOR(MSM_CAM_IOCTL_MAGIC, 41, struct msm_mem_map_info *)
+
+#define MSM_CAM_IOCTL_ACTUATOR_IO_CFG \
+	_IOW(MSM_CAM_IOCTL_MAGIC, 42, struct msm_actuator_cfg_data *)
+
+#define MSM_CAM_IOCTL_MCTL_POST_PROC \
+	_IOW(MSM_CAM_IOCTL_MAGIC, 43, struct msm_mctl_post_proc_cmd *)
+
+#define MSM_CAM_IOCTL_RESERVE_FREE_FRAME \
+	_IOW(MSM_CAM_IOCTL_MAGIC, 44, struct msm_cam_evt_divert_frame *)
+
+#define MSM_CAM_IOCTL_RELEASE_FREE_FRAME \
+	_IOR(MSM_CAM_IOCTL_MAGIC, 45, struct msm_cam_evt_divert_frame *)
+
+#define MSM_CAM_IOCTL_PICT_PP_DIVERT_DONE \
+	_IOR(MSM_CAM_IOCTL_MAGIC, 46, struct msm_pp_frame *)
+
+#define MSM_CAM_IOCTL_SENSOR_V4l2_S_CTRL \
+	_IOR(MSM_CAM_IOCTL_MAGIC, 47, struct v4l2_control)
+
+#define MSM_CAM_IOCTL_SENSOR_V4l2_QUERY_CTRL \
+	_IOR(MSM_CAM_IOCTL_MAGIC, 48, struct v4l2_queryctrl)
+
+#define MSM_CAM_IOCTL_GET_KERNEL_SYSTEM_TIME \
+	_IOW(MSM_CAM_IOCTL_MAGIC, 49, struct timeval *)
+
+#define MSM_CAM_IOCTL_SET_VFE_OUTPUT_TYPE \
+	_IOW(MSM_CAM_IOCTL_MAGIC, 50, uint32_t *)
+
+#define MSM_CAM_IOCTL_MCTL_DIVERT_DONE \
+	_IOR(MSM_CAM_IOCTL_MAGIC, 51, struct msm_cam_evt_divert_frame *)
+
+#define MSM_CAM_IOCTL_GET_ACTUATOR_INFO \
+	_IOW(MSM_CAM_IOCTL_MAGIC, 52, struct msm_actuator_cfg_data *)
+
+#define MSM_CAM_IOCTL_EEPROM_IO_CFG \
+	_IOW(MSM_CAM_IOCTL_MAGIC, 53, struct msm_eeprom_cfg_data *)
+
+#define MSM_CAM_IOCTL_ISPIF_IO_CFG \
+	_IOR(MSM_CAM_IOCTL_MAGIC, 54, struct ispif_cfg_data *)
+
+#define MSM_CAM_IOCTL_STATS_REQBUF \
+	_IOR(MSM_CAM_IOCTL_MAGIC, 55, struct msm_stats_reqbuf *)
+
+#define MSM_CAM_IOCTL_STATS_ENQUEUEBUF \
+	_IOR(MSM_CAM_IOCTL_MAGIC, 56, struct msm_stats_buf_info *)
+
+#define MSM_CAM_IOCTL_STATS_FLUSH_BUFQ \
+	_IOR(MSM_CAM_IOCTL_MAGIC, 57, struct msm_stats_flush_bufq *)
+
+#define MSM_CAM_IOCTL_SET_MCTL_SDEV \
+	_IOW(MSM_CAM_IOCTL_MAGIC, 58, struct msm_mctl_set_sdev_data *)
+
+#define MSM_CAM_IOCTL_UNSET_MCTL_SDEV \
+	_IOW(MSM_CAM_IOCTL_MAGIC, 59, struct msm_mctl_set_sdev_data *)
+
+#define MSM_CAM_IOCTL_GET_INST_HANDLE \
+	_IOR(MSM_CAM_IOCTL_MAGIC, 60, uint32_t *)
+
+#define MSM_CAM_IOCTL_STATS_UNREG_BUF \
+	_IOR(MSM_CAM_IOCTL_MAGIC, 61, struct msm_stats_flush_bufq *)
+
+#define MSM_CAM_IOCTL_CSIC_IO_CFG \
+	_IOWR(MSM_CAM_IOCTL_MAGIC, 62, struct csic_cfg_data *)
+
+#define MSM_CAM_IOCTL_CSID_IO_CFG \
+	_IOWR(MSM_CAM_IOCTL_MAGIC, 63, struct csid_cfg_data *)
+
+#define MSM_CAM_IOCTL_CSIPHY_IO_CFG \
+	_IOR(MSM_CAM_IOCTL_MAGIC, 64, struct csiphy_cfg_data *)
+
+#define MSM_CAM_IOCTL_OEM \
+	_IOW(MSM_CAM_IOCTL_MAGIC, 65, struct sensor_cfg_data *)
+
+#define MSM_CAM_IOCTL_AXI_INIT \
+	_IOWR(MSM_CAM_IOCTL_MAGIC, 66, uint8_t *)
+
+#define MSM_CAM_IOCTL_AXI_RELEASE \
+	_IO(MSM_CAM_IOCTL_MAGIC, 67)
+
+struct v4l2_event_and_payload {
+	struct v4l2_event evt;
+	uint32_t payload_length;
+	uint32_t transaction_id;
+	void *payload;
+};
+
+struct msm_stats_reqbuf {
+	int num_buf;		/* how many buffers requested */
+	int stats_type;	/* stats type */
+};
+
+struct msm_stats_flush_bufq {
+	int stats_type;	/* enum msm_stats_enum_type */
+};
+
+struct msm_mctl_pp_cmd {
+	int32_t  id;
+	uint16_t length;
+	void     *value;
+};
+
+struct msm_mctl_post_proc_cmd {
+	int32_t type;
+	struct msm_mctl_pp_cmd cmd;
+};
+
+#define MSM_CAMERA_LED_OFF  0
+#define MSM_CAMERA_LED_LOW  1
+#define MSM_CAMERA_LED_HIGH 2
+#define MSM_CAMERA_LED_INIT 3
+#define MSM_CAMERA_LED_RELEASE 4
+
+#define MSM_CAMERA_STROBE_FLASH_NONE 0
+#define MSM_CAMERA_STROBE_FLASH_XENON 1
+
+#define MSM_MAX_CAMERA_SENSORS  6
+#define MAX_SENSOR_NAME 32
+#define MAX_CAM_NAME_SIZE 32
+#define MAX_ACT_MOD_NAME_SIZE 32
+#define MAX_ACT_NAME_SIZE 32
+#define NUM_ACTUATOR_DIR 2
+#define MAX_ACTUATOR_SCENARIO 8
+#define MAX_ACTUATOR_REGION 5
+#define MAX_ACTUATOR_INIT_SET 12
+#define MAX_ACTUATOR_TYPE_SIZE 32
+#define MAX_ACTUATOR_REG_TBL_SIZE 8
+
+#define MSM_MAX_CAMERA_CONFIGS 2
+
+#define PP_SNAP  0x01
+#define PP_RAW_SNAP ((0x01)<<1)
+#define PP_PREV  ((0x01)<<2)
+#define PP_THUMB ((0x01)<<3)
+#define PP_MASK		(PP_SNAP|PP_RAW_SNAP|PP_PREV|PP_THUMB)
+
+#define MSM_CAM_CTRL_CMD_DONE  0
+#define MSM_CAM_SENSOR_VFE_CMD 1
+
+/* Should be same as VIDEO_MAX_PLANES in videodev2.h */
+#define MAX_PLANES 8
+
+/*****************************************************
+ *  structures
+ *****************************************************/
+
+/* Define five types of structures for userspace <==> kernel
+ * space communication:
+ * commands 1 - 2 go from userspace ==> kernel;
+ * commands 3 - 4 go from kernel ==> userspace.
+ *
+ * 1. control command: control command (from the control thread),
+ *                     control status (from the config thread);
+ */
+struct msm_ctrl_cmd {
+	uint16_t type;
+	uint16_t length;
+	void *value;
+	uint16_t status;
+	uint32_t timeout_ms;
+	int resp_fd; /* FIXME: to be used by the kernel, pass-through for now */
+	int vnode_id;  /* video dev id. Can we overload resp_fd? */
+	int queue_idx;
+	uint32_t evt_id;
+	uint32_t stream_type; /* used to pass value to qcamera server */
+	int config_ident; /*used as identifier for config node*/
+};
+
+struct msm_cam_evt_msg {
+	unsigned short type;	/* 1 == event (RPC), 0 == message (adsp) */
+	unsigned short msg_id;
+	unsigned int len;	/* size in, number of bytes out */
+	uint32_t frame_id;
+	void *data;
+	struct timespec timestamp;
+};
+
+struct msm_pp_frame_sp {
+	/* phy addr of the buffer */
+	unsigned long  phy_addr;
+	uint32_t       y_off;
+	uint32_t       cbcr_off;
+	/* buffer length */
+	uint32_t       length;
+	int32_t        fd;
+	uint32_t       addr_offset;
+	/* mapped addr */
+	unsigned long  vaddr;
+};
+
+struct msm_pp_frame_mp {
+	/* phy addr of the plane */
+	unsigned long  phy_addr;
+	/* offset of plane data */
+	uint32_t       data_offset;
+	/* plane length */
+	uint32_t       length;
+	int32_t        fd;
+	uint32_t       addr_offset;
+	/* mapped addr */
+	unsigned long  vaddr;
+};
+
+struct msm_pp_frame {
+	uint32_t       handle; /* stores vb cookie */
+	uint32_t       frame_id;
+	unsigned short buf_idx;
+	int            path;
+	unsigned short image_type;
+	unsigned short num_planes; /* 1 for sp */
+	struct timeval timestamp;
+	union {
+		struct msm_pp_frame_sp sp;
+		struct msm_pp_frame_mp mp[MAX_PLANES];
+	};
+	int node_type;
+	uint32_t inst_handle;
+};
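+
+/*
+ * Illustrative reading of the union above: num_planes selects the
+ * valid member -- sp for a single contiguous plane ("1 for sp"),
+ * mp[] otherwise.
+ *
+ *	if (frame->num_planes == 1)
+ *		total = frame->sp.length;
+ *	else
+ *		for (i = 0; i < frame->num_planes; i++)
+ *			total += frame->mp[i].length;
+ */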
+
+struct msm_pp_crop {
+	uint32_t  src_x;
+	uint32_t  src_y;
+	uint32_t  src_w;
+	uint32_t  src_h;
+	uint32_t  dst_x;
+	uint32_t  dst_y;
+	uint32_t  dst_w;
+	uint32_t  dst_h;
+	uint8_t update_flag;
+};
+
+struct msm_mctl_pp_frame_cmd {
+	uint32_t cookie;
+	uint8_t  vpe_output_action;
+	struct msm_pp_frame src_frame;
+	struct msm_pp_frame dest_frame;
+	struct msm_pp_crop crop;
+	int path;
+};
+
+struct msm_cam_evt_divert_frame {
+	unsigned short image_mode;
+	unsigned short op_mode;
+	unsigned short inst_idx;
+	unsigned short node_idx;
+	struct msm_pp_frame frame;
+	int            do_pp;
+};
+
+struct msm_mctl_pp_cmd_ack_event {
+	uint32_t cmd;        /* VPE_CMD_ZOOM? */
+	int      status;     /* 0 done, < 0 err */
+	uint32_t cookie;     /* daemon's cookie */
+};
+
+struct msm_mctl_pp_event_info {
+	int32_t  event;
+	union {
+		struct msm_mctl_pp_cmd_ack_event ack;
+	};
+};
+
+struct msm_isp_event_ctrl {
+	unsigned short resptype;
+	union {
+		struct msm_cam_evt_msg isp_msg;
+		struct msm_ctrl_cmd ctrl;
+		struct msm_cam_evt_divert_frame div_frame;
+		struct msm_mctl_pp_event_info pp_event_info;
+	} isp_data;
+};
+
+#define MSM_CAM_RESP_CTRL              0
+#define MSM_CAM_RESP_STAT_EVT_MSG      1
+#define MSM_CAM_RESP_STEREO_OP_1       2
+#define MSM_CAM_RESP_STEREO_OP_2       3
+#define MSM_CAM_RESP_V4L2              4
+#define MSM_CAM_RESP_DIV_FRAME_EVT_MSG 5
+#define MSM_CAM_RESP_DONE_EVENT        6
+#define MSM_CAM_RESP_MCTL_PP_EVENT     7
+#define MSM_CAM_RESP_MAX               8
+
+#define MSM_CAM_APP_NOTIFY_EVENT  0
+#define MSM_CAM_APP_NOTIFY_ERROR_EVENT  1
+
+/* this one is used to send ctrl/status up to config thread */
+
+struct msm_stats_event_ctrl {
+	/* 0 - ctrl_cmd from control thread,
+	 * 1 - stats/event kernel,
+	 * 2 - V4L control or read request */
+	int resptype;
+	int timeout_ms;
+	struct msm_ctrl_cmd ctrl_cmd;
+	/* struct  vfe_event_t  stats_event; */
+	struct msm_cam_evt_msg stats_event;
+};
+
+/* 2. config command: config command (from the config thread); */
+struct msm_camera_cfg_cmd {
+	/* what to config:
+	 * 1 - sensor config, 2 - vfe config */
+	uint16_t cfg_type;
+
+	/* sensor config type */
+	uint16_t cmd_type;
+	uint16_t queue;
+	uint16_t length;
+	void *value;
+};
+
+#define CMD_GENERAL			0
+#define CMD_AXI_CFG_OUT1		1
+#define CMD_AXI_CFG_SNAP_O1_AND_O2	2
+#define CMD_AXI_CFG_OUT2		3
+#define CMD_PICT_T_AXI_CFG		4
+#define CMD_PICT_M_AXI_CFG		5
+#define CMD_RAW_PICT_AXI_CFG		6
+
+#define CMD_FRAME_BUF_RELEASE		7
+#define CMD_PREV_BUF_CFG		8
+#define CMD_SNAP_BUF_RELEASE		9
+#define CMD_SNAP_BUF_CFG		10
+#define CMD_STATS_DISABLE		11
+#define CMD_STATS_AEC_AWB_ENABLE	12
+#define CMD_STATS_AF_ENABLE		13
+#define CMD_STATS_AEC_ENABLE		14
+#define CMD_STATS_AWB_ENABLE		15
+#define CMD_STATS_ENABLE  		16
+
+#define CMD_STATS_AXI_CFG		17
+#define CMD_STATS_AEC_AXI_CFG		18
+#define CMD_STATS_AF_AXI_CFG 		19
+#define CMD_STATS_AWB_AXI_CFG		20
+#define CMD_STATS_RS_AXI_CFG		21
+#define CMD_STATS_CS_AXI_CFG		22
+#define CMD_STATS_IHIST_AXI_CFG		23
+#define CMD_STATS_SKIN_AXI_CFG		24
+
+#define CMD_STATS_BUF_RELEASE		25
+#define CMD_STATS_AEC_BUF_RELEASE	26
+#define CMD_STATS_AF_BUF_RELEASE	27
+#define CMD_STATS_AWB_BUF_RELEASE	28
+#define CMD_STATS_RS_BUF_RELEASE	29
+#define CMD_STATS_CS_BUF_RELEASE	30
+#define CMD_STATS_IHIST_BUF_RELEASE	31
+#define CMD_STATS_SKIN_BUF_RELEASE	32
+
+#define UPDATE_STATS_INVALID		33
+#define CMD_AXI_CFG_SNAP_GEMINI		34
+#define CMD_AXI_CFG_SNAP		35
+#define CMD_AXI_CFG_PREVIEW		36
+#define CMD_AXI_CFG_VIDEO		37
+
+#define CMD_STATS_IHIST_ENABLE 38
+#define CMD_STATS_RS_ENABLE 39
+#define CMD_STATS_CS_ENABLE 40
+#define CMD_VPE 41
+#define CMD_AXI_CFG_VPE 42
+#define CMD_AXI_CFG_ZSL 43
+#define CMD_AXI_CFG_SNAP_VPE 44
+#define CMD_AXI_CFG_SNAP_THUMB_VPE 45
+
+#define CMD_CONFIG_PING_ADDR 46
+#define CMD_CONFIG_PONG_ADDR 47
+#define CMD_CONFIG_FREE_BUF_ADDR 48
+#define CMD_AXI_CFG_ZSL_ALL_CHNLS 49
+#define CMD_AXI_CFG_VIDEO_ALL_CHNLS 50
+#define CMD_VFE_BUFFER_RELEASE 51
+#define CMD_VFE_PROCESS_IRQ 52
+#define CMD_STATS_BG_ENABLE 53
+#define CMD_STATS_BF_ENABLE 54
+#define CMD_STATS_BHIST_ENABLE 55
+#define CMD_STATS_BG_BUF_RELEASE 56
+#define CMD_STATS_BF_BUF_RELEASE 57
+#define CMD_STATS_BHIST_BUF_RELEASE 58
+#define CMD_VFE_PIX_SOF_COUNT_UPDATE 59
+#define CMD_VFE_COUNT_PIX_SOF_ENABLE 60
+#define CMD_STATS_BE_ENABLE 61
+#define CMD_STATS_BE_BUF_RELEASE 62
+
+#define CMD_AXI_CFG_PRIM               BIT(8)
+#define CMD_AXI_CFG_PRIM_ALL_CHNLS     BIT(9)
+#define CMD_AXI_CFG_SEC                BIT(10)
+#define CMD_AXI_CFG_SEC_ALL_CHNLS      BIT(11)
+#define CMD_AXI_CFG_TERT1              BIT(12)
+#define CMD_AXI_CFG_TERT2              BIT(13)
+
+#define CMD_AXI_START  0xE1
+#define CMD_AXI_STOP   0xE2
+#define CMD_AXI_RESET  0xE3
+#define CMD_AXI_ABORT  0xE4
+
+#define AXI_CMD_PREVIEW      BIT(0)
+#define AXI_CMD_CAPTURE      BIT(1)
+#define AXI_CMD_RECORD       BIT(2)
+#define AXI_CMD_ZSL          BIT(3)
+#define AXI_CMD_RAW_CAPTURE  BIT(4)
+#define AXI_CMD_LIVESHOT     BIT(5)
+
+/* vfe config command: config command(from config thread)*/
+struct msm_vfe_cfg_cmd {
+	int cmd_type;
+	uint16_t length;
+	void *value;
+};
+
+struct msm_vpe_cfg_cmd {
+	int cmd_type;
+	uint16_t length;
+	void *value;
+};
+
+#define MAX_CAMERA_ENABLE_NAME_LEN 32
+struct camera_enable_cmd {
+	char name[MAX_CAMERA_ENABLE_NAME_LEN];
+};
+
+#define MSM_PMEM_OUTPUT1		0
+#define MSM_PMEM_OUTPUT2		1
+#define MSM_PMEM_OUTPUT1_OUTPUT2	2
+#define MSM_PMEM_THUMBNAIL		3
+#define MSM_PMEM_MAINIMG		4
+#define MSM_PMEM_RAW_MAINIMG		5
+#define MSM_PMEM_AEC_AWB		6
+#define MSM_PMEM_AF			7
+#define MSM_PMEM_AEC			8
+#define MSM_PMEM_AWB			9
+#define MSM_PMEM_RS			10
+#define MSM_PMEM_CS			11
+#define MSM_PMEM_IHIST			12
+#define MSM_PMEM_SKIN			13
+#define MSM_PMEM_VIDEO			14
+#define MSM_PMEM_PREVIEW		15
+#define MSM_PMEM_VIDEO_VPE		16
+#define MSM_PMEM_C2D			17
+#define MSM_PMEM_MAINIMG_VPE    18
+#define MSM_PMEM_THUMBNAIL_VPE  19
+#define MSM_PMEM_BAYER_GRID		20
+#define MSM_PMEM_BAYER_FOCUS	21
+#define MSM_PMEM_BAYER_HIST		22
+#define MSM_PMEM_BAYER_EXPOSURE 23
+#define MSM_PMEM_MAX            24
+
+#define STAT_AEAW			0
+#define STAT_AEC			1
+#define STAT_AF				2
+#define STAT_AWB			3
+#define STAT_RS				4
+#define STAT_CS				5
+#define STAT_IHIST			6
+#define STAT_SKIN			7
+#define STAT_BG				8
+#define STAT_BF				9
+#define STAT_BE				10
+#define STAT_BHIST			11
+#define STAT_MAX			12
+
+#define FRAME_PREVIEW_OUTPUT1		0
+#define FRAME_PREVIEW_OUTPUT2		1
+#define FRAME_SNAPSHOT			2
+#define FRAME_THUMBNAIL			3
+#define FRAME_RAW_SNAPSHOT		4
+#define FRAME_MAX			5
+
+enum msm_stats_enum_type {
+	MSM_STATS_TYPE_AEC, /* legacy based AEC */
+	MSM_STATS_TYPE_AF,  /* legacy based AF */
+	MSM_STATS_TYPE_AWB, /* legacy based AWB */
+	MSM_STATS_TYPE_RS,  /* legacy based RS */
+	MSM_STATS_TYPE_CS,  /* legacy based CS */
+	MSM_STATS_TYPE_IHIST,   /* legacy based HIST */
+	MSM_STATS_TYPE_SKIN,    /* legacy based SKIN */
+	MSM_STATS_TYPE_BG,  /* Bayer Grids */
+	MSM_STATS_TYPE_BF,  /* Bayer Focus */
+	MSM_STATS_TYPE_BE,  /* Bayer Exposure*/
+	MSM_STATS_TYPE_BHIST,   /* Bayer Hist */
+	MSM_STATS_TYPE_AE_AW,   /* legacy stats for vfe 2.x*/
+	MSM_STATS_TYPE_COMP, /* Composite stats */
+	MSM_STATS_TYPE_MAX  /* MAX */
+};
+
+struct msm_stats_buf_info {
+	int type; /* msm_stats_enum_type */
+	int fd;
+	void *vaddr;
+	uint32_t offset;
+	uint32_t len;
+	uint32_t y_off;
+	uint32_t cbcr_off;
+	uint32_t planar0_off;
+	uint32_t planar1_off;
+	uint32_t planar2_off;
+	uint8_t active;
+	int buf_idx;
+};
+
+struct msm_pmem_info {
+	int type;
+	int fd;
+	void *vaddr;
+	uint32_t offset;
+	uint32_t len;
+	uint32_t y_off;
+	uint32_t cbcr_off;
+	uint32_t planar0_off;
+	uint32_t planar1_off;
+	uint32_t planar2_off;
+	uint8_t active;
+};
+
+struct outputCfg {
+	uint32_t height;
+	uint32_t width;
+
+	uint32_t window_height_firstline;
+	uint32_t window_height_lastline;
+};
+
+#define VIDEO_NODE 0
+#define MCTL_NODE 1
+
+#define OUTPUT_1	0
+#define OUTPUT_2	1
+#define OUTPUT_1_AND_2            2   /* snapshot only */
+#define OUTPUT_1_AND_3            3   /* video */
+#define CAMIF_TO_AXI_VIA_OUTPUT_2 4
+#define OUTPUT_1_AND_CAMIF_TO_AXI_VIA_OUTPUT_2 5
+#define OUTPUT_2_AND_CAMIF_TO_AXI_VIA_OUTPUT_1 6
+#define OUTPUT_1_2_AND_3 7
+#define OUTPUT_ALL_CHNLS 8
+#define OUTPUT_VIDEO_ALL_CHNLS 9
+#define OUTPUT_ZSL_ALL_CHNLS 10
+#define LAST_AXI_OUTPUT_MODE_ENUM OUTPUT_ZSL_ALL_CHNLS
+
+#define OUTPUT_PRIM              BIT(8)
+#define OUTPUT_PRIM_ALL_CHNLS    BIT(9)
+#define OUTPUT_SEC               BIT(10)
+#define OUTPUT_SEC_ALL_CHNLS     BIT(11)
+#define OUTPUT_TERT1             BIT(12)
+#define OUTPUT_TERT2             BIT(13)
+
+#define MSM_FRAME_PREV_1	0
+#define MSM_FRAME_PREV_2	1
+#define MSM_FRAME_ENC		2
+
+#define OUTPUT_TYPE_P    BIT(0)
+#define OUTPUT_TYPE_T    BIT(1)
+#define OUTPUT_TYPE_S    BIT(2)
+#define OUTPUT_TYPE_V    BIT(3)
+#define OUTPUT_TYPE_L    BIT(4)
+#define OUTPUT_TYPE_ST_L BIT(5)
+#define OUTPUT_TYPE_ST_R BIT(6)
+#define OUTPUT_TYPE_ST_D BIT(7)
+#define OUTPUT_TYPE_R    BIT(8)
+#define OUTPUT_TYPE_R1   BIT(9)
+#define OUTPUT_TYPE_SAEC   BIT(10)
+#define OUTPUT_TYPE_SAFC   BIT(11)
+#define OUTPUT_TYPE_SAWB   BIT(12)
+#define OUTPUT_TYPE_IHST   BIT(13)
+#define OUTPUT_TYPE_CSTA   BIT(14)
+
+struct fd_roi_info {
+	void *info;
+	int info_len;
+};
+
+struct msm_mem_map_info {
+	uint32_t cookie;
+	uint32_t length;
+	uint32_t mem_type;
+};
+
+#define MSM_MEM_MMAP		0
+#define MSM_MEM_USERPTR		1
+#define MSM_PLANE_MAX		8
+#define MSM_PLANE_Y			0
+#define MSM_PLANE_UV		1
+
+struct msm_frame {
+	struct timespec ts;
+	int path;
+	int type;
+	unsigned long buffer;
+	uint32_t phy_offset;
+	uint32_t y_off;
+	uint32_t cbcr_off;
+	uint32_t planar0_off;
+	uint32_t planar1_off;
+	uint32_t planar2_off;
+	int fd;
+
+	void *cropinfo;
+	int croplen;
+	uint32_t error_code;
+	struct fd_roi_info roi_info;
+	uint32_t frame_id;
+	int stcam_quality_ind;
+	uint32_t stcam_conv_value;
+
+	struct ion_allocation_data ion_alloc;
+	struct ion_fd_data fd_data;
+	int ion_dev_fd;
+};
+
+enum msm_st_frame_packing {
+	SIDE_BY_SIDE_HALF,
+	SIDE_BY_SIDE_FULL,
+	TOP_DOWN_HALF,
+	TOP_DOWN_FULL,
+};
+
+struct msm_st_crop {
+	uint32_t in_w;
+	uint32_t in_h;
+	uint32_t out_w;
+	uint32_t out_h;
+};
+
+struct msm_st_half {
+	uint32_t buf_p0_off;
+	uint32_t buf_p1_off;
+	uint32_t buf_p0_stride;
+	uint32_t buf_p1_stride;
+	uint32_t pix_x_off;
+	uint32_t pix_y_off;
+	struct msm_st_crop stCropInfo;
+};
+
+struct msm_st_frame {
+	struct msm_frame buf_info;
+	int type;
+	enum msm_st_frame_packing packing;
+	struct msm_st_half L;
+	struct msm_st_half R;
+	int frame_id;
+};
+
+#define MSM_CAMERA_ERR_MASK (0xFFFFFFFF & 1)
+
+struct stats_buff {
+	unsigned long buff;
+	int fd;
+};
+
+struct msm_stats_buf {
+	uint8_t awb_ymin;
+	struct stats_buff aec;
+	struct stats_buff awb;
+	struct stats_buff af;
+	struct stats_buff be;
+	struct stats_buff ihist;
+	struct stats_buff rs;
+	struct stats_buff cs;
+	struct stats_buff skin;
+	int type;
+	uint32_t status_bits;
+	unsigned long buffer;
+	int fd;
+	int length;
+	struct ion_handle *handle;
+	uint32_t frame_id;
+	int buf_idx;
+};
+
+#define MSM_V4L2_EXT_CAPTURE_MODE_DEFAULT 0
+/* video capture mode in VIDIOC_S_PARM */
+#define MSM_V4L2_EXT_CAPTURE_MODE_PREVIEW \
+	(MSM_V4L2_EXT_CAPTURE_MODE_DEFAULT+1)
+/* extendedmode for video recording in VIDIOC_S_PARM */
+#define MSM_V4L2_EXT_CAPTURE_MODE_VIDEO \
+	(MSM_V4L2_EXT_CAPTURE_MODE_DEFAULT+2)
+/* extendedmode for the full size main image in VIDIOC_S_PARM */
+#define MSM_V4L2_EXT_CAPTURE_MODE_MAIN (MSM_V4L2_EXT_CAPTURE_MODE_DEFAULT+3)
+/* extendedmode for the thumb nail image in VIDIOC_S_PARM */
+#define MSM_V4L2_EXT_CAPTURE_MODE_THUMBNAIL \
+	(MSM_V4L2_EXT_CAPTURE_MODE_DEFAULT+4)
+/* ISP_PIX_OUTPUT1: no pp, directly send output1 buf to user */
+#define MSM_V4L2_EXT_CAPTURE_MODE_ISP_PIX_OUTPUT1 \
+	(MSM_V4L2_EXT_CAPTURE_MODE_DEFAULT+5)
+/* ISP_PIX_OUTPUT2: no pp, directly send output2 buf to user */
+#define MSM_V4L2_EXT_CAPTURE_MODE_ISP_PIX_OUTPUT2 \
+	(MSM_V4L2_EXT_CAPTURE_MODE_DEFAULT+6)
+/* raw image type */
+#define MSM_V4L2_EXT_CAPTURE_MODE_RAW \
+	(MSM_V4L2_EXT_CAPTURE_MODE_DEFAULT+7)
+/* RDI dump */
+#define MSM_V4L2_EXT_CAPTURE_MODE_RDI \
+	(MSM_V4L2_EXT_CAPTURE_MODE_DEFAULT+8)
+/* RDI dump 1 */
+#define MSM_V4L2_EXT_CAPTURE_MODE_RDI1 \
+	(MSM_V4L2_EXT_CAPTURE_MODE_DEFAULT+9)
+/* RDI dump 2 */
+#define MSM_V4L2_EXT_CAPTURE_MODE_RDI2 \
+	(MSM_V4L2_EXT_CAPTURE_MODE_DEFAULT+10)
+#define MSM_V4L2_EXT_CAPTURE_MODE_AEC \
+	(MSM_V4L2_EXT_CAPTURE_MODE_DEFAULT+11)
+#define MSM_V4L2_EXT_CAPTURE_MODE_AWB \
+	(MSM_V4L2_EXT_CAPTURE_MODE_DEFAULT+12)
+#define MSM_V4L2_EXT_CAPTURE_MODE_AF \
+	(MSM_V4L2_EXT_CAPTURE_MODE_DEFAULT+13)
+#define MSM_V4L2_EXT_CAPTURE_MODE_IHIST \
+	(MSM_V4L2_EXT_CAPTURE_MODE_DEFAULT+14)
+#define MSM_V4L2_EXT_CAPTURE_MODE_CS \
+	(MSM_V4L2_EXT_CAPTURE_MODE_DEFAULT+15)
+#define MSM_V4L2_EXT_CAPTURE_MODE_RS \
+	(MSM_V4L2_EXT_CAPTURE_MODE_DEFAULT+16)
+#define MSM_V4L2_EXT_CAPTURE_MODE_CSTA \
+	(MSM_V4L2_EXT_CAPTURE_MODE_DEFAULT+17)
+#define MSM_V4L2_EXT_CAPTURE_MODE_V2X_LIVESHOT \
+	(MSM_V4L2_EXT_CAPTURE_MODE_DEFAULT+18)
+#define MSM_V4L2_EXT_CAPTURE_MODE_MAX (MSM_V4L2_EXT_CAPTURE_MODE_DEFAULT+19)
+
+#define MSM_V4L2_PID_MOTION_ISO              V4L2_CID_PRIVATE_BASE
+#define MSM_V4L2_PID_EFFECT                 (V4L2_CID_PRIVATE_BASE+1)
+#define MSM_V4L2_PID_HJR                    (V4L2_CID_PRIVATE_BASE+2)
+#define MSM_V4L2_PID_LED_MODE               (V4L2_CID_PRIVATE_BASE+3)
+#define MSM_V4L2_PID_PREP_SNAPSHOT          (V4L2_CID_PRIVATE_BASE+4)
+#define MSM_V4L2_PID_EXP_METERING           (V4L2_CID_PRIVATE_BASE+5)
+#define MSM_V4L2_PID_ISO                    (V4L2_CID_PRIVATE_BASE+6)
+#define MSM_V4L2_PID_CAM_MODE               (V4L2_CID_PRIVATE_BASE+7)
+#define MSM_V4L2_PID_LUMA_ADAPTATION	    (V4L2_CID_PRIVATE_BASE+8)
+#define MSM_V4L2_PID_BEST_SHOT              (V4L2_CID_PRIVATE_BASE+9)
+#define MSM_V4L2_PID_FOCUS_MODE	            (V4L2_CID_PRIVATE_BASE+10)
+#define MSM_V4L2_PID_BL_DETECTION           (V4L2_CID_PRIVATE_BASE+11)
+#define MSM_V4L2_PID_SNOW_DETECTION         (V4L2_CID_PRIVATE_BASE+12)
+#define MSM_V4L2_PID_CTRL_CMD               (V4L2_CID_PRIVATE_BASE+13)
+#define MSM_V4L2_PID_EVT_SUB_INFO           (V4L2_CID_PRIVATE_BASE+14)
+#define MSM_V4L2_PID_STROBE_FLASH           (V4L2_CID_PRIVATE_BASE+15)
+#define MSM_V4L2_PID_INST_HANDLE            (V4L2_CID_PRIVATE_BASE+16)
+#define MSM_V4L2_PID_MMAP_INST              (V4L2_CID_PRIVATE_BASE+17)
+#define MSM_V4L2_PID_PP_PLANE_INFO          (V4L2_CID_PRIVATE_BASE+18)
+#define MSM_V4L2_PID_MAX                    MSM_V4L2_PID_PP_PLANE_INFO
+
+/* default camera operation mode - two frame output queues */
+#define MSM_V4L2_CAM_OP_DEFAULT         0
+/* camera operation mode for preview - two frame output queues */
+#define MSM_V4L2_CAM_OP_PREVIEW         (MSM_V4L2_CAM_OP_DEFAULT+1)
+/* camera operation mode for video recording - two frame output queues */
+#define MSM_V4L2_CAM_OP_VIDEO           (MSM_V4L2_CAM_OP_DEFAULT+2)
+/* camera operation mode for standard snapshot - two frame output queues */
+#define MSM_V4L2_CAM_OP_CAPTURE         (MSM_V4L2_CAM_OP_DEFAULT+3)
+/* camera operation mode for zsl snapshot - three output queues */
+#define MSM_V4L2_CAM_OP_ZSL             (MSM_V4L2_CAM_OP_DEFAULT+4)
+/* camera operation mode for raw snapshot - one frame output queue */
+#define MSM_V4L2_CAM_OP_RAW             (MSM_V4L2_CAM_OP_DEFAULT+5)
+/* camera operation mode for jpeg snapshot - one frame output queue */
+#define MSM_V4L2_CAM_OP_JPEG_CAPTURE    (MSM_V4L2_CAM_OP_DEFAULT+6)
+
+#define MSM_V4L2_VID_CAP_TYPE	0
+#define MSM_V4L2_STREAM_ON		1
+#define MSM_V4L2_STREAM_OFF		2
+#define MSM_V4L2_SNAPSHOT		3
+#define MSM_V4L2_QUERY_CTRL		4
+#define MSM_V4L2_GET_CTRL		5
+#define MSM_V4L2_SET_CTRL		6
+#define MSM_V4L2_QUERY			7
+#define MSM_V4L2_GET_CROP		8
+#define MSM_V4L2_SET_CROP		9
+#define MSM_V4L2_OPEN			10
+#define MSM_V4L2_CLOSE			11
+#define MSM_V4L2_SET_CTRL_CMD	12
+#define MSM_V4L2_EVT_SUB_MASK	13
+#define MSM_V4L2_PRIVATE_CMD    14
+#define MSM_V4L2_MAX			15
+#define V4L2_CAMERA_EXIT		43
+
+struct crop_info {
+	void *info;
+	int len;
+};
+
+struct msm_postproc {
+	int ftnum;
+	struct msm_frame fthumnail;
+	int fmnum;
+	struct msm_frame fmain;
+};
+
+struct msm_snapshot_pp_status {
+	void *status;
+};
+
+#define CFG_SET_MODE			0
+#define CFG_SET_EFFECT			1
+#define CFG_START			2
+#define CFG_PWR_UP			3
+#define CFG_PWR_DOWN			4
+#define CFG_WRITE_EXPOSURE_GAIN		5
+#define CFG_SET_DEFAULT_FOCUS		6
+#define CFG_MOVE_FOCUS			7
+#define CFG_REGISTER_TO_REAL_GAIN	8
+#define CFG_REAL_TO_REGISTER_GAIN	9
+#define CFG_SET_FPS			10
+#define CFG_SET_PICT_FPS		11
+#define CFG_SET_BRIGHTNESS		12
+#define CFG_SET_CONTRAST		13
+#define CFG_SET_ZOOM			14
+#define CFG_SET_EXPOSURE_MODE		15
+#define CFG_SET_WB			16
+#define CFG_SET_ANTIBANDING		17
+#define CFG_SET_EXP_GAIN		18
+#define CFG_SET_PICT_EXP_GAIN		19
+#define CFG_SET_LENS_SHADING		20
+#define CFG_GET_PICT_FPS		21
+#define CFG_GET_PREV_L_PF		22
+#define CFG_GET_PREV_P_PL		23
+#define CFG_GET_PICT_L_PF		24
+#define CFG_GET_PICT_P_PL		25
+#define CFG_GET_AF_MAX_STEPS		26
+#define CFG_GET_PICT_MAX_EXP_LC		27
+#define CFG_SEND_WB_INFO    28
+#define CFG_SENSOR_INIT    29
+#define CFG_GET_3D_CALI_DATA 30
+#define CFG_GET_CALIB_DATA		31
+#define CFG_GET_OUTPUT_INFO		32
+#define CFG_GET_EEPROM_INFO		33
+#define CFG_GET_EEPROM_DATA		34
+#define CFG_SET_ACTUATOR_INFO		35
+#define CFG_GET_ACTUATOR_INFO           36
+/* TBD: QRD */
+#define CFG_SET_SATURATION            37
+#define CFG_SET_SHARPNESS             38
+#define CFG_SET_TOUCHAEC              39
+#define CFG_SET_AUTO_FOCUS            40
+#define CFG_SET_AUTOFLASH             41
+#define CFG_SET_EXPOSURE_COMPENSATION 42
+#define CFG_SET_ISO                   43
+#define CFG_START_STREAM              44
+#define CFG_STOP_STREAM               45
+#define CFG_GET_CSI_PARAMS            46
+#define CFG_POWER_UP                  47
+#define CFG_POWER_DOWN                48
+#define CFG_WRITE_I2C_ARRAY           49
+#define CFG_READ_I2C_ARRAY            50
+#define CFG_PCLK_CHANGE               51
+#define CFG_CONFIG_VREG_ARRAY         52
+#define CFG_CONFIG_CLK_ARRAY          53
+#define CFG_GPIO_OP                   54
+#define CFG_MAX                       55
+
+#define MOVE_NEAR	0
+#define MOVE_FAR	1
+
+#define SENSOR_PREVIEW_MODE		0
+#define SENSOR_SNAPSHOT_MODE		1
+#define SENSOR_RAW_SNAPSHOT_MODE	2
+#define SENSOR_HFR_60FPS_MODE 3
+#define SENSOR_HFR_90FPS_MODE 4
+#define SENSOR_HFR_120FPS_MODE 5
+
+#define SENSOR_QTR_SIZE			0
+#define SENSOR_FULL_SIZE		1
+#define SENSOR_QVGA_SIZE		2
+#define SENSOR_INVALID_SIZE		3
+
+#define CAMERA_EFFECT_OFF		0
+#define CAMERA_EFFECT_MONO		1
+#define CAMERA_EFFECT_NEGATIVE		2
+#define CAMERA_EFFECT_SOLARIZE		3
+#define CAMERA_EFFECT_SEPIA		4
+#define CAMERA_EFFECT_POSTERIZE		5
+#define CAMERA_EFFECT_WHITEBOARD	6
+#define CAMERA_EFFECT_BLACKBOARD	7
+#define CAMERA_EFFECT_AQUA		8
+#define CAMERA_EFFECT_EMBOSS		9
+#define CAMERA_EFFECT_SKETCH		10
+#define CAMERA_EFFECT_NEON		11
+#define CAMERA_EFFECT_FADED		12
+#define CAMERA_EFFECT_VINTAGECOOL	13
+#define CAMERA_EFFECT_VINTAGEWARM	14
+#define CAMERA_EFFECT_ACCENT_BLUE       15
+#define CAMERA_EFFECT_ACCENT_GREEN      16
+#define CAMERA_EFFECT_ACCENT_ORANGE     17
+#define CAMERA_EFFECT_MAX               18
+
+/* QRD */
+#define CAMERA_EFFECT_BW		10
+#define CAMERA_EFFECT_BLUISH	12
+#define CAMERA_EFFECT_REDDISH	13
+#define CAMERA_EFFECT_GREENISH	14
+
+/* QRD */
+#define CAMERA_ANTIBANDING_OFF		0
+#define CAMERA_ANTIBANDING_50HZ		2
+#define CAMERA_ANTIBANDING_60HZ		1
+#define CAMERA_ANTIBANDING_AUTO		3
+
+#define CAMERA_CONTRAST_LV0			0
+#define CAMERA_CONTRAST_LV1			1
+#define CAMERA_CONTRAST_LV2			2
+#define CAMERA_CONTRAST_LV3			3
+#define CAMERA_CONTRAST_LV4			4
+#define CAMERA_CONTRAST_LV5			5
+#define CAMERA_CONTRAST_LV6			6
+#define CAMERA_CONTRAST_LV7			7
+#define CAMERA_CONTRAST_LV8			8
+#define CAMERA_CONTRAST_LV9			9
+
+#define CAMERA_BRIGHTNESS_LV0			0
+#define CAMERA_BRIGHTNESS_LV1			1
+#define CAMERA_BRIGHTNESS_LV2			2
+#define CAMERA_BRIGHTNESS_LV3			3
+#define CAMERA_BRIGHTNESS_LV4			4
+#define CAMERA_BRIGHTNESS_LV5			5
+#define CAMERA_BRIGHTNESS_LV6			6
+#define CAMERA_BRIGHTNESS_LV7			7
+#define CAMERA_BRIGHTNESS_LV8			8
+
+#define CAMERA_SATURATION_LV0			0
+#define CAMERA_SATURATION_LV1			1
+#define CAMERA_SATURATION_LV2			2
+#define CAMERA_SATURATION_LV3			3
+#define CAMERA_SATURATION_LV4			4
+#define CAMERA_SATURATION_LV5			5
+#define CAMERA_SATURATION_LV6			6
+#define CAMERA_SATURATION_LV7			7
+#define CAMERA_SATURATION_LV8			8
+
+#define CAMERA_SHARPNESS_LV0		0
+#define CAMERA_SHARPNESS_LV1		3
+#define CAMERA_SHARPNESS_LV2		6
+#define CAMERA_SHARPNESS_LV3		9
+#define CAMERA_SHARPNESS_LV4		12
+#define CAMERA_SHARPNESS_LV5		15
+#define CAMERA_SHARPNESS_LV6		18
+#define CAMERA_SHARPNESS_LV7		21
+#define CAMERA_SHARPNESS_LV8		24
+#define CAMERA_SHARPNESS_LV9		27
+#define CAMERA_SHARPNESS_LV10		30
+
+#define CAMERA_SETAE_AVERAGE		0
+#define CAMERA_SETAE_CENWEIGHT	1
+
+#define  CAMERA_WB_AUTO               1 /* This list must match aeecamera.h */
+#define  CAMERA_WB_CUSTOM             2
+#define  CAMERA_WB_INCANDESCENT       3
+#define  CAMERA_WB_FLUORESCENT        4
+#define  CAMERA_WB_DAYLIGHT           5
+#define  CAMERA_WB_CLOUDY_DAYLIGHT    6
+#define  CAMERA_WB_TWILIGHT           7
+#define  CAMERA_WB_SHADE              8
+
+#define CAMERA_EXPOSURE_COMPENSATION_LV0			12
+#define CAMERA_EXPOSURE_COMPENSATION_LV1			6
+#define CAMERA_EXPOSURE_COMPENSATION_LV2			0
+#define CAMERA_EXPOSURE_COMPENSATION_LV3			-6
+#define CAMERA_EXPOSURE_COMPENSATION_LV4			-12
+
+enum msm_v4l2_saturation_level {
+	MSM_V4L2_SATURATION_L0,
+	MSM_V4L2_SATURATION_L1,
+	MSM_V4L2_SATURATION_L2,
+	MSM_V4L2_SATURATION_L3,
+	MSM_V4L2_SATURATION_L4,
+	MSM_V4L2_SATURATION_L5,
+	MSM_V4L2_SATURATION_L6,
+	MSM_V4L2_SATURATION_L7,
+	MSM_V4L2_SATURATION_L8,
+	MSM_V4L2_SATURATION_L9,
+	MSM_V4L2_SATURATION_L10,
+};
+
+enum msm_v4l2_contrast_level {
+	MSM_V4L2_CONTRAST_L0,
+	MSM_V4L2_CONTRAST_L1,
+	MSM_V4L2_CONTRAST_L2,
+	MSM_V4L2_CONTRAST_L3,
+	MSM_V4L2_CONTRAST_L4,
+	MSM_V4L2_CONTRAST_L5,
+	MSM_V4L2_CONTRAST_L6,
+	MSM_V4L2_CONTRAST_L7,
+	MSM_V4L2_CONTRAST_L8,
+	MSM_V4L2_CONTRAST_L9,
+	MSM_V4L2_CONTRAST_L10,
+};
+
+enum msm_v4l2_exposure_level {
+	MSM_V4L2_EXPOSURE_N2,
+	MSM_V4L2_EXPOSURE_N1,
+	MSM_V4L2_EXPOSURE_D,
+	MSM_V4L2_EXPOSURE_P1,
+	MSM_V4L2_EXPOSURE_P2,
+};
+
+enum msm_v4l2_sharpness_level {
+	MSM_V4L2_SHARPNESS_L0,
+	MSM_V4L2_SHARPNESS_L1,
+	MSM_V4L2_SHARPNESS_L2,
+	MSM_V4L2_SHARPNESS_L3,
+	MSM_V4L2_SHARPNESS_L4,
+	MSM_V4L2_SHARPNESS_L5,
+	MSM_V4L2_SHARPNESS_L6,
+};
+
+enum msm_v4l2_expo_metering_mode {
+	MSM_V4L2_EXP_FRAME_AVERAGE,
+	MSM_V4L2_EXP_CENTER_WEIGHTED,
+	MSM_V4L2_EXP_SPOT_METERING,
+};
+
+enum msm_v4l2_iso_mode {
+	MSM_V4L2_ISO_AUTO = 0,
+	MSM_V4L2_ISO_DEBLUR,
+	MSM_V4L2_ISO_100,
+	MSM_V4L2_ISO_200,
+	MSM_V4L2_ISO_400,
+	MSM_V4L2_ISO_800,
+	MSM_V4L2_ISO_1600,
+};
+
+enum msm_v4l2_wb_mode {
+	MSM_V4L2_WB_OFF,
+	MSM_V4L2_WB_AUTO,
+	MSM_V4L2_WB_CUSTOM,
+	MSM_V4L2_WB_INCANDESCENT,
+	MSM_V4L2_WB_FLUORESCENT,
+	MSM_V4L2_WB_DAYLIGHT,
+	MSM_V4L2_WB_CLOUDY_DAYLIGHT,
+};
+
+enum msm_v4l2_special_effect {
+	MSM_V4L2_EFFECT_OFF,
+	MSM_V4L2_EFFECT_MONO,
+	MSM_V4L2_EFFECT_NEGATIVE,
+	MSM_V4L2_EFFECT_SOLARIZE,
+	MSM_V4L2_EFFECT_SEPIA,
+	MSM_V4L2_EFFECT_POSTERAIZE,
+	MSM_V4L2_EFFECT_WHITEBOARD,
+	MSM_V4L2_EFFECT_BLACKBOARD,
+	MSM_V4L2_EFFECT_AQUA,
+	MSM_V4L2_EFFECT_EMBOSS,
+	MSM_V4L2_EFFECT_SKETCH,
+	MSM_V4L2_EFFECT_NEON,
+	MSM_V4L2_EFFECT_MAX,
+};
+
+enum msm_v4l2_power_line_frequency {
+	MSM_V4L2_POWER_LINE_OFF,
+	MSM_V4L2_POWER_LINE_60HZ,
+	MSM_V4L2_POWER_LINE_50HZ,
+	MSM_V4L2_POWER_LINE_AUTO,
+};
+
+#define CAMERA_ISO_TYPE_AUTO           0
+#define CAMEAR_ISO_TYPE_HJR            1
+#define CAMEAR_ISO_TYPE_100            2
+#define CAMERA_ISO_TYPE_200            3
+#define CAMERA_ISO_TYPE_400            4
+#define CAMEAR_ISO_TYPE_800            5
+#define CAMERA_ISO_TYPE_1600           6
+
+struct sensor_pict_fps {
+	uint16_t prevfps;
+	uint16_t pictfps;
+};
+
+struct exp_gain_cfg {
+	uint16_t gain;
+	uint32_t line;
+};
+
+struct focus_cfg {
+	int32_t steps;
+	int dir;
+};
+
+struct fps_cfg {
+	uint16_t f_mult;
+	uint16_t fps_div;
+	uint32_t pict_fps_div;
+};
+
+struct wb_info_cfg {
+	uint16_t red_gain;
+	uint16_t green_gain;
+	uint16_t blue_gain;
+};
+
+struct sensor_3d_exp_cfg {
+	uint16_t gain;
+	uint32_t line;
+	uint16_t r_gain;
+	uint16_t b_gain;
+	uint16_t gr_gain;
+	uint16_t gb_gain;
+	uint16_t gain_adjust;
+};
+
+struct sensor_3d_cali_data_t {
+	unsigned char left_p_matrix[3][4][8];
+	unsigned char right_p_matrix[3][4][8];
+	unsigned char square_len[8];
+	unsigned char focal_len[8];
+	unsigned char pixel_pitch[8];
+	uint16_t left_r;
+	uint16_t left_b;
+	uint16_t left_gb;
+	uint16_t left_af_far;
+	uint16_t left_af_mid;
+	uint16_t left_af_short;
+	uint16_t left_af_5um;
+	uint16_t left_af_50up;
+	uint16_t left_af_50down;
+	uint16_t right_r;
+	uint16_t right_b;
+	uint16_t right_gb;
+	uint16_t right_af_far;
+	uint16_t right_af_mid;
+	uint16_t right_af_short;
+	uint16_t right_af_5um;
+	uint16_t right_af_50up;
+	uint16_t right_af_50down;
+};
+
+struct sensor_init_cfg {
+	uint8_t prev_res;
+	uint8_t pict_res;
+};
+
+struct sensor_calib_data {
+	/* Color Related Measurements */
+	uint16_t r_over_g;
+	uint16_t b_over_g;
+	uint16_t gr_over_gb;
+
+	/* Lens Related Measurements */
+	uint16_t macro_2_inf;
+	uint16_t inf_2_macro;
+	uint16_t stroke_amt;
+	uint16_t af_pos_1m;
+	uint16_t af_pos_inf;
+};
+
+enum msm_sensor_resolution_t {
+	MSM_SENSOR_RES_FULL,
+	MSM_SENSOR_RES_QTR,
+	MSM_SENSOR_RES_2,
+	MSM_SENSOR_RES_3,
+	MSM_SENSOR_RES_4,
+	MSM_SENSOR_RES_5,
+	MSM_SENSOR_RES_6,
+	MSM_SENSOR_RES_7,
+	MSM_SENSOR_INVALID_RES,
+};
+
+struct msm_sensor_output_info_t {
+	uint16_t x_output;
+	uint16_t y_output;
+	uint16_t line_length_pclk;
+	uint16_t frame_length_lines;
+	uint32_t vt_pixel_clk;
+	uint32_t op_pixel_clk;
+	uint16_t binning_factor;
+};
+
+struct sensor_output_info_t {
+	struct msm_sensor_output_info_t *output_info;
+	uint16_t num_info;
+};
+
+struct msm_sensor_exp_gain_info_t {
+	uint16_t coarse_int_time_addr;
+	uint16_t global_gain_addr;
+	uint16_t vert_offset;
+};
+
+struct msm_sensor_output_reg_addr_t {
+	uint16_t x_output;
+	uint16_t y_output;
+	uint16_t line_length_pclk;
+	uint16_t frame_length_lines;
+};
+
+struct sensor_driver_params_type {
+	struct msm_camera_i2c_reg_setting *init_settings;
+	uint16_t init_settings_size;
+	struct msm_camera_i2c_reg_setting *mode_settings;
+	uint16_t mode_settings_size;
+	struct msm_sensor_output_reg_addr_t *sensor_output_reg_addr;
+	struct msm_camera_i2c_reg_setting *start_settings;
+	struct msm_camera_i2c_reg_setting *stop_settings;
+	struct msm_camera_i2c_reg_setting *groupon_settings;
+	struct msm_camera_i2c_reg_setting *groupoff_settings;
+	struct msm_sensor_exp_gain_info_t *sensor_exp_gain_info;
+	struct msm_sensor_output_info_t *output_info;
+};
+
+struct mirror_flip {
+	int32_t x_mirror;
+	int32_t y_flip;
+};
+
+struct cord {
+	uint32_t x;
+	uint32_t y;
+};
+
+struct msm_eeprom_data_t {
+	void *eeprom_data;
+	uint16_t index;
+};
+
+struct msm_camera_csid_vc_cfg {
+	uint8_t cid;
+	uint8_t dt;
+	uint8_t decode_format;
+};
+
+struct csi_lane_params_t {
+	uint16_t csi_lane_assign;
+	uint8_t csi_lane_mask;
+	uint8_t csi_if;
+	uint8_t csid_core[2];
+	uint8_t csi_phy_sel;
+};
+
+struct msm_camera_csid_lut_params {
+	uint8_t num_cid;
+	struct msm_camera_csid_vc_cfg *vc_cfg;
+};
+
+struct msm_camera_csid_params {
+	uint8_t lane_cnt;
+	uint16_t lane_assign;
+	uint8_t phy_sel;
+	struct msm_camera_csid_lut_params lut_params;
+};
+
+struct msm_camera_csiphy_params {
+	uint8_t lane_cnt;
+	uint8_t settle_cnt;
+	uint16_t lane_mask;
+	uint8_t combo_mode;
+	uint8_t csid_core;
+	uint64_t data_rate;
+};
+
+struct msm_camera_csi2_params {
+	struct msm_camera_csid_params csid_params;
+	struct msm_camera_csiphy_params csiphy_params;
+};
+
+enum msm_camera_csi_data_format {
+	CSI_8BIT,
+	CSI_10BIT,
+	CSI_12BIT,
+};
+
+struct msm_camera_csi_params {
+	enum msm_camera_csi_data_format data_format;
+	uint8_t lane_cnt;
+	uint8_t lane_assign;
+	uint8_t settle_cnt;
+	uint8_t dpcm_scheme;
+};
+
+enum csic_cfg_type_t {
+	CSIC_INIT,
+	CSIC_CFG,
+};
+
+struct csic_cfg_data {
+	enum csic_cfg_type_t cfgtype;
+	struct msm_camera_csi_params *csic_params;
+};
+
+enum csid_cfg_type_t {
+	CSID_INIT,
+	CSID_CFG,
+};
+
+struct csid_cfg_data {
+	enum csid_cfg_type_t cfgtype;
+	union {
+		uint32_t csid_version;
+		struct msm_camera_csid_params *csid_params;
+	} cfg;
+};
+
+enum csiphy_cfg_type_t {
+	CSIPHY_INIT,
+	CSIPHY_CFG,
+};
+
+struct csiphy_cfg_data {
+	enum csiphy_cfg_type_t cfgtype;
+	struct msm_camera_csiphy_params *csiphy_params;
+};
+
+#define CSI_EMBED_DATA 0x12
+#define CSI_RESERVED_DATA_0 0x13
+#define CSI_YUV422_8  0x1E
+#define CSI_RAW8    0x2A
+#define CSI_RAW10   0x2B
+#define CSI_RAW12   0x2C
+
+#define CSI_DECODE_6BIT 0
+#define CSI_DECODE_8BIT 1
+#define CSI_DECODE_10BIT 2
+#define CSI_DECODE_DPCM_10_8_10 5
+
+#define ISPIF_STREAM(intf, action, vfe) (((intf)<<ISPIF_S_STREAM_SHIFT)+\
+	(action)+((vfe)<<ISPIF_VFE_INTF_SHIFT))
+#define ISPIF_ON_FRAME_BOUNDARY   (0x01 << 0)
+#define ISPIF_OFF_FRAME_BOUNDARY  (0x01 << 1)
+#define ISPIF_OFF_IMMEDIATELY     (0x01 << 2)
+#define ISPIF_S_STREAM_SHIFT      4
+#define ISPIF_VFE_INTF_SHIFT      12
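+
+/*
+ * Worked example: ISPIF_STREAM() packs interface, action and VFE id
+ * into one word, e.g. starting RDI1 on VFE1 at a frame boundary:
+ *
+ *	ISPIF_STREAM(RDI1, ISPIF_ON_FRAME_BOUNDARY, VFE1)
+ *		== (3 << 4) + 0x01 + (1 << 12) == 0x1031
+ */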
+
+#define PIX_0 (0x01 << 0)
+#define RDI_0 (0x01 << 1)
+#define PIX_1 (0x01 << 2)
+#define RDI_1 (0x01 << 3)
+#define RDI_2 (0x01 << 4)
+
+enum msm_ispif_vfe_intf {
+	VFE0,
+	VFE1,
+	VFE_MAX,
+};
+
+enum msm_ispif_intftype {
+	PIX0,
+	RDI0,
+	PIX1,
+	RDI1,
+	RDI2,
+	INTF_MAX,
+};
+
+enum msm_ispif_vc {
+	VC0,
+	VC1,
+	VC2,
+	VC3,
+};
+
+enum msm_ispif_cid {
+	CID0,
+	CID1,
+	CID2,
+	CID3,
+	CID4,
+	CID5,
+	CID6,
+	CID7,
+	CID8,
+	CID9,
+	CID10,
+	CID11,
+	CID12,
+	CID13,
+	CID14,
+	CID15,
+};
+
+struct msm_ispif_params {
+	uint8_t intftype;
+	uint16_t cid_mask;
+	uint8_t csid;
+	uint8_t vfe_intf;
+};
+
+struct msm_ispif_params_list {
+	uint32_t len;
+	struct msm_ispif_params params[4];
+};
+
+enum ispif_cfg_type_t {
+	ISPIF_INIT,
+	ISPIF_SET_CFG,
+	ISPIF_SET_ON_FRAME_BOUNDARY,
+	ISPIF_SET_OFF_FRAME_BOUNDARY,
+	ISPIF_SET_OFF_IMMEDIATELY,
+	ISPIF_RELEASE,
+};
+
+struct ispif_cfg_data {
+	enum ispif_cfg_type_t cfgtype;
+	union {
+		uint32_t csid_version;
+		int cmd;
+		struct msm_ispif_params_list ispif_params;
+	} cfg;
+};
+
+enum msm_camera_i2c_reg_addr_type {
+	MSM_CAMERA_I2C_BYTE_ADDR = 1,
+	MSM_CAMERA_I2C_WORD_ADDR,
+	MSM_CAMERA_I2C_3B_ADDR,
+	MSM_CAMERA_I2C_DWORD_ADDR,
+};
+#define MSM_CAMERA_I2C_DWORD_ADDR MSM_CAMERA_I2C_DWORD_ADDR
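+/*
+ * The self-referential define above lets userspace test for DWORD
+ * address support with "#ifdef MSM_CAMERA_I2C_DWORD_ADDR", since enum
+ * values are invisible to the preprocessor.
+ */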
+
+struct msm_camera_i2c_reg_array {
+	uint16_t reg_addr;
+	uint16_t reg_data;
+};
+
+enum msm_camera_i2c_data_type {
+	MSM_CAMERA_I2C_BYTE_DATA = 1,
+	MSM_CAMERA_I2C_WORD_DATA,
+	MSM_CAMERA_I2C_SET_BYTE_MASK,
+	MSM_CAMERA_I2C_UNSET_BYTE_MASK,
+	MSM_CAMERA_I2C_SET_WORD_MASK,
+	MSM_CAMERA_I2C_UNSET_WORD_MASK,
+	MSM_CAMERA_I2C_SET_BYTE_WRITE_MASK_DATA,
+};
+
+struct msm_camera_i2c_reg_setting {
+	struct msm_camera_i2c_reg_array *reg_setting;
+	uint16_t size;
+	enum msm_camera_i2c_reg_addr_type addr_type;
+	enum msm_camera_i2c_data_type data_type;
+	uint16_t delay;
+};
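+
+/*
+ * Minimal usage sketch (illustrative only): a two-register write table
+ * with word addresses and byte data, applied with a 10 ms post-delay.
+ *
+ *   struct msm_camera_i2c_reg_array regs[] = {
+ *       { .reg_addr = 0x0100, .reg_data = 0x01 },
+ *       { .reg_addr = 0x0104, .reg_data = 0x00 },
+ *   };
+ *   struct msm_camera_i2c_reg_setting setting = {
+ *       .reg_setting = regs,
+ *       .size        = 2,
+ *       .addr_type   = MSM_CAMERA_I2C_WORD_ADDR,
+ *       .data_type   = MSM_CAMERA_I2C_BYTE_DATA,
+ *       .delay       = 10,
+ *   };
+ */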
+
+enum oem_setting_type {
+	I2C_READ = 1,
+	I2C_WRITE,
+	GPIO_OP,
+	EEPROM_READ,
+	VREG_SET,
+	CLK_SET,
+};
+
+struct sensor_oem_setting {
+	enum oem_setting_type type;
+	void *data;
+};
+
+enum camera_vreg_type {
+	REG_LDO,
+	REG_VS,
+	REG_GPIO,
+};
+
+enum msm_camera_vreg_name_t {
+	CAM_VDIG,
+	CAM_VIO,
+	CAM_VANA,
+	CAM_VAF,
+	CAM_VREG_MAX,
+};
+
+struct msm_camera_csi_lane_params {
+	uint16_t csi_lane_assign;
+	uint16_t csi_lane_mask;
+};
+
+struct camera_vreg_t {
+	const char *reg_name;
+	int min_voltage;
+	int max_voltage;
+	int op_mode;
+	uint32_t delay;
+};
+
+struct msm_camera_vreg_setting {
+	struct camera_vreg_t *cam_vreg;
+	uint16_t num_vreg;
+	uint8_t enable;
+};
+
+struct msm_cam_clk_info {
+	const char *clk_name;
+	long clk_rate;
+	uint32_t delay;
+};
+
+struct msm_cam_clk_setting {
+	struct msm_cam_clk_info *clk_info;
+	uint16_t num_clk_info;
+	uint8_t enable;
+};
+
+struct sensor_cfg_data {
+	int cfgtype;
+	int mode;
+	int rs;
+	uint8_t max_steps;
+
+	union {
+		int8_t effect;
+		uint8_t lens_shading;
+		uint16_t prevl_pf;
+		uint16_t prevp_pl;
+		uint16_t pictl_pf;
+		uint16_t pictp_pl;
+		uint32_t pict_max_exp_lc;
+		uint16_t p_fps;
+		uint8_t iso_type;
+		struct sensor_init_cfg init_info;
+		struct sensor_pict_fps gfps;
+		struct exp_gain_cfg exp_gain;
+		struct focus_cfg focus;
+		struct fps_cfg fps;
+		struct wb_info_cfg wb_info;
+		struct sensor_3d_exp_cfg sensor_3d_exp;
+		struct sensor_calib_data calib_info;
+		struct sensor_output_info_t output_info;
+		struct msm_eeprom_data_t eeprom_data;
+		struct csi_lane_params_t csi_lane_params;
+		/* QRD-specific fields */
+		uint16_t antibanding;
+		uint8_t contrast;
+		uint8_t saturation;
+		uint8_t sharpness;
+		int8_t brightness;
+		int ae_mode;
+		uint8_t wb_val;
+		int8_t exp_compensation;
+		uint32_t pclk;
+		struct cord aec_cord;
+		int is_autoflash;
+		struct mirror_flip mirror_flip;
+		void *setting;
+	} cfg;
+};
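+
+/*
+ * Usage sketch (illustrative; CFG_SET_FPS is a hypothetical cfgtype
+ * value standing in for whichever one the driver expects): exactly one
+ * union member is valid, selected by cfgtype.
+ *
+ *   struct sensor_cfg_data cdata;
+ *   cdata.cfgtype = CFG_SET_FPS;   /* hypothetical value */
+ *   cdata.cfg.p_fps = 30;
+ */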
+
+enum gpio_operation_type {
+	GPIO_REQUEST,
+	GPIO_FREE,
+	GPIO_SET_DIRECTION_OUTPUT,
+	GPIO_SET_DIRECTION_INPUT,
+	GPIO_GET_VALUE,
+	GPIO_SET_VALUE,
+};
+
+struct msm_cam_gpio_operation {
+	enum gpio_operation_type op_type;
+	unsigned address;
+	int value;
+	const char *tag;
+};
+
+struct damping_params_t {
+	uint32_t damping_step;
+	uint32_t damping_delay;
+	uint32_t hw_params;
+};
+
+enum actuator_type {
+	ACTUATOR_VCM,
+	ACTUATOR_PIEZO,
+	ACTUATOR_HVCM,
+	ACTUATOR_BIVCM,
+};
+
+enum msm_actuator_data_type {
+	MSM_ACTUATOR_BYTE_DATA = 1,
+	MSM_ACTUATOR_WORD_DATA,
+};
+
+enum msm_actuator_addr_type {
+	MSM_ACTUATOR_BYTE_ADDR = 1,
+	MSM_ACTUATOR_WORD_ADDR,
+};
+
+enum msm_actuator_write_type {
+	MSM_ACTUATOR_WRITE_HW_DAMP,
+	MSM_ACTUATOR_WRITE_DAC,
+	MSM_ACTUATOR_WRITE,
+	MSM_ACTUATOR_WRITE_DIR_REG,
+	MSM_ACTUATOR_POLL,
+	MSM_ACTUATOR_READ_WRITE,
+};
+
+struct msm_actuator_reg_params_t {
+	enum msm_actuator_write_type reg_write_type;
+	uint32_t hw_mask;
+	uint16_t reg_addr;
+	uint16_t hw_shift;
+	uint16_t data_type;
+	uint16_t addr_type;
+	uint16_t reg_data;
+	uint16_t delay;
+};
+
+struct reg_settings_t {
+	uint16_t reg_addr;
+	uint16_t reg_data;
+};
+
+struct region_params_t {
+	/*
+	 * [0] = ForwardDirection Macro boundary
+	 * [1] = ReverseDirection Inf boundary
+	 */
+	uint16_t step_bound[2];
+	uint16_t code_per_step;
+};
+
+struct msm_actuator_move_params_t {
+	int8_t dir;
+	int8_t sign_dir;
+	int16_t dest_step_pos;
+	int32_t num_steps;
+	struct damping_params_t *ringing_params;
+};
+
+struct msm_actuator_tuning_params_t {
+	int16_t initial_code;
+	uint16_t pwd_step;
+	uint16_t region_size;
+	uint32_t total_steps;
+	struct region_params_t *region_params;
+};
+
+struct msm_actuator_params_t {
+	enum actuator_type act_type;
+	uint8_t reg_tbl_size;
+	uint16_t data_size;
+	uint16_t init_setting_size;
+	uint32_t i2c_addr;
+	enum msm_actuator_addr_type i2c_addr_type;
+	enum msm_actuator_data_type i2c_data_type;
+	struct msm_actuator_reg_params_t *reg_tbl_params;
+	struct reg_settings_t *init_settings;
+};
+
+struct msm_actuator_set_info_t {
+	struct msm_actuator_params_t actuator_params;
+	struct msm_actuator_tuning_params_t af_tuning_params;
+};
+
+struct msm_actuator_get_info_t {
+	uint32_t focal_length_num;
+	uint32_t focal_length_den;
+	uint32_t f_number_num;
+	uint32_t f_number_den;
+	uint32_t f_pix_num;
+	uint32_t f_pix_den;
+	uint32_t total_f_dist_num;
+	uint32_t total_f_dist_den;
+	uint32_t hor_view_angle_num;
+	uint32_t hor_view_angle_den;
+	uint32_t ver_view_angle_num;
+	uint32_t ver_view_angle_den;
+};
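+
+/*
+ * The _num/_den pairs above encode rationals; e.g. the effective focal
+ * length is focal_length_num / (float)focal_length_den.
+ */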
+
+enum af_camera_name {
+	ACTUATOR_MAIN_CAM_0,
+	ACTUATOR_MAIN_CAM_1,
+	ACTUATOR_MAIN_CAM_2,
+	ACTUATOR_MAIN_CAM_3,
+	ACTUATOR_MAIN_CAM_4,
+	ACTUATOR_MAIN_CAM_5,
+	ACTUATOR_WEB_CAM_0,
+	ACTUATOR_WEB_CAM_1,
+	ACTUATOR_WEB_CAM_2,
+};
+
+struct msm_actuator_cfg_data {
+	int cfgtype;
+	uint8_t is_af_supported;
+	union {
+		struct msm_actuator_move_params_t move;
+		struct msm_actuator_set_info_t set_info;
+		struct msm_actuator_get_info_t get_info;
+		enum af_camera_name cam_name;
+	} cfg;
+};
+
+struct msm_eeprom_support {
+	uint16_t is_supported;
+	uint16_t size;
+	uint16_t index;
+	uint16_t qvalue;
+};
+
+struct msm_calib_wb {
+	uint16_t r_over_g;
+	uint16_t b_over_g;
+	uint16_t gr_over_gb;
+};
+
+struct msm_calib_af {
+	uint16_t macro_dac;
+	uint16_t inf_dac;
+	uint16_t start_dac;
+};
+
+struct msm_calib_lsc {
+	uint16_t r_gain[221];
+	uint16_t b_gain[221];
+	uint16_t gr_gain[221];
+	uint16_t gb_gain[221];
+};
+
+struct pixel_t {
+	int x;
+	int y;
+};
+
+struct msm_calib_dpc {
+	uint16_t validcount;
+	struct pixel_t snapshot_coord[128];
+	struct pixel_t preview_coord[128];
+	struct pixel_t video_coord[128];
+};
+
+struct msm_calib_raw {
+	uint8_t *data;
+	uint32_t size;
+};
+
+struct msm_camera_eeprom_info_t {
+	struct msm_eeprom_support af;
+	struct msm_eeprom_support wb;
+	struct msm_eeprom_support lsc;
+	struct msm_eeprom_support dpc;
+	struct msm_eeprom_support raw;
+};
+
+struct msm_eeprom_cfg_data {
+	int cfgtype;
+	uint8_t is_eeprom_supported;
+	union {
+		struct msm_eeprom_data_t get_data;
+		struct msm_camera_eeprom_info_t get_info;
+	} cfg;
+};
+
+struct sensor_large_data {
+	int cfgtype;
+	union {
+		struct sensor_3d_cali_data_t sensor_3d_cali_data;
+	} data;
+};
+
+enum sensor_type_t {
+	BAYER,
+	YUV,
+	JPEG_SOC,
+};
+
+enum flash_type {
+	LED_FLASH,
+	STROBE_FLASH,
+};
+
+enum strobe_flash_ctrl_type {
+	STROBE_FLASH_CTRL_INIT,
+	STROBE_FLASH_CTRL_CHARGE,
+	STROBE_FLASH_CTRL_RELEASE
+};
+
+struct strobe_flash_ctrl_data {
+	enum strobe_flash_ctrl_type type;
+	int charge_en;
+};
+
+struct msm_camera_info {
+	int num_cameras;
+	uint8_t has_3d_support[MSM_MAX_CAMERA_SENSORS];
+	uint8_t is_internal_cam[MSM_MAX_CAMERA_SENSORS];
+	uint32_t s_mount_angle[MSM_MAX_CAMERA_SENSORS];
+	const char *video_dev_name[MSM_MAX_CAMERA_SENSORS];
+	enum sensor_type_t sensor_type[MSM_MAX_CAMERA_SENSORS];
+};
+
+struct msm_cam_config_dev_info {
+	int num_config_nodes;
+	const char *config_dev_name[MSM_MAX_CAMERA_CONFIGS];
+	int config_dev_id[MSM_MAX_CAMERA_CONFIGS];
+};
+
+struct msm_mctl_node_info {
+	int num_mctl_nodes;
+	const char *mctl_node_name[MSM_MAX_CAMERA_SENSORS];
+};
+
+struct flash_ctrl_data {
+	int flashtype;
+	union {
+		int led_state;
+		struct strobe_flash_ctrl_data strobe_ctrl;
+	} ctrl_data;
+};
+
+#define GET_NAME			0
+#define GET_PREVIEW_LINE_PER_FRAME	1
+#define GET_PREVIEW_PIXELS_PER_LINE	2
+#define GET_SNAPSHOT_LINE_PER_FRAME	3
+#define GET_SNAPSHOT_PIXELS_PER_LINE	4
+#define GET_SNAPSHOT_FPS		5
+#define GET_SNAPSHOT_MAX_EP_LINE_CNT	6
+
+struct msm_camsensor_info {
+	char name[MAX_SENSOR_NAME];
+	uint8_t flash_enabled;
+	uint8_t strobe_flash_enabled;
+	uint8_t actuator_enabled;
+	uint8_t ispif_supported;
+	int8_t total_steps;
+	uint8_t support_3d;
+	enum flash_type flashtype;
+	enum sensor_type_t sensor_type;
+	uint32_t pxlcode; /* enum v4l2_mbus_pixelcode */
+	uint32_t camera_type; /* msm_camera_type */
+	int mount_angle;
+	uint32_t max_width;
+	uint32_t max_height;
+};
+
+#define V4L2_SINGLE_PLANE	0
+#define V4L2_MULTI_PLANE_Y	0
+#define V4L2_MULTI_PLANE_CBCR	1
+#define V4L2_MULTI_PLANE_CB	1
+#define V4L2_MULTI_PLANE_CR	2
+
+struct plane_data {
+	int plane_id;
+	uint32_t offset;
+	unsigned long size;
+};
+
+struct img_plane_info {
+	uint32_t width;
+	uint32_t height;
+	uint32_t pixelformat;
+	uint8_t buffer_type; /* single- or multi-planar */
+	uint8_t output_port;
+	uint32_t ext_mode;
+	uint8_t num_planes;
+	struct plane_data plane[MAX_PLANES];
+	uint32_t sp_y_offset;
+	uint32_t inst_handle;
+};
+
+#define QCAMERA_NAME "qcamera"
+#define QCAMERA_SERVER_NAME "qcamera_server"
+#define QCAMERA_DEVICE_GROUP_ID 1
+#define QCAMERA_VNODE_GROUP_ID 2
+
+enum msm_cam_subdev_type {
+	CSIPHY_DEV,
+	CSID_DEV,
+	CSIC_DEV,
+	ISPIF_DEV,
+	VFE_DEV,
+	AXI_DEV,
+	VPE_DEV,
+	SENSOR_DEV,
+	ACTUATOR_DEV,
+	EEPROM_DEV,
+	GESTURE_DEV,
+	IRQ_ROUTER_DEV,
+	CPP_DEV,
+	CCI_DEV,
+	FLASH_DEV,
+};
+
+struct msm_mctl_set_sdev_data {
+	uint32_t revision;
+	enum msm_cam_subdev_type sdev_type;
+};
+
+#define MSM_CAM_V4L2_IOCTL_GET_CAMERA_INFO \
+	_IOWR('V', BASE_VIDIOC_PRIVATE + 1, struct msm_camera_v4l2_ioctl_t)
+
+#define MSM_CAM_V4L2_IOCTL_GET_CONFIG_INFO \
+	_IOWR('V', BASE_VIDIOC_PRIVATE + 2, struct msm_camera_v4l2_ioctl_t)
+
+#define MSM_CAM_V4L2_IOCTL_GET_MCTL_INFO \
+	_IOWR('V', BASE_VIDIOC_PRIVATE + 3, struct msm_camera_v4l2_ioctl_t)
+
+#define MSM_CAM_V4L2_IOCTL_CTRL_CMD_DONE \
+	_IOWR('V', BASE_VIDIOC_PRIVATE + 4, struct msm_camera_v4l2_ioctl_t)
+
+#define MSM_CAM_V4L2_IOCTL_GET_EVENT_PAYLOAD \
+	_IOWR('V', BASE_VIDIOC_PRIVATE + 5, struct msm_camera_v4l2_ioctl_t)
+
+#define MSM_CAM_IOCTL_SEND_EVENT \
+	_IOWR('V', BASE_VIDIOC_PRIVATE + 6, struct v4l2_event)
+
+#define MSM_CAM_V4L2_IOCTL_CFG_VPE \
+	_IOWR('V', BASE_VIDIOC_PRIVATE + 7, struct msm_vpe_cfg_cmd)
+
+#define MSM_CAM_V4L2_IOCTL_PRIVATE_S_CTRL \
+	_IOWR('V', BASE_VIDIOC_PRIVATE + 8, struct msm_camera_v4l2_ioctl_t)
+
+#define MSM_CAM_V4L2_IOCTL_PRIVATE_G_CTRL \
+	_IOWR('V', BASE_VIDIOC_PRIVATE + 9, struct msm_camera_v4l2_ioctl_t)
+
+#define MSM_CAM_V4L2_IOCTL_PRIVATE_GENERAL \
+	_IOW('V', BASE_VIDIOC_PRIVATE + 10, struct msm_camera_v4l2_ioctl_t)
+
+#define VIDIOC_MSM_VPE_INIT \
+	_IO('V', BASE_VIDIOC_PRIVATE + 15)
+
+#define VIDIOC_MSM_VPE_RELEASE \
+	_IO('V', BASE_VIDIOC_PRIVATE + 16)
+
+#define VIDIOC_MSM_VPE_CFG \
+	_IOWR('V', BASE_VIDIOC_PRIVATE + 17, struct msm_mctl_pp_params *)
+
+#define VIDIOC_MSM_AXI_INIT \
+	_IOWR('V', BASE_VIDIOC_PRIVATE + 18, uint8_t *)
+
+#define VIDIOC_MSM_AXI_RELEASE \
+	_IO('V', BASE_VIDIOC_PRIVATE + 19)
+
+#define VIDIOC_MSM_AXI_CFG \
+	_IOWR('V', BASE_VIDIOC_PRIVATE + 20, void *)
+
+#define VIDIOC_MSM_AXI_IRQ \
+	_IOWR('V', BASE_VIDIOC_PRIVATE + 21, void *)
+
+#define VIDIOC_MSM_AXI_BUF_CFG \
+	_IOWR('V', BASE_VIDIOC_PRIVATE + 22, void *)
+
+#define VIDIOC_MSM_AXI_RDI_COUNT_UPDATE \
+	_IOWR('V', BASE_VIDIOC_PRIVATE + 23, void *)
+
+#define VIDIOC_MSM_VFE_INIT \
+	_IO('V', BASE_VIDIOC_PRIVATE + 24)
+
+#define VIDIOC_MSM_VFE_RELEASE \
+	_IO('V', BASE_VIDIOC_PRIVATE + 25)
+
+struct msm_camera_v4l2_ioctl_t {
+	uint32_t id;
+	uint32_t len;
+	uint32_t trans_code;
+	void __user *ioctl_ptr;
+};
+
+struct msm_camera_vfe_params_t {
+	uint32_t operation_mode;
+	uint32_t capture_count;
+	uint8_t  skip_reset;
+	uint8_t  stop_immediately;
+	uint16_t port_info;
+	uint32_t inst_handle;
+	uint16_t cmd_type;
+};
+
+enum msm_camss_irq_idx {
+	CAMERA_SS_IRQ_0,
+	CAMERA_SS_IRQ_1,
+	CAMERA_SS_IRQ_2,
+	CAMERA_SS_IRQ_3,
+	CAMERA_SS_IRQ_4,
+	CAMERA_SS_IRQ_5,
+	CAMERA_SS_IRQ_6,
+	CAMERA_SS_IRQ_7,
+	CAMERA_SS_IRQ_8,
+	CAMERA_SS_IRQ_9,
+	CAMERA_SS_IRQ_10,
+	CAMERA_SS_IRQ_11,
+	CAMERA_SS_IRQ_12,
+	CAMERA_SS_IRQ_MAX
+};
+
+enum msm_cam_hw_idx {
+	MSM_CAM_HW_MICRO,
+	MSM_CAM_HW_CCI,
+	MSM_CAM_HW_CSI0,
+	MSM_CAM_HW_CSI1,
+	MSM_CAM_HW_CSI2,
+	MSM_CAM_HW_CSI3,
+	MSM_CAM_HW_ISPIF,
+	MSM_CAM_HW_CPP,
+	MSM_CAM_HW_VFE0,
+	MSM_CAM_HW_VFE1,
+	MSM_CAM_HW_JPEG0,
+	MSM_CAM_HW_JPEG1,
+	MSM_CAM_HW_JPEG2,
+	MSM_CAM_HW_MAX
+};
+
+struct msm_camera_irq_cfg {
+	/* Bit mask of all the camera hardware blocks that need to
+	 * be composited into a single IRQ to the MSM.
+	 * Current usage (may be updated based on hw changes):
+	 * Bits 31:13 - Reserved.
+	 * Bits 12:0
+	 * 12 - MSM_CAM_HW_JPEG2
+	 * 11 - MSM_CAM_HW_JPEG1
+	 * 10 - MSM_CAM_HW_JPEG0
+	 *  9 - MSM_CAM_HW_VFE1
+	 *  8 - MSM_CAM_HW_VFE0
+	 *  7 - MSM_CAM_HW_CPP
+	 *  6 - MSM_CAM_HW_ISPIF
+	 *  5 - MSM_CAM_HW_CSI3
+	 *  4 - MSM_CAM_HW_CSI2
+	 *  3 - MSM_CAM_HW_CSI1
+	 *  2 - MSM_CAM_HW_CSI0
+	 *  1 - MSM_CAM_HW_CCI
+	 *  0 - MSM_CAM_HW_MICRO
+	 */
+	uint32_t cam_hw_mask;
+	uint8_t  irq_idx;
+	uint8_t  num_hwcore;
+};
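+
+/*
+ * Example mask (illustrative only): compositing both VFEs and CSI0
+ * into one IRQ line:
+ *
+ *   cfg.cam_hw_mask = (1 << MSM_CAM_HW_VFE0) |
+ *                     (1 << MSM_CAM_HW_VFE1) |
+ *                     (1 << MSM_CAM_HW_CSI0);
+ *   cfg.num_hwcore  = 3;
+ */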
+
+#define MSM_IRQROUTER_CFG_COMPIRQ \
+	_IOWR('V', BASE_VIDIOC_PRIVATE, void __user *)
+
+#define MAX_NUM_CPP_STRIPS 8
+
+enum msm_cpp_frame_type {
+	MSM_CPP_OFFLINE_FRAME,
+	MSM_CPP_REALTIME_FRAME,
+};
+
+struct msm_cpp_frame_info_t {
+	int32_t frame_id;
+	uint32_t inst_id;
+	uint32_t client_id;
+	enum msm_cpp_frame_type frame_type;
+	uint32_t num_strips;
+};
+
+struct msm_ver_num_info {
+	uint32_t main;
+	uint32_t minor;
+	uint32_t rev;
+};
+
+#define VIDIOC_MSM_CPP_CFG \
+	_IOWR('V', BASE_VIDIOC_PRIVATE, struct msm_camera_v4l2_ioctl_t)
+
+#define VIDIOC_MSM_CPP_GET_EVENTPAYLOAD \
+	_IOWR('V', BASE_VIDIOC_PRIVATE + 1, struct msm_camera_v4l2_ioctl_t)
+
+#define VIDIOC_MSM_CPP_GET_INST_INFO \
+	_IOWR('V', BASE_VIDIOC_PRIVATE + 2, struct msm_camera_v4l2_ioctl_t)
+
+#define V4L2_EVENT_CPP_FRAME_DONE  (V4L2_EVENT_PRIVATE_START + 0)
+
+/* Instance Handle - inst_handle
+ * Data bundle containing the information about where
+ * to get a buffer for a particular camera instance.
+ * This is a bitmask containing the following data:
+ * Buffer Handle Bitmask:
+ *      ------------------------------------
+ *      Bits    :  Purpose
+ *      ------------------------------------
+ *      31      :  is Dev ID valid?
+ *      30 - 24 :  Dev ID.
+ *      23      :  is Image mode valid?
+ *      22 - 16 :  Image mode.
+ *      15      :  is MCTL PP inst idx valid?
+ *      14 - 8  :  MCTL PP inst idx.
+ *      7       :  is Video inst idx valid?
+ *      6 - 0   :  Video inst idx.
+ */
+#define CLR_DEVID_MODE(handle)	(handle &= 0x00FFFFFF)
+#define SET_DEVID_MODE(handle, data)	\
+	(handle |= ((0x1 << 31) | ((data & 0x7F) << 24)))
+#define GET_DEVID_MODE(handle)	\
+	((handle & 0x80000000) ? ((handle & 0x7F000000) >> 24) : 0xFF)
+
+#define CLR_IMG_MODE(handle)	(handle &= 0xFF00FFFF)
+#define SET_IMG_MODE(handle, data)	\
+	(handle |= ((0x1 << 23) | ((data & 0x7F) << 16)))
+#define GET_IMG_MODE(handle)	\
+	((handle & 0x800000) ? ((handle & 0x7F0000) >> 16) : 0xFF)
+
+#define CLR_MCTLPP_INST_IDX(handle)	(handle &= 0xFFFF00FF)
+#define SET_MCTLPP_INST_IDX(handle, data)	\
+	(handle |= ((0x1 << 15) | ((data & 0x7F) << 8)))
+#define GET_MCTLPP_INST_IDX(handle)	\
+	((handle & 0x8000) ? ((handle & 0x7F00) >> 8) : 0xFF)
+
+#define CLR_VIDEO_INST_IDX(handle)	(handle &= 0xFFFFFF00)
+#define GET_VIDEO_INST_IDX(handle)	\
+	((handle & 0x80) ? (handle & 0x7F) : 0xFF)
+#define SET_VIDEO_INST_IDX(handle, data)	\
+	(handle |= (0x1 << 7) | (data & 0x7F))
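+
+/*
+ * Worked example (illustrative only): encoding Dev ID 2 and image
+ * mode 5 into an empty handle:
+ *
+ *   uint32_t handle = 0;
+ *   SET_DEVID_MODE(handle, 2);   // handle == 0x82000000
+ *   SET_IMG_MODE(handle, 5);     // handle == 0x82850000
+ *   GET_IMG_MODE(handle);        // -> 5; returns 0xFF if unset
+ */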
+#endif
diff -Nruw linux-4.4.115-fbx/include/uapi/media./msm_cam_sensor.h linux-4.4.115-fbx/include/uapi/media/msm_cam_sensor.h
--- linux-4.4.115-fbx/include/uapi/media./msm_cam_sensor.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/include/uapi/media/msm_cam_sensor.h	2019-01-22 16:16:28.599292554 +0100
@@ -0,0 +1,637 @@
+#ifndef __UAPI_LINUX_MSM_CAM_SENSOR_H
+#define __UAPI_LINUX_MSM_CAM_SENSOR_H
+
+#include <linux/v4l2-mediabus.h>
+#include <media/msm_camsensor_sdk.h>
+
+#include <linux/types.h>
+#include <linux/i2c.h>
+
+#define I2C_SEQ_REG_SETTING_MAX   5
+
+#define MSM_SENSOR_MCLK_8HZ   8000000
+#define MSM_SENSOR_MCLK_16HZ  16000000
+#define MSM_SENSOR_MCLK_24HZ  24000000
+
+#define MAX_SENSOR_NAME 32
+#define MAX_ACTUATOR_AF_TOTAL_STEPS 1024
+
+#define MAX_OIS_MOD_NAME_SIZE 32
+#define MAX_OIS_NAME_SIZE 32
+#define MAX_OIS_REG_SETTINGS 800
+
+#define MOVE_NEAR 0
+#define MOVE_FAR  1
+
+#define MSM_ACTUATOR_MOVE_SIGNED_FAR -1
+#define MSM_ACTUATOR_MOVE_SIGNED_NEAR  1
+
+#define MAX_ACTUATOR_REGION  5
+
+#define MAX_EEPROM_NAME 32
+
+#define MAX_AF_ITERATIONS 3
+#define MAX_NUMBER_OF_STEPS 47
+#define MAX_REGULATOR 5
+
+/* msm_flash_query_data_t query types */
+#define FLASH_QUERY_CURRENT 1
+
+#define MSM_V4L2_PIX_FMT_META v4l2_fourcc('M', 'E', 'T', 'A') /* META */
+#define MSM_V4L2_PIX_FMT_META10 v4l2_fourcc('M', 'E', '1', '0') /* META10 */
+#define MSM_V4L2_PIX_FMT_META12 v4l2_fourcc('M', 'E', '1', '2') /* META12 */
+
+#define MSM_V4L2_PIX_FMT_SBGGR14 v4l2_fourcc('B', 'G', '1', '4')
+	/* 14  BGBG.. GRGR.. */
+#define MSM_V4L2_PIX_FMT_SGBRG14 v4l2_fourcc('G', 'B', '1', '4')
+	/* 14  GBGB.. RGRG.. */
+#define MSM_V4L2_PIX_FMT_SGRBG14 v4l2_fourcc('B', 'A', '1', '4')
+	/* 14  GRGR.. BGBG.. */
+#define MSM_V4L2_PIX_FMT_SRGGB14 v4l2_fourcc('R', 'G', '1', '4')
+	/* 14  RGRG.. GBGB.. */
+
+enum flash_type {
+	LED_FLASH = 1,
+	STROBE_FLASH,
+	GPIO_FLASH
+};
+
+enum msm_sensor_resolution_t {
+	MSM_SENSOR_RES_FULL,
+	MSM_SENSOR_RES_QTR,
+	MSM_SENSOR_RES_2,
+	MSM_SENSOR_RES_3,
+	MSM_SENSOR_RES_4,
+	MSM_SENSOR_RES_5,
+	MSM_SENSOR_RES_6,
+	MSM_SENSOR_RES_7,
+	MSM_SENSOR_INVALID_RES,
+};
+
+enum msm_camera_stream_type_t {
+	MSM_CAMERA_STREAM_PREVIEW,
+	MSM_CAMERA_STREAM_SNAPSHOT,
+	MSM_CAMERA_STREAM_VIDEO,
+	MSM_CAMERA_STREAM_INVALID,
+};
+
+enum sensor_sub_module_t {
+	SUB_MODULE_SENSOR,
+	SUB_MODULE_CHROMATIX,
+	SUB_MODULE_ACTUATOR,
+	SUB_MODULE_EEPROM,
+	SUB_MODULE_LED_FLASH,
+	SUB_MODULE_STROBE_FLASH,
+	SUB_MODULE_CSID,
+	SUB_MODULE_CSID_3D,
+	SUB_MODULE_CSIPHY,
+	SUB_MODULE_CSIPHY_3D,
+	SUB_MODULE_OIS,
+	SUB_MODULE_EXT,
+	SUB_MODULE_IR_LED,
+	SUB_MODULE_IR_CUT,
+	SUB_MODULE_LASER_LED,
+	SUB_MODULE_MAX,
+};
+
+enum {
+	MSM_CAMERA_EFFECT_MODE_OFF,
+	MSM_CAMERA_EFFECT_MODE_MONO,
+	MSM_CAMERA_EFFECT_MODE_NEGATIVE,
+	MSM_CAMERA_EFFECT_MODE_SOLARIZE,
+	MSM_CAMERA_EFFECT_MODE_SEPIA,
+	MSM_CAMERA_EFFECT_MODE_POSTERIZE,
+	MSM_CAMERA_EFFECT_MODE_WHITEBOARD,
+	MSM_CAMERA_EFFECT_MODE_BLACKBOARD,
+	MSM_CAMERA_EFFECT_MODE_AQUA,
+	MSM_CAMERA_EFFECT_MODE_EMBOSS,
+	MSM_CAMERA_EFFECT_MODE_SKETCH,
+	MSM_CAMERA_EFFECT_MODE_NEON,
+	MSM_CAMERA_EFFECT_MODE_MAX
+};
+
+enum {
+	MSM_CAMERA_WB_MODE_AUTO,
+	MSM_CAMERA_WB_MODE_CUSTOM,
+	MSM_CAMERA_WB_MODE_INCANDESCENT,
+	MSM_CAMERA_WB_MODE_FLUORESCENT,
+	MSM_CAMERA_WB_MODE_WARM_FLUORESCENT,
+	MSM_CAMERA_WB_MODE_DAYLIGHT,
+	MSM_CAMERA_WB_MODE_CLOUDY_DAYLIGHT,
+	MSM_CAMERA_WB_MODE_TWILIGHT,
+	MSM_CAMERA_WB_MODE_SHADE,
+	MSM_CAMERA_WB_MODE_OFF,
+	MSM_CAMERA_WB_MODE_MAX
+};
+
+enum {
+	MSM_CAMERA_SCENE_MODE_OFF,
+	MSM_CAMERA_SCENE_MODE_AUTO,
+	MSM_CAMERA_SCENE_MODE_LANDSCAPE,
+	MSM_CAMERA_SCENE_MODE_SNOW,
+	MSM_CAMERA_SCENE_MODE_BEACH,
+	MSM_CAMERA_SCENE_MODE_SUNSET,
+	MSM_CAMERA_SCENE_MODE_NIGHT,
+	MSM_CAMERA_SCENE_MODE_PORTRAIT,
+	MSM_CAMERA_SCENE_MODE_BACKLIGHT,
+	MSM_CAMERA_SCENE_MODE_SPORTS,
+	MSM_CAMERA_SCENE_MODE_ANTISHAKE,
+	MSM_CAMERA_SCENE_MODE_FLOWERS,
+	MSM_CAMERA_SCENE_MODE_CANDLELIGHT,
+	MSM_CAMERA_SCENE_MODE_FIREWORKS,
+	MSM_CAMERA_SCENE_MODE_PARTY,
+	MSM_CAMERA_SCENE_MODE_NIGHT_PORTRAIT,
+	MSM_CAMERA_SCENE_MODE_THEATRE,
+	MSM_CAMERA_SCENE_MODE_ACTION,
+	MSM_CAMERA_SCENE_MODE_AR,
+	MSM_CAMERA_SCENE_MODE_FACE_PRIORITY,
+	MSM_CAMERA_SCENE_MODE_BARCODE,
+	MSM_CAMERA_SCENE_MODE_HDR,
+	MSM_CAMERA_SCENE_MODE_MAX
+};
+
+enum csid_cfg_type_t {
+	CSID_INIT,
+	CSID_CFG,
+	CSID_TESTMODE_CFG,
+	CSID_RELEASE,
+};
+
+enum csiphy_cfg_type_t {
+	CSIPHY_INIT,
+	CSIPHY_CFG,
+	CSIPHY_RELEASE,
+};
+
+enum camera_vreg_type {
+	VREG_TYPE_DEFAULT,
+	VREG_TYPE_CUSTOM,
+};
+
+enum sensor_af_t {
+	SENSOR_AF_FOCUSSED,
+	SENSOR_AF_NOT_FOCUSSED,
+};
+
+enum cci_i2c_master_t {
+	MASTER_0,
+	MASTER_1,
+	MASTER_MAX,
+};
+
+struct msm_camera_i2c_array_write_config {
+	struct msm_camera_i2c_reg_setting conf_array;
+	uint16_t slave_addr;
+};
+
+struct msm_camera_i2c_read_config {
+	uint16_t slave_addr;
+	uint16_t reg_addr;
+	enum msm_camera_i2c_reg_addr_type addr_type;
+	enum msm_camera_i2c_data_type data_type;
+	uint16_t data;
+};
+
+struct msm_camera_csi2_params {
+	struct msm_camera_csid_params csid_params;
+	struct msm_camera_csiphy_params csiphy_params;
+	uint8_t csi_clk_scale_enable;
+};
+
+struct msm_camera_csi_lane_params {
+	uint16_t csi_lane_assign;
+	uint16_t csi_lane_mask;
+};
+
+struct csi_lane_params_t {
+	uint16_t csi_lane_assign;
+	uint8_t csi_lane_mask;
+	uint8_t csi_if;
+	int8_t csid_core[2];
+	uint8_t csi_phy_sel;
+};
+
+struct msm_sensor_info_t {
+	char     sensor_name[MAX_SENSOR_NAME];
+	uint32_t session_id;
+	int32_t  subdev_id[SUB_MODULE_MAX];
+	int32_t  subdev_intf[SUB_MODULE_MAX];
+	uint8_t  is_mount_angle_valid;
+	uint32_t sensor_mount_angle;
+	int modes_supported;
+	enum camb_position_t position;
+};
+
+struct camera_vreg_t {
+	const char *reg_name;
+	int min_voltage;
+	int max_voltage;
+	int op_mode;
+	uint32_t delay;
+	const char *custom_vreg_name;
+	enum camera_vreg_type type;
+};
+
+struct sensorb_cfg_data {
+	int cfgtype;
+	union {
+		struct msm_sensor_info_t      sensor_info;
+		struct msm_sensor_init_params sensor_init_params;
+		void                         *setting;
+		struct msm_sensor_i2c_sync_params sensor_i2c_sync_params;
+	} cfg;
+};
+
+struct csid_cfg_data {
+	enum csid_cfg_type_t cfgtype;
+	union {
+		uint32_t csid_version;
+		struct msm_camera_csid_params *csid_params;
+		struct msm_camera_csid_testmode_parms *csid_testmode_params;
+	} cfg;
+};
+
+struct csiphy_cfg_data {
+	enum csiphy_cfg_type_t cfgtype;
+	union {
+		struct msm_camera_csiphy_params *csiphy_params;
+		struct msm_camera_csi_lane_params *csi_lane_params;
+	} cfg;
+};
+
+enum eeprom_cfg_type_t {
+	CFG_EEPROM_GET_INFO,
+	CFG_EEPROM_GET_CAL_DATA,
+	CFG_EEPROM_READ_CAL_DATA,
+	CFG_EEPROM_WRITE_DATA,
+	CFG_EEPROM_GET_MM_INFO,
+	CFG_EEPROM_INIT,
+};
+
+struct eeprom_get_t {
+	uint32_t num_bytes;
+};
+
+struct eeprom_read_t {
+	uint8_t *dbuffer;
+	uint32_t num_bytes;
+};
+
+struct eeprom_write_t {
+	uint8_t *dbuffer;
+	uint32_t num_bytes;
+};
+
+struct eeprom_get_cmm_t {
+	uint32_t cmm_support;
+	uint32_t cmm_compression;
+	uint32_t cmm_size;
+};
+
+struct msm_eeprom_info_t {
+	struct msm_sensor_power_setting_array *power_setting_array;
+	enum i2c_freq_mode_t i2c_freq_mode;
+	struct msm_eeprom_memory_map_array *mem_map_array;
+};
+
+struct msm_ir_led_cfg_data_t {
+	enum msm_ir_led_cfg_type_t cfg_type;
+	int32_t pwm_duty_on_ns;
+	int32_t pwm_period_ns;
+};
+
+struct msm_ir_cut_cfg_data_t {
+	enum msm_ir_cut_cfg_type_t cfg_type;
+};
+
+struct msm_laser_led_cfg_data_t {
+	enum msm_laser_led_cfg_type_t cfg_type;
+	void __user                   *setting;
+	void __user                   *debug_reg;
+	uint32_t                      debug_reg_size;
+	uint16_t                      i2c_addr;
+	enum i2c_freq_mode_t          i2c_freq_mode;
+};
+
+struct msm_eeprom_cfg_data {
+	enum eeprom_cfg_type_t cfgtype;
+	uint8_t is_supported;
+	union {
+		char eeprom_name[MAX_EEPROM_NAME];
+		struct eeprom_get_t get_data;
+		struct eeprom_read_t read_data;
+		struct eeprom_write_t write_data;
+		struct eeprom_get_cmm_t get_cmm_data;
+		struct msm_eeprom_info_t eeprom_info;
+	} cfg;
+};
+
+enum msm_sensor_cfg_type_t {
+	CFG_SET_SLAVE_INFO,
+	CFG_SLAVE_READ_I2C,
+	CFG_WRITE_I2C_ARRAY,
+	CFG_SLAVE_WRITE_I2C_ARRAY,
+	CFG_WRITE_I2C_SEQ_ARRAY,
+	CFG_POWER_UP,
+	CFG_POWER_DOWN,
+	CFG_SET_STOP_STREAM_SETTING,
+	CFG_GET_SENSOR_INFO,
+	CFG_GET_SENSOR_INIT_PARAMS,
+	CFG_SET_INIT_SETTING,
+	CFG_SET_RESOLUTION,
+	CFG_SET_STOP_STREAM,
+	CFG_SET_START_STREAM,
+	CFG_SET_SATURATION,
+	CFG_SET_CONTRAST,
+	CFG_SET_SHARPNESS,
+	CFG_SET_ISO,
+	CFG_SET_EXPOSURE_COMPENSATION,
+	CFG_SET_ANTIBANDING,
+	CFG_SET_BESTSHOT_MODE,
+	CFG_SET_EFFECT,
+	CFG_SET_WHITE_BALANCE,
+	CFG_SET_AUTOFOCUS,
+	CFG_CANCEL_AUTOFOCUS,
+	CFG_SET_STREAM_TYPE,
+	CFG_SET_I2C_SYNC_PARAM,
+	CFG_WRITE_I2C_ARRAY_ASYNC,
+	CFG_WRITE_I2C_ARRAY_SYNC,
+	CFG_WRITE_I2C_ARRAY_SYNC_BLOCK,
+};
+
+enum msm_actuator_cfg_type_t {
+	CFG_GET_ACTUATOR_INFO,
+	CFG_SET_ACTUATOR_INFO,
+	CFG_SET_DEFAULT_FOCUS,
+	CFG_MOVE_FOCUS,
+	CFG_SET_POSITION,
+	CFG_ACTUATOR_POWERDOWN,
+	CFG_ACTUATOR_POWERUP,
+	CFG_ACTUATOR_INIT,
+};
+
+struct msm_ois_opcode {
+	uint32_t prog;
+	uint32_t coeff;
+	uint32_t pheripheral;
+	uint32_t memory;
+};
+
+enum msm_ois_cfg_type_t {
+	CFG_OIS_INIT,
+	CFG_OIS_POWERDOWN,
+	CFG_OIS_POWERUP,
+	CFG_OIS_CONTROL,
+	CFG_OIS_I2C_WRITE_SEQ_TABLE,
+};
+
+enum msm_ois_cfg_download_type_t {
+	CFG_OIS_DOWNLOAD,
+	CFG_OIS_DATA_CONFIG,
+};
+
+enum msm_ois_i2c_operation {
+	MSM_OIS_WRITE = 0,
+	MSM_OIS_POLL,
+	MSM_OIS_READ,
+};
+#define MSM_OIS_READ MSM_OIS_READ
+
+struct reg_settings_ois_t {
+	uint16_t reg_addr;
+	enum msm_camera_i2c_reg_addr_type addr_type;
+	uint32_t reg_data;
+	enum msm_camera_i2c_data_type data_type;
+	enum msm_ois_i2c_operation i2c_operation;
+	uint32_t delay;
+};
+
+struct msm_ois_params_t {
+	uint16_t data_size;
+	uint16_t setting_size;
+	uint32_t i2c_addr;
+	enum i2c_freq_mode_t i2c_freq_mode;
+	enum msm_camera_i2c_reg_addr_type i2c_addr_type;
+	enum msm_camera_i2c_data_type i2c_data_type;
+	struct reg_settings_ois_t *settings;
+};
+
+struct msm_ois_set_info_t {
+	struct msm_ois_params_t ois_params;
+};
+
+struct msm_actuator_move_params_t {
+	int8_t dir;
+	int8_t sign_dir;
+	int16_t dest_step_pos;
+	int32_t num_steps;
+	uint16_t curr_lens_pos;
+	struct damping_params_t *ringing_params;
+};
+
+struct msm_actuator_tuning_params_t {
+	int16_t initial_code;
+	uint16_t pwd_step;
+	uint16_t region_size;
+	uint32_t total_steps;
+	struct region_params_t *region_params;
+};
+
+struct park_lens_data_t {
+	uint32_t damping_step;
+	uint32_t damping_delay;
+	uint32_t hw_params;
+	uint32_t max_step;
+};
+
+struct msm_actuator_params_t {
+	enum actuator_type act_type;
+	uint8_t reg_tbl_size;
+	uint16_t data_size;
+	uint16_t init_setting_size;
+	uint32_t i2c_addr;
+	enum i2c_freq_mode_t i2c_freq_mode;
+	enum msm_camera_i2c_reg_addr_type i2c_addr_type;
+	enum msm_camera_i2c_data_type i2c_data_type;
+	struct msm_actuator_reg_params_t *reg_tbl_params;
+	struct reg_settings_t *init_settings;
+	struct park_lens_data_t park_lens;
+};
+
+struct msm_actuator_set_info_t {
+	struct msm_actuator_params_t actuator_params;
+	struct msm_actuator_tuning_params_t af_tuning_params;
+};
+
+struct msm_actuator_get_info_t {
+	uint32_t focal_length_num;
+	uint32_t focal_length_den;
+	uint32_t f_number_num;
+	uint32_t f_number_den;
+	uint32_t f_pix_num;
+	uint32_t f_pix_den;
+	uint32_t total_f_dist_num;
+	uint32_t total_f_dist_den;
+	uint32_t hor_view_angle_num;
+	uint32_t hor_view_angle_den;
+	uint32_t ver_view_angle_num;
+	uint32_t ver_view_angle_den;
+};
+
+enum af_camera_name {
+	ACTUATOR_MAIN_CAM_0,
+	ACTUATOR_MAIN_CAM_1,
+	ACTUATOR_MAIN_CAM_2,
+	ACTUATOR_MAIN_CAM_3,
+	ACTUATOR_MAIN_CAM_4,
+	ACTUATOR_MAIN_CAM_5,
+	ACTUATOR_WEB_CAM_0,
+	ACTUATOR_WEB_CAM_1,
+	ACTUATOR_WEB_CAM_2,
+};
+
+struct msm_ois_slave_info {
+	char ois_name[MAX_OIS_NAME_SIZE];
+	uint32_t i2c_addr;
+	struct msm_ois_opcode opcode;
+};
+struct msm_ois_cfg_data {
+	int cfgtype;
+	union {
+		struct msm_ois_set_info_t set_info;
+		struct msm_camera_i2c_seq_reg_setting *settings;
+	} cfg;
+};
+
+struct msm_ois_cfg_download_data {
+	int cfgtype;
+	struct msm_ois_slave_info slave_info;
+};
+
+struct msm_actuator_set_position_t {
+	uint16_t number_of_steps;
+	uint32_t hw_params;
+	uint16_t pos[MAX_NUMBER_OF_STEPS];
+	uint16_t delay[MAX_NUMBER_OF_STEPS];
+};
+
+struct msm_actuator_cfg_data {
+	int cfgtype;
+	uint8_t is_af_supported;
+	union {
+		struct msm_actuator_move_params_t move;
+		struct msm_actuator_set_info_t set_info;
+		struct msm_actuator_get_info_t get_info;
+		struct msm_actuator_set_position_t setpos;
+		enum af_camera_name cam_name;
+	} cfg;
+};
+
+enum msm_camera_led_config_t {
+	MSM_CAMERA_LED_OFF,
+	MSM_CAMERA_LED_LOW,
+	MSM_CAMERA_LED_HIGH,
+	MSM_CAMERA_LED_INIT,
+	MSM_CAMERA_LED_RELEASE,
+};
+
+struct msm_camera_led_cfg_t {
+	enum msm_camera_led_config_t cfgtype;
+	int32_t torch_current[MAX_LED_TRIGGERS];
+	int32_t flash_current[MAX_LED_TRIGGERS];
+	int32_t flash_duration[MAX_LED_TRIGGERS];
+};
+
+struct msm_flash_init_info_t {
+	enum msm_flash_driver_type flash_driver_type;
+	uint32_t slave_addr;
+	enum i2c_freq_mode_t i2c_freq_mode;
+	struct msm_sensor_power_setting_array *power_setting_array;
+	struct msm_camera_i2c_reg_setting_array *settings;
+};
+
+struct msm_flash_cfg_data_t {
+	enum msm_flash_cfg_type_t cfg_type;
+	int32_t flash_current[MAX_LED_TRIGGERS];
+	int32_t flash_duration[MAX_LED_TRIGGERS];
+	union {
+		struct msm_flash_init_info_t *flash_init_info;
+		struct msm_camera_i2c_reg_setting_array *settings;
+	} cfg;
+};
+
+struct msm_flash_query_data_t {
+	int32_t flags;
+	int32_t query_type;
+	int32_t max_avail_curr;
+};
+
+/* sensor init structures and enums */
+enum msm_sensor_init_cfg_type_t {
+	CFG_SINIT_PROBE,
+	CFG_SINIT_PROBE_DONE,
+	CFG_SINIT_PROBE_WAIT_DONE,
+};
+
+struct sensor_init_cfg_data {
+	enum msm_sensor_init_cfg_type_t cfgtype;
+	struct msm_sensor_info_t        probed_info;
+	char                            entity_name[MAX_SENSOR_NAME];
+	union {
+		void *setting;
+	} cfg;
+};
+
+#define VIDIOC_MSM_SENSOR_CFG \
+	_IOWR('V', BASE_VIDIOC_PRIVATE + 1, struct sensorb_cfg_data)
+
+#define VIDIOC_MSM_SENSOR_RELEASE \
+	_IO('V', BASE_VIDIOC_PRIVATE + 2)
+
+#define VIDIOC_MSM_SENSOR_GET_SUBDEV_ID \
+	_IOWR('V', BASE_VIDIOC_PRIVATE + 3, uint32_t)
+
+#define VIDIOC_MSM_CSIPHY_IO_CFG \
+	_IOWR('V', BASE_VIDIOC_PRIVATE + 4, struct csiphy_cfg_data)
+
+#define VIDIOC_MSM_CSID_IO_CFG \
+	_IOWR('V', BASE_VIDIOC_PRIVATE + 5, struct csid_cfg_data)
+
+#define VIDIOC_MSM_ACTUATOR_CFG \
+	_IOWR('V', BASE_VIDIOC_PRIVATE + 6, struct msm_actuator_cfg_data)
+
+#define VIDIOC_MSM_FLASH_LED_DATA_CFG \
+	_IOWR('V', BASE_VIDIOC_PRIVATE + 7, struct msm_camera_led_cfg_t)
+
+#define VIDIOC_MSM_EEPROM_CFG \
+	_IOWR('V', BASE_VIDIOC_PRIVATE + 8, struct msm_eeprom_cfg_data)
+
+#define VIDIOC_MSM_SENSOR_GET_AF_STATUS \
+	_IOWR('V', BASE_VIDIOC_PRIVATE + 9, uint32_t)
+
+#define VIDIOC_MSM_SENSOR_INIT_CFG \
+	_IOWR('V', BASE_VIDIOC_PRIVATE + 10, struct sensor_init_cfg_data)
+
+#define VIDIOC_MSM_OIS_CFG \
+	_IOWR('V', BASE_VIDIOC_PRIVATE + 11, struct msm_ois_cfg_data)
+
+#define VIDIOC_MSM_FLASH_CFG \
+	_IOWR('V', BASE_VIDIOC_PRIVATE + 13, struct msm_flash_cfg_data_t)
+
+#define VIDIOC_MSM_OIS_CFG_DOWNLOAD \
+	_IOWR('V', BASE_VIDIOC_PRIVATE + 14, struct msm_ois_cfg_download_data)
+
+#define VIDIOC_MSM_FLASH_QUERY_DATA \
+	_IOWR('V', BASE_VIDIOC_PRIVATE + 15, struct msm_flash_query_data_t)
+
+#define VIDIOC_MSM_IR_LED_CFG \
+	_IOWR('V', BASE_VIDIOC_PRIVATE + 15, struct msm_ir_led_cfg_data_t)
+
+#define VIDIOC_MSM_IR_CUT_CFG \
+	_IOWR('V', BASE_VIDIOC_PRIVATE + 15, struct msm_ir_cut_cfg_data_t)
+
+#define VIDIOC_MSM_LASER_LED_CFG \
+	_IOWR('V', BASE_VIDIOC_PRIVATE + 16, struct msm_laser_led_cfg_data_t)
+
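+/*
+ * Userspace call sketch (illustrative; the subdev node path varies by
+ * target):
+ *
+ *   int fd = open("/dev/v4l-subdev1", O_RDWR);
+ *   struct sensorb_cfg_data cdata = { .cfgtype = CFG_POWER_UP };
+ *   int rc = ioctl(fd, VIDIOC_MSM_SENSOR_CFG, &cdata);
+ */
+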
+#endif
+
diff -Nruw linux-4.4.115-fbx/include/uapi/media./msm_camsensor_sdk.h linux-4.4.115-fbx/include/uapi/media/msm_camsensor_sdk.h
--- linux-4.4.115-fbx/include/uapi/media./msm_camsensor_sdk.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/include/uapi/media/msm_camsensor_sdk.h	2019-01-22 16:16:28.599292554 +0100
@@ -0,0 +1,435 @@
+#ifndef __UAPI_LINUX_MSM_CAMSENSOR_SDK_H
+#define __UAPI_LINUX_MSM_CAMSENSOR_SDK_H
+
+#include <linux/videodev2.h>
+
+#define KVERSION 0x1
+
+#define MAX_POWER_CONFIG      12
+#define GPIO_OUT_LOW          (0 << 1)
+#define GPIO_OUT_HIGH         (1 << 1)
+#define CSI_EMBED_DATA        0x12
+#define CSI_RESERVED_DATA_0   0x13
+#define CSI_YUV422_8          0x1E
+#define CSI_RAW8              0x2A
+#define CSI_RAW10             0x2B
+#define CSI_RAW12             0x2C
+#define CSI_DECODE_6BIT         0
+#define CSI_DECODE_8BIT         1
+#define CSI_DECODE_10BIT        2
+#define CSI_DECODE_12BIT        3
+#define CSI_DECODE_DPCM_10_6_10 4
+#define CSI_DECODE_DPCM_10_8_10 5
+#define MAX_CID                 16
+#define I2C_SEQ_REG_DATA_MAX    1024
+#define I2C_REG_DATA_MAX       (8*1024)
+
+#define MSM_V4L2_PIX_FMT_META v4l2_fourcc('M', 'E', 'T', 'A') /* META */
+#define MSM_V4L2_PIX_FMT_SBGGR14 v4l2_fourcc('B', 'G', '1', '4')
+	/* 14  BGBG.. GRGR.. */
+#define MSM_V4L2_PIX_FMT_SGBRG14 v4l2_fourcc('G', 'B', '1', '4')
+	/* 14  GBGB.. RGRG.. */
+#define MSM_V4L2_PIX_FMT_SGRBG14 v4l2_fourcc('B', 'A', '1', '4')
+	/* 14  GRGR.. BGBG.. */
+#define MSM_V4L2_PIX_FMT_SRGGB14 v4l2_fourcc('R', 'G', '1', '4')
+	/* 14  RGRG.. GBGB.. */
+
+#define MAX_ACTUATOR_REG_TBL_SIZE 8
+#define MAX_ACTUATOR_REGION       5
+#define NUM_ACTUATOR_DIR          2
+#define MAX_ACTUATOR_SCENARIO     8
+#define MAX_ACT_MOD_NAME_SIZE     32
+#define MAX_ACT_NAME_SIZE         32
+#define MAX_ACTUATOR_INIT_SET     120
+#define MAX_I2C_REG_SET           12
+
+#define MAX_LED_TRIGGERS          3
+
+#define MSM_EEPROM_MEMORY_MAP_MAX_SIZE  80
+#define MSM_EEPROM_MAX_MEM_MAP_CNT      8
+
+#define MSM_SENSOR_BYPASS_VIDEO_NODE    1
+
+#define FRONT_AUX_SENSOR_SUPPORT
+
+enum msm_sensor_camera_id_t {
+	CAMERA_0,
+	CAMERA_1,
+	CAMERA_2,
+	CAMERA_3,
+	MAX_CAMERAS,
+};
+
+enum i2c_freq_mode_t {
+	I2C_STANDARD_MODE,
+	I2C_FAST_MODE,
+	I2C_CUSTOM_MODE,
+	I2C_FAST_PLUS_MODE,
+	I2C_MAX_MODES,
+};
+
+enum camb_position_t {
+	BACK_CAMERA_B,
+	FRONT_CAMERA_B,
+	AUX_CAMERA_B = 0x100,
+	FRONT_AUX_CAMERA_B,
+	INVALID_CAMERA_B,
+};
+
+enum msm_sensor_power_seq_type_t {
+	SENSOR_CLK,
+	SENSOR_GPIO,
+	SENSOR_VREG,
+	SENSOR_I2C_MUX,
+	SENSOR_I2C,
+};
+
+enum msm_camera_i2c_reg_addr_type {
+	MSM_CAMERA_I2C_BYTE_ADDR = 1,
+	MSM_CAMERA_I2C_WORD_ADDR,
+	MSM_CAMERA_I2C_3B_ADDR,
+	MSM_CAMERA_I2C_DWORD_ADDR,
+	MSM_CAMERA_I2C_ADDR_TYPE_MAX,
+};
+#define MSM_CAMERA_I2C_DWORD_ADDR MSM_CAMERA_I2C_DWORD_ADDR
+
+enum msm_camera_i2c_data_type {
+	MSM_CAMERA_I2C_BYTE_DATA = 1,
+	MSM_CAMERA_I2C_WORD_DATA,
+	MSM_CAMERA_I2C_DWORD_DATA,
+	MSM_CAMERA_I2C_SET_BYTE_MASK,
+	MSM_CAMERA_I2C_UNSET_BYTE_MASK,
+	MSM_CAMERA_I2C_SET_WORD_MASK,
+	MSM_CAMERA_I2C_UNSET_WORD_MASK,
+	MSM_CAMERA_I2C_SET_BYTE_WRITE_MASK_DATA,
+	MSM_CAMERA_I2C_DATA_TYPE_MAX,
+};
+
+enum msm_sensor_power_seq_gpio_t {
+	SENSOR_GPIO_RESET,
+	SENSOR_GPIO_STANDBY,
+	SENSOR_GPIO_AF_PWDM,
+	SENSOR_GPIO_VIO,
+	SENSOR_GPIO_VANA,
+	SENSOR_GPIO_VDIG,
+	SENSOR_GPIO_VAF,
+	SENSOR_GPIO_FL_EN,
+	SENSOR_GPIO_FL_NOW,
+	SENSOR_GPIO_FL_RESET,
+	SENSOR_GPIO_CUSTOM1,
+	SENSOR_GPIO_CUSTOM2,
+	SENSOR_GPIO_CUSTOM3,
+	SENSOR_GPIO_MAX,
+};
+#define SENSOR_GPIO_CUSTOM3 SENSOR_GPIO_CUSTOM3
+
+enum msm_ir_cut_filter_gpio_t {
+	IR_CUT_FILTER_GPIO_P = 0,
+	IR_CUT_FILTER_GPIO_M,
+	IR_CUT_FILTER_GPIO_MAX,
+};
+#define IR_CUT_FILTER_GPIO_P IR_CUT_FILTER_GPIO_P
+#define IR_CUT_FILTER_GPIO_M IR_CUT_FILTER_GPIO_M
+#define IR_CUT_FILTER_GPIO_MAX IR_CUT_FILTER_GPIO_MAX
+
+enum msm_camera_vreg_name_t {
+	CAM_VDIG,
+	CAM_VIO,
+	CAM_VANA,
+	CAM_VAF,
+	CAM_V_CUSTOM1,
+	CAM_V_CUSTOM2,
+	CAM_VREG_MAX,
+};
+
+enum msm_sensor_clk_type_t {
+	SENSOR_CAM_MCLK,
+	SENSOR_CAM_CLK,
+	SENSOR_CAM_CLK_MAX,
+};
+
+enum camerab_mode_t {
+	CAMERA_MODE_2D_B = (1<<0),
+	CAMERA_MODE_3D_B = (1<<1),
+	CAMERA_MODE_INVALID = (1<<2),
+};
+
+enum msm_actuator_data_type {
+	MSM_ACTUATOR_BYTE_DATA = 1,
+	MSM_ACTUATOR_WORD_DATA,
+};
+
+enum msm_actuator_addr_type {
+	MSM_ACTUATOR_BYTE_ADDR = 1,
+	MSM_ACTUATOR_WORD_ADDR,
+};
+
+enum msm_actuator_write_type {
+	MSM_ACTUATOR_WRITE_HW_DAMP,
+	MSM_ACTUATOR_WRITE_DAC,
+	MSM_ACTUATOR_WRITE,
+	MSM_ACTUATOR_WRITE_DIR_REG,
+	MSM_ACTUATOR_POLL,
+	MSM_ACTUATOR_READ_WRITE,
+};
+
+enum msm_actuator_i2c_operation {
+	MSM_ACT_WRITE = 0,
+	MSM_ACT_POLL,
+};
+
+enum actuator_type {
+	ACTUATOR_VCM,
+	ACTUATOR_PIEZO,
+	ACTUATOR_HVCM,
+	ACTUATOR_BIVCM,
+};
+
+enum msm_flash_driver_type {
+	FLASH_DRIVER_PMIC,
+	FLASH_DRIVER_I2C,
+	FLASH_DRIVER_GPIO,
+	FLASH_DRIVER_DEFAULT
+};
+
+enum msm_flash_cfg_type_t {
+	CFG_FLASH_INIT,
+	CFG_FLASH_RELEASE,
+	CFG_FLASH_OFF,
+	CFG_FLASH_LOW,
+	CFG_FLASH_HIGH,
+};
+
+enum msm_ir_led_cfg_type_t {
+	CFG_IR_LED_INIT = 0,
+	CFG_IR_LED_RELEASE,
+	CFG_IR_LED_OFF,
+	CFG_IR_LED_ON,
+};
+#define CFG_IR_LED_INIT CFG_IR_LED_INIT
+#define CFG_IR_LED_RELEASE CFG_IR_LED_RELEASE
+#define CFG_IR_LED_OFF CFG_IR_LED_OFF
+#define CFG_IR_LED_ON CFG_IR_LED_ON
+
+enum msm_laser_led_cfg_type_t {
+	CFG_LASER_LED_INIT,
+	CFG_LASER_LED_CONTROL,
+};
+#define CFG_LASER_LED_INIT CFG_LASER_LED_INIT
+#define CFG_LASER_LED_CONTROL CFG_LASER_LED_CONTROL
+
+enum msm_ir_cut_cfg_type_t {
+	CFG_IR_CUT_INIT = 0,
+	CFG_IR_CUT_RELEASE,
+	CFG_IR_CUT_OFF,
+	CFG_IR_CUT_ON,
+};
+#define CFG_IR_CUT_INIT CFG_IR_CUT_INIT
+#define CFG_IR_CUT_RELEASE CFG_IR_CUT_RELEASE
+#define CFG_IR_CUT_OFF CFG_IR_CUT_OFF
+#define CFG_IR_CUT_ON CFG_IR_CUT_ON
+
+enum msm_sensor_output_format_t {
+	MSM_SENSOR_BAYER,
+	MSM_SENSOR_YCBCR,
+	MSM_SENSOR_META,
+};
+
+struct msm_sensor_power_setting {
+	enum msm_sensor_power_seq_type_t seq_type;
+	unsigned short seq_val;
+	long config_val;
+	unsigned short delay;
+	void *data[10];
+};
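+
+/*
+ * Example entry (illustrative only): drive the reset GPIO high and
+ * wait 5 ms before the next step of the power-up sequence:
+ *
+ *   struct msm_sensor_power_setting step = {
+ *       .seq_type   = SENSOR_GPIO,
+ *       .seq_val    = SENSOR_GPIO_RESET,
+ *       .config_val = GPIO_OUT_HIGH,
+ *       .delay      = 5,
+ *   };
+ */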
+
+struct msm_sensor_power_setting_array {
+	struct msm_sensor_power_setting  power_setting_a[MAX_POWER_CONFIG];
+	struct msm_sensor_power_setting *power_setting;
+	unsigned short size;
+	struct msm_sensor_power_setting  power_down_setting_a[MAX_POWER_CONFIG];
+	struct msm_sensor_power_setting *power_down_setting;
+	unsigned short size_down;
+};
+
+enum msm_camera_i2c_operation {
+	MSM_CAM_WRITE = 0,
+	MSM_CAM_POLL,
+	MSM_CAM_READ,
+};
+
+struct msm_sensor_i2c_sync_params {
+	unsigned int cid;
+	int csid;
+	unsigned short line;
+	unsigned short delay;
+};
+
+struct msm_camera_reg_settings_t {
+	uint16_t reg_addr;
+	enum msm_camera_i2c_reg_addr_type addr_type;
+	uint16_t reg_data;
+	enum msm_camera_i2c_data_type data_type;
+	enum msm_camera_i2c_operation i2c_operation;
+	uint16_t delay;
+};
+
+struct msm_eeprom_mem_map_t {
+	int slave_addr;
+	struct msm_camera_reg_settings_t
+		mem_settings[MSM_EEPROM_MEMORY_MAP_MAX_SIZE];
+	int memory_map_size;
+};
+
+struct msm_eeprom_memory_map_array {
+	struct msm_eeprom_mem_map_t memory_map[MSM_EEPROM_MAX_MEM_MAP_CNT];
+	uint32_t msm_size_of_max_mappings;
+};
+
+struct msm_sensor_init_params {
+	/* mask of modes supported: 2D, 3D */
+	int                 modes_supported;
+	/* sensor position: front, back */
+	enum camb_position_t position;
+	/* sensor mount angle */
+	unsigned int            sensor_mount_angle;
+};
+
+struct msm_sensor_id_info_t {
+	unsigned short sensor_id_reg_addr;
+	unsigned short sensor_id;
+	unsigned short sensor_id_mask;
+};
+
+struct msm_camera_sensor_slave_info {
+	char sensor_name[32];
+	char eeprom_name[32];
+	char actuator_name[32];
+	char ois_name[32];
+	char flash_name[32];
+	enum msm_sensor_camera_id_t camera_id;
+	unsigned short slave_addr;
+	enum i2c_freq_mode_t i2c_freq_mode;
+	enum msm_camera_i2c_reg_addr_type addr_type;
+	struct msm_sensor_id_info_t sensor_id_info;
+	struct msm_sensor_power_setting_array power_setting_array;
+	unsigned char  is_init_params_valid;
+	struct msm_sensor_init_params sensor_init_params;
+	enum msm_sensor_output_format_t output_format;
+	uint8_t bypass_video_node_creation;
+};
+
+struct msm_camera_i2c_reg_array {
+	unsigned short reg_addr;
+	unsigned short reg_data;
+	unsigned int delay;
+};
+
+struct msm_camera_i2c_reg_setting {
+	struct msm_camera_i2c_reg_array *reg_setting;
+	unsigned short size;
+	enum msm_camera_i2c_reg_addr_type addr_type;
+	enum msm_camera_i2c_data_type data_type;
+	unsigned short delay;
+};
+
+struct msm_camera_csid_vc_cfg {
+	unsigned char cid;
+	unsigned char dt;
+	unsigned char decode_format;
+};
+
+struct msm_camera_csid_lut_params {
+	unsigned char num_cid;
+	struct msm_camera_csid_vc_cfg vc_cfg_a[MAX_CID];
+	struct msm_camera_csid_vc_cfg *vc_cfg[MAX_CID];
+};
+
+struct msm_camera_csid_params {
+	unsigned char lane_cnt;
+	unsigned short lane_assign;
+	unsigned char phy_sel;
+	unsigned int csi_clk;
+	struct msm_camera_csid_lut_params lut_params;
+	unsigned char csi_3p_sel;
+};
+
+struct msm_camera_csid_testmode_parms {
+	unsigned int num_bytes_per_line;
+	unsigned int num_lines;
+	unsigned int h_blanking_count;
+	unsigned int v_blanking_count;
+	unsigned int payload_mode;
+};
+
+struct msm_camera_csiphy_params {
+	unsigned char lane_cnt;
+	unsigned char settle_cnt;
+	unsigned short lane_mask;
+	unsigned char combo_mode;
+	unsigned char csid_core;
+	unsigned int csiphy_clk;
+	unsigned char csi_3phase;
+	uint64_t data_rate;
+};
+
+struct msm_camera_i2c_seq_reg_array {
+	unsigned short reg_addr;
+	unsigned char reg_data[I2C_SEQ_REG_DATA_MAX];
+	unsigned short reg_data_size;
+};
+
+struct msm_camera_i2c_seq_reg_setting {
+	struct msm_camera_i2c_seq_reg_array *reg_setting;
+	unsigned short size;
+	enum msm_camera_i2c_reg_addr_type addr_type;
+	unsigned short delay;
+};
+
+struct msm_actuator_reg_params_t {
+	enum msm_actuator_write_type reg_write_type;
+	unsigned int hw_mask;
+	unsigned short reg_addr;
+	unsigned short hw_shift;
+	unsigned short data_shift;
+	unsigned short data_type;
+	unsigned short addr_type;
+	unsigned short reg_data;
+	unsigned short delay;
+};
+
+struct damping_params_t {
+	unsigned int damping_step;
+	unsigned int damping_delay;
+	unsigned int hw_params;
+};
+
+struct region_params_t {
+	/*
+	 * [0] = ForwardDirection Macro boundary
+	 * [1] = ReverseDirection Inf boundary
+	 */
+	unsigned short step_bound[2];
+	unsigned short code_per_step;
+	/* qvalue for converting float type numbers to integer format */
+	unsigned int qvalue;
+};
+
+struct reg_settings_t {
+	unsigned short reg_addr;
+	enum msm_camera_i2c_reg_addr_type addr_type;
+	unsigned short reg_data;
+	enum msm_camera_i2c_data_type data_type;
+	enum msm_actuator_i2c_operation i2c_operation;
+	unsigned int delay;
+};
+
+struct msm_camera_i2c_reg_setting_array {
+	struct msm_camera_i2c_reg_array reg_setting_a[MAX_I2C_REG_SET];
+	unsigned short size;
+	enum msm_camera_i2c_reg_addr_type addr_type;
+	enum msm_camera_i2c_data_type data_type;
+	unsigned short delay;
+};
+
+#endif
diff -Nruw linux-4.4.115-fbx/include/uapi/media./msm_fd.h linux-4.4.115-fbx/include/uapi/media/msm_fd.h
--- linux-4.4.115-fbx/include/uapi/media./msm_fd.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/include/uapi/media/msm_fd.h	2019-01-22 16:16:28.603292590 +0100
@@ -0,0 +1,76 @@
+#ifndef __UAPI_MSM_FD__
+#define __UAPI_MSM_FD__
+
+#include <linux/videodev2.h>
+#include <linux/types.h>
+
+/*
+ * struct msm_fd_event - Structure containing event info.
+ * @buf_index: Buffer index.
+ * @frame_id: Frame id.
+ * @face_cnt: Number of detected faces.
+ */
+struct msm_fd_event {
+	__u32 buf_index;
+	__u32 frame_id;
+	__u32 face_cnt;
+};
+
+/*
+ * enum msm_fd_pose - Face pose.
+ */
+enum msm_fd_pose {
+	MSM_FD_POSE_FRONT,
+	MSM_FD_POSE_RIGHT_DIAGONAL,
+	MSM_FD_POSE_RIGHT,
+	MSM_FD_POSE_LEFT_DIAGONAL,
+	MSM_FD_POSE_LEFT,
+};
+
+/*
+ * struct msm_fd_face_data - Structure containing detected face data.
+ * @pose: Face pose; refer to enum msm_fd_pose.
+ * @angle: Face angle.
+ * @confidence: Face confidence level.
+ * @reserved: Reserved data for future use.
+ * @face: Face rectangle.
+ */
+struct msm_fd_face_data {
+	__u32 pose;
+	__u32 angle;
+	__u32 confidence;
+	__u32 reserved;
+	struct v4l2_rect face;
+};
+
+/*
+ * struct msm_fd_result - Structure containing detected-face results.
+ * @frame_id: Frame id of the requested result.
+ * @face_cnt: Number of result faces; the driver may lower this value.
+ * @face_data: Pointer to an array of face data structures.
+ *  The array size must not be smaller than face_cnt.
+ */
+struct msm_fd_result {
+	__u32 frame_id;
+	__u32 face_cnt;
+	struct msm_fd_face_data __user *face_data;
+};
+
+/* MSM FD private ioctl ID */
+#define VIDIOC_MSM_FD_GET_RESULT \
+	_IOWR('V', BASE_VIDIOC_PRIVATE, struct msm_fd_result)
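+
+/*
+ * Retrieval sketch (illustrative only; ev_frame_id stands for the
+ * frame id taken from the dequeued MSM_EVENT_FD event): size the
+ * face_data array from the reported face_cnt, then fetch.
+ *
+ *   struct msm_fd_face_data faces[16];
+ *   struct msm_fd_result result = {
+ *       .frame_id  = ev_frame_id,
+ *       .face_cnt  = 16,
+ *       .face_data = faces,
+ *   };
+ *   ioctl(fd, VIDIOC_MSM_FD_GET_RESULT, &result);
+ */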
+
+/* MSM FD event ID */
+#define MSM_EVENT_FD (V4L2_EVENT_PRIVATE_START)
+
+/* MSM FD control ID's */
+#define V4L2_CID_FD_SPEED                (V4L2_CID_PRIVATE_BASE)
+#define V4L2_CID_FD_FACE_ANGLE           (V4L2_CID_PRIVATE_BASE + 1)
+#define V4L2_CID_FD_MIN_FACE_SIZE        (V4L2_CID_PRIVATE_BASE + 2)
+#define V4L2_CID_FD_FACE_DIRECTION       (V4L2_CID_PRIVATE_BASE + 3)
+#define V4L2_CID_FD_DETECTION_THRESHOLD  (V4L2_CID_PRIVATE_BASE + 4)
+#define V4L2_CID_FD_WORK_MEMORY_SIZE     (V4L2_CID_PRIVATE_BASE + 5)
+#define V4L2_CID_FD_WORK_MEMORY_FD       (V4L2_CID_PRIVATE_BASE + 6)
+
+#endif
+
diff -Nruw linux-4.4.115-fbx/include/uapi/media./msm_gemini.h linux-4.4.115-fbx/include/uapi/media/msm_gemini.h
--- linux-4.4.115-fbx/include/uapi/media./msm_gemini.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/include/uapi/media/msm_gemini.h	2019-01-22 16:16:28.603292590 +0100
@@ -0,0 +1,123 @@
+#ifndef __UAPI_MSM_GEMINI_H
+#define __UAPI_MSM_GEMINI_H
+
+#include <linux/types.h>
+#include <linux/ioctl.h>
+
+#define MSM_GMN_IOCTL_MAGIC 'g'
+
+#define MSM_GMN_IOCTL_GET_HW_VERSION \
+	_IOW(MSM_GMN_IOCTL_MAGIC, 1, struct msm_gemini_hw_cmd *)
+
+#define MSM_GMN_IOCTL_RESET \
+	_IOW(MSM_GMN_IOCTL_MAGIC, 2, struct msm_gemini_ctrl_cmd *)
+
+#define MSM_GMN_IOCTL_STOP \
+	_IOW(MSM_GMN_IOCTL_MAGIC, 3, struct msm_gemini_hw_cmds *)
+
+#define MSM_GMN_IOCTL_START \
+	_IOW(MSM_GMN_IOCTL_MAGIC, 4, struct msm_gemini_hw_cmds *)
+
+#define MSM_GMN_IOCTL_INPUT_BUF_ENQUEUE \
+	_IOW(MSM_GMN_IOCTL_MAGIC, 5, struct msm_gemini_buf *)
+
+#define MSM_GMN_IOCTL_INPUT_GET \
+	_IOW(MSM_GMN_IOCTL_MAGIC, 6, struct msm_gemini_buf *)
+
+#define MSM_GMN_IOCTL_INPUT_GET_UNBLOCK \
+	_IOW(MSM_GMN_IOCTL_MAGIC, 7, int)
+
+#define MSM_GMN_IOCTL_OUTPUT_BUF_ENQUEUE \
+	_IOW(MSM_GMN_IOCTL_MAGIC, 8, struct msm_gemini_buf *)
+
+#define MSM_GMN_IOCTL_OUTPUT_GET \
+	_IOW(MSM_GMN_IOCTL_MAGIC, 9, struct msm_gemini_buf *)
+
+#define MSM_GMN_IOCTL_OUTPUT_GET_UNBLOCK \
+	_IOW(MSM_GMN_IOCTL_MAGIC, 10, int)
+
+#define MSM_GMN_IOCTL_EVT_GET \
+	_IOW(MSM_GMN_IOCTL_MAGIC, 11, struct msm_gemini_ctrl_cmd *)
+
+#define MSM_GMN_IOCTL_EVT_GET_UNBLOCK \
+	_IOW(MSM_GMN_IOCTL_MAGIC, 12, int)
+
+#define MSM_GMN_IOCTL_HW_CMD \
+	_IOW(MSM_GMN_IOCTL_MAGIC, 13, struct msm_gemini_hw_cmd *)
+
+#define MSM_GMN_IOCTL_HW_CMDS \
+	_IOW(MSM_GMN_IOCTL_MAGIC, 14, struct msm_gemini_hw_cmds *)
+
+#define MSM_GMN_IOCTL_TEST_DUMP_REGION \
+	_IOW(MSM_GMN_IOCTL_MAGIC, 15, unsigned long)
+
+#define MSM_GMN_IOCTL_SET_MODE \
+	_IOW(MSM_GMN_IOCTL_MAGIC, 16, enum msm_gmn_out_mode)
+
+#define MSM_GEMINI_MODE_REALTIME_ENCODE 0
+#define MSM_GEMINI_MODE_OFFLINE_ENCODE 1
+#define MSM_GEMINI_MODE_REALTIME_ROTATION 2
+#define MSM_GEMINI_MODE_OFFLINE_ROTATION 3
+
+enum msm_gmn_out_mode {
+	MSM_GMN_OUTMODE_FRAGMENTED,
+	MSM_GMN_OUTMODE_SINGLE
+};
+
+struct msm_gemini_ctrl_cmd {
+	uint32_t type;
+	uint32_t len;
+	void     *value;
+};
+
+#define MSM_GEMINI_EVT_RESET 0
+#define MSM_GEMINI_EVT_FRAMEDONE	1
+#define MSM_GEMINI_EVT_ERR 2
+
+struct msm_gemini_buf {
+	uint32_t type;
+	int      fd;
+
+	void     *vaddr;
+
+	uint32_t y_off;
+	uint32_t y_len;
+	uint32_t framedone_len;
+
+	uint32_t cbcr_off;
+	uint32_t cbcr_len;
+
+	uint32_t num_of_mcu_rows;
+	uint32_t offset;
+};
+
+#define MSM_GEMINI_HW_CMD_TYPE_READ      0
+#define MSM_GEMINI_HW_CMD_TYPE_WRITE     1
+#define MSM_GEMINI_HW_CMD_TYPE_WRITE_OR  2
+#define MSM_GEMINI_HW_CMD_TYPE_UWAIT     3
+#define MSM_GEMINI_HW_CMD_TYPE_MWAIT     4
+#define MSM_GEMINI_HW_CMD_TYPE_MDELAY    5
+#define MSM_GEMINI_HW_CMD_TYPE_UDELAY    6
+struct msm_gemini_hw_cmd {
+
+	uint32_t type:4;
+
+	/* n microseconds of timeout for WAIT */
+	/* n microseconds of time for DELAY */
+	/* repeat n times for READ/WRITE */
+	/* max is 0xFFF, 4095 */
+	uint32_t n:12;
+	uint32_t offset:16;
+	uint32_t mask;
+	union {
+		uint32_t data;   /* for single READ/WRITE/WAIT, n = 1 */
+		uint32_t *pdata;   /* for multiple READ/WRITE/WAIT, n > 1 */
+	};
+};
+
+struct msm_gemini_hw_cmds {
+	uint32_t m; /* number of elements in the hw_cmd array */
+	struct msm_gemini_hw_cmd hw_cmd[1];
+};
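+
+/*
+ * hw_cmd[1] is the old-style flexible-array idiom: allocate room for
+ * m entries, e.g. (illustrative only):
+ *
+ *   struct msm_gemini_hw_cmds *cmds =
+ *       malloc(sizeof(*cmds) +
+ *              (m - 1) * sizeof(struct msm_gemini_hw_cmd));
+ *   cmds->m = m;
+ */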
+
+#endif /* __UAPI_MSM_GEMINI_H */
diff -Nruw linux-4.4.115-fbx/include/uapi/media./msm_gestures.h linux-4.4.115-fbx/include/uapi/media/msm_gestures.h
--- linux-4.4.115-fbx/include/uapi/media./msm_gestures.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/include/uapi/media/msm_gestures.h	2019-01-22 16:16:28.603292590 +0100
@@ -0,0 +1,54 @@
+#ifndef __UAPI_MSM_GESTURES_H
+#define __UAPI_MSM_GESTURES_H
+
+#include <linux/types.h>
+#include <linux/ioctl.h>
+#include <media/msm_camera.h>
+
+#define MSM_GES_IOCTL_CTRL_COMMAND \
+	_IOW('V', BASE_VIDIOC_PRIVATE + 20, struct v4l2_control)
+
+#define VIDIOC_MSM_GESTURE_EVT \
+	_IOWR('V', BASE_VIDIOC_PRIVATE + 21, struct v4l2_event)
+
+#define MSM_GES_GET_EVT_PAYLOAD \
+	_IOW('V', BASE_VIDIOC_PRIVATE + 22, struct msm_ges_evt)
+
+#define VIDIOC_MSM_GESTURE_CAM_EVT \
+	_IOWR('V', BASE_VIDIOC_PRIVATE + 23, int)
+
+#define MSM_GES_RESP_V4L2  MSM_CAM_RESP_MAX
+#define MSM_GES_RESP_MAX  (MSM_GES_RESP_V4L2 + 1)
+
+#define MSM_SVR_RESP_MAX  MSM_GES_RESP_MAX
+
+
+#define MSM_V4L2_GES_BASE                             100
+#define MSM_V4L2_GES_OPEN         (MSM_V4L2_GES_BASE + 0)
+#define MSM_V4L2_GES_CLOSE        (MSM_V4L2_GES_BASE + 1)
+#define MSM_V4L2_GES_CAM_OPEN     (MSM_V4L2_GES_BASE + 2)
+#define MSM_V4L2_GES_CAM_CLOSE    (MSM_V4L2_GES_BASE + 3)
+
+#define MSM_GES_APP_EVT_MIN     (V4L2_EVENT_PRIVATE_START + 0x14)
+#define MSM_GES_APP_NOTIFY_EVENT        (MSM_GES_APP_EVT_MIN + 0)
+#define MSM_GES_APP_NOTIFY_ERROR_EVENT  (MSM_GES_APP_EVT_MIN + 1)
+#define MSM_GES_APP_EVT_MAX             (MSM_GES_APP_EVT_MIN + 2)
+
+#define MSM_GESTURE_CID_CTRL_CMD V4L2_CID_BRIGHTNESS
+
+#define MAX_GES_EVENTS 25
+
+struct msm_ges_ctrl_cmd {
+	int type;
+	void *value;
+	int len;
+	int fd;
+	uint32_t cookie;
+};
+
+struct msm_ges_evt {
+	void *evt_data;
+	int evt_len;
+};
+
+#endif /*__UAPI_MSM_GESTURES_H*/
diff -Nruw linux-4.4.115-fbx/include/uapi/media./msm_isp.h linux-4.4.115-fbx/include/uapi/media/msm_isp.h
--- linux-4.4.115-fbx/include/uapi/media./msm_isp.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/include/uapi/media/msm_isp.h	2019-01-22 16:16:28.603292590 +0100
@@ -0,0 +1,344 @@
+#ifndef __UAPI_MSM_ISP_H__
+#define __UAPI_MSM_ISP_H__
+
+#define BIT(nr)			(1UL << (nr))
+
+/* ISP message IDs */
+#define MSG_ID_RESET_ACK                0
+#define MSG_ID_START_ACK                1
+#define MSG_ID_STOP_ACK                 2
+#define MSG_ID_UPDATE_ACK               3
+#define MSG_ID_OUTPUT_P                 4
+#define MSG_ID_OUTPUT_T                 5
+#define MSG_ID_OUTPUT_S                 6
+#define MSG_ID_OUTPUT_V                 7
+#define MSG_ID_SNAPSHOT_DONE            8
+#define MSG_ID_STATS_AEC                9
+#define MSG_ID_STATS_AF                 10
+#define MSG_ID_STATS_AWB                11
+#define MSG_ID_STATS_RS                 12
+#define MSG_ID_STATS_CS                 13
+#define MSG_ID_STATS_IHIST              14
+#define MSG_ID_STATS_SKIN               15
+#define MSG_ID_EPOCH1                   16
+#define MSG_ID_EPOCH2                   17
+#define MSG_ID_SYNC_TIMER0_DONE         18
+#define MSG_ID_SYNC_TIMER1_DONE         19
+#define MSG_ID_SYNC_TIMER2_DONE         20
+#define MSG_ID_ASYNC_TIMER0_DONE        21
+#define MSG_ID_ASYNC_TIMER1_DONE        22
+#define MSG_ID_ASYNC_TIMER2_DONE        23
+#define MSG_ID_ASYNC_TIMER3_DONE        24
+#define MSG_ID_AE_OVERFLOW              25
+#define MSG_ID_AF_OVERFLOW              26
+#define MSG_ID_AWB_OVERFLOW             27
+#define MSG_ID_RS_OVERFLOW              28
+#define MSG_ID_CS_OVERFLOW              29
+#define MSG_ID_IHIST_OVERFLOW           30
+#define MSG_ID_SKIN_OVERFLOW            31
+#define MSG_ID_AXI_ERROR                32
+#define MSG_ID_CAMIF_OVERFLOW           33
+#define MSG_ID_VIOLATION                34
+#define MSG_ID_CAMIF_ERROR              35
+#define MSG_ID_BUS_OVERFLOW             36
+#define MSG_ID_SOF_ACK                  37
+#define MSG_ID_STOP_REC_ACK             38
+#define MSG_ID_STATS_AWB_AEC            39
+#define MSG_ID_OUTPUT_PRIMARY           40
+#define MSG_ID_OUTPUT_SECONDARY         41
+#define MSG_ID_STATS_COMPOSITE          42
+#define MSG_ID_OUTPUT_TERTIARY1         43
+#define MSG_ID_STOP_LS_ACK              44
+#define MSG_ID_OUTPUT_TERTIARY2         45
+#define MSG_ID_STATS_BG                 46
+#define MSG_ID_STATS_BF                 47
+#define MSG_ID_STATS_BHIST              48
+#define MSG_ID_RDI0_UPDATE_ACK          49
+#define MSG_ID_RDI1_UPDATE_ACK          50
+#define MSG_ID_RDI2_UPDATE_ACK          51
+#define MSG_ID_PIX0_UPDATE_ACK          52
+#define MSG_ID_PREV_STOP_ACK            53
+#define MSG_ID_STATS_BE                 54
+
+
+/* ISP command IDs */
+#define VFE_CMD_DUMMY_0                                 0
+#define VFE_CMD_SET_CLK                                 1
+#define VFE_CMD_RESET                                   2
+#define VFE_CMD_START                                   3
+#define VFE_CMD_TEST_GEN_START                          4
+#define VFE_CMD_OPERATION_CFG                           5
+#define VFE_CMD_AXI_OUT_CFG                             6
+#define VFE_CMD_CAMIF_CFG                               7
+#define VFE_CMD_AXI_INPUT_CFG                           8
+#define VFE_CMD_BLACK_LEVEL_CFG                         9
+#define VFE_CMD_MESH_ROLL_OFF_CFG                       10
+#define VFE_CMD_DEMUX_CFG                               11
+#define VFE_CMD_FOV_CFG                                 12
+#define VFE_CMD_MAIN_SCALER_CFG                         13
+#define VFE_CMD_WB_CFG                                  14
+#define VFE_CMD_COLOR_COR_CFG                           15
+#define VFE_CMD_RGB_G_CFG                               16
+#define VFE_CMD_LA_CFG                                  17
+#define VFE_CMD_CHROMA_EN_CFG                           18
+#define VFE_CMD_CHROMA_SUP_CFG                          19
+#define VFE_CMD_MCE_CFG                                 20
+#define VFE_CMD_SK_ENHAN_CFG                            21
+#define VFE_CMD_ASF_CFG                                 22
+#define VFE_CMD_S2Y_CFG                                 23
+#define VFE_CMD_S2CbCr_CFG                              24
+#define VFE_CMD_CHROMA_SUBS_CFG                         25
+#define VFE_CMD_OUT_CLAMP_CFG                           26
+#define VFE_CMD_FRAME_SKIP_CFG                          27
+#define VFE_CMD_DUMMY_1                                 28
+#define VFE_CMD_DUMMY_2                                 29
+#define VFE_CMD_DUMMY_3                                 30
+#define VFE_CMD_UPDATE                                  31
+#define VFE_CMD_BL_LVL_UPDATE                           32
+#define VFE_CMD_DEMUX_UPDATE                            33
+#define VFE_CMD_FOV_UPDATE                              34
+#define VFE_CMD_MAIN_SCALER_UPDATE                      35
+#define VFE_CMD_WB_UPDATE                               36
+#define VFE_CMD_COLOR_COR_UPDATE                        37
+#define VFE_CMD_RGB_G_UPDATE                            38
+#define VFE_CMD_LA_UPDATE                               39
+#define VFE_CMD_CHROMA_EN_UPDATE                        40
+#define VFE_CMD_CHROMA_SUP_UPDATE                       41
+#define VFE_CMD_MCE_UPDATE                              42
+#define VFE_CMD_SK_ENHAN_UPDATE                         43
+#define VFE_CMD_S2CbCr_UPDATE                           44
+#define VFE_CMD_S2Y_UPDATE                              45
+#define VFE_CMD_ASF_UPDATE                              46
+#define VFE_CMD_FRAME_SKIP_UPDATE                       47
+#define VFE_CMD_CAMIF_FRAME_UPDATE                      48
+#define VFE_CMD_STATS_AF_UPDATE                         49
+#define VFE_CMD_STATS_AE_UPDATE                         50
+#define VFE_CMD_STATS_AWB_UPDATE                        51
+#define VFE_CMD_STATS_RS_UPDATE                         52
+#define VFE_CMD_STATS_CS_UPDATE                         53
+#define VFE_CMD_STATS_SKIN_UPDATE                       54
+#define VFE_CMD_STATS_IHIST_UPDATE                      55
+#define VFE_CMD_DUMMY_4                                 56
+#define VFE_CMD_EPOCH1_ACK                              57
+#define VFE_CMD_EPOCH2_ACK                              58
+#define VFE_CMD_START_RECORDING                         59
+#define VFE_CMD_STOP_RECORDING                          60
+#define VFE_CMD_DUMMY_5                                 61
+#define VFE_CMD_DUMMY_6                                 62
+#define VFE_CMD_CAPTURE                                 63
+#define VFE_CMD_DUMMY_7                                 64
+#define VFE_CMD_STOP                                    65
+#define VFE_CMD_GET_HW_VERSION                          66
+#define VFE_CMD_GET_FRAME_SKIP_COUNTS                   67
+#define VFE_CMD_OUTPUT1_BUFFER_ENQ                      68
+#define VFE_CMD_OUTPUT2_BUFFER_ENQ                      69
+#define VFE_CMD_OUTPUT3_BUFFER_ENQ                      70
+#define VFE_CMD_JPEG_OUT_BUF_ENQ                        71
+#define VFE_CMD_RAW_OUT_BUF_ENQ                         72
+#define VFE_CMD_RAW_IN_BUF_ENQ                          73
+#define VFE_CMD_STATS_AF_ENQ                            74
+#define VFE_CMD_STATS_AE_ENQ                            75
+#define VFE_CMD_STATS_AWB_ENQ                           76
+#define VFE_CMD_STATS_RS_ENQ                            77
+#define VFE_CMD_STATS_CS_ENQ                            78
+#define VFE_CMD_STATS_SKIN_ENQ                          79
+#define VFE_CMD_STATS_IHIST_ENQ                         80
+#define VFE_CMD_DUMMY_8                                 81
+#define VFE_CMD_JPEG_ENC_CFG                            82
+#define VFE_CMD_DUMMY_9                                 83
+#define VFE_CMD_STATS_AF_START                          84
+#define VFE_CMD_STATS_AF_STOP                           85
+#define VFE_CMD_STATS_AE_START                          86
+#define VFE_CMD_STATS_AE_STOP                           87
+#define VFE_CMD_STATS_AWB_START                         88
+#define VFE_CMD_STATS_AWB_STOP                          89
+#define VFE_CMD_STATS_RS_START                          90
+#define VFE_CMD_STATS_RS_STOP                           91
+#define VFE_CMD_STATS_CS_START                          92
+#define VFE_CMD_STATS_CS_STOP                           93
+#define VFE_CMD_STATS_SKIN_START                        94
+#define VFE_CMD_STATS_SKIN_STOP                         95
+#define VFE_CMD_STATS_IHIST_START                       96
+#define VFE_CMD_STATS_IHIST_STOP                        97
+#define VFE_CMD_DUMMY_10                                98
+#define VFE_CMD_SYNC_TIMER_SETTING                      99
+#define VFE_CMD_ASYNC_TIMER_SETTING                     100
+#define VFE_CMD_LIVESHOT                                101
+#define VFE_CMD_LA_SETUP                                102
+#define VFE_CMD_LINEARIZATION_CFG                       103
+#define VFE_CMD_DEMOSAICV3                              104
+#define VFE_CMD_DEMOSAICV3_ABCC_CFG                     105
+#define VFE_CMD_DEMOSAICV3_DBCC_CFG                     106
+#define VFE_CMD_DEMOSAICV3_DBPC_CFG                     107
+#define VFE_CMD_DEMOSAICV3_ABF_CFG                      108
+#define VFE_CMD_DEMOSAICV3_ABCC_UPDATE                  109
+#define VFE_CMD_DEMOSAICV3_DBCC_UPDATE                  110
+#define VFE_CMD_DEMOSAICV3_DBPC_UPDATE                  111
+#define VFE_CMD_XBAR_CFG                                112
+#define VFE_CMD_MODULE_CFG                              113
+#define VFE_CMD_ZSL                                     114
+#define VFE_CMD_LINEARIZATION_UPDATE                    115
+#define VFE_CMD_DEMOSAICV3_ABF_UPDATE                   116
+#define VFE_CMD_CLF_CFG                                 117
+#define VFE_CMD_CLF_LUMA_UPDATE                         118
+#define VFE_CMD_CLF_CHROMA_UPDATE                       119
+#define VFE_CMD_PCA_ROLL_OFF_CFG                        120
+#define VFE_CMD_PCA_ROLL_OFF_UPDATE                     121
+#define VFE_CMD_GET_REG_DUMP                            122
+#define VFE_CMD_GET_LINEARIZATON_TABLE                  123
+#define VFE_CMD_GET_MESH_ROLLOFF_TABLE                  124
+#define VFE_CMD_GET_PCA_ROLLOFF_TABLE                   125
+#define VFE_CMD_GET_RGB_G_TABLE                         126
+#define VFE_CMD_GET_LA_TABLE                            127
+#define VFE_CMD_DEMOSAICV3_UPDATE                       128
+#define VFE_CMD_ACTIVE_REGION_CFG                       129
+#define VFE_CMD_COLOR_PROCESSING_CONFIG                 130
+#define VFE_CMD_STATS_WB_AEC_CONFIG                     131
+#define VFE_CMD_STATS_WB_AEC_UPDATE                     132
+#define VFE_CMD_Y_GAMMA_CONFIG                          133
+#define VFE_CMD_SCALE_OUTPUT1_CONFIG                    134
+#define VFE_CMD_SCALE_OUTPUT2_CONFIG                    135
+#define VFE_CMD_CAPTURE_RAW                             136
+#define VFE_CMD_STOP_LIVESHOT                           137
+#define VFE_CMD_RECONFIG_VFE                            138
+#define VFE_CMD_STATS_REQBUF                            139
+#define VFE_CMD_STATS_ENQUEUEBUF                        140
+#define VFE_CMD_STATS_FLUSH_BUFQ                        141
+#define VFE_CMD_STATS_UNREGBUF                          142
+#define VFE_CMD_STATS_BG_START                          143
+#define VFE_CMD_STATS_BG_STOP                           144
+#define VFE_CMD_STATS_BF_START                          145
+#define VFE_CMD_STATS_BF_STOP                           146
+#define VFE_CMD_STATS_BHIST_START                       147
+#define VFE_CMD_STATS_BHIST_STOP                        148
+#define VFE_CMD_RESET_2                                 149
+#define VFE_CMD_FOV_ENC_CFG                             150
+#define VFE_CMD_FOV_VIEW_CFG                            151
+#define VFE_CMD_FOV_ENC_UPDATE                          152
+#define VFE_CMD_FOV_VIEW_UPDATE                         153
+#define VFE_CMD_SCALER_ENC_CFG                          154
+#define VFE_CMD_SCALER_VIEW_CFG                         155
+#define VFE_CMD_SCALER_ENC_UPDATE                       156
+#define VFE_CMD_SCALER_VIEW_UPDATE                      157
+#define VFE_CMD_COLORXFORM_ENC_CFG                      158
+#define VFE_CMD_COLORXFORM_VIEW_CFG                     159
+#define VFE_CMD_COLORXFORM_ENC_UPDATE                   160
+#define VFE_CMD_COLORXFORM_VIEW_UPDATE                  161
+#define VFE_CMD_TEST_GEN_CFG                            162
+#define VFE_CMD_STATS_BE_START                          163
+#define VFE_CMD_STATS_BE_STOP                           164
+struct msm_isp_cmd {
+	int32_t  id;
+	uint16_t length;
+	void     *value;
+};
+
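A VFE command travels to the driver as one of the ids above plus an opaque,
command-specific payload. A minimal user-space sketch (the payload layout and
submission path are assumptions; each VFE_CMD_* expects its own structure):

	#include <stdint.h>

	struct msm_isp_cmd cmd;
	uint32_t cfg[4] = { 0 };		/* placeholder payload     */

	cmd.id     = VFE_CMD_OPERATION_CFG;	/* id from the table above */
	cmd.length = sizeof(cfg);		/* payload size in bytes   */
	cmd.value  = cfg;			/* driver copies from here */
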
+#define VPE_CMD_DUMMY_0                                 0
+#define VPE_CMD_INIT                                    1
+#define VPE_CMD_DEINIT                                  2
+#define VPE_CMD_ENABLE                                  3
+#define VPE_CMD_DISABLE                                 4
+#define VPE_CMD_RESET                                   5
+#define VPE_CMD_FLUSH                                   6
+#define VPE_CMD_OPERATION_MODE_CFG                      7
+#define VPE_CMD_INPUT_PLANE_CFG                         8
+#define VPE_CMD_OUTPUT_PLANE_CFG                        9
+#define VPE_CMD_INPUT_PLANE_UPDATE                      10
+#define VPE_CMD_SCALE_CFG_TYPE                          11
+#define VPE_CMD_ZOOM                                    13
+#define VPE_CMD_MAX                                     14
+
+#define MSM_PP_CMD_TYPE_NOT_USED        0  /* not used */
+#define MSM_PP_CMD_TYPE_VPE             1  /* VPE cmd */
+#define MSM_PP_CMD_TYPE_MCTL            2  /* MCTL cmd */
+
+#define MCTL_CMD_DUMMY_0                0  /* not used */
+#define MCTL_CMD_GET_FRAME_BUFFER       1  /* reserve a free frame buffer */
+#define MCTL_CMD_PUT_FRAME_BUFFER       2  /* return the free frame buffer */
+#define MCTL_CMD_DIVERT_FRAME_PP_PATH   3  /* divert frame for pp */
+
+/* event types sent to the MCTL PP module */
+#define MCTL_PP_EVENT_NOTUSED           0
+#define MCTL_PP_EVENT_CMD_ACK           1
+
+#define VPE_OPERATION_MODE_CFG_LEN      4
+#define VPE_INPUT_PLANE_CFG_LEN         24
+#define VPE_OUTPUT_PLANE_CFG_LEN        20
+#define VPE_INPUT_PLANE_UPDATE_LEN      12
+#define VPE_SCALER_CONFIG_LEN           260
+#define VPE_DIS_OFFSET_CFG_LEN          12
+
+
+#define CAPTURE_WIDTH          1280
+#define IMEM_Y_SIZE            (CAPTURE_WIDTH*16)
+#define IMEM_CBCR_SIZE         (CAPTURE_WIDTH*8)
+
+#define IMEM_Y_PING_OFFSET     0x2E000000
+#define IMEM_CBCR_PING_OFFSET  (IMEM_Y_PING_OFFSET + IMEM_Y_SIZE)
+
+#define IMEM_Y_PONG_OFFSET     (IMEM_CBCR_PING_OFFSET + IMEM_CBCR_SIZE)
+#define IMEM_CBCR_PONG_OFFSET  (IMEM_Y_PONG_OFFSET + IMEM_Y_SIZE)
+
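With CAPTURE_WIDTH = 1280, IMEM_Y_SIZE works out to 20480 (0x5000) and
IMEM_CBCR_SIZE to 10240 (0x2800), so the four regions resolve to
0x2E000000 (Y ping), 0x2E005000 (CbCr ping), 0x2E007800 (Y pong) and
0x2E00C800 (CbCr pong).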
+
+struct msm_vpe_op_mode_cfg {
+	uint8_t op_mode_cfg[VPE_OPERATION_MODE_CFG_LEN];
+};
+
+struct msm_vpe_input_plane_cfg {
+	uint8_t input_plane_cfg[VPE_INPUT_PLANE_CFG_LEN];
+};
+
+struct msm_vpe_output_plane_cfg {
+	uint8_t output_plane_cfg[VPE_OUTPUT_PLANE_CFG_LEN];
+};
+
+struct msm_vpe_input_plane_update_cfg {
+	uint8_t input_plane_update_cfg[VPE_INPUT_PLANE_UPDATE_LEN];
+};
+
+struct msm_vpe_scaler_cfg {
+	uint8_t scaler_cfg[VPE_SCALER_CONFIG_LEN];
+};
+
+struct msm_vpe_flush_frame_buffer {
+	uint32_t src_buf_handle;
+	uint32_t dest_buf_handle;
+	int path;
+};
+
+struct msm_mctl_pp_frame_buffer {
+	uint32_t buf_handle;
+	int path;
+};
+struct msm_mctl_pp_divert_pp {
+	int path;
+	int enable;
+};
+struct msm_vpe_clock_rate {
+	uint32_t rate;
+};
+
+#define MSM_MCTL_PP_VPE_FRAME_ACK    (1<<0)
+#define MSM_MCTL_PP_VPE_FRAME_TO_APP (1<<1)
+
+#define VFE_OUTPUTS_MAIN_AND_PREVIEW    BIT(0)
+#define VFE_OUTPUTS_MAIN_AND_VIDEO      BIT(1)
+#define VFE_OUTPUTS_MAIN_AND_THUMB      BIT(2)
+#define VFE_OUTPUTS_THUMB_AND_MAIN      BIT(3)
+#define VFE_OUTPUTS_PREVIEW_AND_VIDEO   BIT(4)
+#define VFE_OUTPUTS_VIDEO_AND_PREVIEW   BIT(5)
+#define VFE_OUTPUTS_PREVIEW             BIT(6)
+#define VFE_OUTPUTS_VIDEO               BIT(7)
+#define VFE_OUTPUTS_RAW                 BIT(8)
+#define VFE_OUTPUTS_JPEG_AND_THUMB      BIT(9)
+#define VFE_OUTPUTS_THUMB_AND_JPEG      BIT(10)
+#define VFE_OUTPUTS_RDI0                BIT(11)
+#define VFE_OUTPUTS_RDI1                BIT(12)
+
+struct msm_frame_info {
+	uint32_t inst_handle;
+	uint32_t path;
+};
+
+#endif /*__UAPI_MSM_ISP_H__*/
+
diff -Nruw linux-4.4.115-fbx/include/uapi/media./msm_jpeg_dma.h linux-4.4.115-fbx/include/uapi/media/msm_jpeg_dma.h
--- linux-4.4.115-fbx/include/uapi/media./msm_jpeg_dma.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/include/uapi/media/msm_jpeg_dma.h	2019-01-22 16:16:28.603292590 +0100
@@ -0,0 +1,28 @@
+/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __UAPI_MSM_JPEG_DMA__
+#define __UAPI_MSM_JPEG_DMA__
+
+#include <linux/videodev2.h>
+
+/* msm jpeg dma control ID's */
+#define V4L2_CID_JPEG_DMA_SPEED (V4L2_CID_PRIVATE_BASE)
+#define V4L2_CID_JPEG_DMA_MAX_DOWN_SCALE (V4L2_CID_PRIVATE_BASE + 1)
+
+/* msm_jpeg_dma_buf */
+struct msm_jpeg_dma_buff {
+	int32_t fd;
+	uint32_t offset;
+};
+
+#endif /* __UAPI_MSM_JPEG_DMA__ */
diff -Nruw linux-4.4.115-fbx/include/uapi/media./msm_jpeg.h linux-4.4.115-fbx/include/uapi/media/msm_jpeg.h
--- linux-4.4.115-fbx/include/uapi/media./msm_jpeg.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/include/uapi/media/msm_jpeg.h	2019-01-22 16:16:28.603292590 +0100
@@ -0,0 +1,125 @@
+#ifndef __UAPI_LINUX_MSM_JPEG_H
+#define __UAPI_LINUX_MSM_JPEG_H
+
+#include <linux/types.h>
+#include <linux/ioctl.h>
+
+#define OUTPUT_H2V1  0
+#define OUTPUT_H2V2  1
+#define OUTPUT_BYTE  6
+
+#define MSM_JPEG_IOCTL_MAGIC 'g'
+
+#define MSM_JPEG_IOCTL_GET_HW_VERSION \
+	_IOW(MSM_JPEG_IOCTL_MAGIC, 1, struct msm_jpeg_hw_cmd)
+
+#define MSM_JPEG_IOCTL_RESET \
+	_IOW(MSM_JPEG_IOCTL_MAGIC, 2, struct msm_jpeg_ctrl_cmd)
+
+#define MSM_JPEG_IOCTL_STOP \
+	_IOW(MSM_JPEG_IOCTL_MAGIC, 3, struct msm_jpeg_hw_cmds)
+
+#define MSM_JPEG_IOCTL_START \
+	_IOW(MSM_JPEG_IOCTL_MAGIC, 4, struct msm_jpeg_hw_cmds)
+
+#define MSM_JPEG_IOCTL_INPUT_BUF_ENQUEUE \
+	_IOW(MSM_JPEG_IOCTL_MAGIC, 5, struct msm_jpeg_buf)
+
+#define MSM_JPEG_IOCTL_INPUT_GET \
+	_IOW(MSM_JPEG_IOCTL_MAGIC, 6, struct msm_jpeg_buf)
+
+#define MSM_JPEG_IOCTL_INPUT_GET_UNBLOCK \
+	_IOW(MSM_JPEG_IOCTL_MAGIC, 7, int)
+
+#define MSM_JPEG_IOCTL_OUTPUT_BUF_ENQUEUE \
+	_IOW(MSM_JPEG_IOCTL_MAGIC, 8, struct msm_jpeg_buf)
+
+#define MSM_JPEG_IOCTL_OUTPUT_GET \
+	_IOW(MSM_JPEG_IOCTL_MAGIC, 9, struct msm_jpeg_buf)
+
+#define MSM_JPEG_IOCTL_OUTPUT_GET_UNBLOCK \
+	_IOW(MSM_JPEG_IOCTL_MAGIC, 10, int)
+
+#define MSM_JPEG_IOCTL_EVT_GET \
+	_IOW(MSM_JPEG_IOCTL_MAGIC, 11, struct msm_jpeg_ctrl_cmd)
+
+#define MSM_JPEG_IOCTL_EVT_GET_UNBLOCK \
+	_IOW(MSM_JPEG_IOCTL_MAGIC, 12, int)
+
+#define MSM_JPEG_IOCTL_HW_CMD \
+	_IOW(MSM_JPEG_IOCTL_MAGIC, 13, struct msm_jpeg_hw_cmd)
+
+#define MSM_JPEG_IOCTL_HW_CMDS \
+	_IOW(MSM_JPEG_IOCTL_MAGIC, 14, struct msm_jpeg_hw_cmds)
+
+#define MSM_JPEG_IOCTL_TEST_DUMP_REGION \
+	_IOW(MSM_JPEG_IOCTL_MAGIC, 15, unsigned long)
+
+#define MSM_JPEG_IOCTL_SET_CLK_RATE \
+	_IOW(MSM_JPEG_IOCTL_MAGIC, 16, unsigned int)
+
+#define MSM_JPEG_MODE_REALTIME_ENCODE 0
+#define MSM_JPEG_MODE_OFFLINE_ENCODE 1
+#define MSM_JPEG_MODE_REALTIME_ROTATION 2
+#define MSM_JPEG_MODE_OFFLINE_ROTATION 3
+
+struct msm_jpeg_ctrl_cmd {
+	uint32_t type;
+	uint32_t len;
+	void     *value;
+};
+
+#define MSM_JPEG_EVT_RESET 0
+#define MSM_JPEG_EVT_SESSION_DONE	1
+#define MSM_JPEG_EVT_ERR 2
+
+struct msm_jpeg_buf {
+	uint32_t type;
+	int      fd;
+
+	void     *vaddr;
+
+	uint32_t y_off;
+	uint32_t y_len;
+	uint32_t framedone_len;
+
+	uint32_t cbcr_off;
+	uint32_t cbcr_len;
+
+	uint32_t num_of_mcu_rows;
+	uint32_t offset;
+	uint32_t pln2_off;
+	uint32_t pln2_len;
+};
+
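A sketch of queueing one input frame with the enqueue ioctl above; the fd,
lengths and offsets are illustrative stand-ins for values supplied by
whatever allocator backs the buffer:

	#include <sys/ioctl.h>

	struct msm_jpeg_buf buf = { 0 };

	buf.fd       = dmabuf_fd;	/* assumption: ion/dma-buf handle */
	buf.y_off    = 0;
	buf.y_len    = y_plane_len;	/* assumption: luma plane size    */
	buf.cbcr_off = y_plane_len;	/* chroma follows luma here       */
	buf.cbcr_len = cbcr_plane_len;

	ioctl(jpeg_fd, MSM_JPEG_IOCTL_INPUT_BUF_ENQUEUE, &buf);
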
+#define MSM_JPEG_HW_CMD_TYPE_READ      0
+#define MSM_JPEG_HW_CMD_TYPE_WRITE     1
+#define MSM_JPEG_HW_CMD_TYPE_WRITE_OR  2
+#define MSM_JPEG_HW_CMD_TYPE_UWAIT     3
+#define MSM_JPEG_HW_CMD_TYPE_MWAIT     4
+#define MSM_JPEG_HW_CMD_TYPE_MDELAY    5
+#define MSM_JPEG_HW_CMD_TYPE_UDELAY    6
+struct msm_jpeg_hw_cmd {
+
+	uint32_t type:4;
+
+	/* n microseconds of timeout for WAIT */
+	/* n microseconds of time for DELAY */
+	/* repeat n times for READ/WRITE */
+	/* max is 0xFFF, 4095 */
+	uint32_t n:12;
+	uint32_t offset:16;
+	uint32_t mask;
+	union {
+		uint32_t data;   /* for single READ/WRITE/WAIT, n = 1 */
+		uint32_t *pdata;   /* for multiple READ/WRITE/WAIT, n > 1 */
+	};
+};
+
+struct msm_jpeg_hw_cmds {
+	uint32_t m; /* number of elements in the hw_cmd array */
+	struct msm_jpeg_hw_cmd hw_cmd[1];
+};
+
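The hw_cmd[1] member is a variable-length tail: callers allocate
sizeof(struct msm_jpeg_hw_cmds) plus (m - 1) further elements and set m
accordingly. A sketch packing a single register write (the register offset
and device fd are assumptions for illustration):

	#include <stdlib.h>
	#include <sys/ioctl.h>

	struct msm_jpeg_hw_cmds *cmds;

	cmds = calloc(1, sizeof(*cmds));	/* room for hw_cmd[0] only */
	cmds->m = 1;
	cmds->hw_cmd[0].type   = MSM_JPEG_HW_CMD_TYPE_WRITE;	/* 4 bits  */
	cmds->hw_cmd[0].n      = 1;		/* one write, max 0xFFF    */
	cmds->hw_cmd[0].offset = 0x0008;	/* hypothetical register   */
	cmds->hw_cmd[0].mask   = 0xffffffff;
	cmds->hw_cmd[0].data   = 0x1;		/* n == 1, so use .data    */

	ioctl(jpeg_fd, MSM_JPEG_IOCTL_HW_CMDS, cmds);
	free(cmds);
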
+#endif
+
diff -Nruw linux-4.4.115-fbx/include/uapi/media./msm_media_info.h linux-4.4.115-fbx/include/uapi/media/msm_media_info.h
--- linux-4.4.115-fbx/include/uapi/media./msm_media_info.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/include/uapi/media/msm_media_info.h	2019-01-22 16:16:28.603292590 +0100
@@ -0,0 +1,1054 @@
+#ifndef __MEDIA_INFO_H__
+#define __MEDIA_INFO_H__
+
+#ifndef MSM_MEDIA_ALIGN
+#define MSM_MEDIA_ALIGN(__sz, __align) (((__align) & ((__align) - 1)) ?\
+	((((__sz) + (__align) - 1) / (__align)) * (__align)) :\
+	(((__sz) + (__align) - 1) & (~((__align) - 1))))
+#endif
+
+#ifndef MSM_MEDIA_ROUNDUP
+#define MSM_MEDIA_ROUNDUP(__sz, __r) (((__sz) + ((__r) - 1)) / (__r))
+#endif
+
+#ifndef MSM_MEDIA_MAX
+#define MSM_MEDIA_MAX(__a, __b) ((__a) > (__b)?(__a):(__b))
+#endif
+
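MSM_MEDIA_ALIGN takes the cheap bitmask path when __align is a power of two
and falls back to divide-and-multiply otherwise; MSM_MEDIA_ROUNDUP is a plain
ceiling division. A few representative values:

	MSM_MEDIA_ALIGN(1080, 32);	/* -> 1088, power-of-two mask path */
	MSM_MEDIA_ALIGN(1280, 192);	/* -> 1344, non-power-of-two path  */
	MSM_MEDIA_ROUNDUP(1080, 16);	/* -> 68                           */
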
+enum color_fmts {
+	/* Venus NV12:
+	 * YUV 4:2:0 image with a plane of 8 bit Y samples followed
+	 * by an interleaved U/V plane containing 8 bit 2x2 subsampled
+	 * colour difference samples.
+	 *
+	 * <-------- Y/UV_Stride -------->
+	 * <------- Width ------->
+	 * Y Y Y Y Y Y Y Y Y Y Y Y . . . .  ^           ^
+	 * Y Y Y Y Y Y Y Y Y Y Y Y . . . .  |           |
+	 * Y Y Y Y Y Y Y Y Y Y Y Y . . . .  Height      |
+	 * Y Y Y Y Y Y Y Y Y Y Y Y . . . .  |          Y_Scanlines
+	 * Y Y Y Y Y Y Y Y Y Y Y Y . . . .  |           |
+	 * Y Y Y Y Y Y Y Y Y Y Y Y . . . .  |           |
+	 * Y Y Y Y Y Y Y Y Y Y Y Y . . . .  |           |
+	 * Y Y Y Y Y Y Y Y Y Y Y Y . . . .  V           |
+	 * . . . . . . . . . . . . . . . .              |
+	 * . . . . . . . . . . . . . . . .              |
+	 * . . . . . . . . . . . . . . . .              |
+	 * . . . . . . . . . . . . . . . .              V
+	 * U V U V U V U V U V U V . . . .  ^
+	 * U V U V U V U V U V U V . . . .  |
+	 * U V U V U V U V U V U V . . . .  |
+	 * U V U V U V U V U V U V . . . .  UV_Scanlines
+	 * . . . . . . . . . . . . . . . .  |
+	 * . . . . . . . . . . . . . . . .  V
+	 * . . . . . . . . . . . . . . . .  --> Buffer size alignment
+	 *
+	 * Y_Stride : Width aligned to 128
+	 * UV_Stride : Width aligned to 128
+	 * Y_Scanlines: Height aligned to 32
+	 * UV_Scanlines: Height/2 aligned to 16
+	 * Extradata: Arbitrary (software-imposed) padding
+	 * Total size = align(Y_Stride * Y_Scanlines
+	 *          + UV_Stride * UV_Scanlines
+	 *          + max(Extradata, Y_Stride * 8), 4096)
+	 */
+	COLOR_FMT_NV12,
+
+	/* Venus NV21:
+	 * YUV 4:2:0 image with a plane of 8 bit Y samples followed
+	 * by an interleaved V/U plane containing 8 bit 2x2 subsampled
+	 * colour difference samples.
+	 *
+	 * <-------- Y/UV_Stride -------->
+	 * <------- Width ------->
+	 * Y Y Y Y Y Y Y Y Y Y Y Y . . . .  ^           ^
+	 * Y Y Y Y Y Y Y Y Y Y Y Y . . . .  |           |
+	 * Y Y Y Y Y Y Y Y Y Y Y Y . . . .  Height      |
+	 * Y Y Y Y Y Y Y Y Y Y Y Y . . . .  |          Y_Scanlines
+	 * Y Y Y Y Y Y Y Y Y Y Y Y . . . .  |           |
+	 * Y Y Y Y Y Y Y Y Y Y Y Y . . . .  |           |
+	 * Y Y Y Y Y Y Y Y Y Y Y Y . . . .  |           |
+	 * Y Y Y Y Y Y Y Y Y Y Y Y . . . .  V           |
+	 * . . . . . . . . . . . . . . . .              |
+	 * . . . . . . . . . . . . . . . .              |
+	 * . . . . . . . . . . . . . . . .              |
+	 * . . . . . . . . . . . . . . . .              V
+	 * V U V U V U V U V U V U . . . .  ^
+	 * V U V U V U V U V U V U . . . .  |
+	 * V U V U V U V U V U V U . . . .  |
+	 * V U V U V U V U V U V U . . . .  UV_Scanlines
+	 * . . . . . . . . . . . . . . . .  |
+	 * . . . . . . . . . . . . . . . .  V
+	 * . . . . . . . . . . . . . . . .  --> Padding & Buffer size alignment
+	 *
+	 * Y_Stride : Width aligned to 128
+	 * UV_Stride : Width aligned to 128
+	 * Y_Scanlines: Height aligned to 32
+	 * UV_Scanlines: Height/2 aligned to 16
+	 * Extradata: Arbitrary (software-imposed) padding
+	 * Total size = align(Y_Stride * Y_Scanlines
+	 *          + UV_Stride * UV_Scanlines
+	 *          + max(Extradata, Y_Stride * 8), 4096)
+	 */
+	COLOR_FMT_NV21,
+	/* Venus NV12_MVTB:
+	 * Two YUV 4:2:0 images/views one after the other
+	 * in a top-bottom layout, same as NV12
+	 * with a plane of 8 bit Y samples followed
+	 * by an interleaved U/V plane containing 8 bit 2x2 subsampled
+	 * colour difference samples.
+	 *
+	 *
+	 * <-------- Y/UV_Stride -------->
+	 * <------- Width ------->
+	 * Y Y Y Y Y Y Y Y Y Y Y Y . . . .  ^           ^               ^
+	 * Y Y Y Y Y Y Y Y Y Y Y Y . . . .  |           |               |
+	 * Y Y Y Y Y Y Y Y Y Y Y Y . . . .  Height      |               |
+	 * Y Y Y Y Y Y Y Y Y Y Y Y . . . .  |          Y_Scanlines      |
+	 * Y Y Y Y Y Y Y Y Y Y Y Y . . . .  |           |               |
+	 * Y Y Y Y Y Y Y Y Y Y Y Y . . . .  |           |               |
+	 * Y Y Y Y Y Y Y Y Y Y Y Y . . . .  |           |               |
+	 * Y Y Y Y Y Y Y Y Y Y Y Y . . . .  V           |               |
+	 * . . . . . . . . . . . . . . . .              |             View_1
+	 * . . . . . . . . . . . . . . . .              |               |
+	 * . . . . . . . . . . . . . . . .              |               |
+	 * . . . . . . . . . . . . . . . .              V               |
+	 * U V U V U V U V U V U V . . . .  ^                           |
+	 * U V U V U V U V U V U V . . . .  |                           |
+	 * U V U V U V U V U V U V . . . .  |                           |
+	 * U V U V U V U V U V U V . . . .  UV_Scanlines                |
+	 * . . . . . . . . . . . . . . . .  |                           |
+	 * . . . . . . . . . . . . . . . .  V                           V
+	 * Y Y Y Y Y Y Y Y Y Y Y Y . . . .  ^           ^               ^
+	 * Y Y Y Y Y Y Y Y Y Y Y Y . . . .  |           |               |
+	 * Y Y Y Y Y Y Y Y Y Y Y Y . . . .  Height      |               |
+	 * Y Y Y Y Y Y Y Y Y Y Y Y . . . .  |          Y_Scanlines      |
+	 * Y Y Y Y Y Y Y Y Y Y Y Y . . . .  |           |               |
+	 * Y Y Y Y Y Y Y Y Y Y Y Y . . . .  |           |               |
+	 * Y Y Y Y Y Y Y Y Y Y Y Y . . . .  |           |               |
+	 * Y Y Y Y Y Y Y Y Y Y Y Y . . . .  V           |               |
+	 * . . . . . . . . . . . . . . . .              |             View_2
+	 * . . . . . . . . . . . . . . . .              |               |
+	 * . . . . . . . . . . . . . . . .              |               |
+	 * . . . . . . . . . . . . . . . .              V               |
+	 * U V U V U V U V U V U V . . . .  ^                           |
+	 * U V U V U V U V U V U V . . . .  |                           |
+	 * U V U V U V U V U V U V . . . .  |                           |
+	 * U V U V U V U V U V U V . . . .  UV_Scanlines                |
+	 * . . . . . . . . . . . . . . . .  |                           |
+	 * . . . . . . . . . . . . . . . .  V                           V
+	 * . . . . . . . . . . . . . . . .  --> Buffer size alignment
+	 *
+	 * Y_Stride : Width aligned to 128
+	 * UV_Stride : Width aligned to 128
+	 * Y_Scanlines: Height aligned to 32
+	 * UV_Scanlines: Height/2 aligned to 16
+	 * View_1 begins at: 0 (zero)
+	 * View_2 begins at: Y_Stride * Y_Scanlines + UV_Stride * UV_Scanlines
+	 * Extradata: Arbitrary (software-imposed) padding
+	 * Total size = align((2*(Y_Stride * Y_Scanlines)
+	 *          + 2*(UV_Stride * UV_Scanlines) + Extradata), 4096)
+	 */
+	COLOR_FMT_NV12_MVTB,
+	/* Venus NV12 UBWC:
+	 * Compressed Macro-tile format for NV12.
+	 * Contains 4 planes in the following order -
+	 * (A) Y_Meta_Plane
+	 * (B) Y_UBWC_Plane
+	 * (C) UV_Meta_Plane
+	 * (D) UV_UBWC_Plane
+	 *
+	 * Y_Meta_Plane consists of meta information to decode compressed
+	 * tile data in Y_UBWC_Plane.
+	 * Y_UBWC_Plane consists of Y data in compressed macro-tile format.
+	 * UBWC decoder block will use the Y_Meta_Plane data together with
+	 * Y_UBWC_Plane data to produce loss-less uncompressed 8 bit Y samples.
+	 *
+	 * UV_Meta_Plane consists of meta information to decode compressed
+	 * tile data in UV_UBWC_Plane.
+	 * UV_UBWC_Plane consists of UV data in compressed macro-tile format.
+	 * UBWC decoder block will use UV_Meta_Plane data together with
+	 * UV_UBWC_Plane data to produce loss-less uncompressed 8 bit 2x2
+	 * subsampled color difference samples.
+	 *
+	 * Each tile in Y_UBWC_Plane/UV_UBWC_Plane is independently decodable
+	 * and randomly accessible. There is no dependency between tiles.
+	 *
+	 * <----- Y_Meta_Stride ---->
+	 * <-------- Width ------>
+	 * M M M M M M M M M M M M . .      ^           ^
+	 * M M M M M M M M M M M M . .      |           |
+	 * M M M M M M M M M M M M . .      Height      |
+	 * M M M M M M M M M M M M . .      |         Meta_Y_Scanlines
+	 * M M M M M M M M M M M M . .      |           |
+	 * M M M M M M M M M M M M . .      |           |
+	 * M M M M M M M M M M M M . .      |           |
+	 * M M M M M M M M M M M M . .      V           |
+	 * . . . . . . . . . . . . . .                  |
+	 * . . . . . . . . . . . . . .                  |
+	 * . . . . . . . . . . . . . .      -------> Buffer size aligned to 4k
+	 * . . . . . . . . . . . . . .                  V
+	 * <--Compressed tile Y Stride--->
+	 * <------- Width ------->
+	 * Y* Y* Y* Y* Y* Y* Y* Y* . . . .  ^           ^
+	 * Y* Y* Y* Y* Y* Y* Y* Y* . . . .  |           |
+	 * Y* Y* Y* Y* Y* Y* Y* Y* . . . .  Height      |
+	 * Y* Y* Y* Y* Y* Y* Y* Y* . . . .  |        Macro_tile_Y_Scanlines
+	 * Y* Y* Y* Y* Y* Y* Y* Y* . . . .  |           |
+	 * Y* Y* Y* Y* Y* Y* Y* Y* . . . .  |           |
+	 * Y* Y* Y* Y* Y* Y* Y* Y* . . . .  |           |
+	 * Y* Y* Y* Y* Y* Y* Y* Y* . . . .  V           |
+	 * . . . . . . . . . . . . . . . .              |
+	 * . . . . . . . . . . . . . . . .              |
+	 * . . . . . . . . . . . . . . . .  -------> Buffer size aligned to 4k
+	 * . . . . . . . . . . . . . . . .              V
+	 * <----- UV_Meta_Stride ---->
+	 * M M M M M M M M M M M M . .      ^
+	 * M M M M M M M M M M M M . .      |
+	 * M M M M M M M M M M M M . .      |
+	 * M M M M M M M M M M M M . .      M_UV_Scanlines
+	 * . . . . . . . . . . . . . .      |
+	 * . . . . . . . . . . . . . .      V
+	 * . . . . . . . . . . . . . .      -------> Buffer size aligned to 4k
+	 * <--Compressed tile UV Stride--->
+	 * U* V* U* V* U* V* U* V* . . . .  ^
+	 * U* V* U* V* U* V* U* V* . . . .  |
+	 * U* V* U* V* U* V* U* V* . . . .  |
+	 * U* V* U* V* U* V* U* V* . . . .  UV_Scanlines
+	 * . . . . . . . . . . . . . . . .  |
+	 * . . . . . . . . . . . . . . . .  V
+	 * . . . . . . . . . . . . . . . .  -------> Buffer size aligned to 4k
+	 *
+	 * Y_Stride = align(Width, 128)
+	 * UV_Stride = align(Width, 128)
+	 * Y_Scanlines = align(Height, 32)
+	 * UV_Scanlines = align(Height/2, 16)
+	 * Y_UBWC_Plane_size = align(Y_Stride * Y_Scanlines, 4096)
+	 * UV_UBWC_Plane_size = align(UV_Stride * UV_Scanlines, 4096)
+	 * Y_Meta_Stride = align(roundup(Width, Y_TileWidth), 64)
+	 * Y_Meta_Scanlines = align(roundup(Height, Y_TileHeight), 16)
+	 * Y_Meta_Plane_size = align(Y_Meta_Stride * Y_Meta_Scanlines, 4096)
+	 * UV_Meta_Stride = align(roundup(Width, UV_TileWidth), 64)
+	 * UV_Meta_Scanlines = align(roundup(Height, UV_TileHeight), 16)
+	 * UV_Meta_Plane_size = align(UV_Meta_Stride * UV_Meta_Scanlines, 4096)
+	 * Extradata = 16k
+	 *
+	 * Total size = align( Y_UBWC_Plane_size + UV_UBWC_Plane_size +
+	 *           Y_Meta_Plane_size + UV_Meta_Plane_size
+	 *           + max(Extradata, Y_Stride * 64), 4096)
+	 */
+	COLOR_FMT_NV12_UBWC,
+	/* Venus NV12 10-bit UBWC:
+	 * Compressed Macro-tile format for NV12.
+	 * Contains 4 planes in the following order -
+	 * (A) Y_Meta_Plane
+	 * (B) Y_UBWC_Plane
+	 * (C) UV_Meta_Plane
+	 * (D) UV_UBWC_Plane
+	 *
+	 * Y_Meta_Plane consists of meta information to decode compressed
+	 * tile data in Y_UBWC_Plane.
+	 * Y_UBWC_Plane consists of Y data in compressed macro-tile format.
+	 * UBWC decoder block will use the Y_Meta_Plane data together with
+	 * Y_UBWC_Plane data to produce loss-less uncompressed 10 bit Y samples.
+	 *
+	 * UV_Meta_Plane consists of meta information to decode compressed
+	 * tile data in UV_UBWC_Plane.
+	 * UV_UBWC_Plane consists of UV data in compressed macro-tile format.
+	 * UBWC decoder block will use UV_Meta_Plane data together with
+	 * UV_UBWC_Plane data to produce loss-less uncompressed 10 bit 2x2
+	 * subsampled color difference samples.
+	 *
+	 * Each tile in Y_UBWC_Plane/UV_UBWC_Plane is independently decodable
+	 * and randomly accessible. There is no dependency between tiles.
+	 *
+	 * <----- Y_Meta_Stride ----->
+	 * <-------- Width ------>
+	 * M M M M M M M M M M M M . .      ^           ^
+	 * M M M M M M M M M M M M . .      |           |
+	 * M M M M M M M M M M M M . .      Height      |
+	 * M M M M M M M M M M M M . .      |         Meta_Y_Scanlines
+	 * M M M M M M M M M M M M . .      |           |
+	 * M M M M M M M M M M M M . .      |           |
+	 * M M M M M M M M M M M M . .      |           |
+	 * M M M M M M M M M M M M . .      V           |
+	 * . . . . . . . . . . . . . .                  |
+	 * . . . . . . . . . . . . . .                  |
+	 * . . . . . . . . . . . . . .      -------> Buffer size aligned to 4k
+	 * . . . . . . . . . . . . . .                  V
+	 * <--Compressed tile Y Stride--->
+	 * <------- Width ------->
+	 * Y* Y* Y* Y* Y* Y* Y* Y* . . . .  ^           ^
+	 * Y* Y* Y* Y* Y* Y* Y* Y* . . . .  |           |
+	 * Y* Y* Y* Y* Y* Y* Y* Y* . . . .  Height      |
+	 * Y* Y* Y* Y* Y* Y* Y* Y* . . . .  |        Macro_tile_Y_Scanlines
+	 * Y* Y* Y* Y* Y* Y* Y* Y* . . . .  |           |
+	 * Y* Y* Y* Y* Y* Y* Y* Y* . . . .  |           |
+	 * Y* Y* Y* Y* Y* Y* Y* Y* . . . .  |           |
+	 * Y* Y* Y* Y* Y* Y* Y* Y* . . . .  V           |
+	 * . . . . . . . . . . . . . . . .              |
+	 * . . . . . . . . . . . . . . . .              |
+	 * . . . . . . . . . . . . . . . .  -------> Buffer size aligned to 4k
+	 * . . . . . . . . . . . . . . . .              V
+	 * <----- UV_Meta_Stride ---->
+	 * M M M M M M M M M M M M . .      ^
+	 * M M M M M M M M M M M M . .      |
+	 * M M M M M M M M M M M M . .      |
+	 * M M M M M M M M M M M M . .      M_UV_Scanlines
+	 * . . . . . . . . . . . . . .      |
+	 * . . . . . . . . . . . . . .      V
+	 * . . . . . . . . . . . . . .      -------> Buffer size aligned to 4k
+	 * <--Compressed tile UV Stride--->
+	 * U* V* U* V* U* V* U* V* . . . .  ^
+	 * U* V* U* V* U* V* U* V* . . . .  |
+	 * U* V* U* V* U* V* U* V* . . . .  |
+	 * U* V* U* V* U* V* U* V* . . . .  UV_Scanlines
+	 * . . . . . . . . . . . . . . . .  |
+	 * . . . . . . . . . . . . . . . .  V
+	 * . . . . . . . . . . . . . . . .  -------> Buffer size aligned to 4k
+	 *
+	 *
+	 * Y_Stride = align(Width * 4/3, 128)
+	 * UV_Stride = align(Width * 4/3, 128)
+	 * Y_Scanlines = align(Height, 32)
+	 * UV_Scanlines = align(Height/2, 16)
+	 * Y_UBWC_Plane_Size = align(Y_Stride * Y_Scanlines, 4096)
+	 * UV_UBWC_Plane_Size = align(UV_Stride * UV_Scanlines, 4096)
+	 * Y_Meta_Stride = align(roundup(Width, Y_TileWidth), 64)
+	 * Y_Meta_Scanlines = align(roundup(Height, Y_TileHeight), 16)
+	 * Y_Meta_Plane_size = align(Y_Meta_Stride * Y_Meta_Scanlines, 4096)
+	 * UV_Meta_Stride = align(roundup(Width, UV_TileWidth), 64)
+	 * UV_Meta_Scanlines = align(roundup(Height, UV_TileHeight), 16)
+	 * UV_Meta_Plane_size = align(UV_Meta_Stride * UV_Meta_Scanlines, 4096)
+	 * Extradata = 16k
+	 *
+	 * Total size = align(Y_UBWC_Plane_size + UV_UBWC_Plane_size +
+	 *           Y_Meta_Plane_size + UV_Meta_Plane_size
+	 *           + max(Extradata, Y_Stride * 64), 4096)
+	 */
+	COLOR_FMT_NV12_BPP10_UBWC,
+	/* Venus RGBA8888 format:
+	 * Contains 1 plane in the following order -
+	 * (A) RGBA plane
+	 *
+	 * <-------- RGB_Stride -------->
+	 * <------- Width ------->
+	 * R R R R R R R R R R R R . . . .  ^           ^
+	 * R R R R R R R R R R R R . . . .  |           |
+	 * R R R R R R R R R R R R . . . .  Height      |
+	 * R R R R R R R R R R R R . . . .  |       RGB_Scanlines
+	 * R R R R R R R R R R R R . . . .  |           |
+	 * R R R R R R R R R R R R . . . .  |           |
+	 * R R R R R R R R R R R R . . . .  |           |
+	 * R R R R R R R R R R R R . . . .  V           |
+	 * . . . . . . . . . . . . . . . .              |
+	 * . . . . . . . . . . . . . . . .              |
+	 * . . . . . . . . . . . . . . . .              |
+	 * . . . . . . . . . . . . . . . .              V
+	 *
+	 * RGB_Stride = align(Width * 4, 128)
+	 * RGB_Scanlines = align(Height, 32)
+	 * RGB_Plane_size = align(RGB_Stride * RGB_Scanlines, 4096)
+	 * Extradata = 8k
+	 *
+	 * Total size = align(RGB_Plane_size + Extradata, 4096)
+	 */
+	COLOR_FMT_RGBA8888,
+	/* Venus RGBA8888 UBWC format:
+	 * Contains 2 planes in the following order -
+	 * (A) Meta plane
+	 * (B) RGBA plane
+	 *
+	 * <--- RGB_Meta_Stride ---->
+	 * <-------- Width ------>
+	 * M M M M M M M M M M M M . .      ^           ^
+	 * M M M M M M M M M M M M . .      |           |
+	 * M M M M M M M M M M M M . .      Height      |
+	 * M M M M M M M M M M M M . .      |       Meta_RGB_Scanlines
+	 * M M M M M M M M M M M M . .      |           |
+	 * M M M M M M M M M M M M . .      |           |
+	 * M M M M M M M M M M M M . .      |           |
+	 * M M M M M M M M M M M M . .      V           |
+	 * . . . . . . . . . . . . . .                  |
+	 * . . . . . . . . . . . . . .                  |
+	 * . . . . . . . . . . . . . .      -------> Buffer size aligned to 4k
+	 * . . . . . . . . . . . . . .                  V
+	 * <-------- RGB_Stride -------->
+	 * <------- Width ------->
+	 * R R R R R R R R R R R R . . . .  ^           ^
+	 * R R R R R R R R R R R R . . . .  |           |
+	 * R R R R R R R R R R R R . . . .  Height      |
+	 * R R R R R R R R R R R R . . . .  |       RGB_Scanlines
+	 * R R R R R R R R R R R R . . . .  |           |
+	 * R R R R R R R R R R R R . . . .  |           |
+	 * R R R R R R R R R R R R . . . .  |           |
+	 * R R R R R R R R R R R R . . . .  V           |
+	 * . . . . . . . . . . . . . . . .              |
+	 * . . . . . . . . . . . . . . . .              |
+	 * . . . . . . . . . . . . . . . .    -------> Buffer size aligned to 4k
+	 * . . . . . . . . . . . . . . . .              V
+	 *
+	 * RGB_Stride = align(Width * 4, 128)
+	 * RGB_Scanlines = align(Height, 32)
+	 * RGB_Plane_size = align(RGB_Stride * RGB_Scanlines, 4096)
+	 * RGB_Meta_Stride = align(roundup(Width, RGB_TileWidth), 64)
+	 * RGB_Meta_Scanlines = align(roundup(Height, RGB_TileHeight), 16)
+	 * RGB_Meta_Plane_size = align(RGB_Meta_Stride *
+	 *		RGB_Meta_Scanlines, 4096)
+	 * Extradata = 8k
+	 *
+	 * Total size = align(RGB_Meta_Plane_size + RGB_Plane_size +
+	 *		Extradata, 4096)
+	 */
+	COLOR_FMT_RGBA8888_UBWC,
+	/* Venus RGBA1010102 UBWC format:
+	 * Contains 2 planes in the following order -
+	 * (A) Meta plane
+	 * (B) RGBA plane
+	 *
+	 * <--- RGB_Meta_Stride ---->
+	 * <-------- Width ------>
+	 * M M M M M M M M M M M M . .      ^           ^
+	 * M M M M M M M M M M M M . .      |           |
+	 * M M M M M M M M M M M M . .      Height      |
+	 * M M M M M M M M M M M M . .      |       Meta_RGB_Scanlines
+	 * M M M M M M M M M M M M . .      |           |
+	 * M M M M M M M M M M M M . .      |           |
+	 * M M M M M M M M M M M M . .      |           |
+	 * M M M M M M M M M M M M . .      V           |
+	 * . . . . . . . . . . . . . .                  |
+	 * . . . . . . . . . . . . . .                  |
+	 * . . . . . . . . . . . . . .      -------> Buffer size aligned to 4k
+	 * . . . . . . . . . . . . . .                  V
+	 * <-------- RGB_Stride -------->
+	 * <------- Width ------->
+	 * R R R R R R R R R R R R . . . .  ^           ^
+	 * R R R R R R R R R R R R . . . .  |           |
+	 * R R R R R R R R R R R R . . . .  Height      |
+	 * R R R R R R R R R R R R . . . .  |       RGB_Scanlines
+	 * R R R R R R R R R R R R . . . .  |           |
+	 * R R R R R R R R R R R R . . . .  |           |
+	 * R R R R R R R R R R R R . . . .  |           |
+	 * R R R R R R R R R R R R . . . .  V           |
+	 * . . . . . . . . . . . . . . . .              |
+	 * . . . . . . . . . . . . . . . .              |
+	 * . . . . . . . . . . . . . . . .    -------> Buffer size aligned to 4k
+	 * . . . . . . . . . . . . . . . .              V
+	 *
+	 * RGB_Stride = align(Width * 4, 256)
+	 * RGB_Scanlines = align(Height, 16)
+	 * RGB_Plane_size = align(RGB_Stride * RGB_Scanlines, 4096)
+	 * RGB_Meta_Stride = align(roundup(Width, RGB_TileWidth), 64)
+	 * RGB_Meta_Scanlines = align(roundup(Height, RGB_TileHeight), 16)
+	 * RGB_Meta_Plane_size = align(RGB_Meta_Stride *
+	 *		RGB_Meta_Scanlines, 4096)
+	 * Extradata = 8k
+	 *
+	 * Total size = align(RGB_Meta_Plane_size + RGB_Plane_size +
+	 *		Extradata, 4096)
+	 */
+	COLOR_FMT_RGBA1010102_UBWC,
+	/* Venus RGB565 UBWC format:
+	 * Contains 2 planes in the following order -
+	 * (A) Meta plane
+	 * (B) RGB plane
+	 *
+	 * <--- RGB_Meta_Stride ---->
+	 * <-------- Width ------>
+	 * M M M M M M M M M M M M . .      ^           ^
+	 * M M M M M M M M M M M M . .      |           |
+	 * M M M M M M M M M M M M . .      Height      |
+	 * M M M M M M M M M M M M . .      |       Meta_RGB_Scanlines
+	 * M M M M M M M M M M M M . .      |           |
+	 * M M M M M M M M M M M M . .      |           |
+	 * M M M M M M M M M M M M . .      |           |
+	 * M M M M M M M M M M M M . .      V           |
+	 * . . . . . . . . . . . . . .                  |
+	 * . . . . . . . . . . . . . .                  |
+	 * . . . . . . . . . . . . . .      -------> Buffer size aligned to 4k
+	 * . . . . . . . . . . . . . .                  V
+	 * <-------- RGB_Stride -------->
+	 * <------- Width ------->
+	 * R R R R R R R R R R R R . . . .  ^           ^
+	 * R R R R R R R R R R R R . . . .  |           |
+	 * R R R R R R R R R R R R . . . .  Height      |
+	 * R R R R R R R R R R R R . . . .  |       RGB_Scanlines
+	 * R R R R R R R R R R R R . . . .  |           |
+	 * R R R R R R R R R R R R . . . .  |           |
+	 * R R R R R R R R R R R R . . . .  |           |
+	 * R R R R R R R R R R R R . . . .  V           |
+	 * . . . . . . . . . . . . . . . .              |
+	 * . . . . . . . . . . . . . . . .              |
+	 * . . . . . . . . . . . . . . . .    -------> Buffer size aligned to 4k
+	 * . . . . . . . . . . . . . . . .              V
+	 *
+	 * RGB_Stride = align(Width * 2, 128)
+	 * RGB_Scanlines = align(Height, 16)
+	 * RGB_Plane_size = align(RGB_Stride * RGB_Scanlines, 4096)
+	 * RGB_Meta_Stride = align(roundup(Width, RGB_TileWidth), 64)
+	 * RGB_Meta_Scanlines = align(roundup(Height, RGB_TileHeight), 16)
+	 * RGB_Meta_Plane_size = align(RGB_Meta_Stride *
+	 *		RGB_Meta_Scanlines, 4096)
+	 * Extradata = 8k
+	 *
+	 * Total size = align(RGB_Meta_Plane_size + RGB_Plane_size +
+	 *		Extradata, 4096)
+	 */
+	COLOR_FMT_RGB565_UBWC,
+	/* P010 UBWC:
+	 * Compressed Macro-tile format for P010.
+	 * Contains 4 planes in the following order -
+	 * (A) Y_Meta_Plane
+	 * (B) Y_UBWC_Plane
+	 * (C) UV_Meta_Plane
+	 * (D) UV_UBWC_Plane
+	 *
+	 * Y_Meta_Plane consists of meta information to decode compressed
+	 * tile data in Y_UBWC_Plane.
+	 * Y_UBWC_Plane consists of Y data in compressed macro-tile format.
+	 * UBWC decoder block will use the Y_Meta_Plane data together with
+	 * Y_UBWC_Plane data to produce loss-less uncompressed 10 bit Y samples.
+	 *
+	 * UV_Meta_Plane consists of meta information to decode compressed
+	 * tile data in UV_UBWC_Plane.
+	 * UV_UBWC_Plane consists of UV data in compressed macro-tile format.
+	 * UBWC decoder block will use UV_Meta_Plane data together with
+	 * UV_UBWC_Plane data to produce loss-less uncompressed 10 bit 2x2
+	 * subsampled color difference samples.
+	 *
+	 * Each tile in Y_UBWC_Plane/UV_UBWC_Plane is independently decodable
+	 * and randomly accessible. There is no dependency between tiles.
+	 *
+	 * <----- Y_Meta_Stride ----->
+	 * <-------- Width ------>
+	 * M M M M M M M M M M M M . .      ^           ^
+	 * M M M M M M M M M M M M . .      |           |
+	 * M M M M M M M M M M M M . .      Height      |
+	 * M M M M M M M M M M M M . .      |         Meta_Y_Scanlines
+	 * M M M M M M M M M M M M . .      |           |
+	 * M M M M M M M M M M M M . .      |           |
+	 * M M M M M M M M M M M M . .      |           |
+	 * M M M M M M M M M M M M . .      V           |
+	 * . . . . . . . . . . . . . .                  |
+	 * . . . . . . . . . . . . . .                  |
+	 * . . . . . . . . . . . . . .      -------> Buffer size aligned to 4k
+	 * . . . . . . . . . . . . . .                  V
+	 * <--Compressed tile Y Stride--->
+	 * <------- Width ------->
+	 * Y* Y* Y* Y* Y* Y* Y* Y* . . . .  ^           ^
+	 * Y* Y* Y* Y* Y* Y* Y* Y* . . . .  |           |
+	 * Y* Y* Y* Y* Y* Y* Y* Y* . . . .  Height      |
+	 * Y* Y* Y* Y* Y* Y* Y* Y* . . . .  |        Macro_tile_Y_Scanlines
+	 * Y* Y* Y* Y* Y* Y* Y* Y* . . . .  |           |
+	 * Y* Y* Y* Y* Y* Y* Y* Y* . . . .  |           |
+	 * Y* Y* Y* Y* Y* Y* Y* Y* . . . .  |           |
+	 * Y* Y* Y* Y* Y* Y* Y* Y* . . . .  V           |
+	 * . . . . . . . . . . . . . . . .              |
+	 * . . . . . . . . . . . . . . . .              |
+	 * . . . . . . . . . . . . . . . .  -------> Buffer size aligned to 4k
+	 * . . . . . . . . . . . . . . . .              V
+	 * <----- UV_Meta_Stride ---->
+	 * M M M M M M M M M M M M . .      ^
+	 * M M M M M M M M M M M M . .      |
+	 * M M M M M M M M M M M M . .      |
+	 * M M M M M M M M M M M M . .      M_UV_Scanlines
+	 * . . . . . . . . . . . . . .      |
+	 * . . . . . . . . . . . . . .      V
+	 * . . . . . . . . . . . . . .      -------> Buffer size aligned to 4k
+	 * <--Compressed tile UV Stride--->
+	 * U* V* U* V* U* V* U* V* . . . .  ^
+	 * U* V* U* V* U* V* U* V* . . . .  |
+	 * U* V* U* V* U* V* U* V* . . . .  |
+	 * U* V* U* V* U* V* U* V* . . . .  UV_Scanlines
+	 * . . . . . . . . . . . . . . . .  |
+	 * . . . . . . . . . . . . . . . .  V
+	 * . . . . . . . . . . . . . . . .  -------> Buffer size aligned to 4k
+	 *
+	 *
+	 * Y_Stride = align(Width * 2, 256)
+	 * UV_Stride = align(Width * 2, 256)
+	 * Y_Scanlines = align(Height, 16)
+	 * UV_Scanlines = align(Height/2, 16)
+	 * Y_UBWC_Plane_Size = align(Y_Stride * Y_Scanlines, 4096)
+	 * UV_UBWC_Plane_Size = align(UV_Stride * UV_Scanlines, 4096)
+	 * Y_Meta_Stride = align(roundup(Width, Y_TileWidth), 64)
+	 * Y_Meta_Scanlines = align(roundup(Height, Y_TileHeight), 16)
+	 * Y_Meta_Plane_size = align(Y_Meta_Stride * Y_Meta_Scanlines, 4096)
+	 * UV_Meta_Stride = align(roundup(Width, UV_TileWidth), 64)
+	 * UV_Meta_Scanlines = align(roundup(Height, UV_TileHeight), 16)
+	 * UV_Meta_Plane_size = align(UV_Meta_Stride * UV_Meta_Scanlines, 4096)
+	 * Extradata = 8k
+	 *
+	 * Total size = align(Y_UBWC_Plane_size + UV_UBWC_Plane_size +
+	 *           Y_Meta_Plane_size + UV_Meta_Plane_size
+	 *           + max(Extradata, Y_Stride * 48), 4096)
+	 */
+	COLOR_FMT_P010_UBWC,
+};
+
+#define COLOR_FMT_RGBA1010102_UBWC	COLOR_FMT_RGBA1010102_UBWC
+#define COLOR_FMT_RGB565_UBWC		COLOR_FMT_RGB565_UBWC
+#define COLOR_FMT_P010_UBWC		COLOR_FMT_P010_UBWC
+
+static inline unsigned int VENUS_EXTRADATA_SIZE(int width, int height)
+{
+	(void)height;
+	(void)width;
+
+	/*
+	 * In the future, calculate the size based on the w/h but just
+	 * hardcode it for now since 16K satisfies all current use cases.
+	 */
+	return 16 * 1024;
+}
+
+static inline unsigned int VENUS_Y_STRIDE(int color_fmt, int width)
+{
+	unsigned int alignment, stride = 0;
+
+	if (!width)
+		goto invalid_input;
+
+	switch (color_fmt) {
+	case COLOR_FMT_NV21:
+	case COLOR_FMT_NV12:
+	case COLOR_FMT_NV12_MVTB:
+	case COLOR_FMT_NV12_UBWC:
+		alignment = 128;
+		stride = MSM_MEDIA_ALIGN(width, alignment);
+		break;
+	case COLOR_FMT_NV12_BPP10_UBWC:
+		alignment = 256;
+		stride = MSM_MEDIA_ALIGN(width, 192);
+		stride = MSM_MEDIA_ALIGN(stride * 4/3, alignment);
+		break;
+	case COLOR_FMT_P010_UBWC:
+		alignment = 256;
+		stride = MSM_MEDIA_ALIGN(width * 2, alignment);
+		break;
+	default:
+		break;
+	}
+invalid_input:
+	return stride;
+}
+
+static inline unsigned int VENUS_UV_STRIDE(int color_fmt, int width)
+{
+	unsigned int alignment, stride = 0;
+
+	if (!width)
+		goto invalid_input;
+
+	switch (color_fmt) {
+	case COLOR_FMT_NV21:
+	case COLOR_FMT_NV12:
+	case COLOR_FMT_NV12_MVTB:
+	case COLOR_FMT_NV12_UBWC:
+		alignment = 128;
+		stride = MSM_MEDIA_ALIGN(width, alignment);
+		break;
+	case COLOR_FMT_NV12_BPP10_UBWC:
+		alignment = 256;
+		stride = MSM_MEDIA_ALIGN(width, 192);
+		stride = MSM_MEDIA_ALIGN(stride * 4/3, alignment);
+		break;
+	case COLOR_FMT_P010_UBWC:
+		alignment = 256;
+		stride = MSM_MEDIA_ALIGN(width * 2, alignment);
+		break;
+	default:
+		break;
+	}
+invalid_input:
+	return stride;
+}
+
+static inline unsigned int VENUS_Y_SCANLINES(int color_fmt, int height)
+{
+	unsigned int alignment, sclines = 0;
+
+	if (!height)
+		goto invalid_input;
+
+	switch (color_fmt) {
+	case COLOR_FMT_NV21:
+	case COLOR_FMT_NV12:
+	case COLOR_FMT_NV12_MVTB:
+	case COLOR_FMT_NV12_UBWC:
+		alignment = 32;
+		break;
+	case COLOR_FMT_NV12_BPP10_UBWC:
+	case COLOR_FMT_P010_UBWC:
+		alignment = 16;
+		break;
+	default:
+		return 0;
+	}
+	sclines = MSM_MEDIA_ALIGN(height, alignment);
+invalid_input:
+	return sclines;
+}
+
+static inline unsigned int VENUS_UV_SCANLINES(int color_fmt, int height)
+{
+	unsigned int alignment, sclines = 0;
+
+	if (!height)
+		goto invalid_input;
+
+	switch (color_fmt) {
+	case COLOR_FMT_NV21:
+	case COLOR_FMT_NV12:
+	case COLOR_FMT_NV12_MVTB:
+	case COLOR_FMT_NV12_BPP10_UBWC:
+	case COLOR_FMT_P010_UBWC:
+		alignment = 16;
+		break;
+	case COLOR_FMT_NV12_UBWC:
+		alignment = 32;
+		break;
+	default:
+		goto invalid_input;
+	}
+
+	sclines = MSM_MEDIA_ALIGN(height / 2, alignment);
+
+invalid_input:
+	return sclines;
+}
+
+static inline unsigned int VENUS_Y_META_STRIDE(int color_fmt, int width)
+{
+	int y_tile_width = 0, y_meta_stride = 0;
+
+	if (!width)
+		goto invalid_input;
+
+	switch (color_fmt) {
+	case COLOR_FMT_NV12_UBWC:
+	case COLOR_FMT_P010_UBWC:
+		y_tile_width = 32;
+		break;
+	case COLOR_FMT_NV12_BPP10_UBWC:
+		y_tile_width = 48;
+		break;
+	default:
+		goto invalid_input;
+	}
+
+	y_meta_stride = MSM_MEDIA_ROUNDUP(width, y_tile_width);
+	y_meta_stride = MSM_MEDIA_ALIGN(y_meta_stride, 64);
+
+invalid_input:
+	return y_meta_stride;
+}
+
+static inline unsigned int VENUS_Y_META_SCANLINES(int color_fmt, int height)
+{
+	int y_tile_height = 0, y_meta_scanlines = 0;
+
+	if (!height)
+		goto invalid_input;
+
+	switch (color_fmt) {
+	case COLOR_FMT_NV12_UBWC:
+		y_tile_height = 8;
+		break;
+	case COLOR_FMT_NV12_BPP10_UBWC:
+	case COLOR_FMT_P010_UBWC:
+		y_tile_height = 4;
+		break;
+	default:
+		goto invalid_input;
+	}
+
+	y_meta_scanlines = MSM_MEDIA_ROUNDUP(height, y_tile_height);
+	y_meta_scanlines = MSM_MEDIA_ALIGN(y_meta_scanlines, 16);
+
+invalid_input:
+	return y_meta_scanlines;
+}
+
+static inline unsigned int VENUS_UV_META_STRIDE(int color_fmt, int width)
+{
+	int uv_tile_width = 0, uv_meta_stride = 0;
+
+	if (!width)
+		goto invalid_input;
+
+	switch (color_fmt) {
+	case COLOR_FMT_NV12_UBWC:
+	case COLOR_FMT_P010_UBWC:
+		uv_tile_width = 16;
+		break;
+	case COLOR_FMT_NV12_BPP10_UBWC:
+		uv_tile_width = 24;
+		break;
+	default:
+		goto invalid_input;
+	}
+
+	uv_meta_stride = MSM_MEDIA_ROUNDUP(width / 2, uv_tile_width);
+	uv_meta_stride = MSM_MEDIA_ALIGN(uv_meta_stride, 64);
+
+invalid_input:
+	return uv_meta_stride;
+}
+
+static inline unsigned int VENUS_UV_META_SCANLINES(int color_fmt, int height)
+{
+	int uv_tile_height = 0, uv_meta_scanlines = 0;
+
+	if (!height)
+		goto invalid_input;
+
+	switch (color_fmt) {
+	case COLOR_FMT_NV12_UBWC:
+		uv_tile_height = 8;
+		break;
+	case COLOR_FMT_NV12_BPP10_UBWC:
+	case COLOR_FMT_P010_UBWC:
+		uv_tile_height = 4;
+		break;
+	default:
+		goto invalid_input;
+	}
+
+	uv_meta_scanlines = MSM_MEDIA_ROUNDUP(height / 2, uv_tile_height);
+	uv_meta_scanlines = MSM_MEDIA_ALIGN(uv_meta_scanlines, 16);
+
+invalid_input:
+	return uv_meta_scanlines;
+}
+
+static inline unsigned int VENUS_RGB_STRIDE(int color_fmt, int width)
+{
+	unsigned int alignment = 0, stride = 0, bpp = 4;
+
+	if (!width)
+		goto invalid_input;
+
+	switch (color_fmt) {
+	case COLOR_FMT_RGBA8888:
+		alignment = 128;
+		break;
+	case COLOR_FMT_RGB565_UBWC:
+		alignment = 128;
+		bpp = 2;
+		break;
+	case COLOR_FMT_RGBA8888_UBWC:
+	case COLOR_FMT_RGBA1010102_UBWC:
+		alignment = 256;
+		break;
+	default:
+		goto invalid_input;
+	}
+
+	stride = MSM_MEDIA_ALIGN(width * bpp, alignment);
+
+invalid_input:
+	return stride;
+}
+
+static inline unsigned int VENUS_RGB_SCANLINES(int color_fmt, int height)
+{
+	unsigned int alignment = 0, scanlines = 0;
+
+	if (!height)
+		goto invalid_input;
+
+	switch (color_fmt) {
+	case COLOR_FMT_RGBA8888:
+		alignment = 32;
+		break;
+	case COLOR_FMT_RGBA8888_UBWC:
+	case COLOR_FMT_RGBA1010102_UBWC:
+	case COLOR_FMT_RGB565_UBWC:
+		alignment = 16;
+		break;
+	default:
+		goto invalid_input;
+	}
+
+	scanlines = MSM_MEDIA_ALIGN(height, alignment);
+
+invalid_input:
+	return scanlines;
+}
+
+static inline unsigned int VENUS_RGB_META_STRIDE(int color_fmt, int width)
+{
+	int rgb_tile_width = 0, rgb_meta_stride = 0;
+
+	if (!width)
+		goto invalid_input;
+
+	switch (color_fmt) {
+	case COLOR_FMT_RGBA8888_UBWC:
+	case COLOR_FMT_RGBA1010102_UBWC:
+	case COLOR_FMT_RGB565_UBWC:
+		rgb_tile_width = 16;
+		break;
+	default:
+		goto invalid_input;
+	}
+
+	rgb_meta_stride = MSM_MEDIA_ROUNDUP(width, rgb_tile_width);
+	rgb_meta_stride = MSM_MEDIA_ALIGN(rgb_meta_stride, 64);
+
+invalid_input:
+	return rgb_meta_stride;
+}
+
+static inline unsigned int VENUS_RGB_META_SCANLINES(int color_fmt, int height)
+{
+	int rgb_tile_height = 0, rgb_meta_scanlines = 0;
+
+	if (!height)
+		goto invalid_input;
+
+	switch (color_fmt) {
+	case COLOR_FMT_RGBA8888_UBWC:
+	case COLOR_FMT_RGBA1010102_UBWC:
+	case COLOR_FMT_RGB565_UBWC:
+		rgb_tile_height = 4;
+		break;
+	default:
+		goto invalid_input;
+	}
+
+	rgb_meta_scanlines = MSM_MEDIA_ROUNDUP(height, rgb_tile_height);
+	rgb_meta_scanlines = MSM_MEDIA_ALIGN(rgb_meta_scanlines, 16);
+
+invalid_input:
+	return rgb_meta_scanlines;
+}
+
+static inline unsigned int VENUS_BUFFER_SIZE(
+	int color_fmt, int width, int height)
+{
+	const unsigned int extra_size = VENUS_EXTRADATA_SIZE(width, height);
+	unsigned int uv_alignment = 0, size = 0;
+	unsigned int y_plane, uv_plane, y_stride,
+		uv_stride, y_sclines, uv_sclines;
+	unsigned int y_ubwc_plane = 0, uv_ubwc_plane = 0;
+	unsigned int y_meta_stride = 0, y_meta_scanlines = 0;
+	unsigned int uv_meta_stride = 0, uv_meta_scanlines = 0;
+	unsigned int y_meta_plane = 0, uv_meta_plane = 0;
+	unsigned int rgb_stride = 0, rgb_scanlines = 0;
+	unsigned int rgb_plane = 0, rgb_ubwc_plane = 0, rgb_meta_plane = 0;
+	unsigned int rgb_meta_stride = 0, rgb_meta_scanlines = 0;
+
+	if (!width || !height)
+		goto invalid_input;
+
+	y_stride = VENUS_Y_STRIDE(color_fmt, width);
+	uv_stride = VENUS_UV_STRIDE(color_fmt, width);
+	y_sclines = VENUS_Y_SCANLINES(color_fmt, height);
+	uv_sclines = VENUS_UV_SCANLINES(color_fmt, height);
+	rgb_stride = VENUS_RGB_STRIDE(color_fmt, width);
+	rgb_scanlines = VENUS_RGB_SCANLINES(color_fmt, height);
+
+	switch (color_fmt) {
+	case COLOR_FMT_NV21:
+	case COLOR_FMT_NV12:
+		uv_alignment = 4096;
+		y_plane = y_stride * y_sclines;
+		uv_plane = uv_stride * uv_sclines + uv_alignment;
+		size = y_plane + uv_plane +
+				MSM_MEDIA_MAX(extra_size, 8 * y_stride);
+		size = MSM_MEDIA_ALIGN(size, 4096);
+		break;
+	case COLOR_FMT_NV12_MVTB:
+		uv_alignment = 4096;
+		y_plane = y_stride * y_sclines;
+		uv_plane = uv_stride * uv_sclines + uv_alignment;
+		size = y_plane + uv_plane;
+		size = 2 * size + extra_size;
+		size = MSM_MEDIA_ALIGN(size, 4096);
+		break;
+	case COLOR_FMT_NV12_UBWC:
+	case COLOR_FMT_NV12_BPP10_UBWC:
+		y_ubwc_plane = MSM_MEDIA_ALIGN(y_stride * y_sclines, 4096);
+		uv_ubwc_plane = MSM_MEDIA_ALIGN(uv_stride * uv_sclines, 4096);
+		y_meta_stride = VENUS_Y_META_STRIDE(color_fmt, width);
+		y_meta_scanlines = VENUS_Y_META_SCANLINES(color_fmt, height);
+		y_meta_plane = MSM_MEDIA_ALIGN(
+				y_meta_stride * y_meta_scanlines, 4096);
+		uv_meta_stride = VENUS_UV_META_STRIDE(color_fmt, width);
+		uv_meta_scanlines = VENUS_UV_META_SCANLINES(color_fmt, height);
+		uv_meta_plane = MSM_MEDIA_ALIGN(uv_meta_stride *
+					uv_meta_scanlines, 4096);
+
+		size = y_ubwc_plane + uv_ubwc_plane + y_meta_plane +
+			uv_meta_plane + MSM_MEDIA_MAX(extra_size,
+			64 * y_stride);
+		size = MSM_MEDIA_ALIGN(size, 4096);
+		break;
+	case COLOR_FMT_P010_UBWC:
+		y_ubwc_plane = MSM_MEDIA_ALIGN(y_stride * y_sclines, 4096);
+		uv_ubwc_plane = MSM_MEDIA_ALIGN(uv_stride * uv_sclines, 4096);
+		y_meta_stride = VENUS_Y_META_STRIDE(color_fmt, width);
+		y_meta_scanlines = VENUS_Y_META_SCANLINES(color_fmt, height);
+		y_meta_plane = MSM_MEDIA_ALIGN(
+				y_meta_stride * y_meta_scanlines, 4096);
+		uv_meta_stride = VENUS_UV_META_STRIDE(color_fmt, width);
+		uv_meta_scanlines = VENUS_UV_META_SCANLINES(color_fmt, height);
+		uv_meta_plane = MSM_MEDIA_ALIGN(uv_meta_stride *
+					uv_meta_scanlines, 4096);
+
+		size = y_ubwc_plane + uv_ubwc_plane + y_meta_plane +
+			uv_meta_plane;
+		size = MSM_MEDIA_ALIGN(size, 4096);
+		break;
+	case COLOR_FMT_RGBA8888:
+		rgb_plane = MSM_MEDIA_ALIGN(rgb_stride * rgb_scanlines, 4096);
+		size = rgb_plane;
+		size = MSM_MEDIA_ALIGN(size, 4096);
+		break;
+	case COLOR_FMT_RGBA8888_UBWC:
+		rgb_ubwc_plane = MSM_MEDIA_ALIGN(rgb_stride * rgb_scanlines,
+							4096);
+		rgb_meta_stride = VENUS_RGB_META_STRIDE(color_fmt, width);
+		rgb_meta_scanlines = VENUS_RGB_META_SCANLINES(color_fmt,
+					height);
+		rgb_meta_plane = MSM_MEDIA_ALIGN(rgb_meta_stride *
+					rgb_meta_scanlines, 4096);
+		size = rgb_ubwc_plane + rgb_meta_plane;
+		size = MSM_MEDIA_ALIGN(size, 4096);
+		break;
+	default:
+		break;
+	}
+invalid_input:
+	return size;
+}
+
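As a worked example of the NV12 branch, take a 1920x1080 COLOR_FMT_NV12
frame: y_stride = uv_stride = 1920, y_sclines = 1088, uv_sclines = 544, so
y_plane = 2088960, uv_plane = 1044480 + 4096 = 1048576, plus
max(16384, 8 * 1920) = 16384; the sum, 3153920, is already 4096-aligned:

	unsigned int sz = VENUS_BUFFER_SIZE(COLOR_FMT_NV12, 1920, 1080);
	/* sz == 3153920 */
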
+static inline unsigned int VENUS_VIEW2_OFFSET(
+	int color_fmt, int width, int height)
+{
+	unsigned int offset = 0;
+	unsigned int y_plane, uv_plane, y_stride,
+		uv_stride, y_sclines, uv_sclines;
+	if (!width || !height)
+		goto invalid_input;
+
+	y_stride = VENUS_Y_STRIDE(color_fmt, width);
+	uv_stride = VENUS_UV_STRIDE(color_fmt, width);
+	y_sclines = VENUS_Y_SCANLINES(color_fmt, height);
+	uv_sclines = VENUS_UV_SCANLINES(color_fmt, height);
+	switch (color_fmt) {
+	case COLOR_FMT_NV12_MVTB:
+		y_plane = y_stride * y_sclines;
+		uv_plane = uv_stride * uv_sclines;
+		offset = y_plane + uv_plane;
+		break;
+	default:
+		break;
+	}
+invalid_input:
+	return offset;
+}
+
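For the same 1920x1080 geometry in COLOR_FMT_NV12_MVTB, the second view
starts at y_plane + uv_plane = 1920 * 1088 + 1920 * 544 = 3133440 bytes;
unlike VENUS_BUFFER_SIZE, no 4096-byte chroma padding enters this offset.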
+#endif
diff -Nruw linux-4.4.115-fbx/include/uapi/media./msm_mercury.h linux-4.4.115-fbx/include/uapi/media/msm_mercury.h
--- linux-4.4.115-fbx/include/uapi/media./msm_mercury.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/include/uapi/media/msm_mercury.h	2019-01-22 16:16:28.603292590 +0100
@@ -0,0 +1,119 @@
+#ifndef __UAPI_MSM_MERCURY_H
+#define __UAPI_MSM_MERCURY_H
+
+#include <linux/types.h>
+#include <linux/ioctl.h>
+
+#define MSM_MERCURY_HW_VERSION_REG  0x0004 /* this offset does not exist in HW */
+
+#define OUTPUT_H2V1  0
+#define OUTPUT_H2V2  1
+#define OUTPUT_BYTE  6
+
+#define MSM_MERCURY_MODE_REALTIME_ENCODE 0
+#define MSM_MERCURY_MODE_OFFLINE_ENCODE 1
+#define MSM_MERCURY_MODE_REALTIME_ROTATION 2
+#define MSM_MERCURY_MODE_OFFLINE_ROTATION 3
+
+#define MSM_MERCURY_EVT_RESET       1
+#define MSM_MERCURY_EVT_FRAMEDONE	2
+#define MSM_MERCURY_EVT_ERR         3
+#define MSM_MERCURY_EVT_UNBLOCK     4
+
+#define MSM_MERCURY_HW_CMD_TYPE_READ      0
+#define MSM_MERCURY_HW_CMD_TYPE_WRITE     1
+#define MSM_MERCURY_HW_CMD_TYPE_WRITE_OR  2
+#define MSM_MERCURY_HW_CMD_TYPE_UWAIT     3
+#define MSM_MERCURY_HW_CMD_TYPE_MWAIT     4
+#define MSM_MERCURY_HW_CMD_TYPE_MDELAY    5
+#define MSM_MERCURY_HW_CMD_TYPE_UDELAY    6
+
+#define MSM_MCR_IOCTL_MAGIC 'g'
+
+#define MSM_MCR_IOCTL_GET_HW_VERSION \
+	_IOW(MSM_MCR_IOCTL_MAGIC, 1, struct msm_mercury_hw_cmd *)
+
+#define MSM_MCR_IOCTL_RESET \
+	_IOW(MSM_MCR_IOCTL_MAGIC, 2, struct msm_mercury_ctrl_cmd *)
+
+#define MSM_MCR_IOCTL_STOP \
+	_IOW(MSM_MCR_IOCTL_MAGIC, 3, struct msm_mercury_hw_cmds *)
+
+#define MSM_MCR_IOCTL_START \
+	_IOW(MSM_MCR_IOCTL_MAGIC, 4, struct msm_mercury_hw_cmds *)
+
+#define MSM_MCR_IOCTL_INPUT_BUF_CFG \
+	_IOW(MSM_MCR_IOCTL_MAGIC, 5, struct msm_mercury_buf *)
+
+#define MSM_MCR_IOCTL_INPUT_GET \
+	_IOW(MSM_MCR_IOCTL_MAGIC, 6, struct msm_mercury_buf *)
+
+#define MSM_MCR_IOCTL_INPUT_GET_UNBLOCK \
+	_IOW(MSM_MCR_IOCTL_MAGIC, 7, int)
+
+#define MSM_MCR_IOCTL_OUTPUT_BUF_CFG \
+	_IOW(MSM_MCR_IOCTL_MAGIC, 8, struct msm_mercury_buf *)
+
+#define MSM_MCR_IOCTL_OUTPUT_GET \
+	_IOW(MSM_MCR_IOCTL_MAGIC, 9, struct msm_mercury_buf *)
+
+#define MSM_MCR_IOCTL_OUTPUT_GET_UNBLOCK \
+	_IOW(MSM_MCR_IOCTL_MAGIC, 10, int)
+
+#define MSM_MCR_IOCTL_EVT_GET \
+	_IOW(MSM_MCR_IOCTL_MAGIC, 11, struct msm_mercury_ctrl_cmd *)
+
+#define MSM_MCR_IOCTL_EVT_GET_UNBLOCK \
+	_IOW(MSM_MCR_IOCTL_MAGIC, 12, int)
+
+#define MSM_MCR_IOCTL_HW_CMD \
+	_IOW(MSM_MCR_IOCTL_MAGIC, 13, struct msm_mercury_hw_cmd *)
+
+#define MSM_MCR_IOCTL_HW_CMDS \
+	_IOW(MSM_MCR_IOCTL_MAGIC, 14, struct msm_mercury_hw_cmds *)
+
+#define MSM_MCR_IOCTL_TEST_DUMP_REGION \
+	_IOW(MSM_MCR_IOCTL_MAGIC, 15, unsigned long)
+
+struct msm_mercury_ctrl_cmd {
+	uint32_t type;
+	uint32_t len;
+	void     *value;
+};
+
+struct msm_mercury_buf {
+	uint32_t type;
+	int      fd;
+	void     *vaddr;
+	uint32_t y_off;
+	uint32_t y_len;
+	uint32_t framedone_len;
+	uint32_t cbcr_off;
+	uint32_t cbcr_len;
+	uint32_t num_of_mcu_rows;
+	uint32_t offset;
+};
+
+struct msm_mercury_hw_cmd {
+
+	uint32_t type:4;
+	/* n microseconds of timeout for WAIT */
+	/* n microseconds of time for DELAY */
+	/* repeat n times for READ/WRITE */
+	/* max is 0xFFF, 4095 */
+	uint32_t n:12;
+	uint32_t offset:16;
+	uint32_t mask;
+	union {
+		/* for single READ/WRITE/WAIT, n = 1 */
+		uint32_t data;
+		uint32_t *pdata;/* for multiple READ/WRITE/WAIT, n > 1 */
+	};
+};
+
+struct msm_mercury_hw_cmds {
+	uint32_t m;	/* number of elements in the hw_cmd array */
+	struct msm_mercury_hw_cmd hw_cmd[1];
+};
+
+#endif /* __UAPI_MSM_MERCURY_H */
diff -Nruw linux-4.4.115-fbx/include/uapi/media./msm_sde_rotator.h linux-4.4.115-fbx/include/uapi/media/msm_sde_rotator.h
--- linux-4.4.115-fbx/include/uapi/media./msm_sde_rotator.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/include/uapi/media/msm_sde_rotator.h	2019-01-22 16:16:28.603292590 +0100
@@ -0,0 +1,114 @@
+#ifndef __UAPI_MSM_SDE_ROTATOR_H__
+#define __UAPI_MSM_SDE_ROTATOR_H__
+
+#include <linux/videodev2.h>
+#include <linux/types.h>
+#include <linux/ioctl.h>
+
+/* SDE Rotator pixel format definitions */
+#define SDE_PIX_FMT_XRGB_8888		V4L2_PIX_FMT_XBGR32
+#define SDE_PIX_FMT_ARGB_8888		V4L2_PIX_FMT_ABGR32
+#define SDE_PIX_FMT_ABGR_8888		V4L2_PIX_FMT_SDE_ABGR_8888
+#define SDE_PIX_FMT_RGBA_8888		V4L2_PIX_FMT_SDE_RGBA_8888
+#define SDE_PIX_FMT_BGRA_8888		V4L2_PIX_FMT_ARGB32
+#define SDE_PIX_FMT_RGBX_8888		V4L2_PIX_FMT_SDE_RGBX_8888
+#define SDE_PIX_FMT_BGRX_8888		V4L2_PIX_FMT_XRGB32
+#define SDE_PIX_FMT_XBGR_8888		V4L2_PIX_FMT_SDE_XBGR_8888
+#define SDE_PIX_FMT_RGBA_5551		V4L2_PIX_FMT_SDE_RGBA_5551
+#define SDE_PIX_FMT_ARGB_1555		V4L2_PIX_FMT_ARGB555
+#define SDE_PIX_FMT_ABGR_1555		V4L2_PIX_FMT_SDE_ABGR_1555
+#define SDE_PIX_FMT_BGRA_5551		V4L2_PIX_FMT_SDE_BGRA_5551
+#define SDE_PIX_FMT_BGRX_5551		V4L2_PIX_FMT_SDE_BGRX_5551
+#define SDE_PIX_FMT_RGBX_5551		V4L2_PIX_FMT_SDE_RGBX_5551
+#define SDE_PIX_FMT_XBGR_1555		V4L2_PIX_FMT_SDE_XBGR_1555
+#define SDE_PIX_FMT_XRGB_1555		V4L2_PIX_FMT_XRGB555
+#define SDE_PIX_FMT_ARGB_4444		V4L2_PIX_FMT_ARGB444
+#define SDE_PIX_FMT_RGBA_4444		V4L2_PIX_FMT_SDE_RGBA_4444
+#define SDE_PIX_FMT_BGRA_4444		V4L2_PIX_FMT_SDE_BGRA_4444
+#define SDE_PIX_FMT_ABGR_4444		V4L2_PIX_FMT_SDE_ABGR_4444
+#define SDE_PIX_FMT_RGBX_4444		V4L2_PIX_FMT_SDE_RGBX_4444
+#define SDE_PIX_FMT_XRGB_4444		V4L2_PIX_FMT_XRGB444
+#define SDE_PIX_FMT_BGRX_4444		V4L2_PIX_FMT_SDE_BGRX_4444
+#define SDE_PIX_FMT_XBGR_4444		V4L2_PIX_FMT_SDE_XBGR_4444
+#define SDE_PIX_FMT_RGB_888		V4L2_PIX_FMT_RGB24
+#define SDE_PIX_FMT_BGR_888		V4L2_PIX_FMT_BGR24
+#define SDE_PIX_FMT_RGB_565		V4L2_PIX_FMT_RGB565
+#define SDE_PIX_FMT_BGR_565		V4L2_PIX_FMT_SDE_BGR_565
+#define SDE_PIX_FMT_Y_CB_CR_H2V2	V4L2_PIX_FMT_YUV420
+#define SDE_PIX_FMT_Y_CR_CB_H2V2	V4L2_PIX_FMT_YVU420
+#define SDE_PIX_FMT_Y_CR_CB_GH2V2	V4L2_PIX_FMT_SDE_Y_CR_CB_GH2V2
+#define SDE_PIX_FMT_Y_CBCR_H2V2		V4L2_PIX_FMT_NV12
+#define SDE_PIX_FMT_Y_CRCB_H2V2		V4L2_PIX_FMT_NV21
+#define SDE_PIX_FMT_Y_CBCR_H1V2		V4L2_PIX_FMT_SDE_Y_CBCR_H1V2
+#define SDE_PIX_FMT_Y_CRCB_H1V2		V4L2_PIX_FMT_SDE_Y_CRCB_H1V2
+#define SDE_PIX_FMT_Y_CBCR_H2V1		V4L2_PIX_FMT_NV16
+#define SDE_PIX_FMT_Y_CRCB_H2V1		V4L2_PIX_FMT_NV61
+#define SDE_PIX_FMT_YCBYCR_H2V1		V4L2_PIX_FMT_YUYV
+#define SDE_PIX_FMT_Y_CBCR_H2V2_VENUS	V4L2_PIX_FMT_SDE_Y_CBCR_H2V2_VENUS
+#define SDE_PIX_FMT_Y_CRCB_H2V2_VENUS	V4L2_PIX_FMT_SDE_Y_CRCB_H2V2_VENUS
+#define SDE_PIX_FMT_RGBA_8888_UBWC	V4L2_PIX_FMT_RGBA8888_UBWC
+#define SDE_PIX_FMT_RGBX_8888_UBWC	V4L2_PIX_FMT_SDE_RGBX_8888_UBWC
+#define SDE_PIX_FMT_RGB_565_UBWC	V4L2_PIX_FMT_SDE_RGB_565_UBWC
+#define SDE_PIX_FMT_Y_CBCR_H2V2_UBWC	V4L2_PIX_FMT_NV12_UBWC
+#define SDE_PIX_FMT_RGBA_1010102	V4L2_PIX_FMT_SDE_RGBA_1010102
+#define SDE_PIX_FMT_RGBX_1010102	V4L2_PIX_FMT_SDE_RGBX_1010102
+#define SDE_PIX_FMT_ARGB_2101010	V4L2_PIX_FMT_SDE_ARGB_2101010
+#define SDE_PIX_FMT_XRGB_2101010	V4L2_PIX_FMT_SDE_XRGB_2101010
+#define SDE_PIX_FMT_BGRA_1010102	V4L2_PIX_FMT_SDE_BGRA_1010102
+#define SDE_PIX_FMT_BGRX_1010102	V4L2_PIX_FMT_SDE_BGRX_1010102
+#define SDE_PIX_FMT_ABGR_2101010	V4L2_PIX_FMT_SDE_ABGR_2101010
+#define SDE_PIX_FMT_XBGR_2101010	V4L2_PIX_FMT_SDE_XBGR_2101010
+#define SDE_PIX_FMT_RGBA_1010102_UBWC	V4L2_PIX_FMT_SDE_RGBA_1010102_UBWC
+#define SDE_PIX_FMT_RGBX_1010102_UBWC	V4L2_PIX_FMT_SDE_RGBX_1010102_UBWC
+#define SDE_PIX_FMT_Y_CBCR_H2V2_P010	V4L2_PIX_FMT_SDE_Y_CBCR_H2V2_P010
+#define SDE_PIX_FMT_Y_CBCR_H2V2_TP10	V4L2_PIX_FMT_SDE_Y_CBCR_H2V2_TP10
+#define SDE_PIX_FMT_Y_CBCR_H2V2_TP10_UBWC	V4L2_PIX_FMT_NV12_TP10_UBWC
+
+/**
+ * struct msm_sde_rotator_fence - v4l2 buffer fence info
+ * @index: id number of the buffer
+ * @type: enum v4l2_buf_type; buffer type
+ * @fd: file descriptor of the fence associated with this buffer
+ */
+struct msm_sde_rotator_fence {
+	__u32	index;
+	__u32	type;
+	__s32	fd;
+	__u32	reserved[5];
+};
+
+/**
+ * struct msm_sde_rotator_comp_ratio - v4l2 buffer compression ratio
+ * @index: id number of the buffer
+ * @type: enum v4l2_buf_type; buffer type
+ * @numer: numerator of the ratio
+ * @denom: denominator of the ratio
+ */
+struct msm_sde_rotator_comp_ratio {
+	__u32	index;
+	__u32	type;
+	__u32	numer;
+	__u32	denom;
+	__u32	reserved[4];
+};
+
+/* SDE Rotator private ioctl ID */
+#define VIDIOC_G_SDE_ROTATOR_FENCE \
+	_IOWR('V', BASE_VIDIOC_PRIVATE + 10, struct msm_sde_rotator_fence)
+#define VIDIOC_S_SDE_ROTATOR_FENCE \
+	_IOWR('V', BASE_VIDIOC_PRIVATE + 11, struct msm_sde_rotator_fence)
+#define VIDIOC_G_SDE_ROTATOR_COMP_RATIO \
+	_IOWR('V', BASE_VIDIOC_PRIVATE + 12, struct msm_sde_rotator_comp_ratio)
+#define VIDIOC_S_SDE_ROTATOR_COMP_RATIO \
+	_IOWR('V', BASE_VIDIOC_PRIVATE + 13, struct msm_sde_rotator_comp_ratio)
+
+/* SDE Rotator private control ID's */
+#define V4L2_CID_SDE_ROTATOR_SECURE	(V4L2_CID_USER_BASE + 0x1000)
+
+/*
+ * This control ID indicates that this context is associated with the
+ * secure camera.
+ */
+#define V4L2_CID_SDE_ROTATOR_SECURE_CAMERA	(V4L2_CID_USER_BASE + 0x2000)
+
+#endif /* __UAPI_MSM_SDE_ROTATOR_H__ */
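
[Usage sketch, not part of the patch: fetching the release fence of a queued
CAPTURE buffer through the private ioctl above. The /dev/video2 path is an
assumption; the rotator may enumerate under any video minor.]

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>
#include <media/msm_sde_rotator.h>

int main(void)
{
	struct msm_sde_rotator_fence fence = {
		.index = 0,				/* buffer id */
		.type  = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE,
	};
	int fd = open("/dev/video2", O_RDWR);		/* node: assumption */

	if (fd < 0)
		return 1;
	if (ioctl(fd, VIDIOC_G_SDE_ROTATOR_FENCE, &fence) == 0)
		printf("buf %u fence fd: %d\n", fence.index, fence.fd);
	close(fd);
	return 0;
}
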
diff -Nruw linux-4.4.115-fbx/include/uapi/media./msm_vidc.h linux-4.4.115-fbx/include/uapi/media/msm_vidc.h
--- linux-4.4.115-fbx/include/uapi/media./msm_vidc.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/include/uapi/media/msm_vidc.h	2019-10-29 09:26:25.553221870 +0100
@@ -0,0 +1,376 @@
+#ifndef __MSM_VIDC_H__
+#define __MSM_VIDC_H__
+
+#include <linux/types.h>
+
+#define MSM_VIDC_HAL_INTERLACE_COLOR_FORMAT_NV12	0x2
+#define MSM_VIDC_HAL_INTERLACE_COLOR_FORMAT_NV12_UBWC	0x8002
+
+struct msm_vidc_extradata_header {
+	unsigned int size;
+	unsigned int:32; /* Keeping binary compatibility */
+	unsigned int:32; /* with firmware and OpenMAX IL */
+	unsigned int type; /* msm_vidc_extradata_type */
+	unsigned int data_size;
+	unsigned char data[1];
+};
+
+struct msm_vidc_interlace_payload {
+	unsigned int format;
+	unsigned int color_format;
+};
+
+struct msm_vidc_framerate_payload {
+	unsigned int frame_rate;
+};
+
+struct msm_vidc_ts_payload {
+	unsigned int timestamp_lo;
+	unsigned int timestamp_hi;
+};
+
+struct msm_vidc_concealmb_payload {
+	unsigned int num_mbs;
+};
+
+struct msm_vidc_recoverysei_payload {
+	unsigned int flags;
+};
+
+struct msm_vidc_aspect_ratio_payload {
+	unsigned int size;
+	unsigned int version;
+	unsigned int port_index;
+	unsigned int aspect_width;
+	unsigned int aspect_height;
+};
+
+struct msm_vidc_mpeg2_seqdisp_payload {
+	unsigned int video_format;
+	unsigned int color_descp;
+	unsigned int color_primaries;
+	unsigned int transfer_char;
+	unsigned int matrix_coeffs;
+	unsigned int disp_width;
+	unsigned int disp_height;
+};
+
+struct msm_vidc_vc1_seqdisp_payload {
+	unsigned int prog_seg_format;
+	unsigned int uv_sampl_fmt;
+	unsigned int color_format;
+	unsigned int color_primaries;
+	unsigned int transfer_char;
+	unsigned int matrix_coeffs;
+	unsigned int aspect_ratio;
+	unsigned int aspect_horiz;
+	unsigned int aspect_vert;
+};
+
+struct msm_vidc_input_crop_payload {
+	unsigned int size;
+	unsigned int version;
+	unsigned int port_index;
+	unsigned int left;
+	unsigned int top;
+	unsigned int width;
+	unsigned int height;
+};
+
+struct msm_vidc_output_crop_payload {
+	unsigned int size;
+	unsigned int version;
+	unsigned int port_index;
+	unsigned int left;
+	unsigned int top;
+	unsigned int display_width;
+	unsigned int display_height;
+	unsigned int width;
+	unsigned int height;
+};
+
+
+struct msm_vidc_digital_zoom_payload {
+	unsigned int size;
+	unsigned int version;
+	unsigned int port_index;
+	unsigned int zoom_width;
+	unsigned int zoom_height;
+};
+
+struct msm_vidc_extradata_index {
+	unsigned int type;
+	union {
+		struct msm_vidc_input_crop_payload input_crop;
+		struct msm_vidc_digital_zoom_payload digital_zoom;
+		struct msm_vidc_aspect_ratio_payload aspect_ratio;
+	};
+};
+
+struct msm_vidc_panscan_window {
+	unsigned int panscan_height_offset;
+	unsigned int panscan_width_offset;
+	unsigned int panscan_window_width;
+	unsigned int panscan_window_height;
+};
+
+struct msm_vidc_panscan_window_payload {
+	unsigned int num_panscan_windows;
+	struct msm_vidc_panscan_window wnd[1];
+};
+
+struct msm_vidc_stream_userdata_payload {
+	unsigned int type;
+	unsigned int data[1];
+};
+
+struct msm_vidc_frame_qp_payload {
+	unsigned int frame_qp;
+};
+
+struct msm_vidc_frame_bits_info_payload {
+	unsigned int frame_bits;
+	unsigned int header_bits;
+};
+
+struct msm_vidc_s3d_frame_packing_payload {
+	unsigned int fpa_id;
+	unsigned int cancel_flag;
+	unsigned int fpa_type;
+	unsigned int quin_cunx_flag;
+	unsigned int content_interprtation_type;
+	unsigned int spatial_flipping_flag;
+	unsigned int frame0_flipped_flag;
+	unsigned int field_views_flag;
+	unsigned int current_frame_is_frame0_flag;
+	unsigned int frame0_self_contained_flag;
+	unsigned int frame1_self_contained_flag;
+	unsigned int frame0_graid_pos_x;
+	unsigned int frame0_graid_pos_y;
+	unsigned int frame1_graid_pos_x;
+	unsigned int frame1_graid_pos_y;
+	unsigned int fpa_reserved_byte;
+	unsigned int fpa_repetition_period;
+	unsigned int fpa_extension_flag;
+};
+
+struct msm_vidc_vqzip_sei_payload {
+	unsigned int size;
+	unsigned int data[1];
+};
+
+struct msm_vidc_yuv_stats_payload {
+	unsigned int frame_qp;
+	unsigned int texture;
+	unsigned int luma_in_q16;
+	unsigned int frame_difference;
+};
+
+struct msm_vidc_vpx_colorspace_payload {
+	unsigned int color_space;
+	unsigned int yuv_range_flag;
+	unsigned int sumsampling_x;
+	unsigned int sumsampling_y;
+};
+
+struct msm_vidc_roi_qp_payload {
+	int upper_qp_offset;
+	int lower_qp_offset;
+	unsigned int b_roi_info;
+	int mbi_info_size;
+	unsigned int data[1];
+};
+
+struct msm_vidc_mastering_display_colour_sei_payload {
+	unsigned int nDisplayPrimariesX[3];
+	unsigned int nDisplayPrimariesY[3];
+	unsigned int nWhitePointX;
+	unsigned int nWhitePointY;
+	unsigned int nMaxDisplayMasteringLuminance;
+	unsigned int nMinDisplayMasteringLuminance;
+};
+
+struct msm_vidc_content_light_level_sei_payload {
+	unsigned int nMaxContentLight;
+	unsigned int nMaxPicAverageLight;
+};
+
+struct msm_vidc_vui_display_info_payload {
+	unsigned int video_signal_present_flag;
+	unsigned int video_format;
+	unsigned int bit_depth_y;
+	unsigned int bit_depth_c;
+	unsigned int video_full_range_flag;
+	unsigned int color_description_present_flag;
+	unsigned int color_primaries;
+	unsigned int transfer_characteristics;
+	unsigned int matrix_coefficients;
+	unsigned int chroma_location_info_present_flag;
+	unsigned int chroma_format_idc;
+	unsigned int separate_color_plane_flag;
+	unsigned int chroma_sample_loc_type_top_field;
+	unsigned int chroma_sample_loc_type_bottom_field;
+};
+
+enum msm_vidc_extradata_type {
+	MSM_VIDC_EXTRADATA_NONE = 0x00000000,
+	MSM_VIDC_EXTRADATA_MB_QUANTIZATION = 0x00000001,
+	MSM_VIDC_EXTRADATA_INTERLACE_VIDEO = 0x00000002,
+	MSM_VIDC_EXTRADATA_VC1_FRAMEDISP = 0x00000003,
+	MSM_VIDC_EXTRADATA_VC1_SEQDISP = 0x00000004,
+	MSM_VIDC_EXTRADATA_TIMESTAMP = 0x00000005,
+	MSM_VIDC_EXTRADATA_S3D_FRAME_PACKING = 0x00000006,
+	MSM_VIDC_EXTRADATA_FRAME_RATE = 0x00000007,
+	MSM_VIDC_EXTRADATA_PANSCAN_WINDOW = 0x00000008,
+	MSM_VIDC_EXTRADATA_RECOVERY_POINT_SEI = 0x00000009,
+	MSM_VIDC_EXTRADATA_MPEG2_SEQDISP = 0x0000000D,
+	MSM_VIDC_EXTRADATA_STREAM_USERDATA = 0x0000000E,
+	MSM_VIDC_EXTRADATA_FRAME_QP = 0x0000000F,
+	MSM_VIDC_EXTRADATA_FRAME_BITS_INFO = 0x00000010,
+	MSM_VIDC_EXTRADATA_VQZIP_SEI = 0x00000011,
+	MSM_VIDC_EXTRADATA_ROI_QP = 0x00000013,
+#define MSM_VIDC_EXTRADATA_MASTERING_DISPLAY_COLOUR_SEI \
+	MSM_VIDC_EXTRADATA_MASTERING_DISPLAY_COLOUR_SEI
+	MSM_VIDC_EXTRADATA_MASTERING_DISPLAY_COLOUR_SEI = 0x00000015,
+#define MSM_VIDC_EXTRADATA_CONTENT_LIGHT_LEVEL_SEI \
+	MSM_VIDC_EXTRADATA_CONTENT_LIGHT_LEVEL_SEI
+	MSM_VIDC_EXTRADATA_CONTENT_LIGHT_LEVEL_SEI = 0x00000016,
+#define MSM_VIDC_EXTRADATA_PQ_INFO \
+	MSM_VIDC_EXTRADATA_PQ_INFO
+	MSM_VIDC_EXTRADATA_PQ_INFO = 0x00000017,
+	MSM_VIDC_EXTRADATA_INPUT_CROP = 0x0700000E,
+#define MSM_VIDC_EXTRADATA_OUTPUT_CROP \
+	MSM_VIDC_EXTRADATA_OUTPUT_CROP
+	MSM_VIDC_EXTRADATA_OUTPUT_CROP = 0x0700000F,
+	MSM_VIDC_EXTRADATA_DIGITAL_ZOOM = 0x07000010,
+#define MSM_VIDC_EXTRADATA_VPX_COLORSPACE_INFO \
+	MSM_VIDC_EXTRADATA_VPX_COLORSPACE_INFO
+	MSM_VIDC_EXTRADATA_VPX_COLORSPACE_INFO = 0x00000014,
+	MSM_VIDC_EXTRADATA_MULTISLICE_INFO = 0x7F100000,
+	MSM_VIDC_EXTRADATA_NUM_CONCEALED_MB = 0x7F100001,
+	MSM_VIDC_EXTRADATA_INDEX = 0x7F100002,
+	MSM_VIDC_EXTRADATA_ASPECT_RATIO = 0x7F100003,
+	MSM_VIDC_EXTRADATA_METADATA_LTR = 0x7F100004,
+	MSM_VIDC_EXTRADATA_METADATA_FILLER = 0x7FE00002,
+	MSM_VIDC_EXTRADATA_METADATA_MBI = 0x7F100005,
+#define MSM_VIDC_EXTRADATA_VUI_DISPLAY_INFO \
+	MSM_VIDC_EXTRADATA_VUI_DISPLAY_INFO
+	MSM_VIDC_EXTRADATA_VUI_DISPLAY_INFO = 0x7F100006,
+	MSM_VIDC_EXTRADATA_YUVSTATS_INFO = 0x7F100007,
+};
+enum msm_vidc_interlace_type {
+	MSM_VIDC_INTERLACE_FRAME_PROGRESSIVE = 0x01,
+	MSM_VIDC_INTERLACE_INTERLEAVE_FRAME_TOPFIELDFIRST = 0x02,
+	MSM_VIDC_INTERLACE_INTERLEAVE_FRAME_BOTTOMFIELDFIRST = 0x04,
+	MSM_VIDC_INTERLACE_FRAME_TOPFIELDFIRST = 0x08,
+	MSM_VIDC_INTERLACE_FRAME_BOTTOMFIELDFIRST = 0x10,
+};
+
+/* enum msm_vidc_framepack_type */
+#define MSM_VIDC_FRAMEPACK_CHECKERBOARD 0x00
+#define MSM_VIDC_FRAMEPACK_COLUMN_INTERLEAVE 0x01
+#define MSM_VIDC_FRAMEPACK_ROW_INTERLEAVE 0x02
+#define MSM_VIDC_FRAMEPACK_SIDE_BY_SIDE 0x03
+#define MSM_VIDC_FRAMEPACK_TOP_BOTTOM 0x04
+#define MSM_VIDC_FRAMEPACK_TEMPORAL_INTERLEAVE 0x05
+
+enum msm_vidc_recovery_sei {
+	MSM_VIDC_FRAME_RECONSTRUCTION_INCORRECT = 0x0,
+	MSM_VIDC_FRAME_RECONSTRUCTION_CORRECT = 0x01,
+	MSM_VIDC_FRAME_RECONSTRUCTION_APPROXIMATELY_CORRECT = 0x02,
+};
+enum msm_vidc_userdata_type {
+	MSM_VIDC_USERDATA_TYPE_FRAME = 0x1,
+	MSM_VIDC_USERDATA_TYPE_TOP_FIELD = 0x2,
+	MSM_VIDC_USERDATA_TYPE_BOTTOM_FIELD = 0x3,
+};
+
+/* See colour_primaries of ISO/IEC 14496 for significance */
+enum msm_vidc_h264_color_primaries_values {
+	MSM_VIDC_RESERVED_1 = 0,
+	MSM_VIDC_BT709_5 = 1,
+	MSM_VIDC_UNSPECIFIED = 2,
+	MSM_VIDC_RESERVED_2 = 3,
+	MSM_VIDC_BT470_6_M = 4,
+	MSM_VIDC_BT601_6_625 = 5,
+	MSM_VIDC_BT470_6_BG = MSM_VIDC_BT601_6_625,
+	MSM_VIDC_BT601_6_525 = 6,
+	MSM_VIDC_SMPTE_240M = 7,
+	MSM_VIDC_GENERIC_FILM = 8,
+	MSM_VIDC_BT2020 = 9,
+};
+
+enum msm_vidc_vp9_color_primaries_values {
+	MSM_VIDC_CS_UNKNOWN,
+	MSM_VIDC_CS_BT_601,
+	MSM_VIDC_CS_BT_709,
+	MSM_VIDC_CS_SMPTE_170,
+	MSM_VIDC_CS_SMPTE_240,
+	MSM_VIDC_CS_BT_2020,
+	MSM_VIDC_CS_RESERVED,
+	MSM_VIDC_CS_RGB,
+};
+
+enum msm_vidc_h264_matrix_coeff_values {
+	MSM_VIDC_MATRIX_RGB = 0,
+	MSM_VIDC_MATRIX_BT_709_5 = 1,
+	MSM_VIDC_MATRIX_UNSPECIFIED = 2,
+	MSM_VIDC_MATRIX_RESERVED = 3,
+	MSM_VIDC_MATRIX_FCC_47 = 4,
+	MSM_VIDC_MATRIX_601_6_625 = 5,
+	MSM_VIDC_MATRIX_BT470_BG = MSM_VIDC_MATRIX_601_6_625,
+	MSM_VIDC_MATRIX_601_6_525 = 6,
+	MSM_VIDC_MATRIX_SMPTE_170M = MSM_VIDC_MATRIX_601_6_525,
+	MSM_VIDC_MATRIX_SMPTE_240M = 7,
+	MSM_VIDC_MATRIX_Y_CG_CO = 8,
+	MSM_VIDC_MATRIX_BT_2020 = 9,
+	MSM_VIDC_MATRIX_BT_2020_CONST = 10,
+};
+
+enum msm_vidc_h264_transfer_chars_values {
+	MSM_VIDC_TRANSFER_RESERVED_1 = 0,
+	MSM_VIDC_TRANSFER_BT709_5 = 1,
+	MSM_VIDC_TRANSFER_UNSPECIFIED = 2,
+	MSM_VIDC_TRANSFER_RESERVED_2 = 3,
+	MSM_VIDC_TRANSFER_BT_470_6_M = 4,
+	MSM_VIDC_TRANSFER_BT_470_6_BG = 5,
+	MSM_VIDC_TRANSFER_601_6_625 = 6,
+	MSM_VIDC_TRANSFER_601_6_525 = MSM_VIDC_TRANSFER_601_6_625,
+	MSM_VIDC_TRANSFER_SMPTE_240M = 7,
+	MSM_VIDC_TRANSFER_LINEAR = 8,
+	MSM_VIDC_TRANSFER_LOG_100_1 = 9,
+	MSM_VIDC_TRANSFER_LOG_100_SQRT10_1 = 10,
+	MSM_VIDC_TRANSFER_IEC_61966 = 11,
+	MSM_VIDC_TRANSFER_BT_1361 = 12,
+	MSM_VIDC_TRANSFER_SRGB = 13,
+	MSM_VIDC_TRANSFER_BT_2020_10 = 14,
+	MSM_VIDC_TRANSFER_BT_2020_12 = 15,
+};
+
+enum msm_vidc_pixel_depth {
+	MSM_VIDC_BIT_DEPTH_8,
+	MSM_VIDC_BIT_DEPTH_10,
+	MSM_VIDC_BIT_DEPTH_UNSUPPORTED = 0XFFFFFFFF,
+};
+
+enum msm_vidc_video_format {
+	MSM_VIDC_COMPONENT,
+	MSM_VIDC_PAL,
+	MSM_VIDC_NTSC,
+	MSM_VIDC_SECAM,
+	MSM_VIDC_MAC,
+	MSM_VIDC_UNSPECIFIED_FORMAT,
+	MSM_VIDC_RESERVED_1_FORMAT,
+	MSM_VIDC_RESERVED_2_FORMAT,
+};
+
+enum msm_vidc_color_desc_flag {
+	MSM_VIDC_COLOR_DESC_NOT_PRESENT,
+	MSM_VIDC_COLOR_DESC_PRESENT,
+};
+
+/*enum msm_vidc_pic_struct */
+#define MSM_VIDC_PIC_STRUCT_MAYBE_INTERLACED 0x0
+#define MSM_VIDC_PIC_STRUCT_PROGRESSIVE 0x1
+
+#endif
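
[Usage sketch, not part of the patch: walking a chain of extradata blocks
appended to a decoded buffer. Each header's `size` covers the whole block and
doubles as the stride; terminating on MSM_VIDC_EXTRADATA_NONE is the usual
convention but is an assumption, as the header does not spell it out.]

#include <stdio.h>
#include <media/msm_vidc.h>

static void walk_extradata(void *base, unsigned int total)
{
	unsigned char *p = base, *end = p + total;

	while (p + sizeof(struct msm_vidc_extradata_header) <= end) {
		struct msm_vidc_extradata_header *h = (void *)p;

		if (h->type == MSM_VIDC_EXTRADATA_NONE || h->size == 0)
			break;
		if (h->type == MSM_VIDC_EXTRADATA_INTERLACE_VIDEO) {
			struct msm_vidc_interlace_payload *pl =
				(void *)h->data;
			printf("interlace format 0x%x, color format 0x%x\n",
			       pl->format, pl->color_format);
		}
		p += h->size;	/* size is the full block length (stride) */
	}
}
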
diff -Nruw linux-4.4.115-fbx/include/uapi/media./msm_vpu.h linux-4.4.115-fbx/include/uapi/media/msm_vpu.h
--- linux-4.4.115-fbx/include/uapi/media./msm_vpu.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/include/uapi/media/msm_vpu.h	2019-01-22 16:16:28.603292590 +0100
@@ -0,0 +1,475 @@
+#ifndef _H_MSM_VPU_H_
+#define _H_MSM_VPU_H_
+
+#include <linux/videodev2.h>
+
+/*
+ * V 4 L 2   E X T E N S I O N S   B Y   V P U
+ */
+
+/*
+ * v4l2_buffer:
+ *
+ * VPU uses standard V4L2 buffer flags, and defines some custom
+ * flags (used in v4l2_buffer.flags field):
+ *	V4L2_QCOM_BUF_FLAG_EOS: buffer flag indicating end of stream
+ *	V4L2_BUF_FLAG_CDS_ENABLE: buffer flag to enable chroma down-sampling
+ */
+#define V4L2_BUF_FLAG_CDS_ENABLE	0x10000000
+
+/*
+ * VPU uses multi-plane v4l2_buffer in the following manner:
+ * each plane can be a separate ION buffer, or all planes can come from the
+ * same ION buffer (in that case all planes share the same fd but have
+ * different offsets).
+ *
+ * For struct v4l2_plane
+ *   fd: ION fd representing the ION buffer this plane is from
+ *   reserved[0]: offset of this plane from the start of the ION buffer in
+ *		bytes. Needed when all planes are from the same ION buffer.
+ */
+#define V4L2_PLANE_MEM_OFFSET		0
+
+/*
+ * struct v4l2_format:
+ * always use v4l2_pix_format_mplane, even when there is only one plane
+ *
+ * v4l2_pix_format_mplane:
+ *
+ * VPU uses v4l2_pix_format_mplane for pixel format configuration
+ * The following members of this structure are either extended or changed:
+ *    pixelformat: extended, a few more private formats added
+ *    colorspace:  possible values are enum vpu_colorspace
+ *    field: when it is V4L2_FIELD_ALTERNATE, flags from vpu format extension
+ *           specifies which field first.
+ *    reserved[]:  VPU format extension. struct v4l2_format_vpu_extension
+ */
+enum vpu_colorspace {
+	VPU_CS_MIN = 0,
+	/* RGB with full range */
+	VPU_CS_RGB_FULL = 1,
+	/* RGB with limited range */
+	VPU_CS_RGB_LIMITED = 2,
+	/* REC 601 with full range */
+	VPU_CS_REC601_FULL = 3,
+	/* REC 601 with limited range */
+	VPU_CS_REC601_LIMITED = 4,
+	/* REC 709 with full range */
+	VPU_CS_REC709_FULL = 5,
+	/* REC 709 with limited range */
+	VPU_CS_REC709_LIMITED = 6,
+	/* SMPTE 240 with full range */
+	VPU_CS_SMPTE240_FULL = 7,
+	/* SMPTE 240 with limited range */
+	VPU_CS_SMPTE240_LIMITED = 8,
+	VPU_CS_MAX = 9,
+};
+
+
+#define VPU_FMT_EXT_FLAG_BT	1	/* bottom field first */
+#define VPU_FMT_EXT_FLAG_TB	2	/* top field first */
+#define VPU_FMT_EXT_FLAG_3D	4	/* 3D format */
+struct v4l2_format_vpu_extension {
+	__u8		flag;
+	__u8		gap_in_lines;
+};
+
+/*
+ * Supported pixel formats:
+ *
+ * VPU supported pixel format fourcc codes (use in s_fmt pixelformat field).
+ *	Can be enumerated using VIDIOC_ENUM_FMT
+ *
+ * Standard V4L2 formats, defined in videodev2.h :
+ *
+ * V4L2_PIX_FMT_RGB24		24 bit RGB-8-8-8
+ * V4L2_PIX_FMT_RGB32		32 bit XRGB-8-8-8-8
+ * V4L2_PIX_FMT_BGR24		24 bit BGR-8-8-8
+ * V4L2_PIX_FMT_BGR32		32 bit BGRX-8-8-8-8
+ *
+ * V4L2_PIX_FMT_NV12		12 bit YUV 4:2:0  semi-planar NV12
+ * V4L2_PIX_FMT_NV21		12 bit YUV 4:2:0  semi-planar NV21
+ * V4L2_PIX_FMT_YUYV		16 bit YUYV 4:2:2 interleaved
+ * V4L2_PIX_FMT_YVYU		16 bit YVYU 4:2:2 interleaved
+ * V4L2_PIX_FMT_UYVY		16 bit UYVY 4:2:2 interleaved
+ * V4L2_PIX_FMT_VYUY		16 bit VYUY 4:2:2 interleaved
+ *
+ *
+ * Private VPU formats, defined here :
+ *
+ * V4L2_PIX_FMT_XRGB2		32 bit XRGB-2-10-10-10
+ * V4L2_PIX_FMT_XBGR2		32 bit XBGR-2-10-10-10
+ *
+ * V4L2_PIX_FMT_YUYV10		24 bit YUYV 4:2:2  10 bit per component loose
+ * V4L2_PIX_FMT_YUV8		24 bit YUV 4:4:4   8 bit per component
+ * V4L2_PIX_FMT_YUV10		32 bit YUV 4:4:4   10 bit per component loose
+ * V4L2_PIX_FMT_YUYV10BWC	10 bit YUYV 4:2:2  compressed, for output only
+ */
+#define V4L2_PIX_FMT_XRGB2		v4l2_fourcc('X', 'R', 'G', '2')
+#define V4L2_PIX_FMT_XBGR2		v4l2_fourcc('X', 'B', 'G', '2')
+#define V4L2_PIX_FMT_YUYV10		v4l2_fourcc('Y', 'U', 'Y', 'L')
+#define V4L2_PIX_FMT_YUV8		v4l2_fourcc('Y', 'U', 'V', '8')
+#define V4L2_PIX_FMT_YUV10		v4l2_fourcc('Y', 'U', 'V', 'L')
+#define V4L2_PIX_FMT_YUYV10BWC		v4l2_fourcc('Y', 'B', 'W', 'C')
+
+/*
+ * VIDIOC_S_INPUT/VIDIOC_S_OUTPUT
+ *
+ * The single integer passed by these commands specifies port type in the
+ * lower 16 bits, and pipe bit mask in the higher 16 bits.
+ */
+/* input / output types */
+#define VPU_INPUT_TYPE_HOST			0
+#define VPU_INPUT_TYPE_VCAP			1
+#define VPU_OUTPUT_TYPE_HOST			0
+#define VPU_OUTPUT_TYPE_DISPLAY			1
+
+/* input / output pipe bit fields */
+#define VPU_PIPE_VCAP0			(1 << 16)
+#define VPU_PIPE_VCAP1			(1 << 17)
+#define VPU_PIPE_DISPLAY0		(1 << 18)
+#define VPU_PIPE_DISPLAY1		(1 << 19)
+#define VPU_PIPE_DISPLAY2		(1 << 20)
+#define VPU_PIPE_DISPLAY3		(1 << 21)
+
+/*
+ * V P U   E V E N T S :   I D s   A N D   D A T A   P A Y L O A D S
+ */
+
+/*
+ * Event ID: set in type field of struct v4l2_event
+ * payload: returned in u.data array of struct v4l2_event
+ *
+ *
+ * VPU_EVENT_FLUSH_DONE: Done flushing buffers after VPU_FLUSH_BUFS ioctl
+ * payload data: enum v4l2_buf_type (buffer type of flushed port)
+ *
+ * VPU_EVENT_ACTIVE_REGION_CHANGED: New Active Region Detected
+ * payload data: struct v4l2_rect (new active region rectangle)
+ *
+ * VPU_EVENT_SESSION_TIMESTAMP: New Session timestamp
+ * payload data: struct vpu_info_frame_timestamp
+ *
+ * VPU_EVENT_SESSION_CREATED: New session has been created
+ * payload data: int (number of the attached session)
+ *
+ * VPU_EVENT_SESSION_FREED: Session is detached and free
+ * payload data: int (number of the detached session)
+ *
+ * VPU_EVENT_SESSION_CLIENT_EXITED: Indicates that clients of current
+ *	session have exited.
+ * payload data: int (number of all remaining clients for this session)
+ *
+ * VPU_EVENT_HW_ERROR: a hardware error occurred in VPU
+ * payload data: NULL
+ *
+ * VPU_EVENT_INVALID_CONFIG: invalid VPU session configuration
+ * payload data: NULL
+ *
+ * VPU_EVENT_FAILED_SESSION_STREAMING: Failed to stream session
+ * payload data: NULL
+ */
+#define VPU_PRIVATE_EVENT_BASE (V4L2_EVENT_PRIVATE_START + 6 * 1000)
+enum VPU_PRIVATE_EVENT {
+	VPU_EVENT_START = VPU_PRIVATE_EVENT_BASE,
+
+	VPU_EVENT_FLUSH_DONE = VPU_EVENT_START + 1,
+	VPU_EVENT_ACTIVE_REGION_CHANGED = VPU_EVENT_START + 2,
+	VPU_EVENT_SESSION_TIMESTAMP = VPU_EVENT_START + 3,
+	VPU_EVENT_SESSION_CREATED = VPU_EVENT_START + 4,
+	VPU_EVENT_SESSION_FREED = VPU_EVENT_START + 5,
+	VPU_EVENT_SESSION_CLIENT_EXITED = VPU_EVENT_START + 6,
+
+	VPU_EVENT_HW_ERROR = VPU_EVENT_START + 11,
+	VPU_EVENT_INVALID_CONFIG = VPU_EVENT_START + 12,
+	VPU_EVENT_FAILED_SESSION_STREAMING = VPU_EVENT_START + 13,
+
+	VPU_EVENT_END
+};
+
+
+/*
+ * V P U   C O N T R O L S :   S T R U C T S   A N D   I D s
+ *
+ * Controls are video processing parameters
+ */
+
+/*
+ * Standard VPU Controls
+ */
+struct vpu_ctrl_standard {
+	__u32 enable;		/* boolean: 0=disable, else=enable */
+	__s32 value;
+};
+
+struct vpu_ctrl_auto_manual {
+	__u32 enable;		/* boolean: 0=disable, else=enable */
+	__u32 auto_mode;	/* boolean: 0=manual, else=automatic */
+	__s32 value;
+};
+
+struct vpu_ctrl_range_mapping {
+	__u32 enable;		/* boolean: 0=disable, else=enable */
+	__u32 y_range;		/* the range mapping set for Y [0, 7] */
+	__u32 uv_range;		/* the range mapping set for UV [0, 7] */
+};
+
+#define VPU_ACTIVE_REGION_N_EXCLUSIONS 1
+struct vpu_ctrl_active_region_param {
+	__u32               enable; /* boolean: 0=disable, else=enable */
+	/* number of exclusion regions */
+	__u32               num_exclusions;
+	/* roi where active region detection is applied */
+	struct v4l2_rect    detection_region;
+	/* roi(s) excluded from active region detection */
+	struct v4l2_rect    excluded_regions[VPU_ACTIVE_REGION_N_EXCLUSIONS];
+};
+
+struct vpu_ctrl_deinterlacing_mode {
+	__u32 field_polarity;
+	__u32 mvp_mode;
+};
+
+struct vpu_ctrl_hqv {
+	__u32 enable;
+	/* strength control of all sharpening features [0, 100] */
+	__u32 sharpen_strength;
+	/* strength control of Auto NR feature [0, 100] */
+	__u32 auto_nr_strength;
+};
+
+struct vpu_info_frame_timestamp {
+	/* presentation timestamp of the frame */
+	__u32 pts_low;
+	__u32 pts_high;
+	/* qtimer snapshot */
+	__u32 qtime_low;
+	__u32 qtime_high;
+};
+
+struct vpu_control {
+	__u32 control_id;
+	union control_data {
+		__s32 value;
+		struct vpu_ctrl_standard standard;
+		struct vpu_ctrl_auto_manual auto_manual;
+		struct vpu_ctrl_range_mapping range_mapping;
+		struct vpu_ctrl_active_region_param active_region_param;
+		struct v4l2_rect active_region_result;
+		struct vpu_ctrl_deinterlacing_mode deinterlacing_mode;
+		struct vpu_ctrl_hqv hqv;
+		struct vpu_info_frame_timestamp timestamp;
+		__u8 reserved[124];
+	} data;
+};
+
+/*
+ * IDs for standard controls (use in control_id field of struct vpu_control)
+ *
+ * VPU_CTRL_NOISE_REDUCTION: noise reduction level, data: auto_manual,
+ * value: [0, 100] (step in increments of 25).
+ *
+ * VPU_CTRL_IMAGE_ENHANCEMENT: image enhancement level, data: auto_manual,
+ * value: [-100, 100] (step in increments of 1).
+ *
+ * VPU_CTRL_ANAMORPHIC_SCALING: anamorphic scaling config, data: standard,
+ * value: [0, 100] (step in increments of 1).
+ *
+ * VPU_CTRL_DIRECTIONAL_INTERPOLATION: directional interpolation config
+ * data: standard, value: [0, 100] (step in increments of 1).
+ *
+ * VPU_CTRL_BACKGROUND_COLOR: , data: value,
+ * value: red[0:7] green[8:15] blue[16:23] alpha[24:31]
+ *
+ * VPU_CTRL_RANGE_MAPPING: Y/UV range mapping, data: range_mapping,
+ * y_range: [0, 7], uv_range: [0, 7] (step in increments of 1).
+ *
+ * VPU_CTRL_DEINTERLACING_MODE: deinterlacing mode, data: deinterlacing_mode,
+ * field_polarity: [0, 2], mvp_mode: [0, 2] (step in increments of 1).
+ *
+ * VPU_CTRL_ACTIVE_REGION_PARAM: active region detection parameters (set only)
+ * data: active_region_param,
+ *
+ * VPU_CTRL_ACTIVE_REGION_RESULT: detected active region roi (get only)
+ * data: active_region_result
+ *
+ * VPU_CTRL_PRIORITY: Session priority, data: value,
+ * value: high 100, normal 50
+ *
+ * VPU_CTRL_CONTENT_PROTECTION: input content protection status, data: value,
+ * value: secure 1, non-secure 0
+ *
+ * VPU_CTRL_DISPLAY_REFRESH_RATE: display refresh rate (set only)
+ * data: value (set to __u32 16.16 format)
+ *
+ * VPU_CTRL_HQV: hqv block config, data: hqv,
+ * sharpen_strength: [0, 100] (step in increments of 25),
+ * auto_nr_strength: [0, 100] (step in increments of 1).
+ *
+ * VPU_CTRL_HQV_SHARPEN: , data: value,
+ * sharpen_strength: [0, 100] (step in increments of 1).
+ *
+ * VPU_CTRL_HQV_AUTONR: , data: value,
+ * auto_nr_strength: [0, 100] (step in increments of 1).
+ *
+ * VPU_CTRL_ACE: , data: value
+ *
+ * VPU_CTRL_ACE_BRIGHTNESS: , data: value,
+ * value: [-100, 100] (step in increments of 1).
+ *
+ * VPU_CTRL_ACE_CONTRAST: , data: value,
+ * value: [-100, 100] (step in increments of 1).
+ *
+ * VPU_CTRL_2D3D: , data: value,
+ * value: 1 enabled, 0 disabled
+ *
+ * VPU_CTRL_2D3D_DEPTH: , data: value,
+ * value: [0, 100] (step in increments of 1).
+ *
+ * VPU_CTRL_TIMESTAMP_INFO_MODE: timestamp reporting mode,
+ *  data: value specifying how frequently timestamp info is reported,
+ *  in frames
+ *
+ * VPU_INFO_TIMESTAMP: timestamp information (get only)
+ *  data: struct vpu_info_frame_timestamp
+ *
+ * VPU_CTRL_FRC: enable/disable FRC, data: value,
+ * value: 1 enable, 0 disable
+ *
+ * VPU_CTRL_FRC_MOTION_SMOOTHNESS: , data: value,
+ * value: [0, 100] (step in increments of 1).
+ *
+ * VPU_CTRL_FRC_MOTION_CLEAR: , data: value,
+ * value: [0, 100] (step in increments of 1).
+ *
+ * VPU_CTRL_LATENCY: session latency, data: value in us
+ *
+ * VPU_CTRL_LATENCY_MODE: data: value (ultra low, low, etc.)
+ *
+ * VPU_INFO_STATISTICS: frames dropped, etc (get only),
+ *  data: reserved
+ */
+#define VPU_CTRL_ID_MIN						0
+
+#define VPU_CTRL_NOISE_REDUCTION				1
+#define VPU_CTRL_IMAGE_ENHANCEMENT				2
+#define VPU_CTRL_ANAMORPHIC_SCALING				3
+#define VPU_CTRL_DIRECTIONAL_INTERPOLATION			4
+#define VPU_CTRL_BACKGROUND_COLOR				5
+#define VPU_CTRL_RANGE_MAPPING					6
+#define VPU_CTRL_DEINTERLACING_MODE				7
+#define VPU_CTRL_ACTIVE_REGION_PARAM				8
+#define VPU_CTRL_ACTIVE_REGION_RESULT				9
+#define VPU_CTRL_PRIORITY					10
+#define VPU_CTRL_CONTENT_PROTECTION				11
+#define VPU_CTRL_DISPLAY_REFRESH_RATE				12
+
+#define VPU_CTRL_HQV						20
+#define VPU_CTRL_HQV_SHARPEN					21
+#define VPU_CTRL_HQV_AUTONR					22
+#define VPU_CTRL_ACE						23
+#define VPU_CTRL_ACE_BRIGHTNESS					24
+#define VPU_CTRL_ACE_CONTRAST					25
+#define VPU_CTRL_2D3D						26
+#define VPU_CTRL_2D3D_DEPTH					27
+#define VPU_CTRL_FRC						28
+#define VPU_CTRL_FRC_MOTION_SMOOTHNESS				29
+#define VPU_CTRL_FRC_MOTION_CLEAR				30
+
+#define VPU_INFO_TIMESTAMP					35
+#define VPU_CTRL_TIMESTAMP_INFO_MODE				36
+#define VPU_INFO_STATISTICS					37
+#define VPU_CTRL_LATENCY					38
+#define VPU_CTRL_LATENCY_MODE					39
+
+#define VPU_CTRL_ID_MAX						40
+
+
+/*
+ * Extended VPU Controls (large data payloads)
+ */
+#define VPU_MAX_EXT_DATA_SIZE	720
+struct vpu_control_extended {
+	/*
+	 * extended control type
+	 * 0: system
+	 * 1: session
+	 */
+	__u32 type;
+
+	/*
+	 * size and ptr of the data to send
+	 * maximum VPU_MAX_EXT_DATA_SIZE bytes
+	 */
+	__u32 data_len;
+	void __user *data_ptr;
+
+	/*
+	 * size and ptr of the buffer to recv data
+	 * maximum VPU_MAX_EXT_DATA_SIZE bytes
+	 */
+	__u32 buf_size;
+	void __user *buf_ptr;
+};
+
+/*
+ * Port specific controls
+ */
+struct vpu_control_port {
+	__u32 control_id;
+	__u32 port;	/* 0: INPUT, 1: OUTPUT */
+	union control_port_data {
+		__u32 framerate;
+	} data;
+};
+
+/*
+ * IDs for port controls (use in control_id field of struct vpu_control_port)
+ *
+ * VPU_CTRL_FPS: set frame rate, data: __u32, 16.16 format
+ */
+#define	VPU_CTRL_FPS				1000
+
+
+/*
+ * V P U   D E V I C E   P R I V A T E   I O C T L   C O D E S
+ */
+
+/* VPU Session ioctls (deprecated) */
+#define VPU_ATTACH_TO_SESSION	_IOW('V', (BASE_VIDIOC_PRIVATE + 1), int)
+
+/* VPU Session ioctls */
+#define VPU_QUERY_SESSIONS	_IOR('V', (BASE_VIDIOC_PRIVATE + 0), int)
+#define VPU_CREATE_SESSION	_IOR('V', (BASE_VIDIOC_PRIVATE + 2), int)
+#define VPU_JOIN_SESSION	_IOW('V', (BASE_VIDIOC_PRIVATE + 3), int)
+
+/* Enable second VPU output port and use with current client */
+#define VPU_CREATE_OUTPUT2	_IO('V', (BASE_VIDIOC_PRIVATE + 5))
+
+/* Explicit commit of session configuration */
+#define VPU_COMMIT_CONFIGURATION    _IO('V', (BASE_VIDIOC_PRIVATE + 10))
+
+/* Flush all buffers of given type (port) */
+#define VPU_FLUSH_BUFS		_IOW('V', (BASE_VIDIOC_PRIVATE + 15), \
+		enum v4l2_buf_type)
+
+/* VPU controls get/set ioctls (for most controls with small data) */
+#define VPU_G_CONTROL		_IOWR('V', (BASE_VIDIOC_PRIVATE + 20), \
+						struct vpu_control)
+#define VPU_S_CONTROL		_IOW('V', (BASE_VIDIOC_PRIVATE + 21), \
+						struct vpu_control)
+
+/* extended control set/get ioctls (large data payloads) */
+#define VPU_G_CONTROL_EXTENDED	_IOWR('V', (BASE_VIDIOC_PRIVATE + 22), \
+		struct vpu_control_extended)
+#define VPU_S_CONTROL_EXTENDED	_IOW('V', (BASE_VIDIOC_PRIVATE + 23), \
+		struct vpu_control_extended)
+
+/* VPU port (input/output) specific controls get/set ioctls */
+#define VPU_G_CONTROL_PORT	_IOWR('V', (BASE_VIDIOC_PRIVATE + 24), \
+						struct vpu_control_port)
+#define VPU_S_CONTROL_PORT	_IOW('V', (BASE_VIDIOC_PRIVATE + 25), \
+						struct vpu_control_port)
+
+#endif /* _H_MSM_VPU_H_ */
+
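
[Usage sketch, not part of the patch: per the VIDIOC_S_INPUT comment block
above, the port type goes into the low 16 bits and the pipe mask into the
high 16 bits; controls travel through VPU_S_CONTROL. The video node and the
control value chosen here are assumptions.]

#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>
#include <media/msm_vpu.h>

int main(void)
{
	int input = VPU_INPUT_TYPE_VCAP | VPU_PIPE_VCAP0; /* type | pipe mask */
	struct vpu_control ctrl = {
		.control_id = VPU_CTRL_NOISE_REDUCTION,
		.data.auto_manual = {
			.enable = 1,
			.auto_mode = 0,	/* manual */
			.value = 50,	/* [0, 100], steps of 25 */
		},
	};
	int fd = open("/dev/video0", O_RDWR);	/* node: assumption */

	if (fd < 0)
		return 1;
	ioctl(fd, VIDIOC_S_INPUT, &input);
	ioctl(fd, VPU_S_CONTROL, &ctrl);
	close(fd);
	return 0;
}
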
diff -Nruw linux-4.4.115-fbx/include/uapi/media./radio-iris-commands.h linux-4.4.115-fbx/include/uapi/media/radio-iris-commands.h
--- linux-4.4.115-fbx/include/uapi/media./radio-iris-commands.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/include/uapi/media/radio-iris-commands.h	2019-01-22 16:16:28.603292590 +0100
@@ -0,0 +1,108 @@
+#ifndef __UAPI_RADIO_IRIS_COMMANDS_H
+#define __UAPI_RADIO_IRIS_COMMANDS_H
+
+enum v4l2_cid_private_iris_t {
+	V4L2_CID_PRIVATE_IRIS_SRCHMODE = (0x08000000 + 1),
+	V4L2_CID_PRIVATE_IRIS_SCANDWELL,
+	V4L2_CID_PRIVATE_IRIS_SRCHON,
+	V4L2_CID_PRIVATE_IRIS_STATE,
+	V4L2_CID_PRIVATE_IRIS_TRANSMIT_MODE,
+	V4L2_CID_PRIVATE_IRIS_RDSGROUP_MASK,
+	V4L2_CID_PRIVATE_IRIS_REGION,
+	V4L2_CID_PRIVATE_IRIS_SIGNAL_TH,
+	V4L2_CID_PRIVATE_IRIS_SRCH_PTY,
+	V4L2_CID_PRIVATE_IRIS_SRCH_PI,
+	V4L2_CID_PRIVATE_IRIS_SRCH_CNT,
+	V4L2_CID_PRIVATE_IRIS_EMPHASIS,
+	V4L2_CID_PRIVATE_IRIS_RDS_STD,
+	V4L2_CID_PRIVATE_IRIS_SPACING,
+	V4L2_CID_PRIVATE_IRIS_RDSON,
+	V4L2_CID_PRIVATE_IRIS_RDSGROUP_PROC,
+	V4L2_CID_PRIVATE_IRIS_LP_MODE,
+	V4L2_CID_PRIVATE_IRIS_ANTENNA,
+	V4L2_CID_PRIVATE_IRIS_RDSD_BUF,
+	V4L2_CID_PRIVATE_IRIS_PSALL,  /*0x8000014*/
+
+	/*v4l2 Tx controls*/
+	V4L2_CID_PRIVATE_IRIS_TX_SETPSREPEATCOUNT,
+	V4L2_CID_PRIVATE_IRIS_STOP_RDS_TX_PS_NAME,
+	V4L2_CID_PRIVATE_IRIS_STOP_RDS_TX_RT,
+	V4L2_CID_PRIVATE_IRIS_IOVERC,
+	V4L2_CID_PRIVATE_IRIS_INTDET,
+	V4L2_CID_PRIVATE_IRIS_MPX_DCC,
+	V4L2_CID_PRIVATE_IRIS_AF_JUMP,
+	V4L2_CID_PRIVATE_IRIS_RSSI_DELTA,
+	V4L2_CID_PRIVATE_IRIS_HLSI, /*0x800001d*/
+
+	/*Diagnostic commands*/
+	V4L2_CID_PRIVATE_IRIS_SOFT_MUTE,
+	V4L2_CID_PRIVATE_IRIS_RIVA_ACCS_ADDR,
+	V4L2_CID_PRIVATE_IRIS_RIVA_ACCS_LEN,
+	V4L2_CID_PRIVATE_IRIS_RIVA_PEEK,
+	V4L2_CID_PRIVATE_IRIS_RIVA_POKE,
+	V4L2_CID_PRIVATE_IRIS_SSBI_ACCS_ADDR,
+	V4L2_CID_PRIVATE_IRIS_SSBI_PEEK,
+	V4L2_CID_PRIVATE_IRIS_SSBI_POKE,
+	V4L2_CID_PRIVATE_IRIS_TX_TONE,
+	V4L2_CID_PRIVATE_IRIS_RDS_GRP_COUNTERS,
+	V4L2_CID_PRIVATE_IRIS_SET_NOTCH_FILTER, /* 0x8000028 */
+	V4L2_CID_PRIVATE_IRIS_SET_AUDIO_PATH, /* TAVARUA specific command */
+	V4L2_CID_PRIVATE_IRIS_DO_CALIBRATION,
+	V4L2_CID_PRIVATE_IRIS_SRCH_ALGORITHM, /* TAVARUA specific command */
+	V4L2_CID_PRIVATE_IRIS_GET_SINR,
+	V4L2_CID_PRIVATE_INTF_LOW_THRESHOLD,
+	V4L2_CID_PRIVATE_INTF_HIGH_THRESHOLD,
+	V4L2_CID_PRIVATE_SINR_THRESHOLD,
+	V4L2_CID_PRIVATE_SINR_SAMPLES,
+	V4L2_CID_PRIVATE_SPUR_FREQ,
+	V4L2_CID_PRIVATE_SPUR_FREQ_RMSSI,
+	V4L2_CID_PRIVATE_SPUR_SELECTION,
+	V4L2_CID_PRIVATE_UPDATE_SPUR_TABLE,
+	V4L2_CID_PRIVATE_VALID_CHANNEL,
+	V4L2_CID_PRIVATE_AF_RMSSI_TH,
+	V4L2_CID_PRIVATE_AF_RMSSI_SAMPLES,
+	V4L2_CID_PRIVATE_GOOD_CH_RMSSI_TH,
+	V4L2_CID_PRIVATE_SRCHALGOTYPE,
+	V4L2_CID_PRIVATE_CF0TH12,
+	V4L2_CID_PRIVATE_SINRFIRSTSTAGE,
+	V4L2_CID_PRIVATE_RMSSIFIRSTSTAGE,
+	V4L2_CID_PRIVATE_RXREPEATCOUNT,
+	V4L2_CID_PRIVATE_IRIS_RSSI_TH,
+	V4L2_CID_PRIVATE_IRIS_AF_JUMP_RSSI_TH,
+	V4L2_CID_PRIVATE_BLEND_SINRHI,
+	V4L2_CID_PRIVATE_BLEND_RMSSIHI,
+
+	/*using private CIDs under userclass*/
+	V4L2_CID_PRIVATE_IRIS_READ_DEFAULT = 0x00980928,
+	V4L2_CID_PRIVATE_IRIS_WRITE_DEFAULT,
+	V4L2_CID_PRIVATE_IRIS_SET_CALIBRATION,
+	V4L2_CID_PRIVATE_IRIS_SET_SPURTABLE = 0x0098092D,
+	V4L2_CID_PRIVATE_IRIS_GET_SPUR_TBL  = 0x0098092E,
+};
+
+enum iris_evt_t {
+	IRIS_EVT_RADIO_READY,
+	IRIS_EVT_TUNE_SUCC,
+	IRIS_EVT_SEEK_COMPLETE,
+	IRIS_EVT_SCAN_NEXT,
+	IRIS_EVT_NEW_RAW_RDS,
+	IRIS_EVT_NEW_RT_RDS,
+	IRIS_EVT_NEW_PS_RDS,
+	IRIS_EVT_ERROR,
+	IRIS_EVT_BELOW_TH,
+	IRIS_EVT_ABOVE_TH,
+	IRIS_EVT_STEREO,
+	IRIS_EVT_MONO,
+	IRIS_EVT_RDS_AVAIL,
+	IRIS_EVT_RDS_NOT_AVAIL,
+	IRIS_EVT_NEW_SRCH_LIST,
+	IRIS_EVT_NEW_AF_LIST,
+	IRIS_EVT_TXRDSDAT,
+	IRIS_EVT_TXRDSDONE,
+	IRIS_EVT_RADIO_DISABLED,
+	IRIS_EVT_NEW_ODA,
+	IRIS_EVT_NEW_RT_PLUS,
+	IRIS_EVT_NEW_ERT,
+	IRIS_EVT_SPUR_TBL,
+};
+#endif
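
[Usage sketch, not part of the patch: these private CIDs ride on the standard
V4L2 control ioctls. The radio node path is an assumption; value 0 maps to
SEEK per enum search_t in radio-iris.h.]

#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>
#include <media/radio-iris-commands.h>

int main(void)
{
	struct v4l2_control ctrl = {
		.id = V4L2_CID_PRIVATE_IRIS_SRCHMODE,
		.value = 0,	/* SEEK, per enum search_t in radio-iris.h */
	};
	int fd = open("/dev/radio0", O_RDWR);	/* node: assumption */

	if (fd < 0)
		return 1;
	ioctl(fd, VIDIOC_S_CTRL, &ctrl);
	close(fd);
	return 0;
}
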
diff -Nruw linux-4.4.115-fbx/include/uapi/media./radio-iris.h linux-4.4.115-fbx/include/uapi/media/radio-iris.h
--- linux-4.4.115-fbx/include/uapi/media./radio-iris.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/include/uapi/media/radio-iris.h	2019-01-22 16:16:28.603292590 +0100
@@ -0,0 +1,813 @@
+#ifndef __UAPI_RADIO_IRIS_H
+#define __UAPI_RADIO_IRIS_H
+
+#include <linux/types.h>
+#include <media/radio-iris-commands.h>
+
+#define MIN_TX_TONE_VAL  0x00
+#define MAX_TX_TONE_VAL  0x07
+#define MIN_HARD_MUTE_VAL  0x00
+#define MAX_HARD_MUTE_VAL  0x03
+#define MIN_SRCH_MODE  0x00
+#define MAX_SRCH_MODE  0x09
+#define MIN_SCAN_DWELL  0x00
+#define MAX_SCAN_DWELL  0x0F
+#define MIN_SIG_TH  0x00
+#define MAX_SIG_TH  0x03
+#define MIN_PTY  0X00
+#define MAX_PTY  0x1F
+#define MIN_PI  0x0000
+#define MAX_PI  0xFFFF
+#define MIN_SRCH_STATIONS_CNT  0x00
+#define MAX_SRCH_STATIONS_CNT  0x14
+#define MIN_CHAN_SPACING  0x00
+#define MAX_CHAN_SPACING  0x02
+#define MIN_EMPHASIS  0x00
+#define MAX_EMPHASIS  0x01
+#define MIN_RDS_STD  0x00
+#define MAX_RDS_STD  0x02
+#define MIN_ANTENNA_VAL  0x00
+#define MAX_ANTENNA_VAL  0x01
+#define MIN_TX_PS_REPEAT_CNT  0x01
+#define MAX_TX_PS_REPEAT_CNT  0x0F
+#define MIN_SOFT_MUTE  0x00
+#define MAX_SOFT_MUTE  0x01
+#define MIN_PEEK_ACCESS_LEN  0x01
+#define MAX_PEEK_ACCESS_LEN  0xF9
+#define MIN_RESET_CNTR  0x00
+#define MAX_RESET_CNTR  0x01
+#define MIN_HLSI  0x00
+#define MAX_HLSI  0x02
+#define MIN_NOTCH_FILTER  0x00
+#define MAX_NOTCH_FILTER  0x02
+#define MIN_INTF_DET_OUT_LW_TH  0x00
+#define MAX_INTF_DET_OUT_LW_TH  0xFF
+#define MIN_INTF_DET_OUT_HG_TH  0x00
+#define MAX_INTF_DET_OUT_HG_TH  0xFF
+#define MIN_SINR_TH  -128
+#define MAX_SINR_TH  127
+#define MIN_SINR_SAMPLES  0x01
+#define MAX_SINR_SAMPLES  0xFF
+#define MIN_BLEND_HI  -128
+#define MAX_BLEND_HI  127
+
+
+/* ---- HCI Packet structures ---- */
+#define RADIO_HCI_COMMAND_HDR_SIZE sizeof(struct radio_hci_command_hdr)
+#define RADIO_HCI_EVENT_HDR_SIZE   sizeof(struct radio_hci_event_hdr)
+
+/* HCI data types */
+#define RADIO_HCI_COMMAND_PKT   0x11
+#define RADIO_HCI_EVENT_PKT     0x14
+/* HCI response packets */
+#define MAX_RIVA_PEEK_RSP_SIZE   251
+/* default data access */
+#define DEFAULT_DATA_OFFSET 2
+#define DEFAULT_DATA_SIZE 249
+/* Power levels are 0-7, but the SOC expects values from 0-255,
+ * so each level's step size is 255/7 = 36
+ */
+#define FM_TX_PWR_LVL_STEP_SIZE 36
+#define FM_TX_PWR_LVL_0         0 /* Lowest power lvl that can be set for Tx */
+#define FM_TX_PWR_LVL_MAX       7 /* Max power lvl for Tx */
+#define FM_TX_PHY_CFG_MODE   0x3c
+#define FM_TX_PHY_CFG_LEN    0x10
+#define FM_TX_PWR_GAIN_OFFSET 14
+/**RDS CONFIG MODE**/
+#define FM_RDS_CNFG_MODE	0x0f
+#define FM_RDS_CNFG_LEN		0x10
+#define AF_RMSSI_TH_LSB_OFFSET	10
+#define AF_RMSSI_TH_MSB_OFFSET	11
+#define AF_RMSSI_SAMPLES_OFFSET	15
+/**RX CONFIG MODE**/
+#define FM_RX_CONFG_MODE	0x15
+#define FM_RX_CNFG_LEN		0x20
+#define GD_CH_RMSSI_TH_OFFSET	12
+#define MAX_GD_CH_RMSSI_TH	127
+#define SRCH_ALGO_TYPE_OFFSET  25
+#define SINRFIRSTSTAGE_OFFSET  26
+#define RMSSIFIRSTSTAGE_OFFSET 27
+#define CF0TH12_BYTE1_OFFSET   8
+#define CF0TH12_BYTE2_OFFSET   9
+#define CF0TH12_BYTE3_OFFSET   10
+#define CF0TH12_BYTE4_OFFSET   11
+#define MAX_SINR_FIRSTSTAGE	127
+#define MAX_RMSSI_FIRSTSTAGE	127
+#define RDS_PS0_XFR_MODE 0x01
+#define RDS_PS0_LEN 6
+#define RX_REPEATE_BYTE_OFFSET 5
+#define FM_SPUR_TBL_SIZE 240
+#define SPUR_DATA_LEN 16
+#define ENTRIES_EACH_CMD 15
+#define SPUR_DATA_INDEX 2
+#define FM_AF_LIST_MAX_SIZE   200
+/* Each AF frequency consists of sizeof(int) bytes */
+#define AF_LIST_MAX     (FM_AF_LIST_MAX_SIZE / 4)
+
+#define MAX_BLEND_INDEX 49
+
+#define TUNE_PARAM 16
+#define FM_RDS_3A_GRP (0x40)
+struct radio_hci_command_hdr {
+	__le16	opcode;		/* OCF & OGF */
+	__u8	plen;
+} __packed;
+
+struct radio_hci_event_hdr {
+	__u8	evt;
+	__u8	plen;
+} __packed;
+
+/* Opcode OCF */
+/* HCI recv control commands opcode */
+#define HCI_OCF_FM_ENABLE_RECV_REQ          0x0001
+#define HCI_OCF_FM_DISABLE_RECV_REQ         0x0002
+#define HCI_OCF_FM_GET_RECV_CONF_REQ        0x0003
+#define HCI_OCF_FM_SET_RECV_CONF_REQ        0x0004
+#define HCI_OCF_FM_SET_MUTE_MODE_REQ        0x0005
+#define HCI_OCF_FM_SET_STEREO_MODE_REQ      0x0006
+#define HCI_OCF_FM_SET_ANTENNA              0x0007
+#define HCI_OCF_FM_SET_SIGNAL_THRESHOLD     0x0008
+#define HCI_OCF_FM_GET_SIGNAL_THRESHOLD     0x0009
+#define HCI_OCF_FM_GET_STATION_PARAM_REQ    0x000A
+#define HCI_OCF_FM_GET_PROGRAM_SERVICE_REQ  0x000B
+#define HCI_OCF_FM_GET_RADIO_TEXT_REQ       0x000C
+#define HCI_OCF_FM_GET_AF_LIST_REQ          0x000D
+#define HCI_OCF_FM_SEARCH_STATIONS          0x000E
+#define HCI_OCF_FM_SEARCH_RDS_STATIONS      0x000F
+#define HCI_OCF_FM_SEARCH_STATIONS_LIST     0x0010
+#define HCI_OCF_FM_CANCEL_SEARCH            0x0011
+#define HCI_OCF_FM_RDS_GRP                  0x0012
+#define HCI_OCF_FM_RDS_GRP_PROCESS          0x0013
+#define HCI_OCF_FM_EN_WAN_AVD_CTRL          0x0014
+#define HCI_OCF_FM_EN_NOTCH_CTRL            0x0015
+#define HCI_OCF_FM_SET_EVENT_MASK           0x0016
+#define HCI_OCF_FM_SET_CH_DET_THRESHOLD     0x0017
+#define HCI_OCF_FM_GET_CH_DET_THRESHOLD     0x0018
+#define HCI_OCF_FM_SET_BLND_TBL             0x001B
+#define HCI_OCF_FM_GET_BLND_TBL             0x001C
+/* HCI trans control commands opcode */
+#define HCI_OCF_FM_ENABLE_TRANS_REQ         0x0001
+#define HCI_OCF_FM_DISABLE_TRANS_REQ        0x0002
+#define HCI_OCF_FM_GET_TRANS_CONF_REQ       0x0003
+#define HCI_OCF_FM_SET_TRANS_CONF_REQ       0x0004
+#define HCI_OCF_FM_RDS_RT_REQ               0x0008
+#define HCI_OCF_FM_RDS_PS_REQ               0x0009
+
+
+/* HCI common control commands opcode */
+#define HCI_OCF_FM_TUNE_STATION_REQ         0x0001
+#define HCI_OCF_FM_DEFAULT_DATA_READ        0x0002
+#define HCI_OCF_FM_DEFAULT_DATA_WRITE       0x0003
+#define HCI_OCF_FM_RESET                    0x0004
+#define HCI_OCF_FM_GET_FEATURE_LIST         0x0005
+#define HCI_OCF_FM_DO_CALIBRATION           0x0006
+#define HCI_OCF_FM_SET_CALIBRATION          0x0007
+#define HCI_OCF_FM_SET_SPUR_TABLE           0x0008
+#define HCI_OCF_FM_GET_SPUR_TABLE           0x0009
+
+/*HCI Status parameters commands*/
+#define HCI_OCF_FM_READ_GRP_COUNTERS        0x0001
+
+/*HCI Diagnostic commands*/
+#define HCI_OCF_FM_PEEK_DATA                0x0002
+#define HCI_OCF_FM_POKE_DATA                0x0003
+#define HCI_OCF_FM_SSBI_PEEK_REG            0x0004
+#define HCI_OCF_FM_SSBI_POKE_REG            0x0005
+#define HCI_OCF_FM_STATION_DBG_PARAM        0x0007
+#define HCI_FM_SET_INTERNAL_TONE_GENRATOR   0x0008
+
+/* Opcode OGF */
+#define HCI_OGF_FM_RECV_CTRL_CMD_REQ            0x0013
+#define HCI_OGF_FM_TRANS_CTRL_CMD_REQ           0x0014
+#define HCI_OGF_FM_COMMON_CTRL_CMD_REQ          0x0015
+#define HCI_OGF_FM_STATUS_PARAMETERS_CMD_REQ    0x0016
+#define HCI_OGF_FM_TEST_CMD_REQ                 0x0017
+#define HCI_OGF_FM_DIAGNOSTIC_CMD_REQ           0x003F
+
+/* Command opcode pack/unpack */
+#define hci_opcode_pack(ogf, ocf)  ((__u16) ((ocf & 0x03ff)|(ogf << 10)))
+#define hci_opcode_ogf(op)		(op >> 10)
+#define hci_opcode_ocf(op)		(op & 0x03ff)
+#define hci_recv_ctrl_cmd_op_pack(ocf) \
+	((__u16) hci_opcode_pack(HCI_OGF_FM_RECV_CTRL_CMD_REQ, ocf))
+#define hci_trans_ctrl_cmd_op_pack(ocf) \
+	((__u16) hci_opcode_pack(HCI_OGF_FM_TRANS_CTRL_CMD_REQ, ocf))
+#define hci_common_cmd_op_pack(ocf)	\
+	((__u16) hci_opcode_pack(HCI_OGF_FM_COMMON_CTRL_CMD_REQ, ocf))
+#define hci_status_param_op_pack(ocf)	\
+	((__u16) hci_opcode_pack(HCI_OGF_FM_STATUS_PARAMETERS_CMD_REQ, ocf))
+#define hci_diagnostic_cmd_op_pack(ocf)	\
+	((__u16) hci_opcode_pack(HCI_OGF_FM_DIAGNOSTIC_CMD_REQ, ocf))
+
+
+/* HCI commands with no arguments*/
+#define HCI_FM_ENABLE_RECV_CMD 1
+#define HCI_FM_DISABLE_RECV_CMD 2
+#define HCI_FM_GET_RECV_CONF_CMD 3
+#define HCI_FM_GET_STATION_PARAM_CMD 4
+#define HCI_FM_GET_SIGNAL_TH_CMD 5
+#define HCI_FM_GET_PROGRAM_SERVICE_CMD 6
+#define HCI_FM_GET_RADIO_TEXT_CMD 7
+#define HCI_FM_GET_AF_LIST_CMD 8
+#define HCI_FM_CANCEL_SEARCH_CMD 9
+#define HCI_FM_RESET_CMD 10
+#define HCI_FM_GET_FEATURES_CMD 11
+#define HCI_FM_STATION_DBG_PARAM_CMD 12
+#define HCI_FM_ENABLE_TRANS_CMD 13
+#define HCI_FM_DISABLE_TRANS_CMD 14
+#define HCI_FM_GET_TX_CONFIG 15
+#define HCI_FM_GET_DET_CH_TH_CMD 16
+#define HCI_FM_GET_BLND_TBL_CMD 17
+
+/* Defines for FM TX*/
+#define TX_PS_DATA_LENGTH 108
+#define TX_RT_DATA_LENGTH 64
+#define PS_STRING_LEN     9
+
+/* ----- HCI Command request ----- */
+struct hci_fm_recv_conf_req {
+	__u8	emphasis;
+	__u8	ch_spacing;
+	__u8	rds_std;
+	__u8	hlsi;
+	__u32	band_low_limit;
+	__u32	band_high_limit;
+} __packed;
+
+/* ----- HCI Command request ----- */
+struct hci_fm_trans_conf_req_struct {
+	__u8	emphasis;
+	__u8	rds_std;
+	__u32	band_low_limit;
+	__u32	band_high_limit;
+} __packed;
+
+
+/* ----- HCI Command request ----- */
+struct hci_fm_tx_ps {
+	__u8    ps_control;
+	__u16	pi;
+	__u8	pty;
+	__u8	ps_repeatcount;
+	__u8	ps_num;
+	__u8    ps_data[TX_PS_DATA_LENGTH];
+} __packed;
+
+struct hci_fm_tx_rt {
+	__u8    rt_control;
+	__u16	pi;
+	__u8	pty;
+	__u8	rt_len;
+	__u8    rt_data[TX_RT_DATA_LENGTH];
+} __packed;
+
+struct hci_fm_mute_mode_req {
+	__u8	hard_mute;
+	__u8	soft_mute;
+} __packed;
+
+struct hci_fm_stereo_mode_req {
+	__u8    stereo_mode;
+	__u8    sig_blend;
+	__u8    intf_blend;
+	__u8    most_switch;
+} __packed;
+
+struct hci_fm_search_station_req {
+	__u8    srch_mode;
+	__u8    scan_time;
+	__u8    srch_dir;
+} __packed;
+
+struct hci_fm_search_rds_station_req {
+	struct hci_fm_search_station_req srch_station;
+	__u8    srch_pty;
+	__u16   srch_pi;
+} __packed;
+
+struct hci_fm_search_station_list_req {
+	__u8    srch_list_mode;
+	__u8    srch_list_dir;
+	__u32   srch_list_max;
+	__u8    srch_pty;
+} __packed;
+
+struct hci_fm_rds_grp_req {
+	__u32   rds_grp_enable_mask;
+	__u32   rds_buf_size;
+	__u8    en_rds_change_filter;
+} __packed;
+
+struct hci_fm_en_avd_ctrl_req {
+	__u8    no_freqs;
+	__u8    freq_index;
+	__u8    lo_shft;
+	__u16   freq_min;
+	__u16   freq_max;
+} __packed;
+
+struct hci_fm_def_data_rd_req {
+	__u8    mode;
+	__u8    length;
+	__u8    param_len;
+	__u8    param;
+} __packed;
+
+struct hci_fm_def_data_wr_req {
+	__u8    mode;
+	__u8    length;
+	__u8   data[DEFAULT_DATA_SIZE];
+} __packed;
+
+struct hci_fm_riva_data {
+	__u8 subopcode;
+	__u32   start_addr;
+	__u8    length;
+} __packed;
+
+struct hci_fm_riva_poke {
+	struct hci_fm_riva_data cmd_params;
+	__u8    data[MAX_RIVA_PEEK_RSP_SIZE];
+} __packed;
+
+struct hci_fm_ssbi_req {
+	__u16   start_addr;
+	__u8    data;
+} __packed;
+struct hci_fm_ssbi_peek {
+	__u16 start_address;
+} __packed;
+
+struct hci_fm_ch_det_threshold {
+	char sinr;
+	__u8 sinr_samples;
+	__u8 low_th;
+	__u8 high_th;
+
+} __packed;
+
+struct hci_fm_blend_table {
+	__u8 ucBlendType;
+	__u8 ucBlendRampRateUp;
+	__u8 ucBlendDebounceNumSampleUp;
+	__u8 ucBlendDebounceIdxUp;
+	__u8 ucBlendSinrIdxSkipStep;
+	__u8 scBlendSinrHi;
+	__u8 scBlendRmssiHi;
+	__u8 ucBlendIndexHi;
+	__u8 ucBlendIndex[MAX_BLEND_INDEX];
+} __packed;
+
+/*HCI events*/
+#define HCI_EV_TUNE_STATUS              0x01
+#define HCI_EV_RDS_LOCK_STATUS          0x02
+#define HCI_EV_STEREO_STATUS            0x03
+#define HCI_EV_SERVICE_AVAILABLE        0x04
+#define HCI_EV_SEARCH_PROGRESS          0x05
+#define HCI_EV_SEARCH_RDS_PROGRESS      0x06
+#define HCI_EV_SEARCH_LIST_PROGRESS     0x07
+#define HCI_EV_RDS_RX_DATA              0x08
+#define HCI_EV_PROGRAM_SERVICE          0x09
+#define HCI_EV_RADIO_TEXT               0x0A
+#define HCI_EV_FM_AF_LIST               0x0B
+#define HCI_EV_TX_RDS_GRP_AVBLE         0x0C
+#define HCI_EV_TX_RDS_GRP_COMPL         0x0D
+#define HCI_EV_TX_RDS_CONT_GRP_COMPL    0x0E
+#define HCI_EV_CMD_COMPLETE             0x0F
+#define HCI_EV_CMD_STATUS               0x10
+#define HCI_EV_TUNE_COMPLETE            0x11
+#define HCI_EV_SEARCH_COMPLETE          0x12
+#define HCI_EV_SEARCH_RDS_COMPLETE      0x13
+#define HCI_EV_SEARCH_LIST_COMPLETE     0x14
+
+#define HCI_REQ_DONE	  0
+#define HCI_REQ_PEND	  1
+#define HCI_REQ_CANCELED  2
+#define HCI_REQ_STATUS    3
+
+#define MAX_RAW_RDS_GRPS	21
+
+#define RDSGRP_DATA_OFFSET	 0x1
+
+/*RT PLUS*/
+#define DUMMY_CLASS		0
+#define RT_PLUS_LEN_1_TAG	3
+#define RT_ERT_FLAG_BIT		5
+
+/*TAG1*/
+#define TAG1_MSB_OFFSET		3
+#define TAG1_MSB_MASK		7
+#define TAG1_LSB_OFFSET		5
+#define TAG1_POS_MSB_MASK	31
+#define TAG1_POS_MSB_OFFSET	1
+#define TAG1_POS_LSB_OFFSET	7
+#define TAG1_LEN_OFFSET		1
+#define TAG1_LEN_MASK		63
+
+/*TAG2*/
+#define TAG2_MSB_OFFSET		5
+#define TAG2_MSB_MASK		1
+#define TAG2_LSB_OFFSET		3
+#define TAG2_POS_MSB_MASK	7
+#define TAG2_POS_MSB_OFFSET	3
+#define TAG2_POS_LSB_OFFSET	5
+#define TAG2_LEN_MASK		31
+
+#define AGT_MASK		31
+/* Extract 5 rightmost bits of LSB of 2nd block */
+#define AGT(x)			(x & AGT_MASK)
+/* 16 bits of 4th block */
+#define AID(lsb, msb)		((msb << 8) | (lsb))
+/* Extract 5 leftmost bits of MSB of 2nd block */
+#define GTC(blk2msb)		(blk2msb >> 3)
+
+#define GRP_3A			0x6
+#define RT_PLUS_AID		0x4bd7
+
+/*ERT*/
+#define ERT_AID			0x6552
+#define CARRIAGE_RETURN		0x000D
+#define MAX_ERT_SEGMENT		31
+#define ERT_FORMAT_DIR_BIT	1
+
+#define EXTRACT_BIT(data, bit_pos) ((data & (1 << bit_pos)) >> bit_pos)
+
+struct hci_ev_tune_status {
+	__u8    sub_event;
+	__le32  station_freq;
+	__u8    serv_avble;
+	char    rssi;
+	__u8    stereo_prg;
+	__u8    rds_sync_status;
+	__u8    mute_mode;
+	char    sinr;
+	__u8	intf_det_th;
+} __packed;
+
+struct rds_blk_data {
+	__u8	rdsMsb;
+	__u8	rdsLsb;
+	__u8	blockStatus;
+} __packed;
+
+struct rds_grp_data {
+	struct rds_blk_data rdsBlk[4];
+} __packed;
+
+struct hci_ev_rds_rx_data {
+	__u8    num_rds_grps;
+	struct  rds_grp_data rds_grp_data[MAX_RAW_RDS_GRPS];
+} __packed;
+
+struct hci_ev_prg_service {
+	__le16   pi_prg_id;
+	__u8    pty_prg_type;
+	__u8    ta_prg_code_type;
+	__u8    ta_ann_code_flag;
+	__u8    ms_switch_code_flag;
+	__u8    dec_id_ctrl_code_flag;
+	__u8    ps_num;
+	__u8    prg_service_name[119];
+} __packed;
+
+struct hci_ev_radio_text {
+	__le16   pi_prg_id;
+	__u8    pty_prg_type;
+	__u8    ta_prg_code_type;
+	__u8    txt_ab_flag;
+	__u8    radio_txt[64];
+} __packed;
+
+struct hci_ev_af_list {
+	__le32   tune_freq;
+	__le16   pi_code;
+	__u8    af_size;
+	__u8    af_list[FM_AF_LIST_MAX_SIZE];
+} __packed;
+
+struct hci_ev_cmd_complete {
+	__u8    num_hci_cmd_pkts;
+	__le16   cmd_opcode;
+} __packed;
+
+struct hci_ev_cmd_status {
+	__u8    status;
+	__u8    num_hci_cmd_pkts;
+	__le16   status_opcode;
+} __packed;
+
+struct hci_ev_srch_st {
+	__le32    station_freq;
+	__u8    rds_cap;
+	__u8   pty;
+	__le16   status_opcode;
+} __packed;
+
+struct hci_ev_rel_freq {
+	__u8  rel_freq_msb;
+	__u8  rel_freq_lsb;
+
+} __packed;
+struct hci_ev_srch_list_compl {
+	__u8    num_stations_found;
+	struct hci_ev_rel_freq  rel_freq[20];
+} __packed;
+
+/* ----- HCI Event Response ----- */
+struct hci_fm_conf_rsp {
+	__u8    status;
+	struct hci_fm_recv_conf_req recv_conf_rsp;
+} __packed;
+
+struct hci_fm_get_trans_conf_rsp {
+	__u8    status;
+	struct hci_fm_trans_conf_req_struct trans_conf_rsp;
+} __packed;
+struct hci_fm_sig_threshold_rsp {
+	__u8    status;
+	__u8    sig_threshold;
+} __packed;
+
+struct hci_fm_station_rsp {
+	struct hci_ev_tune_status station_rsp;
+} __packed;
+
+struct hci_fm_prgm_srv_rsp {
+	__u8    status;
+	struct hci_ev_prg_service prg_srv;
+} __packed;
+
+struct hci_fm_radio_txt_rsp {
+	__u8    status;
+	struct hci_ev_radio_text rd_txt;
+} __packed;
+
+struct hci_fm_af_list_rsp {
+	__u8    status;
+	struct hci_ev_af_list rd_txt;
+} __packed;
+
+struct hci_fm_data_rd_rsp {
+	__u8    status;
+	__u8    ret_data_len;
+	__u8    data[DEFAULT_DATA_SIZE];
+} __packed;
+
+struct hci_fm_feature_list_rsp {
+	__u8    status;
+	__u8    feature_mask;
+} __packed;
+
+struct hci_fm_dbg_param_rsp {
+	__u8    status;
+	__u8    blend;
+	__u8    soft_mute;
+	__u8    inf_blend;
+	__u8    inf_soft_mute;
+	__u8    pilot_pil;
+	__u8    io_verc;
+	__u8    in_det_out;
+} __packed;
+
+#define CLKSPURID_INDEX0	0
+#define CLKSPURID_INDEX1	5
+#define CLKSPURID_INDEX2	10
+#define CLKSPURID_INDEX3	15
+#define CLKSPURID_INDEX4	20
+#define CLKSPURID_INDEX5	25
+
+#define MAX_SPUR_FREQ_LIMIT	30
+#define CKK_SPUR		0x3B
+#define SPUR_DATA_SIZE		0x4
+#define SPUR_ENTRIES_PER_ID	0x5
+
+#define COMPUTE_SPUR(val)         ((((val) - (76000)) / (50)))
+#define GET_FREQ(val, bit)        ((bit == 1) ? ((val) >> 8) : ((val) & 0xFF))
+#define GET_SPUR_ENTRY_LEVEL(val) ((val) / (5))
+
+struct hci_fm_spur_data {
+	__u32	freq[MAX_SPUR_FREQ_LIMIT];
+	__s8	rmssi[MAX_SPUR_FREQ_LIMIT];
+	__u8	enable[MAX_SPUR_FREQ_LIMIT];
+} __packed;
+
+
+/* HCI dev events */
+#define RADIO_HCI_DEV_REG			1
+#define RADIO_HCI_DEV_WRITE			2
+
+/* FM RDS */
+#define RDS_PTYPE 2
+#define RDS_PID_LOWER 1
+#define RDS_PID_HIGHER 0
+#define RDS_OFFSET 5
+#define RDS_PS_LENGTH_OFFSET 7
+#define RDS_STRING 8
+#define RDS_PS_DATA_OFFSET 8
+#define RDS_CONFIG_OFFSET  3
+#define RDS_AF_JUMP_OFFSET 4
+#define PI_CODE_OFFSET 4
+#define AF_SIZE_OFFSET 6
+#define AF_LIST_OFFSET 7
+#define RT_A_B_FLAG_OFFSET 4
+/*FM states*/
+
+enum radio_state_t {
+	FM_OFF,
+	FM_RECV,
+	FM_TRANS,
+	FM_RESET,
+	FM_CALIB,
+	FM_TURNING_OFF,
+	FM_RECV_TURNING_ON,
+	FM_TRANS_TURNING_ON,
+	FM_MAX_NO_STATES,
+};
+
+enum emphasis_type {
+	FM_RX_EMP75 = 0x0,
+	FM_RX_EMP50 = 0x1
+};
+
+enum channel_space_type {
+	FM_RX_SPACE_200KHZ = 0x0,
+	FM_RX_SPACE_100KHZ = 0x1,
+	FM_RX_SPACE_50KHZ = 0x2
+};
+
+enum high_low_injection {
+	AUTO_HI_LO_INJECTION = 0x0,
+	LOW_SIDE_INJECTION = 0x1,
+	HIGH_SIDE_INJECTION = 0x2
+};
+
+enum fm_rds_type {
+	FM_RX_RDBS_SYSTEM = 0x0,
+	FM_RX_RDS_SYSTEM = 0x1
+};
+
+enum iris_region_t {
+	IRIS_REGION_US,
+	IRIS_REGION_EU,
+	IRIS_REGION_JAPAN,
+	IRIS_REGION_JAPAN_WIDE,
+	IRIS_REGION_OTHER
+};
+
+#define STD_BUF_SIZE        (256)
+
+enum iris_buf_t {
+	IRIS_BUF_SRCH_LIST,
+	IRIS_BUF_EVENTS,
+	IRIS_BUF_RT_RDS,
+	IRIS_BUF_PS_RDS,
+	IRIS_BUF_RAW_RDS,
+	IRIS_BUF_AF_LIST,
+	IRIS_BUF_PEEK,
+	IRIS_BUF_SSBI_PEEK,
+	IRIS_BUF_RDS_CNTRS,
+	IRIS_BUF_RD_DEFAULT,
+	IRIS_BUF_CAL_DATA,
+	IRIS_BUF_RT_PLUS,
+	IRIS_BUF_ERT,
+	IRIS_BUF_SPUR,
+	IRIS_BUF_MAX,
+};
+
+enum iris_xfr_t {
+	IRIS_XFR_SYNC,
+	IRIS_XFR_ERROR,
+	IRIS_XFR_SRCH_LIST,
+	IRIS_XFR_RT_RDS,
+	IRIS_XFR_PS_RDS,
+	IRIS_XFR_AF_LIST,
+	IRIS_XFR_MAX
+};
+
+/* Search options */
+enum search_t {
+	SEEK,
+	SCAN,
+	SCAN_FOR_STRONG,
+	SCAN_FOR_WEAK,
+	RDS_SEEK_PTY,
+	RDS_SCAN_PTY,
+	RDS_SEEK_PI,
+	RDS_AF_JUMP,
+};
+
+enum spur_entry_levels {
+	ENTRY_0,
+	ENTRY_1,
+	ENTRY_2,
+	ENTRY_3,
+	ENTRY_4,
+	ENTRY_5,
+};
+
+/* Band limits */
+#define REGION_US_EU_BAND_LOW              87500
+#define REGION_US_EU_BAND_HIGH             108000
+#define REGION_JAPAN_STANDARD_BAND_LOW     76000
+#define REGION_JAPAN_STANDARD_BAND_HIGH    90000
+#define REGION_JAPAN_WIDE_BAND_LOW         90000
+#define REGION_JAPAN_WIDE_BAND_HIGH        108000
+
+#define SRCH_MODE	0x07
+#define SRCH_DIR	0x08 /* 0-up 1-down */
+#define SCAN_DWELL	0x70
+#define SRCH_ON		0x80
+
+/* I/O Control */
+#define IOC_HRD_MUTE	0x03
+#define IOC_SFT_MUTE	0x01
+#define IOC_MON_STR	0x01
+#define IOC_SIG_BLND	0x01
+#define IOC_INTF_BLND	0x01
+#define IOC_ANTENNA	0x01
+
+/* RDS Control */
+#define RDS_ON		0x01
+#define RDS_BUF_SZ  100
+
+/* constants */
+#define  RDS_BLOCKS_NUM	(4)
+#define BYTES_PER_BLOCK	(3)
+#define MAX_PS_LENGTH	(108)
+#define MAX_RT_LENGTH	(64)
+#define RDS_GRP_CNTR_LEN (36)
+#define RX_RT_DATA_LENGTH (63)
+/* Search direction */
+#define SRCH_DIR_UP		(0)
+#define SRCH_DIR_DOWN		(1)
+
+/*Search RDS stations*/
+#define SEARCH_RDS_STNS_MODE_OFFSET 4
+
+/*Search Station list */
+#define PARAMS_PER_STATION 0x08
+#define STN_NUM_OFFSET     0x01
+#define STN_FREQ_OFFSET    0x02
+#define KHZ_TO_MHZ         1000
+#define GET_MSB(x)((x >> 8) & 0xFF)
+#define GET_LSB(x)((x) & 0xFF)
+
+/* control options */
+#define CTRL_ON			(1)
+#define CTRL_OFF		(0)
+
+/*Diagnostic commands*/
+
+#define RIVA_PEEK_OPCODE 0x0D
+#define RIVA_POKE_OPCODE 0x0C
+
+#define PEEK_DATA_OFSET 0x1
+#define RIVA_PEEK_PARAM     0x6
+#define RIVA_PEEK_LEN_OFSET  0x6
+#define SSBI_PEEK_LEN    0x01
+/*Calibration data*/
+#define PROCS_CALIB_MODE  1
+#define PROCS_CALIB_SIZE  23
+#define DC_CALIB_MODE     2
+#define DC_CALIB_SIZE     48
+#define RSB_CALIB_MODE    3
+#define RSB_CALIB_SIZE    4
+#define CALIB_DATA_OFSET  2
+#define CALIB_MODE_OFSET  1
+#define MAX_CALIB_SIZE 75
+
+/* Channel validity */
+#define INVALID_CHANNEL		(0)
+#define VALID_CHANNEL		(1)
+
+struct hci_fm_set_cal_req_proc {
+	__u8    mode;
+	/*Max process calibration data size*/
+	__u8    data[PROCS_CALIB_SIZE];
+} __packed;
+
+struct hci_fm_set_cal_req_dc {
+	__u8    mode;
+	/*Max DC calibration data size*/
+	__u8    data[DC_CALIB_SIZE];
+} __packed;
+
+struct hci_cc_do_calibration_rsp {
+	__u8 status;
+	__u8 mode;
+	__u8 data[MAX_CALIB_SIZE];
+} __packed;
+
+struct hci_fm_set_spur_table_req {
+	__u8 mode;
+	__u8 no_of_freqs_entries;
+	__u8 spur_data[FM_SPUR_TBL_SIZE];
+} __packed;
+/* Low Power mode*/
+#define SIG_LEVEL_INTR  (1 << 0)
+#define RDS_SYNC_INTR   (1 << 1)
+#define AUDIO_CTRL_INTR (1 << 2)
+#define AF_JUMP_ENABLE  (1 << 4)
+
+#endif
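
For illustration, the byte-packing helpers defined above compose as in the
following userspace sketch (it assumes the definitions from this header are in
scope; the two-byte request framing and the display conversion are assumptions
made for the example, not code taken from the driver):

	#include <stdint.h>

	/* Split a 16-bit RDS PI code into the MSB-first byte order used by
	 * the request buffers (the surrounding framing is hypothetical). */
	static void pack_pi_code(uint16_t pi, uint8_t out[2])
	{
		out[0] = GET_MSB(pi);	/* high byte: 0x52 for pi == 0x52A1 */
		out[1] = GET_LSB(pi);	/* low byte:  0xA1 */
	}

	/* Convert a band limit such as REGION_US_EU_BAND_LOW (87500 kHz)
	 * into whole MHz and a kHz remainder for display. */
	static void khz_to_mhz_parts(uint32_t khz, uint32_t *mhz, uint32_t *rem)
	{
		*mhz = khz / KHZ_TO_MHZ;	/* 87 */
		*rem = khz % KHZ_TO_MHZ;	/* 500 */
	}
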
diff -Nruw linux-4.4.115/include/uapi/scsi/ufs/ioctl.h linux-4.4.115-fbx/include/uapi/scsi/ufs/ioctl.h
--- linux-4.4.115/include/uapi/scsi/ufs/ioctl.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/include/uapi/scsi/ufs/ioctl.h	2019-01-22 16:16:28.607292626 +0100
@@ -0,0 +1,57 @@
+#ifndef UAPI_UFS_IOCTL_H_
+#define UAPI_UFS_IOCTL_H_
+
+#include <linux/types.h>
+
+/*
+ *  IOCTL opcode for UFS queries; it takes the next opcode after
+ *  SCSI_IOCTL_GET_PCI
+ */
+#define UFS_IOCTL_QUERY			0x5388
+
+/**
+ * struct ufs_ioctl_query_data - used to transfer data to and from userspace via ioctl
+ * @opcode: type of data to query (descriptor/attribute/flag)
+ * @idn: id of the data structure
+ * @buf_size: number of allocated bytes/data size on return
+ * @buffer: data location
+ *
+ * Received: buffer and buf_size (available space for transferred data)
+ * Submitted: opcode, idn, buf_size
+ */
+struct ufs_ioctl_query_data {
+	/*
+	 * User should select one of the opcodes defined in "enum query_opcode";
+	 * see include/uapi/scsi/ufs/ufs.h for its definition.
+	 * Note that only UPIU_QUERY_OPCODE_READ_DESC,
+	 * UPIU_QUERY_OPCODE_READ_ATTR and UPIU_QUERY_OPCODE_READ_FLAG are
+	 * supported as of now; all other opcodes are considered invalid.
+	 * That is, only read query operations are currently supported.
+	 */
+	__u32 opcode;
+	/*
+	 * User should select an idn from "enum flag_idn", "enum attr_idn" or
+	 * "enum desc_idn", according to whether the opcode above reads a
+	 * flag, an attribute or a descriptor.
+	 * See include/uapi/scsi/ufs/ufs.h for their definitions.
+	 */
+	__u8 idn;
+	/*
+	 * User should specify the size of the buffer (buffer[0] below) into
+	 * which the query data (attribute/flag/descriptor) is to be read.
+	 * Since less data than specified in buf_size may be read, buf_size is
+	 * updated on return to the number of bytes actually read.
+	 */
+	__u16 buf_size;
+	/*
+	 * Placeholder for the start of the data buffer where the kernel will
+	 * copy the query data (attribute/flag/descriptor) read from the UFS
+	 * device.
+	 * Note: for a Read/Write Attribute allocate 4 bytes,
+	 * for a Read/Write Flag allocate 1 byte.
+	 */
+	__u8 buffer[0];
+};
+
+#endif /* UAPI_UFS_IOCTL_H_ */
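
Taken together with the enums from include/uapi/scsi/ufs/ufs.h below, a
userspace query could look like this minimal sketch; the block-device node
name is an assumption, and error handling is reduced to the essentials:

	#include <stdio.h>
	#include <stdlib.h>
	#include <fcntl.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <linux/types.h>
	#include <scsi/ufs/ioctl.h>
	#include <scsi/ufs/ufs.h>

	int main(void)
	{
		struct ufs_ioctl_query_data *qd;
		int fd, ret;

		/* 4 bytes of buffer, as required for an attribute read */
		qd = calloc(1, sizeof(*qd) + sizeof(__u32));
		if (!qd)
			return 1;
		qd->opcode = UPIU_QUERY_OPCODE_READ_ATTR;
		qd->idn = QUERY_ATTR_IDN_BOOT_LU_EN;
		qd->buf_size = sizeof(__u32);

		fd = open("/dev/block/sda", O_RDONLY); /* hypothetical UFS node */
		if (fd < 0) {
			free(qd);
			return 1;
		}
		ret = ioctl(fd, UFS_IOCTL_QUERY, qd);
		if (ret == 0)
			printf("bBootLunEn = %u (%u bytes returned)\n",
			       *(__u32 *)qd->buffer, qd->buf_size);
		close(fd);
		free(qd);
		return ret ? 1 : 0;
	}
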
diff -Nruw linux-4.4.115/include/uapi/scsi/ufs/Kbuild linux-4.4.115-fbx/include/uapi/scsi/ufs/Kbuild
--- linux-4.4.115/include/uapi/scsi/ufs/Kbuild	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/include/uapi/scsi/ufs/Kbuild	2019-01-22 16:16:28.607292626 +0100
@@ -0,0 +1,3 @@
+# UAPI Header export list
+header-y += ioctl.h
+header-y += ufs.h
diff -Nruw linux-4.4.115/include/uapi/scsi/ufs/ufs.h linux-4.4.115-fbx/include/uapi/scsi/ufs/ufs.h
--- linux-4.4.115/include/uapi/scsi/ufs/ufs.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/include/uapi/scsi/ufs/ufs.h	2019-10-29 09:26:25.557221909 +0100
@@ -0,0 +1,71 @@
+#ifndef UAPI_UFS_H_
+#define UAPI_UFS_H_
+
+#define MAX_QUERY_IDN	0x12
+
+/* Flag idn for Query Requests*/
+enum flag_idn {
+	QUERY_FLAG_IDN_FDEVICEINIT		= 0x01,
+	QUERY_FLAG_IDN_PERMANENT_WPE		= 0x02,
+	QUERY_FLAG_IDN_PWR_ON_WPE		= 0x03,
+	QUERY_FLAG_IDN_BKOPS_EN			= 0x04,
+	QUERY_FLAG_IDN_RESERVED1		= 0x05,
+	QUERY_FLAG_IDN_PURGE_ENABLE		= 0x06,
+	QUERY_FLAG_IDN_RESERVED2		= 0x07,
+	QUERY_FLAG_IDN_FPHYRESOURCEREMOVAL      = 0x08,
+	QUERY_FLAG_IDN_BUSY_RTC			= 0x09,
+};
+
+/* Attribute idn for Query requests */
+enum attr_idn {
+	QUERY_ATTR_IDN_BOOT_LU_EN		= 0x00,
+	QUERY_ATTR_IDN_RESERVED			= 0x01,
+	QUERY_ATTR_IDN_POWER_MODE		= 0x02,
+	QUERY_ATTR_IDN_ACTIVE_ICC_LVL		= 0x03,
+	QUERY_ATTR_IDN_OOO_DATA_EN		= 0x04,
+	QUERY_ATTR_IDN_BKOPS_STATUS		= 0x05,
+	QUERY_ATTR_IDN_PURGE_STATUS		= 0x06,
+	QUERY_ATTR_IDN_MAX_DATA_IN		= 0x07,
+	QUERY_ATTR_IDN_MAX_DATA_OUT		= 0x08,
+	QUERY_ATTR_IDN_DYN_CAP_NEEDED		= 0x09,
+	QUERY_ATTR_IDN_REF_CLK_FREQ		= 0x0A,
+	QUERY_ATTR_IDN_CONF_DESC_LOCK		= 0x0B,
+	QUERY_ATTR_IDN_MAX_NUM_OF_RTT		= 0x0C,
+	QUERY_ATTR_IDN_EE_CONTROL		= 0x0D,
+	QUERY_ATTR_IDN_EE_STATUS		= 0x0E,
+	QUERY_ATTR_IDN_SECONDS_PASSED		= 0x0F,
+	QUERY_ATTR_IDN_CNTX_CONF		= 0x10,
+	QUERY_ATTR_IDN_CORR_PRG_BLK_NUM		= 0x11,
+};
+
+#define QUERY_ATTR_IDN_BOOT_LU_EN_MAX	0x02
+
+/* Descriptor idn for Query requests */
+enum desc_idn {
+	QUERY_DESC_IDN_DEVICE		= 0x0,
+	QUERY_DESC_IDN_CONFIGURAION	= 0x1,
+	QUERY_DESC_IDN_UNIT		= 0x2,
+	QUERY_DESC_IDN_RFU_0		= 0x3,
+	QUERY_DESC_IDN_INTERCONNECT	= 0x4,
+	QUERY_DESC_IDN_STRING		= 0x5,
+	QUERY_DESC_IDN_RFU_1		= 0x6,
+	QUERY_DESC_IDN_GEOMETRY		= 0x7,
+	QUERY_DESC_IDN_POWER		= 0x8,
+	QUERY_DESC_IDN_RFU_2		= 0x9,
+	QUERY_DESC_IDN_MAX,
+};
+
+/* UTP QUERY Transaction Specific Fields OpCode */
+enum query_opcode {
+	UPIU_QUERY_OPCODE_NOP		= 0x0,
+	UPIU_QUERY_OPCODE_READ_DESC	= 0x1,
+	UPIU_QUERY_OPCODE_WRITE_DESC	= 0x2,
+	UPIU_QUERY_OPCODE_READ_ATTR	= 0x3,
+	UPIU_QUERY_OPCODE_WRITE_ATTR	= 0x4,
+	UPIU_QUERY_OPCODE_READ_FLAG	= 0x5,
+	UPIU_QUERY_OPCODE_SET_FLAG	= 0x6,
+	UPIU_QUERY_OPCODE_CLEAR_FLAG	= 0x7,
+	UPIU_QUERY_OPCODE_TOGGLE_FLAG	= 0x8,
+	UPIU_QUERY_OPCODE_MAX,
+};
+#endif /* UAPI_UFS_H_ */
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/include/uapi/sound/audio_effects.h	2019-10-29 09:26:25.557221909 +0100
@@ -0,0 +1,375 @@
+/*
+ * Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _AUDIO_EFFECTS_H
+#define _AUDIO_EFFECTS_H
+
+/** AUDIO EFFECTS **/
+
+
+/* CONFIG GET/SET */
+#define CONFIG_CACHE			0
+#define CONFIG_SET			1
+#define CONFIG_GET			2
+
+/* CONFIG HEADER */
+/*
+
+	MODULE_ID,
+	DEVICE,
+	NUM_COMMANDS,
+	COMMAND_ID_1,
+	CONFIG_CACHE/SET/GET,
+	OFFSET_1,
+	LENGTH_1,
+	VALUES_1,
+	...,
+	...,
+	COMMAND_ID_2,
+	CONFIG_CACHE/SET/GET,
+	OFFSET_2,
+	LENGTH_2,
+	VALUES_2,
+	...,
+	...,
+	COMMAND_ID_3,
+	...
+*/
+
+
+/* CONFIG PARAM IDs */
+#define VIRTUALIZER_MODULE		0x00001000
+#define VIRTUALIZER_ENABLE		0x00001001
+#define VIRTUALIZER_STRENGTH		0x00001002
+#define VIRTUALIZER_OUT_TYPE		0x00001003
+#define VIRTUALIZER_GAIN_ADJUST		0x00001004
+#define VIRTUALIZER_ENABLE_PARAM_LEN		1
+#define VIRTUALIZER_STRENGTH_PARAM_LEN		1
+#define VIRTUALIZER_OUT_TYPE_PARAM_LEN		1
+#define VIRTUALIZER_GAIN_ADJUST_PARAM_LEN	1
+
+#define REVERB_MODULE			0x00002000
+#define REVERB_ENABLE			0x00002001
+#define REVERB_MODE			0x00002002
+#define REVERB_PRESET			0x00002003
+#define REVERB_WET_MIX			0x00002004
+#define REVERB_GAIN_ADJUST		0x00002005
+#define REVERB_ROOM_LEVEL		0x00002006
+#define REVERB_ROOM_HF_LEVEL		0x00002007
+#define REVERB_DECAY_TIME		0x00002008
+#define REVERB_DECAY_HF_RATIO		0x00002009
+#define REVERB_REFLECTIONS_LEVEL	0x0000200a
+#define REVERB_REFLECTIONS_DELAY	0x0000200b
+#define REVERB_LEVEL			0x0000200c
+#define REVERB_DELAY			0x0000200d
+#define REVERB_DIFFUSION		0x0000200e
+#define REVERB_DENSITY			0x0000200f
+#define REVERB_ENABLE_PARAM_LEN			1
+#define REVERB_MODE_PARAM_LEN			1
+#define REVERB_PRESET_PARAM_LEN			1
+#define REVERB_WET_MIX_PARAM_LEN		1
+#define REVERB_GAIN_ADJUST_PARAM_LEN		1
+#define REVERB_ROOM_LEVEL_PARAM_LEN		1
+#define REVERB_ROOM_HF_LEVEL_PARAM_LEN		1
+#define REVERB_DECAY_TIME_PARAM_LEN		1
+#define REVERB_DECAY_HF_RATIO_PARAM_LEN		1
+#define REVERB_REFLECTIONS_LEVEL_PARAM_LEN	1
+#define REVERB_REFLECTIONS_DELAY_PARAM_LEN	1
+#define REVERB_LEVEL_PARAM_LEN			1
+#define REVERB_DELAY_PARAM_LEN			1
+#define REVERB_DIFFUSION_PARAM_LEN		1
+#define REVERB_DENSITY_PARAM_LEN		1
+
+#define BASS_BOOST_MODULE		0x00003000
+#define BASS_BOOST_ENABLE		0x00003001
+#define BASS_BOOST_MODE			0x00003002
+#define BASS_BOOST_STRENGTH		0x00003003
+#define BASS_BOOST_ENABLE_PARAM_LEN		1
+#define BASS_BOOST_MODE_PARAM_LEN		1
+#define BASS_BOOST_STRENGTH_PARAM_LEN		1
+
+#define EQ_MODULE			0x00004000
+#define EQ_ENABLE			0x00004001
+#define EQ_CONFIG			0x00004002
+#define EQ_NUM_BANDS			0x00004003
+#define EQ_BAND_LEVELS			0x00004004
+#define EQ_BAND_LEVEL_RANGE		0x00004005
+#define EQ_BAND_FREQS			0x00004006
+#define EQ_SINGLE_BAND_FREQ_RANGE	0x00004007
+#define EQ_SINGLE_BAND_FREQ		0x00004008
+#define EQ_BAND_INDEX			0x00004009
+#define EQ_PRESET_ID			0x0000400a
+#define EQ_NUM_PRESETS			0x0000400b
+#define EQ_PRESET_NAME			0x0000400c
+#define EQ_ENABLE_PARAM_LEN			1
+#define EQ_CONFIG_PARAM_LEN			3
+#define EQ_CONFIG_PER_BAND_PARAM_LEN		5
+#define EQ_NUM_BANDS_PARAM_LEN			1
+#define EQ_BAND_LEVELS_PARAM_LEN		13
+#define EQ_BAND_LEVEL_RANGE_PARAM_LEN		2
+#define EQ_BAND_FREQS_PARAM_LEN			13
+#define EQ_SINGLE_BAND_FREQ_RANGE_PARAM_LEN	2
+#define EQ_SINGLE_BAND_FREQ_PARAM_LEN		1
+#define EQ_BAND_INDEX_PARAM_LEN			1
+#define EQ_PRESET_ID_PARAM_LEN			1
+#define EQ_NUM_PRESETS_PARAM_LEN		1
+#define EQ_PRESET_NAME_PARAM_LEN		32
+
+#define EQ_TYPE_NONE	0
+#define EQ_BASS_BOOST	1
+#define EQ_BASS_CUT	2
+#define EQ_TREBLE_BOOST	3
+#define EQ_TREBLE_CUT	4
+#define EQ_BAND_BOOST	5
+#define EQ_BAND_CUT	6
+
+#define SOFT_VOLUME_MODULE		0x00006000
+#define SOFT_VOLUME_ENABLE		0x00006001
+#define SOFT_VOLUME_GAIN_2CH		0x00006002
+#define SOFT_VOLUME_GAIN_MASTER		0x00006003
+#define SOFT_VOLUME_ENABLE_PARAM_LEN		1
+#define SOFT_VOLUME_GAIN_2CH_PARAM_LEN		2
+#define SOFT_VOLUME_GAIN_MASTER_PARAM_LEN	1
+
+#define SOFT_VOLUME2_MODULE		0x00007000
+#define SOFT_VOLUME2_ENABLE		0x00007001
+#define SOFT_VOLUME2_GAIN_2CH		0x00007002
+#define SOFT_VOLUME2_GAIN_MASTER	0x00007003
+#define SOFT_VOLUME2_ENABLE_PARAM_LEN		SOFT_VOLUME_ENABLE_PARAM_LEN
+#define SOFT_VOLUME2_GAIN_2CH_PARAM_LEN		SOFT_VOLUME_GAIN_2CH_PARAM_LEN
+#define SOFT_VOLUME2_GAIN_MASTER_PARAM_LEN	\
+					SOFT_VOLUME_GAIN_MASTER_PARAM_LEN
+
+#define PBE_CONF_MODULE_ID	0x00010C2A
+#define PBE_CONF_PARAM_ID	0x00010C49
+
+#define PBE_MODULE		0x00008000
+#define PBE_ENABLE		0x00008001
+#define PBE_CONFIG		0x00008002
+#define PBE_ENABLE_PARAM_LEN		1
+#define PBE_CONFIG_PARAM_LEN		28
+
+#define COMMAND_PAYLOAD_LEN	3
+#define COMMAND_PAYLOAD_SZ	(COMMAND_PAYLOAD_LEN * sizeof(uint32_t))
+#define MAX_INBAND_PARAM_SZ	4096
+#define Q27_UNITY		(1 << 27)
+#define Q8_UNITY		(1 << 8)
+#define CUSTOM_OPENSL_PRESET	18
+
+#define VIRTUALIZER_ENABLE_PARAM_SZ	\
+			(VIRTUALIZER_ENABLE_PARAM_LEN*sizeof(uint32_t))
+#define VIRTUALIZER_STRENGTH_PARAM_SZ	\
+			(VIRTUALIZER_STRENGTH_PARAM_LEN*sizeof(uint32_t))
+#define VIRTUALIZER_OUT_TYPE_PARAM_SZ	\
+			(VIRTUALIZER_OUT_TYPE_PARAM_LEN*sizeof(uint32_t))
+#define VIRTUALIZER_GAIN_ADJUST_PARAM_SZ	\
+			(VIRTUALIZER_GAIN_ADJUST_PARAM_LEN*sizeof(uint32_t))
+struct virtualizer_params {
+	uint32_t device;
+	uint32_t enable_flag;
+	uint32_t strength;
+	uint32_t out_type;
+	int32_t gain_adjust;
+};
+
+#define NUM_OSL_REVERB_PRESETS_SUPPORTED	6
+#define REVERB_ENABLE_PARAM_SZ		\
+			(REVERB_ENABLE_PARAM_LEN*sizeof(uint32_t))
+#define REVERB_MODE_PARAM_SZ		\
+			(REVERB_MODE_PARAM_LEN*sizeof(uint32_t))
+#define REVERB_PRESET_PARAM_SZ		\
+			(REVERB_PRESET_PARAM_LEN*sizeof(uint32_t))
+#define REVERB_WET_MIX_PARAM_SZ		\
+			(REVERB_WET_MIX_PARAM_LEN*sizeof(uint32_t))
+#define REVERB_GAIN_ADJUST_PARAM_SZ	\
+			(REVERB_GAIN_ADJUST_PARAM_LEN*sizeof(uint32_t))
+#define REVERB_ROOM_LEVEL_PARAM_SZ	\
+			(REVERB_ROOM_LEVEL_PARAM_LEN*sizeof(uint32_t))
+#define REVERB_ROOM_HF_LEVEL_PARAM_SZ	\
+			(REVERB_ROOM_HF_LEVEL_PARAM_LEN*sizeof(uint32_t))
+#define REVERB_DECAY_TIME_PARAM_SZ	\
+			(REVERB_DECAY_TIME_PARAM_LEN*sizeof(uint32_t))
+#define REVERB_DECAY_HF_RATIO_PARAM_SZ	\
+			(REVERB_DECAY_HF_RATIO_PARAM_LEN*sizeof(uint32_t))
+#define REVERB_REFLECTIONS_LEVEL_PARAM_SZ	\
+			(REVERB_REFLECTIONS_LEVEL_PARAM_LEN*sizeof(uint32_t))
+#define REVERB_REFLECTIONS_DELAY_PARAM_SZ	\
+			(REVERB_REFLECTIONS_DELAY_PARAM_LEN*sizeof(uint32_t))
+#define REVERB_LEVEL_PARAM_SZ		\
+			(REVERB_LEVEL_PARAM_LEN*sizeof(uint32_t))
+#define REVERB_DELAY_PARAM_SZ		\
+			(REVERB_DELAY_PARAM_LEN*sizeof(uint32_t))
+#define REVERB_DIFFUSION_PARAM_SZ	\
+			(REVERB_DIFFUSION_PARAM_LEN*sizeof(uint32_t))
+#define REVERB_DENSITY_PARAM_SZ		\
+			(REVERB_DENSITY_PARAM_LEN*sizeof(uint32_t))
+struct reverb_params {
+	uint32_t device;
+	uint32_t enable_flag;
+	uint32_t mode;
+	uint32_t preset;
+	uint32_t wet_mix;
+	int32_t  gain_adjust;
+	int32_t  room_level;
+	int32_t  room_hf_level;
+	uint32_t decay_time;
+	uint32_t decay_hf_ratio;
+	int32_t  reflections_level;
+	uint32_t reflections_delay;
+	int32_t  level;
+	uint32_t delay;
+	uint32_t diffusion;
+	uint32_t density;
+};
+
+#define BASS_BOOST_ENABLE_PARAM_SZ	\
+			(BASS_BOOST_ENABLE_PARAM_LEN*sizeof(uint32_t))
+#define BASS_BOOST_MODE_PARAM_SZ	\
+			(BASS_BOOST_MODE_PARAM_LEN*sizeof(uint32_t))
+#define BASS_BOOST_STRENGTH_PARAM_SZ	\
+			(BASS_BOOST_STRENGTH_PARAM_LEN*sizeof(uint32_t))
+struct bass_boost_params {
+	uint32_t device;
+	uint32_t enable_flag;
+	uint32_t mode;
+	uint32_t strength;
+};
+
+
+#define MAX_EQ_BANDS 12
+#define MAX_OSL_EQ_BANDS 5
+#define EQ_ENABLE_PARAM_SZ			\
+			(EQ_ENABLE_PARAM_LEN*sizeof(uint32_t))
+#define EQ_CONFIG_PARAM_SZ			\
+			(EQ_CONFIG_PARAM_LEN*sizeof(uint32_t))
+#define EQ_CONFIG_PER_BAND_PARAM_SZ		\
+			(EQ_CONFIG_PER_BAND_PARAM_LEN*sizeof(uint32_t))
+#define EQ_CONFIG_PARAM_MAX_LEN			(EQ_CONFIG_PARAM_LEN+\
+			MAX_EQ_BANDS*EQ_CONFIG_PER_BAND_PARAM_LEN)
+#define EQ_CONFIG_PARAM_MAX_SZ			\
+			(EQ_CONFIG_PARAM_MAX_LEN*sizeof(uint32_t))
+#define EQ_NUM_BANDS_PARAM_SZ			\
+			(EQ_NUM_BANDS_PARAM_LEN*sizeof(uint32_t))
+#define EQ_BAND_LEVELS_PARAM_SZ			\
+			(EQ_BAND_LEVELS_PARAM_LEN*sizeof(uint32_t))
+#define EQ_BAND_LEVEL_RANGE_PARAM_SZ		\
+			(EQ_BAND_LEVEL_RANGE_PARAM_LEN*sizeof(uint32_t))
+#define EQ_BAND_FREQS_PARAM_SZ			\
+			(EQ_BAND_FREQS_PARAM_LEN*sizeof(uint32_t))
+#define EQ_SINGLE_BAND_FREQ_RANGE_PARAM_SZ	\
+			(EQ_SINGLE_BAND_FREQ_RANGE_PARAM_LEN*sizeof(uint32_t))
+#define EQ_SINGLE_BAND_FREQ_PARAM_SZ		\
+			(EQ_SINGLE_BAND_FREQ_PARAM_LEN*sizeof(uint32_t))
+#define EQ_BAND_INDEX_PARAM_SZ			\
+			(EQ_BAND_INDEX_PARAM_LEN*sizeof(uint32_t))
+#define EQ_PRESET_ID_PARAM_SZ			\
+			(EQ_PRESET_ID_PARAM_LEN*sizeof(uint32_t))
+#define EQ_NUM_PRESETS_PARAM_SZ			\
+			(EQ_NUM_PRESETS_PARAM_LEN*sizeof(uint8_t))
+struct eq_config_t {
+	int32_t eq_pregain;
+	int32_t preset_id;
+	uint32_t num_bands;
+};
+struct eq_per_band_config_t {
+	int32_t band_idx;
+	uint32_t filter_type;
+	uint32_t freq_millihertz;
+	int32_t  gain_millibels;
+	uint32_t quality_factor;
+};
+struct eq_per_band_freq_range_t {
+	uint32_t band_index;
+	uint32_t min_freq_millihertz;
+	uint32_t max_freq_millihertz;
+};
+
+struct eq_params {
+	uint32_t device;
+	uint32_t enable_flag;
+	struct eq_config_t config;
+	struct eq_per_band_config_t per_band_cfg[MAX_EQ_BANDS];
+	struct eq_per_band_freq_range_t per_band_freq_range[MAX_EQ_BANDS];
+	uint32_t band_index;
+	uint32_t freq_millihertz;
+};
+
+#define PBE_ENABLE_PARAM_SZ	\
+			(PBE_ENABLE_PARAM_LEN*sizeof(uint32_t))
+#define PBE_CONFIG_PARAM_SZ	\
+			(PBE_CONFIG_PARAM_LEN*sizeof(uint16_t))
+struct pbe_config_t {
+	int16_t  real_bass_mix;
+	int16_t  bass_color_control;
+	uint16_t main_chain_delay;
+	uint16_t xover_filter_order;
+	uint16_t bandpass_filter_order;
+	int16_t  drc_delay;
+	uint16_t rms_tav;
+	int16_t exp_threshold;
+	uint16_t exp_slope;
+	int16_t comp_threshold;
+	uint16_t comp_slope;
+	uint16_t makeup_gain;
+	uint32_t comp_attack;
+	uint32_t comp_release;
+	uint32_t exp_attack;
+	uint32_t exp_release;
+	int16_t limiter_bass_threshold;
+	int16_t limiter_high_threshold;
+	int16_t limiter_bass_makeup_gain;
+	int16_t limiter_high_makeup_gain;
+	int16_t limiter_bass_gc;
+	int16_t limiter_high_gc;
+	int16_t  limiter_delay;
+	uint16_t reserved;
+	/* placeholder for the filter coeffs that follow */
+	int32_t p1LowPassCoeffs[5*2];
+	int32_t p1HighPassCoeffs[5*2];
+	int32_t p1BandPassCoeffs[5*3];
+	int32_t p1BassShelfCoeffs[5];
+	int32_t p1TrebleShelfCoeffs[5];
+} __packed;
+
+struct pbe_params {
+	uint32_t device;
+	uint32_t enable_flag;
+	uint32_t cfg_len;
+	struct pbe_config_t config;
+};
+
+#define SOFT_VOLUME_ENABLE_PARAM_SZ		\
+			(SOFT_VOLUME_ENABLE_PARAM_LEN*sizeof(uint32_t))
+#define SOFT_VOLUME_GAIN_MASTER_PARAM_SZ	\
+			(SOFT_VOLUME_GAIN_MASTER_PARAM_LEN*sizeof(uint32_t))
+#define SOFT_VOLUME_GAIN_2CH_PARAM_SZ		\
+			(SOFT_VOLUME_GAIN_2CH_PARAM_LEN*sizeof(uint16_t))
+struct soft_volume_params {
+	uint32_t device;
+	uint32_t enable_flag;
+	uint32_t master_gain;
+	uint32_t left_gain;
+	uint32_t right_gain;
+};
+
+struct msm_nt_eff_all_config {
+	struct bass_boost_params bass_boost;
+	struct pbe_params pbe;
+	struct virtualizer_params virtualizer;
+	struct reverb_params reverb;
+	struct eq_params equalizer;
+	struct soft_volume_params saplus_vol;
+	struct soft_volume_params topo_switch_vol;
+};
+
+#endif /*_AUDIO_EFFECTS_H*/
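
The CONFIG HEADER documented at the top of this file serializes as a flat
array of 32-bit words. A minimal sketch of packing one CONFIG_SET command
follows; how the finished buffer reaches the driver (typically the
compressed-offload metadata path) is not defined by this header and is left
out here:

	#include <stdint.h>
	#include <sound/audio_effects.h>

	/* Serialise a single bass-boost enable command following the
	 * MODULE_ID / DEVICE / NUM_COMMANDS / per-command layout above.
	 * Returns the next free slot so further commands can be appended. */
	static uint32_t *pack_bass_boost_enable(uint32_t *p, uint32_t device)
	{
		*p++ = BASS_BOOST_MODULE;           /* MODULE_ID */
		*p++ = device;                      /* DEVICE */
		*p++ = 1;                           /* NUM_COMMANDS */
		*p++ = BASS_BOOST_ENABLE;           /* COMMAND_ID_1 */
		*p++ = CONFIG_SET;                  /* CONFIG_CACHE/SET/GET */
		*p++ = 0;                           /* OFFSET_1 */
		*p++ = BASS_BOOST_ENABLE_PARAM_LEN; /* LENGTH_1 */
		*p++ = 1;                           /* VALUES_1: enabled */
		return p;
	}
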
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/include/uapi/sound/devdep_params.h	2019-01-22 16:16:28.607292626 +0100
@@ -0,0 +1,79 @@
+/*
+ * Copyright (c) 2013-2015,2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DEV_DEP_H
+#define _DEV_DEP_H
+
+struct dolby_param_data {
+	int32_t version;
+	int32_t device_id;
+	int32_t be_id;
+	int32_t param_id;
+	int32_t length;
+	int32_t __user *data;
+};
+
+struct dolby_param_license {
+	int32_t dmid;
+	int32_t license_key;
+};
+
+#define SNDRV_DEVDEP_DAP_IOCTL_SET_PARAM\
+		_IOWR('U', 0x10, struct dolby_param_data)
+#define SNDRV_DEVDEP_DAP_IOCTL_GET_PARAM\
+		_IOR('U', 0x11, struct dolby_param_data)
+#define SNDRV_DEVDEP_DAP_IOCTL_DAP_COMMAND\
+		_IOWR('U', 0x13, struct dolby_param_data)
+#define SNDRV_DEVDEP_DAP_IOCTL_DAP_LICENSE\
+		_IOWR('U', 0x14, struct dolby_param_license)
+#define SNDRV_DEVDEP_DAP_IOCTL_GET_VISUALIZER\
+		_IOR('U', 0x15, struct dolby_param_data)
+
+#define DTS_EAGLE_MODULE			0x00005000
+#define DTS_EAGLE_MODULE_ENABLE			0x00005001
+#define EAGLE_DRIVER_ID				0xF2
+#define DTS_EAGLE_IOCTL_GET_CACHE_SIZE		_IOR(EAGLE_DRIVER_ID, 0, int)
+#define DTS_EAGLE_IOCTL_SET_CACHE_SIZE		_IOW(EAGLE_DRIVER_ID, 1, int)
+#define DTS_EAGLE_IOCTL_GET_PARAM		_IOR(EAGLE_DRIVER_ID, 2, void*)
+#define DTS_EAGLE_IOCTL_SET_PARAM		_IOW(EAGLE_DRIVER_ID, 3, void*)
+#define DTS_EAGLE_IOCTL_SET_CACHE_BLOCK		_IOW(EAGLE_DRIVER_ID, 4, void*)
+#define DTS_EAGLE_IOCTL_SET_ACTIVE_DEVICE	_IOW(EAGLE_DRIVER_ID, 5, void*)
+#define DTS_EAGLE_IOCTL_GET_LICENSE		_IOR(EAGLE_DRIVER_ID, 6, void*)
+#define DTS_EAGLE_IOCTL_SET_LICENSE		_IOW(EAGLE_DRIVER_ID, 7, void*)
+#define DTS_EAGLE_IOCTL_SEND_LICENSE		_IOW(EAGLE_DRIVER_ID, 8, int)
+#define DTS_EAGLE_IOCTL_SET_VOLUME_COMMANDS	_IOW(EAGLE_DRIVER_ID, 9, void*)
+#define DTS_EAGLE_FLAG_IOCTL_PRE		(1<<30)
+#define DTS_EAGLE_FLAG_IOCTL_JUSTSETCACHE	(1<<31)
+#define DTS_EAGLE_FLAG_IOCTL_GETFROMCORE       DTS_EAGLE_FLAG_IOCTL_JUSTSETCACHE
+#define DTS_EAGLE_FLAG_IOCTL_MASK		(~(DTS_EAGLE_FLAG_IOCTL_PRE | \
+					     DTS_EAGLE_FLAG_IOCTL_JUSTSETCACHE))
+#define DTS_EAGLE_FLAG_ALSA_GET			(1<<31)
+
+struct dts_eagle_param_desc {
+	uint32_t id;
+	uint32_t size;
+	int32_t offset;
+	uint32_t device;
+} __packed;
+
+#define HWDEP_FE_BASE                   3000 /*unique base for FE hw dep nodes*/
+struct snd_pcm_mmap_fd {
+	int32_t dir;
+	int32_t fd;
+	int32_t size;
+	int32_t actual_size;
+};
+
+#define SNDRV_PCM_IOCTL_MMAP_DATA_FD    _IOWR('U', 0xd2, struct snd_pcm_mmap_fd)
+
+#endif
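
A sketch of driving SNDRV_DEVDEP_DAP_IOCTL_SET_PARAM from userspace; the
hwdep node name and the parameter values are placeholders, and it assumes
headers_install has stripped the __user annotation from dolby_param_data:

	#include <stdint.h>
	#include <fcntl.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <sound/devdep_params.h>

	int set_dap_param(int32_t param_id, int32_t *values, int32_t n)
	{
		struct dolby_param_data dp = {
			.version   = 1,		/* placeholder */
			.device_id = 2,		/* placeholder output device */
			.be_id     = 0,
			.param_id  = param_id,
			.length    = n,
			.data      = values,
		};
		int fd = open("/dev/snd/hwC0D1", O_RDWR); /* hypothetical node */
		int ret;

		if (fd < 0)
			return -1;
		ret = ioctl(fd, SNDRV_DEVDEP_DAP_IOCTL_SET_PARAM, &dp);
		close(fd);
		return ret;
	}
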
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/include/uapi/sound/lsm_params.h	2019-01-22 16:16:28.607292626 +0100
@@ -0,0 +1,200 @@
+#ifndef _UAPI_LSM_PARAMS_H__
+#define _UAPI_LSM_PARAMS_H__
+
+#define LSM_POLLING_ENABLE_SUPPORT
+#define LSM_EVENT_TIMESTAMP_MODE_SUPPORT
+
+#include <linux/types.h>
+#include <sound/asound.h>
+
+#define SNDRV_LSM_VERSION SNDRV_PROTOCOL_VERSION(0, 1, 0)
+
+#define LSM_OUT_FORMAT_PCM (0)
+#define LSM_OUT_FORMAT_ADPCM (1 << 0)
+
+#define LSM_OUT_DATA_RAW (0)
+#define LSM_OUT_DATA_PACKED (1)
+
+#define LSM_OUT_DATA_EVENTS_DISABLED (0)
+#define LSM_OUT_DATA_EVENTS_ENABLED (1)
+
+#define LSM_OUT_TRANSFER_MODE_RT (0)
+#define LSM_OUT_TRANSFER_MODE_FTRT (1)
+
+#define LSM_ENDPOINT_DETECT_THRESHOLD (0)
+#define LSM_OPERATION_MODE (1)
+#define LSM_GAIN (2)
+#define LSM_MIN_CONFIDENCE_LEVELS (3)
+#define LSM_REG_SND_MODEL (4)
+#define LSM_DEREG_SND_MODEL (5)
+#define LSM_CUSTOM_PARAMS (6)
+#define LSM_POLLING_ENABLE (7)
+#define LSM_PARAMS_MAX (LSM_POLLING_ENABLE + 1)
+
+#define LSM_EVENT_NON_TIME_STAMP_MODE (0)
+#define LSM_EVENT_TIME_STAMP_MODE (1)
+
+enum lsm_app_id {
+	LSM_VOICE_WAKEUP_APP_ID = 1,
+	LSM_VOICE_WAKEUP_APP_ID_V2 = 2,
+};
+
+enum lsm_detection_mode {
+	LSM_MODE_KEYWORD_ONLY_DETECTION = 1,
+	LSM_MODE_USER_KEYWORD_DETECTION
+};
+
+enum lsm_vw_status {
+	LSM_VOICE_WAKEUP_STATUS_RUNNING = 1,
+	LSM_VOICE_WAKEUP_STATUS_DETECTED,
+	LSM_VOICE_WAKEUP_STATUS_END_SPEECH,
+	LSM_VOICE_WAKEUP_STATUS_REJECTED
+};
+
+/*
+ * Data for LSM_ENDPOINT_DETECT_THRESHOLD param_type
+ * @epd_begin: Begin threshold
+ * @epd_end: End threshold
+ */
+struct snd_lsm_ep_det_thres {
+	__u32 epd_begin;
+	__u32 epd_end;
+};
+
+/*
+ * Data for LSM_OPERATION_MODE param_type
+ * @mode: The detection mode to be used
+ * @detect_failure: Set to enable failure detection.
+ */
+struct snd_lsm_detect_mode {
+	enum lsm_detection_mode mode;
+	bool detect_failure;
+};
+
+/*
+ * Data for LSM_GAIN param_type
+ * @gain: The gain to be applied on LSM
+ */
+struct snd_lsm_gain {
+	__u16 gain;
+};
+
+/*
+ * Data for LSM_POLLING_ENABLE param_type
+ * @poll_en: Polling enable or disable
+ */
+struct snd_lsm_poll_enable {
+	bool poll_en;
+};
+
+
+struct snd_lsm_sound_model_v2 {
+	__u8 __user *data;
+	__u8 *confidence_level;
+	__u32 data_size;
+	enum lsm_detection_mode detection_mode;
+	__u8 num_confidence_levels;
+	bool detect_failure;
+};
+
+struct snd_lsm_session_data {
+	enum lsm_app_id app_id;
+};
+
+struct snd_lsm_event_status {
+	__u16 status;
+	__u16 payload_size;
+	__u8 payload[0];
+};
+
+struct snd_lsm_event_status_v3 {
+	__u32 timestamp_lsw;
+	__u32 timestamp_msw;
+	__u16 status;
+	__u16 payload_size;
+	__u8 payload[0];
+};
+
+struct snd_lsm_detection_params {
+	__u8 *conf_level;
+	enum lsm_detection_mode detect_mode;
+	__u8 num_confidence_levels;
+	bool detect_failure;
+	bool poll_enable;
+};
+
+/*
+ * Param info for each parameter type
+ * @module_id: Module to which parameter is to be set
+ * @param_id: Parameter that is to be set
+ * @param_size: size (in number of bytes) for the data
+ *		in param_data.
+ *		For confidence levels, this is num_conf_levels
+ *		For REG_SND_MODEL, this is size of sound model
+ *		For CUSTOM_PARAMS, this is size of the entire blob of data
+ * @param_data: Data for the parameter.
+ *		For some param_types this is a structure defined, ex: LSM_GAIN
+ *		For CONFIDENCE_LEVELS, this is array of confidence levels
+ *		For REG_SND_MODEL, this is the sound model data
+ *		For CUSTOM_PARAMS, this is the blob of custom data.
+ */
+struct lsm_params_info {
+	__u32 module_id;
+	__u32 param_id;
+	__u32 param_size;
+	__u8 __user *param_data;
+	uint32_t param_type;
+};
+
+/*
+ * Data passed to the SET_PARAM_V2 IOCTL
+ * @num_params: Number of params that are to be set
+ *		should not be greater than LSM_PARAMS_MAX
+ * @params: Points to an array of lsm_params_info
+ *	    Each entry points to one parameter to set
+ * @data_size: size (in bytes) for params
+ *	       should be equal to
+ *	       num_params * sizeof(struct lsm_params_info)
+ */
+struct snd_lsm_module_params {
+	__u8 __user *params;
+	__u32 num_params;
+	__u32 data_size;
+};
+
+/*
+ * Data passed to LSM_OUT_FORMAT_CFG IOCTL
+ * @format: The media format enum
+ * @packing: indicates the packing method used for data path
+ * @events: indicates whether data path events need to be enabled
+ * @mode: indicates whether FTRT mode or RT mode (transfer mode).
+ */
+struct snd_lsm_output_format_cfg {
+	__u8 format;
+	__u8 packing;
+	__u8 events;
+	__u8 mode;
+};
+
+#define SNDRV_LSM_DEREG_SND_MODEL _IOW('U', 0x01, int)
+#define SNDRV_LSM_EVENT_STATUS	_IOW('U', 0x02, struct snd_lsm_event_status)
+#define SNDRV_LSM_ABORT_EVENT	_IOW('U', 0x03, int)
+#define SNDRV_LSM_START		_IOW('U', 0x04, int)
+#define SNDRV_LSM_STOP		_IOW('U', 0x05, int)
+#define SNDRV_LSM_SET_SESSION_DATA _IOW('U', 0x06, struct snd_lsm_session_data)
+#define SNDRV_LSM_REG_SND_MODEL_V2 _IOW('U', 0x07,\
+					struct snd_lsm_sound_model_v2)
+#define SNDRV_LSM_LAB_CONTROL	_IOW('U', 0x08, uint32_t)
+#define SNDRV_LSM_STOP_LAB	_IO('U', 0x09)
+#define SNDRV_LSM_SET_PARAMS	_IOW('U', 0x0A, \
+					struct snd_lsm_detection_params)
+#define SNDRV_LSM_SET_MODULE_PARAMS	_IOW('U', 0x0B, \
+					struct snd_lsm_module_params)
+#define SNDRV_LSM_OUT_FORMAT_CFG _IOW('U', 0x0C, \
+				      struct snd_lsm_output_format_cfg)
+#define SNDRV_LSM_SET_PORT	_IO('U', 0x0D)
+#define SNDRV_LSM_SET_FWK_MODE_CONFIG	_IOW('U', 0x0E, uint32_t)
+#define SNDRV_LSM_EVENT_STATUS_V3	_IOW('U', 0x0F, \
+					struct snd_lsm_event_status_v3)
+
+#endif
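
As a usage sketch, a blocking wait for a detection event could be built on
SNDRV_LSM_EVENT_STATUS as below; passing the available payload size in
through payload_size, the 128-byte bound, and the already-opened LSM fd are
all assumptions:

	#include <stdbool.h>	/* lsm_params.h uses bool */
	#include <stdio.h>
	#include <stdlib.h>
	#include <sys/ioctl.h>
	#include <sound/lsm_params.h>

	int wait_for_detection(int fd)
	{
		const size_t payload_max = 128;	/* assumed upper bound */
		struct snd_lsm_event_status *ev;
		int ret;

		ev = calloc(1, sizeof(*ev) + payload_max);
		if (!ev)
			return -1;
		ev->payload_size = payload_max;
		ret = ioctl(fd, SNDRV_LSM_EVENT_STATUS, ev);
		if (ret == 0 && ev->status == LSM_VOICE_WAKEUP_STATUS_DETECTED)
			printf("keyword detected, %u payload bytes\n",
			       ev->payload_size);
		free(ev);
		return ret;
	}
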
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/include/uapi/sound/msmcal-hwdep.h	2019-01-22 16:16:28.607292626 +0100
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#ifndef _CALIB_HWDEP_H
+#define _CALIB_HWDEP_H
+
+#define WCD9XXX_CODEC_HWDEP_NODE    1000
+enum wcd_cal_type {
+	WCD9XXX_MIN_CAL,
+	WCD9XXX_ANC_CAL = WCD9XXX_MIN_CAL,
+	WCD9XXX_MAD_CAL,
+	WCD9XXX_MBHC_CAL,
+	WCD9XXX_VBAT_CAL,
+	WCD9XXX_MAX_CAL,
+};
+
+struct wcdcal_ioctl_buffer {
+	__u32 size;
+	__u8 __user *buffer;
+	enum wcd_cal_type cal_type;
+};
+
+#define SNDRV_CTL_IOCTL_HWDEP_CAL_TYPE \
+	_IOW('U', 0x1, struct wcdcal_ioctl_buffer)
+
+#endif /*_CALIB_HWDEP_H*/
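
A sketch of pushing a calibration blob through this interface; the hwdep
node path (suggested by WCD9XXX_CODEC_HWDEP_NODE) and the blob contents are
placeholders:

	#include <fcntl.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <linux/types.h>	/* msmcal-hwdep.h uses __u32/__u8 */
	#include <sound/msmcal-hwdep.h>

	int send_mbhc_cal(int fd, unsigned char *blob, unsigned int len)
	{
		struct wcdcal_ioctl_buffer cal = {
			.size     = len,
			.buffer   = blob,
			.cal_type = WCD9XXX_MBHC_CAL,
		};

		/* fd is the codec hwdep node, e.g. /dev/snd/hwC0D1000
		 * (hypothetical), opened by the caller. */
		return ioctl(fd, SNDRV_CTL_IOCTL_HWDEP_CAL_TYPE, &cal);
	}
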
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/include/uapi/sound/wcd-dsp-glink.h	2019-01-22 16:16:28.607292626 +0100
@@ -0,0 +1,60 @@
+#ifndef _WCD_DSP_GLINK_H
+#define _WCD_DSP_GLINK_H
+
+#include <linux/types.h>
+
+#define WDSP_CH_NAME_MAX_LEN 50
+
+enum {
+	WDSP_REG_PKT = 1,
+	WDSP_CMD_PKT,
+	WDSP_READY_PKT,
+};
+#define WDSP_READY_PKT WDSP_READY_PKT
+
+/*
+ * struct wdsp_reg_pkt - Glink channel information structure format
+ * @no_of_channels:   Number of glink channels to open
+ * @payload[0]:       Dynamic array containing all the glink channel information
+ */
+struct wdsp_reg_pkt {
+	__u8 no_of_channels;
+	__u8 payload[0];
+};
+
+/*
+ * struct wdsp_cmd_pkt - WDSP command packet format
+ * @ch_name:         Name of the glink channel
+ * @payload_size:    Size of the payload
+ * @payload[0]:      Actual data payload
+ */
+struct wdsp_cmd_pkt {
+	char ch_name[WDSP_CH_NAME_MAX_LEN];
+	__u32 payload_size;
+	__u8 payload[0];
+};
+
+/*
+ * struct wdsp_write_pkt - Format in which userspace sends data to the driver.
+ * @pkt_type:      Type of the packet (REG or CMD PKT)
+ * @payload[0]:    Payload is either a cmd or a reg pkt structure, based on pkt_type
+ */
+struct wdsp_write_pkt {
+	__u8 pkt_type;
+	__u8 payload[0];
+};
+
+/*
+ * struct wdsp_glink_ch_cfg - Defines the glink channel configuration.
+ * @name:              Name of the glink channel
+ * @latency_in_us:     Latency specified in microseconds for QoS
+ * @no_of_intents:     Number of intents prequeued
+ * @intents_size[0]:   Dynamic array to specify size of each intent
+ */
+struct wdsp_glink_ch_cfg {
+	char name[WDSP_CH_NAME_MAX_LEN];
+	__u32 latency_in_us;
+	__u32 no_of_intents;
+	__u32 intents_size[0];
+};
+#endif /* _WCD_DSP_GLINK_H */
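
Because both packet structures end in flexible arrays, a command has to be
assembled as nested variable-length records. A sketch, assuming an
already-opened wdsp glink character device fd:

	#include <stdlib.h>
	#include <string.h>
	#include <unistd.h>
	#include <sound/wcd-dsp-glink.h>

	int wdsp_send_cmd(int fd, const char *ch, const void *data, __u32 len)
	{
		size_t sz = sizeof(struct wdsp_write_pkt) +
			    sizeof(struct wdsp_cmd_pkt) + len;
		struct wdsp_write_pkt *wpkt = calloc(1, sz);
		struct wdsp_cmd_pkt *cpkt;
		int ret;

		if (!wpkt)
			return -1;
		wpkt->pkt_type = WDSP_CMD_PKT;		/* outer packet type */
		cpkt = (struct wdsp_cmd_pkt *)wpkt->payload;
		strncpy(cpkt->ch_name, ch, WDSP_CH_NAME_MAX_LEN - 1);
		cpkt->payload_size = len;
		memcpy(cpkt->payload, data, len);
		ret = (write(fd, wpkt, sz) == (ssize_t)sz) ? 0 : -1;
		free(wpkt);
		return ret;
	}
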
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/include/uapi/video/msm_hdmi_hdcp_mgr.h	2019-01-22 16:16:28.607292626 +0100
@@ -0,0 +1,54 @@
+#ifndef _UAPI__MSM_HDMI_HDCP_MGR_H
+#define _UAPI__MSM_HDMI_HDCP_MGR_H
+
+enum DS_TYPE {  /* type of downstream device */
+	DS_UNKNOWN,
+	DS_RECEIVER,
+	DS_REPEATER,
+};
+
+enum {
+	MSG_ID_IDX,
+	RET_CODE_IDX,
+	HEADER_LEN,
+};
+
+enum RET_CODE {
+	HDCP_NOT_AUTHED,
+	HDCP_AUTHED,
+	HDCP_DISABLE,
+};
+
+enum MSG_ID { /* List of functions expected to be called after it */
+	DOWN_CHECK_TOPOLOGY,
+	UP_REQUEST_TOPOLOGY,
+	UP_SEND_TOPOLOGY,
+	DOWN_REQUEST_TOPOLOGY,
+	MSG_NUM,
+};
+
+enum SOURCE_ID {
+	HDCP_V1_TX,
+	HDCP_V1_RX,
+	HDCP_V2_RX,
+	HDCP_V2_TX,
+	SRC_NUM,
+};
+
+/*
+ * Layout of the sysfs params buffer
+ * exported by the hdcp_tx driver.
+ */
+
+struct HDCP_V2V1_MSG_TOPOLOGY {
+	/* indicates downstream's type */
+	uint32_t ds_type;
+	uint8_t bksv[5];
+	uint8_t dev_count;
+	uint8_t depth;
+	uint8_t ksv_list[5 * 127];
+	uint32_t max_cascade_exceeded;
+	uint32_t max_dev_exceeded;
+};
+
+#endif /* _UAPI__MSM_HDMI_HDCP_MGR_H */
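
A sketch of interpreting such a topology blob once it has been read from the
hdcp_tx sysfs node (the sysfs read itself is omitted here):

	#include <stdint.h>	/* the header uses uint8_t/uint32_t */
	#include <stdio.h>
	#include <video/msm_hdmi_hdcp_mgr.h>

	void print_topology(const struct HDCP_V2V1_MSG_TOPOLOGY *t)
	{
		if (t->ds_type == DS_REPEATER)
			printf("repeater: %u devices, depth %u\n",
			       t->dev_count, t->depth);
		else if (t->ds_type == DS_RECEIVER)
			printf("receiver\n");
		if (t->max_cascade_exceeded || t->max_dev_exceeded)
			printf("topology exceeds HDCP 1.x limits\n");
	}
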
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/include/uapi/video/msm_hdmi_modes.h	2019-01-22 16:16:28.607292626 +0100
@@ -0,0 +1,570 @@
+#ifndef _UAPI_MSM_HDMI_MODES_H__
+#define _UAPI_MSM_HDMI_MODES_H__
+#include <linux/types.h>
+#include <linux/errno.h>
+
+#define MSM_HDMI_RGB_888_24BPP_FORMAT       BIT(0)
+#define MSM_HDMI_YUV_420_12BPP_FORMAT       BIT(1)
+
+enum aspect_ratio {
+	HDMI_RES_AR_INVALID,
+	HDMI_RES_AR_4_3,
+	HDMI_RES_AR_5_4,
+	HDMI_RES_AR_16_9,
+	HDMI_RES_AR_16_10,
+	HDMI_RES_AR_64_27,
+	HDMI_RES_AR_256_135,
+	HDMI_RES_AR_MAX,
+};
+
+enum msm_hdmi_s3d_mode {
+	HDMI_S3D_NONE,
+	HDMI_S3D_SIDE_BY_SIDE,
+	HDMI_S3D_TOP_AND_BOTTOM,
+	HDMI_S3D_FRAME_PACKING,
+	HDMI_S3D_MAX,
+};
+
+struct msm_hdmi_mode_timing_info {
+	uint32_t	video_format;
+	uint32_t	active_h;
+	uint32_t	front_porch_h;
+	uint32_t	pulse_width_h;
+	uint32_t	back_porch_h;
+	uint32_t	active_low_h;
+	uint32_t	active_v;
+	uint32_t	front_porch_v;
+	uint32_t	pulse_width_v;
+	uint32_t	back_porch_v;
+	uint32_t	active_low_v;
+	/* Divide by 1000 to get the actual frequency in MHz */
+	uint32_t	pixel_freq;
+	/* Divide by 1000 to get the actual frequency in Hz */
+	uint32_t	refresh_rate;
+	uint32_t	interlaced;
+	uint32_t	supported;
+	enum aspect_ratio ar;
+	/* Flags indicating support for specific pixel formats */
+	uint32_t        pixel_formats;
+};
+
+#define MSM_HDMI_INIT_RES_PAGE          1
+
+#define MSM_HDMI_MODES_CEA		(1 << 0)
+#define MSM_HDMI_MODES_XTND		(1 << 1)
+#define MSM_HDMI_MODES_DVI		(1 << 2)
+#define MSM_HDMI_MODES_ALL		(MSM_HDMI_MODES_CEA |\
+					 MSM_HDMI_MODES_XTND |\
+					 MSM_HDMI_MODES_DVI)
+
+/* all video formats defined by CEA 861D */
+#define HDMI_VFRMT_UNKNOWN		0
+#define HDMI_VFRMT_640x480p60_4_3	1
+#define HDMI_VFRMT_720x480p60_4_3	2
+#define HDMI_VFRMT_720x480p60_16_9	3
+#define HDMI_VFRMT_1280x720p60_16_9	4
+#define HDMI_VFRMT_1920x1080i60_16_9	5
+#define HDMI_VFRMT_720x480i60_4_3	6
+#define HDMI_VFRMT_1440x480i60_4_3	HDMI_VFRMT_720x480i60_4_3
+#define HDMI_VFRMT_720x480i60_16_9	7
+#define HDMI_VFRMT_1440x480i60_16_9	HDMI_VFRMT_720x480i60_16_9
+#define HDMI_VFRMT_720x240p60_4_3	8
+#define HDMI_VFRMT_1440x240p60_4_3	HDMI_VFRMT_720x240p60_4_3
+#define HDMI_VFRMT_720x240p60_16_9	9
+#define HDMI_VFRMT_1440x240p60_16_9	HDMI_VFRMT_720x240p60_16_9
+#define HDMI_VFRMT_2880x480i60_4_3	10
+#define HDMI_VFRMT_2880x480i60_16_9	11
+#define HDMI_VFRMT_2880x240p60_4_3	12
+#define HDMI_VFRMT_2880x240p60_16_9	13
+#define HDMI_VFRMT_1440x480p60_4_3	14
+#define HDMI_VFRMT_1440x480p60_16_9	15
+#define HDMI_VFRMT_1920x1080p60_16_9	16
+#define HDMI_VFRMT_720x576p50_4_3	17
+#define HDMI_VFRMT_720x576p50_16_9	18
+#define HDMI_VFRMT_1280x720p50_16_9	19
+#define HDMI_VFRMT_1920x1080i50_16_9	20
+#define HDMI_VFRMT_720x576i50_4_3	21
+#define HDMI_VFRMT_1440x576i50_4_3	HDMI_VFRMT_720x576i50_4_3
+#define HDMI_VFRMT_720x576i50_16_9	22
+#define HDMI_VFRMT_1440x576i50_16_9	HDMI_VFRMT_720x576i50_16_9
+#define HDMI_VFRMT_720x288p50_4_3	23
+#define HDMI_VFRMT_1440x288p50_4_3	HDMI_VFRMT_720x288p50_4_3
+#define HDMI_VFRMT_720x288p50_16_9	24
+#define HDMI_VFRMT_1440x288p50_16_9	HDMI_VFRMT_720x288p50_16_9
+#define HDMI_VFRMT_2880x576i50_4_3	25
+#define HDMI_VFRMT_2880x576i50_16_9	26
+#define HDMI_VFRMT_2880x288p50_4_3	27
+#define HDMI_VFRMT_2880x288p50_16_9	28
+#define HDMI_VFRMT_1440x576p50_4_3	29
+#define HDMI_VFRMT_1440x576p50_16_9	30
+#define HDMI_VFRMT_1920x1080p50_16_9	31
+#define HDMI_VFRMT_1920x1080p24_16_9	32
+#define HDMI_VFRMT_1920x1080p25_16_9	33
+#define HDMI_VFRMT_1920x1080p30_16_9	34
+#define HDMI_VFRMT_2880x480p60_4_3	35
+#define HDMI_VFRMT_2880x480p60_16_9	36
+#define HDMI_VFRMT_2880x576p50_4_3	37
+#define HDMI_VFRMT_2880x576p50_16_9	38
+#define HDMI_VFRMT_1920x1250i50_16_9	39
+#define HDMI_VFRMT_1920x1080i100_16_9	40
+#define HDMI_VFRMT_1280x720p100_16_9	41
+#define HDMI_VFRMT_720x576p100_4_3	42
+#define HDMI_VFRMT_720x576p100_16_9	43
+#define HDMI_VFRMT_720x576i100_4_3	44
+#define HDMI_VFRMT_1440x576i100_4_3	HDMI_VFRMT_720x576i100_4_3
+#define HDMI_VFRMT_720x576i100_16_9	45
+#define HDMI_VFRMT_1440x576i100_16_9	HDMI_VFRMT_720x576i100_16_9
+#define HDMI_VFRMT_1920x1080i120_16_9	46
+#define HDMI_VFRMT_1280x720p120_16_9	47
+#define HDMI_VFRMT_720x480p120_4_3	48
+#define HDMI_VFRMT_720x480p120_16_9	49
+#define HDMI_VFRMT_720x480i120_4_3	50
+#define HDMI_VFRMT_1440x480i120_4_3	HDMI_VFRMT_720x480i120_4_3
+#define HDMI_VFRMT_720x480i120_16_9	51
+#define HDMI_VFRMT_1440x480i120_16_9	HDMI_VFRMT_720x480i120_16_9
+#define HDMI_VFRMT_720x576p200_4_3	52
+#define HDMI_VFRMT_720x576p200_16_9	53
+#define HDMI_VFRMT_720x576i200_4_3	54
+#define HDMI_VFRMT_1440x576i200_4_3	HDMI_VFRMT_720x576i200_4_3
+#define HDMI_VFRMT_720x576i200_16_9	55
+#define HDMI_VFRMT_1440x576i200_16_9	HDMI_VFRMT_720x576i200_16_9
+#define HDMI_VFRMT_720x480p240_4_3	56
+#define HDMI_VFRMT_720x480p240_16_9	57
+#define HDMI_VFRMT_720x480i240_4_3	58
+#define HDMI_VFRMT_1440x480i240_4_3	HDMI_VFRMT_720x480i240_4_3
+#define HDMI_VFRMT_720x480i240_16_9	59
+#define HDMI_VFRMT_1440x480i240_16_9	HDMI_VFRMT_720x480i240_16_9
+#define HDMI_VFRMT_1280x720p24_16_9	60
+#define HDMI_VFRMT_1280x720p25_16_9	61
+#define HDMI_VFRMT_1280x720p30_16_9	62
+#define HDMI_VFRMT_1920x1080p120_16_9	63
+#define HDMI_VFRMT_1920x1080p100_16_9	64
+#define HDMI_VFRMT_1280x720p24_64_27    65
+#define HDMI_VFRMT_1280x720p25_64_27    66
+#define HDMI_VFRMT_1280x720p30_64_27    67
+#define HDMI_VFRMT_1280x720p50_64_27    68
+#define HDMI_VFRMT_1280x720p60_64_27    69
+#define HDMI_VFRMT_1280x720p100_64_27   70
+#define HDMI_VFRMT_1280x720p120_64_27   71
+#define HDMI_VFRMT_1920x1080p24_64_27   72
+#define HDMI_VFRMT_1920x1080p25_64_27   73
+#define HDMI_VFRMT_1920x1080p30_64_27   74
+#define HDMI_VFRMT_1920x1080p50_64_27   75
+#define HDMI_VFRMT_1920x1080p60_64_27   76
+#define HDMI_VFRMT_1920x1080p100_64_27  77
+#define HDMI_VFRMT_1920x1080p120_64_27  78
+#define HDMI_VFRMT_1680x720p24_64_27    79
+#define HDMI_VFRMT_1680x720p25_64_27    80
+#define HDMI_VFRMT_1680x720p30_64_27    81
+#define HDMI_VFRMT_1680x720p50_64_27    82
+#define HDMI_VFRMT_1680x720p60_64_27    83
+#define HDMI_VFRMT_1680x720p100_64_27   84
+#define HDMI_VFRMT_1680x720p120_64_27   85
+#define HDMI_VFRMT_2560x1080p24_64_27   86
+#define HDMI_VFRMT_2560x1080p25_64_27   87
+#define HDMI_VFRMT_2560x1080p30_64_27   88
+#define HDMI_VFRMT_2560x1080p50_64_27   89
+#define HDMI_VFRMT_2560x1080p60_64_27   90
+#define HDMI_VFRMT_2560x1080p100_64_27  91
+#define HDMI_VFRMT_2560x1080p120_64_27  92
+#define HDMI_VFRMT_3840x2160p24_16_9    93
+#define HDMI_VFRMT_3840x2160p25_16_9    94
+#define HDMI_VFRMT_3840x2160p30_16_9    95
+#define HDMI_VFRMT_3840x2160p50_16_9    96
+#define HDMI_VFRMT_3840x2160p60_16_9    97
+#define HDMI_VFRMT_4096x2160p24_256_135 98
+#define HDMI_VFRMT_4096x2160p25_256_135 99
+#define HDMI_VFRMT_4096x2160p30_256_135 100
+#define HDMI_VFRMT_4096x2160p50_256_135 101
+#define HDMI_VFRMT_4096x2160p60_256_135 102
+#define HDMI_VFRMT_3840x2160p24_64_27   103
+#define HDMI_VFRMT_3840x2160p25_64_27   104
+#define HDMI_VFRMT_3840x2160p30_64_27   105
+#define HDMI_VFRMT_3840x2160p50_64_27   106
+#define HDMI_VFRMT_3840x2160p60_64_27   107
+
+/* Video Identification Codes from 108-127 are reserved for the future */
+#define HDMI_VFRMT_END			127
+
+#define EVFRMT_OFF(x)			(HDMI_VFRMT_END + x)
+
+/* extended video formats */
+#define HDMI_EVFRMT_3840x2160p30_16_9	EVFRMT_OFF(1)
+#define HDMI_EVFRMT_3840x2160p25_16_9	EVFRMT_OFF(2)
+#define HDMI_EVFRMT_3840x2160p24_16_9	EVFRMT_OFF(3)
+#define HDMI_EVFRMT_4096x2160p24_16_9	EVFRMT_OFF(4)
+#define HDMI_EVFRMT_END			HDMI_EVFRMT_4096x2160p24_16_9
+
+#define WQXGA_OFF(x)			(HDMI_EVFRMT_END + x)
+
+/* WQXGA */
+#define HDMI_VFRMT_2560x1600p60_16_9	WQXGA_OFF(1)
+#define HDMI_WQXGAFRMT_END		HDMI_VFRMT_2560x1600p60_16_9
+
+#define WXGA_OFF(x)			(HDMI_WQXGAFRMT_END + x)
+
+/* WXGA */
+#define HDMI_VFRMT_1280x800p60_16_10	WXGA_OFF(1)
+#define HDMI_VFRMT_1366x768p60_16_10	WXGA_OFF(2)
+#define HDMI_WXGAFRMT_END		HDMI_VFRMT_1366x768p60_16_10
+
+#define ETI_OFF(x)			(HDMI_WXGAFRMT_END + x)
+
+/* ESTABLISHED TIMINGS I */
+#define HDMI_VFRMT_800x600p60_4_3	ETI_OFF(1)
+#define ETI_VFRMT_END			HDMI_VFRMT_800x600p60_4_3
+
+#define ETII_OFF(x)			(ETI_VFRMT_END + x)
+
+/* ESTABLISHED TIMINGS II */
+#define HDMI_VFRMT_1024x768p60_4_3	ETII_OFF(1)
+#define HDMI_VFRMT_1280x1024p60_5_4	ETII_OFF(2)
+#define ETII_VFRMT_END			HDMI_VFRMT_1280x1024p60_5_4
+
+#define ETIII_OFF(x)			(ETII_VFRMT_END + x)
+
+/* ESTABLISHED TIMINGS III */
+#define HDMI_VFRMT_848x480p60_16_9	ETIII_OFF(1)
+#define HDMI_VFRMT_1280x960p60_4_3	ETIII_OFF(2)
+#define HDMI_VFRMT_1360x768p60_16_9	ETIII_OFF(3)
+#define HDMI_VFRMT_1440x900p60_16_10	ETIII_OFF(4)
+#define HDMI_VFRMT_1400x1050p60_4_3	ETIII_OFF(5)
+#define HDMI_VFRMT_1680x1050p60_16_10	ETIII_OFF(6)
+#define HDMI_VFRMT_1600x1200p60_4_3	ETIII_OFF(7)
+#define HDMI_VFRMT_1920x1200p60_16_10	ETIII_OFF(8)
+#define ETIII_VFRMT_END			HDMI_VFRMT_1920x1200p60_16_10
+
+#define MISC_VFRMT_OFF(x)		(ETIII_VFRMT_END + x)
+#define HDMI_VFRMT_640x480p59_4_3	MISC_VFRMT_OFF(1)
+#define MISC_VFRMT_END			HDMI_VFRMT_640x480p59_4_3
+
+#define RESERVE_OFF(x)			(MISC_VFRMT_END + x)
+
+#define HDMI_VFRMT_RESERVE1		RESERVE_OFF(1)
+#define HDMI_VFRMT_RESERVE2		RESERVE_OFF(2)
+#define HDMI_VFRMT_RESERVE3		RESERVE_OFF(3)
+#define HDMI_VFRMT_RESERVE4		RESERVE_OFF(4)
+#define HDMI_VFRMT_RESERVE5		RESERVE_OFF(5)
+#define HDMI_VFRMT_RESERVE6		RESERVE_OFF(6)
+#define HDMI_VFRMT_RESERVE7		RESERVE_OFF(7)
+#define HDMI_VFRMT_RESERVE8		RESERVE_OFF(8)
+#define RESERVE_VFRMT_END		HDMI_VFRMT_RESERVE8
+
+#define HDMI_VFRMT_MAX			(RESERVE_VFRMT_END + 1)
+
+/* Timing information for supported modes */
+#define VFRMT_NOT_SUPPORTED(VFRMT) \
+	{VFRMT, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, false,		\
+		HDMI_RES_AR_INVALID}
+
+#define HDMI_VFRMT_640x480p60_4_3_TIMING				\
+	{HDMI_VFRMT_640x480p60_4_3, 640, 16, 96, 48, true,		\
+	 480, 10, 2, 33, true, 25200, 60000, false, true, HDMI_RES_AR_4_3, 0}
+#define HDMI_VFRMT_720x480p60_4_3_TIMING				\
+	{HDMI_VFRMT_720x480p60_4_3, 720, 16, 62, 60, true,		\
+	 480, 9, 6, 30, true, 27027, 60000, false, true, HDMI_RES_AR_4_3, 0}
+#define HDMI_VFRMT_720x480p60_16_9_TIMING				\
+	{HDMI_VFRMT_720x480p60_16_9, 720, 16, 62, 60, true,		\
+	 480, 9, 6, 30, true, 27027, 60000, false, true, HDMI_RES_AR_16_9, 0}
+#define HDMI_VFRMT_1280x720p60_16_9_TIMING				\
+	{HDMI_VFRMT_1280x720p60_16_9, 1280, 110, 40, 220, false,	\
+	 720, 5, 5, 20, false, 74250, 60000, false, true, HDMI_RES_AR_16_9, 0}
+#define HDMI_VFRMT_1920x1080i60_16_9_TIMING				\
+	{HDMI_VFRMT_1920x1080i60_16_9, 1920, 88, 44, 148, false,	\
+	 540, 2, 5, 5, false, 74250, 60000, true, true, HDMI_RES_AR_16_9, 0}
+#define HDMI_VFRMT_1440x480i60_4_3_TIMING				\
+	{HDMI_VFRMT_1440x480i60_4_3, 1440, 38, 124, 114, true,		\
+	 240, 4, 3, 15, true, 27000, 60000, true, true, HDMI_RES_AR_4_3, 0}
+#define HDMI_VFRMT_1440x480i60_16_9_TIMING				\
+	{HDMI_VFRMT_1440x480i60_16_9, 1440, 38, 124, 114, true,		\
+	 240, 4, 3, 15, true, 27000, 60000, true, true, HDMI_RES_AR_16_9, 0}
+#define HDMI_VFRMT_1920x1080p60_16_9_TIMING				\
+	{HDMI_VFRMT_1920x1080p60_16_9, 1920, 88, 44, 148, false,	\
+	 1080, 4, 5, 36, false, 148500, 60000, false, true, HDMI_RES_AR_16_9, 0}
+#define HDMI_VFRMT_720x576p50_4_3_TIMING				\
+	{HDMI_VFRMT_720x576p50_4_3, 720, 12, 64, 68, true,		\
+	 576,  5, 5, 39, true, 27000, 50000, false, true, HDMI_RES_AR_4_3, 0}
+#define HDMI_VFRMT_720x576p50_16_9_TIMING				\
+	{HDMI_VFRMT_720x576p50_16_9, 720, 12, 64, 68, true,		\
+	 576,  5, 5, 39, true, 27000, 50000, false, true, HDMI_RES_AR_16_9, 0}
+#define HDMI_VFRMT_1280x720p50_16_9_TIMING				\
+	{HDMI_VFRMT_1280x720p50_16_9, 1280, 440, 40, 220, false,	\
+	 720,  5, 5, 20, false, 74250, 50000, false, true, HDMI_RES_AR_16_9, 0}
+#define HDMI_VFRMT_1440x576i50_4_3_TIMING				\
+	{HDMI_VFRMT_1440x576i50_4_3, 1440, 24, 126, 138, true,		\
+	 288,  2, 3, 19, true, 27000, 50000, true, true, HDMI_RES_AR_4_3, 0}
+#define HDMI_VFRMT_1440x576i50_16_9_TIMING				\
+	{HDMI_VFRMT_1440x576i50_16_9, 1440, 24, 126, 138, true,		\
+	 288,  2, 3, 19, true, 27000, 50000, true, true, HDMI_RES_AR_16_9, 0}
+#define HDMI_VFRMT_1920x1080p50_16_9_TIMING				\
+	{HDMI_VFRMT_1920x1080p50_16_9, 1920, 528, 44, 148, false,	\
+	 1080, 4, 5, 36, false, 148500, 50000, false, true, HDMI_RES_AR_16_9, 0}
+#define HDMI_VFRMT_1920x1080p24_16_9_TIMING				\
+	{HDMI_VFRMT_1920x1080p24_16_9, 1920, 638, 44, 148, false,	\
+	 1080, 4, 5, 36, false, 74250, 24000, false, true, HDMI_RES_AR_16_9, 0}
+#define HDMI_VFRMT_1920x1080p25_16_9_TIMING				\
+	{HDMI_VFRMT_1920x1080p25_16_9, 1920, 528, 44, 148, false,	\
+	 1080, 4, 5, 36, false, 74250, 25000, false, true, HDMI_RES_AR_16_9, 0}
+#define HDMI_VFRMT_1920x1080p30_16_9_TIMING				\
+	{HDMI_VFRMT_1920x1080p30_16_9, 1920, 88, 44, 148, false,	\
+	 1080, 4, 5, 36, false, 74250, 30000, false, true, HDMI_RES_AR_16_9, 0}
+#define HDMI_VFRMT_1024x768p60_4_3_TIMING                               \
+	{HDMI_VFRMT_1024x768p60_4_3, 1024, 24, 136, 160, false,         \
+	768, 2, 6, 29, false, 65000, 60000, false, true, HDMI_RES_AR_4_3, 0}
+#define HDMI_VFRMT_1280x1024p60_5_4_TIMING				\
+	{HDMI_VFRMT_1280x1024p60_5_4, 1280, 48, 112, 248, false,	\
+	1024, 1, 3, 38, false, 108000, 60000, false, true, HDMI_RES_AR_5_4, 0}
+#define HDMI_VFRMT_2560x1600p60_16_9_TIMING				\
+	{HDMI_VFRMT_2560x1600p60_16_9, 2560, 48, 32, 80, false,		\
+	 1600, 3, 6, 37, false, 268500, 60000, false, true, HDMI_RES_AR_16_9, 0}
+#define HDMI_EVFRMT_3840x2160p30_16_9_TIMING				\
+	{HDMI_EVFRMT_3840x2160p30_16_9, 3840, 176, 88, 296, false,	\
+	 2160, 8, 10, 72, false, 297000, 30000, false, true, \
+		HDMI_RES_AR_16_9, 0}
+#define HDMI_EVFRMT_3840x2160p25_16_9_TIMING				\
+	{HDMI_EVFRMT_3840x2160p25_16_9, 3840, 1056, 88, 296, false,	\
+	 2160, 8, 10, 72, false, 297000, 25000, false, true, \
+		HDMI_RES_AR_16_9, 0}
+#define HDMI_EVFRMT_3840x2160p24_16_9_TIMING				\
+	{HDMI_EVFRMT_3840x2160p24_16_9, 3840, 1276, 88, 296, false,	\
+	 2160, 8, 10, 72, false, 297000, 24000, false, true, \
+		HDMI_RES_AR_16_9, 0}
+#define HDMI_EVFRMT_4096x2160p24_16_9_TIMING				\
+	{HDMI_EVFRMT_4096x2160p24_16_9, 4096, 1020, 88, 296, false,	\
+	 2160, 8, 10, 72, false, 297000, 24000, false, true, \
+		HDMI_RES_AR_16_9, 0}
+
+#define HDMI_VFRMT_800x600p60_4_3_TIMING				\
+	{HDMI_VFRMT_800x600p60_4_3, 800, 40, 128, 88, false,	\
+	 600, 1, 4, 23, false, 40000, 60000, false, true, HDMI_RES_AR_4_3, 0}
+#define HDMI_VFRMT_848x480p60_16_9_TIMING				\
+	{HDMI_VFRMT_848x480p60_16_9, 848, 16, 112, 112, false,	\
+	 480, 6, 8, 23, false, 33750, 60000, false, true, HDMI_RES_AR_16_9, 0}
+#define HDMI_VFRMT_1280x960p60_4_3_TIMING\
+	{HDMI_VFRMT_1280x960p60_4_3, 1280, 96, 112, 312, false,	\
+	 960, 1, 3, 36, false, 108000, 60000, false, true, HDMI_RES_AR_4_3, 0}
+#define HDMI_VFRMT_1360x768p60_16_9_TIMING\
+	{HDMI_VFRMT_1360x768p60_16_9, 1360, 64, 112, 256, false,	\
+	 768, 3, 6, 18, false, 85500, 60000, false, true, HDMI_RES_AR_16_9, 0}
+#define HDMI_VFRMT_1440x900p60_16_10_TIMING\
+	{HDMI_VFRMT_1440x900p60_16_10, 1440, 48, 32, 80, false,	\
+	 900, 3, 6, 17, true, 88750, 60000, false, true, HDMI_RES_AR_16_10, 0}
+#define HDMI_VFRMT_1400x1050p60_4_3_TIMING\
+	{HDMI_VFRMT_1400x1050p60_4_3, 1400, 48, 32, 80, false,	\
+	 1050, 3, 4, 23, true, 101000, 60000, false, true, HDMI_RES_AR_4_3, 0}
+#define HDMI_VFRMT_1680x1050p60_16_10_TIMING\
+	{HDMI_VFRMT_1680x1050p60_16_10, 1680, 48, 32, 80, false,	\
+	 1050, 3, 6, 21, true, 119000, 60000, false, true, HDMI_RES_AR_16_10, 0}
+#define HDMI_VFRMT_1600x1200p60_4_3_TIMING\
+	{HDMI_VFRMT_1600x1200p60_4_3, 1600, 64, 192, 304, false,	\
+	 1200, 1, 3, 46, false, 162000, 60000, false, true, HDMI_RES_AR_4_3, 0}
+#define HDMI_VFRMT_1920x1200p60_16_10_TIMING\
+	{HDMI_VFRMT_1920x1200p60_16_10, 1920, 48, 32, 80, false,\
+	 1200, 3, 6, 26, true, 154000, 60000, false, true, HDMI_RES_AR_16_10, 0}
+#define HDMI_VFRMT_1366x768p60_16_10_TIMING\
+	{HDMI_VFRMT_1366x768p60_16_10, 1366, 70, 143, 213, false,\
+	 768, 3, 3, 24, false, 85500, 60000, false, true, HDMI_RES_AR_16_10, 0}
+#define HDMI_VFRMT_1280x800p60_16_10_TIMING\
+	{HDMI_VFRMT_1280x800p60_16_10, 1280, 72, 128, 200, true,\
+	 800, 3, 6, 22, false, 83500, 60000, false, true, HDMI_RES_AR_16_10, 0}
+#define HDMI_VFRMT_3840x2160p24_16_9_TIMING                             \
+	{HDMI_VFRMT_3840x2160p24_16_9, 3840, 1276, 88, 296, false,      \
+	 2160, 8, 10, 72, false, 297000, 24000, false, true, \
+		HDMI_RES_AR_16_9, 0}
+#define HDMI_VFRMT_3840x2160p25_16_9_TIMING                             \
+	{HDMI_VFRMT_3840x2160p25_16_9, 3840, 1056, 88, 296, false,      \
+	 2160, 8, 10, 72, false, 297000, 25000, false, true, \
+		HDMI_RES_AR_16_9, 0}
+#define HDMI_VFRMT_3840x2160p30_16_9_TIMING                             \
+	{HDMI_VFRMT_3840x2160p30_16_9, 3840, 176, 88, 296, false,       \
+	 2160, 8, 10, 72, false, 297000, 30000, false, true, \
+		HDMI_RES_AR_16_9, 0}
+#define HDMI_VFRMT_3840x2160p50_16_9_TIMING                             \
+	{HDMI_VFRMT_3840x2160p50_16_9, 3840, 1056, 88, 296, false,      \
+	 2160, 8, 10, 72, false, 594000, 50000, false, true, \
+		HDMI_RES_AR_16_9, 0}
+#define HDMI_VFRMT_3840x2160p60_16_9_TIMING                             \
+	{HDMI_VFRMT_3840x2160p60_16_9, 3840, 176, 88, 296, false,       \
+	 2160, 8, 10, 72, false, 594000, 60000, false, true, \
+		HDMI_RES_AR_16_9, 0}
+
+#define HDMI_VFRMT_4096x2160p24_256_135_TIMING                          \
+	{HDMI_VFRMT_4096x2160p24_256_135, 4096, 1020, 88, 296, false,   \
+	 2160, 8, 10, 72, false, 297000, 24000, false, true, \
+		HDMI_RES_AR_256_135, 0}
+#define HDMI_VFRMT_4096x2160p25_256_135_TIMING                          \
+	{HDMI_VFRMT_4096x2160p25_256_135, 4096, 968, 88, 128, false,    \
+	 2160, 8, 10, 72, false, 297000, 25000, false, true, \
+		HDMI_RES_AR_256_135, 0}
+#define HDMI_VFRMT_4096x2160p30_256_135_TIMING                          \
+	{HDMI_VFRMT_4096x2160p30_256_135, 4096, 88, 88, 128, false,     \
+	 2160, 8, 10, 72, false, 297000, 30000, false, true, \
+		HDMI_RES_AR_256_135, 0}
+#define HDMI_VFRMT_4096x2160p50_256_135_TIMING                          \
+	{HDMI_VFRMT_4096x2160p50_256_135, 4096, 968, 88, 128, false,    \
+	 2160, 8, 10, 72, false, 594000, 50000, false, true, \
+		HDMI_RES_AR_256_135, 0}
+#define HDMI_VFRMT_4096x2160p60_256_135_TIMING                          \
+	{HDMI_VFRMT_4096x2160p60_256_135, 4096, 88, 88, 128, false,     \
+	 2160, 8, 10, 72, false, 594000, 60000, false, true, \
+		HDMI_RES_AR_256_135, 0}
+
+#define HDMI_VFRMT_3840x2160p24_64_27_TIMING                             \
+	{HDMI_VFRMT_3840x2160p24_64_27, 3840, 1276, 88, 296, false,      \
+	 2160, 8, 10, 72, false, 297000, 24000, false, true, \
+		HDMI_RES_AR_64_27, 0}
+#define HDMI_VFRMT_3840x2160p25_64_27_TIMING                             \
+	{HDMI_VFRMT_3840x2160p25_64_27, 3840, 1056, 88, 296, false,      \
+	 2160, 8, 10, 72, false, 297000, 25000, false, true, \
+		HDMI_RES_AR_64_27, 0}
+#define HDMI_VFRMT_3840x2160p30_64_27_TIMING                             \
+	{HDMI_VFRMT_3840x2160p30_64_27, 3840, 176, 88, 296, false,       \
+	 2160, 8, 10, 72, false, 297000, 30000, false, true, \
+		HDMI_RES_AR_64_27, 0}
+#define HDMI_VFRMT_3840x2160p50_64_27_TIMING                             \
+	{HDMI_VFRMT_3840x2160p50_64_27, 3840, 1056, 88, 296, false,      \
+	 2160, 8, 10, 72, false, 594000, 50000, false, true, \
+		HDMI_RES_AR_64_27, 0}
+#define HDMI_VFRMT_3840x2160p60_64_27_TIMING                             \
+	{HDMI_VFRMT_3840x2160p60_64_27, 3840, 176, 88, 296, false,       \
+	 2160, 8, 10, 72, false, 594000, 60000, false, true, \
+		HDMI_RES_AR_64_27, 0}
+#define HDMI_VFRMT_640x480p59_4_3_TIMING                             \
+	{HDMI_VFRMT_640x480p59_4_3, 640, 16, 96, 48, true,       \
+	 480, 10, 2, 33, true, 25170, 59928, false, true, \
+		HDMI_RES_AR_4_3, 1}
+
+
+#define MSM_HDMI_MODES_SET_TIMING(LUT, MODE) do {		\
+	struct msm_hdmi_mode_timing_info mode = MODE##_TIMING;	\
+	LUT[MODE] = mode;\
+	} while (0)
+
+#define MSM_HDMI_MODES_INIT_TIMINGS(__lut)	\
+do {	\
+	unsigned int i;	\
+	for (i = 0; i < HDMI_VFRMT_MAX; i++) {	\
+		struct msm_hdmi_mode_timing_info mode =	\
+			VFRMT_NOT_SUPPORTED(i);	\
+		(__lut)[i] = mode;	\
+	}	\
+} while (0)
+
+#define MSM_HDMI_MODES_SET_SUPP_TIMINGS(__lut, __type)	\
+do {	\
+	if (__type & MSM_HDMI_MODES_CEA) {	\
+		MSM_HDMI_MODES_SET_TIMING(__lut,	\
+			HDMI_VFRMT_640x480p60_4_3);	\
+		MSM_HDMI_MODES_SET_TIMING(__lut,	\
+			HDMI_VFRMT_720x480p60_4_3);	\
+		MSM_HDMI_MODES_SET_TIMING(__lut,	\
+			HDMI_VFRMT_720x480p60_16_9);	\
+		MSM_HDMI_MODES_SET_TIMING(__lut,	\
+			HDMI_VFRMT_1280x720p60_16_9);	\
+		MSM_HDMI_MODES_SET_TIMING(__lut,	\
+			HDMI_VFRMT_1920x1080i60_16_9);	\
+		MSM_HDMI_MODES_SET_TIMING(__lut,	\
+			HDMI_VFRMT_1440x480i60_4_3);	\
+		MSM_HDMI_MODES_SET_TIMING(__lut,	\
+			HDMI_VFRMT_1440x480i60_16_9);	\
+		MSM_HDMI_MODES_SET_TIMING(__lut,	\
+			HDMI_VFRMT_1920x1080p60_16_9);	\
+		MSM_HDMI_MODES_SET_TIMING(__lut,	\
+			HDMI_VFRMT_720x576p50_4_3);	\
+		MSM_HDMI_MODES_SET_TIMING(__lut,	\
+			HDMI_VFRMT_720x576p50_16_9);	\
+		MSM_HDMI_MODES_SET_TIMING(__lut,	\
+			HDMI_VFRMT_1280x720p50_16_9);	\
+		MSM_HDMI_MODES_SET_TIMING(__lut,	\
+			HDMI_VFRMT_1440x576i50_4_3);	\
+		MSM_HDMI_MODES_SET_TIMING(__lut,	\
+			HDMI_VFRMT_1440x576i50_16_9);	\
+		MSM_HDMI_MODES_SET_TIMING(__lut,	\
+			HDMI_VFRMT_1920x1080p50_16_9);	\
+		MSM_HDMI_MODES_SET_TIMING(__lut,	\
+			HDMI_VFRMT_1920x1080p24_16_9);	\
+		MSM_HDMI_MODES_SET_TIMING(__lut,	\
+			HDMI_VFRMT_1920x1080p25_16_9);	\
+		MSM_HDMI_MODES_SET_TIMING(__lut,	\
+			HDMI_VFRMT_1920x1080p30_16_9);	\
+		MSM_HDMI_MODES_SET_TIMING(__lut,	\
+			HDMI_VFRMT_3840x2160p24_16_9);  \
+		MSM_HDMI_MODES_SET_TIMING(__lut,	\
+			HDMI_VFRMT_3840x2160p25_16_9);  \
+		MSM_HDMI_MODES_SET_TIMING(__lut,	\
+			HDMI_VFRMT_3840x2160p30_16_9);  \
+		MSM_HDMI_MODES_SET_TIMING(__lut,	\
+			HDMI_VFRMT_3840x2160p50_16_9);  \
+		MSM_HDMI_MODES_SET_TIMING(__lut,	\
+			HDMI_VFRMT_3840x2160p60_16_9);  \
+		MSM_HDMI_MODES_SET_TIMING(__lut,	\
+			HDMI_VFRMT_4096x2160p24_256_135);\
+		MSM_HDMI_MODES_SET_TIMING(__lut,	\
+			HDMI_VFRMT_4096x2160p25_256_135);\
+		MSM_HDMI_MODES_SET_TIMING(__lut,	\
+			HDMI_VFRMT_4096x2160p30_256_135);\
+		MSM_HDMI_MODES_SET_TIMING(__lut,	\
+			HDMI_VFRMT_4096x2160p50_256_135);\
+		MSM_HDMI_MODES_SET_TIMING(__lut,	\
+			HDMI_VFRMT_4096x2160p60_256_135);\
+		MSM_HDMI_MODES_SET_TIMING(__lut,	\
+			HDMI_VFRMT_3840x2160p24_64_27); \
+		MSM_HDMI_MODES_SET_TIMING(__lut,	\
+			HDMI_VFRMT_3840x2160p25_64_27); \
+		MSM_HDMI_MODES_SET_TIMING(__lut,	\
+			HDMI_VFRMT_3840x2160p30_64_27); \
+		MSM_HDMI_MODES_SET_TIMING(__lut,	\
+			HDMI_VFRMT_3840x2160p50_64_27); \
+		MSM_HDMI_MODES_SET_TIMING(__lut,	\
+			HDMI_VFRMT_3840x2160p60_64_27); \
+		MSM_HDMI_MODES_SET_TIMING(__lut,	\
+			HDMI_VFRMT_640x480p59_4_3); \
+	}	\
+	if (__type & MSM_HDMI_MODES_XTND) {	\
+		MSM_HDMI_MODES_SET_TIMING(__lut,	\
+			HDMI_EVFRMT_3840x2160p30_16_9);	\
+		MSM_HDMI_MODES_SET_TIMING(__lut,	\
+			HDMI_EVFRMT_3840x2160p25_16_9);	\
+		MSM_HDMI_MODES_SET_TIMING(__lut,	\
+			HDMI_EVFRMT_3840x2160p24_16_9);	\
+		MSM_HDMI_MODES_SET_TIMING(__lut,	\
+			HDMI_EVFRMT_4096x2160p24_16_9);	\
+	}	\
+	if (__type & MSM_HDMI_MODES_DVI) {	\
+		MSM_HDMI_MODES_SET_TIMING(__lut,	\
+			HDMI_VFRMT_1024x768p60_4_3);	\
+		MSM_HDMI_MODES_SET_TIMING(__lut,	\
+			HDMI_VFRMT_1280x1024p60_5_4);	\
+		MSM_HDMI_MODES_SET_TIMING(__lut,	\
+			HDMI_VFRMT_2560x1600p60_16_9);	\
+		MSM_HDMI_MODES_SET_TIMING(__lut,	\
+			HDMI_VFRMT_800x600p60_4_3);	\
+		MSM_HDMI_MODES_SET_TIMING(__lut,	\
+			HDMI_VFRMT_848x480p60_16_9);	\
+		MSM_HDMI_MODES_SET_TIMING(__lut,	\
+			HDMI_VFRMT_1280x960p60_4_3);	\
+		MSM_HDMI_MODES_SET_TIMING(__lut,	\
+			HDMI_VFRMT_1360x768p60_16_9);	\
+		MSM_HDMI_MODES_SET_TIMING(__lut,	\
+			HDMI_VFRMT_1440x900p60_16_10);	\
+		MSM_HDMI_MODES_SET_TIMING(__lut,	\
+			HDMI_VFRMT_1400x1050p60_4_3);	\
+		MSM_HDMI_MODES_SET_TIMING(__lut,	\
+			HDMI_VFRMT_1680x1050p60_16_10);	\
+		MSM_HDMI_MODES_SET_TIMING(__lut,	\
+			HDMI_VFRMT_1600x1200p60_4_3);	\
+		MSM_HDMI_MODES_SET_TIMING(__lut,	\
+			HDMI_VFRMT_1920x1200p60_16_10);	\
+		MSM_HDMI_MODES_SET_TIMING(__lut,	\
+			HDMI_VFRMT_1366x768p60_16_10);	\
+		MSM_HDMI_MODES_SET_TIMING(__lut,	\
+			HDMI_VFRMT_1280x800p60_16_10);	\
+	}	\
+} while (0)
+
+#define MSM_HDMI_MODES_GET_DETAILS(mode, MODE) do {		\
+	struct msm_hdmi_mode_timing_info info = MODE##_TIMING;	\
+	*mode = info;						\
+	} while (0)
+
+#endif /* _UAPI_MSM_HDMI_MODES_H__ */
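
The timing macros above are meant to populate a lookup table indexed by
format id. A userspace sketch; BIT() and bool normally come from kernel
headers, so they are supplied here as assumptions:

	#include <stdbool.h>	/* the timing macros use true/false */
	#include <stdio.h>
	#define BIT(n) (1U << (n))	/* normally from linux/bitops.h */
	#include <video/msm_hdmi_modes.h>

	int main(void)
	{
		static struct msm_hdmi_mode_timing_info lut[HDMI_VFRMT_MAX];
		const struct msm_hdmi_mode_timing_info *m;

		MSM_HDMI_MODES_INIT_TIMINGS(lut);
		MSM_HDMI_MODES_SET_SUPP_TIMINGS(lut,
				MSM_HDMI_MODES_CEA | MSM_HDMI_MODES_DVI);

		m = &lut[HDMI_VFRMT_1920x1080p60_16_9];
		if (m->supported)
			printf("1080p60: %u kHz pixel clock, %u Hz refresh\n",
			       m->pixel_freq, m->refresh_rate / 1000);
		return 0;
	}
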
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/include/video/msm_dba.h	2019-01-22 16:16:28.611292662 +0100
@@ -0,0 +1,599 @@
+/*
+ * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _MSM_DBA_H
+#define _MSM_DBA_H
+
+#include <linux/types.h>
+#include <linux/bitops.h>
+
+#define MSM_DBA_CHIP_NAME_MAX_LEN 20
+#define MSM_DBA_CLIENT_NAME_LEN   20
+
+#define MSM_DBA_DEFER_PROPERTY_FLAG 0x1
+#define MSM_DBA_ASYNC_FLAG          0x2
+
+/**
+ * enum msm_dba_callback_event - event types for callback notification
+ * @MSM_DBA_CB_REMOTE_INT: Event associated with remote devices on an interface
+ *			   that supports a bi-directional control channel.
+ * @MSM_DBA_CB_HDCP_LINK_AUTHENTICATED: Authentication session is successful.
+ *					The link is authenticated and encryption
+ *					can be enabled if not enabled already.
+ * @MSM_DBA_CB_HDCP_LINK_UNAUTHENTICATED: A previously authenticated link has
+ *					  failed. The content on the interface
+ *					  is no longer secure.
+ * @MSM_DBA_CB_HPD_CONNECT: Detected a cable connect event.
+ * @MSM_DBA_CB_HPD_DISCONNECT: Detected a cable disconnect event.
+ * @MSM_DBA_CB_VIDEO_FAILURE: Detected a failure with respect to video data on
+ *			      the interface. This is a generic failure and
+ *			      client should request a debug dump to debug the
+ *			      issue. Client can also attempt a reset to recover
+ *			      the device.
+ * @MSM_DBA_CB_AUDIO_FAILURE: Detected a failure with respect to audio data on
+ *			      the interface. This is a generic failure and
+ *			      client should request a debug dump. Client can
+ *			      also attempt a reset to recover the device.
+ * @MSM_DBA_CB_CEC_WRITE_SUCCESS: The asynchronous CEC write request is
+ *				  successful.
+ * @MSM_DBA_CB_CEC_WRITE_FAIL: The asynchronous CEC write request failed.
+ * @MSM_DBA_CB_CEC_READ_PENDING: There is a pending CEC read message.
+ * @MSM_DBA_CB_PRE_RESET: This callback is called just before the device is
+ *			  being reset.
+ * @MSM_DBA_CB_POST_RESET: This callback is called after device reset is
+ *			   complete and the driver has applied back all the
+ *			   properties.
+ *
+ * Clients for this driver can register for receiving callbacks for specific
+ * events. This enum defines the type of events supported by the driver. An
+ * event mask is typically used to denote multiple events.
+ */
+enum msm_dba_callback_event {
+	MSM_DBA_CB_REMOTE_INT = BIT(0),
+	MSM_DBA_CB_HDCP_LINK_AUTHENTICATED = BIT(1),
+	MSM_DBA_CB_HDCP_LINK_UNAUTHENTICATED = BIT(2),
+	MSM_DBA_CB_HPD_CONNECT = BIT(3),
+	MSM_DBA_CB_HPD_DISCONNECT = BIT(4),
+	MSM_DBA_CB_VIDEO_FAILURE = BIT(5),
+	MSM_DBA_CB_AUDIO_FAILURE = BIT(6),
+	MSM_DBA_CB_CEC_WRITE_SUCCESS = BIT(7),
+	MSM_DBA_CB_CEC_WRITE_FAIL = BIT(8),
+	MSM_DBA_CB_CEC_READ_PENDING = BIT(9),
+	MSM_DBA_CB_PRE_RESET = BIT(10),
+	MSM_DBA_CB_POST_RESET = BIT(11),
+};
+
+/**
+ * enum msm_dba_audio_interface_type - audio interface type
+ * @MSM_DBA_AUDIO_I2S_INTERFACE: I2S interface for audio
+ * @MSM_DBA_AUDIO_SPDIF_INTERFACE: SPDIF interface for audio
+ */
+enum msm_dba_audio_interface_type {
+	MSM_DBA_AUDIO_I2S_INTERFACE = BIT(0),
+	MSM_DBA_AUDIO_SPDIF_INTERFACE = BIT(1),
+};
+
+/**
+ * enum msm_dba_audio_format_type - audio format type
+ * @MSM_DBA_AUDIO_FMT_UNCOMPRESSED_LPCM: uncompressed format
+ * @MSM_DBA_AUDIO_FMT_COMPRESSED: compressed formats
+ */
+enum msm_dba_audio_format_type {
+	MSM_DBA_AUDIO_FMT_UNCOMPRESSED_LPCM = BIT(0),
+	MSM_DBA_AUDIO_FMT_COMPRESSED = BIT(1),
+};
+
+/**
+ * enum msm_dba_audio_copyright_type - audio copyright
+ * @MSM_DBA_AUDIO_COPYRIGHT_PROTECTED: copyright protected
+ * @MSM_DBA_AUDIO_COPYRIGHT_NOT_PROTECTED: not copyright protected
+ */
+enum msm_dba_audio_copyright_type {
+	MSM_DBA_AUDIO_COPYRIGHT_PROTECTED = BIT(0),
+	MSM_DBA_AUDIO_COPYRIGHT_NOT_PROTECTED = BIT(1),
+};
+
+/**
+ * enum msm_dba_audio_pre_emphasis_type - pre-emphasis
+ * @MSM_DBA_AUDIO_NO_PRE_EMPHASIS: 2 audio channels w/o pre-emphasis
+ * @MSM_DBA_AUDIO_PRE_EMPHASIS_50_15us: 2 audio channels with 50/15us
+ *					pre-emphasis
+ */
+enum msm_dba_audio_pre_emphasis_type {
+	MSM_DBA_AUDIO_NO_PRE_EMPHASIS = BIT(0),
+	MSM_DBA_AUDIO_PRE_EMPHASIS_50_15us = BIT(1),
+};
+
+/**
+ * enum msm_dba_audio_clock_accuracy - Audio Clock Accuracy
+ * @MSM_DBA_AUDIO_CLOCK_ACCURACY_LVL1: normal accuracy +/-1000 x 10^-6
+ * @MSM_DBA_AUDIO_CLOCK_ACCURACY_LVL2: high accuracy +/- 50 x 10^-6
+ * @MSM_DBA_AUDIO_CLOCK_ACCURACY_LVL3: variable pitch shifted clock
+ */
+enum msm_dba_audio_clock_accuracy {
+	MSM_DBA_AUDIO_CLOCK_ACCURACY_LVL1 = BIT(1),
+	MSM_DBA_AUDIO_CLOCK_ACCURACY_LVL2 = BIT(0),
+	MSM_DBA_AUDIO_CLOCK_ACCURACY_LVL3 = BIT(2),
+};
+
+/**
+ * enum msm_dba_channel_status_source - CS override
+ * @MSM_DBA_AUDIO_CS_SOURCE_I2S_STREAM: use channel status bits from I2S stream
+ * @MSM_DBA_AUDIO_CS_SOURCE_REGISTERS: use channel status bits from registers
+ */
+enum msm_dba_channel_status_source {
+	MSM_DBA_AUDIO_CS_SOURCE_I2S_STREAM,
+	MSM_DBA_AUDIO_CS_SOURCE_REGISTERS
+};
+
+/**
+ * enum msm_dba_audio_sampling_rates_type - audio sampling rates
+ * @MSM_DBA_AUDIO_32KHZ: 32KHz sampling rate
+ * @MSM_DBA_AUDIO_44P1KHZ: 44.1KHz sampling rate
+ * @MSM_DBA_AUDIO_48KHZ: 48KHz sampling rate
+ * @MSM_DBA_AUDIO_88P2KHZ: 88.2KHz sampling rate
+ * @MSM_DBA_AUDIO_96KHZ: 96KHz sampling rate
+ * @MSM_DBA_AUDIO_176P4KHZ: 176.4KHz sampling rate
+ * @MSM_DBA_AUDIO_192KHZ: 192KHz sampling rate
+ */
+enum msm_dba_audio_sampling_rates_type {
+	MSM_DBA_AUDIO_32KHZ = BIT(0),
+	MSM_DBA_AUDIO_44P1KHZ = BIT(1),
+	MSM_DBA_AUDIO_48KHZ = BIT(2),
+	MSM_DBA_AUDIO_88P2KHZ = BIT(3),
+	MSM_DBA_AUDIO_96KHZ = BIT(4),
+	MSM_DBA_AUDIO_176P4KHZ = BIT(5),
+	MSM_DBA_AUDIO_192KHZ = BIT(6),
+};
+
+/**
+ * enum msm_dba_audio_word_bit_depth - audio word size
+ * @MSM_DBA_AUDIO_WORD_16BIT: 16 bits per word
+ * @MSM_DBA_AUDIO_WORD_24BIT: 24 bits per word
+ * @MSM_DBA_AUDIO_WORD_32BIT: 32 bits per word
+ */
+enum msm_dba_audio_word_bit_depth {
+	MSM_DBA_AUDIO_WORD_16BIT = BIT(0),
+	MSM_DBA_AUDIO_WORD_24BIT = BIT(1),
+	MSM_DBA_AUDIO_WORD_32BIT = BIT(2),
+};
+
+/**
+ * enum msm_dba_audio_channel_count - audio channel count
+ * @MSM_DBA_AUDIO_CHANNEL_2: 2 channel audio
+ * @MSM_DBA_AUDIO_CHANNEL_4: 4 channel audio
+ * @MSM_DBA_AUDIO_CHANNEL_8: 8 channel audio
+ */
+enum msm_dba_audio_channel_count {
+	MSM_DBA_AUDIO_CHANNEL_2 = BIT(0),
+	MSM_DBA_AUDIO_CHANNEL_4 = BIT(1),
+	MSM_DBA_AUDIO_CHANNEL_8 = BIT(2),
+};
+
+/**
+ * enum msm_dba_audio_i2s_format - i2s audio data format
+ * @MSM_DBA_AUDIO_I2S_FMT_STANDARD: Standard format
+ * @MSM_DBA_AUDIO_I2S_FMT_RIGHT_JUSTIFIED: i2s data is right justified
+ * @MSM_DBA_AUDIO_I2S_FMT_LEFT_JUSTIFIED: i2s data is left justified
+ * @MSM_DBA_AUDIO_I2S_FMT_AES3_DIRECT: AES signal format
+ */
+enum msm_dba_audio_i2s_format {
+	MSM_DBA_AUDIO_I2S_FMT_STANDARD = 0,
+	MSM_DBA_AUDIO_I2S_FMT_RIGHT_JUSTIFIED,
+	MSM_DBA_AUDIO_I2S_FMT_LEFT_JUSTIFIED,
+	MSM_DBA_AUDIO_I2S_FMT_AES3_DIRECT,
+	MSM_DBA_AUDIO_I2S_FMT_MAX,
+};
+
+enum msm_dba_video_aspect_ratio {
+	MSM_DBA_AR_UNKNOWN = 0,
+	MSM_DBA_AR_4_3,
+	MSM_DBA_AR_5_4,
+	MSM_DBA_AR_16_9,
+	MSM_DBA_AR_16_10,
+	MSM_DBA_AR_64_27,
+	MSM_DBA_AR_256_135,
+	MSM_DBA_AR_MAX
+};
+
+enum msm_dba_audio_word_endian_type {
+	MSM_DBA_AUDIO_WORD_LITTLE_ENDIAN = 0,
+	MSM_DBA_AUDIO_WORD_BIG_ENDIAN,
+	MSM_DBA_AUDIO_WORD_ENDIAN_MAX
+};
+
+/**
+ * enum msm_dba_audio_op_mode - i2s audio operation mode
+ * @MSM_DBA_AUDIO_MODE_MANUAL: Manual mode
+ * @MSM_DBA_AUDIO_MODE_AUTOMATIC: Automatic mode
+ */
+enum msm_dba_audio_op_mode {
+	MSM_DBA_AUDIO_MODE_MANUAL,
+	MSM_DBA_AUDIO_MODE_AUTOMATIC,
+};
+
+/**
+ * typedef msm_dba_cb - prototype for the callback function
+ * @data: Pointer to user data provided with register API
+ * @event: Event type associated with callback. This can be a bitmask.
+ */
+typedef void (*msm_dba_cb)(void *data, enum msm_dba_callback_event event);
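+
+/*
+ * Example callback (an illustrative sketch only; my_panel and its handlers
+ * are hypothetical). Since @event can be a bitmask, each bit is tested:
+ *
+ *	static void my_dba_cb(void *data, enum msm_dba_callback_event event)
+ *	{
+ *		struct my_panel *panel = data;
+ *
+ *		if (event & MSM_DBA_CB_HPD_CONNECT)
+ *			my_panel_handle_connect(panel);
+ *		if (event & MSM_DBA_CB_HPD_DISCONNECT)
+ *			my_panel_handle_disconnect(panel);
+ *	}
+ */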
+
+/**
+ * struct msm_dba_reg_info - Client information used with register API
+ * @client_name: Name of the client for debug purposes
+ * @chip_name: Bridge chip ID
+ * @instance_id: Instance ID of the bridge chip in case of multiple instances
+ * @cb: callback function called in case of events.
+ * @cb_data: pointer to a data structure that will be returned with callback
+ *
+ * The msm_dba_reg_info structure is used to provide information when
+ * registering with the driver. It contains the information required
+ * to identify the specific bridge chip the client wants to use.
+ *
+ * Client should also specify the callback function which needs to be called in
+ * case of events. There is an optional data field which is a pointer that will
+ * be returned as one of the arguments to the callback function. This field can
+ * be NULL if client does not wish to use it.
+ */
+struct msm_dba_reg_info {
+	char client_name[MSM_DBA_CLIENT_NAME_LEN];
+	char chip_name[MSM_DBA_CHIP_NAME_MAX_LEN];
+	u32 instance_id;
+	msm_dba_cb cb;
+	void *cb_data;
+};
+
+/**
+ * struct msm_dba_video_caps_info - video capabilities of the bridge chip
+ * @hdcp_support: if hdcp is supported
+ * @edid_support: if reading edid from sink is supported
+ * @data_lanes_lp_support: if low power mode is supported on data lanes
+ * @clock_lanes_lp_support: If low power mode is supported on clock lanes
+ * @max_pclk_khz: maximum pixel clock supported
+ * @num_of_input_lanes: Number of input data lanes supported by the bridge chip
+ */
+struct msm_dba_video_caps_info {
+	bool hdcp_support;
+	bool edid_support;
+	bool data_lanes_lp_support;
+	bool clock_lanes_lp_support;
+	u32 max_pclk_khz;
+	u32 num_of_input_lanes;
+};
+
+/**
+ * struct msm_dba_audio_caps_info - audio capabilities of the bridge chip
+ * @audio_support: if audio is supported
+ * @audio_rates: audio sampling rates supported
+ * @audio_fmts: audio formats supported
+ */
+struct msm_dba_audio_caps_info {
+	u32 audio_support;
+	u32 audio_rates;
+	u32 audio_fmts;
+};
+
+/**
+ * struct msm_dba_capabilities - general capabilities of the bridge chip
+ * @vid_caps: video capabilities
+ * @aud_caps: audio capabilities
+ * @av_mute_support: av mute support in bridge chip
+ * @deferred_commit_support: support for deferred commit
+ */
+struct msm_dba_capabilities {
+	struct msm_dba_video_caps_info vid_caps;
+	struct msm_dba_audio_caps_info aud_caps;
+	bool av_mute_support;
+	bool deferred_commit_support;
+};
+
+/**
+ * struct msm_dba_audio_cfg - Structure for audio configuration
+ * @interface: Specifies audio interface type. Client should check the
+ *	       capabilities for the interfaces supported by the bridge.
+ * @format: Compressed vs Uncompressed formats.
+ * @channels: Number of channels.
+ * @i2s_fmt: I2S data packing format. This is valid only if interface is I2S.
+ * @sampling_rate: sampling rate of audio data
+ * @word_size: word size
+ * @word_endianness: little or big endian words
+ * @copyright: copyright status of the audio stream
+ * @pre_emphasis: pre-emphasis applied to the audio stream
+ * @clock_accuracy: audio clock accuracy level
+ * @channel_status_source: source of the channel status bits
+ * @mode: manual or automatic audio operation mode
+ * @channel_status_category_code: category code field of the channel status
+ * @channel_status_source_number: source number field of the channel status
+ * @channel_status_v_bit: validity bit of the channel status
+ * @channel_allocation: channel/speaker allocation for the stream
+ * @channel_status_word_length: word length field of the channel status
+ * @n: N parameter for audio clock regeneration
+ * @cts: CTS parameter for audio clock regeneration
+ */
+struct msm_dba_audio_cfg {
+	enum msm_dba_audio_interface_type interface;
+	enum msm_dba_audio_format_type format;
+	enum msm_dba_audio_channel_count channels;
+	enum msm_dba_audio_i2s_format i2s_fmt;
+	enum msm_dba_audio_sampling_rates_type sampling_rate;
+	enum msm_dba_audio_word_bit_depth word_size;
+	enum msm_dba_audio_word_endian_type word_endianness;
+	enum msm_dba_audio_copyright_type copyright;
+	enum msm_dba_audio_pre_emphasis_type pre_emphasis;
+	enum msm_dba_audio_clock_accuracy clock_accuracy;
+	enum msm_dba_channel_status_source channel_status_source;
+	enum msm_dba_audio_op_mode mode;
+
+	u32 channel_status_category_code;
+	u32 channel_status_source_number;
+	u32 channel_status_v_bit;
+	u32 channel_allocation;
+	u32 channel_status_word_length;
+
+	u32 n;
+	u32 cts;
+};
+
+/**
+ * struct msm_dba_video_cfg - video configuration data
+ * @h_active: active width of the video signal
+ * @h_front_porch: horizontal front porch in pixels
+ * @h_pulse_width: pulse width of hsync in pixels
+ * @h_back_porch: horizontal back porch in pixels
+ * @h_polarity: polarity of hsync signal
+ * @v_active: active height of the video signal
+ * @v_front_porch: vertical front porch in lines
+ * @v_pulse_width: pulse width of vsync in lines
+ * @v_back_porch: vertical back porch in lines
+ * @v_polarity: polarity of vsync signal
+ * @pclk_khz: pixel clock in KHz
+ * @interlaced: if video is interlaced
+ * @vic: video identification code
+ * @hdmi_mode: hdmi or dvi mode for the sink
+ * @ar: aspect ratio of the signal
+ * @num_of_input_lanes: number of input lanes in case of DSI/LVDS
+ */
+struct msm_dba_video_cfg {
+	u32  h_active;
+	u32  h_front_porch;
+	u32  h_pulse_width;
+	u32  h_back_porch;
+	bool h_polarity;
+	u32  v_active;
+	u32  v_front_porch;
+	u32  v_pulse_width;
+	u32  v_back_porch;
+	bool v_polarity;
+	u32  pclk_khz;
+	bool interlaced;
+	u32  vic;
+	bool hdmi_mode;
+	enum msm_dba_video_aspect_ratio ar;
+	u32  num_of_input_lanes;
+	u8 scaninfo;
+};
+
+/**
+ * struct msm_dba_ops- operation supported by bridge chip
+ * @get_caps: returns the bridge chip capabilities
+ *	      DEFER and ASYNC flags are not supported.
+ * @power_on: powers on/off the bridge chip. This usually involves turning on
+ *	      the power regulators and bringing the chip out of reset. Chip
+ *	      should be capable of raising interrupts at this point.
+ *	      DEFER and ASYNC flags are supported.
+ * @video_on: turn on/off video stream. This function also requires the video
+ *	      timing information that might be needed for programming the bridge
+ *	      chip.
+ *	      DEFER flag is supported.
+ *	      ASYNC flag is not supported.
+ * @audio_on: turn on/off audio stream.
+ *	      DEFER flag is supported.
+ *	      ASYNC flag is not supported.
+ * @configure_audio: setup audio configuration
+ *		     DEFER flag is supported.
+ *		     ASYNC flag is not supported.
+ * @av_mute: controls av mute functionalities if supported. AV mute is
+ *	     different from audio_on and video_on in that the actual data is
+ *	     still sent; mute is signalled through control packets.
+ *	     DEFER flag is supported.
+ *	     ASYNC flag is not supported.
+ * @interrupts_enable: enables interrupts to get event callbacks. Clients need
+ *		      to specify an event mask of the events they are
+ *		      interested in. If a client provides an event as part of
+ *		      the mask, it will receive the interrupt regardless of the
+ *		      client modifying the property.
+ *		      DEFER flag is supported.
+ *		      ASYNC flag is not supported.
+ * @hdcp_enable: enable/disable hdcp. If HDCP is enabled, this function will
+ *		 start a new authentication session. There is a separate
+ *		 argument for enabling encryption. Encryption can be enabled any
+ *		 time after HDCP has been fully authenticated. This function
+ *		 will support an asynchronous mode where calling this function
+ *		 will kick off HDCP and return to the caller. Caller has to wait
+ *		 for the MSM_DBA_CB_HDCP_LINK_AUTHENTICATED callback to ensure
+ *		 the link is authenticated.
+ *		 DEFER flag is not supported.
+ *		 ASYNC flag is supported.
+ * @hdcp_get_ksv_list_size: returns the KSV list size. In case of a simple sink
+ *			    the size will be 1. In case of a repeater, this can
+ *			    be more than one.
+ *			    DEFER and ASYNC flags are not supported.
+ * @hdcp_get_ksv_list: return the KSV list. Client can query the KSV information
+ *		       from the bridge. Client should call
+ *		       hdcp_get_ksv_list_size first and then allocate 40*size
+ *		       bytes to hold all the KSVs.
+ *		       DEFER and ASYNC flags are not supported.
+ * @hdmi_cec_on: enable or disable cec module. Clients need to enable CEC
+ *		 feature before they read or write CEC messages.
+ * @hdmi_cec_write: perform a CEC write. For bridges with HDMI as output
+ *		    interface, this function allows clients to send a CEC
+ *		    message. Client should pack the data according to the CEC
+ *		    specification and provide the final buffer. Since CEC writes
+ *		    can take a long time to confirm success,
+ *		    this function supports the ASYNC flag. Driver will return
+ *		    either MSM_DBA_CB_CEC_WRITE_SUCCESS or
+ *		    MSM_DBA_CB_CEC_WRITE_FAIL callbacks.
+ *		    DEFER is not supported.
+ *		    ASYNC flag is supported.
+ * @hdmi_cec_read: get a pending CEC read message. In case of an incoming CEC
+ *		   message, driver will return MSM_DBA_CB_CEC_READ_PENDING
+ *		   callback. On getting this event callback, client should call
+ *		   hdmi_cec_read to get the message. The buffer should be at
+ *		   least 15 bytes. Client should read the CEC message from
+ *		   a thread different from the callback.
+ *		   DEFER and ASYNC flags are not supported.
+ * @get_edid_size: returns size of the edid.
+ *		   DEFER and ASYNC flags are not supported.
+ * @get_raw_edid: returns raw edid data.
+ *		   DEFER and ASYNC flags are not supported.
+ * @enable_remote_comm: enable/disable remote communication. Some interfaces
+ *		        like FPDLINK III support a bi-directional control
+ *		        channel that could be used to send control data using an
+ *		        I2C or SPI protocol. This function will enable this
+ *		        control channel if supported.
+ *		        DEFER and ASYNC flags are not supported.
+ * @add_remote_device: add slaves on remote side for enabling communication. For
+ *		       interfaces that support a bi-directional control channel,
+ *		       this function allows clients to specify slave IDs of
+ *		       devices on remote bus. Messages addressed to these IDs
+ *		       will be trapped by the bridge chip and put on the remote
+ *		       bus.
+ *		       DEFER and ASYNC flags are not supported.
+ * @commit_deferred_props: commits deferred properties
+ *			   DEFER and ASYNC flags are not supported.
+ * @force_reset: reset the device forcefully. In case the device goes into a bad
+ *		 state, a client can force reset to try and recover the device.
+ *		 The reset will be applied in spite of different configurations
+ *		 from other clients. Driver will apply all the properties that
+ *		 have been applied so far after the reset is complete. In case
+ *		 of multiple clients, driver will issue a reset callback.
+ * @dump_debug_info: dumps debug information to dmesg.
+ * @check_hpd: check whether a cable is connected. If it is, a notification
+ *		is sent to the display framework.
+ * @set_audio_block: This function will populate the raw audio speaker block
+ *		     data along with the size of each block in the bridge
+ *		     chip buffer.
+ * @get_audio_block: This function will return the raw audio speaker block
+ *		     along with the size of each block.
+ *
+ * The msm_dba_ops structure represents a set of operations that can be
+ * supported by each bridge chip. Depending on the functionality supported by a
+ * specific bridge chip, some of the operations need not be supported. For
+ * example if a bridge chip does not support reading EDID from a sink device,
+ * get_edid_size and get_raw_edid can be NULL.
+ *
+ * Deferring properties: The deferred flag allows us to address any quirks with
+ * respect to specific bridge chips. If there is a need for some properties to
+ * be committed together, e.g. turning on video and audio at the same time, the
+ * deferred flag can be used. Properties that are set using a DEFER flag will
+ * not be committed to hardware until commit_deferred_props() function is
+ * called.
+ *
+ */
+struct msm_dba_ops {
+	int (*get_caps)(void *client,
+			struct msm_dba_capabilities *caps);
+
+	int (*power_on)(void *client,
+			bool on,
+			u32 flags);
+
+	int (*video_on)(void *client,
+			bool on,
+			struct msm_dba_video_cfg *cfg,
+			u32 flags);
+
+	int (*audio_on)(void *client,
+			bool on,
+			u32 flags);
+
+	int (*configure_audio)(void *client,
+			       struct msm_dba_audio_cfg *cfg,
+			       u32 flags);
+
+	int (*av_mute)(void *client,
+		       bool video_mute,
+		       bool audio_mute,
+		       u32 flags);
+
+	int (*interrupts_enable)(void *client,
+				bool on,
+				u32 event_mask,
+				u32 flags);
+
+	int (*hdcp_enable)(void *client,
+			   bool hdcp_on,
+			   bool enc_on,
+			   u32 flags);
+
+	int (*hdcp_get_ksv_list_size)(void *client,
+				      u32 *count,
+				      u32 flags);
+
+	int (*hdcp_get_ksv_list)(void *client,
+				 u32 count,
+				 char *buf,
+				 u32 flags);
+
+	int (*hdmi_cec_on)(void *client,
+			      bool enable,
+			      u32 flags);
+
+	int (*hdmi_cec_write)(void *client,
+			      u32 size,
+			      char *buf,
+			      u32 flags);
+
+	int (*hdmi_cec_read)(void *client,
+			     u32 *size,
+			     char *buf,
+			     u32 flags);
+
+	int (*get_edid_size)(void *client,
+			     u32 *size,
+			     u32 flags);
+
+	int (*get_raw_edid)(void *client,
+			    u32 size,
+			    char *buf,
+			    u32 flags);
+
+	int (*enable_remote_comm)(void *client,
+				  bool on,
+				  u32 flags);
+
+	int (*add_remote_device)(void *client,
+				 u32 *slave_ids,
+				 u32 count,
+				 u32 flags);
+
+	int (*commit_deferred_props)(void *client,
+				    u32 flags);
+
+	int (*force_reset)(void *client, u32 flags);
+	int (*dump_debug_info)(void *client, u32 flags);
+	int (*check_hpd)(void *client, u32 flags);
+	void (*set_audio_block)(void *client, u32 size, void *buf);
+	void (*get_audio_block)(void *client, u32 size, void *buf);
+};
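+
+/*
+ * Deferred-commit example (an illustrative sketch only; error handling is
+ * elided and my_client/my_video_cfg are hypothetical). Both properties are
+ * committed to hardware together by the final call rather than one at a
+ * time:
+ *
+ *	ops->video_on(my_client, true, &my_video_cfg,
+ *		      MSM_DBA_DEFER_PROPERTY_FLAG);
+ *	ops->audio_on(my_client, true, MSM_DBA_DEFER_PROPERTY_FLAG);
+ *	ops->commit_deferred_props(my_client, 0);
+ */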
+
+/**
+ * msm_dba_register_client() - Allows a client to register with the driver.
+ * @info: Client information along with the bridge chip id the client wishes to
+ *	  program.
+ * @ops: Function pointers to bridge chip operations. Some function pointers can
+ *	 be NULL depending on the functionalities supported by bridge chip.
+ *
+ * The register API supports multiple clients to register for the same bridge
+ * chip. If successful, this will return a pointer that should be used as a
+ * handle for all subsequent function calls.
+ */
+void *msm_dba_register_client(struct msm_dba_reg_info *info,
+			      struct msm_dba_ops *ops);
+
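+/*
+ * Registration example (an illustrative sketch only; the names are
+ * hypothetical and it is assumed the driver fills in @ops during
+ * registration):
+ *
+ *	struct msm_dba_ops ops;
+ *	struct msm_dba_reg_info info = {
+ *		.client_name = "my_client",
+ *		.chip_name = "my_bridge",
+ *		.instance_id = 0,
+ *		.cb = my_dba_cb,
+ *		.cb_data = panel,
+ *	};
+ *	void *client = msm_dba_register_client(&info, &ops);
+ *
+ *	if (!IS_ERR_OR_NULL(client))
+ *		ops.power_on(client, true, 0);
+ */
+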
+/**
+ * msm_dba_deregister_client() - Allows client to de-register with the driver.
+ * @client: client handle returned by register API.
+ *
+ * This function will release all the resources used by a particular client. If
+ * it is the only client using the bridge chip, the bridge chip will be powered
+ * down and put into reset.
+ */
+int msm_dba_deregister_client(void *client);
+
+#endif /* _MSM_DBA_H */
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/init/do_mounts_dm.c	2019-01-22 16:16:28.623292771 +0100
@@ -0,0 +1,470 @@
+/* do_mounts_dm.c
+ * Copyright (C) 2010 The Chromium OS Authors <chromium-os-dev@chromium.org>
+ *                    All Rights Reserved.
+ * Based on do_mounts_md.c
+ *
+ * This file is released under the GPL.
+ */
+#include <linux/async.h>
+#include <linux/ctype.h>
+#include <linux/device-mapper.h>
+#include <linux/fs.h>
+#include <linux/string.h>
+#include <linux/delay.h>
+
+#include "do_mounts.h"
+
+#define DM_MAX_DEVICES 256
+#define DM_MAX_TARGETS 256
+#define DM_MAX_NAME 32
+#define DM_MAX_UUID 129
+#define DM_NO_UUID "none"
+
+#define DM_MSG_PREFIX "init"
+
+/* Separators used for parsing the dm= argument. */
+#define DM_FIELD_SEP " "
+#define DM_LINE_SEP ","
+#define DM_ANY_SEP DM_FIELD_SEP DM_LINE_SEP
+
+/*
+ * When the device-mapper and any targets are compiled into the kernel
+ * (not a module), one or more device-mappers may be created and used
+ * as the root device at boot time with the parameters given with the
+ * boot line dm=...
+ *
+ * Multiple device-mappers can be stacked by specifying the number of
+ * devices. A device can have multiple targets if the number of
+ * targets is specified.
+ *
+ * TODO(taysom:defect 32847)
+ * In the future, the <num> field will be mandatory.
+ *
+ * <device>        ::= [<num>] <device-mapper>+
+ * <device-mapper> ::= <head> "," <target>+
+ * <head>          ::= <name> <uuid> <mode> [<num>]
+ * <target>        ::= <start> <length> <type> <options> ","
+ * <mode>          ::= "ro" | "rw"
+ * <uuid>          ::= xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx | "none"
+ * <type>          ::= "verity" | "bootcache" | ...
+ *
+ * Example:
+ * 2 vboot none ro 1,
+ *     0 1768000 bootcache
+ *       device=aa55b119-2a47-8c45-946a-5ac57765011f+1
+ *       signature=76e9be054b15884a9fa85973e9cb274c93afadb6
+ *       cache_start=1768000 max_blocks=100000 size_limit=23 max_trace=20000,
+ *   vroot none ro 1,
+ *     0 1740800 verity payload=254:0 hashtree=254:0 hashstart=1740800 alg=sha1
+ *       root_hexdigest=76e9be054b15884a9fa85973e9cb274c93afadb6
+ *       salt=5b3549d54d6c7a3837b9b81ed72e49463a64c03680c47835bef94d768e5646fe
+ *
+ * Notes:
+ *  1. uuid is a label for the device and we set it to "none".
+ *  2. The <num> field will be optional initially and assumed to be 1.
+ *     Once all the scripts that set these fields have been updated, it will
+ *     be made mandatory.
+ */
+
+struct dm_setup_target {
+	sector_t begin;
+	sector_t length;
+	char *type;
+	char *params;
+	/* simple singly linked list */
+	struct dm_setup_target *next;
+};
+
+struct dm_device {
+	int minor;
+	int ro;
+	char name[DM_MAX_NAME];
+	char uuid[DM_MAX_UUID];
+	unsigned long num_targets;
+	struct dm_setup_target *target;
+	int target_count;
+	struct dm_device *next;
+};
+
+struct dm_option {
+	char *start;
+	char *next;
+	size_t len;
+	char delim;
+};
+
+static struct {
+	unsigned long num_devices;
+	char *str;
+} dm_setup_args __initdata;
+
+static __initdata int dm_early_setup;
+
+static int __init get_dm_option(struct dm_option *opt, const char *accept)
+{
+	char *str = opt->next;
+	char *endp;
+
+	if (!str)
+		return 0;
+
+	str = skip_spaces(str);
+	opt->start = str;
+	endp = strpbrk(str, accept);
+	if (!endp) {  /* act like strchrnul */
+		opt->len = strlen(str);
+		endp = str + opt->len;
+	} else {
+		opt->len = endp - str;
+	}
+	opt->delim = *endp;
+	if (*endp == 0) {
+		/* Don't advance past the nul. */
+		opt->next = endp;
+	} else {
+		opt->next = endp + 1;
+	}
+	return opt->len != 0;
+}
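+
+/*
+ * Illustrative example: with opt->next pointing at "vboot none ro 1,...",
+ * three successive get_dm_option(&opt, DM_FIELD_SEP) calls return the
+ * tokens "vboot", "none" and "ro" in opt->start/opt->len, while opt->delim
+ * records the separator (or nul) that terminated each token.
+ */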
+
+static int __init dm_setup_cleanup(struct dm_device *devices)
+{
+	struct dm_device *dev = devices;
+
+	while (dev) {
+		struct dm_device *old_dev = dev;
+		struct dm_setup_target *target = dev->target;
+		while (target) {
+			struct dm_setup_target *old_target = target;
+			kfree(target->type);
+			kfree(target->params);
+			target = target->next;
+			kfree(old_target);
+			dev->target_count--;
+		}
+		BUG_ON(dev->target_count);
+		dev = dev->next;
+		kfree(old_dev);
+	}
+	return 0;
+}
+
+static char * __init dm_parse_device(struct dm_device *dev, char *str)
+{
+	struct dm_option opt;
+	size_t len;
+
+	/* Grab the logical name of the device to be exported to udev */
+	opt.next = str;
+	if (!get_dm_option(&opt, DM_FIELD_SEP)) {
+		DMERR("failed to parse device name");
+		goto parse_fail;
+	}
+	len = min(opt.len + 1, sizeof(dev->name));
+	strlcpy(dev->name, opt.start, len);  /* includes nul */
+
+	/* Grab the UUID value or "none" */
+	if (!get_dm_option(&opt, DM_FIELD_SEP)) {
+		DMERR("failed to parse device uuid");
+		goto parse_fail;
+	}
+	len = min(opt.len + 1, sizeof(dev->uuid));
+	strlcpy(dev->uuid, opt.start, len);
+
+	/* Determine if the table/device will be read only or read-write */
+	get_dm_option(&opt, DM_ANY_SEP);
+	if (!strncmp("ro", opt.start, opt.len)) {
+		dev->ro = 1;
+	} else if (!strncmp("rw", opt.start, opt.len)) {
+		dev->ro = 0;
+	} else {
+		DMERR("failed to parse table mode");
+		goto parse_fail;
+	}
+
+	/* Optional number field */
+	/* XXX: The <num> field will be mandatory in the next round */
+	if (opt.delim == DM_FIELD_SEP[0]) {
+		if (!get_dm_option(&opt, DM_LINE_SEP))
+			return NULL;
+		dev->num_targets = simple_strtoul(opt.start, NULL, 10);
+	} else {
+		dev->num_targets = 1;
+	}
+	if (dev->num_targets > DM_MAX_TARGETS) {
+		DMERR("too many targets %lu > %d",
+			dev->num_targets, DM_MAX_TARGETS);
+		goto parse_fail;
+	}
+	return opt.next;
+
+parse_fail:
+	return NULL;
+}
+
+static char * __init dm_parse_targets(struct dm_device *dev, char *str)
+{
+	struct dm_option opt;
+	struct dm_setup_target **target = &dev->target;
+	unsigned long num_targets = dev->num_targets;
+	unsigned long i;
+
+	/* Targets are defined as per the table format but with a
+	 * comma as a newline separator. */
+	opt.next = str;
+	for (i = 0; i < num_targets; i++) {
+		*target = kzalloc(sizeof(struct dm_setup_target), GFP_KERNEL);
+		if (!*target) {
+			DMERR("failed to allocate memory for target %s<%ld>",
+				dev->name, i);
+			goto parse_fail;
+		}
+		dev->target_count++;
+
+		if (!get_dm_option(&opt, DM_FIELD_SEP)) {
+			DMERR("failed to parse starting sector"
+				" for target %s<%ld>", dev->name, i);
+			goto parse_fail;
+		}
+		(*target)->begin = simple_strtoull(opt.start, NULL, 10);
+
+		if (!get_dm_option(&opt, DM_FIELD_SEP)) {
+			DMERR("failed to parse length for target %s<%ld>",
+				dev->name, i);
+			goto parse_fail;
+		}
+		(*target)->length = simple_strtoull(opt.start, NULL, 10);
+
+		if (get_dm_option(&opt, DM_FIELD_SEP))
+			(*target)->type = kstrndup(opt.start, opt.len,
+							GFP_KERNEL);
+		if (!((*target)->type)) {
+			DMERR("failed to parse type for target %s<%ld>",
+				dev->name, i);
+			goto parse_fail;
+		}
+		if (get_dm_option(&opt, DM_LINE_SEP))
+			(*target)->params = kstrndup(opt.start, opt.len,
+							GFP_KERNEL);
+		if (!((*target)->params)) {
+			DMERR("failed to parse params for target %s<%ld>",
+				dev->name, i);
+			goto parse_fail;
+		}
+		target = &((*target)->next);
+	}
+	DMDEBUG("parsed %d targets", dev->target_count);
+
+	return opt.next;
+
+parse_fail:
+	return NULL;
+}
+
+static struct dm_device * __init dm_parse_args(void)
+{
+	struct dm_device *devices = NULL;
+	struct dm_device **tail = &devices;
+	struct dm_device *dev;
+	char *str = dm_setup_args.str;
+	unsigned long num_devices = dm_setup_args.num_devices;
+	unsigned long i;
+
+	if (!str)
+		return NULL;
+	for (i = 0; i < num_devices; i++) {
+		dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+		if (!dev) {
+			DMERR("failed to allocate memory for dev");
+			goto error;
+		}
+		*tail = dev;
+		tail = &dev->next;
+		/*
+		 * devices are given minor numbers 0 - n-1
+		 * in the order they are found in the arg
+		 * string.
+		 */
+		dev->minor = i;
+		str = dm_parse_device(dev, str);
+		if (!str)	/* NULL indicates error in parsing, bail */
+			goto error;
+
+		str = dm_parse_targets(dev, str);
+		if (!str)
+			goto error;
+	}
+	return devices;
+error:
+	dm_setup_cleanup(devices);
+	return NULL;
+}
+
+/*
+ * Parse the command-line parameters given to our kernel, but do not
+ * actually try to invoke the DM device now; that is handled by
+ * dm_setup_drives after the low-level disk drivers have initialised.
+ * dm format is described at the top of the file.
+ *
+ * Because dm minor numbers are assigned in ascending order starting with 0,
+ * you can assume the first device is /dev/dm-0, the next device is /dev/dm-1,
+ * and so forth.
+ */
+static int __init dm_setup(char *str)
+{
+	struct dm_option opt;
+	unsigned long num_devices;
+
+	if (!str) {
+		DMDEBUG("str is NULL");
+		goto parse_fail;
+	}
+	opt.next = str;
+	if (!get_dm_option(&opt, DM_FIELD_SEP))
+		goto parse_fail;
+	if (isdigit(opt.start[0])) {	/* XXX: Optional number field */
+		num_devices = simple_strtoul(opt.start, NULL, 10);
+		str = opt.next;
+	} else {
+		num_devices = 1;
+		/* Don't advance str */
+	}
+	if (num_devices > DM_MAX_DEVICES) {
+		DMDEBUG("too many devices %lu > %d",
+			num_devices, DM_MAX_DEVICES);
+	}
+	dm_setup_args.str = str;
+	dm_setup_args.num_devices = num_devices;
+	DMINFO("will configure %lu devices", num_devices);
+	dm_early_setup = 1;
+	return 1;
+
+parse_fail:
+	DMWARN("Invalid arguments supplied to dm=.");
+	return 0;
+}
+
+static void __init dm_setup_drives(void)
+{
+	struct mapped_device *md = NULL;
+	struct dm_table *table = NULL;
+	struct dm_setup_target *target;
+	struct dm_device *dev;
+	char *uuid;
+	fmode_t fmode = FMODE_READ;
+	struct dm_device *devices;
+
+	devices = dm_parse_args();
+
+	for (dev = devices; dev; dev = dev->next) {
+		if (dm_create(dev->minor, &md)) {
+			DMDEBUG("failed to create the device");
+			goto dm_create_fail;
+		}
+		DMDEBUG("created device '%s'", dm_device_name(md));
+
+		/*
+		 * In addition to flagging the table below, the disk must be
+		 * set explicitly ro/rw.
+		 */
+		set_disk_ro(dm_disk(md), dev->ro);
+
+		if (!dev->ro)
+			fmode |= FMODE_WRITE;
+		if (dm_table_create(&table, fmode, dev->target_count, md)) {
+			DMDEBUG("failed to create the table");
+			goto dm_table_create_fail;
+		}
+
+		dm_lock_md_type(md);
+
+		for (target = dev->target; target; target = target->next) {
+			DMINFO("adding target '%llu %llu %s %s'",
+			       (unsigned long long) target->begin,
+			       (unsigned long long) target->length,
+			       target->type, target->params);
+			if (dm_table_add_target(table, target->type,
+						target->begin,
+						target->length,
+						target->params)) {
+				DMDEBUG("failed to add the target"
+					" to the table");
+				goto add_target_fail;
+			}
+		}
+		if (dm_table_complete(table)) {
+			DMDEBUG("failed to complete the table");
+			goto table_complete_fail;
+		}
+
+		/* Suspend the device so that we can bind it to the table. */
+		if (dm_suspend(md, 0)) {
+			DMDEBUG("failed to suspend the device pre-bind");
+			goto suspend_fail;
+		}
+
+		/* Initial table load: acquire type of table. */
+		dm_set_md_type(md, dm_table_get_type(table));
+
+		/* Setup md->queue to reflect md's type. */
+		if (dm_setup_md_queue(md)) {
+			DMWARN("unable to set up device queue for new table.");
+			goto setup_md_queue_fail;
+		}
+
+		/*
+		 * Bind the table to the device. This is the only way
+		 * to associate md->map with the table and set the disk
+		 * capacity directly.
+		 */
+		if (dm_swap_table(md, table)) {  /* should return NULL. */
+			DMDEBUG("failed to bind the device to the table");
+			goto table_bind_fail;
+		}
+
+		/* Finally, resume and the device should be ready. */
+		if (dm_resume(md)) {
+			DMDEBUG("failed to resume the device");
+			goto resume_fail;
+		}
+
+		/* Export the dm device via the ioctl interface */
+		if (!strcmp(DM_NO_UUID, dev->uuid))
+			uuid = NULL;
+		else
+			uuid = dev->uuid;
+		if (dm_ioctl_export(md, dev->name, uuid)) {
+			DMDEBUG("failed to export device with given"
+				" name and uuid");
+			goto export_fail;
+		}
+
+		dm_unlock_md_type(md);
+
+		DMINFO("dm-%d is ready", dev->minor);
+	}
+	dm_setup_cleanup(devices);
+	return;
+
+export_fail:
+resume_fail:
+table_bind_fail:
+setup_md_queue_fail:
+suspend_fail:
+table_complete_fail:
+add_target_fail:
+	dm_unlock_md_type(md);
+dm_table_create_fail:
+	dm_put(md);
+dm_create_fail:
+	DMWARN("starting dm-%d (%s) failed",
+	       dev->minor, dev->name);
+	dm_setup_cleanup(devices);
+}
+
+__setup("dm=", dm_setup);
+
+void __init dm_run_setup(void)
+{
+	if (!dm_early_setup)
+		return;
+	DMINFO("attempting early device configuration.");
+	dm_setup_drives();
+}
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/kernel/power/wakeup_reason.c	2019-01-22 16:16:28.679293278 +0100
@@ -0,0 +1,225 @@
+/*
+ * kernel/power/wakeup_reason.c
+ *
+ * Logs the reasons which caused the kernel to resume from
+ * the suspend mode.
+ *
+ * Copyright (C) 2014 Google, Inc.
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/wakeup_reason.h>
+#include <linux/kernel.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kobject.h>
+#include <linux/sysfs.h>
+#include <linux/init.h>
+#include <linux/spinlock.h>
+#include <linux/notifier.h>
+#include <linux/suspend.h>
+
+
+#define MAX_WAKEUP_REASON_IRQS 32
+static int irq_list[MAX_WAKEUP_REASON_IRQS];
+static int irqcount;
+static bool suspend_abort;
+static char abort_reason[MAX_SUSPEND_ABORT_LEN];
+static struct kobject *wakeup_reason;
+static DEFINE_SPINLOCK(resume_reason_lock);
+
+static ktime_t last_monotime; /* monotonic time before last suspend */
+static ktime_t curr_monotime; /* monotonic time after last suspend */
+static ktime_t last_stime; /* monotonic boottime offset before last suspend */
+static ktime_t curr_stime; /* monotonic boottime offset after last suspend */
+
+static ssize_t last_resume_reason_show(struct kobject *kobj, struct kobj_attribute *attr,
+		char *buf)
+{
+	int irq_no, buf_offset = 0;
+	struct irq_desc *desc;
+	spin_lock(&resume_reason_lock);
+	if (suspend_abort) {
+		buf_offset = sprintf(buf, "Abort: %s", abort_reason);
+	} else {
+		for (irq_no = 0; irq_no < irqcount; irq_no++) {
+			desc = irq_to_desc(irq_list[irq_no]);
+			if (desc && desc->action && desc->action->name)
+				buf_offset += sprintf(buf + buf_offset, "%d %s\n",
+						irq_list[irq_no], desc->action->name);
+			else
+				buf_offset += sprintf(buf + buf_offset, "%d\n",
+						irq_list[irq_no]);
+		}
+	}
+	spin_unlock(&resume_reason_lock);
+	return buf_offset;
+}
+
+static ssize_t last_suspend_time_show(struct kobject *kobj,
+			struct kobj_attribute *attr, char *buf)
+{
+	struct timespec sleep_time;
+	struct timespec total_time;
+	struct timespec suspend_resume_time;
+
+	/*
+	 * total_time is calculated from the monotonic boottime offsets
+	 * because, unlike CLOCK_MONOTONIC, they include the time spent
+	 * in the suspend state.
+	 */
+	total_time = ktime_to_timespec(ktime_sub(curr_stime, last_stime));
+
+	/*
+	 * suspend_resume_time is the monotonic (CLOCK_MONOTONIC) interval
+	 * between the point just before entering suspend and the point just
+	 * after resuming; it excludes the time actually spent suspended.
+	 */
+	suspend_resume_time = ktime_to_timespec(ktime_sub(curr_monotime, last_monotime));
+
+	/* sleep_time = total_time - suspend_resume_time */
+	sleep_time = timespec_sub(total_time, suspend_resume_time);
+
+	/* Export suspend_resume_time and sleep_time in pair here. */
+	return sprintf(buf, "%lu.%09lu %lu.%09lu\n",
+				suspend_resume_time.tv_sec, suspend_resume_time.tv_nsec,
+				sleep_time.tv_sec, sleep_time.tv_nsec);
+}
+
+static struct kobj_attribute resume_reason = __ATTR_RO(last_resume_reason);
+static struct kobj_attribute suspend_time = __ATTR_RO(last_suspend_time);
+
+static struct attribute *attrs[] = {
+	&resume_reason.attr,
+	&suspend_time.attr,
+	NULL,
+};
+static struct attribute_group attr_group = {
+	.attrs = attrs,
+};
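+
+/*
+ * Both attributes appear under /sys/kernel/wakeup_reasons/ (the kobject is
+ * created in wakeup_reason_init() below). last_resume_reason prints one
+ * "<irq> <action name>" line per wakeup interrupt (or "Abort: <reason>"),
+ * and last_suspend_time prints the "<suspend/resume overhead> <sleep time>"
+ * pair in seconds.
+ */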
+
+/*
+ * Logs the wakeup reason to the kernel log and
+ * stores the irq to expose it to userspace via sysfs.
+ */
+void log_wakeup_reason(int irq)
+{
+	struct irq_desc *desc;
+	desc = irq_to_desc(irq);
+	if (desc && desc->action && desc->action->name)
+		printk(KERN_INFO "Resume caused by IRQ %d, %s\n", irq,
+				desc->action->name);
+	else
+		printk(KERN_INFO "Resume caused by IRQ %d\n", irq);
+
+	spin_lock(&resume_reason_lock);
+	if (irqcount == MAX_WAKEUP_REASON_IRQS) {
+		spin_unlock(&resume_reason_lock);
+		printk(KERN_WARNING "Resume caused by more than %d IRQs\n",
+				MAX_WAKEUP_REASON_IRQS);
+		return;
+	}
+
+	irq_list[irqcount++] = irq;
+	spin_unlock(&resume_reason_lock);
+}
+
+int check_wakeup_reason(int irq)
+{
+	int irq_no;
+	int ret = false;
+
+	spin_lock(&resume_reason_lock);
+	for (irq_no = 0; irq_no < irqcount; irq_no++) {
+		if (irq_list[irq_no] == irq) {
+			ret = true;
+			break;
+		}
+	}
+	spin_unlock(&resume_reason_lock);
+	return ret;
+}
+
+void log_suspend_abort_reason(const char *fmt, ...)
+{
+	va_list args;
+
+	spin_lock(&resume_reason_lock);
+
+	/* Suspend abort reason has already been logged. */
+	if (suspend_abort) {
+		spin_unlock(&resume_reason_lock);
+		return;
+	}
+
+	suspend_abort = true;
+	va_start(args, fmt);
+	vsnprintf(abort_reason, MAX_SUSPEND_ABORT_LEN, fmt, args);
+	va_end(args);
+	spin_unlock(&resume_reason_lock);
+}
+
+/* Detects a suspend and clears all the previous wakeup reasons. */
+static int wakeup_reason_pm_event(struct notifier_block *notifier,
+		unsigned long pm_event, void *unused)
+{
+	switch (pm_event) {
+	case PM_SUSPEND_PREPARE:
+		spin_lock(&resume_reason_lock);
+		irqcount = 0;
+		suspend_abort = false;
+		spin_unlock(&resume_reason_lock);
+		/* monotonic time since boot */
+		last_monotime = ktime_get();
+		/* monotonic time since boot including the time spent in suspend */
+		last_stime = ktime_get_boottime();
+		break;
+	case PM_POST_SUSPEND:
+		/* monotonic time since boot */
+		curr_monotime = ktime_get();
+		/* monotonic time since boot including the time spent in suspend */
+		curr_stime = ktime_get_boottime();
+		break;
+	default:
+		break;
+	}
+	return NOTIFY_DONE;
+}
+
+static struct notifier_block wakeup_reason_pm_notifier_block = {
+	.notifier_call = wakeup_reason_pm_event,
+};
+
+/*
+ * Initializes the sysfs attributes and
+ * registers the pm_event notifier.
+ */
+int __init wakeup_reason_init(void)
+{
+	int retval;
+
+	retval = register_pm_notifier(&wakeup_reason_pm_notifier_block);
+	if (retval)
+		printk(KERN_WARNING "[%s] failed to register PM notifier %d\n",
+				__func__, retval);
+
+	wakeup_reason = kobject_create_and_add("wakeup_reasons", kernel_kobj);
+	if (!wakeup_reason) {
+		printk(KERN_WARNING "[%s] failed to create a sysfs kobject\n",
+				__func__);
+		return 1;
+	}
+	retval = sysfs_create_group(wakeup_reason, &attr_group);
+	if (retval) {
+		kobject_put(wakeup_reason);
+		printk(KERN_WARNING "[%s] failed to create a sysfs group %d\n",
+				__func__, retval);
+	}
+	return 0;
+}
+
+late_initcall(wakeup_reason_init);
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/kernel/sched/boost.c	2019-01-22 16:16:28.691293387 +0100
@@ -0,0 +1,217 @@
+/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include "sched.h"
+#include <linux/of.h>
+#include <linux/sched/core_ctl.h>
+#include <trace/events/sched.h>
+
+/*
+ * Scheduler boost is a mechanism to temporarily place tasks on CPUs
+ * with a higher capacity than those where the tasks' load
+ * characteristics would normally have placed them. Any entity enabling
+ * boost is responsible for disabling it as well.
+ */
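+
+/*
+ * Illustrative usage: a client turns a boost type on and must later turn
+ * it off again, e.g.
+ *
+ *	sched_set_boost(FULL_THROTTLE_BOOST);
+ *	...latency-critical activity...
+ *	sched_set_boost(NO_BOOST);
+ */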
+
+unsigned int sysctl_sched_boost;
+static enum sched_boost_policy boost_policy;
+static enum sched_boost_policy boost_policy_dt = SCHED_BOOST_NONE;
+static DEFINE_MUTEX(boost_mutex);
+static unsigned int freq_aggr_threshold_backup;
+
+static inline void boost_kick(int cpu)
+{
+	struct rq *rq = cpu_rq(cpu);
+
+	if (!test_and_set_bit(BOOST_KICK, &rq->hmp_flags))
+		smp_send_reschedule(cpu);
+}
+
+static void boost_kick_cpus(void)
+{
+	int i;
+	struct cpumask kick_mask;
+
+	if (boost_policy != SCHED_BOOST_ON_BIG)
+		return;
+
+	cpumask_andnot(&kick_mask, cpu_online_mask, cpu_isolated_mask);
+
+	for_each_cpu(i, &kick_mask) {
+		if (cpu_capacity(i) != max_capacity)
+			boost_kick(i);
+	}
+}
+
+int got_boost_kick(void)
+{
+	int cpu = smp_processor_id();
+	struct rq *rq = cpu_rq(cpu);
+
+	return test_bit(BOOST_KICK, &rq->hmp_flags);
+}
+
+void clear_boost_kick(int cpu)
+{
+	struct rq *rq = cpu_rq(cpu);
+
+	clear_bit(BOOST_KICK, &rq->hmp_flags);
+}
+
+/*
+ * Scheduler boost type and boost policy might at first seem unrelated,
+ * however, there exists a connection between them that will allow us
+ * to use them interchangeably during placement decisions. We'll explain
+ * the connection here in one possible way so that the implications are
+ * clear when looking at placement policies.
+ *
+ * When policy = SCHED_BOOST_NONE, type is either none or RESTRAINED
+ * When policy = SCHED_BOOST_ON_ALL or SCHED_BOOST_ON_BIG, type can
+ * neither be none nor RESTRAINED.
+ */
+static void set_boost_policy(int type)
+{
+	if (type == SCHED_BOOST_NONE || type == RESTRAINED_BOOST) {
+		boost_policy = SCHED_BOOST_NONE;
+		return;
+	}
+
+	if (boost_policy_dt) {
+		boost_policy = boost_policy_dt;
+		return;
+	}
+
+	if (min_possible_efficiency != max_possible_efficiency) {
+		boost_policy = SCHED_BOOST_ON_BIG;
+		return;
+	}
+
+	boost_policy = SCHED_BOOST_ON_ALL;
+}
+
+enum sched_boost_policy sched_boost_policy(void)
+{
+	return boost_policy;
+}
+
+static bool verify_boost_params(int old_val, int new_val)
+{
+	/*
+	 * Boost can only be turned on or off. There is no possibility of
+	 * switching from one boost type to another or of setting the same
+	 * kind of boost several times.
+	 */
+	return !(!!old_val == !!new_val);
+}
+
+static void _sched_set_boost(int old_val, int type)
+{
+	switch (type) {
+	case NO_BOOST:
+		if (old_val == FULL_THROTTLE_BOOST)
+			core_ctl_set_boost(false);
+		else if (old_val == CONSERVATIVE_BOOST)
+			restore_cgroup_boost_settings();
+		else
+			update_freq_aggregate_threshold(
+				freq_aggr_threshold_backup);
+		break;
+
+	case FULL_THROTTLE_BOOST:
+		core_ctl_set_boost(true);
+		boost_kick_cpus();
+		break;
+
+	case CONSERVATIVE_BOOST:
+		update_cgroup_boost_settings();
+		boost_kick_cpus();
+		break;
+
+	case RESTRAINED_BOOST:
+		freq_aggr_threshold_backup =
+			update_freq_aggregate_threshold(1);
+		break;
+
+	default:
+		WARN_ON(1);
+		return;
+	}
+
+	set_boost_policy(type);
+	sysctl_sched_boost = type;
+	trace_sched_set_boost(type);
+}
+
+void sched_boost_parse_dt(void)
+{
+	struct device_node *sn;
+	const char *boost_policy;
+
+	sn = of_find_node_by_path("/sched-hmp");
+	if (!sn)
+		return;
+
+	if (!of_property_read_string(sn, "boost-policy", &boost_policy)) {
+		if (!strcmp(boost_policy, "boost-on-big"))
+			boost_policy_dt = SCHED_BOOST_ON_BIG;
+		else if (!strcmp(boost_policy, "boost-on-all"))
+			boost_policy_dt = SCHED_BOOST_ON_ALL;
+	}
+}
+
+int sched_set_boost(int type)
+{
+	int ret = 0;
+
+	mutex_lock(&boost_mutex);
+
+	if (verify_boost_params(sysctl_sched_boost, type))
+		_sched_set_boost(sysctl_sched_boost, type);
+	else
+		ret = -EINVAL;
+
+	mutex_unlock(&boost_mutex);
+	return ret;
+}
+
+int sched_boost_handler(struct ctl_table *table, int write,
+		void __user *buffer, size_t *lenp,
+		loff_t *ppos)
+{
+	int ret;
+	unsigned int *data = (unsigned int *)table->data;
+	unsigned int old_val;
+
+	mutex_lock(&boost_mutex);
+
+	old_val = *data;
+	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
+
+	if (ret || !write)
+		goto done;
+
+	if (verify_boost_params(old_val, *data)) {
+		_sched_set_boost(old_val, *data);
+	} else {
+		*data = old_val;
+		ret = -EINVAL;
+	}
+
+done:
+	mutex_unlock(&boost_mutex);
+	return ret;
+}
+
+int sched_boost(void)
+{
+	return sysctl_sched_boost;
+}
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/kernel/sched/core_ctl.c	2019-01-22 16:16:28.695293423 +0100
@@ -0,0 +1,1169 @@
+/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt)	"core_ctl: " fmt
+
+#include <linux/init.h>
+#include <linux/notifier.h>
+#include <linux/cpu.h>
+#include <linux/cpumask.h>
+#include <linux/cpufreq.h>
+#include <linux/kthread.h>
+#include <linux/sched.h>
+#include <linux/sched/rt.h>
+
+#include <trace/events/sched.h>
+
+#define MAX_CPUS_PER_CLUSTER 4
+#define MAX_CLUSTERS 2
+
+struct cluster_data {
+	bool inited;
+	unsigned int min_cpus;
+	unsigned int max_cpus;
+	unsigned int offline_delay_ms;
+	unsigned int busy_up_thres[MAX_CPUS_PER_CLUSTER];
+	unsigned int busy_down_thres[MAX_CPUS_PER_CLUSTER];
+	unsigned int active_cpus;
+	unsigned int num_cpus;
+	unsigned int nr_isolated_cpus;
+	cpumask_t cpu_mask;
+	unsigned int need_cpus;
+	unsigned int task_thres;
+	unsigned int max_nr;
+	s64 need_ts;
+	struct list_head lru;
+	bool pending;
+	spinlock_t pending_lock;
+	bool is_big_cluster;
+	bool enable;
+	int nrrun;
+	bool nrrun_changed;
+	struct task_struct *core_ctl_thread;
+	unsigned int first_cpu;
+	unsigned int boost;
+	struct kobject kobj;
+};
+
+struct cpu_data {
+	bool is_busy;
+	unsigned int busy;
+	unsigned int cpu;
+	bool not_preferred;
+	struct cluster_data *cluster;
+	struct list_head sib;
+	bool isolated_by_us;
+	unsigned int max_nr;
+};
+
+static DEFINE_PER_CPU(struct cpu_data, cpu_state);
+static struct cluster_data cluster_state[MAX_CLUSTERS];
+static unsigned int num_clusters;
+
+#define for_each_cluster(cluster, idx) \
+	for ((cluster) = &cluster_state[idx]; (idx) < num_clusters;\
+		(idx)++, (cluster) = &cluster_state[idx])
+
+static DEFINE_SPINLOCK(state_lock);
+static void apply_need(struct cluster_data *state);
+static void wake_up_core_ctl_thread(struct cluster_data *state);
+static bool initialized;
+
+static unsigned int get_active_cpu_count(const struct cluster_data *cluster);
+
+/* ========================= sysfs interface =========================== */
+
+static ssize_t store_min_cpus(struct cluster_data *state,
+				const char *buf, size_t count)
+{
+	unsigned int val;
+
+	if (sscanf(buf, "%u\n", &val) != 1)
+		return -EINVAL;
+
+	state->min_cpus = min(val, state->max_cpus);
+	wake_up_core_ctl_thread(state);
+
+	return count;
+}
+
+static ssize_t show_min_cpus(const struct cluster_data *state, char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%u\n", state->min_cpus);
+}
+
+static ssize_t store_max_cpus(struct cluster_data *state,
+				const char *buf, size_t count)
+{
+	unsigned int val;
+
+	if (sscanf(buf, "%u\n", &val) != 1)
+		return -EINVAL;
+
+	val = min(val, state->num_cpus);
+	state->max_cpus = val;
+	state->min_cpus = min(state->min_cpus, state->max_cpus);
+	wake_up_core_ctl_thread(state);
+
+	return count;
+}
+
+static ssize_t show_max_cpus(const struct cluster_data *state, char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%u\n", state->max_cpus);
+}
+
+static ssize_t store_offline_delay_ms(struct cluster_data *state,
+					const char *buf, size_t count)
+{
+	unsigned int val;
+
+	if (sscanf(buf, "%u\n", &val) != 1)
+		return -EINVAL;
+
+	state->offline_delay_ms = val;
+	apply_need(state);
+
+	return count;
+}
+
+static ssize_t show_task_thres(const struct cluster_data *state, char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%u\n", state->task_thres);
+}
+
+static ssize_t store_task_thres(struct cluster_data *state,
+				const char *buf, size_t count)
+{
+	unsigned int val;
+
+	if (sscanf(buf, "%u\n", &val) != 1)
+		return -EINVAL;
+
+	if (val < state->num_cpus)
+		return -EINVAL;
+
+	state->task_thres = val;
+	apply_need(state);
+
+	return count;
+}
+
+static ssize_t show_offline_delay_ms(const struct cluster_data *state,
+				     char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%u\n", state->offline_delay_ms);
+}
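+
+/*
+ * busy_up_thres and busy_down_thres accept either a single value, which is
+ * then applied to every CPU in the cluster, or one value per CPU, e.g.
+ * (illustrative):
+ *
+ *	echo 60 > busy_up_thres
+ *	echo 40 50 60 70 > busy_up_thres
+ */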
+
+static ssize_t store_busy_up_thres(struct cluster_data *state,
+					const char *buf, size_t count)
+{
+	unsigned int val[MAX_CPUS_PER_CLUSTER];
+	int ret, i;
+
+	ret = sscanf(buf, "%u %u %u %u\n", &val[0], &val[1], &val[2], &val[3]);
+	if (ret != 1 && ret != state->num_cpus)
+		return -EINVAL;
+
+	if (ret == 1) {
+		for (i = 0; i < state->num_cpus; i++)
+			state->busy_up_thres[i] = val[0];
+	} else {
+		for (i = 0; i < state->num_cpus; i++)
+			state->busy_up_thres[i] = val[i];
+	}
+	apply_need(state);
+	return count;
+}
+
+static ssize_t show_busy_up_thres(const struct cluster_data *state, char *buf)
+{
+	int i, count = 0;
+
+	for (i = 0; i < state->num_cpus; i++)
+		count += snprintf(buf + count, PAGE_SIZE - count, "%u ",
+				  state->busy_up_thres[i]);
+
+	count += snprintf(buf + count, PAGE_SIZE - count, "\n");
+	return count;
+}
+
+static ssize_t store_busy_down_thres(struct cluster_data *state,
+					const char *buf, size_t count)
+{
+	unsigned int val[MAX_CPUS_PER_CLUSTER];
+	int ret, i;
+
+	ret = sscanf(buf, "%u %u %u %u\n", &val[0], &val[1], &val[2], &val[3]);
+	if (ret != 1 && ret != state->num_cpus)
+		return -EINVAL;
+
+	if (ret == 1) {
+		for (i = 0; i < state->num_cpus; i++)
+			state->busy_down_thres[i] = val[0];
+	} else {
+		for (i = 0; i < state->num_cpus; i++)
+			state->busy_down_thres[i] = val[i];
+	}
+	apply_need(state);
+	return count;
+}
+
+static ssize_t show_busy_down_thres(const struct cluster_data *state, char *buf)
+{
+	int i, count = 0;
+
+	for (i = 0; i < state->num_cpus; i++)
+		count += snprintf(buf + count, PAGE_SIZE - count, "%u ",
+				  state->busy_down_thres[i]);
+
+	count += snprintf(buf + count, PAGE_SIZE - count, "\n");
+	return count;
+}
+
+static ssize_t store_is_big_cluster(struct cluster_data *state,
+				const char *buf, size_t count)
+{
+	unsigned int val;
+
+	if (sscanf(buf, "%u\n", &val) != 1)
+		return -EINVAL;
+
+	state->is_big_cluster = val ? 1 : 0;
+	return count;
+}
+
+static ssize_t show_is_big_cluster(const struct cluster_data *state, char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%u\n", state->is_big_cluster);
+}
+
+static ssize_t store_enable(struct cluster_data *state,
+				const char *buf, size_t count)
+{
+	unsigned int val;
+	bool bval;
+
+	if (sscanf(buf, "%u\n", &val) != 1)
+		return -EINVAL;
+
+	bval = !!val;
+	if (bval != state->enable) {
+		state->enable = bval;
+		apply_need(state);
+	}
+
+	return count;
+}
+
+static ssize_t show_enable(const struct cluster_data *state, char *buf)
+{
+	return scnprintf(buf, PAGE_SIZE, "%u\n", state->enable);
+}
+
+static ssize_t show_need_cpus(const struct cluster_data *state, char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%u\n", state->need_cpus);
+}
+
+static ssize_t show_active_cpus(const struct cluster_data *state, char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%u\n", state->active_cpus);
+}
+
+static ssize_t show_global_state(const struct cluster_data *state, char *buf)
+{
+	struct cpu_data *c;
+	struct cluster_data *cluster;
+	ssize_t count = 0;
+	unsigned int cpu;
+
+	spin_lock_irq(&state_lock);
+	for_each_possible_cpu(cpu) {
+		c = &per_cpu(cpu_state, cpu);
+		cluster = c->cluster;
+		if (!cluster || !cluster->inited)
+			continue;
+
+		count += snprintf(buf + count, PAGE_SIZE - count,
+					"CPU%u\n", cpu);
+		count += snprintf(buf + count, PAGE_SIZE - count,
+					"\tCPU: %u\n", c->cpu);
+		count += snprintf(buf + count, PAGE_SIZE - count,
+					"\tOnline: %u\n",
+					cpu_online(c->cpu));
+		count += snprintf(buf + count, PAGE_SIZE - count,
+					"\tIsolated: %u\n",
+					cpu_isolated(c->cpu));
+		count += snprintf(buf + count, PAGE_SIZE - count,
+					"\tFirst CPU: %u\n",
+						cluster->first_cpu);
+		count += snprintf(buf + count, PAGE_SIZE - count,
+					"\tBusy%%: %u\n", c->busy);
+		count += snprintf(buf + count, PAGE_SIZE - count,
+					"\tIs busy: %u\n", c->is_busy);
+		count += snprintf(buf + count, PAGE_SIZE - count,
+					"\tNot preferred: %u\n",
+						c->not_preferred);
+		count += snprintf(buf + count, PAGE_SIZE - count,
+					"\tNr running: %u\n", cluster->nrrun);
+		count += snprintf(buf + count, PAGE_SIZE - count,
+			"\tActive CPUs: %u\n", get_active_cpu_count(cluster));
+		count += snprintf(buf + count, PAGE_SIZE - count,
+				"\tNeed CPUs: %u\n", cluster->need_cpus);
+		count += snprintf(buf + count, PAGE_SIZE - count,
+				"\tNr isolated CPUs: %u\n",
+						cluster->nr_isolated_cpus);
+		count += snprintf(buf + count, PAGE_SIZE - count,
+				"\tBoost: %u\n", (unsigned int) cluster->boost);
+	}
+	spin_unlock_irq(&state_lock);
+
+	return count;
+}
+
+static ssize_t store_not_preferred(struct cluster_data *state,
+				   const char *buf, size_t count)
+{
+	struct cpu_data *c;
+	unsigned int i;
+	unsigned int val[MAX_CPUS_PER_CLUSTER];
+	unsigned long flags;
+	int ret;
+
+	ret = sscanf(buf, "%u %u %u %u\n", &val[0], &val[1], &val[2], &val[3]);
+	if (ret != state->num_cpus)
+		return -EINVAL;
+
+	spin_lock_irqsave(&state_lock, flags);
+	for (i = 0; i < state->num_cpus; i++) {
+		c = &per_cpu(cpu_state, i + state->first_cpu);
+		c->not_preferred = val[i];
+	}
+	spin_unlock_irqrestore(&state_lock, flags);
+
+	return count;
+}
+
+static ssize_t show_not_preferred(const struct cluster_data *state, char *buf)
+{
+	struct cpu_data *c;
+	ssize_t count = 0;
+	unsigned long flags;
+	int i;
+
+	spin_lock_irqsave(&state_lock, flags);
+	for (i = 0; i < state->num_cpus; i++) {
+		c = &per_cpu(cpu_state, i + state->first_cpu);
+		count += scnprintf(buf + count, PAGE_SIZE - count,
+				"CPU#%d: %u\n", c->cpu, c->not_preferred);
+	}
+	spin_unlock_irqrestore(&state_lock, flags);
+
+	return count;
+}
+
+
+struct core_ctl_attr {
+	struct attribute attr;
+	ssize_t (*show)(const struct cluster_data *, char *);
+	ssize_t (*store)(struct cluster_data *, const char *, size_t count);
+};
+
+#define core_ctl_attr_ro(_name)		\
+static struct core_ctl_attr _name =	\
+__ATTR(_name, 0444, show_##_name, NULL)
+
+#define core_ctl_attr_rw(_name)			\
+static struct core_ctl_attr _name =		\
+__ATTR(_name, 0644, show_##_name, store_##_name)
+
+core_ctl_attr_rw(min_cpus);
+core_ctl_attr_rw(max_cpus);
+core_ctl_attr_rw(offline_delay_ms);
+core_ctl_attr_rw(busy_up_thres);
+core_ctl_attr_rw(busy_down_thres);
+core_ctl_attr_rw(task_thres);
+core_ctl_attr_rw(is_big_cluster);
+core_ctl_attr_ro(need_cpus);
+core_ctl_attr_ro(active_cpus);
+core_ctl_attr_ro(global_state);
+core_ctl_attr_rw(not_preferred);
+core_ctl_attr_rw(enable);
+
+static struct attribute *default_attrs[] = {
+	&min_cpus.attr,
+	&max_cpus.attr,
+	&offline_delay_ms.attr,
+	&busy_up_thres.attr,
+	&busy_down_thres.attr,
+	&task_thres.attr,
+	&is_big_cluster.attr,
+	&enable.attr,
+	&need_cpus.attr,
+	&active_cpus.attr,
+	&global_state.attr,
+	&not_preferred.attr,
+	NULL
+};
+
+#define to_cluster_data(k) container_of(k, struct cluster_data, kobj)
+#define to_attr(a) container_of(a, struct core_ctl_attr, attr)
+static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
+{
+	struct cluster_data *data = to_cluster_data(kobj);
+	struct core_ctl_attr *cattr = to_attr(attr);
+	ssize_t ret = -EIO;
+
+	if (cattr->show)
+		ret = cattr->show(data, buf);
+
+	return ret;
+}
+
+static ssize_t store(struct kobject *kobj, struct attribute *attr,
+		     const char *buf, size_t count)
+{
+	struct cluster_data *data = to_cluster_data(kobj);
+	struct core_ctl_attr *cattr = to_attr(attr);
+	ssize_t ret = -EIO;
+
+	if (cattr->store)
+		ret = cattr->store(data, buf, count);
+
+	return ret;
+}
+
+static const struct sysfs_ops sysfs_ops = {
+	.show	= show,
+	.store	= store,
+};
+
+static struct kobj_type ktype_core_ctl = {
+	.sysfs_ops	= &sysfs_ops,
+	.default_attrs	= default_attrs,
+};
+
+/* ==================== runqueue based core count =================== */
+
+#define RQ_AVG_TOLERANCE 2
+#define RQ_AVG_DEFAULT_MS 20
+static unsigned int rq_avg_period_ms = RQ_AVG_DEFAULT_MS;
+
+static s64 rq_avg_timestamp_ms;
+
+static void update_running_avg(bool trigger_update)
+{
+	int avg, iowait_avg, big_avg, old_nrrun;
+	int old_max_nr, max_nr, big_max_nr;
+	s64 now;
+	unsigned long flags;
+	struct cluster_data *cluster;
+	unsigned int index = 0;
+
+	spin_lock_irqsave(&state_lock, flags);
+
+	now = ktime_to_ms(ktime_get());
+	if (now - rq_avg_timestamp_ms < rq_avg_period_ms - RQ_AVG_TOLERANCE) {
+		spin_unlock_irqrestore(&state_lock, flags);
+		return;
+	}
+	rq_avg_timestamp_ms = now;
+	sched_get_nr_running_avg(&avg, &iowait_avg, &big_avg,
+				 &max_nr, &big_max_nr);
+
+	spin_unlock_irqrestore(&state_lock, flags);
+
+	for_each_cluster(cluster, index) {
+		if (!cluster->inited)
+			continue;
+
+		old_nrrun = cluster->nrrun;
+		old_max_nr = cluster->max_nr;
+		cluster->nrrun = cluster->is_big_cluster ? big_avg : avg;
+		cluster->max_nr = cluster->is_big_cluster ? big_max_nr : max_nr;
+
+		if (cluster->nrrun != old_nrrun ||
+			cluster->max_nr != old_max_nr) {
+
+			if (trigger_update)
+				apply_need(cluster);
+			else
+				cluster->nrrun_changed = true;
+		}
+	}
+}
+
+#define MAX_NR_THRESHOLD	4
+/* adjust needed CPUs based on current runqueue information */
+static unsigned int apply_task_need(const struct cluster_data *cluster,
+				    unsigned int new_need)
+{
+	/* unisolate all cores if there are enough tasks */
+	if (cluster->nrrun >= cluster->task_thres)
+		return cluster->num_cpus;
+
+	/* only unisolate more cores if there are tasks to run */
+	if (cluster->nrrun > new_need)
+		new_need = new_need + 1;
+
+	/*
+	 * We don't want tasks to be overcrowded in a cluster. If any CPU
+	 * has had more than MAX_NR_THRESHOLD tasks in the last window,
+	 * bring another CPU in to help out.
+	 */
+	if (cluster->max_nr > MAX_NR_THRESHOLD)
+		new_need = new_need + 1;
+
+	return new_need;
+}
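+
+/*
+ * A worked example with made-up numbers: with task_thres = 6, nrrun = 3,
+ * max_nr = 5 and new_need = 2 coming in from the busy-threshold pass,
+ * nrrun < task_thres so we don't unisolate everything; nrrun (3) >
+ * new_need (2) adds one CPU, and max_nr (5) > MAX_NR_THRESHOLD (4) adds
+ * another, so 4 CPUs are requested, still subject to apply_limits().
+ */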
+
+/* ======================= load based core count ======================= */
+
+static unsigned int apply_limits(const struct cluster_data *cluster,
+				 unsigned int need_cpus)
+{
+	return min(max(cluster->min_cpus, need_cpus), cluster->max_cpus);
+}
+
+static unsigned int get_active_cpu_count(const struct cluster_data *cluster)
+{
+	return cluster->num_cpus -
+				sched_isolate_count(&cluster->cpu_mask, true);
+}
+
+static bool is_active(const struct cpu_data *state)
+{
+	return cpu_online(state->cpu) && !cpu_isolated(state->cpu);
+}
+
+static bool adjustment_possible(const struct cluster_data *cluster,
+							unsigned int need)
+{
+	return (need < cluster->active_cpus || (need > cluster->active_cpus &&
+						cluster->nr_isolated_cpus));
+}
+
+static bool eval_need(struct cluster_data *cluster)
+{
+	unsigned long flags;
+	struct cpu_data *c;
+	unsigned int need_cpus = 0, last_need, thres_idx;
+	int ret = 0;
+	bool need_flag = false;
+	unsigned int new_need;
+	s64 now, elapsed;
+
+	if (unlikely(!cluster->inited))
+		return false;
+
+	spin_lock_irqsave(&state_lock, flags);
+
+	if (cluster->boost || !cluster->enable) {
+		need_cpus = cluster->max_cpus;
+	} else {
+		cluster->active_cpus = get_active_cpu_count(cluster);
+		thres_idx = cluster->active_cpus ? cluster->active_cpus - 1 : 0;
+		list_for_each_entry(c, &cluster->lru, sib) {
+			if (c->busy >= cluster->busy_up_thres[thres_idx])
+				c->is_busy = true;
+			else if (c->busy < cluster->busy_down_thres[thres_idx])
+				c->is_busy = false;
+			need_cpus += c->is_busy;
+		}
+		need_cpus = apply_task_need(cluster, need_cpus);
+	}
+	new_need = apply_limits(cluster, need_cpus);
+	need_flag = adjustment_possible(cluster, new_need);
+
+	last_need = cluster->need_cpus;
+	now = ktime_to_ms(ktime_get());
+
+	if (new_need > cluster->active_cpus) {
+		ret = 1;
+	} else {
+		if (new_need == last_need) {
+			cluster->need_ts = now;
+			spin_unlock_irqrestore(&state_lock, flags);
+			return false;
+		}
+
+		elapsed = now - cluster->need_ts;
+		ret = elapsed >= cluster->offline_delay_ms;
+	}
+
+	if (ret) {
+		cluster->need_ts = now;
+		cluster->need_cpus = new_need;
+	}
+	trace_core_ctl_eval_need(cluster->first_cpu, last_need, new_need,
+				 ret && need_flag);
+	spin_unlock_irqrestore(&state_lock, flags);
+
+	return ret && need_flag;
+}
+
+static void apply_need(struct cluster_data *cluster)
+{
+	if (eval_need(cluster))
+		wake_up_core_ctl_thread(cluster);
+}
+
+static int core_ctl_set_busy(unsigned int cpu, unsigned int busy)
+{
+	struct cpu_data *c = &per_cpu(cpu_state, cpu);
+	struct cluster_data *cluster = c->cluster;
+	unsigned int old_is_busy = c->is_busy;
+
+	if (!cluster || !cluster->inited)
+		return 0;
+
+	update_running_avg(false);
+	if (c->busy == busy && !cluster->nrrun_changed)
+		return 0;
+	c->busy = busy;
+	cluster->nrrun_changed = false;
+
+	apply_need(cluster);
+	trace_core_ctl_set_busy(cpu, busy, old_is_busy, c->is_busy);
+	return 0;
+}
+
+/* ========================= core count enforcement ==================== */
+
+static void wake_up_core_ctl_thread(struct cluster_data *cluster)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&cluster->pending_lock, flags);
+	cluster->pending = true;
+	spin_unlock_irqrestore(&cluster->pending_lock, flags);
+
+	wake_up_process_no_notif(cluster->core_ctl_thread);
+}
+
+static u64 core_ctl_check_timestamp;
+static u64 core_ctl_check_interval;
+
+static bool do_check(u64 wallclock)
+{
+	bool do_check = false;
+	unsigned long flags;
+
+	spin_lock_irqsave(&state_lock, flags);
+	if ((wallclock - core_ctl_check_timestamp) >= core_ctl_check_interval) {
+		core_ctl_check_timestamp = wallclock;
+		do_check = true;
+	}
+	spin_unlock_irqrestore(&state_lock, flags);
+	return do_check;
+}
+
+int core_ctl_set_boost(bool boost)
+{
+	unsigned int index = 0;
+	struct cluster_data *cluster;
+	unsigned long flags;
+	int ret = 0;
+	bool boost_state_changed = false;
+
+	if (unlikely(!initialized))
+		return 0;
+
+	spin_lock_irqsave(&state_lock, flags);
+	for_each_cluster(cluster, index) {
+		if (cluster->is_big_cluster) {
+			if (boost) {
+				boost_state_changed = !cluster->boost;
+				++cluster->boost;
+			} else {
+				if (!cluster->boost) {
+					pr_err("Error turning off boost. Boost already turned off\n");
+					ret = -EINVAL;
+				} else {
+					--cluster->boost;
+					boost_state_changed = !cluster->boost;
+				}
+			}
+			break;
+		}
+	}
+	spin_unlock_irqrestore(&state_lock, flags);
+
+	if (boost_state_changed)
+		apply_need(cluster);
+
+	trace_core_ctl_set_boost(cluster->boost, ret);
+
+	return ret;
+}
+EXPORT_SYMBOL(core_ctl_set_boost);
+
+void core_ctl_check(u64 wallclock)
+{
+	if (unlikely(!initialized))
+		return;
+
+	if (do_check(wallclock)) {
+		unsigned int index = 0;
+		struct cluster_data *cluster;
+
+		update_running_avg(true);
+
+		for_each_cluster(cluster, index) {
+			if (eval_need(cluster))
+				wake_up_core_ctl_thread(cluster);
+		}
+	}
+}
+
+static void move_cpu_lru(struct cpu_data *cpu_data)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&state_lock, flags);
+	list_del(&cpu_data->sib);
+	list_add_tail(&cpu_data->sib, &cpu_data->cluster->lru);
+	spin_unlock_irqrestore(&state_lock, flags);
+}
+
+static void try_to_isolate(struct cluster_data *cluster, unsigned int need)
+{
+	struct cpu_data *c, *tmp;
+	unsigned long flags;
+	unsigned int num_cpus = cluster->num_cpus;
+	unsigned int nr_isolated = 0;
+
+	/*
+	 * Protect against an entry being removed (and re-added at the tail)
+	 * by another thread (hotplug).
+	 */
+	spin_lock_irqsave(&state_lock, flags);
+	list_for_each_entry_safe(c, tmp, &cluster->lru, sib) {
+		if (!num_cpus--)
+			break;
+
+		if (!is_active(c))
+			continue;
+		if (cluster->active_cpus == need)
+			break;
+		/* Don't offline busy CPUs. */
+		if (c->is_busy)
+			continue;
+
+		spin_unlock_irqrestore(&state_lock, flags);
+
+		pr_debug("Trying to isolate CPU%u\n", c->cpu);
+		if (!sched_isolate_cpu(c->cpu)) {
+			c->isolated_by_us = true;
+			move_cpu_lru(c);
+			nr_isolated++;
+		} else {
+			pr_debug("Unable to isolate CPU%u\n", c->cpu);
+		}
+		cluster->active_cpus = get_active_cpu_count(cluster);
+		spin_lock_irqsave(&state_lock, flags);
+	}
+	cluster->nr_isolated_cpus += nr_isolated;
+	spin_unlock_irqrestore(&state_lock, flags);
+
+	/*
+	 * If the number of active CPUs is within the limits, then
+	 * don't force isolation of any busy CPUs.
+	 */
+	if (cluster->active_cpus <= cluster->max_cpus)
+		return;
+
+	nr_isolated = 0;
+	num_cpus = cluster->num_cpus;
+	spin_lock_irqsave(&state_lock, flags);
+	list_for_each_entry_safe(c, tmp, &cluster->lru, sib) {
+		if (!num_cpus--)
+			break;
+
+		if (!is_active(c))
+			continue;
+		if (cluster->active_cpus <= cluster->max_cpus)
+			break;
+
+		spin_unlock_irqrestore(&state_lock, flags);
+
+		pr_debug("Trying to isolate CPU%u\n", c->cpu);
+		if (!sched_isolate_cpu(c->cpu)) {
+			c->isolated_by_us = true;
+			move_cpu_lru(c);
+			nr_isolated++;
+		} else {
+			pr_debug("Unable to isolate CPU%u\n", c->cpu);
+		}
+		cluster->active_cpus = get_active_cpu_count(cluster);
+		spin_lock_irqsave(&state_lock, flags);
+	}
+	cluster->nr_isolated_cpus += nr_isolated;
+	spin_unlock_irqrestore(&state_lock, flags);
+}
+
+static void __try_to_unisolate(struct cluster_data *cluster,
+			       unsigned int need, bool force)
+{
+	struct cpu_data *c, *tmp;
+	unsigned long flags;
+	unsigned int num_cpus = cluster->num_cpus;
+	unsigned int nr_unisolated = 0;
+
+	/*
+	 * Protect against an entry being removed (and re-added at the tail)
+	 * by another thread (hotplug).
+	 */
+	spin_lock_irqsave(&state_lock, flags);
+	list_for_each_entry_safe(c, tmp, &cluster->lru, sib) {
+		if (!num_cpus--)
+			break;
+
+		if (!c->isolated_by_us)
+			continue;
+		if ((cpu_online(c->cpu) && !cpu_isolated(c->cpu)) ||
+			(!force && c->not_preferred))
+			continue;
+		if (cluster->active_cpus == need)
+			break;
+
+		spin_unlock_irqrestore(&state_lock, flags);
+
+		pr_debug("Trying to unisolate CPU%u\n", c->cpu);
+		if (!sched_unisolate_cpu(c->cpu)) {
+			c->isolated_by_us = false;
+			move_cpu_lru(c);
+			nr_unisolated++;
+		} else {
+			pr_debug("Unable to unisolate CPU%u\n", c->cpu);
+		}
+		cluster->active_cpus = get_active_cpu_count(cluster);
+		spin_lock_irqsave(&state_lock, flags);
+	}
+	cluster->nr_isolated_cpus -= nr_unisolated;
+	spin_unlock_irqrestore(&state_lock, flags);
+}
+
+static void try_to_unisolate(struct cluster_data *cluster, unsigned int need)
+{
+	bool force_use_non_preferred = false;
+
+	__try_to_unisolate(cluster, need, force_use_non_preferred);
+
+	if (cluster->active_cpus == need)
+		return;
+
+	force_use_non_preferred = true;
+	__try_to_unisolate(cluster, need, force_use_non_preferred);
+}
+
+static void __ref do_core_ctl(struct cluster_data *cluster)
+{
+	unsigned int need;
+
+	need = apply_limits(cluster, cluster->need_cpus);
+
+	if (adjustment_possible(cluster, need)) {
+		pr_debug("Trying to adjust group %u from %u to %u\n",
+				cluster->first_cpu, cluster->active_cpus, need);
+
+		if (cluster->active_cpus > need)
+			try_to_isolate(cluster, need);
+		else if (cluster->active_cpus < need)
+			try_to_unisolate(cluster, need);
+	}
+}
+
+static int __ref try_core_ctl(void *data)
+{
+	struct cluster_data *cluster = data;
+	unsigned long flags;
+
+	while (1) {
+		set_current_state(TASK_INTERRUPTIBLE);
+		spin_lock_irqsave(&cluster->pending_lock, flags);
+		if (!cluster->pending) {
+			spin_unlock_irqrestore(&cluster->pending_lock, flags);
+			schedule();
+			if (kthread_should_stop())
+				break;
+			spin_lock_irqsave(&cluster->pending_lock, flags);
+		}
+		set_current_state(TASK_RUNNING);
+		cluster->pending = false;
+		spin_unlock_irqrestore(&cluster->pending_lock, flags);
+
+		do_core_ctl(cluster);
+	}
+
+	return 0;
+}
+
+static int __ref cpu_callback(struct notifier_block *nfb,
+				unsigned long action, void *hcpu)
+{
+	uint32_t cpu = (uintptr_t)hcpu;
+	struct cpu_data *state = &per_cpu(cpu_state, cpu);
+	struct cluster_data *cluster = state->cluster;
+	unsigned int need;
+	bool do_wakeup, unisolated = false;
+	unsigned long flags;
+
+	if (unlikely(!cluster || !cluster->inited))
+		return NOTIFY_DONE;
+
+	switch (action & ~CPU_TASKS_FROZEN) {
+	case CPU_ONLINE:
+		cluster->active_cpus = get_active_cpu_count(cluster);
+
+		/*
+		 * Moving to the end of the list should only happen in
+		 * CPU_ONLINE and not on CPU_UP_PREPARE, to prevent an
+		 * infinite list traversal when thermal (or other entities)
+		 * rejects attempts to online CPUs.
+		 */
+		move_cpu_lru(state);
+		break;
+
+	case CPU_DEAD:
+		/*
+		 * We don't want to have a CPU both offline and isolated.
+		 * So unisolate a CPU that went down if it was isolated by us.
+		 */
+		if (state->isolated_by_us) {
+			sched_unisolate_cpu_unlocked(cpu);
+			state->isolated_by_us = false;
+			unisolated = true;
+		}
+
+		/* Move a CPU to the end of the LRU when it goes offline. */
+		move_cpu_lru(state);
+
+		state->busy = 0;
+		cluster->active_cpus = get_active_cpu_count(cluster);
+		break;
+	default:
+		return NOTIFY_DONE;
+	}
+
+	need = apply_limits(cluster, cluster->need_cpus);
+	spin_lock_irqsave(&state_lock, flags);
+	if (unisolated)
+		cluster->nr_isolated_cpus--;
+	do_wakeup = adjustment_possible(cluster, need);
+	spin_unlock_irqrestore(&state_lock, flags);
+	if (do_wakeup)
+		wake_up_core_ctl_thread(cluster);
+
+	return NOTIFY_OK;
+}
+
+static struct notifier_block __refdata cpu_notifier = {
+	.notifier_call = cpu_callback,
+};
+
+/* ============================ init code ============================== */
+
+static cpumask_var_t core_ctl_disable_cpumask;
+static bool core_ctl_disable_cpumask_present;
+
+static int __init core_ctl_disable_setup(char *str)
+{
+	if (!*str)
+		return -EINVAL;
+
+	alloc_bootmem_cpumask_var(&core_ctl_disable_cpumask);
+
+	if (cpulist_parse(str, core_ctl_disable_cpumask) < 0) {
+		free_bootmem_cpumask_var(core_ctl_disable_cpumask);
+		return -EINVAL;
+	}
+
+	core_ctl_disable_cpumask_present = true;
+	pr_info("disable_cpumask=%*pbl\n",
+			cpumask_pr_args(core_ctl_disable_cpumask));
+
+	return 0;
+}
+early_param("core_ctl_disable_cpumask", core_ctl_disable_setup);
+
+static bool should_skip(const struct cpumask *mask)
+{
+	if (!core_ctl_disable_cpumask_present)
+		return false;
+
+	/*
+	 * We operate on a cluster basis. Disable core_ctl for a cluster
+	 * if all of its CPUs are specified in core_ctl_disable_cpumask.
+	 */
+	return cpumask_subset(mask, core_ctl_disable_cpumask);
+}
+
+static struct cluster_data *find_cluster_by_first_cpu(unsigned int first_cpu)
+{
+	unsigned int i;
+
+	for (i = 0; i < num_clusters; ++i) {
+		if (cluster_state[i].first_cpu == first_cpu)
+			return &cluster_state[i];
+	}
+
+	return NULL;
+}
+
+static int cluster_init(const struct cpumask *mask)
+{
+	struct device *dev;
+	unsigned int first_cpu = cpumask_first(mask);
+	struct cluster_data *cluster;
+	struct cpu_data *state;
+	unsigned int cpu;
+	struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };
+
+	if (should_skip(mask))
+		return 0;
+
+	if (find_cluster_by_first_cpu(first_cpu))
+		return 0;
+
+	dev = get_cpu_device(first_cpu);
+	if (!dev)
+		return -ENODEV;
+
+	pr_info("Creating CPU group %d\n", first_cpu);
+
+	if (num_clusters == MAX_CLUSTERS) {
+		pr_err("Unsupported number of clusters. Only %u supported\n",
+								MAX_CLUSTERS);
+		return -EINVAL;
+	}
+	cluster = &cluster_state[num_clusters];
+	++num_clusters;
+
+	cpumask_copy(&cluster->cpu_mask, mask);
+	cluster->num_cpus = cpumask_weight(mask);
+	if (cluster->num_cpus > MAX_CPUS_PER_CLUSTER) {
+		pr_err("HW configuration not supported\n");
+		return -EINVAL;
+	}
+	cluster->first_cpu = first_cpu;
+	cluster->min_cpus = 1;
+	cluster->max_cpus = cluster->num_cpus;
+	cluster->need_cpus = cluster->num_cpus;
+	cluster->offline_delay_ms = 100;
+	cluster->task_thres = UINT_MAX;
+	cluster->nrrun = cluster->num_cpus;
+	cluster->enable = true;
+	INIT_LIST_HEAD(&cluster->lru);
+	spin_lock_init(&cluster->pending_lock);
+
+	for_each_cpu(cpu, mask) {
+		pr_info("Init CPU%u state\n", cpu);
+
+		state = &per_cpu(cpu_state, cpu);
+		state->cluster = cluster;
+		state->cpu = cpu;
+		list_add_tail(&state->sib, &cluster->lru);
+	}
+	cluster->active_cpus = get_active_cpu_count(cluster);
+
+	cluster->core_ctl_thread = kthread_run(try_core_ctl, (void *) cluster,
+					"core_ctl/%d", first_cpu);
+	if (IS_ERR(cluster->core_ctl_thread))
+		return PTR_ERR(cluster->core_ctl_thread);
+
+	sched_setscheduler_nocheck(cluster->core_ctl_thread, SCHED_FIFO,
+				   &param);
+
+	cluster->inited = true;
+
+	kobject_init(&cluster->kobj, &ktype_core_ctl);
+	return kobject_add(&cluster->kobj, &dev->kobj, "core_ctl");
+}
+
+static int cpufreq_policy_cb(struct notifier_block *nb, unsigned long val,
+				void *data)
+{
+	struct cpufreq_policy *policy = data;
+	int ret;
+
+	switch (val) {
+	case CPUFREQ_CREATE_POLICY:
+		ret = cluster_init(policy->related_cpus);
+		if (ret)
+			pr_warn("unable to create core ctl group: %d\n", ret);
+		break;
+	}
+
+	return NOTIFY_OK;
+}
+
+static struct notifier_block cpufreq_pol_nb = {
+	.notifier_call = cpufreq_policy_cb,
+};
+
+static int cpufreq_gov_cb(struct notifier_block *nb, unsigned long val,
+				void *data)
+{
+	struct cpufreq_govinfo *info = data;
+
+	switch (val) {
+	case CPUFREQ_LOAD_CHANGE:
+		core_ctl_set_busy(info->cpu, info->load);
+		break;
+	}
+
+	return NOTIFY_OK;
+}
+
+static struct notifier_block cpufreq_gov_nb = {
+	.notifier_call = cpufreq_gov_cb,
+};
+
+static int __init core_ctl_init(void)
+{
+	unsigned int cpu;
+
+	if (should_skip(cpu_possible_mask))
+		return 0;
+
+	core_ctl_check_interval = (rq_avg_period_ms - RQ_AVG_TOLERANCE)
+					* NSEC_PER_MSEC;
+
+	register_cpu_notifier(&cpu_notifier);
+	cpufreq_register_notifier(&cpufreq_pol_nb, CPUFREQ_POLICY_NOTIFIER);
+	cpufreq_register_notifier(&cpufreq_gov_nb, CPUFREQ_GOVINFO_NOTIFIER);
+
+	cpu_maps_update_begin();
+	for_each_online_cpu(cpu) {
+		struct cpufreq_policy *policy;
+		int ret;
+
+		policy = cpufreq_cpu_get(cpu);
+		if (policy) {
+			ret = cluster_init(policy->related_cpus);
+			if (ret)
+				pr_warn("unable to create core ctl group: %d\n",
+					ret);
+			cpufreq_cpu_put(policy);
+		}
+	}
+	cpu_maps_update_done();
+	initialized = true;
+	return 0;
+}
+
+late_initcall(core_ctl_init);
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/kernel/sched/cpufreq.c	2019-01-22 16:16:28.695293423 +0100
@@ -0,0 +1,63 @@
+/*
+ * Scheduler code and data structures related to cpufreq.
+ *
+ * Copyright (C) 2016, Intel Corporation
+ * Author: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "sched.h"
+
+DEFINE_PER_CPU(struct update_util_data *, cpufreq_update_util_data);
+
+/**
+ * cpufreq_add_update_util_hook - Populate the CPU's update_util_data pointer.
+ * @cpu: The CPU to set the pointer for.
+ * @data: New pointer value.
+ * @func: Callback function to set for the CPU.
+ *
+ * Set and publish the update_util_data pointer for the given CPU.
+ *
+ * The update_util_data pointer of @cpu is set to @data and the callback
+ * function pointer in the target struct update_util_data is set to @func.
+ * That function will be called by cpufreq_update_util() from RCU-sched
+ * read-side critical sections, so it must not sleep.  @data will always be
+ * passed to it as the first argument which allows the function to get to the
+ * target update_util_data structure and its container.
+ *
+ * The update_util_data pointer of @cpu must be NULL when this function is
+ * called or it will WARN() and return with no effect.
+ */
+void cpufreq_add_update_util_hook(int cpu, struct update_util_data *data,
+			void (*func)(struct update_util_data *data, u64 time,
+				     unsigned int flags))
+{
+	if (WARN_ON(!data || !func))
+		return;
+
+	if (WARN_ON(per_cpu(cpufreq_update_util_data, cpu)))
+		return;
+
+	data->func = func;
+	rcu_assign_pointer(per_cpu(cpufreq_update_util_data, cpu), data);
+}
+EXPORT_SYMBOL_GPL(cpufreq_add_update_util_hook);
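+
+/*
+ * A minimal usage sketch (hypothetical governor code, not part of this
+ * file): the callback runs from RCU-sched read-side critical sections and
+ * must not sleep, and @data is typically embedded in a larger per-CPU
+ * structure recovered with container_of():
+ *
+ *	struct my_gov_cpu {
+ *		struct update_util_data update_util;
+ *	};
+ *
+ *	static void my_gov_update(struct update_util_data *data, u64 time,
+ *				  unsigned int flags)
+ *	{
+ *		struct my_gov_cpu *gc = container_of(data,
+ *					struct my_gov_cpu, update_util);
+ *		... kick a frequency re-evaluation for gc's CPU ...
+ *	}
+ *
+ *	cpufreq_add_update_util_hook(cpu, &gc->update_util, my_gov_update);
+ */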
+
+/**
+ * cpufreq_remove_update_util_hook - Clear the CPU's update_util_data pointer.
+ * @cpu: The CPU to clear the pointer for.
+ *
+ * Clear the update_util_data pointer for the given CPU.
+ *
+ * Callers must use RCU-sched callbacks to free any memory that might be
+ * accessed via the old update_util_data pointer or invoke synchronize_sched()
+ * right after this function to avoid use-after-free.
+ */
+void cpufreq_remove_update_util_hook(int cpu)
+{
+	rcu_assign_pointer(per_cpu(cpufreq_update_util_data, cpu), NULL);
+}
+EXPORT_SYMBOL_GPL(cpufreq_remove_update_util_hook);
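+
+/*
+ * A matching teardown sketch (hypothetical, following the rules documented
+ * above): clear the hook, wait out in-flight callbacks, then free the
+ * enclosing structure:
+ *
+ *	cpufreq_remove_update_util_hook(cpu);
+ *	synchronize_sched();
+ *	kfree(gc);
+ */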
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/kernel/sched/energy.c	2019-01-22 16:16:28.699293459 +0100
@@ -0,0 +1,134 @@
+/*
+ * Obtain energy cost data from DT and populate relevant scheduler data
+ * structures.
+ *
+ * Copyright (C) 2015 ARM Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+#define pr_fmt(fmt) "sched-energy: " fmt
+
+#define DEBUG
+
+#include <linux/gfp.h>
+#include <linux/of.h>
+#include <linux/printk.h>
+#include <linux/sched.h>
+#include <linux/sched_energy.h>
+#include <linux/stddef.h>
+
+#include "sched.h"
+
+struct sched_group_energy *sge_array[NR_CPUS][NR_SD_LEVELS];
+bool sched_energy_aware;
+
+static void free_resources(void)
+{
+	int cpu, sd_level;
+	struct sched_group_energy *sge;
+
+	for_each_possible_cpu(cpu) {
+		for_each_possible_sd_level(sd_level) {
+			sge = sge_array[cpu][sd_level];
+			if (sge) {
+				kfree(sge->cap_states);
+				kfree(sge->idle_states);
+				kfree(sge);
+			}
+		}
+	}
+}
+
+void init_sched_energy_costs(void)
+{
+	struct device_node *cn, *cp;
+	struct capacity_state *cap_states;
+	struct idle_state *idle_states;
+	struct sched_group_energy *sge;
+	const struct property *prop;
+	int sd_level, i, nstates, cpu;
+	const __be32 *val;
+
+	if (!energy_aware()) {
+		sched_energy_aware = false;
+		return;
+	}
+
+	sched_energy_aware = true;
+
+	for_each_possible_cpu(cpu) {
+		cn = of_get_cpu_node(cpu, NULL);
+		if (!cn) {
+			pr_warn("CPU device node missing for CPU %d\n", cpu);
+			return;
+		}
+
+		if (!of_find_property(cn, "sched-energy-costs", NULL)) {
+			pr_warn("CPU device node has no sched-energy-costs\n");
+			return;
+		}
+
+		for_each_possible_sd_level(sd_level) {
+			cp = of_parse_phandle(cn, "sched-energy-costs", sd_level);
+			if (!cp)
+				break;
+
+			prop = of_find_property(cp, "busy-cost-data", NULL);
+			if (!prop || !prop->value) {
+				pr_warn("No busy-cost data, skipping sched_energy init\n");
+				goto out;
+			}
+
+			sge = kzalloc(sizeof(struct sched_group_energy),
+				      GFP_NOWAIT);
+			if (!sge)
+				goto out;
+
+			nstates = (prop->length / sizeof(u32)) / 2;
+			cap_states = kcalloc(nstates,
+					     sizeof(struct capacity_state),
+					     GFP_NOWAIT);
+			if (!cap_states) {
+				kfree(sge);
+				goto out;
+			}
+
+			for (i = 0, val = prop->value; i < nstates; i++) {
+				cap_states[i].cap = be32_to_cpup(val++);
+				cap_states[i].power = be32_to_cpup(val++);
+			}
+
+			sge->nr_cap_states = nstates;
+			sge->cap_states = cap_states;
+
+			prop = of_find_property(cp, "idle-cost-data", NULL);
+			if (!prop || !prop->value) {
+				pr_warn("No idle-cost data, skipping sched_energy init\n");
+				goto out;
+			}
+
+			nstates = (prop->length / sizeof(u32));
+			idle_states = kcalloc(nstates,
+					      sizeof(struct idle_state),
+					      GFP_NOWAIT);
+			if (!idle_states) {
+				kfree(sge->cap_states);
+				kfree(sge);
+				goto out;
+			}
+
+			for (i = 0, val = prop->value; i < nstates; i++)
+				idle_states[i].power = be32_to_cpup(val++);
+
+			sge->nr_idle_states = nstates;
+			sge->idle_states = idle_states;
+
+			sge_array[cpu][sd_level] = sge;
+		}
+	}
+
+	pr_info("Sched-energy-costs installed from DT\n");
+	return;
+
+out:
+	free_resources();
+}
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/kernel/sched/hmp.c	2019-10-29 09:26:25.625222574 +0100
@@ -0,0 +1,4410 @@
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * Implementation credits: Srivatsa Vaddagiri, Steve Muckle
+ * Syed Rameez Mustafa, Olav Haugan, Joonwoo Park, Pavan Kumar Kondeti
+ * and Vikram Mulukutla
+ */
+
+#include <linux/cpufreq.h>
+#include <linux/list_sort.h>
+#include <linux/syscore_ops.h>
+
+#include "sched.h"
+
+#include <trace/events/sched.h>
+
+#define CSTATE_LATENCY_GRANULARITY_SHIFT (6)
+
+const char *task_event_names[] = {"PUT_PREV_TASK", "PICK_NEXT_TASK",
+				  "TASK_WAKE", "TASK_MIGRATE", "TASK_UPDATE",
+				  "IRQ_UPDATE"};
+
+const char *migrate_type_names[] = {"GROUP_TO_RQ", "RQ_TO_GROUP"};
+
+static ktime_t ktime_last;
+static bool sched_ktime_suspended;
+
+static bool use_cycle_counter;
+static struct cpu_cycle_counter_cb cpu_cycle_counter_cb;
+
+u64 sched_ktime_clock(void)
+{
+	if (unlikely(sched_ktime_suspended))
+		return ktime_to_ns(ktime_last);
+	return ktime_get_ns();
+}
+
+static void sched_resume(void)
+{
+	sched_ktime_suspended = false;
+}
+
+static int sched_suspend(void)
+{
+	ktime_last = ktime_get();
+	sched_ktime_suspended = true;
+	return 0;
+}
+
+static struct syscore_ops sched_syscore_ops = {
+	.resume	= sched_resume,
+	.suspend = sched_suspend
+};
+
+static int __init sched_init_ops(void)
+{
+	register_syscore_ops(&sched_syscore_ops);
+	return 0;
+}
+late_initcall(sched_init_ops);
+
+inline void clear_ed_task(struct task_struct *p, struct rq *rq)
+{
+	if (p == rq->ed_task)
+		rq->ed_task = NULL;
+}
+
+inline void set_task_last_switch_out(struct task_struct *p, u64 wallclock)
+{
+	p->last_switch_out_ts = wallclock;
+}
+
+/*
+ * Note C-state for (idle) cpus.
+ *
+ * @cstate = cstate index, 0 -> active state
+ * @wakeup_energy = energy spent in waking up cpu
+ * @wakeup_latency = latency to wakeup from cstate
+ */
+void
+sched_set_cpu_cstate(int cpu, int cstate, int wakeup_energy, int wakeup_latency)
+{
+	struct rq *rq = cpu_rq(cpu);
+
+	rq->cstate = cstate; /* C1, C2 etc */
+	rq->wakeup_energy = wakeup_energy;
+	/* disregard small latency delta (64 us). */
+	rq->wakeup_latency = ((wakeup_latency >>
+			       CSTATE_LATENCY_GRANULARITY_SHIFT) <<
+			      CSTATE_LATENCY_GRANULARITY_SHIFT);
+}
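+
+/*
+ * Example of the rounding above, assuming wakeup_latency is given in
+ * microseconds: with a shift of 6 the value is truncated to 64us steps,
+ * so 100 becomes 64 and 130 becomes 128; C-states whose latencies differ
+ * by less than 64us are treated as equal.
+ */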
+
+/*
+ * Note D-state for (idle) cluster.
+ *
+ * @dstate = dstate index, 0 -> active state
+ * @wakeup_energy = energy spent in waking up cluster
+ * @wakeup_latency = latency to wakeup from cluster
+ */
+void sched_set_cluster_dstate(const cpumask_t *cluster_cpus, int dstate,
+			int wakeup_energy, int wakeup_latency)
+{
+	struct sched_cluster *cluster =
+		cpu_rq(cpumask_first(cluster_cpus))->cluster;
+	cluster->dstate = dstate;
+	cluster->dstate_wakeup_energy = wakeup_energy;
+	cluster->dstate_wakeup_latency = wakeup_latency;
+}
+
+u32 __weak get_freq_max_load(int cpu, u32 freq)
+{
+	/* 100% by default */
+	return 100;
+}
+
+struct freq_max_load_entry {
+	/* The maximum load, which accounts for the governor's headroom. */
+	u64 hdemand;
+};
+
+struct freq_max_load {
+	struct rcu_head rcu;
+	int length;
+	struct freq_max_load_entry freqs[0];
+};
+
+static DEFINE_PER_CPU(struct freq_max_load *, freq_max_load);
+static DEFINE_SPINLOCK(freq_max_load_lock);
+
+struct cpu_pwr_stats __weak *get_cpu_pwr_stats(void)
+{
+	return NULL;
+}
+
+int sched_update_freq_max_load(const cpumask_t *cpumask)
+{
+	int i, cpu, ret;
+	unsigned int freq;
+	struct cpu_pstate_pwr *costs;
+	struct cpu_pwr_stats *per_cpu_info = get_cpu_pwr_stats();
+	struct freq_max_load *max_load, *old_max_load;
+	struct freq_max_load_entry *entry;
+	u64 max_demand_capacity, max_demand;
+	unsigned long flags;
+	u32 hfreq;
+	int hpct;
+
+	if (!per_cpu_info)
+		return 0;
+
+	spin_lock_irqsave(&freq_max_load_lock, flags);
+	max_demand_capacity = div64_u64(max_task_load(), max_possible_capacity);
+	for_each_cpu(cpu, cpumask) {
+		if (!per_cpu_info[cpu].ptable) {
+			ret = -EINVAL;
+			goto fail;
+		}
+
+		old_max_load = rcu_dereference(per_cpu(freq_max_load, cpu));
+
+		/*
+		 * Allocate len + 1 entries and leave the last power cost as 0
+		 * so that power_cost() can stop iterating when
+		 * per_cpu_info[cpu].len grows beyond the length of max_load,
+		 * due to a race between the cpu power stats update and
+		 * get_cpu_pwr_stats().
+		 */
+		max_load = kzalloc(sizeof(struct freq_max_load) +
+				   sizeof(struct freq_max_load_entry) *
+				   (per_cpu_info[cpu].len + 1), GFP_ATOMIC);
+		if (unlikely(!max_load)) {
+			ret = -ENOMEM;
+			goto fail;
+		}
+
+		max_load->length = per_cpu_info[cpu].len;
+
+		max_demand = max_demand_capacity *
+			     cpu_max_possible_capacity(cpu);
+
+		i = 0;
+		costs = per_cpu_info[cpu].ptable;
+		while (costs[i].freq) {
+			entry = &max_load->freqs[i];
+			freq = costs[i].freq;
+			hpct = get_freq_max_load(cpu, freq);
+			if (hpct <= 0 || hpct > 100)
+				hpct = 100;
+			hfreq = div64_u64((u64)freq * hpct, 100);
+			entry->hdemand =
+			    div64_u64(max_demand * hfreq,
+				      cpu_max_possible_freq(cpu));
+			i++;
+		}
+
+		rcu_assign_pointer(per_cpu(freq_max_load, cpu), max_load);
+		if (old_max_load)
+			kfree_rcu(old_max_load, rcu);
+	}
+
+	spin_unlock_irqrestore(&freq_max_load_lock, flags);
+	return 0;
+
+fail:
+	for_each_cpu(cpu, cpumask) {
+		max_load = rcu_dereference(per_cpu(freq_max_load, cpu));
+		if (max_load) {
+			rcu_assign_pointer(per_cpu(freq_max_load, cpu), NULL);
+			kfree_rcu(max_load, rcu);
+		}
+	}
+
+	spin_unlock_irqrestore(&freq_max_load_lock, flags);
+	return ret;
+}
+
+unsigned int max_possible_efficiency = 1;
+unsigned int min_possible_efficiency = UINT_MAX;
+
+unsigned long __weak arch_get_cpu_efficiency(int cpu)
+{
+	return SCHED_LOAD_SCALE;
+}
+
+/* Keep track of max/min capacity possible across CPUs "currently" */
+static void __update_min_max_capacity(void)
+{
+	int i;
+	int max_cap = 0, min_cap = INT_MAX;
+
+	for_each_online_cpu(i) {
+		max_cap = max(max_cap, cpu_capacity(i));
+		min_cap = min(min_cap, cpu_capacity(i));
+	}
+
+	max_capacity = max_cap;
+	min_capacity = min_cap;
+}
+
+static void update_min_max_capacity(void)
+{
+	unsigned long flags;
+	int i;
+
+	local_irq_save(flags);
+	for_each_possible_cpu(i)
+		raw_spin_lock(&cpu_rq(i)->lock);
+
+	__update_min_max_capacity();
+
+	for_each_possible_cpu(i)
+		raw_spin_unlock(&cpu_rq(i)->lock);
+	local_irq_restore(flags);
+}
+
+/*
+ * Return 'capacity' of a cpu in reference to "least" efficient cpu, such that
+ * the least efficient cpu gets a capacity of 1024.
+ */
+static unsigned long
+capacity_scale_cpu_efficiency(struct sched_cluster *cluster)
+{
+	return (1024 * cluster->efficiency) / min_possible_efficiency;
+}
+
+/*
+ * Return 'capacity' of a cpu in reference to cpu with lowest max_freq
+ * (min_max_freq), such that one with lowest max_freq gets capacity of 1024.
+ */
+static unsigned long capacity_scale_cpu_freq(struct sched_cluster *cluster)
+{
+	return (1024 * cluster_max_freq(cluster)) / min_max_freq;
+}
+
+/*
+ * Return load_scale_factor of a cpu in reference to "most" efficient cpu, so
+ * that "most" efficient cpu gets a load_scale_factor of 1
+ */
+static inline unsigned long
+load_scale_cpu_efficiency(struct sched_cluster *cluster)
+{
+	return DIV_ROUND_UP(1024 * max_possible_efficiency,
+			    cluster->efficiency);
+}
+
+/*
+ * Return load_scale_factor of a cpu in reference to cpu with best max_freq
+ * (max_possible_freq), so that one with best max_freq gets a load_scale_factor
+ * of 1.
+ */
+static inline unsigned long load_scale_cpu_freq(struct sched_cluster *cluster)
+{
+	return DIV_ROUND_UP(1024 * max_possible_freq,
+			   cluster_max_freq(cluster));
+}
+
+static int compute_capacity(struct sched_cluster *cluster)
+{
+	int capacity = 1024;
+
+	capacity *= capacity_scale_cpu_efficiency(cluster);
+	capacity >>= 10;
+
+	capacity *= capacity_scale_cpu_freq(cluster);
+	capacity >>= 10;
+
+	return capacity;
+}
+
+static int compute_max_possible_capacity(struct sched_cluster *cluster)
+{
+	int capacity = 1024;
+
+	capacity *= capacity_scale_cpu_efficiency(cluster);
+	capacity >>= 10;
+
+	capacity *= (1024 * cluster->max_possible_freq) / min_max_freq;
+	capacity >>= 10;
+
+	return capacity;
+}
+
+static int compute_load_scale_factor(struct sched_cluster *cluster)
+{
+	int load_scale = 1024;
+
+	/*
+	 * load_scale_factor accounts for the fact that task load
+	 * is in reference to "best" performing cpu. Task's load will need to be
+	 * scaled (up) by a factor to determine suitability to be placed on a
+	 * (little) cpu.
+	 */
+	load_scale *= load_scale_cpu_efficiency(cluster);
+	load_scale >>= 10;
+
+	load_scale *= load_scale_cpu_freq(cluster);
+	load_scale >>= 10;
+
+	return load_scale;
+}
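+
+/*
+ * Worked example with made-up numbers: say the big cluster is twice as
+ * efficient as the little one and its max_freq is twice min_max_freq.
+ * Both of the big cluster's scale factors are then 2048, so
+ * compute_capacity() returns ((1024 * 2048) >> 10) * 2048 >> 10 = 4096,
+ * while the little cluster stays at 1024. Conversely, the little
+ * cluster's load_scale_factor works out to 4096, i.e. a task's load is
+ * scaled up 4x (4096/1024) before being compared against thresholds that
+ * are referenced to the best performing cpu.
+ */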
+
+struct list_head cluster_head;
+static DEFINE_MUTEX(cluster_lock);
+static cpumask_t all_cluster_cpus = CPU_MASK_NONE;
+DECLARE_BITMAP(all_cluster_ids, NR_CPUS);
+struct sched_cluster *sched_cluster[NR_CPUS];
+int num_clusters;
+
+unsigned int max_power_cost = 1;
+
+struct sched_cluster init_cluster = {
+	.list			=	LIST_HEAD_INIT(init_cluster.list),
+	.id			=	0,
+	.max_power_cost		=	1,
+	.min_power_cost		=	1,
+	.capacity		=	1024,
+	.max_possible_capacity	=	1024,
+	.efficiency		=	1,
+	.load_scale_factor	=	1024,
+	.cur_freq		=	1,
+	.max_freq		=	1,
+	.max_mitigated_freq	=	UINT_MAX,
+	.min_freq		=	1,
+	.max_possible_freq	=	1,
+	.dstate			=	0,
+	.dstate_wakeup_energy	=	0,
+	.dstate_wakeup_latency	=	0,
+	.exec_scale_factor	=	1024,
+	.notifier_sent		=	0,
+	.wake_up_idle		=	0,
+};
+
+static void update_all_clusters_stats(void)
+{
+	struct sched_cluster *cluster;
+	u64 highest_mpc = 0, lowest_mpc = U64_MAX;
+
+	pre_big_task_count_change(cpu_possible_mask);
+
+	for_each_sched_cluster(cluster) {
+		u64 mpc;
+
+		cluster->capacity = compute_capacity(cluster);
+		mpc = cluster->max_possible_capacity =
+			compute_max_possible_capacity(cluster);
+		cluster->load_scale_factor = compute_load_scale_factor(cluster);
+
+		cluster->exec_scale_factor =
+			DIV_ROUND_UP(cluster->efficiency * 1024,
+				     max_possible_efficiency);
+
+		if (mpc > highest_mpc)
+			highest_mpc = mpc;
+
+		if (mpc < lowest_mpc)
+			lowest_mpc = mpc;
+	}
+
+	max_possible_capacity = highest_mpc;
+	min_max_possible_capacity = lowest_mpc;
+
+	__update_min_max_capacity();
+	sched_update_freq_max_load(cpu_possible_mask);
+	post_big_task_count_change(cpu_possible_mask);
+}
+
+static void assign_cluster_ids(struct list_head *head)
+{
+	struct sched_cluster *cluster;
+	int pos = 0;
+
+	list_for_each_entry(cluster, head, list) {
+		cluster->id = pos;
+		sched_cluster[pos++] = cluster;
+	}
+}
+
+static void
+move_list(struct list_head *dst, struct list_head *src, bool sync_rcu)
+{
+	struct list_head *first, *last;
+
+	first = src->next;
+	last = src->prev;
+
+	if (sync_rcu) {
+		INIT_LIST_HEAD_RCU(src);
+		synchronize_rcu();
+	}
+
+	first->prev = dst;
+	dst->prev = last;
+	last->next = dst;
+
+	/* Ensure list sanity before making the head visible to all CPUs. */
+	smp_mb();
+	dst->next = first;
+}
+
+static int
+compare_clusters(void *priv, struct list_head *a, struct list_head *b)
+{
+	struct sched_cluster *cluster1, *cluster2;
+	int ret;
+
+	cluster1 = container_of(a, struct sched_cluster, list);
+	cluster2 = container_of(b, struct sched_cluster, list);
+
+	/*
+	 * Don't assume higher capacity means higher power. If the
+	 * power cost is the same, sort the higher capacity cluster before
+	 * the lower capacity cluster to start placing the tasks
+	 * on the higher capacity cluster.
+	 */
+	ret = cluster1->max_power_cost > cluster2->max_power_cost ||
+		(cluster1->max_power_cost == cluster2->max_power_cost &&
+		cluster1->max_possible_capacity <
+				cluster2->max_possible_capacity);
+
+	return ret;
+}
+
+static void sort_clusters(void)
+{
+	struct sched_cluster *cluster;
+	struct list_head new_head;
+	unsigned int tmp_max = 1;
+
+	INIT_LIST_HEAD(&new_head);
+
+	for_each_sched_cluster(cluster) {
+		cluster->max_power_cost = power_cost(cluster_first_cpu(cluster),
+							       max_task_load());
+		cluster->min_power_cost = power_cost(cluster_first_cpu(cluster),
+							       0);
+
+		if (cluster->max_power_cost > tmp_max)
+			tmp_max = cluster->max_power_cost;
+	}
+	max_power_cost = tmp_max;
+
+	move_list(&new_head, &cluster_head, true);
+
+	list_sort(NULL, &new_head, compare_clusters);
+	assign_cluster_ids(&new_head);
+
+	/*
+	 * Ensure cluster ids are visible to all CPUs before making
+	 * cluster_head visible.
+	 */
+	move_list(&cluster_head, &new_head, false);
+}
+
+static void
+insert_cluster(struct sched_cluster *cluster, struct list_head *head)
+{
+	struct sched_cluster *tmp;
+	struct list_head *iter = head;
+
+	list_for_each_entry(tmp, head, list) {
+		if (cluster->max_power_cost < tmp->max_power_cost)
+			break;
+		iter = &tmp->list;
+	}
+
+	list_add(&cluster->list, iter);
+}
+
+static struct sched_cluster *alloc_new_cluster(const struct cpumask *cpus)
+{
+	struct sched_cluster *cluster = NULL;
+
+	cluster = kzalloc(sizeof(struct sched_cluster), GFP_ATOMIC);
+	if (!cluster) {
+		__WARN_printf("Cluster allocation failed. "
+			      "Possible bad scheduling\n");
+		return NULL;
+	}
+
+	INIT_LIST_HEAD(&cluster->list);
+	cluster->max_power_cost		=	1;
+	cluster->min_power_cost		=	1;
+	cluster->capacity		=	1024;
+	cluster->max_possible_capacity	=	1024;
+	cluster->efficiency		=	1;
+	cluster->load_scale_factor	=	1024;
+	cluster->cur_freq		=	1;
+	cluster->max_freq		=	1;
+	cluster->max_mitigated_freq	=	UINT_MAX;
+	cluster->min_freq		=	1;
+	cluster->max_possible_freq	=	1;
+	cluster->dstate			=	0;
+	cluster->dstate_wakeup_energy	=	0;
+	cluster->dstate_wakeup_latency	=	0;
+	cluster->freq_init_done		=	false;
+
+	raw_spin_lock_init(&cluster->load_lock);
+	cluster->cpus = *cpus;
+	cluster->efficiency = arch_get_cpu_efficiency(cpumask_first(cpus));
+
+	if (cluster->efficiency > max_possible_efficiency)
+		max_possible_efficiency = cluster->efficiency;
+	if (cluster->efficiency < min_possible_efficiency)
+		min_possible_efficiency = cluster->efficiency;
+
+	cluster->notifier_sent = 0;
+	return cluster;
+}
+
+static void add_cluster(const struct cpumask *cpus, struct list_head *head)
+{
+	struct sched_cluster *cluster = alloc_new_cluster(cpus);
+	int i;
+
+	if (!cluster)
+		return;
+
+	for_each_cpu(i, cpus)
+		cpu_rq(i)->cluster = cluster;
+
+	insert_cluster(cluster, head);
+	set_bit(num_clusters, all_cluster_ids);
+	num_clusters++;
+}
+
+void update_cluster_topology(void)
+{
+	struct cpumask cpus = *cpu_possible_mask;
+	const struct cpumask *cluster_cpus;
+	struct list_head new_head;
+	int i;
+
+	INIT_LIST_HEAD(&new_head);
+
+	for_each_cpu(i, &cpus) {
+		cluster_cpus = cpu_coregroup_mask(i);
+		cpumask_or(&all_cluster_cpus, &all_cluster_cpus, cluster_cpus);
+		cpumask_andnot(&cpus, &cpus, cluster_cpus);
+		add_cluster(cluster_cpus, &new_head);
+	}
+
+	assign_cluster_ids(&new_head);
+
+	/*
+	 * Ensure cluster ids are visible to all CPUs before making
+	 * cluster_head visible.
+	 */
+	move_list(&cluster_head, &new_head, false);
+	update_all_clusters_stats();
+}
+
+void init_clusters(void)
+{
+	bitmap_clear(all_cluster_ids, 0, NR_CPUS);
+	init_cluster.cpus = *cpu_possible_mask;
+	raw_spin_lock_init(&init_cluster.load_lock);
+	INIT_LIST_HEAD(&cluster_head);
+}
+
+int register_cpu_cycle_counter_cb(struct cpu_cycle_counter_cb *cb)
+{
+	mutex_lock(&cluster_lock);
+	if (!cb->get_cpu_cycle_counter) {
+		mutex_unlock(&cluster_lock);
+		return -EINVAL;
+	}
+
+	cpu_cycle_counter_cb = *cb;
+	use_cycle_counter = true;
+	mutex_unlock(&cluster_lock);
+
+	return 0;
+}
+
+/* Clear any HMP scheduler related requests pending from or on cpu */
+void clear_hmp_request(int cpu)
+{
+	struct rq *rq = cpu_rq(cpu);
+	unsigned long flags;
+
+	clear_boost_kick(cpu);
+	clear_reserved(cpu);
+	if (rq->push_task) {
+		struct task_struct *push_task = NULL;
+
+		raw_spin_lock_irqsave(&rq->lock, flags);
+		if (rq->push_task) {
+			clear_reserved(rq->push_cpu);
+			push_task = rq->push_task;
+			rq->push_task = NULL;
+		}
+		rq->active_balance = 0;
+		raw_spin_unlock_irqrestore(&rq->lock, flags);
+		if (push_task)
+			put_task_struct(push_task);
+	}
+}
+
+int sched_set_static_cpu_pwr_cost(int cpu, unsigned int cost)
+{
+	struct rq *rq = cpu_rq(cpu);
+
+	rq->static_cpu_pwr_cost = cost;
+	return 0;
+}
+
+unsigned int sched_get_static_cpu_pwr_cost(int cpu)
+{
+	return cpu_rq(cpu)->static_cpu_pwr_cost;
+}
+
+int sched_set_static_cluster_pwr_cost(int cpu, unsigned int cost)
+{
+	struct sched_cluster *cluster = cpu_rq(cpu)->cluster;
+
+	cluster->static_cluster_pwr_cost = cost;
+	return 0;
+}
+
+unsigned int sched_get_static_cluster_pwr_cost(int cpu)
+{
+	return cpu_rq(cpu)->cluster->static_cluster_pwr_cost;
+}
+
+int sched_set_cluster_wake_idle(int cpu, unsigned int wake_idle)
+{
+	struct sched_cluster *cluster = cpu_rq(cpu)->cluster;
+
+	cluster->wake_up_idle = !!wake_idle;
+	return 0;
+}
+
+unsigned int sched_get_cluster_wake_idle(int cpu)
+{
+	return cpu_rq(cpu)->cluster->wake_up_idle;
+}
+
+/*
+ * sched_window_stats_policy and sched_ravg_hist_size have a 'sysctl' copy
+ * associated with them. This is required for atomic update of those variables
+ * when being modified via the sysctl interface.
+ *
+ * IMPORTANT: Initialize both copies to the same value!
+ */
+
+/*
+ * Tasks that are runnable continuously for a period greater than
+ * EARLY_DETECTION_DURATION can be flagged early as potential
+ * high load tasks.
+ */
+#define EARLY_DETECTION_DURATION 9500000
+
+static __read_mostly unsigned int sched_ravg_hist_size = 5;
+__read_mostly unsigned int sysctl_sched_ravg_hist_size = 5;
+
+static __read_mostly unsigned int sched_window_stats_policy =
+	 WINDOW_STATS_MAX_RECENT_AVG;
+__read_mostly unsigned int sysctl_sched_window_stats_policy =
+	WINDOW_STATS_MAX_RECENT_AVG;
+
+#define SCHED_ACCOUNT_WAIT_TIME 1
+
+__read_mostly unsigned int sysctl_sched_cpu_high_irqload = (10 * NSEC_PER_MSEC);
+
+/*
+ * Enable colocation and frequency aggregation for all threads in a process.
+ * Children inherit the group id from the parent.
+ */
+unsigned int __read_mostly sysctl_sched_enable_thread_grouping;
+
+#define SCHED_NEW_TASK_WINDOWS 5
+
+#define SCHED_FREQ_ACCOUNT_WAIT_TIME 0
+
+/*
+ * This governs what load needs to be used when reporting CPU busy time
+ * to the cpufreq governor.
+ */
+__read_mostly unsigned int sysctl_sched_freq_reporting_policy;
+
+/*
+ * For increase, send notification if
+ *      freq_required - cur_freq > sysctl_sched_freq_inc_notify
+ */
+__read_mostly int sysctl_sched_freq_inc_notify = 10 * 1024 * 1024; /* + 10GHz */
+
+/*
+ * For decrease, send notification if
+ *      cur_freq - freq_required > sysctl_sched_freq_dec_notify
+ */
+__read_mostly int sysctl_sched_freq_dec_notify = 10 * 1024 * 1024; /* - 10GHz */
+
+static __read_mostly unsigned int sched_io_is_busy;
+
+__read_mostly unsigned int sysctl_sched_pred_alert_freq = 10 * 1024 * 1024;
+
+/*
+ * Maximum possible frequency across all cpus. Task demand and cpu
+ * capacity (cpu_power) metrics are scaled in reference to it.
+ */
+unsigned int max_possible_freq = 1;
+
+/*
+ * Minimum possible max_freq across all cpus. This will be same as
+ * max_possible_freq on homogeneous systems and could be different from
+ * max_possible_freq on heterogenous systems. min_max_freq is used to derive
+ * capacity (cpu_power) of cpus.
+ */
+unsigned int min_max_freq = 1;
+
+unsigned int max_capacity = 1024; /* max(rq->capacity) */
+unsigned int min_capacity = 1024; /* min(rq->capacity) */
+unsigned int max_possible_capacity = 1024; /* max(rq->max_possible_capacity) */
+unsigned int
+min_max_possible_capacity = 1024; /* min(rq->max_possible_capacity) */
+
+/* Min window size (in ns) = 10ms */
+#define MIN_SCHED_RAVG_WINDOW 10000000
+
+/* Max window size (in ns) = 1s */
+#define MAX_SCHED_RAVG_WINDOW 1000000000
+
+/* Window size (in ns) */
+__read_mostly unsigned int sched_ravg_window = MIN_SCHED_RAVG_WINDOW;
+
+/* Maximum allowed threshold before freq aggregation must be enabled */
+#define MAX_FREQ_AGGR_THRESH 1000
+
+/* Temporarily disable window-stats activity on all cpus */
+unsigned int __read_mostly sched_disable_window_stats;
+
+struct related_thread_group *related_thread_groups[MAX_NUM_CGROUP_COLOC_ID];
+static LIST_HEAD(active_related_thread_groups);
+static DEFINE_RWLOCK(related_thread_group_lock);
+
+#define for_each_related_thread_group(grp) \
+	list_for_each_entry(grp, &active_related_thread_groups, list)
+
+/*
+ * Task load is categorized into buckets for the purpose of top task tracking.
+ * The entire range of load from 0 to sched_ravg_window needs to be covered
+ * in NUM_LOAD_INDICES number of buckets. Therefore the size of each bucket
+ * is given by sched_ravg_window / NUM_LOAD_INDICES. Since the default value
+ * of sched_ravg_window is MIN_SCHED_RAVG_WINDOW, use that to compute
+ * sched_load_granule.
+ */
+__read_mostly unsigned int sched_load_granule =
+			MIN_SCHED_RAVG_WINDOW / NUM_LOAD_INDICES;
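+
+/*
+ * For example, with the default 10ms window and assuming NUM_LOAD_INDICES
+ * is 1000 (the actual value is defined elsewhere), sched_load_granule is
+ * 10000000 / 1000 = 10000 ns, i.e. each top-task bucket covers 10us of
+ * load.
+ */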
+
+/* Size of bitmaps maintained to track top tasks */
+static const unsigned int top_tasks_bitmap_size =
+		BITS_TO_LONGS(NUM_LOAD_INDICES + 1) * sizeof(unsigned long);
+
+/*
+ * Demand aggregation for frequency purpose:
+ *
+ * 'sched_freq_aggregate' controls aggregation of cpu demand of related threads
+ * for frequency determination purpose. This aggregation is done per-cluster.
+ *
+ * CPU demand of tasks from various related groups is aggregated per-cluster and
+ * added to the "max_busy_cpu" in that cluster, where max_busy_cpu is determined
+ * by just rq->prev_runnable_sum.
+ *
+ * Some examples follow, which assume:
+ *	Cluster0 = CPU0-3, Cluster1 = CPU4-7
+ *	One related thread group A that has tasks A0, A1, A2
+ *
+ *	A->cpu_time[X].curr/prev_sum = counters in which cpu execution stats of
+ *	tasks belonging to group A are accumulated when they run on cpu X.
+ *
+ *	CX->curr/prev_sum = counters in which cpu execution stats of all tasks
+ *	not belonging to group A are accumulated when they run on cpu X
+ *
+ * Let's say the stats for window M were as below:
+ *
+ *	C0->prev_sum = 1ms, A->cpu_time[0].prev_sum = 5ms
+ *		Task A0 ran 5ms on CPU0
+ *		Task B0 ran 1ms on CPU0
+ *
+ *	C1->prev_sum = 5ms, A->cpu_time[1].prev_sum = 6ms
+ *		Task A1 ran 4ms on CPU1
+ *		Task A2 ran 2ms on CPU1
+ *		Task B1 ran 5ms on CPU1
+ *
+ *	C2->prev_sum = 0ms, A->cpu_time[2].prev_sum = 0
+ *		CPU2 idle
+ *
+ *	C3->prev_sum = 0ms, A->cpu_time[3].prev_sum = 0
+ *		CPU3 idle
+ *
+ * In this case, CPU1 was the most busy going by just its prev_sum counter.
+ * Demand from all group A tasks is added to CPU1. IOW, at the end of window M,
+ * the cpu busy time reported to the governor will be:
+ *
+ *	C0 busy time = 1ms
+ *	C1 busy time = 5 + 5 + 6 = 16ms
+ *
+ */
+static __read_mostly unsigned int sched_freq_aggregate = 1;
+__read_mostly unsigned int sysctl_sched_freq_aggregate = 1;
+
+unsigned int __read_mostly sysctl_sched_freq_aggregate_threshold_pct;
+static unsigned int __read_mostly sched_freq_aggregate_threshold;
+
+/* Initial task load. Newly created tasks are assigned this load. */
+unsigned int __read_mostly sched_init_task_load_windows;
+unsigned int __read_mostly sysctl_sched_init_task_load_pct = 15;
+
+unsigned int max_task_load(void)
+{
+	return sched_ravg_window;
+}
+
+/* A cpu can no longer accommodate more tasks if:
+ *
+ *	rq->nr_running > sysctl_sched_spill_nr_run ||
+ *	rq->hmp_stats.cumulative_runnable_avg > sched_spill_load
+ */
+unsigned int __read_mostly sysctl_sched_spill_nr_run = 10;
+
+/*
+ * Place sync wakee tasks that have less than the configured demand on the
+ * waker's cluster.
+ */
+unsigned int __read_mostly sched_small_wakee_task_load;
+unsigned int __read_mostly sysctl_sched_small_wakee_task_load_pct = 10;
+
+unsigned int __read_mostly sched_big_waker_task_load;
+unsigned int __read_mostly sysctl_sched_big_waker_task_load_pct = 25;
+
+/*
+ * CPUs with load greater than the sched_spill_load_threshold are not
+ * eligible for task placement. When all CPUs in a cluster achieve a
+ * load higher than this level, tasks become eligible for inter-cluster
+ * migration.
+ */
+unsigned int __read_mostly sched_spill_load;
+unsigned int __read_mostly sysctl_sched_spill_load_pct = 100;
+
+/*
+ * Prefer the waker CPU for sync wakee task, if the CPU has only 1 runnable
+ * task. This eliminates the LPM exit latency associated with the idle
+ * CPUs in the waker cluster.
+ */
+unsigned int __read_mostly sysctl_sched_prefer_sync_wakee_to_waker;
+
+/*
+ * Tasks whose bandwidth consumption on a cpu is more than
+ * sched_upmigrate are considered "big" tasks. Big tasks will be
+ * considered for "up" migration, i.e migrating to a cpu with better
+ * capacity.
+ */
+unsigned int __read_mostly sched_upmigrate;
+unsigned int __read_mostly sysctl_sched_upmigrate_pct = 80;
+
+/*
+ * Big tasks, once migrated, will need to drop their bandwidth
+ * consumption to less than sched_downmigrate before they are "down"
+ * migrated.
+ */
+unsigned int __read_mostly sched_downmigrate;
+unsigned int __read_mostly sysctl_sched_downmigrate_pct = 60;
+
+/*
+ * Task groups whose aggregate demand on a cpu is more than
+ * sched_group_upmigrate need to be up-migrated if possible.
+ */
+unsigned int __read_mostly sched_group_upmigrate;
+unsigned int __read_mostly sysctl_sched_group_upmigrate_pct = 100;
+
+/*
+ * Task groups, once up-migrated, will need to drop their aggregate
+ * demand to less than sched_group_downmigrate before they are "down"
+ * migrated.
+ */
+unsigned int __read_mostly sched_group_downmigrate;
+unsigned int __read_mostly sysctl_sched_group_downmigrate_pct = 95;
+
+/*
+ * The load scale factor of a CPU gets boosted when its max frequency is
+ * restricted, which causes tasks to migrate to higher capacity CPUs early.
+ * To compensate, the sched_upmigrate threshold is scaled up by
+ * rq->max_possible_freq/rq->max_freq of a lower capacity CPU.
+ */
+unsigned int up_down_migrate_scale_factor = 1024;
+
+/*
+ * The scheduler selects and places a task on its previous CPU if its sleep
+ * time is less than sysctl_sched_select_prev_cpu_us.
+ */
+unsigned int __read_mostly
+sched_short_sleep_task_threshold = 2000 * NSEC_PER_USEC;
+
+unsigned int __read_mostly sysctl_sched_select_prev_cpu_us = 2000;
+
+unsigned int __read_mostly
+sched_long_cpu_selection_threshold = 100 * NSEC_PER_MSEC;
+
+unsigned int __read_mostly sysctl_sched_restrict_cluster_spill;
+
+/*
+ * The scheduler tries to avoid waking up idle CPUs for tasks running in
+ * short bursts. If a task's average burst is less than
+ * sysctl_sched_short_burst nanoseconds and it sleeps on average for more
+ * than sysctl_sched_short_sleep nanoseconds, then the task is eligible
+ * for packing.
+ */
+unsigned int __read_mostly sysctl_sched_short_burst;
+unsigned int __read_mostly sysctl_sched_short_sleep = 1 * NSEC_PER_MSEC;
+
+static void _update_up_down_migrate(unsigned int *up_migrate,
+			unsigned int *down_migrate, bool is_group)
+{
+	unsigned int delta;
+
+	if (up_down_migrate_scale_factor == 1024)
+		return;
+
+	delta = *up_migrate - *down_migrate;
+
+	*up_migrate /= NSEC_PER_USEC;
+	*up_migrate *= up_down_migrate_scale_factor;
+	*up_migrate >>= 10;
+	*up_migrate *= NSEC_PER_USEC;
+
+	if (!is_group)
+		*up_migrate = min(*up_migrate, sched_ravg_window);
+
+	*down_migrate /= NSEC_PER_USEC;
+	*down_migrate *= up_down_migrate_scale_factor;
+	*down_migrate >>= 10;
+	*down_migrate *= NSEC_PER_USEC;
+
+	*down_migrate = min(*down_migrate, *up_migrate - delta);
+}
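+
+/*
+ * Worked example with hypothetical values: with a 10ms window,
+ * up_migrate = 8ms (80%), down_migrate = 6ms (60%) and
+ * up_down_migrate_scale_factor = 2048, both thresholds are first doubled
+ * to 16ms and 12ms. For per-task thresholds up_migrate is then clamped
+ * to the 10ms window, and down_migrate = min(12ms, 10ms - 2ms) = 8ms,
+ * preserving the original 2ms hysteresis gap between the two.
+ */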
+
+static void update_up_down_migrate(void)
+{
+	unsigned int up_migrate = pct_to_real(sysctl_sched_upmigrate_pct);
+	unsigned int down_migrate = pct_to_real(sysctl_sched_downmigrate_pct);
+
+	_update_up_down_migrate(&up_migrate, &down_migrate, false);
+	sched_upmigrate = up_migrate;
+	sched_downmigrate = down_migrate;
+
+	up_migrate = pct_to_real(sysctl_sched_group_upmigrate_pct);
+	down_migrate = pct_to_real(sysctl_sched_group_downmigrate_pct);
+
+	_update_up_down_migrate(&up_migrate, &down_migrate, true);
+	sched_group_upmigrate = up_migrate;
+	sched_group_downmigrate = down_migrate;
+}
+
+void set_hmp_defaults(void)
+{
+	sched_spill_load =
+		pct_to_real(sysctl_sched_spill_load_pct);
+
+	update_up_down_migrate();
+
+	sched_init_task_load_windows =
+		div64_u64((u64)sysctl_sched_init_task_load_pct *
+			  (u64)sched_ravg_window, 100);
+
+	sched_short_sleep_task_threshold = sysctl_sched_select_prev_cpu_us *
+					   NSEC_PER_USEC;
+
+	sched_small_wakee_task_load =
+		div64_u64((u64)sysctl_sched_small_wakee_task_load_pct *
+			  (u64)sched_ravg_window, 100);
+
+	sched_big_waker_task_load =
+		div64_u64((u64)sysctl_sched_big_waker_task_load_pct *
+			  (u64)sched_ravg_window, 100);
+
+	sched_freq_aggregate_threshold =
+		pct_to_real(sysctl_sched_freq_aggregate_threshold_pct);
+}
+
+u32 sched_get_init_task_load(struct task_struct *p)
+{
+	return p->init_load_pct;
+}
+
+int sched_set_init_task_load(struct task_struct *p, int init_load_pct)
+{
+	if (init_load_pct < 0 || init_load_pct > 100)
+		return -EINVAL;
+
+	p->init_load_pct = init_load_pct;
+
+	return 0;
+}
+
+#ifdef CONFIG_CGROUP_SCHED
+
+int upmigrate_discouraged(struct task_struct *p)
+{
+	return task_group(p)->upmigrate_discouraged;
+}
+
+#else
+
+static inline int upmigrate_discouraged(struct task_struct *p)
+{
+	return 0;
+}
+
+#endif
+
+/* Is a task "big" on its current cpu */
+static inline int __is_big_task(struct task_struct *p, u64 scaled_load)
+{
+	int nice = task_nice(p);
+
+	if (nice > SCHED_UPMIGRATE_MIN_NICE || upmigrate_discouraged(p))
+		return 0;
+
+	return scaled_load > sched_upmigrate;
+}
+
+int is_big_task(struct task_struct *p)
+{
+	return __is_big_task(p, scale_load_to_cpu(task_load(p), task_cpu(p)));
+}
+
+u64 cpu_load(int cpu)
+{
+	struct rq *rq = cpu_rq(cpu);
+
+	return scale_load_to_cpu(rq->hmp_stats.cumulative_runnable_avg, cpu);
+}
+
+u64 cpu_load_sync(int cpu, int sync)
+{
+	return scale_load_to_cpu(cpu_cravg_sync(cpu, sync), cpu);
+}
+
+/*
+ * A task will fit on a cpu if its bandwidth consumption on that cpu is
+ * less than sched_upmigrate. A big task that was previously "up" migrated
+ * is considered to fit on a "little" cpu if its bandwidth consumption on
+ * the "little" cpu is less than sched_downmigrate. This helps avoid
+ * frequent migrations for tasks with load close to the upmigrate
+ * threshold.
+ */
+int task_load_will_fit(struct task_struct *p, u64 task_load, int cpu,
+			      enum sched_boost_policy boost_policy)
+{
+	int upmigrate = sched_upmigrate;
+
+	if (cpu_capacity(cpu) == max_capacity)
+		return 1;
+
+	if (cpu_capacity(task_cpu(p)) > cpu_capacity(cpu))
+		upmigrate = sched_downmigrate;
+
+	if (boost_policy != SCHED_BOOST_ON_BIG) {
+		if (task_nice(p) > SCHED_UPMIGRATE_MIN_NICE ||
+		    upmigrate_discouraged(p))
+			return 1;
+
+		if (task_load < upmigrate)
+			return 1;
+	} else {
+		if (task_sched_boost(p) || task_load >= upmigrate)
+			return 0;
+
+		return 1;
+	}
+
+	return 0;
+}
+
+int task_will_fit(struct task_struct *p, int cpu)
+{
+	u64 tload = scale_load_to_cpu(task_load(p), cpu);
+
+	return task_load_will_fit(p, tload, cpu, sched_boost_policy());
+}
+
+static int
+group_will_fit(struct sched_cluster *cluster, struct related_thread_group *grp,
+						u64 demand, bool group_boost)
+{
+	int cpu = cluster_first_cpu(cluster);
+	int prev_capacity = 0;
+	unsigned int threshold = sched_group_upmigrate;
+	u64 load;
+
+	if (cluster->capacity == max_capacity)
+		return 1;
+
+	if (group_boost)
+		return 0;
+
+	if (!demand)
+		return 1;
+
+	if (grp->preferred_cluster)
+		prev_capacity = grp->preferred_cluster->capacity;
+
+	if (cluster->capacity < prev_capacity)
+		threshold = sched_group_downmigrate;
+
+	load = scale_load_to_cpu(demand, cpu);
+	if (load < threshold)
+		return 1;
+
+	return 0;
+}
+
+/*
+ * Return the cost of running task p on CPU cpu. This function
+ * currently assumes that task p is the only task which will run on
+ * the CPU.
+ */
+unsigned int power_cost(int cpu, u64 demand)
+{
+	int first, mid, last;
+	struct cpu_pwr_stats *per_cpu_info = get_cpu_pwr_stats();
+	struct cpu_pstate_pwr *costs;
+	struct freq_max_load *max_load;
+	int total_static_pwr_cost = 0;
+	struct rq *rq = cpu_rq(cpu);
+	unsigned int pc;
+
+	if (!per_cpu_info || !per_cpu_info[cpu].ptable)
+		/*
+		 * When power aware scheduling is not in use, or CPU
+		 * power data is not available, just use the CPU
+		 * capacity as a rough stand-in for real CPU power
+		 * numbers, assuming bigger CPUs are more power
+		 * hungry.
+		 */
+		return cpu_max_possible_capacity(cpu);
+
+	rcu_read_lock();
+	max_load = rcu_dereference(per_cpu(freq_max_load, cpu));
+	if (!max_load) {
+		pc = cpu_max_possible_capacity(cpu);
+		goto unlock;
+	}
+
+	costs = per_cpu_info[cpu].ptable;
+
+	if (demand <= max_load->freqs[0].hdemand) {
+		pc = costs[0].power;
+		goto unlock;
+	} else if (demand > max_load->freqs[max_load->length - 1].hdemand) {
+		pc = costs[max_load->length - 1].power;
+		goto unlock;
+	}
+
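+	/*
+	 * Binary search for the lowest frequency level whose hdemand
+	 * covers 'demand'. Loop invariant (given the boundary checks
+	 * above): freqs[first].hdemand < demand <= freqs[last].hdemand,
+	 * so 'last' indexes the matching power entry once the range
+	 * narrows to a single step.
+	 */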
+	first = 0;
+	last = max_load->length - 1;
+	mid = (last - first) >> 1;
+	while (1) {
+		if (demand <= max_load->freqs[mid].hdemand)
+			last = mid;
+		else
+			first = mid;
+
+		if (last - first == 1)
+			break;
+		mid = first + ((last - first) >> 1);
+	}
+
+	pc = costs[last].power;
+
+unlock:
+	rcu_read_unlock();
+
+	if (idle_cpu(cpu) && rq->cstate) {
+		total_static_pwr_cost += rq->static_cpu_pwr_cost;
+		if (rq->cluster->dstate)
+			total_static_pwr_cost +=
+				rq->cluster->static_cluster_pwr_cost;
+	}
+
+	return pc + total_static_pwr_cost;
+}
+
+void inc_nr_big_task(struct hmp_sched_stats *stats, struct task_struct *p)
+{
+	if (sched_disable_window_stats)
+		return;
+
+	if (is_big_task(p))
+		stats->nr_big_tasks++;
+}
+
+void dec_nr_big_task(struct hmp_sched_stats *stats, struct task_struct *p)
+{
+	if (sched_disable_window_stats)
+		return;
+
+	if (is_big_task(p))
+		stats->nr_big_tasks--;
+
+	BUG_ON(stats->nr_big_tasks < 0);
+}
+
+void inc_rq_hmp_stats(struct rq *rq, struct task_struct *p, int change_cra)
+{
+	inc_nr_big_task(&rq->hmp_stats, p);
+	if (change_cra)
+		inc_cumulative_runnable_avg(&rq->hmp_stats, p);
+}
+
+void dec_rq_hmp_stats(struct rq *rq, struct task_struct *p, int change_cra)
+{
+	dec_nr_big_task(&rq->hmp_stats, p);
+	if (change_cra)
+		dec_cumulative_runnable_avg(&rq->hmp_stats, p);
+}
+
+void reset_hmp_stats(struct hmp_sched_stats *stats, int reset_cra)
+{
+	stats->nr_big_tasks = 0;
+	if (reset_cra) {
+		stats->cumulative_runnable_avg = 0;
+		stats->pred_demands_sum = 0;
+	}
+}
+
+int preferred_cluster(struct sched_cluster *cluster, struct task_struct *p)
+{
+	struct related_thread_group *grp;
+	int rc = 1;
+
+	rcu_read_lock();
+
+	grp = task_related_thread_group(p);
+	if (grp)
+		rc = (grp->preferred_cluster == cluster);
+
+	rcu_read_unlock();
+	return rc;
+}
+
+struct sched_cluster *rq_cluster(struct rq *rq)
+{
+	return rq->cluster;
+}
+
+/*
+ * reset_cpu_hmp_stats - reset HMP stats for a cpu
+ *	nr_big_tasks
+ *	cumulative_runnable_avg (iff reset_cra is true)
+ */
+void reset_cpu_hmp_stats(int cpu, int reset_cra)
+{
+	reset_cfs_rq_hmp_stats(cpu, reset_cra);
+	reset_hmp_stats(&cpu_rq(cpu)->hmp_stats, reset_cra);
+}
+
+void fixup_nr_big_tasks(struct hmp_sched_stats *stats,
+				struct task_struct *p, s64 delta)
+{
+	u64 new_task_load;
+	u64 old_task_load;
+
+	if (sched_disable_window_stats)
+		return;
+
+	old_task_load = scale_load_to_cpu(task_load(p), task_cpu(p));
+	new_task_load = scale_load_to_cpu(delta + task_load(p), task_cpu(p));
+
+	if (__is_big_task(p, old_task_load) && !__is_big_task(p, new_task_load))
+		stats->nr_big_tasks--;
+	else if (!__is_big_task(p, old_task_load) &&
+		 __is_big_task(p, new_task_load))
+		stats->nr_big_tasks++;
+
+	BUG_ON(stats->nr_big_tasks < 0);
+}
+
+/*
+ * Walk runqueue of cpu and re-initialize 'nr_big_tasks' counters.
+ */
+static void update_nr_big_tasks(int cpu)
+{
+	struct rq *rq = cpu_rq(cpu);
+	struct task_struct *p;
+
+	/* Do not reset cumulative_runnable_avg */
+	reset_cpu_hmp_stats(cpu, 0);
+
+	list_for_each_entry(p, &rq->cfs_tasks, se.group_node)
+		_inc_hmp_sched_stats_fair(rq, p, 0);
+}
+
+/* Disable interrupts and grab runqueue lock of all cpus listed in @cpus */
+void pre_big_task_count_change(const struct cpumask *cpus)
+{
+	int i;
+
+	local_irq_disable();
+
+	for_each_cpu(i, cpus)
+		raw_spin_lock(&cpu_rq(i)->lock);
+}
+
+/*
+ * Reinitialize 'nr_big_tasks' counters on all affected cpus
+ */
+void post_big_task_count_change(const struct cpumask *cpus)
+{
+	int i;
+
+	/* Assumes local_irq_disable() keeps the online cpumask stable */
+	for_each_cpu(i, cpus)
+		update_nr_big_tasks(i);
+
+	for_each_cpu(i, cpus)
+		raw_spin_unlock(&cpu_rq(i)->lock);
+
+	local_irq_enable();
+}
+
+DEFINE_MUTEX(policy_mutex);
+
+unsigned int update_freq_aggregate_threshold(unsigned int threshold)
+{
+	unsigned int old_threshold;
+
+	mutex_lock(&policy_mutex);
+
+	old_threshold = sysctl_sched_freq_aggregate_threshold_pct;
+
+	sysctl_sched_freq_aggregate_threshold_pct = threshold;
+	sched_freq_aggregate_threshold =
+		pct_to_real(sysctl_sched_freq_aggregate_threshold_pct);
+
+	mutex_unlock(&policy_mutex);
+
+	return old_threshold;
+}
+
+static inline int invalid_value_freq_input(unsigned int *data)
+{
+	if (data == &sysctl_sched_freq_aggregate)
+		return !(*data == 0 || *data == 1);
+
+	return 0;
+}
+
+static inline int invalid_value(unsigned int *data)
+{
+	unsigned int val = *data;
+
+	if (data == &sysctl_sched_ravg_hist_size)
+		return (val < 2 || val > RAVG_HIST_SIZE_MAX);
+
+	if (data == &sysctl_sched_window_stats_policy)
+		return val >= WINDOW_STATS_INVALID_POLICY;
+
+	return invalid_value_freq_input(data);
+}
+
+/*
+ * Handle "atomic" update of the sysctl_sched_window_stats_policy and
+ * sysctl_sched_ravg_hist_size variables.
+ */
+int sched_window_update_handler(struct ctl_table *table, int write,
+		void __user *buffer, size_t *lenp,
+		loff_t *ppos)
+{
+	int ret;
+	unsigned int *data = (unsigned int *)table->data;
+	unsigned int old_val;
+
+	mutex_lock(&policy_mutex);
+
+	old_val = *data;
+
+	ret = proc_dointvec(table, write, buffer, lenp, ppos);
+	if (ret || !write || (write && (old_val == *data)))
+		goto done;
+
+	if (invalid_value(data)) {
+		*data = old_val;
+		ret = -EINVAL;
+		goto done;
+	}
+
+	reset_all_window_stats(0, 0);
+
+done:
+	mutex_unlock(&policy_mutex);
+
+	return ret;
+}
+
+/*
+ * Convert percentage values into absolute form up front. This avoids a
+ * div() operation in the fast path when comparing task load against the
+ * thresholds.
+ */
+int sched_hmp_proc_update_handler(struct ctl_table *table, int write,
+		void __user *buffer, size_t *lenp,
+		loff_t *ppos)
+{
+	int ret;
+	unsigned int old_val;
+	unsigned int *data = (unsigned int *)table->data;
+	int update_task_count = 0;
+
+	/*
+	 * The policy mutex is acquired with cpu_hotplug.lock
+	 * held from cpu_up()->cpufreq_governor_interactive()->
+	 * sched_set_window(). So enforce the same order here.
+	 */
+	if (write && (data == &sysctl_sched_upmigrate_pct)) {
+		update_task_count = 1;
+		get_online_cpus();
+	}
+
+	mutex_lock(&policy_mutex);
+
+	old_val = *data;
+
+	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
+
+	if (ret || !write)
+		goto done;
+
+	if (write && (old_val == *data))
+		goto done;
+
+	if (sysctl_sched_downmigrate_pct > sysctl_sched_upmigrate_pct ||
+				sysctl_sched_group_downmigrate_pct >
+				sysctl_sched_group_upmigrate_pct) {
+		*data = old_val;
+		ret = -EINVAL;
+		goto done;
+	}
+
+	/*
+	 * A change to the big task tunables requires re-classifying tasks
+	 * on the runqueue as big and setting their counters appropriately.
+	 * The sysctl interface affects secondary variables (*_pct), which
+	 * are then "atomically" carried over to the primary variables. The
+	 * atomic change includes taking the runqueue lock of all online
+	 * cpus and re-initializing their big task counter values based on
+	 * the changed criteria.
+	 */
+	if (update_task_count)
+		pre_big_task_count_change(cpu_online_mask);
+
+	set_hmp_defaults();
+
+	if (update_task_count)
+		post_big_task_count_change(cpu_online_mask);
+
+done:
+	mutex_unlock(&policy_mutex);
+	if (update_task_count)
+		put_online_cpus();
+	return ret;
+}
+
+inline int nr_big_tasks(struct rq *rq)
+{
+	return rq->hmp_stats.nr_big_tasks;
+}
+
+unsigned int cpu_temp(int cpu)
+{
+	struct cpu_pwr_stats *per_cpu_info = get_cpu_pwr_stats();
+
+	if (per_cpu_info)
+		return per_cpu_info[cpu].temp;
+	else
+		return 0;
+}
+
+/*
+ * kfree() may wake up kswapd. So this function should NOT be called
+ * with any CPU's rq->lock acquired.
+ */
+void free_task_load_ptrs(struct task_struct *p)
+{
+	kfree(p->ravg.curr_window_cpu);
+	kfree(p->ravg.prev_window_cpu);
+
+	/*
+	 * update_task_ravg() can be called for exiting tasks. While the
+	 * function itself ensures correct behavior, the corresponding
+	 * trace event requires that these pointers be NULL.
+	 */
+	p->ravg.curr_window_cpu = NULL;
+	p->ravg.prev_window_cpu = NULL;
+}
+
+void init_new_task_load(struct task_struct *p, bool idle_task)
+{
+	int i;
+	u32 init_load_windows = sched_init_task_load_windows;
+	u32 init_load_pct = current->init_load_pct;
+
+	p->init_load_pct = 0;
+	rcu_assign_pointer(p->grp, NULL);
+	INIT_LIST_HEAD(&p->grp_list);
+	memset(&p->ravg, 0, sizeof(struct ravg));
+	p->cpu_cycles = 0;
+	p->ravg.curr_burst = 0;
+	/*
+	 * Initialize the avg_burst to twice the threshold, so that
+	 * a task would not be classified as short burst right away
+	 * after fork. It takes at least 6 sleep-wakeup cycles for
+	 * the avg_burst to go below the threshold.
+	 */
+	p->ravg.avg_burst = 2 * (u64)sysctl_sched_short_burst;
+	p->ravg.avg_sleep_time = 0;
+
+	p->ravg.curr_window_cpu = kcalloc(nr_cpu_ids, sizeof(u32), GFP_KERNEL);
+	p->ravg.prev_window_cpu = kcalloc(nr_cpu_ids, sizeof(u32), GFP_KERNEL);
+
+	/* Don't have much choice. CPU frequency would be bogus */
+	BUG_ON(!p->ravg.curr_window_cpu || !p->ravg.prev_window_cpu);
+
+	if (idle_task)
+		return;
+
+	if (init_load_pct)
+		init_load_windows = div64_u64((u64)init_load_pct *
+			  (u64)sched_ravg_window, 100);
+
+	p->ravg.demand = init_load_windows;
+	p->ravg.pred_demand = 0;
+	for (i = 0; i < RAVG_HIST_SIZE_MAX; ++i)
+		p->ravg.sum_history[i] = init_load_windows;
+}
+
+/* Return task demand in percentage scale */
+unsigned int pct_task_load(struct task_struct *p)
+{
+	unsigned int load;
+
+	load = div64_u64((u64)task_load(p) * 100, (u64)max_task_load());
+
+	return load;
+}
+
+/*
+ * Return total number of tasks "eligible" to run on highest capacity cpu
+ *
+ * This is simply nr_big_tasks for cpus which are not of max_capacity and
+ * nr_running for cpus of max_capacity
+ */
+unsigned int nr_eligible_big_tasks(int cpu)
+{
+	struct rq *rq = cpu_rq(cpu);
+	int nr_big = rq->hmp_stats.nr_big_tasks;
+	int nr = rq->nr_running;
+
+	if (!is_max_capacity_cpu(cpu))
+		return nr_big;
+
+	return nr;
+}
+
+static inline int exiting_task(struct task_struct *p)
+{
+	return (p->ravg.sum_history[0] == EXITING_TASK_MARKER);
+}
+
+static int __init set_sched_ravg_window(char *str)
+{
+	unsigned int window_size;
+
+	get_option(&str, &window_size);
+
+	if (window_size < MIN_SCHED_RAVG_WINDOW ||
+			window_size > MAX_SCHED_RAVG_WINDOW) {
+		WARN_ON(1);
+		return -EINVAL;
+	}
+
+	sched_ravg_window = window_size;
+	return 0;
+}
+
+early_param("sched_ravg_window", set_sched_ravg_window);
+
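+/*
+ * Advance rq->window_start by however many whole windows have elapsed.
+ * E.g. if 2.5 windows have passed since the last update, window_start
+ * moves forward by exactly 2 * sched_ravg_window and the partial half
+ * window remains the current window.
+ */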
+static inline void
+update_window_start(struct rq *rq, u64 wallclock)
+{
+	s64 delta;
+	int nr_windows;
+
+	delta = wallclock - rq->window_start;
+	BUG_ON(delta < 0);
+	if (delta < sched_ravg_window)
+		return;
+
+	nr_windows = div64_u64(delta, sched_ravg_window);
+	rq->window_start += (u64)nr_windows * (u64)sched_ravg_window;
+}
+
+#define DIV64_U64_ROUNDUP(X, Y) div64_u64((X) + (Y - 1), Y)
+
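+/*
+ * Scale a wallclock execution delta into demand normalized to the most
+ * capable cpu. Illustrative numbers: with the cpu running at half of
+ * max_possible_freq and an exec_scale_factor of 1024 (unity after the
+ * >> 10), a 10ms delta contributes 5ms of demand.
+ */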
+static inline u64 scale_exec_time(u64 delta, struct rq *rq)
+{
+	u32 freq;
+
+	freq = cpu_cycles_to_freq(rq->cc.cycles, rq->cc.time);
+	delta = DIV64_U64_ROUNDUP(delta * freq, max_possible_freq);
+	delta *= rq->cluster->exec_scale_factor;
+	delta >>= 10;
+
+	return delta;
+}
+
+static inline int cpu_is_waiting_on_io(struct rq *rq)
+{
+	if (!sched_io_is_busy)
+		return 0;
+
+	return atomic_read(&rq->nr_iowait);
+}
+
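+/*
+ * Example with a hypothetical tunable value: if
+ * sysctl_sched_freq_inc_notify is 200000 (kHz), a required frequency up
+ * to 200 MHz above cur_freq is still "nearly the same" and no increase
+ * notification is warranted.
+ */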
+/* Does freq_required sufficiently exceed or fall behind cur_freq? */
+static inline int
+nearly_same_freq(unsigned int cur_freq, unsigned int freq_required)
+{
+	int delta = freq_required - cur_freq;
+
+	if (freq_required > cur_freq)
+		return delta < sysctl_sched_freq_inc_notify;
+
+	delta = -delta;
+
+	return delta < sysctl_sched_freq_dec_notify;
+}
+
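+/*
+ * Sketch of the conversion: load is first normalized against
+ * max_task_load() on a 0..128 scale, then multiplied by the cpu's max
+ * possible frequency. E.g. (illustrative) a load of half of
+ * max_task_load() on a cpu capable of 2 GHz maps to roughly 1 GHz.
+ */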
+/* Convert busy time to frequency equivalent */
+static inline unsigned int load_to_freq(struct rq *rq, u64 load)
+{
+	unsigned int freq;
+
+	load = scale_load_to_cpu(load, cpu_of(rq));
+	load *= 128;
+	load = div64_u64(load, max_task_load());
+
+	freq = load * cpu_max_possible_freq(cpu_of(rq));
+	freq /= 128;
+
+	return freq;
+}
+
+/*
+ * Return load from all related groups in given frequency domain.
+ */
+static void group_load_in_freq_domain(struct cpumask *cpus,
+				u64 *grp_load, u64 *new_grp_load)
+{
+	int j;
+
+	for_each_cpu(j, cpus) {
+		struct rq *rq = cpu_rq(j);
+
+		*grp_load += rq->grp_time.prev_runnable_sum;
+		*new_grp_load += rq->grp_time.nt_prev_runnable_sum;
+	}
+}
+
+static inline u64 freq_policy_load(struct rq *rq, u64 load);
+/*
+ * Should scheduler alert governor for changing frequency?
+ *
+ * @check_pred - evaluate frequency based on the predictive demand
+ * @check_groups - add load from all related groups on given cpu
+ *
+ * check_groups is set to 1 if a "related" task movement/wakeup is triggering
+ * the notification check. To avoid "re-aggregation" of demand in such cases,
+ * we check whether the migrated/woken task's demand (along with demand from
+ * existing tasks on the cpu) can be met on the target cpu.
+ */
+static int send_notification(struct rq *rq, int check_pred, int check_groups)
+{
+	unsigned int cur_freq, freq_required;
+	unsigned long flags;
+	int rc = 0;
+	u64 group_load = 0, new_load  = 0;
+
+	if (check_pred) {
+		u64 prev = rq->old_busy_time;
+		u64 predicted = rq->hmp_stats.pred_demands_sum;
+
+		if (rq->cluster->cur_freq == cpu_max_freq(cpu_of(rq)))
+			return 0;
+
+		prev = max(prev, rq->old_estimated_time);
+		if (prev > predicted)
+			return 0;
+
+		cur_freq = load_to_freq(rq, prev);
+		freq_required = load_to_freq(rq, predicted);
+
+		if (freq_required < cur_freq + sysctl_sched_pred_alert_freq)
+			return 0;
+	} else {
+		/*
+		 * Protect from concurrent update of rq->prev_runnable_sum and
+		 * group cpu load
+		 */
+		raw_spin_lock_irqsave(&rq->lock, flags);
+		if (check_groups)
+			group_load = rq->grp_time.prev_runnable_sum;
+
+		new_load = rq->prev_runnable_sum + group_load;
+		new_load = freq_policy_load(rq, new_load);
+
+		raw_spin_unlock_irqrestore(&rq->lock, flags);
+
+		cur_freq = load_to_freq(rq, rq->old_busy_time);
+		freq_required = load_to_freq(rq, new_load);
+
+		if (nearly_same_freq(cur_freq, freq_required))
+			return 0;
+	}
+
+	raw_spin_lock_irqsave(&rq->lock, flags);
+	if (!rq->cluster->notifier_sent) {
+		rq->cluster->notifier_sent = 1;
+		rc = 1;
+		trace_sched_freq_alert(cpu_of(rq), check_pred, check_groups, rq,
+				       new_load);
+	}
+	raw_spin_unlock_irqrestore(&rq->lock, flags);
+
+	return rc;
+}
+
+/* Alert governor if there is a need to change frequency */
+void check_for_freq_change(struct rq *rq, bool check_pred, bool check_groups)
+{
+	int cpu = cpu_of(rq);
+
+	if (!send_notification(rq, check_pred, check_groups))
+		return;
+
+	atomic_notifier_call_chain(
+		&load_alert_notifier_head, 0,
+		(void *)(long)cpu);
+}
+
+void notify_migration(int src_cpu, int dest_cpu, bool src_cpu_dead,
+			     struct task_struct *p)
+{
+	bool check_groups;
+
+	rcu_read_lock();
+	check_groups = task_in_related_thread_group(p);
+	rcu_read_unlock();
+
+	if (!same_freq_domain(src_cpu, dest_cpu)) {
+		if (!src_cpu_dead)
+			check_for_freq_change(cpu_rq(src_cpu), false,
+					      check_groups);
+		check_for_freq_change(cpu_rq(dest_cpu), false, check_groups);
+	} else {
+		check_for_freq_change(cpu_rq(dest_cpu), true, check_groups);
+	}
+}
+
+static int account_busy_for_cpu_time(struct rq *rq, struct task_struct *p,
+				     u64 irqtime, int event)
+{
+	if (is_idle_task(p)) {
+		/* TASK_WAKE and TASK_MIGRATE are not possible for the idle task! */
+		if (event == PICK_NEXT_TASK)
+			return 0;
+
+		/* PUT_PREV_TASK, TASK_UPDATE and IRQ_UPDATE are left */
+		return irqtime || cpu_is_waiting_on_io(rq);
+	}
+
+	if (event == TASK_WAKE)
+		return 0;
+
+	if (event == PUT_PREV_TASK || event == IRQ_UPDATE)
+		return 1;
+
+	/*
+	 * TASK_UPDATE can be called on a sleeping task, when it is moved
+	 * between related groups.
+	 */
+	if (event == TASK_UPDATE) {
+		if (rq->curr == p)
+			return 1;
+
+		return p->on_rq ? SCHED_FREQ_ACCOUNT_WAIT_TIME : 0;
+	}
+
+	/* TASK_MIGRATE, PICK_NEXT_TASK left */
+	return SCHED_FREQ_ACCOUNT_WAIT_TIME;
+}
+
+static inline bool is_new_task(struct task_struct *p)
+{
+	return p->ravg.active_windows < SCHED_NEW_TASK_WINDOWS;
+}
+
+#define INC_STEP 8
+#define DEC_STEP 2
+#define CONSISTENT_THRES 16
+#define INC_STEP_BIG 16
+/*
+ * bucket_increase - update the count of all buckets
+ *
+ * @buckets: array of buckets tracking busy time of a task
+ * @idx: the index of bucket to be incremented
+ *
+ * Each time a complete window finishes, the count of the bucket that the
+ * runtime falls in (@idx) is incremented, and the counts of all other
+ * buckets are decayed. The rate of increase and decay can differ based
+ * on the current count in the bucket.
+ */
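+/*
+ * Example with the constants above: a bucket already at count 20 (above
+ * CONSISTENT_THRES) grows by INC_STEP_BIG to 36 when hit again, while
+ * every other bucket decays by DEC_STEP towards 0; counts saturate at
+ * U8_MAX.
+ */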
+static inline void bucket_increase(u8 *buckets, int idx)
+{
+	int i, step;
+
+	for (i = 0; i < NUM_BUSY_BUCKETS; i++) {
+		if (idx != i) {
+			if (buckets[i] > DEC_STEP)
+				buckets[i] -= DEC_STEP;
+			else
+				buckets[i] = 0;
+		} else {
+			step = buckets[i] >= CONSISTENT_THRES ?
+						INC_STEP_BIG : INC_STEP;
+			if (buckets[i] > U8_MAX - step)
+				buckets[i] = U8_MAX;
+			else
+				buckets[i] += step;
+		}
+	}
+}
+
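+/*
+ * Illustrative mapping, assuming NUM_BUSY_BUCKETS is 10: a runtime of
+ * 25% of max_task_load() lands in bucket 2, while anything below one
+ * bucket's worth is promoted to bucket 1 since buckets 0 and 1 are
+ * combined (see below).
+ */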
+static inline int busy_to_bucket(u32 normalized_rt)
+{
+	int bidx;
+
+	bidx = mult_frac(normalized_rt, NUM_BUSY_BUCKETS, max_task_load());
+	bidx = min(bidx, NUM_BUSY_BUCKETS - 1);
+
+	/*
+	 * Combine the lowest two buckets. The lowest frequency falls into
+	 * the 2nd bucket, so continuing to predict the lowest bucket is
+	 * not useful.
+	 */
+	if (!bidx)
+		bidx++;
+
+	return bidx;
+}
+
+static inline u64
+scale_load_to_freq(u64 load, unsigned int src_freq, unsigned int dst_freq)
+{
+	return div64_u64(load * (u64)src_freq, (u64)dst_freq);
+}
+
+/*
+ * get_pred_busy - calculate predicted demand for a task on runqueue
+ *
+ * @rq: runqueue of task p
+ * @p: task whose prediction is being updated
+ * @start: starting bucket. returned prediction should not be lower than
+ *         this bucket.
+ * @runtime: runtime of the task. returned prediction should not be lower
+ *           than this runtime.
+ * Note: @start can be derived from @runtime. It's passed in only to
+ * avoid duplicated calculation in some cases.
+ *
+ * A new predicted busy time is returned for task @p based on @runtime
+ * passed in. The function searches through buckets that represent busy
+ * time equal to or bigger than @runtime and attempts to find the bucket
+ * to use for prediction. Once found, it searches through historical busy
+ * time and returns the latest value that falls into the bucket. If no such
+ * busy time exists, it returns the middle of that bucket's demand range.
+ */
+static u32 get_pred_busy(struct rq *rq, struct task_struct *p,
+				int start, u32 runtime)
+{
+	int i;
+	u8 *buckets = p->ravg.busy_buckets;
+	u32 *hist = p->ravg.sum_history;
+	u32 dmin, dmax;
+	u64 cur_freq_runtime = 0;
+	int first = NUM_BUSY_BUCKETS, final;
+	u32 ret = runtime;
+
+	/* skip prediction for new tasks due to lack of history */
+	if (unlikely(is_new_task(p)))
+		goto out;
+
+	/* find minimal bucket index to pick */
+	for (i = start; i < NUM_BUSY_BUCKETS; i++) {
+		if (buckets[i]) {
+			first = i;
+			break;
+		}
+	}
+	/* if no higher buckets are filled, predict runtime */
+	if (first >= NUM_BUSY_BUCKETS)
+		goto out;
+
+	/* compute the bucket for prediction */
+	final = first;
+
+	/* determine demand range for the predicted bucket */
+	if (final < 2) {
+		/* lowest two buckets are combined */
+		dmin = 0;
+		final = 1;
+	} else {
+		dmin = mult_frac(final, max_task_load(), NUM_BUSY_BUCKETS);
+	}
+	dmax = mult_frac(final + 1, max_task_load(), NUM_BUSY_BUCKETS);
+
+	/*
+	 * search through runtime history and return first runtime that falls
+	 * into the range of predicted bucket.
+	 */
+	for (i = 0; i < sched_ravg_hist_size; i++) {
+		if (hist[i] >= dmin && hist[i] < dmax) {
+			ret = hist[i];
+			break;
+		}
+	}
+	/* no historical runtime within the bucket; use the bucket's midpoint */
+	if (ret < dmin)
+		ret = (dmin + dmax) / 2;
+	/*
+	 * when updating in middle of a window, runtime could be higher
+	 * than all recorded history. Always predict at least runtime.
+	 */
+	ret = max(runtime, ret);
+out:
+	trace_sched_update_pred_demand(rq, p, runtime,
+		mult_frac((unsigned int)cur_freq_runtime, 100,
+			  sched_ravg_window), ret);
+	return ret;
+}
+
+static inline u32 calc_pred_demand(struct rq *rq, struct task_struct *p)
+{
+	if (p->ravg.pred_demand >= p->ravg.curr_window)
+		return p->ravg.pred_demand;
+
+	return get_pred_busy(rq, p, busy_to_bucket(p->ravg.curr_window),
+			     p->ravg.curr_window);
+}
+
+/*
+ * Predictive demand of a task is calculated at window roll-over.
+ * If the task's current window busy time exceeds the predicted
+ * demand, update it here to reflect the task's needs.
+ */
+void update_task_pred_demand(struct rq *rq, struct task_struct *p, int event)
+{
+	u32 new, old;
+
+	if (is_idle_task(p) || exiting_task(p))
+		return;
+
+	if (event != PUT_PREV_TASK && event != TASK_UPDATE &&
+			(!SCHED_FREQ_ACCOUNT_WAIT_TIME ||
+			 (event != TASK_MIGRATE &&
+			 event != PICK_NEXT_TASK)))
+		return;
+
+	/*
+	 * TASK_UPDATE can be called on a sleeping task, when it is moved
+	 * between related groups.
+	 */
+	if (event == TASK_UPDATE) {
+		if (!p->on_rq && !SCHED_FREQ_ACCOUNT_WAIT_TIME)
+			return;
+	}
+
+	new = calc_pred_demand(rq, p);
+	old = p->ravg.pred_demand;
+
+	if (old >= new)
+		return;
+
+	if (task_on_rq_queued(p) && (!task_has_dl_policy(p) ||
+				!p->dl.dl_throttled))
+		p->sched_class->fixup_hmp_sched_stats(rq, p,
+				p->ravg.demand,
+				new);
+
+	p->ravg.pred_demand = new;
+}
+
+void clear_top_tasks_bitmap(unsigned long *bitmap)
+{
+	memset(bitmap, 0, top_tasks_bitmap_size);
+	__set_bit(NUM_LOAD_INDICES, bitmap);
+}
+
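+/*
+ * Top-task bookkeeping sketch: top_tasks[] counts how many tasks fall
+ * into each load index, and top_tasks_bitmap[] mirrors which counts are
+ * non-zero. Bits are stored reversed (NUM_LOAD_INDICES - index - 1), so
+ * the highest-load index is the lowest set bit; the extra bit set at
+ * position NUM_LOAD_INDICES by clear_top_tasks_bitmap() is a sentinel
+ * for bitmap scans.
+ */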
+/*
+ * Special case the last index and provide a fast path for index = 0.
+ * Note that sched_load_granule can change underneath us if we are not
+ * holding any runqueue locks while calling the two functions below.
+ */
+static u32 top_task_load(struct rq *rq)
+{
+	int index = rq->prev_top;
+	u8 prev = 1 - rq->curr_table;
+
+	if (!index) {
+		int msb = NUM_LOAD_INDICES - 1;
+
+		if (!test_bit(msb, rq->top_tasks_bitmap[prev]))
+			return 0;
+		else
+			return sched_load_granule;
+	} else if (index == NUM_LOAD_INDICES - 1) {
+		return sched_ravg_window;
+	} else {
+		return (index + 1) * sched_load_granule;
+	}
+}
+
+static int load_to_index(u32 load)
+{
+	if (load < sched_load_granule)
+		return 0;
+	else if (load >= sched_ravg_window)
+		return NUM_LOAD_INDICES - 1;
+	else
+		return load / sched_load_granule;
+}
+
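+/*
+ * Illustrative index math, assuming a 20ms sched_ravg_window and
+ * NUM_LOAD_INDICES of 1000: sched_load_granule is 20us, so a 150us
+ * window contribution maps to index 7, and top_task_load() reports
+ * (7 + 1) * 20us = 160us for that top index.
+ */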
+static void update_top_tasks(struct task_struct *p, struct rq *rq,
+		u32 old_curr_window, int new_window, bool full_window)
+{
+	u8 curr = rq->curr_table;
+	u8 prev = 1 - curr;
+	u8 *curr_table = rq->top_tasks[curr];
+	u8 *prev_table = rq->top_tasks[prev];
+	int old_index, new_index, update_index;
+	u32 curr_window = p->ravg.curr_window;
+	u32 prev_window = p->ravg.prev_window;
+	bool zero_index_update;
+
+	if (old_curr_window == curr_window && !new_window)
+		return;
+
+	old_index = load_to_index(old_curr_window);
+	new_index = load_to_index(curr_window);
+
+	if (!new_window) {
+		zero_index_update = !old_curr_window && curr_window;
+		if (old_index != new_index || zero_index_update) {
+			if (old_curr_window)
+				curr_table[old_index] -= 1;
+			if (curr_window)
+				curr_table[new_index] += 1;
+			if (new_index > rq->curr_top)
+				rq->curr_top = new_index;
+		}
+
+		if (!curr_table[old_index])
+			__clear_bit(NUM_LOAD_INDICES - old_index - 1,
+				rq->top_tasks_bitmap[curr]);
+
+		if (curr_table[new_index] == 1)
+			__set_bit(NUM_LOAD_INDICES - new_index - 1,
+				rq->top_tasks_bitmap[curr]);
+
+		return;
+	}
+
+	/*
+	 * The window has rolled over for this task. By the time we get
+	 * here, the curr/prev swaps would have already occurred. So we need
+	 * to use prev_window for the new index.
+	 */
+	update_index = load_to_index(prev_window);
+
+	if (full_window) {
+		/*
+		 * Two cases here. Either 'p' ran for the entire window or
+		 * it didn't run at all. In either case there is no entry
+		 * in the prev table. If 'p' ran the entire window, we just
+		 * need to create a new entry in the prev table. In this case
+		 * update_index will correspond to sched_ravg_window,
+		 * so we can unconditionally update the top index.
+		 */
+		if (prev_window) {
+			prev_table[update_index] += 1;
+			rq->prev_top = update_index;
+		}
+
+		if (prev_table[update_index] == 1)
+			__set_bit(NUM_LOAD_INDICES - update_index - 1,
+				rq->top_tasks_bitmap[prev]);
+	} else {
+		zero_index_update = !old_curr_window && prev_window;
+		if (old_index != update_index || zero_index_update) {
+			if (old_curr_window)
+				prev_table[old_index] -= 1;
+
+			prev_table[update_index] += 1;
+
+			if (update_index > rq->prev_top)
+				rq->prev_top = update_index;
+
+			if (!prev_table[old_index])
+				__clear_bit(NUM_LOAD_INDICES - old_index - 1,
+						rq->top_tasks_bitmap[prev]);
+
+			if (prev_table[update_index] == 1)
+				__set_bit(NUM_LOAD_INDICES - update_index - 1,
+						rq->top_tasks_bitmap[prev]);
+		}
+	}
+
+	if (curr_window) {
+		curr_table[new_index] += 1;
+
+		if (new_index > rq->curr_top)
+			rq->curr_top = new_index;
+
+		if (curr_table[new_index] == 1)
+			__set_bit(NUM_LOAD_INDICES - new_index - 1,
+				rq->top_tasks_bitmap[curr]);
+	}
+}
+
+static inline void clear_top_tasks_table(u8 *table)
+{
+	memset(table, 0, NUM_LOAD_INDICES * sizeof(u8));
+}
+
+static void rollover_top_tasks(struct rq *rq, bool full_window)
+{
+	u8 curr_table = rq->curr_table;
+	u8 prev_table = 1 - curr_table;
+	int curr_top = rq->curr_top;
+
+	clear_top_tasks_table(rq->top_tasks[prev_table]);
+	clear_top_tasks_bitmap(rq->top_tasks_bitmap[prev_table]);
+
+	if (full_window) {
+		curr_top = 0;
+		clear_top_tasks_table(rq->top_tasks[curr_table]);
+		clear_top_tasks_bitmap(
+				rq->top_tasks_bitmap[curr_table]);
+	}
+
+	rq->curr_table = prev_table;
+	rq->prev_top = curr_top;
+	rq->curr_top = 0;
+}
+
+static u32 empty_windows[NR_CPUS];
+
+static void rollover_task_window(struct task_struct *p, bool full_window)
+{
+	u32 *curr_cpu_windows = empty_windows;
+	u32 curr_window;
+	int i;
+
+	/* Rollover the sum */
+	curr_window = 0;
+
+	if (!full_window) {
+		curr_window = p->ravg.curr_window;
+		curr_cpu_windows = p->ravg.curr_window_cpu;
+	}
+
+	p->ravg.prev_window = curr_window;
+	p->ravg.curr_window = 0;
+
+	/* Roll over individual CPU contributions */
+	for (i = 0; i < nr_cpu_ids; i++) {
+		p->ravg.prev_window_cpu[i] = curr_cpu_windows[i];
+		p->ravg.curr_window_cpu[i] = 0;
+	}
+}
+
+static void rollover_cpu_window(struct rq *rq, bool full_window)
+{
+	u64 curr_sum = rq->curr_runnable_sum;
+	u64 nt_curr_sum = rq->nt_curr_runnable_sum;
+	u64 grp_curr_sum = rq->grp_time.curr_runnable_sum;
+	u64 grp_nt_curr_sum = rq->grp_time.nt_curr_runnable_sum;
+
+	if (unlikely(full_window)) {
+		curr_sum = 0;
+		nt_curr_sum = 0;
+		grp_curr_sum = 0;
+		grp_nt_curr_sum = 0;
+	}
+
+	rq->prev_runnable_sum = curr_sum;
+	rq->nt_prev_runnable_sum = nt_curr_sum;
+	rq->grp_time.prev_runnable_sum = grp_curr_sum;
+	rq->grp_time.nt_prev_runnable_sum = grp_nt_curr_sum;
+
+	rq->curr_runnable_sum = 0;
+	rq->nt_curr_runnable_sum = 0;
+	rq->grp_time.curr_runnable_sum = 0;
+	rq->grp_time.nt_curr_runnable_sum = 0;
+}
+
+/*
+ * Account cpu activity in its busy time counters (rq->curr/prev_runnable_sum)
+ */
+static void update_cpu_busy_time(struct task_struct *p, struct rq *rq,
+				 int event, u64 wallclock, u64 irqtime)
+{
+	int new_window, full_window = 0;
+	int p_is_curr_task = (p == rq->curr);
+	u64 mark_start = p->ravg.mark_start;
+	u64 window_start = rq->window_start;
+	u32 window_size = sched_ravg_window;
+	u64 delta;
+	u64 *curr_runnable_sum = &rq->curr_runnable_sum;
+	u64 *prev_runnable_sum = &rq->prev_runnable_sum;
+	u64 *nt_curr_runnable_sum = &rq->nt_curr_runnable_sum;
+	u64 *nt_prev_runnable_sum = &rq->nt_prev_runnable_sum;
+	bool new_task;
+	struct related_thread_group *grp;
+	int cpu = rq->cpu;
+	u32 old_curr_window = p->ravg.curr_window;
+
+	new_window = mark_start < window_start;
+	if (new_window) {
+		full_window = (window_start - mark_start) >= window_size;
+		if (p->ravg.active_windows < USHRT_MAX)
+			p->ravg.active_windows++;
+	}
+
+	new_task = is_new_task(p);
+
+	/*
+	 * Handle per-task window rollover. We don't care about the idle
+	 * task or exiting tasks.
+	 */
+	if (!is_idle_task(p) && !exiting_task(p)) {
+		if (new_window)
+			rollover_task_window(p, full_window);
+	}
+
+	if (p_is_curr_task && new_window) {
+		rollover_cpu_window(rq, full_window);
+		rollover_top_tasks(rq, full_window);
+	}
+
+	if (!account_busy_for_cpu_time(rq, p, irqtime, event))
+		goto done;
+
+	grp = p->grp;
+	if (grp && sched_freq_aggregate) {
+		struct group_cpu_time *cpu_time = &rq->grp_time;
+
+		curr_runnable_sum = &cpu_time->curr_runnable_sum;
+		prev_runnable_sum = &cpu_time->prev_runnable_sum;
+
+		nt_curr_runnable_sum = &cpu_time->nt_curr_runnable_sum;
+		nt_prev_runnable_sum = &cpu_time->nt_prev_runnable_sum;
+	}
+
+	if (!new_window) {
+		/*
+		 * account_busy_for_cpu_time() = 1 so busy time needs
+		 * to be accounted to the current window. No rollover
+		 * since we didn't start a new window. An example of this is
+		 * when a task starts execution and then sleeps within the
+		 * same window.
+		 */
+
+		if (!irqtime || !is_idle_task(p) || cpu_is_waiting_on_io(rq))
+			delta = wallclock - mark_start;
+		else
+			delta = irqtime;
+		delta = scale_exec_time(delta, rq);
+		*curr_runnable_sum += delta;
+		if (new_task)
+			*nt_curr_runnable_sum += delta;
+
+		if (!is_idle_task(p) && !exiting_task(p)) {
+			p->ravg.curr_window += delta;
+			p->ravg.curr_window_cpu[cpu] += delta;
+		}
+
+		goto done;
+	}
+
+	if (!p_is_curr_task) {
+		/*
+		 * account_busy_for_cpu_time() = 1 so busy time needs
+		 * to be accounted to the current window. A new window
+		 * has also started, but p is not the current task, so the
+		 * window is not rolled over - just split up and account
+		 * as necessary into curr and prev. The window is only
+		 * rolled over when a new window is processed for the current
+		 * task.
+		 *
+		 * Irqtime can't be accounted by a task that isn't the
+		 * currently running task.
+		 */
+
+		if (!full_window) {
+			/*
+			 * A full window hasn't elapsed, account partial
+			 * contribution to previous completed window.
+			 */
+			delta = scale_exec_time(window_start - mark_start, rq);
+			if (!exiting_task(p)) {
+				p->ravg.prev_window += delta;
+				p->ravg.prev_window_cpu[cpu] += delta;
+			}
+		} else {
+			/*
+			 * Since at least one full window has elapsed,
+			 * the contribution to the previous window is the
+			 * full window (window_size).
+			 */
+			delta = scale_exec_time(window_size, rq);
+			if (!exiting_task(p)) {
+				p->ravg.prev_window = delta;
+				p->ravg.prev_window_cpu[cpu] = delta;
+			}
+		}
+
+		*prev_runnable_sum += delta;
+		if (new_task)
+			*nt_prev_runnable_sum += delta;
+
+		/* Account piece of busy time in the current window. */
+		delta = scale_exec_time(wallclock - window_start, rq);
+		*curr_runnable_sum += delta;
+		if (new_task)
+			*nt_curr_runnable_sum += delta;
+
+		if (!exiting_task(p)) {
+			p->ravg.curr_window = delta;
+			p->ravg.curr_window_cpu[cpu] = delta;
+		}
+
+		goto done;
+	}
+
+	if (!irqtime || !is_idle_task(p) || cpu_is_waiting_on_io(rq)) {
+		/*
+		 * account_busy_for_cpu_time() = 1 so busy time needs
+		 * to be accounted to the current window. A new window
+		 * has started and p is the current task so rollover is
+		 * needed. If any of these three above conditions are true
+		 * then this busy time can't be accounted as irqtime.
+		 *
+		 * Busy time for the idle task or exiting tasks need not
+		 * be accounted.
+		 *
+		 * An example of this would be a task that starts execution
+		 * and then sleeps once a new window has begun.
+		 */
+
+		if (!full_window) {
+			/*
+			 * A full window hasn't elapsed, account partial
+			 * contribution to previous completed window.
+			 */
+			delta = scale_exec_time(window_start - mark_start, rq);
+			if (!is_idle_task(p) && !exiting_task(p)) {
+				p->ravg.prev_window += delta;
+				p->ravg.prev_window_cpu[cpu] += delta;
+			}
+		} else {
+			/*
+			 * Since at least one full window has elapsed,
+			 * the contribution to the previous window is the
+			 * full window (window_size).
+			 */
+			delta = scale_exec_time(window_size, rq);
+			if (!is_idle_task(p) && !exiting_task(p)) {
+				p->ravg.prev_window = delta;
+				p->ravg.prev_window_cpu[cpu] = delta;
+			}
+		}
+
+		/*
+		 * Rollover is done here by overwriting the values in
+		 * prev_runnable_sum and curr_runnable_sum.
+		 */
+		*prev_runnable_sum += delta;
+		if (new_task)
+			*nt_prev_runnable_sum += delta;
+
+		/* Account piece of busy time in the current window. */
+		delta = scale_exec_time(wallclock - window_start, rq);
+		*curr_runnable_sum += delta;
+		if (new_task)
+			*nt_curr_runnable_sum += delta;
+
+		if (!is_idle_task(p) && !exiting_task(p)) {
+			p->ravg.curr_window = delta;
+			p->ravg.curr_window_cpu[cpu] = delta;
+		}
+
+		goto done;
+	}
+
+	if (irqtime) {
+		/*
+		 * account_busy_for_cpu_time() = 1 so busy time needs
+		 * to be accounted to the current window. A new window
+		 * has started and p is the current task so rollover is
+		 * needed. The current task must be the idle task because
+		 * irqtime is not accounted for any other task.
+		 *
+		 * Irqtime will be accounted each time we process IRQ activity
+		 * after a period of idleness, so we know the IRQ busy time
+		 * started at wallclock - irqtime.
+		 */
+
+		BUG_ON(!is_idle_task(p));
+		mark_start = wallclock - irqtime;
+
+		/*
+		 * Roll window over. If IRQ busy time was just in the current
+		 * window then that is all that need be accounted.
+		 */
+		if (mark_start > window_start) {
+			*curr_runnable_sum = scale_exec_time(irqtime, rq);
+			return;
+		}
+
+		/*
+		 * The IRQ busy time spanned multiple windows. Process the
+		 * busy time preceding the current window start first.
+		 */
+		delta = window_start - mark_start;
+		if (delta > window_size)
+			delta = window_size;
+		delta = scale_exec_time(delta, rq);
+		*prev_runnable_sum += delta;
+
+		/* Process the remaining IRQ busy time in the current window. */
+		delta = wallclock - window_start;
+		rq->curr_runnable_sum = scale_exec_time(delta, rq);
+
+		return;
+	}
+
+done:
+	if (!is_idle_task(p) && !exiting_task(p))
+		update_top_tasks(p, rq, old_curr_window,
+					new_window, full_window);
+}
+
+static inline u32 predict_and_update_buckets(struct rq *rq,
+			struct task_struct *p, u32 runtime) {
+
+	int bidx;
+	u32 pred_demand;
+
+	bidx = busy_to_bucket(runtime);
+	pred_demand = get_pred_busy(rq, p, bidx, runtime);
+	bucket_increase(p->ravg.busy_buckets, bidx);
+
+	return pred_demand;
+}
+
+#define THRESH_CC_UPDATE (2 * NSEC_PER_USEC)
+
+/*
+ * Assumes rq_lock is held and wallclock was recorded in the same critical
+ * section as this function's invocation.
+ */
+static inline u64 read_cycle_counter(int cpu, u64 wallclock)
+{
+	struct sched_cluster *cluster = cpu_rq(cpu)->cluster;
+	u64 delta;
+
+	if (unlikely(!cluster))
+		return cpu_cycle_counter_cb.get_cpu_cycle_counter(cpu);
+
+	/*
+	 * Why don't we need locking here? Let's say that delta is negative
+	 * because some other CPU happened to update last_cc_update with a
+	 * more recent timestamp. We simply read the counter again in that case
+	 * with no harmful side effects. This can happen if there is an FIQ
+	 * between when we read the wallclock and when we use it here.
+	 */
+	delta = wallclock - atomic64_read(&cluster->last_cc_update);
+	if (delta > THRESH_CC_UPDATE) {
+		atomic64_set(&cluster->cycles,
+			     cpu_cycle_counter_cb.get_cpu_cycle_counter(cpu));
+		atomic64_set(&cluster->last_cc_update, wallclock);
+	}
+
+	return atomic64_read(&cluster->cycles);
+}
+
+static void update_task_cpu_cycles(struct task_struct *p, int cpu,
+				   u64 wallclock)
+{
+	if (use_cycle_counter)
+		p->cpu_cycles = read_cycle_counter(cpu, wallclock);
+}
+
+static void
+update_task_rq_cpu_cycles(struct task_struct *p, struct rq *rq, int event,
+			  u64 wallclock, u64 irqtime)
+{
+	u64 cur_cycles;
+	int cpu = cpu_of(rq);
+
+	lockdep_assert_held(&rq->lock);
+
+	if (!use_cycle_counter) {
+		rq->cc.cycles = cpu_cur_freq(cpu);
+		rq->cc.time = 1;
+		return;
+	}
+
+	cur_cycles = read_cycle_counter(cpu, wallclock);
+
+	/*
+	 * If the current task is the idle task and irqtime == 0, the CPU
+	 * was indeed idle and its cycle counter was probably not
+	 * increasing. We still need an estimated CPU frequency
+	 * for IO wait time accounting. Use the previously
+	 * calculated frequency in such a case.
+	 */
+	if (!is_idle_task(rq->curr) || irqtime) {
+		if (unlikely(cur_cycles < p->cpu_cycles))
+			rq->cc.cycles = cur_cycles + (U64_MAX - p->cpu_cycles);
+		else
+			rq->cc.cycles = cur_cycles - p->cpu_cycles;
+		rq->cc.cycles = rq->cc.cycles * NSEC_PER_MSEC;
+
+		if (event == IRQ_UPDATE && is_idle_task(p))
+			/*
+			 * Time between mark_start of idle task and IRQ handler
+			 * entry time is CPU cycle counter stall period.
+			 * Upon IRQ handler entry sched_account_irqstart()
+			 * replenishes idle task's cpu cycle counter so
+			 * rq->cc.cycles now represents increased cycles during
+			 * IRQ handler rather than time between idle entry and
+			 * IRQ exit.  Thus use irqtime as time delta.
+			 */
+			rq->cc.time = irqtime;
+		else
+			rq->cc.time = wallclock - p->ravg.mark_start;
+		BUG_ON((s64)rq->cc.time < 0);
+	}
+
+	p->cpu_cycles = cur_cycles;
+
+	trace_sched_get_task_cpu_cycles(cpu, event, rq->cc.cycles,
+					rq->cc.time, p);
+}
+
+static int
+account_busy_for_task_demand(struct rq *rq, struct task_struct *p, int event)
+{
+	/*
+	 * No need to bother updating task demand for exiting tasks
+	 * or the idle task.
+	 */
+	if (exiting_task(p) || is_idle_task(p))
+		return 0;
+
+	/*
+	 * When a task is waking up it is completing a segment of non-busy
+	 * time. Likewise, if wait time is not treated as busy time, then
+	 * when a task begins to run or is migrated, it is not running and
+	 * is completing a segment of non-busy time.
+	 */
+	if (event == TASK_WAKE || (!SCHED_ACCOUNT_WAIT_TIME &&
+			 (event == PICK_NEXT_TASK || event == TASK_MIGRATE)))
+		return 0;
+
+	/*
+	 * TASK_UPDATE can be called on a sleeping task, when it is moved
+	 * between related groups.
+	 */
+	if (event == TASK_UPDATE) {
+		if (rq->curr == p)
+			return 1;
+
+		return p->on_rq ? SCHED_ACCOUNT_WAIT_TIME : 0;
+	}
+
+	return 1;
+}
+
+/*
+ * Called when a new window is starting for a task, to record cpu usage over
+ * recently concluded window(s). Normally 'samples' should be 1. It can be > 1
+ * when, say, a real-time task runs without preemption for several windows at a
+ * stretch.
+ */
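+/*
+ * Illustrative shift, assuming sched_ravg_hist_size is 5 and samples
+ * is 2: the two oldest entries drop off, hist[0..2] move to hist[2..4],
+ * and hist[0..1] are both filled with 'runtime'. The new demand is then
+ * derived from the resulting history per sched_window_stats_policy.
+ */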
+static void update_history(struct rq *rq, struct task_struct *p,
+			 u32 runtime, int samples, int event)
+{
+	u32 *hist = &p->ravg.sum_history[0];
+	int ridx, widx;
+	u32 max = 0, avg, demand, pred_demand;
+	u64 sum = 0;
+
+	/* Ignore windows where task had no activity */
+	if (!runtime || is_idle_task(p) || exiting_task(p) || !samples)
+		goto done;
+
+	/* Push new 'runtime' value onto stack */
+	widx = sched_ravg_hist_size - 1;
+	ridx = widx - samples;
+	for (; ridx >= 0; --widx, --ridx) {
+		hist[widx] = hist[ridx];
+		sum += hist[widx];
+		if (hist[widx] > max)
+			max = hist[widx];
+	}
+
+	for (widx = 0; widx < samples && widx < sched_ravg_hist_size; widx++) {
+		hist[widx] = runtime;
+		sum += hist[widx];
+		if (hist[widx] > max)
+			max = hist[widx];
+	}
+
+	p->ravg.sum = 0;
+
+	if (sched_window_stats_policy == WINDOW_STATS_RECENT) {
+		demand = runtime;
+	} else if (sched_window_stats_policy == WINDOW_STATS_MAX) {
+		demand = max;
+	} else {
+		avg = div64_u64(sum, sched_ravg_hist_size);
+		if (sched_window_stats_policy == WINDOW_STATS_AVG)
+			demand = avg;
+		else
+			demand = max(avg, runtime);
+	}
+	pred_demand = predict_and_update_buckets(rq, p, runtime);
+
+	/*
+	 * A throttled deadline sched class task gets dequeued without
+	 * changing p->on_rq. Since the dequeue decrements hmp stats
+	 * avoid decrementing it here again.
+	 */
+	if (task_on_rq_queued(p) && (!task_has_dl_policy(p) ||
+						!p->dl.dl_throttled))
+		p->sched_class->fixup_hmp_sched_stats(rq, p, demand,
+						      pred_demand);
+
+	p->ravg.demand = demand;
+	p->ravg.pred_demand = pred_demand;
+
+done:
+	trace_sched_update_history(rq, p, runtime, samples, event);
+}
+
+static u64 add_to_task_demand(struct rq *rq, struct task_struct *p, u64 delta)
+{
+	delta = scale_exec_time(delta, rq);
+	p->ravg.sum += delta;
+	if (unlikely(p->ravg.sum > sched_ravg_window))
+		p->ravg.sum = sched_ravg_window;
+
+	return delta;
+}
+
+/*
+ * Account cpu demand of task and/or update task's cpu demand history
+ *
+ * ms = p->ravg.mark_start;
+ * wc = wallclock
+ * ws = rq->window_start
+ *
+ * Three possibilities:
+ *
+ *	a) Task event is contained within one window.
+ *		window_start < mark_start < wallclock
+ *
+ *		ws   ms  wc
+ *		|    |   |
+ *		V    V   V
+ *		|---------------|
+ *
+ *	In this case, p->ravg.sum is updated *iff* event is appropriate
+ *	(ex: event == PUT_PREV_TASK)
+ *
+ *	b) Task event spans two windows.
+ *		mark_start < window_start < wallclock
+ *
+ *		ms   ws   wc
+ *		|    |    |
+ *		V    V    V
+ *		-----|-------------------
+ *
+ *	In this case, p->ravg.sum is updated with (ws - ms) *iff* event
+ *	is appropriate, then a new window sample is recorded followed
+ *	by p->ravg.sum being set to (wc - ws) *iff* event is appropriate.
+ *
+ *	c) Task event spans more than two windows.
+ *
+ *		ms ws_tmp			   ws  wc
+ *		|  |				   |   |
+ *		V  V				   V   V
+ *		---|-------|-------|-------|-------|------
+ *		   |				   |
+ *		   |<------ nr_full_windows ------>|
+ *
+ *	In this case, p->ravg.sum is updated with (ws_tmp - ms) first *iff*
+ *	event is appropriate, window sample of p->ravg.sum is recorded,
+ *	'nr_full_window' samples of window_size is also recorded *iff*
+ *	event is appropriate and finally p->ravg.sum is set to (wc - ws)
+ *	*iff* event is appropriate.
+ *
+ * IMPORTANT : Leave p->ravg.mark_start unchanged, as update_cpu_busy_time()
+ * depends on it!
+ */
+static u64 update_task_demand(struct task_struct *p, struct rq *rq,
+			       int event, u64 wallclock)
+{
+	u64 mark_start = p->ravg.mark_start;
+	u64 delta, window_start = rq->window_start;
+	int new_window, nr_full_windows;
+	u32 window_size = sched_ravg_window;
+	u64 runtime;
+
+	new_window = mark_start < window_start;
+	if (!account_busy_for_task_demand(rq, p, event)) {
+		if (new_window)
+			/*
+			 * If this time is not being accounted as
+			 * busy time, and a new window has started, only the
+			 * previous window needs to be closed out with the
+			 * pre-existing demand. Multiple windows may have
+			 * elapsed, but since empty windows are dropped,
+			 * it is not necessary to account those.
+			 */
+			update_history(rq, p, p->ravg.sum, 1, event);
+		return 0;
+	}
+
+	if (!new_window) {
+		/*
+		 * The simple case - busy time contained within the existing
+		 * window.
+		 */
+		return add_to_task_demand(rq, p, wallclock - mark_start);
+	}
+
+	/*
+	 * Busy time spans at least two windows. Temporarily rewind
+	 * window_start to first window boundary after mark_start.
+	 */
+	delta = window_start - mark_start;
+	nr_full_windows = div64_u64(delta, window_size);
+	window_start -= (u64)nr_full_windows * (u64)window_size;
+
+	/* Process (window_start - mark_start) first */
+	runtime = add_to_task_demand(rq, p, window_start - mark_start);
+
+	/* Push new sample(s) into task's demand history */
+	update_history(rq, p, p->ravg.sum, 1, event);
+	if (nr_full_windows) {
+		u64 scaled_window = scale_exec_time(window_size, rq);
+
+		update_history(rq, p, scaled_window, nr_full_windows, event);
+		runtime += nr_full_windows * scaled_window;
+	}
+
+	/*
+	 * Restore window_start to its current value to process any
+	 * remainder in the current window.
+	 */
+	window_start += (u64)nr_full_windows * (u64)window_size;
+
+	/* Process (wallclock - window_start) next */
+	mark_start = window_start;
+	runtime += add_to_task_demand(rq, p, wallclock - mark_start);
+
+	return runtime;
+}
+
+static inline void
+update_task_burst(struct task_struct *p, struct rq *rq, int event, u64 runtime)
+{
+	/*
+	 * update_task_demand() has checks for the idle task and
+	 * exiting tasks. The runtime may include the wait time,
+	 * so update the burst only for the cases where the
+	 * task is running.
+	 */
+	if (event == PUT_PREV_TASK || (event == TASK_UPDATE &&
+				rq->curr == p))
+		p->ravg.curr_burst += runtime;
+}
+
+/* Reflect task activity on its demand and cpu's busy time statistics */
+void update_task_ravg(struct task_struct *p, struct rq *rq, int event,
+						u64 wallclock, u64 irqtime)
+{
+	u64 runtime;
+
+	if (!rq->window_start || sched_disable_window_stats ||
+	    p->ravg.mark_start == wallclock)
+		return;
+
+	lockdep_assert_held(&rq->lock);
+
+	update_window_start(rq, wallclock);
+
+	if (!p->ravg.mark_start) {
+		update_task_cpu_cycles(p, cpu_of(rq), wallclock);
+		goto done;
+	}
+
+	update_task_rq_cpu_cycles(p, rq, event, wallclock, irqtime);
+	runtime = update_task_demand(p, rq, event, wallclock);
+	if (runtime)
+		update_task_burst(p, rq, event, runtime);
+	update_cpu_busy_time(p, rq, event, wallclock, irqtime);
+	update_task_pred_demand(rq, p, event);
+
+	if (exiting_task(p))
+		goto done;
+
+	trace_sched_update_task_ravg(p, rq, event, wallclock, irqtime,
+				     rq->cc.cycles, rq->cc.time,
+				     p->grp ? &rq->grp_time : NULL);
+
+done:
+	p->ravg.mark_start = wallclock;
+}
+
+void sched_account_irqtime(int cpu, struct task_struct *curr,
+				 u64 delta, u64 wallclock)
+{
+	struct rq *rq = cpu_rq(cpu);
+	unsigned long flags, nr_windows;
+	u64 cur_jiffies_ts;
+
+	raw_spin_lock_irqsave(&rq->lock, flags);
+
+	/*
+	 * cputime (wallclock) uses sched_clock so use the same here for
+	 * consistency.
+	 */
+	delta += sched_clock() - wallclock;
+	cur_jiffies_ts = get_jiffies_64();
+
+	if (is_idle_task(curr))
+		update_task_ravg(curr, rq, IRQ_UPDATE, sched_ktime_clock(),
+				 delta);
+
+	nr_windows = cur_jiffies_ts - rq->irqload_ts;
+
+	if (nr_windows) {
+		if (nr_windows < 10) {
+			/*
+			 * Decay CPU's irqload by 3/4 (note: the nr_windows
+			 * factors cancel out, so this is a single decay
+			 * step regardless of how many windows elapsed).
+			 */
+			rq->avg_irqload *= (3 * nr_windows);
+			rq->avg_irqload = div64_u64(rq->avg_irqload,
+						    4 * nr_windows);
+		} else {
+			rq->avg_irqload = 0;
+		}
+		rq->avg_irqload += rq->cur_irqload;
+		rq->cur_irqload = 0;
+	}
+
+	rq->cur_irqload += delta;
+	rq->irqload_ts = cur_jiffies_ts;
+	raw_spin_unlock_irqrestore(&rq->lock, flags);
+}
+
+void sched_account_irqstart(int cpu, struct task_struct *curr, u64 wallclock)
+{
+	struct rq *rq = cpu_rq(cpu);
+
+	if (!rq->window_start || sched_disable_window_stats)
+		return;
+
+	if (is_idle_task(curr)) {
+		/* We're here without rq->lock held, IRQ disabled */
+		raw_spin_lock(&rq->lock);
+		update_task_cpu_cycles(curr, cpu, sched_ktime_clock());
+		raw_spin_unlock(&rq->lock);
+	}
+}
+
+void reset_task_stats(struct task_struct *p)
+{
+	u32 sum = 0;
+	u32 *curr_window_ptr = NULL;
+	u32 *prev_window_ptr = NULL;
+
+	if (exiting_task(p)) {
+		sum = EXITING_TASK_MARKER;
+	} else {
+		curr_window_ptr =  p->ravg.curr_window_cpu;
+		prev_window_ptr = p->ravg.prev_window_cpu;
+		memset(curr_window_ptr, 0, sizeof(u32) * nr_cpu_ids);
+		memset(prev_window_ptr, 0, sizeof(u32) * nr_cpu_ids);
+	}
+
+	memset(&p->ravg, 0, sizeof(struct ravg));
+
+	p->ravg.curr_window_cpu = curr_window_ptr;
+	p->ravg.prev_window_cpu = prev_window_ptr;
+
+	p->ravg.avg_burst = 2 * (u64)sysctl_sched_short_burst;
+
+	/* Retain EXITING_TASK marker */
+	p->ravg.sum_history[0] = sum;
+}
+
+void mark_task_starting(struct task_struct *p)
+{
+	u64 wallclock;
+	struct rq *rq = task_rq(p);
+
+	if (!rq->window_start || sched_disable_window_stats) {
+		reset_task_stats(p);
+		return;
+	}
+
+	wallclock = sched_ktime_clock();
+	p->ravg.mark_start = p->last_wake_ts = wallclock;
+	p->last_cpu_selected_ts = wallclock;
+	p->last_switch_out_ts = 0;
+	update_task_cpu_cycles(p, cpu_of(rq), wallclock);
+}
+
+void set_window_start(struct rq *rq)
+{
+	static int sync_cpu_available;
+
+	if (rq->window_start)
+		return;
+
+	if (!sync_cpu_available) {
+		rq->window_start = sched_ktime_clock();
+		sync_cpu_available = 1;
+	} else {
+		struct rq *sync_rq = cpu_rq(cpumask_any(cpu_online_mask));
+
+		raw_spin_unlock(&rq->lock);
+		double_rq_lock(rq, sync_rq);
+		rq->window_start = sync_rq->window_start;
+		rq->curr_runnable_sum = rq->prev_runnable_sum = 0;
+		rq->nt_curr_runnable_sum = rq->nt_prev_runnable_sum = 0;
+		raw_spin_unlock(&sync_rq->lock);
+	}
+
+	rq->curr->ravg.mark_start = rq->window_start;
+}
+
+static void reset_all_task_stats(void)
+{
+	struct task_struct *g, *p;
+
+	do_each_thread(g, p) {
+		reset_task_stats(p);
+	}  while_each_thread(g, p);
+}
+
+enum reset_reason_code {
+	WINDOW_CHANGE,
+	POLICY_CHANGE,
+	HIST_SIZE_CHANGE,
+	FREQ_AGGREGATE_CHANGE,
+};
+
+const char *sched_window_reset_reasons[] = {
+	"WINDOW_CHANGE",
+	"POLICY_CHANGE",
+	"HIST_SIZE_CHANGE",
+	"FREQ_AGGREGATE_CHANGE",
+};
+
+/* Called with IRQs enabled */
+void reset_all_window_stats(u64 window_start, unsigned int window_size)
+{
+	int cpu, i;
+	unsigned long flags;
+	u64 start_ts = sched_ktime_clock();
+	int reason = WINDOW_CHANGE;
+	unsigned int old = 0, new = 0;
+
+	local_irq_save(flags);
+
+	read_lock(&tasklist_lock);
+
+	read_lock(&related_thread_group_lock);
+
+	/* Taking all runqueue locks prevents race with sched_exit(). */
+	for_each_possible_cpu(cpu)
+		raw_spin_lock(&cpu_rq(cpu)->lock);
+
+	sched_disable_window_stats = 1;
+
+	reset_all_task_stats();
+
+	read_unlock(&tasklist_lock);
+
+	if (window_size) {
+		sched_ravg_window = window_size * TICK_NSEC;
+		set_hmp_defaults();
+		sched_load_granule = sched_ravg_window / NUM_LOAD_INDICES;
+	}
+
+	sched_disable_window_stats = 0;
+
+	for_each_possible_cpu(cpu) {
+		struct rq *rq = cpu_rq(cpu);
+
+		if (window_start)
+			rq->window_start = window_start;
+		rq->curr_runnable_sum = rq->prev_runnable_sum = 0;
+		rq->nt_curr_runnable_sum = rq->nt_prev_runnable_sum = 0;
+		memset(&rq->grp_time, 0, sizeof(struct group_cpu_time));
+		for (i = 0; i < NUM_TRACKED_WINDOWS; i++) {
+			memset(&rq->load_subs[i], 0,
+					sizeof(struct load_subtractions));
+			clear_top_tasks_table(rq->top_tasks[i]);
+			clear_top_tasks_bitmap(rq->top_tasks_bitmap[i]);
+		}
+
+		rq->curr_table = 0;
+		rq->curr_top = 0;
+		rq->prev_top = 0;
+		reset_cpu_hmp_stats(cpu, 1);
+	}
+
+	if (sched_window_stats_policy != sysctl_sched_window_stats_policy) {
+		reason = POLICY_CHANGE;
+		old = sched_window_stats_policy;
+		new = sysctl_sched_window_stats_policy;
+		sched_window_stats_policy = sysctl_sched_window_stats_policy;
+	} else if (sched_ravg_hist_size != sysctl_sched_ravg_hist_size) {
+		reason = HIST_SIZE_CHANGE;
+		old = sched_ravg_hist_size;
+		new = sysctl_sched_ravg_hist_size;
+		sched_ravg_hist_size = sysctl_sched_ravg_hist_size;
+	} else if (sched_freq_aggregate !=
+					sysctl_sched_freq_aggregate) {
+		reason = FREQ_AGGREGATE_CHANGE;
+		old = sched_freq_aggregate;
+		new = sysctl_sched_freq_aggregate;
+		sched_freq_aggregate = sysctl_sched_freq_aggregate;
+	}
+
+	for_each_possible_cpu(cpu)
+		raw_spin_unlock(&cpu_rq(cpu)->lock);
+
+	read_unlock(&related_thread_group_lock);
+
+	local_irq_restore(flags);
+
+	trace_sched_reset_all_window_stats(window_start, window_size,
+		sched_ktime_clock() - start_ts, reason, old, new);
+}
+
+/*
+ * In this function we match the accumulated subtractions with the current
+ * and previous windows we are operating with. Ignore any entries where
+ * the window start in the load_subtraction struct does not match either
+ * the current or the previous window. This could happen whenever CPUs
+ * become idle or busy with interrupts disabled for an extended period.
+ */
+static inline void account_load_subtractions(struct rq *rq)
+{
+	u64 ws = rq->window_start;
+	u64 prev_ws = ws - sched_ravg_window;
+	struct load_subtractions *ls = rq->load_subs;
+	int i;
+
+	for (i = 0; i < NUM_TRACKED_WINDOWS; i++) {
+		if (ls[i].window_start == ws) {
+			rq->curr_runnable_sum -= ls[i].subs;
+			rq->nt_curr_runnable_sum -= ls[i].new_subs;
+		} else if (ls[i].window_start == prev_ws) {
+			rq->prev_runnable_sum -= ls[i].subs;
+			rq->nt_prev_runnable_sum -= ls[i].new_subs;
+		}
+
+		ls[i].subs = 0;
+		ls[i].new_subs = 0;
+	}
+
+	BUG_ON((s64)rq->prev_runnable_sum < 0);
+	BUG_ON((s64)rq->curr_runnable_sum < 0);
+	BUG_ON((s64)rq->nt_prev_runnable_sum < 0);
+	BUG_ON((s64)rq->nt_curr_runnable_sum < 0);
+}
+
+static inline u64 freq_policy_load(struct rq *rq, u64 load)
+{
+	unsigned int reporting_policy = sysctl_sched_freq_reporting_policy;
+
+	switch (reporting_policy) {
+	case FREQ_REPORT_MAX_CPU_LOAD_TOP_TASK:
+		load = max_t(u64, load, top_task_load(rq));
+		break;
+	case FREQ_REPORT_TOP_TASK:
+		load = top_task_load(rq);
+		break;
+	case FREQ_REPORT_CPU_LOAD:
+		break;
+	default:
+		break;
+	}
+
+	return load;
+}
+
+void sched_get_cpus_busy(struct sched_load *busy,
+			 const struct cpumask *query_cpus)
+{
+	unsigned long flags;
+	struct rq *rq;
+	const int cpus = cpumask_weight(query_cpus);
+	u64 load[cpus], group_load[cpus];
+	u64 nload[cpus], ngload[cpus];
+	u64 pload[cpus];
+	unsigned int max_freq[cpus];
+	int notifier_sent = 0;
+	int early_detection[cpus];
+	int cpu, i = 0;
+	unsigned int window_size;
+	u64 max_prev_sum = 0;
+	int max_busy_cpu = cpumask_first(query_cpus);
+	u64 total_group_load = 0, total_ngload = 0;
+	bool aggregate_load = false;
+	struct sched_cluster *cluster = cpu_cluster(cpumask_first(query_cpus));
+
+	if (unlikely(cpus == 0))
+		return;
+
+	local_irq_save(flags);
+
+	/*
+	 * This function could be called in timer context, and the
+	 * current task may have been executing for a long time. Ensure
+	 * that the window stats are current by doing an update.
+	 */
+
+	for_each_cpu(cpu, query_cpus)
+		raw_spin_lock(&cpu_rq(cpu)->lock);
+
+	window_size = sched_ravg_window;
+
+	/*
+	 * We don't really need the cluster lock for this entire for loop
+	 * block. However, there is no advantage in optimizing this, as rq
+	 * locks are held regardless and would prevent migration anyway.
+	 */
+	raw_spin_lock(&cluster->load_lock);
+
+	for_each_cpu(cpu, query_cpus) {
+		rq = cpu_rq(cpu);
+
+		update_task_ravg(rq->curr, rq, TASK_UPDATE, sched_ktime_clock(),
+				 0);
+
+		account_load_subtractions(rq);
+		load[i] = rq->prev_runnable_sum;
+		nload[i] = rq->nt_prev_runnable_sum;
+		pload[i] = rq->hmp_stats.pred_demands_sum;
+		rq->old_estimated_time = pload[i];
+
+		if (load[i] > max_prev_sum) {
+			max_prev_sum = load[i];
+			max_busy_cpu = cpu;
+		}
+
+		/*
+		 * sched_get_cpus_busy() is called for all CPUs in a
+		 * frequency domain. So the notifier_sent flag per
+		 * cluster works even when a frequency domain spans
+		 * more than 1 cluster.
+		 */
+		if (rq->cluster->notifier_sent) {
+			notifier_sent = 1;
+			rq->cluster->notifier_sent = 0;
+		}
+		early_detection[i] = (rq->ed_task != NULL);
+		max_freq[i] = cpu_max_freq(cpu);
+		i++;
+	}
+
+	raw_spin_unlock(&cluster->load_lock);
+
+	group_load_in_freq_domain(
+			&cpu_rq(max_busy_cpu)->freq_domain_cpumask,
+			&total_group_load, &total_ngload);
+	aggregate_load = !!(total_group_load > sched_freq_aggregate_threshold);
+
+	i = 0;
+	for_each_cpu(cpu, query_cpus) {
+		group_load[i] = 0;
+		ngload[i] = 0;
+
+		if (early_detection[i])
+			goto skip_early;
+
+		rq = cpu_rq(cpu);
+		if (aggregate_load) {
+			if (cpu == max_busy_cpu) {
+				group_load[i] = total_group_load;
+				ngload[i] = total_ngload;
+			}
+		} else {
+			group_load[i] = rq->grp_time.prev_runnable_sum;
+			ngload[i] = rq->grp_time.nt_prev_runnable_sum;
+		}
+
+		load[i] += group_load[i];
+		nload[i] += ngload[i];
+
+		load[i] = freq_policy_load(rq, load[i]);
+		rq->old_busy_time = load[i];
+
+		/*
+		 * Scale load in reference to cluster max_possible_freq.
+		 *
+		 * Note that scale_load_to_cpu() scales load in reference to
+		 * the cluster max_freq.
+		 */
+		load[i] = scale_load_to_cpu(load[i], cpu);
+		nload[i] = scale_load_to_cpu(nload[i], cpu);
+		pload[i] = scale_load_to_cpu(pload[i], cpu);
+skip_early:
+		i++;
+	}
+
+	for_each_cpu(cpu, query_cpus)
+		raw_spin_unlock(&(cpu_rq(cpu))->lock);
+
+	local_irq_restore(flags);
+
+	i = 0;
+	for_each_cpu(cpu, query_cpus) {
+		rq = cpu_rq(cpu);
+
+		if (early_detection[i]) {
+			busy[i].prev_load = div64_u64(sched_ravg_window,
+							NSEC_PER_USEC);
+			busy[i].new_task_load = 0;
+			busy[i].predicted_load = 0;
+			goto exit_early;
+		}
+
+		load[i] = scale_load_to_freq(load[i], max_freq[i],
+				cpu_max_possible_freq(cpu));
+		nload[i] = scale_load_to_freq(nload[i], max_freq[i],
+				cpu_max_possible_freq(cpu));
+
+		pload[i] = scale_load_to_freq(pload[i], max_freq[i],
+					     rq->cluster->max_possible_freq);
+
+		busy[i].prev_load = div64_u64(load[i], NSEC_PER_USEC);
+		busy[i].new_task_load = div64_u64(nload[i], NSEC_PER_USEC);
+		busy[i].predicted_load = div64_u64(pload[i], NSEC_PER_USEC);
+
+exit_early:
+		trace_sched_get_busy(cpu, busy[i].prev_load,
+				     busy[i].new_task_load,
+				     busy[i].predicted_load,
+				     early_detection[i],
+				     aggregate_load &&
+				      cpu == max_busy_cpu);
+		i++;
+	}
+}
+
+void sched_set_io_is_busy(int val)
+{
+	sched_io_is_busy = val;
+}
+
+int sched_set_window(u64 window_start, unsigned int window_size)
+{
+	u64 now, cur_jiffies, jiffy_ktime_ns;
+	s64 ws;
+	unsigned long flags;
+
+	if (window_size * TICK_NSEC < MIN_SCHED_RAVG_WINDOW)
+		return -EINVAL;
+
+	mutex_lock(&policy_mutex);
+
+	/*
+	 * Get a consistent view of ktime, jiffies, and the time
+	 * since the last jiffy (based on last_jiffies_update).
+	 */
+	local_irq_save(flags);
+	cur_jiffies = jiffy_to_ktime_ns(&now, &jiffy_ktime_ns);
+	local_irq_restore(flags);
+
+	/* translate window_start from jiffies to nanoseconds */
+	ws = (window_start - cur_jiffies); /* jiffy difference */
+	ws *= TICK_NSEC;
+	ws += jiffy_ktime_ns;
+
+	/*
+	 * Roll back calculated window start so that it is in
+	 * the past (window stats must have a current window).
+	 */
+	while (ws > now)
+		ws -= (window_size * TICK_NSEC);
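+	/*
+	 * E.g. with HZ=100, TICK_NSEC is 10ms; a window_start two jiffies
+	 * ahead of cur_jiffies gives ws = 2 * 10ms + jiffy_ktime_ns, which
+	 * the loop above rolls back in window_size steps until ws <= now.
+	 */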
+
+	BUG_ON(sched_ktime_clock() < ws);
+
+	reset_all_window_stats(ws, window_size);
+
+	sched_update_freq_max_load(cpu_possible_mask);
+
+	mutex_unlock(&policy_mutex);
+
+	return 0;
+}
+
+static inline void create_subtraction_entry(struct rq *rq, u64 ws, int index)
+{
+	rq->load_subs[index].window_start = ws;
+	rq->load_subs[index].subs = 0;
+	rq->load_subs[index].new_subs = 0;
+}
+
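+/*
+ * Return the index of the load_subs slot tracking window 'ws'; if no
+ * slot matches, recycle the slot with the oldest window_start.
+ */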
+static int get_subtraction_index(struct rq *rq, u64 ws)
+{
+	int i;
+	u64 oldest = ULLONG_MAX;
+	int oldest_index = 0;
+
+	for (i = 0; i < NUM_TRACKED_WINDOWS; i++) {
+		u64 entry_ws = rq->load_subs[i].window_start;
+
+		if (ws == entry_ws)
+			return i;
+
+		if (entry_ws < oldest) {
+			oldest = entry_ws;
+			oldest_index = i;
+		}
+	}
+
+	create_subtraction_entry(rq, ws, oldest_index);
+	return oldest_index;
+}
+
+static void update_rq_load_subtractions(int index, struct rq *rq,
+					u32 sub_load, bool new_task)
+{
+	rq->load_subs[index].subs += sub_load;
+	if (new_task)
+		rq->load_subs[index].new_subs += sub_load;
+}
+
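+/*
+ * Convert the task's curr/prev window contributions recorded on the
+ * other CPUs of the cluster into deferred load_subs entries, which
+ * account_load_subtractions() later applies to those runqueues.
+ */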
+static void update_cluster_load_subtractions(struct task_struct *p,
+					int cpu, u64 ws, bool new_task)
+{
+	struct sched_cluster *cluster = cpu_cluster(cpu);
+	struct cpumask cluster_cpus = cluster->cpus;
+	u64 prev_ws = ws - sched_ravg_window;
+	int i;
+
+	cpumask_clear_cpu(cpu, &cluster_cpus);
+	raw_spin_lock(&cluster->load_lock);
+
+	for_each_cpu(i, &cluster_cpus) {
+		struct rq *rq = cpu_rq(i);
+		int index;
+
+		if (p->ravg.curr_window_cpu[i]) {
+			index = get_subtraction_index(rq, ws);
+			update_rq_load_subtractions(index, rq,
+				p->ravg.curr_window_cpu[i], new_task);
+			p->ravg.curr_window_cpu[i] = 0;
+		}
+
+		if (p->ravg.prev_window_cpu[i]) {
+			index = get_subtraction_index(rq, prev_ws);
+			update_rq_load_subtractions(index, rq,
+				p->ravg.prev_window_cpu[i], new_task);
+			p->ravg.prev_window_cpu[i] = 0;
+		}
+	}
+
+	raw_spin_unlock(&cluster->load_lock);
+}
+
+static inline void
+inter_cluster_migration_fixup(struct task_struct *p, int new_cpu,
+			      int task_cpu, bool new_task)
+{
+	struct rq *dest_rq = cpu_rq(new_cpu);
+	struct rq *src_rq = cpu_rq(task_cpu);
+
+	if (same_freq_domain(new_cpu, task_cpu))
+		return;
+
+	p->ravg.curr_window_cpu[new_cpu] = p->ravg.curr_window;
+	p->ravg.prev_window_cpu[new_cpu] = p->ravg.prev_window;
+
+	dest_rq->curr_runnable_sum += p->ravg.curr_window;
+	dest_rq->prev_runnable_sum += p->ravg.prev_window;
+
+	src_rq->curr_runnable_sum -= p->ravg.curr_window_cpu[task_cpu];
+	src_rq->prev_runnable_sum -= p->ravg.prev_window_cpu[task_cpu];
+
+	if (new_task) {
+		dest_rq->nt_curr_runnable_sum += p->ravg.curr_window;
+		dest_rq->nt_prev_runnable_sum += p->ravg.prev_window;
+
+		src_rq->nt_curr_runnable_sum -=
+				p->ravg.curr_window_cpu[task_cpu];
+		src_rq->nt_prev_runnable_sum -=
+				p->ravg.prev_window_cpu[task_cpu];
+	}
+
+	p->ravg.curr_window_cpu[task_cpu] = 0;
+	p->ravg.prev_window_cpu[task_cpu] = 0;
+
+	update_cluster_load_subtractions(p, task_cpu,
+			src_rq->window_start, new_task);
+
+	BUG_ON((s64)src_rq->prev_runnable_sum < 0);
+	BUG_ON((s64)src_rq->curr_runnable_sum < 0);
+	BUG_ON((s64)src_rq->nt_prev_runnable_sum < 0);
+	BUG_ON((s64)src_rq->nt_curr_runnable_sum < 0);
+}
+
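+/*
+ * Top-task bitmaps store load index 'i' at bit (NUM_LOAD_INDICES - 1 - i),
+ * so the first set bit found maps back to the highest occupied load index.
+ */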
+static int get_top_index(unsigned long *bitmap, unsigned long old_top)
+{
+	int index = find_next_bit(bitmap, NUM_LOAD_INDICES, old_top);
+
+	if (index == NUM_LOAD_INDICES)
+		return 0;
+
+	return NUM_LOAD_INDICES - 1 - index;
+}
+
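+/*
+ * Move the migrating task's contribution out of the source rq's
+ * curr/prev top-task tables into the destination rq's, updating the
+ * bitmaps and each rq's cached top index as buckets empty or fill.
+ */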
+static void
+migrate_top_tasks(struct task_struct *p, struct rq *src_rq, struct rq *dst_rq)
+{
+	int index;
+	int top_index;
+	u32 curr_window = p->ravg.curr_window;
+	u32 prev_window = p->ravg.prev_window;
+	u8 src = src_rq->curr_table;
+	u8 dst = dst_rq->curr_table;
+	u8 *src_table;
+	u8 *dst_table;
+
+	if (curr_window) {
+		src_table = src_rq->top_tasks[src];
+		dst_table = dst_rq->top_tasks[dst];
+		index = load_to_index(curr_window);
+		src_table[index] -= 1;
+		dst_table[index] += 1;
+
+		if (!src_table[index])
+			__clear_bit(NUM_LOAD_INDICES - index - 1,
+				src_rq->top_tasks_bitmap[src]);
+
+		if (dst_table[index] == 1)
+			__set_bit(NUM_LOAD_INDICES - index - 1,
+				dst_rq->top_tasks_bitmap[dst]);
+
+		if (index > dst_rq->curr_top)
+			dst_rq->curr_top = index;
+
+		top_index = src_rq->curr_top;
+		if (index == top_index && !src_table[index])
+			src_rq->curr_top = get_top_index(
+				src_rq->top_tasks_bitmap[src], top_index);
+	}
+
+	if (prev_window) {
+		src = 1 - src;
+		dst = 1 - dst;
+		src_table = src_rq->top_tasks[src];
+		dst_table = dst_rq->top_tasks[dst];
+		index = load_to_index(prev_window);
+		src_table[index] -= 1;
+		dst_table[index] += 1;
+
+		if (!src_table[index])
+			__clear_bit(NUM_LOAD_INDICES - index - 1,
+				src_rq->top_tasks_bitmap[src]);
+
+		if (dst_table[index] == 1)
+			__set_bit(NUM_LOAD_INDICES - index - 1,
+				dst_rq->top_tasks_bitmap[dst]);
+
+		if (index > dst_rq->prev_top)
+			dst_rq->prev_top = index;
+
+		top_index = src_rq->prev_top;
+		if (index == top_index && !src_table[index])
+			src_rq->prev_top = get_top_index(
+				src_rq->top_tasks_bitmap[src], top_index);
+	}
+}
+
+void fixup_busy_time(struct task_struct *p, int new_cpu)
+{
+	struct rq *src_rq = task_rq(p);
+	struct rq *dest_rq = cpu_rq(new_cpu);
+	u64 wallclock;
+	u64 *src_curr_runnable_sum, *dst_curr_runnable_sum;
+	u64 *src_prev_runnable_sum, *dst_prev_runnable_sum;
+	u64 *src_nt_curr_runnable_sum, *dst_nt_curr_runnable_sum;
+	u64 *src_nt_prev_runnable_sum, *dst_nt_prev_runnable_sum;
+	bool new_task;
+	struct related_thread_group *grp;
+
+	if (!p->on_rq && p->state != TASK_WAKING)
+		return;
+
+	if (exiting_task(p)) {
+		clear_ed_task(p, src_rq);
+		return;
+	}
+
+	if (p->state == TASK_WAKING)
+		double_rq_lock(src_rq, dest_rq);
+
+	if (sched_disable_window_stats)
+		goto done;
+
+	wallclock = sched_ktime_clock();
+
+	update_task_ravg(task_rq(p)->curr, task_rq(p),
+			 TASK_UPDATE,
+			 wallclock, 0);
+	update_task_ravg(dest_rq->curr, dest_rq,
+			 TASK_UPDATE, wallclock, 0);
+
+	update_task_ravg(p, task_rq(p), TASK_MIGRATE,
+			 wallclock, 0);
+
+	update_task_cpu_cycles(p, new_cpu, wallclock);
+
+	new_task = is_new_task(p);
+	/* Protected by rq_lock */
+	grp = p->grp;
+
+	/*
+	 * For frequency aggregation, we continue to do migration fixups
+	 * even for intra-cluster migrations. This is because the aggregated
+	 * load has to be reported on a single CPU regardless.
+	 */
+	if (grp && sched_freq_aggregate) {
+		struct group_cpu_time *cpu_time;
+
+		cpu_time = &src_rq->grp_time;
+		src_curr_runnable_sum = &cpu_time->curr_runnable_sum;
+		src_prev_runnable_sum = &cpu_time->prev_runnable_sum;
+		src_nt_curr_runnable_sum = &cpu_time->nt_curr_runnable_sum;
+		src_nt_prev_runnable_sum = &cpu_time->nt_prev_runnable_sum;
+
+		cpu_time = &dest_rq->grp_time;
+		dst_curr_runnable_sum = &cpu_time->curr_runnable_sum;
+		dst_prev_runnable_sum = &cpu_time->prev_runnable_sum;
+		dst_nt_curr_runnable_sum = &cpu_time->nt_curr_runnable_sum;
+		dst_nt_prev_runnable_sum = &cpu_time->nt_prev_runnable_sum;
+
+		if (p->ravg.curr_window) {
+			*src_curr_runnable_sum -= p->ravg.curr_window;
+			*dst_curr_runnable_sum += p->ravg.curr_window;
+			if (new_task) {
+				*src_nt_curr_runnable_sum -=
+							p->ravg.curr_window;
+				*dst_nt_curr_runnable_sum +=
+							p->ravg.curr_window;
+			}
+		}
+
+		if (p->ravg.prev_window) {
+			*src_prev_runnable_sum -= p->ravg.prev_window;
+			*dst_prev_runnable_sum += p->ravg.prev_window;
+			if (new_task) {
+				*src_nt_prev_runnable_sum -=
+							p->ravg.prev_window;
+				*dst_nt_prev_runnable_sum +=
+							p->ravg.prev_window;
+			}
+		}
+	} else {
+		inter_cluster_migration_fixup(p, new_cpu,
+						task_cpu(p), new_task);
+	}
+
+	migrate_top_tasks(p, src_rq, dest_rq);
+
+	if (p == src_rq->ed_task) {
+		src_rq->ed_task = NULL;
+		if (!dest_rq->ed_task)
+			dest_rq->ed_task = p;
+	}
+
+done:
+	if (p->state == TASK_WAKING)
+		double_rq_unlock(src_rq, dest_rq);
+}
+
+#define sched_up_down_migrate_auto_update 1
+static void check_for_up_down_migrate_update(const struct cpumask *cpus)
+{
+	int i = cpumask_first(cpus);
+
+	if (!sched_up_down_migrate_auto_update)
+		return;
+
+	if (cpu_max_possible_capacity(i) == max_possible_capacity)
+		return;
+
+	if (cpu_max_possible_freq(i) == cpu_max_freq(i))
+		up_down_migrate_scale_factor = 1024;
+	else
+		up_down_migrate_scale_factor = (1024 *
+				 cpu_max_possible_freq(i)) / cpu_max_freq(i);
+
+	update_up_down_migrate();
+}
+
+/* Return cluster which can offer required capacity for group */
+static struct sched_cluster *best_cluster(struct related_thread_group *grp,
+					u64 total_demand, bool group_boost)
+{
+	struct sched_cluster *cluster = NULL;
+
+	for_each_sched_cluster(cluster) {
+		if (group_will_fit(cluster, grp, total_demand, group_boost))
+			return cluster;
+	}
+
+	return sched_cluster[0];
+}
+
+static void _set_preferred_cluster(struct related_thread_group *grp)
+{
+	struct task_struct *p;
+	u64 combined_demand = 0;
+	bool boost_on_big = sched_boost_policy() == SCHED_BOOST_ON_BIG;
+	bool group_boost = false;
+	u64 wallclock;
+
+	if (list_empty(&grp->tasks))
+		return;
+
+	wallclock = sched_ktime_clock();
+
+	/*
+	 * Wakeups of two or more related tasks could race with each other
+	 * and result in multiple calls to _set_preferred_cluster being
+	 * issued at the same time. Avoid the overhead of rechecking the
+	 * preferred cluster in such cases.
+	 */
+	if (wallclock - grp->last_update < sched_ravg_window / 10)
+		return;
+
+	list_for_each_entry(p, &grp->tasks, grp_list) {
+		if (boost_on_big && task_sched_boost(p)) {
+			group_boost = true;
+			break;
+		}
+
+		if (p->ravg.mark_start < wallclock -
+		    (sched_ravg_window * sched_ravg_hist_size))
+			continue;
+
+		combined_demand += p->ravg.demand;
+
+	}
+
+	grp->preferred_cluster = best_cluster(grp,
+			combined_demand, group_boost);
+	grp->last_update = sched_ktime_clock();
+	trace_sched_set_preferred_cluster(grp, combined_demand);
+}
+
+void set_preferred_cluster(struct related_thread_group *grp)
+{
+	raw_spin_lock(&grp->lock);
+	_set_preferred_cluster(grp);
+	raw_spin_unlock(&grp->lock);
+}
+
+#define ADD_TASK	0
+#define REM_TASK	1
+
+#define DEFAULT_CGROUP_COLOC_ID 1
+
+/*
+ * Task's cpu usage is accounted in:
+ *	rq->curr/prev_runnable_sum,  when its ->grp is NULL
+ *	grp->cpu_time[cpu]->curr/prev_runnable_sum, when its ->grp is !NULL
+ *
+ * Transfer task's cpu usage between those counters when transitioning between
+ * groups
+ */
+static void transfer_busy_time(struct rq *rq, struct related_thread_group *grp,
+				struct task_struct *p, int event)
+{
+	u64 wallclock;
+	struct group_cpu_time *cpu_time;
+	u64 *src_curr_runnable_sum, *dst_curr_runnable_sum;
+	u64 *src_prev_runnable_sum, *dst_prev_runnable_sum;
+	u64 *src_nt_curr_runnable_sum, *dst_nt_curr_runnable_sum;
+	u64 *src_nt_prev_runnable_sum, *dst_nt_prev_runnable_sum;
+	int migrate_type;
+	int cpu = cpu_of(rq);
+	bool new_task;
+	int i;
+
+	if (!sched_freq_aggregate)
+		return;
+
+	wallclock = sched_ktime_clock();
+
+	update_task_ravg(rq->curr, rq, TASK_UPDATE, wallclock, 0);
+	update_task_ravg(p, rq, TASK_UPDATE, wallclock, 0);
+	new_task = is_new_task(p);
+
+	cpu_time = &rq->grp_time;
+	if (event == ADD_TASK) {
+		migrate_type = RQ_TO_GROUP;
+
+		src_curr_runnable_sum = &rq->curr_runnable_sum;
+		dst_curr_runnable_sum = &cpu_time->curr_runnable_sum;
+		src_prev_runnable_sum = &rq->prev_runnable_sum;
+		dst_prev_runnable_sum = &cpu_time->prev_runnable_sum;
+
+		src_nt_curr_runnable_sum = &rq->nt_curr_runnable_sum;
+		dst_nt_curr_runnable_sum = &cpu_time->nt_curr_runnable_sum;
+		src_nt_prev_runnable_sum = &rq->nt_prev_runnable_sum;
+		dst_nt_prev_runnable_sum = &cpu_time->nt_prev_runnable_sum;
+
+		*src_curr_runnable_sum -= p->ravg.curr_window_cpu[cpu];
+		*src_prev_runnable_sum -= p->ravg.prev_window_cpu[cpu];
+		if (new_task) {
+			*src_nt_curr_runnable_sum -=
+					p->ravg.curr_window_cpu[cpu];
+			*src_nt_prev_runnable_sum -=
+					p->ravg.prev_window_cpu[cpu];
+		}
+
+		update_cluster_load_subtractions(p, cpu,
+				rq->window_start, new_task);
+
+	} else {
+		migrate_type = GROUP_TO_RQ;
+
+		src_curr_runnable_sum = &cpu_time->curr_runnable_sum;
+		dst_curr_runnable_sum = &rq->curr_runnable_sum;
+		src_prev_runnable_sum = &cpu_time->prev_runnable_sum;
+		dst_prev_runnable_sum = &rq->prev_runnable_sum;
+
+		src_nt_curr_runnable_sum = &cpu_time->nt_curr_runnable_sum;
+		dst_nt_curr_runnable_sum = &rq->nt_curr_runnable_sum;
+		src_nt_prev_runnable_sum = &cpu_time->nt_prev_runnable_sum;
+		dst_nt_prev_runnable_sum = &rq->nt_prev_runnable_sum;
+
+		*src_curr_runnable_sum -= p->ravg.curr_window;
+		*src_prev_runnable_sum -= p->ravg.prev_window;
+		if (new_task) {
+			*src_nt_curr_runnable_sum -= p->ravg.curr_window;
+			*src_nt_prev_runnable_sum -= p->ravg.prev_window;
+		}
+
+		/*
+		 * Need to reset curr/prev windows for all CPUs, not just the
+	 * ones in the same cluster. Since inter-cluster migrations
+	 * did not result in the appropriate bookkeeping, the values
+		 * per CPU would be inaccurate.
+		 */
+		for_each_possible_cpu(i) {
+			p->ravg.curr_window_cpu[i] = 0;
+			p->ravg.prev_window_cpu[i] = 0;
+		}
+	}
+
+	*dst_curr_runnable_sum += p->ravg.curr_window;
+	*dst_prev_runnable_sum += p->ravg.prev_window;
+	if (new_task) {
+		*dst_nt_curr_runnable_sum += p->ravg.curr_window;
+		*dst_nt_prev_runnable_sum += p->ravg.prev_window;
+	}
+
+	/*
+	 * When a task enters or exits a group, its curr and prev windows are
+	 * moved to a single CPU. This behavior might be sub-optimal in the
+	 * exit case; however, it saves us the overhead of handling inter-
+	 * cluster migration fixups while the task is part of a related group.
+	 */
+	p->ravg.curr_window_cpu[cpu] = p->ravg.curr_window;
+	p->ravg.prev_window_cpu[cpu] = p->ravg.prev_window;
+
+	trace_sched_migration_update_sum(p, migrate_type, rq);
+
+	BUG_ON((s64)*src_curr_runnable_sum < 0);
+	BUG_ON((s64)*src_prev_runnable_sum < 0);
+	BUG_ON((s64)*src_nt_curr_runnable_sum < 0);
+	BUG_ON((s64)*src_nt_prev_runnable_sum < 0);
+}
+
+static inline struct related_thread_group*
+lookup_related_thread_group(unsigned int group_id)
+{
+	return related_thread_groups[group_id];
+}
+
+int alloc_related_thread_groups(void)
+{
+	int i, ret;
+	struct related_thread_group *grp;
+
+	/* group_id 0 is invalid; it is the special id used to leave a group. */
+	for (i = 1; i < MAX_NUM_CGROUP_COLOC_ID; i++) {
+		grp = kzalloc(sizeof(*grp), GFP_NOWAIT);
+		if (!grp) {
+			ret = -ENOMEM;
+			goto err;
+		}
+
+		grp->id = i;
+		INIT_LIST_HEAD(&grp->tasks);
+		INIT_LIST_HEAD(&grp->list);
+		raw_spin_lock_init(&grp->lock);
+
+		related_thread_groups[i] = grp;
+	}
+
+	return 0;
+
+err:
+	for (i = 1; i < MAX_NUM_CGROUP_COLOC_ID; i++) {
+		grp = lookup_related_thread_group(i);
+		if (grp) {
+			kfree(grp);
+			related_thread_groups[i] = NULL;
+		} else {
+			break;
+		}
+	}
+
+	return ret;
+}
+
+static void remove_task_from_group(struct task_struct *p)
+{
+	struct related_thread_group *grp = p->grp;
+	struct rq *rq;
+	int empty_group = 1;
+
+	raw_spin_lock(&grp->lock);
+
+	rq = __task_rq_lock(p);
+	transfer_busy_time(rq, p->grp, p, REM_TASK);
+	list_del_init(&p->grp_list);
+	rcu_assign_pointer(p->grp, NULL);
+	__task_rq_unlock(rq);
+
+	if (!list_empty(&grp->tasks)) {
+		empty_group = 0;
+		_set_preferred_cluster(grp);
+	}
+
+	raw_spin_unlock(&grp->lock);
+
+	/* Reserved groups cannot be destroyed */
+	if (empty_group && grp->id != DEFAULT_CGROUP_COLOC_ID)
+		 /*
+		  * We test whether grp->list is attached with list_empty()
+		  * hence re-init the list after deletion.
+		  */
+		list_del_init(&grp->list);
+}
+
+static int
+add_task_to_group(struct task_struct *p, struct related_thread_group *grp)
+{
+	struct rq *rq;
+
+	raw_spin_lock(&grp->lock);
+
+	/*
+	 * Change p->grp under rq->lock. This prevents races with read-side
+	 * references to p->grp in various hot paths.
+	 */
+	rq = __task_rq_lock(p);
+	transfer_busy_time(rq, grp, p, ADD_TASK);
+	list_add(&p->grp_list, &grp->tasks);
+	rcu_assign_pointer(p->grp, grp);
+	__task_rq_unlock(rq);
+
+	_set_preferred_cluster(grp);
+
+	raw_spin_unlock(&grp->lock);
+
+	return 0;
+}
+
+void add_new_task_to_grp(struct task_struct *new)
+{
+	unsigned long flags;
+	struct related_thread_group *grp;
+	struct task_struct *leader = new->group_leader;
+	unsigned int leader_grp_id = sched_get_group_id(leader);
+
+	if (!sysctl_sched_enable_thread_grouping &&
+	    leader_grp_id != DEFAULT_CGROUP_COLOC_ID)
+		return;
+
+	if (thread_group_leader(new))
+		return;
+
+	if (leader_grp_id == DEFAULT_CGROUP_COLOC_ID) {
+		if (!same_schedtune(new, leader))
+			return;
+	}
+
+	write_lock_irqsave(&related_thread_group_lock, flags);
+
+	rcu_read_lock();
+	grp = task_related_thread_group(leader);
+	rcu_read_unlock();
+
+	/*
+	 * It's possible that someone already added the new task to the
+	 * group. A leader's thread group is updated prior to calling
+	 * this function. It's also possible that the leader has exited
+	 * the group. In either case, there is nothing else to do.
+	 */
+	if (!grp || new->grp) {
+		write_unlock_irqrestore(&related_thread_group_lock, flags);
+		return;
+	}
+
+	raw_spin_lock(&grp->lock);
+
+	rcu_assign_pointer(new->grp, grp);
+	list_add(&new->grp_list, &grp->tasks);
+
+	raw_spin_unlock(&grp->lock);
+	write_unlock_irqrestore(&related_thread_group_lock, flags);
+}
+
+static int __sched_set_group_id(struct task_struct *p, unsigned int group_id)
+{
+	int rc = 0;
+	unsigned long flags;
+	struct related_thread_group *grp = NULL;
+
+	if (group_id >= MAX_NUM_CGROUP_COLOC_ID)
+		return -EINVAL;
+
+	raw_spin_lock_irqsave(&p->pi_lock, flags);
+	write_lock(&related_thread_group_lock);
+
+	/*
+	 * Bail out if an exiting task is being manipulated by another
+	 * task, if there is nothing to do (no group and group_id == 0),
+	 * or if a direct switch from one group to another is attempted;
+	 * that is not permitted.
+	 */
+	if ((current != p && p->flags & PF_EXITING) ||
+			(!p->grp && !group_id) ||
+			(p->grp && group_id))
+		goto done;
+
+	if (!group_id) {
+		remove_task_from_group(p);
+		goto done;
+	}
+
+	grp = lookup_related_thread_group(group_id);
+	if (list_empty(&grp->list))
+		list_add(&grp->list, &active_related_thread_groups);
+
+	rc = add_task_to_group(p, grp);
+done:
+	write_unlock(&related_thread_group_lock);
+	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
+
+	return rc;
+}
+
+int sched_set_group_id(struct task_struct *p, unsigned int group_id)
+{
+	/* DEFAULT_CGROUP_COLOC_ID is a reserved id */
+	if (group_id == DEFAULT_CGROUP_COLOC_ID)
+		return -EINVAL;
+
+	return __sched_set_group_id(p, group_id);
+}
+
+unsigned int sched_get_group_id(struct task_struct *p)
+{
+	unsigned int group_id;
+	struct related_thread_group *grp;
+
+	rcu_read_lock();
+	grp = task_related_thread_group(p);
+	group_id = grp ? grp->id : 0;
+	rcu_read_unlock();
+
+	return group_id;
+}
+
+#if defined(CONFIG_SCHED_TUNE) && defined(CONFIG_CGROUP_SCHEDTUNE)
+/*
+ * We create a default colocation group at boot. There is no need to
+ * synchronize tasks between cgroups at creation time because the
+ * correct cgroup hierarchy is not available at boot. Therefore cgroup
+ * colocation is turned off by default even though the colocation group
+ * itself has been allocated. Furthermore, this colocation group cannot
+ * be destroyed once it has been created. All of this is done as part
+ * of runtime optimizations.
+ *
+ * The job of synchronizing tasks to the colocation group is done when
+ * the colocation flag in the cgroup is turned on.
+ */
+static int __init create_default_coloc_group(void)
+{
+	struct related_thread_group *grp = NULL;
+	unsigned long flags;
+
+	grp = lookup_related_thread_group(DEFAULT_CGROUP_COLOC_ID);
+	write_lock_irqsave(&related_thread_group_lock, flags);
+	list_add(&grp->list, &active_related_thread_groups);
+	write_unlock_irqrestore(&related_thread_group_lock, flags);
+
+	update_freq_aggregate_threshold(MAX_FREQ_AGGR_THRESH);
+	return 0;
+}
+late_initcall(create_default_coloc_group);
+
+int sync_cgroup_colocation(struct task_struct *p, bool insert)
+{
+	unsigned int grp_id = insert ? DEFAULT_CGROUP_COLOC_ID : 0;
+
+	return __sched_set_group_id(p, grp_id);
+}
+#endif
+
+static void update_cpu_cluster_capacity(const cpumask_t *cpus)
+{
+	int i;
+	struct sched_cluster *cluster;
+	struct cpumask cpumask;
+
+	cpumask_copy(&cpumask, cpus);
+	pre_big_task_count_change(cpu_possible_mask);
+
+	for_each_cpu(i, &cpumask) {
+		cluster = cpu_rq(i)->cluster;
+		cpumask_andnot(&cpumask, &cpumask, &cluster->cpus);
+
+		cluster->capacity = compute_capacity(cluster);
+		cluster->load_scale_factor = compute_load_scale_factor(cluster);
+
+		/* 'cpus' can span more than one cluster */
+		check_for_up_down_migrate_update(&cluster->cpus);
+	}
+
+	__update_min_max_capacity();
+
+	post_big_task_count_change(cpu_possible_mask);
+}
+
+static DEFINE_SPINLOCK(cpu_freq_min_max_lock);
+void sched_update_cpu_freq_min_max(const cpumask_t *cpus, u32 fmin, u32 fmax)
+{
+	struct cpumask cpumask;
+	struct sched_cluster *cluster;
+	int i, update_capacity = 0;
+	unsigned long flags;
+
+	spin_lock_irqsave(&cpu_freq_min_max_lock, flags);
+	cpumask_copy(&cpumask, cpus);
+	for_each_cpu(i, &cpumask) {
+		cluster = cpu_rq(i)->cluster;
+		cpumask_andnot(&cpumask, &cpumask, &cluster->cpus);
+
+		update_capacity += (cluster->max_mitigated_freq != fmax);
+		cluster->max_mitigated_freq = fmax;
+	}
+	spin_unlock_irqrestore(&cpu_freq_min_max_lock, flags);
+
+	if (update_capacity)
+		update_cpu_cluster_capacity(cpus);
+}
+
+static int cpufreq_notifier_policy(struct notifier_block *nb,
+		unsigned long val, void *data)
+{
+	struct cpufreq_policy *policy = (struct cpufreq_policy *)data;
+	struct sched_cluster *cluster = NULL;
+	struct cpumask policy_cluster = *policy->related_cpus;
+	unsigned int orig_max_freq = 0;
+	int i, j, update_capacity = 0;
+
+	if (val != CPUFREQ_NOTIFY && val != CPUFREQ_REMOVE_POLICY &&
+						val != CPUFREQ_CREATE_POLICY)
+		return 0;
+
+	if (val == CPUFREQ_REMOVE_POLICY || val == CPUFREQ_CREATE_POLICY) {
+		update_min_max_capacity();
+		return 0;
+	}
+
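+	/*
+	 * Track the largest cpuinfo.max_freq seen across all policies and
+	 * the smallest per-policy maximum; both are used when cluster
+	 * capacities are recomputed.
+	 */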
+	max_possible_freq = max(max_possible_freq, policy->cpuinfo.max_freq);
+	if (min_max_freq == 1)
+		min_max_freq = UINT_MAX;
+	min_max_freq = min(min_max_freq, policy->cpuinfo.max_freq);
+	BUG_ON(!min_max_freq);
+	BUG_ON(!policy->max);
+
+	for_each_cpu(i, &policy_cluster) {
+		cluster = cpu_rq(i)->cluster;
+		cpumask_andnot(&policy_cluster, &policy_cluster,
+						&cluster->cpus);
+
+		orig_max_freq = cluster->max_freq;
+		cluster->min_freq = policy->min;
+		cluster->max_freq = policy->max;
+		cluster->cur_freq = policy->cur;
+
+		if (!cluster->freq_init_done) {
+			mutex_lock(&cluster_lock);
+			for_each_cpu(j, &cluster->cpus)
+				cpumask_copy(&cpu_rq(j)->freq_domain_cpumask,
+						policy->related_cpus);
+			cluster->max_possible_freq = policy->cpuinfo.max_freq;
+			cluster->max_possible_capacity =
+				compute_max_possible_capacity(cluster);
+			cluster->freq_init_done = true;
+
+			sort_clusters();
+			update_all_clusters_stats();
+			mutex_unlock(&cluster_lock);
+			continue;
+		}
+
+		update_capacity += (orig_max_freq != cluster->max_freq);
+	}
+
+	if (update_capacity)
+		update_cpu_cluster_capacity(policy->related_cpus);
+
+	return 0;
+}
+
+static int cpufreq_notifier_trans(struct notifier_block *nb,
+		unsigned long val, void *data)
+{
+	struct cpufreq_freqs *freq = (struct cpufreq_freqs *)data;
+	unsigned int cpu = freq->cpu, new_freq = freq->new;
+	unsigned long flags;
+	struct sched_cluster *cluster;
+	struct cpumask policy_cpus = cpu_rq(cpu)->freq_domain_cpumask;
+	int i, j;
+
+	if (val != CPUFREQ_POSTCHANGE)
+		return 0;
+
+	BUG_ON(!new_freq);
+
+	if (cpu_cur_freq(cpu) == new_freq)
+		return 0;
+
+	for_each_cpu(i, &policy_cpus) {
+		cluster = cpu_rq(i)->cluster;
+
+		for_each_cpu(j, &cluster->cpus) {
+			struct rq *rq = cpu_rq(j);
+
+			raw_spin_lock_irqsave(&rq->lock, flags);
+			update_task_ravg(rq->curr, rq, TASK_UPDATE,
+						sched_ktime_clock(), 0);
+			raw_spin_unlock_irqrestore(&rq->lock, flags);
+		}
+
+		cluster->cur_freq = new_freq;
+		cpumask_andnot(&policy_cpus, &policy_cpus, &cluster->cpus);
+	}
+
+	return 0;
+}
+
+static int pwr_stats_ready_notifier(struct notifier_block *nb,
+				    unsigned long cpu, void *data)
+{
+	cpumask_t mask = CPU_MASK_NONE;
+
+	cpumask_set_cpu(cpu, &mask);
+	sched_update_freq_max_load(&mask);
+
+	mutex_lock(&cluster_lock);
+	sort_clusters();
+	mutex_unlock(&cluster_lock);
+
+	return 0;
+}
+
+static struct notifier_block notifier_policy_block = {
+	.notifier_call = cpufreq_notifier_policy
+};
+
+static struct notifier_block notifier_trans_block = {
+	.notifier_call = cpufreq_notifier_trans
+};
+
+static struct notifier_block notifier_pwr_stats_ready = {
+	.notifier_call = pwr_stats_ready_notifier
+};
+
+int __weak register_cpu_pwr_stats_ready_notifier(struct notifier_block *nb)
+{
+	return -EINVAL;
+}
+
+static int register_sched_callback(void)
+{
+	int ret;
+
+	ret = cpufreq_register_notifier(&notifier_policy_block,
+						CPUFREQ_POLICY_NOTIFIER);
+
+	if (!ret)
+		ret = cpufreq_register_notifier(&notifier_trans_block,
+						CPUFREQ_TRANSITION_NOTIFIER);
+
+	register_cpu_pwr_stats_ready_notifier(&notifier_pwr_stats_ready);
+
+	return 0;
+}
+
+/*
+ * cpufreq callbacks can be registered at core_initcall or later time.
+ * Any registration done prior to that is "forgotten" by cpufreq. See
+ * initialization of variable init_cpufreq_transition_notifier_list_called
+ * for further information.
+ */
+core_initcall(register_sched_callback);
+
+int update_preferred_cluster(struct related_thread_group *grp,
+		struct task_struct *p, u32 old_load)
+{
+	u32 new_load = task_load(p);
+
+	if (!grp)
+		return 0;
+
+	/*
+	 * Update if task's load has changed significantly or a complete window
+	 * has passed since we last updated preference
+	 */
+	if (abs(new_load - old_load) > sched_ravg_window / 4 ||
+		sched_ktime_clock() - grp->last_update > sched_ravg_window)
+		return 1;
+
+	return 0;
+}
+
+bool early_detection_notify(struct rq *rq, u64 wallclock)
+{
+	struct task_struct *p;
+	int loop_max = 10;
+
+	if (sched_boost_policy() == SCHED_BOOST_NONE || !rq->cfs.h_nr_running)
+		return 0;
+
+	rq->ed_task = NULL;
+	list_for_each_entry(p, &rq->cfs_tasks, se.group_node) {
+		if (!loop_max)
+			break;
+
+		if (wallclock - p->last_wake_ts >= EARLY_DETECTION_DURATION) {
+			rq->ed_task = p;
+			return 1;
+		}
+
+		loop_max--;
+	}
+
+	return 0;
+}
+
+void update_avg_burst(struct task_struct *p)
+{
+	update_avg(&p->ravg.avg_burst, p->ravg.curr_burst);
+	p->ravg.curr_burst = 0;
+}
+
+void note_task_waking(struct task_struct *p, u64 wallclock)
+{
+	u64 sleep_time = wallclock - p->last_switch_out_ts;
+
+	/*
+	 * When a short burst and short sleeping task goes for a long
+	 * sleep, the task's avg_sleep_time gets boosted. It will not
+	 * come below the short_sleep threshold for a long time, which
+	 * results in incorrect packing. The idea behind tracking
+	 * avg_sleep_time is to detect if a task is short sleeping
+	 * or not. So limit the sleep time to twice the short sleep
+	 * threshold. For regular long sleeping tasks, the avg_sleep_time
+	 * would be higher than threshold, and packing happens correctly.
+	 */
+	sleep_time = min_t(u64, sleep_time, 2 * sysctl_sched_short_sleep);
+	update_avg(&p->ravg.avg_sleep_time, sleep_time);
+
+	p->last_wake_ts = wallclock;
+}
+
+#ifdef CONFIG_CGROUP_SCHED
+u64 cpu_upmigrate_discourage_read_u64(struct cgroup_subsys_state *css,
+					  struct cftype *cft)
+{
+	struct task_group *tg = css_tg(css);
+
+	return tg->upmigrate_discouraged;
+}
+
+int cpu_upmigrate_discourage_write_u64(struct cgroup_subsys_state *css,
+				struct cftype *cft, u64 upmigrate_discourage)
+{
+	struct task_group *tg = css_tg(css);
+	int discourage = upmigrate_discourage > 0;
+
+	if (tg->upmigrate_discouraged == discourage)
+		return 0;
+
+	/*
+	 * Revisit big-task classification for tasks of this cgroup. It would
+	 * have been efficient to walk tasks of just this cgroup in running
+	 * state, but we don't have easy means to do that. Walk all tasks in
+	 * running state on all cpus instead and revisit their big task
+	 * classification.
+	 */
+	get_online_cpus();
+	pre_big_task_count_change(cpu_online_mask);
+
+	tg->upmigrate_discouraged = discourage;
+
+	post_big_task_count_change(cpu_online_mask);
+	put_online_cpus();
+
+	return 0;
+}
+#endif /* CONFIG_CGROUP_SCHED */
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/kernel/sched/sched_avg.c	2019-10-29 09:26:25.629222613 +0100
@@ -0,0 +1,155 @@
+/* Copyright (c) 2012, 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/*
+ * Scheduler hook for average runqueue determination
+ */
+#include <linux/module.h>
+#include <linux/percpu.h>
+#include <linux/hrtimer.h>
+#include <linux/sched.h>
+#include <linux/math64.h>
+
+#include "sched.h"
+#include <trace/events/sched.h>
+
+static DEFINE_PER_CPU(u64, nr_prod_sum);
+static DEFINE_PER_CPU(u64, last_time);
+static DEFINE_PER_CPU(u64, nr_big_prod_sum);
+static DEFINE_PER_CPU(u64, nr);
+static DEFINE_PER_CPU(u64, nr_max);
+
+static DEFINE_PER_CPU(unsigned long, iowait_prod_sum);
+static DEFINE_PER_CPU(spinlock_t, nr_lock) = __SPIN_LOCK_UNLOCKED(nr_lock);
+static s64 last_get_time;
+
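+/*
+ * Averages are maintained as time-weighted product sums: every update
+ * adds nr_running * (time since the last update) to nr_prod_sum, so
+ * dividing by the elapsed time at poll time yields the time-weighted
+ * average nr_running over the polling interval.
+ */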
+#define DIV64_U64_ROUNDUP(X, Y) div64_u64((X) + (Y - 1), Y)
+/**
+ * sched_get_nr_running_avg
+ * @return: Average nr_running, iowait and nr_big_tasks value since last poll.
+ *	    Averages are rounded up to the nearest integer.
+ *
+ * Obtains the average nr_running value since the last poll.
+ * This function may not be called concurrently with itself.
+ */
+void sched_get_nr_running_avg(int *avg, int *iowait_avg, int *big_avg,
+			      unsigned int *max_nr, unsigned int *big_max_nr)
+{
+	int cpu;
+	u64 curr_time = sched_clock();
+	u64 diff = curr_time - last_get_time;
+	u64 tmp_avg = 0, tmp_iowait = 0, tmp_big_avg = 0;
+
+	*avg = 0;
+	*iowait_avg = 0;
+	*big_avg = 0;
+	*max_nr = 0;
+	*big_max_nr = 0;
+
+	if (!diff)
+		return;
+
+	/* read and reset nr_running counts */
+	for_each_possible_cpu(cpu) {
+		unsigned long flags;
+
+		spin_lock_irqsave(&per_cpu(nr_lock, cpu), flags);
+		curr_time = sched_clock();
+		diff = curr_time - per_cpu(last_time, cpu);
+		BUG_ON((s64)diff < 0);
+
+		tmp_avg += per_cpu(nr_prod_sum, cpu);
+		tmp_avg += per_cpu(nr, cpu) * diff;
+
+		tmp_big_avg += per_cpu(nr_big_prod_sum, cpu);
+		tmp_big_avg += nr_eligible_big_tasks(cpu) * diff;
+
+		tmp_iowait += per_cpu(iowait_prod_sum, cpu);
+		tmp_iowait += nr_iowait_cpu(cpu) * diff;
+
+		per_cpu(last_time, cpu) = curr_time;
+
+		per_cpu(nr_prod_sum, cpu) = 0;
+		per_cpu(nr_big_prod_sum, cpu) = 0;
+		per_cpu(iowait_prod_sum, cpu) = 0;
+
+		if (*max_nr < per_cpu(nr_max, cpu))
+			*max_nr = per_cpu(nr_max, cpu);
+
+		if (is_max_capacity_cpu(cpu)) {
+			if (*big_max_nr < per_cpu(nr_max, cpu))
+				*big_max_nr = per_cpu(nr_max, cpu);
+		}
+
+		per_cpu(nr_max, cpu) = per_cpu(nr, cpu);
+		spin_unlock_irqrestore(&per_cpu(nr_lock, cpu), flags);
+	}
+
+	diff = curr_time - last_get_time;
+	last_get_time = curr_time;
+
+	/*
+	 * Any task running on the BIG cluster and BIG tasks running on the
+	 * little cluster contribute to big_avg. Small or medium tasks can
+	 * also run on the BIG cluster when co-location and scheduler boost
+	 * features are activated. We don't want these tasks to downmigrate
+	 * to the little cluster when BIG CPUs are available but isolated.
+	 * Round up the average values so that core_ctl aggressively
+	 * unisolates BIG CPUs.
+	 */
+	*avg = (int)DIV64_U64_ROUNDUP(tmp_avg, diff);
+	*big_avg = (int)DIV64_U64_ROUNDUP(tmp_big_avg, diff);
+	*iowait_avg = (int)DIV64_U64_ROUNDUP(tmp_iowait, diff);
+
+	trace_sched_get_nr_running_avg(*avg, *big_avg, *iowait_avg,
+				       *max_nr, *big_max_nr);
+
+	BUG_ON(*avg < 0 || *big_avg < 0 || *iowait_avg < 0);
+	pr_debug("%s - avg:%d big_avg:%d iowait_avg:%d\n",
+				 __func__, *avg, *big_avg, *iowait_avg);
+}
+EXPORT_SYMBOL(sched_get_nr_running_avg);
+
+/**
+ * sched_update_nr_prod
+ * @cpu: The core id of the nr running driver.
+ * @delta: Adjust nr by 'delta' amount
+ * @inc: Whether we are increasing or decreasing the count
+ * @return: N/A
+ *
+ * Update average with latest nr_running value for CPU
+ */
+void sched_update_nr_prod(int cpu, long delta, bool inc)
+{
+	u64 diff;
+	u64 curr_time;
+	unsigned long flags, nr_running;
+
+	spin_lock_irqsave(&per_cpu(nr_lock, cpu), flags);
+	nr_running = per_cpu(nr, cpu);
+	curr_time = sched_clock();
+	diff = curr_time - per_cpu(last_time, cpu);
+	BUG_ON((s64)diff < 0);
+	per_cpu(last_time, cpu) = curr_time;
+	per_cpu(nr, cpu) = nr_running + (inc ? delta : -delta);
+
+	BUG_ON((s64)per_cpu(nr, cpu) < 0);
+
+	if (per_cpu(nr, cpu) > per_cpu(nr_max, cpu))
+		per_cpu(nr_max, cpu) = per_cpu(nr, cpu);
+
+	per_cpu(nr_prod_sum, cpu) += nr_running * diff;
+	per_cpu(nr_big_prod_sum, cpu) += nr_eligible_big_tasks(cpu) * diff;
+	per_cpu(iowait_prod_sum, cpu) += nr_iowait_cpu(cpu) * diff;
+	spin_unlock_irqrestore(&per_cpu(nr_lock, cpu), flags);
+}
+EXPORT_SYMBOL(sched_update_nr_prod);
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/kernel/sched/tune.c	2019-10-29 09:26:25.629222613 +0100
@@ -0,0 +1,1140 @@
+#include <linux/cgroup.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/percpu.h>
+#include <linux/printk.h>
+#include <linux/rcupdate.h>
+#include <linux/slab.h>
+
+#include <trace/events/sched.h>
+
+#include "sched.h"
+#include "tune.h"
+
+#ifdef CONFIG_CGROUP_SCHEDTUNE
+bool schedtune_initialized = false;
+#endif
+
+unsigned int sysctl_sched_cfs_boost __read_mostly;
+
+extern struct reciprocal_value schedtune_spc_rdiv;
+extern struct target_nrg schedtune_target_nrg;
+
+/* Performance Boost region (B) threshold params */
+static int perf_boost_idx;
+
+/* Performance Constraint region (C) threshold params */
+static int perf_constrain_idx;
+
+/*
+ * Performance-Energy (P-E) space threshold constants
+ */
+struct threshold_params {
+	int nrg_gain;
+	int cap_gain;
+};
+
+/*
+ * System specific P-E space thresholds constants
+ */
+static struct threshold_params
+threshold_gains[] = {
+	{ 0, 5 }, /*   < 10% */
+	{ 1, 5 }, /*   < 20% */
+	{ 2, 5 }, /*   < 30% */
+	{ 3, 5 }, /*   < 40% */
+	{ 4, 5 }, /*   < 50% */
+	{ 5, 4 }, /*   < 60% */
+	{ 5, 3 }, /*   < 70% */
+	{ 5, 2 }, /*   < 80% */
+	{ 5, 1 }, /*   < 90% */
+	{ 5, 0 }  /* <= 100% */
+};
+
+static int
+__schedtune_accept_deltas(int nrg_delta, int cap_delta,
+			  int perf_boost_idx, int perf_constrain_idx)
+{
+	int payoff = -INT_MAX;
+	int gain_idx = -1;
+
+	/* Performance Boost (B) region */
+	if (nrg_delta >= 0 && cap_delta > 0)
+		gain_idx = perf_boost_idx;
+	/* Performance Constraint (C) region */
+	else if (nrg_delta < 0 && cap_delta <= 0)
+		gain_idx = perf_constrain_idx;
+
+	/* Default: reject schedule candidate */
+	if (gain_idx == -1)
+		return payoff;
+
+	/*
+	 * Evaluate "Performance Boost" vs "Energy Increase"
+	 *
+	 * - Performance Boost (B) region
+	 *
+	 *   Condition: nrg_delta > 0 && cap_delta > 0
+	 *   Payoff criteria:
+	 *     cap_gain / nrg_gain  < cap_delta / nrg_delta, i.e.
+	 *     cap_gain * nrg_delta < cap_delta * nrg_gain
+	 *   Note that since both nrg_gain and nrg_delta are positive, the
+	 *   inequality does not change. Thus:
+	 *
+	 *     payoff = (cap_delta * nrg_gain) - (cap_gain * nrg_delta)
+	 *
+	 * - Performance Constraint (C) region
+	 *
+	 *   Condition: nrg_delta < 0 && cap_delta < 0
+	 *   Payoff criteria:
+	 *     cap_gain / nrg_gain  > cap_delta / nrg_delta, i.e.
+	 *     cap_gain * nrg_delta < cap_delta * nrg_gain
+	 *   Note that since nrg_gain > 0 while nrg_delta < 0, the
+	 *   inequality flips. Thus:
+	 *
+	 *     payoff = (cap_delta * nrg_gain) - (cap_gain * nrg_delta)
+	 *
+	 * This means that, in case of same positive defined {cap,nrg}_gain
+	 * for both the B and C regions, we can use the same payoff formula
+	 * where a positive value represents the accept condition.
+	 */
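+	/*
+	 * Illustrative example: with boost in the 80-90% bucket,
+	 * threshold_gains[8] = { .nrg_gain = 5, .cap_gain = 1 }, so a
+	 * candidate with cap_delta = 100 and nrg_delta = 10 yields
+	 * payoff = 100 * 5 - 10 * 1 = 490 > 0, i.e. accepted.
+	 */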
+	payoff  = cap_delta * threshold_gains[gain_idx].nrg_gain;
+	payoff -= nrg_delta * threshold_gains[gain_idx].cap_gain;
+
+	return payoff;
+}
+
+#ifdef CONFIG_CGROUP_SCHEDTUNE
+
+/*
+ * EAS scheduler tunables for task groups.
+ */
+
+/* SchedTune tunables for a group of tasks */
+struct schedtune {
+	/* SchedTune CGroup subsystem */
+	struct cgroup_subsys_state css;
+
+	/* Boost group allocated ID */
+	int idx;
+
+	/* Boost value for tasks on that SchedTune CGroup */
+	int boost;
+
+#ifdef CONFIG_SCHED_HMP
+	/* Toggle ability to override sched boost enabled */
+	bool sched_boost_no_override;
+
+	/*
+	 * Controls whether a cgroup is eligible for sched boost or not. This
+	 * can temporarily be disabled by the kernel based on the no_override
+	 * flag above.
+	 */
+	bool sched_boost_enabled;
+
+	/*
+	 * This tracks the default value of sched_boost_enabled and is used
+	 * to restore the value following any temporary changes to that flag.
+	 */
+	bool sched_boost_enabled_backup;
+
+	/*
+	 * Controls whether tasks of this cgroup should be colocated with each
+	 * other and tasks of other cgroups that have the same flag turned on.
+	 */
+	bool colocate;
+
+	/* Controls whether further updates are allowed to the colocate flag */
+	bool colocate_update_disabled;
+#endif
+
+	/* Performance Boost (B) region threshold params */
+	int perf_boost_idx;
+
+	/* Performance Constraint (C) region threshold params */
+	int perf_constrain_idx;
+
+	/*
+	 * Hint to bias scheduling of tasks on that SchedTune CGroup
+	 * towards idle CPUs
+	 */
+	int prefer_idle;
+};
+
+static inline struct schedtune *css_st(struct cgroup_subsys_state *css)
+{
+	return container_of(css, struct schedtune, css);
+}
+
+static inline struct schedtune *task_schedtune(struct task_struct *tsk)
+{
+	return css_st(task_css(tsk, schedtune_cgrp_id));
+}
+
+static inline struct schedtune *parent_st(struct schedtune *st)
+{
+	return css_st(st->css.parent);
+}
+
+/*
+ * SchedTune root control group
+ * The root control group is used to define system-wide boost tuning,
+ * which is applied to all tasks in the system.
+ * Task specific boost tuning could be specified by creating and
+ * configuring a child control group under the root one.
+ * By default, system-wide boosting is disabled, i.e. no boosting is applied
+ * to tasks which are not in a child control group.
+ */
+static struct schedtune
+root_schedtune = {
+	.boost	= 0,
+#ifdef CONFIG_SCHED_HMP
+	.sched_boost_no_override = false,
+	.sched_boost_enabled = true,
+	.sched_boost_enabled_backup = true,
+	.colocate = false,
+	.colocate_update_disabled = false,
+#endif
+	.perf_boost_idx = 0,
+	.perf_constrain_idx = 0,
+	.prefer_idle = 0,
+};
+
+int
+schedtune_accept_deltas(int nrg_delta, int cap_delta,
+			struct task_struct *task)
+{
+	struct schedtune *ct;
+	int perf_boost_idx;
+	int perf_constrain_idx;
+
+	/* Optimal (O) region */
+	if (nrg_delta < 0 && cap_delta > 0) {
+		trace_sched_tune_filter(nrg_delta, cap_delta, 0, 0, 1, 0);
+		return INT_MAX;
+	}
+
+	/* Suboptimal (S) region */
+	if (nrg_delta > 0 && cap_delta < 0) {
+		trace_sched_tune_filter(nrg_delta, cap_delta, 0, 0, -1, 5);
+		return -INT_MAX;
+	}
+
+	/* Get task specific perf Boost/Constraints indexes */
+	rcu_read_lock();
+	ct = task_schedtune(task);
+	perf_boost_idx = ct->perf_boost_idx;
+	perf_constrain_idx = ct->perf_constrain_idx;
+	rcu_read_unlock();
+
+	return __schedtune_accept_deltas(nrg_delta, cap_delta,
+			perf_boost_idx, perf_constrain_idx);
+}
+
+/*
+ * Maximum number of boost groups to support
+ * When per-task boosting is used we still allow only a limited number of
+ * boost groups for two main reasons:
+ * 1. on a real system we usually have only a few classes of workloads which
+ *    make sense to boost with different values (e.g. background vs foreground
+ *    tasks, interactive vs low-priority tasks)
+ * 2. a limited number allows for a simpler and more memory/time efficient
+ *    implementation especially for the computation of the per-CPU boost
+ *    value
+ */
+#define BOOSTGROUPS_COUNT 5
+
+/* Array of configured boostgroups */
+static struct schedtune *allocated_group[BOOSTGROUPS_COUNT] = {
+	&root_schedtune,
+	NULL,
+};
+
+/* SchedTune boost groups
+ * Keep track of all the boost groups which impact a CPU, for example when a
+ * CPU has two RUNNABLE tasks belonging to two different boost groups and thus
+ * likely with different boost values.
+ * Since on each system we expect only a limited number of boost groups, here
+ * we use a simple array to keep track of the metrics required to compute the
+ * maximum per-CPU boosting value.
+ */
+struct boost_groups {
+	/* Maximum boost value for all RUNNABLE tasks on a CPU */
+	bool idle;
+	int boost_max;
+	struct {
+		/* The boost for tasks on that boost group */
+		int boost;
+		/* Count of RUNNABLE tasks on that boost group */
+		unsigned tasks;
+	} group[BOOSTGROUPS_COUNT];
+	/* CPU's boost group locking */
+	raw_spinlock_t lock;
+};
+
+/* Boost groups affecting each CPU in the system */
+DEFINE_PER_CPU(struct boost_groups, cpu_boost_groups);
+
+#ifdef CONFIG_SCHED_HMP
+static inline void init_sched_boost(struct schedtune *st)
+{
+	st->sched_boost_no_override = false;
+	st->sched_boost_enabled = true;
+	st->sched_boost_enabled_backup = st->sched_boost_enabled;
+	st->colocate = false;
+	st->colocate_update_disabled = false;
+}
+
+bool same_schedtune(struct task_struct *tsk1, struct task_struct *tsk2)
+{
+	return task_schedtune(tsk1) == task_schedtune(tsk2);
+}
+
+void update_cgroup_boost_settings(void)
+{
+	int i;
+
+	for (i = 0; i < BOOSTGROUPS_COUNT; i++) {
+		if (!allocated_group[i])
+			break;
+
+		if (allocated_group[i]->sched_boost_no_override)
+			continue;
+
+		allocated_group[i]->sched_boost_enabled = false;
+	}
+}
+
+void restore_cgroup_boost_settings(void)
+{
+	int i;
+
+	for (i = 0; i < BOOSTGROUPS_COUNT; i++) {
+		if (!allocated_group[i])
+			break;
+
+		allocated_group[i]->sched_boost_enabled =
+			allocated_group[i]->sched_boost_enabled_backup;
+	}
+}
+
+bool task_sched_boost(struct task_struct *p)
+{
+	struct schedtune *st = task_schedtune(p);
+
+	return st->sched_boost_enabled;
+}
+
+static u64
+sched_boost_override_read(struct cgroup_subsys_state *css,
+			struct cftype *cft)
+{
+	struct schedtune *st = css_st(css);
+
+	return st->sched_boost_no_override;
+}
+
+static int sched_boost_override_write(struct cgroup_subsys_state *css,
+			struct cftype *cft, u64 override)
+{
+	struct schedtune *st = css_st(css);
+
+	st->sched_boost_no_override = !!override;
+
+	return 0;
+}
+
+static u64 sched_boost_enabled_read(struct cgroup_subsys_state *css,
+			struct cftype *cft)
+{
+	struct schedtune *st = css_st(css);
+
+	return st->sched_boost_enabled;
+}
+
+static int sched_boost_enabled_write(struct cgroup_subsys_state *css,
+			struct cftype *cft, u64 enable)
+{
+	struct schedtune *st = css_st(css);
+
+	st->sched_boost_enabled = !!enable;
+	st->sched_boost_enabled_backup = st->sched_boost_enabled;
+
+	return 0;
+}
+
+static u64 sched_colocate_read(struct cgroup_subsys_state *css,
+			struct cftype *cft)
+{
+	struct schedtune *st = css_st(css);
+
+	return st->colocate;
+}
+
+static int sched_colocate_write(struct cgroup_subsys_state *css,
+			struct cftype *cft, u64 colocate)
+{
+	struct schedtune *st = css_st(css);
+
+	if (st->colocate_update_disabled)
+		return -EPERM;
+
+	st->colocate = !!colocate;
+	st->colocate_update_disabled = true;
+	return 0;
+}
+
+#else /* CONFIG_SCHED_HMP */
+
+static inline void init_sched_boost(struct schedtune *st) { }
+
+#endif /* CONFIG_SCHED_HMP */
+
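+/*
+ * Recompute bg->boost_max for a CPU: the maximum over the always-active
+ * root group and every boost group that currently has RUNNABLE tasks
+ * on that CPU.
+ */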
+static void
+schedtune_cpu_update(int cpu)
+{
+	struct boost_groups *bg;
+	int boost_max;
+	int idx;
+
+	bg = &per_cpu(cpu_boost_groups, cpu);
+
+	/* The root boost group is always active */
+	boost_max = bg->group[0].boost;
+	for (idx = 1; idx < BOOSTGROUPS_COUNT; ++idx) {
+		/*
+		 * A boost group affects a CPU only if it has
+		 * RUNNABLE tasks on that CPU
+		 */
+		if (bg->group[idx].tasks == 0)
+			continue;
+
+		boost_max = max(boost_max, bg->group[idx].boost);
+	}
+	/*
+	 * Ensure boost_max is non-negative when all cgroup boost values
+	 * are negative. This avoids under-accounting of CPU capacity,
+	 * which may cause task stacking and frequency spikes.
+	 */
+	boost_max = max(boost_max, 0);
+	bg->boost_max = boost_max;
+}
+
+static int
+schedtune_boostgroup_update(int idx, int boost)
+{
+	struct boost_groups *bg;
+	int cur_boost_max;
+	int old_boost;
+	int cpu;
+
+	/* Update per CPU boost groups */
+	for_each_possible_cpu(cpu) {
+		bg = &per_cpu(cpu_boost_groups, cpu);
+
+		/*
+		 * Keep track of current boost values to compute the per CPU
+		 * maximum only when it has been affected by the new value of
+		 * the updated boost group
+		 */
+		cur_boost_max = bg->boost_max;
+		old_boost = bg->group[idx].boost;
+
+		/* Update the boost value of this boost group */
+		bg->group[idx].boost = boost;
+
+		/* Check if this update increases the current max */
+		if (boost > cur_boost_max && bg->group[idx].tasks) {
+			bg->boost_max = boost;
+			trace_sched_tune_boostgroup_update(cpu, 1, bg->boost_max);
+			continue;
+		}
+
+		/* Check if this update has decreased current max */
+		if (cur_boost_max == old_boost && old_boost > boost) {
+			schedtune_cpu_update(cpu);
+			trace_sched_tune_boostgroup_update(cpu, -1, bg->boost_max);
+			continue;
+		}
+
+		trace_sched_tune_boostgroup_update(cpu, 0, bg->boost_max);
+	}
+
+	return 0;
+}
+
+#define ENQUEUE_TASK  1
+#define DEQUEUE_TASK -1
+
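+/*
+ * Adjust the RUNNABLE task count of boost group 'idx' on 'cpu' by
+ * 'task_count' (ENQUEUE_TASK or DEQUEUE_TASK) and refresh the CPU's
+ * boost_max when the group becomes active or inactive.
+ */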
+static inline void
+schedtune_tasks_update(struct task_struct *p, int cpu, int idx, int task_count)
+{
+	struct boost_groups *bg = &per_cpu(cpu_boost_groups, cpu);
+	int tasks = bg->group[idx].tasks + task_count;
+
+	/* Update boosted tasks count while avoiding to make it negative */
+	bg->group[idx].tasks = max(0, tasks);
+
+	trace_sched_tune_tasks_update(p, cpu, tasks, idx,
+			bg->group[idx].boost, bg->boost_max);
+
+	/* Boost group activation or deactivation on that RQ */
+	if (tasks == 1 || tasks == 0)
+		schedtune_cpu_update(cpu);
+}
+
+/*
+ * NOTE: This function must be called while holding the lock on the CPU RQ
+ */
+void schedtune_enqueue_task(struct task_struct *p, int cpu)
+{
+	struct boost_groups *bg = &per_cpu(cpu_boost_groups, cpu);
+	unsigned long irq_flags;
+	struct schedtune *st;
+	int idx;
+
+	if (!unlikely(schedtune_initialized))
+		return;
+
+	/*
+	 * When a task is marked PF_EXITING by do_exit() it's going to be
+	 * dequeued and enqueued multiple times in the exit path.
+	 * Thus we avoid any further update, since we do not want to change
+	 * CPU boosting while the task is exiting.
+	 */
+	if (p->flags & PF_EXITING)
+		return;
+
+	/*
+	 * Boost group accounting is protected by a per-cpu lock and requires
+	 * interrupts to be disabled to avoid race conditions, for example on
+	 * do_exit()::cgroup_exit() and task migration.
+	 */
+	raw_spin_lock_irqsave(&bg->lock, irq_flags);
+	rcu_read_lock();
+
+	st = task_schedtune(p);
+	idx = st->idx;
+
+	schedtune_tasks_update(p, cpu, idx, ENQUEUE_TASK);
+
+	rcu_read_unlock();
+	raw_spin_unlock_irqrestore(&bg->lock, irq_flags);
+}
+
+int schedtune_can_attach(struct cgroup_taskset *tset)
+{
+	struct task_struct *task;
+	struct cgroup_subsys_state *css;
+	struct boost_groups *bg;
+	unsigned long irq_flags;
+	unsigned int cpu;
+	struct rq *rq;
+	int src_bg; /* Source boost group index */
+	int dst_bg; /* Destination boost group index */
+	int tasks;
+
+	if (!unlikely(schedtune_initialized))
+		return 0;
+
+
+	cgroup_taskset_for_each(task, css, tset) {
+
+		/*
+		 * Lock the CPU's RQ the task is enqueued to avoid race
+		 * conditions with migration code while the task is being
+		 * accounted
+		 */
+		rq = lock_rq_of(task, &irq_flags);
+
+		if (!task->on_rq) {
+			unlock_rq_of(rq, task, &irq_flags);
+			continue;
+		}
+
+		/*
+		 * Boost group accounting is protected by a per-cpu lock and
+		 * requires interrupts to be disabled to avoid race conditions on...
+		 */
+		cpu = cpu_of(rq);
+		bg = &per_cpu(cpu_boost_groups, cpu);
+		raw_spin_lock(&bg->lock);
+
+		dst_bg = css_st(css)->idx;
+		src_bg = task_schedtune(task)->idx;
+
+		/*
+		 * Current task is not changing boostgroup, which can
+		 * happen when the new hierarchy is in use.
+		 */
+		if (unlikely(dst_bg == src_bg)) {
+			raw_spin_unlock(&bg->lock);
+			unlock_rq_of(rq, task, &irq_flags);
+			continue;
+		}
+
+		/*
+		 * This is the case of a RUNNABLE task which is switching its
+		 * current boost group.
+		 */
+
+		/* Move task from src to dst boost group */
+		tasks = bg->group[src_bg].tasks - 1;
+		bg->group[src_bg].tasks = max(0, tasks);
+		bg->group[dst_bg].tasks += 1;
+
+		raw_spin_unlock(&bg->lock);
+		unlock_rq_of(rq, task, &irq_flags);
+
+		/* Update CPU boost group */
+		if (bg->group[src_bg].tasks == 0 || bg->group[dst_bg].tasks == 1)
+			schedtune_cpu_update(task_cpu(task));
+
+	}
+
+	return 0;
+}
+
+void schedtune_cancel_attach(struct cgroup_taskset *tset)
+{
+	/*
+	 * This can happen only if the SchedTune controller is mounted with
+	 * other hierarchies and one of them fails. Since SchedTune is
+	 * usually mounted on its own hierarchy, for the time being we do
+	 * not implement a proper rollback mechanism.
+	 */
+	WARN(1, "SchedTune cancel attach not implemented");
+}
+
+/*
+ * NOTE: This function must be called while holding the lock on the CPU RQ
+ */
+void schedtune_dequeue_task(struct task_struct *p, int cpu)
+{
+	struct boost_groups *bg = &per_cpu(cpu_boost_groups, cpu);
+	unsigned long irq_flags;
+	struct schedtune *st;
+	int idx;
+
+	if (!unlikely(schedtune_initialized))
+		return;
+
+	/*
+	 * When a task is marked PF_EXITING by do_exit() it's going to be
+	 * dequeued and enqueued multiple times in the exit path.
+	 * Thus we avoid any further update, since we do not want to change
+	 * CPU boosting while the task is exiting.
+	 * The last dequeue is already enforced by the do_exit() code path
+	 * via schedtune_exit_task().
+	 */
+	if (p->flags & PF_EXITING)
+		return;
+
+	/*
+	 * Boost group accounting is protected by a per-cpu lock and requires
+	 * interrupts to be disabled to avoid race conditions on...
+	 */
+	raw_spin_lock_irqsave(&bg->lock, irq_flags);
+	rcu_read_lock();
+
+	st = task_schedtune(p);
+	idx = st->idx;
+
+	schedtune_tasks_update(p, cpu, idx, DEQUEUE_TASK);
+
+	rcu_read_unlock();
+	raw_spin_unlock_irqrestore(&bg->lock, irq_flags);
+}
+
+void schedtune_exit_task(struct task_struct *tsk)
+{
+	struct schedtune *st;
+	unsigned long irq_flags;
+	unsigned int cpu;
+	struct rq *rq;
+	int idx;
+
+	if (!unlikely(schedtune_initialized))
+		return;
+
+	rq = lock_rq_of(tsk, &irq_flags);
+	rcu_read_lock();
+
+	cpu = cpu_of(rq);
+	st = task_schedtune(tsk);
+	idx = st->idx;
+	schedtune_tasks_update(tsk, cpu, idx, DEQUEUE_TASK);
+
+	rcu_read_unlock();
+	unlock_rq_of(rq, tsk, &irq_flags);
+}
+
+int schedtune_cpu_boost(int cpu)
+{
+	struct boost_groups *bg;
+
+	bg = &per_cpu(cpu_boost_groups, cpu);
+	return bg->boost_max;
+}
+
+int schedtune_task_boost(struct task_struct *p)
+{
+	struct schedtune *st;
+	int task_boost;
+
+	if (!unlikely(schedtune_initialized))
+		return 0;
+
+	/* Get task boost value */
+	rcu_read_lock();
+	st = task_schedtune(p);
+	task_boost = st->boost;
+	rcu_read_unlock();
+
+	return task_boost;
+}
+
+int schedtune_prefer_idle(struct task_struct *p)
+{
+	struct schedtune *st;
+	int prefer_idle;
+
+	if (!unlikely(schedtune_initialized))
+		return 0;
+
+	/* Get prefer_idle value */
+	rcu_read_lock();
+	st = task_schedtune(p);
+	prefer_idle = st->prefer_idle;
+	rcu_read_unlock();
+
+	return prefer_idle;
+}
+
+static u64
+prefer_idle_read(struct cgroup_subsys_state *css, struct cftype *cft)
+{
+	struct schedtune *st = css_st(css);
+
+	return st->prefer_idle;
+}
+
+static int
+prefer_idle_write(struct cgroup_subsys_state *css, struct cftype *cft,
+	    u64 prefer_idle)
+{
+	struct schedtune *st = css_st(css);
+	st->prefer_idle = prefer_idle;
+
+	return 0;
+}
+
+static s64
+boost_read(struct cgroup_subsys_state *css, struct cftype *cft)
+{
+	struct schedtune *st = css_st(css);
+
+	return st->boost;
+}
+
+static int
+boost_write(struct cgroup_subsys_state *css, struct cftype *cft,
+	    s64 boost)
+{
+	struct schedtune *st = css_st(css);
+	unsigned threshold_idx;
+	int boost_pct;
+
+	if (boost < -100 || boost > 100)
+		return -EINVAL;
+	boost_pct = boost;
+
+	/*
+	 * Update threshold params for Performance Boost (B)
+	 * and Performance Constraint (C) regions.
+	 * The current implementation uses the same cuts for both
+	 * B and C regions.
+	 */
+	threshold_idx = clamp(boost_pct, 0, 99) / 10;
+	st->perf_boost_idx = threshold_idx;
+	st->perf_constrain_idx = threshold_idx;
+
+	st->boost = boost;
+	if (css == &root_schedtune.css) {
+		sysctl_sched_cfs_boost = boost;
+		perf_boost_idx = threshold_idx;
+		perf_constrain_idx = threshold_idx;
+	}
+
+	/* Update CPU boost */
+	schedtune_boostgroup_update(st->idx, st->boost);
+
+	trace_sched_tune_config(st->boost);
+
+	return 0;
+}
+
+static void schedtune_attach(struct cgroup_taskset *tset)
+{
+	struct task_struct *task;
+	struct cgroup_subsys_state *css;
+	struct schedtune *st;
+	bool colocate;
+
+	cgroup_taskset_first(tset, &css);
+	st = css_st(css);
+
+	colocate = st->colocate;
+
+	cgroup_taskset_for_each(task, css, tset)
+		sync_cgroup_colocation(task, colocate);
+}
+
+static struct cftype files[] = {
+	{
+		.name = "boost",
+		.read_s64 = boost_read,
+		.write_s64 = boost_write,
+	},
+	{
+		.name = "prefer_idle",
+		.read_u64 = prefer_idle_read,
+		.write_u64 = prefer_idle_write,
+	},
+#ifdef CONFIG_SCHED_HMP
+	{
+		.name = "sched_boost_no_override",
+		.read_u64 = sched_boost_override_read,
+		.write_u64 = sched_boost_override_write,
+	},
+	{
+		.name = "sched_boost_enabled",
+		.read_u64 = sched_boost_enabled_read,
+		.write_u64 = sched_boost_enabled_write,
+	},
+	{
+		.name = "colocate",
+		.read_u64 = sched_colocate_read,
+		.write_u64 = sched_colocate_write,
+	},
+#endif
+	{ }	/* terminate */
+};
+
+static int
+schedtune_boostgroup_init(struct schedtune *st)
+{
+	struct boost_groups *bg;
+	int cpu;
+
+	/* Keep track of allocated boost groups */
+	allocated_group[st->idx] = st;
+
+	/* Initialize the per CPU boost groups */
+	for_each_possible_cpu(cpu) {
+		bg = &per_cpu(cpu_boost_groups, cpu);
+		bg->group[st->idx].boost = 0;
+		bg->group[st->idx].tasks = 0;
+	}
+
+	return 0;
+}
+
+static struct cgroup_subsys_state *
+schedtune_css_alloc(struct cgroup_subsys_state *parent_css)
+{
+	struct schedtune *st;
+	int idx;
+
+	if (!parent_css)
+		return &root_schedtune.css;
+
+	/* Allow only single-level hierarchies */
+	if (parent_css != &root_schedtune.css) {
+		pr_err("Nested SchedTune boosting groups not allowed\n");
+		return ERR_PTR(-ENOMEM);
+	}
+
+	/* Allow only a limited number of boosting groups */
+	for (idx = 1; idx < BOOSTGROUPS_COUNT; ++idx)
+		if (!allocated_group[idx])
+			break;
+	if (idx == BOOSTGROUPS_COUNT) {
+		pr_err("Trying to create more than %d SchedTune boosting groups\n",
+		       BOOSTGROUPS_COUNT);
+		return ERR_PTR(-ENOSPC);
+	}
+
+	st = kzalloc(sizeof(*st), GFP_KERNEL);
+	if (!st)
+		goto out;
+
+	/* Initialize per-CPU boost group support */
+	st->idx = idx;
+	init_sched_boost(st);
+	if (schedtune_boostgroup_init(st))
+		goto release;
+
+	return &st->css;
+
+release:
+	kfree(st);
+out:
+	return ERR_PTR(-ENOMEM);
+}
+
+static void
+schedtune_boostgroup_release(struct schedtune *st)
+{
+	/* Reset this boost group */
+	schedtune_boostgroup_update(st->idx, 0);
+
+	/* Keep track of allocated boost groups */
+	allocated_group[st->idx] = NULL;
+}
+
+static void
+schedtune_css_free(struct cgroup_subsys_state *css)
+{
+	struct schedtune *st = css_st(css);
+
+	schedtune_boostgroup_release(st);
+	kfree(st);
+}
+
+struct cgroup_subsys schedtune_cgrp_subsys = {
+	.css_alloc	= schedtune_css_alloc,
+	.css_free	= schedtune_css_free,
+	.can_attach     = schedtune_can_attach,
+	.cancel_attach  = schedtune_cancel_attach,
+	.legacy_cftypes	= files,
+	.early_init	= 1,
+	.attach		= schedtune_attach,
+};
+
+static inline void
+schedtune_init_cgroups(void)
+{
+	struct boost_groups *bg;
+	int cpu;
+
+	/* Initialize the per CPU boost groups */
+	for_each_possible_cpu(cpu) {
+		bg = &per_cpu(cpu_boost_groups, cpu);
+		memset(bg, 0, sizeof(struct boost_groups));
+		raw_spin_lock_init(&bg->lock);
+	}
+
+	pr_info("schedtune: configured to support %d boost groups\n",
+		BOOSTGROUPS_COUNT);
+
+	schedtune_initialized = true;
+}
+
+#else /* CONFIG_CGROUP_SCHEDTUNE */
+
+int
+schedtune_accept_deltas(int nrg_delta, int cap_delta,
+			struct task_struct *task)
+{
+	/* Optimal (O) region */
+	if (nrg_delta < 0 && cap_delta > 0) {
+		trace_sched_tune_filter(nrg_delta, cap_delta, 0, 0, 1, 0);
+		return INT_MAX;
+	}
+
+	/* Suboptimal (S) region */
+	if (nrg_delta > 0 && cap_delta < 0) {
+		trace_sched_tune_filter(nrg_delta, cap_delta, 0, 0, -1, 5);
+		return -INT_MAX;
+	}
+
+	return __schedtune_accept_deltas(nrg_delta, cap_delta,
+			perf_boost_idx, perf_constrain_idx);
+}
+
+#endif /* CONFIG_CGROUP_SCHEDTUNE */
+
+int
+sysctl_sched_cfs_boost_handler(struct ctl_table *table, int write,
+			       void __user *buffer, size_t *lenp,
+			       loff_t *ppos)
+{
+	int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
+	unsigned threshold_idx;
+	int boost_pct;
+
+	if (ret || !write)
+		return ret;
+
+	if (sysctl_sched_cfs_boost < -100 || sysctl_sched_cfs_boost > 100)
+		return -EINVAL;
+	boost_pct = sysctl_sched_cfs_boost;
+
+	/*
+	 * Update threshold params for Performance Boost (B)
+	 * and Performance Constraint (C) regions.
+	 * The current implementation uses the same cuts for both
+	 * B and C regions.
+	 */
+	threshold_idx = clamp(boost_pct, 0, 99) / 10;
+	perf_boost_idx = threshold_idx;
+	perf_constrain_idx = threshold_idx;
+
+	return 0;
+}
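+
+/*
+ * Illustrative sketch, assuming the knob is registered as
+ * kernel.sched_cfs_boost in the sysctl table:
+ *
+ *   echo 10 > /proc/sys/kernel/sched_cfs_boost
+ */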
+
+#ifdef CONFIG_SCHED_DEBUG
+static void
+schedtune_test_nrg(unsigned long delta_pwr)
+{
+	unsigned long test_delta_pwr;
+	unsigned long test_norm_pwr;
+	int idx;
+
+	/*
+	 * Check normalization constants using some constant system
+	 * energy values
+	 */
+	pr_info("schedtune: verify normalization constants...\n");
+	for (idx = 0; idx < 6; ++idx) {
+		test_delta_pwr = delta_pwr >> idx;
+
+		/* Normalize on max energy for target platform */
+		test_norm_pwr = reciprocal_divide(
+					test_delta_pwr << SCHED_LOAD_SHIFT,
+					schedtune_target_nrg.rdiv);
+
+		pr_info("schedtune: max_pwr/2^%d: %4lu => norm_pwr: %5lu\n",
+			idx, test_delta_pwr, test_norm_pwr);
+	}
+}
+#else
+#define schedtune_test_nrg(delta_pwr)
+#endif
+
+/*
+ * Compute the min/max power consumption of a cluster and all its CPUs
+ */
+static void
+schedtune_add_cluster_nrg(
+		struct sched_domain *sd,
+		struct sched_group *sg,
+		struct target_nrg *ste)
+{
+	struct sched_domain *sd2;
+	struct sched_group *sg2;
+
+	struct cpumask *cluster_cpus;
+	char str[32];
+
+	unsigned long min_pwr;
+	unsigned long max_pwr;
+	int cpu;
+
+	/* Get Cluster energy using EM data for the first CPU */
+	cluster_cpus = sched_group_cpus(sg);
+	snprintf(str, 32, "CLUSTER[%*pbl]",
+		 cpumask_pr_args(cluster_cpus));
+
+	min_pwr = sg->sge->idle_states[sg->sge->nr_idle_states - 1].power;
+	max_pwr = sg->sge->cap_states[sg->sge->nr_cap_states - 1].power;
+	pr_info("schedtune: %-17s min_pwr: %5lu max_pwr: %5lu\n",
+		str, min_pwr, max_pwr);
+
+	/*
+	 * Keep track of this cluster's energy in the computation of the
+	 * overall system energy
+	 */
+	ste->min_power += min_pwr;
+	ste->max_power += max_pwr;
+
+	/* Get CPU energy using EM data for each CPU in the group */
+	for_each_cpu(cpu, cluster_cpus) {
+		/* Get a SD view for the specific CPU */
+		for_each_domain(cpu, sd2) {
+			/* Get the CPU group */
+			sg2 = sd2->groups;
+			min_pwr = sg2->sge->idle_states[sg2->sge->nr_idle_states - 1].power;
+			max_pwr = sg2->sge->cap_states[sg2->sge->nr_cap_states - 1].power;
+
+			ste->min_power += min_pwr;
+			ste->max_power += max_pwr;
+
+			snprintf(str, 32, "CPU[%d]", cpu);
+			pr_info("schedtune: %-17s min_pwr: %5lu max_pwr: %5lu\n",
+				str, min_pwr, max_pwr);
+
+			/*
+			 * Assume we have EM data only at the CPU and
+			 * the upper CLUSTER level
+			 */
+			BUG_ON(!cpumask_equal(
+				sched_group_cpus(sg),
+				sched_group_cpus(sd2->parent->groups)
+				));
+			break;
+		}
+	}
+}
+
+/*
+ * Initialize the constants required to compute normalized energy.
+ * The values of these constants depend on the EM data for the specific
+ * target system and topology.
+ * Thus, this function is expected to be called by the code
+ * that binds the EM to the topology information.
+ */
+static int
+schedtune_init(void)
+{
+	struct target_nrg *ste = &schedtune_target_nrg;
+	unsigned long delta_pwr = 0;
+	struct sched_domain *sd;
+	struct sched_group *sg;
+
+	pr_info("schedtune: init normalization constants...\n");
+	ste->max_power = 0;
+	ste->min_power = 0;
+
+	rcu_read_lock();
+
+	/*
+	 * When EAS is in use, we always have a pointer to the highest SD
+	 * which provides EM data.
+	 */
+	sd = rcu_dereference(per_cpu(sd_ea, cpumask_first(cpu_online_mask)));
+	if (!sd) {
+		if (energy_aware())
+			pr_warn("schedtune: no energy model data\n");
+		goto nodata;
+	}
+
+	sg = sd->groups;
+	do {
+		schedtune_add_cluster_nrg(sd, sg, ste);
+	} while (sg = sg->next, sg != sd->groups);
+
+	rcu_read_unlock();
+
+	pr_info("schedtune: %-17s min_pwr: %5lu max_pwr: %5lu\n",
+		"SYSTEM", ste->min_power, ste->max_power);
+
+	/* Compute normalization constants */
+	delta_pwr = ste->max_power - ste->min_power;
+	ste->rdiv = reciprocal_value(delta_pwr);
+	pr_info("schedtune: using normalization constants mul: %u sh1: %u sh2: %u\n",
+		ste->rdiv.m, ste->rdiv.sh1, ste->rdiv.sh2);
+
+	schedtune_test_nrg(delta_pwr);
+
+#ifdef CONFIG_CGROUP_SCHEDTUNE
+	schedtune_init_cgroups();
+#else
+	pr_info("schedtune: configured to support global boosting only\n");
+#endif
+
+	schedtune_spc_rdiv = reciprocal_value(100);
+
+	return 0;
+
+nodata:
+	pr_warning("schedtune: disabled!\n");
+	rcu_read_unlock();
+	return -EINVAL;
+}
+postcore_initcall(schedtune_init);
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/kernel/sched/tune.h	2019-01-22 16:16:28.707293532 +0100
@@ -0,0 +1,55 @@
+
+#ifdef CONFIG_SCHED_TUNE
+
+#include <linux/reciprocal_div.h>
+
+/*
+ * System energy normalization constants
+ */
+struct target_nrg {
+	unsigned long min_power;
+	unsigned long max_power;
+	struct reciprocal_value rdiv;
+};
+
+#ifdef CONFIG_CGROUP_SCHEDTUNE
+
+int schedtune_cpu_boost(int cpu);
+int schedtune_task_boost(struct task_struct *tsk);
+
+int schedtune_prefer_idle(struct task_struct *tsk);
+
+void schedtune_exit_task(struct task_struct *tsk);
+
+void schedtune_enqueue_task(struct task_struct *p, int cpu);
+void schedtune_dequeue_task(struct task_struct *p, int cpu);
+
+#else /* CONFIG_CGROUP_SCHEDTUNE */
+
+#define schedtune_cpu_boost(cpu)  get_sysctl_sched_cfs_boost()
+#define schedtune_task_boost(tsk) get_sysctl_sched_cfs_boost()
+
+#define schedtune_exit_task(task) do { } while (0)
+
+#define schedtune_enqueue_task(task, cpu) do { } while (0)
+#define schedtune_dequeue_task(task, cpu) do { } while (0)
+
+#endif /* CONFIG_CGROUP_SCHEDTUNE */
+
+int schedtune_normalize_energy(int energy);
+int schedtune_accept_deltas(int nrg_delta, int cap_delta,
+			    struct task_struct *task);
+
+#else /* CONFIG_SCHED_TUNE */
+
+#define schedtune_cpu_boost(cpu)  0
+#define schedtune_task_boost(tsk) 0
+
+#define schedtune_exit_task(task) do { } while (0)
+
+#define schedtune_enqueue_task(task, cpu) do { } while (0)
+#define schedtune_dequeue_task(task, cpu) do { } while (0)
+
+#define schedtune_accept_deltas(nrg_delta, cap_delta, task) nrg_delta
+
+#endif /* CONFIG_SCHED_TUNE */
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/kernel/sched/walt.h	2019-01-22 16:16:28.707293532 +0100
@@ -0,0 +1,64 @@
+/*
+ * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __WALT_H
+#define __WALT_H
+
+#ifdef CONFIG_SCHED_WALT
+
+void walt_update_task_ravg(struct task_struct *p, struct rq *rq, int event,
+		u64 wallclock, u64 irqtime);
+void walt_inc_cumulative_runnable_avg(struct rq *rq, struct task_struct *p);
+void walt_dec_cumulative_runnable_avg(struct rq *rq, struct task_struct *p);
+void walt_inc_cfs_cumulative_runnable_avg(struct cfs_rq *rq,
+		struct task_struct *p);
+void walt_dec_cfs_cumulative_runnable_avg(struct cfs_rq *rq,
+		struct task_struct *p);
+void walt_fixup_busy_time(struct task_struct *p, int new_cpu);
+void walt_init_new_task_load(struct task_struct *p);
+void walt_mark_task_starting(struct task_struct *p);
+void walt_set_window_start(struct rq *rq);
+void walt_migrate_sync_cpu(int cpu);
+void walt_init_cpu_efficiency(void);
+u64 walt_ktime_clock(void);
+void walt_account_irqtime(int cpu, struct task_struct *curr, u64 delta,
+			  u64 wallclock);
+
+u64 walt_irqload(int cpu);
+int walt_cpu_high_irqload(int cpu);
+
+#else /* CONFIG_SCHED_WALT */
+
+static inline void walt_update_task_ravg(struct task_struct *p, struct rq *rq,
+		int event, u64 wallclock, u64 irqtime) { }
+static inline void walt_inc_cumulative_runnable_avg(struct rq *rq, struct task_struct *p) { }
+static inline void walt_dec_cumulative_runnable_avg(struct rq *rq, struct task_struct *p) { }
+static inline void walt_inc_cfs_cumulative_runnable_avg(struct cfs_rq *rq,
+		struct task_struct *p) { }
+static inline void walt_dec_cfs_cumulative_runnable_avg(struct cfs_rq *rq,
+		struct task_struct *p) { }
+static inline void walt_fixup_busy_time(struct task_struct *p, int new_cpu) { }
+static inline void walt_init_new_task_load(struct task_struct *p) { }
+static inline void walt_mark_task_starting(struct task_struct *p) { }
+static inline void walt_set_window_start(struct rq *rq) { }
+static inline void walt_migrate_sync_cpu(int cpu) { }
+static inline void walt_init_cpu_efficiency(void) { }
+static inline u64 walt_ktime_clock(void) { return 0; }
+
+#define walt_cpu_high_irqload(cpu) false
+
+#endif /* CONFIG_SCHED_WALT */
+
+extern bool walt_disabled;
+
+#endif
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/kernel/trace/ipc_logging.c	2019-01-22 16:16:28.727293713 +0100
@@ -0,0 +1,876 @@
+/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <asm/arch_timer.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/jiffies.h>
+#include <linux/debugfs.h>
+#include <linux/io.h>
+#include <linux/idr.h>
+#include <linux/string.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+#include <linux/delay.h>
+#include <linux/completion.h>
+#include <linux/ipc_logging.h>
+
+#include "ipc_logging_private.h"
+
+#define LOG_PAGE_DATA_SIZE	sizeof(((struct ipc_log_page *)0)->data)
+#define LOG_PAGE_FLAG (1 << 31)
+
+static LIST_HEAD(ipc_log_context_list);
+static DEFINE_RWLOCK(context_list_lock_lha1);
+static void *get_deserialization_func(struct ipc_log_context *ilctxt,
+				      int type);
+
+static struct ipc_log_page *get_first_page(struct ipc_log_context *ilctxt)
+{
+	struct ipc_log_page_header *p_pghdr;
+	struct ipc_log_page *pg = NULL;
+
+	if (!ilctxt)
+		return NULL;
+	p_pghdr = list_first_entry(&ilctxt->page_list,
+				   struct ipc_log_page_header, list);
+	pg = container_of(p_pghdr, struct ipc_log_page, hdr);
+	return pg;
+}
+
+/**
+ * is_nd_read_empty - Returns true if no data is available to read in the log
+ *
+ * @ilctxt: logging context
+ * @returns: 1 if context is empty; 0 if not empty; <0 for failure
+ *
+ * This is for the debugfs read pointer which allows for a non-destructive read.
+ * There may still be data in the log, but it may have already been read.
+ */
+static int is_nd_read_empty(struct ipc_log_context *ilctxt)
+{
+	if (!ilctxt)
+		return -EINVAL;
+
+	return ((ilctxt->nd_read_page == ilctxt->write_page) &&
+		(ilctxt->nd_read_page->hdr.nd_read_offset ==
+		 ilctxt->write_page->hdr.write_offset));
+}
+
+/**
+ * is_read_empty - Returns true if no data is available in the log
+ *
+ * @ilctxt: logging context
+ * @returns: 1 if context is empty; 0 if not empty; <0 for failure
+ *
+ * This is for the actual log contents.  If it is empty, then there
+ * is no data at all in the log.
+ */
+static int is_read_empty(struct ipc_log_context *ilctxt)
+{
+	if (!ilctxt)
+		return -EINVAL;
+
+	return ((ilctxt->read_page == ilctxt->write_page) &&
+		(ilctxt->read_page->hdr.read_offset ==
+		 ilctxt->write_page->hdr.write_offset));
+}
+
+/**
+ * is_nd_read_equal_read - Return true if the non-destructive read is equal to
+ * the destructive read
+ *
+ * @ilctxt: logging context
+ * @returns: true if nd read is equal to read; false otherwise
+ */
+static bool is_nd_read_equal_read(struct ipc_log_context *ilctxt)
+{
+	uint16_t read_offset;
+	uint16_t nd_read_offset;
+
+	if (ilctxt->nd_read_page == ilctxt->read_page) {
+		read_offset = ilctxt->read_page->hdr.read_offset;
+		nd_read_offset = ilctxt->nd_read_page->hdr.nd_read_offset;
+
+		if (read_offset == nd_read_offset)
+			return true;
+	}
+
+	return false;
+}
+
+
+static struct ipc_log_page *get_next_page(struct ipc_log_context *ilctxt,
+					  struct ipc_log_page *cur_pg)
+{
+	struct ipc_log_page_header *p_pghdr;
+	struct ipc_log_page *pg = NULL;
+
+	if (!ilctxt || !cur_pg)
+		return NULL;
+
+	if (ilctxt->last_page == cur_pg)
+		return ilctxt->first_page;
+
+	p_pghdr = list_first_entry(&cur_pg->hdr.list,
+			struct ipc_log_page_header, list);
+	pg = container_of(p_pghdr, struct ipc_log_page, hdr);
+
+	return pg;
+}
+
+/**
+ * ipc_log_read - do non-destructive read of the log
+ *
+ * @ilctxt:  Logging context
+ * @data:  Data pointer to receive the data
+ * @data_size:  Number of bytes to read (must be <= bytes available in log)
+ *
+ * This read will update a runtime read pointer, but will not affect the actual
+ * contents of the log which allows for reading the logs continuously while
+ * debugging and if the system crashes, then the full logs can still be
+ * extracted.
+ */
+static void ipc_log_read(struct ipc_log_context *ilctxt,
+			 void *data, int data_size)
+{
+	int bytes_to_read;
+
+	bytes_to_read = MIN(LOG_PAGE_DATA_SIZE
+				- ilctxt->nd_read_page->hdr.nd_read_offset,
+			      data_size);
+
+	memcpy(data, (ilctxt->nd_read_page->data +
+		ilctxt->nd_read_page->hdr.nd_read_offset), bytes_to_read);
+
+	if (bytes_to_read != data_size) {
+		/* not enough space, wrap read to next page */
+		ilctxt->nd_read_page->hdr.nd_read_offset = 0;
+		ilctxt->nd_read_page = get_next_page(ilctxt,
+			ilctxt->nd_read_page);
+		BUG_ON(ilctxt->nd_read_page == NULL);
+
+		memcpy((data + bytes_to_read),
+			   (ilctxt->nd_read_page->data +
+			ilctxt->nd_read_page->hdr.nd_read_offset),
+			   (data_size - bytes_to_read));
+		bytes_to_read = (data_size - bytes_to_read);
+	}
+	ilctxt->nd_read_page->hdr.nd_read_offset += bytes_to_read;
+}
+
+/**
+ * ipc_log_drop - do destructive read of the log
+ *
+ * @ilctxt:  Logging context
+ * @data:  Data pointer to receive the data (or NULL)
+ * @data_size:  Number of bytes to read (must be <= bytes available in log)
+ */
+static void ipc_log_drop(struct ipc_log_context *ilctxt, void *data,
+		int data_size)
+{
+	int bytes_to_read;
+	bool push_nd_read;
+
+	bytes_to_read = MIN(LOG_PAGE_DATA_SIZE
+				- ilctxt->read_page->hdr.read_offset,
+			      data_size);
+	if (data)
+		memcpy(data, (ilctxt->read_page->data +
+			ilctxt->read_page->hdr.read_offset), bytes_to_read);
+
+	if (bytes_to_read != data_size) {
+		/* not enough space, wrap read to next page */
+		push_nd_read = is_nd_read_equal_read(ilctxt);
+
+		ilctxt->read_page->hdr.read_offset = 0;
+		if (push_nd_read) {
+			ilctxt->read_page->hdr.nd_read_offset = 0;
+			ilctxt->read_page = get_next_page(ilctxt,
+				ilctxt->read_page);
+			BUG_ON(ilctxt->read_page == NULL);
+			ilctxt->nd_read_page = ilctxt->read_page;
+		} else {
+			ilctxt->read_page = get_next_page(ilctxt,
+				ilctxt->read_page);
+			BUG_ON(ilctxt->read_page == NULL);
+		}
+
+		if (data)
+			memcpy((data + bytes_to_read),
+				   (ilctxt->read_page->data +
+				ilctxt->read_page->hdr.read_offset),
+				   (data_size - bytes_to_read));
+
+		bytes_to_read = (data_size - bytes_to_read);
+	}
+
+	/* update non-destructive read pointer if necessary */
+	push_nd_read = is_nd_read_equal_read(ilctxt);
+	ilctxt->read_page->hdr.read_offset += bytes_to_read;
+	ilctxt->write_avail += data_size;
+
+	if (push_nd_read)
+		ilctxt->nd_read_page->hdr.nd_read_offset += bytes_to_read;
+}
+
+/**
+ * msg_read - Reads a message.
+ *
+ * If a message is read successfully, then the message context
+ * will be set to:
+ *     .hdr    message header .size and .type values
+ *     .offset beginning of message data
+ *
+ * @ilctxt	Logging context
+ * @ectxt   Message context
+ *
+ * @returns 0 - no message available; >0 message size; <0 error
+ */
+static int msg_read(struct ipc_log_context *ilctxt,
+	     struct encode_context *ectxt)
+{
+	struct tsv_header hdr;
+
+	if (!ectxt)
+		return -EINVAL;
+
+	if (is_nd_read_empty(ilctxt))
+		return 0;
+
+	ipc_log_read(ilctxt, &hdr, sizeof(hdr));
+	ectxt->hdr.type = hdr.type;
+	ectxt->hdr.size = hdr.size;
+	ectxt->offset = sizeof(hdr);
+	ipc_log_read(ilctxt, (ectxt->buff + ectxt->offset),
+			 (int)hdr.size);
+
+	return sizeof(hdr) + (int)hdr.size;
+}
+
+/**
+ * msg_drop - Drops a message.
+ *
+ * @ilctxt	Logging context
+ */
+static void msg_drop(struct ipc_log_context *ilctxt)
+{
+	struct tsv_header hdr;
+
+	if (!is_read_empty(ilctxt)) {
+		ipc_log_drop(ilctxt, &hdr, sizeof(hdr));
+		ipc_log_drop(ilctxt, NULL, (int)hdr.size);
+	}
+}
+
+/*
+ * Commits messages to the FIFO.  If the FIFO is full, then enough
+ * messages are dropped to create space for the new message.
+ */
+void ipc_log_write(void *ctxt, struct encode_context *ectxt)
+{
+	struct ipc_log_context *ilctxt = (struct ipc_log_context *)ctxt;
+	int bytes_to_write;
+	unsigned long flags;
+
+	if (!ilctxt || !ectxt) {
+		pr_err("%s: Invalid ipc_log or encode context\n", __func__);
+		return;
+	}
+
+	read_lock_irqsave(&context_list_lock_lha1, flags);
+	spin_lock(&ilctxt->context_lock_lhb1);
+	while (ilctxt->write_avail <= ectxt->offset)
+		msg_drop(ilctxt);
+
+	bytes_to_write = MIN(LOG_PAGE_DATA_SIZE
+				- ilctxt->write_page->hdr.write_offset,
+				ectxt->offset);
+	memcpy((ilctxt->write_page->data +
+		ilctxt->write_page->hdr.write_offset),
+		ectxt->buff, bytes_to_write);
+
+	if (bytes_to_write != ectxt->offset) {
+		uint64_t t_now = sched_clock();
+
+		ilctxt->write_page->hdr.write_offset += bytes_to_write;
+		ilctxt->write_page->hdr.end_time = t_now;
+
+		ilctxt->write_page = get_next_page(ilctxt, ilctxt->write_page);
+		BUG_ON(ilctxt->write_page == NULL);
+		ilctxt->write_page->hdr.write_offset = 0;
+		ilctxt->write_page->hdr.start_time = t_now;
+		memcpy((ilctxt->write_page->data +
+			ilctxt->write_page->hdr.write_offset),
+		       (ectxt->buff + bytes_to_write),
+		       (ectxt->offset - bytes_to_write));
+		bytes_to_write = (ectxt->offset - bytes_to_write);
+	}
+	ilctxt->write_page->hdr.write_offset += bytes_to_write;
+	ilctxt->write_avail -= ectxt->offset;
+	complete(&ilctxt->read_avail);
+	spin_unlock(&ilctxt->context_lock_lhb1);
+	read_unlock_irqrestore(&context_list_lock_lha1, flags);
+}
+EXPORT_SYMBOL(ipc_log_write);
+
+/*
+ * Starts a new message after which you can add serialized data and
+ * then complete the message by calling msg_encode_end().
+ */
+void msg_encode_start(struct encode_context *ectxt, uint32_t type)
+{
+	if (!ectxt) {
+		pr_err("%s: Invalid encode context\n", __func__);
+		return;
+	}
+
+	ectxt->hdr.type = type;
+	ectxt->hdr.size = 0;
+	ectxt->offset = sizeof(ectxt->hdr);
+}
+EXPORT_SYMBOL(msg_encode_start);
+
+/*
+ * Completes the message
+ */
+void msg_encode_end(struct encode_context *ectxt)
+{
+	if (!ectxt) {
+		pr_err("%s: Invalid encode context\n", __func__);
+		return;
+	}
+
+	/* finalize data size */
+	ectxt->hdr.size = ectxt->offset - sizeof(ectxt->hdr);
+	BUG_ON(ectxt->hdr.size > MAX_MSG_SIZE);
+	memcpy(ectxt->buff, &ectxt->hdr, sizeof(ectxt->hdr));
+}
+EXPORT_SYMBOL(msg_encode_end);
+
+/*
+ * Helper function used to write data to a message context.
+ *
+ * @ectxt context initialized by calling msg_encode_start()
+ * @data  data to write
+ * @size  number of bytes of data to write
+ */
+static inline int tsv_write_data(struct encode_context *ectxt,
+				 void *data, uint32_t size)
+{
+	if (!ectxt) {
+		pr_err("%s: Invalid encode context\n", __func__);
+		return -EINVAL;
+	}
+	if ((ectxt->offset + size) > MAX_MSG_SIZE) {
+		pr_err("%s: No space to encode further\n", __func__);
+		return -EINVAL;
+	}
+
+	memcpy((void *)(ectxt->buff + ectxt->offset), data, size);
+	ectxt->offset += size;
+	return 0;
+}
+
+/*
+ * Helper function that writes a type to the context.
+ *
+ * @ectxt context initialized by calling msg_encode_start()
+ * @type  primitive type
+ * @size  size of primitive in bytes
+ */
+static inline int tsv_write_header(struct encode_context *ectxt,
+				   uint32_t type, uint32_t size)
+{
+	struct tsv_header hdr;
+
+	hdr.type = (unsigned char)type;
+	hdr.size = (unsigned char)size;
+	return tsv_write_data(ectxt, &hdr, sizeof(hdr));
+}
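+
+/*
+ * Illustrative wire layout produced by the helpers above for a string
+ * message as built by ipc_log_string() below:
+ *
+ *   [outer tsv_header, type TSV_TYPE_STRING, size of full payload]
+ *   [tsv_header TSV_TYPE_TIMESTAMP][8-byte sched_clock() value]
+ *   [tsv_header TSV_TYPE_QTIMER][8-byte QTimer count]
+ *   [tsv_header TSV_TYPE_BYTE_ARRAY][formatted string bytes]
+ */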
+
+/*
+ * Writes the current timestamp count.
+ *
+ * @ectxt   context initialized by calling msg_encode_start()
+ */
+int tsv_timestamp_write(struct encode_context *ectxt)
+{
+	int ret;
+	uint64_t t_now = sched_clock();
+
+	ret = tsv_write_header(ectxt, TSV_TYPE_TIMESTAMP, sizeof(t_now));
+	if (ret)
+		return ret;
+	return tsv_write_data(ectxt, &t_now, sizeof(t_now));
+}
+EXPORT_SYMBOL(tsv_timestamp_write);
+
+/*
+ * Writes the current QTimer timestamp count.
+ *
+ * @ectxt   context initialized by calling msg_encode_start()
+ */
+int tsv_qtimer_write(struct encode_context *ectxt)
+{
+	int ret;
+	uint64_t t_now = arch_counter_get_cntvct();
+
+	ret = tsv_write_header(ectxt, TSV_TYPE_QTIMER, sizeof(t_now));
+	if (ret)
+		return ret;
+	return tsv_write_data(ectxt, &t_now, sizeof(t_now));
+}
+EXPORT_SYMBOL(tsv_qtimer_write);
+
+/*
+ * Writes a data pointer.
+ *
+ * @ectxt   context initialized by calling msg_encode_start()
+ * @pointer pointer value to write
+ */
+int tsv_pointer_write(struct encode_context *ectxt, void *pointer)
+{
+	int ret;
+
+	ret = tsv_write_header(ectxt, TSV_TYPE_POINTER, sizeof(pointer));
+	if (ret)
+		return ret;
+	return tsv_write_data(ectxt, &pointer, sizeof(pointer));
+}
+EXPORT_SYMBOL(tsv_pointer_write);
+
+/*
+ * Writes a 32-bit integer value.
+ *
+ * @ectxt context initialized by calling msg_encode_start()
+ * @n     integer to write
+ */
+int tsv_int32_write(struct encode_context *ectxt, int32_t n)
+{
+	int ret;
+
+	ret = tsv_write_header(ectxt, TSV_TYPE_INT32, sizeof(n));
+	if (ret)
+		return ret;
+	return tsv_write_data(ectxt, &n, sizeof(n));
+}
+EXPORT_SYMBOL(tsv_int32_write);
+
+/*
+ * Writes a byte array.
+ *
+ * @ectxt context initialized by calling msg_write_start()
+ * @data  Beginning address of data
+ * @data_size Size of data to be written
+ */
+int tsv_byte_array_write(struct encode_context *ectxt,
+			 void *data, int data_size)
+{
+	int ret;
+
+	ret = tsv_write_header(ectxt, TSV_TYPE_BYTE_ARRAY, data_size);
+	if (ret)
+		return ret;
+	return tsv_write_data(ectxt, data, data_size);
+}
+EXPORT_SYMBOL(tsv_byte_array_write);
+
+/*
+ * Helper function to log a string
+ *
+ * @ilctxt ipc_log_context created using ipc_log_context_create()
+ * @fmt Data specified using format specifiers
+ */
+int ipc_log_string(void *ilctxt, const char *fmt, ...)
+{
+	struct encode_context ectxt;
+	int avail_size, data_size, hdr_size = sizeof(struct tsv_header);
+	va_list arg_list;
+
+	if (!ilctxt)
+		return -EINVAL;
+
+	msg_encode_start(&ectxt, TSV_TYPE_STRING);
+	tsv_timestamp_write(&ectxt);
+	tsv_qtimer_write(&ectxt);
+	avail_size = (MAX_MSG_SIZE - (ectxt.offset + hdr_size));
+	va_start(arg_list, fmt);
+	data_size = vscnprintf((ectxt.buff + ectxt.offset + hdr_size),
+				avail_size, fmt, arg_list);
+	va_end(arg_list);
+	tsv_write_header(&ectxt, TSV_TYPE_BYTE_ARRAY, data_size);
+	ectxt.offset += data_size;
+	msg_encode_end(&ectxt);
+	ipc_log_write(ilctxt, &ectxt);
+	return 0;
+}
+EXPORT_SYMBOL(ipc_log_string);
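+
+/*
+ * Minimal usage sketch; the context name and the variables in the
+ * format arguments are illustrative:
+ *
+ *   void *ctxt = ipc_log_context_create(2, "my_driver", 0);
+ *
+ *   if (ctxt)
+ *           ipc_log_string(ctxt, "irq %d handled in %lu us", irq, us);
+ */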
+
+/**
+ * ipc_log_extract - Reads and deserializes log
+ *
+ * @ctxt:  logging context
+ * @buff:    buffer to receive the data
+ * @size:    size of the buffer
+ * @returns: 0 if no data read; >0 number of bytes read; < 0 error
+ *
+ * If no data is available to be read, then the ilctxt::read_avail
+ * completion is reinitialized.  This allows clients to block
+ * until new log data is saved.
+ */
+int ipc_log_extract(void *ctxt, char *buff, int size)
+{
+	struct encode_context ectxt;
+	struct decode_context dctxt;
+	void (*deserialize_func)(struct encode_context *ectxt,
+				 struct decode_context *dctxt);
+	struct ipc_log_context *ilctxt = (struct ipc_log_context *)ctxt;
+	unsigned long flags;
+
+	if (size < MAX_MSG_DECODED_SIZE)
+		return -EINVAL;
+
+	dctxt.output_format = OUTPUT_DEBUGFS;
+	dctxt.buff = buff;
+	dctxt.size = size;
+	read_lock_irqsave(&context_list_lock_lha1, flags);
+	spin_lock(&ilctxt->context_lock_lhb1);
+	while (dctxt.size >= MAX_MSG_DECODED_SIZE &&
+	       !is_nd_read_empty(ilctxt)) {
+		msg_read(ilctxt, &ectxt);
+		deserialize_func = get_deserialization_func(ilctxt,
+							ectxt.hdr.type);
+		spin_unlock(&ilctxt->context_lock_lhb1);
+		read_unlock_irqrestore(&context_list_lock_lha1, flags);
+		if (deserialize_func)
+			deserialize_func(&ectxt, &dctxt);
+		else
+			pr_err("%s: unknown message 0x%x\n",
+				__func__, ectxt.hdr.type);
+		read_lock_irqsave(&context_list_lock_lha1, flags);
+		spin_lock(&ilctxt->context_lock_lhb1);
+	}
+	if ((size - dctxt.size) == 0)
+		reinit_completion(&ilctxt->read_avail);
+	spin_unlock(&ilctxt->context_lock_lhb1);
+	read_unlock_irqrestore(&context_list_lock_lha1, flags);
+	return size - dctxt.size;
+}
+EXPORT_SYMBOL(ipc_log_extract);
+
+/*
+ * Helper function used to read data from a message context.
+ *
+ * @ectxt  context initialized by calling msg_read()
+ * @data  data to read
+ * @size  number of bytes of data to read
+ */
+static void tsv_read_data(struct encode_context *ectxt,
+			  void *data, uint32_t size)
+{
+	BUG_ON((ectxt->offset + size) > MAX_MSG_SIZE);
+	memcpy(data, (ectxt->buff + ectxt->offset), size);
+	ectxt->offset += size;
+}
+
+/*
+ * Helper function that reads a type from the context and updates the
+ * context pointers.
+ *
+ * @ectxt  context initialized by calling msg_read()
+ * @hdr   type header
+ */
+static void tsv_read_header(struct encode_context *ectxt,
+			    struct tsv_header *hdr)
+{
+	BUG_ON((ectxt->offset + sizeof(*hdr)) > MAX_MSG_SIZE);
+	memcpy(hdr, (ectxt->buff + ectxt->offset), sizeof(*hdr));
+	ectxt->offset += sizeof(*hdr);
+}
+
+/*
+ * Reads a timestamp.
+ *
+ * @ectxt   context initialized by calling msg_read()
+ * @dctxt   deserialization context
+ * @format  output format (appended to the "%6u.%09lu" timestamp)
+ */
+void tsv_timestamp_read(struct encode_context *ectxt,
+			struct decode_context *dctxt, const char *format)
+{
+	struct tsv_header hdr;
+	uint64_t val;
+	unsigned long nanosec_rem;
+
+	tsv_read_header(ectxt, &hdr);
+	BUG_ON(hdr.type != TSV_TYPE_TIMESTAMP);
+	tsv_read_data(ectxt, &val, sizeof(val));
+	nanosec_rem = do_div(val, 1000000000U);
+	IPC_SPRINTF_DECODE(dctxt, "[%6u.%09lu%s/",
+			(unsigned)val, nanosec_rem, format);
+}
+EXPORT_SYMBOL(tsv_timestamp_read);
+
+/*
+ * Reads a QTimer timestamp.
+ *
+ * @ectxt   context initialized by calling msg_read()
+ * @dctxt   deserialization context
+ * @format output format (appended to %#18llx timestamp format)
+ */
+void tsv_qtimer_read(struct encode_context *ectxt,
+		     struct decode_context *dctxt, const char *format)
+{
+	struct tsv_header hdr;
+	uint64_t val;
+
+	tsv_read_header(ectxt, &hdr);
+	BUG_ON(hdr.type != TSV_TYPE_QTIMER);
+	tsv_read_data(ectxt, &val, sizeof(val));
+
+	/*
+	 * This gives 16 hex digits of output. The # prefix prepends
+	 * a 0x, and these characters count as part of the number.
+	 */
+	IPC_SPRINTF_DECODE(dctxt, "%#18llx]%s", val, format);
+}
+EXPORT_SYMBOL(tsv_qtimer_read);
+
+/*
+ * Reads a data pointer.
+ *
+ * @ectxt   context initialized by calling msg_read()
+ * @dctxt   deserialization context
+ * @format output format
+ */
+void tsv_pointer_read(struct encode_context *ectxt,
+		      struct decode_context *dctxt, const char *format)
+{
+	struct tsv_header hdr;
+	void *val;
+
+	tsv_read_header(ectxt, &hdr);
+	BUG_ON(hdr.type != TSV_TYPE_POINTER);
+	tsv_read_data(ectxt, &val, sizeof(val));
+
+	IPC_SPRINTF_DECODE(dctxt, format, val);
+}
+EXPORT_SYMBOL(tsv_pointer_read);
+
+/*
+ * Reads a 32-bit integer value.
+ *
+ * @ectxt   context initialized by calling msg_read()
+ * @dctxt   deserialization context
+ * @format output format
+ */
+int32_t tsv_int32_read(struct encode_context *ectxt,
+		       struct decode_context *dctxt, const char *format)
+{
+	struct tsv_header hdr;
+	int32_t val;
+
+	tsv_read_header(ectxt, &hdr);
+	BUG_ON(hdr.type != TSV_TYPE_INT32);
+	tsv_read_data(ectxt, &val, sizeof(val));
+
+	IPC_SPRINTF_DECODE(dctxt, format, val);
+	return val;
+}
+EXPORT_SYMBOL(tsv_int32_read);
+
+/*
+ * Reads a byte array/string.
+ *
+ * @ectxt   context initialized by calling msg_read()
+ * @dctxt   deserialization context
+ * @format output format
+ */
+void tsv_byte_array_read(struct encode_context *ectxt,
+			 struct decode_context *dctxt, const char *format)
+{
+	struct tsv_header hdr;
+
+	tsv_read_header(ectxt, &hdr);
+	BUG_ON(hdr.type != TSV_TYPE_BYTE_ARRAY);
+	tsv_read_data(ectxt, dctxt->buff, hdr.size);
+	dctxt->buff += hdr.size;
+	dctxt->size -= hdr.size;
+}
+EXPORT_SYMBOL(tsv_byte_array_read);
+
+int add_deserialization_func(void *ctxt, int type,
+			void (*dfunc)(struct encode_context *,
+				      struct decode_context *))
+{
+	struct ipc_log_context *ilctxt = (struct ipc_log_context *)ctxt;
+	struct dfunc_info *df_info;
+	unsigned long flags;
+
+	if (!ilctxt || !dfunc)
+		return -EINVAL;
+
+	df_info = kmalloc(sizeof(struct dfunc_info), GFP_KERNEL);
+	if (!df_info)
+		return -ENOSPC;
+
+	read_lock_irqsave(&context_list_lock_lha1, flags);
+	spin_lock(&ilctxt->context_lock_lhb1);
+	df_info->type = type;
+	df_info->dfunc = dfunc;
+	list_add_tail(&df_info->list, &ilctxt->dfunc_info_list);
+	spin_unlock(&ilctxt->context_lock_lhb1);
+	read_unlock_irqrestore(&context_list_lock_lha1, flags);
+	return 0;
+}
+EXPORT_SYMBOL(add_deserialization_func);
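+
+/*
+ * Illustrative registration of a custom deserializer (MY_TSV_TYPE and
+ * my_dfunc are hypothetical names):
+ *
+ *   static void my_dfunc(struct encode_context *ectxt,
+ *                        struct decode_context *dctxt)
+ *   {
+ *           tsv_timestamp_read(ectxt, dctxt, "");
+ *           tsv_int32_read(ectxt, dctxt, " value: %d\n");
+ *   }
+ *
+ *   add_deserialization_func(ctxt, MY_TSV_TYPE, my_dfunc);
+ */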
+
+static void *get_deserialization_func(struct ipc_log_context *ilctxt,
+				      int type)
+{
+	struct dfunc_info *df_info = NULL;
+
+	if (!ilctxt)
+		return NULL;
+
+	list_for_each_entry(df_info, &ilctxt->dfunc_info_list, list) {
+		if (df_info->type == type)
+			return df_info->dfunc;
+	}
+	return NULL;
+}
+
+/**
+ * ipc_log_context_create: Create a debug log context
+ *                         Should not be called from atomic context
+ *
+ * @max_num_pages: Number of pages of logging space required (max. 10)
+ * @mod_name     : Name of the directory entry under DEBUGFS
+ * @user_version : Version number of user-defined message formats
+ *
+ * returns context id on success, NULL on failure
+ */
+void *ipc_log_context_create(int max_num_pages,
+			     const char *mod_name, uint16_t user_version)
+{
+	struct ipc_log_context *ctxt;
+	struct ipc_log_page *pg = NULL;
+	int page_cnt;
+	unsigned long flags;
+
+	ctxt = kzalloc(sizeof(struct ipc_log_context), GFP_KERNEL);
+	if (!ctxt) {
+		pr_err("%s: cannot create ipc_log_context\n", __func__);
+		return NULL;
+	}
+
+	init_completion(&ctxt->read_avail);
+	INIT_LIST_HEAD(&ctxt->page_list);
+	INIT_LIST_HEAD(&ctxt->dfunc_info_list);
+	spin_lock_init(&ctxt->context_lock_lhb1);
+	for (page_cnt = 0; page_cnt < max_num_pages; page_cnt++) {
+		pg = kzalloc(sizeof(struct ipc_log_page), GFP_KERNEL);
+		if (!pg) {
+			pr_err("%s: cannot create ipc_log_page\n", __func__);
+			goto release_ipc_log_context;
+		}
+		pg->hdr.log_id = (uint64_t)(uintptr_t)ctxt;
+		pg->hdr.page_num = LOG_PAGE_FLAG | page_cnt;
+		pg->hdr.ctx_offset = (int64_t)((uint64_t)(uintptr_t)ctxt -
+			(uint64_t)(uintptr_t)&pg->hdr);
+
+		/* set magic last to signal that page init is complete */
+		pg->hdr.magic = IPC_LOGGING_MAGIC_NUM;
+		pg->hdr.nmagic = ~(IPC_LOGGING_MAGIC_NUM);
+
+		spin_lock_irqsave(&ctxt->context_lock_lhb1, flags);
+		list_add_tail(&pg->hdr.list, &ctxt->page_list);
+		spin_unlock_irqrestore(&ctxt->context_lock_lhb1, flags);
+	}
+
+	ctxt->log_id = (uint64_t)(uintptr_t)ctxt;
+	ctxt->version = IPC_LOG_VERSION;
+	strlcpy(ctxt->name, mod_name, IPC_LOG_MAX_CONTEXT_NAME_LEN);
+	ctxt->user_version = user_version;
+	ctxt->first_page = get_first_page(ctxt);
+	ctxt->last_page = pg;
+	ctxt->write_page = ctxt->first_page;
+	ctxt->read_page = ctxt->first_page;
+	ctxt->nd_read_page = ctxt->first_page;
+	ctxt->write_avail = max_num_pages * LOG_PAGE_DATA_SIZE;
+	ctxt->header_size = sizeof(struct ipc_log_page_header);
+	create_ctx_debugfs(ctxt, mod_name);
+
+	/* set magic last to signal context init is complete */
+	ctxt->magic = IPC_LOG_CONTEXT_MAGIC_NUM;
+	ctxt->nmagic = ~(IPC_LOG_CONTEXT_MAGIC_NUM);
+
+	write_lock_irqsave(&context_list_lock_lha1, flags);
+	list_add_tail(&ctxt->list, &ipc_log_context_list);
+	write_unlock_irqrestore(&context_list_lock_lha1, flags);
+	return (void *)ctxt;
+
+release_ipc_log_context:
+	while (page_cnt-- > 0) {
+		pg = get_first_page(ctxt);
+		list_del(&pg->hdr.list);
+		kfree(pg);
+	}
+	kfree(ctxt);
+	return NULL;
+}
+EXPORT_SYMBOL(ipc_log_context_create);
+
+/*
+ * Destroy debug log context
+ *
+ * @ctxt: debug log context created by calling ipc_log_context_create API.
+ */
+int ipc_log_context_destroy(void *ctxt)
+{
+	struct ipc_log_context *ilctxt = (struct ipc_log_context *)ctxt;
+	struct ipc_log_page *pg = NULL;
+	unsigned long flags;
+
+	if (!ilctxt)
+		return 0;
+
+	while (!list_empty(&ilctxt->page_list)) {
+		pg = get_first_page(ctxt);
+		list_del(&pg->hdr.list);
+		kfree(pg);
+	}
+
+	write_lock_irqsave(&context_list_lock_lha1, flags);
+	list_del(&ilctxt->list);
+	write_unlock_irqrestore(&context_list_lock_lha1, flags);
+
+	debugfs_remove_recursive(ilctxt->dent);
+
+	kfree(ilctxt);
+	return 0;
+}
+EXPORT_SYMBOL(ipc_log_context_destroy);
+
+static int __init ipc_logging_init(void)
+{
+	check_and_create_debugfs();
+	return 0;
+}
+
+module_init(ipc_logging_init);
+
+MODULE_DESCRIPTION("ipc logging");
+MODULE_LICENSE("GPL v2");
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/kernel/trace/ipc_logging_debug.c	2019-01-22 16:16:28.727293713 +0100
@@ -0,0 +1,184 @@
+/* Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/jiffies.h>
+#include <linux/debugfs.h>
+#include <linux/io.h>
+#include <linux/idr.h>
+#include <linux/string.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+#include <linux/delay.h>
+#include <linux/completion.h>
+#include <linux/ipc_logging.h>
+
+#include "ipc_logging_private.h"
+
+static DEFINE_MUTEX(ipc_log_debugfs_init_lock);
+static struct dentry *root_dent;
+
+static int debug_log(struct ipc_log_context *ilctxt,
+		     char *buff, int size, int cont)
+{
+	int i = 0;
+	int ret;
+
+	if (size < MAX_MSG_DECODED_SIZE) {
+		pr_err("%s: buffer size %d < %d\n", __func__, size,
+			MAX_MSG_DECODED_SIZE);
+		return -ENOMEM;
+	}
+	do {
+		i = ipc_log_extract(ilctxt, buff, size - 1);
+		if (cont && i == 0) {
+			ret = wait_for_completion_interruptible(
+				&ilctxt->read_avail);
+			if (ret < 0)
+				return ret;
+		}
+	} while (cont && i == 0);
+
+	return i;
+}
+
+/*
+ * VFS Read operation helper which dispatches the call to the debugfs
+ * read command stored in file->private_data.
+ *
+ * @file  File structure
+ * @buff   user buffer
+ * @count size of user buffer
+ * @ppos  file position to read from (only a value of 0 is accepted)
+ * @cont  1 = continuous mode (don't return 0 to signal end-of-file)
+ *
+ * @returns ==0 end of file
+ *           >0 number of bytes read
+ *           <0 error
+ */
+static ssize_t debug_read_helper(struct file *file, char __user *buff,
+				 size_t count, loff_t *ppos, int cont)
+{
+	struct ipc_log_context *ilctxt = file->private_data;
+	char *buffer;
+	int bsize;
+
+	buffer = kmalloc(count, GFP_KERNEL);
+	if (!buffer)
+		return -ENOMEM;
+
+	bsize = debug_log(ilctxt, buffer, count, cont);
+	if (bsize > 0) {
+		if (copy_to_user(buff, buffer, bsize)) {
+			kfree(buffer);
+			return -EFAULT;
+		}
+		*ppos += bsize;
+	}
+	kfree(buffer);
+	return bsize;
+}
+
+static ssize_t debug_read(struct file *file, char __user *buff,
+			  size_t count, loff_t *ppos)
+{
+	return debug_read_helper(file, buff, count, ppos, 0);
+}
+
+static ssize_t debug_read_cont(struct file *file, char __user *buff,
+			       size_t count, loff_t *ppos)
+{
+	return debug_read_helper(file, buff, count, ppos, 1);
+}
+
+static int debug_open(struct inode *inode, struct file *file)
+{
+	file->private_data = inode->i_private;
+	return 0;
+}
+
+static const struct file_operations debug_ops = {
+	.read = debug_read,
+	.open = debug_open,
+};
+
+static const struct file_operations debug_ops_cont = {
+	.read = debug_read_cont,
+	.open = debug_open,
+};
+
+static void debug_create(const char *name, mode_t mode,
+			 struct dentry *dent,
+			 struct ipc_log_context *ilctxt,
+			 const struct file_operations *fops)
+{
+	debugfs_create_file(name, mode, dent, ilctxt, fops);
+}
+
+static void dfunc_string(struct encode_context *ectxt,
+			 struct decode_context *dctxt)
+{
+	tsv_timestamp_read(ectxt, dctxt, "");
+	tsv_qtimer_read(ectxt, dctxt, " ");
+	tsv_byte_array_read(ectxt, dctxt, "");
+
+	/* add trailing \n if necessary */
+	if (*(dctxt->buff - 1) != '\n') {
+		if (dctxt->size) {
+			++dctxt->buff;
+			--dctxt->size;
+		}
+		*(dctxt->buff - 1) = '\n';
+	}
+}
+
+void check_and_create_debugfs(void)
+{
+	mutex_lock(&ipc_log_debugfs_init_lock);
+	if (!root_dent) {
+		root_dent = debugfs_create_dir("ipc_logging", 0);
+
+		if (IS_ERR(root_dent)) {
+			pr_err("%s: unable to create debugfs %ld\n",
+				__func__, PTR_ERR(root_dent));
+			root_dent = NULL;
+		}
+	}
+	mutex_unlock(&ipc_log_debugfs_init_lock);
+}
+EXPORT_SYMBOL(check_and_create_debugfs);
+
+void create_ctx_debugfs(struct ipc_log_context *ctxt,
+			const char *mod_name)
+{
+	if (!root_dent)
+		check_and_create_debugfs();
+
+	if (root_dent) {
+		ctxt->dent = debugfs_create_dir(mod_name, root_dent);
+		if (!IS_ERR(ctxt->dent)) {
+			debug_create("log", 0444, ctxt->dent,
+				     ctxt, &debug_ops);
+			debug_create("log_cont", 0444, ctxt->dent,
+				     ctxt, &debug_ops_cont);
+		}
+	}
+	add_deserialization_func((void *)ctxt,
+				 TSV_TYPE_STRING, dfunc_string);
+}
+EXPORT_SYMBOL(create_ctx_debugfs);
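+
+/*
+ * With debugfs mounted at /sys/kernel/debug (the mount point is an
+ * assumption), the logs created above can be extracted with:
+ *
+ *   cat /sys/kernel/debug/ipc_logging/<mod_name>/log
+ *   cat /sys/kernel/debug/ipc_logging/<mod_name>/log_cont
+ *
+ * log_cont blocks and keeps streaming new messages as they arrive.
+ */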
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/kernel/trace/ipc_logging_private.h	2019-01-22 16:16:28.727293713 +0100
@@ -0,0 +1,165 @@
+/* Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef _IPC_LOGGING_PRIVATE_H
+#define _IPC_LOGGING_PRIVATE_H
+
+#include <linux/ipc_logging.h>
+
+#define IPC_LOG_VERSION 0x0003
+#define IPC_LOG_MAX_CONTEXT_NAME_LEN 32
+
+/**
+ * struct ipc_log_page_header - Individual log page header
+ *
+ * @magic: Magic number (used for log extraction)
+ * @nmagic: Inverse of magic number (used for log extraction)
+ * @page_num: Index of page (0.. N - 1) (note top bit is always set)
+ * @read_offset:  Read offset in page
+ * @write_offset: Write offset in page (or 0xFFFF if full)
+ * @log_id: ID of logging context that owns this page
+ * @start_time:  Scheduler clock for first write time in page
+ * @end_time:  Scheduler clock for last write time in page
+ * @ctx_offset:  Signed offset from page to the logging context.  Used to
+ *               optimize ram-dump extraction.
+ *
+ * @list:  Linked list of pages that make up a log
+ * @nd_read_offset:  Non-destructive read offset used for debugfs
+ *
+ * The first part of the structure defines data that is used to extract the
+ * logs from a memory dump and elements in this section should not be changed
+ * or re-ordered.  New local data structures can be added to the end of the
+ * structure since they will be ignored by the extraction tool.
+ */
+struct ipc_log_page_header {
+	uint32_t magic;
+	uint32_t nmagic;
+	uint32_t page_num;
+	uint16_t read_offset;
+	uint16_t write_offset;
+	uint64_t log_id;
+	uint64_t start_time;
+	uint64_t end_time;
+	int64_t ctx_offset;
+
+	/* add local data structures after this point */
+	struct list_head list;
+	uint16_t nd_read_offset;
+};
+
+/**
+ * struct ipc_log_page - Individual log page
+ *
+ * @hdr: Log page header
+ * @data: Log data
+ *
+ * Each log consists of 1 to N log pages.  Data size is adjusted to always fit
+ * the structure into a single kernel page.
+ */
+struct ipc_log_page {
+	struct ipc_log_page_header hdr;
+	char data[PAGE_SIZE - sizeof(struct ipc_log_page_header)];
+};
+
+/**
+ * struct ipc_log_context - main logging context
+ *
+ * @magic:  Magic number (used for log extraction)
+ * @nmagic:  Inverse of magic number (used for log extraction)
+ * @version:  IPC Logging version of log format
+ * @user_version:  Version number for user-defined messages
+ * @header_size:  Size of the log header which is used to determine the offset
+ *                of ipc_log_page::data
+ * @log_id:  Log ID (assigned when log is created)
+ * @name:  Name of the log used to uniquely identify the log during extraction
+ *
+ * @list:  List of log contexts (struct ipc_log_context)
+ * @page_list:  List of log pages (struct ipc_log_page)
+ * @first_page:  First page in list of logging pages
+ * @last_page:  Last page in list of logging pages
+ * @write_page:  Current write page
+ * @read_page:  Current read page (for internal reads)
+ * @nd_read_page:  Current debugfs extraction page (non-destructive)
+ *
+ * @write_avail:  Number of bytes available to write in all pages
+ * @dent:  Debugfs node for run-time log extraction
+ * @dfunc_info_list:  List of deserialization functions
+ * @context_lock_lhb1:  Lock for entire structure
+ * @read_avail:  Completed when new data is added to the log
+ */
+struct ipc_log_context {
+	uint32_t magic;
+	uint32_t nmagic;
+	uint32_t version;
+	uint16_t user_version;
+	uint16_t header_size;
+	uint64_t log_id;
+	char name[IPC_LOG_MAX_CONTEXT_NAME_LEN];
+
+	/* add local data structures after this point */
+	struct list_head list;
+	struct list_head page_list;
+	struct ipc_log_page *first_page;
+	struct ipc_log_page *last_page;
+	struct ipc_log_page *write_page;
+	struct ipc_log_page *read_page;
+	struct ipc_log_page *nd_read_page;
+
+	uint32_t write_avail;
+	struct dentry *dent;
+	struct list_head dfunc_info_list;
+	spinlock_t context_lock_lhb1;
+	struct completion read_avail;
+};
+
+struct dfunc_info {
+	struct list_head list;
+	int type;
+	void (*dfunc) (struct encode_context *, struct decode_context *);
+};
+
+enum {
+	TSV_TYPE_INVALID,
+	TSV_TYPE_TIMESTAMP,
+	TSV_TYPE_POINTER,
+	TSV_TYPE_INT32,
+	TSV_TYPE_BYTE_ARRAY,
+	TSV_TYPE_QTIMER,
+};
+
+enum {
+	OUTPUT_DEBUGFS,
+};
+
+#define IPC_LOG_CONTEXT_MAGIC_NUM 0x25874452
+#define IPC_LOGGING_MAGIC_NUM 0x52784425
+#define MIN(x, y) ((x) < (y) ? (x) : (y))
+#define IS_MSG_TYPE(x) (((x) > TSV_TYPE_MSG_START) && \
+			((x) < TSV_TYPE_MSG_END))
+#define MAX_MSG_DECODED_SIZE (MAX_MSG_SIZE*4)
+
+#ifdef CONFIG_DEBUG_FS
+void check_and_create_debugfs(void);
+
+void create_ctx_debugfs(struct ipc_log_context *ctxt,
+			const char *mod_name);
+#else
+static inline void check_and_create_debugfs(void)
+{
+}
+
+static inline void create_ctx_debugfs(struct ipc_log_context *ctxt,
+				      const char *mod_name)
+{
+}
+#endif
+
+#endif
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/lib/fbxserial.c	2019-01-22 16:16:28.759294003 +0100
@@ -0,0 +1,195 @@
+#include <linux/compiler.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/vmalloc.h>
+#include <linux/crc32.h>
+#include <linux/slab.h>
+
+#include <asm/io.h>
+
+#include <linux/fbxserial.h>
+#include <linux/etherdevice.h>
+
+#define PFX "builtin-fbxserial: "
+
+/*
+ * default values to use in case the magic is wrong (no checksum in that case)
+ */
+static void __init
+fbxserialinfo_use_default(struct fbx_serial *serial)
+{
+	memset(serial, 0, sizeof (*serial));
+	serial->magic = FBXSERIAL_MAGIC;
+	serial->struct_version = FBXSERIAL_VERSION;
+	serial->len = sizeof (*serial);
+	serial->manufacturer = '_';
+	eth_random_addr(serial->mac_addr_base);
+	serial->mac_count = 1;
+
+	printk(KERN_WARNING PFX "using default serial infos with MAC %pM\n",
+	       serial->mac_addr_base);
+}
+
+/*
+ * add trailing 0 for bundle string here.
+ */
+static void __init
+bundle_fixup(struct fbx_serial *serial)
+{
+	struct fbx_serial_extinfo *p;
+	int i;
+
+	for (i = 0; i < be32_to_cpu(serial->extinfo_count); i++) {
+
+		if (i >= EXTINFO_MAX_COUNT)
+			break;
+
+		p = &serial->extinfos[i];
+		if (be32_to_cpu(p->type) == EXTINFO_TYPE_EXTDEV &&
+		    be32_to_cpu(p->u.extdev.type) == EXTDEV_TYPE_BUNDLE) {
+			int size;
+
+			size = sizeof (p->u.extdev.serial);
+			p->u.extdev.serial[size - 1] = 0;
+		}
+	}
+}
+
+/*
+ * called from arch code early in the boot sequence. This function
+ * returns 1 in case the serial info is invalid/unreadable and default
+ * values have been used.
+ */
+int __init
+fbxserialinfo_read(const void *data, struct fbx_serial *out)
+{
+	uint32_t sum;
+
+	if (!data) {
+		printk(KERN_NOTICE PFX "no serial data\n");
+		goto out_default;
+	}
+
+	/*
+	 * get partial serial data from flash/whatever.
+	 */
+	memcpy(out, data, sizeof (*out));
+
+	/* check magic first */
+	if (be32_to_cpu(out->magic) != FBXSERIAL_MAGIC) {
+		printk(KERN_NOTICE PFX "invalid magic (%08x, expected %08x), "
+			"using defaults!\n", be32_to_cpu(out->magic),
+		       FBXSERIAL_MAGIC);
+		goto out_default;
+	}
+
+	/* fetch size for which we have to check CRC */
+	if (be32_to_cpu(out->len) > FBXSERIAL_MAX_SIZE) {
+		printk(KERN_NOTICE PFX "structure size too big (%d), "
+		       "using defaults!\n", be32_to_cpu(out->len));
+		goto out_default;
+	}
+
+	/* compute and check checksum */
+	sum = crc32(0, data + 4, be32_to_cpu(out->len) - 4);
+
+	if (be32_to_cpu(out->crc32) != sum) {
+		printk(KERN_NOTICE PFX "invalid checksum (%08x, "
+		       "expected %08x), using defaults!\n", sum,
+		       be32_to_cpu(out->crc32));
+		goto out_default;
+	}
+
+	printk(KERN_INFO PFX "Found valid serial info!\n");
+	bundle_fixup(out);
+	return 0;
+
+ out_default:
+	fbxserialinfo_use_default(out);
+	bundle_fixup(out);
+	return 1;
+}
+
+void
+fbxserialinfo_get_random(unsigned char *data, unsigned int len)
+{
+	const struct fbx_serial *s;
+
+	memset(data, 0, 6);
+	s = arch_get_fbxserial();
+	if (WARN(!s, "arch_get_fbxserial returned NULL"))
+		return;
+
+	if (len > sizeof (s->random_data))
+		len = sizeof (s->random_data);
+
+	memcpy(data, s->random_data, len);
+}
+EXPORT_SYMBOL(fbxserialinfo_get_random);
+
+static u8 *mac_table;
+
+static void inc_mac(u8 *mac, int count)
+{
+	int index = 5;
+	int overflow;
+
+	do {
+		unsigned int val = mac[index] + count;
+
+		overflow = val >> 8;
+		mac[index] = val;
+		count = (count + 255) >> 8;
+		--index;
+	} while (index >= 0 && overflow);
+}
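+
+/*
+ * Worked example (illustrative): starting from base 00:07:cb:00:00:fe,
+ * inc_mac(mac, 3) overflows the last byte (0xfe + 3 = 0x101), carries
+ * one into the previous byte and yields 00:07:cb:00:01:01.
+ */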
+
+static int gen_mac_table(const struct fbx_serial *s)
+{
+	int i;
+
+	mac_table = kmalloc(6 * s->mac_count, GFP_KERNEL);
+	if (!mac_table)
+		return -ENOMEM;
+
+	for (i = 0; i < s->mac_count; ++i) {
+		u8 *mac = &mac_table[6 * i];
+
+		memcpy(mac, s->mac_addr_base, 6);
+		inc_mac(mac, i);
+	}
+	return 0;
+}
+
+const void *
+fbxserialinfo_get_mac_addr(unsigned int index)
+{
+	const struct fbx_serial *s;
+
+	s = arch_get_fbxserial();
+
+	if (!s) {
+		pr_warn(PFX "no serial available: using default.\n");
+		goto default_mac;
+	}
+
+	if (index >= s->mac_count) {
+		pr_warn(PFX "mac index %d too high: using default.\n",
+			index);
+		goto default_mac;
+	}
+
+	if (!mac_table) {
+		int error = gen_mac_table(s);
+		if (error) {
+			pr_err(PFX "gen_mac_table() failed: using default.\n");
+			goto default_mac;
+		}
+	}
+
+	return &mac_table[6 * index];
+
+default_mac:
+	return "\x00\x07\xcb\x00\x00\xfd";
+}
+EXPORT_SYMBOL(fbxserialinfo_get_mac_addr);
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/lib/hash.c	2019-01-22 16:16:28.763294039 +0100
@@ -0,0 +1,39 @@
+/* General purpose hashing library
+ *
+ * This is the start of a kernel hashing library, which can be extended
+ * with further algorithms in the future. arch_fast_hash{2,}() will
+ * eventually resolve to an architecture-optimized implementation.
+ *
+ * Copyright 2013 Francesco Fusco <ffusco@redhat.com>
+ * Copyright 2013 Daniel Borkmann <dborkman@redhat.com>
+ * Copyright 2013 Thomas Graf <tgraf@redhat.com>
+ * Licensed under the GNU General Public License, version 2.0 (GPLv2)
+ */
+
+#include <linux/jhash.h>
+#include <linux/hash.h>
+#include <linux/cache.h>
+
+static struct fast_hash_ops arch_hash_ops __read_mostly = {
+	.hash  = jhash,
+	.hash2 = jhash2,
+};
+
+u32 arch_fast_hash(const void *data, u32 len, u32 seed)
+{
+	return arch_hash_ops.hash(data, len, seed);
+}
+EXPORT_SYMBOL_GPL(arch_fast_hash);
+
+u32 arch_fast_hash2(const u32 *data, u32 len, u32 seed)
+{
+	return arch_hash_ops.hash2(data, len, seed);
+}
+EXPORT_SYMBOL_GPL(arch_fast_hash2);
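+
+/*
+ * Usage sketch: both wrappers follow the jhash()/jhash2() calling
+ * convention of (data, length, seed), with length in bytes for
+ * arch_fast_hash() and in u32 words for arch_fast_hash2():
+ *
+ *   u32 h  = arch_fast_hash(buf, buf_len, 0);
+ *   u32 h2 = arch_fast_hash2(words, n_words, 0);
+ */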
+
+static int __init hashlib_init(void)
+{
+	setup_arch_fast_hash(&arch_hash_ops);
+	return 0;
+}
+early_initcall(hashlib_init);
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/lib/Kconfig.ubsan	2019-01-22 16:16:28.751293930 +0100
@@ -0,0 +1,29 @@
+config ARCH_HAS_UBSAN_SANITIZE_ALL
+	bool
+
+config UBSAN
+	bool "Undefined behaviour sanity checker"
+	help
+	  This option enables the undefined behaviour sanity checker.
+	  Compile-time instrumentation is used to detect various undefined
+	  behaviours at runtime. Various types of checks may be enabled
+	  via the boot parameter ubsan_handle (see Documentation/ubsan.txt).
+
+config UBSAN_SANITIZE_ALL
+	bool "Enable instrumentation for the entire kernel"
+	depends on UBSAN
+	depends on ARCH_HAS_UBSAN_SANITIZE_ALL
+	default y
+	help
+	  This option activates instrumentation for the entire kernel.
+	  If you don't enable this option, you have to explicitly specify
+	  UBSAN_SANITIZE := y for the files/directories you want to check for UB.
+
+config UBSAN_ALIGNMENT
+	bool "Enable checking of pointer alignment"
+	depends on UBSAN
+	default y if !HAVE_EFFICIENT_UNALIGNED_ACCESS
+	help
+	  This option enables detection of unaligned memory accesses.
+	  Enabling this option on architectures that support unaligned
+	  accesses may produce a lot of false positives.
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/lib/qmi_encdec.c	2019-10-29 09:26:25.673223044 +0100
@@ -0,0 +1,880 @@
+/* Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/io.h>
+#include <linux/string.h>
+#include <linux/qmi_encdec.h>
+
+#include "qmi_encdec_priv.h"
+
+#define TLV_LEN_SIZE sizeof(uint16_t)
+#define TLV_TYPE_SIZE sizeof(uint8_t)
+#define OPTIONAL_TLV_TYPE_START 0x10
+
+#ifdef CONFIG_QMI_ENCDEC_DEBUG
+
+#define qmi_encdec_dump(prefix_str, buf, buf_len) do { \
+	const u8 *ptr = buf; \
+	int i, linelen, remaining = buf_len; \
+	int rowsize = 16, groupsize = 1; \
+	unsigned char linebuf[256]; \
+	for (i = 0; i < buf_len; i += rowsize) { \
+		linelen = min(remaining, rowsize); \
+		remaining -= linelen; \
+		hex_dump_to_buffer(ptr + i, linelen, rowsize, groupsize, \
+				   linebuf, sizeof(linebuf), false); \
+		pr_debug("%s: %s\n", prefix_str, linebuf); \
+	} \
+} while (0)
+
+#define QMI_ENCODE_LOG_MSG(buf, buf_len) do { \
+	qmi_encdec_dump("QMI_ENCODE_MSG", buf, buf_len); \
+} while (0)
+
+#define QMI_DECODE_LOG_MSG(buf, buf_len) do { \
+	qmi_encdec_dump("QMI_DECODE_MSG", buf, buf_len); \
+} while (0)
+
+#define QMI_ENCODE_LOG_ELEM(level, elem_len, elem_size, buf) do { \
+	pr_debug("QMI_ENCODE_ELEM lvl: %d, len: %d, size: %d\n", \
+		 level, elem_len, elem_size); \
+	qmi_encdec_dump("QMI_ENCODE_ELEM", buf, (elem_len * elem_size)); \
+} while (0)
+
+#define QMI_DECODE_LOG_ELEM(level, elem_len, elem_size, buf) do { \
+	pr_debug("QMI_DECODE_ELEM lvl: %d, len: %d, size: %d\n", \
+		 level, elem_len, elem_size); \
+	qmi_encdec_dump("QMI_DECODE_ELEM", buf, (elem_len * elem_size)); \
+} while (0)
+
+#define QMI_ENCODE_LOG_TLV(tlv_type, tlv_len) do { \
+	pr_debug("QMI_ENCODE_TLV type: %d, len: %d\n", tlv_type, tlv_len); \
+} while (0)
+
+#define QMI_DECODE_LOG_TLV(tlv_type, tlv_len) do { \
+	pr_debug("QMI_DECODE_TLV type: %d, len: %d\n", tlv_type, tlv_len); \
+} while (0)
+
+#else
+
+#define QMI_ENCODE_LOG_MSG(buf, buf_len) { }
+#define QMI_DECODE_LOG_MSG(buf, buf_len) { }
+#define QMI_ENCODE_LOG_ELEM(level, elem_len, elem_size, buf) { }
+#define QMI_DECODE_LOG_ELEM(level, elem_len, elem_size, buf) { }
+#define QMI_ENCODE_LOG_TLV(tlv_type, tlv_len) { }
+#define QMI_DECODE_LOG_TLV(tlv_type, tlv_len) { }
+
+#endif
+
+static int _qmi_kernel_encode(struct elem_info *ei_array,
+			      void *out_buf, void *in_c_struct,
+			      uint32_t out_buf_len, int enc_level);
+
+static int _qmi_kernel_decode(struct elem_info *ei_array,
+			      void *out_c_struct,
+			      void *in_buf, uint32_t in_buf_len,
+			      int dec_level);
+static struct elem_info *skip_to_next_elem(struct elem_info *ei_array,
+					   int level);
+
+/**
+ * qmi_calc_max_msg_len() - Calculate the maximum length of a QMI message
+ * @ei_array: Struct info array describing the structure.
+ * @level: Level to identify the depth of the nested structures.
+ *
+ * @return: expected maximum length of the QMI message or 0 on failure.
+ */
+static int qmi_calc_max_msg_len(struct elem_info *ei_array,
+				int level)
+{
+	int max_msg_len = 0;
+	struct elem_info *temp_ei;
+
+	if (!ei_array)
+		return max_msg_len;
+
+	for (temp_ei = ei_array; temp_ei->data_type != QMI_EOTI; temp_ei++) {
+		/* The optional flag itself is not encoded, so it adds no length */
+		if (temp_ei->data_type == QMI_OPT_FLAG)
+			continue;
+
+		if (temp_ei->data_type == QMI_DATA_LEN) {
+			max_msg_len += (temp_ei->elem_size == sizeof(uint8_t) ?
+					sizeof(uint8_t) : sizeof(uint16_t));
+			continue;
+		} else if (temp_ei->data_type == QMI_STRUCT) {
+			max_msg_len += (temp_ei->elem_len *
+					qmi_calc_max_msg_len(temp_ei->ei_array,
+							    (level + 1)));
+		} else if (temp_ei->data_type == QMI_STRING) {
+			if (level > 1)
+				max_msg_len += temp_ei->elem_len <= U8_MAX ?
+					sizeof(uint8_t) : sizeof(uint16_t);
+			max_msg_len += temp_ei->elem_len * temp_ei->elem_size;
+		} else {
+			max_msg_len += (temp_ei->elem_len * temp_ei->elem_size);
+		}
+
+		/*
+		 * Type & Length info. not prepended for elements in the
+		 * nested structure.
+		 */
+		if (level == 1)
+			max_msg_len += (TLV_TYPE_SIZE + TLV_LEN_SIZE);
+	}
+	return max_msg_len;
+}
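+
+/*
+ * Worked example (hypothetical message, all names invented for
+ * illustration): a level-1 message with a single mandatory uint32_t
+ * field costs TLV_TYPE_SIZE + TLV_LEN_SIZE (3) bytes of header plus
+ * 4 bytes of payload, so qmi_calc_max_msg_len() returns 7 for:
+ *
+ *	static struct elem_info example_resp_ei[] = {
+ *		{
+ *			.data_type = QMI_UNSIGNED_4_BYTE,
+ *			.elem_len  = 1,
+ *			.elem_size = sizeof(uint32_t),
+ *			.is_array  = NO_ARRAY,
+ *			.tlv_type  = 0x02,
+ *			.offset    = offsetof(struct example_resp, result),
+ *		},
+ *		{
+ *			.data_type = QMI_EOTI,
+ *		},
+ *	};
+ */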
+
+/**
+ * qmi_calc_min_msg_len() - Calculate the minimum length of a QMI message
+ * @ei_array: Struct info array describing the structure.
+ * @level: Level to identify the depth of the nested structures.
+ *
+ * @return: expected minimum length of the QMI message or 0 on failure.
+ */
+static int qmi_calc_min_msg_len(struct elem_info *ei_array,
+				int level)
+{
+	int min_msg_len = 0;
+	struct elem_info *temp_ei = ei_array;
+
+	if (!ei_array)
+		return min_msg_len;
+
+	while (temp_ei->data_type != QMI_EOTI) {
+		/* Optional elements do not count in minimum length */
+		if (temp_ei->data_type == QMI_OPT_FLAG) {
+			temp_ei = skip_to_next_elem(temp_ei, level);
+			continue;
+		}
+
+		if (temp_ei->data_type == QMI_DATA_LEN) {
+			min_msg_len += (temp_ei->elem_size == sizeof(uint8_t) ?
+					sizeof(uint8_t) : sizeof(uint16_t));
+			temp_ei++;
+			continue;
+		} else if (temp_ei->data_type == QMI_STRUCT) {
+			min_msg_len += qmi_calc_min_msg_len(temp_ei->ei_array,
+							    (level + 1));
+			temp_ei++;
+		} else if (temp_ei->data_type == QMI_STRING) {
+			if (level > 1)
+				min_msg_len += temp_ei->elem_len <= U8_MAX ?
+					sizeof(uint8_t) : sizeof(uint16_t);
+			min_msg_len += temp_ei->elem_len * temp_ei->elem_size;
+			temp_ei++;
+		} else {
+			min_msg_len += (temp_ei->elem_len * temp_ei->elem_size);
+			temp_ei++;
+		}
+
+		/*
+		 * Type & Length info. not prepended for elements in the
+		 * nested structure.
+		 */
+		if (level == 1)
+			min_msg_len += (TLV_TYPE_SIZE + TLV_LEN_SIZE);
+	}
+	return min_msg_len;
+}
+
+/**
+ * qmi_verify_max_msg_len() - Verify the maximum length of a QMI message
+ * @desc: Pointer to structure descriptor.
+ *
+ * @return: true if the maximum message length embedded in structure
+ *          descriptor matches the calculated value, else false.
+ */
+bool qmi_verify_max_msg_len(struct msg_desc *desc)
+{
+	int calc_max_msg_len;
+
+	if (!desc)
+		return false;
+
+	calc_max_msg_len = qmi_calc_max_msg_len(desc->ei_array, 1);
+	if (calc_max_msg_len != desc->max_msg_len) {
+		pr_err("%s: Calc. len %d != Passed len %d\n",
+			__func__, calc_max_msg_len, desc->max_msg_len);
+		return false;
+	}
+	return true;
+}
+
+/**
+ * qmi_kernel_encode() - Encode to QMI message wire format
+ * @desc: Pointer to structure descriptor.
+ * @out_buf: Buffer to hold the encoded QMI message.
+ * @out_buf_len: Length of the out buffer.
+ * @in_c_struct: C Structure to be encoded.
+ *
+ * @return: size of encoded message on success, < 0 for error.
+ */
+int qmi_kernel_encode(struct msg_desc *desc,
+		      void *out_buf, uint32_t out_buf_len,
+		      void *in_c_struct)
+{
+	int enc_level = 1;
+	int ret, calc_max_msg_len, calc_min_msg_len;
+
+	if (!desc)
+		return -EINVAL;
+
+	/* Check the possibility of a zero length QMI message */
+	if (!in_c_struct) {
+		calc_min_msg_len = qmi_calc_min_msg_len(desc->ei_array, 1);
+		if (calc_min_msg_len) {
+			pr_err("%s: Calc. len %d != 0, but NULL in_c_struct\n",
+				__func__, calc_min_msg_len);
+			return -EINVAL;
+		} else {
+			return 0;
+		}
+	}
+
+	/*
+	 * Not a zero-length message. Ensure the output buffer and
+	 * element information array are not NULL.
+	 */
+	if (!out_buf || !desc->ei_array)
+		return -EINVAL;
+
+	if (desc->max_msg_len < out_buf_len)
+		return -ETOOSMALL;
+
+	ret = _qmi_kernel_encode(desc->ei_array, out_buf,
+				 in_c_struct, out_buf_len, enc_level);
+	if (ret == -ETOOSMALL) {
+		calc_max_msg_len = qmi_calc_max_msg_len(desc->ei_array, 1);
+		pr_err("%s: Calc. len %d != Out buf len %d\n",
+			__func__, calc_max_msg_len, out_buf_len);
+	}
+	return ret;
+}
+EXPORT_SYMBOL(qmi_kernel_encode);
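+
+/*
+ * Usage sketch, reusing the hypothetical example_resp/example_resp_ei
+ * from the comment above qmi_calc_min_msg_len(). Only the msg_desc
+ * fields referenced in this file are shown:
+ *
+ *	struct msg_desc desc = {
+ *		.max_msg_len = 7,
+ *		.ei_array    = example_resp_ei,
+ *	};
+ *	struct example_resp resp = { .result = 0x12345678 };
+ *	uint8_t wire[7];
+ *	int len;
+ *
+ *	len = qmi_kernel_encode(&desc, wire, sizeof(wire), &resp);
+ *	if (len < 0)
+ *		pr_err("encode failed: %d\n", len);
+ */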
+
+/**
+ * qmi_encode_basic_elem() - Encodes elements of basic/primary data type
+ * @buf_dst: Buffer to store the encoded information.
+ * @buf_src: Buffer containing the elements to be encoded.
+ * @elem_len: Number of elements, in the buf_src, to be encoded.
+ * @elem_size: Size of a single instance of the element to be encoded.
+ *
+ * @return: number of bytes of encoded information.
+ *
+ * This function encodes the "elem_len" number of data elements, each of
+ * size "elem_size" bytes from the source buffer "buf_src" and stores the
+ * encoded information in the destination buffer "buf_dst". The elements are
+ * of a primary data type such as uint8_t through uint64_t. This
+ * function returns the number of bytes of encoded information.
+ */
+static int qmi_encode_basic_elem(void *buf_dst, void *buf_src,
+				 uint32_t elem_len, uint32_t elem_size)
+{
+	uint32_t i, rc = 0;
+
+	for (i = 0; i < elem_len; i++) {
+		QMI_ENCDEC_ENCODE_N_BYTES(buf_dst, buf_src, elem_size);
+		rc += elem_size;
+	}
+
+	return rc;
+}
+
+/**
+ * qmi_encode_struct_elem() - Encodes elements of struct data type
+ * @ei_array: Struct info array describing the struct element.
+ * @buf_dst: Buffer to store the encoded information.
+ * @buf_src: Buffer containing the elements to be encoded.
+ * @elem_len: Number of elements, in the buf_src, to be encoded.
+ * @out_buf_len: Available space in the encode buffer.
+ * @enc_level: Depth of the nested structure from the main structure.
+ *
+ * @return: Number of bytes of encoded information, on success.
+ *          < 0 on error.
+ *
+ * This function encodes the "elem_len" number of struct elements, each of
+ * size "ei_array->elem_size" bytes from the source buffer "buf_src" and
+ * stores the encoded information in the destination buffer "buf_dst". The
+ * elements are of struct data type which includes any C structure. This
+ * function returns the number of bytes of encoded information.
+ */
+static int qmi_encode_struct_elem(struct elem_info *ei_array,
+				  void *buf_dst, void *buf_src,
+				  uint32_t elem_len, uint32_t out_buf_len,
+				  int enc_level)
+{
+	int i, rc, encoded_bytes = 0;
+	struct elem_info *temp_ei = ei_array;
+
+	for (i = 0; i < elem_len; i++) {
+		rc = _qmi_kernel_encode(temp_ei->ei_array, buf_dst, buf_src,
+					(out_buf_len - encoded_bytes),
+					enc_level);
+		if (rc < 0) {
+			pr_err("%s: STRUCT Encode failure\n", __func__);
+			return rc;
+		}
+		buf_dst = buf_dst + rc;
+		buf_src = buf_src + temp_ei->elem_size;
+		encoded_bytes += rc;
+	}
+
+	return encoded_bytes;
+}
+
+/**
+ * qmi_encode_string_elem() - Encodes elements of string data type
+ * @ei_array: Struct info array describing the string element.
+ * @buf_dst: Buffer to store the encoded information.
+ * @buf_src: Buffer containing the elements to be encoded.
+ * @out_buf_len: Available space in the encode buffer.
+ * @enc_level: Depth of the string element from the main structure.
+ *
+ * @return: Number of bytes of encoded information, on success.
+ *          < 0 on error.
+ *
+ * This function encodes a string element of maximum length "ei_array->elem_len"
+ * bytes from the source buffer "buf_src" and stores the encoded information in
+ * the destination buffer "buf_dst". This function returns the number of bytes
+ * of encoded information.
+ */
+static int qmi_encode_string_elem(struct elem_info *ei_array,
+				  void *buf_dst, void *buf_src,
+				  uint32_t out_buf_len, int enc_level)
+{
+	int rc;
+	int encoded_bytes = 0;
+	struct elem_info *temp_ei = ei_array;
+	uint32_t string_len = 0;
+	uint32_t string_len_sz = 0;
+
+	string_len = strlen(buf_src);
+	string_len_sz = temp_ei->elem_len <= U8_MAX ?
+			sizeof(uint8_t) : sizeof(uint16_t);
+	if (string_len > temp_ei->elem_len) {
+		pr_err("%s: String to be encoded is longer - %d > %d\n",
+			__func__, string_len, temp_ei->elem_len);
+		return -EINVAL;
+	}
+
+	if (enc_level == 1) {
+		if (string_len + TLV_LEN_SIZE + TLV_TYPE_SIZE >
+		    out_buf_len) {
+			pr_err("%s: Output len %d > Out Buf len %d\n",
+				__func__, string_len, out_buf_len);
+			return -ETOOSMALL;
+		}
+	} else {
+		if (string_len + string_len_sz > out_buf_len) {
+			pr_err("%s: Output len %d > Out Buf len %d\n",
+				__func__, string_len, out_buf_len);
+			return -ETOOSMALL;
+		}
+		rc = qmi_encode_basic_elem(buf_dst, &string_len,
+					   1, string_len_sz);
+		encoded_bytes += rc;
+	}
+
+	rc = qmi_encode_basic_elem(buf_dst + encoded_bytes, buf_src,
+				   string_len, temp_ei->elem_size);
+	encoded_bytes += rc;
+	QMI_ENCODE_LOG_ELEM(enc_level, string_len, temp_ei->elem_size, buf_src);
+	return encoded_bytes;
+}
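+
+/*
+ * Wire-layout sketch: at enc_level 1 the caller's TLV header already
+ * carries the string length, so only the characters are emitted. For a
+ * nested string (enc_level > 1) with elem_len <= U8_MAX, encoding "ab"
+ * emits a one-byte length prefix followed by the characters:
+ *
+ *	0x02 'a' 'b'
+ */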
+
+/**
+ * skip_to_next_elem() - Skip to next element in the structure to be encoded
+ * @ei_array: Struct info describing the element to be skipped.
+ * @level: Depth level of encoding/decoding to identify nested structures.
+ *
+ * @return: Struct info of the next element that can be encoded.
+ *
+ * This function is used while encoding optional elements: if the flag
+ * corresponding to an optional element is not set, encoding of that
+ * element can be skipped.
+ */
+static struct elem_info *skip_to_next_elem(struct elem_info *ei_array,
+					   int level)
+{
+	struct elem_info *temp_ei = ei_array;
+	uint8_t tlv_type;
+
+	if (level > 1) {
+		temp_ei = temp_ei + 1;
+	} else {
+		do {
+			tlv_type = temp_ei->tlv_type;
+			temp_ei = temp_ei + 1;
+		} while (tlv_type == temp_ei->tlv_type);
+	}
+
+	return temp_ei;
+}
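+
+/*
+ * At level 1 all elem_info entries that belong to one TLV share a
+ * tlv_type, so skip_to_next_elem() advances until the tlv_type changes.
+ * A hypothetical optional variable-length array, for example, spans
+ * three consecutive entries:
+ *
+ *	{ .data_type = QMI_OPT_FLAG,        .tlv_type = 0x10, ... },
+ *	{ .data_type = QMI_DATA_LEN,        .tlv_type = 0x10, ... },
+ *	{ .data_type = QMI_UNSIGNED_1_BYTE, .tlv_type = 0x10, ... },
+ *	{ .data_type = QMI_UNSIGNED_4_BYTE, .tlv_type = 0x11, ... },
+ */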
+
+/**
+ * _qmi_kernel_encode() - Core Encode Function
+ * @ei_array: Struct info array describing the structure to be encoded.
+ * @out_buf: Buffer to hold the encoded QMI message.
+ * @in_c_struct: Pointer to the C structure to be encoded.
+ * @out_buf_len: Available space in the encode buffer.
+ * @enc_level: Encode level to indicate the depth of the nested structure,
+ *             within the main structure, being encoded.
+ *
+ * @return: Number of bytes of encoded information, on success.
+ *          < 0 on error.
+ */
+static int _qmi_kernel_encode(struct elem_info *ei_array,
+			      void *out_buf, void *in_c_struct,
+			      uint32_t out_buf_len, int enc_level)
+{
+	struct elem_info *temp_ei = ei_array;
+	uint8_t opt_flag_value = 0;
+	uint32_t data_len_value = 0, data_len_sz;
+	uint8_t *buf_dst = (uint8_t *)out_buf;
+	uint8_t *tlv_pointer;
+	uint32_t tlv_len;
+	uint8_t tlv_type;
+	uint32_t encoded_bytes = 0;
+	void *buf_src;
+	int encode_tlv = 0;
+	int rc;
+
+	tlv_pointer = buf_dst;
+	tlv_len = 0;
+	if (enc_level == 1)
+		buf_dst = buf_dst + (TLV_LEN_SIZE + TLV_TYPE_SIZE);
+
+	while (temp_ei->data_type != QMI_EOTI) {
+		buf_src = in_c_struct + temp_ei->offset;
+		tlv_type = temp_ei->tlv_type;
+
+		if (temp_ei->is_array == NO_ARRAY) {
+			data_len_value = 1;
+		} else if (temp_ei->is_array == STATIC_ARRAY) {
+			data_len_value = temp_ei->elem_len;
+		} else if (data_len_value <= 0 ||
+			    temp_ei->elem_len < data_len_value) {
+			pr_err("%s: Invalid data length\n", __func__);
+			return -EINVAL;
+		}
+
+		switch (temp_ei->data_type) {
+		case QMI_OPT_FLAG:
+			rc = qmi_encode_basic_elem(&opt_flag_value, buf_src,
+						   1, sizeof(uint8_t));
+			if (opt_flag_value)
+				temp_ei = temp_ei + 1;
+			else
+				temp_ei = skip_to_next_elem(temp_ei, enc_level);
+			break;
+
+		case QMI_DATA_LEN:
+			memcpy(&data_len_value, buf_src, temp_ei->elem_size);
+			data_len_sz = temp_ei->elem_size == sizeof(uint8_t) ?
+					sizeof(uint8_t) : sizeof(uint16_t);
+			/* Check to avoid out of range buffer access */
+			if ((data_len_sz + encoded_bytes + TLV_LEN_SIZE +
+			    TLV_TYPE_SIZE) > out_buf_len) {
+				pr_err("%s: Too Small Buffer @DATA_LEN\n",
+					__func__);
+				return -ETOOSMALL;
+			}
+			rc = qmi_encode_basic_elem(buf_dst, &data_len_value,
+						   1, data_len_sz);
+			UPDATE_ENCODE_VARIABLES(temp_ei, buf_dst,
+				encoded_bytes, tlv_len, encode_tlv, rc);
+			if (!data_len_value)
+				temp_ei = skip_to_next_elem(temp_ei, enc_level);
+			else
+				encode_tlv = 0;
+			break;
+
+		case QMI_UNSIGNED_1_BYTE:
+		case QMI_UNSIGNED_2_BYTE:
+		case QMI_UNSIGNED_4_BYTE:
+		case QMI_UNSIGNED_8_BYTE:
+		case QMI_SIGNED_2_BYTE_ENUM:
+		case QMI_SIGNED_4_BYTE_ENUM:
+			/* Check to avoid out of range buffer access */
+			if (((data_len_value * temp_ei->elem_size) +
+			    encoded_bytes + TLV_LEN_SIZE + TLV_TYPE_SIZE) >
+			    out_buf_len) {
+				pr_err("%s: Too Small Buffer @data_type:%d\n",
+					__func__, temp_ei->data_type);
+				return -ETOOSMALL;
+			}
+			rc = qmi_encode_basic_elem(buf_dst, buf_src,
+				data_len_value, temp_ei->elem_size);
+			QMI_ENCODE_LOG_ELEM(enc_level, data_len_value,
+				temp_ei->elem_size, buf_src);
+			UPDATE_ENCODE_VARIABLES(temp_ei, buf_dst,
+				encoded_bytes, tlv_len, encode_tlv, rc);
+			break;
+
+		case QMI_STRUCT:
+			rc = qmi_encode_struct_elem(temp_ei, buf_dst, buf_src,
+				data_len_value, (out_buf_len - encoded_bytes),
+				(enc_level + 1));
+			if (rc < 0)
+				return rc;
+			UPDATE_ENCODE_VARIABLES(temp_ei, buf_dst,
+				encoded_bytes, tlv_len, encode_tlv, rc);
+			break;
+
+		case QMI_STRING:
+			rc = qmi_encode_string_elem(temp_ei, buf_dst, buf_src,
+				out_buf_len - encoded_bytes, enc_level);
+			if (rc < 0)
+				return rc;
+			UPDATE_ENCODE_VARIABLES(temp_ei, buf_dst,
+				encoded_bytes, tlv_len, encode_tlv, rc);
+			break;
+		default:
+			pr_err("%s: Unrecognized data type\n", __func__);
+			return -EINVAL;
+
+		}
+
+		if (encode_tlv && enc_level == 1) {
+			QMI_ENCDEC_ENCODE_TLV(tlv_type, tlv_len, tlv_pointer);
+			QMI_ENCODE_LOG_TLV(tlv_type, tlv_len);
+			encoded_bytes += (TLV_TYPE_SIZE + TLV_LEN_SIZE);
+			tlv_pointer = buf_dst;
+			tlv_len = 0;
+			buf_dst = buf_dst + TLV_LEN_SIZE + TLV_TYPE_SIZE;
+			encode_tlv = 0;
+		}
+	}
+	QMI_ENCODE_LOG_MSG(out_buf, encoded_bytes);
+	return encoded_bytes;
+}
+
+/**
+ * qmi_kernel_decode() - Decode to C Structure format
+ * @desc: Pointer to structure descriptor.
+ * @out_c_struct: Buffer to hold the decoded C structure.
+ * @in_buf: Buffer containing the QMI message to be decoded.
+ * @in_buf_len: Length of the incoming QMI message.
+ *
+ * @return: 0 on success, < 0 on error.
+ */
+int qmi_kernel_decode(struct msg_desc *desc, void *out_c_struct,
+		      void *in_buf, uint32_t in_buf_len)
+{
+	int dec_level = 1;
+	int rc = 0;
+
+	if (!desc || !desc->ei_array)
+		return -EINVAL;
+
+	if (!out_c_struct || !in_buf || !in_buf_len)
+		return -EINVAL;
+
+	if (desc->max_msg_len < in_buf_len)
+		return -EINVAL;
+
+	rc = _qmi_kernel_decode(desc->ei_array, out_c_struct,
+				in_buf, in_buf_len, dec_level);
+	if (rc < 0)
+		return rc;
+	else
+		return 0;
+}
+EXPORT_SYMBOL(qmi_kernel_decode);
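+
+/*
+ * Usage sketch, mirroring the hypothetical encode example above:
+ *
+ *	struct example_resp resp;
+ *	int rc;
+ *
+ *	rc = qmi_kernel_decode(&desc, &resp, wire, len);
+ *	if (rc < 0)
+ *		pr_err("malformed QMI message: %d\n", rc);
+ */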
+
+/**
+ * qmi_decode_basic_elem() - Decodes elements of basic/primary data type
+ * @buf_dst: Buffer to store the decoded element.
+ * @buf_src: Buffer containing the elements in QMI wire format.
+ * @elem_len: Number of elements to be decoded.
+ * @elem_size: Size of a single instance of the element to be decoded.
+ *
+ * @return: Total size of the decoded data elements, in bytes.
+ *
+ * This function decodes the "elem_len" number of elements in QMI wire format,
+ * each of size "elem_size" bytes from the source buffer "buf_src" and stores
+ * the decoded elements in the destination buffer "buf_dst". The elements are
+ * of a primary data type such as uint8_t through uint64_t. This
+ * function returns the number of bytes of decoded information.
+ */
+static int qmi_decode_basic_elem(void *buf_dst, void *buf_src,
+				 uint32_t elem_len, uint32_t elem_size)
+{
+	uint32_t i, rc = 0;
+
+	for (i = 0; i < elem_len; i++) {
+		QMI_ENCDEC_DECODE_N_BYTES(buf_dst, buf_src, elem_size);
+		rc += elem_size;
+	}
+
+	return rc;
+}
+
+/**
+ * qmi_decode_struct_elem() - Decodes elements of struct data type
+ * @ei_array: Struct info array describing the struct element.
+ * @buf_dst: Buffer to store the decoded element.
+ * @buf_src: Buffer containing the elements in QMI wire format.
+ * @elem_len: Number of elements to be decoded.
+ * @tlv_len: Total size of the encoded information corresponding to
+ *           this struct element.
+ * @dec_level: Depth of the nested structure from the main structure.
+ *
+ * @return: Total size of the decoded data elements, on success.
+ *          < 0 on error.
+ *
+ * This function decodes the "elem_len" number of elements in QMI wire format,
+ * each of size "(tlv_len/elem_len)" bytes from the source buffer "buf_src"
+ * and stores the decoded elements in the destination buffer "buf_dst". The
+ * elements are of struct data type which includes any C structure. This
+ * function returns the number of bytes of decoded information.
+ */
+static int qmi_decode_struct_elem(struct elem_info *ei_array, void *buf_dst,
+				  void *buf_src, uint32_t elem_len,
+				  uint32_t tlv_len, int dec_level)
+{
+	int i, rc, decoded_bytes = 0;
+	struct elem_info *temp_ei = ei_array;
+
+	for (i = 0; i < elem_len && decoded_bytes < tlv_len; i++) {
+		rc = _qmi_kernel_decode(temp_ei->ei_array, buf_dst, buf_src,
+					(tlv_len - decoded_bytes), dec_level);
+		if (rc < 0)
+			return rc;
+		buf_src = buf_src + rc;
+		buf_dst = buf_dst + temp_ei->elem_size;
+		decoded_bytes += rc;
+	}
+
+	if ((dec_level <= 2 && decoded_bytes != tlv_len) ||
+	    (dec_level > 2 && (i < elem_len || decoded_bytes > tlv_len))) {
+		pr_err("%s: Fault in decoding: dl(%d), db(%d), tl(%d), i(%d), el(%d)\n",
+			__func__, dec_level, decoded_bytes, tlv_len,
+			i, elem_len);
+		return -EFAULT;
+	}
+	return decoded_bytes;
+}
+
+/**
+ * qmi_decode_string_elem() - Decodes elements of string data type
+ * @ei_array: Struct info array describing the string element.
+ * @buf_dst: Buffer to store the decoded element.
+ * @buf_src: Buffer containing the elements in QMI wire format.
+ * @tlv_len: Total size of the encoded information corresponding to
+ *           this string element.
+ * @dec_level: Depth of the string element from the main structure.
+ *
+ * @return: Total size of the decoded data elements, on success.
+ *          < 0 on error.
+ *
+ * This function decodes the string element of maximum length
+ * "ei_array->elem_len" from the source buffer "buf_src" and puts it into
+ * the destination buffer "buf_dst". This function returns number of bytes
+ * decoded from the input buffer.
+ */
+static int qmi_decode_string_elem(struct elem_info *ei_array, void *buf_dst,
+				  void *buf_src, uint32_t tlv_len,
+				  int dec_level)
+{
+	int rc;
+	int decoded_bytes = 0;
+	uint32_t string_len = 0;
+	uint32_t string_len_sz = 0;
+	struct elem_info *temp_ei = ei_array;
+
+	if (dec_level == 1) {
+		string_len = tlv_len;
+	} else {
+		string_len_sz = temp_ei->elem_len <= U8_MAX ?
+				sizeof(uint8_t) : sizeof(uint16_t);
+		rc = qmi_decode_basic_elem(&string_len, buf_src,
+					   1, string_len_sz);
+		decoded_bytes += rc;
+	}
+
+	if (string_len > temp_ei->elem_len) {
+		pr_err("%s: String len %d > Max Len %d\n",
+			__func__, string_len, temp_ei->elem_len);
+		return -ETOOSMALL;
+	} else if (string_len > tlv_len) {
+		pr_err("%s: String len %d > Input Buffer Len %d\n",
+			__func__, string_len, tlv_len);
+		return -EFAULT;
+	}
+
+	rc = qmi_decode_basic_elem(buf_dst, buf_src + decoded_bytes,
+				   string_len, temp_ei->elem_size);
+	*((char *)buf_dst + string_len) = '\0';
+	decoded_bytes += rc;
+	QMI_DECODE_LOG_ELEM(dec_level, string_len, temp_ei->elem_size, buf_dst);
+	return decoded_bytes;
+}
+
+/**
+ * find_ei() - Find element info corresponding to TLV Type
+ * @ei_array: Struct info array of the message being decoded.
+ * @type: TLV Type of the element being searched.
+ *
+ * @return: Pointer to struct info, if found
+ *
+ * Every element that got encoded in the QMI message will have a type
+ * information associated with it. While decoding the QMI message,
+ * this function is used to find the struct info regarding the element
+ * that corresponds to the type being decoded.
+ */
+static struct elem_info *find_ei(struct elem_info *ei_array,
+				   uint32_t type)
+{
+	struct elem_info *temp_ei = ei_array;
+	while (temp_ei->data_type != QMI_EOTI) {
+		if (temp_ei->tlv_type == (uint8_t)type)
+			return temp_ei;
+		temp_ei = temp_ei + 1;
+	}
+	return NULL;
+}
+
+/**
+ * _qmi_kernel_decode() - Core Decode Function
+ * @ei_array: Struct info array describing the structure to be decoded.
+ * @out_c_struct: Buffer to hold the decoded C struct
+ * @in_buf: Buffer containing the QMI message to be decoded
+ * @in_buf_len: Length of the QMI message to be decoded
+ * @dec_level: Decode level to indicate the depth of the nested structure,
+ *             within the main structure, being decoded
+ *
+ * @return: Number of bytes of decoded information, on success
+ *          < 0 on error.
+ */
+static int _qmi_kernel_decode(struct elem_info *ei_array,
+			      void *out_c_struct,
+			      void *in_buf, uint32_t in_buf_len,
+			      int dec_level)
+{
+	struct elem_info *temp_ei = ei_array;
+	uint8_t opt_flag_value = 1;
+	uint32_t data_len_value = 0, data_len_sz = 0;
+	uint8_t *buf_dst = out_c_struct;
+	uint8_t *tlv_pointer;
+	uint32_t tlv_len = 0;
+	uint32_t tlv_type;
+	uint32_t decoded_bytes = 0;
+	void *buf_src = in_buf;
+	int rc;
+
+	QMI_DECODE_LOG_MSG(in_buf, in_buf_len);
+	while (decoded_bytes < in_buf_len) {
+		if (dec_level >= 2 && temp_ei->data_type == QMI_EOTI)
+			return decoded_bytes;
+
+		if (dec_level == 1) {
+			tlv_pointer = buf_src;
+			QMI_ENCDEC_DECODE_TLV(&tlv_type,
+					      &tlv_len, tlv_pointer);
+			QMI_DECODE_LOG_TLV(tlv_type, tlv_len);
+			buf_src += (TLV_TYPE_SIZE + TLV_LEN_SIZE);
+			decoded_bytes += (TLV_TYPE_SIZE + TLV_LEN_SIZE);
+			temp_ei = find_ei(ei_array, tlv_type);
+			if (!temp_ei && (tlv_type < OPTIONAL_TLV_TYPE_START)) {
+				pr_err("%s: Inval element info\n", __func__);
+				return -EINVAL;
+			} else if (!temp_ei) {
+				UPDATE_DECODE_VARIABLES(buf_src,
+						decoded_bytes, tlv_len);
+				continue;
+			}
+		} else {
+			/*
+			 * No length information for elements in nested
+			 * structures. So use remaining decodable buffer space.
+			 */
+			tlv_len = in_buf_len - decoded_bytes;
+		}
+
+		buf_dst = out_c_struct + temp_ei->offset;
+		if (temp_ei->data_type == QMI_OPT_FLAG) {
+			memcpy(buf_dst, &opt_flag_value, sizeof(uint8_t));
+			temp_ei = temp_ei + 1;
+			buf_dst = out_c_struct + temp_ei->offset;
+		}
+
+		if (temp_ei->data_type == QMI_DATA_LEN) {
+			data_len_sz = temp_ei->elem_size == sizeof(uint8_t) ?
+					sizeof(uint8_t) : sizeof(uint16_t);
+			rc = qmi_decode_basic_elem(&data_len_value, buf_src,
+						   1, data_len_sz);
+			memcpy(buf_dst, &data_len_value, sizeof(uint32_t));
+			temp_ei = temp_ei + 1;
+			buf_dst = out_c_struct + temp_ei->offset;
+			tlv_len -= data_len_sz;
+			UPDATE_DECODE_VARIABLES(buf_src, decoded_bytes, rc);
+		}
+
+		if (temp_ei->is_array == NO_ARRAY) {
+			data_len_value = 1;
+		} else if (temp_ei->is_array == STATIC_ARRAY) {
+			data_len_value = temp_ei->elem_len;
+		} else if (data_len_value > temp_ei->elem_len) {
+			pr_err("%s: Data len %d > max spec %d\n",
+				__func__, data_len_value, temp_ei->elem_len);
+			return -ETOOSMALL;
+		}
+
+		switch (temp_ei->data_type) {
+		case QMI_UNSIGNED_1_BYTE:
+		case QMI_UNSIGNED_2_BYTE:
+		case QMI_UNSIGNED_4_BYTE:
+		case QMI_UNSIGNED_8_BYTE:
+		case QMI_SIGNED_2_BYTE_ENUM:
+		case QMI_SIGNED_4_BYTE_ENUM:
+			rc = qmi_decode_basic_elem(buf_dst, buf_src,
+				data_len_value, temp_ei->elem_size);
+			QMI_DECODE_LOG_ELEM(dec_level, data_len_value,
+				temp_ei->elem_size, buf_dst);
+			UPDATE_DECODE_VARIABLES(buf_src, decoded_bytes, rc);
+			break;
+
+		case QMI_STRUCT:
+			rc = qmi_decode_struct_elem(temp_ei, buf_dst, buf_src,
+				data_len_value, tlv_len, (dec_level + 1));
+			if (rc < 0)
+				return rc;
+			UPDATE_DECODE_VARIABLES(buf_src, decoded_bytes, rc);
+			break;
+
+		case QMI_STRING:
+			rc = qmi_decode_string_elem(temp_ei, buf_dst, buf_src,
+						     tlv_len, dec_level);
+			if (rc < 0)
+				return rc;
+			UPDATE_DECODE_VARIABLES(buf_src, decoded_bytes, rc);
+			break;
+
+		default:
+			pr_err("%s: Unrecognized data type\n", __func__);
+			return -EINVAL;
+		}
+		temp_ei = temp_ei + 1;
+	}
+	return decoded_bytes;
+}
+MODULE_DESCRIPTION("QMI kernel enc/dec");
+MODULE_LICENSE("GPL v2");
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/lib/qmi_encdec_priv.h	2019-01-22 16:16:28.771294111 +0100
@@ -0,0 +1,66 @@
+/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _QMI_ENCDEC_PRIV_H_
+#define _QMI_ENCDEC_PRIV_H_
+
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/mm.h>
+#include <linux/list.h>
+#include <linux/socket.h>
+#include <linux/gfp.h>
+#include <linux/qmi_encdec.h>
+
+#define QMI_ENCDEC_ENCODE_TLV(type, length, p_dst) do { \
+	*p_dst++ = type; \
+	*p_dst++ = ((uint8_t)((length) & 0xFF)); \
+	*p_dst++ = ((uint8_t)(((length) >> 8) & 0xFF)); \
+} while (0)
+
+#define QMI_ENCDEC_DECODE_TLV(p_type, p_length, p_src) do { \
+	*p_type = (uint8_t)*p_src++; \
+	*p_length = (uint8_t)*p_src++; \
+	*p_length |= ((uint8_t)*p_src) << 8; \
+} while (0)
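+
+/*
+ * Wire-format sketch: a TLV header is one type byte followed by a
+ * little-endian 16-bit length. Encoding type 0x02 with length 4 emits
+ * the bytes 0x02 0x04 0x00 and advances p_dst by three bytes.
+ */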
+
+#define QMI_ENCDEC_ENCODE_N_BYTES(p_dst, p_src, size) \
+do { \
+	memcpy(p_dst, p_src, size); \
+	p_dst = (uint8_t *)p_dst + size; \
+	p_src = (uint8_t *)p_src + size; \
+} while (0)
+
+#define QMI_ENCDEC_DECODE_N_BYTES(p_dst, p_src, size) \
+do { \
+	memcpy(p_dst, p_src, size); \
+	p_dst = (uint8_t *)p_dst + size; \
+	p_src = (uint8_t *)p_src + size; \
+} while (0)
+
+#define UPDATE_ENCODE_VARIABLES(temp_si, buf_dst, \
+				encoded_bytes, tlv_len, encode_tlv, rc) \
+do { \
+	buf_dst = (uint8_t *)buf_dst + rc; \
+	encoded_bytes += rc; \
+	tlv_len += rc; \
+	temp_si = temp_si + 1; \
+	encode_tlv = 1; \
+} while (0)
+
+#define UPDATE_DECODE_VARIABLES(buf_src, decoded_bytes, rc) \
+do { \
+	buf_src = (uint8_t *)buf_src + rc; \
+	decoded_bytes += rc; \
+} while (0)
+
+#endif
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/lib/stackdepot.c	2019-01-22 16:16:28.775294148 +0100
@@ -0,0 +1,287 @@
+/*
+ * Generic stack depot for storing stack traces.
+ *
+ * Some debugging tools need to save stack traces of certain events which can
+ * be later presented to the user. For example, KASAN needs to save alloc and
+ * free stacks for each object, but storing two stack traces per object
+ * requires too much memory (e.g. SLUB_DEBUG needs 256 bytes per object for
+ * that).
+ *
+ * Instead, stack depot maintains a hashtable of unique stacktraces. Since alloc
+ * and free stacks repeat a lot, we save about 100x space.
+ * Stacks are never removed from the depot, so we store them one after
+ * another in a contiguous memory allocation.
+ *
+ * Author: Alexander Potapenko <glider@google.com>
+ * Copyright (C) 2016 Google, Inc.
+ *
+ * Based on code by Dmitry Chernenkov.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ */
+
+#include <linux/gfp.h>
+#include <linux/jhash.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/percpu.h>
+#include <linux/printk.h>
+#include <linux/slab.h>
+#include <linux/stacktrace.h>
+#include <linux/stackdepot.h>
+#include <linux/string.h>
+#include <linux/types.h>
+
+#define DEPOT_STACK_BITS (sizeof(depot_stack_handle_t) * 8)
+
+#define STACK_ALLOC_NULL_PROTECTION_BITS 1
+#define STACK_ALLOC_ORDER 2 /* 'Slab' size order for stack depot, 4 pages */
+#define STACK_ALLOC_SIZE (1LL << (PAGE_SHIFT + STACK_ALLOC_ORDER))
+#define STACK_ALLOC_ALIGN 4
+#define STACK_ALLOC_OFFSET_BITS (STACK_ALLOC_ORDER + PAGE_SHIFT - \
+					STACK_ALLOC_ALIGN)
+#define STACK_ALLOC_INDEX_BITS (DEPOT_STACK_BITS - \
+		STACK_ALLOC_NULL_PROTECTION_BITS - STACK_ALLOC_OFFSET_BITS)
+#define STACK_ALLOC_SLABS_CAP 8192
+#define STACK_ALLOC_MAX_SLABS \
+	(((1LL << (STACK_ALLOC_INDEX_BITS)) < STACK_ALLOC_SLABS_CAP) ? \
+	 (1LL << (STACK_ALLOC_INDEX_BITS)) : STACK_ALLOC_SLABS_CAP)
+
+/* The compact structure to store the reference to stacks. */
+union handle_parts {
+	depot_stack_handle_t handle;
+	struct {
+		u32 slabindex : STACK_ALLOC_INDEX_BITS;
+		u32 offset : STACK_ALLOC_OFFSET_BITS;
+		u32 valid : STACK_ALLOC_NULL_PROTECTION_BITS;
+	};
+};
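+
+/*
+ * Packing sketch: a handle folds the slab index, the offset within the
+ * slab (in STACK_ALLOC_ALIGN units) and a valid bit into 32 bits. A
+ * record placed at byte offset 64 of slab 3 is described by
+ *
+ *	parts.slabindex = 3;
+ *	parts.offset    = 64 >> STACK_ALLOC_ALIGN;
+ *	parts.valid     = 1;
+ *
+ * and depot_fetch_stack() recovers its address as
+ * stack_slabs[3] + (parts.offset << STACK_ALLOC_ALIGN).
+ */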
+
+struct stack_record {
+	struct stack_record *next;	/* Link in the hashtable */
+	u32 hash;			/* Hash in the hashtable */
+	u32 size;			/* Number of frames in the stack */
+	union handle_parts handle;
+	unsigned long entries[1];	/* Variable-sized array of entries. */
+};
+
+static void *stack_slabs[STACK_ALLOC_MAX_SLABS];
+
+static int depot_index;
+static int next_slab_inited;
+static size_t depot_offset;
+static DEFINE_SPINLOCK(depot_lock);
+
+static bool init_stack_slab(void **prealloc)
+{
+	if (!*prealloc)
+		return false;
+	/*
+	 * This smp_load_acquire() pairs with smp_store_release() to
+	 * |next_slab_inited| below and in depot_alloc_stack().
+	 */
+	if (smp_load_acquire(&next_slab_inited))
+		return true;
+	if (stack_slabs[depot_index] == NULL) {
+		stack_slabs[depot_index] = *prealloc;
+	} else {
+		stack_slabs[depot_index + 1] = *prealloc;
+		/*
+		 * This smp_store_release pairs with smp_load_acquire() from
+		 * |next_slab_inited| above and in depot_save_stack().
+		 */
+		smp_store_release(&next_slab_inited, 1);
+	}
+	*prealloc = NULL;
+	return true;
+}
+
+/* Allocation of a new stack in raw storage */
+static struct stack_record *depot_alloc_stack(unsigned long *entries, int size,
+		u32 hash, void **prealloc, gfp_t alloc_flags)
+{
+	int required_size = offsetof(struct stack_record, entries) +
+		sizeof(unsigned long) * size;
+	struct stack_record *stack;
+
+	required_size = ALIGN(required_size, 1 << STACK_ALLOC_ALIGN);
+
+	if (unlikely(depot_offset + required_size > STACK_ALLOC_SIZE)) {
+		if (unlikely(depot_index + 1 >= STACK_ALLOC_MAX_SLABS)) {
+			WARN_ONCE(1, "Stack depot reached limit capacity");
+			return NULL;
+		}
+		depot_index++;
+		depot_offset = 0;
+		/*
+		 * smp_store_release() here pairs with smp_load_acquire() from
+		 * |next_slab_inited| in depot_save_stack() and
+		 * init_stack_slab().
+		 */
+		if (depot_index + 1 < STACK_ALLOC_MAX_SLABS)
+			smp_store_release(&next_slab_inited, 0);
+	}
+	init_stack_slab(prealloc);
+	if (stack_slabs[depot_index] == NULL)
+		return NULL;
+
+	stack = stack_slabs[depot_index] + depot_offset;
+
+	stack->hash = hash;
+	stack->size = size;
+	stack->handle.slabindex = depot_index;
+	stack->handle.offset = depot_offset >> STACK_ALLOC_ALIGN;
+	stack->handle.valid = 1;
+	memcpy(stack->entries, entries, size * sizeof(unsigned long));
+	depot_offset += required_size;
+
+	return stack;
+}
+
+#define STACK_HASH_ORDER 18
+#define STACK_HASH_SIZE (1L << STACK_HASH_ORDER)
+#define STACK_HASH_MASK (STACK_HASH_SIZE - 1)
+#define STACK_HASH_SEED 0x9747b28c
+
+static struct stack_record *stack_table[STACK_HASH_SIZE] = {
+	[0 ...	STACK_HASH_SIZE - 1] = NULL
+};
+
+/* Calculate hash for a stack */
+static inline u32 hash_stack(unsigned long *entries, unsigned int size)
+{
+	return jhash2((u32 *)entries,
+			       size * sizeof(unsigned long) / sizeof(u32),
+			       STACK_HASH_SEED);
+}
+
+/* Find a stack that is equal to the one stored in entries in the hash */
+static inline struct stack_record *find_stack(struct stack_record *bucket,
+					     unsigned long *entries, int size,
+					     u32 hash)
+{
+	struct stack_record *found;
+
+	for (found = bucket; found; found = found->next) {
+		if (found->hash == hash &&
+		    found->size == size &&
+		    !memcmp(entries, found->entries,
+			    size * sizeof(unsigned long))) {
+			return found;
+		}
+	}
+	return NULL;
+}
+
+void depot_fetch_stack(depot_stack_handle_t handle, struct stack_trace *trace)
+{
+	union handle_parts parts = { .handle = handle };
+	void *slab = stack_slabs[parts.slabindex];
+	size_t offset = parts.offset << STACK_ALLOC_ALIGN;
+	struct stack_record *stack = slab + offset;
+
+	trace->nr_entries = trace->max_entries = stack->size;
+	trace->entries = stack->entries;
+	trace->skip = 0;
+}
+EXPORT_SYMBOL_GPL(depot_fetch_stack);
+
+/**
+ * depot_save_stack - save stack in a stack depot.
+ * @trace: the stacktrace to save.
+ * @alloc_flags: flags for allocating additional memory if required.
+ *
+ * Returns the handle of the stack struct stored in depot.
+ */
+depot_stack_handle_t depot_save_stack(struct stack_trace *trace,
+				    gfp_t alloc_flags)
+{
+	u32 hash;
+	depot_stack_handle_t retval = 0;
+	struct stack_record *found = NULL, **bucket;
+	unsigned long flags;
+	struct page *page = NULL;
+	void *prealloc = NULL;
+
+	if (unlikely(trace->nr_entries == 0))
+		goto fast_exit;
+
+	hash = hash_stack(trace->entries, trace->nr_entries);
+	bucket = &stack_table[hash & STACK_HASH_MASK];
+
+	/*
+	 * Fast path: look the stack trace up without locking.
+	 * The smp_load_acquire() here pairs with smp_store_release() to
+	 * |bucket| below.
+	 */
+	found = find_stack(smp_load_acquire(bucket), trace->entries,
+			   trace->nr_entries, hash);
+	if (found)
+		goto exit;
+
+	/*
+	 * Check if the current or the next stack slab needs to be initialized.
+	 * If so, allocate the memory - we won't be able to do that under the
+	 * lock.
+	 *
+	 * The smp_load_acquire() here pairs with smp_store_release() to
+	 * |next_slab_inited| in depot_alloc_stack() and init_stack_slab().
+	 */
+	if (unlikely(!smp_load_acquire(&next_slab_inited))) {
+		/*
+		 * Zero out zone modifiers, as we don't have specific zone
+		 * requirements. Keep the flags related to allocation in atomic
+		 * contexts and I/O.
+		 */
+		alloc_flags &= ~GFP_ZONEMASK;
+		alloc_flags &= (GFP_ATOMIC | GFP_KERNEL);
+		alloc_flags |= __GFP_NOWARN;
+		page = alloc_pages(alloc_flags, STACK_ALLOC_ORDER);
+		if (page)
+			prealloc = page_address(page);
+	}
+
+	spin_lock_irqsave(&depot_lock, flags);
+
+	found = find_stack(*bucket, trace->entries, trace->nr_entries, hash);
+	if (!found) {
+		struct stack_record *new =
+			depot_alloc_stack(trace->entries, trace->nr_entries,
+					  hash, &prealloc, alloc_flags);
+		if (new) {
+			new->next = *bucket;
+			/*
+			 * This smp_store_release() pairs with
+			 * smp_load_acquire() from |bucket| above.
+			 */
+			smp_store_release(bucket, new);
+			found = new;
+		}
+	} else if (prealloc) {
+		/*
+		 * We didn't need to store this stack trace, but let's keep
+		 * the preallocated memory for the future.
+		 */
+		WARN_ON(!init_stack_slab(&prealloc));
+	}
+
+	spin_unlock_irqrestore(&depot_lock, flags);
+exit:
+	if (prealloc) {
+		/* Nobody used this memory, ok to free it. */
+		free_pages((unsigned long)prealloc, STACK_ALLOC_ORDER);
+	}
+	if (found)
+		retval = found->handle.handle;
+fast_exit:
+	return retval;
+}
+EXPORT_SYMBOL_GPL(depot_save_stack);
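+
+/*
+ * Usage sketch (assumes a caller where save_stack_trace() and
+ * print_stack_trace() from <linux/stacktrace.h> are usable):
+ *
+ *	unsigned long entries[16];
+ *	struct stack_trace trace = {
+ *		.entries     = entries,
+ *		.max_entries = ARRAY_SIZE(entries),
+ *		.skip        = 0,
+ *	};
+ *	depot_stack_handle_t handle;
+ *
+ *	save_stack_trace(&trace);
+ *	handle = depot_save_stack(&trace, GFP_NOWAIT);
+ *	...
+ *	depot_fetch_stack(handle, &trace);
+ *	print_stack_trace(&trace, 0);
+ */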
diff -Nruw linux-4.4.115-fbx/lib/zstd./Makefile linux-4.4.115-fbx/lib/zstd/Makefile
--- linux-4.4.115-fbx/lib/zstd./Makefile	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/lib/zstd/Makefile	2019-01-22 16:16:28.783294220 +0100
@@ -0,0 +1,18 @@
+obj-$(CONFIG_ZSTD_COMPRESS) += zstd_compress.o
+obj-$(CONFIG_ZSTD_DECOMPRESS) += zstd_decompress.o
+
+ccflags-y += -O3
+
+# Object files unique to zstd_compress and zstd_decompress
+zstd_compress-y := fse_compress.o huf_compress.o compress.o
+zstd_decompress-y := huf_decompress.o decompress.o
+
+# These object files are shared between the modules.
+# Always add them to zstd_compress.
+# Unless both zstd_compress and zstd_decompress are built in
+# then also add them to zstd_decompress.
+zstd_compress-y += entropy_common.o fse_decompress.o zstd_common.o
+
+ifneq ($(CONFIG_ZSTD_COMPRESS)$(CONFIG_ZSTD_DECOMPRESS),yy)
+	zstd_decompress-y += entropy_common.o fse_decompress.o zstd_common.o
+endif
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/mm/showmem.c	2019-01-22 16:16:28.827294618 +0100
@@ -0,0 +1,54 @@
+/*
+ * Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/notifier.h>
+#include <linux/debugfs.h>
+#include <linux/fs.h>
+#include <linux/init.h>
+
+ATOMIC_NOTIFIER_HEAD(show_mem_notifier);
+
+int show_mem_notifier_register(struct notifier_block *nb)
+{
+	return atomic_notifier_chain_register(&show_mem_notifier, nb);
+}
+
+int show_mem_notifier_unregister(struct notifier_block *nb)
+{
+	return  atomic_notifier_chain_unregister(&show_mem_notifier, nb);
+}
+
+void show_mem_call_notifiers(void)
+{
+	atomic_notifier_call_chain(&show_mem_notifier, 0, NULL);
+}
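+
+/*
+ * Usage sketch (hypothetical client): a subsystem that wants its own
+ * statistics dumped alongside show_mem() output registers a callback:
+ *
+ *	static int my_mem_notify(struct notifier_block *nb,
+ *				 unsigned long action, void *data)
+ *	{
+ *		pr_info("my_subsys: %lu pages in use\n", my_page_count);
+ *		return 0;
+ *	}
+ *
+ *	static struct notifier_block my_mem_nb = {
+ *		.notifier_call = my_mem_notify,
+ *	};
+ *
+ *	show_mem_notifier_register(&my_mem_nb);
+ */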
+
+static int show_mem_notifier_get(void *dat, u64 *val)
+{
+	show_mem_call_notifiers();
+	*val = 0;
+	return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(show_mem_notifier_debug_ops, show_mem_notifier_get,
+				NULL, "%llu\n");
+
+int show_mem_notifier_debugfs_register(void)
+{
+	debugfs_create_file("show_mem_notifier", 0664, NULL, NULL,
+				&show_mem_notifier_debug_ops);
+
+	return 0;
+}
+late_initcall(show_mem_notifier_debugfs_register);
diff -Nruw linux-4.4.115-fbx/net/ipc_router./ipc_router_core.c linux-4.4.115-fbx/net/ipc_router/ipc_router_core.c
--- linux-4.4.115-fbx/net/ipc_router./ipc_router_core.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/net/ipc_router/ipc_router_core.c	2019-10-29 09:26:25.821224492 +0100
@@ -0,0 +1,4399 @@
+/* Copyright (c) 2011-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/sched.h>
+#include <linux/poll.h>
+#include <linux/pm.h>
+#include <linux/platform_device.h>
+#include <linux/uaccess.h>
+#include <linux/debugfs.h>
+#include <linux/rwsem.h>
+#include <linux/ipc_logging.h>
+#include <linux/uaccess.h>
+#include <linux/ipc_router.h>
+#include <linux/ipc_router_xprt.h>
+#include <linux/kref.h>
+#include <soc/qcom/subsystem_notif.h>
+#include <soc/qcom/subsystem_restart.h>
+
+#include <asm/byteorder.h>
+
+#include <soc/qcom/smem_log.h>
+
+#include "ipc_router_private.h"
+#include "ipc_router_security.h"
+
+enum {
+	SMEM_LOG = 1U << 0,
+	RTR_DBG = 1U << 1,
+};
+
+static int msm_ipc_router_debug_mask;
+module_param_named(debug_mask, msm_ipc_router_debug_mask,
+		   int, S_IRUGO | S_IWUSR | S_IWGRP);
+#define MODULE_NAME "ipc_router"
+
+#define IPC_RTR_INFO_PAGES 6
+
+#define IPC_RTR_INFO(log_ctx, x...) do { \
+if (log_ctx) \
+	ipc_log_string(log_ctx, x); \
+if (msm_ipc_router_debug_mask & RTR_DBG) \
+	pr_info("[IPCRTR] "x); \
+} while (0)
+
+#define IPC_ROUTER_LOG_EVENT_TX         0x01
+#define IPC_ROUTER_LOG_EVENT_RX         0x02
+#define IPC_ROUTER_LOG_EVENT_TX_ERR     0x03
+#define IPC_ROUTER_LOG_EVENT_RX_ERR     0x04
+#define IPC_ROUTER_DUMMY_DEST_NODE	0xFFFFFFFF
+
+#define ipc_port_sk(port) ((struct sock *)(port))
+
+static LIST_HEAD(control_ports);
+static DECLARE_RWSEM(control_ports_lock_lha5);
+
+#define LP_HASH_SIZE 32
+static struct list_head local_ports[LP_HASH_SIZE];
+static DECLARE_RWSEM(local_ports_lock_lhc2);
+
+/* Server info is organized as a hash table. The server's service ID is
+ * used to index into the hash table. The instance IDs of most servers
+ * are 1 or 2, while the service IDs are well distributed, so indexing by
+ * service ID optimizes hash table operations such as add, lookup and
+ * destroy.
+ */
+#define SRV_HASH_SIZE 32
+static struct list_head server_list[SRV_HASH_SIZE];
+static DECLARE_RWSEM(server_list_lock_lha2);
+
+struct msm_ipc_server {
+	struct list_head list;
+	struct kref ref;
+	struct msm_ipc_port_name name;
+	char pdev_name[32];
+	int next_pdev_id;
+	int synced_sec_rule;
+	struct list_head server_port_list;
+};
+
+struct msm_ipc_server_port {
+	struct list_head list;
+	struct platform_device *pdev;
+	struct msm_ipc_port_addr server_addr;
+	struct msm_ipc_router_xprt_info *xprt_info;
+};
+
+struct msm_ipc_resume_tx_port {
+	struct list_head list;
+	uint32_t port_id;
+	uint32_t node_id;
+};
+
+struct ipc_router_conn_info {
+	struct list_head list;
+	uint32_t port_id;
+};
+
+enum {
+	RESET = 0,
+	VALID = 1,
+};
+
+#define RP_HASH_SIZE 32
+struct msm_ipc_router_remote_port {
+	struct list_head list;
+	struct kref ref;
+	struct mutex rport_lock_lhb2;
+	uint32_t node_id;
+	uint32_t port_id;
+	int status;
+	uint32_t tx_quota_cnt;
+	struct list_head resume_tx_port_list;
+	struct list_head conn_info_list;
+	void *sec_rule;
+	struct msm_ipc_server *server;
+};
+
+struct msm_ipc_router_xprt_info {
+	struct list_head list;
+	struct msm_ipc_router_xprt *xprt;
+	uint32_t remote_node_id;
+	uint32_t initialized;
+	struct list_head pkt_list;
+	struct wakeup_source ws;
+	struct mutex rx_lock_lhb2;
+	struct mutex tx_lock_lhb2;
+	uint32_t need_len;
+	uint32_t abort_data_read;
+	struct work_struct read_data;
+	struct workqueue_struct *workqueue;
+	void *log_ctx;
+	struct kref ref;
+	struct completion ref_complete;
+	bool dynamic_ws;
+};
+
+#define RT_HASH_SIZE 4
+struct msm_ipc_routing_table_entry {
+	struct list_head list;
+	struct kref ref;
+	uint32_t node_id;
+	uint32_t neighbor_node_id;
+	struct list_head remote_port_list[RP_HASH_SIZE];
+	struct msm_ipc_router_xprt_info *xprt_info;
+	struct rw_semaphore lock_lha4;
+	unsigned long num_tx_bytes;
+	unsigned long num_rx_bytes;
+};
+
+#define LOG_CTX_NAME_LEN 32
+struct ipc_rtr_log_ctx {
+	struct list_head list;
+	char log_ctx_name[LOG_CTX_NAME_LEN];
+	void *log_ctx;
+};
+
+static struct list_head routing_table[RT_HASH_SIZE];
+static DECLARE_RWSEM(routing_table_lock_lha3);
+static int routing_table_inited;
+
+static void do_read_data(struct work_struct *work);
+
+static LIST_HEAD(xprt_info_list);
+static DECLARE_RWSEM(xprt_info_list_lock_lha5);
+
+static DEFINE_MUTEX(log_ctx_list_lock_lha0);
+static LIST_HEAD(log_ctx_list);
+static DEFINE_MUTEX(ipc_router_init_lock);
+static bool is_ipc_router_inited;
+static int ipc_router_core_init(void);
+#define IPC_ROUTER_INIT_TIMEOUT (10 * HZ)
+
+static uint32_t next_port_id;
+static DEFINE_MUTEX(next_port_id_lock_lhc1);
+static struct workqueue_struct *msm_ipc_router_workqueue;
+
+static void *local_log_ctx;
+static void *ipc_router_get_log_ctx(char *sub_name);
+static int process_resume_tx_msg(union rr_control_msg *msg,
+				 struct rr_packet *pkt);
+static void ipc_router_reset_conn(struct msm_ipc_router_remote_port *rport_ptr);
+static int ipc_router_get_xprt_info_ref(
+		struct msm_ipc_router_xprt_info *xprt_info);
+static void ipc_router_put_xprt_info_ref(
+		struct msm_ipc_router_xprt_info *xprt_info);
+static void ipc_router_release_xprt_info_ref(struct kref *ref);
+
+struct pil_vote_info {
+	void *pil_handle;
+	struct work_struct load_work;
+	struct work_struct unload_work;
+};
+
+#define PIL_SUBSYSTEM_NAME_LEN 32
+static char default_peripheral[PIL_SUBSYSTEM_NAME_LEN];
+
+enum {
+	DOWN,
+	UP,
+};
+
+static bool is_wakeup_source_allowed;
+
+void msm_ipc_router_set_ws_allowed(bool flag)
+{
+	is_wakeup_source_allowed = flag;
+}
+
+static void init_routing_table(void)
+{
+	int i;
+	for (i = 0; i < RT_HASH_SIZE; i++)
+		INIT_LIST_HEAD(&routing_table[i]);
+}
+
+/**
+ * ipc_router_calc_checksum() - compute the checksum for extended HELLO message
+ * @msg:	Reference to the IPC Router HELLO message.
+ *
+ * Return: Computed checksum value, 0 if msg is NULL.
+ */
+static uint32_t ipc_router_calc_checksum(union rr_control_msg *msg)
+{
+	uint32_t checksum = 0;
+	int i, len;
+	uint16_t upper_nb;
+	uint16_t lower_nb;
+	void *hello;
+
+	if (!msg)
+		return checksum;
+	hello = msg;
+	len = sizeof(*msg);
+
+	for (i = 0; i < len/IPCR_WORD_SIZE; i++) {
+		lower_nb = (*((uint32_t *)hello)) & IPC_ROUTER_CHECKSUM_MASK;
+		upper_nb = ((*((uint32_t *)hello)) >> 16) &
+				IPC_ROUTER_CHECKSUM_MASK;
+		checksum = checksum + upper_nb + lower_nb;
+		hello = ((uint32_t *)hello) + 1;
+	}
+	while (checksum > 0xFFFF)
+		checksum = (checksum & IPC_ROUTER_CHECKSUM_MASK) +
+				((checksum >> 16) & IPC_ROUTER_CHECKSUM_MASK);
+
+	checksum = ~checksum & IPC_ROUTER_CHECKSUM_MASK;
+	return checksum;
+}
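+
+/*
+ * Worked example: every 32-bit word contributes its upper and lower
+ * 16-bit halves to a running sum, carries above 16 bits are folded back
+ * in, and the result is inverted (a ones'-complement checksum). For a
+ * message consisting of the single word 0x00010002 the sum is
+ * 0x0001 + 0x0002 = 0x0003, giving a checksum of ~0x0003 & 0xFFFF =
+ * 0xFFFC.
+ */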
+
+/**
+ * skb_copy_to_log_buf() - copies the required number of bytes from the skb_queue
+ * @skb_head:	skb_queue head that contains the data.
+ * @pl_len:	length of payload need to be copied.
+ * @hdr_offset:	length of the header present in first skb
+ * @log_buf:	The output buffer which will contain the formatted log string
+ *
+ * This function copies the first specified number of bytes from the skb_queue
+ * to a new buffer and formats them to a string for logging.
+ */
+static void skb_copy_to_log_buf(struct sk_buff_head *skb_head,
+				unsigned int pl_len, unsigned int hdr_offset,
+				unsigned char *log_buf)
+{
+	struct sk_buff *temp_skb;
+	unsigned int copied_len = 0, copy_len = 0;
+	int remaining;
+
+	if (!skb_head) {
+		IPC_RTR_ERR("%s: NULL skb_head\n", __func__);
+		return;
+	}
+	temp_skb = skb_peek(skb_head);
+	if (unlikely(!temp_skb || !temp_skb->data)) {
+		IPC_RTR_ERR("%s: No SKBs in skb_queue\n", __func__);
+		return;
+	}
+
+	remaining = temp_skb->len - hdr_offset;
+	skb_queue_walk(skb_head, temp_skb) {
+		copy_len = remaining < pl_len ? remaining : pl_len;
+		memcpy(log_buf + copied_len,
+			temp_skb->data + hdr_offset, copy_len);
+		copied_len += copy_len;
+		hdr_offset = 0;
+		if (copied_len == pl_len)
+			break;
+		remaining = pl_len - remaining;
+	}
+	return;
+}
+
+/**
+ * ipc_router_log_msg() - log all data messages exchanged
+ * @log_ctx:	IPC Logging context specific to each transport
+ * @xchng_type:	Identifies the data to be a receive or send.
+ * @data:	IPC Router data packet or control msg received or to be sent.
+ * @hdr:	Reference to the router header
+ * @port_ptr:	Local IPC Router port.
+ * @rport_ptr:	Remote IPC Router port
+ *
+ * This function builds the log message that would be passed on to the IPC
+ * logging framework. The data messages passed here correspond to the
+ * information exchanged between the IPC Router and its clients.
+ */
+static void ipc_router_log_msg(void *log_ctx, uint32_t xchng_type,
+			void *data, struct rr_header_v1 *hdr,
+			struct msm_ipc_port *port_ptr,
+			struct msm_ipc_router_remote_port *rport_ptr)
+{
+	struct sk_buff_head *skb_head = NULL;
+	union rr_control_msg *msg = NULL;
+	struct rr_packet *pkt = NULL;
+	uint64_t pl_buf = 0;
+	struct sk_buff *skb;
+	uint32_t buf_len = 8;
+	uint32_t svcId = 0;
+	uint32_t svcIns = 0;
+	unsigned int hdr_offset = 0;
+	uint32_t port_type = 0;
+
+	if (!log_ctx || !hdr || !data)
+		return;
+
+	if (hdr->type == IPC_ROUTER_CTRL_CMD_DATA) {
+		pkt = (struct rr_packet *)data;
+		skb_head = pkt->pkt_fragment_q;
+		skb = skb_peek(skb_head);
+		if (!skb || !skb->data) {
+			IPC_RTR_ERR("%s: No SKBs in skb_queue\n", __func__);
+			return;
+		}
+
+		if (skb_queue_len(skb_head) == 1 && skb->len < 8)
+			buf_len = skb->len;
+		if (xchng_type == IPC_ROUTER_LOG_EVENT_TX && hdr->dst_node_id
+				!= IPC_ROUTER_NID_LOCAL) {
+			if (hdr->version == IPC_ROUTER_V1)
+				hdr_offset = sizeof(struct rr_header_v1);
+			else if (hdr->version == IPC_ROUTER_V2)
+				hdr_offset = sizeof(struct rr_header_v2);
+		}
+		skb_copy_to_log_buf(skb_head, buf_len, hdr_offset,
+				    (unsigned char *)&pl_buf);
+
+		if (port_ptr && rport_ptr && (port_ptr->type == CLIENT_PORT)
+				&& (rport_ptr->server != NULL)) {
+			svcId = rport_ptr->server->name.service;
+			svcIns = rport_ptr->server->name.instance;
+			port_type = CLIENT_PORT;
+			port_ptr->last_served_svc_id =
+					rport_ptr->server->name.service;
+		} else if (port_ptr && (port_ptr->type == SERVER_PORT)) {
+			svcId = port_ptr->port_name.service;
+			svcIns = port_ptr->port_name.instance;
+			port_type = SERVER_PORT;
+		}
+		IPC_RTR_INFO(log_ctx,
+			"%s %s %s Len:0x%x T:0x%x CF:0x%x SVC:<0x%x:0x%x> SRC:<0x%x:0x%x> DST:<0x%x:0x%x> DATA: %08x %08x",
+			(xchng_type == IPC_ROUTER_LOG_EVENT_RX ? "" :
+			(xchng_type == IPC_ROUTER_LOG_EVENT_TX ?
+			 current->comm : "")),
+			(port_type == CLIENT_PORT ? "CLI" : "SRV"),
+			(xchng_type == IPC_ROUTER_LOG_EVENT_RX ? "RX" :
+			(xchng_type == IPC_ROUTER_LOG_EVENT_TX ? "TX" :
+			(xchng_type == IPC_ROUTER_LOG_EVENT_TX_ERR ? "TX_ERR" :
+			(xchng_type == IPC_ROUTER_LOG_EVENT_RX_ERR ? "RX_ERR" :
+			 "UNKNOWN")))),
+			hdr->size, hdr->type, hdr->control_flag,
+			svcId, svcIns, hdr->src_node_id, hdr->src_port_id,
+			hdr->dst_node_id, hdr->dst_port_id,
+			(unsigned int)pl_buf, (unsigned int)(pl_buf>>32));
+
+	} else {
+		msg = (union rr_control_msg *)data;
+		if (msg->cmd == IPC_ROUTER_CTRL_CMD_NEW_SERVER ||
+			msg->cmd == IPC_ROUTER_CTRL_CMD_REMOVE_SERVER)
+			IPC_RTR_INFO(log_ctx,
+			"CTL MSG: %s cmd:0x%x SVC:<0x%x:0x%x> ADDR:<0x%x:0x%x>",
+			(xchng_type == IPC_ROUTER_LOG_EVENT_RX ? "RX" :
+			(xchng_type == IPC_ROUTER_LOG_EVENT_TX ? "TX" :
+			(xchng_type == IPC_ROUTER_LOG_EVENT_TX_ERR ? "TX_ERR" :
+			(xchng_type == IPC_ROUTER_LOG_EVENT_RX_ERR ? "RX_ERR" :
+			 "UNKNOWN")))),
+			msg->cmd, msg->srv.service, msg->srv.instance,
+			msg->srv.node_id, msg->srv.port_id);
+		else if (msg->cmd == IPC_ROUTER_CTRL_CMD_REMOVE_CLIENT ||
+				msg->cmd == IPC_ROUTER_CTRL_CMD_RESUME_TX)
+			IPC_RTR_INFO(log_ctx,
+			"CTL MSG: %s cmd:0x%x ADDR: <0x%x:0x%x>",
+			(xchng_type == IPC_ROUTER_LOG_EVENT_RX ? "RX" :
+			(xchng_type == IPC_ROUTER_LOG_EVENT_TX ? "TX" : "ERR")),
+			msg->cmd, msg->cli.node_id, msg->cli.port_id);
+		else if (msg->cmd == IPC_ROUTER_CTRL_CMD_HELLO && hdr)
+			IPC_RTR_INFO(log_ctx, "CTL MSG %s cmd:0x%x ADDR:0x%x",
+			(xchng_type == IPC_ROUTER_LOG_EVENT_RX ? "RX" :
+			(xchng_type == IPC_ROUTER_LOG_EVENT_TX ? "TX" : "ERR")),
+			msg->cmd, hdr->src_node_id);
+		else
+			IPC_RTR_INFO(log_ctx, "%s UNKNOWN cmd:0x%x",
+			(xchng_type == IPC_ROUTER_LOG_EVENT_RX ? "RX" :
+			(xchng_type == IPC_ROUTER_LOG_EVENT_TX ? "TX" : "ERR")),
+			msg->cmd);
+	}
+}
+
+/* Must be called with routing_table_lock_lha3 locked. */
+static struct msm_ipc_routing_table_entry *lookup_routing_table(
+	uint32_t node_id)
+{
+	uint32_t key = (node_id % RT_HASH_SIZE);
+	struct msm_ipc_routing_table_entry *rt_entry;
+
+	list_for_each_entry(rt_entry, &routing_table[key], list) {
+		if (rt_entry->node_id == node_id)
+			return rt_entry;
+	}
+	return NULL;
+}
+
+/**
+ * create_routing_table_entry() - Lookup and create a routing table entry
+ * @node_id: Node ID of the routing table entry to be created.
+ * @xprt_info: XPRT through which the node ID is reachable.
+ *
+ * @return: a reference to the routing table entry on success, NULL on failure.
+ */
+static struct msm_ipc_routing_table_entry *create_routing_table_entry(
+	uint32_t node_id, struct msm_ipc_router_xprt_info *xprt_info)
+{
+	int i;
+	struct msm_ipc_routing_table_entry *rt_entry;
+	uint32_t key;
+
+	down_write(&routing_table_lock_lha3);
+	rt_entry = lookup_routing_table(node_id);
+	if (rt_entry)
+		goto out_create_rtentry1;
+
+	rt_entry = kmalloc(sizeof(struct msm_ipc_routing_table_entry),
+			   GFP_KERNEL);
+	if (!rt_entry) {
+		IPC_RTR_ERR("%s: rt_entry allocation failed for %d\n",
+			__func__, node_id);
+		goto out_create_rtentry2;
+	}
+
+	for (i = 0; i < RP_HASH_SIZE; i++)
+		INIT_LIST_HEAD(&rt_entry->remote_port_list[i]);
+	init_rwsem(&rt_entry->lock_lha4);
+	kref_init(&rt_entry->ref);
+	rt_entry->node_id = node_id;
+	rt_entry->xprt_info = xprt_info;
+	if (xprt_info)
+		rt_entry->neighbor_node_id = xprt_info->remote_node_id;
+
+	key = (node_id % RT_HASH_SIZE);
+	list_add_tail(&rt_entry->list, &routing_table[key]);
+out_create_rtentry1:
+	kref_get(&rt_entry->ref);
+out_create_rtentry2:
+	up_write(&routing_table_lock_lha3);
+	return rt_entry;
+}
+
+/**
+ * ipc_router_get_rtentry_ref() - Get a reference to the routing table entry
+ * @node_id: Node ID of the routing table entry.
+ *
+ * @return: a reference to the routing table entry on success, NULL on failure.
+ *
+ * This function is used to obtain a reference to the routing table entry
+ * corresponding to a node id.
+ */
+static struct msm_ipc_routing_table_entry *ipc_router_get_rtentry_ref(
+	uint32_t node_id)
+{
+	struct msm_ipc_routing_table_entry *rt_entry;
+
+	down_read(&routing_table_lock_lha3);
+	rt_entry = lookup_routing_table(node_id);
+	if (rt_entry)
+		kref_get(&rt_entry->ref);
+	up_read(&routing_table_lock_lha3);
+	return rt_entry;
+}
+
+/**
+ * ipc_router_release_rtentry() - Cleanup and release the routing table entry
+ * @ref: Reference to the entry.
+ *
+ * This function is called when all references to the routing table entry are
+ * released.
+ */
+void ipc_router_release_rtentry(struct kref *ref)
+{
+	struct msm_ipc_routing_table_entry *rt_entry =
+		container_of(ref, struct msm_ipc_routing_table_entry, ref);
+
+	/*
+	 * All references to a routing entry will be put only under SSR.
+	 * As part of SSR, all the internals of the routing table entry
+	 * are cleaned. So just free the routing table entry.
+	 */
+	kfree(rt_entry);
+}
+
+struct rr_packet *rr_read(struct msm_ipc_router_xprt_info *xprt_info)
+{
+	struct rr_packet *temp_pkt;
+
+	if (!xprt_info)
+		return NULL;
+
+	mutex_lock(&xprt_info->rx_lock_lhb2);
+	if (xprt_info->abort_data_read) {
+		mutex_unlock(&xprt_info->rx_lock_lhb2);
+		IPC_RTR_ERR("%s detected SSR & exiting now\n",
+			xprt_info->xprt->name);
+		return NULL;
+	}
+
+	if (list_empty(&xprt_info->pkt_list)) {
+		mutex_unlock(&xprt_info->rx_lock_lhb2);
+		return NULL;
+	}
+
+	temp_pkt = list_first_entry(&xprt_info->pkt_list,
+				    struct rr_packet, list);
+	list_del(&temp_pkt->list);
+	if (list_empty(&xprt_info->pkt_list))
+		__pm_relax(&xprt_info->ws);
+	mutex_unlock(&xprt_info->rx_lock_lhb2);
+	return temp_pkt;
+}
+
+struct rr_packet *clone_pkt(struct rr_packet *pkt)
+{
+	struct rr_packet *cloned_pkt;
+	struct sk_buff *temp_skb, *cloned_skb;
+	struct sk_buff_head *pkt_fragment_q;
+
+	cloned_pkt = kzalloc(sizeof(struct rr_packet), GFP_KERNEL);
+	if (!cloned_pkt) {
+		IPC_RTR_ERR("%s: failure\n", __func__);
+		return NULL;
+	}
+	memcpy(&(cloned_pkt->hdr), &(pkt->hdr), sizeof(struct rr_header_v1));
+	if (pkt->opt_hdr.len > 0) {
+		cloned_pkt->opt_hdr.data = kmalloc(pkt->opt_hdr.len,
+							GFP_KERNEL);
+		if (!cloned_pkt->opt_hdr.data) {
+			IPC_RTR_ERR("%s: Memory allocation Failed\n", __func__);
+		} else {
+			cloned_pkt->opt_hdr.len = pkt->opt_hdr.len;
+			memcpy(cloned_pkt->opt_hdr.data, pkt->opt_hdr.data,
+			       pkt->opt_hdr.len);
+		}
+	}
+
+	pkt_fragment_q = kmalloc(sizeof(struct sk_buff_head), GFP_KERNEL);
+	if (!pkt_fragment_q) {
+		IPC_RTR_ERR("%s: pkt_frag_q alloc failure\n", __func__);
+		kfree(cloned_pkt);
+		return NULL;
+	}
+	skb_queue_head_init(pkt_fragment_q);
+	kref_init(&cloned_pkt->ref);
+
+	skb_queue_walk(pkt->pkt_fragment_q, temp_skb) {
+		cloned_skb = skb_clone(temp_skb, GFP_KERNEL);
+		if (!cloned_skb)
+			goto fail_clone;
+		skb_queue_tail(pkt_fragment_q, cloned_skb);
+	}
+	cloned_pkt->pkt_fragment_q = pkt_fragment_q;
+	cloned_pkt->length = pkt->length;
+	cloned_pkt->ws_need = pkt->ws_need;
+	return cloned_pkt;
+
+fail_clone:
+	while (!skb_queue_empty(pkt_fragment_q)) {
+		temp_skb = skb_dequeue(pkt_fragment_q);
+		kfree_skb(temp_skb);
+	}
+	kfree(pkt_fragment_q);
+	if (cloned_pkt->opt_hdr.len > 0)
+		kfree(cloned_pkt->opt_hdr.data);
+	kfree(cloned_pkt);
+	return NULL;
+}
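+
+/*
+ * Note on the cloning above (illustrative): skb_clone() duplicates only
+ * the sk_buff metadata while the payload stays shared, so posting one
+ * packet to every reader, as post_control_ports() does for control
+ * messages, costs little extra memory.
+ */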
+
+/**
+ * create_pkt() - Create a Router packet
+ * @data: SKB queue to be contained inside the packet.
+ *
+ * @return: pointer to packet on success, NULL on failure.
+ */
+struct rr_packet *create_pkt(struct sk_buff_head *data)
+{
+	struct rr_packet *pkt;
+	struct sk_buff *temp_skb;
+
+	pkt = kzalloc(sizeof(struct rr_packet), GFP_KERNEL);
+	if (!pkt) {
+		IPC_RTR_ERR("%s: failure\n", __func__);
+		return NULL;
+	}
+
+	if (data) {
+		pkt->pkt_fragment_q = data;
+		skb_queue_walk(pkt->pkt_fragment_q, temp_skb)
+			pkt->length += temp_skb->len;
+	} else {
+		pkt->pkt_fragment_q = kmalloc(sizeof(struct sk_buff_head),
+					      GFP_KERNEL);
+		if (!pkt->pkt_fragment_q) {
+			IPC_RTR_ERR("%s: Couldn't alloc pkt_fragment_q\n",
+				    __func__);
+			kfree(pkt);
+			return NULL;
+		}
+		skb_queue_head_init(pkt->pkt_fragment_q);
+	}
+	kref_init(&pkt->ref);
+	return pkt;
+}
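+
+/*
+ * Minimal usage sketch (illustrative; assumes the caller owns an skb
+ * queue "frag_q" that it hands over to the packet):
+ *
+ *	struct rr_packet *pkt = create_pkt(frag_q);
+ *
+ *	if (pkt) {
+ *		... fill pkt->hdr, transmit ...
+ *		release_pkt(pkt);	(also frees frag_q and its SKBs)
+ *	}
+ */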
+
+void release_pkt(struct rr_packet *pkt)
+{
+	struct sk_buff *temp_skb;
+
+	if (!pkt)
+		return;
+
+	if (!pkt->pkt_fragment_q) {
+		kfree(pkt);
+		return;
+	}
+
+	while (!skb_queue_empty(pkt->pkt_fragment_q)) {
+		temp_skb = skb_dequeue(pkt->pkt_fragment_q);
+		kfree_skb(temp_skb);
+	}
+	kfree(pkt->pkt_fragment_q);
+	if (pkt->opt_hdr.len > 0)
+		kfree(pkt->opt_hdr.data);
+	kfree(pkt);
+	return;
+}
+
+static struct sk_buff_head *msm_ipc_router_buf_to_skb(void *buf,
+						unsigned int buf_len)
+{
+	struct sk_buff_head *skb_head;
+	struct sk_buff *skb;
+	int first = 1, offset = 0;
+	int skb_size, data_size;
+	void *data;
+	int last = 1;
+	int align_size;
+
+	skb_head = kmalloc(sizeof(struct sk_buff_head), GFP_KERNEL);
+	if (!skb_head) {
+		IPC_RTR_ERR("%s: Could not allocate skb_head\n", __func__);
+		return NULL;
+	}
+	skb_queue_head_init(skb_head);
+
+	data_size = buf_len;
+	align_size = ALIGN_SIZE(data_size);
+	while (offset != buf_len) {
+		skb_size = data_size;
+		if (first)
+			skb_size += IPC_ROUTER_HDR_SIZE;
+		if (last)
+			skb_size += align_size;
+
+		skb = alloc_skb(skb_size, GFP_KERNEL);
+		if (!skb) {
+			if (skb_size <= (PAGE_SIZE/2)) {
+				IPC_RTR_ERR("%s: cannot allocate skb\n",
+								__func__);
+				goto buf_to_skb_error;
+			}
+			data_size = data_size / 2;
+			last = 0;
+			continue;
+		}
+
+		if (first) {
+			skb_reserve(skb, IPC_ROUTER_HDR_SIZE);
+			first = 0;
+		}
+
+		data = skb_put(skb, data_size);
+		memcpy(skb->data, buf + offset, data_size);
+		skb_queue_tail(skb_head, skb);
+		offset += data_size;
+		data_size = buf_len - offset;
+		last = 1;
+	}
+	return skb_head;
+
+buf_to_skb_error:
+	while (!skb_queue_empty(skb_head)) {
+		skb = skb_dequeue(skb_head);
+		kfree_skb(skb);
+	}
+	kfree(skb_head);
+	return NULL;
+}
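+
+/*
+ * Worked example of the halving strategy above (illustrative): for a
+ * 16 KB buffer, if alloc_skb(16K + header room) fails, the next
+ * attempts request 8 KB, then 4 KB, and so on; the function only gives
+ * up once a request at or below PAGE_SIZE/2 fails. Alignment padding
+ * is reserved on the final fragment alone ("last" is set).
+ */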
+
+static void *msm_ipc_router_skb_to_buf(struct sk_buff_head *skb_head,
+				       unsigned int len)
+{
+	struct sk_buff *temp;
+	unsigned int offset = 0, buf_len = 0, copy_len;
+	void *buf;
+
+	if (!skb_head) {
+		IPC_RTR_ERR("%s: NULL skb_head\n", __func__);
+		return NULL;
+	}
+
+	buf_len = len;
+	buf = kmalloc(buf_len, GFP_KERNEL);
+	if (!buf) {
+		IPC_RTR_ERR("%s: cannot allocate buf\n", __func__);
+		return NULL;
+	}
+	skb_queue_walk(skb_head, temp) {
+		copy_len = buf_len < temp->len ? buf_len : temp->len;
+		memcpy(buf + offset, temp->data, copy_len);
+		offset += copy_len;
+		buf_len -= copy_len;
+	}
+	return buf;
+}
+
+void msm_ipc_router_free_skb(struct sk_buff_head *skb_head)
+{
+	struct sk_buff *temp_skb;
+
+	if (!skb_head)
+		return;
+
+	while (!skb_queue_empty(skb_head)) {
+		temp_skb = skb_dequeue(skb_head);
+		kfree_skb(temp_skb);
+	}
+	kfree(skb_head);
+}
+
+/**
+ * extract_optional_header() - Extract the optional header from skb
+ * @pkt:	Packet structure into which the header has to be extracted.
+ * @opt_len:	The optional header length in word size.
+ *
+ * @return:	Length of optional header in bytes if success, zero otherwise.
+ */
+static int extract_optional_header(struct rr_packet *pkt, uint8_t opt_len)
+{
+	size_t offset = 0, buf_len = 0, copy_len, opt_hdr_len;
+	struct sk_buff *temp;
+	struct sk_buff_head *skb_head;
+
+	opt_hdr_len = opt_len * IPCR_WORD_SIZE;
+	pkt->opt_hdr.data = kmalloc(opt_hdr_len, GFP_KERNEL);
+	if (!pkt->opt_hdr.data) {
+		IPC_RTR_ERR("%s: Memory allocation Failed\n", __func__);
+		return 0;
+	}
+	skb_head = pkt->pkt_fragment_q;
+	buf_len = opt_hdr_len;
+	skb_queue_walk(skb_head, temp) {
+		copy_len = buf_len < temp->len ? buf_len : temp->len;
+		memcpy(pkt->opt_hdr.data + offset, temp->data, copy_len);
+		offset += copy_len;
+		buf_len -= copy_len;
+		skb_pull(temp, copy_len);
+		if (temp->len == 0) {
+			skb_dequeue(skb_head);
+			kfree_skb(temp);
+		}
+	}
+	pkt->opt_hdr.len = opt_hdr_len;
+	return opt_hdr_len;
+}
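+
+/*
+ * Example (illustrative): opt_len is carried in IPCR_WORD_SIZE units,
+ * so with a 4-byte word size an opt_len of 2 makes this routine copy
+ * and strip 8 bytes of optional header from the head of the fragment
+ * queue.
+ */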
+
+/**
+ * extract_header_v1() - Extract IPC Router header of version 1
+ * @pkt: Packet structure into which the header has to be extracted.
+ * @skb: SKB from which the header has to be extracted.
+ *
+ * @return: 0 on success, standard Linux error codes on failure.
+ */
+static int extract_header_v1(struct rr_packet *pkt, struct sk_buff *skb)
+{
+	if (!pkt || !skb) {
+		IPC_RTR_ERR("%s: Invalid pkt or skb\n", __func__);
+		return -EINVAL;
+	}
+
+	memcpy(&pkt->hdr, skb->data, sizeof(struct rr_header_v1));
+	skb_pull(skb, sizeof(struct rr_header_v1));
+	pkt->length -= sizeof(struct rr_header_v1);
+	return 0;
+}
+
+/**
+ * extract_header_v2() - Extract IPC Router header of version 2
+ * @pkt: Packet structure into which the header has to be extracted.
+ * @skb: SKB from which the header has to be extracted.
+ *
+ * @return: 0 on success, standard Linux error codes on failure.
+ */
+static int extract_header_v2(struct rr_packet *pkt, struct sk_buff *skb)
+{
+	struct rr_header_v2 *hdr;
+	uint8_t opt_len;
+	size_t opt_hdr_len;
+	size_t total_hdr_size = sizeof(*hdr);
+
+	if (!pkt || !skb) {
+		IPC_RTR_ERR("%s: Invalid pkt or skb\n", __func__);
+		return -EINVAL;
+	}
+
+	hdr = (struct rr_header_v2 *)skb->data;
+	pkt->hdr.version = (uint32_t)hdr->version;
+	pkt->hdr.type = (uint32_t)hdr->type;
+	pkt->hdr.src_node_id = (uint32_t)hdr->src_node_id;
+	pkt->hdr.src_port_id = (uint32_t)hdr->src_port_id;
+	pkt->hdr.size = (uint32_t)hdr->size;
+	pkt->hdr.control_flag = (uint32_t)hdr->control_flag;
+	pkt->hdr.dst_node_id = (uint32_t)hdr->dst_node_id;
+	pkt->hdr.dst_port_id = (uint32_t)hdr->dst_port_id;
+	opt_len = hdr->opt_len;
+	skb_pull(skb, total_hdr_size);
+	if (opt_len > 0) {
+		opt_hdr_len = extract_optional_header(pkt, opt_len);
+		total_hdr_size += opt_hdr_len;
+	}
+	pkt->length -= total_hdr_size;
+	return 0;
+}
+
+/**
+ * extract_header() - Extract IPC Router header
+ * @pkt: Packet from which the header has to be extracted.
+ *
+ * @return: 0 on success, standard Linux error codes on failure.
+ *
+ * This function will check if the header version is v1 or v2 and invoke
+ * the corresponding helper function to extract the IPC Router header.
+ */
+static int extract_header(struct rr_packet *pkt)
+{
+	struct sk_buff *temp_skb;
+	int ret;
+
+	if (!pkt) {
+		IPC_RTR_ERR("%s: NULL PKT\n", __func__);
+		return -EINVAL;
+	}
+
+	temp_skb = skb_peek(pkt->pkt_fragment_q);
+	if (!temp_skb || !temp_skb->data) {
+		IPC_RTR_ERR("%s: No SKBs in skb_queue\n", __func__);
+		return -EINVAL;
+	}
+
+	if (temp_skb->data[0] == IPC_ROUTER_V1) {
+		ret = extract_header_v1(pkt, temp_skb);
+	} else if (temp_skb->data[0] == IPC_ROUTER_V2) {
+		ret = extract_header_v2(pkt, temp_skb);
+	} else {
+		IPC_RTR_ERR("%s: Invalid Header version %02x\n",
+			__func__, temp_skb->data[0]);
+		print_hex_dump(KERN_ERR, "Header: ", DUMP_PREFIX_ADDRESS,
+			       16, 1, temp_skb->data, pkt->length, true);
+		return -EINVAL;
+	}
+	return ret;
+}
+
+/**
+ * calc_tx_header_size() - Calculate header size to be reserved in SKB
+ * @pkt: Packet in which the space for header has to be reserved.
+ * @dst_xprt_info: XPRT through which the destination is reachable.
+ *
+ * @return: required header size on success,
+ *          standard Linux error codes on failure.
+ *
+ * This function is used to calculate the header size that has to be reserved
+ * in a transmit SKB. The header size is calculated based on the XPRT through
+ * which the destination node is reachable.
+ */
+static int calc_tx_header_size(struct rr_packet *pkt,
+			       struct msm_ipc_router_xprt_info *dst_xprt_info)
+{
+	int hdr_size = 0;
+	int xprt_version = 0;
+	struct msm_ipc_router_xprt_info *xprt_info = dst_xprt_info;
+
+	if (!pkt) {
+		IPC_RTR_ERR("%s: NULL PKT\n", __func__);
+		return -EINVAL;
+	}
+
+	if (xprt_info)
+		xprt_version = xprt_info->xprt->get_version(xprt_info->xprt);
+
+	if (xprt_version == IPC_ROUTER_V1) {
+		pkt->hdr.version = IPC_ROUTER_V1;
+		hdr_size = sizeof(struct rr_header_v1);
+	} else if (xprt_version == IPC_ROUTER_V2) {
+		pkt->hdr.version = IPC_ROUTER_V2;
+		hdr_size = sizeof(struct rr_header_v2) + pkt->opt_hdr.len;
+	} else {
+		IPC_RTR_ERR("%s: Invalid xprt_version %d\n",
+			__func__, xprt_version);
+		hdr_size = -EINVAL;
+	}
+
+	return hdr_size;
+}
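+
+/*
+ * Illustrative sizing performed above: a v1 header reserves
+ * sizeof(struct rr_header_v1) bytes, while a v2 header reserves
+ * sizeof(struct rr_header_v2) + pkt->opt_hdr.len bytes, because the
+ * optional header words travel immediately after the fixed v2 fields.
+ */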
+
+/**
+ * calc_rx_header_size() - Calculate the RX header size
+ * @xprt_info: XPRT info of the received message.
+ *
+ * @return: valid header size on success, INT_MAX on failure.
+ */
+static int calc_rx_header_size(struct msm_ipc_router_xprt_info *xprt_info)
+{
+	int xprt_version = 0;
+	int hdr_size = INT_MAX;
+
+	if (xprt_info)
+		xprt_version = xprt_info->xprt->get_version(xprt_info->xprt);
+
+	if (xprt_version == IPC_ROUTER_V1)
+		hdr_size = sizeof(struct rr_header_v1);
+	else if (xprt_version == IPC_ROUTER_V2)
+		hdr_size = sizeof(struct rr_header_v2);
+	return hdr_size;
+}
+
+/**
+ * prepend_header_v1() - Prepend IPC Router header of version 1
+ * @pkt: Packet structure which contains the header info to be prepended.
+ * @hdr_size: Size of the header
+ *
+ * @return: 0 on success, standard Linux error codes on failure.
+ */
+static int prepend_header_v1(struct rr_packet *pkt, int hdr_size)
+{
+	struct sk_buff *temp_skb;
+	struct rr_header_v1 *hdr;
+
+	if (!pkt || hdr_size <= 0) {
+		IPC_RTR_ERR("%s: Invalid input parameters\n", __func__);
+		return -EINVAL;
+	}
+
+	temp_skb = skb_peek(pkt->pkt_fragment_q);
+	if (!temp_skb || !temp_skb->data) {
+		IPC_RTR_ERR("%s: No SKBs in skb_queue\n", __func__);
+		return -EINVAL;
+	}
+
+	if (skb_headroom(temp_skb) < hdr_size) {
+		temp_skb = alloc_skb(hdr_size, GFP_KERNEL);
+		if (!temp_skb) {
+			IPC_RTR_ERR("%s: Could not allocate SKB of size %d\n",
+				__func__, hdr_size);
+			return -ENOMEM;
+		}
+		skb_reserve(temp_skb, hdr_size);
+	}
+
+	hdr = (struct rr_header_v1 *)skb_push(temp_skb, hdr_size);
+	memcpy(hdr, &pkt->hdr, hdr_size);
+	if (temp_skb != skb_peek(pkt->pkt_fragment_q))
+		skb_queue_head(pkt->pkt_fragment_q, temp_skb);
+	pkt->length += hdr_size;
+	return 0;
+}
+
+/**
+ * prepend_header_v2() - Prepend IPC Router header of version 2
+ * @pkt: Packet structure which contains the header info to be prepended.
+ * @hdr_size: Size of the header
+ *
+ * @return: 0 on success, standard Linux error codes on failure.
+ */
+static int prepend_header_v2(struct rr_packet *pkt, int hdr_size)
+{
+	struct sk_buff *temp_skb;
+	struct rr_header_v2 *hdr;
+
+	if (!pkt || hdr_size <= 0) {
+		IPC_RTR_ERR("%s: Invalid input parameters\n", __func__);
+		return -EINVAL;
+	}
+
+	temp_skb = skb_peek(pkt->pkt_fragment_q);
+	if (!temp_skb || !temp_skb->data) {
+		IPC_RTR_ERR("%s: No SKBs in skb_queue\n", __func__);
+		return -EINVAL;
+	}
+
+	if (skb_headroom(temp_skb) < hdr_size) {
+		temp_skb = alloc_skb(hdr_size, GFP_KERNEL);
+		if (!temp_skb) {
+			IPC_RTR_ERR("%s: Could not allocate SKB of size %d\n",
+				__func__, hdr_size);
+			return -ENOMEM;
+		}
+		skb_reserve(temp_skb, hdr_size);
+	}
+
+	hdr = (struct rr_header_v2 *)skb_push(temp_skb, hdr_size);
+	hdr->version = (uint8_t)pkt->hdr.version;
+	hdr->type = (uint8_t)pkt->hdr.type;
+	hdr->control_flag = (uint8_t)pkt->hdr.control_flag;
+	hdr->size = (uint32_t)pkt->hdr.size;
+	hdr->src_node_id = (uint16_t)pkt->hdr.src_node_id;
+	hdr->src_port_id = (uint16_t)pkt->hdr.src_port_id;
+	hdr->dst_node_id = (uint16_t)pkt->hdr.dst_node_id;
+	hdr->dst_port_id = (uint16_t)pkt->hdr.dst_port_id;
+	if (pkt->opt_hdr.len > 0) {
+		hdr->opt_len = pkt->opt_hdr.len / IPCR_WORD_SIZE;
+		memcpy(hdr + 1, pkt->opt_hdr.data, pkt->opt_hdr.len);
+	} else {
+		hdr->opt_len = 0;
+	}
+	if (temp_skb != skb_peek(pkt->pkt_fragment_q))
+		skb_queue_head(pkt->pkt_fragment_q, temp_skb);
+	pkt->length += hdr_size;
+	return 0;
+}
+
+/**
+ * prepend_header() - Prepend IPC Router header
+ * @pkt: Packet structure which contains the header info to be prepended.
+ * @xprt_info: XPRT through which the packet is transmitted.
+ *
+ * @return: 0 on success, standard Linux error codes on failure.
+ *
+ * This function prepends the header to the packet to be transmitted. The
+ * IPC Router header version to be prepended depends on the XPRT through
+ * which the destination is reachable.
+ */
+static int prepend_header(struct rr_packet *pkt,
+			  struct msm_ipc_router_xprt_info *xprt_info)
+{
+	int hdr_size;
+	struct sk_buff *temp_skb;
+
+	if (!pkt) {
+		IPC_RTR_ERR("%s: NULL PKT\n", __func__);
+		return -EINVAL;
+	}
+
+	temp_skb = skb_peek(pkt->pkt_fragment_q);
+	if (!temp_skb || !temp_skb->data) {
+		IPC_RTR_ERR("%s: No SKBs in skb_queue\n", __func__);
+		return -EINVAL;
+	}
+
+	hdr_size = calc_tx_header_size(pkt, xprt_info);
+	if (hdr_size <= 0)
+		return hdr_size;
+
+	if (pkt->hdr.version == IPC_ROUTER_V1)
+		return prepend_header_v1(pkt, hdr_size);
+	else if (pkt->hdr.version == IPC_ROUTER_V2)
+		return prepend_header_v2(pkt, hdr_size);
+	else
+		return -EINVAL;
+}
+
+/**
+ * defragment_pkt() - Defragment and linearize the packet
+ * @pkt: Packet to be linearized.
+ *
+ * @return: 0 on success, standard Linux error codes on failure.
+ *
+ * Some packets contain fragments of data over multiple SKBs. If an XPRT
+ * does not support fragmented writes, this function linearizes the
+ * multiple SKBs into a single SKB.
+ */
+static int defragment_pkt(struct rr_packet *pkt)
+{
+	struct sk_buff *dst_skb, *src_skb, *temp_skb;
+	int offset = 0, buf_len = 0, copy_len;
+	void *buf;
+	int align_size;
+
+	if (!pkt || pkt->length <= 0) {
+		IPC_RTR_ERR("%s: Invalid PKT\n", __func__);
+		return -EINVAL;
+	}
+
+	if (skb_queue_len(pkt->pkt_fragment_q) == 1)
+		return 0;
+
+	align_size = ALIGN_SIZE(pkt->length);
+	dst_skb = alloc_skb(pkt->length + align_size, GFP_KERNEL);
+	if (!dst_skb) {
+		IPC_RTR_ERR("%s: could not allocate one skb of size %d\n",
+			__func__, pkt->length);
+		return -ENOMEM;
+	}
+	buf = skb_put(dst_skb, pkt->length);
+	buf_len = pkt->length;
+
+	skb_queue_walk(pkt->pkt_fragment_q, src_skb) {
+		copy_len =  buf_len < src_skb->len ? buf_len : src_skb->len;
+		memcpy(buf + offset, src_skb->data, copy_len);
+		offset += copy_len;
+		buf_len -= copy_len;
+	}
+
+	while (!skb_queue_empty(pkt->pkt_fragment_q)) {
+		temp_skb = skb_dequeue(pkt->pkt_fragment_q);
+		kfree_skb(temp_skb);
+	}
+	skb_queue_tail(pkt->pkt_fragment_q, dst_skb);
+	return 0;
+}
+
+static int post_pkt_to_port(struct msm_ipc_port *port_ptr,
+			    struct rr_packet *pkt, int clone)
+{
+	struct rr_packet *temp_pkt = pkt;
+	void (*notify)(unsigned event, void *oob_data,
+		       size_t oob_data_len, void *priv);
+	void (*data_ready)(struct sock *sk) = NULL;
+	struct sock *sk;
+	uint32_t pkt_type;
+
+	if (unlikely(!port_ptr || !pkt))
+		return -EINVAL;
+
+	if (clone) {
+		temp_pkt = clone_pkt(pkt);
+		if (!temp_pkt) {
+			IPC_RTR_ERR(
+			"%s: Error cloning packet for port %08x:%08x\n",
+				__func__, port_ptr->this_port.node_id,
+				port_ptr->this_port.port_id);
+			return -ENOMEM;
+		}
+	}
+
+	mutex_lock(&port_ptr->port_rx_q_lock_lhc3);
+	if (pkt->ws_need)
+		__pm_stay_awake(port_ptr->port_rx_ws);
+	list_add_tail(&temp_pkt->list, &port_ptr->port_rx_q);
+	wake_up(&port_ptr->port_rx_wait_q);
+	notify = port_ptr->notify;
+	pkt_type = temp_pkt->hdr.type;
+	sk = (struct sock *)port_ptr->endpoint;
+	if (sk) {
+		read_lock(&sk->sk_callback_lock);
+		data_ready = sk->sk_data_ready;
+		read_unlock(&sk->sk_callback_lock);
+	}
+	mutex_unlock(&port_ptr->port_rx_q_lock_lhc3);
+	if (notify)
+		notify(pkt_type, NULL, 0, port_ptr->priv);
+	else if (sk && data_ready)
+		data_ready(sk);
+
+	return 0;
+}
+
+/**
+ * ipc_router_peek_pkt_size() - Peek into the packet header to get potential packet size
+ * @data: Starting address of the packet which points to router header.
+ *
+ * @return: potential packet size on success, < 0 on error.
+ *
+ * This function is used by the underlying transport abstraction layer to
+ * peek into the potential packet size of an incoming packet. This information
+ * is used to perform link-layer fragmentation and re-assembly.
+ */
+int ipc_router_peek_pkt_size(char *data)
+{
+	int size;
+
+	if (!data) {
+		pr_err("%s: NULL PKT\n", __func__);
+		return -EINVAL;
+	}
+
+	if (data[0] == IPC_ROUTER_V1)
+		size = ((struct rr_header_v1 *)data)->size +
+			sizeof(struct rr_header_v1);
+	else if (data[0] == IPC_ROUTER_V2)
+		size = ((struct rr_header_v2 *)data)->size +
+			((struct rr_header_v2 *)data)->opt_len * IPCR_WORD_SIZE
+			+ sizeof(struct rr_header_v2);
+	else
+		return -EINVAL;
+
+	size += ALIGN_SIZE(size);
+	return size;
+}
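+
+/*
+ * Worked example (illustrative): for a v1 packet whose header reports
+ * size = 0x20, the peeked value is
+ *	0x20 + sizeof(struct rr_header_v1)
+ * plus the ALIGN_SIZE() padding of that sum, matching what the
+ * transport must read to obtain one complete packet.
+ */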
+
+static int post_control_ports(struct rr_packet *pkt)
+{
+	struct msm_ipc_port *port_ptr;
+
+	if (!pkt)
+		return -EINVAL;
+
+	down_read(&control_ports_lock_lha5);
+	list_for_each_entry(port_ptr, &control_ports, list)
+		post_pkt_to_port(port_ptr, pkt, 1);
+	up_read(&control_ports_lock_lha5);
+	return 0;
+}
+
+static uint32_t allocate_port_id(void)
+{
+	uint32_t port_id = 0, prev_port_id, key;
+	struct msm_ipc_port *port_ptr;
+
+	mutex_lock(&next_port_id_lock_lhc1);
+	prev_port_id = next_port_id;
+	down_read(&local_ports_lock_lhc2);
+	do {
+		next_port_id++;
+		if ((next_port_id & IPC_ROUTER_ADDRESS) == IPC_ROUTER_ADDRESS)
+			next_port_id = 1;
+
+		key = (next_port_id & (LP_HASH_SIZE - 1));
+		if (list_empty(&local_ports[key])) {
+			port_id = next_port_id;
+			break;
+		}
+		list_for_each_entry(port_ptr, &local_ports[key], list) {
+			if (port_ptr->this_port.port_id == next_port_id) {
+				port_id = next_port_id;
+				break;
+			}
+		}
+		if (!port_id) {
+			port_id = next_port_id;
+			break;
+		}
+		port_id = 0;
+	} while (next_port_id != prev_port_id);
+	up_read(&local_ports_lock_lhc2);
+	mutex_unlock(&next_port_id_lock_lhc1);
+
+	return port_id;
+}
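+
+/*
+ * Example of the bucket selection above (illustrative, assuming an
+ * LP_HASH_SIZE of 32): candidate port ID 0x1003 lands in bucket
+ * (0x1003 & 31) = 3. The allocator wraps back to 1 before reaching
+ * IPC_ROUTER_ADDRESS and gives up only after every candidate in the
+ * ID space has been tried once.
+ */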
+
+void msm_ipc_router_add_local_port(struct msm_ipc_port *port_ptr)
+{
+	uint32_t key;
+
+	if (!port_ptr)
+		return;
+
+	key = (port_ptr->this_port.port_id & (LP_HASH_SIZE - 1));
+	down_write(&local_ports_lock_lhc2);
+	list_add_tail(&port_ptr->list, &local_ports[key]);
+	up_write(&local_ports_lock_lhc2);
+}
+
+/**
+ * msm_ipc_router_create_raw_port() - Create an IPC Router port
+ * @endpoint: User-space socket information to be cached.
+ * @notify: Function to notify incoming events on the port.
+ *   @event: Event ID to be handled.
+ *   @oob_data: Any out-of-band data associated with the event.
+ *   @oob_data_len: Size of the out-of-band data, if valid.
+ *   @priv: Private data registered during the port creation.
+ * @priv: Private Data to be passed during the event notification.
+ *
+ * @return: Valid pointer to port on success, NULL on failure.
+ *
+ * This function is used to create an IPC Router port. The port is used for
+ * communication locally or outside the subsystem.
+ */
+struct msm_ipc_port *msm_ipc_router_create_raw_port(void *endpoint,
+	void (*notify)(unsigned event, void *oob_data,
+		       size_t oob_data_len, void *priv),
+	void *priv)
+{
+	struct msm_ipc_port *port_ptr;
+
+	port_ptr = kzalloc(sizeof(struct msm_ipc_port), GFP_KERNEL);
+	if (!port_ptr)
+		return NULL;
+
+	port_ptr->this_port.node_id = IPC_ROUTER_NID_LOCAL;
+	port_ptr->this_port.port_id = allocate_port_id();
+	if (!port_ptr->this_port.port_id) {
+		IPC_RTR_ERR("%s: All port ids are in use\n", __func__);
+		kfree(port_ptr);
+		return NULL;
+	}
+
+	mutex_init(&port_ptr->port_lock_lhc3);
+	INIT_LIST_HEAD(&port_ptr->port_rx_q);
+	mutex_init(&port_ptr->port_rx_q_lock_lhc3);
+	init_waitqueue_head(&port_ptr->port_rx_wait_q);
+	snprintf(port_ptr->rx_ws_name, MAX_WS_NAME_SZ,
+		 "ipc%08x_%d_%s",
+		 port_ptr->this_port.port_id,
+		 task_pid_nr(current),
+		 current->comm);
+	port_ptr->port_rx_ws = wakeup_source_register(port_ptr->rx_ws_name);
+	if (!port_ptr->port_rx_ws) {
+		kfree(port_ptr);
+		return NULL;
+	}
+	init_waitqueue_head(&port_ptr->port_tx_wait_q);
+	kref_init(&port_ptr->ref);
+
+	port_ptr->endpoint = endpoint;
+	port_ptr->notify = notify;
+	port_ptr->priv = priv;
+
+	msm_ipc_router_add_local_port(port_ptr);
+	if (endpoint)
+		sock_hold(ipc_port_sk(endpoint));
+	return port_ptr;
+}
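+
+/*
+ * Minimal usage sketch (illustrative; "my_notify" and "my_priv" are
+ * hypothetical caller-side names, and a NULL endpoint means no socket
+ * backs the port):
+ *
+ *	struct msm_ipc_port *port;
+ *
+ *	port = msm_ipc_router_create_raw_port(NULL, my_notify, my_priv);
+ *	if (!port)
+ *		... no free port ID, or out of memory ...
+ */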
+
+/**
+ * ipc_router_get_port_ref() - Get a reference to the local port
+ * @port_id: Port ID of the local port for which a reference is to be taken.
+ *
+ * @return: If port is found, a reference to the port is returned.
+ *          Else NULL is returned.
+ */
+static struct msm_ipc_port *ipc_router_get_port_ref(uint32_t port_id)
+{
+	int key = (port_id & (LP_HASH_SIZE - 1));
+	struct msm_ipc_port *port_ptr;
+
+	down_read(&local_ports_lock_lhc2);
+	list_for_each_entry(port_ptr, &local_ports[key], list) {
+		if (port_ptr->this_port.port_id == port_id) {
+			kref_get(&port_ptr->ref);
+			up_read(&local_ports_lock_lhc2);
+			return port_ptr;
+		}
+	}
+	up_read(&local_ports_lock_lhc2);
+	return NULL;
+}
+
+/**
+ * ipc_router_release_port() - Cleanup and release the port
+ * @ref: Reference to the port.
+ *
+ * This function is called when all references to the port are released.
+ */
+void ipc_router_release_port(struct kref *ref)
+{
+	struct rr_packet *pkt, *temp_pkt;
+	struct msm_ipc_port *port_ptr =
+		container_of(ref, struct msm_ipc_port, ref);
+
+	mutex_lock(&port_ptr->port_rx_q_lock_lhc3);
+	list_for_each_entry_safe(pkt, temp_pkt, &port_ptr->port_rx_q, list) {
+		list_del(&pkt->list);
+		release_pkt(pkt);
+	}
+	mutex_unlock(&port_ptr->port_rx_q_lock_lhc3);
+	wakeup_source_unregister(port_ptr->port_rx_ws);
+	if (port_ptr->endpoint)
+		sock_put(ipc_port_sk(port_ptr->endpoint));
+	kfree(port_ptr);
+}
+
+/**
+ * ipc_router_get_rport_ref()- Get reference to the remote port
+ * @node_id: Node ID corresponding to the remote port.
+ * @port_id: Port ID corresponding to the remote port.
+ *
+ * @return: a reference to the remote port on success, NULL on failure.
+ */
+static struct msm_ipc_router_remote_port *ipc_router_get_rport_ref(
+		uint32_t node_id, uint32_t port_id)
+{
+	struct msm_ipc_router_remote_port *rport_ptr;
+	struct msm_ipc_routing_table_entry *rt_entry;
+	int key = (port_id & (RP_HASH_SIZE - 1));
+
+	rt_entry = ipc_router_get_rtentry_ref(node_id);
+	if (!rt_entry) {
+		IPC_RTR_ERR("%s: Node is not up\n", __func__);
+		return NULL;
+	}
+
+	down_read(&rt_entry->lock_lha4);
+	list_for_each_entry(rport_ptr,
+			    &rt_entry->remote_port_list[key], list) {
+		if (rport_ptr->port_id == port_id) {
+			kref_get(&rport_ptr->ref);
+			goto out_lookup_rmt_port1;
+		}
+	}
+	rport_ptr = NULL;
+out_lookup_rmt_port1:
+	up_read(&rt_entry->lock_lha4);
+	kref_put(&rt_entry->ref, ipc_router_release_rtentry);
+	return rport_ptr;
+}
+
+/**
+ * ipc_router_create_rport() - Create a remote port
+ * @node_id: Node ID corresponding to the remote port.
+ * @port_id: Port ID corresponding to the remote port.
+ * @xprt_info: XPRT through which the concerned node is reachable.
+ *
+ * @return: a reference to the remote port on success, NULL on failure.
+ */
+static struct msm_ipc_router_remote_port *ipc_router_create_rport(
+				uint32_t node_id, uint32_t port_id,
+				struct msm_ipc_router_xprt_info *xprt_info)
+{
+	struct msm_ipc_router_remote_port *rport_ptr;
+	struct msm_ipc_routing_table_entry *rt_entry;
+	int key = (port_id & (RP_HASH_SIZE - 1));
+
+	rt_entry = create_routing_table_entry(node_id, xprt_info);
+	if (!rt_entry) {
+		IPC_RTR_ERR("%s: Node cannot be created\n", __func__);
+		return NULL;
+	}
+
+	down_write(&rt_entry->lock_lha4);
+	list_for_each_entry(rport_ptr,
+			    &rt_entry->remote_port_list[key], list) {
+		if (rport_ptr->port_id == port_id)
+			goto out_create_rmt_port1;
+	}
+
+	rport_ptr = kmalloc(sizeof(struct msm_ipc_router_remote_port),
+			    GFP_KERNEL);
+	if (!rport_ptr) {
+		IPC_RTR_ERR("%s: Remote port alloc failed\n", __func__);
+		goto out_create_rmt_port2;
+	}
+	rport_ptr->port_id = port_id;
+	rport_ptr->node_id = node_id;
+	rport_ptr->status = VALID;
+	rport_ptr->sec_rule = NULL;
+	rport_ptr->server = NULL;
+	rport_ptr->tx_quota_cnt = 0;
+	kref_init(&rport_ptr->ref);
+	mutex_init(&rport_ptr->rport_lock_lhb2);
+	INIT_LIST_HEAD(&rport_ptr->resume_tx_port_list);
+	INIT_LIST_HEAD(&rport_ptr->conn_info_list);
+	list_add_tail(&rport_ptr->list,
+		      &rt_entry->remote_port_list[key]);
+out_create_rmt_port1:
+	kref_get(&rport_ptr->ref);
+out_create_rmt_port2:
+	up_write(&rt_entry->lock_lha4);
+	kref_put(&rt_entry->ref, ipc_router_release_rtentry);
+	return rport_ptr;
+}
+
+/**
+ * msm_ipc_router_free_resume_tx_port() - Free the resume_tx ports
+ * @rport_ptr: Pointer to the remote port.
+ *
+ * This function deletes all the resume_tx ports associated with a remote port
+ * and frees the memory allocated to each resume_tx port.
+ *
+ * Must be called with rport_ptr->rport_lock_lhb2 locked.
+ */
+static void msm_ipc_router_free_resume_tx_port(
+	struct msm_ipc_router_remote_port *rport_ptr)
+{
+	struct msm_ipc_resume_tx_port *rtx_port, *tmp_rtx_port;
+
+	list_for_each_entry_safe(rtx_port, tmp_rtx_port,
+			&rport_ptr->resume_tx_port_list, list) {
+		list_del(&rtx_port->list);
+		kfree(rtx_port);
+	}
+}
+
+/**
+ * msm_ipc_router_lookup_resume_tx_port() - Look up the resume_tx port list
+ * @rport_ptr: Remote port whose resume_tx port list needs to be searched.
+ * @port_id: Port ID to be looked up in the list.
+ *
+ * @return: 1 if the port_id is found in the list, else 0.
+ *
+ * This function is used to look up the existence of a local port in the
+ * remote port's resume_tx list, ensuring that the same port is not added
+ * to the remote port's resume_tx list repeatedly.
+ *
+ * Must be called with rport_ptr->rport_lock_lhb2 locked.
+ */
+static int msm_ipc_router_lookup_resume_tx_port(
+	struct msm_ipc_router_remote_port *rport_ptr, uint32_t port_id)
+{
+	struct msm_ipc_resume_tx_port *rtx_port;
+
+	list_for_each_entry(rtx_port, &rport_ptr->resume_tx_port_list, list) {
+		if (port_id == rtx_port->port_id)
+			return 1;
+	}
+	return 0;
+}
+
+/**
+ * ipc_router_dummy_write_space() - Dummy write space available callback
+ * @sk:	Socket pointer for which the callback is called.
+ */
+void ipc_router_dummy_write_space(struct sock *sk)
+{
+}
+
+/**
+ * post_resume_tx() - Post the resume_tx event
+ * @rport_ptr: Pointer to the remote port.
+ * @pkt: The data packet that is received on a resume_tx event.
+ * @msg: Out-of-band data to be passed to kernel drivers.
+ *
+ * This function informs all the local ports in the resume_tx_port_list of
+ * the remote port pointed to by rport_ptr about the reception of the
+ * resume_tx message. On posting the information, this function sequentially
+ * deletes each entry in the resume_tx_port_list of the remote port.
+ *
+ * Must be called with rport_ptr->rport_lock_lhb2 locked.
+ */
+static void post_resume_tx(struct msm_ipc_router_remote_port *rport_ptr,
+			   struct rr_packet *pkt, union rr_control_msg *msg)
+{
+	struct msm_ipc_resume_tx_port *rtx_port, *tmp_rtx_port;
+	struct msm_ipc_port *local_port;
+	struct sock *sk;
+	void (*write_space)(struct sock *sk) = NULL;
+
+	list_for_each_entry_safe(rtx_port, tmp_rtx_port,
+				&rport_ptr->resume_tx_port_list, list) {
+		local_port = ipc_router_get_port_ref(rtx_port->port_id);
+		if (local_port && local_port->notify) {
+			wake_up(&local_port->port_tx_wait_q);
+			local_port->notify(IPC_ROUTER_CTRL_CMD_RESUME_TX, msg,
+					   sizeof(*msg), local_port->priv);
+		} else if (local_port) {
+			wake_up(&local_port->port_tx_wait_q);
+			sk = ipc_port_sk(local_port->endpoint);
+			if (sk) {
+				read_lock(&sk->sk_callback_lock);
+				write_space = sk->sk_write_space;
+				read_unlock(&sk->sk_callback_lock);
+			}
+			if (write_space &&
+			    write_space != ipc_router_dummy_write_space)
+				write_space(sk);
+			else
+				post_pkt_to_port(local_port, pkt, 1);
+		} else {
+			IPC_RTR_ERR("%s: Local Port %d not Found\n",
+				__func__, rtx_port->port_id);
+		}
+		if (local_port)
+			kref_put(&local_port->ref, ipc_router_release_port);
+		list_del(&rtx_port->list);
+		kfree(rtx_port);
+	}
+}
+
+/**
+ * signal_rport_exit() - Signal the local ports of remote port exit
+ * @rport_ptr: Remote port that is exiting.
+ *
+ * This function is used to signal the local ports that are waiting
+ * to resume transmission to a remote port that is exiting.
+ */
+static void signal_rport_exit(struct msm_ipc_router_remote_port *rport_ptr)
+{
+	struct msm_ipc_resume_tx_port *rtx_port, *tmp_rtx_port;
+	struct msm_ipc_port *local_port;
+
+	mutex_lock(&rport_ptr->rport_lock_lhb2);
+	rport_ptr->status = RESET;
+	list_for_each_entry_safe(rtx_port, tmp_rtx_port,
+				 &rport_ptr->resume_tx_port_list, list) {
+		local_port = ipc_router_get_port_ref(rtx_port->port_id);
+		if (local_port) {
+			wake_up(&local_port->port_tx_wait_q);
+			kref_put(&local_port->ref, ipc_router_release_port);
+		}
+		list_del(&rtx_port->list);
+		kfree(rtx_port);
+	}
+	mutex_unlock(&rport_ptr->rport_lock_lhb2);
+}
+
+/**
+ * ipc_router_release_rport() - Cleanup and release the remote port
+ * @ref: Reference to the remote port.
+ *
+ * This function is called when all references to the remote port are released.
+ */
+static void ipc_router_release_rport(struct kref *ref)
+{
+	struct msm_ipc_router_remote_port *rport_ptr =
+		container_of(ref, struct msm_ipc_router_remote_port, ref);
+
+	mutex_lock(&rport_ptr->rport_lock_lhb2);
+	msm_ipc_router_free_resume_tx_port(rport_ptr);
+	mutex_unlock(&rport_ptr->rport_lock_lhb2);
+	kfree(rport_ptr);
+}
+
+/**
+ * ipc_router_destroy_rport() - Destroy the remote port
+ * @rport_ptr: Pointer to the remote port to be destroyed.
+ */
+static void ipc_router_destroy_rport(
+	struct msm_ipc_router_remote_port *rport_ptr)
+{
+	uint32_t node_id;
+	struct msm_ipc_routing_table_entry *rt_entry;
+
+	if (!rport_ptr)
+		return;
+
+	node_id = rport_ptr->node_id;
+	rt_entry = ipc_router_get_rtentry_ref(node_id);
+	if (!rt_entry) {
+		IPC_RTR_ERR("%s: Node %d is not up\n", __func__, node_id);
+		return;
+	}
+	down_write(&rt_entry->lock_lha4);
+	list_del(&rport_ptr->list);
+	up_write(&rt_entry->lock_lha4);
+	signal_rport_exit(rport_ptr);
+	kref_put(&rport_ptr->ref, ipc_router_release_rport);
+	kref_put(&rt_entry->ref, ipc_router_release_rtentry);
+	return;
+}
+
+/**
+ * msm_ipc_router_lookup_server() - Lookup server information
+ * @service: Service ID of the server info to be looked up.
+ * @instance: Instance ID of the server info to be looked up.
+ * @node_id: Node/Processor ID in which the server is hosted.
+ * @port_id: Port ID within the node in which the server is hosted.
+ *
+ * @return: If found Pointer to server structure, else NULL.
+ *
+ * Note1: Lock the server_list_lock_lha2 before accessing this function.
+ * Note2: If the <node_id:port_id> are <0:0>, then the lookup is restricted
+ *        to <service:instance>. Used only when a client wants to send a
+ *        message to any QMI server.
+ */
+static struct msm_ipc_server *msm_ipc_router_lookup_server(
+				uint32_t service,
+				uint32_t instance,
+				uint32_t node_id,
+				uint32_t port_id)
+{
+	struct msm_ipc_server *server;
+	struct msm_ipc_server_port *server_port;
+	int key = (service & (SRV_HASH_SIZE - 1));
+
+	list_for_each_entry(server, &server_list[key], list) {
+		if ((server->name.service != service) ||
+		    (server->name.instance != instance))
+			continue;
+		if ((node_id == 0) && (port_id == 0))
+			return server;
+		list_for_each_entry(server_port, &server->server_port_list,
+				    list) {
+			if ((server_port->server_addr.node_id == node_id) &&
+			    (server_port->server_addr.port_id == port_id))
+				return server;
+		}
+	}
+	return NULL;
+}
+
+/**
+ * ipc_router_get_server_ref() - Get reference to the server
+ * @svc: Service ID for which the reference is required.
+ * @ins: Instance ID for which the reference is required.
+ * @node_id: Node/Processor ID in which the server is hosted.
+ * @port_id: Port ID within the node in which the server is hosted.
+ *
+ * @return: If found return reference to server, else NULL.
+ */
+static struct msm_ipc_server *ipc_router_get_server_ref(
+	uint32_t svc, uint32_t ins, uint32_t node_id, uint32_t port_id)
+{
+	struct msm_ipc_server *server;
+
+	down_read(&server_list_lock_lha2);
+	server = msm_ipc_router_lookup_server(svc, ins, node_id, port_id);
+	if (server)
+		kref_get(&server->ref);
+	up_read(&server_list_lock_lha2);
+	return server;
+}
+
+/**
+ * ipc_router_release_server() - Cleanup and release the server
+ * @ref: Reference to the server.
+ *
+ * This function is called when all references to the server are released.
+ */
+static void ipc_router_release_server(struct kref *ref)
+{
+	struct msm_ipc_server *server =
+		container_of(ref, struct msm_ipc_server, ref);
+
+	kfree(server);
+}
+
+/**
+ * msm_ipc_router_create_server() - Add server info to hash table
+ * @service: Service ID of the server info to be created.
+ * @instance: Instance ID of the server info to be created.
+ * @node_id: Node/Processor ID in which the server is hosted.
+ * @port_id: Port ID within the node in which the server is hosted.
+ * @xprt_info: XPRT through which the node hosting the server is reached.
+ *
+ * @return: Pointer to server structure on success, else NULL.
+ *
+ * This function adds the server info to the hash table. If the same
+ * server (i.e. <service_id:instance_id>) is hosted in different nodes,
+ * the hosts are maintained as a list of "server_port" entries under the
+ * "server" structure.
+ */
+static struct msm_ipc_server *msm_ipc_router_create_server(
+					uint32_t service,
+					uint32_t instance,
+					uint32_t node_id,
+					uint32_t port_id,
+		struct msm_ipc_router_xprt_info *xprt_info)
+{
+	struct msm_ipc_server *server = NULL;
+	struct msm_ipc_server_port *server_port;
+	struct platform_device *pdev;
+	int key = (service & (SRV_HASH_SIZE - 1));
+
+	down_write(&server_list_lock_lha2);
+	server = msm_ipc_router_lookup_server(service, instance, 0, 0);
+	if (server) {
+		list_for_each_entry(server_port, &server->server_port_list,
+				    list) {
+			if ((server_port->server_addr.node_id == node_id) &&
+			    (server_port->server_addr.port_id == port_id))
+				goto return_server;
+		}
+		goto create_srv_port;
+	}
+
+	server = kzalloc(sizeof(struct msm_ipc_server), GFP_KERNEL);
+	if (!server) {
+		up_write(&server_list_lock_lha2);
+		IPC_RTR_ERR("%s: Server allocation failed\n", __func__);
+		return NULL;
+	}
+	server->name.service = service;
+	server->name.instance = instance;
+	server->synced_sec_rule = 0;
+	INIT_LIST_HEAD(&server->server_port_list);
+	kref_init(&server->ref);
+	list_add_tail(&server->list, &server_list[key]);
+	scnprintf(server->pdev_name, sizeof(server->pdev_name),
+		  "SVC%08x:%08x", service, instance);
+	server->next_pdev_id = 1;
+
+create_srv_port:
+	server_port = kzalloc(sizeof(struct msm_ipc_server_port), GFP_KERNEL);
+	pdev = platform_device_alloc(server->pdev_name, server->next_pdev_id);
+	if (!server_port || !pdev) {
+		kfree(server_port);
+		if (pdev)
+			platform_device_put(pdev);
+		if (list_empty(&server->server_port_list)) {
+			list_del(&server->list);
+			kfree(server);
+		}
+		up_write(&server_list_lock_lha2);
+		IPC_RTR_ERR("%s: Server Port allocation failed\n", __func__);
+		return NULL;
+	}
+	server_port->pdev = pdev;
+	server_port->server_addr.node_id = node_id;
+	server_port->server_addr.port_id = port_id;
+	server_port->xprt_info = xprt_info;
+	list_add_tail(&server_port->list, &server->server_port_list);
+	server->next_pdev_id++;
+	platform_device_add(server_port->pdev);
+
+return_server:
+	/* Add a reference so that the caller can put it back */
+	kref_get(&server->ref);
+	up_write(&server_list_lock_lha2);
+	return server;
+}
+
+/**
+ * ipc_router_destroy_server_nolock() - Remove server info from hash table
+ * @server: Server info to be removed.
+ * @node_id: Node/Processor ID in which the server is hosted.
+ * @port_id: Port ID within the node in which the server is hosted.
+ *
+ * This function removes the server_port identified using <node_id:port_id>
+ * from the server structure. If the server_port list under server structure
+ * is empty after removal, then remove the server structure from the server
+ * hash table. This function must be called with server_list_lock_lha2 locked.
+ */
+static void ipc_router_destroy_server_nolock(struct msm_ipc_server *server,
+					  uint32_t node_id, uint32_t port_id)
+{
+	struct msm_ipc_server_port *server_port;
+	bool server_port_found = false;
+
+	if (!server)
+		return;
+
+	list_for_each_entry(server_port, &server->server_port_list, list) {
+		if ((server_port->server_addr.node_id == node_id) &&
+		    (server_port->server_addr.port_id == port_id)) {
+			server_port_found = true;
+			break;
+		}
+	}
+	if (server_port_found && server_port) {
+		platform_device_unregister(server_port->pdev);
+		list_del(&server_port->list);
+		kfree(server_port);
+	}
+	if (list_empty(&server->server_port_list)) {
+		list_del(&server->list);
+		kref_put(&server->ref, ipc_router_release_server);
+	}
+	return;
+}
+
+/**
+ * ipc_router_destroy_server() - Remove server info from hash table
+ * @server: Server info to be removed.
+ * @node_id: Node/Processor ID in which the server is hosted.
+ * @port_id: Port ID within the node in which the server is hosted.
+ *
+ * This function removes the server_port identified using <node_id:port_id>
+ * from the server structure. If the server_port list under server structure
+ * is empty after removal, then remove the server structure from the server
+ * hash table.
+ */
+static void ipc_router_destroy_server(struct msm_ipc_server *server,
+				      uint32_t node_id, uint32_t port_id)
+{
+	down_write(&server_list_lock_lha2);
+	ipc_router_destroy_server_nolock(server, node_id, port_id);
+	up_write(&server_list_lock_lha2);
+	return;
+}
+
+static int ipc_router_send_ctl_msg(
+		struct msm_ipc_router_xprt_info *xprt_info,
+		union rr_control_msg *msg,
+		uint32_t dst_node_id)
+{
+	struct rr_packet *pkt;
+	struct sk_buff *ipc_rtr_pkt;
+	struct rr_header_v1 *hdr;
+	int pkt_size;
+	void *data;
+	int ret = -EINVAL;
+
+	pkt = create_pkt(NULL);
+	if (!pkt) {
+		IPC_RTR_ERR("%s: pkt alloc failed\n", __func__);
+		return -ENOMEM;
+	}
+
+	pkt_size = IPC_ROUTER_HDR_SIZE + sizeof(*msg);
+	ipc_rtr_pkt = alloc_skb(pkt_size, GFP_KERNEL);
+	if (!ipc_rtr_pkt) {
+		IPC_RTR_ERR("%s: ipc_rtr_pkt alloc failed\n", __func__);
+		release_pkt(pkt);
+		return -ENOMEM;
+	}
+
+	skb_reserve(ipc_rtr_pkt, IPC_ROUTER_HDR_SIZE);
+	data = skb_put(ipc_rtr_pkt, sizeof(*msg));
+	memcpy(data, msg, sizeof(*msg));
+	skb_queue_tail(pkt->pkt_fragment_q, ipc_rtr_pkt);
+	pkt->length = sizeof(*msg);
+
+	hdr = &(pkt->hdr);
+	hdr->version = IPC_ROUTER_V1;
+	hdr->type = msg->cmd;
+	hdr->src_node_id = IPC_ROUTER_NID_LOCAL;
+	hdr->src_port_id = IPC_ROUTER_ADDRESS;
+	hdr->control_flag = 0;
+	hdr->size = sizeof(*msg);
+	if (hdr->type == IPC_ROUTER_CTRL_CMD_RESUME_TX ||
+	    (!xprt_info && dst_node_id == IPC_ROUTER_NID_LOCAL))
+		hdr->dst_node_id = dst_node_id;
+	else if (xprt_info)
+		hdr->dst_node_id = xprt_info->remote_node_id;
+	hdr->dst_port_id = IPC_ROUTER_ADDRESS;
+
+	if (dst_node_id == IPC_ROUTER_NID_LOCAL &&
+	    msg->cmd != IPC_ROUTER_CTRL_CMD_RESUME_TX) {
+		ipc_router_log_msg(local_log_ctx,
+				IPC_ROUTER_LOG_EVENT_TX, msg, hdr, NULL, NULL);
+		ret = post_control_ports(pkt);
+	} else if (dst_node_id == IPC_ROUTER_NID_LOCAL &&
+		   msg->cmd == IPC_ROUTER_CTRL_CMD_RESUME_TX) {
+		ipc_router_log_msg(local_log_ctx,
+				IPC_ROUTER_LOG_EVENT_TX, msg, hdr, NULL, NULL);
+		ret = process_resume_tx_msg(msg, pkt);
+	} else if (xprt_info && (msg->cmd == IPC_ROUTER_CTRL_CMD_HELLO ||
+		   xprt_info->initialized)) {
+		mutex_lock(&xprt_info->tx_lock_lhb2);
+		ipc_router_log_msg(xprt_info->log_ctx,
+				IPC_ROUTER_LOG_EVENT_TX, msg, hdr, NULL, NULL);
+		ret = prepend_header(pkt, xprt_info);
+		if (ret < 0) {
+			mutex_unlock(&xprt_info->tx_lock_lhb2);
+			IPC_RTR_ERR("%s: Prepend Header failed\n", __func__);
+			release_pkt(pkt);
+			return ret;
+		}
+
+		ret = xprt_info->xprt->write(pkt, pkt->length, xprt_info->xprt);
+		mutex_unlock(&xprt_info->tx_lock_lhb2);
+	}
+
+	release_pkt(pkt);
+	return ret;
+}
+
+static int msm_ipc_router_send_server_list(uint32_t node_id,
+		struct msm_ipc_router_xprt_info *xprt_info)
+{
+	union rr_control_msg ctl;
+	struct msm_ipc_server *server;
+	struct msm_ipc_server_port *server_port;
+	int i;
+
+	if (!xprt_info || !xprt_info->initialized) {
+		IPC_RTR_ERR("%s: Xprt info not initialized\n", __func__);
+		return -EINVAL;
+	}
+
+	memset(&ctl, 0, sizeof(ctl));
+	ctl.cmd = IPC_ROUTER_CTRL_CMD_NEW_SERVER;
+
+	for (i = 0; i < SRV_HASH_SIZE; i++) {
+		list_for_each_entry(server, &server_list[i], list) {
+			ctl.srv.service = server->name.service;
+			ctl.srv.instance = server->name.instance;
+			list_for_each_entry(server_port,
+					    &server->server_port_list, list) {
+				if (server_port->server_addr.node_id !=
+				    node_id)
+					continue;
+
+				ctl.srv.node_id =
+					server_port->server_addr.node_id;
+				ctl.srv.port_id =
+					server_port->server_addr.port_id;
+				ipc_router_send_ctl_msg(xprt_info,
+					&ctl, IPC_ROUTER_DUMMY_DEST_NODE);
+			}
+		}
+	}
+
+	return 0;
+}
+
+static int broadcast_ctl_msg_locally(union rr_control_msg *msg)
+{
+	return ipc_router_send_ctl_msg(NULL, msg, IPC_ROUTER_NID_LOCAL);
+}
+
+static int broadcast_ctl_msg(union rr_control_msg *ctl)
+{
+	struct msm_ipc_router_xprt_info *xprt_info;
+
+	down_read(&xprt_info_list_lock_lha5);
+	list_for_each_entry(xprt_info, &xprt_info_list, list) {
+		ipc_router_send_ctl_msg(xprt_info, ctl,
+					IPC_ROUTER_DUMMY_DEST_NODE);
+	}
+	up_read(&xprt_info_list_lock_lha5);
+	broadcast_ctl_msg_locally(ctl);
+
+	return 0;
+}
+
+static int relay_ctl_msg(struct msm_ipc_router_xprt_info *xprt_info,
+			 union rr_control_msg *ctl)
+{
+	struct msm_ipc_router_xprt_info *fwd_xprt_info;
+
+	if (!xprt_info || !ctl)
+		return -EINVAL;
+
+	down_read(&xprt_info_list_lock_lha5);
+	list_for_each_entry(fwd_xprt_info, &xprt_info_list, list) {
+		if (xprt_info->xprt->link_id != fwd_xprt_info->xprt->link_id)
+			ipc_router_send_ctl_msg(fwd_xprt_info, ctl,
+						IPC_ROUTER_DUMMY_DEST_NODE);
+	}
+	up_read(&xprt_info_list_lock_lha5);
+
+	return 0;
+}
+
+static int forward_msg(struct msm_ipc_router_xprt_info *xprt_info,
+		       struct rr_packet *pkt)
+{
+	struct rr_header_v1 *hdr;
+	struct msm_ipc_router_xprt_info *fwd_xprt_info;
+	struct msm_ipc_routing_table_entry *rt_entry;
+	int ret = 0;
+	int fwd_xprt_option;
+
+	if (!xprt_info || !pkt)
+		return -EINVAL;
+
+	hdr = &(pkt->hdr);
+	rt_entry = ipc_router_get_rtentry_ref(hdr->dst_node_id);
+	if (!(rt_entry) || !(rt_entry->xprt_info)) {
+		IPC_RTR_ERR("%s: Routing table not initialized\n", __func__);
+		ret = -ENODEV;
+		goto fm_error1;
+	}
+
+	down_read(&rt_entry->lock_lha4);
+	fwd_xprt_info = rt_entry->xprt_info;
+	ret = ipc_router_get_xprt_info_ref(fwd_xprt_info);
+	if (ret < 0) {
+		IPC_RTR_ERR("%s: Abort invalid xprt\n", __func__);
+		goto fm_error_xprt;
+	}
+	ret = prepend_header(pkt, fwd_xprt_info);
+	if (ret < 0) {
+		IPC_RTR_ERR("%s: Prepend Header failed\n", __func__);
+		goto fm_error2;
+	}
+	fwd_xprt_option = fwd_xprt_info->xprt->get_option(fwd_xprt_info->xprt);
+	if (!(fwd_xprt_option & FRAG_PKT_WRITE_ENABLE)) {
+		ret = defragment_pkt(pkt);
+		if (ret < 0)
+			goto fm_error2;
+	}
+
+	mutex_lock(&fwd_xprt_info->tx_lock_lhb2);
+	if (xprt_info->remote_node_id == fwd_xprt_info->remote_node_id) {
+		IPC_RTR_ERR("%s: Discarding Command to route back\n", __func__);
+		ret = -EINVAL;
+		goto fm_error3;
+	}
+
+	if (xprt_info->xprt->link_id == fwd_xprt_info->xprt->link_id) {
+		IPC_RTR_ERR("%s: DST in the same cluster\n", __func__);
+		ret = 0;
+		goto fm_error3;
+	}
+	fwd_xprt_info->xprt->write(pkt, pkt->length, fwd_xprt_info->xprt);
+	IPC_RTR_INFO(fwd_xprt_info->log_ctx,
+		"%s %s Len:0x%x T:0x%x CF:0x%x SRC:<0x%x:0x%x> DST:<0x%x:0x%x>\n",
+		"FWD", "TX", hdr->size, hdr->type, hdr->control_flag,
+		hdr->src_node_id, hdr->src_port_id,
+		hdr->dst_node_id, hdr->dst_port_id);
+
+fm_error3:
+	mutex_unlock(&fwd_xprt_info->tx_lock_lhb2);
+fm_error2:
+	ipc_router_put_xprt_info_ref(fwd_xprt_info);
+fm_error_xprt:
+	up_read(&rt_entry->lock_lha4);
+fm_error1:
+	if (rt_entry)
+		kref_put(&rt_entry->ref, ipc_router_release_rtentry);
+	return ret;
+}
+
+static int msm_ipc_router_send_remove_client(struct comm_mode_info *mode_info,
+					uint32_t node_id, uint32_t port_id)
+{
+	union rr_control_msg msg;
+	struct msm_ipc_router_xprt_info *tmp_xprt_info;
+	int mode;
+	void *xprt_info;
+	int rc = 0;
+
+	if (!mode_info) {
+		IPC_RTR_ERR("%s: NULL mode_info\n", __func__);
+		return -EINVAL;
+	}
+	mode = mode_info->mode;
+	xprt_info = mode_info->xprt_info;
+
+	memset(&msg, 0, sizeof(msg));
+	msg.cmd = IPC_ROUTER_CTRL_CMD_REMOVE_CLIENT;
+	msg.cli.node_id = node_id;
+	msg.cli.port_id = port_id;
+
+	if ((mode == SINGLE_LINK_MODE) && xprt_info) {
+		down_read(&xprt_info_list_lock_lha5);
+		list_for_each_entry(tmp_xprt_info, &xprt_info_list, list) {
+			if (tmp_xprt_info != xprt_info)
+				continue;
+			ipc_router_send_ctl_msg(tmp_xprt_info, &msg,
+						IPC_ROUTER_DUMMY_DEST_NODE);
+			break;
+		}
+		up_read(&xprt_info_list_lock_lha5);
+	} else if ((mode == SINGLE_LINK_MODE) && !xprt_info) {
+		broadcast_ctl_msg_locally(&msg);
+	} else if (mode == MULTI_LINK_MODE) {
+		broadcast_ctl_msg(&msg);
+	} else if (mode != NULL_MODE) {
+		IPC_RTR_ERR(
+		"%s: Invalid mode(%d) + xprt_info(%p) for %08x:%08x\n",
+			__func__, mode, xprt_info, node_id, port_id);
+		rc = -EINVAL;
+	}
+	return rc;
+}
+
+static void update_comm_mode_info(struct comm_mode_info *mode_info,
+				  struct msm_ipc_router_xprt_info *xprt_info)
+{
+	if (!mode_info) {
+		IPC_RTR_ERR("%s: NULL mode_info\n", __func__);
+		return;
+	}
+
+	if (mode_info->mode == NULL_MODE) {
+		mode_info->xprt_info = xprt_info;
+		mode_info->mode = SINGLE_LINK_MODE;
+	} else if (mode_info->mode == SINGLE_LINK_MODE &&
+		   mode_info->xprt_info != xprt_info) {
+		mode_info->mode = MULTI_LINK_MODE;
+	}
+
+	return;
+}
+
+/**
+ * cleanup_rmt_server() - Cleanup server hosted in the remote port
+ * @xprt_info: XPRT through which this cleanup event is handled.
+ * @rport_ptr: Remote port that is being cleaned up.
+ * @server: Server that is hosted in the remote port.
+ */
+static void cleanup_rmt_server(struct msm_ipc_router_xprt_info *xprt_info,
+			       struct msm_ipc_router_remote_port *rport_ptr,
+			       struct msm_ipc_server *server)
+{
+	union rr_control_msg ctl;
+
+	memset(&ctl, 0, sizeof(ctl));
+	ctl.cmd = IPC_ROUTER_CTRL_CMD_REMOVE_SERVER;
+	ctl.srv.service = server->name.service;
+	ctl.srv.instance = server->name.instance;
+	ctl.srv.node_id = rport_ptr->node_id;
+	ctl.srv.port_id = rport_ptr->port_id;
+	if (xprt_info)
+		relay_ctl_msg(xprt_info, &ctl);
+	broadcast_ctl_msg_locally(&ctl);
+	ipc_router_destroy_server_nolock(server,
+			rport_ptr->node_id, rport_ptr->port_id);
+}
+
+static void cleanup_rmt_ports(struct msm_ipc_router_xprt_info *xprt_info,
+			      struct msm_ipc_routing_table_entry *rt_entry)
+{
+	struct msm_ipc_router_remote_port *rport_ptr, *tmp_rport_ptr;
+	struct msm_ipc_server *server;
+	union rr_control_msg ctl;
+	int j;
+
+	memset(&ctl, 0, sizeof(ctl));
+	for (j = 0; j < RP_HASH_SIZE; j++) {
+		list_for_each_entry_safe(rport_ptr, tmp_rport_ptr,
+				&rt_entry->remote_port_list[j], list) {
+			list_del(&rport_ptr->list);
+			mutex_lock(&rport_ptr->rport_lock_lhb2);
+			server = rport_ptr->server;
+			rport_ptr->server = NULL;
+			mutex_unlock(&rport_ptr->rport_lock_lhb2);
+			ipc_router_reset_conn(rport_ptr);
+			if (server) {
+				cleanup_rmt_server(xprt_info, rport_ptr,
+						   server);
+				server = NULL;
+			}
+
+			ctl.cmd = IPC_ROUTER_CTRL_CMD_REMOVE_CLIENT;
+			ctl.cli.node_id = rport_ptr->node_id;
+			ctl.cli.port_id = rport_ptr->port_id;
+			kref_put(&rport_ptr->ref, ipc_router_release_rport);
+
+			relay_ctl_msg(xprt_info, &ctl);
+			broadcast_ctl_msg_locally(&ctl);
+		}
+	}
+}
+
+static void msm_ipc_cleanup_routing_table(
+	struct msm_ipc_router_xprt_info *xprt_info)
+{
+	int i;
+	struct msm_ipc_routing_table_entry *rt_entry, *tmp_rt_entry;
+
+	if (!xprt_info) {
+		IPC_RTR_ERR("%s: Invalid xprt_info\n", __func__);
+		return;
+	}
+
+	down_write(&server_list_lock_lha2);
+	down_write(&routing_table_lock_lha3);
+	for (i = 0; i < RT_HASH_SIZE; i++) {
+		list_for_each_entry_safe(rt_entry, tmp_rt_entry,
+					 &routing_table[i], list) {
+			down_write(&rt_entry->lock_lha4);
+			if (rt_entry->xprt_info != xprt_info) {
+				up_write(&rt_entry->lock_lha4);
+				continue;
+			}
+			cleanup_rmt_ports(xprt_info, rt_entry);
+			rt_entry->xprt_info = NULL;
+			up_write(&rt_entry->lock_lha4);
+			list_del(&rt_entry->list);
+			kref_put(&rt_entry->ref, ipc_router_release_rtentry);
+		}
+	}
+	up_write(&routing_table_lock_lha3);
+	up_write(&server_list_lock_lha2);
+}
+
+/**
+ * sync_sec_rule() - Synchronize the security rule into the server structure
+ * @server: Server structure where the rule has to be synchronized.
+ * @rule: Security rule to be synchronized.
+ *
+ * This function is used to update the server structure with the security
+ * rule configured for the <service:instance> corresponding to that server.
+ */
+static void sync_sec_rule(struct msm_ipc_server *server, void *rule)
+{
+	struct msm_ipc_server_port *server_port;
+	struct msm_ipc_router_remote_port *rport_ptr = NULL;
+
+	list_for_each_entry(server_port, &server->server_port_list, list) {
+		rport_ptr = ipc_router_get_rport_ref(
+				server_port->server_addr.node_id,
+				server_port->server_addr.port_id);
+		if (!rport_ptr)
+			continue;
+		rport_ptr->sec_rule = rule;
+		kref_put(&rport_ptr->ref, ipc_router_release_rport);
+	}
+	server->synced_sec_rule = 1;
+}
+
+/**
+ * msm_ipc_sync_sec_rule() - Sync the security rule to the service
+ * @service: Service for which the rule has to be synchronized.
+ * @instance: Instance for which the rule has to be synchronized.
+ * @rule: Security rule to be synchronized.
+ *
+ * This function is used to synchronize the security rule with the server
+ * hash table, if the user-space script configures the rule after the service
+ * has come up. This function is used to synchronize the security rule to a
+ * specific service and optionally a specific instance.
+ */
+void msm_ipc_sync_sec_rule(uint32_t service, uint32_t instance, void *rule)
+{
+	int key = (service & (SRV_HASH_SIZE - 1));
+	struct msm_ipc_server *server;
+
+	down_write(&server_list_lock_lha2);
+	list_for_each_entry(server, &server_list[key], list) {
+		if (server->name.service != service)
+			continue;
+
+		if (server->name.instance != instance &&
+		    instance != ALL_INSTANCE)
+			continue;
+
+		/* If the rule applies to all instances and if the specific
+		 * instance of a service has a rule synchronized already,
+		 * do not apply the rule for that specific instance.
+		 */
+		if (instance == ALL_INSTANCE && server->synced_sec_rule)
+			continue;
+
+		sync_sec_rule(server, rule);
+	}
+	up_write(&server_list_lock_lha2);
+}
+
+/**
+ * msm_ipc_sync_default_sec_rule() - Default security rule to all services
+ * @rule: Security rule to be synchronized.
+ *
+ * This function is used to synchronize the security rule with the server
+ * hash table, if the user-space script configures the rule after the service
+ * has come up. It synchronizes the security rule that applies to all
+ * services, provided the concerned service does not have any rule defined.
+ */
+void msm_ipc_sync_default_sec_rule(void *rule)
+{
+	int key;
+	struct msm_ipc_server *server;
+
+	down_write(&server_list_lock_lha2);
+	for (key = 0; key < SRV_HASH_SIZE; key++) {
+		list_for_each_entry(server, &server_list[key], list) {
+			if (server->synced_sec_rule)
+				continue;
+
+			sync_sec_rule(server, rule);
+		}
+	}
+	up_write(&server_list_lock_lha2);
+}
+
+/**
+ * ipc_router_reset_conn() - Reset the connection to remote port
+ * @rport_ptr: Pointer to the remote port to be disconnected.
+ *
+ * This function is used to reset all the local ports that are connected to
+ * the remote port being passed.
+ */
+static void ipc_router_reset_conn(struct msm_ipc_router_remote_port *rport_ptr)
+{
+	struct msm_ipc_port *port_ptr;
+	struct ipc_router_conn_info *conn_info, *tmp_conn_info;
+
+	mutex_lock(&rport_ptr->rport_lock_lhb2);
+	list_for_each_entry_safe(conn_info, tmp_conn_info,
+				&rport_ptr->conn_info_list, list) {
+		port_ptr = ipc_router_get_port_ref(conn_info->port_id);
+		if (port_ptr) {
+			mutex_lock(&port_ptr->port_lock_lhc3);
+			port_ptr->conn_status = CONNECTION_RESET;
+			mutex_unlock(&port_ptr->port_lock_lhc3);
+			wake_up(&port_ptr->port_rx_wait_q);
+			kref_put(&port_ptr->ref, ipc_router_release_port);
+		}
+
+		list_del(&conn_info->list);
+		kfree(conn_info);
+	}
+	mutex_unlock(&rport_ptr->rport_lock_lhb2);
+}
+
+/**
+ * ipc_router_set_conn() - Set the connection by initializing dest address
+ * @port_ptr: Local port in which the connection has to be set.
+ * @addr: Destination address of the connection.
+ *
+ * @return: 0 on success, standard Linux error codes on failure.
+ */
+int ipc_router_set_conn(struct msm_ipc_port *port_ptr,
+			struct msm_ipc_addr *addr)
+{
+	struct msm_ipc_router_remote_port *rport_ptr;
+	struct ipc_router_conn_info *conn_info;
+
+	if (unlikely(!port_ptr || !addr))
+		return -EINVAL;
+
+	if (addr->addrtype != MSM_IPC_ADDR_ID) {
+		IPC_RTR_ERR("%s: Invalid Address type\n", __func__);
+		return -EINVAL;
+	}
+
+	if (port_ptr->type == SERVER_PORT) {
+		IPC_RTR_ERR("%s: Connection refused on a server port\n",
+			    __func__);
+		return -ECONNREFUSED;
+	}
+
+	if (port_ptr->conn_status == CONNECTED) {
+		IPC_RTR_ERR("%s: Port %08x already connected\n",
+			    __func__, port_ptr->this_port.port_id);
+		return -EISCONN;
+	}
+
+	conn_info = kzalloc(sizeof(struct ipc_router_conn_info), GFP_KERNEL);
+	if (!conn_info) {
+		IPC_RTR_ERR("%s: Error allocating conn_info\n", __func__);
+		return -ENOMEM;
+	}
+	INIT_LIST_HEAD(&conn_info->list);
+	conn_info->port_id = port_ptr->this_port.port_id;
+
+	rport_ptr = ipc_router_get_rport_ref(addr->addr.port_addr.node_id,
+					     addr->addr.port_addr.port_id);
+	if (!rport_ptr) {
+		IPC_RTR_ERR("%s: Invalid remote endpoint\n", __func__);
+		kfree(conn_info);
+		return -ENODEV;
+	}
+	mutex_lock(&rport_ptr->rport_lock_lhb2);
+	list_add_tail(&conn_info->list, &rport_ptr->conn_info_list);
+	mutex_unlock(&rport_ptr->rport_lock_lhb2);
+
+	mutex_lock(&port_ptr->port_lock_lhc3);
+	memcpy(&port_ptr->dest_addr, &addr->addr.port_addr,
+	       sizeof(struct msm_ipc_port_addr));
+	port_ptr->conn_status = CONNECTED;
+	mutex_unlock(&port_ptr->port_lock_lhc3);
+	kref_put(&rport_ptr->ref, ipc_router_release_rport);
+	return 0;
+}
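+
+/*
+ * Minimal usage sketch (illustrative; assumes a client port "port" and
+ * a populated destination address "addr" of type MSM_IPC_ADDR_ID):
+ *
+ *	int rc = ipc_router_set_conn(port, &addr);
+ *
+ *	if (rc == -EISCONN)
+ *		... port is already connected ...
+ */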
+
+/**
+ * do_version_negotiation() - perform a version negotiation and set the version
+ * @xprt_info:	Pointer to the IPC Router transport info structure.
+ * @msg:	Pointer to the IPC Router HELLO message.
+ *
+ * This function performs the version negotiation by first verifying the
+ * computed checksum. If the checksum matches the magic number, it sets the
+ * negotiated IPC Router version on the transport.
+ */
+static void do_version_negotiation(struct msm_ipc_router_xprt_info *xprt_info,
+				   union rr_control_msg *msg)
+{
+	uint32_t magic;
+	unsigned version;
+
+	if (!xprt_info)
+		return;
+	magic = ipc_router_calc_checksum(msg);
+	if (magic == IPC_ROUTER_HELLO_MAGIC) {
+		version = fls(msg->hello.versions & IPC_ROUTER_VER_BITMASK) - 1;
+		/* Bits 0 & 31 are reserved for future use */
+		if ((version > 0) &&
+		    (version != (sizeof(version) * BITS_PER_BYTE - 1)) &&
+			xprt_info->xprt->set_version)
+			xprt_info->xprt->set_version(xprt_info->xprt, version);
+	}
+}
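+
+/*
+ * Worked example of the negotiation above, using the values from
+ * ipc_router_private.h: IPC_ROUTER_VER_BITMASK is
+ * BIT(IPC_ROUTER_V1) | BIT(IPC_ROUTER_V2) = BIT(1) | BIT(3) = 0x0A.
+ * If the peer's HELLO advertises versions = 0x0A, then
+ * fls(0x0A & 0x0A) - 1 = 4 - 1 = 3, so the transport is set to wire
+ * version 3 (IPC_ROUTER_V2). Results of 0 and 31 are rejected by the
+ * checks above because those bits are reserved.
+ */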
+
+static int process_hello_msg(struct msm_ipc_router_xprt_info *xprt_info,
+				union rr_control_msg *msg,
+				struct rr_header_v1 *hdr)
+{
+	int i, rc = 0;
+	union rr_control_msg ctl;
+	struct msm_ipc_routing_table_entry *rt_entry;
+
+	if (!hdr)
+		return -EINVAL;
+
+	xprt_info->remote_node_id = hdr->src_node_id;
+	rt_entry = create_routing_table_entry(hdr->src_node_id, xprt_info);
+	if (!rt_entry) {
+		IPC_RTR_ERR("%s: rt_entry allocation failed\n", __func__);
+		return -ENOMEM;
+	}
+	kref_put(&rt_entry->ref, ipc_router_release_rtentry);
+
+	do_version_negotiation(xprt_info, msg);
+	/* Send a reply HELLO message */
+	memset(&ctl, 0, sizeof(ctl));
+	ctl.hello.cmd = IPC_ROUTER_CTRL_CMD_HELLO;
+	ctl.hello.checksum = IPC_ROUTER_HELLO_MAGIC;
+	ctl.hello.versions = (uint32_t)IPC_ROUTER_VER_BITMASK;
+	ctl.hello.checksum = ipc_router_calc_checksum(&ctl);
+	rc = ipc_router_send_ctl_msg(xprt_info, &ctl,
+				     IPC_ROUTER_DUMMY_DEST_NODE);
+	if (rc < 0) {
+		IPC_RTR_ERR("%s: Error sending reply HELLO message\n",
+								__func__);
+		return rc;
+	}
+	xprt_info->initialized = 1;
+
+	/* Send the list of servers from the local node and from nodes
+	 * outside the mesh network to which this XPRT belongs.
+	 */
+	down_read(&server_list_lock_lha2);
+	down_read(&routing_table_lock_lha3);
+	for (i = 0; i < RT_HASH_SIZE; i++) {
+		list_for_each_entry(rt_entry, &routing_table[i], list) {
+			if ((rt_entry->node_id != IPC_ROUTER_NID_LOCAL) &&
+			    (!rt_entry->xprt_info ||
+			     (rt_entry->xprt_info->xprt->link_id ==
+			      xprt_info->xprt->link_id)))
+				continue;
+			rc = msm_ipc_router_send_server_list(rt_entry->node_id,
+							     xprt_info);
+			if (rc < 0) {
+				up_read(&routing_table_lock_lha3);
+				up_read(&server_list_lock_lha2);
+				return rc;
+			}
+		}
+	}
+	up_read(&routing_table_lock_lha3);
+	up_read(&server_list_lock_lha2);
+	return rc;
+}
+
+static int process_resume_tx_msg(union rr_control_msg *msg,
+				 struct rr_packet *pkt)
+{
+	struct msm_ipc_router_remote_port *rport_ptr;
+
+	rport_ptr = ipc_router_get_rport_ref(msg->cli.node_id,
+					     msg->cli.port_id);
+	if (!rport_ptr) {
+		IPC_RTR_ERR("%s: Unable to resume client\n", __func__);
+		return -ENODEV;
+	}
+	mutex_lock(&rport_ptr->rport_lock_lhb2);
+	rport_ptr->tx_quota_cnt = 0;
+	post_resume_tx(rport_ptr, pkt, msg);
+	mutex_unlock(&rport_ptr->rport_lock_lhb2);
+	kref_put(&rport_ptr->ref, ipc_router_release_rport);
+	return 0;
+}
+
+static int process_new_server_msg(struct msm_ipc_router_xprt_info *xprt_info,
+			union rr_control_msg *msg, struct rr_packet *pkt)
+{
+	struct msm_ipc_routing_table_entry *rt_entry;
+	struct msm_ipc_server *server;
+	struct msm_ipc_router_remote_port *rport_ptr;
+
+	if (msg->srv.instance == 0) {
+		IPC_RTR_ERR("%s: Server %08x create rejected, version = 0\n",
+			__func__, msg->srv.service);
+		return -EINVAL;
+	}
+
+	rt_entry = ipc_router_get_rtentry_ref(msg->srv.node_id);
+	if (!rt_entry) {
+		rt_entry = create_routing_table_entry(msg->srv.node_id,
+						      xprt_info);
+		if (!rt_entry) {
+			IPC_RTR_ERR("%s: rt_entry allocation failed\n",
+								__func__);
+			return -ENOMEM;
+		}
+	}
+	kref_put(&rt_entry->ref, ipc_router_release_rtentry);
+
+	/* If the service already exists in the table, create_server returns
+	 * a reference to it.
+	 */
+	rport_ptr = ipc_router_create_rport(msg->srv.node_id,
+				msg->srv.port_id, xprt_info);
+	if (!rport_ptr)
+		return -ENOMEM;
+
+	server = msm_ipc_router_create_server(
+			msg->srv.service, msg->srv.instance,
+			msg->srv.node_id, msg->srv.port_id, xprt_info);
+	if (!server) {
+		IPC_RTR_ERR("%s: Server %08x:%08x Create failed\n",
+			    __func__, msg->srv.service, msg->srv.instance);
+		kref_put(&rport_ptr->ref, ipc_router_release_rport);
+		ipc_router_destroy_rport(rport_ptr);
+		return -ENOMEM;
+	}
+	mutex_lock(&rport_ptr->rport_lock_lhb2);
+	rport_ptr->server = server;
+	mutex_unlock(&rport_ptr->rport_lock_lhb2);
+	rport_ptr->sec_rule = msm_ipc_get_security_rule(
+					msg->srv.service, msg->srv.instance);
+	kref_put(&rport_ptr->ref, ipc_router_release_rport);
+	kref_put(&server->ref, ipc_router_release_server);
+
+	/* Relay the new server message to other subsystems that do not belong
+	 * to the cluster from which this message is received. Notify the
+	 * local clients waiting for this service.
+	 */
+	relay_ctl_msg(xprt_info, msg);
+	post_control_ports(pkt);
+	return 0;
+}
+
+static int process_rmv_server_msg(struct msm_ipc_router_xprt_info *xprt_info,
+			union rr_control_msg *msg, struct rr_packet *pkt)
+{
+	struct msm_ipc_server *server;
+	struct msm_ipc_router_remote_port *rport_ptr;
+
+	server = ipc_router_get_server_ref(msg->srv.service, msg->srv.instance,
+					   msg->srv.node_id, msg->srv.port_id);
+	rport_ptr = ipc_router_get_rport_ref(msg->srv.node_id,
+					     msg->srv.port_id);
+	if (rport_ptr) {
+		mutex_lock(&rport_ptr->rport_lock_lhb2);
+		if (rport_ptr->server == server)
+			rport_ptr->server = NULL;
+		mutex_unlock(&rport_ptr->rport_lock_lhb2);
+		kref_put(&rport_ptr->ref, ipc_router_release_rport);
+	}
+
+	if (server) {
+		kref_put(&server->ref, ipc_router_release_server);
+		ipc_router_destroy_server(server, msg->srv.node_id,
+					  msg->srv.port_id);
+		/*
+		 * Relay the remove server message to other subsystems that do
+		 * not belong to the cluster from which this message is
+		 * received. Notify the local clients communicating with the
+		 * service.
+		 */
+		relay_ctl_msg(xprt_info, msg);
+		post_control_ports(pkt);
+	}
+	return 0;
+}
+
+static int process_rmv_client_msg(struct msm_ipc_router_xprt_info *xprt_info,
+			union rr_control_msg *msg, struct rr_packet *pkt)
+{
+	struct msm_ipc_router_remote_port *rport_ptr;
+	struct msm_ipc_server *server;
+
+	rport_ptr = ipc_router_get_rport_ref(msg->cli.node_id,
+					     msg->cli.port_id);
+	if (rport_ptr) {
+		mutex_lock(&rport_ptr->rport_lock_lhb2);
+		server = rport_ptr->server;
+		rport_ptr->server = NULL;
+		mutex_unlock(&rport_ptr->rport_lock_lhb2);
+		ipc_router_reset_conn(rport_ptr);
+		down_write(&server_list_lock_lha2);
+		if (server)
+			cleanup_rmt_server(NULL, rport_ptr, server);
+		up_write(&server_list_lock_lha2);
+		kref_put(&rport_ptr->ref, ipc_router_release_rport);
+		ipc_router_destroy_rport(rport_ptr);
+	}
+
+	relay_ctl_msg(xprt_info, msg);
+	post_control_ports(pkt);
+	return 0;
+}
+
+static int process_control_msg(struct msm_ipc_router_xprt_info *xprt_info,
+			       struct rr_packet *pkt)
+{
+	union rr_control_msg *msg;
+	int rc = 0;
+	struct rr_header_v1 *hdr;
+
+	if (pkt->length != sizeof(*msg)) {
+		IPC_RTR_ERR("%s: r2r msg size %d != %zu\n",
+				__func__, pkt->length, sizeof(*msg));
+		return -EINVAL;
+	}
+
+	hdr = &(pkt->hdr);
+	msg = msm_ipc_router_skb_to_buf(pkt->pkt_fragment_q, sizeof(*msg));
+	if (!msg) {
+		IPC_RTR_ERR("%s: Error extracting control msg\n", __func__);
+		return -ENOMEM;
+	}
+
+	ipc_router_log_msg(xprt_info->log_ctx, IPC_ROUTER_LOG_EVENT_RX,
+					msg, hdr, NULL, NULL);
+
+	switch (msg->cmd) {
+	case IPC_ROUTER_CTRL_CMD_HELLO:
+		rc = process_hello_msg(xprt_info, msg, hdr);
+		break;
+	case IPC_ROUTER_CTRL_CMD_RESUME_TX:
+		rc = process_resume_tx_msg(msg, pkt);
+		break;
+	case IPC_ROUTER_CTRL_CMD_NEW_SERVER:
+		rc = process_new_server_msg(xprt_info, msg, pkt);
+		break;
+	case IPC_ROUTER_CTRL_CMD_REMOVE_SERVER:
+		rc = process_rmv_server_msg(xprt_info, msg, pkt);
+		break;
+	case IPC_ROUTER_CTRL_CMD_REMOVE_CLIENT:
+		rc = process_rmv_client_msg(xprt_info, msg, pkt);
+		break;
+	default:
+		rc = -ENOSYS;
+	}
+	kfree(msg);
+	return rc;
+}
+
+static void do_read_data(struct work_struct *work)
+{
+	struct rr_header_v1 *hdr;
+	struct rr_packet *pkt = NULL;
+	struct msm_ipc_port *port_ptr;
+	struct msm_ipc_router_remote_port *rport_ptr;
+	int ret;
+
+	struct msm_ipc_router_xprt_info *xprt_info =
+		container_of(work,
+			     struct msm_ipc_router_xprt_info,
+			     read_data);
+
+	while ((pkt = rr_read(xprt_info)) != NULL) {
+		if (pkt->length < calc_rx_header_size(xprt_info) ||
+		    pkt->length > MAX_IPC_PKT_SIZE) {
+			IPC_RTR_ERR("%s: Invalid pkt length %d\n",
+				__func__, pkt->length);
+			goto read_next_pkt1;
+		}
+
+		ret = extract_header(pkt);
+		if (ret < 0)
+			goto read_next_pkt1;
+		hdr = &(pkt->hdr);
+
+		if ((hdr->dst_node_id != IPC_ROUTER_NID_LOCAL) &&
+		    ((hdr->type == IPC_ROUTER_CTRL_CMD_RESUME_TX) ||
+		     (hdr->type == IPC_ROUTER_CTRL_CMD_DATA))) {
+			IPC_RTR_INFO(xprt_info->log_ctx,
+			"%s %s Len:0x%x T:0x%x CF:0x%x SRC:<0x%x:0x%x> DST:<0x%x:0x%x>\n",
+			"FWD", "RX", hdr->size, hdr->type, hdr->control_flag,
+			hdr->src_node_id, hdr->src_port_id,
+			hdr->dst_node_id, hdr->dst_port_id);
+			forward_msg(xprt_info, pkt);
+			goto read_next_pkt1;
+		}
+
+		if (hdr->type != IPC_ROUTER_CTRL_CMD_DATA) {
+			process_control_msg(xprt_info, pkt);
+			goto read_next_pkt1;
+		}
+
+		if (msm_ipc_router_debug_mask & SMEM_LOG) {
+			smem_log_event((SMEM_LOG_PROC_ID_APPS |
+				SMEM_LOG_IPC_ROUTER_EVENT_BASE |
+				IPC_ROUTER_LOG_EVENT_RX),
+				(hdr->src_node_id << 24) |
+				(hdr->src_port_id & 0xffffff),
+				(hdr->dst_node_id << 24) |
+				(hdr->dst_port_id & 0xffffff),
+				(hdr->type << 24) | (hdr->control_flag << 16) |
+				(hdr->size & 0xffff));
+		}
+
+		port_ptr = ipc_router_get_port_ref(hdr->dst_port_id);
+		if (!port_ptr) {
+			IPC_RTR_ERR("%s: No local port id %08x\n", __func__,
+				hdr->dst_port_id);
+			goto read_next_pkt1;
+		}
+
+		rport_ptr = ipc_router_get_rport_ref(hdr->src_node_id,
+						     hdr->src_port_id);
+		if (!rport_ptr) {
+			rport_ptr = ipc_router_create_rport(hdr->src_node_id,
+						hdr->src_port_id, xprt_info);
+			if (!rport_ptr) {
+				IPC_RTR_ERR(
+				"%s: Rmt Prt %08x:%08x create failed\n",
+				__func__, hdr->src_node_id, hdr->src_port_id);
+				goto read_next_pkt2;
+			}
+		}
+
+		ipc_router_log_msg(xprt_info->log_ctx, IPC_ROUTER_LOG_EVENT_RX,
+				pkt, hdr, port_ptr, rport_ptr);
+		kref_put(&rport_ptr->ref, ipc_router_release_rport);
+		post_pkt_to_port(port_ptr, pkt, 0);
+		kref_put(&port_ptr->ref, ipc_router_release_port);
+		continue;
+read_next_pkt2:
+		kref_put(&port_ptr->ref, ipc_router_release_port);
+read_next_pkt1:
+		release_pkt(pkt);
+	}
+}
+
+int msm_ipc_router_register_server(struct msm_ipc_port *port_ptr,
+				   struct msm_ipc_addr *name)
+{
+	struct msm_ipc_server *server;
+	union rr_control_msg ctl;
+	struct msm_ipc_router_remote_port *rport_ptr;
+
+	if (!port_ptr || !name)
+		return -EINVAL;
+
+	if (port_ptr->type != CLIENT_PORT)
+		return -EINVAL;
+
+	if (name->addrtype != MSM_IPC_ADDR_NAME)
+		return -EINVAL;
+
+	rport_ptr = ipc_router_create_rport(IPC_ROUTER_NID_LOCAL,
+			port_ptr->this_port.port_id, NULL);
+	if (!rport_ptr) {
+		IPC_RTR_ERR("%s: RPort %08x:%08x creation failed\n", __func__,
+			    IPC_ROUTER_NID_LOCAL, port_ptr->this_port.port_id);
+		return -ENOMEM;
+	}
+
+	server = msm_ipc_router_create_server(name->addr.port_name.service,
+					      name->addr.port_name.instance,
+					      IPC_ROUTER_NID_LOCAL,
+					      port_ptr->this_port.port_id,
+					      NULL);
+	if (!server) {
+		IPC_RTR_ERR("%s: Server %08x:%08x Create failed\n",
+			    __func__, name->addr.port_name.service,
+			    name->addr.port_name.instance);
+		kref_put(&rport_ptr->ref, ipc_router_release_rport);
+		ipc_router_destroy_rport(rport_ptr);
+		return -ENOMEM;
+	}
+
+	memset(&ctl, 0, sizeof(ctl));
+	ctl.cmd = IPC_ROUTER_CTRL_CMD_NEW_SERVER;
+	ctl.srv.service = server->name.service;
+	ctl.srv.instance = server->name.instance;
+	ctl.srv.node_id = IPC_ROUTER_NID_LOCAL;
+	ctl.srv.port_id = port_ptr->this_port.port_id;
+	broadcast_ctl_msg(&ctl);
+	mutex_lock(&port_ptr->port_lock_lhc3);
+	port_ptr->type = SERVER_PORT;
+	port_ptr->mode_info.mode = MULTI_LINK_MODE;
+	port_ptr->port_name.service = server->name.service;
+	port_ptr->port_name.instance = server->name.instance;
+	port_ptr->rport_info = rport_ptr;
+	mutex_unlock(&port_ptr->port_lock_lhc3);
+	kref_put(&rport_ptr->ref, ipc_router_release_rport);
+	kref_put(&server->ref, ipc_router_release_server);
+	return 0;
+}
+
+int msm_ipc_router_unregister_server(struct msm_ipc_port *port_ptr)
+{
+	struct msm_ipc_server *server;
+	union rr_control_msg ctl;
+	struct msm_ipc_router_remote_port *rport_ptr;
+
+	if (!port_ptr)
+		return -EINVAL;
+
+	if (port_ptr->type != SERVER_PORT) {
+		IPC_RTR_ERR("%s: Trying to unregister a non-server port\n",
+			__func__);
+		return -EINVAL;
+	}
+
+	if (port_ptr->this_port.node_id != IPC_ROUTER_NID_LOCAL) {
+		IPC_RTR_ERR(
+		"%s: Trying to unregister a remote server locally\n",
+			__func__);
+		return -EINVAL;
+	}
+
+	server = ipc_router_get_server_ref(port_ptr->port_name.service,
+					   port_ptr->port_name.instance,
+					   port_ptr->this_port.node_id,
+					   port_ptr->this_port.port_id);
+	if (!server) {
+		IPC_RTR_ERR("%s: Server lookup failed\n", __func__);
+		return -ENODEV;
+	}
+
+	mutex_lock(&port_ptr->port_lock_lhc3);
+	port_ptr->type = CLIENT_PORT;
+	rport_ptr = (struct msm_ipc_router_remote_port *)port_ptr->rport_info;
+	mutex_unlock(&port_ptr->port_lock_lhc3);
+	if (rport_ptr)
+		ipc_router_reset_conn(rport_ptr);
+	memset(&ctl, 0, sizeof(ctl));
+	ctl.cmd = IPC_ROUTER_CTRL_CMD_REMOVE_SERVER;
+	ctl.srv.service = server->name.service;
+	ctl.srv.instance = server->name.instance;
+	ctl.srv.node_id = IPC_ROUTER_NID_LOCAL;
+	ctl.srv.port_id = port_ptr->this_port.port_id;
+	kref_put(&server->ref, ipc_router_release_server);
+	ipc_router_destroy_server(server, port_ptr->this_port.node_id,
+				  port_ptr->this_port.port_id);
+	broadcast_ctl_msg(&ctl);
+	mutex_lock(&port_ptr->port_lock_lhc3);
+	port_ptr->type = CLIENT_PORT;
+	mutex_unlock(&port_ptr->port_lock_lhc3);
+	return 0;
+}
+
+static int loopback_data(struct msm_ipc_port *src,
+			uint32_t port_id,
+			struct rr_packet *pkt)
+{
+	struct msm_ipc_port *port_ptr;
+	struct sk_buff *temp_skb;
+	int align_size;
+
+	if (!pkt) {
+		IPC_RTR_ERR("%s: Invalid pkt pointer\n", __func__);
+		return -EINVAL;
+	}
+
+	temp_skb = skb_peek_tail(pkt->pkt_fragment_q);
+	if (!temp_skb) {
+		IPC_RTR_ERR("%s: Empty skb\n", __func__);
+		return -EINVAL;
+	}
+	align_size = ALIGN_SIZE(pkt->length);
+	skb_put(temp_skb, align_size);
+	pkt->length += align_size;
+
+	port_ptr = ipc_router_get_port_ref(port_id);
+	if (!port_ptr) {
+		IPC_RTR_ERR("%s: Local port %d not present\n",
+						__func__, port_id);
+		return -ENODEV;
+	}
+	post_pkt_to_port(port_ptr, pkt, 1);
+	update_comm_mode_info(&src->mode_info, NULL);
+	kref_put(&port_ptr->ref, ipc_router_release_port);
+
+	return pkt->hdr.size;
+}
+
+static int ipc_router_tx_wait(struct msm_ipc_port *src,
+			      struct msm_ipc_router_remote_port *rport_ptr,
+			      uint32_t *set_confirm_rx,
+			      long timeout)
+{
+	struct msm_ipc_resume_tx_port *resume_tx_port;
+	int ret;
+
+	if (unlikely(!src || !rport_ptr))
+		return -EINVAL;
+
+	for (;;) {
+		mutex_lock(&rport_ptr->rport_lock_lhb2);
+		if (rport_ptr->status == RESET) {
+			mutex_unlock(&rport_ptr->rport_lock_lhb2);
+			IPC_RTR_ERR("%s: RPort %08x:%08x is in reset state\n",
+			    __func__, rport_ptr->node_id, rport_ptr->port_id);
+			return -ENETRESET;
+		}
+
+		if (rport_ptr->tx_quota_cnt < IPC_ROUTER_HIGH_RX_QUOTA)
+			break;
+
+		if (msm_ipc_router_lookup_resume_tx_port(
+			rport_ptr, src->this_port.port_id))
+			goto check_timeo;
+
+		resume_tx_port =
+			kzalloc(sizeof(struct msm_ipc_resume_tx_port),
+				GFP_KERNEL);
+		if (!resume_tx_port) {
+			IPC_RTR_ERR("%s: Resume_Tx port allocation failed\n",
+				    __func__);
+			mutex_unlock(&rport_ptr->rport_lock_lhb2);
+			return -ENOMEM;
+		}
+		INIT_LIST_HEAD(&resume_tx_port->list);
+		resume_tx_port->port_id = src->this_port.port_id;
+		resume_tx_port->node_id = src->this_port.node_id;
+		list_add_tail(&resume_tx_port->list,
+			      &rport_ptr->resume_tx_port_list);
+check_timeo:
+		mutex_unlock(&rport_ptr->rport_lock_lhb2);
+		if (!timeout) {
+			return -EAGAIN;
+		} else if (timeout < 0) {
+			ret = wait_event_interruptible(src->port_tx_wait_q,
+					(rport_ptr->tx_quota_cnt !=
+					 IPC_ROUTER_HIGH_RX_QUOTA ||
+					 rport_ptr->status == RESET));
+			if (ret)
+				return ret;
+		} else {
+			ret = wait_event_interruptible_timeout(
+					src->port_tx_wait_q,
+					(rport_ptr->tx_quota_cnt !=
+					 IPC_ROUTER_HIGH_RX_QUOTA ||
+					 rport_ptr->status == RESET),
+					msecs_to_jiffies(timeout));
+			if (ret < 0) {
+				return ret;
+			} else if (ret == 0) {
+				IPC_RTR_ERR("%s: Resume_tx Timeout %08x:%08x\n",
+					__func__, rport_ptr->node_id,
+					rport_ptr->port_id);
+				return -ETIMEDOUT;
+			}
+		}
+	}
+	rport_ptr->tx_quota_cnt++;
+	if (rport_ptr->tx_quota_cnt == IPC_ROUTER_LOW_RX_QUOTA)
+		*set_confirm_rx = 1;
+	mutex_unlock(&rport_ptr->rport_lock_lhb2);
+	return 0;
+}
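+
+/*
+ * Flow-control summary for the wait above: a remote port accepts at most
+ * IPC_ROUTER_HIGH_RX_QUOTA (10) unacknowledged packets. Once the quota is
+ * full, the sender queues itself on resume_tx_port_list and blocks (or
+ * returns -EAGAIN when timeout is 0) until the receiver sends a RESUME_TX
+ * control message, which resets tx_quota_cnt in process_resume_tx_msg().
+ * When tx_quota_cnt reaches IPC_ROUTER_LOW_RX_QUOTA (5), the packet is
+ * tagged with CONTROL_FLAG_CONFIRM_RX so that the receiver acknowledges
+ * before the high quota is exhausted.
+ */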
+
+static int msm_ipc_router_write_pkt(struct msm_ipc_port *src,
+				struct msm_ipc_router_remote_port *rport_ptr,
+				struct rr_packet *pkt,
+				long timeout)
+{
+	struct rr_header_v1 *hdr;
+	struct msm_ipc_router_xprt_info *xprt_info;
+	struct msm_ipc_routing_table_entry *rt_entry;
+	struct sk_buff *temp_skb;
+	int xprt_option;
+	int ret;
+	int align_size;
+	uint32_t set_confirm_rx = 0;
+
+	if (!rport_ptr || !src || !pkt)
+		return -EINVAL;
+
+	hdr = &(pkt->hdr);
+	hdr->version = IPC_ROUTER_V1;
+	hdr->type = IPC_ROUTER_CTRL_CMD_DATA;
+	hdr->src_node_id = src->this_port.node_id;
+	hdr->src_port_id = src->this_port.port_id;
+	hdr->size = pkt->length;
+	hdr->control_flag = 0;
+	hdr->dst_node_id = rport_ptr->node_id;
+	hdr->dst_port_id = rport_ptr->port_id;
+
+	ret = ipc_router_tx_wait(src, rport_ptr, &set_confirm_rx, timeout);
+	if (ret < 0)
+		return ret;
+	if (set_confirm_rx)
+		hdr->control_flag |= CONTROL_FLAG_CONFIRM_RX;
+
+	if (hdr->dst_node_id == IPC_ROUTER_NID_LOCAL) {
+		ipc_router_log_msg(local_log_ctx,
+		IPC_ROUTER_LOG_EVENT_TX, pkt, hdr, src, rport_ptr);
+		ret = loopback_data(src, hdr->dst_port_id, pkt);
+		return ret;
+	}
+
+	rt_entry = ipc_router_get_rtentry_ref(hdr->dst_node_id);
+	if (!rt_entry) {
+		IPC_RTR_ERR("%s: Remote node %d not up\n",
+			__func__, hdr->dst_node_id);
+		return -ENODEV;
+	}
+	down_read(&rt_entry->lock_lha4);
+	xprt_info = rt_entry->xprt_info;
+	ret = ipc_router_get_xprt_info_ref(xprt_info);
+	if (ret < 0) {
+		IPC_RTR_ERR("%s: Abort invalid xprt\n", __func__);
+		up_read(&rt_entry->lock_lha4);
+		kref_put(&rt_entry->ref, ipc_router_release_rtentry);
+		return ret;
+	}
+	ret = prepend_header(pkt, xprt_info);
+	if (ret < 0) {
+		IPC_RTR_ERR("%s: Prepend Header failed\n", __func__);
+		goto out_write_pkt;
+	}
+	xprt_option = xprt_info->xprt->get_option(xprt_info->xprt);
+	if (!(xprt_option & FRAG_PKT_WRITE_ENABLE)) {
+		ret = defragment_pkt(pkt);
+		if (ret < 0)
+			goto out_write_pkt;
+	}
+
+	temp_skb = skb_peek_tail(pkt->pkt_fragment_q);
+	if (!temp_skb) {
+		IPC_RTR_ERR("%s: Abort invalid pkt\n", __func__);
+		ret = -EINVAL;
+		goto out_write_pkt;
+	}
+	align_size = ALIGN_SIZE(pkt->length);
+	skb_put(temp_skb, align_size);
+	pkt->length += align_size;
+	mutex_lock(&xprt_info->tx_lock_lhb2);
+	ret = xprt_info->xprt->write(pkt, pkt->length, xprt_info->xprt);
+	mutex_unlock(&xprt_info->tx_lock_lhb2);
+out_write_pkt:
+	up_read(&rt_entry->lock_lha4);
+	kref_put(&rt_entry->ref, ipc_router_release_rtentry);
+
+	if (ret < 0) {
+		IPC_RTR_ERR("%s: Write on XPRT failed\n", __func__);
+		ipc_router_log_msg(xprt_info->log_ctx,
+			IPC_ROUTER_LOG_EVENT_TX_ERR, pkt, hdr, src, rport_ptr);
+
+		ipc_router_put_xprt_info_ref(xprt_info);
+		return ret;
+	}
+	update_comm_mode_info(&src->mode_info, xprt_info);
+	ipc_router_log_msg(xprt_info->log_ctx,
+		IPC_ROUTER_LOG_EVENT_TX, pkt, hdr, src, rport_ptr);
+	if (msm_ipc_router_debug_mask & SMEM_LOG) {
+		smem_log_event((SMEM_LOG_PROC_ID_APPS |
+			SMEM_LOG_IPC_ROUTER_EVENT_BASE |
+			IPC_ROUTER_LOG_EVENT_TX),
+			(hdr->src_node_id << 24) |
+			(hdr->src_port_id & 0xffffff),
+			(hdr->dst_node_id << 24) |
+			(hdr->dst_port_id & 0xffffff),
+			(hdr->type << 24) | (hdr->control_flag << 16) |
+			(hdr->size & 0xffff));
+	}
+
+	ipc_router_put_xprt_info_ref(xprt_info);
+	return hdr->size;
+}
+
+int msm_ipc_router_send_to(struct msm_ipc_port *src,
+			   struct sk_buff_head *data,
+			   struct msm_ipc_addr *dest,
+			   long timeout)
+{
+	uint32_t dst_node_id = 0, dst_port_id = 0;
+	struct msm_ipc_server *server;
+	struct msm_ipc_server_port *server_port;
+	struct msm_ipc_router_remote_port *rport_ptr = NULL;
+	struct msm_ipc_router_remote_port *src_rport_ptr = NULL;
+	struct rr_packet *pkt;
+	int ret;
+
+	if (!src || !data || !dest) {
+		IPC_RTR_ERR("%s: Invalid Parameters\n", __func__);
+		return -EINVAL;
+	}
+
+	/* Resolve Address */
+	if (dest->addrtype == MSM_IPC_ADDR_ID) {
+		dst_node_id = dest->addr.port_addr.node_id;
+		dst_port_id = dest->addr.port_addr.port_id;
+	} else if (dest->addrtype == MSM_IPC_ADDR_NAME) {
+		server = ipc_router_get_server_ref(
+					dest->addr.port_name.service,
+					dest->addr.port_name.instance,
+					0, 0);
+		if (!server) {
+			IPC_RTR_ERR("%s: Destination not reachable\n",
+								__func__);
+			return -ENODEV;
+		}
+		server_port = list_first_entry(&server->server_port_list,
+					       struct msm_ipc_server_port,
+					       list);
+		dst_node_id = server_port->server_addr.node_id;
+		dst_port_id = server_port->server_addr.port_id;
+		kref_put(&server->ref, ipc_router_release_server);
+	}
+
+	rport_ptr = ipc_router_get_rport_ref(dst_node_id, dst_port_id);
+	if (!rport_ptr) {
+		IPC_RTR_ERR("%s: Remote port not found\n", __func__);
+		return -ENODEV;
+	}
+
+	if (src->check_send_permissions) {
+		ret = src->check_send_permissions(rport_ptr->sec_rule);
+		if (ret <= 0) {
+			kref_put(&rport_ptr->ref, ipc_router_release_rport);
+			IPC_RTR_ERR("%s: permission failure for %s\n",
+				__func__, current->comm);
+			return -EPERM;
+		}
+	}
+
+	if (dst_node_id == IPC_ROUTER_NID_LOCAL && !src->rport_info) {
+		src_rport_ptr = ipc_router_create_rport(IPC_ROUTER_NID_LOCAL,
+					src->this_port.port_id, NULL);
+		if (!src_rport_ptr) {
+			kref_put(&rport_ptr->ref, ipc_router_release_rport);
+			IPC_RTR_ERR("%s: RPort creation failed\n", __func__);
+			return -ENOMEM;
+		}
+		mutex_lock(&src->port_lock_lhc3);
+		src->rport_info = src_rport_ptr;
+		mutex_unlock(&src->port_lock_lhc3);
+		kref_put(&src_rport_ptr->ref, ipc_router_release_rport);
+	}
+
+	pkt = create_pkt(data);
+	if (!pkt) {
+		kref_put(&rport_ptr->ref, ipc_router_release_rport);
+		IPC_RTR_ERR("%s: Pkt creation failed\n", __func__);
+		return -ENOMEM;
+	}
+
+	ret = msm_ipc_router_write_pkt(src, rport_ptr, pkt, timeout);
+	kref_put(&rport_ptr->ref, ipc_router_release_rport);
+	if (ret < 0)
+		pkt->pkt_fragment_q = NULL;
+	release_pkt(pkt);
+
+	return ret;
+}
+
+int msm_ipc_router_send_msg(struct msm_ipc_port *src,
+			    struct msm_ipc_addr *dest,
+			    void *data, unsigned int data_len)
+{
+	struct sk_buff_head *out_skb_head;
+	int ret;
+
+	out_skb_head = msm_ipc_router_buf_to_skb(data, data_len);
+	if (!out_skb_head) {
+		IPC_RTR_ERR("%s: SKB conversion failed\n", __func__);
+		return -EFAULT;
+	}
+
+	ret = msm_ipc_router_send_to(src, out_skb_head, dest, 0);
+	if (ret < 0) {
+		if (ret != -EAGAIN)
+			IPC_RTR_ERR(
+			"%s: msm_ipc_router_send_to failed - ret: %d\n",
+				__func__, ret);
+		msm_ipc_router_free_skb(out_skb_head);
+		return ret;
+	}
+	return 0;
+}
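+
+/*
+ * Example (illustrative sketch): sending a buffer to a service by name.
+ * port_ptr, svc_id, ins_id, buf and buf_len are placeholders.
+ *
+ *	struct msm_ipc_addr dest = { .addrtype = MSM_IPC_ADDR_NAME };
+ *
+ *	dest.addr.port_name.service = svc_id;
+ *	dest.addr.port_name.instance = ins_id;
+ *	ret = msm_ipc_router_send_msg(port_ptr, &dest, buf, buf_len);
+ */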
+
+/**
+ * msm_ipc_router_send_resume_tx() - Send Resume_Tx message
+ * @data: Pointer to received data packet that has confirm_rx bit set
+ *
+ * @return: On success, number of bytes transferred is returned, else
+ *	    standard linux error code is returned.
+ *
+ * This function sends the Resume_Tx event to the remote node that
+ * sent the data with the confirm_rx field set. In a multi-hop scenario,
+ * it also makes sure that the resume_tx event reaches the correct
+ * destination node_id.
+ */
+static int msm_ipc_router_send_resume_tx(void *data)
+{
+	union rr_control_msg msg;
+	struct rr_header_v1 *hdr = (struct rr_header_v1 *)data;
+	struct msm_ipc_routing_table_entry *rt_entry;
+	int ret;
+
+	memset(&msg, 0, sizeof(msg));
+	msg.cmd = IPC_ROUTER_CTRL_CMD_RESUME_TX;
+	msg.cli.node_id = hdr->dst_node_id;
+	msg.cli.port_id = hdr->dst_port_id;
+	rt_entry = ipc_router_get_rtentry_ref(hdr->src_node_id);
+	if (!rt_entry) {
+		IPC_RTR_ERR("%s: %d Node is not present",
+				__func__, hdr->src_node_id);
+		return -ENODEV;
+	}
+	ret = ipc_router_get_xprt_info_ref(rt_entry->xprt_info);
+	if (ret < 0) {
+		IPC_RTR_ERR("%s: Abort invalid xprt\n", __func__);
+		kref_put(&rt_entry->ref, ipc_router_release_rtentry);
+		return ret;
+	}
+	ret = ipc_router_send_ctl_msg(rt_entry->xprt_info, &msg,
+				      hdr->src_node_id);
+	ipc_router_put_xprt_info_ref(rt_entry->xprt_info);
+	kref_put(&rt_entry->ref, ipc_router_release_rtentry);
+	if (ret < 0)
+		IPC_RTR_ERR(
+		"%s: Send Resume_Tx Failed SRC_NODE: %d SRC_PORT: %d DEST_NODE: %d",
+			__func__, hdr->dst_node_id, hdr->dst_port_id,
+			hdr->src_node_id);
+
+	return ret;
+}
+
+int msm_ipc_router_read(struct msm_ipc_port *port_ptr,
+			struct rr_packet **read_pkt,
+			size_t buf_len)
+{
+	struct rr_packet *pkt;
+
+	if (!port_ptr || !read_pkt)
+		return -EINVAL;
+
+	mutex_lock(&port_ptr->port_rx_q_lock_lhc3);
+	if (list_empty(&port_ptr->port_rx_q)) {
+		mutex_unlock(&port_ptr->port_rx_q_lock_lhc3);
+		return -EAGAIN;
+	}
+
+	pkt = list_first_entry(&port_ptr->port_rx_q, struct rr_packet, list);
+	if ((buf_len) && (pkt->hdr.size > buf_len)) {
+		mutex_unlock(&port_ptr->port_rx_q_lock_lhc3);
+		return -ETOOSMALL;
+	}
+	list_del(&pkt->list);
+	if (list_empty(&port_ptr->port_rx_q))
+		__pm_relax(port_ptr->port_rx_ws);
+	*read_pkt = pkt;
+	mutex_unlock(&port_ptr->port_rx_q_lock_lhc3);
+	if (pkt->hdr.control_flag & CONTROL_FLAG_CONFIRM_RX)
+		msm_ipc_router_send_resume_tx(&pkt->hdr);
+
+	return pkt->length;
+}
+
+/**
+ * msm_ipc_router_rx_data_wait() - Wait for a new message destined for a local port.
+ * @port_ptr: Pointer to the local port
+ * @timeout: < 0 indicates an infinite wait until a message arrives.
+ *	     > 0 indicates the wait time.
+ *	     0 indicates that we do not wait.
+ * @return: 0 if there are pending messages to read,
+ *	    standard Linux error code otherwise.
+ *
+ * Checks for the availability of messages that are destined to a local port.
+ * If no messages are present then waits as per @timeout.
+ */
+int msm_ipc_router_rx_data_wait(struct msm_ipc_port *port_ptr, long timeout)
+{
+	int ret = 0;
+
+	mutex_lock(&port_ptr->port_rx_q_lock_lhc3);
+	while (list_empty(&port_ptr->port_rx_q)) {
+		mutex_unlock(&port_ptr->port_rx_q_lock_lhc3);
+		if (timeout < 0) {
+			ret = wait_event_interruptible(
+					port_ptr->port_rx_wait_q,
+					!list_empty(&port_ptr->port_rx_q));
+			if (ret)
+				return ret;
+		} else if (timeout > 0) {
+			timeout = wait_event_interruptible_timeout(
+					port_ptr->port_rx_wait_q,
+					!list_empty(&port_ptr->port_rx_q),
+					timeout);
+			if (timeout < 0)
+				return -EFAULT;
+		}
+		if (timeout == 0)
+			return -ENOMSG;
+		mutex_lock(&port_ptr->port_rx_q_lock_lhc3);
+	}
+	mutex_unlock(&port_ptr->port_rx_q_lock_lhc3);
+
+	return ret;
+}
+
+/**
+ * msm_ipc_router_recv_from() - Receive messages destined for a local port.
+ * @port_ptr: Pointer to the local port
+ * @pkt: Pointer to the router-to-router packet
+ * @src: Pointer filled with the source address of the received message
+ * @timeout: < 0 indicates an infinite wait until a message arrives.
+ *	     > 0 indicates the wait time.
+ *	     0 indicates that we do not wait.
+ * @return: = Number of bytes read (on a successful read operation).
+ *	    = -ENOMSG (if there are no pending messages and timeout is 0).
+ *	    = -EINVAL (if either of the arguments, port_ptr or pkt, is invalid)
+ *	    = -EFAULT (if the timed wait fails while timeout is > 0)
+ *	    = -ERESTARTSYS (if there are no pending messages when timeout
+ *	      is < 0 and wait_event_interruptible was interrupted by a signal)
+ *
+ * This function reads the messages that are destined for a local port. It
+ * is used by modules that exist within the kernel and use IPC Router for
+ * transport. The function checks whether any messages have already been
+ * received. If so, it reads them; otherwise it waits as per the timeout
+ * value. On a successful read, the return value of the function indicates
+ * the number of bytes read.
+ */
+int msm_ipc_router_recv_from(struct msm_ipc_port *port_ptr,
+			     struct rr_packet **pkt,
+			     struct msm_ipc_addr *src,
+			     long timeout)
+{
+	int ret, data_len, align_size;
+	struct sk_buff *temp_skb;
+	struct rr_header_v1 *hdr = NULL;
+
+	if (!port_ptr || !pkt) {
+		IPC_RTR_ERR("%s: Invalid pointers being passed\n", __func__);
+		return -EINVAL;
+	}
+
+	*pkt = NULL;
+
+	ret = msm_ipc_router_rx_data_wait(port_ptr, timeout);
+	if (ret)
+		return ret;
+
+	ret = msm_ipc_router_read(port_ptr, pkt, 0);
+	if (ret <= 0 || !(*pkt))
+		return ret;
+
+	hdr = &((*pkt)->hdr);
+	if (src) {
+		src->addrtype = MSM_IPC_ADDR_ID;
+		src->addr.port_addr.node_id = hdr->src_node_id;
+		src->addr.port_addr.port_id = hdr->src_port_id;
+	}
+
+	data_len = hdr->size;
+	align_size = ALIGN_SIZE(data_len);
+	if (align_size) {
+		temp_skb = skb_peek_tail((*pkt)->pkt_fragment_q);
+		if (temp_skb)
+			skb_trim(temp_skb, (temp_skb->len - align_size));
+	}
+	return data_len;
+}
+
+int msm_ipc_router_read_msg(struct msm_ipc_port *port_ptr,
+			    struct msm_ipc_addr *src,
+			    unsigned char **data,
+			    unsigned int *len)
+{
+	struct rr_packet *pkt;
+	int ret;
+
+	ret = msm_ipc_router_recv_from(port_ptr, &pkt, src, 0);
+	if (ret < 0) {
+		if (ret != -ENOMSG)
+			IPC_RTR_ERR(
+			"%s: msm_ipc_router_recv_from failed - ret: %d\n",
+				__func__, ret);
+		return ret;
+	}
+
+	*data = msm_ipc_router_skb_to_buf(pkt->pkt_fragment_q, ret);
+	if (!(*data)) {
+		IPC_RTR_ERR("%s: Buf conversion failed\n", __func__);
+		release_pkt(pkt);
+		return -ENOMEM;
+	}
+
+	*len = ret;
+	release_pkt(pkt);
+	return 0;
+}
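+
+/*
+ * Example (illustrative sketch): draining a port with the helper above.
+ * The buffer returned through @data comes from msm_ipc_router_skb_to_buf()
+ * and is released with kfree(), the same pattern process_control_msg()
+ * uses for its own buffer. handle_msg() is a placeholder consumer.
+ *
+ *	unsigned char *data;
+ *	unsigned int len;
+ *	struct msm_ipc_addr src;
+ *
+ *	while (!msm_ipc_router_read_msg(port_ptr, &src, &data, &len)) {
+ *		handle_msg(data, len, &src);
+ *		kfree(data);
+ *	}
+ */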
+
+/**
+ * msm_ipc_router_create_port() - Create a IPC Router port/endpoint
+ * @notify: Callback function to notify any event on the port.
+ *   @event: Event ID to be handled.
+ *   @oob_data: Any out-of-band data associated with the event.
+ *   @oob_data_len: Size of the out-of-band data, if valid.
+ *   @priv: Private data registered during the port creation.
+ * @priv: Private info to be passed while the notification is generated.
+ *
+ * @return: Pointer to the port on success, NULL on error.
+ */
+struct msm_ipc_port *msm_ipc_router_create_port(
+	void (*notify)(unsigned event, void *oob_data,
+		       size_t oob_data_len, void *priv),
+	void *priv)
+{
+	struct msm_ipc_port *port_ptr;
+	int ret;
+
+	ret = ipc_router_core_init();
+	if (ret < 0) {
+		IPC_RTR_ERR("%s: Error %d initializing IPC Router\n",
+			    __func__, ret);
+		return NULL;
+	}
+
+	port_ptr = msm_ipc_router_create_raw_port(NULL, notify, priv);
+	if (!port_ptr)
+		IPC_RTR_ERR("%s: port_ptr alloc failed\n", __func__);
+
+	return port_ptr;
+}
+
+int msm_ipc_router_close_port(struct msm_ipc_port *port_ptr)
+{
+	union rr_control_msg msg;
+	struct msm_ipc_server *server;
+	struct msm_ipc_router_remote_port *rport_ptr;
+
+	if (!port_ptr)
+		return -EINVAL;
+
+	if (port_ptr->type == SERVER_PORT || port_ptr->type == CLIENT_PORT) {
+		down_write(&local_ports_lock_lhc2);
+		list_del(&port_ptr->list);
+		up_write(&local_ports_lock_lhc2);
+
+		mutex_lock(&port_ptr->port_lock_lhc3);
+		rport_ptr = (struct msm_ipc_router_remote_port *)
+						port_ptr->rport_info;
+		port_ptr->rport_info = NULL;
+		mutex_unlock(&port_ptr->port_lock_lhc3);
+		if (rport_ptr) {
+			ipc_router_reset_conn(rport_ptr);
+			ipc_router_destroy_rport(rport_ptr);
+		}
+
+		if (port_ptr->type == SERVER_PORT) {
+			memset(&msg, 0, sizeof(msg));
+			msg.cmd = IPC_ROUTER_CTRL_CMD_REMOVE_SERVER;
+			msg.srv.service = port_ptr->port_name.service;
+			msg.srv.instance = port_ptr->port_name.instance;
+			msg.srv.node_id = port_ptr->this_port.node_id;
+			msg.srv.port_id = port_ptr->this_port.port_id;
+			broadcast_ctl_msg(&msg);
+		}
+
+		/* Server port could have been a client port earlier.
+		 * Send REMOVE_CLIENT message in either case.
+		 */
+		msm_ipc_router_send_remove_client(&port_ptr->mode_info,
+			port_ptr->this_port.node_id,
+			port_ptr->this_port.port_id);
+	} else if (port_ptr->type == CONTROL_PORT) {
+		down_write(&control_ports_lock_lha5);
+		list_del(&port_ptr->list);
+		up_write(&control_ports_lock_lha5);
+	} else if (port_ptr->type == IRSC_PORT) {
+		down_write(&local_ports_lock_lhc2);
+		list_del(&port_ptr->list);
+		up_write(&local_ports_lock_lhc2);
+	}
+
+	if (port_ptr->type == SERVER_PORT) {
+		server = ipc_router_get_server_ref(
+				port_ptr->port_name.service,
+				port_ptr->port_name.instance,
+				port_ptr->this_port.node_id,
+				port_ptr->this_port.port_id);
+		if (server) {
+			kref_put(&server->ref, ipc_router_release_server);
+			ipc_router_destroy_server(server,
+				port_ptr->this_port.node_id,
+				port_ptr->this_port.port_id);
+		}
+	}
+
+	mutex_lock(&port_ptr->port_lock_lhc3);
+	rport_ptr = (struct msm_ipc_router_remote_port *)port_ptr->rport_info;
+	port_ptr->rport_info = NULL;
+	mutex_unlock(&port_ptr->port_lock_lhc3);
+	if (rport_ptr)
+		ipc_router_destroy_rport(rport_ptr);
+
+	kref_put(&port_ptr->ref, ipc_router_release_port);
+	return 0;
+}
+
+int msm_ipc_router_get_curr_pkt_size(struct msm_ipc_port *port_ptr)
+{
+	struct rr_packet *pkt;
+	int rc = 0;
+
+	if (!port_ptr)
+		return -EINVAL;
+
+	mutex_lock(&port_ptr->port_rx_q_lock_lhc3);
+	if (!list_empty(&port_ptr->port_rx_q)) {
+		pkt = list_first_entry(&port_ptr->port_rx_q,
+					struct rr_packet, list);
+		rc = pkt->hdr.size;
+	}
+	mutex_unlock(&port_ptr->port_rx_q_lock_lhc3);
+
+	return rc;
+}
+
+int msm_ipc_router_bind_control_port(struct msm_ipc_port *port_ptr)
+{
+	if (unlikely(!port_ptr || port_ptr->type != CLIENT_PORT))
+		return -EINVAL;
+
+	down_write(&local_ports_lock_lhc2);
+	list_del(&port_ptr->list);
+	up_write(&local_ports_lock_lhc2);
+	port_ptr->type = CONTROL_PORT;
+	down_write(&control_ports_lock_lha5);
+	list_add_tail(&port_ptr->list, &control_ports);
+	up_write(&control_ports_lock_lha5);
+
+	return 0;
+}
+
+int msm_ipc_router_lookup_server_name(struct msm_ipc_port_name *srv_name,
+				struct msm_ipc_server_info *srv_info,
+				int num_entries_in_array,
+				uint32_t lookup_mask)
+{
+	struct msm_ipc_server *server;
+	struct msm_ipc_server_port *server_port;
+	int key, i = 0; /* number of entries found */
+
+	if (!srv_name) {
+		IPC_RTR_ERR("%s: Invalid srv_name\n", __func__);
+		return -EINVAL;
+	}
+
+	if (num_entries_in_array && !srv_info) {
+		IPC_RTR_ERR("%s: srv_info NULL\n", __func__);
+		return -EINVAL;
+	}
+
+	down_read(&server_list_lock_lha2);
+	key = (srv_name->service & (SRV_HASH_SIZE - 1));
+	list_for_each_entry(server, &server_list[key], list) {
+		if ((server->name.service != srv_name->service) ||
+		    ((server->name.instance & lookup_mask) !=
+			srv_name->instance))
+			continue;
+
+		list_for_each_entry(server_port,
+			&server->server_port_list, list) {
+			if (i < num_entries_in_array) {
+				srv_info[i].node_id =
+					  server_port->server_addr.node_id;
+				srv_info[i].port_id =
+					  server_port->server_addr.port_id;
+				srv_info[i].service = server->name.service;
+				srv_info[i].instance = server->name.instance;
+			}
+			i++;
+		}
+	}
+	up_read(&server_list_lock_lha2);
+
+	return i;
+}
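+
+/*
+ * Example (illustrative sketch): the lookup above supports a two-pass
+ * pattern - one call with a zero-sized array to learn the count, then a
+ * second call to fill a suitably sized array. With lookup_mask = 0 and
+ * instance = 0, every instance of the service matches.
+ *
+ *	struct msm_ipc_port_name name = { .service = svc_id, .instance = 0 };
+ *	int n = msm_ipc_router_lookup_server_name(&name, NULL, 0, 0);
+ *	(allocate n entries of struct msm_ipc_server_info as info, then)
+ *	n = msm_ipc_router_lookup_server_name(&name, info, n, 0);
+ */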
+
+int msm_ipc_router_close(void)
+{
+	struct msm_ipc_router_xprt_info *xprt_info, *tmp_xprt_info;
+
+	down_write(&xprt_info_list_lock_lha5);
+	list_for_each_entry_safe(xprt_info, tmp_xprt_info,
+				 &xprt_info_list, list) {
+		xprt_info->xprt->close(xprt_info->xprt);
+		list_del(&xprt_info->list);
+		kfree(xprt_info);
+	}
+	up_write(&xprt_info_list_lock_lha5);
+	return 0;
+}
+
+/**
+ * pil_vote_load_worker() - Process vote to load the modem
+ *
+ * @work: Work item to process
+ *
+ * This function is called to process votes to load the modem that have been
+ * queued by msm_ipc_load_default_node().
+ */
+static void pil_vote_load_worker(struct work_struct *work)
+{
+	struct pil_vote_info *vote_info;
+
+	vote_info = container_of(work, struct pil_vote_info, load_work);
+	if (strlen(default_peripheral)) {
+		vote_info->pil_handle = subsystem_get(default_peripheral);
+		if (IS_ERR(vote_info->pil_handle)) {
+			IPC_RTR_ERR("%s: Failed to load %s\n",
+				    __func__, default_peripheral);
+			vote_info->pil_handle = NULL;
+		}
+	} else {
+		vote_info->pil_handle = NULL;
+	}
+}
+
+/**
+ * pil_vote_unload_worker() - Process vote to unload the modem
+ *
+ * @work: Work item to process
+ *
+ * This function is called to process votes to unload the modem that have been
+ * queued by msm_ipc_unload_default_node().
+ */
+static void pil_vote_unload_worker(struct work_struct *work)
+{
+	struct pil_vote_info *vote_info;
+
+	vote_info = container_of(work, struct pil_vote_info, unload_work);
+
+	if (vote_info->pil_handle) {
+		subsystem_put(vote_info->pil_handle);
+		vote_info->pil_handle = NULL;
+	}
+	kfree(vote_info);
+}
+
+/**
+ * msm_ipc_load_default_node() - Queue a vote to load the modem.
+ *
+ * @return: PIL vote info structure on success, NULL on failure.
+ *
+ * This function places a work item that loads the modem on the
+ * single-threaded workqueue used for processing PIL votes to load
+ * or unload the modem.
+ */
+void *msm_ipc_load_default_node(void)
+{
+	struct pil_vote_info *vote_info;
+
+	vote_info = kmalloc(sizeof(*vote_info), GFP_KERNEL);
+	if (!vote_info)
+		return vote_info;
+
+	INIT_WORK(&vote_info->load_work, pil_vote_load_worker);
+	queue_work(msm_ipc_router_workqueue, &vote_info->load_work);
+
+	return vote_info;
+}
+
+/**
+ * msm_ipc_unload_default_node() - Queue a vote to unload the modem.
+ *
+ * @pil_vote: PIL vote info structure, containing the PIL handle
+ * and work structure.
+ *
+ * This function places a work item that unloads the modem on the
+ * single-threaded workqueue used for processing PIL votes to load
+ * or unload the modem.
+ */
+void msm_ipc_unload_default_node(void *pil_vote)
+{
+	struct pil_vote_info *vote_info;
+
+	if (pil_vote) {
+		vote_info = (struct pil_vote_info *)pil_vote;
+		INIT_WORK(&vote_info->unload_work, pil_vote_unload_worker);
+		queue_work(msm_ipc_router_workqueue, &vote_info->unload_work);
+	}
+}
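+
+/*
+ * Example (illustrative sketch): the two helpers above are used as a pair;
+ * the opaque handle returned by the load vote is what the unload vote
+ * releases.
+ *
+ *	void *pil_vote = msm_ipc_load_default_node();
+ *	(communicate with the remote node)
+ *	msm_ipc_unload_default_node(pil_vote);
+ */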
+
+#if defined(CONFIG_DEBUG_FS)
+static void dump_routing_table(struct seq_file *s)
+{
+	int j;
+	struct msm_ipc_routing_table_entry *rt_entry;
+
+	seq_printf(s, "%-10s|%-20s|%-10s|\n",
+			"Node Id", "XPRT Name", "Next Hop");
+	seq_puts(s, "----------------------------------------------\n");
+	for (j = 0; j < RT_HASH_SIZE; j++) {
+		down_read(&routing_table_lock_lha3);
+		list_for_each_entry(rt_entry, &routing_table[j], list) {
+			down_read(&rt_entry->lock_lha4);
+			seq_printf(s, "0x%08x|", rt_entry->node_id);
+			if (rt_entry->node_id == IPC_ROUTER_NID_LOCAL)
+				seq_printf(s, "%-20s|0x%08x|\n",
+				       "Loopback", rt_entry->node_id);
+			else
+				seq_printf(s, "%-20s|0x%08x|\n",
+				       rt_entry->xprt_info->xprt->name,
+				       rt_entry->node_id);
+			up_read(&rt_entry->lock_lha4);
+		}
+		up_read(&routing_table_lock_lha3);
+	}
+}
+
+static void dump_xprt_info(struct seq_file *s)
+{
+	struct msm_ipc_router_xprt_info *xprt_info;
+
+	seq_printf(s, "%-20s|%-10s|%-12s|%-15s|\n",
+			"XPRT Name", "Link ID",
+			"Initialized", "Remote Node Id");
+	seq_puts(s, "------------------------------------------------------------\n");
+	down_read(&xprt_info_list_lock_lha5);
+	list_for_each_entry(xprt_info, &xprt_info_list, list)
+		seq_printf(s, "%-20s|0x%08x|%-12s|0x%08x|\n",
+			       xprt_info->xprt->name,
+			       xprt_info->xprt->link_id,
+			       (xprt_info->initialized ? "Y" : "N"),
+			       xprt_info->remote_node_id);
+	up_read(&xprt_info_list_lock_lha5);
+}
+
+static void dump_servers(struct seq_file *s)
+{
+	int j;
+	struct msm_ipc_server *server;
+	struct msm_ipc_server_port *server_port;
+
+	seq_printf(s, "%-11s|%-11s|%-11s|%-11s|\n",
+			"Service", "Instance", "Node_id", "Port_id");
+	seq_puts(s, "------------------------------------------------------------\n");
+	down_read(&server_list_lock_lha2);
+	for (j = 0; j < SRV_HASH_SIZE; j++) {
+		list_for_each_entry(server, &server_list[j], list) {
+			list_for_each_entry(server_port,
+					    &server->server_port_list,
+					    list)
+				seq_printf(s, "0x%08x |0x%08x |0x%08x |0x%08x |\n",
+					server->name.service,
+					server->name.instance,
+					server_port->server_addr.node_id,
+					server_port->server_addr.port_id);
+		}
+	}
+	up_read(&server_list_lock_lha2);
+}
+
+static void dump_remote_ports(struct seq_file *s)
+{
+	int j, k;
+	struct msm_ipc_router_remote_port *rport_ptr;
+	struct msm_ipc_routing_table_entry *rt_entry;
+
+	seq_printf(s, "%-11s|%-11s|%-10s|\n",
+			"Node_id", "Port_id", "Quota_cnt");
+	seq_puts(s, "------------------------------------------------------------\n");
+	for (j = 0; j < RT_HASH_SIZE; j++) {
+		down_read(&routing_table_lock_lha3);
+		list_for_each_entry(rt_entry, &routing_table[j], list) {
+			down_read(&rt_entry->lock_lha4);
+			for (k = 0; k < RP_HASH_SIZE; k++) {
+				list_for_each_entry(rport_ptr,
+					&rt_entry->remote_port_list[k],
+					list)
+					seq_printf(s, "0x%08x |0x%08x |0x%08x|\n",
+						rport_ptr->node_id,
+						rport_ptr->port_id,
+						rport_ptr->tx_quota_cnt);
+			}
+			up_read(&rt_entry->lock_lha4);
+		}
+		up_read(&routing_table_lock_lha3);
+	}
+}
+
+static void dump_control_ports(struct seq_file *s)
+{
+	struct msm_ipc_port *port_ptr;
+
+	seq_printf(s, "%-11s|%-11s|\n",
+			"Node_id", "Port_id");
+	seq_puts(s, "------------------------------------------------------------\n");
+	down_read(&control_ports_lock_lha5);
+	list_for_each_entry(port_ptr, &control_ports, list)
+		seq_printf(s, "0x%08x |0x%08x |\n",
+			 port_ptr->this_port.node_id,
+			 port_ptr->this_port.port_id);
+	up_read(&control_ports_lock_lha5);
+}
+
+static void dump_local_ports(struct seq_file *s)
+{
+	int j;
+	struct msm_ipc_port *port_ptr;
+
+	seq_printf(s, "%-11s|%-11s|%-32s|%-11s|\n",
+		   "Node_id", "Port_id", "Wakelock", "Last SVCID");
+	seq_puts(s, "------------------------------------------------------------\n");
+	down_read(&local_ports_lock_lhc2);
+	for (j = 0; j < LP_HASH_SIZE; j++) {
+		list_for_each_entry(port_ptr, &local_ports[j], list) {
+			mutex_lock(&port_ptr->port_lock_lhc3);
+			seq_printf(s, "0x%08x |0x%08x |%-32s|0x%08x |\n",
+				   port_ptr->this_port.node_id,
+				   port_ptr->this_port.port_id,
+				   port_ptr->rx_ws_name,
+				   port_ptr->last_served_svc_id);
+			mutex_unlock(&port_ptr->port_lock_lhc3);
+		}
+	}
+	up_read(&local_ports_lock_lhc2);
+}
+
+static int debugfs_show(struct seq_file *s, void *data)
+{
+	void (*show)(struct seq_file *) = s->private;
+	show(s);
+	return 0;
+}
+
+static int debug_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, debugfs_show, inode->i_private);
+}
+
+static const struct file_operations debug_ops = {
+	.open = debug_open,
+	.release = single_release,
+	.read = seq_read,
+	.llseek = seq_lseek,
+};
+
+static void debug_create(const char *name, struct dentry *dent,
+			 void (*show)(struct seq_file *))
+{
+	debugfs_create_file(name, 0444, dent, show, &debug_ops);
+}
+
+static void debugfs_init(void)
+{
+	struct dentry *dent;
+
+	dent = debugfs_create_dir("msm_ipc_router", 0);
+	if (IS_ERR(dent))
+		return;
+
+	debug_create("dump_local_ports", dent, dump_local_ports);
+	debug_create("dump_remote_ports", dent, dump_remote_ports);
+	debug_create("dump_control_ports", dent, dump_control_ports);
+	debug_create("dump_servers", dent, dump_servers);
+	debug_create("dump_xprt_info", dent, dump_xprt_info);
+	debug_create("dump_routing_table", dent, dump_routing_table);
+}
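+
+/*
+ * With debugfs mounted at its usual location (an assumption about the
+ * target system, not something this file controls), the state dumped by
+ * the handlers above can be inspected from user space, e.g.:
+ *
+ *	cat /sys/kernel/debug/msm_ipc_router/dump_routing_table
+ *	cat /sys/kernel/debug/msm_ipc_router/dump_servers
+ */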
+
+#else
+static void debugfs_init(void) {}
+#endif
+
+/**
+ * ipc_router_create_log_ctx() - Create and add the log context based on transport
+ * @name:	subsystem name
+ *
+ * Return:	a reference to the log context created
+ *
+ * This function creates ipc log context based on transport and adds it to a
+ * global list. This log context can be reused from the list in case of a
+ * subsystem restart.
+ */
+static void *ipc_router_create_log_ctx(char *name)
+{
+	struct ipc_rtr_log_ctx *sub_log_ctx;
+
+	sub_log_ctx = kmalloc(sizeof(struct ipc_rtr_log_ctx),
+				GFP_KERNEL);
+	if (!sub_log_ctx)
+		return NULL;
+	sub_log_ctx->log_ctx = ipc_log_context_create(
+				IPC_RTR_INFO_PAGES, name, 0);
+	if (!sub_log_ctx->log_ctx) {
+		IPC_RTR_ERR("%s: Unable to create IPC logging for [%s]",
+			__func__, name);
+		kfree(sub_log_ctx);
+		return NULL;
+	}
+	strlcpy(sub_log_ctx->log_ctx_name, name,
+			LOG_CTX_NAME_LEN);
+	INIT_LIST_HEAD(&sub_log_ctx->list);
+	list_add_tail(&sub_log_ctx->list, &log_ctx_list);
+	return sub_log_ctx->log_ctx;
+}
+
+static void ipc_router_log_ctx_init(void)
+{
+	mutex_lock(&log_ctx_list_lock_lha0);
+	local_log_ctx = ipc_router_create_log_ctx("local_IPCRTR");
+	mutex_unlock(&log_ctx_list_lock_lha0);
+}
+
+/**
+ * ipc_router_get_log_ctx() - Retrieves the ipc log context based on subsystem name.
+ * @sub_name:	subsystem name
+ *
+ * Return:	a reference to the log context
+ */
+static void *ipc_router_get_log_ctx(char *sub_name)
+{
+	void *log_ctx = NULL;
+	struct ipc_rtr_log_ctx *temp_log_ctx;
+
+	mutex_lock(&log_ctx_list_lock_lha0);
+	list_for_each_entry(temp_log_ctx, &log_ctx_list, list)
+		if (!strcmp(temp_log_ctx->log_ctx_name, sub_name)) {
+			log_ctx = temp_log_ctx->log_ctx;
+			mutex_unlock(&log_ctx_list_lock_lha0);
+			return log_ctx;
+		}
+	log_ctx = ipc_router_create_log_ctx(sub_name);
+	mutex_unlock(&log_ctx_list_lock_lha0);
+
+	return log_ctx;
+}
+
+/**
+ * ipc_router_get_xprt_info_ref() - Get a reference to the xprt_info structure
+ * @xprt_info: pointer to the xprt_info.
+ *
+ * @return: Zero on success, -ENODEV on failure.
+ *
+ * This function is used to obtain a reference to the xprt_info structure
+ * corresponding to the requested @xprt_info pointer.
+ */
+static int ipc_router_get_xprt_info_ref(
+		struct msm_ipc_router_xprt_info *xprt_info)
+{
+	int ret = -ENODEV;
+	struct msm_ipc_router_xprt_info *tmp_xprt_info;
+
+	if (!xprt_info)
+		return 0;
+
+	down_read(&xprt_info_list_lock_lha5);
+	list_for_each_entry(tmp_xprt_info, &xprt_info_list, list) {
+		if (tmp_xprt_info == xprt_info) {
+			kref_get(&xprt_info->ref);
+			ret = 0;
+			break;
+		}
+	}
+	up_read(&xprt_info_list_lock_lha5);
+
+	return ret;
+}
+
+/**
+ * ipc_router_put_xprt_info_ref() - Put a reference to the xprt_info structure
+ * @xprt_info: pointer to the xprt_info.
+ *
+ * This function is used to put the reference to the xprt_info structure
+ * corresponding to the requested @xprt_info pointer.
+ */
+static void ipc_router_put_xprt_info_ref(
+		struct msm_ipc_router_xprt_info *xprt_info)
+{
+	if (xprt_info)
+		kref_put(&xprt_info->ref, ipc_router_release_xprt_info_ref);
+}
+
+/**
+ * ipc_router_release_xprt_info_ref() - release the xprt_info last reference
+ * @ref: Reference to the xprt_info structure.
+ *
+ * This function is called when all references to the xprt_info structure
+ * are released.
+ */
+static void ipc_router_release_xprt_info_ref(struct kref *ref)
+{
+	struct msm_ipc_router_xprt_info *xprt_info =
+		container_of(ref, struct msm_ipc_router_xprt_info, ref);
+
+	complete_all(&xprt_info->ref_complete);
+}
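+
+/*
+ * Design note: pairing the kref with ref_complete lets
+ * msm_ipc_router_remove_xprt() drop its own reference and then block in
+ * wait_for_completion() until every ipc_router_put_xprt_info_ref() call
+ * has drained, making the final kfree(xprt_info) safe.
+ */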
+
+static int msm_ipc_router_add_xprt(struct msm_ipc_router_xprt *xprt)
+{
+	struct msm_ipc_router_xprt_info *xprt_info;
+
+	xprt_info = kmalloc(sizeof(struct msm_ipc_router_xprt_info),
+			    GFP_KERNEL);
+	if (!xprt_info)
+		return -ENOMEM;
+
+	xprt_info->xprt = xprt;
+	xprt_info->initialized = 0;
+	xprt_info->remote_node_id = -1;
+	INIT_LIST_HEAD(&xprt_info->pkt_list);
+	mutex_init(&xprt_info->rx_lock_lhb2);
+	mutex_init(&xprt_info->tx_lock_lhb2);
+	wakeup_source_init(&xprt_info->ws, xprt->name);
+	xprt_info->need_len = 0;
+	xprt_info->abort_data_read = 0;
+	INIT_WORK(&xprt_info->read_data, do_read_data);
+	INIT_LIST_HEAD(&xprt_info->list);
+	kref_init(&xprt_info->ref);
+	init_completion(&xprt_info->ref_complete);
+	xprt_info->dynamic_ws = 0;
+	if (xprt->get_ws_info)
+		xprt_info->dynamic_ws = xprt->get_ws_info(xprt);
+
+	xprt_info->workqueue = create_singlethread_workqueue(xprt->name);
+	if (!xprt_info->workqueue) {
+		kfree(xprt_info);
+		return -ENOMEM;
+	}
+
+	xprt_info->log_ctx = ipc_router_get_log_ctx(xprt->name);
+
+	if (!strcmp(xprt->name, "msm_ipc_router_loopback_xprt")) {
+		xprt_info->remote_node_id = IPC_ROUTER_NID_LOCAL;
+		xprt_info->initialized = 1;
+	}
+
+	IPC_RTR_INFO(xprt_info->log_ctx, "Adding xprt: [%s]\n",
+						xprt->name);
+	down_write(&xprt_info_list_lock_lha5);
+	list_add_tail(&xprt_info->list, &xprt_info_list);
+	up_write(&xprt_info_list_lock_lha5);
+
+	down_write(&routing_table_lock_lha3);
+	if (!routing_table_inited) {
+		init_routing_table();
+		routing_table_inited = 1;
+	}
+	up_write(&routing_table_lock_lha3);
+
+	xprt->priv = xprt_info;
+
+	return 0;
+}
+
+static void msm_ipc_router_remove_xprt(struct msm_ipc_router_xprt *xprt)
+{
+	struct msm_ipc_router_xprt_info *xprt_info;
+	struct rr_packet *temp_pkt, *pkt;
+
+	if (xprt && xprt->priv) {
+		xprt_info = xprt->priv;
+
+		IPC_RTR_INFO(xprt_info->log_ctx, "Removing xprt: [%s]\n",
+						xprt->name);
+		mutex_lock(&xprt_info->rx_lock_lhb2);
+		xprt_info->abort_data_read = 1;
+		mutex_unlock(&xprt_info->rx_lock_lhb2);
+		flush_workqueue(xprt_info->workqueue);
+		destroy_workqueue(xprt_info->workqueue);
+		mutex_lock(&xprt_info->rx_lock_lhb2);
+		list_for_each_entry_safe(pkt, temp_pkt,
+					 &xprt_info->pkt_list, list) {
+			list_del(&pkt->list);
+			release_pkt(pkt);
+		}
+		mutex_unlock(&xprt_info->rx_lock_lhb2);
+
+		down_write(&xprt_info_list_lock_lha5);
+		list_del(&xprt_info->list);
+		up_write(&xprt_info_list_lock_lha5);
+
+		msm_ipc_cleanup_routing_table(xprt_info);
+
+		wakeup_source_trash(&xprt_info->ws);
+
+		ipc_router_put_xprt_info_ref(xprt_info);
+		wait_for_completion(&xprt_info->ref_complete);
+
+		xprt->priv = 0;
+		kfree(xprt_info);
+	}
+}
+
+
+struct msm_ipc_router_xprt_work {
+	struct msm_ipc_router_xprt *xprt;
+	struct work_struct work;
+};
+
+static void xprt_open_worker(struct work_struct *work)
+{
+	struct msm_ipc_router_xprt_work *xprt_work =
+		container_of(work, struct msm_ipc_router_xprt_work, work);
+
+	msm_ipc_router_add_xprt(xprt_work->xprt);
+	kfree(xprt_work);
+}
+
+static void xprt_close_worker(struct work_struct *work)
+{
+	struct msm_ipc_router_xprt_work *xprt_work =
+		container_of(work, struct msm_ipc_router_xprt_work, work);
+
+	msm_ipc_router_remove_xprt(xprt_work->xprt);
+	xprt_work->xprt->sft_close_done(xprt_work->xprt);
+	kfree(xprt_work);
+}
+
+void msm_ipc_router_xprt_notify(struct msm_ipc_router_xprt *xprt,
+				unsigned event,
+				void *data)
+{
+	struct msm_ipc_router_xprt_info *xprt_info = xprt->priv;
+	struct msm_ipc_router_xprt_work *xprt_work;
+	struct rr_packet *pkt;
+	int ret;
+
+	ret = ipc_router_core_init();
+	if (ret < 0) {
+		IPC_RTR_ERR("%s: Error %d initializing IPC Router\n",
+			    __func__, ret);
+		return;
+	}
+
+	switch (event) {
+	case IPC_ROUTER_XPRT_EVENT_OPEN:
+		xprt_work = kmalloc(sizeof(struct msm_ipc_router_xprt_work),
+				GFP_ATOMIC);
+		if (xprt_work) {
+			xprt_work->xprt = xprt;
+			INIT_WORK(&xprt_work->work, xprt_open_worker);
+			queue_work(msm_ipc_router_workqueue, &xprt_work->work);
+		} else {
+			IPC_RTR_ERR(
+			"%s: malloc failure - Couldn't notify OPEN event",
+				__func__);
+		}
+		break;
+
+	case IPC_ROUTER_XPRT_EVENT_CLOSE:
+		xprt_work = kmalloc(sizeof(struct msm_ipc_router_xprt_work),
+				GFP_ATOMIC);
+		if (xprt_work) {
+			xprt_work->xprt = xprt;
+			INIT_WORK(&xprt_work->work, xprt_close_worker);
+			queue_work(msm_ipc_router_workqueue, &xprt_work->work);
+		} else {
+			IPC_RTR_ERR(
+			"%s: malloc failure - Couldn't notify CLOSE event",
+				__func__);
+		}
+		break;
+	}
+
+	if (!data)
+		return;
+
+	while (!xprt_info) {
+		msleep(100);
+		xprt_info = xprt->priv;
+	}
+
+	pkt = clone_pkt((struct rr_packet *)data);
+	if (!pkt)
+		return;
+
+	pkt->ws_need = false;
+	mutex_lock(&xprt_info->rx_lock_lhb2);
+	list_add_tail(&pkt->list, &xprt_info->pkt_list);
+	if (!xprt_info->dynamic_ws) {
+		__pm_stay_awake(&xprt_info->ws);
+		pkt->ws_need = true;
+	} else {
+		if (is_wakeup_source_allowed) {
+			__pm_stay_awake(&xprt_info->ws);
+			pkt->ws_need = true;
+		}
+	}
+	mutex_unlock(&xprt_info->rx_lock_lhb2);
+	queue_work(xprt_info->workqueue, &xprt_info->read_data);
+}
+
+/**
+ * parse_devicetree() - parse device tree binding
+ *
+ * @node: pointer to device tree node
+ *
+ * @return: 0 on success, -ENODEV on failure.
+ */
+static int parse_devicetree(struct device_node *node)
+{
+	char *key;
+	const char *peripheral = NULL;
+
+	key = "qcom,default-peripheral";
+	peripheral = of_get_property(node, key, NULL);
+	if (peripheral)
+		strlcpy(default_peripheral, peripheral, PIL_SUBSYSTEM_NAME_LEN);
+
+	return 0;
+}
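+
+/*
+ * Example (hypothetical device tree snippet matching the binding above;
+ * the peripheral name is a placeholder):
+ *
+ *	ipc_router {
+ *		compatible = "qcom,ipc_router";
+ *		qcom,default-peripheral = "modem";
+ *	};
+ */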
+
+/**
+ * ipc_router_probe() - Probe the IPC Router
+ *
+ * @pdev: Platform device corresponding to IPC Router.
+ *
+ * @return: 0 on success, standard Linux error codes on error.
+ *
+ * This function is called when the underlying device tree driver registers
+ * a platform device, mapped to IPC Router.
+ */
+static int ipc_router_probe(struct platform_device *pdev)
+{
+	int ret = 0;
+
+	if (pdev && pdev->dev.of_node) {
+		ret = parse_devicetree(pdev->dev.of_node);
+		if (ret)
+			IPC_RTR_ERR("%s: Failed to parse device tree\n",
+				    __func__);
+	}
+	return ret;
+}
+
+static struct of_device_id ipc_router_match_table[] = {
+	{ .compatible = "qcom,ipc_router" },
+	{},
+};
+
+static struct platform_driver ipc_router_driver = {
+	.probe = ipc_router_probe,
+	.driver = {
+		.name = MODULE_NAME,
+		.owner = THIS_MODULE,
+		.of_match_table = ipc_router_match_table,
+	 },
+};
+
+/**
+ * ipc_router_core_init() - Initialize all IPC Router core data structures
+ *
+ * Return: 0 on Success or Standard error code otherwise.
+ *
+ * This function initializes only the core data structures of the IPC Router
+ * module. The remaining initialization is done inside msm_ipc_router_init().
+ */
+static int ipc_router_core_init(void)
+{
+	int i;
+	int ret;
+	struct msm_ipc_routing_table_entry *rt_entry;
+
+	mutex_lock(&ipc_router_init_lock);
+	if (likely(is_ipc_router_inited)) {
+		mutex_unlock(&ipc_router_init_lock);
+		return 0;
+	}
+
+	debugfs_init();
+
+	for (i = 0; i < SRV_HASH_SIZE; i++)
+		INIT_LIST_HEAD(&server_list[i]);
+
+	for (i = 0; i < LP_HASH_SIZE; i++)
+		INIT_LIST_HEAD(&local_ports[i]);
+
+	down_write(&routing_table_lock_lha3);
+	if (!routing_table_inited) {
+		init_routing_table();
+		routing_table_inited = 1;
+	}
+	up_write(&routing_table_lock_lha3);
+	rt_entry = create_routing_table_entry(IPC_ROUTER_NID_LOCAL, NULL);
+	kref_put(&rt_entry->ref, ipc_router_release_rtentry);
+
+	msm_ipc_router_workqueue =
+		create_singlethread_workqueue("msm_ipc_router");
+	if (!msm_ipc_router_workqueue) {
+		mutex_unlock(&ipc_router_init_lock);
+		return -ENOMEM;
+	}
+
+	ret = msm_ipc_router_security_init();
+	if (ret < 0)
+		IPC_RTR_ERR("%s: Security Init failed\n", __func__);
+	else
+		is_ipc_router_inited = true;
+	mutex_unlock(&ipc_router_init_lock);
+
+	return ret;
+}
+
+static int msm_ipc_router_init(void)
+{
+	int ret;
+
+	ret = ipc_router_core_init();
+	if (ret < 0)
+		return ret;
+
+	ret = platform_driver_register(&ipc_router_driver);
+	if (ret)
+		IPC_RTR_ERR(
+		"%s: ipc_router_driver register failed %d\n", __func__, ret);
+
+	ret = msm_ipc_router_init_sockets();
+	if (ret < 0)
+		IPC_RTR_ERR("%s: Init sockets failed\n", __func__);
+
+	ipc_router_log_ctx_init();
+	return ret;
+}
+
+module_init(msm_ipc_router_init);
+MODULE_DESCRIPTION("MSM IPC Router");
+MODULE_LICENSE("GPL v2");
diff -Nruw linux-4.4.115-fbx/net/ipc_router./ipc_router_private.h linux-4.4.115-fbx/net/ipc_router/ipc_router_private.h
--- linux-4.4.115-fbx/net/ipc_router./ipc_router_private.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/net/ipc_router/ipc_router_private.h	2019-01-22 16:16:28.947295705 +0100
@@ -0,0 +1,150 @@
+/* Copyright (c) 2011-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _IPC_ROUTER_PRIVATE_H
+#define _IPC_ROUTER_PRIVATE_H
+
+#include <linux/types.h>
+#include <linux/socket.h>
+#include <linux/errno.h>
+#include <linux/mm.h>
+#include <linux/list.h>
+#include <linux/platform_device.h>
+#include <linux/msm_ipc.h>
+#include <linux/ipc_router.h>
+#include <linux/ipc_router_xprt.h>
+
+#include <net/sock.h>
+
+/* definitions for the R2R wire protocol */
+#define IPC_ROUTER_V1		1
+/*
+ * Ambiguous definition but will enable multiplexing IPC_ROUTER_V2 packets
+ * with an existing alternate transport in user-space, if needed.
+ */
+#define IPC_ROUTER_V2		3
+#define IPC_ROUTER_VER_BITMASK ((BIT(IPC_ROUTER_V1)) | (BIT(IPC_ROUTER_V2)))
+#define IPC_ROUTER_HELLO_MAGIC 0xE110
+#define IPC_ROUTER_CHECKSUM_MASK 0xFFFF
+
+#define IPC_ROUTER_ADDRESS			0x0000FFFF
+
+#define IPC_ROUTER_NID_LOCAL			1
+#define MAX_IPC_PKT_SIZE 66000
+
+#define IPC_ROUTER_LOW_RX_QUOTA		5
+#define IPC_ROUTER_HIGH_RX_QUOTA	10
+
+#define IPC_ROUTER_INFINITY -1
+#define DEFAULT_RCV_TIMEO IPC_ROUTER_INFINITY
+#define DEFAULT_SND_TIMEO IPC_ROUTER_INFINITY
+
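+/* Bytes of padding needed to round x up to the next 4-byte boundary */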
+#define ALIGN_SIZE(x) ((4 - ((x) & 3)) & 3)
+
+#define ALL_SERVICE 0xFFFFFFFF
+#define ALL_INSTANCE 0xFFFFFFFF
+
+#define CONTROL_FLAG_CONFIRM_RX 0x1
+#define CONTROL_FLAG_OPT_HDR 0x2
+
+enum {
+	CLIENT_PORT,
+	SERVER_PORT,
+	CONTROL_PORT,
+	IRSC_PORT,
+};
+
+enum {
+	NULL_MODE,
+	SINGLE_LINK_MODE,
+	MULTI_LINK_MODE,
+};
+
+enum {
+	CONNECTION_RESET = -1,
+	NOT_CONNECTED,
+	CONNECTED,
+};
+
+struct msm_ipc_sock {
+	struct sock sk;
+	struct msm_ipc_port *port;
+	void *default_node_vote_info;
+};
+
+/**
+ * msm_ipc_router_create_raw_port() - Create an IPC Router port
+ * @endpoint: User-space socket information to be cached.
+ * @notify: Function to notify incoming events on the port.
+ *   @event: Event ID to be handled.
+ *   @oob_data: Any out-of-band data associated with the event.
+ *   @oob_data_len: Size of the out-of-band data, if valid.
+ *   @priv: Private data registered during the port creation.
+ * @priv: Private Data to be passed during the event notification.
+ *
+ * @return: Valid pointer to port on success, NULL on failure.
+ *
+ * This function is used to create an IPC Router port. The port is used for
+ * communication locally or outside the subsystem.
+ */
+struct msm_ipc_port *msm_ipc_router_create_raw_port(void *endpoint,
+	void (*notify)(unsigned event, void *oob_data,
+		       size_t oob_data_len, void *priv),
+	void *priv);
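+
+/*
+ * Illustrative in-kernel sketch (not part of this patch): sockets pass
+ * their struct sock as @endpoint (see ipc_router_socket.c); other kernel
+ * clients can pass NULL. my_notify and my_ctx are hypothetical names.
+ *
+ *	static void my_notify(unsigned event, void *oob_data,
+ *			      size_t oob_data_len, void *priv)
+ *	{
+ *		pr_debug("ipc_router event %u, priv %p\n", event, priv);
+ *	}
+ *
+ *	port = msm_ipc_router_create_raw_port(NULL, my_notify, my_ctx);
+ */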
+int msm_ipc_router_send_to(struct msm_ipc_port *src,
+			   struct sk_buff_head *data,
+			   struct msm_ipc_addr *dest,
+			   long timeout);
+int msm_ipc_router_read(struct msm_ipc_port *port_ptr,
+			struct rr_packet **pkt,
+			size_t buf_len);
+
+int msm_ipc_router_recv_from(struct msm_ipc_port *port_ptr,
+		      struct rr_packet **pkt,
+		      struct msm_ipc_addr *src_addr,
+		      long timeout);
+int msm_ipc_router_register_server(struct msm_ipc_port *server_port,
+			    struct msm_ipc_addr *name);
+int msm_ipc_router_unregister_server(struct msm_ipc_port *server_port);
+
+int msm_ipc_router_init_sockets(void);
+void msm_ipc_router_exit_sockets(void);
+
+void msm_ipc_sync_sec_rule(uint32_t service, uint32_t instance, void *rule);
+
+void msm_ipc_sync_default_sec_rule(void *rule);
+
+int msm_ipc_router_rx_data_wait(struct msm_ipc_port *port_ptr, long timeout);
+
+void msm_ipc_router_free_skb(struct sk_buff_head *skb_head);
+
+/**
+ * ipc_router_set_conn() - Set the connection by initializing dest address
+ * @port_ptr: Local port in which the connection has to be set.
+ * @addr: Destination address of the connection.
+ *
+ * @return: 0 on success, standard Linux error codes on failure.
+ */
+int ipc_router_set_conn(struct msm_ipc_port *port_ptr,
+			struct msm_ipc_addr *addr);
+
+void *msm_ipc_load_default_node(void);
+
+void msm_ipc_unload_default_node(void *pil);
+
+/**
+ * ipc_router_dummy_write_space() - Dummy write space available callback
+ * @sk:	Socket pointer for which the callback is called.
+ */
+void ipc_router_dummy_write_space(struct sock *sk);
+
+#endif
diff -Nruw linux-4.4.115-fbx/net/ipc_router./ipc_router_security.c linux-4.4.115-fbx/net/ipc_router/ipc_router_security.c
--- linux-4.4.115-fbx/net/ipc_router./ipc_router_security.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/net/ipc_router/ipc_router_security.c	2019-01-22 16:16:28.947295705 +0100
@@ -0,0 +1,300 @@
+/* Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/net.h>
+#include <linux/socket.h>
+#include <linux/errno.h>
+#include <linux/mm.h>
+#include <linux/poll.h>
+#include <linux/fcntl.h>
+#include <linux/gfp.h>
+#include <linux/uaccess.h>
+#include <linux/kernel.h>
+#include <linux/msm_ipc.h>
+#include <linux/rwsem.h>
+#include <linux/uaccess.h>
+
+#include <net/sock.h>
+#include "ipc_router_private.h"
+#include "ipc_router_security.h"
+
+#define SEC_RULES_HASH_SZ 32
+
+#ifndef SIZE_MAX
+#define SIZE_MAX ((size_t)-1)
+#endif
+
+struct security_rule {
+	struct list_head list;
+	uint32_t service_id;
+	uint32_t instance_id;
+	unsigned reserved;
+	int num_group_info;
+	kgid_t *group_id;
+};
+
+static DECLARE_RWSEM(security_rules_lock_lha4);
+static struct list_head security_rules[SEC_RULES_HASH_SZ];
+
+/**
+ * check_permissions() - Check whether the process has permissions to
+ *                      create an interface handle with IPC Router
+ *
+ * @return: true if the process has permissions, else false.
+ */
+int check_permissions(void)
+{
+	int rc = 0;
+
+	if (capable(CAP_NET_RAW) || capable(CAP_NET_BIND_SERVICE))
+		rc = 1;
+	return rc;
+}
+EXPORT_SYMBOL(check_permissions);
+
+/**
+ * msm_ipc_config_sec_rules() - Add a security rule to the database
+ * @arg: Pointer to the buffer containing the rule.
+ *
+ * @return: 0 if successfully added, < 0 for error.
+ *
+ * A security rule is defined as a <Service_ID: Group_ID> tuple. The rule
+ * implies that, in order to send a QMI message to service Service_ID, a
+ * user-space process must belong to the Linux group Group_ID.
+ */
+int msm_ipc_config_sec_rules(void *arg)
+{
+	struct config_sec_rules_args sec_rules_arg;
+	struct security_rule *rule, *temp_rule;
+	int key;
+	size_t kgroup_info_sz;
+	int ret;
+	size_t group_info_sz;
+	gid_t *group_id = NULL;
+	int loop;
+
+	if (!uid_eq(current_euid(), GLOBAL_ROOT_UID))
+		return -EPERM;
+
+	ret = copy_from_user(&sec_rules_arg, (void *)arg,
+			     sizeof(sec_rules_arg));
+	if (ret)
+		return -EFAULT;
+
+	if (sec_rules_arg.num_group_info <= 0)
+		return -EINVAL;
+
+	if (sec_rules_arg.num_group_info > (SIZE_MAX / sizeof(gid_t))) {
+		pr_err("%s: Integer Overflow %zu * %d\n", __func__,
+			sizeof(gid_t), sec_rules_arg.num_group_info);
+		return -EINVAL;
+	}
+	group_info_sz = sec_rules_arg.num_group_info * sizeof(gid_t);
+
+	if (sec_rules_arg.num_group_info > (SIZE_MAX / sizeof(kgid_t))) {
+		pr_err("%s: Integer Overflow %zu * %d\n", __func__,
+			sizeof(kgid_t), sec_rules_arg.num_group_info);
+		return -EINVAL;
+	}
+	kgroup_info_sz = sec_rules_arg.num_group_info * sizeof(kgid_t);
+
+	rule = kzalloc(sizeof(struct security_rule), GFP_KERNEL);
+	if (!rule) {
+		pr_err("%s: security_rule alloc failed\n", __func__);
+		return -ENOMEM;
+	}
+
+	rule->group_id = kzalloc(kgroup_info_sz, GFP_KERNEL);
+	if (!rule->group_id) {
+		pr_err("%s: kgroup_id alloc failed\n", __func__);
+		kfree(rule);
+		return -ENOMEM;
+	}
+
+	group_id = kzalloc(group_info_sz, GFP_KERNEL);
+	if (!group_id) {
+		pr_err("%s: group_id alloc failed\n", __func__);
+		kfree(rule->group_id);
+		kfree(rule);
+		return -ENOMEM;
+	}
+
+	rule->service_id = sec_rules_arg.service_id;
+	rule->instance_id = sec_rules_arg.instance_id;
+	rule->reserved = sec_rules_arg.reserved;
+	rule->num_group_info = sec_rules_arg.num_group_info;
+	ret = copy_from_user(group_id, ((void *)(arg + sizeof(sec_rules_arg))),
+			     group_info_sz);
+	if (ret) {
+		kfree(group_id);
+		kfree(rule->group_id);
+		kfree(rule);
+		return -EFAULT;
+	}
+	for (loop = 0; loop < rule->num_group_info; loop++)
+		rule->group_id[loop] = make_kgid(current_user_ns(),
+						 group_id[loop]);
+	kfree(group_id);
+
+	key = rule->service_id & (SEC_RULES_HASH_SZ - 1);
+	down_write(&security_rules_lock_lha4);
+	if (rule->service_id == ALL_SERVICE) {
+		temp_rule = list_first_entry(&security_rules[key],
+					     struct security_rule, list);
+		list_del(&temp_rule->list);
+		kfree(temp_rule->group_id);
+		kfree(temp_rule);
+	}
+	list_add_tail(&rule->list, &security_rules[key]);
+	up_write(&security_rules_lock_lha4);
+
+	if (rule->service_id == ALL_SERVICE)
+		msm_ipc_sync_default_sec_rule((void *)rule);
+	else
+		msm_ipc_sync_sec_rule(rule->service_id, rule->instance_id,
+				      (void *)rule);
+
+	return 0;
+}
+EXPORT_SYMBOL(msm_ipc_config_sec_rules);
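+
+/*
+ * Illustrative user-space sketch (not part of this patch): install one
+ * security rule via IPC_ROUTER_IOCTL_CONFIG_SEC_RULES on an AF_MSM_IPC
+ * socket. The handler above requires root and reads the gid_t list from
+ * directly behind struct config_sec_rules_args, which is the layout
+ * assumed here; service ID 0x42 and group 3003 are made-up values,
+ * 0xFFFFFFFF matches ALL_INSTANCE above, and error handling is omitted.
+ *
+ *	size_t sz = sizeof(struct config_sec_rules_args) + sizeof(gid_t);
+ *	struct config_sec_rules_args *args = calloc(1, sz);
+ *	gid_t *gids = (gid_t *)(args + 1);
+ *	int fd = socket(AF_MSM_IPC, SOCK_DGRAM, 0);
+ *
+ *	args->service_id = 0x42;
+ *	args->instance_id = 0xFFFFFFFF;
+ *	args->num_group_info = 1;
+ *	gids[0] = 3003;
+ *	ioctl(fd, IPC_ROUTER_IOCTL_CONFIG_SEC_RULES, args);
+ */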
+
+/**
+ * msm_ipc_add_default_rule() - Add default security rule
+ *
+ * @return: 0 on success, < 0 on error.
+ *
+ * This function is used to ensure basic security when there is no
+ * security rule defined for a service. The rule it installs can be
+ * overwritten by the default security rule from a user-space script.
+ */
+static int msm_ipc_add_default_rule(void)
+{
+	struct security_rule *rule;
+	int key;
+
+	rule = kzalloc(sizeof(struct security_rule), GFP_KERNEL);
+	if (!rule) {
+		pr_err("%s: security_rule alloc failed\n", __func__);
+		return -ENOMEM;
+	}
+
+	rule->service_id = ALL_SERVICE;
+	rule->instance_id = ALL_INSTANCE;
+	rule->num_group_info = 0;
+	down_write(&security_rules_lock_lha4);
+	key = (ALL_SERVICE & (SEC_RULES_HASH_SZ - 1));
+	list_add_tail(&rule->list, &security_rules[key]);
+	up_write(&security_rules_lock_lha4);
+	return 0;
+}
+
+/**
+ * msm_ipc_get_security_rule() - Get the security rule corresponding to a
+ *                               service
+ * @service_id: Service ID for which the rule has to be retrieved.
+ * @instance_id: Instance ID for which the rule has to be retrieved.
+ *
+ * @return: Returns the rule info on success, NULL on error.
+ *
+ * This function is used when the service comes up and gets registered with
+ * the IPC Router.
+ */
+void *msm_ipc_get_security_rule(uint32_t service_id, uint32_t instance_id)
+{
+	int key;
+	struct security_rule *rule;
+
+	key = (service_id & (SEC_RULES_HASH_SZ - 1));
+	down_read(&security_rules_lock_lha4);
+	/* Return the rule for a specific <service:instance>, if found. */
+	list_for_each_entry(rule, &security_rules[key], list) {
+		if ((rule->service_id == service_id) &&
+		    (rule->instance_id == instance_id)) {
+			up_read(&security_rules_lock_lha4);
+			return (void *)rule;
+		}
+	}
+
+	/* Return the rule for a specific service, if found. */
+	list_for_each_entry(rule, &security_rules[key], list) {
+		if ((rule->service_id == service_id) &&
+		    (rule->instance_id == ALL_INSTANCE)) {
+			up_read(&security_rules_lock_lha4);
+			return (void *)rule;
+		}
+	}
+
+	/* Return the default rule, if no rule defined for a service. */
+	key = (ALL_SERVICE & (SEC_RULES_HASH_SZ - 1));
+	list_for_each_entry(rule, &security_rules[key], list) {
+		if ((rule->service_id == ALL_SERVICE) &&
+		    (rule->instance_id == ALL_INSTANCE)) {
+			up_read(&security_rules_lock_lha4);
+			return (void *)rule;
+		}
+	}
+	up_read(&security_rules_lock_lha4);
+	return NULL;
+}
+EXPORT_SYMBOL(msm_ipc_get_security_rule);
+
+/**
+ * msm_ipc_check_send_permissions() - Check if the sending process has
+ *                                    permissions specified as per the rule
+ * @data: Security rule to be checked.
+ *
+ * @return: true if the process has permissions, else false.
+ *
+ * This function is used to check if the currently executing process has
+ * permission to send a message to the remote entity. The security rule
+ * corresponding to the remote entity is specified by the "data" parameter.
+ */
+int msm_ipc_check_send_permissions(void *data)
+{
+	int i;
+	struct security_rule *rule = (struct security_rule *)data;
+
+	/* Source/Sender is Root user */
+	if (uid_eq(current_euid(), GLOBAL_ROOT_UID))
+		return 1;
+
+	/* Destination has no rules defined, possibly a client. */
+	if (!rule)
+		return 1;
+
+	for (i = 0; i < rule->num_group_info; i++) {
+		if (!gid_valid(rule->group_id[i]))
+			continue;
+		if (in_egroup_p(rule->group_id[i]))
+			return 1;
+	}
+	return 0;
+}
+EXPORT_SYMBOL(msm_ipc_check_send_permissions);
+
+/**
+ * msm_ipc_router_security_init() - Initialize the security rule database
+ *
+ * @return: 0 if successful, < 0 for error.
+ */
+int msm_ipc_router_security_init(void)
+{
+	int i;
+
+	for (i = 0; i < SEC_RULES_HASH_SZ; i++)
+		INIT_LIST_HEAD(&security_rules[i]);
+
+	msm_ipc_add_default_rule();
+	return 0;
+}
+EXPORT_SYMBOL(msm_ipc_router_security_init);
diff -Nruw linux-4.4.115-fbx/net/ipc_router./ipc_router_security.h linux-4.4.115-fbx/net/ipc_router/ipc_router_security.h
--- linux-4.4.115-fbx/net/ipc_router./ipc_router_security.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/net/ipc_router/ipc_router_security.h	2019-01-22 16:16:28.947295705 +0100
@@ -0,0 +1,104 @@
+/* Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _IPC_ROUTER_SECURITY_H
+#define _IPC_ROUTER_SECURITY_H
+
+#include <linux/types.h>
+#include <linux/socket.h>
+#include <linux/errno.h>
+
+#ifdef CONFIG_IPC_ROUTER_SECURITY
+
+/**
+ * check_permissions() - Check whether the process has permissions to
+ *                      create an interface handle with IPC Router
+ *
+ * @return: true if the process has permissions, else false.
+ */
+int check_permissions(void);
+
+/**
+ * msm_ipc_config_sec_rules() - Add a security rule to the database
+ * @arg: Pointer to the buffer containing the rule.
+ *
+ * @return: 0 if successfully added, < 0 for error.
+ *
+ * A security rule is defined as a <Service_ID: Group_ID> tuple. The rule
+ * implies that, in order to send a QMI message to service Service_ID, a
+ * user-space process must belong to the Linux group Group_ID.
+ */
+int msm_ipc_config_sec_rules(void *arg);
+
+/**
+ * msm_ipc_get_security_rule() - Get the security rule corresponding to a
+ *                               service
+ * @service_id: Service ID for which the rule has to be retrieved.
+ * @instance_id: Instance ID for which the rule has to be retrieved.
+ *
+ * @return: Returns the rule info on success, NULL on error.
+ *
+ * This function is used when the service comes up and gets registered with
+ * the IPC Router.
+ */
+void *msm_ipc_get_security_rule(uint32_t service_id, uint32_t instance_id);
+
+/**
+ * msm_ipc_check_send_permissions() - Check if the sending process has
+ *                                    permissions specified as per the rule
+ * @data: Security rule to be checked.
+ *
+ * @return: true if the process has permissions, else false.
+ *
+ * This function is used to check if the currently executing process has
+ * permission to send a message to the remote entity. The security rule
+ * corresponding to the remote entity is specified by the "data" parameter.
+ */
+int msm_ipc_check_send_permissions(void *data);
+
+/**
+ * msm_ipc_router_security_init() - Initialize the security rule database
+ *
+ * @return: 0 if successful, < 0 for error.
+ */
+int msm_ipc_router_security_init(void);
+
+#else
+
+static inline int check_permissions(void)
+{
+	return 1;
+}
+
+static inline int msm_ipc_config_sec_rules(void *arg)
+{
+	return -ENODEV;
+}
+
+static inline void *msm_ipc_get_security_rule(uint32_t service_id,
+					      uint32_t instance_id)
+{
+	return NULL;
+}
+
+static inline int msm_ipc_check_send_permissions(void *data)
+{
+	return 1;
+}
+
+static inline int msm_ipc_router_security_init(void)
+{
+	return 0;
+}
+
+#endif
+#endif
diff -Nruw linux-4.4.115-fbx/net/ipc_router./ipc_router_socket.c linux-4.4.115-fbx/net/ipc_router/ipc_router_socket.c
--- linux-4.4.115-fbx/net/ipc_router./ipc_router_socket.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/net/ipc_router/ipc_router_socket.c	2019-01-22 16:16:28.947295705 +0100
@@ -0,0 +1,691 @@
+/* Copyright (c) 2011-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/net.h>
+#include <linux/socket.h>
+#include <linux/errno.h>
+#include <linux/mm.h>
+#include <linux/poll.h>
+#include <linux/fcntl.h>
+#include <linux/gfp.h>
+#include <linux/msm_ipc.h>
+#include <linux/sched.h>
+#include <linux/thread_info.h>
+#include <linux/slab.h>
+#include <linux/kmemleak.h>
+#include <linux/ipc_logging.h>
+#include <linux/string.h>
+#include <linux/atomic.h>
+#include <linux/ipc_router.h>
+
+#include <net/sock.h>
+
+#include "ipc_router_private.h"
+#include "ipc_router_security.h"
+
+#define msm_ipc_sk(sk) ((struct msm_ipc_sock *)(sk))
+#define msm_ipc_sk_port(sk) ((struct msm_ipc_port *)(msm_ipc_sk(sk)->port))
+
+#ifndef SIZE_MAX
+#define SIZE_MAX ((size_t)-1)
+#endif
+
+static int sockets_enabled;
+static struct proto msm_ipc_proto;
+static const struct proto_ops msm_ipc_proto_ops;
+static RAW_NOTIFIER_HEAD(ipcrtr_af_init_chain);
+static DEFINE_MUTEX(ipcrtr_af_init_lock);
+
+static struct sk_buff_head *msm_ipc_router_build_msg(struct msghdr *m,
+					  size_t total_len)
+{
+	struct sk_buff_head *msg_head;
+	struct sk_buff *msg;
+	int first = 1;
+	int last = 1;
+	size_t data_size = 0;
+	size_t alloc_size, align_size;
+	void *data;
+	size_t total_copied_size = 0, copied_size;
+
+	if (iov_iter_count(&m->msg_iter) == total_len)
+		data_size = total_len;
+
+	if (!data_size)
+		return NULL;
+	align_size = ALIGN_SIZE(data_size);
+
+	msg_head = kmalloc(sizeof(struct sk_buff_head), GFP_KERNEL);
+	if (!msg_head) {
+		IPC_RTR_ERR("%s: cannot allocate skb_head\n", __func__);
+		return NULL;
+	}
+	skb_queue_head_init(msg_head);
+
+	while (total_copied_size < total_len) {
+		alloc_size = data_size;
+		if (first)
+			alloc_size += IPC_ROUTER_HDR_SIZE;
+		if (last)
+			alloc_size += align_size;
+
+		msg = alloc_skb(alloc_size, GFP_KERNEL);
+		if (!msg) {
+			if (alloc_size <= (PAGE_SIZE/2)) {
+				IPC_RTR_ERR("%s: cannot allocate skb\n",
+					__func__);
+				goto msg_build_failure;
+			}
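+			/*
+			 * alloc_skb() failed for a large request: halve the
+			 * fragment size and retry. 'last' is cleared so the
+			 * retry does not reserve the tail padding meant for
+			 * the final fragment.
+			 */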
+			data_size = data_size / 2;
+			last = 0;
+			continue;
+		}
+
+		if (first) {
+			skb_reserve(msg, IPC_ROUTER_HDR_SIZE);
+			first = 0;
+		}
+
+		data = skb_put(msg, data_size);
+		copied_size = copy_from_iter(data, data_size, &m->msg_iter);
+		if (copied_size != data_size) {
+			IPC_RTR_ERR("%s: copy_from_iter failed %zu %zu %zu\n",
+				    __func__, alloc_size, data_size,
+				    copied_size);
+			kfree_skb(msg);
+			goto msg_build_failure;
+		}
+		skb_queue_tail(msg_head, msg);
+		total_copied_size += data_size;
+		data_size = total_len - total_copied_size;
+		last = 1;
+	}
+	return msg_head;
+
+msg_build_failure:
+	while (!skb_queue_empty(msg_head)) {
+		msg = skb_dequeue(msg_head);
+		kfree_skb(msg);
+	}
+	kfree(msg_head);
+	return NULL;
+}
+
+static int msm_ipc_router_extract_msg(struct msghdr *m,
+				      struct rr_packet *pkt)
+{
+	struct sockaddr_msm_ipc *addr;
+	struct rr_header_v1 *hdr;
+	struct sk_buff *temp;
+	union rr_control_msg *ctl_msg;
+	int offset = 0, data_len = 0, copy_len, copied_len;
+
+	if (!m || !pkt) {
+		IPC_RTR_ERR("%s: Invalid pointers passed\n", __func__);
+		return -EINVAL;
+	}
+	addr = (struct sockaddr_msm_ipc *)m->msg_name;
+
+	hdr = &(pkt->hdr);
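+	/*
+	 * A RESUME_TX control message carries, in its payload, the address
+	 * of the client port whose receive quota was freed; surface that
+	 * address as the message source.
+	 */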
+	if (addr && (hdr->type == IPC_ROUTER_CTRL_CMD_RESUME_TX)) {
+		temp = skb_peek(pkt->pkt_fragment_q);
+		if (!temp || !temp->data) {
+			IPC_RTR_ERR("%s: Invalid skb\n", __func__);
+			return -EINVAL;
+		}
+		ctl_msg = (union rr_control_msg *)(temp->data);
+		memset(addr, 0x0, sizeof(*addr));
+		addr->family = AF_MSM_IPC;
+		addr->address.addrtype = MSM_IPC_ADDR_ID;
+		addr->address.addr.port_addr.node_id = ctl_msg->cli.node_id;
+		addr->address.addr.port_addr.port_id = ctl_msg->cli.port_id;
+		m->msg_namelen = sizeof(struct sockaddr_msm_ipc);
+		return offset;
+	}
+	if (addr && (hdr->type == IPC_ROUTER_CTRL_CMD_DATA)) {
+		memset(addr, 0x0, sizeof(*addr));
+		addr->family = AF_MSM_IPC;
+		addr->address.addrtype = MSM_IPC_ADDR_ID;
+		addr->address.addr.port_addr.node_id = hdr->src_node_id;
+		addr->address.addr.port_addr.port_id = hdr->src_port_id;
+		m->msg_namelen = sizeof(struct sockaddr_msm_ipc);
+	}
+
+	data_len = hdr->size;
+	skb_queue_walk(pkt->pkt_fragment_q, temp) {
+		copy_len = data_len < temp->len ? data_len : temp->len;
+		copied_len = copy_to_iter(temp->data, copy_len, &m->msg_iter);
+		if (copy_len != copied_len) {
+			IPC_RTR_ERR("%s: Copy to user failed\n", __func__);
+			return -EFAULT;
+		}
+		offset += copy_len;
+		data_len -= copy_len;
+	}
+	return offset;
+}
+
+static int msm_ipc_router_create(struct net *net,
+				 struct socket *sock,
+				 int protocol,
+				 int kern)
+{
+	struct sock *sk;
+	struct msm_ipc_port *port_ptr;
+
+	if (unlikely(protocol != 0)) {
+		IPC_RTR_ERR("%s: Protocol not supported\n", __func__);
+		return -EPROTONOSUPPORT;
+	}
+
+	switch (sock->type) {
+	case SOCK_DGRAM:
+		break;
+	default:
+		IPC_RTR_ERR("%s: Protocol type not supported\n", __func__);
+		return -EPROTOTYPE;
+	}
+
+	sk = sk_alloc(net, AF_MSM_IPC, GFP_KERNEL, &msm_ipc_proto, kern);
+	if (!sk) {
+		IPC_RTR_ERR("%s: sk_alloc failed\n", __func__);
+		return -ENOMEM;
+	}
+
+	sock->ops = &msm_ipc_proto_ops;
+	sock_init_data(sock, sk);
+	sk->sk_data_ready = NULL;
+	sk->sk_write_space = ipc_router_dummy_write_space;
+	sk->sk_rcvtimeo = DEFAULT_RCV_TIMEO;
+	sk->sk_sndtimeo = DEFAULT_SND_TIMEO;
+
+	port_ptr = msm_ipc_router_create_raw_port(sk, NULL, NULL);
+	if (!port_ptr) {
+		IPC_RTR_ERR("%s: port_ptr alloc failed\n", __func__);
+		sock_put(sk);
+		sock->sk = NULL;
+		return -ENOMEM;
+	}
+
+	port_ptr->check_send_permissions = msm_ipc_check_send_permissions;
+	msm_ipc_sk(sk)->port = port_ptr;
+	msm_ipc_sk(sk)->default_node_vote_info = NULL;
+
+	return 0;
+}
+
+int msm_ipc_router_bind(struct socket *sock, struct sockaddr *uaddr,
+			       int uaddr_len)
+{
+	struct sockaddr_msm_ipc *addr = (struct sockaddr_msm_ipc *)uaddr;
+	struct sock *sk = sock->sk;
+	struct msm_ipc_port *port_ptr;
+	int ret;
+
+	if (!sk)
+		return -EINVAL;
+
+	if (!check_permissions()) {
+		IPC_RTR_ERR("%s: %s Do not have permissions\n",
+			__func__, current->comm);
+		return -EPERM;
+	}
+
+	if (!uaddr_len) {
+		IPC_RTR_ERR("%s: Invalid address length\n", __func__);
+		return -EINVAL;
+	}
+
+	if (addr->family != AF_MSM_IPC) {
+		IPC_RTR_ERR("%s: Address family is incorrect\n", __func__);
+		return -EAFNOSUPPORT;
+	}
+
+	if (addr->address.addrtype != MSM_IPC_ADDR_NAME) {
+		IPC_RTR_ERR("%s: Address type is incorrect\n", __func__);
+		return -EINVAL;
+	}
+
+	port_ptr = msm_ipc_sk_port(sk);
+	if (!port_ptr)
+		return -ENODEV;
+
+	if (!msm_ipc_sk(sk)->default_node_vote_info)
+		msm_ipc_sk(sk)->default_node_vote_info =
+			msm_ipc_load_default_node();
+	lock_sock(sk);
+
+	ret = msm_ipc_router_register_server(port_ptr, &addr->address);
+
+	release_sock(sk);
+	return ret;
+}
+
+static int ipc_router_connect(struct socket *sock, struct sockaddr *uaddr,
+			      int uaddr_len, int flags)
+{
+	struct sockaddr_msm_ipc *addr = (struct sockaddr_msm_ipc *)uaddr;
+	struct sock *sk = sock->sk;
+	struct msm_ipc_port *port_ptr;
+	int ret;
+
+	if (!sk)
+		return -EINVAL;
+
+	if (uaddr_len <= 0) {
+		IPC_RTR_ERR("%s: Invalid address length\n", __func__);
+		return -EINVAL;
+	}
+
+	if (!addr) {
+		IPC_RTR_ERR("%s: Invalid address\n", __func__);
+		return -EINVAL;
+	}
+
+	if (addr->family != AF_MSM_IPC) {
+		IPC_RTR_ERR("%s: Address family is incorrect\n", __func__);
+		return -EAFNOSUPPORT;
+	}
+
+	port_ptr = msm_ipc_sk_port(sk);
+	if (!port_ptr)
+		return -ENODEV;
+
+	lock_sock(sk);
+	ret = ipc_router_set_conn(port_ptr, &addr->address);
+	release_sock(sk);
+	return ret;
+}
+
+static int msm_ipc_router_sendmsg(struct socket *sock,
+				  struct msghdr *m, size_t total_len)
+{
+	struct sock *sk = sock->sk;
+	struct msm_ipc_port *port_ptr = msm_ipc_sk_port(sk);
+	struct sockaddr_msm_ipc *dest = (struct sockaddr_msm_ipc *)m->msg_name;
+	struct sk_buff_head *msg;
+	int ret;
+	struct msm_ipc_addr dest_addr = {0};
+	long timeout;
+
+	if (dest) {
+		if (m->msg_namelen < sizeof(*dest) ||
+		    dest->family != AF_MSM_IPC)
+			return -EINVAL;
+		memcpy(&dest_addr, &dest->address, sizeof(dest_addr));
+	} else {
+		if (port_ptr->conn_status == NOT_CONNECTED) {
+			return -EDESTADDRREQ;
+		} else if (port_ptr->conn_status < CONNECTION_RESET) {
+			return -ENETRESET;
+		} else {
+			memcpy(&dest_addr.addr.port_addr, &port_ptr->dest_addr,
+				sizeof(struct msm_ipc_port_addr));
+			dest_addr.addrtype = MSM_IPC_ADDR_ID;
+		}
+	}
+
+	if (total_len > MAX_IPC_PKT_SIZE)
+		return -EINVAL;
+
+	lock_sock(sk);
+	timeout = sock_sndtimeo(sk, m->msg_flags & MSG_DONTWAIT);
+	msg = msm_ipc_router_build_msg(m, total_len);
+	if (!msg) {
+		IPC_RTR_ERR("%s: Msg build failure\n", __func__);
+		ret = -ENOMEM;
+		goto out_sendmsg;
+	}
+	kmemleak_not_leak(msg);
+
+	ret = msm_ipc_router_send_to(port_ptr, msg, &dest_addr, timeout);
+	if (ret != total_len) {
+		if (ret < 0) {
+			if (ret != -EAGAIN)
+				IPC_RTR_ERR("%s: Send_to failure %d\n",
+							__func__, ret);
+			msm_ipc_router_free_skb(msg);
+		} else {
+			ret = -EFAULT;
+		}
+	}
+
+out_sendmsg:
+	release_sock(sk);
+	return ret;
+}
+
+static int msm_ipc_router_recvmsg(struct socket *sock,
+				  struct msghdr *m, size_t buf_len, int flags)
+{
+	struct sock *sk = sock->sk;
+	struct msm_ipc_port *port_ptr = msm_ipc_sk_port(sk);
+	struct rr_packet *pkt;
+	long timeout;
+	int ret;
+
+	lock_sock(sk);
+	if (!buf_len) {
+		if (flags & MSG_PEEK)
+			ret = msm_ipc_router_get_curr_pkt_size(port_ptr);
+		else
+			ret = -EINVAL;
+		release_sock(sk);
+		return ret;
+	}
+	timeout = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
+
+	ret = msm_ipc_router_rx_data_wait(port_ptr, timeout);
+	if (ret) {
+		release_sock(sk);
+		if (ret == -ENOMSG)
+			m->msg_namelen = 0;
+		return ret;
+	}
+
+	ret = msm_ipc_router_read(port_ptr, &pkt, buf_len);
+	if (ret <= 0 || !pkt) {
+		release_sock(sk);
+		return ret;
+	}
+
+	ret = msm_ipc_router_extract_msg(m, pkt);
+	release_pkt(pkt);
+	release_sock(sk);
+	return ret;
+}
+
+static int msm_ipc_router_ioctl(struct socket *sock,
+				unsigned int cmd, unsigned long arg)
+{
+	struct sock *sk = sock->sk;
+	struct msm_ipc_port *port_ptr;
+	struct server_lookup_args server_arg;
+	struct msm_ipc_server_info *srv_info = NULL;
+	unsigned int n;
+	size_t srv_info_sz = 0;
+	int ret;
+
+	if (!sk)
+		return -EINVAL;
+
+	lock_sock(sk);
+	port_ptr = msm_ipc_sk_port(sock->sk);
+	if (!port_ptr) {
+		release_sock(sk);
+		return -EINVAL;
+	}
+
+	switch (cmd) {
+	case IPC_ROUTER_IOCTL_GET_VERSION:
+		n = IPC_ROUTER_V1;
+		ret = put_user(n, (unsigned int *)arg);
+		break;
+
+	case IPC_ROUTER_IOCTL_GET_MTU:
+		n = (MAX_IPC_PKT_SIZE - IPC_ROUTER_HDR_SIZE);
+		ret = put_user(n, (unsigned int *)arg);
+		break;
+
+	case IPC_ROUTER_IOCTL_GET_CURR_PKT_SIZE:
+		ret = msm_ipc_router_get_curr_pkt_size(port_ptr);
+		break;
+
+	case IPC_ROUTER_IOCTL_LOOKUP_SERVER:
+		if (!msm_ipc_sk(sk)->default_node_vote_info)
+			msm_ipc_sk(sk)->default_node_vote_info =
+				msm_ipc_load_default_node();
+
+		ret = copy_from_user(&server_arg, (void *)arg,
+				     sizeof(server_arg));
+		if (ret) {
+			ret = -EFAULT;
+			break;
+		}
+
+		if (server_arg.num_entries_in_array < 0) {
+			ret = -EINVAL;
+			break;
+		}
+		if (server_arg.num_entries_in_array) {
+			if (server_arg.num_entries_in_array >
+				(SIZE_MAX / sizeof(*srv_info))) {
+				IPC_RTR_ERR("%s: Integer Overflow %zu * %d\n",
+					__func__, sizeof(*srv_info),
+					server_arg.num_entries_in_array);
+				ret = -EINVAL;
+				break;
+			}
+			srv_info_sz = server_arg.num_entries_in_array *
+					sizeof(*srv_info);
+			srv_info = kmalloc(srv_info_sz, GFP_KERNEL);
+			if (!srv_info) {
+				ret = -ENOMEM;
+				break;
+			}
+		}
+		ret = msm_ipc_router_lookup_server_name(&server_arg.port_name,
+				srv_info, server_arg.num_entries_in_array,
+				server_arg.lookup_mask);
+		if (ret < 0) {
+			IPC_RTR_ERR("%s: Server not found\n", __func__);
+			ret = -ENODEV;
+			kfree(srv_info);
+			break;
+		}
+		server_arg.num_entries_found = ret;
+
+		ret = copy_to_user((void *)arg, &server_arg,
+				   sizeof(server_arg));
+
+		n = min(server_arg.num_entries_found,
+			server_arg.num_entries_in_array);
+
+		if (ret == 0 && n) {
+			ret = copy_to_user((void *)(arg + sizeof(server_arg)),
+					   srv_info, n * sizeof(*srv_info));
+		}
+
+		if (ret)
+			ret = -EFAULT;
+		kfree(srv_info);
+		break;
+
+	case IPC_ROUTER_IOCTL_BIND_CONTROL_PORT:
+		ret = msm_ipc_router_bind_control_port(port_ptr);
+		break;
+
+	case IPC_ROUTER_IOCTL_CONFIG_SEC_RULES:
+		ret = msm_ipc_config_sec_rules((void *)arg);
+		if (ret != -EPERM)
+			port_ptr->type = IRSC_PORT;
+		break;
+
+	default:
+		ret = -EINVAL;
+	}
+	release_sock(sk);
+	return ret;
+}
+
+static unsigned int msm_ipc_router_poll(struct file *file,
+			struct socket *sock, poll_table *wait)
+{
+	struct sock *sk = sock->sk;
+	struct msm_ipc_port *port_ptr;
+	uint32_t mask = 0;
+
+	if (!sk)
+		return -EINVAL;
+
+	port_ptr = msm_ipc_sk_port(sk);
+	if (!port_ptr)
+		return -EINVAL;
+
+	poll_wait(file, &port_ptr->port_rx_wait_q, wait);
+
+	if (!list_empty(&port_ptr->port_rx_q))
+		mask |= (POLLRDNORM | POLLIN);
+
+	if (port_ptr->conn_status == CONNECTION_RESET)
+		mask |= (POLLHUP | POLLERR);
+
+	return mask;
+}
+
+static int msm_ipc_router_close(struct socket *sock)
+{
+	struct sock *sk = sock->sk;
+	struct msm_ipc_port *port_ptr;
+	int ret;
+
+	if (!sk)
+		return -EINVAL;
+
+	lock_sock(sk);
+	port_ptr = msm_ipc_sk_port(sk);
+	if (!port_ptr) {
+		release_sock(sk);
+		return -EINVAL;
+	}
+	ret = msm_ipc_router_close_port(port_ptr);
+	msm_ipc_unload_default_node(msm_ipc_sk(sk)->default_node_vote_info);
+	release_sock(sk);
+	sock_put(sk);
+	sock->sk = NULL;
+
+	return ret;
+}
+
+/**
+ * register_ipcrtr_af_init_notifier() - Register for ipc router socket
+ *				address family initialization callback
+ * @nb: Notifier block which will be notified when address family is
+ *	initialized.
+ *
+ * Return: 0 on success, standard error code otherwise.
+ */
+int register_ipcrtr_af_init_notifier(struct notifier_block *nb)
+{
+	int ret;
+
+	if (!nb)
+		return -EINVAL;
+	mutex_lock(&ipcrtr_af_init_lock);
+	if (sockets_enabled)
+		nb->notifier_call(nb, IPCRTR_AF_INIT, NULL);
+	ret = raw_notifier_chain_register(&ipcrtr_af_init_chain, nb);
+	mutex_unlock(&ipcrtr_af_init_lock);
+	return ret;
+}
+EXPORT_SYMBOL(register_ipcrtr_af_init_notifier);
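+
+/*
+ * Illustrative client sketch (not part of this patch): the callback is
+ * invoked immediately at registration if the address family is already
+ * initialized, otherwise when msm_ipc_router_init_sockets() runs;
+ * my_af_cb and my_af_nb are hypothetical names.
+ *
+ *	static int my_af_cb(struct notifier_block *nb, unsigned long event,
+ *			    void *data)
+ *	{
+ *		if (event == IPCRTR_AF_INIT)
+ *			pr_info("AF_MSM_IPC sockets are available\n");
+ *		return NOTIFY_OK;
+ *	}
+ *
+ *	static struct notifier_block my_af_nb = {
+ *		.notifier_call = my_af_cb,
+ *	};
+ *
+ *	register_ipcrtr_af_init_notifier(&my_af_nb);
+ */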
+
+/**
+ * unregister_ipcrtr_af_init_notifier() - Unregister for ipc router socket
+ *				address family initialization callback
+ * @nb: Notifier block which will be notified once address family is
+ *	initialized.
+ *
+ * Return: 0 on success, standard error code otherwise.
+ */
+int unregister_ipcrtr_af_init_notifier(struct notifier_block *nb)
+{
+	int ret;
+
+	if (!nb)
+		return -EINVAL;
+	ret = raw_notifier_chain_unregister(&ipcrtr_af_init_chain, nb);
+	return ret;
+}
+EXPORT_SYMBOL(unregister_ipcrtr_af_init_notifier);
+
+static const struct net_proto_family msm_ipc_family_ops = {
+	.owner		= THIS_MODULE,
+	.family		= AF_MSM_IPC,
+	.create		= msm_ipc_router_create
+};
+
+static const struct proto_ops msm_ipc_proto_ops = {
+	.family			= AF_MSM_IPC,
+	.owner			= THIS_MODULE,
+	.release		= msm_ipc_router_close,
+	.bind			= msm_ipc_router_bind,
+	.connect		= ipc_router_connect,
+	.socketpair		= sock_no_socketpair,
+	.accept			= sock_no_accept,
+	.getname		= sock_no_getname,
+	.poll			= msm_ipc_router_poll,
+	.ioctl			= msm_ipc_router_ioctl,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl		= msm_ipc_router_ioctl,
+#endif
+	.listen			= sock_no_listen,
+	.shutdown		= sock_no_shutdown,
+	.setsockopt		= sock_no_setsockopt,
+	.getsockopt		= sock_no_getsockopt,
+#ifdef CONFIG_COMPAT
+	.compat_setsockopt	= sock_no_setsockopt,
+	.compat_getsockopt	= sock_no_getsockopt,
+#endif
+	.sendmsg		= msm_ipc_router_sendmsg,
+	.recvmsg		= msm_ipc_router_recvmsg,
+	.mmap			= sock_no_mmap,
+	.sendpage		= sock_no_sendpage,
+};
+
+static struct proto msm_ipc_proto = {
+	.name           = "MSM_IPC",
+	.owner          = THIS_MODULE,
+	.obj_size       = sizeof(struct msm_ipc_sock),
+};
+
+int msm_ipc_router_init_sockets(void)
+{
+	int ret;
+
+	ret = proto_register(&msm_ipc_proto, 1);
+	if (ret) {
+		IPC_RTR_ERR("%s: Failed to register MSM_IPC protocol type\n",
+								__func__);
+		goto out_init_sockets;
+	}
+
+	ret = sock_register(&msm_ipc_family_ops);
+	if (ret) {
+		IPC_RTR_ERR("%s: Failed to register MSM_IPC socket type\n",
+								__func__);
+		proto_unregister(&msm_ipc_proto);
+		goto out_init_sockets;
+	}
+
+	mutex_lock(&ipcrtr_af_init_lock);
+	sockets_enabled = 1;
+	raw_notifier_call_chain(&ipcrtr_af_init_chain,
+				IPCRTR_AF_INIT, NULL);
+	mutex_unlock(&ipcrtr_af_init_lock);
+out_init_sockets:
+	return ret;
+}
+
+void msm_ipc_router_exit_sockets(void)
+{
+	if (!sockets_enabled)
+		return;
+
+	sock_unregister(msm_ipc_family_ops.family);
+	proto_unregister(&msm_ipc_proto);
+	mutex_lock(&ipcrtr_af_init_lock);
+	sockets_enabled = 0;
+	raw_notifier_call_chain(&ipcrtr_af_init_chain,
+				IPCRTR_AF_DEINIT, NULL);
+	mutex_unlock(&ipcrtr_af_init_lock);
+}
diff -Nruw linux-4.4.115-fbx/net/ipc_router./Kconfig linux-4.4.115-fbx/net/ipc_router/Kconfig
--- linux-4.4.115-fbx/net/ipc_router./Kconfig	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/net/ipc_router/Kconfig	2019-01-22 16:16:28.947295705 +0100
@@ -0,0 +1,25 @@
+#
+# IPC_ROUTER Configuration
+#
+
+menuconfig IPC_ROUTER
+	bool "IPC Router support"
+	help
+	  IPC Router provides a connectionless message routing service
+	  between multiple modules within a System-on-Chip (SoC). The
+	  communicating entities can run either on the same processor or
+	  on a different processor within the SoC. The IPC Router has been
+	  designed to route messages of any type and support a broader
+	  network of processors.
+
+	  If in doubt, say N.
+
+config IPC_ROUTER_SECURITY
+	depends on IPC_ROUTER
+	bool "IPC Router Security support"
+	help
+	  This feature of IPC Router will enforce security rules
+	  configured by a security script from the user-space. IPC Router
+	  once configured with the security rules will ensure that the
+	  sender of the message to a service belongs to the relevant
+	  Linux group as configured by the security script.
diff -Nruw linux-4.4.115-fbx/net/ipc_router./Makefile linux-4.4.115-fbx/net/ipc_router/Makefile
--- linux-4.4.115-fbx/net/ipc_router./Makefile	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/net/ipc_router/Makefile	2019-01-22 16:16:28.947295705 +0100
@@ -0,0 +1,7 @@
+#
+# Makefile for the Linux IPC_ROUTER
+#
+
+obj-$(CONFIG_IPC_ROUTER) := ipc_router_core.o
+obj-$(CONFIG_IPC_ROUTER) += ipc_router_socket.o
+obj-$(CONFIG_IPC_ROUTER_SECURITY) += ipc_router_security.o
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/net/ipv4/sysfs_net_ipv4.c	2019-01-22 16:16:28.983296031 +0100
@@ -0,0 +1,88 @@
+/*
+ * net/ipv4/sysfs_net_ipv4.c
+ *
+ * sysfs-based networking knobs (so we can, unlike with sysctl, control perms)
+ *
+ * Copyright (C) 2008 Google, Inc.
+ *
+ * Robert Love <rlove@google.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kobject.h>
+#include <linux/string.h>
+#include <linux/sysfs.h>
+#include <linux/init.h>
+#include <net/tcp.h>
+
+#define CREATE_IPV4_FILE(_name, _var) \
+static ssize_t _name##_show(struct kobject *kobj, \
+			    struct kobj_attribute *attr, char *buf) \
+{ \
+	return sprintf(buf, "%d\n", _var); \
+} \
+static ssize_t _name##_store(struct kobject *kobj, \
+			     struct kobj_attribute *attr, \
+			     const char *buf, size_t count) \
+{ \
+	int val, ret; \
+	ret = sscanf(buf, "%d", &val); \
+	if (ret != 1) \
+		return -EINVAL; \
+	if (val < 0) \
+		return -EINVAL; \
+	_var = val; \
+	return count; \
+} \
+static struct kobj_attribute _name##_attr = \
+	__ATTR(_name, 0644, _name##_show, _name##_store)
+
+CREATE_IPV4_FILE(tcp_wmem_min, sysctl_tcp_wmem[0]);
+CREATE_IPV4_FILE(tcp_wmem_def, sysctl_tcp_wmem[1]);
+CREATE_IPV4_FILE(tcp_wmem_max, sysctl_tcp_wmem[2]);
+
+CREATE_IPV4_FILE(tcp_rmem_min, sysctl_tcp_rmem[0]);
+CREATE_IPV4_FILE(tcp_rmem_def, sysctl_tcp_rmem[1]);
+CREATE_IPV4_FILE(tcp_rmem_max, sysctl_tcp_rmem[2]);
+
+static struct attribute *ipv4_attrs[] = {
+	&tcp_wmem_min_attr.attr,
+	&tcp_wmem_def_attr.attr,
+	&tcp_wmem_max_attr.attr,
+	&tcp_rmem_min_attr.attr,
+	&tcp_rmem_def_attr.attr,
+	&tcp_rmem_max_attr.attr,
+	NULL
+};
+
+static struct attribute_group ipv4_attr_group = {
+	.attrs = ipv4_attrs,
+};
+
+static __init int sysfs_ipv4_init(void)
+{
+	struct kobject *ipv4_kobject;
+	int ret;
+
+	ipv4_kobject = kobject_create_and_add("ipv4", kernel_kobj);
+	if (!ipv4_kobject)
+		return -ENOMEM;
+
+	ret = sysfs_create_group(ipv4_kobject, &ipv4_attr_group);
+	if (ret) {
+		kobject_put(ipv4_kobject);
+		return ret;
+	}
+
+	return 0;
+}
+
+subsys_initcall(sysfs_ipv4_init);
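+
+/*
+ * For reference, CREATE_IPV4_FILE(tcp_wmem_min, sysctl_tcp_wmem[0]) above
+ * expands to roughly the following, yielding a 0644 file at
+ * /sys/kernel/ipv4/tcp_wmem_min (kernel_kobj is /sys/kernel):
+ *
+ *	static ssize_t tcp_wmem_min_show(struct kobject *kobj,
+ *					 struct kobj_attribute *attr,
+ *					 char *buf)
+ *	{
+ *		return sprintf(buf, "%d\n", sysctl_tcp_wmem[0]);
+ *	}
+ *	static ssize_t tcp_wmem_min_store(struct kobject *kobj,
+ *					  struct kobj_attribute *attr,
+ *					  const char *buf, size_t count)
+ *	{
+ *		int val, ret;
+ *
+ *		ret = sscanf(buf, "%d", &val);
+ *		if (ret != 1 || val < 0)
+ *			return -EINVAL;
+ *		sysctl_tcp_wmem[0] = val;
+ *		return count;
+ *	}
+ *	static struct kobj_attribute tcp_wmem_min_attr =
+ *		__ATTR(tcp_wmem_min, 0644, tcp_wmem_min_show,
+ *		       tcp_wmem_min_store);
+ */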
diff -Nruw linux-4.4.115-fbx/net/rmnet_data./Kconfig linux-4.4.115-fbx/net/rmnet_data/Kconfig
--- linux-4.4.115-fbx/net/rmnet_data./Kconfig	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/net/rmnet_data/Kconfig	2019-01-22 16:16:29.175297770 +0100
@@ -0,0 +1,29 @@
+#
+# RMNET Data and MAP driver
+#
+
+menuconfig RMNET_DATA
+	depends on NETDEVICES
+	bool "RmNet Data and MAP driver"
+	---help---
+	  If you say Y here, then the rmnet_data module will be statically
+	  compiled into the kernel. The rmnet data module provides MAP
+	  functionality for embedded and bridged traffic.
+
+if RMNET_DATA
+
+config RMNET_DATA_FC
+	bool "RmNet Data Flow Control"
+	depends on NET_SCHED && NET_SCH_PRIO
+	---help---
+	  Say Y here if you want RmNet data to handle in-band flow control and
+	  ioctl based flow control. This depends on net scheduler and prio queue
+	  capability being present in the kernel. In-band flow control requires
+	  MAP protocol be used.
+config RMNET_DATA_DEBUG_PKT
+	bool "Packet Debug Logging"
+	---help---
+	  Say Y here if you want RmNet data to be able to log packets in main
+	  system log. This should not be enabled on production builds as it can
+	  impact system performance. Note that simply enabling it here will not
+	  enable the logging; it must be enabled at run-time as well.
+endif # RMNET_DATA
diff -Nruw linux-4.4.115-fbx/net/rmnet_data./Makefile linux-4.4.115-fbx/net/rmnet_data/Makefile
--- linux-4.4.115-fbx/net/rmnet_data./Makefile	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/net/rmnet_data/Makefile	2019-01-22 16:16:29.175297770 +0100
@@ -0,0 +1,14 @@
+#
+# Makefile for the RMNET Data module
+#
+
+rmnet_data-y		 := rmnet_data_main.o
+rmnet_data-y		 += rmnet_data_config.o
+rmnet_data-y		 += rmnet_data_vnd.o
+rmnet_data-y		 += rmnet_data_handlers.o
+rmnet_data-y		 += rmnet_map_data.o
+rmnet_data-y		 += rmnet_map_command.o
+rmnet_data-y		 += rmnet_data_stats.o
+obj-$(CONFIG_RMNET_DATA) += rmnet_data.o
+
+CFLAGS_rmnet_data_main.o := -I$(src)
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/scripts/dtbs.sh	2019-01-22 16:16:29.275298675 +0100
@@ -0,0 +1,20 @@
+#!/bin/sh
+##  dtbs.sh for kernel
+##  Created by <nschichan@freebox.fr> on Thu Jul 26 13:28:58 2018
+##
+
+dtb_align=32
+
+out=$1
+shift
+
+echo $*
+for i in $*; do
+    sz=$(stat -c %s $i)
+    mod=$((sz % $dtb_align))
+    padd=$((mod > 0 ? $dtb_align - $mod : 0))
+
+    # echo $(basename $i): mod $mod padd $padd
+    cat $i
+    # dd rejects bs=0, so only pad files that are not already aligned
+    if [ $padd -gt 0 ]; then
+        dd if=/dev/zero bs=$padd count=1 2>/dev/null
+    fi
+done > $out
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/scripts/Makefile.ubsan	2019-01-22 16:16:29.267298603 +0100
@@ -0,0 +1,17 @@
+ifdef CONFIG_UBSAN
+      CFLAGS_UBSAN += $(call cc-option, -fsanitize=shift)
+      CFLAGS_UBSAN += $(call cc-option, -fsanitize=integer-divide-by-zero)
+      CFLAGS_UBSAN += $(call cc-option, -fsanitize=unreachable)
+      CFLAGS_UBSAN += $(call cc-option, -fsanitize=vla-bound)
+      CFLAGS_UBSAN += $(call cc-option, -fsanitize=null)
+      CFLAGS_UBSAN += $(call cc-option, -fsanitize=signed-integer-overflow)
+      CFLAGS_UBSAN += $(call cc-option, -fsanitize=bounds)
+      CFLAGS_UBSAN += $(call cc-option, -fsanitize=object-size)
+      CFLAGS_UBSAN += $(call cc-option, -fsanitize=returns-nonnull-attribute)
+      CFLAGS_UBSAN += $(call cc-option, -fsanitize=bool)
+      CFLAGS_UBSAN += $(call cc-option, -fsanitize=enum)
+
+ifdef CONFIG_UBSAN_ALIGNMENT
+      CFLAGS_UBSAN += $(call cc-option, -fsanitize=alignment)
+endif
+endif
diff -Nruw linux-4.4.115-fbx/security/pfe./Kconfig linux-4.4.115-fbx/security/pfe/Kconfig
--- linux-4.4.115-fbx/security/pfe./Kconfig	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/security/pfe/Kconfig	2019-01-22 16:16:29.315299038 +0100
@@ -0,0 +1,28 @@
+menu "Qualcomm Technologies, Inc Per File Encryption security device drivers"
+	depends on ARCH_QCOM
+
+config PFT
+	bool "Per-File-Tagger driver"
+	depends on SECURITY
+	default n
+	help
+		This driver is used for tagging enterprise files.
+		It is part of the Per-File-Encryption (PFE) feature.
+		The driver tags files when they are created by a
+		registered application.
+		Tagged files are encrypted using the dm-req-crypt driver.
+
+config PFK
+	bool "Per-File-Key driver"
+	depends on SECURITY
+	depends on SECURITY_SELINUX
+	default n
+	help
+		This driver is used for storing eCryptfs information
+		in the file node.
+		This is part of the eCryptfs hardware enhanced solution
+		provided by Qualcomm Technologies, Inc.
+		The information is used when the file is later encrypted
+		using the ICE or dm crypto engine.
+
+endmenu
diff -Nruw linux-4.4.115-fbx/security/pfe./Makefile linux-4.4.115-fbx/security/pfe/Makefile
--- linux-4.4.115-fbx/security/pfe./Makefile	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/security/pfe/Makefile	2019-01-22 16:16:29.315299038 +0100
@@ -0,0 +1,9 @@
+#
+# Makefile for the MSM specific security device drivers.
+#
+
+ccflags-y += -Isecurity/selinux -Isecurity/selinux/include -Ifs/ecryptfs
+ccflags-y += -Ifs/ext4
+
+obj-$(CONFIG_PFT) += pft.o
+obj-$(CONFIG_PFK) += pfk.o pfk_kc.o pfk_ice.o pfk_ext4.o pfk_ecryptfs.o
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/sound/soc/codecs/audio-ext-clk.c	2019-01-22 16:16:29.507300776 +0100
@@ -0,0 +1,349 @@
+/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/clk.h>
+#include <linux/clk/msm-clk-provider.h>
+#include <linux/clk/msm-clk.h>
+#include <linux/platform_device.h>
+#include <linux/gpio.h>
+#include <linux/of_gpio.h>
+#include <dt-bindings/clock/audio-ext-clk.h>
+#include <sound/q6afe-v2.h>
+
+struct pinctrl_info {
+	struct pinctrl *pinctrl;
+	struct pinctrl_state *sleep;
+	struct pinctrl_state *active;
+};
+
+struct audio_ext_ap_clk {
+	bool enabled;
+	int gpio;
+	struct clk c;
+};
+
+struct audio_ext_pmi_clk {
+	int gpio;
+	struct clk c;
+};
+
+struct audio_ext_ap_clk2 {
+	bool enabled;
+	struct pinctrl_info pnctrl_info;
+	struct clk c;
+};
+
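+/*
+ * Positional initializer for struct afe_clk_set; judging by the constant
+ * names, the fields are: API version, clock ID, frequency in Hz, coupling
+ * attribute, clock root, and the enable flag (toggled in the prepare and
+ * unprepare callbacks below).
+ */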
+static struct afe_clk_set clk2_config = {
+	Q6AFE_LPASS_CLK_CONFIG_API_VERSION,
+	Q6AFE_LPASS_CLK_ID_SPEAKER_I2S_OSR,
+	Q6AFE_LPASS_IBIT_CLK_11_P2896_MHZ,
+	Q6AFE_LPASS_CLK_ATTRIBUTE_COUPLE_NO,
+	Q6AFE_LPASS_CLK_ROOT_DEFAULT,
+	0,
+};
+
+static inline struct audio_ext_ap_clk *to_audio_ap_clk(struct clk *clk)
+{
+	return container_of(clk, struct audio_ext_ap_clk, c);
+}
+
+static int audio_ext_clk_prepare(struct clk *clk)
+{
+	struct audio_ext_ap_clk *audio_clk = to_audio_ap_clk(clk);
+
+	pr_debug("%s: gpio: %d\n", __func__, audio_clk->gpio);
+	if (gpio_is_valid(audio_clk->gpio))
+		return gpio_direction_output(audio_clk->gpio, 1);
+	return 0;
+}
+
+static void audio_ext_clk_unprepare(struct clk *clk)
+{
+	struct audio_ext_ap_clk *audio_clk = to_audio_ap_clk(clk);
+
+	pr_debug("%s: gpio: %d\n", __func__, audio_clk->gpio);
+	if (gpio_is_valid(audio_clk->gpio))
+		gpio_direction_output(audio_clk->gpio, 0);
+}
+
+static inline struct audio_ext_ap_clk2 *to_audio_ap_clk2(struct clk *clk)
+{
+	return container_of(clk, struct audio_ext_ap_clk2, c);
+}
+
+static int audio_ext_clk2_prepare(struct clk *clk)
+{
+	struct audio_ext_ap_clk2 *audio_clk2 = to_audio_ap_clk2(clk);
+	struct pinctrl_info *pnctrl_info = &audio_clk2->pnctrl_info;
+	int ret;
+
+	if (!pnctrl_info->pinctrl || !pnctrl_info->active)
+		return 0;
+
+	ret = pinctrl_select_state(pnctrl_info->pinctrl,
+				   pnctrl_info->active);
+	if (ret) {
+		pr_err("%s: active state select failed with %d\n",
+			__func__, ret);
+		return -EIO;
+	}
+
+	clk2_config.enable = 1;
+	ret = afe_set_lpass_clk_cfg(IDX_RSVD_3, &clk2_config);
+	if (ret < 0) {
+		pr_err("%s: failed to set clock, ret = %d\n", __func__, ret);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static void audio_ext_clk2_unprepare(struct clk *clk)
+{
+	struct audio_ext_ap_clk2 *audio_clk2 = to_audio_ap_clk2(clk);
+	struct pinctrl_info *pnctrl_info = &audio_clk2->pnctrl_info;
+	int ret;
+
+	if (!pnctrl_info->pinctrl || !pnctrl_info->sleep)
+		return;
+
+	ret = pinctrl_select_state(pnctrl_info->pinctrl,
+				   pnctrl_info->sleep);
+	if (ret)
+		pr_err("%s: sleep state select failed with %d\n",
+			__func__, ret);
+
+	clk2_config.enable = 0;
+	ret = afe_set_lpass_clk_cfg(IDX_RSVD_3, &clk2_config);
+	if (ret < 0)
+		pr_err("%s: failed to reset clock, ret = %d\n", __func__, ret);
+}
+
+static struct clk_ops audio_ext_ap_clk_ops = {
+	.prepare = audio_ext_clk_prepare,
+	.unprepare = audio_ext_clk_unprepare,
+};
+
+static struct clk_ops audio_ext_ap_clk2_ops = {
+	.prepare = audio_ext_clk2_prepare,
+	.unprepare = audio_ext_clk2_unprepare,
+};
+
+static struct audio_ext_pmi_clk audio_pmi_clk = {
+	.gpio = -EINVAL,
+	.c = {
+		.dbg_name = "audio_ext_pmi_clk",
+		.ops = &clk_ops_dummy,
+		CLK_INIT(audio_pmi_clk.c),
+	},
+};
+
+static struct audio_ext_pmi_clk audio_pmi_lnbb_clk = {
+	.gpio = -EINVAL,
+	.c = {
+		.dbg_name = "audio_ext_pmi_lnbb_clk",
+		.ops = &clk_ops_dummy,
+		CLK_INIT(audio_pmi_lnbb_clk.c),
+	},
+};
+
+static struct audio_ext_ap_clk audio_ap_clk = {
+	.gpio = -EINVAL,
+	.c = {
+		.dbg_name = "audio_ext_ap_clk",
+		.ops = &audio_ext_ap_clk_ops,
+		CLK_INIT(audio_ap_clk.c),
+	},
+};
+
+static struct audio_ext_ap_clk2 audio_ap_clk2 = {
+	.c = {
+		.dbg_name = "audio_ext_ap_clk2",
+		.ops = &audio_ext_ap_clk2_ops,
+		CLK_INIT(audio_ap_clk2.c),
+	},
+};
+
+static struct clk_lookup audio_ref_clock[] = {
+	CLK_LIST(audio_ap_clk),
+	CLK_LIST(audio_pmi_clk),
+	CLK_LIST(audio_pmi_lnbb_clk),
+	CLK_LIST(audio_ap_clk2),
+};
+
+static int audio_get_pinctrl(struct platform_device *pdev)
+{
+	struct pinctrl_info *pnctrl_info;
+	struct pinctrl *pinctrl;
+	int ret;
+
+	pnctrl_info = &audio_ap_clk2.pnctrl_info;
+
+	if (pnctrl_info->pinctrl) {
+		dev_dbg(&pdev->dev, "%s: already requested before\n",
+			__func__);
+		return -EINVAL;
+	}
+
+	pinctrl = devm_pinctrl_get(&pdev->dev);
+	if (IS_ERR_OR_NULL(pinctrl)) {
+		dev_dbg(&pdev->dev, "%s: Unable to get pinctrl handle\n",
+			__func__);
+		return -EINVAL;
+	}
+	pnctrl_info->pinctrl = pinctrl;
+	/* get all state handles from Device Tree */
+	pnctrl_info->sleep = pinctrl_lookup_state(pinctrl, "sleep");
+	if (IS_ERR(pnctrl_info->sleep)) {
+		dev_err(&pdev->dev, "%s: could not get sleep pinstate\n",
+			__func__);
+		goto err;
+	}
+	pnctrl_info->active = pinctrl_lookup_state(pinctrl, "active");
+	if (IS_ERR(pnctrl_info->active)) {
+		dev_err(&pdev->dev, "%s: could not get active pinstate\n",
+			__func__);
+		goto err;
+	}
+	/* Reset the TLMM pins to a default state */
+	ret = pinctrl_select_state(pnctrl_info->pinctrl,
+				   pnctrl_info->sleep);
+	if (ret) {
+		dev_err(&pdev->dev, "%s: Disable TLMM pins failed with %d\n",
+			__func__, ret);
+		goto err;
+	}
+	return 0;
+
+err:
+	devm_pinctrl_put(pnctrl_info->pinctrl);
+	return -EINVAL;
+}
+
+static int audio_ref_clk_probe(struct platform_device *pdev)
+{
+	int clk_gpio;
+	int ret;
+	struct clk *audio_clk;
+
+	clk_gpio = of_get_named_gpio(pdev->dev.of_node,
+				     "qcom,audio-ref-clk-gpio", 0);
+	if (clk_gpio > 0) {
+		ret = gpio_request(clk_gpio, "EXT_CLK");
+		if (ret) {
+			dev_err(&pdev->dev,
+				"Request ext clk gpio failed %d, err:%d\n",
+				clk_gpio, ret);
+			goto err;
+		}
+		if (of_property_read_bool(pdev->dev.of_node,
+					"qcom,node_has_rpm_clock")) {
+			audio_clk = clk_get(&pdev->dev, NULL);
+			if (IS_ERR(audio_clk)) {
+				dev_err(&pdev->dev, "Failed to get RPM div clk\n");
+				ret = PTR_ERR(audio_clk);
+				goto err_gpio;
+			}
+			audio_pmi_clk.c.parent = audio_clk;
+			audio_pmi_clk.gpio = clk_gpio;
+		} else {
+			audio_ap_clk.gpio = clk_gpio;
+		}
+	} else {
+		if (of_property_read_bool(pdev->dev.of_node,
+					"qcom,node_has_rpm_clock")) {
+			audio_clk = clk_get(&pdev->dev, NULL);
+			if (IS_ERR(audio_clk)) {
+				dev_err(&pdev->dev, "Failed to get lnbbclk2\n");
+				ret = PTR_ERR(audio_clk);
+				goto err;
+			}
+			audio_pmi_lnbb_clk.c.parent = audio_clk;
+			audio_pmi_lnbb_clk.gpio = -EINVAL;
+		}
+	}
+
+	ret = audio_get_pinctrl(pdev);
+	if (ret)
+		dev_dbg(&pdev->dev, "%s: Parsing pinctrl failed\n",
+			__func__);
+
+	ret = of_msm_clock_register(pdev->dev.of_node, audio_ref_clock,
+			      ARRAY_SIZE(audio_ref_clock));
+	if (ret) {
+		dev_err(&pdev->dev, "%s: audio ref clock register failed\n",
+			__func__);
+		goto err_gpio;
+	}
+
+	return 0;
+
+err_gpio:
+	gpio_free(clk_gpio);
+
+err:
+	return ret;
+}
+
+static int audio_ref_clk_remove(struct platform_device *pdev)
+{
+	struct pinctrl_info *pnctrl_info = &audio_ap_clk2.pnctrl_info;
+
+	if (audio_pmi_clk.gpio > 0)
+		gpio_free(audio_pmi_clk.gpio);
+	else if (audio_ap_clk.gpio > 0)
+		gpio_free(audio_ap_clk.gpio);
+
+	if (pnctrl_info->pinctrl) {
+		devm_pinctrl_put(pnctrl_info->pinctrl);
+		pnctrl_info->pinctrl = NULL;
+	}
+
+	return 0;
+}
+
+static const struct of_device_id audio_ref_clk_match[] = {
+	{.compatible = "qcom,audio-ref-clk"},
+	{}
+};
+MODULE_DEVICE_TABLE(of, audio_ref_clk_match);
+
+static struct platform_driver audio_ref_clk_driver = {
+	.driver = {
+		.name = "audio-ref-clk",
+		.owner = THIS_MODULE,
+		.of_match_table = audio_ref_clk_match,
+	},
+	.probe = audio_ref_clk_probe,
+	.remove = audio_ref_clk_remove,
+};
+
+static int __init audio_ref_clk_platform_init(void)
+{
+	return platform_driver_register(&audio_ref_clk_driver);
+}
+module_init(audio_ref_clk_platform_init);
+
+static void __exit audio_ref_clk_platform_exit(void)
+{
+	platform_driver_unregister(&audio_ref_clk_driver);
+}
+module_exit(audio_ref_clk_platform_exit);
+
+MODULE_DESCRIPTION("Audio Ref Clock module platform driver");
+MODULE_LICENSE("GPL v2");
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/sound/soc/codecs/msm_hdmi_codec_rx.c	2019-01-22 16:16:29.519300885 +0100
@@ -0,0 +1,563 @@
+/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/err.h>
+#include <sound/core.h>
+#include <sound/pcm.h>
+#include <sound/soc.h>
+#include <linux/msm_ext_display.h>
+
+#define MSM_EXT_DISP_PCM_RATES	SNDRV_PCM_RATE_48000
+#define AUD_EXT_DISP_ACK_DISCONNECT (AUDIO_ACK_CONNECT ^ AUDIO_ACK_CONNECT)
+#define AUD_EXT_DISP_ACK_CONNECT    (AUDIO_ACK_CONNECT)
+#define AUD_EXT_DISP_ACK_ENABLE     (AUDIO_ACK_SET_ENABLE | AUDIO_ACK_ENABLE)
+
+static const char *const ext_disp_audio_type_text[] = {"None", "HDMI", "DP"};
+static const char *const ext_disp_audio_ack_text[] = {"Disconnect",  "Connect",
+						      "Ack_Enable"};
+
+static SOC_ENUM_SINGLE_EXT_DECL(ext_disp_audio_type, ext_disp_audio_type_text);
+static SOC_ENUM_SINGLE_EXT_DECL(ext_disp_audio_ack_state,
+				ext_disp_audio_ack_text);
+
+struct msm_ext_disp_audio_codec_rx_data {
+	struct platform_device *ext_disp_core_pdev;
+	struct msm_ext_disp_audio_codec_ops ext_disp_ops;
+	int cable_status;
+};
+
+static int msm_ext_disp_edid_ctl_info(struct snd_kcontrol *kcontrol,
+			struct snd_ctl_elem_info *uinfo)
+{
+	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
+	struct msm_ext_disp_audio_codec_rx_data *codec_data;
+	struct msm_ext_disp_audio_edid_blk edid_blk;
+	int rc;
+
+	codec_data = snd_soc_codec_get_drvdata(codec);
+
+	if (!codec_data) {
+		dev_err(codec->dev, "%s: codec_data is NULL\n", __func__);
+		return -EINVAL;
+	}
+
+	if (!codec_data->ext_disp_ops.get_audio_edid_blk) {
+		dev_dbg(codec->dev, "%s: get_audio_edid_blk() is NULL\n",
+			__func__);
+		uinfo->type = SNDRV_CTL_ELEM_TYPE_BYTES;
+		uinfo->count = 0;
+		return 0;
+	}
+
+	rc = codec_data->ext_disp_ops.get_audio_edid_blk(
+				codec_data->ext_disp_core_pdev, &edid_blk);
+
+	if (!IS_ERR_VALUE(rc)) {
+		uinfo->type = SNDRV_CTL_ELEM_TYPE_BYTES;
+		uinfo->count = edid_blk.audio_data_blk_size +
+			edid_blk.spk_alloc_data_blk_size;
+	}
+
+	dev_dbg(codec->dev, "%s: count: %d\n", __func__, uinfo->count);
+
+	return rc;
+}
+
+static int msm_ext_disp_edid_get(struct snd_kcontrol *kcontrol,
+				struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
+	struct msm_ext_disp_audio_codec_rx_data *codec_data;
+	struct msm_ext_disp_audio_edid_blk edid_blk;
+	int rc;
+
+	codec_data = snd_soc_codec_get_drvdata(codec);
+	if (!codec_data || !codec_data->ext_disp_ops.get_audio_edid_blk) {
+		dev_err(codec->dev, "%s: codec_data or get_audio_edid_blk() is NULL\n",
+			__func__);
+		return -EINVAL;
+	}
+
+	rc = codec_data->ext_disp_ops.get_audio_edid_blk(
+			codec_data->ext_disp_core_pdev, &edid_blk);
+	if (!IS_ERR_VALUE(rc)) {
+		if (sizeof(ucontrol->value.bytes.data) <
+			  (edid_blk.audio_data_blk_size +
+			   edid_blk.spk_alloc_data_blk_size)) {
+			dev_err(codec->dev,
+				"%s: Not enough memory to copy EDID data\n",
+				__func__);
+			return -ENOMEM;
+		}
+
+		memcpy(ucontrol->value.bytes.data,
+		       edid_blk.audio_data_blk,
+		       edid_blk.audio_data_blk_size);
+		memcpy((ucontrol->value.bytes.data +
+		       edid_blk.audio_data_blk_size),
+		       edid_blk.spk_alloc_data_blk,
+		       edid_blk.spk_alloc_data_blk_size);
+
+		dev_dbg(codec->dev, "%s: data_blk_size:%d, spk_alloc_data_blk_size:%d\n",
+			__func__, edid_blk.audio_data_blk_size,
+			edid_blk.spk_alloc_data_blk_size);
+	}
+
+	return rc;
+}
+
+static int msm_ext_disp_audio_type_get(struct snd_kcontrol *kcontrol,
+				       struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
+	struct msm_ext_disp_audio_codec_rx_data *codec_data;
+	enum msm_ext_disp_cable_state cable_state;
+	enum msm_ext_disp_type disp_type;
+	int rc;
+
+	codec_data = snd_soc_codec_get_drvdata(codec);
+	if (!codec_data ||
+	    !codec_data->ext_disp_ops.cable_status ||
+	    !codec_data->ext_disp_ops.get_intf_id) {
+		dev_err(codec->dev, "%s: codec_data, cable_status() or get_intf_id() is NULL\n",
+			__func__);
+		return -EINVAL;
+	}
+
+	cable_state = codec_data->ext_disp_ops.cable_status(
+				   codec_data->ext_disp_core_pdev, 1);
+	if (IS_ERR_VALUE(cable_state)) {
+		dev_err(codec->dev, "%s: Error retrieving cable state from ext_disp, err:%d\n",
+			__func__, cable_state);
+		rc = cable_state;
+		goto done;
+	}
+
+	codec_data->cable_status = cable_state;
+	if (cable_state == EXT_DISPLAY_CABLE_DISCONNECT) {
+		dev_err(codec->dev, "%s: Display cable disconnected\n",
+			__func__);
+		ucontrol->value.integer.value[0] = 0;
+		rc = 0;
+		goto done;
+	}
+
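+	/* Value indexes ext_disp_audio_type_text: 0 None, 1 HDMI, 2 DP. */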
+	disp_type = codec_data->ext_disp_ops.get_intf_id(
+						codec_data->ext_disp_core_pdev);
+	if (!IS_ERR_VALUE(disp_type)) {
+		switch (disp_type) {
+		case EXT_DISPLAY_TYPE_DP:
+			ucontrol->value.integer.value[0] = 2;
+			rc = 0;
+			break;
+		case EXT_DISPLAY_TYPE_HDMI:
+			ucontrol->value.integer.value[0] = 1;
+			rc = 0;
+			break;
+		default:
+			rc = -EINVAL;
+			dev_err(codec->dev, "%s: Invalid disp_type:%d\n",
+			       __func__, disp_type);
+			goto done;
+		}
+		dev_dbg(codec->dev, "%s: Display type: %d\n",
+			__func__, disp_type);
+	} else {
+		dev_err(codec->dev, "%s: Error retrieving disp_type from ext_disp, err:%d\n",
+			__func__, disp_type);
+		rc = disp_type;
+	}
+
+done:
+	return rc;
+}
+
+static int msm_ext_disp_audio_ack_set(struct snd_kcontrol *kcontrol,
+				      struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
+	struct msm_ext_disp_audio_codec_rx_data *codec_data;
+	u32 ack_state = 0;
+	int rc;
+
+	codec_data = snd_soc_codec_get_drvdata(codec);
+	if (!codec_data ||
+	    !codec_data->ext_disp_ops.acknowledge) {
+		dev_err(codec->dev,
+			"%s: codec_data or ops acknowledge() is NULL\n",
+			__func__);
+		rc = -EINVAL;
+		goto done;
+	}
+
+	switch (ucontrol->value.enumerated.item[0]) {
+	case 0:
+		ack_state = AUD_EXT_DISP_ACK_DISCONNECT;
+		break;
+	case 1:
+		ack_state = AUD_EXT_DISP_ACK_CONNECT;
+		break;
+	case 2:
+		ack_state = AUD_EXT_DISP_ACK_ENABLE;
+		break;
+	default:
+		rc = -EINVAL;
+		dev_err(codec->dev,
+			"%s: invalid value %d for mixer ctl\n",
+			__func__, ucontrol->value.enumerated.item[0]);
+		goto done;
+	}
+	dev_dbg(codec->dev, "%s: control %d, ack set value 0x%x\n",
+		__func__, ucontrol->value.enumerated.item[0], ack_state);
+
+	rc = codec_data->ext_disp_ops.acknowledge(
+			 codec_data->ext_disp_core_pdev, ack_state);
+	if (rc < 0) {
+		dev_err(codec->dev, "%s: error from acknowledge(), err:%d\n",
+			__func__, rc);
+	}
+
+done:
+	return rc;
+}
+
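+/*
+ * Mixer controls exposed to user space.  The EDID byte controls return
+ * the CEA audio data block followed by the speaker allocation block;
+ * an illustrative read (control names as defined below):
+ *
+ *   amixer -c <card> cget iface=PCM,name='HDMI EDID'
+ */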
+static const struct snd_kcontrol_new msm_ext_disp_codec_rx_controls[] = {
+	{
+		.access = SNDRV_CTL_ELEM_ACCESS_READ |
+			  SNDRV_CTL_ELEM_ACCESS_VOLATILE,
+		.iface  = SNDRV_CTL_ELEM_IFACE_PCM,
+		.name   = "HDMI EDID",
+		.info   = msm_ext_disp_edid_ctl_info,
+		.get    = msm_ext_disp_edid_get,
+	},
+	{
+		.access = SNDRV_CTL_ELEM_ACCESS_READ |
+			  SNDRV_CTL_ELEM_ACCESS_VOLATILE,
+		.iface  = SNDRV_CTL_ELEM_IFACE_PCM,
+		.name   = "Display Port EDID",
+		.info   = msm_ext_disp_edid_ctl_info,
+		.get    = msm_ext_disp_edid_get,
+	},
+	SOC_ENUM_EXT("External Display Type", ext_disp_audio_type,
+		     msm_ext_disp_audio_type_get, NULL),
+	SOC_ENUM_EXT("External Display Audio Ack", ext_disp_audio_ack_state,
+		     NULL, msm_ext_disp_audio_ack_set),
+};
+
+static int msm_ext_disp_audio_codec_rx_dai_startup(
+		struct snd_pcm_substream *substream,
+		struct snd_soc_dai *dai)
+{
+	int ret = 0;
+	struct msm_ext_disp_audio_codec_rx_data *codec_data =
+			dev_get_drvdata(dai->codec->dev);
+
+	if (!codec_data || !codec_data->ext_disp_ops.cable_status) {
+		dev_err(dai->dev, "%s() codec_data or cable_status is null\n",
+			__func__);
+		return -EINVAL;
+	}
+
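+	/* Trailing argument: 1 takes the core's audio flag, 0 releases it. */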
+	codec_data->cable_status =
+		codec_data->ext_disp_ops.cable_status(
+		codec_data->ext_disp_core_pdev, 1);
+	if (IS_ERR_VALUE(codec_data->cable_status)) {
+		dev_err(dai->dev,
+			"%s() ext disp core is not ready (ret val = %d)\n",
+			__func__, codec_data->cable_status);
+		ret = codec_data->cable_status;
+	} else if (!codec_data->cable_status) {
+		dev_err(dai->dev,
+			"%s() ext disp cable is not connected (ret val = %d)\n",
+			__func__, codec_data->cable_status);
+		ret = -ENODEV;
+	}
+
+	return ret;
+}
+
+static int msm_ext_disp_audio_codec_rx_dai_hw_params(
+		struct snd_pcm_substream *substream,
+		struct snd_pcm_hw_params *params,
+		struct snd_soc_dai *dai)
+{
+	u32 channel_allocation = 0;
+	u32 level_shift  = 0; /* 0dB */
+	bool down_mix = false;
+	u32 num_channels = params_channels(params);
+	int rc = 0;
+	struct msm_ext_disp_audio_setup_params audio_setup_params = {0};
+
+	struct msm_ext_disp_audio_codec_rx_data *codec_data =
+			dev_get_drvdata(dai->codec->dev);
+
+	if (!codec_data || !codec_data->ext_disp_ops.audio_info_setup) {
+		dev_err(dai->dev, "%s: codec_data or audio_info_setup is null\n",
+			__func__);
+		return -EINVAL;
+	}
+
+	if (IS_ERR_VALUE(codec_data->cable_status)) {
+		dev_err_ratelimited(dai->dev,
+			"%s() ext disp core is not ready (ret val = %d)\n",
+			__func__, codec_data->cable_status);
+		return codec_data->cable_status;
+	} else if (!codec_data->cable_status) {
+		dev_err_ratelimited(dai->dev,
+			"%s() ext disp cable is not connected (ret val = %d)\n",
+			__func__, codec_data->cable_status);
+		return -ENODEV;
+	}
+
+	/* Refer to HDMI spec CEA-861-E, Table 28: Audio InfoFrame Data Byte 4 */
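+	/*
+	 * sample_present flags the layout-1 sample lines carrying valid
+	 * audio; each set bit appears to enable one two-channel lane.
+	 */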
+	switch (num_channels) {
+	case 2:
+		channel_allocation  = 0;
+		break;
+	case 3:
+		channel_allocation  = 0x02; /* default to FL/FR/FC */
+		audio_setup_params.sample_present = 0x3;
+		break;
+	case 4:
+		channel_allocation  = 0x06; /* default to FL/FR/FC/RC */
+		audio_setup_params.sample_present = 0x7;
+		break;
+	case 5:
+		channel_allocation  = 0x0A; /* default to FL/FR/FC/RR/RL */
+		audio_setup_params.sample_present = 0x7;
+		break;
+	case 6:
+		channel_allocation  = 0x0B;
+		audio_setup_params.sample_present = 0x7;
+		break;
+	case 7:
+		channel_allocation  = 0x12; /* default to FL/FR/FC/RL/RR/RRC/RLC */
+		audio_setup_params.sample_present = 0xf;
+		break;
+	case 8:
+		channel_allocation  = 0x13;
+		audio_setup_params.sample_present = 0xf;
+		break;
+	default:
+		dev_err(dai->dev, "invalid channels = %u\n", num_channels);
+		return -EINVAL;
+	}
+
+	dev_dbg(dai->dev,
+		"%s() num_ch %u  samplerate %u channel_allocation = %u\n",
+		__func__, num_channels, params_rate(params),
+		channel_allocation);
+
+	audio_setup_params.sample_rate_hz = params_rate(params);
+	audio_setup_params.num_of_channels = num_channels;
+	audio_setup_params.channel_allocation = channel_allocation;
+	audio_setup_params.level_shift = level_shift;
+	audio_setup_params.down_mix = down_mix;
+
+	rc = codec_data->ext_disp_ops.audio_info_setup(
+			codec_data->ext_disp_core_pdev, &audio_setup_params);
+	if (IS_ERR_VALUE(rc)) {
+		dev_err_ratelimited(dai->dev,
+			"%s() ext disp core is not ready, rc: %d\n",
+			__func__, rc);
+	}
+
+	return rc;
+}
+
+static void msm_ext_disp_audio_codec_rx_dai_shutdown(
+		struct snd_pcm_substream *substream,
+		struct snd_soc_dai *dai)
+{
+	int rc;
+
+	struct msm_ext_disp_audio_codec_rx_data *codec_data =
+			dev_get_drvdata(dai->codec->dev);
+
+	if (!codec_data || !codec_data->ext_disp_ops.teardown_done ||
+	    !codec_data->ext_disp_ops.cable_status) {
+		dev_err(dai->dev, "%s: codec data or teardown_done or cable_status is null\n",
+			__func__);
+		return;
+	}
+
+	rc = codec_data->ext_disp_ops.cable_status(
+			codec_data->ext_disp_core_pdev, 0);
+	if (IS_ERR_VALUE(rc)) {
+		dev_err(dai->dev,
+			"%s: ext disp core had problems releasing audio flag\n",
+			__func__);
+	}
+
+	codec_data->ext_disp_ops.teardown_done(
+		codec_data->ext_disp_core_pdev);
+}
+
+static int msm_ext_disp_audio_codec_rx_probe(struct snd_soc_codec *codec)
+{
+	struct msm_ext_disp_audio_codec_rx_data *codec_data;
+	struct device_node *of_node_parent = NULL;
+
+	codec_data = kzalloc(sizeof(struct msm_ext_disp_audio_codec_rx_data),
+		GFP_KERNEL);
+
+	if (!codec_data) {
+		dev_err(codec->dev, "%s(): fail to allocate dai data\n",
+				__func__);
+		return -ENOMEM;
+	}
+
+	of_node_parent = of_get_parent(codec->dev->of_node);
+	if (!of_node_parent) {
+		dev_err(codec->dev, "%s(): Parent device tree node not found\n",
+				__func__);
+		kfree(codec_data);
+		return -ENODEV;
+	}
+
+	codec_data->ext_disp_core_pdev = of_find_device_by_node(of_node_parent);
+	if (!codec_data->ext_disp_core_pdev) {
+		dev_err(codec->dev, "%s(): can't get parent pdev\n", __func__);
+		kfree(codec_data);
+		return -ENODEV;
+	}
+
+	if (msm_ext_disp_register_audio_codec(codec_data->ext_disp_core_pdev,
+				&codec_data->ext_disp_ops)) {
+		dev_err(codec->dev, "%s(): can't register with ext disp core",
+				__func__);
+		kfree(codec_data);
+		return -ENODEV;
+	}
+
+	dev_set_drvdata(codec->dev, codec_data);
+
+	dev_dbg(codec->dev, "%s(): registered %s with ext disp core\n",
+		__func__, codec->component.name);
+
+	return 0;
+}
+
+static int msm_ext_disp_audio_codec_rx_remove(struct snd_soc_codec *codec)
+{
+	struct msm_ext_disp_audio_codec_rx_data *codec_data;
+
+	codec_data = dev_get_drvdata(codec->dev);
+	kfree(codec_data);
+
+	return 0;
+}
+
+static const struct snd_soc_dai_ops msm_ext_disp_audio_codec_rx_dai_ops = {
+	.startup   = msm_ext_disp_audio_codec_rx_dai_startup,
+	.hw_params = msm_ext_disp_audio_codec_rx_dai_hw_params,
+	.shutdown  = msm_ext_disp_audio_codec_rx_dai_shutdown
+};
+
+static struct snd_soc_dai_driver msm_ext_disp_audio_codec_rx_dais[] = {
+	{
+		.name = "msm_hdmi_audio_codec_rx_dai",
+		.playback = {
+			.stream_name = "HDMI Playback",
+			.channels_min = 1,
+			.channels_max = 8,
+			.rate_min = 48000,
+			.rate_max = 48000,
+			.rates = MSM_EXT_DISP_PCM_RATES,
+			.formats = SNDRV_PCM_FMTBIT_S16_LE,
+		},
+		.ops = &msm_ext_disp_audio_codec_rx_dai_ops,
+	},
+	{
+		.name = "msm_dp_audio_codec_rx_dai",
+		.playback = {
+			.stream_name = "Display Port Playback",
+			.channels_min = 1,
+			.channels_max = 8,
+			.rate_min = 48000,
+			.rate_max = 192000,
+			.rates = SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_96000 |
+				 SNDRV_PCM_RATE_192000,
+			.formats = SNDRV_PCM_FMTBIT_S16_LE |
+				   SNDRV_PCM_FMTBIT_S24_LE,
+		},
+		.ops = &msm_ext_disp_audio_codec_rx_dai_ops,
+	},
+};
+
+static struct snd_soc_codec_driver msm_ext_disp_audio_codec_rx_soc_driver = {
+	.probe = msm_ext_disp_audio_codec_rx_probe,
+	.remove =  msm_ext_disp_audio_codec_rx_remove,
+	.controls = msm_ext_disp_codec_rx_controls,
+	.num_controls = ARRAY_SIZE(msm_ext_disp_codec_rx_controls),
+};
+
+static int msm_ext_disp_audio_codec_rx_plat_probe(
+		struct platform_device *pdev)
+{
+	dev_dbg(&pdev->dev, "%s(): dev name %s\n", __func__,
+		dev_name(&pdev->dev));
+
+	return snd_soc_register_codec(&pdev->dev,
+		&msm_ext_disp_audio_codec_rx_soc_driver,
+		msm_ext_disp_audio_codec_rx_dais,
+		ARRAY_SIZE(msm_ext_disp_audio_codec_rx_dais));
+}
+
+static int msm_ext_disp_audio_codec_rx_plat_remove(
+		struct platform_device *pdev)
+{
+	snd_soc_unregister_codec(&pdev->dev);
+	return 0;
+}
+
+static const struct of_device_id msm_ext_disp_audio_codec_rx_dt_match[] = {
+	{ .compatible = "qcom,msm-ext-disp-audio-codec-rx", },
+	{}
+};
+MODULE_DEVICE_TABLE(of, msm_ext_disp_audio_codec_rx_dt_match);
+
+static struct platform_driver msm_ext_disp_audio_codec_rx_driver = {
+	.driver = {
+		.name = "msm-ext-disp-audio-codec-rx",
+		.owner = THIS_MODULE,
+		.of_match_table = msm_ext_disp_audio_codec_rx_dt_match,
+	},
+	.probe = msm_ext_disp_audio_codec_rx_plat_probe,
+	.remove = msm_ext_disp_audio_codec_rx_plat_remove,
+};
+
+static int __init msm_ext_disp_audio_codec_rx_init(void)
+{
+	int rc;
+
+	rc = platform_driver_register(&msm_ext_disp_audio_codec_rx_driver);
+	if (rc) {
+		pr_err("%s: failed to register ext disp codec driver err:%d\n",
+		       __func__, rc);
+	}
+
+	return rc;
+}
+module_init(msm_ext_disp_audio_codec_rx_init);
+
+static void __exit msm_ext_disp_audio_codec_rx_exit(void)
+{
+	platform_driver_unregister(&msm_ext_disp_audio_codec_rx_driver);
+}
+module_exit(msm_ext_disp_audio_codec_rx_exit);
+
+MODULE_DESCRIPTION("MSM External Display Audio CODEC Driver");
+MODULE_VERSION("1.0");
+MODULE_LICENSE("GPL v2");
diff -Nruw linux-4.4.115-fbx/sound/soc/codecs/msm_sdw./Kconfig linux-4.4.115-fbx/sound/soc/codecs/msm_sdw/Kconfig
--- linux-4.4.115-fbx/sound/soc/codecs/msm_sdw./Kconfig	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/sound/soc/codecs/msm_sdw/Kconfig	2019-01-22 16:16:29.519300885 +0100
@@ -0,0 +1,6 @@
+config SND_SOC_MSM_SDW
+	tristate "MSM Internal soundwire codec"
+	help
+	  MSM-based SoundWire codec core driver,
+	  supported along with the internal digital
+	  codec core.
diff -Nruw linux-4.4.115-fbx/sound/soc/codecs/msm_sdw./Makefile linux-4.4.115-fbx/sound/soc/codecs/msm_sdw/Makefile
--- linux-4.4.115-fbx/sound/soc/codecs/msm_sdw./Makefile	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/sound/soc/codecs/msm_sdw/Makefile	2019-01-22 16:16:29.519300885 +0100
@@ -0,0 +1,3 @@
+snd-soc-msm-sdw-objs := msm_sdw_cdc.o msm_sdw_regmap.o msm-sdw-tables.o msm_sdw_cdc_utils.o
+obj-$(CONFIG_SND_SOC_MSM_SDW)	+= snd-soc-msm-sdw.o
+ccflags-y += -I$(srctree)/sound/soc/msm
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/sound/soc/codecs/msm_stub.c	2019-01-22 16:16:29.519300885 +0100
@@ -0,0 +1,89 @@
+/* Copyright (c) 2011-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <sound/core.h>
+#include <sound/pcm.h>
+#include <sound/soc.h>
+
+/* A dummy driver useful only to advertise hardware parameters */
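+/*
+ * Machine drivers typically point DAI links at "msm-stub-rx" /
+ * "msm-stub-tx" when the backend needs no real codec configuration.
+ */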
+static struct snd_soc_dai_driver msm_stub_dais[] = {
+	{
+		.name = "msm-stub-rx",
+		.playback = { /* Support maximum range */
+			.stream_name = "Playback",
+			.channels_min = 1,
+			.channels_max = 8,
+			.rates = SNDRV_PCM_RATE_8000_48000,
+			.formats = SNDRV_PCM_FMTBIT_S16_LE,
+		},
+	},
+	{
+		.name = "msm-stub-tx",
+		.capture = { /* Support maximum range */
+			.stream_name = "Record",
+			.channels_min = 1,
+			.channels_max = 8,
+			.rates = SNDRV_PCM_RATE_8000_48000,
+			.formats = (SNDRV_PCM_FMTBIT_S16_LE |
+				    SNDRV_PCM_FMTBIT_S24_LE),
+		},
+	},
+};
+
+static struct snd_soc_codec_driver soc_msm_stub = {};
+
+static int msm_stub_dev_probe(struct platform_device *pdev)
+{
+	dev_dbg(&pdev->dev, "dev name %s\n", dev_name(&pdev->dev));
+
+	return snd_soc_register_codec(&pdev->dev,
+	&soc_msm_stub, msm_stub_dais, ARRAY_SIZE(msm_stub_dais));
+}
+
+static int msm_stub_dev_remove(struct platform_device *pdev)
+{
+	snd_soc_unregister_codec(&pdev->dev);
+	return 0;
+}
+
+static const struct of_device_id msm_stub_codec_dt_match[] = {
+	{ .compatible = "qcom,msm-stub-codec", },
+	{}
+};
+
+static struct platform_driver msm_stub_driver = {
+	.driver = {
+		.name = "msm-stub-codec",
+		.owner = THIS_MODULE,
+		.of_match_table = msm_stub_codec_dt_match,
+	},
+	.probe = msm_stub_dev_probe,
+	.remove = msm_stub_dev_remove,
+};
+
+static int __init msm_stub_init(void)
+{
+	return platform_driver_register(&msm_stub_driver);
+}
+module_init(msm_stub_init);
+
+static void __exit msm_stub_exit(void)
+{
+	platform_driver_unregister(&msm_stub_driver);
+}
+module_exit(msm_stub_exit);
+
+MODULE_DESCRIPTION("Generic MSM CODEC driver");
+MODULE_VERSION("1.0");
+MODULE_LICENSE("GPL v2");
diff -Nruw linux-4.4.115-fbx/sound/soc/codecs/sdm660_cdc./Kconfig linux-4.4.115-fbx/sound/soc/codecs/sdm660_cdc/Kconfig
--- linux-4.4.115-fbx/sound/soc/codecs/sdm660_cdc./Kconfig	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/sound/soc/codecs/sdm660_cdc/Kconfig	2019-01-22 16:16:29.531300994 +0100
@@ -0,0 +1,3 @@
+
+config SND_SOC_SDM660_CDC
+	tristate "MSM Internal PMIC based codec"
diff -Nruw linux-4.4.115-fbx/sound/soc/codecs/sdm660_cdc./Makefile linux-4.4.115-fbx/sound/soc/codecs/sdm660_cdc/Makefile
--- linux-4.4.115-fbx/sound/soc/codecs/sdm660_cdc./Makefile	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/sound/soc/codecs/sdm660_cdc/Makefile	2019-01-22 16:16:29.531300994 +0100
@@ -0,0 +1,2 @@
+snd-soc-sdm660-cdc-objs := msm-analog-cdc.o msm-digital-cdc.o sdm660-regmap.o
+obj-$(CONFIG_SND_SOC_SDM660_CDC) += snd-soc-sdm660-cdc.o sdm660-cdc-irq.o
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/sound/soc/codecs/sil9437.c	2019-01-22 16:16:29.535301030 +0100
@@ -0,0 +1,908 @@
+/*
+ * sil9437.c -- Silicon Image Sil9437 ARC/eARC receiver driver
+ *
+ * Copyright 2017 Freebox SAS
+ *
+ * Author: Arnaud Vrac <avrac@freebox.fr>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/mutex.h>
+#include <linux/regmap.h>
+#include <linux/i2c.h>
+#include <linux/of.h>
+#include <linux/gpio/consumer.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+#include <sound/soc-dapm.h>
+
+#include "sil9437.h"
+
+#define SIL9437_EARC_CAPS_DS_MAX_LENGTH	256
+
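+/*
+ * Default eARC capabilities data structure advertised to the eARC TX.
+ * The payload embeds CEA-style short audio descriptors; user space can
+ * override it at runtime through the "eARC RX EDID" byte control below.
+ */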
+static const u8 sil9437_earc_cap_ds[SIL9437_EARC_CAPS_DS_MAX_LENGTH] = {
+	0x01, 0x01, 0x11, 0x2c, 0x09, 0x7f,
+	0x05, 0x0d, 0x7f, 0x05, 0x15, 0x07,
+	0x50, 0x57, 0x06, 0x00, 0x83, 0x07
+};
+
+static const char *sil9437_sm_states[] = {
+	"IDLE1", "DISC1", "IDLE2", "DISC2", "ARC", "EARC"
+};
+
+struct sil9437_priv {
+	struct device *dev;
+	struct regmap *sys_regmap;
+	struct regmap *phy_regmap;
+	struct regmap *earc_regmap;
+	struct gpio_desc *gpio_rst;
+	struct i2c_client *phy_i2c;
+	struct i2c_client *earc_i2c;
+	struct mutex lock;
+	u8 erx_latency;
+};
+
+static const struct snd_soc_dapm_widget sil9437_dapm_widgets[] = {
+	SND_SOC_DAPM_INPUT("eARC RX"),
+	SND_SOC_DAPM_AIF_OUT("SDOUT", NULL, 0, SND_SOC_NOPM, 0, 0),
+};
+
+static const struct snd_soc_dapm_route sil9437_dapm_routes[] = {
+	{ "SDOUT", NULL, "eARC RX" },
+	{ "Capture", NULL, "SDOUT" },
+};
+
+static int sil9437_edid_put(struct snd_kcontrol *kcontrol,
+			    const unsigned int __user *bytes,
+			    unsigned int size)
+{
+	struct snd_soc_component *component = snd_kcontrol_chip(kcontrol);
+	struct sil9437_priv *sil9437 = snd_soc_component_get_drvdata(component);
+	u8 edid[SIL9437_EARC_CAPS_DS_MAX_LENGTH];
+	unsigned int i;
+
+	if (copy_from_user(edid, bytes, size))
+		return -EFAULT;
+
+	mutex_lock(&sil9437->lock);
+
+	regmap_write(sil9437->earc_regmap, SIL9437_EDID_FIFO_ADDR, 0);
+	for (i = 0; i < size; i++) {
+		regmap_write(sil9437->earc_regmap, SIL9437_EDID_FIFO_DATA,
+			     edid[i]);
+	}
+
+	mutex_unlock(&sil9437->lock);
+
+	return 0;
+}
+
+static int sil9437_edid_get(struct snd_kcontrol *kcontrol,
+			    unsigned int __user *bytes,
+			    unsigned int size)
+{
+	struct snd_soc_component *component = snd_kcontrol_chip(kcontrol);
+	struct sil9437_priv *sil9437 = snd_soc_component_get_drvdata(component);
+	uint8_t edid[SIL9437_EARC_CAPS_DS_MAX_LENGTH];
+	unsigned int i, val;
+
+	mutex_lock(&sil9437->lock);
+
+	regmap_write(sil9437->earc_regmap, SIL9437_EDID_FIFO_ADDR, 0);
+	for (i = 0; i < size; i++) {
+		regmap_read(sil9437->earc_regmap, SIL9437_EDID_FIFO_DATA, &val);
+		edid[i] = val;
+	}
+
+	mutex_unlock(&sil9437->lock);
+
+	if (copy_to_user(bytes, edid, size))
+		return -EFAULT;
+
+	return 0;
+}
+
+static const struct snd_kcontrol_new sil9437_snd_controls[] = {
+	SND_SOC_BYTES_TLV("eARC RX EDID", SIL9437_EARC_CAPS_DS_MAX_LENGTH,
+			  sil9437_edid_get, sil9437_edid_put),
+};
+
+static int sil9437_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
+{
+	struct snd_soc_codec *codec = dai->codec;
+	struct sil9437_priv *sil9437 = snd_soc_codec_get_drvdata(codec);
+	u8 format, inv_ws, inv_sck;
+
+	switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
+	case SND_SOC_DAIFMT_I2S:
+		dev_dbg(dai->dev, "sil9437: set format i2s\n");
+		format = 0x40;
+		break;
+	case SND_SOC_DAIFMT_RIGHT_J:
+		dev_dbg(dai->dev, "sil9437: set format right j\n");
+		format = 0x4d;
+		break;
+	case SND_SOC_DAIFMT_LEFT_J:
+		dev_dbg(dai->dev, "sil9437: set format left j\n");
+		format = 0x49;
+		break;
+	default:
+		dev_err(dai->dev, "Unsupported dai format\n");
+		return -EINVAL;
+	}
+
+	inv_sck = inv_ws = 0;
+	switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
+	case SND_SOC_DAIFMT_NB_NF:
+		break;
+	case SND_SOC_DAIFMT_IB_IF:
+		inv_sck = inv_ws = 1;
+		break;
+	case SND_SOC_DAIFMT_IB_NF:
+		inv_sck = 1;
+		break;
+	case SND_SOC_DAIFMT_NB_IF:
+		inv_ws = 1;
+		break;
+	default:
+		dev_err(dai->dev, "Unknown polarity configuration\n");
+		return -EINVAL;
+	}
+
+	dev_dbg(dai->dev, "sil9437: set inv_sck=%d inv_ws=%d\n",
+		inv_sck, inv_ws);
+
+	if (inv_sck)
+		format ^= 0x40;
+	if (inv_ws)
+		format ^= 0x08;
+
+	regmap_update_bits(sil9437->earc_regmap,
+			   SIL9437_RX_I2S_CTRL1, 0x5F, format);
+
+	switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
+	case SND_SOC_DAIFMT_CBM_CFM:
+		dev_dbg(dai->dev, "sil9437: set master\n");
+		break;
+	default:
+		dev_err(dai->dev, "Unsupported master/slave configuration\n");
+		return -EINVAL;
+	}
+
+	regmap_update_bits(sil9437->sys_regmap,
+			   SIL9437_MCLK_IO_CTRL, 0x01, 0x00);
+
+	return 0;
+}
+
+static int sil9437_hw_params(struct snd_pcm_substream *substream,
+			    struct snd_pcm_hw_params *params,
+			    struct snd_soc_dai *dai)
+{
+	struct snd_soc_codec *codec = dai->codec;
+	struct sil9437_priv *sil9437 = snd_soc_codec_get_drvdata(codec);
+	unsigned int channels;
+	u8 blen, sden;
+
+	dev_dbg(dai->dev, "sil9437: set hw_params word_length=%u\n",
+		params_width(params));
+
+	switch (params_width(params)) {
+	case 16:
+		blen = 0x20;
+		break;
+	case 32:
+		blen = 0x00;
+		break;
+	default:
+		dev_err(dai->dev, "Unsupported word length: %u\n",
+			params_width(params));
+		return -EINVAL;
+	}
+
+	regmap_update_bits(sil9437->earc_regmap,
+			   SIL9437_RX_I2S_CTRL1, 0x20, blen);
+
+	channels = params_channels(params);
+
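+	/* Enable one I2S data lane (SD0..SD3) per active channel pair. */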
+	sden = 0;
+	if (channels > 0)
+		sden |= 0x10;
+	if (channels > 2)
+		sden |= 0x20;
+	if (channels > 4)
+		sden |= 0x40;
+	if (channels > 6)
+		sden |= 0x80;
+
+	regmap_update_bits(sil9437->earc_regmap,
+			   SIL9437_RX_I2S_CTRL2, 0xF0, sden);
+
+	return 0;
+}
+
+static const struct snd_soc_dai_ops sil9437_dai_ops = {
+	.hw_params = sil9437_hw_params,
+	.set_fmt = sil9437_set_fmt,
+};
+
+#define SIL9437_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S32_LE)
+
+#define SIL9437_RATES (SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_44100 | \
+		       SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_88200 | \
+		       SNDRV_PCM_RATE_96000 | SNDRV_PCM_RATE_176400 | \
+		       SNDRV_PCM_RATE_192000)
+
+static struct snd_soc_dai_driver sil9437_dai = {
+	.name = "sil9437-earc",
+	.capture = {
+		.stream_name = "Capture",
+		.channels_min = 2,
+		.channels_max = 8,
+		.rates = SIL9437_RATES,
+		.formats = SIL9437_FORMATS,
+	},
+	.ops = &sil9437_dai_ops,
+};
+
+static const struct snd_soc_codec_driver soc_codec_dev_sil9437 = {
+	.controls = sil9437_snd_controls,
+	.num_controls = ARRAY_SIZE(sil9437_snd_controls),
+	.dapm_widgets = sil9437_dapm_widgets,
+	.num_dapm_widgets = ARRAY_SIZE(sil9437_dapm_widgets),
+	.dapm_routes = sil9437_dapm_routes,
+	.num_dapm_routes = ARRAY_SIZE(sil9437_dapm_routes),
+};
+
+static bool sil9437_sys_volatile(struct device *dev, unsigned int reg)
+{
+	switch (reg) {
+	case SIL9437_GPIO_C_CTRL:
+	case SIL9437_TOP_INTR_STATE:
+	case SIL9437_TOP_INTR1:
+	case SIL9437_TOP_INTR2:
+	case SIL9437_TOP_INTR3:
+	case SIL9437_RX_FREQ_VAL0:
+	case SIL9437_RX_FREQ_VAL1:
+	case SIL9437_RX_FREQ_ZONE:
+		return true;
+	default:
+		return false;
+	}
+}
+
+static const struct regmap_config sil9437_sys_regmap_config = {
+	.reg_bits = 8,
+	.val_bits = 8,
+	.max_register = 0xff,
+	.volatile_reg = sil9437_sys_volatile,
+	.cache_type = REGCACHE_NONE,
+};
+
+static bool sil9437_earc_volatile(struct device *dev, unsigned int reg)
+{
+	switch (reg) {
+	default:
+		return true;
+	}
+}
+
+static const struct regmap_config sil9437_earc_regmap_config = {
+	.reg_bits = 8,
+	.val_bits = 8,
+	.max_register = 0xff,
+	.volatile_reg = sil9437_earc_volatile,
+	.cache_type = REGCACHE_NONE,
+};
+
+static bool sil9437_phy_volatile(struct device *dev, unsigned int reg)
+{
+	switch (reg) {
+	case SIL9437_VIOLA_STATUS:
+	case SIL9437_VIOLA_MODE:
+		return true;
+	default:
+		return false;
+	}
+}
+
+static const struct regmap_config sil9437_phy_regmap_config = {
+	.reg_bits = 8,
+	.val_bits = 8,
+	.max_register = 0xff,
+	.volatile_reg = sil9437_phy_volatile,
+	.cache_type = REGCACHE_NONE,
+};
+
+static int sil9437_soft_reset(struct sil9437_priv *sil9437)
+{
+	return regmap_write_bits(sil9437->sys_regmap,
+				 SIL9437_SWHRST, 0x80, 0x80);
+}
+
+static u16 sil9437_get_slave_addr(struct sil9437_priv *sil9437,
+				  const char *name, u16 default_addr)
+{
+	struct device_node *np = sil9437->dev->of_node;
+	u32 addr = default_addr;
+	int i;
+
+	if (np) {
+		i = of_property_match_string(np, "reg-names", name);
+		if (i >= 0)
+			of_property_read_u32_index(np, "reg", i, &addr);
+	}
+
+	return addr;
+}
+
+static int sil9437_get_earc_int_status(struct sil9437_priv *sil9437,
+				       u8 *regs, unsigned int num_regs)
+{
+	unsigned int i;
+
+	regmap_bulk_read(sil9437->earc_regmap, SIL9437_INTR0, regs, num_regs);
+	for (i = 0; i < num_regs; i++) {
+		if (!regs[i])
+			continue;
+		regmap_write(sil9437->earc_regmap, SIL9437_INTR0 + i, regs[i]);
+	}
+
+	return 0;
+}
+
+static int sil9437_get_top_int_status(struct sil9437_priv *sil9437,
+				      u8 *regs, unsigned int num_regs)
+{
+	unsigned int i;
+
+	regmap_bulk_read(sil9437->sys_regmap, SIL9437_TOP_INTR1, regs, num_regs);
+
+	for (i = 0; i < num_regs; i++) {
+		if (!regs[i])
+			continue;
+		regmap_write(sil9437->sys_regmap, SIL9437_TOP_INTR1 + i, regs[i]);
+	}
+
+	return 0;
+}
+
+static int sil9437_enable_interrupts(struct sil9437_priv *sil9437, bool enable)
+{
+	return regmap_update_bits(sil9437->sys_regmap, SIL9437_TOP_INT_CTRL,
+				  0x80, enable ? 0x80 : 0x00);
+}
+
+static void sil9437_earc_configure(struct sil9437_priv *sil9437, bool connected)
+{
+	if (connected) {
+		/* set latency */
+		regmap_write(sil9437->earc_regmap, SIL9437_CMC_SLAVE_RX9,
+			     sil9437->erx_latency);
+
+		/* notify TX we are ready */
+		regmap_update_bits(sil9437->earc_regmap, SIL9437_CMC_SLAVE_RX6,
+				   0x18, 0x18);
+	} else {
+		regmap_write(sil9437->earc_regmap, SIL9437_CMC_SLAVE_RX9, 0x00);
+		regmap_update_bits(sil9437->earc_regmap, SIL9437_CMC_SLAVE_RX6,
+				   0x18, 0x00);
+	}
+}
+
+static int sil9437_get_rx_earc_clk_freq(struct sil9437_priv *sil9437,
+					unsigned int *freq)
+{
+	unsigned int f0, f1;
+	unsigned int w0, w1;
+
+	/* reset eARC clock */
+	regmap_update_bits(sil9437->sys_regmap, SIL9437_SRST,
+			   0x02, 0x02);
+
+	regmap_read(sil9437->sys_regmap, SIL9437_RX_FREQ_VAL0, &f0);
+	regmap_read(sil9437->sys_regmap, SIL9437_RX_FREQ_VAL1, &f1);
+	regmap_read(sil9437->sys_regmap, SIL9437_RX_FREQ_WIN0, &w0);
+	regmap_read(sil9437->sys_regmap, SIL9437_RX_FREQ_WIN1, &w1);
+
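+	/*
+	 * The counter appears to report eARC clock cycles seen within a
+	 * window expressed in 20 MHz reference cycles, so freq = count *
+	 * (20 MHz / window); e.g. a count of 6144 over a 1000-cycle
+	 * window would give 6144 * 20 kHz = 122.88 MHz (illustrative).
+	 */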
+	*freq = ((f1 << 8) | f0) * (20000000 / ((w1 << 8) | w0));
+
+	return 0;
+}
+
+static irqreturn_t sil9437_irq_handler_thread(int irq, void *data)
+{
+	struct sil9437_priv *sil9437 = data;
+	int top_state;
+	int reg;
+	u8 status;
+
+	regmap_read(sil9437->sys_regmap, SIL9437_TOP_INTR_STATE, &top_state);
+	if (top_state & 0x01) {
+		u8 top[SIL9437_NUM_TOP_INTERRUPTS];
+
+		sil9437_get_top_int_status(sil9437, top, ARRAY_SIZE(top));
+
+		status = top[SIL9437_TOP_INTR1_IDX];
+
+		if (status & 0x01)
+			dev_dbg(sil9437->dev, "top: sw interrupt\n");
+
+		if (status & 0x02)
+			dev_dbg(sil9437->dev, "top: timer interrupt\n");
+
+		if (status & 0x0C) {
+			unsigned int freq;
+			int ckdt_mode;
+
+			regmap_read(sil9437->phy_regmap, SIL9437_VIOLA_STATUS, &reg);
+			if (status & 0x04) {
+				dev_dbg(sil9437->dev, "viola clock detect = %d\n",
+					!!(reg & 0x01));
+			}
+			if (status & 0x08) {
+				dev_dbg(sil9437->dev, "viola lock detect = %d\n",
+					!!(reg & 0x02));
+			}
+
+			sil9437_get_rx_earc_clk_freq(sil9437, &freq);
+			dev_dbg(sil9437->dev, "eARC clock frequency: %u Hz\n",
+				freq);
+
+			if (freq > 65000000 || freq == 0)
+				ckdt_mode = 1;
+			else
+				ckdt_mode = 0;
+
+			regmap_update_bits(sil9437->phy_regmap, SIL9437_EARC_CFG0,
+					   0x10, ckdt_mode << 4);
+		}
+
+		if ((top[SIL9437_TOP_INTR2_IDX] & 0x10) ||
+		    (top[SIL9437_TOP_INTR3_IDX] & 0x04)) {
+			regmap_read(sil9437->sys_regmap, SIL9437_GPIO_C_CTRL, &reg);
+			dev_dbg(sil9437->dev, "HPD = %d\n", !!(reg & 0x10));
+		}
+	}
+
+	if (top_state & 0x04) {
+		u8 earc[SIL9437_NUM_INTERRUPTS];
+
+		sil9437_get_earc_int_status(sil9437, earc, ARRAY_SIZE(earc));
+
+		if ((earc[SIL9437_INTR0_IDX] & 0x03) ||
+		    (earc[SIL9437_INTR1_IDX] & 0x07) ||
+		    (earc[SIL9437_INTR4_IDX] & 0x08)) {
+			regmap_read(sil9437->earc_regmap, SIL9437_DISC_DISCONN_RX8, &reg);
+			dev_dbg(sil9437->dev, "eARC RX state changed to %s\n",
+				sil9437_sm_states[reg & 0x7]);
+
+			if (reg == 0x02) { /*IDLE2*/
+				regmap_update_bits(sil9437->earc_regmap,
+						   SIL9437_CMC_SLAVE_RX7,
+						   0x03, 0x01);
+			}
+		}
+
+		status = earc[SIL9437_INTR2_IDX];
+		if (status & 0x01)
+			dev_dbg(sil9437->dev, "eARC RX SPDIF new Fs\n");
+		if (status & 0x04)
+			dev_dbg(sil9437->dev, "eARC RX received CS\n");
+		if (status & 0x08)
+			dev_dbg(sil9437->dev, "eARC RX CS changed\n");
+
+		status = earc[SIL9437_INTR3_IDX];
+		if (status & 0x04)
+			dev_dbg(sil9437->dev, "MUTE set\n");
+		if (status & 0x80)
+			dev_dbg(sil9437->dev, "eARC Tx reads EDID through Common Mode Channel\n");
+
+		status = earc[SIL9437_INTR4_IDX];
+		if (status & 0x02)
+			dev_dbg(sil9437->dev, "eARC RX heartbeat lost\n");
+
+		status = earc[SIL9437_INTR6_IDX];
+		if (status & 0x08)
+			dev_dbg(sil9437->dev, "eARC RX discovery timeout is asserted\n");
+		if (status & 0x10)
+			dev_dbg(sil9437->dev, "eARC RX discovery timeout is deasserted\n");
+
+		status = earc[SIL9437_INTR7_IDX];
+		if (status & 0x01)
+			dev_dbg(sil9437->dev, "eARC Virtual HPD changed\n");
+		if (status & 0x08) {
+			regmap_read(sil9437->earc_regmap, SIL9437_CMC_SLAVE_RX5,
+				    &reg);
+			dev_dbg(sil9437->dev, "eARC CAP_CHNG_CONF from TX = %d\n",
+				!!(reg & 0x08));
+			if (reg & 0x08) {
+				regmap_update_bits(sil9437->earc_regmap,
+						   SIL9437_CMC_SLAVE_RX6,
+						   0x08, 0x00);
+			}
+		}
+		if (status & 0x10) {
+			regmap_read(sil9437->earc_regmap, SIL9437_CMC_SLAVE_RX5,
+				    &reg);
+			dev_dbg(sil9437->dev, "eARC STAT_CHNG_CONF from TX = %d\n",
+				!!(reg & 0x10));
+			if (reg & 0x10) {
+				regmap_update_bits(sil9437->earc_regmap,
+						   SIL9437_CMC_SLAVE_RX6,
+						   0x10, 0x00);
+			}
+		}
+
+		status = earc[SIL9437_INTR8_IDX];
+		if (status & 0x01) {
+			regmap_read(sil9437->earc_regmap, SIL9437_DISC_DISCONN_RX8,
+				    &reg);
+			dev_dbg(sil9437->dev, "eARC RX %s\n", reg & 0x80 ?
+				"connected" : "disconnected");
+
+			sil9437_earc_configure(sil9437, !!(reg & 0x80));
+		}
+		if (status & 0x02)
+			dev_dbg(sil9437->dev, "eARC TX read Latency successfully\n");
+		if (status & 0x04) {
+			regmap_read(sil9437->earc_regmap, SIL9437_CMC_SLAVE_RX8,
+				    &reg);
+			dev_dbg(sil9437->dev, "eARC Latency Request from TX %ums\n", reg);
+			sil9437->erx_latency = reg;
+		}
+	}
+
+	return IRQ_HANDLED;
+}
+
+static int sil9437_irq_setup(struct sil9437_priv *sil9437, int irq)
+{
+	struct device_node *np = sil9437->dev->of_node;
+	unsigned long irq_flags;
+	u8 int_ctrl = 0;
+
+	irq_flags = IRQF_ONESHOT;
+	int_ctrl = 0;
+
+	if (of_get_property(np, "sil,irq-open-drain", NULL))
+		int_ctrl |= 0x04;
+
+	if (of_get_property(np, "sil,irq-active-low", NULL)) {
+		int_ctrl |= 0x02;
+		irq_flags |= IRQF_TRIGGER_FALLING | IRQF_TRIGGER_LOW;
+	} else {
+		irq_flags |= IRQF_TRIGGER_RISING | IRQF_TRIGGER_HIGH;
+	}
+
+	regmap_update_bits(sil9437->sys_regmap,
+			   SIL9437_TOP_INT_CTRL, 0x8e, int_ctrl);
+
+	return devm_request_threaded_irq(sil9437->dev, irq, NULL,
+					 sil9437_irq_handler_thread, irq_flags,
+					 "sil9437_irq", sil9437);
+}
+
+static int sil9437_irq_probe(struct sil9437_priv *sil9437)
+{
+	struct gpio_desc *gpio_int;
+
+	gpio_int = devm_gpiod_get_optional(sil9437->dev, "sil,irq", GPIOD_IN);
+	if (IS_ERR(gpio_int)) {
+		dev_err(sil9437->dev, "failed to get irq line\n");
+		return PTR_ERR(gpio_int);
+	}
+
+	return gpiod_to_irq(gpio_int);
+}
+
+static int sil9437_probe(struct i2c_client *i2c,
+			 const struct i2c_device_id *id)
+{
+	struct device *dev = &i2c->dev;
+	struct sil9437_priv *sil9437;
+	struct regmap *regmap;
+	unsigned int id1, id2, chip_id;
+	u16 phy_addr, earc_addr;
+	int ret, i;
+	bool earc = false;
+
+	regmap = devm_regmap_init_i2c(i2c, &sil9437_sys_regmap_config);
+	if (IS_ERR(regmap))
+		return PTR_ERR(regmap);
+
+	sil9437 = devm_kzalloc(dev, sizeof(*sil9437), GFP_KERNEL);
+	if (!sil9437)
+		return -ENOMEM;
+
+	i2c_set_clientdata(i2c, sil9437);
+	dev_set_drvdata(dev, sil9437);
+
+	sil9437->dev = dev;
+	sil9437->sys_regmap = regmap;
+
+	mutex_init(&sil9437->lock);
+
+	sil9437->gpio_rst = devm_gpiod_get_optional(dev, "sil,reset",
+						    GPIOD_OUT_HIGH);
+	if (IS_ERR(sil9437->gpio_rst)) {
+		ret = PTR_ERR(sil9437->gpio_rst);
+		dev_err(dev, "failed to get reset line: %d\n", ret);
+		return ret;
+	}
+
+	if (sil9437->gpio_rst) {
+		gpiod_set_value_cansleep(sil9437->gpio_rst, 1);
+		usleep_range(200, 300);
+		gpiod_set_value_cansleep(sil9437->gpio_rst, 0);
+		usleep_range(50, 100);
+	}
+
+	ret = regmap_read(regmap, SIL9437_DEV_IDL, &id1);
+	if (ret < 0) {
+		if (ret == -ENOTCONN) {
+			/*
+			 * HACK: the i2c address pin is floating on Freebox's
+			 * early design; try to probe the alternative address.
+			 */
+			i2c->addr ^= 1;
+			ret = regmap_read(regmap, SIL9437_DEV_IDL, &id1);
+		}
+		if (ret < 0) {
+			dev_err(dev, "failed to read device ID: %d\n", ret);
+			return ret;
+		}
+	}
+
+	ret = regmap_read(regmap, SIL9437_DEV_IDH, &id2);
+	if (ret < 0) {
+		dev_err(dev, "failed to read device ID: %d\n", ret);
+		return ret;
+	}
+
+	chip_id = (id2 << 8) | id1;
+	if (chip_id != 0x9437 && chip_id != 0x9439) {
+		dev_err(dev, "invalid device ID: %#x\n", chip_id);
+		return -EINVAL;
+	}
+
+	ret = regmap_read(regmap, SIL9437_DEV_REV, &id1);
+	if (ret < 0) {
+		dev_err(dev, "failed to read device revision: %d\n", ret);
+		return ret;
+	}
+
+	dev_info(dev, "revision %c\n", id1 + 'A');
+
+	if (!sil9437->gpio_rst) {
+		ret = sil9437_soft_reset(sil9437);
+		if (ret < 0) {
+			dev_err(dev, "failed to issue reset: %d\n", ret);
+			return ret;
+		}
+	}
+
+	/* disable comma */
+	regmap_update_bits(sil9437->sys_regmap, SIL9437_SRST4, 0x02, 0x00);
+
+	/* setup irq handler */
+	if (i2c->irq <= 0)
+		i2c->irq = sil9437_irq_probe(sil9437);
+
+	if (i2c->irq < 0) {
+		dev_err(dev, "no irq resource found\n");
+		return i2c->irq;
+	}
+
+	ret = sil9437_irq_setup(sil9437, i2c->irq);
+	if (ret) {
+		dev_err(dev, "failed to request irq: %d\n", ret);
+		return ret;
+	}
+
+	/* create dummy i2c clients to access eARC RX and PHY registers */
+	earc_addr = sil9437_get_slave_addr(sil9437, "earc", 0x37);
+	phy_addr = sil9437_get_slave_addr(sil9437, "phy", 0x32);
+
+	regmap_write(regmap, SIL9437_I2C_ADDR0, 0x00);
+	regmap_write(regmap, SIL9437_I2C_ADDR1, 0x00);
+	regmap_write(regmap, SIL9437_I2C_ADDR2, 0x00);
+	regmap_write(regmap, SIL9437_I2C_ADDR3, earc_addr << 1);
+	regmap_write(regmap, SIL9437_I2C_ADDR4, phy_addr << 1);
+	regmap_write_bits(regmap, SIL9437_I2C_CONFIG, 0x01, 0x01);
+
+	sil9437->earc_i2c = i2c_new_dummy(i2c->adapter, earc_addr);
+	if (!sil9437->earc_i2c) {
+		dev_err(dev, "failed to register i2c client addr 0x%x\n",
+			earc_addr);
+		return -ENODEV;
+	}
+
+	sil9437->earc_regmap = devm_regmap_init_i2c(sil9437->earc_i2c,
+						    &sil9437_earc_regmap_config);
+	if (IS_ERR(sil9437->earc_regmap)) {
+		ret = PTR_ERR(sil9437->earc_regmap);
+		dev_err(dev, "failed to allocate eARC regmap: %d\n", ret);
+		goto err_unregister_i2c_earc;
+	}
+
+	sil9437->phy_i2c = i2c_new_dummy(i2c->adapter, phy_addr);
+	if (!sil9437->phy_i2c) {
+		ret = -ENODEV;
+		dev_err(dev, "failed to register i2c client addr 0x%x\n",
+			phy_addr);
+		goto err_unregister_i2c_earc;
+	}
+
+	sil9437->phy_regmap = devm_regmap_init_i2c(sil9437->phy_i2c,
+						   &sil9437_phy_regmap_config);
+	if (IS_ERR(sil9437->phy_regmap)) {
+		ret = PTR_ERR(sil9437->phy_regmap);
+		dev_err(dev, "failed to allocate PHY regmap: %d\n", ret);
+		goto err_unregister_i2c_phy;
+	}
+
+	/* disable test mode */
+	regmap_update_bits(sil9437->earc_regmap, SIL9437_TEST_CTRL, 0x01, 0x00);
+
+	/* enable timer */
+	regmap_update_bits(sil9437->sys_regmap, SIL9437_TIMER_CTRL, 0x03, 0x01);
+	regmap_write(sil9437->sys_regmap, SIL9437_TIMER_SCALE1, 0x0F);
+	regmap_write(sil9437->sys_regmap, SIL9437_TIMER_SCALE2, 0x27);
+
+	/* disable outputs */
+	regmap_write(sil9437->sys_regmap, SIL9437_IO_OEN, 0xFF);
+
+	/* set all GPIOs as input */
+	regmap_update_bits(sil9437->sys_regmap, SIL9437_GPIO_OEN_CTRL, 0x3f, 0x3f);
+
+	/* configure HPD_IN (GPIO4) so that it will go low on HPD removal */
+	regmap_update_bits(sil9437->sys_regmap, SIL9437_GPIO_PE_CTRL, 0x10, 0x10);
+	regmap_update_bits(sil9437->sys_regmap, SIL9437_GPIO_PU_CTRL, 0x10, 0x00);
+
+	/* set defaults as recommended by the datasheet */
+	regmap_write(sil9437->sys_regmap, SIL9437_CHIP_ID_I2C, 0x89);
+	regmap_write(sil9437->sys_regmap, SIL9437_SPDIF_SEL, 0xC0);
+	regmap_write(sil9437->earc_regmap, SIL9437_RX_I2S_CTRL1, 0xC0);
+	regmap_write(sil9437->earc_regmap, SIL9437_RX_I2S_CTRL2, 0xF0);
+	regmap_write(sil9437->earc_regmap, SIL9437_RX_AUDRX_CTRL, 0x21);
+	regmap_update_bits(sil9437->earc_regmap,
+			   SIL9437_RX_AUD_OUT_SPDIF_CTRL_1, 0x02, 0x02);
+	regmap_write(sil9437->earc_regmap, SIL9437_RX_CS_DEBUG1, 0x00);
+	regmap_write(sil9437->earc_regmap, SIL9437_RX_CS_DEBUG2, 0x00);
+	regmap_write(sil9437->earc_regmap, SIL9437_RX_CS_DEBUG3, 0x00);
+	regmap_update_bits(sil9437->earc_regmap, SIL9437_DISC_DISCONN_RX9,
+			   0x40, 0x40);
+	regmap_write(sil9437->earc_regmap, SIL9437_MUTE_EXP_EN_1, 0x84);
+	regmap_write(sil9437->phy_regmap, SIL9437_VIOLA_VCO_CAL, 0x83);
+	regmap_write(sil9437->phy_regmap, SIL9437_VIOLA_SLEW_N_SWING, 0x52);
+	regmap_write(sil9437->phy_regmap, SIL9437_VIOLA_SPRX, 0x21);
+	regmap_write(sil9437->phy_regmap, SIL9437_VIOLA_CM_BW_CTL, 0x08);
+	regmap_write(sil9437->phy_regmap, SIL9437_VIOLA_RX_CFG, 0x18);
+	regmap_write(sil9437->phy_regmap, SIL9437_VIOLA_MODE, 0xED);
+	regmap_write(sil9437->phy_regmap, SIL9437_VIOLA_TERM60, 0x0C);
+	regmap_write(sil9437->phy_regmap, SIL9437_VIOLA_TERM300, 0x0C);
+	regmap_update_bits(sil9437->sys_regmap, SIL9437_MCLK_IO_CTRL, 0x01, 0x00);
+	regmap_write(sil9437->phy_regmap, SIL9437_EARC_CFG0, 0xFF);
+	regmap_write(sil9437->phy_regmap, SIL9437_EARC_CFG1, 0x8C);
+
+	regmap_write(sil9437->earc_regmap, SIL9437_I2S_CS_PTR, 0x11);
+	regmap_update_bits(sil9437->earc_regmap, SIL9437_MUTE_CTRL, 0x01, 0x01);
+	regmap_write(sil9437->earc_regmap, SIL9437_RX_CS_MUTE_PTR, 0x90);
+	regmap_write(sil9437->earc_regmap, SIL9437_RX_CS_MUTE_MASK, 0x04);
+	regmap_write(sil9437->earc_regmap, SIL9437_RX_CS_MUTE_PATTERN, 0x04);
+
+	/* configure ECC mask */
+	regmap_write(sil9437->earc_regmap, SIL9437_RX_CS_ECC_PTR, 0x00);
+	regmap_write(sil9437->earc_regmap, SIL9437_RX_CS_ECC_MASK, 0x3A);
+	regmap_write(sil9437->earc_regmap, SIL9437_RX_CS_ECC_PATTERN, 0x02);
+	regmap_write(sil9437->earc_regmap, SIL9437_BCH_CTRL, 0x02);
+
+	/* write default EDID */
+	regmap_write(sil9437->earc_regmap, SIL9437_EDID_FIFO_ADDR, 0);
+	for (i = 0; i < ARRAY_SIZE(sil9437_earc_cap_ds); i++) {
+		regmap_write(sil9437->earc_regmap, SIL9437_EDID_FIFO_DATA,
+			     sil9437_earc_cap_ds[i]);
+	}
+
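+	/* earc is currently hard-coded false: configure legacy ARC mode. */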
+	if (!earc) {
+		regmap_write(sil9437->sys_regmap, SIL9437_SPDIF_SEL, 0x40);
+		regmap_write(sil9437->phy_regmap, SIL9437_VIOLA_MODE, 0x85);
+		regmap_write(sil9437->phy_regmap, SIL9437_EARC_CFG1, 0x18);
+		regmap_write(sil9437->phy_regmap, SIL9437_VIOLA_SLEW_N_SWING, 0x2B);
+		regmap_write(sil9437->phy_regmap, SIL9437_VIOLA_SPRX, 0x04);
+		regmap_write(sil9437->phy_regmap, SIL9437_VIOLA_TERM60, 0x0F);
+		regmap_write(sil9437->phy_regmap, SIL9437_VIOLA_TERM300, 0x0C);
+		regmap_write(sil9437->phy_regmap, SIL9437_VIOLA_CM_BW_CTL, 0x00);
+	}
+
+	/* enable interrupts */
+	regmap_write(sil9437->earc_regmap, SIL9437_INTR0_MASK, 0x03);
+	regmap_write(sil9437->earc_regmap, SIL9437_INTR1_MASK, 0x07);
+	regmap_write(sil9437->earc_regmap, SIL9437_INTR2_MASK, 0x0D);
+	regmap_write(sil9437->earc_regmap, SIL9437_INTR3_MASK, 0x84);
+	regmap_write(sil9437->earc_regmap, SIL9437_INTR4_MASK, 0x0A);
+	regmap_write(sil9437->earc_regmap, SIL9437_INTR5_MASK, 0x00);
+	regmap_write(sil9437->earc_regmap, SIL9437_INTR6_MASK, 0x18);
+	regmap_write(sil9437->earc_regmap, SIL9437_INTR7_MASK, 0x19);
+	regmap_write(sil9437->earc_regmap, SIL9437_INTR8_MASK, 0x07);
+	regmap_write(sil9437->sys_regmap, SIL9437_TOP_INTR1_MASK, 0x0E);
+	regmap_write(sil9437->sys_regmap, SIL9437_TOP_INTR2_MASK, 0x10);
+	regmap_write(sil9437->sys_regmap, SIL9437_TOP_INTR3_MASK, 0x04);
+
+	sil9437_enable_interrupts(sil9437, true);
+
+	/* enable comma */
+	regmap_update_bits(sil9437->sys_regmap, SIL9437_SRST4, 0x02, 0x02);
+
+	/* disable spdif output */
+	regmap_update_bits(sil9437->earc_regmap,
+			   SIL9437_RX_AUD_OUT_SPDIF_CTRL_1, 0x01, 0x00);
+
+	/* enable outputs */
+	regmap_write(sil9437->sys_regmap, SIL9437_IO_OEN, earc ? 0x00 : 0xBF);
+
+	/* disable mute */
+	regmap_update_bits(sil9437->earc_regmap, SIL9437_MUTE_CTRL, 0x01, 0x00);
+
+	ret = snd_soc_register_codec(dev, &soc_codec_dev_sil9437,
+				     &sil9437_dai, 1);
+	if (ret < 0) {
+		dev_err(dev, "failed to register CODEC: %d\n", ret);
+		goto err_unregister_i2c_phy;
+	}
+
+	return 0;
+
+err_unregister_i2c_phy:
+	i2c_unregister_device(sil9437->phy_i2c);
+err_unregister_i2c_earc:
+	i2c_unregister_device(sil9437->earc_i2c);
+	return ret;
+}
+
+static int sil9437_remove(struct i2c_client *i2c)
+{
+	struct sil9437_priv *sil9437 = i2c_get_clientdata(i2c);
+
+	snd_soc_unregister_codec(sil9437->dev);
+	i2c_unregister_device(sil9437->earc_i2c);
+	i2c_unregister_device(sil9437->phy_i2c);
+
+	return 0;
+}
+
+static const struct i2c_device_id sil9437_id[] = {
+	{ "sil9437", 0 },
+	{ /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(i2c, sil9437_id);
+
+static const struct of_device_id sil9437_of_match[] = {
+	{ .compatible = "sil,sil9437" },
+	{ /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, sil9437_of_match);
+
+static struct i2c_driver sil9437_driver = {
+	.driver = {
+		.name = "sil9437",
+		.of_match_table = sil9437_of_match,
+	},
+	.probe = sil9437_probe,
+	.remove = sil9437_remove,
+	.id_table = sil9437_id
+};
+
+module_i2c_driver(sil9437_driver);
+
+MODULE_DESCRIPTION("ASoC Sil9437 driver");
+MODULE_AUTHOR("Arnaud Vrac <avrac@freebox.fr>");
+MODULE_LICENSE("GPL");
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/sound/soc/codecs/sil9437.h	2019-01-22 16:16:29.535301030 +0100
@@ -0,0 +1,175 @@
+#ifndef SIL9437_H_
+# define SIL9437_H_
+
+/* System control and status registers */
+#define SIL9437_VND_IDL			0x00
+#define SIL9437_VND_IDH			0x01
+#define SIL9437_DEV_IDL			0x02
+#define SIL9437_DEV_IDH			0x03
+#define SIL9437_DEV_REV			0x04
+#define SIL9437_SRST			0x05
+#define SIL9437_SRST2			0x06
+#define SIL9437_SRST3			0x07
+#define SIL9437_SRST4			0x08
+#define SIL9437_IO_OEN			0x09
+#define SIL9437_GPIO_I_CTRL		0x0C
+#define SIL9437_GPIO_OEN_CTRL		0x0D
+#define SIL9437_GPIO_PE_CTRL		0x0E
+#define SIL9437_GPIO_PU_CTRL		0x0F
+#define SIL9437_GPIO_C_CTRL		0x10
+#define SIL9437_SPDIF_SEL		0x11
+#define SIL9437_TIMER_CTRL		0x12
+#define SIL9437_TIMER_VAL0		0x13
+#define SIL9437_TIMER_VAL1		0x14
+#define SIL9437_TIMER_SCALE1		0x15
+#define SIL9437_TIMER_SCALE2		0x16
+#define SIL9437_I2C_ADDR0		0x18
+#define SIL9437_I2C_ADDR1		0x19
+#define SIL9437_CHIP_ID_I2C		0x24
+#define SIL9437_MCLK_IO_CTRL		0x25
+#define SIL9437_I2C_ADDR2		0x29
+#define SIL9437_I2C_ADDR3		0x2A
+#define SIL9437_I2C_ADDR4		0x2B
+#define SIL9437_I2C_CONFIG		0x2C
+#define SIL9437_TOP_INTR_STATE		0x30
+#define SIL9437_TOP_INTR1		0x31
+#define SIL9437_TOP_INTR2		0x32
+#define SIL9437_TOP_INTR3		0x33
+#define SIL9437_TOP_INTR1_MASK		0x34
+#define SIL9437_TOP_INTR2_MASK		0x35
+#define SIL9437_TOP_INTR3_MASK		0x36
+#define SIL9437_TOP_INT_CTRL		0x37
+#define SIL9437_RX_FREQ_VAL0		0x38
+#define SIL9437_RX_FREQ_VAL1		0x39
+#define SIL9437_RX_FREQ_ZONE		0x3A
+#define SIL9437_RX_FREQ_THRES0		0x3B
+#define SIL9437_RX_FREQ_THRES1		0x3C
+#define SIL9437_RX_CLKDET_LOW_THRES0	0x3D
+#define SIL9437_RX_CLKDET_LOW_THRES1	0x3E
+#define SIL9437_RX_CLKDET_HIGH_THRES0	0x3F
+#define SIL9437_RX_CLKDET_HIGH_THRES1	0x40
+#define SIL9437_RX_FREQ_WIN0		0x41
+#define SIL9437_RX_FREQ_WIN1		0x42
+#define SIL9437_SWHRST			0xFF
+
+/* PHY registers */
+#define SIL9437_VIOLA_STATUS		0x60
+#define SIL9437_VIOLA_MODE		0x64
+#define SIL9437_EARC_CFG0		0x65
+#define SIL9437_EARC_CFG1		0x66
+#define SIL9437_VIOLA_TERM60		0x6C
+#define SIL9437_VIOLA_TERM300		0x6D
+#define SIL9437_VIOLA_SLEW_N_SWING	0x6E
+#define SIL9437_VIOLA_SPRX		0x6F
+#define SIL9437_VIOLA_VCO_CAL		0x70
+#define SIL9437_VIOLA_OSC_CFG		0x71
+#define SIL9437_VIOLA_CM_BW_CTL		0x74
+#define SIL9437_VIOLA_RX_CFG		0x78
+
+#define SIL9437_REG_PHY_MAX		SIL9437_VIOLA_RX_CFG
+
+/* eARC RX registers */
+#define SIL9437_RX_I2S_CTRL2		0x00
+#define SIL9437_RX_I2S_MAP		0x01
+#define SIL9437_RX_I2S_CTRL1		0x02
+#define SIL9437_RX_AUD_OUT_SPDIF_CTRL_1	0x03
+#define SIL9437_RX_AUD_OUT_CH_CTRL	0x06
+#define SIL9437_AUD_CHST0		0x07
+#define SIL9437_AUD_CHST1		0x08
+#define SIL9437_I2S_CHST2		0x09
+#define SIL9437_AUD_CHST3		0x0A
+#define SIL9437_AUD_CHST4		0x0B
+#define SIL9437_AUD_CHST5		0x0C
+#define SIL9437_AUD_CHST6		0x0D
+#define SIL9437_AUD_CS_EXTRA1		0x0E
+#define SIL9437_AUD_CS_EXTRA2		0x0F
+#define SIL9437_RX_CHST0		0x19
+#define SIL9437_RX_CHST1		0x1A
+#define SIL9437_RX_CHST2		0x1B
+#define SIL9437_RX_CHST3		0x1C
+#define SIL9437_RX_CHST4		0x1D
+#define SIL9437_RX_CHST5		0x1E
+#define SIL9437_RX_CHST6		0x1F
+#define SIL9437_RX_CS_EXTRA1		0x20
+#define SIL9437_RX_CS_EXTRA2		0x21
+#define SIL9437_I2S_CS_PTR		0x22
+#define SIL9437_RX_AUDRX_CTRL		0x23
+#define SIL9437_BSYNC_CTRL		0x27
+#define SIL9437_RX_SPDIF_CS_CTRL	0x2C
+#define SIL9437_RX_CS_DEBUG1		0x2D
+#define SIL9437_RX_CS_DEBUG2		0x2E
+#define SIL9437_RX_CS_DEBUG3		0x2F
+#define SIL9437_MUTE_CTRL		0x30
+#define SIL9437_MUTE_EXP_EN_0		0x31
+#define SIL9437_MUTE_EXP_EN_1		0x32
+#define SIL9437_MUTE_AUTO_RELEASE	0x33
+#define SIL9437_RX_CS_MUTE_PTR		0x35
+#define SIL9437_RX_CS_MUTE_MASK		0x36
+#define SIL9437_RX_CS_MUTE_PATTERN	0x37
+#define SIL9437_MCLK_CTRL		0x3C
+#define SIL9437_PKT_TOP_RX12		0x4C
+#define SIL9437_PKT_TOP_RX13		0x4D
+#define SIL9437_PKT_TOP_RX14		0x4E
+#define SIL9437_PKT_TOP_RX15		0x55
+#define SIL9437_PKT_TOP_RX16		0x56
+#define SIL9437_PKT_TOP_RX17		0x57
+#define SIL9437_DISC_DISCONN_RX4	0x64
+#define SIL9437_DISC_DISCONN_RX7	0x67
+#define SIL9437_DISC_DISCONN_RX8	0x68
+#define SIL9437_DISC_DISCONN_RX9	0x69
+#define SIL9437_CMC_SLAVE_RX4		0x73
+#define SIL9437_CMC_SLAVE_RX5		0x74
+#define SIL9437_CMC_SLAVE_RX6		0x75
+#define SIL9437_CMC_SLAVE_RX7		0x76
+#define SIL9437_CMC_SLAVE_RX8		0x77
+#define SIL9437_CMC_SLAVE_RX9		0x78
+#define SIL9437_CMC_SLAVE_RX10		0x79
+#define SIL9437_INTR_STATE		0xB0
+#define SIL9437_INTR0			0xB1
+#define SIL9437_INTR1			0xB2
+#define SIL9437_INTR2			0xB3
+#define SIL9437_INTR3			0xB4
+#define SIL9437_INTR4			0xB5
+#define SIL9437_INTR5			0xB6
+#define SIL9437_INTR6			0xB7
+#define SIL9437_INTR7			0xB8
+#define SIL9437_INTR8			0xB9
+#define SIL9437_INTR0_MASK		0xBA
+#define SIL9437_INTR1_MASK		0xBB
+#define SIL9437_INTR2_MASK		0xBC
+#define SIL9437_INTR3_MASK		0xBD
+#define SIL9437_INTR4_MASK		0xBE
+#define SIL9437_INTR5_MASK		0xBF
+#define SIL9437_INTR6_MASK		0xC0
+#define SIL9437_INTR7_MASK		0xC1
+#define SIL9437_INTR8_MASK		0xC2
+#define SIL9437_TEST_CTRL		0xD0
+#define SIL9437_TEST_IN_VAL0		0xD1
+#define SIL9437_RX_CS_ECC_PTR		0xDB
+#define SIL9437_RX_CS_ECC_MASK		0xDC
+#define SIL9437_RX_CS_ECC_PATTERN	0xDD
+#define SIL9437_BCH_CTRL		0xDE
+#define SIL9437_BCH_STAT		0xDF
+#define SIL9437_EDID_CTRL		0xE0
+#define SIL9437_EDID_FIFO_ADDR		0xE1
+#define SIL9437_EDID_FIFO_DATA		0xE4
+#define SIL9437_I2S_FS_MEAS_STAT_0	0xF1
+#define SIL9437_I2S_FS_MEAS_STAT_1	0xF2
+
+#define SIL9437_TOP_INTR1_IDX		0
+#define SIL9437_TOP_INTR2_IDX		1
+#define SIL9437_TOP_INTR3_IDX		2
+#define SIL9437_NUM_TOP_INTERRUPTS	3
+
+#define SIL9437_INTR0_IDX		0
+#define SIL9437_INTR1_IDX		1
+#define SIL9437_INTR2_IDX		2
+#define SIL9437_INTR3_IDX		3
+#define SIL9437_INTR4_IDX		4
+#define SIL9437_INTR5_IDX		5
+#define SIL9437_INTR6_IDX		6
+#define SIL9437_INTR7_IDX		7
+#define SIL9437_INTR8_IDX		8
+#define SIL9437_NUM_INTERRUPTS		9
+
+#endif /* !SIL9437_H_ */
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/sound/soc/codecs/tas5766.c	2019-01-22 16:16:29.539301066 +0100
@@ -0,0 +1,200 @@
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+
+/* Reset Registers */
+#define TAS5766M_RSTR_ADDR      1
+#define TAS5766M_RSTR_BIT       0x01
+
+/* Reset modules */
+#define TAS5766M_RSTM_ADDR      1
+#define TAS5766M_RSTM_BIT       0x10
+
+/* Standby request */
+#define TAS5766M_RQST_ADDR      2
+#define TAS5766M_RQST_BIT       0x10
+
+/* Power down request */
+#define TAS5766M_RQPD_ADDR      2
+#define TAS5766M_RQPD_BIT       0x01
+
+/* Mute requests */
+/* Mute Left channel */
+#define TAS5766M_RQML_ADDR      3
+#define TAS5766M_RQML_BIT       0x10
+/* Mute Right channel */
+#define TAS5766M_RQMR_ADDR      3
+#define TAS5766M_RQMR_BIT       0x01
+
+#define TAS5766M_PLCK_ADDR      4
+#define TAS5766M_PLCK_BIT       0x10
+
+#define TAS5766M_SREF_ADDR      13
+#define TAS5766M_PPDV_ADDR      20
+#define TAS5766M_PJDV_ADDR      21
+#define TAS5766M_PDDV_MSB_ADDR  22
+#define TAS5766M_PDDV_LSB_ADDR  23
+#define TAS5766M_PRDV_ADDR      24
+#define TAS5766M_DDSP_ADDR      27
+#define TAS5766M_DDAC_ADDR      28
+#define TAS5766M_DNCP_ADDR      29
+#define TAS5766M_DOSR_ADDR      30
+#define TAS5766M_IDAC_MSB_ADDR  35
+#define TAS5766M_IDAC_LSB_ADDR  36
+
+/* Disable clock divider autoset */
+#define TAS5766M_DCAS_ADDR      37
+#define TAS5766M_DCAS_BIT       0x02
+
+/* Ignore SCK detection */
+#define TAS5766M_IDSK_ADDR      37
+#define TAS5766M_IDSK_BIT       0x10
+
+/* Ignore Clock halt detection */
+#define TAS5766M_IDCH_ADDR      37
+#define TAS5766M_IDCH_BIT       0x08
+
+#define TAS5766M_AFMT_ADDR      40
+#define TAS5766M_AOFS_ADDR      41
+
+#define tas5766m_i2c_write(c, addr, val) \
+	i2c_smbus_write_byte_data(c, addr, val)
+
+#define tas5766m_i2c_read(c, addr) \
+	i2c_smbus_read_byte_data(c, addr)
+
+static int tas5766_i2c_probe(struct i2c_client *client,
+			     const struct i2c_device_id *id)
+{
+	int reg;
+	int ret;
+	int offset;
+
+	/* Select page 0 */
+	ret = tas5766m_i2c_write(client, 0, 0);
+	if (ret < 0)
+		return ret;
+
+	/* First reset the chip */
+	tas5766m_i2c_write(client, TAS5766M_RQST_ADDR, TAS5766M_RQST_BIT);
+	tas5766m_i2c_write(client, TAS5766M_RSTM_ADDR,
+			   TAS5766M_RSTM_BIT | TAS5766M_RSTR_BIT);
+	tas5766m_i2c_write(client, TAS5766M_RQST_ADDR, 0);
+
+	/* Mute outputs */
+	tas5766m_i2c_write(client, TAS5766M_RQML_ADDR,
+			   TAS5766M_RQML_BIT | TAS5766M_RQMR_BIT);
+
+	/* Disable clock divider autoset */
+	reg = tas5766m_i2c_read(client, TAS5766M_DCAS_ADDR);
+	reg |= TAS5766M_DCAS_BIT;
+	tas5766m_i2c_write(client, TAS5766M_DCAS_ADDR, reg);
+
+	/* Ignore SCK detection */
+	reg = tas5766m_i2c_read(client, TAS5766M_IDSK_ADDR);
+	reg |= TAS5766M_IDSK_BIT;
+	tas5766m_i2c_write(client, TAS5766M_IDSK_ADDR, reg);
+
+	/* Ignore clock halt */
+	reg = tas5766m_i2c_read(client, TAS5766M_IDCH_ADDR);
+	reg |= TAS5766M_IDCH_BIT;
+	tas5766m_i2c_write(client, TAS5766M_IDCH_ADDR, reg);
+
+	/* Set BCK as PLL reference */
+	tas5766m_i2c_write(client, TAS5766M_SREF_ADDR, 0x10);
+
+	/*
+	 * Configure PLL for 48 kHz operation
+	 * Input SCK (CLKIN) is 12.288 MHz
+	 * PLLCK = (CLKIN * J.D * R) / P = (12.288 * 16.0 * 1) / 2
+	 *       = 98.304 MHz
+	 *  J = 16
+	 *  D = 0
+	 *  R = 1
+	 *  P = 2
+	 */
+	tas5766m_i2c_write(client, TAS5766M_PPDV_ADDR,     1);
+	tas5766m_i2c_write(client, TAS5766M_PJDV_ADDR,    16);
+	tas5766m_i2c_write(client, TAS5766M_PDDV_MSB_ADDR, 0);
+	tas5766m_i2c_write(client, TAS5766M_PDDV_LSB_ADDR, 0);
+	tas5766m_i2c_write(client, TAS5766M_PRDV_ADDR,     0);
+
+	/* Configure output clock dividers */
+	tas5766m_i2c_write(client, TAS5766M_DDSP_ADDR,  1);
+	tas5766m_i2c_write(client, TAS5766M_DDAC_ADDR, 15);
+	tas5766m_i2c_write(client, TAS5766M_DNCP_ADDR,  3);
+	tas5766m_i2c_write(client, TAS5766M_DOSR_ADDR,  7);
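+	/*
+	 * The divider registers appear to hold (ratio - 1): DSP clock =
+	 * PLLCK / 2 = 49.152 MHz, DAC clock = PLLCK / 16 = 6.144 MHz,
+	 * with DOSR selecting a divide-by-8 oversampling clock.
+	 */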
+
+	/*
+	 * Number of DSP clock cycle in one sample frame
+	 * DSPCK = 49.152 MHz
+	 * DSPCK / 48 kHz = 1024
+	 */
+	tas5766m_i2c_write(client, TAS5766M_IDAC_MSB_ADDR, 0x04);
+	tas5766m_i2c_write(client, TAS5766M_IDAC_LSB_ADDR, 0x00);
+
+	tas5766m_i2c_write(client, TAS5766M_AFMT_ADDR, 0x13);
+
+	/* Set TDM slots offset */
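+	/*
+	 * Each DAC on the shared bus takes a 64 BCK-cycle offset (two
+	 * 32-bit slots) keyed off its I2C address, so three stereo
+	 * devices can occupy consecutive slot pairs on one data line.
+	 */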
+	switch (client->addr) {
+	default:
+	case 0x4c:
+		offset = 0;
+		break;
+	case 0x4d:
+		offset = 64;
+		break;
+	case 0x4e:
+		offset = 128;
+		break;
+	}
+
+	tas5766m_i2c_write(client, TAS5766M_AOFS_ADDR, offset);
+
+	/* Unmute outputs */
+	tas5766m_i2c_write(client, TAS5766M_RQML_ADDR, 0);
+
+	dev_info(&client->dev, "init done with offset %d\n", offset);
+
+	return 0;
+}
+
+static int tas5766_i2c_remove(struct i2c_client *client)
+{
+	/* Mute outputs */
+	tas5766m_i2c_write(client, TAS5766M_RQML_ADDR,
+			   TAS5766M_RQML_BIT | TAS5766M_RQMR_BIT);
+
+	/* Put in standby */
+	tas5766m_i2c_write(client, TAS5766M_RQST_ADDR, TAS5766M_RQST_BIT);
+
+	return 0;
+}
+
+static const struct of_device_id tas5766_of_match[] = {
+	{ .compatible = "ti,tas5766", },
+	{ /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, tas5766_of_match);
+
+static const struct i2c_device_id tas5766_i2c_id[] = {
+	{ "tas5766", 0 },
+	{ /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(i2c, tas5766_i2c_id);
+
+static struct i2c_driver tas5766_i2c_driver = {
+	.driver = {
+		.name = "tas5766",
+		.of_match_table = of_match_ptr(tas5766_of_match),
+	},
+	.probe = tas5766_i2c_probe,
+	.remove = tas5766_i2c_remove,
+	.id_table = tas5766_i2c_id,
+};
+module_i2c_driver(tas5766_i2c_driver);
+
+MODULE_DESCRIPTION("ASoC TAS5766 driver");
+MODULE_AUTHOR("Arnaud Vrac <avrac@freebox.fr");
+MODULE_LICENSE("GPL");
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/sound/soc/codecs/wcd9335.c	2019-10-29 09:26:26.125227467 +0100
@@ -0,0 +1,14444 @@
+/*
+ * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/firmware.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+#include <linux/device.h>
+#include <linux/printk.h>
+#include <linux/ratelimit.h>
+#include <linux/debugfs.h>
+#include <linux/wait.h>
+#include <linux/bitops.h>
+#include <linux/regmap.h>
+#include <linux/mfd/wcd9xxx/core.h>
+#include <linux/mfd/wcd9xxx/wcd9xxx-irq.h>
+#include <linux/mfd/wcd9xxx/wcd9xxx_registers.h>
+#include <linux/mfd/wcd9335/registers.h>
+#include <linux/mfd/wcd9xxx/pdata.h>
+#include <linux/regulator/consumer.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/pm_runtime.h>
+#include <linux/kernel.h>
+#include <linux/gpio.h>
+#include <linux/soundwire/swr-wcd.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+#include <sound/soc-dapm.h>
+#include <sound/tlv.h>
+#include <sound/info.h>
+#include "wcd9335.h"
+#include "wcd-mbhc-v2.h"
+#include "wcd9xxx-common-v2.h"
+#include "wcd9xxx-resmgr-v2.h"
+#include "wcd_cpe_core.h"
+#include "wcdcal-hwdep.h"
+
+#define TASHA_RX_PORT_START_NUMBER  16
+
+#define WCD9335_RATES_MASK (SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |\
+			    SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000 |\
+			    SNDRV_PCM_RATE_96000 | SNDRV_PCM_RATE_192000)
+/* Fractional Rates */
+#define WCD9335_FRAC_RATES_MASK (SNDRV_PCM_RATE_44100)
+
+#define WCD9335_MIX_RATES_MASK (SNDRV_PCM_RATE_48000 |\
+				SNDRV_PCM_RATE_96000 | SNDRV_PCM_RATE_192000)
+
+#define TASHA_FORMATS_S16_S24_LE (SNDRV_PCM_FMTBIT_S16_LE | \
+				  SNDRV_PCM_FMTBIT_S24_LE | \
+				  SNDRV_PCM_FMTBIT_S24_3LE)
+
+#define TASHA_FORMATS_S16_S24_S32_LE (SNDRV_PCM_FMTBIT_S16_LE | \
+				  SNDRV_PCM_FMTBIT_S24_LE | \
+				  SNDRV_PCM_FMTBIT_S24_3LE | \
+				  SNDRV_PCM_FMTBIT_S32_LE)
+
+#define TASHA_FORMATS (SNDRV_PCM_FMTBIT_S16_LE)
+
+/*
+ * Timeout in milliseconds: how long to wait for the SLIMbus
+ * channel removal interrupt to be received.
+ */
+#define TASHA_SLIM_CLOSE_TIMEOUT 1000
+#define TASHA_SLIM_IRQ_OVERFLOW (1 << 0)
+#define TASHA_SLIM_IRQ_UNDERFLOW (1 << 1)
+#define TASHA_SLIM_IRQ_PORT_CLOSED (1 << 2)
+#define TASHA_MCLK_CLK_12P288MHZ 12288000
+#define TASHA_MCLK_CLK_9P6MHZ 9600000
+
+#define TASHA_SLIM_PGD_PORT_INT_TX_EN0 (TASHA_SLIM_PGD_PORT_INT_EN0 + 2)
+
+#define TASHA_NUM_INTERPOLATORS 9
+#define TASHA_NUM_DECIMATORS 9
+
+#define BYTE_BIT_MASK(nr) (1 << ((nr) % BITS_PER_BYTE))
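+/* e.g. BYTE_BIT_MASK(10) = (1 << 2), the bit position within its byte */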
+#define TASHA_MAD_AUDIO_FIRMWARE_PATH "wcd9335/wcd9335_mad_audio.bin"
+#define TASHA_CPE_SS_ERR_STATUS_MEM_ACCESS (1 << 0)
+#define TASHA_CPE_SS_ERR_STATUS_WDOG_BITE (1 << 1)
+
+#define TASHA_CPE_FATAL_IRQS \
+	(TASHA_CPE_SS_ERR_STATUS_WDOG_BITE | \
+	 TASHA_CPE_SS_ERR_STATUS_MEM_ACCESS)
+
+#define SLIM_BW_CLK_GEAR_9 6200000
+#define SLIM_BW_UNVOTE 0
+
+#define CPE_FLL_CLK_75MHZ 75000000
+#define CPE_FLL_CLK_150MHZ 150000000
+#define WCD9335_REG_BITS 8
+
+#define WCD9335_MAX_VALID_ADC_MUX  13
+#define WCD9335_INVALID_ADC_MUX 9
+
+#define TASHA_DIG_CORE_REG_MIN  WCD9335_CDC_ANC0_CLK_RESET_CTL
+#define TASHA_DIG_CORE_REG_MAX  0xDFF
+
+/* Convert from vout ctl to micbias voltage in mV */
+#define WCD_VOUT_CTL_TO_MICB(v) (1000 + (v) * 50)
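+/* e.g. vout ctl 8 -> 1000 + 8 * 50 = 1400 mV */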
+
+#define TASHA_ZDET_NUM_MEASUREMENTS 900
+#define TASHA_MBHC_GET_C1(c)  (((c) & 0xC000) >> 14)
+#define TASHA_MBHC_GET_X1(x)  ((x) & 0x3FFF)
+/* z value compared in milliOhm */
+#define TASHA_MBHC_IS_SECOND_RAMP_REQUIRED(z) (((z) > 400000) || ((z) < 32000))
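+/* i.e. a second ramp is required when Z falls outside the 32..400 ohm band */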
+#define TASHA_MBHC_ZDET_CONST  (86 * 16384)
+#define TASHA_MBHC_MOISTURE_VREF  V_45_MV
+#define TASHA_MBHC_MOISTURE_IREF  I_3P0_UA
+
+#define TASHA_VERSION_ENTRY_SIZE 17
+
+#define WCD9335_AMIC_PWR_LEVEL_LP 0
+#define WCD9335_AMIC_PWR_LEVEL_DEFAULT 1
+#define WCD9335_AMIC_PWR_LEVEL_HP 2
+#define WCD9335_AMIC_PWR_LVL_MASK 0x60
+#define WCD9335_AMIC_PWR_LVL_SHIFT 0x5
+
+#define WCD9335_DEC_PWR_LVL_MASK 0x06
+#define WCD9335_DEC_PWR_LVL_LP 0x02
+#define WCD9335_DEC_PWR_LVL_HP 0x04
+#define WCD9335_DEC_PWR_LVL_DF 0x00
+#define WCD9335_STRING_LEN 100
+
+#define CALCULATE_VOUT_D(req_mv) ((((req_mv) - 650) * 10) / 25)
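+/* e.g. req_mv = 950 -> ((950 - 650) * 10) / 25 = 120 */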
+
+static int cpe_debug_mode;
+
+#define TASHA_MAX_MICBIAS 4
+#define DAPM_MICBIAS1_STANDALONE "MIC BIAS1 Standalone"
+#define DAPM_MICBIAS2_STANDALONE "MIC BIAS2 Standalone"
+#define DAPM_MICBIAS3_STANDALONE "MIC BIAS3 Standalone"
+#define DAPM_MICBIAS4_STANDALONE "MIC BIAS4 Standalone"
+
+#define DAPM_LDO_H_STANDALONE "LDO_H"
+
+module_param(cpe_debug_mode, int,
+	     S_IRUGO | S_IWUSR | S_IWGRP);
+MODULE_PARM_DESC(cpe_debug_mode, "boot cpe in debug mode");
+
+#define TASHA_DIG_CORE_COLLAPSE_TIMER_MS  (5 * 1000)
+
+#define MAX_ON_DEMAND_SUPPLY_NAME_LENGTH    64
+
+static char on_demand_supply_name[][MAX_ON_DEMAND_SUPPLY_NAME_LENGTH] = {
+	"cdc-vdd-mic-bias",
+};
+
+enum {
+	POWER_COLLAPSE,
+	POWER_RESUME,
+};
+
+enum tasha_sido_voltage {
+	SIDO_VOLTAGE_SVS_MV = 950,
+	SIDO_VOLTAGE_NOMINAL_MV = 1100,
+};
+
+static enum codec_variant codec_ver;
+
+static int dig_core_collapse_enable = 1;
+module_param(dig_core_collapse_enable, int,
+		S_IRUGO | S_IWUSR | S_IWGRP);
+MODULE_PARM_DESC(dig_core_collapse_enable, "enable/disable power gating");
+
+/* dig_core_collapse timer in seconds */
+static int dig_core_collapse_timer = (TASHA_DIG_CORE_COLLAPSE_TIMER_MS / 1000);
+module_param(dig_core_collapse_timer, int,
+		S_IRUGO | S_IWUSR | S_IWGRP);
+MODULE_PARM_DESC(dig_core_collapse_timer, "timer for power gating");
+
+/* SVS Scaling enable/disable */
+static int svs_scaling_enabled = 1;
+module_param(svs_scaling_enabled, int,
+		S_IRUGO | S_IWUSR | S_IWGRP);
+MODULE_PARM_DESC(svs_scaling_enabled, "enable/disable svs scaling");
+
+/* SVS buck setting */
+static int sido_buck_svs_voltage = SIDO_VOLTAGE_SVS_MV;
+module_param(sido_buck_svs_voltage, int,
+		S_IRUGO | S_IWUSR | S_IWGRP);
+MODULE_PARM_DESC(sido_buck_svs_voltage,
+			"setting for SVS voltage for SIDO BUCK");
+
+#define TASHA_TX_UNMUTE_DELAY_MS	40
+
+static int tx_unmute_delay = TASHA_TX_UNMUTE_DELAY_MS;
+module_param(tx_unmute_delay, int,
+		S_IRUGO | S_IWUSR | S_IWGRP);
+MODULE_PARM_DESC(tx_unmute_delay, "delay to unmute the tx path");
+
+static struct afe_param_slimbus_slave_port_cfg tasha_slimbus_slave_port_cfg = {
+	.minor_version = 1,
+	.slimbus_dev_id = AFE_SLIMBUS_DEVICE_1,
+	.slave_dev_pgd_la = 0,
+	.slave_dev_intfdev_la = 0,
+	.bit_width = 16,
+	.data_format = 0,
+	.num_channels = 1
+};
+
+struct tasha_mbhc_zdet_param {
+	u16 ldo_ctl;
+	u16 noff;
+	u16 nshift;
+	u16 btn5;
+	u16 btn6;
+	u16 btn7;
+};
+
+static struct afe_param_cdc_reg_page_cfg tasha_cdc_reg_page_cfg = {
+	.minor_version = AFE_API_VERSION_CDC_REG_PAGE_CFG,
+	.enable = 1,
+	.proc_id = AFE_CDC_REG_PAGE_ASSIGN_PROC_ID_1,
+};
+
+static struct afe_param_cdc_reg_cfg audio_reg_cfg[] = {
+	{
+		1,
+		(TASHA_REGISTER_START_OFFSET + WCD9335_SOC_MAD_MAIN_CTL_1),
+		HW_MAD_AUDIO_ENABLE, 0x1, WCD9335_REG_BITS, 0
+	},
+	{
+		1,
+		(TASHA_REGISTER_START_OFFSET + WCD9335_SOC_MAD_AUDIO_CTL_3),
+		HW_MAD_AUDIO_SLEEP_TIME, 0xF, WCD9335_REG_BITS, 0
+	},
+	{
+		1,
+		(TASHA_REGISTER_START_OFFSET + WCD9335_SOC_MAD_AUDIO_CTL_4),
+		HW_MAD_TX_AUDIO_SWITCH_OFF, 0x1, WCD9335_REG_BITS, 0
+	},
+	{
+		1,
+		(TASHA_REGISTER_START_OFFSET + WCD9335_INTR_CFG),
+		MAD_AUDIO_INT_DEST_SELECT_REG, 0x2, WCD9335_REG_BITS, 0
+	},
+	{
+		1,
+		(TASHA_REGISTER_START_OFFSET + WCD9335_INTR_PIN2_MASK3),
+		MAD_AUDIO_INT_MASK_REG, 0x1, WCD9335_REG_BITS, 0
+	},
+	{
+		1,
+		(TASHA_REGISTER_START_OFFSET + WCD9335_INTR_PIN2_STATUS3),
+		MAD_AUDIO_INT_STATUS_REG, 0x1, WCD9335_REG_BITS, 0
+	},
+	{
+		1,
+		(TASHA_REGISTER_START_OFFSET + WCD9335_INTR_PIN2_CLEAR3),
+		MAD_AUDIO_INT_CLEAR_REG, 0x1, WCD9335_REG_BITS, 0
+	},
+	{
+		1,
+		(TASHA_REGISTER_START_OFFSET + WCD9335_INTR_CFG),
+		VBAT_INT_DEST_SELECT_REG, 0x2, WCD9335_REG_BITS, 0
+	},
+	{
+		1,
+		(TASHA_REGISTER_START_OFFSET + WCD9335_INTR_PIN2_MASK3),
+		VBAT_INT_MASK_REG, 0x08, WCD9335_REG_BITS, 0
+	},
+	{
+		1,
+		(TASHA_REGISTER_START_OFFSET + WCD9335_INTR_PIN2_STATUS3),
+		VBAT_INT_STATUS_REG, 0x08, WCD9335_REG_BITS, 0
+	},
+	{
+		1,
+		(TASHA_REGISTER_START_OFFSET + WCD9335_INTR_PIN2_CLEAR3),
+		VBAT_INT_CLEAR_REG, 0x08, WCD9335_REG_BITS, 0
+	},
+	{
+		1,
+		(TASHA_REGISTER_START_OFFSET + WCD9335_INTR_CFG),
+		VBAT_RELEASE_INT_DEST_SELECT_REG, 0x2, WCD9335_REG_BITS, 0
+	},
+	{
+		1,
+		(TASHA_REGISTER_START_OFFSET + WCD9335_INTR_PIN2_MASK3),
+		VBAT_RELEASE_INT_MASK_REG, 0x10, WCD9335_REG_BITS, 0
+	},
+	{
+		1,
+		(TASHA_REGISTER_START_OFFSET + WCD9335_INTR_PIN2_STATUS3),
+		VBAT_RELEASE_INT_STATUS_REG, 0x10, WCD9335_REG_BITS, 0
+	},
+	{
+		1,
+		(TASHA_REGISTER_START_OFFSET + WCD9335_INTR_PIN2_CLEAR3),
+		VBAT_RELEASE_INT_CLEAR_REG, 0x10, WCD9335_REG_BITS, 0
+	},
+	{
+		1,
+		(TASHA_REGISTER_START_OFFSET + TASHA_SB_PGD_PORT_TX_BASE),
+		SB_PGD_PORT_TX_WATERMARK_N, 0x1E, WCD9335_REG_BITS, 0x1
+	},
+	{
+		1,
+		(TASHA_REGISTER_START_OFFSET + TASHA_SB_PGD_PORT_TX_BASE),
+		SB_PGD_PORT_TX_ENABLE_N, 0x1, WCD9335_REG_BITS, 0x1
+	},
+	{
+		1,
+		(TASHA_REGISTER_START_OFFSET + TASHA_SB_PGD_PORT_RX_BASE),
+		SB_PGD_PORT_RX_WATERMARK_N, 0x1E, WCD9335_REG_BITS, 0x1
+	},
+	{
+		1,
+		(TASHA_REGISTER_START_OFFSET + TASHA_SB_PGD_PORT_RX_BASE),
+		SB_PGD_PORT_RX_ENABLE_N, 0x1, WCD9335_REG_BITS, 0x1
+	},
+	{
+		1,
+		(TASHA_REGISTER_START_OFFSET + WCD9335_CDC_ANC0_IIR_ADAPT_CTL),
+		AANC_FF_GAIN_ADAPTIVE, 0x4, WCD9335_REG_BITS, 0
+	},
+	{
+		1,
+		(TASHA_REGISTER_START_OFFSET + WCD9335_CDC_ANC0_IIR_ADAPT_CTL),
+		AANC_FFGAIN_ADAPTIVE_EN, 0x8, WCD9335_REG_BITS, 0
+	},
+	{
+		1,
+		(TASHA_REGISTER_START_OFFSET + WCD9335_CDC_ANC0_FF_A_GAIN_CTL),
+		AANC_GAIN_CONTROL, 0xFF, WCD9335_REG_BITS, 0
+	},
+};
+
+static struct afe_param_cdc_reg_cfg_data tasha_audio_reg_cfg = {
+	.num_registers = ARRAY_SIZE(audio_reg_cfg),
+	.reg_data = audio_reg_cfg,
+};
+
+static struct afe_param_id_cdc_aanc_version tasha_cdc_aanc_version = {
+	.cdc_aanc_minor_version = AFE_API_VERSION_CDC_AANC_VERSION,
+	.aanc_hw_version        = AANC_HW_BLOCK_VERSION_2,
+};
+
+enum {
+	VI_SENSE_1,
+	VI_SENSE_2,
+	AIF4_SWITCH_VALUE,
+	AUDIO_NOMINAL,
+	CPE_NOMINAL,
+	HPH_PA_DELAY,
+	ANC_MIC_AMIC1,
+	ANC_MIC_AMIC2,
+	ANC_MIC_AMIC3,
+	ANC_MIC_AMIC4,
+	ANC_MIC_AMIC5,
+	ANC_MIC_AMIC6,
+	CLASSH_CONFIG,
+};
+
+enum {
+	AIF1_PB = 0,
+	AIF1_CAP,
+	AIF2_PB,
+	AIF2_CAP,
+	AIF3_PB,
+	AIF3_CAP,
+	AIF4_PB,
+	AIF_MIX1_PB,
+	AIF4_MAD_TX,
+	AIF4_VIFEED,
+	AIF5_CPE_TX,
+	NUM_CODEC_DAIS,
+};
+
+enum {
+	INTn_1_MIX_INP_SEL_ZERO = 0,
+	INTn_1_MIX_INP_SEL_DEC0,
+	INTn_1_MIX_INP_SEL_DEC1,
+	INTn_1_MIX_INP_SEL_IIR0,
+	INTn_1_MIX_INP_SEL_IIR1,
+	INTn_1_MIX_INP_SEL_RX0,
+	INTn_1_MIX_INP_SEL_RX1,
+	INTn_1_MIX_INP_SEL_RX2,
+	INTn_1_MIX_INP_SEL_RX3,
+	INTn_1_MIX_INP_SEL_RX4,
+	INTn_1_MIX_INP_SEL_RX5,
+	INTn_1_MIX_INP_SEL_RX6,
+	INTn_1_MIX_INP_SEL_RX7,
+};
+
+#define IS_VALID_NATIVE_FIFO_PORT(inp) \
+	(((inp) >= INTn_1_MIX_INP_SEL_RX0) && \
+	 ((inp) <= INTn_1_MIX_INP_SEL_RX3))
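+/* i.e. only the INTn_1 mixer inputs RX0..RX3 qualify as native FIFO ports */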
+
+enum {
+	INTn_2_INP_SEL_ZERO = 0,
+	INTn_2_INP_SEL_RX0,
+	INTn_2_INP_SEL_RX1,
+	INTn_2_INP_SEL_RX2,
+	INTn_2_INP_SEL_RX3,
+	INTn_2_INP_SEL_RX4,
+	INTn_2_INP_SEL_RX5,
+	INTn_2_INP_SEL_RX6,
+	INTn_2_INP_SEL_RX7,
+	INTn_2_INP_SEL_PROXIMITY,
+};
+
+enum {
+	INTERP_EAR = 0,
+	INTERP_HPHL,
+	INTERP_HPHR,
+	INTERP_LO1,
+	INTERP_LO2,
+	INTERP_LO3,
+	INTERP_LO4,
+	INTERP_SPKR1,
+	INTERP_SPKR2,
+};
+
+struct interp_sample_rate {
+	int sample_rate;
+	int rate_val;
+};
+
+static struct interp_sample_rate int_prim_sample_rate_val[] = {
+	{8000, 0x0},	/* 8K */
+	{16000, 0x1},	/* 16K */
+	{24000, -EINVAL},	/* 24K */
+	{32000, 0x3},	/* 32K */
+	{48000, 0x4},	/* 48K */
+	{96000, 0x5},	/* 96K */
+	{192000, 0x6},	/* 192K */
+	{384000, 0x7},	/* 384K */
+	{44100, 0x8},	/* 44.1K */
+};
+
+static struct interp_sample_rate int_mix_sample_rate_val[] = {
+	{48000, 0x4},	/* 48K */
+	{96000, 0x5},	/* 96K */
+	{192000, 0x6},	/* 192K */
+};
+
+static const struct wcd9xxx_ch tasha_rx_chs[TASHA_RX_MAX] = {
+	WCD9XXX_CH(TASHA_RX_PORT_START_NUMBER, 0),
+	WCD9XXX_CH(TASHA_RX_PORT_START_NUMBER + 1, 1),
+	WCD9XXX_CH(TASHA_RX_PORT_START_NUMBER + 2, 2),
+	WCD9XXX_CH(TASHA_RX_PORT_START_NUMBER + 3, 3),
+	WCD9XXX_CH(TASHA_RX_PORT_START_NUMBER + 4, 4),
+	WCD9XXX_CH(TASHA_RX_PORT_START_NUMBER + 5, 5),
+	WCD9XXX_CH(TASHA_RX_PORT_START_NUMBER + 6, 6),
+	WCD9XXX_CH(TASHA_RX_PORT_START_NUMBER + 7, 7),
+	WCD9XXX_CH(TASHA_RX_PORT_START_NUMBER + 8, 8),
+	WCD9XXX_CH(TASHA_RX_PORT_START_NUMBER + 9, 9),
+	WCD9XXX_CH(TASHA_RX_PORT_START_NUMBER + 10, 10),
+	WCD9XXX_CH(TASHA_RX_PORT_START_NUMBER + 11, 11),
+	WCD9XXX_CH(TASHA_RX_PORT_START_NUMBER + 12, 12),
+};
+
+static const struct wcd9xxx_ch tasha_tx_chs[TASHA_TX_MAX] = {
+	WCD9XXX_CH(0, 0),
+	WCD9XXX_CH(1, 1),
+	WCD9XXX_CH(2, 2),
+	WCD9XXX_CH(3, 3),
+	WCD9XXX_CH(4, 4),
+	WCD9XXX_CH(5, 5),
+	WCD9XXX_CH(6, 6),
+	WCD9XXX_CH(7, 7),
+	WCD9XXX_CH(8, 8),
+	WCD9XXX_CH(9, 9),
+	WCD9XXX_CH(10, 10),
+	WCD9XXX_CH(11, 11),
+	WCD9XXX_CH(12, 12),
+	WCD9XXX_CH(13, 13),
+	WCD9XXX_CH(14, 14),
+	WCD9XXX_CH(15, 15),
+};
+
+static const u32 vport_slim_check_table[NUM_CODEC_DAIS] = {
+	/* Must be defined in the same order as the DAI enum definitions */
+	0,
+	BIT(AIF2_CAP) | BIT(AIF3_CAP) | BIT(AIF4_MAD_TX) | BIT(AIF5_CPE_TX),
+	0,
+	BIT(AIF1_CAP) | BIT(AIF3_CAP) | BIT(AIF4_MAD_TX) | BIT(AIF5_CPE_TX),
+	0,
+	BIT(AIF1_CAP) | BIT(AIF2_CAP) | BIT(AIF4_MAD_TX) | BIT(AIF5_CPE_TX),
+	0,
+	0,
+	BIT(AIF1_CAP) | BIT(AIF2_CAP) | BIT(AIF3_CAP) | BIT(AIF5_CPE_TX),
+	0,
+	BIT(AIF1_CAP) | BIT(AIF2_CAP) | BIT(AIF3_CAP) | BIT(AIF4_MAD_TX),
+};
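+/*
+ * Reading the table above: each capture DAI's mask lists the other TX
+ * DAIs it must not share slave ports with (e.g. AIF1_CAP conflicts with
+ * AIF2_CAP, AIF3_CAP, AIF4_MAD_TX and AIF5_CPE_TX); playback DAIs have
+ * an empty mask.
+ */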
+
+static const u32 vport_i2s_check_table[NUM_CODEC_DAIS] = {
+	0,			/* AIF1_PB */
+	BIT(AIF2_CAP),		/* AIF1_CAP */
+	0,			/* AIF2_PB */
+	BIT(AIF1_CAP),		/* AIF2_CAP */
+};
+
+/* Codec supports 2 IIR filters */
+enum {
+	IIR0 = 0,
+	IIR1,
+	IIR_MAX,
+};
+
+/* Each IIR has 5 Filter Stages */
+enum {
+	BAND1 = 0,
+	BAND2,
+	BAND3,
+	BAND4,
+	BAND5,
+	BAND_MAX,
+};
+
+enum {
+	COMPANDER_1, /* HPH_L */
+	COMPANDER_2, /* HPH_R */
+	COMPANDER_3, /* LO1_DIFF */
+	COMPANDER_4, /* LO2_DIFF */
+	COMPANDER_5, /* LO3_SE */
+	COMPANDER_6, /* LO4_SE */
+	COMPANDER_7, /* SWR SPK CH1 */
+	COMPANDER_8, /* SWR SPK CH2 */
+	COMPANDER_MAX,
+};
+
+enum {
+	SRC_IN_HPHL,
+	SRC_IN_LO1,
+	SRC_IN_HPHR,
+	SRC_IN_LO2,
+	SRC_IN_SPKRL,
+	SRC_IN_LO3,
+	SRC_IN_SPKRR,
+	SRC_IN_LO4,
+};
+
+enum {
+	SPLINE_SRC0,
+	SPLINE_SRC1,
+	SPLINE_SRC2,
+	SPLINE_SRC3,
+	SPLINE_SRC_MAX,
+};
+
+/* wcd9335 interrupt table  */
+static const struct intr_data wcd9335_intr_table[] = {
+	{WCD9XXX_IRQ_SLIMBUS, false},
+	{WCD9335_IRQ_MBHC_SW_DET, true},
+	{WCD9335_IRQ_MBHC_BUTTON_PRESS_DET, true},
+	{WCD9335_IRQ_MBHC_BUTTON_RELEASE_DET, true},
+	{WCD9335_IRQ_MBHC_ELECT_INS_REM_DET, true},
+	{WCD9335_IRQ_MBHC_ELECT_INS_REM_LEG_DET, true},
+	{WCD9335_IRQ_FLL_LOCK_LOSS, false},
+	{WCD9335_IRQ_HPH_PA_CNPL_COMPLETE, false},
+	{WCD9335_IRQ_HPH_PA_CNPR_COMPLETE, false},
+	{WCD9335_IRQ_EAR_PA_CNP_COMPLETE, false},
+	{WCD9335_IRQ_LINE_PA1_CNP_COMPLETE, false},
+	{WCD9335_IRQ_LINE_PA2_CNP_COMPLETE, false},
+	{WCD9335_IRQ_LINE_PA3_CNP_COMPLETE, false},
+	{WCD9335_IRQ_LINE_PA4_CNP_COMPLETE, false},
+	{WCD9335_IRQ_HPH_PA_OCPL_FAULT, false},
+	{WCD9335_IRQ_HPH_PA_OCPR_FAULT, false},
+	{WCD9335_IRQ_EAR_PA_OCP_FAULT, false},
+	{WCD9335_IRQ_SOUNDWIRE, false},
+	{WCD9335_IRQ_VDD_DIG_RAMP_COMPLETE, false},
+	{WCD9335_IRQ_RCO_ERROR, false},
+	{WCD9335_IRQ_SVA_ERROR, false},
+	{WCD9335_IRQ_MAD_AUDIO, false},
+	{WCD9335_IRQ_MAD_BEACON, false},
+	{WCD9335_IRQ_SVA_OUTBOX1, true},
+	{WCD9335_IRQ_SVA_OUTBOX2, true},
+	{WCD9335_IRQ_MAD_ULTRASOUND, false},
+	{WCD9335_IRQ_VBAT_ATTACK, false},
+	{WCD9335_IRQ_VBAT_RESTORE, false},
+};
+
+static const DECLARE_TLV_DB_SCALE(digital_gain, 0, 1, 0);
+static const DECLARE_TLV_DB_SCALE(line_gain, 0, 7, 1);
+static const DECLARE_TLV_DB_SCALE(analog_gain, 0, 25, 1);
+
+static struct snd_soc_dai_driver tasha_dai[];
+static int wcd9335_get_micb_vout_ctl_val(u32 micb_mv);
+
+static int tasha_config_compander(struct snd_soc_codec *, int, int);
+static void tasha_codec_set_tx_hold(struct snd_soc_codec *, u16, bool);
+static int tasha_codec_internal_rco_ctrl(struct snd_soc_codec *codec,
+				  bool enable);
+
+/* Holds the instance of the soundwire platform device */
+struct tasha_swr_ctrl_data {
+	struct platform_device *swr_pdev;
+	struct ida swr_ida;
+};
+
+struct wcd_swr_ctrl_platform_data {
+	void *handle; /* holds codec private data */
+	int (*read)(void *handle, int reg);
+	int (*write)(void *handle, int reg, int val);
+	int (*bulk_write)(void *handle, u32 *reg, u32 *val, size_t len);
+	int (*clk)(void *handle, bool enable);
+	int (*handle_irq)(void *handle,
+			  irqreturn_t (*swrm_irq_handler)(int irq,
+							  void *data),
+			  void *swrm_handle,
+			  int action);
+};
+
+static struct wcd_mbhc_register
+	wcd_mbhc_registers[WCD_MBHC_REG_FUNC_MAX] = {
+	WCD_MBHC_REGISTER("WCD_MBHC_L_DET_EN",
+			  WCD9335_ANA_MBHC_MECH, 0x80, 7, 0),
+	WCD_MBHC_REGISTER("WCD_MBHC_GND_DET_EN",
+			  WCD9335_ANA_MBHC_MECH, 0x40, 6, 0),
+	WCD_MBHC_REGISTER("WCD_MBHC_MECH_DETECTION_TYPE",
+			  WCD9335_ANA_MBHC_MECH, 0x20, 5, 0),
+	WCD_MBHC_REGISTER("WCD_MBHC_MIC_CLAMP_CTL",
+			  WCD9335_MBHC_PLUG_DETECT_CTL, 0x30, 4, 0),
+	WCD_MBHC_REGISTER("WCD_MBHC_ELECT_DETECTION_TYPE",
+			  WCD9335_ANA_MBHC_ELECT, 0x08, 3, 0),
+	WCD_MBHC_REGISTER("WCD_MBHC_HS_L_DET_PULL_UP_CTRL",
+			  WCD9335_MBHC_PLUG_DETECT_CTL, 0xC0, 6, 0),
+	WCD_MBHC_REGISTER("WCD_MBHC_HS_L_DET_PULL_UP_COMP_CTRL",
+			  WCD9335_ANA_MBHC_MECH, 0x04, 2, 0),
+	WCD_MBHC_REGISTER("WCD_MBHC_HPHL_PLUG_TYPE",
+			  WCD9335_ANA_MBHC_MECH, 0x10, 4, 0),
+	WCD_MBHC_REGISTER("WCD_MBHC_GND_PLUG_TYPE",
+			  WCD9335_ANA_MBHC_MECH, 0x08, 3, 0),
+	WCD_MBHC_REGISTER("WCD_MBHC_SW_HPH_LP_100K_TO_GND",
+			  WCD9335_ANA_MBHC_MECH, 0x01, 0, 0),
+	WCD_MBHC_REGISTER("WCD_MBHC_ELECT_SCHMT_ISRC",
+			  WCD9335_ANA_MBHC_ELECT, 0x06, 1, 0),
+	WCD_MBHC_REGISTER("WCD_MBHC_FSM_EN",
+			  WCD9335_ANA_MBHC_ELECT, 0x80, 7, 0),
+	WCD_MBHC_REGISTER("WCD_MBHC_INSREM_DBNC",
+			  WCD9335_MBHC_PLUG_DETECT_CTL, 0x0F, 0, 0),
+	WCD_MBHC_REGISTER("WCD_MBHC_BTN_DBNC",
+			  WCD9335_MBHC_CTL_1, 0x03, 0, 0),
+	WCD_MBHC_REGISTER("WCD_MBHC_HS_VREF",
+			  WCD9335_MBHC_CTL_2, 0x03, 0, 0),
+	WCD_MBHC_REGISTER("WCD_MBHC_HS_COMP_RESULT",
+			  WCD9335_ANA_MBHC_RESULT_3, 0x08, 3, 0),
+	WCD_MBHC_REGISTER("WCD_MBHC_MIC_SCHMT_RESULT",
+			  WCD9335_ANA_MBHC_RESULT_3, 0x20, 5, 0),
+	WCD_MBHC_REGISTER("WCD_MBHC_HPHL_SCHMT_RESULT",
+			  WCD9335_ANA_MBHC_RESULT_3, 0x80, 7, 0),
+	WCD_MBHC_REGISTER("WCD_MBHC_HPHR_SCHMT_RESULT",
+			  WCD9335_ANA_MBHC_RESULT_3, 0x40, 6, 0),
+	WCD_MBHC_REGISTER("WCD_MBHC_OCP_FSM_EN",
+			  WCD9335_HPH_OCP_CTL, 0x10, 4, 0),
+	WCD_MBHC_REGISTER("WCD_MBHC_BTN_RESULT",
+			  WCD9335_ANA_MBHC_RESULT_3, 0x07, 0, 0),
+	WCD_MBHC_REGISTER("WCD_MBHC_BTN_ISRC_CTL",
+			  WCD9335_ANA_MBHC_ELECT, 0x70, 4, 0),
+	WCD_MBHC_REGISTER("WCD_MBHC_ELECT_RESULT",
+			  WCD9335_ANA_MBHC_RESULT_3, 0xFF, 0, 0),
+	WCD_MBHC_REGISTER("WCD_MBHC_MICB_CTRL",
+			  WCD9335_ANA_MICB2, 0xC0, 6, 0),
+	WCD_MBHC_REGISTER("WCD_MBHC_HPH_CNP_WG_TIME",
+			  WCD9335_HPH_CNP_WG_TIME, 0xFF, 0, 0),
+	WCD_MBHC_REGISTER("WCD_MBHC_HPHR_PA_EN",
+			  WCD9335_ANA_HPH, 0x40, 6, 0),
+	WCD_MBHC_REGISTER("WCD_MBHC_HPHL_PA_EN",
+			  WCD9335_ANA_HPH, 0x80, 7, 0),
+	WCD_MBHC_REGISTER("WCD_MBHC_HPH_PA_EN",
+			  WCD9335_ANA_HPH, 0xC0, 6, 0),
+	WCD_MBHC_REGISTER("WCD_MBHC_SWCH_LEVEL_REMOVE",
+			  WCD9335_ANA_MBHC_RESULT_3, 0x10, 4, 0),
+	WCD_MBHC_REGISTER("WCD_MBHC_PULLDOWN_CTRL",
+			  0, 0, 0, 0),
+	WCD_MBHC_REGISTER("WCD_MBHC_ANC_DET_EN",
+			  WCD9335_ANA_MBHC_ZDET, 0x01, 0, 0),
+	/*
+	 * The MBHC FSM status register is only available in Tasha 2.0,
+	 * so init with 0 here; the values will be updated once the
+	 * version is known.
+	 */
+	WCD_MBHC_REGISTER("WCD_MBHC_FSM_STATUS",
+			  0, 0, 0, 0),
+	WCD_MBHC_REGISTER("WCD_MBHC_MUX_CTL",
+			  WCD9335_MBHC_CTL_2, 0x70, 4, 0),
+};
+
+static const struct wcd_mbhc_intr intr_ids = {
+	.mbhc_sw_intr =  WCD9335_IRQ_MBHC_SW_DET,
+	.mbhc_btn_press_intr = WCD9335_IRQ_MBHC_BUTTON_PRESS_DET,
+	.mbhc_btn_release_intr = WCD9335_IRQ_MBHC_BUTTON_RELEASE_DET,
+	.mbhc_hs_ins_intr = WCD9335_IRQ_MBHC_ELECT_INS_REM_LEG_DET,
+	.mbhc_hs_rem_intr = WCD9335_IRQ_MBHC_ELECT_INS_REM_DET,
+	.hph_left_ocp = WCD9335_IRQ_HPH_PA_OCPL_FAULT,
+	.hph_right_ocp = WCD9335_IRQ_HPH_PA_OCPR_FAULT,
+};
+
+struct wcd_vbat {
+	bool is_enabled;
+	bool adc_config;
+	/* Variables to cache Vbat ADC output values */
+	u16 dcp1;
+	u16 dcp2;
+};
+
+struct hpf_work {
+	struct tasha_priv *tasha;
+	u8 decimator;
+	u8 hpf_cut_off_freq;
+	struct delayed_work dwork;
+};
+
+#define WCD9335_SPK_ANC_EN_DELAY_MS 350
+static int spk_anc_en_delay = WCD9335_SPK_ANC_EN_DELAY_MS;
+module_param(spk_anc_en_delay, int, S_IRUGO | S_IWUSR | S_IWGRP);
+MODULE_PARM_DESC(spk_anc_en_delay, "delay to enable anc in speaker path");
+
+struct spk_anc_work {
+	struct tasha_priv *tasha;
+	struct delayed_work dwork;
+};
+
+struct tx_mute_work {
+	struct tasha_priv *tasha;
+	u8 decimator;
+	struct delayed_work dwork;
+};
+
+struct tasha_priv {
+	struct device *dev;
+	struct wcd9xxx *wcd9xxx;
+
+	struct snd_soc_codec *codec;
+	u32 adc_count;
+	u32 rx_bias_count;
+	s32 dmic_0_1_clk_cnt;
+	s32 dmic_2_3_clk_cnt;
+	s32 dmic_4_5_clk_cnt;
+	s32 ldo_h_users;
+	s32 micb_ref[TASHA_MAX_MICBIAS];
+	s32 pullup_ref[TASHA_MAX_MICBIAS];
+
+	u32 anc_slot;
+	bool anc_func;
+
+	/* Vbat module */
+	struct wcd_vbat vbat;
+
+	/* cal info for codec */
+	struct fw_info *fw_data;
+
+	/* track tasha interface type */
+	u8 intf_type;
+
+	/* num of slim ports required */
+	struct wcd9xxx_codec_dai_data  dai[NUM_CODEC_DAIS];
+
+	/* SoundWire data structure */
+	struct tasha_swr_ctrl_data *swr_ctrl_data;
+	int nr;
+
+	/* compander */
+	int comp_enabled[COMPANDER_MAX];
+
+	/* Maintain the status of AUX PGA */
+	int aux_pga_cnt;
+	u8 aux_l_gain;
+	u8 aux_r_gain;
+
+	bool spkr_pa_widget_on;
+	struct regulator *spkdrv_reg;
+	struct regulator *spkdrv2_reg;
+
+	bool mbhc_started;
+	/* class h specific data */
+	struct wcd_clsh_cdc_data clsh_d;
+
+	struct afe_param_cdc_slimbus_slave_cfg slimbus_slave_cfg;
+
+	/*
+	 * list used to save/restore registers at start and
+	 * end of impedance measurement
+	 */
+	struct list_head reg_save_restore;
+
+	/* handle to cpe core */
+	struct wcd_cpe_core *cpe_core;
+	u32 current_cpe_clk_freq;
+	enum tasha_sido_voltage sido_voltage;
+	int sido_ccl_cnt;
+
+	u32 ana_rx_supplies;
+	/* Multiplication factor used for impedance detection */
+	int zdet_gain_mul_fact;
+
+	/* to track the status */
+	unsigned long status_mask;
+
+	struct work_struct tasha_add_child_devices_work;
+	struct wcd_swr_ctrl_platform_data swr_plat_data;
+
+	/* Port values for Rx and Tx codec_dai */
+	unsigned int rx_port_value[TASHA_RX_MAX];
+	unsigned int tx_port_value;
+
+	unsigned int vi_feed_value;
+	/* Tasha Interpolator Mode Select for EAR, HPH_L and HPH_R */
+	u32 hph_mode;
+
+	u16 prim_int_users[TASHA_NUM_INTERPOLATORS];
+	int spl_src_users[SPLINE_SRC_MAX];
+
+	struct wcd9xxx_resmgr_v2 *resmgr;
+	struct delayed_work power_gate_work;
+	struct mutex power_lock;
+	struct mutex sido_lock;
+
+	/* mbhc module */
+	struct wcd_mbhc mbhc;
+	struct blocking_notifier_head notifier;
+	struct mutex micb_lock;
+
+	struct clk *wcd_ext_clk;
+	struct clk *wcd_native_clk;
+	struct mutex swr_read_lock;
+	struct mutex swr_write_lock;
+	struct mutex swr_clk_lock;
+	int swr_clk_users;
+	int native_clk_users;
+	int (*zdet_gpio_cb)(struct snd_soc_codec *codec, bool high);
+
+	struct snd_info_entry *entry;
+	struct snd_info_entry *version_entry;
+	int power_active_ref;
+
+	struct on_demand_supply on_demand_list[ON_DEMAND_SUPPLIES_MAX];
+
+	int (*machine_codec_event_cb)(struct snd_soc_codec *codec,
+				      enum wcd9335_codec_event);
+	int spkr_gain_offset;
+	int spkr_mode;
+	int ear_spkr_gain;
+	struct hpf_work tx_hpf_work[TASHA_NUM_DECIMATORS];
+	struct tx_mute_work tx_mute_dwork[TASHA_NUM_DECIMATORS];
+	struct spk_anc_work spk_anc_dwork;
+	struct mutex codec_mutex;
+	int hph_l_gain;
+	int hph_r_gain;
+	int rx_7_count;
+	int rx_8_count;
+	bool clk_mode;
+	bool clk_internal;
+	/* Lock to prevent multiple functions voting at same time */
+	struct mutex sb_clk_gear_lock;
+	/* Count for functions voting or un-voting */
+	u32 ref_count;
+	/* Lock to protect mclk enablement */
+	struct mutex mclk_lock;
+};
+
+static int tasha_codec_vote_max_bw(struct snd_soc_codec *codec,
+				   bool vote);
+
+static const struct tasha_reg_mask_val tasha_spkr_default[] = {
+	{WCD9335_CDC_COMPANDER7_CTL3, 0x80, 0x80},
+	{WCD9335_CDC_COMPANDER8_CTL3, 0x80, 0x80},
+	{WCD9335_CDC_COMPANDER7_CTL7, 0x01, 0x01},
+	{WCD9335_CDC_COMPANDER8_CTL7, 0x01, 0x01},
+	{WCD9335_CDC_BOOST0_BOOST_CTL, 0x7C, 0x50},
+	{WCD9335_CDC_BOOST1_BOOST_CTL, 0x7C, 0x50},
+};
+
+static const struct tasha_reg_mask_val tasha_spkr_mode1[] = {
+	{WCD9335_CDC_COMPANDER7_CTL3, 0x80, 0x00},
+	{WCD9335_CDC_COMPANDER8_CTL3, 0x80, 0x00},
+	{WCD9335_CDC_COMPANDER7_CTL7, 0x01, 0x00},
+	{WCD9335_CDC_COMPANDER8_CTL7, 0x01, 0x00},
+	{WCD9335_CDC_BOOST0_BOOST_CTL, 0x7C, 0x44},
+	{WCD9335_CDC_BOOST1_BOOST_CTL, 0x7C, 0x44},
+};
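+/*
+ * Relative to tasha_spkr_default above, mode 1 clears the compander
+ * CTL3/CTL7 bits and lowers the boost control field from 0x50 to 0x44.
+ */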
+
+/*
+ * wcd9335_get_codec_info: Get codec specific information
+ *
+ * @wcd9xxx: pointer to wcd9xxx structure
+ * @wcd_type: pointer to wcd9xxx_codec_type structure
+ *
+ * Returns 0 for success or negative error code for failure
+ */
+int wcd9335_get_codec_info(struct wcd9xxx *wcd9xxx,
+			   struct wcd9xxx_codec_type *wcd_type)
+{
+	u16 id_minor, id_major;
+	struct regmap *wcd_regmap;
+	int rc, val, version = 0;
+
+	if (!wcd9xxx || !wcd_type)
+		return -EINVAL;
+
+	if (!wcd9xxx->regmap) {
+		dev_err(wcd9xxx->dev, "%s: wcd9xxx regmap is null!\n",
+			__func__);
+		return -EINVAL;
+	}
+	wcd_regmap = wcd9xxx->regmap;
+
+	rc = regmap_bulk_read(wcd_regmap, WCD9335_CHIP_TIER_CTRL_CHIP_ID_BYTE0,
+			(u8 *)&id_minor, sizeof(u16));
+	if (rc)
+		return -EINVAL;
+
+	rc = regmap_bulk_read(wcd_regmap, WCD9335_CHIP_TIER_CTRL_CHIP_ID_BYTE2,
+			      (u8 *)&id_major, sizeof(u16));
+	if (rc)
+		return -EINVAL;
+
+	dev_info(wcd9xxx->dev, "%s: wcd9xxx chip id major 0x%x, minor 0x%x\n",
+		 __func__, id_major, id_minor);
+
+	/* Version detection */
+	if (id_major == TASHA_MAJOR) {
+		regmap_read(wcd_regmap, WCD9335_CHIP_TIER_CTRL_EFUSE_VAL_OUT0,
+			    &val);
+		version = ((u8)val & 0x80) >> 7;
+	} else if (id_major == TASHA2P0_MAJOR) {
+		version = 2;
+	} else {
+		dev_err(wcd9xxx->dev, "%s: wcd9335 version unknown (major 0x%x, minor 0x%x)\n",
+			__func__, id_major, id_minor);
+	}
+
+	/* Fill codec type info */
+	wcd_type->id_major = id_major;
+	wcd_type->id_minor = id_minor;
+	wcd_type->num_irqs = WCD9335_NUM_IRQS;
+	wcd_type->version = version;
+	wcd_type->slim_slave_type = WCD9XXX_SLIM_SLAVE_ADDR_TYPE_1;
+	wcd_type->i2c_chip_status = 0x01;
+	wcd_type->intr_tbl = wcd9335_intr_table;
+	wcd_type->intr_tbl_size = ARRAY_SIZE(wcd9335_intr_table);
+
+	wcd_type->intr_reg[WCD9XXX_INTR_STATUS_BASE] =
+						WCD9335_INTR_PIN1_STATUS0;
+	wcd_type->intr_reg[WCD9XXX_INTR_CLEAR_BASE] =
+						WCD9335_INTR_PIN1_CLEAR0;
+	wcd_type->intr_reg[WCD9XXX_INTR_MASK_BASE] =
+						WCD9335_INTR_PIN1_MASK0;
+	wcd_type->intr_reg[WCD9XXX_INTR_LEVEL_BASE] =
+						WCD9335_INTR_LEVEL0;
+	wcd_type->intr_reg[WCD9XXX_INTR_CLR_COMMIT] =
+						WCD9335_INTR_CLR_COMMIT;
+
+	return rc;
+}
+EXPORT_SYMBOL(wcd9335_get_codec_info);
+
+/*
+ * wcd9335_bringdown: Bringdown WCD Codec
+ *
+ * @wcd9xxx: Pointer to wcd9xxx structure
+ *
+ * Returns 0 for success or negative error code for failure
+ */
+int wcd9335_bringdown(struct wcd9xxx *wcd9xxx)
+{
+	if (!wcd9xxx || !wcd9xxx->regmap)
+		return -EINVAL;
+
+	regmap_write(wcd9xxx->regmap, WCD9335_CODEC_RPM_PWR_CDC_DIG_HM_CTL,
+		     0x04);
+
+	return 0;
+}
+EXPORT_SYMBOL(wcd9335_bringdown);
+
+/*
+ * wcd9335_bringup: Bringup WCD Codec
+ *
+ * @wcd9xxx: Pointer to the wcd9xxx structure
+ *
+ * Returns 0 for success or negative error code for failure
+ */
+int wcd9335_bringup(struct wcd9xxx *wcd9xxx)
+{
+	int ret = 0;
+	int val, byte0;
+	struct regmap *wcd_regmap;
+
+	if (!wcd9xxx)
+		return -EINVAL;
+
+	if (!wcd9xxx->regmap) {
+		dev_err(wcd9xxx->dev, "%s: wcd9xxx regmap is null!\n",
+			__func__);
+		return -EINVAL;
+	}
+	wcd_regmap = wcd9xxx->regmap;
+
+	regmap_read(wcd_regmap, WCD9335_CHIP_TIER_CTRL_EFUSE_VAL_OUT0, &val);
+	regmap_read(wcd_regmap, WCD9335_CHIP_TIER_CTRL_CHIP_ID_BYTE0, &byte0);
+
+	if ((val < 0) || (byte0 < 0)) {
+		dev_err(wcd9xxx->dev, "%s: tasha codec version detection fail!\n",
+			__func__);
+		return -EINVAL;
+	}
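+	/*
+	 * Version decode, per the branches below: byte0 == 0x1 -> v2.0,
+	 * byte0 == 0 with efuse bit 7 set -> v1.1, byte0 == 0 with
+	 * efuse bit 7 clear -> v1.0.
+	 */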
+	if ((val & 0x80) && (byte0 == 0x0)) {
+		dev_info(wcd9xxx->dev, "%s: wcd9335 codec version is v1.1\n",
+			 __func__);
+		regmap_write(wcd_regmap, WCD9335_CODEC_RPM_RST_CTL, 0x01);
+		regmap_write(wcd_regmap, WCD9335_SIDO_SIDO_CCL_2, 0xFC);
+		regmap_write(wcd_regmap, WCD9335_SIDO_SIDO_CCL_4, 0x21);
+		regmap_write(wcd_regmap, WCD9335_CODEC_RPM_PWR_CDC_DIG_HM_CTL,
+			     0x5);
+		regmap_write(wcd_regmap, WCD9335_CODEC_RPM_PWR_CDC_DIG_HM_CTL,
+			     0x7);
+		regmap_write(wcd_regmap, WCD9335_CODEC_RPM_PWR_CDC_DIG_HM_CTL,
+			     0x3);
+		regmap_write(wcd_regmap, WCD9335_CODEC_RPM_RST_CTL, 0x3);
+	} else if (byte0 == 0x1) {
+		dev_info(wcd9xxx->dev, "%s: wcd9335 codec version is v2.0\n",
+			 __func__);
+		regmap_write(wcd_regmap, WCD9335_CODEC_RPM_RST_CTL, 0x01);
+		regmap_write(wcd_regmap, WCD9335_SIDO_SIDO_TEST_2, 0x00);
+		regmap_write(wcd_regmap, WCD9335_SIDO_SIDO_CCL_8, 0x6F);
+		regmap_write(wcd_regmap, WCD9335_BIAS_VBG_FINE_ADJ, 0x65);
+		regmap_write(wcd_regmap, WCD9335_CODEC_RPM_PWR_CDC_DIG_HM_CTL,
+			     0x5);
+		regmap_write(wcd_regmap, WCD9335_CODEC_RPM_PWR_CDC_DIG_HM_CTL,
+			     0x7);
+		regmap_write(wcd_regmap, WCD9335_CODEC_RPM_PWR_CDC_DIG_HM_CTL,
+			     0x3);
+		regmap_write(wcd_regmap, WCD9335_CODEC_RPM_RST_CTL, 0x3);
+	} else if ((byte0 == 0) && (!(val & 0x80))) {
+		dev_info(wcd9xxx->dev, "%s: wcd9335 codec version is v1.0\n",
+			 __func__);
+		regmap_write(wcd_regmap, WCD9335_CODEC_RPM_RST_CTL, 0x01);
+		regmap_write(wcd_regmap, WCD9335_SIDO_SIDO_CCL_2, 0xFC);
+		regmap_write(wcd_regmap, WCD9335_SIDO_SIDO_CCL_4, 0x21);
+		regmap_write(wcd_regmap, WCD9335_CODEC_RPM_PWR_CDC_DIG_HM_CTL,
+			     0x3);
+		regmap_write(wcd_regmap, WCD9335_CODEC_RPM_RST_CTL, 0x3);
+	} else {
+		dev_err(wcd9xxx->dev, "%s: tasha codec version unknown\n",
+			__func__);
+		ret = -EINVAL;
+	}
+
+	return ret;
+}
+EXPORT_SYMBOL(wcd9335_bringup);
+
+/**
+ * tasha_set_spkr_gain_offset - offset the speaker path
+ * gain with the given offset value.
+ *
+ * @codec: codec instance
+ * @offset: Indicates speaker path gain offset value.
+ *
+ * Returns 0 on success or -EINVAL on error.
+ */
+int tasha_set_spkr_gain_offset(struct snd_soc_codec *codec, int offset)
+{
+	struct tasha_priv *priv = snd_soc_codec_get_drvdata(codec);
+
+	if (!priv)
+		return -EINVAL;
+
+	priv->spkr_gain_offset = offset;
+	return 0;
+}
+EXPORT_SYMBOL(tasha_set_spkr_gain_offset);
+
+/**
+ * tasha_set_spkr_mode - Configures speaker compander and smartboost
+ * settings based on speaker mode.
+ *
+ * @codec: codec instance
+ * @mode: Indicates speaker configuration mode.
+ *
+ * Returns 0 on success or -EINVAL on error.
+ */
+int tasha_set_spkr_mode(struct snd_soc_codec *codec, int mode)
+{
+	struct tasha_priv *priv = snd_soc_codec_get_drvdata(codec);
+	int i;
+	const struct tasha_reg_mask_val *regs;
+	int size;
+
+	if (!priv)
+		return -EINVAL;
+
+	switch (mode) {
+	case SPKR_MODE_1:
+		regs = tasha_spkr_mode1;
+		size = ARRAY_SIZE(tasha_spkr_mode1);
+		break;
+	default:
+		regs = tasha_spkr_default;
+		size = ARRAY_SIZE(tasha_spkr_default);
+		break;
+	}
+
+	priv->spkr_mode = mode;
+	for (i = 0; i < size; i++)
+		snd_soc_update_bits(codec, regs[i].reg,
+				    regs[i].mask, regs[i].val);
+	return 0;
+}
+EXPORT_SYMBOL(tasha_set_spkr_mode);
+
+static void tasha_enable_sido_buck(struct snd_soc_codec *codec)
+{
+	struct tasha_priv *tasha = snd_soc_codec_get_drvdata(codec);
+
+	snd_soc_update_bits(codec, WCD9335_ANA_RCO, 0x80, 0x80);
+	snd_soc_update_bits(codec, WCD9335_ANA_BUCK_CTL, 0x02, 0x02);
+	/* 100us sleep needed after IREF settings */
+	usleep_range(100, 110);
+	snd_soc_update_bits(codec, WCD9335_ANA_BUCK_CTL, 0x04, 0x04);
+	/* 100us sleep needed after VREF settings */
+	usleep_range(100, 110);
+	tasha->resmgr->sido_input_src = SIDO_SOURCE_RCO_BG;
+}
+
+static void tasha_cdc_sido_ccl_enable(struct tasha_priv *tasha, bool ccl_flag)
+{
+	struct snd_soc_codec *codec = tasha->codec;
+
+	if (!codec)
+		return;
+
+	if (!TASHA_IS_2_0(tasha->wcd9xxx)) {
+		dev_dbg(codec->dev, "%s: tasha version < 2p0, return\n",
+			__func__);
+		return;
+	}
+	dev_dbg(codec->dev, "%s: sido_ccl_cnt=%d, ccl_flag:%d\n",
+			__func__, tasha->sido_ccl_cnt, ccl_flag);
+	if (ccl_flag) {
+		if (++tasha->sido_ccl_cnt == 1)
+			snd_soc_update_bits(codec,
+				WCD9335_SIDO_SIDO_CCL_10, 0xFF, 0x6E);
+	} else {
+		if (tasha->sido_ccl_cnt == 0) {
+			dev_dbg(codec->dev, "%s: sido_ccl already disabled\n",
+				__func__);
+			return;
+		}
+		if (--tasha->sido_ccl_cnt == 0)
+			snd_soc_update_bits(codec,
+				WCD9335_SIDO_SIDO_CCL_10, 0xFF, 0x02);
+	}
+}
+
+static bool tasha_cdc_is_svs_enabled(struct tasha_priv *tasha)
+{
+	return TASHA_IS_2_0(tasha->wcd9xxx) && svs_scaling_enabled;
+}
+
+static int tasha_cdc_req_mclk_enable(struct tasha_priv *tasha,
+				     bool enable)
+{
+	int ret = 0;
+
+	mutex_lock(&tasha->mclk_lock);
+	if (enable) {
+		tasha_cdc_sido_ccl_enable(tasha, true);
+		ret = clk_prepare_enable(tasha->wcd_ext_clk);
+		if (ret) {
+			dev_err(tasha->dev, "%s: ext clk enable failed\n",
+				__func__);
+			goto unlock_mutex;
+		}
+		/* get BG */
+		wcd_resmgr_enable_master_bias(tasha->resmgr);
+		/* get MCLK */
+		wcd_resmgr_enable_clk_block(tasha->resmgr, WCD_CLK_MCLK);
+	} else {
+		/* put MCLK */
+		wcd_resmgr_disable_clk_block(tasha->resmgr, WCD_CLK_MCLK);
+		/* put BG */
+		wcd_resmgr_disable_master_bias(tasha->resmgr);
+		clk_disable_unprepare(tasha->wcd_ext_clk);
+		tasha_cdc_sido_ccl_enable(tasha, false);
+	}
+unlock_mutex:
+	mutex_unlock(&tasha->mclk_lock);
+	return ret;
+}
+
+static int tasha_cdc_check_sido_value(enum tasha_sido_voltage req_mv)
+{
+	if ((req_mv != SIDO_VOLTAGE_SVS_MV) &&
+		(req_mv != SIDO_VOLTAGE_NOMINAL_MV))
+		return -EINVAL;
+
+	return 0;
+}
+
+static void tasha_codec_apply_sido_voltage(
+				struct tasha_priv *tasha,
+				enum tasha_sido_voltage req_mv)
+{
+	u32 vout_d_val;
+	struct snd_soc_codec *codec = tasha->codec;
+	int ret;
+
+	if (!codec)
+		return;
+
+	if (!tasha_cdc_is_svs_enabled(tasha))
+		return;
+
+	if ((sido_buck_svs_voltage != SIDO_VOLTAGE_SVS_MV) &&
+		(sido_buck_svs_voltage != SIDO_VOLTAGE_NOMINAL_MV))
+		sido_buck_svs_voltage = SIDO_VOLTAGE_SVS_MV;
+
+	ret = tasha_cdc_check_sido_value(req_mv);
+	if (ret < 0) {
+		dev_dbg(codec->dev, "%s: requested mv=%d not in range\n",
+			__func__, req_mv);
+		return;
+	}
+	if (req_mv == tasha->sido_voltage) {
+		dev_dbg(codec->dev, "%s: Already at requested mv=%d\n",
+			__func__, req_mv);
+		return;
+	}
+	if (req_mv == sido_buck_svs_voltage) {
+		if (test_bit(AUDIO_NOMINAL, &tasha->status_mask) ||
+			test_bit(CPE_NOMINAL, &tasha->status_mask)) {
+			dev_dbg(codec->dev,
+				"%s: nominal client running, status_mask=%lu\n",
+				__func__, tasha->status_mask);
+			return;
+		}
+	}
+	/* compute the vout_d step value */
+	vout_d_val = CALCULATE_VOUT_D(req_mv);
+	snd_soc_write(codec, WCD9335_ANA_BUCK_VOUT_D, vout_d_val & 0xFF);
+	snd_soc_update_bits(codec, WCD9335_ANA_BUCK_CTL, 0x80, 0x80);
+
+	/* 1 msec sleep required after SIDO Vout_D voltage change */
+	usleep_range(1000, 1100);
+	tasha->sido_voltage = req_mv;
+	dev_dbg(codec->dev,
+		"%s: updated SIDO buck Vout_D to %d, vout_d step = %u\n",
+		__func__, tasha->sido_voltage, vout_d_val);
+
+	snd_soc_update_bits(codec, WCD9335_ANA_BUCK_CTL,
+				0x80, 0x00);
+}
+
+static int tasha_codec_update_sido_voltage(
+				struct tasha_priv *tasha,
+				enum tasha_sido_voltage req_mv)
+{
+	int ret = 0;
+
+	if (!tasha_cdc_is_svs_enabled(tasha))
+		return ret;
+
+	mutex_lock(&tasha->sido_lock);
+	/* enable mclk before setting SIDO voltage */
+	ret = tasha_cdc_req_mclk_enable(tasha, true);
+	if (ret) {
+		dev_err(tasha->dev, "%s: ext clk enable failed\n",
+			__func__);
+		goto err;
+	}
+	tasha_codec_apply_sido_voltage(tasha, req_mv);
+	tasha_cdc_req_mclk_enable(tasha, false);
+
+err:
+	mutex_unlock(&tasha->sido_lock);
+	return ret;
+}
+
+int tasha_enable_efuse_sensing(struct snd_soc_codec *codec)
+{
+	struct tasha_priv *priv = snd_soc_codec_get_drvdata(codec);
+
+	tasha_cdc_mclk_enable(codec, true, false);
+
+	if (!TASHA_IS_2_0(priv->wcd9xxx))
+		snd_soc_update_bits(codec, WCD9335_CHIP_TIER_CTRL_EFUSE_CTL,
+				    0x1E, 0x02);
+	snd_soc_update_bits(codec, WCD9335_CHIP_TIER_CTRL_EFUSE_CTL,
+			    0x01, 0x01);
+	/*
+	 * 5ms sleep required after enabling efuse control
+	 * before checking the status.
+	 */
+	usleep_range(5000, 5500);
+	if (!(snd_soc_read(codec, WCD9335_CHIP_TIER_CTRL_EFUSE_STATUS) & 0x01))
+		WARN(1, "%s: Efuse sense is not complete\n", __func__);
+
+	if (TASHA_IS_2_0(priv->wcd9xxx)) {
+		if (!(snd_soc_read(codec,
+			WCD9335_CHIP_TIER_CTRL_EFUSE_VAL_OUT0) & 0x40))
+			snd_soc_update_bits(codec, WCD9335_HPH_R_ATEST,
+					    0x04, 0x00);
+		tasha_enable_sido_buck(codec);
+	}
+
+	tasha_cdc_mclk_enable(codec, false, false);
+
+	return 0;
+}
+EXPORT_SYMBOL(tasha_enable_efuse_sensing);
+
+void *tasha_get_afe_config(struct snd_soc_codec *codec,
+			   enum afe_config_type config_type)
+{
+	struct tasha_priv *priv = snd_soc_codec_get_drvdata(codec);
+
+	switch (config_type) {
+	case AFE_SLIMBUS_SLAVE_CONFIG:
+		return &priv->slimbus_slave_cfg;
+	case AFE_CDC_REGISTERS_CONFIG:
+		return &tasha_audio_reg_cfg;
+	case AFE_SLIMBUS_SLAVE_PORT_CONFIG:
+		return &tasha_slimbus_slave_port_cfg;
+	case AFE_AANC_VERSION:
+		return &tasha_cdc_aanc_version;
+	case AFE_CLIP_BANK_SEL:
+		return NULL;
+	case AFE_CDC_CLIP_REGISTERS_CONFIG:
+		return NULL;
+	case AFE_CDC_REGISTER_PAGE_CONFIG:
+		return &tasha_cdc_reg_page_cfg;
+	default:
+		dev_err(codec->dev, "%s: Unknown config_type 0x%x\n",
+			__func__, config_type);
+		return NULL;
+	}
+}
+EXPORT_SYMBOL(tasha_get_afe_config);
+
+/*
+ * tasha_event_register: Registers a machine driver callback
+ * function with the codec private data, for use after an ADSP
+ * sub-system restart (SSR). The callback is invoked by the codec
+ * driver once the codec comes out of reset after the ADSP SSR.
+ *
+ * @machine_event_cb: callback function from machine driver
+ * @codec: Codec instance
+ *
+ * Return: none
+ */
+void tasha_event_register(
+	int (*machine_event_cb)(struct snd_soc_codec *codec,
+				enum wcd9335_codec_event),
+	struct snd_soc_codec *codec)
+{
+	struct tasha_priv *tasha = snd_soc_codec_get_drvdata(codec);
+
+	if (tasha)
+		tasha->machine_codec_event_cb = machine_event_cb;
+	else
+		dev_dbg(codec->dev, "%s: Invalid tasha_priv data\n", __func__);
+}
+EXPORT_SYMBOL(tasha_event_register);
+
+static int tasha_mbhc_request_irq(struct snd_soc_codec *codec,
+				   int irq, irq_handler_t handler,
+				   const char *name, void *data)
+{
+	struct tasha_priv *tasha = snd_soc_codec_get_drvdata(codec);
+	struct wcd9xxx *wcd9xxx = tasha->wcd9xxx;
+	struct wcd9xxx_core_resource *core_res =
+				&wcd9xxx->core_res;
+
+	return wcd9xxx_request_irq(core_res, irq, handler, name, data);
+}
+
+static void tasha_mbhc_irq_control(struct snd_soc_codec *codec,
+				   int irq, bool enable)
+{
+	struct tasha_priv *tasha = snd_soc_codec_get_drvdata(codec);
+	struct wcd9xxx *wcd9xxx = tasha->wcd9xxx;
+	struct wcd9xxx_core_resource *core_res =
+				&wcd9xxx->core_res;
+
+	if (enable)
+		wcd9xxx_enable_irq(core_res, irq);
+	else
+		wcd9xxx_disable_irq(core_res, irq);
+}
+
+static int tasha_mbhc_free_irq(struct snd_soc_codec *codec,
+			       int irq, void *data)
+{
+	struct tasha_priv *tasha = snd_soc_codec_get_drvdata(codec);
+	struct wcd9xxx *wcd9xxx = tasha->wcd9xxx;
+	struct wcd9xxx_core_resource *core_res =
+				&wcd9xxx->core_res;
+
+	wcd9xxx_free_irq(core_res, irq, data);
+	return 0;
+}
+
+static void tasha_mbhc_clk_setup(struct snd_soc_codec *codec,
+				 bool enable)
+{
+	if (enable)
+		snd_soc_update_bits(codec, WCD9335_MBHC_CTL_1,
+				    0x80, 0x80);
+	else
+		snd_soc_update_bits(codec, WCD9335_MBHC_CTL_1,
+				    0x80, 0x00);
+}
+
+static int tasha_mbhc_btn_to_num(struct snd_soc_codec *codec)
+{
+	return snd_soc_read(codec, WCD9335_ANA_MBHC_RESULT_3) & 0x7;
+}
+
+static void tasha_mbhc_mbhc_bias_control(struct snd_soc_codec *codec,
+					 bool enable)
+{
+	if (enable)
+		snd_soc_update_bits(codec, WCD9335_ANA_MBHC_ELECT,
+				    0x01, 0x01);
+	else
+		snd_soc_update_bits(codec, WCD9335_ANA_MBHC_ELECT,
+				    0x01, 0x00);
+}
+
+static void tasha_mbhc_program_btn_thr(struct snd_soc_codec *codec,
+				       s16 *btn_low, s16 *btn_high,
+				       int num_btn, bool is_micbias)
+{
+	int i;
+	int vth;
+
+	if (num_btn > WCD_MBHC_DEF_BUTTONS) {
+		dev_err(codec->dev, "%s: invalid number of buttons: %d\n",
+			__func__, num_btn);
+		return;
+	}
+	/*
+	 * Tasha just needs one set of thresholds for button detection
+	 * due to micbias voltage ramp to pullup upon button press. So
+	 * btn_low and is_micbias are ignored and always program button
+	 * thresholds using btn_high.
+	 */
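+	/*
+	 * e.g. btn_high[i] = 450 (mV) gives vth = (450 * 2) / 25 = 36,
+	 * written to bits 7:2 of the per-button threshold register.
+	 */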
+	for (i = 0; i < num_btn; i++) {
+		vth = ((btn_high[i] * 2) / 25) & 0x3F;
+		snd_soc_update_bits(codec, WCD9335_ANA_MBHC_BTN0 + i,
+				    0xFC, vth << 2);
+		dev_dbg(codec->dev, "%s: btn_high[%d]: %d, vth: %d\n",
+			__func__, i, btn_high[i], vth);
+	}
+}
+
+static bool tasha_mbhc_lock_sleep(struct wcd_mbhc *mbhc, bool lock)
+{
+	struct snd_soc_codec *codec = mbhc->codec;
+	struct tasha_priv *tasha = snd_soc_codec_get_drvdata(codec);
+	struct wcd9xxx *wcd9xxx = tasha->wcd9xxx;
+	struct wcd9xxx_core_resource *core_res =
+				&wcd9xxx->core_res;
+
+	if (lock)
+		return wcd9xxx_lock_sleep(core_res);
+
+	wcd9xxx_unlock_sleep(core_res);
+	return false;
+}
+
+static int tasha_mbhc_register_notifier(struct wcd_mbhc *mbhc,
+					struct notifier_block *nblock,
+					bool enable)
+{
+	struct snd_soc_codec *codec = mbhc->codec;
+	struct tasha_priv *tasha = snd_soc_codec_get_drvdata(codec);
+
+	if (enable)
+		return blocking_notifier_chain_register(&tasha->notifier,
+							nblock);
+	else
+		return blocking_notifier_chain_unregister(&tasha->notifier,
+							  nblock);
+}
+
+static bool tasha_mbhc_micb_en_status(struct wcd_mbhc *mbhc, int micb_num)
+{
+	u8 val;
+
+	if (micb_num == MIC_BIAS_2) {
+		val = (snd_soc_read(mbhc->codec, WCD9335_ANA_MICB2) >> 6);
+		if (val == 0x01)
+			return true;
+	}
+	return false;
+}
+
+static bool tasha_mbhc_hph_pa_on_status(struct snd_soc_codec *codec)
+{
+	return (snd_soc_read(codec, WCD9335_ANA_HPH) & 0xC0) != 0;
+}
+
+static void tasha_mbhc_hph_l_pull_up_control(struct snd_soc_codec *codec,
+					enum mbhc_hs_pullup_iref pull_up_cur)
+{
+	struct tasha_priv *tasha = snd_soc_codec_get_drvdata(codec);
+
+	if (!tasha)
+		return;
+
+	/* Default pull up current to 2uA */
+	if (pull_up_cur < I_OFF || pull_up_cur > I_3P0_UA ||
+	    pull_up_cur == I_DEFAULT)
+		pull_up_cur = I_2P0_UA;
+
+	dev_dbg(codec->dev, "%s: HS pull up current:%d\n",
+		__func__, pull_up_cur);
+
+	if (TASHA_IS_2_0(tasha->wcd9xxx))
+		snd_soc_update_bits(codec, WCD9335_MBHC_PLUG_DETECT_CTL,
+			    0xC0, pull_up_cur << 6);
+	else
+		snd_soc_update_bits(codec, WCD9335_MBHC_PLUG_DETECT_CTL,
+			    0xC0, 0x40);
+}
+
+static int tasha_enable_ext_mb_source(struct wcd_mbhc *mbhc,
+		bool turn_on)
+{
+	struct snd_soc_codec *codec = mbhc->codec;
+	struct tasha_priv *tasha = snd_soc_codec_get_drvdata(codec);
+	int ret = 0;
+	struct on_demand_supply *supply;
+
+	if (!tasha)
+		return -EINVAL;
+
+	supply = &tasha->on_demand_list[ON_DEMAND_MICBIAS];
+	if (!supply->supply) {
+		dev_dbg(codec->dev, "%s: warning: on-demand supply not present for %s\n",
+				__func__, "onDemand Micbias");
+		return ret;
+	}
+
+	dev_dbg(codec->dev, "%s turn_on: %d count: %d\n", __func__, turn_on,
+		supply->ondemand_supply_count);
+
+	if (turn_on) {
+		if (!(supply->ondemand_supply_count)) {
+			ret = snd_soc_dapm_force_enable_pin(
+				snd_soc_codec_get_dapm(codec),
+				"MICBIAS_REGULATOR");
+			snd_soc_dapm_sync(snd_soc_codec_get_dapm(codec));
+		}
+		supply->ondemand_supply_count++;
+	} else {
+		if (supply->ondemand_supply_count > 0)
+			supply->ondemand_supply_count--;
+		if (!(supply->ondemand_supply_count)) {
+			ret = snd_soc_dapm_disable_pin(
+				snd_soc_codec_get_dapm(codec),
+				"MICBIAS_REGULATOR");
+			snd_soc_dapm_sync(snd_soc_codec_get_dapm(codec));
+		}
+	}
+
+	if (ret)
+		dev_err(codec->dev, "%s: Failed to %s external micbias source\n",
+			__func__, turn_on ? "enable" : "disable");
+	else
+		dev_dbg(codec->dev, "%s: %s external micbias source\n",
+			__func__, turn_on ? "Enabled" : "Disabled");
+
+	return ret;
+}
+
+static int tasha_micbias_control(struct snd_soc_codec *codec,
+				 int micb_num,
+				 int req, bool is_dapm)
+{
+	struct tasha_priv *tasha = snd_soc_codec_get_drvdata(codec);
+	int micb_index = micb_num - 1;
+	u16 micb_reg;
+	int pre_off_event = 0, post_off_event = 0;
+	int post_on_event = 0, post_dapm_off = 0;
+	int post_dapm_on = 0;
+
+	if ((micb_index < 0) || (micb_index > TASHA_MAX_MICBIAS - 1)) {
+		dev_err(codec->dev, "%s: Invalid micbias index, micb_ind:%d\n",
+			__func__, micb_index);
+		return -EINVAL;
+	}
+	switch (micb_num) {
+	case MIC_BIAS_1:
+		micb_reg = WCD9335_ANA_MICB1;
+		break;
+	case MIC_BIAS_2:
+		micb_reg = WCD9335_ANA_MICB2;
+		pre_off_event = WCD_EVENT_PRE_MICBIAS_2_OFF;
+		post_off_event = WCD_EVENT_POST_MICBIAS_2_OFF;
+		post_on_event = WCD_EVENT_POST_MICBIAS_2_ON;
+		post_dapm_on = WCD_EVENT_POST_DAPM_MICBIAS_2_ON;
+		post_dapm_off = WCD_EVENT_POST_DAPM_MICBIAS_2_OFF;
+		break;
+	case MIC_BIAS_3:
+		micb_reg = WCD9335_ANA_MICB3;
+		break;
+	case MIC_BIAS_4:
+		micb_reg = WCD9335_ANA_MICB4;
+		break;
+	default:
+		dev_err(codec->dev, "%s: Invalid micbias number: %d\n",
+			__func__, micb_num);
+		return -EINVAL;
+	}
+	mutex_lock(&tasha->micb_lock);
+
+	switch (req) {
+	case MICB_PULLUP_ENABLE:
+		tasha->pullup_ref[micb_index]++;
+		if ((tasha->pullup_ref[micb_index] == 1) &&
+		    (tasha->micb_ref[micb_index] == 0))
+			snd_soc_update_bits(codec, micb_reg, 0xC0, 0x80);
+		break;
+	case MICB_PULLUP_DISABLE:
+		if (tasha->pullup_ref[micb_index] > 0)
+			tasha->pullup_ref[micb_index]--;
+		if ((tasha->pullup_ref[micb_index] == 0) &&
+		    (tasha->micb_ref[micb_index] == 0))
+			snd_soc_update_bits(codec, micb_reg, 0xC0, 0x00);
+		break;
+	case MICB_ENABLE:
+		tasha->micb_ref[micb_index]++;
+		if (tasha->micb_ref[micb_index] == 1) {
+			snd_soc_update_bits(codec, micb_reg, 0xC0, 0x40);
+			if (post_on_event)
+				blocking_notifier_call_chain(&tasha->notifier,
+						post_on_event, &tasha->mbhc);
+		}
+		if (is_dapm && post_dapm_on)
+			blocking_notifier_call_chain(&tasha->notifier,
+					post_dapm_on, &tasha->mbhc);
+		break;
+	case MICB_DISABLE:
+		if (tasha->micb_ref[micb_index] > 0)
+			tasha->micb_ref[micb_index]--;
+		if ((tasha->micb_ref[micb_index] == 0) &&
+		    (tasha->pullup_ref[micb_index] > 0))
+			snd_soc_update_bits(codec, micb_reg, 0xC0, 0x80);
+		else if ((tasha->micb_ref[micb_index] == 0) &&
+			 (tasha->pullup_ref[micb_index] == 0)) {
+			if (pre_off_event)
+				blocking_notifier_call_chain(&tasha->notifier,
+						pre_off_event, &tasha->mbhc);
+			snd_soc_update_bits(codec, micb_reg, 0xC0, 0x00);
+			if (post_off_event)
+				blocking_notifier_call_chain(&tasha->notifier,
+						post_off_event, &tasha->mbhc);
+		}
+		if (is_dapm && post_dapm_off)
+			blocking_notifier_call_chain(&tasha->notifier,
+					post_dapm_off, &tasha->mbhc);
+		break;
+	}
+
+	dev_dbg(codec->dev, "%s: micb_num:%d, micb_ref: %d, pullup_ref: %d\n",
+		__func__, micb_num, tasha->micb_ref[micb_index],
+		tasha->pullup_ref[micb_index]);
+
+	mutex_unlock(&tasha->micb_lock);
+
+	return 0;
+}
+
+static int tasha_mbhc_request_micbias(struct snd_soc_codec *codec,
+				      int micb_num, int req)
+{
+	int ret;
+
+	/*
+	 * If micbias is requested, make sure that there is a vote
+	 * to enable mclk
+	 */
+	if (req == MICB_ENABLE)
+		tasha_cdc_mclk_enable(codec, true, false);
+
+	ret = tasha_micbias_control(codec, micb_num, req, false);
+
+	/*
+	 * Release the mclk vote when micbias disable
+	 * is requested
+	 */
+	if (req == MICB_DISABLE)
+		tasha_cdc_mclk_enable(codec, false, false);
+
+	return ret;
+}
+
+static void tasha_mbhc_micb_ramp_control(struct snd_soc_codec *codec,
+					bool enable)
+{
+	if (enable) {
+		snd_soc_update_bits(codec, WCD9335_ANA_MICB2_RAMP,
+				    0x1C, 0x0C);
+		snd_soc_update_bits(codec, WCD9335_ANA_MICB2_RAMP,
+				    0x80, 0x80);
+	} else {
+		snd_soc_update_bits(codec, WCD9335_ANA_MICB2_RAMP,
+				    0x80, 0x00);
+		snd_soc_update_bits(codec, WCD9335_ANA_MICB2_RAMP,
+				    0x1C, 0x00);
+	}
+}
+
+static struct firmware_cal *tasha_get_hwdep_fw_cal(struct wcd_mbhc *mbhc,
+						   enum wcd_cal_type type)
+{
+	struct tasha_priv *tasha;
+	struct firmware_cal *hwdep_cal;
+	struct snd_soc_codec *codec = mbhc->codec;
+
+	if (!codec) {
+		pr_err("%s: NULL codec pointer\n", __func__);
+		return NULL;
+	}
+	tasha = snd_soc_codec_get_drvdata(codec);
+	hwdep_cal = wcdcal_get_fw_cal(tasha->fw_data, type);
+	if (!hwdep_cal)
+		dev_err(codec->dev, "%s: cal not sent by %d\n",
+			__func__, type);
+
+	return hwdep_cal;
+}
+
+static int tasha_mbhc_micb_adjust_voltage(struct snd_soc_codec *codec,
+					  int req_volt,
+					  int micb_num)
+{
+	int cur_vout_ctl, req_vout_ctl;
+	int micb_reg, micb_val, micb_en;
+
+	switch (micb_num) {
+	case MIC_BIAS_1:
+		micb_reg = WCD9335_ANA_MICB1;
+		break;
+	case MIC_BIAS_2:
+		micb_reg = WCD9335_ANA_MICB2;
+		break;
+	case MIC_BIAS_3:
+		micb_reg = WCD9335_ANA_MICB3;
+		break;
+	case MIC_BIAS_4:
+		micb_reg = WCD9335_ANA_MICB4;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	/*
+	 * If the requested micbias voltage is the same as the current
+	 * one, just return. Otherwise adjust the voltage to the
+	 * requested value. If micbias is already enabled, momentarily
+	 * switch to pull-up (to avoid a slow micbias ramp up or down),
+	 * change the micbias value, and then re-enable micbias.
+	 */
+	micb_val = snd_soc_read(codec, micb_reg);
+	micb_en = (micb_val & 0xC0) >> 6;
+	cur_vout_ctl = micb_val & 0x3F;
+
+	req_vout_ctl = wcd9335_get_micb_vout_ctl_val(req_volt);
+	if (IS_ERR_VALUE(req_vout_ctl))
+		return -EINVAL;
+	if (cur_vout_ctl == req_vout_ctl)
+		return 0;
+
+	dev_dbg(codec->dev, "%s: micb_num: %d, cur_mv: %d, req_mv: %d, micb_en: %d\n",
+		 __func__, micb_num, WCD_VOUT_CTL_TO_MICB(cur_vout_ctl),
+		 req_volt, micb_en);
+
+	if (micb_en == 0x1)
+		snd_soc_update_bits(codec, micb_reg, 0xC0, 0x80);
+
+	snd_soc_update_bits(codec, micb_reg, 0x3F, req_vout_ctl);
+
+	if (micb_en == 0x1) {
+		snd_soc_update_bits(codec, micb_reg, 0xC0, 0x40);
+		/*
+		 * Add 2ms delay as per HW requirement after enabling
+		 * micbias
+		 */
+		usleep_range(2000, 2100);
+	}
+
+	return 0;
+}
+
+static int tasha_mbhc_micb_ctrl_threshold_mic(struct snd_soc_codec *codec,
+					      int micb_num, bool req_en)
+{
+	struct tasha_priv *tasha = snd_soc_codec_get_drvdata(codec);
+	struct wcd9xxx_pdata *pdata = dev_get_platdata(codec->dev->parent);
+	int rc, micb_mv;
+
+	if (micb_num != MIC_BIAS_2)
+		return -EINVAL;
+
+	/*
+	 * If the device tree micbias level is already above the minimum
+	 * voltage needed to detect a threshold microphone, do not
+	 * change the micbias, just return.
+	 */
+	if (pdata->micbias.micb2_mv >= WCD_MBHC_THR_HS_MICB_MV)
+		return 0;
+
+	micb_mv = req_en ? WCD_MBHC_THR_HS_MICB_MV : pdata->micbias.micb2_mv;
+
+	mutex_lock(&tasha->micb_lock);
+	rc = tasha_mbhc_micb_adjust_voltage(codec, micb_mv, MIC_BIAS_2);
+	mutex_unlock(&tasha->micb_lock);
+
+	return rc;
+}
+
+static inline void tasha_mbhc_get_result_params(struct wcd9xxx *wcd9xxx,
+						s16 *d1_a, u16 noff,
+						int32_t *zdet)
+{
+	int i;
+	int val, val1;
+	s16 c1;
+	s32 x1, d1;
+	int32_t denom;
+	int minCode_param[] = {
+			3277, 1639, 820, 410, 205, 103, 52, 26
+	};
+
+	regmap_update_bits(wcd9xxx->regmap, WCD9335_ANA_MBHC_ZDET, 0x20, 0x20);
+	for (i = 0; i < TASHA_ZDET_NUM_MEASUREMENTS; i++) {
+		regmap_read(wcd9xxx->regmap, WCD9335_ANA_MBHC_RESULT_2, &val);
+		if (val & 0x80)
+			break;
+	}
+	val = val << 0x8;
+	regmap_read(wcd9xxx->regmap, WCD9335_ANA_MBHC_RESULT_1, &val1);
+	val |= val1;
+	regmap_update_bits(wcd9xxx->regmap, WCD9335_ANA_MBHC_ZDET, 0x20, 0x00);
+	x1 = TASHA_MBHC_GET_X1(val);
+	c1 = TASHA_MBHC_GET_C1(val);
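+	/* 16-bit result word: c1 = bits 15:14, x1 = bits 13:0 */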
+	/* If ramp is not complete, give additional 5ms */
+	if ((c1 < 2) && x1)
+		usleep_range(5000, 5050);
+
+	if (!c1 || !x1) {
+		dev_dbg(wcd9xxx->dev,
+			"%s: Impedance detect ramp error, c1=%d, x1=0x%x\n",
+			__func__, c1, x1);
+		goto ramp_down;
+	}
+	d1 = d1_a[c1];
+	denom = (x1 * d1) - (1 << (14 - noff));
+	if (denom > 0)
+		*zdet = (TASHA_MBHC_ZDET_CONST * 1000) / denom;
+	else if (x1 < minCode_param[noff])
+		*zdet = TASHA_ZDET_FLOATING_IMPEDANCE;
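+	/*
+	 * i.e. zdet (in milliohm) = (86 * 16384 * 1000) /
+	 * (x1 * d1 - 2^(14 - noff)); an x1 below the per-noff minimum
+	 * code is reported as floating impedance instead.
+	 */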
+
+	dev_dbg(wcd9xxx->dev, "%s: d1=%d, c1=%d, x1=0x%x, z_val=%d(milliOhm)\n",
+		__func__, d1, c1, x1, *zdet);
+ramp_down:
+	i = 0;
+	while (x1) {
+		regmap_bulk_read(wcd9xxx->regmap,
+				 WCD9335_ANA_MBHC_RESULT_1, (u8 *)&val, 2);
+		x1 = TASHA_MBHC_GET_X1(val);
+		i++;
+		if (i == TASHA_ZDET_NUM_MEASUREMENTS)
+			break;
+	}
+}
+
+/*
+ * tasha_mbhc_zdet_gpio_ctrl: Register a callback function for
+ * controlling the switch on the hifi amps. The default switch state
+ * puts a 51 ohm load in parallel with the hph load, so the impedance
+ * detection function pulls the gpio high to open the switch.
+ *
+ * @zdet_gpio_cb: callback function from machine driver
+ * @codec: Codec instance
+ *
+ * Return: none
+ */
+void tasha_mbhc_zdet_gpio_ctrl(
+		int (*zdet_gpio_cb)(struct snd_soc_codec *codec, bool high),
+		struct snd_soc_codec *codec)
+{
+	struct tasha_priv *tasha = snd_soc_codec_get_drvdata(codec);
+
+	tasha->zdet_gpio_cb = zdet_gpio_cb;
+}
+EXPORT_SYMBOL(tasha_mbhc_zdet_gpio_ctrl);
+
+static void tasha_mbhc_zdet_ramp(struct snd_soc_codec *codec,
+				 struct tasha_mbhc_zdet_param *zdet_param,
+				 int32_t *zl, int32_t *zr, s16 *d1_a)
+{
+	struct wcd9xxx *wcd9xxx = dev_get_drvdata(codec->dev->parent);
+	int32_t zdet = 0;
+
+	snd_soc_update_bits(codec, WCD9335_MBHC_ZDET_ANA_CTL, 0x70,
+			    zdet_param->ldo_ctl << 4);
+	snd_soc_update_bits(codec, WCD9335_ANA_MBHC_BTN5, 0xFC,
+			    zdet_param->btn5);
+	snd_soc_update_bits(codec, WCD9335_ANA_MBHC_BTN6, 0xFC,
+			    zdet_param->btn6);
+	snd_soc_update_bits(codec, WCD9335_ANA_MBHC_BTN7, 0xFC,
+			    zdet_param->btn7);
+	snd_soc_update_bits(codec, WCD9335_MBHC_ZDET_ANA_CTL, 0x0F,
+			    zdet_param->noff);
+	snd_soc_update_bits(codec, WCD9335_MBHC_ZDET_RAMP_CTL, 0x0F,
+			    zdet_param->nshift);
+
+	if (!zl)
+		goto z_right;
+	/* Start impedance measurement for HPH_L */
+	regmap_update_bits(wcd9xxx->regmap,
+			   WCD9335_ANA_MBHC_ZDET, 0x80, 0x80);
+	dev_dbg(wcd9xxx->dev, "%s: ramp for HPH_L, noff = %d\n",
+					__func__, zdet_param->noff);
+	tasha_mbhc_get_result_params(wcd9xxx, d1_a, zdet_param->noff, &zdet);
+	regmap_update_bits(wcd9xxx->regmap,
+			   WCD9335_ANA_MBHC_ZDET, 0x80, 0x00);
+
+	*zl = zdet;
+
+z_right:
+	if (!zr)
+		return;
+	/* Start impedance measurement for HPH_R */
+	regmap_update_bits(wcd9xxx->regmap,
+			   WCD9335_ANA_MBHC_ZDET, 0x40, 0x40);
+	dev_dbg(wcd9xxx->dev, "%s: ramp for HPH_R, noff = %d\n",
+					__func__, zdet_param->noff);
+	tasha_mbhc_get_result_params(wcd9xxx, d1_a, zdet_param->noff, &zdet);
+	regmap_update_bits(wcd9xxx->regmap,
+			   WCD9335_ANA_MBHC_ZDET, 0x40, 0x00);
+
+	*zr = zdet;
+}
+
+static inline void tasha_wcd_mbhc_qfuse_cal(struct snd_soc_codec *codec,
+					int32_t *z_val, int flag_l_r)
+{
+	s16 q1;
+	int q1_cal;
+
+	if (*z_val < (TASHA_ZDET_VAL_400/1000))
+		q1 = snd_soc_read(codec,
+			WCD9335_CHIP_TIER_CTRL_EFUSE_VAL_OUT1 + (2 * flag_l_r));
+	else
+		q1 = snd_soc_read(codec,
+			WCD9335_CHIP_TIER_CTRL_EFUSE_VAL_OUT2 + (2 * flag_l_r));
+	if (q1 & 0x80)
+		q1_cal = (10000 - ((q1 & 0x7F) * 25));
+	else
+		q1_cal = (10000 + (q1 * 25));
+	if (q1_cal > 0)
+		*z_val = ((*z_val) * 10000) / q1_cal;
+}
+
+static void tasha_wcd_mbhc_calc_impedance(struct wcd_mbhc *mbhc, uint32_t *zl,
+					  uint32_t *zr)
+{
+	struct snd_soc_codec *codec = mbhc->codec;
+	struct tasha_priv *tasha = snd_soc_codec_get_drvdata(codec);
+	struct wcd9xxx *wcd9xxx = tasha->wcd9xxx;
+	s16 reg0, reg1, reg2, reg3, reg4;
+	int32_t z1L, z1R, z1Ls;
+	int zMono, z_diff1, z_diff2;
+	bool is_fsm_disable = false;
+	bool is_change = false;
+	struct tasha_mbhc_zdet_param zdet_param[] = {
+		{4, 0, 4, 0x08, 0x14, 0x18}, /* < 32ohm */
+		{2, 0, 3, 0x18, 0x7C, 0x90}, /* 32ohm < Z < 400ohm */
+		{1, 4, 5, 0x18, 0x7C, 0x90}, /* 400ohm < Z < 1200ohm */
+		{1, 6, 7, 0x18, 0x7C, 0x90}, /* >1200ohm */
+	};
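+	/*
+	 * Each row above is {ldo_ctl, noff, nshift, btn5, btn6, btn7}
+	 * (struct tasha_mbhc_zdet_param), one tuning set per impedance
+	 * band.
+	 */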
+	struct tasha_mbhc_zdet_param *zdet_param_ptr = NULL;
+	s16 d1_a[][4] = {
+		{0, 30, 90, 30},
+		{0, 30, 30, 5},
+		{0, 30, 30, 5},
+		{0, 30, 30, 5},
+	};
+	s16 *d1 = NULL;
+
+	if (!TASHA_IS_2_0(wcd9xxx)) {
+		dev_dbg(codec->dev, "%s: Z-det is not supported for this codec version\n",
+					__func__);
+		*zl = 0;
+		*zr = 0;
+		return;
+	}
+	WCD_MBHC_RSC_ASSERT_LOCKED(mbhc);
+
+	if (tasha->zdet_gpio_cb)
+		is_change = tasha->zdet_gpio_cb(codec, true);
+
+	reg0 = snd_soc_read(codec, WCD9335_ANA_MBHC_BTN5);
+	reg1 = snd_soc_read(codec, WCD9335_ANA_MBHC_BTN6);
+	reg2 = snd_soc_read(codec, WCD9335_ANA_MBHC_BTN7);
+	reg3 = snd_soc_read(codec, WCD9335_MBHC_CTL_1);
+	reg4 = snd_soc_read(codec, WCD9335_MBHC_ZDET_ANA_CTL);
+
+	if (snd_soc_read(codec, WCD9335_ANA_MBHC_ELECT) & 0x80) {
+		is_fsm_disable = true;
+		regmap_update_bits(wcd9xxx->regmap,
+				   WCD9335_ANA_MBHC_ELECT, 0x80, 0x00);
+	}
+
+	/* For NO-jack, disable L_DET_EN before Z-det measurements */
+	if (mbhc->hphl_swh)
+		regmap_update_bits(wcd9xxx->regmap,
+				   WCD9335_ANA_MBHC_MECH, 0x80, 0x00);
+
+	/* Enable AZ */
+	snd_soc_update_bits(codec, WCD9335_MBHC_CTL_1, 0x0C, 0x04);
+	/* Turn off 100k pull down on HPHL */
+	regmap_update_bits(wcd9xxx->regmap,
+			   WCD9335_ANA_MBHC_MECH, 0x01, 0x00);
+
+	/* First get impedance on Left */
+	d1 = d1_a[1];
+	zdet_param_ptr = &zdet_param[1];
+	tasha_mbhc_zdet_ramp(codec, zdet_param_ptr, &z1L, NULL, d1);
+
+	if (!TASHA_MBHC_IS_SECOND_RAMP_REQUIRED(z1L))
+		goto left_ch_impedance;
+
+	/* second ramp for left ch */
+	if (z1L < TASHA_ZDET_VAL_32) {
+		zdet_param_ptr = &zdet_param[0];
+		d1 = d1_a[0];
+	} else if ((z1L > TASHA_ZDET_VAL_400) && (z1L <= TASHA_ZDET_VAL_1200)) {
+		zdet_param_ptr = &zdet_param[2];
+		d1 = d1_a[2];
+	} else if (z1L > TASHA_ZDET_VAL_1200) {
+		zdet_param_ptr = &zdet_param[3];
+		d1 = d1_a[3];
+	}
+	tasha_mbhc_zdet_ramp(codec, zdet_param_ptr, &z1L, NULL, d1);
+
+left_ch_impedance:
+	if ((z1L == TASHA_ZDET_FLOATING_IMPEDANCE) ||
+		(z1L > TASHA_ZDET_VAL_100K)) {
+		*zl = TASHA_ZDET_FLOATING_IMPEDANCE;
+		zdet_param_ptr = &zdet_param[1];
+		d1 = d1_a[1];
+	} else {
+		*zl = z1L/1000;
+		tasha_wcd_mbhc_qfuse_cal(codec, zl, 0);
+	}
+	dev_dbg(codec->dev, "%s: impedance on HPH_L = %d(ohms)\n",
+				__func__, *zl);
+
+	/* start of right impedance ramp and calculation */
+	tasha_mbhc_zdet_ramp(codec, zdet_param_ptr, NULL, &z1R, d1);
+	if (TASHA_MBHC_IS_SECOND_RAMP_REQUIRED(z1R)) {
+		if (((z1R > TASHA_ZDET_VAL_1200) &&
+			(zdet_param_ptr->noff == 0x6)) ||
+			((*zl) != TASHA_ZDET_FLOATING_IMPEDANCE))
+			goto right_ch_impedance;
+		/* second ramp for right ch */
+		if (z1R < TASHA_ZDET_VAL_32) {
+			zdet_param_ptr = &zdet_param[0];
+			d1 = d1_a[0];
+		} else if ((z1R > TASHA_ZDET_VAL_400) &&
+			(z1R <= TASHA_ZDET_VAL_1200)) {
+			zdet_param_ptr = &zdet_param[2];
+			d1 = d1_a[2];
+		} else if (z1R > TASHA_ZDET_VAL_1200) {
+			zdet_param_ptr = &zdet_param[3];
+			d1 = d1_a[3];
+		}
+		tasha_mbhc_zdet_ramp(codec, zdet_param_ptr, NULL, &z1R, d1);
+	}
+right_ch_impedance:
+	if ((z1R == TASHA_ZDET_FLOATING_IMPEDANCE) ||
+		(z1R > TASHA_ZDET_VAL_100K)) {
+		*zr = TASHA_ZDET_FLOATING_IMPEDANCE;
+	} else {
+		*zr = z1R/1000;
+		tasha_wcd_mbhc_qfuse_cal(codec, zr, 1);
+	}
+	dev_dbg(codec->dev, "%s: impedance on HPH_R = %d(ohms)\n",
+				__func__, *zr);
+
+	/* mono/stereo detection */
+	if ((*zl == TASHA_ZDET_FLOATING_IMPEDANCE) &&
+		(*zr == TASHA_ZDET_FLOATING_IMPEDANCE)) {
+		dev_dbg(codec->dev,
+			"%s: plug type is invalid or extension cable\n",
+			__func__);
+		goto zdet_complete;
+	}
+	if ((*zl == TASHA_ZDET_FLOATING_IMPEDANCE) ||
+	    (*zr == TASHA_ZDET_FLOATING_IMPEDANCE) ||
+	    ((*zl < WCD_MONO_HS_MIN_THR) && (*zr > WCD_MONO_HS_MIN_THR)) ||
+	    ((*zl > WCD_MONO_HS_MIN_THR) && (*zr < WCD_MONO_HS_MIN_THR))) {
+		dev_dbg(codec->dev,
+			"%s: Mono plug type with one ch floating or shorted to GND\n",
+			__func__);
+		mbhc->hph_type = WCD_MBHC_HPH_MONO;
+		goto zdet_complete;
+	}
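+	/*
+	 * Ramp the left channel once more with a test load on the right:
+	 * ATEST bit 1 plus the HPH pull-down place roughly 9 ohm on HPHR,
+	 * which appears in parallel on HPHL only if the plug is mono
+	 * (both channels driven from the same contact).
+	 */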
+	snd_soc_update_bits(codec, WCD9335_HPH_R_ATEST, 0x02, 0x02);
+	snd_soc_update_bits(codec, WCD9335_HPH_PA_CTL2, 0x40, 0x40);
+	if (*zl < (TASHA_ZDET_VAL_32/1000))
+		tasha_mbhc_zdet_ramp(codec, &zdet_param[0], &z1Ls, NULL, d1);
+	else
+		tasha_mbhc_zdet_ramp(codec, &zdet_param[1], &z1Ls, NULL, d1);
+	snd_soc_update_bits(codec, WCD9335_HPH_PA_CTL2, 0x40, 0x00);
+	snd_soc_update_bits(codec, WCD9335_HPH_R_ATEST, 0x02, 0x00);
+	z1Ls /= 1000;
+	tasha_wcd_mbhc_qfuse_cal(codec, &z1Ls, 0);
+	/* parallel of left Z and 9 ohm pull down resistor */
+	zMono = ((*zl) * 9) / ((*zl) + 9);
+	z_diff1 = (z1Ls > zMono) ? (z1Ls - zMono) : (zMono - z1Ls);
+	z_diff2 = ((*zl) > z1Ls) ? ((*zl) - z1Ls) : (z1Ls - (*zl));
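+	/*
+	 * z1Ls lands near *zl for a stereo plug (independent channels) and
+	 * near *zl || 9 ohm for a mono plug (both channels tied to one
+	 * contact). Decide which normalized distance is larger, with the
+	 * comparison cross-multiplied to avoid integer division.
+	 */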
+	if ((z_diff1 * (*zl + z1Ls)) > (z_diff2 * (z1Ls + zMono))) {
+		dev_dbg(codec->dev, "%s: stereo plug type detected\n",
+				__func__);
+		mbhc->hph_type = WCD_MBHC_HPH_STEREO;
+	} else {
+		dev_dbg(codec->dev, "%s: MONO plug type detected\n",
+			 __func__);
+		mbhc->hph_type = WCD_MBHC_HPH_MONO;
+	}
+
+zdet_complete:
+	snd_soc_write(codec, WCD9335_ANA_MBHC_BTN5, reg0);
+	snd_soc_write(codec, WCD9335_ANA_MBHC_BTN6, reg1);
+	snd_soc_write(codec, WCD9335_ANA_MBHC_BTN7, reg2);
+	/* Turn on 100k pull down on HPHL */
+	regmap_update_bits(wcd9xxx->regmap,
+			   WCD9335_ANA_MBHC_MECH, 0x01, 0x01);
+
+	/* For NO-jack, re-enable L_DET_EN after Z-det measurements */
+	if (mbhc->hphl_swh)
+		regmap_update_bits(wcd9xxx->regmap,
+				   WCD9335_ANA_MBHC_MECH, 0x80, 0x80);
+
+	snd_soc_write(codec, WCD9335_MBHC_ZDET_ANA_CTL, reg4);
+	snd_soc_write(codec, WCD9335_MBHC_CTL_1, reg3);
+	if (is_fsm_disable)
+		regmap_update_bits(wcd9xxx->regmap,
+				   WCD9335_ANA_MBHC_ELECT, 0x80, 0x80);
+	if (tasha->zdet_gpio_cb && is_change)
+		tasha->zdet_gpio_cb(codec, false);
+}
+
+static void tasha_mbhc_gnd_det_ctrl(struct snd_soc_codec *codec, bool enable)
+{
+	if (enable) {
+		snd_soc_update_bits(codec, WCD9335_ANA_MBHC_MECH,
+				    0x02, 0x02);
+		snd_soc_update_bits(codec, WCD9335_ANA_MBHC_MECH,
+				    0x40, 0x40);
+	} else {
+		snd_soc_update_bits(codec, WCD9335_ANA_MBHC_MECH,
+				    0x40, 0x00);
+		snd_soc_update_bits(codec, WCD9335_ANA_MBHC_MECH,
+				    0x02, 0x00);
+	}
+}
+
+static void tasha_mbhc_hph_pull_down_ctrl(struct snd_soc_codec *codec,
+					  bool enable)
+{
+	struct tasha_priv *tasha = snd_soc_codec_get_drvdata(codec);
+
+	if (enable) {
+		snd_soc_update_bits(codec, WCD9335_HPH_PA_CTL2,
+				    0x40, 0x40);
+		if (TASHA_IS_2_0(tasha->wcd9xxx))
+			snd_soc_update_bits(codec, WCD9335_HPH_PA_CTL2,
+					    0x10, 0x10);
+	} else {
+		snd_soc_update_bits(codec, WCD9335_HPH_PA_CTL2,
+				    0x40, 0x00);
+		if (TASHA_IS_2_0(tasha->wcd9xxx))
+			snd_soc_update_bits(codec, WCD9335_HPH_PA_CTL2,
+					    0x10, 0x00);
+	}
+}
+
+static void tasha_mbhc_moisture_config(struct wcd_mbhc *mbhc)
+{
+	struct snd_soc_codec *codec = mbhc->codec;
+
+	if (mbhc->moist_vref == V_OFF)
+		return;
+
+	/* Do not enable moisture detection if jack type is NC */
+	if (!mbhc->hphl_swh) {
+		dev_dbg(codec->dev, "%s: disable moisture detection for NC\n",
+			__func__);
+		return;
+	}
+
+	snd_soc_update_bits(codec, WCD9335_MBHC_CTL_2,
+			    0x0C, mbhc->moist_vref << 2);
+	tasha_mbhc_hph_l_pull_up_control(codec, mbhc->moist_iref);
+}
+
+static void tasha_update_anc_state(struct snd_soc_codec *codec, bool enable,
+				   int anc_num)
+{
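+	/*
+	 * The RXn_RX_PATH_CFG0 registers are spaced 20 addresses apart;
+	 * bit 4 enables ANC on that RX path.
+	 */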
+	if (enable)
+		snd_soc_update_bits(codec, WCD9335_CDC_RX1_RX_PATH_CFG0 +
+				(20 * anc_num), 0x10, 0x10);
+	else
+		snd_soc_update_bits(codec, WCD9335_CDC_RX1_RX_PATH_CFG0 +
+				(20 * anc_num), 0x10, 0x00);
+}
+
+static bool tasha_is_anc_on(struct wcd_mbhc *mbhc)
+{
+	bool anc_on = false;
+	u16 ancl, ancr;
+
+	ancl =
+	(snd_soc_read(mbhc->codec, WCD9335_CDC_RX1_RX_PATH_CFG0)) & 0x10;
+	ancr =
+	(snd_soc_read(mbhc->codec, WCD9335_CDC_RX2_RX_PATH_CFG0)) & 0x10;
+
+	anc_on = !!(ancl | ancr);
+
+	return anc_on;
+}
+
+static const struct wcd_mbhc_cb mbhc_cb = {
+	.request_irq = tasha_mbhc_request_irq,
+	.irq_control = tasha_mbhc_irq_control,
+	.free_irq = tasha_mbhc_free_irq,
+	.clk_setup = tasha_mbhc_clk_setup,
+	.map_btn_code_to_num = tasha_mbhc_btn_to_num,
+	.enable_mb_source = tasha_enable_ext_mb_source,
+	.mbhc_bias = tasha_mbhc_mbhc_bias_control,
+	.set_btn_thr = tasha_mbhc_program_btn_thr,
+	.lock_sleep = tasha_mbhc_lock_sleep,
+	.register_notifier = tasha_mbhc_register_notifier,
+	.micbias_enable_status = tasha_mbhc_micb_en_status,
+	.hph_pa_on_status = tasha_mbhc_hph_pa_on_status,
+	.hph_pull_up_control = tasha_mbhc_hph_l_pull_up_control,
+	.mbhc_micbias_control = tasha_mbhc_request_micbias,
+	.mbhc_micb_ramp_control = tasha_mbhc_micb_ramp_control,
+	.get_hwdep_fw_cal = tasha_get_hwdep_fw_cal,
+	.mbhc_micb_ctrl_thr_mic = tasha_mbhc_micb_ctrl_threshold_mic,
+	.compute_impedance = tasha_wcd_mbhc_calc_impedance,
+	.mbhc_gnd_det_ctrl = tasha_mbhc_gnd_det_ctrl,
+	.hph_pull_down_ctrl = tasha_mbhc_hph_pull_down_ctrl,
+	.mbhc_moisture_config = tasha_mbhc_moisture_config,
+	.update_anc_state = tasha_update_anc_state,
+	.is_anc_on = tasha_is_anc_on,
+};
+
+static int tasha_get_anc_slot(struct snd_kcontrol *kcontrol,
+	struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
+	struct tasha_priv *tasha = snd_soc_codec_get_drvdata(codec);
+
+	ucontrol->value.integer.value[0] = tasha->anc_slot;
+	return 0;
+}
+
+static int tasha_put_anc_slot(struct snd_kcontrol *kcontrol,
+	struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
+	struct tasha_priv *tasha = snd_soc_codec_get_drvdata(codec);
+
+	tasha->anc_slot = ucontrol->value.integer.value[0];
+	return 0;
+}
+
+static int tasha_get_anc_func(struct snd_kcontrol *kcontrol,
+	struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
+	struct tasha_priv *tasha = snd_soc_codec_get_drvdata(codec);
+
+	ucontrol->value.integer.value[0] = (tasha->anc_func == true ? 1 : 0);
+	return 0;
+}
+
+static int tasha_put_anc_func(struct snd_kcontrol *kcontrol,
+	struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
+	struct tasha_priv *tasha = snd_soc_codec_get_drvdata(codec);
+	struct snd_soc_dapm_context *dapm = snd_soc_codec_get_dapm(codec);
+
+	mutex_lock(&tasha->codec_mutex);
+	tasha->anc_func = (!ucontrol->value.integer.value[0] ? false : true);
+
+	dev_dbg(codec->dev, "%s: anc_func %x", __func__, tasha->anc_func);
+
+	if (tasha->anc_func == true) {
+		snd_soc_dapm_enable_pin(dapm, "ANC LINEOUT2 PA");
+		snd_soc_dapm_enable_pin(dapm, "ANC LINEOUT2");
+		snd_soc_dapm_enable_pin(dapm, "ANC LINEOUT1 PA");
+		snd_soc_dapm_enable_pin(dapm, "ANC LINEOUT1");
+		snd_soc_dapm_enable_pin(dapm, "ANC HPHR PA");
+		snd_soc_dapm_enable_pin(dapm, "ANC HPHR");
+		snd_soc_dapm_enable_pin(dapm, "ANC HPHL PA");
+		snd_soc_dapm_enable_pin(dapm, "ANC HPHL");
+		snd_soc_dapm_enable_pin(dapm, "ANC EAR PA");
+		snd_soc_dapm_enable_pin(dapm, "ANC EAR");
+		snd_soc_dapm_enable_pin(dapm, "ANC SPK1 PA");
+		snd_soc_dapm_disable_pin(dapm, "LINEOUT2");
+		snd_soc_dapm_disable_pin(dapm, "LINEOUT2 PA");
+		snd_soc_dapm_disable_pin(dapm, "LINEOUT1");
+		snd_soc_dapm_disable_pin(dapm, "LINEOUT1 PA");
+		snd_soc_dapm_disable_pin(dapm, "HPHR");
+		snd_soc_dapm_disable_pin(dapm, "HPHL");
+		snd_soc_dapm_disable_pin(dapm, "HPHR PA");
+		snd_soc_dapm_disable_pin(dapm, "HPHL PA");
+		snd_soc_dapm_disable_pin(dapm, "EAR PA");
+		snd_soc_dapm_disable_pin(dapm, "EAR");
+	} else {
+		snd_soc_dapm_disable_pin(dapm, "ANC LINEOUT2 PA");
+		snd_soc_dapm_disable_pin(dapm, "ANC LINEOUT2");
+		snd_soc_dapm_disable_pin(dapm, "ANC LINEOUT1 PA");
+		snd_soc_dapm_disable_pin(dapm, "ANC LINEOUT1");
+		snd_soc_dapm_disable_pin(dapm, "ANC HPHR");
+		snd_soc_dapm_disable_pin(dapm, "ANC HPHL");
+		snd_soc_dapm_disable_pin(dapm, "ANC HPHR PA");
+		snd_soc_dapm_disable_pin(dapm, "ANC HPHL PA");
+		snd_soc_dapm_disable_pin(dapm, "ANC EAR PA");
+		snd_soc_dapm_disable_pin(dapm, "ANC EAR");
+		snd_soc_dapm_disable_pin(dapm, "ANC SPK1 PA");
+		snd_soc_dapm_enable_pin(dapm, "LINEOUT2");
+		snd_soc_dapm_enable_pin(dapm, "LINEOUT2 PA");
+		snd_soc_dapm_enable_pin(dapm, "LINEOUT1");
+		snd_soc_dapm_enable_pin(dapm, "LINEOUT1 PA");
+		snd_soc_dapm_enable_pin(dapm, "HPHR");
+		snd_soc_dapm_enable_pin(dapm, "HPHL");
+		snd_soc_dapm_enable_pin(dapm, "HPHR PA");
+		snd_soc_dapm_enable_pin(dapm, "HPHL PA");
+		snd_soc_dapm_enable_pin(dapm, "EAR PA");
+		snd_soc_dapm_enable_pin(dapm, "EAR");
+	}
+	mutex_unlock(&tasha->codec_mutex);
+	snd_soc_dapm_sync(dapm);
+	return 0;
+}
+
+static int tasha_get_clkmode(struct snd_kcontrol *kcontrol,
+			    struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
+	struct tasha_priv *tasha = snd_soc_codec_get_drvdata(codec);
+
+	ucontrol->value.enumerated.item[0] = tasha->clk_mode;
+	dev_dbg(codec->dev, "%s: clk_mode: %d\n", __func__, tasha->clk_mode);
+
+	return 0;
+}
+
+static int tasha_put_clkmode(struct snd_kcontrol *kcontrol,
+			    struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
+	struct tasha_priv *tasha = snd_soc_codec_get_drvdata(codec);
+
+	tasha->clk_mode = ucontrol->value.enumerated.item[0];
+	dev_dbg(codec->dev, "%s: clk_mode: %d\n", __func__, tasha->clk_mode);
+
+	return 0;
+}
+
+static int tasha_get_iir_enable_audio_mixer(
+					struct snd_kcontrol *kcontrol,
+					struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
+	int iir_idx = ((struct soc_multi_mixer_control *)
+					kcontrol->private_value)->reg;
+	int band_idx = ((struct soc_multi_mixer_control *)
+					kcontrol->private_value)->shift;
+	/* IIR filter band registers are at integer multiples of 16 */
+	u16 iir_reg = WCD9335_CDC_SIDETONE_IIR0_IIR_CTL + 16 * iir_idx;
+
+	ucontrol->value.integer.value[0] = (snd_soc_read(codec, iir_reg) &
+					    (1 << band_idx)) != 0;
+
+	dev_dbg(codec->dev, "%s: IIR #%d band #%d enable %d\n", __func__,
+		iir_idx, band_idx,
+		(uint32_t)ucontrol->value.integer.value[0]);
+	return 0;
+}
+
+static int tasha_hph_impedance_get(struct snd_kcontrol *kcontrol,
+				   struct snd_ctl_elem_value *ucontrol)
+{
+	uint32_t zl, zr;
+	bool hphr;
+	struct soc_multi_mixer_control *mc;
+	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
+	struct tasha_priv *priv = snd_soc_codec_get_drvdata(codec);
+
+	mc = (struct soc_multi_mixer_control *)(kcontrol->private_value);
+	hphr = mc->shift;
+	wcd_mbhc_get_impedance(&priv->mbhc, &zl, &zr);
+	dev_dbg(codec->dev, "%s: zl=%u(ohms), zr=%u(ohms)\n", __func__, zl, zr);
+	ucontrol->value.integer.value[0] = hphr ? zr : zl;
+
+	return 0;
+}
+
+static const struct snd_kcontrol_new impedance_detect_controls[] = {
+	SOC_SINGLE_EXT("HPHL Impedance", 0, 0, UINT_MAX, 0,
+		       tasha_hph_impedance_get, NULL),
+	SOC_SINGLE_EXT("HPHR Impedance", 0, 1, UINT_MAX, 0,
+		       tasha_hph_impedance_get, NULL),
+};
+
+static int tasha_get_hph_type(struct snd_kcontrol *kcontrol,
+				struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
+	struct tasha_priv *priv = snd_soc_codec_get_drvdata(codec);
+	struct wcd_mbhc *mbhc;
+
+	if (!priv) {
+		dev_dbg(codec->dev, "%s: wcd9335 private data is NULL\n",
+				__func__);
+		return 0;
+	}
+
+	mbhc = &priv->mbhc;
+	if (!mbhc) {
+		dev_dbg(codec->dev, "%s: mbhc not initialized\n", __func__);
+		return 0;
+	}
+
+	ucontrol->value.integer.value[0] = (u32) mbhc->hph_type;
+	dev_dbg(codec->dev, "%s: hph_type = %u\n", __func__, mbhc->hph_type);
+
+	return 0;
+}
+
+static const struct snd_kcontrol_new hph_type_detect_controls[] = {
+	SOC_SINGLE_EXT("HPH Type", 0, 0, UINT_MAX, 0,
+		       tasha_get_hph_type, NULL),
+};
+
+static int tasha_vi_feed_mixer_get(struct snd_kcontrol *kcontrol,
+				   struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_dapm_widget_list *wlist =
+					dapm_kcontrol_get_wlist(kcontrol);
+	struct snd_soc_dapm_widget *widget = wlist->widgets[0];
+	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(widget->dapm);
+	struct tasha_priv *tasha_p = snd_soc_codec_get_drvdata(codec);
+
+	ucontrol->value.integer.value[0] = tasha_p->vi_feed_value;
+
+	return 0;
+}
+
+static int tasha_vi_feed_mixer_put(struct snd_kcontrol *kcontrol,
+				   struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_dapm_widget_list *wlist =
+					dapm_kcontrol_get_wlist(kcontrol);
+	struct snd_soc_dapm_widget *widget = wlist->widgets[0];
+	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(widget->dapm);
+	struct tasha_priv *tasha_p = snd_soc_codec_get_drvdata(codec);
+	struct wcd9xxx *core = tasha_p->wcd9xxx;
+	struct soc_multi_mixer_control *mixer =
+		((struct soc_multi_mixer_control *)kcontrol->private_value);
+	u32 dai_id = widget->shift;
+	u32 port_id = mixer->shift;
+	u32 enable = ucontrol->value.integer.value[0];
+
+	dev_dbg(codec->dev, "%s: enable: %d, port_id:%d, dai_id: %d\n",
+		__func__, enable, port_id, dai_id);
+
+	tasha_p->vi_feed_value = ucontrol->value.integer.value[0];
+
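+	/*
+	 * TX14/TX15 carry the speaker V/I sense feeds: add or remove them
+	 * from the VI-feedback DAI's channel list and track the state in
+	 * status_mask so tasha_codec_enable_slimvi_feedback() knows which
+	 * speakers to configure.
+	 */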
+	mutex_lock(&tasha_p->codec_mutex);
+	if (enable) {
+		if (port_id == TASHA_TX14 && !test_bit(VI_SENSE_1,
+						&tasha_p->status_mask)) {
+			list_add_tail(&core->tx_chs[TASHA_TX14].list,
+					&tasha_p->dai[dai_id].wcd9xxx_ch_list);
+			set_bit(VI_SENSE_1, &tasha_p->status_mask);
+		}
+		if (port_id == TASHA_TX15 && !test_bit(VI_SENSE_2,
+						&tasha_p->status_mask)) {
+			list_add_tail(&core->tx_chs[TASHA_TX15].list,
+					&tasha_p->dai[dai_id].wcd9xxx_ch_list);
+			set_bit(VI_SENSE_2, &tasha_p->status_mask);
+		}
+	} else {
+		if (port_id == TASHA_TX14 && test_bit(VI_SENSE_1,
+					&tasha_p->status_mask)) {
+			list_del_init(&core->tx_chs[TASHA_TX14].list);
+			clear_bit(VI_SENSE_1, &tasha_p->status_mask);
+		}
+		if (port_id == TASHA_TX15 && test_bit(VI_SENSE_2,
+					&tasha_p->status_mask)) {
+			list_del_init(&core->tx_chs[TASHA_TX15].list);
+			clear_bit(VI_SENSE_2, &tasha_p->status_mask);
+		}
+	}
+	mutex_unlock(&tasha_p->codec_mutex);
+	snd_soc_dapm_mixer_update_power(widget->dapm, kcontrol, enable, NULL);
+
+	return 0;
+}
+
+/* virtual port entries */
+static int slim_tx_mixer_get(struct snd_kcontrol *kcontrol,
+			     struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_dapm_widget_list *wlist =
+					dapm_kcontrol_get_wlist(kcontrol);
+	struct snd_soc_dapm_widget *widget = wlist->widgets[0];
+	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(widget->dapm);
+	struct tasha_priv *tasha_p = snd_soc_codec_get_drvdata(codec);
+
+	ucontrol->value.integer.value[0] = tasha_p->tx_port_value;
+	return 0;
+}
+
+static int slim_tx_mixer_put(struct snd_kcontrol *kcontrol,
+			     struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_dapm_widget_list *wlist =
+					dapm_kcontrol_get_wlist(kcontrol);
+	struct snd_soc_dapm_widget *widget = wlist->widgets[0];
+	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(widget->dapm);
+	struct tasha_priv *tasha_p = snd_soc_codec_get_drvdata(codec);
+	struct wcd9xxx *core = dev_get_drvdata(codec->dev->parent);
+	struct snd_soc_dapm_update *update = NULL;
+	struct soc_multi_mixer_control *mixer =
+		((struct soc_multi_mixer_control *)kcontrol->private_value);
+	u32 dai_id = widget->shift;
+	u32 port_id = mixer->shift;
+	u32 enable = ucontrol->value.integer.value[0];
+	u32 vtable;
+
+	dev_dbg(codec->dev, "%s: wname %s cname %s value %u shift %d item %ld\n",
+		__func__, widget->name, ucontrol->id.name,
+		tasha_p->tx_port_value, widget->shift,
+		ucontrol->value.integer.value[0]);
+
+	mutex_lock(&tasha_p->codec_mutex);
+
+	if (tasha_p->intf_type != WCD9XXX_INTERFACE_TYPE_SLIMBUS) {
+		if (dai_id != AIF1_CAP) {
+			dev_err(codec->dev, "%s: invalid AIF for I2C mode\n",
+				__func__);
+			mutex_unlock(&tasha_p->codec_mutex);
+			return -EINVAL;
+		}
+		vtable = vport_i2s_check_table[dai_id];
+	} else {
+		if (dai_id >= ARRAY_SIZE(vport_slim_check_table)) {
+			dev_err(codec->dev, "%s: dai_id: %d, out of bounds\n",
+				__func__, dai_id);
+			mutex_unlock(&tasha_p->codec_mutex);
+			return -EINVAL;
+		}
+		vtable = vport_slim_check_table[dai_id];
+	}
+	switch (dai_id) {
+	case AIF1_CAP:
+	case AIF2_CAP:
+	case AIF3_CAP:
+		/* only add to the list if value not set */
+		if (enable && !(tasha_p->tx_port_value & 1 << port_id)) {
+
+			if (wcd9xxx_tx_vport_validation(vtable, port_id,
+					tasha_p->dai, NUM_CODEC_DAIS)) {
+				dev_dbg(codec->dev, "%s: TX%u is used by other virtual port\n",
+					__func__, port_id);
+				mutex_unlock(&tasha_p->codec_mutex);
+				return 0;
+			}
+			tasha_p->tx_port_value |= 1 << port_id;
+			list_add_tail(&core->tx_chs[port_id].list,
+				      &tasha_p->dai[dai_id].wcd9xxx_ch_list);
+		} else if (!enable && (tasha_p->tx_port_value &
+					1 << port_id)) {
+			tasha_p->tx_port_value &= ~(1 << port_id);
+			list_del_init(&core->tx_chs[port_id].list);
+		} else {
+			if (enable)
+				dev_dbg(codec->dev, "%s: TX%u port is used by\n"
+					"this virtual port\n",
+					__func__, port_id);
+			else
+				dev_dbg(codec->dev, "%s: TX%u port is not used by\n"
+					"this virtual port\n",
+					__func__, port_id);
+			/* avoid update power function */
+			mutex_unlock(&tasha_p->codec_mutex);
+			return 0;
+		}
+		break;
+	case AIF4_MAD_TX:
+	case AIF5_CPE_TX:
+		break;
+	default:
+		pr_err("Unknown AIF %d\n", dai_id);
+		mutex_unlock(&tasha_p->codec_mutex);
+		return -EINVAL;
+	}
+	pr_debug("%s: name %s sname %s updated value %u shift %d\n", __func__,
+		widget->name, widget->sname, tasha_p->tx_port_value,
+		widget->shift);
+
+	mutex_unlock(&tasha_p->codec_mutex);
+	snd_soc_dapm_mixer_update_power(widget->dapm, kcontrol, enable, update);
+
+	return 0;
+}
+
+static int slim_rx_mux_get(struct snd_kcontrol *kcontrol,
+			   struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_dapm_widget_list *wlist =
+					dapm_kcontrol_get_wlist(kcontrol);
+	struct snd_soc_dapm_widget *widget = wlist->widgets[0];
+	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(widget->dapm);
+	struct tasha_priv *tasha_p = snd_soc_codec_get_drvdata(codec);
+
+	ucontrol->value.enumerated.item[0] =
+			tasha_p->rx_port_value[widget->shift];
+	return 0;
+}
+
+static const char *const slim_rx_mux_text[] = {
+	"ZERO", "AIF1_PB", "AIF2_PB", "AIF3_PB", "AIF4_PB", "AIF_MIX1_PB"
+};
+
+static int slim_rx_mux_put(struct snd_kcontrol *kcontrol,
+			   struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_dapm_widget_list *wlist =
+					dapm_kcontrol_get_wlist(kcontrol);
+	struct snd_soc_dapm_widget *widget = wlist->widgets[0];
+	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(widget->dapm);
+	struct tasha_priv *tasha_p = snd_soc_codec_get_drvdata(codec);
+	struct wcd9xxx *core = dev_get_drvdata(codec->dev->parent);
+	struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
+	struct snd_soc_dapm_update *update = NULL;
+	unsigned int rx_port_value;
+	u32 port_id = widget->shift;
+
+	tasha_p->rx_port_value[port_id] = ucontrol->value.enumerated.item[0];
+	rx_port_value = tasha_p->rx_port_value[port_id];
+
+	pr_debug("%s: wname %s cname %s value %u shift %d item %ld\n", __func__,
+		widget->name, ucontrol->id.name, rx_port_value,
+		widget->shift, ucontrol->value.integer.value[0]);
+
+	mutex_lock(&tasha_p->codec_mutex);
+
+	if (tasha_p->intf_type != WCD9XXX_INTERFACE_TYPE_SLIMBUS) {
+		if (rx_port_value > 2) {
+			dev_err(codec->dev, "%s: invalid AIF for I2C mode\n",
+				__func__);
+			goto err;
+		}
+	}
+	/* The mux value must match the virtual port and AIF numbering */
+	switch (rx_port_value) {
+	case 0:
+		list_del_init(&core->rx_chs[port_id].list);
+		break;
+	case 1:
+		if (wcd9xxx_rx_vport_validation(port_id +
+			TASHA_RX_PORT_START_NUMBER,
+			&tasha_p->dai[AIF1_PB].wcd9xxx_ch_list)) {
+			dev_dbg(codec->dev, "%s: RX%u is used by current requesting AIF_PB itself\n",
+				__func__, port_id);
+			goto rtn;
+		}
+		list_add_tail(&core->rx_chs[port_id].list,
+			      &tasha_p->dai[AIF1_PB].wcd9xxx_ch_list);
+		break;
+	case 2:
+		if (wcd9xxx_rx_vport_validation(port_id +
+			TASHA_RX_PORT_START_NUMBER,
+			&tasha_p->dai[AIF2_PB].wcd9xxx_ch_list)) {
+			dev_dbg(codec->dev, "%s: RX%u is used by current requesting AIF_PB itself\n",
+				__func__, port_id);
+			goto rtn;
+		}
+		list_add_tail(&core->rx_chs[port_id].list,
+			      &tasha_p->dai[AIF2_PB].wcd9xxx_ch_list);
+		break;
+	case 3:
+		if (wcd9xxx_rx_vport_validation(port_id +
+			TASHA_RX_PORT_START_NUMBER,
+			&tasha_p->dai[AIF3_PB].wcd9xxx_ch_list)) {
+			dev_dbg(codec->dev, "%s: RX%u is used by current requesting AIF_PB itself\n",
+				__func__, port_id);
+			goto rtn;
+		}
+		list_add_tail(&core->rx_chs[port_id].list,
+			      &tasha_p->dai[AIF3_PB].wcd9xxx_ch_list);
+		break;
+	case 4:
+		if (wcd9xxx_rx_vport_validation(port_id +
+			TASHA_RX_PORT_START_NUMBER,
+			&tasha_p->dai[AIF4_PB].wcd9xxx_ch_list)) {
+			dev_dbg(codec->dev, "%s: RX%u is used by current requesting AIF_PB itself\n",
+				__func__, port_id);
+			goto rtn;
+		}
+		list_add_tail(&core->rx_chs[port_id].list,
+			      &tasha_p->dai[AIF4_PB].wcd9xxx_ch_list);
+		break;
+	case 5:
+		if (wcd9xxx_rx_vport_validation(port_id +
+			TASHA_RX_PORT_START_NUMBER,
+			&tasha_p->dai[AIF_MIX1_PB].wcd9xxx_ch_list)) {
+			dev_dbg(codec->dev, "%s: RX%u is used by current requesting AIF_PB itself\n",
+				__func__, port_id);
+			goto rtn;
+		}
+		list_add_tail(&core->rx_chs[port_id].list,
+			      &tasha_p->dai[AIF_MIX1_PB].wcd9xxx_ch_list);
+		break;
+	default:
+		pr_err("Unknown AIF %d\n", rx_port_value);
+		goto err;
+	}
+rtn:
+	mutex_unlock(&tasha_p->codec_mutex);
+	snd_soc_dapm_mux_update_power(widget->dapm, kcontrol,
+					rx_port_value, e, update);
+
+	return 0;
+err:
+	mutex_unlock(&tasha_p->codec_mutex);
+	return -EINVAL;
+}
+
+static const struct soc_enum slim_rx_mux_enum =
+	SOC_ENUM_SINGLE_EXT(ARRAY_SIZE(slim_rx_mux_text), slim_rx_mux_text);
+
+static const struct snd_kcontrol_new slim_rx_mux[TASHA_RX_MAX] = {
+	SOC_DAPM_ENUM_EXT("SLIM RX0 Mux", slim_rx_mux_enum,
+			  slim_rx_mux_get, slim_rx_mux_put),
+	SOC_DAPM_ENUM_EXT("SLIM RX1 Mux", slim_rx_mux_enum,
+			  slim_rx_mux_get, slim_rx_mux_put),
+	SOC_DAPM_ENUM_EXT("SLIM RX2 Mux", slim_rx_mux_enum,
+			  slim_rx_mux_get, slim_rx_mux_put),
+	SOC_DAPM_ENUM_EXT("SLIM RX3 Mux", slim_rx_mux_enum,
+			  slim_rx_mux_get, slim_rx_mux_put),
+	SOC_DAPM_ENUM_EXT("SLIM RX4 Mux", slim_rx_mux_enum,
+			  slim_rx_mux_get, slim_rx_mux_put),
+	SOC_DAPM_ENUM_EXT("SLIM RX5 Mux", slim_rx_mux_enum,
+			  slim_rx_mux_get, slim_rx_mux_put),
+	SOC_DAPM_ENUM_EXT("SLIM RX6 Mux", slim_rx_mux_enum,
+			  slim_rx_mux_get, slim_rx_mux_put),
+	SOC_DAPM_ENUM_EXT("SLIM RX7 Mux", slim_rx_mux_enum,
+			  slim_rx_mux_get, slim_rx_mux_put),
+};
+
+static const struct snd_kcontrol_new aif4_vi_mixer[] = {
+	SOC_SINGLE_EXT("SPKR_VI_1", SND_SOC_NOPM, TASHA_TX14, 1, 0,
+			tasha_vi_feed_mixer_get, tasha_vi_feed_mixer_put),
+	SOC_SINGLE_EXT("SPKR_VI_2", SND_SOC_NOPM, TASHA_TX15, 1, 0,
+			tasha_vi_feed_mixer_get, tasha_vi_feed_mixer_put),
+};
+
+static const struct snd_kcontrol_new aif1_cap_mixer[] = {
+	SOC_SINGLE_EXT("SLIM TX0", SND_SOC_NOPM, TASHA_TX0, 1, 0,
+			slim_tx_mixer_get, slim_tx_mixer_put),
+	SOC_SINGLE_EXT("SLIM TX1", SND_SOC_NOPM, TASHA_TX1, 1, 0,
+			slim_tx_mixer_get, slim_tx_mixer_put),
+	SOC_SINGLE_EXT("SLIM TX2", SND_SOC_NOPM, TASHA_TX2, 1, 0,
+			slim_tx_mixer_get, slim_tx_mixer_put),
+	SOC_SINGLE_EXT("SLIM TX3", SND_SOC_NOPM, TASHA_TX3, 1, 0,
+			slim_tx_mixer_get, slim_tx_mixer_put),
+	SOC_SINGLE_EXT("SLIM TX4", SND_SOC_NOPM, TASHA_TX4, 1, 0,
+			slim_tx_mixer_get, slim_tx_mixer_put),
+	SOC_SINGLE_EXT("SLIM TX5", SND_SOC_NOPM, TASHA_TX5, 1, 0,
+			slim_tx_mixer_get, slim_tx_mixer_put),
+	SOC_SINGLE_EXT("SLIM TX6", SND_SOC_NOPM, TASHA_TX6, 1, 0,
+			slim_tx_mixer_get, slim_tx_mixer_put),
+	SOC_SINGLE_EXT("SLIM TX7", SND_SOC_NOPM, TASHA_TX7, 1, 0,
+			slim_tx_mixer_get, slim_tx_mixer_put),
+	SOC_SINGLE_EXT("SLIM TX8", SND_SOC_NOPM, TASHA_TX8, 1, 0,
+			slim_tx_mixer_get, slim_tx_mixer_put),
+	SOC_SINGLE_EXT("SLIM TX9", SND_SOC_NOPM, TASHA_TX9, 1, 0,
+			slim_tx_mixer_get, slim_tx_mixer_put),
+	SOC_SINGLE_EXT("SLIM TX10", SND_SOC_NOPM, TASHA_TX10, 1, 0,
+			slim_tx_mixer_get, slim_tx_mixer_put),
+	SOC_SINGLE_EXT("SLIM TX11", SND_SOC_NOPM, TASHA_TX11, 1, 0,
+			slim_tx_mixer_get, slim_tx_mixer_put),
+	SOC_SINGLE_EXT("SLIM TX13", SND_SOC_NOPM, TASHA_TX13, 1, 0,
+			slim_tx_mixer_get, slim_tx_mixer_put),
+};
+
+static const struct snd_kcontrol_new aif2_cap_mixer[] = {
+	SOC_SINGLE_EXT("SLIM TX0", SND_SOC_NOPM, TASHA_TX0, 1, 0,
+			slim_tx_mixer_get, slim_tx_mixer_put),
+	SOC_SINGLE_EXT("SLIM TX1", SND_SOC_NOPM, TASHA_TX1, 1, 0,
+			slim_tx_mixer_get, slim_tx_mixer_put),
+	SOC_SINGLE_EXT("SLIM TX2", SND_SOC_NOPM, TASHA_TX2, 1, 0,
+			slim_tx_mixer_get, slim_tx_mixer_put),
+	SOC_SINGLE_EXT("SLIM TX3", SND_SOC_NOPM, TASHA_TX3, 1, 0,
+			slim_tx_mixer_get, slim_tx_mixer_put),
+	SOC_SINGLE_EXT("SLIM TX4", SND_SOC_NOPM, TASHA_TX4, 1, 0,
+			slim_tx_mixer_get, slim_tx_mixer_put),
+	SOC_SINGLE_EXT("SLIM TX5", SND_SOC_NOPM, TASHA_TX5, 1, 0,
+			slim_tx_mixer_get, slim_tx_mixer_put),
+	SOC_SINGLE_EXT("SLIM TX6", SND_SOC_NOPM, TASHA_TX6, 1, 0,
+			slim_tx_mixer_get, slim_tx_mixer_put),
+	SOC_SINGLE_EXT("SLIM TX7", SND_SOC_NOPM, TASHA_TX7, 1, 0,
+			slim_tx_mixer_get, slim_tx_mixer_put),
+	SOC_SINGLE_EXT("SLIM TX8", SND_SOC_NOPM, TASHA_TX8, 1, 0,
+			slim_tx_mixer_get, slim_tx_mixer_put),
+	SOC_SINGLE_EXT("SLIM TX9", SND_SOC_NOPM, TASHA_TX9, 1, 0,
+			slim_tx_mixer_get, slim_tx_mixer_put),
+	SOC_SINGLE_EXT("SLIM TX10", SND_SOC_NOPM, TASHA_TX10, 1, 0,
+			slim_tx_mixer_get, slim_tx_mixer_put),
+	SOC_SINGLE_EXT("SLIM TX11", SND_SOC_NOPM, TASHA_TX11, 1, 0,
+			slim_tx_mixer_get, slim_tx_mixer_put),
+	SOC_SINGLE_EXT("SLIM TX13", SND_SOC_NOPM, TASHA_TX13, 1, 0,
+			slim_tx_mixer_get, slim_tx_mixer_put),
+};
+
+static const struct snd_kcontrol_new aif3_cap_mixer[] = {
+	SOC_SINGLE_EXT("SLIM TX0", SND_SOC_NOPM, TASHA_TX0, 1, 0,
+			slim_tx_mixer_get, slim_tx_mixer_put),
+	SOC_SINGLE_EXT("SLIM TX1", SND_SOC_NOPM, TASHA_TX1, 1, 0,
+			slim_tx_mixer_get, slim_tx_mixer_put),
+	SOC_SINGLE_EXT("SLIM TX2", SND_SOC_NOPM, TASHA_TX2, 1, 0,
+			slim_tx_mixer_get, slim_tx_mixer_put),
+	SOC_SINGLE_EXT("SLIM TX3", SND_SOC_NOPM, TASHA_TX3, 1, 0,
+			slim_tx_mixer_get, slim_tx_mixer_put),
+	SOC_SINGLE_EXT("SLIM TX4", SND_SOC_NOPM, TASHA_TX4, 1, 0,
+			slim_tx_mixer_get, slim_tx_mixer_put),
+	SOC_SINGLE_EXT("SLIM TX5", SND_SOC_NOPM, TASHA_TX5, 1, 0,
+			slim_tx_mixer_get, slim_tx_mixer_put),
+	SOC_SINGLE_EXT("SLIM TX6", SND_SOC_NOPM, TASHA_TX6, 1, 0,
+			slim_tx_mixer_get, slim_tx_mixer_put),
+	SOC_SINGLE_EXT("SLIM TX7", SND_SOC_NOPM, TASHA_TX7, 1, 0,
+			slim_tx_mixer_get, slim_tx_mixer_put),
+	SOC_SINGLE_EXT("SLIM TX8", SND_SOC_NOPM, TASHA_TX8, 1, 0,
+			slim_tx_mixer_get, slim_tx_mixer_put),
+	SOC_SINGLE_EXT("SLIM TX9", SND_SOC_NOPM, TASHA_TX9, 1, 0,
+			slim_tx_mixer_get, slim_tx_mixer_put),
+	SOC_SINGLE_EXT("SLIM TX10", SND_SOC_NOPM, TASHA_TX10, 1, 0,
+			slim_tx_mixer_get, slim_tx_mixer_put),
+	SOC_SINGLE_EXT("SLIM TX11", SND_SOC_NOPM, TASHA_TX11, 1, 0,
+			slim_tx_mixer_get, slim_tx_mixer_put),
+	SOC_SINGLE_EXT("SLIM TX13", SND_SOC_NOPM, TASHA_TX13, 1, 0,
+			slim_tx_mixer_get, slim_tx_mixer_put),
+};
+
+static const struct snd_kcontrol_new aif4_mad_mixer[] = {
+	SOC_SINGLE_EXT("SLIM TX12", SND_SOC_NOPM, TASHA_TX12, 1, 0,
+			slim_tx_mixer_get, slim_tx_mixer_put),
+	SOC_SINGLE_EXT("SLIM TX13", SND_SOC_NOPM, TASHA_TX13, 1, 0,
+			slim_tx_mixer_get, slim_tx_mixer_put),
+	SOC_SINGLE_EXT("SLIM TX1", SND_SOC_NOPM, 0, 1, 0,
+			slim_tx_mixer_get, slim_tx_mixer_put),
+};
+
+static const struct snd_kcontrol_new rx_int1_spline_mix_switch[] = {
+	SOC_DAPM_SINGLE("HPHL Switch", SND_SOC_NOPM, 0, 1, 0)
+};
+
+static const struct snd_kcontrol_new rx_int2_spline_mix_switch[] = {
+	SOC_DAPM_SINGLE("HPHR Switch", SND_SOC_NOPM, 0, 1, 0)
+};
+
+static const struct snd_kcontrol_new rx_int3_spline_mix_switch[] = {
+	SOC_DAPM_SINGLE("LO1 Switch", SND_SOC_NOPM, 0, 1, 0)
+};
+
+static const struct snd_kcontrol_new rx_int4_spline_mix_switch[] = {
+	SOC_DAPM_SINGLE("LO2 Switch", SND_SOC_NOPM, 0, 1, 0)
+};
+
+static const struct snd_kcontrol_new rx_int5_spline_mix_switch[] = {
+	SOC_DAPM_SINGLE("LO3 Switch", SND_SOC_NOPM, 0, 1, 0)
+};
+
+static const struct snd_kcontrol_new rx_int6_spline_mix_switch[] = {
+	SOC_DAPM_SINGLE("LO4 Switch", SND_SOC_NOPM, 0, 1, 0)
+};
+
+static const struct snd_kcontrol_new rx_int7_spline_mix_switch[] = {
+	SOC_DAPM_SINGLE("SPKRL Switch", SND_SOC_NOPM, 0, 1, 0)
+};
+
+static const struct snd_kcontrol_new rx_int8_spline_mix_switch[] = {
+	SOC_DAPM_SINGLE("SPKRR Switch", SND_SOC_NOPM, 0, 1, 0)
+};
+
+static const struct snd_kcontrol_new rx_int5_vbat_mix_switch[] = {
+	SOC_DAPM_SINGLE("LO3 VBAT Enable", SND_SOC_NOPM, 0, 1, 0)
+};
+
+static const struct snd_kcontrol_new rx_int6_vbat_mix_switch[] = {
+	SOC_DAPM_SINGLE("LO4 VBAT Enable", SND_SOC_NOPM, 0, 1, 0)
+};
+
+static const struct snd_kcontrol_new rx_int7_vbat_mix_switch[] = {
+	SOC_DAPM_SINGLE("SPKRL VBAT Enable", SND_SOC_NOPM, 0, 1, 0)
+};
+
+static const struct snd_kcontrol_new rx_int8_vbat_mix_switch[] = {
+	SOC_DAPM_SINGLE("SPKRR VBAT Enable", SND_SOC_NOPM, 0, 1, 0)
+};
+
+static const struct snd_kcontrol_new cpe_in_mix_switch[] = {
+	SOC_DAPM_SINGLE("MAD_BYPASS", SND_SOC_NOPM, 0, 1, 0)
+};
+
+static int tasha_put_iir_enable_audio_mixer(
+					struct snd_kcontrol *kcontrol,
+					struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
+	int iir_idx = ((struct soc_multi_mixer_control *)
+					kcontrol->private_value)->reg;
+	int band_idx = ((struct soc_multi_mixer_control *)
+					kcontrol->private_value)->shift;
+	bool iir_band_en_status;
+	int value = ucontrol->value.integer.value[0];
+	u16 iir_reg = WCD9335_CDC_SIDETONE_IIR0_IIR_CTL + 16 * iir_idx;
+
+	/* Only the lower 5 band-enable bits are used; the rest are reserved */
+	snd_soc_update_bits(codec, iir_reg, (1 << band_idx),
+			    (value << band_idx));
+
+	iir_band_en_status = ((snd_soc_read(codec, iir_reg) &
+			      (1 << band_idx)) != 0);
+	pr_debug("%s: IIR #%d band #%d enable %d\n", __func__,
+		iir_idx, band_idx, iir_band_en_status);
+	return 0;
+}
+
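+/*
+ * Read one 32-bit IIR coefficient a byte at a time: the byte offset is
+ * written to the B1 control register, then the byte is read from the B2
+ * data register. Reads do not advance the address, so the offset must be
+ * reprogrammed before every byte; the top 2 bits of the last byte are
+ * reserved and masked off.
+ */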
+static uint32_t get_iir_band_coeff(struct snd_soc_codec *codec,
+				int iir_idx, int band_idx,
+				int coeff_idx)
+{
+	uint32_t value = 0;
+
+	/* The address does not auto-increment on reads; set it before each read */
+	snd_soc_write(codec,
+		(WCD9335_CDC_SIDETONE_IIR0_IIR_COEF_B1_CTL + 16 * iir_idx),
+		((band_idx * BAND_MAX + coeff_idx)
+		* sizeof(uint32_t)) & 0x7F);
+
+	value |= snd_soc_read(codec,
+		(WCD9335_CDC_SIDETONE_IIR0_IIR_COEF_B2_CTL + 16 * iir_idx));
+
+	snd_soc_write(codec,
+		(WCD9335_CDC_SIDETONE_IIR0_IIR_COEF_B1_CTL + 16 * iir_idx),
+		((band_idx * BAND_MAX + coeff_idx)
+		* sizeof(uint32_t) + 1) & 0x7F);
+
+	value |= (snd_soc_read(codec,
+			       (WCD9335_CDC_SIDETONE_IIR0_IIR_COEF_B2_CTL +
+				16 * iir_idx)) << 8);
+
+	snd_soc_write(codec,
+		(WCD9335_CDC_SIDETONE_IIR0_IIR_COEF_B1_CTL + 16 * iir_idx),
+		((band_idx * BAND_MAX + coeff_idx)
+		* sizeof(uint32_t) + 2) & 0x7F);
+
+	value |= (snd_soc_read(codec,
+			       (WCD9335_CDC_SIDETONE_IIR0_IIR_COEF_B2_CTL +
+				16 * iir_idx)) << 16);
+
+	snd_soc_write(codec,
+		(WCD9335_CDC_SIDETONE_IIR0_IIR_COEF_B1_CTL + 16 * iir_idx),
+		((band_idx * BAND_MAX + coeff_idx)
+		* sizeof(uint32_t) + 3) & 0x7F);
+
+	/* Mask off the top 2 bits since they are reserved */
+	value |= ((snd_soc_read(codec,
+				(WCD9335_CDC_SIDETONE_IIR0_IIR_COEF_B2_CTL +
+				 16 * iir_idx)) & 0x3F) << 24);
+
+	return value;
+}
+
+static int tasha_get_iir_band_audio_mixer(
+					struct snd_kcontrol *kcontrol,
+					struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
+	int iir_idx = ((struct soc_multi_mixer_control *)
+					kcontrol->private_value)->reg;
+	int band_idx = ((struct soc_multi_mixer_control *)
+					kcontrol->private_value)->shift;
+
+	ucontrol->value.integer.value[0] =
+		get_iir_band_coeff(codec, iir_idx, band_idx, 0);
+	ucontrol->value.integer.value[1] =
+		get_iir_band_coeff(codec, iir_idx, band_idx, 1);
+	ucontrol->value.integer.value[2] =
+		get_iir_band_coeff(codec, iir_idx, band_idx, 2);
+	ucontrol->value.integer.value[3] =
+		get_iir_band_coeff(codec, iir_idx, band_idx, 3);
+	ucontrol->value.integer.value[4] =
+		get_iir_band_coeff(codec, iir_idx, band_idx, 4);
+
+	pr_debug("%s: IIR #%d band #%d b0 = 0x%x\n"
+		"%s: IIR #%d band #%d b1 = 0x%x\n"
+		"%s: IIR #%d band #%d b2 = 0x%x\n"
+		"%s: IIR #%d band #%d a1 = 0x%x\n"
+		"%s: IIR #%d band #%d a2 = 0x%x\n",
+		__func__, iir_idx, band_idx,
+		(uint32_t)ucontrol->value.integer.value[0],
+		__func__, iir_idx, band_idx,
+		(uint32_t)ucontrol->value.integer.value[1],
+		__func__, iir_idx, band_idx,
+		(uint32_t)ucontrol->value.integer.value[2],
+		__func__, iir_idx, band_idx,
+		(uint32_t)ucontrol->value.integer.value[3],
+		__func__, iir_idx, band_idx,
+		(uint32_t)ucontrol->value.integer.value[4]);
+	return 0;
+}
+
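+/*
+ * Write one 32-bit IIR coefficient LSB first. Unlike reads, each write
+ * to the B2 data register auto-increments the coefficient address (see
+ * the comment in tasha_put_iir_band_audio_mixer), so the four bytes are
+ * written back to back with the reserved top 2 bits masked off.
+ */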
+static void set_iir_band_coeff(struct snd_soc_codec *codec,
+				int iir_idx, int band_idx,
+				uint32_t value)
+{
+	snd_soc_write(codec,
+		(WCD9335_CDC_SIDETONE_IIR0_IIR_COEF_B2_CTL + 16 * iir_idx),
+		(value & 0xFF));
+
+	snd_soc_write(codec,
+		(WCD9335_CDC_SIDETONE_IIR0_IIR_COEF_B2_CTL + 16 * iir_idx),
+		(value >> 8) & 0xFF);
+
+	snd_soc_write(codec,
+		(WCD9335_CDC_SIDETONE_IIR0_IIR_COEF_B2_CTL + 16 * iir_idx),
+		(value >> 16) & 0xFF);
+
+	/* Mask off the top 2 bits; they are reserved */
+	snd_soc_write(codec,
+		(WCD9335_CDC_SIDETONE_IIR0_IIR_COEF_B2_CTL + 16 * iir_idx),
+		(value >> 24) & 0x3F);
+}
+
+static void tasha_codec_enable_int_port(struct wcd9xxx_codec_dai_data *dai,
+					struct snd_soc_codec *codec)
+{
+	struct wcd9xxx_ch *ch;
+	int port_num = 0;
+	unsigned short reg = 0;
+	u8 val = 0;
+	struct tasha_priv *tasha_p;
+
+	if (!dai || !codec) {
+		pr_err("%s: Invalid params\n", __func__);
+		return;
+	}
+
+	tasha_p = snd_soc_codec_get_drvdata(codec);
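+	/*
+	 * Each SLIMbus port has one enable bit in a bank of 8-bit
+	 * interrupt-enable registers: register = base + port / 8, bit =
+	 * port % 8 (BYTE_BIT_MASK). RX ports are numbered after the TX
+	 * ports, hence the TASHA_RX_PORT_START_NUMBER rebase below.
+	 */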
+	list_for_each_entry(ch, &dai->wcd9xxx_ch_list, list) {
+		if (ch->port >= TASHA_RX_PORT_START_NUMBER) {
+			port_num = ch->port - TASHA_RX_PORT_START_NUMBER;
+			reg = TASHA_SLIM_PGD_PORT_INT_EN0 + (port_num / 8);
+			val = wcd9xxx_interface_reg_read(tasha_p->wcd9xxx,
+				reg);
+			if (!(val & BYTE_BIT_MASK(port_num))) {
+				val |= BYTE_BIT_MASK(port_num);
+				wcd9xxx_interface_reg_write(
+					tasha_p->wcd9xxx, reg, val);
+				val = wcd9xxx_interface_reg_read(
+					tasha_p->wcd9xxx, reg);
+			}
+		} else {
+			port_num = ch->port;
+			reg = TASHA_SLIM_PGD_PORT_INT_TX_EN0 + (port_num / 8);
+			val = wcd9xxx_interface_reg_read(tasha_p->wcd9xxx,
+				reg);
+			if (!(val & BYTE_BIT_MASK(port_num))) {
+				val |= BYTE_BIT_MASK(port_num);
+				wcd9xxx_interface_reg_write(tasha_p->wcd9xxx,
+					reg, val);
+				val = wcd9xxx_interface_reg_read(
+					tasha_p->wcd9xxx, reg);
+			}
+		}
+	}
+}
+
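+/*
+ * Track pending SLIMbus channel setup/teardown: on 'up' the slave port
+ * numbers are latched into dai->ch_mask; on 'down' we wait for ch_mask
+ * to drain (the bits are cleared elsewhere as the ports actually close),
+ * timing out after TASHA_SLIM_CLOSE_TIMEOUT ms.
+ */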
+static int tasha_codec_enable_slim_chmask(struct wcd9xxx_codec_dai_data *dai,
+					  bool up)
+{
+	int ret = 0;
+	struct wcd9xxx_ch *ch;
+
+	if (up) {
+		list_for_each_entry(ch, &dai->wcd9xxx_ch_list, list) {
+			ret = wcd9xxx_get_slave_port(ch->ch_num);
+			if (ret < 0) {
+				pr_err("%s: Invalid slave port ID: %d\n",
+				       __func__, ret);
+				ret = -EINVAL;
+			} else {
+				set_bit(ret, &dai->ch_mask);
+			}
+		}
+	} else {
+		ret = wait_event_timeout(dai->dai_wait, (dai->ch_mask == 0),
+					 msecs_to_jiffies(
+						TASHA_SLIM_CLOSE_TIMEOUT));
+		if (!ret) {
+			pr_err("%s: Slim close tx/rx wait timeout, ch_mask:0x%lx\n",
+				__func__, dai->ch_mask);
+			ret = -ETIMEDOUT;
+		} else {
+			ret = 0;
+		}
+	}
+	return ret;
+}
+
+static int tasha_codec_enable_slimrx(struct snd_soc_dapm_widget *w,
+				     struct snd_kcontrol *kcontrol,
+				     int event)
+{
+	struct wcd9xxx *core;
+	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+	struct tasha_priv *tasha_p = snd_soc_codec_get_drvdata(codec);
+	int ret = 0;
+	struct wcd9xxx_codec_dai_data *dai;
+
+	core = dev_get_drvdata(codec->dev->parent);
+
+	dev_dbg(codec->dev, "%s: event called! codec name %s num_dai %d\n"
+		"stream name %s event %d\n",
+		__func__, codec->component.name,
+		codec->component.num_dai, w->sname, event);
+
+	/* Execute the callback only if interface type is slimbus */
+	if (tasha_p->intf_type != WCD9XXX_INTERFACE_TYPE_SLIMBUS)
+		return 0;
+
+	dai = &tasha_p->dai[w->shift];
+	dev_dbg(codec->dev, "%s: w->name %s w->shift %d event %d\n",
+		 __func__, w->name, w->shift, event);
+
+	switch (event) {
+	case SND_SOC_DAPM_POST_PMU:
+		dai->bus_down_in_recovery = false;
+		tasha_codec_enable_int_port(dai, codec);
+		(void) tasha_codec_enable_slim_chmask(dai, true);
+		ret = wcd9xxx_cfg_slim_sch_rx(core, &dai->wcd9xxx_ch_list,
+					      dai->rate, dai->bit_width,
+					      &dai->grph);
+		break;
+	case SND_SOC_DAPM_PRE_PMD:
+		tasha_codec_vote_max_bw(codec, true);
+		break;
+	case SND_SOC_DAPM_POST_PMD:
+		ret = wcd9xxx_disconnect_port(core, &dai->wcd9xxx_ch_list,
+					      dai->grph);
+		dev_dbg(codec->dev, "%s: Disconnect RX port, ret = %d\n",
+			__func__, ret);
+
+		if (!dai->bus_down_in_recovery)
+			ret = tasha_codec_enable_slim_chmask(dai, false);
+		else
+			dev_dbg(codec->dev,
+				"%s: bus in recovery skip enable slim_chmask",
+				__func__);
+		ret = wcd9xxx_close_slim_sch_rx(core, &dai->wcd9xxx_ch_list,
+						dai->grph);
+		break;
+	}
+	return ret;
+}
+
+static int tasha_codec_enable_slimvi_feedback(struct snd_soc_dapm_widget *w,
+					      struct snd_kcontrol *kcontrol,
+					      int event)
+{
+	struct wcd9xxx *core = NULL;
+	struct snd_soc_codec *codec = NULL;
+	struct tasha_priv *tasha_p = NULL;
+	int ret = 0;
+	struct wcd9xxx_codec_dai_data *dai = NULL;
+
+	if (!w) {
+		pr_err("%s invalid params\n", __func__);
+		return -EINVAL;
+	}
+	codec = snd_soc_dapm_to_codec(w->dapm);
+	tasha_p = snd_soc_codec_get_drvdata(codec);
+	core = tasha_p->wcd9xxx;
+
+	dev_dbg(codec->dev, "%s: num_dai %d stream name %s\n",
+		__func__, codec->component.num_dai, w->sname);
+
+	/* Execute the callback only if interface type is slimbus */
+	if (tasha_p->intf_type != WCD9XXX_INTERFACE_TYPE_SLIMBUS) {
+		dev_err(codec->dev, "%s Interface is not correct", __func__);
+		return 0;
+	}
+
+	dev_dbg(codec->dev, "%s(): w->name %s event %d w->shift %d\n",
+		__func__, w->name, event, w->shift);
+	if (w->shift != AIF4_VIFEED) {
+		pr_err("%s Error in enabling the tx path\n", __func__);
+		ret = -EINVAL;
+		goto out_vi;
+	}
+	dai = &tasha_p->dai[w->shift];
+	switch (event) {
+	case SND_SOC_DAPM_POST_PMU:
+		if (test_bit(VI_SENSE_1, &tasha_p->status_mask)) {
+			dev_dbg(codec->dev, "%s: spkr1 enabled\n", __func__);
+			/* Enable V&I sensing */
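+			/*
+			 * Assumed TXn_SPKR_PROT_PATH_CTL layout: bit 5
+			 * holds the path in reset, bits 0-3 select the PCM
+			 * rate and bit 4 enables the path; hence the
+			 * reset -> rate -> enable -> release sequence.
+			 */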
+			snd_soc_update_bits(codec,
+				WCD9335_CDC_TX9_SPKR_PROT_PATH_CTL, 0x20, 0x20);
+			snd_soc_update_bits(codec,
+				WCD9335_CDC_TX10_SPKR_PROT_PATH_CTL, 0x20,
+				0x20);
+			snd_soc_update_bits(codec,
+				WCD9335_CDC_TX9_SPKR_PROT_PATH_CTL, 0x0F, 0x00);
+			snd_soc_update_bits(codec,
+				WCD9335_CDC_TX10_SPKR_PROT_PATH_CTL, 0x0F,
+				0x00);
+			snd_soc_update_bits(codec,
+				WCD9335_CDC_TX9_SPKR_PROT_PATH_CTL, 0x10, 0x10);
+			snd_soc_update_bits(codec,
+				WCD9335_CDC_TX10_SPKR_PROT_PATH_CTL, 0x10,
+				0x10);
+			snd_soc_update_bits(codec,
+				WCD9335_CDC_TX9_SPKR_PROT_PATH_CTL, 0x20, 0x00);
+			snd_soc_update_bits(codec,
+				WCD9335_CDC_TX10_SPKR_PROT_PATH_CTL, 0x20,
+				0x00);
+		}
+		if (test_bit(VI_SENSE_2, &tasha_p->status_mask)) {
+			pr_debug("%s: spkr2 enabled\n", __func__);
+			/* Enable V&I sensing */
+			snd_soc_update_bits(codec,
+				WCD9335_CDC_TX11_SPKR_PROT_PATH_CTL, 0x20,
+				0x20);
+			snd_soc_update_bits(codec,
+				WCD9335_CDC_TX12_SPKR_PROT_PATH_CTL, 0x20,
+				0x20);
+			snd_soc_update_bits(codec,
+				WCD9335_CDC_TX11_SPKR_PROT_PATH_CTL, 0x0F,
+				0x00);
+			snd_soc_update_bits(codec,
+				WCD9335_CDC_TX12_SPKR_PROT_PATH_CTL, 0x0F,
+				0x00);
+			snd_soc_update_bits(codec,
+				WCD9335_CDC_TX11_SPKR_PROT_PATH_CTL, 0x10,
+				0x10);
+			snd_soc_update_bits(codec,
+				WCD9335_CDC_TX12_SPKR_PROT_PATH_CTL, 0x10,
+				0x10);
+			snd_soc_update_bits(codec,
+				WCD9335_CDC_TX11_SPKR_PROT_PATH_CTL, 0x20,
+				0x00);
+			snd_soc_update_bits(codec,
+				WCD9335_CDC_TX12_SPKR_PROT_PATH_CTL, 0x20,
+				0x00);
+		}
+		dai->bus_down_in_recovery = false;
+		tasha_codec_enable_int_port(dai, codec);
+		(void) tasha_codec_enable_slim_chmask(dai, true);
+		ret = wcd9xxx_cfg_slim_sch_tx(core, &dai->wcd9xxx_ch_list,
+					      dai->rate, dai->bit_width,
+					      &dai->grph);
+		break;
+	case SND_SOC_DAPM_POST_PMD:
+		ret = wcd9xxx_close_slim_sch_tx(core, &dai->wcd9xxx_ch_list,
+						dai->grph);
+		if (ret)
+			dev_err(codec->dev, "%s error in close_slim_sch_tx %d\n",
+				__func__, ret);
+		if (!dai->bus_down_in_recovery)
+			ret = tasha_codec_enable_slim_chmask(dai, false);
+		if (ret < 0) {
+			ret = wcd9xxx_disconnect_port(core,
+				&dai->wcd9xxx_ch_list,
+				dai->grph);
+			dev_dbg(codec->dev, "%s: Disconnect TX port, ret = %d\n",
+				__func__, ret);
+		}
+		if (test_bit(VI_SENSE_1, &tasha_p->status_mask)) {
+			/* Disable V&I sensing */
+			dev_dbg(codec->dev, "%s: spkr1 disabled\n", __func__);
+			snd_soc_update_bits(codec,
+				WCD9335_CDC_TX9_SPKR_PROT_PATH_CTL, 0x20, 0x20);
+			snd_soc_update_bits(codec,
+				WCD9335_CDC_TX10_SPKR_PROT_PATH_CTL, 0x20,
+				0x20);
+			snd_soc_update_bits(codec,
+				WCD9335_CDC_TX9_SPKR_PROT_PATH_CTL, 0x10, 0x00);
+			snd_soc_update_bits(codec,
+				WCD9335_CDC_TX10_SPKR_PROT_PATH_CTL, 0x10,
+				0x00);
+		}
+		if (test_bit(VI_SENSE_2, &tasha_p->status_mask)) {
+			/* Disable V&I sensing */
+			dev_dbg(codec->dev, "%s: spkr2 disabled\n", __func__);
+			snd_soc_update_bits(codec,
+				WCD9335_CDC_TX11_SPKR_PROT_PATH_CTL, 0x20,
+				0x20);
+			snd_soc_update_bits(codec,
+				WCD9335_CDC_TX12_SPKR_PROT_PATH_CTL, 0x20,
+				0x20);
+			snd_soc_update_bits(codec,
+				WCD9335_CDC_TX11_SPKR_PROT_PATH_CTL, 0x10,
+				0x00);
+			snd_soc_update_bits(codec,
+				WCD9335_CDC_TX12_SPKR_PROT_PATH_CTL, 0x10,
+				0x00);
+		}
+		break;
+	}
+out_vi:
+	return ret;
+}
+
+/*
+ * __tasha_codec_enable_slimtx: Enable the slimbus slave port
+ *				 for TX path
+ * @codec: Handle to the codec for which the slave port is to be
+ *	   enabled.
+ * @event: DAPM event for which the port is set up or torn down.
+ * @dai: The DAI-specific data for the DAI being enabled.
+ */
+static int __tasha_codec_enable_slimtx(struct snd_soc_codec *codec,
+		int event, struct wcd9xxx_codec_dai_data *dai)
+{
+	struct wcd9xxx *core;
+	struct tasha_priv *tasha_p = snd_soc_codec_get_drvdata(codec);
+	int ret = 0;
+
+	/* Execute the callback only if interface type is slimbus */
+	if (tasha_p->intf_type != WCD9XXX_INTERFACE_TYPE_SLIMBUS)
+		return 0;
+
+	dev_dbg(codec->dev,
+		"%s: event = %d\n", __func__, event);
+	core = dev_get_drvdata(codec->dev->parent);
+
+	switch (event) {
+	case SND_SOC_DAPM_POST_PMU:
+		dai->bus_down_in_recovery = false;
+		tasha_codec_enable_int_port(dai, codec);
+		(void) tasha_codec_enable_slim_chmask(dai, true);
+		ret = wcd9xxx_cfg_slim_sch_tx(core, &dai->wcd9xxx_ch_list,
+					      dai->rate, dai->bit_width,
+					      &dai->grph);
+		break;
+	case SND_SOC_DAPM_POST_PMD:
+		ret = wcd9xxx_close_slim_sch_tx(core, &dai->wcd9xxx_ch_list,
+						dai->grph);
+		if (!dai->bus_down_in_recovery)
+			ret = tasha_codec_enable_slim_chmask(dai, false);
+		if (ret < 0) {
+			ret = wcd9xxx_disconnect_port(core,
+						      &dai->wcd9xxx_ch_list,
+						      dai->grph);
+			pr_debug("%s: Disconnect TX port, ret = %d\n",
+				 __func__, ret);
+		}
+		break;
+	}
+
+	return ret;
+}
+
+static int tasha_codec_enable_slimtx(struct snd_soc_dapm_widget *w,
+				     struct snd_kcontrol *kcontrol,
+				     int event)
+{
+	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+	struct tasha_priv *tasha_p = snd_soc_codec_get_drvdata(codec);
+	struct wcd9xxx_codec_dai_data *dai;
+
+	dev_dbg(codec->dev,
+		"%s: w->name %s, w->shift = %d, num_dai %d stream name %s\n",
+		__func__, w->name, w->shift,
+		codec->component.num_dai, w->sname);
+
+	dai = &tasha_p->dai[w->shift];
+	return __tasha_codec_enable_slimtx(codec, event, dai);
+}
+
+static void tasha_codec_cpe_pp_set_cfg(struct snd_soc_codec *codec, int event)
+{
+	struct tasha_priv *tasha_p = snd_soc_codec_get_drvdata(codec);
+	struct wcd9xxx_codec_dai_data *dai;
+	u8 bit_width, rate, buf_period;
+
+	dai = &tasha_p->dai[AIF4_MAD_TX];
+	switch (event) {
+	case SND_SOC_DAPM_POST_PMU:
+		switch (dai->bit_width) {
+		case 32:
+			bit_width = 0xF;
+			break;
+		case 24:
+			bit_width = 0xE;
+			break;
+		case 20:
+			bit_width = 0xD;
+			break;
+		case 16:
+		default:
+			bit_width = 0x0;
+			break;
+		}
+		snd_soc_update_bits(codec, WCD9335_CPE_SS_TX_PP_CFG, 0x0F,
+				    bit_width);
+
+		switch (dai->rate) {
+		case 384000:
+			rate = 0x30;
+			break;
+		case 192000:
+			rate = 0x20;
+			break;
+		case 48000:
+			rate = 0x10;
+			break;
+		case 16000:
+		default:
+			rate = 0x00;
+			break;
+		}
+		snd_soc_update_bits(codec, WCD9335_CPE_SS_TX_PP_CFG, 0x70,
+				    rate);
+
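+		/*
+		 * Buffer interrupt period: bytes produced per 1/16000 s
+		 * tick, i.e. rate * bytes-per-sample / 16000 (units
+		 * inferred from the divisor; they are not documented here).
+		 */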
+		buf_period = (dai->rate * (dai->bit_width/8)) / (16*1000);
+		snd_soc_update_bits(codec, WCD9335_CPE_SS_TX_PP_BUF_INT_PERIOD,
+				    0xFF, buf_period);
+		dev_dbg(codec->dev, "%s: PP buffer period= 0x%x\n",
+			__func__, buf_period);
+		break;
+
+	case SND_SOC_DAPM_POST_PMD:
+		snd_soc_write(codec, WCD9335_CPE_SS_TX_PP_CFG, 0x3C);
+		snd_soc_write(codec, WCD9335_CPE_SS_TX_PP_BUF_INT_PERIOD, 0x60);
+		break;
+
+	default:
+		break;
+	}
+}
+
+/*
+ * tasha_codec_get_mad_port_id: Callback function that will be invoked
+ *	to get the port ID for MAD.
+ * @codec: Handle to the codec
+ * @port_id: Filled with the CPE AFE port ID that needs to be enabled.
+ */
+static int tasha_codec_get_mad_port_id(struct snd_soc_codec *codec,
+				       u16 *port_id)
+{
+	struct tasha_priv *tasha_p;
+	struct wcd9xxx_codec_dai_data *dai;
+	struct wcd9xxx_ch *ch;
+
+	if (!port_id || !codec)
+		return -EINVAL;
+
+	tasha_p = snd_soc_codec_get_drvdata(codec);
+	if (!tasha_p)
+		return -EINVAL;
+
+	dai = &tasha_p->dai[AIF4_MAD_TX];
+	list_for_each_entry(ch, &dai->wcd9xxx_ch_list, list) {
+		if (ch->port == TASHA_TX12)
+			*port_id = WCD_CPE_AFE_OUT_PORT_2;
+		else if (ch->port == TASHA_TX13)
+			*port_id = WCD_CPE_AFE_OUT_PORT_4;
+		else {
+			dev_err(codec->dev, "%s: invalid mad_port = %d\n",
+					__func__, ch->port);
+			return -EINVAL;
+		}
+	}
+	dev_dbg(codec->dev, "%s: port_id = %d\n", __func__, *port_id);
+
+	return 0;
+}
+
+/*
+ * tasha_codec_enable_slimtx_mad: Callback function that will be invoked
+ *	to setup the slave port for MAD.
+ * @codec: Handle to the codec
+ * @event: Indicates whether to enable or disable the slave port
+ */
+static int tasha_codec_enable_slimtx_mad(struct snd_soc_codec *codec,
+					 u8 event)
+{
+	struct tasha_priv *tasha_p = snd_soc_codec_get_drvdata(codec);
+	struct wcd9xxx_codec_dai_data *dai;
+	struct wcd9xxx_ch *ch;
+	int dapm_event = SND_SOC_DAPM_POST_PMU;
+	u16 port = 0;
+	int ret = 0;
+
+	dai = &tasha_p->dai[AIF4_MAD_TX];
+
+	if (event == 0)
+		dapm_event = SND_SOC_DAPM_POST_PMD;
+
+	dev_dbg(codec->dev,
+		"%s: mad_channel, event = 0x%x\n",
+		 __func__, event);
+
+	list_for_each_entry(ch, &dai->wcd9xxx_ch_list, list) {
+		dev_dbg(codec->dev, "%s: mad_port = %d, event = 0x%x\n",
+			__func__, ch->port, event);
+		if (ch->port == TASHA_TX13) {
+			tasha_codec_cpe_pp_set_cfg(codec, dapm_event);
+			port = TASHA_TX13;
+			break;
+		}
+	}
+
+	ret = __tasha_codec_enable_slimtx(codec, dapm_event, dai);
+
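+	/*
+	 * TX13 is fed from the CPE post-processing block: keep the CPE
+	 * DRAM powered, point the data-hub SB TX13 input at CPE and
+	 * toggle the CPE buffer enable around the port state change
+	 * (register roles inferred from the names and masks used).
+	 */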
+	if (port == TASHA_TX13) {
+		switch (dapm_event) {
+		case SND_SOC_DAPM_POST_PMU:
+			snd_soc_update_bits(codec,
+				WCD9335_CODEC_RPM_PWR_CPE_DRAM1_SHUTDOWN,
+				0x20, 0x00);
+			snd_soc_update_bits(codec,
+				WCD9335_DATA_HUB_DATA_HUB_SB_TX13_INP_CFG,
+				0x03, 0x02);
+			snd_soc_update_bits(codec, WCD9335_CPE_SS_CFG,
+					    0x80, 0x80);
+			break;
+		case SND_SOC_DAPM_POST_PMD:
+			snd_soc_update_bits(codec,
+				WCD9335_CODEC_RPM_PWR_CPE_DRAM1_SHUTDOWN,
+				0x20, 0x20);
+			snd_soc_update_bits(codec,
+				WCD9335_DATA_HUB_DATA_HUB_SB_TX13_INP_CFG,
+				0x03, 0x00);
+			snd_soc_update_bits(codec, WCD9335_CPE_SS_CFG,
+					    0x80, 0x00);
+			break;
+		}
+	}
+
+	return ret;
+}
+
+static int tasha_put_iir_band_audio_mixer(
+					struct snd_kcontrol *kcontrol,
+					struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
+	int iir_idx = ((struct soc_multi_mixer_control *)
+					kcontrol->private_value)->reg;
+	int band_idx = ((struct soc_multi_mixer_control *)
+					kcontrol->private_value)->shift;
+
+	/*
+	 * Mask top bit it is reserved
+	 * Updates addr automatically for each B2 write
+	 */
+	snd_soc_write(codec,
+		(WCD9335_CDC_SIDETONE_IIR0_IIR_COEF_B1_CTL + 16 * iir_idx),
+		(band_idx * BAND_MAX * sizeof(uint32_t)) & 0x7F);
+
+	set_iir_band_coeff(codec, iir_idx, band_idx,
+				ucontrol->value.integer.value[0]);
+	set_iir_band_coeff(codec, iir_idx, band_idx,
+				ucontrol->value.integer.value[1]);
+	set_iir_band_coeff(codec, iir_idx, band_idx,
+				ucontrol->value.integer.value[2]);
+	set_iir_band_coeff(codec, iir_idx, band_idx,
+				ucontrol->value.integer.value[3]);
+	set_iir_band_coeff(codec, iir_idx, band_idx,
+				ucontrol->value.integer.value[4]);
+
+	pr_debug("%s: IIR #%d band #%d b0 = 0x%x\n"
+		"%s: IIR #%d band #%d b1 = 0x%x\n"
+		"%s: IIR #%d band #%d b2 = 0x%x\n"
+		"%s: IIR #%d band #%d a1 = 0x%x\n"
+		"%s: IIR #%d band #%d a2 = 0x%x\n",
+		__func__, iir_idx, band_idx,
+		get_iir_band_coeff(codec, iir_idx, band_idx, 0),
+		__func__, iir_idx, band_idx,
+		get_iir_band_coeff(codec, iir_idx, band_idx, 1),
+		__func__, iir_idx, band_idx,
+		get_iir_band_coeff(codec, iir_idx, band_idx, 2),
+		__func__, iir_idx, band_idx,
+		get_iir_band_coeff(codec, iir_idx, band_idx, 3),
+		__func__, iir_idx, band_idx,
+		get_iir_band_coeff(codec, iir_idx, band_idx, 4));
+	return 0;
+}
+
+static int tasha_get_compander(struct snd_kcontrol *kcontrol,
+			       struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
+	int comp = ((struct soc_multi_mixer_control *)
+		    kcontrol->private_value)->shift;
+	struct tasha_priv *tasha = snd_soc_codec_get_drvdata(codec);
+
+	ucontrol->value.integer.value[0] = tasha->comp_enabled[comp];
+	return 0;
+}
+
+static int tasha_set_compander(struct snd_kcontrol *kcontrol,
+			       struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
+	struct tasha_priv *tasha = snd_soc_codec_get_drvdata(codec);
+	int comp = ((struct soc_multi_mixer_control *)
+		    kcontrol->private_value)->shift;
+	int value = ucontrol->value.integer.value[0];
+
+	pr_debug("%s: Compander %d enable current %d, new %d\n",
+		 __func__, comp + 1, tasha->comp_enabled[comp], value);
+	tasha->comp_enabled[comp] = value;
+
+	/* Any specific register configuration for compander */
+	switch (comp) {
+	case COMPANDER_1:
+		/* Set Gain Source Select based on compander enable/disable */
+		snd_soc_update_bits(codec, WCD9335_HPH_L_EN, 0x20,
+				(value ? 0x00:0x20));
+		break;
+	case COMPANDER_2:
+		snd_soc_update_bits(codec, WCD9335_HPH_R_EN, 0x20,
+				(value ? 0x00:0x20));
+		break;
+	case COMPANDER_3:
+		break;
+	case COMPANDER_4:
+		break;
+	case COMPANDER_5:
+		snd_soc_update_bits(codec, WCD9335_SE_LO_LO3_GAIN, 0x20,
+				(value ? 0x00:0x20));
+		break;
+	case COMPANDER_6:
+		snd_soc_update_bits(codec, WCD9335_SE_LO_LO4_GAIN, 0x20,
+				(value ? 0x00:0x20));
+		break;
+	case COMPANDER_7:
+		break;
+	case COMPANDER_8:
+		break;
+	default:
+		/*
+		 * if compander is not enabled for any interpolator,
+		 * it does not cause any audio failure, so do not
+		 * return error in this case, but just print a log
+		 */
+		dev_warn(codec->dev, "%s: unknown compander: %d\n",
+			__func__, comp);
+	}
+	return 0;
+}
+
+static void tasha_codec_init_flyback(struct snd_soc_codec *codec)
+{
+	snd_soc_update_bits(codec, WCD9335_HPH_L_EN, 0xC0, 0x00);
+	snd_soc_update_bits(codec, WCD9335_HPH_R_EN, 0xC0, 0x00);
+	snd_soc_update_bits(codec, WCD9335_RX_BIAS_FLYB_BUFF, 0x0F, 0x00);
+	snd_soc_update_bits(codec, WCD9335_RX_BIAS_FLYB_BUFF, 0xF0, 0x00);
+}
+
+static int tasha_codec_enable_rx_bias(struct snd_soc_dapm_widget *w,
+		struct snd_kcontrol *kcontrol, int event)
+{
+	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+	struct tasha_priv *tasha = snd_soc_codec_get_drvdata(codec);
+
+	dev_dbg(codec->dev, "%s %s %d\n", __func__, w->name, event);
+
+	switch (event) {
+	case SND_SOC_DAPM_PRE_PMU:
+		tasha->rx_bias_count++;
+		if (tasha->rx_bias_count == 1) {
+			if (TASHA_IS_2_0(tasha->wcd9xxx))
+				tasha_codec_init_flyback(codec);
+			snd_soc_update_bits(codec, WCD9335_ANA_RX_SUPPLIES,
+					    0x01, 0x01);
+		}
+		break;
+	case SND_SOC_DAPM_POST_PMD:
+		tasha->rx_bias_count--;
+		if (!tasha->rx_bias_count)
+			snd_soc_update_bits(codec, WCD9335_ANA_RX_SUPPLIES,
+					    0x01, 0x00);
+		break;
+	}
+	dev_dbg(codec->dev, "%s: Current RX BIAS user count: %d\n", __func__,
+		tasha->rx_bias_count);
+
+	return 0;
+}
+
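+/*
+ * The ANC coefficient memory is accessed through an index register
+ * (reg1) and a data window (reg2) whose internal pointer advances on
+ * access, as the back-to-back reads below assume. Save the two live
+ * values, write a known 0xFF pattern at indices 0 and 1, read it back
+ * to discover whether the pointer is aligned, then restore the saved
+ * values in the order matching the detected alignment.
+ */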
+static void tasha_realign_anc_coeff(struct snd_soc_codec *codec,
+				    u16 reg1, u16 reg2)
+{
+	u8 val1, val2, tmpval1, tmpval2;
+
+	snd_soc_write(codec, reg1, 0x00);
+	tmpval1 = snd_soc_read(codec, reg2);
+	tmpval2 = snd_soc_read(codec, reg2);
+	snd_soc_write(codec, reg1, 0x00);
+	snd_soc_write(codec, reg2, 0xFF);
+	snd_soc_write(codec, reg1, 0x01);
+	snd_soc_write(codec, reg2, 0xFF);
+
+	snd_soc_write(codec, reg1, 0x00);
+	val1 = snd_soc_read(codec, reg2);
+	val2 = snd_soc_read(codec, reg2);
+
+	if (val1 == 0x0F && val2 == 0xFF) {
+		dev_dbg(codec->dev, "%s: ANC0 co-eff index re-aligned\n",
+			__func__);
+		snd_soc_read(codec, reg2);
+		snd_soc_write(codec, reg1, 0x00);
+		snd_soc_write(codec, reg2, tmpval2);
+		snd_soc_write(codec, reg1, 0x01);
+		snd_soc_write(codec, reg2, tmpval1);
+	} else if (val1 == 0xFF && val2 == 0x0F) {
+		dev_dbg(codec->dev, "%s: ANC1 co-eff index already aligned\n",
+			__func__);
+		snd_soc_write(codec, reg1, 0x00);
+		snd_soc_write(codec, reg2, tmpval1);
+		snd_soc_write(codec, reg1, 0x01);
+		snd_soc_write(codec, reg2, tmpval2);
+	} else {
+		dev_err(codec->dev, "%s: ANC0 co-eff index not aligned\n",
+			__func__);
+	}
+}
+
+static int tasha_codec_enable_anc(struct snd_soc_dapm_widget *w,
+				  struct snd_kcontrol *kcontrol, int event)
+{
+	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+	struct tasha_priv *tasha = snd_soc_codec_get_drvdata(codec);
+	const char *filename;
+	const struct firmware *fw;
+	int i;
+	int ret = 0;
+	int num_anc_slots;
+	struct wcd9xxx_anc_header *anc_head;
+	struct firmware_cal *hwdep_cal = NULL;
+	u32 anc_writes_size = 0;
+	u32 anc_cal_size = 0;
+	int anc_size_remaining;
+	u32 *anc_ptr;
+	u16 reg;
+	u8 mask, val;
+	size_t cal_size;
+	const void *data;
+
+	if (!tasha->anc_func)
+		return 0;
+
+	switch (event) {
+	case SND_SOC_DAPM_PRE_PMU:
+		hwdep_cal = wcdcal_get_fw_cal(tasha->fw_data, WCD9XXX_ANC_CAL);
+		if (hwdep_cal) {
+			data = hwdep_cal->data;
+			cal_size = hwdep_cal->size;
+			dev_dbg(codec->dev, "%s: using hwdep calibration\n",
+				__func__);
+		} else {
+			filename = "wcd9335/wcd9335_anc.bin";
+			ret = request_firmware(&fw, filename, codec->dev);
+			if (ret != 0) {
+				dev_err(codec->dev,
+				"Failed to acquire ANC data: %d\n", ret);
+				return -ENODEV;
+			}
+			if (!fw) {
+				dev_err(codec->dev, "failed to get anc fw\n");
+				return -ENODEV;
+			}
+			data = fw->data;
+			cal_size = fw->size;
+			dev_dbg(codec->dev,
+			"%s: using request_firmware calibration\n", __func__);
+		}
+		if (cal_size < sizeof(struct wcd9xxx_anc_header)) {
+			dev_err(codec->dev, "Not enough data\n");
+			ret = -ENOMEM;
+			goto err;
+		}
+		/* First number is the number of register writes */
+		anc_head = (struct wcd9xxx_anc_header *)(data);
+		anc_ptr = (u32 *)(data +
+				  sizeof(struct wcd9xxx_anc_header));
+		anc_size_remaining = cal_size -
+				     sizeof(struct wcd9xxx_anc_header);
+		num_anc_slots = anc_head->num_anc_slots;
+
+		if (tasha->anc_slot >= num_anc_slots) {
+			dev_err(codec->dev, "Invalid ANC slot selected\n");
+			ret = -EINVAL;
+			goto err;
+		}
+		for (i = 0; i < num_anc_slots; i++) {
+			if (anc_size_remaining < TASHA_PACKED_REG_SIZE) {
+				dev_err(codec->dev,
+					"Invalid register format\n");
+				ret = -EINVAL;
+				goto err;
+			}
+			anc_writes_size = (u32)(*anc_ptr);
+			anc_size_remaining -= sizeof(u32);
+			anc_ptr += 1;
+
+			if (anc_writes_size * TASHA_PACKED_REG_SIZE
+				> anc_size_remaining) {
+				dev_err(codec->dev,
+					"Invalid register format\n");
+				ret = -EINVAL;
+				goto err;
+			}
+
+			if (tasha->anc_slot == i)
+				break;
+
+			anc_size_remaining -= (anc_writes_size *
+				TASHA_PACKED_REG_SIZE);
+			anc_ptr += anc_writes_size;
+		}
+		if (i == num_anc_slots) {
+			dev_err(codec->dev, "Selected ANC slot not present\n");
+			ret = -EINVAL;
+			goto err;
+		}
+
+		i = 0;
+		anc_cal_size = anc_writes_size;
+
+		if (!strcmp(w->name, "RX INT0 DAC") ||
+		    !strcmp(w->name, "ANC SPK1 PA"))
+			tasha_realign_anc_coeff(codec,
+					WCD9335_CDC_ANC0_IIR_COEFF_1_CTL,
+					WCD9335_CDC_ANC0_IIR_COEFF_2_CTL);
+
+		if (!strcmp(w->name, "RX INT1 DAC") ||
+			!strcmp(w->name, "RX INT3 DAC")) {
+			tasha_realign_anc_coeff(codec,
+					WCD9335_CDC_ANC0_IIR_COEFF_1_CTL,
+					WCD9335_CDC_ANC0_IIR_COEFF_2_CTL);
+			anc_writes_size = anc_cal_size / 2;
+			snd_soc_update_bits(codec,
+			WCD9335_CDC_ANC0_CLK_RESET_CTL, 0x39, 0x39);
+		} else if (!strcmp(w->name, "RX INT2 DAC") ||
+				!strcmp(w->name, "RX INT4 DAC")) {
+			tasha_realign_anc_coeff(codec,
+					WCD9335_CDC_ANC1_IIR_COEFF_1_CTL,
+					WCD9335_CDC_ANC1_IIR_COEFF_2_CTL);
+			i = anc_cal_size / 2;
+			snd_soc_update_bits(codec,
+			WCD9335_CDC_ANC1_CLK_RESET_CTL, 0x39, 0x39);
+		}
+
+		for (; i < anc_writes_size; i++) {
+			TASHA_CODEC_UNPACK_ENTRY(anc_ptr[i], reg, mask, val);
+			snd_soc_write(codec, reg, (val & mask));
+		}
+		if (!strcmp(w->name, "RX INT1 DAC") ||
+			!strcmp(w->name, "RX INT3 DAC")) {
+			snd_soc_update_bits(codec,
+				WCD9335_CDC_ANC0_CLK_RESET_CTL, 0x08, 0x08);
+		} else if (!strcmp(w->name, "RX INT2 DAC") ||
+				!strcmp(w->name, "RX INT4 DAC")) {
+			snd_soc_update_bits(codec,
+				WCD9335_CDC_ANC1_CLK_RESET_CTL, 0x08, 0x08);
+		}
+
+		if (!hwdep_cal)
+			release_firmware(fw);
+		break;
+	case SND_SOC_DAPM_POST_PMU:
+		/* Remove ANC Rx from reset */
+		snd_soc_update_bits(codec, WCD9335_CDC_ANC0_CLK_RESET_CTL,
+				    0x08, 0x00);
+		snd_soc_update_bits(codec, WCD9335_CDC_ANC1_CLK_RESET_CTL,
+				    0x08, 0x00);
+		break;
+	case SND_SOC_DAPM_POST_PMD:
+		if (!strcmp(w->name, "ANC HPHL PA") ||
+		    !strcmp(w->name, "ANC EAR PA") ||
+		    !strcmp(w->name, "ANC SPK1 PA") ||
+		    !strcmp(w->name, "ANC LINEOUT1 PA")) {
+			snd_soc_update_bits(codec,
+				WCD9335_CDC_ANC0_MODE_1_CTL, 0x30, 0x00);
+			msleep(50);
+			snd_soc_update_bits(codec,
+				WCD9335_CDC_ANC0_MODE_1_CTL, 0x01, 0x00);
+			snd_soc_update_bits(codec,
+				WCD9335_CDC_ANC0_CLK_RESET_CTL, 0x38, 0x38);
+			snd_soc_update_bits(codec,
+				WCD9335_CDC_ANC0_CLK_RESET_CTL, 0x07, 0x00);
+			snd_soc_update_bits(codec,
+				WCD9335_CDC_ANC0_CLK_RESET_CTL, 0x38, 0x00);
+		} else if (!strcmp(w->name, "ANC HPHR PA") ||
+			   !strcmp(w->name, "ANC LINEOUT2 PA")) {
+			snd_soc_update_bits(codec,
+				WCD9335_CDC_ANC1_MODE_1_CTL, 0x30, 0x00);
+			msleep(50);
+			snd_soc_update_bits(codec,
+				WCD9335_CDC_ANC1_MODE_1_CTL, 0x01, 0x00);
+			snd_soc_update_bits(codec,
+				WCD9335_CDC_ANC1_CLK_RESET_CTL, 0x38, 0x38);
+			snd_soc_update_bits(codec,
+				WCD9335_CDC_ANC1_CLK_RESET_CTL, 0x07, 0x00);
+			snd_soc_update_bits(codec,
+				WCD9335_CDC_ANC1_CLK_RESET_CTL, 0x38, 0x00);
+		}
+		break;
+	}
+
+	return 0;
+err:
+	if (!hwdep_cal)
+		release_firmware(fw);
+	return ret;
+}
+
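+/* Release the Tx FE HOLD on every AMIC flagged during ANC setup */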
+static void tasha_codec_clear_anc_tx_hold(struct tasha_priv *tasha)
+{
+	if (test_and_clear_bit(ANC_MIC_AMIC1, &tasha->status_mask))
+		tasha_codec_set_tx_hold(tasha->codec, WCD9335_ANA_AMIC1, false);
+	if (test_and_clear_bit(ANC_MIC_AMIC2, &tasha->status_mask))
+		tasha_codec_set_tx_hold(tasha->codec, WCD9335_ANA_AMIC2, false);
+	if (test_and_clear_bit(ANC_MIC_AMIC3, &tasha->status_mask))
+		tasha_codec_set_tx_hold(tasha->codec, WCD9335_ANA_AMIC3, false);
+	if (test_and_clear_bit(ANC_MIC_AMIC4, &tasha->status_mask))
+		tasha_codec_set_tx_hold(tasha->codec, WCD9335_ANA_AMIC4, false);
+	if (test_and_clear_bit(ANC_MIC_AMIC5, &tasha->status_mask))
+		tasha_codec_set_tx_hold(tasha->codec, WCD9335_ANA_AMIC5, false);
+	if (test_and_clear_bit(ANC_MIC_AMIC6, &tasha->status_mask))
+		tasha_codec_set_tx_hold(tasha->codec, WCD9335_ANA_AMIC6, false);
+}
+
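+/*
+ * Post-PA configuration for WCD9335 v2.0: scale the HPH PA per
+ * class-H mode on power-up (and to a fixed value before power-down),
+ * release any ANC Tx FE hold once both PAs are up, and apply the
+ * cached L/R gains, with the gain source adjusted when companders
+ * are in use.
+ */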
+static void tasha_codec_hph_post_pa_config(struct tasha_priv *tasha,
+					   int mode, int event)
+{
+	u8 scale_val = 0;
+
+	if (!TASHA_IS_2_0(tasha->wcd9xxx))
+		return;
+
+	switch (event) {
+	case SND_SOC_DAPM_POST_PMU:
+		switch (mode) {
+		case CLS_H_HIFI:
+			scale_val = 0x3;
+			break;
+		case CLS_H_LOHIFI:
+			scale_val = 0x1;
+			break;
+		}
+		if (tasha->anc_func) {
+			/* Clear Tx FE HOLD if both PAs are enabled */
+			if ((snd_soc_read(tasha->codec, WCD9335_ANA_HPH) &
+			     0xC0) == 0xC0) {
+				tasha_codec_clear_anc_tx_hold(tasha);
+			}
+		}
+		break;
+	case SND_SOC_DAPM_PRE_PMD:
+		scale_val = 0x6;
+		break;
+	}
+
+	if (scale_val)
+		snd_soc_update_bits(tasha->codec, WCD9335_HPH_PA_CTL1, 0x0E,
+				    scale_val << 1);
+	if (SND_SOC_DAPM_EVENT_ON(event)) {
+		if (tasha->comp_enabled[COMPANDER_1] ||
+		    tasha->comp_enabled[COMPANDER_2]) {
+			snd_soc_update_bits(tasha->codec, WCD9335_HPH_L_EN,
+					    0x20, 0x00);
+			snd_soc_update_bits(tasha->codec, WCD9335_HPH_R_EN,
+					    0x20, 0x00);
+			snd_soc_update_bits(tasha->codec, WCD9335_HPH_AUTO_CHOP,
+					    0x20, 0x20);
+		}
+		snd_soc_update_bits(tasha->codec, WCD9335_HPH_L_EN, 0x1F,
+				    tasha->hph_l_gain);
+		snd_soc_update_bits(tasha->codec, WCD9335_HPH_R_EN, 0x1F,
+				    tasha->hph_r_gain);
+	}
+
+	if (SND_SOC_DAPM_EVENT_OFF(event)) {
+		snd_soc_update_bits(tasha->codec, WCD9335_HPH_AUTO_CHOP, 0x20,
+				    0x00);
+	}
+}
+
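+/*
+ * Class-AB supplies override: assert ANA_RX_SUPPLIES bit 1 after
+ * power-up once both HPH RX paths (RX1/RX2) are un-muted, and
+ * de-assert it on power-down. No-op for the class-H modes.
+ */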
+static void tasha_codec_override(struct snd_soc_codec *codec,
+				 int mode,
+				 int event)
+{
+	if (mode == CLS_AB) {
+		switch (event) {
+		case SND_SOC_DAPM_POST_PMU:
+			if (!(snd_soc_read(codec,
+					WCD9335_CDC_RX2_RX_PATH_CTL) & 0x10) &&
+				(!(snd_soc_read(codec,
+					WCD9335_CDC_RX1_RX_PATH_CTL) & 0x10)))
+				snd_soc_update_bits(codec,
+					WCD9XXX_A_ANA_RX_SUPPLIES, 0x02, 0x02);
+			break;
+		case SND_SOC_DAPM_POST_PMD:
+			snd_soc_update_bits(codec,
+				WCD9XXX_A_ANA_RX_SUPPLIES, 0x02, 0x00);
+			break;
+		}
+	}
+}
+
+static int tasha_codec_enable_hphr_pa(struct snd_soc_dapm_widget *w,
+				      struct snd_kcontrol *kcontrol,
+				      int event)
+{
+	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+	struct tasha_priv *tasha = snd_soc_codec_get_drvdata(codec);
+	int hph_mode = tasha->hph_mode;
+	int ret = 0;
+
+	dev_dbg(codec->dev, "%s %s %d\n", __func__, w->name, event);
+
+	switch (event) {
+	case SND_SOC_DAPM_PRE_PMU:
+		if ((!(strcmp(w->name, "ANC HPHR PA"))) &&
+		    (test_bit(HPH_PA_DELAY, &tasha->status_mask))) {
+			snd_soc_update_bits(codec, WCD9335_ANA_HPH, 0xC0, 0xC0);
+		}
+		set_bit(HPH_PA_DELAY, &tasha->status_mask);
+		if (!(strcmp(w->name, "HPHR PA")))
+			snd_soc_update_bits(codec, WCD9335_ANA_HPH, 0x40, 0x40);
+		break;
+	case SND_SOC_DAPM_POST_PMU:
+		if (!(strcmp(w->name, "ANC HPHR PA"))) {
+			if ((snd_soc_read(codec, WCD9335_ANA_HPH) & 0xC0)
+							!= 0xC0)
+				/*
+				 * If PA_EN is not set (potentially in ANC case)
+				 * then do nothing for POST_PMU and let left
+				 * channel handle everything.
+				 */
+				break;
+		}
+		/*
+		 * 7ms sleep is required after PA is enabled as per
+		 * HW requirement
+		 */
+		if (test_bit(HPH_PA_DELAY, &tasha->status_mask)) {
+			usleep_range(7000, 7100);
+			clear_bit(HPH_PA_DELAY, &tasha->status_mask);
+		}
+		tasha_codec_hph_post_pa_config(tasha, hph_mode, event);
+		snd_soc_update_bits(codec, WCD9335_CDC_RX2_RX_PATH_CTL,
+				    0x10, 0x00);
+		/* Remove mix path mute if it is enabled */
+		if ((snd_soc_read(codec, WCD9335_CDC_RX2_RX_PATH_MIX_CTL)) &
+				  0x10)
+			snd_soc_update_bits(codec,
+					    WCD9335_CDC_RX2_RX_PATH_MIX_CTL,
+					    0x10, 0x00);
+
+		if (!(strcmp(w->name, "ANC HPHR PA"))) {
+			/* Do everything needed for left channel */
+			snd_soc_update_bits(codec, WCD9335_CDC_RX1_RX_PATH_CTL,
+					    0x10, 0x00);
+			/* Remove mix path mute if it is enabled */
+			if ((snd_soc_read(codec,
+					  WCD9335_CDC_RX1_RX_PATH_MIX_CTL)) &
+					  0x10)
+				snd_soc_update_bits(codec,
+						WCD9335_CDC_RX1_RX_PATH_MIX_CTL,
+						0x10, 0x00);
+			/* Remove ANC Rx from reset */
+			ret = tasha_codec_enable_anc(w, kcontrol, event);
+		}
+		tasha_codec_override(codec, hph_mode, event);
+		break;
+
+	case SND_SOC_DAPM_PRE_PMD:
+		blocking_notifier_call_chain(&tasha->notifier,
+					WCD_EVENT_PRE_HPHR_PA_OFF,
+					&tasha->mbhc);
+		tasha_codec_hph_post_pa_config(tasha, hph_mode, event);
+		if (!(strcmp(w->name, "ANC HPHR PA")))
+			snd_soc_update_bits(codec, WCD9335_ANA_HPH, 0x40, 0x00);
+		if (!(strcmp(w->name, "HPHR PA")))
+			snd_soc_update_bits(codec, WCD9335_ANA_HPH, 0x40, 0x00);
+		break;
+	case SND_SOC_DAPM_POST_PMD:
+		/* 5ms sleep is required after PA is disabled as per
+		 * HW requirement
+		 */
+		usleep_range(5000, 5500);
+		tasha_codec_override(codec, hph_mode, event);
+		blocking_notifier_call_chain(&tasha->notifier,
+					WCD_EVENT_POST_HPHR_PA_OFF,
+					&tasha->mbhc);
+
+		if (!(strcmp(w->name, "ANC HPHR PA"))) {
+			ret = tasha_codec_enable_anc(w, kcontrol, event);
+			snd_soc_update_bits(codec,
+				WCD9335_CDC_RX2_RX_PATH_CFG0, 0x10, 0x00);
+		}
+		break;
+	}
+
+	return ret;
+}
+
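+/*
+ * Left-channel counterpart of tasha_codec_enable_hphr_pa(). In the
+ * ANC case, whichever channel powers up last (both PA_EN bits set)
+ * un-mutes and configures both RX paths.
+ */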
+static int tasha_codec_enable_hphl_pa(struct snd_soc_dapm_widget *w,
+				      struct snd_kcontrol *kcontrol,
+				      int event)
+{
+	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+	struct tasha_priv *tasha = snd_soc_codec_get_drvdata(codec);
+	int hph_mode = tasha->hph_mode;
+	int ret = 0;
+
+	dev_dbg(codec->dev, "%s %s %d\n", __func__, w->name, event);
+
+	switch (event) {
+	case SND_SOC_DAPM_PRE_PMU:
+		if ((!(strcmp(w->name, "ANC HPHL PA"))) &&
+		    (test_bit(HPH_PA_DELAY, &tasha->status_mask))) {
+			snd_soc_update_bits(codec, WCD9335_ANA_HPH, 0xC0, 0xC0);
+		}
+		if (!(strcmp(w->name, "HPHL PA")))
+			snd_soc_update_bits(codec, WCD9335_ANA_HPH, 0x80, 0x80);
+		set_bit(HPH_PA_DELAY, &tasha->status_mask);
+		break;
+	case SND_SOC_DAPM_POST_PMU:
+		if (!(strcmp(w->name, "ANC HPHL PA"))) {
+			if ((snd_soc_read(codec, WCD9335_ANA_HPH) & 0xC0)
+								!= 0xC0)
+				/*
+				 * If PA_EN is not set (potentially in ANC case)
+				 * then do nothing for POST_PMU and let right
+				 * channel handle everything.
+				 */
+				break;
+		}
+		/*
+		 * 7ms sleep is required after PA is enabled as per
+		 * HW requirement
+		 */
+		if (test_bit(HPH_PA_DELAY, &tasha->status_mask)) {
+			usleep_range(7000, 7100);
+			clear_bit(HPH_PA_DELAY, &tasha->status_mask);
+		}
+
+		tasha_codec_hph_post_pa_config(tasha, hph_mode, event);
+		snd_soc_update_bits(codec, WCD9335_CDC_RX1_RX_PATH_CTL,
+				    0x10, 0x00);
+		/* Remove mix path mute if it is enabled */
+		if ((snd_soc_read(codec, WCD9335_CDC_RX1_RX_PATH_MIX_CTL)) &
+				  0x10)
+			snd_soc_update_bits(codec,
+					    WCD9335_CDC_RX1_RX_PATH_MIX_CTL,
+					    0x10, 0x00);
+
+		if (!(strcmp(w->name, "ANC HPHL PA"))) {
+			/* Do everything needed for right channel */
+			snd_soc_update_bits(codec, WCD9335_CDC_RX2_RX_PATH_CTL,
+					    0x10, 0x00);
+			/* Remove mix path mute if it is enabled */
+			if ((snd_soc_read(codec,
+					  WCD9335_CDC_RX2_RX_PATH_MIX_CTL)) &
+					  0x10)
+				snd_soc_update_bits(codec,
+						WCD9335_CDC_RX2_RX_PATH_MIX_CTL,
+						0x10, 0x00);
+
+			/* Remove ANC Rx from reset */
+			ret = tasha_codec_enable_anc(w, kcontrol, event);
+		}
+		tasha_codec_override(codec, hph_mode, event);
+		break;
+	case SND_SOC_DAPM_PRE_PMD:
+		blocking_notifier_call_chain(&tasha->notifier,
+					WCD_EVENT_PRE_HPHL_PA_OFF,
+					&tasha->mbhc);
+		tasha_codec_hph_post_pa_config(tasha, hph_mode, event);
+		if (!(strcmp(w->name, "ANC HPHL PA")))
+			snd_soc_update_bits(codec, WCD9335_ANA_HPH, 0x80, 0x00);
+		if (!(strcmp(w->name, "HPHL PA")))
+			snd_soc_update_bits(codec, WCD9335_ANA_HPH, 0x80, 0x00);
+		break;
+	case SND_SOC_DAPM_POST_PMD:
+		/* 5ms sleep is required after PA is disabled as per
+		 * HW requirement
+		 */
+		usleep_range(5000, 5500);
+		tasha_codec_override(codec, hph_mode, event);
+		blocking_notifier_call_chain(&tasha->notifier,
+					WCD_EVENT_POST_HPHL_PA_OFF,
+					&tasha->mbhc);
+
+		if (!(strcmp(w->name, "ANC HPHL PA"))) {
+			ret = tasha_codec_enable_anc(w, kcontrol, event);
+			snd_soc_update_bits(codec,
+				WCD9335_CDC_RX1_RX_PATH_CFG0, 0x10, 0x00);
+		}
+		break;
+	}
+
+	return ret;
+}
+
+static int tasha_codec_enable_lineout_pa(struct snd_soc_dapm_widget *w,
+					 struct snd_kcontrol *kcontrol,
+					 int event)
+{
+	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+	u16 lineout_vol_reg = 0, lineout_mix_vol_reg = 0;
+	int ret = 0;
+
+	dev_dbg(codec->dev, "%s %s %d\n", __func__, w->name, event);
+
+	if (w->reg == WCD9335_ANA_LO_1_2) {
+		if (w->shift == 7) {
+			lineout_vol_reg = WCD9335_CDC_RX3_RX_PATH_CTL;
+			lineout_mix_vol_reg = WCD9335_CDC_RX3_RX_PATH_MIX_CTL;
+		} else if (w->shift == 6) {
+			lineout_vol_reg = WCD9335_CDC_RX4_RX_PATH_CTL;
+			lineout_mix_vol_reg = WCD9335_CDC_RX4_RX_PATH_MIX_CTL;
+		}
+	} else if (w->reg == WCD9335_ANA_LO_3_4) {
+		if (w->shift == 7) {
+			lineout_vol_reg = WCD9335_CDC_RX5_RX_PATH_CTL;
+			lineout_mix_vol_reg = WCD9335_CDC_RX5_RX_PATH_MIX_CTL;
+		} else if (w->shift == 6) {
+			lineout_vol_reg = WCD9335_CDC_RX6_RX_PATH_CTL;
+			lineout_mix_vol_reg = WCD9335_CDC_RX6_RX_PATH_MIX_CTL;
+		}
+	} else {
+		dev_err(codec->dev, "%s: Error enabling lineout PA\n",
+			__func__);
+		return -EINVAL;
+	}
+
+	switch (event) {
+	case SND_SOC_DAPM_POST_PMU:
+		/* 5ms sleep is required after PA is enabled as per
+		 * HW requirement
+		 */
+		usleep_range(5000, 5500);
+		snd_soc_update_bits(codec, lineout_vol_reg,
+				    0x10, 0x00);
+		/* Remove mix path mute if it is enabled */
+		if ((snd_soc_read(codec, lineout_mix_vol_reg)) & 0x10)
+			snd_soc_update_bits(codec,
+					    lineout_mix_vol_reg,
+					    0x10, 0x00);
+		if (!(strcmp(w->name, "ANC LINEOUT1 PA")) ||
+		    !(strcmp(w->name, "ANC LINEOUT2 PA")))
+			ret = tasha_codec_enable_anc(w, kcontrol, event);
+		tasha_codec_override(codec, CLS_AB, event);
+		break;
+	case SND_SOC_DAPM_POST_PMD:
+		/* 5ms sleep is required after PA is disabled as per
+		 * HW requirement
+		 */
+		usleep_range(5000, 5500);
+		tasha_codec_override(codec, CLS_AB, event);
+		if (!(strcmp(w->name, "ANC LINEOUT1 PA")) ||
+			!(strcmp(w->name, "ANC LINEOUT2 PA"))) {
+			ret = tasha_codec_enable_anc(w, kcontrol, event);
+			if (!(strcmp(w->name, "ANC LINEOUT1 PA")))
+				snd_soc_update_bits(codec,
+				WCD9335_CDC_RX3_RX_PATH_CFG0, 0x10, 0x10);
+			else
+				snd_soc_update_bits(codec,
+				WCD9335_CDC_RX4_RX_PATH_CFG0, 0x10, 0x10);
+		}
+		break;
+	}
+
+	return ret;
+}
+
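+/*
+ * Deferred ANC enable for the speaker (RX7) path, scheduled from
+ * tasha_codec_enable_spk_anc() after spk_anc_en_delay ms.
+ */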
+static void tasha_spk_anc_update_callback(struct work_struct *work)
+{
+	struct spk_anc_work *spk_anc_dwork;
+	struct tasha_priv *tasha;
+	struct delayed_work *delayed_work;
+	struct snd_soc_codec *codec;
+
+	delayed_work = to_delayed_work(work);
+	spk_anc_dwork = container_of(delayed_work, struct spk_anc_work, dwork);
+	tasha = spk_anc_dwork->tasha;
+	codec = tasha->codec;
+
+	snd_soc_update_bits(codec, WCD9335_CDC_RX7_RX_PATH_CFG0, 0x10, 0x10);
+}
+
+static int tasha_codec_enable_spk_anc(struct snd_soc_dapm_widget *w,
+				      struct snd_kcontrol *kcontrol,
+				      int event)
+{
+	int ret = 0;
+	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+	struct tasha_priv *tasha = snd_soc_codec_get_drvdata(codec);
+
+	dev_dbg(codec->dev, "%s %s %d %d\n", __func__, w->name, event,
+		tasha->anc_func);
+
+	if (!tasha->anc_func)
+		return 0;
+
+	switch (event) {
+	case SND_SOC_DAPM_PRE_PMU:
+		ret = tasha_codec_enable_anc(w, kcontrol, event);
+		schedule_delayed_work(&tasha->spk_anc_dwork.dwork,
+				      msecs_to_jiffies(spk_anc_en_delay));
+		break;
+	case SND_SOC_DAPM_POST_PMD:
+		cancel_delayed_work_sync(&tasha->spk_anc_dwork.dwork);
+		snd_soc_update_bits(codec, WCD9335_CDC_RX7_RX_PATH_CFG0,
+				    0x10, 0x00);
+		ret = tasha_codec_enable_anc(w, kcontrol, event);
+		break;
+	}
+	return ret;
+}
+
+static int tasha_codec_enable_ear_pa(struct snd_soc_dapm_widget *w,
+				     struct snd_kcontrol *kcontrol,
+				     int event)
+{
+	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+	int ret = 0;
+
+	dev_dbg(codec->dev, "%s %s %d\n", __func__, w->name, event);
+
+	switch (event) {
+	case SND_SOC_DAPM_POST_PMU:
+		/* 5ms sleep is required after PA is enabled as per
+		 * HW requirement
+		 */
+		usleep_range(5000, 5500);
+		snd_soc_update_bits(codec, WCD9335_CDC_RX0_RX_PATH_CTL,
+				    0x10, 0x00);
+		/* Remove mix path mute if it is enabled */
+		if ((snd_soc_read(codec, WCD9335_CDC_RX0_RX_PATH_MIX_CTL)) &
+		     0x10)
+			snd_soc_update_bits(codec,
+					    WCD9335_CDC_RX0_RX_PATH_MIX_CTL,
+					    0x10, 0x00);
+		break;
+	case SND_SOC_DAPM_POST_PMD:
+		/* 5ms sleep is required after PA is disabled as per
+		 * HW requirement
+		 */
+		usleep_range(5000, 5500);
+
+		if (!(strcmp(w->name, "ANC EAR PA"))) {
+			ret = tasha_codec_enable_anc(w, kcontrol, event);
+			snd_soc_update_bits(codec,
+				WCD9335_CDC_RX0_RX_PATH_CFG0, 0x10, 0x00);
+		}
+		break;
+	}
+
+	return ret;
+}
+
+static void tasha_codec_hph_mode_gain_opt(struct snd_soc_codec *codec,
+					  u8 gain)
+{
+	struct tasha_priv *tasha = snd_soc_codec_get_drvdata(codec);
+	u8 hph_l_en, hph_r_en;
+	u8 l_val, r_val;
+	u8 hph_pa_status;
+	bool is_hphl_pa, is_hphr_pa;
+
+	hph_pa_status = snd_soc_read(codec, WCD9335_ANA_HPH);
+	is_hphl_pa = hph_pa_status >> 7;
+	is_hphr_pa = (hph_pa_status & 0x40) >> 6;
+
+	hph_l_en = snd_soc_read(codec, WCD9335_HPH_L_EN);
+	hph_r_en = snd_soc_read(codec, WCD9335_HPH_R_EN);
+
+	l_val = (hph_l_en & 0xC0) | 0x20 | gain;
+	r_val = (hph_r_en & 0xC0) | 0x20 | gain;
+
+	/*
+	 * Set the HPH_L & HPH_R gain source selection to REGISTER to
+	 * reduce click and pop, but only if the corresponding PAs are
+	 * not enabled. Also cache the HPHL/R PA gain values so they
+	 * can be applied after the PAs are enabled.
+	 */
+	if ((l_val != hph_l_en) && !is_hphl_pa) {
+		snd_soc_write(codec, WCD9335_HPH_L_EN, l_val);
+		tasha->hph_l_gain = hph_l_en & 0x1F;
+	}
+
+	if ((r_val != hph_r_en) && !is_hphr_pa) {
+		snd_soc_write(codec, WCD9335_HPH_R_EN, r_val);
+		tasha->hph_r_gain = hph_r_en & 0x1F;
+	}
+}
+
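+/*
+ * Per-mode HPH analog configuration, applied only on WCD9335 v2.0
+ * via tasha_codec_hph_mode_config(): each helper programs the RX
+ * bias, CNP wave-gen and PA control registers on power-up and
+ * restores the defaults on power-down.
+ */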
+static void tasha_codec_hph_lohifi_config(struct snd_soc_codec *codec,
+					  int event)
+{
+	if (SND_SOC_DAPM_EVENT_ON(event)) {
+		snd_soc_update_bits(codec, WCD9335_RX_BIAS_HPH_PA, 0x0F, 0x06);
+		snd_soc_update_bits(codec, WCD9335_RX_BIAS_HPH_RDACBUFF_CNP2,
+				    0xF0, 0x40);
+		snd_soc_update_bits(codec, WCD9335_HPH_CNP_WG_CTL, 0x07, 0x03);
+		snd_soc_update_bits(codec, WCD9335_HPH_PA_CTL2, 0x08, 0x08);
+		snd_soc_update_bits(codec, WCD9335_HPH_PA_CTL1, 0x0E, 0x0C);
+		tasha_codec_hph_mode_gain_opt(codec, 0x11);
+	}
+
+	if (SND_SOC_DAPM_EVENT_OFF(event)) {
+		snd_soc_update_bits(codec, WCD9335_HPH_PA_CTL2, 0x08, 0x00);
+		snd_soc_update_bits(codec, WCD9335_HPH_CNP_WG_CTL, 0x07, 0x02);
+		snd_soc_write(codec, WCD9335_RX_BIAS_HPH_RDACBUFF_CNP2, 0x8A);
+		snd_soc_update_bits(codec, WCD9335_RX_BIAS_HPH_PA, 0x0F, 0x0A);
+	}
+}
+
+static void tasha_codec_hph_lp_config(struct snd_soc_codec *codec,
+				      int event)
+{
+	if (SND_SOC_DAPM_EVENT_ON(event)) {
+		snd_soc_update_bits(codec, WCD9335_HPH_PA_CTL1, 0x0E, 0x0C);
+		tasha_codec_hph_mode_gain_opt(codec, 0x10);
+		snd_soc_update_bits(codec, WCD9335_HPH_CNP_WG_CTL, 0x07, 0x03);
+		snd_soc_update_bits(codec, WCD9335_HPH_PA_CTL2, 0x08, 0x08);
+		snd_soc_update_bits(codec, WCD9335_HPH_PA_CTL2, 0x04, 0x04);
+		snd_soc_update_bits(codec, WCD9335_HPH_PA_CTL2, 0x20, 0x20);
+		snd_soc_update_bits(codec, WCD9335_HPH_RDAC_LDO_CTL, 0x07,
+				    0x01);
+		snd_soc_update_bits(codec, WCD9335_HPH_RDAC_LDO_CTL, 0x70,
+				    0x10);
+		snd_soc_update_bits(codec, WCD9335_RX_BIAS_HPH_RDAC_LDO,
+				    0x0F, 0x01);
+		snd_soc_update_bits(codec, WCD9335_RX_BIAS_HPH_RDAC_LDO,
+				    0xF0, 0x10);
+	}
+
+	if (SND_SOC_DAPM_EVENT_OFF(event)) {
+		snd_soc_write(codec, WCD9335_RX_BIAS_HPH_RDAC_LDO, 0x88);
+		snd_soc_write(codec, WCD9335_HPH_RDAC_LDO_CTL, 0x33);
+		snd_soc_update_bits(codec, WCD9335_HPH_PA_CTL2, 0x20, 0x00);
+		snd_soc_update_bits(codec, WCD9335_HPH_PA_CTL2, 0x04, 0x00);
+		snd_soc_update_bits(codec, WCD9335_HPH_PA_CTL2, 0x08, 0x00);
+		snd_soc_update_bits(codec, WCD9335_HPH_CNP_WG_CTL, 0x07, 0x02);
+		snd_soc_update_bits(codec, WCD9335_HPH_R_EN, 0xC0, 0x80);
+		snd_soc_update_bits(codec, WCD9335_HPH_L_EN, 0xC0, 0x80);
+	}
+}
+
+static void tasha_codec_hph_hifi_config(struct snd_soc_codec *codec,
+					int event)
+{
+	if (SND_SOC_DAPM_EVENT_ON(event)) {
+		snd_soc_update_bits(codec, WCD9335_HPH_CNP_WG_CTL, 0x07, 0x03);
+		snd_soc_update_bits(codec, WCD9335_HPH_PA_CTL2, 0x08, 0x08);
+		snd_soc_update_bits(codec, WCD9335_HPH_PA_CTL1, 0x0E, 0x0C);
+		tasha_codec_hph_mode_gain_opt(codec, 0x11);
+	}
+
+	if (SND_SOC_DAPM_EVENT_OFF(event)) {
+		snd_soc_update_bits(codec, WCD9335_HPH_PA_CTL2, 0x08, 0x00);
+		snd_soc_update_bits(codec, WCD9335_HPH_CNP_WG_CTL, 0x07, 0x02);
+	}
+}
+
+static void tasha_codec_hph_mode_config(struct snd_soc_codec *codec,
+					int event, int mode)
+{
+	struct tasha_priv *tasha = snd_soc_codec_get_drvdata(codec);
+
+	if (!TASHA_IS_2_0(tasha->wcd9xxx))
+		return;
+
+	switch (mode) {
+	case CLS_H_LP:
+		tasha_codec_hph_lp_config(codec, event);
+		break;
+	case CLS_H_LOHIFI:
+		tasha_codec_hph_lohifi_config(codec, event);
+		break;
+	case CLS_H_HIFI:
+		tasha_codec_hph_hifi_config(codec, event);
+		break;
+	}
+}
+
+static int tasha_codec_hphr_dac_event(struct snd_soc_dapm_widget *w,
+				      struct snd_kcontrol *kcontrol,
+				      int event)
+{
+	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+	struct tasha_priv *tasha = snd_soc_codec_get_drvdata(codec);
+	struct wcd9xxx *wcd9xxx = dev_get_drvdata(codec->dev->parent);
+	int hph_mode = tasha->hph_mode;
+	u8 dem_inp;
+	int ret = 0;
+
+	dev_dbg(codec->dev, "%s wname: %s event: %d hph_mode: %d\n", __func__,
+		w->name, event, hph_mode);
+
+	switch (event) {
+	case SND_SOC_DAPM_PRE_PMU:
+		if (!(strcmp(w->name, "RX INT2 DAC"))) {
+			snd_soc_update_bits(codec, WCD9335_ANA_HPH, 0x20, 0x20);
+			snd_soc_update_bits(codec, WCD9335_ANA_HPH, 0x10, 0x10);
+		}
+		if (tasha->anc_func) {
+			ret = tasha_codec_enable_anc(w, kcontrol, event);
+			/* 40 msec delay is needed to avoid click and pop */
+			msleep(40);
+		}
+
+		/* Read DEM INP Select */
+		dem_inp = snd_soc_read(codec, WCD9335_CDC_RX2_RX_PATH_SEC0) &
+			  0x03;
+		if (((hph_mode == CLS_H_HIFI) || (hph_mode == CLS_H_LOHIFI) ||
+		     (hph_mode == CLS_H_LP)) && (dem_inp != 0x01)) {
+			dev_err(codec->dev, "%s: DEM Input not set correctly, hph_mode: %d\n",
+					__func__, hph_mode);
+			return -EINVAL;
+		}
+		wcd_clsh_fsm(codec, &tasha->clsh_d,
+			     WCD_CLSH_EVENT_PRE_DAC,
+			     WCD_CLSH_STATE_HPHR,
+			     ((hph_mode == CLS_H_LOHIFI) ?
+			       CLS_H_HIFI : hph_mode));
+
+		tasha_codec_hph_mode_config(codec, event, hph_mode);
+
+		if (tasha->anc_func)
+			snd_soc_update_bits(codec,
+				WCD9335_CDC_RX2_RX_PATH_CFG0, 0x10, 0x10);
+
+		break;
+	case SND_SOC_DAPM_POST_PMU:
+		/* 1000us required as per HW requirement */
+		usleep_range(1000, 1100);
+		if ((hph_mode == CLS_H_LP) &&
+		   (TASHA_IS_1_1(wcd9xxx))) {
+			snd_soc_update_bits(codec, WCD9335_HPH_L_DAC_CTL,
+					    0x03, 0x03);
+		}
+		break;
+	case SND_SOC_DAPM_PRE_PMD:
+		if (!(strcmp(w->name, "RX INT2 DAC")))
+			snd_soc_update_bits(codec, WCD9335_ANA_HPH, 0x30, 0x00);
+		if ((hph_mode == CLS_H_LP) &&
+		   (TASHA_IS_1_1(wcd9xxx))) {
+			snd_soc_update_bits(codec, WCD9335_HPH_L_DAC_CTL,
+					    0x03, 0x00);
+		}
+		break;
+	case SND_SOC_DAPM_POST_PMD:
+		/* 1000us required as per HW requirement */
+		usleep_range(1000, 1100);
+
+		if (!(wcd_clsh_get_clsh_state(&tasha->clsh_d) &
+		     WCD_CLSH_STATE_HPHL))
+			tasha_codec_hph_mode_config(codec, event, hph_mode);
+
+		wcd_clsh_fsm(codec, &tasha->clsh_d,
+			     WCD_CLSH_EVENT_POST_PA,
+			     WCD_CLSH_STATE_HPHR,
+			     ((hph_mode == CLS_H_LOHIFI) ?
+			       CLS_H_HIFI : hph_mode));
+		break;
+	}
+
+	return ret;
+}
+
+static int tasha_codec_hphl_dac_event(struct snd_soc_dapm_widget *w,
+				      struct snd_kcontrol *kcontrol,
+				      int event)
+{
+	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+	struct tasha_priv *tasha = snd_soc_codec_get_drvdata(codec);
+	struct wcd9xxx *wcd9xxx = dev_get_drvdata(codec->dev->parent);
+	int hph_mode = tasha->hph_mode;
+	u8 dem_inp;
+	int ret = 0;
+	uint32_t impedl = 0, impedr = 0;
+
+	dev_dbg(codec->dev, "%s wname: %s event: %d hph_mode: %d\n", __func__,
+		w->name, event, hph_mode);
+
+	switch (event) {
+	case SND_SOC_DAPM_PRE_PMU:
+		if (tasha->anc_func) {
+			ret = tasha_codec_enable_anc(w, kcontrol, event);
+			/* 40 msec delay is needed to avoid click and pop */
+			msleep(40);
+		}
+
+		/* Read DEM INP Select */
+		dem_inp = snd_soc_read(codec, WCD9335_CDC_RX1_RX_PATH_SEC0) &
+			  0x03;
+		if (((hph_mode == CLS_H_HIFI) || (hph_mode == CLS_H_LOHIFI) ||
+		     (hph_mode == CLS_H_LP)) && (dem_inp != 0x01)) {
+			dev_err(codec->dev, "%s: DEM Input not set correctly, hph_mode: %d\n",
+					__func__, hph_mode);
+			return -EINVAL;
+		}
+		wcd_clsh_fsm(codec, &tasha->clsh_d,
+			     WCD_CLSH_EVENT_PRE_DAC,
+			     WCD_CLSH_STATE_HPHL,
+			     ((hph_mode == CLS_H_LOHIFI) ?
+			       CLS_H_HIFI : hph_mode));
+
+		if (!(strcmp(w->name, "RX INT1 DAC")))
+			snd_soc_update_bits(codec, WCD9335_ANA_HPH, 0x20, 0x20);
+		tasha_codec_hph_mode_config(codec, event, hph_mode);
+
+		if (tasha->anc_func)
+			snd_soc_update_bits(codec,
+				WCD9335_CDC_RX1_RX_PATH_CFG0, 0x10, 0x10);
+
+		ret = wcd_mbhc_get_impedance(&tasha->mbhc,
+					&impedl, &impedr);
+		if (!ret) {
+			wcd_clsh_imped_config(codec, impedl, false);
+			set_bit(CLASSH_CONFIG, &tasha->status_mask);
+		} else {
+			dev_dbg(codec->dev, "%s: Failed to get mbhc impedance %d\n",
+						__func__, ret);
+			ret = 0;
+		}
+
+		break;
+	case SND_SOC_DAPM_POST_PMU:
+		/* 1000us required as per HW requirement */
+		usleep_range(1000, 1100);
+		if ((hph_mode == CLS_H_LP) &&
+		   (TASHA_IS_1_1(wcd9xxx))) {
+			snd_soc_update_bits(codec, WCD9335_HPH_L_DAC_CTL,
+					    0x03, 0x03);
+		}
+		break;
+	case SND_SOC_DAPM_PRE_PMD:
+		if (!(strcmp(w->name, "RX INT1 DAC")))
+			snd_soc_update_bits(codec, WCD9335_ANA_HPH, 0x20, 0x00);
+		if ((hph_mode == CLS_H_LP) &&
+		   (TASHA_IS_1_1(wcd9xxx))) {
+			snd_soc_update_bits(codec, WCD9335_HPH_L_DAC_CTL,
+					    0x03, 0x00);
+		}
+		break;
+	case SND_SOC_DAPM_POST_PMD:
+		/* 1000us required as per HW requirement */
+		usleep_range(1000, 1100);
+
+		if (!(wcd_clsh_get_clsh_state(&tasha->clsh_d) &
+		     WCD_CLSH_STATE_HPHR))
+			tasha_codec_hph_mode_config(codec, event, hph_mode);
+		wcd_clsh_fsm(codec, &tasha->clsh_d,
+			     WCD_CLSH_EVENT_POST_PA,
+			     WCD_CLSH_STATE_HPHL,
+			     ((hph_mode == CLS_H_LOHIFI) ?
+			       CLS_H_HIFI : hph_mode));
+
+		if (test_bit(CLASSH_CONFIG, &tasha->status_mask)) {
+			wcd_clsh_imped_config(codec, impedl, true);
+			clear_bit(CLASSH_CONFIG, &tasha->status_mask);
+		} else {
+			dev_dbg(codec->dev, "%s: imped config not applied, skip reset\n",
+				__func__);
+		}
+
+		break;
+	}
+
+	return ret;
+}
+
+static int tasha_codec_lineout_dac_event(struct snd_soc_dapm_widget *w,
+					 struct snd_kcontrol *kcontrol,
+					 int event)
+{
+	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+	struct tasha_priv *tasha = snd_soc_codec_get_drvdata(codec);
+	int ret = 0;
+
+	dev_dbg(codec->dev, "%s %s %d\n", __func__, w->name, event);
+
+	switch (event) {
+	case SND_SOC_DAPM_PRE_PMU:
+		if (tasha->anc_func &&
+			(!strcmp(w->name, "RX INT3 DAC") ||
+				!strcmp(w->name, "RX INT4 DAC")))
+			ret = tasha_codec_enable_anc(w, kcontrol, event);
+
+		wcd_clsh_fsm(codec, &tasha->clsh_d,
+			     WCD_CLSH_EVENT_PRE_DAC,
+			     WCD_CLSH_STATE_LO,
+			     CLS_AB);
+
+		if (tasha->anc_func) {
+			if (!strcmp(w->name, "RX INT3 DAC"))
+				snd_soc_update_bits(codec,
+				WCD9335_CDC_RX3_RX_PATH_CFG0, 0x10, 0x10);
+			else if (!strcmp(w->name, "RX INT4 DAC"))
+				snd_soc_update_bits(codec,
+				WCD9335_CDC_RX4_RX_PATH_CFG0, 0x10, 0x10);
+		}
+		break;
+	case SND_SOC_DAPM_POST_PMD:
+		wcd_clsh_fsm(codec, &tasha->clsh_d,
+			     WCD_CLSH_EVENT_POST_PA,
+			     WCD_CLSH_STATE_LO,
+			     CLS_AB);
+		break;
+	}
+
+	return 0;
+}
+
+static const struct snd_soc_dapm_widget tasha_dapm_i2s_widgets[] = {
+	SND_SOC_DAPM_SUPPLY("RX_I2S_CTL", WCD9335_DATA_HUB_DATA_HUB_RX_I2S_CTL,
+	0, 0, NULL, 0),
+	SND_SOC_DAPM_SUPPLY("TX_I2S_CTL", WCD9335_DATA_HUB_DATA_HUB_TX_I2S_CTL,
+	0, 0, NULL, 0),
+};
+
+static int tasha_codec_ear_dac_event(struct snd_soc_dapm_widget *w,
+				     struct snd_kcontrol *kcontrol,
+				     int event)
+{
+	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+	struct tasha_priv *tasha = snd_soc_codec_get_drvdata(codec);
+	int ret = 0;
+
+	dev_dbg(codec->dev, "%s %s %d\n", __func__, w->name, event);
+
+	switch (event) {
+	case SND_SOC_DAPM_PRE_PMU:
+		if (tasha->anc_func)
+			ret = tasha_codec_enable_anc(w, kcontrol, event);
+
+		wcd_clsh_fsm(codec, &tasha->clsh_d,
+			     WCD_CLSH_EVENT_PRE_DAC,
+			     WCD_CLSH_STATE_EAR,
+			     CLS_H_NORMAL);
+		if (tasha->anc_func)
+			snd_soc_update_bits(codec,
+				WCD9335_CDC_RX0_RX_PATH_CFG0, 0x10, 0x10);
+
+		break;
+	case SND_SOC_DAPM_POST_PMU:
+		break;
+	case SND_SOC_DAPM_PRE_PMD:
+		break;
+	case SND_SOC_DAPM_POST_PMD:
+		wcd_clsh_fsm(codec, &tasha->clsh_d,
+			     WCD_CLSH_EVENT_POST_PA,
+			     WCD_CLSH_STATE_EAR,
+			     CLS_H_NORMAL);
+		break;
+	}
+
+	return ret;
+}
+
+static int tasha_codec_spk_boost_event(struct snd_soc_dapm_widget *w,
+				struct snd_kcontrol *kcontrol,
+				int event)
+{
+	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+	u16 boost_path_ctl, boost_path_cfg1;
+	u16 reg, reg_mix;
+
+	dev_dbg(codec->dev, "%s %s %d\n", __func__, w->name, event);
+
+	if (!strcmp(w->name, "RX INT7 CHAIN")) {
+		boost_path_ctl = WCD9335_CDC_BOOST0_BOOST_PATH_CTL;
+		boost_path_cfg1 = WCD9335_CDC_RX7_RX_PATH_CFG1;
+		reg = WCD9335_CDC_RX7_RX_PATH_CTL;
+		reg_mix = WCD9335_CDC_RX7_RX_PATH_MIX_CTL;
+	} else if (!strcmp(w->name, "RX INT8 CHAIN")) {
+		boost_path_ctl = WCD9335_CDC_BOOST1_BOOST_PATH_CTL;
+		boost_path_cfg1 = WCD9335_CDC_RX8_RX_PATH_CFG1;
+		reg = WCD9335_CDC_RX8_RX_PATH_CTL;
+		reg_mix = WCD9335_CDC_RX8_RX_PATH_MIX_CTL;
+	} else {
+		dev_err(codec->dev, "%s: unknown widget: %s\n",
+			__func__, w->name);
+		return -EINVAL;
+	}
+
+	switch (event) {
+	case SND_SOC_DAPM_PRE_PMU:
+		snd_soc_update_bits(codec, boost_path_ctl, 0x10, 0x10);
+		snd_soc_update_bits(codec, boost_path_cfg1, 0x01, 0x01);
+		snd_soc_update_bits(codec, reg, 0x10, 0x00);
+		if ((snd_soc_read(codec, reg_mix)) & 0x10)
+			snd_soc_update_bits(codec, reg_mix, 0x10, 0x00);
+		break;
+	case SND_SOC_DAPM_POST_PMD:
+		snd_soc_update_bits(codec, boost_path_cfg1, 0x01, 0x00);
+		snd_soc_update_bits(codec, boost_path_ctl, 0x10, 0x00);
+		break;
+	}
+
+	return 0;
+}
+
+static u16 tasha_interp_get_primary_reg(u16 reg, u16 *ind)
+{
+	u16 prim_int_reg = 0;
+
+	switch (reg) {
+	case WCD9335_CDC_RX0_RX_PATH_CTL:
+	case WCD9335_CDC_RX0_RX_PATH_MIX_CTL:
+		prim_int_reg = WCD9335_CDC_RX0_RX_PATH_CTL;
+		*ind = 0;
+		break;
+	case WCD9335_CDC_RX1_RX_PATH_CTL:
+	case WCD9335_CDC_RX1_RX_PATH_MIX_CTL:
+		prim_int_reg = WCD9335_CDC_RX1_RX_PATH_CTL;
+		*ind = 1;
+		break;
+	case WCD9335_CDC_RX2_RX_PATH_CTL:
+	case WCD9335_CDC_RX2_RX_PATH_MIX_CTL:
+		prim_int_reg = WCD9335_CDC_RX2_RX_PATH_CTL;
+		*ind = 2;
+		break;
+	case WCD9335_CDC_RX3_RX_PATH_CTL:
+	case WCD9335_CDC_RX3_RX_PATH_MIX_CTL:
+		prim_int_reg = WCD9335_CDC_RX3_RX_PATH_CTL;
+		*ind = 3;
+		break;
+	case WCD9335_CDC_RX4_RX_PATH_CTL:
+	case WCD9335_CDC_RX4_RX_PATH_MIX_CTL:
+		prim_int_reg = WCD9335_CDC_RX4_RX_PATH_CTL;
+		*ind = 4;
+		break;
+	case WCD9335_CDC_RX5_RX_PATH_CTL:
+	case WCD9335_CDC_RX5_RX_PATH_MIX_CTL:
+		prim_int_reg = WCD9335_CDC_RX5_RX_PATH_CTL;
+		*ind = 5;
+		break;
+	case WCD9335_CDC_RX6_RX_PATH_CTL:
+	case WCD9335_CDC_RX6_RX_PATH_MIX_CTL:
+		prim_int_reg = WCD9335_CDC_RX6_RX_PATH_CTL;
+		*ind = 6;
+		break;
+	case WCD9335_CDC_RX7_RX_PATH_CTL:
+	case WCD9335_CDC_RX7_RX_PATH_MIX_CTL:
+		prim_int_reg = WCD9335_CDC_RX7_RX_PATH_CTL;
+		*ind = 7;
+		break;
+	case WCD9335_CDC_RX8_RX_PATH_CTL:
+	case WCD9335_CDC_RX8_RX_PATH_MIX_CTL:
+		prim_int_reg = WCD9335_CDC_RX8_RX_PATH_CTL;
+		*ind = 8;
+		break;
+	}
+
+	return prim_int_reg;
+}
+
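+/*
+ * HD2 (2nd-harmonic distortion) correction, available on v2.0 and
+ * wired up only for the HPH interpolators (RX1/RX2): program the
+ * scale fields in RX_PATH_SEC3 and toggle the enable bit in
+ * RX_PATH_CFG0.
+ */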
+static void tasha_codec_hd2_control(struct snd_soc_codec *codec,
+				    u16 prim_int_reg, int event)
+{
+	struct tasha_priv *tasha = snd_soc_codec_get_drvdata(codec);
+	u16 hd2_scale_reg;
+	u16 hd2_enable_reg = 0;
+
+	if (!TASHA_IS_2_0(tasha->wcd9xxx))
+		return;
+
+	if (prim_int_reg == WCD9335_CDC_RX1_RX_PATH_CTL) {
+		hd2_scale_reg = WCD9335_CDC_RX1_RX_PATH_SEC3;
+		hd2_enable_reg = WCD9335_CDC_RX1_RX_PATH_CFG0;
+	}
+	if (prim_int_reg == WCD9335_CDC_RX2_RX_PATH_CTL) {
+		hd2_scale_reg = WCD9335_CDC_RX2_RX_PATH_SEC3;
+		hd2_enable_reg = WCD9335_CDC_RX2_RX_PATH_CFG0;
+	}
+
+	if (hd2_enable_reg && SND_SOC_DAPM_EVENT_ON(event)) {
+		snd_soc_update_bits(codec, hd2_scale_reg, 0x3C, 0x10);
+		snd_soc_update_bits(codec, hd2_scale_reg, 0x03, 0x01);
+		snd_soc_update_bits(codec, hd2_enable_reg, 0x04, 0x04);
+	}
+
+	if (hd2_enable_reg && SND_SOC_DAPM_EVENT_OFF(event)) {
+		snd_soc_update_bits(codec, hd2_enable_reg, 0x04, 0x00);
+		snd_soc_update_bits(codec, hd2_scale_reg, 0x03, 0x00);
+		snd_soc_update_bits(codec, hd2_scale_reg, 0x3C, 0x00);
+	}
+}
+
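+/*
+ * Reference-count the users of each primary interpolator: the first
+ * user mutes the path, sets up HD2 and enables the interpolator
+ * clock (bit 5); the last user pulses the soft reset (bit 6) and
+ * tears HD2 down. A mix path inherits the mute state of its primary
+ * path.
+ */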
+static int tasha_codec_enable_prim_interpolator(
+				struct snd_soc_codec *codec,
+				u16 reg, int event)
+{
+	struct tasha_priv *tasha = snd_soc_codec_get_drvdata(codec);
+	u16 prim_int_reg;
+	u16 ind = 0;
+
+	prim_int_reg = tasha_interp_get_primary_reg(reg, &ind);
+
+	switch (event) {
+	case SND_SOC_DAPM_PRE_PMU:
+		tasha->prim_int_users[ind]++;
+		if (tasha->prim_int_users[ind] == 1) {
+			snd_soc_update_bits(codec, prim_int_reg,
+					    0x10, 0x10);
+			tasha_codec_hd2_control(codec, prim_int_reg, event);
+			snd_soc_update_bits(codec, prim_int_reg,
+					    1 << 0x5, 1 << 0x5);
+		}
+		if ((reg != prim_int_reg) &&
+		    ((snd_soc_read(codec, prim_int_reg)) & 0x10))
+			snd_soc_update_bits(codec, reg, 0x10, 0x10);
+		break;
+	case SND_SOC_DAPM_POST_PMD:
+		tasha->prim_int_users[ind]--;
+		if (tasha->prim_int_users[ind] == 0) {
+			snd_soc_update_bits(codec, prim_int_reg,
+					1 << 0x5, 0 << 0x5);
+			snd_soc_update_bits(codec, prim_int_reg,
+					0x40, 0x40);
+			snd_soc_update_bits(codec, prim_int_reg,
+					0x40, 0x00);
+			tasha_codec_hd2_control(codec, prim_int_reg, event);
+		}
+		break;
+	}
+
+	dev_dbg(codec->dev, "%s: primary interpolator: INT%d, users: %d\n",
+		__func__, ind, tasha->prim_int_users[ind]);
+	return 0;
+}
+
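+/*
+ * The spline SRCs are paired on a shared clock reset (SRC0/SRC1 and
+ * SRC2/SRC3): HPHL/LO1 map to SRC0, HPHR/LO2 to SRC1, SPKRL/LO3 to
+ * SRC2 and SPKRR/LO4 to SRC3. The first user clears any pending
+ * reset on either member of the pair before enabling the clock.
+ */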
+static int tasha_codec_enable_spline_src(struct snd_soc_codec *codec,
+					 int src_num,
+					 int event)
+{
+	u16 src_paired_reg = 0;
+	struct tasha_priv *tasha;
+	u16 rx_path_cfg_reg = WCD9335_CDC_RX1_RX_PATH_CFG0;
+	u16 rx_path_ctl_reg = WCD9335_CDC_RX1_RX_PATH_CTL;
+	int *src_users, count, spl_src = SPLINE_SRC0;
+	u16 src_clk_reg = WCD9335_SPLINE_SRC0_CLK_RST_CTL_0;
+
+	tasha = snd_soc_codec_get_drvdata(codec);
+
+	switch (src_num) {
+	case SRC_IN_HPHL:
+		rx_path_cfg_reg = WCD9335_CDC_RX1_RX_PATH_CFG0;
+		src_clk_reg = WCD9335_SPLINE_SRC0_CLK_RST_CTL_0;
+		src_paired_reg = WCD9335_SPLINE_SRC1_CLK_RST_CTL_0;
+		rx_path_ctl_reg = WCD9335_CDC_RX1_RX_PATH_CTL;
+		spl_src = SPLINE_SRC0;
+		break;
+	case SRC_IN_LO1:
+		rx_path_cfg_reg = WCD9335_CDC_RX3_RX_PATH_CFG0;
+		src_clk_reg = WCD9335_SPLINE_SRC0_CLK_RST_CTL_0;
+		src_paired_reg = WCD9335_SPLINE_SRC1_CLK_RST_CTL_0;
+		rx_path_ctl_reg = WCD9335_CDC_RX3_RX_PATH_CTL;
+		spl_src = SPLINE_SRC0;
+		break;
+	case SRC_IN_HPHR:
+		rx_path_cfg_reg = WCD9335_CDC_RX2_RX_PATH_CFG0;
+		src_clk_reg = WCD9335_SPLINE_SRC1_CLK_RST_CTL_0;
+		src_paired_reg = WCD9335_SPLINE_SRC0_CLK_RST_CTL_0;
+		rx_path_ctl_reg = WCD9335_CDC_RX2_RX_PATH_CTL;
+		spl_src = SPLINE_SRC1;
+		break;
+	case SRC_IN_LO2:
+		rx_path_cfg_reg = WCD9335_CDC_RX4_RX_PATH_CFG0;
+		src_clk_reg = WCD9335_SPLINE_SRC1_CLK_RST_CTL_0;
+		src_paired_reg = WCD9335_SPLINE_SRC0_CLK_RST_CTL_0;
+		rx_path_ctl_reg = WCD9335_CDC_RX4_RX_PATH_CTL;
+		spl_src = SPLINE_SRC1;
+		break;
+	case SRC_IN_SPKRL:
+		rx_path_cfg_reg = WCD9335_CDC_RX7_RX_PATH_CFG0;
+		src_clk_reg = WCD9335_SPLINE_SRC2_CLK_RST_CTL_0;
+		src_paired_reg = WCD9335_SPLINE_SRC3_CLK_RST_CTL_0;
+		rx_path_ctl_reg = WCD9335_CDC_RX7_RX_PATH_CTL;
+		spl_src = SPLINE_SRC2;
+		break;
+	case SRC_IN_LO3:
+		rx_path_cfg_reg = WCD9335_CDC_RX5_RX_PATH_CFG0;
+		src_clk_reg = WCD9335_SPLINE_SRC2_CLK_RST_CTL_0;
+		src_paired_reg = WCD9335_SPLINE_SRC3_CLK_RST_CTL_0;
+		rx_path_ctl_reg = WCD9335_CDC_RX5_RX_PATH_CTL;
+		spl_src = SPLINE_SRC2;
+		break;
+	case SRC_IN_SPKRR:
+		rx_path_cfg_reg = WCD9335_CDC_RX8_RX_PATH_CFG0;
+		src_clk_reg = WCD9335_SPLINE_SRC3_CLK_RST_CTL_0;
+		src_paired_reg = WCD9335_SPLINE_SRC2_CLK_RST_CTL_0;
+		rx_path_ctl_reg = WCD9335_CDC_RX8_RX_PATH_CTL;
+		spl_src = SPLINE_SRC3;
+		break;
+	case SRC_IN_LO4:
+		rx_path_cfg_reg = WCD9335_CDC_RX6_RX_PATH_CFG0;
+		src_clk_reg = WCD9335_SPLINE_SRC3_CLK_RST_CTL_0;
+		src_paired_reg = WCD9335_SPLINE_SRC2_CLK_RST_CTL_0;
+		rx_path_ctl_reg = WCD9335_CDC_RX6_RX_PATH_CTL;
+		spl_src = SPLINE_SRC3;
+		break;
+	}
+
+	src_users = &tasha->spl_src_users[spl_src];
+
+	switch (event) {
+	case SND_SOC_DAPM_PRE_PMU:
+		count = *src_users;
+		count++;
+		if (count == 1) {
+			if ((snd_soc_read(codec, src_clk_reg) & 0x02) ||
+			    (snd_soc_read(codec, src_paired_reg) & 0x02)) {
+				snd_soc_update_bits(codec, src_clk_reg, 0x02,
+						    0x00);
+				snd_soc_update_bits(codec, src_paired_reg,
+						    0x02, 0x00);
+			}
+			snd_soc_update_bits(codec, src_clk_reg,	0x01, 0x01);
+			snd_soc_update_bits(codec, rx_path_cfg_reg, 0x80,
+					    0x80);
+		}
+		*src_users = count;
+		break;
+	case SND_SOC_DAPM_POST_PMD:
+		count = *src_users;
+		count--;
+		if (count == 0) {
+			snd_soc_update_bits(codec, rx_path_cfg_reg, 0x80,
+					    0x00);
+			snd_soc_update_bits(codec, src_clk_reg, 0x03, 0x02);
+			/* default sample rate */
+			snd_soc_update_bits(codec, rx_path_ctl_reg, 0x0f,
+					    0x04);
+		}
+		*src_users = count;
+		break;
+	}
+
+	dev_dbg(codec->dev, "%s: Spline SRC%d, users: %d\n",
+		__func__, spl_src, *src_users);
+	return 0;
+}
+
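+/*
+ * Decode the 2-bit input select for each spline SRC from
+ * SPLINE_SRC_CFG0 to determine which RX path (HPH/LO/SPKR) the
+ * resampler is serving, then delegate to
+ * tasha_codec_enable_spline_src().
+ */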
+static int tasha_codec_enable_spline_resampler(struct snd_soc_dapm_widget *w,
+				struct snd_kcontrol *kcontrol,
+				int event)
+{
+	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+	int ret = 0;
+	u8 src_in;
+
+	src_in = snd_soc_read(codec, WCD9335_CDC_RX_INP_MUX_SPLINE_SRC_CFG0);
+	if (!(src_in & 0xFF)) {
+		dev_err(codec->dev, "%s: Spline SRC%u input not selected\n",
+			__func__, w->shift);
+		return -EINVAL;
+	}
+
+	switch (w->shift) {
+	case SPLINE_SRC0:
+		ret = tasha_codec_enable_spline_src(codec,
+			((src_in & 0x03) == 1) ? SRC_IN_HPHL : SRC_IN_LO1,
+			event);
+		break;
+	case SPLINE_SRC1:
+		ret = tasha_codec_enable_spline_src(codec,
+			((src_in & 0x0C) == 4) ? SRC_IN_HPHR : SRC_IN_LO2,
+			event);
+		break;
+	case SPLINE_SRC2:
+		ret = tasha_codec_enable_spline_src(codec,
+			((src_in & 0x30) == 0x10) ? SRC_IN_LO3 : SRC_IN_SPKRL,
+			event);
+		break;
+	case SPLINE_SRC3:
+		ret = tasha_codec_enable_spline_src(codec,
+			((src_in & 0xC0) == 0x40) ? SRC_IN_LO4 : SRC_IN_SPKRR,
+			event);
+		break;
+	default:
+		dev_err(codec->dev, "%s: Invalid spline src:%u\n", __func__,
+			w->shift);
+		ret = -EINVAL;
+	}
+
+	return ret;
+}
+
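+/*
+ * Keep the SoundWire masters informed about the speaker paths: on
+ * power-up mark the device up and report the number of active RX
+ * channels (INT7 + INT8); on power-down only update the channel
+ * count.
+ */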
+static int tasha_codec_enable_swr(struct snd_soc_dapm_widget *w,
+		struct snd_kcontrol *kcontrol, int event)
+{
+	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+	struct tasha_priv *tasha;
+	int i, ch_cnt;
+
+	tasha = snd_soc_codec_get_drvdata(codec);
+
+	if (!tasha->nr)
+		return 0;
+
+	switch (event) {
+	case SND_SOC_DAPM_PRE_PMU:
+		if ((strnstr(w->name, "INT7_", sizeof("RX INT7_"))) &&
+		    !tasha->rx_7_count)
+			tasha->rx_7_count++;
+		if ((strnstr(w->name, "INT8_", sizeof("RX INT8_"))) &&
+		    !tasha->rx_8_count)
+			tasha->rx_8_count++;
+		ch_cnt = tasha->rx_7_count + tasha->rx_8_count;
+
+		for (i = 0; i < tasha->nr; i++) {
+			swrm_wcd_notify(tasha->swr_ctrl_data[i].swr_pdev,
+					SWR_DEVICE_UP, NULL);
+			swrm_wcd_notify(tasha->swr_ctrl_data[i].swr_pdev,
+					SWR_SET_NUM_RX_CH, &ch_cnt);
+		}
+		break;
+	case SND_SOC_DAPM_POST_PMD:
+		if ((strnstr(w->name, "INT7_", sizeof("RX INT7_"))) &&
+		    tasha->rx_7_count)
+			tasha->rx_7_count--;
+		if ((strnstr(w->name, "INT8_", sizeof("RX INT8_"))) &&
+		    tasha->rx_8_count)
+			tasha->rx_8_count--;
+		ch_cnt = tasha->rx_7_count + tasha->rx_8_count;
+
+		for (i = 0; i < tasha->nr; i++)
+			swrm_wcd_notify(tasha->swr_ctrl_data[i].swr_pdev,
+					SWR_SET_NUM_RX_CH, &ch_cnt);
+
+		break;
+	}
+	dev_dbg(tasha->dev, "%s: current swr ch cnt: %d\n",
+		__func__, tasha->rx_7_count + tasha->rx_8_count);
+
+	return 0;
+}
+
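+/*
+ * With the RX7 compander on, the analog stage adds 15 dB (12 dB in
+ * SPKR_MODE_1); offset the RX7 digital volume by that amount so the
+ * net ear-speaker gain tracks ear_spkr_gain, and restore 0 dB on
+ * power-down.
+ */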
+static int tasha_codec_config_ear_spkr_gain(struct snd_soc_codec *codec,
+					    int event, int gain_reg)
+{
+	int comp_gain_offset, val;
+	struct tasha_priv *tasha = snd_soc_codec_get_drvdata(codec);
+
+	switch (tasha->spkr_mode) {
+	/* Compander gain in SPKR_MODE1 case is 12 dB */
+	case SPKR_MODE_1:
+		comp_gain_offset = -12;
+		break;
+	/* Default case compander gain is 15 dB */
+	default:
+		comp_gain_offset = -15;
+		break;
+	}
+
+	switch (event) {
+	case SND_SOC_DAPM_POST_PMU:
+		/* Apply ear spkr gain only if compander is enabled */
+		if (tasha->comp_enabled[COMPANDER_7] &&
+		    (gain_reg == WCD9335_CDC_RX7_RX_VOL_CTL ||
+		     gain_reg == WCD9335_CDC_RX7_RX_VOL_MIX_CTL) &&
+		    (tasha->ear_spkr_gain != 0)) {
+			/*
+			 * e.g. in SPKR_MODE_1 (12 dB compander gain),
+			 * ear_spkr_gain = 5 gives val = -12 + 5 - 1 = -8,
+			 * for a net gain of 4 dB
+			 */
+			val = comp_gain_offset + tasha->ear_spkr_gain - 1;
+			snd_soc_write(codec, gain_reg, val);
+
+			dev_dbg(codec->dev, "%s: RX7 Volume %d dB\n",
+				__func__, val);
+		}
+		break;
+	case SND_SOC_DAPM_POST_PMD:
+		/*
+		 * Reset RX7 volume to 0 dB if compander is enabled and
+		 * ear_spkr_gain is non-zero.
+		 */
+		if (tasha->comp_enabled[COMPANDER_7] &&
+		    (gain_reg == WCD9335_CDC_RX7_RX_VOL_CTL ||
+		     gain_reg == WCD9335_CDC_RX7_RX_VOL_MIX_CTL) &&
+		    (tasha->ear_spkr_gain != 0)) {
+			snd_soc_write(codec, gain_reg, 0x0);
+
+			dev_dbg(codec->dev, "%s: Reset RX7 Volume to 0 dB\n",
+				__func__);
+		}
+		break;
+	}
+
+	return 0;
+}
+
+static int tasha_codec_enable_mix_path(struct snd_soc_dapm_widget *w,
+		struct snd_kcontrol *kcontrol, int event)
+{
+	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+	struct tasha_priv *tasha = snd_soc_codec_get_drvdata(codec);
+	u16 gain_reg;
+	int offset_val = 0;
+	int val = 0;
+
+	dev_dbg(codec->dev, "%s %d %s\n", __func__, event, w->name);
+
+	switch (w->reg) {
+	case WCD9335_CDC_RX0_RX_PATH_MIX_CTL:
+		gain_reg = WCD9335_CDC_RX0_RX_VOL_MIX_CTL;
+		break;
+	case WCD9335_CDC_RX1_RX_PATH_MIX_CTL:
+		gain_reg = WCD9335_CDC_RX1_RX_VOL_MIX_CTL;
+		break;
+	case WCD9335_CDC_RX2_RX_PATH_MIX_CTL:
+		gain_reg = WCD9335_CDC_RX2_RX_VOL_MIX_CTL;
+		break;
+	case WCD9335_CDC_RX3_RX_PATH_MIX_CTL:
+		gain_reg = WCD9335_CDC_RX3_RX_VOL_MIX_CTL;
+		break;
+	case WCD9335_CDC_RX4_RX_PATH_MIX_CTL:
+		gain_reg = WCD9335_CDC_RX4_RX_VOL_MIX_CTL;
+		break;
+	case WCD9335_CDC_RX5_RX_PATH_MIX_CTL:
+		gain_reg = WCD9335_CDC_RX5_RX_VOL_MIX_CTL;
+		break;
+	case WCD9335_CDC_RX6_RX_PATH_MIX_CTL:
+		gain_reg = WCD9335_CDC_RX6_RX_VOL_MIX_CTL;
+		break;
+	case WCD9335_CDC_RX7_RX_PATH_MIX_CTL:
+		gain_reg = WCD9335_CDC_RX7_RX_VOL_MIX_CTL;
+		break;
+	case WCD9335_CDC_RX8_RX_PATH_MIX_CTL:
+		gain_reg = WCD9335_CDC_RX8_RX_VOL_MIX_CTL;
+		break;
+	default:
+		dev_err(codec->dev, "%s: No gain register avail for %s\n",
+			__func__, w->name);
+		return 0;
+	}
+
+	switch (event) {
+	case SND_SOC_DAPM_POST_PMU:
+		if ((tasha->spkr_gain_offset == RX_GAIN_OFFSET_M1P5_DB) &&
+		    (tasha->comp_enabled[COMPANDER_7] ||
+		     tasha->comp_enabled[COMPANDER_8]) &&
+		    (gain_reg == WCD9335_CDC_RX7_RX_VOL_MIX_CTL ||
+		     gain_reg == WCD9335_CDC_RX8_RX_VOL_MIX_CTL)) {
+			snd_soc_update_bits(codec, WCD9335_CDC_RX7_RX_PATH_SEC1,
+					    0x01, 0x01);
+			snd_soc_update_bits(codec,
+					    WCD9335_CDC_RX7_RX_PATH_MIX_SEC0,
+					    0x01, 0x01);
+			snd_soc_update_bits(codec, WCD9335_CDC_RX8_RX_PATH_SEC1,
+					    0x01, 0x01);
+			snd_soc_update_bits(codec,
+					    WCD9335_CDC_RX8_RX_PATH_MIX_SEC0,
+					    0x01, 0x01);
+			offset_val = -2;
+		}
+		val = snd_soc_read(codec, gain_reg);
+		val += offset_val;
+		snd_soc_write(codec, gain_reg, val);
+		tasha_codec_config_ear_spkr_gain(codec, event, gain_reg);
+		break;
+	case SND_SOC_DAPM_POST_PMD:
+		if ((tasha->spkr_gain_offset == RX_GAIN_OFFSET_M1P5_DB) &&
+		    (tasha->comp_enabled[COMPANDER_7] ||
+		     tasha->comp_enabled[COMPANDER_8]) &&
+		    (gain_reg == WCD9335_CDC_RX7_RX_VOL_MIX_CTL ||
+		     gain_reg == WCD9335_CDC_RX8_RX_VOL_MIX_CTL)) {
+			snd_soc_update_bits(codec, WCD9335_CDC_RX7_RX_PATH_SEC1,
+					    0x01, 0x00);
+			snd_soc_update_bits(codec,
+					    WCD9335_CDC_RX7_RX_PATH_MIX_SEC0,
+					    0x01, 0x00);
+			snd_soc_update_bits(codec, WCD9335_CDC_RX8_RX_PATH_SEC1,
+					    0x01, 0x00);
+			snd_soc_update_bits(codec,
+					    WCD9335_CDC_RX8_RX_PATH_MIX_SEC0,
+					    0x01, 0x00);
+			offset_val = 2;
+			val = snd_soc_read(codec, gain_reg);
+			val += offset_val;
+			snd_soc_write(codec, gain_reg, val);
+		}
+		tasha_codec_config_ear_spkr_gain(codec, event, gain_reg);
+		break;
+	}
+
+	return 0;
+}
+
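+/*
+ * Reference-counted enable of the 44.1 kHz native codec clock: the
+ * first user sets up CLOCK_TEST_CTL, ungates the clock in
+ * CODEC_RPM_CLK_GATE and switches MCLK_CONTROL; the last user undoes
+ * the sequence in reverse.
+ */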
+static int __tasha_cdc_native_clk_enable(struct tasha_priv *tasha,
+					 bool enable)
+{
+	int ret = 0;
+	struct snd_soc_codec *codec = tasha->codec;
+
+	if (!tasha->wcd_native_clk) {
+		dev_err(tasha->dev, "%s: wcd native clock is NULL\n", __func__);
+		return -EINVAL;
+	}
+
+	dev_dbg(tasha->dev, "%s: native_clk_enable = %u\n", __func__, enable);
+
+	if (enable) {
+		ret = clk_prepare_enable(tasha->wcd_native_clk);
+		if (ret) {
+			dev_err(tasha->dev, "%s: native clk enable failed\n",
+				__func__);
+			goto err;
+		}
+		if (++tasha->native_clk_users == 1) {
+			snd_soc_update_bits(codec, WCD9335_CLOCK_TEST_CTL,
+					    0x10, 0x10);
+			snd_soc_update_bits(codec, WCD9335_CLOCK_TEST_CTL,
+					    0x80, 0x80);
+			snd_soc_update_bits(codec, WCD9335_CODEC_RPM_CLK_GATE,
+					    0x04, 0x00);
+			snd_soc_update_bits(codec,
+					WCD9335_CDC_CLK_RST_CTRL_MCLK_CONTROL,
+					0x02, 0x02);
+		}
+	} else {
+		if (tasha->native_clk_users &&
+		    (--tasha->native_clk_users == 0)) {
+			snd_soc_update_bits(codec,
+					WCD9335_CDC_CLK_RST_CTRL_MCLK_CONTROL,
+					0x02, 0x00);
+			snd_soc_update_bits(codec, WCD9335_CODEC_RPM_CLK_GATE,
+					    0x04, 0x04);
+			snd_soc_update_bits(codec, WCD9335_CLOCK_TEST_CTL,
+					    0x80, 0x00);
+			snd_soc_update_bits(codec, WCD9335_CLOCK_TEST_CTL,
+					    0x10, 0x00);
+		}
+		clk_disable_unprepare(tasha->wcd_native_clk);
+	}
+
+	dev_dbg(codec->dev, "%s: native_clk_users: %d\n", __func__,
+		tasha->native_clk_users);
+err:
+	return ret;
+}
+
+static int tasha_codec_get_native_fifo_sync_mask(struct snd_soc_codec *codec,
+						 int interp_n)
+{
+	int mask = 0;
+	u16 reg;
+	u8 val1, val2, inp0 = 0;
+	u8 inp1 = 0, inp2 = 0;
+
+	reg = WCD9335_CDC_RX_INP_MUX_RX_INT1_CFG0 + (2 * interp_n) - 2;
+
+	val1 = snd_soc_read(codec, reg);
+	val2 = snd_soc_read(codec, reg + 1);
+
+	inp0 = val1 & 0x0F;
+	inp1 = (val1 >> 4) & 0x0F;
+	inp2 = (val2 >> 4) & 0x0F;
+
+	if (IS_VALID_NATIVE_FIFO_PORT(inp0))
+		mask |= (1 << (inp0 - 5));
+	if (IS_VALID_NATIVE_FIFO_PORT(inp1))
+		mask |= (1 << (inp1 - 5));
+	if (IS_VALID_NATIVE_FIFO_PORT(inp2))
+		mask |= (1 << (inp2 - 5));
+
+	dev_dbg(codec->dev, "%s: native fifo mask: 0x%x\n", __func__, mask);
+	if (!mask)
+		dev_err(codec->dev, "native fifo err,int:%d,inp0:%d,inp1:%d,inp2:%d\n",
+			interp_n, inp0, inp1, inp2);
+	return mask;
+}
+
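+/*
+ * Native-rate supply widget: switch the interpolator selected by
+ * w->shift to the 44.1 kHz native rate code (0x09), enable the
+ * native clock and assert FIFO sync for the RX ports feeding it;
+ * restore the default rate code (0x04) on teardown.
+ */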
+static int tasha_enable_native_supply(struct snd_soc_dapm_widget *w,
+				      struct snd_kcontrol *kcontrol, int event)
+{
+	int mask;
+	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+	struct tasha_priv *tasha = snd_soc_codec_get_drvdata(codec);
+	u16 interp_reg;
+
+	dev_dbg(codec->dev, "%s: event: %d, shift:%d\n", __func__, event,
+		w->shift);
+
+	if (w->shift < INTERP_HPHL || w->shift > INTERP_LO2)
+		return -EINVAL;
+
+	interp_reg = WCD9335_CDC_RX1_RX_PATH_CTL + 20 * (w->shift - 1);
+
+	mask = tasha_codec_get_native_fifo_sync_mask(codec, w->shift);
+	if (!mask)
+		return -EINVAL;
+
+	switch (event) {
+	case SND_SOC_DAPM_PRE_PMU:
+		/* Adjust interpolator rate to 44P1_NATIVE */
+		snd_soc_update_bits(codec, interp_reg, 0x0F, 0x09);
+		__tasha_cdc_native_clk_enable(tasha, true);
+		snd_soc_update_bits(codec, WCD9335_DATA_HUB_NATIVE_FIFO_SYNC,
+				    mask, mask);
+		break;
+	case SND_SOC_DAPM_PRE_PMD:
+		snd_soc_update_bits(codec, WCD9335_DATA_HUB_NATIVE_FIFO_SYNC,
+				    mask, 0x0);
+		__tasha_cdc_native_clk_enable(tasha, false);
+		/* Adjust interpolator rate to default */
+		snd_soc_update_bits(codec, interp_reg, 0x0F, 0x04);
+		break;
+	}
+
+	return 0;
+}
+
+static int tasha_codec_enable_interpolator(struct snd_soc_dapm_widget *w,
+		struct snd_kcontrol *kcontrol, int event)
+{
+	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+	struct tasha_priv *tasha = snd_soc_codec_get_drvdata(codec);
+	u16 gain_reg;
+	u16 reg;
+	int val;
+	int offset_val = 0;
+
+	dev_dbg(codec->dev, "%s %d %s\n", __func__, event, w->name);
+
+	if (!(strcmp(w->name, "RX INT0 INTERP"))) {
+		reg = WCD9335_CDC_RX0_RX_PATH_CTL;
+		gain_reg = WCD9335_CDC_RX0_RX_VOL_CTL;
+	} else if (!(strcmp(w->name, "RX INT1 INTERP"))) {
+		reg = WCD9335_CDC_RX1_RX_PATH_CTL;
+		gain_reg = WCD9335_CDC_RX1_RX_VOL_CTL;
+	} else if (!(strcmp(w->name, "RX INT2 INTERP"))) {
+		reg = WCD9335_CDC_RX2_RX_PATH_CTL;
+		gain_reg = WCD9335_CDC_RX2_RX_VOL_CTL;
+	} else if (!(strcmp(w->name, "RX INT3 INTERP"))) {
+		reg = WCD9335_CDC_RX3_RX_PATH_CTL;
+		gain_reg = WCD9335_CDC_RX3_RX_VOL_CTL;
+	} else if (!(strcmp(w->name, "RX INT4 INTERP"))) {
+		reg = WCD9335_CDC_RX4_RX_PATH_CTL;
+		gain_reg = WCD9335_CDC_RX4_RX_VOL_CTL;
+	} else if (!(strcmp(w->name, "RX INT5 INTERP"))) {
+		reg = WCD9335_CDC_RX5_RX_PATH_CTL;
+		gain_reg = WCD9335_CDC_RX5_RX_VOL_CTL;
+	} else if (!(strcmp(w->name, "RX INT6 INTERP"))) {
+		reg = WCD9335_CDC_RX6_RX_PATH_CTL;
+		gain_reg = WCD9335_CDC_RX6_RX_VOL_CTL;
+	} else if (!(strcmp(w->name, "RX INT7 INTERP"))) {
+		reg = WCD9335_CDC_RX7_RX_PATH_CTL;
+		gain_reg = WCD9335_CDC_RX7_RX_VOL_CTL;
+	} else if (!(strcmp(w->name, "RX INT8 INTERP"))) {
+		reg = WCD9335_CDC_RX8_RX_PATH_CTL;
+		gain_reg = WCD9335_CDC_RX8_RX_VOL_CTL;
+	} else {
+		dev_err(codec->dev, "%s: Interpolator reg not found\n",
+			__func__);
+		return -EINVAL;
+	}
+
+	switch (event) {
+	case SND_SOC_DAPM_PRE_PMU:
+		tasha_codec_vote_max_bw(codec, true);
+		/* Reset if needed */
+		tasha_codec_enable_prim_interpolator(codec, reg, event);
+		break;
+	case SND_SOC_DAPM_POST_PMU:
+		tasha_config_compander(codec, w->shift, event);
+		/* apply gain after int clk is enabled */
+		if ((tasha->spkr_gain_offset == RX_GAIN_OFFSET_M1P5_DB) &&
+		    (tasha->comp_enabled[COMPANDER_7] ||
+		     tasha->comp_enabled[COMPANDER_8]) &&
+		    (gain_reg == WCD9335_CDC_RX7_RX_VOL_CTL ||
+		     gain_reg == WCD9335_CDC_RX8_RX_VOL_CTL)) {
+			snd_soc_update_bits(codec, WCD9335_CDC_RX7_RX_PATH_SEC1,
+					    0x01, 0x01);
+			snd_soc_update_bits(codec,
+					    WCD9335_CDC_RX7_RX_PATH_MIX_SEC0,
+					    0x01, 0x01);
+			snd_soc_update_bits(codec, WCD9335_CDC_RX8_RX_PATH_SEC1,
+					    0x01, 0x01);
+			snd_soc_update_bits(codec,
+					    WCD9335_CDC_RX8_RX_PATH_MIX_SEC0,
+					    0x01, 0x01);
+			offset_val = -2;
+		}
+		val = snd_soc_read(codec, gain_reg);
+		val += offset_val;
+		snd_soc_write(codec, gain_reg, val);
+		tasha_codec_config_ear_spkr_gain(codec, event, gain_reg);
+		break;
+	case SND_SOC_DAPM_POST_PMD:
+		tasha_config_compander(codec, w->shift, event);
+		tasha_codec_enable_prim_interpolator(codec, reg, event);
+		if ((tasha->spkr_gain_offset == RX_GAIN_OFFSET_M1P5_DB) &&
+		    (tasha->comp_enabled[COMPANDER_7] ||
+		     tasha->comp_enabled[COMPANDER_8]) &&
+		    (gain_reg == WCD9335_CDC_RX7_RX_VOL_CTL ||
+		     gain_reg == WCD9335_CDC_RX8_RX_VOL_CTL)) {
+			snd_soc_update_bits(codec, WCD9335_CDC_RX7_RX_PATH_SEC1,
+					    0x01, 0x00);
+			snd_soc_update_bits(codec,
+					    WCD9335_CDC_RX7_RX_PATH_MIX_SEC0,
+					    0x01, 0x00);
+			snd_soc_update_bits(codec, WCD9335_CDC_RX8_RX_PATH_SEC1,
+					    0x01, 0x00);
+			snd_soc_update_bits(codec,
+					    WCD9335_CDC_RX8_RX_PATH_MIX_SEC0,
+					    0x01, 0x00);
+			offset_val = 2;
+			val = snd_soc_read(codec, gain_reg);
+			val += offset_val;
+			snd_soc_write(codec, gain_reg, val);
+		}
+		tasha_codec_config_ear_spkr_gain(codec, event, gain_reg);
+		break;
+	}
+
+	return 0;
+}
+
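+/*
+ * The sidetone IIR gains only take effect once the filter clock is
+ * running, so re-write each band's gain register with its current
+ * value after power-up (and again just before power-down).
+ */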
+static int tasha_codec_set_iir_gain(struct snd_soc_dapm_widget *w,
+				    struct snd_kcontrol *kcontrol, int event)
+{
+	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+
+	dev_dbg(codec->dev, "%s: event = %d\n", __func__, event);
+
+	switch (event) {
+	case SND_SOC_DAPM_POST_PMU: /* fall through */
+	case SND_SOC_DAPM_PRE_PMD:
+		if (strnstr(w->name, "IIR0", sizeof("IIR0"))) {
+			snd_soc_write(codec,
+				WCD9335_CDC_SIDETONE_IIR0_IIR_GAIN_B1_CTL,
+				snd_soc_read(codec,
+				WCD9335_CDC_SIDETONE_IIR0_IIR_GAIN_B1_CTL));
+			snd_soc_write(codec,
+				WCD9335_CDC_SIDETONE_IIR0_IIR_GAIN_B2_CTL,
+				snd_soc_read(codec,
+				WCD9335_CDC_SIDETONE_IIR0_IIR_GAIN_B2_CTL));
+			snd_soc_write(codec,
+				WCD9335_CDC_SIDETONE_IIR0_IIR_GAIN_B3_CTL,
+				snd_soc_read(codec,
+				WCD9335_CDC_SIDETONE_IIR0_IIR_GAIN_B3_CTL));
+			snd_soc_write(codec,
+				WCD9335_CDC_SIDETONE_IIR0_IIR_GAIN_B4_CTL,
+				snd_soc_read(codec,
+				WCD9335_CDC_SIDETONE_IIR0_IIR_GAIN_B4_CTL));
+		} else {
+			snd_soc_write(codec,
+				WCD9335_CDC_SIDETONE_IIR1_IIR_GAIN_B1_CTL,
+				snd_soc_read(codec,
+				WCD9335_CDC_SIDETONE_IIR1_IIR_GAIN_B1_CTL));
+			snd_soc_write(codec,
+				WCD9335_CDC_SIDETONE_IIR1_IIR_GAIN_B2_CTL,
+				snd_soc_read(codec,
+				WCD9335_CDC_SIDETONE_IIR1_IIR_GAIN_B2_CTL));
+			snd_soc_write(codec,
+				WCD9335_CDC_SIDETONE_IIR1_IIR_GAIN_B3_CTL,
+				snd_soc_read(codec,
+				WCD9335_CDC_SIDETONE_IIR1_IIR_GAIN_B3_CTL));
+		}
+		break;
+	}
+	return 0;
+}
+
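+/*
+ * Toggle the on-demand regulator selected by w->shift around DAPM
+ * power events.
+ */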
+static int tasha_codec_enable_on_demand_supply(
+	struct snd_soc_dapm_widget *w,
+	struct snd_kcontrol *kcontrol, int event)
+{
+	int ret = 0;
+	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+	struct tasha_priv *tasha = snd_soc_codec_get_drvdata(codec);
+	struct on_demand_supply *supply;
+
+	if (w->shift >= ON_DEMAND_SUPPLIES_MAX) {
+		dev_err(codec->dev, "%s: error index > MAX Demand supplies",
+			__func__);
+		ret = -EINVAL;
+		goto out;
+	}
+
+	dev_dbg(codec->dev, "%s: supply: %s event: %d\n",
+		__func__, on_demand_supply_name[w->shift], event);
+
+	supply = &tasha->on_demand_list[w->shift];
+	WARN_ONCE(!supply->supply, "%s isn't defined\n",
+		on_demand_supply_name[w->shift]);
+	if (!supply->supply) {
+		dev_err(codec->dev, "%s: err supply not present ond for %d",
+			__func__, w->shift);
+		goto out;
+	}
+
+	switch (event) {
+	case SND_SOC_DAPM_PRE_PMU:
+		ret = regulator_enable(supply->supply);
+		if (ret)
+			dev_err(codec->dev, "%s: Failed to enable %s\n",
+				__func__,
+				on_demand_supply_name[w->shift]);
+		break;
+	case SND_SOC_DAPM_POST_PMD:
+		ret = regulator_disable(supply->supply);
+		if (ret)
+			dev_err(codec->dev, "%s: Failed to disable %s\n",
+				__func__,
+				on_demand_supply_name[w->shift]);
+		break;
+	default:
+		break;
+	}
+
+out:
+	return ret;
+}
+
+static int tasha_codec_find_amic_input(struct snd_soc_codec *codec,
+				       int adc_mux_n)
+{
+	u16 mask, shift, adc_mux_in_reg;
+	u16 amic_mux_sel_reg;
+	bool is_amic;
+
+	if (adc_mux_n < 0 || adc_mux_n > WCD9335_MAX_VALID_ADC_MUX ||
+	    adc_mux_n == WCD9335_INVALID_ADC_MUX)
+		return 0;
+
+	/* Check whether adc mux input is AMIC or DMIC */
+	if (adc_mux_n < 4) {
+		adc_mux_in_reg = WCD9335_CDC_TX_INP_MUX_ADC_MUX0_CFG1 +
+				 2 * adc_mux_n;
+		amic_mux_sel_reg = WCD9335_CDC_TX_INP_MUX_ADC_MUX0_CFG0 +
+				   2 * adc_mux_n;
+		mask = 0x03;
+		shift = 0;
+	} else {
+		adc_mux_in_reg = WCD9335_CDC_TX_INP_MUX_ADC_MUX4_CFG0 +
+				 adc_mux_n - 4;
+		amic_mux_sel_reg = adc_mux_in_reg;
+		mask = 0xC0;
+		shift = 6;
+	}
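+	/* A mux input value of 1 selects the AMIC path for this ADC mux */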
+	is_amic = (((snd_soc_read(codec, adc_mux_in_reg) & mask) >> shift)
+		    == 1);
+	if (!is_amic)
+		return 0;
+
+	return snd_soc_read(codec, amic_mux_sel_reg) & 0x07;
+}
+
+static void tasha_codec_set_tx_hold(struct snd_soc_codec *codec,
+				    u16 amic_reg, bool set)
+{
+	u8 mask = 0x20;
+	u8 val;
+
+	if (amic_reg == WCD9335_ANA_AMIC1 ||
+	    amic_reg == WCD9335_ANA_AMIC3 ||
+	    amic_reg == WCD9335_ANA_AMIC5)
+		mask = 0x40;
+
+	val = set ? mask : 0x00;
+
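+	/*
+	 * The TX HOLD bits for an AMIC pair live in the even-numbered
+	 * AMIC register: bit 6 controls the odd AMIC of the pair and
+	 * bit 5 the even one.
+	 */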
+	switch (amic_reg) {
+	case WCD9335_ANA_AMIC1:
+	case WCD9335_ANA_AMIC2:
+		snd_soc_update_bits(codec, WCD9335_ANA_AMIC2, mask, val);
+		break;
+	case WCD9335_ANA_AMIC3:
+	case WCD9335_ANA_AMIC4:
+		snd_soc_update_bits(codec, WCD9335_ANA_AMIC4, mask, val);
+		break;
+	case WCD9335_ANA_AMIC5:
+	case WCD9335_ANA_AMIC6:
+		snd_soc_update_bits(codec, WCD9335_ANA_AMIC6, mask, val);
+		break;
+	default:
+		dev_dbg(codec->dev, "%s: invalid amic: %d\n",
+			__func__, amic_reg);
+		break;
+	}
+}
+
+static int tasha_codec_tx_adc_cfg(struct snd_soc_dapm_widget *w,
+				  struct snd_kcontrol *kcontrol, int event)
+{
+	int adc_mux_n = w->shift;
+	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+	struct tasha_priv *tasha = snd_soc_codec_get_drvdata(codec);
+	int amic_n;
+
+	dev_dbg(codec->dev, "%s: event: %d\n", __func__, event);
+
+	switch (event) {
+	case SND_SOC_DAPM_POST_PMU:
+		amic_n = tasha_codec_find_amic_input(codec, adc_mux_n);
+		if (amic_n) {
+			/*
+			 * Prevent ANC Rx pop by leaving Tx FE in HOLD
+			 * state until PA is up. Track AMIC being used
+			 * so we can release the HOLD later.
+			 */
+			set_bit(ANC_MIC_AMIC1 + amic_n - 1,
+				&tasha->status_mask);
+		}
+		break;
+	default:
+		break;
+	}
+
+	return 0;
+}
+
+static u16 tasha_codec_get_amic_pwlvl_reg(struct snd_soc_codec *codec, int amic)
+{
+	u16 pwr_level_reg = 0;
+
+	switch (amic) {
+	case 1:
+	case 2:
+		pwr_level_reg = WCD9335_ANA_AMIC1;
+		break;
+
+	case 3:
+	case 4:
+		pwr_level_reg = WCD9335_ANA_AMIC3;
+		break;
+
+	case 5:
+	case 6:
+		pwr_level_reg = WCD9335_ANA_AMIC5;
+		break;
+	default:
+		dev_dbg(codec->dev, "%s: invalid amic: %d\n",
+			__func__, amic);
+		break;
+	}
+
+	return pwr_level_reg;
+}
+
+#define TX_HPF_CUT_OFF_FREQ_MASK	0x60
+#define CF_MIN_3DB_4HZ			0x0
+#define CF_MIN_3DB_75HZ			0x1
+#define CF_MIN_3DB_150HZ		0x2
+
+static void tasha_tx_hpf_corner_freq_callback(struct work_struct *work)
+{
+	struct delayed_work *hpf_delayed_work;
+	struct hpf_work *hpf_work;
+	struct tasha_priv *tasha;
+	struct snd_soc_codec *codec;
+	u16 dec_cfg_reg, amic_reg;
+	u8 hpf_cut_off_freq;
+	int amic_n;
+
+	hpf_delayed_work = to_delayed_work(work);
+	hpf_work = container_of(hpf_delayed_work, struct hpf_work, dwork);
+	tasha = hpf_work->tasha;
+	codec = tasha->codec;
+	hpf_cut_off_freq = hpf_work->hpf_cut_off_freq;
+
+	dec_cfg_reg = WCD9335_CDC_TX0_TX_PATH_CFG0 + 16 * hpf_work->decimator;
+
+	dev_dbg(codec->dev, "%s: decimator %u hpf_cut_of_freq 0x%x\n",
+		__func__, hpf_work->decimator, hpf_cut_off_freq);
+
+	amic_n = tasha_codec_find_amic_input(codec, hpf_work->decimator);
+	if (amic_n) {
+		amic_reg = WCD9335_ANA_AMIC1 + amic_n - 1;
+		tasha_codec_set_tx_hold(codec, amic_reg, false);
+	}
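+	/* Hold the max bandwidth vote while restoring the HPF cutoff */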
+	tasha_codec_vote_max_bw(codec, true);
+	snd_soc_update_bits(codec, dec_cfg_reg, TX_HPF_CUT_OFF_FREQ_MASK,
+			    hpf_cut_off_freq << 5);
+	tasha_codec_vote_max_bw(codec, false);
+}
+
+static void tasha_tx_mute_update_callback(struct work_struct *work)
+{
+	struct tx_mute_work *tx_mute_dwork;
+	struct tasha_priv *tasha;
+	struct delayed_work *delayed_work;
+	struct snd_soc_codec *codec;
+	u16 tx_vol_ctl_reg, hpf_gate_reg;
+
+	delayed_work = to_delayed_work(work);
+	tx_mute_dwork = container_of(delayed_work, struct tx_mute_work, dwork);
+	tasha = tx_mute_dwork->tasha;
+	codec = tasha->codec;
+
+	tx_vol_ctl_reg = WCD9335_CDC_TX0_TX_PATH_CTL +
+					16 * tx_mute_dwork->decimator;
+	hpf_gate_reg = WCD9335_CDC_TX0_TX_PATH_SEC2 +
+					16 * tx_mute_dwork->decimator;
+	snd_soc_update_bits(codec, hpf_gate_reg, 0x01, 0x01);
+	snd_soc_update_bits(codec, tx_vol_ctl_reg, 0x10, 0x00);
+}
+
+static int tasha_codec_enable_dec(struct snd_soc_dapm_widget *w,
+	struct snd_kcontrol *kcontrol, int event)
+{
+	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+	unsigned int decimator;
+	char *dec_adc_mux_name = NULL;
+	char *widget_name = NULL;
+	char *wname;
+	int ret = 0, amic_n;
+	u16 tx_vol_ctl_reg, pwr_level_reg = 0, dec_cfg_reg, hpf_gate_reg;
+	u16 tx_gain_ctl_reg;
+	char *dec;
+	u8 hpf_cut_off_freq;
+	struct tasha_priv *tasha = snd_soc_codec_get_drvdata(codec);
+
+	dev_dbg(codec->dev, "%s %d\n", __func__, event);
+
+	widget_name = kstrndup(w->name, 15, GFP_KERNEL);
+	if (!widget_name)
+		return -ENOMEM;
+
+	wname = widget_name;
+	dec_adc_mux_name = strsep(&widget_name, " ");
+	if (!dec_adc_mux_name) {
+		dev_err(codec->dev, "%s: Invalid decimator = %s\n",
+			__func__, w->name);
+		ret = -EINVAL;
+		goto out;
+	}
+	dec_adc_mux_name = widget_name;
+
+	dec = strpbrk(dec_adc_mux_name, "012345678");
+	if (!dec) {
+		dev_err(codec->dev, "%s: decimator index not found\n",
+			__func__);
+		ret = -EINVAL;
+		goto out;
+	}
+
+	ret = kstrtouint(dec, 10, &decimator);
+	if (ret < 0) {
+		dev_err(codec->dev, "%s: Invalid decimator = %s\n",
+			__func__, wname);
+		ret = -EINVAL;
+		goto out;
+	}
+
+	dev_dbg(codec->dev, "%s(): widget = %s decimator = %u\n", __func__,
+			w->name, decimator);
+
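+	/* Per-decimator TX registers are spaced at a stride of 16 from TX0 */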
+	tx_vol_ctl_reg = WCD9335_CDC_TX0_TX_PATH_CTL + 16 * decimator;
+	hpf_gate_reg = WCD9335_CDC_TX0_TX_PATH_SEC2 + 16 * decimator;
+	dec_cfg_reg = WCD9335_CDC_TX0_TX_PATH_CFG0 + 16 * decimator;
+	tx_gain_ctl_reg = WCD9335_CDC_TX0_TX_VOL_CTL + 16 * decimator;
+
+	switch (event) {
+	case SND_SOC_DAPM_PRE_PMU:
+		amic_n = tasha_codec_find_amic_input(codec, decimator);
+		if (amic_n)
+			pwr_level_reg = tasha_codec_get_amic_pwlvl_reg(codec,
+								       amic_n);
+
+		if (pwr_level_reg) {
+			switch ((snd_soc_read(codec, pwr_level_reg) &
+					      WCD9335_AMIC_PWR_LVL_MASK) >>
+					      WCD9335_AMIC_PWR_LVL_SHIFT) {
+			case WCD9335_AMIC_PWR_LEVEL_LP:
+				snd_soc_update_bits(codec, dec_cfg_reg,
+						    WCD9335_DEC_PWR_LVL_MASK,
+						    WCD9335_DEC_PWR_LVL_LP);
+				break;
+
+			case WCD9335_AMIC_PWR_LEVEL_HP:
+				snd_soc_update_bits(codec, dec_cfg_reg,
+						    WCD9335_DEC_PWR_LVL_MASK,
+						    WCD9335_DEC_PWR_LVL_HP);
+				break;
+			case WCD9335_AMIC_PWR_LEVEL_DEFAULT:
+			default:
+				snd_soc_update_bits(codec, dec_cfg_reg,
+						    WCD9335_DEC_PWR_LVL_MASK,
+						    WCD9335_DEC_PWR_LVL_DF);
+				break;
+			}
+		}
+		hpf_cut_off_freq = (snd_soc_read(codec, dec_cfg_reg) &
+				   TX_HPF_CUT_OFF_FREQ_MASK) >> 5;
+		tasha->tx_hpf_work[decimator].hpf_cut_off_freq =
+							hpf_cut_off_freq;
+
+		if (hpf_cut_off_freq != CF_MIN_3DB_150HZ)
+			snd_soc_update_bits(codec, dec_cfg_reg,
+					    TX_HPF_CUT_OFF_FREQ_MASK,
+					    CF_MIN_3DB_150HZ << 5);
+		/* Enable TX PGA Mute */
+		snd_soc_update_bits(codec, tx_vol_ctl_reg, 0x10, 0x10);
+		break;
+	case SND_SOC_DAPM_POST_PMU:
+		snd_soc_update_bits(codec, hpf_gate_reg, 0x01, 0x00);
+
+		if (decimator == 0) {
+			snd_soc_write(codec, WCD9335_MBHC_ZDET_RAMP_CTL, 0x83);
+			snd_soc_write(codec, WCD9335_MBHC_ZDET_RAMP_CTL, 0xA3);
+			snd_soc_write(codec, WCD9335_MBHC_ZDET_RAMP_CTL, 0x83);
+			snd_soc_write(codec, WCD9335_MBHC_ZDET_RAMP_CTL, 0x03);
+		}
+		/* schedule delayed work to remove the TX mute */
+		schedule_delayed_work(&tasha->tx_mute_dwork[decimator].dwork,
+				      msecs_to_jiffies(tx_unmute_delay));
+		if (tasha->tx_hpf_work[decimator].hpf_cut_off_freq !=
+							CF_MIN_3DB_150HZ)
+			schedule_delayed_work(
+					&tasha->tx_hpf_work[decimator].dwork,
+					msecs_to_jiffies(300));
+		/* apply gain after decimator is enabled */
+		snd_soc_write(codec, tx_gain_ctl_reg,
+			      snd_soc_read(codec, tx_gain_ctl_reg));
+		break;
+	case SND_SOC_DAPM_PRE_PMD:
+		hpf_cut_off_freq =
+			tasha->tx_hpf_work[decimator].hpf_cut_off_freq;
+		snd_soc_update_bits(codec, tx_vol_ctl_reg, 0x10, 0x10);
+		if (cancel_delayed_work_sync(
+		    &tasha->tx_hpf_work[decimator].dwork)) {
+			if (hpf_cut_off_freq != CF_MIN_3DB_150HZ) {
+				tasha_codec_vote_max_bw(codec, true);
+				snd_soc_update_bits(codec, dec_cfg_reg,
+						    TX_HPF_CUT_OFF_FREQ_MASK,
+						    hpf_cut_off_freq << 5);
+				tasha_codec_vote_max_bw(codec, false);
+			}
+		}
+		cancel_delayed_work_sync(
+				&tasha->tx_mute_dwork[decimator].dwork);
+		break;
+	case SND_SOC_DAPM_POST_PMD:
+		snd_soc_update_bits(codec, tx_vol_ctl_reg, 0x10, 0x00);
+		break;
+	}
+out:
+	kfree(wname);
+	return ret;
+}
+
+static u32 tasha_get_dmic_sample_rate(struct snd_soc_codec *codec,
+				unsigned int dmic, struct wcd9xxx_pdata *pdata)
+{
+	u8 tx_stream_fs;
+	u8 adc_mux_index = 0, adc_mux_sel = 0;
+	bool dec_found = false;
+	u16 adc_mux_ctl_reg, tx_fs_reg;
+	u32 dmic_fs;
+
+	while (!dec_found && adc_mux_index < WCD9335_MAX_VALID_ADC_MUX) {
+		if (adc_mux_index < 4) {
+			adc_mux_ctl_reg = WCD9335_CDC_TX_INP_MUX_ADC_MUX0_CFG0 +
+						(adc_mux_index * 2);
+			adc_mux_sel = ((snd_soc_read(codec, adc_mux_ctl_reg) &
+						0x78) >> 3) - 1;
+		} else if (adc_mux_index < 9) {
+			adc_mux_ctl_reg = WCD9335_CDC_TX_INP_MUX_ADC_MUX4_CFG0 +
+						(adc_mux_index - 4);
+			adc_mux_sel = ((snd_soc_read(codec, adc_mux_ctl_reg) &
+						0x38) >> 3) - 1;
+		} else if (adc_mux_index == 9) {
+			++adc_mux_index;
+			continue;
+		}
+		if (adc_mux_sel == dmic)
+			dec_found = true;
+		else
+			++adc_mux_index;
+	}
+
+	if (dec_found && adc_mux_index <= 8) {
+		tx_fs_reg = WCD9335_CDC_TX0_TX_PATH_CTL + (16 * adc_mux_index);
+		tx_stream_fs = snd_soc_read(codec, tx_fs_reg) & 0x0F;
+		dmic_fs = tx_stream_fs <= 4 ? WCD9XXX_DMIC_SAMPLE_RATE_2P4MHZ :
+					WCD9XXX_DMIC_SAMPLE_RATE_4P8MHZ;
+
+		/*
+		 * Check for ECPP path selection and DEC1 not connected to
+		 * any other audio path to apply ECPP DMIC sample rate
+		 */
+		if ((adc_mux_index == 1) &&
+		    ((snd_soc_read(codec, WCD9335_CPE_SS_US_EC_MUX_CFG)
+				   & 0x0F) == 0x0A) &&
+		    ((snd_soc_read(codec, WCD9335_CDC_IF_ROUTER_TX_MUX_CFG0)
+				   & 0x0C) == 0x00)) {
+			dmic_fs = pdata->ecpp_dmic_sample_rate;
+		}
+	} else {
+		dmic_fs = pdata->dmic_sample_rate;
+	}
+
+	return dmic_fs;
+}
+
+static u8 tasha_get_dmic_clk_val(struct snd_soc_codec *codec,
+				 u32 mclk_rate, u32 dmic_clk_rate)
+{
+	u32 div_factor;
+	u8 dmic_ctl_val;
+
+	dev_dbg(codec->dev,
+		"%s: mclk_rate = %d, dmic_sample_rate = %d\n",
+		__func__, mclk_rate, dmic_clk_rate);
+
+	/* Default value to return in case of error */
+	if (mclk_rate == TASHA_MCLK_CLK_9P6MHZ)
+		dmic_ctl_val = WCD9335_DMIC_CLK_DIV_2;
+	else
+		dmic_ctl_val = WCD9335_DMIC_CLK_DIV_3;
+
+	if (dmic_clk_rate == 0) {
+		dev_err(codec->dev,
+			"%s: dmic_sample_rate cannot be 0\n",
+			__func__);
+		goto done;
+	}
+
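+	/*
+	 * The DMIC clock is MCLK divided by an integer factor; only the
+	 * divider values handled below are supported by the codec.
+	 */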
+	div_factor = mclk_rate / dmic_clk_rate;
+	switch (div_factor) {
+	case 2:
+		dmic_ctl_val = WCD9335_DMIC_CLK_DIV_2;
+		break;
+	case 3:
+		dmic_ctl_val = WCD9335_DMIC_CLK_DIV_3;
+		break;
+	case 4:
+		dmic_ctl_val = WCD9335_DMIC_CLK_DIV_4;
+		break;
+	case 6:
+		dmic_ctl_val = WCD9335_DMIC_CLK_DIV_6;
+		break;
+	case 8:
+		dmic_ctl_val = WCD9335_DMIC_CLK_DIV_8;
+		break;
+	case 16:
+		dmic_ctl_val = WCD9335_DMIC_CLK_DIV_16;
+		break;
+	default:
+		dev_err(codec->dev,
+			"%s: Invalid div_factor %u, clk_rate(%u), dmic_rate(%u)\n",
+			__func__, div_factor, mclk_rate, dmic_clk_rate);
+		break;
+	}
+
+done:
+	return dmic_ctl_val;
+}
+
+static int tasha_codec_enable_adc(struct snd_soc_dapm_widget *w,
+		struct snd_kcontrol *kcontrol, int event)
+{
+	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+
+	dev_dbg(codec->dev, "%s: event:%d\n", __func__, event);
+
+	switch (event) {
+	case SND_SOC_DAPM_PRE_PMU:
+		tasha_codec_set_tx_hold(codec, w->reg, true);
+		break;
+	default:
+		break;
+	}
+
+	return 0;
+}
+
+static int tasha_codec_enable_dmic(struct snd_soc_dapm_widget *w,
+		struct snd_kcontrol *kcontrol, int event)
+{
+	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+	struct tasha_priv *tasha = snd_soc_codec_get_drvdata(codec);
+	struct wcd9xxx_pdata *pdata = dev_get_platdata(codec->dev->parent);
+	u8 dmic_clk_en = 0x01;
+	u16 dmic_clk_reg;
+	s32 *dmic_clk_cnt;
+	u8 dmic_rate_val, dmic_rate_shift = 1;
+	unsigned int dmic;
+	u32 dmic_sample_rate;
+	int ret;
+	char *wname;
+
+	wname = strpbrk(w->name, "012345");
+	if (!wname) {
+		dev_err(codec->dev, "%s: widget not found\n", __func__);
+		return -EINVAL;
+	}
+
+	ret = kstrtouint(wname, 10, &dmic);
+	if (ret < 0) {
+		dev_err(codec->dev, "%s: Invalid DMIC line on the codec\n",
+			__func__);
+		return -EINVAL;
+	}
+
+	switch (dmic) {
+	case 0:
+	case 1:
+		dmic_clk_cnt = &(tasha->dmic_0_1_clk_cnt);
+		dmic_clk_reg = WCD9335_CPE_SS_DMIC0_CTL;
+		break;
+	case 2:
+	case 3:
+		dmic_clk_cnt = &(tasha->dmic_2_3_clk_cnt);
+		dmic_clk_reg = WCD9335_CPE_SS_DMIC1_CTL;
+		break;
+	case 4:
+	case 5:
+		dmic_clk_cnt = &(tasha->dmic_4_5_clk_cnt);
+		dmic_clk_reg = WCD9335_CPE_SS_DMIC2_CTL;
+		break;
+	default:
+		dev_err(codec->dev, "%s: Invalid DMIC Selection\n",
+			__func__);
+		return -EINVAL;
+	}
+	dev_dbg(codec->dev, "%s: event %d DMIC%d dmic_clk_cnt %d\n",
+			__func__, event, dmic, *dmic_clk_cnt);
+
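+	/*
+	 * DMICs share a clock per pair (0/1, 2/3, 4/5), so each clock is
+	 * ref-counted: enabled for the first user, disabled with the last.
+	 */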
+	switch (event) {
+	case SND_SOC_DAPM_PRE_PMU:
+		dmic_sample_rate = tasha_get_dmic_sample_rate(codec, dmic,
+						pdata);
+		dmic_rate_val =
+			tasha_get_dmic_clk_val(codec,
+					pdata->mclk_rate,
+					dmic_sample_rate);
+
+		(*dmic_clk_cnt)++;
+		if (*dmic_clk_cnt == 1) {
+			snd_soc_update_bits(codec, dmic_clk_reg,
+				0x07 << dmic_rate_shift,
+				dmic_rate_val << dmic_rate_shift);
+			snd_soc_update_bits(codec, dmic_clk_reg,
+					dmic_clk_en, dmic_clk_en);
+		}
+
+		break;
+	case SND_SOC_DAPM_POST_PMD:
+		dmic_rate_val =
+			tasha_get_dmic_clk_val(codec,
+					pdata->mclk_rate,
+					pdata->mad_dmic_sample_rate);
+		(*dmic_clk_cnt)--;
+		if (*dmic_clk_cnt == 0) {
+			snd_soc_update_bits(codec, dmic_clk_reg,
+					dmic_clk_en, 0);
+			snd_soc_update_bits(codec, dmic_clk_reg,
+				0x07 << dmic_rate_shift,
+				dmic_rate_val << dmic_rate_shift);
+		}
+		break;
+	}
+
+	return 0;
+}
+
+static int __tasha_codec_enable_micbias(struct snd_soc_dapm_widget *w,
+					int event)
+{
+	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+	int micb_num;
+
+	dev_dbg(codec->dev, "%s: wname: %s, event: %d\n",
+		__func__, w->name, event);
+
+	if (strnstr(w->name, "MIC BIAS1", sizeof("MIC BIAS1")))
+		micb_num = MIC_BIAS_1;
+	else if (strnstr(w->name, "MIC BIAS2", sizeof("MIC BIAS2")))
+		micb_num = MIC_BIAS_2;
+	else if (strnstr(w->name, "MIC BIAS3", sizeof("MIC BIAS3")))
+		micb_num = MIC_BIAS_3;
+	else if (strnstr(w->name, "MIC BIAS4", sizeof("MIC BIAS4")))
+		micb_num = MIC_BIAS_4;
+	else
+		return -EINVAL;
+
+	switch (event) {
+	case SND_SOC_DAPM_PRE_PMU:
+		/*
+		 * MIC BIAS can also be requested by MBHC,
+		 * so use ref count to handle micbias pullup
+		 * and enable requests
+		 */
+		tasha_micbias_control(codec, micb_num, MICB_ENABLE, true);
+		break;
+	case SND_SOC_DAPM_POST_PMU:
+		/* wait for cnp time */
+		usleep_range(1000, 1100);
+		break;
+	case SND_SOC_DAPM_POST_PMD:
+		tasha_micbias_control(codec, micb_num, MICB_DISABLE, true);
+		break;
+	}
+
+	return 0;
+}
+
+static int tasha_codec_ldo_h_control(struct snd_soc_dapm_widget *w,
+				     int event)
+{
+	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+	struct tasha_priv *tasha = snd_soc_codec_get_drvdata(codec);
+
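+	/* LDOH is ref-counted: first user turns it on, last one turns it off */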
+	if (SND_SOC_DAPM_EVENT_ON(event)) {
+		tasha->ldo_h_users++;
+
+		if (tasha->ldo_h_users == 1)
+			snd_soc_update_bits(codec, WCD9335_LDOH_MODE,
+					    0x80, 0x80);
+	}
+
+	if (SND_SOC_DAPM_EVENT_OFF(event)) {
+		tasha->ldo_h_users--;
+
+		if (tasha->ldo_h_users < 0)
+			tasha->ldo_h_users = 0;
+
+		if (tasha->ldo_h_users == 0)
+			snd_soc_update_bits(codec, WCD9335_LDOH_MODE,
+					    0x80, 0x00);
+	}
+
+	return 0;
+}
+
+static int tasha_codec_force_enable_ldo_h(struct snd_soc_dapm_widget *w,
+					  struct snd_kcontrol *kcontrol,
+					  int event)
+{
+	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+	struct tasha_priv *tasha = snd_soc_codec_get_drvdata(codec);
+
+	switch (event) {
+	case SND_SOC_DAPM_PRE_PMU:
+		wcd_resmgr_enable_master_bias(tasha->resmgr);
+		tasha_codec_ldo_h_control(w, event);
+		break;
+	case SND_SOC_DAPM_POST_PMD:
+		tasha_codec_ldo_h_control(w, event);
+		wcd_resmgr_disable_master_bias(tasha->resmgr);
+		break;
+	}
+
+	return 0;
+}
+
+static int tasha_codec_force_enable_micbias(struct snd_soc_dapm_widget *w,
+					    struct snd_kcontrol *kcontrol,
+					    int event)
+{
+	int ret = 0;
+	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+	struct tasha_priv *tasha = snd_soc_codec_get_drvdata(codec);
+
+	switch (event) {
+	case SND_SOC_DAPM_PRE_PMU:
+		wcd_resmgr_enable_master_bias(tasha->resmgr);
+		tasha_cdc_mclk_enable(codec, true, true);
+		ret = __tasha_codec_enable_micbias(w, SND_SOC_DAPM_PRE_PMU);
+		/* Wait 1 ms for better click-and-pop (cnp) behaviour */
+		usleep_range(1000, 1100);
+		tasha_cdc_mclk_enable(codec, false, true);
+		break;
+	case SND_SOC_DAPM_POST_PMD:
+		ret = __tasha_codec_enable_micbias(w, SND_SOC_DAPM_POST_PMD);
+		wcd_resmgr_disable_master_bias(tasha->resmgr);
+		break;
+	}
+
+	return ret;
+}
+
+static int tasha_codec_enable_micbias(struct snd_soc_dapm_widget *w,
+		struct snd_kcontrol *kcontrol, int event)
+{
+	return __tasha_codec_enable_micbias(w, event);
+}
+
+static int tasha_codec_enable_standalone_ldo_h(struct snd_soc_codec *codec,
+					       bool enable)
+{
+	int rc;
+
+	if (enable)
+		rc = snd_soc_dapm_force_enable_pin(
+					snd_soc_codec_get_dapm(codec),
+					DAPM_LDO_H_STANDALONE);
+	else
+		rc = snd_soc_dapm_disable_pin(
+					snd_soc_codec_get_dapm(codec),
+					DAPM_LDO_H_STANDALONE);
+
+	if (!rc)
+		snd_soc_dapm_sync(snd_soc_codec_get_dapm(codec));
+	else
+		dev_err(codec->dev, "%s: ldo_h force %s pin failed\n",
+			__func__, (enable ? "enable" : "disable"));
+
+	return rc;
+}
+
+/*
+ * tasha_codec_enable_standalone_micbias - enable micbias in standalone mode
+ * @codec: pointer to codec instance
+ * @micb_num: micbias number (1, 2, 3 or 4) to be enabled
+ * @enable: true to enable micbias, false to disable
+ *
+ * Enable or disable a micbias in standalone mode, independent of
+ * whether a TX use-case is running.
+ *
+ * Return: error code in case of failure or 0 for success
+ */
+int tasha_codec_enable_standalone_micbias(struct snd_soc_codec *codec,
+					  int micb_num,
+					  bool enable)
+{
+	const char * const micb_names[] = {
+		DAPM_MICBIAS1_STANDALONE, DAPM_MICBIAS2_STANDALONE,
+		DAPM_MICBIAS3_STANDALONE, DAPM_MICBIAS4_STANDALONE
+	};
+	int micb_index = micb_num - 1;
+	int rc;
+
+	if (!codec) {
+		pr_err("%s: Codec memory is NULL\n", __func__);
+		return -EINVAL;
+	}
+
+	if ((micb_index < 0) || (micb_index >= TASHA_MAX_MICBIAS)) {
+		dev_err(codec->dev, "%s: Invalid micbias index, micb_ind:%d\n",
+			__func__, micb_index);
+		return -EINVAL;
+	}
+
+	if (enable)
+		rc = snd_soc_dapm_force_enable_pin(
+					snd_soc_codec_get_dapm(codec),
+					micb_names[micb_index]);
+	else
+		rc = snd_soc_dapm_disable_pin(snd_soc_codec_get_dapm(codec),
+					      micb_names[micb_index]);
+
+	if (!rc)
+		snd_soc_dapm_sync(snd_soc_codec_get_dapm(codec));
+	else
+		dev_err(codec->dev, "%s: micbias%d force %s pin failed\n",
+			__func__, micb_num, (enable ? "enable" : "disable"));
+
+	return rc;
+}
+EXPORT_SYMBOL(tasha_codec_enable_standalone_micbias);
+
+static const char *const tasha_anc_func_text[] = {"OFF", "ON"};
+static const struct soc_enum tasha_anc_func_enum =
+		SOC_ENUM_SINGLE_EXT(2, tasha_anc_func_text);
+
+static const char *const tasha_clkmode_text[] = {"EXTERNAL", "INTERNAL"};
+static SOC_ENUM_SINGLE_EXT_DECL(tasha_clkmode_enum, tasha_clkmode_text);
+
+/* Cutoff frequency for high pass filter */
+static const char * const cf_text[] = {
+	"CF_NEG_3DB_4HZ", "CF_NEG_3DB_75HZ", "CF_NEG_3DB_150HZ"
+};
+
+static const char * const rx_cf_text[] = {
+	"CF_NEG_3DB_4HZ", "CF_NEG_3DB_75HZ", "CF_NEG_3DB_150HZ",
+	"CF_NEG_3DB_0P48HZ"
+};
+
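+/*
+ * TX decimator HPF cutoff lives in bits [6:5] of each TX_PATH_CFG0
+ * register; the RX interpolator paths add a fourth 0.48 Hz option.
+ */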
+static const struct soc_enum cf_dec0_enum =
+	SOC_ENUM_SINGLE(WCD9335_CDC_TX0_TX_PATH_CFG0, 5, 3, cf_text);
+
+static const struct soc_enum cf_dec1_enum =
+	SOC_ENUM_SINGLE(WCD9335_CDC_TX1_TX_PATH_CFG0, 5, 3, cf_text);
+
+static const struct soc_enum cf_dec2_enum =
+	SOC_ENUM_SINGLE(WCD9335_CDC_TX2_TX_PATH_CFG0, 5, 3, cf_text);
+
+static const struct soc_enum cf_dec3_enum =
+	SOC_ENUM_SINGLE(WCD9335_CDC_TX3_TX_PATH_CFG0, 5, 3, cf_text);
+
+static const struct soc_enum cf_dec4_enum =
+	SOC_ENUM_SINGLE(WCD9335_CDC_TX4_TX_PATH_CFG0, 5, 3, cf_text);
+
+static const struct soc_enum cf_dec5_enum =
+	SOC_ENUM_SINGLE(WCD9335_CDC_TX5_TX_PATH_CFG0, 5, 3, cf_text);
+
+static const struct soc_enum cf_dec6_enum =
+	SOC_ENUM_SINGLE(WCD9335_CDC_TX6_TX_PATH_CFG0, 5, 3, cf_text);
+
+static const struct soc_enum cf_dec7_enum =
+	SOC_ENUM_SINGLE(WCD9335_CDC_TX7_TX_PATH_CFG0, 5, 3, cf_text);
+
+static const struct soc_enum cf_dec8_enum =
+	SOC_ENUM_SINGLE(WCD9335_CDC_TX8_TX_PATH_CFG0, 5, 3, cf_text);
+
+static const struct soc_enum cf_int0_1_enum =
+	SOC_ENUM_SINGLE(WCD9335_CDC_RX0_RX_PATH_CFG2, 0, 4, rx_cf_text);
+
+static SOC_ENUM_SINGLE_DECL(cf_int0_2_enum, WCD9335_CDC_RX0_RX_PATH_MIX_CFG, 2,
+		     rx_cf_text);
+
+static const struct soc_enum cf_int1_1_enum =
+	SOC_ENUM_SINGLE(WCD9335_CDC_RX1_RX_PATH_CFG2, 0, 4, rx_cf_text);
+
+static SOC_ENUM_SINGLE_DECL(cf_int1_2_enum, WCD9335_CDC_RX1_RX_PATH_MIX_CFG, 2,
+		     rx_cf_text);
+
+static const struct soc_enum cf_int2_1_enum =
+	SOC_ENUM_SINGLE(WCD9335_CDC_RX2_RX_PATH_CFG2, 0, 4, rx_cf_text);
+
+static SOC_ENUM_SINGLE_DECL(cf_int2_2_enum, WCD9335_CDC_RX2_RX_PATH_MIX_CFG, 2,
+		     rx_cf_text);
+
+static const struct soc_enum cf_int3_1_enum =
+	SOC_ENUM_SINGLE(WCD9335_CDC_RX3_RX_PATH_CFG2, 0, 4, rx_cf_text);
+
+static SOC_ENUM_SINGLE_DECL(cf_int3_2_enum, WCD9335_CDC_RX3_RX_PATH_MIX_CFG, 2,
+		     rx_cf_text);
+
+static const struct soc_enum cf_int4_1_enum =
+	SOC_ENUM_SINGLE(WCD9335_CDC_RX4_RX_PATH_CFG2, 0, 4, rx_cf_text);
+
+static SOC_ENUM_SINGLE_DECL(cf_int4_2_enum, WCD9335_CDC_RX4_RX_PATH_MIX_CFG, 2,
+		     rx_cf_text);
+
+static const struct soc_enum cf_int5_1_enum =
+	SOC_ENUM_SINGLE(WCD9335_CDC_RX5_RX_PATH_CFG2, 0, 4, rx_cf_text);
+
+static SOC_ENUM_SINGLE_DECL(cf_int5_2_enum, WCD9335_CDC_RX5_RX_PATH_MIX_CFG, 2,
+		     rx_cf_text);
+
+static const struct soc_enum cf_int6_1_enum =
+	SOC_ENUM_SINGLE(WCD9335_CDC_RX6_RX_PATH_CFG2, 0, 4, rx_cf_text);
+
+static SOC_ENUM_SINGLE_DECL(cf_int6_2_enum, WCD9335_CDC_RX6_RX_PATH_MIX_CFG, 2,
+		     rx_cf_text);
+
+static const struct soc_enum cf_int7_1_enum =
+	SOC_ENUM_SINGLE(WCD9335_CDC_RX7_RX_PATH_CFG2, 0, 4, rx_cf_text);
+
+static SOC_ENUM_SINGLE_DECL(cf_int7_2_enum, WCD9335_CDC_RX7_RX_PATH_MIX_CFG, 2,
+		     rx_cf_text);
+
+static const struct soc_enum cf_int8_1_enum =
+	SOC_ENUM_SINGLE(WCD9335_CDC_RX8_RX_PATH_CFG2, 0, 4, rx_cf_text);
+
+static SOC_ENUM_SINGLE_DECL(cf_int8_2_enum, WCD9335_CDC_RX8_RX_PATH_MIX_CFG, 2,
+		     rx_cf_text);
+
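+/*
+ * DAPM routes are {sink, control, source} triples: a NULL control is a
+ * direct connection, otherwise the named kcontrol gates the path.
+ */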
+static const struct snd_soc_dapm_route audio_i2s_map[] = {
+	{"SLIM RX0 MUX", NULL, "RX_I2S_CTL"},
+	{"SLIM RX1 MUX", NULL, "RX_I2S_CTL"},
+	{"SLIM RX2 MUX", NULL, "RX_I2S_CTL"},
+	{"SLIM RX3 MUX", NULL, "RX_I2S_CTL"},
+
+	{"SLIM TX6 MUX", NULL, "TX_I2S_CTL"},
+	{"SLIM TX7 MUX", NULL, "TX_I2S_CTL"},
+	{"SLIM TX8 MUX", NULL, "TX_I2S_CTL"},
+	{"SLIM TX11 MUX", NULL, "TX_I2S_CTL"},
+};
+
+static const struct snd_soc_dapm_route audio_map[] = {
+
+	/* MAD */
+	{"MAD_SEL MUX", "SPE", "MAD_CPE_INPUT"},
+	{"MAD_SEL MUX", "MSM", "MADINPUT"},
+	{"MADONOFF", "Switch", "MAD_SEL MUX"},
+	{"MAD_BROADCAST", "Switch", "MAD_SEL MUX"},
+	{"TX13 INP MUX", "CPE_TX_PP", "MADONOFF"},
+
+	/* CPE HW MAD bypass */
+	{"CPE IN Mixer", "MAD_BYPASS", "SLIM TX1 MUX"},
+
+	{"AIF4_MAD Mixer", "SLIM TX1", "CPE IN Mixer"},
+	{"AIF4_MAD Mixer", "SLIM TX12", "MADONOFF"},
+	{"AIF4_MAD Mixer", "SLIM TX13", "TX13 INP MUX"},
+	{"AIF4 MAD", NULL, "AIF4_MAD Mixer"},
+	{"AIF4 MAD", NULL, "AIF4"},
+
+	{"EC BUF MUX INP", "DEC1", "ADC MUX1"},
+	{"AIF5 CPE", NULL, "EC BUF MUX INP"},
+
+	/* SLIMBUS Connections */
+	{"AIF1 CAP", NULL, "AIF1_CAP Mixer"},
+	{"AIF2 CAP", NULL, "AIF2_CAP Mixer"},
+	{"AIF3 CAP", NULL, "AIF3_CAP Mixer"},
+
+	/* VI Feedback */
+	{"AIF4_VI Mixer", "SPKR_VI_1", "VIINPUT"},
+	{"AIF4_VI Mixer", "SPKR_VI_2", "VIINPUT"},
+	{"AIF4 VI", NULL, "AIF4_VI Mixer"},
+
+	/* SLIM_MIXER("AIF1_CAP Mixer"),*/
+	{"AIF1_CAP Mixer", "SLIM TX0", "SLIM TX0 MUX"},
+	{"AIF1_CAP Mixer", "SLIM TX1", "SLIM TX1 MUX"},
+	{"AIF1_CAP Mixer", "SLIM TX2", "SLIM TX2 MUX"},
+	{"AIF1_CAP Mixer", "SLIM TX3", "SLIM TX3 MUX"},
+	{"AIF1_CAP Mixer", "SLIM TX4", "SLIM TX4 MUX"},
+	{"AIF1_CAP Mixer", "SLIM TX5", "SLIM TX5 MUX"},
+	{"AIF1_CAP Mixer", "SLIM TX6", "SLIM TX6 MUX"},
+	{"AIF1_CAP Mixer", "SLIM TX7", "SLIM TX7 MUX"},
+	{"AIF1_CAP Mixer", "SLIM TX8", "SLIM TX8 MUX"},
+	{"AIF1_CAP Mixer", "SLIM TX9", "SLIM TX9 MUX"},
+	{"AIF1_CAP Mixer", "SLIM TX10", "SLIM TX10 MUX"},
+	{"AIF1_CAP Mixer", "SLIM TX11", "SLIM TX11 MUX"},
+	{"AIF1_CAP Mixer", "SLIM TX13", "TX13 INP MUX"},
+	/* SLIM_MIXER("AIF2_CAP Mixer"),*/
+	{"AIF2_CAP Mixer", "SLIM TX0", "SLIM TX0 MUX"},
+	{"AIF2_CAP Mixer", "SLIM TX1", "SLIM TX1 MUX"},
+	{"AIF2_CAP Mixer", "SLIM TX2", "SLIM TX2 MUX"},
+	{"AIF2_CAP Mixer", "SLIM TX3", "SLIM TX3 MUX"},
+	{"AIF2_CAP Mixer", "SLIM TX4", "SLIM TX4 MUX"},
+	{"AIF2_CAP Mixer", "SLIM TX5", "SLIM TX5 MUX"},
+	{"AIF2_CAP Mixer", "SLIM TX6", "SLIM TX6 MUX"},
+	{"AIF2_CAP Mixer", "SLIM TX7", "SLIM TX7 MUX"},
+	{"AIF2_CAP Mixer", "SLIM TX8", "SLIM TX8 MUX"},
+	{"AIF2_CAP Mixer", "SLIM TX9", "SLIM TX9 MUX"},
+	{"AIF2_CAP Mixer", "SLIM TX10", "SLIM TX10 MUX"},
+	{"AIF2_CAP Mixer", "SLIM TX11", "SLIM TX11 MUX"},
+	{"AIF2_CAP Mixer", "SLIM TX13", "TX13 INP MUX"},
+	/* SLIM_MIXER("AIF3_CAP Mixer"),*/
+	{"AIF3_CAP Mixer", "SLIM TX0", "SLIM TX0 MUX"},
+	{"AIF3_CAP Mixer", "SLIM TX1", "SLIM TX1 MUX"},
+	{"AIF3_CAP Mixer", "SLIM TX2", "SLIM TX2 MUX"},
+	{"AIF3_CAP Mixer", "SLIM TX3", "SLIM TX3 MUX"},
+	{"AIF3_CAP Mixer", "SLIM TX4", "SLIM TX4 MUX"},
+	{"AIF3_CAP Mixer", "SLIM TX5", "SLIM TX5 MUX"},
+	{"AIF3_CAP Mixer", "SLIM TX6", "SLIM TX6 MUX"},
+	{"AIF3_CAP Mixer", "SLIM TX7", "SLIM TX7 MUX"},
+	{"AIF3_CAP Mixer", "SLIM TX8", "SLIM TX8 MUX"},
+	{"AIF3_CAP Mixer", "SLIM TX9", "SLIM TX9 MUX"},
+	{"AIF3_CAP Mixer", "SLIM TX10", "SLIM TX10 MUX"},
+	{"AIF3_CAP Mixer", "SLIM TX11", "SLIM TX11 MUX"},
+	{"AIF3_CAP Mixer", "SLIM TX13", "TX13 INP MUX"},
+
+	{"SLIM TX0 MUX", "DEC0", "ADC MUX0"},
+	{"SLIM TX0 MUX", "RX_MIX_TX0", "RX MIX TX0 MUX"},
+	{"SLIM TX0 MUX", "DEC0_192", "ADC US MUX0"},
+
+	{"SLIM TX1 MUX", "DEC1", "ADC MUX1"},
+	{"SLIM TX1 MUX", "RX_MIX_TX1", "RX MIX TX1 MUX"},
+	{"SLIM TX1 MUX", "DEC1_192", "ADC US MUX1"},
+
+	{"SLIM TX2 MUX", "DEC2", "ADC MUX2"},
+	{"SLIM TX2 MUX", "RX_MIX_TX2", "RX MIX TX2 MUX"},
+	{"SLIM TX2 MUX", "DEC2_192", "ADC US MUX2"},
+
+	{"SLIM TX3 MUX", "DEC3", "ADC MUX3"},
+	{"SLIM TX3 MUX", "RX_MIX_TX3", "RX MIX TX3 MUX"},
+	{"SLIM TX3 MUX", "DEC3_192", "ADC US MUX3"},
+
+	{"SLIM TX4 MUX", "DEC4", "ADC MUX4"},
+	{"SLIM TX4 MUX", "RX_MIX_TX4", "RX MIX TX4 MUX"},
+	{"SLIM TX4 MUX", "DEC4_192", "ADC US MUX4"},
+
+	{"SLIM TX5 MUX", "DEC5", "ADC MUX5"},
+	{"SLIM TX5 MUX", "RX_MIX_TX5", "RX MIX TX5 MUX"},
+	{"SLIM TX5 MUX", "DEC5_192", "ADC US MUX5"},
+
+	{"SLIM TX6 MUX", "DEC6", "ADC MUX6"},
+	{"SLIM TX6 MUX", "RX_MIX_TX6", "RX MIX TX6 MUX"},
+	{"SLIM TX6 MUX", "DEC6_192", "ADC US MUX6"},
+
+	{"SLIM TX7 MUX", "DEC7", "ADC MUX7"},
+	{"SLIM TX7 MUX", "RX_MIX_TX7", "RX MIX TX7 MUX"},
+	{"SLIM TX7 MUX", "DEC7_192", "ADC US MUX7"},
+
+	{"SLIM TX8 MUX", "DEC8", "ADC MUX8"},
+	{"SLIM TX8 MUX", "RX_MIX_TX8", "RX MIX TX8 MUX"},
+	{"SLIM TX8 MUX", "DEC8_192", "ADC US MUX8"},
+
+	{"SLIM TX9 MUX", "DEC7", "ADC MUX7"},
+	{"SLIM TX9 MUX", "DEC7_192", "ADC US MUX7"},
+	{"SLIM TX10 MUX", "DEC6", "ADC MUX6"},
+	{"SLIM TX10 MUX", "DEC6_192", "ADC US MUX6"},
+
+	{"SLIM TX11 MUX", "DEC_0_5", "SLIM TX11 INP1 MUX"},
+	{"SLIM TX11 MUX", "DEC_9_12", "SLIM TX11 INP1 MUX"},
+	{"SLIM TX11 INP1 MUX", "DEC0", "ADC MUX0"},
+	{"SLIM TX11 INP1 MUX", "DEC1", "ADC MUX1"},
+	{"SLIM TX11 INP1 MUX", "DEC2", "ADC MUX2"},
+	{"SLIM TX11 INP1 MUX", "DEC3", "ADC MUX3"},
+	{"SLIM TX11 INP1 MUX", "DEC4", "ADC MUX4"},
+	{"SLIM TX11 INP1 MUX", "DEC5", "ADC MUX5"},
+	{"SLIM TX11 INP1 MUX", "RX_MIX_TX5", "RX MIX TX5 MUX"},
+
+	{"TX13 INP MUX", "MAD_BRDCST", "MAD_BROADCAST"},
+	{"TX13 INP MUX", "CDC_DEC_5", "SLIM TX13 MUX"},
+	{"SLIM TX13 MUX", "DEC5", "ADC MUX5"},
+
+	{"RX MIX TX0 MUX", "RX_MIX0", "RX INT0 SEC MIX"},
+	{"RX MIX TX0 MUX", "RX_MIX1", "RX INT1 SEC MIX"},
+	{"RX MIX TX0 MUX", "RX_MIX2", "RX INT2 SEC MIX"},
+	{"RX MIX TX0 MUX", "RX_MIX3", "RX INT3 SEC MIX"},
+	{"RX MIX TX0 MUX", "RX_MIX4", "RX INT4 SEC MIX"},
+	{"RX MIX TX0 MUX", "RX_MIX5", "RX INT5 SEC MIX"},
+	{"RX MIX TX0 MUX", "RX_MIX6", "RX INT6 SEC MIX"},
+	{"RX MIX TX0 MUX", "RX_MIX7", "RX INT7 SEC MIX"},
+	{"RX MIX TX0 MUX", "RX_MIX8", "RX INT8 SEC MIX"},
+	{"RX MIX TX0 MUX", "RX_MIX_VBAT5", "RX INT5 VBAT"},
+	{"RX MIX TX0 MUX", "RX_MIX_VBAT6", "RX INT6 VBAT"},
+	{"RX MIX TX0 MUX", "RX_MIX_VBAT7", "RX INT7 VBAT"},
+	{"RX MIX TX0 MUX", "RX_MIX_VBAT8", "RX INT8 VBAT"},
+
+	{"RX MIX TX1 MUX", "RX_MIX0", "RX INT0 SEC MIX"},
+	{"RX MIX TX1 MUX", "RX_MIX1", "RX INT1 SEC MIX"},
+	{"RX MIX TX1 MUX", "RX_MIX2", "RX INT2 SEC MIX"},
+	{"RX MIX TX1 MUX", "RX_MIX3", "RX INT3 SEC MIX"},
+	{"RX MIX TX1 MUX", "RX_MIX4", "RX INT4 SEC MIX"},
+	{"RX MIX TX1 MUX", "RX_MIX5", "RX INT5 SEC MIX"},
+	{"RX MIX TX1 MUX", "RX_MIX6", "RX INT6 SEC MIX"},
+	{"RX MIX TX1 MUX", "RX_MIX7", "RX INT7 SEC MIX"},
+	{"RX MIX TX1 MUX", "RX_MIX8", "RX INT8 SEC MIX"},
+	{"RX MIX TX1 MUX", "RX_MIX_VBAT5", "RX INT5 VBAT"},
+	{"RX MIX TX1 MUX", "RX_MIX_VBAT6", "RX INT6 VBAT"},
+	{"RX MIX TX1 MUX", "RX_MIX_VBAT7", "RX INT7 VBAT"},
+	{"RX MIX TX1 MUX", "RX_MIX_VBAT8", "RX INT8 VBAT"},
+
+	{"RX MIX TX2 MUX", "RX_MIX0", "RX INT0 SEC MIX"},
+	{"RX MIX TX2 MUX", "RX_MIX1", "RX INT1 SEC MIX"},
+	{"RX MIX TX2 MUX", "RX_MIX2", "RX INT2 SEC MIX"},
+	{"RX MIX TX2 MUX", "RX_MIX3", "RX INT3 SEC MIX"},
+	{"RX MIX TX2 MUX", "RX_MIX4", "RX INT4 SEC MIX"},
+	{"RX MIX TX2 MUX", "RX_MIX5", "RX INT5 SEC MIX"},
+	{"RX MIX TX2 MUX", "RX_MIX6", "RX INT6 SEC MIX"},
+	{"RX MIX TX2 MUX", "RX_MIX7", "RX INT7 SEC MIX"},
+	{"RX MIX TX2 MUX", "RX_MIX8", "RX INT8 SEC MIX"},
+	{"RX MIX TX2 MUX", "RX_MIX_VBAT5", "RX INT5 VBAT"},
+	{"RX MIX TX2 MUX", "RX_MIX_VBAT6", "RX INT6 VBAT"},
+	{"RX MIX TX2 MUX", "RX_MIX_VBAT7", "RX INT7 VBAT"},
+	{"RX MIX TX2 MUX", "RX_MIX_VBAT8", "RX INT8 VBAT"},
+
+	{"RX MIX TX3 MUX", "RX_MIX0", "RX INT0 SEC MIX"},
+	{"RX MIX TX3 MUX", "RX_MIX1", "RX INT1 SEC MIX"},
+	{"RX MIX TX3 MUX", "RX_MIX2", "RX INT2 SEC MIX"},
+	{"RX MIX TX3 MUX", "RX_MIX3", "RX INT3 SEC MIX"},
+	{"RX MIX TX3 MUX", "RX_MIX4", "RX INT4 SEC MIX"},
+	{"RX MIX TX3 MUX", "RX_MIX5", "RX INT5 SEC MIX"},
+	{"RX MIX TX3 MUX", "RX_MIX6", "RX INT6 SEC MIX"},
+	{"RX MIX TX3 MUX", "RX_MIX7", "RX INT7 SEC MIX"},
+	{"RX MIX TX3 MUX", "RX_MIX8", "RX INT8 SEC MIX"},
+	{"RX MIX TX3 MUX", "RX_MIX_VBAT5", "RX INT5 VBAT"},
+	{"RX MIX TX3 MUX", "RX_MIX_VBAT6", "RX INT6 VBAT"},
+	{"RX MIX TX3 MUX", "RX_MIX_VBAT7", "RX INT7 VBAT"},
+	{"RX MIX TX3 MUX", "RX_MIX_VBAT8", "RX INT8 VBAT"},
+
+	{"RX MIX TX4 MUX", "RX_MIX0", "RX INT0 SEC MIX"},
+	{"RX MIX TX4 MUX", "RX_MIX1", "RX INT1 SEC MIX"},
+	{"RX MIX TX4 MUX", "RX_MIX2", "RX INT2 SEC MIX"},
+	{"RX MIX TX4 MUX", "RX_MIX3", "RX INT3 SEC MIX"},
+	{"RX MIX TX4 MUX", "RX_MIX4", "RX INT4 SEC MIX"},
+	{"RX MIX TX4 MUX", "RX_MIX5", "RX INT5 SEC MIX"},
+	{"RX MIX TX4 MUX", "RX_MIX6", "RX INT6 SEC MIX"},
+	{"RX MIX TX4 MUX", "RX_MIX7", "RX INT7 SEC MIX"},
+	{"RX MIX TX4 MUX", "RX_MIX8", "RX INT8 SEC MIX"},
+	{"RX MIX TX4 MUX", "RX_MIX_VBAT5", "RX INT5 VBAT"},
+	{"RX MIX TX4 MUX", "RX_MIX_VBAT6", "RX INT6 VBAT"},
+	{"RX MIX TX4 MUX", "RX_MIX_VBAT7", "RX INT7 VBAT"},
+	{"RX MIX TX4 MUX", "RX_MIX_VBAT8", "RX INT8 VBAT"},
+
+	{"RX MIX TX5 MUX", "RX_MIX0", "RX INT0 SEC MIX"},
+	{"RX MIX TX5 MUX", "RX_MIX1", "RX INT1 SEC MIX"},
+	{"RX MIX TX5 MUX", "RX_MIX2", "RX INT2 SEC MIX"},
+	{"RX MIX TX5 MUX", "RX_MIX3", "RX INT3 SEC MIX"},
+	{"RX MIX TX5 MUX", "RX_MIX4", "RX INT4 SEC MIX"},
+	{"RX MIX TX5 MUX", "RX_MIX5", "RX INT5 SEC MIX"},
+	{"RX MIX TX5 MUX", "RX_MIX6", "RX INT6 SEC MIX"},
+	{"RX MIX TX5 MUX", "RX_MIX7", "RX INT7 SEC MIX"},
+	{"RX MIX TX5 MUX", "RX_MIX8", "RX INT8 SEC MIX"},
+	{"RX MIX TX5 MUX", "RX_MIX_VBAT5", "RX INT5 VBAT"},
+	{"RX MIX TX5 MUX", "RX_MIX_VBAT6", "RX INT6 VBAT"},
+	{"RX MIX TX5 MUX", "RX_MIX_VBAT7", "RX INT7 VBAT"},
+	{"RX MIX TX5 MUX", "RX_MIX_VBAT8", "RX INT8 VBAT"},
+
+	{"RX MIX TX6 MUX", "RX_MIX0", "RX INT0 SEC MIX"},
+	{"RX MIX TX6 MUX", "RX_MIX1", "RX INT1 SEC MIX"},
+	{"RX MIX TX6 MUX", "RX_MIX2", "RX INT2 SEC MIX"},
+	{"RX MIX TX6 MUX", "RX_MIX3", "RX INT3 SEC MIX"},
+	{"RX MIX TX6 MUX", "RX_MIX4", "RX INT4 SEC MIX"},
+	{"RX MIX TX6 MUX", "RX_MIX5", "RX INT5 SEC MIX"},
+	{"RX MIX TX6 MUX", "RX_MIX6", "RX INT6 SEC MIX"},
+	{"RX MIX TX6 MUX", "RX_MIX7", "RX INT7 SEC MIX"},
+	{"RX MIX TX6 MUX", "RX_MIX8", "RX INT8 SEC MIX"},
+	{"RX MIX TX6 MUX", "RX_MIX_VBAT5", "RX INT5 VBAT"},
+	{"RX MIX TX6 MUX", "RX_MIX_VBAT6", "RX INT6 VBAT"},
+	{"RX MIX TX6 MUX", "RX_MIX_VBAT7", "RX INT7 VBAT"},
+	{"RX MIX TX6 MUX", "RX_MIX_VBAT8", "RX INT8 VBAT"},
+
+	{"RX MIX TX7 MUX", "RX_MIX0", "RX INT0 SEC MIX"},
+	{"RX MIX TX7 MUX", "RX_MIX1", "RX INT1 SEC MIX"},
+	{"RX MIX TX7 MUX", "RX_MIX2", "RX INT2 SEC MIX"},
+	{"RX MIX TX7 MUX", "RX_MIX3", "RX INT3 SEC MIX"},
+	{"RX MIX TX7 MUX", "RX_MIX4", "RX INT4 SEC MIX"},
+	{"RX MIX TX7 MUX", "RX_MIX5", "RX INT5 SEC MIX"},
+	{"RX MIX TX7 MUX", "RX_MIX6", "RX INT6 SEC MIX"},
+	{"RX MIX TX7 MUX", "RX_MIX7", "RX INT7 SEC MIX"},
+	{"RX MIX TX7 MUX", "RX_MIX8", "RX INT8 SEC MIX"},
+	{"RX MIX TX7 MUX", "RX_MIX_VBAT5", "RX INT5 VBAT"},
+	{"RX MIX TX7 MUX", "RX_MIX_VBAT6", "RX INT6 VBAT"},
+	{"RX MIX TX7 MUX", "RX_MIX_VBAT7", "RX INT7 VBAT"},
+	{"RX MIX TX7 MUX", "RX_MIX_VBAT8", "RX INT8 VBAT"},
+
+	{"RX MIX TX8 MUX", "RX_MIX0", "RX INT0 SEC MIX"},
+	{"RX MIX TX8 MUX", "RX_MIX1", "RX INT1 SEC MIX"},
+	{"RX MIX TX8 MUX", "RX_MIX2", "RX INT2 SEC MIX"},
+	{"RX MIX TX8 MUX", "RX_MIX3", "RX INT3 SEC MIX"},
+	{"RX MIX TX8 MUX", "RX_MIX4", "RX INT4 SEC MIX"},
+	{"RX MIX TX8 MUX", "RX_MIX5", "RX INT5 SEC MIX"},
+	{"RX MIX TX8 MUX", "RX_MIX6", "RX INT6 SEC MIX"},
+	{"RX MIX TX8 MUX", "RX_MIX7", "RX INT7 SEC MIX"},
+	{"RX MIX TX8 MUX", "RX_MIX8", "RX INT8 SEC MIX"},
+	{"RX MIX TX8 MUX", "RX_MIX_VBAT5", "RX INT5 VBAT"},
+	{"RX MIX TX8 MUX", "RX_MIX_VBAT6", "RX INT6 VBAT"},
+	{"RX MIX TX8 MUX", "RX_MIX_VBAT7", "RX INT7 VBAT"},
+	{"RX MIX TX8 MUX", "RX_MIX_VBAT8", "RX INT8 VBAT"},
+
+	{"ADC US MUX0", "US_Switch", "ADC MUX0"},
+	{"ADC US MUX1", "US_Switch", "ADC MUX1"},
+	{"ADC US MUX2", "US_Switch", "ADC MUX2"},
+	{"ADC US MUX3", "US_Switch", "ADC MUX3"},
+	{"ADC US MUX4", "US_Switch", "ADC MUX4"},
+	{"ADC US MUX5", "US_Switch", "ADC MUX5"},
+	{"ADC US MUX6", "US_Switch", "ADC MUX6"},
+	{"ADC US MUX7", "US_Switch", "ADC MUX7"},
+	{"ADC US MUX8", "US_Switch", "ADC MUX8"},
+	{"ADC MUX0", "DMIC", "DMIC MUX0"},
+	{"ADC MUX0", "AMIC", "AMIC MUX0"},
+	{"ADC MUX1", "DMIC", "DMIC MUX1"},
+	{"ADC MUX1", "AMIC", "AMIC MUX1"},
+	{"ADC MUX2", "DMIC", "DMIC MUX2"},
+	{"ADC MUX2", "AMIC", "AMIC MUX2"},
+	{"ADC MUX3", "DMIC", "DMIC MUX3"},
+	{"ADC MUX3", "AMIC", "AMIC MUX3"},
+	{"ADC MUX4", "DMIC", "DMIC MUX4"},
+	{"ADC MUX4", "AMIC", "AMIC MUX4"},
+	{"ADC MUX5", "DMIC", "DMIC MUX5"},
+	{"ADC MUX5", "AMIC", "AMIC MUX5"},
+	{"ADC MUX6", "DMIC", "DMIC MUX6"},
+	{"ADC MUX6", "AMIC", "AMIC MUX6"},
+	{"ADC MUX7", "DMIC", "DMIC MUX7"},
+	{"ADC MUX7", "AMIC", "AMIC MUX7"},
+	{"ADC MUX8", "DMIC", "DMIC MUX8"},
+	{"ADC MUX8", "AMIC", "AMIC MUX8"},
+	{"ADC MUX10", "DMIC", "DMIC MUX10"},
+	{"ADC MUX10", "AMIC", "AMIC MUX10"},
+	{"ADC MUX11", "DMIC", "DMIC MUX11"},
+	{"ADC MUX11", "AMIC", "AMIC MUX11"},
+	{"ADC MUX12", "DMIC", "DMIC MUX12"},
+	{"ADC MUX12", "AMIC", "AMIC MUX12"},
+	{"ADC MUX13", "DMIC", "DMIC MUX13"},
+	{"ADC MUX13", "AMIC", "AMIC MUX13"},
+
+	{"ADC MUX0", "ANC_FB_TUNE1", "ADC MUX10"},
+	{"ADC MUX0", "ANC_FB_TUNE1", "ADC MUX11"},
+	{"ADC MUX0", "ANC_FB_TUNE2", "ADC MUX12"},
+	{"ADC MUX0", "ANC_FB_TUNE2", "ADC MUX13"},
+	{"ADC MUX1", "ANC_FB_TUNE1", "ADC MUX10"},
+	{"ADC MUX1", "ANC_FB_TUNE1", "ADC MUX11"},
+	{"ADC MUX1", "ANC_FB_TUNE2", "ADC MUX12"},
+	{"ADC MUX1", "ANC_FB_TUNE2", "ADC MUX13"},
+	{"ADC MUX2", "ANC_FB_TUNE1", "ADC MUX10"},
+	{"ADC MUX2", "ANC_FB_TUNE1", "ADC MUX11"},
+	{"ADC MUX2", "ANC_FB_TUNE2", "ADC MUX12"},
+	{"ADC MUX2", "ANC_FB_TUNE2", "ADC MUX13"},
+	{"ADC MUX3", "ANC_FB_TUNE1", "ADC MUX10"},
+	{"ADC MUX3", "ANC_FB_TUNE1", "ADC MUX11"},
+	{"ADC MUX3", "ANC_FB_TUNE2", "ADC MUX12"},
+	{"ADC MUX3", "ANC_FB_TUNE2", "ADC MUX13"},
+	{"ADC MUX4", "ANC_FB_TUNE1", "ADC MUX10"},
+	{"ADC MUX4", "ANC_FB_TUNE1", "ADC MUX11"},
+	{"ADC MUX4", "ANC_FB_TUNE2", "ADC MUX12"},
+	{"ADC MUX4", "ANC_FB_TUNE2", "ADC MUX13"},
+	{"ADC MUX5", "ANC_FB_TUNE1", "ADC MUX10"},
+	{"ADC MUX5", "ANC_FB_TUNE1", "ADC MUX11"},
+	{"ADC MUX5", "ANC_FB_TUNE2", "ADC MUX12"},
+	{"ADC MUX5", "ANC_FB_TUNE2", "ADC MUX13"},
+	{"ADC MUX6", "ANC_FB_TUNE1", "ADC MUX10"},
+	{"ADC MUX6", "ANC_FB_TUNE1", "ADC MUX11"},
+	{"ADC MUX6", "ANC_FB_TUNE2", "ADC MUX12"},
+	{"ADC MUX6", "ANC_FB_TUNE2", "ADC MUX13"},
+	{"ADC MUX7", "ANC_FB_TUNE1", "ADC MUX10"},
+	{"ADC MUX7", "ANC_FB_TUNE1", "ADC MUX11"},
+	{"ADC MUX7", "ANC_FB_TUNE2", "ADC MUX12"},
+	{"ADC MUX7", "ANC_FB_TUNE2", "ADC MUX13"},
+	{"ADC MUX8", "ANC_FB_TUNE1", "ADC MUX10"},
+	{"ADC MUX8", "ANC_FB_TUNE1", "ADC MUX11"},
+	{"ADC MUX8", "ANC_FB_TUNE2", "ADC MUX12"},
+	{"ADC MUX8", "ANC_FB_TUNE2", "ADC MUX13"},
+
+	{"DMIC MUX0", "DMIC0", "DMIC0"},
+	{"DMIC MUX0", "DMIC1", "DMIC1"},
+	{"DMIC MUX0", "DMIC2", "DMIC2"},
+	{"DMIC MUX0", "DMIC3", "DMIC3"},
+	{"DMIC MUX0", "DMIC4", "DMIC4"},
+	{"DMIC MUX0", "DMIC5", "DMIC5"},
+	{"AMIC MUX0", "ADC1", "ADC1"},
+	{"AMIC MUX0", "ADC2", "ADC2"},
+	{"AMIC MUX0", "ADC3", "ADC3"},
+	{"AMIC MUX0", "ADC4", "ADC4"},
+	{"AMIC MUX0", "ADC5", "ADC5"},
+	{"AMIC MUX0", "ADC6", "ADC6"},
+
+	{"DMIC MUX1", "DMIC0", "DMIC0"},
+	{"DMIC MUX1", "DMIC1", "DMIC1"},
+	{"DMIC MUX1", "DMIC2", "DMIC2"},
+	{"DMIC MUX1", "DMIC3", "DMIC3"},
+	{"DMIC MUX1", "DMIC4", "DMIC4"},
+	{"DMIC MUX1", "DMIC5", "DMIC5"},
+	{"AMIC MUX1", "ADC1", "ADC1"},
+	{"AMIC MUX1", "ADC2", "ADC2"},
+	{"AMIC MUX1", "ADC3", "ADC3"},
+	{"AMIC MUX1", "ADC4", "ADC4"},
+	{"AMIC MUX1", "ADC5", "ADC5"},
+	{"AMIC MUX1", "ADC6", "ADC6"},
+
+	{"DMIC MUX2", "DMIC0", "DMIC0"},
+	{"DMIC MUX2", "DMIC1", "DMIC1"},
+	{"DMIC MUX2", "DMIC2", "DMIC2"},
+	{"DMIC MUX2", "DMIC3", "DMIC3"},
+	{"DMIC MUX2", "DMIC4", "DMIC4"},
+	{"DMIC MUX2", "DMIC5", "DMIC5"},
+	{"AMIC MUX2", "ADC1", "ADC1"},
+	{"AMIC MUX2", "ADC2", "ADC2"},
+	{"AMIC MUX2", "ADC3", "ADC3"},
+	{"AMIC MUX2", "ADC4", "ADC4"},
+	{"AMIC MUX2", "ADC5", "ADC5"},
+	{"AMIC MUX2", "ADC6", "ADC6"},
+
+	{"DMIC MUX3", "DMIC0", "DMIC0"},
+	{"DMIC MUX3", "DMIC1", "DMIC1"},
+	{"DMIC MUX3", "DMIC2", "DMIC2"},
+	{"DMIC MUX3", "DMIC3", "DMIC3"},
+	{"DMIC MUX3", "DMIC4", "DMIC4"},
+	{"DMIC MUX3", "DMIC5", "DMIC5"},
+	{"AMIC MUX3", "ADC1", "ADC1"},
+	{"AMIC MUX3", "ADC2", "ADC2"},
+	{"AMIC MUX3", "ADC3", "ADC3"},
+	{"AMIC MUX3", "ADC4", "ADC4"},
+	{"AMIC MUX3", "ADC5", "ADC5"},
+	{"AMIC MUX3", "ADC6", "ADC6"},
+
+	{"DMIC MUX4", "DMIC0", "DMIC0"},
+	{"DMIC MUX4", "DMIC1", "DMIC1"},
+	{"DMIC MUX4", "DMIC2", "DMIC2"},
+	{"DMIC MUX4", "DMIC3", "DMIC3"},
+	{"DMIC MUX4", "DMIC4", "DMIC4"},
+	{"DMIC MUX4", "DMIC5", "DMIC5"},
+	{"AMIC MUX4", "ADC1", "ADC1"},
+	{"AMIC MUX4", "ADC2", "ADC2"},
+	{"AMIC MUX4", "ADC3", "ADC3"},
+	{"AMIC MUX4", "ADC4", "ADC4"},
+	{"AMIC MUX4", "ADC5", "ADC5"},
+	{"AMIC MUX4", "ADC6", "ADC6"},
+
+	{"DMIC MUX5", "DMIC0", "DMIC0"},
+	{"DMIC MUX5", "DMIC1", "DMIC1"},
+	{"DMIC MUX5", "DMIC2", "DMIC2"},
+	{"DMIC MUX5", "DMIC3", "DMIC3"},
+	{"DMIC MUX5", "DMIC4", "DMIC4"},
+	{"DMIC MUX5", "DMIC5", "DMIC5"},
+	{"AMIC MUX5", "ADC1", "ADC1"},
+	{"AMIC MUX5", "ADC2", "ADC2"},
+	{"AMIC MUX5", "ADC3", "ADC3"},
+	{"AMIC MUX5", "ADC4", "ADC4"},
+	{"AMIC MUX5", "ADC5", "ADC5"},
+	{"AMIC MUX5", "ADC6", "ADC6"},
+
+	{"DMIC MUX6", "DMIC0", "DMIC0"},
+	{"DMIC MUX6", "DMIC1", "DMIC1"},
+	{"DMIC MUX6", "DMIC2", "DMIC2"},
+	{"DMIC MUX6", "DMIC3", "DMIC3"},
+	{"DMIC MUX6", "DMIC4", "DMIC4"},
+	{"DMIC MUX6", "DMIC5", "DMIC5"},
+	{"AMIC MUX6", "ADC1", "ADC1"},
+	{"AMIC MUX6", "ADC2", "ADC2"},
+	{"AMIC MUX6", "ADC3", "ADC3"},
+	{"AMIC MUX6", "ADC4", "ADC4"},
+	{"AMIC MUX6", "ADC5", "ADC5"},
+	{"AMIC MUX6", "ADC6", "ADC6"},
+
+	{"DMIC MUX7", "DMIC0", "DMIC0"},
+	{"DMIC MUX7", "DMIC1", "DMIC1"},
+	{"DMIC MUX7", "DMIC2", "DMIC2"},
+	{"DMIC MUX7", "DMIC3", "DMIC3"},
+	{"DMIC MUX7", "DMIC4", "DMIC4"},
+	{"DMIC MUX7", "DMIC5", "DMIC5"},
+	{"AMIC MUX7", "ADC1", "ADC1"},
+	{"AMIC MUX7", "ADC2", "ADC2"},
+	{"AMIC MUX7", "ADC3", "ADC3"},
+	{"AMIC MUX7", "ADC4", "ADC4"},
+	{"AMIC MUX7", "ADC5", "ADC5"},
+	{"AMIC MUX7", "ADC6", "ADC6"},
+
+	{"DMIC MUX8", "DMIC0", "DMIC0"},
+	{"DMIC MUX8", "DMIC1", "DMIC1"},
+	{"DMIC MUX8", "DMIC2", "DMIC2"},
+	{"DMIC MUX8", "DMIC3", "DMIC3"},
+	{"DMIC MUX8", "DMIC4", "DMIC4"},
+	{"DMIC MUX8", "DMIC5", "DMIC5"},
+	{"AMIC MUX8", "ADC1", "ADC1"},
+	{"AMIC MUX8", "ADC2", "ADC2"},
+	{"AMIC MUX8", "ADC3", "ADC3"},
+	{"AMIC MUX8", "ADC4", "ADC4"},
+	{"AMIC MUX8", "ADC5", "ADC5"},
+	{"AMIC MUX8", "ADC6", "ADC6"},
+
+	{"DMIC MUX10", "DMIC0", "DMIC0"},
+	{"DMIC MUX10", "DMIC1", "DMIC1"},
+	{"DMIC MUX10", "DMIC2", "DMIC2"},
+	{"DMIC MUX10", "DMIC3", "DMIC3"},
+	{"DMIC MUX10", "DMIC4", "DMIC4"},
+	{"DMIC MUX10", "DMIC5", "DMIC5"},
+	{"AMIC MUX10", "ADC1", "ADC1"},
+	{"AMIC MUX10", "ADC2", "ADC2"},
+	{"AMIC MUX10", "ADC3", "ADC3"},
+	{"AMIC MUX10", "ADC4", "ADC4"},
+	{"AMIC MUX10", "ADC5", "ADC5"},
+	{"AMIC MUX10", "ADC6", "ADC6"},
+
+	{"DMIC MUX11", "DMIC0", "DMIC0"},
+	{"DMIC MUX11", "DMIC1", "DMIC1"},
+	{"DMIC MUX11", "DMIC2", "DMIC2"},
+	{"DMIC MUX11", "DMIC3", "DMIC3"},
+	{"DMIC MUX11", "DMIC4", "DMIC4"},
+	{"DMIC MUX11", "DMIC5", "DMIC5"},
+	{"AMIC MUX11", "ADC1", "ADC1"},
+	{"AMIC MUX11", "ADC2", "ADC2"},
+	{"AMIC MUX11", "ADC3", "ADC3"},
+	{"AMIC MUX11", "ADC4", "ADC4"},
+	{"AMIC MUX11", "ADC5", "ADC5"},
+	{"AMIC MUX11", "ADC6", "ADC6"},
+
+	{"DMIC MUX12", "DMIC0", "DMIC0"},
+	{"DMIC MUX12", "DMIC1", "DMIC1"},
+	{"DMIC MUX12", "DMIC2", "DMIC2"},
+	{"DMIC MUX12", "DMIC3", "DMIC3"},
+	{"DMIC MUX12", "DMIC4", "DMIC4"},
+	{"DMIC MUX12", "DMIC5", "DMIC5"},
+	{"AMIC MUX12", "ADC1", "ADC1"},
+	{"AMIC MUX12", "ADC2", "ADC2"},
+	{"AMIC MUX12", "ADC3", "ADC3"},
+	{"AMIC MUX12", "ADC4", "ADC4"},
+	{"AMIC MUX12", "ADC5", "ADC5"},
+	{"AMIC MUX12", "ADC6", "ADC6"},
+
+	{"DMIC MUX13", "DMIC0", "DMIC0"},
+	{"DMIC MUX13", "DMIC1", "DMIC1"},
+	{"DMIC MUX13", "DMIC2", "DMIC2"},
+	{"DMIC MUX13", "DMIC3", "DMIC3"},
+	{"DMIC MUX13", "DMIC4", "DMIC4"},
+	{"DMIC MUX13", "DMIC5", "DMIC5"},
+	{"AMIC MUX13", "ADC1", "ADC1"},
+	{"AMIC MUX13", "ADC2", "ADC2"},
+	{"AMIC MUX13", "ADC3", "ADC3"},
+	{"AMIC MUX13", "ADC4", "ADC4"},
+	{"AMIC MUX13", "ADC5", "ADC5"},
+	{"AMIC MUX13", "ADC6", "ADC6"},
+	/* ADC Connections */
+	{"ADC1", NULL, "AMIC1"},
+	{"ADC2", NULL, "AMIC2"},
+	{"ADC3", NULL, "AMIC3"},
+	{"ADC4", NULL, "AMIC4"},
+	{"ADC5", NULL, "AMIC5"},
+	{"ADC6", NULL, "AMIC6"},
+
+	{"RX INT0_1 MIX1", NULL, "RX INT0_1 MIX1 INP0"},
+	{"RX INT0_1 MIX1", NULL, "RX INT0_1 MIX1 INP1"},
+	{"RX INT0_1 MIX1", NULL, "RX INT0_1 MIX1 INP2"},
+	{"RX INT1_1 MIX1", NULL, "RX INT1_1 MIX1 INP0"},
+	{"RX INT1_1 MIX1", NULL, "RX INT1_1 MIX1 INP1"},
+	{"RX INT1_1 MIX1", NULL, "RX INT1_1 MIX1 INP2"},
+	{"RX INT2_1 MIX1", NULL, "RX INT2_1 MIX1 INP0"},
+	{"RX INT2_1 MIX1", NULL, "RX INT2_1 MIX1 INP1"},
+	{"RX INT2_1 MIX1", NULL, "RX INT2_1 MIX1 INP2"},
+	{"RX INT3_1 MIX1", NULL, "RX INT3_1 MIX1 INP0"},
+	{"RX INT3_1 MIX1", NULL, "RX INT3_1 MIX1 INP1"},
+	{"RX INT3_1 MIX1", NULL, "RX INT3_1 MIX1 INP2"},
+	{"RX INT4_1 MIX1", NULL, "RX INT4_1 MIX1 INP0"},
+	{"RX INT4_1 MIX1", NULL, "RX INT4_1 MIX1 INP1"},
+	{"RX INT4_1 MIX1", NULL, "RX INT4_1 MIX1 INP2"},
+	{"RX INT5_1 MIX1", NULL, "RX INT5_1 MIX1 INP0"},
+	{"RX INT5_1 MIX1", NULL, "RX INT5_1 MIX1 INP1"},
+	{"RX INT5_1 MIX1", NULL, "RX INT5_1 MIX1 INP2"},
+	{"RX INT6_1 MIX1", NULL, "RX INT6_1 MIX1 INP0"},
+	{"RX INT6_1 MIX1", NULL, "RX INT6_1 MIX1 INP1"},
+	{"RX INT6_1 MIX1", NULL, "RX INT6_1 MIX1 INP2"},
+	{"RX INT7_1 MIX1", NULL, "RX INT7_1 MIX1 INP0"},
+	{"RX INT7_1 MIX1", NULL, "RX INT7_1 MIX1 INP1"},
+	{"RX INT7_1 MIX1", NULL, "RX INT7_1 MIX1 INP2"},
+	{"RX INT8_1 MIX1", NULL, "RX INT8_1 MIX1 INP0"},
+	{"RX INT8_1 MIX1", NULL, "RX INT8_1 MIX1 INP1"},
+	{"RX INT8_1 MIX1", NULL, "RX INT8_1 MIX1 INP2"},
+
+	{"RX INT0 SEC MIX", NULL, "RX INT0_1 MIX1"},
+	{"RX INT0 MIX2", NULL, "RX INT0 SEC MIX"},
+	{"RX INT0 MIX2", NULL, "RX INT0 MIX2 INP"},
+	{"RX INT0 INTERP", NULL, "RX INT0 MIX2"},
+	{"RX INT0 DEM MUX", "CLSH_DSM_OUT", "RX INT0 INTERP"},
+	{"RX INT0 DAC", NULL, "RX INT0 DEM MUX"},
+	{"RX INT0 DAC", NULL, "RX_BIAS"},
+	{"EAR PA", NULL, "RX INT0 DAC"},
+	{"EAR", NULL, "EAR PA"},
+
+	{"SPL SRC0 MUX", "SRC_IN_HPHL", "RX INT1_1 MIX1"},
+	{"RX INT1 SPLINE MIX", NULL, "RX INT1_1 MIX1"},
+	{"RX INT1 SPLINE MIX", "HPHL Switch", "SPL SRC0 MUX"},
+	{"RX INT1_1 NATIVE MUX", "ON", "RX INT1_1 MIX1"},
+	{"RX INT1 SPLINE MIX", NULL, "RX INT1_1 NATIVE MUX"},
+	{"RX INT1_1 NATIVE MUX", NULL, "RX INT1 NATIVE SUPPLY"},
+	{"RX INT1 SEC MIX", NULL, "RX INT1 SPLINE MIX"},
+	{"RX INT1 MIX2", NULL, "RX INT1 SEC MIX"},
+	{"RX INT1 MIX2", NULL, "RX INT1 MIX2 INP"},
+	{"RX INT1 INTERP", NULL, "RX INT1 MIX2"},
+	{"RX INT1 DEM MUX", "CLSH_DSM_OUT", "RX INT1 INTERP"},
+	{"RX INT1 DAC", NULL, "RX INT1 DEM MUX"},
+	{"RX INT1 DAC", NULL, "RX_BIAS"},
+	{"HPHL PA", NULL, "RX INT1 DAC"},
+	{"HPHL", NULL, "HPHL PA"},
+
+	{"SPL SRC1 MUX", "SRC_IN_HPHR", "RX INT2_1 MIX1"},
+	{"RX INT2 SPLINE MIX", NULL, "RX INT2_1 MIX1"},
+	{"RX INT2 SPLINE MIX", "HPHR Switch", "SPL SRC1 MUX"},
+	{"RX INT2_1 NATIVE MUX", "ON", "RX INT2_1 MIX1"},
+	{"RX INT2 SPLINE MIX", NULL, "RX INT2_1 NATIVE MUX"},
+	{"RX INT2_1 NATIVE MUX", NULL, "RX INT2 NATIVE SUPPLY"},
+	{"RX INT2 SEC MIX", NULL, "RX INT2 SPLINE MIX"},
+	{"RX INT2 MIX2", NULL, "RX INT2 SEC MIX"},
+	{"RX INT2 MIX2", NULL, "RX INT2 MIX2 INP"},
+	{"RX INT2 INTERP", NULL, "RX INT2 MIX2"},
+	{"RX INT2 DEM MUX", "CLSH_DSM_OUT", "RX INT2 INTERP"},
+	{"RX INT2 DAC", NULL, "RX INT2 DEM MUX"},
+	{"RX INT2 DAC", NULL, "RX_BIAS"},
+	{"HPHR PA", NULL, "RX INT2 DAC"},
+	{"HPHR", NULL, "HPHR PA"},
+
+	{"SPL SRC0 MUX", "SRC_IN_LO1", "RX INT3_1 MIX1"},
+	{"RX INT3 SPLINE MIX", NULL, "RX INT3_1 MIX1"},
+	{"RX INT3 SPLINE MIX", "LO1 Switch", "SPL SRC0 MUX"},
+	{"RX INT3_1 NATIVE MUX", "ON", "RX INT3_1 MIX1"},
+	{"RX INT3 SPLINE MIX", NULL, "RX INT3_1 NATIVE MUX"},
+	{"RX INT3_1 NATIVE MUX", NULL, "RX INT3 NATIVE SUPPLY"},
+	{"RX INT3 SEC MIX", NULL, "RX INT3 SPLINE MIX"},
+	{"RX INT3 MIX2", NULL, "RX INT3 SEC MIX"},
+	{"RX INT3 MIX2", NULL, "RX INT3 MIX2 INP"},
+	{"RX INT3 INTERP", NULL, "RX INT3 MIX2"},
+	{"RX INT3 DAC", NULL, "RX INT3 INTERP"},
+	{"RX INT3 DAC", NULL, "RX_BIAS"},
+	{"LINEOUT1 PA", NULL, "RX INT3 DAC"},
+	{"LINEOUT1", NULL, "LINEOUT1 PA"},
+
+	{"SPL SRC1 MUX", "SRC_IN_LO2", "RX INT4_1 MIX1"},
+	{"RX INT4 SPLINE MIX", NULL, "RX INT4_1 MIX1"},
+	{"RX INT4 SPLINE MIX", "LO2 Switch", "SPL SRC1 MUX"},
+	{"RX INT4_1 NATIVE MUX", "ON", "RX INT4_1 MIX1"},
+	{"RX INT4 SPLINE MIX", NULL, "RX INT4_1 NATIVE MUX"},
+	{"RX INT4_1 NATIVE MUX", NULL, "RX INT4 NATIVE SUPPLY"},
+	{"RX INT4 SEC MIX", NULL, "RX INT4 SPLINE MIX"},
+	{"RX INT4 MIX2", NULL, "RX INT4 SEC MIX"},
+	{"RX INT4 MIX2", NULL, "RX INT4 MIX2 INP"},
+	{"RX INT4 INTERP", NULL, "RX INT4 MIX2"},
+	{"RX INT4 DAC", NULL, "RX INT4 INTERP"},
+	{"RX INT4 DAC", NULL, "RX_BIAS"},
+	{"LINEOUT2 PA", NULL, "RX INT4 DAC"},
+	{"LINEOUT2", NULL, "LINEOUT2 PA"},
+
+	{"SPL SRC2 MUX", "SRC_IN_LO3", "RX INT5_1 MIX1"},
+	{"RX INT5 SPLINE MIX", NULL, "RX INT5_1 MIX1"},
+	{"RX INT5 SPLINE MIX", "LO3 Switch", "SPL SRC2 MUX"},
+	{"RX INT5 SEC MIX", NULL, "RX INT5 SPLINE MIX"},
+	{"RX INT5 MIX2", NULL, "RX INT5 SEC MIX"},
+	{"RX INT5 INTERP", NULL, "RX INT5 MIX2"},
+
+	{"RX INT5 VBAT", "LO3 VBAT Enable", "RX INT5 INTERP"},
+	{"RX INT5 DAC", NULL, "RX INT5 VBAT"},
+
+	{"RX INT5 DAC", NULL, "RX INT5 INTERP"},
+	{"RX INT5 DAC", NULL, "RX_BIAS"},
+	{"LINEOUT3 PA", NULL, "RX INT5 DAC"},
+	{"LINEOUT3", NULL, "LINEOUT3 PA"},
+
+	{"SPL SRC3 MUX", "SRC_IN_LO4", "RX INT6_1 MIX1"},
+	{"RX INT6 SPLINE MIX", NULL, "RX INT6_1 MIX1"},
+	{"RX INT6 SPLINE MIX", "LO4 Switch", "SPL SRC3 MUX"},
+	{"RX INT6 SEC MIX", NULL, "RX INT6 SPLINE MIX"},
+	{"RX INT6 MIX2", NULL, "RX INT6 SEC MIX"},
+	{"RX INT6 INTERP", NULL, "RX INT6 MIX2"},
+
+	{"RX INT6 VBAT", "LO4 VBAT Enable", "RX INT6 INTERP"},
+	{"RX INT6 DAC", NULL, "RX INT6 VBAT"},
+
+	{"RX INT6 DAC", NULL, "RX INT6 INTERP"},
+	{"RX INT6 DAC", NULL, "RX_BIAS"},
+	{"LINEOUT4 PA", NULL, "RX INT6 DAC"},
+	{"LINEOUT4", NULL, "LINEOUT4 PA"},
+
+	{"SPL SRC2 MUX", "SRC_IN_SPKRL", "RX INT7_1 MIX1"},
+	{"RX INT7 SPLINE MIX", NULL, "RX INT7_1 MIX1"},
+	{"RX INT7 SPLINE MIX", "SPKRL Switch", "SPL SRC2 MUX"},
+	{"RX INT7 SEC MIX", NULL, "RX INT7 SPLINE MIX"},
+	{"RX INT7 MIX2", NULL, "RX INT7 SEC MIX"},
+	{"RX INT7 MIX2", NULL, "RX INT7 MIX2 INP"},
+
+	{"RX INT7 INTERP", NULL, "RX INT7 MIX2"},
+
+	{"RX INT7 VBAT", "SPKRL VBAT Enable", "RX INT7 INTERP"},
+	{"RX INT7 CHAIN", NULL, "RX INT7 VBAT"},
+
+	{"RX INT7 CHAIN", NULL, "RX INT7 INTERP"},
+	{"RX INT7 CHAIN", NULL, "RX_BIAS"},
+	{"SPK1 OUT", NULL, "RX INT7 CHAIN"},
+
+	{"ANC SPKR PA Enable", "Switch", "RX INT7 CHAIN"},
+	{"ANC SPK1 PA", NULL, "ANC SPKR PA Enable"},
+	{"SPK1 OUT", NULL, "ANC SPK1 PA"},
+
+	{"SPL SRC3 MUX", "SRC_IN_SPKRR", "RX INT8_1 MIX1"},
+	{"RX INT8 SPLINE MIX", NULL, "RX INT8_1 MIX1"},
+	{"RX INT8 SPLINE MIX", "SPKRR Switch", "SPL SRC3 MUX"},
+	{"RX INT8 SEC MIX", NULL, "RX INT8 SPLINE MIX"},
+	{"RX INT8 INTERP", NULL, "RX INT8 SEC MIX"},
+
+	{"RX INT8 VBAT", "SPKRR VBAT Enable", "RX INT8 INTERP"},
+	{"RX INT8 CHAIN", NULL, "RX INT8 VBAT"},
+
+	{"RX INT8 CHAIN", NULL, "RX INT8 INTERP"},
+	{"RX INT8 CHAIN", NULL, "RX_BIAS"},
+	{"SPK2 OUT", NULL, "RX INT8 CHAIN"},
+
+	{"ANC0 FB MUX", "ANC_IN_EAR", "RX INT0 MIX2"},
+	{"ANC0 FB MUX", "ANC_IN_HPHL", "RX INT1 MIX2"},
+	{"ANC0 FB MUX", "ANC_IN_LO1", "RX INT3 MIX2"},
+	{"ANC0 FB MUX", "ANC_IN_EAR_SPKR", "RX INT7 MIX2"},
+	{"ANC1 FB MUX", "ANC_IN_HPHR", "RX INT2 MIX2"},
+	{"ANC1 FB MUX", "ANC_IN_LO2", "RX INT4 MIX2"},
+
+	{"ANC HPHL Enable", "Switch", "ADC MUX10"},
+	{"ANC HPHL Enable", "Switch", "ADC MUX11"},
+	{"RX INT1 MIX2", NULL, "ANC HPHL Enable"},
+
+	{"ANC HPHR Enable", "Switch", "ADC MUX12"},
+	{"ANC HPHR Enable", "Switch", "ADC MUX13"},
+	{"RX INT2 MIX2", NULL, "ANC HPHR Enable"},
+
+	{"ANC EAR Enable", "Switch", "ADC MUX10"},
+	{"ANC EAR Enable", "Switch", "ADC MUX11"},
+	{"RX INT0 MIX2", NULL, "ANC EAR Enable"},
+
+	{"ANC OUT EAR SPKR Enable", "Switch", "ADC MUX10"},
+	{"ANC OUT EAR SPKR Enable", "Switch", "ADC MUX11"},
+	{"RX INT7 MIX2", NULL, "ANC OUT EAR SPKR Enable"},
+
+	{"ANC LINEOUT1 Enable", "Switch", "ADC MUX10"},
+	{"ANC LINEOUT1 Enable", "Switch", "ADC MUX11"},
+	{"RX INT3 MIX2", NULL, "ANC LINEOUT1 Enable"},
+
+	{"ANC LINEOUT2 Enable", "Switch", "ADC MUX12"},
+	{"ANC LINEOUT2 Enable", "Switch", "ADC MUX13"},
+	{"RX INT4 MIX2", NULL, "ANC LINEOUT2 Enable"},
+
+	{"ANC EAR PA", NULL, "RX INT0 DAC"},
+	{"ANC EAR", NULL, "ANC EAR PA"},
+	{"ANC HPHL PA", NULL, "RX INT1 DAC"},
+	{"ANC HPHL", NULL, "ANC HPHL PA"},
+	{"ANC HPHR PA", NULL, "RX INT2 DAC"},
+	{"ANC HPHR", NULL, "ANC HPHR PA"},
+	{"ANC LINEOUT1 PA", NULL, "RX INT3 DAC"},
+	{"ANC LINEOUT1", NULL, "ANC LINEOUT1 PA"},
+	{"ANC LINEOUT2 PA", NULL, "RX INT4 DAC"},
+	{"ANC LINEOUT2", NULL, "ANC LINEOUT2 PA"},
+
+	/* SLIM_MUX("AIF1_PB", "AIF1 PB"),*/
+	{"SLIM RX0 MUX", "AIF1_PB", "AIF1 PB"},
+	{"SLIM RX1 MUX", "AIF1_PB", "AIF1 PB"},
+	{"SLIM RX2 MUX", "AIF1_PB", "AIF1 PB"},
+	{"SLIM RX3 MUX", "AIF1_PB", "AIF1 PB"},
+	{"SLIM RX4 MUX", "AIF1_PB", "AIF1 PB"},
+	{"SLIM RX5 MUX", "AIF1_PB", "AIF1 PB"},
+	{"SLIM RX6 MUX", "AIF1_PB", "AIF1 PB"},
+	{"SLIM RX7 MUX", "AIF1_PB", "AIF1 PB"},
+	/* SLIM_MUX("AIF2_PB", "AIF2 PB"),*/
+	{"SLIM RX0 MUX", "AIF2_PB", "AIF2 PB"},
+	{"SLIM RX1 MUX", "AIF2_PB", "AIF2 PB"},
+	{"SLIM RX2 MUX", "AIF2_PB", "AIF2 PB"},
+	{"SLIM RX3 MUX", "AIF2_PB", "AIF2 PB"},
+	{"SLIM RX4 MUX", "AIF2_PB", "AIF2 PB"},
+	{"SLIM RX5 MUX", "AIF2_PB", "AIF2 PB"},
+	{"SLIM RX6 MUX", "AIF2_PB", "AIF2 PB"},
+	{"SLIM RX7 MUX", "AIF2_PB", "AIF2 PB"},
+	/* SLIM_MUX("AIF3_PB", "AIF3 PB"),*/
+	{"SLIM RX0 MUX", "AIF3_PB", "AIF3 PB"},
+	{"SLIM RX1 MUX", "AIF3_PB", "AIF3 PB"},
+	{"SLIM RX2 MUX", "AIF3_PB", "AIF3 PB"},
+	{"SLIM RX3 MUX", "AIF3_PB", "AIF3 PB"},
+	{"SLIM RX4 MUX", "AIF3_PB", "AIF3 PB"},
+	{"SLIM RX5 MUX", "AIF3_PB", "AIF3 PB"},
+	{"SLIM RX6 MUX", "AIF3_PB", "AIF3 PB"},
+	{"SLIM RX7 MUX", "AIF3_PB", "AIF3 PB"},
+	/* SLIM_MUX("AIF4_PB", "AIF4 PB"),*/
+	{"SLIM RX0 MUX", "AIF4_PB", "AIF4 PB"},
+	{"SLIM RX1 MUX", "AIF4_PB", "AIF4 PB"},
+	{"SLIM RX2 MUX", "AIF4_PB", "AIF4 PB"},
+	{"SLIM RX3 MUX", "AIF4_PB", "AIF4 PB"},
+	{"SLIM RX4 MUX", "AIF4_PB", "AIF4 PB"},
+	{"SLIM RX5 MUX", "AIF4_PB", "AIF4 PB"},
+	{"SLIM RX6 MUX", "AIF4_PB", "AIF4 PB"},
+	{"SLIM RX7 MUX", "AIF4_PB", "AIF4 PB"},
+
+	/* SLIM_MUX("AIF_MIX1_PB", "AIF MIX1 PB"),*/
+	{"SLIM RX0 MUX", "AIF_MIX1_PB", "AIF MIX1 PB"},
+	{"SLIM RX1 MUX", "AIF_MIX1_PB", "AIF MIX1 PB"},
+	{"SLIM RX2 MUX", "AIF_MIX1_PB", "AIF MIX1 PB"},
+	{"SLIM RX3 MUX", "AIF_MIX1_PB", "AIF MIX1 PB"},
+	{"SLIM RX4 MUX", "AIF_MIX1_PB", "AIF MIX1 PB"},
+	{"SLIM RX5 MUX", "AIF_MIX1_PB", "AIF MIX1 PB"},
+	{"SLIM RX6 MUX", "AIF_MIX1_PB", "AIF MIX1 PB"},
+	{"SLIM RX7 MUX", "AIF_MIX1_PB", "AIF MIX1 PB"},
+
+	{"SLIM RX0", NULL, "SLIM RX0 MUX"},
+	{"SLIM RX1", NULL, "SLIM RX1 MUX"},
+	{"SLIM RX2", NULL, "SLIM RX2 MUX"},
+	{"SLIM RX3", NULL, "SLIM RX3 MUX"},
+	{"SLIM RX4", NULL, "SLIM RX4 MUX"},
+	{"SLIM RX5", NULL, "SLIM RX5 MUX"},
+	{"SLIM RX6", NULL, "SLIM RX6 MUX"},
+	{"SLIM RX7", NULL, "SLIM RX7 MUX"},
+
+	{"RX INT0_1 MIX1 INP0", "RX0", "SLIM RX0"},
+	{"RX INT0_1 MIX1 INP0", "RX1", "SLIM RX1"},
+	{"RX INT0_1 MIX1 INP0", "RX2", "SLIM RX2"},
+	{"RX INT0_1 MIX1 INP0", "RX3", "SLIM RX3"},
+	{"RX INT0_1 MIX1 INP0", "RX4", "SLIM RX4"},
+	{"RX INT0_1 MIX1 INP0", "RX5", "SLIM RX5"},
+	{"RX INT0_1 MIX1 INP0", "RX6", "SLIM RX6"},
+	{"RX INT0_1 MIX1 INP0", "RX7", "SLIM RX7"},
+	{"RX INT0_1 MIX1 INP0", "IIR0", "IIR0"},
+	{"RX INT0_1 MIX1 INP0", "IIR1", "IIR1"},
+	{"RX INT0_1 MIX1 INP1", "RX0", "SLIM RX0"},
+	{"RX INT0_1 MIX1 INP1", "RX1", "SLIM RX1"},
+	{"RX INT0_1 MIX1 INP1", "RX2", "SLIM RX2"},
+	{"RX INT0_1 MIX1 INP1", "RX3", "SLIM RX3"},
+	{"RX INT0_1 MIX1 INP1", "RX4", "SLIM RX4"},
+	{"RX INT0_1 MIX1 INP1", "RX5", "SLIM RX5"},
+	{"RX INT0_1 MIX1 INP1", "RX6", "SLIM RX6"},
+	{"RX INT0_1 MIX1 INP1", "RX7", "SLIM RX7"},
+	{"RX INT0_1 MIX1 INP1", "IIR0", "IIR0"},
+	{"RX INT0_1 MIX1 INP1", "IIR1", "IIR1"},
+	{"RX INT0_1 MIX1 INP2", "RX0", "SLIM RX0"},
+	{"RX INT0_1 MIX1 INP2", "RX1", "SLIM RX1"},
+	{"RX INT0_1 MIX1 INP2", "RX2", "SLIM RX2"},
+	{"RX INT0_1 MIX1 INP2", "RX3", "SLIM RX3"},
+	{"RX INT0_1 MIX1 INP2", "RX4", "SLIM RX4"},
+	{"RX INT0_1 MIX1 INP2", "RX5", "SLIM RX5"},
+	{"RX INT0_1 MIX1 INP2", "RX6", "SLIM RX6"},
+	{"RX INT0_1 MIX1 INP2", "RX7", "SLIM RX7"},
+	{"RX INT0_1 MIX1 INP2", "IIR0", "IIR0"},
+	{"RX INT0_1 MIX1 INP2", "IIR1", "IIR1"},
+
+	/* MIXing path INT0 */
+	{"RX INT0_2 MUX", "RX0", "SLIM RX0"},
+	{"RX INT0_2 MUX", "RX1", "SLIM RX1"},
+	{"RX INT0_2 MUX", "RX2", "SLIM RX2"},
+	{"RX INT0_2 MUX", "RX3", "SLIM RX3"},
+	{"RX INT0_2 MUX", "RX4", "SLIM RX4"},
+	{"RX INT0_2 MUX", "RX5", "SLIM RX5"},
+	{"RX INT0_2 MUX", "RX6", "SLIM RX6"},
+	{"RX INT0_2 MUX", "RX7", "SLIM RX7"},
+	{"RX INT0 SEC MIX", NULL, "RX INT0_2 MUX"},
+
+	/* MIXing path INT1 */
+	{"RX INT1_2 MUX", "RX0", "SLIM RX0"},
+	{"RX INT1_2 MUX", "RX1", "SLIM RX1"},
+	{"RX INT1_2 MUX", "RX2", "SLIM RX2"},
+	{"RX INT1_2 MUX", "RX3", "SLIM RX3"},
+	{"RX INT1_2 MUX", "RX4", "SLIM RX4"},
+	{"RX INT1_2 MUX", "RX5", "SLIM RX5"},
+	{"RX INT1_2 MUX", "RX6", "SLIM RX6"},
+	{"RX INT1_2 MUX", "RX7", "SLIM RX7"},
+	{"RX INT1 SEC MIX", NULL, "RX INT1_2 MUX"},
+
+	/* MIXing path INT2 */
+	{"RX INT2_2 MUX", "RX0", "SLIM RX0"},
+	{"RX INT2_2 MUX", "RX1", "SLIM RX1"},
+	{"RX INT2_2 MUX", "RX2", "SLIM RX2"},
+	{"RX INT2_2 MUX", "RX3", "SLIM RX3"},
+	{"RX INT2_2 MUX", "RX4", "SLIM RX4"},
+	{"RX INT2_2 MUX", "RX5", "SLIM RX5"},
+	{"RX INT2_2 MUX", "RX6", "SLIM RX6"},
+	{"RX INT2_2 MUX", "RX7", "SLIM RX7"},
+	{"RX INT2 SEC MIX", NULL, "RX INT2_2 MUX"},
+
+	/* MIXing path INT3 */
+	{"RX INT3_2 MUX", "RX0", "SLIM RX0"},
+	{"RX INT3_2 MUX", "RX1", "SLIM RX1"},
+	{"RX INT3_2 MUX", "RX2", "SLIM RX2"},
+	{"RX INT3_2 MUX", "RX3", "SLIM RX3"},
+	{"RX INT3_2 MUX", "RX4", "SLIM RX4"},
+	{"RX INT3_2 MUX", "RX5", "SLIM RX5"},
+	{"RX INT3_2 MUX", "RX6", "SLIM RX6"},
+	{"RX INT3_2 MUX", "RX7", "SLIM RX7"},
+	{"RX INT3 SEC MIX", NULL, "RX INT3_2 MUX"},
+
+	/* MIXing path INT4 */
+	{"RX INT4_2 MUX", "RX0", "SLIM RX0"},
+	{"RX INT4_2 MUX", "RX1", "SLIM RX1"},
+	{"RX INT4_2 MUX", "RX2", "SLIM RX2"},
+	{"RX INT4_2 MUX", "RX3", "SLIM RX3"},
+	{"RX INT4_2 MUX", "RX4", "SLIM RX4"},
+	{"RX INT4_2 MUX", "RX5", "SLIM RX5"},
+	{"RX INT4_2 MUX", "RX6", "SLIM RX6"},
+	{"RX INT4_2 MUX", "RX7", "SLIM RX7"},
+	{"RX INT4 SEC MIX", NULL, "RX INT4_2 MUX"},
+
+	/* MIXing path INT5 */
+	{"RX INT5_2 MUX", "RX0", "SLIM RX0"},
+	{"RX INT5_2 MUX", "RX1", "SLIM RX1"},
+	{"RX INT5_2 MUX", "RX2", "SLIM RX2"},
+	{"RX INT5_2 MUX", "RX3", "SLIM RX3"},
+	{"RX INT5_2 MUX", "RX4", "SLIM RX4"},
+	{"RX INT5_2 MUX", "RX5", "SLIM RX5"},
+	{"RX INT5_2 MUX", "RX6", "SLIM RX6"},
+	{"RX INT5_2 MUX", "RX7", "SLIM RX7"},
+	{"RX INT5 SEC MIX", NULL, "RX INT5_2 MUX"},
+
+	/* MIXing path INT6 */
+	{"RX INT6_2 MUX", "RX0", "SLIM RX0"},
+	{"RX INT6_2 MUX", "RX1", "SLIM RX1"},
+	{"RX INT6_2 MUX", "RX2", "SLIM RX2"},
+	{"RX INT6_2 MUX", "RX3", "SLIM RX3"},
+	{"RX INT6_2 MUX", "RX4", "SLIM RX4"},
+	{"RX INT6_2 MUX", "RX5", "SLIM RX5"},
+	{"RX INT6_2 MUX", "RX6", "SLIM RX6"},
+	{"RX INT6_2 MUX", "RX7", "SLIM RX7"},
+	{"RX INT6 SEC MIX", NULL, "RX INT6_2 MUX"},
+
+	/* MIXing path INT7 */
+	{"RX INT7_2 MUX", "RX0", "SLIM RX0"},
+	{"RX INT7_2 MUX", "RX1", "SLIM RX1"},
+	{"RX INT7_2 MUX", "RX2", "SLIM RX2"},
+	{"RX INT7_2 MUX", "RX3", "SLIM RX3"},
+	{"RX INT7_2 MUX", "RX4", "SLIM RX4"},
+	{"RX INT7_2 MUX", "RX5", "SLIM RX5"},
+	{"RX INT7_2 MUX", "RX6", "SLIM RX6"},
+	{"RX INT7_2 MUX", "RX7", "SLIM RX7"},
+	{"RX INT7 SEC MIX", NULL, "RX INT7_2 MUX"},
+
+	/* MIXing path INT8 */
+	{"RX INT8_2 MUX", "RX0", "SLIM RX0"},
+	{"RX INT8_2 MUX", "RX1", "SLIM RX1"},
+	{"RX INT8_2 MUX", "RX2", "SLIM RX2"},
+	{"RX INT8_2 MUX", "RX3", "SLIM RX3"},
+	{"RX INT8_2 MUX", "RX4", "SLIM RX4"},
+	{"RX INT8_2 MUX", "RX5", "SLIM RX5"},
+	{"RX INT8_2 MUX", "RX6", "SLIM RX6"},
+	{"RX INT8_2 MUX", "RX7", "SLIM RX7"},
+	{"RX INT8 SEC MIX", NULL, "RX INT8_2 MUX"},
+
+	{"RX INT1_1 MIX1 INP0", "RX0", "SLIM RX0"},
+	{"RX INT1_1 MIX1 INP0", "RX1", "SLIM RX1"},
+	{"RX INT1_1 MIX1 INP0", "RX2", "SLIM RX2"},
+	{"RX INT1_1 MIX1 INP0", "RX3", "SLIM RX3"},
+	{"RX INT1_1 MIX1 INP0", "RX4", "SLIM RX4"},
+	{"RX INT1_1 MIX1 INP0", "RX5", "SLIM RX5"},
+	{"RX INT1_1 MIX1 INP0", "RX6", "SLIM RX6"},
+	{"RX INT1_1 MIX1 INP0", "RX7", "SLIM RX7"},
+	{"RX INT1_1 MIX1 INP0", "IIR0", "IIR0"},
+	{"RX INT1_1 MIX1 INP0", "IIR1", "IIR1"},
+	{"RX INT1_1 MIX1 INP1", "RX0", "SLIM RX0"},
+	{"RX INT1_1 MIX1 INP1", "RX1", "SLIM RX1"},
+	{"RX INT1_1 MIX1 INP1", "RX2", "SLIM RX2"},
+	{"RX INT1_1 MIX1 INP1", "RX3", "SLIM RX3"},
+	{"RX INT1_1 MIX1 INP1", "RX4", "SLIM RX4"},
+	{"RX INT1_1 MIX1 INP1", "RX5", "SLIM RX5"},
+	{"RX INT1_1 MIX1 INP1", "RX6", "SLIM RX6"},
+	{"RX INT1_1 MIX1 INP1", "RX7", "SLIM RX7"},
+	{"RX INT1_1 MIX1 INP1", "IIR0", "IIR0"},
+	{"RX INT1_1 MIX1 INP1", "IIR1", "IIR1"},
+	{"RX INT1_1 MIX1 INP2", "RX0", "SLIM RX0"},
+	{"RX INT1_1 MIX1 INP2", "RX1", "SLIM RX1"},
+	{"RX INT1_1 MIX1 INP2", "RX2", "SLIM RX2"},
+	{"RX INT1_1 MIX1 INP2", "RX3", "SLIM RX3"},
+	{"RX INT1_1 MIX1 INP2", "RX4", "SLIM RX4"},
+	{"RX INT1_1 MIX1 INP2", "RX5", "SLIM RX5"},
+	{"RX INT1_1 MIX1 INP2", "RX6", "SLIM RX6"},
+	{"RX INT1_1 MIX1 INP2", "RX7", "SLIM RX7"},
+	{"RX INT1_1 MIX1 INP2", "IIR0", "IIR0"},
+	{"RX INT1_1 MIX1 INP2", "IIR1", "IIR1"},
+	{"RX INT2_1 MIX1 INP0", "RX0", "SLIM RX0"},
+	{"RX INT2_1 MIX1 INP0", "RX1", "SLIM RX1"},
+	{"RX INT2_1 MIX1 INP0", "RX2", "SLIM RX2"},
+	{"RX INT2_1 MIX1 INP0", "RX3", "SLIM RX3"},
+	{"RX INT2_1 MIX1 INP0", "RX4", "SLIM RX4"},
+	{"RX INT2_1 MIX1 INP0", "RX5", "SLIM RX5"},
+	{"RX INT2_1 MIX1 INP0", "RX6", "SLIM RX6"},
+	{"RX INT2_1 MIX1 INP0", "RX7", "SLIM RX7"},
+	{"RX INT2_1 MIX1 INP0", "IIR0", "IIR0"},
+	{"RX INT2_1 MIX1 INP0", "IIR1", "IIR1"},
+	{"RX INT2_1 MIX1 INP1", "RX0", "SLIM RX0"},
+	{"RX INT2_1 MIX1 INP1", "RX1", "SLIM RX1"},
+	{"RX INT2_1 MIX1 INP1", "RX2", "SLIM RX2"},
+	{"RX INT2_1 MIX1 INP1", "RX3", "SLIM RX3"},
+	{"RX INT2_1 MIX1 INP1", "RX4", "SLIM RX4"},
+	{"RX INT2_1 MIX1 INP1", "RX5", "SLIM RX5"},
+	{"RX INT2_1 MIX1 INP1", "RX6", "SLIM RX6"},
+	{"RX INT2_1 MIX1 INP1", "RX7", "SLIM RX7"},
+	{"RX INT2_1 MIX1 INP1", "IIR0", "IIR0"},
+	{"RX INT2_1 MIX1 INP1", "IIR1", "IIR1"},
+	{"RX INT2_1 MIX1 INP2", "RX0", "SLIM RX0"},
+	{"RX INT2_1 MIX1 INP2", "RX1", "SLIM RX1"},
+	{"RX INT2_1 MIX1 INP2", "RX2", "SLIM RX2"},
+	{"RX INT2_1 MIX1 INP2", "RX3", "SLIM RX3"},
+	{"RX INT2_1 MIX1 INP2", "RX4", "SLIM RX4"},
+	{"RX INT2_1 MIX1 INP2", "RX5", "SLIM RX5"},
+	{"RX INT2_1 MIX1 INP2", "RX6", "SLIM RX6"},
+	{"RX INT2_1 MIX1 INP2", "RX7", "SLIM RX7"},
+	{"RX INT2_1 MIX1 INP2", "IIR0", "IIR0"},
+	{"RX INT2_1 MIX1 INP2", "IIR1", "IIR1"},
+
+	{"RX INT3_1 MIX1 INP0", "RX0", "SLIM RX0"},
+	{"RX INT3_1 MIX1 INP0", "RX1", "SLIM RX1"},
+	{"RX INT3_1 MIX1 INP0", "RX2", "SLIM RX2"},
+	{"RX INT3_1 MIX1 INP0", "RX3", "SLIM RX3"},
+	{"RX INT3_1 MIX1 INP0", "RX4", "SLIM RX4"},
+	{"RX INT3_1 MIX1 INP0", "RX5", "SLIM RX5"},
+	{"RX INT3_1 MIX1 INP0", "RX6", "SLIM RX6"},
+	{"RX INT3_1 MIX1 INP0", "RX7", "SLIM RX7"},
+	{"RX INT3_1 MIX1 INP0", "IIR0", "IIR0"},
+	{"RX INT3_1 MIX1 INP0", "IIR1", "IIR1"},
+	{"RX INT3_1 MIX1 INP1", "RX0", "SLIM RX0"},
+	{"RX INT3_1 MIX1 INP1", "RX1", "SLIM RX1"},
+	{"RX INT3_1 MIX1 INP1", "RX2", "SLIM RX2"},
+	{"RX INT3_1 MIX1 INP1", "RX3", "SLIM RX3"},
+	{"RX INT3_1 MIX1 INP1", "RX4", "SLIM RX4"},
+	{"RX INT3_1 MIX1 INP1", "RX5", "SLIM RX5"},
+	{"RX INT3_1 MIX1 INP1", "RX6", "SLIM RX6"},
+	{"RX INT3_1 MIX1 INP1", "RX7", "SLIM RX7"},
+	{"RX INT3_1 MIX1 INP1", "IIR0", "IIR0"},
+	{"RX INT3_1 MIX1 INP1", "IIR1", "IIR1"},
+	{"RX INT3_1 MIX1 INP2", "RX0", "SLIM RX0"},
+	{"RX INT3_1 MIX1 INP2", "RX1", "SLIM RX1"},
+	{"RX INT3_1 MIX1 INP2", "RX2", "SLIM RX2"},
+	{"RX INT3_1 MIX1 INP2", "RX3", "SLIM RX3"},
+	{"RX INT3_1 MIX1 INP2", "RX4", "SLIM RX4"},
+	{"RX INT3_1 MIX1 INP2", "RX5", "SLIM RX5"},
+	{"RX INT3_1 MIX1 INP2", "RX6", "SLIM RX6"},
+	{"RX INT3_1 MIX1 INP2", "RX7", "SLIM RX7"},
+	{"RX INT3_1 MIX1 INP2", "IIR0", "IIR0"},
+	{"RX INT3_1 MIX1 INP2", "IIR1", "IIR1"},
+
+	{"RX INT4_1 MIX1 INP0", "RX0", "SLIM RX0"},
+	{"RX INT4_1 MIX1 INP0", "RX1", "SLIM RX1"},
+	{"RX INT4_1 MIX1 INP0", "RX2", "SLIM RX2"},
+	{"RX INT4_1 MIX1 INP0", "RX3", "SLIM RX3"},
+	{"RX INT4_1 MIX1 INP0", "RX4", "SLIM RX4"},
+	{"RX INT4_1 MIX1 INP0", "RX5", "SLIM RX5"},
+	{"RX INT4_1 MIX1 INP0", "RX6", "SLIM RX6"},
+	{"RX INT4_1 MIX1 INP0", "RX7", "SLIM RX7"},
+	{"RX INT4_1 MIX1 INP0", "IIR0", "IIR0"},
+	{"RX INT4_1 MIX1 INP0", "IIR1", "IIR1"},
+	{"RX INT4_1 MIX1 INP1", "RX0", "SLIM RX0"},
+	{"RX INT4_1 MIX1 INP1", "RX1", "SLIM RX1"},
+	{"RX INT4_1 MIX1 INP1", "RX2", "SLIM RX2"},
+	{"RX INT4_1 MIX1 INP1", "RX3", "SLIM RX3"},
+	{"RX INT4_1 MIX1 INP1", "RX4", "SLIM RX4"},
+	{"RX INT4_1 MIX1 INP1", "RX5", "SLIM RX5"},
+	{"RX INT4_1 MIX1 INP1", "RX6", "SLIM RX6"},
+	{"RX INT4_1 MIX1 INP1", "RX7", "SLIM RX7"},
+	{"RX INT4_1 MIX1 INP1", "IIR0", "IIR0"},
+	{"RX INT4_1 MIX1 INP1", "IIR1", "IIR1"},
+	{"RX INT4_1 MIX1 INP2", "RX0", "SLIM RX0"},
+	{"RX INT4_1 MIX1 INP2", "RX1", "SLIM RX1"},
+	{"RX INT4_1 MIX1 INP2", "RX2", "SLIM RX2"},
+	{"RX INT4_1 MIX1 INP2", "RX3", "SLIM RX3"},
+	{"RX INT4_1 MIX1 INP2", "RX4", "SLIM RX4"},
+	{"RX INT4_1 MIX1 INP2", "RX5", "SLIM RX5"},
+	{"RX INT4_1 MIX1 INP2", "RX6", "SLIM RX6"},
+	{"RX INT4_1 MIX1 INP2", "RX7", "SLIM RX7"},
+	{"RX INT4_1 MIX1 INP2", "IIR0", "IIR0"},
+	{"RX INT4_1 MIX1 INP2", "IIR1", "IIR1"},
+
+	{"RX INT5_1 MIX1 INP0", "RX0", "SLIM RX0"},
+	{"RX INT5_1 MIX1 INP0", "RX1", "SLIM RX1"},
+	{"RX INT5_1 MIX1 INP0", "RX2", "SLIM RX2"},
+	{"RX INT5_1 MIX1 INP0", "RX3", "SLIM RX3"},
+	{"RX INT5_1 MIX1 INP0", "RX4", "SLIM RX4"},
+	{"RX INT5_1 MIX1 INP0", "RX5", "SLIM RX5"},
+	{"RX INT5_1 MIX1 INP0", "RX6", "SLIM RX6"},
+	{"RX INT5_1 MIX1 INP0", "RX7", "SLIM RX7"},
+	{"RX INT5_1 MIX1 INP0", "IIR0", "IIR0"},
+	{"RX INT5_1 MIX1 INP0", "IIR1", "IIR1"},
+	{"RX INT5_1 MIX1 INP1", "RX0", "SLIM RX0"},
+	{"RX INT5_1 MIX1 INP1", "RX1", "SLIM RX1"},
+	{"RX INT5_1 MIX1 INP1", "RX2", "SLIM RX2"},
+	{"RX INT5_1 MIX1 INP1", "RX3", "SLIM RX3"},
+	{"RX INT5_1 MIX1 INP1", "RX4", "SLIM RX4"},
+	{"RX INT5_1 MIX1 INP1", "RX5", "SLIM RX5"},
+	{"RX INT5_1 MIX1 INP1", "RX6", "SLIM RX6"},
+	{"RX INT5_1 MIX1 INP1", "RX7", "SLIM RX7"},
+	{"RX INT5_1 MIX1 INP1", "IIR0", "IIR0"},
+	{"RX INT5_1 MIX1 INP1", "IIR1", "IIR1"},
+	{"RX INT5_1 MIX1 INP2", "RX0", "SLIM RX0"},
+	{"RX INT5_1 MIX1 INP2", "RX1", "SLIM RX1"},
+	{"RX INT5_1 MIX1 INP2", "RX2", "SLIM RX2"},
+	{"RX INT5_1 MIX1 INP2", "RX3", "SLIM RX3"},
+	{"RX INT5_1 MIX1 INP2", "RX4", "SLIM RX4"},
+	{"RX INT5_1 MIX1 INP2", "RX5", "SLIM RX5"},
+	{"RX INT5_1 MIX1 INP2", "RX6", "SLIM RX6"},
+	{"RX INT5_1 MIX1 INP2", "RX7", "SLIM RX7"},
+	{"RX INT5_1 MIX1 INP2", "IIR0", "IIR0"},
+	{"RX INT5_1 MIX1 INP2", "IIR1", "IIR1"},
+
+	{"RX INT6_1 MIX1 INP0", "RX0", "SLIM RX0"},
+	{"RX INT6_1 MIX1 INP0", "RX1", "SLIM RX1"},
+	{"RX INT6_1 MIX1 INP0", "RX2", "SLIM RX2"},
+	{"RX INT6_1 MIX1 INP0", "RX3", "SLIM RX3"},
+	{"RX INT6_1 MIX1 INP0", "RX4", "SLIM RX4"},
+	{"RX INT6_1 MIX1 INP0", "RX5", "SLIM RX5"},
+	{"RX INT6_1 MIX1 INP0", "RX6", "SLIM RX6"},
+	{"RX INT6_1 MIX1 INP0", "RX7", "SLIM RX7"},
+	{"RX INT6_1 MIX1 INP0", "IIR0", "IIR0"},
+	{"RX INT6_1 MIX1 INP0", "IIR1", "IIR1"},
+	{"RX INT6_1 MIX1 INP1", "RX0", "SLIM RX0"},
+	{"RX INT6_1 MIX1 INP1", "RX1", "SLIM RX1"},
+	{"RX INT6_1 MIX1 INP1", "RX2", "SLIM RX2"},
+	{"RX INT6_1 MIX1 INP1", "RX3", "SLIM RX3"},
+	{"RX INT6_1 MIX1 INP1", "RX4", "SLIM RX4"},
+	{"RX INT6_1 MIX1 INP1", "RX5", "SLIM RX5"},
+	{"RX INT6_1 MIX1 INP1", "RX6", "SLIM RX6"},
+	{"RX INT6_1 MIX1 INP1", "RX7", "SLIM RX7"},
+	{"RX INT6_1 MIX1 INP1", "IIR0", "IIR0"},
+	{"RX INT6_1 MIX1 INP1", "IIR1", "IIR1"},
+	{"RX INT6_1 MIX1 INP2", "RX0", "SLIM RX0"},
+	{"RX INT6_1 MIX1 INP2", "RX1", "SLIM RX1"},
+	{"RX INT6_1 MIX1 INP2", "RX2", "SLIM RX2"},
+	{"RX INT6_1 MIX1 INP2", "RX3", "SLIM RX3"},
+	{"RX INT6_1 MIX1 INP2", "RX4", "SLIM RX4"},
+	{"RX INT6_1 MIX1 INP2", "RX5", "SLIM RX5"},
+	{"RX INT6_1 MIX1 INP2", "RX6", "SLIM RX6"},
+	{"RX INT6_1 MIX1 INP2", "RX7", "SLIM RX7"},
+	{"RX INT6_1 MIX1 INP2", "IIR0", "IIR0"},
+	{"RX INT6_1 MIX1 INP2", "IIR1", "IIR1"},
+
+	{"RX INT7_1 MIX1 INP0", "RX0", "SLIM RX0"},
+	{"RX INT7_1 MIX1 INP0", "RX1", "SLIM RX1"},
+	{"RX INT7_1 MIX1 INP0", "RX2", "SLIM RX2"},
+	{"RX INT7_1 MIX1 INP0", "RX3", "SLIM RX3"},
+	{"RX INT7_1 MIX1 INP0", "RX4", "SLIM RX4"},
+	{"RX INT7_1 MIX1 INP0", "RX5", "SLIM RX5"},
+	{"RX INT7_1 MIX1 INP0", "RX6", "SLIM RX6"},
+	{"RX INT7_1 MIX1 INP0", "RX7", "SLIM RX7"},
+	{"RX INT7_1 MIX1 INP0", "IIR0", "IIR0"},
+	{"RX INT7_1 MIX1 INP0", "IIR1", "IIR1"},
+	{"RX INT7_1 MIX1 INP1", "RX0", "SLIM RX0"},
+	{"RX INT7_1 MIX1 INP1", "RX1", "SLIM RX1"},
+	{"RX INT7_1 MIX1 INP1", "RX2", "SLIM RX2"},
+	{"RX INT7_1 MIX1 INP1", "RX3", "SLIM RX3"},
+	{"RX INT7_1 MIX1 INP1", "RX4", "SLIM RX4"},
+	{"RX INT7_1 MIX1 INP1", "RX5", "SLIM RX5"},
+	{"RX INT7_1 MIX1 INP1", "RX6", "SLIM RX6"},
+	{"RX INT7_1 MIX1 INP1", "RX7", "SLIM RX7"},
+	{"RX INT7_1 MIX1 INP1", "IIR0", "IIR0"},
+	{"RX INT7_1 MIX1 INP1", "IIR1", "IIR1"},
+	{"RX INT7_1 MIX1 INP2", "RX0", "SLIM RX0"},
+	{"RX INT7_1 MIX1 INP2", "RX1", "SLIM RX1"},
+	{"RX INT7_1 MIX1 INP2", "RX2", "SLIM RX2"},
+	{"RX INT7_1 MIX1 INP2", "RX3", "SLIM RX3"},
+	{"RX INT7_1 MIX1 INP2", "RX4", "SLIM RX4"},
+	{"RX INT7_1 MIX1 INP2", "RX5", "SLIM RX5"},
+	{"RX INT7_1 MIX1 INP2", "RX6", "SLIM RX6"},
+	{"RX INT7_1 MIX1 INP2", "RX7", "SLIM RX7"},
+	{"RX INT7_1 MIX1 INP2", "IIR0", "IIR0"},
+	{"RX INT7_1 MIX1 INP2", "IIR1", "IIR1"},
+
+	{"RX INT8_1 MIX1 INP0", "RX0", "SLIM RX0"},
+	{"RX INT8_1 MIX1 INP0", "RX1", "SLIM RX1"},
+	{"RX INT8_1 MIX1 INP0", "RX2", "SLIM RX2"},
+	{"RX INT8_1 MIX1 INP0", "RX3", "SLIM RX3"},
+	{"RX INT8_1 MIX1 INP0", "RX4", "SLIM RX4"},
+	{"RX INT8_1 MIX1 INP0", "RX5", "SLIM RX5"},
+	{"RX INT8_1 MIX1 INP0", "RX6", "SLIM RX6"},
+	{"RX INT8_1 MIX1 INP0", "RX7", "SLIM RX7"},
+	{"RX INT8_1 MIX1 INP0", "IIR0", "IIR0"},
+	{"RX INT8_1 MIX1 INP0", "IIR1", "IIR1"},
+	{"RX INT8_1 MIX1 INP1", "RX0", "SLIM RX0"},
+	{"RX INT8_1 MIX1 INP1", "RX1", "SLIM RX1"},
+	{"RX INT8_1 MIX1 INP1", "RX2", "SLIM RX2"},
+	{"RX INT8_1 MIX1 INP1", "RX3", "SLIM RX3"},
+	{"RX INT8_1 MIX1 INP1", "RX4", "SLIM RX4"},
+	{"RX INT8_1 MIX1 INP1", "RX5", "SLIM RX5"},
+	{"RX INT8_1 MIX1 INP1", "RX6", "SLIM RX6"},
+	{"RX INT8_1 MIX1 INP1", "RX7", "SLIM RX7"},
+	{"RX INT8_1 MIX1 INP1", "IIR0", "IIR0"},
+	{"RX INT8_1 MIX1 INP1", "IIR1", "IIR1"},
+	{"RX INT8_1 MIX1 INP2", "RX0", "SLIM RX0"},
+	{"RX INT8_1 MIX1 INP2", "RX1", "SLIM RX1"},
+	{"RX INT8_1 MIX1 INP2", "RX2", "SLIM RX2"},
+	{"RX INT8_1 MIX1 INP2", "RX3", "SLIM RX3"},
+	{"RX INT8_1 MIX1 INP2", "RX4", "SLIM RX4"},
+	{"RX INT8_1 MIX1 INP2", "RX5", "SLIM RX5"},
+	{"RX INT8_1 MIX1 INP2", "RX6", "SLIM RX6"},
+	{"RX INT8_1 MIX1 INP2", "RX7", "SLIM RX7"},
+	{"RX INT8_1 MIX1 INP2", "IIR0", "IIR0"},
+	{"RX INT8_1 MIX1 INP2", "IIR1", "IIR1"},
+
+	/*
+	 * SRC0, SRC1 inputs to Sidetone RX Mixer
+	 * on RX0, RX1, RX2, RX3, RX4 and RX7 chains
+	 */
+	{"IIR0", NULL, "IIR0 INP0 MUX"},
+	{"IIR0 INP0 MUX", "DEC0", "ADC MUX0"},
+	{"IIR0 INP0 MUX", "DEC1", "ADC MUX1"},
+	{"IIR0 INP0 MUX", "DEC2", "ADC MUX2"},
+	{"IIR0 INP0 MUX", "DEC3", "ADC MUX3"},
+	{"IIR0 INP0 MUX", "DEC4", "ADC MUX4"},
+	{"IIR0 INP0 MUX", "DEC5", "ADC MUX5"},
+	{"IIR0 INP0 MUX", "DEC6", "ADC MUX6"},
+	{"IIR0 INP0 MUX", "DEC7", "ADC MUX7"},
+	{"IIR0 INP0 MUX", "DEC8", "ADC MUX8"},
+	{"IIR0 INP0 MUX", "RX0", "SLIM RX0"},
+	{"IIR0 INP0 MUX", "RX1", "SLIM RX1"},
+	{"IIR0 INP0 MUX", "RX2", "SLIM RX2"},
+	{"IIR0 INP0 MUX", "RX3", "SLIM RX3"},
+	{"IIR0 INP0 MUX", "RX4", "SLIM RX4"},
+	{"IIR0 INP0 MUX", "RX5", "SLIM RX5"},
+	{"IIR0 INP0 MUX", "RX6", "SLIM RX6"},
+	{"IIR0 INP0 MUX", "RX7", "SLIM RX7"},
+	{"IIR0", NULL, "IIR0 INP1 MUX"},
+	{"IIR0 INP1 MUX", "DEC0", "ADC MUX0"},
+	{"IIR0 INP1 MUX", "DEC1", "ADC MUX1"},
+	{"IIR0 INP1 MUX", "DEC2", "ADC MUX2"},
+	{"IIR0 INP1 MUX", "DEC3", "ADC MUX3"},
+	{"IIR0 INP1 MUX", "DEC4", "ADC MUX4"},
+	{"IIR0 INP1 MUX", "DEC5", "ADC MUX5"},
+	{"IIR0 INP1 MUX", "DEC6", "ADC MUX6"},
+	{"IIR0 INP1 MUX", "DEC7", "ADC MUX7"},
+	{"IIR0 INP1 MUX", "DEC8", "ADC MUX8"},
+	{"IIR0 INP1 MUX", "RX0", "SLIM RX0"},
+	{"IIR0 INP1 MUX", "RX1", "SLIM RX1"},
+	{"IIR0 INP1 MUX", "RX2", "SLIM RX2"},
+	{"IIR0 INP1 MUX", "RX3", "SLIM RX3"},
+	{"IIR0 INP1 MUX", "RX4", "SLIM RX4"},
+	{"IIR0 INP1 MUX", "RX5", "SLIM RX5"},
+	{"IIR0 INP1 MUX", "RX6", "SLIM RX6"},
+	{"IIR0 INP1 MUX", "RX7", "SLIM RX7"},
+	{"IIR0", NULL, "IIR0 INP2 MUX"},
+	{"IIR0 INP2 MUX", "DEC0", "ADC MUX0"},
+	{"IIR0 INP2 MUX", "DEC1", "ADC MUX1"},
+	{"IIR0 INP2 MUX", "DEC2", "ADC MUX2"},
+	{"IIR0 INP2 MUX", "DEC3", "ADC MUX3"},
+	{"IIR0 INP2 MUX", "DEC4", "ADC MUX4"},
+	{"IIR0 INP2 MUX", "DEC5", "ADC MUX5"},
+	{"IIR0 INP2 MUX", "DEC6", "ADC MUX6"},
+	{"IIR0 INP2 MUX", "DEC7", "ADC MUX7"},
+	{"IIR0 INP2 MUX", "DEC8", "ADC MUX8"},
+	{"IIR0 INP2 MUX", "RX0", "SLIM RX0"},
+	{"IIR0 INP2 MUX", "RX1", "SLIM RX1"},
+	{"IIR0 INP2 MUX", "RX2", "SLIM RX2"},
+	{"IIR0 INP2 MUX", "RX3", "SLIM RX3"},
+	{"IIR0 INP2 MUX", "RX4", "SLIM RX4"},
+	{"IIR0 INP2 MUX", "RX5", "SLIM RX5"},
+	{"IIR0 INP2 MUX", "RX6", "SLIM RX6"},
+	{"IIR0 INP2 MUX", "RX7", "SLIM RX7"},
+	{"IIR0", NULL, "IIR0 INP3 MUX"},
+	{"IIR0 INP3 MUX", "DEC0", "ADC MUX0"},
+	{"IIR0 INP3 MUX", "DEC1", "ADC MUX1"},
+	{"IIR0 INP3 MUX", "DEC2", "ADC MUX2"},
+	{"IIR0 INP3 MUX", "DEC3", "ADC MUX3"},
+	{"IIR0 INP3 MUX", "DEC4", "ADC MUX4"},
+	{"IIR0 INP3 MUX", "DEC5", "ADC MUX5"},
+	{"IIR0 INP3 MUX", "DEC6", "ADC MUX6"},
+	{"IIR0 INP3 MUX", "DEC7", "ADC MUX7"},
+	{"IIR0 INP3 MUX", "DEC8", "ADC MUX8"},
+	{"IIR0 INP3 MUX", "RX0", "SLIM RX0"},
+	{"IIR0 INP3 MUX", "RX1", "SLIM RX1"},
+	{"IIR0 INP3 MUX", "RX2", "SLIM RX2"},
+	{"IIR0 INP3 MUX", "RX3", "SLIM RX3"},
+	{"IIR0 INP3 MUX", "RX4", "SLIM RX4"},
+	{"IIR0 INP3 MUX", "RX5", "SLIM RX5"},
+	{"IIR0 INP3 MUX", "RX6", "SLIM RX6"},
+	{"IIR0 INP3 MUX", "RX7", "SLIM RX7"},
+
+	{"IIR1", NULL, "IIR1 INP0 MUX"},
+	{"IIR1 INP0 MUX", "DEC0", "ADC MUX0"},
+	{"IIR1 INP0 MUX", "DEC1", "ADC MUX1"},
+	{"IIR1 INP0 MUX", "DEC2", "ADC MUX2"},
+	{"IIR1 INP0 MUX", "DEC3", "ADC MUX3"},
+	{"IIR1 INP0 MUX", "DEC4", "ADC MUX4"},
+	{"IIR1 INP0 MUX", "DEC5", "ADC MUX5"},
+	{"IIR1 INP0 MUX", "DEC6", "ADC MUX6"},
+	{"IIR1 INP0 MUX", "DEC7", "ADC MUX7"},
+	{"IIR1 INP0 MUX", "DEC8", "ADC MUX8"},
+	{"IIR1 INP0 MUX", "RX0", "SLIM RX0"},
+	{"IIR1 INP0 MUX", "RX1", "SLIM RX1"},
+	{"IIR1 INP0 MUX", "RX2", "SLIM RX2"},
+	{"IIR1 INP0 MUX", "RX3", "SLIM RX3"},
+	{"IIR1 INP0 MUX", "RX4", "SLIM RX4"},
+	{"IIR1 INP0 MUX", "RX5", "SLIM RX5"},
+	{"IIR1 INP0 MUX", "RX6", "SLIM RX6"},
+	{"IIR1 INP0 MUX", "RX7", "SLIM RX7"},
+	{"IIR1", NULL, "IIR1 INP1 MUX"},
+	{"IIR1 INP1 MUX", "DEC0", "ADC MUX0"},
+	{"IIR1 INP1 MUX", "DEC1", "ADC MUX1"},
+	{"IIR1 INP1 MUX", "DEC2", "ADC MUX2"},
+	{"IIR1 INP1 MUX", "DEC3", "ADC MUX3"},
+	{"IIR1 INP1 MUX", "DEC4", "ADC MUX4"},
+	{"IIR1 INP1 MUX", "DEC5", "ADC MUX5"},
+	{"IIR1 INP1 MUX", "DEC6", "ADC MUX6"},
+	{"IIR1 INP1 MUX", "DEC7", "ADC MUX7"},
+	{"IIR1 INP1 MUX", "DEC8", "ADC MUX8"},
+	{"IIR1 INP1 MUX", "RX0", "SLIM RX0"},
+	{"IIR1 INP1 MUX", "RX1", "SLIM RX1"},
+	{"IIR1 INP1 MUX", "RX2", "SLIM RX2"},
+	{"IIR1 INP1 MUX", "RX3", "SLIM RX3"},
+	{"IIR1 INP1 MUX", "RX4", "SLIM RX4"},
+	{"IIR1 INP1 MUX", "RX5", "SLIM RX5"},
+	{"IIR1 INP1 MUX", "RX6", "SLIM RX6"},
+	{"IIR1 INP1 MUX", "RX7", "SLIM RX7"},
+	{"IIR1", NULL, "IIR1 INP2 MUX"},
+	{"IIR1 INP2 MUX", "DEC0", "ADC MUX0"},
+	{"IIR1 INP2 MUX", "DEC1", "ADC MUX1"},
+	{"IIR1 INP2 MUX", "DEC2", "ADC MUX2"},
+	{"IIR1 INP2 MUX", "DEC3", "ADC MUX3"},
+	{"IIR1 INP2 MUX", "DEC4", "ADC MUX4"},
+	{"IIR1 INP2 MUX", "DEC5", "ADC MUX5"},
+	{"IIR1 INP2 MUX", "DEC6", "ADC MUX6"},
+	{"IIR1 INP2 MUX", "DEC7", "ADC MUX7"},
+	{"IIR1 INP2 MUX", "DEC8", "ADC MUX8"},
+	{"IIR1 INP2 MUX", "RX0", "SLIM RX0"},
+	{"IIR1 INP2 MUX", "RX1", "SLIM RX1"},
+	{"IIR1 INP2 MUX", "RX2", "SLIM RX2"},
+	{"IIR1 INP2 MUX", "RX3", "SLIM RX3"},
+	{"IIR1 INP2 MUX", "RX4", "SLIM RX4"},
+	{"IIR1 INP2 MUX", "RX5", "SLIM RX5"},
+	{"IIR1 INP2 MUX", "RX6", "SLIM RX6"},
+	{"IIR1 INP2 MUX", "RX7", "SLIM RX7"},
+	{"IIR1", NULL, "IIR1 INP3 MUX"},
+	{"IIR1 INP3 MUX", "DEC0", "ADC MUX0"},
+	{"IIR1 INP3 MUX", "DEC1", "ADC MUX1"},
+	{"IIR1 INP3 MUX", "DEC2", "ADC MUX2"},
+	{"IIR1 INP3 MUX", "DEC3", "ADC MUX3"},
+	{"IIR1 INP3 MUX", "DEC4", "ADC MUX4"},
+	{"IIR1 INP3 MUX", "DEC5", "ADC MUX5"},
+	{"IIR1 INP3 MUX", "DEC6", "ADC MUX6"},
+	{"IIR1 INP3 MUX", "DEC7", "ADC MUX7"},
+	{"IIR1 INP3 MUX", "DEC8", "ADC MUX8"},
+	{"IIR1 INP3 MUX", "RX0", "SLIM RX0"},
+	{"IIR1 INP3 MUX", "RX1", "SLIM RX1"},
+	{"IIR1 INP3 MUX", "RX2", "SLIM RX2"},
+	{"IIR1 INP3 MUX", "RX3", "SLIM RX3"},
+	{"IIR1 INP3 MUX", "RX4", "SLIM RX4"},
+	{"IIR1 INP3 MUX", "RX5", "SLIM RX5"},
+	{"IIR1 INP3 MUX", "RX6", "SLIM RX6"},
+	{"IIR1 INP3 MUX", "RX7", "SLIM RX7"},
+
+	{"SRC0", NULL, "IIR0"},
+	{"SRC1", NULL, "IIR1"},
+	{"RX INT0 MIX2 INP", "SRC0", "SRC0"},
+	{"RX INT0 MIX2 INP", "SRC1", "SRC1"},
+	{"RX INT1 MIX2 INP", "SRC0", "SRC0"},
+	{"RX INT1 MIX2 INP", "SRC1", "SRC1"},
+	{"RX INT2 MIX2 INP", "SRC0", "SRC0"},
+	{"RX INT2 MIX2 INP", "SRC1", "SRC1"},
+	{"RX INT3 MIX2 INP", "SRC0", "SRC0"},
+	{"RX INT3 MIX2 INP", "SRC1", "SRC1"},
+	{"RX INT4 MIX2 INP", "SRC0", "SRC0"},
+	{"RX INT4 MIX2 INP", "SRC1", "SRC1"},
+	{"RX INT7 MIX2 INP", "SRC0", "SRC0"},
+	{"RX INT7 MIX2 INP", "SRC1", "SRC1"},
+};
+
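+/*
+ * The "AMIC_x_y PWR MODE" controls map onto the per-pair analog mic
+ * registers (ANA_AMIC1/3/5); the handlers below pick the register from
+ * the control name and access the power level field through
+ * WCD9335_AMIC_PWR_LVL_MASK/SHIFT.
+ */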
+static int tasha_amic_pwr_lvl_get(struct snd_kcontrol *kcontrol,
+				 struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
+	u16 amic_reg = 0;
+
+	if (!strcmp(kcontrol->id.name, "AMIC_1_2 PWR MODE"))
+		amic_reg = WCD9335_ANA_AMIC1;
+	else if (!strcmp(kcontrol->id.name, "AMIC_3_4 PWR MODE"))
+		amic_reg = WCD9335_ANA_AMIC3;
+	else if (!strcmp(kcontrol->id.name, "AMIC_5_6 PWR MODE"))
+		amic_reg = WCD9335_ANA_AMIC5;
+
+	if (!amic_reg)
+		return -EINVAL;
+
+	ucontrol->value.integer.value[0] =
+		(snd_soc_read(codec, amic_reg) & WCD9335_AMIC_PWR_LVL_MASK) >>
+			     WCD9335_AMIC_PWR_LVL_SHIFT;
+
+	return 0;
+}
+
+static int tasha_amic_pwr_lvl_put(struct snd_kcontrol *kcontrol,
+				  struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
+	u32 mode_val;
+	u16 amic_reg = 0;
+
+	mode_val = ucontrol->value.enumerated.item[0];
+
+	dev_dbg(codec->dev, "%s: mode: %u\n",
+		__func__, mode_val);
+
+	if (!strcmp(kcontrol->id.name, "AMIC_1_2 PWR MODE"))
+		amic_reg = WCD9335_ANA_AMIC1;
+	else if (!strcmp(kcontrol->id.name, "AMIC_3_4 PWR MODE"))
+		amic_reg = WCD9335_ANA_AMIC3;
+	else if (!strcmp(kcontrol->id.name, "AMIC_5_6 PWR MODE"))
+		amic_reg = WCD9335_ANA_AMIC5;
+
+	if (!amic_reg)
+		return -EINVAL;
+
+	snd_soc_update_bits(codec, amic_reg, WCD9335_AMIC_PWR_LVL_MASK,
+			    mode_val << WCD9335_AMIC_PWR_LVL_SHIFT);
+
+	return 0;
+}
+
+static int tasha_rx_hph_mode_get(struct snd_kcontrol *kcontrol,
+				 struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
+	struct tasha_priv *tasha = snd_soc_codec_get_drvdata(codec);
+
+	ucontrol->value.integer.value[0] = tasha->hph_mode;
+	return 0;
+}
+
+static int tasha_rx_hph_mode_put(struct snd_kcontrol *kcontrol,
+				 struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
+	struct tasha_priv *tasha = snd_soc_codec_get_drvdata(codec);
+	u32 mode_val;
+
+	mode_val = ucontrol->value.enumerated.item[0];
+
+	dev_dbg(codec->dev, "%s: mode: %u\n",
+		__func__, mode_val);
+
+	if (mode_val == 0) {
+		dev_warn(codec->dev, "%s: Invalid HPH Mode, default to Cls-H HiFi\n",
+			__func__);
+		mode_val = CLS_H_HIFI;
+	}
+	tasha->hph_mode = mode_val;
+	return 0;
+}
+
+static const char *const tasha_conn_mad_text[] = {
+	"NOTUSED1", "ADC1", "ADC2", "ADC3", "ADC4", "ADC5", "ADC6",
+	"NOTUSED2", "DMIC0", "DMIC1", "DMIC2", "DMIC3", "DMIC4",
+	"DMIC5", "NOTUSED3", "NOTUSED4"
+};
+
+static const struct soc_enum tasha_conn_mad_enum =
+	SOC_ENUM_SINGLE_EXT(ARRAY_SIZE(tasha_conn_mad_text),
+			    tasha_conn_mad_text);
+
+static int tasha_enable_ldo_h_get(struct snd_kcontrol *kcontrol,
+				  struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
+	u8 val = 0;
+
+	if (codec)
+		val = snd_soc_read(codec, WCD9335_LDOH_MODE) & 0x80;
+
+	ucontrol->value.integer.value[0] = !!val;
+
+	return 0;
+}
+
+static int tasha_enable_ldo_h_put(struct snd_kcontrol *kcontrol,
+				  struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
+	int value = ucontrol->value.integer.value[0];
+	bool enable;
+
+	enable = !!value;
+	if (codec)
+		tasha_codec_enable_standalone_ldo_h(codec, enable);
+
+	return 0;
+}
+
+static int tasha_mad_input_get(struct snd_kcontrol *kcontrol,
+	struct snd_ctl_elem_value *ucontrol)
+{
+	u8 tasha_mad_input;
+	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
+
+	tasha_mad_input = snd_soc_read(codec,
+				WCD9335_SOC_MAD_INP_SEL) & 0x0F;
+	ucontrol->value.integer.value[0] = tasha_mad_input;
+
+	dev_dbg(codec->dev,
+		"%s: tasha_mad_input = %s\n", __func__,
+		tasha_conn_mad_text[tasha_mad_input]);
+	return 0;
+}
+
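+/*
+ * MAD (Mic Activity Detection) input selection: translate the chosen
+ * ADC/DMIC into its DAPM input widget name, then walk the card's DT DAPM
+ * routes to find which MIC BIAS feeds that widget, so the matching bias
+ * select can be programmed into ANA_MAD_SETUP alongside SOC_MAD_INP_SEL.
+ */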
+static int tasha_mad_input_put(struct snd_kcontrol *kcontrol,
+	struct snd_ctl_elem_value *ucontrol)
+{
+	u8 tasha_mad_input;
+	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
+	struct snd_soc_card *card = codec->component.card;
+	char mad_amic_input_widget[6];
+	const char *mad_input_widget;
+	const char *source_widget = NULL;
+	u32 adc, i, mic_bias_found = 0;
+	int ret = 0;
+	char *mad_input;
+
+	tasha_mad_input = ucontrol->value.integer.value[0];
+
+	if (tasha_mad_input >= ARRAY_SIZE(tasha_conn_mad_text)) {
+		dev_err(codec->dev,
+			"%s: tasha_mad_input = %d out of bounds\n",
+			__func__, tasha_mad_input);
+		return -EINVAL;
+	}
+
+	if (!strcmp(tasha_conn_mad_text[tasha_mad_input], "NOTUSED1") ||
+	    !strcmp(tasha_conn_mad_text[tasha_mad_input], "NOTUSED2") ||
+	    !strcmp(tasha_conn_mad_text[tasha_mad_input], "NOTUSED3") ||
+	    !strcmp(tasha_conn_mad_text[tasha_mad_input], "NOTUSED4")) {
+		dev_err(codec->dev,
+			"%s: Unsupported tasha_mad_input = %s\n",
+			__func__, tasha_conn_mad_text[tasha_mad_input]);
+		return -EINVAL;
+	}
+
+	if (strnstr(tasha_conn_mad_text[tasha_mad_input],
+		    "ADC", sizeof("ADC"))) {
+		mad_input = strpbrk(tasha_conn_mad_text[tasha_mad_input],
+				    "123456");
+		if (!mad_input) {
+			dev_err(codec->dev, "%s: Invalid MAD input %s\n",
+				__func__,
+				tasha_conn_mad_text[tasha_mad_input]);
+			return -EINVAL;
+		}
+		ret = kstrtouint(mad_input, 10, &adc);
+		if ((ret < 0) || (adc > 6)) {
+			dev_err(codec->dev,
+				"%s: Invalid ADC = %s\n", __func__,
+				tasha_conn_mad_text[tasha_mad_input]);
+			return -EINVAL;
+		}
+
+		snprintf(mad_amic_input_widget, 6, "%s%u", "AMIC", adc);
+
+		mad_input_widget = mad_amic_input_widget;
+	} else {
+		/* DMIC type input widget */
+		mad_input_widget = tasha_conn_mad_text[tasha_mad_input];
+	}
+
+	dev_dbg(codec->dev,
+		"%s: tasha input widget = %s\n", __func__,
+		mad_input_widget);
+
+	for (i = 0; i < card->num_of_dapm_routes; i++) {
+		if (!strcmp(card->of_dapm_routes[i].sink, mad_input_widget)) {
+			source_widget = card->of_dapm_routes[i].source;
+			if (!source_widget) {
+				dev_err(codec->dev,
+					"%s: invalid source widget\n",
+					__func__);
+				return -EINVAL;
+			}
+
+			if (strnstr(source_widget,
+				"MIC BIAS1", sizeof("MIC BIAS1"))) {
+				mic_bias_found = 1;
+				break;
+			} else if (strnstr(source_widget,
+				"MIC BIAS2", sizeof("MIC BIAS2"))) {
+				mic_bias_found = 2;
+				break;
+			} else if (strnstr(source_widget,
+				"MIC BIAS3", sizeof("MIC BIAS3"))) {
+				mic_bias_found = 3;
+				break;
+			} else if (strnstr(source_widget,
+				"MIC BIAS4", sizeof("MIC BIAS4"))) {
+				mic_bias_found = 4;
+				break;
+			}
+		}
+	}
+
+	if (!mic_bias_found) {
+		dev_err(codec->dev,
+			"%s: mic bias source not found for input = %s\n",
+			__func__, mad_input_widget);
+		return -EINVAL;
+	}
+
+	dev_dbg(codec->dev,
+		"%s: mic_bias found = %d\n", __func__,
+		mic_bias_found);
+
+	snd_soc_update_bits(codec, WCD9335_SOC_MAD_INP_SEL,
+			    0x0F, tasha_mad_input);
+	snd_soc_update_bits(codec, WCD9335_ANA_MAD_SETUP,
+			    0x07, mic_bias_found);
+
+	return 0;
+}
+
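+/*
+ * Pin control positions are encoded as (reg << 3 | bit): bits [7:3]
+ * select one of the TEST_DEBUG_PIN_CTL_OE_0..3 registers and bits [2:0]
+ * the bit within that register.
+ */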
+static int tasha_pinctl_mode_get(struct snd_kcontrol *kcontrol,
+	struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
+	u16 ctl_reg;
+	u8 reg_val, pinctl_position;
+
+	pinctl_position = ((struct soc_multi_mixer_control *)
+					kcontrol->private_value)->shift;
+	switch (pinctl_position >> 3) {
+	case 0:
+		ctl_reg = WCD9335_TEST_DEBUG_PIN_CTL_OE_0;
+		break;
+	case 1:
+		ctl_reg = WCD9335_TEST_DEBUG_PIN_CTL_OE_1;
+		break;
+	case 2:
+		ctl_reg = WCD9335_TEST_DEBUG_PIN_CTL_OE_2;
+		break;
+	case 3:
+		ctl_reg = WCD9335_TEST_DEBUG_PIN_CTL_OE_3;
+		break;
+	default:
+		dev_err(codec->dev, "%s: Invalid pinctl position = %d\n",
+			__func__, pinctl_position);
+		return -EINVAL;
+	}
+
+	reg_val = snd_soc_read(codec, ctl_reg);
+	reg_val = (reg_val >> (pinctl_position & 0x07)) & 0x1;
+	ucontrol->value.integer.value[0] = reg_val;
+
+	return 0;
+}
+
+static int tasha_pinctl_mode_put(struct snd_kcontrol *kcontrol,
+	struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
+	struct tasha_priv *tasha = snd_soc_codec_get_drvdata(codec);
+	u16 ctl_reg, cfg_reg;
+	u8 ctl_val, cfg_val, pinctl_position, pinctl_mode, mask;
+
+	/* 1- high or low; 0- high Z */
+	pinctl_mode = ucontrol->value.integer.value[0];
+	pinctl_position = ((struct soc_multi_mixer_control *)
+					kcontrol->private_value)->shift;
+
+	switch (pinctl_position >> 3) {
+	case 0:
+		ctl_reg = WCD9335_TEST_DEBUG_PIN_CTL_OE_0;
+		break;
+	case 1:
+		ctl_reg = WCD9335_TEST_DEBUG_PIN_CTL_OE_1;
+		break;
+	case 2:
+		ctl_reg = WCD9335_TEST_DEBUG_PIN_CTL_OE_2;
+		break;
+	case 3:
+		ctl_reg = WCD9335_TEST_DEBUG_PIN_CTL_OE_3;
+		break;
+	default:
+		dev_err(codec->dev, "%s: Invalid pinctl position = %d\n",
+			__func__, pinctl_position);
+		return -EINVAL;
+	}
+
+	ctl_val = pinctl_mode << (pinctl_position & 0x07);
+	mask = 1 << (pinctl_position & 0x07);
+	snd_soc_update_bits(codec, ctl_reg, mask, ctl_val);
+
+	cfg_reg = WCD9335_TLMM_BIST_MODE_PINCFG + pinctl_position;
+	if (!pinctl_mode) {
+		if (tasha->intf_type == WCD9XXX_INTERFACE_TYPE_SLIMBUS)
+			cfg_val = 0x4;
+		else
+			cfg_val = 0xC;
+	} else {
+		cfg_val = 0;
+	}
+	snd_soc_update_bits(codec, cfg_reg, 0x07, cfg_val);
+
+	dev_dbg(codec->dev, "%s: reg=0x%x mask=0x%x val=%d reg=0x%x val=%d\n",
+			__func__, ctl_reg, mask, ctl_val, cfg_reg, cfg_val);
+
+	return 0;
+}
+
+static void wcd_vbat_adc_out_config_2_0(struct wcd_vbat *vbat,
+					struct snd_soc_codec *codec)
+{
+	u8 val1, val2;
+
+	/*
+	 * Measure dcp1 by using "ALT" branch of band gap
+	 * voltage(Vbg) and use it in FAST mode
+	 */
+	snd_soc_update_bits(codec, WCD9335_BIAS_CTL, 0x82, 0x82);
+	snd_soc_update_bits(codec, WCD9335_CDC_VBAT_VBAT_PATH_CTL, 0x10, 0x10);
+	snd_soc_update_bits(codec, WCD9335_CDC_VBAT_VBAT_DEBUG1, 0x01, 0x01);
+	snd_soc_update_bits(codec, WCD9335_ANA_VBADC, 0x80, 0x80);
+	snd_soc_update_bits(codec, WCD9335_VBADC_SUBBLOCK_EN, 0x20, 0x00);
+
+	snd_soc_update_bits(codec, WCD9335_VBADC_FE_CTRL, 0x20, 0x20);
+	/* Wait 100 usec after calibration select as Vbg */
+	usleep_range(100, 110);
+
+	snd_soc_update_bits(codec, WCD9335_VBADC_ADC_IO, 0x40, 0x40);
+	val1 = snd_soc_read(codec, WCD9335_VBADC_ADC_DOUTMSB);
+	val2 = snd_soc_read(codec, WCD9335_VBADC_ADC_DOUTLSB);
+	snd_soc_update_bits(codec, WCD9335_VBADC_ADC_IO, 0x40, 0x00);
+
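+	/* 11-bit ADC result: 8 MSBs from DOUTMSB, 3 LSBs from DOUTLSB[2:0] */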
+	vbat->dcp1 = (((val1 & 0xFF) << 3) | (val2 & 0x07));
+
+	snd_soc_update_bits(codec, WCD9335_BIAS_CTL, 0x40, 0x40);
+	/* Wait 100 usec after selecting Vbg as 1.05V */
+	usleep_range(100, 110);
+
+	snd_soc_update_bits(codec, WCD9335_VBADC_ADC_IO, 0x40, 0x40);
+	val1 = snd_soc_read(codec, WCD9335_VBADC_ADC_DOUTMSB);
+	val2 = snd_soc_read(codec, WCD9335_VBADC_ADC_DOUTLSB);
+	snd_soc_update_bits(codec, WCD9335_VBADC_ADC_IO, 0x40, 0x00);
+
+	vbat->dcp2 = (((val1 & 0xFF) << 3) | (val2 & 0x07));
+
+	dev_dbg(codec->dev, "%s: dcp1:0x%x, dcp2:0x%x\n",
+		__func__, vbat->dcp1, vbat->dcp2);
+
+	snd_soc_write(codec, WCD9335_BIAS_CTL, 0x28);
+	/* Wait 100 usec after selecting Vbg as 0.85V */
+	usleep_range(100, 110);
+
+	snd_soc_update_bits(codec, WCD9335_VBADC_FE_CTRL, 0x20, 0x00);
+	snd_soc_update_bits(codec, WCD9335_VBADC_SUBBLOCK_EN, 0x20, 0x20);
+	snd_soc_update_bits(codec, WCD9335_ANA_VBADC, 0x80, 0x00);
+
+	snd_soc_update_bits(codec, WCD9335_CDC_VBAT_VBAT_PATH_CTL, 0x10, 0x00);
+	snd_soc_update_bits(codec, WCD9335_CDC_VBAT_VBAT_DEBUG1, 0x01, 0x00);
+}
+
+static void wcd_vbat_adc_out_config_1_x(struct wcd_vbat *vbat,
+					struct snd_soc_codec *codec)
+{
+	u8 val1, val2;
+
+	/*
+	 * Measure dcp1 by applying band gap voltage(Vbg)
+	 * of 0.85V
+	 */
+	snd_soc_write(codec, WCD9335_ANA_BIAS, 0x20);
+	snd_soc_write(codec, WCD9335_BIAS_CTL, 0x28);
+	snd_soc_write(codec, WCD9335_BIAS_VBG_FINE_ADJ, 0x05);
+	snd_soc_write(codec, WCD9335_ANA_BIAS, 0xA0);
+	/* Wait 2 sec after enabling band gap bias */
+	usleep_range(2000000, 2000100);
+
+	snd_soc_write(codec, WCD9335_ANA_CLK_TOP, 0x82);
+	snd_soc_write(codec, WCD9335_ANA_CLK_TOP, 0x87);
+	snd_soc_update_bits(codec, WCD9335_CDC_VBAT_VBAT_PATH_CTL, 0x10, 0x10);
+	snd_soc_write(codec, WCD9335_CDC_VBAT_VBAT_CFG, 0x0D);
+	snd_soc_write(codec, WCD9335_CDC_VBAT_VBAT_DEBUG1, 0x01);
+
+	snd_soc_write(codec, WCD9335_ANA_VBADC, 0x80);
+	snd_soc_write(codec, WCD9335_VBADC_SUBBLOCK_EN, 0xDE);
+	snd_soc_write(codec, WCD9335_VBADC_FE_CTRL, 0x3C);
+	/* Wait 1 msec after calibration select as Vbg */
+	usleep_range(1000, 1100);
+
+	snd_soc_write(codec, WCD9335_VBADC_ADC_IO, 0xC0);
+	val1 = snd_soc_read(codec, WCD9335_VBADC_ADC_DOUTMSB);
+	val2 = snd_soc_read(codec, WCD9335_VBADC_ADC_DOUTLSB);
+	snd_soc_write(codec, WCD9335_VBADC_ADC_IO, 0x80);
+
+	vbat->dcp1 = (((val1 & 0xFF) << 3) | (val2 & 0x07));
+
+	/*
+	 * Measure dcp2 by applying band gap voltage(Vbg)
+	 * of 1.05V
+	 */
+	snd_soc_write(codec, WCD9335_ANA_BIAS, 0x80);
+	snd_soc_write(codec, WCD9335_ANA_BIAS, 0xC0);
+	snd_soc_write(codec, WCD9335_BIAS_CTL, 0x68);
+	/* Wait 2 msec after selecting Vbg as 1.05V */
+	usleep_range(2000, 2100);
+
+	snd_soc_write(codec, WCD9335_ANA_BIAS, 0x80);
+	/* Wait 1 sec after enabling band gap bias */
+	usleep_range(1000000, 1000100);
+
+	snd_soc_write(codec, WCD9335_VBADC_ADC_IO, 0xC0);
+	val1 = snd_soc_read(codec, WCD9335_VBADC_ADC_DOUTMSB);
+	val2 = snd_soc_read(codec, WCD9335_VBADC_ADC_DOUTLSB);
+	snd_soc_write(codec, WCD9335_VBADC_ADC_IO, 0x80);
+
+	vbat->dcp2 = (((val1 & 0xFF) << 3) | (val2 & 0x07));
+
+	dev_dbg(codec->dev, "%s: dcp1:0x%x, dcp2:0x%x\n",
+		__func__, vbat->dcp1, vbat->dcp2);
+
+	/* Reset the Vbat ADC configuration */
+	snd_soc_write(codec, WCD9335_ANA_BIAS, 0x80);
+	snd_soc_write(codec, WCD9335_ANA_BIAS, 0xC0);
+
+	snd_soc_write(codec, WCD9335_BIAS_CTL, 0x28);
+	/* Wait 2 msec after selecting Vbg as 0.85V */
+	usleep_range(2000, 2100);
+
+	snd_soc_write(codec, WCD9335_ANA_BIAS, 0xA0);
+	/* Wait 1 sec after enabling band gap bias */
+	usleep_range(1000000, 1000100);
+
+	snd_soc_write(codec, WCD9335_VBADC_FE_CTRL, 0x1C);
+	snd_soc_write(codec, WCD9335_VBADC_SUBBLOCK_EN, 0xFE);
+	snd_soc_write(codec, WCD9335_VBADC_ADC_IO, 0x80);
+	snd_soc_write(codec, WCD9335_ANA_VBADC, 0x00);
+
+	snd_soc_write(codec, WCD9335_CDC_VBAT_VBAT_DEBUG1, 0x00);
+	snd_soc_write(codec, WCD9335_CDC_VBAT_VBAT_PATH_CTL, 0x00);
+	snd_soc_write(codec, WCD9335_CDC_VBAT_VBAT_CFG, 0x0A);
+}
+
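+/*
+ * One-shot Vbat ADC setup: run the chip-revision specific measurement
+ * sequence under MCLK and latch the dcp1/dcp2 results, guarded by
+ * vbat->adc_config so it only happens once.
+ */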
+static void wcd_vbat_adc_out_config(struct wcd_vbat *vbat,
+				struct snd_soc_codec *codec)
+{
+	struct wcd9xxx *wcd9xxx = dev_get_drvdata(codec->dev->parent);
+
+	if (!vbat->adc_config) {
+		tasha_cdc_mclk_enable(codec, true, false);
+
+		if (TASHA_IS_2_0(wcd9xxx))
+			wcd_vbat_adc_out_config_2_0(vbat, codec);
+		else
+			wcd_vbat_adc_out_config_1_x(vbat, codec);
+
+		tasha_cdc_mclk_enable(codec, false, false);
+		vbat->adc_config = true;
+	}
+}
+
+static int tasha_update_vbat_reg_config(struct snd_soc_codec *codec)
+{
+	struct tasha_priv *tasha = snd_soc_codec_get_drvdata(codec);
+	struct firmware_cal *hwdep_cal = NULL;
+	struct vbat_monitor_reg *vbat_reg_ptr = NULL;
+	const void *data;
+	size_t cal_size, vbat_size_remaining;
+	int ret = 0, i;
+	u32 vbat_writes_size = 0;
+	u16 reg;
+	u8 mask, val, old_val;
+
+	hwdep_cal = wcdcal_get_fw_cal(tasha->fw_data, WCD9XXX_VBAT_CAL);
+	if (hwdep_cal) {
+		data = hwdep_cal->data;
+		cal_size = hwdep_cal->size;
+		dev_dbg(codec->dev, "%s: using hwdep calibration\n",
+			__func__);
+	} else {
+		dev_err(codec->dev, "%s: Vbat cal not received\n",
+			__func__);
+		ret = -EINVAL;
+		goto done;
+	}
+
+	if (cal_size < sizeof(*vbat_reg_ptr)) {
+		dev_err(codec->dev,
+			"%s: Incorrect size %zd for Vbat Cal, expected %zd\n",
+			__func__, cal_size, sizeof(*vbat_reg_ptr));
+		ret = -EINVAL;
+		goto done;
+	}
+
+	vbat_reg_ptr = (struct vbat_monitor_reg *) (data);
+
+	if (!vbat_reg_ptr) {
+		dev_err(codec->dev,
+			"%s: Invalid calibration data for Vbat\n",
+			__func__);
+		ret = -EINVAL;
+		goto done;
+	}
+
+	vbat_writes_size = vbat_reg_ptr->size;
+	vbat_size_remaining = cal_size - sizeof(u32);
+	dev_dbg(codec->dev, "%s: vbat_writes_sz: %d, vbat_sz_remaining: %zd\n",
+			__func__, vbat_writes_size, vbat_size_remaining);
+
+	if ((vbat_writes_size * TASHA_PACKED_REG_SIZE)
+					> vbat_size_remaining) {
+		pr_err("%s: Incorrect Vbat calibration data\n", __func__);
+		ret = -EINVAL;
+		goto done;
+	}
+
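+	/* Each packed cal entry expands to (reg, mask, val); apply as RMW */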
+	for (i = 0; i < vbat_writes_size; i++) {
+		TASHA_CODEC_UNPACK_ENTRY(vbat_reg_ptr->writes[i],
+					reg, mask, val);
+		old_val = snd_soc_read(codec, reg);
+		snd_soc_write(codec, reg, (old_val & ~mask) | (val & mask));
+	}
+
+done:
+	return ret;
+}
+
+static int tasha_vbat_adc_data_get(struct snd_kcontrol *kcontrol,
+		struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
+	struct tasha_priv *tasha = snd_soc_codec_get_drvdata(codec);
+
+	wcd_vbat_adc_out_config(&tasha->vbat, codec);
+
+	ucontrol->value.integer.value[0] = tasha->vbat.dcp1;
+	ucontrol->value.integer.value[1] = tasha->vbat.dcp2;
+
+	dev_dbg(codec->dev,
+		"%s: Vbat ADC output values, Dcp1: %ld, Dcp2: %ld\n",
+		__func__, ucontrol->value.integer.value[0],
+		ucontrol->value.integer.value[1]);
+
+	return 0;
+}
+
+static const char * const tasha_vbat_gsm_mode_text[] = {
+	"OFF", "ON"
+};
+
+static const struct soc_enum tasha_vbat_gsm_mode_enum =
+	SOC_ENUM_SINGLE_EXT(2, tasha_vbat_gsm_mode_text);
+
+static int tasha_vbat_gsm_mode_func_get(struct snd_kcontrol *kcontrol,
+	struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
+
+	ucontrol->value.integer.value[0] =
+		((snd_soc_read(codec, WCD9335_CDC_VBAT_VBAT_CFG) & 0x04) ?
+		  1 : 0);
+
+	dev_dbg(codec->dev, "%s: value: %ld\n", __func__,
+		ucontrol->value.integer.value[0]);
+
+	return 0;
+}
+
+static int tasha_vbat_gsm_mode_func_put(struct snd_kcontrol *kcontrol,
+					struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
+
+	dev_dbg(codec->dev, "%s: value: %ld\n", __func__,
+		ucontrol->value.integer.value[0]);
+
+	/* Set Vbat register configuration for GSM mode bit based on value */
+	if (ucontrol->value.integer.value[0])
+		snd_soc_update_bits(codec, WCD9335_CDC_VBAT_VBAT_CFG,
+						0x04, 0x04);
+	else
+		snd_soc_update_bits(codec, WCD9335_CDC_VBAT_VBAT_CFG,
+						0x04, 0x00);
+
+	return 0;
+}
+
+static int tasha_codec_vbat_enable_event(struct snd_soc_dapm_widget *w,
+				struct snd_kcontrol *kcontrol,
+				int event)
+{
+	int ret = 0;
+	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+	struct tasha_priv *tasha = snd_soc_codec_get_drvdata(codec);
+	u16 vbat_path_ctl, vbat_cfg, vbat_path_cfg;
+
+	vbat_path_ctl = WCD9335_CDC_VBAT_VBAT_PATH_CTL;
+	vbat_cfg = WCD9335_CDC_VBAT_VBAT_CFG;
+	vbat_path_cfg = WCD9335_CDC_RX8_RX_PATH_CFG1;
+
+	if (!strcmp(w->name, "RX INT8 VBAT"))
+		vbat_path_cfg = WCD9335_CDC_RX8_RX_PATH_CFG1;
+	else if (!strcmp(w->name, "RX INT7 VBAT"))
+		vbat_path_cfg = WCD9335_CDC_RX7_RX_PATH_CFG1;
+	else if (!strcmp(w->name, "RX INT6 VBAT"))
+		vbat_path_cfg = WCD9335_CDC_RX6_RX_PATH_CFG1;
+	else if (!strcmp(w->name, "RX INT5 VBAT"))
+		vbat_path_cfg = WCD9335_CDC_RX5_RX_PATH_CFG1;
+
+	switch (event) {
+	case SND_SOC_DAPM_PRE_PMU:
+		ret = tasha_update_vbat_reg_config(codec);
+		if (ret) {
+			dev_dbg(codec->dev,
+				"%s: VBAT isn't calibrated, so not enabling it\n",
+				__func__);
+			return 0;
+		}
+		snd_soc_write(codec, WCD9335_ANA_VBADC, 0x80);
+		snd_soc_update_bits(codec, vbat_path_cfg, 0x02, 0x02);
+		snd_soc_update_bits(codec, vbat_path_ctl, 0x10, 0x10);
+		snd_soc_update_bits(codec, vbat_cfg, 0x01, 0x01);
+		tasha->vbat.is_enabled = true;
+		break;
+	case SND_SOC_DAPM_POST_PMD:
+		if (tasha->vbat.is_enabled) {
+			snd_soc_update_bits(codec, vbat_cfg, 0x01, 0x00);
+			snd_soc_update_bits(codec, vbat_path_ctl, 0x10, 0x00);
+			snd_soc_update_bits(codec, vbat_path_cfg, 0x02, 0x00);
+			snd_soc_write(codec, WCD9335_ANA_VBADC, 0x00);
+			tasha->vbat.is_enabled = false;
+		}
+		break;
+	}
+
+	return ret;
+}
+
+static const char * const rx_hph_mode_mux_text[] = {
+	"CLS_H_INVALID", "CLS_H_HIFI", "CLS_H_LP", "CLS_AB", "CLS_H_LOHIFI"
+};
+
+static const struct soc_enum rx_hph_mode_mux_enum =
+	SOC_ENUM_SINGLE_EXT(ARRAY_SIZE(rx_hph_mode_mux_text),
+			    rx_hph_mode_mux_text);
+
+static const char * const amic_pwr_lvl_text[] = {
+	"LOW_PWR", "DEFAULT", "HIGH_PERF"
+};
+
+static const struct soc_enum amic_pwr_lvl_enum =
+	SOC_ENUM_SINGLE_EXT(ARRAY_SIZE(amic_pwr_lvl_text),
+			    amic_pwr_lvl_text);
+
+static const struct snd_kcontrol_new tasha_snd_controls[] = {
+	SOC_SINGLE_SX_TLV("RX0 Digital Volume", WCD9335_CDC_RX0_RX_VOL_CTL,
+		0, -84, 40, digital_gain), /* -84dB min - 40dB max */
+	SOC_SINGLE_SX_TLV("RX1 Digital Volume", WCD9335_CDC_RX1_RX_VOL_CTL,
+		0, -84, 40, digital_gain),
+	SOC_SINGLE_SX_TLV("RX2 Digital Volume", WCD9335_CDC_RX2_RX_VOL_CTL,
+		0, -84, 40, digital_gain),
+	SOC_SINGLE_SX_TLV("RX3 Digital Volume", WCD9335_CDC_RX3_RX_VOL_CTL,
+		0, -84, 40, digital_gain),
+	SOC_SINGLE_SX_TLV("RX4 Digital Volume", WCD9335_CDC_RX4_RX_VOL_CTL,
+		0, -84, 40, digital_gain),
+	SOC_SINGLE_SX_TLV("RX5 Digital Volume", WCD9335_CDC_RX5_RX_VOL_CTL,
+		0, -84, 40, digital_gain),
+	SOC_SINGLE_SX_TLV("RX6 Digital Volume", WCD9335_CDC_RX6_RX_VOL_CTL,
+		0, -84, 40, digital_gain),
+	SOC_SINGLE_SX_TLV("RX7 Digital Volume", WCD9335_CDC_RX7_RX_VOL_CTL,
+		0, -84, 40, digital_gain),
+	SOC_SINGLE_SX_TLV("RX8 Digital Volume", WCD9335_CDC_RX8_RX_VOL_CTL,
+		0, -84, 40, digital_gain),
+
+	SOC_SINGLE_SX_TLV("RX0 Mix Digital Volume",
+			  WCD9335_CDC_RX0_RX_VOL_MIX_CTL,
+			  0, -84, 40, digital_gain), /* -84dB min - 40dB max */
+	SOC_SINGLE_SX_TLV("RX1 Mix Digital Volume",
+			  WCD9335_CDC_RX1_RX_VOL_MIX_CTL,
+			  0, -84, 40, digital_gain), /* -84dB min - 40dB max */
+	SOC_SINGLE_SX_TLV("RX2 Mix Digital Volume",
+			  WCD9335_CDC_RX2_RX_VOL_MIX_CTL,
+			  0, -84, 40, digital_gain), /* -84dB min - 40dB max */
+	SOC_SINGLE_SX_TLV("RX3 Mix Digital Volume",
+			  WCD9335_CDC_RX3_RX_VOL_MIX_CTL,
+			  0, -84, 40, digital_gain), /* -84dB min - 40dB max */
+	SOC_SINGLE_SX_TLV("RX4 Mix Digital Volume",
+			  WCD9335_CDC_RX4_RX_VOL_MIX_CTL,
+			  0, -84, 40, digital_gain), /* -84dB min - 40dB max */
+	SOC_SINGLE_SX_TLV("RX5 Mix Digital Volume",
+			  WCD9335_CDC_RX5_RX_VOL_MIX_CTL,
+			  0, -84, 40, digital_gain), /* -84dB min - 40dB max */
+	SOC_SINGLE_SX_TLV("RX6 Mix Digital Volume",
+			  WCD9335_CDC_RX6_RX_VOL_MIX_CTL,
+			  0, -84, 40, digital_gain), /* -84dB min - 40dB max */
+	SOC_SINGLE_SX_TLV("RX7 Mix Digital Volume",
+			  WCD9335_CDC_RX7_RX_VOL_MIX_CTL,
+			  0, -84, 40, digital_gain), /* -84dB min - 40dB max */
+	SOC_SINGLE_SX_TLV("RX8 Mix Digital Volume",
+			  WCD9335_CDC_RX8_RX_VOL_MIX_CTL,
+			  0, -84, 40, digital_gain), /* -84dB min - 40dB max */
+
+	SOC_SINGLE_SX_TLV("DEC0 Volume", WCD9335_CDC_TX0_TX_VOL_CTL, 0,
+					  -84, 40, digital_gain),
+	SOC_SINGLE_SX_TLV("DEC1 Volume", WCD9335_CDC_TX1_TX_VOL_CTL, 0,
+					  -84, 40, digital_gain),
+	SOC_SINGLE_SX_TLV("DEC2 Volume", WCD9335_CDC_TX2_TX_VOL_CTL, 0,
+					  -84, 40, digital_gain),
+	SOC_SINGLE_SX_TLV("DEC3 Volume", WCD9335_CDC_TX3_TX_VOL_CTL, 0,
+					  -84, 40, digital_gain),
+	SOC_SINGLE_SX_TLV("DEC4 Volume", WCD9335_CDC_TX4_TX_VOL_CTL, 0,
+					  -84, 40, digital_gain),
+	SOC_SINGLE_SX_TLV("DEC5 Volume", WCD9335_CDC_TX5_TX_VOL_CTL, 0,
+					  -84, 40, digital_gain),
+	SOC_SINGLE_SX_TLV("DEC6 Volume", WCD9335_CDC_TX6_TX_VOL_CTL, 0,
+					  -84, 40, digital_gain),
+	SOC_SINGLE_SX_TLV("DEC7 Volume", WCD9335_CDC_TX7_TX_VOL_CTL, 0,
+					  -84, 40, digital_gain),
+	SOC_SINGLE_SX_TLV("DEC8 Volume", WCD9335_CDC_TX8_TX_VOL_CTL, 0,
+					  -84, 40, digital_gain),
+
+	SOC_SINGLE_SX_TLV("IIR0 INP0 Volume",
+			  WCD9335_CDC_SIDETONE_IIR0_IIR_GAIN_B1_CTL, 0, -84,
+			  40, digital_gain),
+	SOC_SINGLE_SX_TLV("IIR0 INP1 Volume",
+			  WCD9335_CDC_SIDETONE_IIR0_IIR_GAIN_B2_CTL, 0, -84,
+			  40, digital_gain),
+	SOC_SINGLE_SX_TLV("IIR0 INP2 Volume",
+			  WCD9335_CDC_SIDETONE_IIR0_IIR_GAIN_B3_CTL, 0, -84,
+			  40, digital_gain),
+	SOC_SINGLE_SX_TLV("IIR0 INP3 Volume",
+			  WCD9335_CDC_SIDETONE_IIR0_IIR_GAIN_B4_CTL, 0, -84,
+			  40, digital_gain),
+	SOC_SINGLE_SX_TLV("IIR1 INP0 Volume",
+			  WCD9335_CDC_SIDETONE_IIR1_IIR_GAIN_B1_CTL, 0, -84,
+			  40, digital_gain),
+	SOC_SINGLE_SX_TLV("IIR1 INP1 Volume",
+			  WCD9335_CDC_SIDETONE_IIR1_IIR_GAIN_B2_CTL, 0, -84,
+			  40, digital_gain),
+	SOC_SINGLE_SX_TLV("IIR1 INP2 Volume",
+			  WCD9335_CDC_SIDETONE_IIR1_IIR_GAIN_B3_CTL, 0, -84,
+			  40, digital_gain),
+	SOC_SINGLE_SX_TLV("IIR1 INP3 Volume",
+			  WCD9335_CDC_SIDETONE_IIR1_IIR_GAIN_B4_CTL, 0, -84,
+			  40, digital_gain),
+
+	SOC_SINGLE_EXT("ANC Slot", SND_SOC_NOPM, 0, 100, 0, tasha_get_anc_slot,
+		       tasha_put_anc_slot),
+	SOC_ENUM_EXT("ANC Function", tasha_anc_func_enum, tasha_get_anc_func,
+		     tasha_put_anc_func),
+
+	SOC_ENUM_EXT("CLK MODE", tasha_clkmode_enum, tasha_get_clkmode,
+		     tasha_put_clkmode),
+
+	SOC_ENUM("TX0 HPF cut off", cf_dec0_enum),
+	SOC_ENUM("TX1 HPF cut off", cf_dec1_enum),
+	SOC_ENUM("TX2 HPF cut off", cf_dec2_enum),
+	SOC_ENUM("TX3 HPF cut off", cf_dec3_enum),
+	SOC_ENUM("TX4 HPF cut off", cf_dec4_enum),
+	SOC_ENUM("TX5 HPF cut off", cf_dec5_enum),
+	SOC_ENUM("TX6 HPF cut off", cf_dec6_enum),
+	SOC_ENUM("TX7 HPF cut off", cf_dec7_enum),
+	SOC_ENUM("TX8 HPF cut off", cf_dec8_enum),
+
+	SOC_ENUM("RX INT0_1 HPF cut off", cf_int0_1_enum),
+	SOC_ENUM("RX INT0_2 HPF cut off", cf_int0_2_enum),
+	SOC_ENUM("RX INT1_1 HPF cut off", cf_int1_1_enum),
+	SOC_ENUM("RX INT1_2 HPF cut off", cf_int1_2_enum),
+	SOC_ENUM("RX INT2_1 HPF cut off", cf_int2_1_enum),
+	SOC_ENUM("RX INT2_2 HPF cut off", cf_int2_2_enum),
+	SOC_ENUM("RX INT3_1 HPF cut off", cf_int3_1_enum),
+	SOC_ENUM("RX INT3_2 HPF cut off", cf_int3_2_enum),
+	SOC_ENUM("RX INT4_1 HPF cut off", cf_int4_1_enum),
+	SOC_ENUM("RX INT4_2 HPF cut off", cf_int4_2_enum),
+	SOC_ENUM("RX INT5_1 HPF cut off", cf_int5_1_enum),
+	SOC_ENUM("RX INT5_2 HPF cut off", cf_int5_2_enum),
+	SOC_ENUM("RX INT6_1 HPF cut off", cf_int6_1_enum),
+	SOC_ENUM("RX INT6_2 HPF cut off", cf_int6_2_enum),
+	SOC_ENUM("RX INT7_1 HPF cut off", cf_int7_1_enum),
+	SOC_ENUM("RX INT7_2 HPF cut off", cf_int7_2_enum),
+	SOC_ENUM("RX INT8_1 HPF cut off", cf_int8_1_enum),
+	SOC_ENUM("RX INT8_2 HPF cut off", cf_int8_2_enum),
+
+	SOC_SINGLE_EXT("IIR0 Enable Band1", IIR0, BAND1, 1, 0,
+	tasha_get_iir_enable_audio_mixer, tasha_put_iir_enable_audio_mixer),
+	SOC_SINGLE_EXT("IIR0 Enable Band2", IIR0, BAND2, 1, 0,
+	tasha_get_iir_enable_audio_mixer, tasha_put_iir_enable_audio_mixer),
+	SOC_SINGLE_EXT("IIR0 Enable Band3", IIR0, BAND3, 1, 0,
+	tasha_get_iir_enable_audio_mixer, tasha_put_iir_enable_audio_mixer),
+	SOC_SINGLE_EXT("IIR0 Enable Band4", IIR0, BAND4, 1, 0,
+	tasha_get_iir_enable_audio_mixer, tasha_put_iir_enable_audio_mixer),
+	SOC_SINGLE_EXT("IIR0 Enable Band5", IIR0, BAND5, 1, 0,
+	tasha_get_iir_enable_audio_mixer, tasha_put_iir_enable_audio_mixer),
+	SOC_SINGLE_EXT("IIR1 Enable Band1", IIR1, BAND1, 1, 0,
+	tasha_get_iir_enable_audio_mixer, tasha_put_iir_enable_audio_mixer),
+	SOC_SINGLE_EXT("IIR1 Enable Band2", IIR1, BAND2, 1, 0,
+	tasha_get_iir_enable_audio_mixer, tasha_put_iir_enable_audio_mixer),
+	SOC_SINGLE_EXT("IIR1 Enable Band3", IIR1, BAND3, 1, 0,
+	tasha_get_iir_enable_audio_mixer, tasha_put_iir_enable_audio_mixer),
+	SOC_SINGLE_EXT("IIR1 Enable Band4", IIR1, BAND4, 1, 0,
+	tasha_get_iir_enable_audio_mixer, tasha_put_iir_enable_audio_mixer),
+	SOC_SINGLE_EXT("IIR1 Enable Band5", IIR1, BAND5, 1, 0,
+	tasha_get_iir_enable_audio_mixer, tasha_put_iir_enable_audio_mixer),
+
+	SOC_SINGLE_MULTI_EXT("IIR0 Band1", IIR0, BAND1, 255, 0, 5,
+	tasha_get_iir_band_audio_mixer, tasha_put_iir_band_audio_mixer),
+	SOC_SINGLE_MULTI_EXT("IIR0 Band2", IIR0, BAND2, 255, 0, 5,
+	tasha_get_iir_band_audio_mixer, tasha_put_iir_band_audio_mixer),
+	SOC_SINGLE_MULTI_EXT("IIR0 Band3", IIR0, BAND3, 255, 0, 5,
+	tasha_get_iir_band_audio_mixer, tasha_put_iir_band_audio_mixer),
+	SOC_SINGLE_MULTI_EXT("IIR0 Band4", IIR0, BAND4, 255, 0, 5,
+	tasha_get_iir_band_audio_mixer, tasha_put_iir_band_audio_mixer),
+	SOC_SINGLE_MULTI_EXT("IIR0 Band5", IIR0, BAND5, 255, 0, 5,
+	tasha_get_iir_band_audio_mixer, tasha_put_iir_band_audio_mixer),
+	SOC_SINGLE_MULTI_EXT("IIR1 Band1", IIR1, BAND1, 255, 0, 5,
+	tasha_get_iir_band_audio_mixer, tasha_put_iir_band_audio_mixer),
+	SOC_SINGLE_MULTI_EXT("IIR1 Band2", IIR1, BAND2, 255, 0, 5,
+	tasha_get_iir_band_audio_mixer, tasha_put_iir_band_audio_mixer),
+	SOC_SINGLE_MULTI_EXT("IIR1 Band3", IIR1, BAND3, 255, 0, 5,
+	tasha_get_iir_band_audio_mixer, tasha_put_iir_band_audio_mixer),
+	SOC_SINGLE_MULTI_EXT("IIR1 Band4", IIR1, BAND4, 255, 0, 5,
+	tasha_get_iir_band_audio_mixer, tasha_put_iir_band_audio_mixer),
+	SOC_SINGLE_MULTI_EXT("IIR1 Band5", IIR1, BAND5, 255, 0, 5,
+	tasha_get_iir_band_audio_mixer, tasha_put_iir_band_audio_mixer),
+
+	SOC_SINGLE_EXT("COMP1 Switch", SND_SOC_NOPM, COMPANDER_1, 1, 0,
+		       tasha_get_compander, tasha_set_compander),
+	SOC_SINGLE_EXT("COMP2 Switch", SND_SOC_NOPM, COMPANDER_2, 1, 0,
+		       tasha_get_compander, tasha_set_compander),
+	SOC_SINGLE_EXT("COMP3 Switch", SND_SOC_NOPM, COMPANDER_3, 1, 0,
+		       tasha_get_compander, tasha_set_compander),
+	SOC_SINGLE_EXT("COMP4 Switch", SND_SOC_NOPM, COMPANDER_4, 1, 0,
+		       tasha_get_compander, tasha_set_compander),
+	SOC_SINGLE_EXT("COMP5 Switch", SND_SOC_NOPM, COMPANDER_5, 1, 0,
+		       tasha_get_compander, tasha_set_compander),
+	SOC_SINGLE_EXT("COMP6 Switch", SND_SOC_NOPM, COMPANDER_6, 1, 0,
+		       tasha_get_compander, tasha_set_compander),
+	SOC_SINGLE_EXT("COMP7 Switch", SND_SOC_NOPM, COMPANDER_7, 1, 0,
+		       tasha_get_compander, tasha_set_compander),
+	SOC_SINGLE_EXT("COMP8 Switch", SND_SOC_NOPM, COMPANDER_8, 1, 0,
+		       tasha_get_compander, tasha_set_compander),
+
+	SOC_ENUM_EXT("RX HPH Mode", rx_hph_mode_mux_enum,
+		       tasha_rx_hph_mode_get, tasha_rx_hph_mode_put),
+
+	SOC_ENUM_EXT("MAD Input", tasha_conn_mad_enum,
+		     tasha_mad_input_get, tasha_mad_input_put),
+	SOC_SINGLE_EXT("LDO_H Enable", SND_SOC_NOPM, 0, 1, 0,
+			tasha_enable_ldo_h_get, tasha_enable_ldo_h_put),
+
+	SOC_SINGLE_EXT("DMIC1_CLK_PIN_MODE", SND_SOC_NOPM, 17, 1, 0,
+		       tasha_pinctl_mode_get, tasha_pinctl_mode_put),
+
+	SOC_SINGLE_EXT("DMIC1_DATA_PIN_MODE", SND_SOC_NOPM, 18, 1, 0,
+		       tasha_pinctl_mode_get, tasha_pinctl_mode_put),
+
+	SOC_SINGLE_EXT("DMIC2_CLK_PIN_MODE", SND_SOC_NOPM, 19, 1, 0,
+		       tasha_pinctl_mode_get, tasha_pinctl_mode_put),
+
+	SOC_SINGLE_EXT("DMIC2_DATA_PIN_MODE", SND_SOC_NOPM, 20, 1, 0,
+		       tasha_pinctl_mode_get, tasha_pinctl_mode_put),
+
+	SOC_SINGLE_EXT("DMIC3_CLK_PIN_MODE", SND_SOC_NOPM, 21, 1, 0,
+		       tasha_pinctl_mode_get, tasha_pinctl_mode_put),
+
+	SOC_SINGLE_EXT("DMIC3_DATA_PIN_MODE", SND_SOC_NOPM, 22, 1, 0,
+		       tasha_pinctl_mode_get, tasha_pinctl_mode_put),
+	SOC_ENUM_EXT("AMIC_1_2 PWR MODE", amic_pwr_lvl_enum,
+		       tasha_amic_pwr_lvl_get, tasha_amic_pwr_lvl_put),
+	SOC_ENUM_EXT("AMIC_3_4 PWR MODE", amic_pwr_lvl_enum,
+		       tasha_amic_pwr_lvl_get, tasha_amic_pwr_lvl_put),
+	SOC_ENUM_EXT("AMIC_5_6 PWR MODE", amic_pwr_lvl_enum,
+		       tasha_amic_pwr_lvl_get, tasha_amic_pwr_lvl_put),
+
+	SOC_SINGLE_MULTI_EXT("Vbat ADC data", SND_SOC_NOPM, 0, 0xFFFF, 0, 2,
+			tasha_vbat_adc_data_get, NULL),
+
+	SOC_ENUM_EXT("GSM mode Enable", tasha_vbat_gsm_mode_enum,
+			tasha_vbat_gsm_mode_func_get,
+			tasha_vbat_gsm_mode_func_put),
+};
+
+static int tasha_put_dec_enum(struct snd_kcontrol *kcontrol,
+			      struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_dapm_widget_list *wlist =
+					dapm_kcontrol_get_wlist(kcontrol);
+	struct snd_soc_dapm_widget *widget = wlist->widgets[0];
+	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(widget->dapm);
+	struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
+	unsigned int val;
+	u16 mic_sel_reg;
+	u8 mic_sel;
+
+	val = ucontrol->value.enumerated.item[0];
+	if (val >= e->items)
+		return -EINVAL;
+
+	dev_dbg(codec->dev, "%s: wname: %s, val: 0x%x\n", __func__,
+		widget->name, val);
+
+	switch (e->reg) {
+	case WCD9335_CDC_TX_INP_MUX_ADC_MUX0_CFG1:
+		mic_sel_reg = WCD9335_CDC_TX0_TX_PATH_CFG0;
+		break;
+	case WCD9335_CDC_TX_INP_MUX_ADC_MUX1_CFG1:
+		mic_sel_reg = WCD9335_CDC_TX1_TX_PATH_CFG0;
+		break;
+	case WCD9335_CDC_TX_INP_MUX_ADC_MUX2_CFG1:
+		mic_sel_reg = WCD9335_CDC_TX2_TX_PATH_CFG0;
+		break;
+	case WCD9335_CDC_TX_INP_MUX_ADC_MUX3_CFG1:
+		mic_sel_reg = WCD9335_CDC_TX3_TX_PATH_CFG0;
+		break;
+	case WCD9335_CDC_TX_INP_MUX_ADC_MUX4_CFG0:
+		mic_sel_reg = WCD9335_CDC_TX4_TX_PATH_CFG0;
+		break;
+	case WCD9335_CDC_TX_INP_MUX_ADC_MUX5_CFG0:
+		mic_sel_reg = WCD9335_CDC_TX5_TX_PATH_CFG0;
+		break;
+	case WCD9335_CDC_TX_INP_MUX_ADC_MUX6_CFG0:
+		mic_sel_reg = WCD9335_CDC_TX6_TX_PATH_CFG0;
+		break;
+	case WCD9335_CDC_TX_INP_MUX_ADC_MUX7_CFG0:
+		mic_sel_reg = WCD9335_CDC_TX7_TX_PATH_CFG0;
+		break;
+	case WCD9335_CDC_TX_INP_MUX_ADC_MUX8_CFG0:
+		mic_sel_reg = WCD9335_CDC_TX8_TX_PATH_CFG0;
+		break;
+	default:
+		dev_err(codec->dev, "%s: e->reg: 0x%x not expected\n",
+			__func__, e->reg);
+		return -EINVAL;
+	}
+
+	/* TX path CFG0 bit 7 selects the input: 0 = ADC (AMIC), 1 = DMIC */
+	mic_sel = val ? 0x0 : 0x1;
+	snd_soc_update_bits(codec, mic_sel_reg, 1 << 7, mic_sel << 7);
+
+	return snd_soc_dapm_put_enum_double(kcontrol, ucontrol);
+}
+
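+/*
+ * DEM input mux put handler: besides switching the DEM input, a
+ * non-default selection also sets the look-ahead delay bit in the
+ * matching RX0..RX2 path CFG0 register.
+ */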
+static int tasha_int_dem_inp_mux_put(struct snd_kcontrol *kcontrol,
+				 struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_dapm_widget_list *wlist =
+					dapm_kcontrol_get_wlist(kcontrol);
+	struct snd_soc_dapm_widget *widget = wlist->widgets[0];
+	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(widget->dapm);
+	struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
+	unsigned int val;
+	unsigned short look_ahead_dly_reg = WCD9335_CDC_RX0_RX_PATH_CFG0;
+
+	val = ucontrol->value.enumerated.item[0];
+	if (val >= e->items)
+		return -EINVAL;
+
+	dev_dbg(codec->dev, "%s: wname: %s, val: 0x%x\n", __func__,
+		widget->name, val);
+
+	if (e->reg == WCD9335_CDC_RX0_RX_PATH_SEC0)
+		look_ahead_dly_reg = WCD9335_CDC_RX0_RX_PATH_CFG0;
+	else if (e->reg == WCD9335_CDC_RX1_RX_PATH_SEC0)
+		look_ahead_dly_reg = WCD9335_CDC_RX1_RX_PATH_CFG0;
+	else if (e->reg == WCD9335_CDC_RX2_RX_PATH_SEC0)
+		look_ahead_dly_reg = WCD9335_CDC_RX2_RX_PATH_CFG0;
+
+	/* Set Look Ahead Delay */
+	snd_soc_update_bits(codec, look_ahead_dly_reg,
+			    0x08, (val ? 0x08 : 0x00));
+	/* Set DEM INP Select */
+	return snd_soc_dapm_put_enum_double(kcontrol, ucontrol);
+}
+
+static int tasha_ear_pa_gain_get(struct snd_kcontrol *kcontrol,
+				struct snd_ctl_elem_value *ucontrol)
+{
+	u8 ear_pa_gain;
+	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
+
+	ear_pa_gain = snd_soc_read(codec, WCD9335_ANA_EAR);
+
+	ear_pa_gain = (ear_pa_gain & 0x70) >> 4;
+
+	ucontrol->value.integer.value[0] = ear_pa_gain;
+
+	dev_dbg(codec->dev, "%s: ear_pa_gain = 0x%x\n", __func__,
+		ear_pa_gain);
+
+	return 0;
+}
+
+static int tasha_ear_pa_gain_put(struct snd_kcontrol *kcontrol,
+				struct snd_ctl_elem_value *ucontrol)
+{
+	u8 ear_pa_gain;
+	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
+
+	dev_dbg(codec->dev, "%s: ucontrol->value.integer.value[0]  = %ld\n",
+			__func__, ucontrol->value.integer.value[0]);
+
+	ear_pa_gain =  ucontrol->value.integer.value[0] << 4;
+
+	snd_soc_update_bits(codec, WCD9335_ANA_EAR, 0x70, ear_pa_gain);
+	return 0;
+}
+
+static int tasha_ear_spkr_pa_gain_get(struct snd_kcontrol *kcontrol,
+				      struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
+	struct tasha_priv *tasha = snd_soc_codec_get_drvdata(codec);
+
+	ucontrol->value.integer.value[0] = tasha->ear_spkr_gain;
+
+	dev_dbg(codec->dev, "%s: ear_spkr_gain = %ld\n", __func__,
+		ucontrol->value.integer.value[0]);
+
+	return 0;
+}
+
+static int tasha_ear_spkr_pa_gain_put(struct snd_kcontrol *kcontrol,
+				      struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
+	struct tasha_priv *tasha = snd_soc_codec_get_drvdata(codec);
+
+	dev_dbg(codec->dev, "%s: ucontrol->value.integer.value[0]  = %ld\n",
+		__func__, ucontrol->value.integer.value[0]);
+
+	tasha->ear_spkr_gain = ucontrol->value.integer.value[0];
+
+	return 0;
+}
+
+static int tasha_config_compander(struct snd_soc_codec *codec, int interp_n,
+				  int event)
+{
+	struct tasha_priv *tasha = snd_soc_codec_get_drvdata(codec);
+	int comp;
+	u16 comp_ctl0_reg, rx_path_cfg0_reg;
+
+	/* EAR does not have compander */
+	if (!interp_n)
+		return 0;
+
+	comp = interp_n - 1;
+	dev_dbg(codec->dev, "%s: event %d compander %d, enabled %d\n",
+		__func__, event, comp + 1, tasha->comp_enabled[comp]);
+
+	if (!tasha->comp_enabled[comp])
+		return 0;
+
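+	/*
+	 * Per-channel register strides: COMPANDERn_CTL0 blocks are
+	 * 8 registers apart, RXn_RX_PATH_CFG0 blocks are 20 apart.
+	 */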
+	comp_ctl0_reg = WCD9335_CDC_COMPANDER1_CTL0 + (comp * 8);
+	rx_path_cfg0_reg = WCD9335_CDC_RX1_RX_PATH_CFG0 + (comp * 20);
+
+	if (SND_SOC_DAPM_EVENT_ON(event)) {
+		/* Enable Compander Clock */
+		snd_soc_update_bits(codec, comp_ctl0_reg, 0x01, 0x01);
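+		/* Toggle compander soft reset */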
+		snd_soc_update_bits(codec, comp_ctl0_reg, 0x02, 0x02);
+		snd_soc_update_bits(codec, comp_ctl0_reg, 0x02, 0x00);
+		snd_soc_update_bits(codec, rx_path_cfg0_reg, 0x02, 0x02);
+	}
+
+	if (SND_SOC_DAPM_EVENT_OFF(event)) {
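+		/*
+		 * Halt the compander, take the RX path out of compander
+		 * mode, toggle soft reset, then gate the compander clock.
+		 */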
+		snd_soc_update_bits(codec, comp_ctl0_reg, 0x04, 0x04);
+		snd_soc_update_bits(codec, rx_path_cfg0_reg, 0x02, 0x00);
+		snd_soc_update_bits(codec, comp_ctl0_reg, 0x02, 0x02);
+		snd_soc_update_bits(codec, comp_ctl0_reg, 0x02, 0x00);
+		snd_soc_update_bits(codec, comp_ctl0_reg, 0x01, 0x00);
+		snd_soc_update_bits(codec, comp_ctl0_reg, 0x04, 0x00);
+	}
+
+	return 0;
+}
+
+static int tasha_codec_config_mad(struct snd_soc_codec *codec)
+{
+	int ret = 0;
+	int idx;
+	const struct firmware *fw;
+	struct firmware_cal *hwdep_cal = NULL;
+	struct wcd_mad_audio_cal *mad_cal = NULL;
+	const void *data;
+	const char *filename = TASHA_MAD_AUDIO_FIRMWARE_PATH;
+	struct tasha_priv *tasha = snd_soc_codec_get_drvdata(codec);
+	size_t cal_size;
+
+	hwdep_cal = wcdcal_get_fw_cal(tasha->fw_data, WCD9XXX_MAD_CAL);
+	if (hwdep_cal) {
+		data = hwdep_cal->data;
+		cal_size = hwdep_cal->size;
+		dev_dbg(codec->dev, "%s: using hwdep calibration\n",
+			__func__);
+	} else {
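+		/* No hwdep calibration pushed; fall back to a firmware file */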
+		ret = request_firmware(&fw, filename, codec->dev);
+		if (ret || !fw) {
+			dev_err(codec->dev,
+				"%s: MAD firmware acquire failed, err = %d\n",
+				__func__, ret);
+			return -ENODEV;
+		}
+		data = fw->data;
+		cal_size = fw->size;
+		dev_dbg(codec->dev, "%s: using request_firmware calibration\n",
+			__func__);
+	}
+
+	if (cal_size < sizeof(*mad_cal)) {
+		dev_err(codec->dev,
+			"%s: Incorrect size %zd for MAD Cal, expected %zd\n",
+			__func__, cal_size, sizeof(*mad_cal));
+		ret = -EINVAL;
+		goto done;
+	}
+
+	mad_cal = (struct wcd_mad_audio_cal *) (data);
+	if (!mad_cal) {
+		dev_err(codec->dev,
+			"%s: Invalid calibration data\n",
+			__func__);
+		ret = -EINVAL;
+		goto done;
+	}
+
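+	/* Microphone */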
+	snd_soc_write(codec, WCD9335_SOC_MAD_MAIN_CTL_2,
+		      mad_cal->microphone_info.cycle_time);
+	snd_soc_update_bits(codec, WCD9335_SOC_MAD_MAIN_CTL_1, 0xFF << 3,
+			    ((uint16_t)mad_cal->microphone_info.settle_time)
+			    << 3);
+
+	/* Audio */
+	snd_soc_write(codec, WCD9335_SOC_MAD_AUDIO_CTL_8,
+		      mad_cal->audio_info.rms_omit_samples);
+	snd_soc_update_bits(codec, WCD9335_SOC_MAD_AUDIO_CTL_1,
+			    0x07 << 4, mad_cal->audio_info.rms_comp_time << 4);
+	snd_soc_update_bits(codec, WCD9335_SOC_MAD_AUDIO_CTL_2, 0x03 << 2,
+			    mad_cal->audio_info.detection_mechanism << 2);
+	snd_soc_write(codec, WCD9335_SOC_MAD_AUDIO_CTL_7,
+		      mad_cal->audio_info.rms_diff_threshold & 0x3F);
+	snd_soc_write(codec, WCD9335_SOC_MAD_AUDIO_CTL_5,
+		      mad_cal->audio_info.rms_threshold_lsb);
+	snd_soc_write(codec, WCD9335_SOC_MAD_AUDIO_CTL_6,
+		      mad_cal->audio_info.rms_threshold_msb);
+
+	for (idx = 0; idx < ARRAY_SIZE(mad_cal->audio_info.iir_coefficients);
+	     idx++) {
+		snd_soc_update_bits(codec, WCD9335_SOC_MAD_AUDIO_IIR_CTL_PTR,
+				    0x3F, idx);
+		snd_soc_write(codec, WCD9335_SOC_MAD_AUDIO_IIR_CTL_VAL,
+			      mad_cal->audio_info.iir_coefficients[idx]);
+		dev_dbg(codec->dev, "%s:MAD Audio IIR Coef[%d] = 0X%x",
+			__func__, idx,
+			mad_cal->audio_info.iir_coefficients[idx]);
+	}
+
+	/* Beacon */
+	snd_soc_write(codec, WCD9335_SOC_MAD_BEACON_CTL_8,
+		      mad_cal->beacon_info.rms_omit_samples);
+	snd_soc_update_bits(codec, WCD9335_SOC_MAD_BEACON_CTL_1,
+			    0x07 << 4, mad_cal->beacon_info.rms_comp_time << 4);
+	snd_soc_update_bits(codec, WCD9335_SOC_MAD_BEACON_CTL_2, 0x03 << 2,
+			    mad_cal->beacon_info.detection_mechanism << 2);
+	snd_soc_write(codec, WCD9335_SOC_MAD_BEACON_CTL_7,
+		      mad_cal->beacon_info.rms_diff_threshold & 0x1F);
+	snd_soc_write(codec, WCD9335_SOC_MAD_BEACON_CTL_5,
+		      mad_cal->beacon_info.rms_threshold_lsb);
+	snd_soc_write(codec, WCD9335_SOC_MAD_BEACON_CTL_6,
+		      mad_cal->beacon_info.rms_threshold_msb);
+
+	for (idx = 0; idx < ARRAY_SIZE(mad_cal->beacon_info.iir_coefficients);
+	     idx++) {
+		snd_soc_update_bits(codec, WCD9335_SOC_MAD_BEACON_IIR_CTL_PTR,
+				    0x3F, idx);
+		snd_soc_write(codec, WCD9335_SOC_MAD_BEACON_IIR_CTL_VAL,
+			      mad_cal->beacon_info.iir_coefficients[idx]);
+		dev_dbg(codec->dev, "%s:MAD Beacon IIR Coef[%d] = 0X%x",
+			__func__, idx,
+			mad_cal->beacon_info.iir_coefficients[idx]);
+	}
+
+	/* Ultrasound */
+	snd_soc_update_bits(codec, WCD9335_SOC_MAD_ULTR_CTL_1,
+			    0x07 << 4,
+			    mad_cal->ultrasound_info.rms_comp_time << 4);
+	snd_soc_update_bits(codec, WCD9335_SOC_MAD_ULTR_CTL_2, 0x03 << 2,
+			    mad_cal->ultrasound_info.detection_mechanism << 2);
+	snd_soc_write(codec, WCD9335_SOC_MAD_ULTR_CTL_7,
+		      mad_cal->ultrasound_info.rms_diff_threshold & 0x1F);
+	snd_soc_write(codec, WCD9335_SOC_MAD_ULTR_CTL_5,
+		      mad_cal->ultrasound_info.rms_threshold_lsb);
+	snd_soc_write(codec, WCD9335_SOC_MAD_ULTR_CTL_6,
+		      mad_cal->ultrasound_info.rms_threshold_msb);
+
+done:
+	if (!hwdep_cal)
+		release_firmware(fw);
+
+	return ret;
+}
+
+static int tasha_codec_enable_mad(struct snd_soc_dapm_widget *w,
+	struct snd_kcontrol *kcontrol, int event)
+{
+	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+	int ret = 0;
+
+	dev_dbg(codec->dev,
+		"%s: event = %d\n", __func__, event);
+
+	/* Skip MAD configuration when CPE input is DEC1 */
+	if (snd_soc_read(codec, WCD9335_CPE_SS_SVA_CFG) & 0x01)
+		return ret;
+
+	switch (event) {
+	case SND_SOC_DAPM_PRE_PMU:
+
+		/* Turn on MAD clk */
+		snd_soc_update_bits(codec, WCD9335_CPE_SS_MAD_CTL,
+				    0x01, 0x01);
+
+		/* Undo reset for MAD */
+		snd_soc_update_bits(codec, WCD9335_CPE_SS_MAD_CTL,
+				    0x02, 0x00);
+		ret = tasha_codec_config_mad(codec);
+		if (ret)
+			dev_err(codec->dev,
+				"%s: Failed to config MAD, err = %d\n",
+				__func__, ret);
+		break;
+	case SND_SOC_DAPM_POST_PMD:
+		/* Reset the MAD block */
+		snd_soc_update_bits(codec, WCD9335_CPE_SS_MAD_CTL,
+				    0x02, 0x02);
+		/* Turn off MAD clk */
+		snd_soc_update_bits(codec, WCD9335_CPE_SS_MAD_CTL,
+				    0x01, 0x00);
+		break;
+	}
+
+	return ret;
+}
+
+static int tasha_codec_configure_cpe_input(struct snd_soc_dapm_widget *w,
+	struct snd_kcontrol *kcontrol, int event)
+{
+	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+
+	dev_dbg(codec->dev,
+		"%s: event = %d\n", __func__, event);
+
+	switch (event) {
+	case SND_SOC_DAPM_PRE_PMU:
+		/* Configure CPE input as DEC1 */
+		snd_soc_update_bits(codec, WCD9335_CPE_SS_SVA_CFG,
+				    0x01, 0x01);
+
+		/* Configure DEC1 Tx out with sample rate as 16K */
+		snd_soc_update_bits(codec, WCD9335_CDC_TX1_TX_PATH_CTL,
+				    0x0F, 0x01);
+
+		break;
+	case SND_SOC_DAPM_POST_PMD:
+		/* Restore DEC1 Tx out sample rate to its default */
+		snd_soc_update_bits(codec, WCD9335_CDC_TX1_TX_PATH_CTL,
+				    0x0F, 0x04);
+		snd_soc_update_bits(codec, WCD9335_CPE_SS_SVA_CFG,
+				    0x01, 0x00);
+
+		break;
+	}
+
+	return 0;
+}
+
+static int tasha_codec_aif4_mixer_switch_get(struct snd_kcontrol *kcontrol,
+		struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_dapm_widget_list *wlist =
+			dapm_kcontrol_get_wlist(kcontrol);
+	struct snd_soc_dapm_widget *widget = wlist->widgets[0];
+	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(widget->dapm);
+	struct tasha_priv *tasha_p = snd_soc_codec_get_drvdata(codec);
+
+	if (test_bit(AIF4_SWITCH_VALUE, &tasha_p->status_mask))
+		ucontrol->value.integer.value[0] = 1;
+	else
+		ucontrol->value.integer.value[0] = 0;
+
+	dev_dbg(codec->dev, "%s: AIF4 switch value = %ld\n",
+		__func__, ucontrol->value.integer.value[0]);
+	return 0;
+}
+
+static int tasha_codec_aif4_mixer_switch_put(struct snd_kcontrol *kcontrol,
+		struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_dapm_widget_list *wlist =
+			dapm_kcontrol_get_wlist(kcontrol);
+	struct snd_soc_dapm_widget *widget = wlist->widgets[0];
+	struct snd_soc_dapm_update *update = NULL;
+	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(widget->dapm);
+	struct tasha_priv *tasha_p = snd_soc_codec_get_drvdata(codec);
+
+	dev_dbg(codec->dev, "%s: AIF4 switch value = %ld\n",
+		__func__, ucontrol->value.integer.value[0]);
+
+	if (ucontrol->value.integer.value[0]) {
+		snd_soc_dapm_mixer_update_power(widget->dapm,
+						kcontrol, 1, update);
+		set_bit(AIF4_SWITCH_VALUE, &tasha_p->status_mask);
+	} else {
+		snd_soc_dapm_mixer_update_power(widget->dapm,
+						kcontrol, 0, update);
+		clear_bit(AIF4_SWITCH_VALUE, &tasha_p->status_mask);
+	}
+
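+	/* Report a value change so listeners are notified */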
+	return 1;
+}
+
+static const char * const tasha_ear_pa_gain_text[] = {
+	"G_6_DB", "G_4P5_DB", "G_3_DB", "G_1P5_DB",
+	"G_0_DB", "G_M2P5_DB", "UNDEFINED", "G_M12_DB"
+};
+
+static const char * const tasha_ear_spkr_pa_gain_text[] = {
+	"G_DEFAULT", "G_0_DB", "G_1_DB", "G_2_DB", "G_3_DB", "G_4_DB",
+	"G_5_DB", "G_6_DB"
+};
+
+static const struct soc_enum tasha_ear_pa_gain_enum =
+	SOC_ENUM_SINGLE_EXT(ARRAY_SIZE(tasha_ear_pa_gain_text),
+			tasha_ear_pa_gain_text);
+
+static const struct soc_enum tasha_ear_spkr_pa_gain_enum =
+	SOC_ENUM_SINGLE_EXT(ARRAY_SIZE(tasha_ear_spkr_pa_gain_text),
+			    tasha_ear_spkr_pa_gain_text);
+
+static const struct snd_kcontrol_new tasha_analog_gain_controls[] = {
+	SOC_ENUM_EXT("EAR PA Gain", tasha_ear_pa_gain_enum,
+		tasha_ear_pa_gain_get, tasha_ear_pa_gain_put),
+
+	SOC_ENUM_EXT("EAR SPKR PA Gain", tasha_ear_spkr_pa_gain_enum,
+		     tasha_ear_spkr_pa_gain_get, tasha_ear_spkr_pa_gain_put),
+
+	SOC_SINGLE_TLV("HPHL Volume", WCD9335_HPH_L_EN, 0, 20, 1,
+		line_gain),
+	SOC_SINGLE_TLV("HPHR Volume", WCD9335_HPH_R_EN, 0, 20, 1,
+		line_gain),
+	SOC_SINGLE_TLV("LINEOUT1 Volume", WCD9335_DIFF_LO_LO1_COMPANDER,
+			3, 16, 1, line_gain),
+	SOC_SINGLE_TLV("LINEOUT2 Volume", WCD9335_DIFF_LO_LO2_COMPANDER,
+			3, 16, 1, line_gain),
+	SOC_SINGLE_TLV("LINEOUT3 Volume", WCD9335_SE_LO_LO3_GAIN, 0, 20, 1,
+			line_gain),
+	SOC_SINGLE_TLV("LINEOUT4 Volume", WCD9335_SE_LO_LO4_GAIN, 0, 20, 1,
+			line_gain),
+
+	SOC_SINGLE_TLV("ADC1 Volume", WCD9335_ANA_AMIC1, 0, 20, 0,
+			analog_gain),
+	SOC_SINGLE_TLV("ADC2 Volume", WCD9335_ANA_AMIC2, 0, 20, 0,
+			analog_gain),
+	SOC_SINGLE_TLV("ADC3 Volume", WCD9335_ANA_AMIC3, 0, 20, 0,
+			analog_gain),
+	SOC_SINGLE_TLV("ADC4 Volume", WCD9335_ANA_AMIC4, 0, 20, 0,
+			analog_gain),
+	SOC_SINGLE_TLV("ADC5 Volume", WCD9335_ANA_AMIC5, 0, 20, 0,
+			analog_gain),
+	SOC_SINGLE_TLV("ADC6 Volume", WCD9335_ANA_AMIC6, 0, 20, 0,
+			analog_gain),
+};
+
+static const char * const spl_src0_mux_text[] = {
+	"ZERO", "SRC_IN_HPHL", "SRC_IN_LO1",
+};
+
+static const char * const spl_src1_mux_text[] = {
+	"ZERO", "SRC_IN_HPHR", "SRC_IN_LO2",
+};
+
+static const char * const spl_src2_mux_text[] = {
+	"ZERO", "SRC_IN_LO3", "SRC_IN_SPKRL",
+};
+
+static const char * const spl_src3_mux_text[] = {
+	"ZERO", "SRC_IN_LO4", "SRC_IN_SPKRR",
+};
+
+static const char * const rx_int0_7_mix_mux_text[] = {
+	"ZERO", "RX0", "RX1", "RX2", "RX3", "RX4", "RX5",
+	"RX6", "RX7", "PROXIMITY"
+};
+
+static const char * const rx_int_mix_mux_text[] = {
+	"ZERO", "RX0", "RX1", "RX2", "RX3", "RX4", "RX5",
+	"RX6", "RX7"
+};
+
+static const char * const rx_prim_mix_text[] = {
+	"ZERO", "DEC0", "DEC1", "IIR0", "IIR1", "RX0", "RX1", "RX2",
+	"RX3", "RX4", "RX5", "RX6", "RX7"
+};
+
+static const char * const rx_sidetone_mix_text[] = {
+	"ZERO", "SRC0", "SRC1", "SRC_SUM"
+};
+
+static const char * const sb_tx0_mux_text[] = {
+	"ZERO", "RX_MIX_TX0", "DEC0", "DEC0_192"
+};
+
+static const char * const sb_tx1_mux_text[] = {
+	"ZERO", "RX_MIX_TX1", "DEC1", "DEC1_192"
+};
+
+static const char * const sb_tx2_mux_text[] = {
+	"ZERO", "RX_MIX_TX2", "DEC2", "DEC2_192"
+};
+
+static const char * const sb_tx3_mux_text[] = {
+	"ZERO", "RX_MIX_TX3", "DEC3", "DEC3_192"
+};
+
+static const char * const sb_tx4_mux_text[] = {
+	"ZERO", "RX_MIX_TX4", "DEC4", "DEC4_192"
+};
+
+static const char * const sb_tx5_mux_text[] = {
+	"ZERO", "RX_MIX_TX5", "DEC5", "DEC5_192"
+};
+
+static const char * const sb_tx6_mux_text[] = {
+	"ZERO", "RX_MIX_TX6", "DEC6", "DEC6_192"
+};
+
+static const char * const sb_tx7_mux_text[] = {
+	"ZERO", "RX_MIX_TX7", "DEC7", "DEC7_192"
+};
+
+static const char * const sb_tx8_mux_text[] = {
+	"ZERO", "RX_MIX_TX8", "DEC8", "DEC8_192"
+};
+
+static const char * const sb_tx9_mux_text[] = {
+	"ZERO", "DEC7", "DEC7_192"
+};
+
+static const char * const sb_tx10_mux_text[] = {
+	"ZERO", "DEC6", "DEC6_192"
+};
+
+static const char * const sb_tx11_mux_text[] = {
+	"DEC_0_5", "DEC_9_12", "MAD_AUDIO", "MAD_BRDCST"
+};
+
+static const char * const sb_tx11_inp1_mux_text[] = {
+	"ZERO", "DEC0", "DEC1", "DEC2", "DEC3", "DEC4",
+	"DEC5", "RX_MIX_TX5", "DEC9_10", "DEC11_12"
+};
+
+static const char * const sb_tx13_mux_text[] = {
+	"ZERO", "DEC5", "DEC5_192"
+};
+
+static const char * const tx13_inp_mux_text[] = {
+	"CDC_DEC_5", "MAD_BRDCST", "CPE_TX_PP"
+};
+
+static const char * const iir_inp_mux_text[] = {
+	"ZERO", "DEC0", "DEC1", "DEC2", "DEC3", "DEC4", "DEC5", "DEC6",
+	"DEC7", "DEC8",	"RX0", "RX1", "RX2", "RX3", "RX4", "RX5", "RX6", "RX7"
+};
+
+static const char * const rx_int_dem_inp_mux_text[] = {
+	"NORMAL_DSM_OUT", "CLSH_DSM_OUT",
+};
+
+static const char * const rx_int0_interp_mux_text[] = {
+	"ZERO", "RX INT0 MIX2",
+};
+
+static const char * const rx_int1_interp_mux_text[] = {
+	"ZERO", "RX INT1 MIX2",
+};
+
+static const char * const rx_int2_interp_mux_text[] = {
+	"ZERO", "RX INT2 MIX2",
+};
+
+static const char * const rx_int3_interp_mux_text[] = {
+	"ZERO", "RX INT3 MIX2",
+};
+
+static const char * const rx_int4_interp_mux_text[] = {
+	"ZERO", "RX INT4 MIX2",
+};
+
+static const char * const rx_int5_interp_mux_text[] = {
+	"ZERO", "RX INT5 MIX2",
+};
+
+static const char * const rx_int6_interp_mux_text[] = {
+	"ZERO", "RX INT6 MIX2",
+};
+
+static const char * const rx_int7_interp_mux_text[] = {
+	"ZERO", "RX INT7 MIX2",
+};
+
+static const char * const rx_int8_interp_mux_text[] = {
+	"ZERO", "RX INT8 SEC MIX"
+};
+
+static const char * const mad_sel_text[] = {
+	"SPE", "MSM"
+};
+
+static const char * const adc_mux_text[] = {
+	"DMIC", "AMIC", "ANC_FB_TUNE1", "ANC_FB_TUNE2"
+};
+
+static const char * const dmic_mux_text[] = {
+	"ZERO", "DMIC0", "DMIC1", "DMIC2", "DMIC3", "DMIC4", "DMIC5",
+	"SMIC0", "SMIC1", "SMIC2", "SMIC3"
+};
+
+static const char * const dmic_mux_alt_text[] = {
+	"ZERO", "DMIC0", "DMIC1", "DMIC2", "DMIC3", "DMIC4", "DMIC5",
+};
+
+static const char * const amic_mux_text[] = {
+	"ZERO", "ADC1", "ADC2", "ADC3", "ADC4", "ADC5", "ADC6"
+};
+
+static const char * const rx_echo_mux_text[] = {
+	"ZERO", "RX_MIX0", "RX_MIX1", "RX_MIX2", "RX_MIX3", "RX_MIX4",
+	"RX_MIX5", "RX_MIX6", "RX_MIX7", "RX_MIX8", "RX_MIX_VBAT5",
+	"RX_MIX_VBAT6",	"RX_MIX_VBAT7", "RX_MIX_VBAT8"
+};
+
+static const char * const anc0_fb_mux_text[] = {
+	"ZERO", "ANC_IN_HPHL", "ANC_IN_EAR", "ANC_IN_EAR_SPKR",
+	"ANC_IN_LO1"
+};
+
+static const char * const anc1_fb_mux_text[] = {
+	"ZERO", "ANC_IN_HPHR", "ANC_IN_LO2"
+};
+
+static const char * const native_mux_text[] = {
+	"OFF", "ON",
+};
+
+static const struct soc_enum spl_src0_mux_chain_enum =
+	SOC_ENUM_SINGLE(WCD9335_CDC_RX_INP_MUX_SPLINE_SRC_CFG0, 0, 3,
+			spl_src0_mux_text);
+
+static const struct soc_enum spl_src1_mux_chain_enum =
+	SOC_ENUM_SINGLE(WCD9335_CDC_RX_INP_MUX_SPLINE_SRC_CFG0, 2, 3,
+			spl_src1_mux_text);
+
+static const struct soc_enum spl_src2_mux_chain_enum =
+	SOC_ENUM_SINGLE(WCD9335_CDC_RX_INP_MUX_SPLINE_SRC_CFG0, 4, 3,
+			spl_src2_mux_text);
+
+static const struct soc_enum spl_src3_mux_chain_enum =
+	SOC_ENUM_SINGLE(WCD9335_CDC_RX_INP_MUX_SPLINE_SRC_CFG0, 6, 3,
+			spl_src3_mux_text);
+
+static const struct soc_enum rx_int0_2_mux_chain_enum =
+	SOC_ENUM_SINGLE(WCD9335_CDC_RX_INP_MUX_RX_INT0_CFG1, 0, 10,
+			rx_int0_7_mix_mux_text);
+
+static const struct soc_enum rx_int1_2_mux_chain_enum =
+	SOC_ENUM_SINGLE(WCD9335_CDC_RX_INP_MUX_RX_INT1_CFG1, 0, 9,
+			rx_int_mix_mux_text);
+
+static const struct soc_enum rx_int2_2_mux_chain_enum =
+	SOC_ENUM_SINGLE(WCD9335_CDC_RX_INP_MUX_RX_INT2_CFG1, 0, 9,
+			rx_int_mix_mux_text);
+
+static const struct soc_enum rx_int3_2_mux_chain_enum =
+	SOC_ENUM_SINGLE(WCD9335_CDC_RX_INP_MUX_RX_INT3_CFG1, 0, 9,
+			rx_int_mix_mux_text);
+
+static const struct soc_enum rx_int4_2_mux_chain_enum =
+	SOC_ENUM_SINGLE(WCD9335_CDC_RX_INP_MUX_RX_INT4_CFG1, 0, 9,
+			rx_int_mix_mux_text);
+
+static const struct soc_enum rx_int5_2_mux_chain_enum =
+	SOC_ENUM_SINGLE(WCD9335_CDC_RX_INP_MUX_RX_INT5_CFG1, 0, 9,
+			rx_int_mix_mux_text);
+
+static const struct soc_enum rx_int6_2_mux_chain_enum =
+	SOC_ENUM_SINGLE(WCD9335_CDC_RX_INP_MUX_RX_INT6_CFG1, 0, 9,
+			rx_int_mix_mux_text);
+
+static const struct soc_enum rx_int7_2_mux_chain_enum =
+	SOC_ENUM_SINGLE(WCD9335_CDC_RX_INP_MUX_RX_INT7_CFG1, 0, 10,
+			rx_int0_7_mix_mux_text);
+
+static const struct soc_enum rx_int8_2_mux_chain_enum =
+	SOC_ENUM_SINGLE(WCD9335_CDC_RX_INP_MUX_RX_INT8_CFG1, 0, 9,
+			rx_int_mix_mux_text);
+
+static const struct soc_enum int1_1_native_enum =
+	SOC_ENUM_SINGLE(SND_SOC_NOPM, 0, ARRAY_SIZE(native_mux_text),
+			native_mux_text);
+
+static const struct soc_enum int2_1_native_enum =
+	SOC_ENUM_SINGLE(SND_SOC_NOPM, 0, ARRAY_SIZE(native_mux_text),
+			native_mux_text);
+
+static const struct soc_enum int3_1_native_enum =
+	SOC_ENUM_SINGLE(SND_SOC_NOPM, 0, ARRAY_SIZE(native_mux_text),
+			native_mux_text);
+
+static const struct soc_enum int4_1_native_enum =
+	SOC_ENUM_SINGLE(SND_SOC_NOPM, 0, ARRAY_SIZE(native_mux_text),
+			native_mux_text);
+
+static const struct soc_enum rx_int0_1_mix_inp0_chain_enum =
+	SOC_ENUM_SINGLE(WCD9335_CDC_RX_INP_MUX_RX_INT0_CFG0, 0, 13,
+			rx_prim_mix_text);
+
+static const struct soc_enum rx_int0_1_mix_inp1_chain_enum =
+	SOC_ENUM_SINGLE(WCD9335_CDC_RX_INP_MUX_RX_INT0_CFG0, 4, 13,
+			rx_prim_mix_text);
+
+static const struct soc_enum rx_int0_1_mix_inp2_chain_enum =
+	SOC_ENUM_SINGLE(WCD9335_CDC_RX_INP_MUX_RX_INT0_CFG1, 4, 13,
+			rx_prim_mix_text);
+
+static const struct soc_enum rx_int1_1_mix_inp0_chain_enum =
+	SOC_ENUM_SINGLE(WCD9335_CDC_RX_INP_MUX_RX_INT1_CFG0, 0, 13,
+			rx_prim_mix_text);
+
+static const struct soc_enum rx_int1_1_mix_inp1_chain_enum =
+	SOC_ENUM_SINGLE(WCD9335_CDC_RX_INP_MUX_RX_INT1_CFG0, 4, 13,
+			rx_prim_mix_text);
+
+static const struct soc_enum rx_int1_1_mix_inp2_chain_enum =
+	SOC_ENUM_SINGLE(WCD9335_CDC_RX_INP_MUX_RX_INT1_CFG1, 4, 13,
+			rx_prim_mix_text);
+
+static const struct soc_enum rx_int2_1_mix_inp0_chain_enum =
+	SOC_ENUM_SINGLE(WCD9335_CDC_RX_INP_MUX_RX_INT2_CFG0, 0, 13,
+			rx_prim_mix_text);
+
+static const struct soc_enum rx_int2_1_mix_inp1_chain_enum =
+	SOC_ENUM_SINGLE(WCD9335_CDC_RX_INP_MUX_RX_INT2_CFG0, 4, 13,
+			rx_prim_mix_text);
+
+static const struct soc_enum rx_int2_1_mix_inp2_chain_enum =
+	SOC_ENUM_SINGLE(WCD9335_CDC_RX_INP_MUX_RX_INT2_CFG1, 4, 13,
+			rx_prim_mix_text);
+
+static const struct soc_enum rx_int3_1_mix_inp0_chain_enum =
+	SOC_ENUM_SINGLE(WCD9335_CDC_RX_INP_MUX_RX_INT3_CFG0, 0, 13,
+			rx_prim_mix_text);
+
+static const struct soc_enum rx_int3_1_mix_inp1_chain_enum =
+	SOC_ENUM_SINGLE(WCD9335_CDC_RX_INP_MUX_RX_INT3_CFG0, 4, 13,
+			rx_prim_mix_text);
+
+static const struct soc_enum rx_int3_1_mix_inp2_chain_enum =
+	SOC_ENUM_SINGLE(WCD9335_CDC_RX_INP_MUX_RX_INT3_CFG1, 4, 13,
+			rx_prim_mix_text);
+
+static const struct soc_enum rx_int4_1_mix_inp0_chain_enum =
+	SOC_ENUM_SINGLE(WCD9335_CDC_RX_INP_MUX_RX_INT4_CFG0, 0, 13,
+			rx_prim_mix_text);
+
+static const struct soc_enum rx_int4_1_mix_inp1_chain_enum =
+	SOC_ENUM_SINGLE(WCD9335_CDC_RX_INP_MUX_RX_INT4_CFG0, 4, 13,
+			rx_prim_mix_text);
+
+static const struct soc_enum rx_int4_1_mix_inp2_chain_enum =
+	SOC_ENUM_SINGLE(WCD9335_CDC_RX_INP_MUX_RX_INT4_CFG1, 4, 13,
+			rx_prim_mix_text);
+
+static const struct soc_enum rx_int5_1_mix_inp0_chain_enum =
+	SOC_ENUM_SINGLE(WCD9335_CDC_RX_INP_MUX_RX_INT5_CFG0, 0, 13,
+			rx_prim_mix_text);
+
+static const struct soc_enum rx_int5_1_mix_inp1_chain_enum =
+	SOC_ENUM_SINGLE(WCD9335_CDC_RX_INP_MUX_RX_INT5_CFG0, 4, 13,
+			rx_prim_mix_text);
+
+static const struct soc_enum rx_int5_1_mix_inp2_chain_enum =
+	SOC_ENUM_SINGLE(WCD9335_CDC_RX_INP_MUX_RX_INT5_CFG1, 4, 13,
+			rx_prim_mix_text);
+
+static const struct soc_enum rx_int6_1_mix_inp0_chain_enum =
+	SOC_ENUM_SINGLE(WCD9335_CDC_RX_INP_MUX_RX_INT6_CFG0, 0, 13,
+			rx_prim_mix_text);
+
+static const struct soc_enum rx_int6_1_mix_inp1_chain_enum =
+	SOC_ENUM_SINGLE(WCD9335_CDC_RX_INP_MUX_RX_INT6_CFG0, 4, 13,
+			rx_prim_mix_text);
+
+static const struct soc_enum rx_int6_1_mix_inp2_chain_enum =
+	SOC_ENUM_SINGLE(WCD9335_CDC_RX_INP_MUX_RX_INT6_CFG1, 4, 13,
+			rx_prim_mix_text);
+
+static const struct soc_enum rx_int7_1_mix_inp0_chain_enum =
+	SOC_ENUM_SINGLE(WCD9335_CDC_RX_INP_MUX_RX_INT7_CFG0, 0, 13,
+			rx_prim_mix_text);
+
+static const struct soc_enum rx_int7_1_mix_inp1_chain_enum =
+	SOC_ENUM_SINGLE(WCD9335_CDC_RX_INP_MUX_RX_INT7_CFG0, 4, 13,
+			rx_prim_mix_text);
+
+static const struct soc_enum rx_int7_1_mix_inp2_chain_enum =
+	SOC_ENUM_SINGLE(WCD9335_CDC_RX_INP_MUX_RX_INT7_CFG1, 4, 13,
+			rx_prim_mix_text);
+
+static const struct soc_enum rx_int8_1_mix_inp0_chain_enum =
+	SOC_ENUM_SINGLE(WCD9335_CDC_RX_INP_MUX_RX_INT8_CFG0, 0, 13,
+			rx_prim_mix_text);
+
+static const struct soc_enum rx_int8_1_mix_inp1_chain_enum =
+	SOC_ENUM_SINGLE(WCD9335_CDC_RX_INP_MUX_RX_INT8_CFG0, 4, 13,
+			rx_prim_mix_text);
+
+static const struct soc_enum rx_int8_1_mix_inp2_chain_enum =
+	SOC_ENUM_SINGLE(WCD9335_CDC_RX_INP_MUX_RX_INT8_CFG1, 4, 13,
+			rx_prim_mix_text);
+
+static const struct soc_enum rx_int0_sidetone_mix_chain_enum =
+	SOC_ENUM_SINGLE(WCD9335_CDC_RX_INP_MUX_SIDETONE_SRC_CFG0, 0, 4,
+			rx_sidetone_mix_text);
+
+static const struct soc_enum rx_int1_sidetone_mix_chain_enum =
+	SOC_ENUM_SINGLE(WCD9335_CDC_RX_INP_MUX_SIDETONE_SRC_CFG0, 2, 4,
+			rx_sidetone_mix_text);
+
+static const struct soc_enum rx_int2_sidetone_mix_chain_enum =
+	SOC_ENUM_SINGLE(WCD9335_CDC_RX_INP_MUX_SIDETONE_SRC_CFG0, 4, 4,
+			rx_sidetone_mix_text);
+
+static const struct soc_enum rx_int3_sidetone_mix_chain_enum =
+	SOC_ENUM_SINGLE(WCD9335_CDC_RX_INP_MUX_SIDETONE_SRC_CFG0, 6, 4,
+			rx_sidetone_mix_text);
+
+static const struct soc_enum rx_int4_sidetone_mix_chain_enum =
+	SOC_ENUM_SINGLE(WCD9335_CDC_RX_INP_MUX_SIDETONE_SRC_CFG1, 0, 4,
+			rx_sidetone_mix_text);
+
+static const struct soc_enum rx_int7_sidetone_mix_chain_enum =
+	SOC_ENUM_SINGLE(WCD9335_CDC_RX_INP_MUX_SIDETONE_SRC_CFG1, 2, 4,
+			rx_sidetone_mix_text);
+
+static const struct soc_enum tx_adc_mux0_chain_enum =
+	SOC_ENUM_SINGLE(WCD9335_CDC_TX_INP_MUX_ADC_MUX0_CFG1, 0, 4,
+			adc_mux_text);
+
+static const struct soc_enum tx_adc_mux1_chain_enum =
+	SOC_ENUM_SINGLE(WCD9335_CDC_TX_INP_MUX_ADC_MUX1_CFG1, 0, 4,
+			adc_mux_text);
+
+static const struct soc_enum tx_adc_mux2_chain_enum =
+	SOC_ENUM_SINGLE(WCD9335_CDC_TX_INP_MUX_ADC_MUX2_CFG1, 0, 4,
+			adc_mux_text);
+
+static const struct soc_enum tx_adc_mux3_chain_enum =
+	SOC_ENUM_SINGLE(WCD9335_CDC_TX_INP_MUX_ADC_MUX3_CFG1, 0, 4,
+			adc_mux_text);
+
+static const struct soc_enum tx_adc_mux4_chain_enum =
+	SOC_ENUM_SINGLE(WCD9335_CDC_TX_INP_MUX_ADC_MUX4_CFG0, 6, 4,
+			adc_mux_text);
+
+static const struct soc_enum tx_adc_mux5_chain_enum =
+	SOC_ENUM_SINGLE(WCD9335_CDC_TX_INP_MUX_ADC_MUX5_CFG0, 6, 4,
+			adc_mux_text);
+
+static const struct soc_enum tx_adc_mux6_chain_enum =
+	SOC_ENUM_SINGLE(WCD9335_CDC_TX_INP_MUX_ADC_MUX6_CFG0, 6, 4,
+			adc_mux_text);
+
+static const struct soc_enum tx_adc_mux7_chain_enum =
+	SOC_ENUM_SINGLE(WCD9335_CDC_TX_INP_MUX_ADC_MUX7_CFG0, 6, 4,
+			adc_mux_text);
+
+static const struct soc_enum tx_adc_mux8_chain_enum =
+	SOC_ENUM_SINGLE(WCD9335_CDC_TX_INP_MUX_ADC_MUX8_CFG0, 6, 4,
+			adc_mux_text);
+
+static const struct soc_enum tx_adc_mux10_chain_enum =
+	SOC_ENUM_SINGLE(WCD9335_CDC_TX_INP_MUX_ADC_MUX10_CFG0, 6, 4,
+			adc_mux_text);
+
+static const struct soc_enum tx_adc_mux11_chain_enum =
+	SOC_ENUM_SINGLE(WCD9335_CDC_TX_INP_MUX_ADC_MUX11_CFG0, 6, 4,
+			adc_mux_text);
+
+static const struct soc_enum tx_adc_mux12_chain_enum =
+	SOC_ENUM_SINGLE(WCD9335_CDC_TX_INP_MUX_ADC_MUX12_CFG0, 6, 4,
+			adc_mux_text);
+
+static const struct soc_enum tx_adc_mux13_chain_enum =
+	SOC_ENUM_SINGLE(WCD9335_CDC_TX_INP_MUX_ADC_MUX13_CFG0, 6, 4,
+			adc_mux_text);
+
+static const struct soc_enum tx_dmic_mux0_enum =
+	SOC_ENUM_SINGLE(WCD9335_CDC_TX_INP_MUX_ADC_MUX0_CFG0, 3, 11,
+			dmic_mux_text);
+
+static const struct soc_enum tx_dmic_mux1_enum =
+	SOC_ENUM_SINGLE(WCD9335_CDC_TX_INP_MUX_ADC_MUX1_CFG0, 3, 11,
+			dmic_mux_text);
+
+static const struct soc_enum tx_dmic_mux2_enum =
+	SOC_ENUM_SINGLE(WCD9335_CDC_TX_INP_MUX_ADC_MUX2_CFG0, 3, 11,
+			dmic_mux_text);
+
+static const struct soc_enum tx_dmic_mux3_enum =
+	SOC_ENUM_SINGLE(WCD9335_CDC_TX_INP_MUX_ADC_MUX3_CFG0, 3, 11,
+			dmic_mux_text);
+
+static const struct soc_enum tx_dmic_mux4_enum =
+	SOC_ENUM_SINGLE(WCD9335_CDC_TX_INP_MUX_ADC_MUX4_CFG0, 3, 7,
+			dmic_mux_alt_text);
+
+static const struct soc_enum tx_dmic_mux5_enum =
+	SOC_ENUM_SINGLE(WCD9335_CDC_TX_INP_MUX_ADC_MUX5_CFG0, 3, 7,
+			dmic_mux_alt_text);
+
+static const struct soc_enum tx_dmic_mux6_enum =
+	SOC_ENUM_SINGLE(WCD9335_CDC_TX_INP_MUX_ADC_MUX6_CFG0, 3, 7,
+			dmic_mux_alt_text);
+
+static const struct soc_enum tx_dmic_mux7_enum =
+	SOC_ENUM_SINGLE(WCD9335_CDC_TX_INP_MUX_ADC_MUX7_CFG0, 3, 7,
+			dmic_mux_alt_text);
+
+static const struct soc_enum tx_dmic_mux8_enum =
+	SOC_ENUM_SINGLE(WCD9335_CDC_TX_INP_MUX_ADC_MUX8_CFG0, 3, 7,
+			dmic_mux_alt_text);
+
+static const struct soc_enum tx_dmic_mux10_enum =
+	SOC_ENUM_SINGLE(WCD9335_CDC_TX_INP_MUX_ADC_MUX10_CFG0, 3, 7,
+			dmic_mux_alt_text);
+
+static const struct soc_enum tx_dmic_mux11_enum =
+	SOC_ENUM_SINGLE(WCD9335_CDC_TX_INP_MUX_ADC_MUX11_CFG0, 3, 7,
+			dmic_mux_alt_text);
+
+static const struct soc_enum tx_dmic_mux12_enum =
+	SOC_ENUM_SINGLE(WCD9335_CDC_TX_INP_MUX_ADC_MUX12_CFG0, 3, 7,
+			dmic_mux_alt_text);
+
+static const struct soc_enum tx_dmic_mux13_enum =
+	SOC_ENUM_SINGLE(WCD9335_CDC_TX_INP_MUX_ADC_MUX13_CFG0, 3, 7,
+			dmic_mux_alt_text);
+
+static const struct soc_enum tx_amic_mux0_enum =
+	SOC_ENUM_SINGLE(WCD9335_CDC_TX_INP_MUX_ADC_MUX0_CFG0, 0, 7,
+			amic_mux_text);
+
+static const struct soc_enum tx_amic_mux1_enum =
+	SOC_ENUM_SINGLE(WCD9335_CDC_TX_INP_MUX_ADC_MUX1_CFG0, 0, 7,
+			amic_mux_text);
+
+static const struct soc_enum tx_amic_mux2_enum =
+	SOC_ENUM_SINGLE(WCD9335_CDC_TX_INP_MUX_ADC_MUX2_CFG0, 0, 7,
+			amic_mux_text);
+
+static const struct soc_enum tx_amic_mux3_enum =
+	SOC_ENUM_SINGLE(WCD9335_CDC_TX_INP_MUX_ADC_MUX3_CFG0, 0, 7,
+			amic_mux_text);
+
+static const struct soc_enum tx_amic_mux4_enum =
+	SOC_ENUM_SINGLE(WCD9335_CDC_TX_INP_MUX_ADC_MUX4_CFG0, 0, 7,
+			amic_mux_text);
+
+static const struct soc_enum tx_amic_mux5_enum =
+	SOC_ENUM_SINGLE(WCD9335_CDC_TX_INP_MUX_ADC_MUX5_CFG0, 0, 7,
+			amic_mux_text);
+
+static const struct soc_enum tx_amic_mux6_enum =
+	SOC_ENUM_SINGLE(WCD9335_CDC_TX_INP_MUX_ADC_MUX6_CFG0, 0, 7,
+			amic_mux_text);
+
+static const struct soc_enum tx_amic_mux7_enum =
+	SOC_ENUM_SINGLE(WCD9335_CDC_TX_INP_MUX_ADC_MUX7_CFG0, 0, 7,
+			amic_mux_text);
+
+static const struct soc_enum tx_amic_mux8_enum =
+	SOC_ENUM_SINGLE(WCD9335_CDC_TX_INP_MUX_ADC_MUX8_CFG0, 0, 7,
+			amic_mux_text);
+
+static const struct soc_enum tx_amic_mux10_enum =
+	SOC_ENUM_SINGLE(WCD9335_CDC_TX_INP_MUX_ADC_MUX10_CFG0, 0, 7,
+			amic_mux_text);
+
+static const struct soc_enum tx_amic_mux11_enum =
+	SOC_ENUM_SINGLE(WCD9335_CDC_TX_INP_MUX_ADC_MUX11_CFG0, 0, 7,
+			amic_mux_text);
+
+static const struct soc_enum tx_amic_mux12_enum =
+	SOC_ENUM_SINGLE(WCD9335_CDC_TX_INP_MUX_ADC_MUX12_CFG0, 0, 7,
+			amic_mux_text);
+
+static const struct soc_enum tx_amic_mux13_enum =
+	SOC_ENUM_SINGLE(WCD9335_CDC_TX_INP_MUX_ADC_MUX13_CFG0, 0, 7,
+			amic_mux_text);
+
+static const struct soc_enum sb_tx0_mux_enum =
+	SOC_ENUM_SINGLE(WCD9335_CDC_IF_ROUTER_TX_MUX_CFG0, 0, 4,
+			sb_tx0_mux_text);
+
+static const struct soc_enum sb_tx1_mux_enum =
+	SOC_ENUM_SINGLE(WCD9335_CDC_IF_ROUTER_TX_MUX_CFG0, 2, 4,
+			sb_tx1_mux_text);
+
+static const struct soc_enum sb_tx2_mux_enum =
+	SOC_ENUM_SINGLE(WCD9335_CDC_IF_ROUTER_TX_MUX_CFG0, 4, 4,
+			sb_tx2_mux_text);
+
+static const struct soc_enum sb_tx3_mux_enum =
+	SOC_ENUM_SINGLE(WCD9335_CDC_IF_ROUTER_TX_MUX_CFG0, 6, 4,
+			sb_tx3_mux_text);
+
+static const struct soc_enum sb_tx4_mux_enum =
+	SOC_ENUM_SINGLE(WCD9335_CDC_IF_ROUTER_TX_MUX_CFG1, 0, 4,
+			sb_tx4_mux_text);
+
+static const struct soc_enum sb_tx5_mux_enum =
+	SOC_ENUM_SINGLE(WCD9335_CDC_IF_ROUTER_TX_MUX_CFG1, 2, 4,
+			sb_tx5_mux_text);
+
+static const struct soc_enum sb_tx6_mux_enum =
+	SOC_ENUM_SINGLE(WCD9335_CDC_IF_ROUTER_TX_MUX_CFG1, 4, 4,
+			sb_tx6_mux_text);
+
+static const struct soc_enum sb_tx7_mux_enum =
+	SOC_ENUM_SINGLE(WCD9335_CDC_IF_ROUTER_TX_MUX_CFG1, 6, 4,
+			sb_tx7_mux_text);
+
+static const struct soc_enum sb_tx8_mux_enum =
+	SOC_ENUM_SINGLE(WCD9335_CDC_IF_ROUTER_TX_MUX_CFG2, 0, 4,
+			sb_tx8_mux_text);
+
+static const struct soc_enum sb_tx9_mux_enum =
+	SOC_ENUM_SINGLE(WCD9335_CDC_IF_ROUTER_TX_MUX_CFG2, 2, 3,
+			sb_tx9_mux_text);
+
+static const struct soc_enum sb_tx10_mux_enum =
+	SOC_ENUM_SINGLE(WCD9335_CDC_IF_ROUTER_TX_MUX_CFG2, 4, 3,
+			sb_tx10_mux_text);
+
+static const struct soc_enum sb_tx11_mux_enum =
+	SOC_ENUM_SINGLE(WCD9335_DATA_HUB_DATA_HUB_SB_TX11_INP_CFG, 0, 4,
+			sb_tx11_mux_text);
+
+static const struct soc_enum sb_tx11_inp1_mux_enum =
+	SOC_ENUM_SINGLE(WCD9335_CDC_IF_ROUTER_TX_MUX_CFG3, 0, 10,
+			sb_tx11_inp1_mux_text);
+
+static const struct soc_enum sb_tx13_mux_enum =
+	SOC_ENUM_SINGLE(WCD9335_CDC_IF_ROUTER_TX_MUX_CFG3, 4, 3,
+			sb_tx13_mux_text);
+
+static const struct soc_enum tx13_inp_mux_enum =
+	SOC_ENUM_SINGLE(WCD9335_DATA_HUB_DATA_HUB_SB_TX13_INP_CFG, 0, 3,
+			tx13_inp_mux_text);
+
+static const struct soc_enum rx_mix_tx0_mux_enum =
+	SOC_ENUM_SINGLE(WCD9335_CDC_RX_INP_MUX_RX_MIX_CFG0, 0, 14,
+			rx_echo_mux_text);
+
+static const struct soc_enum rx_mix_tx1_mux_enum =
+	SOC_ENUM_SINGLE(WCD9335_CDC_RX_INP_MUX_RX_MIX_CFG0, 4, 14,
+			rx_echo_mux_text);
+
+static const struct soc_enum rx_mix_tx2_mux_enum =
+	SOC_ENUM_SINGLE(WCD9335_CDC_RX_INP_MUX_RX_MIX_CFG1, 0, 14,
+			rx_echo_mux_text);
+
+static const struct soc_enum rx_mix_tx3_mux_enum =
+	SOC_ENUM_SINGLE(WCD9335_CDC_RX_INP_MUX_RX_MIX_CFG1, 4, 14,
+			rx_echo_mux_text);
+
+static const struct soc_enum rx_mix_tx4_mux_enum =
+	SOC_ENUM_SINGLE(WCD9335_CDC_RX_INP_MUX_RX_MIX_CFG2, 0, 14,
+			rx_echo_mux_text);
+
+static const struct soc_enum rx_mix_tx5_mux_enum =
+	SOC_ENUM_SINGLE(WCD9335_CDC_RX_INP_MUX_RX_MIX_CFG2, 4, 14,
+			rx_echo_mux_text);
+
+static const struct soc_enum rx_mix_tx6_mux_enum =
+	SOC_ENUM_SINGLE(WCD9335_CDC_RX_INP_MUX_RX_MIX_CFG3, 0, 14,
+			rx_echo_mux_text);
+
+static const struct soc_enum rx_mix_tx7_mux_enum =
+	SOC_ENUM_SINGLE(WCD9335_CDC_RX_INP_MUX_RX_MIX_CFG3, 4, 14,
+			rx_echo_mux_text);
+
+static const struct soc_enum rx_mix_tx8_mux_enum =
+	SOC_ENUM_SINGLE(WCD9335_CDC_RX_INP_MUX_RX_MIX_CFG4, 0, 14,
+			rx_echo_mux_text);
+
+static const struct soc_enum iir0_inp0_mux_enum =
+	SOC_ENUM_SINGLE(WCD9335_CDC_SIDETONE_IIR_INP_MUX_IIR0_MIX_CFG0, 0, 18,
+			iir_inp_mux_text);
+
+static const struct soc_enum iir0_inp1_mux_enum =
+	SOC_ENUM_SINGLE(WCD9335_CDC_SIDETONE_IIR_INP_MUX_IIR0_MIX_CFG1, 0, 18,
+			iir_inp_mux_text);
+
+static const struct soc_enum iir0_inp2_mux_enum =
+	SOC_ENUM_SINGLE(WCD9335_CDC_SIDETONE_IIR_INP_MUX_IIR0_MIX_CFG2, 0, 18,
+			iir_inp_mux_text);
+
+static const struct soc_enum iir0_inp3_mux_enum =
+	SOC_ENUM_SINGLE(WCD9335_CDC_SIDETONE_IIR_INP_MUX_IIR0_MIX_CFG3, 0, 18,
+			iir_inp_mux_text);
+
+static const struct soc_enum iir1_inp0_mux_enum =
+	SOC_ENUM_SINGLE(WCD9335_CDC_SIDETONE_IIR_INP_MUX_IIR1_MIX_CFG0, 0, 18,
+			iir_inp_mux_text);
+
+static const struct soc_enum iir1_inp1_mux_enum =
+	SOC_ENUM_SINGLE(WCD9335_CDC_SIDETONE_IIR_INP_MUX_IIR1_MIX_CFG1, 0, 18,
+			iir_inp_mux_text);
+
+static const struct soc_enum iir1_inp2_mux_enum =
+	SOC_ENUM_SINGLE(WCD9335_CDC_SIDETONE_IIR_INP_MUX_IIR1_MIX_CFG2, 0, 18,
+			iir_inp_mux_text);
+
+static const struct soc_enum iir1_inp3_mux_enum =
+	SOC_ENUM_SINGLE(WCD9335_CDC_SIDETONE_IIR_INP_MUX_IIR1_MIX_CFG3, 0, 18,
+			iir_inp_mux_text);
+
+static const struct soc_enum rx_int0_dem_inp_mux_enum =
+	SOC_ENUM_SINGLE(WCD9335_CDC_RX0_RX_PATH_SEC0, 0,
+			ARRAY_SIZE(rx_int_dem_inp_mux_text),
+			rx_int_dem_inp_mux_text);
+
+static const struct soc_enum rx_int1_dem_inp_mux_enum =
+	SOC_ENUM_SINGLE(WCD9335_CDC_RX1_RX_PATH_SEC0, 0,
+			ARRAY_SIZE(rx_int_dem_inp_mux_text),
+			rx_int_dem_inp_mux_text);
+
+static const struct soc_enum rx_int2_dem_inp_mux_enum =
+	SOC_ENUM_SINGLE(WCD9335_CDC_RX2_RX_PATH_SEC0, 0,
+			ARRAY_SIZE(rx_int_dem_inp_mux_text),
+			rx_int_dem_inp_mux_text);
+
+static const struct soc_enum rx_int0_interp_mux_enum =
+	SOC_ENUM_SINGLE(WCD9335_CDC_RX0_RX_PATH_CTL, 5, 2,
+			rx_int0_interp_mux_text);
+
+static const struct soc_enum rx_int1_interp_mux_enum =
+	SOC_ENUM_SINGLE(WCD9335_CDC_RX1_RX_PATH_CTL, 5, 2,
+			rx_int1_interp_mux_text);
+
+static const struct soc_enum rx_int2_interp_mux_enum =
+	SOC_ENUM_SINGLE(WCD9335_CDC_RX2_RX_PATH_CTL, 5, 2,
+			rx_int2_interp_mux_text);
+
+static const struct soc_enum rx_int3_interp_mux_enum =
+	SOC_ENUM_SINGLE(WCD9335_CDC_RX3_RX_PATH_CTL, 5, 2,
+			rx_int3_interp_mux_text);
+
+static const struct soc_enum rx_int4_interp_mux_enum =
+	SOC_ENUM_SINGLE(WCD9335_CDC_RX4_RX_PATH_CTL, 5, 2,
+			rx_int4_interp_mux_text);
+
+static const struct soc_enum rx_int5_interp_mux_enum =
+	SOC_ENUM_SINGLE(WCD9335_CDC_RX5_RX_PATH_CTL, 5, 2,
+			rx_int5_interp_mux_text);
+
+static const struct soc_enum rx_int6_interp_mux_enum =
+	SOC_ENUM_SINGLE(WCD9335_CDC_RX6_RX_PATH_CTL, 5, 2,
+			rx_int6_interp_mux_text);
+
+static const struct soc_enum rx_int7_interp_mux_enum =
+	SOC_ENUM_SINGLE(WCD9335_CDC_RX7_RX_PATH_CTL, 5, 2,
+			rx_int7_interp_mux_text);
+
+static const struct soc_enum rx_int8_interp_mux_enum =
+	SOC_ENUM_SINGLE(WCD9335_CDC_RX8_RX_PATH_CTL, 5, 2,
+			rx_int8_interp_mux_text);
+
+static const struct soc_enum mad_sel_enum =
+	SOC_ENUM_SINGLE(WCD9335_CPE_SS_CFG, 0, 2, mad_sel_text);
+
+static const struct soc_enum anc0_fb_mux_enum =
+	SOC_ENUM_SINGLE(WCD9335_CDC_RX_INP_MUX_ANC_CFG0, 0, 5,
+			anc0_fb_mux_text);
+
+static const struct soc_enum anc1_fb_mux_enum =
+	SOC_ENUM_SINGLE(WCD9335_CDC_RX_INP_MUX_ANC_CFG0, 3, 3,
+			anc1_fb_mux_text);
+
+static const struct snd_kcontrol_new rx_int0_dem_inp_mux =
+	SOC_DAPM_ENUM_EXT("RX INT0 DEM MUX Mux", rx_int0_dem_inp_mux_enum,
+			  snd_soc_dapm_get_enum_double,
+			  tasha_int_dem_inp_mux_put);
+
+static const struct snd_kcontrol_new rx_int1_dem_inp_mux =
+	SOC_DAPM_ENUM_EXT("RX INT1 DEM MUX Mux", rx_int1_dem_inp_mux_enum,
+			  snd_soc_dapm_get_enum_double,
+			  tasha_int_dem_inp_mux_put);
+
+static const struct snd_kcontrol_new rx_int2_dem_inp_mux =
+	SOC_DAPM_ENUM_EXT("RX INT2 DEM MUX Mux", rx_int2_dem_inp_mux_enum,
+			  snd_soc_dapm_get_enum_double,
+			  tasha_int_dem_inp_mux_put);
+
+static const struct snd_kcontrol_new spl_src0_mux =
+	SOC_DAPM_ENUM("SPL SRC0 MUX Mux", spl_src0_mux_chain_enum);
+
+static const struct snd_kcontrol_new spl_src1_mux =
+	SOC_DAPM_ENUM("SPL SRC1 MUX Mux", spl_src1_mux_chain_enum);
+
+static const struct snd_kcontrol_new spl_src2_mux =
+	SOC_DAPM_ENUM("SPL SRC2 MUX Mux", spl_src2_mux_chain_enum);
+
+static const struct snd_kcontrol_new spl_src3_mux =
+	SOC_DAPM_ENUM("SPL SRC3 MUX Mux", spl_src3_mux_chain_enum);
+
+static const struct snd_kcontrol_new rx_int0_2_mux =
+	SOC_DAPM_ENUM("RX INT0_2 MUX Mux", rx_int0_2_mux_chain_enum);
+
+static const struct snd_kcontrol_new rx_int1_2_mux =
+	SOC_DAPM_ENUM("RX INT1_2 MUX Mux", rx_int1_2_mux_chain_enum);
+
+static const struct snd_kcontrol_new rx_int2_2_mux =
+	SOC_DAPM_ENUM("RX INT2_2 MUX Mux", rx_int2_2_mux_chain_enum);
+
+static const struct snd_kcontrol_new rx_int3_2_mux =
+	SOC_DAPM_ENUM("RX INT3_2 MUX Mux", rx_int3_2_mux_chain_enum);
+
+static const struct snd_kcontrol_new rx_int4_2_mux =
+	SOC_DAPM_ENUM("RX INT4_2 MUX Mux", rx_int4_2_mux_chain_enum);
+
+static const struct snd_kcontrol_new rx_int5_2_mux =
+	SOC_DAPM_ENUM("RX INT5_2 MUX Mux", rx_int5_2_mux_chain_enum);
+
+static const struct snd_kcontrol_new rx_int6_2_mux =
+	SOC_DAPM_ENUM("RX INT6_2 MUX Mux", rx_int6_2_mux_chain_enum);
+
+static const struct snd_kcontrol_new rx_int7_2_mux =
+	SOC_DAPM_ENUM("RX INT7_2 MUX Mux", rx_int7_2_mux_chain_enum);
+
+static const struct snd_kcontrol_new rx_int8_2_mux =
+	SOC_DAPM_ENUM("RX INT8_2 MUX Mux", rx_int8_2_mux_chain_enum);
+
+static const struct snd_kcontrol_new int1_1_native_mux =
+	SOC_DAPM_ENUM("RX INT1_1 NATIVE MUX Mux", int1_1_native_enum);
+
+static const struct snd_kcontrol_new int2_1_native_mux =
+	SOC_DAPM_ENUM("RX INT2_1 NATIVE MUX Mux", int2_1_native_enum);
+
+static const struct snd_kcontrol_new int3_1_native_mux =
+	SOC_DAPM_ENUM("RX INT3_1 NATIVE MUX Mux", int3_1_native_enum);
+
+static const struct snd_kcontrol_new int4_1_native_mux =
+	SOC_DAPM_ENUM("RX INT4_1 NATIVE MUX Mux", int4_1_native_enum);
+
+static const struct snd_kcontrol_new rx_int0_1_mix_inp0_mux =
+	SOC_DAPM_ENUM("RX INT0_1 MIX1 INP0 Mux", rx_int0_1_mix_inp0_chain_enum);
+
+static const struct snd_kcontrol_new rx_int0_1_mix_inp1_mux =
+	SOC_DAPM_ENUM("RX INT0_1 MIX1 INP1 Mux", rx_int0_1_mix_inp1_chain_enum);
+
+static const struct snd_kcontrol_new rx_int0_1_mix_inp2_mux =
+	SOC_DAPM_ENUM("RX INT0_1 MIX1 INP2 Mux", rx_int0_1_mix_inp2_chain_enum);
+
+static const struct snd_kcontrol_new rx_int1_1_mix_inp0_mux =
+	SOC_DAPM_ENUM("RX INT1_1 MIX1 INP0 Mux", rx_int1_1_mix_inp0_chain_enum);
+
+static const struct snd_kcontrol_new rx_int1_1_mix_inp1_mux =
+	SOC_DAPM_ENUM("RX INT1_1 MIX1 INP1 Mux", rx_int1_1_mix_inp1_chain_enum);
+
+static const struct snd_kcontrol_new rx_int1_1_mix_inp2_mux =
+	SOC_DAPM_ENUM("RX INT1_1 MIX1 INP2 Mux", rx_int1_1_mix_inp2_chain_enum);
+
+static const struct snd_kcontrol_new rx_int2_1_mix_inp0_mux =
+	SOC_DAPM_ENUM("RX INT2_1 MIX1 INP0 Mux", rx_int2_1_mix_inp0_chain_enum);
+
+static const struct snd_kcontrol_new rx_int2_1_mix_inp1_mux =
+	SOC_DAPM_ENUM("RX INT2_1 MIX1 INP1 Mux", rx_int2_1_mix_inp1_chain_enum);
+
+static const struct snd_kcontrol_new rx_int2_1_mix_inp2_mux =
+	SOC_DAPM_ENUM("RX INT2_1 MIX1 INP2 Mux", rx_int2_1_mix_inp2_chain_enum);
+
+static const struct snd_kcontrol_new rx_int3_1_mix_inp0_mux =
+	SOC_DAPM_ENUM("RX INT3_1 MIX1 INP0 Mux", rx_int3_1_mix_inp0_chain_enum);
+
+static const struct snd_kcontrol_new rx_int3_1_mix_inp1_mux =
+	SOC_DAPM_ENUM("RX INT3_1 MIX1 INP1 Mux", rx_int3_1_mix_inp1_chain_enum);
+
+static const struct snd_kcontrol_new rx_int3_1_mix_inp2_mux =
+	SOC_DAPM_ENUM("RX INT3_1 MIX1 INP2 Mux", rx_int3_1_mix_inp2_chain_enum);
+
+static const struct snd_kcontrol_new rx_int4_1_mix_inp0_mux =
+	SOC_DAPM_ENUM("RX INT4_1 MIX1 INP0 Mux", rx_int4_1_mix_inp0_chain_enum);
+
+static const struct snd_kcontrol_new rx_int4_1_mix_inp1_mux =
+	SOC_DAPM_ENUM("RX INT4_1 MIX1 INP1 Mux", rx_int4_1_mix_inp1_chain_enum);
+
+static const struct snd_kcontrol_new rx_int4_1_mix_inp2_mux =
+	SOC_DAPM_ENUM("RX INT4_1 MIX1 INP2 Mux", rx_int4_1_mix_inp2_chain_enum);
+
+static const struct snd_kcontrol_new rx_int5_1_mix_inp0_mux =
+	SOC_DAPM_ENUM("RX INT5_1 MIX1 INP0 Mux", rx_int5_1_mix_inp0_chain_enum);
+
+static const struct snd_kcontrol_new rx_int5_1_mix_inp1_mux =
+	SOC_DAPM_ENUM("RX INT5_1 MIX1 INP1 Mux", rx_int5_1_mix_inp1_chain_enum);
+
+static const struct snd_kcontrol_new rx_int5_1_mix_inp2_mux =
+	SOC_DAPM_ENUM("RX INT5_1 MIX1 INP2 Mux", rx_int5_1_mix_inp2_chain_enum);
+
+static const struct snd_kcontrol_new rx_int6_1_mix_inp0_mux =
+	SOC_DAPM_ENUM("RX INT6_1 MIX1 INP0 Mux", rx_int6_1_mix_inp0_chain_enum);
+
+static const struct snd_kcontrol_new rx_int6_1_mix_inp1_mux =
+	SOC_DAPM_ENUM("RX INT6_1 MIX1 INP1 Mux", rx_int6_1_mix_inp1_chain_enum);
+
+static const struct snd_kcontrol_new rx_int6_1_mix_inp2_mux =
+	SOC_DAPM_ENUM("RX INT6_1 MIX1 INP2 Mux", rx_int6_1_mix_inp2_chain_enum);
+
+static const struct snd_kcontrol_new rx_int7_1_mix_inp0_mux =
+	SOC_DAPM_ENUM("RX INT7_1 MIX1 INP0 Mux", rx_int7_1_mix_inp0_chain_enum);
+
+static const struct snd_kcontrol_new rx_int7_1_mix_inp1_mux =
+	SOC_DAPM_ENUM("RX INT7_1 MIX1 INP1 Mux", rx_int7_1_mix_inp1_chain_enum);
+
+static const struct snd_kcontrol_new rx_int7_1_mix_inp2_mux =
+	SOC_DAPM_ENUM("RX INT7_1 MIX1 INP2 Mux", rx_int7_1_mix_inp2_chain_enum);
+
+static const struct snd_kcontrol_new rx_int8_1_mix_inp0_mux =
+	SOC_DAPM_ENUM("RX INT8_1 MIX1 INP0 Mux", rx_int8_1_mix_inp0_chain_enum);
+
+static const struct snd_kcontrol_new rx_int8_1_mix_inp1_mux =
+	SOC_DAPM_ENUM("RX INT8_1 MIX1 INP1 Mux", rx_int8_1_mix_inp1_chain_enum);
+
+static const struct snd_kcontrol_new rx_int8_1_mix_inp2_mux =
+	SOC_DAPM_ENUM("RX INT8_1 MIX1 INP2 Mux", rx_int8_1_mix_inp2_chain_enum);
+
+static const struct snd_kcontrol_new rx_int0_mix2_inp_mux =
+	SOC_DAPM_ENUM("RX INT0 MIX2 INP Mux", rx_int0_sidetone_mix_chain_enum);
+
+static const struct snd_kcontrol_new rx_int1_mix2_inp_mux =
+	SOC_DAPM_ENUM("RX INT1 MIX2 INP Mux", rx_int1_sidetone_mix_chain_enum);
+
+static const struct snd_kcontrol_new rx_int2_mix2_inp_mux =
+	SOC_DAPM_ENUM("RX INT2 MIX2 INP Mux", rx_int2_sidetone_mix_chain_enum);
+
+static const struct snd_kcontrol_new rx_int3_mix2_inp_mux =
+	SOC_DAPM_ENUM("RX INT3 MIX2 INP Mux", rx_int3_sidetone_mix_chain_enum);
+
+static const struct snd_kcontrol_new rx_int4_mix2_inp_mux =
+	SOC_DAPM_ENUM("RX INT4 MIX2 INP Mux", rx_int4_sidetone_mix_chain_enum);
+
+static const struct snd_kcontrol_new rx_int7_mix2_inp_mux =
+	SOC_DAPM_ENUM("RX INT7 MIX2 INP Mux", rx_int7_sidetone_mix_chain_enum);
+
+static const struct snd_kcontrol_new tx_adc_mux0 =
+	SOC_DAPM_ENUM_EXT("ADC MUX0 Mux", tx_adc_mux0_chain_enum,
+			  snd_soc_dapm_get_enum_double,
+			  tasha_put_dec_enum);
+
+static const struct snd_kcontrol_new tx_adc_mux1 =
+	SOC_DAPM_ENUM_EXT("ADC MUX1 Mux", tx_adc_mux1_chain_enum,
+			  snd_soc_dapm_get_enum_double,
+			  tasha_put_dec_enum);
+
+static const struct snd_kcontrol_new tx_adc_mux2 =
+	SOC_DAPM_ENUM_EXT("ADC MUX2 Mux", tx_adc_mux2_chain_enum,
+			  snd_soc_dapm_get_enum_double,
+			  tasha_put_dec_enum);
+
+static const struct snd_kcontrol_new tx_adc_mux3 =
+	SOC_DAPM_ENUM_EXT("ADC MUX3 Mux", tx_adc_mux3_chain_enum,
+			  snd_soc_dapm_get_enum_double,
+			  tasha_put_dec_enum);
+
+static const struct snd_kcontrol_new tx_adc_mux4 =
+	SOC_DAPM_ENUM_EXT("ADC MUX4 Mux", tx_adc_mux4_chain_enum,
+			  snd_soc_dapm_get_enum_double,
+			  tasha_put_dec_enum);
+
+static const struct snd_kcontrol_new tx_adc_mux5 =
+	SOC_DAPM_ENUM_EXT("ADC MUX5 Mux", tx_adc_mux5_chain_enum,
+			  snd_soc_dapm_get_enum_double,
+			  tasha_put_dec_enum);
+
+static const struct snd_kcontrol_new tx_adc_mux6 =
+	SOC_DAPM_ENUM_EXT("ADC MUX6 Mux", tx_adc_mux6_chain_enum,
+			  snd_soc_dapm_get_enum_double,
+			  tasha_put_dec_enum);
+
+static const struct snd_kcontrol_new tx_adc_mux7 =
+	SOC_DAPM_ENUM_EXT("ADC MUX7 Mux", tx_adc_mux7_chain_enum,
+			  snd_soc_dapm_get_enum_double,
+			  tasha_put_dec_enum);
+
+static const struct snd_kcontrol_new tx_adc_mux8 =
+	SOC_DAPM_ENUM_EXT("ADC MUX8 Mux", tx_adc_mux8_chain_enum,
+			  snd_soc_dapm_get_enum_double,
+			  tasha_put_dec_enum);
+
+static const struct snd_kcontrol_new tx_adc_mux10 =
+	SOC_DAPM_ENUM("ADC MUX10 Mux", tx_adc_mux10_chain_enum);
+
+static const struct snd_kcontrol_new tx_adc_mux11 =
+	SOC_DAPM_ENUM("ADC MUX11 Mux", tx_adc_mux11_chain_enum);
+
+static const struct snd_kcontrol_new tx_adc_mux12 =
+	SOC_DAPM_ENUM("ADC MUX12 Mux", tx_adc_mux12_chain_enum);
+
+static const struct snd_kcontrol_new tx_adc_mux13 =
+	SOC_DAPM_ENUM("ADC MUX13 Mux", tx_adc_mux13_chain_enum);
+
+static const struct snd_kcontrol_new tx_dmic_mux0 =
+	SOC_DAPM_ENUM("DMIC MUX0 Mux", tx_dmic_mux0_enum);
+
+static const struct snd_kcontrol_new tx_dmic_mux1 =
+	SOC_DAPM_ENUM("DMIC MUX1 Mux", tx_dmic_mux1_enum);
+
+static const struct snd_kcontrol_new tx_dmic_mux2 =
+	SOC_DAPM_ENUM("DMIC MUX2 Mux", tx_dmic_mux2_enum);
+
+static const struct snd_kcontrol_new tx_dmic_mux3 =
+	SOC_DAPM_ENUM("DMIC MUX3 Mux", tx_dmic_mux3_enum);
+
+static const struct snd_kcontrol_new tx_dmic_mux4 =
+	SOC_DAPM_ENUM("DMIC MUX4 Mux", tx_dmic_mux4_enum);
+
+static const struct snd_kcontrol_new tx_dmic_mux5 =
+	SOC_DAPM_ENUM("DMIC MUX5 Mux", tx_dmic_mux5_enum);
+
+static const struct snd_kcontrol_new tx_dmic_mux6 =
+	SOC_DAPM_ENUM("DMIC MUX6 Mux", tx_dmic_mux6_enum);
+
+static const struct snd_kcontrol_new tx_dmic_mux7 =
+	SOC_DAPM_ENUM("DMIC MUX7 Mux", tx_dmic_mux7_enum);
+
+static const struct snd_kcontrol_new tx_dmic_mux8 =
+	SOC_DAPM_ENUM("DMIC MUX8 Mux", tx_dmic_mux8_enum);
+
+static const struct snd_kcontrol_new tx_dmic_mux10 =
+	SOC_DAPM_ENUM("DMIC MUX10 Mux", tx_dmic_mux10_enum);
+
+static const struct snd_kcontrol_new tx_dmic_mux11 =
+	SOC_DAPM_ENUM("DMIC MUX11 Mux", tx_dmic_mux11_enum);
+
+static const struct snd_kcontrol_new tx_dmic_mux12 =
+	SOC_DAPM_ENUM("DMIC MUX12 Mux", tx_dmic_mux12_enum);
+
+static const struct snd_kcontrol_new tx_dmic_mux13 =
+	SOC_DAPM_ENUM("DMIC MUX13 Mux", tx_dmic_mux13_enum);
+
+static const struct snd_kcontrol_new tx_amic_mux0 =
+	SOC_DAPM_ENUM("AMIC MUX0 Mux", tx_amic_mux0_enum);
+
+static const struct snd_kcontrol_new tx_amic_mux1 =
+	SOC_DAPM_ENUM("AMIC MUX1 Mux", tx_amic_mux1_enum);
+
+static const struct snd_kcontrol_new tx_amic_mux2 =
+	SOC_DAPM_ENUM("AMIC MUX2 Mux", tx_amic_mux2_enum);
+
+static const struct snd_kcontrol_new tx_amic_mux3 =
+	SOC_DAPM_ENUM("AMIC MUX3 Mux", tx_amic_mux3_enum);
+
+static const struct snd_kcontrol_new tx_amic_mux4 =
+	SOC_DAPM_ENUM("AMIC MUX4 Mux", tx_amic_mux4_enum);
+
+static const struct snd_kcontrol_new tx_amic_mux5 =
+	SOC_DAPM_ENUM("AMIC MUX5 Mux", tx_amic_mux5_enum);
+
+static const struct snd_kcontrol_new tx_amic_mux6 =
+	SOC_DAPM_ENUM("AMIC MUX6 Mux", tx_amic_mux6_enum);
+
+static const struct snd_kcontrol_new tx_amic_mux7 =
+	SOC_DAPM_ENUM("AMIC MUX7 Mux", tx_amic_mux7_enum);
+
+static const struct snd_kcontrol_new tx_amic_mux8 =
+	SOC_DAPM_ENUM("AMIC MUX8 Mux", tx_amic_mux8_enum);
+
+static const struct snd_kcontrol_new tx_amic_mux10 =
+	SOC_DAPM_ENUM("AMIC MUX10 Mux", tx_amic_mux10_enum);
+
+static const struct snd_kcontrol_new tx_amic_mux11 =
+	SOC_DAPM_ENUM("AMIC MUX11 Mux", tx_amic_mux11_enum);
+
+static const struct snd_kcontrol_new tx_amic_mux12 =
+	SOC_DAPM_ENUM("AMIC MUX12 Mux", tx_amic_mux12_enum);
+
+static const struct snd_kcontrol_new tx_amic_mux13 =
+	SOC_DAPM_ENUM("AMIC MUX13 Mux", tx_amic_mux13_enum);
+
+static const struct snd_kcontrol_new sb_tx0_mux =
+	SOC_DAPM_ENUM("SLIM TX0 MUX Mux", sb_tx0_mux_enum);
+
+static const struct snd_kcontrol_new sb_tx1_mux =
+	SOC_DAPM_ENUM("SLIM TX1 MUX Mux", sb_tx1_mux_enum);
+
+static const struct snd_kcontrol_new sb_tx2_mux =
+	SOC_DAPM_ENUM("SLIM TX2 MUX Mux", sb_tx2_mux_enum);
+
+static const struct snd_kcontrol_new sb_tx3_mux =
+	SOC_DAPM_ENUM("SLIM TX3 MUX Mux", sb_tx3_mux_enum);
+
+static const struct snd_kcontrol_new sb_tx4_mux =
+	SOC_DAPM_ENUM("SLIM TX4 MUX Mux", sb_tx4_mux_enum);
+
+static const struct snd_kcontrol_new sb_tx5_mux =
+	SOC_DAPM_ENUM("SLIM TX5 MUX Mux", sb_tx5_mux_enum);
+
+static const struct snd_kcontrol_new sb_tx6_mux =
+	SOC_DAPM_ENUM("SLIM TX6 MUX Mux", sb_tx6_mux_enum);
+
+static const struct snd_kcontrol_new sb_tx7_mux =
+	SOC_DAPM_ENUM("SLIM TX7 MUX Mux", sb_tx7_mux_enum);
+
+static const struct snd_kcontrol_new sb_tx8_mux =
+	SOC_DAPM_ENUM("SLIM TX8 MUX Mux", sb_tx8_mux_enum);
+
+static const struct snd_kcontrol_new sb_tx9_mux =
+	SOC_DAPM_ENUM("SLIM TX9 MUX Mux", sb_tx9_mux_enum);
+
+static const struct snd_kcontrol_new sb_tx10_mux =
+	SOC_DAPM_ENUM("SLIM TX10 MUX Mux", sb_tx10_mux_enum);
+
+static const struct snd_kcontrol_new sb_tx11_mux =
+	SOC_DAPM_ENUM("SLIM TX11 MUX Mux", sb_tx11_mux_enum);
+
+static const struct snd_kcontrol_new sb_tx11_inp1_mux =
+	SOC_DAPM_ENUM("SLIM TX11 INP1 MUX Mux", sb_tx11_inp1_mux_enum);
+
+static const struct snd_kcontrol_new sb_tx13_mux =
+	SOC_DAPM_ENUM("SLIM TX13 MUX Mux", sb_tx13_mux_enum);
+
+static const struct snd_kcontrol_new tx13_inp_mux =
+	SOC_DAPM_ENUM("TX13 INP MUX Mux", tx13_inp_mux_enum);
+
+static const struct snd_kcontrol_new rx_mix_tx0_mux =
+	SOC_DAPM_ENUM("RX MIX TX0 MUX Mux", rx_mix_tx0_mux_enum);
+
+static const struct snd_kcontrol_new rx_mix_tx1_mux =
+	SOC_DAPM_ENUM("RX MIX TX1 MUX Mux", rx_mix_tx1_mux_enum);
+
+static const struct snd_kcontrol_new rx_mix_tx2_mux =
+	SOC_DAPM_ENUM("RX MIX TX2 MUX Mux", rx_mix_tx2_mux_enum);
+
+static const struct snd_kcontrol_new rx_mix_tx3_mux =
+	SOC_DAPM_ENUM("RX MIX TX3 MUX Mux", rx_mix_tx3_mux_enum);
+
+static const struct snd_kcontrol_new rx_mix_tx4_mux =
+	SOC_DAPM_ENUM("RX MIX TX4 MUX Mux", rx_mix_tx4_mux_enum);
+
+static const struct snd_kcontrol_new rx_mix_tx5_mux =
+	SOC_DAPM_ENUM("RX MIX TX5 MUX Mux", rx_mix_tx5_mux_enum);
+
+static const struct snd_kcontrol_new rx_mix_tx6_mux =
+	SOC_DAPM_ENUM("RX MIX TX6 MUX Mux", rx_mix_tx6_mux_enum);
+
+static const struct snd_kcontrol_new rx_mix_tx7_mux =
+	SOC_DAPM_ENUM("RX MIX TX7 MUX Mux", rx_mix_tx7_mux_enum);
+
+static const struct snd_kcontrol_new rx_mix_tx8_mux =
+	SOC_DAPM_ENUM("RX MIX TX8 MUX Mux", rx_mix_tx8_mux_enum);
+
+static const struct snd_kcontrol_new iir0_inp0_mux =
+	SOC_DAPM_ENUM("IIR0 INP0 Mux", iir0_inp0_mux_enum);
+
+static const struct snd_kcontrol_new iir0_inp1_mux =
+	SOC_DAPM_ENUM("IIR0 INP1 Mux", iir0_inp1_mux_enum);
+
+static const struct snd_kcontrol_new iir0_inp2_mux =
+	SOC_DAPM_ENUM("IIR0 INP2 Mux", iir0_inp2_mux_enum);
+
+static const struct snd_kcontrol_new iir0_inp3_mux =
+	SOC_DAPM_ENUM("IIR0 INP3 Mux", iir0_inp3_mux_enum);
+
+static const struct snd_kcontrol_new iir1_inp0_mux =
+	SOC_DAPM_ENUM("IIR1 INP0 Mux", iir1_inp0_mux_enum);
+
+static const struct snd_kcontrol_new iir1_inp1_mux =
+	SOC_DAPM_ENUM("IIR1 INP1 Mux", iir1_inp1_mux_enum);
+
+static const struct snd_kcontrol_new iir1_inp2_mux =
+	SOC_DAPM_ENUM("IIR1 INP2 Mux", iir1_inp2_mux_enum);
+
+static const struct snd_kcontrol_new iir1_inp3_mux =
+	SOC_DAPM_ENUM("IIR1 INP3 Mux", iir1_inp3_mux_enum);
+
+static const struct snd_kcontrol_new rx_int0_interp_mux =
+	SOC_DAPM_ENUM("RX INT0 INTERP Mux", rx_int0_interp_mux_enum);
+
+static const struct snd_kcontrol_new rx_int1_interp_mux =
+	SOC_DAPM_ENUM("RX INT1 INTERP Mux", rx_int1_interp_mux_enum);
+
+static const struct snd_kcontrol_new rx_int2_interp_mux =
+	SOC_DAPM_ENUM("RX INT2 INTERP Mux", rx_int2_interp_mux_enum);
+
+static const struct snd_kcontrol_new rx_int3_interp_mux =
+	SOC_DAPM_ENUM("RX INT3 INTERP Mux", rx_int3_interp_mux_enum);
+
+static const struct snd_kcontrol_new rx_int4_interp_mux =
+	SOC_DAPM_ENUM("RX INT4 INTERP Mux", rx_int4_interp_mux_enum);
+
+static const struct snd_kcontrol_new rx_int5_interp_mux =
+	SOC_DAPM_ENUM("RX INT5 INTERP Mux", rx_int5_interp_mux_enum);
+
+static const struct snd_kcontrol_new rx_int6_interp_mux =
+	SOC_DAPM_ENUM("RX INT6 INTERP Mux", rx_int6_interp_mux_enum);
+
+static const struct snd_kcontrol_new rx_int7_interp_mux =
+	SOC_DAPM_ENUM("RX INT7 INTERP Mux", rx_int7_interp_mux_enum);
+
+static const struct snd_kcontrol_new rx_int8_interp_mux =
+	SOC_DAPM_ENUM("RX INT8 INTERP Mux", rx_int8_interp_mux_enum);
+
+static const struct snd_kcontrol_new mad_sel_mux =
+	SOC_DAPM_ENUM("MAD_SEL MUX Mux", mad_sel_enum);
+
+static const struct snd_kcontrol_new aif4_mad_switch =
+	SOC_DAPM_SINGLE("Switch", WCD9335_CPE_SS_CFG, 5, 1, 0);
+
+static const struct snd_kcontrol_new mad_brdcst_switch =
+	SOC_DAPM_SINGLE("Switch", WCD9335_CPE_SS_CFG, 6, 1, 0);
+
+static const struct snd_kcontrol_new aif4_switch_mixer_controls =
+	SOC_SINGLE_EXT("Switch", SND_SOC_NOPM,
+			0, 1, 0, tasha_codec_aif4_mixer_switch_get,
+			tasha_codec_aif4_mixer_switch_put);
+
+static const struct snd_kcontrol_new anc_hphl_switch =
+	SOC_DAPM_SINGLE("Switch", SND_SOC_NOPM, 0, 1, 0);
+
+static const struct snd_kcontrol_new anc_hphr_switch =
+	SOC_DAPM_SINGLE("Switch", SND_SOC_NOPM, 0, 1, 0);
+
+static const struct snd_kcontrol_new anc_ear_switch =
+	SOC_DAPM_SINGLE("Switch", SND_SOC_NOPM, 0, 1, 0);
+
+static const struct snd_kcontrol_new anc_ear_spkr_switch =
+	SOC_DAPM_SINGLE("Switch", SND_SOC_NOPM, 0, 1, 0);
+
+static const struct snd_kcontrol_new anc_lineout1_switch =
+	SOC_DAPM_SINGLE("Switch", SND_SOC_NOPM, 0, 1, 0);
+
+static const struct snd_kcontrol_new anc_lineout2_switch =
+	SOC_DAPM_SINGLE("Switch", SND_SOC_NOPM, 0, 1, 0);
+
+static const struct snd_kcontrol_new anc_spkr_pa_switch =
+	SOC_DAPM_SINGLE("Switch", SND_SOC_NOPM, 0, 1, 0);
+
+static const struct snd_kcontrol_new adc_us_mux0_switch =
+	SOC_DAPM_SINGLE("US_Switch", SND_SOC_NOPM, 0, 1, 0);
+
+static const struct snd_kcontrol_new adc_us_mux1_switch =
+	SOC_DAPM_SINGLE("US_Switch", SND_SOC_NOPM, 0, 1, 0);
+
+static const struct snd_kcontrol_new adc_us_mux2_switch =
+	SOC_DAPM_SINGLE("US_Switch", SND_SOC_NOPM, 0, 1, 0);
+
+static const struct snd_kcontrol_new adc_us_mux3_switch =
+	SOC_DAPM_SINGLE("US_Switch", SND_SOC_NOPM, 0, 1, 0);
+
+static const struct snd_kcontrol_new adc_us_mux4_switch =
+	SOC_DAPM_SINGLE("US_Switch", SND_SOC_NOPM, 0, 1, 0);
+
+static const struct snd_kcontrol_new adc_us_mux5_switch =
+	SOC_DAPM_SINGLE("US_Switch", SND_SOC_NOPM, 0, 1, 0);
+
+static const struct snd_kcontrol_new adc_us_mux6_switch =
+	SOC_DAPM_SINGLE("US_Switch", SND_SOC_NOPM, 0, 1, 0);
+
+static const struct snd_kcontrol_new adc_us_mux7_switch =
+	SOC_DAPM_SINGLE("US_Switch", SND_SOC_NOPM, 0, 1, 0);
+
+static const struct snd_kcontrol_new adc_us_mux8_switch =
+	SOC_DAPM_SINGLE("US_Switch", SND_SOC_NOPM, 0, 1, 0);
+
+static const struct snd_kcontrol_new anc0_fb_mux =
+	SOC_DAPM_ENUM("ANC0 FB MUX Mux", anc0_fb_mux_enum);
+
+static const struct snd_kcontrol_new anc1_fb_mux =
+	SOC_DAPM_ENUM("ANC1 FB MUX Mux", anc1_fb_mux_enum);
+
+static int tasha_codec_ec_buf_mux_enable(struct snd_soc_dapm_widget *w,
+					 struct snd_kcontrol *kcontrol,
+					 int event)
+{
+	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+
+	dev_dbg(codec->dev, "%s: event = %d name = %s\n",
+		__func__, event, w->name);
+
+	switch (event) {
+	case SND_SOC_DAPM_POST_PMU:
+		snd_soc_write(codec, WCD9335_CPE_SS_EC_BUF_INT_PERIOD, 0x3B);
+		snd_soc_update_bits(codec, WCD9335_CPE_SS_CFG, 0x08, 0x08);
+		snd_soc_update_bits(codec, WCD9335_CDC_IF_ROUTER_TX_MUX_CFG0,
+				    0x08, 0x08);
+		break;
+	case SND_SOC_DAPM_POST_PMD:
+		snd_soc_update_bits(codec, WCD9335_CDC_IF_ROUTER_TX_MUX_CFG0,
+				    0x08, 0x00);
+		snd_soc_update_bits(codec, WCD9335_CPE_SS_CFG, 0x08, 0x00);
+		snd_soc_write(codec, WCD9335_CPE_SS_EC_BUF_INT_PERIOD, 0x00);
+		break;
+	}
+
+	return 0;
+}
+
+static const char * const ec_buf_mux_text[] = {
+	"ZERO", "RXMIXEC", "SB_RX0", "SB_RX1", "SB_RX2", "SB_RX3",
+	"I2S_RX_SD0_L", "I2S_RX_SD0_R", "I2S_RX_SD1_L", "I2S_RX_SD1_R",
+	"DEC1"
+};
+
+static SOC_ENUM_SINGLE_DECL(ec_buf_mux_enum, WCD9335_CPE_SS_US_EC_MUX_CFG,
+			    0, ec_buf_mux_text);
+
+static const struct snd_kcontrol_new ec_buf_mux =
+	SOC_DAPM_ENUM("EC BUF Mux", ec_buf_mux_enum);
+
+static const struct snd_soc_dapm_widget tasha_dapm_widgets[] = {
+	SND_SOC_DAPM_OUTPUT("EAR"),
+	SND_SOC_DAPM_OUTPUT("ANC EAR"),
+	SND_SOC_DAPM_AIF_IN_E("AIF1 PB", "AIF1 Playback", 0, SND_SOC_NOPM,
+				AIF1_PB, 0, tasha_codec_enable_slimrx,
+				SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD |
+				SND_SOC_DAPM_POST_PMD),
+	SND_SOC_DAPM_AIF_IN_E("AIF2 PB", "AIF2 Playback", 0, SND_SOC_NOPM,
+				AIF2_PB, 0, tasha_codec_enable_slimrx,
+				SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD |
+				SND_SOC_DAPM_POST_PMD),
+	SND_SOC_DAPM_AIF_IN_E("AIF3 PB", "AIF3 Playback", 0, SND_SOC_NOPM,
+				AIF3_PB, 0, tasha_codec_enable_slimrx,
+				SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD |
+				SND_SOC_DAPM_POST_PMD),
+	SND_SOC_DAPM_AIF_IN_E("AIF4 PB", "AIF4 Playback", 0, SND_SOC_NOPM,
+				AIF4_PB, 0, tasha_codec_enable_slimrx,
+				SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD |
+				SND_SOC_DAPM_POST_PMD),
+	SND_SOC_DAPM_AIF_IN_E("AIF MIX1 PB", "AIF Mix Playback", 0,
+			       SND_SOC_NOPM, AIF_MIX1_PB, 0,
+			       tasha_codec_enable_slimrx,
+			       SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD |
+			       SND_SOC_DAPM_POST_PMD),
+
+	SND_SOC_DAPM_MUX("SLIM RX0 MUX", SND_SOC_NOPM, TASHA_RX0, 0,
+				&slim_rx_mux[TASHA_RX0]),
+	SND_SOC_DAPM_MUX("SLIM RX1 MUX", SND_SOC_NOPM, TASHA_RX1, 0,
+				&slim_rx_mux[TASHA_RX1]),
+	SND_SOC_DAPM_MUX("SLIM RX2 MUX", SND_SOC_NOPM, TASHA_RX2, 0,
+				&slim_rx_mux[TASHA_RX2]),
+	SND_SOC_DAPM_MUX("SLIM RX3 MUX", SND_SOC_NOPM, TASHA_RX3, 0,
+				&slim_rx_mux[TASHA_RX3]),
+	SND_SOC_DAPM_MUX("SLIM RX4 MUX", SND_SOC_NOPM, TASHA_RX4, 0,
+				&slim_rx_mux[TASHA_RX4]),
+	SND_SOC_DAPM_MUX("SLIM RX5 MUX", SND_SOC_NOPM, TASHA_RX5, 0,
+				&slim_rx_mux[TASHA_RX5]),
+	SND_SOC_DAPM_MUX("SLIM RX6 MUX", SND_SOC_NOPM, TASHA_RX6, 0,
+				&slim_rx_mux[TASHA_RX6]),
+	SND_SOC_DAPM_MUX("SLIM RX7 MUX", SND_SOC_NOPM, TASHA_RX7, 0,
+				&slim_rx_mux[TASHA_RX7]),
+
+	SND_SOC_DAPM_MIXER("SLIM RX0", SND_SOC_NOPM, 0, 0, NULL, 0),
+	SND_SOC_DAPM_MIXER("SLIM RX1", SND_SOC_NOPM, 0, 0, NULL, 0),
+	SND_SOC_DAPM_MIXER("SLIM RX2", SND_SOC_NOPM, 0, 0, NULL, 0),
+	SND_SOC_DAPM_MIXER("SLIM RX3", SND_SOC_NOPM, 0, 0, NULL, 0),
+	SND_SOC_DAPM_MIXER("SLIM RX4", SND_SOC_NOPM, 0, 0, NULL, 0),
+	SND_SOC_DAPM_MIXER("SLIM RX5", SND_SOC_NOPM, 0, 0, NULL, 0),
+	SND_SOC_DAPM_MIXER("SLIM RX6", SND_SOC_NOPM, 0, 0, NULL, 0),
+	SND_SOC_DAPM_MIXER("SLIM RX7", SND_SOC_NOPM, 0, 0, NULL, 0),
+
+	SND_SOC_DAPM_MUX_E("SPL SRC0 MUX", SND_SOC_NOPM, SPLINE_SRC0, 0,
+			 &spl_src0_mux, tasha_codec_enable_spline_resampler,
+			 SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+	SND_SOC_DAPM_MUX_E("SPL SRC1 MUX", SND_SOC_NOPM, SPLINE_SRC1, 0,
+			 &spl_src1_mux, tasha_codec_enable_spline_resampler,
+			 SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+	SND_SOC_DAPM_MUX_E("SPL SRC2 MUX", SND_SOC_NOPM, SPLINE_SRC2, 0,
+			 &spl_src2_mux, tasha_codec_enable_spline_resampler,
+			 SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+	SND_SOC_DAPM_MUX_E("SPL SRC3 MUX", SND_SOC_NOPM, SPLINE_SRC3, 0,
+			 &spl_src3_mux, tasha_codec_enable_spline_resampler,
+			 SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+
+	SND_SOC_DAPM_MUX_E("RX INT0_2 MUX", WCD9335_CDC_RX0_RX_PATH_MIX_CTL,
+			5, 0, &rx_int0_2_mux, tasha_codec_enable_mix_path,
+			SND_SOC_DAPM_POST_PMU),
+	SND_SOC_DAPM_MUX_E("RX INT1_2 MUX", WCD9335_CDC_RX1_RX_PATH_MIX_CTL,
+			5, 0, &rx_int1_2_mux, tasha_codec_enable_mix_path,
+			SND_SOC_DAPM_POST_PMU),
+	SND_SOC_DAPM_MUX_E("RX INT2_2 MUX", WCD9335_CDC_RX2_RX_PATH_MIX_CTL,
+			5, 0, &rx_int2_2_mux, tasha_codec_enable_mix_path,
+			SND_SOC_DAPM_POST_PMU),
+	SND_SOC_DAPM_MUX_E("RX INT3_2 MUX", WCD9335_CDC_RX3_RX_PATH_MIX_CTL,
+			5, 0, &rx_int3_2_mux, tasha_codec_enable_mix_path,
+			SND_SOC_DAPM_POST_PMU),
+	SND_SOC_DAPM_MUX_E("RX INT4_2 MUX", WCD9335_CDC_RX4_RX_PATH_MIX_CTL,
+			5, 0, &rx_int4_2_mux, tasha_codec_enable_mix_path,
+			SND_SOC_DAPM_POST_PMU),
+	SND_SOC_DAPM_MUX_E("RX INT5_2 MUX", WCD9335_CDC_RX5_RX_PATH_MIX_CTL,
+			5, 0, &rx_int5_2_mux, tasha_codec_enable_mix_path,
+			SND_SOC_DAPM_POST_PMU),
+	SND_SOC_DAPM_MUX_E("RX INT6_2 MUX", WCD9335_CDC_RX6_RX_PATH_MIX_CTL,
+			5, 0, &rx_int6_2_mux, tasha_codec_enable_mix_path,
+			SND_SOC_DAPM_POST_PMU),
+	SND_SOC_DAPM_MUX_E("RX INT7_2 MUX", WCD9335_CDC_RX7_RX_PATH_MIX_CTL,
+			5, 0, &rx_int7_2_mux, tasha_codec_enable_mix_path,
+			SND_SOC_DAPM_POST_PMU),
+	SND_SOC_DAPM_MUX_E("RX INT8_2 MUX", WCD9335_CDC_RX8_RX_PATH_MIX_CTL,
+			5, 0, &rx_int8_2_mux, tasha_codec_enable_mix_path,
+			SND_SOC_DAPM_POST_PMU),
+
+	SND_SOC_DAPM_MUX("RX INT0_1 MIX1 INP0", SND_SOC_NOPM, 0, 0,
+		&rx_int0_1_mix_inp0_mux),
+	SND_SOC_DAPM_MUX("RX INT0_1 MIX1 INP1", SND_SOC_NOPM, 0, 0,
+		&rx_int0_1_mix_inp1_mux),
+	SND_SOC_DAPM_MUX("RX INT0_1 MIX1 INP2", SND_SOC_NOPM, 0, 0,
+		&rx_int0_1_mix_inp2_mux),
+	SND_SOC_DAPM_MUX("RX INT1_1 MIX1 INP0", SND_SOC_NOPM, 0, 0,
+		&rx_int1_1_mix_inp0_mux),
+	SND_SOC_DAPM_MUX("RX INT1_1 MIX1 INP1", SND_SOC_NOPM, 0, 0,
+		&rx_int1_1_mix_inp1_mux),
+	SND_SOC_DAPM_MUX("RX INT1_1 MIX1 INP2", SND_SOC_NOPM, 0, 0,
+		&rx_int1_1_mix_inp2_mux),
+	SND_SOC_DAPM_MUX("RX INT2_1 MIX1 INP0", SND_SOC_NOPM, 0, 0,
+		&rx_int2_1_mix_inp0_mux),
+	SND_SOC_DAPM_MUX("RX INT2_1 MIX1 INP1", SND_SOC_NOPM, 0, 0,
+		&rx_int2_1_mix_inp1_mux),
+	SND_SOC_DAPM_MUX("RX INT2_1 MIX1 INP2", SND_SOC_NOPM, 0, 0,
+		&rx_int2_1_mix_inp2_mux),
+	SND_SOC_DAPM_MUX("RX INT3_1 MIX1 INP0", SND_SOC_NOPM, 0, 0,
+		&rx_int3_1_mix_inp0_mux),
+	SND_SOC_DAPM_MUX("RX INT3_1 MIX1 INP1", SND_SOC_NOPM, 0, 0,
+		&rx_int3_1_mix_inp1_mux),
+	SND_SOC_DAPM_MUX("RX INT3_1 MIX1 INP2", SND_SOC_NOPM, 0, 0,
+		&rx_int3_1_mix_inp2_mux),
+	SND_SOC_DAPM_MUX("RX INT4_1 MIX1 INP0", SND_SOC_NOPM, 0, 0,
+		&rx_int4_1_mix_inp0_mux),
+	SND_SOC_DAPM_MUX("RX INT4_1 MIX1 INP1", SND_SOC_NOPM, 0, 0,
+		&rx_int4_1_mix_inp1_mux),
+	SND_SOC_DAPM_MUX("RX INT4_1 MIX1 INP2", SND_SOC_NOPM, 0, 0,
+		&rx_int4_1_mix_inp2_mux),
+	SND_SOC_DAPM_MUX("RX INT5_1 MIX1 INP0", SND_SOC_NOPM, 0, 0,
+		&rx_int5_1_mix_inp0_mux),
+	SND_SOC_DAPM_MUX("RX INT5_1 MIX1 INP1", SND_SOC_NOPM, 0, 0,
+		&rx_int5_1_mix_inp1_mux),
+	SND_SOC_DAPM_MUX("RX INT5_1 MIX1 INP2", SND_SOC_NOPM, 0, 0,
+		&rx_int5_1_mix_inp2_mux),
+	SND_SOC_DAPM_MUX("RX INT6_1 MIX1 INP0", SND_SOC_NOPM, 0, 0,
+		&rx_int6_1_mix_inp0_mux),
+	SND_SOC_DAPM_MUX("RX INT6_1 MIX1 INP1", SND_SOC_NOPM, 0, 0,
+		&rx_int6_1_mix_inp1_mux),
+	SND_SOC_DAPM_MUX("RX INT6_1 MIX1 INP2", SND_SOC_NOPM, 0, 0,
+		&rx_int6_1_mix_inp2_mux),
+	SND_SOC_DAPM_MUX_E("RX INT7_1 MIX1 INP0", SND_SOC_NOPM, 0, 0,
+		&rx_int7_1_mix_inp0_mux, tasha_codec_enable_swr,
+		SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+	SND_SOC_DAPM_MUX_E("RX INT7_1 MIX1 INP1", SND_SOC_NOPM, 0, 0,
+		&rx_int7_1_mix_inp1_mux, tasha_codec_enable_swr,
+		SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+	SND_SOC_DAPM_MUX_E("RX INT7_1 MIX1 INP2", SND_SOC_NOPM, 0, 0,
+		&rx_int7_1_mix_inp2_mux, tasha_codec_enable_swr,
+		SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+	SND_SOC_DAPM_MUX_E("RX INT8_1 MIX1 INP0", SND_SOC_NOPM, 0, 0,
+		&rx_int8_1_mix_inp0_mux, tasha_codec_enable_swr,
+		SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+	SND_SOC_DAPM_MUX_E("RX INT8_1 MIX1 INP1", SND_SOC_NOPM, 0, 0,
+		&rx_int8_1_mix_inp1_mux, tasha_codec_enable_swr,
+		SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+	SND_SOC_DAPM_MUX_E("RX INT8_1 MIX1 INP2", SND_SOC_NOPM, 0, 0,
+		&rx_int8_1_mix_inp2_mux, tasha_codec_enable_swr,
+		SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+
+	SND_SOC_DAPM_MIXER("RX INT0_1 MIX1", SND_SOC_NOPM, 0, 0, NULL, 0),
+	SND_SOC_DAPM_MIXER("RX INT0 SEC MIX", SND_SOC_NOPM, 0, 0, NULL, 0),
+	SND_SOC_DAPM_MIXER("RX INT1_1 MIX1", SND_SOC_NOPM, 0, 0, NULL, 0),
+	SND_SOC_DAPM_MIXER("RX INT1 SPLINE MIX", SND_SOC_NOPM, 0, 0,
+			rx_int1_spline_mix_switch,
+			ARRAY_SIZE(rx_int1_spline_mix_switch)),
+	SND_SOC_DAPM_MIXER("RX INT1 SEC MIX", SND_SOC_NOPM, 0, 0, NULL, 0),
+	SND_SOC_DAPM_MIXER("RX INT2_1 MIX1", SND_SOC_NOPM, 0, 0, NULL, 0),
+	SND_SOC_DAPM_MIXER("RX INT2 SPLINE MIX", SND_SOC_NOPM, 0, 0,
+			rx_int2_spline_mix_switch,
+			ARRAY_SIZE(rx_int2_spline_mix_switch)),
+	SND_SOC_DAPM_MIXER("RX INT2 SEC MIX", SND_SOC_NOPM, 0, 0, NULL, 0),
+	SND_SOC_DAPM_MIXER("RX INT3_1 MIX1", SND_SOC_NOPM, 0, 0, NULL, 0),
+	SND_SOC_DAPM_MIXER("RX INT3 SPLINE MIX", SND_SOC_NOPM, 0, 0,
+			rx_int3_spline_mix_switch,
+			ARRAY_SIZE(rx_int3_spline_mix_switch)),
+	SND_SOC_DAPM_MIXER("RX INT3 SEC MIX", SND_SOC_NOPM, 0, 0, NULL, 0),
+	SND_SOC_DAPM_MIXER("RX INT4_1 MIX1", SND_SOC_NOPM, 0, 0, NULL, 0),
+	SND_SOC_DAPM_MIXER("RX INT4 SPLINE MIX", SND_SOC_NOPM, 0, 0,
+			rx_int4_spline_mix_switch,
+			ARRAY_SIZE(rx_int4_spline_mix_switch)),
+	SND_SOC_DAPM_MIXER("RX INT4 SEC MIX", SND_SOC_NOPM, 0, 0, NULL, 0),
+	SND_SOC_DAPM_MIXER("RX INT5_1 MIX1", SND_SOC_NOPM, 0, 0, NULL, 0),
+	SND_SOC_DAPM_MIXER("RX INT5 SPLINE MIX", SND_SOC_NOPM, 0, 0,
+			rx_int5_spline_mix_switch,
+			ARRAY_SIZE(rx_int5_spline_mix_switch)),
+	SND_SOC_DAPM_MIXER("RX INT5 SEC MIX", SND_SOC_NOPM, 0, 0, NULL, 0),
+
+	SND_SOC_DAPM_MIXER("RX INT6_1 MIX1", SND_SOC_NOPM, 0, 0, NULL, 0),
+	SND_SOC_DAPM_MIXER("RX INT6 SPLINE MIX", SND_SOC_NOPM, 0, 0,
+			rx_int6_spline_mix_switch,
+			ARRAY_SIZE(rx_int6_spline_mix_switch)),
+	SND_SOC_DAPM_MIXER("RX INT6 SEC MIX", SND_SOC_NOPM, 0, 0, NULL, 0),
+
+	SND_SOC_DAPM_MIXER("RX INT7_1 MIX1", SND_SOC_NOPM, 0, 0, NULL, 0),
+	SND_SOC_DAPM_MIXER("RX INT7 SEC MIX", SND_SOC_NOPM, 0, 0, NULL, 0),
+	SND_SOC_DAPM_MIXER("RX INT7 SPLINE MIX", SND_SOC_NOPM, 0, 0,
+			rx_int7_spline_mix_switch,
+			ARRAY_SIZE(rx_int7_spline_mix_switch)),
+
+	SND_SOC_DAPM_MIXER("RX INT8_1 MIX1", SND_SOC_NOPM, 0, 0, NULL, 0),
+	SND_SOC_DAPM_MIXER("RX INT8 SEC MIX", SND_SOC_NOPM, 0, 0, NULL, 0),
+	SND_SOC_DAPM_MIXER("RX INT8 SPLINE MIX", SND_SOC_NOPM, 0, 0,
+			rx_int8_spline_mix_switch,
+			ARRAY_SIZE(rx_int8_spline_mix_switch)),
+
+	SND_SOC_DAPM_MIXER("RX INT0 MIX2", SND_SOC_NOPM, 0, 0, NULL, 0),
+	SND_SOC_DAPM_MIXER("RX INT1 MIX2", SND_SOC_NOPM, 0, 0, NULL, 0),
+	SND_SOC_DAPM_MIXER("RX INT2 MIX2", SND_SOC_NOPM, 0, 0, NULL, 0),
+	SND_SOC_DAPM_MIXER("RX INT3 MIX2", SND_SOC_NOPM, 0, 0, NULL, 0),
+	SND_SOC_DAPM_MIXER("RX INT4 MIX2", SND_SOC_NOPM, 0, 0, NULL, 0),
+	SND_SOC_DAPM_MIXER("RX INT5 MIX2", SND_SOC_NOPM, 0, 0, NULL, 0),
+	SND_SOC_DAPM_MIXER("RX INT6 MIX2", SND_SOC_NOPM, 0, 0, NULL, 0),
+	SND_SOC_DAPM_MIXER("RX INT7 MIX2", SND_SOC_NOPM, 0, 0, NULL, 0),
+	SND_SOC_DAPM_MIXER_E("RX INT7 CHAIN", SND_SOC_NOPM, 0, 0,
+			NULL, 0, tasha_codec_spk_boost_event,
+			SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+	SND_SOC_DAPM_MIXER_E("RX INT8 CHAIN", SND_SOC_NOPM, 0, 0,
+			NULL, 0, tasha_codec_spk_boost_event,
+			SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+
+	SND_SOC_DAPM_MIXER_E("RX INT5 VBAT", SND_SOC_NOPM, 0, 0,
+			rx_int5_vbat_mix_switch,
+			ARRAY_SIZE(rx_int5_vbat_mix_switch),
+			tasha_codec_vbat_enable_event,
+			SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+	SND_SOC_DAPM_MIXER_E("RX INT6 VBAT", SND_SOC_NOPM, 0, 0,
+			rx_int6_vbat_mix_switch,
+			ARRAY_SIZE(rx_int6_vbat_mix_switch),
+			tasha_codec_vbat_enable_event,
+			SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+	SND_SOC_DAPM_MIXER_E("RX INT7 VBAT", SND_SOC_NOPM, 0, 0,
+			rx_int7_vbat_mix_switch,
+			ARRAY_SIZE(rx_int7_vbat_mix_switch),
+			tasha_codec_vbat_enable_event,
+			SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+	SND_SOC_DAPM_MIXER_E("RX INT8 VBAT", SND_SOC_NOPM, 0, 0,
+			rx_int8_vbat_mix_switch,
+			ARRAY_SIZE(rx_int8_vbat_mix_switch),
+			tasha_codec_vbat_enable_event,
+			SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+
+	SND_SOC_DAPM_MUX("RX INT0 MIX2 INP", WCD9335_CDC_RX0_RX_PATH_CFG1, 4,
+			   0, &rx_int0_mix2_inp_mux),
+	SND_SOC_DAPM_MUX("RX INT1 MIX2 INP", WCD9335_CDC_RX1_RX_PATH_CFG1, 4,
+			   0, &rx_int1_mix2_inp_mux),
+	SND_SOC_DAPM_MUX("RX INT2 MIX2 INP", WCD9335_CDC_RX2_RX_PATH_CFG1, 4,
+			   0, &rx_int2_mix2_inp_mux),
+	SND_SOC_DAPM_MUX("RX INT3 MIX2 INP", WCD9335_CDC_RX3_RX_PATH_CFG1, 4,
+			   0, &rx_int3_mix2_inp_mux),
+	SND_SOC_DAPM_MUX("RX INT4 MIX2 INP", WCD9335_CDC_RX4_RX_PATH_CFG1, 4,
+			   0, &rx_int4_mix2_inp_mux),
+	SND_SOC_DAPM_MUX("RX INT7 MIX2 INP", WCD9335_CDC_RX7_RX_PATH_CFG1, 4,
+			   0, &rx_int7_mix2_inp_mux),
+
+	SND_SOC_DAPM_MUX("SLIM TX0 MUX", SND_SOC_NOPM, TASHA_TX0, 0,
+		&sb_tx0_mux),
+	SND_SOC_DAPM_MUX("SLIM TX1 MUX", SND_SOC_NOPM, TASHA_TX1, 0,
+		&sb_tx1_mux),
+	SND_SOC_DAPM_MUX("SLIM TX2 MUX", SND_SOC_NOPM, TASHA_TX2, 0,
+		&sb_tx2_mux),
+	SND_SOC_DAPM_MUX("SLIM TX3 MUX", SND_SOC_NOPM, TASHA_TX3, 0,
+		&sb_tx3_mux),
+	SND_SOC_DAPM_MUX("SLIM TX4 MUX", SND_SOC_NOPM, TASHA_TX4, 0,
+		&sb_tx4_mux),
+	SND_SOC_DAPM_MUX("SLIM TX5 MUX", SND_SOC_NOPM, TASHA_TX5, 0,
+		&sb_tx5_mux),
+	SND_SOC_DAPM_MUX("SLIM TX6 MUX", SND_SOC_NOPM, TASHA_TX6, 0,
+		&sb_tx6_mux),
+	SND_SOC_DAPM_MUX("SLIM TX7 MUX", SND_SOC_NOPM, TASHA_TX7, 0,
+		&sb_tx7_mux),
+	SND_SOC_DAPM_MUX("SLIM TX8 MUX", SND_SOC_NOPM, TASHA_TX8, 0,
+		&sb_tx8_mux),
+	SND_SOC_DAPM_MUX("SLIM TX9 MUX", SND_SOC_NOPM, TASHA_TX9, 0,
+		&sb_tx9_mux),
+	SND_SOC_DAPM_MUX("SLIM TX10 MUX", SND_SOC_NOPM, TASHA_TX10, 0,
+		&sb_tx10_mux),
+	SND_SOC_DAPM_MUX("SLIM TX11 MUX", SND_SOC_NOPM, TASHA_TX11, 0,
+		&sb_tx11_mux),
+	SND_SOC_DAPM_MUX("SLIM TX11 INP1 MUX", SND_SOC_NOPM, TASHA_TX11, 0,
+		&sb_tx11_inp1_mux),
+	SND_SOC_DAPM_MUX("SLIM TX13 MUX", SND_SOC_NOPM, TASHA_TX13, 0,
+		&sb_tx13_mux),
+	SND_SOC_DAPM_MUX("TX13 INP MUX", SND_SOC_NOPM, 0, 0,
+			 &tx13_inp_mux),
+
+	SND_SOC_DAPM_MUX_E("ADC MUX0", WCD9335_CDC_TX0_TX_PATH_CTL, 5, 0,
+			   &tx_adc_mux0, tasha_codec_enable_dec,
+			   SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
+			   SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD),
+
+	SND_SOC_DAPM_MUX_E("ADC MUX1", WCD9335_CDC_TX1_TX_PATH_CTL, 5, 0,
+			   &tx_adc_mux1, tasha_codec_enable_dec,
+			   SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
+			   SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD),
+
+	SND_SOC_DAPM_MUX_E("ADC MUX2", WCD9335_CDC_TX2_TX_PATH_CTL, 5, 0,
+			   &tx_adc_mux2, tasha_codec_enable_dec,
+			   SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
+			   SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD),
+
+	SND_SOC_DAPM_MUX_E("ADC MUX3", WCD9335_CDC_TX3_TX_PATH_CTL, 5, 0,
+			   &tx_adc_mux3, tasha_codec_enable_dec,
+			   SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
+			   SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD),
+
+	SND_SOC_DAPM_MUX_E("ADC MUX4", WCD9335_CDC_TX4_TX_PATH_CTL, 5, 0,
+			   &tx_adc_mux4, tasha_codec_enable_dec,
+			   SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
+			   SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD),
+
+	SND_SOC_DAPM_MUX_E("ADC MUX5", WCD9335_CDC_TX5_TX_PATH_CTL, 5, 0,
+			   &tx_adc_mux5, tasha_codec_enable_dec,
+			   SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
+			   SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD),
+
+	SND_SOC_DAPM_MUX_E("ADC MUX6", WCD9335_CDC_TX6_TX_PATH_CTL, 5, 0,
+			   &tx_adc_mux6, tasha_codec_enable_dec,
+			   SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
+			   SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD),
+
+	SND_SOC_DAPM_MUX_E("ADC MUX7", WCD9335_CDC_TX7_TX_PATH_CTL, 5, 0,
+			   &tx_adc_mux7, tasha_codec_enable_dec,
+			   SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
+			   SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD),
+
+	SND_SOC_DAPM_MUX_E("ADC MUX8", WCD9335_CDC_TX8_TX_PATH_CTL, 5, 0,
+			   &tx_adc_mux8, tasha_codec_enable_dec,
+			   SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
+			   SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD),
+
+	SND_SOC_DAPM_MUX_E("ADC MUX10", SND_SOC_NOPM, 10, 0,
+			 &tx_adc_mux10, tasha_codec_tx_adc_cfg,
+			 SND_SOC_DAPM_POST_PMU),
+
+	SND_SOC_DAPM_MUX_E("ADC MUX11", SND_SOC_NOPM, 11, 0,
+			 &tx_adc_mux11, tasha_codec_tx_adc_cfg,
+			 SND_SOC_DAPM_POST_PMU),
+
+	SND_SOC_DAPM_MUX_E("ADC MUX12", SND_SOC_NOPM, 12, 0,
+			 &tx_adc_mux12, tasha_codec_tx_adc_cfg,
+			 SND_SOC_DAPM_POST_PMU),
+
+	SND_SOC_DAPM_MUX_E("ADC MUX13", SND_SOC_NOPM, 13, 0,
+			 &tx_adc_mux13, tasha_codec_tx_adc_cfg,
+			 SND_SOC_DAPM_POST_PMU),
+
+	SND_SOC_DAPM_MUX("DMIC MUX0", SND_SOC_NOPM, 0, 0,
+		&tx_dmic_mux0),
+	SND_SOC_DAPM_MUX("DMIC MUX1", SND_SOC_NOPM, 0, 0,
+		&tx_dmic_mux1),
+	SND_SOC_DAPM_MUX("DMIC MUX2", SND_SOC_NOPM, 0, 0,
+		&tx_dmic_mux2),
+	SND_SOC_DAPM_MUX("DMIC MUX3", SND_SOC_NOPM, 0, 0,
+		&tx_dmic_mux3),
+	SND_SOC_DAPM_MUX("DMIC MUX4", SND_SOC_NOPM, 0, 0,
+		&tx_dmic_mux4),
+	SND_SOC_DAPM_MUX("DMIC MUX5", SND_SOC_NOPM, 0, 0,
+		&tx_dmic_mux5),
+	SND_SOC_DAPM_MUX("DMIC MUX6", SND_SOC_NOPM, 0, 0,
+		&tx_dmic_mux6),
+	SND_SOC_DAPM_MUX("DMIC MUX7", SND_SOC_NOPM, 0, 0,
+		&tx_dmic_mux7),
+	SND_SOC_DAPM_MUX("DMIC MUX8", SND_SOC_NOPM, 0, 0,
+		&tx_dmic_mux8),
+	SND_SOC_DAPM_MUX("DMIC MUX10", SND_SOC_NOPM, 0, 0,
+		&tx_dmic_mux10),
+	SND_SOC_DAPM_MUX("DMIC MUX11", SND_SOC_NOPM, 0, 0,
+		&tx_dmic_mux11),
+	SND_SOC_DAPM_MUX("DMIC MUX12", SND_SOC_NOPM, 0, 0,
+		&tx_dmic_mux12),
+	SND_SOC_DAPM_MUX("DMIC MUX13", SND_SOC_NOPM, 0, 0,
+		&tx_dmic_mux13),
+
+	SND_SOC_DAPM_MUX("AMIC MUX0", SND_SOC_NOPM, 0, 0,
+		&tx_amic_mux0),
+	SND_SOC_DAPM_MUX("AMIC MUX1", SND_SOC_NOPM, 0, 0,
+		&tx_amic_mux1),
+	SND_SOC_DAPM_MUX("AMIC MUX2", SND_SOC_NOPM, 0, 0,
+		&tx_amic_mux2),
+	SND_SOC_DAPM_MUX("AMIC MUX3", SND_SOC_NOPM, 0, 0,
+		&tx_amic_mux3),
+	SND_SOC_DAPM_MUX("AMIC MUX4", SND_SOC_NOPM, 0, 0,
+		&tx_amic_mux4),
+	SND_SOC_DAPM_MUX("AMIC MUX5", SND_SOC_NOPM, 0, 0,
+		&tx_amic_mux5),
+	SND_SOC_DAPM_MUX("AMIC MUX6", SND_SOC_NOPM, 0, 0,
+		&tx_amic_mux6),
+	SND_SOC_DAPM_MUX("AMIC MUX7", SND_SOC_NOPM, 0, 0,
+		&tx_amic_mux7),
+	SND_SOC_DAPM_MUX("AMIC MUX8", SND_SOC_NOPM, 0, 0,
+		&tx_amic_mux8),
+	SND_SOC_DAPM_MUX("AMIC MUX10", SND_SOC_NOPM, 0, 0,
+		&tx_amic_mux10),
+	SND_SOC_DAPM_MUX("AMIC MUX11", SND_SOC_NOPM, 0, 0,
+		&tx_amic_mux11),
+	SND_SOC_DAPM_MUX("AMIC MUX12", SND_SOC_NOPM, 0, 0,
+		&tx_amic_mux12),
+	SND_SOC_DAPM_MUX("AMIC MUX13", SND_SOC_NOPM, 0, 0,
+		&tx_amic_mux13),
+
+	SND_SOC_DAPM_ADC_E("ADC1", NULL, WCD9335_ANA_AMIC1, 7, 0,
+			   tasha_codec_enable_adc, SND_SOC_DAPM_PRE_PMU),
+	SND_SOC_DAPM_ADC_E("ADC2", NULL, WCD9335_ANA_AMIC2, 7, 0,
+			   tasha_codec_enable_adc, SND_SOC_DAPM_PRE_PMU),
+	SND_SOC_DAPM_ADC_E("ADC3", NULL, WCD9335_ANA_AMIC3, 7, 0,
+			   tasha_codec_enable_adc, SND_SOC_DAPM_PRE_PMU),
+	SND_SOC_DAPM_ADC_E("ADC4", NULL, WCD9335_ANA_AMIC4, 7, 0,
+			   tasha_codec_enable_adc, SND_SOC_DAPM_PRE_PMU),
+	SND_SOC_DAPM_ADC_E("ADC5", NULL, WCD9335_ANA_AMIC5, 7, 0,
+			   tasha_codec_enable_adc, SND_SOC_DAPM_PRE_PMU),
+	SND_SOC_DAPM_ADC_E("ADC6", NULL, WCD9335_ANA_AMIC6, 7, 0,
+			   tasha_codec_enable_adc, SND_SOC_DAPM_PRE_PMU),
+
+	SND_SOC_DAPM_SUPPLY("RX INT1 NATIVE SUPPLY", SND_SOC_NOPM,
+			    INTERP_HPHL, 0, tasha_enable_native_supply,
+			    SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_PRE_PMD),
+
+	SND_SOC_DAPM_SUPPLY("RX INT2 NATIVE SUPPLY", SND_SOC_NOPM,
+			    INTERP_HPHR, 0, tasha_enable_native_supply,
+			    SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_PRE_PMD),
+
+	SND_SOC_DAPM_SUPPLY("RX INT3 NATIVE SUPPLY", SND_SOC_NOPM,
+			    INTERP_LO1, 0, tasha_enable_native_supply,
+			    SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_PRE_PMD),
+
+	SND_SOC_DAPM_SUPPLY("RX INT4 NATIVE SUPPLY", SND_SOC_NOPM,
+			    INTERP_LO2, 0, tasha_enable_native_supply,
+			    SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_PRE_PMD),
+
+	SND_SOC_DAPM_INPUT("AMIC1"),
+	SND_SOC_DAPM_MICBIAS_E("MIC BIAS1", SND_SOC_NOPM, 0, 0,
+			       tasha_codec_enable_micbias,
+			       SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
+			       SND_SOC_DAPM_POST_PMD),
+	SND_SOC_DAPM_MICBIAS_E("MIC BIAS2", SND_SOC_NOPM, 0, 0,
+			       tasha_codec_enable_micbias,
+			       SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
+			       SND_SOC_DAPM_POST_PMD),
+	SND_SOC_DAPM_MICBIAS_E("MIC BIAS3", SND_SOC_NOPM, 0, 0,
+			       tasha_codec_enable_micbias,
+			       SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
+			       SND_SOC_DAPM_POST_PMD),
+	SND_SOC_DAPM_MICBIAS_E("MIC BIAS4", SND_SOC_NOPM, 0, 0,
+			       tasha_codec_enable_micbias,
+			       SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
+			       SND_SOC_DAPM_POST_PMD),
+
+	SND_SOC_DAPM_MICBIAS_E(DAPM_MICBIAS1_STANDALONE, SND_SOC_NOPM, 0, 0,
+			       tasha_codec_force_enable_micbias,
+			       SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+	SND_SOC_DAPM_MICBIAS_E(DAPM_MICBIAS2_STANDALONE, SND_SOC_NOPM, 0, 0,
+			       tasha_codec_force_enable_micbias,
+			       SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+	SND_SOC_DAPM_MICBIAS_E(DAPM_MICBIAS3_STANDALONE, SND_SOC_NOPM, 0, 0,
+			       tasha_codec_force_enable_micbias,
+			       SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+	SND_SOC_DAPM_MICBIAS_E(DAPM_MICBIAS4_STANDALONE, SND_SOC_NOPM, 0, 0,
+			       tasha_codec_force_enable_micbias,
+			       SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+	SND_SOC_DAPM_SUPPLY(DAPM_LDO_H_STANDALONE, SND_SOC_NOPM, 0, 0,
+			    tasha_codec_force_enable_ldo_h,
+			    SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+
+	SND_SOC_DAPM_MUX("ANC0 FB MUX", SND_SOC_NOPM, 0, 0, &anc0_fb_mux),
+	SND_SOC_DAPM_MUX("ANC1 FB MUX", SND_SOC_NOPM, 0, 0, &anc1_fb_mux),
+
+	SND_SOC_DAPM_INPUT("AMIC2"),
+	SND_SOC_DAPM_INPUT("AMIC3"),
+	SND_SOC_DAPM_INPUT("AMIC4"),
+	SND_SOC_DAPM_INPUT("AMIC5"),
+	SND_SOC_DAPM_INPUT("AMIC6"),
+
+	SND_SOC_DAPM_AIF_OUT_E("AIF1 CAP", "AIF1 Capture", 0, SND_SOC_NOPM,
+		AIF1_CAP, 0, tasha_codec_enable_slimtx,
+		SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
+
+	SND_SOC_DAPM_AIF_OUT_E("AIF2 CAP", "AIF2 Capture", 0, SND_SOC_NOPM,
+		AIF2_CAP, 0, tasha_codec_enable_slimtx,
+		SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
+
+	SND_SOC_DAPM_AIF_OUT_E("AIF3 CAP", "AIF3 Capture", 0, SND_SOC_NOPM,
+		AIF3_CAP, 0, tasha_codec_enable_slimtx,
+		SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
+
+	SND_SOC_DAPM_AIF_OUT_E("AIF4 VI", "VIfeed", 0, SND_SOC_NOPM,
+		AIF4_VIFEED, 0, tasha_codec_enable_slimvi_feedback,
+		SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
+	SND_SOC_DAPM_MIXER("AIF4_VI Mixer", SND_SOC_NOPM, AIF4_VIFEED, 0,
+		aif4_vi_mixer, ARRAY_SIZE(aif4_vi_mixer)),
+
+	SND_SOC_DAPM_MIXER("AIF1_CAP Mixer", SND_SOC_NOPM, AIF1_CAP, 0,
+		aif1_cap_mixer, ARRAY_SIZE(aif1_cap_mixer)),
+
+	SND_SOC_DAPM_MIXER("AIF2_CAP Mixer", SND_SOC_NOPM, AIF2_CAP, 0,
+		aif2_cap_mixer, ARRAY_SIZE(aif2_cap_mixer)),
+
+	SND_SOC_DAPM_MIXER("AIF3_CAP Mixer", SND_SOC_NOPM, AIF3_CAP, 0,
+		aif3_cap_mixer, ARRAY_SIZE(aif3_cap_mixer)),
+
+	SND_SOC_DAPM_MIXER("AIF4_MAD Mixer", SND_SOC_NOPM, AIF4_MAD_TX, 0,
+		aif4_mad_mixer, ARRAY_SIZE(aif4_mad_mixer)),
+
+	SND_SOC_DAPM_INPUT("VIINPUT"),
+
+	SND_SOC_DAPM_AIF_OUT("AIF5 CPE", "AIF5 CPE TX", 0, SND_SOC_NOPM,
+			     AIF5_CPE_TX, 0),
+
+	SND_SOC_DAPM_MUX_E("EC BUF MUX INP", SND_SOC_NOPM, 0, 0, &ec_buf_mux,
+		tasha_codec_ec_buf_mux_enable,
+		SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
+
+	/* Digital Mic Inputs */
+	SND_SOC_DAPM_ADC_E("DMIC0", NULL, SND_SOC_NOPM, 0, 0,
+		tasha_codec_enable_dmic, SND_SOC_DAPM_PRE_PMU |
+		SND_SOC_DAPM_POST_PMD),
+
+	SND_SOC_DAPM_ADC_E("DMIC1", NULL, SND_SOC_NOPM, 0, 0,
+		tasha_codec_enable_dmic, SND_SOC_DAPM_PRE_PMU |
+		SND_SOC_DAPM_POST_PMD),
+
+	SND_SOC_DAPM_ADC_E("DMIC2", NULL, SND_SOC_NOPM, 0, 0,
+		tasha_codec_enable_dmic, SND_SOC_DAPM_PRE_PMU |
+		SND_SOC_DAPM_POST_PMD),
+
+	SND_SOC_DAPM_ADC_E("DMIC3", NULL, SND_SOC_NOPM, 0, 0,
+		tasha_codec_enable_dmic, SND_SOC_DAPM_PRE_PMU |
+		SND_SOC_DAPM_POST_PMD),
+
+	SND_SOC_DAPM_ADC_E("DMIC4", NULL, SND_SOC_NOPM, 0, 0,
+		tasha_codec_enable_dmic, SND_SOC_DAPM_PRE_PMU |
+		SND_SOC_DAPM_POST_PMD),
+
+	SND_SOC_DAPM_ADC_E("DMIC5", NULL, SND_SOC_NOPM, 0, 0,
+		tasha_codec_enable_dmic, SND_SOC_DAPM_PRE_PMU |
+		SND_SOC_DAPM_POST_PMD),
+
+	SND_SOC_DAPM_MUX("IIR0 INP0 MUX", SND_SOC_NOPM, 0, 0, &iir0_inp0_mux),
+	SND_SOC_DAPM_MUX("IIR0 INP1 MUX", SND_SOC_NOPM, 0, 0, &iir0_inp1_mux),
+	SND_SOC_DAPM_MUX("IIR0 INP2 MUX", SND_SOC_NOPM, 0, 0, &iir0_inp2_mux),
+	SND_SOC_DAPM_MUX("IIR0 INP3 MUX", SND_SOC_NOPM, 0, 0, &iir0_inp3_mux),
+	SND_SOC_DAPM_MUX("IIR1 INP0 MUX", SND_SOC_NOPM, 0, 0, &iir1_inp0_mux),
+	SND_SOC_DAPM_MUX("IIR1 INP1 MUX", SND_SOC_NOPM, 0, 0, &iir1_inp1_mux),
+	SND_SOC_DAPM_MUX("IIR1 INP2 MUX", SND_SOC_NOPM, 0, 0, &iir1_inp2_mux),
+	SND_SOC_DAPM_MUX("IIR1 INP3 MUX", SND_SOC_NOPM, 0, 0, &iir1_inp3_mux),
+
+	SND_SOC_DAPM_MIXER_E("IIR0", WCD9335_CDC_SIDETONE_IIR0_IIR_PATH_CTL,
+			     4, 0, NULL, 0, tasha_codec_set_iir_gain,
+			     SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD),
+	SND_SOC_DAPM_MIXER_E("IIR1", WCD9335_CDC_SIDETONE_IIR1_IIR_PATH_CTL,
+			     4, 0, NULL, 0, tasha_codec_set_iir_gain,
+			     SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD),
+	SND_SOC_DAPM_MIXER("SRC0", WCD9335_CDC_SIDETONE_SRC0_ST_SRC_PATH_CTL,
+			     4, 0, NULL, 0),
+	SND_SOC_DAPM_MIXER("SRC1", WCD9335_CDC_SIDETONE_SRC1_ST_SRC_PATH_CTL,
+			     4, 0, NULL, 0),
+	SND_SOC_DAPM_MIXER_E("CPE IN Mixer", SND_SOC_NOPM, 0, 0,
+				cpe_in_mix_switch,
+				ARRAY_SIZE(cpe_in_mix_switch),
+				tasha_codec_configure_cpe_input,
+				SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+
+	SND_SOC_DAPM_MUX("RX INT1_1 NATIVE MUX", SND_SOC_NOPM, 0, 0,
+		&int1_1_native_mux),
+	SND_SOC_DAPM_MUX("RX INT2_1 NATIVE MUX", SND_SOC_NOPM, 0, 0,
+		&int2_1_native_mux),
+	SND_SOC_DAPM_MUX("RX INT3_1 NATIVE MUX", SND_SOC_NOPM, 0, 0,
+		&int3_1_native_mux),
+	SND_SOC_DAPM_MUX("RX INT4_1 NATIVE MUX", SND_SOC_NOPM, 0, 0,
+		&int4_1_native_mux),
+	SND_SOC_DAPM_MUX("RX MIX TX0 MUX", SND_SOC_NOPM, 0, 0,
+		&rx_mix_tx0_mux),
+	SND_SOC_DAPM_MUX("RX MIX TX1 MUX", SND_SOC_NOPM, 0, 0,
+		&rx_mix_tx1_mux),
+	SND_SOC_DAPM_MUX("RX MIX TX2 MUX", SND_SOC_NOPM, 0, 0,
+		&rx_mix_tx2_mux),
+	SND_SOC_DAPM_MUX("RX MIX TX3 MUX", SND_SOC_NOPM, 0, 0,
+		&rx_mix_tx3_mux),
+	SND_SOC_DAPM_MUX("RX MIX TX4 MUX", SND_SOC_NOPM, 0, 0,
+		&rx_mix_tx4_mux),
+	SND_SOC_DAPM_MUX("RX MIX TX5 MUX", SND_SOC_NOPM, 0, 0,
+		&rx_mix_tx5_mux),
+	SND_SOC_DAPM_MUX("RX MIX TX6 MUX", SND_SOC_NOPM, 0, 0,
+		&rx_mix_tx6_mux),
+	SND_SOC_DAPM_MUX("RX MIX TX7 MUX", SND_SOC_NOPM, 0, 0,
+		&rx_mix_tx7_mux),
+	SND_SOC_DAPM_MUX("RX MIX TX8 MUX", SND_SOC_NOPM, 0, 0,
+		&rx_mix_tx8_mux),
+
+	SND_SOC_DAPM_MUX("RX INT0 DEM MUX", SND_SOC_NOPM, 0, 0,
+		&rx_int0_dem_inp_mux),
+	SND_SOC_DAPM_MUX("RX INT1 DEM MUX", SND_SOC_NOPM, 0, 0,
+		&rx_int1_dem_inp_mux),
+	SND_SOC_DAPM_MUX("RX INT2 DEM MUX", SND_SOC_NOPM, 0, 0,
+		&rx_int2_dem_inp_mux),
+
+	SND_SOC_DAPM_MUX_E("RX INT0 INTERP", SND_SOC_NOPM,
+		INTERP_EAR, 0, &rx_int0_interp_mux,
+		tasha_codec_enable_interpolator,
+		SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
+		SND_SOC_DAPM_POST_PMD),
+	SND_SOC_DAPM_MUX_E("RX INT1 INTERP", SND_SOC_NOPM,
+		INTERP_HPHL, 0, &rx_int1_interp_mux,
+		tasha_codec_enable_interpolator,
+		SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
+		SND_SOC_DAPM_POST_PMD),
+	SND_SOC_DAPM_MUX_E("RX INT2 INTERP", SND_SOC_NOPM,
+		INTERP_HPHR, 0, &rx_int2_interp_mux,
+		tasha_codec_enable_interpolator,
+		SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
+		SND_SOC_DAPM_POST_PMD),
+	SND_SOC_DAPM_MUX_E("RX INT3 INTERP", SND_SOC_NOPM,
+		INTERP_LO1, 0, &rx_int3_interp_mux,
+		tasha_codec_enable_interpolator,
+		SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
+		SND_SOC_DAPM_POST_PMD),
+	SND_SOC_DAPM_MUX_E("RX INT4 INTERP", SND_SOC_NOPM,
+		INTERP_LO2, 0, &rx_int4_interp_mux,
+		tasha_codec_enable_interpolator,
+		SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
+		SND_SOC_DAPM_POST_PMD),
+	SND_SOC_DAPM_MUX_E("RX INT5 INTERP", SND_SOC_NOPM,
+		INTERP_LO3, 0, &rx_int5_interp_mux,
+		tasha_codec_enable_interpolator,
+		SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
+		SND_SOC_DAPM_POST_PMD),
+	SND_SOC_DAPM_MUX_E("RX INT6 INTERP", SND_SOC_NOPM,
+		INTERP_LO4, 0, &rx_int6_interp_mux,
+		tasha_codec_enable_interpolator,
+		SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
+		SND_SOC_DAPM_POST_PMD),
+	SND_SOC_DAPM_MUX_E("RX INT7 INTERP", SND_SOC_NOPM,
+		INTERP_SPKR1, 0, &rx_int7_interp_mux,
+		tasha_codec_enable_interpolator,
+		SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
+		SND_SOC_DAPM_POST_PMD),
+	SND_SOC_DAPM_MUX_E("RX INT8 INTERP", SND_SOC_NOPM,
+		INTERP_SPKR2, 0, &rx_int8_interp_mux,
+		tasha_codec_enable_interpolator,
+		SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
+		SND_SOC_DAPM_POST_PMD),
+
+	SND_SOC_DAPM_DAC_E("RX INT0 DAC", NULL, SND_SOC_NOPM,
+		0, 0, tasha_codec_ear_dac_event,
+		SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
+		SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD),
+	SND_SOC_DAPM_DAC_E("RX INT1 DAC", NULL, SND_SOC_NOPM,
+		0, 0, tasha_codec_hphl_dac_event,
+		SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
+		SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD),
+	SND_SOC_DAPM_DAC_E("RX INT2 DAC", NULL, SND_SOC_NOPM,
+		0, 0, tasha_codec_hphr_dac_event,
+		SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
+		SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD),
+	SND_SOC_DAPM_DAC_E("RX INT3 DAC", NULL, SND_SOC_NOPM,
+		0, 0, tasha_codec_lineout_dac_event,
+		SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+	SND_SOC_DAPM_DAC_E("RX INT4 DAC", NULL, SND_SOC_NOPM,
+		0, 0, tasha_codec_lineout_dac_event,
+		SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+	SND_SOC_DAPM_DAC_E("RX INT5 DAC", NULL, SND_SOC_NOPM,
+		0, 0, tasha_codec_lineout_dac_event,
+		SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+	SND_SOC_DAPM_DAC_E("RX INT6 DAC", NULL, SND_SOC_NOPM,
+		0, 0, tasha_codec_lineout_dac_event,
+		SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+	SND_SOC_DAPM_PGA_E("HPHL PA", SND_SOC_NOPM, 0, 0, NULL, 0,
+			   tasha_codec_enable_hphl_pa,
+			   SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
+			   SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD),
+	SND_SOC_DAPM_PGA_E("HPHR PA", SND_SOC_NOPM, 0, 0, NULL, 0,
+			   tasha_codec_enable_hphr_pa,
+			   SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
+			   SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD),
+	SND_SOC_DAPM_PGA_E("EAR PA", WCD9335_ANA_EAR, 7, 0, NULL, 0,
+			   tasha_codec_enable_ear_pa,
+			   SND_SOC_DAPM_POST_PMU |
+			   SND_SOC_DAPM_POST_PMD),
+	SND_SOC_DAPM_PGA_E("LINEOUT1 PA", WCD9335_ANA_LO_1_2, 7, 0, NULL, 0,
+			   tasha_codec_enable_lineout_pa,
+			   SND_SOC_DAPM_POST_PMU |
+			   SND_SOC_DAPM_POST_PMD),
+	SND_SOC_DAPM_PGA_E("LINEOUT2 PA", WCD9335_ANA_LO_1_2, 6, 0, NULL, 0,
+			   tasha_codec_enable_lineout_pa,
+			   SND_SOC_DAPM_POST_PMU |
+			   SND_SOC_DAPM_POST_PMD),
+	SND_SOC_DAPM_PGA_E("LINEOUT3 PA", WCD9335_ANA_LO_3_4, 7, 0, NULL, 0,
+			   tasha_codec_enable_lineout_pa,
+			   SND_SOC_DAPM_POST_PMU |
+			   SND_SOC_DAPM_POST_PMD),
+	SND_SOC_DAPM_PGA_E("LINEOUT4 PA", WCD9335_ANA_LO_3_4, 6, 0, NULL, 0,
+			   tasha_codec_enable_lineout_pa,
+			   SND_SOC_DAPM_POST_PMU |
+			   SND_SOC_DAPM_POST_PMD),
+	SND_SOC_DAPM_PGA_E("ANC EAR PA", WCD9335_ANA_EAR, 7, 0, NULL, 0,
+			   tasha_codec_enable_ear_pa,
+			   SND_SOC_DAPM_POST_PMU |
+			   SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD),
+	SND_SOC_DAPM_PGA_E("ANC HPHL PA", SND_SOC_NOPM, 0, 0, NULL, 0,
+			   tasha_codec_enable_hphl_pa,
+			   SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
+			   SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD),
+	SND_SOC_DAPM_PGA_E("ANC HPHR PA", SND_SOC_NOPM, 0, 0, NULL, 0,
+			   tasha_codec_enable_hphr_pa,
+			   SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
+			   SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD),
+	SND_SOC_DAPM_PGA_E("ANC LINEOUT1 PA", WCD9335_ANA_LO_1_2,
+				7, 0, NULL, 0,
+				tasha_codec_enable_lineout_pa,
+				SND_SOC_DAPM_POST_PMU |
+				SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD),
+	SND_SOC_DAPM_PGA_E("ANC LINEOUT2 PA", WCD9335_ANA_LO_1_2,
+				6, 0, NULL, 0,
+				tasha_codec_enable_lineout_pa,
+				SND_SOC_DAPM_POST_PMU |
+				SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD),
+	SND_SOC_DAPM_PGA_E("ANC SPK1 PA", SND_SOC_NOPM, 0, 0, NULL, 0,
+			   tasha_codec_enable_spk_anc,
+			   SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+
+	SND_SOC_DAPM_OUTPUT("HPHL"),
+	SND_SOC_DAPM_OUTPUT("HPHR"),
+	SND_SOC_DAPM_OUTPUT("ANC HPHL"),
+	SND_SOC_DAPM_OUTPUT("ANC HPHR"),
+	SND_SOC_DAPM_SUPPLY("RX_BIAS", SND_SOC_NOPM, 0, 0,
+		tasha_codec_enable_rx_bias, SND_SOC_DAPM_PRE_PMU |
+		SND_SOC_DAPM_POST_PMD),
+	SND_SOC_DAPM_OUTPUT("SPK1 OUT"),
+	SND_SOC_DAPM_OUTPUT("SPK2 OUT"),
+	SND_SOC_DAPM_OUTPUT("LINEOUT1"),
+	SND_SOC_DAPM_OUTPUT("LINEOUT2"),
+	SND_SOC_DAPM_OUTPUT("LINEOUT3"),
+	SND_SOC_DAPM_OUTPUT("LINEOUT4"),
+	SND_SOC_DAPM_OUTPUT("ANC LINEOUT1"),
+	SND_SOC_DAPM_OUTPUT("ANC LINEOUT2"),
+	SND_SOC_DAPM_SUPPLY("MICBIAS_REGULATOR", SND_SOC_NOPM,
+		ON_DEMAND_MICBIAS, 0,
+		tasha_codec_enable_on_demand_supply,
+		SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+
+	SND_SOC_DAPM_SWITCH("ADC US MUX0", WCD9335_CDC_TX0_TX_PATH_192_CTL, 0,
+			    0, &adc_us_mux0_switch),
+	SND_SOC_DAPM_SWITCH("ADC US MUX1", WCD9335_CDC_TX1_TX_PATH_192_CTL, 0,
+			    0, &adc_us_mux1_switch),
+	SND_SOC_DAPM_SWITCH("ADC US MUX2", WCD9335_CDC_TX2_TX_PATH_192_CTL, 0,
+			    0, &adc_us_mux2_switch),
+	SND_SOC_DAPM_SWITCH("ADC US MUX3", WCD9335_CDC_TX3_TX_PATH_192_CTL, 0,
+			    0, &adc_us_mux3_switch),
+	SND_SOC_DAPM_SWITCH("ADC US MUX4", WCD9335_CDC_TX4_TX_PATH_192_CTL, 0,
+			    0, &adc_us_mux4_switch),
+	SND_SOC_DAPM_SWITCH("ADC US MUX5", WCD9335_CDC_TX5_TX_PATH_192_CTL, 0,
+			    0, &adc_us_mux5_switch),
+	SND_SOC_DAPM_SWITCH("ADC US MUX6", WCD9335_CDC_TX6_TX_PATH_192_CTL, 0,
+			    0, &adc_us_mux6_switch),
+	SND_SOC_DAPM_SWITCH("ADC US MUX7", WCD9335_CDC_TX7_TX_PATH_192_CTL, 0,
+			    0, &adc_us_mux7_switch),
+	SND_SOC_DAPM_SWITCH("ADC US MUX8", WCD9335_CDC_TX8_TX_PATH_192_CTL, 0,
+			    0, &adc_us_mux8_switch),
+	/* MAD related widgets */
+	SND_SOC_DAPM_AIF_OUT_E("AIF4 MAD", "AIF4 MAD TX", 0,
+			       SND_SOC_NOPM, 0, 0,
+			       tasha_codec_enable_mad,
+			       SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+
+	SND_SOC_DAPM_MUX("MAD_SEL MUX", SND_SOC_NOPM, 0, 0,
+			 &mad_sel_mux),
+	SND_SOC_DAPM_INPUT("MAD_CPE_INPUT"),
+	SND_SOC_DAPM_INPUT("MADINPUT"),
+	SND_SOC_DAPM_SWITCH("MADONOFF", SND_SOC_NOPM, 0, 0,
+			    &aif4_mad_switch),
+	SND_SOC_DAPM_SWITCH("MAD_BROADCAST", SND_SOC_NOPM, 0, 0,
+			    &mad_brdcst_switch),
+	SND_SOC_DAPM_SWITCH("AIF4", SND_SOC_NOPM, 0, 0,
+			    &aif4_switch_mixer_controls),
+	SND_SOC_DAPM_SWITCH("ANC HPHL Enable", SND_SOC_NOPM, 0, 0,
+			&anc_hphl_switch),
+	SND_SOC_DAPM_SWITCH("ANC HPHR Enable", SND_SOC_NOPM, 0, 0,
+			&anc_hphr_switch),
+	SND_SOC_DAPM_SWITCH("ANC EAR Enable", SND_SOC_NOPM, 0, 0,
+			&anc_ear_switch),
+	SND_SOC_DAPM_SWITCH("ANC OUT EAR SPKR Enable", SND_SOC_NOPM, 0, 0,
+			    &anc_ear_spkr_switch),
+	SND_SOC_DAPM_SWITCH("ANC LINEOUT1 Enable", SND_SOC_NOPM, 0, 0,
+			&anc_lineout1_switch),
+	SND_SOC_DAPM_SWITCH("ANC LINEOUT2 Enable", SND_SOC_NOPM, 0, 0,
+			&anc_lineout2_switch),
+	SND_SOC_DAPM_SWITCH("ANC SPKR PA Enable", SND_SOC_NOPM, 0, 0,
+			    &anc_spkr_pa_switch),
+};
+
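+/*
+ * Report the SLIM channel map in use by a DAI: walk the DAI's
+ * wcd9xxx_ch_list and fill rx_slot/rx_num for playback DAIs or
+ * tx_slot/tx_num for capture DAIs.
+ */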
+static int tasha_get_channel_map(struct snd_soc_dai *dai,
+				 unsigned int *tx_num, unsigned int *tx_slot,
+				 unsigned int *rx_num, unsigned int *rx_slot)
+{
+	struct tasha_priv *tasha_p = snd_soc_codec_get_drvdata(dai->codec);
+	u32 i = 0;
+	struct wcd9xxx_ch *ch;
+
+	switch (dai->id) {
+	case AIF1_PB:
+	case AIF2_PB:
+	case AIF3_PB:
+	case AIF4_PB:
+	case AIF_MIX1_PB:
+		if (!rx_slot || !rx_num) {
+			pr_err("%s: Invalid rx_slot %pK or rx_num %pK\n",
+				 __func__, rx_slot, rx_num);
+			return -EINVAL;
+		}
+		list_for_each_entry(ch, &tasha_p->dai[dai->id].wcd9xxx_ch_list,
+				    list) {
+			pr_debug("%s: slot_num %u ch->ch_num %d\n",
+				 __func__, i, ch->ch_num);
+			rx_slot[i++] = ch->ch_num;
+		}
+		pr_debug("%s: rx_num %d\n", __func__, i);
+		*rx_num = i;
+		break;
+	case AIF1_CAP:
+	case AIF2_CAP:
+	case AIF3_CAP:
+	case AIF4_MAD_TX:
+	case AIF4_VIFEED:
+		if (!tx_slot || !tx_num) {
+			pr_err("%s: Invalid tx_slot %pK or tx_num %pK\n",
+				 __func__, tx_slot, tx_num);
+			return -EINVAL;
+		}
+		list_for_each_entry(ch, &tasha_p->dai[dai->id].wcd9xxx_ch_list,
+				    list) {
+			pr_debug("%s: slot_num %u ch->ch_num %d\n",
+				 __func__, i, ch->ch_num);
+			tx_slot[i++] = ch->ch_num;
+		}
+		pr_debug("%s: tx_num %d\n", __func__, i);
+		*tx_num = i;
+		break;
+
+	default:
+		pr_err("%s: Invalid DAI ID %x\n", __func__, dai->id);
+		break;
+	}
+
+	return 0;
+}
+
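+/*
+ * Program the SLIM slave channel map. On the SLIMbus interface this
+ * also reserves TX13 (2.0 silicon) or TX12 (earlier revisions) for the
+ * MAD data channel.
+ */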
+static int tasha_set_channel_map(struct snd_soc_dai *dai,
+				 unsigned int tx_num, unsigned int *tx_slot,
+				 unsigned int rx_num, unsigned int *rx_slot)
+{
+	struct tasha_priv *tasha;
+	struct wcd9xxx *core;
+	struct wcd9xxx_codec_dai_data *dai_data = NULL;
+
+	if (!dai) {
+		pr_err("%s: dai is NULL\n", __func__);
+		return -EINVAL;
+	}
+	tasha = snd_soc_codec_get_drvdata(dai->codec);
+	core = dev_get_drvdata(dai->codec->dev->parent);
+
+	if (!tx_slot || !rx_slot) {
+		pr_err("%s: Invalid tx_slot=%pK, rx_slot=%pK\n",
+			__func__, tx_slot, rx_slot);
+		return -EINVAL;
+	}
+	pr_debug("%s(): dai_name = %s DAI-ID %x tx_ch %d rx_ch %d\n"
+		 "tasha->intf_type %d\n",
+		 __func__, dai->name, dai->id, tx_num, rx_num,
+		 tasha->intf_type);
+
+	if (tasha->intf_type == WCD9XXX_INTERFACE_TYPE_SLIMBUS) {
+		wcd9xxx_init_slimslave(core, core->slim->laddr,
+					   tx_num, tx_slot, rx_num, rx_slot);
+		/* Reserve TX12/TX13 for MAD data channel */
+		dai_data = &tasha->dai[AIF4_MAD_TX];
+		if (TASHA_IS_2_0(tasha->wcd9xxx))
+			list_add_tail(&core->tx_chs[TASHA_TX13].list,
+				      &dai_data->wcd9xxx_ch_list);
+		else
+			list_add_tail(&core->tx_chs[TASHA_TX12].list,
+				      &dai_data->wcd9xxx_ch_list);
+	}
+	return 0;
+}
+
+static int tasha_startup(struct snd_pcm_substream *substream,
+		struct snd_soc_dai *dai)
+{
+	pr_debug("%s(): substream = %s  stream = %d\n", __func__,
+		 substream->name, substream->stream);
+
+	return 0;
+}
+
+static void tasha_shutdown(struct snd_pcm_substream *substream,
+		struct snd_soc_dai *dai)
+{
+	struct tasha_priv *tasha = snd_soc_codec_get_drvdata(dai->codec);
+
+	pr_debug("%s(): substream = %s  stream = %d\n", __func__,
+		 substream->name, substream->stream);
+
+	if (tasha->intf_type == WCD9XXX_INTERFACE_TYPE_I2C)
+		return;
+
+	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+		tasha_codec_vote_max_bw(dai->codec, false);
+}
+
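+/*
+ * For each SLIM TX port used by the DAI, determine from the TX-router
+ * mux config registers which decimator (if any) feeds the port, and
+ * program that decimator's sample rate field in TXn_TX_PATH_CTL
+ * (decimator registers are spaced 16 apart).
+ */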
+static int tasha_set_decimator_rate(struct snd_soc_dai *dai,
+				    u8 tx_fs_rate_reg_val, u32 sample_rate)
+{
+	struct snd_soc_codec *codec = dai->codec;
+	struct wcd9xxx_ch *ch;
+	struct tasha_priv *tasha = snd_soc_codec_get_drvdata(codec);
+	u32 tx_port = 0;
+	u8 shift = 0, shift_val = 0, tx_mux_sel = 0;
+	int decimator = -1;
+	u16 tx_port_reg = 0, tx_fs_reg = 0;
+
+	list_for_each_entry(ch, &tasha->dai[dai->id].wcd9xxx_ch_list, list) {
+		tx_port = ch->port;
+		dev_dbg(codec->dev, "%s: dai->id = %d, tx_port = %u\n",
+			__func__, dai->id, tx_port);
+
+		/* tx_port is unsigned, so it can never be negative */
+		if ((tx_port == 12) || (tx_port >= 14)) {
+			dev_err(codec->dev, "%s: Invalid SLIM TX%u port. DAI ID: %d\n",
+				__func__, tx_port, dai->id);
+			return -EINVAL;
+		}
+		/* Find which decimator, if any, feeds this SB TX MUX input */
+		if (tx_port < 4) {
+			tx_port_reg = WCD9335_CDC_IF_ROUTER_TX_MUX_CFG0;
+			shift = (tx_port << 1);
+			shift_val = 0x03;
+		} else if ((tx_port >= 4) && (tx_port < 8)) {
+			tx_port_reg = WCD9335_CDC_IF_ROUTER_TX_MUX_CFG1;
+			shift = ((tx_port - 4) << 1);
+			shift_val = 0x03;
+		} else if ((tx_port >= 8) && (tx_port < 11)) {
+			tx_port_reg = WCD9335_CDC_IF_ROUTER_TX_MUX_CFG2;
+			shift = ((tx_port - 8) << 1);
+			shift_val = 0x03;
+		} else if (tx_port == 11) {
+			tx_port_reg = WCD9335_CDC_IF_ROUTER_TX_MUX_CFG3;
+			shift = 0;
+			shift_val = 0x0F;
+		} else if (tx_port == 13) {
+			tx_port_reg = WCD9335_CDC_IF_ROUTER_TX_MUX_CFG3;
+			shift = 4;
+			shift_val = 0x03;
+		}
+		tx_mux_sel = snd_soc_read(codec, tx_port_reg) &
+					  (shift_val << shift);
+		tx_mux_sel = tx_mux_sel >> shift;
+
+		if (tx_port <= 8) {
+			if ((tx_mux_sel == 0x2) || (tx_mux_sel == 0x3))
+				decimator = tx_port;
+		} else if (tx_port <= 10) {
+			if ((tx_mux_sel == 0x1) || (tx_mux_sel == 0x2))
+				decimator = ((tx_port == 9) ? 7 : 6);
+		} else if (tx_port == 11) {
+			if ((tx_mux_sel >= 1) && (tx_mux_sel < 7))
+				decimator = tx_mux_sel - 1;
+		} else if (tx_port == 13) {
+			if ((tx_mux_sel == 0x1) || (tx_mux_sel == 0x2))
+				decimator = 5;
+		}
+
+		if (decimator >= 0) {
+			tx_fs_reg = WCD9335_CDC_TX0_TX_PATH_CTL +
+				    16 * decimator;
+			dev_dbg(codec->dev, "%s: set DEC%u (-> SLIM_TX%u) rate to %u\n",
+				__func__, decimator, tx_port, sample_rate);
+			snd_soc_update_bits(codec, tx_fs_reg, 0x0F,
+					    tx_fs_rate_reg_val);
+		} else if ((tx_port <= 8) && (tx_mux_sel == 0x01)) {
+			/* Check if the TX Mux input is RX MIX TXn */
+			dev_dbg(codec->dev, "%s: RX_MIX_TX%u going to SLIM TX%u\n",
+					__func__, tx_port, tx_port);
+		} else {
+			dev_err(codec->dev, "%s: ERROR: Invalid decimator: %d\n",
+				__func__, decimator);
+			return -EINVAL;
+		}
+	}
+	return 0;
+}
+
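+/*
+ * Set the sample rate on every interpolator mix (INTn_2) path that is
+ * fed by one of this DAI's SLIM RX ports, by scanning the INTn CFG1
+ * mux registers for a matching input selection.
+ */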
+static int tasha_set_mix_interpolator_rate(struct snd_soc_dai *dai,
+					   u8 int_mix_fs_rate_reg_val,
+					   u32 sample_rate)
+{
+	u8 int_2_inp;
+	u32 j;
+	u16 int_mux_cfg1, int_fs_reg;
+	u8 int_mux_cfg1_val;
+	struct snd_soc_codec *codec = dai->codec;
+	struct wcd9xxx_ch *ch;
+	struct tasha_priv *tasha = snd_soc_codec_get_drvdata(codec);
+
+	list_for_each_entry(ch, &tasha->dai[dai->id].wcd9xxx_ch_list, list) {
+		int_2_inp = ch->port + INTn_2_INP_SEL_RX0 -
+				  TASHA_RX_PORT_START_NUMBER;
+		if ((int_2_inp < INTn_2_INP_SEL_RX0) ||
+		   (int_2_inp > INTn_2_INP_SEL_RX7)) {
+			pr_err("%s: Invalid RX%u port, Dai ID is %d\n",
+				__func__,
+				(ch->port - TASHA_RX_PORT_START_NUMBER),
+				dai->id);
+			return -EINVAL;
+		}
+
+		int_mux_cfg1 = WCD9335_CDC_RX_INP_MUX_RX_INT0_CFG1;
+		for (j = 0; j < TASHA_NUM_INTERPOLATORS; j++) {
+			int_mux_cfg1_val = snd_soc_read(codec, int_mux_cfg1) &
+						0x0F;
+			if (int_mux_cfg1_val == int_2_inp) {
+				int_fs_reg = WCD9335_CDC_RX0_RX_PATH_MIX_CTL +
+						20 * j;
+				pr_debug("%s: AIF_MIX_PB DAI(%d) connected to INT%u_2\n",
+					  __func__, dai->id, j);
+				pr_debug("%s: set INT%u_2 sample rate to %u\n",
+					__func__, j, sample_rate);
+				snd_soc_update_bits(codec, int_fs_reg,
+						0x0F, int_mix_fs_rate_reg_val);
+			}
+			int_mux_cfg1 += 2;
+		}
+	}
+	return 0;
+}
+
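+/*
+ * Set the sample rate on every primary interpolator (INTn_1) path fed
+ * by one of this DAI's SLIM RX ports; 44.1 kHz is rejected on INT0.
+ */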
+static int tasha_set_prim_interpolator_rate(struct snd_soc_dai *dai,
+					    u8 int_prim_fs_rate_reg_val,
+					    u32 sample_rate)
+{
+	u8 int_1_mix1_inp;
+	u32 j;
+	u16 int_mux_cfg0, int_mux_cfg1;
+	u16 int_fs_reg;
+	u8 int_mux_cfg0_val, int_mux_cfg1_val;
+	u8 inp0_sel, inp1_sel, inp2_sel;
+	struct snd_soc_codec *codec = dai->codec;
+	struct wcd9xxx_ch *ch;
+	struct tasha_priv *tasha = snd_soc_codec_get_drvdata(codec);
+
+	list_for_each_entry(ch, &tasha->dai[dai->id].wcd9xxx_ch_list, list) {
+		int_1_mix1_inp = ch->port + INTn_1_MIX_INP_SEL_RX0 -
+				  TASHA_RX_PORT_START_NUMBER;
+		if ((int_1_mix1_inp < INTn_1_MIX_INP_SEL_RX0) ||
+		   (int_1_mix1_inp > INTn_1_MIX_INP_SEL_RX7)) {
+			pr_err("%s: Invalid RX%u port, Dai ID is %d\n",
+				__func__,
+				(ch->port - TASHA_RX_PORT_START_NUMBER),
+				dai->id);
+			return -EINVAL;
+		}
+
+		int_mux_cfg0 = WCD9335_CDC_RX_INP_MUX_RX_INT0_CFG0;
+
+		/*
+		 * Loop through all interpolator MUX inputs to find out
+		 * which interpolator input this SLIM RX port is
+		 * connected to.
+		 */
+		for (j = 0; j < TASHA_NUM_INTERPOLATORS; j++) {
+			int_mux_cfg1 = int_mux_cfg0 + 1;
+
+			int_mux_cfg0_val = snd_soc_read(codec, int_mux_cfg0);
+			int_mux_cfg1_val = snd_soc_read(codec, int_mux_cfg1);
+			inp0_sel = int_mux_cfg0_val & 0x0F;
+			inp1_sel = (int_mux_cfg0_val >> 4) & 0x0F;
+			inp2_sel = (int_mux_cfg1_val >> 4) & 0x0F;
+			if ((inp0_sel == int_1_mix1_inp) ||
+			    (inp1_sel == int_1_mix1_inp) ||
+			    (inp2_sel == int_1_mix1_inp)) {
+				int_fs_reg = WCD9335_CDC_RX0_RX_PATH_CTL +
+					     20 * j;
+				pr_debug("%s: AIF_PB DAI(%d) connected to INT%u_1\n",
+					  __func__, dai->id, j);
+				pr_debug("%s: set INT%u_1 sample rate to %u\n",
+					__func__, j, sample_rate);
+				/* sample_rate is in Hz */
+				if ((j == 0) && (sample_rate == 44100)) {
+					pr_info("%s: Cannot set 44.1 kHz on INT0\n",
+						__func__);
+				} else {
+					snd_soc_update_bits(codec, int_fs_reg,
+						0x0F, int_prim_fs_rate_reg_val);
+				}
+			}
+			int_mux_cfg0 += 2;
+		}
+	}
+
+	return 0;
+}
+
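+/*
+ * Look up the register value for the requested sample rate in the mix
+ * and primary rate tables and apply it to the interpolators in use. A
+ * rate absent from the mix table is tolerated; one absent from the
+ * primary table is an error.
+ */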
+static int tasha_set_interpolator_rate(struct snd_soc_dai *dai,
+				       u32 sample_rate)
+{
+	int rate_val = 0;
+	int i, ret;
+
+	/* set mixing path rate */
+	for (i = 0; i < ARRAY_SIZE(int_mix_sample_rate_val); i++) {
+		if (sample_rate ==
+				int_mix_sample_rate_val[i].sample_rate) {
+			rate_val =
+				int_mix_sample_rate_val[i].rate_val;
+			break;
+		}
+	}
+	if ((i == ARRAY_SIZE(int_mix_sample_rate_val)) ||
+			(rate_val < 0))
+		goto prim_rate;
+	ret = tasha_set_mix_interpolator_rate(dai,
+			(u8) rate_val, sample_rate);
+prim_rate:
+	/* set primary path sample rate */
+	for (i = 0; i < ARRAY_SIZE(int_prim_sample_rate_val); i++) {
+		if (sample_rate ==
+				int_prim_sample_rate_val[i].sample_rate) {
+			rate_val =
+				int_prim_sample_rate_val[i].rate_val;
+			break;
+		}
+	}
+	if ((i == ARRAY_SIZE(int_prim_sample_rate_val)) ||
+			(rate_val < 0))
+		return -EINVAL;
+	ret = tasha_set_prim_interpolator_rate(dai,
+			(u8) rate_val, sample_rate);
+	return ret;
+}
+
+static int tasha_prepare(struct snd_pcm_substream *substream,
+			 struct snd_soc_dai *dai)
+{
+	pr_debug("%s(): substream = %s  stream = %d\n", __func__,
+		 substream->name, substream->stream);
+
+	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+		tasha_codec_vote_max_bw(dai->codec, false);
+	return 0;
+}
+
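+/*
+ * hw_params: program interpolator rates for playback and decimator
+ * rates for capture, record rate/bit width per DAI and, on the I2C
+ * control interface, set up the DATA_HUB I2S control registers.
+ */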
+static int tasha_hw_params(struct snd_pcm_substream *substream,
+			   struct snd_pcm_hw_params *params,
+			   struct snd_soc_dai *dai)
+{
+	struct tasha_priv *tasha = snd_soc_codec_get_drvdata(dai->codec);
+	int ret;
+	int tx_fs_rate = -EINVAL;
+	int rx_fs_rate = -EINVAL;
+	int i2s_bit_mode;
+	struct snd_soc_codec *codec = dai->codec;
+
+	pr_debug("%s: dai_name = %s DAI-ID %x rate %d num_ch %d\n", __func__,
+		 dai->name, dai->id, params_rate(params),
+		 params_channels(params));
+
+	switch (substream->stream) {
+	case SNDRV_PCM_STREAM_PLAYBACK:
+		ret = tasha_set_interpolator_rate(dai, params_rate(params));
+		if (ret) {
+			pr_err("%s: cannot set sample rate: %u\n",
+				__func__, params_rate(params));
+			return ret;
+		}
+		switch (params_width(params)) {
+		case 16:
+			tasha->dai[dai->id].bit_width = 16;
+			i2s_bit_mode = 0x01;
+			break;
+		case 24:
+			tasha->dai[dai->id].bit_width = 24;
+			i2s_bit_mode = 0x00;
+			break;
+		default:
+			return -EINVAL;
+		}
+		tasha->dai[dai->id].rate = params_rate(params);
+		if (tasha->intf_type == WCD9XXX_INTERFACE_TYPE_I2C) {
+			switch (params_rate(params)) {
+			case 8000:
+				rx_fs_rate = 0;
+				break;
+			case 16000:
+				rx_fs_rate = 1;
+				break;
+			case 32000:
+				rx_fs_rate = 2;
+				break;
+			case 48000:
+				rx_fs_rate = 3;
+				break;
+			case 96000:
+				rx_fs_rate = 4;
+				break;
+			case 192000:
+				rx_fs_rate = 5;
+				break;
+			default:
+				dev_err(tasha->dev,
+				"%s: Invalid RX sample rate: %d\n",
+				__func__, params_rate(params));
+				return -EINVAL;
+			}
+			snd_soc_update_bits(codec,
+					WCD9335_DATA_HUB_DATA_HUB_RX_I2S_CTL,
+					0x20, i2s_bit_mode << 5);
+			snd_soc_update_bits(codec,
+					WCD9335_DATA_HUB_DATA_HUB_RX_I2S_CTL,
+					0x1c, (rx_fs_rate << 2));
+		}
+		break;
+	case SNDRV_PCM_STREAM_CAPTURE:
+		switch (params_rate(params)) {
+		case 8000:
+			tx_fs_rate = 0;
+			break;
+		case 16000:
+			tx_fs_rate = 1;
+			break;
+		case 32000:
+			tx_fs_rate = 3;
+			break;
+		case 48000:
+			tx_fs_rate = 4;
+			break;
+		case 96000:
+			tx_fs_rate = 5;
+			break;
+		case 192000:
+			tx_fs_rate = 6;
+			break;
+		case 384000:
+			tx_fs_rate = 7;
+			break;
+		default:
+			dev_err(tasha->dev, "%s: Invalid TX sample rate: %d\n",
+				__func__, params_rate(params));
+			return -EINVAL;
+		}
+		if (dai->id != AIF4_VIFEED &&
+		    dai->id != AIF4_MAD_TX) {
+			ret = tasha_set_decimator_rate(dai, tx_fs_rate,
+					params_rate(params));
+			if (ret < 0) {
+				dev_err(tasha->dev, "%s: cannot set TX Decimator rate: %d\n",
+					__func__, tx_fs_rate);
+				return ret;
+			}
+		}
+		tasha->dai[dai->id].rate = params_rate(params);
+		switch (params_width(params)) {
+		case 16:
+			tasha->dai[dai->id].bit_width = 16;
+			i2s_bit_mode = 0x01;
+			break;
+		case 24:
+			tasha->dai[dai->id].bit_width = 24;
+			i2s_bit_mode = 0x00;
+			break;
+		case 32:
+			tasha->dai[dai->id].bit_width = 32;
+			i2s_bit_mode = 0x00;
+			break;
+		default:
+			dev_err(tasha->dev, "%s: Invalid format 0x%x\n",
+				__func__, params_width(params));
+			return -EINVAL;
+		}
+		if (tasha->intf_type == WCD9XXX_INTERFACE_TYPE_I2C) {
+			snd_soc_update_bits(codec,
+				WCD9335_DATA_HUB_DATA_HUB_TX_I2S_CTL,
+				0x20, i2s_bit_mode << 5);
+			if (tx_fs_rate > 1)
+				tx_fs_rate--;
+			snd_soc_update_bits(codec,
+				WCD9335_DATA_HUB_DATA_HUB_TX_I2S_CTL,
+				0x1c, tx_fs_rate << 2);
+			snd_soc_update_bits(codec,
+				WCD9335_DATA_HUB_DATA_HUB_TX_I2S_SD0_L_CFG,
+				0x05, 0x05);
+
+			snd_soc_update_bits(codec,
+				WCD9335_DATA_HUB_DATA_HUB_TX_I2S_SD0_R_CFG,
+				0x05, 0x05);
+
+			snd_soc_update_bits(codec,
+				WCD9335_DATA_HUB_DATA_HUB_TX_I2S_SD1_L_CFG,
+				0x05, 0x05);
+
+			snd_soc_update_bits(codec,
+				WCD9335_DATA_HUB_DATA_HUB_TX_I2S_SD1_R_CFG,
+				0x05, 0x05);
+		}
+		break;
+	default:
+		pr_err("%s: Invalid stream type %d\n", __func__,
+			substream->stream);
+		return -EINVAL;
+	}
+	if (dai->id == AIF4_VIFEED)
+		tasha->dai[dai->id].bit_width = 32;
+
+	return 0;
+}
+
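+/*
+ * DAI format: only the master/slave selection is handled, and only on
+ * the I2C control interface, via bit 1 of the DATA_HUB I2S control
+ * registers.
+ */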
+static int tasha_set_dai_fmt(struct snd_soc_dai *dai, unsigned int fmt)
+{
+	struct tasha_priv *tasha = snd_soc_codec_get_drvdata(dai->codec);
+
+	switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
+	case SND_SOC_DAIFMT_CBS_CFS:
+		/* CPU is master */
+		if (tasha->intf_type == WCD9XXX_INTERFACE_TYPE_I2C) {
+			if (dai->id == AIF1_CAP)
+				snd_soc_update_bits(dai->codec,
+					WCD9335_DATA_HUB_DATA_HUB_TX_I2S_CTL,
+					0x2, 0);
+			else if (dai->id == AIF1_PB)
+				snd_soc_update_bits(dai->codec,
+					WCD9335_DATA_HUB_DATA_HUB_RX_I2S_CTL,
+					0x2, 0);
+		}
+		break;
+	case SND_SOC_DAIFMT_CBM_CFM:
+		/* CPU is slave */
+		if (tasha->intf_type == WCD9XXX_INTERFACE_TYPE_I2C) {
+			if (dai->id == AIF1_CAP)
+				snd_soc_update_bits(dai->codec,
+					WCD9335_DATA_HUB_DATA_HUB_TX_I2S_CTL,
+					0x2, 0x2);
+			else if (dai->id == AIF1_PB)
+				snd_soc_update_bits(dai->codec,
+					WCD9335_DATA_HUB_DATA_HUB_RX_I2S_CTL,
+					0x2, 0x2);
+		}
+		break;
+	default:
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static int tasha_set_dai_sysclk(struct snd_soc_dai *dai,
+		int clk_id, unsigned int freq, int dir)
+{
+	pr_debug("%s\n", __func__);
+	return 0;
+}
+
+static struct snd_soc_dai_ops tasha_dai_ops = {
+	.startup = tasha_startup,
+	.shutdown = tasha_shutdown,
+	.hw_params = tasha_hw_params,
+	.prepare = tasha_prepare,
+	.set_sysclk = tasha_set_dai_sysclk,
+	.set_fmt = tasha_set_dai_fmt,
+	.set_channel_map = tasha_set_channel_map,
+	.get_channel_map = tasha_get_channel_map,
+};
+
+static struct snd_soc_dai_driver tasha_dai[] = {
+	{
+		.name = "tasha_rx1",
+		.id = AIF1_PB,
+		.playback = {
+			.stream_name = "AIF1 Playback",
+			.rates = WCD9335_RATES_MASK | WCD9335_FRAC_RATES_MASK,
+			.formats = TASHA_FORMATS_S16_S24_LE,
+			.rate_max = 192000,
+			.rate_min = 8000,
+			.channels_min = 1,
+			.channels_max = 2,
+		},
+		.ops = &tasha_dai_ops,
+	},
+	{
+		.name = "tasha_tx1",
+		.id = AIF1_CAP,
+		.capture = {
+			.stream_name = "AIF1 Capture",
+			.rates = WCD9335_RATES_MASK,
+			.formats = TASHA_FORMATS_S16_S24_LE,
+			.rate_max = 192000,
+			.rate_min = 8000,
+			.channels_min = 1,
+			.channels_max = 4,
+		},
+		.ops = &tasha_dai_ops,
+	},
+	{
+		.name = "tasha_rx2",
+		.id = AIF2_PB,
+		.playback = {
+			.stream_name = "AIF2 Playback",
+			.rates = WCD9335_RATES_MASK | WCD9335_FRAC_RATES_MASK,
+			.formats = TASHA_FORMATS_S16_S24_LE,
+			.rate_min = 8000,
+			.rate_max = 192000,
+			.channels_min = 1,
+			.channels_max = 2,
+		},
+		.ops = &tasha_dai_ops,
+	},
+	{
+		.name = "tasha_tx2",
+		.id = AIF2_CAP,
+		.capture = {
+			.stream_name = "AIF2 Capture",
+			.rates = WCD9335_RATES_MASK,
+			.formats = TASHA_FORMATS_S16_S24_LE,
+			.rate_max = 192000,
+			.rate_min = 8000,
+			.channels_min = 1,
+			.channels_max = 8,
+		},
+		.ops = &tasha_dai_ops,
+	},
+	{
+		.name = "tasha_rx3",
+		.id = AIF3_PB,
+		.playback = {
+			.stream_name = "AIF3 Playback",
+			.rates = WCD9335_RATES_MASK | WCD9335_FRAC_RATES_MASK,
+			.formats = TASHA_FORMATS_S16_S24_LE,
+			.rate_min = 8000,
+			.rate_max = 192000,
+			.channels_min = 1,
+			.channels_max = 2,
+		},
+		.ops = &tasha_dai_ops,
+	},
+	{
+		.name = "tasha_tx3",
+		.id = AIF3_CAP,
+		.capture = {
+			.stream_name = "AIF3 Capture",
+			.rates = WCD9335_RATES_MASK,
+			.formats = TASHA_FORMATS_S16_S24_LE,
+			.rate_max = 48000,
+			.rate_min = 8000,
+			.channels_min = 1,
+			.channels_max = 2,
+		},
+		.ops = &tasha_dai_ops,
+	},
+	{
+		.name = "tasha_rx4",
+		.id = AIF4_PB,
+		.playback = {
+			.stream_name = "AIF4 Playback",
+			.rates = WCD9335_RATES_MASK | WCD9335_FRAC_RATES_MASK,
+			.formats = TASHA_FORMATS_S16_S24_LE,
+			.rate_min = 8000,
+			.rate_max = 192000,
+			.channels_min = 1,
+			.channels_max = 2,
+		},
+		.ops = &tasha_dai_ops,
+	},
+	{
+		.name = "tasha_mix_rx1",
+		.id = AIF_MIX1_PB,
+		.playback = {
+			.stream_name = "AIF Mix Playback",
+			.rates = WCD9335_RATES_MASK | WCD9335_FRAC_RATES_MASK,
+			.formats = TASHA_FORMATS_S16_S24_LE,
+			.rate_min = 8000,
+			.rate_max = 192000,
+			.channels_min = 1,
+			.channels_max = 8,
+		},
+		.ops = &tasha_dai_ops,
+	},
+	{
+		.name = "tasha_mad1",
+		.id = AIF4_MAD_TX,
+		.capture = {
+			.stream_name = "AIF4 MAD TX",
+			.rates = SNDRV_PCM_RATE_16000 | SNDRV_PCM_RATE_48000 |
+				 SNDRV_PCM_RATE_192000 | SNDRV_PCM_RATE_384000,
+			.formats = TASHA_FORMATS_S16_S24_S32_LE,
+			.rate_min = 16000,
+			.rate_max = 384000,
+			.channels_min = 1,
+			.channels_max = 1,
+		},
+		.ops = &tasha_dai_ops,
+	},
+	{
+		.name = "tasha_vifeedback",
+		.id = AIF4_VIFEED,
+		.capture = {
+			.stream_name = "VIfeed",
+			.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_48000,
+			.formats = TASHA_FORMATS_S16_S24_S32_LE,
+			.rate_max = 48000,
+			.rate_min = 8000,
+			.channels_min = 1,
+			.channels_max = 4,
+		 },
+		.ops = &tasha_dai_ops,
+	},
+	{
+		.name = "tasha_cpe",
+		.id = AIF5_CPE_TX,
+		.capture = {
+			.stream_name = "AIF5 CPE TX",
+			.rates = SNDRV_PCM_RATE_16000 | SNDRV_PCM_RATE_48000,
+			.formats = TASHA_FORMATS_S16_S24_S32_LE,
+			.rate_min = 16000,
+			.rate_max = 48000,
+			.channels_min = 1,
+			.channels_max = 1,
+		},
+	},
+};
+
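+/* Alternate DAI set used with the I2S (I2C control) interface type. */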
+static struct snd_soc_dai_driver tasha_i2s_dai[] = {
+	{
+		.name = "tasha_i2s_rx1",
+		.id = AIF1_PB,
+		.playback = {
+			.stream_name = "AIF1 Playback",
+			.rates = WCD9335_RATES_MASK,
+			.formats = TASHA_FORMATS_S16_S24_LE,
+			.rate_max = 192000,
+			.rate_min = 8000,
+			.channels_min = 1,
+			.channels_max = 2,
+		},
+		.ops = &tasha_dai_ops,
+	},
+	{
+		.name = "tasha_i2s_tx1",
+		.id = AIF1_CAP,
+		.capture = {
+			.stream_name = "AIF1 Capture",
+			.rates = WCD9335_RATES_MASK,
+			.formats = TASHA_FORMATS_S16_S24_LE,
+			.rate_max = 192000,
+			.rate_min = 8000,
+			.channels_min = 1,
+			.channels_max = 4,
+		},
+		.ops = &tasha_dai_ops,
+	},
+	{
+		.name = "tasha_i2s_rx2",
+		.id = AIF2_PB,
+		.playback = {
+			.stream_name = "AIF2 Playback",
+			.rates = WCD9335_RATES_MASK,
+			.formats = TASHA_FORMATS_S16_S24_LE,
+			.rate_max = 192000,
+			.rate_min = 8000,
+			.channels_min = 1,
+			.channels_max = 2,
+		},
+		.ops = &tasha_dai_ops,
+	},
+	{
+		.name = "tasha_i2s_tx2",
+		.id = AIF2_CAP,
+		.capture = {
+			.stream_name = "AIF2 Capture",
+			.rates = WCD9335_RATES_MASK,
+			.formats = TASHA_FORMATS_S16_S24_LE,
+			.rate_max = 192000,
+			.rate_min = 8000,
+			.channels_min = 1,
+			.channels_max = 4,
+		},
+		.ops = &tasha_dai_ops,
+	},
+};
+
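+/*
+ * Collapse the digital core once no users remain: runs from the
+ * delayed power_gate_work under power_lock and is skipped while
+ * power_active_ref is still held.
+ */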
+static void tasha_codec_power_gate_digital_core(struct tasha_priv *tasha)
+{
+	struct snd_soc_codec *codec = tasha->codec;
+
+	if (!codec)
+		return;
+
+	mutex_lock(&tasha->power_lock);
+	dev_dbg(codec->dev, "%s: Entering power gating function, %d\n",
+		__func__, tasha->power_active_ref);
+
+	if (tasha->power_active_ref > 0)
+		goto exit;
+
+	wcd9xxx_set_power_state(tasha->wcd9xxx,
+			WCD_REGION_POWER_COLLAPSE_BEGIN,
+			WCD9XXX_DIG_CORE_REGION_1);
+	snd_soc_update_bits(codec, WCD9335_CODEC_RPM_PWR_CDC_DIG_HM_CTL,
+			0x04, 0x04);
+	snd_soc_update_bits(codec, WCD9335_CODEC_RPM_PWR_CDC_DIG_HM_CTL,
+			0x01, 0x00);
+	snd_soc_update_bits(codec, WCD9335_CODEC_RPM_PWR_CDC_DIG_HM_CTL,
+			0x02, 0x00);
+	clear_bit(AUDIO_NOMINAL, &tasha->status_mask);
+	tasha_codec_update_sido_voltage(tasha, sido_buck_svs_voltage);
+	wcd9xxx_set_power_state(tasha->wcd9xxx, WCD_REGION_POWER_DOWN,
+				WCD9XXX_DIG_CORE_REGION_1);
+exit:
+	dev_dbg(codec->dev, "%s: Exiting power gating function, %d\n",
+		__func__, tasha->power_active_ref);
+	mutex_unlock(&tasha->power_lock);
+}
+
+static void tasha_codec_power_gate_work(struct work_struct *work)
+{
+	struct tasha_priv *tasha;
+	struct delayed_work *dwork;
+	struct snd_soc_codec *codec;
+
+	dwork = to_delayed_work(work);
+	tasha = container_of(dwork, struct tasha_priv, power_gate_work);
+	codec = tasha->codec;
+
+	if (!codec)
+		return;
+
+	tasha_codec_power_gate_digital_core(tasha);
+}
+
+/* called under power_lock acquisition */
+static int tasha_dig_core_remove_power_collapse(struct snd_soc_codec *codec)
+{
+	struct tasha_priv *tasha = snd_soc_codec_get_drvdata(codec);
+
+	tasha_codec_vote_max_bw(codec, true);
+	snd_soc_write(codec, WCD9335_CODEC_RPM_PWR_CDC_DIG_HM_CTL, 0x5);
+	snd_soc_write(codec, WCD9335_CODEC_RPM_PWR_CDC_DIG_HM_CTL, 0x7);
+	snd_soc_write(codec, WCD9335_CODEC_RPM_PWR_CDC_DIG_HM_CTL, 0x3);
+	snd_soc_update_bits(codec, WCD9335_CODEC_RPM_RST_CTL, 0x02, 0x00);
+	snd_soc_update_bits(codec, WCD9335_CODEC_RPM_RST_CTL, 0x02, 0x02);
+
+	wcd9xxx_set_power_state(tasha->wcd9xxx,
+			WCD_REGION_POWER_COLLAPSE_REMOVE,
+			WCD9XXX_DIG_CORE_REGION_1);
+	regcache_mark_dirty(codec->component.regmap);
+	regcache_sync_region(codec->component.regmap,
+			     TASHA_DIG_CORE_REG_MIN, TASHA_DIG_CORE_REG_MAX);
+	tasha_codec_vote_max_bw(codec, false);
+
+	return 0;
+}
+
+static int tasha_dig_core_power_collapse(struct tasha_priv *tasha,
+					 int req_state)
+{
+	struct snd_soc_codec *codec;
+	int cur_state;
+
+	/* Exit if feature is disabled */
+	if (!dig_core_collapse_enable)
+		return 0;
+
+	mutex_lock(&tasha->power_lock);
+	if (req_state == POWER_COLLAPSE)
+		tasha->power_active_ref--;
+	else if (req_state == POWER_RESUME)
+		tasha->power_active_ref++;
+	else
+		goto unlock_mutex;
+
+	if (tasha->power_active_ref < 0) {
+		dev_info(tasha->dev,
+			"%s: power_active_ref is negative, resetting it\n",
+			__func__);
+		tasha->power_active_ref = 0;
+		goto unlock_mutex;
+	}
+
+	codec = tasha->codec;
+	if (!codec)
+		goto unlock_mutex;
+
+	if (req_state == POWER_COLLAPSE) {
+		if (tasha->power_active_ref == 0) {
+			schedule_delayed_work(&tasha->power_gate_work,
+			msecs_to_jiffies(dig_core_collapse_timer * 1000));
+		}
+	} else if (req_state == POWER_RESUME) {
+		if (tasha->power_active_ref == 1) {
+			/*
+			 * At this point there are two possible cases:
+			 * 1. The core is already in power collapse.
+			 * 2. The power-gate timer was scheduled but has
+			 *    not expired yet, or its worker is waiting
+			 *    on the power_lock.
+			 */
+			cur_state = wcd9xxx_get_current_power_state(
+						tasha->wcd9xxx,
+						WCD9XXX_DIG_CORE_REGION_1);
+			if (cur_state == WCD_REGION_POWER_DOWN)
+				tasha_dig_core_remove_power_collapse(codec);
+			else {
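+				/*
+				 * Drop power_lock before cancelling: the
+				 * gate worker itself takes power_lock, so
+				 * cancel_delayed_work_sync() would
+				 * deadlock if it were held here.
+				 */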
+				mutex_unlock(&tasha->power_lock);
+				cancel_delayed_work_sync(
+						&tasha->power_gate_work);
+				mutex_lock(&tasha->power_lock);
+			}
+		}
+	}
+
+unlock_mutex:
+	mutex_unlock(&tasha->power_lock);
+
+	return 0;
+}
+
+static int __tasha_cdc_mclk_enable_locked(struct tasha_priv *tasha,
+					  bool enable)
+{
+	int ret = 0;
+
+	if (!tasha->wcd_ext_clk) {
+		dev_err(tasha->dev, "%s: wcd ext clock is NULL\n", __func__);
+		return -EINVAL;
+	}
+
+	dev_dbg(tasha->dev, "%s: mclk_enable = %u\n", __func__, enable);
+
+	if (enable) {
+		tasha_dig_core_power_collapse(tasha, POWER_RESUME);
+		ret = tasha_cdc_req_mclk_enable(tasha, true);
+		if (ret)
+			goto err;
+
+		set_bit(AUDIO_NOMINAL, &tasha->status_mask);
+		tasha_codec_apply_sido_voltage(tasha,
+				SIDO_VOLTAGE_NOMINAL_MV);
+	} else {
+		if (!dig_core_collapse_enable) {
+			clear_bit(AUDIO_NOMINAL, &tasha->status_mask);
+			tasha_codec_update_sido_voltage(tasha,
+						sido_buck_svs_voltage);
+		}
+		tasha_cdc_req_mclk_enable(tasha, false);
+		tasha_dig_core_power_collapse(tasha, POWER_COLLAPSE);
+	}
+
+err:
+	return ret;
+}
+
+static int __tasha_cdc_mclk_enable(struct tasha_priv *tasha,
+				   bool enable)
+{
+	int ret;
+
+	WCD9XXX_V2_BG_CLK_LOCK(tasha->resmgr);
+	ret = __tasha_cdc_mclk_enable_locked(tasha, enable);
+	WCD9XXX_V2_BG_CLK_UNLOCK(tasha->resmgr);
+
+	return ret;
+}
+
+int tasha_cdc_mclk_enable(struct snd_soc_codec *codec, int enable, bool dapm)
+{
+	struct tasha_priv *tasha = snd_soc_codec_get_drvdata(codec);
+
+	return __tasha_cdc_mclk_enable(tasha, enable);
+}
+EXPORT_SYMBOL(tasha_cdc_mclk_enable);
+
+int tasha_cdc_mclk_tx_enable(struct snd_soc_codec *codec, int enable, bool dapm)
+{
+	struct tasha_priv *tasha = snd_soc_codec_get_drvdata(codec);
+	int ret = 0;
+
+	dev_dbg(tasha->dev, "%s: clk_mode: %d, enable: %d, clk_internal: %d\n",
+		__func__, tasha->clk_mode, enable, tasha->clk_internal);
+	if (tasha->clk_mode || tasha->clk_internal) {
+		if (enable) {
+			tasha_cdc_sido_ccl_enable(tasha, true);
+			wcd_resmgr_enable_master_bias(tasha->resmgr);
+			tasha_dig_core_power_collapse(tasha, POWER_RESUME);
+			snd_soc_update_bits(codec,
+					WCD9335_CDC_CLK_RST_CTRL_FS_CNT_CONTROL,
+					0x01, 0x01);
+			snd_soc_update_bits(codec,
+					WCD9335_CDC_CLK_RST_CTRL_MCLK_CONTROL,
+					0x01, 0x01);
+			set_bit(CPE_NOMINAL, &tasha->status_mask);
+			tasha_codec_update_sido_voltage(tasha,
+						SIDO_VOLTAGE_NOMINAL_MV);
+			tasha->clk_internal = true;
+		} else {
+			tasha->clk_internal = false;
+			clear_bit(CPE_NOMINAL, &tasha->status_mask);
+			tasha_codec_update_sido_voltage(tasha,
+						sido_buck_svs_voltage);
+			tasha_dig_core_power_collapse(tasha, POWER_COLLAPSE);
+			wcd_resmgr_disable_master_bias(tasha->resmgr);
+			tasha_cdc_sido_ccl_enable(tasha, false);
+		}
+	} else {
+		ret = __tasha_cdc_mclk_enable(tasha, enable);
+	}
+	return ret;
+}
+EXPORT_SYMBOL(tasha_cdc_mclk_tx_enable);
+
+static ssize_t tasha_codec_version_read(struct snd_info_entry *entry,
+			       void *file_private_data, struct file *file,
+			       char __user *buf, size_t count, loff_t pos)
+{
+	struct tasha_priv *tasha;
+	struct wcd9xxx *wcd9xxx;
+	char buffer[TASHA_VERSION_ENTRY_SIZE];
+	int len = 0;
+
+	tasha = (struct tasha_priv *) entry->private_data;
+	if (!tasha) {
+		pr_err("%s: tasha priv is null\n", __func__);
+		return -EINVAL;
+	}
+
+	wcd9xxx = tasha->wcd9xxx;
+
+	if (wcd9xxx->codec_type->id_major == TASHA_MAJOR) {
+		if (TASHA_IS_1_0(wcd9xxx))
+			len = snprintf(buffer, sizeof(buffer), "WCD9335_1_0\n");
+		else if (TASHA_IS_1_1(wcd9xxx))
+			len = snprintf(buffer, sizeof(buffer), "WCD9335_1_1\n");
+		else
+			len = snprintf(buffer, sizeof(buffer), "VER_UNDEFINED\n");
+	} else if (wcd9xxx->codec_type->id_major == TASHA2P0_MAJOR) {
+		len = snprintf(buffer, sizeof(buffer), "WCD9335_2_0\n");
+	} else {
+		len = snprintf(buffer, sizeof(buffer), "VER_UNDEFINED\n");
+	}
+
+	return simple_read_from_buffer(buf, count, &pos, buffer, len);
+}
+
+static struct snd_info_entry_ops tasha_codec_info_ops = {
+	.read = tasha_codec_version_read,
+};
+
+/*
+ * tasha_codec_info_create_codec_entry - creates wcd9335 module
+ * @codec_root: The parent directory
+ * @codec: Codec instance
+ *
+ * Creates wcd9335 module and version entry under the given
+ * parent directory.
+ *
+ * Return: 0 on success or negative error code on failure.
+ */
+int tasha_codec_info_create_codec_entry(struct snd_info_entry *codec_root,
+					struct snd_soc_codec *codec)
+{
+	struct snd_info_entry *version_entry;
+	struct tasha_priv *tasha;
+	struct snd_soc_card *card;
+
+	if (!codec_root || !codec)
+		return -EINVAL;
+
+	tasha = snd_soc_codec_get_drvdata(codec);
+	card = codec->component.card;
+	tasha->entry = snd_register_module_info(codec_root->module,
+						"tasha",
+						codec_root);
+	if (!tasha->entry) {
+		dev_dbg(codec->dev, "%s: failed to create wcd9335 entry\n",
+			__func__);
+		return -ENOMEM;
+	}
+
+	version_entry = snd_info_create_card_entry(card->snd_card,
+						   "version",
+						   tasha->entry);
+	if (!version_entry) {
+		dev_dbg(codec->dev, "%s: failed to create wcd9335 version entry\n",
+			__func__);
+		return -ENOMEM;
+	}
+
+	version_entry->private_data = tasha;
+	version_entry->size = TASHA_VERSION_ENTRY_SIZE;
+	version_entry->content = SNDRV_INFO_CONTENT_DATA;
+	version_entry->c.ops = &tasha_codec_info_ops;
+
+	if (snd_info_register(version_entry) < 0) {
+		snd_info_free_entry(version_entry);
+		return -ENOMEM;
+	}
+	tasha->version_entry = version_entry;
+
+	return 0;
+}
+EXPORT_SYMBOL(tasha_codec_info_create_codec_entry);
+
+static int __tasha_codec_internal_rco_ctrl(
+	struct snd_soc_codec *codec, bool enable)
+{
+	struct tasha_priv *tasha = snd_soc_codec_get_drvdata(codec);
+	int ret = 0;
+
+	if (enable) {
+		tasha_cdc_sido_ccl_enable(tasha, true);
+		if (wcd_resmgr_get_clk_type(tasha->resmgr) ==
+		    WCD_CLK_RCO) {
+			ret = wcd_resmgr_enable_clk_block(tasha->resmgr,
+							  WCD_CLK_RCO);
+		} else {
+			ret = tasha_cdc_req_mclk_enable(tasha, true);
+			ret |= wcd_resmgr_enable_clk_block(tasha->resmgr,
+							   WCD_CLK_RCO);
+			ret |= tasha_cdc_req_mclk_enable(tasha, false);
+		}
+	} else {
+		ret = wcd_resmgr_disable_clk_block(tasha->resmgr,
+						   WCD_CLK_RCO);
+		tasha_cdc_sido_ccl_enable(tasha, false);
+	}
+
+	if (ret) {
+		dev_err(codec->dev, "%s: Error in %s RCO\n",
+			__func__, (enable ? "enabling" : "disabling"));
+		ret = -EINVAL;
+	}
+
+	return ret;
+}
+
+/*
+ * tasha_codec_internal_rco_ctrl()
+ * The caller must not already hold BG_CLK_LOCK, as it is
+ * acquired here.
+ */
+static int tasha_codec_internal_rco_ctrl(struct snd_soc_codec *codec,
+				  bool enable)
+{
+	struct tasha_priv *tasha = snd_soc_codec_get_drvdata(codec);
+	int ret = 0;
+
+	WCD9XXX_V2_BG_CLK_LOCK(tasha->resmgr);
+	ret = __tasha_codec_internal_rco_ctrl(codec, enable);
+	WCD9XXX_V2_BG_CLK_UNLOCK(tasha->resmgr);
+	return ret;
+}
+
+/*
+ * tasha_mbhc_hs_detect: start MBHC insertion/removal detection
+ * @codec: handle to snd_soc_codec *
+ * @mbhc_cfg: handle to mbhc configuration structure
+ *
+ * Return: 0 on success, or an error code if mbhc_start fails
+ */
+int tasha_mbhc_hs_detect(struct snd_soc_codec *codec,
+			 struct wcd_mbhc_config *mbhc_cfg)
+{
+	struct tasha_priv *tasha = snd_soc_codec_get_drvdata(codec);
+
+	return wcd_mbhc_start(&tasha->mbhc, mbhc_cfg);
+}
+EXPORT_SYMBOL(tasha_mbhc_hs_detect);
+
+/*
+ * tasha_mbhc_hs_detect_exit: stop MBHC insertion/removal detection
+ * @codec: handle to snd_soc_codec *
+ */
+void tasha_mbhc_hs_detect_exit(struct snd_soc_codec *codec)
+{
+	struct tasha_priv *tasha = snd_soc_codec_get_drvdata(codec);
+
+	wcd_mbhc_stop(&tasha->mbhc);
+}
+EXPORT_SYMBOL(tasha_mbhc_hs_detect_exit);
+
+static int wcd9335_get_micb_vout_ctl_val(u32 micb_mv)
+{
+	/* min micbias voltage is 1V and maximum is 2.85V */
+	if (micb_mv < 1000 || micb_mv > 2850) {
+		pr_err("%s: unsupported micbias voltage\n", __func__);
+		return -EINVAL;
+	}
+
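+	/*
+	 * Each step above the 1 V floor is 50 mV; e.g. a 1.8 V micbias
+	 * maps to (1800 - 1000) / 50 = 16 (register value 0x10).
+	 */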
+	return (micb_mv - 1000) / 50;
+}
+
+static const struct tasha_reg_mask_val tasha_reg_update_reset_val_1_1[] = {
+	{WCD9335_RCO_CTRL_2, 0xFF, 0x47},
+	{WCD9335_FLYBACK_VNEG_DAC_CTRL_4, 0xFF, 0x60},
+};
+
+static const struct tasha_reg_mask_val tasha_codec_reg_init_val_1_1[] = {
+	{WCD9335_FLYBACK_VNEG_DAC_CTRL_1, 0xFF, 0x65},
+	{WCD9335_FLYBACK_VNEG_DAC_CTRL_2, 0xFF, 0x52},
+	{WCD9335_FLYBACK_VNEG_DAC_CTRL_3, 0xFF, 0xAF},
+	{WCD9335_FLYBACK_VNEG_DAC_CTRL_4, 0xFF, 0x60},
+	{WCD9335_FLYBACK_VNEG_CTRL_3, 0xFF, 0xF4},
+	{WCD9335_FLYBACK_VNEG_CTRL_9, 0xFF, 0x40},
+	{WCD9335_FLYBACK_VNEG_CTRL_2, 0xFF, 0x4F},
+	{WCD9335_FLYBACK_EN, 0xFF, 0x6E},
+	{WCD9335_CDC_RX2_RX_PATH_SEC0, 0xF8, 0xF8},
+	{WCD9335_CDC_RX1_RX_PATH_SEC0, 0xF8, 0xF8},
+};
+
+static const struct tasha_reg_mask_val tasha_codec_reg_init_val_1_0[] = {
+	{WCD9335_FLYBACK_VNEG_CTRL_3, 0xFF, 0x54},
+	{WCD9335_CDC_RX2_RX_PATH_SEC0, 0xFC, 0xFC},
+	{WCD9335_CDC_RX1_RX_PATH_SEC0, 0xFC, 0xFC},
+};
+
+static const struct tasha_reg_mask_val tasha_codec_reg_init_val_2_0[] = {
+	{WCD9335_RCO_CTRL_2, 0x0F, 0x08},
+	{WCD9335_RX_BIAS_FLYB_MID_RST, 0xF0, 0x10},
+	{WCD9335_FLYBACK_CTRL_1, 0x20, 0x20},
+	{WCD9335_HPH_OCP_CTL, 0xFF, 0x7A},
+	{WCD9335_HPH_L_TEST, 0x01, 0x01},
+	{WCD9335_HPH_R_TEST, 0x01, 0x01},
+	{WCD9335_CDC_BOOST0_BOOST_CFG1, 0x3F, 0x12},
+	{WCD9335_CDC_BOOST0_BOOST_CFG2, 0x1C, 0x08},
+	{WCD9335_CDC_COMPANDER7_CTL7, 0x1E, 0x18},
+	{WCD9335_CDC_BOOST1_BOOST_CFG1, 0x3F, 0x12},
+	{WCD9335_CDC_BOOST1_BOOST_CFG2, 0x1C, 0x08},
+	{WCD9335_CDC_COMPANDER8_CTL7, 0x1E, 0x18},
+	{WCD9335_CDC_TX0_TX_PATH_SEC7, 0xFF, 0x45},
+	{WCD9335_CDC_RX0_RX_PATH_SEC0, 0xFC, 0xF4},
+	{WCD9335_HPH_REFBUFF_LP_CTL, 0x08, 0x08},
+	{WCD9335_HPH_REFBUFF_LP_CTL, 0x06, 0x02},
+	{WCD9335_DIFF_LO_CORE_OUT_PROG, 0xFC, 0xA0},
+	{WCD9335_SE_LO_COM1, 0xFF, 0xC0},
+	{WCD9335_CDC_RX3_RX_PATH_SEC0, 0xFC, 0xF4},
+	{WCD9335_CDC_RX4_RX_PATH_SEC0, 0xFC, 0xF4},
+	{WCD9335_CDC_RX5_RX_PATH_SEC0, 0xFC, 0xF8},
+	{WCD9335_CDC_RX6_RX_PATH_SEC0, 0xFC, 0xF8},
+};
+
+static const struct tasha_reg_mask_val tasha_codec_reg_defaults[] = {
+	{WCD9335_CODEC_RPM_CLK_GATE, 0x03, 0x00},
+	{WCD9335_CODEC_RPM_CLK_MCLK_CFG, 0x03, 0x01},
+	{WCD9335_CODEC_RPM_CLK_MCLK_CFG, 0x04, 0x04},
+};
+
+static const struct tasha_reg_mask_val tasha_codec_reg_i2c_defaults[] = {
+	{WCD9335_ANA_CLK_TOP, 0x20, 0x20},
+	{WCD9335_CODEC_RPM_CLK_GATE, 0x03, 0x01},
+	{WCD9335_CODEC_RPM_CLK_MCLK_CFG, 0x03, 0x00},
+	{WCD9335_CODEC_RPM_CLK_MCLK_CFG, 0x05, 0x05},
+	{WCD9335_DATA_HUB_DATA_HUB_RX0_INP_CFG, 0x01, 0x01},
+	{WCD9335_DATA_HUB_DATA_HUB_RX1_INP_CFG, 0x01, 0x01},
+	{WCD9335_DATA_HUB_DATA_HUB_RX2_INP_CFG, 0x01, 0x01},
+	{WCD9335_DATA_HUB_DATA_HUB_RX3_INP_CFG, 0x01, 0x01},
+	{WCD9335_DATA_HUB_DATA_HUB_TX_I2S_SD0_L_CFG, 0x05, 0x05},
+	{WCD9335_DATA_HUB_DATA_HUB_TX_I2S_SD0_R_CFG, 0x05, 0x05},
+	{WCD9335_DATA_HUB_DATA_HUB_TX_I2S_SD1_L_CFG, 0x05, 0x05},
+	{WCD9335_DATA_HUB_DATA_HUB_TX_I2S_SD1_R_CFG, 0x05, 0x05},
+};
+
+static const struct tasha_reg_mask_val tasha_codec_reg_init_common_val[] = {
+	/* Rbuckfly/R_EAR(32) */
+	{WCD9335_CDC_CLSH_K2_MSB, 0x0F, 0x00},
+	{WCD9335_CDC_CLSH_K2_LSB, 0xFF, 0x60},
+	{WCD9335_CPE_SS_DMIC_CFG, 0x80, 0x00},
+	{WCD9335_CDC_BOOST0_BOOST_CTL, 0x70, 0x50},
+	{WCD9335_CDC_BOOST1_BOOST_CTL, 0x70, 0x50},
+	{WCD9335_CDC_RX7_RX_PATH_CFG1, 0x08, 0x08},
+	{WCD9335_CDC_RX8_RX_PATH_CFG1, 0x08, 0x08},
+	{WCD9335_ANA_LO_1_2, 0x3C, 0X3C},
+	{WCD9335_DIFF_LO_COM_SWCAP_REFBUF_FREQ, 0x70, 0x00},
+	{WCD9335_SOC_MAD_AUDIO_CTL_2, 0x03, 0x03},
+	{WCD9335_CDC_TOP_TOP_CFG1, 0x02, 0x02},
+	{WCD9335_CDC_TOP_TOP_CFG1, 0x01, 0x01},
+	{WCD9335_EAR_CMBUFF, 0x08, 0x00},
+	{WCD9335_CDC_TX9_SPKR_PROT_PATH_CFG0, 0x01, 0x01},
+	{WCD9335_CDC_TX10_SPKR_PROT_PATH_CFG0, 0x01, 0x01},
+	{WCD9335_CDC_TX11_SPKR_PROT_PATH_CFG0, 0x01, 0x01},
+	{WCD9335_CDC_TX12_SPKR_PROT_PATH_CFG0, 0x01, 0x01},
+	{WCD9335_CDC_COMPANDER7_CTL3, 0x80, 0x80},
+	{WCD9335_CDC_COMPANDER8_CTL3, 0x80, 0x80},
+	{WCD9335_CDC_COMPANDER7_CTL7, 0x01, 0x01},
+	{WCD9335_CDC_COMPANDER8_CTL7, 0x01, 0x01},
+	{WCD9335_CDC_RX0_RX_PATH_CFG0, 0x01, 0x01},
+	{WCD9335_CDC_RX1_RX_PATH_CFG0, 0x01, 0x01},
+	{WCD9335_CDC_RX2_RX_PATH_CFG0, 0x01, 0x01},
+	{WCD9335_CDC_RX3_RX_PATH_CFG0, 0x01, 0x01},
+	{WCD9335_CDC_RX4_RX_PATH_CFG0, 0x01, 0x01},
+	{WCD9335_CDC_RX5_RX_PATH_CFG0, 0x01, 0x01},
+	{WCD9335_CDC_RX6_RX_PATH_CFG0, 0x01, 0x01},
+	{WCD9335_CDC_RX7_RX_PATH_CFG0, 0x01, 0x01},
+	{WCD9335_CDC_RX8_RX_PATH_CFG0, 0x01, 0x01},
+	{WCD9335_CDC_RX0_RX_PATH_MIX_CFG, 0x01, 0x01},
+	{WCD9335_CDC_RX1_RX_PATH_MIX_CFG, 0x01, 0x01},
+	{WCD9335_CDC_RX2_RX_PATH_MIX_CFG, 0x01, 0x01},
+	{WCD9335_CDC_RX3_RX_PATH_MIX_CFG, 0x01, 0x01},
+	{WCD9335_CDC_RX4_RX_PATH_MIX_CFG, 0x01, 0x01},
+	{WCD9335_CDC_RX5_RX_PATH_MIX_CFG, 0x01, 0x01},
+	{WCD9335_CDC_RX6_RX_PATH_MIX_CFG, 0x01, 0x01},
+	{WCD9335_CDC_RX7_RX_PATH_MIX_CFG, 0x01, 0x01},
+	{WCD9335_CDC_RX8_RX_PATH_MIX_CFG, 0x01, 0x01},
+	{WCD9335_VBADC_IBIAS_FE, 0x0C, 0x08},
+};
+
+static const struct tasha_reg_mask_val tasha_codec_reg_init_1_x_val[] = {
+	/* Enable TX HPF Filter & Linear Phase */
+	{WCD9335_CDC_TX0_TX_PATH_CFG0, 0x11, 0x11},
+	{WCD9335_CDC_TX1_TX_PATH_CFG0, 0x11, 0x11},
+	{WCD9335_CDC_TX2_TX_PATH_CFG0, 0x11, 0x11},
+	{WCD9335_CDC_TX3_TX_PATH_CFG0, 0x11, 0x11},
+	{WCD9335_CDC_TX4_TX_PATH_CFG0, 0x11, 0x11},
+	{WCD9335_CDC_TX5_TX_PATH_CFG0, 0x11, 0x11},
+	{WCD9335_CDC_TX6_TX_PATH_CFG0, 0x11, 0x11},
+	{WCD9335_CDC_TX7_TX_PATH_CFG0, 0x11, 0x11},
+	{WCD9335_CDC_TX8_TX_PATH_CFG0, 0x11, 0x11},
+	{WCD9335_CDC_RX0_RX_PATH_SEC0, 0xF8, 0xF8},
+	{WCD9335_CDC_RX0_RX_PATH_SEC1, 0x08, 0x08},
+	{WCD9335_CDC_RX1_RX_PATH_SEC1, 0x08, 0x08},
+	{WCD9335_CDC_RX2_RX_PATH_SEC1, 0x08, 0x08},
+	{WCD9335_CDC_RX3_RX_PATH_SEC1, 0x08, 0x08},
+	{WCD9335_CDC_RX4_RX_PATH_SEC1, 0x08, 0x08},
+	{WCD9335_CDC_RX5_RX_PATH_SEC1, 0x08, 0x08},
+	{WCD9335_CDC_RX6_RX_PATH_SEC1, 0x08, 0x08},
+	{WCD9335_CDC_RX7_RX_PATH_SEC1, 0x08, 0x08},
+	{WCD9335_CDC_RX8_RX_PATH_SEC1, 0x08, 0x08},
+	{WCD9335_CDC_RX0_RX_PATH_MIX_SEC0, 0x08, 0x08},
+	{WCD9335_CDC_RX1_RX_PATH_MIX_SEC0, 0x08, 0x08},
+	{WCD9335_CDC_RX2_RX_PATH_MIX_SEC0, 0x08, 0x08},
+	{WCD9335_CDC_RX3_RX_PATH_MIX_SEC0, 0x08, 0x08},
+	{WCD9335_CDC_RX4_RX_PATH_MIX_SEC0, 0x08, 0x08},
+	{WCD9335_CDC_RX5_RX_PATH_MIX_SEC0, 0x08, 0x08},
+	{WCD9335_CDC_RX6_RX_PATH_MIX_SEC0, 0x08, 0x08},
+	{WCD9335_CDC_RX7_RX_PATH_MIX_SEC0, 0x08, 0x08},
+	{WCD9335_CDC_RX8_RX_PATH_MIX_SEC0, 0x08, 0x08},
+	{WCD9335_CDC_TX0_TX_PATH_SEC2, 0x01, 0x01},
+	{WCD9335_CDC_TX1_TX_PATH_SEC2, 0x01, 0x01},
+	{WCD9335_CDC_TX2_TX_PATH_SEC2, 0x01, 0x01},
+	{WCD9335_CDC_TX3_TX_PATH_SEC2, 0x01, 0x01},
+	{WCD9335_CDC_TX4_TX_PATH_SEC2, 0x01, 0x01},
+	{WCD9335_CDC_TX5_TX_PATH_SEC2, 0x01, 0x01},
+	{WCD9335_CDC_TX6_TX_PATH_SEC2, 0x01, 0x01},
+	{WCD9335_CDC_TX7_TX_PATH_SEC2, 0x01, 0x01},
+	{WCD9335_CDC_TX8_TX_PATH_SEC2, 0x01, 0x01},
+	{WCD9335_CDC_RX3_RX_PATH_SEC0, 0xF8, 0xF0},
+	{WCD9335_CDC_RX4_RX_PATH_SEC0, 0xF8, 0xF0},
+	{WCD9335_CDC_RX5_RX_PATH_SEC0, 0xF8, 0xF8},
+	{WCD9335_CDC_RX6_RX_PATH_SEC0, 0xF8, 0xF8},
+	{WCD9335_RX_OCP_COUNT, 0xFF, 0xFF},
+	{WCD9335_HPH_OCP_CTL, 0xF0, 0x70},
+	{WCD9335_CPE_SS_CPAR_CFG, 0xFF, 0x00},
+	{WCD9335_FLYBACK_VNEG_CTRL_1, 0xFF, 0x63},
+	{WCD9335_FLYBACK_VNEG_CTRL_4, 0xFF, 0x7F},
+	{WCD9335_CLASSH_CTRL_VCL_1, 0xFF, 0x60},
+	{WCD9335_CLASSH_CTRL_CCL_5, 0xFF, 0x40},
+	{WCD9335_RX_TIMER_DIV, 0xFF, 0x32},
+	{WCD9335_SE_LO_COM2, 0xFF, 0x01},
+	{WCD9335_MBHC_ZDET_ANA_CTL, 0x0F, 0x07},
+	{WCD9335_RX_BIAS_HPH_PA, 0xF0, 0x60},
+	{WCD9335_HPH_RDAC_LDO_CTL, 0x88, 0x88},
+	{WCD9335_HPH_L_EN, 0x20, 0x20},
+	{WCD9335_HPH_R_EN, 0x20, 0x20},
+	{WCD9335_DIFF_LO_CORE_OUT_PROG, 0xFC, 0xD8},
+	{WCD9335_CDC_RX5_RX_PATH_SEC3, 0xBD, 0xBD},
+	{WCD9335_CDC_RX6_RX_PATH_SEC3, 0xBD, 0xBD},
+	{WCD9335_DIFF_LO_COM_PA_FREQ, 0x70, 0x40},
+};
+
+static void tasha_update_reg_reset_values(struct snd_soc_codec *codec)
+{
+	u32 i;
+	struct wcd9xxx *tasha_core = dev_get_drvdata(codec->dev->parent);
+
+	if (TASHA_IS_1_1(tasha_core)) {
+		for (i = 0; i < ARRAY_SIZE(tasha_reg_update_reset_val_1_1);
+		     i++)
+			snd_soc_write(codec,
+				      tasha_reg_update_reset_val_1_1[i].reg,
+				      tasha_reg_update_reset_val_1_1[i].val);
+	}
+}
+
+static void tasha_codec_init_reg(struct snd_soc_codec *codec)
+{
+	u32 i;
+	struct wcd9xxx *wcd9xxx = dev_get_drvdata(codec->dev->parent);
+
+	for (i = 0; i < ARRAY_SIZE(tasha_codec_reg_init_common_val); i++)
+		snd_soc_update_bits(codec,
+				tasha_codec_reg_init_common_val[i].reg,
+				tasha_codec_reg_init_common_val[i].mask,
+				tasha_codec_reg_init_common_val[i].val);
+
+	if (TASHA_IS_1_1(wcd9xxx) ||
+	    TASHA_IS_1_0(wcd9xxx))
+		for (i = 0; i < ARRAY_SIZE(tasha_codec_reg_init_1_x_val); i++)
+			snd_soc_update_bits(codec,
+					tasha_codec_reg_init_1_x_val[i].reg,
+					tasha_codec_reg_init_1_x_val[i].mask,
+					tasha_codec_reg_init_1_x_val[i].val);
+
+	if (TASHA_IS_1_1(wcd9xxx)) {
+		for (i = 0; i < ARRAY_SIZE(tasha_codec_reg_init_val_1_1); i++)
+			snd_soc_update_bits(codec,
+					tasha_codec_reg_init_val_1_1[i].reg,
+					tasha_codec_reg_init_val_1_1[i].mask,
+					tasha_codec_reg_init_val_1_1[i].val);
+	} else if (TASHA_IS_1_0(wcd9xxx)) {
+		for (i = 0; i < ARRAY_SIZE(tasha_codec_reg_init_val_1_0); i++)
+			snd_soc_update_bits(codec,
+					tasha_codec_reg_init_val_1_0[i].reg,
+					tasha_codec_reg_init_val_1_0[i].mask,
+					tasha_codec_reg_init_val_1_0[i].val);
+	} else if (TASHA_IS_2_0(wcd9xxx)) {
+		for (i = 0; i < ARRAY_SIZE(tasha_codec_reg_init_val_2_0); i++)
+			snd_soc_update_bits(codec,
+					tasha_codec_reg_init_val_2_0[i].reg,
+					tasha_codec_reg_init_val_2_0[i].mask,
+					tasha_codec_reg_init_val_2_0[i].val);
+	}
+}
+
+static void tasha_update_reg_defaults(struct tasha_priv *tasha)
+{
+	u32 i;
+	struct wcd9xxx *wcd9xxx;
+
+	wcd9xxx = tasha->wcd9xxx;
+	for (i = 0; i < ARRAY_SIZE(tasha_codec_reg_defaults); i++)
+		regmap_update_bits(wcd9xxx->regmap,
+				   tasha_codec_reg_defaults[i].reg,
+				   tasha_codec_reg_defaults[i].mask,
+				   tasha_codec_reg_defaults[i].val);
+
+	tasha->intf_type = wcd9xxx_get_intf_type();
+	if (tasha->intf_type == WCD9XXX_INTERFACE_TYPE_I2C)
+		for (i = 0; i < ARRAY_SIZE(tasha_codec_reg_i2c_defaults); i++)
+			regmap_update_bits(wcd9xxx->regmap,
+					   tasha_codec_reg_i2c_defaults[i].reg,
+					   tasha_codec_reg_i2c_defaults[i].mask,
+					   tasha_codec_reg_i2c_defaults[i].val);
+}
+
+static void tasha_slim_interface_init_reg(struct snd_soc_codec *codec)
+{
+	int i;
+	struct tasha_priv *priv = snd_soc_codec_get_drvdata(codec);
+
+	for (i = 0; i < WCD9XXX_SLIM_NUM_PORT_REG; i++)
+		wcd9xxx_interface_reg_write(priv->wcd9xxx,
+					    TASHA_SLIM_PGD_PORT_INT_EN0 + i,
+					    0xFF);
+}
+
+static irqreturn_t tasha_slimbus_irq(int irq, void *data)
+{
+	struct tasha_priv *priv = data;
+	unsigned long status = 0;
+	int i, j, port_id, k;
+	u32 bit;
+	u8 val, int_val = 0;
+	bool tx, cleared;
+	unsigned short reg = 0;
+
+	for (i = TASHA_SLIM_PGD_PORT_INT_STATUS_RX_0, j = 0;
+	     i <= TASHA_SLIM_PGD_PORT_INT_STATUS_TX_1; i++, j++) {
+		val = wcd9xxx_interface_reg_read(priv->wcd9xxx, i);
+		status |= ((u32)val << (8 * j));
+	}
+
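+	/*
+	 * The combined status word carries RX ports in bits 0-15 and
+	 * TX ports in bits 16-31, matching the register read order
+	 * above.
+	 */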
+	for_each_set_bit(j, &status, 32) {
+		tx = (j >= 16);
+		port_id = (tx ? j - 16 : j);
+		val = wcd9xxx_interface_reg_read(priv->wcd9xxx,
+				TASHA_SLIM_PGD_PORT_INT_RX_SOURCE0 + j);
+		if (val) {
+			if (!tx)
+				reg = TASHA_SLIM_PGD_PORT_INT_EN0 +
+					(port_id / 8);
+			else
+				reg = TASHA_SLIM_PGD_PORT_INT_TX_EN0 +
+					(port_id / 8);
+			int_val = wcd9xxx_interface_reg_read(
+				priv->wcd9xxx, reg);
+			/*
+			 * Ignore interrupts for ports for which the
+			 * interrupts are not specifically enabled.
+			 */
+			if (!(int_val & (1 << (port_id % 8))))
+				continue;
+		}
+		if (val & TASHA_SLIM_IRQ_OVERFLOW)
+			pr_err_ratelimited(
+			   "%s: overflow error on %s port %d, value %x\n",
+			   __func__, (tx ? "TX" : "RX"), port_id, val);
+		if (val & TASHA_SLIM_IRQ_UNDERFLOW)
+			pr_err_ratelimited(
+			   "%s: underflow error on %s port %d, value %x\n",
+			   __func__, (tx ? "TX" : "RX"), port_id, val);
+		if ((val & TASHA_SLIM_IRQ_OVERFLOW) ||
+			(val & TASHA_SLIM_IRQ_UNDERFLOW)) {
+			if (!tx)
+				reg = TASHA_SLIM_PGD_PORT_INT_EN0 +
+					(port_id / 8);
+			else
+				reg = TASHA_SLIM_PGD_PORT_INT_TX_EN0 +
+					(port_id / 8);
+			int_val = wcd9xxx_interface_reg_read(
+				priv->wcd9xxx, reg);
+			if (int_val & (1 << (port_id % 8))) {
+				int_val = int_val ^ (1 << (port_id % 8));
+				wcd9xxx_interface_reg_write(priv->wcd9xxx,
+					reg, int_val);
+			}
+		}
+		if (val & TASHA_SLIM_IRQ_PORT_CLOSED) {
+			/*
+			 * The INT SOURCE registers order ports RX first,
+			 * then TX, but the port numbering in ch_mask is
+			 * the opposite way around, so translate the bit.
+			 */
+			bit = (tx ? j - 16 : j + 16);
+			pr_debug("%s: %s port %d closed value %x, bit %u\n",
+				 __func__, (tx ? "TX" : "RX"), port_id, val,
+				 bit);
+			for (k = 0, cleared = false; k < NUM_CODEC_DAIS; k++) {
+				pr_debug("%s: priv->dai[%d].ch_mask = 0x%lx\n",
+					 __func__, k, priv->dai[k].ch_mask);
+				if (test_and_clear_bit(bit,
+						       &priv->dai[k].ch_mask)) {
+					cleared = true;
+					if (!priv->dai[k].ch_mask)
+						wake_up(&priv->dai[k].dai_wait);
+					/*
+					 * There are cases when multiple DAIs
+					 * might be using the same slimbus
+					 * channel. Hence don't break here.
+					 */
+				}
+			}
+			WARN(!cleared,
+			     "Couldn't find slimbus %s port %d for closing\n",
+			     (tx ? "TX" : "RX"), port_id);
+		}
+		wcd9xxx_interface_reg_write(priv->wcd9xxx,
+					    TASHA_SLIM_PGD_PORT_INT_CLR_RX_0 +
+					    (j / 8),
+					    1 << (j % 8));
+	}
+
+	return IRQ_HANDLED;
+}
+
+static int tasha_setup_irqs(struct tasha_priv *tasha)
+{
+	int ret = 0;
+	struct snd_soc_codec *codec = tasha->codec;
+	struct wcd9xxx *wcd9xxx = tasha->wcd9xxx;
+	struct wcd9xxx_core_resource *core_res =
+				&wcd9xxx->core_res;
+
+	ret = wcd9xxx_request_irq(core_res, WCD9XXX_IRQ_SLIMBUS,
+				  tasha_slimbus_irq, "SLIMBUS Slave", tasha);
+	if (ret)
+		pr_err("%s: Failed to request irq %d\n", __func__,
+		       WCD9XXX_IRQ_SLIMBUS);
+	else
+		tasha_slim_interface_init_reg(codec);
+
+	return ret;
+}
+
+static void tasha_init_slim_slave_cfg(struct snd_soc_codec *codec)
+{
+	struct tasha_priv *priv = snd_soc_codec_get_drvdata(codec);
+	struct afe_param_cdc_slimbus_slave_cfg *cfg;
+	struct wcd9xxx *wcd9xxx = priv->wcd9xxx;
+	uint64_t eaddr = 0;
+
+	cfg = &priv->slimbus_slave_cfg;
+	cfg->minor_version = 1;
+	cfg->tx_slave_port_offset = 0;
+	cfg->rx_slave_port_offset = 16;
+
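+	/*
+	 * Pack the 6-byte SLIMbus enumeration address into one 64-bit
+	 * value; the lower and upper 32 bits are reported to the AFE
+	 * separately below.
+	 */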
+	memcpy(&eaddr, &wcd9xxx->slim->e_addr, sizeof(wcd9xxx->slim->e_addr));
+	WARN_ON(sizeof(wcd9xxx->slim->e_addr) != 6);
+	cfg->device_enum_addr_lsw = eaddr & 0xFFFFFFFF;
+	cfg->device_enum_addr_msw = eaddr >> 32;
+
+	dev_dbg(codec->dev, "%s: slimbus logical address 0x%llx\n",
+		__func__, eaddr);
+}
+
+static void tasha_cleanup_irqs(struct tasha_priv *tasha)
+{
+	struct wcd9xxx *wcd9xxx = tasha->wcd9xxx;
+	struct wcd9xxx_core_resource *core_res =
+				&wcd9xxx->core_res;
+
+	wcd9xxx_free_irq(core_res, WCD9XXX_IRQ_SLIMBUS, tasha);
+}
+
+static int tasha_handle_pdata(struct tasha_priv *tasha,
+			      struct wcd9xxx_pdata *pdata)
+{
+	struct snd_soc_codec *codec = tasha->codec;
+	u8 dmic_ctl_val, mad_dmic_ctl_val;
+	u8 anc_ctl_value;
+	u32 def_dmic_rate, dmic_clk_drv;
+	int vout_ctl_1, vout_ctl_2, vout_ctl_3, vout_ctl_4;
+	int rc = 0;
+
+	if (!pdata) {
+		dev_err(codec->dev, "%s: NULL pdata\n", __func__);
+		return -ENODEV;
+	}
+
+	/* set micbias voltage */
+	vout_ctl_1 = wcd9335_get_micb_vout_ctl_val(pdata->micbias.micb1_mv);
+	vout_ctl_2 = wcd9335_get_micb_vout_ctl_val(pdata->micbias.micb2_mv);
+	vout_ctl_3 = wcd9335_get_micb_vout_ctl_val(pdata->micbias.micb3_mv);
+	vout_ctl_4 = wcd9335_get_micb_vout_ctl_val(pdata->micbias.micb4_mv);
+
+	if (IS_ERR_VALUE(vout_ctl_1) || IS_ERR_VALUE(vout_ctl_2) ||
+	    IS_ERR_VALUE(vout_ctl_3) || IS_ERR_VALUE(vout_ctl_4)) {
+		rc = -EINVAL;
+		goto done;
+	}
+	snd_soc_update_bits(codec, WCD9335_ANA_MICB1, 0x3F, vout_ctl_1);
+	snd_soc_update_bits(codec, WCD9335_ANA_MICB2, 0x3F, vout_ctl_2);
+	snd_soc_update_bits(codec, WCD9335_ANA_MICB3, 0x3F, vout_ctl_3);
+	snd_soc_update_bits(codec, WCD9335_ANA_MICB4, 0x3F, vout_ctl_4);
+
+	/* Set the DMIC sample rate */
+	switch (pdata->mclk_rate) {
+	case TASHA_MCLK_CLK_9P6MHZ:
+		def_dmic_rate = WCD9XXX_DMIC_SAMPLE_RATE_4P8MHZ;
+		break;
+	case TASHA_MCLK_CLK_12P288MHZ:
+		def_dmic_rate = WCD9XXX_DMIC_SAMPLE_RATE_4P096MHZ;
+		break;
+	default:
+		/* should never happen */
+		dev_err(codec->dev, "%s: Invalid mclk_rate %d\n",
+			__func__, pdata->mclk_rate);
+		rc = -EINVAL;
+		goto done;
+	}
+
+	if (pdata->dmic_sample_rate ==
+	    WCD9XXX_DMIC_SAMPLE_RATE_UNDEFINED) {
+		dev_info(codec->dev, "%s: dmic_rate invalid default = %d\n",
+			__func__, def_dmic_rate);
+		pdata->dmic_sample_rate = def_dmic_rate;
+	}
+	if (pdata->mad_dmic_sample_rate ==
+	    WCD9XXX_DMIC_SAMPLE_RATE_UNDEFINED) {
+		dev_info(codec->dev, "%s: mad_dmic_rate invalid default = %d\n",
+			__func__, def_dmic_rate);
+		/*
+		 * use dmic_sample_rate as the default for MAD
+		 * if mad dmic sample rate is undefined
+		 */
+		pdata->mad_dmic_sample_rate = pdata->dmic_sample_rate;
+	}
+	if (pdata->ecpp_dmic_sample_rate ==
+	    WCD9XXX_DMIC_SAMPLE_RATE_UNDEFINED) {
+		dev_info(codec->dev,
+			 "%s: ecpp_dmic_rate undefined, using dmic rate %d\n",
+			 __func__, pdata->dmic_sample_rate);
+		/*
+		 * use dmic_sample_rate as the default for ECPP DMIC
+		 * if ecpp dmic sample rate is undefined
+		 */
+		pdata->ecpp_dmic_sample_rate = pdata->dmic_sample_rate;
+	}
+
+	if (pdata->dmic_clk_drv ==
+	    WCD9XXX_DMIC_CLK_DRIVE_UNDEFINED) {
+		pdata->dmic_clk_drv = WCD9335_DMIC_CLK_DRIVE_DEFAULT;
+		dev_info(codec->dev,
+			 "%s: dmic_clk_drv undefined, using default %d\n",
+			 __func__, pdata->dmic_clk_drv);
+	}
+
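+	/*
+	 * Map the supported drive strengths {2, 4, 8, 16} onto the
+	 * two-bit pad drive-control field values 0-3.
+	 */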
+	switch (pdata->dmic_clk_drv) {
+	case 2:
+		dmic_clk_drv = 0;
+		break;
+	case 4:
+		dmic_clk_drv = 1;
+		break;
+	case 8:
+		dmic_clk_drv = 2;
+		break;
+	case 16:
+		dmic_clk_drv = 3;
+		break;
+	default:
+		dev_err(codec->dev,
+			"%s: invalid dmic_clk_drv %d, using default\n",
+			__func__, pdata->dmic_clk_drv);
+		dmic_clk_drv = 0;
+		break;
+	}
+
+	snd_soc_update_bits(codec, WCD9335_TEST_DEBUG_PAD_DRVCTL,
+			    0x0C, dmic_clk_drv << 2);
+
+	/*
+	 * Program the MAD DMIC clock dividers from mad_dmic_sample_rate
+	 * and the ANC/TXFE DMIC dividers from dmic_sample_rate, since
+	 * ANC/TXFE are independent of the MAD block.
+	 */
+	mad_dmic_ctl_val = tasha_get_dmic_clk_val(tasha->codec,
+				pdata->mclk_rate,
+				pdata->mad_dmic_sample_rate);
+	snd_soc_update_bits(codec, WCD9335_CPE_SS_DMIC0_CTL,
+		0x0E, mad_dmic_ctl_val << 1);
+	snd_soc_update_bits(codec, WCD9335_CPE_SS_DMIC1_CTL,
+		0x0E, mad_dmic_ctl_val << 1);
+	snd_soc_update_bits(codec, WCD9335_CPE_SS_DMIC2_CTL,
+		0x0E, mad_dmic_ctl_val << 1);
+
+	dmic_ctl_val = tasha_get_dmic_clk_val(tasha->codec,
+				pdata->mclk_rate,
+				pdata->dmic_sample_rate);
+
+	if (dmic_ctl_val == WCD9335_DMIC_CLK_DIV_2)
+		anc_ctl_value = WCD9335_ANC_DMIC_X2_FULL_RATE;
+	else
+		anc_ctl_value = WCD9335_ANC_DMIC_X2_HALF_RATE;
+
+	snd_soc_update_bits(codec, WCD9335_CDC_ANC0_MODE_2_CTL,
+			    0x40, anc_ctl_value << 6);
+	snd_soc_update_bits(codec, WCD9335_CDC_ANC0_MODE_2_CTL,
+			    0x20, anc_ctl_value << 5);
+	snd_soc_update_bits(codec, WCD9335_CDC_ANC1_MODE_2_CTL,
+			    0x40, anc_ctl_value << 6);
+	snd_soc_update_bits(codec, WCD9335_CDC_ANC1_MODE_2_CTL,
+			    0x20, anc_ctl_value << 5);
+done:
+	return rc;
+}
+
+static struct wcd_cpe_core *tasha_codec_get_cpe_core(
+		struct snd_soc_codec *codec)
+{
+	struct tasha_priv *priv = snd_soc_codec_get_drvdata(codec);
+
+	return priv->cpe_core;
+}
+
+static int tasha_codec_cpe_fll_update_divider(
+	struct snd_soc_codec *codec, u32 cpe_fll_rate)
+{
+	struct wcd9xxx *wcd9xxx = dev_get_drvdata(codec->dev->parent);
+	struct tasha_priv *tasha = snd_soc_codec_get_drvdata(codec);
+	u32 div_val = 0, l_val = 0;
+	u32 computed_cpe_fll;
+
+	if (cpe_fll_rate != CPE_FLL_CLK_75MHZ &&
+	    cpe_fll_rate != CPE_FLL_CLK_150MHZ) {
+		dev_err(codec->dev,
+			"%s: Invalid CPE fll rate request %u\n",
+			__func__, cpe_fll_rate);
+		return -EINVAL;
+	}
+
+	if (wcd9xxx->mclk_rate == TASHA_MCLK_CLK_12P288MHZ) {
+		/* update divider to 10 and enable 5x divider */
+		snd_soc_write(codec, WCD9335_CPE_FLL_USER_CTL_1,
+			      0x55);
+		div_val = 10;
+	} else if (wcd9xxx->mclk_rate == TASHA_MCLK_CLK_9P6MHZ) {
+		/* update divider to 8 and enable 2x divider */
+		snd_soc_update_bits(codec, WCD9335_CPE_FLL_USER_CTL_0,
+				    0x7C, 0x70);
+		snd_soc_update_bits(codec, WCD9335_CPE_FLL_USER_CTL_1,
+				    0xE0, 0x20);
+		div_val = 8;
+	} else {
+		dev_err(codec->dev,
+			"%s: Invalid MCLK rate %u\n",
+			__func__, wcd9xxx->mclk_rate);
+		return -EINVAL;
+	}
+
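+	/*
+	 * l_val multiplies the divided MCLK reference. As a worked
+	 * example, assuming a 9.6 MHz MCLK (div_val = 8) and a 75 MHz
+	 * CPE request: (75000 * 8) / 9600 truncates to 62, which the
+	 * check below rounds up to 63.
+	 */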
+	l_val = ((cpe_fll_rate / 1000) * div_val) /
+		 (wcd9xxx->mclk_rate / 1000);
+
+	/*
+	 * If integer division truncated l_val, round it up so the FLL
+	 * output does not fall below the requested rate.
+	 */
+	computed_cpe_fll = (wcd9xxx->mclk_rate / div_val) * l_val;
+	if (computed_cpe_fll < cpe_fll_rate)
+		l_val++;
+
+	/* update L value LSB and MSB */
+	snd_soc_write(codec, WCD9335_CPE_FLL_L_VAL_CTL_0,
+		      (l_val & 0xFF));
+	snd_soc_write(codec, WCD9335_CPE_FLL_L_VAL_CTL_1,
+		      ((l_val >> 8) & 0xFF));
+
+	tasha->current_cpe_clk_freq = cpe_fll_rate;
+	dev_dbg(codec->dev,
+		"%s: updated l_val to %u for cpe_clk %u and mclk %u\n",
+		__func__, l_val, cpe_fll_rate, wcd9xxx->mclk_rate);
+
+	return 0;
+}
+
+static int __tasha_cdc_change_cpe_clk(struct snd_soc_codec *codec,
+		u32 clk_freq)
+{
+	int ret = 0;
+	struct tasha_priv *tasha = snd_soc_codec_get_drvdata(codec);
+
+	if (!tasha_cdc_is_svs_enabled(tasha)) {
+		dev_dbg(codec->dev,
+			"%s: SVS not enabled or tasha is not 2p0, return\n",
+			__func__);
+		return 0;
+	}
+	dev_dbg(codec->dev, "%s: clk_freq = %u\n", __func__, clk_freq);
+
+	if (clk_freq == CPE_FLL_CLK_75MHZ) {
+		/* Change to SVS */
+		snd_soc_update_bits(codec, WCD9335_CPE_FLL_FLL_MODE,
+				    0x08, 0x08);
+		if (tasha_codec_cpe_fll_update_divider(codec, clk_freq)) {
+			ret = -EINVAL;
+			goto done;
+		}
+
+		snd_soc_update_bits(codec, WCD9335_CPE_FLL_FLL_MODE,
+				    0x10, 0x10);
+
+		clear_bit(CPE_NOMINAL, &tasha->status_mask);
+		tasha_codec_update_sido_voltage(tasha, sido_buck_svs_voltage);
+
+	} else if (clk_freq == CPE_FLL_CLK_150MHZ) {
+		/* change to nominal */
+		snd_soc_update_bits(codec, WCD9335_CPE_FLL_FLL_MODE,
+				    0x08, 0x08);
+
+		set_bit(CPE_NOMINAL, &tasha->status_mask);
+		tasha_codec_update_sido_voltage(tasha, SIDO_VOLTAGE_NOMINAL_MV);
+
+		if (tasha_codec_cpe_fll_update_divider(codec, clk_freq)) {
+			ret = -EINVAL;
+			goto done;
+		}
+		snd_soc_update_bits(codec, WCD9335_CPE_FLL_FLL_MODE,
+				    0x10, 0x10);
+	} else {
+		dev_err(codec->dev,
+			"%s: Invalid clk_freq request %d for CPE FLL\n",
+			__func__, clk_freq);
+		ret = -EINVAL;
+	}
+
+done:
+	snd_soc_update_bits(codec, WCD9335_CPE_FLL_FLL_MODE,
+			    0x10, 0x00);
+	snd_soc_update_bits(codec, WCD9335_CPE_FLL_FLL_MODE,
+			    0x08, 0x00);
+	return ret;
+}
+
+static int tasha_codec_cpe_fll_enable(struct snd_soc_codec *codec,
+				   bool enable)
+{
+	struct wcd9xxx *wcd9xxx = dev_get_drvdata(codec->dev->parent);
+	struct tasha_priv *tasha = snd_soc_codec_get_drvdata(codec);
+	u8 clk_sel_reg_val = 0x00;
+
+	dev_dbg(codec->dev, "%s: enable = %s\n",
+			__func__, enable ? "true" : "false");
+
+	if (enable) {
+		if (tasha_cdc_is_svs_enabled(tasha)) {
+			/* FLL enable is always at SVS */
+			if (__tasha_cdc_change_cpe_clk(codec,
+					CPE_FLL_CLK_75MHZ)) {
+				dev_err(codec->dev,
+					"%s: clk change to %d failed\n",
+					__func__, CPE_FLL_CLK_75MHZ);
+				return -EINVAL;
+			}
+		} else {
+			if (tasha_codec_cpe_fll_update_divider(codec,
+							CPE_FLL_CLK_75MHZ)) {
+				dev_err(codec->dev,
+					"%s: clk change to %d failed\n",
+					__func__, CPE_FLL_CLK_75MHZ);
+				return -EINVAL;
+			}
+		}
+
+		if (TASHA_IS_1_0(wcd9xxx)) {
+			tasha_cdc_mclk_enable(codec, true, false);
+			clk_sel_reg_val = 0x02;
+		}
+
+		/* Setup CPE reference clk */
+		snd_soc_update_bits(codec, WCD9335_ANA_CLK_TOP,
+				    0x02, clk_sel_reg_val);
+
+		/* enable CPE FLL reference clk */
+		snd_soc_update_bits(codec, WCD9335_ANA_CLK_TOP,
+				    0x01, 0x01);
+
+		/* program the PLL */
+		snd_soc_update_bits(codec, WCD9335_CPE_FLL_USER_CTL_0,
+				    0x01, 0x01);
+
+		/* TEST clk setting */
+		snd_soc_update_bits(codec, WCD9335_CPE_FLL_TEST_CTL_0,
+				    0x80, 0x80);
+		/* set FLL mode to HW controlled */
+		snd_soc_update_bits(codec, WCD9335_CPE_FLL_FLL_MODE,
+				    0x60, 0x00);
+		snd_soc_write(codec, WCD9335_CPE_FLL_FLL_MODE, 0x80);
+	} else {
+		/* disable CPE FLL reference clk */
+		snd_soc_update_bits(codec, WCD9335_ANA_CLK_TOP,
+				    0x01, 0x00);
+		/* undo TEST clk setting */
+		snd_soc_update_bits(codec, WCD9335_CPE_FLL_TEST_CTL_0,
+				    0x80, 0x00);
+		/* undo FLL mode to HW control */
+		snd_soc_write(codec, WCD9335_CPE_FLL_FLL_MODE, 0x00);
+		snd_soc_update_bits(codec, WCD9335_CPE_FLL_FLL_MODE,
+				    0x60, 0x20);
+		/* undo the PLL */
+		snd_soc_update_bits(codec, WCD9335_CPE_FLL_USER_CTL_0,
+				    0x01, 0x00);
+
+		if (TASHA_IS_1_0(wcd9xxx))
+			tasha_cdc_mclk_enable(codec, false, false);
+
+		/*
+		 * FLL could get disabled while at nominal,
+		 * scale it back to SVS
+		 */
+		if (tasha_cdc_is_svs_enabled(tasha))
+			__tasha_cdc_change_cpe_clk(codec,
+						CPE_FLL_CLK_75MHZ);
+	}
+
+	return 0;
+}
+
+static void tasha_cdc_query_cpe_clk_plan(void *data,
+		struct cpe_svc_cfg_clk_plan *clk_freq)
+{
+	struct snd_soc_codec *codec = data;
+	struct tasha_priv *tasha;
+	u32 cpe_clk_khz;
+
+	if (!codec) {
+		pr_err("%s: Invalid codec handle\n",
+			__func__);
+		return;
+	}
+
+	tasha = snd_soc_codec_get_drvdata(codec);
+	cpe_clk_khz = tasha->current_cpe_clk_freq / 1000;
+
+	dev_dbg(codec->dev,
+		"%s: current_clk_freq = %u\n",
+		__func__, tasha->current_cpe_clk_freq);
+
+	clk_freq->current_clk_feq = cpe_clk_khz;
+	clk_freq->num_clk_freqs = 2;
+
+	if (tasha_cdc_is_svs_enabled(tasha)) {
+		clk_freq->clk_freqs[0] = CPE_FLL_CLK_75MHZ / 1000;
+		clk_freq->clk_freqs[1] = CPE_FLL_CLK_150MHZ / 1000;
+	} else {
+		clk_freq->clk_freqs[0] = CPE_FLL_CLK_75MHZ;
+		clk_freq->clk_freqs[1] = CPE_FLL_CLK_150MHZ;
+	}
+}
+
+static void tasha_cdc_change_cpe_clk(void *data,
+		u32 clk_freq)
+{
+	struct snd_soc_codec *codec = data;
+	struct tasha_priv *tasha;
+	u32 cpe_clk_khz, req_freq = 0;
+
+	if (!codec) {
+		pr_err("%s: Invalid codec handle\n",
+			__func__);
+		return;
+	}
+
+	tasha = snd_soc_codec_get_drvdata(codec);
+	cpe_clk_khz = tasha->current_cpe_clk_freq / 1000;
+
+	if (tasha_cdc_is_svs_enabled(tasha)) {
+		if ((clk_freq * 1000) <= CPE_FLL_CLK_75MHZ)
+			req_freq = CPE_FLL_CLK_75MHZ;
+		else
+			req_freq = CPE_FLL_CLK_150MHZ;
+	}
+
+	dev_dbg(codec->dev,
+		"%s: requested clk_freq = %u, current clk_freq = %u\n",
+		__func__, clk_freq * 1000,
+		tasha->current_cpe_clk_freq);
+
+	if (tasha_cdc_is_svs_enabled(tasha)) {
+		if (__tasha_cdc_change_cpe_clk(codec, req_freq))
+			dev_err(codec->dev,
+				"%s: clock/voltage scaling failed\n",
+				__func__);
+	}
+}
+
+static int tasha_codec_slim_reserve_bw(struct snd_soc_codec *codec,
+		u32 bw_ops, bool commit)
+{
+	struct wcd9xxx *wcd9xxx;
+
+	if (!codec) {
+		pr_err("%s: Invalid handle to codec\n",
+			__func__);
+		return -EINVAL;
+	}
+
+	wcd9xxx = dev_get_drvdata(codec->dev->parent);
+
+	if (!wcd9xxx) {
+		dev_err(codec->dev, "%s: Invalid parent drv_data\n",
+			__func__);
+		return -EINVAL;
+	}
+
+	return wcd9xxx_slim_reserve_bw(wcd9xxx, bw_ops, commit);
+}
+
+static int tasha_codec_vote_max_bw(struct snd_soc_codec *codec,
+			bool vote)
+{
+	u32 bw_ops;
+	struct tasha_priv *tasha = snd_soc_codec_get_drvdata(codec);
+
+	if (tasha->intf_type == WCD9XXX_INTERFACE_TYPE_I2C)
+		return 0;
+
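+	/*
+	 * The bandwidth vote is reference counted: only the first vote
+	 * reserves SLIMbus clock gear 9, and only the last un-vote
+	 * releases the reservation.
+	 */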
+	mutex_lock(&tasha->sb_clk_gear_lock);
+	if (vote) {
+		tasha->ref_count++;
+		if (tasha->ref_count == 1) {
+			bw_ops = SLIM_BW_CLK_GEAR_9;
+			tasha_codec_slim_reserve_bw(codec,
+				bw_ops, true);
+		}
+	} else if (!vote && tasha->ref_count > 0) {
+		tasha->ref_count--;
+		if (tasha->ref_count == 0) {
+			bw_ops = SLIM_BW_UNVOTE;
+			tasha_codec_slim_reserve_bw(codec,
+				bw_ops, true);
+		}
+	}
+
+	dev_dbg(codec->dev, "%s Value of counter after vote or un-vote is %d\n",
+		__func__, tasha->ref_count);
+
+	mutex_unlock(&tasha->sb_clk_gear_lock);
+
+	return 0;
+}
+
+static int tasha_cpe_err_irq_control(struct snd_soc_codec *codec,
+	enum cpe_err_irq_cntl_type cntl_type, u8 *status)
+{
+	struct tasha_priv *tasha = snd_soc_codec_get_drvdata(codec);
+	u8 irq_bits;
+
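+	/*
+	 * Per the masks below, Tasha v2.0 uses all eight CPE error
+	 * interrupt bits while earlier revisions use only the lower
+	 * six.
+	 */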
+	if (TASHA_IS_2_0(tasha->wcd9xxx))
+		irq_bits = 0xFF;
+	else
+		irq_bits = 0x3F;
+
+	if (status)
+		irq_bits = (*status) & irq_bits;
+
+	switch (cntl_type) {
+	case CPE_ERR_IRQ_MASK:
+		snd_soc_update_bits(codec,
+				    WCD9335_CPE_SS_SS_ERROR_INT_MASK,
+				    irq_bits, irq_bits);
+		break;
+	case CPE_ERR_IRQ_UNMASK:
+		snd_soc_update_bits(codec,
+				    WCD9335_CPE_SS_SS_ERROR_INT_MASK,
+				    irq_bits, 0x00);
+		break;
+	case CPE_ERR_IRQ_CLEAR:
+		snd_soc_write(codec, WCD9335_CPE_SS_SS_ERROR_INT_CLEAR,
+			      irq_bits);
+		break;
+	case CPE_ERR_IRQ_STATUS:
+		if (!status)
+			return -EINVAL;
+		*status = snd_soc_read(codec,
+				       WCD9335_CPE_SS_SS_ERROR_INT_STATUS);
+		break;
+	}
+
+	return 0;
+}
+
+static const struct wcd_cpe_cdc_cb cpe_cb = {
+	.cdc_clk_en = tasha_codec_internal_rco_ctrl,
+	.cpe_clk_en = tasha_codec_cpe_fll_enable,
+	.get_afe_out_port_id = tasha_codec_get_mad_port_id,
+	.lab_cdc_ch_ctl = tasha_codec_enable_slimtx_mad,
+	.cdc_ext_clk = tasha_cdc_mclk_enable,
+	.bus_vote_bw = tasha_codec_vote_max_bw,
+	.cpe_err_irq_control = tasha_cpe_err_irq_control,
+};
+
+static struct cpe_svc_init_param cpe_svc_params = {
+	.version = CPE_SVC_INIT_PARAM_V1,
+	.query_freq_plans_cb = tasha_cdc_query_cpe_clk_plan,
+	.change_freq_plan_cb = tasha_cdc_change_cpe_clk,
+};
+
+static int tasha_cpe_initialize(struct snd_soc_codec *codec)
+{
+	struct tasha_priv *tasha = snd_soc_codec_get_drvdata(codec);
+	struct wcd_cpe_params cpe_params;
+
+	memset(&cpe_params, 0,
+	       sizeof(struct wcd_cpe_params));
+	cpe_params.codec = codec;
+	cpe_params.get_cpe_core = tasha_codec_get_cpe_core;
+	cpe_params.cdc_cb = &cpe_cb;
+	cpe_params.dbg_mode = cpe_debug_mode;
+	cpe_params.cdc_major_ver = CPE_SVC_CODEC_WCD9335;
+	cpe_params.cdc_minor_ver = CPE_SVC_CODEC_V1P0;
+	cpe_params.cdc_id = CPE_SVC_CODEC_WCD9335;
+
+	cpe_params.cdc_irq_info.cpe_engine_irq =
+			WCD9335_IRQ_SVA_OUTBOX1;
+	cpe_params.cdc_irq_info.cpe_err_irq =
+			WCD9335_IRQ_SVA_ERROR;
+	cpe_params.cdc_irq_info.cpe_fatal_irqs =
+			TASHA_CPE_FATAL_IRQS;
+
+	cpe_svc_params.context = codec;
+	cpe_params.cpe_svc_params = &cpe_svc_params;
+
+	tasha->cpe_core = wcd_cpe_init("cpe_9335", codec,
+					&cpe_params);
+	if (IS_ERR_OR_NULL(tasha->cpe_core)) {
+		dev_err(codec->dev,
+			"%s: Failed to enable CPE\n",
+			__func__);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static const struct wcd_resmgr_cb tasha_resmgr_cb = {
+	.cdc_rco_ctrl = __tasha_codec_internal_rco_ctrl,
+};
+
+static int tasha_device_down(struct wcd9xxx *wcd9xxx)
+{
+	struct snd_soc_codec *codec;
+	struct tasha_priv *priv;
+	int count;
+	int i = 0;
+
+	codec = (struct snd_soc_codec *)(wcd9xxx->ssr_priv);
+	priv = snd_soc_codec_get_drvdata(codec);
+	wcd_cpe_ssr_event(priv->cpe_core, WCD_CPE_BUS_DOWN_EVENT);
+	for (i = 0; i < priv->nr; i++)
+		swrm_wcd_notify(priv->swr_ctrl_data[i].swr_pdev,
+				SWR_DEVICE_DOWN, NULL);
+	snd_soc_card_change_online_state(codec->component.card, 0);
+	for (count = 0; count < NUM_CODEC_DAIS; count++)
+		priv->dai[count].bus_down_in_recovery = true;
+
+	priv->resmgr->sido_input_src = SIDO_SOURCE_INTERNAL;
+
+	return 0;
+}
+
+static int tasha_post_reset_cb(struct wcd9xxx *wcd9xxx)
+{
+	int i, ret = 0;
+	struct wcd9xxx *control;
+	struct snd_soc_codec *codec;
+	struct tasha_priv *tasha;
+	struct wcd9xxx_pdata *pdata;
+
+	codec = (struct snd_soc_codec *)(wcd9xxx->ssr_priv);
+	tasha = snd_soc_codec_get_drvdata(codec);
+	control = dev_get_drvdata(codec->dev->parent);
+
+	wcd9xxx_set_power_state(tasha->wcd9xxx,
+				WCD_REGION_POWER_COLLAPSE_REMOVE,
+				WCD9XXX_DIG_CORE_REGION_1);
+
+	mutex_lock(&tasha->codec_mutex);
+
+	tasha_slimbus_slave_port_cfg.slave_dev_intfdev_la =
+		control->slim_slave->laddr;
+	tasha_slimbus_slave_port_cfg.slave_dev_pgd_la =
+		control->slim->laddr;
+	tasha_init_slim_slave_cfg(codec);
+	if (tasha->machine_codec_event_cb)
+		tasha->machine_codec_event_cb(codec,
+				WCD9335_CODEC_EVENT_CODEC_UP);
+	snd_soc_card_change_online_state(codec->component.card, 1);
+
+	/* Class-H Init */
+	wcd_clsh_init(&tasha->clsh_d);
+
+	for (i = 0; i < TASHA_MAX_MICBIAS; i++)
+		tasha->micb_ref[i] = 0;
+
+	tasha_update_reg_defaults(tasha);
+
+	tasha->codec = codec;
+
+	dev_dbg(codec->dev, "%s: MCLK Rate = %x\n",
+		__func__, control->mclk_rate);
+
+	if (control->mclk_rate == TASHA_MCLK_CLK_12P288MHZ)
+		snd_soc_update_bits(codec, WCD9335_CODEC_RPM_CLK_MCLK_CFG,
+				    0x03, 0x00);
+	else if (control->mclk_rate == TASHA_MCLK_CLK_9P6MHZ)
+		snd_soc_update_bits(codec, WCD9335_CODEC_RPM_CLK_MCLK_CFG,
+				    0x03, 0x01);
+	tasha_codec_init_reg(codec);
+
+	wcd_resmgr_post_ssr_v2(tasha->resmgr);
+
+	tasha_enable_efuse_sensing(codec);
+
+	regcache_mark_dirty(codec->component.regmap);
+	regcache_sync(codec->component.regmap);
+
+	pdata = dev_get_platdata(codec->dev->parent);
+	ret = tasha_handle_pdata(tasha, pdata);
+	if (IS_ERR_VALUE(ret))
+		dev_err(codec->dev, "%s: invalid pdata\n", __func__);
+
+	/* Reset reference counter for voting for max bw */
+	tasha->ref_count = 0;
+	/* MBHC Init */
+	wcd_mbhc_deinit(&tasha->mbhc);
+	tasha->mbhc_started = false;
+
+	/* Initialize MBHC module */
+	ret = wcd_mbhc_init(&tasha->mbhc, codec, &mbhc_cb, &intr_ids,
+		      wcd_mbhc_registers, TASHA_ZDET_SUPPORTED);
+	if (ret)
+		dev_err(codec->dev, "%s: mbhc initialization failed\n",
+			__func__);
+	else
+		tasha_mbhc_hs_detect(codec, tasha->mbhc.mbhc_cfg);
+
+	tasha_cleanup_irqs(tasha);
+	ret = tasha_setup_irqs(tasha);
+	if (ret) {
+		dev_err(codec->dev, "%s: tasha irq setup failed %d\n",
+			__func__, ret);
+		goto err;
+	}
+
+	tasha_set_spkr_mode(codec, tasha->spkr_mode);
+	wcd_cpe_ssr_event(tasha->cpe_core, WCD_CPE_BUS_UP_EVENT);
+
+err:
+	mutex_unlock(&tasha->codec_mutex);
+	return ret;
+}
+
+static struct regulator *tasha_codec_find_ondemand_regulator(
+		struct snd_soc_codec *codec, const char *name)
+{
+	int i;
+	struct tasha_priv *tasha = snd_soc_codec_get_drvdata(codec);
+	struct wcd9xxx *wcd9xxx = tasha->wcd9xxx;
+	struct wcd9xxx_pdata *pdata = dev_get_platdata(codec->dev->parent);
+
+	for (i = 0; i < wcd9xxx->num_of_supplies; ++i) {
+		if (pdata->regulator[i].ondemand &&
+			wcd9xxx->supplies[i].supply &&
+			!strcmp(wcd9xxx->supplies[i].supply, name))
+			return wcd9xxx->supplies[i].consumer;
+	}
+
+	dev_dbg(tasha->dev, "Warning: regulator not found:%s\n",
+		name);
+	return NULL;
+}
+
+static int tasha_codec_probe(struct snd_soc_codec *codec)
+{
+	struct wcd9xxx *control;
+	struct tasha_priv *tasha;
+	struct wcd9xxx_pdata *pdata;
+	struct snd_soc_dapm_context *dapm = snd_soc_codec_get_dapm(codec);
+	int i, ret;
+	void *ptr = NULL;
+	struct regulator *supply;
+
+	control = dev_get_drvdata(codec->dev->parent);
+
+	dev_info(codec->dev, "%s()\n", __func__);
+	tasha = snd_soc_codec_get_drvdata(codec);
+	tasha->intf_type = wcd9xxx_get_intf_type();
+
+	if (tasha->intf_type == WCD9XXX_INTERFACE_TYPE_SLIMBUS) {
+		control->dev_down = tasha_device_down;
+		control->post_reset = tasha_post_reset_cb;
+		control->ssr_priv = (void *)codec;
+	}
+
+	/* Resource Manager post Init */
+	ret = wcd_resmgr_post_init(tasha->resmgr, &tasha_resmgr_cb, codec);
+	if (ret) {
+		dev_err(codec->dev, "%s: wcd resmgr post init failed\n",
+			__func__);
+		goto err;
+	}
+	/* Class-H Init */
+	wcd_clsh_init(&tasha->clsh_d);
+	/* Default HPH Mode to Class-H HiFi */
+	tasha->hph_mode = CLS_H_HIFI;
+
+	tasha->codec = codec;
+	for (i = 0; i < COMPANDER_MAX; i++)
+		tasha->comp_enabled[i] = 0;
+
+	tasha->spkr_gain_offset = RX_GAIN_OFFSET_0_DB;
+	tasha_update_reg_reset_values(codec);
+	pr_debug("%s: MCLK Rate = %x\n", __func__, control->mclk_rate);
+	if (control->mclk_rate == TASHA_MCLK_CLK_12P288MHZ)
+		snd_soc_update_bits(codec, WCD9335_CODEC_RPM_CLK_MCLK_CFG,
+				    0x03, 0x00);
+	else if (control->mclk_rate == TASHA_MCLK_CLK_9P6MHZ)
+		snd_soc_update_bits(codec, WCD9335_CODEC_RPM_CLK_MCLK_CFG,
+				    0x03, 0x01);
+	tasha_codec_init_reg(codec);
+
+	tasha_enable_efuse_sensing(codec);
+
+	pdata = dev_get_platdata(codec->dev->parent);
+	ret = tasha_handle_pdata(tasha, pdata);
+	if (IS_ERR_VALUE(ret)) {
+		pr_err("%s: bad pdata\n", __func__);
+		goto err;
+	}
+
+	supply = tasha_codec_find_ondemand_regulator(codec,
+		on_demand_supply_name[ON_DEMAND_MICBIAS]);
+	if (supply) {
+		tasha->on_demand_list[ON_DEMAND_MICBIAS].supply = supply;
+		tasha->on_demand_list[ON_DEMAND_MICBIAS].ondemand_supply_count =
+				0;
+	}
+
+	tasha->fw_data = devm_kzalloc(codec->dev,
+				      sizeof(*(tasha->fw_data)), GFP_KERNEL);
+	if (!tasha->fw_data) {
+		ret = -ENOMEM;
+		goto err;
+	}
+	set_bit(WCD9XXX_ANC_CAL, tasha->fw_data->cal_bit);
+	set_bit(WCD9XXX_MBHC_CAL, tasha->fw_data->cal_bit);
+	set_bit(WCD9XXX_MAD_CAL, tasha->fw_data->cal_bit);
+	set_bit(WCD9XXX_VBAT_CAL, tasha->fw_data->cal_bit);
+
+	ret = wcd_cal_create_hwdep(tasha->fw_data,
+				   WCD9XXX_CODEC_HWDEP_NODE, codec);
+	if (ret < 0) {
+		dev_err(codec->dev, "%s hwdep failed %d\n", __func__, ret);
+		goto err_hwdep;
+	}
+
+	/* Initialize MBHC module */
+	if (TASHA_IS_2_0(tasha->wcd9xxx)) {
+		wcd_mbhc_registers[WCD_MBHC_FSM_STATUS].reg =
+			WCD9335_MBHC_FSM_STATUS;
+		wcd_mbhc_registers[WCD_MBHC_FSM_STATUS].mask = 0x01;
+	}
+	ret = wcd_mbhc_init(&tasha->mbhc, codec, &mbhc_cb, &intr_ids,
+		      wcd_mbhc_registers, TASHA_ZDET_SUPPORTED);
+	if (ret) {
+		pr_err("%s: mbhc initialization failed\n", __func__);
+		goto err_hwdep;
+	}
+
+	ptr = devm_kzalloc(codec->dev, (sizeof(tasha_rx_chs) +
+			   sizeof(tasha_tx_chs)), GFP_KERNEL);
+	if (!ptr) {
+		pr_err("%s: no mem for slim chan ctl data\n", __func__);
+		ret = -ENOMEM;
+		goto err_hwdep;
+	}
+
+	if (tasha->intf_type == WCD9XXX_INTERFACE_TYPE_I2C) {
+		snd_soc_dapm_new_controls(dapm, tasha_dapm_i2s_widgets,
+			ARRAY_SIZE(tasha_dapm_i2s_widgets));
+		snd_soc_dapm_add_routes(dapm, audio_i2s_map,
+			ARRAY_SIZE(audio_i2s_map));
+		for (i = 0; i < ARRAY_SIZE(tasha_i2s_dai); i++) {
+			INIT_LIST_HEAD(&tasha->dai[i].wcd9xxx_ch_list);
+			init_waitqueue_head(&tasha->dai[i].dai_wait);
+		}
+	} else if (tasha->intf_type == WCD9XXX_INTERFACE_TYPE_SLIMBUS) {
+		for (i = 0; i < NUM_CODEC_DAIS; i++) {
+			INIT_LIST_HEAD(&tasha->dai[i].wcd9xxx_ch_list);
+			init_waitqueue_head(&tasha->dai[i].dai_wait);
+		}
+		tasha_slimbus_slave_port_cfg.slave_dev_intfdev_la =
+					control->slim_slave->laddr;
+		tasha_slimbus_slave_port_cfg.slave_dev_pgd_la =
+					control->slim->laddr;
+		tasha_slimbus_slave_port_cfg.slave_port_mapping[0] =
+					TASHA_TX13;
+		tasha_init_slim_slave_cfg(codec);
+	}
+
+	snd_soc_add_codec_controls(codec, impedance_detect_controls,
+				   ARRAY_SIZE(impedance_detect_controls));
+	snd_soc_add_codec_controls(codec, hph_type_detect_controls,
+				   ARRAY_SIZE(hph_type_detect_controls));
+
+	snd_soc_add_codec_controls(codec, tasha_analog_gain_controls,
+				   ARRAY_SIZE(tasha_analog_gain_controls));
+	control->num_rx_port = TASHA_RX_MAX;
+	control->rx_chs = ptr;
+	memcpy(control->rx_chs, tasha_rx_chs, sizeof(tasha_rx_chs));
+	control->num_tx_port = TASHA_TX_MAX;
+	control->tx_chs = ptr + sizeof(tasha_rx_chs);
+	memcpy(control->tx_chs, tasha_tx_chs, sizeof(tasha_tx_chs));
+
+	snd_soc_dapm_ignore_suspend(dapm, "AIF1 Playback");
+	snd_soc_dapm_ignore_suspend(dapm, "AIF1 Capture");
+	snd_soc_dapm_ignore_suspend(dapm, "AIF2 Playback");
+	snd_soc_dapm_ignore_suspend(dapm, "AIF2 Capture");
+
+	if (tasha->intf_type == WCD9XXX_INTERFACE_TYPE_SLIMBUS) {
+		snd_soc_dapm_ignore_suspend(dapm, "AIF3 Playback");
+		snd_soc_dapm_ignore_suspend(dapm, "AIF3 Capture");
+		snd_soc_dapm_ignore_suspend(dapm, "AIF4 Playback");
+		snd_soc_dapm_ignore_suspend(dapm, "AIF Mix Playback");
+		snd_soc_dapm_ignore_suspend(dapm, "AIF4 MAD TX");
+		snd_soc_dapm_ignore_suspend(dapm, "VIfeed");
+		snd_soc_dapm_ignore_suspend(dapm, "AIF5 CPE TX");
+	}
+
+	snd_soc_dapm_sync(dapm);
+
+	ret = tasha_setup_irqs(tasha);
+	if (ret) {
+		pr_err("%s: tasha irq setup failed %d\n", __func__, ret);
+		goto err_pdata;
+	}
+
+	ret = tasha_cpe_initialize(codec);
+	if (ret) {
+		dev_err(codec->dev,
+			"%s: cpe initialization failed, err = %d\n",
+			__func__, ret);
+		/* Do not fail probe if CPE failed */
+		ret = 0;
+	}
+
+	for (i = 0; i < TASHA_NUM_DECIMATORS; i++) {
+		tasha->tx_hpf_work[i].tasha = tasha;
+		tasha->tx_hpf_work[i].decimator = i;
+		INIT_DELAYED_WORK(&tasha->tx_hpf_work[i].dwork,
+			tasha_tx_hpf_corner_freq_callback);
+	}
+
+	for (i = 0; i < TASHA_NUM_DECIMATORS; i++) {
+		tasha->tx_mute_dwork[i].tasha = tasha;
+		tasha->tx_mute_dwork[i].decimator = i;
+		INIT_DELAYED_WORK(&tasha->tx_mute_dwork[i].dwork,
+			  tasha_tx_mute_update_callback);
+	}
+
+	tasha->spk_anc_dwork.tasha = tasha;
+	INIT_DELAYED_WORK(&tasha->spk_anc_dwork.dwork,
+			  tasha_spk_anc_update_callback);
+
+	mutex_lock(&tasha->codec_mutex);
+	snd_soc_dapm_disable_pin(dapm, "ANC LINEOUT1");
+	snd_soc_dapm_disable_pin(dapm, "ANC LINEOUT2");
+	snd_soc_dapm_disable_pin(dapm, "ANC LINEOUT1 PA");
+	snd_soc_dapm_disable_pin(dapm, "ANC LINEOUT2 PA");
+	snd_soc_dapm_disable_pin(dapm, "ANC HPHL");
+	snd_soc_dapm_disable_pin(dapm, "ANC HPHR");
+	snd_soc_dapm_disable_pin(dapm, "ANC HPHL PA");
+	snd_soc_dapm_disable_pin(dapm, "ANC HPHR PA");
+	snd_soc_dapm_disable_pin(dapm, "ANC EAR PA");
+	snd_soc_dapm_disable_pin(dapm, "ANC EAR");
+	snd_soc_dapm_disable_pin(dapm, "ANC SPK1 PA");
+	mutex_unlock(&tasha->codec_mutex);
+	snd_soc_dapm_sync(dapm);
+
+	return ret;
+
+err_pdata:
+	devm_kfree(codec->dev, ptr);
+	control->rx_chs = NULL;
+	control->tx_chs = NULL;
+err_hwdep:
+	devm_kfree(codec->dev, tasha->fw_data);
+	tasha->fw_data = NULL;
+err:
+	return ret;
+}
+
+static int tasha_codec_remove(struct snd_soc_codec *codec)
+{
+	struct tasha_priv *tasha = snd_soc_codec_get_drvdata(codec);
+	struct wcd9xxx *control;
+
+	control = dev_get_drvdata(codec->dev->parent);
+	control->rx_chs = NULL;
+	control->tx_chs = NULL;
+
+	tasha_cleanup_irqs(tasha);
+	/* Cleanup MBHC */
+	/* Cleanup resmgr */
+
+	return 0;
+}
+
+static struct regmap *tasha_get_regmap(struct device *dev)
+{
+	struct wcd9xxx *control = dev_get_drvdata(dev->parent);
+
+	return control->regmap;
+}
+
+static struct snd_soc_codec_driver soc_codec_dev_tasha = {
+	.probe = tasha_codec_probe,
+	.remove = tasha_codec_remove,
+	.controls = tasha_snd_controls,
+	.num_controls = ARRAY_SIZE(tasha_snd_controls),
+	.dapm_widgets = tasha_dapm_widgets,
+	.num_dapm_widgets = ARRAY_SIZE(tasha_dapm_widgets),
+	.dapm_routes = audio_map,
+	.num_dapm_routes = ARRAY_SIZE(audio_map),
+	.get_regmap = tasha_get_regmap,
+};
+
+#ifdef CONFIG_PM
+static int tasha_suspend(struct device *dev)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct tasha_priv *tasha = platform_get_drvdata(pdev);
+
+	if (!tasha) {
+		dev_err(dev, "%s: tasha private data is NULL\n", __func__);
+		return -EINVAL;
+	}
+	dev_dbg(dev, "%s: system suspend\n", __func__);
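+	/*
+	 * If a power-gate work was still pending, run the collapse
+	 * synchronously now so the digital core is gated before the
+	 * system suspends.
+	 */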
+	if (cancel_delayed_work_sync(&tasha->power_gate_work))
+		tasha_codec_power_gate_digital_core(tasha);
+
+	return 0;
+}
+
+static int tasha_resume(struct device *dev)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct tasha_priv *tasha = platform_get_drvdata(pdev);
+
+	if (!tasha) {
+		dev_err(dev, "%s: tasha private data is NULL\n", __func__);
+		return -EINVAL;
+	}
+	dev_dbg(dev, "%s: system resume\n", __func__);
+	return 0;
+}
+
+static const struct dev_pm_ops tasha_pm_ops = {
+	.suspend = tasha_suspend,
+	.resume = tasha_resume,
+};
+#endif
+
+static int tasha_swrm_read(void *handle, int reg)
+{
+	struct tasha_priv *tasha;
+	struct wcd9xxx *wcd9xxx;
+	unsigned short swr_rd_addr_base;
+	unsigned short swr_rd_data_base;
+	int val, ret;
+
+	if (!handle) {
+		pr_err("%s: NULL handle\n", __func__);
+		return -EINVAL;
+	}
+	tasha = (struct tasha_priv *)handle;
+	wcd9xxx = tasha->wcd9xxx;
+
+	dev_dbg(tasha->dev, "%s: Reading soundwire register, 0x%x\n",
+		__func__, reg);
+	swr_rd_addr_base = WCD9335_SWR_AHB_BRIDGE_RD_ADDR_0;
+	swr_rd_data_base = WCD9335_SWR_AHB_BRIDGE_RD_DATA_0;
+	/* read_lock */
+	mutex_lock(&tasha->swr_read_lock);
+	ret = regmap_bulk_write(wcd9xxx->regmap, swr_rd_addr_base,
+				(u8 *)&reg, 4);
+	if (ret < 0) {
+		pr_err("%s: RD Addr Failure\n", __func__);
+		goto err;
+	}
+	/* Read back the data from the bridge read-data registers */
+	ret = regmap_bulk_read(wcd9xxx->regmap, swr_rd_data_base,
+			       (u8 *)&val, 4);
+	if (ret < 0) {
+		pr_err("%s: RD Data Failure\n", __func__);
+		goto err;
+	}
+	ret = val;
+err:
+	/* read_unlock */
+	mutex_unlock(&tasha->swr_read_lock);
+	return ret;
+}
+
+static int tasha_swrm_i2s_bulk_write(struct wcd9xxx *wcd9xxx,
+				struct wcd9xxx_reg_val *bulk_reg,
+				size_t len)
+{
+	int i, ret = 0;
+	unsigned short swr_wr_addr_base;
+	unsigned short swr_wr_data_base;
+
+	swr_wr_addr_base = WCD9335_SWR_AHB_BRIDGE_WR_ADDR_0;
+	swr_wr_data_base = WCD9335_SWR_AHB_BRIDGE_WR_DATA_0;
+
+	for (i = 0; i < (len * 2); i += 2) {
+		/* First Write the Data to register */
+		ret = regmap_bulk_write(wcd9xxx->regmap,
+			swr_wr_data_base, bulk_reg[i].buf, 4);
+		if (ret < 0) {
+			dev_err(wcd9xxx->dev, "%s: WR Data Failure\n",
+				__func__);
+			break;
+		}
+		/* Next Write Address */
+		ret = regmap_bulk_write(wcd9xxx->regmap,
+			swr_wr_addr_base, bulk_reg[i+1].buf, 4);
+		if (ret < 0) {
+			dev_err(wcd9xxx->dev, "%s: WR Addr Failure\n",
+				__func__);
+			break;
+		}
+	}
+	return ret;
+}
+
+static int tasha_swrm_bulk_write(void *handle, u32 *reg, u32 *val, size_t len)
+{
+	struct tasha_priv *tasha;
+	struct wcd9xxx *wcd9xxx;
+	struct wcd9xxx_reg_val *bulk_reg;
+	unsigned short swr_wr_addr_base;
+	unsigned short swr_wr_data_base;
+	int i, j, ret;
+
+	if (!handle) {
+		pr_err("%s: NULL handle\n", __func__);
+		return -EINVAL;
+	}
+	if (!len) {
+		pr_err("%s: Invalid size: %zu\n", __func__, len);
+		return -EINVAL;
+	}
+	tasha = (struct tasha_priv *)handle;
+	wcd9xxx = tasha->wcd9xxx;
+
+	swr_wr_addr_base = WCD9335_SWR_AHB_BRIDGE_WR_ADDR_0;
+	swr_wr_data_base = WCD9335_SWR_AHB_BRIDGE_WR_DATA_0;
+
+	bulk_reg = kzalloc((2 * len * sizeof(struct wcd9xxx_reg_val)),
+			   GFP_KERNEL);
+	if (!bulk_reg)
+		return -ENOMEM;
+
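+	/*
+	 * Interleave each transfer as a data write followed by an
+	 * address write, the same data-first ordering the single
+	 * register write path uses for the AHB bridge.
+	 */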
+	for (i = 0, j = 0; i < (len * 2); i += 2, j++) {
+		bulk_reg[i].reg = swr_wr_data_base;
+		bulk_reg[i].buf = (u8 *)(&val[j]);
+		bulk_reg[i].bytes = 4;
+		bulk_reg[i+1].reg = swr_wr_addr_base;
+		bulk_reg[i+1].buf = (u8 *)(&reg[j]);
+		bulk_reg[i+1].bytes = 4;
+	}
+	mutex_lock(&tasha->swr_write_lock);
+
+	if (wcd9xxx_get_intf_type() == WCD9XXX_INTERFACE_TYPE_I2C) {
+		ret = tasha_swrm_i2s_bulk_write(wcd9xxx, bulk_reg, len);
+		if (ret) {
+			dev_err(tasha->dev, "%s: i2s bulk write failed, ret: %d\n",
+				__func__, ret);
+		}
+	} else {
+		ret = wcd9xxx_slim_bulk_write(wcd9xxx, bulk_reg,
+				 (len * 2), false);
+		if (ret) {
+			dev_err(tasha->dev, "%s: swrm bulk write failed, ret: %d\n",
+				__func__, ret);
+		}
+	}
+
+	mutex_unlock(&tasha->swr_write_lock);
+	kfree(bulk_reg);
+
+	return ret;
+}
+
+static int tasha_swrm_write(void *handle, int reg, int val)
+{
+	struct tasha_priv *tasha;
+	struct wcd9xxx *wcd9xxx;
+	unsigned short swr_wr_addr_base;
+	unsigned short swr_wr_data_base;
+	struct wcd9xxx_reg_val bulk_reg[2];
+	int ret;
+
+	if (!handle) {
+		pr_err("%s: NULL handle\n", __func__);
+		return -EINVAL;
+	}
+	tasha = (struct tasha_priv *)handle;
+	wcd9xxx = tasha->wcd9xxx;
+
+	swr_wr_addr_base = WCD9335_SWR_AHB_BRIDGE_WR_ADDR_0;
+	swr_wr_data_base = WCD9335_SWR_AHB_BRIDGE_WR_DATA_0;
+
+	/* First write the data to the WR_DATA register */
+	bulk_reg[0].reg = swr_wr_data_base;
+	bulk_reg[0].buf = (u8 *)(&val);
+	bulk_reg[0].bytes = 4;
+	bulk_reg[1].reg = swr_wr_addr_base;
+	bulk_reg[1].buf = (u8 *)(&reg);
+	bulk_reg[1].bytes = 4;
+
+	mutex_lock(&tasha->swr_write_lock);
+
+	if (wcd9xxx_get_intf_type() == WCD9XXX_INTERFACE_TYPE_I2C) {
+		ret = tasha_swrm_i2s_bulk_write(wcd9xxx, bulk_reg, 1);
+		if (ret) {
+			dev_err(tasha->dev, "%s: i2s swrm write failed, ret: %d\n",
+				__func__, ret);
+		}
+	} else {
+		ret = wcd9xxx_slim_bulk_write(wcd9xxx, bulk_reg, 2, false);
+		if (ret < 0)
+			pr_err("%s: WR Data Failure\n", __func__);
+	}
+
+	mutex_unlock(&tasha->swr_write_lock);
+	return ret;
+}
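+
+/*
+ * A single logical SoundWire write thus expands to two bridge writes
+ * (value to WR_DATA, then address to WR_ADDR) - the same packing the
+ * bulk path above builds, with exactly one pair.
+ */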
+
+static int tasha_swrm_clock(void *handle, bool enable)
+{
+	struct tasha_priv *tasha = (struct tasha_priv *) handle;
+
+	mutex_lock(&tasha->swr_clk_lock);
+
+	dev_dbg(tasha->dev, "%s: swrm clock %s\n",
+		__func__, (enable ? "enable" : "disable"));
+	if (enable) {
+		tasha->swr_clk_users++;
+		if (tasha->swr_clk_users == 1) {
+			if (TASHA_IS_2_0(tasha->wcd9xxx))
+				regmap_update_bits(
+					tasha->wcd9xxx->regmap,
+					WCD9335_TEST_DEBUG_NPL_DLY_TEST_1,
+					0x10, 0x00);
+			__tasha_cdc_mclk_enable(tasha, true);
+			regmap_update_bits(tasha->wcd9xxx->regmap,
+				WCD9335_CDC_CLK_RST_CTRL_SWR_CONTROL,
+				0x01, 0x01);
+		}
+	} else {
+		tasha->swr_clk_users--;
+		if (tasha->swr_clk_users == 0) {
+			regmap_update_bits(tasha->wcd9xxx->regmap,
+				WCD9335_CDC_CLK_RST_CTRL_SWR_CONTROL,
+				0x01, 0x00);
+			__tasha_cdc_mclk_enable(tasha, false);
+			if (TASHA_IS_2_0(tasha->wcd9xxx))
+				regmap_update_bits(
+					tasha->wcd9xxx->regmap,
+					WCD9335_TEST_DEBUG_NPL_DLY_TEST_1,
+					0x10, 0x10);
+		}
+	}
+	dev_dbg(tasha->dev, "%s: swrm clock users %d\n",
+		__func__, tasha->swr_clk_users);
+	mutex_unlock(&tasha->swr_clk_lock);
+	return 0;
+}
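+
+/*
+ * The clock callback above is reference counted: only the first enable
+ * turns on the codec MCLK and the SWR clock-control bit, and only the
+ * last disable turns them off again, so the SoundWire master can call
+ * it per transaction without glitching the clock. On 2.0 silicon the
+ * NPL delay test bit is additionally toggled, per the TASHA_IS_2_0
+ * branches.
+ */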
+
+static int tasha_swrm_handle_irq(void *handle,
+				   irqreturn_t (*swrm_irq_handler)(int irq,
+								   void *data),
+				    void *swrm_handle,
+				    int action)
+{
+	struct tasha_priv *tasha;
+	int ret = 0;
+	struct wcd9xxx *wcd9xxx;
+
+	if (!handle) {
+		pr_err("%s: null handle received\n", __func__);
+		return -EINVAL;
+	}
+	tasha = (struct tasha_priv *) handle;
+	wcd9xxx = tasha->wcd9xxx;
+
+	if (action) {
+		ret = wcd9xxx_request_irq(&wcd9xxx->core_res,
+					  WCD9335_IRQ_SOUNDWIRE,
+					  swrm_irq_handler,
+					  "Tasha SWR Master", swrm_handle);
+		if (ret)
+			dev_err(tasha->dev, "%s: Failed to request irq %d\n",
+				__func__, WCD9335_IRQ_SOUNDWIRE);
+	} else {
+		wcd9xxx_free_irq(&wcd9xxx->core_res, WCD9335_IRQ_SOUNDWIRE,
+				 swrm_handle);
+	}
+
+	return ret;
+}
+
+static void tasha_add_child_devices(struct work_struct *work)
+{
+	struct tasha_priv *tasha;
+	struct platform_device *pdev;
+	struct device_node *node;
+	struct wcd9xxx *wcd9xxx;
+	struct tasha_swr_ctrl_data *swr_ctrl_data = NULL, *temp;
+	int ret, ctrl_num = 0;
+	struct wcd_swr_ctrl_platform_data *platdata;
+	char plat_dev_name[WCD9335_STRING_LEN];
+
+	tasha = container_of(work, struct tasha_priv,
+			     tasha_add_child_devices_work);
+	if (!tasha) {
+		pr_err("%s: Memory for WCD9335 does not exist\n",
+			__func__);
+		return;
+	}
+	wcd9xxx = tasha->wcd9xxx;
+	if (!wcd9xxx) {
+		pr_err("%s: Memory for WCD9XXX does not exist\n",
+			__func__);
+		return;
+	}
+	if (!wcd9xxx->dev->of_node) {
+		pr_err("%s: DT node for wcd9xxx does not exist\n",
+			__func__);
+		return;
+	}
+
+	platdata = &tasha->swr_plat_data;
+
+	for_each_child_of_node(wcd9xxx->dev->of_node, node) {
+		if (!strcmp(node->name, "swr_master"))
+			strlcpy(plat_dev_name, "tasha_swr_ctrl",
+				(WCD9335_STRING_LEN - 1));
+		else if (strnstr(node->name, "msm_cdc_pinctrl",
+				 strlen("msm_cdc_pinctrl")) != NULL)
+			strlcpy(plat_dev_name, node->name,
+				(WCD9335_STRING_LEN - 1));
+		else
+			continue;
+
+		pdev = platform_device_alloc(plat_dev_name, -1);
+		if (!pdev) {
+			dev_err(wcd9xxx->dev, "%s: pdev memory alloc failed\n",
+				__func__);
+			ret = -ENOMEM;
+			goto err;
+		}
+		pdev->dev.parent = tasha->dev;
+		pdev->dev.of_node = node;
+
+		if (!strcmp(node->name, "swr_master")) {
+			ret = platform_device_add_data(pdev, platdata,
+						       sizeof(*platdata));
+			if (ret) {
+				dev_err(&pdev->dev,
+					"%s: cannot add plat data ctrl:%d\n",
+					__func__, ctrl_num);
+				goto fail_pdev_add;
+			}
+		}
+
+		ret = platform_device_add(pdev);
+		if (ret) {
+			dev_err(&pdev->dev,
+				"%s: Cannot add platform device\n",
+				__func__);
+			goto fail_pdev_add;
+		}
+
+		if (!strcmp(node->name, "swr_master")) {
+			temp = krealloc(swr_ctrl_data,
+					(ctrl_num + 1) * sizeof(
+					struct tasha_swr_ctrl_data),
+					GFP_KERNEL);
+			if (!temp) {
+				dev_err(wcd9xxx->dev, "out of memory\n");
+				ret = -ENOMEM;
+				goto err;
+			}
+			swr_ctrl_data = temp;
+			swr_ctrl_data[ctrl_num].swr_pdev = pdev;
+			ctrl_num++;
+			dev_dbg(&pdev->dev,
+				"%s: Added soundwire ctrl device(s)\n",
+				__func__);
+			tasha->nr = ctrl_num;
+			tasha->swr_ctrl_data = swr_ctrl_data;
+		}
+	}
+
+	return;
+fail_pdev_add:
+	platform_device_put(pdev);
+err:
+	return;
+}
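+
+/*
+ * Illustrative device-tree layout consumed by the loop above; only the
+ * node names matter for matching, properties are omitted and the unit
+ * address is a placeholder:
+ *
+ *	tasha_codec {
+ *		swr_master {
+ *			...
+ *		};
+ *		msm_cdc_pinctrl@0 {
+ *			...
+ *		};
+ *	};
+ */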
+
+/*
+ * tasha_codec_ver: get the tasha codec version
+ *
+ * Return: enum codec_variant - version
+ */
+enum codec_variant tasha_codec_ver(void)
+{
+	return codec_ver;
+}
+EXPORT_SYMBOL(tasha_codec_ver);
+
+static int __tasha_enable_efuse_sensing(struct tasha_priv *tasha)
+{
+	int val, rc;
+
+	__tasha_cdc_mclk_enable(tasha, true);
+
+	regmap_update_bits(tasha->wcd9xxx->regmap,
+			   WCD9335_CHIP_TIER_CTRL_EFUSE_CTL, 0x1E, 0x20);
+	regmap_update_bits(tasha->wcd9xxx->regmap,
+			   WCD9335_CHIP_TIER_CTRL_EFUSE_CTL, 0x01, 0x01);
+
+	/*
+	 * 5ms sleep required after enabling efuse control
+	 * before checking the status.
+	 */
+	usleep_range(5000, 5500);
+	rc = regmap_read(tasha->wcd9xxx->regmap,
+			 WCD9335_CHIP_TIER_CTRL_EFUSE_STATUS, &val);
+
+	if (rc || (!(val & 0x01)))
+		WARN(1, "%s: Efuse sense is not complete\n", __func__);
+
+	__tasha_cdc_mclk_enable(tasha, false);
+
+	return rc;
+}
+
+void tasha_get_codec_ver(struct tasha_priv *tasha)
+{
+	int i;
+	int val;
+	struct tasha_reg_mask_val codec_reg[] = {
+		{WCD9335_CHIP_TIER_CTRL_EFUSE_VAL_OUT10, 0xFF, 0xFF},
+		{WCD9335_CHIP_TIER_CTRL_EFUSE_VAL_OUT11, 0xFF, 0x83},
+		{WCD9335_CHIP_TIER_CTRL_EFUSE_VAL_OUT12, 0xFF, 0x0A},
+	};
+
+	__tasha_enable_efuse_sensing(tasha);
+	for (i = 0; i < ARRAY_SIZE(codec_reg); i++) {
+		regmap_read(tasha->wcd9xxx->regmap, codec_reg[i].reg, &val);
+		if (val != codec_reg[i].val) {
+			codec_ver = WCD9335;
+			goto ret;
+		}
+	}
+	codec_ver = WCD9326;
+ret:
+	pr_debug("%s: codec is %d\n", __func__, codec_ver);
+}
+EXPORT_SYMBOL(tasha_get_codec_ver);
+
+static int tasha_probe(struct platform_device *pdev)
+{
+	int ret = 0;
+	struct tasha_priv *tasha;
+	struct clk *wcd_ext_clk, *wcd_native_clk;
+	struct wcd9xxx_resmgr_v2 *resmgr;
+	struct wcd9xxx_power_region *cdc_pwr;
+
+	if (wcd9xxx_get_intf_type() == WCD9XXX_INTERFACE_TYPE_I2C) {
+		if (apr_get_subsys_state() == APR_SUBSYS_DOWN) {
+			dev_err(&pdev->dev, "%s: dsp down\n", __func__);
+			return -EPROBE_DEFER;
+		}
+	}
+
+	tasha = devm_kzalloc(&pdev->dev, sizeof(struct tasha_priv),
+			    GFP_KERNEL);
+	if (!tasha) {
+		dev_err(&pdev->dev, "%s: cannot create memory for wcd9335\n",
+			__func__);
+		return -ENOMEM;
+	}
+	platform_set_drvdata(pdev, tasha);
+
+	tasha->wcd9xxx = dev_get_drvdata(pdev->dev.parent);
+	tasha->dev = &pdev->dev;
+	INIT_DELAYED_WORK(&tasha->power_gate_work, tasha_codec_power_gate_work);
+	mutex_init(&tasha->power_lock);
+	mutex_init(&tasha->sido_lock);
+	INIT_WORK(&tasha->tasha_add_child_devices_work,
+		  tasha_add_child_devices);
+	BLOCKING_INIT_NOTIFIER_HEAD(&tasha->notifier);
+	mutex_init(&tasha->micb_lock);
+	mutex_init(&tasha->swr_read_lock);
+	mutex_init(&tasha->swr_write_lock);
+	mutex_init(&tasha->swr_clk_lock);
+	mutex_init(&tasha->sb_clk_gear_lock);
+	mutex_init(&tasha->mclk_lock);
+
+	cdc_pwr = devm_kzalloc(&pdev->dev, sizeof(struct wcd9xxx_power_region),
+			       GFP_KERNEL);
+	if (!cdc_pwr) {
+		ret = -ENOMEM;
+		goto err_cdc_pwr;
+	}
+	tasha->wcd9xxx->wcd9xxx_pwr[WCD9XXX_DIG_CORE_REGION_1] = cdc_pwr;
+	cdc_pwr->pwr_collapse_reg_min = TASHA_DIG_CORE_REG_MIN;
+	cdc_pwr->pwr_collapse_reg_max = TASHA_DIG_CORE_REG_MAX;
+	wcd9xxx_set_power_state(tasha->wcd9xxx,
+				WCD_REGION_POWER_COLLAPSE_REMOVE,
+				WCD9XXX_DIG_CORE_REGION_1);
+
+	mutex_init(&tasha->codec_mutex);
+	/*
+	 * Init the resource manager so that if child nodes such as the
+	 * SoundWire master request a clock, the resource manager can
+	 * honor the request
+	 */
+	resmgr = wcd_resmgr_init(&tasha->wcd9xxx->core_res, NULL);
+	if (IS_ERR(resmgr)) {
+		ret = PTR_ERR(resmgr);
+		dev_err(&pdev->dev, "%s: Failed to initialize wcd resmgr\n",
+			__func__);
+		goto err_resmgr;
+	}
+	tasha->resmgr = resmgr;
+	tasha->swr_plat_data.handle = (void *) tasha;
+	tasha->swr_plat_data.read = tasha_swrm_read;
+	tasha->swr_plat_data.write = tasha_swrm_write;
+	tasha->swr_plat_data.bulk_write = tasha_swrm_bulk_write;
+	tasha->swr_plat_data.clk = tasha_swrm_clock;
+	tasha->swr_plat_data.handle_irq = tasha_swrm_handle_irq;
+
+	/* Register for Clock */
+	wcd_ext_clk = clk_get(tasha->wcd9xxx->dev, "wcd_clk");
+	if (IS_ERR(wcd_ext_clk)) {
+		dev_err(tasha->wcd9xxx->dev, "%s: clk get %s failed\n",
+			__func__, "wcd_clk");
+		ret = PTR_ERR(wcd_ext_clk);
+		goto err_clk;
+	}
+	tasha->wcd_ext_clk = wcd_ext_clk;
+	tasha->sido_voltage = SIDO_VOLTAGE_NOMINAL_MV;
+	set_bit(AUDIO_NOMINAL, &tasha->status_mask);
+	tasha->sido_ccl_cnt = 0;
+
+	/* Register native clk for 44.1 playback */
+	wcd_native_clk = clk_get(tasha->wcd9xxx->dev, "wcd_native_clk");
+	if (IS_ERR(wcd_native_clk))
+		dev_dbg(tasha->wcd9xxx->dev, "%s: clk get %s failed\n",
+			__func__, "wcd_native_clk");
+	else
+		tasha->wcd_native_clk = wcd_native_clk;
+
+	if (wcd9xxx_get_intf_type() == WCD9XXX_INTERFACE_TYPE_SLIMBUS)
+		ret = snd_soc_register_codec(&pdev->dev, &soc_codec_dev_tasha,
+					     tasha_dai, ARRAY_SIZE(tasha_dai));
+	else if (wcd9xxx_get_intf_type() == WCD9XXX_INTERFACE_TYPE_I2C)
+		ret = snd_soc_register_codec(&pdev->dev, &soc_codec_dev_tasha,
+					     tasha_i2s_dai,
+					     ARRAY_SIZE(tasha_i2s_dai));
+	else
+		ret = -EINVAL;
+	if (ret) {
+		dev_err(&pdev->dev, "%s: Codec registration failed, ret = %d\n",
+			__func__, ret);
+		goto err_cdc_reg;
+	}
+	/* Update codec register default values */
+	tasha_update_reg_defaults(tasha);
+	schedule_work(&tasha->tasha_add_child_devices_work);
+	tasha_get_codec_ver(tasha);
+
+	dev_info(&pdev->dev, "%s: Tasha driver probe done\n", __func__);
+	return ret;
+
+err_cdc_reg:
+	clk_put(tasha->wcd_ext_clk);
+	if (tasha->wcd_native_clk)
+		clk_put(tasha->wcd_native_clk);
+err_clk:
+	wcd_resmgr_remove(tasha->resmgr);
+err_resmgr:
+	devm_kfree(&pdev->dev, cdc_pwr);
+err_cdc_pwr:
+	mutex_destroy(&tasha->mclk_lock);
+	devm_kfree(&pdev->dev, tasha);
+	return ret;
+}
+
+static int tasha_remove(struct platform_device *pdev)
+{
+	struct tasha_priv *tasha;
+
+	tasha = platform_get_drvdata(pdev);
+
+	snd_soc_unregister_codec(&pdev->dev);
+	clk_put(tasha->wcd_ext_clk);
+	if (tasha->wcd_native_clk)
+		clk_put(tasha->wcd_native_clk);
+	mutex_destroy(&tasha->codec_mutex);
+	mutex_destroy(&tasha->mclk_lock);
+	mutex_destroy(&tasha->sb_clk_gear_lock);
+	devm_kfree(&pdev->dev, tasha);
+	return 0;
+}
+
+static struct platform_driver tasha_codec_driver = {
+	.probe = tasha_probe,
+	.remove = tasha_remove,
+	.driver = {
+		.name = "tasha_codec",
+		.owner = THIS_MODULE,
+#ifdef CONFIG_PM
+		.pm = &tasha_pm_ops,
+#endif
+	},
+};
+
+module_platform_driver(tasha_codec_driver);
+
+MODULE_DESCRIPTION("Tasha Codec driver");
+MODULE_LICENSE("GPL v2");
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/sound/soc/codecs/wcd9335.h	2019-01-22 16:16:29.551301175 +0100
@@ -0,0 +1,203 @@
+/*
+ * Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#ifndef WCD9335_H
+#define WCD9335_H
+
+#include <sound/soc.h>
+#include <sound/jack.h>
+#include <sound/apr_audio-v2.h>
+#include <linux/mfd/wcd9xxx/wcd9xxx-slimslave.h>
+#include "wcd-mbhc-v2.h"
+
+#define TASHA_REG_VAL(reg, val)      {reg, 0, val}
+
+#define TASHA_REGISTER_START_OFFSET  0x800
+#define TASHA_SB_PGD_PORT_RX_BASE   0x40
+#define TASHA_SB_PGD_PORT_TX_BASE   0x50
+
+#define TASHA_ZDET_SUPPORTED true
+/* z value defined in milliohm */
+#define TASHA_ZDET_VAL_32	32000
+#define TASHA_ZDET_VAL_400	400000
+#define TASHA_ZDET_VAL_1200	1200000
+#define TASHA_ZDET_VAL_100K	100000000
+/* z floating defined in ohms */
+#define TASHA_ZDET_FLOATING_IMPEDANCE 0x0FFFFFFE
+
+#define WCD9335_DMIC_CLK_DIV_2  0x0
+#define WCD9335_DMIC_CLK_DIV_3  0x1
+#define WCD9335_DMIC_CLK_DIV_4  0x2
+#define WCD9335_DMIC_CLK_DIV_6  0x3
+#define WCD9335_DMIC_CLK_DIV_8  0x4
+#define WCD9335_DMIC_CLK_DIV_16  0x5
+#define WCD9335_DMIC_CLK_DRIVE_DEFAULT 0x02
+
+#define WCD9335_ANC_DMIC_X2_FULL_RATE 1
+#define WCD9335_ANC_DMIC_X2_HALF_RATE 0
+
+/* Number of input and output SLIMbus ports */
+enum {
+	TASHA_RX0 = 0,
+	TASHA_RX1,
+	TASHA_RX2,
+	TASHA_RX3,
+	TASHA_RX4,
+	TASHA_RX5,
+	TASHA_RX6,
+	TASHA_RX7,
+	TASHA_RX8,
+	TASHA_RX9,
+	TASHA_RX10,
+	TASHA_RX11,
+	TASHA_RX12,
+	TASHA_RX_MAX,
+};
+
+enum {
+	TASHA_TX0 = 0,
+	TASHA_TX1,
+	TASHA_TX2,
+	TASHA_TX3,
+	TASHA_TX4,
+	TASHA_TX5,
+	TASHA_TX6,
+	TASHA_TX7,
+	TASHA_TX8,
+	TASHA_TX9,
+	TASHA_TX10,
+	TASHA_TX11,
+	TASHA_TX12,
+	TASHA_TX13,
+	TASHA_TX14,
+	TASHA_TX15,
+	TASHA_TX_MAX,
+};
+
+enum {
+	/* INTR_REG 0 */
+	WCD9335_IRQ_FLL_LOCK_LOSS = 1,
+	WCD9335_IRQ_HPH_PA_OCPL_FAULT,
+	WCD9335_IRQ_HPH_PA_OCPR_FAULT,
+	WCD9335_IRQ_EAR_PA_OCP_FAULT,
+	WCD9335_IRQ_HPH_PA_CNPL_COMPLETE,
+	WCD9335_IRQ_HPH_PA_CNPR_COMPLETE,
+	WCD9335_IRQ_EAR_PA_CNP_COMPLETE,
+	/* INTR_REG 1 */
+	WCD9335_IRQ_MBHC_SW_DET,
+	WCD9335_IRQ_MBHC_ELECT_INS_REM_DET,
+	WCD9335_IRQ_MBHC_BUTTON_PRESS_DET,
+	WCD9335_IRQ_MBHC_BUTTON_RELEASE_DET,
+	WCD9335_IRQ_MBHC_ELECT_INS_REM_LEG_DET,
+	WCD9335_IRQ_RESERVED_0,
+	WCD9335_IRQ_RESERVED_1,
+	WCD9335_IRQ_RESERVED_2,
+	/* INTR_REG 2 */
+	WCD9335_IRQ_LINE_PA1_CNP_COMPLETE,
+	WCD9335_IRQ_LINE_PA2_CNP_COMPLETE,
+	WCD9335_IRQ_LINE_PA3_CNP_COMPLETE,
+	WCD9335_IRQ_LINE_PA4_CNP_COMPLETE,
+	WCD9335_IRQ_SOUNDWIRE,
+	WCD9335_IRQ_VDD_DIG_RAMP_COMPLETE,
+	WCD9335_IRQ_RCO_ERROR,
+	WCD9335_IRQ_SVA_ERROR,
+	/* INTR_REG 3 */
+	WCD9335_IRQ_MAD_AUDIO,
+	WCD9335_IRQ_MAD_BEACON,
+	WCD9335_IRQ_MAD_ULTRASOUND,
+	WCD9335_IRQ_VBAT_ATTACK,
+	WCD9335_IRQ_VBAT_RESTORE,
+	WCD9335_IRQ_SVA_OUTBOX1,
+	WCD9335_IRQ_SVA_OUTBOX2,
+	WCD9335_NUM_IRQS,
+};
+
+enum wcd9335_codec_event {
+	WCD9335_CODEC_EVENT_CODEC_UP = 0,
+};
+
+enum tasha_on_demand_supply {
+	ON_DEMAND_MICBIAS = 0,
+	ON_DEMAND_SUPPLIES_MAX,
+};
+
+/*
+ * Holds the on-demand supply defined for the
+ * codec and the count of its current users.
+ */
+struct on_demand_supply {
+	struct regulator *supply;
+	int ondemand_supply_count;
+};
+
+/*
+ * DAI data structure; holds DAI-specific
+ * info such as sample rate and channel count.
+ */
+struct tasha_codec_dai_data {
+	u32 rate;
+	u32 *ch_num;
+	u32 ch_act;
+	u32 ch_tot;
+};
+
+/* Structure used to update codec
+ * register defaults after reset
+ */
+struct tasha_reg_mask_val {
+	u16 reg;
+	u8 mask;
+	u8 val;
+};
+
+/* Selects compander and smart boost settings
+ * for a given speaker mode
+ */
+enum {
+	SPKR_MODE_DEFAULT,
+	SPKR_MODE_1,          /* COMP Gain = 12dB, Smartboost Max = 5.5V */
+};
+
+/*
+ * Rx path gain offsets
+ */
+enum {
+	RX_GAIN_OFFSET_M1P5_DB,
+	RX_GAIN_OFFSET_0_DB,
+};
+
+extern void *tasha_get_afe_config(struct snd_soc_codec *codec,
+				  enum afe_config_type config_type);
+extern int tasha_cdc_mclk_enable(struct snd_soc_codec *codec, int enable,
+				 bool dapm);
+extern int tasha_cdc_mclk_tx_enable(struct snd_soc_codec *codec, int enable,
+				    bool dapm);
+extern int tasha_enable_efuse_sensing(struct snd_soc_codec *codec);
+extern int tasha_mbhc_hs_detect(struct snd_soc_codec *codec,
+				struct wcd_mbhc_config *mbhc_cfg);
+extern void tasha_mbhc_hs_detect_exit(struct snd_soc_codec *codec);
+extern void tasha_mbhc_zdet_gpio_ctrl(
+		int (*zdet_gpio_cb)(struct snd_soc_codec *codec, bool high),
+		struct snd_soc_codec *codec);
+extern int tasha_codec_info_create_codec_entry(struct snd_info_entry *,
+					       struct snd_soc_codec *);
+extern void tasha_event_register(
+	int (*machine_event_cb)(struct snd_soc_codec *codec,
+				enum wcd9335_codec_event),
+	struct snd_soc_codec *codec);
+extern int tasha_codec_enable_standalone_micbias(struct snd_soc_codec *codec,
+						 int micb_num,
+						 bool enable);
+extern int tasha_set_spkr_mode(struct snd_soc_codec *codec, int mode);
+extern int tasha_set_spkr_gain_offset(struct snd_soc_codec *codec, int offset);
+extern enum codec_variant tasha_codec_ver(void);
+#endif
diff -Nruw linux-4.4.115-fbx/sound/soc/codecs/wcd934x./Makefile linux-4.4.115-fbx/sound/soc/codecs/wcd934x/Makefile
--- linux-4.4.115-fbx/sound/soc/codecs/wcd934x./Makefile	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/sound/soc/codecs/wcd934x/Makefile	2019-01-22 16:16:29.551301175 +0100
@@ -0,0 +1,9 @@
+#
+# Makefile for wcd934x codec driver.
+#
+snd-soc-wcd934x-objs := wcd934x.o wcd934x-dsp-cntl.o
+obj-$(CONFIG_SND_SOC_WCD934X) += snd-soc-wcd934x.o
+snd-soc-wcd934x-mbhc-objs := wcd934x-mbhc.o
+obj-$(CONFIG_SND_SOC_WCD934X_MBHC) += snd-soc-wcd934x-mbhc.o
+snd-soc-wcd934x-dsd-objs := wcd934x-dsd.o
+obj-$(CONFIG_SND_SOC_WCD934X_DSD) += snd-soc-wcd934x-dsd.o
diff -Nruw linux-4.4.115-fbx/sound/soc/codecs/wcd934x./wcd934x.c linux-4.4.115-fbx/sound/soc/codecs/wcd934x/wcd934x.c
--- linux-4.4.115-fbx/sound/soc/codecs/wcd934x./wcd934x.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/sound/soc/codecs/wcd934x/wcd934x.c	2019-01-22 16:16:29.551301175 +0100
@@ -0,0 +1,10133 @@
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/firmware.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+#include <linux/device.h>
+#include <linux/printk.h>
+#include <linux/ratelimit.h>
+#include <linux/debugfs.h>
+#include <linux/wait.h>
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/pm_runtime.h>
+#include <linux/kernel.h>
+#include <linux/gpio.h>
+#include <linux/regmap.h>
+#include <linux/spi/spi.h>
+#include <linux/mfd/wcd9xxx/core.h>
+#include <linux/mfd/wcd9xxx/wcd9xxx-irq.h>
+#include <linux/mfd/wcd9xxx/wcd9xxx_registers.h>
+#include <linux/mfd/wcd934x/registers.h>
+#include <linux/mfd/wcd9xxx/pdata.h>
+#include <linux/regulator/consumer.h>
+#include <linux/soundwire/swr-wcd.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+#include <sound/soc-dapm.h>
+#include <sound/tlv.h>
+#include <sound/info.h>
+#include "wcd934x.h"
+#include "wcd934x-mbhc.h"
+#include "wcd934x-routing.h"
+#include "wcd934x-dsp-cntl.h"
+#include "../wcd9xxx-common-v2.h"
+#include "../wcd9xxx-resmgr-v2.h"
+#include "../wcdcal-hwdep.h"
+#include "wcd934x-dsd.h"
+
+#define WCD934X_RATES_MASK (SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |\
+			    SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000 |\
+			    SNDRV_PCM_RATE_96000 | SNDRV_PCM_RATE_192000 |\
+			    SNDRV_PCM_RATE_384000)
+/* Fractional Rates */
+#define WCD934X_FRAC_RATES_MASK (SNDRV_PCM_RATE_44100 | SNDRV_PCM_RATE_88200 |\
+				 SNDRV_PCM_RATE_176400)
+
+#define WCD934X_FORMATS_S16_S24_LE (SNDRV_PCM_FMTBIT_S16_LE | \
+				    SNDRV_PCM_FMTBIT_S24_LE)
+
+#define WCD934X_FORMATS_S16_S24_S32_LE (SNDRV_PCM_FMTBIT_S16_LE | \
+					SNDRV_PCM_FMTBIT_S24_LE | \
+					SNDRV_PCM_FMTBIT_S32_LE)
+
+#define WCD934X_FORMATS_S16_LE (SNDRV_PCM_FMTBIT_S16_LE)
+
+/* Macros for packing register writes into a u32 */
+#define WCD934X_PACKED_REG_SIZE sizeof(u32)
+#define WCD934X_CODEC_UNPACK_ENTRY(packed, reg, mask, val) \
+	do { \
+		((reg) = ((packed >> 16) & (0xffff))); \
+		((mask) = ((packed >> 8) & (0xff))); \
+		((val) = ((packed) & (0xff))); \
+	} while (0)
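+
+/*
+ * Worked example for the unpack macro: a packed entry of 0x0ABC12EF
+ * yields reg = 0x0ABC (bits 31..16), mask = 0x12 (bits 15..8) and
+ * val = 0xEF (bits 7..0), so the resulting codec write is
+ * snd_soc_write(codec, 0x0ABC, 0xEF & 0x12).
+ */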
+
+#define STRING(name) #name
+#define WCD_DAPM_ENUM(name, reg, offset, text) \
+static SOC_ENUM_SINGLE_DECL(name##_enum, reg, offset, text); \
+static const struct snd_kcontrol_new name##_mux = \
+		SOC_DAPM_ENUM(STRING(name), name##_enum)
+
+#define WCD_DAPM_ENUM_EXT(name, reg, offset, text, getname, putname) \
+static SOC_ENUM_SINGLE_DECL(name##_enum, reg, offset, text); \
+static const struct snd_kcontrol_new name##_mux = \
+		SOC_DAPM_ENUM_EXT(STRING(name), name##_enum, getname, putname)
+
+#define WCD_DAPM_MUX(name, shift, kctl) \
+		SND_SOC_DAPM_MUX(name, SND_SOC_NOPM, shift, 0, &kctl##_mux)
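+
+/*
+ * Usage sketch for the helpers above ('foo' and 'foo_text' are
+ * placeholders): WCD_DAPM_ENUM(foo, SND_SOC_NOPM, 0, foo_text)
+ * declares foo_enum plus the foo_mux kcontrol, and
+ * WCD_DAPM_MUX("FOO MUX", 0, foo) then wraps foo_mux into a DAPM mux
+ * widget.
+ */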
+
+/*
+ * Timeout, in milliseconds, to wait for the SLIMbus
+ * channel removal interrupt.
+ */
+#define WCD934X_SLIM_CLOSE_TIMEOUT 1000
+#define WCD934X_SLIM_IRQ_OVERFLOW (1 << 0)
+#define WCD934X_SLIM_IRQ_UNDERFLOW (1 << 1)
+#define WCD934X_SLIM_IRQ_PORT_CLOSED (1 << 2)
+#define WCD934X_MCLK_CLK_12P288MHZ 12288000
+#define WCD934X_MCLK_CLK_9P6MHZ 9600000
+
+#define WCD934X_INTERP_MUX_NUM_INPUTS 3
+#define WCD934X_NUM_INTERPOLATORS 9
+#define WCD934X_NUM_DECIMATORS 9
+#define WCD934X_RX_PATH_CTL_OFFSET 20
+
+#define BYTE_BIT_MASK(nr) (1 << ((nr) % BITS_PER_BYTE))
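+/* e.g. BYTE_BIT_MASK(10) == 1 << (10 % 8) == 0x04 */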
+
+#define WCD934X_REG_BITS 8
+#define WCD934X_MAX_VALID_ADC_MUX  13
+#define WCD934X_INVALID_ADC_MUX 9
+
+#define WCD934X_AMIC_PWR_LEVEL_LP 0
+#define WCD934X_AMIC_PWR_LEVEL_DEFAULT 1
+#define WCD934X_AMIC_PWR_LEVEL_HP 2
+#define WCD934X_AMIC_PWR_LVL_MASK 0x60
+#define WCD934X_AMIC_PWR_LVL_SHIFT 0x5
+
+#define WCD934X_DEC_PWR_LVL_MASK 0x06
+#define WCD934X_DEC_PWR_LVL_LP 0x02
+#define WCD934X_DEC_PWR_LVL_HP 0x04
+#define WCD934X_DEC_PWR_LVL_DF 0x00
+#define WCD934X_STRING_LEN 100
+
+#define WCD934X_CDC_SIDETONE_IIR_COEFF_MAX 5
+#define WCD934X_DIG_CORE_REG_MIN  WCD934X_CDC_ANC0_CLK_RESET_CTL
+#define WCD934X_DIG_CORE_REG_MAX  0xFFF
+
+#define WCD934X_MAX_MICBIAS 4
+#define DAPM_MICBIAS1_STANDALONE "MIC BIAS1 Standalone"
+#define DAPM_MICBIAS2_STANDALONE "MIC BIAS2 Standalone"
+#define DAPM_MICBIAS3_STANDALONE "MIC BIAS3 Standalone"
+#define DAPM_MICBIAS4_STANDALONE "MIC BIAS4 Standalone"
+
+#define  TX_HPF_CUT_OFF_FREQ_MASK	0x60
+#define  CF_MIN_3DB_4HZ			0x0
+#define  CF_MIN_3DB_75HZ		0x1
+#define  CF_MIN_3DB_150HZ		0x2
+
+#define CPE_ERR_WDOG_BITE BIT(0)
+#define CPE_FATAL_IRQS CPE_ERR_WDOG_BITE
+
+#define WCD934X_MAD_AUDIO_FIRMWARE_PATH "wcd934x/wcd934x_mad_audio.bin"
+
+#define TAVIL_VERSION_ENTRY_SIZE 17
+
+#define WCD934X_DIG_CORE_COLLAPSE_TIMER_MS  (5 * 1000)
+
+enum {
+	POWER_COLLAPSE,
+	POWER_RESUME,
+};
+
+static int dig_core_collapse_enable = 1;
+module_param(dig_core_collapse_enable, int,
+		S_IRUGO | S_IWUSR | S_IWGRP);
+MODULE_PARM_DESC(dig_core_collapse_enable, "enable/disable power gating");
+
+/* dig_core_collapse timer in seconds */
+static int dig_core_collapse_timer = (WCD934X_DIG_CORE_COLLAPSE_TIMER_MS/1000);
+module_param(dig_core_collapse_timer, int,
+		S_IRUGO | S_IWUSR | S_IWGRP);
+MODULE_PARM_DESC(dig_core_collapse_timer, "timer for power gating");
+
+#define TAVIL_HPH_REG_RANGE_1  (WCD934X_HPH_R_DAC_CTL - WCD934X_HPH_CNP_EN + 1)
+#define TAVIL_HPH_REG_RANGE_2  (WCD934X_HPH_NEW_ANA_HPH3 -\
+				WCD934X_HPH_NEW_ANA_HPH2 + 1)
+#define TAVIL_HPH_REG_RANGE_3  (WCD934X_HPH_NEW_INT_PA_RDAC_MISC3 -\
+				WCD934X_HPH_NEW_INT_RDAC_GAIN_CTL + 1)
+#define TAVIL_HPH_TOTAL_REG    (TAVIL_HPH_REG_RANGE_1 + TAVIL_HPH_REG_RANGE_2 +\
+				TAVIL_HPH_REG_RANGE_3)
+
+enum {
+	VI_SENSE_1,
+	VI_SENSE_2,
+	AUDIO_NOMINAL,
+	HPH_PA_DELAY,
+	CLSH_Z_CONFIG,
+	ANC_MIC_AMIC1,
+	ANC_MIC_AMIC2,
+	ANC_MIC_AMIC3,
+	ANC_MIC_AMIC4,
+};
+
+enum {
+	AIF1_PB = 0,
+	AIF1_CAP,
+	AIF2_PB,
+	AIF2_CAP,
+	AIF3_PB,
+	AIF3_CAP,
+	AIF4_PB,
+	AIF4_VIFEED,
+	AIF4_MAD_TX,
+	NUM_CODEC_DAIS,
+};
+
+enum {
+	INTn_1_INP_SEL_ZERO = 0,
+	INTn_1_INP_SEL_DEC0,
+	INTn_1_INP_SEL_DEC1,
+	INTn_1_INP_SEL_IIR0,
+	INTn_1_INP_SEL_IIR1,
+	INTn_1_INP_SEL_RX0,
+	INTn_1_INP_SEL_RX1,
+	INTn_1_INP_SEL_RX2,
+	INTn_1_INP_SEL_RX3,
+	INTn_1_INP_SEL_RX4,
+	INTn_1_INP_SEL_RX5,
+	INTn_1_INP_SEL_RX6,
+	INTn_1_INP_SEL_RX7,
+};
+
+enum {
+	INTn_2_INP_SEL_ZERO = 0,
+	INTn_2_INP_SEL_RX0,
+	INTn_2_INP_SEL_RX1,
+	INTn_2_INP_SEL_RX2,
+	INTn_2_INP_SEL_RX3,
+	INTn_2_INP_SEL_RX4,
+	INTn_2_INP_SEL_RX5,
+	INTn_2_INP_SEL_RX6,
+	INTn_2_INP_SEL_RX7,
+	INTn_2_INP_SEL_PROXIMITY,
+};
+
+enum {
+	INTERP_MAIN_PATH,
+	INTERP_MIX_PATH,
+};
+
+struct tavil_idle_detect_config {
+	u8 hph_idle_thr;
+	u8 hph_idle_detect_en;
+};
+
+static const struct intr_data wcd934x_intr_table[] = {
+	{WCD9XXX_IRQ_SLIMBUS, false},
+	{WCD934X_IRQ_MBHC_SW_DET, true},
+	{WCD934X_IRQ_MBHC_BUTTON_PRESS_DET, true},
+	{WCD934X_IRQ_MBHC_BUTTON_RELEASE_DET, true},
+	{WCD934X_IRQ_MBHC_ELECT_INS_REM_DET, true},
+	{WCD934X_IRQ_MBHC_ELECT_INS_REM_LEG_DET, true},
+	{WCD934X_IRQ_MISC, false},
+	{WCD934X_IRQ_HPH_PA_CNPL_COMPLETE, false},
+	{WCD934X_IRQ_HPH_PA_CNPR_COMPLETE, false},
+	{WCD934X_IRQ_EAR_PA_CNP_COMPLETE, false},
+	{WCD934X_IRQ_LINE_PA1_CNP_COMPLETE, false},
+	{WCD934X_IRQ_LINE_PA2_CNP_COMPLETE, false},
+	{WCD934X_IRQ_SLNQ_ANALOG_ERROR, false},
+	{WCD934X_IRQ_RESERVED_3, false},
+	{WCD934X_IRQ_HPH_PA_OCPL_FAULT, false},
+	{WCD934X_IRQ_HPH_PA_OCPR_FAULT, false},
+	{WCD934X_IRQ_EAR_PA_OCP_FAULT, false},
+	{WCD934X_IRQ_SOUNDWIRE, false},
+	{WCD934X_IRQ_VDD_DIG_RAMP_COMPLETE, false},
+	{WCD934X_IRQ_RCO_ERROR, false},
+	{WCD934X_IRQ_CPE_ERROR, false},
+	{WCD934X_IRQ_MAD_AUDIO, false},
+	{WCD934X_IRQ_MAD_BEACON, false},
+	{WCD934X_IRQ_CPE1_INTR, true},
+	{WCD934X_IRQ_RESERVED_4, false},
+	{WCD934X_IRQ_MAD_ULTRASOUND, false},
+	{WCD934X_IRQ_VBAT_ATTACK, false},
+	{WCD934X_IRQ_VBAT_RESTORE, false},
+};
+
+struct tavil_cpr_reg_defaults {
+	int wr_data;
+	int wr_addr;
+};
+
+struct interp_sample_rate {
+	int sample_rate;
+	int rate_val;
+};
+
+static struct interp_sample_rate sr_val_tbl[] = {
+	{8000, 0x0}, {16000, 0x1}, {32000, 0x3}, {48000, 0x4}, {96000, 0x5},
+	{192000, 0x6}, {384000, 0x7}, {44100, 0x9}, {88200, 0xA},
+	{176400, 0xB}, {352800, 0xC},
+};
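+
+/*
+ * The table above maps a PCM sample rate to the value programmed into
+ * the interpolator rate field: the 48 kHz family occupies 0x0-0x7 and
+ * the fractional 44.1 kHz family 0x9-0xC.
+ */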
+
+static const struct wcd9xxx_ch tavil_rx_chs[WCD934X_RX_MAX] = {
+	WCD9XXX_CH(WCD934X_RX_PORT_START_NUMBER, 0),
+	WCD9XXX_CH(WCD934X_RX_PORT_START_NUMBER + 1, 1),
+	WCD9XXX_CH(WCD934X_RX_PORT_START_NUMBER + 2, 2),
+	WCD9XXX_CH(WCD934X_RX_PORT_START_NUMBER + 3, 3),
+	WCD9XXX_CH(WCD934X_RX_PORT_START_NUMBER + 4, 4),
+	WCD9XXX_CH(WCD934X_RX_PORT_START_NUMBER + 5, 5),
+	WCD9XXX_CH(WCD934X_RX_PORT_START_NUMBER + 6, 6),
+	WCD9XXX_CH(WCD934X_RX_PORT_START_NUMBER + 7, 7),
+};
+
+static const struct wcd9xxx_ch tavil_tx_chs[WCD934X_TX_MAX] = {
+	WCD9XXX_CH(0, 0),
+	WCD9XXX_CH(1, 1),
+	WCD9XXX_CH(2, 2),
+	WCD9XXX_CH(3, 3),
+	WCD9XXX_CH(4, 4),
+	WCD9XXX_CH(5, 5),
+	WCD9XXX_CH(6, 6),
+	WCD9XXX_CH(7, 7),
+	WCD9XXX_CH(8, 8),
+	WCD9XXX_CH(9, 9),
+	WCD9XXX_CH(10, 10),
+	WCD9XXX_CH(11, 11),
+	WCD9XXX_CH(12, 12),
+	WCD9XXX_CH(13, 13),
+	WCD9XXX_CH(14, 14),
+	WCD9XXX_CH(15, 15),
+};
+
+static const u32 vport_slim_check_table[NUM_CODEC_DAIS] = {
+	0,							/* AIF1_PB */
+	BIT(AIF2_CAP) | BIT(AIF3_CAP) | BIT(AIF4_MAD_TX),	/* AIF1_CAP */
+	0,							/* AIF2_PB */
+	BIT(AIF1_CAP) | BIT(AIF3_CAP) | BIT(AIF4_MAD_TX),	/* AIF2_CAP */
+	0,							/* AIF3_PB */
+	BIT(AIF1_CAP) | BIT(AIF2_CAP) | BIT(AIF4_MAD_TX),	/* AIF3_CAP */
+	0,							/* AIF4_PB */
+};
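+
+/*
+ * Each entry above is a bitmask of the other capture DAIs that must
+ * not already own a given SLIMbus TX port; slim_tx_mixer_put() hands
+ * it to wcd9xxx_tx_vport_validation() so one TX port is never routed
+ * to two virtual capture ports at once. Playback DAIs need no
+ * exclusion, hence the zero rows.
+ */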
+
+/* Codec supports 2 IIR filters */
+enum {
+	IIR0 = 0,
+	IIR1,
+	IIR_MAX,
+};
+
+/* Each IIR has 5 Filter Stages */
+enum {
+	BAND1 = 0,
+	BAND2,
+	BAND3,
+	BAND4,
+	BAND5,
+	BAND_MAX,
+};
+
+enum {
+	COMPANDER_1, /* HPH_L */
+	COMPANDER_2, /* HPH_R */
+	COMPANDER_3, /* LO1_DIFF */
+	COMPANDER_4, /* LO2_DIFF */
+	COMPANDER_5, /* LO3_SE - not used in Tavil */
+	COMPANDER_6, /* LO4_SE - not used in Tavil */
+	COMPANDER_7, /* SWR SPK CH1 */
+	COMPANDER_8, /* SWR SPK CH2 */
+	COMPANDER_MAX,
+};
+
+enum {
+	ASRC_IN_HPHL,
+	ASRC_IN_LO1,
+	ASRC_IN_HPHR,
+	ASRC_IN_LO2,
+	ASRC_IN_SPKR1,
+	ASRC_IN_SPKR2,
+	ASRC_INVALID,
+};
+
+enum {
+	ASRC0,
+	ASRC1,
+	ASRC2,
+	ASRC3,
+	ASRC_MAX,
+};
+
+enum {
+	CONV_88P2K_TO_384K,
+	CONV_96K_TO_352P8K,
+	CONV_352P8K_TO_384K,
+	CONV_384K_TO_352P8K,
+	CONV_384K_TO_384K,
+	CONV_96K_TO_384K,
+};
+
+static struct afe_param_slimbus_slave_port_cfg tavil_slimbus_slave_port_cfg = {
+	.minor_version = 1,
+	.slimbus_dev_id = AFE_SLIMBUS_DEVICE_1,
+	.slave_dev_pgd_la = 0,
+	.slave_dev_intfdev_la = 0,
+	.bit_width = 16,
+	.data_format = 0,
+	.num_channels = 1
+};
+
+static struct afe_param_cdc_reg_page_cfg tavil_cdc_reg_page_cfg = {
+	.minor_version = AFE_API_VERSION_CDC_REG_PAGE_CFG,
+	.enable = 1,
+	.proc_id = AFE_CDC_REG_PAGE_ASSIGN_PROC_ID_1,
+};
+
+static struct afe_param_cdc_reg_cfg audio_reg_cfg[] = {
+	{
+		1,
+		(WCD934X_REGISTER_START_OFFSET + WCD934X_SOC_MAD_MAIN_CTL_1),
+		HW_MAD_AUDIO_ENABLE, 0x1, WCD934X_REG_BITS, 0
+	},
+	{
+		1,
+		(WCD934X_REGISTER_START_OFFSET + WCD934X_SOC_MAD_AUDIO_CTL_3),
+		HW_MAD_AUDIO_SLEEP_TIME, 0xF, WCD934X_REG_BITS, 0
+	},
+	{
+		1,
+		(WCD934X_REGISTER_START_OFFSET + WCD934X_SOC_MAD_AUDIO_CTL_4),
+		HW_MAD_TX_AUDIO_SWITCH_OFF, 0x1, WCD934X_REG_BITS, 0
+	},
+	{
+		1,
+		(WCD934X_REGISTER_START_OFFSET + WCD934X_INTR_CFG),
+		MAD_AUDIO_INT_DEST_SELECT_REG, 0x2, WCD934X_REG_BITS, 0
+	},
+	{
+		1,
+		(WCD934X_REGISTER_START_OFFSET + WCD934X_INTR_PIN2_MASK3),
+		MAD_AUDIO_INT_MASK_REG, 0x1, WCD934X_REG_BITS, 0
+	},
+	{
+		1,
+		(WCD934X_REGISTER_START_OFFSET + WCD934X_INTR_PIN2_STATUS3),
+		MAD_AUDIO_INT_STATUS_REG, 0x1, WCD934X_REG_BITS, 0
+	},
+	{
+		1,
+		(WCD934X_REGISTER_START_OFFSET + WCD934X_INTR_PIN2_CLEAR3),
+		MAD_AUDIO_INT_CLEAR_REG, 0x1, WCD934X_REG_BITS, 0
+	},
+	{
+		1,
+		(WCD934X_REGISTER_START_OFFSET + WCD934X_SB_PGD_PORT_TX_BASE),
+		SB_PGD_PORT_TX_WATERMARK_N, 0x1E, WCD934X_REG_BITS, 0x1
+	},
+	{
+		1,
+		(WCD934X_REGISTER_START_OFFSET + WCD934X_SB_PGD_PORT_TX_BASE),
+		SB_PGD_PORT_TX_ENABLE_N, 0x1, WCD934X_REG_BITS, 0x1
+	},
+	{
+		1,
+		(WCD934X_REGISTER_START_OFFSET + WCD934X_SB_PGD_PORT_RX_BASE),
+		SB_PGD_PORT_RX_WATERMARK_N, 0x1E, WCD934X_REG_BITS, 0x1
+	},
+	{
+		1,
+		(WCD934X_REGISTER_START_OFFSET + WCD934X_SB_PGD_PORT_RX_BASE),
+		SB_PGD_PORT_RX_ENABLE_N, 0x1, WCD934X_REG_BITS, 0x1
+	},
+	{
+		1,
+		(WCD934X_REGISTER_START_OFFSET +
+		 WCD934X_CDC_ANC0_IIR_ADAPT_CTL),
+		AANC_FF_GAIN_ADAPTIVE, 0x4, WCD934X_REG_BITS, 0
+	},
+	{
+		1,
+		(WCD934X_REGISTER_START_OFFSET +
+		 WCD934X_CDC_ANC0_IIR_ADAPT_CTL),
+		AANC_FFGAIN_ADAPTIVE_EN, 0x8, WCD934X_REG_BITS, 0
+	},
+	{
+		1,
+		(WCD934X_REGISTER_START_OFFSET +
+		 WCD934X_CDC_ANC0_FF_A_GAIN_CTL),
+		AANC_GAIN_CONTROL, 0xFF, WCD934X_REG_BITS, 0
+	},
+	{
+		1,
+		(WCD934X_REGISTER_START_OFFSET +
+		 SB_PGD_TX_PORT_MULTI_CHANNEL_0(0)),
+		SB_PGD_TX_PORTn_MULTI_CHNL_0, 0xFF, WCD934X_REG_BITS, 0x4
+	},
+	{
+		1,
+		(WCD934X_REGISTER_START_OFFSET +
+		 SB_PGD_TX_PORT_MULTI_CHANNEL_1(0)),
+		SB_PGD_TX_PORTn_MULTI_CHNL_1, 0xFF, WCD934X_REG_BITS, 0x4
+	},
+	{
+		1,
+		(WCD934X_REGISTER_START_OFFSET +
+		 SB_PGD_RX_PORT_MULTI_CHANNEL_0(0x180, 0)),
+		SB_PGD_RX_PORTn_MULTI_CHNL_0, 0xFF, WCD934X_REG_BITS, 0x4
+	},
+	{
+		1,
+		(WCD934X_REGISTER_START_OFFSET +
+		 SB_PGD_RX_PORT_MULTI_CHANNEL_0(0x181, 0)),
+		SB_PGD_RX_PORTn_MULTI_CHNL_1, 0xFF, WCD934X_REG_BITS, 0x4
+	},
+};
+
+static struct afe_param_cdc_reg_cfg_data tavil_audio_reg_cfg = {
+	.num_registers = ARRAY_SIZE(audio_reg_cfg),
+	.reg_data = audio_reg_cfg,
+};
+
+static struct afe_param_id_cdc_aanc_version tavil_cdc_aanc_version = {
+	.cdc_aanc_minor_version = AFE_API_VERSION_CDC_AANC_VERSION,
+	.aanc_hw_version        = AANC_HW_BLOCK_VERSION_2,
+};
+
+static const DECLARE_TLV_DB_SCALE(digital_gain, 0, 1, 0);
+static const DECLARE_TLV_DB_SCALE(line_gain, 0, 7, 1);
+static const DECLARE_TLV_DB_SCALE(analog_gain, 0, 25, 1);
+
+#define WCD934X_TX_UNMUTE_DELAY_MS 40
+
+static int tx_unmute_delay = WCD934X_TX_UNMUTE_DELAY_MS;
+module_param(tx_unmute_delay, int,
+	     S_IRUGO | S_IWUSR | S_IWGRP);
+MODULE_PARM_DESC(tx_unmute_delay, "delay to unmute the tx path");
+
+static void tavil_codec_set_tx_hold(struct snd_soc_codec *, u16, bool);
+
+/* Holds an instance of the SoundWire platform device */
+struct tavil_swr_ctrl_data {
+	struct platform_device *swr_pdev;
+};
+
+struct wcd_swr_ctrl_platform_data {
+	void *handle; /* holds codec private data */
+	int (*read)(void *handle, int reg);
+	int (*write)(void *handle, int reg, int val);
+	int (*bulk_write)(void *handle, u32 *reg, u32 *val, size_t len);
+	int (*clk)(void *handle, bool enable);
+	int (*handle_irq)(void *handle,
+			  irqreturn_t (*swrm_irq_handler)(int irq, void *data),
+			  void *swrm_handle, int action);
+};
+
+/* Holds all Soundwire and speaker related information */
+struct wcd934x_swr {
+	struct tavil_swr_ctrl_data *ctrl_data;
+	struct wcd_swr_ctrl_platform_data plat_data;
+	struct mutex read_mutex;
+	struct mutex write_mutex;
+	struct mutex clk_mutex;
+	int spkr_gain_offset;
+	int spkr_mode;
+	int clk_users;
+	int rx_7_count;
+	int rx_8_count;
+};
+
+struct tx_mute_work {
+	struct tavil_priv *tavil;
+	u8 decimator;
+	struct delayed_work dwork;
+};
+
+#define WCD934X_SPK_ANC_EN_DELAY_MS 350
+static int spk_anc_en_delay = WCD934X_SPK_ANC_EN_DELAY_MS;
+module_param(spk_anc_en_delay, int, S_IRUGO | S_IWUSR | S_IWGRP);
+MODULE_PARM_DESC(spk_anc_en_delay, "delay to enable anc in speaker path");
+
+struct spk_anc_work {
+	struct tavil_priv *tavil;
+	struct delayed_work dwork;
+};
+
+struct hpf_work {
+	struct tavil_priv *tavil;
+	u8 decimator;
+	u8 hpf_cut_off_freq;
+	struct delayed_work dwork;
+};
+
+struct tavil_priv {
+	struct device *dev;
+	struct wcd9xxx *wcd9xxx;
+	struct snd_soc_codec *codec;
+	u32 rx_bias_count;
+	s32 dmic_0_1_clk_cnt;
+	s32 dmic_2_3_clk_cnt;
+	s32 dmic_4_5_clk_cnt;
+	s32 micb_ref[TAVIL_MAX_MICBIAS];
+	s32 pullup_ref[TAVIL_MAX_MICBIAS];
+
+	/* ANC related */
+	u32 anc_slot;
+	bool anc_func;
+
+	/* compander */
+	int comp_enabled[COMPANDER_MAX];
+	int ear_spkr_gain;
+
+	/* class h specific data */
+	struct wcd_clsh_cdc_data clsh_d;
+	/* Tavil Interpolator Mode Select for EAR, HPH_L and HPH_R */
+	u32 hph_mode;
+
+	/* Mad switch reference count */
+	int mad_switch_cnt;
+
+	/* track tavil interface type */
+	u8 intf_type;
+
+	/* to track the status */
+	unsigned long status_mask;
+
+	struct afe_param_cdc_slimbus_slave_cfg slimbus_slave_cfg;
+
+	/* num of slim ports required */
+	struct wcd9xxx_codec_dai_data  dai[NUM_CODEC_DAIS];
+	/* Port values for Rx and Tx codec_dai */
+	unsigned int rx_port_value[WCD934X_RX_MAX];
+	unsigned int tx_port_value;
+
+	struct wcd9xxx_resmgr_v2 *resmgr;
+	struct wcd934x_swr swr;
+	struct mutex micb_lock;
+
+	struct delayed_work power_gate_work;
+	struct mutex power_lock;
+
+	struct clk *wcd_ext_clk;
+
+	/* mbhc module */
+	struct wcd934x_mbhc *mbhc;
+
+	struct mutex codec_mutex;
+	struct work_struct tavil_add_child_devices_work;
+	struct hpf_work tx_hpf_work[WCD934X_NUM_DECIMATORS];
+	struct tx_mute_work tx_mute_dwork[WCD934X_NUM_DECIMATORS];
+	struct spk_anc_work spk_anc_dwork;
+
+	unsigned int vi_feed_value;
+
+	/* DSP control */
+	struct wcd_dsp_cntl *wdsp_cntl;
+
+	/* cal info for codec */
+	struct fw_info *fw_data;
+
+	/* Entry for version info */
+	struct snd_info_entry *entry;
+	struct snd_info_entry *version_entry;
+
+	/* SVS voting related */
+	struct mutex svs_mutex;
+	int svs_ref_cnt;
+
+	int native_clk_users;
+	/* ASRC users count */
+	int asrc_users[ASRC_MAX];
+	int asrc_output_mode[ASRC_MAX];
+	/* Main path clock users count */
+	int main_clk_users[WCD934X_NUM_INTERPOLATORS];
+	struct tavil_dsd_config *dsd_config;
+	struct tavil_idle_detect_config idle_det_cfg;
+
+	int power_active_ref;
+	int sidetone_coeff_array[IIR_MAX][BAND_MAX]
+		[WCD934X_CDC_SIDETONE_IIR_COEFF_MAX];
+};
+
+static const struct tavil_reg_mask_val tavil_spkr_default[] = {
+	{WCD934X_CDC_COMPANDER7_CTL3, 0x80, 0x80},
+	{WCD934X_CDC_COMPANDER8_CTL3, 0x80, 0x80},
+	{WCD934X_CDC_COMPANDER7_CTL7, 0x01, 0x01},
+	{WCD934X_CDC_COMPANDER8_CTL7, 0x01, 0x01},
+	{WCD934X_CDC_BOOST0_BOOST_CTL, 0x7C, 0x50},
+	{WCD934X_CDC_BOOST1_BOOST_CTL, 0x7C, 0x50},
+};
+
+static const struct tavil_reg_mask_val tavil_spkr_mode1[] = {
+	{WCD934X_CDC_COMPANDER7_CTL3, 0x80, 0x00},
+	{WCD934X_CDC_COMPANDER8_CTL3, 0x80, 0x00},
+	{WCD934X_CDC_COMPANDER7_CTL7, 0x01, 0x00},
+	{WCD934X_CDC_COMPANDER8_CTL7, 0x01, 0x00},
+	{WCD934X_CDC_BOOST0_BOOST_CTL, 0x7C, 0x44},
+	{WCD934X_CDC_BOOST1_BOOST_CTL, 0x7C, 0x44},
+};
+
+static int __tavil_enable_efuse_sensing(struct tavil_priv *tavil);
+
+/*
+ * wcd934x_get_codec_info: Get codec specific information
+ *
+ * @wcd9xxx: pointer to wcd9xxx structure
+ * @wcd_type: pointer to wcd9xxx_codec_type structure
+ *
+ * Returns 0 for success or negative error code for failure
+ */
+int wcd934x_get_codec_info(struct wcd9xxx *wcd9xxx,
+			   struct wcd9xxx_codec_type *wcd_type)
+{
+	u16 id_minor, id_major;
+	struct regmap *wcd_regmap;
+	int rc, version = -1;
+
+	if (!wcd9xxx || !wcd_type)
+		return -EINVAL;
+
+	if (!wcd9xxx->regmap) {
+		dev_err(wcd9xxx->dev, "%s: wcd9xxx regmap is null\n", __func__);
+		return -EINVAL;
+	}
+	wcd_regmap = wcd9xxx->regmap;
+
+	rc = regmap_bulk_read(wcd_regmap, WCD934X_CHIP_TIER_CTRL_CHIP_ID_BYTE0,
+			      (u8 *)&id_minor, sizeof(u16));
+	if (rc)
+		return -EINVAL;
+
+	rc = regmap_bulk_read(wcd_regmap, WCD934X_CHIP_TIER_CTRL_CHIP_ID_BYTE2,
+			      (u8 *)&id_major, sizeof(u16));
+	if (rc)
+		return -EINVAL;
+
+	dev_info(wcd9xxx->dev, "%s: wcd9xxx chip id major 0x%x, minor 0x%x\n",
+		 __func__, id_major, id_minor);
+
+	if (id_major != TAVIL_MAJOR)
+		goto version_unknown;
+
+	/*
+	 * Fine-grained version info cannot be retrieved before the tavil
+	 * probe, so assign coarse versions for possible use until then.
+	 */
+	if (id_minor == cpu_to_le16(0))
+		version = TAVIL_VERSION_1_0;
+	else if (id_minor == cpu_to_le16(0x01))
+		version = TAVIL_VERSION_1_1;
+
+version_unknown:
+	if (version < 0)
+		dev_err(wcd9xxx->dev, "%s: wcd934x version unknown\n",
+			__func__);
+
+	/* Fill codec type info */
+	wcd_type->id_major = id_major;
+	wcd_type->id_minor = id_minor;
+	wcd_type->num_irqs = WCD934X_NUM_IRQS;
+	wcd_type->version = version;
+	wcd_type->slim_slave_type = WCD9XXX_SLIM_SLAVE_ADDR_TYPE_1;
+	wcd_type->i2c_chip_status = 0x01;
+	wcd_type->intr_tbl = wcd934x_intr_table;
+	wcd_type->intr_tbl_size = ARRAY_SIZE(wcd934x_intr_table);
+
+	wcd_type->intr_reg[WCD9XXX_INTR_STATUS_BASE] =
+						WCD934X_INTR_PIN1_STATUS0;
+	wcd_type->intr_reg[WCD9XXX_INTR_CLEAR_BASE] =
+						WCD934X_INTR_PIN1_CLEAR0;
+	wcd_type->intr_reg[WCD9XXX_INTR_MASK_BASE] =
+						WCD934X_INTR_PIN1_MASK0;
+	wcd_type->intr_reg[WCD9XXX_INTR_LEVEL_BASE] =
+						WCD934X_INTR_LEVEL0;
+	wcd_type->intr_reg[WCD9XXX_INTR_CLR_COMMIT] =
+						WCD934X_INTR_CLR_COMMIT;
+
+	return rc;
+}
+EXPORT_SYMBOL(wcd934x_get_codec_info);
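+
+/*
+ * Illustrative caller sketch (the wcd9xxx core is the real caller;
+ * 'wcd9xxx' here is an assumed local):
+ *
+ *	struct wcd9xxx_codec_type type;
+ *
+ *	if (!wcd934x_get_codec_info(wcd9xxx, &type))
+ *		dev_info(wcd9xxx->dev, "tavil version %d\n", type.version);
+ */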
+
+/*
+ * wcd934x_bringdown: Bringdown WCD Codec
+ *
+ * @wcd9xxx: Pointer to wcd9xxx structure
+ *
+ * Returns 0 for success or negative error code for failure
+ */
+int wcd934x_bringdown(struct wcd9xxx *wcd9xxx)
+{
+	if (!wcd9xxx || !wcd9xxx->regmap)
+		return -EINVAL;
+
+	regmap_write(wcd9xxx->regmap, WCD934X_CODEC_RPM_PWR_CDC_DIG_HM_CTL,
+		     0x04);
+
+	return 0;
+}
+EXPORT_SYMBOL(wcd934x_bringdown);
+
+/*
+ * wcd934x_bringup: Bringup WCD Codec
+ *
+ * @wcd9xxx: Pointer to the wcd9xxx structure
+ *
+ * Returns 0 for success or negative error code for failure
+ */
+int wcd934x_bringup(struct wcd9xxx *wcd9xxx)
+{
+	struct regmap *wcd_regmap;
+
+	if (!wcd9xxx)
+		return -EINVAL;
+
+	if (!wcd9xxx->regmap) {
+		dev_err(wcd9xxx->dev, "%s: wcd9xxx regmap is null!\n",
+			__func__);
+		return -EINVAL;
+	}
+	wcd_regmap = wcd9xxx->regmap;
+
+	regmap_write(wcd_regmap, WCD934X_CODEC_RPM_RST_CTL, 0x01);
+	regmap_write(wcd_regmap, WCD934X_SIDO_NEW_VOUT_A_STARTUP, 0x19);
+	regmap_write(wcd_regmap, WCD934X_SIDO_NEW_VOUT_D_STARTUP, 0x15);
+	/* Add 1msec delay for VOUT to settle */
+	usleep_range(1000, 1100);
+	regmap_write(wcd_regmap, WCD934X_CODEC_RPM_PWR_CDC_DIG_HM_CTL, 0x5);
+	regmap_write(wcd_regmap, WCD934X_CODEC_RPM_PWR_CDC_DIG_HM_CTL, 0x7);
+	regmap_write(wcd_regmap, WCD934X_CODEC_RPM_RST_CTL, 0x3);
+	regmap_write(wcd_regmap, WCD934X_CODEC_RPM_RST_CTL, 0x7);
+	regmap_write(wcd_regmap, WCD934X_CODEC_RPM_PWR_CDC_DIG_HM_CTL, 0x3);
+
+	return 0;
+}
+EXPORT_SYMBOL(wcd934x_bringup);
+
+/**
+ * tavil_set_spkr_gain_offset - offset the speaker path
+ * gain with the given offset value.
+ *
+ * @codec: codec instance
+ * @offset: Indicates speaker path gain offset value.
+ *
+ * Returns 0 on success or -EINVAL on error.
+ */
+int tavil_set_spkr_gain_offset(struct snd_soc_codec *codec, int offset)
+{
+	struct tavil_priv *priv = snd_soc_codec_get_drvdata(codec);
+
+	if (!priv)
+		return -EINVAL;
+
+	priv->swr.spkr_gain_offset = offset;
+	return 0;
+}
+EXPORT_SYMBOL(tavil_set_spkr_gain_offset);
+
+/**
+ * tavil_set_spkr_mode - Configures speaker compander and smartboost
+ * settings based on speaker mode.
+ *
+ * @codec: codec instance
+ * @mode: Indicates speaker configuration mode.
+ *
+ * Returns 0 on success or -EINVAL on error.
+ */
+int tavil_set_spkr_mode(struct snd_soc_codec *codec, int mode)
+{
+	struct tavil_priv *priv = snd_soc_codec_get_drvdata(codec);
+	int i;
+	const struct tavil_reg_mask_val *regs;
+	int size;
+
+	if (!priv)
+		return -EINVAL;
+
+	switch (mode) {
+	case WCD934X_SPKR_MODE_1:
+		regs = tavil_spkr_mode1;
+		size = ARRAY_SIZE(tavil_spkr_mode1);
+		break;
+	default:
+		regs = tavil_spkr_default;
+		size = ARRAY_SIZE(tavil_spkr_default);
+		break;
+	}
+
+	priv->swr.spkr_mode = mode;
+	for (i = 0; i < size; i++)
+		snd_soc_update_bits(codec, regs[i].reg,
+				    regs[i].mask, regs[i].val);
+	return 0;
+}
+EXPORT_SYMBOL(tavil_set_spkr_mode);
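+
+/*
+ * Machine-driver usage sketch (illustrative):
+ *
+ *	tavil_set_spkr_mode(codec, WCD934X_SPKR_MODE_1);
+ */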
+
+/**
+ * tavil_get_afe_config - returns specific codec configuration to afe to write
+ *
+ * @codec: codec instance
+ * @config_type: Indicates type of configuration to write.
+ */
+void *tavil_get_afe_config(struct snd_soc_codec *codec,
+			   enum afe_config_type config_type)
+{
+	struct tavil_priv *priv = snd_soc_codec_get_drvdata(codec);
+
+	switch (config_type) {
+	case AFE_SLIMBUS_SLAVE_CONFIG:
+		return &priv->slimbus_slave_cfg;
+	case AFE_CDC_REGISTERS_CONFIG:
+		return &tavil_audio_reg_cfg;
+	case AFE_SLIMBUS_SLAVE_PORT_CONFIG:
+		return &tavil_slimbus_slave_port_cfg;
+	case AFE_AANC_VERSION:
+		return &tavil_cdc_aanc_version;
+	case AFE_CDC_REGISTER_PAGE_CONFIG:
+		return &tavil_cdc_reg_page_cfg;
+	default:
+		dev_info(codec->dev, "%s: Unknown config_type 0x%x\n",
+			__func__, config_type);
+		return NULL;
+	}
+}
+EXPORT_SYMBOL(tavil_get_afe_config);
+
+static bool is_tavil_playback_dai(int dai_id)
+{
+	if ((dai_id == AIF1_PB) || (dai_id == AIF2_PB) ||
+	    (dai_id == AIF3_PB) || (dai_id == AIF4_PB))
+		return true;
+
+	return false;
+}
+
+static int tavil_find_playback_dai_id_for_port(int port_id,
+					       struct tavil_priv *tavil)
+{
+	struct wcd9xxx_codec_dai_data *dai;
+	struct wcd9xxx_ch *ch;
+	int i, slv_port_id;
+
+	for (i = AIF1_PB; i < NUM_CODEC_DAIS; i++) {
+		if (!is_tavil_playback_dai(i))
+			continue;
+
+		dai = &tavil->dai[i];
+		list_for_each_entry(ch, &dai->wcd9xxx_ch_list, list) {
+			slv_port_id = wcd9xxx_get_slave_port(ch->ch_num);
+			if ((slv_port_id > 0) && (slv_port_id == port_id))
+				return i;
+		}
+	}
+
+	return -EINVAL;
+}
+
+static void tavil_vote_svs(struct tavil_priv *tavil, bool vote)
+{
+	struct wcd9xxx *wcd9xxx;
+
+	wcd9xxx = tavil->wcd9xxx;
+
+	mutex_lock(&tavil->svs_mutex);
+	if (vote) {
+		tavil->svs_ref_cnt++;
+		if (tavil->svs_ref_cnt == 1)
+			regmap_update_bits(wcd9xxx->regmap,
+					   WCD934X_CPE_SS_PWR_SYS_PSTATE_CTL_0,
+					   0x01, 0x01);
+	} else {
+		/* Do not decrement ref count if it is already 0 */
+		if (tavil->svs_ref_cnt == 0)
+			goto done;
+
+		tavil->svs_ref_cnt--;
+		if (tavil->svs_ref_cnt == 0)
+			regmap_update_bits(wcd9xxx->regmap,
+					   WCD934X_CPE_SS_PWR_SYS_PSTATE_CTL_0,
+					   0x01, 0x00);
+	}
+done:
+	dev_dbg(tavil->dev, "%s: vote = %s, updated ref cnt = %d\n", __func__,
+		vote ? "vote" : "unvote", tavil->svs_ref_cnt);
+	mutex_unlock(&tavil->svs_mutex);
+}
+
+static int tavil_get_anc_slot(struct snd_kcontrol *kcontrol,
+			      struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
+	struct tavil_priv *tavil = snd_soc_codec_get_drvdata(codec);
+
+	ucontrol->value.integer.value[0] = tavil->anc_slot;
+	return 0;
+}
+
+static int tavil_put_anc_slot(struct snd_kcontrol *kcontrol,
+			      struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
+	struct tavil_priv *tavil = snd_soc_codec_get_drvdata(codec);
+
+	tavil->anc_slot = ucontrol->value.integer.value[0];
+	return 0;
+}
+
+static int tavil_get_anc_func(struct snd_kcontrol *kcontrol,
+			      struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
+	struct tavil_priv *tavil = snd_soc_codec_get_drvdata(codec);
+
+	ucontrol->value.integer.value[0] = (tavil->anc_func == true ? 1 : 0);
+	return 0;
+}
+
+static int tavil_put_anc_func(struct snd_kcontrol *kcontrol,
+			      struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
+	struct tavil_priv *tavil = snd_soc_codec_get_drvdata(codec);
+	struct snd_soc_dapm_context *dapm = snd_soc_codec_get_dapm(codec);
+
+	mutex_lock(&tavil->codec_mutex);
+	tavil->anc_func = (!ucontrol->value.integer.value[0] ? false : true);
+	dev_dbg(codec->dev, "%s: anc_func %x\n", __func__, tavil->anc_func);
+
+	if (tavil->anc_func == true) {
+		snd_soc_dapm_enable_pin(dapm, "ANC EAR PA");
+		snd_soc_dapm_enable_pin(dapm, "ANC EAR");
+		snd_soc_dapm_enable_pin(dapm, "ANC SPK1 PA");
+		snd_soc_dapm_enable_pin(dapm, "ANC HPHL PA");
+		snd_soc_dapm_enable_pin(dapm, "ANC HPHR PA");
+		snd_soc_dapm_enable_pin(dapm, "ANC HPHL");
+		snd_soc_dapm_enable_pin(dapm, "ANC HPHR");
+		snd_soc_dapm_disable_pin(dapm, "EAR PA");
+		snd_soc_dapm_disable_pin(dapm, "EAR");
+		snd_soc_dapm_disable_pin(dapm, "HPHL PA");
+		snd_soc_dapm_disable_pin(dapm, "HPHR PA");
+		snd_soc_dapm_disable_pin(dapm, "HPHL");
+		snd_soc_dapm_disable_pin(dapm, "HPHR");
+	} else {
+		snd_soc_dapm_disable_pin(dapm, "ANC EAR PA");
+		snd_soc_dapm_disable_pin(dapm, "ANC EAR");
+		snd_soc_dapm_disable_pin(dapm, "ANC SPK1 PA");
+		snd_soc_dapm_disable_pin(dapm, "ANC HPHL PA");
+		snd_soc_dapm_disable_pin(dapm, "ANC HPHR PA");
+		snd_soc_dapm_disable_pin(dapm, "ANC HPHL");
+		snd_soc_dapm_disable_pin(dapm, "ANC HPHR");
+		snd_soc_dapm_enable_pin(dapm, "EAR PA");
+		snd_soc_dapm_enable_pin(dapm, "EAR");
+		snd_soc_dapm_enable_pin(dapm, "HPHL");
+		snd_soc_dapm_enable_pin(dapm, "HPHR");
+		snd_soc_dapm_enable_pin(dapm, "HPHL PA");
+		snd_soc_dapm_enable_pin(dapm, "HPHR PA");
+	}
+	mutex_unlock(&tavil->codec_mutex);
+
+	snd_soc_dapm_sync(dapm);
+	return 0;
+}
+
+static int tavil_codec_enable_anc(struct snd_soc_dapm_widget *w,
+				  struct snd_kcontrol *kcontrol, int event)
+{
+	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+	struct tavil_priv *tavil = snd_soc_codec_get_drvdata(codec);
+	const char *filename;
+	const struct firmware *fw;
+	int i;
+	int ret = 0;
+	int num_anc_slots;
+	struct wcd9xxx_anc_header *anc_head;
+	struct firmware_cal *hwdep_cal = NULL;
+	u32 anc_writes_size = 0;
+	u32 anc_cal_size = 0;
+	int anc_size_remaining;
+	u32 *anc_ptr;
+	u16 reg;
+	u8 mask, val;
+	size_t cal_size;
+	const void *data;
+
+	if (!tavil->anc_func)
+		return 0;
+
+	switch (event) {
+	case SND_SOC_DAPM_PRE_PMU:
+		hwdep_cal = wcdcal_get_fw_cal(tavil->fw_data, WCD9XXX_ANC_CAL);
+		if (hwdep_cal) {
+			data = hwdep_cal->data;
+			cal_size = hwdep_cal->size;
+			dev_dbg(codec->dev, "%s: using hwdep calibration, cal_size %zu\n",
+				__func__, cal_size);
+		} else {
+			filename = "WCD934X/WCD934X_anc.bin";
+			ret = request_firmware(&fw, filename, codec->dev);
+			if (ret) {
+				dev_err(codec->dev, "%s: Failed to acquire ANC data: %d\n",
+					__func__, ret);
+				return ret;
+			}
+			if (!fw) {
+				dev_err(codec->dev, "%s: Failed to get anc fw\n",
+					__func__);
+				return -ENODEV;
+			}
+			data = fw->data;
+			cal_size = fw->size;
+			dev_dbg(codec->dev, "%s: using request_firmware calibration\n",
+				__func__);
+		}
+		if (cal_size < sizeof(struct wcd9xxx_anc_header)) {
+			dev_err(codec->dev, "%s: Invalid cal_size %zu\n",
+				__func__, cal_size);
+			ret = -EINVAL;
+			goto err;
+		}
+		/* First number is the number of register writes */
+		anc_head = (struct wcd9xxx_anc_header *)(data);
+		anc_ptr = (u32 *)(data + sizeof(struct wcd9xxx_anc_header));
+		anc_size_remaining = cal_size -
+				     sizeof(struct wcd9xxx_anc_header);
+		num_anc_slots = anc_head->num_anc_slots;
+
+		if (tavil->anc_slot >= num_anc_slots) {
+			dev_err(codec->dev, "%s: Invalid ANC slot selected\n",
+				__func__);
+			ret = -EINVAL;
+			goto err;
+		}
+		for (i = 0; i < num_anc_slots; i++) {
+			if (anc_size_remaining < WCD934X_PACKED_REG_SIZE) {
+				dev_err(codec->dev, "%s: Invalid register format\n",
+					__func__);
+				ret = -EINVAL;
+				goto err;
+			}
+			anc_writes_size = (u32)(*anc_ptr);
+			anc_size_remaining -= sizeof(u32);
+			anc_ptr += 1;
+
+			if ((anc_writes_size * WCD934X_PACKED_REG_SIZE) >
+			    anc_size_remaining) {
+				dev_err(codec->dev, "%s: Invalid register format\n",
+					__func__);
+				ret = -EINVAL;
+				goto err;
+			}
+
+			if (tavil->anc_slot == i)
+				break;
+
+			anc_size_remaining -= (anc_writes_size *
+				WCD934X_PACKED_REG_SIZE);
+			anc_ptr += anc_writes_size;
+		}
+		if (i == num_anc_slots) {
+			dev_err(codec->dev, "%s: Selected ANC slot not present\n",
+				__func__);
+			ret = -EINVAL;
+			goto err;
+		}
+
+		anc_cal_size = anc_writes_size;
+		for (i = 0; i < anc_writes_size; i++) {
+			WCD934X_CODEC_UNPACK_ENTRY(anc_ptr[i], reg, mask, val);
+			snd_soc_write(codec, reg, (val & mask));
+		}
+
+		/* Rate converter clk enable and set bypass mode */
+		if (!strcmp(w->name, "RX INT0 DAC") ||
+		    !strcmp(w->name, "RX INT1 DAC") ||
+		    !strcmp(w->name, "ANC SPK1 PA")) {
+			snd_soc_update_bits(codec,
+					    WCD934X_CDC_ANC0_RC_COMMON_CTL,
+					    0x05, 0x05);
+			if (!strcmp(w->name, "RX INT1 DAC")) {
+				snd_soc_update_bits(codec,
+					WCD934X_CDC_ANC0_FIFO_COMMON_CTL,
+					0x66, 0x66);
+			}
+		} else if (!strcmp(w->name, "RX INT2 DAC")) {
+			snd_soc_update_bits(codec,
+					    WCD934X_CDC_ANC1_RC_COMMON_CTL,
+					    0x05, 0x05);
+			snd_soc_update_bits(codec,
+					    WCD934X_CDC_ANC1_FIFO_COMMON_CTL,
+					    0x66, 0x66);
+		}
+		if (!strcmp(w->name, "RX INT1 DAC"))
+			snd_soc_update_bits(codec,
+				WCD934X_CDC_ANC0_CLK_RESET_CTL, 0x08, 0x08);
+		else if (!strcmp(w->name, "RX INT2 DAC"))
+			snd_soc_update_bits(codec,
+				WCD934X_CDC_ANC1_CLK_RESET_CTL, 0x08, 0x08);
+
+		if (!hwdep_cal)
+			release_firmware(fw);
+		break;
+
+	case SND_SOC_DAPM_POST_PMU:
+		if (!strcmp(w->name, "ANC HPHL PA") ||
+		    !strcmp(w->name, "ANC HPHR PA")) {
+			/* Remove ANC Rx from reset */
+			snd_soc_update_bits(codec,
+					    WCD934X_CDC_ANC0_CLK_RESET_CTL,
+					    0x08, 0x00);
+			snd_soc_update_bits(codec,
+					    WCD934X_CDC_ANC1_CLK_RESET_CTL,
+					    0x08, 0x00);
+		}
+
+		break;
+
+	case SND_SOC_DAPM_POST_PMD:
+		snd_soc_update_bits(codec, WCD934X_CDC_ANC0_RC_COMMON_CTL,
+				    0x05, 0x00);
+		if (!strcmp(w->name, "ANC EAR PA") ||
+		    !strcmp(w->name, "ANC SPK1 PA") ||
+		    !strcmp(w->name, "ANC HPHL PA")) {
+			snd_soc_update_bits(codec, WCD934X_CDC_ANC0_MODE_1_CTL,
+					    0x30, 0x00);
+			msleep(50);
+			snd_soc_update_bits(codec, WCD934X_CDC_ANC0_MODE_1_CTL,
+					    0x01, 0x00);
+			snd_soc_update_bits(codec,
+					    WCD934X_CDC_ANC0_CLK_RESET_CTL,
+					    0x38, 0x38);
+			snd_soc_update_bits(codec,
+					    WCD934X_CDC_ANC0_CLK_RESET_CTL,
+					    0x07, 0x00);
+			snd_soc_update_bits(codec,
+					    WCD934X_CDC_ANC0_CLK_RESET_CTL,
+					    0x38, 0x00);
+		} else if (!strcmp(w->name, "ANC HPHR PA")) {
+			snd_soc_update_bits(codec, WCD934X_CDC_ANC1_MODE_1_CTL,
+					    0x30, 0x00);
+			msleep(50);
+			snd_soc_update_bits(codec, WCD934X_CDC_ANC1_MODE_1_CTL,
+					    0x01, 0x00);
+			snd_soc_update_bits(codec,
+					    WCD934X_CDC_ANC1_CLK_RESET_CTL,
+					    0x38, 0x38);
+			snd_soc_update_bits(codec,
+					    WCD934X_CDC_ANC1_CLK_RESET_CTL,
+					    0x07, 0x00);
+			snd_soc_update_bits(codec,
+					    WCD934X_CDC_ANC1_CLK_RESET_CTL,
+					    0x38, 0x00);
+		}
+		break;
+	}
+
+	return 0;
+err:
+	if (!hwdep_cal)
+		release_firmware(fw);
+	return ret;
+}
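+
+/*
+ * Layout of the ANC calibration blob parsed above, as implied by the
+ * walk: a wcd9xxx_anc_header carrying num_anc_slots, followed per slot
+ * by a u32 write count and then that many packed u32 register writes
+ * (see WCD934X_CODEC_UNPACK_ENTRY):
+ *
+ *	[header][slot0 count][slot0 writes...][slot1 count][...]
+ */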
+
+static int tavil_vi_feed_mixer_get(struct snd_kcontrol *kcontrol,
+				   struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_dapm_widget_list *wlist =
+					dapm_kcontrol_get_wlist(kcontrol);
+	struct snd_soc_dapm_widget *widget = wlist->widgets[0];
+	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(widget->dapm);
+	struct tavil_priv *tavil_p = snd_soc_codec_get_drvdata(codec);
+
+	ucontrol->value.integer.value[0] = tavil_p->vi_feed_value;
+
+	return 0;
+}
+
+static int tavil_vi_feed_mixer_put(struct snd_kcontrol *kcontrol,
+				   struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_dapm_widget_list *wlist =
+					dapm_kcontrol_get_wlist(kcontrol);
+	struct snd_soc_dapm_widget *widget = wlist->widgets[0];
+	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(widget->dapm);
+	struct tavil_priv *tavil_p = snd_soc_codec_get_drvdata(codec);
+	struct wcd9xxx *core = dev_get_drvdata(codec->dev->parent);
+	struct soc_multi_mixer_control *mixer =
+		((struct soc_multi_mixer_control *)kcontrol->private_value);
+	u32 dai_id = widget->shift;
+	u32 port_id = mixer->shift;
+	u32 enable = ucontrol->value.integer.value[0];
+
+	dev_dbg(codec->dev, "%s: enable: %d, port_id:%d, dai_id: %d\n",
+		__func__, enable, port_id, dai_id);
+
+	tavil_p->vi_feed_value = ucontrol->value.integer.value[0];
+
+	mutex_lock(&tavil_p->codec_mutex);
+	if (enable) {
+		if (port_id == WCD934X_TX14 && !test_bit(VI_SENSE_1,
+						&tavil_p->status_mask)) {
+			list_add_tail(&core->tx_chs[WCD934X_TX14].list,
+					&tavil_p->dai[dai_id].wcd9xxx_ch_list);
+			set_bit(VI_SENSE_1, &tavil_p->status_mask);
+		}
+		if (port_id == WCD934X_TX15 && !test_bit(VI_SENSE_2,
+						&tavil_p->status_mask)) {
+			list_add_tail(&core->tx_chs[WCD934X_TX15].list,
+					&tavil_p->dai[dai_id].wcd9xxx_ch_list);
+			set_bit(VI_SENSE_2, &tavil_p->status_mask);
+		}
+	} else {
+		if (port_id == WCD934X_TX14 && test_bit(VI_SENSE_1,
+					&tavil_p->status_mask)) {
+			list_del_init(&core->tx_chs[WCD934X_TX14].list);
+			clear_bit(VI_SENSE_1, &tavil_p->status_mask);
+		}
+		if (port_id == WCD934X_TX15 && test_bit(VI_SENSE_2,
+					&tavil_p->status_mask)) {
+			list_del_init(&core->tx_chs[WCD934X_TX15].list);
+			clear_bit(VI_SENSE_2, &tavil_p->status_mask);
+		}
+	}
+	mutex_unlock(&tavil_p->codec_mutex);
+	snd_soc_dapm_mixer_update_power(widget->dapm, kcontrol, enable, NULL);
+
+	return 0;
+}
+
+static int slim_tx_mixer_get(struct snd_kcontrol *kcontrol,
+			     struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_dapm_widget_list *wlist =
+					dapm_kcontrol_get_wlist(kcontrol);
+	struct snd_soc_dapm_widget *widget = wlist->widgets[0];
+	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(widget->dapm);
+	struct tavil_priv *tavil_p = snd_soc_codec_get_drvdata(codec);
+
+	ucontrol->value.integer.value[0] = tavil_p->tx_port_value;
+	return 0;
+}
+
+static int slim_tx_mixer_put(struct snd_kcontrol *kcontrol,
+			     struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_dapm_widget_list *wlist =
+					dapm_kcontrol_get_wlist(kcontrol);
+	struct snd_soc_dapm_widget *widget = wlist->widgets[0];
+	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(widget->dapm);
+	struct tavil_priv *tavil_p = snd_soc_codec_get_drvdata(codec);
+	struct wcd9xxx *core = dev_get_drvdata(codec->dev->parent);
+	struct snd_soc_dapm_update *update = NULL;
+	struct soc_multi_mixer_control *mixer =
+		((struct soc_multi_mixer_control *)kcontrol->private_value);
+	u32 dai_id = widget->shift;
+	u32 port_id = mixer->shift;
+	u32 enable = ucontrol->value.integer.value[0];
+	u32 vtable;
+
+	dev_dbg(codec->dev, "%s: wname %s cname %s value %u shift %d item %ld\n",
+		  __func__,
+		widget->name, ucontrol->id.name, tavil_p->tx_port_value,
+		widget->shift, ucontrol->value.integer.value[0]);
+
+	mutex_lock(&tavil_p->codec_mutex);
+	if (dai_id >= ARRAY_SIZE(vport_slim_check_table)) {
+		dev_err(codec->dev, "%s: dai_id: %d, out of bounds\n",
+			__func__, dai_id);
+		mutex_unlock(&tavil_p->codec_mutex);
+		return -EINVAL;
+	}
+	vtable = vport_slim_check_table[dai_id];
+
+	switch (dai_id) {
+	case AIF1_CAP:
+	case AIF2_CAP:
+	case AIF3_CAP:
+		/* only add to the list if value not set */
+		if (enable && !(tavil_p->tx_port_value & 1 << port_id)) {
+			if (wcd9xxx_tx_vport_validation(vtable, port_id,
+			    tavil_p->dai, NUM_CODEC_DAIS)) {
+				dev_dbg(codec->dev, "%s: TX%u is used by other virtual port\n",
+					__func__, port_id);
+				mutex_unlock(&tavil_p->codec_mutex);
+				return 0;
+			}
+			tavil_p->tx_port_value |= 1 << port_id;
+			list_add_tail(&core->tx_chs[port_id].list,
+				      &tavil_p->dai[dai_id].wcd9xxx_ch_list);
+		} else if (!enable && (tavil_p->tx_port_value &
+			   1 << port_id)) {
+			tavil_p->tx_port_value &= ~(1 << port_id);
+			list_del_init(&core->tx_chs[port_id].list);
+		} else {
+			if (enable)
+				dev_dbg(codec->dev, "%s: TX%u port is used by this virtual port\n",
+					__func__, port_id);
+			else
+				dev_dbg(codec->dev, "%s: TX%u port is not used by this virtual port\n",
+					__func__, port_id);
+			/* avoid update power function */
+			mutex_unlock(&tavil_p->codec_mutex);
+			return 0;
+		}
+		break;
+	case AIF4_MAD_TX:
+		break;
+	default:
+		dev_err(codec->dev, "Unknown AIF %d\n", dai_id);
+		mutex_unlock(&tavil_p->codec_mutex);
+		return -EINVAL;
+	}
+	dev_dbg(codec->dev, "%s: name %s sname %s updated value %u shift %d\n",
+		__func__, widget->name, widget->sname, tavil_p->tx_port_value,
+		widget->shift);
+
+	mutex_unlock(&tavil_p->codec_mutex);
+	snd_soc_dapm_mixer_update_power(widget->dapm, kcontrol, enable, update);
+
+	return 0;
+}
+
+static int slim_rx_mux_get(struct snd_kcontrol *kcontrol,
+			   struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_dapm_widget_list *wlist =
+					dapm_kcontrol_get_wlist(kcontrol);
+	struct snd_soc_dapm_widget *widget = wlist->widgets[0];
+	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(widget->dapm);
+	struct tavil_priv *tavil_p = snd_soc_codec_get_drvdata(codec);
+
+	ucontrol->value.enumerated.item[0] =
+				tavil_p->rx_port_value[widget->shift];
+	return 0;
+}
+
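+/*
+ * SLIM RX mux handler: enum value 0 detaches the RX port from any AIF;
+ * values 1..4 attach it to AIF1_PB..AIF4_PB respectively. The
+ * wcd9xxx_rx_vport_validation() call skips the list_add if the port is
+ * already on the requested AIF's own channel list.
+ */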
+static int slim_rx_mux_put(struct snd_kcontrol *kcontrol,
+			   struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_dapm_widget_list *wlist =
+					dapm_kcontrol_get_wlist(kcontrol);
+	struct snd_soc_dapm_widget *widget = wlist->widgets[0];
+	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(widget->dapm);
+	struct tavil_priv *tavil_p = snd_soc_codec_get_drvdata(codec);
+	struct wcd9xxx *core = dev_get_drvdata(codec->dev->parent);
+	struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
+	struct snd_soc_dapm_update *update = NULL;
+	unsigned int rx_port_value;
+	u32 port_id = widget->shift;
+
+	tavil_p->rx_port_value[port_id] = ucontrol->value.enumerated.item[0];
+	rx_port_value = tavil_p->rx_port_value[port_id];
+
+	mutex_lock(&tavil_p->codec_mutex);
+	dev_dbg(codec->dev, "%s: wname %s cname %s value %u shift %d item %ld\n",
+		__func__, widget->name, ucontrol->id.name,
+		rx_port_value, widget->shift,
+		ucontrol->value.integer.value[0]);
+
+	/* value need to match the Virtual port and AIF number */
+	switch (rx_port_value) {
+	case 0:
+		list_del_init(&core->rx_chs[port_id].list);
+		break;
+	case 1:
+		if (wcd9xxx_rx_vport_validation(port_id +
+			WCD934X_RX_PORT_START_NUMBER,
+			&tavil_p->dai[AIF1_PB].wcd9xxx_ch_list)) {
+			dev_dbg(codec->dev, "%s: RX%u is used by current requesting AIF_PB itself\n",
+				__func__, port_id);
+			goto rtn;
+		}
+		list_add_tail(&core->rx_chs[port_id].list,
+			      &tavil_p->dai[AIF1_PB].wcd9xxx_ch_list);
+		break;
+	case 2:
+		if (wcd9xxx_rx_vport_validation(port_id +
+			WCD934X_RX_PORT_START_NUMBER,
+			&tavil_p->dai[AIF2_PB].wcd9xxx_ch_list)) {
+			dev_dbg(codec->dev, "%s: RX%u is used by current requesting AIF_PB itself\n",
+				__func__, port_id);
+			goto rtn;
+		}
+		list_add_tail(&core->rx_chs[port_id].list,
+			      &tavil_p->dai[AIF2_PB].wcd9xxx_ch_list);
+		break;
+	case 3:
+		if (wcd9xxx_rx_vport_validation(port_id +
+			WCD934X_RX_PORT_START_NUMBER,
+			&tavil_p->dai[AIF3_PB].wcd9xxx_ch_list)) {
+			dev_dbg(codec->dev, "%s: RX%u is used by current requesting AIF_PB itself\n",
+				__func__, port_id);
+			goto rtn;
+		}
+		list_add_tail(&core->rx_chs[port_id].list,
+			      &tavil_p->dai[AIF3_PB].wcd9xxx_ch_list);
+		break;
+	case 4:
+		if (wcd9xxx_rx_vport_validation(port_id +
+			WCD934X_RX_PORT_START_NUMBER,
+			&tavil_p->dai[AIF4_PB].wcd9xxx_ch_list)) {
+			dev_dbg(codec->dev, "%s: RX%u is used by current requesting AIF_PB itself\n",
+				__func__, port_id);
+			goto rtn;
+		}
+		list_add_tail(&core->rx_chs[port_id].list,
+			      &tavil_p->dai[AIF4_PB].wcd9xxx_ch_list);
+		break;
+	default:
+		dev_err(codec->dev, "Unknown AIF %d\n", rx_port_value);
+		goto err;
+	}
+rtn:
+	mutex_unlock(&tavil_p->codec_mutex);
+	snd_soc_dapm_mux_update_power(widget->dapm, kcontrol,
+				      rx_port_value, e, update);
+
+	return 0;
+err:
+	mutex_unlock(&tavil_p->codec_mutex);
+	return -EINVAL;
+}
+
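+/*
+ * Enable the per-port SLIMbus interrupt for every channel on this DAI.
+ * Ports at or above WCD934X_RX_PORT_START_NUMBER are RX ports; each
+ * interrupt-enable register covers 8 ports. The value is read back
+ * after the write, presumably to make sure it reached the interface
+ * registers before the port is used.
+ */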
+static void tavil_codec_enable_slim_port_intr(
+					struct wcd9xxx_codec_dai_data *dai,
+					struct snd_soc_codec *codec)
+{
+	struct wcd9xxx_ch *ch;
+	int port_num = 0;
+	unsigned short reg = 0;
+	u8 val = 0;
+	struct tavil_priv *tavil_p;
+
+	if (!dai || !codec) {
+		pr_err("%s: Invalid params\n", __func__);
+		return;
+	}
+
+	tavil_p = snd_soc_codec_get_drvdata(codec);
+	list_for_each_entry(ch, &dai->wcd9xxx_ch_list, list) {
+		if (ch->port >= WCD934X_RX_PORT_START_NUMBER) {
+			port_num = ch->port - WCD934X_RX_PORT_START_NUMBER;
+			reg = WCD934X_SLIM_PGD_PORT_INT_RX_EN0 + (port_num / 8);
+			val = wcd9xxx_interface_reg_read(tavil_p->wcd9xxx,
+				reg);
+			if (!(val & BYTE_BIT_MASK(port_num))) {
+				val |= BYTE_BIT_MASK(port_num);
+				wcd9xxx_interface_reg_write(
+					tavil_p->wcd9xxx, reg, val);
+				val = wcd9xxx_interface_reg_read(
+					tavil_p->wcd9xxx, reg);
+			}
+		} else {
+			port_num = ch->port;
+			reg = WCD934X_SLIM_PGD_PORT_INT_TX_EN0 + (port_num / 8);
+			val = wcd9xxx_interface_reg_read(tavil_p->wcd9xxx,
+				reg);
+			if (!(val & BYTE_BIT_MASK(port_num))) {
+				val |= BYTE_BIT_MASK(port_num);
+				wcd9xxx_interface_reg_write(tavil_p->wcd9xxx,
+					reg, val);
+				val = wcd9xxx_interface_reg_read(
+					tavil_p->wcd9xxx, reg);
+			}
+		}
+	}
+}
+
+static int tavil_codec_enable_slim_chmask(struct wcd9xxx_codec_dai_data *dai,
+					  bool up)
+{
+	int ret = 0;
+	struct wcd9xxx_ch *ch;
+
+	if (up) {
+		list_for_each_entry(ch, &dai->wcd9xxx_ch_list, list) {
+			ret = wcd9xxx_get_slave_port(ch->ch_num);
+			if (ret < 0) {
+				pr_err("%s: Invalid slave port ID: %d\n",
+				       __func__, ret);
+				ret = -EINVAL;
+			} else {
+				set_bit(ret, &dai->ch_mask);
+			}
+		}
+	} else {
+		ret = wait_event_timeout(dai->dai_wait, (dai->ch_mask == 0),
+					 msecs_to_jiffies(
+						WCD934X_SLIM_CLOSE_TIMEOUT));
+		if (!ret) {
+			pr_err("%s: Slim close tx/rx wait timeout, ch_mask:0x%lx\n",
+				__func__, dai->ch_mask);
+			ret = -ETIMEDOUT;
+		} else {
+			ret = 0;
+		}
+	}
+	return ret;
+}
+
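+/*
+ * The DSD input port number is encoded in bits [5:2] of DSDn_CFG0,
+ * with 0 meaning "not connected". If a port being torn down feeds a
+ * DSD path, bit 2 (0x04) of DSDn_CFG2 is set to mute it, for the
+ * reason given in the comment inside the loop.
+ */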
+static void tavil_codec_mute_dsd(struct snd_soc_codec *codec,
+				 struct list_head *ch_list)
+{
+	u8 dsd0_in;
+	u8 dsd1_in;
+	struct wcd9xxx_ch *ch;
+
+	/* Read DSD Input Ports */
+	dsd0_in = (snd_soc_read(codec, WCD934X_CDC_DSD0_CFG0) & 0x3C) >> 2;
+	dsd1_in = (snd_soc_read(codec, WCD934X_CDC_DSD1_CFG0) & 0x3C) >> 2;
+
+	if ((dsd0_in == 0) && (dsd1_in == 0))
+		return;
+
+	/*
+	 * Check if the ports getting disabled are connected to DSD inputs.
+	 * If connected, enable DSD mute to avoid DC entering into DSD Filter
+	 */
+	list_for_each_entry(ch, ch_list, list) {
+		if (ch->port == (dsd0_in + WCD934X_RX_PORT_START_NUMBER - 1))
+			snd_soc_update_bits(codec, WCD934X_CDC_DSD0_CFG2,
+					    0x04, 0x04);
+		if (ch->port == (dsd1_in + WCD934X_RX_PORT_START_NUMBER - 1))
+			snd_soc_update_bits(codec, WCD934X_CDC_DSD1_CFG2,
+					    0x04, 0x04);
+	}
+}
+
+static int tavil_codec_enable_slimrx(struct snd_soc_dapm_widget *w,
+				     struct snd_kcontrol *kcontrol,
+				     int event)
+{
+	struct wcd9xxx *core;
+	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+	struct tavil_priv *tavil_p = snd_soc_codec_get_drvdata(codec);
+	int ret = 0;
+	struct wcd9xxx_codec_dai_data *dai;
+	struct tavil_dsd_config *dsd_conf = tavil_p->dsd_config;
+
+	core = dev_get_drvdata(codec->dev->parent);
+
+	dev_dbg(codec->dev, "%s: event called! codec name %s num_dai %d\n"
+		"stream name %s event %d\n",
+		__func__, codec->component.name,
+		codec->component.num_dai, w->sname, event);
+
+	dai = &tavil_p->dai[w->shift];
+	dev_dbg(codec->dev, "%s: w->name %s w->shift %d event %d\n",
+		 __func__, w->name, w->shift, event);
+
+	switch (event) {
+	case SND_SOC_DAPM_POST_PMU:
+		dai->bus_down_in_recovery = false;
+		tavil_codec_enable_slim_port_intr(dai, codec);
+		(void) tavil_codec_enable_slim_chmask(dai, true);
+		ret = wcd9xxx_cfg_slim_sch_rx(core, &dai->wcd9xxx_ch_list,
+					      dai->rate, dai->bit_width,
+					      &dai->grph);
+		break;
+	case SND_SOC_DAPM_POST_PMD:
+		if (dsd_conf)
+			tavil_codec_mute_dsd(codec, &dai->wcd9xxx_ch_list);
+
+		ret = wcd9xxx_disconnect_port(core, &dai->wcd9xxx_ch_list,
+					      dai->grph);
+		dev_dbg(codec->dev, "%s: Disconnect RX port, ret = %d\n",
+			__func__, ret);
+
+		if (!dai->bus_down_in_recovery)
+			ret = tavil_codec_enable_slim_chmask(dai, false);
+		else
+			dev_dbg(codec->dev,
+				"%s: bus in recovery skip enable slim_chmask",
+				__func__);
+		ret = wcd9xxx_close_slim_sch_rx(core, &dai->wcd9xxx_ch_list,
+						dai->grph);
+		break;
+	}
+	return ret;
+}
+
+static int tavil_codec_enable_slimtx(struct snd_soc_dapm_widget *w,
+				     struct snd_kcontrol *kcontrol,
+				     int event)
+{
+	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+	struct tavil_priv *tavil_p = snd_soc_codec_get_drvdata(codec);
+	struct wcd9xxx_codec_dai_data *dai;
+	struct wcd9xxx *core;
+	int ret = 0;
+
+	dev_dbg(codec->dev,
+		"%s: w->name %s, w->shift = %d, num_dai %d stream name %s\n",
+		__func__, w->name, w->shift,
+		codec->component.num_dai, w->sname);
+
+	dai = &tavil_p->dai[w->shift];
+	core = dev_get_drvdata(codec->dev->parent);
+
+	switch (event) {
+	case SND_SOC_DAPM_POST_PMU:
+		dai->bus_down_in_recovery = false;
+		tavil_codec_enable_slim_port_intr(dai, codec);
+		(void) tavil_codec_enable_slim_chmask(dai, true);
+		ret = wcd9xxx_cfg_slim_sch_tx(core, &dai->wcd9xxx_ch_list,
+					      dai->rate, dai->bit_width,
+					      &dai->grph);
+		break;
+	case SND_SOC_DAPM_POST_PMD:
+		ret = wcd9xxx_close_slim_sch_tx(core, &dai->wcd9xxx_ch_list,
+						dai->grph);
+		if (!dai->bus_down_in_recovery)
+			ret = tavil_codec_enable_slim_chmask(dai, false);
+		if (ret < 0) {
+			ret = wcd9xxx_disconnect_port(core,
+						      &dai->wcd9xxx_ch_list,
+						      dai->grph);
+			dev_dbg(codec->dev, "%s: Disconnect RX port, ret = %d\n",
+				 __func__, ret);
+		}
+		break;
+	}
+	return ret;
+}
+
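+/*
+ * VI feedback path for speaker protection. The SPKR_PROT_PATH_CTL
+ * writes below follow one sequence per TX decimator pair: assert the
+ * 0x20 bit, program the rate field (0x0F) to 0, set the path-enable
+ * bit (0x10), then release 0x20. Bit roles are inferred from the
+ * masks; the exact field definitions live in the register header.
+ */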
+static int tavil_codec_enable_slimvi_feedback(struct snd_soc_dapm_widget *w,
+					      struct snd_kcontrol *kcontrol,
+					      int event)
+{
+	struct wcd9xxx *core = NULL;
+	struct snd_soc_codec *codec = NULL;
+	struct tavil_priv *tavil_p = NULL;
+	int ret = 0;
+	struct wcd9xxx_codec_dai_data *dai = NULL;
+
+	codec = snd_soc_dapm_to_codec(w->dapm);
+	tavil_p = snd_soc_codec_get_drvdata(codec);
+	core = dev_get_drvdata(codec->dev->parent);
+
+	dev_dbg(codec->dev,
+		"%s: num_dai %d stream name %s w->name %s event %d shift %d\n",
+		__func__, codec->component.num_dai, w->sname,
+		w->name, event, w->shift);
+
+	if (w->shift != AIF4_VIFEED) {
+		pr_err("%s Error in enabling the tx path\n", __func__);
+		ret = -EINVAL;
+		goto done;
+	}
+	dai = &tavil_p->dai[w->shift];
+	switch (event) {
+	case SND_SOC_DAPM_POST_PMU:
+		if (test_bit(VI_SENSE_1, &tavil_p->status_mask)) {
+			dev_dbg(codec->dev, "%s: spkr1 enabled\n", __func__);
+			/* Enable V&I sensing */
+			snd_soc_update_bits(codec,
+				WCD934X_CDC_TX9_SPKR_PROT_PATH_CTL, 0x20, 0x20);
+			snd_soc_update_bits(codec,
+				WCD934X_CDC_TX10_SPKR_PROT_PATH_CTL, 0x20,
+				0x20);
+			snd_soc_update_bits(codec,
+				WCD934X_CDC_TX9_SPKR_PROT_PATH_CTL, 0x0F, 0x00);
+			snd_soc_update_bits(codec,
+				WCD934X_CDC_TX10_SPKR_PROT_PATH_CTL, 0x0F,
+				0x00);
+			snd_soc_update_bits(codec,
+				WCD934X_CDC_TX9_SPKR_PROT_PATH_CTL, 0x10, 0x10);
+			snd_soc_update_bits(codec,
+				WCD934X_CDC_TX10_SPKR_PROT_PATH_CTL, 0x10,
+				0x10);
+			snd_soc_update_bits(codec,
+				WCD934X_CDC_TX9_SPKR_PROT_PATH_CTL, 0x20, 0x00);
+			snd_soc_update_bits(codec,
+				WCD934X_CDC_TX10_SPKR_PROT_PATH_CTL, 0x20,
+				0x00);
+		}
+		if (test_bit(VI_SENSE_2, &tavil_p->status_mask)) {
+			pr_debug("%s: spkr2 enabled\n", __func__);
+			/* Enable V&I sensing */
+			snd_soc_update_bits(codec,
+				WCD934X_CDC_TX11_SPKR_PROT_PATH_CTL, 0x20,
+				0x20);
+			snd_soc_update_bits(codec,
+				WCD934X_CDC_TX12_SPKR_PROT_PATH_CTL, 0x20,
+				0x20);
+			snd_soc_update_bits(codec,
+				WCD934X_CDC_TX11_SPKR_PROT_PATH_CTL, 0x0F,
+				0x00);
+			snd_soc_update_bits(codec,
+				WCD934X_CDC_TX12_SPKR_PROT_PATH_CTL, 0x0F,
+				0x00);
+			snd_soc_update_bits(codec,
+				WCD934X_CDC_TX11_SPKR_PROT_PATH_CTL, 0x10,
+				0x10);
+			snd_soc_update_bits(codec,
+				WCD934X_CDC_TX12_SPKR_PROT_PATH_CTL, 0x10,
+				0x10);
+			snd_soc_update_bits(codec,
+				WCD934X_CDC_TX11_SPKR_PROT_PATH_CTL, 0x20,
+				0x00);
+			snd_soc_update_bits(codec,
+				WCD934X_CDC_TX12_SPKR_PROT_PATH_CTL, 0x20,
+				0x00);
+		}
+		dai->bus_down_in_recovery = false;
+		tavil_codec_enable_slim_port_intr(dai, codec);
+		(void) tavil_codec_enable_slim_chmask(dai, true);
+		ret = wcd9xxx_cfg_slim_sch_tx(core, &dai->wcd9xxx_ch_list,
+					      dai->rate, dai->bit_width,
+					      &dai->grph);
+		break;
+	case SND_SOC_DAPM_POST_PMD:
+		ret = wcd9xxx_close_slim_sch_tx(core, &dai->wcd9xxx_ch_list,
+						dai->grph);
+		if (ret)
+			dev_err(codec->dev, "%s error in close_slim_sch_tx %d\n",
+				__func__, ret);
+		if (!dai->bus_down_in_recovery)
+			ret = tavil_codec_enable_slim_chmask(dai, false);
+		if (ret < 0) {
+			ret = wcd9xxx_disconnect_port(core,
+				&dai->wcd9xxx_ch_list,
+				dai->grph);
+			dev_dbg(codec->dev, "%s: Disconnect TX port, ret = %d\n",
+				__func__, ret);
+		}
+		if (test_bit(VI_SENSE_1, &tavil_p->status_mask)) {
+			/* Disable V&I sensing */
+			dev_dbg(codec->dev, "%s: spkr1 disabled\n", __func__);
+			snd_soc_update_bits(codec,
+				WCD934X_CDC_TX9_SPKR_PROT_PATH_CTL, 0x20, 0x20);
+			snd_soc_update_bits(codec,
+				WCD934X_CDC_TX10_SPKR_PROT_PATH_CTL, 0x20,
+				0x20);
+			snd_soc_update_bits(codec,
+				WCD934X_CDC_TX9_SPKR_PROT_PATH_CTL, 0x10, 0x00);
+			snd_soc_update_bits(codec,
+				WCD934X_CDC_TX10_SPKR_PROT_PATH_CTL, 0x10,
+				0x00);
+		}
+		if (test_bit(VI_SENSE_2, &tavil_p->status_mask)) {
+			/* Disable V&I sensing */
+			dev_dbg(codec->dev, "%s: spkr2 disabled\n", __func__);
+			snd_soc_update_bits(codec,
+				WCD934X_CDC_TX11_SPKR_PROT_PATH_CTL, 0x20,
+				0x20);
+			snd_soc_update_bits(codec,
+				WCD934X_CDC_TX12_SPKR_PROT_PATH_CTL, 0x20,
+				0x20);
+			snd_soc_update_bits(codec,
+				WCD934X_CDC_TX11_SPKR_PROT_PATH_CTL, 0x10,
+				0x00);
+			snd_soc_update_bits(codec,
+				WCD934X_CDC_TX12_SPKR_PROT_PATH_CTL, 0x10,
+				0x00);
+		}
+		break;
+	}
+done:
+	return ret;
+}
+
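+/* Reference-counted enable of the analog RX bias (ANA_RX_SUPPLIES bit 0). */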
+static int tavil_codec_enable_rx_bias(struct snd_soc_dapm_widget *w,
+				      struct snd_kcontrol *kcontrol, int event)
+{
+	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+	struct tavil_priv *tavil = snd_soc_codec_get_drvdata(codec);
+
+	dev_dbg(codec->dev, "%s %s %d\n", __func__, w->name, event);
+
+	switch (event) {
+	case SND_SOC_DAPM_PRE_PMU:
+		tavil->rx_bias_count++;
+		if (tavil->rx_bias_count == 1) {
+			snd_soc_update_bits(codec, WCD934X_ANA_RX_SUPPLIES,
+					    0x01, 0x01);
+		}
+		break;
+	case SND_SOC_DAPM_POST_PMD:
+		tavil->rx_bias_count--;
+		if (!tavil->rx_bias_count)
+			snd_soc_update_bits(codec, WCD934X_ANA_RX_SUPPLIES,
+					    0x01, 0x00);
+		break;
+	}
+	dev_dbg(codec->dev, "%s: Current RX BIAS user count: %d\n", __func__,
+		tavil->rx_bias_count);
+
+	return 0;
+}
+
+static void tavil_spk_anc_update_callback(struct work_struct *work)
+{
+	struct spk_anc_work *spk_anc_dwork;
+	struct tavil_priv *tavil;
+	struct delayed_work *delayed_work;
+	struct snd_soc_codec *codec;
+
+	delayed_work = to_delayed_work(work);
+	spk_anc_dwork = container_of(delayed_work, struct spk_anc_work, dwork);
+	tavil = spk_anc_dwork->tavil;
+	codec = tavil->codec;
+
+	snd_soc_update_bits(codec, WCD934X_CDC_RX7_RX_PATH_CFG0, 0x10, 0x10);
+}
+
+static int tavil_codec_enable_spkr_anc(struct snd_soc_dapm_widget *w,
+				     struct snd_kcontrol *kcontrol,
+				     int event)
+{
+	int ret = 0;
+	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+	struct tavil_priv *tavil = snd_soc_codec_get_drvdata(codec);
+
+	if (!tavil->anc_func)
+		return 0;
+
+	dev_dbg(codec->dev, "%s: w: %s event: %d anc: %d\n", __func__,
+		w->name, event, tavil->anc_func);
+
+	switch (event) {
+	case SND_SOC_DAPM_PRE_PMU:
+		ret = tavil_codec_enable_anc(w, kcontrol, event);
+		schedule_delayed_work(&tavil->spk_anc_dwork.dwork,
+				      msecs_to_jiffies(spk_anc_en_delay));
+		break;
+	case SND_SOC_DAPM_POST_PMD:
+		cancel_delayed_work_sync(&tavil->spk_anc_dwork.dwork);
+		snd_soc_update_bits(codec, WCD934X_CDC_RX7_RX_PATH_CFG0,
+				    0x10, 0x00);
+		ret = tavil_codec_enable_anc(w, kcontrol, event);
+		break;
+	}
+	return ret;
+}
+
+static int tavil_codec_enable_ear_pa(struct snd_soc_dapm_widget *w,
+				     struct snd_kcontrol *kcontrol,
+				     int event)
+{
+	int ret = 0;
+	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+
+	dev_dbg(codec->dev, "%s %s %d\n", __func__, w->name, event);
+
+	switch (event) {
+	case SND_SOC_DAPM_POST_PMU:
+		/*
+		 * 5ms sleep is required after PA is enabled as per
+		 * HW requirement
+		 */
+		usleep_range(5000, 5500);
+		snd_soc_update_bits(codec, WCD934X_CDC_RX0_RX_PATH_CTL,
+				    0x10, 0x00);
+		/* Remove mix path mute if it is enabled */
+		if ((snd_soc_read(codec, WCD934X_CDC_RX0_RX_PATH_MIX_CTL)) &
+		     0x10)
+			snd_soc_update_bits(codec,
+					    WCD934X_CDC_RX0_RX_PATH_MIX_CTL,
+					    0x10, 0x00);
+		break;
+	case SND_SOC_DAPM_POST_PMD:
+		/*
+		 * 5ms sleep is required after PA is disabled as per
+		 * HW requirement
+		 */
+		usleep_range(5000, 5500);
+
+		if (!(strcmp(w->name, "ANC EAR PA"))) {
+			ret = tavil_codec_enable_anc(w, kcontrol, event);
+			snd_soc_update_bits(codec, WCD934X_CDC_RX0_RX_PATH_CFG0,
+					    0x10, 0x00);
+		}
+		break;
+	}
+
+	return ret;
+}
+
+static void tavil_codec_override(struct snd_soc_codec *codec, int mode,
+				 int event)
+{
+	if (mode == CLS_AB || mode == CLS_AB_HIFI) {
+		switch (event) {
+		case SND_SOC_DAPM_PRE_PMU:
+		case SND_SOC_DAPM_POST_PMU:
+			snd_soc_update_bits(codec,
+				WCD9XXX_A_ANA_RX_SUPPLIES, 0x02, 0x02);
+		break;
+		case SND_SOC_DAPM_POST_PMD:
+			snd_soc_update_bits(codec,
+				WCD9XXX_A_ANA_RX_SUPPLIES, 0x02, 0x00);
+		break;
+		}
+	}
+}
+
+static void tavil_codec_clear_anc_tx_hold(struct tavil_priv *tavil)
+{
+	if (test_and_clear_bit(ANC_MIC_AMIC1, &tavil->status_mask))
+		tavil_codec_set_tx_hold(tavil->codec, WCD934X_ANA_AMIC1, false);
+	if (test_and_clear_bit(ANC_MIC_AMIC2, &tavil->status_mask))
+		tavil_codec_set_tx_hold(tavil->codec, WCD934X_ANA_AMIC2, false);
+	if (test_and_clear_bit(ANC_MIC_AMIC3, &tavil->status_mask))
+		tavil_codec_set_tx_hold(tavil->codec, WCD934X_ANA_AMIC3, false);
+	if (test_and_clear_bit(ANC_MIC_AMIC4, &tavil->status_mask))
+		tavil_codec_set_tx_hold(tavil->codec, WCD934X_ANA_AMIC4, false);
+}
+
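+/*
+ * HPH PA handlers: power-up needs a 7ms wait after PA enable with the
+ * compander on, 20ms with it off (HPH_PA_DELAY tracks whether the wait
+ * is still owed). In the ANC case both PAs are brought up together, so
+ * whichever channel sees PA_EN fully set (ANA_HPH & 0xC0 == 0xC0)
+ * performs the unmute and sequencing for the other channel as well.
+ */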
+static int tavil_codec_enable_hphr_pa(struct snd_soc_dapm_widget *w,
+				      struct snd_kcontrol *kcontrol,
+				      int event)
+{
+	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+	struct tavil_priv *tavil = snd_soc_codec_get_drvdata(codec);
+	struct tavil_dsd_config *dsd_conf = tavil->dsd_config;
+	int ret = 0;
+
+	dev_dbg(codec->dev, "%s %s %d\n", __func__, w->name, event);
+
+	switch (event) {
+	case SND_SOC_DAPM_PRE_PMU:
+		if (TAVIL_IS_1_0(tavil->wcd9xxx))
+			snd_soc_update_bits(codec, WCD934X_HPH_REFBUFF_LP_CTL,
+					    0x06, (0x03 << 1));
+
+		if ((!(strcmp(w->name, "ANC HPHR PA"))) &&
+		    (test_bit(HPH_PA_DELAY, &tavil->status_mask)))
+			snd_soc_update_bits(codec, WCD934X_ANA_HPH, 0xC0, 0xC0);
+
+		set_bit(HPH_PA_DELAY, &tavil->status_mask);
+		if (dsd_conf &&
+		    (snd_soc_read(codec, WCD934X_CDC_DSD1_PATH_CTL) & 0x01)) {
+			/* Set regulator mode to AB if DSD is enabled */
+			snd_soc_update_bits(codec, WCD934X_ANA_RX_SUPPLIES,
+					    0x02, 0x02);
+		}
+		if (!(strcmp(w->name, "HPHR PA")))
+			snd_soc_update_bits(codec, WCD934X_ANA_HPH, 0x40, 0x40);
+		break;
+	case SND_SOC_DAPM_POST_PMU:
+		if ((!(strcmp(w->name, "ANC HPHR PA")))) {
+			if ((snd_soc_read(codec, WCD934X_ANA_HPH) & 0xC0)
+					!= 0xC0)
+				/*
+				 * If PA_EN is not set (potentially in ANC case)
+				 * then do nothing for POST_PMU and let left
+				 * channel handle everything.
+				 */
+				break;
+		}
+		/*
+		 * 7ms sleep is required after PA is enabled as per
+		 * HW requirement. If compander is disabled, then
+		 * 20ms delay is needed.
+		 */
+		if (test_bit(HPH_PA_DELAY, &tavil->status_mask)) {
+			if (!tavil->comp_enabled[COMPANDER_2])
+				usleep_range(20000, 20100);
+			else
+				usleep_range(7000, 7100);
+			clear_bit(HPH_PA_DELAY, &tavil->status_mask);
+		}
+		if (tavil->anc_func) {
+			/* Clear Tx FE HOLD if both PAs are enabled */
+			if ((snd_soc_read(tavil->codec, WCD934X_ANA_HPH) &
+					0xC0) == 0xC0)
+				tavil_codec_clear_anc_tx_hold(tavil);
+		}
+
+		snd_soc_update_bits(codec, WCD934X_HPH_R_TEST, 0x01, 0x01);
+
+		/* Remove mute */
+		snd_soc_update_bits(codec, WCD934X_CDC_RX2_RX_PATH_CTL,
+				    0x10, 0x00);
+		/* Enable GM3 boost */
+		snd_soc_update_bits(codec, WCD934X_HPH_CNP_WG_CTL,
+				    0x80, 0x80);
+		/* Enable AutoChop timer at the end of power up */
+		snd_soc_update_bits(codec, WCD934X_HPH_NEW_INT_HPH_TIMER1,
+				    0x02, 0x02);
+		/* Remove mix path mute if it is enabled */
+		if ((snd_soc_read(codec, WCD934X_CDC_RX2_RX_PATH_MIX_CTL)) &
+				  0x10)
+			snd_soc_update_bits(codec,
+					    WCD934X_CDC_RX2_RX_PATH_MIX_CTL,
+					    0x10, 0x00);
+		if (dsd_conf &&
+		    (snd_soc_read(codec, WCD934X_CDC_DSD1_PATH_CTL) & 0x01))
+			snd_soc_update_bits(codec, WCD934X_CDC_DSD1_CFG2,
+					    0x04, 0x00);
+		if (!(strcmp(w->name, "ANC HPHR PA"))) {
+			pr_debug("%s:Do everything needed for left channel\n",
+				__func__);
+			/* Do everything needed for left channel */
+			snd_soc_update_bits(codec, WCD934X_HPH_L_TEST,
+					    0x01, 0x01);
+
+			/* Remove mute */
+			snd_soc_update_bits(codec, WCD934X_CDC_RX1_RX_PATH_CTL,
+					    0x10, 0x00);
+
+			/* Remove mix path mute if it is enabled */
+			if ((snd_soc_read(codec,
+					WCD934X_CDC_RX1_RX_PATH_MIX_CTL)) &
+					0x10)
+				snd_soc_update_bits(codec,
+					WCD934X_CDC_RX1_RX_PATH_MIX_CTL,
+					0x10, 0x00);
+
+			if (dsd_conf && (snd_soc_read(codec,
+						WCD934X_CDC_DSD0_PATH_CTL) &
+						0x01))
+				snd_soc_update_bits(codec,
+						    WCD934X_CDC_DSD0_CFG2,
+						    0x04, 0x00);
+			/* Remove ANC Rx from reset */
+			ret = tavil_codec_enable_anc(w, kcontrol, event);
+		}
+		tavil_codec_override(codec, tavil->hph_mode, event);
+		break;
+	case SND_SOC_DAPM_PRE_PMD:
+		blocking_notifier_call_chain(&tavil->mbhc->notifier,
+					     WCD_EVENT_PRE_HPHR_PA_OFF,
+					     &tavil->mbhc->wcd_mbhc);
+		/* Enable DSD Mute before PA disable */
+		if (dsd_conf &&
+		    (snd_soc_read(codec, WCD934X_CDC_DSD1_PATH_CTL) & 0x01))
+			snd_soc_update_bits(codec, WCD934X_CDC_DSD1_CFG2,
+					    0x04, 0x04);
+		snd_soc_update_bits(codec, WCD934X_HPH_R_TEST, 0x01, 0x00);
+		snd_soc_update_bits(codec, WCD934X_CDC_RX2_RX_PATH_CTL,
+				    0x10, 0x10);
+		snd_soc_update_bits(codec, WCD934X_CDC_RX2_RX_PATH_MIX_CTL,
+				    0x10, 0x10);
+		if (!(strcmp(w->name, "ANC HPHR PA")))
+			snd_soc_update_bits(codec, WCD934X_ANA_HPH, 0x40, 0x00);
+		if (!(strcmp(w->name, "HPHR PA")))
+			snd_soc_update_bits(codec, WCD934X_ANA_HPH, 0x40, 0x00);
+		break;
+	case SND_SOC_DAPM_POST_PMD:
+		/*
+		 * 5ms sleep is required after PA disable. If compander is
+		 * disabled, then 20ms delay is needed after PA disable.
+		 */
+		if (!tavil->comp_enabled[COMPANDER_2])
+			usleep_range(20000, 20100);
+		else
+			usleep_range(5000, 5100);
+		tavil_codec_override(codec, tavil->hph_mode, event);
+		blocking_notifier_call_chain(&tavil->mbhc->notifier,
+					     WCD_EVENT_POST_HPHR_PA_OFF,
+					     &tavil->mbhc->wcd_mbhc);
+		if (TAVIL_IS_1_0(tavil->wcd9xxx))
+			snd_soc_update_bits(codec, WCD934X_HPH_REFBUFF_LP_CTL,
+					    0x06, 0x0);
+		if (!(strcmp(w->name, "ANC HPHR PA"))) {
+			ret = tavil_codec_enable_anc(w, kcontrol, event);
+			snd_soc_update_bits(codec,
+					    WCD934X_CDC_RX2_RX_PATH_CFG0,
+					    0x10, 0x00);
+		}
+		break;
+	}
+
+	return ret;
+}
+
+static int tavil_codec_enable_hphl_pa(struct snd_soc_dapm_widget *w,
+				      struct snd_kcontrol *kcontrol,
+				      int event)
+{
+	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+	struct tavil_priv *tavil = snd_soc_codec_get_drvdata(codec);
+	struct tavil_dsd_config *dsd_conf = tavil->dsd_config;
+	int ret = 0;
+
+	dev_dbg(codec->dev, "%s %s %d\n", __func__, w->name, event);
+
+	switch (event) {
+	case SND_SOC_DAPM_PRE_PMU:
+		if (TAVIL_IS_1_0(tavil->wcd9xxx))
+			snd_soc_update_bits(codec, WCD934X_HPH_REFBUFF_LP_CTL,
+					    0x06, (0x03 << 1));
+		if ((!(strcmp(w->name, "ANC HPHL PA"))) &&
+		    (test_bit(HPH_PA_DELAY, &tavil->status_mask)))
+			snd_soc_update_bits(codec, WCD934X_ANA_HPH,
+					    0xC0, 0xC0);
+		if (!(strcmp(w->name, "HPHL PA")))
+			snd_soc_update_bits(codec, WCD934X_ANA_HPH, 0x80, 0x80);
+		set_bit(HPH_PA_DELAY, &tavil->status_mask);
+		if (dsd_conf &&
+		    (snd_soc_read(codec, WCD934X_CDC_DSD0_PATH_CTL) & 0x01)) {
+			/* Set regulator mode to AB if DSD is enabled */
+			snd_soc_update_bits(codec, WCD934X_ANA_RX_SUPPLIES,
+					    0x02, 0x02);
+		}
+		break;
+	case SND_SOC_DAPM_POST_PMU:
+		if (!(strcmp(w->name, "ANC HPHL PA"))) {
+			if ((snd_soc_read(codec, WCD934X_ANA_HPH) & 0xC0)
+								!= 0xC0)
+				/*
+				 * If PA_EN is not set (potentially in ANC
+				 * case) then do nothing for POST_PMU and
+				 * let right channel handle everything.
+				 */
+				break;
+		}
+		/*
+		 * 7ms sleep is required after PA is enabled as per
+		 * HW requirement. If compander is disabled, then
+		 * 20ms delay is needed.
+		 */
+		if (test_bit(HPH_PA_DELAY, &tavil->status_mask)) {
+			if (!tavil->comp_enabled[COMPANDER_1])
+				usleep_range(20000, 20100);
+			else
+				usleep_range(7000, 7100);
+			clear_bit(HPH_PA_DELAY, &tavil->status_mask);
+		}
+		if (tavil->anc_func) {
+			/* Clear Tx FE HOLD if both PAs are enabled */
+			if ((snd_soc_read(tavil->codec, WCD934X_ANA_HPH) &
+					0xC0) == 0xC0)
+				tavil_codec_clear_anc_tx_hold(tavil);
+		}
+
+		snd_soc_update_bits(codec, WCD934X_HPH_L_TEST, 0x01, 0x01);
+		/* Remove Mute on primary path */
+		snd_soc_update_bits(codec, WCD934X_CDC_RX1_RX_PATH_CTL,
+				    0x10, 0x00);
+		/* Enable GM3 boost */
+		snd_soc_update_bits(codec, WCD934X_HPH_CNP_WG_CTL,
+				    0x80, 0x80);
+		/* Enable AutoChop timer at the end of power up */
+		snd_soc_update_bits(codec, WCD934X_HPH_NEW_INT_HPH_TIMER1,
+				    0x02, 0x02);
+		/* Remove mix path mute if it is enabled */
+		if ((snd_soc_read(codec, WCD934X_CDC_RX1_RX_PATH_MIX_CTL)) &
+				  0x10)
+			snd_soc_update_bits(codec,
+					    WCD934X_CDC_RX1_RX_PATH_MIX_CTL,
+					    0x10, 0x00);
+		if (dsd_conf &&
+		    (snd_soc_read(codec, WCD934X_CDC_DSD0_PATH_CTL) & 0x01))
+			snd_soc_update_bits(codec, WCD934X_CDC_DSD0_CFG2,
+					    0x04, 0x00);
+		if (!(strcmp(w->name, "ANC HPHL PA"))) {
+			pr_debug("%s:Do everything needed for right channel\n",
+				__func__);
+
+			/* Do everything needed for right channel */
+			snd_soc_update_bits(codec, WCD934X_HPH_R_TEST,
+					    0x01, 0x01);
+
+			/* Remove mute */
+			snd_soc_update_bits(codec, WCD934X_CDC_RX2_RX_PATH_CTL,
+						0x10, 0x00);
+
+			/* Remove mix path mute if it is enabled */
+			if ((snd_soc_read(codec,
+					WCD934X_CDC_RX2_RX_PATH_MIX_CTL)) &
+					0x10)
+				snd_soc_update_bits(codec,
+						WCD934X_CDC_RX2_RX_PATH_MIX_CTL,
+						0x10, 0x00);
+			if (dsd_conf && (snd_soc_read(codec,
+					WCD934X_CDC_DSD1_PATH_CTL) & 0x01))
+				snd_soc_update_bits(codec,
+						    WCD934X_CDC_DSD1_CFG2,
+						    0x04, 0x00);
+			/* Remove ANC Rx from reset */
+			ret = tavil_codec_enable_anc(w, kcontrol, event);
+		}
+		tavil_codec_override(codec, tavil->hph_mode, event);
+		break;
+	case SND_SOC_DAPM_PRE_PMD:
+		blocking_notifier_call_chain(&tavil->mbhc->notifier,
+					     WCD_EVENT_PRE_HPHL_PA_OFF,
+					     &tavil->mbhc->wcd_mbhc);
+		/* Enable DSD Mute before PA disable */
+		if (dsd_conf &&
+		    (snd_soc_read(codec, WCD934X_CDC_DSD0_PATH_CTL) & 0x01))
+			snd_soc_update_bits(codec, WCD934X_CDC_DSD0_CFG2,
+					    0x04, 0x04);
+
+		snd_soc_update_bits(codec, WCD934X_HPH_L_TEST, 0x01, 0x00);
+		snd_soc_update_bits(codec, WCD934X_CDC_RX1_RX_PATH_CTL,
+				    0x10, 0x10);
+		snd_soc_update_bits(codec, WCD934X_CDC_RX1_RX_PATH_MIX_CTL,
+				    0x10, 0x10);
+		if (!(strcmp(w->name, "ANC HPHL PA")))
+			snd_soc_update_bits(codec, WCD934X_ANA_HPH,
+					    0x80, 0x00);
+		if (!(strcmp(w->name, "HPHL PA")))
+			snd_soc_update_bits(codec, WCD934X_ANA_HPH, 0x80, 0x00);
+		break;
+	case SND_SOC_DAPM_POST_PMD:
+		/*
+		 * 5ms sleep is required after PA disable. If compander is
+		 * disabled, then 20ms delay is needed after PA disable.
+		 */
+		if (!tavil->comp_enabled[COMPANDER_1])
+			usleep_range(20000, 20100);
+		else
+			usleep_range(5000, 5100);
+		tavil_codec_override(codec, tavil->hph_mode, event);
+		blocking_notifier_call_chain(&tavil->mbhc->notifier,
+					     WCD_EVENT_POST_HPHL_PA_OFF,
+					     &tavil->mbhc->wcd_mbhc);
+		if (TAVIL_IS_1_0(tavil->wcd9xxx))
+			snd_soc_update_bits(codec, WCD934X_HPH_REFBUFF_LP_CTL,
+					    0x06, 0x0);
+		if (!(strcmp(w->name, "ANC HPHL PA"))) {
+			ret = tavil_codec_enable_anc(w, kcontrol, event);
+			snd_soc_update_bits(codec,
+				WCD934X_CDC_RX1_RX_PATH_CFG0, 0x10, 0x00);
+		}
+		break;
+	}
+
+	return ret;
+}
+
+static int tavil_codec_enable_lineout_pa(struct snd_soc_dapm_widget *w,
+					 struct snd_kcontrol *kcontrol,
+					 int event)
+{
+	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+	u16 lineout_vol_reg = 0, lineout_mix_vol_reg = 0;
+	u16 dsd_mute_reg = 0, dsd_clk_reg = 0;
+	struct tavil_priv *tavil = snd_soc_codec_get_drvdata(codec);
+	struct tavil_dsd_config *dsd_conf = tavil->dsd_config;
+
+	dev_dbg(codec->dev, "%s %s %d\n", __func__, w->name, event);
+
+	if (w->reg == WCD934X_ANA_LO_1_2) {
+		if (w->shift == 7) {
+			lineout_vol_reg = WCD934X_CDC_RX3_RX_PATH_CTL;
+			lineout_mix_vol_reg = WCD934X_CDC_RX3_RX_PATH_MIX_CTL;
+			dsd_mute_reg = WCD934X_CDC_DSD0_CFG2;
+			dsd_clk_reg = WCD934X_CDC_DSD0_PATH_CTL;
+		} else if (w->shift == 6) {
+			lineout_vol_reg = WCD934X_CDC_RX4_RX_PATH_CTL;
+			lineout_mix_vol_reg = WCD934X_CDC_RX4_RX_PATH_MIX_CTL;
+			dsd_mute_reg = WCD934X_CDC_DSD1_CFG2;
+			dsd_clk_reg = WCD934X_CDC_DSD1_PATH_CTL;
+		}
+	} else {
+		dev_err(codec->dev, "%s: Error enabling lineout PA\n",
+			__func__);
+		return -EINVAL;
+	}
+
+	switch (event) {
+	case SND_SOC_DAPM_PRE_PMU:
+		tavil_codec_override(codec, CLS_AB, event);
+		break;
+	case SND_SOC_DAPM_POST_PMU:
+		/*
+		 * 5ms sleep is required after PA is enabled as per
+		 * HW requirement
+		 */
+		usleep_range(5000, 5500);
+		snd_soc_update_bits(codec, lineout_vol_reg,
+				    0x10, 0x00);
+		/* Remove mix path mute if it is enabled */
+		if ((snd_soc_read(codec, lineout_mix_vol_reg)) & 0x10)
+			snd_soc_update_bits(codec,
+					    lineout_mix_vol_reg,
+					    0x10, 0x00);
+		if (dsd_conf && (snd_soc_read(codec, dsd_clk_reg) & 0x01))
+			snd_soc_update_bits(codec, dsd_mute_reg, 0x04, 0x00);
+		break;
+	case SND_SOC_DAPM_PRE_PMD:
+		if (dsd_conf && (snd_soc_read(codec, dsd_clk_reg) & 0x01))
+			snd_soc_update_bits(codec, dsd_mute_reg, 0x04, 0x04);
+		break;
+	case SND_SOC_DAPM_POST_PMD:
+		/*
+		 * 5ms sleep is required after PA is disabled as per
+		 * HW requirement
+		 */
+		usleep_range(5000, 5500);
+		tavil_codec_override(codec, CLS_AB, event);
+		break;
+	default:
+		break;
+	}
+
+	return 0;
+}
+
+static int tavil_codec_ear_dac_event(struct snd_soc_dapm_widget *w,
+				     struct snd_kcontrol *kcontrol,
+				     int event)
+{
+	int ret = 0;
+	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+	struct tavil_priv *tavil = snd_soc_codec_get_drvdata(codec);
+
+	dev_dbg(codec->dev, "%s %s %d\n", __func__, w->name, event);
+
+	switch (event) {
+	case SND_SOC_DAPM_PRE_PMU:
+		/* Disable AutoChop timer during power up */
+		snd_soc_update_bits(codec, WCD934X_HPH_NEW_INT_HPH_TIMER1,
+				    0x02, 0x00);
+
+		if (tavil->anc_func)
+			ret = tavil_codec_enable_anc(w, kcontrol, event);
+
+		wcd_clsh_fsm(codec, &tavil->clsh_d,
+			     WCD_CLSH_EVENT_PRE_DAC,
+			     WCD_CLSH_STATE_EAR,
+			     CLS_H_NORMAL);
+		if (tavil->anc_func)
+			snd_soc_update_bits(codec, WCD934X_CDC_RX0_RX_PATH_CFG0,
+					    0x10, 0x10);
+		break;
+	case SND_SOC_DAPM_POST_PMD:
+		wcd_clsh_fsm(codec, &tavil->clsh_d,
+			     WCD_CLSH_EVENT_POST_PA,
+			     WCD_CLSH_STATE_EAR,
+			     CLS_H_NORMAL);
+		break;
+	default:
+		break;
+	}
+
+	return ret;
+}
+
+static int tavil_codec_hphr_dac_event(struct snd_soc_dapm_widget *w,
+				      struct snd_kcontrol *kcontrol,
+				      int event)
+{
+	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+	struct tavil_priv *tavil = snd_soc_codec_get_drvdata(codec);
+	int hph_mode = tavil->hph_mode;
+	u8 dem_inp;
+	struct tavil_dsd_config *dsd_conf = tavil->dsd_config;
+	int ret = 0;
+
+	dev_dbg(codec->dev, "%s wname: %s event: %d hph_mode: %d\n", __func__,
+		w->name, event, hph_mode);
+
+	switch (event) {
+	case SND_SOC_DAPM_PRE_PMU:
+		if (!(strcmp(w->name, "RX INT2 DAC"))) {
+			snd_soc_update_bits(codec, WCD934X_ANA_HPH, 0x20, 0x20);
+			snd_soc_update_bits(codec, WCD934X_ANA_HPH, 0x10, 0x10);
+		}
+		if (tavil->anc_func) {
+			ret = tavil_codec_enable_anc(w, kcontrol, event);
+			/* 40 msec delay is needed to avoid click and pop */
+			msleep(40);
+		}
+		/* Read DEM INP Select */
+		dem_inp = snd_soc_read(codec, WCD934X_CDC_RX2_RX_PATH_SEC0) &
+			  0x03;
+		if (((hph_mode == CLS_H_HIFI) || (hph_mode == CLS_H_LOHIFI) ||
+		     (hph_mode == CLS_H_LP)) && (dem_inp != 0x01)) {
+			dev_err(codec->dev, "%s: DEM Input not set correctly, hph_mode: %d\n",
+					__func__, hph_mode);
+			return -EINVAL;
+		}
+		if ((hph_mode != CLS_H_LP) && (hph_mode != CLS_H_ULP))
+			/* Ripple freq control enable */
+			snd_soc_update_bits(codec,
+					     WCD934X_SIDO_NEW_VOUT_D_FREQ2,
+					     0x01, 0x01);
+		/* Disable AutoChop timer during power up */
+		snd_soc_update_bits(codec, WCD934X_HPH_NEW_INT_HPH_TIMER1,
+				    0x02, 0x00);
+		/* Set RDAC gain */
+		if (TAVIL_IS_1_0(tavil->wcd9xxx))
+			snd_soc_update_bits(codec,
+					    WCD934X_HPH_NEW_INT_RDAC_GAIN_CTL,
+					    0xF0, 0x40);
+		if (dsd_conf &&
+		    (snd_soc_read(codec, WCD934X_CDC_DSD1_PATH_CTL) & 0x01))
+			hph_mode = CLS_H_HIFI;
+
+		wcd_clsh_fsm(codec, &tavil->clsh_d,
+			     WCD_CLSH_EVENT_PRE_DAC,
+			     WCD_CLSH_STATE_HPHR,
+			     hph_mode);
+		if (tavil->anc_func)
+			snd_soc_update_bits(codec,
+					    WCD934X_CDC_RX2_RX_PATH_CFG0,
+					    0x10, 0x10);
+		break;
+	case SND_SOC_DAPM_PRE_PMD:
+		if (!(strcmp(w->name, "RX INT2 DAC")))
+			snd_soc_update_bits(codec, WCD934X_ANA_HPH, 0x30, 0x00);
+		break;
+	case SND_SOC_DAPM_POST_PMD:
+		/* 1000us required as per HW requirement */
+		usleep_range(1000, 1100);
+		wcd_clsh_fsm(codec, &tavil->clsh_d,
+			     WCD_CLSH_EVENT_POST_PA,
+			     WCD_CLSH_STATE_HPHR,
+			     hph_mode);
+		if ((hph_mode != CLS_H_LP) && (hph_mode != CLS_H_ULP))
+			/* Ripple freq control disable */
+			snd_soc_update_bits(codec,
+					    WCD934X_SIDO_NEW_VOUT_D_FREQ2,
+					    0x01, 0x0);
+		/* Re-set RDAC gain */
+		if (TAVIL_IS_1_0(tavil->wcd9xxx))
+			snd_soc_update_bits(codec,
+					    WCD934X_HPH_NEW_INT_RDAC_GAIN_CTL,
+					    0xF0, 0x0);
+		break;
+	default:
+		break;
+	}
+
+	return 0;
+}
+
+static int tavil_codec_hphl_dac_event(struct snd_soc_dapm_widget *w,
+				      struct snd_kcontrol *kcontrol,
+				      int event)
+{
+	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+	struct tavil_priv *tavil = snd_soc_codec_get_drvdata(codec);
+	int hph_mode = tavil->hph_mode;
+	u8 dem_inp;
+	int ret = 0;
+	struct tavil_dsd_config *dsd_conf = tavil->dsd_config;
+	uint32_t impedl = 0, impedr = 0;
+
+	dev_dbg(codec->dev, "%s wname: %s event: %d hph_mode: %d\n", __func__,
+		w->name, event, hph_mode);
+
+	switch (event) {
+	case SND_SOC_DAPM_PRE_PMU:
+		if (tavil->anc_func) {
+			ret = tavil_codec_enable_anc(w, kcontrol, event);
+			/* 40 msec delay is needed to avoid click and pop */
+			msleep(40);
+		}
+		/* Read DEM INP Select */
+		dem_inp = snd_soc_read(codec, WCD934X_CDC_RX1_RX_PATH_SEC0) &
+			  0x03;
+		if (((hph_mode == CLS_H_HIFI) || (hph_mode == CLS_H_LOHIFI) ||
+		     (hph_mode == CLS_H_LP)) && (dem_inp != 0x01)) {
+			dev_err(codec->dev, "%s: DEM Input not set correctly, hph_mode: %d\n",
+					__func__, hph_mode);
+			return -EINVAL;
+		}
+		if ((hph_mode != CLS_H_LP) && (hph_mode != CLS_H_ULP))
+			/* Ripple freq control enable */
+			snd_soc_update_bits(codec,
+					     WCD934X_SIDO_NEW_VOUT_D_FREQ2,
+					     0x01, 0x01);
+		/* Disable AutoChop timer during power up */
+		snd_soc_update_bits(codec, WCD934X_HPH_NEW_INT_HPH_TIMER1,
+				    0x02, 0x00);
+		/* Set RDAC gain */
+		if (TAVIL_IS_1_0(tavil->wcd9xxx))
+			snd_soc_update_bits(codec,
+					    WCD934X_HPH_NEW_INT_RDAC_GAIN_CTL,
+					    0xF0, 0x40);
+		if (dsd_conf &&
+		    (snd_soc_read(codec, WCD934X_CDC_DSD0_PATH_CTL) & 0x01))
+			hph_mode = CLS_H_HIFI;
+
+		wcd_clsh_fsm(codec, &tavil->clsh_d,
+			     WCD_CLSH_EVENT_PRE_DAC,
+			     WCD_CLSH_STATE_HPHL,
+			     hph_mode);
+
+		if (tavil->anc_func)
+			snd_soc_update_bits(codec,
+					    WCD934X_CDC_RX1_RX_PATH_CFG0,
+					    0x10, 0x10);
+
+		ret = tavil_mbhc_get_impedance(tavil->mbhc,
+					       &impedl, &impedr);
+		if (!ret) {
+			wcd_clsh_imped_config(codec, impedl, false);
+			set_bit(CLSH_Z_CONFIG, &tavil->status_mask);
+		} else {
+			dev_dbg(codec->dev, "%s: Failed to get mbhc impedance %d\n",
+				__func__, ret);
+			ret = 0;
+		}
+
+		break;
+	case SND_SOC_DAPM_POST_PMD:
+		/* 1000us required as per HW requirement */
+		usleep_range(1000, 1100);
+		wcd_clsh_fsm(codec, &tavil->clsh_d,
+			     WCD_CLSH_EVENT_POST_PA,
+			     WCD_CLSH_STATE_HPHL,
+			     hph_mode);
+		if ((hph_mode != CLS_H_LP) && (hph_mode != CLS_H_ULP))
+			/* Ripple freq control disable */
+			snd_soc_update_bits(codec,
+					    WCD934X_SIDO_NEW_VOUT_D_FREQ2,
+					    0x01, 0x0);
+		/* Re-set RDAC gain */
+		if (TAVIL_IS_1_0(tavil->wcd9xxx))
+			snd_soc_update_bits(codec,
+					    WCD934X_HPH_NEW_INT_RDAC_GAIN_CTL,
+					    0xF0, 0x0);
+
+		if (test_bit(CLSH_Z_CONFIG, &tavil->status_mask)) {
+			wcd_clsh_imped_config(codec, impedl, true);
+			clear_bit(CLSH_Z_CONFIG, &tavil->status_mask);
+		}
+		break;
+	default:
+		break;
+	}
+
+	return ret;
+}
+
+static int tavil_codec_lineout_dac_event(struct snd_soc_dapm_widget *w,
+					 struct snd_kcontrol *kcontrol,
+					 int event)
+{
+	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+	struct tavil_priv *tavil = snd_soc_codec_get_drvdata(codec);
+
+	dev_dbg(codec->dev, "%s %s %d\n", __func__, w->name, event);
+
+	switch (event) {
+	case SND_SOC_DAPM_PRE_PMU:
+		wcd_clsh_fsm(codec, &tavil->clsh_d,
+			     WCD_CLSH_EVENT_PRE_DAC,
+			     WCD_CLSH_STATE_LO,
+			     CLS_AB);
+		break;
+	case SND_SOC_DAPM_POST_PMD:
+		wcd_clsh_fsm(codec, &tavil->clsh_d,
+			     WCD_CLSH_EVENT_POST_PA,
+			     WCD_CLSH_STATE_LO,
+			     CLS_AB);
+		break;
+	}
+
+	return 0;
+}
+
+static int tavil_codec_spk_boost_event(struct snd_soc_dapm_widget *w,
+					struct snd_kcontrol *kcontrol,
+					int event)
+{
+	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+	u16 boost_path_ctl, boost_path_cfg1;
+	u16 reg, reg_mix;
+
+	dev_dbg(codec->dev, "%s %s %d\n", __func__, w->name, event);
+
+	if (!strcmp(w->name, "RX INT7 CHAIN")) {
+		boost_path_ctl = WCD934X_CDC_BOOST0_BOOST_PATH_CTL;
+		boost_path_cfg1 = WCD934X_CDC_RX7_RX_PATH_CFG1;
+		reg = WCD934X_CDC_RX7_RX_PATH_CTL;
+		reg_mix = WCD934X_CDC_RX7_RX_PATH_MIX_CTL;
+	} else if (!strcmp(w->name, "RX INT8 CHAIN")) {
+		boost_path_ctl = WCD934X_CDC_BOOST1_BOOST_PATH_CTL;
+		boost_path_cfg1 = WCD934X_CDC_RX8_RX_PATH_CFG1;
+		reg = WCD934X_CDC_RX8_RX_PATH_CTL;
+		reg_mix = WCD934X_CDC_RX8_RX_PATH_MIX_CTL;
+	} else {
+		dev_err(codec->dev, "%s: unknown widget: %s\n",
+			__func__, w->name);
+		return -EINVAL;
+	}
+
+	switch (event) {
+	case SND_SOC_DAPM_PRE_PMU:
+		snd_soc_update_bits(codec, boost_path_cfg1, 0x01, 0x01);
+		snd_soc_update_bits(codec, boost_path_ctl, 0x10, 0x10);
+		snd_soc_update_bits(codec, reg, 0x10, 0x00);
+		if ((snd_soc_read(codec, reg_mix)) & 0x10)
+			snd_soc_update_bits(codec, reg_mix, 0x10, 0x00);
+		break;
+	case SND_SOC_DAPM_POST_PMD:
+		snd_soc_update_bits(codec, boost_path_ctl, 0x10, 0x00);
+		snd_soc_update_bits(codec, boost_path_cfg1, 0x01, 0x00);
+		break;
+	}
+
+	return 0;
+}
+
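+/*
+ * SoundWire channel accounting for the speaker interpolators: INT7
+ * widget users are reference-counted while INT8 is clamped to a single
+ * user. The channel count pushed to the SWR master is one per active
+ * interpolator, alongside a device-up notification on power-up.
+ */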
+static int __tavil_codec_enable_swr(struct snd_soc_dapm_widget *w, int event)
+{
+	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+	struct tavil_priv *tavil;
+	int ch_cnt = 0;
+
+	tavil = snd_soc_codec_get_drvdata(codec);
+
+	switch (event) {
+	case SND_SOC_DAPM_PRE_PMU:
+		if (((strnstr(w->name, "INT7_", sizeof("RX INT7_"))) ||
+			(strnstr(w->name, "INT7 MIX2",
+						sizeof("RX INT7 MIX2")))))
+			tavil->swr.rx_7_count++;
+		if ((strnstr(w->name, "INT8_", sizeof("RX INT8_"))) &&
+		    !tavil->swr.rx_8_count)
+			tavil->swr.rx_8_count++;
+		ch_cnt = !!(tavil->swr.rx_7_count) + tavil->swr.rx_8_count;
+
+		swrm_wcd_notify(tavil->swr.ctrl_data[0].swr_pdev,
+				SWR_DEVICE_UP, NULL);
+		swrm_wcd_notify(tavil->swr.ctrl_data[0].swr_pdev,
+				SWR_SET_NUM_RX_CH, &ch_cnt);
+		break;
+	case SND_SOC_DAPM_POST_PMD:
+		if ((strnstr(w->name, "INT7_", sizeof("RX INT7_")))  ||
+			(strnstr(w->name, "INT7 MIX2",
+			sizeof("RX INT7 MIX2"))))
+			tavil->swr.rx_7_count--;
+		if ((strnstr(w->name, "INT8_", sizeof("RX INT8_"))) &&
+		    tavil->swr.rx_8_count)
+			tavil->swr.rx_8_count--;
+		ch_cnt = !!(tavil->swr.rx_7_count) + tavil->swr.rx_8_count;
+
+		swrm_wcd_notify(tavil->swr.ctrl_data[0].swr_pdev,
+				SWR_SET_NUM_RX_CH, &ch_cnt);
+
+		break;
+	}
+	dev_dbg(tavil->dev, "%s: %s: current swr ch cnt: %d\n",
+		__func__, w->name, ch_cnt);
+
+	return 0;
+}
+
+static int tavil_codec_enable_swr(struct snd_soc_dapm_widget *w,
+				  struct snd_kcontrol *kcontrol, int event)
+{
+	return __tavil_codec_enable_swr(w, event);
+}
+
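+/*
+ * Load MAD (Mic Activity Detection) calibration: prefer calibration
+ * delivered through the hwdep interface, fall back to
+ * request_firmware(), then program the microphone, audio, beacon and
+ * ultrasound sections into the SOC_MAD_* registers.
+ */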
+static int tavil_codec_config_mad(struct snd_soc_codec *codec)
+{
+	int ret = 0;
+	int idx;
+	const struct firmware *fw;
+	struct firmware_cal *hwdep_cal = NULL;
+	struct wcd_mad_audio_cal *mad_cal = NULL;
+	const void *data;
+	const char *filename = WCD934X_MAD_AUDIO_FIRMWARE_PATH;
+	struct tavil_priv *tavil = snd_soc_codec_get_drvdata(codec);
+	size_t cal_size;
+
+	hwdep_cal = wcdcal_get_fw_cal(tavil->fw_data, WCD9XXX_MAD_CAL);
+	if (hwdep_cal) {
+		data = hwdep_cal->data;
+		cal_size = hwdep_cal->size;
+		dev_dbg(codec->dev, "%s: using hwdep calibration\n",
+			__func__);
+	} else {
+		ret = request_firmware(&fw, filename, codec->dev);
+		if (ret || !fw) {
+			dev_err(codec->dev,
+				"%s: MAD firmware acquire failed, err = %d\n",
+				__func__, ret);
+			return -ENODEV;
+		}
+		data = fw->data;
+		cal_size = fw->size;
+		dev_dbg(codec->dev, "%s: using request_firmware calibration\n",
+			__func__);
+	}
+
+	if (cal_size < sizeof(*mad_cal)) {
+		dev_err(codec->dev,
+			"%s: Incorrect size %zd for MAD Cal, expected %zd\n",
+			__func__, cal_size, sizeof(*mad_cal));
+		ret = -EINVAL;
+		goto done;
+	}
+
+	mad_cal = (struct wcd_mad_audio_cal *) (data);
+	if (!mad_cal) {
+		dev_err(codec->dev,
+			"%s: Invalid calibration data\n",
+			__func__);
+		ret = -EINVAL;
+		goto done;
+	}
+
+	snd_soc_write(codec, WCD934X_SOC_MAD_MAIN_CTL_2,
+		      mad_cal->microphone_info.cycle_time);
+	snd_soc_update_bits(codec, WCD934X_SOC_MAD_MAIN_CTL_1, 0xFF << 3,
+			    ((uint16_t)mad_cal->microphone_info.settle_time)
+			    << 3);
+
+	/* Audio */
+	snd_soc_write(codec, WCD934X_SOC_MAD_AUDIO_CTL_8,
+		      mad_cal->audio_info.rms_omit_samples);
+	snd_soc_update_bits(codec, WCD934X_SOC_MAD_AUDIO_CTL_1,
+			    0x07 << 4, mad_cal->audio_info.rms_comp_time << 4);
+	snd_soc_update_bits(codec, WCD934X_SOC_MAD_AUDIO_CTL_2, 0x03 << 2,
+			    mad_cal->audio_info.detection_mechanism << 2);
+	snd_soc_write(codec, WCD934X_SOC_MAD_AUDIO_CTL_7,
+		      mad_cal->audio_info.rms_diff_threshold & 0x3F);
+	snd_soc_write(codec, WCD934X_SOC_MAD_AUDIO_CTL_5,
+		      mad_cal->audio_info.rms_threshold_lsb);
+	snd_soc_write(codec, WCD934X_SOC_MAD_AUDIO_CTL_6,
+		      mad_cal->audio_info.rms_threshold_msb);
+
+	for (idx = 0; idx < ARRAY_SIZE(mad_cal->audio_info.iir_coefficients);
+	     idx++) {
+		snd_soc_update_bits(codec, WCD934X_SOC_MAD_AUDIO_IIR_CTL_PTR,
+				    0x3F, idx);
+		snd_soc_write(codec, WCD934X_SOC_MAD_AUDIO_IIR_CTL_VAL,
+			      mad_cal->audio_info.iir_coefficients[idx]);
+		dev_dbg(codec->dev, "%s:MAD Audio IIR Coef[%d] = 0X%x",
+			__func__, idx,
+			mad_cal->audio_info.iir_coefficients[idx]);
+	}
+
+	/* Beacon */
+	snd_soc_write(codec, WCD934X_SOC_MAD_BEACON_CTL_8,
+		      mad_cal->beacon_info.rms_omit_samples);
+	snd_soc_update_bits(codec, WCD934X_SOC_MAD_BEACON_CTL_1,
+			    0x07 << 4, mad_cal->beacon_info.rms_comp_time << 4);
+	snd_soc_update_bits(codec, WCD934X_SOC_MAD_BEACON_CTL_2, 0x03 << 2,
+			    mad_cal->beacon_info.detection_mechanism << 2);
+	snd_soc_write(codec, WCD934X_SOC_MAD_BEACON_CTL_7,
+		      mad_cal->beacon_info.rms_diff_threshold & 0x1F);
+	snd_soc_write(codec, WCD934X_SOC_MAD_BEACON_CTL_5,
+		      mad_cal->beacon_info.rms_threshold_lsb);
+	snd_soc_write(codec, WCD934X_SOC_MAD_BEACON_CTL_6,
+		      mad_cal->beacon_info.rms_threshold_msb);
+
+	for (idx = 0; idx < ARRAY_SIZE(mad_cal->beacon_info.iir_coefficients);
+	     idx++) {
+		snd_soc_update_bits(codec, WCD934X_SOC_MAD_BEACON_IIR_CTL_PTR,
+				    0x3F, idx);
+		snd_soc_write(codec, WCD934X_SOC_MAD_BEACON_IIR_CTL_VAL,
+			      mad_cal->beacon_info.iir_coefficients[idx]);
+		dev_dbg(codec->dev, "%s:MAD Beacon IIR Coef[%d] = 0X%x",
+			__func__, idx,
+			mad_cal->beacon_info.iir_coefficients[idx]);
+	}
+
+	/* Ultrasound */
+	snd_soc_update_bits(codec, WCD934X_SOC_MAD_ULTR_CTL_1,
+			    0x07 << 4,
+			    mad_cal->ultrasound_info.rms_comp_time << 4);
+	snd_soc_update_bits(codec, WCD934X_SOC_MAD_ULTR_CTL_2, 0x03 << 2,
+			    mad_cal->ultrasound_info.detection_mechanism << 2);
+	snd_soc_write(codec, WCD934X_SOC_MAD_ULTR_CTL_7,
+		      mad_cal->ultrasound_info.rms_diff_threshold & 0x1F);
+	snd_soc_write(codec, WCD934X_SOC_MAD_ULTR_CTL_5,
+		      mad_cal->ultrasound_info.rms_threshold_lsb);
+	snd_soc_write(codec, WCD934X_SOC_MAD_ULTR_CTL_6,
+		      mad_cal->ultrasound_info.rms_threshold_msb);
+
+done:
+	if (!hwdep_cal)
+		release_firmware(fw);
+
+	return ret;
+}
+
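+/*
+ * MAD enable/disable: skipped entirely when the CPE input is DEC1
+ * (MAD bypass). Enable programs the calibration first, then turns on
+ * the MAD clock and releases its reset; disable reverses the sequence.
+ */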
+static int __tavil_codec_enable_mad(struct snd_soc_codec *codec, bool enable)
+{
+	int rc = 0;
+
+	/* Return if CPE INPUT is DEC1 */
+	if (snd_soc_read(codec, WCD934X_CPE_SS_SVA_CFG) & 0x04) {
+		dev_dbg(codec->dev, "%s: MAD is bypassed, skip mad %s\n",
+			__func__, enable ? "enable" : "disable");
+		return rc;
+	}
+
+	dev_dbg(codec->dev, "%s: enable = %s\n", __func__,
+		enable ? "enable" : "disable");
+
+	if (enable) {
+		snd_soc_update_bits(codec, WCD934X_SOC_MAD_AUDIO_CTL_2,
+				    0x03, 0x03);
+		rc = tavil_codec_config_mad(codec);
+		if (IS_ERR_VALUE(rc)) {
+			snd_soc_update_bits(codec, WCD934X_SOC_MAD_AUDIO_CTL_2,
+					    0x03, 0x00);
+			goto done;
+		}
+
+		/* Turn on MAD clk */
+		snd_soc_update_bits(codec, WCD934X_CPE_SS_MAD_CTL,
+				    0x01, 0x01);
+
+		/* Undo reset for MAD */
+		snd_soc_update_bits(codec, WCD934X_CPE_SS_MAD_CTL,
+				    0x02, 0x00);
+		snd_soc_update_bits(codec, WCD934X_CODEC_RPM_CLK_MCLK_CFG,
+					0x04, 0x04);
+	} else {
+		snd_soc_update_bits(codec, WCD934X_SOC_MAD_AUDIO_CTL_2,
+				    0x03, 0x00);
+		/* Reset the MAD block */
+		snd_soc_update_bits(codec, WCD934X_CPE_SS_MAD_CTL,
+				    0x02, 0x02);
+		/* Turn off MAD clk */
+		snd_soc_update_bits(codec, WCD934X_CPE_SS_MAD_CTL,
+				    0x01, 0x00);
+		snd_soc_update_bits(codec, WCD934X_CODEC_RPM_CLK_MCLK_CFG,
+					0x04, 0x00);
+	}
+done:
+	return rc;
+}
+
+static int tavil_codec_ape_enable_mad(struct snd_soc_dapm_widget *w,
+				      struct snd_kcontrol *kcontrol,
+				      int event)
+{
+	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+	struct tavil_priv *tavil = snd_soc_codec_get_drvdata(codec);
+	int rc = 0;
+
+	switch (event) {
+	case SND_SOC_DAPM_PRE_PMU:
+		snd_soc_update_bits(codec, WCD934X_CPE_SS_SVA_CFG, 0x40, 0x40);
+		rc = __tavil_codec_enable_mad(codec, true);
+		break;
+	case SND_SOC_DAPM_PRE_PMD:
+		snd_soc_update_bits(codec, WCD934X_CPE_SS_SVA_CFG, 0x40, 0x00);
+		__tavil_codec_enable_mad(codec, false);
+		break;
+	}
+
+	dev_dbg(tavil->dev, "%s: event = %d\n", __func__, event);
+	return rc;
+}
+
+static int tavil_codec_cpe_mad_ctl(struct snd_soc_dapm_widget *w,
+				   struct snd_kcontrol *kcontrol, int event)
+{
+	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+	struct tavil_priv *tavil = snd_soc_codec_get_drvdata(codec);
+	int rc = 0;
+
+	switch (event) {
+	case SND_SOC_DAPM_PRE_PMU:
+		tavil->mad_switch_cnt++;
+		if (tavil->mad_switch_cnt != 1)
+			goto done;
+
+		snd_soc_update_bits(codec, WCD934X_CPE_SS_SVA_CFG, 0x20, 0x20);
+		rc = __tavil_codec_enable_mad(codec, true);
+		if (IS_ERR_VALUE(rc)) {
+			tavil->mad_switch_cnt--;
+			goto done;
+		}
+
+		break;
+	case SND_SOC_DAPM_PRE_PMD:
+		tavil->mad_switch_cnt--;
+		if (tavil->mad_switch_cnt != 0)
+			goto done;
+
+		snd_soc_update_bits(codec, WCD934X_CPE_SS_SVA_CFG, 0x20, 0x00);
+		__tavil_codec_enable_mad(codec, false);
+		break;
+	}
+done:
+	dev_dbg(tavil->dev, "%s: event = %d, mad_switch_cnt = %d\n",
+		__func__, event, tavil->mad_switch_cnt);
+	return rc;
+}
+
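+/*
+ * Pick the ASRC conversion mode from the 4-bit sample-rate codes read
+ * from the main and mix RX path CTL registers. The comparisons below
+ * treat low codes as the integer (48K-family) rates and high codes as
+ * the fractional (44.1K-family) rates; the exact code-to-rate mapping
+ * is defined by the RX path register fields.
+ */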
+static int tavil_get_asrc_mode(struct tavil_priv *tavil, int asrc,
+			       u8 main_sr, u8 mix_sr)
+{
+	u8 asrc_output_mode;
+	int asrc_mode = CONV_88P2K_TO_384K;
+
+	if ((asrc < 0) || (asrc >= ASRC_MAX))
+		return 0;
+
+	asrc_output_mode = tavil->asrc_output_mode[asrc];
+
+	if (asrc_output_mode) {
+		/*
+		 * If Mix sample rate is < 96KHz, use 96K to 352.8K
+		 * conversion, or else use 384K to 352.8K conversion
+		 */
+		if (mix_sr < 5)
+			asrc_mode = CONV_96K_TO_352P8K;
+		else
+			asrc_mode = CONV_384K_TO_352P8K;
+	} else {
+		/* Integer main and Fractional mix path */
+		if (main_sr < 8 && mix_sr > 9) {
+			asrc_mode = CONV_352P8K_TO_384K;
+		} else if (main_sr > 8 && mix_sr < 8) {
+			/* Fractional main and Integer mix path */
+			if (mix_sr < 5)
+				asrc_mode = CONV_96K_TO_352P8K;
+			else
+				asrc_mode = CONV_384K_TO_352P8K;
+		} else if (main_sr < 8 && mix_sr < 8) {
+			/* Integer main and Integer mix path */
+			asrc_mode = CONV_96K_TO_384K;
+		}
+	}
+
+	return asrc_mode;
+}
+
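+/*
+ * Reference-counted enable of one mixing ASRC. ASRC0/ASRC1 and
+ * ASRC2/ASRC3 are paired, which is why every input maps to both a
+ * clk_reg and a paired_reg: the 0x02 bit is cleared on both halves of
+ * the pair before the first user powers the ASRC up, and set again in
+ * clk_reg once the last user leaves.
+ */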
+static int tavil_codec_enable_asrc(struct snd_soc_codec *codec,
+				   int asrc_in, int event)
+{
+	struct tavil_priv *tavil = snd_soc_codec_get_drvdata(codec);
+	u16 cfg_reg, ctl_reg, clk_reg, asrc_ctl, mix_ctl_reg, paired_reg;
+	int asrc, ret = 0;
+	u8 main_sr, mix_sr, asrc_mode = 0;
+
+	switch (asrc_in) {
+	case ASRC_IN_HPHL:
+		cfg_reg = WCD934X_CDC_RX1_RX_PATH_CFG0;
+		ctl_reg = WCD934X_CDC_RX1_RX_PATH_CTL;
+		clk_reg = WCD934X_MIXING_ASRC0_CLK_RST_CTL;
+		paired_reg = WCD934X_MIXING_ASRC1_CLK_RST_CTL;
+		asrc_ctl = WCD934X_MIXING_ASRC0_CTL1;
+		asrc = ASRC0;
+		break;
+	case ASRC_IN_LO1:
+		cfg_reg = WCD934X_CDC_RX3_RX_PATH_CFG0;
+		ctl_reg = WCD934X_CDC_RX3_RX_PATH_CTL;
+		clk_reg = WCD934X_MIXING_ASRC0_CLK_RST_CTL;
+		paired_reg = WCD934X_MIXING_ASRC1_CLK_RST_CTL;
+		asrc_ctl = WCD934X_MIXING_ASRC0_CTL1;
+		asrc = ASRC0;
+		break;
+	case ASRC_IN_HPHR:
+		cfg_reg = WCD934X_CDC_RX2_RX_PATH_CFG0;
+		ctl_reg = WCD934X_CDC_RX2_RX_PATH_CTL;
+		clk_reg = WCD934X_MIXING_ASRC1_CLK_RST_CTL;
+		paired_reg = WCD934X_MIXING_ASRC0_CLK_RST_CTL;
+		asrc_ctl = WCD934X_MIXING_ASRC1_CTL1;
+		asrc = ASRC1;
+		break;
+	case ASRC_IN_LO2:
+		cfg_reg = WCD934X_CDC_RX4_RX_PATH_CFG0;
+		ctl_reg = WCD934X_CDC_RX4_RX_PATH_CTL;
+		clk_reg = WCD934X_MIXING_ASRC1_CLK_RST_CTL;
+		paired_reg = WCD934X_MIXING_ASRC0_CLK_RST_CTL;
+		asrc_ctl = WCD934X_MIXING_ASRC1_CTL1;
+		asrc = ASRC1;
+		break;
+	case ASRC_IN_SPKR1:
+		cfg_reg = WCD934X_CDC_RX7_RX_PATH_CFG0;
+		ctl_reg = WCD934X_CDC_RX7_RX_PATH_CTL;
+		clk_reg = WCD934X_MIXING_ASRC2_CLK_RST_CTL;
+		paired_reg = WCD934X_MIXING_ASRC3_CLK_RST_CTL;
+		asrc_ctl = WCD934X_MIXING_ASRC2_CTL1;
+		asrc = ASRC2;
+		break;
+	case ASRC_IN_SPKR2:
+		cfg_reg = WCD934X_CDC_RX8_RX_PATH_CFG0;
+		ctl_reg = WCD934X_CDC_RX8_RX_PATH_CTL;
+		clk_reg = WCD934X_MIXING_ASRC3_CLK_RST_CTL;
+		paired_reg = WCD934X_MIXING_ASRC2_CLK_RST_CTL;
+		asrc_ctl = WCD934X_MIXING_ASRC3_CTL1;
+		asrc = ASRC3;
+		break;
+	default:
+		dev_err(codec->dev, "%s: Invalid asrc input :%d\n", __func__,
+			asrc_in);
+		ret = -EINVAL;
+		goto done;
+	}
+
+	switch (event) {
+	case SND_SOC_DAPM_PRE_PMU:
+		if (tavil->asrc_users[asrc] == 0) {
+			if ((snd_soc_read(codec, clk_reg) & 0x02) ||
+			    (snd_soc_read(codec, paired_reg) & 0x02)) {
+				snd_soc_update_bits(codec, clk_reg,
+						    0x02, 0x00);
+				snd_soc_update_bits(codec, paired_reg,
+						    0x02, 0x00);
+			}
+			snd_soc_update_bits(codec, cfg_reg, 0x80, 0x80);
+			snd_soc_update_bits(codec, clk_reg, 0x01, 0x01);
+			main_sr = snd_soc_read(codec, ctl_reg) & 0x0F;
+			mix_ctl_reg = ctl_reg + 5;
+			mix_sr = snd_soc_read(codec, mix_ctl_reg) & 0x0F;
+			asrc_mode = tavil_get_asrc_mode(tavil, asrc,
+							main_sr, mix_sr);
+			dev_dbg(codec->dev, "%s: main_sr:%d mix_sr:%d asrc_mode %d\n",
+				__func__, main_sr, mix_sr, asrc_mode);
+			snd_soc_update_bits(codec, asrc_ctl, 0x07, asrc_mode);
+		}
+		tavil->asrc_users[asrc]++;
+		break;
+	case SND_SOC_DAPM_POST_PMD:
+		tavil->asrc_users[asrc]--;
+		if (tavil->asrc_users[asrc] <= 0) {
+			tavil->asrc_users[asrc] = 0;
+			snd_soc_update_bits(codec, asrc_ctl, 0x07, 0x00);
+			snd_soc_update_bits(codec, cfg_reg, 0x80, 0x00);
+			snd_soc_update_bits(codec, clk_reg, 0x03, 0x02);
+		}
+		break;
+	}
+
+	dev_dbg(codec->dev, "%s: ASRC%d, users: %d\n",
+		__func__, asrc, tavil->asrc_users[asrc]);
+
+done:
+	return ret;
+}
+
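+/*
+ * Each spline ASRC has a 2-bit input-select field in SPLINE_ASRC_CFG0:
+ * bits [1:0] for ASRC0 (HPHL vs LO1), [3:2] for ASRC1 (HPHR vs LO2),
+ * [5:4] for ASRC2 (SPKR1) and [7:6] for ASRC3 (SPKR2). The field value
+ * decides which input is passed to tavil_codec_enable_asrc().
+ */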
+static int tavil_codec_enable_asrc_resampler(struct snd_soc_dapm_widget *w,
+					     struct snd_kcontrol *kcontrol,
+					     int event)
+{
+	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+	int ret = 0;
+	u8 cfg, asrc_in;
+
+	cfg = snd_soc_read(codec, WCD934X_CDC_RX_INP_MUX_SPLINE_ASRC_CFG0);
+	if (!(cfg & 0xFF)) {
+		dev_err(codec->dev, "%s: ASRC%u input not selected\n",
+			__func__, w->shift);
+		return -EINVAL;
+	}
+
+	switch (w->shift) {
+	case ASRC0:
+		asrc_in = ((cfg & 0x03) == 1) ? ASRC_IN_HPHL : ASRC_IN_LO1;
+		ret = tavil_codec_enable_asrc(codec, asrc_in, event);
+		break;
+	case ASRC1:
+		asrc_in = ((cfg & 0x0C) == 4) ? ASRC_IN_HPHR : ASRC_IN_LO2;
+		ret = tavil_codec_enable_asrc(codec, asrc_in, event);
+		break;
+	case ASRC2:
+		asrc_in = ((cfg & 0x30) == 0x20) ? ASRC_IN_SPKR1 : ASRC_INVALID;
+		ret = tavil_codec_enable_asrc(codec, asrc_in, event);
+		break;
+	case ASRC3:
+		asrc_in = ((cfg & 0xC0) == 0x80) ? ASRC_IN_SPKR2 : ASRC_INVALID;
+		ret = tavil_codec_enable_asrc(codec, asrc_in, event);
+		break;
+	default:
+		dev_err(codec->dev, "%s: Invalid asrc:%u\n", __func__,
+			w->shift);
+		ret = -EINVAL;
+		break;
+	}
+
+	return ret;
+}
+
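+/*
+ * Reference-counted native (44.1K-family) clock supply: the first user
+ * enables the PLL, programs and enables MCLK2, ungates the codec clock
+ * and turns on the MCLK/FS counter controls; the last user tears the
+ * sequence down in reverse order.
+ */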
+static int tavil_enable_native_supply(struct snd_soc_dapm_widget *w,
+				      struct snd_kcontrol *kcontrol, int event)
+{
+	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+	struct tavil_priv *tavil = snd_soc_codec_get_drvdata(codec);
+
+	switch (event) {
+	case SND_SOC_DAPM_PRE_PMU:
+		if (++tavil->native_clk_users == 1) {
+			snd_soc_update_bits(codec, WCD934X_CLK_SYS_PLL_ENABLES,
+					    0x01, 0x01);
+			usleep_range(100, 120);
+			snd_soc_update_bits(codec, WCD934X_CLK_SYS_MCLK2_PRG1,
+					    0x06, 0x02);
+			snd_soc_update_bits(codec, WCD934X_CLK_SYS_MCLK2_PRG1,
+					    0x01, 0x01);
+			snd_soc_update_bits(codec, WCD934X_CODEC_RPM_CLK_GATE,
+					    0x04, 0x00);
+			usleep_range(30, 50);
+			snd_soc_update_bits(codec,
+					WCD934X_CDC_CLK_RST_CTRL_MCLK_CONTROL,
+					0x02, 0x02);
+			snd_soc_update_bits(codec,
+					WCD934X_CDC_CLK_RST_CTRL_FS_CNT_CONTROL,
+					0x10, 0x10);
+		}
+		break;
+	case SND_SOC_DAPM_PRE_PMD:
+		if (tavil->native_clk_users &&
+		    (--tavil->native_clk_users == 0)) {
+			snd_soc_update_bits(codec,
+					WCD934X_CDC_CLK_RST_CTRL_FS_CNT_CONTROL,
+					0x10, 0x00);
+			snd_soc_update_bits(codec,
+					WCD934X_CDC_CLK_RST_CTRL_MCLK_CONTROL,
+					0x02, 0x00);
+			snd_soc_update_bits(codec, WCD934X_CODEC_RPM_CLK_GATE,
+					    0x04, 0x04);
+			snd_soc_update_bits(codec, WCD934X_CLK_SYS_MCLK2_PRG1,
+					    0x01, 0x00);
+			snd_soc_update_bits(codec, WCD934X_CLK_SYS_MCLK2_PRG1,
+					    0x06, 0x00);
+			snd_soc_update_bits(codec, WCD934X_CLK_SYS_PLL_ENABLES,
+					    0x01, 0x00);
+		}
+		break;
+	}
+
+	dev_dbg(codec->dev, "%s: native_clk_users: %d, event: %d\n",
+		__func__, tavil->native_clk_users, event);
+
+	return 0;
+}
+
+static void tavil_codec_hphdelay_lutbypass(struct snd_soc_codec *codec,
+				    u16 interp_idx, int event)
+{
+	struct tavil_priv *tavil = snd_soc_codec_get_drvdata(codec);
+	u8 hph_dly_mask;
+	u16 hph_lut_bypass_reg = 0;
+	u16 hph_comp_ctrl7 = 0;
+
+	switch (interp_idx) {
+	case INTERP_HPHL:
+		hph_dly_mask = 1;
+		hph_lut_bypass_reg = WCD934X_CDC_TOP_HPHL_COMP_LUT;
+		hph_comp_ctrl7 = WCD934X_CDC_COMPANDER1_CTL7;
+		break;
+	case INTERP_HPHR:
+		hph_dly_mask = 2;
+		hph_lut_bypass_reg = WCD934X_CDC_TOP_HPHR_COMP_LUT;
+		hph_comp_ctrl7 = WCD934X_CDC_COMPANDER2_CTL7;
+		break;
+	default:
+		break;
+	}
+
+	if (hph_lut_bypass_reg && SND_SOC_DAPM_EVENT_ON(event)) {
+		snd_soc_update_bits(codec, WCD934X_CDC_CLSH_TEST0,
+				    hph_dly_mask, 0x0);
+		snd_soc_update_bits(codec, hph_lut_bypass_reg, 0x80, 0x80);
+		if (tavil->hph_mode == CLS_H_ULP)
+			snd_soc_update_bits(codec, hph_comp_ctrl7, 0x20, 0x20);
+	}
+
+	if (hph_lut_bypass_reg && SND_SOC_DAPM_EVENT_OFF(event)) {
+		snd_soc_update_bits(codec, WCD934X_CDC_CLSH_TEST0,
+				    hph_dly_mask, hph_dly_mask);
+		snd_soc_update_bits(codec, hph_lut_bypass_reg, 0x80, 0x00);
+		snd_soc_update_bits(codec, hph_comp_ctrl7, 0x20, 0x0);
+	}
+}
+
+static void tavil_codec_hd2_control(struct tavil_priv *priv,
+				    u16 interp_idx, int event)
+{
+	u16 hd2_scale_reg;
+	u16 hd2_enable_reg = 0;
+	struct snd_soc_codec *codec = priv->codec;
+
+	if (TAVIL_IS_1_1(priv->wcd9xxx))
+		return;
+
+	switch (interp_idx) {
+	case INTERP_HPHL:
+		hd2_scale_reg = WCD934X_CDC_RX1_RX_PATH_SEC3;
+		hd2_enable_reg = WCD934X_CDC_RX1_RX_PATH_CFG0;
+		break;
+	case INTERP_HPHR:
+		hd2_scale_reg = WCD934X_CDC_RX2_RX_PATH_SEC3;
+		hd2_enable_reg = WCD934X_CDC_RX2_RX_PATH_CFG0;
+		break;
+	}
+
+	if (hd2_enable_reg && SND_SOC_DAPM_EVENT_ON(event)) {
+		snd_soc_update_bits(codec, hd2_scale_reg, 0x3C, 0x14);
+		snd_soc_update_bits(codec, hd2_enable_reg, 0x04, 0x04);
+	}
+
+	if (hd2_enable_reg && SND_SOC_DAPM_EVENT_OFF(event)) {
+		snd_soc_update_bits(codec, hd2_enable_reg, 0x04, 0x00);
+		snd_soc_update_bits(codec, hd2_scale_reg, 0x3C, 0x00);
+	}
+}
+
+static int tavil_codec_config_ear_spkr_gain(struct snd_soc_codec *codec,
+					    int event, int gain_reg)
+{
+	int comp_gain_offset, val;
+	struct tavil_priv *tavil = snd_soc_codec_get_drvdata(codec);
+
+	switch (tavil->swr.spkr_mode) {
+	/* Compander gain in SPKR_MODE1 case is 12 dB */
+	case WCD934X_SPKR_MODE_1:
+		comp_gain_offset = -12;
+		break;
+	/* Default case compander gain is 15 dB */
+	default:
+		comp_gain_offset = -15;
+		break;
+	}
+
+	switch (event) {
+	case SND_SOC_DAPM_POST_PMU:
+		/* Apply ear spkr gain only if compander is enabled */
+		if (tavil->comp_enabled[COMPANDER_7] &&
+		    (gain_reg == WCD934X_CDC_RX7_RX_VOL_CTL ||
+		     gain_reg == WCD934X_CDC_RX7_RX_VOL_MIX_CTL) &&
+		    (tavil->ear_spkr_gain != 0)) {
+			/* For example, val is -8 (-12 + 5 - 1) for 4 dB of gain */
+			val = comp_gain_offset + tavil->ear_spkr_gain - 1;
+			snd_soc_write(codec, gain_reg, val);
+
+			dev_dbg(codec->dev, "%s: RX7 Volume %d dB\n",
+				__func__, val);
+		}
+		break;
+	case SND_SOC_DAPM_POST_PMD:
+		/*
+		 * Reset RX7 volume to 0 dB if compander is enabled and
+		 * ear_spkr_gain is non-zero.
+		 */
+		if (tavil->comp_enabled[COMPANDER_7] &&
+		    (gain_reg == WCD934X_CDC_RX7_RX_VOL_CTL ||
+		     gain_reg == WCD934X_CDC_RX7_RX_VOL_MIX_CTL) &&
+		    (tavil->ear_spkr_gain != 0)) {
+			snd_soc_write(codec, gain_reg, 0x0);
+
+			dev_dbg(codec->dev, "%s: Reset RX7 Volume to 0 dB\n",
+				__func__);
+		}
+		break;
+	}
+
+	return 0;
+}
+
+static int tavil_config_compander(struct snd_soc_codec *codec, int interp_n,
+				  int event)
+{
+	struct tavil_priv *tavil = snd_soc_codec_get_drvdata(codec);
+	int comp;
+	u16 comp_ctl0_reg, rx_path_cfg0_reg;
+
+	/* EAR does not have compander */
+	if (!interp_n)
+		return 0;
+
+	comp = interp_n - 1;
+	dev_dbg(codec->dev, "%s: event %d compander %d, enabled %d\n",
+		__func__, event, comp + 1, tavil->comp_enabled[comp]);
+
+	if (!tavil->comp_enabled[comp])
+		return 0;
+
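+	/*
+	 * Per-compander register blocks are 8 registers apart and per-RX-path
+	 * blocks are 20 apart, hence the indexing from the COMPANDER1/RX1 base.
+	 */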
+	comp_ctl0_reg = WCD934X_CDC_COMPANDER1_CTL0 + (comp * 8);
+	rx_path_cfg0_reg = WCD934X_CDC_RX1_RX_PATH_CFG0 + (comp * 20);
+
+	if (SND_SOC_DAPM_EVENT_ON(event)) {
+		/* Enable Compander Clock */
+		snd_soc_update_bits(codec, comp_ctl0_reg, 0x01, 0x01);
+		snd_soc_update_bits(codec, comp_ctl0_reg, 0x02, 0x02);
+		snd_soc_update_bits(codec, comp_ctl0_reg, 0x02, 0x00);
+		snd_soc_update_bits(codec, rx_path_cfg0_reg, 0x02, 0x02);
+	}
+
+	if (SND_SOC_DAPM_EVENT_OFF(event)) {
+		snd_soc_update_bits(codec, rx_path_cfg0_reg, 0x02, 0x00);
+		snd_soc_update_bits(codec, comp_ctl0_reg, 0x04, 0x04);
+		snd_soc_update_bits(codec, comp_ctl0_reg, 0x02, 0x02);
+		snd_soc_update_bits(codec, comp_ctl0_reg, 0x02, 0x00);
+		snd_soc_update_bits(codec, comp_ctl0_reg, 0x01, 0x00);
+		snd_soc_update_bits(codec, comp_ctl0_reg, 0x04, 0x00);
+	}
+
+	return 0;
+}
+
+static void tavil_codec_idle_detect_control(struct snd_soc_codec *codec,
+					    int interp, int event)
+{
+	int reg = 0, mask, val;
+	struct tavil_priv *tavil = snd_soc_codec_get_drvdata(codec);
+
+	if (!tavil->idle_det_cfg.hph_idle_detect_en)
+		return;
+
+	if (interp == INTERP_HPHL) {
+		reg = WCD934X_CDC_RX_IDLE_DET_PATH_CTL;
+		mask = 0x01;
+		val = 0x01;
+	}
+	if (interp == INTERP_HPHR) {
+		reg = WCD934X_CDC_RX_IDLE_DET_PATH_CTL;
+		mask = 0x02;
+		val = 0x02;
+	}
+
+	if (reg && SND_SOC_DAPM_EVENT_ON(event))
+		snd_soc_update_bits(codec, reg, mask, val);
+
+	if (reg && SND_SOC_DAPM_EVENT_OFF(event)) {
+		snd_soc_update_bits(codec, reg, mask, 0x00);
+		tavil->idle_det_cfg.hph_idle_thr = 0;
+		snd_soc_write(codec, WCD934X_CDC_RX_IDLE_DET_CFG3, 0x0);
+	}
+}
+
+/**
+ * tavil_codec_enable_interp_clk - Enable main path Interpolator
+ * clock.
+ *
+ * @codec:      Codec instance
+ * @event:      DAPM event (SND_SOC_DAPM_* on/off event)
+ * @interp_idx: Interpolator index
+ *
+ * Returns number of main clock users
+ */
+int tavil_codec_enable_interp_clk(struct snd_soc_codec *codec,
+				  int event, int interp_idx)
+{
+	struct tavil_priv *tavil;
+	u16 main_reg;
+
+	if (!codec) {
+		pr_err("%s: codec is NULL\n", __func__);
+		return -EINVAL;
+	}
+
+	tavil = snd_soc_codec_get_drvdata(codec);
+	main_reg = WCD934X_CDC_RX0_RX_PATH_CTL + (interp_idx * 20);
+
+	if (SND_SOC_DAPM_EVENT_ON(event)) {
+		if (tavil->main_clk_users[interp_idx] == 0) {
+			/* Main path PGA mute enable */
+			snd_soc_update_bits(codec, main_reg, 0x10, 0x10);
+			/* Clk enable */
+			snd_soc_update_bits(codec, main_reg, 0x20, 0x20);
+			tavil_codec_idle_detect_control(codec, interp_idx,
+							event);
+			tavil_codec_hd2_control(tavil, interp_idx, event);
+			tavil_codec_hphdelay_lutbypass(codec, interp_idx,
+						       event);
+			tavil_config_compander(codec, interp_idx, event);
+		}
+		tavil->main_clk_users[interp_idx]++;
+	}
+
+	if (SND_SOC_DAPM_EVENT_OFF(event)) {
+		tavil->main_clk_users[interp_idx]--;
+		if (tavil->main_clk_users[interp_idx] <= 0) {
+			tavil->main_clk_users[interp_idx] = 0;
+			tavil_config_compander(codec, interp_idx, event);
+			tavil_codec_hphdelay_lutbypass(codec, interp_idx,
+						       event);
+			tavil_codec_hd2_control(tavil, interp_idx, event);
+			tavil_codec_idle_detect_control(codec, interp_idx,
+							event);
+			/* Clk Disable */
+			snd_soc_update_bits(codec, main_reg, 0x20, 0x00);
+			/* Reset enable and disable */
+			snd_soc_update_bits(codec, main_reg, 0x40, 0x40);
+			snd_soc_update_bits(codec, main_reg, 0x40, 0x00);
+			/* Reset rate to 48K */
+			snd_soc_update_bits(codec, main_reg, 0x0F, 0x04);
+		}
+	}
+
+	dev_dbg(codec->dev, "%s event %d main_clk_users %d\n",
+		__func__, event, tavil->main_clk_users[interp_idx]);
+
+	return tavil->main_clk_users[interp_idx];
+}
+EXPORT_SYMBOL(tavil_codec_enable_interp_clk);
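+
+/*
+ * Usage sketch (illustrative only, not a caller in this driver): DAPM
+ * event handlers bracket their work with balanced on/off calls so the
+ * per-interpolator refcount stays consistent, e.g.:
+ *
+ *	tavil_codec_enable_interp_clk(codec, SND_SOC_DAPM_PRE_PMU,
+ *				      INTERP_HPHL);
+ *	...program the RX path...
+ *	tavil_codec_enable_interp_clk(codec, SND_SOC_DAPM_POST_PMD,
+ *				      INTERP_HPHL);
+ */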
+
+static int tavil_anc_out_switch_cb(struct snd_soc_dapm_widget *w,
+				   struct snd_kcontrol *kcontrol, int event)
+{
+	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+
+	tavil_codec_enable_interp_clk(codec, event, w->shift);
+
+	return 0;
+}
+
+static int tavil_codec_set_idle_detect_thr(struct snd_soc_codec *codec,
+					   int interp, int path_type)
+{
+	int port_id[4] = { 0, 0, 0, 0 };
+	int *port_ptr, num_ports;
+	int bit_width = 0, i;
+	int mux_reg, mux_reg_val;
+	struct tavil_priv *tavil = snd_soc_codec_get_drvdata(codec);
+	int dai_id, idle_thr;
+
+	if ((interp != INTERP_HPHL) && (interp != INTERP_HPHR))
+		return 0;
+
+	if (!tavil->idle_det_cfg.hph_idle_detect_en)
+		return 0;
+
+	port_ptr = &port_id[0];
+	num_ports = 0;
+
+	/*
+	 * Read interpolator MUX input registers and find
+	 * which slimbus port is connected and store the port
+	 * numbers in port_id array.
+	 */
+	if (path_type == INTERP_MIX_PATH) {
+		mux_reg = WCD934X_CDC_RX_INP_MUX_RX_INT1_CFG1 +
+			  2 * (interp - 1);
+		mux_reg_val = snd_soc_read(codec, mux_reg) & 0x0f;
+
+		if ((mux_reg_val >= INTn_2_INP_SEL_RX0) &&
+		    (mux_reg_val < INTn_2_INP_SEL_PROXIMITY)) {
+			*port_ptr++ = mux_reg_val +
+				      WCD934X_RX_PORT_START_NUMBER - 1;
+			num_ports++;
+		}
+	}
+
+	if (path_type == INTERP_MAIN_PATH) {
+		mux_reg = WCD934X_CDC_RX_INP_MUX_RX_INT1_CFG0 +
+			  2 * (interp - 1);
+		mux_reg_val = snd_soc_read(codec, mux_reg) & 0x0f;
+		i = WCD934X_INTERP_MUX_NUM_INPUTS;
+
+		while (i) {
+			if ((mux_reg_val >= INTn_1_INP_SEL_RX0) &&
+			    (mux_reg_val <= INTn_1_INP_SEL_RX7)) {
+				*port_ptr++ = mux_reg_val +
+					WCD934X_RX_PORT_START_NUMBER -
+					INTn_1_INP_SEL_RX0;
+				num_ports++;
+			}
+			mux_reg_val = (snd_soc_read(codec, mux_reg) &
+						    0xf0) >> 4;
+			mux_reg += 1;
+			i--;
+		}
+	}
+
+	dev_dbg(codec->dev, "%s: num_ports: %d, ports[%d %d %d %d]\n",
+		__func__, num_ports, port_id[0], port_id[1],
+		port_id[2], port_id[3]);
+
+	i = 0;
+	while (num_ports) {
+		dai_id = tavil_find_playback_dai_id_for_port(port_id[i++],
+							     tavil);
+
+		if ((dai_id >= 0) && (dai_id < NUM_CODEC_DAIS)) {
+			dev_dbg(codec->dev, "%s: dai_id: %d bit_width: %d\n",
+				__func__, dai_id,
+				tavil->dai[dai_id].bit_width);
+
+			if (tavil->dai[dai_id].bit_width > bit_width)
+				bit_width = tavil->dai[dai_id].bit_width;
+		}
+
+		num_ports--;
+	}
+
+	switch (bit_width) {
+	case 16:
+		idle_thr = 0xff; /* F16 */
+		break;
+	case 24:
+	case 32:
+		idle_thr = 0x03; /* F22 */
+		break;
+	default:
+		idle_thr = 0x00;
+		break;
+	}
+
+	dev_dbg(codec->dev, "%s: (new) idle_thr: %d, (cur) idle_thr: %d\n",
+		__func__, idle_thr, tavil->idle_det_cfg.hph_idle_thr);
+
+	if ((tavil->idle_det_cfg.hph_idle_thr == 0) ||
+	    (idle_thr < tavil->idle_det_cfg.hph_idle_thr)) {
+		snd_soc_write(codec, WCD934X_CDC_RX_IDLE_DET_CFG3, idle_thr);
+		tavil->idle_det_cfg.hph_idle_thr = idle_thr;
+	}
+
+	return 0;
+}
+
+static int tavil_codec_enable_mix_path(struct snd_soc_dapm_widget *w,
+				       struct snd_kcontrol *kcontrol,
+				       int event)
+{
+	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+	struct tavil_priv *tavil = snd_soc_codec_get_drvdata(codec);
+	u16 gain_reg, mix_reg;
+	int offset_val = 0;
+	int val = 0;
+
+	if (w->shift >= WCD934X_NUM_INTERPOLATORS ||
+	    w->shift == INTERP_LO3_NA || w->shift == INTERP_LO4_NA) {
+		dev_err(codec->dev, "%s: Invalid Interpolator value %d for name %s\n",
+			__func__, w->shift, w->name);
+		return -EINVAL;
+	}
+
+	gain_reg = WCD934X_CDC_RX0_RX_VOL_MIX_CTL +
+					(w->shift * WCD934X_RX_PATH_CTL_OFFSET);
+	mix_reg = WCD934X_CDC_RX0_RX_PATH_MIX_CTL +
+					(w->shift * WCD934X_RX_PATH_CTL_OFFSET);
+
+	if (w->shift == INTERP_SPKR1 || w->shift == INTERP_SPKR2)
+		__tavil_codec_enable_swr(w, event);
+
+	switch (event) {
+	case SND_SOC_DAPM_PRE_PMU:
+		tavil_codec_set_idle_detect_thr(codec, w->shift,
+						INTERP_MIX_PATH);
+		tavil_codec_enable_interp_clk(codec, event, w->shift);
+		/* Clk enable */
+		snd_soc_update_bits(codec, mix_reg, 0x20, 0x20);
+		break;
+	case SND_SOC_DAPM_POST_PMU:
+		if ((tavil->swr.spkr_gain_offset ==
+		     WCD934X_RX_GAIN_OFFSET_M1P5_DB) &&
+		    (tavil->comp_enabled[COMPANDER_7] ||
+		     tavil->comp_enabled[COMPANDER_8]) &&
+		    (gain_reg == WCD934X_CDC_RX7_RX_VOL_MIX_CTL ||
+		     gain_reg == WCD934X_CDC_RX8_RX_VOL_MIX_CTL)) {
+			snd_soc_update_bits(codec, WCD934X_CDC_RX7_RX_PATH_SEC1,
+					    0x01, 0x01);
+			snd_soc_update_bits(codec,
+					    WCD934X_CDC_RX7_RX_PATH_MIX_SEC0,
+					    0x01, 0x01);
+			snd_soc_update_bits(codec, WCD934X_CDC_RX8_RX_PATH_SEC1,
+					    0x01, 0x01);
+			snd_soc_update_bits(codec,
+					    WCD934X_CDC_RX8_RX_PATH_MIX_SEC0,
+					    0x01, 0x01);
+			offset_val = -2;
+		}
+		val = snd_soc_read(codec, gain_reg);
+		val += offset_val;
+		snd_soc_write(codec, gain_reg, val);
+		tavil_codec_config_ear_spkr_gain(codec, event, gain_reg);
+		break;
+	case SND_SOC_DAPM_POST_PMD:
+		/* Clk Disable */
+		snd_soc_update_bits(codec, mix_reg, 0x20, 0x00);
+		tavil_codec_enable_interp_clk(codec, event, w->shift);
+		/* Reset enable and disable */
+		snd_soc_update_bits(codec, mix_reg, 0x40, 0x40);
+		snd_soc_update_bits(codec, mix_reg, 0x40, 0x00);
+
+		if ((tavil->swr.spkr_gain_offset ==
+		     WCD934X_RX_GAIN_OFFSET_M1P5_DB) &&
+		    (tavil->comp_enabled[COMPANDER_7] ||
+		     tavil->comp_enabled[COMPANDER_8]) &&
+		    (gain_reg == WCD934X_CDC_RX7_RX_VOL_MIX_CTL ||
+		     gain_reg == WCD934X_CDC_RX8_RX_VOL_MIX_CTL)) {
+			snd_soc_update_bits(codec, WCD934X_CDC_RX7_RX_PATH_SEC1,
+					    0x01, 0x00);
+			snd_soc_update_bits(codec,
+					    WCD934X_CDC_RX7_RX_PATH_MIX_SEC0,
+					    0x01, 0x00);
+			snd_soc_update_bits(codec, WCD934X_CDC_RX8_RX_PATH_SEC1,
+					    0x01, 0x00);
+			snd_soc_update_bits(codec,
+					    WCD934X_CDC_RX8_RX_PATH_MIX_SEC0,
+					    0x01, 0x00);
+			offset_val = 2;
+			val = snd_soc_read(codec, gain_reg);
+			val += offset_val;
+			snd_soc_write(codec, gain_reg, val);
+		}
+		tavil_codec_config_ear_spkr_gain(codec, event, gain_reg);
+		break;
+	}
+	dev_dbg(codec->dev, "%s event %d name %s\n", __func__, event, w->name);
+
+	return 0;
+}
+
+/**
+ * tavil_get_dsd_config - Get pointer to dsd config structure
+ *
+ * @codec: pointer to snd_soc_codec structure
+ *
+ * Returns pointer to tavil_dsd_config structure
+ */
+struct tavil_dsd_config *tavil_get_dsd_config(struct snd_soc_codec *codec)
+{
+	struct tavil_priv *tavil;
+
+	if (!codec)
+		return NULL;
+
+	tavil = snd_soc_codec_get_drvdata(codec);
+
+	if (!tavil)
+		return NULL;
+
+	return tavil->dsd_config;
+}
+EXPORT_SYMBOL(tavil_get_dsd_config);
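+
+/*
+ * Illustrative use (hypothetical caller such as a DSD component):
+ *
+ *	struct tavil_dsd_config *dsd_conf = tavil_get_dsd_config(codec);
+ *
+ *	if (dsd_conf)
+ *		...operate on the DSD configuration...
+ */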
+
+static int tavil_codec_enable_main_path(struct snd_soc_dapm_widget *w,
+					struct snd_kcontrol *kcontrol,
+					int event)
+{
+	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+	struct tavil_priv *tavil = snd_soc_codec_get_drvdata(codec);
+	u16 gain_reg;
+	u16 reg;
+	int val;
+	int offset_val = 0;
+
+	dev_dbg(codec->dev, "%s %d %s\n", __func__, event, w->name);
+
+	if (w->shift >= WCD934X_NUM_INTERPOLATORS ||
+	    w->shift == INTERP_LO3_NA || w->shift == INTERP_LO4_NA) {
+		dev_err(codec->dev, "%s: Invalid Interpolator value %d for name %s\n",
+			__func__, w->shift, w->name);
+		return -EINVAL;
+	}
+
+	reg = WCD934X_CDC_RX0_RX_PATH_CTL + (w->shift *
+					     WCD934X_RX_PATH_CTL_OFFSET);
+	gain_reg = WCD934X_CDC_RX0_RX_VOL_CTL + (w->shift *
+						 WCD934X_RX_PATH_CTL_OFFSET);
+
+	switch (event) {
+	case SND_SOC_DAPM_PRE_PMU:
+		tavil_codec_set_idle_detect_thr(codec, w->shift,
+						INTERP_MAIN_PATH);
+		tavil_codec_enable_interp_clk(codec, event, w->shift);
+		break;
+	case SND_SOC_DAPM_POST_PMU:
+		/* apply gain after int clk is enabled */
+		if ((tavil->swr.spkr_gain_offset ==
+					WCD934X_RX_GAIN_OFFSET_M1P5_DB) &&
+		    (tavil->comp_enabled[COMPANDER_7] ||
+		     tavil->comp_enabled[COMPANDER_8]) &&
+		    (gain_reg == WCD934X_CDC_RX7_RX_VOL_CTL ||
+		     gain_reg == WCD934X_CDC_RX8_RX_VOL_CTL)) {
+			snd_soc_update_bits(codec, WCD934X_CDC_RX7_RX_PATH_SEC1,
+					    0x01, 0x01);
+			snd_soc_update_bits(codec,
+					    WCD934X_CDC_RX7_RX_PATH_MIX_SEC0,
+					    0x01, 0x01);
+			snd_soc_update_bits(codec, WCD934X_CDC_RX8_RX_PATH_SEC1,
+					    0x01, 0x01);
+			snd_soc_update_bits(codec,
+					    WCD934X_CDC_RX8_RX_PATH_MIX_SEC0,
+					    0x01, 0x01);
+			offset_val = -2;
+		}
+		val = snd_soc_read(codec, gain_reg);
+		val += offset_val;
+		snd_soc_write(codec, gain_reg, val);
+		tavil_codec_config_ear_spkr_gain(codec, event, gain_reg);
+		break;
+	case SND_SOC_DAPM_POST_PMD:
+		tavil_codec_enable_interp_clk(codec, event, w->shift);
+
+		if ((tavil->swr.spkr_gain_offset ==
+					WCD934X_RX_GAIN_OFFSET_M1P5_DB) &&
+		    (tavil->comp_enabled[COMPANDER_7] ||
+		     tavil->comp_enabled[COMPANDER_8]) &&
+		    (gain_reg == WCD934X_CDC_RX7_RX_VOL_CTL ||
+		     gain_reg == WCD934X_CDC_RX8_RX_VOL_CTL)) {
+			snd_soc_update_bits(codec, WCD934X_CDC_RX7_RX_PATH_SEC1,
+					    0x01, 0x00);
+			snd_soc_update_bits(codec,
+					    WCD934X_CDC_RX7_RX_PATH_MIX_SEC0,
+					    0x01, 0x00);
+			snd_soc_update_bits(codec, WCD934X_CDC_RX8_RX_PATH_SEC1,
+					    0x01, 0x00);
+			snd_soc_update_bits(codec,
+					    WCD934X_CDC_RX8_RX_PATH_MIX_SEC0,
+					    0x01, 0x00);
+			offset_val = 2;
+			val = snd_soc_read(codec, gain_reg);
+			val += offset_val;
+			snd_soc_write(codec, gain_reg, val);
+		}
+		tavil_codec_config_ear_spkr_gain(codec, event, gain_reg);
+		break;
+	}
+
+	return 0;
+}
+
+static int tavil_codec_set_iir_gain(struct snd_soc_dapm_widget *w,
+				    struct snd_kcontrol *kcontrol, int event)
+{
+	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+
+	dev_dbg(codec->dev, "%s: event = %d\n", __func__, event);
+
+	switch (event) {
+	case SND_SOC_DAPM_POST_PMU: /* fall through */
+	case SND_SOC_DAPM_PRE_PMD:
+		if (strnstr(w->name, "IIR0", sizeof("IIR0"))) {
+			snd_soc_write(codec,
+				WCD934X_CDC_SIDETONE_IIR0_IIR_GAIN_B1_CTL,
+			snd_soc_read(codec,
+				WCD934X_CDC_SIDETONE_IIR0_IIR_GAIN_B1_CTL));
+			snd_soc_write(codec,
+				WCD934X_CDC_SIDETONE_IIR0_IIR_GAIN_B2_CTL,
+			snd_soc_read(codec,
+				WCD934X_CDC_SIDETONE_IIR0_IIR_GAIN_B2_CTL));
+			snd_soc_write(codec,
+				WCD934X_CDC_SIDETONE_IIR0_IIR_GAIN_B3_CTL,
+			snd_soc_read(codec,
+				WCD934X_CDC_SIDETONE_IIR0_IIR_GAIN_B3_CTL));
+			snd_soc_write(codec,
+				WCD934X_CDC_SIDETONE_IIR0_IIR_GAIN_B4_CTL,
+			snd_soc_read(codec,
+				WCD934X_CDC_SIDETONE_IIR0_IIR_GAIN_B4_CTL));
+		} else {
+			snd_soc_write(codec,
+				WCD934X_CDC_SIDETONE_IIR1_IIR_GAIN_B1_CTL,
+			snd_soc_read(codec,
+				WCD934X_CDC_SIDETONE_IIR1_IIR_GAIN_B1_CTL));
+			snd_soc_write(codec,
+				WCD934X_CDC_SIDETONE_IIR1_IIR_GAIN_B2_CTL,
+			snd_soc_read(codec,
+				WCD934X_CDC_SIDETONE_IIR1_IIR_GAIN_B2_CTL));
+			snd_soc_write(codec,
+				WCD934X_CDC_SIDETONE_IIR1_IIR_GAIN_B3_CTL,
+			snd_soc_read(codec,
+				WCD934X_CDC_SIDETONE_IIR1_IIR_GAIN_B3_CTL));
+		}
+		break;
+	}
+	return 0;
+}
+
+static int tavil_codec_find_amic_input(struct snd_soc_codec *codec,
+				       int adc_mux_n)
+{
+	u16 mask, shift, adc_mux_in_reg;
+	u16 amic_mux_sel_reg;
+	bool is_amic;
+
+	if (adc_mux_n < 0 || adc_mux_n > WCD934X_MAX_VALID_ADC_MUX ||
+	    adc_mux_n == WCD934X_INVALID_ADC_MUX)
+		return 0;
+
+	if (adc_mux_n < 3) {
+		adc_mux_in_reg = WCD934X_CDC_TX_INP_MUX_ADC_MUX0_CFG1 +
+				 adc_mux_n;
+		mask = 0x03;
+		shift = 0;
+		amic_mux_sel_reg = WCD934X_CDC_TX_INP_MUX_ADC_MUX0_CFG0 +
+				   2 * adc_mux_n;
+	} else if (adc_mux_n < 4) {
+		adc_mux_in_reg = WCD934X_CDC_TX_INP_MUX_ADC_MUX3_CFG1;
+		mask = 0x03;
+		shift = 0;
+		amic_mux_sel_reg = WCD934X_CDC_TX_INP_MUX_ADC_MUX0_CFG0 +
+				   2 * adc_mux_n;
+	} else if (adc_mux_n < 7) {
+		adc_mux_in_reg = WCD934X_CDC_TX_INP_MUX_ADC_MUX0_CFG1 +
+				 (adc_mux_n - 4);
+		mask = 0x0C;
+		shift = 2;
+		amic_mux_sel_reg = WCD934X_CDC_TX_INP_MUX_ADC_MUX4_CFG0 +
+				   adc_mux_n - 4;
+	} else if (adc_mux_n < 8) {
+		adc_mux_in_reg = WCD934X_CDC_TX_INP_MUX_ADC_MUX3_CFG1;
+		mask = 0x0C;
+		shift = 2;
+		amic_mux_sel_reg = WCD934X_CDC_TX_INP_MUX_ADC_MUX4_CFG0 +
+				   adc_mux_n - 4;
+	} else if (adc_mux_n < 12) {
+		adc_mux_in_reg = WCD934X_CDC_TX_INP_MUX_ADC_MUX0_CFG1 +
+				 ((adc_mux_n == 8) ? (adc_mux_n - 8) :
+				  (adc_mux_n - 9));
+		mask = 0x30;
+		shift = 4;
+		amic_mux_sel_reg = WCD934X_CDC_TX_INP_MUX_ADC_MUX4_CFG0 +
+				   adc_mux_n - 4;
+	} else if (adc_mux_n < 13) {
+		adc_mux_in_reg = WCD934X_CDC_TX_INP_MUX_ADC_MUX3_CFG1;
+		mask = 0x30;
+		shift = 4;
+		amic_mux_sel_reg = WCD934X_CDC_TX_INP_MUX_ADC_MUX4_CFG0 +
+				   adc_mux_n - 4;
+	} else {
+		adc_mux_in_reg = WCD934X_CDC_TX_INP_MUX_ADC_MUX0_CFG1;
+		mask = 0xC0;
+		shift = 6;
+		amic_mux_sel_reg = WCD934X_CDC_TX_INP_MUX_ADC_MUX4_CFG0 +
+				   adc_mux_n - 4;
+	}
+
+	is_amic = (((snd_soc_read(codec, adc_mux_in_reg) & mask) >> shift)
+		    == 1);
+	if (!is_amic)
+		return 0;
+
+	return snd_soc_read(codec, amic_mux_sel_reg) & 0x07;
+}
+
+static void tavil_codec_set_tx_hold(struct snd_soc_codec *codec,
+				    u16 amic_reg, bool set)
+{
+	u8 mask = 0x20;
+	u8 val;
+
+	if (amic_reg == WCD934X_ANA_AMIC1 ||
+	    amic_reg == WCD934X_ANA_AMIC3)
+		mask = 0x40;
+
+	val = set ? mask : 0x00;
+
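+	/*
+	 * AMIC1/2 share a TX-hold control in ANA_AMIC2 and AMIC3/4 share
+	 * one in ANA_AMIC4, so the write always lands on the even register.
+	 */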
+	switch (amic_reg) {
+	case WCD934X_ANA_AMIC1:
+	case WCD934X_ANA_AMIC2:
+		snd_soc_update_bits(codec, WCD934X_ANA_AMIC2, mask, val);
+		break;
+	case WCD934X_ANA_AMIC3:
+	case WCD934X_ANA_AMIC4:
+		snd_soc_update_bits(codec, WCD934X_ANA_AMIC4, mask, val);
+		break;
+	default:
+		dev_dbg(codec->dev, "%s: invalid amic: %d\n",
+			__func__, amic_reg);
+		break;
+	}
+}
+
+static int tavil_codec_tx_adc_cfg(struct snd_soc_dapm_widget *w,
+				  struct snd_kcontrol *kcontrol, int event)
+{
+	int adc_mux_n = w->shift;
+	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+	struct tavil_priv *tavil = snd_soc_codec_get_drvdata(codec);
+	int amic_n;
+
+	dev_dbg(codec->dev, "%s: event: %d\n", __func__, event);
+
+	switch (event) {
+	case SND_SOC_DAPM_POST_PMU:
+		amic_n = tavil_codec_find_amic_input(codec, adc_mux_n);
+		if (amic_n) {
+			/*
+			 * Prevent ANC Rx pop by leaving Tx FE in HOLD
+			 * state until PA is up. Track AMIC being used
+			 * so we can release the HOLD later.
+			 */
+			set_bit(ANC_MIC_AMIC1 + amic_n - 1,
+				&tavil->status_mask);
+		}
+		break;
+	default:
+		break;
+	}
+
+	return 0;
+}
+
+static u16 tavil_codec_get_amic_pwlvl_reg(struct snd_soc_codec *codec, int amic)
+{
+	u16 pwr_level_reg = 0;
+
+	switch (amic) {
+	case 1:
+	case 2:
+		pwr_level_reg = WCD934X_ANA_AMIC1;
+		break;
+
+	case 3:
+	case 4:
+		pwr_level_reg = WCD934X_ANA_AMIC3;
+		break;
+	default:
+		dev_dbg(codec->dev, "%s: invalid amic: %d\n",
+			__func__, amic);
+		break;
+	}
+
+	return pwr_level_reg;
+}
+
+#define TX_HPF_CUT_OFF_FREQ_MASK	0x60
+#define CF_MIN_3DB_4HZ			0x0
+#define CF_MIN_3DB_75HZ			0x1
+#define CF_MIN_3DB_150HZ		0x2
+
+static void tavil_tx_hpf_corner_freq_callback(struct work_struct *work)
+{
+	struct delayed_work *hpf_delayed_work;
+	struct hpf_work *hpf_work;
+	struct tavil_priv *tavil;
+	struct snd_soc_codec *codec;
+	u16 dec_cfg_reg, amic_reg, go_bit_reg;
+	u8 hpf_cut_off_freq;
+	int amic_n;
+
+	hpf_delayed_work = to_delayed_work(work);
+	hpf_work = container_of(hpf_delayed_work, struct hpf_work, dwork);
+	tavil = hpf_work->tavil;
+	codec = tavil->codec;
+	hpf_cut_off_freq = hpf_work->hpf_cut_off_freq;
+
+	dec_cfg_reg = WCD934X_CDC_TX0_TX_PATH_CFG0 + 16 * hpf_work->decimator;
+	go_bit_reg = dec_cfg_reg + 7;
+
+	dev_dbg(codec->dev, "%s: decimator %u hpf_cut_of_freq 0x%x\n",
+		__func__, hpf_work->decimator, hpf_cut_off_freq);
+
+	amic_n = tavil_codec_find_amic_input(codec, hpf_work->decimator);
+	if (amic_n) {
+		amic_reg = WCD934X_ANA_AMIC1 + amic_n - 1;
+		tavil_codec_set_tx_hold(codec, amic_reg, false);
+	}
+	snd_soc_update_bits(codec, dec_cfg_reg, TX_HPF_CUT_OFF_FREQ_MASK,
+			    hpf_cut_off_freq << 5);
+	snd_soc_update_bits(codec, go_bit_reg, 0x02, 0x02);
+	/* Minimum 1 clk cycle delay is required as per HW spec */
+	usleep_range(1000, 1010);
+	snd_soc_update_bits(codec, go_bit_reg, 0x02, 0x00);
+}
+
+static void tavil_tx_mute_update_callback(struct work_struct *work)
+{
+	struct tx_mute_work *tx_mute_dwork;
+	struct tavil_priv *tavil;
+	struct delayed_work *delayed_work;
+	struct snd_soc_codec *codec;
+	u16 tx_vol_ctl_reg, hpf_gate_reg;
+
+	delayed_work = to_delayed_work(work);
+	tx_mute_dwork = container_of(delayed_work, struct tx_mute_work, dwork);
+	tavil = tx_mute_dwork->tavil;
+	codec = tavil->codec;
+
+	tx_vol_ctl_reg = WCD934X_CDC_TX0_TX_PATH_CTL +
+			 16 * tx_mute_dwork->decimator;
+	hpf_gate_reg = WCD934X_CDC_TX0_TX_PATH_SEC2 +
+		       16 * tx_mute_dwork->decimator;
+	snd_soc_update_bits(codec, tx_vol_ctl_reg, 0x10, 0x00);
+}
+
+static int tavil_codec_enable_rx_path_clk(struct snd_soc_dapm_widget *w,
+				  struct snd_kcontrol *kcontrol, int event)
+{
+	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+	u16 sidetone_reg;
+
+	dev_dbg(codec->dev, "%s %d %d\n", __func__, event, w->shift);
+	sidetone_reg = WCD934X_CDC_RX0_RX_PATH_CFG1 + 0x14 * (w->shift);
+
+	switch (event) {
+	case SND_SOC_DAPM_PRE_PMU:
+		if (!strcmp(w->name, "RX INT7 MIX2 INP"))
+			__tavil_codec_enable_swr(w, event);
+		tavil_codec_enable_interp_clk(codec, event, w->shift);
+		snd_soc_update_bits(codec, sidetone_reg, 0x10, 0x10);
+		break;
+	case SND_SOC_DAPM_POST_PMD:
+		snd_soc_update_bits(codec, sidetone_reg, 0x10, 0x00);
+		tavil_codec_enable_interp_clk(codec, event, w->shift);
+		if (!strcmp(w->name, "RX INT7 MIX2 INP"))
+			__tavil_codec_enable_swr(w, event);
+		break;
+	default:
+		break;
+	}
+	return 0;
+}
+
+static int tavil_codec_enable_dec(struct snd_soc_dapm_widget *w,
+				  struct snd_kcontrol *kcontrol, int event)
+{
+	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+	struct tavil_priv *tavil = snd_soc_codec_get_drvdata(codec);
+	unsigned int decimator;
+	char *dec_adc_mux_name = NULL;
+	char *widget_name = NULL;
+	char *wname;
+	int ret = 0, amic_n;
+	u16 tx_vol_ctl_reg, pwr_level_reg = 0, dec_cfg_reg, hpf_gate_reg;
+	u16 tx_gain_ctl_reg;
+	char *dec;
+	u8 hpf_cut_off_freq;
+
+	dev_dbg(codec->dev, "%s %d\n", __func__, event);
+
+	widget_name = kstrndup(w->name, 15, GFP_KERNEL);
+	if (!widget_name)
+		return -ENOMEM;
+
+	wname = widget_name;
+	dec_adc_mux_name = strsep(&widget_name, " ");
+	if (!dec_adc_mux_name) {
+		dev_err(codec->dev, "%s: Invalid decimator = %s\n",
+			__func__, w->name);
+		ret = -EINVAL;
+		goto out;
+	}
+	dec_adc_mux_name = widget_name;
+
+	dec = strpbrk(dec_adc_mux_name, "012345678");
+	if (!dec) {
+		dev_err(codec->dev, "%s: decimator index not found\n",
+			__func__);
+		ret = -EINVAL;
+		goto out;
+	}
+
+	ret = kstrtouint(dec, 10, &decimator);
+	if (ret < 0) {
+		dev_err(codec->dev, "%s: Invalid decimator = %s\n",
+			__func__, wname);
+		ret = -EINVAL;
+		goto out;
+	}
+
+	dev_dbg(codec->dev, "%s(): widget = %s decimator = %u\n", __func__,
+			w->name, decimator);
+
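+	/* TX path register blocks are spaced 16 registers apart per decimator */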
+	tx_vol_ctl_reg = WCD934X_CDC_TX0_TX_PATH_CTL + 16 * decimator;
+	hpf_gate_reg = WCD934X_CDC_TX0_TX_PATH_SEC2 + 16 * decimator;
+	dec_cfg_reg = WCD934X_CDC_TX0_TX_PATH_CFG0 + 16 * decimator;
+	tx_gain_ctl_reg = WCD934X_CDC_TX0_TX_VOL_CTL + 16 * decimator;
+
+	switch (event) {
+	case SND_SOC_DAPM_PRE_PMU:
+		amic_n = tavil_codec_find_amic_input(codec, decimator);
+		if (amic_n)
+			pwr_level_reg = tavil_codec_get_amic_pwlvl_reg(codec,
+								       amic_n);
+
+		if (pwr_level_reg) {
+			switch ((snd_soc_read(codec, pwr_level_reg) &
+					      WCD934X_AMIC_PWR_LVL_MASK) >>
+					      WCD934X_AMIC_PWR_LVL_SHIFT) {
+			case WCD934X_AMIC_PWR_LEVEL_LP:
+				snd_soc_update_bits(codec, dec_cfg_reg,
+						    WCD934X_DEC_PWR_LVL_MASK,
+						    WCD934X_DEC_PWR_LVL_LP);
+				break;
+
+			case WCD934X_AMIC_PWR_LEVEL_HP:
+				snd_soc_update_bits(codec, dec_cfg_reg,
+						    WCD934X_DEC_PWR_LVL_MASK,
+						    WCD934X_DEC_PWR_LVL_HP);
+				break;
+			case WCD934X_AMIC_PWR_LEVEL_DEFAULT:
+			default:
+				snd_soc_update_bits(codec, dec_cfg_reg,
+						    WCD934X_DEC_PWR_LVL_MASK,
+						    WCD934X_DEC_PWR_LVL_DF);
+				break;
+			}
+		}
+		/* Enable TX PGA Mute */
+		snd_soc_update_bits(codec, tx_vol_ctl_reg, 0x10, 0x10);
+		break;
+	case SND_SOC_DAPM_POST_PMU:
+		hpf_cut_off_freq = (snd_soc_read(codec, dec_cfg_reg) &
+				   TX_HPF_CUT_OFF_FREQ_MASK) >> 5;
+
+		tavil->tx_hpf_work[decimator].hpf_cut_off_freq =
+							hpf_cut_off_freq;
+		if (hpf_cut_off_freq != CF_MIN_3DB_150HZ) {
+			snd_soc_update_bits(codec, dec_cfg_reg,
+					    TX_HPF_CUT_OFF_FREQ_MASK,
+					    CF_MIN_3DB_150HZ << 5);
+			snd_soc_update_bits(codec, hpf_gate_reg, 0x02, 0x02);
+			/*
+			 * Minimum 1 clk cycle delay is required as per
+			 * HW spec.
+			 */
+			usleep_range(1000, 1010);
+			snd_soc_update_bits(codec, hpf_gate_reg, 0x02, 0x00);
+		}
+		/* Schedule delayed work to remove the mute */
+		schedule_delayed_work(&tavil->tx_mute_dwork[decimator].dwork,
+				      msecs_to_jiffies(tx_unmute_delay));
+		if (tavil->tx_hpf_work[decimator].hpf_cut_off_freq !=
+							CF_MIN_3DB_150HZ)
+			schedule_delayed_work(
+					&tavil->tx_hpf_work[decimator].dwork,
+					msecs_to_jiffies(300));
+		/* apply gain after decimator is enabled */
+		snd_soc_write(codec, tx_gain_ctl_reg,
+			      snd_soc_read(codec, tx_gain_ctl_reg));
+		break;
+	case SND_SOC_DAPM_PRE_PMD:
+		hpf_cut_off_freq =
+			tavil->tx_hpf_work[decimator].hpf_cut_off_freq;
+		snd_soc_update_bits(codec, tx_vol_ctl_reg, 0x10, 0x10);
+		if (cancel_delayed_work_sync(
+		    &tavil->tx_hpf_work[decimator].dwork)) {
+			if (hpf_cut_off_freq != CF_MIN_3DB_150HZ) {
+				snd_soc_update_bits(codec, dec_cfg_reg,
+						    TX_HPF_CUT_OFF_FREQ_MASK,
+						    hpf_cut_off_freq << 5);
+				snd_soc_update_bits(codec, hpf_gate_reg,
+						    0x02, 0x02);
+				/*
+				 * Minimum 1 clk cycle delay is required as per
+				 * HW spec.
+				 */
+				usleep_range(1000, 1010);
+				snd_soc_update_bits(codec, hpf_gate_reg,
+						    0x02, 0x00);
+			}
+		}
+		cancel_delayed_work_sync(
+				&tavil->tx_mute_dwork[decimator].dwork);
+		break;
+	case SND_SOC_DAPM_POST_PMD:
+		snd_soc_update_bits(codec, tx_vol_ctl_reg, 0x10, 0x00);
+		snd_soc_update_bits(codec, dec_cfg_reg,
+				    WCD934X_DEC_PWR_LVL_MASK,
+				    WCD934X_DEC_PWR_LVL_DF);
+		break;
+	}
+out:
+	kfree(wname);
+	return ret;
+}
+
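+/*
+ * Walk the ADC MUX configuration registers to find a decimator that is
+ * routed to the given DMIC. If one is found, cap the DMIC clock rate
+ * based on the TX stream sample rate; otherwise fall back to the rate
+ * from platform data.
+ */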
+static u32 tavil_get_dmic_sample_rate(struct snd_soc_codec *codec,
+				      unsigned int dmic,
+				      struct wcd9xxx_pdata *pdata)
+{
+	u8 tx_stream_fs;
+	u8 adc_mux_index = 0, adc_mux_sel = 0;
+	bool dec_found = false;
+	u16 adc_mux_ctl_reg, tx_fs_reg;
+	u32 dmic_fs;
+
+	while (!dec_found && adc_mux_index < WCD934X_MAX_VALID_ADC_MUX) {
+		if (adc_mux_index < 4) {
+			adc_mux_ctl_reg = WCD934X_CDC_TX_INP_MUX_ADC_MUX0_CFG0 +
+						(adc_mux_index * 2);
+		} else if (adc_mux_index < WCD934X_INVALID_ADC_MUX) {
+			adc_mux_ctl_reg = WCD934X_CDC_TX_INP_MUX_ADC_MUX4_CFG0 +
+						adc_mux_index - 4;
+		} else if (adc_mux_index == WCD934X_INVALID_ADC_MUX) {
+			++adc_mux_index;
+			continue;
+		}
+		adc_mux_sel = ((snd_soc_read(codec, adc_mux_ctl_reg) &
+					0xF8) >> 3) - 1;
+
+		if (adc_mux_sel == dmic) {
+			dec_found = true;
+			break;
+		}
+
+		++adc_mux_index;
+	}
+
+	if (dec_found && adc_mux_index <= 8) {
+		tx_fs_reg = WCD934X_CDC_TX0_TX_PATH_CTL + (16 * adc_mux_index);
+		tx_stream_fs = snd_soc_read(codec, tx_fs_reg) & 0x0F;
+		if (tx_stream_fs <= 4) {
+			if (pdata->dmic_sample_rate <=
+					WCD9XXX_DMIC_SAMPLE_RATE_2P4MHZ)
+				dmic_fs = pdata->dmic_sample_rate;
+			else
+				dmic_fs = WCD9XXX_DMIC_SAMPLE_RATE_2P4MHZ;
+		} else {
+			dmic_fs = WCD9XXX_DMIC_SAMPLE_RATE_4P8MHZ;
+		}
+	} else {
+		dmic_fs = pdata->dmic_sample_rate;
+	}
+
+	return dmic_fs;
+}
+
+static u8 tavil_get_dmic_clk_val(struct snd_soc_codec *codec,
+				 u32 mclk_rate, u32 dmic_clk_rate)
+{
+	u32 div_factor;
+	u8 dmic_ctl_val;
+
+	dev_dbg(codec->dev,
+		"%s: mclk_rate = %d, dmic_sample_rate = %d\n",
+		__func__, mclk_rate, dmic_clk_rate);
+
+	/* Default value to return in case of error */
+	if (mclk_rate == WCD934X_MCLK_CLK_9P6MHZ)
+		dmic_ctl_val = WCD934X_DMIC_CLK_DIV_2;
+	else
+		dmic_ctl_val = WCD934X_DMIC_CLK_DIV_3;
+
+	if (dmic_clk_rate == 0) {
+		dev_err(codec->dev,
+			"%s: dmic_sample_rate cannot be 0\n",
+			__func__);
+		goto done;
+	}
+
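+	/*
+	 * The divider is simply mclk / dmic clock: e.g. a 9.6 MHz MCLK with
+	 * a 2.4 MHz DMIC clock gives div_factor 4, i.e. WCD934X_DMIC_CLK_DIV_4.
+	 */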
+	div_factor = mclk_rate / dmic_clk_rate;
+	switch (div_factor) {
+	case 2:
+		dmic_ctl_val = WCD934X_DMIC_CLK_DIV_2;
+		break;
+	case 3:
+		dmic_ctl_val = WCD934X_DMIC_CLK_DIV_3;
+		break;
+	case 4:
+		dmic_ctl_val = WCD934X_DMIC_CLK_DIV_4;
+		break;
+	case 6:
+		dmic_ctl_val = WCD934X_DMIC_CLK_DIV_6;
+		break;
+	case 8:
+		dmic_ctl_val = WCD934X_DMIC_CLK_DIV_8;
+		break;
+	case 16:
+		dmic_ctl_val = WCD934X_DMIC_CLK_DIV_16;
+		break;
+	default:
+		dev_err(codec->dev,
+			"%s: Invalid div_factor %u, clk_rate(%u), dmic_rate(%u)\n",
+			__func__, div_factor, mclk_rate, dmic_clk_rate);
+		break;
+	}
+
+done:
+	return dmic_ctl_val;
+}
+
+static int tavil_codec_enable_adc(struct snd_soc_dapm_widget *w,
+				  struct snd_kcontrol *kcontrol, int event)
+{
+	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+
+	dev_dbg(codec->dev, "%s: event:%d\n", __func__, event);
+
+	switch (event) {
+	case SND_SOC_DAPM_PRE_PMU:
+		tavil_codec_set_tx_hold(codec, w->reg, true);
+		break;
+	default:
+		break;
+	}
+
+	return 0;
+}
+
+static int tavil_codec_enable_dmic(struct snd_soc_dapm_widget *w,
+				   struct snd_kcontrol *kcontrol, int event)
+{
+	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+	struct tavil_priv *tavil = snd_soc_codec_get_drvdata(codec);
+	struct wcd9xxx_pdata *pdata = dev_get_platdata(codec->dev->parent);
+	u8  dmic_clk_en = 0x01;
+	u16 dmic_clk_reg;
+	s32 *dmic_clk_cnt;
+	u8 dmic_rate_val, dmic_rate_shift = 1;
+	unsigned int dmic;
+	u32 dmic_sample_rate;
+	int ret;
+	char *wname;
+
+	wname = strpbrk(w->name, "012345");
+	if (!wname) {
+		dev_err(codec->dev, "%s: widget not found\n", __func__);
+		return -EINVAL;
+	}
+
+	ret = kstrtouint(wname, 10, &dmic);
+	if (ret < 0) {
+		dev_err(codec->dev, "%s: Invalid DMIC line on the codec\n",
+			__func__);
+		return -EINVAL;
+	}
+
+	switch (dmic) {
+	case 0:
+	case 1:
+		dmic_clk_cnt = &(tavil->dmic_0_1_clk_cnt);
+		dmic_clk_reg = WCD934X_CPE_SS_DMIC0_CTL;
+		break;
+	case 2:
+	case 3:
+		dmic_clk_cnt = &(tavil->dmic_2_3_clk_cnt);
+		dmic_clk_reg = WCD934X_CPE_SS_DMIC1_CTL;
+		break;
+	case 4:
+	case 5:
+		dmic_clk_cnt = &(tavil->dmic_4_5_clk_cnt);
+		dmic_clk_reg = WCD934X_CPE_SS_DMIC2_CTL;
+		break;
+	default:
+		dev_err(codec->dev, "%s: Invalid DMIC Selection\n",
+			__func__);
+		return -EINVAL;
+	}
+	dev_dbg(codec->dev, "%s: event %d DMIC%d dmic_clk_cnt %d\n",
+			__func__, event, dmic, *dmic_clk_cnt);
+
+	switch (event) {
+	case SND_SOC_DAPM_PRE_PMU:
+		dmic_sample_rate = tavil_get_dmic_sample_rate(codec, dmic,
+							      pdata);
+		dmic_rate_val =
+			tavil_get_dmic_clk_val(codec,
+					       pdata->mclk_rate,
+					       dmic_sample_rate);
+
+		(*dmic_clk_cnt)++;
+		if (*dmic_clk_cnt == 1) {
+			snd_soc_update_bits(codec, dmic_clk_reg,
+					    0x07 << dmic_rate_shift,
+					    dmic_rate_val << dmic_rate_shift);
+			snd_soc_update_bits(codec, dmic_clk_reg,
+					    dmic_clk_en, dmic_clk_en);
+		}
+
+		break;
+	case SND_SOC_DAPM_POST_PMD:
+		dmic_rate_val =
+			tavil_get_dmic_clk_val(codec,
+					       pdata->mclk_rate,
+					       pdata->mad_dmic_sample_rate);
+		(*dmic_clk_cnt)--;
+		if (*dmic_clk_cnt == 0) {
+			snd_soc_update_bits(codec, dmic_clk_reg,
+					    dmic_clk_en, 0);
+			snd_soc_update_bits(codec, dmic_clk_reg,
+					    0x07 << dmic_rate_shift,
+					    dmic_rate_val << dmic_rate_shift);
+		}
+		break;
+	}
+
+	return 0;
+}
+
+/*
+ * tavil_mbhc_micb_adjust_voltage: adjust specific micbias voltage
+ * @codec: handle to snd_soc_codec *
+ * @req_volt: micbias voltage to be set
+ * @micb_num: micbias to be set, e.g. micbias1 or micbias2
+ *
+ * Return: 0 on success or an error code on failure
+ */
+int tavil_mbhc_micb_adjust_voltage(struct snd_soc_codec *codec,
+				   int req_volt, int micb_num)
+{
+	struct tavil_priv *tavil = snd_soc_codec_get_drvdata(codec);
+	int cur_vout_ctl, req_vout_ctl;
+	int micb_reg, micb_val, micb_en;
+	int ret = 0;
+
+	switch (micb_num) {
+	case MIC_BIAS_1:
+		micb_reg = WCD934X_ANA_MICB1;
+		break;
+	case MIC_BIAS_2:
+		micb_reg = WCD934X_ANA_MICB2;
+		break;
+	case MIC_BIAS_3:
+		micb_reg = WCD934X_ANA_MICB3;
+		break;
+	case MIC_BIAS_4:
+		micb_reg = WCD934X_ANA_MICB4;
+		break;
+	default:
+		return -EINVAL;
+	}
+	mutex_lock(&tavil->micb_lock);
+
+	/*
+	 * If the requested micbias voltage is the same as the current
+	 * voltage, just return. Otherwise, adjust it as requested. If
+	 * micbias is already enabled, momentarily switch it to pull-up
+	 * mode (to avoid a slow ramp up or down), change the voltage,
+	 * and then re-enable micbias.
+	 */
+	micb_val = snd_soc_read(codec, micb_reg);
+	micb_en = (micb_val & 0xC0) >> 6;
+	cur_vout_ctl = micb_val & 0x3F;
+
+	req_vout_ctl = wcd934x_get_micb_vout_ctl_val(req_volt);
+	if (IS_ERR_VALUE(req_vout_ctl)) {
+		ret = -EINVAL;
+		goto exit;
+	}
+	if (cur_vout_ctl == req_vout_ctl) {
+		ret = 0;
+		goto exit;
+	}
+
+	dev_dbg(codec->dev, "%s: micb_num: %d, cur_mv: %d, req_mv: %d, micb_en: %d\n",
+		 __func__, micb_num, WCD_VOUT_CTL_TO_MICB(cur_vout_ctl),
+		 req_volt, micb_en);
+
+	if (micb_en == 0x1)
+		snd_soc_update_bits(codec, micb_reg, 0xC0, 0x80);
+
+	snd_soc_update_bits(codec, micb_reg, 0x3F, req_vout_ctl);
+
+	if (micb_en == 0x1) {
+		snd_soc_update_bits(codec, micb_reg, 0xC0, 0x40);
+		/*
+		 * Add 2ms delay as per HW requirement after enabling
+		 * micbias
+		 */
+		usleep_range(2000, 2100);
+	}
+exit:
+	mutex_unlock(&tavil->micb_lock);
+	return ret;
+}
+EXPORT_SYMBOL(tavil_mbhc_micb_adjust_voltage);
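+
+/*
+ * Example (illustrative; req_volt is in millivolts, as the req_mv
+ * debug print above suggests): raising micbias2 to 2.7 V would be
+ *
+ *	tavil_mbhc_micb_adjust_voltage(codec, 2700, MIC_BIAS_2);
+ */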
+
+/*
+ * tavil_micbias_control: enable/disable micbias
+ * @codec: handle to snd_soc_codec *
+ * @micb_num: micbias to be enabled/disabled, e.g. micbias1 or micbias2
+ * @req: control requested, enable/disable or pullup enable/disable
+ * @is_dapm: triggered by dapm or not
+ *
+ * Return: 0 on success or an error code on failure
+ */
+int tavil_micbias_control(struct snd_soc_codec *codec,
+			  int micb_num, int req, bool is_dapm)
+{
+	struct tavil_priv *tavil = snd_soc_codec_get_drvdata(codec);
+	int micb_index = micb_num - 1;
+	u16 micb_reg;
+	int pre_off_event = 0, post_off_event = 0;
+	int post_on_event = 0, post_dapm_off = 0;
+	int post_dapm_on = 0;
+
+	if ((micb_index < 0) || (micb_index > TAVIL_MAX_MICBIAS - 1)) {
+		dev_err(codec->dev, "%s: Invalid micbias index, micb_ind:%d\n",
+			__func__, micb_index);
+		return -EINVAL;
+	}
+
+	switch (micb_num) {
+	case MIC_BIAS_1:
+		micb_reg = WCD934X_ANA_MICB1;
+		break;
+	case MIC_BIAS_2:
+		micb_reg = WCD934X_ANA_MICB2;
+		pre_off_event = WCD_EVENT_PRE_MICBIAS_2_OFF;
+		post_off_event = WCD_EVENT_POST_MICBIAS_2_OFF;
+		post_on_event = WCD_EVENT_POST_MICBIAS_2_ON;
+		post_dapm_on = WCD_EVENT_POST_DAPM_MICBIAS_2_ON;
+		post_dapm_off = WCD_EVENT_POST_DAPM_MICBIAS_2_OFF;
+		break;
+	case MIC_BIAS_3:
+		micb_reg = WCD934X_ANA_MICB3;
+		break;
+	case MIC_BIAS_4:
+		micb_reg = WCD934X_ANA_MICB4;
+		break;
+	default:
+		dev_err(codec->dev, "%s: Invalid micbias number: %d\n",
+			__func__, micb_num);
+		return -EINVAL;
+	}
+	mutex_lock(&tavil->micb_lock);
+
+	switch (req) {
+	case MICB_PULLUP_ENABLE:
+		tavil->pullup_ref[micb_index]++;
+		if ((tavil->pullup_ref[micb_index] == 1) &&
+		    (tavil->micb_ref[micb_index] == 0))
+			snd_soc_update_bits(codec, micb_reg, 0xC0, 0x80);
+		break;
+	case MICB_PULLUP_DISABLE:
+		if (tavil->pullup_ref[micb_index] > 0)
+			tavil->pullup_ref[micb_index]--;
+		if ((tavil->pullup_ref[micb_index] == 0) &&
+		    (tavil->micb_ref[micb_index] == 0))
+			snd_soc_update_bits(codec, micb_reg, 0xC0, 0x00);
+		break;
+	case MICB_ENABLE:
+		tavil->micb_ref[micb_index]++;
+		if (tavil->micb_ref[micb_index] == 1) {
+			snd_soc_update_bits(codec, micb_reg, 0xC0, 0x40);
+			if (post_on_event && tavil->mbhc)
+				blocking_notifier_call_chain(
+						&tavil->mbhc->notifier,
+						post_on_event,
+						&tavil->mbhc->wcd_mbhc);
+		}
+		if (is_dapm && post_dapm_on && tavil->mbhc)
+			blocking_notifier_call_chain(&tavil->mbhc->notifier,
+					post_dapm_on, &tavil->mbhc->wcd_mbhc);
+		break;
+	case MICB_DISABLE:
+		if (tavil->micb_ref[micb_index] > 0)
+			tavil->micb_ref[micb_index]--;
+		if ((tavil->micb_ref[micb_index] == 0) &&
+		    (tavil->pullup_ref[micb_index] > 0))
+			snd_soc_update_bits(codec, micb_reg, 0xC0, 0x80);
+		else if ((tavil->micb_ref[micb_index] == 0) &&
+			 (tavil->pullup_ref[micb_index] == 0)) {
+			if (pre_off_event && tavil->mbhc)
+				blocking_notifier_call_chain(
+						&tavil->mbhc->notifier,
+						pre_off_event,
+						&tavil->mbhc->wcd_mbhc);
+			snd_soc_update_bits(codec, micb_reg, 0xC0, 0x00);
+			if (post_off_event && tavil->mbhc)
+				blocking_notifier_call_chain(
+						&tavil->mbhc->notifier,
+						post_off_event,
+						&tavil->mbhc->wcd_mbhc);
+		}
+		if (is_dapm && post_dapm_off && tavil->mbhc)
+			blocking_notifier_call_chain(&tavil->mbhc->notifier,
+					post_dapm_off, &tavil->mbhc->wcd_mbhc);
+		break;
+	}
+
+	dev_dbg(codec->dev, "%s: micb_num:%d, micb_ref: %d, pullup_ref: %d\n",
+		__func__, micb_num, tavil->micb_ref[micb_index],
+		tavil->pullup_ref[micb_index]);
+
+	mutex_unlock(&tavil->micb_lock);
+
+	return 0;
+}
+EXPORT_SYMBOL(tavil_micbias_control);
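+
+/*
+ * Illustrative pairing (requests must be balanced, since micb_ref and
+ * pullup_ref gate the actual register writes):
+ *
+ *	tavil_micbias_control(codec, MIC_BIAS_2, MICB_ENABLE, false);
+ *	...capture...
+ *	tavil_micbias_control(codec, MIC_BIAS_2, MICB_DISABLE, false);
+ */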
+
+static int __tavil_codec_enable_micbias(struct snd_soc_dapm_widget *w,
+					int event)
+{
+	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+	int micb_num;
+
+	dev_dbg(codec->dev, "%s: wname: %s, event: %d\n",
+		__func__, w->name, event);
+
+	if (strnstr(w->name, "MIC BIAS1", sizeof("MIC BIAS1")))
+		micb_num = MIC_BIAS_1;
+	else if (strnstr(w->name, "MIC BIAS2", sizeof("MIC BIAS2")))
+		micb_num = MIC_BIAS_2;
+	else if (strnstr(w->name, "MIC BIAS3", sizeof("MIC BIAS3")))
+		micb_num = MIC_BIAS_3;
+	else if (strnstr(w->name, "MIC BIAS4", sizeof("MIC BIAS4")))
+		micb_num = MIC_BIAS_4;
+	else
+		return -EINVAL;
+
+	switch (event) {
+	case SND_SOC_DAPM_PRE_PMU:
+		/*
+		 * MIC BIAS can also be requested by MBHC,
+		 * so use ref count to handle micbias pullup
+		 * and enable requests
+		 */
+		tavil_micbias_control(codec, micb_num, MICB_ENABLE, true);
+		break;
+	case SND_SOC_DAPM_POST_PMU:
+		/* wait for cnp time */
+		usleep_range(1000, 1100);
+		break;
+	case SND_SOC_DAPM_POST_PMD:
+		tavil_micbias_control(codec, micb_num, MICB_DISABLE, true);
+		break;
+	}
+
+	return 0;
+}
+
+/*
+ * tavil_codec_enable_standalone_micbias - enable micbias standalone
+ * @codec: pointer to codec instance
+ * @micb_num: micbias number to be enabled
+ * @enable: true to enable micbias or false to disable
+ *
+ * This function is used to enable micbias (1, 2, 3 or 4) in standalone
+ * mode, independent of whether a TX use-case is running.
+ *
+ * Return: error code in case of failure or 0 for success
+ */
+int tavil_codec_enable_standalone_micbias(struct snd_soc_codec *codec,
+					  int micb_num,
+					  bool enable)
+{
+	const char * const micb_names[] = {
+		DAPM_MICBIAS1_STANDALONE, DAPM_MICBIAS2_STANDALONE,
+		DAPM_MICBIAS3_STANDALONE, DAPM_MICBIAS4_STANDALONE
+	};
+	int micb_index = micb_num - 1;
+	int rc;
+
+	if (!codec) {
+		pr_err("%s: Codec memory is NULL\n", __func__);
+		return -EINVAL;
+	}
+
+	if ((micb_index < 0) || (micb_index > TAVIL_MAX_MICBIAS - 1)) {
+		dev_err(codec->dev, "%s: Invalid micbias index, micb_ind:%d\n",
+			__func__, micb_index);
+		return -EINVAL;
+	}
+
+	if (enable)
+		rc = snd_soc_dapm_force_enable_pin(
+						snd_soc_codec_get_dapm(codec),
+						micb_names[micb_index]);
+	else
+		rc = snd_soc_dapm_disable_pin(snd_soc_codec_get_dapm(codec),
+					      micb_names[micb_index]);
+
+	if (!rc)
+		snd_soc_dapm_sync(snd_soc_codec_get_dapm(codec));
+	else
+		dev_err(codec->dev, "%s: micbias%d force %s pin failed\n",
+			__func__, micb_num, (enable ? "enable" : "disable"));
+
+	return rc;
+}
+EXPORT_SYMBOL(tavil_codec_enable_standalone_micbias);
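+
+/*
+ * Illustrative use: force micbias2 on independent of any TX path, then
+ * release it once done:
+ *
+ *	tavil_codec_enable_standalone_micbias(codec, MIC_BIAS_2, true);
+ *	...
+ *	tavil_codec_enable_standalone_micbias(codec, MIC_BIAS_2, false);
+ */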
+
+static int tavil_codec_force_enable_micbias(struct snd_soc_dapm_widget *w,
+					    struct snd_kcontrol *kcontrol,
+					    int event)
+{
+	int ret = 0;
+	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+	struct tavil_priv *tavil = snd_soc_codec_get_drvdata(codec);
+
+	switch (event) {
+	case SND_SOC_DAPM_PRE_PMU:
+		wcd_resmgr_enable_master_bias(tavil->resmgr);
+		tavil_cdc_mclk_enable(codec, true);
+		ret = __tavil_codec_enable_micbias(w, SND_SOC_DAPM_PRE_PMU);
+		/* Wait for 1ms for better cnp */
+		usleep_range(1000, 1100);
+		tavil_cdc_mclk_enable(codec, false);
+		break;
+	case SND_SOC_DAPM_POST_PMD:
+		ret = __tavil_codec_enable_micbias(w, SND_SOC_DAPM_POST_PMD);
+		wcd_resmgr_disable_master_bias(tavil->resmgr);
+		break;
+	}
+
+	return ret;
+}
+
+static int tavil_codec_enable_micbias(struct snd_soc_dapm_widget *w,
+				      struct snd_kcontrol *kcontrol, int event)
+{
+	return __tavil_codec_enable_micbias(w, event);
+}
+
+static const struct reg_sequence tavil_hph_reset_tbl[] = {
+	{ WCD934X_HPH_CNP_EN, 0x80 },
+	{ WCD934X_HPH_CNP_WG_CTL, 0x9A },
+	{ WCD934X_HPH_CNP_WG_TIME, 0x14 },
+	{ WCD934X_HPH_OCP_CTL, 0x28 },
+	{ WCD934X_HPH_AUTO_CHOP, 0x16 },
+	{ WCD934X_HPH_CHOP_CTL, 0x83 },
+	{ WCD934X_HPH_PA_CTL1, 0x46 },
+	{ WCD934X_HPH_PA_CTL2, 0x50 },
+	{ WCD934X_HPH_L_EN, 0x80 },
+	{ WCD934X_HPH_L_TEST, 0xE0 },
+	{ WCD934X_HPH_L_ATEST, 0x50 },
+	{ WCD934X_HPH_R_EN, 0x80 },
+	{ WCD934X_HPH_R_TEST, 0xE0 },
+	{ WCD934X_HPH_R_ATEST, 0x54 },
+	{ WCD934X_HPH_RDAC_CLK_CTL1, 0x99 },
+	{ WCD934X_HPH_RDAC_CLK_CTL2, 0x9B },
+	{ WCD934X_HPH_RDAC_LDO_CTL, 0x33 },
+	{ WCD934X_HPH_RDAC_CHOP_CLK_LP_CTL, 0x00 },
+	{ WCD934X_HPH_REFBUFF_UHQA_CTL, 0xA8 },
+};
+
+static const struct reg_sequence tavil_hph_reset_tbl_1_0[] = {
+	{ WCD934X_HPH_REFBUFF_LP_CTL, 0x0A },
+	{ WCD934X_HPH_L_DAC_CTL, 0x00 },
+	{ WCD934X_HPH_R_DAC_CTL, 0x00 },
+	{ WCD934X_HPH_NEW_ANA_HPH2, 0x00 },
+	{ WCD934X_HPH_NEW_ANA_HPH3, 0x00 },
+	{ WCD934X_HPH_NEW_INT_RDAC_GAIN_CTL, 0x00 },
+	{ WCD934X_HPH_NEW_INT_RDAC_HD2_CTL, 0xA0 },
+	{ WCD934X_HPH_NEW_INT_RDAC_VREF_CTL, 0x10 },
+	{ WCD934X_HPH_NEW_INT_RDAC_OVERRIDE_CTL, 0x00 },
+	{ WCD934X_HPH_NEW_INT_RDAC_MISC1, 0x00 },
+	{ WCD934X_HPH_NEW_INT_PA_MISC1, 0x22 },
+	{ WCD934X_HPH_NEW_INT_PA_MISC2, 0x00 },
+	{ WCD934X_HPH_NEW_INT_PA_RDAC_MISC, 0x00 },
+	{ WCD934X_HPH_NEW_INT_HPH_TIMER1, 0xFE },
+	{ WCD934X_HPH_NEW_INT_HPH_TIMER2, 0x02 },
+	{ WCD934X_HPH_NEW_INT_HPH_TIMER3, 0x4E },
+	{ WCD934X_HPH_NEW_INT_HPH_TIMER4, 0x54 },
+	{ WCD934X_HPH_NEW_INT_PA_RDAC_MISC2, 0x00 },
+	{ WCD934X_HPH_NEW_INT_PA_RDAC_MISC3, 0x00 },
+};
+
+static const struct reg_sequence tavil_hph_reset_tbl_1_1[] = {
+	{ WCD934X_HPH_REFBUFF_LP_CTL, 0x0E },
+	{ WCD934X_HPH_L_DAC_CTL, 0x00 },
+	{ WCD934X_HPH_R_DAC_CTL, 0x00 },
+	{ WCD934X_HPH_NEW_ANA_HPH2, 0x00 },
+	{ WCD934X_HPH_NEW_ANA_HPH3, 0x00 },
+	{ WCD934X_HPH_NEW_INT_RDAC_GAIN_CTL, 0x40 },
+	{ WCD934X_HPH_NEW_INT_RDAC_HD2_CTL, 0x81 },
+	{ WCD934X_HPH_NEW_INT_RDAC_VREF_CTL, 0x10 },
+	{ WCD934X_HPH_NEW_INT_RDAC_OVERRIDE_CTL, 0x00 },
+	{ WCD934X_HPH_NEW_INT_RDAC_MISC1, 0x81 },
+	{ WCD934X_HPH_NEW_INT_PA_MISC1, 0x22 },
+	{ WCD934X_HPH_NEW_INT_PA_MISC2, 0x00 },
+	{ WCD934X_HPH_NEW_INT_PA_RDAC_MISC, 0x00 },
+	{ WCD934X_HPH_NEW_INT_HPH_TIMER1, 0xFE },
+	{ WCD934X_HPH_NEW_INT_HPH_TIMER2, 0x02 },
+	{ WCD934X_HPH_NEW_INT_HPH_TIMER3, 0x4E },
+	{ WCD934X_HPH_NEW_INT_HPH_TIMER4, 0x54 },
+	{ WCD934X_HPH_NEW_INT_PA_RDAC_MISC2, 0x00 },
+	{ WCD934X_HPH_NEW_INT_PA_RDAC_MISC3, 0x00 },
+};
+
+static const struct tavil_reg_mask_val tavil_pa_disable[] = {
+	{ WCD934X_CDC_RX1_RX_PATH_CTL, 0x30, 0x10 }, /* RX1 mute enable */
+	{ WCD934X_CDC_RX2_RX_PATH_CTL, 0x30, 0x10 }, /* RX2 mute enable */
+	{ WCD934X_HPH_CNP_WG_CTL, 0x80, 0x00 }, /* GM3 boost disable */
+	{ WCD934X_ANA_HPH, 0x80, 0x00 }, /* HPHL PA disable */
+	{ WCD934X_ANA_HPH, 0x40, 0x00 }, /* HPHR PA disable */
+	{ WCD934X_ANA_HPH, 0x20, 0x00 }, /* HPHL REF disable */
+	{ WCD934X_ANA_HPH, 0x10, 0x00 }, /* HPHR REF disable */
+};
+
+static const struct tavil_reg_mask_val tavil_ocp_en_seq[] = {
+	{ WCD934X_RX_OCP_CTL, 0x0F, 0x02 }, /* OCP number of attempts is 2 */
+	{ WCD934X_HPH_OCP_CTL, 0xFA, 0x3A }, /* OCP current limit */
+	{ WCD934X_HPH_L_TEST, 0x01, 0x01 }, /* Enable HPHL OCP */
+	{ WCD934X_HPH_R_TEST, 0x01, 0x01 }, /* Enable HPHR OCP */
+};
+
+static const struct tavil_reg_mask_val tavil_ocp_en_seq_1[] = {
+	{ WCD934X_RX_OCP_CTL, 0x0F, 0x02 }, /* OCP number of attempts is 2 */
+	{ WCD934X_HPH_OCP_CTL, 0xFA, 0x3A }, /* OCP current limit */
+};
+
+/* LO-HIFI */
+static const struct tavil_reg_mask_val tavil_pre_pa_en_lohifi[] = {
+	{ WCD934X_HPH_NEW_INT_HPH_TIMER1, 0x02, 0x00 },
+	{ WCD934X_FLYBACK_VNEG_CTRL_4, 0xf0, 0x80 },
+	{ WCD934X_HPH_NEW_INT_PA_MISC2, 0x20, 0x20 },
+	{ WCD934X_HPH_NEW_INT_RDAC_GAIN_CTL, 0xf0, 0x40 },
+	{ WCD934X_HPH_CNP_WG_CTL, 0x80, 0x00 },
+	{ WCD934X_RX_BIAS_HPH_LOWPOWER, 0xf0, 0xc0 },
+	{ WCD934X_HPH_PA_CTL1, 0x0e, 0x02 },
+	{ WCD934X_HPH_REFBUFF_LP_CTL, 0x06, 0x06 },
+};
+
+static const struct tavil_reg_mask_val tavil_pre_pa_en[] = {
+	{ WCD934X_HPH_NEW_INT_HPH_TIMER1, 0x02, 0x00 },
+	{ WCD934X_HPH_NEW_INT_PA_MISC2, 0x20, 0x0 },
+	{ WCD934X_HPH_NEW_INT_RDAC_GAIN_CTL, 0xf0, 0x40 },
+	{ WCD934X_HPH_CNP_WG_CTL, 0x80, 0x00 },
+	{ WCD934X_RX_BIAS_HPH_LOWPOWER, 0xf0, 0x80 },
+	{ WCD934X_HPH_PA_CTL1, 0x0e, 0x06 },
+	{ WCD934X_HPH_REFBUFF_LP_CTL, 0x06, 0x06 },
+};
+
+static const struct tavil_reg_mask_val tavil_post_pa_en[] = {
+	{ WCD934X_HPH_L_TEST, 0x01, 0x01 }, /* Enable HPHL OCP */
+	{ WCD934X_HPH_R_TEST, 0x01, 0x01 }, /* Enable HPHR OCP */
+	{ WCD934X_CDC_RX1_RX_PATH_CTL, 0x30, 0x20 }, /* RX1 mute disable */
+	{ WCD934X_CDC_RX2_RX_PATH_CTL, 0x30, 0x20 }, /* RX2 mute disable */
+	{ WCD934X_HPH_CNP_WG_CTL, 0x80, 0x80 }, /* GM3 boost enable */
+	{ WCD934X_HPH_NEW_INT_HPH_TIMER1, 0x02, 0x02 },
+};
+
+static void tavil_codec_hph_reg_range_read(struct regmap *map, u8 *buf)
+{
+	regmap_bulk_read(map, WCD934X_HPH_CNP_EN, buf, TAVIL_HPH_REG_RANGE_1);
+	regmap_bulk_read(map, WCD934X_HPH_NEW_ANA_HPH2,
+			 buf + TAVIL_HPH_REG_RANGE_1, TAVIL_HPH_REG_RANGE_2);
+	regmap_bulk_read(map, WCD934X_HPH_NEW_INT_RDAC_GAIN_CTL,
+			 buf + TAVIL_HPH_REG_RANGE_1 + TAVIL_HPH_REG_RANGE_2,
+			 TAVIL_HPH_REG_RANGE_3);
+}
+
+static void tavil_codec_hph_reg_recover(struct tavil_priv *tavil,
+					struct regmap *map, int pa_status)
+{
+	int i;
+	unsigned int reg;
+
+	blocking_notifier_call_chain(&tavil->mbhc->notifier,
+				     WCD_EVENT_OCP_OFF,
+				     &tavil->mbhc->wcd_mbhc);
+
+	if (pa_status & 0xC0)
+		goto pa_en_restore;
+
+	dev_dbg(tavil->dev, "%s: HPH PA in disable state (0x%x)\n",
+		__func__, pa_status);
+
+	regmap_write_bits(map, WCD934X_CDC_RX1_RX_PATH_CTL, 0x10, 0x10);
+	regmap_write_bits(map, WCD934X_CDC_RX2_RX_PATH_CTL, 0x10, 0x10);
+	regmap_write_bits(map, WCD934X_ANA_HPH, 0xC0, 0x00);
+	regmap_write_bits(map, WCD934X_ANA_HPH, 0x30, 0x00);
+	regmap_write_bits(map, WCD934X_CDC_RX1_RX_PATH_CTL, 0x10, 0x00);
+	regmap_write_bits(map, WCD934X_CDC_RX2_RX_PATH_CTL, 0x10, 0x00);
+
+	/* Restore to HW defaults */
+	regmap_multi_reg_write(map, tavil_hph_reset_tbl,
+			       ARRAY_SIZE(tavil_hph_reset_tbl));
+	if (TAVIL_IS_1_1(tavil->wcd9xxx))
+		regmap_multi_reg_write(map, tavil_hph_reset_tbl_1_1,
+				ARRAY_SIZE(tavil_hph_reset_tbl_1_1));
+	if (TAVIL_IS_1_0(tavil->wcd9xxx))
+		regmap_multi_reg_write(map, tavil_hph_reset_tbl_1_0,
+				ARRAY_SIZE(tavil_hph_reset_tbl_1_0));
+
+	for (i = 0; i < ARRAY_SIZE(tavil_ocp_en_seq); i++)
+		regmap_write_bits(map, tavil_ocp_en_seq[i].reg,
+				  tavil_ocp_en_seq[i].mask,
+				  tavil_ocp_en_seq[i].val);
+	goto end;
+
+pa_en_restore:
+	dev_dbg(tavil->dev, "%s: HPH PA in enable state (0x%x)\n",
+		__func__, pa_status);
+
+	/* Disable PA and other registers before restoring */
+	for (i = 0; i < ARRAY_SIZE(tavil_pa_disable); i++) {
+		if (TAVIL_IS_1_1(tavil->wcd9xxx) &&
+		    (tavil_pa_disable[i].reg == WCD934X_HPH_CNP_WG_CTL))
+			continue;
+		regmap_write_bits(map, tavil_pa_disable[i].reg,
+				  tavil_pa_disable[i].mask,
+				  tavil_pa_disable[i].val);
+	}
+
+	regmap_multi_reg_write(map, tavil_hph_reset_tbl,
+			       ARRAY_SIZE(tavil_hph_reset_tbl));
+	if (TAVIL_IS_1_1(tavil->wcd9xxx))
+		regmap_multi_reg_write(map, tavil_hph_reset_tbl_1_1,
+				ARRAY_SIZE(tavil_hph_reset_tbl_1_1));
+	if (TAVIL_IS_1_0(tavil->wcd9xxx))
+		regmap_multi_reg_write(map, tavil_hph_reset_tbl_1_0,
+				ARRAY_SIZE(tavil_hph_reset_tbl_1_0));
+
+	for (i = 0; i < ARRAY_SIZE(tavil_ocp_en_seq_1); i++)
+		regmap_write_bits(map, tavil_ocp_en_seq_1[i].reg,
+				  tavil_ocp_en_seq_1[i].mask,
+				  tavil_ocp_en_seq_1[i].val);
+
+	if (tavil->hph_mode == CLS_H_LOHIFI) {
+		for (i = 0; i < ARRAY_SIZE(tavil_pre_pa_en_lohifi); i++) {
+			reg = tavil_pre_pa_en_lohifi[i].reg;
+			if ((TAVIL_IS_1_1(tavil->wcd9xxx)) &&
+			    ((reg == WCD934X_HPH_NEW_INT_RDAC_GAIN_CTL) ||
+			     (reg == WCD934X_HPH_CNP_WG_CTL) ||
+			     (reg == WCD934X_HPH_REFBUFF_LP_CTL)))
+				continue;
+			regmap_write_bits(map,
+					  tavil_pre_pa_en_lohifi[i].reg,
+					  tavil_pre_pa_en_lohifi[i].mask,
+					  tavil_pre_pa_en_lohifi[i].val);
+		}
+	} else {
+		for (i = 0; i < ARRAY_SIZE(tavil_pre_pa_en); i++) {
+			reg = tavil_pre_pa_en[i].reg;
+			if ((TAVIL_IS_1_1(tavil->wcd9xxx)) &&
+			    ((reg == WCD934X_HPH_NEW_INT_RDAC_GAIN_CTL) ||
+			     (reg == WCD934X_HPH_CNP_WG_CTL) ||
+			     (reg == WCD934X_HPH_REFBUFF_LP_CTL)))
+				continue;
+			regmap_write_bits(map, tavil_pre_pa_en[i].reg,
+					  tavil_pre_pa_en[i].mask,
+					  tavil_pre_pa_en[i].val);
+		}
+	}
+
+	if (TAVIL_IS_1_1(tavil->wcd9xxx)) {
+		regmap_write(map, WCD934X_HPH_NEW_INT_RDAC_HD2_CTL_L, 0x84);
+		regmap_write(map, WCD934X_HPH_NEW_INT_RDAC_HD2_CTL_R, 0x84);
+	}
+
+	regmap_write_bits(map, WCD934X_ANA_HPH, 0x0C, pa_status & 0x0C);
+	regmap_write_bits(map, WCD934X_ANA_HPH, 0x30, 0x30);
+	/* wait for 100usec after HPH DAC is enabled */
+	usleep_range(100, 110);
+	regmap_write(map, WCD934X_ANA_HPH, pa_status);
+	/* Sleep for 7msec after PA is enabled */
+	usleep_range(7000, 7100);
+
+	for (i = 0; i < ARRAY_SIZE(tavil_post_pa_en); i++) {
+		if ((TAVIL_IS_1_1(tavil->wcd9xxx)) &&
+		    (tavil_post_pa_en[i].reg == WCD934X_HPH_CNP_WG_CTL))
+			continue;
+		regmap_write_bits(map, tavil_post_pa_en[i].reg,
+				  tavil_post_pa_en[i].mask,
+				  tavil_post_pa_en[i].val);
+	}
+
+end:
+	tavil->mbhc->is_hph_recover = true;
+	blocking_notifier_call_chain(
+			&tavil->mbhc->notifier,
+			WCD_EVENT_OCP_ON,
+			&tavil->mbhc->wcd_mbhc);
+}
+
+static int tavil_codec_reset_hph_registers(struct snd_soc_dapm_widget *w,
+					   struct snd_kcontrol *kcontrol,
+					   int event)
+{
+	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+	struct tavil_priv *tavil = snd_soc_codec_get_drvdata(codec);
+	struct wcd9xxx *wcd9xxx = dev_get_drvdata(codec->dev->parent);
+	u8 cache_val[TAVIL_HPH_TOTAL_REG];
+	u8 hw_val[TAVIL_HPH_TOTAL_REG];
+	unsigned int pa_status;
+	int ret;
+
+	dev_dbg(wcd9xxx->dev, "%s: event: %d\n", __func__, event);
+
+	switch (event) {
+	case SND_SOC_DAPM_PRE_PMU:
+		memset(cache_val, 0, TAVIL_HPH_TOTAL_REG);
+		memset(hw_val, 0, TAVIL_HPH_TOTAL_REG);
+
+		regmap_read(wcd9xxx->regmap, WCD934X_ANA_HPH, &pa_status);
+
+		tavil_codec_hph_reg_range_read(wcd9xxx->regmap, cache_val);
+
+		/* Read register values from HW directly */
+		regcache_cache_bypass(wcd9xxx->regmap, true);
+		tavil_codec_hph_reg_range_read(wcd9xxx->regmap, hw_val);
+		regcache_cache_bypass(wcd9xxx->regmap, false);
+
+		/* compare both the registers to know if there is corruption */
+		ret = memcmp(cache_val, hw_val, TAVIL_HPH_TOTAL_REG);
+
+		/* A non-zero memcmp result means cache and HW values differ */
+		if (ret) {
+			dev_dbg(codec->dev, "%s: cache and hw reg are not same\n",
+				__func__);
+			tavil_codec_hph_reg_recover(tavil, wcd9xxx->regmap,
+						    pa_status);
+		} else {
+			dev_dbg(codec->dev, "%s: cache and hw reg are same\n",
+				__func__);
+			tavil->mbhc->is_hph_recover = false;
+		}
+		break;
+	default:
+		break;
+	}
+
+	return 0;
+}
+
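+/*
+ * Sidetone IIR band enable controls: the kcontrol's reg field carries
+ * the IIR index and the shift field the band index; each band has one
+ * enable bit in the IIR_CTL register of its filter.
+ */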
+static int tavil_iir_enable_audio_mixer_get(struct snd_kcontrol *kcontrol,
+					struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
+	int iir_idx = ((struct soc_multi_mixer_control *)
+					kcontrol->private_value)->reg;
+	int band_idx = ((struct soc_multi_mixer_control *)
+					kcontrol->private_value)->shift;
+	/* IIR filter band registers are at integer multiples of 16 */
+	u16 iir_reg = WCD934X_CDC_SIDETONE_IIR0_IIR_CTL + 16 * iir_idx;
+
+	ucontrol->value.integer.value[0] = (snd_soc_read(codec, iir_reg) &
+					    (1 << band_idx)) != 0;
+
+	dev_dbg(codec->dev, "%s: IIR #%d band #%d enable %d\n", __func__,
+		iir_idx, band_idx,
+		(uint32_t)ucontrol->value.integer.value[0]);
+	return 0;
+}
+
+static int tavil_iir_enable_audio_mixer_put(struct snd_kcontrol *kcontrol,
+					struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
+	int iir_idx = ((struct soc_multi_mixer_control *)
+					kcontrol->private_value)->reg;
+	int band_idx = ((struct soc_multi_mixer_control *)
+					kcontrol->private_value)->shift;
+	bool iir_band_en_status;
+	int value = ucontrol->value.integer.value[0];
+	u16 iir_reg = WCD934X_CDC_SIDETONE_IIR0_IIR_CTL + 16 * iir_idx;
+
+	/* Update only this band's enable bit; the top 3 bits are reserved */
+	snd_soc_update_bits(codec, iir_reg, (1 << band_idx),
+			    (value << band_idx));
+
+	iir_band_en_status = ((snd_soc_read(codec, iir_reg) &
+			      (1 << band_idx)) != 0);
+	dev_dbg(codec->dev, "%s: IIR #%d band #%d enable %d\n", __func__,
+		iir_idx, band_idx, iir_band_en_status);
+	return 0;
+}
+
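+/*
+ * Read back one 32-bit coefficient: the byte offset is written to the
+ * B1_CTL register before each read, then one byte is read from B2_CTL;
+ * the four bytes are assembled LSB first and the two reserved top bits
+ * of the final byte are masked off.
+ */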
+static uint32_t get_iir_band_coeff(struct snd_soc_codec *codec,
+				   int iir_idx, int band_idx,
+				   int coeff_idx)
+{
+	uint32_t value = 0;
+
+	/* Address does not auto-increment on reads; set it per read */
+	snd_soc_write(codec,
+		(WCD934X_CDC_SIDETONE_IIR0_IIR_COEF_B1_CTL + 16 * iir_idx),
+		((band_idx * BAND_MAX + coeff_idx)
+		* sizeof(uint32_t)) & 0x7F);
+
+	value |= snd_soc_read(codec,
+		(WCD934X_CDC_SIDETONE_IIR0_IIR_COEF_B2_CTL + 16 * iir_idx));
+
+	snd_soc_write(codec,
+		(WCD934X_CDC_SIDETONE_IIR0_IIR_COEF_B1_CTL + 16 * iir_idx),
+		((band_idx * BAND_MAX + coeff_idx)
+		* sizeof(uint32_t) + 1) & 0x7F);
+
+	value |= (snd_soc_read(codec,
+			       (WCD934X_CDC_SIDETONE_IIR0_IIR_COEF_B2_CTL +
+				16 * iir_idx)) << 8);
+
+	snd_soc_write(codec,
+		(WCD934X_CDC_SIDETONE_IIR0_IIR_COEF_B1_CTL + 16 * iir_idx),
+		((band_idx * BAND_MAX + coeff_idx)
+		* sizeof(uint32_t) + 2) & 0x7F);
+
+	value |= (snd_soc_read(codec,
+			       (WCD934X_CDC_SIDETONE_IIR0_IIR_COEF_B2_CTL +
+				16 * iir_idx)) << 16);
+
+	snd_soc_write(codec,
+		(WCD934X_CDC_SIDETONE_IIR0_IIR_COEF_B1_CTL + 16 * iir_idx),
+		((band_idx * BAND_MAX + coeff_idx)
+		* sizeof(uint32_t) + 3) & 0x7F);
+
+	/* Mask the top 2 bits since they are reserved */
+	value |= ((snd_soc_read(codec,
+				(WCD934X_CDC_SIDETONE_IIR0_IIR_COEF_B2_CTL +
+				 16 * iir_idx)) & 0x3F) << 24);
+
+	return value;
+}
+
+static int tavil_iir_band_audio_mixer_get(struct snd_kcontrol *kcontrol,
+					struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
+	int iir_idx = ((struct soc_multi_mixer_control *)
+					kcontrol->private_value)->reg;
+	int band_idx = ((struct soc_multi_mixer_control *)
+					kcontrol->private_value)->shift;
+
+	ucontrol->value.integer.value[0] =
+		get_iir_band_coeff(codec, iir_idx, band_idx, 0);
+	ucontrol->value.integer.value[1] =
+		get_iir_band_coeff(codec, iir_idx, band_idx, 1);
+	ucontrol->value.integer.value[2] =
+		get_iir_band_coeff(codec, iir_idx, band_idx, 2);
+	ucontrol->value.integer.value[3] =
+		get_iir_band_coeff(codec, iir_idx, band_idx, 3);
+	ucontrol->value.integer.value[4] =
+		get_iir_band_coeff(codec, iir_idx, band_idx, 4);
+
+	dev_dbg(codec->dev, "%s: IIR #%d band #%d b0 = 0x%x\n"
+		"%s: IIR #%d band #%d b1 = 0x%x\n"
+		"%s: IIR #%d band #%d b2 = 0x%x\n"
+		"%s: IIR #%d band #%d a1 = 0x%x\n"
+		"%s: IIR #%d band #%d a2 = 0x%x\n",
+		__func__, iir_idx, band_idx,
+		(uint32_t)ucontrol->value.integer.value[0],
+		__func__, iir_idx, band_idx,
+		(uint32_t)ucontrol->value.integer.value[1],
+		__func__, iir_idx, band_idx,
+		(uint32_t)ucontrol->value.integer.value[2],
+		__func__, iir_idx, band_idx,
+		(uint32_t)ucontrol->value.integer.value[3],
+		__func__, iir_idx, band_idx,
+		(uint32_t)ucontrol->value.integer.value[4]);
+	return 0;
+}
+
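+/*
+ * Write one 32-bit coefficient as four bytes, LSB first, to B2_CTL;
+ * the coefficient address auto-increments on each write, so only the
+ * data bytes are sent here.
+ */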
+static void set_iir_band_coeff(struct snd_soc_codec *codec,
+			       int iir_idx, int band_idx,
+			       uint32_t value)
+{
+	snd_soc_write(codec,
+		(WCD934X_CDC_SIDETONE_IIR0_IIR_COEF_B2_CTL + 16 * iir_idx),
+		(value & 0xFF));
+
+	snd_soc_write(codec,
+		(WCD934X_CDC_SIDETONE_IIR0_IIR_COEF_B2_CTL + 16 * iir_idx),
+		(value >> 8) & 0xFF);
+
+	snd_soc_write(codec,
+		(WCD934X_CDC_SIDETONE_IIR0_IIR_COEF_B2_CTL + 16 * iir_idx),
+		(value >> 16) & 0xFF);
+
+	/* Mask the top 2 bits (reserved) */
+	snd_soc_write(codec,
+		(WCD934X_CDC_SIDETONE_IIR0_IIR_COEF_B2_CTL + 16 * iir_idx),
+		(value >> 24) & 0x3F);
+}
+
+static int tavil_iir_band_audio_mixer_put(struct snd_kcontrol *kcontrol,
+					struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
+	struct tavil_priv *tavil = snd_soc_codec_get_drvdata(codec);
+	int iir_idx = ((struct soc_multi_mixer_control *)
+					kcontrol->private_value)->reg;
+	int band_idx = ((struct soc_multi_mixer_control *)
+					kcontrol->private_value)->shift;
+	int coeff_idx;
+
+	/*
+	 * Mask the top bit as it is reserved.
+	 * The coefficient address auto-increments on each B2 write.
+	 */
+	snd_soc_write(codec,
+		(WCD934X_CDC_SIDETONE_IIR0_IIR_COEF_B1_CTL + 16 * iir_idx),
+		(band_idx * BAND_MAX * sizeof(uint32_t)) & 0x7F);
+
+	/* Store the coefficients in sidetone coeff array */
+	for (coeff_idx = 0; coeff_idx < WCD934X_CDC_SIDETONE_IIR_COEFF_MAX;
+		coeff_idx++) {
+		tavil->sidetone_coeff_array[iir_idx][band_idx][coeff_idx] =
+			ucontrol->value.integer.value[coeff_idx];
+		set_iir_band_coeff(codec, iir_idx, band_idx,
+			tavil->sidetone_coeff_array[iir_idx][band_idx]
+							[coeff_idx]);
+	}
+
+	pr_debug("%s: IIR #%d band #%d b0 = 0x%x\n"
+		"%s: IIR #%d band #%d b1 = 0x%x\n"
+		"%s: IIR #%d band #%d b2 = 0x%x\n"
+		"%s: IIR #%d band #%d a1 = 0x%x\n"
+		"%s: IIR #%d band #%d a2 = 0x%x\n",
+		__func__, iir_idx, band_idx,
+		get_iir_band_coeff(codec, iir_idx, band_idx, 0),
+		__func__, iir_idx, band_idx,
+		get_iir_band_coeff(codec, iir_idx, band_idx, 1),
+		__func__, iir_idx, band_idx,
+		get_iir_band_coeff(codec, iir_idx, band_idx, 2),
+		__func__, iir_idx, band_idx,
+		get_iir_band_coeff(codec, iir_idx, band_idx, 3),
+		__func__, iir_idx, band_idx,
+		get_iir_band_coeff(codec, iir_idx, band_idx, 4));
+	return 0;
+}
+
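+/*
+ * Rewrite all cached sidetone IIR coefficients of one filter block,
+ * e.g. after the hardware registers have been reset.
+ */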
+static void tavil_restore_iir_coeff(struct tavil_priv *tavil, int iir_idx)
+{
+	int band_idx = 0, coeff_idx = 0;
+	struct snd_soc_codec *codec = tavil->codec;
+
+	for (band_idx = 0; band_idx < BAND_MAX; band_idx++) {
+		snd_soc_write(codec,
+		(WCD934X_CDC_SIDETONE_IIR0_IIR_COEF_B1_CTL + 16 * iir_idx),
+		(band_idx * BAND_MAX * sizeof(uint32_t)) & 0x7F);
+
+		for (coeff_idx = 0;
+			coeff_idx < WCD934X_CDC_SIDETONE_IIR_COEFF_MAX;
+			coeff_idx++) {
+			set_iir_band_coeff(codec, iir_idx, band_idx,
+				tavil->sidetone_coeff_array[iir_idx][band_idx]
+								[coeff_idx]);
+		}
+	}
+}
+
+static int tavil_compander_get(struct snd_kcontrol *kcontrol,
+			       struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
+	int comp = ((struct soc_multi_mixer_control *)
+		    kcontrol->private_value)->shift;
+	struct tavil_priv *tavil = snd_soc_codec_get_drvdata(codec);
+
+	ucontrol->value.integer.value[0] = tavil->comp_enabled[comp];
+	return 0;
+}
+
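+/*
+ * Enable/disable a compander. For COMP1 (HPHL) and COMP2 (HPHR) the
+ * gain source select bit (0x20) is cleared when the compander is
+ * enabled and set when it is disabled; the remaining companders need
+ * no extra register setup.
+ */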
+static int tavil_compander_put(struct snd_kcontrol *kcontrol,
+			       struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
+	struct tavil_priv *tavil = snd_soc_codec_get_drvdata(codec);
+	int comp = ((struct soc_multi_mixer_control *)
+		    kcontrol->private_value)->shift;
+	int value = ucontrol->value.integer.value[0];
+
+	dev_dbg(codec->dev, "%s: Compander %d enable current %d, new %d\n",
+		 __func__, comp + 1, tavil->comp_enabled[comp], value);
+	tavil->comp_enabled[comp] = value;
+
+	/* Any specific register configuration for compander */
+	switch (comp) {
+	case COMPANDER_1:
+		/* Set Gain Source Select based on compander enable/disable */
+		snd_soc_update_bits(codec, WCD934X_HPH_L_EN, 0x20,
+				(value ? 0x00 : 0x20));
+		break;
+	case COMPANDER_2:
+		snd_soc_update_bits(codec, WCD934X_HPH_R_EN, 0x20,
+				(value ? 0x00 : 0x20));
+		break;
+	case COMPANDER_3:
+	case COMPANDER_4:
+	case COMPANDER_7:
+	case COMPANDER_8:
+		break;
+	default:
+		/*
+		 * if compander is not enabled for any interpolator,
+		 * it does not cause any audio failure, so do not
+		 * return error in this case, but just print a log
+		 */
+		dev_warn(codec->dev, "%s: unknown compander: %d\n",
+			__func__, comp);
+	}
+	return 0;
+}
+
+static int tavil_hph_asrc_mode_put(struct snd_kcontrol *kcontrol,
+				   struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
+	struct tavil_priv *tavil = snd_soc_codec_get_drvdata(codec);
+	int index = -EINVAL;
+
+	if (!strcmp(kcontrol->id.name, "ASRC0 Output Mode"))
+		index = ASRC0;
+	if (!strcmp(kcontrol->id.name, "ASRC1 Output Mode"))
+		index = ASRC1;
+
+	if (tavil && (index >= 0) && (index < ASRC_MAX))
+		tavil->asrc_output_mode[index] =
+			ucontrol->value.integer.value[0];
+
+	return 0;
+}
+
+static int tavil_hph_asrc_mode_get(struct snd_kcontrol *kcontrol,
+				   struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
+	struct tavil_priv *tavil = snd_soc_codec_get_drvdata(codec);
+	int val = 0;
+	int index = -EINVAL;
+
+	if (!strcmp(kcontrol->id.name, "ASRC0 Output Mode"))
+		index = ASRC0;
+	if (!strcmp(kcontrol->id.name, "ASRC1 Output Mode"))
+		index = ASRC1;
+
+	if (tavil && (index >= 0) && (index < ASRC_MAX))
+		val = tavil->asrc_output_mode[index];
+
+	ucontrol->value.integer.value[0] = val;
+
+	return 0;
+}
+
+static int tavil_hph_idle_detect_get(struct snd_kcontrol *kcontrol,
+				     struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
+	struct tavil_priv *tavil = snd_soc_codec_get_drvdata(codec);
+	int val = 0;
+
+	if (tavil)
+		val = tavil->idle_det_cfg.hph_idle_detect_en;
+
+	ucontrol->value.integer.value[0] = val;
+
+	return 0;
+}
+
+static int tavil_hph_idle_detect_put(struct snd_kcontrol *kcontrol,
+				     struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
+	struct tavil_priv *tavil = snd_soc_codec_get_drvdata(codec);
+
+	if (tavil)
+		tavil->idle_det_cfg.hph_idle_detect_en =
+			ucontrol->value.integer.value[0];
+
+	return 0;
+}
+
+static int tavil_dmic_pin_mode_get(struct snd_kcontrol *kcontrol,
+				   struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
+	u16 dmic_pin;
+	u8 reg_val, pinctl_position;
+
+	pinctl_position = ((struct soc_multi_mixer_control *)
+					kcontrol->private_value)->shift;
+
+	dmic_pin = pinctl_position & 0x07;
+	reg_val = snd_soc_read(codec,
+			WCD934X_TLMM_DMIC1_CLK_PINCFG + dmic_pin - 1);
+
+	ucontrol->value.integer.value[0] = !!reg_val;
+
+	return 0;
+}
+
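+/*
+ * The kcontrol shift value encodes the DMIC pin: the upper bits
+ * (position >> 3) select one of the four PIN_CTL_OE registers and the
+ * low three bits give the pin within it. Mode 1 puts the pin in high-Z
+ * (input), mode 0 drives it as an output.
+ */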
+static int tavil_dmic_pin_mode_put(struct snd_kcontrol *kcontrol,
+				   struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
+	struct tavil_priv *tavil = snd_soc_codec_get_drvdata(codec);
+	u16 ctl_reg, cfg_reg, dmic_pin;
+	u8 ctl_val, cfg_val, pinctl_position, pinctl_mode, mask;
+
+	/* 0 - drive the pin high or low; 1 - high-Z */
+	pinctl_mode = ucontrol->value.integer.value[0];
+	pinctl_position = ((struct soc_multi_mixer_control *)
+					kcontrol->private_value)->shift;
+
+	switch (pinctl_position >> 3) {
+	case 0:
+		ctl_reg = WCD934X_TEST_DEBUG_PIN_CTL_OE_0;
+		break;
+	case 1:
+		ctl_reg = WCD934X_TEST_DEBUG_PIN_CTL_OE_1;
+		break;
+	case 2:
+		ctl_reg = WCD934X_TEST_DEBUG_PIN_CTL_OE_2;
+		break;
+	case 3:
+		ctl_reg = WCD934X_TEST_DEBUG_PIN_CTL_OE_3;
+		break;
+	default:
+		dev_err(codec->dev, "%s: Invalid pinctl position = %d\n",
+			__func__, pinctl_position);
+		return -EINVAL;
+	}
+
+	ctl_val = ~(pinctl_mode << (pinctl_position & 0x07));
+	mask = 1 << (pinctl_position & 0x07);
+	snd_soc_update_bits(codec, ctl_reg, mask, ctl_val);
+
+	dmic_pin = pinctl_position & 0x07;
+	cfg_reg = WCD934X_TLMM_DMIC1_CLK_PINCFG + dmic_pin - 1;
+	if (pinctl_mode) {
+		if (tavil->intf_type == WCD9XXX_INTERFACE_TYPE_SLIMBUS)
+			cfg_val = 0x6;
+		else
+			cfg_val = 0xD;
+	} else {
+		cfg_val = 0;
+	}
+	snd_soc_update_bits(codec, cfg_reg, 0x1F, cfg_val);
+
+	dev_dbg(codec->dev, "%s: reg=0x%x mask=0x%x val=%d reg=0x%x val=%d\n",
+			__func__, ctl_reg, mask, ctl_val, cfg_reg, cfg_val);
+
+	return 0;
+}
+
+static int tavil_amic_pwr_lvl_get(struct snd_kcontrol *kcontrol,
+				  struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
+	u16 amic_reg = 0;
+
+	if (!strcmp(kcontrol->id.name, "AMIC_1_2 PWR MODE"))
+		amic_reg = WCD934X_ANA_AMIC1;
+	if (!strcmp(kcontrol->id.name, "AMIC_3_4 PWR MODE"))
+		amic_reg = WCD934X_ANA_AMIC3;
+
+	if (amic_reg)
+		ucontrol->value.integer.value[0] =
+			(snd_soc_read(codec, amic_reg) &
+			 WCD934X_AMIC_PWR_LVL_MASK) >>
+			  WCD934X_AMIC_PWR_LVL_SHIFT;
+	return 0;
+}
+
+static int tavil_amic_pwr_lvl_put(struct snd_kcontrol *kcontrol,
+				  struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
+	u32 mode_val;
+	u16 amic_reg = 0;
+
+	mode_val = ucontrol->value.enumerated.item[0];
+
+	dev_dbg(codec->dev, "%s: mode: %d\n", __func__, mode_val);
+
+	if (!strcmp(kcontrol->id.name, "AMIC_1_2 PWR MODE"))
+		amic_reg = WCD934X_ANA_AMIC1;
+	if (!strcmp(kcontrol->id.name, "AMIC_3_4 PWR MODE"))
+		amic_reg = WCD934X_ANA_AMIC3;
+
+	if (amic_reg)
+		snd_soc_update_bits(codec, amic_reg, WCD934X_AMIC_PWR_LVL_MASK,
+				    mode_val << WCD934X_AMIC_PWR_LVL_SHIFT);
+	return 0;
+}
+
+static const char *const tavil_conn_mad_text[] = {
+	"NOTUSED1", "ADC1", "ADC2", "ADC3", "ADC4", "NOTUSED5",
+	"NOTUSED6", "NOTUSED2", "DMIC0", "DMIC1", "DMIC2", "DMIC3",
+	"DMIC4", "DMIC5", "NOTUSED3", "NOTUSED4"
+};
+
+static const struct soc_enum tavil_conn_mad_enum =
+	SOC_ENUM_SINGLE_EXT(ARRAY_SIZE(tavil_conn_mad_text),
+			    tavil_conn_mad_text);
+
+static int tavil_mad_input_get(struct snd_kcontrol *kcontrol,
+			       struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
+	u8 tavil_mad_input;
+
+	tavil_mad_input = snd_soc_read(codec, WCD934X_SOC_MAD_INP_SEL) & 0x0F;
+	ucontrol->value.integer.value[0] = tavil_mad_input;
+
+	dev_dbg(codec->dev, "%s: tavil_mad_input = %s\n", __func__,
+		tavil_conn_mad_text[tavil_mad_input]);
+
+	return 0;
+}
+
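+/*
+ * Select the MAD (mic activity detection) input: map the enum value to
+ * an AMIC or DMIC input widget, walk the machine driver's DAPM routes
+ * to find the MIC BIAS feeding that widget, then program the input
+ * select and micbias fields accordingly.
+ */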
+static int tavil_mad_input_put(struct snd_kcontrol *kcontrol,
+			       struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
+	struct snd_soc_card *card = codec->component.card;
+	u8 tavil_mad_input;
+	char mad_amic_input_widget[6];
+	const char *mad_input_widget;
+	const char *source_widget = NULL;
+	u32 adc, i, mic_bias_found = 0;
+	int ret = 0;
+	char *mad_input;
+	bool is_adc_input = false;
+
+	tavil_mad_input = ucontrol->value.integer.value[0];
+
+	if (tavil_mad_input >= ARRAY_SIZE(tavil_conn_mad_text)) {
+		dev_err(codec->dev,
+			"%s: tavil_mad_input = %d out of bounds\n",
+			__func__, tavil_mad_input);
+		return -EINVAL;
+	}
+
+	if (strnstr(tavil_conn_mad_text[tavil_mad_input], "NOTUSED",
+				sizeof("NOTUSED"))) {
+		dev_dbg(codec->dev,
+			"%s: Unsupported tavil_mad_input = %s\n",
+			__func__, tavil_conn_mad_text[tavil_mad_input]);
+		/* Make sure the MAD register is updated */
+		snd_soc_update_bits(codec, WCD934X_ANA_MAD_SETUP,
+				    0x88, 0x00);
+		return -EINVAL;
+	}
+
+	if (strnstr(tavil_conn_mad_text[tavil_mad_input],
+		    "ADC", sizeof("ADC"))) {
+		mad_input = strpbrk(tavil_conn_mad_text[tavil_mad_input],
+				    "1234");
+		if (!mad_input) {
+			dev_err(codec->dev, "%s: Invalid MAD input %s\n",
+				__func__, tavil_conn_mad_text[tavil_mad_input]);
+			return -EINVAL;
+		}
+
+		ret = kstrtouint(mad_input, 10, &adc);
+		if ((ret < 0) || (adc > 4)) {
+			dev_err(codec->dev, "%s: Invalid ADC = %s\n", __func__,
+				tavil_conn_mad_text[tavil_mad_input]);
+			return -EINVAL;
+		}
+
+		/* AMIC4 and AMIC5 share ADC4 */
+		if ((adc == 4) &&
+		    (snd_soc_read(codec, WCD934X_TX_NEW_AMIC_4_5_SEL) & 0x10))
+			adc = 5;
+
+		snprintf(mad_amic_input_widget, 6, "%s%u", "AMIC", adc);
+
+		mad_input_widget = mad_amic_input_widget;
+		is_adc_input = true;
+	} else {
+		/* DMIC type input widget */
+		mad_input_widget = tavil_conn_mad_text[tavil_mad_input];
+	}
+
+	dev_dbg(codec->dev,
+		"%s: tavil input widget = %s, adc_input = %s\n", __func__,
+		mad_input_widget, is_adc_input ? "true" : "false");
+
+	for (i = 0; i < card->num_of_dapm_routes; i++) {
+		if (!strcmp(card->of_dapm_routes[i].sink, mad_input_widget)) {
+			source_widget = card->of_dapm_routes[i].source;
+			if (!source_widget) {
+				dev_err(codec->dev,
+					"%s: invalid source widget\n",
+					__func__);
+				return -EINVAL;
+			}
+
+			if (strnstr(source_widget,
+				"MIC BIAS1", sizeof("MIC BIAS1"))) {
+				mic_bias_found = 1;
+				break;
+			} else if (strnstr(source_widget,
+				"MIC BIAS2", sizeof("MIC BIAS2"))) {
+				mic_bias_found = 2;
+				break;
+			} else if (strnstr(source_widget,
+				"MIC BIAS3", sizeof("MIC BIAS3"))) {
+				mic_bias_found = 3;
+				break;
+			} else if (strnstr(source_widget,
+				"MIC BIAS4", sizeof("MIC BIAS4"))) {
+				mic_bias_found = 4;
+				break;
+			}
+		}
+	}
+
+	if (!mic_bias_found) {
+		dev_err(codec->dev, "%s: mic bias not found for input %s\n",
+			__func__, mad_input_widget);
+		return -EINVAL;
+	}
+
+	dev_dbg(codec->dev, "%s: mic_bias found = %d\n", __func__,
+		mic_bias_found);
+
+	snd_soc_update_bits(codec, WCD934X_SOC_MAD_INP_SEL,
+			    0x0F, tavil_mad_input);
+	snd_soc_update_bits(codec, WCD934X_ANA_MAD_SETUP,
+			    0x07, mic_bias_found);
+	/* for all adc inputs, mad should be in micbias mode with BG enabled */
+	if (is_adc_input)
+		snd_soc_update_bits(codec, WCD934X_ANA_MAD_SETUP,
+				    0x88, 0x88);
+	else
+		snd_soc_update_bits(codec, WCD934X_ANA_MAD_SETUP,
+				    0x88, 0x00);
+	return 0;
+}
+
+static int tavil_ear_pa_gain_get(struct snd_kcontrol *kcontrol,
+				 struct snd_ctl_elem_value *ucontrol)
+{
+	u8 ear_pa_gain;
+	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
+
+	ear_pa_gain = snd_soc_read(codec, WCD934X_ANA_EAR);
+
+	ear_pa_gain = (ear_pa_gain & 0x70) >> 4;
+
+	ucontrol->value.integer.value[0] = ear_pa_gain;
+
+	dev_dbg(codec->dev, "%s: ear_pa_gain = 0x%x\n", __func__,
+		ear_pa_gain);
+
+	return 0;
+}
+
+static int tavil_ear_pa_gain_put(struct snd_kcontrol *kcontrol,
+				 struct snd_ctl_elem_value *ucontrol)
+{
+	u8 ear_pa_gain;
+	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
+
+	dev_dbg(codec->dev, "%s: ucontrol->value.integer.value[0]  = %ld\n",
+			__func__, ucontrol->value.integer.value[0]);
+
+	ear_pa_gain =  ucontrol->value.integer.value[0] << 4;
+
+	snd_soc_update_bits(codec, WCD934X_ANA_EAR, 0x70, ear_pa_gain);
+	return 0;
+}
+
+static int tavil_ear_spkr_pa_gain_get(struct snd_kcontrol *kcontrol,
+				      struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
+	struct tavil_priv *tavil = snd_soc_codec_get_drvdata(codec);
+
+	ucontrol->value.integer.value[0] = tavil->ear_spkr_gain;
+
+	dev_dbg(codec->dev, "%s: ucontrol->value.integer.value[0] = %ld\n",
+		__func__, ucontrol->value.integer.value[0]);
+
+	return 0;
+}
+
+static int tavil_ear_spkr_pa_gain_put(struct snd_kcontrol *kcontrol,
+				      struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
+	struct tavil_priv *tavil = snd_soc_codec_get_drvdata(codec);
+
+	tavil->ear_spkr_gain =  ucontrol->value.integer.value[0];
+
+	dev_dbg(codec->dev, "%s: gain = %d\n", __func__, tavil->ear_spkr_gain);
+
+	return 0;
+}
+
+static int tavil_rx_hph_mode_get(struct snd_kcontrol *kcontrol,
+				 struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
+	struct tavil_priv *tavil = snd_soc_codec_get_drvdata(codec);
+
+	ucontrol->value.integer.value[0] = tavil->hph_mode;
+	return 0;
+}
+
+static int tavil_rx_hph_mode_put(struct snd_kcontrol *kcontrol,
+				 struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
+	struct tavil_priv *tavil = snd_soc_codec_get_drvdata(codec);
+	u32 mode_val;
+
+	mode_val = ucontrol->value.enumerated.item[0];
+
+	dev_dbg(codec->dev, "%s: mode: %d\n", __func__, mode_val);
+
+	if (mode_val == 0) {
+		dev_warn(codec->dev, "%s: Invalid HPH Mode, default to Cls-H LOHiFi\n",
+			__func__);
+		mode_val = CLS_H_LOHIFI;
+	}
+	tavil->hph_mode = mode_val;
+	return 0;
+}
+
+static const char * const rx_hph_mode_mux_text[] = {
+	"CLS_H_INVALID", "CLS_H_HIFI", "CLS_H_LP", "CLS_AB", "CLS_H_LOHIFI",
+	"CLS_H_ULP", "CLS_AB_HIFI",
+};
+
+static const struct soc_enum rx_hph_mode_mux_enum =
+	SOC_ENUM_SINGLE_EXT(ARRAY_SIZE(rx_hph_mode_mux_text),
+			    rx_hph_mode_mux_text);
+
+static const char *const tavil_anc_func_text[] = {"OFF", "ON"};
+static const struct soc_enum tavil_anc_func_enum =
+	SOC_ENUM_SINGLE_EXT(2, tavil_anc_func_text);
+
+/* Cutoff frequency for high pass filter */
+static const char * const cf_text[] = {
+	"CF_NEG_3DB_4HZ", "CF_NEG_3DB_75HZ", "CF_NEG_3DB_150HZ"
+};
+
+static const char * const rx_cf_text[] = {
+	"CF_NEG_3DB_4HZ", "CF_NEG_3DB_75HZ", "CF_NEG_3DB_150HZ",
+	"CF_NEG_3DB_0P48HZ"
+};
+
+static const char * const amic_pwr_lvl_text[] = {
+	"LOW_PWR", "DEFAULT", "HIGH_PERF"
+};
+
+static const char * const hph_idle_detect_text[] = {
+	"OFF", "ON"
+};
+
+static const char * const asrc_mode_text[] = {
+	"INT", "FRAC"
+};
+
+static const char * const tavil_ear_pa_gain_text[] = {
+	"G_6_DB", "G_4P5_DB", "G_3_DB", "G_1P5_DB",
+	"G_0_DB", "G_M2P5_DB", "UNDEFINED", "G_M12_DB"
+};
+
+static const char * const tavil_ear_spkr_pa_gain_text[] = {
+	"G_DEFAULT", "G_0_DB", "G_1_DB", "G_2_DB", "G_3_DB",
+	"G_4_DB", "G_5_DB", "G_6_DB"
+};
+
+static SOC_ENUM_SINGLE_EXT_DECL(tavil_ear_pa_gain_enum, tavil_ear_pa_gain_text);
+static SOC_ENUM_SINGLE_EXT_DECL(tavil_ear_spkr_pa_gain_enum,
+				tavil_ear_spkr_pa_gain_text);
+static SOC_ENUM_SINGLE_EXT_DECL(amic_pwr_lvl_enum, amic_pwr_lvl_text);
+static SOC_ENUM_SINGLE_EXT_DECL(hph_idle_detect_enum, hph_idle_detect_text);
+static SOC_ENUM_SINGLE_EXT_DECL(asrc_mode_enum, asrc_mode_text);
+static SOC_ENUM_SINGLE_DECL(cf_dec0_enum, WCD934X_CDC_TX0_TX_PATH_CFG0, 5,
+							cf_text);
+static SOC_ENUM_SINGLE_DECL(cf_dec1_enum, WCD934X_CDC_TX1_TX_PATH_CFG0, 5,
+							cf_text);
+static SOC_ENUM_SINGLE_DECL(cf_dec2_enum, WCD934X_CDC_TX2_TX_PATH_CFG0, 5,
+							cf_text);
+static SOC_ENUM_SINGLE_DECL(cf_dec3_enum, WCD934X_CDC_TX3_TX_PATH_CFG0, 5,
+							cf_text);
+static SOC_ENUM_SINGLE_DECL(cf_dec4_enum, WCD934X_CDC_TX4_TX_PATH_CFG0, 5,
+							cf_text);
+static SOC_ENUM_SINGLE_DECL(cf_dec5_enum, WCD934X_CDC_TX5_TX_PATH_CFG0, 5,
+							cf_text);
+static SOC_ENUM_SINGLE_DECL(cf_dec6_enum, WCD934X_CDC_TX6_TX_PATH_CFG0, 5,
+							cf_text);
+static SOC_ENUM_SINGLE_DECL(cf_dec7_enum, WCD934X_CDC_TX7_TX_PATH_CFG0, 5,
+							cf_text);
+static SOC_ENUM_SINGLE_DECL(cf_dec8_enum, WCD934X_CDC_TX8_TX_PATH_CFG0, 5,
+							cf_text);
+static SOC_ENUM_SINGLE_DECL(cf_int0_1_enum, WCD934X_CDC_RX0_RX_PATH_CFG2, 0,
+							rx_cf_text);
+static SOC_ENUM_SINGLE_DECL(cf_int0_2_enum, WCD934X_CDC_RX0_RX_PATH_MIX_CFG, 2,
+							rx_cf_text);
+static SOC_ENUM_SINGLE_DECL(cf_int1_1_enum, WCD934X_CDC_RX1_RX_PATH_CFG2, 0,
+							rx_cf_text);
+static SOC_ENUM_SINGLE_DECL(cf_int1_2_enum, WCD934X_CDC_RX1_RX_PATH_MIX_CFG, 2,
+							rx_cf_text);
+static SOC_ENUM_SINGLE_DECL(cf_int2_1_enum, WCD934X_CDC_RX2_RX_PATH_CFG2, 0,
+							rx_cf_text);
+static SOC_ENUM_SINGLE_DECL(cf_int2_2_enum, WCD934X_CDC_RX2_RX_PATH_MIX_CFG, 2,
+							rx_cf_text);
+static SOC_ENUM_SINGLE_DECL(cf_int3_1_enum, WCD934X_CDC_RX3_RX_PATH_CFG2, 0,
+							rx_cf_text);
+static SOC_ENUM_SINGLE_DECL(cf_int3_2_enum, WCD934X_CDC_RX3_RX_PATH_MIX_CFG, 2,
+							rx_cf_text);
+static SOC_ENUM_SINGLE_DECL(cf_int4_1_enum, WCD934X_CDC_RX4_RX_PATH_CFG2, 0,
+							rx_cf_text);
+static SOC_ENUM_SINGLE_DECL(cf_int4_2_enum, WCD934X_CDC_RX4_RX_PATH_MIX_CFG, 2,
+							rx_cf_text);
+static SOC_ENUM_SINGLE_DECL(cf_int7_1_enum, WCD934X_CDC_RX7_RX_PATH_CFG2, 0,
+							rx_cf_text);
+static SOC_ENUM_SINGLE_DECL(cf_int7_2_enum, WCD934X_CDC_RX7_RX_PATH_MIX_CFG, 2,
+							rx_cf_text);
+static SOC_ENUM_SINGLE_DECL(cf_int8_1_enum, WCD934X_CDC_RX8_RX_PATH_CFG2, 0,
+							rx_cf_text);
+static SOC_ENUM_SINGLE_DECL(cf_int8_2_enum, WCD934X_CDC_RX8_RX_PATH_MIX_CFG, 2,
+							rx_cf_text);
+
+static const struct snd_kcontrol_new tavil_snd_controls[] = {
+	SOC_ENUM_EXT("EAR PA Gain", tavil_ear_pa_gain_enum,
+		tavil_ear_pa_gain_get, tavil_ear_pa_gain_put),
+	SOC_ENUM_EXT("EAR SPKR PA Gain", tavil_ear_spkr_pa_gain_enum,
+		     tavil_ear_spkr_pa_gain_get, tavil_ear_spkr_pa_gain_put),
+	SOC_SINGLE_TLV("HPHL Volume", WCD934X_HPH_L_EN, 0, 20, 1, line_gain),
+	SOC_SINGLE_TLV("HPHR Volume", WCD934X_HPH_R_EN, 0, 20, 1, line_gain),
+	SOC_SINGLE_TLV("LINEOUT1 Volume", WCD934X_DIFF_LO_LO1_COMPANDER,
+		3, 16, 1, line_gain),
+	SOC_SINGLE_TLV("LINEOUT2 Volume", WCD934X_DIFF_LO_LO2_COMPANDER,
+		3, 16, 1, line_gain),
+	SOC_SINGLE_TLV("ADC1 Volume", WCD934X_ANA_AMIC1, 0, 20, 0, analog_gain),
+	SOC_SINGLE_TLV("ADC2 Volume", WCD934X_ANA_AMIC2, 0, 20, 0, analog_gain),
+	SOC_SINGLE_TLV("ADC3 Volume", WCD934X_ANA_AMIC3, 0, 20, 0, analog_gain),
+	SOC_SINGLE_TLV("ADC4 Volume", WCD934X_ANA_AMIC4, 0, 20, 0, analog_gain),
+
+	SOC_SINGLE_SX_TLV("RX0 Digital Volume", WCD934X_CDC_RX0_RX_VOL_CTL,
+		0, -84, 40, digital_gain), /* -84dB min - 40dB max */
+	SOC_SINGLE_SX_TLV("RX1 Digital Volume", WCD934X_CDC_RX1_RX_VOL_CTL,
+		0, -84, 40, digital_gain),
+	SOC_SINGLE_SX_TLV("RX2 Digital Volume", WCD934X_CDC_RX2_RX_VOL_CTL,
+		0, -84, 40, digital_gain),
+	SOC_SINGLE_SX_TLV("RX3 Digital Volume", WCD934X_CDC_RX3_RX_VOL_CTL,
+		0, -84, 40, digital_gain),
+	SOC_SINGLE_SX_TLV("RX4 Digital Volume", WCD934X_CDC_RX4_RX_VOL_CTL,
+		0, -84, 40, digital_gain),
+	SOC_SINGLE_SX_TLV("RX7 Digital Volume", WCD934X_CDC_RX7_RX_VOL_CTL,
+		0, -84, 40, digital_gain),
+	SOC_SINGLE_SX_TLV("RX8 Digital Volume", WCD934X_CDC_RX8_RX_VOL_CTL,
+		0, -84, 40, digital_gain),
+	SOC_SINGLE_SX_TLV("RX0 Mix Digital Volume",
+		WCD934X_CDC_RX0_RX_VOL_MIX_CTL, 0, -84, 40, digital_gain),
+	SOC_SINGLE_SX_TLV("RX1 Mix Digital Volume",
+		WCD934X_CDC_RX1_RX_VOL_MIX_CTL, 0, -84, 40, digital_gain),
+	SOC_SINGLE_SX_TLV("RX2 Mix Digital Volume",
+		WCD934X_CDC_RX2_RX_VOL_MIX_CTL, 0, -84, 40, digital_gain),
+	SOC_SINGLE_SX_TLV("RX3 Mix Digital Volume",
+		WCD934X_CDC_RX3_RX_VOL_MIX_CTL, 0, -84, 40, digital_gain),
+	SOC_SINGLE_SX_TLV("RX4 Mix Digital Volume",
+		WCD934X_CDC_RX4_RX_VOL_MIX_CTL, 0, -84, 40, digital_gain),
+	SOC_SINGLE_SX_TLV("RX7 Mix Digital Volume",
+		WCD934X_CDC_RX7_RX_VOL_MIX_CTL, 0, -84, 40, digital_gain),
+	SOC_SINGLE_SX_TLV("RX8 Mix Digital Volume",
+		WCD934X_CDC_RX8_RX_VOL_MIX_CTL, 0, -84, 40, digital_gain),
+
+	SOC_SINGLE_SX_TLV("DEC0 Volume", WCD934X_CDC_TX0_TX_VOL_CTL, 0,
+		-84, 40, digital_gain),
+	SOC_SINGLE_SX_TLV("DEC1 Volume", WCD934X_CDC_TX1_TX_VOL_CTL, 0,
+		-84, 40, digital_gain),
+	SOC_SINGLE_SX_TLV("DEC2 Volume", WCD934X_CDC_TX2_TX_VOL_CTL, 0,
+		-84, 40, digital_gain),
+	SOC_SINGLE_SX_TLV("DEC3 Volume", WCD934X_CDC_TX3_TX_VOL_CTL, 0,
+		-84, 40, digital_gain),
+	SOC_SINGLE_SX_TLV("DEC4 Volume", WCD934X_CDC_TX4_TX_VOL_CTL, 0,
+		-84, 40, digital_gain),
+	SOC_SINGLE_SX_TLV("DEC5 Volume", WCD934X_CDC_TX5_TX_VOL_CTL, 0,
+		-84, 40, digital_gain),
+	SOC_SINGLE_SX_TLV("DEC6 Volume", WCD934X_CDC_TX6_TX_VOL_CTL, 0,
+		-84, 40, digital_gain),
+	SOC_SINGLE_SX_TLV("DEC7 Volume", WCD934X_CDC_TX7_TX_VOL_CTL, 0,
+		-84, 40, digital_gain),
+	SOC_SINGLE_SX_TLV("DEC8 Volume", WCD934X_CDC_TX8_TX_VOL_CTL, 0,
+		-84, 40, digital_gain),
+
+	SOC_SINGLE_SX_TLV("IIR0 INP0 Volume",
+		WCD934X_CDC_SIDETONE_IIR0_IIR_GAIN_B1_CTL, 0, -84, 40,
+		digital_gain),
+	SOC_SINGLE_SX_TLV("IIR0 INP1 Volume",
+		WCD934X_CDC_SIDETONE_IIR0_IIR_GAIN_B2_CTL, 0, -84, 40,
+		digital_gain),
+	SOC_SINGLE_SX_TLV("IIR0 INP2 Volume",
+		WCD934X_CDC_SIDETONE_IIR0_IIR_GAIN_B3_CTL, 0, -84, 40,
+		digital_gain),
+	SOC_SINGLE_SX_TLV("IIR0 INP3 Volume",
+		WCD934X_CDC_SIDETONE_IIR0_IIR_GAIN_B4_CTL, 0, -84, 40,
+		digital_gain),
+	SOC_SINGLE_SX_TLV("IIR1 INP0 Volume",
+		WCD934X_CDC_SIDETONE_IIR1_IIR_GAIN_B1_CTL, 0, -84, 40,
+		digital_gain),
+	SOC_SINGLE_SX_TLV("IIR1 INP1 Volume",
+		WCD934X_CDC_SIDETONE_IIR1_IIR_GAIN_B2_CTL, 0, -84, 40,
+		digital_gain),
+	SOC_SINGLE_SX_TLV("IIR1 INP2 Volume",
+		WCD934X_CDC_SIDETONE_IIR1_IIR_GAIN_B3_CTL, 0, -84, 40,
+		digital_gain),
+	SOC_SINGLE_SX_TLV("IIR1 INP3 Volume",
+		WCD934X_CDC_SIDETONE_IIR1_IIR_GAIN_B4_CTL, 0, -84, 40,
+		digital_gain),
+
+	SOC_SINGLE_EXT("ANC Slot", SND_SOC_NOPM, 0, 100, 0, tavil_get_anc_slot,
+		tavil_put_anc_slot),
+	SOC_ENUM_EXT("ANC Function", tavil_anc_func_enum, tavil_get_anc_func,
+		tavil_put_anc_func),
+
+	SOC_ENUM("TX0 HPF cut off", cf_dec0_enum),
+	SOC_ENUM("TX1 HPF cut off", cf_dec1_enum),
+	SOC_ENUM("TX2 HPF cut off", cf_dec2_enum),
+	SOC_ENUM("TX3 HPF cut off", cf_dec3_enum),
+	SOC_ENUM("TX4 HPF cut off", cf_dec4_enum),
+	SOC_ENUM("TX5 HPF cut off", cf_dec5_enum),
+	SOC_ENUM("TX6 HPF cut off", cf_dec6_enum),
+	SOC_ENUM("TX7 HPF cut off", cf_dec7_enum),
+	SOC_ENUM("TX8 HPF cut off", cf_dec8_enum),
+
+	SOC_ENUM("RX INT0_1 HPF cut off", cf_int0_1_enum),
+	SOC_ENUM("RX INT0_2 HPF cut off", cf_int0_2_enum),
+	SOC_ENUM("RX INT1_1 HPF cut off", cf_int1_1_enum),
+	SOC_ENUM("RX INT1_2 HPF cut off", cf_int1_2_enum),
+	SOC_ENUM("RX INT2_1 HPF cut off", cf_int2_1_enum),
+	SOC_ENUM("RX INT2_2 HPF cut off", cf_int2_2_enum),
+	SOC_ENUM("RX INT3_1 HPF cut off", cf_int3_1_enum),
+	SOC_ENUM("RX INT3_2 HPF cut off", cf_int3_2_enum),
+	SOC_ENUM("RX INT4_1 HPF cut off", cf_int4_1_enum),
+	SOC_ENUM("RX INT4_2 HPF cut off", cf_int4_2_enum),
+	SOC_ENUM("RX INT7_1 HPF cut off", cf_int7_1_enum),
+	SOC_ENUM("RX INT7_2 HPF cut off", cf_int7_2_enum),
+	SOC_ENUM("RX INT8_1 HPF cut off", cf_int8_1_enum),
+	SOC_ENUM("RX INT8_2 HPF cut off", cf_int8_2_enum),
+
+	SOC_ENUM_EXT("RX HPH Mode", rx_hph_mode_mux_enum,
+		tavil_rx_hph_mode_get, tavil_rx_hph_mode_put),
+
+	SOC_SINGLE_EXT("IIR0 Enable Band1", IIR0, BAND1, 1, 0,
+		tavil_iir_enable_audio_mixer_get,
+		tavil_iir_enable_audio_mixer_put),
+	SOC_SINGLE_EXT("IIR0 Enable Band2", IIR0, BAND2, 1, 0,
+		tavil_iir_enable_audio_mixer_get,
+		tavil_iir_enable_audio_mixer_put),
+	SOC_SINGLE_EXT("IIR0 Enable Band3", IIR0, BAND3, 1, 0,
+		tavil_iir_enable_audio_mixer_get,
+		tavil_iir_enable_audio_mixer_put),
+	SOC_SINGLE_EXT("IIR0 Enable Band4", IIR0, BAND4, 1, 0,
+		tavil_iir_enable_audio_mixer_get,
+		tavil_iir_enable_audio_mixer_put),
+	SOC_SINGLE_EXT("IIR0 Enable Band5", IIR0, BAND5, 1, 0,
+		tavil_iir_enable_audio_mixer_get,
+		tavil_iir_enable_audio_mixer_put),
+	SOC_SINGLE_EXT("IIR1 Enable Band1", IIR1, BAND1, 1, 0,
+		tavil_iir_enable_audio_mixer_get,
+		tavil_iir_enable_audio_mixer_put),
+	SOC_SINGLE_EXT("IIR1 Enable Band2", IIR1, BAND2, 1, 0,
+		tavil_iir_enable_audio_mixer_get,
+		tavil_iir_enable_audio_mixer_put),
+	SOC_SINGLE_EXT("IIR1 Enable Band3", IIR1, BAND3, 1, 0,
+		tavil_iir_enable_audio_mixer_get,
+		tavil_iir_enable_audio_mixer_put),
+	SOC_SINGLE_EXT("IIR1 Enable Band4", IIR1, BAND4, 1, 0,
+		tavil_iir_enable_audio_mixer_get,
+		tavil_iir_enable_audio_mixer_put),
+	SOC_SINGLE_EXT("IIR1 Enable Band5", IIR1, BAND5, 1, 0,
+		tavil_iir_enable_audio_mixer_get,
+		tavil_iir_enable_audio_mixer_put),
+
+	SOC_SINGLE_MULTI_EXT("IIR0 Band1", IIR0, BAND1, 255, 0, 5,
+		tavil_iir_band_audio_mixer_get, tavil_iir_band_audio_mixer_put),
+	SOC_SINGLE_MULTI_EXT("IIR0 Band2", IIR0, BAND2, 255, 0, 5,
+		tavil_iir_band_audio_mixer_get, tavil_iir_band_audio_mixer_put),
+	SOC_SINGLE_MULTI_EXT("IIR0 Band3", IIR0, BAND3, 255, 0, 5,
+		tavil_iir_band_audio_mixer_get, tavil_iir_band_audio_mixer_put),
+	SOC_SINGLE_MULTI_EXT("IIR0 Band4", IIR0, BAND4, 255, 0, 5,
+		tavil_iir_band_audio_mixer_get, tavil_iir_band_audio_mixer_put),
+	SOC_SINGLE_MULTI_EXT("IIR0 Band5", IIR0, BAND5, 255, 0, 5,
+		tavil_iir_band_audio_mixer_get, tavil_iir_band_audio_mixer_put),
+	SOC_SINGLE_MULTI_EXT("IIR1 Band1", IIR1, BAND1, 255, 0, 5,
+		tavil_iir_band_audio_mixer_get, tavil_iir_band_audio_mixer_put),
+	SOC_SINGLE_MULTI_EXT("IIR1 Band2", IIR1, BAND2, 255, 0, 5,
+		tavil_iir_band_audio_mixer_get, tavil_iir_band_audio_mixer_put),
+	SOC_SINGLE_MULTI_EXT("IIR1 Band3", IIR1, BAND3, 255, 0, 5,
+		tavil_iir_band_audio_mixer_get, tavil_iir_band_audio_mixer_put),
+	SOC_SINGLE_MULTI_EXT("IIR1 Band4", IIR1, BAND4, 255, 0, 5,
+		tavil_iir_band_audio_mixer_get, tavil_iir_band_audio_mixer_put),
+	SOC_SINGLE_MULTI_EXT("IIR1 Band5", IIR1, BAND5, 255, 0, 5,
+		tavil_iir_band_audio_mixer_get, tavil_iir_band_audio_mixer_put),
+
+	SOC_SINGLE_EXT("COMP1 Switch", SND_SOC_NOPM, COMPANDER_1, 1, 0,
+		tavil_compander_get, tavil_compander_put),
+	SOC_SINGLE_EXT("COMP2 Switch", SND_SOC_NOPM, COMPANDER_2, 1, 0,
+		tavil_compander_get, tavil_compander_put),
+	SOC_SINGLE_EXT("COMP3 Switch", SND_SOC_NOPM, COMPANDER_3, 1, 0,
+		tavil_compander_get, tavil_compander_put),
+	SOC_SINGLE_EXT("COMP4 Switch", SND_SOC_NOPM, COMPANDER_4, 1, 0,
+		tavil_compander_get, tavil_compander_put),
+	SOC_SINGLE_EXT("COMP7 Switch", SND_SOC_NOPM, COMPANDER_7, 1, 0,
+		tavil_compander_get, tavil_compander_put),
+	SOC_SINGLE_EXT("COMP8 Switch", SND_SOC_NOPM, COMPANDER_8, 1, 0,
+		tavil_compander_get, tavil_compander_put),
+
+	SOC_ENUM_EXT("ASRC0 Output Mode", asrc_mode_enum,
+		tavil_hph_asrc_mode_get, tavil_hph_asrc_mode_put),
+	SOC_ENUM_EXT("ASRC1 Output Mode", asrc_mode_enum,
+		tavil_hph_asrc_mode_get, tavil_hph_asrc_mode_put),
+
+	SOC_ENUM_EXT("HPH Idle Detect", hph_idle_detect_enum,
+		tavil_hph_idle_detect_get, tavil_hph_idle_detect_put),
+
+	SOC_ENUM_EXT("MAD Input", tavil_conn_mad_enum,
+		     tavil_mad_input_get, tavil_mad_input_put),
+
+	SOC_SINGLE_EXT("DMIC1_CLK_PIN_MODE", SND_SOC_NOPM, 17, 1, 0,
+		tavil_dmic_pin_mode_get, tavil_dmic_pin_mode_put),
+
+	SOC_SINGLE_EXT("DMIC1_DATA_PIN_MODE", SND_SOC_NOPM, 18, 1, 0,
+		tavil_dmic_pin_mode_get, tavil_dmic_pin_mode_put),
+
+	SOC_SINGLE_EXT("DMIC2_CLK_PIN_MODE", SND_SOC_NOPM, 19, 1, 0,
+		tavil_dmic_pin_mode_get, tavil_dmic_pin_mode_put),
+
+	SOC_SINGLE_EXT("DMIC2_DATA_PIN_MODE", SND_SOC_NOPM, 20, 1, 0,
+		tavil_dmic_pin_mode_get, tavil_dmic_pin_mode_put),
+
+	SOC_SINGLE_EXT("DMIC3_CLK_PIN_MODE", SND_SOC_NOPM, 21, 1, 0,
+		tavil_dmic_pin_mode_get, tavil_dmic_pin_mode_put),
+
+	SOC_SINGLE_EXT("DMIC3_DATA_PIN_MODE", SND_SOC_NOPM, 22, 1, 0,
+		tavil_dmic_pin_mode_get, tavil_dmic_pin_mode_put),
+	SOC_ENUM_EXT("AMIC_1_2 PWR MODE", amic_pwr_lvl_enum,
+		tavil_amic_pwr_lvl_get, tavil_amic_pwr_lvl_put),
+	SOC_ENUM_EXT("AMIC_3_4 PWR MODE", amic_pwr_lvl_enum,
+		tavil_amic_pwr_lvl_get, tavil_amic_pwr_lvl_put),
+	SOC_ENUM_EXT("AMIC_5_6 PWR MODE", amic_pwr_lvl_enum,
+		tavil_amic_pwr_lvl_get, tavil_amic_pwr_lvl_put),
+};
+
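+/*
+ * Decimator input mux put handler: in addition to updating the mux,
+ * program the ADC/DMIC select bit (bit 7) of the matching
+ * TXn_TX_PATH_CFG0 register (0 = ADC, 1 = DMIC).
+ */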
+static int tavil_dec_enum_put(struct snd_kcontrol *kcontrol,
+			      struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_dapm_widget_list *wlist =
+					dapm_kcontrol_get_wlist(kcontrol);
+	struct snd_soc_dapm_widget *widget = wlist->widgets[0];
+	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(widget->dapm);
+	struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
+	unsigned int val;
+	u16 mic_sel_reg = 0;
+	u8 mic_sel;
+
+	val = ucontrol->value.enumerated.item[0];
+	if (val >= e->items)
+		return -EINVAL;
+
+	dev_dbg(codec->dev, "%s: wname: %s, val: 0x%x\n", __func__,
+		widget->name, val);
+
+	switch (e->reg) {
+	case WCD934X_CDC_TX_INP_MUX_ADC_MUX0_CFG1:
+		if (e->shift_l == 0)
+			mic_sel_reg = WCD934X_CDC_TX0_TX_PATH_CFG0;
+		else if (e->shift_l == 2)
+			mic_sel_reg = WCD934X_CDC_TX4_TX_PATH_CFG0;
+		else if (e->shift_l == 4)
+			mic_sel_reg = WCD934X_CDC_TX8_TX_PATH_CFG0;
+		break;
+	case WCD934X_CDC_TX_INP_MUX_ADC_MUX1_CFG1:
+		if (e->shift_l == 0)
+			mic_sel_reg = WCD934X_CDC_TX1_TX_PATH_CFG0;
+		else if (e->shift_l == 2)
+			mic_sel_reg = WCD934X_CDC_TX5_TX_PATH_CFG0;
+		break;
+	case WCD934X_CDC_TX_INP_MUX_ADC_MUX2_CFG1:
+		if (e->shift_l == 0)
+			mic_sel_reg = WCD934X_CDC_TX2_TX_PATH_CFG0;
+		else if (e->shift_l == 2)
+			mic_sel_reg = WCD934X_CDC_TX6_TX_PATH_CFG0;
+		break;
+	case WCD934X_CDC_TX_INP_MUX_ADC_MUX3_CFG1:
+		if (e->shift_l == 0)
+			mic_sel_reg = WCD934X_CDC_TX3_TX_PATH_CFG0;
+		else if (e->shift_l == 2)
+			mic_sel_reg = WCD934X_CDC_TX7_TX_PATH_CFG0;
+		break;
+	default:
+		dev_err(codec->dev, "%s: e->reg: 0x%x not expected\n",
+			__func__, e->reg);
+		return -EINVAL;
+	}
+
+	/* ADC: 0, DMIC: 1 */
+	mic_sel = val ? 0x0 : 0x1;
+	if (mic_sel_reg)
+		snd_soc_update_bits(codec, mic_sel_reg, 1 << 7, mic_sel << 7);
+
+	return snd_soc_dapm_put_enum_double(kcontrol, ucontrol);
+}
+
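+/*
+ * DEM input selection for the RX0-RX2 interpolators: when the class-H
+ * DSM output is selected, also set the look-ahead delay bit (0x08) in
+ * the corresponding RX path config register.
+ */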
+static int tavil_int_dem_inp_mux_put(struct snd_kcontrol *kcontrol,
+				     struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_dapm_widget_list *wlist =
+					dapm_kcontrol_get_wlist(kcontrol);
+	struct snd_soc_dapm_widget *widget = wlist->widgets[0];
+	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(widget->dapm);
+	struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
+	unsigned int val;
+	unsigned short look_ahead_dly_reg = WCD934X_CDC_RX0_RX_PATH_CFG0;
+
+	val = ucontrol->value.enumerated.item[0];
+	if (val >= e->items)
+		return -EINVAL;
+
+	dev_dbg(codec->dev, "%s: wname: %s, val: 0x%x\n", __func__,
+		widget->name, val);
+
+	if (e->reg == WCD934X_CDC_RX0_RX_PATH_SEC0)
+		look_ahead_dly_reg = WCD934X_CDC_RX0_RX_PATH_CFG0;
+	else if (e->reg == WCD934X_CDC_RX1_RX_PATH_SEC0)
+		look_ahead_dly_reg = WCD934X_CDC_RX1_RX_PATH_CFG0;
+	else if (e->reg == WCD934X_CDC_RX2_RX_PATH_SEC0)
+		look_ahead_dly_reg = WCD934X_CDC_RX2_RX_PATH_CFG0;
+
+	/* Set Look Ahead Delay */
+	snd_soc_update_bits(codec, look_ahead_dly_reg,
+			    0x08, (val ? 0x08 : 0x00));
+	/* Set DEM INP Select */
+	return snd_soc_dapm_put_enum_double(kcontrol, ucontrol);
+}
+
+static const char * const rx_int0_7_mix_mux_text[] = {
+	"ZERO", "RX0", "RX1", "RX2", "RX3", "RX4", "RX5",
+	"RX6", "RX7", "PROXIMITY"
+};
+
+static const char * const rx_int_mix_mux_text[] = {
+	"ZERO", "RX0", "RX1", "RX2", "RX3", "RX4", "RX5",
+	"RX6", "RX7"
+};
+
+static const char * const rx_prim_mix_text[] = {
+	"ZERO", "DEC0", "DEC1", "IIR0", "IIR1", "RX0", "RX1", "RX2",
+	"RX3", "RX4", "RX5", "RX6", "RX7"
+};
+
+static const char * const rx_sidetone_mix_text[] = {
+	"ZERO", "SRC0", "SRC1", "SRC_SUM"
+};
+
+static const char * const cdc_if_tx0_mux_text[] = {
+	"ZERO", "RX_MIX_TX0", "DEC0", "DEC0_192"
+};
+static const char * const cdc_if_tx1_mux_text[] = {
+	"ZERO", "RX_MIX_TX1", "DEC1", "DEC1_192"
+};
+static const char * const cdc_if_tx2_mux_text[] = {
+	"ZERO", "RX_MIX_TX2", "DEC2", "DEC2_192"
+};
+static const char * const cdc_if_tx3_mux_text[] = {
+	"ZERO", "RX_MIX_TX3", "DEC3", "DEC3_192"
+};
+static const char * const cdc_if_tx4_mux_text[] = {
+	"ZERO", "RX_MIX_TX4", "DEC4", "DEC4_192"
+};
+static const char * const cdc_if_tx5_mux_text[] = {
+	"ZERO", "RX_MIX_TX5", "DEC5", "DEC5_192"
+};
+static const char * const cdc_if_tx6_mux_text[] = {
+	"ZERO", "RX_MIX_TX6", "DEC6", "DEC6_192"
+};
+static const char * const cdc_if_tx7_mux_text[] = {
+	"ZERO", "RX_MIX_TX7", "DEC7", "DEC7_192"
+};
+static const char * const cdc_if_tx8_mux_text[] = {
+	"ZERO", "RX_MIX_TX8", "DEC8", "DEC8_192"
+};
+static const char * const cdc_if_tx9_mux_text[] = {
+	"ZERO", "DEC7", "DEC7_192"
+};
+static const char * const cdc_if_tx10_mux_text[] = {
+	"ZERO", "DEC6", "DEC6_192"
+};
+static const char * const cdc_if_tx11_mux_text[] = {
+	"DEC_0_5", "DEC_9_12", "MAD_AUDIO", "MAD_BRDCST"
+};
+static const char * const cdc_if_tx11_inp1_mux_text[] = {
+	"ZERO", "DEC0", "DEC1", "DEC2", "DEC3", "DEC4",
+	"DEC5", "RX_MIX_TX5", "DEC9_10", "DEC11_12"
+};
+static const char * const cdc_if_tx13_mux_text[] = {
+	"CDC_DEC_5", "MAD_BRDCST"
+};
+static const char * const cdc_if_tx13_inp1_mux_text[] = {
+	"ZERO", "DEC5", "DEC5_192"
+};
+
+static const char * const iir_inp_mux_text[] = {
+	"ZERO", "DEC0", "DEC1", "DEC2", "DEC3", "DEC4", "DEC5", "DEC6",
+	"DEC7", "DEC8", "RX0", "RX1", "RX2", "RX3", "RX4", "RX5", "RX6", "RX7"
+};
+
+static const char * const rx_int_dem_inp_mux_text[] = {
+	"NORMAL_DSM_OUT", "CLSH_DSM_OUT",
+};
+
+static const char * const rx_int0_1_interp_mux_text[] = {
+	"ZERO", "RX INT0_1 MIX1",
+};
+
+static const char * const rx_int1_1_interp_mux_text[] = {
+	"ZERO", "RX INT1_1 MIX1",
+};
+
+static const char * const rx_int2_1_interp_mux_text[] = {
+	"ZERO", "RX INT2_1 MIX1",
+};
+
+static const char * const rx_int3_1_interp_mux_text[] = {
+	"ZERO", "RX INT3_1 MIX1",
+};
+
+static const char * const rx_int4_1_interp_mux_text[] = {
+	"ZERO", "RX INT4_1 MIX1",
+};
+
+static const char * const rx_int7_1_interp_mux_text[] = {
+	"ZERO", "RX INT7_1 MIX1",
+};
+
+static const char * const rx_int8_1_interp_mux_text[] = {
+	"ZERO", "RX INT8_1 MIX1",
+};
+
+static const char * const rx_int0_2_interp_mux_text[] = {
+	"ZERO", "RX INT0_2 MUX",
+};
+
+static const char * const rx_int1_2_interp_mux_text[] = {
+	"ZERO", "RX INT1_2 MUX",
+};
+
+static const char * const rx_int2_2_interp_mux_text[] = {
+	"ZERO", "RX INT2_2 MUX",
+};
+
+static const char * const rx_int3_2_interp_mux_text[] = {
+	"ZERO", "RX INT3_2 MUX",
+};
+
+static const char * const rx_int4_2_interp_mux_text[] = {
+	"ZERO", "RX INT4_2 MUX",
+};
+
+static const char * const rx_int7_2_interp_mux_text[] = {
+	"ZERO", "RX INT7_2 MUX",
+};
+
+static const char * const rx_int8_2_interp_mux_text[] = {
+	"ZERO", "RX INT8_2 MUX",
+};
+
+static const char * const mad_sel_txt[] = {
+	"SPE", "MSM"
+};
+
+static const char * const mad_inp_mux_txt[] = {
+	"MAD", "DEC1"
+};
+
+static const char * const adc_mux_text[] = {
+	"DMIC", "AMIC", "ANC_FB_TUNE1", "ANC_FB_TUNE2"
+};
+
+static const char * const dmic_mux_text[] = {
+	"ZERO", "DMIC0", "DMIC1", "DMIC2", "DMIC3", "DMIC4", "DMIC5"
+};
+
+static const char * const amic_mux_text[] = {
+	"ZERO", "ADC1", "ADC2", "ADC3", "ADC4"
+};
+
+static const char * const amic4_5_sel_text[] = {
+	"AMIC4", "AMIC5"
+};
+
+static const char * const anc0_fb_mux_text[] = {
+	"ZERO", "ANC_IN_HPHL", "ANC_IN_EAR", "ANC_IN_EAR_SPKR",
+	"ANC_IN_LO1"
+};
+
+static const char * const anc1_fb_mux_text[] = {
+	"ZERO", "ANC_IN_HPHR", "ANC_IN_LO2"
+};
+
+static const char * const rx_echo_mux_text[] = {
+	"ZERO", "RX_MIX0", "RX_MIX1", "RX_MIX2", "RX_MIX3", "RX_MIX4",
+	"RX_MIX5", "RX_MIX6", "RX_MIX7", "RX_MIX8"
+};
+
+static const char *const slim_rx_mux_text[] = {
+	"ZERO", "AIF1_PB", "AIF2_PB", "AIF3_PB", "AIF4_PB"
+};
+
+static const char *const cdc_if_rx0_mux_text[] = {
+	"SLIM RX0", "I2S_0 RX0"
+};
+static const char *const cdc_if_rx1_mux_text[] = {
+	"SLIM RX1", "I2S_0 RX1"
+};
+static const char *const cdc_if_rx2_mux_text[] = {
+	"SLIM RX2", "I2S_0 RX2"
+};
+static const char *const cdc_if_rx3_mux_text[] = {
+	"SLIM RX3", "I2S_0 RX3"
+};
+static const char *const cdc_if_rx4_mux_text[] = {
+	"SLIM RX4", "I2S_0 RX4"
+};
+static const char *const cdc_if_rx5_mux_text[] = {
+	"SLIM RX5", "I2S_0 RX5"
+};
+static const char *const cdc_if_rx6_mux_text[] = {
+	"SLIM RX6", "I2S_0 RX6"
+};
+static const char *const cdc_if_rx7_mux_text[] = {
+	"SLIM RX7", "I2S_0 RX7"
+};
+
+static const char * const asrc0_mux_text[] = {
+	"ZERO", "ASRC_IN_HPHL", "ASRC_IN_LO1",
+};
+
+static const char * const asrc1_mux_text[] = {
+	"ZERO", "ASRC_IN_HPHR", "ASRC_IN_LO2",
+};
+
+static const char * const asrc2_mux_text[] = {
+	"ZERO", "ASRC_IN_SPKR1",
+};
+
+static const char * const asrc3_mux_text[] = {
+	"ZERO", "ASRC_IN_SPKR2",
+};
+
+static const char * const native_mux_text[] = {
+	"OFF", "ON",
+};
+
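+/*
+ * Capture mixers: each switch routes one SLIM TX port into the AIF;
+ * the shift value carries the WCD934X_TXn port index used by the
+ * slim_tx_mixer handlers.
+ */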
+static const struct snd_kcontrol_new aif4_vi_mixer[] = {
+	SOC_SINGLE_EXT("SPKR_VI_1", SND_SOC_NOPM, WCD934X_TX14, 1, 0,
+			tavil_vi_feed_mixer_get, tavil_vi_feed_mixer_put),
+	SOC_SINGLE_EXT("SPKR_VI_2", SND_SOC_NOPM, WCD934X_TX15, 1, 0,
+			tavil_vi_feed_mixer_get, tavil_vi_feed_mixer_put),
+};
+
+static const struct snd_kcontrol_new aif1_cap_mixer[] = {
+	SOC_SINGLE_EXT("SLIM TX0", SND_SOC_NOPM, WCD934X_TX0, 1, 0,
+			slim_tx_mixer_get, slim_tx_mixer_put),
+	SOC_SINGLE_EXT("SLIM TX1", SND_SOC_NOPM, WCD934X_TX1, 1, 0,
+			slim_tx_mixer_get, slim_tx_mixer_put),
+	SOC_SINGLE_EXT("SLIM TX2", SND_SOC_NOPM, WCD934X_TX2, 1, 0,
+			slim_tx_mixer_get, slim_tx_mixer_put),
+	SOC_SINGLE_EXT("SLIM TX3", SND_SOC_NOPM, WCD934X_TX3, 1, 0,
+			slim_tx_mixer_get, slim_tx_mixer_put),
+	SOC_SINGLE_EXT("SLIM TX4", SND_SOC_NOPM, WCD934X_TX4, 1, 0,
+			slim_tx_mixer_get, slim_tx_mixer_put),
+	SOC_SINGLE_EXT("SLIM TX5", SND_SOC_NOPM, WCD934X_TX5, 1, 0,
+			slim_tx_mixer_get, slim_tx_mixer_put),
+	SOC_SINGLE_EXT("SLIM TX6", SND_SOC_NOPM, WCD934X_TX6, 1, 0,
+			slim_tx_mixer_get, slim_tx_mixer_put),
+	SOC_SINGLE_EXT("SLIM TX7", SND_SOC_NOPM, WCD934X_TX7, 1, 0,
+			slim_tx_mixer_get, slim_tx_mixer_put),
+	SOC_SINGLE_EXT("SLIM TX8", SND_SOC_NOPM, WCD934X_TX8, 1, 0,
+			slim_tx_mixer_get, slim_tx_mixer_put),
+	SOC_SINGLE_EXT("SLIM TX9", SND_SOC_NOPM, WCD934X_TX9, 1, 0,
+			slim_tx_mixer_get, slim_tx_mixer_put),
+	SOC_SINGLE_EXT("SLIM TX10", SND_SOC_NOPM, WCD934X_TX10, 1, 0,
+			slim_tx_mixer_get, slim_tx_mixer_put),
+	SOC_SINGLE_EXT("SLIM TX11", SND_SOC_NOPM, WCD934X_TX11, 1, 0,
+			slim_tx_mixer_get, slim_tx_mixer_put),
+	SOC_SINGLE_EXT("SLIM TX13", SND_SOC_NOPM, WCD934X_TX13, 1, 0,
+			slim_tx_mixer_get, slim_tx_mixer_put),
+};
+
+static const struct snd_kcontrol_new aif2_cap_mixer[] = {
+	SOC_SINGLE_EXT("SLIM TX0", SND_SOC_NOPM, WCD934X_TX0, 1, 0,
+			slim_tx_mixer_get, slim_tx_mixer_put),
+	SOC_SINGLE_EXT("SLIM TX1", SND_SOC_NOPM, WCD934X_TX1, 1, 0,
+			slim_tx_mixer_get, slim_tx_mixer_put),
+	SOC_SINGLE_EXT("SLIM TX2", SND_SOC_NOPM, WCD934X_TX2, 1, 0,
+			slim_tx_mixer_get, slim_tx_mixer_put),
+	SOC_SINGLE_EXT("SLIM TX3", SND_SOC_NOPM, WCD934X_TX3, 1, 0,
+			slim_tx_mixer_get, slim_tx_mixer_put),
+	SOC_SINGLE_EXT("SLIM TX4", SND_SOC_NOPM, WCD934X_TX4, 1, 0,
+			slim_tx_mixer_get, slim_tx_mixer_put),
+	SOC_SINGLE_EXT("SLIM TX5", SND_SOC_NOPM, WCD934X_TX5, 1, 0,
+			slim_tx_mixer_get, slim_tx_mixer_put),
+	SOC_SINGLE_EXT("SLIM TX6", SND_SOC_NOPM, WCD934X_TX6, 1, 0,
+			slim_tx_mixer_get, slim_tx_mixer_put),
+	SOC_SINGLE_EXT("SLIM TX7", SND_SOC_NOPM, WCD934X_TX7, 1, 0,
+			slim_tx_mixer_get, slim_tx_mixer_put),
+	SOC_SINGLE_EXT("SLIM TX8", SND_SOC_NOPM, WCD934X_TX8, 1, 0,
+			slim_tx_mixer_get, slim_tx_mixer_put),
+	SOC_SINGLE_EXT("SLIM TX9", SND_SOC_NOPM, WCD934X_TX9, 1, 0,
+			slim_tx_mixer_get, slim_tx_mixer_put),
+	SOC_SINGLE_EXT("SLIM TX10", SND_SOC_NOPM, WCD934X_TX10, 1, 0,
+			slim_tx_mixer_get, slim_tx_mixer_put),
+	SOC_SINGLE_EXT("SLIM TX11", SND_SOC_NOPM, WCD934X_TX11, 1, 0,
+			slim_tx_mixer_get, slim_tx_mixer_put),
+	SOC_SINGLE_EXT("SLIM TX13", SND_SOC_NOPM, WCD934X_TX13, 1, 0,
+			slim_tx_mixer_get, slim_tx_mixer_put),
+};
+
+static const struct snd_kcontrol_new aif3_cap_mixer[] = {
+	SOC_SINGLE_EXT("SLIM TX0", SND_SOC_NOPM, WCD934X_TX0, 1, 0,
+			slim_tx_mixer_get, slim_tx_mixer_put),
+	SOC_SINGLE_EXT("SLIM TX1", SND_SOC_NOPM, WCD934X_TX1, 1, 0,
+			slim_tx_mixer_get, slim_tx_mixer_put),
+	SOC_SINGLE_EXT("SLIM TX2", SND_SOC_NOPM, WCD934X_TX2, 1, 0,
+			slim_tx_mixer_get, slim_tx_mixer_put),
+	SOC_SINGLE_EXT("SLIM TX3", SND_SOC_NOPM, WCD934X_TX3, 1, 0,
+			slim_tx_mixer_get, slim_tx_mixer_put),
+	SOC_SINGLE_EXT("SLIM TX4", SND_SOC_NOPM, WCD934X_TX4, 1, 0,
+			slim_tx_mixer_get, slim_tx_mixer_put),
+	SOC_SINGLE_EXT("SLIM TX5", SND_SOC_NOPM, WCD934X_TX5, 1, 0,
+			slim_tx_mixer_get, slim_tx_mixer_put),
+	SOC_SINGLE_EXT("SLIM TX6", SND_SOC_NOPM, WCD934X_TX6, 1, 0,
+			slim_tx_mixer_get, slim_tx_mixer_put),
+	SOC_SINGLE_EXT("SLIM TX7", SND_SOC_NOPM, WCD934X_TX7, 1, 0,
+			slim_tx_mixer_get, slim_tx_mixer_put),
+	SOC_SINGLE_EXT("SLIM TX8", SND_SOC_NOPM, WCD934X_TX8, 1, 0,
+			slim_tx_mixer_get, slim_tx_mixer_put),
+	SOC_SINGLE_EXT("SLIM TX9", SND_SOC_NOPM, WCD934X_TX9, 1, 0,
+			slim_tx_mixer_get, slim_tx_mixer_put),
+	SOC_SINGLE_EXT("SLIM TX10", SND_SOC_NOPM, WCD934X_TX10, 1, 0,
+			slim_tx_mixer_get, slim_tx_mixer_put),
+	SOC_SINGLE_EXT("SLIM TX11", SND_SOC_NOPM, WCD934X_TX11, 1, 0,
+			slim_tx_mixer_get, slim_tx_mixer_put),
+	SOC_SINGLE_EXT("SLIM TX13", SND_SOC_NOPM, WCD934X_TX13, 1, 0,
+			slim_tx_mixer_get, slim_tx_mixer_put),
+};
+
+static const struct snd_kcontrol_new aif4_mad_mixer[] = {
+	SOC_SINGLE_EXT("SLIM TX13", SND_SOC_NOPM, WCD934X_TX13, 1, 0,
+			slim_tx_mixer_get, slim_tx_mixer_put),
+};
+
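+/* SLIM RX port muxes: route each RX port to one of the playback AIFs */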
+WCD_DAPM_ENUM_EXT(slim_rx0, SND_SOC_NOPM, 0, slim_rx_mux_text,
+	slim_rx_mux_get, slim_rx_mux_put);
+WCD_DAPM_ENUM_EXT(slim_rx1, SND_SOC_NOPM, 0, slim_rx_mux_text,
+	slim_rx_mux_get, slim_rx_mux_put);
+WCD_DAPM_ENUM_EXT(slim_rx2, SND_SOC_NOPM, 0, slim_rx_mux_text,
+	slim_rx_mux_get, slim_rx_mux_put);
+WCD_DAPM_ENUM_EXT(slim_rx3, SND_SOC_NOPM, 0, slim_rx_mux_text,
+	slim_rx_mux_get, slim_rx_mux_put);
+WCD_DAPM_ENUM_EXT(slim_rx4, SND_SOC_NOPM, 0, slim_rx_mux_text,
+	slim_rx_mux_get, slim_rx_mux_put);
+WCD_DAPM_ENUM_EXT(slim_rx5, SND_SOC_NOPM, 0, slim_rx_mux_text,
+	slim_rx_mux_get, slim_rx_mux_put);
+WCD_DAPM_ENUM_EXT(slim_rx6, SND_SOC_NOPM, 0, slim_rx_mux_text,
+	slim_rx_mux_get, slim_rx_mux_put);
+WCD_DAPM_ENUM_EXT(slim_rx7, SND_SOC_NOPM, 0, slim_rx_mux_text,
+	slim_rx_mux_get, slim_rx_mux_put);
+
+WCD_DAPM_ENUM(cdc_if_rx0, SND_SOC_NOPM, 0, cdc_if_rx0_mux_text);
+WCD_DAPM_ENUM(cdc_if_rx1, SND_SOC_NOPM, 0, cdc_if_rx1_mux_text);
+WCD_DAPM_ENUM(cdc_if_rx2, SND_SOC_NOPM, 0, cdc_if_rx2_mux_text);
+WCD_DAPM_ENUM(cdc_if_rx3, SND_SOC_NOPM, 0, cdc_if_rx3_mux_text);
+WCD_DAPM_ENUM(cdc_if_rx4, SND_SOC_NOPM, 0, cdc_if_rx4_mux_text);
+WCD_DAPM_ENUM(cdc_if_rx5, SND_SOC_NOPM, 0, cdc_if_rx5_mux_text);
+WCD_DAPM_ENUM(cdc_if_rx6, SND_SOC_NOPM, 0, cdc_if_rx6_mux_text);
+WCD_DAPM_ENUM(cdc_if_rx7, SND_SOC_NOPM, 0, cdc_if_rx7_mux_text);
+
+WCD_DAPM_ENUM(rx_int0_2, WCD934X_CDC_RX_INP_MUX_RX_INT0_CFG1, 0,
+	rx_int0_7_mix_mux_text);
+WCD_DAPM_ENUM(rx_int1_2, WCD934X_CDC_RX_INP_MUX_RX_INT1_CFG1, 0,
+	rx_int_mix_mux_text);
+WCD_DAPM_ENUM(rx_int2_2, WCD934X_CDC_RX_INP_MUX_RX_INT2_CFG1, 0,
+	rx_int_mix_mux_text);
+WCD_DAPM_ENUM(rx_int3_2, WCD934X_CDC_RX_INP_MUX_RX_INT3_CFG1, 0,
+	rx_int_mix_mux_text);
+WCD_DAPM_ENUM(rx_int4_2, WCD934X_CDC_RX_INP_MUX_RX_INT4_CFG1, 0,
+	rx_int_mix_mux_text);
+WCD_DAPM_ENUM(rx_int7_2, WCD934X_CDC_RX_INP_MUX_RX_INT7_CFG1, 0,
+	rx_int0_7_mix_mux_text);
+WCD_DAPM_ENUM(rx_int8_2, WCD934X_CDC_RX_INP_MUX_RX_INT8_CFG1, 0,
+	rx_int_mix_mux_text);
+
+WCD_DAPM_ENUM(rx_int0_1_mix_inp0, WCD934X_CDC_RX_INP_MUX_RX_INT0_CFG0, 0,
+	rx_prim_mix_text);
+WCD_DAPM_ENUM(rx_int0_1_mix_inp1, WCD934X_CDC_RX_INP_MUX_RX_INT0_CFG0, 4,
+	rx_prim_mix_text);
+WCD_DAPM_ENUM(rx_int0_1_mix_inp2, WCD934X_CDC_RX_INP_MUX_RX_INT0_CFG1, 4,
+	rx_prim_mix_text);
+WCD_DAPM_ENUM(rx_int1_1_mix_inp0, WCD934X_CDC_RX_INP_MUX_RX_INT1_CFG0, 0,
+	rx_prim_mix_text);
+WCD_DAPM_ENUM(rx_int1_1_mix_inp1, WCD934X_CDC_RX_INP_MUX_RX_INT1_CFG0, 4,
+	rx_prim_mix_text);
+WCD_DAPM_ENUM(rx_int1_1_mix_inp2, WCD934X_CDC_RX_INP_MUX_RX_INT1_CFG1, 4,
+	rx_prim_mix_text);
+WCD_DAPM_ENUM(rx_int2_1_mix_inp0, WCD934X_CDC_RX_INP_MUX_RX_INT2_CFG0, 0,
+	rx_prim_mix_text);
+WCD_DAPM_ENUM(rx_int2_1_mix_inp1, WCD934X_CDC_RX_INP_MUX_RX_INT2_CFG0, 4,
+	rx_prim_mix_text);
+WCD_DAPM_ENUM(rx_int2_1_mix_inp2, WCD934X_CDC_RX_INP_MUX_RX_INT2_CFG1, 4,
+	rx_prim_mix_text);
+WCD_DAPM_ENUM(rx_int3_1_mix_inp0, WCD934X_CDC_RX_INP_MUX_RX_INT3_CFG0, 0,
+	rx_prim_mix_text);
+WCD_DAPM_ENUM(rx_int3_1_mix_inp1, WCD934X_CDC_RX_INP_MUX_RX_INT3_CFG0, 4,
+	rx_prim_mix_text);
+WCD_DAPM_ENUM(rx_int3_1_mix_inp2, WCD934X_CDC_RX_INP_MUX_RX_INT3_CFG1, 4,
+	rx_prim_mix_text);
+WCD_DAPM_ENUM(rx_int4_1_mix_inp0, WCD934X_CDC_RX_INP_MUX_RX_INT4_CFG0, 0,
+	rx_prim_mix_text);
+WCD_DAPM_ENUM(rx_int4_1_mix_inp1, WCD934X_CDC_RX_INP_MUX_RX_INT4_CFG0, 4,
+	rx_prim_mix_text);
+WCD_DAPM_ENUM(rx_int4_1_mix_inp2, WCD934X_CDC_RX_INP_MUX_RX_INT4_CFG1, 4,
+	rx_prim_mix_text);
+WCD_DAPM_ENUM(rx_int7_1_mix_inp0, WCD934X_CDC_RX_INP_MUX_RX_INT7_CFG0, 0,
+	rx_prim_mix_text);
+WCD_DAPM_ENUM(rx_int7_1_mix_inp1, WCD934X_CDC_RX_INP_MUX_RX_INT7_CFG0, 4,
+	rx_prim_mix_text);
+WCD_DAPM_ENUM(rx_int7_1_mix_inp2, WCD934X_CDC_RX_INP_MUX_RX_INT7_CFG1, 4,
+	rx_prim_mix_text);
+WCD_DAPM_ENUM(rx_int8_1_mix_inp0, WCD934X_CDC_RX_INP_MUX_RX_INT8_CFG0, 0,
+	rx_prim_mix_text);
+WCD_DAPM_ENUM(rx_int8_1_mix_inp1, WCD934X_CDC_RX_INP_MUX_RX_INT8_CFG0, 4,
+	rx_prim_mix_text);
+WCD_DAPM_ENUM(rx_int8_1_mix_inp2, WCD934X_CDC_RX_INP_MUX_RX_INT8_CFG1, 4,
+	rx_prim_mix_text);
+
+WCD_DAPM_ENUM(rx_int0_mix2_inp, WCD934X_CDC_RX_INP_MUX_SIDETONE_SRC_CFG0, 0,
+	rx_sidetone_mix_text);
+WCD_DAPM_ENUM(rx_int1_mix2_inp, WCD934X_CDC_RX_INP_MUX_SIDETONE_SRC_CFG0, 2,
+	rx_sidetone_mix_text);
+WCD_DAPM_ENUM(rx_int2_mix2_inp, WCD934X_CDC_RX_INP_MUX_SIDETONE_SRC_CFG0, 4,
+	rx_sidetone_mix_text);
+WCD_DAPM_ENUM(rx_int3_mix2_inp, WCD934X_CDC_RX_INP_MUX_SIDETONE_SRC_CFG0, 6,
+	rx_sidetone_mix_text);
+WCD_DAPM_ENUM(rx_int4_mix2_inp, WCD934X_CDC_RX_INP_MUX_SIDETONE_SRC_CFG1, 0,
+	rx_sidetone_mix_text);
+WCD_DAPM_ENUM(rx_int7_mix2_inp, WCD934X_CDC_RX_INP_MUX_SIDETONE_SRC_CFG1, 2,
+	rx_sidetone_mix_text);
+
+WCD_DAPM_ENUM(tx_adc_mux10, WCD934X_CDC_TX_INP_MUX_ADC_MUX1_CFG1, 4,
+	adc_mux_text);
+WCD_DAPM_ENUM(tx_adc_mux11, WCD934X_CDC_TX_INP_MUX_ADC_MUX2_CFG1, 4,
+	adc_mux_text);
+WCD_DAPM_ENUM(tx_adc_mux12, WCD934X_CDC_TX_INP_MUX_ADC_MUX3_CFG1, 4,
+	adc_mux_text);
+WCD_DAPM_ENUM(tx_adc_mux13, WCD934X_CDC_TX_INP_MUX_ADC_MUX0_CFG1, 6,
+	adc_mux_text);
+
+WCD_DAPM_ENUM(tx_dmic_mux0, WCD934X_CDC_TX_INP_MUX_ADC_MUX0_CFG0, 3,
+	dmic_mux_text);
+WCD_DAPM_ENUM(tx_dmic_mux1, WCD934X_CDC_TX_INP_MUX_ADC_MUX1_CFG0, 3,
+	dmic_mux_text);
+WCD_DAPM_ENUM(tx_dmic_mux2, WCD934X_CDC_TX_INP_MUX_ADC_MUX2_CFG0, 3,
+	dmic_mux_text);
+WCD_DAPM_ENUM(tx_dmic_mux3, WCD934X_CDC_TX_INP_MUX_ADC_MUX3_CFG0, 3,
+	dmic_mux_text);
+WCD_DAPM_ENUM(tx_dmic_mux4, WCD934X_CDC_TX_INP_MUX_ADC_MUX4_CFG0, 3,
+	dmic_mux_text);
+WCD_DAPM_ENUM(tx_dmic_mux5, WCD934X_CDC_TX_INP_MUX_ADC_MUX5_CFG0, 3,
+	dmic_mux_text);
+WCD_DAPM_ENUM(tx_dmic_mux6, WCD934X_CDC_TX_INP_MUX_ADC_MUX6_CFG0, 3,
+	dmic_mux_text);
+WCD_DAPM_ENUM(tx_dmic_mux7, WCD934X_CDC_TX_INP_MUX_ADC_MUX7_CFG0, 3,
+	dmic_mux_text);
+WCD_DAPM_ENUM(tx_dmic_mux8, WCD934X_CDC_TX_INP_MUX_ADC_MUX8_CFG0, 3,
+	dmic_mux_text);
+WCD_DAPM_ENUM(tx_dmic_mux10, WCD934X_CDC_TX_INP_MUX_ADC_MUX10_CFG0, 3,
+	dmic_mux_text);
+WCD_DAPM_ENUM(tx_dmic_mux11, WCD934X_CDC_TX_INP_MUX_ADC_MUX11_CFG0, 3,
+	dmic_mux_text);
+WCD_DAPM_ENUM(tx_dmic_mux12, WCD934X_CDC_TX_INP_MUX_ADC_MUX12_CFG0, 3,
+	dmic_mux_text);
+WCD_DAPM_ENUM(tx_dmic_mux13, WCD934X_CDC_TX_INP_MUX_ADC_MUX13_CFG0, 3,
+	dmic_mux_text);
+
+WCD_DAPM_ENUM(tx_amic_mux0, WCD934X_CDC_TX_INP_MUX_ADC_MUX0_CFG0, 0,
+	amic_mux_text);
+WCD_DAPM_ENUM(tx_amic_mux1, WCD934X_CDC_TX_INP_MUX_ADC_MUX1_CFG0, 0,
+	amic_mux_text);
+WCD_DAPM_ENUM(tx_amic_mux2, WCD934X_CDC_TX_INP_MUX_ADC_MUX2_CFG0, 0,
+	amic_mux_text);
+WCD_DAPM_ENUM(tx_amic_mux3, WCD934X_CDC_TX_INP_MUX_ADC_MUX3_CFG0, 0,
+	amic_mux_text);
+WCD_DAPM_ENUM(tx_amic_mux4, WCD934X_CDC_TX_INP_MUX_ADC_MUX4_CFG0, 0,
+	amic_mux_text);
+WCD_DAPM_ENUM(tx_amic_mux5, WCD934X_CDC_TX_INP_MUX_ADC_MUX5_CFG0, 0,
+	amic_mux_text);
+WCD_DAPM_ENUM(tx_amic_mux6, WCD934X_CDC_TX_INP_MUX_ADC_MUX6_CFG0, 0,
+	amic_mux_text);
+WCD_DAPM_ENUM(tx_amic_mux7, WCD934X_CDC_TX_INP_MUX_ADC_MUX7_CFG0, 0,
+	amic_mux_text);
+WCD_DAPM_ENUM(tx_amic_mux8, WCD934X_CDC_TX_INP_MUX_ADC_MUX8_CFG0, 0,
+	amic_mux_text);
+WCD_DAPM_ENUM(tx_amic_mux10, WCD934X_CDC_TX_INP_MUX_ADC_MUX10_CFG0, 0,
+	amic_mux_text);
+WCD_DAPM_ENUM(tx_amic_mux11, WCD934X_CDC_TX_INP_MUX_ADC_MUX11_CFG0, 0,
+	amic_mux_text);
+WCD_DAPM_ENUM(tx_amic_mux12, WCD934X_CDC_TX_INP_MUX_ADC_MUX12_CFG0, 0,
+	amic_mux_text);
+WCD_DAPM_ENUM(tx_amic_mux13, WCD934X_CDC_TX_INP_MUX_ADC_MUX13_CFG0, 0,
+	amic_mux_text);
+
+WCD_DAPM_ENUM(tx_amic4_5, WCD934X_TX_NEW_AMIC_4_5_SEL, 7, amic4_5_sel_text);
+
+WCD_DAPM_ENUM(cdc_if_tx0, WCD934X_CDC_IF_ROUTER_TX_MUX_CFG0, 0,
+	cdc_if_tx0_mux_text);
+WCD_DAPM_ENUM(cdc_if_tx1, WCD934X_CDC_IF_ROUTER_TX_MUX_CFG0, 2,
+	cdc_if_tx1_mux_text);
+WCD_DAPM_ENUM(cdc_if_tx2, WCD934X_CDC_IF_ROUTER_TX_MUX_CFG0, 4,
+	cdc_if_tx2_mux_text);
+WCD_DAPM_ENUM(cdc_if_tx3, WCD934X_CDC_IF_ROUTER_TX_MUX_CFG0, 6,
+	cdc_if_tx3_mux_text);
+WCD_DAPM_ENUM(cdc_if_tx4, WCD934X_CDC_IF_ROUTER_TX_MUX_CFG1, 0,
+	cdc_if_tx4_mux_text);
+WCD_DAPM_ENUM(cdc_if_tx5, WCD934X_CDC_IF_ROUTER_TX_MUX_CFG1, 2,
+	cdc_if_tx5_mux_text);
+WCD_DAPM_ENUM(cdc_if_tx6, WCD934X_CDC_IF_ROUTER_TX_MUX_CFG1, 4,
+	cdc_if_tx6_mux_text);
+WCD_DAPM_ENUM(cdc_if_tx7, WCD934X_CDC_IF_ROUTER_TX_MUX_CFG1, 6,
+	cdc_if_tx7_mux_text);
+WCD_DAPM_ENUM(cdc_if_tx8, WCD934X_CDC_IF_ROUTER_TX_MUX_CFG2, 0,
+	cdc_if_tx8_mux_text);
+WCD_DAPM_ENUM(cdc_if_tx9, WCD934X_CDC_IF_ROUTER_TX_MUX_CFG2, 2,
+	cdc_if_tx9_mux_text);
+WCD_DAPM_ENUM(cdc_if_tx10, WCD934X_CDC_IF_ROUTER_TX_MUX_CFG2, 4,
+	cdc_if_tx10_mux_text);
+WCD_DAPM_ENUM(cdc_if_tx11_inp1, WCD934X_CDC_IF_ROUTER_TX_MUX_CFG3, 0,
+	cdc_if_tx11_inp1_mux_text);
+WCD_DAPM_ENUM(cdc_if_tx11, WCD934X_DATA_HUB_SB_TX11_INP_CFG, 0,
+	cdc_if_tx11_mux_text);
+WCD_DAPM_ENUM(cdc_if_tx13_inp1, WCD934X_CDC_IF_ROUTER_TX_MUX_CFG3, 4,
+	cdc_if_tx13_inp1_mux_text);
+WCD_DAPM_ENUM(cdc_if_tx13, WCD934X_DATA_HUB_SB_TX13_INP_CFG, 0,
+	cdc_if_tx13_mux_text);
+
+WCD_DAPM_ENUM(rx_mix_tx0, WCD934X_CDC_RX_INP_MUX_RX_MIX_CFG0, 0,
+	rx_echo_mux_text);
+WCD_DAPM_ENUM(rx_mix_tx1, WCD934X_CDC_RX_INP_MUX_RX_MIX_CFG0, 4,
+	rx_echo_mux_text);
+WCD_DAPM_ENUM(rx_mix_tx2, WCD934X_CDC_RX_INP_MUX_RX_MIX_CFG1, 0,
+	rx_echo_mux_text);
+WCD_DAPM_ENUM(rx_mix_tx3, WCD934X_CDC_RX_INP_MUX_RX_MIX_CFG1, 4,
+	rx_echo_mux_text);
+WCD_DAPM_ENUM(rx_mix_tx4, WCD934X_CDC_RX_INP_MUX_RX_MIX_CFG2, 0,
+	rx_echo_mux_text);
+WCD_DAPM_ENUM(rx_mix_tx5, WCD934X_CDC_RX_INP_MUX_RX_MIX_CFG2, 4,
+	rx_echo_mux_text);
+WCD_DAPM_ENUM(rx_mix_tx6, WCD934X_CDC_RX_INP_MUX_RX_MIX_CFG3, 0,
+	rx_echo_mux_text);
+WCD_DAPM_ENUM(rx_mix_tx7, WCD934X_CDC_RX_INP_MUX_RX_MIX_CFG3, 4,
+	rx_echo_mux_text);
+WCD_DAPM_ENUM(rx_mix_tx8, WCD934X_CDC_RX_INP_MUX_RX_MIX_CFG4, 0,
+	rx_echo_mux_text);
+
+WCD_DAPM_ENUM(iir0_inp0, WCD934X_CDC_SIDETONE_IIR_INP_MUX_IIR0_MIX_CFG0, 0,
+	iir_inp_mux_text);
+WCD_DAPM_ENUM(iir0_inp1, WCD934X_CDC_SIDETONE_IIR_INP_MUX_IIR0_MIX_CFG1, 0,
+	iir_inp_mux_text);
+WCD_DAPM_ENUM(iir0_inp2, WCD934X_CDC_SIDETONE_IIR_INP_MUX_IIR0_MIX_CFG2, 0,
+	iir_inp_mux_text);
+WCD_DAPM_ENUM(iir0_inp3, WCD934X_CDC_SIDETONE_IIR_INP_MUX_IIR0_MIX_CFG3, 0,
+	iir_inp_mux_text);
+WCD_DAPM_ENUM(iir1_inp0, WCD934X_CDC_SIDETONE_IIR_INP_MUX_IIR1_MIX_CFG0, 0,
+	iir_inp_mux_text);
+WCD_DAPM_ENUM(iir1_inp1, WCD934X_CDC_SIDETONE_IIR_INP_MUX_IIR1_MIX_CFG1, 0,
+	iir_inp_mux_text);
+WCD_DAPM_ENUM(iir1_inp2, WCD934X_CDC_SIDETONE_IIR_INP_MUX_IIR1_MIX_CFG2, 0,
+	iir_inp_mux_text);
+WCD_DAPM_ENUM(iir1_inp3, WCD934X_CDC_SIDETONE_IIR_INP_MUX_IIR1_MIX_CFG3, 0,
+	iir_inp_mux_text);
+
+WCD_DAPM_ENUM(rx_int0_1_interp, SND_SOC_NOPM, 0, rx_int0_1_interp_mux_text);
+WCD_DAPM_ENUM(rx_int1_1_interp, SND_SOC_NOPM, 0, rx_int1_1_interp_mux_text);
+WCD_DAPM_ENUM(rx_int2_1_interp, SND_SOC_NOPM, 0, rx_int2_1_interp_mux_text);
+WCD_DAPM_ENUM(rx_int3_1_interp, SND_SOC_NOPM, 0, rx_int3_1_interp_mux_text);
+WCD_DAPM_ENUM(rx_int4_1_interp, SND_SOC_NOPM, 0, rx_int4_1_interp_mux_text);
+WCD_DAPM_ENUM(rx_int7_1_interp, SND_SOC_NOPM, 0, rx_int7_1_interp_mux_text);
+WCD_DAPM_ENUM(rx_int8_1_interp, SND_SOC_NOPM, 0, rx_int8_1_interp_mux_text);
+
+WCD_DAPM_ENUM(rx_int0_2_interp, SND_SOC_NOPM, 0, rx_int0_2_interp_mux_text);
+WCD_DAPM_ENUM(rx_int1_2_interp, SND_SOC_NOPM, 0, rx_int1_2_interp_mux_text);
+WCD_DAPM_ENUM(rx_int2_2_interp, SND_SOC_NOPM, 0, rx_int2_2_interp_mux_text);
+WCD_DAPM_ENUM(rx_int3_2_interp, SND_SOC_NOPM, 0, rx_int3_2_interp_mux_text);
+WCD_DAPM_ENUM(rx_int4_2_interp, SND_SOC_NOPM, 0, rx_int4_2_interp_mux_text);
+WCD_DAPM_ENUM(rx_int7_2_interp, SND_SOC_NOPM, 0, rx_int7_2_interp_mux_text);
+WCD_DAPM_ENUM(rx_int8_2_interp, SND_SOC_NOPM, 0, rx_int8_2_interp_mux_text);
+
+WCD_DAPM_ENUM(mad_sel, WCD934X_CPE_SS_SVA_CFG, 0,
+	mad_sel_txt);
+
+WCD_DAPM_ENUM(mad_inp_mux, WCD934X_CPE_SS_SVA_CFG, 2,
+	mad_inp_mux_txt);
+
+WCD_DAPM_ENUM_EXT(rx_int0_dem_inp, WCD934X_CDC_RX0_RX_PATH_SEC0, 0,
+	rx_int_dem_inp_mux_text, snd_soc_dapm_get_enum_double,
+	tavil_int_dem_inp_mux_put);
+WCD_DAPM_ENUM_EXT(rx_int1_dem_inp, WCD934X_CDC_RX1_RX_PATH_SEC0, 0,
+	rx_int_dem_inp_mux_text, snd_soc_dapm_get_enum_double,
+	tavil_int_dem_inp_mux_put);
+WCD_DAPM_ENUM_EXT(rx_int2_dem_inp, WCD934X_CDC_RX2_RX_PATH_SEC0, 0,
+	rx_int_dem_inp_mux_text, snd_soc_dapm_get_enum_double,
+	tavil_int_dem_inp_mux_put);
+
+WCD_DAPM_ENUM_EXT(tx_adc_mux0, WCD934X_CDC_TX_INP_MUX_ADC_MUX0_CFG1, 0,
+	adc_mux_text, snd_soc_dapm_get_enum_double, tavil_dec_enum_put);
+WCD_DAPM_ENUM_EXT(tx_adc_mux1, WCD934X_CDC_TX_INP_MUX_ADC_MUX1_CFG1, 0,
+	adc_mux_text, snd_soc_dapm_get_enum_double, tavil_dec_enum_put);
+WCD_DAPM_ENUM_EXT(tx_adc_mux2, WCD934X_CDC_TX_INP_MUX_ADC_MUX2_CFG1, 0,
+	adc_mux_text, snd_soc_dapm_get_enum_double, tavil_dec_enum_put);
+WCD_DAPM_ENUM_EXT(tx_adc_mux3, WCD934X_CDC_TX_INP_MUX_ADC_MUX3_CFG1, 0,
+	adc_mux_text, snd_soc_dapm_get_enum_double, tavil_dec_enum_put);
+WCD_DAPM_ENUM_EXT(tx_adc_mux4, WCD934X_CDC_TX_INP_MUX_ADC_MUX0_CFG1, 2,
+	adc_mux_text, snd_soc_dapm_get_enum_double, tavil_dec_enum_put);
+WCD_DAPM_ENUM_EXT(tx_adc_mux5, WCD934X_CDC_TX_INP_MUX_ADC_MUX1_CFG1, 2,
+	adc_mux_text, snd_soc_dapm_get_enum_double, tavil_dec_enum_put);
+WCD_DAPM_ENUM_EXT(tx_adc_mux6, WCD934X_CDC_TX_INP_MUX_ADC_MUX2_CFG1, 2,
+	adc_mux_text, snd_soc_dapm_get_enum_double, tavil_dec_enum_put);
+WCD_DAPM_ENUM_EXT(tx_adc_mux7, WCD934X_CDC_TX_INP_MUX_ADC_MUX3_CFG1, 2,
+	adc_mux_text, snd_soc_dapm_get_enum_double, tavil_dec_enum_put);
+WCD_DAPM_ENUM_EXT(tx_adc_mux8, WCD934X_CDC_TX_INP_MUX_ADC_MUX0_CFG1, 4,
+	adc_mux_text, snd_soc_dapm_get_enum_double, tavil_dec_enum_put);
+
+WCD_DAPM_ENUM(asrc0, WCD934X_CDC_RX_INP_MUX_SPLINE_ASRC_CFG0, 0,
+	asrc0_mux_text);
+WCD_DAPM_ENUM(asrc1, WCD934X_CDC_RX_INP_MUX_SPLINE_ASRC_CFG0, 2,
+	asrc1_mux_text);
+WCD_DAPM_ENUM(asrc2, WCD934X_CDC_RX_INP_MUX_SPLINE_ASRC_CFG0, 4,
+	asrc2_mux_text);
+WCD_DAPM_ENUM(asrc3, WCD934X_CDC_RX_INP_MUX_SPLINE_ASRC_CFG0, 6,
+	asrc3_mux_text);
+
+WCD_DAPM_ENUM(int1_1_native, SND_SOC_NOPM, 0, native_mux_text);
+WCD_DAPM_ENUM(int2_1_native, SND_SOC_NOPM, 0, native_mux_text);
+WCD_DAPM_ENUM(int3_1_native, SND_SOC_NOPM, 0, native_mux_text);
+WCD_DAPM_ENUM(int4_1_native, SND_SOC_NOPM, 0, native_mux_text);
+
+WCD_DAPM_ENUM(int1_2_native, SND_SOC_NOPM, 0, native_mux_text);
+WCD_DAPM_ENUM(int2_2_native, SND_SOC_NOPM, 0, native_mux_text);
+WCD_DAPM_ENUM(int3_2_native, SND_SOC_NOPM, 0, native_mux_text);
+WCD_DAPM_ENUM(int4_2_native, SND_SOC_NOPM, 0, native_mux_text);
+WCD_DAPM_ENUM(int7_2_native, SND_SOC_NOPM, 0, native_mux_text);
+WCD_DAPM_ENUM(int8_2_native, SND_SOC_NOPM, 0, native_mux_text);
+
+WCD_DAPM_ENUM(anc0_fb, WCD934X_CDC_RX_INP_MUX_ANC_CFG0, 0, anc0_fb_mux_text);
+WCD_DAPM_ENUM(anc1_fb, WCD934X_CDC_RX_INP_MUX_ANC_CFG0, 3, anc1_fb_mux_text);
+
+static const struct snd_kcontrol_new anc_ear_switch =
+	SOC_DAPM_SINGLE("Switch", SND_SOC_NOPM, 0, 1, 0);
+
+static const struct snd_kcontrol_new anc_ear_spkr_switch =
+	SOC_DAPM_SINGLE("Switch", SND_SOC_NOPM, 0, 1, 0);
+
+static const struct snd_kcontrol_new anc_spkr_pa_switch =
+	SOC_DAPM_SINGLE("Switch", SND_SOC_NOPM, 0, 1, 0);
+
+static const struct snd_kcontrol_new anc_hphl_pa_switch =
+	SOC_DAPM_SINGLE("Switch", SND_SOC_NOPM, 0, 1, 0);
+
+static const struct snd_kcontrol_new anc_hphr_pa_switch =
+	SOC_DAPM_SINGLE("Switch", SND_SOC_NOPM, 0, 1, 0);
+
+static const struct snd_kcontrol_new mad_cpe1_switch =
+	SOC_DAPM_SINGLE("Switch", SND_SOC_NOPM, 0, 1, 0);
+
+static const struct snd_kcontrol_new mad_cpe2_switch =
+	SOC_DAPM_SINGLE("Switch", SND_SOC_NOPM, 0, 1, 0);
+
+static const struct snd_kcontrol_new mad_brdcst_switch =
+	SOC_DAPM_SINGLE("Switch", SND_SOC_NOPM, 0, 1, 0);
+
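+/*
+ * The "US_Switch" controls below gate the TX path 192 kHz registers;
+ * presumably this is the ultrasound capture path (hence "US").
+ */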
+static const struct snd_kcontrol_new adc_us_mux0_switch =
+	SOC_DAPM_SINGLE("US_Switch", SND_SOC_NOPM, 0, 1, 0);
+
+static const struct snd_kcontrol_new adc_us_mux1_switch =
+	SOC_DAPM_SINGLE("US_Switch", SND_SOC_NOPM, 0, 1, 0);
+
+static const struct snd_kcontrol_new adc_us_mux2_switch =
+	SOC_DAPM_SINGLE("US_Switch", SND_SOC_NOPM, 0, 1, 0);
+
+static const struct snd_kcontrol_new adc_us_mux3_switch =
+	SOC_DAPM_SINGLE("US_Switch", SND_SOC_NOPM, 0, 1, 0);
+
+static const struct snd_kcontrol_new adc_us_mux4_switch =
+	SOC_DAPM_SINGLE("US_Switch", SND_SOC_NOPM, 0, 1, 0);
+
+static const struct snd_kcontrol_new adc_us_mux5_switch =
+	SOC_DAPM_SINGLE("US_Switch", SND_SOC_NOPM, 0, 1, 0);
+
+static const struct snd_kcontrol_new adc_us_mux6_switch =
+	SOC_DAPM_SINGLE("US_Switch", SND_SOC_NOPM, 0, 1, 0);
+
+static const struct snd_kcontrol_new adc_us_mux7_switch =
+	SOC_DAPM_SINGLE("US_Switch", SND_SOC_NOPM, 0, 1, 0);
+
+static const struct snd_kcontrol_new adc_us_mux8_switch =
+	SOC_DAPM_SINGLE("US_Switch", SND_SOC_NOPM, 0, 1, 0);
+
+static const struct snd_kcontrol_new rx_int1_asrc_switch[] = {
+	SOC_DAPM_SINGLE("HPHL Switch", SND_SOC_NOPM, 0, 1, 0),
+};
+
+static const struct snd_kcontrol_new rx_int2_asrc_switch[] = {
+	SOC_DAPM_SINGLE("HPHR Switch", SND_SOC_NOPM, 0, 1, 0),
+};
+
+static const struct snd_kcontrol_new rx_int3_asrc_switch[] = {
+	SOC_DAPM_SINGLE("LO1 Switch", SND_SOC_NOPM, 0, 1, 0),
+};
+
+static const struct snd_kcontrol_new rx_int4_asrc_switch[] = {
+	SOC_DAPM_SINGLE("LO2 Switch", SND_SOC_NOPM, 0, 1, 0),
+};
+
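+/*
+ * DSD mixer get/put: the control's shift field carries the interpolator
+ * id (INTERP_HPHL/HPHR/LO1/LO2), and the switch state lives in the DSD
+ * config rather than in a codec register.
+ */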
+static int tavil_dsd_mixer_get(struct snd_kcontrol *kcontrol,
+			       struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_dapm_context *dapm =
+				snd_soc_dapm_kcontrol_dapm(kcontrol);
+	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(dapm);
+	struct tavil_priv *tavil_p = snd_soc_codec_get_drvdata(codec);
+	struct soc_mixer_control *mc =
+		(struct soc_mixer_control *)kcontrol->private_value;
+	struct tavil_dsd_config *dsd_conf = tavil_p->dsd_config;
+	int val;
+
+	val = tavil_dsd_get_current_mixer_value(dsd_conf, mc->shift);
+
+	ucontrol->value.integer.value[0] = ((val < 0) ? 0 : val);
+
+	return 0;
+}
+
+static int tavil_dsd_mixer_put(struct snd_kcontrol *kcontrol,
+			       struct snd_ctl_elem_value *ucontrol)
+{
+	struct soc_mixer_control *mc =
+		(struct soc_mixer_control *)kcontrol->private_value;
+	struct snd_soc_dapm_context *dapm =
+		snd_soc_dapm_kcontrol_dapm(kcontrol);
+	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(dapm);
+	struct tavil_priv *tavil_p = snd_soc_codec_get_drvdata(codec);
+	unsigned int wval = ucontrol->value.integer.value[0];
+	struct tavil_dsd_config *dsd_conf = tavil_p->dsd_config;
+
+	if (!dsd_conf)
+		return 0;
+
+	mutex_lock(&tavil_p->codec_mutex);
+
+	tavil_dsd_set_out_select(dsd_conf, mc->shift);
+	tavil_dsd_set_mixer_value(dsd_conf, mc->shift, wval);
+
+	mutex_unlock(&tavil_p->codec_mutex);
+	snd_soc_dapm_mixer_update_power(dapm, kcontrol, wval, NULL);
+
+	return 0;
+}
+
+static const struct snd_kcontrol_new hphl_mixer[] = {
+	SOC_SINGLE_EXT("DSD HPHL Switch", SND_SOC_NOPM, INTERP_HPHL, 1, 0,
+			tavil_dsd_mixer_get, tavil_dsd_mixer_put),
+};
+
+static const struct snd_kcontrol_new hphr_mixer[] = {
+	SOC_SINGLE_EXT("DSD HPHR Switch", SND_SOC_NOPM, INTERP_HPHR, 1, 0,
+			tavil_dsd_mixer_get, tavil_dsd_mixer_put),
+};
+
+static const struct snd_kcontrol_new lo1_mixer[] = {
+	SOC_SINGLE_EXT("DSD LO1 Switch", SND_SOC_NOPM, INTERP_LO1, 1, 0,
+			tavil_dsd_mixer_get, tavil_dsd_mixer_put),
+};
+
+static const struct snd_kcontrol_new lo2_mixer[] = {
+	SOC_SINGLE_EXT("DSD LO2 Switch", SND_SOC_NOPM, INTERP_LO2, 1, 0,
+			tavil_dsd_mixer_get, tavil_dsd_mixer_put),
+};
+
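+/*
+ * DAPM widget table for the Tavil codec: SLIMbus AIF widgets, the RX
+ * interpolator and TX decimator mux/mixer chains, MAD, ANC switches
+ * and the analog output PAs.
+ */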
+static const struct snd_soc_dapm_widget tavil_dapm_widgets[] = {
+	SND_SOC_DAPM_AIF_IN_E("AIF1 PB", "AIF1 Playback", 0, SND_SOC_NOPM,
+		AIF1_PB, 0, tavil_codec_enable_slimrx,
+		SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
+	SND_SOC_DAPM_AIF_IN_E("AIF2 PB", "AIF2 Playback", 0, SND_SOC_NOPM,
+		AIF2_PB, 0, tavil_codec_enable_slimrx,
+		SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
+	SND_SOC_DAPM_AIF_IN_E("AIF3 PB", "AIF3 Playback", 0, SND_SOC_NOPM,
+		AIF3_PB, 0, tavil_codec_enable_slimrx,
+		SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
+	SND_SOC_DAPM_AIF_IN_E("AIF4 PB", "AIF4 Playback", 0, SND_SOC_NOPM,
+		AIF4_PB, 0, tavil_codec_enable_slimrx,
+		SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
+
+	WCD_DAPM_MUX("SLIM RX0 MUX", WCD934X_RX0, slim_rx0),
+	WCD_DAPM_MUX("SLIM RX1 MUX", WCD934X_RX1, slim_rx1),
+	WCD_DAPM_MUX("SLIM RX2 MUX", WCD934X_RX2, slim_rx2),
+	WCD_DAPM_MUX("SLIM RX3 MUX", WCD934X_RX3, slim_rx3),
+	WCD_DAPM_MUX("SLIM RX4 MUX", WCD934X_RX4, slim_rx4),
+	WCD_DAPM_MUX("SLIM RX5 MUX", WCD934X_RX5, slim_rx5),
+	WCD_DAPM_MUX("SLIM RX6 MUX", WCD934X_RX6, slim_rx6),
+	WCD_DAPM_MUX("SLIM RX7 MUX", WCD934X_RX7, slim_rx7),
+
+	SND_SOC_DAPM_MIXER("SLIM RX0", SND_SOC_NOPM, 0, 0, NULL, 0),
+	SND_SOC_DAPM_MIXER("SLIM RX1", SND_SOC_NOPM, 0, 0, NULL, 0),
+	SND_SOC_DAPM_MIXER("SLIM RX2", SND_SOC_NOPM, 0, 0, NULL, 0),
+	SND_SOC_DAPM_MIXER("SLIM RX3", SND_SOC_NOPM, 0, 0, NULL, 0),
+	SND_SOC_DAPM_MIXER("SLIM RX4", SND_SOC_NOPM, 0, 0, NULL, 0),
+	SND_SOC_DAPM_MIXER("SLIM RX5", SND_SOC_NOPM, 0, 0, NULL, 0),
+	SND_SOC_DAPM_MIXER("SLIM RX6", SND_SOC_NOPM, 0, 0, NULL, 0),
+	SND_SOC_DAPM_MIXER("SLIM RX7", SND_SOC_NOPM, 0, 0, NULL, 0),
+
+	WCD_DAPM_MUX("CDC_IF RX0 MUX", WCD934X_RX0, cdc_if_rx0),
+	WCD_DAPM_MUX("CDC_IF RX1 MUX", WCD934X_RX1, cdc_if_rx1),
+	WCD_DAPM_MUX("CDC_IF RX2 MUX", WCD934X_RX2, cdc_if_rx2),
+	WCD_DAPM_MUX("CDC_IF RX3 MUX", WCD934X_RX3, cdc_if_rx3),
+	WCD_DAPM_MUX("CDC_IF RX4 MUX", WCD934X_RX4, cdc_if_rx4),
+	WCD_DAPM_MUX("CDC_IF RX5 MUX", WCD934X_RX5, cdc_if_rx5),
+	WCD_DAPM_MUX("CDC_IF RX6 MUX", WCD934X_RX6, cdc_if_rx6),
+	WCD_DAPM_MUX("CDC_IF RX7 MUX", WCD934X_RX7, cdc_if_rx7),
+
+	SND_SOC_DAPM_MUX_E("RX INT0_2 MUX", SND_SOC_NOPM, INTERP_EAR, 0,
+		&rx_int0_2_mux, tavil_codec_enable_mix_path,
+		SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
+		SND_SOC_DAPM_POST_PMD),
+	SND_SOC_DAPM_MUX_E("RX INT1_2 MUX", SND_SOC_NOPM, INTERP_HPHL, 0,
+		&rx_int1_2_mux, tavil_codec_enable_mix_path,
+		SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
+		SND_SOC_DAPM_POST_PMD),
+	SND_SOC_DAPM_MUX_E("RX INT2_2 MUX", SND_SOC_NOPM, INTERP_HPHR, 0,
+		&rx_int2_2_mux, tavil_codec_enable_mix_path,
+		SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
+		SND_SOC_DAPM_POST_PMD),
+	SND_SOC_DAPM_MUX_E("RX INT3_2 MUX", SND_SOC_NOPM, INTERP_LO1, 0,
+		&rx_int3_2_mux, tavil_codec_enable_mix_path,
+		SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
+		SND_SOC_DAPM_POST_PMD),
+	SND_SOC_DAPM_MUX_E("RX INT4_2 MUX", SND_SOC_NOPM, INTERP_LO2, 0,
+		&rx_int4_2_mux, tavil_codec_enable_mix_path,
+		SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
+		SND_SOC_DAPM_POST_PMD),
+	SND_SOC_DAPM_MUX_E("RX INT7_2 MUX", SND_SOC_NOPM, INTERP_SPKR1, 0,
+		&rx_int7_2_mux, tavil_codec_enable_mix_path,
+		SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
+		SND_SOC_DAPM_POST_PMD),
+	SND_SOC_DAPM_MUX_E("RX INT8_2 MUX", SND_SOC_NOPM, INTERP_SPKR2, 0,
+		&rx_int8_2_mux, tavil_codec_enable_mix_path,
+		SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
+		SND_SOC_DAPM_POST_PMD),
+
+	WCD_DAPM_MUX("RX INT0_1 MIX1 INP0", 0, rx_int0_1_mix_inp0),
+	WCD_DAPM_MUX("RX INT0_1 MIX1 INP1", 0, rx_int0_1_mix_inp1),
+	WCD_DAPM_MUX("RX INT0_1 MIX1 INP2", 0, rx_int0_1_mix_inp2),
+	WCD_DAPM_MUX("RX INT1_1 MIX1 INP0", 0, rx_int1_1_mix_inp0),
+	WCD_DAPM_MUX("RX INT1_1 MIX1 INP1", 0, rx_int1_1_mix_inp1),
+	WCD_DAPM_MUX("RX INT1_1 MIX1 INP2", 0, rx_int1_1_mix_inp2),
+	WCD_DAPM_MUX("RX INT2_1 MIX1 INP0", 0, rx_int2_1_mix_inp0),
+	WCD_DAPM_MUX("RX INT2_1 MIX1 INP1", 0, rx_int2_1_mix_inp1),
+	WCD_DAPM_MUX("RX INT2_1 MIX1 INP2", 0, rx_int2_1_mix_inp2),
+	WCD_DAPM_MUX("RX INT3_1 MIX1 INP0", 0, rx_int3_1_mix_inp0),
+	WCD_DAPM_MUX("RX INT3_1 MIX1 INP1", 0, rx_int3_1_mix_inp1),
+	WCD_DAPM_MUX("RX INT3_1 MIX1 INP2", 0, rx_int3_1_mix_inp2),
+	WCD_DAPM_MUX("RX INT4_1 MIX1 INP0", 0, rx_int4_1_mix_inp0),
+	WCD_DAPM_MUX("RX INT4_1 MIX1 INP1", 0, rx_int4_1_mix_inp1),
+	WCD_DAPM_MUX("RX INT4_1 MIX1 INP2", 0, rx_int4_1_mix_inp2),
+
+	SND_SOC_DAPM_MUX_E("RX INT7_1 MIX1 INP0", SND_SOC_NOPM, 0, 0,
+		&rx_int7_1_mix_inp0_mux, tavil_codec_enable_swr,
+		SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+	SND_SOC_DAPM_MUX_E("RX INT7_1 MIX1 INP1", SND_SOC_NOPM, 0, 0,
+		&rx_int7_1_mix_inp1_mux, tavil_codec_enable_swr,
+		SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+	SND_SOC_DAPM_MUX_E("RX INT7_1 MIX1 INP2", SND_SOC_NOPM, 0, 0,
+		&rx_int7_1_mix_inp2_mux, tavil_codec_enable_swr,
+		SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+	SND_SOC_DAPM_MUX_E("RX INT8_1 MIX1 INP0", SND_SOC_NOPM, 0, 0,
+		&rx_int8_1_mix_inp0_mux, tavil_codec_enable_swr,
+		SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+	SND_SOC_DAPM_MUX_E("RX INT8_1 MIX1 INP1", SND_SOC_NOPM, 0, 0,
+		&rx_int8_1_mix_inp1_mux, tavil_codec_enable_swr,
+		SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+	SND_SOC_DAPM_MUX_E("RX INT8_1 MIX1 INP2", SND_SOC_NOPM, 0, 0,
+		&rx_int8_1_mix_inp2_mux, tavil_codec_enable_swr,
+		SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+
+	SND_SOC_DAPM_MIXER("RX INT0_1 MIX1", SND_SOC_NOPM, 0, 0, NULL, 0),
+	SND_SOC_DAPM_MIXER("RX INT0 SEC MIX", SND_SOC_NOPM, 0, 0, NULL, 0),
+	SND_SOC_DAPM_MIXER("RX INT1_1 MIX1", SND_SOC_NOPM, 0, 0, NULL, 0),
+	SND_SOC_DAPM_MIXER("RX INT1 SEC MIX", SND_SOC_NOPM, 0, 0,
+		rx_int1_asrc_switch, ARRAY_SIZE(rx_int1_asrc_switch)),
+	SND_SOC_DAPM_MIXER("RX INT2_1 MIX1", SND_SOC_NOPM, 0, 0, NULL, 0),
+	SND_SOC_DAPM_MIXER("RX INT2 SEC MIX", SND_SOC_NOPM, 0, 0,
+		rx_int2_asrc_switch, ARRAY_SIZE(rx_int2_asrc_switch)),
+	SND_SOC_DAPM_MIXER("RX INT3_1 MIX1", SND_SOC_NOPM, 0, 0, NULL, 0),
+	SND_SOC_DAPM_MIXER("RX INT3 SEC MIX", SND_SOC_NOPM, 0, 0,
+		rx_int3_asrc_switch, ARRAY_SIZE(rx_int3_asrc_switch)),
+	SND_SOC_DAPM_MIXER("RX INT4_1 MIX1", SND_SOC_NOPM, 0, 0, NULL, 0),
+	SND_SOC_DAPM_MIXER("RX INT4 SEC MIX", SND_SOC_NOPM, 0, 0,
+		rx_int4_asrc_switch, ARRAY_SIZE(rx_int4_asrc_switch)),
+	SND_SOC_DAPM_MIXER("RX INT7_1 MIX1", SND_SOC_NOPM, 0, 0, NULL, 0),
+	SND_SOC_DAPM_MIXER("RX INT7 SEC MIX", SND_SOC_NOPM, 0, 0, NULL, 0),
+	SND_SOC_DAPM_MIXER("RX INT8_1 MIX1", SND_SOC_NOPM, 0, 0, NULL, 0),
+	SND_SOC_DAPM_MIXER("RX INT8 SEC MIX", SND_SOC_NOPM, 0, 0, NULL, 0),
+
+	SND_SOC_DAPM_MIXER("RX INT0 MIX2", SND_SOC_NOPM, 0, 0, NULL, 0),
+	SND_SOC_DAPM_MIXER("RX INT1 MIX2", SND_SOC_NOPM, 0, 0, NULL, 0),
+	SND_SOC_DAPM_MIXER("RX INT1 MIX3", SND_SOC_NOPM, 0, 0, hphl_mixer,
+			   ARRAY_SIZE(hphl_mixer)),
+	SND_SOC_DAPM_MIXER("RX INT2 MIX2", SND_SOC_NOPM, 0, 0, NULL, 0),
+	SND_SOC_DAPM_MIXER("RX INT2 MIX3", SND_SOC_NOPM, 0, 0, hphr_mixer,
+			   ARRAY_SIZE(hphr_mixer)),
+	SND_SOC_DAPM_MIXER("RX INT3 MIX2", SND_SOC_NOPM, 0, 0, NULL, 0),
+	SND_SOC_DAPM_MIXER("RX INT3 MIX3", SND_SOC_NOPM, 0, 0, lo1_mixer,
+			   ARRAY_SIZE(lo1_mixer)),
+	SND_SOC_DAPM_MIXER("RX INT4 MIX2", SND_SOC_NOPM, 0, 0, NULL, 0),
+	SND_SOC_DAPM_MIXER("RX INT4 MIX3", SND_SOC_NOPM, 0, 0, lo2_mixer,
+			   ARRAY_SIZE(lo2_mixer)),
+	SND_SOC_DAPM_MIXER("RX INT7 MIX2", SND_SOC_NOPM, 0, 0, NULL, 0),
+	SND_SOC_DAPM_MIXER_E("RX INT7 CHAIN", SND_SOC_NOPM, 0, 0,
+		NULL, 0, tavil_codec_spk_boost_event,
+		SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+	SND_SOC_DAPM_MIXER_E("RX INT8 CHAIN", SND_SOC_NOPM, 0, 0,
+		NULL, 0, tavil_codec_spk_boost_event,
+		SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+
+	SND_SOC_DAPM_MUX_E("RX INT0 MIX2 INP", SND_SOC_NOPM, INTERP_EAR,
+		0, &rx_int0_mix2_inp_mux, tavil_codec_enable_rx_path_clk,
+		SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+	SND_SOC_DAPM_MUX_E("RX INT1 MIX2 INP", SND_SOC_NOPM, INTERP_HPHL,
+		0, &rx_int1_mix2_inp_mux, tavil_codec_enable_rx_path_clk,
+		SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+	SND_SOC_DAPM_MUX_E("RX INT2 MIX2 INP", SND_SOC_NOPM, INTERP_HPHR,
+		0, &rx_int2_mix2_inp_mux, tavil_codec_enable_rx_path_clk,
+		SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+	SND_SOC_DAPM_MUX_E("RX INT3 MIX2 INP", SND_SOC_NOPM, INTERP_LO1,
+		0, &rx_int3_mix2_inp_mux, tavil_codec_enable_rx_path_clk,
+		SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+	SND_SOC_DAPM_MUX_E("RX INT4 MIX2 INP", SND_SOC_NOPM, INTERP_LO2,
+		0, &rx_int4_mix2_inp_mux, tavil_codec_enable_rx_path_clk,
+		SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+	SND_SOC_DAPM_MUX_E("RX INT7 MIX2 INP", SND_SOC_NOPM, INTERP_SPKR1,
+		0, &rx_int7_mix2_inp_mux, tavil_codec_enable_rx_path_clk,
+		SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+
+	WCD_DAPM_MUX("CDC_IF TX0 MUX", WCD934X_TX0, cdc_if_tx0),
+	WCD_DAPM_MUX("CDC_IF TX1 MUX", WCD934X_TX1, cdc_if_tx1),
+	WCD_DAPM_MUX("CDC_IF TX2 MUX", WCD934X_TX2, cdc_if_tx2),
+	WCD_DAPM_MUX("CDC_IF TX3 MUX", WCD934X_TX3, cdc_if_tx3),
+	WCD_DAPM_MUX("CDC_IF TX4 MUX", WCD934X_TX4, cdc_if_tx4),
+	WCD_DAPM_MUX("CDC_IF TX5 MUX", WCD934X_TX5, cdc_if_tx5),
+	WCD_DAPM_MUX("CDC_IF TX6 MUX", WCD934X_TX6, cdc_if_tx6),
+	WCD_DAPM_MUX("CDC_IF TX7 MUX", WCD934X_TX7, cdc_if_tx7),
+	WCD_DAPM_MUX("CDC_IF TX8 MUX", WCD934X_TX8, cdc_if_tx8),
+	WCD_DAPM_MUX("CDC_IF TX9 MUX", WCD934X_TX9, cdc_if_tx9),
+	WCD_DAPM_MUX("CDC_IF TX10 MUX", WCD934X_TX10, cdc_if_tx10),
+	WCD_DAPM_MUX("CDC_IF TX11 MUX", WCD934X_TX11, cdc_if_tx11),
+	WCD_DAPM_MUX("CDC_IF TX11 INP1 MUX", WCD934X_TX11, cdc_if_tx11_inp1),
+	WCD_DAPM_MUX("CDC_IF TX13 MUX", WCD934X_TX13, cdc_if_tx13),
+	WCD_DAPM_MUX("CDC_IF TX13 INP1 MUX", WCD934X_TX13, cdc_if_tx13_inp1),
+
+	SND_SOC_DAPM_MUX_E("ADC MUX0", WCD934X_CDC_TX0_TX_PATH_CTL, 5, 0,
+		&tx_adc_mux0_mux, tavil_codec_enable_dec,
+		SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
+		SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD),
+
+	SND_SOC_DAPM_MUX_E("ADC MUX1", WCD934X_CDC_TX1_TX_PATH_CTL, 5, 0,
+		&tx_adc_mux1_mux, tavil_codec_enable_dec,
+		SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
+		SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD),
+
+	SND_SOC_DAPM_MUX_E("ADC MUX2", WCD934X_CDC_TX2_TX_PATH_CTL, 5, 0,
+		&tx_adc_mux2_mux, tavil_codec_enable_dec,
+		SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
+		SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD),
+
+	SND_SOC_DAPM_MUX_E("ADC MUX3", WCD934X_CDC_TX3_TX_PATH_CTL, 5, 0,
+		&tx_adc_mux3_mux, tavil_codec_enable_dec,
+		SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
+		SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD),
+
+	SND_SOC_DAPM_MUX_E("ADC MUX4", WCD934X_CDC_TX4_TX_PATH_CTL, 5, 0,
+		&tx_adc_mux4_mux, tavil_codec_enable_dec,
+		SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
+		SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD),
+
+	SND_SOC_DAPM_MUX_E("ADC MUX5", WCD934X_CDC_TX5_TX_PATH_CTL, 5, 0,
+		&tx_adc_mux5_mux, tavil_codec_enable_dec,
+		SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
+		SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD),
+
+	SND_SOC_DAPM_MUX_E("ADC MUX6", WCD934X_CDC_TX6_TX_PATH_CTL, 5, 0,
+		&tx_adc_mux6_mux, tavil_codec_enable_dec,
+		SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
+		SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD),
+
+	SND_SOC_DAPM_MUX_E("ADC MUX7", WCD934X_CDC_TX7_TX_PATH_CTL, 5, 0,
+		&tx_adc_mux7_mux, tavil_codec_enable_dec,
+		SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
+		SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD),
+
+	SND_SOC_DAPM_MUX_E("ADC MUX8", WCD934X_CDC_TX8_TX_PATH_CTL, 5, 0,
+		&tx_adc_mux8_mux, tavil_codec_enable_dec,
+		SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
+		SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD),
+
+	SND_SOC_DAPM_MUX_E("ADC MUX10", SND_SOC_NOPM, 10, 0, &tx_adc_mux10_mux,
+		tavil_codec_tx_adc_cfg, SND_SOC_DAPM_POST_PMU),
+
+	SND_SOC_DAPM_MUX_E("ADC MUX11", SND_SOC_NOPM, 11, 0, &tx_adc_mux11_mux,
+		tavil_codec_tx_adc_cfg, SND_SOC_DAPM_POST_PMU),
+
+	SND_SOC_DAPM_MUX_E("ADC MUX12", SND_SOC_NOPM, 12, 0, &tx_adc_mux12_mux,
+		tavil_codec_tx_adc_cfg, SND_SOC_DAPM_POST_PMU),
+
+	SND_SOC_DAPM_MUX_E("ADC MUX13", SND_SOC_NOPM, 13, 0, &tx_adc_mux13_mux,
+		tavil_codec_tx_adc_cfg, SND_SOC_DAPM_POST_PMU),
+
+	WCD_DAPM_MUX("DMIC MUX0", 0, tx_dmic_mux0),
+	WCD_DAPM_MUX("DMIC MUX1", 0, tx_dmic_mux1),
+	WCD_DAPM_MUX("DMIC MUX2", 0, tx_dmic_mux2),
+	WCD_DAPM_MUX("DMIC MUX3", 0, tx_dmic_mux3),
+	WCD_DAPM_MUX("DMIC MUX4", 0, tx_dmic_mux4),
+	WCD_DAPM_MUX("DMIC MUX5", 0, tx_dmic_mux5),
+	WCD_DAPM_MUX("DMIC MUX6", 0, tx_dmic_mux6),
+	WCD_DAPM_MUX("DMIC MUX7", 0, tx_dmic_mux7),
+	WCD_DAPM_MUX("DMIC MUX8", 0, tx_dmic_mux8),
+	WCD_DAPM_MUX("DMIC MUX10", 0, tx_dmic_mux10),
+	WCD_DAPM_MUX("DMIC MUX11", 0, tx_dmic_mux11),
+	WCD_DAPM_MUX("DMIC MUX12", 0, tx_dmic_mux12),
+	WCD_DAPM_MUX("DMIC MUX13", 0, tx_dmic_mux13),
+
+	WCD_DAPM_MUX("AMIC MUX0", 0, tx_amic_mux0),
+	WCD_DAPM_MUX("AMIC MUX1", 0, tx_amic_mux1),
+	WCD_DAPM_MUX("AMIC MUX2", 0, tx_amic_mux2),
+	WCD_DAPM_MUX("AMIC MUX3", 0, tx_amic_mux3),
+	WCD_DAPM_MUX("AMIC MUX4", 0, tx_amic_mux4),
+	WCD_DAPM_MUX("AMIC MUX5", 0, tx_amic_mux5),
+	WCD_DAPM_MUX("AMIC MUX6", 0, tx_amic_mux6),
+	WCD_DAPM_MUX("AMIC MUX7", 0, tx_amic_mux7),
+	WCD_DAPM_MUX("AMIC MUX8", 0, tx_amic_mux8),
+	WCD_DAPM_MUX("AMIC MUX10", 0, tx_amic_mux10),
+	WCD_DAPM_MUX("AMIC MUX11", 0, tx_amic_mux11),
+	WCD_DAPM_MUX("AMIC MUX12", 0, tx_amic_mux12),
+	WCD_DAPM_MUX("AMIC MUX13", 0, tx_amic_mux13),
+
+	SND_SOC_DAPM_ADC_E("ADC1", NULL, WCD934X_ANA_AMIC1, 7, 0,
+		tavil_codec_enable_adc, SND_SOC_DAPM_PRE_PMU),
+	SND_SOC_DAPM_ADC_E("ADC2", NULL, WCD934X_ANA_AMIC2, 7, 0,
+		tavil_codec_enable_adc, SND_SOC_DAPM_PRE_PMU),
+	SND_SOC_DAPM_ADC_E("ADC3", NULL, WCD934X_ANA_AMIC3, 7, 0,
+		tavil_codec_enable_adc, SND_SOC_DAPM_PRE_PMU),
+	SND_SOC_DAPM_ADC_E("ADC4", NULL, WCD934X_ANA_AMIC4, 7, 0,
+		tavil_codec_enable_adc, SND_SOC_DAPM_PRE_PMU),
+
+	WCD_DAPM_MUX("AMIC4_5 SEL", 0, tx_amic4_5),
+
+	WCD_DAPM_MUX("ANC0 FB MUX", 0, anc0_fb),
+	WCD_DAPM_MUX("ANC1 FB MUX", 0, anc1_fb),
+
+	SND_SOC_DAPM_INPUT("AMIC1"),
+	SND_SOC_DAPM_INPUT("AMIC2"),
+	SND_SOC_DAPM_INPUT("AMIC3"),
+	SND_SOC_DAPM_INPUT("AMIC4"),
+	SND_SOC_DAPM_INPUT("AMIC5"),
+
+	SND_SOC_DAPM_MICBIAS_E("MIC BIAS1", SND_SOC_NOPM, 0, 0,
+		tavil_codec_enable_micbias, SND_SOC_DAPM_PRE_PMU |
+		SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
+	SND_SOC_DAPM_MICBIAS_E("MIC BIAS2", SND_SOC_NOPM, 0, 0,
+		tavil_codec_enable_micbias, SND_SOC_DAPM_PRE_PMU |
+		SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
+	SND_SOC_DAPM_MICBIAS_E("MIC BIAS3", SND_SOC_NOPM, 0, 0,
+		tavil_codec_enable_micbias, SND_SOC_DAPM_PRE_PMU |
+		SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
+	SND_SOC_DAPM_MICBIAS_E("MIC BIAS4", SND_SOC_NOPM, 0, 0,
+		tavil_codec_enable_micbias, SND_SOC_DAPM_PRE_PMU |
+		SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
+
+	/*
+	 * Not a supply widget; this is used to recover the HPH registers.
+	 * It is not connected to any other widget.
+	 */
+	SND_SOC_DAPM_SUPPLY("RESET_HPH_REGISTERS", SND_SOC_NOPM,
+		0, 0, tavil_codec_reset_hph_registers,
+		SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+
+	SND_SOC_DAPM_MICBIAS_E(DAPM_MICBIAS1_STANDALONE, SND_SOC_NOPM, 0, 0,
+		tavil_codec_force_enable_micbias,
+		SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+	SND_SOC_DAPM_MICBIAS_E(DAPM_MICBIAS2_STANDALONE, SND_SOC_NOPM, 0, 0,
+		tavil_codec_force_enable_micbias,
+		SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+	SND_SOC_DAPM_MICBIAS_E(DAPM_MICBIAS3_STANDALONE, SND_SOC_NOPM, 0, 0,
+		tavil_codec_force_enable_micbias,
+		SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+	SND_SOC_DAPM_MICBIAS_E(DAPM_MICBIAS4_STANDALONE, SND_SOC_NOPM, 0, 0,
+		tavil_codec_force_enable_micbias,
+		SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+
+	SND_SOC_DAPM_AIF_OUT_E("AIF1 CAP", "AIF1 Capture", 0, SND_SOC_NOPM,
+		AIF1_CAP, 0, tavil_codec_enable_slimtx,
+		SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
+	SND_SOC_DAPM_AIF_OUT_E("AIF2 CAP", "AIF2 Capture", 0, SND_SOC_NOPM,
+		AIF2_CAP, 0, tavil_codec_enable_slimtx,
+		SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
+	SND_SOC_DAPM_AIF_OUT_E("AIF3 CAP", "AIF3 Capture", 0, SND_SOC_NOPM,
+		AIF3_CAP, 0, tavil_codec_enable_slimtx,
+		SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
+	SND_SOC_DAPM_MIXER("AIF1_CAP Mixer", SND_SOC_NOPM, AIF1_CAP, 0,
+		aif1_cap_mixer, ARRAY_SIZE(aif1_cap_mixer)),
+	SND_SOC_DAPM_MIXER("AIF2_CAP Mixer", SND_SOC_NOPM, AIF2_CAP, 0,
+		aif2_cap_mixer, ARRAY_SIZE(aif2_cap_mixer)),
+	SND_SOC_DAPM_MIXER("AIF3_CAP Mixer", SND_SOC_NOPM, AIF3_CAP, 0,
+		aif3_cap_mixer, ARRAY_SIZE(aif3_cap_mixer)),
+	SND_SOC_DAPM_MIXER("AIF4_MAD Mixer", SND_SOC_NOPM, AIF4_MAD_TX, 0,
+		aif4_mad_mixer, ARRAY_SIZE(aif4_mad_mixer)),
+
+	SND_SOC_DAPM_AIF_OUT_E("AIF4 VI", "VIfeed", 0, SND_SOC_NOPM,
+		AIF4_VIFEED, 0, tavil_codec_enable_slimvi_feedback,
+		SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
+
+	SND_SOC_DAPM_AIF_OUT("AIF4 MAD", "AIF4 MAD TX", 0,
+		SND_SOC_NOPM, 0, 0),
+
+	SND_SOC_DAPM_MIXER("AIF4_VI Mixer", SND_SOC_NOPM, AIF4_VIFEED, 0,
+		aif4_vi_mixer, ARRAY_SIZE(aif4_vi_mixer)),
+	SND_SOC_DAPM_INPUT("VIINPUT"),
+
+	SND_SOC_DAPM_MIXER("SLIM TX0", SND_SOC_NOPM, 0, 0, NULL, 0),
+	SND_SOC_DAPM_MIXER("SLIM TX1", SND_SOC_NOPM, 0, 0, NULL, 0),
+	SND_SOC_DAPM_MIXER("SLIM TX2", SND_SOC_NOPM, 0, 0, NULL, 0),
+	SND_SOC_DAPM_MIXER("SLIM TX3", SND_SOC_NOPM, 0, 0, NULL, 0),
+	SND_SOC_DAPM_MIXER("SLIM TX4", SND_SOC_NOPM, 0, 0, NULL, 0),
+	SND_SOC_DAPM_MIXER("SLIM TX5", SND_SOC_NOPM, 0, 0, NULL, 0),
+	SND_SOC_DAPM_MIXER("SLIM TX6", SND_SOC_NOPM, 0, 0, NULL, 0),
+	SND_SOC_DAPM_MIXER("SLIM TX7", SND_SOC_NOPM, 0, 0, NULL, 0),
+	SND_SOC_DAPM_MIXER("SLIM TX8", SND_SOC_NOPM, 0, 0, NULL, 0),
+	SND_SOC_DAPM_MIXER("SLIM TX9", SND_SOC_NOPM, 0, 0, NULL, 0),
+	SND_SOC_DAPM_MIXER("SLIM TX10", SND_SOC_NOPM, 0, 0, NULL, 0),
+	SND_SOC_DAPM_MIXER("SLIM TX11", SND_SOC_NOPM, 0, 0, NULL, 0),
+	SND_SOC_DAPM_MIXER("SLIM TX13", SND_SOC_NOPM, 0, 0, NULL, 0),
+
+	/* Digital Mic Inputs */
+	SND_SOC_DAPM_ADC_E("DMIC0", NULL, SND_SOC_NOPM, 0, 0,
+		tavil_codec_enable_dmic,
+		SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+	SND_SOC_DAPM_ADC_E("DMIC1", NULL, SND_SOC_NOPM, 0, 0,
+		tavil_codec_enable_dmic,
+		SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+	SND_SOC_DAPM_ADC_E("DMIC2", NULL, SND_SOC_NOPM, 0, 0,
+		tavil_codec_enable_dmic,
+		SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+	SND_SOC_DAPM_ADC_E("DMIC3", NULL, SND_SOC_NOPM, 0, 0,
+		tavil_codec_enable_dmic,
+		SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+	SND_SOC_DAPM_ADC_E("DMIC4", NULL, SND_SOC_NOPM, 0, 0,
+		tavil_codec_enable_dmic,
+		SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+	SND_SOC_DAPM_ADC_E("DMIC5", NULL, SND_SOC_NOPM, 0, 0,
+		tavil_codec_enable_dmic,
+		SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+
+	WCD_DAPM_MUX("IIR0 INP0 MUX", 0, iir0_inp0),
+	WCD_DAPM_MUX("IIR0 INP1 MUX", 0, iir0_inp1),
+	WCD_DAPM_MUX("IIR0 INP2 MUX", 0, iir0_inp2),
+	WCD_DAPM_MUX("IIR0 INP3 MUX", 0, iir0_inp3),
+	WCD_DAPM_MUX("IIR1 INP0 MUX", 0, iir1_inp0),
+	WCD_DAPM_MUX("IIR1 INP1 MUX", 0, iir1_inp1),
+	WCD_DAPM_MUX("IIR1 INP2 MUX", 0, iir1_inp2),
+	WCD_DAPM_MUX("IIR1 INP3 MUX", 0, iir1_inp3),
+
+	SND_SOC_DAPM_MIXER_E("IIR0", WCD934X_CDC_SIDETONE_IIR0_IIR_PATH_CTL,
+		4, 0, NULL, 0, tavil_codec_set_iir_gain,
+		SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD),
+	SND_SOC_DAPM_MIXER_E("IIR1", WCD934X_CDC_SIDETONE_IIR1_IIR_PATH_CTL,
+		4, 0, NULL, 0, tavil_codec_set_iir_gain,
+		SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD),
+	SND_SOC_DAPM_MIXER("SRC0", WCD934X_CDC_SIDETONE_SRC0_ST_SRC_PATH_CTL,
+		4, 0, NULL, 0),
+	SND_SOC_DAPM_MIXER("SRC1", WCD934X_CDC_SIDETONE_SRC1_ST_SRC_PATH_CTL,
+		4, 0, NULL, 0),
+
+	WCD_DAPM_MUX("RX MIX TX0 MUX", 0, rx_mix_tx0),
+	WCD_DAPM_MUX("RX MIX TX1 MUX", 0, rx_mix_tx1),
+	WCD_DAPM_MUX("RX MIX TX2 MUX", 0, rx_mix_tx2),
+	WCD_DAPM_MUX("RX MIX TX3 MUX", 0, rx_mix_tx3),
+	WCD_DAPM_MUX("RX MIX TX4 MUX", 0, rx_mix_tx4),
+	WCD_DAPM_MUX("RX MIX TX5 MUX", 0, rx_mix_tx5),
+	WCD_DAPM_MUX("RX MIX TX6 MUX", 0, rx_mix_tx6),
+	WCD_DAPM_MUX("RX MIX TX7 MUX", 0, rx_mix_tx7),
+	WCD_DAPM_MUX("RX MIX TX8 MUX", 0, rx_mix_tx8),
+	WCD_DAPM_MUX("RX INT0 DEM MUX", 0, rx_int0_dem_inp),
+	WCD_DAPM_MUX("RX INT1 DEM MUX", 0, rx_int1_dem_inp),
+	WCD_DAPM_MUX("RX INT2 DEM MUX", 0, rx_int2_dem_inp),
+
+	SND_SOC_DAPM_MUX_E("RX INT0_1 INTERP", SND_SOC_NOPM, INTERP_EAR, 0,
+		&rx_int0_1_interp_mux, tavil_codec_enable_main_path,
+		SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
+		SND_SOC_DAPM_POST_PMD),
+	SND_SOC_DAPM_MUX_E("RX INT1_1 INTERP", SND_SOC_NOPM, INTERP_HPHL, 0,
+		&rx_int1_1_interp_mux, tavil_codec_enable_main_path,
+		SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
+		SND_SOC_DAPM_POST_PMD),
+	SND_SOC_DAPM_MUX_E("RX INT2_1 INTERP", SND_SOC_NOPM, INTERP_HPHR, 0,
+		&rx_int2_1_interp_mux, tavil_codec_enable_main_path,
+		SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
+		SND_SOC_DAPM_POST_PMD),
+	SND_SOC_DAPM_MUX_E("RX INT3_1 INTERP", SND_SOC_NOPM, INTERP_LO1, 0,
+		&rx_int3_1_interp_mux, tavil_codec_enable_main_path,
+		SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
+		SND_SOC_DAPM_POST_PMD),
+	SND_SOC_DAPM_MUX_E("RX INT4_1 INTERP", SND_SOC_NOPM, INTERP_LO2, 0,
+		&rx_int4_1_interp_mux, tavil_codec_enable_main_path,
+		SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
+		SND_SOC_DAPM_POST_PMD),
+	SND_SOC_DAPM_MUX_E("RX INT7_1 INTERP", SND_SOC_NOPM, INTERP_SPKR1, 0,
+		&rx_int7_1_interp_mux, tavil_codec_enable_main_path,
+		SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
+		SND_SOC_DAPM_POST_PMD),
+	SND_SOC_DAPM_MUX_E("RX INT8_1 INTERP", SND_SOC_NOPM, INTERP_SPKR2, 0,
+		&rx_int8_1_interp_mux, tavil_codec_enable_main_path,
+		SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
+		SND_SOC_DAPM_POST_PMD),
+
+	WCD_DAPM_MUX("RX INT0_2 INTERP", 0, rx_int0_2_interp),
+	WCD_DAPM_MUX("RX INT1_2 INTERP", 0, rx_int1_2_interp),
+	WCD_DAPM_MUX("RX INT2_2 INTERP", 0, rx_int2_2_interp),
+	WCD_DAPM_MUX("RX INT3_2 INTERP", 0, rx_int3_2_interp),
+	WCD_DAPM_MUX("RX INT4_2 INTERP", 0, rx_int4_2_interp),
+	WCD_DAPM_MUX("RX INT7_2 INTERP", 0, rx_int7_2_interp),
+	WCD_DAPM_MUX("RX INT8_2 INTERP", 0, rx_int8_2_interp),
+
+	SND_SOC_DAPM_SWITCH("ADC US MUX0", WCD934X_CDC_TX0_TX_PATH_192_CTL, 0,
+		0, &adc_us_mux0_switch),
+	SND_SOC_DAPM_SWITCH("ADC US MUX1", WCD934X_CDC_TX1_TX_PATH_192_CTL, 0,
+		0, &adc_us_mux1_switch),
+	SND_SOC_DAPM_SWITCH("ADC US MUX2", WCD934X_CDC_TX2_TX_PATH_192_CTL, 0,
+		0, &adc_us_mux2_switch),
+	SND_SOC_DAPM_SWITCH("ADC US MUX3", WCD934X_CDC_TX3_TX_PATH_192_CTL, 0,
+		0, &adc_us_mux3_switch),
+	SND_SOC_DAPM_SWITCH("ADC US MUX4", WCD934X_CDC_TX4_TX_PATH_192_CTL, 0,
+		0, &adc_us_mux4_switch),
+	SND_SOC_DAPM_SWITCH("ADC US MUX5", WCD934X_CDC_TX5_TX_PATH_192_CTL, 0,
+		0, &adc_us_mux5_switch),
+	SND_SOC_DAPM_SWITCH("ADC US MUX6", WCD934X_CDC_TX6_TX_PATH_192_CTL, 0,
+		0, &adc_us_mux6_switch),
+	SND_SOC_DAPM_SWITCH("ADC US MUX7", WCD934X_CDC_TX7_TX_PATH_192_CTL, 0,
+		0, &adc_us_mux7_switch),
+	SND_SOC_DAPM_SWITCH("ADC US MUX8", WCD934X_CDC_TX8_TX_PATH_192_CTL, 0,
+		0, &adc_us_mux8_switch),
+
+	/* MAD related widgets */
+	SND_SOC_DAPM_INPUT("MAD_CPE_INPUT"),
+	SND_SOC_DAPM_INPUT("MADINPUT"),
+
+	WCD_DAPM_MUX("MAD_SEL MUX", 0, mad_sel),
+	WCD_DAPM_MUX("MAD_INP MUX", 0, mad_inp_mux),
+
+	SND_SOC_DAPM_SWITCH_E("MAD_BROADCAST", SND_SOC_NOPM, 0, 0,
+			      &mad_brdcst_switch, tavil_codec_ape_enable_mad,
+			      SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_PRE_PMD),
+
+	SND_SOC_DAPM_SWITCH_E("MAD_CPE1", SND_SOC_NOPM, 0, 0,
+			      &mad_cpe1_switch, tavil_codec_cpe_mad_ctl,
+			      SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_PRE_PMD),
+	SND_SOC_DAPM_SWITCH_E("MAD_CPE2", SND_SOC_NOPM, 0, 0,
+			      &mad_cpe2_switch, tavil_codec_cpe_mad_ctl,
+			      SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_PRE_PMD),
+
+	SND_SOC_DAPM_OUTPUT("MAD_CPE_OUT1"),
+	SND_SOC_DAPM_OUTPUT("MAD_CPE_OUT2"),
+
+	SND_SOC_DAPM_DAC_E("RX INT0 DAC", NULL, SND_SOC_NOPM,
+		0, 0, tavil_codec_ear_dac_event,
+		SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
+		SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD),
+	SND_SOC_DAPM_DAC_E("RX INT1 DAC", NULL, SND_SOC_NOPM,
+		0, 0, tavil_codec_hphl_dac_event,
+		SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
+		SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD),
+	SND_SOC_DAPM_DAC_E("RX INT2 DAC", NULL, SND_SOC_NOPM,
+		0, 0, tavil_codec_hphr_dac_event,
+		SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
+		SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD),
+	SND_SOC_DAPM_DAC_E("RX INT3 DAC", NULL, SND_SOC_NOPM,
+		0, 0, tavil_codec_lineout_dac_event,
+		SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+	SND_SOC_DAPM_DAC_E("RX INT4 DAC", NULL, SND_SOC_NOPM,
+		0, 0, tavil_codec_lineout_dac_event,
+		SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+
+	SND_SOC_DAPM_PGA_E("EAR PA", WCD934X_ANA_EAR, 7, 0, NULL, 0,
+		tavil_codec_enable_ear_pa,
+		SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
+	SND_SOC_DAPM_PGA_E("HPHL PA", SND_SOC_NOPM, 0, 0, NULL, 0,
+		tavil_codec_enable_hphl_pa,
+		SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
+		SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD),
+	SND_SOC_DAPM_PGA_E("HPHR PA", SND_SOC_NOPM, 0, 0, NULL, 0,
+		tavil_codec_enable_hphr_pa,
+		SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
+		SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD),
+	SND_SOC_DAPM_PGA_E("LINEOUT1 PA", WCD934X_ANA_LO_1_2, 7, 0, NULL, 0,
+		tavil_codec_enable_lineout_pa,
+		SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
+		SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD),
+	SND_SOC_DAPM_PGA_E("LINEOUT2 PA", WCD934X_ANA_LO_1_2, 6, 0, NULL, 0,
+		tavil_codec_enable_lineout_pa,
+		SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
+		SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD),
+	SND_SOC_DAPM_PGA_E("ANC EAR PA", WCD934X_ANA_EAR, 7, 0, NULL, 0,
+		tavil_codec_enable_ear_pa, SND_SOC_DAPM_POST_PMU |
+		SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD),
+	SND_SOC_DAPM_PGA_E("ANC SPK1 PA", SND_SOC_NOPM, 0, 0, NULL, 0,
+		tavil_codec_enable_spkr_anc,
+		SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+	SND_SOC_DAPM_PGA_E("ANC HPHL PA", SND_SOC_NOPM, 0, 0, NULL, 0,
+		tavil_codec_enable_hphl_pa,
+		SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
+		SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD),
+	SND_SOC_DAPM_PGA_E("ANC HPHR PA", SND_SOC_NOPM, 0, 0, NULL, 0,
+		tavil_codec_enable_hphr_pa,
+		SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
+		SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD),
+
+	SND_SOC_DAPM_OUTPUT("EAR"),
+	SND_SOC_DAPM_OUTPUT("HPHL"),
+	SND_SOC_DAPM_OUTPUT("HPHR"),
+	SND_SOC_DAPM_OUTPUT("LINEOUT1"),
+	SND_SOC_DAPM_OUTPUT("LINEOUT2"),
+	SND_SOC_DAPM_OUTPUT("SPK1 OUT"),
+	SND_SOC_DAPM_OUTPUT("SPK2 OUT"),
+	SND_SOC_DAPM_OUTPUT("ANC EAR"),
+	SND_SOC_DAPM_OUTPUT("ANC HPHL"),
+	SND_SOC_DAPM_OUTPUT("ANC HPHR"),
+
+	SND_SOC_DAPM_SWITCH("ANC OUT EAR Enable", SND_SOC_NOPM, 0, 0,
+		&anc_ear_switch),
+	SND_SOC_DAPM_SWITCH("ANC OUT EAR SPKR Enable", SND_SOC_NOPM, 0, 0,
+		&anc_ear_spkr_switch),
+	SND_SOC_DAPM_SWITCH("ANC SPKR PA Enable", SND_SOC_NOPM, 0, 0,
+		&anc_spkr_pa_switch),
+
+	SND_SOC_DAPM_SWITCH_E("ANC OUT HPHL Enable", SND_SOC_NOPM, INTERP_HPHL,
+		0, &anc_hphl_pa_switch, tavil_anc_out_switch_cb,
+		SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_PRE_PMD),
+	SND_SOC_DAPM_SWITCH_E("ANC OUT HPHR Enable", SND_SOC_NOPM, INTERP_HPHR,
+		0, &anc_hphr_pa_switch, tavil_anc_out_switch_cb,
+		SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_PRE_PMD),
+
+	SND_SOC_DAPM_SUPPLY("RX_BIAS", SND_SOC_NOPM, 0, 0,
+		tavil_codec_enable_rx_bias,
+		SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+
+	SND_SOC_DAPM_SUPPLY("RX INT1 NATIVE SUPPLY", SND_SOC_NOPM,
+		INTERP_HPHL, 0, tavil_enable_native_supply,
+		SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_PRE_PMD),
+	SND_SOC_DAPM_SUPPLY("RX INT2 NATIVE SUPPLY", SND_SOC_NOPM,
+		INTERP_HPHR, 0, tavil_enable_native_supply,
+		SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_PRE_PMD),
+	SND_SOC_DAPM_SUPPLY("RX INT3 NATIVE SUPPLY", SND_SOC_NOPM,
+		INTERP_LO1, 0, tavil_enable_native_supply,
+		SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_PRE_PMD),
+	SND_SOC_DAPM_SUPPLY("RX INT4 NATIVE SUPPLY", SND_SOC_NOPM,
+		INTERP_LO2, 0, tavil_enable_native_supply,
+		SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_PRE_PMD),
+	SND_SOC_DAPM_SUPPLY("RX INT7 NATIVE SUPPLY", SND_SOC_NOPM,
+		INTERP_SPKR1, 0, tavil_enable_native_supply,
+		SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_PRE_PMD),
+	SND_SOC_DAPM_SUPPLY("RX INT8 NATIVE SUPPLY", SND_SOC_NOPM,
+		INTERP_SPKR2, 0, tavil_enable_native_supply,
+		SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_PRE_PMD),
+
+	WCD_DAPM_MUX("RX INT1_1 NATIVE MUX", 0, int1_1_native),
+	WCD_DAPM_MUX("RX INT2_1 NATIVE MUX", 0, int2_1_native),
+	WCD_DAPM_MUX("RX INT3_1 NATIVE MUX", 0, int3_1_native),
+	WCD_DAPM_MUX("RX INT4_1 NATIVE MUX", 0, int4_1_native),
+
+	WCD_DAPM_MUX("RX INT1_2 NATIVE MUX", 0, int1_2_native),
+	WCD_DAPM_MUX("RX INT2_2 NATIVE MUX", 0, int2_2_native),
+	WCD_DAPM_MUX("RX INT3_2 NATIVE MUX", 0, int3_2_native),
+	WCD_DAPM_MUX("RX INT4_2 NATIVE MUX", 0, int4_2_native),
+	WCD_DAPM_MUX("RX INT7_2 NATIVE MUX", 0, int7_2_native),
+	WCD_DAPM_MUX("RX INT8_2 NATIVE MUX", 0, int8_2_native),
+
+	SND_SOC_DAPM_MUX_E("ASRC0 MUX", SND_SOC_NOPM, ASRC0, 0,
+		&asrc0_mux, tavil_codec_enable_asrc_resampler,
+		SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+	SND_SOC_DAPM_MUX_E("ASRC1 MUX", SND_SOC_NOPM, ASRC1, 0,
+		&asrc1_mux, tavil_codec_enable_asrc_resampler,
+		SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+	SND_SOC_DAPM_MUX_E("ASRC2 MUX", SND_SOC_NOPM, ASRC2, 0,
+		&asrc2_mux, tavil_codec_enable_asrc_resampler,
+		SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+	SND_SOC_DAPM_MUX_E("ASRC3 MUX", SND_SOC_NOPM, ASRC3, 0,
+		&asrc3_mux, tavil_codec_enable_asrc_resampler,
+		SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+};
+
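+/*
+ * Report the SLIMbus channel numbers currently assigned to this DAI by
+ * walking its wcd9xxx_ch_list; an empty list is treated as an error.
+ */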
+static int tavil_get_channel_map(struct snd_soc_dai *dai,
+				 unsigned int *tx_num, unsigned int *tx_slot,
+				 unsigned int *rx_num, unsigned int *rx_slot)
+{
+	struct tavil_priv *tavil = snd_soc_codec_get_drvdata(dai->codec);
+	u32 i = 0;
+	struct wcd9xxx_ch *ch;
+	int ret = 0;
+
+	switch (dai->id) {
+	case AIF1_PB:
+	case AIF2_PB:
+	case AIF3_PB:
+	case AIF4_PB:
+		if (!rx_slot || !rx_num) {
+			dev_err(tavil->dev, "%s: Invalid rx_slot 0x%pK or rx_num 0x%pK\n",
+				 __func__, rx_slot, rx_num);
+			ret = -EINVAL;
+			break;
+		}
+		list_for_each_entry(ch, &tavil->dai[dai->id].wcd9xxx_ch_list,
+				    list) {
+			dev_dbg(tavil->dev, "%s: slot_num %u ch->ch_num %d\n",
+				 __func__, i, ch->ch_num);
+			rx_slot[i++] = ch->ch_num;
+		}
+		*rx_num = i;
+		dev_dbg(tavil->dev, "%s: dai_name = %s dai_id = %x  rx_num = %d\n",
+			__func__, dai->name, dai->id, i);
+		if (*rx_num == 0) {
+			dev_err(tavil->dev, "%s: Channel list empty for dai_name = %s dai_id = %x\n",
+				__func__, dai->name, dai->id);
+			ret = -EINVAL;
+		}
+		break;
+	case AIF1_CAP:
+	case AIF2_CAP:
+	case AIF3_CAP:
+	case AIF4_MAD_TX:
+	case AIF4_VIFEED:
+		if (!tx_slot || !tx_num) {
+			dev_err(tavil->dev, "%s: Invalid tx_slot 0x%pK or tx_num 0x%pK\n",
+				 __func__, tx_slot, tx_num);
+			ret = -EINVAL;
+			break;
+		}
+		list_for_each_entry(ch, &tavil->dai[dai->id].wcd9xxx_ch_list,
+				    list) {
+			dev_dbg(tavil->dev, "%s: slot_num %u ch->ch_num %d\n",
+				 __func__, i,  ch->ch_num);
+			tx_slot[i++] = ch->ch_num;
+		}
+		*tx_num = i;
+		dev_dbg(tavil->dev, "%s: dai_name = %s dai_id = %x  tx_num = %d\n",
+			 __func__, dai->name, dai->id, i);
+		if (*tx_num == 0) {
+			dev_err(tavil->dev, "%s: Channel list empty for dai_name = %s dai_id = %x\n",
+				 __func__, dai->name, dai->id);
+			ret = -EINVAL;
+		}
+		break;
+	default:
+		dev_err(tavil->dev, "%s: Invalid DAI ID %x\n",
+			__func__, dai->id);
+		ret = -EINVAL;
+		break;
+	}
+
+	return ret;
+}
+
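+/*
+ * Program the SLIMbus slave ports from the machine-driver channel map
+ * and reserve SLIM TX13 for the MAD data channel.
+ */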
+static int tavil_set_channel_map(struct snd_soc_dai *dai,
+				 unsigned int tx_num, unsigned int *tx_slot,
+				 unsigned int rx_num, unsigned int *rx_slot)
+{
+	struct tavil_priv *tavil;
+	struct wcd9xxx *core;
+	struct wcd9xxx_codec_dai_data *dai_data = NULL;
+
+	tavil = snd_soc_codec_get_drvdata(dai->codec);
+	core = dev_get_drvdata(dai->codec->dev->parent);
+
+	if (!tx_slot || !rx_slot) {
+		dev_err(tavil->dev, "%s: Invalid tx_slot 0x%pK, rx_slot 0x%pK\n",
+			__func__, tx_slot, rx_slot);
+		return -EINVAL;
+	}
+	dev_dbg(tavil->dev, "%s(): dai_name = %s DAI-ID %x tx_ch %d rx_ch %d\n",
+		 __func__, dai->name, dai->id, tx_num, rx_num);
+
+	wcd9xxx_init_slimslave(core, core->slim->laddr,
+				tx_num, tx_slot, rx_num, rx_slot);
+	/* Reserve TX13 for MAD data channel */
+	dai_data = &tavil->dai[AIF4_MAD_TX];
+	if (dai_data)
+		list_add_tail(&core->tx_chs[WCD934X_TX13].list,
+			      &dai_data->wcd9xxx_ch_list);
+
+	return 0;
+}
+
+static int tavil_startup(struct snd_pcm_substream *substream,
+		struct snd_soc_dai *dai)
+{
+	pr_debug("%s(): substream = %s  stream = %d\n", __func__,
+		 substream->name, substream->stream);
+
+	return 0;
+}
+
+static void tavil_shutdown(struct snd_pcm_substream *substream,
+			   struct snd_soc_dai *dai)
+{
+	pr_debug("%s(): substream = %s  stream = %d\n", __func__,
+		 substream->name, substream->stream);
+}
+
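+/*
+ * For each SLIM TX channel on this DAI, inspect the CDC_IF router MUX
+ * to find which decimator (if any) feeds the port, then program the
+ * 4-bit rate code into WCD934X_CDC_TX0_TX_PATH_CTL + 16 * decimator.
+ */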
+static int tavil_set_decimator_rate(struct snd_soc_dai *dai,
+				    u32 sample_rate)
+{
+	struct snd_soc_codec *codec = dai->codec;
+	struct wcd9xxx_ch *ch;
+	struct tavil_priv *tavil = snd_soc_codec_get_drvdata(codec);
+	u32 tx_port = 0, tx_fs_rate = 0;
+	u8 shift = 0, shift_val = 0, tx_mux_sel = 0;
+	int decimator = -1;
+	u16 tx_port_reg = 0, tx_fs_reg = 0;
+
+	switch (sample_rate) {
+	case 8000:
+		tx_fs_rate = 0;
+		break;
+	case 16000:
+		tx_fs_rate = 1;
+		break;
+	case 32000:
+		tx_fs_rate = 3;
+		break;
+	case 48000:
+		tx_fs_rate = 4;
+		break;
+	case 96000:
+		tx_fs_rate = 5;
+		break;
+	case 192000:
+		tx_fs_rate = 6;
+		break;
+	default:
+		dev_err(tavil->dev, "%s: Invalid TX sample rate: %d\n",
+			__func__, sample_rate);
+		return -EINVAL;
+	}
+
+	list_for_each_entry(ch, &tavil->dai[dai->id].wcd9xxx_ch_list, list) {
+		tx_port = ch->port;
+		dev_dbg(codec->dev, "%s: dai->id = %d, tx_port = %d",
+			__func__, dai->id, tx_port);
+
+		if ((tx_port == 12) || (tx_port >= 14)) {
+			dev_err(codec->dev, "%s: Invalid SLIM TX%u port. DAI ID: %d\n",
+				__func__, tx_port, dai->id);
+			return -EINVAL;
+		}
+		/* Find the SB TX MUX input - which decimator is connected */
+		if (tx_port < 4) {
+			tx_port_reg = WCD934X_CDC_IF_ROUTER_TX_MUX_CFG0;
+			shift = (tx_port << 1);
+			shift_val = 0x03;
+		} else if ((tx_port >= 4) && (tx_port < 8)) {
+			tx_port_reg = WCD934X_CDC_IF_ROUTER_TX_MUX_CFG1;
+			shift = ((tx_port - 4) << 1);
+			shift_val = 0x03;
+		} else if ((tx_port >= 8) && (tx_port < 11)) {
+			tx_port_reg = WCD934X_CDC_IF_ROUTER_TX_MUX_CFG2;
+			shift = ((tx_port - 8) << 1);
+			shift_val = 0x03;
+		} else if (tx_port == 11) {
+			tx_port_reg = WCD934X_CDC_IF_ROUTER_TX_MUX_CFG3;
+			shift = 0;
+			shift_val = 0x0F;
+		} else if (tx_port == 13) {
+			tx_port_reg = WCD934X_CDC_IF_ROUTER_TX_MUX_CFG3;
+			shift = 4;
+			shift_val = 0x03;
+		}
+		tx_mux_sel = snd_soc_read(codec, tx_port_reg) &
+					  (shift_val << shift);
+		tx_mux_sel = tx_mux_sel >> shift;
+
+		if (tx_port <= 8) {
+			if ((tx_mux_sel == 0x2) || (tx_mux_sel == 0x3))
+				decimator = tx_port;
+		} else if (tx_port <= 10) {
+			if ((tx_mux_sel == 0x1) || (tx_mux_sel == 0x2))
+				decimator = ((tx_port == 9) ? 7 : 6);
+		} else if (tx_port == 11) {
+			if ((tx_mux_sel >= 1) && (tx_mux_sel < 7))
+				decimator = tx_mux_sel - 1;
+		} else if (tx_port == 13) {
+			if ((tx_mux_sel == 0x1) || (tx_mux_sel == 0x2))
+				decimator = 5;
+		}
+
+		if (decimator >= 0) {
+			tx_fs_reg = WCD934X_CDC_TX0_TX_PATH_CTL +
+				    16 * decimator;
+			dev_dbg(codec->dev, "%s: set DEC%u (-> SLIM_TX%u) rate to %u\n",
+				__func__, decimator, tx_port, sample_rate);
+			snd_soc_update_bits(codec, tx_fs_reg, 0x0F, tx_fs_rate);
+		} else if ((tx_port <= 8) && (tx_mux_sel == 0x01)) {
+			/* Check if the TX Mux input is RX MIX TXn */
+			dev_dbg(codec->dev, "%s: RX_MIX_TX%u going to CDC_IF TX%u\n",
+					__func__, tx_port, tx_port);
+		} else {
+			dev_err(codec->dev, "%s: ERROR: Invalid decimator: %d\n",
+				__func__, decimator);
+			return -EINVAL;
+		}
+	}
+	return 0;
+}
+
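+/*
+ * Set the mix path (INTn_2) sample rate for every interpolator whose
+ * second input mux is fed by one of this DAI's SLIM RX ports.
+ */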
+static int tavil_set_mix_interpolator_rate(struct snd_soc_dai *dai,
+					   u8 rate_reg_val,
+					   u32 sample_rate)
+{
+	u8 int_2_inp;
+	u32 j;
+	u16 int_mux_cfg1, int_fs_reg;
+	u8 int_mux_cfg1_val;
+	struct snd_soc_codec *codec = dai->codec;
+	struct wcd9xxx_ch *ch;
+	struct tavil_priv *tavil = snd_soc_codec_get_drvdata(codec);
+
+	list_for_each_entry(ch, &tavil->dai[dai->id].wcd9xxx_ch_list, list) {
+		int_2_inp = INTn_2_INP_SEL_RX0 + ch->port -
+						WCD934X_RX_PORT_START_NUMBER;
+		if ((int_2_inp < INTn_2_INP_SEL_RX0) ||
+		    (int_2_inp > INTn_2_INP_SEL_RX7)) {
+			dev_err(codec->dev, "%s: Invalid RX%u port, Dai ID is %d\n",
+				__func__,
+				(ch->port - WCD934X_RX_PORT_START_NUMBER),
+				dai->id);
+			return -EINVAL;
+		}
+
+		int_mux_cfg1 = WCD934X_CDC_RX_INP_MUX_RX_INT0_CFG1;
+		for (j = 0; j < WCD934X_NUM_INTERPOLATORS; j++) {
+			/* Interpolators 5 and 6 are not available in Tavil */
+			if (j == INTERP_LO3_NA || j == INTERP_LO4_NA) {
+				int_mux_cfg1 += 2;
+				continue;
+			}
+			int_mux_cfg1_val = snd_soc_read(codec, int_mux_cfg1) &
+									0x0F;
+			if (int_mux_cfg1_val == int_2_inp) {
+				/*
+				 * The ear mix path supports only 48, 96,
+				 * 192 and 384 kHz
+				 */
+				if ((j == INTERP_EAR) &&
+				    (rate_reg_val < 0x4 ||
+				     rate_reg_val > 0x7)) {
+					dev_err_ratelimited(codec->dev,
+					"%s: Invalid rate for AIF_PB DAI(%d)\n",
+					  __func__, dai->id);
+					return -EINVAL;
+				}
+
+				int_fs_reg = WCD934X_CDC_RX0_RX_PATH_MIX_CTL +
+									20 * j;
+				dev_dbg(codec->dev, "%s: AIF_PB DAI(%d) connected to INT%u_2\n",
+					  __func__, dai->id, j);
+				dev_dbg(codec->dev, "%s: set INT%u_2 sample rate to %u\n",
+					__func__, j, sample_rate);
+				snd_soc_update_bits(codec, int_fs_reg, 0x0F,
+						    rate_reg_val);
+			}
+			int_mux_cfg1 += 2;
+		}
+	}
+	return 0;
+}
+
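+/*
+ * Set the primary path (INTn_1) sample rate: scan the three MIX1 input
+ * selects of each interpolator for this DAI's SLIM RX ports and, when
+ * DSD is configured, propagate the rate to the DSD block as well.
+ */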
+static int tavil_set_prim_interpolator_rate(struct snd_soc_dai *dai,
+					    u8 rate_reg_val,
+					    u32 sample_rate)
+{
+	u8 int_1_mix1_inp;
+	u32 j;
+	u16 int_mux_cfg0, int_mux_cfg1;
+	u16 int_fs_reg;
+	u8 int_mux_cfg0_val, int_mux_cfg1_val;
+	u8 inp0_sel, inp1_sel, inp2_sel;
+	struct snd_soc_codec *codec = dai->codec;
+	struct wcd9xxx_ch *ch;
+	struct tavil_priv *tavil = snd_soc_codec_get_drvdata(codec);
+	struct tavil_dsd_config *dsd_conf = tavil->dsd_config;
+
+	list_for_each_entry(ch, &tavil->dai[dai->id].wcd9xxx_ch_list, list) {
+		int_1_mix1_inp = INTn_1_INP_SEL_RX0 + ch->port -
+						WCD934X_RX_PORT_START_NUMBER;
+		if ((int_1_mix1_inp < INTn_1_INP_SEL_RX0) ||
+		    (int_1_mix1_inp > INTn_1_INP_SEL_RX7)) {
+			dev_err(codec->dev, "%s: Invalid RX%u port, Dai ID is %d\n",
+				__func__,
+				(ch->port - WCD934X_RX_PORT_START_NUMBER),
+				dai->id);
+			return -EINVAL;
+		}
+
+		int_mux_cfg0 = WCD934X_CDC_RX_INP_MUX_RX_INT0_CFG0;
+
+		/*
+		 * Loop through all interpolator MUX inputs and find out
+		 * to which interpolator input, the slim rx port
+		 * is connected
+		 */
+		for (j = 0; j < WCD934X_NUM_INTERPOLATORS; j++) {
+			/* Interpolators 5 and 6 are not available in Tavil */
+			if (j == INTERP_LO3_NA || j == INTERP_LO4_NA) {
+				int_mux_cfg0 += 2;
+				continue;
+			}
+			int_mux_cfg1 = int_mux_cfg0 + 1;
+
+			int_mux_cfg0_val = snd_soc_read(codec, int_mux_cfg0);
+			int_mux_cfg1_val = snd_soc_read(codec, int_mux_cfg1);
+			inp0_sel = int_mux_cfg0_val & 0x0F;
+			inp1_sel = (int_mux_cfg0_val >> 4) & 0x0F;
+			inp2_sel = (int_mux_cfg1_val >> 4) & 0x0F;
+			if ((inp0_sel == int_1_mix1_inp) ||
+			    (inp1_sel == int_1_mix1_inp) ||
+			    (inp2_sel == int_1_mix1_inp)) {
+				/*
+				 * Ear and speaker primary path does not support
+				 * native sample rates
+				 */
+				if ((j == INTERP_EAR || j == INTERP_SPKR1 ||
+					j == INTERP_SPKR2) &&
+					(rate_reg_val > 0x7)) {
+					dev_err_ratelimited(codec->dev,
+					"%s: Invalid rate for AIF_PB DAI(%d)\n",
+					  __func__, dai->id);
+					return -EINVAL;
+				}
+
+				int_fs_reg = WCD934X_CDC_RX0_RX_PATH_CTL +
+									20 * j;
+				dev_dbg(codec->dev,
+				"%s: AIF_PB DAI(%d) connected to INT%u_1\n",
+				  __func__, dai->id, j);
+				dev_dbg(codec->dev,
+					"%s: set INT%u_1 sample rate to %u\n",
+					__func__, j, sample_rate);
+				snd_soc_update_bits(codec, int_fs_reg, 0x0F,
+						    rate_reg_val);
+			}
+			int_mux_cfg0 += 2;
+		}
+		if (dsd_conf)
+			tavil_dsd_set_interp_rate(dsd_conf, ch->port,
+						  sample_rate, rate_reg_val);
+	}
+
+	return 0;
+}
+
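+/*
+ * Translate the PCM rate to the codec rate code via sr_val_tbl and
+ * apply it to both the primary and the mix interpolator paths.
+ */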
+static int tavil_set_interpolator_rate(struct snd_soc_dai *dai,
+				       u32 sample_rate)
+{
+	struct snd_soc_codec *codec = dai->codec;
+	int rate_val = 0;
+	int i, ret;
+
+	for (i = 0; i < ARRAY_SIZE(sr_val_tbl); i++) {
+		if (sample_rate == sr_val_tbl[i].sample_rate) {
+			rate_val = sr_val_tbl[i].rate_val;
+			break;
+		}
+	}
+	if ((i == ARRAY_SIZE(sr_val_tbl)) || (rate_val < 0)) {
+		dev_err(codec->dev, "%s: Unsupported sample rate: %d\n",
+			__func__, sample_rate);
+		return -EINVAL;
+	}
+
+	ret = tavil_set_prim_interpolator_rate(dai, (u8)rate_val, sample_rate);
+	if (ret)
+		return ret;
+	ret = tavil_set_mix_interpolator_rate(dai, (u8)rate_val, sample_rate);
+	if (ret)
+		return ret;
+
+	return ret;
+}
+
+static int tavil_prepare(struct snd_pcm_substream *substream,
+			 struct snd_soc_dai *dai)
+{
+	pr_debug("%s(): substream = %s  stream = %d\n", __func__,
+		 substream->name, substream->stream);
+	return 0;
+}
+
+static int tavil_vi_hw_params(struct snd_pcm_substream *substream,
+			      struct snd_pcm_hw_params *params,
+			      struct snd_soc_dai *dai)
+{
+	struct tavil_priv *tavil = snd_soc_codec_get_drvdata(dai->codec);
+
+	dev_dbg(tavil->dev, "%s: dai_name = %s DAI-ID %x rate %d num_ch %d\n",
+		 __func__, dai->name, dai->id, params_rate(params),
+		 params_channels(params));
+
+	tavil->dai[dai->id].rate = params_rate(params);
+	tavil->dai[dai->id].bit_width = 32;
+
+	return 0;
+}
+
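+/*
+ * hw_params: program interpolator rates for playback and decimator
+ * rates for capture (except AIF4_MAD_TX, whose rate is fixed at
+ * 16 kHz), then record the stream's rate and bit width per DAI.
+ */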
+static int tavil_hw_params(struct snd_pcm_substream *substream,
+			   struct snd_pcm_hw_params *params,
+			   struct snd_soc_dai *dai)
+{
+	struct tavil_priv *tavil = snd_soc_codec_get_drvdata(dai->codec);
+	int ret = 0;
+
+	dev_dbg(tavil->dev, "%s: dai_name = %s DAI-ID %x rate %d num_ch %d\n",
+		 __func__, dai->name, dai->id, params_rate(params),
+		 params_channels(params));
+
+	switch (substream->stream) {
+	case SNDRV_PCM_STREAM_PLAYBACK:
+		ret = tavil_set_interpolator_rate(dai, params_rate(params));
+		if (ret) {
+			dev_err(tavil->dev, "%s: cannot set sample rate: %u\n",
+				__func__, params_rate(params));
+			return ret;
+		}
+		switch (params_width(params)) {
+		case 16:
+			tavil->dai[dai->id].bit_width = 16;
+			break;
+		case 24:
+			tavil->dai[dai->id].bit_width = 24;
+			break;
+		case 32:
+			tavil->dai[dai->id].bit_width = 32;
+			break;
+		default:
+			return -EINVAL;
+		}
+		tavil->dai[dai->id].rate = params_rate(params);
+		break;
+	case SNDRV_PCM_STREAM_CAPTURE:
+		if (dai->id != AIF4_MAD_TX)
+			ret = tavil_set_decimator_rate(dai,
+						       params_rate(params));
+		if (ret) {
+			dev_err(tavil->dev, "%s: cannot set TX Decimator rate: %d\n",
+				__func__, ret);
+			return ret;
+		}
+		switch (params_width(params)) {
+		case 16:
+			tavil->dai[dai->id].bit_width = 16;
+			break;
+		case 24:
+			tavil->dai[dai->id].bit_width = 24;
+			break;
+		default:
+			dev_err(tavil->dev, "%s: Invalid format 0x%x\n",
+				__func__, params_width(params));
+			return -EINVAL;
+		}
+		tavil->dai[dai->id].rate = params_rate(params);
+		break;
+	default:
+		dev_err(tavil->dev, "%s: Invalid stream type %d\n", __func__,
+			substream->stream);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static struct snd_soc_dai_ops tavil_dai_ops = {
+	.startup = tavil_startup,
+	.shutdown = tavil_shutdown,
+	.hw_params = tavil_hw_params,
+	.prepare = tavil_prepare,
+	.set_channel_map = tavil_set_channel_map,
+	.get_channel_map = tavil_get_channel_map,
+};
+
+static struct snd_soc_dai_ops tavil_vi_dai_ops = {
+	.hw_params = tavil_vi_hw_params,
+	.set_channel_map = tavil_set_channel_map,
+	.get_channel_map = tavil_get_channel_map,
+};
+
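+/*
+ * SLIMbus DAIs: four playback AIFs, three capture AIFs, the VI
+ * feedback DAI and the MAD capture DAI.
+ */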
+static struct snd_soc_dai_driver tavil_dai[] = {
+	{
+		.name = "tavil_rx1",
+		.id = AIF1_PB,
+		.playback = {
+			.stream_name = "AIF1 Playback",
+			.rates = WCD934X_RATES_MASK | WCD934X_FRAC_RATES_MASK,
+			.formats = WCD934X_FORMATS_S16_S24_S32_LE,
+			.rate_min = 8000,
+			.rate_max = 384000,
+			.channels_min = 1,
+			.channels_max = 2,
+		},
+		.ops = &tavil_dai_ops,
+	},
+	{
+		.name = "tavil_tx1",
+		.id = AIF1_CAP,
+		.capture = {
+			.stream_name = "AIF1 Capture",
+			.rates = WCD934X_RATES_MASK,
+			.formats = WCD934X_FORMATS_S16_S24_LE,
+			.rate_min = 8000,
+			.rate_max = 192000,
+			.channels_min = 1,
+			.channels_max = 4,
+		},
+		.ops = &tavil_dai_ops,
+	},
+	{
+		.name = "tavil_rx2",
+		.id = AIF2_PB,
+		.playback = {
+			.stream_name = "AIF2 Playback",
+			.rates = WCD934X_RATES_MASK | WCD934X_FRAC_RATES_MASK,
+			.formats = WCD934X_FORMATS_S16_S24_S32_LE,
+			.rate_min = 8000,
+			.rate_max = 384000,
+			.channels_min = 1,
+			.channels_max = 2,
+		},
+		.ops = &tavil_dai_ops,
+	},
+	{
+		.name = "tavil_tx2",
+		.id = AIF2_CAP,
+		.capture = {
+			.stream_name = "AIF2 Capture",
+			.rates = WCD934X_RATES_MASK,
+			.formats = WCD934X_FORMATS_S16_S24_LE,
+			.rate_min = 8000,
+			.rate_max = 192000,
+			.channels_min = 1,
+			.channels_max = 4,
+		},
+		.ops = &tavil_dai_ops,
+	},
+	{
+		.name = "tavil_rx3",
+		.id = AIF3_PB,
+		.playback = {
+			.stream_name = "AIF3 Playback",
+			.rates = WCD934X_RATES_MASK | WCD934X_FRAC_RATES_MASK,
+			.formats = WCD934X_FORMATS_S16_S24_S32_LE,
+			.rate_min = 8000,
+			.rate_max = 384000,
+			.channels_min = 1,
+			.channels_max = 2,
+		},
+		.ops = &tavil_dai_ops,
+	},
+	{
+		.name = "tavil_tx3",
+		.id = AIF3_CAP,
+		.capture = {
+			.stream_name = "AIF3 Capture",
+			.rates = WCD934X_RATES_MASK,
+			.formats = WCD934X_FORMATS_S16_S24_LE,
+			.rate_min = 8000,
+			.rate_max = 192000,
+			.channels_min = 1,
+			.channels_max = 4,
+		},
+		.ops = &tavil_dai_ops,
+	},
+	{
+		.name = "tavil_rx4",
+		.id = AIF4_PB,
+		.playback = {
+			.stream_name = "AIF4 Playback",
+			.rates = WCD934X_RATES_MASK | WCD934X_FRAC_RATES_MASK,
+			.formats = WCD934X_FORMATS_S16_S24_S32_LE,
+			.rate_min = 8000,
+			.rate_max = 384000,
+			.channels_min = 1,
+			.channels_max = 2,
+		},
+		.ops = &tavil_dai_ops,
+	},
+	{
+		.name = "tavil_vifeedback",
+		.id = AIF4_VIFEED,
+		.capture = {
+			.stream_name = "VIfeed",
+			.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_48000,
+			.formats = WCD934X_FORMATS_S16_S24_S32_LE,
+			.rate_min = 8000,
+			.rate_max = 48000,
+			.channels_min = 1,
+			.channels_max = 4,
+		 },
+		.ops = &tavil_vi_dai_ops,
+	},
+	{
+		.name = "tavil_mad1",
+		.id = AIF4_MAD_TX,
+		.capture = {
+			.stream_name = "AIF4 MAD TX",
+			.rates = SNDRV_PCM_RATE_16000,
+			.formats = WCD934X_FORMATS_S16_LE,
+			.rate_min = 16000,
+			.rate_max = 16000,
+			.channels_min = 1,
+			.channels_max = 1,
+		},
+		.ops = &tavil_dai_ops,
+	},
+};
+
+static void tavil_codec_power_gate_digital_core(struct tavil_priv *tavil)
+{
+	mutex_lock(&tavil->power_lock);
+	dev_dbg(tavil->dev, "%s: Entering power gating function, %d\n",
+		__func__, tavil->power_active_ref);
+
+	if (tavil->power_active_ref > 0)
+		goto exit;
+
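+	/*
+	 * No active users are left: begin the collapse sequence, gate
+	 * the digital core via the RPM power control register, and mark
+	 * the region as powered down.
+	 */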
+	wcd9xxx_set_power_state(tavil->wcd9xxx,
+			WCD_REGION_POWER_COLLAPSE_BEGIN,
+			WCD9XXX_DIG_CORE_REGION_1);
+	regmap_update_bits(tavil->wcd9xxx->regmap,
+			   WCD934X_CODEC_RPM_PWR_CDC_DIG_HM_CTL, 0x04, 0x04);
+	regmap_update_bits(tavil->wcd9xxx->regmap,
+			   WCD934X_CODEC_RPM_PWR_CDC_DIG_HM_CTL, 0x01, 0x00);
+	regmap_update_bits(tavil->wcd9xxx->regmap,
+			   WCD934X_CODEC_RPM_PWR_CDC_DIG_HM_CTL, 0x02, 0x00);
+	wcd9xxx_set_power_state(tavil->wcd9xxx, WCD_REGION_POWER_DOWN,
+				WCD9XXX_DIG_CORE_REGION_1);
+exit:
+	dev_dbg(tavil->dev, "%s: Exiting power gating function, %d\n",
+		__func__, tavil->power_active_ref);
+	mutex_unlock(&tavil->power_lock);
+}
+
+static void tavil_codec_power_gate_work(struct work_struct *work)
+{
+	struct tavil_priv *tavil;
+	struct delayed_work *dwork;
+
+	dwork = to_delayed_work(work);
+	tavil = container_of(dwork, struct tavil_priv, power_gate_work);
+
+	tavil_codec_power_gate_digital_core(tavil);
+}
+
+/* called with power_lock held */
+static int tavil_dig_core_remove_power_collapse(struct tavil_priv *tavil)
+{
+	regmap_write(tavil->wcd9xxx->regmap,
+		     WCD934X_CODEC_RPM_PWR_CDC_DIG_HM_CTL, 0x05);
+	regmap_write(tavil->wcd9xxx->regmap,
+		     WCD934X_CODEC_RPM_PWR_CDC_DIG_HM_CTL, 0x07);
+	regmap_update_bits(tavil->wcd9xxx->regmap,
+			   WCD934X_CODEC_RPM_RST_CTL, 0x02, 0x00);
+	regmap_update_bits(tavil->wcd9xxx->regmap,
+			   WCD934X_CODEC_RPM_RST_CTL, 0x02, 0x02);
+	regmap_write(tavil->wcd9xxx->regmap,
+		     WCD934X_CODEC_RPM_PWR_CDC_DIG_HM_CTL, 0x03);
+
+	wcd9xxx_set_power_state(tavil->wcd9xxx,
+			WCD_REGION_POWER_COLLAPSE_REMOVE,
+			WCD9XXX_DIG_CORE_REGION_1);
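+	/*
+	 * The digital core lost its register contents while collapsed:
+	 * mark the regmap cache dirty, replay the cached values over the
+	 * digital core register range, then explicitly restore the IIR
+	 * filter coefficients.
+	 */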
+	regcache_mark_dirty(tavil->wcd9xxx->regmap);
+	regcache_sync_region(tavil->wcd9xxx->regmap,
+			     WCD934X_DIG_CORE_REG_MIN,
+			     WCD934X_DIG_CORE_REG_MAX);
+
+	tavil_restore_iir_coeff(tavil, IIR0);
+	tavil_restore_iir_coeff(tavil, IIR1);
+	return 0;
+}
+
+static int tavil_dig_core_power_collapse(struct tavil_priv *tavil,
+					 int req_state)
+{
+	int cur_state;
+
+	/* Exit if feature is disabled */
+	if (!dig_core_collapse_enable)
+		return 0;
+
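+	/*
+	 * power_active_ref counts active users of the digital core:
+	 * POWER_RESUME increments it and POWER_COLLAPSE decrements it.
+	 * The core is collapsed (via delayed work) only when the count
+	 * drops to zero, and restored when the first user returns.
+	 */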
+	mutex_lock(&tavil->power_lock);
+	if (req_state == POWER_COLLAPSE)
+		tavil->power_active_ref--;
+	else if (req_state == POWER_RESUME)
+		tavil->power_active_ref++;
+	else
+		goto unlock_mutex;
+
+	if (tavil->power_active_ref < 0) {
+		dev_dbg(tavil->dev, "%s: power_active_ref is negative\n",
+			__func__);
+		goto unlock_mutex;
+	}
+
+	if (req_state == POWER_COLLAPSE) {
+		if (tavil->power_active_ref == 0) {
+			schedule_delayed_work(&tavil->power_gate_work,
+			msecs_to_jiffies(dig_core_collapse_timer * 1000));
+		}
+	} else if (req_state == POWER_RESUME) {
+		if (tavil->power_active_ref == 1) {
+			/*
+			 * Two cases are possible at this point:
+			 * 1. The core is already in power collapse.
+			 * 2. The delayed work has been scheduled but has
+			 *    not run yet, or is blocked on power_lock.
+			 */
+			cur_state = wcd9xxx_get_current_power_state(
+						tavil->wcd9xxx,
+						WCD9XXX_DIG_CORE_REGION_1);
+			if (cur_state == WCD_REGION_POWER_DOWN) {
+				tavil_dig_core_remove_power_collapse(tavil);
+			} else {
+				mutex_unlock(&tavil->power_lock);
+				cancel_delayed_work_sync(
+						&tavil->power_gate_work);
+				mutex_lock(&tavil->power_lock);
+			}
+		}
+	}
+
+unlock_mutex:
+	mutex_unlock(&tavil->power_lock);
+
+	return 0;
+}
+
+static int tavil_cdc_req_mclk_enable(struct tavil_priv *tavil,
+				     bool enable)
+{
+	int ret = 0;
+
+	if (enable) {
+		ret = clk_prepare_enable(tavil->wcd_ext_clk);
+		if (ret) {
+			dev_err(tavil->dev, "%s: ext clk enable failed\n",
+				__func__);
+			goto done;
+		}
+		/* get BG */
+		wcd_resmgr_enable_master_bias(tavil->resmgr);
+		/* get MCLK */
+		wcd_resmgr_enable_clk_block(tavil->resmgr, WCD_CLK_MCLK);
+	} else {
+		/* put MCLK */
+		wcd_resmgr_disable_clk_block(tavil->resmgr, WCD_CLK_MCLK);
+		/* put BG */
+		wcd_resmgr_disable_master_bias(tavil->resmgr);
+		clk_disable_unprepare(tavil->wcd_ext_clk);
+	}
+
+done:
+	return ret;
+}
+
+static int __tavil_cdc_mclk_enable_locked(struct tavil_priv *tavil,
+					  bool enable)
+{
+	int ret = 0;
+
+	if (!tavil->wcd_ext_clk) {
+		dev_err(tavil->dev, "%s: wcd ext clock is NULL\n", __func__);
+		return -EINVAL;
+	}
+
+	dev_dbg(tavil->dev, "%s: mclk_enable = %u\n", __func__, enable);
+
+	if (enable) {
+		tavil_dig_core_power_collapse(tavil, POWER_RESUME);
+		tavil_vote_svs(tavil, true);
+		ret = tavil_cdc_req_mclk_enable(tavil, true);
+		if (ret)
+			goto done;
+	} else {
+		tavil_cdc_req_mclk_enable(tavil, false);
+		tavil_vote_svs(tavil, false);
+		tavil_dig_core_power_collapse(tavil, POWER_COLLAPSE);
+	}
+
+done:
+	return ret;
+}
+
+static int __tavil_cdc_mclk_enable(struct tavil_priv *tavil,
+				   bool enable)
+{
+	int ret;
+
+	WCD9XXX_V2_BG_CLK_LOCK(tavil->resmgr);
+	ret = __tavil_cdc_mclk_enable_locked(tavil, enable);
+	if (enable)
+		wcd_resmgr_set_sido_input_src(tavil->resmgr,
+						     SIDO_SOURCE_RCO_BG);
+	WCD9XXX_V2_BG_CLK_UNLOCK(tavil->resmgr);
+
+	return ret;
+}
+
+static ssize_t tavil_codec_version_read(struct snd_info_entry *entry,
+					void *file_private_data,
+					struct file *file,
+					char __user *buf, size_t count,
+					loff_t pos)
+{
+	struct tavil_priv *tavil;
+	struct wcd9xxx *wcd9xxx;
+	char buffer[TAVIL_VERSION_ENTRY_SIZE];
+	int len = 0;
+
+	tavil = (struct tavil_priv *) entry->private_data;
+	if (!tavil) {
+		pr_err("%s: tavil priv is null\n", __func__);
+		return -EINVAL;
+	}
+
+	wcd9xxx = tavil->wcd9xxx;
+
+	switch (wcd9xxx->version) {
+	case TAVIL_VERSION_WCD9340_1_0:
+	    len = snprintf(buffer, sizeof(buffer), "WCD9340_1_0\n");
+	    break;
+	case TAVIL_VERSION_WCD9341_1_0:
+	    len = snprintf(buffer, sizeof(buffer), "WCD9341_1_0\n");
+	    break;
+	case TAVIL_VERSION_WCD9340_1_1:
+	    len = snprintf(buffer, sizeof(buffer), "WCD9340_1_1\n");
+	    break;
+	case TAVIL_VERSION_WCD9341_1_1:
+	    len = snprintf(buffer, sizeof(buffer), "WCD9341_1_1\n");
+	    break;
+	default:
+	    len = snprintf(buffer, sizeof(buffer), "VER_UNDEFINED\n");
+	}
+
+	return simple_read_from_buffer(buf, count, &pos, buffer, len);
+}
+
+static struct snd_info_entry_ops tavil_codec_info_ops = {
+	.read = tavil_codec_version_read,
+};
+
+/*
+ * tavil_codec_info_create_codec_entry - creates wcd934x module entry
+ * @codec_root: The parent directory
+ * @codec: Codec instance
+ *
+ * Creates the wcd934x module entry and a version entry under the given
+ * parent directory in the ALSA info (procfs) tree.
+ *
+ * Return: 0 on success or negative error code on failure.
+ */
+int tavil_codec_info_create_codec_entry(struct snd_info_entry *codec_root,
+					struct snd_soc_codec *codec)
+{
+	struct snd_info_entry *version_entry;
+	struct tavil_priv *tavil;
+	struct snd_soc_card *card;
+
+	if (!codec_root || !codec)
+		return -EINVAL;
+
+	tavil = snd_soc_codec_get_drvdata(codec);
+	card = codec->component.card;
+	tavil->entry = snd_register_module_info(codec_root->module,
+						"tavil",
+						codec_root);
+	if (!tavil->entry) {
+		dev_dbg(codec->dev, "%s: failed to create wcd934x entry\n",
+			__func__);
+		return -ENOMEM;
+	}
+
+	version_entry = snd_info_create_card_entry(card->snd_card,
+						   "version",
+						   tavil->entry);
+	if (!version_entry) {
+		dev_dbg(codec->dev, "%s: failed to create wcd934x version entry\n",
+			__func__);
+		return -ENOMEM;
+	}
+
+	version_entry->private_data = tavil;
+	version_entry->size = TAVIL_VERSION_ENTRY_SIZE;
+	version_entry->content = SNDRV_INFO_CONTENT_DATA;
+	version_entry->c.ops = &tavil_codec_info_ops;
+
+	if (snd_info_register(version_entry) < 0) {
+		snd_info_free_entry(version_entry);
+		return -ENOMEM;
+	}
+	tavil->version_entry = version_entry;
+
+	return 0;
+}
+EXPORT_SYMBOL(tavil_codec_info_create_codec_entry);
+
+/**
+ * tavil_cdc_mclk_enable - Enable/disable codec mclk
+ *
+ * @codec: codec instance
+ * @enable: Indicates clk enable or disable
+ *
+ * Return: 0 on success or negative error code on failure
+ */
+int tavil_cdc_mclk_enable(struct snd_soc_codec *codec, bool enable)
+{
+	struct tavil_priv *tavil = snd_soc_codec_get_drvdata(codec);
+
+	return __tavil_cdc_mclk_enable(tavil, enable);
+}
+EXPORT_SYMBOL(tavil_cdc_mclk_enable);
+
+static int __tavil_codec_internal_rco_ctrl(struct snd_soc_codec *codec,
+					   bool enable)
+{
+	struct tavil_priv *tavil = snd_soc_codec_get_drvdata(codec);
+	int ret = 0;
+
+	if (enable) {
+		if (wcd_resmgr_get_clk_type(tavil->resmgr) ==
+		    WCD_CLK_RCO) {
+			ret = wcd_resmgr_enable_clk_block(tavil->resmgr,
+							  WCD_CLK_RCO);
+		} else {
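+			/*
+			 * The current clock is not RCO yet, so MCLK is
+			 * enabled temporarily: switch the SIDO input to
+			 * the RCO/bandgap source, enable the RCO block,
+			 * then drop the temporary MCLK vote.
+			 */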
+			ret = tavil_cdc_req_mclk_enable(tavil, true);
+			if (ret) {
+				dev_err(codec->dev,
+					"%s: mclk_enable failed, err = %d\n",
+					__func__, ret);
+				goto done;
+			}
+			wcd_resmgr_set_sido_input_src(tavil->resmgr,
+							SIDO_SOURCE_RCO_BG);
+			ret = wcd_resmgr_enable_clk_block(tavil->resmgr,
+							   WCD_CLK_RCO);
+			ret |= tavil_cdc_req_mclk_enable(tavil, false);
+		}
+
+	} else {
+		ret = wcd_resmgr_disable_clk_block(tavil->resmgr,
+						   WCD_CLK_RCO);
+	}
+
+	if (ret) {
+		dev_err(codec->dev, "%s: Error in %s RCO\n",
+			__func__, (enable ? "enabling" : "disabling"));
+		ret = -EINVAL;
+	}
+
+done:
+	return ret;
+}
+
+/*
+ * tavil_codec_internal_rco_ctrl: Enable/Disable codec's RCO clock
+ * @codec: Handle to the codec
+ * @enable: Indicates whether clock should be enabled or disabled
+ */
+static int tavil_codec_internal_rco_ctrl(struct snd_soc_codec *codec,
+					 bool enable)
+{
+	struct tavil_priv *tavil = snd_soc_codec_get_drvdata(codec);
+	int ret = 0;
+
+	WCD9XXX_V2_BG_CLK_LOCK(tavil->resmgr);
+	ret = __tavil_codec_internal_rco_ctrl(codec, enable);
+	WCD9XXX_V2_BG_CLK_UNLOCK(tavil->resmgr);
+	return ret;
+}
+
+static const struct wcd_resmgr_cb tavil_resmgr_cb = {
+	.cdc_rco_ctrl = __tavil_codec_internal_rco_ctrl,
+};
+
+static const struct tavil_reg_mask_val tavil_codec_mclk2_1_1_defaults[] = {
+	{WCD934X_CLK_SYS_MCLK2_PRG1, 0x60, 0x20},
+};
+
+static const struct tavil_reg_mask_val tavil_codec_mclk2_1_0_defaults[] = {
+	/*
+	 * PLL Settings:
+	 * Clock Root: MCLK2,
+	 * Clock Source: EXT_CLK,
+	 * Clock Destination: MCLK2
+	 * Clock Freq In: 19.2MHz,
+	 * Clock Freq Out: 11.2896MHz
+	 */
+	{WCD934X_CLK_SYS_MCLK2_PRG1, 0x60, 0x20},
+	{WCD934X_CLK_SYS_INT_POST_DIV_REG0, 0xFF, 0x5E},
+	{WCD934X_CLK_SYS_INT_POST_DIV_REG1, 0x1F, 0x1F},
+	{WCD934X_CLK_SYS_INT_REF_DIV_REG0, 0xFF, 0x54},
+	{WCD934X_CLK_SYS_INT_REF_DIV_REG1, 0xFF, 0x01},
+	{WCD934X_CLK_SYS_INT_FILTER_REG1, 0x07, 0x04},
+	{WCD934X_CLK_SYS_INT_PLL_L_VAL, 0xFF, 0x93},
+	{WCD934X_CLK_SYS_INT_PLL_N_VAL, 0xFF, 0xFA},
+	{WCD934X_CLK_SYS_INT_TEST_REG0, 0xFF, 0x90},
+	{WCD934X_CLK_SYS_INT_PFD_CP_DSM_PROG, 0xFF, 0x7E},
+	{WCD934X_CLK_SYS_INT_VCO_PROG, 0xFF, 0xF8},
+	{WCD934X_CLK_SYS_INT_TEST_REG1, 0xFF, 0x68},
+	{WCD934X_CLK_SYS_INT_LDO_LOCK_CFG, 0xFF, 0x40},
+	{WCD934X_CLK_SYS_INT_DIG_LOCK_DET_CFG, 0xFF, 0x32},
+};
+
+static const struct tavil_reg_mask_val tavil_codec_reg_defaults[] = {
+	{WCD934X_BIAS_VBG_FINE_ADJ, 0xFF, 0x75},
+	{WCD934X_CODEC_CPR_SVS_CX_VDD, 0xFF, 0x7C}, /* value in svs mode */
+	{WCD934X_CODEC_CPR_SVS2_CX_VDD, 0xFF, 0x58}, /* value in svs2 mode */
+	{WCD934X_CDC_RX0_RX_PATH_DSMDEM_CTL, 0x01, 0x01},
+	{WCD934X_CDC_RX1_RX_PATH_DSMDEM_CTL, 0x01, 0x01},
+	{WCD934X_CDC_RX2_RX_PATH_DSMDEM_CTL, 0x01, 0x01},
+	{WCD934X_CDC_RX3_RX_PATH_DSMDEM_CTL, 0x01, 0x01},
+	{WCD934X_CDC_RX4_RX_PATH_DSMDEM_CTL, 0x01, 0x01},
+	{WCD934X_CDC_RX7_RX_PATH_DSMDEM_CTL, 0x01, 0x01},
+	{WCD934X_CDC_RX8_RX_PATH_DSMDEM_CTL, 0x01, 0x01},
+	{WCD934X_CDC_COMPANDER8_CTL7, 0x1E, 0x18},
+	{WCD934X_CDC_COMPANDER7_CTL7, 0x1E, 0x18},
+	{WCD934X_CDC_RX0_RX_PATH_SEC0, 0x08, 0x0},
+	{WCD934X_CDC_CLSH_DECAY_CTRL, 0x03, 0x0},
+	{WCD934X_MICB1_TEST_CTL_2, 0x07, 0x01},
+	{WCD934X_CDC_BOOST0_BOOST_CFG1, 0x3F, 0x12},
+	{WCD934X_CDC_BOOST0_BOOST_CFG2, 0x1C, 0x08},
+	{WCD934X_CDC_BOOST1_BOOST_CFG1, 0x3F, 0x12},
+	{WCD934X_CDC_BOOST1_BOOST_CFG2, 0x1C, 0x08},
+	{WCD934X_CPE_SS_CPARMAD_BUFRDY_INT_PERIOD, 0x1F, 0x09},
+	{WCD934X_CDC_TX0_TX_PATH_CFG1, 0x01, 0x00},
+	{WCD934X_CDC_TX1_TX_PATH_CFG1, 0x01, 0x00},
+	{WCD934X_CDC_TX2_TX_PATH_CFG1, 0x01, 0x00},
+	{WCD934X_CDC_TX3_TX_PATH_CFG1, 0x01, 0x00},
+	{WCD934X_CDC_TX4_TX_PATH_CFG1, 0x01, 0x00},
+	{WCD934X_CDC_TX5_TX_PATH_CFG1, 0x01, 0x00},
+	{WCD934X_CDC_TX6_TX_PATH_CFG1, 0x01, 0x00},
+	{WCD934X_CDC_TX7_TX_PATH_CFG1, 0x01, 0x00},
+	{WCD934X_CDC_TX8_TX_PATH_CFG1, 0x01, 0x00},
+	{WCD934X_RX_OCP_CTL, 0x0F, 0x02}, /* OCP number of attempts is 2 */
+	{WCD934X_HPH_OCP_CTL, 0xFF, 0x3A}, /* OCP current limit */
+	{WCD934X_HPH_L_TEST, 0x01, 0x01},
+	{WCD934X_HPH_R_TEST, 0x01, 0x01},
+	{WCD934X_CPE_FLL_CONFIG_CTL_2, 0xFF, 0x20},
+};
+
+static const struct tavil_reg_mask_val tavil_codec_reg_init_1_1_val[] = {
+	{WCD934X_CDC_COMPANDER1_CTL7, 0x1E, 0x06},
+	{WCD934X_CDC_COMPANDER2_CTL7, 0x1E, 0x06},
+	{WCD934X_HPH_NEW_INT_RDAC_HD2_CTL_L, 0xFF, 0x84},
+	{WCD934X_HPH_NEW_INT_RDAC_HD2_CTL_R, 0xFF, 0x84},
+	{WCD934X_CDC_RX3_RX_PATH_SEC0, 0xFC, 0xF4},
+	{WCD934X_CDC_RX4_RX_PATH_SEC0, 0xFC, 0xF4},
+};
+
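+/*
+ * CPR default programming sequence. Each entry is a {wr_data, wr_addr}
+ * pair: tavil_update_cpr_defaults() pushes the data word to the CPR
+ * WR_DATA registers and then the address word to the WR_ADDR registers.
+ */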
+static const struct tavil_cpr_reg_defaults cpr_defaults[] = {
+	{ 0x00000820, 0x00000094 },
+	{ 0x00000fC0, 0x00000048 },
+	{ 0x0000f000, 0x00000044 },
+	{ 0x0000bb80, 0xC0000178 },
+	{ 0x00000000, 0x00000160 },
+	{ 0x10854522, 0x00000060 },
+	{ 0x10854509, 0x00000064 },
+	{ 0x108544dd, 0x00000068 },
+	{ 0x108544ad, 0x0000006C },
+	{ 0x0000077E, 0x00000070 },
+	{ 0x000007da, 0x00000074 },
+	{ 0x00000000, 0x00000078 },
+	{ 0x00000000, 0x0000007C },
+	{ 0x00042029, 0x00000080 },
+	{ 0x4002002A, 0x00000090 },
+	{ 0x4002002B, 0x00000090 },
+};
+
+static const struct tavil_reg_mask_val tavil_codec_reg_init_common_val[] = {
+	{WCD934X_CDC_CLSH_K2_MSB, 0x0F, 0x00},
+	{WCD934X_CDC_CLSH_K2_LSB, 0xFF, 0x60},
+	{WCD934X_CPE_SS_DMIC_CFG, 0x80, 0x00},
+	{WCD934X_CDC_BOOST0_BOOST_CTL, 0x70, 0x50},
+	{WCD934X_CDC_BOOST1_BOOST_CTL, 0x70, 0x50},
+	{WCD934X_CDC_RX7_RX_PATH_CFG1, 0x08, 0x08},
+	{WCD934X_CDC_RX8_RX_PATH_CFG1, 0x08, 0x08},
+	{WCD934X_CDC_TOP_TOP_CFG1, 0x02, 0x02},
+	{WCD934X_CDC_TOP_TOP_CFG1, 0x01, 0x01},
+	{WCD934X_CDC_TX9_SPKR_PROT_PATH_CFG0, 0x01, 0x01},
+	{WCD934X_CDC_TX10_SPKR_PROT_PATH_CFG0, 0x01, 0x01},
+	{WCD934X_CDC_TX11_SPKR_PROT_PATH_CFG0, 0x01, 0x01},
+	{WCD934X_CDC_TX12_SPKR_PROT_PATH_CFG0, 0x01, 0x01},
+	{WCD934X_DATA_HUB_SB_TX11_INP_CFG, 0x01, 0x01},
+	{WCD934X_CDC_CLK_RST_CTRL_FS_CNT_CONTROL, 0x01, 0x01},
+	{WCD934X_CDC_COMPANDER7_CTL3, 0x80, 0x80},
+	{WCD934X_CDC_COMPANDER8_CTL3, 0x80, 0x80},
+	{WCD934X_CDC_COMPANDER7_CTL7, 0x01, 0x01},
+	{WCD934X_CDC_COMPANDER8_CTL7, 0x01, 0x01},
+	{WCD934X_CODEC_RPM_CLK_GATE, 0x08, 0x00},
+	{WCD934X_TLMM_DMIC3_CLK_PINCFG, 0xFF, 0x0a},
+	{WCD934X_TLMM_DMIC3_DATA_PINCFG, 0xFF, 0x0a},
+	{WCD934X_CPE_SS_SVA_CFG, 0x60, 0x00},
+	{WCD934X_CPE_SS_CPAR_CFG, 0x10, 0x10},
+};
+
+static void tavil_codec_init_reg(struct tavil_priv *priv)
+{
+	struct snd_soc_codec *codec = priv->codec;
+	u32 i;
+
+	for (i = 0; i < ARRAY_SIZE(tavil_codec_reg_init_common_val); i++)
+		snd_soc_update_bits(codec,
+				    tavil_codec_reg_init_common_val[i].reg,
+				    tavil_codec_reg_init_common_val[i].mask,
+				    tavil_codec_reg_init_common_val[i].val);
+
+	if (TAVIL_IS_1_1(priv->wcd9xxx)) {
+		for (i = 0; i < ARRAY_SIZE(tavil_codec_reg_init_1_1_val); i++)
+			snd_soc_update_bits(codec,
+					tavil_codec_reg_init_1_1_val[i].reg,
+					tavil_codec_reg_init_1_1_val[i].mask,
+					tavil_codec_reg_init_1_1_val[i].val);
+	}
+}
+
+static void tavil_update_reg_defaults(struct tavil_priv *tavil)
+{
+	u32 i;
+	struct wcd9xxx *wcd9xxx;
+
+	wcd9xxx = tavil->wcd9xxx;
+	for (i = 0; i < ARRAY_SIZE(tavil_codec_reg_defaults); i++)
+		regmap_update_bits(wcd9xxx->regmap,
+				   tavil_codec_reg_defaults[i].reg,
+				   tavil_codec_reg_defaults[i].mask,
+				   tavil_codec_reg_defaults[i].val);
+}
+
+static void tavil_update_cpr_defaults(struct tavil_priv *tavil)
+{
+	int i;
+	struct wcd9xxx *wcd9xxx;
+
+	wcd9xxx = tavil->wcd9xxx;
+	if (!TAVIL_IS_1_1(wcd9xxx))
+		return;
+
+	__tavil_cdc_mclk_enable(tavil, true);
+
+	regmap_write(wcd9xxx->regmap, WCD934X_CODEC_CPR_SVS2_MIN_CX_VDD, 0x2C);
+	regmap_update_bits(wcd9xxx->regmap, WCD934X_CODEC_RPM_CLK_GATE,
+			   0x10, 0x00);
+
+	for (i = 0; i < ARRAY_SIZE(cpr_defaults); i++) {
+		regmap_bulk_write(wcd9xxx->regmap,
+				WCD934X_CODEC_CPR_WR_DATA_0,
+				(u8 *)&cpr_defaults[i].wr_data, 4);
+		regmap_bulk_write(wcd9xxx->regmap,
+				WCD934X_CODEC_CPR_WR_ADDR_0,
+				(u8 *)&cpr_defaults[i].wr_addr, 4);
+	}
+
+	__tavil_cdc_mclk_enable(tavil, false);
+}
+
+static void tavil_slim_interface_init_reg(struct snd_soc_codec *codec)
+{
+	int i;
+	struct tavil_priv *priv = snd_soc_codec_get_drvdata(codec);
+
+	for (i = 0; i < WCD9XXX_SLIM_NUM_PORT_REG; i++)
+		wcd9xxx_interface_reg_write(priv->wcd9xxx,
+				    WCD934X_SLIM_PGD_PORT_INT_RX_EN0 + i,
+				    0xFF);
+}
+
+static irqreturn_t tavil_misc_irq(int irq, void *data)
+{
+	struct tavil_priv *tavil = data;
+	int misc_val;
+
+	/* Find source of interrupt */
+	regmap_read(tavil->wcd9xxx->regmap, WCD934X_INTR_CODEC_MISC_STATUS,
+		    &misc_val);
+
+	if (misc_val & 0x08) {
+		dev_info(tavil->dev, "%s: irq: %d, DSD DC detected!\n",
+			 __func__, irq);
+		/* DSD DC interrupt, reset DSD path */
+		tavil_dsd_reset(tavil->dsd_config);
+	} else {
+		dev_err(tavil->dev, "%s: Codec misc irq: %d, val: 0x%x\n",
+			__func__, irq, misc_val);
+	}
+
+	/* Clear interrupt status */
+	regmap_update_bits(tavil->wcd9xxx->regmap,
+			   WCD934X_INTR_CODEC_MISC_CLEAR, misc_val, 0x00);
+
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t tavil_slimbus_irq(int irq, void *data)
+{
+	struct tavil_priv *tavil = data;
+	unsigned long status = 0;
+	int i, j, port_id, k;
+	u32 bit;
+	u8 val, int_val = 0;
+	bool tx, cleared;
+	unsigned short reg = 0;
+
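+	/*
+	 * Gather per-port interrupt status: four 8-bit status registers
+	 * (RX_0, RX_1, TX_0, TX_1) are packed into one 32-bit word, so
+	 * bits 0-15 map to RX ports and bits 16-31 to TX ports.
+	 */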
+	for (i = WCD934X_SLIM_PGD_PORT_INT_STATUS_RX_0, j = 0;
+	     i <= WCD934X_SLIM_PGD_PORT_INT_STATUS_TX_1; i++, j++) {
+		val = wcd9xxx_interface_reg_read(tavil->wcd9xxx, i);
+		status |= ((u32)val << (8 * j));
+	}
+
+	for_each_set_bit(j, &status, 32) {
+		tx = (j >= 16 ? true : false);
+		port_id = (tx ? j - 16 : j);
+		val = wcd9xxx_interface_reg_read(tavil->wcd9xxx,
+				WCD934X_SLIM_PGD_PORT_INT_RX_SOURCE0 + j);
+		if (val) {
+			if (!tx)
+				reg = WCD934X_SLIM_PGD_PORT_INT_RX_EN0 +
+					(port_id / 8);
+			else
+				reg = WCD934X_SLIM_PGD_PORT_INT_TX_EN0 +
+					(port_id / 8);
+			int_val = wcd9xxx_interface_reg_read(
+				tavil->wcd9xxx, reg);
+			/*
+			 * Ignore interrupts for ports for which the
+			 * interrupts are not specifically enabled.
+			 */
+			if (!(int_val & (1 << (port_id % 8))))
+				continue;
+		}
+		if (val & WCD934X_SLIM_IRQ_OVERFLOW)
+			dev_err_ratelimited(tavil->dev, "%s: overflow error on %s port %d, value %x\n",
+			   __func__, (tx ? "TX" : "RX"), port_id, val);
+		if (val & WCD934X_SLIM_IRQ_UNDERFLOW)
+			dev_err_ratelimited(tavil->dev, "%s: underflow error on %s port %d, value %x\n",
+			   __func__, (tx ? "TX" : "RX"), port_id, val);
+		if ((val & WCD934X_SLIM_IRQ_OVERFLOW) ||
+			(val & WCD934X_SLIM_IRQ_UNDERFLOW)) {
+			if (!tx)
+				reg = WCD934X_SLIM_PGD_PORT_INT_RX_EN0 +
+					(port_id / 8);
+			else
+				reg = WCD934X_SLIM_PGD_PORT_INT_TX_EN0 +
+					(port_id / 8);
+			int_val = wcd9xxx_interface_reg_read(
+				tavil->wcd9xxx, reg);
+			if (int_val & (1 << (port_id % 8))) {
+				int_val = int_val ^ (1 << (port_id % 8));
+				wcd9xxx_interface_reg_write(tavil->wcd9xxx,
+					reg, int_val);
+			}
+		}
+		if (val & WCD934X_SLIM_IRQ_PORT_CLOSED) {
+			/*
+			 * The INT SOURCE registers are ordered RX first,
+			 * then TX, but the port numbering in ch_mask runs
+			 * the opposite way around.
+			 */
+			bit = (tx ? j - 16 : j + 16);
+			dev_dbg(tavil->dev, "%s: %s port %d closed value %x, bit %u\n",
+				 __func__, (tx ? "TX" : "RX"), port_id, val,
+				 bit);
+			for (k = 0, cleared = false; k < NUM_CODEC_DAIS; k++) {
+				dev_dbg(tavil->dev, "%s: tavil->dai[%d].ch_mask = 0x%lx\n",
+					 __func__, k, tavil->dai[k].ch_mask);
+				if (test_and_clear_bit(bit,
+						&tavil->dai[k].ch_mask)) {
+					cleared = true;
+					if (!tavil->dai[k].ch_mask)
+						wake_up(
+						      &tavil->dai[k].dai_wait);
+					/*
+					 * There are cases when multiple DAIs
+					 * might be using the same slimbus
+					 * channel. Hence don't break here.
+					 */
+				}
+			}
+			WARN(!cleared,
+			     "Couldn't find slimbus %s port %d for closing\n",
+			     (tx ? "TX" : "RX"), port_id);
+		}
+		wcd9xxx_interface_reg_write(tavil->wcd9xxx,
+					    WCD934X_SLIM_PGD_PORT_INT_CLR_RX_0 +
+					    (j / 8),
+					    1 << (j % 8));
+	}
+
+	return IRQ_HANDLED;
+}
+
+static int tavil_setup_irqs(struct tavil_priv *tavil)
+{
+	int ret = 0;
+	struct snd_soc_codec *codec = tavil->codec;
+	struct wcd9xxx *wcd9xxx = tavil->wcd9xxx;
+	struct wcd9xxx_core_resource *core_res =
+				&wcd9xxx->core_res;
+
+	ret = wcd9xxx_request_irq(core_res, WCD9XXX_IRQ_SLIMBUS,
+				  tavil_slimbus_irq, "SLIMBUS Slave", tavil);
+	if (ret)
+		dev_err(codec->dev, "%s: Failed to request irq %d\n", __func__,
+		       WCD9XXX_IRQ_SLIMBUS);
+	else
+		tavil_slim_interface_init_reg(codec);
+
+	/* Register for misc interrupts as well */
+	ret = wcd9xxx_request_irq(core_res, WCD934X_IRQ_MISC,
+				  tavil_misc_irq, "CDC MISC Irq", tavil);
+	if (ret)
+		dev_err(codec->dev, "%s: Failed to request cdc misc irq\n",
+			__func__);
+
+	return ret;
+}
+
+static void tavil_init_slim_slave_cfg(struct snd_soc_codec *codec)
+{
+	struct tavil_priv *priv = snd_soc_codec_get_drvdata(codec);
+	struct afe_param_cdc_slimbus_slave_cfg *cfg;
+	struct wcd9xxx *wcd9xxx = priv->wcd9xxx;
+	uint64_t eaddr = 0;
+
+	cfg = &priv->slimbus_slave_cfg;
+	cfg->minor_version = 1;
+	cfg->tx_slave_port_offset = 0;
+	cfg->rx_slave_port_offset = 16;
+
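+	/*
+	 * Pack the 6-byte slimbus enumeration address into a u64 and
+	 * split it into the 32-bit LSW/MSW fields expected by the AFE.
+	 */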
+	memcpy(&eaddr, &wcd9xxx->slim->e_addr, sizeof(wcd9xxx->slim->e_addr));
+	WARN_ON(sizeof(wcd9xxx->slim->e_addr) != 6);
+	cfg->device_enum_addr_lsw = eaddr & 0xFFFFFFFF;
+	cfg->device_enum_addr_msw = eaddr >> 32;
+
+	dev_dbg(codec->dev, "%s: slimbus logical address 0x%llx\n",
+		__func__, eaddr);
+}
+
+static void tavil_cleanup_irqs(struct tavil_priv *tavil)
+{
+	struct wcd9xxx *wcd9xxx = tavil->wcd9xxx;
+	struct wcd9xxx_core_resource *core_res =
+				&wcd9xxx->core_res;
+
+	wcd9xxx_free_irq(core_res, WCD9XXX_IRQ_SLIMBUS, tavil);
+	wcd9xxx_free_irq(core_res, WCD934X_IRQ_MISC, tavil);
+}
+
+/*
+ * wcd934x_get_micb_vout_ctl_val: convert micbias voltage to register value
+ * @micb_mv: micbias voltage in millivolts
+ *
+ * Return: converted register value, or -EINVAL if the requested voltage
+ * is outside the supported 1000-2850 mV range.
+ */
+int wcd934x_get_micb_vout_ctl_val(u32 micb_mv)
+{
+	/* min micbias voltage is 1V and maximum is 2.85V */
+	if (micb_mv < 1000 || micb_mv > 2850) {
+		pr_err("%s: unsupported micbias voltage\n", __func__);
+		return -EINVAL;
+	}
+
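+	/*
+	 * The register encodes micbias in 50mV steps above 1V; for
+	 * example, a 1.8V (1800mV) request maps to (1800 - 1000) / 50 = 16.
+	 */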
+	return (micb_mv - 1000) / 50;
+}
+EXPORT_SYMBOL(wcd934x_get_micb_vout_ctl_val);
+
+static int tavil_handle_pdata(struct tavil_priv *tavil,
+			      struct wcd9xxx_pdata *pdata)
+{
+	struct snd_soc_codec *codec = tavil->codec;
+	u8 dmic_ctl_val, mad_dmic_ctl_val;
+	u8 anc_ctl_value;
+	u32 def_dmic_rate, dmic_clk_drv;
+	int vout_ctl_1, vout_ctl_2, vout_ctl_3, vout_ctl_4;
+	int rc = 0;
+
+	if (!pdata) {
+		dev_err(codec->dev, "%s: NULL pdata\n", __func__);
+		return -ENODEV;
+	}
+
+	/* set micbias voltage */
+	vout_ctl_1 = wcd934x_get_micb_vout_ctl_val(pdata->micbias.micb1_mv);
+	vout_ctl_2 = wcd934x_get_micb_vout_ctl_val(pdata->micbias.micb2_mv);
+	vout_ctl_3 = wcd934x_get_micb_vout_ctl_val(pdata->micbias.micb3_mv);
+	vout_ctl_4 = wcd934x_get_micb_vout_ctl_val(pdata->micbias.micb4_mv);
+
+	if (IS_ERR_VALUE(vout_ctl_1) || IS_ERR_VALUE(vout_ctl_2) ||
+	    IS_ERR_VALUE(vout_ctl_3) || IS_ERR_VALUE(vout_ctl_4)) {
+		rc = -EINVAL;
+		goto done;
+	}
+	snd_soc_update_bits(codec, WCD934X_ANA_MICB1, 0x3F, vout_ctl_1);
+	snd_soc_update_bits(codec, WCD934X_ANA_MICB2, 0x3F, vout_ctl_2);
+	snd_soc_update_bits(codec, WCD934X_ANA_MICB3, 0x3F, vout_ctl_3);
+	snd_soc_update_bits(codec, WCD934X_ANA_MICB4, 0x3F, vout_ctl_4);
+
+	/* Set the DMIC sample rate */
+	switch (pdata->mclk_rate) {
+	case WCD934X_MCLK_CLK_9P6MHZ:
+		def_dmic_rate = WCD9XXX_DMIC_SAMPLE_RATE_4P8MHZ;
+		break;
+	case WCD934X_MCLK_CLK_12P288MHZ:
+		def_dmic_rate = WCD9XXX_DMIC_SAMPLE_RATE_4P096MHZ;
+		break;
+	default:
+		/* should never happen */
+		dev_err(codec->dev, "%s: Invalid mclk_rate %d\n",
+			__func__, pdata->mclk_rate);
+		rc = -EINVAL;
+		goto done;
+	}
+
+	if (pdata->dmic_sample_rate ==
+	    WCD9XXX_DMIC_SAMPLE_RATE_UNDEFINED) {
+		dev_info(codec->dev, "%s: dmic_rate invalid default = %d\n",
+			__func__, def_dmic_rate);
+		pdata->dmic_sample_rate = def_dmic_rate;
+	}
+	if (pdata->mad_dmic_sample_rate ==
+	    WCD9XXX_DMIC_SAMPLE_RATE_UNDEFINED) {
+		dev_info(codec->dev, "%s: mad_dmic_rate invalid default = %d\n",
+			__func__, def_dmic_rate);
+		/*
+		 * use dmic_sample_rate as the default for MAD
+		 * if mad dmic sample rate is undefined
+		 */
+		pdata->mad_dmic_sample_rate = pdata->dmic_sample_rate;
+	}
+
+	if (pdata->dmic_clk_drv ==
+	    WCD9XXX_DMIC_CLK_DRIVE_UNDEFINED) {
+		pdata->dmic_clk_drv = WCD934X_DMIC_CLK_DRIVE_DEFAULT;
+		dev_dbg(codec->dev,
+			 "%s: dmic_clk_strength invalid, default = %d\n",
+			 __func__, pdata->dmic_clk_drv);
+	}
+
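+	/*
+	 * Map the requested DMIC clock drive strength (2/4/8/16,
+	 * presumably in mA) onto the 2-bit pad drive field programmed
+	 * below.
+	 */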
+	switch (pdata->dmic_clk_drv) {
+	case 2:
+		dmic_clk_drv = 0;
+		break;
+	case 4:
+		dmic_clk_drv = 1;
+		break;
+	case 8:
+		dmic_clk_drv = 2;
+		break;
+	case 16:
+		dmic_clk_drv = 3;
+		break;
+	default:
+		dev_err(codec->dev,
+			"%s: invalid dmic_clk_drv %d, using default\n",
+			__func__, pdata->dmic_clk_drv);
+		dmic_clk_drv = 0;
+		break;
+	}
+
+	snd_soc_update_bits(codec, WCD934X_TEST_DEBUG_PAD_DRVCTL_0,
+			    0x0C, dmic_clk_drv << 2);
+
+	/*
+	 * Default the MAD DMIC clock rate to mad_dmic_sample_rate; the
+	 * ANC/TXFE DMIC rates stay on dmic_sample_rate, since ANC/TXFE
+	 * are independent of the MAD block.
+	 */
+	mad_dmic_ctl_val = tavil_get_dmic_clk_val(tavil->codec,
+				pdata->mclk_rate,
+				pdata->mad_dmic_sample_rate);
+	snd_soc_update_bits(codec, WCD934X_CPE_SS_DMIC0_CTL,
+		0x0E, mad_dmic_ctl_val << 1);
+	snd_soc_update_bits(codec, WCD934X_CPE_SS_DMIC1_CTL,
+		0x0E, mad_dmic_ctl_val << 1);
+	snd_soc_update_bits(codec, WCD934X_CPE_SS_DMIC2_CTL,
+		0x0E, mad_dmic_ctl_val << 1);
+
+	/* ANC/TXFE DMIC rate is derived from dmic_sample_rate */
+	dmic_ctl_val = tavil_get_dmic_clk_val(tavil->codec,
+					      pdata->mclk_rate,
+					      pdata->dmic_sample_rate);
+	if (dmic_ctl_val == WCD934X_DMIC_CLK_DIV_2)
+		anc_ctl_value = WCD934X_ANC_DMIC_X2_FULL_RATE;
+	else
+		anc_ctl_value = WCD934X_ANC_DMIC_X2_HALF_RATE;
+
+	snd_soc_update_bits(codec, WCD934X_CDC_ANC0_MODE_2_CTL,
+			    0x40, anc_ctl_value << 6);
+	snd_soc_update_bits(codec, WCD934X_CDC_ANC0_MODE_2_CTL,
+			    0x20, anc_ctl_value << 5);
+	snd_soc_update_bits(codec, WCD934X_CDC_ANC1_MODE_2_CTL,
+			    0x40, anc_ctl_value << 6);
+	snd_soc_update_bits(codec, WCD934X_CDC_ANC1_MODE_2_CTL,
+			    0x20, anc_ctl_value << 5);
+
+done:
+	return rc;
+}
+
+static void tavil_cdc_vote_svs(struct snd_soc_codec *codec, bool vote)
+{
+	struct tavil_priv *tavil = snd_soc_codec_get_drvdata(codec);
+
+	return tavil_vote_svs(tavil, vote);
+}
+
+struct wcd_dsp_cdc_cb cdc_cb = {
+	.cdc_clk_en = tavil_codec_internal_rco_ctrl,
+	.cdc_vote_svs = tavil_cdc_vote_svs,
+};
+
+static int tavil_wdsp_initialize(struct snd_soc_codec *codec)
+{
+	struct wcd9xxx *control;
+	struct tavil_priv *tavil;
+	struct wcd_dsp_params params;
+	int ret = 0;
+
+	control = dev_get_drvdata(codec->dev->parent);
+	tavil = snd_soc_codec_get_drvdata(codec);
+
+	params.cb = &cdc_cb;
+	params.irqs.cpe_ipc1_irq = WCD934X_IRQ_CPE1_INTR;
+	params.irqs.cpe_err_irq = WCD934X_IRQ_CPE_ERROR;
+	params.irqs.fatal_irqs = CPE_FATAL_IRQS;
+	params.clk_rate = control->mclk_rate;
+	params.dsp_instance = 0;
+
+	wcd_dsp_cntl_init(codec, &params, &tavil->wdsp_cntl);
+	if (!tavil->wdsp_cntl) {
+		dev_err(tavil->dev, "%s: wcd-dsp-control init failed\n",
+			__func__);
+		ret = -EINVAL;
+	}
+
+	return ret;
+}
+
+/*
+ * tavil_soc_get_mbhc: get the wcd934x_mbhc handle of the corresponding codec
+ * @codec: handle to snd_soc_codec *
+ *
+ * Return: wcd934x_mbhc handle, or NULL on failure
+ */
+struct wcd934x_mbhc *tavil_soc_get_mbhc(struct snd_soc_codec *codec)
+{
+	struct tavil_priv *tavil;
+
+	if (!codec) {
+		pr_err("%s: Invalid params, NULL codec\n", __func__);
+		return NULL;
+	}
+	tavil = snd_soc_codec_get_drvdata(codec);
+
+	if (!tavil) {
+		pr_err("%s: Invalid params, NULL tavil\n", __func__);
+		return NULL;
+	}
+
+	return tavil->mbhc;
+}
+EXPORT_SYMBOL(tavil_soc_get_mbhc);
+
+static void tavil_mclk2_reg_defaults(struct tavil_priv *tavil)
+{
+	int i;
+	struct snd_soc_codec *codec = tavil->codec;
+
+	if (TAVIL_IS_1_0(tavil->wcd9xxx)) {
+		/* MCLK2 configuration */
+		for (i = 0; i < ARRAY_SIZE(tavil_codec_mclk2_1_0_defaults); i++)
+			snd_soc_update_bits(codec,
+					tavil_codec_mclk2_1_0_defaults[i].reg,
+					tavil_codec_mclk2_1_0_defaults[i].mask,
+					tavil_codec_mclk2_1_0_defaults[i].val);
+	}
+	if (TAVIL_IS_1_1(tavil->wcd9xxx)) {
+		/* MCLK2 configuration */
+		for (i = 0; i < ARRAY_SIZE(tavil_codec_mclk2_1_1_defaults); i++)
+			snd_soc_update_bits(codec,
+					tavil_codec_mclk2_1_1_defaults[i].reg,
+					tavil_codec_mclk2_1_1_defaults[i].mask,
+					tavil_codec_mclk2_1_1_defaults[i].val);
+	}
+}
+
+static int tavil_device_down(struct wcd9xxx *wcd9xxx)
+{
+	struct snd_soc_codec *codec;
+	struct tavil_priv *priv;
+	int count;
+
+	codec = (struct snd_soc_codec *)(wcd9xxx->ssr_priv);
+	priv = snd_soc_codec_get_drvdata(codec);
+	if (priv->swr.ctrl_data)
+		swrm_wcd_notify(priv->swr.ctrl_data[0].swr_pdev,
+				SWR_DEVICE_DOWN, NULL);
+	tavil_dsd_reset(priv->dsd_config);
+	snd_soc_card_change_online_state(codec->component.card, 0);
+	for (count = 0; count < NUM_CODEC_DAIS; count++)
+		priv->dai[count].bus_down_in_recovery = true;
+	wcd_dsp_ssr_event(priv->wdsp_cntl, WCD_CDC_DOWN_EVENT);
+	wcd_resmgr_set_sido_input_src_locked(priv->resmgr,
+					     SIDO_SOURCE_INTERNAL);
+
+	return 0;
+}
+
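+/*
+ * Called after the codec comes back from a subsystem restart (SSR):
+ * re-applies register defaults, re-syncs the regmap cache, and brings
+ * MBHC, DSD and the IRQs back up before releasing the SVS vote.
+ */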
+static int tavil_post_reset_cb(struct wcd9xxx *wcd9xxx)
+{
+	int i, ret = 0;
+	struct wcd9xxx *control;
+	struct snd_soc_codec *codec;
+	struct tavil_priv *tavil;
+	struct wcd9xxx_pdata *pdata;
+	struct wcd_mbhc *mbhc;
+
+	codec = (struct snd_soc_codec *)(wcd9xxx->ssr_priv);
+	tavil = snd_soc_codec_get_drvdata(codec);
+	control = dev_get_drvdata(codec->dev->parent);
+
+	wcd9xxx_set_power_state(tavil->wcd9xxx,
+				WCD_REGION_POWER_COLLAPSE_REMOVE,
+				WCD9XXX_DIG_CORE_REGION_1);
+
+	mutex_lock(&tavil->codec_mutex);
+
+	tavil_vote_svs(tavil, true);
+	tavil_slimbus_slave_port_cfg.slave_dev_intfdev_la =
+				control->slim_slave->laddr;
+	tavil_slimbus_slave_port_cfg.slave_dev_pgd_la =
+					control->slim->laddr;
+	tavil_init_slim_slave_cfg(codec);
+	snd_soc_card_change_online_state(codec->component.card, 1);
+
+	for (i = 0; i < TAVIL_MAX_MICBIAS; i++)
+		tavil->micb_ref[i] = 0;
+
+	dev_dbg(codec->dev, "%s: MCLK Rate = %x\n",
+		__func__, control->mclk_rate);
+
+	if (control->mclk_rate == WCD934X_MCLK_CLK_12P288MHZ)
+		snd_soc_update_bits(codec, WCD934X_CODEC_RPM_CLK_MCLK_CFG,
+				    0x03, 0x00);
+	else if (control->mclk_rate == WCD934X_MCLK_CLK_9P6MHZ)
+		snd_soc_update_bits(codec, WCD934X_CODEC_RPM_CLK_MCLK_CFG,
+				    0x03, 0x01);
+	wcd_resmgr_post_ssr_v2(tavil->resmgr);
+	tavil_update_reg_defaults(tavil);
+	tavil_codec_init_reg(tavil);
+	__tavil_enable_efuse_sensing(tavil);
+	tavil_mclk2_reg_defaults(tavil);
+
+	__tavil_cdc_mclk_enable(tavil, true);
+	regcache_mark_dirty(codec->component.regmap);
+	regcache_sync(codec->component.regmap);
+	__tavil_cdc_mclk_enable(tavil, false);
+
+	tavil_update_cpr_defaults(tavil);
+
+	pdata = dev_get_platdata(codec->dev->parent);
+	ret = tavil_handle_pdata(tavil, pdata);
+	if (IS_ERR_VALUE(ret))
+		dev_err(codec->dev, "%s: invalid pdata\n", __func__);
+
+	/* Initialize MBHC module */
+	mbhc = &tavil->mbhc->wcd_mbhc;
+	ret = tavil_mbhc_post_ssr_init(tavil->mbhc, codec);
+	if (ret) {
+		dev_err(codec->dev, "%s: mbhc initialization failed\n",
+			__func__);
+		goto done;
+	} else {
+		tavil_mbhc_hs_detect(codec, mbhc->mbhc_cfg);
+	}
+
+	/* DSD initialization */
+	ret = tavil_dsd_post_ssr_init(tavil->dsd_config);
+	if (ret)
+		dev_dbg(tavil->dev, "%s: DSD init failed\n", __func__);
+
+	tavil_cleanup_irqs(tavil);
+	ret = tavil_setup_irqs(tavil);
+	if (ret) {
+		dev_err(codec->dev, "%s: tavil irq setup failed %d\n",
+			__func__, ret);
+		goto done;
+	}
+
+	tavil_set_spkr_mode(codec, tavil->swr.spkr_mode);
+	/*
+	 * Once the codec initialization is completed, the svs vote
+	 * can be released allowing the codec to go to SVS2.
+	 */
+	tavil_vote_svs(tavil, false);
+	wcd_dsp_ssr_event(tavil->wdsp_cntl, WCD_CDC_UP_EVENT);
+
+done:
+	mutex_unlock(&tavil->codec_mutex);
+	return ret;
+}
+
+static int tavil_soc_codec_probe(struct snd_soc_codec *codec)
+{
+	struct wcd9xxx *control;
+	struct tavil_priv *tavil;
+	struct wcd9xxx_pdata *pdata;
+	struct snd_soc_dapm_context *dapm = snd_soc_codec_get_dapm(codec);
+	int i, ret;
+	void *ptr = NULL;
+
+	control = dev_get_drvdata(codec->dev->parent);
+
+	dev_info(codec->dev, "%s()\n", __func__);
+	tavil = snd_soc_codec_get_drvdata(codec);
+	tavil->intf_type = wcd9xxx_get_intf_type();
+
+	control->dev_down = tavil_device_down;
+	control->post_reset = tavil_post_reset_cb;
+	control->ssr_priv = (void *)codec;
+
+	/* Resource Manager post Init */
+	ret = wcd_resmgr_post_init(tavil->resmgr, &tavil_resmgr_cb, codec);
+	if (ret) {
+		dev_err(codec->dev, "%s: wcd resmgr post init failed\n",
+			__func__);
+		goto err;
+	}
+	/* Class-H Init */
+	wcd_clsh_init(&tavil->clsh_d);
+	/* Default HPH Mode to Class-H Low HiFi */
+	tavil->hph_mode = CLS_H_LOHIFI;
+
+	tavil->fw_data = devm_kzalloc(codec->dev, sizeof(*(tavil->fw_data)),
+				      GFP_KERNEL);
+	if (!tavil->fw_data) {
+		ret = -ENOMEM;
+		goto err;
+	}
+
+	set_bit(WCD9XXX_ANC_CAL, tavil->fw_data->cal_bit);
+	set_bit(WCD9XXX_MBHC_CAL, tavil->fw_data->cal_bit);
+	set_bit(WCD9XXX_MAD_CAL, tavil->fw_data->cal_bit);
+	set_bit(WCD9XXX_VBAT_CAL, tavil->fw_data->cal_bit);
+
+	ret = wcd_cal_create_hwdep(tavil->fw_data,
+				   WCD9XXX_CODEC_HWDEP_NODE, codec);
+	if (IS_ERR_VALUE(ret)) {
+		dev_err(codec->dev, "%s hwdep failed %d\n", __func__, ret);
+		goto err_hwdep;
+	}
+
+	/* Initialize MBHC module */
+	ret = tavil_mbhc_init(&tavil->mbhc, codec, tavil->fw_data);
+	if (ret) {
+		pr_err("%s: mbhc initialization failed\n", __func__);
+		goto err_hwdep;
+	}
+
+	tavil->codec = codec;
+	for (i = 0; i < COMPANDER_MAX; i++)
+		tavil->comp_enabled[i] = 0;
+
+	tavil_codec_init_reg(tavil);
+
+	pdata = dev_get_platdata(codec->dev->parent);
+	ret = tavil_handle_pdata(tavil, pdata);
+	if (IS_ERR_VALUE(ret)) {
+		dev_err(codec->dev, "%s: bad pdata\n", __func__);
+		goto err_hwdep;
+	}
+
+	ptr = devm_kzalloc(codec->dev, (sizeof(tavil_rx_chs) +
+			   sizeof(tavil_tx_chs)), GFP_KERNEL);
+	if (!ptr) {
+		ret = -ENOMEM;
+		goto err_hwdep;
+	}
+
+	snd_soc_dapm_add_routes(dapm, tavil_slim_audio_map,
+			ARRAY_SIZE(tavil_slim_audio_map));
+	for (i = 0; i < NUM_CODEC_DAIS; i++) {
+		INIT_LIST_HEAD(&tavil->dai[i].wcd9xxx_ch_list);
+		init_waitqueue_head(&tavil->dai[i].dai_wait);
+	}
+	tavil_slimbus_slave_port_cfg.slave_dev_intfdev_la =
+				control->slim_slave->laddr;
+	tavil_slimbus_slave_port_cfg.slave_dev_pgd_la =
+				control->slim->laddr;
+	tavil_slimbus_slave_port_cfg.slave_port_mapping[0] =
+				WCD934X_TX13;
+	tavil_init_slim_slave_cfg(codec);
+
+	control->num_rx_port = WCD934X_RX_MAX;
+	control->rx_chs = ptr;
+	memcpy(control->rx_chs, tavil_rx_chs, sizeof(tavil_rx_chs));
+	control->num_tx_port = WCD934X_TX_MAX;
+	control->tx_chs = ptr + sizeof(tavil_rx_chs);
+	memcpy(control->tx_chs, tavil_tx_chs, sizeof(tavil_tx_chs));
+
+	ret = tavil_setup_irqs(tavil);
+	if (ret) {
+		dev_err(tavil->dev, "%s: tavil irq setup failed %d\n",
+			__func__, ret);
+		goto err_pdata;
+	}
+
+	for (i = 0; i < WCD934X_NUM_DECIMATORS; i++) {
+		tavil->tx_hpf_work[i].tavil = tavil;
+		tavil->tx_hpf_work[i].decimator = i;
+		INIT_DELAYED_WORK(&tavil->tx_hpf_work[i].dwork,
+				  tavil_tx_hpf_corner_freq_callback);
+
+		tavil->tx_mute_dwork[i].tavil = tavil;
+		tavil->tx_mute_dwork[i].decimator = i;
+		INIT_DELAYED_WORK(&tavil->tx_mute_dwork[i].dwork,
+				  tavil_tx_mute_update_callback);
+	}
+
+	tavil->spk_anc_dwork.tavil = tavil;
+	INIT_DELAYED_WORK(&tavil->spk_anc_dwork.dwork,
+			  tavil_spk_anc_update_callback);
+
+	tavil_mclk2_reg_defaults(tavil);
+
+	/* DSD initialization */
+	tavil->dsd_config = tavil_dsd_init(codec);
+	if (IS_ERR_OR_NULL(tavil->dsd_config))
+		dev_dbg(tavil->dev, "%s: DSD init failed\n", __func__);
+
+	mutex_lock(&tavil->codec_mutex);
+	snd_soc_dapm_disable_pin(dapm, "ANC EAR PA");
+	snd_soc_dapm_disable_pin(dapm, "ANC EAR");
+	snd_soc_dapm_disable_pin(dapm, "ANC HPHL PA");
+	snd_soc_dapm_disable_pin(dapm, "ANC HPHR PA");
+	snd_soc_dapm_disable_pin(dapm, "ANC HPHL");
+	snd_soc_dapm_disable_pin(dapm, "ANC HPHR");
+	snd_soc_dapm_enable_pin(dapm, "ANC SPK1 PA");
+	mutex_unlock(&tavil->codec_mutex);
+
+	snd_soc_dapm_ignore_suspend(dapm, "AIF1 Playback");
+	snd_soc_dapm_ignore_suspend(dapm, "AIF1 Capture");
+	snd_soc_dapm_ignore_suspend(dapm, "AIF2 Playback");
+	snd_soc_dapm_ignore_suspend(dapm, "AIF2 Capture");
+	snd_soc_dapm_ignore_suspend(dapm, "AIF3 Playback");
+	snd_soc_dapm_ignore_suspend(dapm, "AIF3 Capture");
+	snd_soc_dapm_ignore_suspend(dapm, "AIF4 Playback");
+	snd_soc_dapm_ignore_suspend(dapm, "AIF4 MAD TX");
+	snd_soc_dapm_ignore_suspend(dapm, "VIfeed");
+
+	snd_soc_dapm_sync(dapm);
+
+	tavil_wdsp_initialize(codec);
+
+	/*
+	 * Once the codec initialization is completed, the svs vote
+	 * can be released allowing the codec to go to SVS2.
+	 */
+	tavil_vote_svs(tavil, false);
+
+	return ret;
+
+err_pdata:
+	devm_kfree(codec->dev, ptr);
+	control->rx_chs = NULL;
+	control->tx_chs = NULL;
+err_hwdep:
+	devm_kfree(codec->dev, tavil->fw_data);
+	tavil->fw_data = NULL;
+err:
+	return ret;
+}
+
+static int tavil_soc_codec_remove(struct snd_soc_codec *codec)
+{
+	struct wcd9xxx *control;
+	struct tavil_priv *tavil = snd_soc_codec_get_drvdata(codec);
+
+	control = dev_get_drvdata(codec->dev->parent);
+	devm_kfree(codec->dev, control->rx_chs);
+	control->rx_chs = NULL;
+	control->tx_chs = NULL;
+	tavil_cleanup_irqs(tavil);
+
+	if (tavil->wdsp_cntl)
+		wcd_dsp_cntl_deinit(&tavil->wdsp_cntl);
+
+	/* Deinitialize MBHC module */
+	tavil_mbhc_deinit(codec);
+	tavil->mbhc = NULL;
+
+	return 0;
+}
+
+static struct regmap *tavil_get_regmap(struct device *dev)
+{
+	struct wcd9xxx *control = dev_get_drvdata(dev->parent);
+
+	return control->regmap;
+}
+
+static struct snd_soc_codec_driver soc_codec_dev_tavil = {
+	.probe = tavil_soc_codec_probe,
+	.remove = tavil_soc_codec_remove,
+	.controls = tavil_snd_controls,
+	.num_controls = ARRAY_SIZE(tavil_snd_controls),
+	.dapm_widgets = tavil_dapm_widgets,
+	.num_dapm_widgets = ARRAY_SIZE(tavil_dapm_widgets),
+	.dapm_routes = tavil_audio_map,
+	.num_dapm_routes = ARRAY_SIZE(tavil_audio_map),
+	.get_regmap = tavil_get_regmap,
+};
+
+#ifdef CONFIG_PM
+static int tavil_suspend(struct device *dev)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct tavil_priv *tavil = platform_get_drvdata(pdev);
+
+	if (!tavil) {
+		dev_err(dev, "%s: tavil private data is NULL\n", __func__);
+		return -EINVAL;
+	}
+	dev_dbg(dev, "%s: system suspend\n", __func__);
+	if (delayed_work_pending(&tavil->power_gate_work) &&
+	    cancel_delayed_work_sync(&tavil->power_gate_work))
+		tavil_codec_power_gate_digital_core(tavil);
+	return 0;
+}
+
+static int tavil_resume(struct device *dev)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct tavil_priv *tavil = platform_get_drvdata(pdev);
+
+	if (!tavil) {
+		dev_err(dev, "%s: tavil private data is NULL\n", __func__);
+		return -EINVAL;
+	}
+	dev_dbg(dev, "%s: system resume\n", __func__);
+	return 0;
+}
+
+static const struct dev_pm_ops tavil_pm_ops = {
+	.suspend = tavil_suspend,
+	.resume = tavil_resume,
+};
+#endif
+
+static int tavil_swrm_read(void *handle, int reg)
+{
+	struct tavil_priv *tavil;
+	struct wcd9xxx *wcd9xxx;
+	unsigned short swr_rd_addr_base;
+	unsigned short swr_rd_data_base;
+	int val, ret;
+
+	if (!handle) {
+		pr_err("%s: NULL handle\n", __func__);
+		return -EINVAL;
+	}
+	tavil = (struct tavil_priv *)handle;
+	wcd9xxx = tavil->wcd9xxx;
+
+	dev_dbg(tavil->dev, "%s: Reading soundwire register, 0x%x\n",
+		__func__, reg);
+	swr_rd_addr_base = WCD934X_SWR_AHB_BRIDGE_RD_ADDR_0;
+	swr_rd_data_base = WCD934X_SWR_AHB_BRIDGE_RD_DATA_0;
+
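+	/*
+	 * A soundwire read goes through the AHB bridge in two steps:
+	 * write the 4-byte target address, then read the 4-byte result
+	 * back from the data registers. The mutex keeps the two steps
+	 * atomic with respect to other readers.
+	 */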
+	mutex_lock(&tavil->swr.read_mutex);
+	ret = regmap_bulk_write(wcd9xxx->regmap, swr_rd_addr_base,
+				 (u8 *)&reg, 4);
+	if (ret < 0) {
+		dev_err(tavil->dev, "%s: RD Addr Failure\n", __func__);
+		goto done;
+	}
+	ret = regmap_bulk_read(wcd9xxx->regmap, swr_rd_data_base,
+				(u8 *)&val, 4);
+	if (ret < 0) {
+		dev_err(tavil->dev, "%s: RD Data Failure\n", __func__);
+		goto done;
+	}
+	ret = val;
+done:
+	mutex_unlock(&tavil->swr.read_mutex);
+
+	return ret;
+}
+
+static int tavil_swrm_bulk_write(void *handle, u32 *reg, u32 *val, size_t len)
+{
+	struct tavil_priv *tavil;
+	struct wcd9xxx *wcd9xxx;
+	struct wcd9xxx_reg_val *bulk_reg;
+	unsigned short swr_wr_addr_base;
+	unsigned short swr_wr_data_base;
+	int i, j, ret;
+
+	if (!handle || !reg || !val) {
+		pr_err("%s: NULL parameter\n", __func__);
+		return -EINVAL;
+	}
+	if (len == 0) {
+		pr_err("%s: Invalid size: %zu\n", __func__, len);
+		return -EINVAL;
+	}
+	tavil = (struct tavil_priv *)handle;
+	wcd9xxx = tavil->wcd9xxx;
+
+	swr_wr_addr_base = WCD934X_SWR_AHB_BRIDGE_WR_ADDR_0;
+	swr_wr_data_base = WCD934X_SWR_AHB_BRIDGE_WR_DATA_0;
+
+	bulk_reg = kzalloc((2 * len * sizeof(struct wcd9xxx_reg_val)),
+			   GFP_KERNEL);
+	if (!bulk_reg)
+		return -ENOMEM;
+
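+	/*
+	 * Expand each (reg, val) pair into two bridge writes, data word
+	 * first and address word second, matching the ordering of the
+	 * single-write path in tavil_swrm_write().
+	 */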
+	for (i = 0, j = 0; i < (len * 2); i += 2, j++) {
+		bulk_reg[i].reg = swr_wr_data_base;
+		bulk_reg[i].buf = (u8 *)(&val[j]);
+		bulk_reg[i].bytes = 4;
+		bulk_reg[i+1].reg = swr_wr_addr_base;
+		bulk_reg[i+1].buf = (u8 *)(&reg[j]);
+		bulk_reg[i+1].bytes = 4;
+	}
+
+	mutex_lock(&tavil->swr.write_mutex);
+	ret = wcd9xxx_slim_bulk_write(wcd9xxx, bulk_reg,
+			 (len * 2), false);
+	if (ret) {
+		dev_err(tavil->dev, "%s: swrm bulk write failed, ret: %d\n",
+			__func__, ret);
+	}
+	mutex_unlock(&tavil->swr.write_mutex);
+
+	kfree(bulk_reg);
+	return ret;
+}
+
+static int tavil_swrm_write(void *handle, int reg, int val)
+{
+	struct tavil_priv *tavil;
+	struct wcd9xxx *wcd9xxx;
+	unsigned short swr_wr_addr_base;
+	unsigned short swr_wr_data_base;
+	struct wcd9xxx_reg_val bulk_reg[2];
+	int ret;
+
+	if (!handle) {
+		pr_err("%s: NULL handle\n", __func__);
+		return -EINVAL;
+	}
+	tavil = (struct tavil_priv *)handle;
+	wcd9xxx = tavil->wcd9xxx;
+
+	swr_wr_addr_base = WCD934X_SWR_AHB_BRIDGE_WR_ADDR_0;
+	swr_wr_data_base = WCD934X_SWR_AHB_BRIDGE_WR_DATA_0;
+
+	/* First Write the Data to register */
+	bulk_reg[0].reg = swr_wr_data_base;
+	bulk_reg[0].buf = (u8 *)(&val);
+	bulk_reg[0].bytes = 4;
+	bulk_reg[1].reg = swr_wr_addr_base;
+	bulk_reg[1].buf = (u8 *)(&reg);
+	bulk_reg[1].bytes = 4;
+
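+	/*
+	 * The data word is staged first; the bridge presumably latches
+	 * the transaction on the subsequent address write, so the order
+	 * of the two bulk writes matters.
+	 */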
+	mutex_lock(&tavil->swr.write_mutex);
+	ret = wcd9xxx_slim_bulk_write(wcd9xxx, bulk_reg, 2, false);
+	if (ret < 0)
+		dev_err(tavil->dev, "%s: WR Data Failure\n", __func__);
+	mutex_unlock(&tavil->swr.write_mutex);
+
+	return ret;
+}
+
+static int tavil_swrm_clock(void *handle, bool enable)
+{
+	struct tavil_priv *tavil;
+
+	if (!handle) {
+		pr_err("%s: NULL handle\n", __func__);
+		return -EINVAL;
+	}
+	tavil = (struct tavil_priv *)handle;
+
+	mutex_lock(&tavil->swr.clk_mutex);
+	dev_dbg(tavil->dev, "%s: swrm clock %s\n",
+		__func__, (enable ? "enable" : "disable"));
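+	/*
+	 * Reference-count soundwire clock users: MCLK and the SWR clock
+	 * control bit are only toggled on the first enable and the last
+	 * disable.
+	 */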
+	if (enable) {
+		tavil->swr.clk_users++;
+		if (tavil->swr.clk_users == 1) {
+			regmap_update_bits(tavil->wcd9xxx->regmap,
+					WCD934X_TEST_DEBUG_NPL_DLY_TEST_1,
+					0x10, 0x00);
+			__tavil_cdc_mclk_enable(tavil, true);
+			regmap_update_bits(tavil->wcd9xxx->regmap,
+				WCD934X_CDC_CLK_RST_CTRL_SWR_CONTROL,
+				0x01, 0x01);
+		}
+	} else {
+		tavil->swr.clk_users--;
+		if (tavil->swr.clk_users == 0) {
+			regmap_update_bits(tavil->wcd9xxx->regmap,
+				WCD934X_CDC_CLK_RST_CTRL_SWR_CONTROL,
+				0x01, 0x00);
+			__tavil_cdc_mclk_enable(tavil, false);
+			regmap_update_bits(tavil->wcd9xxx->regmap,
+					WCD934X_TEST_DEBUG_NPL_DLY_TEST_1,
+					0x10, 0x10);
+		}
+	}
+	dev_dbg(tavil->dev, "%s: swrm clock users %d\n",
+		__func__, tavil->swr.clk_users);
+	mutex_unlock(&tavil->swr.clk_mutex);
+
+	return 0;
+}
+
+static int tavil_swrm_handle_irq(void *handle,
+				 irqreturn_t (*swrm_irq_handler)(int irq,
+								 void *data),
+				 void *swrm_handle,
+				 int action)
+{
+	struct tavil_priv *tavil;
+	int ret = 0;
+	struct wcd9xxx *wcd9xxx;
+
+	if (!handle) {
+		pr_err("%s: NULL handle\n", __func__);
+		return -EINVAL;
+	}
+	tavil = (struct tavil_priv *) handle;
+	wcd9xxx = tavil->wcd9xxx;
+
+	if (action) {
+		ret = wcd9xxx_request_irq(&wcd9xxx->core_res,
+					  WCD934X_IRQ_SOUNDWIRE,
+					  swrm_irq_handler,
+					  "Tavil SWR Master", swrm_handle);
+		if (ret)
+			dev_err(tavil->dev, "%s: Failed to request irq %d\n",
+				__func__, WCD934X_IRQ_SOUNDWIRE);
+	} else {
+		wcd9xxx_free_irq(&wcd9xxx->core_res, WCD934X_IRQ_SOUNDWIRE,
+				 swrm_handle);
+	}
+
+	return ret;
+}
+
+static void tavil_codec_add_spi_device(struct tavil_priv *tavil,
+				       struct device_node *node)
+{
+	struct spi_master *master;
+	struct spi_device *spi;
+	u32 prop_value;
+	int rc;
+
+	/* Read the master bus num from DT node */
+	rc = of_property_read_u32(node, "qcom,master-bus-num",
+				  &prop_value);
+	if (IS_ERR_VALUE(rc)) {
+		dev_err(tavil->dev, "%s: prop %s not found in node %s",
+			__func__, "qcom,master-bus-num", node->full_name);
+		goto done;
+	}
+
+	/* Get the reference to SPI master */
+	master = spi_busnum_to_master(prop_value);
+	if (!master) {
+		dev_err(tavil->dev, "%s: Invalid spi_master for bus_num %u\n",
+			__func__, prop_value);
+		goto done;
+	}
+
+	/* Allocate the spi device */
+	spi = spi_alloc_device(master);
+	if (!spi) {
+		dev_err(tavil->dev, "%s: spi_alloc_device failed\n",
+			__func__);
+		goto err_spi_alloc_dev;
+	}
+
+	/* Initialize device properties */
+	if (of_modalias_node(node, spi->modalias,
+			     sizeof(spi->modalias)) < 0) {
+		dev_err(tavil->dev, "%s: cannot find modalias for %s\n",
+			__func__, node->full_name);
+		goto err_dt_parse;
+	}
+
+	rc = of_property_read_u32(node, "qcom,chip-select",
+				  &prop_value);
+	if (IS_ERR_VALUE(rc)) {
+		dev_err(tavil->dev, "%s: prop %s not found in node %s",
+			__func__, "qcom,chip-select", node->full_name);
+		goto err_dt_parse;
+	}
+	spi->chip_select = prop_value;
+
+	rc = of_property_read_u32(node, "qcom,max-frequency",
+				  &prop_value);
+	if (IS_ERR_VALUE(rc)) {
+		dev_err(tavil->dev, "%s: prop %s not found in node %s",
+			__func__, "qcom,max-frequency", node->full_name);
+		goto err_dt_parse;
+	}
+	spi->max_speed_hz = prop_value;
+
+	spi->dev.of_node = node;
+
+	rc = spi_add_device(spi);
+	if (IS_ERR_VALUE(rc)) {
+		dev_err(tavil->dev, "%s: spi_add_device failed\n", __func__);
+		goto err_dt_parse;
+	}
+
+	/* Put the reference to SPI master */
+	put_device(&master->dev);
+
+	return;
+
+err_dt_parse:
+	spi_dev_put(spi);
+
+err_spi_alloc_dev:
+	/* Put the reference to SPI master */
+	put_device(&master->dev);
+done:
+	return;
+}
+
+static void tavil_add_child_devices(struct work_struct *work)
+{
+	struct tavil_priv *tavil;
+	struct platform_device *pdev;
+	struct device_node *node;
+	struct wcd9xxx *wcd9xxx;
+	struct tavil_swr_ctrl_data *swr_ctrl_data = NULL, *temp;
+	int ret, ctrl_num = 0;
+	struct wcd_swr_ctrl_platform_data *platdata;
+	char plat_dev_name[WCD934X_STRING_LEN];
+
+	tavil = container_of(work, struct tavil_priv,
+			     tavil_add_child_devices_work);
+	if (!tavil) {
+		pr_err("%s: Memory for WCD934X does not exist\n",
+			__func__);
+		return;
+	}
+	wcd9xxx = tavil->wcd9xxx;
+	if (!wcd9xxx) {
+		pr_err("%s: Memory for WCD9XXX does not exist\n",
+			__func__);
+		return;
+	}
+	if (!wcd9xxx->dev->of_node) {
+		dev_err(wcd9xxx->dev, "%s: DT node for wcd9xxx does not exist\n",
+			__func__);
+		return;
+	}
+
+	platdata = &tavil->swr.plat_data;
+
+	for_each_child_of_node(wcd9xxx->dev->of_node, node) {
+
+		/* Parse and add the SPI device node */
+		if (!strcmp(node->name, "wcd_spi")) {
+			tavil_codec_add_spi_device(tavil, node);
+			continue;
+		}
+
+		/* Parse other child device nodes and add platform device */
+		if (!strcmp(node->name, "swr_master"))
+			strlcpy(plat_dev_name, "tavil_swr_ctrl",
+				(WCD934X_STRING_LEN - 1));
+		else if (strnstr(node->name, "msm_cdc_pinctrl",
+				 strlen("msm_cdc_pinctrl")) != NULL)
+			strlcpy(plat_dev_name, node->name,
+				(WCD934X_STRING_LEN - 1));
+		else
+			continue;
+
+		pdev = platform_device_alloc(plat_dev_name, -1);
+		if (!pdev) {
+			dev_err(wcd9xxx->dev, "%s: pdev memory alloc failed\n",
+				__func__);
+			ret = -ENOMEM;
+			goto err_mem;
+		}
+		pdev->dev.parent = tavil->dev;
+		pdev->dev.of_node = node;
+
+		if (strcmp(node->name, "swr_master") == 0) {
+			ret = platform_device_add_data(pdev, platdata,
+						       sizeof(*platdata));
+			if (ret) {
+				dev_err(&pdev->dev,
+					"%s: cannot add plat data ctrl:%d\n",
+					__func__, ctrl_num);
+				goto err_pdev_add;
+			}
+		}
+
+		ret = platform_device_add(pdev);
+		if (ret) {
+			dev_err(&pdev->dev,
+				"%s: Cannot add platform device\n",
+				__func__);
+			goto err_pdev_add;
+		}
+
+		if (strcmp(node->name, "swr_master") == 0) {
+			temp = krealloc(swr_ctrl_data,
+					(ctrl_num + 1) * sizeof(
+					struct tavil_swr_ctrl_data),
+					GFP_KERNEL);
+			if (!temp) {
+				dev_err(wcd9xxx->dev, "out of memory\n");
+				ret = -ENOMEM;
+				goto err_pdev_add;
+			}
+			swr_ctrl_data = temp;
+			swr_ctrl_data[ctrl_num].swr_pdev = pdev;
+			ctrl_num++;
+			dev_dbg(&pdev->dev,
+				"%s: Added soundwire ctrl device(s)\n",
+				__func__);
+			tavil->swr.ctrl_data = swr_ctrl_data;
+		}
+	}
+
+	return;
+
+err_pdev_add:
+	platform_device_put(pdev);
+err_mem:
+	return;
+}
+
+static int __tavil_enable_efuse_sensing(struct tavil_priv *tavil)
+{
+	int val, rc;
+
+	WCD9XXX_V2_BG_CLK_LOCK(tavil->resmgr);
+	__tavil_cdc_mclk_enable_locked(tavil, true);
+
+	regmap_update_bits(tavil->wcd9xxx->regmap,
+			WCD934X_CHIP_TIER_CTRL_EFUSE_CTL, 0x1E, 0x10);
+	regmap_update_bits(tavil->wcd9xxx->regmap,
+			WCD934X_CHIP_TIER_CTRL_EFUSE_CTL, 0x01, 0x01);
+	/*
+	 * 5ms sleep required after enabling efuse control
+	 * before checking the status.
+	 */
+	usleep_range(5000, 5500);
+	wcd_resmgr_set_sido_input_src(tavil->resmgr,
+					     SIDO_SOURCE_RCO_BG);
+
+	WCD9XXX_V2_BG_CLK_UNLOCK(tavil->resmgr);
+
+	rc = regmap_read(tavil->wcd9xxx->regmap,
+			 WCD934X_CHIP_TIER_CTRL_EFUSE_STATUS, &val);
+	if (rc || (!(val & 0x01)))
+		WARN(1, "%s: Efuse sense is not complete val=%x, ret=%d\n",
+			__func__, val, rc);
+
+	__tavil_cdc_mclk_enable(tavil, false);
+
+	return rc;
+}
+
+static void ___tavil_get_codec_fine_version(struct tavil_priv *tavil)
+{
+	int val1, val2, version;
+	struct regmap *regmap;
+	u16 id_minor;
+	u32 version_mask = 0;
+
+	regmap = tavil->wcd9xxx->regmap;
+	version = tavil->wcd9xxx->version;
+	id_minor = tavil->wcd9xxx->codec_type->id_minor;
+
+	regmap_read(regmap, WCD934X_CHIP_TIER_CTRL_EFUSE_VAL_OUT14, &val1);
+	regmap_read(regmap, WCD934X_CHIP_TIER_CTRL_EFUSE_VAL_OUT15, &val2);
+
+	dev_dbg(tavil->dev, "%s: chip version :0x%x 0x:%x\n",
+		__func__, val1, val2);
+
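+	/*
+	 * The fine version is read from efuse: bit 7 of VAL_OUT14 flags
+	 * a DSD-disabled part and bit 0 of VAL_OUT15 a SLNQ-disabled
+	 * part; the combination distinguishes WCD9340 (DSD disabled)
+	 * from WCD9341 (DSD capable).
+	 */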
+	version_mask |= (!!((u8)val1 & 0x80)) << DSD_DISABLED_MASK;
+	version_mask |= (!!((u8)val2 & 0x01)) << SLNQ_DISABLED_MASK;
+
+	switch (version_mask) {
+	case DSD_DISABLED | SLNQ_DISABLED:
+	    if (id_minor == cpu_to_le16(0))
+		version = TAVIL_VERSION_WCD9340_1_0;
+	    else if (id_minor == cpu_to_le16(0x01))
+		version = TAVIL_VERSION_WCD9340_1_1;
+	    break;
+	case SLNQ_DISABLED:
+	    if (id_minor == cpu_to_le16(0))
+		version = TAVIL_VERSION_WCD9341_1_0;
+	    else if (id_minor == cpu_to_le16(0x01))
+		version = TAVIL_VERSION_WCD9341_1_1;
+	    break;
+	}
+
+	tavil->wcd9xxx->version = version;
+	tavil->wcd9xxx->codec_type->version = version;
+}
+
+/*
+ * tavil_get_wcd_dsp_cntl: Get the reference to wcd_dsp_cntl
+ * @dev: Device pointer for codec device
+ *
+ * This API gets the reference to codec's struct wcd_dsp_cntl
+ */
+struct wcd_dsp_cntl *tavil_get_wcd_dsp_cntl(struct device *dev)
+{
+	struct platform_device *pdev;
+	struct tavil_priv *tavil;
+
+	if (!dev) {
+		pr_err("%s: Invalid device\n", __func__);
+		return NULL;
+	}
+
+	pdev = to_platform_device(dev);
+	tavil = platform_get_drvdata(pdev);
+
+	return tavil->wdsp_cntl;
+}
+EXPORT_SYMBOL(tavil_get_wcd_dsp_cntl);
+
+static int tavil_probe(struct platform_device *pdev)
+{
+	int ret = 0;
+	struct tavil_priv *tavil;
+	struct clk *wcd_ext_clk;
+	struct wcd9xxx_resmgr_v2 *resmgr;
+	struct wcd9xxx_power_region *cdc_pwr;
+
+	tavil = devm_kzalloc(&pdev->dev, sizeof(struct tavil_priv),
+			    GFP_KERNEL);
+	if (!tavil)
+		return -ENOMEM;
+
+	platform_set_drvdata(pdev, tavil);
+
+	tavil->wcd9xxx = dev_get_drvdata(pdev->dev.parent);
+	tavil->dev = &pdev->dev;
+	INIT_DELAYED_WORK(&tavil->power_gate_work, tavil_codec_power_gate_work);
+	mutex_init(&tavil->power_lock);
+	INIT_WORK(&tavil->tavil_add_child_devices_work,
+		  tavil_add_child_devices);
+	mutex_init(&tavil->micb_lock);
+	mutex_init(&tavil->swr.read_mutex);
+	mutex_init(&tavil->swr.write_mutex);
+	mutex_init(&tavil->swr.clk_mutex);
+	mutex_init(&tavil->codec_mutex);
+	mutex_init(&tavil->svs_mutex);
+
+	/*
+	 * Codec hardware by default comes up in SVS mode.
+	 * Initialize the svs_ref_cnt to 1 to reflect the hardware
+	 * state in the driver.
+	 */
+	tavil->svs_ref_cnt = 1;
+
+	cdc_pwr = devm_kzalloc(&pdev->dev, sizeof(struct wcd9xxx_power_region),
+				GFP_KERNEL);
+	if (!cdc_pwr) {
+		ret = -ENOMEM;
+		goto err_resmgr;
+	}
+	tavil->wcd9xxx->wcd9xxx_pwr[WCD9XXX_DIG_CORE_REGION_1] = cdc_pwr;
+	cdc_pwr->pwr_collapse_reg_min = WCD934X_DIG_CORE_REG_MIN;
+	cdc_pwr->pwr_collapse_reg_max = WCD934X_DIG_CORE_REG_MAX;
+	wcd9xxx_set_power_state(tavil->wcd9xxx,
+				WCD_REGION_POWER_COLLAPSE_REMOVE,
+				WCD9XXX_DIG_CORE_REGION_1);
+	/*
+	 * Initialize the resource manager so that if child devices such
+	 * as the SoundWire master request a clock, the resource manager
+	 * can honor the request.
+	 */
+	resmgr = wcd_resmgr_init(&tavil->wcd9xxx->core_res, NULL);
+	if (IS_ERR(resmgr)) {
+		ret = PTR_ERR(resmgr);
+		dev_err(&pdev->dev, "%s: Failed to initialize wcd resmgr\n",
+			__func__);
+		goto err_resmgr;
+	}
+	tavil->resmgr = resmgr;
+	tavil->swr.plat_data.handle = (void *) tavil;
+	tavil->swr.plat_data.read = tavil_swrm_read;
+	tavil->swr.plat_data.write = tavil_swrm_write;
+	tavil->swr.plat_data.bulk_write = tavil_swrm_bulk_write;
+	tavil->swr.plat_data.clk = tavil_swrm_clock;
+	tavil->swr.plat_data.handle_irq = tavil_swrm_handle_irq;
+	tavil->swr.spkr_gain_offset = WCD934X_RX_GAIN_OFFSET_0_DB;
+
+	/* Register for Clock */
+	wcd_ext_clk = clk_get(tavil->wcd9xxx->dev, "wcd_clk");
+	if (IS_ERR(wcd_ext_clk)) {
+		dev_err(tavil->wcd9xxx->dev, "%s: clk get %s failed\n",
+			__func__, "wcd_ext_clk");
+		ret = PTR_ERR(wcd_ext_clk);
+		goto err_clk;
+	}
+	tavil->wcd_ext_clk = wcd_ext_clk;
+	set_bit(AUDIO_NOMINAL, &tavil->status_mask);
+	/* Update codec register default values */
+	dev_dbg(&pdev->dev, "%s: MCLK Rate = %x\n", __func__,
+		tavil->wcd9xxx->mclk_rate);
+	if (tavil->wcd9xxx->mclk_rate == WCD934X_MCLK_CLK_12P288MHZ)
+		regmap_update_bits(tavil->wcd9xxx->regmap,
+				   WCD934X_CODEC_RPM_CLK_MCLK_CFG,
+				   0x03, 0x00);
+	else if (tavil->wcd9xxx->mclk_rate == WCD934X_MCLK_CLK_9P6MHZ)
+		regmap_update_bits(tavil->wcd9xxx->regmap,
+				   WCD934X_CODEC_RPM_CLK_MCLK_CFG,
+				   0x03, 0x01);
+	tavil_update_reg_defaults(tavil);
+	__tavil_enable_efuse_sensing(tavil);
+	___tavil_get_codec_fine_version(tavil);
+	tavil_update_cpr_defaults(tavil);
+
+	/* Register with soc framework */
+	ret = snd_soc_register_codec(&pdev->dev, &soc_codec_dev_tavil,
+				  tavil_dai, ARRAY_SIZE(tavil_dai));
+	if (ret) {
+		dev_err(&pdev->dev, "%s: Codec registration failed\n",
+			__func__);
+		goto err_cdc_reg;
+	}
+	schedule_work(&tavil->tavil_add_child_devices_work);
+
+	return ret;
+
+err_cdc_reg:
+	clk_put(tavil->wcd_ext_clk);
+err_clk:
+	wcd_resmgr_remove(tavil->resmgr);
+err_resmgr:
+	mutex_destroy(&tavil->micb_lock);
+	mutex_destroy(&tavil->svs_mutex);
+	mutex_destroy(&tavil->codec_mutex);
+	mutex_destroy(&tavil->swr.read_mutex);
+	mutex_destroy(&tavil->swr.write_mutex);
+	mutex_destroy(&tavil->swr.clk_mutex);
+	devm_kfree(&pdev->dev, tavil);
+
+	return ret;
+}
+
+static int tavil_remove(struct platform_device *pdev)
+{
+	struct tavil_priv *tavil;
+
+	tavil = platform_get_drvdata(pdev);
+	if (!tavil)
+		return -EINVAL;
+
+	mutex_destroy(&tavil->micb_lock);
+	mutex_destroy(&tavil->svs_mutex);
+	mutex_destroy(&tavil->codec_mutex);
+	mutex_destroy(&tavil->swr.read_mutex);
+	mutex_destroy(&tavil->swr.write_mutex);
+	mutex_destroy(&tavil->swr.clk_mutex);
+
+	snd_soc_unregister_codec(&pdev->dev);
+	clk_put(tavil->wcd_ext_clk);
+	wcd_resmgr_remove(tavil->resmgr);
+	if (tavil->dsd_config) {
+		tavil_dsd_deinit(tavil->dsd_config);
+		tavil->dsd_config = NULL;
+	}
+	devm_kfree(&pdev->dev, tavil);
+	return 0;
+}
+
+static struct platform_driver tavil_codec_driver = {
+	.probe = tavil_probe,
+	.remove = tavil_remove,
+	.driver = {
+		.name = "tavil_codec",
+		.owner = THIS_MODULE,
+#ifdef CONFIG_PM
+		.pm = &tavil_pm_ops,
+#endif
+	},
+};
+
+module_platform_driver(tavil_codec_driver);
+
+MODULE_DESCRIPTION("Tavil Codec driver");
+MODULE_LICENSE("GPL v2");
diff -Nruw linux-4.4.115/sound/soc/codecs/wcd934x/wcd934x-dsd.c linux-4.4.115-fbx/sound/soc/codecs/wcd934x/wcd934x-dsd.c
--- linux-4.4.115/sound/soc/codecs/wcd934x/wcd934x-dsd.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/sound/soc/codecs/wcd934x/wcd934x-dsd.c	2019-01-22 16:16:29.551301175 +0100
@@ -0,0 +1,772 @@
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/mfd/wcd934x/registers.h>
+#include <sound/tlv.h>
+#include <sound/control.h>
+#include "wcd934x-dsd.h"
+
+#define DSD_VOLUME_MAX_0dB      0
+#define DSD_VOLUME_MIN_M110dB   -110
+
+#define DSD_VOLUME_RANGE_CHECK(x)   (((x) >= DSD_VOLUME_MIN_M110dB) &&\
+				     ((x) <= DSD_VOLUME_MAX_0dB))
+#define DSD_VOLUME_STEPS            3
+#define DSD_VOLUME_UPDATE_DELAY_MS  30
+#define DSD_VOLUME_USLEEP_MARGIN_US 100
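+/*
+ * Per-step delay: the total update delay is spread across twice the
+ * number of volume steps.
+ */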
+#define DSD_VOLUME_STEP_DELAY_US    ((1000 * DSD_VOLUME_UPDATE_DELAY_MS) / \
+				     (2 * DSD_VOLUME_STEPS))
+
+#define TAVIL_VERSION_1_0  0
+#define TAVIL_VERSION_1_1  1
+
+static const DECLARE_TLV_DB_MINMAX(tavil_dsd_db_scale, DSD_VOLUME_MIN_M110dB,
+				   DSD_VOLUME_MAX_0dB);
+
+static const char *const dsd_if_text[] = {
+	"ZERO", "RX0", "RX1", "RX2", "RX3", "RX4", "RX5", "RX6", "RX7",
+	"DSD_DATA_PAD"
+};
+
+static const char * const dsd_filt0_mux_text[] = {
+	"ZERO", "DSD_L IF MUX",
+};
+
+static const char * const dsd_filt1_mux_text[] = {
+	"ZERO", "DSD_R IF MUX",
+};
+
+static const struct soc_enum dsd_filt0_mux_enum =
+	SOC_ENUM_SINGLE(WCD934X_CDC_DSD0_PATH_CTL, 0,
+			ARRAY_SIZE(dsd_filt0_mux_text), dsd_filt0_mux_text);
+
+static const struct soc_enum dsd_filt1_mux_enum =
+	SOC_ENUM_SINGLE(WCD934X_CDC_DSD1_PATH_CTL, 0,
+			ARRAY_SIZE(dsd_filt1_mux_text), dsd_filt1_mux_text);
+
+static SOC_ENUM_SINGLE_DECL(dsd_l_if_enum, WCD934X_CDC_DSD0_CFG0,
+			    2, dsd_if_text);
+static SOC_ENUM_SINGLE_DECL(dsd_r_if_enum, WCD934X_CDC_DSD1_CFG0,
+			    2, dsd_if_text);
+
+static const struct snd_kcontrol_new dsd_filt0_mux =
+		SOC_DAPM_ENUM("DSD Filt0 Mux", dsd_filt0_mux_enum);
+
+static const struct snd_kcontrol_new dsd_filt1_mux =
+		SOC_DAPM_ENUM("DSD Filt1 Mux", dsd_filt1_mux_enum);
+
+static const struct snd_kcontrol_new dsd_l_if_mux =
+		SOC_DAPM_ENUM("DSD Left If Mux", dsd_l_if_enum);
+static const struct snd_kcontrol_new dsd_r_if_mux =
+		SOC_DAPM_ENUM("DSD Right If Mux", dsd_r_if_enum);
+
+static const struct snd_soc_dapm_route tavil_dsd_audio_map[] = {
+	{"DSD_L IF MUX", "RX0", "CDC_IF RX0 MUX"},
+	{"DSD_L IF MUX", "RX1", "CDC_IF RX1 MUX"},
+	{"DSD_L IF MUX", "RX2", "CDC_IF RX2 MUX"},
+	{"DSD_L IF MUX", "RX3", "CDC_IF RX3 MUX"},
+	{"DSD_L IF MUX", "RX4", "CDC_IF RX4 MUX"},
+	{"DSD_L IF MUX", "RX5", "CDC_IF RX5 MUX"},
+	{"DSD_L IF MUX", "RX6", "CDC_IF RX6 MUX"},
+	{"DSD_L IF MUX", "RX7", "CDC_IF RX7 MUX"},
+
+	{"DSD_FILTER_0", NULL, "DSD_L IF MUX"},
+	{"DSD_FILTER_0", NULL, "RX INT1 NATIVE SUPPLY"},
+	{"RX INT1 MIX3", "DSD HPHL Switch", "DSD_FILTER_0"},
+
+	{"DSD_R IF MUX", "RX0", "CDC_IF RX0 MUX"},
+	{"DSD_R IF MUX", "RX1", "CDC_IF RX1 MUX"},
+	{"DSD_R IF MUX", "RX2", "CDC_IF RX2 MUX"},
+	{"DSD_R IF MUX", "RX3", "CDC_IF RX3 MUX"},
+	{"DSD_R IF MUX", "RX4", "CDC_IF RX4 MUX"},
+	{"DSD_R IF MUX", "RX5", "CDC_IF RX5 MUX"},
+	{"DSD_R IF MUX", "RX6", "CDC_IF RX6 MUX"},
+	{"DSD_R IF MUX", "RX7", "CDC_IF RX7 MUX"},
+
+	{"DSD_FILTER_1", NULL, "DSD_R IF MUX"},
+	{"DSD_FILTER_1", NULL, "RX INT2 NATIVE SUPPLY"},
+	{"RX INT2 MIX3", "DSD HPHR Switch", "DSD_FILTER_1"},
+
+	{"DSD_FILTER_0", NULL, "RX INT3 NATIVE SUPPLY"},
+	{"RX INT3 MIX3", "DSD LO1 Switch", "DSD_FILTER_0"},
+	{"DSD_FILTER_1", NULL, "RX INT4 NATIVE SUPPLY"},
+	{"RX INT4 MIX3", "DSD LO2 Switch", "DSD_FILTER_1"},
+};
+
+static bool is_valid_dsd_interpolator(int interp_num)
+{
+	if ((interp_num == INTERP_HPHL) || (interp_num == INTERP_HPHR) ||
+	    (interp_num == INTERP_LO1) || (interp_num == INTERP_LO2))
+		return true;
+
+	return false;
+}
+
+/**
+ * tavil_dsd_set_mixer_value - Set DSD HPH/LO mixer value
+ *
+ * @dsd_conf: pointer to dsd config
+ * @interp_num: Interpolator number (HPHL/R, LO1/2)
+ * @sw_value: Mixer switch value
+ *
+ * Returns 0 on success or -EINVAL on failure
+ */
+int tavil_dsd_set_mixer_value(struct tavil_dsd_config *dsd_conf,
+			      int interp_num, int sw_value)
+{
+	if (!dsd_conf)
+		return -EINVAL;
+
+	if (!is_valid_dsd_interpolator(interp_num))
+		return -EINVAL;
+
+	dsd_conf->dsd_interp_mixer[interp_num] = !!sw_value;
+
+	return 0;
+}
+EXPORT_SYMBOL(tavil_dsd_set_mixer_value);
+
+/**
+ * tavil_dsd_get_current_mixer_value - Get DSD HPH/LO mixer value
+ *
+ * @dsd_conf: pointer to dsd config
+ * @interp_num: Interpolator number (HPHL/R, LO1/2)
+ *
+ * Returns the current mixer value on success or -EINVAL on failure
+ */
+int tavil_dsd_get_current_mixer_value(struct tavil_dsd_config *dsd_conf,
+				      int interp_num)
+{
+	if (!dsd_conf)
+		return -EINVAL;
+
+	if (!is_valid_dsd_interpolator(interp_num))
+		return -EINVAL;
+
+	return dsd_conf->dsd_interp_mixer[interp_num];
+}
+EXPORT_SYMBOL(tavil_dsd_get_current_mixer_value);
+
+/**
+ * tavil_dsd_set_out_select - DSD0/1 out select to HPH or LO
+ *
+ * @dsd_conf: pointer to dsd config
+ * @interp_num: Interpolator number (HPHL/R, LO1/2)
+ *
+ * Returns 0 on success or -EINVAL on failure
+ */
+int tavil_dsd_set_out_select(struct tavil_dsd_config *dsd_conf,
+			     int interp_num)
+{
+	unsigned int reg, val;
+	struct snd_soc_codec *codec;
+
+	if (!dsd_conf || !dsd_conf->codec)
+		return -EINVAL;
+
+	codec = dsd_conf->codec;
+
+	if (!is_valid_dsd_interpolator(interp_num)) {
+		dev_err(codec->dev, "%s: Invalid Interpolator: %d for DSD\n",
+			__func__, interp_num);
+		return -EINVAL;
+	}
+
+	switch (interp_num) {
+	case INTERP_HPHL:
+		reg = WCD934X_CDC_DSD0_CFG0;
+		val = 0x00;
+		break;
+	case INTERP_HPHR:
+		reg = WCD934X_CDC_DSD1_CFG0;
+		val = 0x00;
+		break;
+	case INTERP_LO1:
+		reg = WCD934X_CDC_DSD0_CFG0;
+		val = 0x02;
+		break;
+	case INTERP_LO2:
+		reg = WCD934X_CDC_DSD1_CFG0;
+		val = 0x02;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	snd_soc_update_bits(codec, reg, 0x02, val);
+
+	return 0;
+}
+EXPORT_SYMBOL(tavil_dsd_set_out_select);
+
+/**
+ * tavil_dsd_reset - Reset DSD block
+ *
+ * @dsd_conf: pointer to dsd config
+ *
+ */
+void tavil_dsd_reset(struct tavil_dsd_config *dsd_conf)
+{
+	if (!dsd_conf || !dsd_conf->codec)
+		return;
+
+	snd_soc_update_bits(dsd_conf->codec, WCD934X_CDC_DSD0_PATH_CTL,
+			    0x02, 0x02);
+	snd_soc_update_bits(dsd_conf->codec, WCD934X_CDC_DSD0_PATH_CTL,
+			    0x01, 0x00);
+	snd_soc_update_bits(dsd_conf->codec, WCD934X_CDC_DSD1_PATH_CTL,
+			    0x02, 0x02);
+	snd_soc_update_bits(dsd_conf->codec, WCD934X_CDC_DSD1_PATH_CTL,
+			    0x01, 0x00);
+}
+EXPORT_SYMBOL(tavil_dsd_reset);
+
+/**
+ * tavil_dsd_set_interp_rate - Set interpolator rate for DSD
+ *
+ * @dsd_conf: pointer to dsd config
+ * @rx_port: RX port number
+ * @sample_rate: Sample rate of the RX interpolator
+ * @sample_rate_val: Interpolator rate value
+ */
+void tavil_dsd_set_interp_rate(struct tavil_dsd_config *dsd_conf, u16 rx_port,
+			       u32 sample_rate, u8 sample_rate_val)
+{
+	u8 dsd_inp_sel;
+	u8 dsd0_inp, dsd1_inp;
+	u8 val0, val1;
+	u8 dsd0_out_sel, dsd1_out_sel;
+	u16 int_fs_reg, interp_num = 0;
+	struct snd_soc_codec *codec;
+
+	if (!dsd_conf || !dsd_conf->codec)
+		return;
+
+	codec = dsd_conf->codec;
+
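+	/* Translate the RX port number into a DSD input mux select value */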
+	dsd_inp_sel = DSD_INP_SEL_RX0 + rx_port - WCD934X_RX_PORT_START_NUMBER;
+
+	val0 = snd_soc_read(codec, WCD934X_CDC_DSD0_CFG0);
+	val1 = snd_soc_read(codec, WCD934X_CDC_DSD1_CFG0);
+	dsd0_inp = (val0 & 0x3C) >> 2;
+	dsd1_inp = (val1 & 0x3C) >> 2;
+	dsd0_out_sel = (val0 & 0x02) >> 1;
+	dsd1_out_sel = (val1 & 0x02) >> 1;
+
+	/* Set HPHL or LO1 interp rate based on out select */
+	if (dsd_inp_sel == dsd0_inp) {
+		interp_num = dsd0_out_sel ? INTERP_LO1 : INTERP_HPHL;
+		dsd_conf->base_sample_rate[DSD0] = sample_rate;
+	}
+
+	/* Set HPHR or LO2 interp rate based on out select */
+	if (dsd_inp_sel == dsd1_inp) {
+		interp_num = dsd1_out_sel ? INTERP_LO2 : INTERP_HPHR;
+		dsd_conf->base_sample_rate[DSD1] = sample_rate;
+	}
+
+	if (interp_num) {
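+		/* RX path CTL regs have a stride of 20 per interpolator */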
+		int_fs_reg = WCD934X_CDC_RX0_RX_PATH_CTL + 20 * interp_num;
+		if ((snd_soc_read(codec, int_fs_reg) & 0x0f) < 0x09) {
+			dev_dbg(codec->dev, "%s: Set Interp %d to sample_rate val 0x%x\n",
+				__func__, interp_num, sample_rate_val);
+			snd_soc_update_bits(codec, int_fs_reg, 0x0F,
+					    sample_rate_val);
+		}
+	}
+}
+EXPORT_SYMBOL(tavil_dsd_set_interp_rate);
+
+static int tavil_set_dsd_mode(struct snd_soc_codec *codec, int dsd_num,
+			      u8 *pcm_rate_val)
+{
+	unsigned int dsd_out_sel_reg;
+	u8 dsd_mode;
+	u32 sample_rate;
+	struct tavil_dsd_config *dsd_conf = tavil_get_dsd_config(codec);
+
+	if (!dsd_conf)
+		return -EINVAL;
+
+	if ((dsd_num < 0) || (dsd_num > 1))
+		return -EINVAL;
+
+	sample_rate = dsd_conf->base_sample_rate[dsd_num];
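+	/* DSD0 and DSD1 register blocks are 16 registers apart */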
+	dsd_out_sel_reg = WCD934X_CDC_DSD0_CFG0 + dsd_num * 16;
+
+	switch (sample_rate) {
+	case 176400:
+		dsd_mode = 0; /* DSD_64 */
+		*pcm_rate_val = 0xb;
+		break;
+	case 352800:
+		dsd_mode = 1; /* DSD_128 */
+		*pcm_rate_val = 0xc;
+		break;
+	default:
+		dev_err(codec->dev, "%s: Invalid DSD rate: %d\n",
+			__func__, sample_rate);
+		return -EINVAL;
+	}
+
+	snd_soc_update_bits(codec, dsd_out_sel_reg, 0x01, dsd_mode);
+
+	return 0;
+}
+
+static void tavil_dsd_data_pull(struct snd_soc_codec *codec, int dsd_num,
+				u8 pcm_rate_val, bool enable)
+{
+	u8 clk_en, mute_en;
+	u8 dsd_inp_sel;
+
+	if (enable) {
+		clk_en = 0x20;
+		mute_en = 0x10;
+	} else {
+		clk_en = 0x00;
+		mute_en = 0x00;
+	}
+
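+	/*
+	 * dsd_num is a bitmask: bit 0 drives the RX7 mix path (DSD0),
+	 * bit 1 drives the RX8 mix path (DSD1).
+	 */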
+	if (dsd_num & 0x01) {
+		snd_soc_update_bits(codec, WCD934X_CDC_RX7_RX_PATH_MIX_CTL,
+				    0x20, clk_en);
+		dsd_inp_sel = (snd_soc_read(codec, WCD934X_CDC_DSD0_CFG0) &
+				0x3C) >> 2;
+		dsd_inp_sel = (enable) ? dsd_inp_sel : 0;
+		if (dsd_inp_sel < 9) {
+			snd_soc_update_bits(codec,
+					WCD934X_CDC_RX_INP_MUX_RX_INT7_CFG1,
+					0x0F, dsd_inp_sel);
+			snd_soc_update_bits(codec,
+					WCD934X_CDC_RX7_RX_PATH_MIX_CTL,
+					0x0F, pcm_rate_val);
+			snd_soc_update_bits(codec,
+					WCD934X_CDC_RX7_RX_PATH_MIX_CTL,
+					0x10, mute_en);
+		}
+	}
+	if (dsd_num & 0x02) {
+		snd_soc_update_bits(codec, WCD934X_CDC_RX8_RX_PATH_MIX_CTL,
+				    0x20, clk_en);
+		dsd_inp_sel = (snd_soc_read(codec, WCD934X_CDC_DSD1_CFG0) &
+				0x3C) >> 2;
+		dsd_inp_sel = (enable) ? dsd_inp_sel : 0;
+		if (dsd_inp_sel < 9) {
+			snd_soc_update_bits(codec,
+					WCD934X_CDC_RX_INP_MUX_RX_INT8_CFG1,
+					0x0F, dsd_inp_sel);
+			snd_soc_update_bits(codec,
+					WCD934X_CDC_RX8_RX_PATH_MIX_CTL,
+					0x0F, pcm_rate_val);
+			snd_soc_update_bits(codec,
+					WCD934X_CDC_RX8_RX_PATH_MIX_CTL,
+					0x10, mute_en);
+		}
+	}
+}
+
+static void tavil_dsd_update_volume(struct tavil_dsd_config *dsd_conf)
+{
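+	/* Strobe TOP_CFG0 bit 0 to apply the newly written DSD gain */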
+	snd_soc_update_bits(dsd_conf->codec, WCD934X_CDC_TOP_TOP_CFG0,
+			    0x01, 0x01);
+	snd_soc_update_bits(dsd_conf->codec, WCD934X_CDC_TOP_TOP_CFG0,
+			    0x01, 0x00);
+}
+
+static int tavil_enable_dsd(struct snd_soc_dapm_widget *w,
+			    struct snd_kcontrol *kcontrol, int event)
+{
+	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+	struct tavil_dsd_config *dsd_conf = tavil_get_dsd_config(codec);
+	int rc, clk_users;
+	int interp_idx;
+	u8 pcm_rate_val;
+
+	if (!dsd_conf) {
+		dev_err(codec->dev, "%s: null dsd_config pointer\n", __func__);
+		return -EINVAL;
+	}
+
+	dev_dbg(codec->dev, "%s: DSD%d, event: %d\n", __func__,
+		w->shift, event);
+
+	if (w->shift == DSD0) {
+		/* Read out select */
+		if (snd_soc_read(codec, WCD934X_CDC_DSD0_CFG0) & 0x02)
+			interp_idx = INTERP_LO1;
+		else
+			interp_idx = INTERP_HPHL;
+	} else if (w->shift == DSD1) {
+		/* Read out select */
+		if (snd_soc_read(codec, WCD934X_CDC_DSD1_CFG0) & 0x02)
+			interp_idx = INTERP_LO2;
+		else
+			interp_idx = INTERP_HPHR;
+	} else {
+		dev_err(codec->dev, "%s: Unsupported DSD:%d\n",
+			__func__, w->shift);
+		return -EINVAL;
+	}
+
+	switch (event) {
+	case SND_SOC_DAPM_PRE_PMU:
+		clk_users = tavil_codec_enable_interp_clk(codec, event,
+							  interp_idx);
+
+		rc = tavil_set_dsd_mode(codec, w->shift, &pcm_rate_val);
+		if (rc)
+			return rc;
+
+		tavil_dsd_data_pull(codec, (1 << w->shift), pcm_rate_val,
+				    true);
+
+		snd_soc_update_bits(codec,
+				    WCD934X_CDC_CLK_RST_CTRL_DSD_CONTROL, 0x01,
+				    0x01);
+		if (w->shift == DSD0) {
+			snd_soc_update_bits(codec, WCD934X_CDC_DSD0_PATH_CTL,
+					    0x02, 0x02);
+			snd_soc_update_bits(codec, WCD934X_CDC_DSD0_PATH_CTL,
+					    0x02, 0x00);
+			snd_soc_update_bits(codec, WCD934X_CDC_DSD0_PATH_CTL,
+					    0x01, 0x01);
+			/* Apply Gain */
+			snd_soc_write(codec, WCD934X_CDC_DSD0_CFG1,
+				      dsd_conf->volume[DSD0]);
+			if (dsd_conf->version == TAVIL_VERSION_1_1)
+				tavil_dsd_update_volume(dsd_conf);
+
+		} else if (w->shift == DSD1) {
+			snd_soc_update_bits(codec, WCD934X_CDC_DSD1_PATH_CTL,
+					    0x02, 0x02);
+			snd_soc_update_bits(codec, WCD934X_CDC_DSD1_PATH_CTL,
+					    0x02, 0x00);
+			snd_soc_update_bits(codec, WCD934X_CDC_DSD1_PATH_CTL,
+					    0x01, 0x01);
+			/* Apply Gain */
+			snd_soc_write(codec, WCD934X_CDC_DSD1_CFG1,
+				      dsd_conf->volume[DSD1]);
+			if (dsd_conf->version == TAVIL_VERSION_1_1)
+				tavil_dsd_update_volume(dsd_conf);
+		}
+		/* A 10 ms sleep is required after the DSD clock is set */
+		usleep_range(10000, 10100);
+
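+		/*
+		 * Another user already holds this interpolator clock, so
+		 * update ANA_RX_SUPPLIES and clear the DSD mute bit now.
+		 */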
+		if (clk_users > 1) {
+			snd_soc_update_bits(codec, WCD934X_ANA_RX_SUPPLIES,
+					    0x02, 0x02);
+			if (w->shift == DSD0)
+				snd_soc_update_bits(codec,
+						    WCD934X_CDC_DSD0_CFG2,
+						    0x04, 0x00);
+			if (w->shift == DSD1)
+				snd_soc_update_bits(codec,
+						    WCD934X_CDC_DSD1_CFG2,
+						    0x04, 0x00);
+
+		}
+		break;
+	case SND_SOC_DAPM_POST_PMD:
+		if (w->shift == DSD0) {
+			snd_soc_update_bits(codec, WCD934X_CDC_DSD0_CFG2,
+					    0x04, 0x04);
+			snd_soc_update_bits(codec, WCD934X_CDC_DSD0_PATH_CTL,
+					    0x01, 0x00);
+		} else if (w->shift == DSD1) {
+			snd_soc_update_bits(codec, WCD934X_CDC_DSD1_CFG2,
+					    0x04, 0x04);
+			snd_soc_update_bits(codec, WCD934X_CDC_DSD1_PATH_CTL,
+					    0x01, 0x00);
+		}
+
+		tavil_codec_enable_interp_clk(codec, event, interp_idx);
+
+		if (!(snd_soc_read(codec, WCD934X_CDC_DSD0_PATH_CTL) & 0x01) &&
+		    !(snd_soc_read(codec, WCD934X_CDC_DSD1_PATH_CTL) & 0x01)) {
+			snd_soc_update_bits(codec,
+					WCD934X_CDC_CLK_RST_CTRL_DSD_CONTROL,
+					0x01, 0x00);
+			tavil_dsd_data_pull(codec, 0x03, 0x04, false);
+			tavil_dsd_reset(dsd_conf);
+		}
+		break;
+	}
+
+	return 0;
+}
+
+static int tavil_dsd_vol_info(struct snd_kcontrol *kcontrol,
+			      struct snd_ctl_elem_info *uinfo)
+{
+	uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
+	uinfo->count = 2;
+	uinfo->value.integer.min = DSD_VOLUME_MIN_M110dB;
+	uinfo->value.integer.max = DSD_VOLUME_MAX_0dB;
+
+	return 0;
+}
+
+static int tavil_dsd_vol_put(struct snd_kcontrol *kcontrol,
+			     struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
+	struct tavil_dsd_config *dsd_conf = tavil_get_dsd_config(codec);
+	int nv[DSD_MAX], cv[DSD_MAX];
+	int step_size, nv1;
+	int i, dsd_idx;
+
+	if (!dsd_conf)
+		return 0;
+
+	mutex_lock(&dsd_conf->vol_mutex);
+
+	for (dsd_idx = DSD0; dsd_idx < DSD_MAX; dsd_idx++) {
+		cv[dsd_idx] = dsd_conf->volume[dsd_idx];
+		nv[dsd_idx] = ucontrol->value.integer.value[dsd_idx];
+	}
+
+	if ((!DSD_VOLUME_RANGE_CHECK(nv[DSD0])) ||
+	    (!DSD_VOLUME_RANGE_CHECK(nv[DSD1])))
+		goto done;
+
+	for (dsd_idx = DSD0; dsd_idx < DSD_MAX; dsd_idx++) {
+		if (cv[dsd_idx] == nv[dsd_idx])
+			continue;
+
+		dev_dbg(codec->dev, "%s: DSD%d cur.vol: %d, new vol: %d\n",
+			__func__, dsd_idx, cv[dsd_idx], nv[dsd_idx]);
+
+		step_size =  (nv[dsd_idx] - cv[dsd_idx]) /
+			      DSD_VOLUME_STEPS;
+
+		nv1 = cv[dsd_idx];
+
+		for (i = 0; i < DSD_VOLUME_STEPS; i++) {
+			nv1 += step_size;
+			snd_soc_write(codec,
+				      WCD934X_CDC_DSD0_CFG1 + 16 * dsd_idx,
+				      nv1);
+			if (dsd_conf->version == TAVIL_VERSION_1_1)
+				tavil_dsd_update_volume(dsd_conf);
+
+			/* sleep required after each volume step */
+			usleep_range(DSD_VOLUME_STEP_DELAY_US,
+				     (DSD_VOLUME_STEP_DELAY_US +
+				      DSD_VOLUME_USLEEP_MARGIN_US));
+		}
+		if (nv1 != nv[dsd_idx]) {
+			snd_soc_write(codec,
+				      WCD934X_CDC_DSD0_CFG1 + 16 * dsd_idx,
+				      nv[dsd_idx]);
+
+			if (dsd_conf->version == TAVIL_VERSION_1_1)
+				tavil_dsd_update_volume(dsd_conf);
+		}
+
+		dsd_conf->volume[dsd_idx] = nv[dsd_idx];
+	}
+
+done:
+	mutex_unlock(&dsd_conf->vol_mutex);
+
+	return 0;
+}
+
+static int tavil_dsd_vol_get(struct snd_kcontrol *kcontrol,
+			     struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
+	struct tavil_dsd_config *dsd_conf = tavil_get_dsd_config(codec);
+
+	if (dsd_conf) {
+		ucontrol->value.integer.value[0] = dsd_conf->volume[DSD0];
+		ucontrol->value.integer.value[1] = dsd_conf->volume[DSD1];
+	}
+
+	return 0;
+}
+
+static const struct snd_kcontrol_new tavil_dsd_vol_controls[] = {
+	{
+	   .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+	   .access = (SNDRV_CTL_ELEM_ACCESS_READWRITE |
+		      SNDRV_CTL_ELEM_ACCESS_TLV_READ),
+	   .name = "DSD Volume",
+	   .info = tavil_dsd_vol_info,
+	   .get = tavil_dsd_vol_get,
+	   .put = tavil_dsd_vol_put,
+	   .tlv = { .p = tavil_dsd_db_scale },
+	},
+};
+
+static const struct snd_soc_dapm_widget tavil_dsd_widgets[] = {
+	SND_SOC_DAPM_MUX("DSD_L IF MUX", SND_SOC_NOPM, 0, 0, &dsd_l_if_mux),
+	SND_SOC_DAPM_MUX_E("DSD_FILTER_0", SND_SOC_NOPM, 0, 0, &dsd_filt0_mux,
+			   tavil_enable_dsd,
+			   SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+
+	SND_SOC_DAPM_MUX("DSD_R IF MUX", SND_SOC_NOPM, 0, 0, &dsd_r_if_mux),
+	SND_SOC_DAPM_MUX_E("DSD_FILTER_1", SND_SOC_NOPM, 1, 0, &dsd_filt1_mux,
+			   tavil_enable_dsd,
+			   SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+};
+
+/**
+ * tavil_dsd_post_ssr_init - DSD initialization after subsystem restart
+ *
+ * @dsd_conf: pointer to tavil_dsd_config
+ *
+ * Returns 0 on success or error code on failure
+ */
+int tavil_dsd_post_ssr_init(struct tavil_dsd_config *dsd_conf)
+{
+	struct snd_soc_codec *codec;
+
+	if (!dsd_conf || !dsd_conf->codec)
+		return -EINVAL;
+
+	codec = dsd_conf->codec;
+	/* Disable DSD Interrupts */
+	snd_soc_update_bits(codec, WCD934X_INTR_CODEC_MISC_MASK, 0x08, 0x08);
+
+	/* DSD registers init */
+	if (dsd_conf->version == TAVIL_VERSION_1_0) {
+		snd_soc_update_bits(codec, WCD934X_CDC_DSD0_CFG2, 0x02, 0x00);
+		snd_soc_update_bits(codec, WCD934X_CDC_DSD1_CFG2, 0x02, 0x00);
+	}
+	/* DSD0: Mute EN */
+	snd_soc_update_bits(codec, WCD934X_CDC_DSD0_CFG2, 0x04, 0x04);
+	/* DSD1: Mute EN */
+	snd_soc_update_bits(codec, WCD934X_CDC_DSD1_CFG2, 0x04, 0x04);
+	snd_soc_update_bits(codec, WCD934X_CDC_DEBUG_DSD0_DEBUG_CFG3, 0x10,
+			    0x10);
+	snd_soc_update_bits(codec, WCD934X_CDC_DEBUG_DSD1_DEBUG_CFG3, 0x10,
+			    0x10);
+	snd_soc_update_bits(codec, WCD934X_CDC_DEBUG_DSD0_DEBUG_CFG0, 0x0E,
+			    0x0A);
+	snd_soc_update_bits(codec, WCD934X_CDC_DEBUG_DSD1_DEBUG_CFG0, 0x0E,
+			    0x0A);
+	snd_soc_update_bits(codec, WCD934X_CDC_DEBUG_DSD0_DEBUG_CFG1, 0x07,
+			    0x04);
+	snd_soc_update_bits(codec, WCD934X_CDC_DEBUG_DSD1_DEBUG_CFG1, 0x07,
+			    0x04);
+
+	/* Enable DSD Interrupts */
+	snd_soc_update_bits(codec, WCD934X_INTR_CODEC_MISC_MASK, 0x08, 0x00);
+
+	return 0;
+}
+EXPORT_SYMBOL(tavil_dsd_post_ssr_init);
+
+/**
+ * tavil_dsd_init - DSD initialization
+ *
+ * @codec: pointer to snd_soc_codec
+ *
+ * Returns a pointer to tavil_dsd_config on success or NULL on failure
+ */
+struct tavil_dsd_config *tavil_dsd_init(struct snd_soc_codec *codec)
+{
+	struct snd_soc_dapm_context *dapm;
+	struct tavil_dsd_config *dsd_conf;
+	u8 val;
+
+	if (!codec)
+		return NULL;
+
+	dapm = snd_soc_codec_get_dapm(codec);
+
+	/* Read efuse register to check if DSD is supported */
+	val = snd_soc_read(codec, WCD934X_CHIP_TIER_CTRL_EFUSE_VAL_OUT14);
+	if (val & 0x80) {
+		dev_info(codec->dev, "%s: DSD unsupported for this codec version\n",
+			 __func__);
+		return NULL;
+	}
+
+	dsd_conf = devm_kzalloc(codec->dev, sizeof(struct tavil_dsd_config),
+				GFP_KERNEL);
+	if (!dsd_conf)
+		return NULL;
+
+	dsd_conf->codec = codec;
+
+	/* Read version */
+	dsd_conf->version = snd_soc_read(codec,
+					 WCD934X_CHIP_TIER_CTRL_CHIP_ID_BYTE0);
+	/* DSD registers init */
+	if (dsd_conf->version == TAVIL_VERSION_1_0) {
+		snd_soc_update_bits(codec, WCD934X_CDC_DSD0_CFG2, 0x02, 0x00);
+		snd_soc_update_bits(codec, WCD934X_CDC_DSD1_CFG2, 0x02, 0x00);
+	}
+	/* DSD0: Mute EN */
+	snd_soc_update_bits(codec, WCD934X_CDC_DSD0_CFG2, 0x04, 0x04);
+	/* DSD1: Mute EN */
+	snd_soc_update_bits(codec, WCD934X_CDC_DSD1_CFG2, 0x04, 0x04);
+	snd_soc_update_bits(codec, WCD934X_CDC_DEBUG_DSD0_DEBUG_CFG3, 0x10,
+			    0x10);
+	snd_soc_update_bits(codec, WCD934X_CDC_DEBUG_DSD1_DEBUG_CFG3, 0x10,
+			    0x10);
+	snd_soc_update_bits(codec, WCD934X_CDC_DEBUG_DSD0_DEBUG_CFG0, 0x0E,
+			    0x0A);
+	snd_soc_update_bits(codec, WCD934X_CDC_DEBUG_DSD1_DEBUG_CFG0, 0x0E,
+			    0x0A);
+	snd_soc_update_bits(codec, WCD934X_CDC_DEBUG_DSD0_DEBUG_CFG1, 0x07,
+			    0x04);
+	snd_soc_update_bits(codec, WCD934X_CDC_DEBUG_DSD1_DEBUG_CFG1, 0x07,
+			    0x04);
+
+	snd_soc_dapm_new_controls(dapm, tavil_dsd_widgets,
+				  ARRAY_SIZE(tavil_dsd_widgets));
+
+	snd_soc_dapm_add_routes(dapm, tavil_dsd_audio_map,
+				ARRAY_SIZE(tavil_dsd_audio_map));
+
+	mutex_init(&dsd_conf->vol_mutex);
+	dsd_conf->volume[DSD0] = DSD_VOLUME_MAX_0dB;
+	dsd_conf->volume[DSD1] = DSD_VOLUME_MAX_0dB;
+
+	snd_soc_add_codec_controls(codec, tavil_dsd_vol_controls,
+				   ARRAY_SIZE(tavil_dsd_vol_controls));
+
+	/* Enable DSD Interrupts */
+	snd_soc_update_bits(codec, WCD934X_INTR_CODEC_MISC_MASK, 0x08, 0x00);
+
+	return dsd_conf;
+}
+EXPORT_SYMBOL(tavil_dsd_init);
+
+/**
+ * tavil_dsd_deinit - DSD de-initialization
+ *
+ * @dsd_conf: pointer to tavil_dsd_config
+ */
+void tavil_dsd_deinit(struct tavil_dsd_config *dsd_conf)
+{
+	struct snd_soc_codec *codec;
+
+	if (!dsd_conf)
+		return;
+
+	codec = dsd_conf->codec;
+
+	mutex_destroy(&dsd_conf->vol_mutex);
+
+	/* Disable DSD Interrupts */
+	snd_soc_update_bits(codec, WCD934X_INTR_CODEC_MISC_MASK, 0x08, 0x08);
+
+	devm_kfree(codec->dev, dsd_conf);
+}
+EXPORT_SYMBOL(tavil_dsd_deinit);
diff -Nruw linux-4.4.115/sound/soc/codecs/wcd934x/wcd934x-dsd.h linux-4.4.115-fbx/sound/soc/codecs/wcd934x/wcd934x-dsd.h
--- linux-4.4.115/sound/soc/codecs/wcd934x/wcd934x-dsd.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/sound/soc/codecs/wcd934x/wcd934x-dsd.h	2019-01-22 16:16:29.551301175 +0100
@@ -0,0 +1,97 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __WCD934X_DSD_H__
+#define __WCD934X_DSD_H__
+
+#include <sound/soc.h>
+#include "wcd934x.h"
+
+enum {
+	DSD0,
+	DSD1,
+	DSD_MAX,
+};
+
+enum {
+	DSD_INP_SEL_ZERO = 0,
+	DSD_INP_SEL_RX0,
+	DSD_INP_SEL_RX1,
+	DSD_INP_SEL_RX2,
+	DSD_INP_SEL_RX3,
+	DSD_INP_SEL_RX4,
+	DSD_INP_SEL_RX5,
+	DSD_INP_SEL_RX6,
+	DSD_INP_SEL_RX7,
+};
+
+struct tavil_dsd_config {
+	struct snd_soc_codec *codec;
+	unsigned int dsd_interp_mixer[INTERP_MAX];
+	u32 base_sample_rate[DSD_MAX];
+	int volume[DSD_MAX];
+	struct mutex vol_mutex;
+	int version;
+};
+
+#ifdef CONFIG_SND_SOC_WCD934X_DSD
+int tavil_dsd_set_mixer_value(struct tavil_dsd_config *dsd_conf,
+			      int interp_num, int sw_value);
+int tavil_dsd_get_current_mixer_value(struct tavil_dsd_config *dsd_conf,
+				      int interp_num);
+int tavil_dsd_set_out_select(struct tavil_dsd_config *dsd_conf,
+			     int interp_num);
+void tavil_dsd_reset(struct tavil_dsd_config *dsd_conf);
+void tavil_dsd_set_interp_rate(struct tavil_dsd_config *dsd_conf, u16 rx_port,
+			       u32 sample_rate, u8 sample_rate_val);
+struct tavil_dsd_config *tavil_dsd_init(struct snd_soc_codec *codec);
+void tavil_dsd_deinit(struct tavil_dsd_config *dsd_config);
+int tavil_dsd_post_ssr_init(struct tavil_dsd_config *dsd_config);
+#else
+static inline int tavil_dsd_set_mixer_value(struct tavil_dsd_config *dsd_conf,
+					    int interp_num, int sw_value)
+{
+	return 0;
+}
+
+static inline int tavil_dsd_get_current_mixer_value(
+					struct tavil_dsd_config *dsd_conf,
+					int interp_num)
+{
+	return 0;
+}
+
+static inline int tavil_dsd_set_out_select(struct tavil_dsd_config *dsd_conf,
+					   int interp_num)
+{
+	return 0;
+}
+
+static inline void tavil_dsd_reset(struct tavil_dsd_config *dsd_conf)
+{
+}
+
+static inline void tavil_dsd_set_interp_rate(struct tavil_dsd_config *dsd_conf,
+					     u16 rx_port, u32 sample_rate,
+					     u8 sample_rate_val)
+{
+}
+
+static inline struct tavil_dsd_config *tavil_dsd_init(
+					struct snd_soc_codec *codec)
+{
+	return NULL;
+}
+
+static inline void tavil_dsd_deinit(struct tavil_dsd_config *dsd_config)
+{
+}
+
+static inline int tavil_dsd_post_ssr_init(struct tavil_dsd_config *dsd_config)
+{
+	return 0;
+}
+#endif
+#endif
diff -Nruw linux-4.4.115/sound/soc/codecs/wcd934x/wcd934x-dsp-cntl.c linux-4.4.115-fbx/sound/soc/codecs/wcd934x/wcd934x-dsp-cntl.c
--- linux-4.4.115/sound/soc/codecs/wcd934x/wcd934x-dsp-cntl.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/sound/soc/codecs/wcd934x/wcd934x-dsp-cntl.c	2019-01-22 16:16:29.551301175 +0100
@@ -0,0 +1,1373 @@
+/*
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/component.h>
+#include <linux/debugfs.h>
+#include <linux/mfd/wcd9xxx/core.h>
+#include <linux/mfd/wcd9xxx/wcd9xxx-irq.h>
+#include <linux/mfd/wcd934x/registers.h>
+#include <sound/soc.h>
+#include <sound/wcd-dsp-mgr.h>
+#include "wcd934x.h"
+#include "wcd934x-dsp-cntl.h"
+
+#define WCD_CNTL_DIR_NAME_LEN_MAX 32
+#define WCD_CPE_FLL_MAX_RETRIES 5
+#define WCD_MEM_ENABLE_MAX_RETRIES 20
+#define WCD_DSP_BOOT_TIMEOUT_MS 3000
+#define WCD_SYSFS_ENTRY_MAX_LEN 8
+#define WCD_PROCFS_ENTRY_MAX_LEN 16
+#define WCD_934X_RAMDUMP_START_ADDR 0x20100000
+#define WCD_934X_RAMDUMP_SIZE ((1024 * 1024) - 128)
+#define WCD_DSP_CNTL_MAX_COUNT 2
+
+#define WCD_CNTL_MUTEX_LOCK(codec, lock)             \
+{                                                    \
+	dev_dbg(codec->dev, "%s: mutex_lock(%s)\n",  \
+		__func__, __stringify_1(lock));      \
+	mutex_lock(&lock);                           \
+}
+
+#define WCD_CNTL_MUTEX_UNLOCK(codec, lock)            \
+{                                                     \
+	dev_dbg(codec->dev, "%s: mutex_unlock(%s)\n", \
+		__func__, __stringify_1(lock));       \
+	mutex_unlock(&lock);                          \
+}
+
+enum wcd_mem_type {
+	WCD_MEM_TYPE_ALWAYS_ON,
+	WCD_MEM_TYPE_SWITCHABLE,
+};
+
+struct wcd_cntl_attribute {
+	struct attribute attr;
+	ssize_t (*show)(struct wcd_dsp_cntl *cntl, char *buf);
+	ssize_t (*store)(struct wcd_dsp_cntl *cntl, const char *buf,
+			 ssize_t count);
+};
+
+#define WCD_CNTL_ATTR(_name, _mode, _show, _store) \
+static struct wcd_cntl_attribute cntl_attr_##_name = {	\
+	.attr = {.name = __stringify(_name), .mode = _mode},	\
+	.show = _show,	\
+	.store = _store,	\
+}
+
+#define to_wcd_cntl_attr(a) \
+	container_of((a), struct wcd_cntl_attribute, attr)
+
+#define to_wcd_cntl(kobj) \
+	container_of((kobj), struct wcd_dsp_cntl, wcd_kobj)
+
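+/*
+ * Writing this sequence releases the memory shutdown bits one bank at
+ * a time (0xFE down to 0x00).
+ */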
+static u8 mem_enable_values[] = {
+	0xFE, 0xFC, 0xF8, 0xF0,
+	0xE0, 0xC0, 0x80, 0x00,
+};
+
+static ssize_t wdsp_boot_show(struct wcd_dsp_cntl *cntl, char *buf)
+{
+	return snprintf(buf, WCD_SYSFS_ENTRY_MAX_LEN,
+			"%u", cntl->boot_reqs);
+}
+
+static ssize_t wdsp_boot_store(struct wcd_dsp_cntl *cntl,
+			       const char *buf, ssize_t count)
+{
+	u32 val;
+	bool vote;
+	int ret;
+
+	ret = kstrtou32(buf, 10, &val);
+	if (ret) {
+		dev_err(cntl->codec->dev,
+			"%s: Invalid entry, ret = %d\n", __func__, ret);
+		return -EINVAL;
+	}
+
+	if (val > 0) {
+		cntl->boot_reqs++;
+		vote = true;
+	} else {
+		cntl->boot_reqs--;
+		vote = false;
+	}
+
+	if (cntl->m_dev && cntl->m_ops &&
+	    cntl->m_ops->vote_for_dsp)
+		ret = cntl->m_ops->vote_for_dsp(cntl->m_dev, vote);
+	else
+		ret = -EINVAL;
+
+	if (IS_ERR_VALUE(ret))
+		dev_err(cntl->codec->dev,
+			"%s: failed to %s dsp\n", __func__,
+			vote ? "enable" : "disable");
+	return count;
+}
+
+WCD_CNTL_ATTR(boot, 0660, wdsp_boot_show, wdsp_boot_store);
+
+static ssize_t wcd_cntl_sysfs_show(struct kobject *kobj,
+				   struct attribute *attr, char *buf)
+{
+	struct wcd_cntl_attribute *wcd_attr = to_wcd_cntl_attr(attr);
+	struct wcd_dsp_cntl *cntl = to_wcd_cntl(kobj);
+	ssize_t ret = -EINVAL;
+
+	if (cntl && wcd_attr->show)
+		ret = wcd_attr->show(cntl, buf);
+
+	return ret;
+}
+
+static ssize_t wcd_cntl_sysfs_store(struct kobject *kobj,
+				    struct attribute *attr, const char *buf,
+				    size_t count)
+{
+	struct wcd_cntl_attribute *wcd_attr = to_wcd_cntl_attr(attr);
+	struct wcd_dsp_cntl *cntl = to_wcd_cntl(kobj);
+	ssize_t ret = -EINVAL;
+
+	if (cntl && wcd_attr->store)
+		ret = wcd_attr->store(cntl, buf, count);
+
+	return ret;
+}
+
+static const struct sysfs_ops wcd_cntl_sysfs_ops = {
+	.show = wcd_cntl_sysfs_show,
+	.store = wcd_cntl_sysfs_store,
+};
+
+static struct kobj_type wcd_cntl_ktype = {
+	.sysfs_ops = &wcd_cntl_sysfs_ops,
+};
+
+static void wcd_cntl_change_online_state(struct wcd_dsp_cntl *cntl,
+					 u8 online)
+{
+	struct wdsp_ssr_entry *ssr_entry = &cntl->ssr_entry;
+	unsigned long ret;
+
+	WCD_CNTL_MUTEX_LOCK(cntl->codec, cntl->ssr_mutex);
+	ssr_entry->offline = !online;
+	/* Make sure the write is complete */
+	wmb();
+	ret = xchg(&ssr_entry->offline_change, 1);
+	wake_up_interruptible(&ssr_entry->offline_poll_wait);
+	dev_dbg(cntl->codec->dev,
+		"%s: requested %u, offline %u offline_change %u, ret = %ld\n",
+		__func__, online, ssr_entry->offline,
+		ssr_entry->offline_change, ret);
+	WCD_CNTL_MUTEX_UNLOCK(cntl->codec, cntl->ssr_mutex);
+}
+
+static ssize_t wdsp_ssr_entry_read(struct snd_info_entry *entry,
+				   void *file_priv_data, struct file *file,
+				   char __user *buf, size_t count, loff_t pos)
+{
+	int len = 0;
+	char buffer[WCD_PROCFS_ENTRY_MAX_LEN];
+	struct wcd_dsp_cntl *cntl;
+	struct wdsp_ssr_entry *ssr_entry;
+	ssize_t ret;
+	u8 offline;
+
+	cntl = (struct wcd_dsp_cntl *) entry->private_data;
+	if (!cntl) {
+		pr_err("%s: Invalid private data for SSR procfs entry\n",
+		       __func__);
+		return -EINVAL;
+	}
+
+	ssr_entry = &cntl->ssr_entry;
+
+	WCD_CNTL_MUTEX_LOCK(cntl->codec, cntl->ssr_mutex);
+	offline = ssr_entry->offline;
+	/* Make sure the read is complete */
+	rmb();
+	dev_dbg(cntl->codec->dev, "%s: offline = %s\n", __func__,
+		offline ? "true" : "false");
+	len = snprintf(buffer, sizeof(buffer), "%s\n",
+		       offline ? "OFFLINE" : "ONLINE");
+	ret = simple_read_from_buffer(buf, count, &pos, buffer, len);
+	WCD_CNTL_MUTEX_UNLOCK(cntl->codec, cntl->ssr_mutex);
+
+	return ret;
+}
+
+static unsigned int wdsp_ssr_entry_poll(struct snd_info_entry *entry,
+					void *private_data, struct file *file,
+					poll_table *wait)
+{
+	struct wcd_dsp_cntl *cntl;
+	struct wdsp_ssr_entry *ssr_entry;
+	unsigned int ret = 0;
+
+	if (!entry || !entry->private_data) {
+		pr_err("%s: %s is NULL\n", __func__,
+		       (!entry) ? "entry" : "private_data");
+		return POLLERR;
+	}
+
+	cntl = (struct wcd_dsp_cntl *) entry->private_data;
+	ssr_entry = &cntl->ssr_entry;
+
+	dev_dbg(cntl->codec->dev, "%s: Poll wait, offline = %u\n",
+		__func__, ssr_entry->offline);
+	poll_wait(file, &ssr_entry->offline_poll_wait, wait);
+	dev_dbg(cntl->codec->dev, "%s: Woken up Poll wait, offline = %u\n",
+		__func__, ssr_entry->offline);
+
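+	/* Atomically consume the offline-change flag set by the writer */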
+	WCD_CNTL_MUTEX_LOCK(cntl->codec, cntl->ssr_mutex);
+	if (xchg(&ssr_entry->offline_change, 0))
+		ret = POLLIN | POLLPRI | POLLRDNORM;
+	dev_dbg(cntl->codec->dev, "%s: ret (%d) from poll_wait\n",
+		__func__, ret);
+	WCD_CNTL_MUTEX_UNLOCK(cntl->codec, cntl->ssr_mutex);
+
+	return ret;
+}
+
+static struct snd_info_entry_ops wdsp_ssr_entry_ops = {
+	.read = wdsp_ssr_entry_read,
+	.poll = wdsp_ssr_entry_poll,
+};
+
+static int wcd_cntl_cpe_fll_calibrate(struct wcd_dsp_cntl *cntl)
+{
+	struct snd_soc_codec *codec = cntl->codec;
+	int ret = 0, retry = 0;
+	u8 cal_lsb, cal_msb;
+	u8 lock_det;
+
+	/* Make sure clocks are gated */
+	snd_soc_update_bits(codec, WCD934X_CPE_SS_CPE_CTL,
+			    0x05, 0x00);
+
+	/* Enable CPE FLL reference clock */
+	snd_soc_update_bits(codec, WCD934X_CLK_SYS_MCLK2_PRG1,
+			    0x80, 0x80);
+
+	snd_soc_update_bits(codec, WCD934X_CPE_FLL_USER_CTL_5,
+			    0xF3, 0x13);
+	snd_soc_write(codec, WCD934X_CPE_FLL_L_VAL_CTL_0, 0x50);
+
+	/* Disable CPAR reset and Enable CPAR clk */
+	snd_soc_update_bits(codec, WCD934X_CPE_SS_CPAR_CTL,
+			    0x02, 0x02);
+
+	/* Write calibration l-value based on cdc clk rate */
+	if (cntl->clk_rate == 9600000) {
+		cal_lsb = 0x6d;
+		cal_msb = 0x00;
+	} else {
+		cal_lsb = 0x56;
+		cal_msb = 0x00;
+	}
+	snd_soc_write(codec, WCD934X_CPE_FLL_USER_CTL_6, cal_lsb);
+	snd_soc_write(codec, WCD934X_CPE_FLL_USER_CTL_7, cal_msb);
+
+	/* FLL mode to follow power up sequence */
+	snd_soc_update_bits(codec, WCD934X_CPE_FLL_FLL_MODE,
+			    0x60, 0x00);
+
+	/* HW controlled CPE FLL */
+	snd_soc_update_bits(codec, WCD934X_CPE_FLL_FLL_MODE,
+			    0x80, 0x80);
+
+	/* Force on CPE FLL */
+	snd_soc_update_bits(codec, WCD934X_CPE_SS_CPAR_CFG,
+			    0x04, 0x04);
+
+	do {
+		/* Time for FLL calibration to complete */
+		usleep_range(1000, 1100);
+		lock_det = snd_soc_read(codec, WCD934X_CPE_FLL_STATUS_3);
+		retry++;
+	} while (!(lock_det & 0x01) &&
+		 retry <= WCD_CPE_FLL_MAX_RETRIES);
+
+	if (!(lock_det & 0x01)) {
+		dev_err(codec->dev, "%s: lock detect not set, 0x%02x\n",
+			__func__, lock_det);
+		ret = -EIO;
+		goto err_lock_det;
+	}
+
+	snd_soc_update_bits(codec, WCD934X_CPE_FLL_FLL_MODE,
+			    0x60, 0x20);
+	snd_soc_update_bits(codec, WCD934X_CPE_SS_CPAR_CFG,
+			    0x04, 0x00);
+	return ret;
+
+err_lock_det:
+	/* Undo the register settings */
+	snd_soc_update_bits(codec, WCD934X_CPE_SS_CPAR_CFG,
+			    0x04, 0x00);
+	snd_soc_update_bits(codec, WCD934X_CPE_FLL_FLL_MODE,
+			    0x80, 0x00);
+	snd_soc_update_bits(codec, WCD934X_CPE_SS_CPAR_CTL,
+			    0x02, 0x00);
+	return ret;
+}
+
+static void wcd_cntl_config_cpar(struct wcd_dsp_cntl *cntl)
+{
+	struct snd_soc_codec *codec = cntl->codec;
+	u8 nom_lo, nom_hi, svs2_lo, svs2_hi;
+
+	/* Configure CPAR */
+	nom_hi = svs2_hi = 0;
+	if (cntl->clk_rate == 9600000) {
+		nom_lo = 0x90;
+		svs2_lo = 0x50;
+	} else {
+		nom_lo = 0x70;
+		svs2_lo = 0x3e;
+	}
+
+	snd_soc_write(codec, WCD934X_TEST_DEBUG_LVAL_NOM_LOW, nom_lo);
+	snd_soc_write(codec, WCD934X_TEST_DEBUG_LVAL_NOM_HIGH, nom_hi);
+	snd_soc_write(codec, WCD934X_TEST_DEBUG_LVAL_SVS_SVS2_LOW, svs2_lo);
+	snd_soc_write(codec, WCD934X_TEST_DEBUG_LVAL_SVS_SVS2_HIGH, svs2_hi);
+
+	snd_soc_update_bits(codec, WCD934X_CPE_SS_PWR_CPEFLL_CTL,
+			    0x03, 0x03);
+}
+
+static int wcd_cntl_cpe_fll_ctrl(struct wcd_dsp_cntl *cntl,
+				 bool enable)
+{
+	struct snd_soc_codec *codec = cntl->codec;
+	int ret = 0;
+
+	if (enable) {
+		ret = wcd_cntl_cpe_fll_calibrate(cntl);
+		if (IS_ERR_VALUE(ret)) {
+			dev_err(codec->dev,
+				"%s: cpe_fll_cal failed, err = %d\n",
+				__func__, ret);
+			goto done;
+		}
+
+		wcd_cntl_config_cpar(cntl);
+
+		/* Enable AHB CLK and CPE CLK*/
+		snd_soc_update_bits(codec, WCD934X_CPE_SS_CPE_CTL,
+				    0x05, 0x05);
+	} else {
+		/* Disable AHB CLK and CPE CLK */
+		snd_soc_update_bits(codec, WCD934X_CPE_SS_CPE_CTL,
+				    0x05, 0x00);
+		/* Reset the CPAR mode for CPE FLL */
+		snd_soc_write(codec, WCD934X_CPE_FLL_FLL_MODE, 0x20);
+		snd_soc_update_bits(codec, WCD934X_CPE_SS_CPAR_CFG,
+				    0x04, 0x00);
+		snd_soc_update_bits(codec, WCD934X_CPE_SS_CPAR_CTL,
+				    0x02, 0x00);
+	}
+done:
+	return ret;
+}
+
+static int wcd_cntl_clocks_enable(struct wcd_dsp_cntl *cntl)
+{
+	struct snd_soc_codec *codec = cntl->codec;
+	int ret;
+
+	WCD_CNTL_MUTEX_LOCK(codec, cntl->clk_mutex);
+	/* Enable codec clock */
+	if (cntl->cdc_cb && cntl->cdc_cb->cdc_clk_en)
+		ret = cntl->cdc_cb->cdc_clk_en(codec, true);
+	else
+		ret = -EINVAL;
+
+	if (IS_ERR_VALUE(ret)) {
+		dev_err(codec->dev,
+			"%s: Failed to enable cdc clk, err = %d\n",
+			__func__, ret);
+		goto done;
+	}
+	/* Pull CPAR out of reset */
+	snd_soc_update_bits(codec, WCD934X_CPE_SS_CPAR_CTL, 0x04, 0x00);
+
+	/* Configure and Enable CPE FLL clock */
+	ret = wcd_cntl_cpe_fll_ctrl(cntl, true);
+	if (IS_ERR_VALUE(ret)) {
+		dev_err(codec->dev,
+			"%s: Failed to enable cpe clk, err = %d\n",
+			__func__, ret);
+		goto err_cpe_clk;
+	}
+	cntl->is_clk_enabled = true;
+
+	/* Ungate the CPR clock  */
+	snd_soc_update_bits(codec, WCD934X_CODEC_RPM_CLK_GATE, 0x10, 0x00);
+done:
+	WCD_CNTL_MUTEX_UNLOCK(codec, cntl->clk_mutex);
+	return ret;
+
+err_cpe_clk:
+	if (cntl->cdc_cb && cntl->cdc_cb->cdc_clk_en)
+		cntl->cdc_cb->cdc_clk_en(codec, false);
+
+	snd_soc_update_bits(codec, WCD934X_CPE_SS_CPAR_CTL, 0x04, 0x04);
+	WCD_CNTL_MUTEX_UNLOCK(codec, cntl->clk_mutex);
+	return ret;
+}
+
+static int wcd_cntl_clocks_disable(struct wcd_dsp_cntl *cntl)
+{
+	struct snd_soc_codec *codec = cntl->codec;
+	int ret = 0;
+
+	WCD_CNTL_MUTEX_LOCK(codec, cntl->clk_mutex);
+	if (!cntl->is_clk_enabled) {
+		dev_info(codec->dev, "%s: clocks already disabled\n",
+			__func__);
+		goto done;
+	}
+
+	/* Gate the CPR clock  */
+	snd_soc_update_bits(codec, WCD934X_CODEC_RPM_CLK_GATE, 0x10, 0x10);
+
+	/* Disable CPE FLL clock */
+	ret = wcd_cntl_cpe_fll_ctrl(cntl, false);
+	if (IS_ERR_VALUE(ret))
+		dev_err(codec->dev,
+			"%s: Failed to disable cpe clk, err = %d\n",
+			__func__, ret);
+
+	/*
+	 * Even if CPE FLL disable failed, go ahead and disable
+	 * the codec clock
+	 */
+	if (cntl->cdc_cb && cntl->cdc_cb->cdc_clk_en)
+		ret = cntl->cdc_cb->cdc_clk_en(codec, false);
+	else
+		ret = -EINVAL;
+
+	cntl->is_clk_enabled = false;
+
+	/* Put CPAR in reset */
+	snd_soc_update_bits(codec, WCD934X_CPE_SS_CPAR_CTL, 0x04, 0x04);
+done:
+	WCD_CNTL_MUTEX_UNLOCK(codec, cntl->clk_mutex);
+	return ret;
+}
+
+static void wcd_cntl_cpar_ctrl(struct wcd_dsp_cntl *cntl,
+			       bool enable)
+{
+	struct snd_soc_codec *codec = cntl->codec;
+
+	if (enable)
+		snd_soc_update_bits(codec, WCD934X_CPE_SS_CPAR_CTL, 0x03, 0x03);
+	else
+		snd_soc_update_bits(codec, WCD934X_CPE_SS_CPAR_CTL, 0x03, 0x00);
+}
+
+static int wcd_cntl_enable_memory(struct wcd_dsp_cntl *cntl,
+				  enum wcd_mem_type mem_type)
+{
+	struct snd_soc_codec *codec = cntl->codec;
+	struct wcd9xxx *wcd9xxx = dev_get_drvdata(codec->dev->parent);
+	int loop_cnt = 0;
+	u8 status;
+	int ret = 0;
+
+	switch (mem_type) {
+	case WCD_MEM_TYPE_ALWAYS_ON:
+		/* 512KB of always on region */
+		wcd9xxx_slim_write_repeat(wcd9xxx,
+				WCD934X_CPE_SS_PWR_CPE_SYSMEM_SHUTDOWN_0,
+				ARRAY_SIZE(mem_enable_values),
+				mem_enable_values);
+		wcd9xxx_slim_write_repeat(wcd9xxx,
+				WCD934X_CPE_SS_PWR_CPE_SYSMEM_SHUTDOWN_1,
+				ARRAY_SIZE(mem_enable_values),
+				mem_enable_values);
+		break;
+
+	case WCD_MEM_TYPE_SWITCHABLE:
+		snd_soc_update_bits(codec, WCD934X_CPE_SS_SOC_SW_COLLAPSE_CTL,
+				    0x04, 0x00);
+		snd_soc_update_bits(codec, WCD934X_TEST_DEBUG_MEM_CTRL,
+				    0x80, 0x80);
+		snd_soc_update_bits(codec, WCD934X_CPE_SS_SOC_SW_COLLAPSE_CTL,
+				    0x01, 0x01);
+		do {
+			loop_cnt++;
+			/* Allow time for the memory power domain to come up */
+			usleep_range(100, 150);
+			status = snd_soc_read(codec,
+					WCD934X_CPE_SS_SOC_SW_COLLAPSE_CTL);
+		} while ((status & 0x02) != 0x02 &&
+			  loop_cnt != WCD_MEM_ENABLE_MAX_RETRIES);
+
+		if ((status & 0x02) != 0x02) {
+			dev_err(cntl->codec->dev,
+				"%s: power domain not enabled, status = 0x%02x\n",
+				__func__, status);
+			ret = -EIO;
+			goto done;
+		}
+
+		/* Rest of the memory */
+		wcd9xxx_slim_write_repeat(wcd9xxx,
+				WCD934X_CPE_SS_PWR_CPE_SYSMEM_SHUTDOWN_2,
+				ARRAY_SIZE(mem_enable_values),
+				mem_enable_values);
+		wcd9xxx_slim_write_repeat(wcd9xxx,
+				WCD934X_CPE_SS_PWR_CPE_SYSMEM_SHUTDOWN_3,
+				ARRAY_SIZE(mem_enable_values),
+				mem_enable_values);
+
+		snd_soc_write(codec, WCD934X_CPE_SS_PWR_CPE_DRAM1_SHUTDOWN,
+			      0x05);
+		break;
+
+	default:
+		dev_err(cntl->codec->dev, "%s: Invalid mem_type %d\n",
+			__func__, mem_type);
+		ret = -EINVAL;
+		break;
+	}
+done:
+	/* Make sure Deep sleep of memories is enabled for all banks */
+	snd_soc_write(codec, WCD934X_CPE_SS_PWR_CPE_SYSMEM_DEEPSLP_0, 0xFF);
+	snd_soc_write(codec, WCD934X_CPE_SS_PWR_CPE_SYSMEM_DEEPSLP_1, 0x0F);
+
+	return ret;
+}
+
+static void wcd_cntl_disable_memory(struct wcd_dsp_cntl *cntl,
+				    enum wcd_mem_type mem_type)
+{
+	struct snd_soc_codec *codec = cntl->codec;
+	u8 val;
+
+	switch (mem_type) {
+	case WCD_MEM_TYPE_ALWAYS_ON:
+		snd_soc_write(codec, WCD934X_CPE_SS_PWR_CPE_SYSMEM_SHUTDOWN_1,
+			      0xFF);
+		snd_soc_write(codec, WCD934X_CPE_SS_PWR_CPE_SYSMEM_SHUTDOWN_0,
+			      0xFF);
+		break;
+	case WCD_MEM_TYPE_SWITCHABLE:
+		snd_soc_write(codec, WCD934X_CPE_SS_PWR_CPE_SYSMEM_SHUTDOWN_3,
+			      0xFF);
+		snd_soc_write(codec, WCD934X_CPE_SS_PWR_CPE_SYSMEM_SHUTDOWN_2,
+			      0xFF);
+		snd_soc_write(codec, WCD934X_CPE_SS_PWR_CPE_DRAM1_SHUTDOWN,
+			      0x07);
+
+		snd_soc_update_bits(codec, WCD934X_CPE_SS_SOC_SW_COLLAPSE_CTL,
+				    0x01, 0x00);
+		val = snd_soc_read(codec, WCD934X_CPE_SS_SOC_SW_COLLAPSE_CTL);
+		if (val & 0x02)
+			dev_err(codec->dev,
+				"%s: Disable switchable failed, val = 0x%02x",
+				__func__, val);
+
+		snd_soc_update_bits(codec, WCD934X_TEST_DEBUG_MEM_CTRL,
+				    0x80, 0x00);
+		break;
+	default:
+		dev_err(cntl->codec->dev, "%s: Invalid mem_type %d\n",
+			__func__, mem_type);
+		break;
+	}
+
+	snd_soc_write(codec, WCD934X_CPE_SS_PWR_CPE_SYSMEM_DEEPSLP_0, 0xFF);
+	snd_soc_write(codec, WCD934X_CPE_SS_PWR_CPE_SYSMEM_DEEPSLP_1, 0x0F);
+}
+
+static void wcd_cntl_do_shutdown(struct wcd_dsp_cntl *cntl)
+{
+	struct snd_soc_codec *codec = cntl->codec;
+
+	/* Disable WDOG */
+	snd_soc_update_bits(codec, WCD934X_CPE_SS_WDOG_CFG,
+			    0x3F, 0x01);
+
+	/* Put WDSP in reset state */
+	snd_soc_update_bits(codec, WCD934X_CPE_SS_CPE_CTL,
+			    0x02, 0x00);
+
+	/* If DSP transitions from boot to shutdown, then vote for SVS */
+	if (cntl->is_wdsp_booted)
+		cntl->cdc_cb->cdc_vote_svs(codec, true);
+	cntl->is_wdsp_booted = false;
+}
+
+static int wcd_cntl_do_boot(struct wcd_dsp_cntl *cntl)
+{
+	struct snd_soc_codec *codec = cntl->codec;
+	int ret = 0;
+
+	/*
+	 * Debug mode is set from debugfs file node. If debug_mode
+	 * is set, then do not configure the watchdog timer. This
+	 * will be required for debugging the DSP firmware.
+	 */
+	if (cntl->debug_mode) {
+		snd_soc_update_bits(codec, WCD934X_CPE_SS_WDOG_CFG,
+				    0x3F, 0x01);
+	} else {
+		snd_soc_update_bits(codec, WCD934X_CPE_SS_WDOG_CFG,
+				    0x3F, 0x21);
+	}
+
+	/* Make sure all the error interrupts are cleared */
+	snd_soc_write(codec, WCD934X_CPE_SS_SS_ERROR_INT_CLEAR_0A, 0xFF);
+	snd_soc_write(codec, WCD934X_CPE_SS_SS_ERROR_INT_CLEAR_0B, 0xFF);
+
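+	/* This completion is signaled from the CPE IPC1 interrupt handler */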
+	reinit_completion(&cntl->boot_complete);
+
+	/* Remove WDSP out of reset */
+	snd_soc_update_bits(codec, WCD934X_CPE_SS_CPE_CTL,
+			    0x02, 0x02);
+
+	/*
+	 * In debug mode, DSP may not boot up normally,
+	 * wait indefinitely for DSP to boot.
+	 */
+	if (cntl->debug_mode) {
+		wait_for_completion(&cntl->boot_complete);
+		dev_dbg(codec->dev, "%s: WDSP booted in dbg mode\n", __func__);
+		cntl->is_wdsp_booted = true;
+		goto done;
+	}
+
+	/* Boot in normal mode */
+	ret = wait_for_completion_timeout(&cntl->boot_complete,
+				msecs_to_jiffies(WCD_DSP_BOOT_TIMEOUT_MS));
+	if (!ret) {
+		dev_err(codec->dev, "%s: WDSP boot timed out\n",
+			__func__);
+		ret = -ETIMEDOUT;
+		goto err_boot;
+	} else {
+		/*
+		 * Re-initialize the return code to 0, as in success case,
+		 * it will hold the remaining time for completion timeout
+		 */
+		ret = 0;
+	}
+
+	dev_dbg(codec->dev, "%s: WDSP booted in normal mode\n", __func__);
+	cntl->is_wdsp_booted = true;
+
+	/* Enable WDOG */
+	snd_soc_update_bits(codec, WCD934X_CPE_SS_WDOG_CFG,
+			    0x10, 0x10);
+done:
+	/* If dsp booted up, then remove vote on SVS */
+	if (cntl->is_wdsp_booted)
+		cntl->cdc_cb->cdc_vote_svs(codec, false);
+
+	return ret;
+err_boot:
+	/* call shutdown to perform cleanup */
+	wcd_cntl_do_shutdown(cntl);
+	return ret;
+}
+
+static irqreturn_t wcd_cntl_ipc_irq(int irq, void *data)
+{
+	struct wcd_dsp_cntl *cntl = data;
+	int ret;
+
+	complete(&cntl->boot_complete);
+
+	if (cntl->m_dev && cntl->m_ops &&
+	    cntl->m_ops->signal_handler)
+		ret = cntl->m_ops->signal_handler(cntl->m_dev, WDSP_IPC1_INTR,
+						  NULL);
+	else
+		ret = -EINVAL;
+
+	if (IS_ERR_VALUE(ret))
+		dev_err(cntl->codec->dev,
+			"%s: Failed to handle irq %d\n", __func__, irq);
+
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t wcd_cntl_err_irq(int irq, void *data)
+{
+	struct wcd_dsp_cntl *cntl = data;
+	struct snd_soc_codec *codec = cntl->codec;
+	struct wdsp_err_signal_arg arg;
+	u16 status = 0;
+	u8 reg_val;
+	int ret = 0;
+
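+	/* Combine the two 8-bit error status registers into a 16-bit word */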
+	reg_val = snd_soc_read(codec, WCD934X_CPE_SS_SS_ERROR_INT_STATUS_0A);
+	status = status | reg_val;
+
+	reg_val = snd_soc_read(codec, WCD934X_CPE_SS_SS_ERROR_INT_STATUS_0B);
+	status = status | (reg_val << 8);
+
+	dev_info(codec->dev, "%s: error interrupt status = 0x%x\n",
+		__func__, status);
+
+	if ((status & cntl->irqs.fatal_irqs) &&
+	    (cntl->m_dev && cntl->m_ops && cntl->m_ops->signal_handler)) {
+		arg.mem_dumps_enabled = cntl->ramdump_enable;
+		arg.remote_start_addr = WCD_934X_RAMDUMP_START_ADDR;
+		arg.dump_size = WCD_934X_RAMDUMP_SIZE;
+		ret = cntl->m_ops->signal_handler(cntl->m_dev, WDSP_ERR_INTR,
+						  &arg);
+		if (IS_ERR_VALUE(ret))
+			dev_err(cntl->codec->dev,
+				"%s: Failed to handle fatal irq 0x%x\n",
+				__func__, status & cntl->irqs.fatal_irqs);
+		wcd_cntl_change_online_state(cntl, 0);
+	} else {
+		dev_err(cntl->codec->dev, "%s: Invalid signal_handler\n",
+			__func__);
+	}
+
+	return IRQ_HANDLED;
+}
+
+static int wcd_control_handler(struct device *dev, void *priv_data,
+			       enum wdsp_event_type event, void *data)
+{
+	struct wcd_dsp_cntl *cntl = priv_data;
+	struct snd_soc_codec *codec = cntl->codec;
+	int ret = 0;
+
+	switch (event) {
+	case WDSP_EVENT_POST_INIT:
+	case WDSP_EVENT_POST_DLOAD_CODE:
+	case WDSP_EVENT_DLOAD_FAILED:
+	case WDSP_EVENT_POST_SHUTDOWN:
+
+		/* Disable CPAR */
+		wcd_cntl_cpar_ctrl(cntl, false);
+		/* Disable all the clocks */
+		ret = wcd_cntl_clocks_disable(cntl);
+		if (IS_ERR_VALUE(ret))
+			dev_err(codec->dev,
+				"%s: Failed to disable clocks, err = %d\n",
+				__func__, ret);
+
+		if (event == WDSP_EVENT_POST_DLOAD_CODE)
+			/* Mark DSP online since code download is complete */
+			wcd_cntl_change_online_state(cntl, 1);
+
+		break;
+
+	case WDSP_EVENT_PRE_DLOAD_DATA:
+	case WDSP_EVENT_PRE_DLOAD_CODE:
+
+		/* Enable all the clocks */
+		ret = wcd_cntl_clocks_enable(cntl);
+		if (IS_ERR_VALUE(ret)) {
+			dev_err(codec->dev,
+				"%s: Failed to enable clocks, err = %d\n",
+				__func__, ret);
+			goto done;
+		}
+
+		/* Enable CPAR */
+		wcd_cntl_cpar_ctrl(cntl, true);
+
+		if (event == WDSP_EVENT_PRE_DLOAD_CODE)
+			wcd_cntl_enable_memory(cntl, WCD_MEM_TYPE_ALWAYS_ON);
+		else if (event == WDSP_EVENT_PRE_DLOAD_DATA)
+			wcd_cntl_enable_memory(cntl, WCD_MEM_TYPE_SWITCHABLE);
+		break;
+
+	case WDSP_EVENT_DO_BOOT:
+
+		ret = wcd_cntl_do_boot(cntl);
+		if (IS_ERR_VALUE(ret))
+			dev_err(codec->dev,
+				"%s: WDSP boot failed, err = %d\n",
+				__func__, ret);
+		break;
+
+	case WDSP_EVENT_DO_SHUTDOWN:
+
+		wcd_cntl_do_shutdown(cntl);
+		wcd_cntl_disable_memory(cntl, WCD_MEM_TYPE_SWITCHABLE);
+		break;
+
+	default:
+		dev_dbg(codec->dev, "%s: unhandled event %d\n",
+			__func__, event);
+	}
+
+done:
+	return ret;
+}
+
+static int wcd_cntl_sysfs_init(char *dir, struct wcd_dsp_cntl *cntl)
+{
+	struct snd_soc_codec *codec = cntl->codec;
+	int ret = 0;
+
+	ret = kobject_init_and_add(&cntl->wcd_kobj, &wcd_cntl_ktype,
+				   kernel_kobj, dir);
+	if (IS_ERR_VALUE(ret)) {
+		dev_err(codec->dev,
+			"%s: Failed to add kobject %s, err = %d\n",
+			__func__, dir, ret);
+		goto done;
+	}
+
+	ret = sysfs_create_file(&cntl->wcd_kobj, &cntl_attr_boot.attr);
+	if (IS_ERR_VALUE(ret)) {
+		dev_err(codec->dev,
+			"%s: Failed to add wdsp_boot sysfs entry to %s\n",
+			__func__, dir);
+		goto fail_create_file;
+	}
+
+	return ret;
+
+fail_create_file:
+	kobject_put(&cntl->wcd_kobj);
+done:
+	return ret;
+}
+
+static void wcd_cntl_sysfs_remove(struct wcd_dsp_cntl *cntl)
+{
+	sysfs_remove_file(&cntl->wcd_kobj, &cntl_attr_boot.attr);
+	kobject_put(&cntl->wcd_kobj);
+}
+
+static void wcd_cntl_debugfs_init(char *dir, struct wcd_dsp_cntl *cntl)
+{
+	struct snd_soc_codec *codec = cntl->codec;
+
+	cntl->entry = debugfs_create_dir(dir, NULL);
+	if (IS_ERR_OR_NULL(dir)) {
+		dev_err(codec->dev, "%s debugfs_create_dir failed for %s\n",
+			__func__, dir);
+		goto done;
+	}
+
+	debugfs_create_u32("debug_mode", S_IRUGO | S_IWUSR,
+			   cntl->entry, &cntl->debug_mode);
+	debugfs_create_bool("ramdump_enable", S_IRUGO | S_IWUSR,
+			    cntl->entry, &cntl->ramdump_enable);
+done:
+	return;
+}
+
+static void wcd_cntl_debugfs_remove(struct wcd_dsp_cntl *cntl)
+{
+	if (cntl)
+		debugfs_remove(cntl->entry);
+}
+
+static int wcd_miscdev_release(struct inode *inode, struct file *filep)
+{
+	struct wcd_dsp_cntl *cntl = container_of(filep->private_data,
+						 struct wcd_dsp_cntl, miscdev);
+	if (!cntl->m_dev || !cntl->m_ops ||
+	    !cntl->m_ops->vote_for_dsp) {
+		dev_err(cntl->codec->dev,
+			"%s: DSP not ready to boot\n", __func__);
+		return -EINVAL;
+	}
+
+	/* Make sure the DSP user count goes to zero upon closing the dev node */
+	while (cntl->boot_reqs > 0) {
+		cntl->m_ops->vote_for_dsp(cntl->m_dev, false);
+		cntl->boot_reqs--;
+	}
+
+	return 0;
+}
+
+static ssize_t wcd_miscdev_write(struct file *filep, const char __user *ubuf,
+				 size_t count, loff_t *pos)
+{
+	struct wcd_dsp_cntl *cntl = container_of(filep->private_data,
+						 struct wcd_dsp_cntl, miscdev);
+	char val[WCD_DSP_CNTL_MAX_COUNT + 1];
+	bool vote;
+	int ret = 0;
+
+	memset(val, 0, WCD_DSP_CNTL_MAX_COUNT + 1);
+
+	if (count == 0 || count > WCD_DSP_CNTL_MAX_COUNT) {
+		pr_err("%s: Invalid count = %zd\n", __func__, count);
+		ret = -EINVAL;
+		goto done;
+	}
+
+	ret = copy_from_user(val, ubuf, count);
+	if (ret) {
+		dev_err(cntl->codec->dev,
+			"%s: copy_from_user failed, err = %d\n",
+			__func__, ret);
+		ret = -EFAULT;
+		goto done;
+	}
+
+	if (val[0] == '1') {
+		cntl->boot_reqs++;
+		vote = true;
+	} else if (val[0] == '0') {
+		if (cntl->boot_reqs == 0) {
+			dev_err(cntl->codec->dev,
+				"%s: WDSP already disabled\n", __func__);
+			ret = -EINVAL;
+			goto done;
+		}
+		cntl->boot_reqs--;
+		vote = false;
+	} else {
+		dev_err(cntl->codec->dev, "%s: Invalid value %s\n",
+			__func__, val);
+		ret = -EINVAL;
+		goto done;
+	}
+
+	dev_dbg(cntl->codec->dev,
+		"%s: booted = %s, ref_cnt = %d, vote = %s\n",
+		__func__, cntl->is_wdsp_booted ? "true" : "false",
+		cntl->boot_reqs, vote ? "true" : "false");
+
+	if (cntl->m_dev && cntl->m_ops &&
+	    cntl->m_ops->vote_for_dsp)
+		ret = cntl->m_ops->vote_for_dsp(cntl->m_dev, vote);
+	else
+		ret = -EINVAL;
+done:
+	if (ret)
+		return ret;
+	else
+		return count;
+}
+
+static const struct file_operations wcd_miscdev_fops = {
+	.write = wcd_miscdev_write,
+	.release = wcd_miscdev_release,
+};
+
+static int wcd_cntl_miscdev_create(struct wcd_dsp_cntl *cntl)
+{
+	snprintf(cntl->miscdev_name, ARRAY_SIZE(cntl->miscdev_name),
+		"wcd_dsp%u_control", cntl->dsp_instance);
+	cntl->miscdev.minor = MISC_DYNAMIC_MINOR;
+	cntl->miscdev.name = cntl->miscdev_name;
+	cntl->miscdev.fops = &wcd_miscdev_fops;
+	cntl->miscdev.parent = cntl->codec->dev;
+
+	return misc_register(&cntl->miscdev);
+}
+
+static void wcd_cntl_miscdev_destroy(struct wcd_dsp_cntl *cntl)
+{
+	misc_deregister(&cntl->miscdev);
+}
+
+static int wcd_control_init(struct device *dev, void *priv_data)
+{
+	struct wcd_dsp_cntl *cntl = priv_data;
+	struct snd_soc_codec *codec = cntl->codec;
+	struct wcd9xxx *wcd9xxx = dev_get_drvdata(codec->dev->parent);
+	struct wcd9xxx_core_resource *core_res = &wcd9xxx->core_res;
+	int ret;
+	bool err_irq_requested = false;
+
+	ret = wcd9xxx_request_irq(core_res,
+				  cntl->irqs.cpe_ipc1_irq,
+				  wcd_cntl_ipc_irq, "CPE IPC1",
+				  cntl);
+	if (IS_ERR_VALUE(ret)) {
+		dev_err(codec->dev,
+			"%s: Failed to request cpe ipc irq, err = %d\n",
+			__func__, ret);
+		goto done;
+	}
+
+	/* Unmask the fatal irqs */
+	snd_soc_write(codec, WCD934X_CPE_SS_SS_ERROR_INT_MASK_0A,
+		      ~(cntl->irqs.fatal_irqs & 0xFF));
+	snd_soc_write(codec, WCD934X_CPE_SS_SS_ERROR_INT_MASK_0B,
+		      ~((cntl->irqs.fatal_irqs >> 8) & 0xFF));
+
+	/*
+	 * The CPE ERR irq is used only for error reporting from the WCD DSP;
+	 * even if this request fails, the DSP can still function normally.
+	 * Continue with init even if the CPE ERR irq request fails.
+	 */
+	if (wcd9xxx_request_irq(core_res, cntl->irqs.cpe_err_irq,
+				wcd_cntl_err_irq, "CPE ERR", cntl))
+		dev_info(codec->dev, "%s: Failed request_irq(cpe_err_irq)",
+			__func__);
+	else
+		err_irq_requested = true;
+
+	/* Enable all the clocks */
+	ret = wcd_cntl_clocks_enable(cntl);
+	if (IS_ERR_VALUE(ret)) {
+		dev_err(codec->dev, "%s: Failed to enable clocks, err = %d\n",
+			__func__, ret);
+		goto err_clk_enable;
+	}
+	wcd_cntl_cpar_ctrl(cntl, true);
+
+	return 0;
+
+err_clk_enable:
+	/* Mask all error interrupts */
+	snd_soc_write(codec, WCD934X_CPE_SS_SS_ERROR_INT_MASK_0A, 0xFF);
+	snd_soc_write(codec, WCD934X_CPE_SS_SS_ERROR_INT_MASK_0B, 0xFF);
+
+	/* Free the irq's requested */
+	wcd9xxx_free_irq(core_res, cntl->irqs.cpe_ipc1_irq, cntl);
+
+	if (err_irq_requested)
+		wcd9xxx_free_irq(core_res, cntl->irqs.cpe_err_irq, cntl);
+done:
+	return ret;
+}
+
+static int wcd_control_deinit(struct device *dev, void *priv_data)
+{
+	struct wcd_dsp_cntl *cntl = priv_data;
+	struct snd_soc_codec *codec = cntl->codec;
+	struct wcd9xxx *wcd9xxx = dev_get_drvdata(codec->dev->parent);
+	struct wcd9xxx_core_resource *core_res = &wcd9xxx->core_res;
+
+	wcd_cntl_clocks_disable(cntl);
+	wcd_cntl_cpar_ctrl(cntl, false);
+
+	/* Mask all error interrupts */
+	snd_soc_write(codec, WCD934X_CPE_SS_SS_ERROR_INT_MASK_0A, 0xFF);
+	snd_soc_write(codec, WCD934X_CPE_SS_SS_ERROR_INT_MASK_0B, 0xFF);
+
+	/* Free the irq's requested */
+	wcd9xxx_free_irq(core_res, cntl->irqs.cpe_err_irq, cntl);
+	wcd9xxx_free_irq(core_res, cntl->irqs.cpe_ipc1_irq, cntl);
+
+	return 0;
+}
+
+static struct wdsp_cmpnt_ops control_ops = {
+	.init = wcd_control_init,
+	.deinit = wcd_control_deinit,
+	.event_handler = wcd_control_handler,
+};
+
+static int wcd_ctrl_component_bind(struct device *dev,
+				   struct device *master,
+				   void *data)
+{
+	struct wcd_dsp_cntl *cntl;
+	struct snd_soc_codec *codec;
+	struct snd_card *card;
+	struct snd_info_entry *entry;
+	char proc_name[WCD_PROCFS_ENTRY_MAX_LEN];
+	char wcd_cntl_dir_name[WCD_CNTL_DIR_NAME_LEN_MAX];
+	int ret = 0;
+
+	if (!dev || !master || !data) {
+		pr_err("%s: Invalid parameters\n", __func__);
+		return -EINVAL;
+	}
+
+	cntl = tavil_get_wcd_dsp_cntl(dev);
+	if (!cntl) {
+		dev_err(dev, "%s: Failed to get cntl reference\n",
+			__func__);
+		return -EINVAL;
+	}
+
+	cntl->m_dev = master;
+	cntl->m_ops = data;
+
+	if (!cntl->m_ops->register_cmpnt_ops) {
+		dev_err(dev, "%s: invalid master callback register_cmpnt_ops\n",
+			__func__);
+		ret = -EINVAL;
+		goto done;
+	}
+
+	ret = cntl->m_ops->register_cmpnt_ops(master, dev, cntl, &control_ops);
+	if (ret) {
+		dev_err(dev, "%s: register_cmpnt_ops failed, err = %d\n",
+			__func__, ret);
+		goto done;
+	}
+
+	ret = wcd_cntl_miscdev_create(cntl);
+	if (IS_ERR_VALUE(ret)) {
+		dev_err(dev, "%s: misc dev register failed, err = %d\n",
+			__func__, ret);
+		goto done;
+	}
+
+	snprintf(wcd_cntl_dir_name, WCD_CNTL_DIR_NAME_LEN_MAX,
+		 "%s%d", "wdsp", cntl->dsp_instance);
+	ret = wcd_cntl_sysfs_init(wcd_cntl_dir_name, cntl);
+	if (IS_ERR_VALUE(ret)) {
+		dev_err(dev, "%s: sysfs_init failed, err = %d\n",
+			__func__, ret);
+		goto err_sysfs_init;
+	}
+
+	wcd_cntl_debugfs_init(wcd_cntl_dir_name, cntl);
+
+	codec = cntl->codec;
+	card = codec->component.card->snd_card;
+	snprintf(proc_name, WCD_PROCFS_ENTRY_MAX_LEN, "%s%d%s", "cpe",
+		 cntl->dsp_instance, "_state");
+	entry = snd_info_create_card_entry(card, proc_name, card->proc_root);
+	if (!entry) {
+		/* Do not treat this as Fatal error */
+		dev_err(dev, "%s: Failed to create procfs entry %s\n",
+			__func__, proc_name);
+		goto err_sysfs_init;
+	}
+
+	cntl->ssr_entry.entry = entry;
+	cntl->ssr_entry.offline = 1;
+	entry->size = WCD_PROCFS_ENTRY_MAX_LEN;
+	entry->content = SNDRV_INFO_CONTENT_DATA;
+	entry->c.ops = &wdsp_ssr_entry_ops;
+	entry->private_data = cntl;
+	ret = snd_info_register(entry);
+	if (IS_ERR_VALUE(ret)) {
+		dev_err(dev, "%s: Failed to register entry %s, err = %d\n",
+			__func__, proc_name, ret);
+		snd_info_free_entry(entry);
+		/* Let bind still happen even if creating the entry failed */
+		ret = 0;
+	}
+done:
+	return ret;
+
+err_sysfs_init:
+	wcd_cntl_miscdev_destroy(cntl);
+	return ret;
+}
+
+static void wcd_ctrl_component_unbind(struct device *dev,
+				      struct device *master,
+				      void *data)
+{
+	struct wcd_dsp_cntl *cntl;
+
+	if (!dev) {
+		pr_err("%s: Invalid device\n", __func__);
+		return;
+	}
+
+	cntl = tavil_get_wcd_dsp_cntl(dev);
+	if (!cntl) {
+		dev_err(dev, "%s: Failed to get cntl reference\n",
+			__func__);
+		return;
+	}
+
+	cntl->m_dev = NULL;
+	cntl->m_ops = NULL;
+
+	/* Remove the sysfs entries */
+	wcd_cntl_sysfs_remove(cntl);
+
+	/* Remove the debugfs entries */
+	wcd_cntl_debugfs_remove(cntl);
+
+	/* Remove the misc device */
+	wcd_cntl_miscdev_destroy(cntl);
+}
+
+static const struct component_ops wcd_ctrl_component_ops = {
+	.bind = wcd_ctrl_component_bind,
+	.unbind = wcd_ctrl_component_unbind,
+};
+
+/*
+ * wcd_dsp_ssr_event: handle the SSR event raised by caller.
+ * @cntl: Handle to the wcd_dsp_cntl structure
+ * @event: The SSR event to be handled
+ *
+ * Notifies the manager driver about the SSR event.
+ * Returns 0 on success and negative error code on error.
+ */
+int wcd_dsp_ssr_event(struct wcd_dsp_cntl *cntl, enum cdc_ssr_event event)
+{
+	int ret = 0;
+
+	if (!cntl) {
+		pr_err("%s: Invalid handle to control\n", __func__);
+		return -EINVAL;
+	}
+
+	if (!cntl->m_dev || !cntl->m_ops || !cntl->m_ops->signal_handler) {
+		dev_err(cntl->codec->dev,
+			"%s: Invalid signal_handler callback\n", __func__);
+		return -EINVAL;
+	}
+
+	switch (event) {
+	case WCD_CDC_DOWN_EVENT:
+		ret = cntl->m_ops->signal_handler(cntl->m_dev,
+						  WDSP_CDC_DOWN_SIGNAL,
+						  NULL);
+		if (IS_ERR_VALUE(ret))
+			dev_err(cntl->codec->dev,
+				"%s: WDSP_CDC_DOWN_SIGNAL failed, err = %d\n",
+				__func__, ret);
+		wcd_cntl_change_online_state(cntl, 0);
+		break;
+	case WCD_CDC_UP_EVENT:
+		ret = cntl->m_ops->signal_handler(cntl->m_dev,
+						  WDSP_CDC_UP_SIGNAL,
+						  NULL);
+		if (IS_ERR_VALUE(ret))
+			dev_err(cntl->codec->dev,
+				"%s: WDSP_CDC_UP_SIGNAL failed, err = %d\n",
+				__func__, ret);
+		break;
+	default:
+		dev_err(cntl->codec->dev, "%s: Invalid event %d\n",
+			__func__, event);
+		ret = -EINVAL;
+		break;
+	}
+
+	return ret;
+}
+EXPORT_SYMBOL(wcd_dsp_ssr_event);
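+
+/*
+ * Illustrative sketch (not part of the driver): a codec driver that
+ * receives subsystem-restart notifications would typically forward
+ * them as a DOWN/UP pair; the surrounding names here are hypothetical.
+ *
+ *	static int tavil_handle_ssr(struct tavil_priv *tavil, bool up)
+ *	{
+ *		return wcd_dsp_ssr_event(tavil->wdsp_cntl,
+ *					 up ? WCD_CDC_UP_EVENT :
+ *					      WCD_CDC_DOWN_EVENT);
+ *	}
+ */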
+
+/*
+ * wcd_dsp_cntl_init: Initialize the wcd-dsp control
+ * @codec: pointer to the codec handle
+ * @params: Parameters required to initialize wcd-dsp control
+ *
+ * This API is expected to be invoked by the codec driver and
+ * provide information essential for the wcd dsp control to
+ * configure and initialize the dsp
+ */
+void wcd_dsp_cntl_init(struct snd_soc_codec *codec,
+		       struct wcd_dsp_params *params,
+		       struct wcd_dsp_cntl **cntl)
+{
+	struct wcd_dsp_cntl *control;
+	int ret;
+
+	if (!codec || !params) {
+		pr_err("%s: Invalid handle to %s\n", __func__,
+		       (!codec) ? "codec" : "params");
+		*cntl = NULL;
+		return;
+	}
+
+	if (*cntl) {
+		pr_err("%s: cntl is non-NULL, possibly already initialized?\n",
+			__func__);
+		return;
+	}
+
+	if (!params->cb || !params->cb->cdc_clk_en ||
+	    !params->cb->cdc_vote_svs) {
+		dev_err(codec->dev,
+			"%s: clk_en and vote_svs callbacks must be provided\n",
+			__func__);
+		return;
+	}
+
+	control = kzalloc(sizeof(*control), GFP_KERNEL);
+	if (!control)
+		return;
+
+	control->codec = codec;
+	control->clk_rate = params->clk_rate;
+	control->cdc_cb = params->cb;
+	control->dsp_instance = params->dsp_instance;
+	memcpy(&control->irqs, &params->irqs, sizeof(control->irqs));
+	init_completion(&control->boot_complete);
+	mutex_init(&control->clk_mutex);
+	mutex_init(&control->ssr_mutex);
+	init_waitqueue_head(&control->ssr_entry.offline_poll_wait);
+
+	/*
+	 * The default state of WDSP is in SVS mode.
+	 * Vote for SVS now, the vote will be removed only
+	 * after DSP is booted up.
+	 */
+	control->cdc_cb->cdc_vote_svs(codec, true);
+
+	/*
+	 * If this is the last component needed by master to be ready,
+	 * then component_bind will be called within the component_add.
+	 * Hence, the data pointer should be assigned before component_add,
+	 * so that we can access it during this component's bind call.
+	 */
+	*cntl = control;
+	ret = component_add(codec->dev, &wcd_ctrl_component_ops);
+	if (ret) {
+		dev_err(codec->dev, "%s: component_add failed, err = %d\n",
+			__func__, ret);
+		kfree(*cntl);
+		*cntl = NULL;
+	}
+}
+EXPORT_SYMBOL(wcd_dsp_cntl_init);
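+
+/*
+ * Illustrative sketch (not part of the driver): expected call sequence
+ * from a codec driver's probe path. struct tavil_priv, tavil_cdc_cb,
+ * tavil_wdsp_irqs and the clk_rate value are hypothetical; *cntl must
+ * be NULL on entry and is only set on success.
+ *
+ *	struct wcd_dsp_params params = {
+ *		.cb = &tavil_cdc_cb,
+ *		.irqs = tavil_wdsp_irqs,
+ *		.clk_rate = 9600000,
+ *		.dsp_instance = 0,
+ *	};
+ *
+ *	tavil->wdsp_cntl = NULL;
+ *	wcd_dsp_cntl_init(codec, &params, &tavil->wdsp_cntl);
+ *	if (!tavil->wdsp_cntl)
+ *		dev_err(codec->dev, "wdsp control init failed\n");
+ */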
+
+/*
+ * wcd_dsp_cntl_deinit: De-initialize the wcd-dsp control
+ * @cntl: The struct wcd_dsp_cntl to de-initialize
+ *
+ * This API is intended to be invoked by the codec driver
+ * to de-initialize the wcd dsp control
+ */
+void wcd_dsp_cntl_deinit(struct wcd_dsp_cntl **cntl)
+{
+	struct wcd_dsp_cntl *control = *cntl;
+	struct snd_soc_codec *codec;
+
+	/* If control is NULL, there is nothing to de-initialize */
+	if (!control)
+		return;
+	codec = control->codec;
+
+	/*
+	 * Calling shutdown will clean up all register states,
+	 * irrespective of whether the DSP was booted up or not.
+	 */
+	wcd_cntl_do_shutdown(control);
+	wcd_cntl_disable_memory(control, WCD_MEM_TYPE_SWITCHABLE);
+	wcd_cntl_disable_memory(control, WCD_MEM_TYPE_ALWAYS_ON);
+
+	component_del(codec->dev, &wcd_ctrl_component_ops);
+
+	mutex_destroy(&control->clk_mutex);
+	mutex_destroy(&control->ssr_mutex);
+	kfree(*cntl);
+	*cntl = NULL;
+}
+EXPORT_SYMBOL(wcd_dsp_cntl_deinit);
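+
+/*
+ * Illustrative sketch (not part of the driver): the matching teardown
+ * from the codec driver's remove path; the handle is freed and NULLed
+ * by the API itself.
+ *
+ *	wcd_dsp_cntl_deinit(&tavil->wdsp_cntl);
+ */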
diff -Nruw linux-4.4.115-fbx/sound/soc/codecs/wcd934x./wcd934x-dsp-cntl.h linux-4.4.115-fbx/sound/soc/codecs/wcd934x/wcd934x-dsp-cntl.h
--- linux-4.4.115-fbx/sound/soc/codecs/wcd934x./wcd934x-dsp-cntl.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/sound/soc/codecs/wcd934x/wcd934x-dsp-cntl.h	2019-01-22 16:16:29.551301175 +0100
@@ -0,0 +1,119 @@
+/*
+ * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __WCD934X_DSP_CNTL_H__
+#define __WCD934X_DSP_CNTL_H__
+
+#include <sound/soc.h>
+#include <sound/wcd-dsp-mgr.h>
+
+enum cdc_ssr_event {
+	WCD_CDC_DOWN_EVENT,
+	WCD_CDC_UP_EVENT,
+};
+
+struct wcd_dsp_cdc_cb {
+	/* Callback to enable codec clock */
+	int (*cdc_clk_en)(struct snd_soc_codec *, bool);
+	/* Callback to vote and unvote for SVS2 mode */
+	void (*cdc_vote_svs)(struct snd_soc_codec *, bool);
+};
+
+struct wcd_dsp_irq_info {
+	/* IPC interrupt */
+	int cpe_ipc1_irq;
+
+	/* CPE error summary interrupt */
+	int cpe_err_irq;
+
+	/*
+	 * Bit mask to indicate which of the
+	 * error interrupts are to be considered
+	 * as fatal.
+	 */
+	u16 fatal_irqs;
+};
+
+struct wcd_dsp_params {
+	struct wcd_dsp_cdc_cb *cb;
+	struct wcd_dsp_irq_info irqs;
+
+	/* Rate at which the codec clock operates */
+	u32 clk_rate;
+
+	/*
+	 * Represents the DSP instance; used to create
+	 * sysfs and debugfs entries under the directory
+	 * wdsp<dsp-instance>
+	 */
+	u32 dsp_instance;
+};
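+
+/*
+ * Illustrative sketch (not part of this header): the codec driver is
+ * expected to supply both callbacks, e.g. with hypothetical names:
+ *
+ *	static int tavil_cdc_clk_en(struct snd_soc_codec *codec, bool en)
+ *	{
+ *		return tavil_cdc_mclk_enable(codec, en);
+ *	}
+ *
+ *	static void tavil_cdc_vote_svs(struct snd_soc_codec *codec, bool vote)
+ *	{
+ *		... vote/unvote for the SVS2 power mode ...
+ *	}
+ *
+ *	static struct wcd_dsp_cdc_cb tavil_cdc_cb = {
+ *		.cdc_clk_en = tavil_cdc_clk_en,
+ *		.cdc_vote_svs = tavil_cdc_vote_svs,
+ *	};
+ */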
+
+struct wdsp_ssr_entry {
+	u8 offline;
+	u8 offline_change;
+	wait_queue_head_t offline_poll_wait;
+	struct snd_info_entry *entry;
+};
+
+struct wcd_dsp_cntl {
+	/* Handle to codec */
+	struct snd_soc_codec *codec;
+
+	/* Clk rate of the codec clock */
+	u32 clk_rate;
+
+	/* Callbacks to codec driver */
+	const struct wcd_dsp_cdc_cb *cdc_cb;
+
+	/* Completion to indicate WDSP boot done */
+	struct completion boot_complete;
+
+	struct wcd_dsp_irq_info irqs;
+	u32 dsp_instance;
+
+	/* Sysfs entries related */
+	int boot_reqs;
+	struct kobject wcd_kobj;
+
+	/* Debugfs related */
+	struct dentry *entry;
+	u32 debug_mode;
+	bool ramdump_enable;
+
+	/* WDSP manager drivers data */
+	struct device *m_dev;
+	struct wdsp_mgr_ops *m_ops;
+
+	/* clk related */
+	struct mutex clk_mutex;
+	bool is_clk_enabled;
+
+	/* Keep track of WDSP boot status */
+	bool is_wdsp_booted;
+
+	/* SSR related */
+	struct wdsp_ssr_entry ssr_entry;
+	struct mutex ssr_mutex;
+
+	/* Misc device related */
+	char miscdev_name[256];
+	struct miscdevice miscdev;
+};
+
+void wcd_dsp_cntl_init(struct snd_soc_codec *codec,
+		       struct wcd_dsp_params *params,
+		       struct wcd_dsp_cntl **cntl);
+void wcd_dsp_cntl_deinit(struct wcd_dsp_cntl **cntl);
+int wcd_dsp_ssr_event(struct wcd_dsp_cntl *cntl, enum cdc_ssr_event event);
+#endif /* __WCD934X_DSP_CNTL_H__ */
diff -Nruw linux-4.4.115-fbx/sound/soc/codecs/wcd934x./wcd934x.h linux-4.4.115-fbx/sound/soc/codecs/wcd934x/wcd934x.h
--- linux-4.4.115-fbx/sound/soc/codecs/wcd934x./wcd934x.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/sound/soc/codecs/wcd934x/wcd934x.h	2019-01-22 16:16:29.555301211 +0100
@@ -0,0 +1,195 @@
+/*
+ * Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#ifndef WCD934X_H
+#define WCD934X_H
+
+#include <sound/apr_audio-v2.h>
+#include <linux/mfd/wcd9xxx/wcd9xxx-slimslave.h>
+#include "wcd934x-dsp-cntl.h"
+#include "../wcd9xxx-common-v2.h"
+#include "../wcd-mbhc-v2.h"
+
+#define WCD934X_REGISTER_START_OFFSET  0x800
+#define WCD934X_SB_PGD_PORT_RX_BASE   0x40
+#define WCD934X_SB_PGD_PORT_TX_BASE   0x50
+#define WCD934X_RX_PORT_START_NUMBER  16
+
+#define WCD934X_DMIC_CLK_DIV_2  0x0
+#define WCD934X_DMIC_CLK_DIV_3  0x1
+#define WCD934X_DMIC_CLK_DIV_4  0x2
+#define WCD934X_DMIC_CLK_DIV_6  0x3
+#define WCD934X_DMIC_CLK_DIV_8  0x4
+#define WCD934X_DMIC_CLK_DIV_16  0x5
+#define WCD934X_DMIC_CLK_DRIVE_DEFAULT 0x02
+
+#define WCD934X_ANC_DMIC_X2_FULL_RATE 1
+#define WCD934X_ANC_DMIC_X2_HALF_RATE 0
+
+#define TAVIL_MAX_MICBIAS 4
+#define TAVIL_NUM_INTERPOLATORS 9
+#define MAX_ON_DEMAND_SUPPLY_NAME_LENGTH    64
+
+/* Convert from vout ctl to micbias voltage in mV */
+#define  WCD_VOUT_CTL_TO_MICB(v)  (1000 + v * 50)
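+/* e.g. vout ctl 0x10 -> 1000 + 16 * 50 = 1800 mV; the inverse mapping
+ * is provided by wcd934x_get_micb_vout_ctl_val()
+ */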
+
+/* Feature masks to distinguish codec version */
+#define DSD_DISABLED_MASK   0
+#define SLNQ_DISABLED_MASK  1
+
+#define DSD_DISABLED   (1 << DSD_DISABLED_MASK)
+#define SLNQ_DISABLED  (1 << SLNQ_DISABLED_MASK)
+
+/* Number of input and output Slimbus ports */
+enum {
+	WCD934X_RX0 = 0,
+	WCD934X_RX1,
+	WCD934X_RX2,
+	WCD934X_RX3,
+	WCD934X_RX4,
+	WCD934X_RX5,
+	WCD934X_RX6,
+	WCD934X_RX7,
+	WCD934X_RX_MAX,
+};
+
+enum {
+	WCD934X_TX0 = 0,
+	WCD934X_TX1,
+	WCD934X_TX2,
+	WCD934X_TX3,
+	WCD934X_TX4,
+	WCD934X_TX5,
+	WCD934X_TX6,
+	WCD934X_TX7,
+	WCD934X_TX8,
+	WCD934X_TX9,
+	WCD934X_TX10,
+	WCD934X_TX11,
+	WCD934X_TX12,
+	WCD934X_TX13,
+	WCD934X_TX14,
+	WCD934X_TX15,
+	WCD934X_TX_MAX,
+};
+
+enum {
+	INTERP_EAR = 0,
+	INTERP_HPHL,
+	INTERP_HPHR,
+	INTERP_LO1,
+	INTERP_LO2,
+	INTERP_LO3_NA, /* LO3 not available in Tavil */
+	INTERP_LO4_NA,
+	INTERP_SPKR1,
+	INTERP_SPKR2,
+	INTERP_MAX,
+};
+
+enum {
+	/* INTR_REG 0 */
+	WCD934X_IRQ_MISC = 1,
+	WCD934X_IRQ_HPH_PA_OCPL_FAULT,
+	WCD934X_IRQ_HPH_PA_OCPR_FAULT,
+	WCD934X_IRQ_EAR_PA_OCP_FAULT,
+	WCD934X_IRQ_HPH_PA_CNPL_COMPLETE,
+	WCD934X_IRQ_HPH_PA_CNPR_COMPLETE,
+	WCD934X_IRQ_EAR_PA_CNP_COMPLETE,
+	/* INTR_REG 1 */
+	WCD934X_IRQ_MBHC_SW_DET,
+	WCD934X_IRQ_MBHC_ELECT_INS_REM_DET,
+	WCD934X_IRQ_MBHC_BUTTON_PRESS_DET,
+	WCD934X_IRQ_MBHC_BUTTON_RELEASE_DET,
+	WCD934X_IRQ_MBHC_ELECT_INS_REM_LEG_DET,
+	WCD934X_IRQ_RESERVED_0,
+	WCD934X_IRQ_RESERVED_1,
+	WCD934X_IRQ_RESERVED_2,
+	/* INTR_REG 2 */
+	WCD934X_IRQ_LINE_PA1_CNP_COMPLETE,
+	WCD934X_IRQ_LINE_PA2_CNP_COMPLETE,
+	WCD934X_IRQ_SLNQ_ANALOG_ERROR,
+	WCD934X_IRQ_RESERVED_3,
+	WCD934X_IRQ_SOUNDWIRE,
+	WCD934X_IRQ_VDD_DIG_RAMP_COMPLETE,
+	WCD934X_IRQ_RCO_ERROR,
+	WCD934X_IRQ_CPE_ERROR,
+	/* INTR_REG 3 */
+	WCD934X_IRQ_MAD_AUDIO,
+	WCD934X_IRQ_MAD_BEACON,
+	WCD934X_IRQ_MAD_ULTRASOUND,
+	WCD934X_IRQ_VBAT_ATTACK,
+	WCD934X_IRQ_VBAT_RESTORE,
+	WCD934X_IRQ_CPE1_INTR,
+	WCD934X_IRQ_RESERVED_4,
+	WCD934X_IRQ_SLNQ_DIGITAL,
+	WCD934X_NUM_IRQS,
+};
+
+/*
+ * Selects compander and smart boost settings
+ * for a given speaker mode
+ */
+enum {
+	WCD934X_SPKR_MODE_DEFAULT,
+	WCD934X_SPKR_MODE_1, /* COMP Gain = 12dB, Smartboost Max = 5.5V */
+};
+
+/*
+ * Rx path gain offsets
+ */
+enum {
+	WCD934X_RX_GAIN_OFFSET_M1P5_DB,
+	WCD934X_RX_GAIN_OFFSET_0_DB,
+};
+
+/*
+ * DAI data structure: holds DAI-specific info
+ * such as sample rate, channel number, etc.
+ */
+struct tavil_codec_dai_data {
+	u32 rate;
+	u32 *ch_num;
+	u32 ch_act;
+	u32 ch_tot;
+};
+
+/*
+ * Structure used to update codec
+ * register defaults after reset
+ */
+struct tavil_reg_mask_val {
+	u16 reg;
+	u8 mask;
+	u8 val;
+};
+
+extern void *tavil_get_afe_config(struct snd_soc_codec *codec,
+				  enum afe_config_type config_type);
+extern int tavil_cdc_mclk_enable(struct snd_soc_codec *codec, bool enable);
+extern int tavil_set_spkr_mode(struct snd_soc_codec *codec, int mode);
+extern int tavil_set_spkr_gain_offset(struct snd_soc_codec *codec, int offset);
+extern struct wcd_dsp_cntl *tavil_get_wcd_dsp_cntl(struct device *dev);
+extern int wcd934x_get_micb_vout_ctl_val(u32 micb_mv);
+extern int tavil_micbias_control(struct snd_soc_codec *codec,
+				 int micb_num,
+				 int req, bool is_dapm);
+extern int tavil_mbhc_micb_adjust_voltage(struct snd_soc_codec *codec,
+					  int req_volt,
+					  int micb_num);
+extern struct wcd934x_mbhc *tavil_soc_get_mbhc(struct snd_soc_codec *codec);
+extern int tavil_codec_enable_interp_clk(struct snd_soc_codec *codec,
+					 int event, int intp_idx);
+extern struct tavil_dsd_config *tavil_get_dsd_config(struct snd_soc_codec *);
+extern int tavil_codec_info_create_codec_entry(struct snd_info_entry *,
+					       struct snd_soc_codec *);
+#endif
diff -Nruw linux-4.4.115-fbx/sound/soc/codecs/wcd934x./wcd934x-mbhc.c linux-4.4.115-fbx/sound/soc/codecs/wcd934x/wcd934x-mbhc.c
--- linux-4.4.115-fbx/sound/soc/codecs/wcd934x./wcd934x-mbhc.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/sound/soc/codecs/wcd934x/wcd934x-mbhc.c	2019-10-29 09:26:26.125227467 +0100
@@ -0,0 +1,1104 @@
+/*
+ * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/device.h>
+#include <linux/printk.h>
+#include <linux/ratelimit.h>
+#include <linux/kernel.h>
+#include <linux/gpio.h>
+#include <linux/delay.h>
+#include <linux/regmap.h>
+#include <linux/mfd/wcd9xxx/core.h>
+#include <linux/mfd/wcd9xxx/wcd9xxx-irq.h>
+#include <linux/mfd/wcd9xxx/wcd9xxx_registers.h>
+#include <linux/mfd/wcd934x/registers.h>
+#include <linux/mfd/wcd9xxx/pdata.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+#include <sound/soc-dapm.h>
+#include "wcd934x.h"
+#include "wcd934x-mbhc.h"
+#include "../wcdcal-hwdep.h"
+
+#define TAVIL_ZDET_SUPPORTED          true
+/* Z value defined in milliohm */
+#define TAVIL_ZDET_VAL_32             32000
+#define TAVIL_ZDET_VAL_400            400000
+#define TAVIL_ZDET_VAL_1200           1200000
+#define TAVIL_ZDET_VAL_100K           100000000
+/* Z floating defined in ohms */
+#define TAVIL_ZDET_FLOATING_IMPEDANCE 0x0FFFFFFE
+
+#define TAVIL_ZDET_NUM_MEASUREMENTS   900
+#define TAVIL_MBHC_GET_C1(c)          ((c & 0xC000) >> 14)
+#define TAVIL_MBHC_GET_X1(x)          (x & 0x3FFF)
+/* Z value compared in milliOhm */
+#define TAVIL_MBHC_IS_SECOND_RAMP_REQUIRED(z) ((z > 400000) || (z < 32000))
+#define TAVIL_MBHC_ZDET_CONST         (86 * 16384)
+#define TAVIL_MBHC_MOISTURE_RREF      R_24_KOHM
+
+static struct wcd_mbhc_register
+	wcd_mbhc_registers[WCD_MBHC_REG_FUNC_MAX] = {
+	WCD_MBHC_REGISTER("WCD_MBHC_L_DET_EN",
+			  WCD934X_ANA_MBHC_MECH, 0x80, 7, 0),
+	WCD_MBHC_REGISTER("WCD_MBHC_GND_DET_EN",
+			  WCD934X_ANA_MBHC_MECH, 0x40, 6, 0),
+	WCD_MBHC_REGISTER("WCD_MBHC_MECH_DETECTION_TYPE",
+			  WCD934X_ANA_MBHC_MECH, 0x20, 5, 0),
+	WCD_MBHC_REGISTER("WCD_MBHC_MIC_CLAMP_CTL",
+			  WCD934X_MBHC_NEW_PLUG_DETECT_CTL, 0x30, 4, 0),
+	WCD_MBHC_REGISTER("WCD_MBHC_ELECT_DETECTION_TYPE",
+			  WCD934X_ANA_MBHC_ELECT, 0x08, 3, 0),
+	WCD_MBHC_REGISTER("WCD_MBHC_HS_L_DET_PULL_UP_CTRL",
+			  WCD934X_MBHC_NEW_PLUG_DETECT_CTL, 0xC0, 6, 0),
+	WCD_MBHC_REGISTER("WCD_MBHC_HS_L_DET_PULL_UP_COMP_CTRL",
+			  WCD934X_ANA_MBHC_MECH, 0x04, 2, 0),
+	WCD_MBHC_REGISTER("WCD_MBHC_HPHL_PLUG_TYPE",
+			  WCD934X_ANA_MBHC_MECH, 0x10, 4, 0),
+	WCD_MBHC_REGISTER("WCD_MBHC_GND_PLUG_TYPE",
+			  WCD934X_ANA_MBHC_MECH, 0x08, 3, 0),
+	WCD_MBHC_REGISTER("WCD_MBHC_SW_HPH_LP_100K_TO_GND",
+			  WCD934X_ANA_MBHC_MECH, 0x01, 0, 0),
+	WCD_MBHC_REGISTER("WCD_MBHC_ELECT_SCHMT_ISRC",
+			  WCD934X_ANA_MBHC_ELECT, 0x06, 1, 0),
+	WCD_MBHC_REGISTER("WCD_MBHC_FSM_EN",
+			  WCD934X_ANA_MBHC_ELECT, 0x80, 7, 0),
+	WCD_MBHC_REGISTER("WCD_MBHC_INSREM_DBNC",
+			  WCD934X_MBHC_NEW_PLUG_DETECT_CTL, 0x0F, 0, 0),
+	WCD_MBHC_REGISTER("WCD_MBHC_BTN_DBNC",
+			  WCD934X_MBHC_NEW_CTL_1, 0x03, 0, 0),
+	WCD_MBHC_REGISTER("WCD_MBHC_HS_VREF",
+			  WCD934X_MBHC_NEW_CTL_2, 0x03, 0, 0),
+	WCD_MBHC_REGISTER("WCD_MBHC_HS_COMP_RESULT",
+			  WCD934X_ANA_MBHC_RESULT_3, 0x08, 3, 0),
+	WCD_MBHC_REGISTER("WCD_MBHC_MIC_SCHMT_RESULT",
+			  WCD934X_ANA_MBHC_RESULT_3, 0x20, 5, 0),
+	WCD_MBHC_REGISTER("WCD_MBHC_HPHL_SCHMT_RESULT",
+			  WCD934X_ANA_MBHC_RESULT_3, 0x80, 7, 0),
+	WCD_MBHC_REGISTER("WCD_MBHC_HPHR_SCHMT_RESULT",
+			  WCD934X_ANA_MBHC_RESULT_3, 0x40, 6, 0),
+	WCD_MBHC_REGISTER("WCD_MBHC_OCP_FSM_EN",
+			  WCD934X_HPH_OCP_CTL, 0x10, 4, 0),
+	WCD_MBHC_REGISTER("WCD_MBHC_BTN_RESULT",
+			  WCD934X_ANA_MBHC_RESULT_3, 0x07, 0, 0),
+	WCD_MBHC_REGISTER("WCD_MBHC_BTN_ISRC_CTL",
+			  WCD934X_ANA_MBHC_ELECT, 0x70, 4, 0),
+	WCD_MBHC_REGISTER("WCD_MBHC_ELECT_RESULT",
+			  WCD934X_ANA_MBHC_RESULT_3, 0xFF, 0, 0),
+	WCD_MBHC_REGISTER("WCD_MBHC_MICB_CTRL",
+			  WCD934X_ANA_MICB2, 0xC0, 6, 0),
+	WCD_MBHC_REGISTER("WCD_MBHC_HPH_CNP_WG_TIME",
+			  WCD934X_HPH_CNP_WG_TIME, 0xFF, 0, 0),
+	WCD_MBHC_REGISTER("WCD_MBHC_HPHR_PA_EN",
+			  WCD934X_ANA_HPH, 0x40, 6, 0),
+	WCD_MBHC_REGISTER("WCD_MBHC_HPHL_PA_EN",
+			  WCD934X_ANA_HPH, 0x80, 7, 0),
+	WCD_MBHC_REGISTER("WCD_MBHC_HPH_PA_EN",
+			  WCD934X_ANA_HPH, 0xC0, 6, 0),
+	WCD_MBHC_REGISTER("WCD_MBHC_SWCH_LEVEL_REMOVE",
+			  WCD934X_ANA_MBHC_RESULT_3, 0x10, 4, 0),
+	WCD_MBHC_REGISTER("WCD_MBHC_PULLDOWN_CTRL",
+			  0, 0, 0, 0),
+	WCD_MBHC_REGISTER("WCD_MBHC_ANC_DET_EN",
+			  WCD934X_ANA_MBHC_ZDET, 0x01, 0, 0),
+	WCD_MBHC_REGISTER("WCD_MBHC_FSM_STATUS",
+			  WCD934X_MBHC_STATUS_SPARE_1, 0x01, 0, 0),
+	WCD_MBHC_REGISTER("WCD_MBHC_MUX_CTL",
+			  WCD934X_MBHC_NEW_CTL_2, 0x70, 4, 0),
+	WCD_MBHC_REGISTER("WCD_MBHC_HPHL_OCP_DET_EN",
+			  WCD934X_HPH_L_TEST, 0x01, 0, 0),
+	WCD_MBHC_REGISTER("WCD_MBHC_HPHR_OCP_DET_EN",
+			  WCD934X_HPH_R_TEST, 0x01, 0, 0),
+	WCD_MBHC_REGISTER("WCD_MBHC_HPHL_OCP_STATUS",
+			  WCD934X_INTR_PIN1_STATUS0, 0x04, 2, 0),
+	WCD_MBHC_REGISTER("WCD_MBHC_HPHR_OCP_STATUS",
+			  WCD934X_INTR_PIN1_STATUS0, 0x08, 3, 0),
+};
+
+static const struct wcd_mbhc_intr intr_ids = {
+	.mbhc_sw_intr =  WCD934X_IRQ_MBHC_SW_DET,
+	.mbhc_btn_press_intr = WCD934X_IRQ_MBHC_BUTTON_PRESS_DET,
+	.mbhc_btn_release_intr = WCD934X_IRQ_MBHC_BUTTON_RELEASE_DET,
+	.mbhc_hs_ins_intr = WCD934X_IRQ_MBHC_ELECT_INS_REM_LEG_DET,
+	.mbhc_hs_rem_intr = WCD934X_IRQ_MBHC_ELECT_INS_REM_DET,
+	.hph_left_ocp = WCD934X_IRQ_HPH_PA_OCPL_FAULT,
+	.hph_right_ocp = WCD934X_IRQ_HPH_PA_OCPR_FAULT,
+};
+
+static char on_demand_supply_name[][MAX_ON_DEMAND_SUPPLY_NAME_LENGTH] = {
+	"cdc-vdd-mic-bias",
+};
+
+struct tavil_mbhc_zdet_param {
+	u16 ldo_ctl;
+	u16 noff;
+	u16 nshift;
+	u16 btn5;
+	u16 btn6;
+	u16 btn7;
+};
+
+static int tavil_mbhc_request_irq(struct snd_soc_codec *codec,
+				  int irq, irq_handler_t handler,
+				  const char *name, void *data)
+{
+	struct wcd9xxx *wcd9xxx = dev_get_drvdata(codec->dev->parent);
+	struct wcd9xxx_core_resource *core_res =
+				&wcd9xxx->core_res;
+
+	return wcd9xxx_request_irq(core_res, irq, handler, name, data);
+}
+
+static void tavil_mbhc_irq_control(struct snd_soc_codec *codec,
+				   int irq, bool enable)
+{
+	struct wcd9xxx *wcd9xxx = dev_get_drvdata(codec->dev->parent);
+	struct wcd9xxx_core_resource *core_res =
+				&wcd9xxx->core_res;
+
+	if (enable)
+		wcd9xxx_enable_irq(core_res, irq);
+	else
+		wcd9xxx_disable_irq(core_res, irq);
+}
+
+static int tavil_mbhc_free_irq(struct snd_soc_codec *codec,
+			       int irq, void *data)
+{
+	struct wcd9xxx *wcd9xxx = dev_get_drvdata(codec->dev->parent);
+	struct wcd9xxx_core_resource *core_res =
+				&wcd9xxx->core_res;
+
+	wcd9xxx_free_irq(core_res, irq, data);
+	return 0;
+}
+
+static void tavil_mbhc_clk_setup(struct snd_soc_codec *codec,
+				 bool enable)
+{
+	if (enable)
+		snd_soc_update_bits(codec, WCD934X_MBHC_NEW_CTL_1,
+				    0x80, 0x80);
+	else
+		snd_soc_update_bits(codec, WCD934X_MBHC_NEW_CTL_1,
+				    0x80, 0x00);
+}
+
+static int tavil_mbhc_btn_to_num(struct snd_soc_codec *codec)
+{
+	return snd_soc_read(codec, WCD934X_ANA_MBHC_RESULT_3) & 0x7;
+}
+
+static int tavil_enable_ext_mb_source(struct wcd_mbhc *mbhc,
+				      bool turn_on)
+{
+	struct wcd934x_mbhc *wcd934x_mbhc;
+	struct snd_soc_codec *codec = mbhc->codec;
+	struct wcd934x_on_demand_supply *supply;
+	int ret = 0;
+
+	wcd934x_mbhc = container_of(mbhc, struct wcd934x_mbhc, wcd_mbhc);
+
+	supply =  &wcd934x_mbhc->on_demand_list[WCD934X_ON_DEMAND_MICBIAS];
+	if (!supply->supply) {
+		dev_dbg(codec->dev, "%s: warning supply not present ond for %s\n",
+				__func__, "onDemand Micbias");
+		return ret;
+	}
+
+	dev_dbg(codec->dev, "%s turn_on: %d count: %d\n", __func__, turn_on,
+		supply->ondemand_supply_count);
+
+	if (turn_on) {
+		if (!(supply->ondemand_supply_count)) {
+			ret = snd_soc_dapm_force_enable_pin(
+				snd_soc_codec_get_dapm(codec),
+				"MICBIAS_REGULATOR");
+			snd_soc_dapm_sync(snd_soc_codec_get_dapm(codec));
+		}
+		supply->ondemand_supply_count++;
+	} else {
+		if (supply->ondemand_supply_count > 0)
+			supply->ondemand_supply_count--;
+		if (!(supply->ondemand_supply_count)) {
+			ret = snd_soc_dapm_disable_pin(
+				snd_soc_codec_get_dapm(codec),
+				"MICBIAS_REGULATOR");
+			snd_soc_dapm_sync(snd_soc_codec_get_dapm(codec));
+		}
+	}
+
+	if (ret)
+		dev_err(codec->dev, "%s: Failed to %s external micbias source\n",
+			__func__, turn_on ? "enable" : "disabled");
+	else
+		dev_dbg(codec->dev, "%s: %s external micbias source\n",
+			__func__, turn_on ? "Enabled" : "Disabled");
+
+	return ret;
+}
+
+static void tavil_mbhc_mbhc_bias_control(struct snd_soc_codec *codec,
+					 bool enable)
+{
+	if (enable)
+		snd_soc_update_bits(codec, WCD934X_ANA_MBHC_ELECT,
+				    0x01, 0x01);
+	else
+		snd_soc_update_bits(codec, WCD934X_ANA_MBHC_ELECT,
+				    0x01, 0x00);
+}
+
+static void tavil_mbhc_program_btn_thr(struct snd_soc_codec *codec,
+				       s16 *btn_low, s16 *btn_high,
+				       int num_btn, bool is_micbias)
+{
+	int i;
+	int vth;
+
+	if (num_btn > WCD_MBHC_DEF_BUTTONS) {
+		dev_err(codec->dev, "%s: invalid number of buttons: %d\n",
+			__func__, num_btn);
+		return;
+	}
+	/*
+	 * Tavil just needs one set of thresholds for button detection
+	 * due to micbias voltage ramp to pullup upon button press. So
+	 * btn_low and is_micbias are ignored and always program button
+	 * thresholds using btn_high.
+	 */
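+	/* e.g. btn_high[i] = 450 mV -> vth = (450 * 2) / 25 = 36 (0x24) */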
+	for (i = 0; i < num_btn; i++) {
+		vth = ((btn_high[i] * 2) / 25) & 0x3F;
+		snd_soc_update_bits(codec, WCD934X_ANA_MBHC_BTN0 + i,
+				    0xFC, vth << 2);
+		dev_dbg(codec->dev, "%s: btn_high[%d]: %d, vth: %d\n",
+			__func__, i, btn_high[i], vth);
+	}
+}
+
+static bool tavil_mbhc_lock_sleep(struct wcd_mbhc *mbhc, bool lock)
+{
+	struct snd_soc_codec *codec = mbhc->codec;
+	struct wcd9xxx *wcd9xxx = dev_get_drvdata(codec->dev->parent);
+	struct wcd9xxx_core_resource *core_res =
+				&wcd9xxx->core_res;
+	bool ret = false;
+
+	if (lock)
+		ret = wcd9xxx_lock_sleep(core_res);
+	else
+		wcd9xxx_unlock_sleep(core_res);
+
+	return ret;
+}
+
+static int tavil_mbhc_register_notifier(struct wcd_mbhc *mbhc,
+					struct notifier_block *nblock,
+					bool enable)
+{
+	struct wcd934x_mbhc *wcd934x_mbhc;
+
+	wcd934x_mbhc = container_of(mbhc, struct wcd934x_mbhc, wcd_mbhc);
+
+	if (enable)
+		return blocking_notifier_chain_register(&wcd934x_mbhc->notifier,
+							nblock);
+	else
+		return blocking_notifier_chain_unregister(
+				&wcd934x_mbhc->notifier, nblock);
+}
+
+static bool tavil_mbhc_micb_en_status(struct wcd_mbhc *mbhc, int micb_num)
+{
+	u8 val;
+
+	if (micb_num == MIC_BIAS_2) {
+		val = (snd_soc_read(mbhc->codec, WCD934X_ANA_MICB2) >> 6);
+		if (val == 0x01)
+			return true;
+	}
+	return false;
+}
+
+static bool tavil_mbhc_hph_pa_on_status(struct snd_soc_codec *codec)
+{
+	return (snd_soc_read(codec, WCD934X_ANA_HPH) & 0xC0) ? true : false;
+}
+
+static void tavil_mbhc_hph_l_pull_up_control(
+		struct snd_soc_codec *codec,
+		enum mbhc_hs_pullup_iref pull_up_cur)
+{
+	/* Default pull up current to 2uA */
+	if (pull_up_cur < I_OFF || pull_up_cur > I_3P0_UA ||
+	    pull_up_cur == I_DEFAULT)
+		pull_up_cur = I_2P0_UA;
+
+	dev_dbg(codec->dev, "%s: HS pull up current:%d\n",
+		__func__, pull_up_cur);
+
+	snd_soc_update_bits(codec, WCD934X_MBHC_NEW_PLUG_DETECT_CTL,
+			    0xC0, pull_up_cur << 6);
+}
+
+static int tavil_mbhc_request_micbias(struct snd_soc_codec *codec,
+				      int micb_num, int req)
+{
+	int ret;
+
+	/*
+	 * If micbias is requested, make sure that there
+	 * is a vote to enable mclk
+	 */
+	if (req == MICB_ENABLE)
+		tavil_cdc_mclk_enable(codec, true);
+
+	ret = tavil_micbias_control(codec, micb_num, req, false);
+
+	/*
+	 * Release the vote for mclk when requesting
+	 * micbias disable
+	 */
+	if (req == MICB_DISABLE)
+		tavil_cdc_mclk_enable(codec, false);
+
+	return ret;
+}
+
+static void tavil_mbhc_micb_ramp_control(struct snd_soc_codec *codec,
+					 bool enable)
+{
+	if (enable) {
+		snd_soc_update_bits(codec, WCD934X_ANA_MICB2_RAMP,
+				    0x1C, 0x0C);
+		snd_soc_update_bits(codec, WCD934X_ANA_MICB2_RAMP,
+				    0x80, 0x80);
+	} else {
+		snd_soc_update_bits(codec, WCD934X_ANA_MICB2_RAMP,
+				    0x80, 0x00);
+		snd_soc_update_bits(codec, WCD934X_ANA_MICB2_RAMP,
+				    0x1C, 0x00);
+	}
+}
+
+static struct firmware_cal *tavil_get_hwdep_fw_cal(struct wcd_mbhc *mbhc,
+						   enum wcd_cal_type type)
+{
+	struct wcd934x_mbhc *wcd934x_mbhc;
+	struct firmware_cal *hwdep_cal;
+	struct snd_soc_codec *codec = mbhc->codec;
+
+	wcd934x_mbhc = container_of(mbhc, struct wcd934x_mbhc, wcd_mbhc);
+
+	if (!codec) {
+		pr_err("%s: NULL codec pointer\n", __func__);
+		return NULL;
+	}
+	hwdep_cal = wcdcal_get_fw_cal(wcd934x_mbhc->fw_data, type);
+	if (!hwdep_cal)
+		dev_err(codec->dev, "%s: cal not sent by %d\n",
+			__func__, type);
+
+	return hwdep_cal;
+}
+
+static int tavil_mbhc_micb_ctrl_threshold_mic(struct snd_soc_codec *codec,
+					      int micb_num, bool req_en)
+{
+	struct wcd9xxx_pdata *pdata = dev_get_platdata(codec->dev->parent);
+	int rc, micb_mv;
+
+	if (micb_num != MIC_BIAS_2)
+		return -EINVAL;
+
+	/*
+	 * If device tree micbias level is already above the minimum
+	 * voltage needed to detect threshold microphone, then do
+	 * not change the micbias, just return.
+	 */
+	if (pdata->micbias.micb2_mv >= WCD_MBHC_THR_HS_MICB_MV)
+		return 0;
+
+	micb_mv = req_en ? WCD_MBHC_THR_HS_MICB_MV : pdata->micbias.micb2_mv;
+
+	rc = tavil_mbhc_micb_adjust_voltage(codec, micb_mv, MIC_BIAS_2);
+
+	return rc;
+}
+
+static inline void tavil_mbhc_get_result_params(struct wcd9xxx *wcd9xxx,
+						s16 *d1_a, u16 noff,
+						int32_t *zdet)
+{
+	int i;
+	int val, val1;
+	s16 c1;
+	s32 x1, d1;
+	int32_t denom;
+	int minCode_param[] = {
+			3277, 1639, 820, 410, 205, 103, 52, 26
+	};
+
+	regmap_update_bits(wcd9xxx->regmap, WCD934X_ANA_MBHC_ZDET, 0x20, 0x20);
+	for (i = 0; i < TAVIL_ZDET_NUM_MEASUREMENTS; i++) {
+		regmap_read(wcd9xxx->regmap, WCD934X_ANA_MBHC_RESULT_2, &val);
+		if (val & 0x80)
+			break;
+	}
+	val = val << 0x8;
+	regmap_read(wcd9xxx->regmap, WCD934X_ANA_MBHC_RESULT_1, &val1);
+	val |= val1;
+	regmap_update_bits(wcd9xxx->regmap, WCD934X_ANA_MBHC_ZDET, 0x20, 0x00);
+	x1 = TAVIL_MBHC_GET_X1(val);
+	c1 = TAVIL_MBHC_GET_C1(val);
+	/* If ramp is not complete, give additional 5ms */
+	if ((c1 < 2) && x1)
+		usleep_range(5000, 5050);
+
+	if (!c1 || !x1) {
+		dev_dbg(wcd9xxx->dev,
+			"%s: Impedance detect ramp error, c1=%d, x1=0x%x\n",
+			__func__, c1, x1);
+		goto ramp_down;
+	}
+	d1 = d1_a[c1];
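+	/*
+	 * zdet (milliohm) is derived from the ramp result as:
+	 * zdet = (86 * 16384 * 1000) / (x1 * d1 - 2^(14 - noff)),
+	 * where x1 is the 14-bit ramp code, d1 the range-dependent scale
+	 * factor picked above, and noff the range offset programmed by
+	 * the caller.
+	 */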
+	denom = (x1 * d1) - (1 << (14 - noff));
+	if (denom > 0)
+		*zdet = (TAVIL_MBHC_ZDET_CONST * 1000) / denom;
+	else if (x1 < minCode_param[noff])
+		*zdet = TAVIL_ZDET_FLOATING_IMPEDANCE;
+
+	dev_dbg(wcd9xxx->dev, "%s: d1=%d, c1=%d, x1=0x%x, z_val=%d(milliOhm)\n",
+		__func__, d1, c1, x1, *zdet);
+ramp_down:
+	i = 0;
+	while (x1) {
+		regmap_bulk_read(wcd9xxx->regmap,
+				 WCD934X_ANA_MBHC_RESULT_1, (u8 *)&val, 2);
+		x1 = TAVIL_MBHC_GET_X1(val);
+		i++;
+		if (i == TAVIL_ZDET_NUM_MEASUREMENTS)
+			break;
+	}
+}
+
+static void tavil_mbhc_zdet_ramp(struct snd_soc_codec *codec,
+				 struct tavil_mbhc_zdet_param *zdet_param,
+				 int32_t *zl, int32_t *zr, s16 *d1_a)
+{
+	struct wcd9xxx *wcd9xxx = dev_get_drvdata(codec->dev->parent);
+	int32_t zdet = 0;
+
+	snd_soc_update_bits(codec, WCD934X_MBHC_NEW_ZDET_ANA_CTL, 0x70,
+			    zdet_param->ldo_ctl << 4);
+	snd_soc_update_bits(codec, WCD934X_ANA_MBHC_BTN5, 0xFC,
+			    zdet_param->btn5);
+	snd_soc_update_bits(codec, WCD934X_ANA_MBHC_BTN6, 0xFC,
+			    zdet_param->btn6);
+	snd_soc_update_bits(codec, WCD934X_ANA_MBHC_BTN7, 0xFC,
+			    zdet_param->btn7);
+	snd_soc_update_bits(codec, WCD934X_MBHC_NEW_ZDET_ANA_CTL, 0x0F,
+			    zdet_param->noff);
+	snd_soc_update_bits(codec, WCD934X_MBHC_NEW_ZDET_RAMP_CTL, 0x0F,
+			    zdet_param->nshift);
+
+	if (!zl)
+		goto z_right;
+	/* Start impedance measurement for HPH_L */
+	regmap_update_bits(wcd9xxx->regmap,
+			   WCD934X_ANA_MBHC_ZDET, 0x80, 0x80);
+	dev_dbg(wcd9xxx->dev, "%s: ramp for HPH_L, noff = %d\n",
+		__func__, zdet_param->noff);
+	tavil_mbhc_get_result_params(wcd9xxx, d1_a, zdet_param->noff, &zdet);
+	regmap_update_bits(wcd9xxx->regmap,
+			   WCD934X_ANA_MBHC_ZDET, 0x80, 0x00);
+
+	*zl = zdet;
+
+z_right:
+	if (!zr)
+		return;
+	/* Start impedance measurement for HPH_R */
+	regmap_update_bits(wcd9xxx->regmap,
+			   WCD934X_ANA_MBHC_ZDET, 0x40, 0x40);
+	dev_dbg(wcd9xxx->dev, "%s: ramp for HPH_R, noff = %d\n",
+		__func__, zdet_param->noff);
+	tavil_mbhc_get_result_params(wcd9xxx, d1_a, zdet_param->noff, &zdet);
+	regmap_update_bits(wcd9xxx->regmap,
+			   WCD934X_ANA_MBHC_ZDET, 0x40, 0x00);
+
+	*zr = zdet;
+}
+
+static inline void tavil_wcd_mbhc_qfuse_cal(struct snd_soc_codec *codec,
+					    int32_t *z_val, int flag_l_r)
+{
+	s16 q1;
+	int q1_cal;
+
+	if (*z_val < (TAVIL_ZDET_VAL_400/1000))
+		q1 = snd_soc_read(codec,
+			WCD934X_CHIP_TIER_CTRL_EFUSE_VAL_OUT1 + (2 * flag_l_r));
+	else
+		q1 = snd_soc_read(codec,
+			WCD934X_CHIP_TIER_CTRL_EFUSE_VAL_OUT2 + (2 * flag_l_r));
+	if (q1 & 0x80)
+		q1_cal = (10000 - ((q1 & 0x7F) * 25));
+	else
+		q1_cal = (10000 + (q1 * 25));
+	if (q1_cal > 0)
+		*z_val = ((*z_val) * 10000) / q1_cal;
+}
+
+static void tavil_wcd_mbhc_calc_impedance(struct wcd_mbhc *mbhc, uint32_t *zl,
+					  uint32_t *zr)
+{
+	struct snd_soc_codec *codec = mbhc->codec;
+	struct wcd9xxx *wcd9xxx = dev_get_drvdata(codec->dev->parent);
+	s16 reg0, reg1, reg2, reg3, reg4;
+	int32_t z1L, z1R, z1Ls;
+	int zMono, z_diff1, z_diff2;
+	bool is_fsm_disable = false;
+	struct tavil_mbhc_zdet_param zdet_param[] = {
+		{4, 0, 4, 0x08, 0x14, 0x18}, /* < 32ohm */
+		{2, 0, 3, 0x18, 0x7C, 0x90}, /* 32ohm < Z < 400ohm */
+		{1, 4, 5, 0x18, 0x7C, 0x90}, /* 400ohm < Z < 1200ohm */
+		{1, 6, 7, 0x18, 0x7C, 0x90}, /* >1200ohm */
+	};
+	struct tavil_mbhc_zdet_param *zdet_param_ptr = NULL;
+	s16 d1_a[][4] = {
+		{0, 30, 90, 30},
+		{0, 30, 30, 5},
+		{0, 30, 30, 5},
+		{0, 30, 30, 5},
+	};
+	s16 *d1 = NULL;
+
+	WCD_MBHC_RSC_ASSERT_LOCKED(mbhc);
+
+	reg0 = snd_soc_read(codec, WCD934X_ANA_MBHC_BTN5);
+	reg1 = snd_soc_read(codec, WCD934X_ANA_MBHC_BTN6);
+	reg2 = snd_soc_read(codec, WCD934X_ANA_MBHC_BTN7);
+	reg3 = snd_soc_read(codec, WCD934X_MBHC_CTL_CLK);
+	reg4 = snd_soc_read(codec, WCD934X_MBHC_NEW_ZDET_ANA_CTL);
+
+	if (snd_soc_read(codec, WCD934X_ANA_MBHC_ELECT) & 0x80) {
+		is_fsm_disable = true;
+		regmap_update_bits(wcd9xxx->regmap,
+				   WCD934X_ANA_MBHC_ELECT, 0x80, 0x00);
+	}
+
+	/* For NO-jack, disable L_DET_EN before Z-det measurements */
+	if (mbhc->hphl_swh)
+		regmap_update_bits(wcd9xxx->regmap,
+				   WCD934X_ANA_MBHC_MECH, 0x80, 0x00);
+
+	/* Turn off 100k pull down on HPHL */
+	regmap_update_bits(wcd9xxx->regmap,
+			   WCD934X_ANA_MBHC_MECH, 0x01, 0x00);
+
+	/* First get impedance on Left */
+	d1 = d1_a[1];
+	zdet_param_ptr = &zdet_param[1];
+	tavil_mbhc_zdet_ramp(codec, zdet_param_ptr, &z1L, NULL, d1);
+
+	if (!TAVIL_MBHC_IS_SECOND_RAMP_REQUIRED(z1L))
+		goto left_ch_impedance;
+
+	/* Second ramp for left ch */
+	if (z1L < TAVIL_ZDET_VAL_32) {
+		zdet_param_ptr = &zdet_param[0];
+		d1 = d1_a[0];
+	} else if ((z1L > TAVIL_ZDET_VAL_400) && (z1L <= TAVIL_ZDET_VAL_1200)) {
+		zdet_param_ptr = &zdet_param[2];
+		d1 = d1_a[2];
+	} else if (z1L > TAVIL_ZDET_VAL_1200) {
+		zdet_param_ptr = &zdet_param[3];
+		d1 = d1_a[3];
+	}
+	tavil_mbhc_zdet_ramp(codec, zdet_param_ptr, &z1L, NULL, d1);
+
+left_ch_impedance:
+	if ((z1L == TAVIL_ZDET_FLOATING_IMPEDANCE) ||
+		(z1L > TAVIL_ZDET_VAL_100K)) {
+		*zl = TAVIL_ZDET_FLOATING_IMPEDANCE;
+		zdet_param_ptr = &zdet_param[1];
+		d1 = d1_a[1];
+	} else {
+		*zl = z1L/1000;
+		tavil_wcd_mbhc_qfuse_cal(codec, zl, 0);
+	}
+	dev_dbg(codec->dev, "%s: impedance on HPH_L = %d(ohms)\n",
+		__func__, *zl);
+
+	/* Start of right impedance ramp and calculation */
+	tavil_mbhc_zdet_ramp(codec, zdet_param_ptr, NULL, &z1R, d1);
+	if (TAVIL_MBHC_IS_SECOND_RAMP_REQUIRED(z1R)) {
+		if (((z1R > TAVIL_ZDET_VAL_1200) &&
+			(zdet_param_ptr->noff == 0x6)) ||
+			((*zl) != TAVIL_ZDET_FLOATING_IMPEDANCE))
+			goto right_ch_impedance;
+		/* Second ramp for right ch */
+		if (z1R < TAVIL_ZDET_VAL_32) {
+			zdet_param_ptr = &zdet_param[0];
+			d1 = d1_a[0];
+		} else if ((z1R > TAVIL_ZDET_VAL_400) &&
+			(z1R <= TAVIL_ZDET_VAL_1200)) {
+			zdet_param_ptr = &zdet_param[2];
+			d1 = d1_a[2];
+		} else if (z1R > TAVIL_ZDET_VAL_1200) {
+			zdet_param_ptr = &zdet_param[3];
+			d1 = d1_a[3];
+		}
+		tavil_mbhc_zdet_ramp(codec, zdet_param_ptr, NULL, &z1R, d1);
+	}
+right_ch_impedance:
+	if ((z1R == TAVIL_ZDET_FLOATING_IMPEDANCE) ||
+		(z1R > TAVIL_ZDET_VAL_100K)) {
+		*zr = TAVIL_ZDET_FLOATING_IMPEDANCE;
+	} else {
+		*zr = z1R/1000;
+		tavil_wcd_mbhc_qfuse_cal(codec, zr, 1);
+	}
+	dev_dbg(codec->dev, "%s: impedance on HPH_R = %d(ohms)\n",
+		__func__, *zr);
+
+	/* Mono/stereo detection */
+	if ((*zl == TAVIL_ZDET_FLOATING_IMPEDANCE) &&
+		(*zr == TAVIL_ZDET_FLOATING_IMPEDANCE)) {
+		dev_dbg(codec->dev,
+			"%s: plug type is invalid or extension cable\n",
+			__func__);
+		goto zdet_complete;
+	}
+	if ((*zl == TAVIL_ZDET_FLOATING_IMPEDANCE) ||
+	    (*zr == TAVIL_ZDET_FLOATING_IMPEDANCE) ||
+	    ((*zl < WCD_MONO_HS_MIN_THR) && (*zr > WCD_MONO_HS_MIN_THR)) ||
+	    ((*zl > WCD_MONO_HS_MIN_THR) && (*zr < WCD_MONO_HS_MIN_THR))) {
+		dev_dbg(codec->dev,
+			"%s: Mono plug type with one ch floating or shorted to GND\n",
+			__func__);
+		mbhc->hph_type = WCD_MBHC_HPH_MONO;
+		goto zdet_complete;
+	}
+	snd_soc_update_bits(codec, WCD934X_HPH_R_ATEST, 0x02, 0x02);
+	snd_soc_update_bits(codec, WCD934X_HPH_PA_CTL2, 0x40, 0x01);
+	if (*zl < (TAVIL_ZDET_VAL_32/1000))
+		tavil_mbhc_zdet_ramp(codec, &zdet_param[0], &z1Ls, NULL, d1);
+	else
+		tavil_mbhc_zdet_ramp(codec, &zdet_param[1], &z1Ls, NULL, d1);
+	snd_soc_update_bits(codec, WCD934X_HPH_PA_CTL2, 0x40, 0x00);
+	snd_soc_update_bits(codec, WCD934X_HPH_R_ATEST, 0x02, 0x00);
+	z1Ls /= 1000;
+	tavil_wcd_mbhc_qfuse_cal(codec, &z1Ls, 0);
+	/* Parallel of left Z and 9 ohm pull down resistor */
+	zMono = ((*zl) * 9) / ((*zl) + 9);
+	z_diff1 = (z1Ls > zMono) ? (z1Ls - zMono) : (zMono - z1Ls);
+	z_diff2 = ((*zl) > z1Ls) ? ((*zl) - z1Ls) : (z1Ls - (*zl));
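+	/*
+	 * Cross-multiplied comparison of the relative differences
+	 * |z1Ls - zMono| / (z1Ls + zMono) and |zl - z1Ls| / (zl + z1Ls):
+	 * if z1Ls stays close to zl, the 9-ohm pull-down on HPH_R did not
+	 * load the measurement, so L/R are isolated (stereo); if z1Ls
+	 * collapses towards zMono, L/R are tied together (mono).
+	 */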
+	if ((z_diff1 * (*zl + z1Ls)) > (z_diff2 * (z1Ls + zMono))) {
+		dev_dbg(codec->dev, "%s: stereo plug type detected\n",
+			__func__);
+		mbhc->hph_type = WCD_MBHC_HPH_STEREO;
+	} else {
+		dev_dbg(codec->dev, "%s: MONO plug type detected\n",
+			__func__);
+		mbhc->hph_type = WCD_MBHC_HPH_MONO;
+	}
+
+zdet_complete:
+	snd_soc_write(codec, WCD934X_ANA_MBHC_BTN5, reg0);
+	snd_soc_write(codec, WCD934X_ANA_MBHC_BTN6, reg1);
+	snd_soc_write(codec, WCD934X_ANA_MBHC_BTN7, reg2);
+	/* Turn on 100k pull down on HPHL */
+	regmap_update_bits(wcd9xxx->regmap,
+			   WCD934X_ANA_MBHC_MECH, 0x01, 0x01);
+
+	/* For NO-jack, re-enable L_DET_EN after Z-det measurements */
+	if (mbhc->hphl_swh)
+		regmap_update_bits(wcd9xxx->regmap,
+				   WCD934X_ANA_MBHC_MECH, 0x80, 0x80);
+
+	snd_soc_write(codec, WCD934X_MBHC_NEW_ZDET_ANA_CTL, reg4);
+	snd_soc_write(codec, WCD934X_MBHC_CTL_CLK, reg3);
+	if (is_fsm_disable)
+		regmap_update_bits(wcd9xxx->regmap,
+				   WCD934X_ANA_MBHC_ELECT, 0x80, 0x80);
+}
+
+static void tavil_mbhc_gnd_det_ctrl(struct snd_soc_codec *codec, bool enable)
+{
+	if (enable) {
+		snd_soc_update_bits(codec, WCD934X_ANA_MBHC_MECH,
+				    0x02, 0x02);
+		snd_soc_update_bits(codec, WCD934X_ANA_MBHC_MECH,
+				    0x40, 0x40);
+	} else {
+		snd_soc_update_bits(codec, WCD934X_ANA_MBHC_MECH,
+				    0x40, 0x00);
+		snd_soc_update_bits(codec, WCD934X_ANA_MBHC_MECH,
+				    0x02, 0x00);
+	}
+}
+
+static void tavil_mbhc_hph_pull_down_ctrl(struct snd_soc_codec *codec,
+					  bool enable)
+{
+	if (enable) {
+		snd_soc_update_bits(codec, WCD934X_HPH_PA_CTL2,
+				    0x40, 0x40);
+		snd_soc_update_bits(codec, WCD934X_HPH_PA_CTL2,
+				    0x10, 0x10);
+	} else {
+		snd_soc_update_bits(codec, WCD934X_HPH_PA_CTL2,
+				    0x40, 0x00);
+		snd_soc_update_bits(codec, WCD934X_HPH_PA_CTL2,
+				    0x10, 0x00);
+	}
+}
+
+static void tavil_mbhc_moisture_config(struct wcd_mbhc *mbhc)
+{
+	struct snd_soc_codec *codec = mbhc->codec;
+
+	if ((mbhc->moist_rref == R_OFF) ||
+	    (mbhc->mbhc_cfg->enable_usbc_analog)) {
+		snd_soc_update_bits(codec, WCD934X_MBHC_NEW_CTL_2,
+				    0x0C, R_OFF << 2);
+		return;
+	}
+
+	/* Do not enable moisture detection if jack type is NC */
+	if (!mbhc->hphl_swh) {
+		dev_dbg(codec->dev, "%s: disable moisture detection for NC\n",
+			__func__);
+		snd_soc_update_bits(codec, WCD934X_MBHC_NEW_CTL_2,
+				    0x0C, R_OFF << 2);
+		return;
+	}
+
+	snd_soc_update_bits(codec, WCD934X_MBHC_NEW_CTL_2,
+			    0x0C, mbhc->moist_rref << 2);
+}
+
+static bool tavil_hph_register_recovery(struct wcd_mbhc *mbhc)
+{
+	struct snd_soc_codec *codec = mbhc->codec;
+	struct wcd934x_mbhc *wcd934x_mbhc = tavil_soc_get_mbhc(codec);
+
+	if (!wcd934x_mbhc)
+		return false;
+
+	wcd934x_mbhc->is_hph_recover = false;
+	snd_soc_dapm_force_enable_pin(snd_soc_codec_get_dapm(codec),
+				      "RESET_HPH_REGISTERS");
+	snd_soc_dapm_sync(snd_soc_codec_get_dapm(codec));
+
+	snd_soc_dapm_disable_pin(snd_soc_codec_get_dapm(codec),
+				 "RESET_HPH_REGISTERS");
+	snd_soc_dapm_sync(snd_soc_codec_get_dapm(codec));
+
+	return wcd934x_mbhc->is_hph_recover;
+}
+
+static void tavil_update_anc_state(struct snd_soc_codec *codec, bool enable,
+				   int anc_num)
+{
+	if (enable)
+		snd_soc_update_bits(codec, WCD934X_CDC_RX1_RX_PATH_CFG0 +
+				(20 * anc_num), 0x10, 0x10);
+	else
+		snd_soc_update_bits(codec, WCD934X_CDC_RX1_RX_PATH_CFG0 +
+				(20 * anc_num), 0x10, 0x00);
+}
+
+static bool tavil_is_anc_on(struct wcd_mbhc *mbhc)
+{
+	bool anc_on = false;
+	u16 ancl, ancr;
+
+	ancl =
+	(snd_soc_read(mbhc->codec, WCD934X_CDC_RX1_RX_PATH_CFG0)) & 0x10;
+	ancr =
+	(snd_soc_read(mbhc->codec, WCD934X_CDC_RX2_RX_PATH_CFG0)) & 0x10;
+
+	anc_on = !!(ancl | ancr);
+
+	return anc_on;
+}
+
+static const struct wcd_mbhc_cb mbhc_cb = {
+	.request_irq = tavil_mbhc_request_irq,
+	.irq_control = tavil_mbhc_irq_control,
+	.free_irq = tavil_mbhc_free_irq,
+	.clk_setup = tavil_mbhc_clk_setup,
+	.map_btn_code_to_num = tavil_mbhc_btn_to_num,
+	.enable_mb_source = tavil_enable_ext_mb_source,
+	.mbhc_bias = tavil_mbhc_mbhc_bias_control,
+	.set_btn_thr = tavil_mbhc_program_btn_thr,
+	.lock_sleep = tavil_mbhc_lock_sleep,
+	.register_notifier = tavil_mbhc_register_notifier,
+	.micbias_enable_status = tavil_mbhc_micb_en_status,
+	.hph_pa_on_status = tavil_mbhc_hph_pa_on_status,
+	.hph_pull_up_control = tavil_mbhc_hph_l_pull_up_control,
+	.mbhc_micbias_control = tavil_mbhc_request_micbias,
+	.mbhc_micb_ramp_control = tavil_mbhc_micb_ramp_control,
+	.get_hwdep_fw_cal = tavil_get_hwdep_fw_cal,
+	.mbhc_micb_ctrl_thr_mic = tavil_mbhc_micb_ctrl_threshold_mic,
+	.compute_impedance = tavil_wcd_mbhc_calc_impedance,
+	.mbhc_gnd_det_ctrl = tavil_mbhc_gnd_det_ctrl,
+	.hph_pull_down_ctrl = tavil_mbhc_hph_pull_down_ctrl,
+	.mbhc_moisture_config = tavil_mbhc_moisture_config,
+	.hph_register_recovery = tavil_hph_register_recovery,
+	.update_anc_state = tavil_update_anc_state,
+	.is_anc_on = tavil_is_anc_on,
+};
+
+static struct regulator *tavil_codec_find_ondemand_regulator(
+		struct snd_soc_codec *codec, const char *name)
+{
+	int i;
+	struct wcd9xxx *wcd9xxx = dev_get_drvdata(codec->dev->parent);
+	struct wcd9xxx_pdata *pdata = dev_get_platdata(codec->dev->parent);
+
+	for (i = 0; i < wcd9xxx->num_of_supplies; ++i) {
+		if (pdata->regulator[i].ondemand &&
+		    wcd9xxx->supplies[i].supply &&
+		    !strcmp(wcd9xxx->supplies[i].supply, name))
+			return wcd9xxx->supplies[i].consumer;
+	}
+
+	dev_dbg(codec->dev, "Warning: regulator not found:%s\n",
+		name);
+	return NULL;
+}
+
+static int tavil_get_hph_type(struct snd_kcontrol *kcontrol,
+			      struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
+	struct wcd934x_mbhc *wcd934x_mbhc = tavil_soc_get_mbhc(codec);
+	struct wcd_mbhc *mbhc;
+
+	if (!wcd934x_mbhc) {
+		dev_err(codec->dev, "%s: mbhc not initialized!\n", __func__);
+		return -EINVAL;
+	}
+
+	mbhc = &wcd934x_mbhc->wcd_mbhc;
+
+	ucontrol->value.integer.value[0] = (u32) mbhc->hph_type;
+	dev_dbg(codec->dev, "%s: hph_type = %u\n", __func__, mbhc->hph_type);
+
+	return 0;
+}
+
+static int tavil_hph_impedance_get(struct snd_kcontrol *kcontrol,
+				   struct snd_ctl_elem_value *ucontrol)
+{
+	uint32_t zl, zr;
+	bool hphr;
+	struct soc_multi_mixer_control *mc;
+	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
+	struct wcd934x_mbhc *wcd934x_mbhc = tavil_soc_get_mbhc(codec);
+
+	if (!wcd934x_mbhc) {
+		dev_err(codec->dev, "%s: mbhc not initialized!\n", __func__);
+		return -EINVAL;
+	}
+
+	mc = (struct soc_multi_mixer_control *)(kcontrol->private_value);
+	hphr = mc->shift;
+	wcd_mbhc_get_impedance(&wcd934x_mbhc->wcd_mbhc, &zl, &zr);
+	dev_dbg(codec->dev, "%s: zl=%u(ohms), zr=%u(ohms)\n", __func__, zl, zr);
+	ucontrol->value.integer.value[0] = hphr ? zr : zl;
+
+	return 0;
+}
+
+static const struct snd_kcontrol_new hph_type_detect_controls[] = {
+	SOC_SINGLE_EXT("HPH Type", 0, 0, UINT_MAX, 0,
+		       tavil_get_hph_type, NULL),
+};
+
+static const struct snd_kcontrol_new impedance_detect_controls[] = {
+	SOC_SINGLE_EXT("HPHL Impedance", 0, 0, UINT_MAX, 0,
+		       tavil_hph_impedance_get, NULL),
+	SOC_SINGLE_EXT("HPHR Impedance", 0, 1, UINT_MAX, 0,
+		       tavil_hph_impedance_get, NULL),
+};
+
+/*
+ * tavil_mbhc_get_impedance: get impedance of headphone left and right channels
+ * @wcd934x_mbhc: handle to struct wcd934x_mbhc *
+ * @zl: handle to left-ch impedance
+ * @zr: handle to right-ch impedance
+ * return 0 on success, or an error code on failure
+ */
+int tavil_mbhc_get_impedance(struct wcd934x_mbhc *wcd934x_mbhc,
+			     uint32_t *zl, uint32_t *zr)
+{
+	if (!wcd934x_mbhc) {
+		pr_err("%s: mbhc not initialized!\n", __func__);
+		return -EINVAL;
+	}
+	if (!zl || !zr) {
+		pr_err("%s: zl or zr null!\n", __func__);
+		return -EINVAL;
+	}
+
+	return wcd_mbhc_get_impedance(&wcd934x_mbhc->wcd_mbhc, zl, zr);
+}
+EXPORT_SYMBOL(tavil_mbhc_get_impedance);
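+
+/*
+ * Illustrative sketch (not part of the driver): typical caller usage,
+ * with a hypothetical tavil->mbhc handle.
+ *
+ *	uint32_t zl, zr;
+ *
+ *	if (!tavil_mbhc_get_impedance(tavil->mbhc, &zl, &zr))
+ *		pr_debug("zl = %u ohms, zr = %u ohms\n", zl, zr);
+ */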
+
+/*
+ * tavil_mbhc_hs_detect: starts mbhc insertion/removal functionality
+ * @codec: handle to snd_soc_codec *
+ * @mbhc_cfg: handle to mbhc configuration structure
+ * return 0 if mbhc_start succeeds, or an error code on failure
+ */
+int tavil_mbhc_hs_detect(struct snd_soc_codec *codec,
+			 struct wcd_mbhc_config *mbhc_cfg)
+{
+	struct wcd934x_mbhc *wcd934x_mbhc = tavil_soc_get_mbhc(codec);
+
+	if (!wcd934x_mbhc) {
+		dev_err(codec->dev, "%s: mbhc not initialized!\n", __func__);
+		return -EINVAL;
+	}
+
+	return wcd_mbhc_start(&wcd934x_mbhc->wcd_mbhc, mbhc_cfg);
+}
+EXPORT_SYMBOL(tavil_mbhc_hs_detect);
+
+/*
+ * tavil_mbhc_hs_detect_exit: stop mbhc insertion/removal functionality
+ * @codec: handle to snd_soc_codec *
+ */
+void tavil_mbhc_hs_detect_exit(struct snd_soc_codec *codec)
+{
+	struct wcd934x_mbhc *wcd934x_mbhc = tavil_soc_get_mbhc(codec);
+
+	if (!wcd934x_mbhc) {
+		dev_err(codec->dev, "%s: mbhc not initialized!\n", __func__);
+		return;
+	}
+	wcd_mbhc_stop(&wcd934x_mbhc->wcd_mbhc);
+}
+EXPORT_SYMBOL(tavil_mbhc_hs_detect_exit);
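+
+/*
+ * Illustrative sketch (not part of the driver): hs_detect and
+ * hs_detect_exit are expected to be called as a pair, e.g. from the
+ * machine driver once the sound card is up (wcd_mbhc_cfg is a
+ * hypothetical struct wcd_mbhc_config instance):
+ *
+ *	ret = tavil_mbhc_hs_detect(codec, &wcd_mbhc_cfg);
+ *	...
+ *	tavil_mbhc_hs_detect_exit(codec);
+ */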
+
+/*
+ * tavil_mbhc_post_ssr_init: initialize mbhc for tavil post subsystem restart
+ * @mbhc: pointer to wcd934x_mbhc structure
+ * @codec: handle to snd_soc_codec *
+ *
+ * return 0 if mbhc_init succeeds, or an error code on failure
+ */
+int tavil_mbhc_post_ssr_init(struct wcd934x_mbhc *mbhc,
+			     struct snd_soc_codec *codec)
+{
+	int ret;
+
+	if (!mbhc || !codec)
+		return -EINVAL;
+
+	wcd_mbhc_deinit(&mbhc->wcd_mbhc);
+	ret = wcd_mbhc_init(&mbhc->wcd_mbhc, codec, &mbhc_cb, &intr_ids,
+			    wcd_mbhc_registers, TAVIL_ZDET_SUPPORTED);
+	if (ret) {
+		dev_err(codec->dev, "%s: mbhc initialization failed\n",
+			__func__);
+		goto done;
+	}
+	snd_soc_update_bits(codec, WCD934X_MBHC_NEW_CTL_1, 0x04, 0x04);
+	snd_soc_update_bits(codec, WCD934X_MBHC_CTL_BCS, 0x01, 0x01);
+
+done:
+	return ret;
+}
+EXPORT_SYMBOL(tavil_mbhc_post_ssr_init);
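+
+/*
+ * Illustrative sketch (not part of the driver): expected to be called
+ * from the codec driver's SSR "up" handler once registers are restored:
+ *
+ *	ret = tavil_mbhc_post_ssr_init(tavil->mbhc, codec);
+ */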
+
+/*
+ * tavil_mbhc_init: initialize mbhc for tavil
+ * @mbhc: pointer to a wcd934x_mbhc struct pointer in which the handle is stored
+ * @codec: handle to snd_soc_codec *
+ * @fw_data: handle to firmware data
+ *
+ * return 0 if mbhc_init succeeds, or an error code on failure
+ */
+int tavil_mbhc_init(struct wcd934x_mbhc **mbhc, struct snd_soc_codec *codec,
+		    struct fw_info *fw_data)
+{
+	struct regulator *supply;
+	struct wcd934x_mbhc *wcd934x_mbhc;
+	int ret;
+
+	wcd934x_mbhc = devm_kzalloc(codec->dev, sizeof(struct wcd934x_mbhc),
+				    GFP_KERNEL);
+	if (!wcd934x_mbhc)
+		return -ENOMEM;
+
+	wcd934x_mbhc->wcd9xxx = dev_get_drvdata(codec->dev->parent);
+	wcd934x_mbhc->fw_data = fw_data;
+	BLOCKING_INIT_NOTIFIER_HEAD(&wcd934x_mbhc->notifier);
+
+	ret = wcd_mbhc_init(&wcd934x_mbhc->wcd_mbhc, codec, &mbhc_cb, &intr_ids,
+			    wcd_mbhc_registers, TAVIL_ZDET_SUPPORTED);
+	if (ret) {
+		dev_err(codec->dev, "%s: mbhc initialization failed\n",
+			__func__);
+		goto err;
+	}
+
+	supply = tavil_codec_find_ondemand_regulator(codec,
+			on_demand_supply_name[WCD934X_ON_DEMAND_MICBIAS]);
+	if (supply) {
+		wcd934x_mbhc->on_demand_list[
+			WCD934X_ON_DEMAND_MICBIAS].supply =
+				supply;
+		wcd934x_mbhc->on_demand_list[
+			WCD934X_ON_DEMAND_MICBIAS].ondemand_supply_count =
+				0;
+	}
+
+	(*mbhc) = wcd934x_mbhc;
+	snd_soc_add_codec_controls(codec, impedance_detect_controls,
+				   ARRAY_SIZE(impedance_detect_controls));
+	snd_soc_add_codec_controls(codec, hph_type_detect_controls,
+				   ARRAY_SIZE(hph_type_detect_controls));
+
+	snd_soc_update_bits(codec, WCD934X_MBHC_NEW_CTL_1, 0x04, 0x04);
+	snd_soc_update_bits(codec, WCD934X_MBHC_CTL_BCS, 0x01, 0x01);
+
+	return 0;
+err:
+	devm_kfree(codec->dev, wcd934x_mbhc);
+	return ret;
+}
+EXPORT_SYMBOL(tavil_mbhc_init);
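+
+/*
+ * Illustrative sketch (not part of the driver): typical probe-time call
+ * from the codec driver, with hypothetical tavil-> members:
+ *
+ *	ret = tavil_mbhc_init(&tavil->mbhc, codec, tavil->fw_data);
+ *	if (ret)
+ *		dev_err(codec->dev, "mbhc init failed: %d\n", ret);
+ */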
+
+/*
+ * tavil_mbhc_deinit: deinitialize mbhc for tavil
+ * @codec: handle to snd_soc_codec *
+ */
+void tavil_mbhc_deinit(struct snd_soc_codec *codec)
+{
+	struct wcd934x_mbhc *wcd934x_mbhc = tavil_soc_get_mbhc(codec);
+
+	if (wcd934x_mbhc) {
+		wcd_mbhc_deinit(&wcd934x_mbhc->wcd_mbhc);
+		devm_kfree(codec->dev, wcd934x_mbhc);
+	}
+}
+EXPORT_SYMBOL(tavil_mbhc_deinit);
diff -Nruw linux-4.4.115-fbx/sound/soc/codecs/wcd934x./wcd934x-mbhc.h linux-4.4.115-fbx/sound/soc/codecs/wcd934x/wcd934x-mbhc.h
--- linux-4.4.115-fbx/sound/soc/codecs/wcd934x./wcd934x-mbhc.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/sound/soc/codecs/wcd934x/wcd934x-mbhc.h	2019-01-22 16:16:29.551301175 +0100
@@ -0,0 +1,86 @@
+/*
+ * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#ifndef __WCD934X_MBHC_H__
+#define __WCD934X_MBHC_H__
+#include "../wcd-mbhc-v2.h"
+
+enum wcd934x_on_demand_supply_name {
+	WCD934X_ON_DEMAND_MICBIAS = 0,
+	WCD934X_ON_DEMAND_SUPPLIES_MAX,
+};
+
+struct wcd934x_on_demand_supply {
+	struct regulator *supply;
+	int ondemand_supply_count;
+};
+
+struct wcd934x_mbhc {
+	struct wcd_mbhc wcd_mbhc;
+	struct blocking_notifier_head notifier;
+	struct wcd934x_on_demand_supply on_demand_list[
+			WCD934X_ON_DEMAND_SUPPLIES_MAX];
+	struct wcd9xxx *wcd9xxx;
+	struct fw_info *fw_data;
+	bool mbhc_started;
+	bool is_hph_recover;
+};
+
+#ifdef CONFIG_SND_SOC_WCD934X_MBHC
+extern int tavil_mbhc_init(struct wcd934x_mbhc **mbhc,
+			   struct snd_soc_codec *codec,
+			   struct fw_info *fw_data);
+extern void tavil_mbhc_hs_detect_exit(struct snd_soc_codec *codec);
+extern int tavil_mbhc_hs_detect(struct snd_soc_codec *codec,
+				struct wcd_mbhc_config *mbhc_cfg);
+extern void tavil_mbhc_deinit(struct snd_soc_codec *codec);
+extern int tavil_mbhc_post_ssr_init(struct wcd934x_mbhc *mbhc,
+				    struct snd_soc_codec *codec);
+extern int tavil_mbhc_get_impedance(struct wcd934x_mbhc *wcd934x_mbhc,
+				    uint32_t *zl, uint32_t *zr);
+#else
+static inline int tavil_mbhc_init(struct wcd934x_mbhc **mbhc,
+				  struct snd_soc_codec *codec,
+				  struct fw_info *fw_data)
+{
+	return 0;
+}
+static inline void tavil_mbhc_hs_detect_exit(struct snd_soc_codec *codec)
+{
+}
+static inline int tavil_mbhc_hs_detect(struct snd_soc_codec *codec,
+				       struct wcd_mbhc_config *mbhc_cfg)
+{
+	return 0;
+}
+static inline void tavil_mbhc_deinit(struct snd_soc_codec *codec)
+{
+}
+static inline int tavil_mbhc_post_ssr_init(struct wcd934x_mbhc *mbhc,
+					   struct snd_soc_codec *codec)
+{
+	return 0;
+}
+static inline int tavil_mbhc_get_impedance(struct wcd934x_mbhc *wcd934x_mbhc,
+					   uint32_t *zl, uint32_t *zr)
+{
+	if (zl)
+		*zl = 0;
+	if (zr)
+		*zr = 0;
+	return -EINVAL;
+}
+#endif
+
+#endif /* __WCD934X_MBHC_H__ */
diff -Nruw linux-4.4.115-fbx/sound/soc/codecs/wcd934x./wcd934x-routing.h linux-4.4.115-fbx/sound/soc/codecs/wcd934x/wcd934x-routing.h
--- linux-4.4.115-fbx/sound/soc/codecs/wcd934x./wcd934x-routing.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/sound/soc/codecs/wcd934x/wcd934x-routing.h	2019-01-22 16:16:29.551301175 +0100
@@ -0,0 +1,1171 @@
+/*
+ * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#ifndef WCD934X_ROUTING_H
+#define WCD934X_ROUTING_H
+
+#include <sound/soc-dapm.h>
+
+const struct snd_soc_dapm_route tavil_slim_audio_map[] = {
+
+	/* Virtual input widgets */
+	{"AIF1 CAP", NULL, "AIF1_CAP Mixer"},
+	{"AIF2 CAP", NULL, "AIF2_CAP Mixer"},
+	{"AIF3 CAP", NULL, "AIF3_CAP Mixer"},
+	{"AIF4 MAD", NULL, "AIF4_MAD Mixer"},
+
+	/* Virtual input widget Mixer */
+	{"AIF1_CAP Mixer", "SLIM TX0", "SLIM TX0"},
+	{"AIF1_CAP Mixer", "SLIM TX1", "SLIM TX1"},
+	{"AIF1_CAP Mixer", "SLIM TX2", "SLIM TX2"},
+	{"AIF1_CAP Mixer", "SLIM TX3", "SLIM TX3"},
+	{"AIF1_CAP Mixer", "SLIM TX4", "SLIM TX4"},
+	{"AIF1_CAP Mixer", "SLIM TX5", "SLIM TX5"},
+	{"AIF1_CAP Mixer", "SLIM TX6", "SLIM TX6"},
+	{"AIF1_CAP Mixer", "SLIM TX7", "SLIM TX7"},
+	{"AIF1_CAP Mixer", "SLIM TX8", "SLIM TX8"},
+	{"AIF1_CAP Mixer", "SLIM TX9", "SLIM TX9"},
+	{"AIF1_CAP Mixer", "SLIM TX10", "SLIM TX10"},
+	{"AIF1_CAP Mixer", "SLIM TX11", "SLIM TX11"},
+	{"AIF1_CAP Mixer", "SLIM TX13", "SLIM TX13"},
+
+	{"AIF2_CAP Mixer", "SLIM TX0", "SLIM TX0"},
+	{"AIF2_CAP Mixer", "SLIM TX1", "SLIM TX1"},
+	{"AIF2_CAP Mixer", "SLIM TX2", "SLIM TX2"},
+	{"AIF2_CAP Mixer", "SLIM TX3", "SLIM TX3"},
+	{"AIF2_CAP Mixer", "SLIM TX4", "SLIM TX4"},
+	{"AIF2_CAP Mixer", "SLIM TX5", "SLIM TX5"},
+	{"AIF2_CAP Mixer", "SLIM TX6", "SLIM TX6"},
+	{"AIF2_CAP Mixer", "SLIM TX7", "SLIM TX7"},
+	{"AIF2_CAP Mixer", "SLIM TX8", "SLIM TX8"},
+	{"AIF2_CAP Mixer", "SLIM TX9", "SLIM TX9"},
+	{"AIF2_CAP Mixer", "SLIM TX10", "SLIM TX10"},
+	{"AIF2_CAP Mixer", "SLIM TX11", "SLIM TX11"},
+	{"AIF2_CAP Mixer", "SLIM TX13", "SLIM TX13"},
+
+	{"AIF3_CAP Mixer", "SLIM TX0", "SLIM TX0"},
+	{"AIF3_CAP Mixer", "SLIM TX1", "SLIM TX1"},
+	{"AIF3_CAP Mixer", "SLIM TX2", "SLIM TX2"},
+	{"AIF3_CAP Mixer", "SLIM TX3", "SLIM TX3"},
+	{"AIF3_CAP Mixer", "SLIM TX4", "SLIM TX4"},
+	{"AIF3_CAP Mixer", "SLIM TX5", "SLIM TX5"},
+	{"AIF3_CAP Mixer", "SLIM TX6", "SLIM TX6"},
+	{"AIF3_CAP Mixer", "SLIM TX7", "SLIM TX7"},
+	{"AIF3_CAP Mixer", "SLIM TX8", "SLIM TX8"},
+	{"AIF3_CAP Mixer", "SLIM TX9", "SLIM TX9"},
+	{"AIF3_CAP Mixer", "SLIM TX10", "SLIM TX10"},
+	{"AIF3_CAP Mixer", "SLIM TX11", "SLIM TX11"},
+	{"AIF3_CAP Mixer", "SLIM TX13", "SLIM TX13"},
+
+	{"AIF4_MAD Mixer", "SLIM TX13", "SLIM TX13"},
+
+	{"SLIM RX0 MUX", "AIF1_PB", "AIF1 PB"},
+	{"SLIM RX1 MUX", "AIF1_PB", "AIF1 PB"},
+	{"SLIM RX2 MUX", "AIF1_PB", "AIF1 PB"},
+	{"SLIM RX3 MUX", "AIF1_PB", "AIF1 PB"},
+	{"SLIM RX4 MUX", "AIF1_PB", "AIF1 PB"},
+	{"SLIM RX5 MUX", "AIF1_PB", "AIF1 PB"},
+	{"SLIM RX6 MUX", "AIF1_PB", "AIF1 PB"},
+	{"SLIM RX7 MUX", "AIF1_PB", "AIF1 PB"},
+
+	{"SLIM RX0 MUX", "AIF2_PB", "AIF2 PB"},
+	{"SLIM RX1 MUX", "AIF2_PB", "AIF2 PB"},
+	{"SLIM RX2 MUX", "AIF2_PB", "AIF2 PB"},
+	{"SLIM RX3 MUX", "AIF2_PB", "AIF2 PB"},
+	{"SLIM RX4 MUX", "AIF2_PB", "AIF2 PB"},
+	{"SLIM RX5 MUX", "AIF2_PB", "AIF2 PB"},
+	{"SLIM RX6 MUX", "AIF2_PB", "AIF2 PB"},
+	{"SLIM RX7 MUX", "AIF2_PB", "AIF2 PB"},
+
+	{"SLIM RX0 MUX", "AIF3_PB", "AIF3 PB"},
+	{"SLIM RX1 MUX", "AIF3_PB", "AIF3 PB"},
+	{"SLIM RX2 MUX", "AIF3_PB", "AIF3 PB"},
+	{"SLIM RX3 MUX", "AIF3_PB", "AIF3 PB"},
+	{"SLIM RX4 MUX", "AIF3_PB", "AIF3 PB"},
+	{"SLIM RX5 MUX", "AIF3_PB", "AIF3 PB"},
+	{"SLIM RX6 MUX", "AIF3_PB", "AIF3 PB"},
+	{"SLIM RX7 MUX", "AIF3_PB", "AIF3 PB"},
+
+	{"SLIM RX0 MUX", "AIF4_PB", "AIF4 PB"},
+	{"SLIM RX1 MUX", "AIF4_PB", "AIF4 PB"},
+	{"SLIM RX2 MUX", "AIF4_PB", "AIF4 PB"},
+	{"SLIM RX3 MUX", "AIF4_PB", "AIF4 PB"},
+	{"SLIM RX4 MUX", "AIF4_PB", "AIF4 PB"},
+	{"SLIM RX5 MUX", "AIF4_PB", "AIF4 PB"},
+	{"SLIM RX6 MUX", "AIF4_PB", "AIF4 PB"},
+	{"SLIM RX7 MUX", "AIF4_PB", "AIF4 PB"},
+
+	{"SLIM RX0", NULL, "SLIM RX0 MUX"},
+	{"SLIM RX1", NULL, "SLIM RX1 MUX"},
+	{"SLIM RX2", NULL, "SLIM RX2 MUX"},
+	{"SLIM RX3", NULL, "SLIM RX3 MUX"},
+	{"SLIM RX4", NULL, "SLIM RX4 MUX"},
+	{"SLIM RX5", NULL, "SLIM RX5 MUX"},
+	{"SLIM RX6", NULL, "SLIM RX6 MUX"},
+	{"SLIM RX7", NULL, "SLIM RX7 MUX"},
+
+};
+
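+/*
+ * On-codec routing: everything between the CDC_IF RX/TX interface
+ * muxes and the physical pins (DMICs, AMICs/ADCs, interpolators,
+ * DACs and PAs).
+ */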
+const struct snd_soc_dapm_route tavil_audio_map[] = {
+
+	/* MAD (Mic Activity Detection) */
+	{"MAD_SEL MUX", "SPE", "MAD_CPE_INPUT"},
+	{"MAD_SEL MUX", "MSM", "MADINPUT"},
+
+	{"MAD_INP MUX", "MAD", "MAD_SEL MUX"},
+	{"MAD_INP MUX", "DEC1", "ADC MUX1"},
+
+	{"MAD_BROADCAST", "Switch", "MAD_INP MUX"},
+	{"MAD_CPE1", "Switch", "MAD_INP MUX"},
+	{"MAD_CPE2", "Switch", "MAD_INP MUX"},
+
+	{"MAD_CPE_OUT1", NULL, "MAD_CPE1"},
+	{"MAD_CPE_OUT2", NULL, "MAD_CPE2"},
+
+	/* Speaker V/I (voltage/current) sense feedback */
+	{"AIF4_VI Mixer", "SPKR_VI_1", "VIINPUT"},
+	{"AIF4_VI Mixer", "SPKR_VI_2", "VIINPUT"},
+	{"AIF4 VI", NULL, "AIF4_VI Mixer"},
+
+	/* CDC Tx interface with SLIMBUS */
+	{"SLIM TX0", NULL, "CDC_IF TX0 MUX"},
+	{"SLIM TX1", NULL, "CDC_IF TX1 MUX"},
+	{"SLIM TX2", NULL, "CDC_IF TX2 MUX"},
+	{"SLIM TX3", NULL, "CDC_IF TX3 MUX"},
+	{"SLIM TX4", NULL, "CDC_IF TX4 MUX"},
+	{"SLIM TX5", NULL, "CDC_IF TX5 MUX"},
+	{"SLIM TX6", NULL, "CDC_IF TX6 MUX"},
+	{"SLIM TX7", NULL, "CDC_IF TX7 MUX"},
+	{"SLIM TX8", NULL, "CDC_IF TX8 MUX"},
+	{"SLIM TX9", NULL, "CDC_IF TX9 MUX"},
+	{"SLIM TX10", NULL, "CDC_IF TX10 MUX"},
+	{"SLIM TX11", NULL, "CDC_IF TX11 MUX"},
+	{"SLIM TX13", NULL, "CDC_IF TX13 MUX"},
+
+	{"CDC_IF TX0 MUX", "DEC0", "ADC MUX0"},
+	{"CDC_IF TX0 MUX", "RX_MIX_TX0", "RX MIX TX0 MUX"},
+	{"CDC_IF TX0 MUX", "DEC0_192", "ADC US MUX0"},
+
+	{"CDC_IF TX1 MUX", "DEC1", "ADC MUX1"},
+	{"CDC_IF TX1 MUX", "RX_MIX_TX1", "RX MIX TX1 MUX"},
+	{"CDC_IF TX1 MUX", "DEC1_192", "ADC US MUX1"},
+
+	{"CDC_IF TX2 MUX", "DEC2", "ADC MUX2"},
+	{"CDC_IF TX2 MUX", "RX_MIX_TX2", "RX MIX TX2 MUX"},
+	{"CDC_IF TX2 MUX", "DEC2_192", "ADC US MUX2"},
+
+	{"CDC_IF TX3 MUX", "DEC3", "ADC MUX3"},
+	{"CDC_IF TX3 MUX", "RX_MIX_TX3", "RX MIX TX3 MUX"},
+	{"CDC_IF TX3 MUX", "DEC3_192", "ADC US MUX3"},
+
+	{"CDC_IF TX4 MUX", "DEC4", "ADC MUX4"},
+	{"CDC_IF TX4 MUX", "RX_MIX_TX4", "RX MIX TX4 MUX"},
+	{"CDC_IF TX4 MUX", "DEC4_192", "ADC US MUX4"},
+
+	{"CDC_IF TX5 MUX", "DEC5", "ADC MUX5"},
+	{"CDC_IF TX5 MUX", "RX_MIX_TX5", "RX MIX TX5 MUX"},
+	{"CDC_IF TX5 MUX", "DEC5_192", "ADC US MUX5"},
+
+	{"CDC_IF TX6 MUX", "DEC6", "ADC MUX6"},
+	{"CDC_IF TX6 MUX", "RX_MIX_TX6", "RX MIX TX6 MUX"},
+	{"CDC_IF TX6 MUX", "DEC6_192", "ADC US MUX6"},
+
+	{"CDC_IF TX7 MUX", "DEC7", "ADC MUX7"},
+	{"CDC_IF TX7 MUX", "RX_MIX_TX7", "RX MIX TX7 MUX"},
+	{"CDC_IF TX7 MUX", "DEC7_192", "ADC US MUX7"},
+
+	{"CDC_IF TX8 MUX", "DEC8", "ADC MUX8"},
+	{"CDC_IF TX8 MUX", "RX_MIX_TX8", "RX MIX TX8 MUX"},
+	{"CDC_IF TX8 MUX", "DEC8_192", "ADC US MUX8"},
+
+	{"CDC_IF TX9 MUX", "DEC7", "ADC MUX7"},
+	{"CDC_IF TX9 MUX", "DEC7_192", "ADC US MUX7"},
+	{"CDC_IF TX10 MUX", "DEC6", "ADC MUX6"},
+	{"CDC_IF TX10 MUX", "DEC6_192", "ADC US MUX6"},
+
+	{"CDC_IF TX11 MUX", "DEC_0_5", "CDC_IF TX11 INP1 MUX"},
+	{"CDC_IF TX11 MUX", "DEC_9_12", "CDC_IF TX11 INP1 MUX"},
+	{"CDC_IF TX11 INP1 MUX", "DEC0", "ADC MUX0"},
+	{"CDC_IF TX11 INP1 MUX", "DEC1", "ADC MUX1"},
+	{"CDC_IF TX11 INP1 MUX", "DEC2", "ADC MUX2"},
+	{"CDC_IF TX11 INP1 MUX", "DEC3", "ADC MUX3"},
+	{"CDC_IF TX11 INP1 MUX", "DEC4", "ADC MUX4"},
+	{"CDC_IF TX11 INP1 MUX", "DEC5", "ADC MUX5"},
+	{"CDC_IF TX11 INP1 MUX", "RX_MIX_TX5", "RX MIX TX5 MUX"},
+
+	{"CDC_IF TX13 MUX", "MAD_BRDCST", "MAD_BROADCAST"},
+	{"CDC_IF TX13 MUX", "CDC_DEC_5", "CDC_IF TX13 INP1 MUX"},
+	{"CDC_IF TX13 INP1 MUX", "DEC5", "ADC MUX5"},
+	{"CDC_IF TX13 INP1 MUX", "DEC5_192", "ADC US MUX5"},
+
+	{"RX MIX TX0 MUX", "RX_MIX0", "RX INT0 SEC MIX"},
+	{"RX MIX TX0 MUX", "RX_MIX1", "RX INT1 SEC MIX"},
+	{"RX MIX TX0 MUX", "RX_MIX2", "RX INT2 SEC MIX"},
+	{"RX MIX TX0 MUX", "RX_MIX3", "RX INT3 SEC MIX"},
+	{"RX MIX TX0 MUX", "RX_MIX4", "RX INT4 SEC MIX"},
+	{"RX MIX TX0 MUX", "RX_MIX7", "RX INT7 SEC MIX"},
+	{"RX MIX TX0 MUX", "RX_MIX8", "RX INT8 SEC MIX"},
+
+	{"RX MIX TX1 MUX", "RX_MIX0", "RX INT0 SEC MIX"},
+	{"RX MIX TX1 MUX", "RX_MIX1", "RX INT1 SEC MIX"},
+	{"RX MIX TX1 MUX", "RX_MIX2", "RX INT2 SEC MIX"},
+	{"RX MIX TX1 MUX", "RX_MIX3", "RX INT3 SEC MIX"},
+	{"RX MIX TX1 MUX", "RX_MIX4", "RX INT4 SEC MIX"},
+	{"RX MIX TX1 MUX", "RX_MIX7", "RX INT7 SEC MIX"},
+	{"RX MIX TX1 MUX", "RX_MIX8", "RX INT8 SEC MIX"},
+
+	{"RX MIX TX2 MUX", "RX_MIX0", "RX INT0 SEC MIX"},
+	{"RX MIX TX2 MUX", "RX_MIX1", "RX INT1 SEC MIX"},
+	{"RX MIX TX2 MUX", "RX_MIX2", "RX INT2 SEC MIX"},
+	{"RX MIX TX2 MUX", "RX_MIX3", "RX INT3 SEC MIX"},
+	{"RX MIX TX2 MUX", "RX_MIX4", "RX INT4 SEC MIX"},
+	{"RX MIX TX2 MUX", "RX_MIX7", "RX INT7 SEC MIX"},
+	{"RX MIX TX2 MUX", "RX_MIX8", "RX INT8 SEC MIX"},
+
+	{"RX MIX TX3 MUX", "RX_MIX0", "RX INT0 SEC MIX"},
+	{"RX MIX TX3 MUX", "RX_MIX1", "RX INT1 SEC MIX"},
+	{"RX MIX TX3 MUX", "RX_MIX2", "RX INT2 SEC MIX"},
+	{"RX MIX TX3 MUX", "RX_MIX3", "RX INT3 SEC MIX"},
+	{"RX MIX TX3 MUX", "RX_MIX4", "RX INT4 SEC MIX"},
+	{"RX MIX TX3 MUX", "RX_MIX7", "RX INT7 SEC MIX"},
+	{"RX MIX TX3 MUX", "RX_MIX8", "RX INT8 SEC MIX"},
+
+	{"RX MIX TX4 MUX", "RX_MIX0", "RX INT0 SEC MIX"},
+	{"RX MIX TX4 MUX", "RX_MIX1", "RX INT1 SEC MIX"},
+	{"RX MIX TX4 MUX", "RX_MIX2", "RX INT2 SEC MIX"},
+	{"RX MIX TX4 MUX", "RX_MIX3", "RX INT3 SEC MIX"},
+	{"RX MIX TX4 MUX", "RX_MIX4", "RX INT4 SEC MIX"},
+	{"RX MIX TX4 MUX", "RX_MIX7", "RX INT7 SEC MIX"},
+	{"RX MIX TX4 MUX", "RX_MIX8", "RX INT8 SEC MIX"},
+
+	{"RX MIX TX5 MUX", "RX_MIX0", "RX INT0 SEC MIX"},
+	{"RX MIX TX5 MUX", "RX_MIX1", "RX INT1 SEC MIX"},
+	{"RX MIX TX5 MUX", "RX_MIX2", "RX INT2 SEC MIX"},
+	{"RX MIX TX5 MUX", "RX_MIX3", "RX INT3 SEC MIX"},
+	{"RX MIX TX5 MUX", "RX_MIX4", "RX INT4 SEC MIX"},
+	{"RX MIX TX5 MUX", "RX_MIX7", "RX INT7 SEC MIX"},
+	{"RX MIX TX5 MUX", "RX_MIX8", "RX INT8 SEC MIX"},
+
+	{"RX MIX TX6 MUX", "RX_MIX0", "RX INT0 SEC MIX"},
+	{"RX MIX TX6 MUX", "RX_MIX1", "RX INT1 SEC MIX"},
+	{"RX MIX TX6 MUX", "RX_MIX2", "RX INT2 SEC MIX"},
+	{"RX MIX TX6 MUX", "RX_MIX3", "RX INT3 SEC MIX"},
+	{"RX MIX TX6 MUX", "RX_MIX4", "RX INT4 SEC MIX"},
+	{"RX MIX TX6 MUX", "RX_MIX7", "RX INT7 SEC MIX"},
+	{"RX MIX TX6 MUX", "RX_MIX8", "RX INT8 SEC MIX"},
+
+	{"RX MIX TX7 MUX", "RX_MIX0", "RX INT0 SEC MIX"},
+	{"RX MIX TX7 MUX", "RX_MIX1", "RX INT1 SEC MIX"},
+	{"RX MIX TX7 MUX", "RX_MIX2", "RX INT2 SEC MIX"},
+	{"RX MIX TX7 MUX", "RX_MIX3", "RX INT3 SEC MIX"},
+	{"RX MIX TX7 MUX", "RX_MIX4", "RX INT4 SEC MIX"},
+	{"RX MIX TX7 MUX", "RX_MIX7", "RX INT7 SEC MIX"},
+	{"RX MIX TX7 MUX", "RX_MIX8", "RX INT8 SEC MIX"},
+
+	{"RX MIX TX8 MUX", "RX_MIX0", "RX INT0 SEC MIX"},
+	{"RX MIX TX8 MUX", "RX_MIX1", "RX INT1 SEC MIX"},
+	{"RX MIX TX8 MUX", "RX_MIX2", "RX INT2 SEC MIX"},
+	{"RX MIX TX8 MUX", "RX_MIX3", "RX INT3 SEC MIX"},
+	{"RX MIX TX8 MUX", "RX_MIX4", "RX INT4 SEC MIX"},
+	{"RX MIX TX8 MUX", "RX_MIX7", "RX INT7 SEC MIX"},
+	{"RX MIX TX8 MUX", "RX_MIX8", "RX INT8 SEC MIX"},
+
+	{"ADC US MUX0", "US_Switch", "ADC MUX0"},
+	{"ADC US MUX1", "US_Switch", "ADC MUX1"},
+	{"ADC US MUX2", "US_Switch", "ADC MUX2"},
+	{"ADC US MUX3", "US_Switch", "ADC MUX3"},
+	{"ADC US MUX4", "US_Switch", "ADC MUX4"},
+	{"ADC US MUX5", "US_Switch", "ADC MUX5"},
+	{"ADC US MUX6", "US_Switch", "ADC MUX6"},
+	{"ADC US MUX7", "US_Switch", "ADC MUX7"},
+	{"ADC US MUX8", "US_Switch", "ADC MUX8"},
+
+	{"ADC MUX0", "DMIC", "DMIC MUX0"},
+	{"ADC MUX0", "AMIC", "AMIC MUX0"},
+	{"ADC MUX1", "DMIC", "DMIC MUX1"},
+	{"ADC MUX1", "AMIC", "AMIC MUX1"},
+	{"ADC MUX2", "DMIC", "DMIC MUX2"},
+	{"ADC MUX2", "AMIC", "AMIC MUX2"},
+	{"ADC MUX3", "DMIC", "DMIC MUX3"},
+	{"ADC MUX3", "AMIC", "AMIC MUX3"},
+	{"ADC MUX4", "DMIC", "DMIC MUX4"},
+	{"ADC MUX4", "AMIC", "AMIC MUX4"},
+	{"ADC MUX5", "DMIC", "DMIC MUX5"},
+	{"ADC MUX5", "AMIC", "AMIC MUX5"},
+	{"ADC MUX6", "DMIC", "DMIC MUX6"},
+	{"ADC MUX6", "AMIC", "AMIC MUX6"},
+	{"ADC MUX7", "DMIC", "DMIC MUX7"},
+	{"ADC MUX7", "AMIC", "AMIC MUX7"},
+	{"ADC MUX8", "DMIC", "DMIC MUX8"},
+	{"ADC MUX8", "AMIC", "AMIC MUX8"},
+	{"ADC MUX10", "DMIC", "DMIC MUX10"},
+	{"ADC MUX10", "AMIC", "AMIC MUX10"},
+	{"ADC MUX11", "DMIC", "DMIC MUX11"},
+	{"ADC MUX11", "AMIC", "AMIC MUX11"},
+	{"ADC MUX12", "DMIC", "DMIC MUX12"},
+	{"ADC MUX12", "AMIC", "AMIC MUX12"},
+	{"ADC MUX13", "DMIC", "DMIC MUX13"},
+	{"ADC MUX13", "AMIC", "AMIC MUX13"},
+
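+	/*
+	 * ANC feedback tuning: the ANC_FB_TUNE1/2 selections let ADC
+	 * MUX10..13 stand in for a decimator's normal DMIC/AMIC input.
+	 */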
+	{"ADC MUX0", "ANC_FB_TUNE1", "ADC MUX10"},
+	{"ADC MUX0", "ANC_FB_TUNE1", "ADC MUX11"},
+	{"ADC MUX0", "ANC_FB_TUNE2", "ADC MUX12"},
+	{"ADC MUX0", "ANC_FB_TUNE2", "ADC MUX13"},
+	{"ADC MUX1", "ANC_FB_TUNE1", "ADC MUX10"},
+	{"ADC MUX1", "ANC_FB_TUNE1", "ADC MUX11"},
+	{"ADC MUX1", "ANC_FB_TUNE2", "ADC MUX12"},
+	{"ADC MUX1", "ANC_FB_TUNE2", "ADC MUX13"},
+	{"ADC MUX2", "ANC_FB_TUNE1", "ADC MUX10"},
+	{"ADC MUX2", "ANC_FB_TUNE1", "ADC MUX11"},
+	{"ADC MUX2", "ANC_FB_TUNE2", "ADC MUX12"},
+	{"ADC MUX2", "ANC_FB_TUNE2", "ADC MUX13"},
+	{"ADC MUX3", "ANC_FB_TUNE1", "ADC MUX10"},
+	{"ADC MUX3", "ANC_FB_TUNE1", "ADC MUX11"},
+	{"ADC MUX3", "ANC_FB_TUNE2", "ADC MUX12"},
+	{"ADC MUX3", "ANC_FB_TUNE2", "ADC MUX13"},
+	{"ADC MUX4", "ANC_FB_TUNE1", "ADC MUX10"},
+	{"ADC MUX4", "ANC_FB_TUNE1", "ADC MUX11"},
+	{"ADC MUX4", "ANC_FB_TUNE2", "ADC MUX12"},
+	{"ADC MUX4", "ANC_FB_TUNE2", "ADC MUX13"},
+	{"ADC MUX5", "ANC_FB_TUNE1", "ADC MUX10"},
+	{"ADC MUX5", "ANC_FB_TUNE1", "ADC MUX11"},
+	{"ADC MUX5", "ANC_FB_TUNE2", "ADC MUX12"},
+	{"ADC MUX5", "ANC_FB_TUNE2", "ADC MUX13"},
+	{"ADC MUX6", "ANC_FB_TUNE1", "ADC MUX10"},
+	{"ADC MUX6", "ANC_FB_TUNE1", "ADC MUX11"},
+	{"ADC MUX6", "ANC_FB_TUNE2", "ADC MUX12"},
+	{"ADC MUX6", "ANC_FB_TUNE2", "ADC MUX13"},
+	{"ADC MUX7", "ANC_FB_TUNE1", "ADC MUX10"},
+	{"ADC MUX7", "ANC_FB_TUNE1", "ADC MUX11"},
+	{"ADC MUX7", "ANC_FB_TUNE2", "ADC MUX12"},
+	{"ADC MUX7", "ANC_FB_TUNE2", "ADC MUX13"},
+	{"ADC MUX8", "ANC_FB_TUNE1", "ADC MUX10"},
+	{"ADC MUX8", "ANC_FB_TUNE1", "ADC MUX11"},
+	{"ADC MUX8", "ANC_FB_TUNE2", "ADC MUX12"},
+	{"ADC MUX8", "ANC_FB_TUNE2", "ADC MUX13"},
+
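+	/*
+	 * Decimator input fan-out: every DMIC MUXn selects one of six
+	 * digital mics and every AMIC MUXn one of the four ADC outputs.
+	 */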
+	{"DMIC MUX0", "DMIC0", "DMIC0"},
+	{"DMIC MUX0", "DMIC1", "DMIC1"},
+	{"DMIC MUX0", "DMIC2", "DMIC2"},
+	{"DMIC MUX0", "DMIC3", "DMIC3"},
+	{"DMIC MUX0", "DMIC4", "DMIC4"},
+	{"DMIC MUX0", "DMIC5", "DMIC5"},
+	{"AMIC MUX0", "ADC1", "ADC1"},
+	{"AMIC MUX0", "ADC2", "ADC2"},
+	{"AMIC MUX0", "ADC3", "ADC3"},
+	{"AMIC MUX0", "ADC4", "ADC4"},
+
+	{"DMIC MUX1", "DMIC0", "DMIC0"},
+	{"DMIC MUX1", "DMIC1", "DMIC1"},
+	{"DMIC MUX1", "DMIC2", "DMIC2"},
+	{"DMIC MUX1", "DMIC3", "DMIC3"},
+	{"DMIC MUX1", "DMIC4", "DMIC4"},
+	{"DMIC MUX1", "DMIC5", "DMIC5"},
+	{"AMIC MUX1", "ADC1", "ADC1"},
+	{"AMIC MUX1", "ADC2", "ADC2"},
+	{"AMIC MUX1", "ADC3", "ADC3"},
+	{"AMIC MUX1", "ADC4", "ADC4"},
+
+	{"DMIC MUX2", "DMIC0", "DMIC0"},
+	{"DMIC MUX2", "DMIC1", "DMIC1"},
+	{"DMIC MUX2", "DMIC2", "DMIC2"},
+	{"DMIC MUX2", "DMIC3", "DMIC3"},
+	{"DMIC MUX2", "DMIC4", "DMIC4"},
+	{"DMIC MUX2", "DMIC5", "DMIC5"},
+	{"AMIC MUX2", "ADC1", "ADC1"},
+	{"AMIC MUX2", "ADC2", "ADC2"},
+	{"AMIC MUX2", "ADC3", "ADC3"},
+	{"AMIC MUX2", "ADC4", "ADC4"},
+
+	{"DMIC MUX3", "DMIC0", "DMIC0"},
+	{"DMIC MUX3", "DMIC1", "DMIC1"},
+	{"DMIC MUX3", "DMIC2", "DMIC2"},
+	{"DMIC MUX3", "DMIC3", "DMIC3"},
+	{"DMIC MUX3", "DMIC4", "DMIC4"},
+	{"DMIC MUX3", "DMIC5", "DMIC5"},
+	{"AMIC MUX3", "ADC1", "ADC1"},
+	{"AMIC MUX3", "ADC2", "ADC2"},
+	{"AMIC MUX3", "ADC3", "ADC3"},
+	{"AMIC MUX3", "ADC4", "ADC4"},
+
+	{"DMIC MUX4", "DMIC0", "DMIC0"},
+	{"DMIC MUX4", "DMIC1", "DMIC1"},
+	{"DMIC MUX4", "DMIC2", "DMIC2"},
+	{"DMIC MUX4", "DMIC3", "DMIC3"},
+	{"DMIC MUX4", "DMIC4", "DMIC4"},
+	{"DMIC MUX4", "DMIC5", "DMIC5"},
+	{"AMIC MUX4", "ADC1", "ADC1"},
+	{"AMIC MUX4", "ADC2", "ADC2"},
+	{"AMIC MUX4", "ADC3", "ADC3"},
+	{"AMIC MUX4", "ADC4", "ADC4"},
+
+	{"DMIC MUX5", "DMIC0", "DMIC0"},
+	{"DMIC MUX5", "DMIC1", "DMIC1"},
+	{"DMIC MUX5", "DMIC2", "DMIC2"},
+	{"DMIC MUX5", "DMIC3", "DMIC3"},
+	{"DMIC MUX5", "DMIC4", "DMIC4"},
+	{"DMIC MUX5", "DMIC5", "DMIC5"},
+	{"AMIC MUX5", "ADC1", "ADC1"},
+	{"AMIC MUX5", "ADC2", "ADC2"},
+	{"AMIC MUX5", "ADC3", "ADC3"},
+	{"AMIC MUX5", "ADC4", "ADC4"},
+
+	{"DMIC MUX6", "DMIC0", "DMIC0"},
+	{"DMIC MUX6", "DMIC1", "DMIC1"},
+	{"DMIC MUX6", "DMIC2", "DMIC2"},
+	{"DMIC MUX6", "DMIC3", "DMIC3"},
+	{"DMIC MUX6", "DMIC4", "DMIC4"},
+	{"DMIC MUX6", "DMIC5", "DMIC5"},
+	{"AMIC MUX6", "ADC1", "ADC1"},
+	{"AMIC MUX6", "ADC2", "ADC2"},
+	{"AMIC MUX6", "ADC3", "ADC3"},
+	{"AMIC MUX6", "ADC4", "ADC4"},
+
+	{"DMIC MUX7", "DMIC0", "DMIC0"},
+	{"DMIC MUX7", "DMIC1", "DMIC1"},
+	{"DMIC MUX7", "DMIC2", "DMIC2"},
+	{"DMIC MUX7", "DMIC3", "DMIC3"},
+	{"DMIC MUX7", "DMIC4", "DMIC4"},
+	{"DMIC MUX7", "DMIC5", "DMIC5"},
+	{"AMIC MUX7", "ADC1", "ADC1"},
+	{"AMIC MUX7", "ADC2", "ADC2"},
+	{"AMIC MUX7", "ADC3", "ADC3"},
+	{"AMIC MUX7", "ADC4", "ADC4"},
+
+	{"DMIC MUX8", "DMIC0", "DMIC0"},
+	{"DMIC MUX8", "DMIC1", "DMIC1"},
+	{"DMIC MUX8", "DMIC2", "DMIC2"},
+	{"DMIC MUX8", "DMIC3", "DMIC3"},
+	{"DMIC MUX8", "DMIC4", "DMIC4"},
+	{"DMIC MUX8", "DMIC5", "DMIC5"},
+	{"AMIC MUX8", "ADC1", "ADC1"},
+	{"AMIC MUX8", "ADC2", "ADC2"},
+	{"AMIC MUX8", "ADC3", "ADC3"},
+	{"AMIC MUX8", "ADC4", "ADC4"},
+
+	{"DMIC MUX10", "DMIC0", "DMIC0"},
+	{"DMIC MUX10", "DMIC1", "DMIC1"},
+	{"DMIC MUX10", "DMIC2", "DMIC2"},
+	{"DMIC MUX10", "DMIC3", "DMIC3"},
+	{"DMIC MUX10", "DMIC4", "DMIC4"},
+	{"DMIC MUX10", "DMIC5", "DMIC5"},
+	{"AMIC MUX10", "ADC1", "ADC1"},
+	{"AMIC MUX10", "ADC2", "ADC2"},
+	{"AMIC MUX10", "ADC3", "ADC3"},
+	{"AMIC MUX10", "ADC4", "ADC4"},
+
+	{"DMIC MUX11", "DMIC0", "DMIC0"},
+	{"DMIC MUX11", "DMIC1", "DMIC1"},
+	{"DMIC MUX11", "DMIC2", "DMIC2"},
+	{"DMIC MUX11", "DMIC3", "DMIC3"},
+	{"DMIC MUX11", "DMIC4", "DMIC4"},
+	{"DMIC MUX11", "DMIC5", "DMIC5"},
+	{"AMIC MUX11", "ADC1", "ADC1"},
+	{"AMIC MUX11", "ADC2", "ADC2"},
+	{"AMIC MUX11", "ADC3", "ADC3"},
+	{"AMIC MUX11", "ADC4", "ADC4"},
+
+	{"DMIC MUX12", "DMIC0", "DMIC0"},
+	{"DMIC MUX12", "DMIC1", "DMIC1"},
+	{"DMIC MUX12", "DMIC2", "DMIC2"},
+	{"DMIC MUX12", "DMIC3", "DMIC3"},
+	{"DMIC MUX12", "DMIC4", "DMIC4"},
+	{"DMIC MUX12", "DMIC5", "DMIC5"},
+	{"AMIC MUX12", "ADC1", "ADC1"},
+	{"AMIC MUX12", "ADC2", "ADC2"},
+	{"AMIC MUX12", "ADC3", "ADC3"},
+	{"AMIC MUX12", "ADC4", "ADC4"},
+
+	{"DMIC MUX13", "DMIC0", "DMIC0"},
+	{"DMIC MUX13", "DMIC1", "DMIC1"},
+	{"DMIC MUX13", "DMIC2", "DMIC2"},
+	{"DMIC MUX13", "DMIC3", "DMIC3"},
+	{"DMIC MUX13", "DMIC4", "DMIC4"},
+	{"DMIC MUX13", "DMIC5", "DMIC5"},
+	{"AMIC MUX13", "ADC1", "ADC1"},
+	{"AMIC MUX13", "ADC2", "ADC2"},
+	{"AMIC MUX13", "ADC3", "ADC3"},
+	{"AMIC MUX13", "ADC4", "ADC4"},
+
+	{"AMIC4_5 SEL", "AMIC4", "AMIC4"},
+	{"AMIC4_5 SEL", "AMIC5", "AMIC5"},
+
+	{"ADC1", NULL, "AMIC1"},
+	{"ADC2", NULL, "AMIC2"},
+	{"ADC3", NULL, "AMIC3"},
+	{"ADC4", NULL, "AMIC4_5 SEL"},
+
+	/* CDC Rx interface with SLIMBUS */
+	{"CDC_IF RX0 MUX", "SLIM RX0", "SLIM RX0"},
+	{"CDC_IF RX1 MUX", "SLIM RX1", "SLIM RX1"},
+	{"CDC_IF RX2 MUX", "SLIM RX2", "SLIM RX2"},
+	{"CDC_IF RX3 MUX", "SLIM RX3", "SLIM RX3"},
+	{"CDC_IF RX4 MUX", "SLIM RX4", "SLIM RX4"},
+	{"CDC_IF RX5 MUX", "SLIM RX5", "SLIM RX5"},
+	{"CDC_IF RX6 MUX", "SLIM RX6", "SLIM RX6"},
+	{"CDC_IF RX7 MUX", "SLIM RX7", "SLIM RX7"},
+
+	{"RX INT0_1 MIX1 INP0", "RX0", "CDC_IF RX0 MUX"},
+	{"RX INT0_1 MIX1 INP0", "RX1", "CDC_IF RX1 MUX"},
+	{"RX INT0_1 MIX1 INP0", "RX2", "CDC_IF RX2 MUX"},
+	{"RX INT0_1 MIX1 INP0", "RX3", "CDC_IF RX3 MUX"},
+	{"RX INT0_1 MIX1 INP0", "RX4", "CDC_IF RX4 MUX"},
+	{"RX INT0_1 MIX1 INP0", "RX5", "CDC_IF RX5 MUX"},
+	{"RX INT0_1 MIX1 INP0", "RX6", "CDC_IF RX6 MUX"},
+	{"RX INT0_1 MIX1 INP0", "RX7", "CDC_IF RX7 MUX"},
+	{"RX INT0_1 MIX1 INP0", "IIR0", "IIR0"},
+	{"RX INT0_1 MIX1 INP0", "IIR1", "IIR1"},
+	{"RX INT0_1 MIX1 INP1", "RX0", "CDC_IF RX0 MUX"},
+	{"RX INT0_1 MIX1 INP1", "RX1", "CDC_IF RX1 MUX"},
+	{"RX INT0_1 MIX1 INP1", "RX2", "CDC_IF RX2 MUX"},
+	{"RX INT0_1 MIX1 INP1", "RX3", "CDC_IF RX3 MUX"},
+	{"RX INT0_1 MIX1 INP1", "RX4", "CDC_IF RX4 MUX"},
+	{"RX INT0_1 MIX1 INP1", "RX5", "CDC_IF RX5 MUX"},
+	{"RX INT0_1 MIX1 INP1", "RX6", "CDC_IF RX6 MUX"},
+	{"RX INT0_1 MIX1 INP1", "RX7", "CDC_IF RX7 MUX"},
+	{"RX INT0_1 MIX1 INP1", "IIR0", "IIR0"},
+	{"RX INT0_1 MIX1 INP1", "IIR1", "IIR1"},
+	{"RX INT0_1 MIX1 INP2", "RX0", "CDC_IF RX0 MUX"},
+	{"RX INT0_1 MIX1 INP2", "RX1", "CDC_IF RX1 MUX"},
+	{"RX INT0_1 MIX1 INP2", "RX2", "CDC_IF RX2 MUX"},
+	{"RX INT0_1 MIX1 INP2", "RX3", "CDC_IF RX3 MUX"},
+	{"RX INT0_1 MIX1 INP2", "RX4", "CDC_IF RX4 MUX"},
+	{"RX INT0_1 MIX1 INP2", "RX5", "CDC_IF RX5 MUX"},
+	{"RX INT0_1 MIX1 INP2", "RX6", "CDC_IF RX6 MUX"},
+	{"RX INT0_1 MIX1 INP2", "RX7", "CDC_IF RX7 MUX"},
+	{"RX INT0_1 MIX1 INP2", "IIR0", "IIR0"},
+	{"RX INT0_1 MIX1 INP2", "IIR1", "IIR1"},
+
+	{"RX INT1_1 MIX1 INP0", "RX0", "CDC_IF RX0 MUX"},
+	{"RX INT1_1 MIX1 INP0", "RX1", "CDC_IF RX1 MUX"},
+	{"RX INT1_1 MIX1 INP0", "RX2", "CDC_IF RX2 MUX"},
+	{"RX INT1_1 MIX1 INP0", "RX3", "CDC_IF RX3 MUX"},
+	{"RX INT1_1 MIX1 INP0", "RX4", "CDC_IF RX4 MUX"},
+	{"RX INT1_1 MIX1 INP0", "RX5", "CDC_IF RX5 MUX"},
+	{"RX INT1_1 MIX1 INP0", "RX6", "CDC_IF RX6 MUX"},
+	{"RX INT1_1 MIX1 INP0", "RX7", "CDC_IF RX7 MUX"},
+	{"RX INT1_1 MIX1 INP0", "IIR0", "IIR0"},
+	{"RX INT1_1 MIX1 INP0", "IIR1", "IIR1"},
+	{"RX INT1_1 MIX1 INP1", "RX0", "CDC_IF RX0 MUX"},
+	{"RX INT1_1 MIX1 INP1", "RX1", "CDC_IF RX1 MUX"},
+	{"RX INT1_1 MIX1 INP1", "RX2", "CDC_IF RX2 MUX"},
+	{"RX INT1_1 MIX1 INP1", "RX3", "CDC_IF RX3 MUX"},
+	{"RX INT1_1 MIX1 INP1", "RX4", "CDC_IF RX4 MUX"},
+	{"RX INT1_1 MIX1 INP1", "RX5", "CDC_IF RX5 MUX"},
+	{"RX INT1_1 MIX1 INP1", "RX6", "CDC_IF RX6 MUX"},
+	{"RX INT1_1 MIX1 INP1", "RX7", "CDC_IF RX7 MUX"},
+	{"RX INT1_1 MIX1 INP1", "IIR0", "IIR0"},
+	{"RX INT1_1 MIX1 INP1", "IIR1", "IIR1"},
+	{"RX INT1_1 MIX1 INP2", "RX0", "CDC_IF RX0 MUX"},
+	{"RX INT1_1 MIX1 INP2", "RX1", "CDC_IF RX1 MUX"},
+	{"RX INT1_1 MIX1 INP2", "RX2", "CDC_IF RX2 MUX"},
+	{"RX INT1_1 MIX1 INP2", "RX3", "CDC_IF RX3 MUX"},
+	{"RX INT1_1 MIX1 INP2", "RX4", "CDC_IF RX4 MUX"},
+	{"RX INT1_1 MIX1 INP2", "RX5", "CDC_IF RX5 MUX"},
+	{"RX INT1_1 MIX1 INP2", "RX6", "CDC_IF RX6 MUX"},
+	{"RX INT1_1 MIX1 INP2", "RX7", "CDC_IF RX7 MUX"},
+	{"RX INT1_1 MIX1 INP2", "IIR0", "IIR0"},
+	{"RX INT1_1 MIX1 INP2", "IIR1", "IIR1"},
+	{"RX INT2_1 MIX1 INP0", "RX0", "CDC_IF RX0 MUX"},
+	{"RX INT2_1 MIX1 INP0", "RX1", "CDC_IF RX1 MUX"},
+	{"RX INT2_1 MIX1 INP0", "RX2", "CDC_IF RX2 MUX"},
+	{"RX INT2_1 MIX1 INP0", "RX3", "CDC_IF RX3 MUX"},
+	{"RX INT2_1 MIX1 INP0", "RX4", "CDC_IF RX4 MUX"},
+	{"RX INT2_1 MIX1 INP0", "RX5", "CDC_IF RX5 MUX"},
+	{"RX INT2_1 MIX1 INP0", "RX6", "CDC_IF RX6 MUX"},
+	{"RX INT2_1 MIX1 INP0", "RX7", "CDC_IF RX7 MUX"},
+	{"RX INT2_1 MIX1 INP0", "IIR0", "IIR0"},
+	{"RX INT2_1 MIX1 INP0", "IIR1", "IIR1"},
+	{"RX INT2_1 MIX1 INP1", "RX0", "CDC_IF RX0 MUX"},
+	{"RX INT2_1 MIX1 INP1", "RX1", "CDC_IF RX1 MUX"},
+	{"RX INT2_1 MIX1 INP1", "RX2", "CDC_IF RX2 MUX"},
+	{"RX INT2_1 MIX1 INP1", "RX3", "CDC_IF RX3 MUX"},
+	{"RX INT2_1 MIX1 INP1", "RX4", "CDC_IF RX4 MUX"},
+	{"RX INT2_1 MIX1 INP1", "RX5", "CDC_IF RX5 MUX"},
+	{"RX INT2_1 MIX1 INP1", "RX6", "CDC_IF RX6 MUX"},
+	{"RX INT2_1 MIX1 INP1", "RX7", "CDC_IF RX7 MUX"},
+	{"RX INT2_1 MIX1 INP1", "IIR0", "IIR0"},
+	{"RX INT2_1 MIX1 INP1", "IIR1", "IIR1"},
+	{"RX INT2_1 MIX1 INP2", "RX0", "CDC_IF RX0 MUX"},
+	{"RX INT2_1 MIX1 INP2", "RX1", "CDC_IF RX1 MUX"},
+	{"RX INT2_1 MIX1 INP2", "RX2", "CDC_IF RX2 MUX"},
+	{"RX INT2_1 MIX1 INP2", "RX3", "CDC_IF RX3 MUX"},
+	{"RX INT2_1 MIX1 INP2", "RX4", "CDC_IF RX4 MUX"},
+	{"RX INT2_1 MIX1 INP2", "RX5", "CDC_IF RX5 MUX"},
+	{"RX INT2_1 MIX1 INP2", "RX6", "CDC_IF RX6 MUX"},
+	{"RX INT2_1 MIX1 INP2", "RX7", "CDC_IF RX7 MUX"},
+	{"RX INT2_1 MIX1 INP2", "IIR0", "IIR0"},
+	{"RX INT2_1 MIX1 INP2", "IIR1", "IIR1"},
+
+	{"RX INT3_1 MIX1 INP0", "RX0", "CDC_IF RX0 MUX"},
+	{"RX INT3_1 MIX1 INP0", "RX1", "CDC_IF RX1 MUX"},
+	{"RX INT3_1 MIX1 INP0", "RX2", "CDC_IF RX2 MUX"},
+	{"RX INT3_1 MIX1 INP0", "RX3", "CDC_IF RX3 MUX"},
+	{"RX INT3_1 MIX1 INP0", "RX4", "CDC_IF RX4 MUX"},
+	{"RX INT3_1 MIX1 INP0", "RX5", "CDC_IF RX5 MUX"},
+	{"RX INT3_1 MIX1 INP0", "RX6", "CDC_IF RX6 MUX"},
+	{"RX INT3_1 MIX1 INP0", "RX7", "CDC_IF RX7 MUX"},
+	{"RX INT3_1 MIX1 INP0", "IIR0", "IIR0"},
+	{"RX INT3_1 MIX1 INP0", "IIR1", "IIR1"},
+	{"RX INT3_1 MIX1 INP1", "RX0", "CDC_IF RX0 MUX"},
+	{"RX INT3_1 MIX1 INP1", "RX1", "CDC_IF RX1 MUX"},
+	{"RX INT3_1 MIX1 INP1", "RX2", "CDC_IF RX2 MUX"},
+	{"RX INT3_1 MIX1 INP1", "RX3", "CDC_IF RX3 MUX"},
+	{"RX INT3_1 MIX1 INP1", "RX4", "CDC_IF RX4 MUX"},
+	{"RX INT3_1 MIX1 INP1", "RX5", "CDC_IF RX5 MUX"},
+	{"RX INT3_1 MIX1 INP1", "RX6", "CDC_IF RX6 MUX"},
+	{"RX INT3_1 MIX1 INP1", "RX7", "CDC_IF RX7 MUX"},
+	{"RX INT3_1 MIX1 INP1", "IIR0", "IIR0"},
+	{"RX INT3_1 MIX1 INP1", "IIR1", "IIR1"},
+	{"RX INT3_1 MIX1 INP2", "RX0", "CDC_IF RX0 MUX"},
+	{"RX INT3_1 MIX1 INP2", "RX1", "CDC_IF RX1 MUX"},
+	{"RX INT3_1 MIX1 INP2", "RX2", "CDC_IF RX2 MUX"},
+	{"RX INT3_1 MIX1 INP2", "RX3", "CDC_IF RX3 MUX"},
+	{"RX INT3_1 MIX1 INP2", "RX4", "CDC_IF RX4 MUX"},
+	{"RX INT3_1 MIX1 INP2", "RX5", "CDC_IF RX5 MUX"},
+	{"RX INT3_1 MIX1 INP2", "RX6", "CDC_IF RX6 MUX"},
+	{"RX INT3_1 MIX1 INP2", "RX7", "CDC_IF RX7 MUX"},
+	{"RX INT3_1 MIX1 INP2", "IIR0", "IIR0"},
+	{"RX INT3_1 MIX1 INP2", "IIR1", "IIR1"},
+
+	{"RX INT4_1 MIX1 INP0", "RX0", "CDC_IF RX0 MUX"},
+	{"RX INT4_1 MIX1 INP0", "RX1", "CDC_IF RX1 MUX"},
+	{"RX INT4_1 MIX1 INP0", "RX2", "CDC_IF RX2 MUX"},
+	{"RX INT4_1 MIX1 INP0", "RX3", "CDC_IF RX3 MUX"},
+	{"RX INT4_1 MIX1 INP0", "RX4", "CDC_IF RX4 MUX"},
+	{"RX INT4_1 MIX1 INP0", "RX5", "CDC_IF RX5 MUX"},
+	{"RX INT4_1 MIX1 INP0", "RX6", "CDC_IF RX6 MUX"},
+	{"RX INT4_1 MIX1 INP0", "RX7", "CDC_IF RX7 MUX"},
+	{"RX INT4_1 MIX1 INP0", "IIR0", "IIR0"},
+	{"RX INT4_1 MIX1 INP0", "IIR1", "IIR1"},
+	{"RX INT4_1 MIX1 INP1", "RX0", "CDC_IF RX0 MUX"},
+	{"RX INT4_1 MIX1 INP1", "RX1", "CDC_IF RX1 MUX"},
+	{"RX INT4_1 MIX1 INP1", "RX2", "CDC_IF RX2 MUX"},
+	{"RX INT4_1 MIX1 INP1", "RX3", "CDC_IF RX3 MUX"},
+	{"RX INT4_1 MIX1 INP1", "RX4", "CDC_IF RX4 MUX"},
+	{"RX INT4_1 MIX1 INP1", "RX5", "CDC_IF RX5 MUX"},
+	{"RX INT4_1 MIX1 INP1", "RX6", "CDC_IF RX6 MUX"},
+	{"RX INT4_1 MIX1 INP1", "RX7", "CDC_IF RX7 MUX"},
+	{"RX INT4_1 MIX1 INP1", "IIR0", "IIR0"},
+	{"RX INT4_1 MIX1 INP1", "IIR1", "IIR1"},
+	{"RX INT4_1 MIX1 INP2", "RX0", "CDC_IF RX0 MUX"},
+	{"RX INT4_1 MIX1 INP2", "RX1", "CDC_IF RX1 MUX"},
+	{"RX INT4_1 MIX1 INP2", "RX2", "CDC_IF RX2 MUX"},
+	{"RX INT4_1 MIX1 INP2", "RX3", "CDC_IF RX3 MUX"},
+	{"RX INT4_1 MIX1 INP2", "RX4", "CDC_IF RX4 MUX"},
+	{"RX INT4_1 MIX1 INP2", "RX5", "CDC_IF RX5 MUX"},
+	{"RX INT4_1 MIX1 INP2", "RX6", "CDC_IF RX6 MUX"},
+	{"RX INT4_1 MIX1 INP2", "RX7", "CDC_IF RX7 MUX"},
+	{"RX INT4_1 MIX1 INP2", "IIR0", "IIR0"},
+	{"RX INT4_1 MIX1 INP2", "IIR1", "IIR1"},
+
+	{"RX INT7_1 MIX1 INP0", "RX0", "CDC_IF RX0 MUX"},
+	{"RX INT7_1 MIX1 INP0", "RX1", "CDC_IF RX1 MUX"},
+	{"RX INT7_1 MIX1 INP0", "RX2", "CDC_IF RX2 MUX"},
+	{"RX INT7_1 MIX1 INP0", "RX3", "CDC_IF RX3 MUX"},
+	{"RX INT7_1 MIX1 INP0", "RX4", "CDC_IF RX4 MUX"},
+	{"RX INT7_1 MIX1 INP0", "RX5", "CDC_IF RX5 MUX"},
+	{"RX INT7_1 MIX1 INP0", "RX6", "CDC_IF RX6 MUX"},
+	{"RX INT7_1 MIX1 INP0", "RX7", "CDC_IF RX7 MUX"},
+	{"RX INT7_1 MIX1 INP0", "IIR0", "IIR0"},
+	{"RX INT7_1 MIX1 INP0", "IIR1", "IIR1"},
+	{"RX INT7_1 MIX1 INP1", "RX0", "CDC_IF RX0 MUX"},
+	{"RX INT7_1 MIX1 INP1", "RX1", "CDC_IF RX1 MUX"},
+	{"RX INT7_1 MIX1 INP1", "RX2", "CDC_IF RX2 MUX"},
+	{"RX INT7_1 MIX1 INP1", "RX3", "CDC_IF RX3 MUX"},
+	{"RX INT7_1 MIX1 INP1", "RX4", "CDC_IF RX4 MUX"},
+	{"RX INT7_1 MIX1 INP1", "RX5", "CDC_IF RX5 MUX"},
+	{"RX INT7_1 MIX1 INP1", "RX6", "CDC_IF RX6 MUX"},
+	{"RX INT7_1 MIX1 INP1", "RX7", "CDC_IF RX7 MUX"},
+	{"RX INT7_1 MIX1 INP1", "IIR0", "IIR0"},
+	{"RX INT7_1 MIX1 INP1", "IIR1", "IIR1"},
+	{"RX INT7_1 MIX1 INP2", "RX0", "CDC_IF RX0 MUX"},
+	{"RX INT7_1 MIX1 INP2", "RX1", "CDC_IF RX1 MUX"},
+	{"RX INT7_1 MIX1 INP2", "RX2", "CDC_IF RX2 MUX"},
+	{"RX INT7_1 MIX1 INP2", "RX3", "CDC_IF RX3 MUX"},
+	{"RX INT7_1 MIX1 INP2", "RX4", "CDC_IF RX4 MUX"},
+	{"RX INT7_1 MIX1 INP2", "RX5", "CDC_IF RX5 MUX"},
+	{"RX INT7_1 MIX1 INP2", "RX6", "CDC_IF RX6 MUX"},
+	{"RX INT7_1 MIX1 INP2", "RX7", "CDC_IF RX7 MUX"},
+	{"RX INT7_1 MIX1 INP2", "IIR0", "IIR0"},
+	{"RX INT7_1 MIX1 INP2", "IIR1", "IIR1"},
+
+	{"RX INT8_1 MIX1 INP0", "RX0", "CDC_IF RX0 MUX"},
+	{"RX INT8_1 MIX1 INP0", "RX1", "CDC_IF RX1 MUX"},
+	{"RX INT8_1 MIX1 INP0", "RX2", "CDC_IF RX2 MUX"},
+	{"RX INT8_1 MIX1 INP0", "RX3", "CDC_IF RX3 MUX"},
+	{"RX INT8_1 MIX1 INP0", "RX4", "CDC_IF RX4 MUX"},
+	{"RX INT8_1 MIX1 INP0", "RX5", "CDC_IF RX5 MUX"},
+	{"RX INT8_1 MIX1 INP0", "RX6", "CDC_IF RX6 MUX"},
+	{"RX INT8_1 MIX1 INP0", "RX7", "CDC_IF RX7 MUX"},
+	{"RX INT8_1 MIX1 INP0", "IIR0", "IIR0"},
+	{"RX INT8_1 MIX1 INP0", "IIR1", "IIR1"},
+	{"RX INT8_1 MIX1 INP1", "RX0", "CDC_IF RX0 MUX"},
+	{"RX INT8_1 MIX1 INP1", "RX1", "CDC_IF RX1 MUX"},
+	{"RX INT8_1 MIX1 INP1", "RX2", "CDC_IF RX2 MUX"},
+	{"RX INT8_1 MIX1 INP1", "RX3", "CDC_IF RX3 MUX"},
+	{"RX INT8_1 MIX1 INP1", "RX4", "CDC_IF RX4 MUX"},
+	{"RX INT8_1 MIX1 INP1", "RX5", "CDC_IF RX5 MUX"},
+	{"RX INT8_1 MIX1 INP1", "RX6", "CDC_IF RX6 MUX"},
+	{"RX INT8_1 MIX1 INP1", "RX7", "CDC_IF RX7 MUX"},
+	{"RX INT8_1 MIX1 INP1", "IIR0", "IIR0"},
+	{"RX INT8_1 MIX1 INP1", "IIR1", "IIR1"},
+	{"RX INT8_1 MIX1 INP2", "RX0", "CDC_IF RX0 MUX"},
+	{"RX INT8_1 MIX1 INP2", "RX1", "CDC_IF RX1 MUX"},
+	{"RX INT8_1 MIX1 INP2", "RX2", "CDC_IF RX2 MUX"},
+	{"RX INT8_1 MIX1 INP2", "RX3", "CDC_IF RX3 MUX"},
+	{"RX INT8_1 MIX1 INP2", "RX4", "CDC_IF RX4 MUX"},
+	{"RX INT8_1 MIX1 INP2", "RX5", "CDC_IF RX5 MUX"},
+	{"RX INT8_1 MIX1 INP2", "RX6", "CDC_IF RX6 MUX"},
+	{"RX INT8_1 MIX1 INP2", "RX7", "CDC_IF RX7 MUX"},
+	{"RX INT8_1 MIX1 INP2", "IIR0", "IIR0"},
+	{"RX INT8_1 MIX1 INP2", "IIR1", "IIR1"},
+
+	{"RX INT0_1 MIX1", NULL, "RX INT0_1 MIX1 INP0"},
+	{"RX INT0_1 MIX1", NULL, "RX INT0_1 MIX1 INP1"},
+	{"RX INT0_1 MIX1", NULL, "RX INT0_1 MIX1 INP2"},
+	{"RX INT1_1 MIX1", NULL, "RX INT1_1 MIX1 INP0"},
+	{"RX INT1_1 MIX1", NULL, "RX INT1_1 MIX1 INP1"},
+	{"RX INT1_1 MIX1", NULL, "RX INT1_1 MIX1 INP2"},
+	{"RX INT2_1 MIX1", NULL, "RX INT2_1 MIX1 INP0"},
+	{"RX INT2_1 MIX1", NULL, "RX INT2_1 MIX1 INP1"},
+	{"RX INT2_1 MIX1", NULL, "RX INT2_1 MIX1 INP2"},
+	{"RX INT3_1 MIX1", NULL, "RX INT3_1 MIX1 INP0"},
+	{"RX INT3_1 MIX1", NULL, "RX INT3_1 MIX1 INP1"},
+	{"RX INT3_1 MIX1", NULL, "RX INT3_1 MIX1 INP2"},
+	{"RX INT4_1 MIX1", NULL, "RX INT4_1 MIX1 INP0"},
+	{"RX INT4_1 MIX1", NULL, "RX INT4_1 MIX1 INP1"},
+	{"RX INT4_1 MIX1", NULL, "RX INT4_1 MIX1 INP2"},
+	{"RX INT7_1 MIX1", NULL, "RX INT7_1 MIX1 INP0"},
+	{"RX INT7_1 MIX1", NULL, "RX INT7_1 MIX1 INP1"},
+	{"RX INT7_1 MIX1", NULL, "RX INT7_1 MIX1 INP2"},
+	{"RX INT8_1 MIX1", NULL, "RX INT8_1 MIX1 INP0"},
+	{"RX INT8_1 MIX1", NULL, "RX INT8_1 MIX1 INP1"},
+	{"RX INT8_1 MIX1", NULL, "RX INT8_1 MIX1 INP2"},
+
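+	/*
+	 * Each interpolator has a main path (INTn_1, fed by the MIX1
+	 * summing mixers above) and a mixing path (INTn_2, fed straight
+	 * from a CDC_IF RX mux); the two join at the SEC MIX widget.
+	 */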
+	/* Mixing path INT0 */
+	{"RX INT0_2 MUX", "RX0", "CDC_IF RX0 MUX"},
+	{"RX INT0_2 MUX", "RX1", "CDC_IF RX1 MUX"},
+	{"RX INT0_2 MUX", "RX2", "CDC_IF RX2 MUX"},
+	{"RX INT0_2 MUX", "RX3", "CDC_IF RX3 MUX"},
+	{"RX INT0_2 MUX", "RX4", "CDC_IF RX4 MUX"},
+	{"RX INT0_2 MUX", "RX5", "CDC_IF RX5 MUX"},
+	{"RX INT0_2 MUX", "RX6", "CDC_IF RX6 MUX"},
+	{"RX INT0_2 MUX", "RX7", "CDC_IF RX7 MUX"},
+	{"RX INT0_2 INTERP", NULL, "RX INT0_2 MUX"},
+	{"RX INT0 SEC MIX", NULL, "RX INT0_2 INTERP"},
+
+	/* Mixing path INT1 */
+	{"RX INT1_2 MUX", "RX0", "CDC_IF RX0 MUX"},
+	{"RX INT1_2 MUX", "RX1", "CDC_IF RX1 MUX"},
+	{"RX INT1_2 MUX", "RX2", "CDC_IF RX2 MUX"},
+	{"RX INT1_2 MUX", "RX3", "CDC_IF RX3 MUX"},
+	{"RX INT1_2 MUX", "RX4", "CDC_IF RX4 MUX"},
+	{"RX INT1_2 MUX", "RX5", "CDC_IF RX5 MUX"},
+	{"RX INT1_2 MUX", "RX6", "CDC_IF RX6 MUX"},
+	{"RX INT1_2 MUX", "RX7", "CDC_IF RX7 MUX"},
+	{"RX INT1_2 INTERP", NULL, "RX INT1_2 MUX"},
+	{"RX INT1 SEC MIX", NULL, "RX INT1_2 INTERP"},
+
+	/* Mixing path INT2 */
+	{"RX INT2_2 MUX", "RX0", "CDC_IF RX0 MUX"},
+	{"RX INT2_2 MUX", "RX1", "CDC_IF RX1 MUX"},
+	{"RX INT2_2 MUX", "RX2", "CDC_IF RX2 MUX"},
+	{"RX INT2_2 MUX", "RX3", "CDC_IF RX3 MUX"},
+	{"RX INT2_2 MUX", "RX4", "CDC_IF RX4 MUX"},
+	{"RX INT2_2 MUX", "RX5", "CDC_IF RX5 MUX"},
+	{"RX INT2_2 MUX", "RX6", "CDC_IF RX6 MUX"},
+	{"RX INT2_2 MUX", "RX7", "CDC_IF RX7 MUX"},
+	{"RX INT2_2 INTERP", NULL, "RX INT2_2 MUX"},
+	{"RX INT2 SEC MIX", NULL, "RX INT2_2 INTERP"},
+
+	/* Mixing path INT3 */
+	{"RX INT3_2 MUX", "RX0", "CDC_IF RX0 MUX"},
+	{"RX INT3_2 MUX", "RX1", "CDC_IF RX1 MUX"},
+	{"RX INT3_2 MUX", "RX2", "CDC_IF RX2 MUX"},
+	{"RX INT3_2 MUX", "RX3", "CDC_IF RX3 MUX"},
+	{"RX INT3_2 MUX", "RX4", "CDC_IF RX4 MUX"},
+	{"RX INT3_2 MUX", "RX5", "CDC_IF RX5 MUX"},
+	{"RX INT3_2 MUX", "RX6", "CDC_IF RX6 MUX"},
+	{"RX INT3_2 MUX", "RX7", "CDC_IF RX7 MUX"},
+	{"RX INT3_2 INTERP", NULL, "RX INT3_2 MUX"},
+	{"RX INT3 SEC MIX", NULL, "RX INT3_2 INTERP"},
+
+	/* Mixing path INT4 */
+	{"RX INT4_2 MUX", "RX0", "CDC_IF RX0 MUX"},
+	{"RX INT4_2 MUX", "RX1", "CDC_IF RX1 MUX"},
+	{"RX INT4_2 MUX", "RX2", "CDC_IF RX2 MUX"},
+	{"RX INT4_2 MUX", "RX3", "CDC_IF RX3 MUX"},
+	{"RX INT4_2 MUX", "RX4", "CDC_IF RX4 MUX"},
+	{"RX INT4_2 MUX", "RX5", "CDC_IF RX5 MUX"},
+	{"RX INT4_2 MUX", "RX6", "CDC_IF RX6 MUX"},
+	{"RX INT4_2 MUX", "RX7", "CDC_IF RX7 MUX"},
+	{"RX INT4_2 INTERP", NULL, "RX INT4_2 MUX"},
+	{"RX INT4 SEC MIX", NULL, "RX INT4_2 INTERP"},
+
+	/* Mixing path INT7 */
+	{"RX INT7_2 MUX", "RX0", "CDC_IF RX0 MUX"},
+	{"RX INT7_2 MUX", "RX1", "CDC_IF RX1 MUX"},
+	{"RX INT7_2 MUX", "RX2", "CDC_IF RX2 MUX"},
+	{"RX INT7_2 MUX", "RX3", "CDC_IF RX3 MUX"},
+	{"RX INT7_2 MUX", "RX4", "CDC_IF RX4 MUX"},
+	{"RX INT7_2 MUX", "RX5", "CDC_IF RX5 MUX"},
+	{"RX INT7_2 MUX", "RX6", "CDC_IF RX6 MUX"},
+	{"RX INT7_2 MUX", "RX7", "CDC_IF RX7 MUX"},
+	{"RX INT7_2 INTERP", NULL, "RX INT7_2 MUX"},
+	{"RX INT7 SEC MIX", NULL, "RX INT7_2 INTERP"},
+
+	/* Mixing path INT8 */
+	{"RX INT8_2 MUX", "RX0", "CDC_IF RX0 MUX"},
+	{"RX INT8_2 MUX", "RX1", "CDC_IF RX1 MUX"},
+	{"RX INT8_2 MUX", "RX2", "CDC_IF RX2 MUX"},
+	{"RX INT8_2 MUX", "RX3", "CDC_IF RX3 MUX"},
+	{"RX INT8_2 MUX", "RX4", "CDC_IF RX4 MUX"},
+	{"RX INT8_2 MUX", "RX5", "CDC_IF RX5 MUX"},
+	{"RX INT8_2 MUX", "RX6", "CDC_IF RX6 MUX"},
+	{"RX INT8_2 MUX", "RX7", "CDC_IF RX7 MUX"},
+	{"RX INT8_2 INTERP", NULL, "RX INT8_2 MUX"},
+	{"RX INT8 SEC MIX", NULL, "RX INT8_2 INTERP"},
+
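+	/*
+	 * Per-interpolator output chains.  Example complete HPHL
+	 * playback path (SLIM hops from tavil_slim_audio_map):
+	 *   AIF1 PB -> SLIM RX1 MUX -> SLIM RX1 -> CDC_IF RX1 MUX ->
+	 *   RX INT1_1 MIX1 INP0 -> RX INT1_1 MIX1 -> RX INT1_1 INTERP ->
+	 *   RX INT1 SEC MIX -> RX INT1 MIX2 -> RX INT1 MIX3 ->
+	 *   RX INT1 DEM MUX -> RX INT1 DAC -> HPHL PA -> HPHL
+	 */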
+	{"RX INT0_1 INTERP", NULL, "RX INT0_1 MIX1"},
+	{"RX INT0 SEC MIX", NULL, "RX INT0_1 INTERP"},
+	{"RX INT0 MIX2", NULL, "RX INT0 SEC MIX"},
+	{"RX INT0 MIX2", NULL, "RX INT0 MIX2 INP"},
+	{"RX INT0 DEM MUX", "CLSH_DSM_OUT", "RX INT0 MIX2"},
+	{"RX INT0 DAC", NULL, "RX INT0 DEM MUX"},
+	{"RX INT0 DAC", NULL, "RX_BIAS"},
+	{"EAR PA", NULL, "RX INT0 DAC"},
+	{"EAR", NULL, "EAR PA"},
+
+	{"RX INT1_1 INTERP", NULL, "RX INT1_1 MIX1"},
+	{"RX INT1 SEC MIX", NULL, "RX INT1_1 INTERP"},
+	{"RX INT1 MIX2", NULL, "RX INT1 SEC MIX"},
+	{"RX INT1 MIX2", NULL, "RX INT1 MIX2 INP"},
+	{"RX INT1 MIX3", NULL, "RX INT1 MIX2"},
+	{"RX INT1 DEM MUX", "CLSH_DSM_OUT", "RX INT1 MIX3"},
+	{"RX INT1 DAC", NULL, "RX INT1 DEM MUX"},
+	{"RX INT1 DAC", NULL, "RX_BIAS"},
+	{"HPHL PA", NULL, "RX INT1 DAC"},
+	{"HPHL", NULL, "HPHL PA"},
+
+	{"RX INT2_1 INTERP", NULL, "RX INT2_1 MIX1"},
+	{"RX INT2 SEC MIX", NULL, "RX INT2_1 INTERP"},
+	{"RX INT2 MIX2", NULL, "RX INT2 SEC MIX"},
+	{"RX INT2 MIX2", NULL, "RX INT2 MIX2 INP"},
+	{"RX INT2 MIX3", NULL, "RX INT2 MIX2"},
+	{"RX INT2 DEM MUX", "CLSH_DSM_OUT", "RX INT2 MIX3"},
+	{"RX INT2 DAC", NULL, "RX INT2 DEM MUX"},
+	{"RX INT2 DAC", NULL, "RX_BIAS"},
+	{"HPHR PA", NULL, "RX INT2 DAC"},
+	{"HPHR", NULL, "HPHR PA"},
+
+	{"RX INT3_1 INTERP", NULL, "RX INT3_1 MIX1"},
+	{"RX INT3 SEC MIX", NULL, "RX INT3_1 INTERP"},
+	{"RX INT3 MIX2", NULL, "RX INT3 SEC MIX"},
+	{"RX INT3 MIX2", NULL, "RX INT3 MIX2 INP"},
+	{"RX INT3 MIX3", NULL, "RX INT3 MIX2"},
+	{"RX INT3 DAC", NULL, "RX INT3 MIX3"},
+	{"RX INT3 DAC", NULL, "RX_BIAS"},
+	{"LINEOUT1 PA", NULL, "RX INT3 DAC"},
+	{"LINEOUT1", NULL, "LINEOUT1 PA"},
+
+	{"RX INT4_1 INTERP", NULL, "RX INT4_1 MIX1"},
+	{"RX INT4 SEC MIX", NULL, "RX INT4_1 INTERP"},
+	{"RX INT4 SEC MIX", NULL, "RX INT4_1 MIX1"},
+	{"RX INT4 MIX2", NULL, "RX INT4 SEC MIX"},
+	{"RX INT4 MIX2", NULL, "RX INT4 MIX2 INP"},
+	{"RX INT4 MIX3", NULL, "RX INT4 MIX2"},
+	{"RX INT4 DAC", NULL, "RX INT4 MIX3"},
+	{"RX INT4 DAC", NULL, "RX_BIAS"},
+	{"LINEOUT2 PA", NULL, "RX INT4 DAC"},
+	{"LINEOUT2", NULL, "LINEOUT2 PA"},
+
+	{"RX INT7_1 INTERP", NULL, "RX INT7_1 MIX1"},
+	{"RX INT7 SEC MIX", NULL, "RX INT7_1 INTERP"},
+	{"RX INT7 MIX2", NULL, "RX INT7 SEC MIX"},
+	{"RX INT7 MIX2", NULL, "RX INT7 MIX2 INP"},
+	{"RX INT7 CHAIN", NULL, "RX INT7 MIX2"},
+	{"RX INT7 CHAIN", NULL, "RX_BIAS"},
+	{"SPK1 OUT", NULL, "RX INT7 CHAIN"},
+
+	{"RX INT8_1 INTERP", NULL, "RX INT8_1 MIX1"},
+	{"RX INT8 SEC MIX", NULL, "RX INT8_1 INTERP"},
+	{"RX INT8 SEC MIX", NULL, "RX INT8_1 MIX1"},
+	{"RX INT8 CHAIN", NULL, "RX INT8 SEC MIX"},
+	{"RX INT8 CHAIN", NULL, "RX_BIAS"},
+	{"SPK2 OUT", NULL, "RX INT8 CHAIN"},
+
+	/* ANC (Active Noise Cancellation) routing */
+	{"ANC0 FB MUX", "ANC_IN_EAR", "RX INT0 MIX2"},
+	{"ANC0 FB MUX", "ANC_IN_HPHL", "RX INT1 MIX2"},
+	{"ANC0 FB MUX", "ANC_IN_LO1", "RX INT3 MIX2"},
+	{"ANC0 FB MUX", "ANC_IN_EAR_SPKR", "RX INT7 MIX2"},
+	{"ANC1 FB MUX", "ANC_IN_HPHR", "RX INT2 MIX2"},
+	{"ANC1 FB MUX", "ANC_IN_LO2", "RX INT4 MIX2"},
+
+	{"ANC OUT EAR Enable", "Switch", "ADC MUX10"},
+	{"ANC OUT EAR Enable", "Switch", "ADC MUX11"},
+	{"RX INT0 MIX2", NULL, "ANC OUT EAR Enable"},
+
+	{"ANC OUT HPHL Enable", "Switch", "ADC MUX10"},
+	{"ANC OUT HPHL Enable", "Switch", "ADC MUX11"},
+	{"RX INT1 MIX2", NULL, "ANC OUT HPHL Enable"},
+
+	{"ANC OUT HPHR Enable", "Switch", "ADC MUX12"},
+	{"ANC OUT HPHR Enable", "Switch", "ADC MUX13"},
+	{"RX INT2 MIX2", NULL, "ANC OUT HPHR Enable"},
+
+	{"ANC EAR PA", NULL, "RX INT0 DAC"},
+	{"ANC EAR", NULL, "ANC EAR PA"},
+
+	{"ANC HPHL PA", NULL, "RX INT1 DAC"},
+	{"ANC HPHL", NULL, "ANC HPHL PA"},
+
+	{"ANC HPHR PA", NULL, "RX INT2 DAC"},
+	{"ANC HPHR", NULL, "ANC HPHR PA"},
+
+	{"ANC OUT EAR SPKR Enable", "Switch", "ADC MUX10"},
+	{"ANC OUT EAR SPKR Enable", "Switch", "ADC MUX11"},
+	{"RX INT7 MIX2", NULL, "ANC OUT EAR SPKR Enable"},
+
+	{"ANC SPKR PA Enable", "Switch", "RX INT7 CHAIN"},
+	{"ANC SPK1 PA", NULL, "ANC SPKR PA Enable"},
+	{"SPK1 OUT", NULL, "ANC SPK1 PA"},
+
+	/*
+	 * SRC0, SRC1 inputs to Sidetone RX Mixer
+	 * on RX0, RX1, RX2, RX3, RX4 and RX7 chains
+	 */
+	{"IIR0", NULL, "IIR0 INP0 MUX"},
+	{"IIR0 INP0 MUX", "DEC0", "ADC MUX0"},
+	{"IIR0 INP0 MUX", "DEC1", "ADC MUX1"},
+	{"IIR0 INP0 MUX", "DEC2", "ADC MUX2"},
+	{"IIR0 INP0 MUX", "DEC3", "ADC MUX3"},
+	{"IIR0 INP0 MUX", "DEC4", "ADC MUX4"},
+	{"IIR0 INP0 MUX", "DEC5", "ADC MUX5"},
+	{"IIR0 INP0 MUX", "DEC6", "ADC MUX6"},
+	{"IIR0 INP0 MUX", "DEC7", "ADC MUX7"},
+	{"IIR0 INP0 MUX", "DEC8", "ADC MUX8"},
+	{"IIR0 INP0 MUX", "RX0", "CDC_IF RX0 MUX"},
+	{"IIR0 INP0 MUX", "RX1", "CDC_IF RX1 MUX"},
+	{"IIR0 INP0 MUX", "RX2", "CDC_IF RX2 MUX"},
+	{"IIR0 INP0 MUX", "RX3", "CDC_IF RX3 MUX"},
+	{"IIR0 INP0 MUX", "RX4", "CDC_IF RX4 MUX"},
+	{"IIR0 INP0 MUX", "RX5", "CDC_IF RX5 MUX"},
+	{"IIR0 INP0 MUX", "RX6", "CDC_IF RX6 MUX"},
+	{"IIR0 INP0 MUX", "RX7", "CDC_IF RX7 MUX"},
+	{"IIR0", NULL, "IIR0 INP1 MUX"},
+	{"IIR0 INP1 MUX", "DEC0", "ADC MUX0"},
+	{"IIR0 INP1 MUX", "DEC1", "ADC MUX1"},
+	{"IIR0 INP1 MUX", "DEC2", "ADC MUX2"},
+	{"IIR0 INP1 MUX", "DEC3", "ADC MUX3"},
+	{"IIR0 INP1 MUX", "DEC4", "ADC MUX4"},
+	{"IIR0 INP1 MUX", "DEC5", "ADC MUX5"},
+	{"IIR0 INP1 MUX", "DEC6", "ADC MUX6"},
+	{"IIR0 INP1 MUX", "DEC7", "ADC MUX7"},
+	{"IIR0 INP1 MUX", "DEC8", "ADC MUX8"},
+	{"IIR0 INP1 MUX", "RX0", "CDC_IF RX0 MUX"},
+	{"IIR0 INP1 MUX", "RX1", "CDC_IF RX1 MUX"},
+	{"IIR0 INP1 MUX", "RX2", "CDC_IF RX2 MUX"},
+	{"IIR0 INP1 MUX", "RX3", "CDC_IF RX3 MUX"},
+	{"IIR0 INP1 MUX", "RX4", "CDC_IF RX4 MUX"},
+	{"IIR0 INP1 MUX", "RX5", "CDC_IF RX5 MUX"},
+	{"IIR0 INP1 MUX", "RX6", "CDC_IF RX6 MUX"},
+	{"IIR0 INP1 MUX", "RX7", "CDC_IF RX7 MUX"},
+	{"IIR0", NULL, "IIR0 INP2 MUX"},
+	{"IIR0 INP2 MUX", "DEC0", "ADC MUX0"},
+	{"IIR0 INP2 MUX", "DEC1", "ADC MUX1"},
+	{"IIR0 INP2 MUX", "DEC2", "ADC MUX2"},
+	{"IIR0 INP2 MUX", "DEC3", "ADC MUX3"},
+	{"IIR0 INP2 MUX", "DEC4", "ADC MUX4"},
+	{"IIR0 INP2 MUX", "DEC5", "ADC MUX5"},
+	{"IIR0 INP2 MUX", "DEC6", "ADC MUX6"},
+	{"IIR0 INP2 MUX", "DEC7", "ADC MUX7"},
+	{"IIR0 INP2 MUX", "DEC8", "ADC MUX8"},
+	{"IIR0 INP2 MUX", "RX0", "CDC_IF RX0 MUX"},
+	{"IIR0 INP2 MUX", "RX1", "CDC_IF RX1 MUX"},
+	{"IIR0 INP2 MUX", "RX2", "CDC_IF RX2 MUX"},
+	{"IIR0 INP2 MUX", "RX3", "CDC_IF RX3 MUX"},
+	{"IIR0 INP2 MUX", "RX4", "CDC_IF RX4 MUX"},
+	{"IIR0 INP2 MUX", "RX5", "CDC_IF RX5 MUX"},
+	{"IIR0 INP2 MUX", "RX6", "CDC_IF RX6 MUX"},
+	{"IIR0 INP2 MUX", "RX7", "CDC_IF RX7 MUX"},
+	{"IIR0", NULL, "IIR0 INP3 MUX"},
+	{"IIR0 INP3 MUX", "DEC0", "ADC MUX0"},
+	{"IIR0 INP3 MUX", "DEC1", "ADC MUX1"},
+	{"IIR0 INP3 MUX", "DEC2", "ADC MUX2"},
+	{"IIR0 INP3 MUX", "DEC3", "ADC MUX3"},
+	{"IIR0 INP3 MUX", "DEC4", "ADC MUX4"},
+	{"IIR0 INP3 MUX", "DEC5", "ADC MUX5"},
+	{"IIR0 INP3 MUX", "DEC6", "ADC MUX6"},
+	{"IIR0 INP3 MUX", "DEC7", "ADC MUX7"},
+	{"IIR0 INP3 MUX", "DEC8", "ADC MUX8"},
+	{"IIR0 INP3 MUX", "RX0", "CDC_IF RX0 MUX"},
+	{"IIR0 INP3 MUX", "RX1", "CDC_IF RX1 MUX"},
+	{"IIR0 INP3 MUX", "RX2", "CDC_IF RX2 MUX"},
+	{"IIR0 INP3 MUX", "RX3", "CDC_IF RX3 MUX"},
+	{"IIR0 INP3 MUX", "RX4", "CDC_IF RX4 MUX"},
+	{"IIR0 INP3 MUX", "RX5", "CDC_IF RX5 MUX"},
+	{"IIR0 INP3 MUX", "RX6", "CDC_IF RX6 MUX"},
+	{"IIR0 INP3 MUX", "RX7", "CDC_IF RX7 MUX"},
+
+	{"IIR1", NULL, "IIR1 INP0 MUX"},
+	{"IIR1 INP0 MUX", "DEC0", "ADC MUX0"},
+	{"IIR1 INP0 MUX", "DEC1", "ADC MUX1"},
+	{"IIR1 INP0 MUX", "DEC2", "ADC MUX2"},
+	{"IIR1 INP0 MUX", "DEC3", "ADC MUX3"},
+	{"IIR1 INP0 MUX", "DEC4", "ADC MUX4"},
+	{"IIR1 INP0 MUX", "DEC5", "ADC MUX5"},
+	{"IIR1 INP0 MUX", "DEC6", "ADC MUX6"},
+	{"IIR1 INP0 MUX", "DEC7", "ADC MUX7"},
+	{"IIR1 INP0 MUX", "DEC8", "ADC MUX8"},
+	{"IIR1 INP0 MUX", "RX0", "CDC_IF RX0 MUX"},
+	{"IIR1 INP0 MUX", "RX1", "CDC_IF RX1 MUX"},
+	{"IIR1 INP0 MUX", "RX2", "CDC_IF RX2 MUX"},
+	{"IIR1 INP0 MUX", "RX3", "CDC_IF RX3 MUX"},
+	{"IIR1 INP0 MUX", "RX4", "CDC_IF RX4 MUX"},
+	{"IIR1 INP0 MUX", "RX5", "CDC_IF RX5 MUX"},
+	{"IIR1 INP0 MUX", "RX6", "CDC_IF RX6 MUX"},
+	{"IIR1 INP0 MUX", "RX7", "CDC_IF RX7 MUX"},
+	{"IIR1", NULL, "IIR1 INP1 MUX"},
+	{"IIR1 INP1 MUX", "DEC0", "ADC MUX0"},
+	{"IIR1 INP1 MUX", "DEC1", "ADC MUX1"},
+	{"IIR1 INP1 MUX", "DEC2", "ADC MUX2"},
+	{"IIR1 INP1 MUX", "DEC3", "ADC MUX3"},
+	{"IIR1 INP1 MUX", "DEC4", "ADC MUX4"},
+	{"IIR1 INP1 MUX", "DEC5", "ADC MUX5"},
+	{"IIR1 INP1 MUX", "DEC6", "ADC MUX6"},
+	{"IIR1 INP1 MUX", "DEC7", "ADC MUX7"},
+	{"IIR1 INP1 MUX", "DEC8", "ADC MUX8"},
+	{"IIR1 INP1 MUX", "RX0", "CDC_IF RX0 MUX"},
+	{"IIR1 INP1 MUX", "RX1", "CDC_IF RX1 MUX"},
+	{"IIR1 INP1 MUX", "RX2", "CDC_IF RX2 MUX"},
+	{"IIR1 INP1 MUX", "RX3", "CDC_IF RX3 MUX"},
+	{"IIR1 INP1 MUX", "RX4", "CDC_IF RX4 MUX"},
+	{"IIR1 INP1 MUX", "RX5", "CDC_IF RX5 MUX"},
+	{"IIR1 INP1 MUX", "RX6", "CDC_IF RX6 MUX"},
+	{"IIR1 INP1 MUX", "RX7", "CDC_IF RX7 MUX"},
+	{"IIR1", NULL, "IIR1 INP2 MUX"},
+	{"IIR1 INP2 MUX", "DEC0", "ADC MUX0"},
+	{"IIR1 INP2 MUX", "DEC1", "ADC MUX1"},
+	{"IIR1 INP2 MUX", "DEC2", "ADC MUX2"},
+	{"IIR1 INP2 MUX", "DEC3", "ADC MUX3"},
+	{"IIR1 INP2 MUX", "DEC4", "ADC MUX4"},
+	{"IIR1 INP2 MUX", "DEC5", "ADC MUX5"},
+	{"IIR1 INP2 MUX", "DEC6", "ADC MUX6"},
+	{"IIR1 INP2 MUX", "DEC7", "ADC MUX7"},
+	{"IIR1 INP2 MUX", "DEC8", "ADC MUX8"},
+	{"IIR1 INP2 MUX", "RX0", "CDC_IF RX0 MUX"},
+	{"IIR1 INP2 MUX", "RX1", "CDC_IF RX1 MUX"},
+	{"IIR1 INP2 MUX", "RX2", "CDC_IF RX2 MUX"},
+	{"IIR1 INP2 MUX", "RX3", "CDC_IF RX3 MUX"},
+	{"IIR1 INP2 MUX", "RX4", "CDC_IF RX4 MUX"},
+	{"IIR1 INP2 MUX", "RX5", "CDC_IF RX5 MUX"},
+	{"IIR1 INP2 MUX", "RX6", "CDC_IF RX6 MUX"},
+	{"IIR1 INP2 MUX", "RX7", "CDC_IF RX7 MUX"},
+	{"IIR1", NULL, "IIR1 INP3 MUX"},
+	{"IIR1 INP3 MUX", "DEC0", "ADC MUX0"},
+	{"IIR1 INP3 MUX", "DEC1", "ADC MUX1"},
+	{"IIR1 INP3 MUX", "DEC2", "ADC MUX2"},
+	{"IIR1 INP3 MUX", "DEC3", "ADC MUX3"},
+	{"IIR1 INP3 MUX", "DEC4", "ADC MUX4"},
+	{"IIR1 INP3 MUX", "DEC5", "ADC MUX5"},
+	{"IIR1 INP3 MUX", "DEC6", "ADC MUX6"},
+	{"IIR1 INP3 MUX", "DEC7", "ADC MUX7"},
+	{"IIR1 INP3 MUX", "DEC8", "ADC MUX8"},
+	{"IIR1 INP3 MUX", "RX0", "CDC_IF RX0 MUX"},
+	{"IIR1 INP3 MUX", "RX1", "CDC_IF RX1 MUX"},
+	{"IIR1 INP3 MUX", "RX2", "CDC_IF RX2 MUX"},
+	{"IIR1 INP3 MUX", "RX3", "CDC_IF RX3 MUX"},
+	{"IIR1 INP3 MUX", "RX4", "CDC_IF RX4 MUX"},
+	{"IIR1 INP3 MUX", "RX5", "CDC_IF RX5 MUX"},
+	{"IIR1 INP3 MUX", "RX6", "CDC_IF RX6 MUX"},
+	{"IIR1 INP3 MUX", "RX7", "CDC_IF RX7 MUX"},
+
+	{"SRC0", NULL, "IIR0"},
+	{"SRC1", NULL, "IIR1"},
+	{"RX INT0 MIX2 INP", "SRC0", "SRC0"},
+	{"RX INT0 MIX2 INP", "SRC1", "SRC1"},
+	{"RX INT1 MIX2 INP", "SRC0", "SRC0"},
+	{"RX INT1 MIX2 INP", "SRC1", "SRC1"},
+	{"RX INT2 MIX2 INP", "SRC0", "SRC0"},
+	{"RX INT2 MIX2 INP", "SRC1", "SRC1"},
+	{"RX INT3 MIX2 INP", "SRC0", "SRC0"},
+	{"RX INT3 MIX2 INP", "SRC1", "SRC1"},
+	{"RX INT4 MIX2 INP", "SRC0", "SRC0"},
+	{"RX INT4 MIX2 INP", "SRC1", "SRC1"},
+	{"RX INT7 MIX2 INP", "SRC0", "SRC0"},
+	{"RX INT7 MIX2 INP", "SRC1", "SRC1"},
+
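+	/*
+	 * Native-rate clocking: with a NATIVE MUX set to "ON", the
+	 * corresponding MIX1/MUX output reaches its interpolator through
+	 * the mux, which also pulls in the per-channel NATIVE SUPPLY.
+	 */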
+	/* Native clk main path routing */
+	{"RX INT1_1 NATIVE MUX", "ON", "RX INT1_1 MIX1"},
+	{"RX INT1_1 INTERP", NULL, "RX INT1_1 NATIVE MUX"},
+	{"RX INT1_1 NATIVE MUX", NULL, "RX INT1 NATIVE SUPPLY"},
+
+	{"RX INT2_1 NATIVE MUX", "ON", "RX INT2_1 MIX1"},
+	{"RX INT2_1 INTERP", NULL, "RX INT2_1 NATIVE MUX"},
+	{"RX INT2_1 NATIVE MUX", NULL, "RX INT2 NATIVE SUPPLY"},
+
+	{"RX INT3_1 NATIVE MUX", "ON", "RX INT3_1 MIX1"},
+	{"RX INT3_1 INTERP", NULL, "RX INT3_1 NATIVE MUX"},
+	{"RX INT3_1 NATIVE MUX", NULL, "RX INT3 NATIVE SUPPLY"},
+
+	{"RX INT4_1 NATIVE MUX", "ON", "RX INT4_1 MIX1"},
+	{"RX INT4_1 INTERP", NULL, "RX INT4_1 NATIVE MUX"},
+	{"RX INT4_1 NATIVE MUX", NULL, "RX INT4 NATIVE SUPPLY"},
+
+	/* Native clk mix path routing */
+	{"RX INT1_2 NATIVE MUX", "ON", "RX INT1_2 MUX"},
+	{"RX INT1_2 INTERP", NULL, "RX INT1_2 NATIVE MUX"},
+	{"RX INT1_2 NATIVE MUX", NULL, "RX INT1 NATIVE SUPPLY"},
+
+	{"RX INT2_2 NATIVE MUX", "ON", "RX INT2_2 MUX"},
+	{"RX INT2_2 INTERP", NULL, "RX INT2_2 NATIVE MUX"},
+	{"RX INT2_2 NATIVE MUX", NULL, "RX INT2 NATIVE SUPPLY"},
+
+	{"RX INT3_2 NATIVE MUX", "ON", "RX INT3_2 MUX"},
+	{"RX INT3_2 INTERP", NULL, "RX INT3_2 NATIVE MUX"},
+	{"RX INT3_2 NATIVE MUX", NULL, "RX INT3 NATIVE SUPPLY"},
+
+	{"RX INT4_2 NATIVE MUX", "ON", "RX INT4_2 MUX"},
+	{"RX INT4_2 INTERP", NULL, "RX INT4_2 NATIVE MUX"},
+	{"RX INT4_2 NATIVE MUX", NULL, "RX INT4 NATIVE SUPPLY"},
+
+	{"RX INT7_2 NATIVE MUX", "ON", "RX INT7_2 MUX"},
+	{"RX INT7_2 INTERP", NULL, "RX INT7_2 NATIVE MUX"},
+	{"RX INT7_2 NATIVE MUX", NULL, "RX INT7 NATIVE SUPPLY"},
+
+	{"RX INT8_2 NATIVE MUX", "ON", "RX INT8_2 MUX"},
+	{"RX INT8_2 INTERP", NULL, "RX INT8_2 NATIVE MUX"},
+	{"RX INT8_2 NATIVE MUX", NULL, "RX INT8 NATIVE SUPPLY"},
+
+	/* ASRC (Asynchronous Sample Rate Converter) routing */
+	{"ASRC0 MUX", "ASRC_IN_HPHL", "RX INT1_2 INTERP"},
+	{"RX INT1 SEC MIX", "HPHL Switch", "ASRC0 MUX"},
+
+	{"ASRC1 MUX", "ASRC_IN_HPHR", "RX INT2_2 INTERP"},
+	{"RX INT2 SEC MIX", "HPHR Switch", "ASRC1 MUX"},
+
+	{"ASRC0 MUX", "ASRC_IN_LO1", "RX INT3_2 INTERP"},
+	{"RX INT3 SEC MIX", "LO1 Switch", "ASRC0 MUX"},
+
+	{"ASRC1 MUX", "ASRC_IN_LO2", "RX INT4_2 INTERP"},
+	{"RX INT4 SEC MIX", "LO2 Switch", "ASRC1 MUX"},
+
+	{"ASRC2 MUX", "ASRC_IN_SPKR1", "RX INT7_2 INTERP"},
+	{"RX INT7 SEC MIX", NULL, "ASRC2 MUX"},
+
+	{"ASRC3 MUX", "ASRC_IN_SPKR2", "RX INT8_2 INTERP"},
+	{"RX INT8 SEC MIX", NULL, "ASRC3 MUX"},
+};
+
+#endif
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/sound/soc/codecs/wcd9xxx-common.c	2019-01-22 16:16:29.555301211 +0100
@@ -0,0 +1,1478 @@
+/* Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <sound/soc.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/mfd/wcd9xxx/wcd9xxx_registers.h>
+#include "wcd9xxx-common.h"
+
+#define CLSH_COMPUTE_EAR 0x01
+#define CLSH_COMPUTE_HPH_L 0x02
+#define CLSH_COMPUTE_HPH_R 0x03
+
+#define BUCK_VREF_0P494V 0x3F
+#define BUCK_VREF_2V 0xFF
+#define BUCK_VREF_1P8V 0xE6
+
+#define BUCK_SETTLE_TIME_US 50
+#define NCP_SETTLE_TIME_US 50
+
+#define MAX_IMPED_PARAMS 13
+
+#define USLEEP_RANGE_MARGIN_US 100
+
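+/* Maps a measured headphone impedance to an index into imped_table. */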
+struct wcd9xxx_imped_val {
+	u32 imped_val;
+	u8 index;
+};
+
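+/*
+ * Class-H controller tuning: each row is a MAX_IMPED_PARAMS-long
+ * sequence of {reg, mask, value} writes (PA scale factors, K
+ * coefficients, idle/fclk-only HPH thresholds) for one headphone
+ * impedance bucket.
+ */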
+static const struct wcd9xxx_reg_mask_val imped_table[][MAX_IMPED_PARAMS] = {
+	{
+		{WCD9XXX_A_CDC_CLSH_I_PA_FACT_HPH_L, 0xff, 0x46},
+		{WCD9XXX_A_CDC_CLSH_I_PA_FACT_HPH_U, 0xff, 0x04},
+		{WCD9XXX_A_CDC_CLSH_K_ADDR, 0xff, 0x00},
+		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x11},
+		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x02},
+		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x9B},
+		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x02},
+		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x15},
+		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x01},
+		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x1C},
+		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x01},
+		{WCD9XXX_A_CDC_CLSH_IDLE_HPH_THSD, 0xff, 0x04},
+		{WCD9XXX_A_CDC_CLSH_FCLKONLY_HPH_THSD, 0xff, 0x0C},
+	},
+	{
+		{WCD9XXX_A_CDC_CLSH_I_PA_FACT_HPH_L, 0xff, 0x47},
+		{WCD9XXX_A_CDC_CLSH_I_PA_FACT_HPH_U, 0xff, 0x05},
+		{WCD9XXX_A_CDC_CLSH_K_ADDR, 0xff, 0x00},
+		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x11},
+		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x02},
+		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x9B},
+		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x02},
+		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x15},
+		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x01},
+		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x1C},
+		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x01},
+		{WCD9XXX_A_CDC_CLSH_IDLE_HPH_THSD, 0xff, 0x05},
+		{WCD9XXX_A_CDC_CLSH_FCLKONLY_HPH_THSD, 0xff, 0x0C},
+	},
+	{
+		{WCD9XXX_A_CDC_CLSH_I_PA_FACT_HPH_L, 0xff, 0x49},
+		{WCD9XXX_A_CDC_CLSH_I_PA_FACT_HPH_U, 0xff, 0x07},
+		{WCD9XXX_A_CDC_CLSH_K_ADDR, 0xff, 0x00},
+		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x00},
+		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x02},
+		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x12},
+		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x01},
+		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x35},
+		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x01},
+		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x4E},
+		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x01},
+		{WCD9XXX_A_CDC_CLSH_IDLE_HPH_THSD, 0xff, 0x06},
+		{WCD9XXX_A_CDC_CLSH_FCLKONLY_HPH_THSD, 0xff, 0x0E},
+	},
+	{
+		{WCD9XXX_A_CDC_CLSH_I_PA_FACT_HPH_L, 0xff, 0x49},
+		{WCD9XXX_A_CDC_CLSH_I_PA_FACT_HPH_U, 0xff, 0x16},
+		{WCD9XXX_A_CDC_CLSH_K_ADDR, 0xff, 0x00},
+		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0xAC},
+		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x02},
+		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x17},
+		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x01},
+		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x5F},
+		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x01},
+		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0xCF},
+		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x01},
+		{WCD9XXX_A_CDC_CLSH_IDLE_HPH_THSD, 0xff, 0x06},
+		{WCD9XXX_A_CDC_CLSH_FCLKONLY_HPH_THSD, 0xff, 0x0F},
+	},
+	{
+		{WCD9XXX_A_CDC_CLSH_I_PA_FACT_HPH_L, 0xff, 0x59},
+		{WCD9XXX_A_CDC_CLSH_I_PA_FACT_HPH_U, 0xff, 0x15},
+		{WCD9XXX_A_CDC_CLSH_K_ADDR, 0xff, 0x00},
+		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x9C},
+		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x02},
+		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x1B},
+		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x01},
+		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0xCE},
+		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x01},
+		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0xBD},
+		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x01},
+		{WCD9XXX_A_CDC_CLSH_IDLE_HPH_THSD, 0xff, 0x07},
+		{WCD9XXX_A_CDC_CLSH_FCLKONLY_HPH_THSD, 0xff, 0x10},
+	},
+	{
+		{WCD9XXX_A_CDC_CLSH_I_PA_FACT_HPH_L, 0xff, 0x66},
+		{WCD9XXX_A_CDC_CLSH_I_PA_FACT_HPH_U, 0xff, 0x04},
+		{WCD9XXX_A_CDC_CLSH_K_ADDR, 0xff, 0x00},
+		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x9A},
+		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x02},
+		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x2E},
+		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x01},
+		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0xBD},
+		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x01},
+		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0xA6},
+		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x01},
+		{WCD9XXX_A_CDC_CLSH_IDLE_HPH_THSD, 0xff, 0x07},
+		{WCD9XXX_A_CDC_CLSH_FCLKONLY_HPH_THSD, 0xff, 0x11},
+	},
+	{
+		{WCD9XXX_A_CDC_CLSH_I_PA_FACT_HPH_L, 0xff, 0x79},
+		{WCD9XXX_A_CDC_CLSH_I_PA_FACT_HPH_U, 0xff, 0x04},
+		{WCD9XXX_A_CDC_CLSH_K_ADDR, 0xff, 0x00},
+		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x11},
+		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x01},
+		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x37},
+		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x01},
+		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0xA6},
+		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x01},
+		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0xAD},
+		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x01},
+		{WCD9XXX_A_CDC_CLSH_IDLE_HPH_THSD, 0xff, 0x08},
+		{WCD9XXX_A_CDC_CLSH_FCLKONLY_HPH_THSD, 0xff, 0x12},
+	},
+	{
+		{WCD9XXX_A_CDC_CLSH_I_PA_FACT_HPH_L, 0xff, 0x76},
+		{WCD9XXX_A_CDC_CLSH_I_PA_FACT_HPH_U, 0xff, 0x04},
+		{WCD9XXX_A_CDC_CLSH_K_ADDR, 0xff, 0x00},
+		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x11},
+		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x01},
+		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x4E},
+		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x01},
+		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0xAD},
+		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x01},
+		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0xAC},
+		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x01},
+		{WCD9XXX_A_CDC_CLSH_IDLE_HPH_THSD, 0xff, 0x09},
+		{WCD9XXX_A_CDC_CLSH_FCLKONLY_HPH_THSD, 0xff, 0x12},
+	},
+	{
+		{WCD9XXX_A_CDC_CLSH_I_PA_FACT_HPH_L, 0xff, 0x78},
+		{WCD9XXX_A_CDC_CLSH_I_PA_FACT_HPH_U, 0xff, 0x05},
+		{WCD9XXX_A_CDC_CLSH_K_ADDR, 0xff, 0x00},
+		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x12},
+		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x01},
+		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0xD0},
+		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x01},
+		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0xAC},
+		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x01},
+		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x13},
+		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x00},
+		{WCD9XXX_A_CDC_CLSH_IDLE_HPH_THSD, 0xff, 0x0A},
+		{WCD9XXX_A_CDC_CLSH_FCLKONLY_HPH_THSD, 0xff, 0x13},
+	},
+	{
+		{WCD9XXX_A_CDC_CLSH_I_PA_FACT_HPH_L, 0xff, 0x7A},
+		{WCD9XXX_A_CDC_CLSH_I_PA_FACT_HPH_U, 0xff, 0x06},
+		{WCD9XXX_A_CDC_CLSH_K_ADDR, 0xff, 0x00},
+		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x14},
+		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x01},
+		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0xB7},
+		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x01},
+		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x13},
+		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x01},
+		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x14},
+		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x00},
+		{WCD9XXX_A_CDC_CLSH_IDLE_HPH_THSD, 0xff, 0x0B},
+		{WCD9XXX_A_CDC_CLSH_FCLKONLY_HPH_THSD, 0xff, 0x14},
+	},
+	{
+		{WCD9XXX_A_CDC_CLSH_I_PA_FACT_HPH_L, 0xff, 0x60},
+		{WCD9XXX_A_CDC_CLSH_I_PA_FACT_HPH_U, 0xff, 0x09},
+		{WCD9XXX_A_CDC_CLSH_K_ADDR, 0xff, 0x00},
+		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x1C},
+		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x01},
+		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0xA4},
+		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x01},
+		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x14},
+		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x00},
+		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x1F},
+		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x00},
+		{WCD9XXX_A_CDC_CLSH_IDLE_HPH_THSD, 0xff, 0x0C},
+		{WCD9XXX_A_CDC_CLSH_FCLKONLY_HPH_THSD, 0xff, 0x14},
+	},
+	{
+		{WCD9XXX_A_CDC_CLSH_I_PA_FACT_HPH_L, 0xff, 0x79},
+		{WCD9XXX_A_CDC_CLSH_I_PA_FACT_HPH_U, 0xff, 0x17},
+		{WCD9XXX_A_CDC_CLSH_K_ADDR, 0xff, 0x00},
+		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x25},
+		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x01},
+		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0xAE},
+		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x01},
+		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x1F},
+		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x00},
+		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x1D},
+		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x00},
+		{WCD9XXX_A_CDC_CLSH_IDLE_HPH_THSD, 0xff, 0x0D},
+		{WCD9XXX_A_CDC_CLSH_FCLKONLY_HPH_THSD, 0xff, 0x15},
+	},
+	{
+		{WCD9XXX_A_CDC_CLSH_I_PA_FACT_HPH_L, 0xff, 0x78},
+		{WCD9XXX_A_CDC_CLSH_I_PA_FACT_HPH_U, 0xff, 0x16},
+		{WCD9XXX_A_CDC_CLSH_K_ADDR, 0xff, 0x00},
+		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x2C},
+		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x01},
+		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0xAC},
+		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x01},
+		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x1D},
+		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x00},
+		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x1C},
+		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x00},
+		{WCD9XXX_A_CDC_CLSH_IDLE_HPH_THSD, 0xff, 0x0E},
+		{WCD9XXX_A_CDC_CLSH_FCLKONLY_HPH_THSD, 0xff, 0x16},
+	},
+	{
+		{WCD9XXX_A_CDC_CLSH_I_PA_FACT_HPH_L, 0xff, 0x89},
+		{WCD9XXX_A_CDC_CLSH_I_PA_FACT_HPH_U, 0xff, 0x05},
+		{WCD9XXX_A_CDC_CLSH_K_ADDR, 0xff, 0x00},
+		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x40},
+		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x01},
+		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x13},
+		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x00},
+		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x1C},
+		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x00},
+		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x1B},
+		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x00},
+		{WCD9XXX_A_CDC_CLSH_IDLE_HPH_THSD, 0xff, 0x10},
+		{WCD9XXX_A_CDC_CLSH_FCLKONLY_HPH_THSD, 0xff, 0x16},
+	},
+	{
+		{WCD9XXX_A_CDC_CLSH_I_PA_FACT_HPH_L, 0xff, 0x97},
+		{WCD9XXX_A_CDC_CLSH_I_PA_FACT_HPH_U, 0xff, 0x05},
+		{WCD9XXX_A_CDC_CLSH_K_ADDR, 0xff, 0x00},
+		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0xD0},
+		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x01},
+		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x14},
+		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x00},
+		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x1B},
+		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x00},
+		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x1B},
+		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x00},
+		{WCD9XXX_A_CDC_CLSH_IDLE_HPH_THSD, 0xff, 0x12},
+		{WCD9XXX_A_CDC_CLSH_FCLKONLY_HPH_THSD, 0xff, 0x17},
+	},
+	{
+		{WCD9XXX_A_CDC_CLSH_I_PA_FACT_HPH_L, 0xff, 0x8A},
+		{WCD9XXX_A_CDC_CLSH_I_PA_FACT_HPH_U, 0xff, 0x06},
+		{WCD9XXX_A_CDC_CLSH_K_ADDR, 0xff, 0x00},
+		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0xB7},
+		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x01},
+		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x10},
+		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x00},
+		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x1B},
+		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x00},
+		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x24},
+		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x00},
+		{WCD9XXX_A_CDC_CLSH_IDLE_HPH_THSD, 0xff, 0x13},
+		{WCD9XXX_A_CDC_CLSH_FCLKONLY_HPH_THSD, 0xff, 0x17},
+	},
+	{
+		{WCD9XXX_A_CDC_CLSH_I_PA_FACT_HPH_L, 0xff, 0x8A},
+		{WCD9XXX_A_CDC_CLSH_I_PA_FACT_HPH_U, 0xff, 0x07},
+		{WCD9XXX_A_CDC_CLSH_K_ADDR, 0xff, 0x00},
+		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0xA4},
+		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x01},
+		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x1D},
+		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x00},
+		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x24},
+		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x00},
+		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x25},
+		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x00},
+		{WCD9XXX_A_CDC_CLSH_IDLE_HPH_THSD, 0xff, 0x15},
+		{WCD9XXX_A_CDC_CLSH_FCLKONLY_HPH_THSD, 0xff, 0x18},
+	},
+	{
+		{WCD9XXX_A_CDC_CLSH_I_PA_FACT_HPH_L, 0xff, 0x9A},
+		{WCD9XXX_A_CDC_CLSH_I_PA_FACT_HPH_U, 0xff, 0x08},
+		{WCD9XXX_A_CDC_CLSH_K_ADDR, 0xff, 0x00},
+		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0xAE},
+		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x01},
+		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x1C},
+		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x00},
+		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x25},
+		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x00},
+		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x27},
+		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x00},
+		{WCD9XXX_A_CDC_CLSH_IDLE_HPH_THSD, 0xff, 0x18},
+		{WCD9XXX_A_CDC_CLSH_FCLKONLY_HPH_THSD, 0xff, 0x19},
+	},
+	{
+		{WCD9XXX_A_CDC_CLSH_I_PA_FACT_HPH_L, 0xff, 0x8B},
+		{WCD9XXX_A_CDC_CLSH_I_PA_FACT_HPH_U, 0xff, 0x18},
+		{WCD9XXX_A_CDC_CLSH_K_ADDR, 0xff, 0x00},
+		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0xAC},
+		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x01},
+		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x1B},
+		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x00},
+		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x20},
+		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x00},
+		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x2E},
+		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x00},
+		{WCD9XXX_A_CDC_CLSH_IDLE_HPH_THSD, 0xff, 0x1A},
+		{WCD9XXX_A_CDC_CLSH_FCLKONLY_HPH_THSD, 0xff, 0x19},
+	},
+	{
+		{WCD9XXX_A_CDC_CLSH_I_PA_FACT_HPH_L, 0xff, 0x9A},
+		{WCD9XXX_A_CDC_CLSH_I_PA_FACT_HPH_U, 0xff, 0x17},
+		{WCD9XXX_A_CDC_CLSH_K_ADDR, 0xff, 0x00},
+		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x13},
+		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x00},
+		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x1B},
+		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x00},
+		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x2E},
+		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x00},
+		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x2D},
+		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x00},
+		{WCD9XXX_A_CDC_CLSH_IDLE_HPH_THSD, 0xff, 0x1D},
+		{WCD9XXX_A_CDC_CLSH_FCLKONLY_HPH_THSD, 0xff, 0x1A},
+	},
+	{
+		{WCD9XXX_A_CDC_CLSH_I_PA_FACT_HPH_L, 0xff, 0xA9},
+		{WCD9XXX_A_CDC_CLSH_I_PA_FACT_HPH_U, 0xff, 0x06},
+		{WCD9XXX_A_CDC_CLSH_K_ADDR, 0xff, 0x00},
+		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x14},
+		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x00},
+		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x24},
+		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x00},
+		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x2D},
+		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x00},
+		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x2C},
+		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x00},
+		{WCD9XXX_A_CDC_CLSH_IDLE_HPH_THSD, 0xff, 0x1F},
+		{WCD9XXX_A_CDC_CLSH_FCLKONLY_HPH_THSD, 0xff, 0x19},
+	},
+	{
+		{WCD9XXX_A_CDC_CLSH_I_PA_FACT_HPH_L, 0xff, 0xB9},
+		{WCD9XXX_A_CDC_CLSH_I_PA_FACT_HPH_U, 0xff, 0x06},
+		{WCD9XXX_A_CDC_CLSH_K_ADDR, 0xff, 0x00},
+		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x10},
+		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x00},
+		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x25},
+		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x00},
+		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x2C},
+		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x00},
+		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x2C},
+		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x00},
+		{WCD9XXX_A_CDC_CLSH_IDLE_HPH_THSD, 0xff, 0x23},
+		{WCD9XXX_A_CDC_CLSH_FCLKONLY_HPH_THSD, 0xff, 0x18},
+	},
+	{
+		{WCD9XXX_A_CDC_CLSH_I_PA_FACT_HPH_L, 0xff, 0xA9},
+		{WCD9XXX_A_CDC_CLSH_I_PA_FACT_HPH_U, 0xff, 0x07},
+		{WCD9XXX_A_CDC_CLSH_K_ADDR, 0xff, 0x00},
+		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x1D},
+		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x00},
+		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x27},
+		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x00},
+		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x2C},
+		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x00},
+		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x35},
+		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x00},
+		{WCD9XXX_A_CDC_CLSH_IDLE_HPH_THSD, 0xff, 0x26},
+		{WCD9XXX_A_CDC_CLSH_FCLKONLY_HPH_THSD, 0xff, 0x16},
+	},
+};
+
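+/*
+ * Map a measured load impedance to a row of imped_table above.  The
+ * imped_val entries are in milliohms (4000 == 4 Ohm), which matches
+ * the pr_debug messages in get_impedance_index() below.
+ */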
+static const struct wcd9xxx_imped_val imped_index[] = {
+	{4000, 0},
+	{4500, 1},
+	{5000, 2},
+	{5500, 3},
+	{6000, 4},
+	{6500, 5},
+	{7000, 6},
+	{7700, 7},
+	{8470, 8},
+	{9317, 9},
+	{10248, 10},
+	{11273, 11},
+	{12400, 12},
+	{13641, 13},
+	{15005, 14},
+	{16505, 15},
+	{18156, 16},
+	{19971, 17},
+	{21969, 18},
+	{24165, 19},
+	{26582, 20},
+	{29240, 21},
+	{32164, 22},
+};
+
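+/*
+ * Reference-counted enable of the class H block: only the first
+ * enable and the last disable actually toggle bit 0 of CLSH_B1_CTL.
+ */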
+static inline void
+wcd9xxx_enable_clsh_block(struct snd_soc_codec *codec,
+			  struct wcd9xxx_clsh_cdc_data *clsh_d, bool enable)
+{
+	if ((enable && ++clsh_d->clsh_users == 1) ||
+	    (!enable && --clsh_d->clsh_users == 0))
+		snd_soc_update_bits(codec, WCD9XXX_A_CDC_CLSH_B1_CTL,
+				    0x01, enable ? 0x01 : 0x00);
+	dev_dbg(codec->dev, "%s: clsh_users %d, enable %d", __func__,
+		clsh_d->clsh_users, enable);
+}
+
+static inline void wcd9xxx_enable_anc_delay(
+	struct snd_soc_codec *codec,
+	bool on)
+{
+	snd_soc_update_bits(codec, WCD9XXX_A_CDC_CLSH_B1_CTL,
+		0x02, on ? 0x02 : 0x00);
+}
+
+static inline void
+wcd9xxx_enable_buck(struct snd_soc_codec *codec,
+		    struct wcd9xxx_clsh_cdc_data *clsh_d, bool enable)
+{
+	if ((enable && ++clsh_d->buck_users == 1) ||
+	    (!enable && --clsh_d->buck_users == 0))
+		snd_soc_update_bits(codec, WCD9XXX_A_BUCK_MODE_1,
+				    0x80, enable ? 0x80 : 0x00);
+	dev_dbg(codec->dev, "%s: buck_users %d, enable %d", __func__,
+		clsh_d->buck_users, enable);
+}
+
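+/* Per-state handlers, indexed by the WCD9XXX_CLSH_STATE_* bit mask */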
+static void (*clsh_state_fp[NUM_CLSH_STATES])(struct snd_soc_codec *,
+					      struct wcd9xxx_clsh_cdc_data *,
+					      u8 req_state, bool req_type);
+
+static const char *state_to_str(u8 state, char *buf, size_t buflen)
+{
+	int i;
+	int cnt = 0;
+	/*
+	 * This array of strings should match with enum wcd9xxx_clsh_state_bit.
+	 */
+	const char *states[] = {
+		"STATE_EAR",
+		"STATE_HPH_L",
+		"STATE_HPH_R",
+		"STATE_LO",
+	};
+
+	if (state == WCD9XXX_CLSH_STATE_IDLE) {
+		snprintf(buf, buflen, "[STATE_IDLE]");
+		goto done;
+	}
+
+	buf[0] = '\0';
+	for (i = 0; i < ARRAY_SIZE(states); i++) {
+		if (!(state & (1 << i)))
+			continue;
+		/*
+		 * Append into the buffer; passing buf as its own source
+		 * argument to snprintf() is undefined behavior.
+		 */
+		cnt += scnprintf(buf + cnt, buflen - cnt, "%s%s",
+				 cnt == 0 ? "[" : "|", states[i]);
+	}
+	if (cnt > 0)
+		strlcat(buf, "]", buflen);
+
+done:
+	if (buf[0] == '\0')
+		snprintf(buf, buflen, "[STATE_UNKNOWN]");
+	return buf;
+}
+
+static void wcd9xxx_cfg_clsh_param_common(
+		struct snd_soc_codec *codec)
+{
+	int i;
+	const struct wcd9xxx_reg_mask_val reg_set[] = {
+		{WCD9XXX_A_CDC_CLSH_BUCK_NCP_VARS, 0x3 << 0, 0},
+		{WCD9XXX_A_CDC_CLSH_BUCK_NCP_VARS, 0x3 << 2, 1 << 2},
+		{WCD9XXX_A_CDC_CLSH_BUCK_NCP_VARS, (0x1 << 4), 0},
+		{WCD9XXX_A_CDC_CLSH_B2_CTL, (0x3 << 0), 0x01},
+		{WCD9XXX_A_CDC_CLSH_B2_CTL, (0x3 << 2), (0x01 << 2)},
+		{WCD9XXX_A_CDC_CLSH_B2_CTL, (0xf << 4), (0x03 << 4)},
+		{WCD9XXX_A_CDC_CLSH_B3_CTL, (0xf << 4), (0x03 << 4)},
+		{WCD9XXX_A_CDC_CLSH_B3_CTL, (0xf << 0), (0x0B)},
+		{WCD9XXX_A_CDC_CLSH_B1_CTL, (0x1 << 5), (0x01 << 5)},
+		{WCD9XXX_A_CDC_CLSH_B1_CTL, (0x1 << 1), (0x01 << 1)},
+	};
+
+	for (i = 0; i < ARRAY_SIZE(reg_set); i++)
+		snd_soc_update_bits(codec, reg_set[i].reg, reg_set[i].mask,
+						    reg_set[i].val);
+
+	dev_dbg(codec->dev, "%s: Programmed class H controller common parameters",
+			 __func__);
+}
+
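+/*
+ * The charge pump request count is a function-local static, so it is
+ * shared by all users of this codec; an unbalanced disable is clamped
+ * back to zero and flagged with WARN_ON() below.
+ */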
+static void wcd9xxx_chargepump_request(struct snd_soc_codec *codec, bool on)
+{
+	static int cp_count;
+
+	if (on && (++cp_count == 1)) {
+		snd_soc_update_bits(codec, WCD9XXX_A_CDC_CLK_OTHR_CTL,
+				    0x01, 0x01);
+		dev_dbg(codec->dev, "%s: Charge Pump enabled, count = %d\n",
+			__func__, cp_count);
+	} else if (!on) {
+		if (--cp_count < 0) {
+			dev_dbg(codec->dev,
+				"%s: Unbalanced disable for charge pump\n",
+				__func__);
+			if (snd_soc_read(codec, WCD9XXX_A_CDC_CLK_OTHR_CTL) &
+			    0x01) {
+				dev_dbg(codec->dev,
+					"%s: Actual chargepump is ON\n",
+					__func__);
+			}
+			cp_count = 0;
+			WARN_ON(1);
+		}
+
+		if (cp_count == 0) {
+			snd_soc_update_bits(codec, WCD9XXX_A_CDC_CLK_OTHR_CTL,
+					    0x01, 0x00);
+			dev_dbg(codec->dev,
+				"%s: Charge pump disabled, count = %d\n",
+				__func__, cp_count);
+		}
+	}
+}
+
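+/*
+ * Switch the HPH PAs into (or out of) the class AB high performance
+ * path.  NCP FCLK level 8 users are counted so that the pump can fall
+ * back to level 5, or off, once the last high performance user leaves.
+ */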
+void wcd9xxx_enable_high_perf_mode(struct snd_soc_codec *codec,
+				struct wcd9xxx_clsh_cdc_data *clsh_d,
+				u8 uhqa_mode, u8 req_state, bool req_type)
+{
+	dev_dbg(codec->dev, "%s: users fclk8 %d, fclk5 %d", __func__,
+			clsh_d->ncp_users[NCP_FCLK_LEVEL_8],
+			clsh_d->ncp_users[NCP_FCLK_LEVEL_5]);
+
+	if (req_type == WCD9XXX_CLSAB_REQ_ENABLE) {
+		clsh_d->ncp_users[NCP_FCLK_LEVEL_8]++;
+		snd_soc_write(codec, WCD9XXX_A_RX_HPH_BIAS_PA,
+					WCD9XXX_A_RX_HPH_BIAS_PA__POR);
+		snd_soc_write(codec, WCD9XXX_A_RX_HPH_L_PA_CTL, 0x48);
+		snd_soc_write(codec, WCD9XXX_A_RX_HPH_R_PA_CTL, 0x48);
+		if (uhqa_mode)
+			snd_soc_update_bits(codec, WCD9XXX_A_RX_HPH_CHOP_CTL,
+						0x20, 0x00);
+		wcd9xxx_chargepump_request(codec, true);
+		wcd9xxx_enable_anc_delay(codec, true);
+		wcd9xxx_enable_buck(codec, clsh_d, false);
+		if (clsh_d->ncp_users[NCP_FCLK_LEVEL_8] > 0)
+			snd_soc_update_bits(codec, WCD9XXX_A_NCP_STATIC,
+						0x0F, 0x08);
+		snd_soc_update_bits(codec, WCD9XXX_A_NCP_STATIC, 0x30, 0x30);
+
+		/* Enable NCP and wait until it settles */
+		if (snd_soc_update_bits(codec, WCD9XXX_A_NCP_EN, 0x01, 0x01))
+			usleep_range(NCP_SETTLE_TIME_US, NCP_SETTLE_TIME_US+10);
+	} else {
+		snd_soc_update_bits(codec, WCD9XXX_A_RX_HPH_CHOP_CTL,
+					0x20, 0x20);
+		snd_soc_write(codec, WCD9XXX_A_RX_HPH_L_PA_CTL,
+					WCD9XXX_A_RX_HPH_L_PA_CTL__POR);
+		snd_soc_write(codec, WCD9XXX_A_RX_HPH_R_PA_CTL,
+					WCD9XXX_A_RX_HPH_R_PA_CTL__POR);
+		snd_soc_write(codec, WCD9XXX_A_RX_HPH_BIAS_PA, 0x57);
+		wcd9xxx_enable_buck(codec, clsh_d, true);
+		wcd9xxx_chargepump_request(codec, false);
+		wcd9xxx_enable_anc_delay(codec, false);
+		clsh_d->ncp_users[NCP_FCLK_LEVEL_8]--;
+		if (clsh_d->ncp_users[NCP_FCLK_LEVEL_8] == 0 &&
+		    clsh_d->ncp_users[NCP_FCLK_LEVEL_5] == 0)
+			snd_soc_update_bits(codec, WCD9XXX_A_NCP_EN,
+						0x01, 0x00);
+		else if (clsh_d->ncp_users[NCP_FCLK_LEVEL_8] == 0)
+			snd_soc_update_bits(codec, WCD9XXX_A_NCP_STATIC,
+						0x0F, 0x05);
+	}
+	dev_dbg(codec->dev, "%s: leave\n", __func__);
+}
+EXPORT_SYMBOL(wcd9xxx_enable_high_perf_mode);
+
+static int get_impedance_index(u32 imped)
+{
+	int i = 0;
+
+	if (imped < imped_index[i].imped_val) {
+		pr_debug("%s, detected impedance is less than 4 Ohm\n",
+				__func__);
+		goto ret;
+	}
+	if (imped >= imped_index[ARRAY_SIZE(imped_index) - 1].imped_val) {
+		pr_debug("%s, detected impedance is greater than 32164 Ohm\n",
+				__func__);
+		i = ARRAY_SIZE(imped_index) - 1;
+		goto ret;
+	}
+	for (i = 0; i < ARRAY_SIZE(imped_index) - 1; i++) {
+		if (imped >= imped_index[i].imped_val &&
+			imped < imped_index[i + 1].imped_val)
+			break;
+	}
+ret:
+	pr_debug("%s: selected impedance index = %d\n",
+			__func__, imped_index[i].index);
+	return imped_index[i].index;
+}
+
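+/*
+ * Program the class H controller for the detected headphone load.
+ * imped is in milliohms (see imped_index above); a codec driver would
+ * typically pass in the value reported by impedance detection.
+ */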
+void wcd9xxx_clsh_imped_config(struct snd_soc_codec *codec,
+				  int imped)
+{
+	int i;
+	int index;
+
+	index = get_impedance_index(imped);
+	if (index >= ARRAY_SIZE(imped_index)) {
+		pr_err("%s, invalid imped = %d\n", __func__, imped);
+		return;
+	}
+	for (i = 0; i < MAX_IMPED_PARAMS; i++)
+		snd_soc_write(codec, imped_table[index][i].reg,
+					imped_table[index][i].val);
+}
+
+static void wcd9xxx_clsh_comp_req(struct snd_soc_codec *codec,
+				  struct wcd9xxx_clsh_cdc_data *clsh_d,
+				  int compute_pa, bool on)
+{
+	u8 shift;
+
+	if (compute_pa == CLSH_COMPUTE_EAR) {
+		snd_soc_update_bits(codec, WCD9XXX_A_CDC_CLSH_B1_CTL, 0x10,
+				    (on ? 0x10 : 0));
+	} else {
+		if (compute_pa == CLSH_COMPUTE_HPH_L) {
+			shift = 3;
+		} else if (compute_pa == CLSH_COMPUTE_HPH_R) {
+			shift = 2;
+		} else {
+			dev_dbg(codec->dev,
+				"%s: classh computation request is incorrect\n",
+				__func__);
+			return;
+		}
+
+		if (on)
+			wcd9xxx_resmgr_add_cond_update_bits(clsh_d->resmgr,
+						  WCD9XXX_COND_HPH,
+						  WCD9XXX_A_CDC_CLSH_B1_CTL,
+						  shift, false);
+		else
+			wcd9xxx_resmgr_rm_cond_update_bits(clsh_d->resmgr,
+						  WCD9XXX_COND_HPH,
+						  WCD9XXX_A_CDC_CLSH_B1_CTL,
+						  shift, false);
+	}
+}
+
+int wcd9xxx_soc_update_bits_push(struct snd_soc_codec *codec,
+					struct list_head *list,
+					uint16_t reg, uint8_t mask,
+					uint8_t value, int delay)
+{
+	int rc;
+	struct wcd9xxx_register_save_node *node;
+
+	node = kmalloc(sizeof(*node), GFP_KERNEL);
+	if (unlikely(!node)) {
+		pr_err("%s: Not enough memory\n", __func__);
+		return -ENOMEM;
+	}
+	node->reg = reg;
+	node->value = snd_soc_read(codec, reg);
+	list_add(&node->lh, list);
+	if (mask == 0xFF)
+		rc = snd_soc_write(codec, reg, value);
+	else
+		rc = snd_soc_update_bits(codec, reg, mask, value);
+	if (delay)
+		usleep_range(delay, delay + USLEEP_RANGE_MARGIN_US);
+	return rc;
+}
+EXPORT_SYMBOL(wcd9xxx_soc_update_bits_push);
+
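+/*
+ * wcd9xxx_soc_update_bits_push() records the current register value on
+ * the caller's list before applying a change, so a series of overrides
+ * can be undone with one call.  A minimal usage sketch (hypothetical
+ * caller):
+ *
+ *	LIST_HEAD(save_list);
+ *
+ *	wcd9xxx_soc_update_bits_push(codec, &save_list,
+ *				     WCD9XXX_A_BUCK_MODE_1, 0x80, 0x80, 0);
+ *	...temporary configuration in effect...
+ *	wcd9xxx_restore_registers(codec, &save_list);
+ */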
+void wcd9xxx_restore_registers(struct snd_soc_codec *codec,
+			       struct list_head *lh)
+{
+	struct wcd9xxx_register_save_node *node, *nodetmp;
+
+	list_for_each_entry_safe(node, nodetmp, lh, lh) {
+		snd_soc_write(codec, node->reg, node->value);
+		list_del(&node->lh);
+		kfree(node);
+	}
+}
+EXPORT_SYMBOL(wcd9xxx_restore_registers);
+
+static void wcd9xxx_dynamic_bypass_buck_ctrl_lo(struct snd_soc_codec *cdc,
+						bool enable)
+{
+	int i;
+	const struct wcd9xxx_reg_mask_val reg_set[] = {
+		{WCD9XXX_A_BUCK_MODE_3, (0x1 << 3), (enable << 3)},
+		{WCD9XXX_A_BUCK_MODE_5, enable ? 0xFF : 0x02, 0x02},
+		{WCD9XXX_A_BUCK_MODE_5, 0x1, 0x01}
+	};
+
+	if (!enable) {
+		snd_soc_update_bits(cdc, WCD9XXX_A_BUCK_MODE_1,
+					(0x1 << 3), 0x00);
+		snd_soc_update_bits(cdc, WCD9XXX_A_BUCK_MODE_4,
+					0xFF, BUCK_VREF_2V);
+	}
+	for (i = 0; i < ARRAY_SIZE(reg_set); i++)
+		snd_soc_update_bits(cdc, reg_set[i].reg, reg_set[i].mask,
+							reg_set[i].val);
+
+	/* 50us sleep is reqd. as per the class H HW design sequence */
+	usleep_range(BUCK_SETTLE_TIME_US, BUCK_SETTLE_TIME_US+10);
+}
+
+static void wcd9xxx_dynamic_bypass_buck_ctrl(struct snd_soc_codec *cdc,
+						bool enable)
+{
+	int i;
+	const struct wcd9xxx_reg_mask_val reg_set[] = {
+		{WCD9XXX_A_BUCK_MODE_3, (0x1 << 3), (enable << 3)},
+		{WCD9XXX_A_BUCK_MODE_5, (0x1 << 1), ((!enable) << 1)},
+		{WCD9XXX_A_BUCK_MODE_5, 0x1, !enable}
+	};
+	if (!enable) {
+		snd_soc_update_bits(cdc, WCD9XXX_A_BUCK_MODE_1,
+					(0x1 << 3), 0x00);
+		snd_soc_update_bits(cdc, WCD9XXX_A_BUCK_MODE_4,
+					0xFF, BUCK_VREF_2V);
+	}
+	for (i = 0; i < ARRAY_SIZE(reg_set); i++)
+		snd_soc_update_bits(cdc, reg_set[i].reg, reg_set[i].mask,
+							reg_set[i].val);
+
+	/* 50us sleep is reqd. as per the class H HW design sequence */
+	usleep_range(BUCK_SETTLE_TIME_US, BUCK_SETTLE_TIME_US+10);
+}
+
+static void wcd9xxx_set_buck_mode(struct snd_soc_codec *codec, u8 buck_vref)
+{
+	int i;
+	const struct wcd9xxx_reg_mask_val reg_set[] = {
+		{WCD9XXX_A_BUCK_MODE_5, 0x02, 0x02},
+		{WCD9XXX_A_BUCK_MODE_4, 0xFF, buck_vref},
+		{WCD9XXX_A_BUCK_MODE_1, 0x04, 0x04},
+		{WCD9XXX_A_BUCK_MODE_3, 0x04, 0x00},
+		{WCD9XXX_A_BUCK_MODE_3, 0x08, 0x00},
+	};
+
+	for (i = 0; i < ARRAY_SIZE(reg_set); i++)
+		snd_soc_update_bits(codec, reg_set[i].reg,
+					reg_set[i].mask, reg_set[i].val);
+
+	dev_dbg(codec->dev, "%s: Done\n", __func__);
+	usleep_range(BUCK_SETTLE_TIME_US, BUCK_SETTLE_TIME_US + 10);
+}
+
+/* This will be called for all states except Lineout */
+static void wcd9xxx_clsh_enable_post_pa(struct snd_soc_codec *codec,
+	struct wcd9xxx_clsh_cdc_data *cdc_clsh_d)
+{
+	int i;
+	const struct wcd9xxx_reg_mask_val reg_set[] = {
+		{WCD9XXX_A_BUCK_MODE_5, 0x02, 0x00},
+		{WCD9XXX_A_NCP_STATIC, 0x20, 0x00},
+		{WCD9XXX_A_BUCK_MODE_3, 0x04, 0x04},
+	};
+
+	for (i = 0; i < ARRAY_SIZE(reg_set); i++)
+		snd_soc_update_bits(codec, reg_set[i].reg,
+					reg_set[i].mask, reg_set[i].val);
+
+	if (!cdc_clsh_d->is_dynamic_vdd_cp)
+		snd_soc_update_bits(codec, WCD9XXX_A_BUCK_MODE_3,
+							0x08, 0x08);
+
+	dev_dbg(codec->dev, "%s: completed clsh mode settings after PA enable\n",
+		   __func__);
+
+}
+
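+/*
+ * get/put pair tracking NCP FCLK users per level; while any level 8
+ * user is present, level 8 always wins over level 5.
+ */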
+static void wcd9xxx_set_fclk_get_ncp(struct snd_soc_codec *codec,
+				     struct wcd9xxx_clsh_cdc_data *clsh_d,
+				     enum ncp_fclk_level fclk_level)
+{
+	clsh_d->ncp_users[fclk_level]++;
+
+	pr_debug("%s: enter ncp type %d users fclk8 %d, fclk5 %d\n", __func__,
+		 fclk_level, clsh_d->ncp_users[NCP_FCLK_LEVEL_8],
+		 clsh_d->ncp_users[NCP_FCLK_LEVEL_5]);
+
+	snd_soc_update_bits(codec, WCD9XXX_A_NCP_STATIC, 0x10, 0x00);
+	/* fclk level 8 dominates level 5 */
+	if (clsh_d->ncp_users[NCP_FCLK_LEVEL_8] > 0)
+		snd_soc_update_bits(codec, WCD9XXX_A_NCP_STATIC, 0x0F, 0x08);
+	else if (clsh_d->ncp_users[NCP_FCLK_LEVEL_5] > 0)
+		snd_soc_update_bits(codec, WCD9XXX_A_NCP_STATIC, 0x0F, 0x05);
+	else
+		WARN_ONCE(1, "Unexpected users %d,%d\n",
+			  clsh_d->ncp_users[NCP_FCLK_LEVEL_8],
+			  clsh_d->ncp_users[NCP_FCLK_LEVEL_5]);
+	snd_soc_update_bits(codec, WCD9XXX_A_NCP_STATIC, 0x20, 0x20);
+
+	/* Enable NCP and wait until it settles */
+	if (snd_soc_update_bits(codec, WCD9XXX_A_NCP_EN, 0x01, 0x01))
+		usleep_range(NCP_SETTLE_TIME_US, NCP_SETTLE_TIME_US + 50);
+	pr_debug("%s: leave\n", __func__);
+}
+
+static void wcd9xxx_set_fclk_put_ncp(struct snd_soc_codec *codec,
+				     struct wcd9xxx_clsh_cdc_data *clsh_d,
+				     enum ncp_fclk_level fclk_level)
+{
+	clsh_d->ncp_users[fclk_level]--;
+
+	pr_debug("%s: enter ncp type %d users fclk8 %d, fclk5 %d\n", __func__,
+		 fclk_level, clsh_d->ncp_users[NCP_FCLK_LEVEL_8],
+		 clsh_d->ncp_users[NCP_FCLK_LEVEL_5]);
+
+	if (clsh_d->ncp_users[NCP_FCLK_LEVEL_8] == 0 &&
+	    clsh_d->ncp_users[NCP_FCLK_LEVEL_5] == 0)
+		snd_soc_update_bits(codec, WCD9XXX_A_NCP_EN, 0x01, 0x00);
+	else if (clsh_d->ncp_users[NCP_FCLK_LEVEL_8] == 0)
+		/* if dominating level 8 has gone, switch to 5 */
+		snd_soc_update_bits(codec, WCD9XXX_A_NCP_STATIC, 0x0F, 0x05);
+	pr_debug("%s: leave\n", __func__);
+}
+
+static void wcd9xxx_cfg_clsh_param_ear(struct snd_soc_codec *codec)
+{
+	int i;
+	const struct wcd9xxx_reg_mask_val reg_set[] = {
+		{WCD9XXX_A_CDC_CLSH_B1_CTL, (0x1 << 7), 0},
+		{WCD9XXX_A_CDC_CLSH_V_PA_HD_EAR, (0x3f << 0), 0x0D},
+		{WCD9XXX_A_CDC_CLSH_V_PA_MIN_EAR, (0x3f << 0), 0x3A},
+
+		/* Assumes an EAR load of 10.7 ohm */
+		{WCD9XXX_A_CDC_CLSH_IDLE_EAR_THSD, (0x3f << 0), 0x26},
+		{WCD9XXX_A_CDC_CLSH_FCLKONLY_EAR_THSD, (0x3f << 0), 0x2C},
+		{WCD9XXX_A_CDC_CLSH_I_PA_FACT_EAR_L, 0xff, 0xA9},
+		{WCD9XXX_A_CDC_CLSH_I_PA_FACT_EAR_U, 0xff, 0x07},
+		{WCD9XXX_A_CDC_CLSH_K_ADDR, (0x1 << 7), 0},
+		{WCD9XXX_A_CDC_CLSH_K_ADDR, (0xf << 0), 0x08},
+		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x1b},
+		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x00},
+		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x2d},
+		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x00},
+		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x36},
+		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x00},
+		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x37},
+		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x00},
+	};
+
+	for (i = 0; i < ARRAY_SIZE(reg_set); i++)
+		snd_soc_update_bits(codec, reg_set[i].reg,
+					reg_set[i].mask, reg_set[i].val);
+
+	dev_dbg(codec->dev, "%s: Programmed Class H controller EAR specific params\n",
+			 __func__);
+}
+
+static void wcd9xxx_cfg_clsh_param_hph(struct snd_soc_codec *codec)
+{
+	int i;
+	const struct wcd9xxx_reg_mask_val reg_set[] = {
+		{WCD9XXX_A_CDC_CLSH_B1_CTL, (0x1 << 6), 0},
+		{WCD9XXX_A_CDC_CLSH_V_PA_HD_HPH, 0x3f, 0x0D},
+		{WCD9XXX_A_CDC_CLSH_V_PA_MIN_HPH, 0x3f, 0x1D},
+
+		/* Assumes an HPH load of 16 ohm per channel */
+		{WCD9XXX_A_CDC_CLSH_IDLE_HPH_THSD, 0x3f, 0x13},
+		{WCD9XXX_A_CDC_CLSH_FCLKONLY_HPH_THSD, 0x1f, 0x19},
+		{WCD9XXX_A_CDC_CLSH_I_PA_FACT_HPH_L, 0xff, 0x97},
+		{WCD9XXX_A_CDC_CLSH_I_PA_FACT_HPH_U, 0xff, 0x05},
+		{WCD9XXX_A_CDC_CLSH_K_ADDR, (0x1 << 7), 0},
+		{WCD9XXX_A_CDC_CLSH_K_ADDR, 0x0f, 0},
+		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0xAE},
+		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x01},
+		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x1C},
+		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x00},
+		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x24},
+		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x00},
+		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x25},
+		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x00},
+	};
+
+	for (i = 0; i < ARRAY_SIZE(reg_set); i++)
+		snd_soc_update_bits(codec, reg_set[i].reg, reg_set[i].mask,
+							reg_set[i].val);
+	dev_dbg(codec->dev, "%s: Programmed Class H controller HPH specific params\n",
+			 __func__);
+}
+
+static void wcd9xxx_ncp_bypass_enable(struct snd_soc_codec *cdc, bool enable)
+{
+	snd_soc_update_bits(cdc, WCD9XXX_A_NCP_STATIC, 0x10, (enable << 4));
+	/* 50us sleep is reqd. as per the class H HW design sequence */
+	usleep_range(BUCK_SETTLE_TIME_US, BUCK_SETTLE_TIME_US+10);
+}
+
+static void wcd9xxx_clsh_set_Iest(struct snd_soc_codec *codec,
+		u8 value)
+{
+	snd_soc_update_bits(codec, WCD9XXX_A_BUCK_MODE_5,
+				    0x01, 0x01);
+	snd_soc_update_bits(codec, WCD9XXX_A_BUCK_MODE_5,
+				    0xFC, (value << 2));
+}
+
+static void wcd9xxx_clsh_state_hph_ear(struct snd_soc_codec *codec,
+			struct wcd9xxx_clsh_cdc_data *clsh_d,
+			u8 req_state, bool is_enable)
+{
+	int compute_pa = 0;
+
+	dev_dbg(codec->dev, "%s: enter %s\n", __func__,
+			is_enable ? "enable" : "disable");
+
+	if (is_enable) {
+		/*
+		 * This check ensures the calls below run only once, when
+		 * the EAR state is first entered or requested.
+		 */
+		if ((clsh_d->state == WCD9XXX_CLSH_STATE_EAR) ||
+			(req_state == WCD9XXX_CLSH_STATE_EAR)) {
+			wcd9xxx_dynamic_bypass_buck_ctrl(codec, false);
+			wcd9xxx_ncp_bypass_enable(codec, true);
+		}
+		switch (req_state) {
+		case WCD9XXX_CLSH_STATE_HPHL:
+			compute_pa = CLSH_COMPUTE_HPH_L;
+			break;
+		case WCD9XXX_CLSH_STATE_HPHR:
+			compute_pa = CLSH_COMPUTE_HPH_R;
+			break;
+		case WCD9XXX_CLSH_STATE_EAR:
+			compute_pa = CLSH_COMPUTE_EAR;
+			break;
+		default:
+			dev_dbg(codec->dev,
+				"%s:Invalid state:0x%x,enable:0x%x\n",
+				__func__, req_state, is_enable);
+			break;
+		}
+		wcd9xxx_clsh_comp_req(codec, clsh_d, compute_pa, true);
+
+		dev_dbg(codec->dev, "%s: Enabled hph+ear mode clsh\n",
+				__func__);
+	} else {
+		switch (req_state) {
+		case WCD9XXX_CLSH_STATE_HPHL:
+			compute_pa = CLSH_COMPUTE_HPH_L;
+			break;
+		case WCD9XXX_CLSH_STATE_HPHR:
+			compute_pa = CLSH_COMPUTE_HPH_R;
+			break;
+		case WCD9XXX_CLSH_STATE_EAR:
+			compute_pa = CLSH_COMPUTE_EAR;
+			break;
+		default:
+			dev_dbg(codec->dev,
+				"%s:Invalid state:0x%x,enable:0x%x\n",
+				__func__, req_state, is_enable);
+			break;
+		}
+		wcd9xxx_clsh_comp_req(codec, clsh_d, compute_pa, false);
+
+		if (((clsh_d->state & (~req_state)) ==
+				WCD9XXX_CLSH_STATE_EAR) ||
+			(req_state == WCD9XXX_CLSH_STATE_EAR)) {
+			wcd9xxx_ncp_bypass_enable(codec, false);
+			wcd9xxx_dynamic_bypass_buck_ctrl(codec, true);
+		}
+	}
+}
+
+static void wcd9xxx_clsh_state_hph_lo(struct snd_soc_codec *codec,
+			struct wcd9xxx_clsh_cdc_data *clsh_d,
+			u8 req_state, bool is_enable)
+{
+	dev_dbg(codec->dev, "%s: enter %s\n", __func__,
+			is_enable ? "enable" : "disable");
+	if (is_enable) {
+		if ((clsh_d->state == WCD9XXX_CLSH_STATE_LO) ||
+			(req_state == WCD9XXX_CLSH_STATE_LO)) {
+			wcd9xxx_dynamic_bypass_buck_ctrl_lo(codec, false);
+			wcd9xxx_enable_buck(codec, clsh_d, true);
+			wcd9xxx_ncp_bypass_enable(codec, true);
+			if (req_state & WCD9XXX_CLSH_STATE_HPH_ST) {
+				wcd9xxx_set_fclk_get_ncp(codec, clsh_d,
+							NCP_FCLK_LEVEL_8);
+				wcd9xxx_set_fclk_put_ncp(codec, clsh_d,
+							NCP_FCLK_LEVEL_5);
+				wcd9xxx_enable_clsh_block(codec, clsh_d, true);
+				wcd9xxx_chargepump_request(codec, true);
+				wcd9xxx_enable_anc_delay(codec, true);
+			}
+		}
+		if (req_state == WCD9XXX_CLSH_STATE_HPHL)
+			wcd9xxx_clsh_comp_req(codec, clsh_d,
+						CLSH_COMPUTE_HPH_L, true);
+		if (req_state == WCD9XXX_CLSH_STATE_HPHR)
+			wcd9xxx_clsh_comp_req(codec, clsh_d,
+						CLSH_COMPUTE_HPH_R, true);
+	} else {
+		switch (req_state) {
+		case WCD9XXX_CLSH_STATE_LO:
+			snd_soc_update_bits(codec, WCD9XXX_A_NCP_STATIC,
+						0x20, 0x00);
+			wcd9xxx_dynamic_bypass_buck_ctrl_lo(codec, true);
+			break;
+		case WCD9XXX_CLSH_STATE_HPHL:
+			wcd9xxx_clsh_comp_req(codec, clsh_d,
+						CLSH_COMPUTE_HPH_L, false);
+			break;
+		case WCD9XXX_CLSH_STATE_HPHR:
+			wcd9xxx_clsh_comp_req(codec, clsh_d,
+						CLSH_COMPUTE_HPH_R, false);
+			break;
+		default:
+			dev_dbg(codec->dev,
+				 "%s:Invalid state:0x%x,enable:0x%x\n",
+				__func__, req_state, is_enable);
+			break;
+		}
+		if ((req_state == WCD9XXX_CLSH_STATE_LO) ||
+		((clsh_d->state & (~req_state)) == WCD9XXX_CLSH_STATE_LO)) {
+			wcd9xxx_ncp_bypass_enable(codec, false);
+
+			if ((clsh_d->state & (~req_state)) ==
+						WCD9XXX_CLSH_STATE_LO) {
+				wcd9xxx_set_fclk_get_ncp(codec, clsh_d,
+							NCP_FCLK_LEVEL_5);
+				wcd9xxx_set_fclk_put_ncp(codec, clsh_d,
+							NCP_FCLK_LEVEL_8);
+			}
+
+			if (req_state & WCD9XXX_CLSH_STATE_HPH_ST) {
+				usleep_range(BUCK_SETTLE_TIME_US,
+						BUCK_SETTLE_TIME_US + 10);
+				if (clsh_d->buck_mv ==
+						WCD9XXX_CDC_BUCK_MV_1P8) {
+					wcd9xxx_enable_buck(codec, clsh_d,
+								false);
+					wcd9xxx_ncp_bypass_enable(codec, true);
+				} else {
+					/*
+					 * NCP settle time recommended by
+					 * codec specification
+					 */
+					usleep_range(NCP_SETTLE_TIME_US,
+						NCP_SETTLE_TIME_US + 10);
+					wcd9xxx_clsh_set_Iest(codec, 0x02);
+				}
+				snd_soc_update_bits(codec,
+						WCD9XXX_A_BUCK_MODE_1,
+						0x04, 0x00);
+				snd_soc_update_bits(codec,
+						 WCD9XXX_A_BUCK_MODE_4,
+						0xFF, BUCK_VREF_1P8V);
+			}
+		}
+	}
+}
+
+static void wcd9xxx_clsh_state_ear_lo(struct snd_soc_codec *codec,
+			struct wcd9xxx_clsh_cdc_data *clsh_d,
+			u8 req_state, bool is_enable)
+{
+	dev_dbg(codec->dev, "%s: enter %s\n", __func__,
+			is_enable ? "enable" : "disable");
+	if (is_enable) {
+		wcd9xxx_dynamic_bypass_buck_ctrl(codec, false);
+		wcd9xxx_enable_buck(codec, clsh_d, true);
+		wcd9xxx_ncp_bypass_enable(codec, true);
+		if (req_state & WCD9XXX_CLSH_STATE_EAR) {
+			wcd9xxx_set_fclk_get_ncp(codec, clsh_d,
+						NCP_FCLK_LEVEL_8);
+			wcd9xxx_set_fclk_put_ncp(codec, clsh_d,
+						NCP_FCLK_LEVEL_5);
+			wcd9xxx_enable_clsh_block(codec, clsh_d, true);
+			wcd9xxx_chargepump_request(codec, true);
+			wcd9xxx_enable_anc_delay(codec, true);
+			wcd9xxx_clsh_comp_req(codec, clsh_d,
+						CLSH_COMPUTE_EAR, true);
+		}
+	} else {
+		wcd9xxx_ncp_bypass_enable(codec, false);
+
+		if ((clsh_d->state & (~req_state)) == WCD9XXX_CLSH_STATE_LO) {
+			wcd9xxx_set_fclk_get_ncp(codec, clsh_d,
+						NCP_FCLK_LEVEL_5);
+			wcd9xxx_set_fclk_put_ncp(codec, clsh_d,
+						NCP_FCLK_LEVEL_8);
+		}
+
+		if (req_state & WCD9XXX_CLSH_STATE_LO) {
+			snd_soc_update_bits(codec, WCD9XXX_A_NCP_STATIC,
+						0x20, 0x00);
+			wcd9xxx_dynamic_bypass_buck_ctrl(codec, true);
+		} else if (req_state & WCD9XXX_CLSH_STATE_EAR) {
+			wcd9xxx_clsh_comp_req(codec, clsh_d, CLSH_COMPUTE_EAR,
+						false);
+			/* sleep 5 ms */
+			if (clsh_d->buck_mv == WCD9XXX_CDC_BUCK_MV_1P8) {
+				wcd9xxx_enable_buck(codec, clsh_d, false);
+				wcd9xxx_ncp_bypass_enable(codec, true);
+			} else {
+				/* NCP settle time recommended by codec spec */
+				usleep_range(NCP_SETTLE_TIME_US,
+					     NCP_SETTLE_TIME_US + 10);
+				wcd9xxx_clsh_set_Iest(codec, 0x02);
+			}
+			snd_soc_update_bits(codec, WCD9XXX_A_BUCK_MODE_1,
+						0x04, 0x00);
+			snd_soc_update_bits(codec, WCD9XXX_A_BUCK_MODE_4,
+						0xFF, BUCK_VREF_1P8V);
+		}
+	}
+}
+
+static void wcd9xxx_clsh_state_hph_ear_lo(struct snd_soc_codec *codec,
+			struct wcd9xxx_clsh_cdc_data *clsh_d,
+			u8 req_state, bool is_enable)
+{
+	dev_dbg(codec->dev, "%s: enter %s\n", __func__,
+			is_enable ? "enable" : "disable");
+
+	if (req_state & WCD9XXX_CLSH_STATE_HPHL)
+		wcd9xxx_clsh_comp_req(codec, clsh_d, CLSH_COMPUTE_HPH_L,
+					is_enable);
+
+	if (req_state & WCD9XXX_CLSH_STATE_HPHR)
+		wcd9xxx_clsh_comp_req(codec, clsh_d, CLSH_COMPUTE_HPH_R,
+					is_enable);
+
+	if (req_state & WCD9XXX_CLSH_STATE_EAR)
+		wcd9xxx_clsh_comp_req(codec, clsh_d, CLSH_COMPUTE_EAR,
+					is_enable);
+}
+
+static void wcd9xxx_clsh_state_ear(struct snd_soc_codec *codec,
+			struct wcd9xxx_clsh_cdc_data *clsh_d,
+			u8 req_state, bool is_enable)
+{
+	pr_debug("%s: enter %s\n", __func__, is_enable ? "enable" : "disable");
+	if (is_enable) {
+		wcd9xxx_cfg_clsh_param_common(codec);
+		wcd9xxx_cfg_clsh_param_ear(codec);
+		wcd9xxx_enable_clsh_block(codec, clsh_d, true);
+		wcd9xxx_chargepump_request(codec, true);
+		wcd9xxx_enable_anc_delay(codec, true);
+		wcd9xxx_clsh_comp_req(codec, clsh_d, CLSH_COMPUTE_EAR, true);
+		wcd9xxx_set_buck_mode(codec, BUCK_VREF_2V);
+		wcd9xxx_enable_buck(codec, clsh_d, true);
+		wcd9xxx_set_fclk_get_ncp(codec, clsh_d, NCP_FCLK_LEVEL_8);
+
+		dev_dbg(codec->dev, "%s: Enabled ear mode class h\n", __func__);
+	} else {
+		dev_dbg(codec->dev, "%s: stub fallback to ear\n", __func__);
+		wcd9xxx_set_fclk_put_ncp(codec, clsh_d, NCP_FCLK_LEVEL_8);
+		wcd9xxx_enable_buck(codec, clsh_d, false);
+		wcd9xxx_clsh_comp_req(codec, clsh_d, CLSH_COMPUTE_EAR, false);
+		wcd9xxx_chargepump_request(codec, false);
+		wcd9xxx_enable_clsh_block(codec, clsh_d, false);
+	}
+}
+
+static void wcd9xxx_clsh_state_hph_l(struct snd_soc_codec *codec,
+		struct wcd9xxx_clsh_cdc_data *clsh_d,
+		u8 req_state, bool is_enable)
+{
+	pr_debug("%s: enter %s\n", __func__, is_enable ? "enable" : "disable");
+
+	if (is_enable) {
+		wcd9xxx_cfg_clsh_param_common(codec);
+		wcd9xxx_cfg_clsh_param_hph(codec);
+		wcd9xxx_enable_clsh_block(codec, clsh_d, true);
+		wcd9xxx_chargepump_request(codec, true);
+		wcd9xxx_enable_anc_delay(codec, true);
+		wcd9xxx_clsh_comp_req(codec, clsh_d, CLSH_COMPUTE_HPH_L, true);
+		wcd9xxx_clsh_comp_req(codec, clsh_d, CLSH_COMPUTE_HPH_R, true);
+		wcd9xxx_set_buck_mode(codec, BUCK_VREF_0P494V);
+		wcd9xxx_enable_buck(codec, clsh_d, true);
+		wcd9xxx_set_fclk_get_ncp(codec, clsh_d, NCP_FCLK_LEVEL_8);
+
+		dev_dbg(codec->dev, "%s: Done\n", __func__);
+	} else {
+		wcd9xxx_set_fclk_put_ncp(codec, clsh_d, NCP_FCLK_LEVEL_8);
+		wcd9xxx_enable_buck(codec, clsh_d, false);
+		wcd9xxx_clsh_comp_req(codec, clsh_d, CLSH_COMPUTE_HPH_L, false);
+		wcd9xxx_clsh_comp_req(codec, clsh_d, CLSH_COMPUTE_HPH_R, false);
+		wcd9xxx_enable_clsh_block(codec, clsh_d, false);
+		wcd9xxx_chargepump_request(codec, false);
+	}
+}
+
+static void wcd9xxx_clsh_state_hph_r(struct snd_soc_codec *codec,
+		struct wcd9xxx_clsh_cdc_data *clsh_d,
+		u8 req_state, bool is_enable)
+{
+	pr_debug("%s: enter %s\n", __func__, is_enable ? "enable" : "disable");
+
+	if (is_enable) {
+		wcd9xxx_cfg_clsh_param_common(codec);
+		wcd9xxx_cfg_clsh_param_hph(codec);
+		wcd9xxx_enable_clsh_block(codec, clsh_d, true);
+		wcd9xxx_chargepump_request(codec, true);
+		wcd9xxx_enable_anc_delay(codec, true);
+		wcd9xxx_clsh_comp_req(codec, clsh_d, CLSH_COMPUTE_HPH_L, true);
+		wcd9xxx_clsh_comp_req(codec, clsh_d, CLSH_COMPUTE_HPH_R, true);
+		wcd9xxx_set_buck_mode(codec, BUCK_VREF_0P494V);
+		wcd9xxx_enable_buck(codec, clsh_d, true);
+		wcd9xxx_set_fclk_get_ncp(codec, clsh_d, NCP_FCLK_LEVEL_8);
+
+		dev_dbg(codec->dev, "%s: Done\n", __func__);
+	} else {
+		wcd9xxx_set_fclk_put_ncp(codec, clsh_d, NCP_FCLK_LEVEL_8);
+		wcd9xxx_enable_buck(codec, clsh_d, false);
+		wcd9xxx_clsh_comp_req(codec, clsh_d, CLSH_COMPUTE_HPH_L, false);
+		wcd9xxx_clsh_comp_req(codec, clsh_d, CLSH_COMPUTE_HPH_R, false);
+		wcd9xxx_enable_clsh_block(codec, clsh_d, false);
+		wcd9xxx_chargepump_request(codec, false);
+	}
+}
+
+static void wcd9xxx_clsh_state_hph_st(struct snd_soc_codec *codec,
+		struct wcd9xxx_clsh_cdc_data *clsh_d,
+		u8 req_state, bool is_enable)
+{
+	pr_debug("%s: enter %s\n", __func__, is_enable ? "enable" : "disable");
+
+	dev_dbg(codec->dev, "%s: stub fallback to hph_st\n", __func__);
+}
+
+static void wcd9xxx_clsh_state_lo(struct snd_soc_codec *codec,
+		struct wcd9xxx_clsh_cdc_data *clsh_d,
+		u8 req_state, bool is_enable)
+{
+	pr_debug("%s: enter %s, buck_mv %d\n", __func__,
+		 is_enable ? "enable" : "disable", clsh_d->buck_mv);
+
+	if (is_enable) {
+		wcd9xxx_set_buck_mode(codec, BUCK_VREF_1P8V);
+		wcd9xxx_enable_buck(codec, clsh_d, true);
+		wcd9xxx_set_fclk_get_ncp(codec, clsh_d, NCP_FCLK_LEVEL_5);
+
+		if (clsh_d->buck_mv == WCD9XXX_CDC_BUCK_MV_1P8) {
+			wcd9xxx_enable_buck(codec, clsh_d, false);
+			snd_soc_update_bits(codec, WCD9XXX_A_NCP_STATIC,
+					    1 << 4, 1 << 4);
+			/* NCP settle time recommended by codec specification */
+			usleep_range(NCP_SETTLE_TIME_US,
+				     NCP_SETTLE_TIME_US + 10);
+		} else {
+			/* NCP settle time recommended by codec specification */
+			usleep_range(NCP_SETTLE_TIME_US,
+				     NCP_SETTLE_TIME_US + 10);
+			snd_soc_update_bits(codec, WCD9XXX_A_BUCK_MODE_5,
+					    0x01, 0x01);
+			/* Iest = 0x02, i.e. (0x02 << 2) under mask 0xFC */
+			snd_soc_update_bits(codec, WCD9XXX_A_BUCK_MODE_5,
+					    0xFC, 0x08);
+		}
+		snd_soc_update_bits(codec, WCD9XXX_A_BUCK_MODE_1, 0x04, 0x00);
+	} else {
+		dev_dbg(codec->dev, "%s: stub fallback to lineout\n", __func__);
+		wcd9xxx_set_fclk_put_ncp(codec, clsh_d, NCP_FCLK_LEVEL_5);
+		if (clsh_d->buck_mv != WCD9XXX_CDC_BUCK_MV_1P8)
+			wcd9xxx_enable_buck(codec, clsh_d, false);
+	}
+}
+
+static void wcd9xxx_clsh_state_err(struct snd_soc_codec *codec,
+		struct wcd9xxx_clsh_cdc_data *clsh_d,
+		u8 req_state, bool is_enable)
+{
+	char msg[128];
+
+	dev_dbg(codec->dev,
+		"%s Wrong request for class H state machine requested to %s %s",
+		__func__, is_enable ? "enable" : "disable",
+		state_to_str(req_state, msg, sizeof(msg)));
+	WARN_ON(1);
+}
+
+/*
+ * Function: wcd9xxx_clsh_is_state_valid
+ * Params: state
+ * Description:
+ * Returns 1 if the state bit mask is a supported class H device
+ * combination, 0 otherwise.
+ */
+static int wcd9xxx_clsh_is_state_valid(u8 state)
+{
+	switch (state) {
+	case WCD9XXX_CLSH_STATE_IDLE:
+	case WCD9XXX_CLSH_STATE_EAR:
+	case WCD9XXX_CLSH_STATE_HPHL:
+	case WCD9XXX_CLSH_STATE_HPHR:
+	case WCD9XXX_CLSH_STATE_HPH_ST:
+	case WCD9XXX_CLSH_STATE_LO:
+	case WCD9XXX_CLSH_STATE_HPHL_EAR:
+	case WCD9XXX_CLSH_STATE_HPHR_EAR:
+	case WCD9XXX_CLSH_STATE_HPH_ST_EAR:
+	case WCD9XXX_CLSH_STATE_HPHL_LO:
+	case WCD9XXX_CLSH_STATE_HPHR_LO:
+	case WCD9XXX_CLSH_STATE_HPH_ST_LO:
+	case WCD9XXX_CLSH_STATE_EAR_LO:
+	case WCD9XXX_CLSH_STATE_HPHL_EAR_LO:
+	case WCD9XXX_CLSH_STATE_HPHR_EAR_LO:
+	case WCD9XXX_CLSH_STATE_HPH_ST_EAR_LO:
+		return 1;
+	default:
+		break;
+	}
+	return 0;
+}
+
+/*
+ * Function: wcd9xxx_clsh_fsm
+ * Params: codec, cdc_clsh_d, req_state, req_type, clsh_event
+ * Description:
+ * Handles the PRE_DAC and POST_PA events for the supported devices and
+ * updates the class H configuration for each valid device combination.
+ * cdc_clsh_d holds the current class H state.
+ */
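+/*
+ * Typical (hypothetical) call sequence from a codec driver DAPM
+ * handler for headphone-left playback:
+ *
+ *	wcd9xxx_clsh_fsm(codec, clsh_d, WCD9XXX_CLSH_STATE_HPHL,
+ *			 WCD9XXX_CLSH_REQ_ENABLE,
+ *			 WCD9XXX_CLSH_EVENT_PRE_DAC);
+ *	...PA enable, playback...
+ *	wcd9xxx_clsh_fsm(codec, clsh_d, WCD9XXX_CLSH_STATE_HPHL,
+ *			 WCD9XXX_CLSH_REQ_DISABLE,
+ *			 WCD9XXX_CLSH_EVENT_POST_PA);
+ */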
+void wcd9xxx_clsh_fsm(struct snd_soc_codec *codec,
+		struct wcd9xxx_clsh_cdc_data *cdc_clsh_d,
+		u8 req_state, bool req_type, u8 clsh_event)
+{
+	u8 old_state, new_state;
+	char msg0[128], msg1[128];
+
+	switch (clsh_event) {
+	case WCD9XXX_CLSH_EVENT_PRE_DAC:
+		/* PRE_DAC event should be used only for Enable */
+		BUG_ON(req_type != WCD9XXX_CLSH_REQ_ENABLE);
+
+		old_state = cdc_clsh_d->state;
+		new_state = old_state | req_state;
+
+		if (!wcd9xxx_clsh_is_state_valid(new_state)) {
+			dev_dbg(codec->dev,
+				"%s: classH not a valid new state: %s\n",
+				__func__,
+				state_to_str(new_state, msg0, sizeof(msg0)));
+			return;
+		}
+		if (new_state == old_state) {
+			dev_dbg(codec->dev,
+				"%s: classH already in requested state: %s\n",
+				__func__,
+				state_to_str(new_state, msg0, sizeof(msg0)));
+			return;
+		}
+		(*clsh_state_fp[new_state]) (codec, cdc_clsh_d, req_state,
+					     req_type);
+		cdc_clsh_d->state = new_state;
+		dev_dbg(codec->dev,
+			"%s: ClassH state transition from %s to %s\n",
+			__func__, state_to_str(old_state, msg0, sizeof(msg0)),
+			state_to_str(cdc_clsh_d->state, msg1, sizeof(msg1)));
+
+		break;
+	case WCD9XXX_CLSH_EVENT_POST_PA:
+		if (req_type == WCD9XXX_CLSH_REQ_DISABLE) {
+			old_state = cdc_clsh_d->state;
+			new_state = old_state & (~req_state);
+
+			if (new_state < NUM_CLSH_STATES) {
+				if (!wcd9xxx_clsh_is_state_valid(old_state)) {
+					dev_dbg(codec->dev,
+						"%s:Invalid old state:%s\n",
+						__func__,
+						state_to_str(old_state, msg0,
+						sizeof(msg0)));
+					return;
+				}
+				if (new_state == old_state) {
+					dev_dbg(codec->dev,
+					"%s: clsH already in old state: %s\n",
+					__func__,
+					state_to_str(new_state, msg0,
+					sizeof(msg0)));
+					return;
+				}
+				(*clsh_state_fp[old_state]) (codec, cdc_clsh_d,
+							     req_state,
+							     req_type);
+				cdc_clsh_d->state = new_state;
+				dev_dbg(codec->dev, "%s: ClassH state transition from %s to %s\n",
+					__func__, state_to_str(old_state, msg0,
+							       sizeof(msg0)),
+					state_to_str(cdc_clsh_d->state, msg1,
+						     sizeof(msg1)));
+
+			} else {
+				dev_dbg(codec->dev, "%s:wrong new state=0x%x\n",
+						__func__, new_state);
+			}
+		} else if (!(cdc_clsh_d->state & WCD9XXX_CLSH_STATE_LO)) {
+			wcd9xxx_clsh_enable_post_pa(codec, cdc_clsh_d);
+		}
+
+		break;
+	}
+}
+EXPORT_SYMBOL_GPL(wcd9xxx_clsh_fsm);
+
+void wcd9xxx_clsh_init(struct wcd9xxx_clsh_cdc_data *clsh,
+		       struct wcd9xxx_resmgr *resmgr)
+{
+	int i;
+
+	clsh->state = WCD9XXX_CLSH_STATE_IDLE;
+	clsh->resmgr = resmgr;
+
+	for (i = 0; i < NUM_CLSH_STATES; i++)
+		clsh_state_fp[i] = wcd9xxx_clsh_state_err;
+
+	clsh_state_fp[WCD9XXX_CLSH_STATE_EAR] = wcd9xxx_clsh_state_ear;
+	clsh_state_fp[WCD9XXX_CLSH_STATE_HPHL] =
+						wcd9xxx_clsh_state_hph_l;
+	clsh_state_fp[WCD9XXX_CLSH_STATE_HPHR] =
+						wcd9xxx_clsh_state_hph_r;
+	clsh_state_fp[WCD9XXX_CLSH_STATE_HPH_ST] =
+						wcd9xxx_clsh_state_hph_st;
+	clsh_state_fp[WCD9XXX_CLSH_STATE_LO] = wcd9xxx_clsh_state_lo;
+	clsh_state_fp[WCD9XXX_CLSH_STATE_HPHL_EAR] =
+						wcd9xxx_clsh_state_hph_ear;
+	clsh_state_fp[WCD9XXX_CLSH_STATE_HPHR_EAR] =
+						wcd9xxx_clsh_state_hph_ear;
+	clsh_state_fp[WCD9XXX_CLSH_STATE_HPH_ST_EAR] =
+						wcd9xxx_clsh_state_hph_ear;
+	clsh_state_fp[WCD9XXX_CLSH_STATE_HPHL_LO] = wcd9xxx_clsh_state_hph_lo;
+	clsh_state_fp[WCD9XXX_CLSH_STATE_HPHR_LO] = wcd9xxx_clsh_state_hph_lo;
+	clsh_state_fp[WCD9XXX_CLSH_STATE_HPH_ST_LO] =
+						wcd9xxx_clsh_state_hph_lo;
+	clsh_state_fp[WCD9XXX_CLSH_STATE_EAR_LO] = wcd9xxx_clsh_state_ear_lo;
+	clsh_state_fp[WCD9XXX_CLSH_STATE_HPHL_EAR_LO] =
+						wcd9xxx_clsh_state_hph_ear_lo;
+	clsh_state_fp[WCD9XXX_CLSH_STATE_HPHR_EAR_LO] =
+						wcd9xxx_clsh_state_hph_ear_lo;
+	clsh_state_fp[WCD9XXX_CLSH_STATE_HPH_ST_EAR_LO] =
+						wcd9xxx_clsh_state_hph_ear_lo;
+}
+EXPORT_SYMBOL_GPL(wcd9xxx_clsh_init);
+
+MODULE_DESCRIPTION("WCD9XXX Common");
+MODULE_LICENSE("GPL v2");
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/sound/soc/codecs/wcd9xxx-common.h	2019-01-22 16:16:29.555301211 +0100
@@ -0,0 +1,286 @@
+/* Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef WCD9XXX_CODEC_COMMON
+
+#define WCD9XXX_CODEC_COMMON
+
+#include "wcd9xxx-resmgr.h"
+
+#define WCD9XXX_CLSH_REQ_ENABLE true
+#define WCD9XXX_CLSH_REQ_DISABLE false
+
+#define WCD9XXX_CLSH_EVENT_PRE_DAC 0x01
+#define WCD9XXX_CLSH_EVENT_POST_PA 0x02
+
+/*
+ * Basic states for the class H state machine, represented as a bit
+ * mask within a u8:
+ * bit 0: EAR mode
+ * bit 1: HPH left mode
+ * bit 2: HPH right mode
+ * bit 3: Lineout mode
+ */
+#define	WCD9XXX_CLSH_STATE_IDLE 0x00
+#define	WCD9XXX_CLSH_STATE_EAR (0x01 << 0)
+#define	WCD9XXX_CLSH_STATE_HPHL (0x01 << 1)
+#define	WCD9XXX_CLSH_STATE_HPHR (0x01 << 2)
+#define	WCD9XXX_CLSH_STATE_LO (0x01 << 3)
+#define NUM_CLSH_STATES (0x01 << 4)
+
+#define	WCD9XXX_CLSAB_STATE_IDLE  0x00
+#define WCD9XXX_CLSAB_STATE_HPHL (0x01 << 1)
+#define WCD9XXX_CLSAB_STATE_HPHR (0x01 << 2)
+
+#define WCD9XXX_CLSAB_REQ_ENABLE  true
+#define WCD9XXX_CLSAB_REQ_DISABLE false
+
+#define WCD9XXX_NON_UHQA_MODE	0
+
+#define WCD9XXX_DMIC_SAMPLE_RATE_DIV_2    0x0
+#define WCD9XXX_DMIC_SAMPLE_RATE_DIV_3    0x1
+#define WCD9XXX_DMIC_SAMPLE_RATE_DIV_4    0x2
+
+#define WCD9XXX_DMIC_B1_CTL_DIV_2 0x00
+#define WCD9XXX_DMIC_B1_CTL_DIV_3 0x22
+#define WCD9XXX_DMIC_B1_CTL_DIV_4 0x44
+
+#define WCD9XXX_DMIC_B2_CTL_DIV_2 0x00
+#define WCD9XXX_DMIC_B2_CTL_DIV_3 0x02
+#define WCD9XXX_DMIC_B2_CTL_DIV_4 0x04
+
+#define WCD9XXX_ANC_DMIC_X2_ON    0x1
+#define WCD9XXX_ANC_DMIC_X2_OFF   0x0
+
+/* Derived State: Bits 1 and 2 should be set for Headphone stereo */
+#define WCD9XXX_CLSH_STATE_HPH_ST (WCD9XXX_CLSH_STATE_HPHL | \
+						WCD9XXX_CLSH_STATE_HPHR)
+
+#define WCD9XXX_CLSH_STATE_HPHL_EAR (WCD9XXX_CLSH_STATE_HPHL | \
+						WCD9XXX_CLSH_STATE_EAR)
+#define WCD9XXX_CLSH_STATE_HPHR_EAR (WCD9XXX_CLSH_STATE_HPHR | \
+						WCD9XXX_CLSH_STATE_EAR)
+
+#define WCD9XXX_CLSH_STATE_HPH_ST_EAR (WCD9XXX_CLSH_STATE_HPH_ST | \
+						WCD9XXX_CLSH_STATE_EAR)
+
+#define WCD9XXX_CLSH_STATE_HPHL_LO (WCD9XXX_CLSH_STATE_HPHL | \
+						WCD9XXX_CLSH_STATE_LO)
+#define WCD9XXX_CLSH_STATE_HPHR_LO (WCD9XXX_CLSH_STATE_HPHR | \
+						WCD9XXX_CLSH_STATE_LO)
+
+#define WCD9XXX_CLSH_STATE_HPH_ST_LO (WCD9XXX_CLSH_STATE_HPH_ST | \
+						WCD9XXX_CLSH_STATE_LO)
+
+#define WCD9XXX_CLSH_STATE_EAR_LO (WCD9XXX_CLSH_STATE_EAR | \
+						WCD9XXX_CLSH_STATE_LO)
+
+#define WCD9XXX_CLSH_STATE_HPHL_EAR_LO (WCD9XXX_CLSH_STATE_HPHL | \
+						WCD9XXX_CLSH_STATE_EAR | \
+						WCD9XXX_CLSH_STATE_LO)
+#define WCD9XXX_CLSH_STATE_HPHR_EAR_LO (WCD9XXX_CLSH_STATE_HPHR | \
+						WCD9XXX_CLSH_STATE_EAR | \
+						WCD9XXX_CLSH_STATE_LO)
+#define WCD9XXX_CLSH_STATE_HPH_ST_EAR_LO (WCD9XXX_CLSH_STATE_HPH_ST | \
+						WCD9XXX_CLSH_STATE_EAR | \
+						WCD9XXX_CLSH_STATE_LO)
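+
+/*
+ * The combined masks above are also used directly as indices into the
+ * clsh_state_fp handler table built by wcd9xxx_clsh_init().
+ */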
+
+struct wcd9xxx_reg_mask_val {
+	u16	reg;
+	u8	mask;
+	u8	val;
+};
+
+enum ncp_fclk_level {
+	NCP_FCLK_LEVEL_8,
+	NCP_FCLK_LEVEL_5,
+	NCP_FCLK_LEVEL_MAX,
+};
+
+/* Class H data that the codec driver will maintain */
+struct wcd9xxx_clsh_cdc_data {
+	u8 state;
+	int buck_mv;
+	bool is_dynamic_vdd_cp;
+	int clsh_users;
+	int buck_users;
+	int ncp_users[NCP_FCLK_LEVEL_MAX];
+	struct wcd9xxx_resmgr *resmgr;
+};
+
+struct wcd9xxx_anc_header {
+	u32 reserved[3];
+	u32 num_anc_slots;
+};
+
+enum wcd9xxx_buck_volt {
+	WCD9XXX_CDC_BUCK_UNSUPPORTED = 0,
+	WCD9XXX_CDC_BUCK_MV_1P8 = 1800000,
+	WCD9XXX_CDC_BUCK_MV_2P15 = 2150000,
+};
+
+struct mad_audio_header {
+	u32 reserved[3];
+	u32 num_reg_cfg;
+};
+
+struct mad_microphone_info {
+	uint8_t input_microphone;
+	uint8_t cycle_time;
+	uint8_t settle_time;
+	uint8_t padding;
+} __packed;
+
+struct mad_micbias_info {
+	uint8_t micbias;
+	uint8_t k_factor;
+	uint8_t external_bypass_capacitor;
+	uint8_t internal_biasing;
+	uint8_t cfilter;
+	uint8_t padding[3];
+} __packed;
+
+struct mad_rms_audio_beacon_info {
+	uint8_t rms_omit_samples;
+	uint8_t rms_comp_time;
+	uint8_t detection_mechanism;
+	uint8_t rms_diff_threshold;
+	uint8_t rms_threshold_lsb;
+	uint8_t rms_threshold_msb;
+	uint8_t padding[2];
+	uint8_t iir_coefficients[36];
+} __packed;
+
+struct mad_rms_ultrasound_info {
+	uint8_t rms_comp_time;
+	uint8_t detection_mechanism;
+	uint8_t rms_diff_threshold;
+	uint8_t rms_threshold_lsb;
+	uint8_t rms_threshold_msb;
+	uint8_t padding[3];
+	uint8_t iir_coefficients[36];
+} __packed;
+
+struct mad_audio_cal {
+	uint32_t version;
+	struct mad_microphone_info microphone_info;
+	struct mad_micbias_info micbias_info;
+	struct mad_rms_audio_beacon_info audio_info;
+	struct mad_rms_audio_beacon_info beacon_info;
+	struct mad_rms_ultrasound_info ultrasound_info;
+} __packed;
+
+extern void wcd9xxx_clsh_fsm(struct snd_soc_codec *codec,
+		struct wcd9xxx_clsh_cdc_data *cdc_clsh_d,
+		u8 req_state, bool req_type, u8 clsh_event);
+
+extern void wcd9xxx_enable_high_perf_mode(struct snd_soc_codec *codec,
+				struct wcd9xxx_clsh_cdc_data *clsh_d,
+				u8 uhqa_mode, u8 req_state, bool req_type);
+
+extern void wcd9xxx_clsh_init(struct wcd9xxx_clsh_cdc_data *clsh,
+			      struct wcd9xxx_resmgr *resmgr);
+
+extern void wcd9xxx_clsh_imped_config(struct snd_soc_codec *codec,
+				  int imped);
+
+enum wcd9xxx_codec_event {
+	WCD9XXX_CODEC_EVENT_CODEC_UP = 0,
+};
+
+struct wcd9xxx_register_save_node {
+	struct list_head lh;
+	u16 reg;
+	u16 value;
+};
+
+extern int wcd9xxx_soc_update_bits_push(struct snd_soc_codec *codec,
+					struct list_head *lh,
+					uint16_t reg, uint8_t mask,
+					uint8_t value, int delay);
+extern void wcd9xxx_restore_registers(struct snd_soc_codec *codec,
+				      struct list_head *lh);
+enum {
+	RESERVED = 0,
+	AANC_LPF_FF_FB = 1,
+	AANC_LPF_COEFF_MSB,
+	AANC_LPF_COEFF_LSB,
+	HW_MAD_AUDIO_ENABLE,
+	HW_MAD_ULTR_ENABLE,
+	HW_MAD_BEACON_ENABLE,
+	HW_MAD_AUDIO_SLEEP_TIME,
+	HW_MAD_ULTR_SLEEP_TIME,
+	HW_MAD_BEACON_SLEEP_TIME,
+	HW_MAD_TX_AUDIO_SWITCH_OFF,
+	HW_MAD_TX_ULTR_SWITCH_OFF,
+	HW_MAD_TX_BEACON_SWITCH_OFF,
+	MAD_AUDIO_INT_DEST_SELECT_REG,
+	MAD_ULT_INT_DEST_SELECT_REG,
+	MAD_BEACON_INT_DEST_SELECT_REG,
+	MAD_CLIP_INT_DEST_SELECT_REG,
+	MAD_VBAT_INT_DEST_SELECT_REG,
+	MAD_AUDIO_INT_MASK_REG,
+	MAD_ULT_INT_MASK_REG,
+	MAD_BEACON_INT_MASK_REG,
+	MAD_CLIP_INT_MASK_REG,
+	MAD_VBAT_INT_MASK_REG,
+	MAD_AUDIO_INT_STATUS_REG,
+	MAD_ULT_INT_STATUS_REG,
+	MAD_BEACON_INT_STATUS_REG,
+	MAD_CLIP_INT_STATUS_REG,
+	MAD_VBAT_INT_STATUS_REG,
+	MAD_AUDIO_INT_CLEAR_REG,
+	MAD_ULT_INT_CLEAR_REG,
+	MAD_BEACON_INT_CLEAR_REG,
+	MAD_CLIP_INT_CLEAR_REG,
+	MAD_VBAT_INT_CLEAR_REG,
+	SB_PGD_PORT_TX_WATERMARK_N,
+	SB_PGD_PORT_TX_ENABLE_N,
+	SB_PGD_PORT_RX_WATERMARK_N,
+	SB_PGD_PORT_RX_ENABLE_N,
+	SB_PGD_TX_PORTn_MULTI_CHNL_0,
+	SB_PGD_TX_PORTn_MULTI_CHNL_1,
+	SB_PGD_RX_PORTn_MULTI_CHNL_0,
+	SB_PGD_RX_PORTn_MULTI_CHNL_1,
+	AANC_FF_GAIN_ADAPTIVE,
+	AANC_FFGAIN_ADAPTIVE_EN,
+	AANC_GAIN_CONTROL,
+	SPKR_CLIP_PIPE_BANK_SEL,
+	SPKR_CLIPDET_VAL0,
+	SPKR_CLIPDET_VAL1,
+	SPKR_CLIPDET_VAL2,
+	SPKR_CLIPDET_VAL3,
+	SPKR_CLIPDET_VAL4,
+	SPKR_CLIPDET_VAL5,
+	SPKR_CLIPDET_VAL6,
+	SPKR_CLIPDET_VAL7,
+	VBAT_RELEASE_INT_DEST_SELECT_REG,
+	VBAT_RELEASE_INT_MASK_REG,
+	VBAT_RELEASE_INT_STATUS_REG,
+	VBAT_RELEASE_INT_CLEAR_REG,
+	MAD2_CLIP_INT_DEST_SELECT_REG,
+	MAD2_CLIP_INT_MASK_REG,
+	MAD2_CLIP_INT_STATUS_REG,
+	MAD2_CLIP_INT_CLEAR_REG,
+	SPKR2_CLIP_PIPE_BANK_SEL,
+	SPKR2_CLIPDET_VAL0,
+	SPKR2_CLIPDET_VAL1,
+	SPKR2_CLIPDET_VAL2,
+	SPKR2_CLIPDET_VAL3,
+	SPKR2_CLIPDET_VAL4,
+	SPKR2_CLIPDET_VAL5,
+	SPKR2_CLIPDET_VAL6,
+	SPKR2_CLIPDET_VAL7,
+	MAX_CFG_REGISTERS,
+};
+
+#endif
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/sound/soc/codecs/wcd9xxx-common-v2.c	2019-01-22 16:16:29.555301211 +0100
@@ -0,0 +1,1365 @@
+/*
+ * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <sound/soc.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/mfd/wcd9xxx/core.h>
+#include <linux/mfd/wcd9xxx/wcd9xxx_registers.h>
+#include "wcd9xxx-common-v2.h"
+
+#define WCD_USLEEP_RANGE 50
+#define MAX_IMPED_PARAMS 6
+
+enum {
+	DAC_GAIN_0DB = 0,
+	DAC_GAIN_0P2DB,
+	DAC_GAIN_0P4DB,
+	DAC_GAIN_0P6DB,
+	DAC_GAIN_0P8DB,
+	DAC_GAIN_M0P2DB,
+	DAC_GAIN_M0P4DB,
+	DAC_GAIN_M0P6DB,
+};
+
+enum {
+	VREF_FILT_R_0OHM = 0,
+	VREF_FILT_R_25KOHM,
+	VREF_FILT_R_50KOHM,
+	VREF_FILT_R_100KOHM,
+};
+
+enum {
+	DELTA_I_0MA,
+	DELTA_I_10MA,
+	DELTA_I_20MA,
+	DELTA_I_30MA,
+	DELTA_I_40MA,
+	DELTA_I_50MA,
+};
+
+struct wcd_imped_val {
+	u32 imped_val;
+	u8 index;
+};
+
+static const struct wcd_reg_mask_val imped_table[][MAX_IMPED_PARAMS] = {
+	{
+		{WCD9XXX_CDC_RX1_RX_VOL_CTL, 0xff, 0xf5},
+		{WCD9XXX_CDC_RX1_RX_VOL_MIX_CTL, 0xff, 0xf5},
+		{WCD9XXX_CDC_RX1_RX_PATH_SEC1, 0x01, 0x01},
+		{WCD9XXX_CDC_RX2_RX_VOL_CTL, 0xff, 0xf5},
+		{WCD9XXX_CDC_RX2_RX_VOL_MIX_CTL, 0xff, 0xf5},
+		{WCD9XXX_CDC_RX2_RX_PATH_SEC1, 0x01, 0x01},
+	},
+	{
+		{WCD9XXX_CDC_RX1_RX_VOL_CTL, 0xff, 0xf7},
+		{WCD9XXX_CDC_RX1_RX_VOL_MIX_CTL, 0xff, 0xf7},
+		{WCD9XXX_CDC_RX1_RX_PATH_SEC1, 0x01, 0x01},
+		{WCD9XXX_CDC_RX2_RX_VOL_CTL, 0xff, 0xf7},
+		{WCD9XXX_CDC_RX2_RX_VOL_MIX_CTL, 0xff, 0xf7},
+		{WCD9XXX_CDC_RX2_RX_PATH_SEC1, 0x01, 0x01},
+	},
+	{
+		{WCD9XXX_CDC_RX1_RX_VOL_CTL, 0xff, 0xf9},
+		{WCD9XXX_CDC_RX1_RX_VOL_MIX_CTL, 0xff, 0xf9},
+		{WCD9XXX_CDC_RX1_RX_PATH_SEC1, 0x01, 0x0},
+		{WCD9XXX_CDC_RX2_RX_VOL_CTL, 0xff, 0xf9},
+		{WCD9XXX_CDC_RX2_RX_VOL_MIX_CTL, 0xff, 0xf9},
+		{WCD9XXX_CDC_RX2_RX_PATH_SEC1, 0x01, 0x0},
+	},
+	{
+		{WCD9XXX_CDC_RX1_RX_VOL_CTL, 0xff, 0xfa},
+		{WCD9XXX_CDC_RX1_RX_VOL_MIX_CTL, 0xff, 0xfa},
+		{WCD9XXX_CDC_RX1_RX_PATH_SEC1, 0x01, 0x01},
+		{WCD9XXX_CDC_RX2_RX_VOL_CTL, 0xff, 0xfa},
+		{WCD9XXX_CDC_RX2_RX_VOL_MIX_CTL, 0xff, 0xfa},
+		{WCD9XXX_CDC_RX2_RX_PATH_SEC1, 0x01, 0x01},
+	},
+	{
+		{WCD9XXX_CDC_RX1_RX_VOL_CTL, 0xff, 0xfb},
+		{WCD9XXX_CDC_RX1_RX_VOL_MIX_CTL, 0xff, 0xfb},
+		{WCD9XXX_CDC_RX1_RX_PATH_SEC1, 0x01, 0x01},
+		{WCD9XXX_CDC_RX2_RX_VOL_CTL, 0xff, 0xfb},
+		{WCD9XXX_CDC_RX2_RX_VOL_MIX_CTL, 0xff, 0xfb},
+		{WCD9XXX_CDC_RX2_RX_PATH_SEC1, 0x01, 0x01},
+	},
+	{
+		{WCD9XXX_CDC_RX1_RX_VOL_CTL, 0xff, 0xfc},
+		{WCD9XXX_CDC_RX1_RX_VOL_MIX_CTL, 0xff, 0xfc},
+		{WCD9XXX_CDC_RX1_RX_PATH_SEC1, 0x01, 0x01},
+		{WCD9XXX_CDC_RX2_RX_VOL_CTL, 0xff, 0xfc},
+		{WCD9XXX_CDC_RX2_RX_VOL_MIX_CTL, 0xff, 0xfc},
+		{WCD9XXX_CDC_RX2_RX_PATH_SEC1, 0x01, 0x01},
+	},
+	{
+		{WCD9XXX_CDC_RX1_RX_VOL_CTL, 0xff, 0xfd},
+		{WCD9XXX_CDC_RX1_RX_VOL_MIX_CTL, 0xff, 0xfd},
+		{WCD9XXX_CDC_RX1_RX_PATH_SEC1, 0x01, 0x01},
+		{WCD9XXX_CDC_RX2_RX_VOL_CTL, 0xff, 0xfd},
+		{WCD9XXX_CDC_RX2_RX_VOL_MIX_CTL, 0xff, 0xfd},
+		{WCD9XXX_CDC_RX2_RX_PATH_SEC1, 0x01, 0x01},
+	},
+	{
+		{WCD9XXX_CDC_RX1_RX_VOL_CTL, 0xff, 0xfe},
+		{WCD9XXX_CDC_RX1_RX_VOL_MIX_CTL, 0xff, 0xfe},
+		{WCD9XXX_CDC_RX1_RX_PATH_SEC1, 0x01, 0x01},
+		{WCD9XXX_CDC_RX2_RX_VOL_CTL, 0xff, 0xfe},
+		{WCD9XXX_CDC_RX2_RX_VOL_MIX_CTL, 0xff, 0xfe},
+		{WCD9XXX_CDC_RX2_RX_PATH_SEC1, 0x01, 0x01},
+	},
+	{
+		{WCD9XXX_CDC_RX1_RX_VOL_CTL, 0xff, 0xff},
+		{WCD9XXX_CDC_RX1_RX_VOL_MIX_CTL, 0xff, 0xff},
+		{WCD9XXX_CDC_RX1_RX_PATH_SEC1, 0x01, 0x00},
+		{WCD9XXX_CDC_RX2_RX_VOL_CTL, 0xff, 0xff},
+		{WCD9XXX_CDC_RX2_RX_VOL_MIX_CTL, 0xff, 0xff},
+		{WCD9XXX_CDC_RX2_RX_PATH_SEC1, 0x01, 0x00},
+	},
+};
+
+static const struct wcd_reg_mask_val imped_table_tavil[][MAX_IMPED_PARAMS] = {
+	{
+		{WCD9XXX_CDC_RX1_RX_VOL_CTL, 0xff, 0xf2},
+		{WCD9XXX_CDC_RX1_RX_VOL_MIX_CTL, 0xff, 0xf2},
+		{WCD9XXX_CDC_RX1_RX_PATH_SEC1, 0x01, 0x00},
+		{WCD9XXX_CDC_RX2_RX_VOL_CTL, 0xff, 0xf2},
+		{WCD9XXX_CDC_RX2_RX_VOL_MIX_CTL, 0xff, 0xf2},
+		{WCD9XXX_CDC_RX2_RX_PATH_SEC1, 0x01, 0x00},
+	},
+	{
+		{WCD9XXX_CDC_RX1_RX_VOL_CTL, 0xff, 0xf4},
+		{WCD9XXX_CDC_RX1_RX_VOL_MIX_CTL, 0xff, 0xf4},
+		{WCD9XXX_CDC_RX1_RX_PATH_SEC1, 0x01, 0x00},
+		{WCD9XXX_CDC_RX2_RX_VOL_CTL, 0xff, 0xf4},
+		{WCD9XXX_CDC_RX2_RX_VOL_MIX_CTL, 0xff, 0xf4},
+		{WCD9XXX_CDC_RX2_RX_PATH_SEC1, 0x01, 0x00},
+	},
+	{
+		{WCD9XXX_CDC_RX1_RX_VOL_CTL, 0xff, 0xf7},
+		{WCD9XXX_CDC_RX1_RX_VOL_MIX_CTL, 0xff, 0xf7},
+		{WCD9XXX_CDC_RX1_RX_PATH_SEC1, 0x01, 0x01},
+		{WCD9XXX_CDC_RX2_RX_VOL_CTL, 0xff, 0xf7},
+		{WCD9XXX_CDC_RX2_RX_VOL_MIX_CTL, 0xff, 0xf7},
+		{WCD9XXX_CDC_RX2_RX_PATH_SEC1, 0x01, 0x01},
+	},
+	{
+		{WCD9XXX_CDC_RX1_RX_VOL_CTL, 0xff, 0xf9},
+		{WCD9XXX_CDC_RX1_RX_VOL_MIX_CTL, 0xff, 0xf9},
+		{WCD9XXX_CDC_RX1_RX_PATH_SEC1, 0x01, 0x00},
+		{WCD9XXX_CDC_RX2_RX_VOL_CTL, 0xff, 0xf9},
+		{WCD9XXX_CDC_RX2_RX_VOL_MIX_CTL, 0xff, 0xf9},
+		{WCD9XXX_CDC_RX2_RX_PATH_SEC1, 0x01, 0x00},
+	},
+	{
+		{WCD9XXX_CDC_RX1_RX_VOL_CTL, 0xff, 0xfa},
+		{WCD9XXX_CDC_RX1_RX_VOL_MIX_CTL, 0xff, 0xfa},
+		{WCD9XXX_CDC_RX1_RX_PATH_SEC1, 0x01, 0x00},
+		{WCD9XXX_CDC_RX2_RX_VOL_CTL, 0xff, 0xfa},
+		{WCD9XXX_CDC_RX2_RX_VOL_MIX_CTL, 0xff, 0xfa},
+		{WCD9XXX_CDC_RX2_RX_PATH_SEC1, 0x01, 0x00},
+	},
+	{
+		{WCD9XXX_CDC_RX1_RX_VOL_CTL, 0xff, 0xfb},
+		{WCD9XXX_CDC_RX1_RX_VOL_MIX_CTL, 0xff, 0xfb},
+		{WCD9XXX_CDC_RX1_RX_PATH_SEC1, 0x01, 0x00},
+		{WCD9XXX_CDC_RX2_RX_VOL_CTL, 0xff, 0xfb},
+		{WCD9XXX_CDC_RX2_RX_VOL_MIX_CTL, 0xff, 0xfb},
+		{WCD9XXX_CDC_RX2_RX_PATH_SEC1, 0x01, 0x00},
+	},
+	{
+		{WCD9XXX_CDC_RX1_RX_VOL_CTL, 0xff, 0xfc},
+		{WCD9XXX_CDC_RX1_RX_VOL_MIX_CTL, 0xff, 0xfc},
+		{WCD9XXX_CDC_RX1_RX_PATH_SEC1, 0x01, 0x00},
+		{WCD9XXX_CDC_RX2_RX_VOL_CTL, 0xff, 0xfc},
+		{WCD9XXX_CDC_RX2_RX_VOL_MIX_CTL, 0xff, 0xfc},
+		{WCD9XXX_CDC_RX2_RX_PATH_SEC1, 0x01, 0x00},
+	},
+	{
+		{WCD9XXX_CDC_RX1_RX_VOL_CTL, 0xff, 0xfd},
+		{WCD9XXX_CDC_RX1_RX_VOL_MIX_CTL, 0xff, 0xfd},
+		{WCD9XXX_CDC_RX1_RX_PATH_SEC1, 0x01, 0x00},
+		{WCD9XXX_CDC_RX2_RX_VOL_CTL, 0xff, 0xfd},
+		{WCD9XXX_CDC_RX2_RX_VOL_MIX_CTL, 0xff, 0xfd},
+		{WCD9XXX_CDC_RX2_RX_PATH_SEC1, 0x01, 0x00},
+	},
+	{
+		{WCD9XXX_CDC_RX1_RX_VOL_CTL, 0xff, 0xfd},
+		{WCD9XXX_CDC_RX1_RX_VOL_MIX_CTL, 0xff, 0xfd},
+		{WCD9XXX_CDC_RX1_RX_PATH_SEC1, 0x01, 0x01},
+		{WCD9XXX_CDC_RX2_RX_VOL_CTL, 0xff, 0xfd},
+		{WCD9XXX_CDC_RX2_RX_VOL_MIX_CTL, 0xff, 0xfd},
+		{WCD9XXX_CDC_RX2_RX_PATH_SEC1, 0x01, 0x01},
+	},
+};
+
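+/*
+ * For the v2 codecs the impedance lookup is in ohms (4..13), unlike
+ * the v1 table in wcd9xxx-common.c above, which is in milliohms.
+ */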
+static const struct wcd_imped_val imped_index[] = {
+	{4, 0},
+	{5, 1},
+	{6, 2},
+	{7, 3},
+	{8, 4},
+	{9, 5},
+	{10, 6},
+	{11, 7},
+	{12, 8},
+	{13, 9},
+};
+
+static void (*clsh_state_fp[NUM_CLSH_STATES_V2])(struct snd_soc_codec *,
+					      struct wcd_clsh_cdc_data *,
+					      u8 req_state, bool en, int mode);
+
+static int get_impedance_index(int imped)
+{
+	int i = 0;
+
+	if (imped < imped_index[i].imped_val) {
+		pr_debug("%s, detected impedance is less than 4 Ohm\n",
+				__func__);
+		i = 0;
+		goto ret;
+	}
+	if (imped >= imped_index[ARRAY_SIZE(imped_index) - 1].imped_val) {
+		pr_debug("%s, detected impedance is greater than 12 Ohm\n",
+				__func__);
+		i = ARRAY_SIZE(imped_index) - 1;
+		goto ret;
+	}
+	for (i = 0; i < ARRAY_SIZE(imped_index) - 1; i++) {
+		if (imped >= imped_index[i].imped_val &&
+			imped < imped_index[i + 1].imped_val)
+			break;
+	}
+ret:
+	pr_debug("%s: selected impedance index = %d\n",
+			__func__, imped_index[i].index);
+	return imped_index[i].index;
+}
+
+/*
+ * Function: wcd_clsh_imped_config
+ * Params: codec, imped, reset
+ * Description:
+ * This function updates HPHL and HPHR gain settings
+ * according to the impedance value.
+ */
+void wcd_clsh_imped_config(struct snd_soc_codec *codec, int imped, bool reset)
+{
+	int i;
+	int index = 0;
+	int table_size;
+
+	static const struct wcd_reg_mask_val
+				(*imped_table_ptr)[MAX_IMPED_PARAMS];
+	struct wcd9xxx *wcd9xxx = dev_get_drvdata(codec->dev->parent);
+
+	if (IS_CODEC_TYPE(wcd9xxx, WCD934X)) {
+		table_size = ARRAY_SIZE(imped_table_tavil);
+		imped_table_ptr = imped_table_tavil;
+	} else {
+		table_size = ARRAY_SIZE(imped_table);
+		imped_table_ptr = imped_table;
+	}
+
+	/* reset == true requests restoring the default register values */
+	if (reset) {
+		for (i = 0; i < MAX_IMPED_PARAMS; i++)
+			snd_soc_update_bits(codec,
+				imped_table_ptr[index][i].reg,
+				imped_table_ptr[index][i].mask, 0);
+		return;
+	}
+	index = get_impedance_index(imped);
+	if (index >= (ARRAY_SIZE(imped_index) - 1)) {
+		pr_debug("%s, impedance not in range = %d\n", __func__, imped);
+		return;
+	}
+	if (index >= table_size) {
+		pr_debug("%s, impedance index not in range = %d\n", __func__,
+			index);
+		return;
+	}
+	for (i = 0; i < MAX_IMPED_PARAMS; i++)
+		snd_soc_update_bits(codec,
+				imped_table_ptr[index][i].reg,
+				imped_table_ptr[index][i].mask,
+				imped_table_ptr[index][i].val);
+}
+EXPORT_SYMBOL(wcd_clsh_imped_config);
+
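+/*
+ * Native 44.1 kHz playback is considered active when the native clock
+ * bit is set in the MCLK control register and either RX1 or RX2 runs
+ * at rate code 0x9.
+ */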
+static bool is_native_44_1_active(struct snd_soc_codec *codec)
+{
+	bool native_active = false;
+	u8 native_clk, rx1_rate, rx2_rate;
+
+	native_clk = snd_soc_read(codec,
+				 WCD9XXX_CDC_CLK_RST_CTRL_MCLK_CONTROL);
+	rx1_rate = snd_soc_read(codec, WCD9XXX_CDC_RX1_RX_PATH_CTL);
+	rx2_rate = snd_soc_read(codec, WCD9XXX_CDC_RX2_RX_PATH_CTL);
+
+	dev_dbg(codec->dev, "%s: native_clk %x rx1_rate= %x rx2_rate= %x",
+		__func__, native_clk, rx1_rate, rx2_rate);
+
+	if ((native_clk & 0x2) &&
+	    ((rx1_rate & 0x0F) == 0x9 || (rx2_rate & 0x0F) == 0x9))
+		native_active = true;
+
+	return native_active;
+}
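+
+/*
+ * Bit 1 of the MCLK control register flags the native 44.1 kHz clock
+ * and a sample-rate nibble of 0x9 in the RX path control register is
+ * the 44.1 kHz rate on these codecs, so native 44.1 kHz playback is
+ * only reported when the clock and at least one RX path agree.
+ */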
+
+static const char *mode_to_str(int mode)
+{
+	switch (mode) {
+	case CLS_H_NORMAL:
+		return "CLS_H_NORMAL";
+	case CLS_H_HIFI:
+		return "CLS_H_HIFI";
+	case CLS_H_LOHIFI:
+		return "CLS_H_LOHIFI";
+	case CLS_H_LP:
+		return "CLS_H_LP";
+	case CLS_H_ULP:
+		return "CLS_H_ULP";
+	case CLS_AB:
+		return "CLS_AB";
+	case CLS_AB_HIFI:
+		return "CLS_AB_HIFI";
+	default:
+		return "CLS_H_INVALID";
+	}
+}
+
+static const char *state_to_str(u8 state, char *buf, size_t buflen)
+{
+	int i;
+	int cnt = 0;
+	/*
+	 * This array of strings must stay in sync with
+	 * enum wcd_clsh_state_bit.
+	 */
+	const char *states[] = {
+		"STATE_EAR",
+		"STATE_HPH_L",
+		"STATE_HPH_R",
+		"STATE_LO",
+	};
+
+	if (state == WCD_CLSH_STATE_IDLE) {
+		snprintf(buf, buflen, "[STATE_IDLE]");
+		goto done;
+	}
+
+	buf[0] = '\0';
+	for (i = 0; i < ARRAY_SIZE(states); i++) {
+		if (!(state & (1 << i)))
+			continue;
+		/*
+		 * scnprintf() appends safely here; passing buf as its own
+		 * source argument to snprintf() is undefined behaviour.
+		 */
+		cnt += scnprintf(buf + cnt, buflen - cnt, "%s%s",
+				 cnt == 0 ? "[" : "|", states[i]);
+	}
+	if (cnt > 0)
+		strlcat(buf, "]", buflen);
+
+done:
+	if (buf[0] == '\0')
+		snprintf(buf, buflen, "[STATE_UNKNOWN]");
+	return buf;
+}
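+
+/*
+ * Example output (for illustration): a state of
+ * WCD_CLSH_STATE_HPHL | WCD_CLSH_STATE_LO is rendered as
+ * "[STATE_HPH_L|STATE_LO]".
+ */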
+
+static inline void
+wcd_enable_clsh_block(struct snd_soc_codec *codec,
+		      struct wcd_clsh_cdc_data *clsh_d, bool enable)
+{
+	if ((enable && ++clsh_d->clsh_users == 1) ||
+	    (!enable && --clsh_d->clsh_users == 0))
+		snd_soc_update_bits(codec, WCD9XXX_A_CDC_CLSH_CRC, 0x01,
+				    (u8) enable);
+	if (clsh_d->clsh_users < 0)
+		clsh_d->clsh_users = 0;
+	dev_dbg(codec->dev, "%s: clsh_users %d, enable %d", __func__,
+		clsh_d->clsh_users, enable);
+}
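+
+/*
+ * The CLSH block is reference counted: only the 0 -> 1 enable and the
+ * 1 -> 0 disable transitions touch WCD9XXX_A_CDC_CLSH_CRC, so nested
+ * enable/disable requests from different RX paths stay balanced.
+ */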
+
+static inline bool wcd_clsh_enable_status(struct snd_soc_codec *codec)
+{
+	return snd_soc_read(codec, WCD9XXX_A_CDC_CLSH_CRC) & 0x01;
+}
+
+static inline int wcd_clsh_get_int_mode(struct wcd_clsh_cdc_data *clsh_d,
+					int clsh_state)
+{
+	int mode;
+
+	if ((clsh_state != WCD_CLSH_STATE_EAR) &&
+	    (clsh_state != WCD_CLSH_STATE_HPHL) &&
+	    (clsh_state != WCD_CLSH_STATE_HPHR) &&
+	    (clsh_state != WCD_CLSH_STATE_LO))
+		mode = CLS_NONE;
+	else
+		mode = clsh_d->interpolator_modes[ffs(clsh_state)];
+
+	return mode;
+}
+
+static inline void wcd_clsh_set_int_mode(struct wcd_clsh_cdc_data *clsh_d,
+					int clsh_state, int mode)
+{
+	if ((clsh_state != WCD_CLSH_STATE_EAR) &&
+	    (clsh_state != WCD_CLSH_STATE_HPHL) &&
+	    (clsh_state != WCD_CLSH_STATE_HPHR) &&
+	    (clsh_state != WCD_CLSH_STATE_LO))
+		return;
+
+	clsh_d->interpolator_modes[ffs(clsh_state)] = mode;
+}
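+
+/*
+ * ffs() is 1-based, so interpolator_modes[1..4] hold the modes for
+ * EAR, HPHL, HPHR and LO respectively, and slot 0 of the array is
+ * never used.
+ */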
+
+static inline void wcd_clsh_set_buck_mode(struct snd_soc_codec *codec,
+					  int mode)
+{
+	if (mode == CLS_H_HIFI || mode == CLS_H_LOHIFI ||
+	    mode == CLS_AB_HIFI || mode == CLS_AB)
+		snd_soc_update_bits(codec, WCD9XXX_A_ANA_RX_SUPPLIES,
+				    0x08, 0x08); /* set to HIFI */
+	else
+		snd_soc_update_bits(codec, WCD9XXX_A_ANA_RX_SUPPLIES,
+				    0x08, 0x00); /* set to default */
+}
+
+static inline void wcd_clsh_set_flyback_mode(struct snd_soc_codec *codec,
+					     int mode)
+{
+	if (mode == CLS_H_HIFI || mode == CLS_H_LOHIFI ||
+	    mode == CLS_AB_HIFI || mode == CLS_AB)
+		snd_soc_update_bits(codec, WCD9XXX_A_ANA_RX_SUPPLIES,
+				    0x04, 0x04); /* set to HIFI */
+	else
+		snd_soc_update_bits(codec, WCD9XXX_A_ANA_RX_SUPPLIES,
+				    0x04, 0x00); /* set to Default */
+}
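+
+/*
+ * Buck and flyback power modes live in the same supplies register:
+ * bit 3 of WCD9XXX_A_ANA_RX_SUPPLIES selects the buck HIFI mode and
+ * bit 2 the flyback HIFI mode; both default to 0.
+ */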
+
+static inline void wcd_clsh_gm3_boost_disable(struct snd_soc_codec *codec,
+					      int mode)
+{
+	struct wcd9xxx *wcd9xxx = dev_get_drvdata(codec->dev->parent);
+
+	if (!IS_CODEC_TYPE(wcd9xxx, WCD934X))
+		return;
+
+	if (mode == CLS_H_HIFI || mode == CLS_H_LOHIFI ||
+	    mode == CLS_AB_HIFI || mode == CLS_AB) {
+		if (TAVIL_IS_1_0(wcd9xxx))
+			snd_soc_update_bits(codec, WCD9XXX_HPH_CNP_WG_CTL,
+					    0x80, 0x0); /* disable GM3 Boost */
+		snd_soc_update_bits(codec, WCD9XXX_FLYBACK_VNEG_CTRL_4,
+				    0xF0, 0x80);
+	} else {
+		snd_soc_update_bits(codec, WCD9XXX_HPH_CNP_WG_CTL,
+				    0x80, 0x80); /* set to Default */
+		snd_soc_update_bits(codec, WCD9XXX_FLYBACK_VNEG_CTRL_4,
+				    0xF0, 0x70);
+	}
+}
+
+static inline void wcd_clsh_force_iq_ctl(struct snd_soc_codec *codec,
+					 int mode)
+{
+	struct wcd9xxx *wcd9xxx = dev_get_drvdata(codec->dev->parent);
+
+	if (!IS_CODEC_TYPE(wcd9xxx, WCD934X))
+		return;
+
+	if (mode == CLS_H_LOHIFI || mode == CLS_AB) {
+		snd_soc_update_bits(codec, WCD9XXX_HPH_NEW_INT_PA_MISC2,
+				    0x20, 0x20);
+		snd_soc_update_bits(codec, WCD9XXX_RX_BIAS_HPH_LOWPOWER,
+				    0xF0, 0xC0);
+		snd_soc_update_bits(codec, WCD9XXX_HPH_PA_CTL1,
+				    0x0E, 0x02);
+	} else {
+		snd_soc_update_bits(codec, WCD9XXX_HPH_NEW_INT_PA_MISC2,
+				    0x20, 0x0);
+		snd_soc_update_bits(codec, WCD9XXX_RX_BIAS_HPH_LOWPOWER,
+				    0xF0, 0x80);
+		snd_soc_update_bits(codec, WCD9XXX_HPH_PA_CTL1,
+				    0x0E, 0x06);
+	}
+}
+
+static void wcd_clsh_buck_ctrl(struct snd_soc_codec *codec,
+			       struct wcd_clsh_cdc_data *clsh_d,
+			       int mode,
+			       bool enable)
+{
+	/* enable/disable buck */
+	if ((enable && (++clsh_d->buck_users == 1)) ||
+	   (!enable && (--clsh_d->buck_users == 0)))
+		snd_soc_update_bits(codec, WCD9XXX_A_ANA_RX_SUPPLIES,
+				    (1 << 7), (enable << 7));
+	dev_dbg(codec->dev, "%s: buck_users %d, enable %d, mode: %s",
+		__func__, clsh_d->buck_users, enable, mode_to_str(mode));
+	/*
+	 * 500us sleep is required after buck enable/disable
+	 * as per HW requirement
+	 */
+	usleep_range(500, 500 + WCD_USLEEP_RANGE);
+}
+
+static void wcd_clsh_flyback_ctrl(struct snd_soc_codec *codec,
+				  struct wcd_clsh_cdc_data *clsh_d,
+				  int mode,
+				  bool enable)
+{
+	struct wcd9xxx *wcd9xxx = dev_get_drvdata(codec->dev->parent);
+	struct wcd9xxx_reg_val bulk_reg[2];
+	u8 vneg[] = {0x00, 0x40};
+
+	/* enable/disable flyback */
+	if ((enable && (++clsh_d->flyback_users == 1)) ||
+	   (!enable && (--clsh_d->flyback_users == 0))) {
+		snd_soc_update_bits(codec, WCD9XXX_A_ANA_RX_SUPPLIES,
+				    (1 << 6), (enable << 6));
+		/* 100usec delay is needed as per HW requirement */
+		usleep_range(100, 110);
+		if (enable && (TASHA_IS_1_1(wcd9xxx))) {
+			wcd_clsh_set_flyback_mode(codec, CLS_H_HIFI);
+			snd_soc_update_bits(codec, WCD9XXX_FLYBACK_EN,
+					    0x60, 0x40);
+			snd_soc_update_bits(codec, WCD9XXX_FLYBACK_EN,
+					    0x10, 0x10);
+			vneg[0] = snd_soc_read(codec,
+					       WCD9XXX_A_ANA_RX_SUPPLIES);
+			vneg[0] &= ~(0x40);
+			vneg[1] = vneg[0] | 0x40;
+			bulk_reg[0].reg = WCD9XXX_A_ANA_RX_SUPPLIES;
+			bulk_reg[0].buf = &vneg[0];
+			bulk_reg[0].bytes = 1;
+			bulk_reg[1].reg = WCD9XXX_A_ANA_RX_SUPPLIES;
+			bulk_reg[1].buf = &vneg[1];
+			bulk_reg[1].bytes = 1;
+			/* 500usec delay is needed as per HW requirement */
+			usleep_range(500, 510);
+			wcd9xxx_slim_bulk_write(wcd9xxx, bulk_reg, 2,
+						false);
+			snd_soc_update_bits(codec, WCD9XXX_FLYBACK_EN,
+					    0x10, 0x00);
+			wcd_clsh_set_flyback_mode(codec, mode);
+		}
+
+	}
+	dev_dbg(codec->dev, "%s: flyback_users %d, enable %d, mode: %s",
+		__func__, clsh_d->flyback_users, enable, mode_to_str(mode));
+	/*
+	 * 500us sleep is required after flyback enable/disable
+	 * as per HW requirement
+	 */
+	usleep_range(500, 500 + WCD_USLEEP_RANGE);
+}
+
+static void wcd_clsh_set_gain_path(struct snd_soc_codec *codec,
+				   int mode)
+{
+	u8 val = 0;
+	struct wcd9xxx *wcd9xxx = dev_get_drvdata(codec->dev->parent);
+
+	if (!TASHA_IS_2_0(wcd9xxx))
+		return;
+
+	switch (mode) {
+	case CLS_H_NORMAL:
+	case CLS_AB:
+		val = 0x00;
+		break;
+	case CLS_H_HIFI:
+		val = 0x02;
+		break;
+	case CLS_H_LP:
+		val = 0x01;
+		break;
+	default:
+		return;
+	}
+	snd_soc_update_bits(codec, WCD9XXX_HPH_L_EN, 0xC0, (val << 6));
+	snd_soc_update_bits(codec, WCD9XXX_HPH_R_EN, 0xC0, (val << 6));
+}
+
+static void wcd_clsh_set_hph_mode(struct snd_soc_codec *codec,
+				  int mode)
+{
+	u8 val = 0;
+	u8 gain = 0;
+	u8 res_val = VREF_FILT_R_0OHM;
+	u8 ipeak = DELTA_I_50MA;
+
+	struct wcd9xxx *wcd9xxx = dev_get_drvdata(codec->dev->parent);
+
+	switch (mode) {
+	case CLS_H_NORMAL:
+		res_val = VREF_FILT_R_50KOHM;
+		val = 0x00;
+		gain = DAC_GAIN_0DB;
+		ipeak = DELTA_I_50MA;
+		break;
+	case CLS_AB:
+		val = 0x00;
+		gain = DAC_GAIN_0DB;
+		ipeak = DELTA_I_50MA;
+		break;
+	case CLS_AB_HIFI:
+		val = 0x08;
+		break;
+	case CLS_H_HIFI:
+		val = 0x08;
+		gain = DAC_GAIN_M0P2DB;
+		ipeak = DELTA_I_50MA;
+		break;
+	case CLS_H_LOHIFI:
+		val = 0x00;
+		if ((IS_CODEC_TYPE(wcd9xxx, WCD9335)) ||
+		    (IS_CODEC_TYPE(wcd9xxx, WCD9326))) {
+			val = 0x08;
+			gain = DAC_GAIN_M0P2DB;
+			ipeak = DELTA_I_50MA;
+		}
+		break;
+	case CLS_H_ULP:
+		val = 0x0C;
+		break;
+	case CLS_H_LP:
+		val = 0x04;
+		ipeak = DELTA_I_30MA;
+		break;
+	default:
+		return;
+	}
+
+	/*
+	 * For Tavil, set the mode to low power for
+	 * CLS_H_LOHIFI and CLS_AB
+	 */
+	if ((IS_CODEC_TYPE(wcd9xxx, WCD934X)) &&
+	    (mode == CLS_H_LOHIFI || mode == CLS_AB))
+		val = 0x04;
+
+	snd_soc_update_bits(codec, WCD9XXX_A_ANA_HPH, 0x0C, val);
+	if (TASHA_IS_2_0(wcd9xxx)) {
+		snd_soc_update_bits(codec, WCD9XXX_CLASSH_CTRL_VCL_2,
+				    0x30, (res_val << 4));
+		if (mode != CLS_H_LP)
+			snd_soc_update_bits(codec, WCD9XXX_HPH_REFBUFF_UHQA_CTL,
+					    0x07, gain);
+		snd_soc_update_bits(codec, WCD9XXX_CLASSH_CTRL_CCL_1,
+				    0xF0, (ipeak << 4));
+	}
+}
+
+static void wcd_clsh_set_flyback_vneg_ctl(struct snd_soc_codec *codec,
+					  bool enable)
+{
+	struct wcd9xxx *wcd9xxx = dev_get_drvdata(codec->dev->parent);
+
+	if (!TASHA_IS_2_0(wcd9xxx))
+		return;
+
+	if (enable) {
+		snd_soc_update_bits(codec, WCD9XXX_FLYBACK_VNEG_CTRL_1, 0xE0,
+				    0x00);
+		snd_soc_update_bits(codec, WCD9XXX_FLYBACK_VNEGDAC_CTRL_2,
+				    0xE0, (0x07 << 5));
+	} else {
+		snd_soc_update_bits(codec, WCD9XXX_FLYBACK_VNEG_CTRL_1, 0xE0,
+				    (0x07 << 5));
+		snd_soc_update_bits(codec, WCD9XXX_FLYBACK_VNEGDAC_CTRL_2,
+				    0xE0, (0x02 << 5));
+	}
+}
+
+static void wcd_clsh_set_flyback_current(struct snd_soc_codec *codec, int mode)
+{
+	struct wcd9xxx *wcd9xxx = dev_get_drvdata(codec->dev->parent);
+
+	if (!TASHA_IS_2_0(wcd9xxx))
+		return;
+
+	snd_soc_update_bits(codec, WCD9XXX_RX_BIAS_FLYB_BUFF, 0x0F, 0x0A);
+	snd_soc_update_bits(codec, WCD9XXX_RX_BIAS_FLYB_BUFF, 0xF0, 0xA0);
+	/* Sleep needed to avoid click and pop as per HW requirement */
+	usleep_range(100, 110);
+}
+
+static void wcd_clsh_set_buck_regulator_mode(struct snd_soc_codec *codec,
+					     int mode)
+{
+	snd_soc_update_bits(codec, WCD9XXX_A_ANA_RX_SUPPLIES,
+			    0x02, 0x00);
+}
+
+static void wcd_clsh_state_lo(struct snd_soc_codec *codec,
+			      struct wcd_clsh_cdc_data *clsh_d,
+			      u8 req_state, bool is_enable, int mode)
+{
+	dev_dbg(codec->dev, "%s: mode: %s, %s\n", __func__, mode_to_str(mode),
+		is_enable ? "enable" : "disable");
+
+	if (mode != CLS_AB && mode != CLS_AB_HIFI) {
+		dev_err(codec->dev, "%s: LO cannot be in this mode: %d\n",
+			__func__, mode);
+		return;
+	}
+
+	if (is_enable) {
+		wcd_clsh_set_buck_regulator_mode(codec, mode);
+		wcd_clsh_set_flyback_vneg_ctl(codec, true);
+		wcd_clsh_set_buck_mode(codec, mode);
+		wcd_clsh_set_flyback_mode(codec, mode);
+		wcd_clsh_flyback_ctrl(codec, clsh_d, mode, true);
+		wcd_clsh_set_flyback_current(codec, mode);
+		wcd_clsh_buck_ctrl(codec, clsh_d, mode, true);
+	} else {
+		wcd_clsh_buck_ctrl(codec, clsh_d, mode, false);
+		wcd_clsh_flyback_ctrl(codec, clsh_d, mode, false);
+		wcd_clsh_set_flyback_mode(codec, CLS_H_NORMAL);
+		wcd_clsh_set_buck_mode(codec, CLS_H_NORMAL);
+		wcd_clsh_set_flyback_vneg_ctl(codec, false);
+		wcd_clsh_set_buck_regulator_mode(codec, CLS_H_NORMAL);
+	}
+}
+
+static void wcd_clsh_state_hph_ear(struct snd_soc_codec *codec,
+				   struct wcd_clsh_cdc_data *clsh_d,
+				   u8 req_state, bool is_enable, int mode)
+{
+	int hph_mode = 0;
+
+	dev_dbg(codec->dev, "%s: mode: %s, %s\n", __func__, mode_to_str(mode),
+		is_enable ? "enable" : "disable");
+
+	if (is_enable) {
+		if (req_state == WCD_CLSH_STATE_EAR) {
+			/* If HPH is running in CLS-AB when
+			 * EAR comes, let it continue to run
+			 * in Class-AB, no need to enable Class-H
+			 * for EAR.
+			 */
+			if (clsh_d->state & WCD_CLSH_STATE_HPHL)
+				hph_mode = wcd_clsh_get_int_mode(clsh_d,
+						WCD_CLSH_STATE_HPHL);
+			else if (clsh_d->state & WCD_CLSH_STATE_HPHR)
+				hph_mode = wcd_clsh_get_int_mode(clsh_d,
+						WCD_CLSH_STATE_HPHR);
+			else
+				return;
+			if (hph_mode != CLS_AB && hph_mode != CLS_AB_HIFI
+			    && !is_native_44_1_active(codec))
+				snd_soc_update_bits(codec,
+						WCD9XXX_A_CDC_RX0_RX_PATH_CFG0,
+						0x40, 0x40);
+		}
+
+		if (is_native_44_1_active(codec)) {
+			snd_soc_write(codec, WCD9XXX_CDC_CLSH_HPH_V_PA, 0x39);
+			snd_soc_update_bits(codec,
+					WCD9XXX_CDC_RX0_RX_PATH_SEC0,
+					0x03, 0x00);
+			if ((req_state == WCD_CLSH_STATE_HPHL) ||
+			    (req_state == WCD_CLSH_STATE_HPHR))
+				snd_soc_update_bits(codec,
+						WCD9XXX_A_CDC_RX0_RX_PATH_CFG0,
+						0x40, 0x00);
+		}
+
+		if (req_state == WCD_CLSH_STATE_HPHL)
+			snd_soc_update_bits(codec,
+					    WCD9XXX_A_CDC_RX1_RX_PATH_CFG0,
+					    0x40, 0x40);
+		if (req_state == WCD_CLSH_STATE_HPHR)
+			snd_soc_update_bits(codec,
+					    WCD9XXX_A_CDC_RX2_RX_PATH_CFG0,
+					    0x40, 0x40);
+		if ((req_state == WCD_CLSH_STATE_HPHL) ||
+		    (req_state == WCD_CLSH_STATE_HPHR)) {
+			wcd_clsh_set_gain_path(codec, mode);
+			wcd_clsh_set_flyback_mode(codec, mode);
+			wcd_clsh_set_buck_mode(codec, mode);
+		}
+	} else {
+		if (req_state == WCD_CLSH_STATE_EAR) {
+			/*
+			 * If EAR goes away, disable the EAR channel enable
+			 * bit only when HPH is running in Class-H; if the
+			 * requested HPH mode is CLS_AB, the bit does not
+			 * need to be cleared.
+			 */
+			if (wcd_clsh_enable_status(codec))
+				snd_soc_update_bits(codec,
+						WCD9XXX_A_CDC_RX0_RX_PATH_CFG0,
+						0x40, 0x00);
+		}
+
+		if (is_native_44_1_active(codec)) {
+			snd_soc_write(codec, WCD9XXX_CDC_CLSH_HPH_V_PA, 0x1C);
+			snd_soc_update_bits(codec,
+					WCD9XXX_CDC_RX0_RX_PATH_SEC0,
+					0x03, 0x01);
+			if (((clsh_d->state & WCD_CLSH_STATE_HPH_ST)
+				  != WCD_CLSH_STATE_HPH_ST) &&
+			    ((req_state == WCD_CLSH_STATE_HPHL) ||
+			     (req_state == WCD_CLSH_STATE_HPHR)))
+				snd_soc_update_bits(codec,
+						WCD9XXX_A_CDC_RX0_RX_PATH_CFG0,
+						0x40, 0x40);
+		}
+
+		if (req_state == WCD_CLSH_STATE_HPHL)
+			snd_soc_update_bits(codec,
+					WCD9XXX_A_CDC_RX1_RX_PATH_CFG0,
+					0x40, 0x00);
+		if (req_state == WCD_CLSH_STATE_HPHR)
+			snd_soc_update_bits(codec,
+					WCD9XXX_A_CDC_RX2_RX_PATH_CFG0,
+					0x40, 0x00);
+		if ((req_state & WCD_CLSH_STATE_HPH_ST) &&
+		    !wcd_clsh_enable_status(codec)) {
+			/* If Class-H is not enabled when HPH is turned
+			 * off, enable it as EAR is in progress
+			 */
+			wcd_enable_clsh_block(codec, clsh_d, true);
+			snd_soc_update_bits(codec,
+					WCD9XXX_A_CDC_RX0_RX_PATH_CFG0,
+					0x40, 0x40);
+			wcd_clsh_set_flyback_mode(codec, CLS_H_NORMAL);
+			wcd_clsh_set_buck_mode(codec, CLS_H_NORMAL);
+		}
+	}
+}
+
+static void wcd_clsh_state_ear_lo(struct snd_soc_codec *codec,
+				  struct wcd_clsh_cdc_data *clsh_d,
+				  u8 req_state, bool is_enable, int mode)
+{
+	dev_dbg(codec->dev, "%s: mode: %s, %s\n", __func__, mode_to_str(mode),
+		is_enable ? "enable" : "disable");
+
+	if (is_enable) {
+		/* LO powerup is taken care of in the PA sequence.
+		 * No need to change to Class-AB here.
+		 */
+		if (req_state == WCD_CLSH_STATE_EAR) {
+			/* EAR powerup.*/
+			if (!wcd_clsh_enable_status(codec)) {
+				wcd_enable_clsh_block(codec, clsh_d, true);
+				wcd_clsh_set_buck_mode(codec, mode);
+				wcd_clsh_set_flyback_mode(codec, mode);
+			}
+			snd_soc_update_bits(codec,
+					WCD9XXX_A_CDC_RX0_RX_PATH_CFG0,
+					0x40, 0x40);
+		}
+	} else {
+		if (req_state == WCD_CLSH_STATE_EAR) {
+			/* EAR powerdown.*/
+			wcd_enable_clsh_block(codec, clsh_d, false);
+			wcd_clsh_set_buck_mode(codec, CLS_H_NORMAL);
+			wcd_clsh_set_flyback_mode(codec, CLS_H_NORMAL);
+			snd_soc_update_bits(codec,
+					WCD9XXX_A_CDC_RX0_RX_PATH_CFG0,
+					0x40, 0x00);
+		}
+		/* LO powerdown is taken care of in the PA sequence.
+		 * No need to change to Class-H here.
+		 */
+	}
+}
+
+static void wcd_clsh_state_hph_lo(struct snd_soc_codec *codec,
+				  struct wcd_clsh_cdc_data *clsh_d,
+				  u8 req_state, bool is_enable, int mode)
+{
+	int hph_mode = 0;
+
+	dev_dbg(codec->dev, "%s: mode: %s, %s\n", __func__, mode_to_str(mode),
+		is_enable ? "enable" : "disable");
+
+	if (is_enable) {
+		/*
+		 * If requested state is LO, put regulator
+		 * in class-AB or if requested state is HPH,
+		 * which means LO is already enabled, keep
+		 * the regulator config the same at class-AB
+		 * and just set the power modes for flyback
+		 * and buck.
+		 */
+		if (req_state == WCD_CLSH_STATE_LO)
+			wcd_clsh_set_buck_regulator_mode(codec, CLS_AB);
+		else {
+			if (!wcd_clsh_enable_status(codec)) {
+				wcd_enable_clsh_block(codec, clsh_d, true);
+				snd_soc_update_bits(codec,
+						WCD9XXX_A_CDC_CLSH_K1_MSB,
+						0x0F, 0x00);
+				snd_soc_update_bits(codec,
+						WCD9XXX_A_CDC_CLSH_K1_LSB,
+						0xFF, 0xC0);
+				wcd_clsh_set_flyback_mode(codec, mode);
+				wcd_clsh_set_flyback_vneg_ctl(codec, false);
+				wcd_clsh_set_buck_mode(codec, mode);
+				wcd_clsh_set_hph_mode(codec, mode);
+				wcd_clsh_set_gain_path(codec, mode);
+			} else {
+				dev_dbg(codec->dev, "%s: clsh is already enabled\n",
+					__func__);
+			}
+			if (req_state == WCD_CLSH_STATE_HPHL)
+				snd_soc_update_bits(codec,
+					WCD9XXX_A_CDC_RX1_RX_PATH_CFG0,
+					0x40, 0x40);
+			if (req_state == WCD_CLSH_STATE_HPHR)
+				snd_soc_update_bits(codec,
+					WCD9XXX_A_CDC_RX2_RX_PATH_CFG0,
+					0x40, 0x40);
+		}
+	} else {
+		if ((req_state == WCD_CLSH_STATE_HPHL) ||
+		    (req_state == WCD_CLSH_STATE_HPHR)) {
+			if (req_state == WCD_CLSH_STATE_HPHL)
+				snd_soc_update_bits(codec,
+					    WCD9XXX_A_CDC_RX1_RX_PATH_CFG0,
+					    0x40, 0x00);
+			if (req_state == WCD_CLSH_STATE_HPHR)
+				snd_soc_update_bits(codec,
+					    WCD9XXX_A_CDC_RX2_RX_PATH_CFG0,
+					    0x40, 0x00);
+			/*
+			 * If HPH is powering down first, then disable clsh,
+			 * set the buck/flyback mode to default and keep the
+			 * regulator at Class-AB
+			 */
+			if ((clsh_d->state & WCD_CLSH_STATE_HPH_ST)
+				!= WCD_CLSH_STATE_HPH_ST) {
+				wcd_enable_clsh_block(codec, clsh_d, false);
+				wcd_clsh_set_flyback_vneg_ctl(codec, true);
+				wcd_clsh_set_flyback_mode(codec, CLS_H_NORMAL);
+				wcd_clsh_set_buck_mode(codec, CLS_H_NORMAL);
+			}
+		} else {
+			/* LO powerdown.
+			 * If HPH mode also is CLS-AB, no need
+			 * to turn-on class-H, otherwise enable
+			 * Class-H configuration.
+			 */
+			if (clsh_d->state & WCD_CLSH_STATE_HPHL)
+				hph_mode = wcd_clsh_get_int_mode(clsh_d,
+						WCD_CLSH_STATE_HPHL);
+			else if (clsh_d->state & WCD_CLSH_STATE_HPHR)
+				hph_mode = wcd_clsh_get_int_mode(clsh_d,
+						WCD_CLSH_STATE_HPHR);
+			else
+				return;
+			dev_dbg(codec->dev, "%s: hph_mode = %d\n", __func__,
+				hph_mode);
+
+			if ((hph_mode == CLS_AB) ||
+			   (hph_mode == CLS_AB_HIFI) ||
+			   (hph_mode == CLS_NONE))
+				goto end;
+
+			/*
+			 * If Class-H is already enabled (HPH ON and then
+			 * LO ON), no need to turn on again, just set the
+			 * regulator mode.
+			 */
+			if (wcd_clsh_enable_status(codec)) {
+				wcd_clsh_set_buck_regulator_mode(codec,
+								 hph_mode);
+				goto end;
+			} else {
+				dev_dbg(codec->dev, "%s: clsh is not enabled\n",
+					__func__);
+			}
+
+			wcd_enable_clsh_block(codec, clsh_d, true);
+			snd_soc_update_bits(codec,
+					WCD9XXX_A_CDC_CLSH_K1_MSB,
+					0x0F, 0x00);
+			snd_soc_update_bits(codec,
+					WCD9XXX_A_CDC_CLSH_K1_LSB,
+					0xFF, 0xC0);
+			wcd_clsh_set_buck_regulator_mode(codec,
+							 hph_mode);
+			if (clsh_d->state & WCD_CLSH_STATE_HPHL)
+				snd_soc_update_bits(codec,
+						WCD9XXX_A_CDC_RX1_RX_PATH_CFG0,
+						0x40, 0x40);
+			if (clsh_d->state & WCD_CLSH_STATE_HPHR)
+				snd_soc_update_bits(codec,
+						WCD9XXX_A_CDC_RX2_RX_PATH_CFG0,
+						0x40, 0x40);
+			wcd_clsh_set_hph_mode(codec, hph_mode);
+		}
+	}
+end:
+	return;
+}
+
+static void wcd_clsh_state_hph_st(struct snd_soc_codec *codec,
+				  struct wcd_clsh_cdc_data *clsh_d,
+				  u8 req_state, bool is_enable, int mode)
+{
+	dev_dbg(codec->dev, "%s: mode: %s, %s\n", __func__, mode_to_str(mode),
+		is_enable ? "enable" : "disable");
+
+	if (mode == CLS_AB || mode == CLS_AB_HIFI)
+		return;
+
+	if (is_enable) {
+		if (req_state == WCD_CLSH_STATE_HPHL)
+			snd_soc_update_bits(codec,
+					    WCD9XXX_A_CDC_RX1_RX_PATH_CFG0,
+					    0x40, 0x40);
+		if (req_state == WCD_CLSH_STATE_HPHR)
+			snd_soc_update_bits(codec,
+					    WCD9XXX_A_CDC_RX2_RX_PATH_CFG0,
+					    0x40, 0x40);
+	} else {
+		if (req_state == WCD_CLSH_STATE_HPHL)
+			snd_soc_update_bits(codec,
+					    WCD9XXX_A_CDC_RX1_RX_PATH_CFG0,
+					    0x40, 0x00);
+		if (req_state == WCD_CLSH_STATE_HPHR)
+			snd_soc_update_bits(codec,
+					    WCD9XXX_A_CDC_RX2_RX_PATH_CFG0,
+					    0x40, 0x00);
+	}
+}
+
+static void wcd_clsh_state_hph_r(struct snd_soc_codec *codec,
+				 struct wcd_clsh_cdc_data *clsh_d,
+				 u8 req_state, bool is_enable, int mode)
+{
+	dev_dbg(codec->dev, "%s: mode: %s, %s\n", __func__, mode_to_str(mode),
+		is_enable ? "enable" : "disable");
+
+	if (mode == CLS_H_NORMAL) {
+		dev_err(codec->dev, "%s: Normal mode not applicable for hph_r\n",
+			__func__);
+		return;
+	}
+
+	if (is_enable) {
+		if (mode != CLS_AB && mode != CLS_AB_HIFI) {
+			wcd_enable_clsh_block(codec, clsh_d, true);
+			/*
+			 * These K1 values depend on the Headphone Impedance
+			 * For now it is assumed to be 16 ohm
+			 */
+			snd_soc_update_bits(codec, WCD9XXX_A_CDC_CLSH_K1_MSB,
+					    0x0F, 0x00);
+			snd_soc_update_bits(codec, WCD9XXX_A_CDC_CLSH_K1_LSB,
+					    0xFF, 0xC0);
+			snd_soc_update_bits(codec,
+					    WCD9XXX_A_CDC_RX2_RX_PATH_CFG0,
+					    0x40, 0x40);
+		}
+		wcd_clsh_set_buck_regulator_mode(codec, mode);
+		wcd_clsh_set_flyback_mode(codec, mode);
+		wcd_clsh_gm3_boost_disable(codec, mode);
+		wcd_clsh_force_iq_ctl(codec, mode);
+		wcd_clsh_flyback_ctrl(codec, clsh_d, mode, true);
+		wcd_clsh_set_flyback_current(codec, mode);
+		wcd_clsh_set_buck_mode(codec, mode);
+		wcd_clsh_buck_ctrl(codec, clsh_d, mode, true);
+		wcd_clsh_set_hph_mode(codec, mode);
+		wcd_clsh_set_gain_path(codec, mode);
+	} else {
+		wcd_clsh_set_hph_mode(codec, CLS_H_NORMAL);
+
+		if (mode != CLS_AB && mode != CLS_AB_HIFI) {
+			snd_soc_update_bits(codec,
+					    WCD9XXX_A_CDC_RX2_RX_PATH_CFG0,
+					    0x40, 0x00);
+			wcd_enable_clsh_block(codec, clsh_d, false);
+		}
+		/* buck and flyback set to default mode and disable */
+		wcd_clsh_buck_ctrl(codec, clsh_d, CLS_H_NORMAL, false);
+		wcd_clsh_flyback_ctrl(codec, clsh_d, CLS_H_NORMAL, false);
+		wcd_clsh_force_iq_ctl(codec, CLS_H_NORMAL);
+		wcd_clsh_gm3_boost_disable(codec, CLS_H_NORMAL);
+		wcd_clsh_set_flyback_mode(codec, CLS_H_NORMAL);
+		wcd_clsh_set_buck_mode(codec, CLS_H_NORMAL);
+		wcd_clsh_set_buck_regulator_mode(codec, CLS_H_NORMAL);
+	}
+}
+
+static void wcd_clsh_state_hph_l(struct snd_soc_codec *codec,
+				 struct wcd_clsh_cdc_data *clsh_d,
+				 u8 req_state, bool is_enable, int mode)
+{
+	dev_dbg(codec->dev, "%s: mode: %s, %s\n", __func__, mode_to_str(mode),
+		is_enable ? "enable" : "disable");
+
+	if (mode == CLS_H_NORMAL) {
+		dev_err(codec->dev, "%s: Normal mode not applicable for hph_l\n",
+			__func__);
+		return;
+	}
+
+	if (is_enable) {
+		if (mode != CLS_AB && mode != CLS_AB_HIFI) {
+			wcd_enable_clsh_block(codec, clsh_d, true);
+			/*
+			 * These K1 values depend on the Headphone Impedance
+			 * For now it is assumed to be 16 ohm
+			 */
+			snd_soc_update_bits(codec, WCD9XXX_A_CDC_CLSH_K1_MSB,
+					    0x0F, 0x00);
+			snd_soc_update_bits(codec, WCD9XXX_A_CDC_CLSH_K1_LSB,
+					    0xFF, 0xC0);
+			snd_soc_update_bits(codec,
+					    WCD9XXX_A_CDC_RX1_RX_PATH_CFG0,
+					    0x40, 0x40);
+		}
+		wcd_clsh_set_buck_regulator_mode(codec, mode);
+		wcd_clsh_set_flyback_mode(codec, mode);
+		wcd_clsh_gm3_boost_disable(codec, mode);
+		wcd_clsh_force_iq_ctl(codec, mode);
+		wcd_clsh_flyback_ctrl(codec, clsh_d, mode, true);
+		wcd_clsh_set_flyback_current(codec, mode);
+		wcd_clsh_set_buck_mode(codec, mode);
+		wcd_clsh_buck_ctrl(codec, clsh_d, mode, true);
+		wcd_clsh_set_hph_mode(codec, mode);
+		wcd_clsh_set_gain_path(codec, mode);
+	} else {
+		wcd_clsh_set_hph_mode(codec, CLS_H_NORMAL);
+
+		if (mode != CLS_AB && mode != CLS_AB_HIFI) {
+			snd_soc_update_bits(codec,
+					    WCD9XXX_A_CDC_RX1_RX_PATH_CFG0,
+					    0x40, 0x00);
+			wcd_enable_clsh_block(codec, clsh_d, false);
+		}
+		/* set buck and flyback to Default Mode */
+		wcd_clsh_buck_ctrl(codec, clsh_d, CLS_H_NORMAL, false);
+		wcd_clsh_flyback_ctrl(codec, clsh_d, CLS_H_NORMAL, false);
+		wcd_clsh_force_iq_ctl(codec, CLS_H_NORMAL);
+		wcd_clsh_gm3_boost_disable(codec, CLS_H_NORMAL);
+		wcd_clsh_set_flyback_mode(codec, CLS_H_NORMAL);
+		wcd_clsh_set_buck_mode(codec, CLS_H_NORMAL);
+		wcd_clsh_set_buck_regulator_mode(codec, CLS_H_NORMAL);
+	}
+}
+
+static void wcd_clsh_state_ear(struct snd_soc_codec *codec,
+		struct wcd_clsh_cdc_data *clsh_d,
+		u8 req_state, bool is_enable, int mode)
+{
+	dev_dbg(codec->dev, "%s: mode: %s, %s\n", __func__, mode_to_str(mode),
+		is_enable ? "enable" : "disable");
+
+	if (mode != CLS_H_NORMAL) {
+		dev_err(codec->dev, "%s: mode: %s cannot be used for EAR\n",
+			__func__, mode_to_str(mode));
+		return;
+	}
+
+	if (is_enable) {
+		wcd_enable_clsh_block(codec, clsh_d, true);
+		snd_soc_update_bits(codec,
+				    WCD9XXX_A_CDC_RX0_RX_PATH_CFG0,
+				    0x40, 0x40);
+		wcd_clsh_set_buck_mode(codec, mode);
+		wcd_clsh_set_flyback_mode(codec, mode);
+		wcd_clsh_flyback_ctrl(codec, clsh_d, mode, true);
+		wcd_clsh_set_flyback_current(codec, mode);
+		wcd_clsh_buck_ctrl(codec, clsh_d, mode, true);
+	} else {
+		snd_soc_update_bits(codec,
+				    WCD9XXX_A_CDC_RX0_RX_PATH_CFG0,
+				    0x40, 0x00);
+		wcd_enable_clsh_block(codec, clsh_d, false);
+		wcd_clsh_buck_ctrl(codec, clsh_d, mode, false);
+		wcd_clsh_flyback_ctrl(codec, clsh_d, mode, false);
+		wcd_clsh_set_flyback_mode(codec, CLS_H_NORMAL);
+		wcd_clsh_set_buck_mode(codec, CLS_H_NORMAL);
+	}
+}
+
+static void wcd_clsh_state_err(struct snd_soc_codec *codec,
+		struct wcd_clsh_cdc_data *clsh_d,
+		u8 req_state, bool is_enable, int mode)
+{
+	char msg[128];
+
+	dev_err(codec->dev,
+		"%s: invalid Class-H state machine request to %s %s",
+		__func__, is_enable ? "enable" : "disable",
+		state_to_str(req_state, msg, sizeof(msg)));
+	WARN_ON(1);
+}
+
+/*
+ * Function: wcd_clsh_is_state_valid
+ * Params: state
+ * Description:
+ * Provides information on valid states of Class H configuration
+ */
+static bool wcd_clsh_is_state_valid(u8 state)
+{
+	switch (state) {
+	case WCD_CLSH_STATE_IDLE:
+	case WCD_CLSH_STATE_EAR:
+	case WCD_CLSH_STATE_HPHL:
+	case WCD_CLSH_STATE_HPHR:
+	case WCD_CLSH_STATE_HPH_ST:
+	case WCD_CLSH_STATE_LO:
+	case WCD_CLSH_STATE_HPHL_EAR:
+	case WCD_CLSH_STATE_HPHR_EAR:
+	case WCD_CLSH_STATE_HPH_ST_EAR:
+	case WCD_CLSH_STATE_HPHL_LO:
+	case WCD_CLSH_STATE_HPHR_LO:
+	case WCD_CLSH_STATE_HPH_ST_LO:
+	case WCD_CLSH_STATE_EAR_LO:
+		return true;
+	default:
+		return false;
+	}
+}
+
+/*
+ * Function: wcd_clsh_fsm
+ * Params: codec, cdc_clsh_d, req_state, req_type, clsh_event
+ * Description:
+ * This function handles PRE DAC and POST DAC conditions of different devices
+ * and updates class H configuration of different combination of devices
+ * based on validity of their states. cdc_clsh_d will contain current
+ * class h state information
+ */
+void wcd_clsh_fsm(struct snd_soc_codec *codec,
+		struct wcd_clsh_cdc_data *cdc_clsh_d,
+		u8 clsh_event, u8 req_state,
+		int int_mode)
+{
+	u8 old_state, new_state;
+	char msg0[128], msg1[128];
+
+	switch (clsh_event) {
+	case WCD_CLSH_EVENT_PRE_DAC:
+		old_state = cdc_clsh_d->state;
+		new_state = old_state | req_state;
+
+		if (!wcd_clsh_is_state_valid(new_state)) {
+			dev_err(codec->dev,
+				"%s: Class-H not a valid new state: %s\n",
+				__func__,
+				state_to_str(new_state, msg0, sizeof(msg0)));
+			return;
+		}
+		if (new_state == old_state) {
+			dev_err(codec->dev,
+				"%s: Class-H already in requested state: %s\n",
+				__func__,
+				state_to_str(new_state, msg0, sizeof(msg0)));
+			return;
+		}
+		cdc_clsh_d->state = new_state;
+		wcd_clsh_set_int_mode(cdc_clsh_d, req_state, int_mode);
+		(*clsh_state_fp[new_state]) (codec, cdc_clsh_d, req_state,
+					     CLSH_REQ_ENABLE, int_mode);
+		dev_dbg(codec->dev,
+			"%s: ClassH state transition from %s to %s\n",
+			__func__, state_to_str(old_state, msg0, sizeof(msg0)),
+			state_to_str(cdc_clsh_d->state, msg1, sizeof(msg1)));
+		break;
+	case WCD_CLSH_EVENT_POST_PA:
+		old_state = cdc_clsh_d->state;
+		new_state = old_state & (~req_state);
+		if (new_state < NUM_CLSH_STATES_V2) {
+			if (!wcd_clsh_is_state_valid(old_state)) {
+				dev_err(codec->dev,
+					"%s: invalid old state: %s\n",
+					__func__,
+					state_to_str(old_state, msg0,
+						     sizeof(msg0)));
+				return;
+			}
+			if (new_state == old_state) {
+				dev_err(codec->dev,
+					"%s: Class-H already in requested state: %s\n",
+					__func__,
+					state_to_str(new_state, msg0,
+						     sizeof(msg0)));
+				return;
+			}
+			(*clsh_state_fp[old_state]) (codec, cdc_clsh_d,
+					req_state, CLSH_REQ_DISABLE,
+					int_mode);
+			cdc_clsh_d->state = new_state;
+			wcd_clsh_set_int_mode(cdc_clsh_d, req_state, CLS_NONE);
+			dev_dbg(codec->dev, "%s: ClassH state transition from %s to %s\n",
+				__func__, state_to_str(old_state, msg0,
+						       sizeof(msg0)),
+				state_to_str(cdc_clsh_d->state, msg1,
+					     sizeof(msg1)));
+		}
+		break;
+	}
+}
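+
+/*
+ * Typical call sequence (for illustration): on HPHL DAC power-up the
+ * codec driver would call
+ *	wcd_clsh_fsm(codec, clsh_d, WCD_CLSH_EVENT_PRE_DAC,
+ *		     WCD_CLSH_STATE_HPHL, CLS_H_HIFI);
+ * which validates and sets the HPHL state bit and runs the enable
+ * handler; the matching WCD_CLSH_EVENT_POST_PA call on power-down
+ * clears the bit again.
+ */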
+
+int wcd_clsh_get_clsh_state(struct wcd_clsh_cdc_data *clsh)
+{
+	return clsh->state;
+}
+EXPORT_SYMBOL(wcd_clsh_get_clsh_state);
+
+void wcd_clsh_init(struct wcd_clsh_cdc_data *clsh)
+{
+	int i;
+
+	clsh->state = WCD_CLSH_STATE_IDLE;
+
+	for (i = 0; i < NUM_CLSH_STATES_V2; i++)
+		clsh_state_fp[i] = wcd_clsh_state_err;
+
+	clsh_state_fp[WCD_CLSH_STATE_EAR] = wcd_clsh_state_ear;
+	clsh_state_fp[WCD_CLSH_STATE_HPHL] = wcd_clsh_state_hph_l;
+	clsh_state_fp[WCD_CLSH_STATE_HPHR] = wcd_clsh_state_hph_r;
+	clsh_state_fp[WCD_CLSH_STATE_HPH_ST] = wcd_clsh_state_hph_st;
+	clsh_state_fp[WCD_CLSH_STATE_LO] = wcd_clsh_state_lo;
+	clsh_state_fp[WCD_CLSH_STATE_HPHL_EAR] = wcd_clsh_state_hph_ear;
+	clsh_state_fp[WCD_CLSH_STATE_HPHR_EAR] = wcd_clsh_state_hph_ear;
+	clsh_state_fp[WCD_CLSH_STATE_HPH_ST_EAR] = wcd_clsh_state_hph_ear;
+	clsh_state_fp[WCD_CLSH_STATE_HPHL_LO] = wcd_clsh_state_hph_lo;
+	clsh_state_fp[WCD_CLSH_STATE_HPHR_LO] = wcd_clsh_state_hph_lo;
+	clsh_state_fp[WCD_CLSH_STATE_HPH_ST_LO] = wcd_clsh_state_hph_lo;
+	clsh_state_fp[WCD_CLSH_STATE_EAR_LO] = wcd_clsh_state_ear_lo;
+	/* Set interpolator modes to NONE */
+	wcd_clsh_set_int_mode(clsh, WCD_CLSH_STATE_EAR, CLS_NONE);
+	wcd_clsh_set_int_mode(clsh, WCD_CLSH_STATE_HPHL, CLS_NONE);
+	wcd_clsh_set_int_mode(clsh, WCD_CLSH_STATE_HPHR, CLS_NONE);
+	wcd_clsh_set_int_mode(clsh, WCD_CLSH_STATE_LO, CLS_NONE);
+	clsh->flyback_users = 0;
+	clsh->buck_users = 0;
+	clsh->clsh_users = 0;
+}
+EXPORT_SYMBOL(wcd_clsh_init);
+
+MODULE_DESCRIPTION("WCD9XXX Common Driver");
+MODULE_LICENSE("GPL v2");
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/sound/soc/codecs/wcd9xxx-common-v2.h	2019-01-22 16:16:29.555301211 +0100
@@ -0,0 +1,236 @@
+/*
+ * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _WCD9XXX_COMMON_V2
+
+#define _WCD9XXX_COMMON_V2
+
+#define CLSH_REQ_ENABLE true
+#define CLSH_REQ_DISABLE false
+
+#define WCD_CLSH_EVENT_PRE_DAC 0x01
+#define WCD_CLSH_EVENT_POST_PA 0x02
+#define MAX_VBAT_MONITOR_WRITES 17
+/*
+ * Basic states for Class H state machine.
+ * represented as a bit mask within a u8 data type
+ * bit 0: EAR mode
+ * bit 1: HPH Left mode
+ * bit 2: HPH Right mode
+ * bit 3: Lineout mode
+ */
+#define	WCD_CLSH_STATE_IDLE 0x00
+#define	WCD_CLSH_STATE_EAR (0x01 << 0)
+#define	WCD_CLSH_STATE_HPHL (0x01 << 1)
+#define	WCD_CLSH_STATE_HPHR (0x01 << 2)
+#define	WCD_CLSH_STATE_LO (0x01 << 3)
+
+/*
+ * Though the number of CLSH states is 4, the max state must be 5
+ * because the state array index starts from 1.
+ */
+#define WCD_CLSH_STATE_MAX 5
+#define NUM_CLSH_STATES_V2 (0x01 << WCD_CLSH_STATE_MAX)
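+/*
+ * NUM_CLSH_STATES_V2 therefore evaluates to 32, which is large enough
+ * for the state handler table to be indexed directly by any bitmask
+ * built from the four state bits above.
+ */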
+
+
+/* Derived State: Bits 1 and 2 should be set for Headphone stereo */
+#define WCD_CLSH_STATE_HPH_ST (WCD_CLSH_STATE_HPHL | \
+			       WCD_CLSH_STATE_HPHR)
+
+#define WCD_CLSH_STATE_HPHL_LO (WCD_CLSH_STATE_HPHL | \
+				    WCD_CLSH_STATE_LO)
+#define WCD_CLSH_STATE_HPHR_LO (WCD_CLSH_STATE_HPHR | \
+				    WCD_CLSH_STATE_LO)
+#define WCD_CLSH_STATE_HPH_ST_LO (WCD_CLSH_STATE_HPH_ST | \
+				      WCD_CLSH_STATE_LO)
+#define WCD_CLSH_STATE_EAR_LO (WCD_CLSH_STATE_EAR | \
+				   WCD_CLSH_STATE_LO)
+#define WCD_CLSH_STATE_HPHL_EAR (WCD_CLSH_STATE_HPHL | \
+				     WCD_CLSH_STATE_EAR)
+#define WCD_CLSH_STATE_HPHR_EAR (WCD_CLSH_STATE_HPHR | \
+				     WCD_CLSH_STATE_EAR)
+#define WCD_CLSH_STATE_HPH_ST_EAR (WCD_CLSH_STATE_HPH_ST | \
+				       WCD_CLSH_STATE_EAR)
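+
+/*
+ * For example, WCD_CLSH_STATE_HPHL_LO expands to
+ * (0x01 << 1) | (0x01 << 3) = 0x0a, i.e. the HPH-left and lineout
+ * bits set together.
+ */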
+
+enum {
+	CLS_H_NORMAL = 0, /* Class-H Default */
+	CLS_H_HIFI, /* Class-H HiFi */
+	CLS_H_LP, /* Class-H Low Power */
+	CLS_AB, /* Class-AB Low HIFI*/
+	CLS_H_LOHIFI, /* LoHIFI */
+	CLS_H_ULP, /* Ultra Low power */
+	CLS_AB_HIFI, /* Class-AB */
+	CLS_NONE, /* None of the above modes */
+};
+
+/* Class H data that the codec driver will maintain */
+struct wcd_clsh_cdc_data {
+	u8 state;
+	int flyback_users;
+	int buck_users;
+	int clsh_users;
+	int interpolator_modes[WCD_CLSH_STATE_MAX];
+};
+
+struct wcd_mad_audio_header {
+	u32 reserved[3];
+	u32 num_reg_cfg;
+};
+
+struct wcd_mad_microphone_info {
+	uint8_t input_microphone;
+	uint8_t cycle_time;
+	uint8_t settle_time;
+	uint8_t padding;
+} __packed;
+
+struct wcd_mad_micbias_info {
+	uint8_t micbias;
+	uint8_t k_factor;
+	uint8_t external_bypass_capacitor;
+	uint8_t internal_biasing;
+	uint8_t cfilter;
+	uint8_t padding[3];
+} __packed;
+
+struct wcd_mad_rms_audio_beacon_info {
+	uint8_t rms_omit_samples;
+	uint8_t rms_comp_time;
+	uint8_t detection_mechanism;
+	uint8_t rms_diff_threshold;
+	uint8_t rms_threshold_lsb;
+	uint8_t rms_threshold_msb;
+	uint8_t padding[2];
+	uint8_t iir_coefficients[36];
+} __packed;
+
+struct wcd_mad_rms_ultrasound_info {
+	uint8_t rms_comp_time;
+	uint8_t detection_mechanism;
+	uint8_t rms_diff_threshold;
+	uint8_t rms_threshold_lsb;
+	uint8_t rms_threshold_msb;
+	uint8_t padding[3];
+	uint8_t iir_coefficients[36];
+} __packed;
+
+struct wcd_mad_audio_cal {
+	uint32_t version;
+	struct wcd_mad_microphone_info microphone_info;
+	struct wcd_mad_micbias_info micbias_info;
+	struct wcd_mad_rms_audio_beacon_info audio_info;
+	struct wcd_mad_rms_audio_beacon_info beacon_info;
+	struct wcd_mad_rms_ultrasound_info ultrasound_info;
+} __packed;
+
+struct wcd9xxx_anc_header {
+	u32 reserved[3];
+	u32 num_anc_slots;
+};
+
+struct vbat_monitor_reg {
+	u32 size;
+	u32 writes[MAX_VBAT_MONITOR_WRITES];
+} __packed;
+
+struct wcd_reg_mask_val {
+	u16	reg;
+	u8	mask;
+	u8	val;
+};
+
+extern void wcd_clsh_fsm(struct snd_soc_codec *codec,
+		struct wcd_clsh_cdc_data *cdc_clsh_d,
+		u8 clsh_event, u8 req_state,
+		int int_mode);
+
+extern void wcd_clsh_init(struct wcd_clsh_cdc_data *clsh);
+extern int wcd_clsh_get_clsh_state(struct wcd_clsh_cdc_data *clsh);
+extern void wcd_clsh_imped_config(struct snd_soc_codec *codec, int imped,
+		bool reset);
+
+enum {
+	RESERVED = 0,
+	AANC_LPF_FF_FB = 1,
+	AANC_LPF_COEFF_MSB,
+	AANC_LPF_COEFF_LSB,
+	HW_MAD_AUDIO_ENABLE,
+	HW_MAD_ULTR_ENABLE,
+	HW_MAD_BEACON_ENABLE,
+	HW_MAD_AUDIO_SLEEP_TIME,
+	HW_MAD_ULTR_SLEEP_TIME,
+	HW_MAD_BEACON_SLEEP_TIME,
+	HW_MAD_TX_AUDIO_SWITCH_OFF,
+	HW_MAD_TX_ULTR_SWITCH_OFF,
+	HW_MAD_TX_BEACON_SWITCH_OFF,
+	MAD_AUDIO_INT_DEST_SELECT_REG,
+	MAD_ULT_INT_DEST_SELECT_REG,
+	MAD_BEACON_INT_DEST_SELECT_REG,
+	MAD_CLIP_INT_DEST_SELECT_REG,
+	VBAT_INT_DEST_SELECT_REG,
+	MAD_AUDIO_INT_MASK_REG,
+	MAD_ULT_INT_MASK_REG,
+	MAD_BEACON_INT_MASK_REG,
+	MAD_CLIP_INT_MASK_REG,
+	VBAT_INT_MASK_REG,
+	MAD_AUDIO_INT_STATUS_REG,
+	MAD_ULT_INT_STATUS_REG,
+	MAD_BEACON_INT_STATUS_REG,
+	MAD_CLIP_INT_STATUS_REG,
+	VBAT_INT_STATUS_REG,
+	MAD_AUDIO_INT_CLEAR_REG,
+	MAD_ULT_INT_CLEAR_REG,
+	MAD_BEACON_INT_CLEAR_REG,
+	MAD_CLIP_INT_CLEAR_REG,
+	VBAT_INT_CLEAR_REG,
+	SB_PGD_PORT_TX_WATERMARK_N,
+	SB_PGD_PORT_TX_ENABLE_N,
+	SB_PGD_PORT_RX_WATERMARK_N,
+	SB_PGD_PORT_RX_ENABLE_N,
+	SB_PGD_TX_PORTn_MULTI_CHNL_0,
+	SB_PGD_TX_PORTn_MULTI_CHNL_1,
+	SB_PGD_RX_PORTn_MULTI_CHNL_0,
+	SB_PGD_RX_PORTn_MULTI_CHNL_1,
+	AANC_FF_GAIN_ADAPTIVE,
+	AANC_FFGAIN_ADAPTIVE_EN,
+	AANC_GAIN_CONTROL,
+	SPKR_CLIP_PIPE_BANK_SEL,
+	SPKR_CLIPDET_VAL0,
+	SPKR_CLIPDET_VAL1,
+	SPKR_CLIPDET_VAL2,
+	SPKR_CLIPDET_VAL3,
+	SPKR_CLIPDET_VAL4,
+	SPKR_CLIPDET_VAL5,
+	SPKR_CLIPDET_VAL6,
+	SPKR_CLIPDET_VAL7,
+	VBAT_RELEASE_INT_DEST_SELECT_REG,
+	VBAT_RELEASE_INT_MASK_REG,
+	VBAT_RELEASE_INT_STATUS_REG,
+	VBAT_RELEASE_INT_CLEAR_REG,
+	MAD2_CLIP_INT_DEST_SELECT_REG,
+	MAD2_CLIP_INT_MASK_REG,
+	MAD2_CLIP_INT_STATUS_REG,
+	MAD2_CLIP_INT_CLEAR_REG,
+	SPKR2_CLIP_PIPE_BANK_SEL,
+	SPKR2_CLIPDET_VAL0,
+	SPKR2_CLIPDET_VAL1,
+	SPKR2_CLIPDET_VAL2,
+	SPKR2_CLIPDET_VAL3,
+	SPKR2_CLIPDET_VAL4,
+	SPKR2_CLIPDET_VAL5,
+	SPKR2_CLIPDET_VAL6,
+	SPKR2_CLIPDET_VAL7,
+	MAX_CFG_REGISTERS,
+};
+
+#endif
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/sound/soc/codecs/wcd9xxx-mbhc.c	2019-01-22 16:16:29.555301211 +0100
@@ -0,0 +1,5643 @@
+/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/firmware.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+#include <linux/device.h>
+#include <linux/printk.h>
+#include <linux/ratelimit.h>
+#include <linux/debugfs.h>
+#include <linux/list.h>
+#include <linux/mfd/wcd9xxx/core.h>
+#include <linux/mfd/wcd9xxx/wcd9xxx-irq.h>
+#include <linux/mfd/wcd9xxx/wcd9xxx_registers.h>
+#include <linux/mfd/wcd9xxx/wcd9320_registers.h>
+#include <linux/mfd/wcd9xxx/pdata.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+#include <sound/soc-dapm.h>
+#include <sound/jack.h>
+#include <sound/tlv.h>
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include <linux/pm_runtime.h>
+#include <linux/kernel.h>
+#include <linux/gpio.h>
+#include <linux/input.h>
+#include "wcd9xxx-mbhc.h"
+#include "wcdcal-hwdep.h"
+#include "wcd9xxx-resmgr.h"
+#include "wcd9xxx-common.h"
+
+#define WCD9XXX_JACK_MASK (SND_JACK_HEADSET | SND_JACK_OC_HPHL | \
+			   SND_JACK_OC_HPHR | SND_JACK_LINEOUT | \
+			   SND_JACK_UNSUPPORTED | SND_JACK_MICROPHONE2 | \
+			   SND_JACK_MECHANICAL)
+#define WCD9XXX_JACK_BUTTON_MASK (SND_JACK_BTN_0 | SND_JACK_BTN_1 | \
+				  SND_JACK_BTN_2 | SND_JACK_BTN_3 | \
+				  SND_JACK_BTN_4 | SND_JACK_BTN_5)
+
+#define NUM_DCE_PLUG_DETECT 3
+#define NUM_DCE_PLUG_INS_DETECT 5
+#define NUM_ATTEMPTS_INSERT_DETECT 25
+#define NUM_ATTEMPTS_TO_REPORT 5
+
+#define FAKE_INS_LOW 10
+#define FAKE_INS_HIGH 80
+#define FAKE_INS_HIGH_NO_SWCH 150
+#define FAKE_REMOVAL_MIN_PERIOD_MS 50
+#define FAKE_INS_DELTA_SCALED_MV 300
+
+#define BUTTON_MIN 0x8000
+#define STATUS_REL_DETECTION 0x0C
+
+#define HS_DETECT_PLUG_TIME_MS (5 * 1000)
+#define ANC_HPH_DETECT_PLUG_TIME_MS (5 * 1000)
+#define HS_DETECT_PLUG_INERVAL_MS 100
+#define SWCH_REL_DEBOUNCE_TIME_MS 50
+#define SWCH_IRQ_DEBOUNCE_TIME_US 5000
+#define BTN_RELEASE_DEBOUNCE_TIME_MS 25
+
+#define GND_MIC_SWAP_THRESHOLD 2
+#define OCP_ATTEMPT 1
+
+#define FW_READ_ATTEMPTS 15
+#define FW_READ_TIMEOUT 4000000
+
+#define BUTTON_POLLING_SUPPORTED true
+
+#define MCLK_RATE_12288KHZ 12288000
+#define MCLK_RATE_9600KHZ 9600000
+
+#define DEFAULT_DCE_STA_WAIT 55
+#define DEFAULT_DCE_WAIT 60000
+#define DEFAULT_STA_WAIT 5000
+
+#define VDDIO_MICBIAS_MV 1800
+
+#define WCD9XXX_MICBIAS_PULLDOWN_SETTLE_US 5000
+
+#define WCD9XXX_HPHL_STATUS_READY_WAIT_US 1000
+#define WCD9XXX_MUX_SWITCH_READY_WAIT_MS 50
+#define WCD9XXX_MEAS_DELTA_MAX_MV 120
+#define WCD9XXX_MEAS_INVALD_RANGE_LOW_MV 20
+#define WCD9XXX_MEAS_INVALD_RANGE_HIGH_MV 80
+
+/* Threshold in milliohm used for mono/stereo
+ * plug classification
+ */
+#define WCD9XXX_MONO_HS_DIFF_THR 20000000
+#define WCD9XXX_MONO_HS_MIN_THR 2000
+
+/*
+ * Invalid voltage range for the detection
+ * of plug type with current source
+ */
+#define WCD9XXX_CS_MEAS_INVALD_RANGE_LOW_MV 160
+#define WCD9XXX_CS_MEAS_INVALD_RANGE_HIGH_MV 265
+
+/*
+ * Threshold used to detect euro headset
+ * with current source
+ */
+#define WCD9XXX_CS_GM_SWAP_THRES_MIN_MV 10
+#define WCD9XXX_CS_GM_SWAP_THRES_MAX_MV 40
+
+#define WCD9XXX_MBHC_NSC_CS 9
+#define WCD9XXX_GM_SWAP_THRES_MIN_MV 150
+#define WCD9XXX_GM_SWAP_THRES_MAX_MV 650
+#define WCD9XXX_THRESHOLD_MIC_THRESHOLD 200
+
+#define WCD9XXX_USLEEP_RANGE_MARGIN_US 100
+
+/* RX_HPH_CNP_WG_TIME increases by 0.24ms */
+#define WCD9XXX_WG_TIME_FACTOR_US	240
+
+#define WCD9XXX_V_CS_HS_MAX 500
+#define WCD9XXX_V_CS_NO_MIC 5
+#define WCD9XXX_MB_MEAS_DELTA_MAX_MV 80
+#define WCD9XXX_CS_MEAS_DELTA_MAX_MV 12
+
+#define WCD9XXX_ZDET_ZONE_1 80000
+#define WCD9XXX_ZDET_ZONE_2 800000
+
+#define WCD9XXX_IS_IN_ZDET_ZONE_1(x) (x < WCD9XXX_ZDET_ZONE_1 ? 1 : 0)
+#define WCD9XXX_IS_IN_ZDET_ZONE_2(x) ((x > WCD9XXX_ZDET_ZONE_1 && \
+				x < WCD9XXX_ZDET_ZONE_2) ? 1 : 0)
+#define WCD9XXX_IS_IN_ZDET_ZONE_3(x) (x > WCD9XXX_ZDET_ZONE_2 ? 1 : 0)
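+
+/*
+ * The impedance fed to these macros is in milliohm: e.g. a 100 Ohm
+ * load (100000) falls in zone 2 and a 1 kOhm load (1000000) falls in
+ * zone 3.
+ */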
+#define WCD9XXX_BOX_CAR_AVRG_MIN 1
+#define WCD9XXX_BOX_CAR_AVRG_MAX 10
+
+/*
+ * Need to report LINEIN if H/L impedance
+ * is larger than 5K ohm
+ */
+#define WCD9XXX_LINEIN_THRESHOLD 5000000
+
+static int impedance_detect_en;
+module_param(impedance_detect_en, int,
+			S_IRUGO | S_IWUSR | S_IWGRP);
+MODULE_PARM_DESC(impedance_detect_en, "enable/disable impedance detect");
+static unsigned int z_det_box_car_avg = 1;
+module_param(z_det_box_car_avg, uint,
+			S_IRUGO | S_IWUSR | S_IWGRP);
+MODULE_PARM_DESC(z_det_box_car_avg,
+		 "Number of samples for impedance detection");
+
+static bool detect_use_vddio_switch;
+
+struct wcd9xxx_mbhc_detect {
+	u16 dce;
+	u16 sta;
+	u16 hphl_status;
+	bool swap_gnd;
+	bool vddio;
+	bool hwvalue;
+	bool mic_bias;
+	/* fields below are for internal use only */
+	bool _above_no_mic;
+	bool _below_v_hs_max;
+	s16 _vdces;
+	enum wcd9xxx_mbhc_plug_type _type;
+};
+
+enum meas_type {
+	STA = 0,
+	DCE,
+};
+
+enum {
+	MBHC_USE_HPHL_TRIGGER = 1,
+	MBHC_USE_MB_TRIGGER = 2
+};
+
+/*
+ * Flags to track PA and DAC state.
+ * PA and DAC should be tracked separately as AUXPGA loopback requires
+ * only PA to be turned on without DAC being on.
+ */
+enum pa_dac_ack_flags {
+	WCD9XXX_HPHL_PA_OFF_ACK = 0,
+	WCD9XXX_HPHR_PA_OFF_ACK,
+	WCD9XXX_HPHL_DAC_OFF_ACK,
+	WCD9XXX_HPHR_DAC_OFF_ACK
+};
+
+enum wcd9xxx_current_v_idx {
+	WCD9XXX_CURRENT_V_INS_H,
+	WCD9XXX_CURRENT_V_INS_HU,
+	WCD9XXX_CURRENT_V_B1_H,
+	WCD9XXX_CURRENT_V_B1_HU,
+	WCD9XXX_CURRENT_V_BR_H,
+};
+
+static int wcd9xxx_detect_impedance(struct wcd9xxx_mbhc *mbhc, uint32_t *zl,
+				    uint32_t *zr);
+static s16 wcd9xxx_get_current_v(struct wcd9xxx_mbhc *mbhc,
+				 const enum wcd9xxx_current_v_idx idx);
+static void wcd9xxx_get_z(struct wcd9xxx_mbhc *mbhc, s16 *dce_z, s16 *sta_z,
+			  struct mbhc_micbias_regs *micb_regs,
+			  bool norel);
+
+static void wcd9xxx_mbhc_calc_thres(struct wcd9xxx_mbhc *mbhc);
+
+static u16 wcd9xxx_codec_v_sta_dce(struct wcd9xxx_mbhc *mbhc,
+				   enum meas_type dce, s16 vin_mv,
+				   bool cs_enable);
+
+static bool wcd9xxx_mbhc_polling(struct wcd9xxx_mbhc *mbhc)
+{
+	return snd_soc_read(mbhc->codec, WCD9XXX_A_CDC_MBHC_EN_CTL) & 0x1;
+}
+
+static void wcd9xxx_turn_onoff_override(struct wcd9xxx_mbhc *mbhc, bool on)
+{
+	struct snd_soc_codec *codec = mbhc->codec;
+
+	snd_soc_update_bits(codec, WCD9XXX_A_CDC_MBHC_B1_CTL,
+			    0x04, on ? 0x04 : 0x00);
+}
+
+/* called under codec_resource_lock acquisition */
+static void wcd9xxx_pause_hs_polling(struct wcd9xxx_mbhc *mbhc)
+{
+	struct snd_soc_codec *codec = mbhc->codec;
+
+	pr_debug("%s: enter\n", __func__);
+	if (!mbhc->polling_active) {
+		pr_debug("polling not active, nothing to pause\n");
+		return;
+	}
+
+	/* Soft reset MBHC block */
+	snd_soc_update_bits(codec, WCD9XXX_A_CDC_MBHC_CLK_CTL, 0x8, 0x8);
+	pr_debug("%s: leave\n", __func__);
+}
+
+/* called under codec_resource_lock acquisition */
+static void wcd9xxx_start_hs_polling(struct wcd9xxx_mbhc *mbhc)
+{
+	struct snd_soc_codec *codec = mbhc->codec;
+	int mbhc_state = mbhc->mbhc_state;
+
+	pr_debug("%s: enter\n", __func__);
+	if (!mbhc->polling_active) {
+		pr_debug("Polling is not active, do not start polling\n");
+		return;
+	}
+
+	/*
+	 * setup internal micbias if codec uses internal micbias for
+	 * headset detection
+	 */
+	if (mbhc->mbhc_cfg->use_int_rbias) {
+		if (mbhc->mbhc_cb && mbhc->mbhc_cb->setup_int_rbias)
+			mbhc->mbhc_cb->setup_int_rbias(codec, true);
+		else
+			pr_err("%s: internal bias requested but codec did not provide callback\n",
+				__func__);
+	}
+
+	snd_soc_write(codec, WCD9XXX_A_MBHC_SCALING_MUX_1, 0x04);
+	if (mbhc->mbhc_cb && mbhc->mbhc_cb->enable_mux_bias_block)
+		mbhc->mbhc_cb->enable_mux_bias_block(codec);
+	else
+		snd_soc_update_bits(codec, WCD9XXX_A_MBHC_SCALING_MUX_1,
+				    0x80, 0x80);
+
+	if (!mbhc->no_mic_headset_override &&
+	    mbhc_state == MBHC_STATE_POTENTIAL) {
+		pr_debug("%s recovering MBHC state machine\n", __func__);
+		mbhc->mbhc_state = MBHC_STATE_POTENTIAL_RECOVERY;
+		/* set to max button press threshold */
+		snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_VOLT_B2_CTL, 0x7F);
+		snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_VOLT_B1_CTL, 0xFF);
+		snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_VOLT_B4_CTL, 0x7F);
+		snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_VOLT_B3_CTL, 0xFF);
+		/* set to max */
+		snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_VOLT_B6_CTL, 0x7F);
+		snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_VOLT_B5_CTL, 0xFF);
+	}
+
+	snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_EN_CTL, 0x1);
+	snd_soc_update_bits(codec, WCD9XXX_A_CDC_MBHC_CLK_CTL, 0x8, 0x0);
+	snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_EN_CTL, 0x1);
+	pr_debug("%s: leave\n", __func__);
+}
+
+static int __wcd9xxx_resmgr_get_k_val(struct wcd9xxx_mbhc *mbhc,
+		unsigned int cfilt_mv)
+{
+	return wcd9xxx_resmgr_get_k_val(mbhc->resmgr, cfilt_mv);
+}
+
+/*
+ * called under codec_resource_lock acquisition
+ * return old status
+ */
+static bool __wcd9xxx_switch_micbias(struct wcd9xxx_mbhc *mbhc,
+				     int vddio_switch, bool restartpolling,
+				     bool checkpolling)
+{
+	bool ret;
+	int cfilt_k_val;
+	bool override;
+	struct snd_soc_codec *codec;
+	struct mbhc_internal_cal_data *d = &mbhc->mbhc_data;
+
+	codec = mbhc->codec;
+
+	if (mbhc->micbias_enable) {
+		pr_debug("%s: micbias is already on\n", __func__);
+		ret = mbhc->mbhc_micbias_switched;
+		return ret;
+	}
+
+	ret = mbhc->mbhc_micbias_switched;
+	if (vddio_switch && !mbhc->mbhc_micbias_switched &&
+	    (!checkpolling || mbhc->polling_active)) {
+		if (restartpolling)
+			wcd9xxx_pause_hs_polling(mbhc);
+		override = snd_soc_read(codec, WCD9XXX_A_CDC_MBHC_B1_CTL) &
+			   0x04;
+		if (!override)
+			wcd9xxx_turn_onoff_override(mbhc, true);
+
+		snd_soc_update_bits(codec, WCD9XXX_A_MAD_ANA_CTRL,
+				    0x10, 0x00);
+		snd_soc_update_bits(codec, WCD9XXX_A_LDO_H_MODE_1,
+				    0x20, 0x00);
+		/* Adjust threshold if Mic Bias voltage changes */
+		if (d->micb_mv != VDDIO_MICBIAS_MV) {
+			cfilt_k_val = __wcd9xxx_resmgr_get_k_val(mbhc,
+							      VDDIO_MICBIAS_MV);
+			usleep_range(10000, 10100);
+			snd_soc_update_bits(codec,
+					mbhc->mbhc_bias_regs.cfilt_val,
+					0xFC, (cfilt_k_val << 2));
+			usleep_range(10000, 10100);
+			/* Thresholds for insertion/removal */
+			snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_VOLT_B1_CTL,
+				      d->v_ins_hu[MBHC_V_IDX_VDDIO] & 0xFF);
+			snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_VOLT_B2_CTL,
+				      (d->v_ins_hu[MBHC_V_IDX_VDDIO] >> 8) &
+				      0xFF);
+
+			if (mbhc->mbhc_state != MBHC_STATE_POTENTIAL_RECOVERY) {
+				/* Thresholds for button press */
+				snd_soc_write(codec,
+					WCD9XXX_A_CDC_MBHC_VOLT_B3_CTL,
+					d->v_b1_hu[MBHC_V_IDX_VDDIO] & 0xFF);
+				snd_soc_write(codec,
+					WCD9XXX_A_CDC_MBHC_VOLT_B4_CTL,
+					(d->v_b1_hu[MBHC_V_IDX_VDDIO] >> 8) &
+					0xFF);
+				snd_soc_write(codec,
+					WCD9XXX_A_CDC_MBHC_VOLT_B5_CTL,
+					d->v_b1_h[MBHC_V_IDX_VDDIO] & 0xFF);
+				snd_soc_write(codec,
+					WCD9XXX_A_CDC_MBHC_VOLT_B6_CTL,
+					(d->v_b1_h[MBHC_V_IDX_VDDIO] >> 8) &
+					0xFF);
+				/* Thresholds for button release */
+				snd_soc_write(codec,
+					WCD9XXX_A_CDC_MBHC_VOLT_B9_CTL,
+					d->v_brh[MBHC_V_IDX_VDDIO] & 0xFF);
+				snd_soc_write(codec,
+					WCD9XXX_A_CDC_MBHC_VOLT_B10_CTL,
+					(d->v_brh[MBHC_V_IDX_VDDIO] >> 8) &
+					0xFF);
+			}
+			pr_debug("%s: Programmed MBHC thresholds to VDDIO\n",
+				 __func__);
+		}
+
+		/* Enable MIC BIAS Switch to VDDIO */
+		snd_soc_update_bits(codec, mbhc->mbhc_bias_regs.mbhc_reg,
+				    0x80, 0x80);
+		snd_soc_update_bits(codec, mbhc->mbhc_bias_regs.mbhc_reg,
+				    0x10, 0x00);
+		if (!override)
+			wcd9xxx_turn_onoff_override(mbhc, false);
+		if (restartpolling)
+			wcd9xxx_start_hs_polling(mbhc);
+
+		mbhc->mbhc_micbias_switched = true;
+		pr_debug("%s: VDDIO switch enabled\n", __func__);
+	} else if (!vddio_switch && mbhc->mbhc_micbias_switched) {
+		if ((!checkpolling || mbhc->polling_active) &&
+		    restartpolling)
+			wcd9xxx_pause_hs_polling(mbhc);
+
+		snd_soc_update_bits(codec, WCD9XXX_A_MAD_ANA_CTRL,
+				    0x10, 0x10);
+		snd_soc_update_bits(codec, WCD9XXX_A_LDO_H_MODE_1,
+				    0x20, 0x20);
+		/* Reprogram thresholds */
+		if (d->micb_mv != VDDIO_MICBIAS_MV) {
+			cfilt_k_val =
+			    __wcd9xxx_resmgr_get_k_val(mbhc,
+						     d->micb_mv);
+			snd_soc_update_bits(codec,
+					mbhc->mbhc_bias_regs.cfilt_val,
+					0xFC, (cfilt_k_val << 2));
+			usleep_range(10000, 10100);
+			/* Revert thresholds for insertion/removal */
+			snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_VOLT_B1_CTL,
+					d->v_ins_hu[MBHC_V_IDX_CFILT] & 0xFF);
+			snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_VOLT_B2_CTL,
+					(d->v_ins_hu[MBHC_V_IDX_CFILT] >> 8) &
+					0xFF);
+			if (mbhc->mbhc_state != MBHC_STATE_POTENTIAL_RECOVERY) {
+				/* Revert thresholds for button press */
+				snd_soc_write(codec,
+					WCD9XXX_A_CDC_MBHC_VOLT_B3_CTL,
+					d->v_b1_hu[MBHC_V_IDX_CFILT] & 0xFF);
+				snd_soc_write(codec,
+					WCD9XXX_A_CDC_MBHC_VOLT_B4_CTL,
+					(d->v_b1_hu[MBHC_V_IDX_CFILT] >> 8) &
+					0xFF);
+				snd_soc_write(codec,
+					WCD9XXX_A_CDC_MBHC_VOLT_B5_CTL,
+					d->v_b1_h[MBHC_V_IDX_CFILT] & 0xFF);
+				snd_soc_write(codec,
+					WCD9XXX_A_CDC_MBHC_VOLT_B6_CTL,
+					(d->v_b1_h[MBHC_V_IDX_CFILT] >> 8) &
+					0xFF);
+				/* Revert thresholds for button release */
+				snd_soc_write(codec,
+					WCD9XXX_A_CDC_MBHC_VOLT_B9_CTL,
+					d->v_brh[MBHC_V_IDX_CFILT] & 0xFF);
+				snd_soc_write(codec,
+					WCD9XXX_A_CDC_MBHC_VOLT_B10_CTL,
+					(d->v_brh[MBHC_V_IDX_CFILT] >> 8) &
+					0xFF);
+			}
+			pr_debug("%s: Programmed MBHC thresholds to MICBIAS\n",
+					__func__);
+		}
+
+		/* Disable MIC BIAS Switch to VDDIO */
+		snd_soc_update_bits(codec, mbhc->mbhc_bias_regs.mbhc_reg, 0x80,
+				    0x00);
+		snd_soc_update_bits(codec, mbhc->mbhc_bias_regs.mbhc_reg, 0x10,
+				    0x00);
+
+		if ((!checkpolling || mbhc->polling_active) && restartpolling)
+			wcd9xxx_start_hs_polling(mbhc);
+
+		mbhc->mbhc_micbias_switched = false;
+		pr_debug("%s: VDDIO switch disabled\n", __func__);
+	}
+
+	return ret;
+}
+
+static void wcd9xxx_switch_micbias(struct wcd9xxx_mbhc *mbhc, int vddio_switch)
+{
+	__wcd9xxx_switch_micbias(mbhc, vddio_switch, true, true);
+}
+
+static s16 wcd9xxx_get_current_v(struct wcd9xxx_mbhc *mbhc,
+				 const enum wcd9xxx_current_v_idx idx)
+{
+	enum mbhc_v_index vidx;
+	s16 ret = -EINVAL;
+
+	if ((mbhc->mbhc_data.micb_mv != VDDIO_MICBIAS_MV) &&
+	    mbhc->mbhc_micbias_switched)
+		vidx = MBHC_V_IDX_VDDIO;
+	else
+		vidx = MBHC_V_IDX_CFILT;
+
+	switch (idx) {
+	case WCD9XXX_CURRENT_V_INS_H:
+		ret = (s16)mbhc->mbhc_data.v_ins_h[vidx];
+		break;
+	case WCD9XXX_CURRENT_V_INS_HU:
+		ret = (s16)mbhc->mbhc_data.v_ins_hu[vidx];
+		break;
+	case WCD9XXX_CURRENT_V_B1_H:
+		ret = (s16)mbhc->mbhc_data.v_b1_h[vidx];
+		break;
+	case WCD9XXX_CURRENT_V_B1_HU:
+		ret = (s16)mbhc->mbhc_data.v_b1_hu[vidx];
+		break;
+	case WCD9XXX_CURRENT_V_BR_H:
+		ret = (s16)mbhc->mbhc_data.v_brh[vidx];
+		break;
+	}
+
+	return ret;
+}
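+
+/*
+ * The VDDIO-calibrated thresholds are returned whenever the micbias
+ * has been switched to VDDIO, so callers always see voltages that are
+ * consistent with the currently selected bias source.
+ */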
+
+void *wcd9xxx_mbhc_cal_btn_det_mp(
+			    const struct wcd9xxx_mbhc_btn_detect_cfg *btn_det,
+			    const enum wcd9xxx_mbhc_btn_det_mem mem)
+{
+	void *ret = (void *)&btn_det->_v_btn_low;
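+	/*
+	 * The cases below fall through on purpose: each one adds the
+	 * size of the members that precede the requested field, so the
+	 * pointer walks forward from _v_btn_low to the start of the
+	 * requested calibration member.
+	 */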
+
+	switch (mem) {
+	case MBHC_BTN_DET_GAIN:
+		ret += sizeof(btn_det->_n_cic);
+	case MBHC_BTN_DET_N_CIC:
+		ret += sizeof(btn_det->_n_ready);
+	case MBHC_BTN_DET_N_READY:
+		ret += sizeof(btn_det->_v_btn_high[0]) * btn_det->num_btn;
+	case MBHC_BTN_DET_V_BTN_HIGH:
+		ret += sizeof(btn_det->_v_btn_low[0]) * btn_det->num_btn;
+	case MBHC_BTN_DET_V_BTN_LOW:
+		/* do nothing */
+		break;
+	default:
+		ret = NULL;
+	}
+
+	return ret;
+}
+EXPORT_SYMBOL(wcd9xxx_mbhc_cal_btn_det_mp);
+
+static void wcd9xxx_calibrate_hs_polling(struct wcd9xxx_mbhc *mbhc)
+{
+	struct snd_soc_codec *codec = mbhc->codec;
+	const s16 v_ins_hu = wcd9xxx_get_current_v(mbhc,
+						   WCD9XXX_CURRENT_V_INS_HU);
+	const s16 v_b1_hu = wcd9xxx_get_current_v(mbhc,
+						  WCD9XXX_CURRENT_V_B1_HU);
+	const s16 v_b1_h = wcd9xxx_get_current_v(mbhc, WCD9XXX_CURRENT_V_B1_H);
+	const s16 v_brh = wcd9xxx_get_current_v(mbhc, WCD9XXX_CURRENT_V_BR_H);
+
+	snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_VOLT_B1_CTL, v_ins_hu & 0xFF);
+	snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_VOLT_B2_CTL,
+		      (v_ins_hu >> 8) & 0xFF);
+
+	if (mbhc->mbhc_state != MBHC_STATE_POTENTIAL_RECOVERY) {
+		snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_VOLT_B3_CTL, v_b1_hu &
+				0xFF);
+		snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_VOLT_B4_CTL,
+				(v_b1_hu >> 8) & 0xFF);
+		snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_VOLT_B5_CTL, v_b1_h &
+				0xFF);
+		snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_VOLT_B6_CTL,
+				(v_b1_h >> 8) & 0xFF);
+		snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_VOLT_B9_CTL, v_brh &
+				0xFF);
+		snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_VOLT_B10_CTL,
+				(v_brh >> 8) & 0xFF);
+		snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_VOLT_B11_CTL,
+				mbhc->mbhc_data.v_brl & 0xFF);
+		snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_VOLT_B12_CTL,
+				(mbhc->mbhc_data.v_brl >> 8) & 0xFF);
+	}
+}
+
+static void wcd9xxx_codec_switch_cfilt_mode(struct wcd9xxx_mbhc *mbhc,
+					    bool fast)
+{
+	struct snd_soc_codec *codec = mbhc->codec;
+	struct wcd9xxx_cfilt_mode cfilt_mode;
+
+	if (mbhc->mbhc_cb && mbhc->mbhc_cb->switch_cfilt_mode) {
+		cfilt_mode = mbhc->mbhc_cb->switch_cfilt_mode(mbhc, fast);
+	} else {
+		if (fast)
+			cfilt_mode.reg_mode_val = WCD9XXX_CFILT_FAST_MODE;
+		else
+			cfilt_mode.reg_mode_val = WCD9XXX_CFILT_SLOW_MODE;
+
+		cfilt_mode.reg_mask = 0x40;
+		cfilt_mode.cur_mode_val =
+		    snd_soc_read(codec, mbhc->mbhc_bias_regs.cfilt_ctl) & 0x40;
+	}
+
+	if (cfilt_mode.cur_mode_val
+			!= cfilt_mode.reg_mode_val) {
+		if (mbhc->polling_active && wcd9xxx_mbhc_polling(mbhc))
+			wcd9xxx_pause_hs_polling(mbhc);
+		snd_soc_update_bits(codec,
+				    mbhc->mbhc_bias_regs.cfilt_ctl,
+					cfilt_mode.reg_mask,
+					cfilt_mode.reg_mode_val);
+		if (mbhc->polling_active && wcd9xxx_mbhc_polling(mbhc))
+			wcd9xxx_start_hs_polling(mbhc);
+		pr_debug("%s: CFILT mode change (%x to %x)\n", __func__,
+			cfilt_mode.cur_mode_val,
+			cfilt_mode.reg_mode_val);
+	} else {
+		pr_debug("%s: CFILT Value is already %x\n",
+			 __func__, cfilt_mode.cur_mode_val);
+	}
+}
+
+static void wcd9xxx_jack_report(struct wcd9xxx_mbhc *mbhc,
+				struct snd_soc_jack *jack, int status, int mask)
+{
+	if (jack == &mbhc->headset_jack) {
+		wcd9xxx_resmgr_cond_update_cond(mbhc->resmgr,
+						WCD9XXX_COND_HPH_MIC,
+						status & SND_JACK_MICROPHONE);
+		wcd9xxx_resmgr_cond_update_cond(mbhc->resmgr,
+						WCD9XXX_COND_HPH,
+						status & SND_JACK_HEADPHONE);
+	}
+
+	snd_soc_jack_report(jack, status, mask);
+}
+
+static void __hphocp_off_report(struct wcd9xxx_mbhc *mbhc, u32 jack_status,
+				int irq)
+{
+	struct snd_soc_codec *codec;
+
+	pr_debug("%s: clear ocp status %x\n", __func__, jack_status);
+	codec = mbhc->codec;
+	if (mbhc->hph_status & jack_status) {
+		mbhc->hph_status &= ~jack_status;
+		wcd9xxx_jack_report(mbhc, &mbhc->headset_jack,
+				    mbhc->hph_status, WCD9XXX_JACK_MASK);
+		snd_soc_update_bits(codec, WCD9XXX_A_RX_HPH_OCP_CTL, 0x10,
+				    0x00);
+		snd_soc_update_bits(codec, WCD9XXX_A_RX_HPH_OCP_CTL, 0x10,
+				    0x10);
+		/*
+		 * reset retry counter as PA is turned off signifying
+		 * start of new OCP detection session
+		 */
+		if (irq == mbhc->intr_ids->hph_left_ocp)
+			mbhc->hphlocp_cnt = 0;
+		else
+			mbhc->hphrocp_cnt = 0;
+		wcd9xxx_enable_irq(mbhc->resmgr->core_res, irq);
+	}
+}
+
+static void hphrocp_off_report(struct wcd9xxx_mbhc *mbhc, u32 jack_status)
+{
+	__hphocp_off_report(mbhc, SND_JACK_OC_HPHR,
+			    mbhc->intr_ids->hph_right_ocp);
+}
+
+static void hphlocp_off_report(struct wcd9xxx_mbhc *mbhc, u32 jack_status)
+{
+	__hphocp_off_report(mbhc, SND_JACK_OC_HPHL,
+			    mbhc->intr_ids->hph_left_ocp);
+}
+
+static void wcd9xxx_get_mbhc_micbias_regs(struct wcd9xxx_mbhc *mbhc,
+				enum wcd9xxx_mbhc_micbias_type mb_type)
+{
+	unsigned int cfilt;
+	struct wcd9xxx_micbias_setting *micbias_pdata =
+		mbhc->resmgr->micbias_pdata;
+	struct mbhc_micbias_regs *micbias_regs;
+	enum wcd9xxx_micbias_num mb_num;
+
+	if (mb_type == MBHC_ANC_MIC_MB) {
+		micbias_regs = &mbhc->mbhc_anc_bias_regs;
+		mb_num = mbhc->mbhc_cfg->anc_micbias;
+	} else {
+		micbias_regs = &mbhc->mbhc_bias_regs;
+		mb_num = mbhc->mbhc_cfg->micbias;
+	}
+
+	switch (mb_num) {
+	case MBHC_MICBIAS1:
+		cfilt = micbias_pdata->bias1_cfilt_sel;
+		micbias_regs->mbhc_reg = WCD9XXX_A_MICB_1_MBHC;
+		micbias_regs->int_rbias = WCD9XXX_A_MICB_1_INT_RBIAS;
+		micbias_regs->ctl_reg = WCD9XXX_A_MICB_1_CTL;
+		break;
+	case MBHC_MICBIAS2:
+		cfilt = micbias_pdata->bias2_cfilt_sel;
+		micbias_regs->mbhc_reg = WCD9XXX_A_MICB_2_MBHC;
+		micbias_regs->int_rbias = WCD9XXX_A_MICB_2_INT_RBIAS;
+		micbias_regs->ctl_reg = WCD9XXX_A_MICB_2_CTL;
+		break;
+	case MBHC_MICBIAS3:
+		cfilt = micbias_pdata->bias3_cfilt_sel;
+		micbias_regs->mbhc_reg = WCD9XXX_A_MICB_3_MBHC;
+		micbias_regs->int_rbias = WCD9XXX_A_MICB_3_INT_RBIAS;
+		micbias_regs->ctl_reg = WCD9XXX_A_MICB_3_CTL;
+		break;
+	case MBHC_MICBIAS4:
+		cfilt = micbias_pdata->bias4_cfilt_sel;
+		micbias_regs->mbhc_reg = mbhc->resmgr->reg_addr->micb_4_mbhc;
+		micbias_regs->int_rbias =
+		    mbhc->resmgr->reg_addr->micb_4_int_rbias;
+		micbias_regs->ctl_reg = mbhc->resmgr->reg_addr->micb_4_ctl;
+		break;
+	default:
+		/* Should never reach here */
+		pr_err("%s: Invalid MIC BIAS for MBHC\n", __func__);
+		return;
+	}
+
+	micbias_regs->cfilt_sel = cfilt;
+
+	switch (cfilt) {
+	case WCD9XXX_CFILT1_SEL:
+		micbias_regs->cfilt_val = WCD9XXX_A_MICB_CFILT_1_VAL;
+		micbias_regs->cfilt_ctl = WCD9XXX_A_MICB_CFILT_1_CTL;
+		break;
+	case WCD9XXX_CFILT2_SEL:
+		micbias_regs->cfilt_val = WCD9XXX_A_MICB_CFILT_2_VAL;
+		micbias_regs->cfilt_ctl = WCD9XXX_A_MICB_CFILT_2_CTL;
+		break;
+	case WCD9XXX_CFILT3_SEL:
+		micbias_regs->cfilt_val = WCD9XXX_A_MICB_CFILT_3_VAL;
+		micbias_regs->cfilt_ctl = WCD9XXX_A_MICB_CFILT_3_CTL;
+		break;
+	}
+
+	if (mb_type == MBHC_PRIMARY_MIC_MB) {
+		switch (cfilt) {
+		case WCD9XXX_CFILT1_SEL:
+			mbhc->mbhc_data.micb_mv = micbias_pdata->cfilt1_mv;
+			break;
+		case WCD9XXX_CFILT2_SEL:
+			mbhc->mbhc_data.micb_mv = micbias_pdata->cfilt2_mv;
+			break;
+		case WCD9XXX_CFILT3_SEL:
+			mbhc->mbhc_data.micb_mv = micbias_pdata->cfilt3_mv;
+			break;
+		}
+	}
+}
+
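+/*
+ * Re-enable any HPH PA/DAC that MBHC shut off earlier (tracked through
+ * the *_OFF_ACK bits) and, if a PA was restored, wait one CNP
+ * wave-generation period (wg_time is treated as milliseconds below),
+ * presumably to let the PA output settle.
+ */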
+static void wcd9xxx_clr_and_turnon_hph_padac(struct wcd9xxx_mbhc *mbhc)
+{
+	bool pa_turned_on = false;
+	struct snd_soc_codec *codec = mbhc->codec;
+	u8 wg_time;
+
+	wg_time = snd_soc_read(codec, WCD9XXX_A_RX_HPH_CNP_WG_TIME);
+	wg_time += 1;
+
+	if (test_and_clear_bit(WCD9XXX_HPHR_DAC_OFF_ACK,
+			       &mbhc->hph_pa_dac_state)) {
+		pr_debug("%s: HPHR clear flag and enable DAC\n", __func__);
+		snd_soc_update_bits(codec, WCD9XXX_A_RX_HPH_R_DAC_CTL,
+				    0xC0, 0xC0);
+	}
+	if (test_and_clear_bit(WCD9XXX_HPHL_DAC_OFF_ACK,
+				&mbhc->hph_pa_dac_state)) {
+		pr_debug("%s: HPHL clear flag and enable DAC\n", __func__);
+		snd_soc_update_bits(codec, WCD9XXX_A_RX_HPH_L_DAC_CTL,
+				    0x80, 0x80);
+	}
+
+	if (test_and_clear_bit(WCD9XXX_HPHR_PA_OFF_ACK,
+			       &mbhc->hph_pa_dac_state)) {
+		pr_debug("%s: HPHR clear flag and enable PA\n", __func__);
+		snd_soc_update_bits(codec, WCD9XXX_A_RX_HPH_CNP_EN, 0x10,
+				    1 << 4);
+		pa_turned_on = true;
+	}
+	if (test_and_clear_bit(WCD9XXX_HPHL_PA_OFF_ACK,
+			       &mbhc->hph_pa_dac_state)) {
+		pr_debug("%s: HPHL clear flag and enable PA\n", __func__);
+		snd_soc_update_bits(codec, WCD9XXX_A_RX_HPH_CNP_EN, 0x20,
+				    1 << 5);
+		pa_turned_on = true;
+	}
+
+	if (pa_turned_on) {
+		pr_debug("%s: PA was turned on by MBHC and not by DAPM\n",
+			 __func__);
+		usleep_range(wg_time * 1000, wg_time * 1000 + 50);
+	}
+}
+
+static int wcd9xxx_cancel_btn_work(struct wcd9xxx_mbhc *mbhc)
+{
+	int r;
+
+	r = cancel_delayed_work_sync(&mbhc->mbhc_btn_dwork);
+	if (r)
+		/*
+		 * if scheduled mbhc.mbhc_btn_dwork is canceled from here,
+		 * we have to unlock from here instead of btn_work
+		 */
+		wcd9xxx_unlock_sleep(mbhc->resmgr->core_res);
+	return r;
+}
+
+static bool wcd9xxx_is_hph_dac_on(struct snd_soc_codec *codec, int left)
+{
+	u8 hph_reg_val = 0;
+	if (left)
+		hph_reg_val = snd_soc_read(codec, WCD9XXX_A_RX_HPH_L_DAC_CTL);
+	else
+		hph_reg_val = snd_soc_read(codec, WCD9XXX_A_RX_HPH_R_DAC_CTL);
+
+	return (hph_reg_val & 0xC0) ? true : false;
+}
+
+static bool wcd9xxx_is_hph_pa_on(struct snd_soc_codec *codec)
+{
+	u8 hph_reg_val = 0;
+	hph_reg_val = snd_soc_read(codec, WCD9XXX_A_RX_HPH_CNP_EN);
+
+	return (hph_reg_val & 0x30) ? true : false;
+}
+
+/* called under codec_resource_lock acquisition */
+static void wcd9xxx_set_and_turnoff_hph_padac(struct wcd9xxx_mbhc *mbhc)
+{
+	u8 wg_time;
+	struct snd_soc_codec *codec = mbhc->codec;
+
+	wg_time = snd_soc_read(codec, WCD9XXX_A_RX_HPH_CNP_WG_TIME);
+	wg_time += 1;
+
+	/* If headphone PA is on, check if userspace receives
+	 * removal event to sync-up PA's state */
+	if (wcd9xxx_is_hph_pa_on(codec)) {
+		pr_debug("%s PA is on, setting PA_OFF_ACK\n", __func__);
+		set_bit(WCD9XXX_HPHL_PA_OFF_ACK, &mbhc->hph_pa_dac_state);
+		set_bit(WCD9XXX_HPHR_PA_OFF_ACK, &mbhc->hph_pa_dac_state);
+	} else {
+		pr_debug("%s PA is off\n", __func__);
+	}
+
+	if (wcd9xxx_is_hph_dac_on(codec, 1))
+		set_bit(WCD9XXX_HPHL_DAC_OFF_ACK, &mbhc->hph_pa_dac_state);
+	if (wcd9xxx_is_hph_dac_on(codec, 0))
+		set_bit(WCD9XXX_HPHR_DAC_OFF_ACK, &mbhc->hph_pa_dac_state);
+
+	snd_soc_update_bits(codec, WCD9XXX_A_RX_HPH_CNP_EN, 0x30, 0x00);
+	snd_soc_update_bits(codec, WCD9XXX_A_RX_HPH_L_DAC_CTL, 0x80, 0x00);
+	snd_soc_update_bits(codec, WCD9XXX_A_RX_HPH_R_DAC_CTL, 0xC0, 0x00);
+	usleep_range(wg_time * 1000, wg_time * 1000 + 50);
+}
+
+static void wcd9xxx_insert_detect_setup(struct wcd9xxx_mbhc *mbhc, bool ins)
+{
+	if (!mbhc->mbhc_cfg->insert_detect)
+		return;
+	pr_debug("%s: Setting up %s detection\n", __func__,
+		 ins ? "insert" : "removal");
+	/* Disable detection to avoid glitch */
+	snd_soc_update_bits(mbhc->codec, WCD9XXX_A_MBHC_INSERT_DETECT, 1, 0);
+	if (mbhc->mbhc_cfg->gpio_level_insert)
+		snd_soc_write(mbhc->codec, WCD9XXX_A_MBHC_INSERT_DETECT,
+			      (0x68 | (ins ? (1 << 1) : 0)));
+	else
+		snd_soc_write(mbhc->codec, WCD9XXX_A_MBHC_INSERT_DETECT,
+			      (0x6C | (ins ? (1 << 1) : 0)));
+	/* Re-enable detection */
+	snd_soc_update_bits(mbhc->codec, WCD9XXX_A_MBHC_INSERT_DETECT, 1, 1);
+}
+
+/* called under codec_resource_lock acquisition */
+static void wcd9xxx_report_plug(struct wcd9xxx_mbhc *mbhc, int insertion,
+				enum snd_jack_types jack_type)
+{
+	WCD9XXX_BCL_ASSERT_LOCKED(mbhc->resmgr);
+
+	pr_debug("%s: enter insertion %d hph_status %x\n",
+		 __func__, insertion, mbhc->hph_status);
+	if (!insertion) {
+		/* Report removal */
+		mbhc->hph_status &= ~jack_type;
+		/*
+		 * cancel possibly scheduled btn work and
+		 * report release if we reported button press
+		 */
+		if (wcd9xxx_cancel_btn_work(mbhc))
+			pr_debug("%s: button press is canceled\n", __func__);
+		else if (mbhc->buttons_pressed) {
+			pr_debug("%s: release of button press%d\n",
+				 __func__, jack_type);
+			wcd9xxx_jack_report(mbhc, &mbhc->button_jack, 0,
+					    mbhc->buttons_pressed);
+			mbhc->buttons_pressed &=
+				~WCD9XXX_JACK_BUTTON_MASK;
+		}
+
+		if (mbhc->micbias_enable && mbhc->micbias_enable_cb) {
+			pr_debug("%s: Disabling micbias\n", __func__);
+			mbhc->micbias_enable = false;
+			mbhc->micbias_enable_cb(mbhc->codec, false,
+						mbhc->mbhc_cfg->micbias);
+		}
+		mbhc->zl = mbhc->zr = 0;
+		mbhc->hph_type = MBHC_HPH_NONE;
+		pr_debug("%s: Reporting removal %d(%x)\n", __func__,
+			 jack_type, mbhc->hph_status);
+		wcd9xxx_jack_report(mbhc, &mbhc->headset_jack, mbhc->hph_status,
+				    WCD9XXX_JACK_MASK);
+		wcd9xxx_set_and_turnoff_hph_padac(mbhc);
+		hphrocp_off_report(mbhc, SND_JACK_OC_HPHR);
+		hphlocp_off_report(mbhc, SND_JACK_OC_HPHL);
+		mbhc->current_plug = PLUG_TYPE_NONE;
+		mbhc->polling_active = false;
+		if (mbhc->mbhc_cb && mbhc->mbhc_cb->hph_auto_pulldown_ctrl)
+			mbhc->mbhc_cb->hph_auto_pulldown_ctrl(mbhc->codec,
+								false);
+	} else {
+		/*
+		 * Report removal of current jack type.
+		 * Headphone to headset shouldn't report headphone
+		 * removal.
+		 */
+		if (mbhc->mbhc_cfg->detect_extn_cable &&
+		    !(mbhc->current_plug == PLUG_TYPE_HEADPHONE &&
+		      jack_type == SND_JACK_HEADSET) &&
+		    (mbhc->hph_status && mbhc->hph_status != jack_type)) {
+			if (mbhc->micbias_enable && mbhc->micbias_enable_cb &&
+			    mbhc->hph_status == SND_JACK_HEADSET) {
+				pr_debug("%s: Disabling micbias\n", __func__);
+				mbhc->micbias_enable = false;
+				mbhc->micbias_enable_cb(mbhc->codec, false,
+						mbhc->mbhc_cfg->micbias);
+			}
+
+			pr_debug("%s: Reporting removal (%x)\n",
+				 __func__, mbhc->hph_status);
+			mbhc->zl = mbhc->zr = 0;
+			wcd9xxx_jack_report(mbhc, &mbhc->headset_jack,
+					    0, WCD9XXX_JACK_MASK);
+			mbhc->hph_status &= ~(SND_JACK_HEADSET |
+						SND_JACK_LINEOUT |
+						SND_JACK_ANC_HEADPHONE |
+						SND_JACK_UNSUPPORTED);
+			if (mbhc->mbhc_cb &&
+				 mbhc->mbhc_cb->hph_auto_pulldown_ctrl)
+				mbhc->mbhc_cb->hph_auto_pulldown_ctrl(
+								mbhc->codec,
+								false);
+		}
+
+		/* Report insertion */
+		if (jack_type == SND_JACK_HEADPHONE) {
+			mbhc->current_plug = PLUG_TYPE_HEADPHONE;
+		} else if (jack_type == SND_JACK_UNSUPPORTED) {
+			mbhc->current_plug = PLUG_TYPE_GND_MIC_SWAP;
+		} else if (jack_type == SND_JACK_HEADSET) {
+			mbhc->polling_active = BUTTON_POLLING_SUPPORTED;
+			mbhc->current_plug = PLUG_TYPE_HEADSET;
+			mbhc->update_z = true;
+		} else if (jack_type == SND_JACK_LINEOUT) {
+			mbhc->current_plug = PLUG_TYPE_HIGH_HPH;
+		} else if (jack_type == SND_JACK_ANC_HEADPHONE) {
+			mbhc->polling_active = BUTTON_POLLING_SUPPORTED;
+			mbhc->current_plug = PLUG_TYPE_ANC_HEADPHONE;
+		}
+
+		if (mbhc->impedance_detect && impedance_detect_en) {
+			wcd9xxx_detect_impedance(mbhc,
+					&mbhc->zl, &mbhc->zr);
+			if ((mbhc->zl > WCD9XXX_LINEIN_THRESHOLD) &&
+				(mbhc->zr > WCD9XXX_LINEIN_THRESHOLD)) {
+				jack_type = SND_JACK_LINEOUT;
+				mbhc->current_plug = PLUG_TYPE_HIGH_HPH;
+				pr_debug("%s: Replace with SND_JACK_LINEOUT\n",
+				__func__);
+			}
+		}
+
+		mbhc->hph_status |= jack_type;
+
+		if (mbhc->micbias_enable && mbhc->micbias_enable_cb) {
+			pr_debug("%s: Enabling micbias\n", __func__);
+			mbhc->micbias_enable_cb(mbhc->codec, true,
+						mbhc->mbhc_cfg->micbias);
+		}
+
+		pr_debug("%s: Reporting insertion %d(%x)\n", __func__,
+			 jack_type, mbhc->hph_status);
+		wcd9xxx_jack_report(mbhc, &mbhc->headset_jack,
+				    (mbhc->hph_status | SND_JACK_MECHANICAL),
+				    WCD9XXX_JACK_MASK);
+		/*
+		 * if PA is already on, switch micbias
+		 * source to VDDIO
+		 */
+		if (((mbhc->current_plug == PLUG_TYPE_HEADSET) ||
+		     (mbhc->current_plug == PLUG_TYPE_ANC_HEADPHONE)) &&
+		    ((mbhc->event_state & (1 << MBHC_EVENT_PA_HPHL |
+			1 << MBHC_EVENT_PA_HPHR))))
+			__wcd9xxx_switch_micbias(mbhc, 1, false,
+						 false);
+		wcd9xxx_clr_and_turnon_hph_padac(mbhc);
+	}
+	/* Setup insert detect */
+	wcd9xxx_insert_detect_setup(mbhc, !insertion);
+
+	pr_debug("%s: leave hph_status %x\n", __func__, mbhc->hph_status);
+}
+
+/* should be called from an interrupt context that prevents suspend */
+static void wcd9xxx_schedule_hs_detect_plug(struct wcd9xxx_mbhc *mbhc,
+					    struct work_struct *work)
+{
+	pr_debug("%s: scheduling wcd9xxx_correct_swch_plug\n", __func__);
+	WCD9XXX_BCL_ASSERT_LOCKED(mbhc->resmgr);
+	mbhc->hs_detect_work_stop = false;
+	wcd9xxx_lock_sleep(mbhc->resmgr->core_res);
+	schedule_work(work);
+}
+
+/* called under codec_resource_lock acquisition */
+static void wcd9xxx_cancel_hs_detect_plug(struct wcd9xxx_mbhc *mbhc,
+					 struct work_struct *work)
+{
+	pr_debug("%s: Canceling correct_plug_swch\n", __func__);
+	WCD9XXX_BCL_ASSERT_LOCKED(mbhc->resmgr);
+	mbhc->hs_detect_work_stop = true;
+	wmb();
+	WCD9XXX_BCL_UNLOCK(mbhc->resmgr);
+	if (cancel_work_sync(work)) {
+		pr_debug("%s: correct_plug_swch is canceled\n",
+			 __func__);
+		wcd9xxx_unlock_sleep(mbhc->resmgr->core_res);
+	}
+	WCD9XXX_BCL_LOCK(mbhc->resmgr);
+}
+
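+/*
+ * Rescale a measured voltage between the micbias and VDDIO domains.
+ * The k values come from __wcd9xxx_resmgr_get_k_val() for the two
+ * supply voltages; assuming a linear (k + 4) code-to-voltage mapping,
+ * scaling by (vddio_k + 4) / (mb_k + 4) converts a reading taken
+ * against one supply into the equivalent reading against the other.
+ */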
+static s16 scale_v_micb_vddio(struct wcd9xxx_mbhc *mbhc, int v, bool tovddio)
+{
+	int r;
+	int vddio_k, mb_k;
+	vddio_k = __wcd9xxx_resmgr_get_k_val(mbhc, VDDIO_MICBIAS_MV);
+	mb_k = __wcd9xxx_resmgr_get_k_val(mbhc, mbhc->mbhc_data.micb_mv);
+	if (tovddio)
+		r = v * (vddio_k + 4) / (mb_k + 4);
+	else
+		r = v * (mb_k + 4) / (vddio_k + 4);
+	return r;
+}
+
+static s16 wcd9xxx_get_current_v_hs_max(struct wcd9xxx_mbhc *mbhc)
+{
+	s16 v_hs_max;
+	struct wcd9xxx_mbhc_plug_type_cfg *plug_type;
+
+	plug_type = WCD9XXX_MBHC_CAL_PLUG_TYPE_PTR(mbhc->mbhc_cfg->calibration);
+	if ((mbhc->mbhc_data.micb_mv != VDDIO_MICBIAS_MV) &&
+	    mbhc->mbhc_micbias_switched)
+		v_hs_max = scale_v_micb_vddio(mbhc, plug_type->v_hs_max, true);
+	else
+		v_hs_max = plug_type->v_hs_max;
+	return v_hs_max;
+}
+
+static short wcd9xxx_read_sta_result(struct snd_soc_codec *codec)
+{
+	u8 bias_msb, bias_lsb;
+	short bias_value;
+
+	bias_msb = snd_soc_read(codec, WCD9XXX_A_CDC_MBHC_B3_STATUS);
+	bias_lsb = snd_soc_read(codec, WCD9XXX_A_CDC_MBHC_B2_STATUS);
+	bias_value = (bias_msb << 8) | bias_lsb;
+	return bias_value;
+}
+
+static short wcd9xxx_read_dce_result(struct snd_soc_codec *codec)
+{
+	u8 bias_msb, bias_lsb;
+	short bias_value;
+
+	bias_msb = snd_soc_read(codec, WCD9XXX_A_CDC_MBHC_B5_STATUS);
+	bias_lsb = snd_soc_read(codec, WCD9XXX_A_CDC_MBHC_B4_STATUS);
+	bias_value = (bias_msb << 8) | bias_lsb;
+	return bias_value;
+}
+
+static void wcd9xxx_turn_onoff_rel_detection(struct snd_soc_codec *codec,
+					     bool on)
+{
+	snd_soc_update_bits(codec, WCD9XXX_A_CDC_MBHC_B1_CTL, 0x02, on << 1);
+}
+
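+/*
+ * Run one STA or DCE measurement cycle (static / DC estimation, going
+ * by the usual MBHC naming): reset the block via CLK_CTL bit 3, kick a
+ * conversion through EN_CTL (0x4 for DCE, 0x2 for STA), wait the
+ * calibrated t_sta_dce/t_dce/t_sta delays, and read the result back.
+ */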
+static short __wcd9xxx_codec_sta_dce(struct wcd9xxx_mbhc *mbhc, int dce,
+				     bool override_bypass, bool noreldetection)
+{
+	short bias_value;
+	struct snd_soc_codec *codec = mbhc->codec;
+
+	wcd9xxx_disable_irq(mbhc->resmgr->core_res,
+			    mbhc->intr_ids->dce_est_complete);
+	if (noreldetection)
+		wcd9xxx_turn_onoff_rel_detection(codec, false);
+
+	if (mbhc->mbhc_cfg->do_recalibration)
+		snd_soc_update_bits(codec, WCD9XXX_A_CDC_MBHC_CLK_CTL, 0x2,
+				    0x0);
+	/* Turn on the override */
+	if (!override_bypass)
+		snd_soc_update_bits(codec, WCD9XXX_A_CDC_MBHC_B1_CTL, 0x4, 0x4);
+	if (dce) {
+		snd_soc_update_bits(codec, WCD9XXX_A_CDC_MBHC_CLK_CTL, 0x8,
+				    0x8);
+		snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_EN_CTL, 0x4);
+		snd_soc_update_bits(codec, WCD9XXX_A_CDC_MBHC_CLK_CTL, 0x8,
+				    0x0);
+		if (mbhc->mbhc_cfg->do_recalibration)
+			snd_soc_update_bits(codec, WCD9XXX_A_CDC_MBHC_CLK_CTL,
+					    0x2, 0x2);
+		usleep_range(mbhc->mbhc_data.t_sta_dce,
+			     mbhc->mbhc_data.t_sta_dce + 50);
+		snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_EN_CTL, 0x4);
+		usleep_range(mbhc->mbhc_data.t_dce, mbhc->mbhc_data.t_dce + 50);
+		bias_value = wcd9xxx_read_dce_result(codec);
+	} else {
+		snd_soc_update_bits(codec, WCD9XXX_A_CDC_MBHC_CLK_CTL, 0x8,
+				    0x8);
+		snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_EN_CTL, 0x2);
+		snd_soc_update_bits(codec, WCD9XXX_A_CDC_MBHC_CLK_CTL, 0x8,
+				    0x0);
+		if (mbhc->mbhc_cfg->do_recalibration)
+			snd_soc_update_bits(codec, WCD9XXX_A_CDC_MBHC_CLK_CTL,
+					    0x2, 0x2);
+		usleep_range(mbhc->mbhc_data.t_sta_dce,
+			     mbhc->mbhc_data.t_sta_dce + 50);
+		snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_EN_CTL, 0x2);
+		usleep_range(mbhc->mbhc_data.t_sta,
+			     mbhc->mbhc_data.t_sta + 50);
+		bias_value = wcd9xxx_read_sta_result(codec);
+		snd_soc_update_bits(codec, WCD9XXX_A_CDC_MBHC_CLK_CTL, 0x8,
+				    0x8);
+		snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_EN_CTL, 0x0);
+	}
+	/* Turn off the override after measuring mic voltage */
+	if (!override_bypass)
+		snd_soc_update_bits(codec, WCD9XXX_A_CDC_MBHC_B1_CTL, 0x04,
+				    0x00);
+
+	if (noreldetection)
+		wcd9xxx_turn_onoff_rel_detection(codec, true);
+	wcd9xxx_enable_irq(mbhc->resmgr->core_res,
+			   mbhc->intr_ids->dce_est_complete);
+
+	return bias_value;
+}
+
+static short wcd9xxx_codec_sta_dce(struct wcd9xxx_mbhc *mbhc, int dce,
+				   bool norel)
+{
+	bool override_bypass;
+
+	/* Bypass override if it is already enabled */
+	override_bypass = (snd_soc_read(mbhc->codec,
+					WCD9XXX_A_CDC_MBHC_B1_CTL) &
+			   0x04) ? true : false;
+
+	return __wcd9xxx_codec_sta_dce(mbhc, dce, override_bypass, norel);
+}
+
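+/*
+ * Convert a raw STA/DCE reading to millivolts using a two-point linear
+ * calibration: z is the zero-input reading and mb the reading at the
+ * full micbias voltage micb_mv, giving
+ *
+ *	mv = (value - z) * micb_mv / (mb - z)
+ *
+ * The (mb - z) check guards against a divide-by-zero on bad calibration
+ * data.
+ */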
+static s32 __wcd9xxx_codec_sta_dce_v(struct wcd9xxx_mbhc *mbhc, s8 dce,
+				     u16 bias_value, s16 z, u32 micb_mv)
+{
+	s16 value, mb;
+	s32 mv = 0;
+
+	value = bias_value;
+	if (dce) {
+		mb = (mbhc->mbhc_data.dce_mb);
+		if (mb - z)
+			mv = (value - z) * (s32)micb_mv / (mb - z);
+	} else {
+		mb = (mbhc->mbhc_data.sta_mb);
+		if (mb - z)
+			mv = (value - z) * (s32)micb_mv / (mb - z);
+	}
+
+	return mv;
+}
+
+static s32 wcd9xxx_codec_sta_dce_v(struct wcd9xxx_mbhc *mbhc, s8 dce,
+				   u16 bias_value)
+{
+	s16 z;
+	z = dce ? (s16)mbhc->mbhc_data.dce_z : (s16)mbhc->mbhc_data.sta_z;
+	return __wcd9xxx_codec_sta_dce_v(mbhc, dce, bias_value, z,
+					 mbhc->mbhc_data.micb_mv);
+}
+
+/* To enable/disable bandgap and RC oscillator */
+static void wcd9xxx_mbhc_ctrl_clk_bandgap(struct wcd9xxx_mbhc *mbhc,
+		bool enable)
+{
+	if (enable) {
+		WCD9XXX_BG_CLK_LOCK(mbhc->resmgr);
+		wcd9xxx_resmgr_get_bandgap(mbhc->resmgr,
+				WCD9XXX_BANDGAP_AUDIO_MODE);
+		if (mbhc->mbhc_cb && mbhc->mbhc_cb->codec_rco_ctrl) {
+			WCD9XXX_BG_CLK_UNLOCK(mbhc->resmgr);
+			mbhc->mbhc_cb->codec_rco_ctrl(mbhc->codec, true);
+		} else {
+			wcd9xxx_resmgr_get_clk_block(mbhc->resmgr,
+					WCD9XXX_CLK_RCO);
+			WCD9XXX_BG_CLK_UNLOCK(mbhc->resmgr);
+		}
+	} else {
+		if (mbhc->mbhc_cb && mbhc->mbhc_cb->codec_rco_ctrl) {
+			mbhc->mbhc_cb->codec_rco_ctrl(mbhc->codec, false);
+			WCD9XXX_BG_CLK_LOCK(mbhc->resmgr);
+		} else {
+			WCD9XXX_BG_CLK_LOCK(mbhc->resmgr);
+			wcd9xxx_resmgr_put_clk_block(mbhc->resmgr,
+					WCD9XXX_CLK_RCO);
+		}
+		wcd9xxx_resmgr_put_bandgap(mbhc->resmgr,
+				WCD9XXX_BANDGAP_AUDIO_MODE);
+		WCD9XXX_BG_CLK_UNLOCK(mbhc->resmgr);
+	}
+}
+
+/* called only from interrupt which is under codec_resource_lock acquisition */
+static short wcd9xxx_mbhc_setup_hs_polling(struct wcd9xxx_mbhc *mbhc,
+				struct mbhc_micbias_regs *mbhc_micb_regs,
+				bool is_cs_enable)
+{
+	struct snd_soc_codec *codec = mbhc->codec;
+	short bias_value;
+	u8 cfilt_mode;
+
+	WCD9XXX_BCL_ASSERT_LOCKED(mbhc->resmgr);
+
+	pr_debug("%s: enter\n", __func__);
+	if (!mbhc->mbhc_cfg->calibration) {
+		pr_err("%s: Error, no calibration exists\n", __func__);
+		return -ENODEV;
+	}
+
+	/* Enable external voltage source to micbias if present */
+	if (mbhc->mbhc_cb && mbhc->mbhc_cb->enable_mb_source)
+		mbhc->mbhc_cb->enable_mb_source(codec, true, true);
+
+	/*
+	 * setup internal micbias if codec uses internal micbias for
+	 * headset detection
+	 */
+	if (mbhc->mbhc_cfg->use_int_rbias) {
+		if (mbhc->mbhc_cb && mbhc->mbhc_cb->setup_int_rbias)
+			mbhc->mbhc_cb->setup_int_rbias(codec, true);
+		else
+			pr_err("%s: internal bias requested but codec did not provide callback\n",
+			       __func__);
+	}
+
+	snd_soc_update_bits(codec, WCD9XXX_A_CLK_BUFF_EN1, 0x05, 0x01);
+
+	/* Make sure CFILT is in fast mode, save current mode */
+	cfilt_mode = snd_soc_read(codec, mbhc_micb_regs->cfilt_ctl);
+	if (mbhc->mbhc_cb && mbhc->mbhc_cb->cfilt_fast_mode)
+		mbhc->mbhc_cb->cfilt_fast_mode(codec, mbhc);
+	else
+		snd_soc_update_bits(codec, mbhc_micb_regs->cfilt_ctl,
+				    0x70, 0x00);
+
+	snd_soc_update_bits(codec, WCD9XXX_A_CDC_MBHC_CLK_CTL, 0x2, 0x2);
+	snd_soc_write(codec, WCD9XXX_A_MBHC_SCALING_MUX_1,
+		      mbhc->scaling_mux_in);
+	pr_debug("%s:  scaling_mux_input: %d\n", __func__,
+						 mbhc->scaling_mux_in);
+
+	if (mbhc->mbhc_cb && mbhc->mbhc_cb->enable_mux_bias_block)
+		mbhc->mbhc_cb->enable_mux_bias_block(codec);
+	else
+		snd_soc_update_bits(codec, WCD9XXX_A_MBHC_SCALING_MUX_1,
+				    0x80, 0x80);
+
+	snd_soc_update_bits(codec, WCD9XXX_A_TX_7_MBHC_EN, 0x80, 0x80);
+	snd_soc_update_bits(codec, WCD9XXX_A_TX_7_MBHC_EN, 0x1F, 0x1C);
+	snd_soc_update_bits(codec, WCD9XXX_A_TX_7_MBHC_TEST_CTL, 0x40, 0x40);
+
+	snd_soc_update_bits(codec, WCD9XXX_A_TX_7_MBHC_EN, 0x80, 0x00);
+	snd_soc_update_bits(codec, WCD9XXX_A_CDC_MBHC_CLK_CTL, 0x8, 0x8);
+	snd_soc_update_bits(codec, WCD9XXX_A_CDC_MBHC_CLK_CTL, 0x8, 0x00);
+
+	snd_soc_update_bits(codec, WCD9XXX_A_CDC_MBHC_B1_CTL, 0x2, 0x2);
+	snd_soc_update_bits(codec, WCD9XXX_A_CDC_MBHC_CLK_CTL, 0x8, 0x8);
+
+	if (!mbhc->mbhc_cfg->do_recalibration) {
+		if (!is_cs_enable)
+			wcd9xxx_calibrate_hs_polling(mbhc);
+	}
+
+	/* don't flip override */
+	bias_value = __wcd9xxx_codec_sta_dce(mbhc, 1, true, true);
+	snd_soc_write(codec, mbhc_micb_regs->cfilt_ctl, cfilt_mode);
+	snd_soc_update_bits(codec, WCD9XXX_A_MBHC_HPH, 0x13, 0x00);
+
+	return bias_value;
+}
+
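+/*
+ * Re-measure the dce_z/sta_z zero-input baselines with the plug present
+ * and, when the readings are non-zero, refresh the derived thresholds so
+ * subsequent conversions track the current electrical conditions. When
+ * the current source is in use, dce_nsc_cs_z and v_cs_ins_h are updated
+ * the same way.
+ */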
+static void wcd9xxx_recalibrate(struct wcd9xxx_mbhc *mbhc,
+				struct mbhc_micbias_regs *mbhc_micb_regs,
+				bool is_cs_enable)
+{
+	struct snd_soc_codec *codec = mbhc->codec;
+	s16 reg;
+	int change;
+	struct wcd9xxx_mbhc_btn_detect_cfg *btn_det;
+	s16 sta_z = 0, dce_z = 0;
+
+	btn_det = WCD9XXX_MBHC_CAL_BTN_DET_PTR(mbhc->mbhc_cfg->calibration);
+
+	if (mbhc->mbhc_cfg->do_recalibration) {
+		/* recalibrate dce_z and sta_z */
+		reg = snd_soc_read(codec, WCD9XXX_A_CDC_MBHC_B1_CTL);
+		change = snd_soc_update_bits(codec, WCD9XXX_A_CDC_MBHC_B1_CTL,
+					     0x78, btn_det->mbhc_nsc << 3);
+		wcd9xxx_get_z(mbhc, &dce_z, &sta_z, mbhc_micb_regs, true);
+		if (change)
+			snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_B1_CTL, reg);
+		if (dce_z && sta_z) {
+			pr_debug("%s: sta_z 0x%x -> 0x%x, dce_z 0x%x -> 0x%x\n",
+				 __func__,
+				 mbhc->mbhc_data.sta_z, sta_z & 0xffff,
+				 mbhc->mbhc_data.dce_z, dce_z & 0xffff);
+			mbhc->mbhc_data.dce_z = dce_z;
+			mbhc->mbhc_data.sta_z = sta_z;
+			wcd9xxx_mbhc_calc_thres(mbhc);
+			wcd9xxx_calibrate_hs_polling(mbhc);
+		} else {
+			pr_warn("%s: failed get new dce_z/sta_z 0x%x/0x%x\n",
+				__func__, dce_z, sta_z);
+		}
+
+		if (is_cs_enable) {
+			/* recalibrate dce_nsc_cs_z */
+			reg = snd_soc_read(mbhc->codec,
+					   WCD9XXX_A_CDC_MBHC_B1_CTL);
+			snd_soc_update_bits(mbhc->codec,
+					    WCD9XXX_A_CDC_MBHC_B1_CTL,
+					    0x78, WCD9XXX_MBHC_NSC_CS << 3);
+			wcd9xxx_get_z(mbhc, &dce_z, NULL, mbhc_micb_regs,
+				      true);
+			snd_soc_write(mbhc->codec, WCD9XXX_A_CDC_MBHC_B1_CTL,
+				      reg);
+			if (dce_z) {
+				pr_debug("%s: dce_nsc_cs_z 0x%x -> 0x%x\n",
+					 __func__,
+					 mbhc->mbhc_data.dce_nsc_cs_z,
+					 dce_z & 0xffff);
+				mbhc->mbhc_data.dce_nsc_cs_z = dce_z;
+				/* update v_cs_ins_h with new dce_nsc_cs_z */
+				mbhc->mbhc_data.v_cs_ins_h =
+						wcd9xxx_codec_v_sta_dce(
+							mbhc, DCE,
+							WCD9XXX_V_CS_HS_MAX,
+							is_cs_enable);
+				pr_debug("%s: v_cs_ins_h 0x%x\n", __func__,
+					 mbhc->mbhc_data.v_cs_ins_h);
+			} else {
+				pr_debug("%s: failed get new dce_nsc_cs_z\n",
+					 __func__);
+			}
+		}
+	}
+}
+
+static void wcd9xxx_shutdown_hs_removal_detect(struct wcd9xxx_mbhc *mbhc)
+{
+	struct snd_soc_codec *codec = mbhc->codec;
+	const struct wcd9xxx_mbhc_general_cfg *generic =
+	    WCD9XXX_MBHC_CAL_GENERAL_PTR(mbhc->mbhc_cfg->calibration);
+
+	/* Need MBHC clock */
+	if (mbhc->mbhc_cb && mbhc->mbhc_cb->codec_rco_ctrl)
+		mbhc->mbhc_cb->codec_rco_ctrl(mbhc->codec, true);
+	else {
+		WCD9XXX_BG_CLK_LOCK(mbhc->resmgr);
+		wcd9xxx_resmgr_get_clk_block(mbhc->resmgr, WCD9XXX_CLK_RCO);
+		WCD9XXX_BG_CLK_UNLOCK(mbhc->resmgr);
+	}
+
+	snd_soc_update_bits(codec, WCD9XXX_A_CDC_MBHC_CLK_CTL, 0x2, 0x2);
+	snd_soc_update_bits(codec, WCD9XXX_A_CDC_MBHC_B1_CTL, 0x6, 0x0);
+	__wcd9xxx_switch_micbias(mbhc, 0, false, false);
+
+	usleep_range(generic->t_shutdown_plug_rem,
+		     generic->t_shutdown_plug_rem + 50);
+
+	snd_soc_update_bits(codec, WCD9XXX_A_CDC_MBHC_CLK_CTL, 0xA, 0x8);
+
+	if (mbhc->mbhc_cb && mbhc->mbhc_cb->codec_rco_ctrl)
+		mbhc->mbhc_cb->codec_rco_ctrl(mbhc->codec, false);
+	else {
+		WCD9XXX_BG_CLK_LOCK(mbhc->resmgr);
+		/* Put requested CLK back */
+		wcd9xxx_resmgr_put_clk_block(mbhc->resmgr, WCD9XXX_CLK_RCO);
+		WCD9XXX_BG_CLK_UNLOCK(mbhc->resmgr);
+	}
+
+	snd_soc_write(codec, WCD9XXX_A_MBHC_SCALING_MUX_1, 0x00);
+}
+
+static void wcd9xxx_cleanup_hs_polling(struct wcd9xxx_mbhc *mbhc)
+{
+	pr_debug("%s: enter\n", __func__);
+	WCD9XXX_BCL_ASSERT_LOCKED(mbhc->resmgr);
+
+	wcd9xxx_shutdown_hs_removal_detect(mbhc);
+
+	/* Disable external voltage source to micbias if present */
+	if (mbhc->mbhc_cb && mbhc->mbhc_cb->enable_mb_source)
+		mbhc->mbhc_cb->enable_mb_source(mbhc->codec, false, true);
+
+	mbhc->polling_active = false;
+	mbhc->mbhc_state = MBHC_STATE_NONE;
+	pr_debug("%s: leave\n", __func__);
+}
+
+/* called under codec_resource_lock acquisition */
+static void wcd9xxx_codec_hphr_gnd_switch(struct snd_soc_codec *codec, bool on)
+{
+	snd_soc_update_bits(codec, WCD9XXX_A_MBHC_HPH, 0x01, on);
+	if (on)
+		usleep_range(5000, 5100);
+}
+
+static void wcd9xxx_onoff_vddio_switch(struct wcd9xxx_mbhc *mbhc, bool on)
+{
+	pr_debug("%s: vddio %d\n", __func__, on);
+
+	if (mbhc->mbhc_cb && mbhc->mbhc_cb->pull_mb_to_vddio) {
+		mbhc->mbhc_cb->pull_mb_to_vddio(mbhc->codec, on);
+		goto exit;
+	}
+
+	if (on) {
+		snd_soc_update_bits(mbhc->codec, mbhc->mbhc_bias_regs.mbhc_reg,
+				    1 << 7, 1 << 7);
+		snd_soc_update_bits(mbhc->codec, WCD9XXX_A_MAD_ANA_CTRL,
+				    1 << 4, 0);
+	} else {
+		snd_soc_update_bits(mbhc->codec, WCD9XXX_A_MAD_ANA_CTRL,
+				    1 << 4, 1 << 4);
+		snd_soc_update_bits(mbhc->codec, mbhc->mbhc_bias_regs.mbhc_reg,
+				    1 << 7, 0);
+	}
+
+exit:
+	/*
+	 * Wait for the micbias to settle down to vddio
+	 * when the micbias to vddio switch is enabled.
+	 */
+	if (on)
+		usleep_range(10000, 10100);
+}
+
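+/*
+ * Sample the HPHL status: temporarily enable the HPH Schmitt trigger
+ * bits in A_MBHC_HPH, wait for the status to become ready, read
+ * A_RX_HPH_L_STATUS, then restore the original A_MBHC_HPH contents.
+ */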
+static int wcd9xxx_hphl_status(struct wcd9xxx_mbhc *mbhc)
+{
+	u16 hph, status;
+	struct snd_soc_codec *codec = mbhc->codec;
+
+	WCD9XXX_BCL_ASSERT_LOCKED(mbhc->resmgr);
+	hph = snd_soc_read(codec, WCD9XXX_A_MBHC_HPH);
+	snd_soc_update_bits(codec, WCD9XXX_A_MBHC_HPH, 0x12, 0x02);
+	usleep_range(WCD9XXX_HPHL_STATUS_READY_WAIT_US,
+		     WCD9XXX_HPHL_STATUS_READY_WAIT_US +
+		     WCD9XXX_USLEEP_RANGE_MARGIN_US);
+	status = snd_soc_read(codec, WCD9XXX_A_RX_HPH_L_STATUS);
+	snd_soc_write(codec, WCD9XXX_A_MBHC_HPH, hph);
+	return status;
+}
+
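+/*
+ * Classify each current-source DCE measurement against the voltage
+ * windows: readings below v_no_mic mean headphone (no mic line),
+ * readings at or above v_hs_max mean a high-impedance plug, and
+ * anything in between is a headset. Measurements taken with the mic
+ * bias use the calibration blob values; current-source measurements
+ * use the WCD9XXX_V_CS_* constants.
+ */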
+static enum wcd9xxx_mbhc_plug_type
+wcd9xxx_cs_find_plug_type(struct wcd9xxx_mbhc *mbhc,
+			  struct wcd9xxx_mbhc_detect *dt, const int size,
+			  bool highhph,
+			  unsigned long event_state)
+{
+	int i;
+	int vdce, mb_mv;
+	int ch, sz, delta_thr;
+	int minv = 0, maxv = INT_MIN;
+	struct wcd9xxx_mbhc_detect *d = dt;
+	struct wcd9xxx_mbhc_detect *dprev = d, *dmicbias = NULL, *dgnd = NULL;
+	enum wcd9xxx_mbhc_plug_type type = PLUG_TYPE_INVALID;
+
+	const struct wcd9xxx_mbhc_plug_type_cfg *plug_type =
+	    WCD9XXX_MBHC_CAL_PLUG_TYPE_PTR(mbhc->mbhc_cfg->calibration);
+	s16 hs_max, no_mic, dce_z;
+	int highhph_cnt = 0;
+
+	pr_debug("%s: enter\n", __func__);
+	pr_debug("%s: event_state 0x%lx\n", __func__, event_state);
+
+	sz = size - 1;
+	for (i = 0, d = dt, ch = 0; i < sz; i++, d++) {
+		if (d->mic_bias) {
+			dce_z = mbhc->mbhc_data.dce_z;
+			mb_mv = mbhc->mbhc_data.micb_mv;
+			hs_max = plug_type->v_hs_max;
+			no_mic = plug_type->v_no_mic;
+		} else {
+			dce_z = mbhc->mbhc_data.dce_nsc_cs_z;
+			mb_mv = VDDIO_MICBIAS_MV;
+			hs_max = WCD9XXX_V_CS_HS_MAX;
+			no_mic = WCD9XXX_V_CS_NO_MIC;
+		}
+
+		vdce = __wcd9xxx_codec_sta_dce_v(mbhc, true, d->dce,
+						 dce_z, (u32)mb_mv);
+		d->_vdces = vdce;
+		if (d->_vdces < no_mic) {
+			d->_type = PLUG_TYPE_HEADPHONE;
+		} else if (d->_vdces >= hs_max) {
+			d->_type = PLUG_TYPE_HIGH_HPH;
+			highhph_cnt++;
+		} else {
+			d->_type = PLUG_TYPE_HEADSET;
+		}
+
+		pr_debug("%s: DCE #%d, %04x, V %04d(%04d), HPHL %d TYPE %d\n",
+			 __func__, i, d->dce, vdce, d->_vdces,
+			 d->hphl_status & 0x01,
+			 d->_type);
+
+		ch += d->hphl_status & 0x01;
+		if (!d->swap_gnd && !d->mic_bias) {
+			if (maxv < d->_vdces)
+				maxv = d->_vdces;
+			if (!minv || minv > d->_vdces)
+				minv = d->_vdces;
+		}
+		if ((!d->mic_bias &&
+		    (d->_vdces >= WCD9XXX_CS_MEAS_INVALD_RANGE_LOW_MV &&
+		     d->_vdces <= WCD9XXX_CS_MEAS_INVALD_RANGE_HIGH_MV)) ||
+		    (d->mic_bias &&
+		    (d->_vdces >= WCD9XXX_MEAS_INVALD_RANGE_LOW_MV &&
+		     d->_vdces <= WCD9XXX_MEAS_INVALD_RANGE_HIGH_MV))) {
+			pr_debug("%s: within invalid range\n", __func__);
+			type = PLUG_TYPE_INVALID;
+			goto exit;
+		}
+	}
+
+	delta_thr = ((highhph_cnt == sz) || highhph) ?
+			      WCD9XXX_MB_MEAS_DELTA_MAX_MV :
+			      WCD9XXX_CS_MEAS_DELTA_MAX_MV;
+
+	for (i = 0, d = dt; i < sz; i++, d++) {
+		if ((i > 0) && !d->mic_bias && !d->swap_gnd &&
+		    (d->_type != dprev->_type)) {
+			pr_debug("%s: Invalid, inconsistent types\n", __func__);
+			type = PLUG_TYPE_INVALID;
+			goto exit;
+		}
+
+		if (!d->swap_gnd && !d->mic_bias &&
+		    (abs(minv - d->_vdces) > delta_thr ||
+		     abs(maxv - d->_vdces) > delta_thr)) {
+			pr_debug("%s: Invalid, delta %dmv, %dmv and %dmv\n",
+				 __func__, d->_vdces, minv, maxv);
+			type = PLUG_TYPE_INVALID;
+			goto exit;
+		} else if (d->swap_gnd) {
+			dgnd = d;
+		}
+
+		if (!d->mic_bias && !d->swap_gnd)
+			dprev = d;
+		else if (d->mic_bias)
+			dmicbias = d;
+	}
+	if (dgnd && dt->_type != PLUG_TYPE_HEADSET &&
+	    dt->_type != dgnd->_type) {
+		pr_debug("%s: Invalid, inconsistent types\n", __func__);
+		type = PLUG_TYPE_INVALID;
+		goto exit;
+	}
+
+	type = dt->_type;
+	if (dmicbias) {
+		if (dmicbias->_type == PLUG_TYPE_HEADSET &&
+		    (dt->_type == PLUG_TYPE_HIGH_HPH ||
+		     dt->_type == PLUG_TYPE_HEADSET)) {
+			type = PLUG_TYPE_HEADSET;
+			if (dt->_type == PLUG_TYPE_HIGH_HPH) {
+				pr_debug("%s: Headset with threshold on MIC detected\n",
+					 __func__);
+				if (mbhc->mbhc_cfg->micbias_enable_flags &
+				 (1 << MBHC_MICBIAS_ENABLE_THRESHOLD_HEADSET))
+					mbhc->micbias_enable = true;
+			}
+		}
+	}
+
+	if (type == PLUG_TYPE_HEADSET && dgnd && !dgnd->mic_bias) {
+		/* if plug type is Headphone report as GND_MIC_SWAP */
+		if (dgnd->_type == PLUG_TYPE_HEADPHONE) {
+			pr_debug("%s: GND_MIC_SWAP\n", __func__);
+			type = PLUG_TYPE_GND_MIC_SWAP;
+			/*
+			 * if type is GND_MIC_SWAP we should not check
+			 * HPHL status hence goto exit
+			 */
+			goto exit;
+		} else if (dgnd->_type != PLUG_TYPE_HEADSET && !dmicbias) {
+			pr_debug("%s: Invalid, inconsistent types\n", __func__);
+			type = PLUG_TYPE_INVALID;
+		}
+	}
+
+	if (event_state & (1 << MBHC_EVENT_PA_HPHL)) {
+		pr_debug("%s: HPHL PA was ON\n", __func__);
+	} else if (ch != sz && ch > 0) {
+		pr_debug("%s: Invalid, inconsistent HPHL..\n", __func__);
+		type = PLUG_TYPE_INVALID;
+		goto exit;
+	}
+
+	if (!(event_state & (1UL << MBHC_EVENT_PA_HPHL))) {
+		if (((type == PLUG_TYPE_HEADSET ||
+		      type == PLUG_TYPE_HEADPHONE) && ch != sz)) {
+			pr_debug("%s: Invalid, not fully inserted, TYPE %d\n",
+				 __func__, type);
+			type = PLUG_TYPE_INVALID;
+		}
+	}
+
+	if (type == PLUG_TYPE_HEADSET &&
+	    (mbhc->mbhc_cfg->micbias_enable_flags &
+	    (1 << MBHC_MICBIAS_ENABLE_REGULAR_HEADSET)))
+		mbhc->micbias_enable = true;
+
+exit:
+	pr_debug("%s: Plug type %d detected\n", __func__, type);
+	return type;
+}
+
+/*
+ * wcd9xxx_find_plug_type: Find and return the best plug type from the
+ *			   given list of wcd9xxx_mbhc_detect structures.
+ * @mbhc: wcd9xxx_mbhc structure
+ * @dt: collected measurements
+ * @size: array size of dt
+ * @event_state: mbhc->event_state when dt was collected
+ */
+static enum wcd9xxx_mbhc_plug_type
+wcd9xxx_find_plug_type(struct wcd9xxx_mbhc *mbhc,
+		       struct wcd9xxx_mbhc_detect *dt, const int size,
+		       unsigned long event_state)
+{
+	int i;
+	int ch;
+	enum wcd9xxx_mbhc_plug_type type;
+	int vdce;
+	struct wcd9xxx_mbhc_detect *d, *dprev, *dgnd = NULL, *dvddio = NULL;
+	int maxv = 0, minv = 0;
+	const struct wcd9xxx_mbhc_plug_type_cfg *plug_type =
+	    WCD9XXX_MBHC_CAL_PLUG_TYPE_PTR(mbhc->mbhc_cfg->calibration);
+	const s16 hs_max = plug_type->v_hs_max;
+	const s16 no_mic = plug_type->v_no_mic;
+
+	pr_debug("%s: event_state 0x%lx\n", __func__, event_state);
+
+	for (i = 0, d = dt, ch = 0; i < size; i++, d++) {
+		vdce = wcd9xxx_codec_sta_dce_v(mbhc, true, d->dce);
+		if (d->vddio)
+			d->_vdces = scale_v_micb_vddio(mbhc, vdce, false);
+		else
+			d->_vdces = vdce;
+
+		if (d->_vdces >= no_mic && d->_vdces < hs_max)
+			d->_type = PLUG_TYPE_HEADSET;
+		else if (d->_vdces < no_mic)
+			d->_type = PLUG_TYPE_HEADPHONE;
+		else
+			d->_type = PLUG_TYPE_HIGH_HPH;
+
+		ch += d->hphl_status & 0x01;
+		if (!d->swap_gnd && !d->hwvalue && !d->vddio) {
+			if (maxv < d->_vdces)
+				maxv = d->_vdces;
+			if (!minv || minv > d->_vdces)
+				minv = d->_vdces;
+		}
+
+		pr_debug("%s: DCE #%d, %04x, V %04d(%04d), GND %d, VDDIO %d, HPHL %d TYPE %d\n",
+			 __func__, i, d->dce, vdce, d->_vdces,
+			 d->swap_gnd, d->vddio, d->hphl_status & 0x01,
+			 d->_type);
+
+		/*
+		 * If GND and MIC prongs are aligned to HPHR and GND of
+		 * headphone, codec measures the voltage based on
+		 * impedance between HPHR and GND which results in ~80mv.
+		 * Avoid this.
+		 */
+		if (d->_vdces >= WCD9XXX_MEAS_INVALD_RANGE_LOW_MV &&
+		    d->_vdces <= WCD9XXX_MEAS_INVALD_RANGE_HIGH_MV) {
+			pr_debug("%s: within invalid range\n", __func__);
+			type = PLUG_TYPE_INVALID;
+			goto exit;
+		}
+	}
+
+	if (event_state & (1 << MBHC_EVENT_PA_HPHL)) {
+		pr_debug("%s: HPHL PA was ON\n", __func__);
+	} else if (ch != size && ch > 0) {
+		pr_debug("%s: Invalid, inconsistent HPHL\n", __func__);
+		type = PLUG_TYPE_INVALID;
+		goto exit;
+	}
+
+	for (i = 0, dprev = NULL, d = dt; i < size; i++, d++) {
+		if (d->vddio) {
+			dvddio = d;
+			continue;
+		}
+
+		if ((i > 0) && (dprev != NULL) && (d->_type != dprev->_type)) {
+			pr_debug("%s: Invalid, inconsistent types\n", __func__);
+			type = PLUG_TYPE_INVALID;
+			goto exit;
+		}
+
+		if (!d->swap_gnd && !d->hwvalue &&
+		    (abs(minv - d->_vdces) > WCD9XXX_MEAS_DELTA_MAX_MV ||
+		     abs(maxv - d->_vdces) > WCD9XXX_MEAS_DELTA_MAX_MV)) {
+			pr_debug("%s: Invalid, delta %dmv, %dmv and %dmv\n",
+				 __func__, d->_vdces, minv, maxv);
+			type = PLUG_TYPE_INVALID;
+			goto exit;
+		} else if (d->swap_gnd) {
+			dgnd = d;
+		}
+		dprev = d;
+	}
+
+	WARN_ON(i != size);
+	type = dt->_type;
+	if (type == PLUG_TYPE_HEADSET && dgnd) {
+		if ((dgnd->_vdces + WCD9XXX_GM_SWAP_THRES_MIN_MV <
+		     minv) &&
+		    (dgnd->_vdces + WCD9XXX_GM_SWAP_THRES_MAX_MV >
+		     maxv))
+			type = PLUG_TYPE_GND_MIC_SWAP;
+	}
+
+	/* if HPHL PA was on, we cannot use hphl status */
+	if (!(event_state & (1UL << MBHC_EVENT_PA_HPHL))) {
+		if (((type == PLUG_TYPE_HEADSET ||
+		      type == PLUG_TYPE_HEADPHONE) && ch != size) ||
+		    (type == PLUG_TYPE_GND_MIC_SWAP && ch)) {
+			pr_debug("%s: Invalid, not fully inserted, TYPE %d\n",
+				 __func__, type);
+			type = PLUG_TYPE_INVALID;
+		}
+	}
+
+	if (type == PLUG_TYPE_HEADSET) {
+		if (dvddio && ((dvddio->_vdces > hs_max) ||
+		   (dvddio->_vdces > minv + WCD9XXX_THRESHOLD_MIC_THRESHOLD))) {
+			pr_debug("%s: Headset with threshold on MIC detected\n",
+				 __func__);
+			if (mbhc->mbhc_cfg->micbias_enable_flags &
+			    (1 << MBHC_MICBIAS_ENABLE_THRESHOLD_HEADSET))
+				mbhc->micbias_enable = true;
+		} else {
+			pr_debug("%s: Headset with regular MIC detected\n",
+				 __func__);
+			if (mbhc->mbhc_cfg->micbias_enable_flags &
+			    (1 << MBHC_MICBIAS_ENABLE_REGULAR_HEADSET))
+				mbhc->micbias_enable = true;
+		}
+	}
+exit:
+	pr_debug("%s: Plug type %d detected, micbias_enable %d\n", __func__,
+		 type, mbhc->micbias_enable);
+	return type;
+}
+
+/*
+ * Pull down MBHC micbias for the provided duration in microseconds.
+ */
+static int wcd9xxx_pull_down_micbias(struct wcd9xxx_mbhc *mbhc, int us)
+{
+	bool micbiasconn = false;
+	struct snd_soc_codec *codec = mbhc->codec;
+	const u16 ctlreg = mbhc->mbhc_bias_regs.ctl_reg;
+
+	/*
+	 * Disable MBHC to micbias connection to pull down
+	 * micbias and pull down micbias for a moment.
+	 */
+	if ((snd_soc_read(mbhc->codec, ctlreg) & 0x01)) {
+		WARN_ONCE(1, "MBHC micbias is already pulled down unexpectedly\n");
+		return -EFAULT;
+	}
+
+	if ((snd_soc_read(mbhc->codec, WCD9XXX_A_MAD_ANA_CTRL) & 1 << 4)) {
+		snd_soc_update_bits(mbhc->codec, WCD9XXX_A_MAD_ANA_CTRL,
+				    1 << 4, 0);
+		micbiasconn = true;
+	}
+
+	snd_soc_update_bits(codec, mbhc->mbhc_bias_regs.ctl_reg, 0x01, 0x01);
+
+	/*
+	 * Pull down for 1ms to discharge bias. Give small margin (10us) to be
+	 * able to get consistent result across DCEs.
+	 */
+	usleep_range(1000, 1000 + 10);
+
+	if (micbiasconn)
+		snd_soc_update_bits(mbhc->codec, WCD9XXX_A_MAD_ANA_CTRL,
+				    1 << 4, 1 << 4);
+	snd_soc_update_bits(codec, mbhc->mbhc_bias_regs.ctl_reg, 0x01, 0x00);
+	usleep_range(us, us + WCD9XXX_USLEEP_RANGE_MARGIN_US);
+
+	return 0;
+}
+
+/* Called under codec resource lock acquisition */
+void wcd9xxx_turn_onoff_current_source(struct wcd9xxx_mbhc *mbhc,
+				       struct mbhc_micbias_regs *mbhc_micb_regs,
+				       bool on, bool highhph)
+{
+	struct snd_soc_codec *codec;
+	struct wcd9xxx_mbhc_btn_detect_cfg *btn_det;
+	const struct wcd9xxx_mbhc_plug_detect_cfg *plug_det =
+	    WCD9XXX_MBHC_CAL_PLUG_DET_PTR(mbhc->mbhc_cfg->calibration);
+
+	btn_det = WCD9XXX_MBHC_CAL_BTN_DET_PTR(mbhc->mbhc_cfg->calibration);
+	codec = mbhc->codec;
+
+	WCD9XXX_BCL_ASSERT_LOCKED(mbhc->resmgr);
+
+	if ((on && mbhc->is_cs_enabled) ||
+	    (!on && !mbhc->is_cs_enabled)) {
+		pr_debug("%s: Current source is already %s\n",
+			__func__, on ? "ON" : "OFF");
+		return;
+	}
+
+	if (on) {
+		pr_debug("%s: enabling current source\n", __func__);
+		/* Nsc to 9 */
+		snd_soc_update_bits(codec, WCD9XXX_A_CDC_MBHC_B1_CTL,
+				    0x78, 0x48);
+		/* pull down diode bit to 0 */
+		snd_soc_update_bits(codec, mbhc_micb_regs->mbhc_reg,
+				    0x01, 0x00);
+		/*
+		 * Keep the low power insertion/removal
+		 * detection (reg 0x3DD) disabled
+		 */
+		snd_soc_update_bits(codec, WCD9XXX_A_CDC_MBHC_INT_CTL,
+				    0x01, 0x00);
+		/*
+		 * Enable the Mic Bias current source
+		 * Write bits[6:5] of register MICB_2_MBHC to 0x3 (V_20_UA)
+		 * Write bit[7] of register MICB_2_MBHC to 1
+		 * (INS_DET_ISRC_EN__ENABLE)
+		 * MICB_2_MBHC__SCHT_TRIG_EN to 1
+		 */
+		snd_soc_update_bits(codec, mbhc_micb_regs->mbhc_reg,
+				    0xF0, 0xF0);
+		/* Disconnect MBHC Override from MicBias and LDOH */
+		snd_soc_update_bits(codec, WCD9XXX_A_MAD_ANA_CTRL, 0x10, 0x00);
+		mbhc->is_cs_enabled = true;
+	} else {
+		pr_debug("%s: disabling current source\n", __func__);
+		/* Connect MBHC Override from MicBias and LDOH */
+		snd_soc_update_bits(codec, WCD9XXX_A_MAD_ANA_CTRL, 0x10, 0x10);
+		/* INS_DET_ISRC_CTL to acdb value */
+		snd_soc_update_bits(codec, mbhc_micb_regs->mbhc_reg,
+				    0x60, plug_det->mic_current << 5);
+		if (!highhph) {
+			/* INS_DET_ISRC_EN__ENABLE to 0 */
+			snd_soc_update_bits(codec,
+					    mbhc_micb_regs->mbhc_reg,
+					    0x80, 0x00);
+			/* MICB_2_MBHC__SCHT_TRIG_EN  to 0 */
+			snd_soc_update_bits(codec,
+					    mbhc_micb_regs->mbhc_reg,
+					    0x10, 0x00);
+		}
+		/* Nsc to acdb value */
+		snd_soc_update_bits(codec, WCD9XXX_A_CDC_MBHC_B1_CTL, 0x78,
+				    btn_det->mbhc_nsc << 3);
+		mbhc->is_cs_enabled = false;
+	}
+}
+
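+/*
+ * Gather NUM_DCE_PLUG_INS_DETECT measurements with the current source
+ * on: slot 0 reuses the hardware value from polling setup, one later
+ * round is taken with HPHR switched to ground (swap_gnd) and, when
+ * highhph is set, one with the mic bias in place of the current
+ * source; the set is then handed to wcd9xxx_cs_find_plug_type().
+ */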
+static enum wcd9xxx_mbhc_plug_type
+wcd9xxx_codec_cs_get_plug_type(struct wcd9xxx_mbhc *mbhc, bool highhph)
+{
+	struct snd_soc_codec *codec = mbhc->codec;
+	struct wcd9xxx_mbhc_detect rt[NUM_DCE_PLUG_INS_DETECT];
+	enum wcd9xxx_mbhc_plug_type type = PLUG_TYPE_INVALID;
+	int i;
+
+	pr_debug("%s: enter\n", __func__);
+	WCD9XXX_BCL_ASSERT_LOCKED(mbhc->resmgr);
+
+	BUG_ON(NUM_DCE_PLUG_INS_DETECT < 4);
+
+	wcd9xxx_mbhc_ctrl_clk_bandgap(mbhc, true);
+	rt[0].swap_gnd = false;
+	rt[0].vddio = false;
+	rt[0].hwvalue = true;
+	rt[0].hphl_status = wcd9xxx_hphl_status(mbhc);
+	rt[0].dce = wcd9xxx_mbhc_setup_hs_polling(mbhc, &mbhc->mbhc_bias_regs,
+						  true);
+	rt[0].mic_bias = false;
+
+	for (i = 1; i < NUM_DCE_PLUG_INS_DETECT - 1; i++) {
+		rt[i].swap_gnd = (i == NUM_DCE_PLUG_INS_DETECT - 3);
+		rt[i].mic_bias = ((i == NUM_DCE_PLUG_INS_DETECT - 4) &&
+				   highhph);
+		rt[i].hphl_status = wcd9xxx_hphl_status(mbhc);
+		if (rt[i].swap_gnd)
+			wcd9xxx_codec_hphr_gnd_switch(codec, true);
+
+		if (rt[i].mic_bias)
+			wcd9xxx_turn_onoff_current_source(mbhc,
+							  &mbhc->mbhc_bias_regs,
+							  false, false);
+
+		rt[i].dce = __wcd9xxx_codec_sta_dce(mbhc, 1, !highhph, true);
+		if (rt[i].mic_bias)
+			wcd9xxx_turn_onoff_current_source(mbhc,
+							  &mbhc->mbhc_bias_regs,
+							  true, false);
+		if (rt[i].swap_gnd)
+			wcd9xxx_codec_hphr_gnd_switch(codec, false);
+	}
+
+	/* recalibrate DCE/STA GND voltages */
+	wcd9xxx_recalibrate(mbhc, &mbhc->mbhc_bias_regs, true);
+
+	type = wcd9xxx_cs_find_plug_type(mbhc, rt, ARRAY_SIZE(rt), highhph,
+					 mbhc->event_state);
+
+	wcd9xxx_mbhc_ctrl_clk_bandgap(mbhc, false);
+	pr_debug("%s: plug_type:%d\n", __func__, type);
+
+	return type;
+}
+
+static enum wcd9xxx_mbhc_plug_type
+wcd9xxx_codec_get_plug_type(struct wcd9xxx_mbhc *mbhc, bool highhph)
+{
+	int i;
+	bool vddioon;
+	struct wcd9xxx_mbhc_plug_type_cfg *plug_type_ptr;
+	struct wcd9xxx_mbhc_detect rt[NUM_DCE_PLUG_INS_DETECT];
+	enum wcd9xxx_mbhc_plug_type type = PLUG_TYPE_INVALID;
+	struct snd_soc_codec *codec = mbhc->codec;
+
+	pr_debug("%s: enter\n", __func__);
+	WCD9XXX_BCL_ASSERT_LOCKED(mbhc->resmgr);
+
+	/* make sure override is on */
+	WARN_ON(!(snd_soc_read(codec, WCD9XXX_A_CDC_MBHC_B1_CTL) & 0x04));
+
+	/* GND and MIC swap detection requires at least 2 rounds of DCE */
+	BUG_ON(NUM_DCE_PLUG_INS_DETECT < 2);
+	detect_use_vddio_switch = mbhc->mbhc_cfg->use_vddio_meas;
+
+	/*
+	 * There is a chance the vddio switch is still on and the cfilt
+	 * voltage is still adjusted to the vddio level even after plug
+	 * removal has been reported.
+	 */
+	vddioon = __wcd9xxx_switch_micbias(mbhc, 0, false, false);
+	pr_debug("%s: vddio switch was %s\n", __func__, vddioon ? "on" : "off");
+
+	plug_type_ptr =
+	    WCD9XXX_MBHC_CAL_PLUG_TYPE_PTR(mbhc->mbhc_cfg->calibration);
+
+	/*
+	 * cfilter in fast mode requires 1ms to charge up and down micbias
+	 * fully.
+	 */
+	(void) wcd9xxx_pull_down_micbias(mbhc,
+					 WCD9XXX_MICBIAS_PULLDOWN_SETTLE_US);
+
+	wcd9xxx_mbhc_ctrl_clk_bandgap(mbhc, true);
+	rt[0].hphl_status = wcd9xxx_hphl_status(mbhc);
+	rt[0].dce = wcd9xxx_mbhc_setup_hs_polling(mbhc, &mbhc->mbhc_bias_regs,
+						  false);
+	rt[0].swap_gnd = false;
+	rt[0].vddio = false;
+	rt[0].hwvalue = true;
+	for (i = 1; i < NUM_DCE_PLUG_INS_DETECT; i++) {
+		rt[i].swap_gnd = (i == NUM_DCE_PLUG_INS_DETECT - 2);
+		if (detect_use_vddio_switch)
+			rt[i].vddio = (i == 1);
+		else
+			rt[i].vddio = false;
+		rt[i].hphl_status = wcd9xxx_hphl_status(mbhc);
+		rt[i].hwvalue = false;
+		if (rt[i].swap_gnd)
+			wcd9xxx_codec_hphr_gnd_switch(codec, true);
+		if (rt[i].vddio)
+			wcd9xxx_onoff_vddio_switch(mbhc, true);
+		/*
+		 * Pull down micbias to detect headset with mic which has
+		 * threshold and to have more consistent voltage measurements.
+		 *
+		 * cfilter in fast mode requires 1ms to charge up and down
+		 * micbias fully.
+		 */
+		(void) wcd9xxx_pull_down_micbias(mbhc,
+					    WCD9XXX_MICBIAS_PULLDOWN_SETTLE_US);
+		rt[i].dce = __wcd9xxx_codec_sta_dce(mbhc, 1, true, true);
+		if (rt[i].vddio)
+			wcd9xxx_onoff_vddio_switch(mbhc, false);
+		if (rt[i].swap_gnd)
+			wcd9xxx_codec_hphr_gnd_switch(codec, false);
+	}
+	/* recalibrate DCE/STA GND voltages */
+	wcd9xxx_recalibrate(mbhc, &mbhc->mbhc_bias_regs, false);
+
+	if (vddioon)
+		__wcd9xxx_switch_micbias(mbhc, 1, false, false);
+
+	type = wcd9xxx_find_plug_type(mbhc, rt, ARRAY_SIZE(rt),
+				      mbhc->event_state);
+
+	wcd9xxx_mbhc_ctrl_clk_bandgap(mbhc, false);
+	pr_debug("%s: leave\n", __func__);
+	return type;
+}
+
+static bool wcd9xxx_swch_level_remove(struct wcd9xxx_mbhc *mbhc)
+{
+	if (mbhc->mbhc_cfg->gpio) {
+		return (gpio_get_value_cansleep(mbhc->mbhc_cfg->gpio) !=
+			mbhc->mbhc_cfg->gpio_level_insert);
+	} else if (mbhc->mbhc_cfg->insert_detect) {
+		if (mbhc->mbhc_cb && mbhc->mbhc_cb->insert_rem_status)
+			return mbhc->mbhc_cb->insert_rem_status(mbhc->codec);
+		else
+			return snd_soc_read(mbhc->codec,
+				    WCD9XXX_A_MBHC_INSERT_DET_STATUS) &
+				    (1 << 2);
+	} else {
+		WARN(1, "Invalid jack detection configuration\n");
+	}
+
+	return true;
+}
+
+static bool is_clk_active(struct snd_soc_codec *codec)
+{
+	return !!(snd_soc_read(codec, WCD9XXX_A_CDC_CLK_MCLK_CTL) & 0x05);
+}
+
+static int wcd9xxx_enable_hs_detect(struct wcd9xxx_mbhc *mbhc,
+				    int insertion, int trigger, bool padac_off)
+{
+	struct snd_soc_codec *codec = mbhc->codec;
+	int central_bias_enabled = 0;
+	const struct wcd9xxx_mbhc_general_cfg *generic =
+	    WCD9XXX_MBHC_CAL_GENERAL_PTR(mbhc->mbhc_cfg->calibration);
+	const struct wcd9xxx_mbhc_plug_detect_cfg *plug_det =
+	    WCD9XXX_MBHC_CAL_PLUG_DET_PTR(mbhc->mbhc_cfg->calibration);
+
+	pr_debug("%s: enter insertion(%d) trigger(0x%x)\n",
+		 __func__, insertion, trigger);
+
+	if (!mbhc->mbhc_cfg->calibration) {
+		pr_err("Error, no wcd9xxx calibration\n");
+		return -EINVAL;
+	}
+
+	snd_soc_update_bits(codec, WCD9XXX_A_CDC_MBHC_INT_CTL, 0x1, 0);
+
+	/*
+	 * Make sure mic bias and Mic line schmitt trigger
+	 * are turned OFF
+	 */
+	snd_soc_update_bits(codec, mbhc->mbhc_bias_regs.ctl_reg, 0x01, 0x01);
+	snd_soc_update_bits(codec, mbhc->mbhc_bias_regs.mbhc_reg, 0x90, 0x00);
+
+	if (insertion) {
+		wcd9xxx_switch_micbias(mbhc, 0);
+
+		/* DAPM can manipulate PA/DAC bits concurrently */
+		if (padac_off)
+			wcd9xxx_set_and_turnoff_hph_padac(mbhc);
+
+		if (trigger & MBHC_USE_HPHL_TRIGGER) {
+			/* Enable HPH Schmitt Trigger */
+			snd_soc_update_bits(codec, WCD9XXX_A_MBHC_HPH, 0x11,
+					0x11);
+			snd_soc_update_bits(codec, WCD9XXX_A_MBHC_HPH, 0x0C,
+					plug_det->hph_current << 2);
+			snd_soc_update_bits(codec, WCD9XXX_A_MBHC_HPH, 0x02,
+					0x02);
+		}
+		if (trigger & MBHC_USE_MB_TRIGGER) {
+			/* enable the mic line schmitt trigger */
+			snd_soc_update_bits(codec,
+					mbhc->mbhc_bias_regs.mbhc_reg,
+					0x60, plug_det->mic_current << 5);
+			snd_soc_update_bits(codec,
+					mbhc->mbhc_bias_regs.mbhc_reg,
+					0x80, 0x80);
+			usleep_range(plug_det->t_mic_pid, plug_det->t_mic_pid +
+						WCD9XXX_USLEEP_RANGE_MARGIN_US);
+			snd_soc_update_bits(codec,
+					mbhc->mbhc_bias_regs.ctl_reg, 0x01,
+					0x00);
+			snd_soc_update_bits(codec,
+					mbhc->mbhc_bias_regs.mbhc_reg,
+					0x10, 0x10);
+		}
+
+		/* setup for insertion detection */
+		snd_soc_update_bits(codec, WCD9XXX_A_CDC_MBHC_INT_CTL, 0x2, 0);
+	} else {
+		pr_debug("setup for removal detection\n");
+		/* Make sure the HPH schmitt trigger is OFF */
+		snd_soc_update_bits(codec, WCD9XXX_A_MBHC_HPH, 0x12, 0x00);
+
+		/* enable the mic line schmitt trigger */
+		snd_soc_update_bits(codec, mbhc->mbhc_bias_regs.ctl_reg,
+				    0x01, 0x00);
+		snd_soc_update_bits(codec, mbhc->mbhc_bias_regs.mbhc_reg, 0x60,
+				    plug_det->mic_current << 5);
+		snd_soc_update_bits(codec, mbhc->mbhc_bias_regs.mbhc_reg,
+				    0x80, 0x80);
+		usleep_range(plug_det->t_mic_pid, plug_det->t_mic_pid +
+					WCD9XXX_USLEEP_RANGE_MARGIN_US);
+		snd_soc_update_bits(codec, mbhc->mbhc_bias_regs.mbhc_reg,
+				    0x10, 0x10);
+
+		/* Setup for low power removal detection */
+		snd_soc_update_bits(codec, WCD9XXX_A_CDC_MBHC_INT_CTL, 0x2,
+				    0x2);
+	}
+
+	if (snd_soc_read(codec, WCD9XXX_A_CDC_MBHC_B1_CTL) & 0x4) {
+		/* called by interrupt */
+		if (!is_clk_active(codec)) {
+			wcd9xxx_resmgr_enable_config_mode(mbhc->resmgr, 1);
+			snd_soc_update_bits(codec, WCD9XXX_A_CDC_MBHC_B1_CTL,
+					0x06, 0);
+			usleep_range(generic->t_shutdown_plug_rem,
+					generic->t_shutdown_plug_rem +
+					WCD9XXX_USLEEP_RANGE_MARGIN_US);
+			wcd9xxx_resmgr_enable_config_mode(mbhc->resmgr, 0);
+		} else
+			snd_soc_update_bits(codec, WCD9XXX_A_CDC_MBHC_B1_CTL,
+					0x06, 0);
+	}
+
+	snd_soc_update_bits(codec, mbhc->mbhc_bias_regs.int_rbias, 0x80, 0);
+
+	/* If central bandgap disabled */
+	if (!(snd_soc_read(codec, WCD9XXX_A_PIN_CTL_OE1) & 1)) {
+		snd_soc_update_bits(codec, WCD9XXX_A_PIN_CTL_OE1, 0x3, 0x3);
+		usleep_range(generic->t_bg_fast_settle,
+			     generic->t_bg_fast_settle +
+			     WCD9XXX_USLEEP_RANGE_MARGIN_US);
+		central_bias_enabled = 1;
+	}
+
+	/* If LDO_H disabled */
+	if (snd_soc_read(codec, WCD9XXX_A_PIN_CTL_OE0) & 0x80) {
+		snd_soc_update_bits(codec, WCD9XXX_A_PIN_CTL_OE0, 0x10, 0);
+		snd_soc_update_bits(codec, WCD9XXX_A_PIN_CTL_OE0, 0x80, 0x80);
+		usleep_range(generic->t_ldoh, generic->t_ldoh +
+					      WCD9XXX_USLEEP_RANGE_MARGIN_US);
+		snd_soc_update_bits(codec, WCD9XXX_A_PIN_CTL_OE0, 0x80, 0);
+
+		if (central_bias_enabled)
+			snd_soc_update_bits(codec, WCD9XXX_A_PIN_CTL_OE1, 0x1,
+					    0);
+	}
+
+	if (mbhc->resmgr->reg_addr && mbhc->resmgr->reg_addr->micb_4_mbhc)
+		snd_soc_update_bits(codec, mbhc->resmgr->reg_addr->micb_4_mbhc,
+				    0x3, mbhc->mbhc_cfg->micbias);
+
+	wcd9xxx_enable_irq(mbhc->resmgr->core_res, mbhc->intr_ids->insertion);
+	snd_soc_update_bits(codec, WCD9XXX_A_CDC_MBHC_INT_CTL, 0x1, 0x1);
+	pr_debug("%s: leave\n", __func__);
+
+	return 0;
+}
+
+/*
+ * Function to determine whether the ANC microphone is present or not.
+ * Returns true if the ANC microphone is detected, false otherwise.
+ */
+static bool wcd9xxx_detect_anc_plug_type(struct wcd9xxx_mbhc *mbhc)
+{
+	struct wcd9xxx_mbhc_detect rt[NUM_DCE_PLUG_INS_DETECT - 1];
+	bool anc_mic_found = true;
+	int i, mb_mv;
+	const struct wcd9xxx_mbhc_plug_type_cfg *plug_type =
+	    WCD9XXX_MBHC_CAL_PLUG_TYPE_PTR(mbhc->mbhc_cfg->calibration);
+	s16 hs_max, dce_z;
+	s16 no_mic;
+	bool override_en;
+	bool timedout;
+	unsigned long timeout, retry = 0;
+	enum wcd9xxx_mbhc_plug_type type;
+	bool cs_enable;
+
+	if (mbhc->mbhc_cfg->anc_micbias != MBHC_MICBIAS3 &&
+	    mbhc->mbhc_cfg->anc_micbias != MBHC_MICBIAS2)
+		return false;
+
+	pr_debug("%s: enter\n", __func__);
+
+	override_en = (snd_soc_read(mbhc->codec, WCD9XXX_A_CDC_MBHC_B1_CTL) &
+		       0x04) ? true : false;
+	cs_enable = ((mbhc->mbhc_cfg->cs_enable_flags &
+		    (1 << MBHC_CS_ENABLE_DET_ANC)) != 0) &&
+		    (!(snd_soc_read(mbhc->codec,
+		       mbhc->mbhc_anc_bias_regs.ctl_reg) & 0x80)) &&
+		     (mbhc->mbhc_cfg->micbias != mbhc->mbhc_cfg->anc_micbias);
+
+	if (cs_enable) {
+		wcd9xxx_turn_onoff_current_source(mbhc,
+						  &mbhc->mbhc_anc_bias_regs,
+						  true, false);
+	} else {
+		if (mbhc->mbhc_cfg->anc_micbias == MBHC_MICBIAS3) {
+			if (mbhc->micbias_enable_cb)
+				mbhc->micbias_enable_cb(mbhc->codec, true,
+						mbhc->mbhc_cfg->anc_micbias);
+			else
+				return false;
+		} else {
+			/* Enable override */
+			if (!override_en)
+				wcd9xxx_turn_onoff_override(mbhc, true);
+		}
+	}
+
+	if (!cs_enable) {
+		hs_max = plug_type->v_hs_max;
+		no_mic = plug_type->v_no_mic;
+		dce_z = mbhc->mbhc_data.dce_z;
+		mb_mv = mbhc->mbhc_data.micb_mv;
+	} else {
+		hs_max = WCD9XXX_V_CS_HS_MAX;
+		no_mic = WCD9XXX_V_CS_NO_MIC;
+		mb_mv = VDDIO_MICBIAS_MV;
+		dce_z = mbhc->mbhc_data.dce_nsc_cs_z;
+	}
+
+	wcd9xxx_mbhc_ctrl_clk_bandgap(mbhc, true);
+
+	timeout = jiffies + msecs_to_jiffies(ANC_HPH_DETECT_PLUG_TIME_MS);
+	anc_mic_found = true;
+
+	while (!(timedout = time_after(jiffies, timeout))) {
+		retry++;
+
+		if (wcd9xxx_swch_level_remove(mbhc)) {
+			pr_debug("%s: Switch level is low\n", __func__);
+			anc_mic_found = false;
+			break;
+		}
+
+		pr_debug("%s: Retry attempt %lu", __func__, retry - 1);
+
+		rt[0].hphl_status = wcd9xxx_hphl_status(mbhc);
+		rt[0].dce = wcd9xxx_mbhc_setup_hs_polling(mbhc,
+						  &mbhc->mbhc_anc_bias_regs,
+						  cs_enable);
+		rt[0]._vdces = __wcd9xxx_codec_sta_dce_v(mbhc, true, rt[0].dce,
+							 dce_z, (u32)mb_mv);
+
+		if (rt[0]._vdces >= no_mic && rt[0]._vdces < hs_max)
+			rt[0]._type = PLUG_TYPE_HEADSET;
+		else if (rt[0]._vdces < no_mic)
+			rt[0]._type = PLUG_TYPE_HEADPHONE;
+		else
+			rt[0]._type = PLUG_TYPE_HIGH_HPH;
+
+		pr_debug("%s: DCE #%d, V %04d, HPHL %d TYPE %d\n",
+				__func__, 0, rt[0]._vdces,
+				rt[0].hphl_status & 0x01,
+				rt[0]._type);
+
+		for (i = 1; i < NUM_DCE_PLUG_INS_DETECT - 1; i++) {
+			rt[i].dce = __wcd9xxx_codec_sta_dce(mbhc, 1,
+							    true, true);
+			rt[i]._vdces = __wcd9xxx_codec_sta_dce_v(mbhc, true,
+							 rt[i].dce, dce_z,
+							 (u32) mb_mv);
+
+			if (rt[i]._vdces >= no_mic && rt[i]._vdces < hs_max)
+				rt[i]._type = PLUG_TYPE_HEADSET;
+			else if (rt[i]._vdces < no_mic)
+				rt[i]._type = PLUG_TYPE_HEADPHONE;
+			else
+				rt[i]._type = PLUG_TYPE_HIGH_HPH;
+
+			rt[i].hphl_status = wcd9xxx_hphl_status(mbhc);
+
+			pr_debug("%s: DCE #%d, V %04d, HPHL %d TYPE %d\n",
+					__func__, i, rt[i]._vdces,
+					rt[i].hphl_status & 0x01,
+					rt[i]._type);
+		}
+
+		/*
+		 * Check for the "type" of all the 4 measurements
+		 * If all 4 measurements have the Type as PLUG_TYPE_HEADSET
+		 * then it is proper mic and declare that the plug has two mics
+		 */
+		for (i = 0; i < NUM_DCE_PLUG_INS_DETECT - 1; i++) {
+			if (i > 0 && (rt[i - 1]._type != rt[i]._type)) {
+				type = PLUG_TYPE_INVALID;
+				break;
+			} else {
+				type = rt[0]._type;
+			}
+		}
+
+		pr_debug("%s: Plug type found in ANC detection :%d",
+			__func__, type);
+
+		if (type != PLUG_TYPE_HEADSET)
+			anc_mic_found = false;
+		if (anc_mic_found || (type == PLUG_TYPE_HEADPHONE &&
+		    mbhc->mbhc_cfg->hw_jack_type == FIVE_POLE_JACK) ||
+		    (type == PLUG_TYPE_HIGH_HPH &&
+		    mbhc->mbhc_cfg->hw_jack_type == SIX_POLE_JACK))
+			break;
+	}
+
+	wcd9xxx_mbhc_ctrl_clk_bandgap(mbhc, false);
+	if (cs_enable) {
+		wcd9xxx_turn_onoff_current_source(mbhc,
+						  &mbhc->mbhc_anc_bias_regs,
+						  false, false);
+	} else {
+		if (mbhc->mbhc_cfg->anc_micbias == MBHC_MICBIAS3) {
+			if (mbhc->micbias_enable_cb)
+				mbhc->micbias_enable_cb(mbhc->codec, false,
+						mbhc->mbhc_cfg->anc_micbias);
+		} else {
+			/* Disable override */
+			if (!override_en)
+				wcd9xxx_turn_onoff_override(mbhc, false);
+		}
+	}
+	pr_debug("%s: leave\n", __func__);
+	return anc_mic_found;
+}
+
+/* called under codec_resource_lock acquisition */
+static void wcd9xxx_find_plug_and_report(struct wcd9xxx_mbhc *mbhc,
+					 enum wcd9xxx_mbhc_plug_type plug_type)
+{
+	bool anc_mic_found = false;
+
+	pr_debug("%s: enter current_plug(%d) new_plug(%d)\n",
+		 __func__, mbhc->current_plug, plug_type);
+
+	WCD9XXX_BCL_ASSERT_LOCKED(mbhc->resmgr);
+
+	if (plug_type == PLUG_TYPE_HEADPHONE &&
+	    mbhc->current_plug == PLUG_TYPE_NONE) {
+		/*
+		 * Nothing was reported previously;
+		 * report a headphone or an unsupported plug
+		 */
+		wcd9xxx_report_plug(mbhc, 1, SND_JACK_HEADPHONE);
+		wcd9xxx_cleanup_hs_polling(mbhc);
+	} else if (plug_type == PLUG_TYPE_GND_MIC_SWAP) {
+		if (!mbhc->mbhc_cfg->detect_extn_cable) {
+			if (mbhc->current_plug == PLUG_TYPE_HEADSET)
+				wcd9xxx_report_plug(mbhc, 0,
+							 SND_JACK_HEADSET);
+			else if (mbhc->current_plug == PLUG_TYPE_HEADPHONE)
+				wcd9xxx_report_plug(mbhc, 0,
+							 SND_JACK_HEADPHONE);
+		}
+		wcd9xxx_report_plug(mbhc, 1, SND_JACK_UNSUPPORTED);
+		wcd9xxx_cleanup_hs_polling(mbhc);
+	} else if (plug_type == PLUG_TYPE_HEADSET) {
+
+		if (mbhc->mbhc_cfg->enable_anc_mic_detect) {
+			/*
+			 * Do not report Headset, because at this point
+			 * it could be a ANC headphone having two mics.
+			 * So, proceed further to detect if there is a
+			 * second mic.
+			 */
+			mbhc->scaling_mux_in = 0x08;
+			anc_mic_found = wcd9xxx_detect_anc_plug_type(mbhc);
+		}
+
+		if (anc_mic_found) {
+			/* Report ANC headphone */
+			wcd9xxx_report_plug(mbhc, 1, SND_JACK_ANC_HEADPHONE);
+		} else {
+			/*
+			 * If Headphone was reported previously, this will
+			 * only report the mic line
+			 */
+			wcd9xxx_report_plug(mbhc, 1, SND_JACK_HEADSET);
+		}
+		/* Button detection requires the RC oscillator */
+		wcd9xxx_mbhc_ctrl_clk_bandgap(mbhc, true);
+		/*
+		 * sleep so that the audio path tears down completely
+		 * before reporting plug insertion to user space
+		 */
+		msleep(100);
+
+		wcd9xxx_start_hs_polling(mbhc);
+	} else if (plug_type == PLUG_TYPE_HIGH_HPH) {
+		if (mbhc->mbhc_cfg->detect_extn_cable) {
+			/* High impedance device found. Report as LINEOUT */
+			if (mbhc->current_plug == PLUG_TYPE_NONE)
+				wcd9xxx_report_plug(mbhc, 1, SND_JACK_LINEOUT);
+			wcd9xxx_cleanup_hs_polling(mbhc);
+			pr_debug("%s: setup mic trigger for further detection\n",
+				 __func__);
+			mbhc->lpi_enabled = true;
+			/*
+			 * Do not enable HPHL trigger. If playback is active,
+			 * it might lead to continuous false HPHL triggers
+			 */
+			wcd9xxx_enable_hs_detect(mbhc, 1, MBHC_USE_MB_TRIGGER,
+						 false);
+		} else {
+			if (mbhc->current_plug == PLUG_TYPE_NONE)
+				wcd9xxx_report_plug(mbhc, 1,
+							 SND_JACK_HEADPHONE);
+			wcd9xxx_cleanup_hs_polling(mbhc);
+			pr_debug("%s: setup mic trigger for further detection\n",
+				 __func__);
+			mbhc->lpi_enabled = true;
+			wcd9xxx_enable_hs_detect(mbhc, 1, MBHC_USE_MB_TRIGGER |
+							  MBHC_USE_HPHL_TRIGGER,
+						 false);
+		}
+	} else {
+		WARN(1, "Unexpected current plug_type %d, plug_type %d\n",
+		     mbhc->current_plug, plug_type);
+	}
+	pr_debug("%s: leave\n", __func__);
+}
+
+/* called under codec_resource_lock acquisition */
+static void wcd9xxx_mbhc_decide_swch_plug(struct wcd9xxx_mbhc *mbhc)
+{
+	enum wcd9xxx_mbhc_plug_type plug_type;
+	bool current_source_enable;
+
+	pr_debug("%s: enter\n", __func__);
+
+	WCD9XXX_BCL_ASSERT_LOCKED(mbhc->resmgr);
+
+	current_source_enable = (((mbhc->mbhc_cfg->cs_enable_flags &
+		      (1 << MBHC_CS_ENABLE_INSERTION)) != 0) &&
+		     (!(snd_soc_read(mbhc->codec,
+				     mbhc->mbhc_bias_regs.ctl_reg) & 0x80)));
+
+	mbhc->scaling_mux_in = 0x04;
+
+	if (current_source_enable) {
+		wcd9xxx_turn_onoff_current_source(mbhc, &mbhc->mbhc_bias_regs,
+						  true, false);
+		plug_type = wcd9xxx_codec_cs_get_plug_type(mbhc, false);
+		/*
+		 * For other plug types, the current source disable
+		 * will be done from wcd9xxx_correct_swch_plug
+		 */
+		if (plug_type == PLUG_TYPE_HEADSET)
+			wcd9xxx_turn_onoff_current_source(mbhc,
+						&mbhc->mbhc_bias_regs,
+						false, false);
+	} else {
+		wcd9xxx_turn_onoff_override(mbhc, true);
+		plug_type = wcd9xxx_codec_get_plug_type(mbhc, true);
+		wcd9xxx_turn_onoff_override(mbhc, false);
+	}
+
+	if (wcd9xxx_swch_level_remove(mbhc)) {
+		if (current_source_enable && mbhc->is_cs_enabled) {
+			wcd9xxx_turn_onoff_current_source(mbhc,
+					&mbhc->mbhc_bias_regs,
+					false, false);
+		}
+		pr_debug("%s: Switch level is low when determining plug\n",
+			 __func__);
+		return;
+	}
+
+	if (plug_type == PLUG_TYPE_INVALID ||
+	    plug_type == PLUG_TYPE_GND_MIC_SWAP) {
+		wcd9xxx_cleanup_hs_polling(mbhc);
+		wcd9xxx_schedule_hs_detect_plug(mbhc,
+						&mbhc->correct_plug_swch);
+	} else if (plug_type == PLUG_TYPE_HEADPHONE) {
+		wcd9xxx_report_plug(mbhc, 1, SND_JACK_HEADPHONE);
+		wcd9xxx_cleanup_hs_polling(mbhc);
+		wcd9xxx_schedule_hs_detect_plug(mbhc,
+						&mbhc->correct_plug_swch);
+	} else if (plug_type == PLUG_TYPE_HIGH_HPH) {
+		wcd9xxx_cleanup_hs_polling(mbhc);
+		wcd9xxx_schedule_hs_detect_plug(mbhc,
+						&mbhc->correct_plug_swch);
+	} else {
+		pr_debug("%s: Valid plug found, determine plug type %d\n",
+			 __func__, plug_type);
+		wcd9xxx_find_plug_and_report(mbhc, plug_type);
+	}
+	pr_debug("%s: leave\n", __func__);
+}
+
+/* called under codec_resource_lock acquisition */
+static void wcd9xxx_mbhc_detect_plug_type(struct wcd9xxx_mbhc *mbhc)
+{
+	pr_debug("%s: enter\n", __func__);
+	WCD9XXX_BCL_ASSERT_LOCKED(mbhc->resmgr);
+
+	if (wcd9xxx_swch_level_remove(mbhc))
+		pr_debug("%s: Switch level low when determining plug\n",
+			 __func__);
+	else
+		wcd9xxx_mbhc_decide_swch_plug(mbhc);
+	pr_debug("%s: leave\n", __func__);
+}
+
+/* called only from interrupt which is under codec_resource_lock acquisition */
+static void wcd9xxx_hs_insert_irq_swch(struct wcd9xxx_mbhc *mbhc,
+				       bool is_removal)
+{
+	if (!is_removal) {
+		pr_debug("%s: MIC trigger insertion interrupt\n", __func__);
+
+		rmb();
+		if (mbhc->lpi_enabled)
+			msleep(100);
+
+		rmb();
+		if (!mbhc->lpi_enabled) {
+			pr_debug("%s: lpi is disabled\n", __func__);
+		} else if (!wcd9xxx_swch_level_remove(mbhc)) {
+			pr_debug("%s: Valid insertion, detect plug type\n",
+				 __func__);
+			wcd9xxx_mbhc_decide_swch_plug(mbhc);
+		} else {
+			pr_debug("%s: Invalid insertion stop plug detection\n",
+				 __func__);
+		}
+	} else if (mbhc->mbhc_cfg->detect_extn_cable) {
+		pr_debug("%s: Removal\n", __func__);
+		if (!wcd9xxx_swch_level_remove(mbhc)) {
+			/*
+			 * The switch indicates something is still inserted.
+			 * This could be an extension cable, i.e. the headset
+			 * was removed from the extension cable.
+			 */
+			/* cancel detect plug */
+			wcd9xxx_cancel_hs_detect_plug(mbhc,
+						      &mbhc->correct_plug_swch);
+			wcd9xxx_mbhc_decide_swch_plug(mbhc);
+		}
+	} else {
+		pr_err("%s: Switch IRQ used, invalid MBHC Removal\n", __func__);
+	}
+}
+
+static bool is_valid_mic_voltage(struct wcd9xxx_mbhc *mbhc, s32 mic_mv,
+				 bool cs_enable)
+{
+	const struct wcd9xxx_mbhc_plug_type_cfg *plug_type =
+	    WCD9XXX_MBHC_CAL_PLUG_TYPE_PTR(mbhc->mbhc_cfg->calibration);
+	const s16 v_hs_max = wcd9xxx_get_current_v_hs_max(mbhc);
+
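+	/*
+	 * With the current source the valid window is fixed
+	 * (WCD9XXX_V_CS_NO_MIC .. WCD9XXX_V_CS_HS_MAX); otherwise the
+	 * voltage must avoid the known-invalid band and fall between the
+	 * calibrated no-mic threshold and the headset maximum
+	 */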
+	if (cs_enable)
+		return ((mic_mv > WCD9XXX_V_CS_NO_MIC) &&
+			 (mic_mv < WCD9XXX_V_CS_HS_MAX)) ? true : false;
+	else
+		return (!(mic_mv > WCD9XXX_MEAS_INVALD_RANGE_LOW_MV &&
+			  mic_mv < WCD9XXX_MEAS_INVALD_RANGE_HIGH_MV) &&
+			(mic_mv > plug_type->v_no_mic) &&
+			(mic_mv < v_hs_max)) ? true : false;
+}
+
+/*
+ * called under codec_resource_lock acquisition
+ * returns true if the mic voltage settles back into the normal
+ * insertion range
+ * returns false if it timed out or the plug was removed
+ */
+static bool wcd9xxx_hs_remove_settle(struct wcd9xxx_mbhc *mbhc)
+{
+	int i;
+	bool timedout, settled = false;
+	s32 mic_mv[NUM_DCE_PLUG_DETECT];
+	short mb_v[NUM_DCE_PLUG_DETECT];
+	unsigned long retry = 0, timeout;
+	bool cs_enable;
+
+	cs_enable = (((mbhc->mbhc_cfg->cs_enable_flags &
+		      (1 << MBHC_CS_ENABLE_REMOVAL)) != 0) &&
+		     (!(snd_soc_read(mbhc->codec,
+				     mbhc->mbhc_bias_regs.ctl_reg) & 0x80)));
+	if (cs_enable)
+		wcd9xxx_turn_onoff_current_source(mbhc, &mbhc->mbhc_bias_regs,
+						  true, false);
+
+	timeout = jiffies + msecs_to_jiffies(HS_DETECT_PLUG_TIME_MS);
+	while (!(timedout = time_after(jiffies, timeout))) {
+		retry++;
+		if (wcd9xxx_swch_level_remove(mbhc)) {
+			pr_debug("%s: Switch indicates removal\n", __func__);
+			break;
+		}
+
+		if (retry > 1)
+			msleep(250);
+		else
+			msleep(50);
+
+		if (wcd9xxx_swch_level_remove(mbhc)) {
+			pr_debug("%s: Switch indicates removal\n", __func__);
+			break;
+		}
+
+		if (cs_enable) {
+			for (i = 0; i < NUM_DCE_PLUG_DETECT; i++) {
+				mb_v[i] = __wcd9xxx_codec_sta_dce(mbhc, 1,
+								  true, true);
+				mic_mv[i] = __wcd9xxx_codec_sta_dce_v(mbhc,
+								      true,
+								      mb_v[i],
+						mbhc->mbhc_data.dce_nsc_cs_z,
+						(u32)VDDIO_MICBIAS_MV);
+				pr_debug("%s : DCE run %lu, mic_mv = %d(%x)\n",
+					 __func__, retry, mic_mv[i], mb_v[i]);
+			}
+		} else {
+			for (i = 0; i < NUM_DCE_PLUG_DETECT; i++) {
+				mb_v[i] = wcd9xxx_codec_sta_dce(mbhc, 1,
+								true);
+				mic_mv[i] = wcd9xxx_codec_sta_dce_v(mbhc, 1,
+								mb_v[i]);
+				pr_debug("%s : DCE run %lu, mic_mv = %d(%x)\n",
+					 __func__, retry, mic_mv[i],
+								mb_v[i]);
+			}
+		}
+
+		if (wcd9xxx_swch_level_remove(mbhc)) {
+			pr_debug("%s: Switch indicates removal\n", __func__);
+			break;
+		}
+
+		if (mbhc->current_plug == PLUG_TYPE_NONE) {
+			pr_debug("%s : headset/headphone is removed\n",
+				 __func__);
+			break;
+		}
+
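+		/*
+		 * Every one of the NUM_DCE_PLUG_DETECT samples must sit in
+		 * the valid mic range before the voltage is considered
+		 * settled
+		 */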
+		for (i = 0; i < NUM_DCE_PLUG_DETECT; i++)
+			if (!is_valid_mic_voltage(mbhc, mic_mv[i], cs_enable))
+				break;
+
+		if (i == NUM_DCE_PLUG_DETECT) {
+			pr_debug("%s: MIC voltage settled\n", __func__);
+			settled = true;
+			msleep(200);
+			break;
+		}
+	}
+
+	if (cs_enable)
+		wcd9xxx_turn_onoff_current_source(mbhc, &mbhc->mbhc_bias_regs,
+						  false, false);
+
+	if (timedout)
+		pr_debug("%s: Microphone did not settle in %d ms\n",
+			 __func__, HS_DETECT_PLUG_TIME_MS);
+	return settled;
+}
+
+/* called only from interrupt which is under codec_resource_lock acquisition */
+static void wcd9xxx_hs_remove_irq_swch(struct wcd9xxx_mbhc *mbhc)
+{
+	pr_debug("%s: enter\n", __func__);
+	if (wcd9xxx_hs_remove_settle(mbhc))
+		wcd9xxx_start_hs_polling(mbhc);
+	pr_debug("%s: leave\n", __func__);
+}
+
+/* called only from interrupt which is under codec_resource_lock acquisition */
+static void wcd9xxx_hs_remove_irq_noswch(struct wcd9xxx_mbhc *mbhc)
+{
+	s16 dce, dcez;
+	unsigned long timeout;
+	bool removed = true;
+	struct snd_soc_codec *codec = mbhc->codec;
+	const struct wcd9xxx_mbhc_general_cfg *generic =
+		WCD9XXX_MBHC_CAL_GENERAL_PTR(mbhc->mbhc_cfg->calibration);
+	bool cs_enable;
+	s16 cur_v_ins_h;
+	u32 mb_mv;
+
+	pr_debug("%s: enter\n", __func__);
+	if (mbhc->current_plug != PLUG_TYPE_HEADSET &&
+		mbhc->current_plug != PLUG_TYPE_ANC_HEADPHONE) {
+		pr_debug("%s(): Headset is not inserted, ignore removal\n",
+			 __func__);
+		snd_soc_update_bits(codec, WCD9XXX_A_CDC_MBHC_CLK_CTL,
+				0x08, 0x08);
+		return;
+	}
+
+	usleep_range(generic->t_shutdown_plug_rem,
+		     generic->t_shutdown_plug_rem +
+		     WCD9XXX_USLEEP_RANGE_MARGIN_US);
+
+	/* If micbias is enabled, don't enable current source */
+	cs_enable = (((mbhc->mbhc_cfg->cs_enable_flags &
+		      (1 << MBHC_CS_ENABLE_REMOVAL)) != 0) &&
+		     (!(snd_soc_read(codec,
+				     mbhc->mbhc_bias_regs.ctl_reg) & 0x80)));
+	if (cs_enable)
+		wcd9xxx_turn_onoff_current_source(mbhc, &mbhc->mbhc_bias_regs,
+						  true, false);
+
+	timeout = jiffies + msecs_to_jiffies(FAKE_REMOVAL_MIN_PERIOD_MS);
+	do {
+		if (cs_enable) {
+			dce = __wcd9xxx_codec_sta_dce(mbhc, 1,  true, true);
+			dcez = mbhc->mbhc_data.dce_nsc_cs_z;
+			mb_mv = VDDIO_MICBIAS_MV;
+		} else {
+			dce = wcd9xxx_codec_sta_dce(mbhc, 1,  true);
+			dcez = mbhc->mbhc_data.dce_z;
+			mb_mv = mbhc->mbhc_data.micb_mv;
+		}
+
+		pr_debug("%s: DCE 0x%x,%d\n", __func__, dce,
+			  __wcd9xxx_codec_sta_dce_v(mbhc, true, dce,
+						    dcez, mb_mv));
+
+		cur_v_ins_h = cs_enable ? (s16) mbhc->mbhc_data.v_cs_ins_h :
+					  (wcd9xxx_get_current_v(mbhc,
+					   WCD9XXX_CURRENT_V_INS_H));
+
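+		/*
+		 * A reading below the insertion threshold means the mic
+		 * line is still pulled down by the plug, so the headset
+		 * was not actually removed
+		 */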
+		if (dce < cur_v_ins_h) {
+			removed = false;
+			break;
+		}
+	} while (!time_after(jiffies, timeout));
+	pr_debug("%s: headset %sactually removed\n", __func__,
+		  removed ? "" : "not ");
+
+	if (cs_enable)
+		wcd9xxx_turn_onoff_current_source(mbhc, &mbhc->mbhc_bias_regs,
+						  false, false);
+
+	if (removed) {
+		if (mbhc->mbhc_cfg->detect_extn_cable) {
+			if (!wcd9xxx_swch_level_remove(mbhc)) {
+				/*
+				 * an extension cable is still plugged in;
+				 * report it as a LINEOUT device
+				 */
+				if (mbhc->hph_status == SND_JACK_HEADSET)
+					wcd9xxx_mbhc_ctrl_clk_bandgap(mbhc,
+							false);
+				wcd9xxx_report_plug(mbhc, 1, SND_JACK_LINEOUT);
+				wcd9xxx_cleanup_hs_polling(mbhc);
+				wcd9xxx_enable_hs_detect(mbhc, 1,
+							 MBHC_USE_MB_TRIGGER,
+							 false);
+			}
+		} else {
+			/* Cancel possibly running hs_detect_work */
+			wcd9xxx_cancel_hs_detect_plug(mbhc,
+						    &mbhc->correct_plug_noswch);
+			/*
+			 * If this removal is not false, first check the micbias
+			 * switch status and switch it to LDOH if it is already
+			 * switched to VDDIO.
+			 */
+			wcd9xxx_switch_micbias(mbhc, 0);
+
+			wcd9xxx_report_plug(mbhc, 0, SND_JACK_HEADSET);
+			wcd9xxx_mbhc_ctrl_clk_bandgap(mbhc, false);
+			wcd9xxx_cleanup_hs_polling(mbhc);
+			wcd9xxx_enable_hs_detect(mbhc, 1, MBHC_USE_MB_TRIGGER |
+							  MBHC_USE_HPHL_TRIGGER,
+						 true);
+		}
+	} else {
+		wcd9xxx_start_hs_polling(mbhc);
+	}
+	pr_debug("%s: leave\n", __func__);
+}
+
+/* called only from interrupt which is under codec_resource_lock acquisition */
+static void wcd9xxx_hs_insert_irq_extn(struct wcd9xxx_mbhc *mbhc,
+				       bool is_mb_trigger)
+{
+	/* Cancel possibly running hs_detect_work */
+	wcd9xxx_cancel_hs_detect_plug(mbhc, &mbhc->correct_plug_swch);
+
+	if (is_mb_trigger) {
+		pr_debug("%s: Waiting for Headphone left trigger\n", __func__);
+		wcd9xxx_enable_hs_detect(mbhc, 1, MBHC_USE_HPHL_TRIGGER, false);
+	} else  {
+		pr_debug("%s: HPHL trigger received, detecting plug type\n",
+			 __func__);
+		wcd9xxx_mbhc_detect_plug_type(mbhc);
+	}
+}
+
+static irqreturn_t wcd9xxx_hs_remove_irq(int irq, void *data)
+{
+	struct wcd9xxx_mbhc *mbhc = data;
+
+	pr_debug("%s: enter, removal interrupt\n", __func__);
+	WCD9XXX_BCL_LOCK(mbhc->resmgr);
+	/*
+	 * While we don't know whether MIC is there or not, let the resmgr know
+	 * so micbias can be disabled temporarily
+	 */
+	if (mbhc->current_plug == PLUG_TYPE_HEADSET) {
+		wcd9xxx_resmgr_cond_update_cond(mbhc->resmgr,
+						WCD9XXX_COND_HPH_MIC, false);
+		wcd9xxx_resmgr_cond_update_cond(mbhc->resmgr,
+						WCD9XXX_COND_HPH, false);
+	} else if (mbhc->current_plug == PLUG_TYPE_HEADPHONE) {
+		wcd9xxx_resmgr_cond_update_cond(mbhc->resmgr,
+						WCD9XXX_COND_HPH, false);
+	}
+
+	if (mbhc->mbhc_cfg->detect_extn_cable &&
+	    !wcd9xxx_swch_level_remove(mbhc))
+		wcd9xxx_hs_remove_irq_noswch(mbhc);
+	else
+		wcd9xxx_hs_remove_irq_swch(mbhc);
+
+	if (mbhc->current_plug == PLUG_TYPE_HEADSET) {
+		wcd9xxx_resmgr_cond_update_cond(mbhc->resmgr,
+						WCD9XXX_COND_HPH, true);
+		wcd9xxx_resmgr_cond_update_cond(mbhc->resmgr,
+						WCD9XXX_COND_HPH_MIC, true);
+	} else if (mbhc->current_plug == PLUG_TYPE_HEADPHONE) {
+		wcd9xxx_resmgr_cond_update_cond(mbhc->resmgr,
+						WCD9XXX_COND_HPH, true);
+	}
+	WCD9XXX_BCL_UNLOCK(mbhc->resmgr);
+
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t wcd9xxx_hs_insert_irq(int irq, void *data)
+{
+	bool is_mb_trigger, is_removal;
+	struct wcd9xxx_mbhc *mbhc = data;
+	struct snd_soc_codec *codec = mbhc->codec;
+
+	pr_debug("%s: enter\n", __func__);
+	WCD9XXX_BCL_LOCK(mbhc->resmgr);
+	wcd9xxx_disable_irq(mbhc->resmgr->core_res, mbhc->intr_ids->insertion);
+
+	is_mb_trigger = !!(snd_soc_read(codec, mbhc->mbhc_bias_regs.mbhc_reg) &
+			   0x10);
+	is_removal = !!(snd_soc_read(codec, WCD9XXX_A_CDC_MBHC_INT_CTL) & 0x02);
+	snd_soc_update_bits(codec, WCD9XXX_A_CDC_MBHC_INT_CTL, 0x03, 0x00);
+
+	/* Turn off both HPH and MIC line schmitt triggers */
+	snd_soc_update_bits(codec, mbhc->mbhc_bias_regs.mbhc_reg, 0x90, 0x00);
+	snd_soc_update_bits(codec, WCD9XXX_A_MBHC_HPH, 0x13, 0x00);
+	snd_soc_update_bits(codec, mbhc->mbhc_bias_regs.ctl_reg, 0x01, 0x00);
+
+	if (mbhc->mbhc_cfg->detect_extn_cable &&
+	    mbhc->current_plug == PLUG_TYPE_HIGH_HPH)
+		wcd9xxx_hs_insert_irq_extn(mbhc, is_mb_trigger);
+	else
+		wcd9xxx_hs_insert_irq_swch(mbhc, is_removal);
+
+	WCD9XXX_BCL_UNLOCK(mbhc->resmgr);
+	return IRQ_HANDLED;
+}
+
+static void wcd9xxx_btn_lpress_fn(struct work_struct *work)
+{
+	struct delayed_work *dwork;
+	short bias_value;
+	int dce_mv, sta_mv;
+	struct wcd9xxx_mbhc *mbhc;
+
+	pr_debug("%s:\n", __func__);
+
+	dwork = to_delayed_work(work);
+	mbhc = container_of(dwork, struct wcd9xxx_mbhc, mbhc_btn_dwork);
+
+	bias_value = wcd9xxx_read_sta_result(mbhc->codec);
+	sta_mv = wcd9xxx_codec_sta_dce_v(mbhc, 0, bias_value);
+
+	bias_value = wcd9xxx_read_dce_result(mbhc->codec);
+	dce_mv = wcd9xxx_codec_sta_dce_v(mbhc, 1, bias_value);
+	pr_debug("%s: STA: %d, DCE: %d\n", __func__, sta_mv, dce_mv);
+
+	pr_debug("%s: Reporting long button press event\n", __func__);
+	wcd9xxx_jack_report(mbhc, &mbhc->button_jack, mbhc->buttons_pressed,
+			    mbhc->buttons_pressed);
+
+	pr_debug("%s: leave\n", __func__);
+	wcd9xxx_unlock_sleep(mbhc->resmgr->core_res);
+}
+
+static void wcd9xxx_mbhc_insert_work(struct work_struct *work)
+{
+	struct delayed_work *dwork;
+	struct wcd9xxx_mbhc *mbhc;
+	struct snd_soc_codec *codec;
+	struct wcd9xxx_core_resource *core_res;
+
+	dwork = to_delayed_work(work);
+	mbhc = container_of(dwork, struct wcd9xxx_mbhc, mbhc_insert_dwork);
+	codec = mbhc->codec;
+	core_res = mbhc->resmgr->core_res;
+
+	pr_debug("%s:\n", __func__);
+
+	/* Turn off both HPH and MIC line schmitt triggers */
+	snd_soc_update_bits(codec, mbhc->mbhc_bias_regs.mbhc_reg, 0x90, 0x00);
+	snd_soc_update_bits(codec, WCD9XXX_A_MBHC_HPH, 0x13, 0x00);
+	snd_soc_update_bits(codec, mbhc->mbhc_bias_regs.ctl_reg, 0x01, 0x00);
+	wcd9xxx_disable_irq_sync(core_res, mbhc->intr_ids->insertion);
+	wcd9xxx_mbhc_detect_plug_type(mbhc);
+	wcd9xxx_unlock_sleep(core_res);
+}
+
+static bool wcd9xxx_mbhc_fw_validate(const void *data, size_t size)
+{
+	u32 cfg_offset;
+	struct wcd9xxx_mbhc_imped_detect_cfg *imped_cfg;
+	struct wcd9xxx_mbhc_btn_detect_cfg *btn_cfg;
+	struct firmware_cal fw;
+
+	fw.data = (void *)data;
+	fw.size = size;
+
+	if (fw.size < WCD9XXX_MBHC_CAL_MIN_SIZE)
+		return false;
+
+	/*
+	 * Previous check guarantees that there is enough fw data up
+	 * to num_btn
+	 */
+	btn_cfg = WCD9XXX_MBHC_CAL_BTN_DET_PTR(fw.data);
+	cfg_offset = (u32) ((void *) btn_cfg - (void *) fw.data);
+	if (fw.size < (cfg_offset + WCD9XXX_MBHC_CAL_BTN_SZ(btn_cfg)))
+		return false;
+
+	/*
+	 * Previous check guarantees that there is enough fw data up
+	 * to start of impedance detection configuration
+	 */
+	imped_cfg = WCD9XXX_MBHC_CAL_IMPED_DET_PTR(fw.data);
+	cfg_offset = (u32) ((void *) imped_cfg - (void *) fw.data);
+
+	if (fw.size < (cfg_offset + WCD9XXX_MBHC_CAL_IMPED_MIN_SZ))
+		return false;
+
+	if (fw.size < (cfg_offset + WCD9XXX_MBHC_CAL_IMPED_SZ(imped_cfg)))
+		return false;
+
+	return true;
+}
+
+static u16 wcd9xxx_codec_v_sta_dce(struct wcd9xxx_mbhc *mbhc,
+				   enum meas_type dce, s16 vin_mv,
+				   bool cs_enable)
+{
+	s16 diff, zero;
+	u32 mb_mv, in;
+	u16 value;
+	s16 dce_z;
+
+	mb_mv = mbhc->mbhc_data.micb_mv;
+	dce_z = mbhc->mbhc_data.dce_z;
+
+	if (mb_mv == 0) {
+		pr_err("%s: Mic Bias voltage is set to zero\n", __func__);
+		return -EINVAL;
+	}
+	if (cs_enable) {
+		mb_mv = VDDIO_MICBIAS_MV;
+		dce_z = mbhc->mbhc_data.dce_nsc_cs_z;
+	}
+
+	if (dce) {
+		diff = (mbhc->mbhc_data.dce_mb) - (dce_z);
+		zero = (dce_z);
+	} else {
+		diff = (mbhc->mbhc_data.sta_mb) - (mbhc->mbhc_data.sta_z);
+		zero = (mbhc->mbhc_data.sta_z);
+	}
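+	/*
+	 * Linear interpolation from millivolts to a raw measurement code:
+	 * value = zero + (full_scale - zero) * vin_mv / mb_mv, where
+	 * full_scale is the reading taken at the micbias voltage mb_mv
+	 */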
+	in = (u32) diff * vin_mv;
+
+	value = (u16) (in / mb_mv) + zero;
+	return value;
+}
+
+static void wcd9xxx_mbhc_calc_thres(struct wcd9xxx_mbhc *mbhc)
+{
+	struct snd_soc_codec *codec;
+	s16 adj_v_hs_max;
+	s16 btn_mv = 0, btn_mv_sta[MBHC_V_IDX_NUM], btn_mv_dce[MBHC_V_IDX_NUM];
+	struct wcd9xxx_mbhc_btn_detect_cfg *btn_det;
+	struct wcd9xxx_mbhc_plug_type_cfg *plug_type;
+	u16 *btn_high;
+	int i;
+
+	pr_debug("%s: enter\n", __func__);
+	codec = mbhc->codec;
+	btn_det = WCD9XXX_MBHC_CAL_BTN_DET_PTR(mbhc->mbhc_cfg->calibration);
+	plug_type = WCD9XXX_MBHC_CAL_PLUG_TYPE_PTR(mbhc->mbhc_cfg->calibration);
+
+	mbhc->mbhc_data.v_ins_hu[MBHC_V_IDX_CFILT] =
+	    wcd9xxx_codec_v_sta_dce(mbhc, STA, plug_type->v_hs_max, false);
+	mbhc->mbhc_data.v_ins_h[MBHC_V_IDX_CFILT] =
+	    wcd9xxx_codec_v_sta_dce(mbhc, DCE, plug_type->v_hs_max, false);
+
+	mbhc->mbhc_data.v_inval_ins_low = FAKE_INS_LOW;
+	mbhc->mbhc_data.v_inval_ins_high = FAKE_INS_HIGH;
+
+	if (mbhc->mbhc_data.micb_mv != VDDIO_MICBIAS_MV) {
+		adj_v_hs_max = scale_v_micb_vddio(mbhc, plug_type->v_hs_max,
+						  true);
+		mbhc->mbhc_data.v_ins_hu[MBHC_V_IDX_VDDIO] =
+		    wcd9xxx_codec_v_sta_dce(mbhc, STA, adj_v_hs_max, false);
+		mbhc->mbhc_data.v_ins_h[MBHC_V_IDX_VDDIO] =
+		    wcd9xxx_codec_v_sta_dce(mbhc, DCE, adj_v_hs_max, false);
+		mbhc->mbhc_data.v_inval_ins_low =
+		    scale_v_micb_vddio(mbhc, mbhc->mbhc_data.v_inval_ins_low,
+				       false);
+		mbhc->mbhc_data.v_inval_ins_high =
+		    scale_v_micb_vddio(mbhc, mbhc->mbhc_data.v_inval_ins_high,
+				       false);
+	}
+	mbhc->mbhc_data.v_cs_ins_h = wcd9xxx_codec_v_sta_dce(mbhc, DCE,
+							WCD9XXX_V_CS_HS_MAX,
+							true);
+	pr_debug("%s: v_ins_h for current source: 0x%x\n", __func__,
+		  mbhc->mbhc_data.v_cs_ins_h);
+
+	btn_high = wcd9xxx_mbhc_cal_btn_det_mp(btn_det,
+					       MBHC_BTN_DET_V_BTN_HIGH);
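+	/* Find the highest per-button upper threshold across all buttons */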
+	for (i = 0; i < btn_det->num_btn; i++)
+		btn_mv = btn_high[i] > btn_mv ? btn_high[i] : btn_mv;
+
+	btn_mv_sta[MBHC_V_IDX_CFILT] = btn_mv + btn_det->v_btn_press_delta_sta;
+	btn_mv_dce[MBHC_V_IDX_CFILT] = btn_mv + btn_det->v_btn_press_delta_cic;
+	btn_mv_sta[MBHC_V_IDX_VDDIO] =
+	    scale_v_micb_vddio(mbhc, btn_mv_sta[MBHC_V_IDX_CFILT], true);
+	btn_mv_dce[MBHC_V_IDX_VDDIO] =
+	    scale_v_micb_vddio(mbhc, btn_mv_dce[MBHC_V_IDX_CFILT], true);
+
+	mbhc->mbhc_data.v_b1_hu[MBHC_V_IDX_CFILT] =
+	    wcd9xxx_codec_v_sta_dce(mbhc, STA, btn_mv_sta[MBHC_V_IDX_CFILT],
+				    false);
+	mbhc->mbhc_data.v_b1_h[MBHC_V_IDX_CFILT] =
+	    wcd9xxx_codec_v_sta_dce(mbhc, DCE, btn_mv_dce[MBHC_V_IDX_CFILT],
+				    false);
+	mbhc->mbhc_data.v_b1_hu[MBHC_V_IDX_VDDIO] =
+	    wcd9xxx_codec_v_sta_dce(mbhc, STA, btn_mv_sta[MBHC_V_IDX_VDDIO],
+				    false);
+	mbhc->mbhc_data.v_b1_h[MBHC_V_IDX_VDDIO] =
+	    wcd9xxx_codec_v_sta_dce(mbhc, DCE, btn_mv_dce[MBHC_V_IDX_VDDIO],
+				    false);
+
+	mbhc->mbhc_data.v_brh[MBHC_V_IDX_CFILT] =
+	    mbhc->mbhc_data.v_b1_h[MBHC_V_IDX_CFILT];
+	mbhc->mbhc_data.v_brh[MBHC_V_IDX_VDDIO] =
+	    mbhc->mbhc_data.v_b1_h[MBHC_V_IDX_VDDIO];
+
+	mbhc->mbhc_data.v_brl = BUTTON_MIN;
+
+	mbhc->mbhc_data.v_no_mic =
+	    wcd9xxx_codec_v_sta_dce(mbhc, STA, plug_type->v_no_mic, false);
+	pr_debug("%s: leave\n", __func__);
+}
+
+static void wcd9xxx_onoff_ext_mclk(struct wcd9xxx_mbhc *mbhc, bool on)
+{
+	/*
+	 * XXX: {codec}_mclk_enable holds WCD9XXX_BCL_LOCK,
+	 * therefore wcd9xxx_onoff_ext_mclk caller SHOULDN'T hold
+	 * WCD9XXX_BCL_LOCK when it calls wcd9xxx_onoff_ext_mclk()
+	 */
+	if (mbhc && mbhc->mbhc_cfg && mbhc->mbhc_cfg->mclk_cb_fn)
+		mbhc->mbhc_cfg->mclk_cb_fn(mbhc->codec, on, false);
+}
+
+/*
+ * Mic Bias Enable Decision
+ * Returns true if high_hph_cnt is a power of two greater than 2,
+ * otherwise false
+ */
+static bool wcd9xxx_mbhc_enable_mb_decision(int high_hph_cnt)
+{
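+	/*
+	 * (x & (x - 1)) clears the lowest set bit, so it is zero only for
+	 * powers of two: counts 4, 8, 16, ... return true, while
+	 * 1, 2, 3, 5, 6, 7 return false
+	 */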
+	return (high_hph_cnt > 2) && !(high_hph_cnt & (high_hph_cnt - 1));
+}
+
+static inline void wcd9xxx_handle_gnd_mic_swap(struct wcd9xxx_mbhc *mbhc,
+					int pt_gnd_mic_swap_cnt,
+					enum wcd9xxx_mbhc_plug_type plug_type)
+{
+	if (mbhc->mbhc_cfg->swap_gnd_mic &&
+	    (pt_gnd_mic_swap_cnt == GND_MIC_SWAP_THRESHOLD)) {
+		/*
+		 * if switch is toggled, check again,
+		 * otherwise report unsupported plug
+		 */
+		mbhc->mbhc_cfg->swap_gnd_mic(mbhc->codec);
+	} else if (pt_gnd_mic_swap_cnt >= GND_MIC_SWAP_THRESHOLD) {
+		/*
+		 * Report UNSUPPORTED plug and continue polling
+		 */
+		WCD9XXX_BCL_LOCK(mbhc->resmgr);
+		if (!mbhc->mbhc_cfg->detect_extn_cable) {
+			if (mbhc->current_plug == PLUG_TYPE_HEADPHONE)
+				wcd9xxx_report_plug(mbhc, 0,
+						    SND_JACK_HEADPHONE);
+			else if (mbhc->current_plug == PLUG_TYPE_HEADSET)
+				wcd9xxx_report_plug(mbhc, 0,
+						    SND_JACK_HEADSET);
+		}
+		if (mbhc->current_plug != plug_type)
+			wcd9xxx_report_plug(mbhc, 1,
+					SND_JACK_UNSUPPORTED);
+		WCD9XXX_BCL_UNLOCK(mbhc->resmgr);
+	}
+}
+
+static void wcd9xxx_correct_swch_plug(struct work_struct *work)
+{
+	struct wcd9xxx_mbhc *mbhc;
+	struct snd_soc_codec *codec;
+	enum wcd9xxx_mbhc_plug_type plug_type = PLUG_TYPE_INVALID;
+	unsigned long timeout;
+	int retry = 0, pt_gnd_mic_swap_cnt = 0;
+	int highhph_cnt = 0;
+	bool correction = false;
+	bool current_source_enable;
+	bool wrk_complete = true, highhph = false;
+
+	pr_debug("%s: enter\n", __func__);
+
+	mbhc = container_of(work, struct wcd9xxx_mbhc, correct_plug_swch);
+	codec = mbhc->codec;
+
+	current_source_enable = (((mbhc->mbhc_cfg->cs_enable_flags &
+		      (1 << MBHC_CS_ENABLE_POLLING)) != 0) &&
+		     (!(snd_soc_read(codec,
+				     mbhc->mbhc_bias_regs.ctl_reg) & 0x80)));
+
+	wcd9xxx_onoff_ext_mclk(mbhc, true);
+
+	/*
+	 * Keep override on during entire plug type correction work.
+	 *
+	 * This is okay under the assumption that any switch irqs which use
+	 * MBHC block cancel and sync this work so override is off again
+	 * prior to switch interrupt handler's MBHC block usage.
+	 * Also while this correction work is running, we can guarantee
+	 * DAPM doesn't use any MBHC block as this work only runs with
+	 * headphone detection.
+	 */
+	if (current_source_enable) {
+		WCD9XXX_BCL_LOCK(mbhc->resmgr);
+		wcd9xxx_turn_onoff_current_source(mbhc, &mbhc->mbhc_bias_regs,
+						  true, false);
+		WCD9XXX_BCL_UNLOCK(mbhc->resmgr);
+	} else {
+		wcd9xxx_turn_onoff_override(mbhc, true);
+	}
+
+	timeout = jiffies + msecs_to_jiffies(HS_DETECT_PLUG_TIME_MS);
+	while (!time_after(jiffies, timeout)) {
+		++retry;
+		rmb();
+		if (mbhc->hs_detect_work_stop) {
+			wrk_complete = false;
+			pr_debug("%s: stop requested\n", __func__);
+			break;
+		}
+
+		msleep(HS_DETECT_PLUG_INERVAL_MS);
+		if (wcd9xxx_swch_level_remove(mbhc)) {
+			wrk_complete = false;
+			pr_debug("%s: Switch level is low\n", __func__);
+			break;
+		}
+
+		/* can race with removal interrupt */
+		WCD9XXX_BCL_LOCK(mbhc->resmgr);
+		if (current_source_enable)
+			plug_type = wcd9xxx_codec_cs_get_plug_type(mbhc,
+								   highhph);
+		else
+			plug_type = wcd9xxx_codec_get_plug_type(mbhc, true);
+		WCD9XXX_BCL_UNLOCK(mbhc->resmgr);
+
+		pr_debug("%s: attempt(%d) current_plug(%d) new_plug(%d)\n",
+			 __func__, retry, mbhc->current_plug, plug_type);
+
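+		/*
+		 * Re-run the mic bias decision only on power-of-two retry
+		 * counts above 2 (4, 8, ...); see
+		 * wcd9xxx_mbhc_enable_mb_decision()
+		 */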
+		highhph_cnt = (plug_type == PLUG_TYPE_HIGH_HPH) ?
+					(highhph_cnt + 1) :
+					0;
+		highhph = wcd9xxx_mbhc_enable_mb_decision(highhph_cnt);
+		if (plug_type == PLUG_TYPE_INVALID) {
+			pr_debug("Invalid plug in attempt # %d\n", retry);
+			if (!mbhc->mbhc_cfg->detect_extn_cable &&
+			    retry == NUM_ATTEMPTS_TO_REPORT &&
+			    mbhc->current_plug == PLUG_TYPE_NONE) {
+				WCD9XXX_BCL_LOCK(mbhc->resmgr);
+				wcd9xxx_report_plug(mbhc, 1,
+						    SND_JACK_HEADPHONE);
+				WCD9XXX_BCL_UNLOCK(mbhc->resmgr);
+			}
+		} else if (plug_type == PLUG_TYPE_HEADPHONE) {
+			pr_debug("Good headphone detected, continue polling\n");
+			WCD9XXX_BCL_LOCK(mbhc->resmgr);
+			if (mbhc->mbhc_cfg->detect_extn_cable) {
+				if (mbhc->current_plug != plug_type)
+					wcd9xxx_report_plug(mbhc, 1,
+							    SND_JACK_HEADPHONE);
+			} else if (mbhc->current_plug == PLUG_TYPE_NONE) {
+				wcd9xxx_report_plug(mbhc, 1,
+						    SND_JACK_HEADPHONE);
+			}
+			WCD9XXX_BCL_UNLOCK(mbhc->resmgr);
+		} else if (plug_type == PLUG_TYPE_HIGH_HPH) {
+			pr_debug("%s: High HPH detected, continue polling\n",
+				  __func__);
+			WCD9XXX_BCL_LOCK(mbhc->resmgr);
+			if (mbhc->mbhc_cfg->detect_extn_cable) {
+				if (mbhc->current_plug != plug_type)
+					wcd9xxx_report_plug(mbhc, 1,
+							    SND_JACK_LINEOUT);
+			} else if (mbhc->current_plug == PLUG_TYPE_NONE) {
+					wcd9xxx_report_plug(mbhc, 1,
+							    SND_JACK_HEADPHONE);
+			}
+			WCD9XXX_BCL_UNLOCK(mbhc->resmgr);
+		} else {
+			if (plug_type == PLUG_TYPE_GND_MIC_SWAP) {
+				pt_gnd_mic_swap_cnt++;
+				if (pt_gnd_mic_swap_cnt >=
+						GND_MIC_SWAP_THRESHOLD)
+					wcd9xxx_handle_gnd_mic_swap(mbhc,
+							pt_gnd_mic_swap_cnt,
+							plug_type);
+				pr_debug("%s: unsupported HS detected, continue polling\n",
+					 __func__);
+				continue;
+			} else {
+				pt_gnd_mic_swap_cnt = 0;
+
+				WCD9XXX_BCL_LOCK(mbhc->resmgr);
+				/* Turn off override/current source */
+				if (current_source_enable)
+					wcd9xxx_turn_onoff_current_source(mbhc,
+							&mbhc->mbhc_bias_regs,
+							false, false);
+				else
+					wcd9xxx_turn_onoff_override(mbhc,
+								    false);
+				/*
+				 * The valid plug also includes
+				 * PLUG_TYPE_GND_MIC_SWAP
+				 */
+				wcd9xxx_find_plug_and_report(mbhc, plug_type);
+				WCD9XXX_BCL_UNLOCK(mbhc->resmgr);
+				pr_debug("Attempt %d found correct plug %d\n",
+						retry,
+						plug_type);
+				correction = true;
+			}
+			break;
+		}
+	}
+
+	highhph = false;
+	if (wrk_complete && plug_type == PLUG_TYPE_HIGH_HPH) {
+		pr_debug("%s: polling is done, still HPH, so enabling MIC trigger\n",
+			 __func__);
+		WCD9XXX_BCL_LOCK(mbhc->resmgr);
+		wcd9xxx_find_plug_and_report(mbhc, plug_type);
+		highhph = true;
+		WCD9XXX_BCL_UNLOCK(mbhc->resmgr);
+	}
+
+	if (plug_type == PLUG_TYPE_HEADPHONE) {
+		if (mbhc->mbhc_cb && mbhc->mbhc_cb->hph_auto_pulldown_ctrl)
+			mbhc->mbhc_cb->hph_auto_pulldown_ctrl(codec, true);
+	}
+
+	if (!correction && current_source_enable) {
+		WCD9XXX_BCL_LOCK(mbhc->resmgr);
+		wcd9xxx_turn_onoff_current_source(mbhc, &mbhc->mbhc_bias_regs,
+						  false, highhph);
+		WCD9XXX_BCL_UNLOCK(mbhc->resmgr);
+	} else if (!correction) {
+		wcd9xxx_turn_onoff_override(mbhc, false);
+	}
+
+	wcd9xxx_onoff_ext_mclk(mbhc, false);
+
+	if (mbhc->mbhc_cfg->detect_extn_cable) {
+		WCD9XXX_BCL_LOCK(mbhc->resmgr);
+		if ((mbhc->current_plug == PLUG_TYPE_HEADPHONE &&
+		    wrk_complete) ||
+		    mbhc->current_plug == PLUG_TYPE_GND_MIC_SWAP ||
+		    mbhc->current_plug == PLUG_TYPE_INVALID ||
+		    (plug_type == PLUG_TYPE_INVALID && wrk_complete)) {
+			/* Enable removal detection */
+			wcd9xxx_cleanup_hs_polling(mbhc);
+			wcd9xxx_enable_hs_detect(mbhc, 0, 0, false);
+		}
+		WCD9XXX_BCL_UNLOCK(mbhc->resmgr);
+	}
+	pr_debug("%s: leave current_plug(%d)\n", __func__, mbhc->current_plug);
+	/* unlock sleep */
+	wcd9xxx_unlock_sleep(mbhc->resmgr->core_res);
+}
+
+static void wcd9xxx_swch_irq_handler(struct wcd9xxx_mbhc *mbhc)
+{
+	bool insert;
+	bool is_removed = false;
+	struct snd_soc_codec *codec = mbhc->codec;
+
+	pr_debug("%s: enter\n", __func__);
+
+	mbhc->in_swch_irq_handler = true;
+	/* Wait here for debounce time */
+	usleep_range(SWCH_IRQ_DEBOUNCE_TIME_US, SWCH_IRQ_DEBOUNCE_TIME_US +
+					WCD9XXX_USLEEP_RANGE_MARGIN_US);
+
+	WCD9XXX_BCL_LOCK(mbhc->resmgr);
+
+	/* cancel pending button press */
+	if (wcd9xxx_cancel_btn_work(mbhc))
+		pr_debug("%s: button press is canceled\n", __func__);
+
+	insert = !wcd9xxx_swch_level_remove(mbhc);
+	pr_debug("%s: Current plug type %d, insert %d\n", __func__,
+		 mbhc->current_plug, insert);
+	if ((mbhc->current_plug == PLUG_TYPE_NONE) && insert) {
+
+		mbhc->lpi_enabled = false;
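+		/* Pairs with the rmb() in wcd9xxx_hs_insert_irq_swch() */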
+		wmb();
+		/* cancel detect plug */
+		wcd9xxx_cancel_hs_detect_plug(mbhc,
+				      &mbhc->correct_plug_swch);
+
+		if ((mbhc->current_plug != PLUG_TYPE_NONE) &&
+		    (mbhc->current_plug != PLUG_TYPE_HIGH_HPH) &&
+		    !(snd_soc_read(codec, WCD9XXX_A_MBHC_INSERT_DETECT) &
+				   (1 << 1))) {
+			pr_debug("%s: current plug: %d\n", __func__,
+				mbhc->current_plug);
+			goto exit;
+		}
+
+		/* Disable Mic Bias pull down and HPH Switch to GND */
+		snd_soc_update_bits(codec, mbhc->mbhc_bias_regs.ctl_reg, 0x01,
+				    0x00);
+		snd_soc_update_bits(codec, WCD9XXX_A_MBHC_HPH, 0x01, 0x00);
+		wcd9xxx_mbhc_detect_plug_type(mbhc);
+	} else if ((mbhc->current_plug != PLUG_TYPE_NONE) && !insert) {
+		mbhc->lpi_enabled = false;
+		wmb();
+		/* cancel detect plug */
+		wcd9xxx_cancel_hs_detect_plug(mbhc,
+				      &mbhc->correct_plug_swch);
+
+		if (mbhc->current_plug == PLUG_TYPE_HEADPHONE) {
+			wcd9xxx_report_plug(mbhc, 0, SND_JACK_HEADPHONE);
+			is_removed = true;
+		} else if (mbhc->current_plug == PLUG_TYPE_GND_MIC_SWAP) {
+			wcd9xxx_report_plug(mbhc, 0, SND_JACK_UNSUPPORTED);
+			is_removed = true;
+		} else if (mbhc->current_plug == PLUG_TYPE_HEADSET) {
+			wcd9xxx_pause_hs_polling(mbhc);
+			wcd9xxx_mbhc_ctrl_clk_bandgap(mbhc, false);
+			wcd9xxx_cleanup_hs_polling(mbhc);
+			wcd9xxx_report_plug(mbhc, 0, SND_JACK_HEADSET);
+			is_removed = true;
+		} else if (mbhc->current_plug == PLUG_TYPE_HIGH_HPH) {
+			wcd9xxx_report_plug(mbhc, 0, SND_JACK_LINEOUT);
+			is_removed = true;
+		} else if (mbhc->current_plug == PLUG_TYPE_ANC_HEADPHONE) {
+			wcd9xxx_pause_hs_polling(mbhc);
+			wcd9xxx_mbhc_ctrl_clk_bandgap(mbhc, false);
+			wcd9xxx_cleanup_hs_polling(mbhc);
+			wcd9xxx_report_plug(mbhc, 0, SND_JACK_ANC_HEADPHONE);
+			is_removed = true;
+		}
+
+		if (is_removed) {
+			snd_soc_write(codec, WCD9XXX_A_MBHC_SCALING_MUX_1,
+				      0x00);
+			snd_soc_update_bits(codec, WCD9XXX_A_CDC_MBHC_B1_CTL,
+					    0x02, 0x00);
+
+			/* Enable Mic Bias pull down and HPH Switch to GND */
+			snd_soc_update_bits(codec,
+					mbhc->mbhc_bias_regs.ctl_reg, 0x01,
+					0x01);
+			snd_soc_update_bits(codec, WCD9XXX_A_MBHC_HPH, 0x01,
+					0x01);
+			/* Make sure mic trigger is turned off */
+			snd_soc_update_bits(codec, mbhc->mbhc_bias_regs.ctl_reg,
+					    0x01, 0x01);
+			snd_soc_update_bits(codec,
+					    mbhc->mbhc_bias_regs.mbhc_reg,
+					    0x90, 0x00);
+			/* Reset MBHC State Machine */
+			snd_soc_update_bits(codec, WCD9XXX_A_CDC_MBHC_CLK_CTL,
+					    0x08, 0x08);
+			snd_soc_update_bits(codec, WCD9XXX_A_CDC_MBHC_CLK_CTL,
+					    0x08, 0x00);
+			/* Turn off override */
+			wcd9xxx_turn_onoff_override(mbhc, false);
+		}
+	}
+exit:
+	mbhc->in_swch_irq_handler = false;
+	WCD9XXX_BCL_UNLOCK(mbhc->resmgr);
+	pr_debug("%s: leave\n", __func__);
+}
+
+static irqreturn_t wcd9xxx_mech_plug_detect_irq(int irq, void *data)
+{
+	int r = IRQ_HANDLED;
+	struct wcd9xxx_mbhc *mbhc = data;
+
+	pr_debug("%s: enter\n", __func__);
+	if (unlikely(wcd9xxx_lock_sleep(mbhc->resmgr->core_res) == false)) {
+		pr_warn("%s: failed to hold suspend\n", __func__);
+		r = IRQ_NONE;
+	} else {
+		/* Call handler */
+		wcd9xxx_swch_irq_handler(mbhc);
+		wcd9xxx_unlock_sleep(mbhc->resmgr->core_res);
+	}
+
+	pr_debug("%s: leave %d\n", __func__, r);
+	return r;
+}
+
+static int wcd9xxx_is_false_press(struct wcd9xxx_mbhc *mbhc)
+{
+	s16 mb_v;
+	int i = 0;
+	int r = 0;
+	const s16 v_ins_hu =
+	    wcd9xxx_get_current_v(mbhc, WCD9XXX_CURRENT_V_INS_HU);
+	const s16 v_ins_h =
+	    wcd9xxx_get_current_v(mbhc, WCD9XXX_CURRENT_V_INS_H);
+	const s16 v_b1_hu =
+	    wcd9xxx_get_current_v(mbhc, WCD9XXX_CURRENT_V_B1_HU);
+	const s16 v_b1_h =
+	    wcd9xxx_get_current_v(mbhc, WCD9XXX_CURRENT_V_B1_H);
+	const unsigned long timeout =
+	    jiffies + msecs_to_jiffies(BTN_RELEASE_DEBOUNCE_TIME_MS);
+
+	while (time_before(jiffies, timeout)) {
+		/*
+		 * This function only needs to run measurements a few times
+		 * during the release debounce time.  Use a 1 ms interval to
+		 * avoid unnecessarily frequent measurements.
+		 */
+		usleep_range(1000, 1000 + WCD9XXX_USLEEP_RANGE_MARGIN_US);
+		if (i == 0) {
+			mb_v = wcd9xxx_codec_sta_dce(mbhc, 0, true);
+			pr_debug("%s: STA[0]: %d,%d\n", __func__, mb_v,
+				 wcd9xxx_codec_sta_dce_v(mbhc, 0, mb_v));
+			if (mb_v < v_b1_hu || mb_v > v_ins_hu) {
+				r = 1;
+				break;
+			}
+		} else {
+			mb_v = wcd9xxx_codec_sta_dce(mbhc, 1, true);
+			pr_debug("%s: DCE[%d]: %d,%d\n", __func__, i, mb_v,
+				 wcd9xxx_codec_sta_dce_v(mbhc, 1, mb_v));
+			if (mb_v < v_b1_h || mb_v > v_ins_h) {
+				r = 1;
+				break;
+			}
+		}
+		i++;
+	}
+
+	return r;
+}
+
+/* called under codec_resource_lock acquisition */
+static int wcd9xxx_determine_button(const struct wcd9xxx_mbhc *mbhc,
+				  const s32 micmv)
+{
+	s16 *v_btn_low, *v_btn_high;
+	struct wcd9xxx_mbhc_btn_detect_cfg *btn_det;
+	int i, btn = -1;
+
+	btn_det = WCD9XXX_MBHC_CAL_BTN_DET_PTR(mbhc->mbhc_cfg->calibration);
+	v_btn_low = wcd9xxx_mbhc_cal_btn_det_mp(btn_det,
+						MBHC_BTN_DET_V_BTN_LOW);
+	v_btn_high = wcd9xxx_mbhc_cal_btn_det_mp(btn_det,
+						 MBHC_BTN_DET_V_BTN_HIGH);
+
+	for (i = 0; i < btn_det->num_btn; i++) {
+		if ((v_btn_low[i] <= micmv) && (v_btn_high[i] >= micmv)) {
+			btn = i;
+			break;
+		}
+	}
+
+	if (btn == -1)
+		pr_debug("%s: couldn't find button number for mic mv %d\n",
+			 __func__, micmv);
+
+	return btn;
+}
+
+static int wcd9xxx_get_button_mask(const int btn)
+{
+	int mask = 0;
+
+	switch (btn) {
+	case 0:
+		mask = SND_JACK_BTN_0;
+		break;
+	case 1:
+		mask = SND_JACK_BTN_1;
+		break;
+	case 2:
+		mask = SND_JACK_BTN_2;
+		break;
+	case 3:
+		mask = SND_JACK_BTN_3;
+		break;
+	case 4:
+		mask = SND_JACK_BTN_4;
+		break;
+	case 5:
+		mask = SND_JACK_BTN_5;
+		break;
+	}
+	return mask;
+}
+
+static void wcd9xxx_get_z(struct wcd9xxx_mbhc *mbhc, s16 *dce_z, s16 *sta_z,
+			  struct mbhc_micbias_regs *micb_regs,
+			  bool norel_detection)
+{
+	s16 reg0, reg1;
+	int change;
+	struct snd_soc_codec *codec = mbhc->codec;
+
+	WCD9XXX_BCL_ASSERT_LOCKED(mbhc->resmgr);
+	/* Pull down micbias to ground and disconnect vddio switch */
+	reg0 = snd_soc_read(codec, micb_regs->ctl_reg);
+	snd_soc_update_bits(codec, micb_regs->ctl_reg, 0x81, 0x1);
+	reg1 = snd_soc_read(codec, micb_regs->mbhc_reg);
+	snd_soc_update_bits(codec, micb_regs->mbhc_reg, 1 << 7, 0);
+
+	/* Disconnect override from micbias */
+	change = snd_soc_update_bits(codec, WCD9XXX_A_MAD_ANA_CTRL, 1 << 4,
+				     1 << 0);
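+	/* Let the line settle before sampling the zero-input (z) codes */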
+	usleep_range(1000, 1000 + 1000);
+	if (sta_z) {
+		*sta_z = wcd9xxx_codec_sta_dce(mbhc, 0, norel_detection);
+		pr_debug("%s: sta_z 0x%x\n", __func__, *sta_z & 0xFFFF);
+	}
+	if (dce_z) {
+		*dce_z = wcd9xxx_codec_sta_dce(mbhc, 1, norel_detection);
+		pr_debug("%s: dce_z 0x%x\n", __func__, *dce_z & 0xFFFF);
+	}
+
+	/* Connect override from micbias */
+	if (change)
+		snd_soc_update_bits(codec, WCD9XXX_A_MAD_ANA_CTRL, 1 << 4,
+				    1 << 4);
+	/* Disable pull down micbias to ground */
+	snd_soc_write(codec, micb_regs->mbhc_reg, reg1);
+	snd_soc_write(codec, micb_regs->ctl_reg, reg0);
+}
+
+/*
+ * This function recalibrates the dce_z and sta_z parameters.
+ * Measurements are taken with norel_detection set to false, i.e.
+ * with release detection left enabled.
+ */
+void wcd9xxx_update_z(struct wcd9xxx_mbhc *mbhc)
+{
+	const u16 sta_z = mbhc->mbhc_data.sta_z;
+	const u16 dce_z = mbhc->mbhc_data.dce_z;
+
+	wcd9xxx_get_z(mbhc, &mbhc->mbhc_data.dce_z, &mbhc->mbhc_data.sta_z,
+		      &mbhc->mbhc_bias_regs, false);
+	pr_debug("%s: sta_z 0x%x,dce_z 0x%x -> sta_z 0x%x,dce_z 0x%x\n",
+		 __func__, sta_z & 0xFFFF, dce_z & 0xFFFF,
+		 mbhc->mbhc_data.sta_z & 0xFFFF,
+		 mbhc->mbhc_data.dce_z & 0xFFFF);
+
+	wcd9xxx_mbhc_calc_thres(mbhc);
+	wcd9xxx_calibrate_hs_polling(mbhc);
+}
+
+/*
+ * wcd9xxx_update_rel_threshold : update mbhc release upper bound threshold
+ *				  to ceilmv + buffer
+ */
+static int wcd9xxx_update_rel_threshold(struct wcd9xxx_mbhc *mbhc, int ceilmv,
+					bool vddio)
+{
+	u16 v_brh, v_b1_hu;
+	int mv;
+	struct wcd9xxx_mbhc_btn_detect_cfg *btn_det;
+	void *calibration = mbhc->mbhc_cfg->calibration;
+	struct snd_soc_codec *codec = mbhc->codec;
+
+	btn_det = WCD9XXX_MBHC_CAL_BTN_DET_PTR(calibration);
+	mv = ceilmv + btn_det->v_btn_press_delta_cic;
+	if (vddio)
+		mv = scale_v_micb_vddio(mbhc, mv, true);
+	pr_debug("%s: reprogram vb1hu/vbrh to %dmv\n", __func__, mv);
+
+	if (mbhc->mbhc_state != MBHC_STATE_POTENTIAL_RECOVERY) {
+		/*
+		 * Update the LSB first so the mbhc hardware block
+		 * doesn't see a momentarily too-low value.
+		 */
+		v_b1_hu = wcd9xxx_codec_v_sta_dce(mbhc, STA, mv, false);
+		snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_VOLT_B3_CTL, v_b1_hu &
+				0xFF);
+		snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_VOLT_B4_CTL,
+				(v_b1_hu >> 8) & 0xFF);
+		v_brh = wcd9xxx_codec_v_sta_dce(mbhc, DCE, mv, false);
+		snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_VOLT_B9_CTL, v_brh &
+				0xFF);
+		snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_VOLT_B10_CTL,
+				(v_brh >> 8) & 0xFF);
+	}
+	return 0;
+}
+
+irqreturn_t wcd9xxx_dce_handler(int irq, void *data)
+{
+	int i, mask;
+	bool vddio;
+	u8 mbhc_status;
+	s16 dce_z, sta_z;
+	s32 stamv, stamv_s;
+	s16 *v_btn_high;
+	struct wcd9xxx_mbhc_btn_detect_cfg *btn_det;
+	int btn = -1, meas = 0;
+	struct wcd9xxx_mbhc *mbhc = data;
+	const struct wcd9xxx_mbhc_btn_detect_cfg *d =
+	    WCD9XXX_MBHC_CAL_BTN_DET_PTR(mbhc->mbhc_cfg->calibration);
+	short btnmeas[d->n_btn_meas + 1];
+	short dce[d->n_btn_meas + 1], sta;
+	s32 mv[d->n_btn_meas + 1], mv_s[d->n_btn_meas + 1];
+	struct snd_soc_codec *codec = mbhc->codec;
+	struct wcd9xxx_core_resource *core_res = mbhc->resmgr->core_res;
+	int n_btn_meas = d->n_btn_meas;
+	void *calibration = mbhc->mbhc_cfg->calibration;
+
+	pr_debug("%s: enter\n", __func__);
+
+	WCD9XXX_BCL_LOCK(mbhc->resmgr);
+	mutex_lock(&mbhc->mbhc_lock);
+	mbhc_status = snd_soc_read(codec, WCD9XXX_A_CDC_MBHC_B1_STATUS) & 0x3E;
+
+	if (mbhc->mbhc_state == MBHC_STATE_POTENTIAL_RECOVERY) {
+		pr_debug("%s: mbhc is being recovered, skip button press\n",
+			 __func__);
+		goto done;
+	}
+
+	mbhc->mbhc_state = MBHC_STATE_POTENTIAL;
+
+	if (!mbhc->polling_active) {
+		pr_warn("%s: mbhc polling is not active, skip button press\n",
+			__func__);
+		goto done;
+	}
+
+	/* If the switch interrupt already kicked in, ignore button press */
+	if (mbhc->in_swch_irq_handler) {
+		pr_debug("%s: Switch level changed, ignore button press\n",
+			 __func__);
+		btn = -1;
+		goto done;
+	}
+
+	/*
+	 * Set up the internal micbias if the codec uses an internal
+	 * micbias for headset detection
+	 */
+	if (mbhc->mbhc_cfg->use_int_rbias) {
+		if (mbhc->mbhc_cb && mbhc->mbhc_cb->setup_int_rbias)
+			mbhc->mbhc_cb->setup_int_rbias(codec, true);
+		else
+			pr_err("%s: internal bias requested but codec did not provide callback\n",
+				__func__);
+	}
+
+	/* Measure scaled HW DCE */
+	vddio = (mbhc->mbhc_data.micb_mv != VDDIO_MICBIAS_MV &&
+		 mbhc->mbhc_micbias_switched);
+
+	dce_z = mbhc->mbhc_data.dce_z;
+	sta_z = mbhc->mbhc_data.sta_z;
+
+	/* Measure scaled HW STA */
+	dce[0] = wcd9xxx_read_dce_result(codec);
+	sta = wcd9xxx_read_sta_result(codec);
+	if (mbhc_status != STATUS_REL_DETECTION) {
+		if (mbhc->mbhc_last_resume &&
+		    !time_after(jiffies, mbhc->mbhc_last_resume + HZ)) {
+			pr_debug("%s: Button is released after resume\n",
+				__func__);
+			n_btn_meas = 0;
+		} else {
+			pr_debug("%s: Button is released without resume\n",
+				 __func__);
+			if (mbhc->update_z) {
+				wcd9xxx_update_z(mbhc);
+				dce_z = mbhc->mbhc_data.dce_z;
+				sta_z = mbhc->mbhc_data.sta_z;
+				/* one-shot request: clear after recalibrating */
+				mbhc->update_z = false;
+			}
+			stamv = __wcd9xxx_codec_sta_dce_v(mbhc, 0, sta, sta_z,
+						mbhc->mbhc_data.micb_mv);
+			if (vddio)
+				stamv_s = scale_v_micb_vddio(mbhc, stamv,
+							     false);
+			else
+				stamv_s = stamv;
+			mv[0] = __wcd9xxx_codec_sta_dce_v(mbhc, 1, dce[0],
+					  dce_z, mbhc->mbhc_data.micb_mv);
+			mv_s[0] = vddio ? scale_v_micb_vddio(mbhc, mv[0],
+							     false) : mv[0];
+			btn = wcd9xxx_determine_button(mbhc, mv_s[0]);
+			if (btn != wcd9xxx_determine_button(mbhc, stamv_s))
+				btn = -1;
+			goto done;
+		}
+	}
+
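+	/* Collect n_btn_meas additional DCE samples for press debouncing */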
+	for (meas = 1; ((d->n_btn_meas) && (meas < (d->n_btn_meas + 1)));
+	     meas++)
+		dce[meas] = wcd9xxx_codec_sta_dce(mbhc, 1, false);
+
+	if (mbhc->update_z) {
+		wcd9xxx_update_z(mbhc);
+		dce_z = mbhc->mbhc_data.dce_z;
+		sta_z = mbhc->mbhc_data.sta_z;
+		/* one-shot request: clear after recalibrating */
+		mbhc->update_z = false;
+	}
+
+	stamv = __wcd9xxx_codec_sta_dce_v(mbhc, 0, sta, sta_z,
+					  mbhc->mbhc_data.micb_mv);
+	if (vddio)
+		stamv_s = scale_v_micb_vddio(mbhc, stamv, false);
+	else
+		stamv_s = stamv;
+	pr_debug("%s: Meas HW - STA 0x%x,%d,%d\n", __func__,
+		 sta & 0xFFFF, stamv, stamv_s);
+
+	/* determine pressed button */
+	mv[0] = __wcd9xxx_codec_sta_dce_v(mbhc, 1, dce[0], dce_z,
+					  mbhc->mbhc_data.micb_mv);
+	mv_s[0] = vddio ? scale_v_micb_vddio(mbhc, mv[0], false) : mv[0];
+	btnmeas[0] = wcd9xxx_determine_button(mbhc, mv_s[0]);
+	pr_debug("%s: Meas HW - DCE 0x%x,%d,%d button %d\n", __func__,
+		 dce[0] & 0xFFFF, mv[0], mv_s[0], btnmeas[0]);
+	if (n_btn_meas == 0)
+		btn = btnmeas[0];
+	for (meas = 1; (n_btn_meas && d->n_btn_meas &&
+			(meas < (d->n_btn_meas + 1))); meas++) {
+		mv[meas] = __wcd9xxx_codec_sta_dce_v(mbhc, 1, dce[meas], dce_z,
+						     mbhc->mbhc_data.micb_mv);
+		mv_s[meas] = vddio ? scale_v_micb_vddio(mbhc, mv[meas], false) :
+				     mv[meas];
+		btnmeas[meas] = wcd9xxx_determine_button(mbhc, mv_s[meas]);
+		pr_debug("%s: Meas %d - DCE 0x%x,%d,%d button %d\n",
+			 __func__, meas, dce[meas] & 0xFFFF, mv[meas],
+			 mv_s[meas], btnmeas[meas]);
+		/*
+		 * Once enough measurements have been collected, check
+		 * whether the last n_btn_con measurements all fell in the
+		 * same button low/high range
+		 */
+		if (meas + 1 >= d->n_btn_con) {
+			for (i = 0; i < d->n_btn_con; i++)
+				if ((btnmeas[meas] < 0) ||
+				    (btnmeas[meas] != btnmeas[meas - i]))
+					break;
+			if (i == d->n_btn_con) {
+				/* button pressed */
+				btn = btnmeas[meas];
+				break;
+			} else if ((n_btn_meas - meas) < (d->n_btn_con - 1)) {
+				/*
+				 * if fewer than n_btn_con measurements
+				 * remain, it's impossible to determine the
+				 * button number
+				 */
+				break;
+			}
+		}
+	}
+
+	if (btn >= 0) {
+		if (mbhc->in_swch_irq_handler) {
+			pr_debug("%s: Switch irq triggered, ignore button press\n",
+				 __func__);
+			goto done;
+		}
+		btn_det = WCD9XXX_MBHC_CAL_BTN_DET_PTR(calibration);
+		v_btn_high = wcd9xxx_mbhc_cal_btn_det_mp(btn_det,
+						       MBHC_BTN_DET_V_BTN_HIGH);
+		WARN_ON(btn >= btn_det->num_btn);
+		/* reprogram release threshold to catch voltage ramp up early */
+		wcd9xxx_update_rel_threshold(mbhc, v_btn_high[btn], vddio);
+
+		mask = wcd9xxx_get_button_mask(btn);
+		mbhc->buttons_pressed |= mask;
+		wcd9xxx_lock_sleep(core_res);
+		if (schedule_delayed_work(&mbhc->mbhc_btn_dwork,
+					  msecs_to_jiffies(400)) == 0) {
+			WARN(1, "Button pressed twice without release event\n");
+			wcd9xxx_unlock_sleep(core_res);
+		}
+	} else {
+		pr_debug("%s: bogus button press, too short press?\n",
+			 __func__);
+	}
+
+ done:
+	pr_debug("%s: leave\n", __func__);
+	mutex_unlock(&mbhc->mbhc_lock);
+	WCD9XXX_BCL_UNLOCK(mbhc->resmgr);
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t wcd9xxx_release_handler(int irq, void *data)
+{
+	int ret;
+	bool waitdebounce = true;
+	struct wcd9xxx_mbhc *mbhc = data;
+
+	pr_debug("%s: enter\n", __func__);
+	WCD9XXX_BCL_LOCK(mbhc->resmgr);
+	mbhc->mbhc_state = MBHC_STATE_RELEASE;
+
+	if (mbhc->buttons_pressed & WCD9XXX_JACK_BUTTON_MASK) {
+		ret = wcd9xxx_cancel_btn_work(mbhc);
+		if (ret == 0) {
+			pr_debug("%s: Reporting long button release event\n",
+				 __func__);
+			wcd9xxx_jack_report(mbhc, &mbhc->button_jack, 0,
+					    mbhc->buttons_pressed);
+		} else {
+			if (wcd9xxx_is_false_press(mbhc)) {
+				pr_debug("%s: Fake button press interrupt\n",
+					 __func__);
+			} else {
+				if (mbhc->in_swch_irq_handler) {
+					pr_debug("%s: Switch irq kicked in, ignore\n",
+						 __func__);
+				} else {
+					pr_debug("%s: Reporting btn press\n",
+						 __func__);
+					wcd9xxx_jack_report(mbhc,
+							 &mbhc->button_jack,
+							 mbhc->buttons_pressed,
+							 mbhc->buttons_pressed);
+					pr_debug("%s: Reporting btn release\n",
+						 __func__);
+					wcd9xxx_jack_report(mbhc,
+						      &mbhc->button_jack,
+						      0, mbhc->buttons_pressed);
+					waitdebounce = false;
+				}
+			}
+		}
+
+		mbhc->buttons_pressed &= ~WCD9XXX_JACK_BUTTON_MASK;
+	}
+
+	wcd9xxx_calibrate_hs_polling(mbhc);
+
+	if (waitdebounce)
+		msleep(SWCH_REL_DEBOUNCE_TIME_MS);
+	wcd9xxx_start_hs_polling(mbhc);
+
+	pr_debug("%s: leave\n", __func__);
+	WCD9XXX_BCL_UNLOCK(mbhc->resmgr);
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t wcd9xxx_hphl_ocp_irq(int irq, void *data)
+{
+	struct wcd9xxx_mbhc *mbhc = data;
+	struct snd_soc_codec *codec;
+
+	pr_info("%s: received HPHL OCP irq\n", __func__);
+
+	if (mbhc) {
+		codec = mbhc->codec;
+		if ((mbhc->hphlocp_cnt < OCP_ATTEMPT) &&
+		    (!mbhc->hphrocp_cnt)) {
+			pr_info("%s: retry\n", __func__);
+			mbhc->hphlocp_cnt++;
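+			/* Toggle the OCP control bit to re-arm detection */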
+			snd_soc_update_bits(codec, WCD9XXX_A_RX_HPH_OCP_CTL,
+					    0x10, 0x00);
+			snd_soc_update_bits(codec, WCD9XXX_A_RX_HPH_OCP_CTL,
+					    0x10, 0x10);
+		} else {
+			wcd9xxx_disable_irq(mbhc->resmgr->core_res,
+					  mbhc->intr_ids->hph_left_ocp);
+			mbhc->hph_status |= SND_JACK_OC_HPHL;
+			wcd9xxx_jack_report(mbhc, &mbhc->headset_jack,
+					    mbhc->hph_status,
+					    WCD9XXX_JACK_MASK);
+		}
+	} else {
+		pr_err("%s: Bad wcd9xxx private data\n", __func__);
+	}
+
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t wcd9xxx_hphr_ocp_irq(int irq, void *data)
+{
+	struct wcd9xxx_mbhc *mbhc = data;
+	struct snd_soc_codec *codec;
+
+	pr_info("%s: received HPHR OCP irq\n", __func__);
+	codec = mbhc->codec;
+	if ((mbhc->hphrocp_cnt < OCP_ATTEMPT) &&
+	    (!mbhc->hphlocp_cnt)) {
+		pr_info("%s: retry\n", __func__);
+		mbhc->hphrocp_cnt++;
+		snd_soc_update_bits(codec, WCD9XXX_A_RX_HPH_OCP_CTL, 0x10,
+				    0x00);
+		snd_soc_update_bits(codec, WCD9XXX_A_RX_HPH_OCP_CTL, 0x10,
+				    0x10);
+	} else {
+		wcd9xxx_disable_irq(mbhc->resmgr->core_res,
+				    mbhc->intr_ids->hph_right_ocp);
+		mbhc->hph_status |= SND_JACK_OC_HPHR;
+		wcd9xxx_jack_report(mbhc, &mbhc->headset_jack,
+				    mbhc->hph_status, WCD9XXX_JACK_MASK);
+	}
+
+	return IRQ_HANDLED;
+}
+
+static int wcd9xxx_acdb_mclk_index(const int rate)
+{
+	if (rate == MCLK_RATE_12288KHZ)
+		return 0;
+	else if (rate == MCLK_RATE_9600KHZ)
+		return 1;
+	else {
+		BUG_ON(1);
+		return -EINVAL;
+	}
+}
+
+static void wcd9xxx_update_mbhc_clk_rate(struct wcd9xxx_mbhc *mbhc, u32 rate)
+{
+	u32 dce_wait, sta_wait;
+	u8 ncic, nmeas, navg;
+	void *calibration;
+	u8 *n_cic, *n_ready;
+	struct wcd9xxx_mbhc_btn_detect_cfg *btn_det;
+	u8 npoll = 4, nbounce_wait = 30;
+	struct snd_soc_codec *codec = mbhc->codec;
+	int idx = wcd9xxx_acdb_mclk_index(rate);
+	int idxmclk = wcd9xxx_acdb_mclk_index(mbhc->mbhc_cfg->mclk_rate);
+
+	pr_debug("%s: Updating clock rate dependents, rate = %u\n", __func__,
+		 rate);
+	calibration = mbhc->mbhc_cfg->calibration;
+
+	/*
+	 * First compute the DCE / STA wait times depending on tunable
+	 * parameters. The value is computed in microseconds
+	 */
+	btn_det = WCD9XXX_MBHC_CAL_BTN_DET_PTR(calibration);
+	n_ready = wcd9xxx_mbhc_cal_btn_det_mp(btn_det, MBHC_BTN_DET_N_READY);
+	n_cic = wcd9xxx_mbhc_cal_btn_det_mp(btn_det, MBHC_BTN_DET_N_CIC);
+	nmeas = WCD9XXX_MBHC_CAL_BTN_DET_PTR(calibration)->n_meas;
+	navg = WCD9XXX_MBHC_CAL_GENERAL_PTR(calibration)->mbhc_navg;
+
+	/* ncic stays the same as what we had during calibration */
+	ncic = n_cic[idxmclk];
+	dce_wait = (1000 * 512 * ncic * (nmeas + 1)) / (rate / 1000);
+	sta_wait = (1000 * 128 * (navg + 1)) / (rate / 1000);
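+	/*
+	 * i.e. 512 MCLK cycles per CIC stage for each of the (nmeas + 1)
+	 * DCE measurements and 128 cycles per averaging step for STA,
+	 * converted to microseconds (rate is in Hz)
+	 */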
+	mbhc->mbhc_data.t_dce = dce_wait;
+	/* give extra margin to sta for safety */
+	mbhc->mbhc_data.t_sta = sta_wait + 250;
+	mbhc->mbhc_data.t_sta_dce = ((1000 * 256) / (rate / 1000) *
+				     n_ready[idx]) + 10;
+
+	snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_TIMER_B1_CTL, n_ready[idx]);
+	snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_TIMER_B6_CTL, ncic);
+
+	if (rate == MCLK_RATE_12288KHZ) {
+		npoll = 4;
+		nbounce_wait = 30;
+	} else if (rate == MCLK_RATE_9600KHZ) {
+		npoll = 3;
+		nbounce_wait = 23;
+	}
+
+	snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_TIMER_B2_CTL, npoll);
+	snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_TIMER_B3_CTL, nbounce_wait);
+	pr_debug("%s: leave\n", __func__);
+}
+
+static void wcd9xxx_mbhc_cal(struct wcd9xxx_mbhc *mbhc)
+{
+	u8 cfilt_mode;
+	u16 reg0, reg1, reg2;
+	struct snd_soc_codec *codec = mbhc->codec;
+
+	pr_debug("%s: enter\n", __func__);
+	wcd9xxx_disable_irq(mbhc->resmgr->core_res,
+			    mbhc->intr_ids->dce_est_complete);
+	wcd9xxx_turn_onoff_rel_detection(codec, false);
+
+	/* t_dce and t_sta are updated by wcd9xxx_update_mbhc_clk_rate() */
+	WARN_ON(!mbhc->mbhc_data.t_dce);
+	WARN_ON(!mbhc->mbhc_data.t_sta);
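+	/*
+	 * The sequence below samples the ADC at 0 V (dce_z/sta_z) and at
+	 * the micbias voltage (dce_mb/sta_mb); plug detection later
+	 * interpolates between these two points to convert codes to mV
+	 */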
+
+	/*
+	 * LDOH and CFILT are already configured during pdata handling.
+	 * Only need to make sure CFILT and bandgap are in Fast mode.
+	 * Need to restore defaults once calculation is done.
+	 *
+	 * When the micbias is powered by an external source, request that
+	 * the external voltage source be turned on for calibration.
+	 */
+	if (mbhc->mbhc_cb && mbhc->mbhc_cb->enable_mb_source)
+		mbhc->mbhc_cb->enable_mb_source(codec, true, false);
+
+	cfilt_mode = snd_soc_read(codec, mbhc->mbhc_bias_regs.cfilt_ctl);
+	if (mbhc->mbhc_cb && mbhc->mbhc_cb->cfilt_fast_mode)
+		mbhc->mbhc_cb->cfilt_fast_mode(codec, mbhc);
+	else
+		snd_soc_update_bits(codec, mbhc->mbhc_bias_regs.cfilt_ctl,
+				    0x40, 0x00);
+
+	if (mbhc->mbhc_cb && mbhc->mbhc_cb->micbias_pulldown_ctrl)
+		mbhc->mbhc_cb->micbias_pulldown_ctrl(mbhc, false);
+
+	/*
+	 * Micbias, CFILT, LDOH, MBHC MUX mode settings
+	 * to perform ADC calibration
+	 */
+	if (mbhc->mbhc_cb && mbhc->mbhc_cb->select_cfilt)
+		mbhc->mbhc_cb->select_cfilt(codec, mbhc);
+	else
+		snd_soc_update_bits(codec, mbhc->mbhc_bias_regs.ctl_reg, 0x60,
+				    mbhc->mbhc_cfg->micbias << 5);
+	snd_soc_update_bits(codec, mbhc->mbhc_bias_regs.ctl_reg, 0x01, 0x00);
+	snd_soc_update_bits(codec, WCD9XXX_A_LDO_H_MODE_1, 0x60, 0x60);
+	snd_soc_write(codec, WCD9XXX_A_TX_7_MBHC_TEST_CTL, 0x78);
+	if (mbhc->mbhc_cb && mbhc->mbhc_cb->codec_specific_cal)
+		mbhc->mbhc_cb->codec_specific_cal(codec, mbhc);
+	else
+		snd_soc_update_bits(codec, WCD9XXX_A_CDC_MBHC_B1_CTL,
+				    0x04, 0x04);
+
+	/* Pull down micbias to ground */
+	reg0 = snd_soc_read(codec, mbhc->mbhc_bias_regs.ctl_reg);
+	snd_soc_update_bits(codec, mbhc->mbhc_bias_regs.ctl_reg, 1, 1);
+	/* Disconnect override from micbias */
+	reg1 = snd_soc_read(codec, WCD9XXX_A_MAD_ANA_CTRL);
+	snd_soc_update_bits(codec, WCD9XXX_A_MAD_ANA_CTRL, 1 << 4, 1 << 0);
+	/* Connect the MUX to micbias */
+	snd_soc_write(codec, WCD9XXX_A_MBHC_SCALING_MUX_1, 0x02);
+	if (mbhc->mbhc_cb && mbhc->mbhc_cb->enable_mux_bias_block)
+		mbhc->mbhc_cb->enable_mux_bias_block(codec);
+	else
+		snd_soc_update_bits(codec, WCD9XXX_A_MBHC_SCALING_MUX_1,
+				    0x80, 0x80);
+	/*
+	 * Hardware with an external cap can delay micbias ramp-down by up
+	 * to 50 ms.
+	 */
+	msleep(WCD9XXX_MUX_SWITCH_READY_WAIT_MS);
+	/* DCE measurement for 0 voltage */
+	snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_CLK_CTL, 0x0A);
+	snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_CLK_CTL, 0x02);
+	mbhc->mbhc_data.dce_z = __wcd9xxx_codec_sta_dce(mbhc, 1, true, false);
+
+	/* compute dce_z for current source */
+	reg2 = snd_soc_read(codec, WCD9XXX_A_CDC_MBHC_B1_CTL);
+	snd_soc_update_bits(codec, WCD9XXX_A_CDC_MBHC_B1_CTL, 0x78,
+			    WCD9XXX_MBHC_NSC_CS << 3);
+
+	snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_CLK_CTL, 0x0A);
+	snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_CLK_CTL, 0x02);
+	mbhc->mbhc_data.dce_nsc_cs_z = __wcd9xxx_codec_sta_dce(mbhc, 1, true,
+							       false);
+	pr_debug("%s: dce_z with nsc cs: 0x%x\n", __func__,
+						 mbhc->mbhc_data.dce_nsc_cs_z);
+
+	snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_B1_CTL, reg2);
+
+	/* STA measurement for 0 voltage */
+	snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_CLK_CTL, 0x0A);
+	snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_CLK_CTL, 0x02);
+	mbhc->mbhc_data.sta_z = __wcd9xxx_codec_sta_dce(mbhc, 0, true, false);
+
+	/* Restore registers */
+	snd_soc_write(codec, mbhc->mbhc_bias_regs.ctl_reg, reg0);
+	snd_soc_write(codec, WCD9XXX_A_MAD_ANA_CTRL, reg1);
+
+	/* DCE measurement for MB voltage */
+	snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_CLK_CTL, 0x0A);
+	snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_CLK_CTL, 0x02);
+	snd_soc_write(codec, WCD9XXX_A_MBHC_SCALING_MUX_1, 0x02);
+	if (mbhc->mbhc_cb && mbhc->mbhc_cb->enable_mux_bias_block)
+		mbhc->mbhc_cb->enable_mux_bias_block(codec);
+	else
+		snd_soc_update_bits(codec, WCD9XXX_A_MBHC_SCALING_MUX_1,
+				    0x80, 0x80);
+	/*
+	 * Hardware with an external cap can delay micbias ramp-down by up
+	 * to 50 ms.
+	 */
+	msleep(WCD9XXX_MUX_SWITCH_READY_WAIT_MS);
+	snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_EN_CTL, 0x04);
+	usleep_range(mbhc->mbhc_data.t_dce, mbhc->mbhc_data.t_dce +
+					WCD9XXX_USLEEP_RANGE_MARGIN_US);
+	mbhc->mbhc_data.dce_mb = wcd9xxx_read_dce_result(codec);
+
+	/* STA Measurement for MB Voltage */
+	snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_CLK_CTL, 0x0A);
+	snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_EN_CTL, 0x02);
+	snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_CLK_CTL, 0x02);
+	snd_soc_write(codec, WCD9XXX_A_MBHC_SCALING_MUX_1, 0x02);
+	if (mbhc->mbhc_cb && mbhc->mbhc_cb->enable_mux_bias_block)
+		mbhc->mbhc_cb->enable_mux_bias_block(codec);
+	else
+		snd_soc_update_bits(codec, WCD9XXX_A_MBHC_SCALING_MUX_1,
+				    0x80, 0x80);
+	/*
+	 * Hardware with an external cap can delay micbias ramp-down by up
+	 * to 50 ms.
+	 */
+	msleep(WCD9XXX_MUX_SWITCH_READY_WAIT_MS);
+	snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_EN_CTL, 0x02);
+	usleep_range(mbhc->mbhc_data.t_sta, mbhc->mbhc_data.t_sta +
+					WCD9XXX_USLEEP_RANGE_MARGIN_US);
+	mbhc->mbhc_data.sta_mb = wcd9xxx_read_sta_result(codec);
+
+	/* Restore default settings. */
+	snd_soc_update_bits(codec, WCD9XXX_A_CDC_MBHC_B1_CTL, 0x04, 0x00);
+	snd_soc_write(codec, mbhc->mbhc_bias_regs.cfilt_ctl, cfilt_mode);
+	snd_soc_write(codec, WCD9XXX_A_MBHC_SCALING_MUX_1, 0x04);
+	if (mbhc->mbhc_cb && mbhc->mbhc_cb->enable_mux_bias_block)
+		mbhc->mbhc_cb->enable_mux_bias_block(codec);
+	else
+		snd_soc_update_bits(codec, WCD9XXX_A_MBHC_SCALING_MUX_1,
+				    0x80, 0x80);
+	usleep_range(100, 110);
+
+	if (mbhc->mbhc_cb && mbhc->mbhc_cb->enable_mb_source)
+		mbhc->mbhc_cb->enable_mb_source(codec, false, false);
+
+	if (mbhc->mbhc_cb && mbhc->mbhc_cb->micbias_pulldown_ctrl)
+		mbhc->mbhc_cb->micbias_pulldown_ctrl(mbhc, true);
+
+	wcd9xxx_enable_irq(mbhc->resmgr->core_res,
+			   mbhc->intr_ids->dce_est_complete);
+	wcd9xxx_turn_onoff_rel_detection(codec, true);
+
+	pr_debug("%s: leave\n", __func__);
+}
+
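+/*
+ * Program the MBHC block from the calibration blob: the eight button
+ * detection FIR coefficients, the measurement counts (nc, n_meas, navg),
+ * the scaling mux gain for the configured MCLK rate and the NSC setting.
+ */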
+static void wcd9xxx_mbhc_setup(struct wcd9xxx_mbhc *mbhc)
+{
+	int n;
+	u8 *gain;
+	struct wcd9xxx_mbhc_general_cfg *generic;
+	struct wcd9xxx_mbhc_btn_detect_cfg *btn_det;
+	struct snd_soc_codec *codec = mbhc->codec;
+	const int idx = wcd9xxx_acdb_mclk_index(mbhc->mbhc_cfg->mclk_rate);
+
+	pr_debug("%s: enter\n", __func__);
+	generic = WCD9XXX_MBHC_CAL_GENERAL_PTR(mbhc->mbhc_cfg->calibration);
+	btn_det = WCD9XXX_MBHC_CAL_BTN_DET_PTR(mbhc->mbhc_cfg->calibration);
+
+	for (n = 0; n < 8; n++) {
+		snd_soc_update_bits(codec, WCD9XXX_A_CDC_MBHC_FIR_B1_CFG,
+				    0x07, n);
+		snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_FIR_B2_CFG,
+			      btn_det->c[n]);
+	}
+
+	snd_soc_update_bits(codec, WCD9XXX_A_CDC_MBHC_B2_CTL, 0x07,
+			    btn_det->nc);
+
+	snd_soc_update_bits(codec, WCD9XXX_A_CDC_MBHC_TIMER_B4_CTL, 0x70,
+			    generic->mbhc_nsa << 4);
+
+	snd_soc_update_bits(codec, WCD9XXX_A_CDC_MBHC_TIMER_B4_CTL, 0x0F,
+			    btn_det->n_meas);
+
+	snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_TIMER_B5_CTL,
+		      generic->mbhc_navg);
+
+	snd_soc_update_bits(codec, WCD9XXX_A_CDC_MBHC_B1_CTL, 0x80, 0x80);
+
+	snd_soc_update_bits(codec, WCD9XXX_A_CDC_MBHC_B1_CTL, 0x78,
+			    btn_det->mbhc_nsc << 3);
+
+	if (mbhc->mbhc_cb && mbhc->mbhc_cb->get_cdc_type &&
+			mbhc->mbhc_cb->get_cdc_type() !=
+					WCD9XXX_CDC_TYPE_HELICON) {
+		if (mbhc->resmgr->reg_addr->micb_4_mbhc)
+			snd_soc_update_bits(codec,
+					mbhc->resmgr->reg_addr->micb_4_mbhc,
+					0x03, MBHC_MICBIAS2);
+	}
+
+	snd_soc_update_bits(codec, WCD9XXX_A_CDC_MBHC_B1_CTL, 0x02, 0x02);
+
+	snd_soc_update_bits(codec, WCD9XXX_A_MBHC_SCALING_MUX_2, 0xF0, 0xF0);
+
+	gain = wcd9xxx_mbhc_cal_btn_det_mp(btn_det, MBHC_BTN_DET_GAIN);
+	snd_soc_update_bits(codec, WCD9XXX_A_CDC_MBHC_B2_CTL, 0x78,
+			    gain[idx] << 3);
+	snd_soc_update_bits(codec, WCD9XXX_A_MICB_2_MBHC, 0x04, 0x04);
+
+	pr_debug("%s: leave\n", __func__);
+}
+
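+/*
+ * Set up mechanical insertion detection: either a wake-capable GPIO
+ * interrupt, or the codec's own insert-detect interrupt with the HPHL
+ * 10K pull switch enabled.
+ */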
+static int wcd9xxx_setup_jack_detect_irq(struct wcd9xxx_mbhc *mbhc)
+{
+	int ret = 0;
+	void *core_res = mbhc->resmgr->core_res;
+
+	if (mbhc->mbhc_cfg->gpio) {
+		ret = request_threaded_irq(mbhc->mbhc_cfg->gpio_irq, NULL,
+					   wcd9xxx_mech_plug_detect_irq,
+					   (IRQF_TRIGGER_RISING |
+					    IRQF_TRIGGER_FALLING),
+					   "headset detect", mbhc);
+		if (ret) {
+			pr_err("%s: Failed to request gpio irq %d\n", __func__,
+			       mbhc->mbhc_cfg->gpio_irq);
+		} else {
+			ret = enable_irq_wake(mbhc->mbhc_cfg->gpio_irq);
+			if (ret)
+				pr_err("%s: Failed to enable wake up irq %d\n",
+				       __func__, mbhc->mbhc_cfg->gpio_irq);
+		}
+	} else if (mbhc->mbhc_cfg->insert_detect) {
+		/* Enable HPHL_10K_SW */
+		snd_soc_update_bits(mbhc->codec, WCD9XXX_A_RX_HPH_OCP_CTL,
+				    1 << 1, 1 << 1);
+
+		ret = wcd9xxx_request_irq(core_res,
+					  mbhc->intr_ids->hs_jack_switch,
+					  wcd9xxx_mech_plug_detect_irq,
+					  "Jack Detect",
+					  mbhc);
+		if (ret)
+			pr_err("%s: Failed to request insert detect irq %d\n",
+				__func__, mbhc->intr_ids->hs_jack_switch);
+	}
+
+	return ret;
+}
+
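+/*
+ * One-time MBHC setup and calibration run under MCLK, followed by arming
+ * the HPH OCP interrupts and mechanical plug detection. With GPIO based
+ * detection, a bootup plug check is performed right away.
+ */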
+static int wcd9xxx_init_and_calibrate(struct wcd9xxx_mbhc *mbhc)
+{
+	int ret = 0;
+	struct snd_soc_codec *codec = mbhc->codec;
+
+	pr_debug("%s: enter\n", __func__);
+
+	/* Enable MCLK during calibration */
+	wcd9xxx_onoff_ext_mclk(mbhc, true);
+	wcd9xxx_mbhc_setup(mbhc);
+	wcd9xxx_mbhc_cal(mbhc);
+	wcd9xxx_mbhc_calc_thres(mbhc);
+	wcd9xxx_onoff_ext_mclk(mbhc, false);
+	wcd9xxx_calibrate_hs_polling(mbhc);
+
+	/* Enable Mic Bias pull down and HPH Switch to GND */
+	snd_soc_update_bits(codec, mbhc->mbhc_bias_regs.ctl_reg, 0x01, 0x01);
+	snd_soc_update_bits(codec, WCD9XXX_A_MBHC_HPH, 0x01, 0x01);
+	INIT_WORK(&mbhc->correct_plug_swch, wcd9xxx_correct_swch_plug);
+
+	if (!IS_ERR_VALUE(ret)) {
+		snd_soc_update_bits(codec, WCD9XXX_A_RX_HPH_OCP_CTL, 0x10,
+				    0x10);
+		wcd9xxx_enable_irq(mbhc->resmgr->core_res,
+				   mbhc->intr_ids->hph_left_ocp);
+		wcd9xxx_enable_irq(mbhc->resmgr->core_res,
+				   mbhc->intr_ids->hph_right_ocp);
+
+		/* Initialize mechanical mbhc */
+		ret = wcd9xxx_setup_jack_detect_irq(mbhc);
+
+		if (!ret && mbhc->mbhc_cfg->gpio) {
+			/* Requested with IRQF_DISABLED */
+			enable_irq(mbhc->mbhc_cfg->gpio_irq);
+
+			/* Bootup time detection */
+			wcd9xxx_swch_irq_handler(mbhc);
+		} else if (!ret && mbhc->mbhc_cfg->insert_detect) {
+			pr_debug("%s: Setting up codec own insert detection\n",
+				 __func__);
+			/* Setup for insertion detection */
+			wcd9xxx_insert_detect_setup(mbhc, true);
+		}
+	}
+
+	pr_debug("%s: leave\n", __func__);
+
+	return ret;
+}
+
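+/*
+ * Delayed work to fetch MBHC calibration: try the hwdep cal blob first,
+ * fall back to request_firmware(), and after FW_READ_ATTEMPTS failures
+ * continue with the built-in default calibration.
+ */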
+static void wcd9xxx_mbhc_fw_read(struct work_struct *work)
+{
+	struct delayed_work *dwork;
+	struct wcd9xxx_mbhc *mbhc;
+	struct snd_soc_codec *codec;
+	const struct firmware *fw;
+	struct firmware_cal *fw_data = NULL;
+	int ret = -1, retry = 0;
+	bool use_default_cal = false;
+
+	dwork = to_delayed_work(work);
+	mbhc = container_of(dwork, struct wcd9xxx_mbhc, mbhc_firmware_dwork);
+	codec = mbhc->codec;
+
+	while (retry < FW_READ_ATTEMPTS) {
+		retry++;
+		pr_info("%s:Attempt %d to request MBHC firmware\n",
+				__func__, retry);
+		if (mbhc->mbhc_cb->get_hwdep_fw_cal)
+			fw_data = mbhc->mbhc_cb->get_hwdep_fw_cal(codec,
+					WCD9XXX_MBHC_CAL);
+		if (!fw_data)
+			ret = request_firmware(&fw, "wcd9320/wcd9320_mbhc.bin",
+					codec->dev);
+		/*
+		 * If both the hwdep cal lookup and request_firmware() fail,
+		 * retry a few times before bailing out.
+		 */
+		if ((ret != 0) && !fw_data) {
+			usleep_range(FW_READ_TIMEOUT, FW_READ_TIMEOUT +
+					WCD9XXX_USLEEP_RANGE_MARGIN_US);
+		} else {
+			pr_info("%s: MBHC Firmware read succesful\n",
+					__func__);
+			break;
+		}
+	}
+	if (!fw_data)
+		pr_info("%s: using request_firmware\n", __func__);
+	else
+		pr_info("%s: using hwdep cal\n", __func__);
+	if (ret != 0 && !fw_data) {
+		pr_err("%s: Cannot load MBHC firmware use default cal\n",
+				__func__);
+		use_default_cal = true;
+	}
+	if (!use_default_cal) {
+		const void *data;
+		size_t size;
+
+		if (fw_data) {
+			data = fw_data->data;
+			size = fw_data->size;
+		} else {
+			data = fw->data;
+			size = fw->size;
+		}
+		if (!wcd9xxx_mbhc_fw_validate(data, size)) {
+			pr_err("%s: Invalid MBHC cal data size, using default cal\n",
+			       __func__);
+			if (!fw_data)
+				release_firmware(fw);
+		} else {
+			if (fw_data) {
+				mbhc->mbhc_cfg->calibration =
+						(void *)fw_data->data;
+				mbhc->mbhc_cal = fw_data;
+			} else {
+				mbhc->mbhc_cfg->calibration =
+						(void *)fw->data;
+				mbhc->mbhc_fw = fw;
+			}
+		}
+	}
+
+	(void) wcd9xxx_init_and_calibrate(mbhc);
+}
+
+#ifdef CONFIG_DEBUG_FS
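+/*
+ * Dump the internal calibration data and current detection thresholds,
+ * both as raw register values and in millivolts, through debugfs.
+ */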
+ssize_t codec_mbhc_debug_read(struct file *file, char __user *buf,
+			      size_t count, loff_t *pos)
+{
+	const int size = 768;
+	char buffer[size];
+	int n = 0;
+	struct wcd9xxx_mbhc *mbhc = file->private_data;
+	const struct mbhc_internal_cal_data *p = &mbhc->mbhc_data;
+	const s16 v_ins_hu =
+	    wcd9xxx_get_current_v(mbhc, WCD9XXX_CURRENT_V_INS_HU);
+	const s16 v_ins_h =
+	    wcd9xxx_get_current_v(mbhc, WCD9XXX_CURRENT_V_INS_H);
+	const s16 v_b1_hu =
+	    wcd9xxx_get_current_v(mbhc, WCD9XXX_CURRENT_V_B1_HU);
+	const s16 v_b1_h =
+	    wcd9xxx_get_current_v(mbhc, WCD9XXX_CURRENT_V_B1_H);
+	const s16 v_br_h =
+	    wcd9xxx_get_current_v(mbhc, WCD9XXX_CURRENT_V_BR_H);
+
+	n = scnprintf(buffer, size - n, "dce_z = %x(%dmv)\n",
+		      p->dce_z, wcd9xxx_codec_sta_dce_v(mbhc, 1, p->dce_z));
+	n += scnprintf(buffer + n, size - n, "dce_mb = %x(%dmv)\n",
+		       p->dce_mb, wcd9xxx_codec_sta_dce_v(mbhc, 1, p->dce_mb));
+	n += scnprintf(buffer + n, size - n, "dce_nsc_cs_z = %x(%dmv)\n",
+		      p->dce_nsc_cs_z,
+		      __wcd9xxx_codec_sta_dce_v(mbhc, 1, p->dce_nsc_cs_z,
+						p->dce_nsc_cs_z,
+						VDDIO_MICBIAS_MV));
+	n += scnprintf(buffer + n, size - n, "sta_z = %x(%dmv)\n",
+		       p->sta_z, wcd9xxx_codec_sta_dce_v(mbhc, 0, p->sta_z));
+	n += scnprintf(buffer + n, size - n, "sta_mb = %x(%dmv)\n",
+		       p->sta_mb, wcd9xxx_codec_sta_dce_v(mbhc, 0, p->sta_mb));
+	n += scnprintf(buffer + n, size - n, "t_dce = %d\n",  p->t_dce);
+	n += scnprintf(buffer + n, size - n, "t_sta = %d\n",  p->t_sta);
+	n += scnprintf(buffer + n, size - n, "micb_mv = %dmv\n", p->micb_mv);
+	n += scnprintf(buffer + n, size - n, "v_ins_hu = %x(%dmv)\n",
+		       v_ins_hu, wcd9xxx_codec_sta_dce_v(mbhc, 0, v_ins_hu));
+	n += scnprintf(buffer + n, size - n, "v_ins_h = %x(%dmv)\n",
+		       v_ins_h, wcd9xxx_codec_sta_dce_v(mbhc, 1, v_ins_h));
+	n += scnprintf(buffer + n, size - n, "v_b1_hu = %x(%dmv)\n",
+		       v_b1_hu, wcd9xxx_codec_sta_dce_v(mbhc, 0, v_b1_hu));
+	n += scnprintf(buffer + n, size - n, "v_b1_h = %x(%dmv)\n",
+		       v_b1_h, wcd9xxx_codec_sta_dce_v(mbhc, 1, v_b1_h));
+	n += scnprintf(buffer + n, size - n, "v_brh = %x(%dmv)\n",
+		       v_br_h, wcd9xxx_codec_sta_dce_v(mbhc, 1, v_br_h));
+	n += scnprintf(buffer + n, size - n, "v_brl = %x(%dmv)\n",  p->v_brl,
+		       wcd9xxx_codec_sta_dce_v(mbhc, 0, p->v_brl));
+	n += scnprintf(buffer + n, size - n, "v_no_mic = %x(%dmv)\n",
+		       p->v_no_mic,
+		       wcd9xxx_codec_sta_dce_v(mbhc, 0, p->v_no_mic));
+	n += scnprintf(buffer + n, size - n, "v_inval_ins_low = %d\n",
+		       p->v_inval_ins_low);
+	n += scnprintf(buffer + n, size - n, "v_inval_ins_high = %d\n",
+		       p->v_inval_ins_high);
+	n += scnprintf(buffer + n, size - n, "Insert detect insert = %d\n",
+		       !wcd9xxx_swch_level_remove(mbhc));
+	buffer[n] = 0;
+
+	return simple_read_from_buffer(buf, count, pos, buffer, n);
+}
+
+static int codec_debug_open(struct inode *inode, struct file *file)
+{
+	file->private_data = inode->i_private;
+	return 0;
+}
+
+static ssize_t codec_debug_write(struct file *filp,
+				 const char __user *ubuf, size_t cnt,
+				 loff_t *ppos)
+{
+	char lbuf[32];
+	char *buf;
+	int rc;
+	struct wcd9xxx_mbhc *mbhc = filp->private_data;
+
+	if (cnt > sizeof(lbuf) - 1)
+		return -EINVAL;
+
+	rc = copy_from_user(lbuf, ubuf, cnt);
+	if (rc)
+		return -EFAULT;
+
+	lbuf[cnt] = '\0';
+	buf = (char *)lbuf;
+	mbhc->no_mic_headset_override = (*strsep(&buf, " ") == '0') ?
+					     false : true;
+	/* Return the number of bytes consumed so userspace does not retry. */
+	return cnt;
+}
+
+static const struct file_operations mbhc_trrs_debug_ops = {
+	.open = codec_debug_open,
+	.write = codec_debug_write,
+};
+
+static const struct file_operations mbhc_debug_ops = {
+	.open = codec_debug_open,
+	.read = codec_mbhc_debug_read,
+};
+
+static void wcd9xxx_init_debugfs(struct wcd9xxx_mbhc *mbhc)
+{
+	mbhc->debugfs_poke =
+	    debugfs_create_file("TRRS", S_IFREG | S_IRUGO, NULL, mbhc,
+				&mbhc_trrs_debug_ops);
+	mbhc->debugfs_mbhc =
+	    debugfs_create_file("wcd9xxx_mbhc", S_IFREG | S_IRUGO,
+				NULL, mbhc, &mbhc_debug_ops);
+}
+
+static void wcd9xxx_cleanup_debugfs(struct wcd9xxx_mbhc *mbhc)
+{
+	debugfs_remove(mbhc->debugfs_poke);
+	debugfs_remove(mbhc->debugfs_mbhc);
+}
+#else
+static void wcd9xxx_init_debugfs(struct wcd9xxx_mbhc *mbhc)
+{
+}
+
+static void wcd9xxx_cleanup_debugfs(struct wcd9xxx_mbhc *mbhc)
+{
+}
+#endif
+
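+/*
+ * Map the platform-provided key_code[] table onto SND_JACK_BTN_0..5 and
+ * declare the matching EV_KEY capabilities on the jack input device.
+ */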
+int wcd9xxx_mbhc_set_keycode(struct wcd9xxx_mbhc *mbhc)
+{
+	enum snd_jack_types type = SND_JACK_BTN_0;
+	int i, ret, result = 0;
+	int *btn_key_code;
+
+	btn_key_code = mbhc->mbhc_cfg->key_code;
+
+	for (i = 0; i < 8; i++) {
+		if (btn_key_code[i] != 0) {
+			switch (i) {
+			case 0:
+				type = SND_JACK_BTN_0;
+				break;
+			case 1:
+				type = SND_JACK_BTN_1;
+				break;
+			case 2:
+				type = SND_JACK_BTN_2;
+				break;
+			case 3:
+				type = SND_JACK_BTN_3;
+				break;
+			case 4:
+				type = SND_JACK_BTN_4;
+				break;
+			case 5:
+				type = SND_JACK_BTN_5;
+				break;
+			default:
+				WARN_ONCE(1, "Wrong button number:%d\n", i);
+				result = -1;
+				break;
+			}
+			ret = snd_jack_set_key(mbhc->button_jack.jack,
+					       type,
+					       btn_key_code[i]);
+			if (ret) {
+				pr_err("%s: Failed to set code for %d\n",
+					__func__, btn_key_code[i]);
+				result = -1;
+			}
+			input_set_capability(
+				mbhc->button_jack.jack->input_dev,
+				EV_KEY, btn_key_code[i]);
+			pr_debug("%s: set btn%d key code:%d\n", __func__,
+				i, btn_key_code[i]);
+		}
+	}
+	return result;
+}
+
+int wcd9xxx_mbhc_start(struct wcd9xxx_mbhc *mbhc,
+		       struct wcd9xxx_mbhc_config *mbhc_cfg)
+{
+	int rc = 0;
+	struct snd_soc_codec *codec = mbhc->codec;
+
+	pr_debug("%s: enter\n", __func__);
+
+	if (!codec) {
+		pr_err("%s: no codec\n", __func__);
+		return -EINVAL;
+	}
+
+	if (mbhc_cfg->mclk_rate != MCLK_RATE_12288KHZ &&
+	    mbhc_cfg->mclk_rate != MCLK_RATE_9600KHZ) {
+		pr_err("Error: unsupported clock rate %d\n",
+		       mbhc_cfg->mclk_rate);
+		return -EINVAL;
+	}
+
+	/* Save mbhc config */
+	mbhc->mbhc_cfg = mbhc_cfg;
+
+	/* Set btn key code */
+	if (wcd9xxx_mbhc_set_keycode(mbhc))
+		pr_err("Set btn key code error!!!\n");
+
+	/* Get HW specific mbhc registers' address */
+	wcd9xxx_get_mbhc_micbias_regs(mbhc, MBHC_PRIMARY_MIC_MB);
+
+	/* Get HW specific mbhc registers' address for anc */
+	wcd9xxx_get_mbhc_micbias_regs(mbhc, MBHC_ANC_MIC_MB);
+
+	/* Put CFILT in fast mode by default */
+	if (mbhc->mbhc_cb && mbhc->mbhc_cb->cfilt_fast_mode)
+		mbhc->mbhc_cb->cfilt_fast_mode(codec, mbhc);
+	else
+		snd_soc_update_bits(codec, mbhc->mbhc_bias_regs.cfilt_ctl,
+				    0x40, WCD9XXX_CFILT_FAST_MODE);
+
+	/*
+	 * setup internal micbias if codec uses internal micbias for
+	 * headset detection
+	 */
+	if (mbhc->mbhc_cfg->use_int_rbias) {
+		if (mbhc->mbhc_cb && mbhc->mbhc_cb->setup_int_rbias) {
+			mbhc->mbhc_cb->setup_int_rbias(codec, true);
+		} else {
+			pr_info("%s: internal bias requested but codec did not provide callback\n",
+				__func__);
+		}
+	}
+
+	/*
+	 * If codec has specific clock gating for MBHC,
+	 * remove the clock gate
+	 */
+	if (mbhc->mbhc_cb &&
+			mbhc->mbhc_cb->enable_clock_gate)
+		mbhc->mbhc_cb->enable_clock_gate(mbhc->codec, true);
+
+	if (!mbhc->mbhc_cfg->read_fw_bin ||
+	    (mbhc->mbhc_cfg->read_fw_bin && mbhc->mbhc_fw) ||
+	    (mbhc->mbhc_cfg->read_fw_bin && mbhc->mbhc_cal)) {
+		rc = wcd9xxx_init_and_calibrate(mbhc);
+	} else {
+		if (!mbhc->mbhc_fw || !mbhc->mbhc_cal)
+			schedule_delayed_work(&mbhc->mbhc_firmware_dwork,
+					     usecs_to_jiffies(FW_READ_TIMEOUT));
+		else
+			pr_debug("%s: Skipping to read mbhc fw, 0x%pK %pK\n",
+				 __func__, mbhc->mbhc_fw, mbhc->mbhc_cal);
+	}
+
+	pr_debug("%s: leave %d\n", __func__, rc);
+	return rc;
+}
+EXPORT_SYMBOL(wcd9xxx_mbhc_start);
+
+void wcd9xxx_mbhc_stop(struct wcd9xxx_mbhc *mbhc)
+{
+	if (mbhc->mbhc_fw || mbhc->mbhc_cal) {
+		cancel_delayed_work_sync(&mbhc->mbhc_firmware_dwork);
+		if (!mbhc->mbhc_cal)
+			release_firmware(mbhc->mbhc_fw);
+		mbhc->mbhc_fw = NULL;
+		mbhc->mbhc_cal = NULL;
+	}
+}
+EXPORT_SYMBOL(wcd9xxx_mbhc_stop);
+
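+/* Translate a resmgr micbias notification event to its micbias line. */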
+static enum wcd9xxx_micbias_num
+wcd9xxx_event_to_micbias(const enum wcd9xxx_notify_event event)
+{
+	enum wcd9xxx_micbias_num ret;
+	switch (event) {
+	case WCD9XXX_EVENT_PRE_MICBIAS_1_ON:
+	case WCD9XXX_EVENT_PRE_MICBIAS_1_OFF:
+	case WCD9XXX_EVENT_POST_MICBIAS_1_ON:
+	case WCD9XXX_EVENT_POST_MICBIAS_1_OFF:
+		ret = MBHC_MICBIAS1;
+		break;
+	case WCD9XXX_EVENT_PRE_MICBIAS_2_ON:
+	case WCD9XXX_EVENT_PRE_MICBIAS_2_OFF:
+	case WCD9XXX_EVENT_POST_MICBIAS_2_ON:
+	case WCD9XXX_EVENT_POST_MICBIAS_2_OFF:
+		ret = MBHC_MICBIAS2;
+		break;
+	case WCD9XXX_EVENT_PRE_MICBIAS_3_ON:
+	case WCD9XXX_EVENT_PRE_MICBIAS_3_OFF:
+	case WCD9XXX_EVENT_POST_MICBIAS_3_ON:
+	case WCD9XXX_EVENT_POST_MICBIAS_3_OFF:
+		ret = MBHC_MICBIAS3;
+		break;
+	case WCD9XXX_EVENT_PRE_MICBIAS_4_ON:
+	case WCD9XXX_EVENT_PRE_MICBIAS_4_OFF:
+	case WCD9XXX_EVENT_POST_MICBIAS_4_ON:
+	case WCD9XXX_EVENT_POST_MICBIAS_4_OFF:
+		ret = MBHC_MICBIAS4;
+		break;
+	default:
+		WARN_ONCE(1, "Cannot convert event %d to micbias\n", event);
+		ret = MBHC_MICBIAS_INVALID;
+		break;
+	}
+	return ret;
+}
+
+static int wcd9xxx_event_to_cfilt(const enum wcd9xxx_notify_event event)
+{
+	int ret;
+	switch (event) {
+	case WCD9XXX_EVENT_PRE_CFILT_1_OFF:
+	case WCD9XXX_EVENT_POST_CFILT_1_OFF:
+	case WCD9XXX_EVENT_PRE_CFILT_1_ON:
+	case WCD9XXX_EVENT_POST_CFILT_1_ON:
+		ret = WCD9XXX_CFILT1_SEL;
+		break;
+	case WCD9XXX_EVENT_PRE_CFILT_2_OFF:
+	case WCD9XXX_EVENT_POST_CFILT_2_OFF:
+	case WCD9XXX_EVENT_PRE_CFILT_2_ON:
+	case WCD9XXX_EVENT_POST_CFILT_2_ON:
+		ret = WCD9XXX_CFILT2_SEL;
+		break;
+	case WCD9XXX_EVENT_PRE_CFILT_3_OFF:
+	case WCD9XXX_EVENT_POST_CFILT_3_OFF:
+	case WCD9XXX_EVENT_PRE_CFILT_3_ON:
+	case WCD9XXX_EVENT_POST_CFILT_3_ON:
+		ret = WCD9XXX_CFILT3_SEL;
+		break;
+	default:
+		ret = -1;
+	}
+	return ret;
+}
+
+static int wcd9xxx_get_mbhc_cfilt_sel(struct wcd9xxx_mbhc *mbhc)
+{
+	int cfilt;
+	const struct wcd9xxx_micbias_setting *mb_pdata =
+		mbhc->resmgr->micbias_pdata;
+
+	switch (mbhc->mbhc_cfg->micbias) {
+	case MBHC_MICBIAS1:
+		cfilt = mb_pdata->bias1_cfilt_sel;
+		break;
+	case MBHC_MICBIAS2:
+		cfilt = mb_pdata->bias2_cfilt_sel;
+		break;
+	case MBHC_MICBIAS3:
+		cfilt = mb_pdata->bias3_cfilt_sel;
+		break;
+	case MBHC_MICBIAS4:
+		cfilt = mb_pdata->bias4_cfilt_sel;
+		break;
+	default:
+		cfilt = MBHC_MICBIAS_INVALID;
+		break;
+	}
+	return cfilt;
+}
+
+static void wcd9xxx_enable_mbhc_txfe(struct wcd9xxx_mbhc *mbhc, bool on)
+{
+	if (mbhc->mbhc_cb && mbhc->mbhc_cb->enable_mbhc_txfe)
+		mbhc->mbhc_cb->enable_mbhc_txfe(mbhc->codec, on);
+	else
+		snd_soc_update_bits(mbhc->codec, WCD9XXX_A_TX_7_MBHC_TEST_CTL,
+				    0x40, on ? 0x40 : 0x00);
+}
+
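+/*
+ * Resource manager notifier: tracks micbias, HPH PA, clock source, CFILT
+ * and TX path events so that polling, the micbias source (VDDIO vs.
+ * CFILT) and the CFILT mode stay consistent with the rest of the driver.
+ */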
+static int wcd9xxx_event_notify(struct notifier_block *self, unsigned long val,
+				void *data)
+{
+	int ret = 0;
+	struct wcd9xxx_mbhc *mbhc = ((struct wcd9xxx_resmgr *)data)->mbhc;
+	struct snd_soc_codec *codec;
+	enum wcd9xxx_notify_event event = (enum wcd9xxx_notify_event)val;
+
+	pr_debug("%s: enter event %s(%d)\n", __func__,
+		 wcd9xxx_get_event_string(event), event);
+
+	if (!mbhc || !mbhc->mbhc_cfg) {
+		pr_debug("mbhc not initialized\n");
+		return 0;
+	}
+	codec = mbhc->codec;
+	mutex_lock(&mbhc->mbhc_lock);
+	switch (event) {
+	/* MICBIAS usage change */
+	case WCD9XXX_EVENT_PRE_MICBIAS_1_ON:
+	case WCD9XXX_EVENT_PRE_MICBIAS_2_ON:
+	case WCD9XXX_EVENT_PRE_MICBIAS_3_ON:
+	case WCD9XXX_EVENT_PRE_MICBIAS_4_ON:
+		if (mbhc->mbhc_cfg && mbhc->mbhc_cfg->micbias ==
+		    wcd9xxx_event_to_micbias(event)) {
+			wcd9xxx_switch_micbias(mbhc, 0);
+			/*
+			 * Enable MBHC TxFE whenever micbias is
+			 * turned ON and polling is active
+			 */
+			if (mbhc->polling_active)
+				wcd9xxx_enable_mbhc_txfe(mbhc, true);
+		}
+		break;
+	case WCD9XXX_EVENT_POST_MICBIAS_1_ON:
+	case WCD9XXX_EVENT_POST_MICBIAS_2_ON:
+	case WCD9XXX_EVENT_POST_MICBIAS_3_ON:
+	case WCD9XXX_EVENT_POST_MICBIAS_4_ON:
+		if (mbhc->mbhc_cfg && mbhc->mbhc_cfg->micbias ==
+		    wcd9xxx_event_to_micbias(event) &&
+		    wcd9xxx_mbhc_polling(mbhc)) {
+			/* if polling is on, restart it */
+			wcd9xxx_pause_hs_polling(mbhc);
+			wcd9xxx_start_hs_polling(mbhc);
+		}
+		break;
+	case WCD9XXX_EVENT_POST_MICBIAS_1_OFF:
+	case WCD9XXX_EVENT_POST_MICBIAS_2_OFF:
+	case WCD9XXX_EVENT_POST_MICBIAS_3_OFF:
+	case WCD9XXX_EVENT_POST_MICBIAS_4_OFF:
+		if (mbhc->mbhc_cfg && mbhc->mbhc_cfg->micbias ==
+		    wcd9xxx_event_to_micbias(event)) {
+			if (mbhc->event_state &
+			   (1 << MBHC_EVENT_PA_HPHL | 1 << MBHC_EVENT_PA_HPHR))
+				wcd9xxx_switch_micbias(mbhc, 1);
+			/*
+			 * Disable MBHC TxFE, in case it was enabled earlier
+			 * when micbias was enabled and polling is not active.
+			 */
+			if (!mbhc->polling_active)
+				wcd9xxx_enable_mbhc_txfe(mbhc, false);
+		}
+		if (mbhc->micbias_enable && mbhc->polling_active &&
+		    !(snd_soc_read(mbhc->codec, mbhc->mbhc_bias_regs.ctl_reg)
+		    & 0x80)) {
+			pr_debug("%s:Micbias turned off by recording, set up again",
+				 __func__);
+			snd_soc_update_bits(codec, mbhc->mbhc_bias_regs.ctl_reg,
+					    0x80, 0x80);
+		}
+		break;
+	/* PA usage change */
+	case WCD9XXX_EVENT_PRE_HPHL_PA_ON:
+		set_bit(MBHC_EVENT_PA_HPHL, &mbhc->event_state);
+		if (!(snd_soc_read(codec, mbhc->mbhc_bias_regs.ctl_reg) & 0x80))
+			/* if micbias is not enabled, switch to vddio */
+			wcd9xxx_switch_micbias(mbhc, 1);
+		break;
+	case WCD9XXX_EVENT_PRE_HPHR_PA_ON:
+		set_bit(MBHC_EVENT_PA_HPHR, &mbhc->event_state);
+		break;
+	case WCD9XXX_EVENT_POST_HPHL_PA_OFF:
+		clear_bit(MBHC_EVENT_PA_HPHL, &mbhc->event_state);
+		/* if HPH PAs are off, report OCP and switch back to CFILT */
+		clear_bit(WCD9XXX_HPHL_PA_OFF_ACK, &mbhc->hph_pa_dac_state);
+		clear_bit(WCD9XXX_HPHL_DAC_OFF_ACK, &mbhc->hph_pa_dac_state);
+		if (mbhc->hph_status & SND_JACK_OC_HPHL)
+			hphlocp_off_report(mbhc, SND_JACK_OC_HPHL);
+		if (!(mbhc->event_state &
+		      (1 << MBHC_EVENT_PA_HPHL | 1 << MBHC_EVENT_PA_HPHR |
+		       1 << MBHC_EVENT_PRE_TX_3_ON)))
+			wcd9xxx_switch_micbias(mbhc, 0);
+		break;
+	case WCD9XXX_EVENT_POST_HPHR_PA_OFF:
+		clear_bit(MBHC_EVENT_PA_HPHR, &mbhc->event_state);
+		/* if HPH PAs are off, report OCP and switch back to CFILT */
+		clear_bit(WCD9XXX_HPHR_PA_OFF_ACK, &mbhc->hph_pa_dac_state);
+		clear_bit(WCD9XXX_HPHR_DAC_OFF_ACK, &mbhc->hph_pa_dac_state);
+		if (mbhc->hph_status & SND_JACK_OC_HPHR)
+			hphrocp_off_report(mbhc, SND_JACK_OC_HPHR);
+		if (!(mbhc->event_state &
+		      (1 << MBHC_EVENT_PA_HPHL | 1 << MBHC_EVENT_PA_HPHR |
+		       1 << MBHC_EVENT_PRE_TX_3_ON)))
+			wcd9xxx_switch_micbias(mbhc, 0);
+		break;
+	/* Clock usage change */
+	case WCD9XXX_EVENT_PRE_MCLK_ON:
+		break;
+	case WCD9XXX_EVENT_POST_MCLK_ON:
+		/* Change to lower TxAAF frequency */
+		snd_soc_update_bits(codec, WCD9XXX_A_TX_COM_BIAS, 1 << 4,
+				    1 << 4);
+		/* Re-calibrate clock rate dependent values */
+		wcd9xxx_update_mbhc_clk_rate(mbhc, mbhc->mbhc_cfg->mclk_rate);
+		/* If clock source changes, stop and restart polling */
+		if (wcd9xxx_mbhc_polling(mbhc)) {
+			wcd9xxx_calibrate_hs_polling(mbhc);
+			wcd9xxx_start_hs_polling(mbhc);
+		}
+		break;
+	case WCD9XXX_EVENT_PRE_MCLK_OFF:
+		/* If clock source changes, stop and restart polling */
+		if (wcd9xxx_mbhc_polling(mbhc))
+			wcd9xxx_pause_hs_polling(mbhc);
+		break;
+	case WCD9XXX_EVENT_POST_MCLK_OFF:
+		break;
+	case WCD9XXX_EVENT_PRE_RCO_ON:
+		break;
+	case WCD9XXX_EVENT_POST_RCO_ON:
+		/* Change to higher TxAAF frequency */
+		snd_soc_update_bits(codec, WCD9XXX_A_TX_COM_BIAS, 1 << 4,
+				    0 << 4);
+		/* Re-calibrate clock rate dependent values */
+		wcd9xxx_update_mbhc_clk_rate(mbhc, mbhc->rco_clk_rate);
+		/* If clock source changes, stop and restart polling */
+		if (wcd9xxx_mbhc_polling(mbhc)) {
+			wcd9xxx_calibrate_hs_polling(mbhc);
+			wcd9xxx_start_hs_polling(mbhc);
+		}
+		break;
+	case WCD9XXX_EVENT_PRE_RCO_OFF:
+		/* If clock source changes, stop and restart polling */
+		if (wcd9xxx_mbhc_polling(mbhc))
+			wcd9xxx_pause_hs_polling(mbhc);
+		break;
+	case WCD9XXX_EVENT_POST_RCO_OFF:
+		break;
+	/* CFILT usage change */
+	case WCD9XXX_EVENT_PRE_CFILT_1_ON:
+	case WCD9XXX_EVENT_PRE_CFILT_2_ON:
+	case WCD9XXX_EVENT_PRE_CFILT_3_ON:
+		if (wcd9xxx_get_mbhc_cfilt_sel(mbhc) ==
+		    wcd9xxx_event_to_cfilt(event))
+			/*
+			 * Switch CFILT to slow mode if MBHC CFILT is being
+			 * used.
+			 */
+			wcd9xxx_codec_switch_cfilt_mode(mbhc, false);
+		break;
+	case WCD9XXX_EVENT_POST_CFILT_1_OFF:
+	case WCD9XXX_EVENT_POST_CFILT_2_OFF:
+	case WCD9XXX_EVENT_POST_CFILT_3_OFF:
+		if (wcd9xxx_get_mbhc_cfilt_sel(mbhc) ==
+		    wcd9xxx_event_to_cfilt(event))
+			/*
+			 * Switch CFILT to fast mode if MBHC CFILT is not
+			 * used anymore.
+			 */
+			wcd9xxx_codec_switch_cfilt_mode(mbhc, true);
+		break;
+	/* System resume */
+	case WCD9XXX_EVENT_POST_RESUME:
+		mbhc->mbhc_last_resume = jiffies;
+		break;
+	/* BG mode change */
+	case WCD9XXX_EVENT_PRE_BG_OFF:
+	case WCD9XXX_EVENT_POST_BG_OFF:
+	case WCD9XXX_EVENT_PRE_BG_AUDIO_ON:
+	case WCD9XXX_EVENT_POST_BG_AUDIO_ON:
+	case WCD9XXX_EVENT_PRE_BG_MBHC_ON:
+	case WCD9XXX_EVENT_POST_BG_MBHC_ON:
+		/* Not used for now */
+		break;
+	case WCD9XXX_EVENT_PRE_TX_3_ON:
+		/*
+		 * If polling is on and the MBHC micbias is not enabled,
+		 * switch the micbias source to VDDIO.
+		 */
+		set_bit(MBHC_EVENT_PRE_TX_3_ON, &mbhc->event_state);
+		if (!(snd_soc_read(codec, mbhc->mbhc_bias_regs.ctl_reg)
+		      & 0x80) &&
+		    mbhc->polling_active && !mbhc->mbhc_micbias_switched)
+			wcd9xxx_switch_micbias(mbhc, 1);
+		break;
+	case WCD9XXX_EVENT_POST_TX_3_OFF:
+		/*
+		 * Switch the micbias source back once TX3 is off and
+		 * neither HPH PA is enabled.
+		 */
+		clear_bit(MBHC_EVENT_PRE_TX_3_ON, &mbhc->event_state);
+		if (mbhc->polling_active && mbhc->mbhc_micbias_switched &&
+		    !(mbhc->event_state & (1 << MBHC_EVENT_PA_HPHL |
+		      1 << MBHC_EVENT_PA_HPHR)))
+			wcd9xxx_switch_micbias(mbhc, 0);
+		break;
+	default:
+		WARN(1, "Unknown event %d\n", event);
+		ret = -EINVAL;
+	}
+	mutex_unlock(&mbhc->mbhc_lock);
+
+	pr_debug("%s: leave\n", __func__);
+
+	return ret;
+}
+
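+/*
+ * Trigger STA measurements in a loop and box-car average
+ * z_det_box_car_avg of them to produce a single impedance reading.
+ */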
+static s16 wcd9xxx_read_impedance_regs(struct wcd9xxx_mbhc *mbhc)
+{
+	struct snd_soc_codec *codec = mbhc->codec;
+	short bias_value;
+	int i;
+	s32 z_t = 0;
+	s32 z_loop = z_det_box_car_avg;
+
+	/*
+	 * The box-car average only supports loop counts within a fixed
+	 * window. Clamp z_loop to the range [WCD9XXX_BOX_CAR_AVRG_MIN,
+	 * WCD9XXX_BOX_CAR_AVRG_MAX] if it falls outside it.
+	 */
+	if (z_loop < WCD9XXX_BOX_CAR_AVRG_MIN) {
+		dev_dbg(codec->dev,
+			"%s: Box Car avrg counter < %d. Limiting it to %d\n",
+			__func__, WCD9XXX_BOX_CAR_AVRG_MIN,
+			WCD9XXX_BOX_CAR_AVRG_MIN);
+		z_loop = WCD9XXX_BOX_CAR_AVRG_MIN;
+	} else if (z_loop > WCD9XXX_BOX_CAR_AVRG_MAX) {
+		dev_dbg(codec->dev,
+			"%s: Box Car avrg counter > %d. Limiting it to %d\n",
+			__func__, WCD9XXX_BOX_CAR_AVRG_MAX,
+			WCD9XXX_BOX_CAR_AVRG_MAX);
+		z_loop = WCD9XXX_BOX_CAR_AVRG_MAX;
+	}
+
+	/* Take box car average if needed */
+	for (i = 0; i < z_loop; i++) {
+		snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_EN_CTL, 0x2);
+		/* Wait at least 1800 us for the register write to settle */
+		usleep_range(1800, 1800 + WCD9XXX_USLEEP_RANGE_MARGIN_US);
+		z_t += wcd9xxx_read_sta_result(codec);
+	}
+	/* Take average of the Z values read */
+	bias_value = (s16) (z_t / z_loop);
+	return bias_value;
+}
+
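+/*
+ * Second zdet pass with updated gain: remeasure the V2 values, take the
+ * right channel readings with the HPHR ramp disabled, then measure L2 in
+ * both stereo and mono configurations so the caller can tell mono and
+ * stereo plugs apart.
+ */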
+static int wcd9xxx_remeasure_z_values(struct wcd9xxx_mbhc *mbhc,
+					s16 l[3], s16 r[3],
+					uint32_t *zl, uint32_t *zr,
+					u32 *zl_stereo, u32 *zl_mono)
+{
+	s16 l_t[3] = {0}, r_t[3] = {0};
+	s16 l2_stereo, l2_mono;
+	bool left, right;
+	struct snd_soc_codec *codec = mbhc->codec;
+
+	if (!mbhc->mbhc_cb || !mbhc->mbhc_cb->setup_zdet ||
+		!mbhc->mbhc_cb->compute_impedance) {
+		dev_err(codec->dev, "%s: Invalid parameters\n", __func__);
+		return -EINVAL;
+	}
+
+	left = !!(l);
+	right = !!(r);
+
+	dev_dbg(codec->dev, "%s: Remeasuring impedance values\n", __func__);
+	dev_dbg(codec->dev, "%s: l: %pK, r: %pK, left=%d, right=%d\n", __func__,
+		 l, r, left, right);
+
+	/* Remeasure V2 values */
+	snd_soc_update_bits(codec, WCD9XXX_A_MBHC_SCALING_MUX_2, 0xFF, 0xF0);
+	if (right)
+		r_t[2] = wcd9xxx_read_impedance_regs(mbhc);
+	snd_soc_update_bits(codec, WCD9XXX_A_MBHC_SCALING_MUX_1, 0xFF, 0xC0);
+	if (left)
+		l_t[2] = wcd9xxx_read_impedance_regs(mbhc);
+
+	/* Ramp down HPHR */
+	mbhc->mbhc_cb->setup_zdet(mbhc, MBHC_ZDET_HPHR_RAMP_DISABLE);
+
+	if (right) {
+		/* Take R0'/R1' */
+		snd_soc_update_bits(codec, WCD9XXX_A_MBHC_SCALING_MUX_2,
+				    0xFF, 0xF8);
+		snd_soc_update_bits(codec, WCD9XXX_A_MBHC_SCALING_MUX_1,
+				    0xFF, 0xA0);
+		r_t[1] = wcd9xxx_read_impedance_regs(mbhc);
+		snd_soc_update_bits(codec, WCD9XXX_A_MBHC_SCALING_MUX_2,
+				    0xFF, 0xF0);
+		r_t[0] = wcd9xxx_read_impedance_regs(mbhc);
+	}
+
+	/* Put back gain to 1x */
+	if (!left && right)
+		mbhc->mbhc_cb->setup_zdet(mbhc, MBHC_ZDET_GAIN_0);
+
+	snd_soc_update_bits(codec, WCD9XXX_A_MBHC_SCALING_MUX_1, 0xFF, 0xC0);
+	/* Take L2'' measurement */
+	l2_stereo = wcd9xxx_read_impedance_regs(mbhc);
+
+	/* Turn off HPHR PA and take L2''' */
+	mbhc->mbhc_cb->setup_zdet(mbhc, MBHC_ZDET_HPHR_PA_DISABLE);
+	l2_mono = wcd9xxx_read_impedance_regs(mbhc);
+
+	/* Ramp HPHL from -15mV to 0V */
+	mbhc->mbhc_cb->setup_zdet(mbhc, MBHC_ZDET_HPHL_RAMP_DISABLE);
+
+	/* Take L0' and L1' with iCal */
+	l_t[0] = wcd9xxx_read_impedance_regs(mbhc);
+	snd_soc_update_bits(codec, WCD9XXX_A_MBHC_SCALING_MUX_2, 0xFF, 0xF8);
+	l_t[1] = wcd9xxx_read_impedance_regs(mbhc);
+
+	if (left) {
+		l[0] = l_t[0];
+		l[1] = l_t[1];
+		l[2] = l_t[2];
+	}
+	if (right) {
+		r[0] = r_t[0];
+		r[1] = r_t[1];
+		r[2] = r_t[2];
+	}
+
+	/* compute the new impedance values */
+	mbhc->mbhc_cb->compute_impedance(mbhc, l, r, zl, zr);
+
+	if (!left && right)
+		mbhc->mbhc_cb->setup_zdet(mbhc, MBHC_ZDET_GAIN_UPDATE_1X);
+	/* compute the new ZL'' value */
+	l_t[2] = l2_stereo;
+	mbhc->mbhc_cb->compute_impedance(mbhc, l_t, NULL, zl_stereo, NULL);
+	/* compute the new ZL''' value */
+	l_t[2] = l2_mono;
+	mbhc->mbhc_cb->compute_impedance(mbhc, l_t, NULL, zl_mono, NULL);
+
+	pr_debug("%s: L0': 0x%x, L1': 0x%x L2_stereo: 0x%x, L2_mono: 0x%x\n",
+		 __func__, l_t[0] & 0xffff, l_t[1] & 0xffff,
+		 l2_stereo & 0xffff, l2_mono & 0xffff);
+	pr_debug("%s: ZL_stereo = %u, ZL_mono = %u\n",
+		 __func__, *zl_stereo, *zl_mono);
+
+	return 0;
+}
+
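+/*
+ * Pick the zone pair for the initial ZL/ZR estimates along with the gain
+ * to use for the second, more accurate, measurement pass.
+ */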
+static enum mbhc_zdet_zones wcd9xxx_assign_zdet_zone(uint32_t zl, uint32_t zr,
+						     int32_t *gain)
+{
+	enum mbhc_zdet_zones zdet_zone;
+
+	if (WCD9XXX_IS_IN_ZDET_ZONE_1(zl) &&
+		 WCD9XXX_IS_IN_ZDET_ZONE_1(zr)) {
+		zdet_zone = ZL_ZONE1__ZR_ZONE1;
+		*gain = 0;
+	} else if (WCD9XXX_IS_IN_ZDET_ZONE_2(zl) &&
+		 WCD9XXX_IS_IN_ZDET_ZONE_2(zr)) {
+		zdet_zone = ZL_ZONE2__ZR_ZONE2;
+		*gain = MBHC_ZDET_GAIN_1;
+	} else if (WCD9XXX_IS_IN_ZDET_ZONE_3(zl) &&
+		 WCD9XXX_IS_IN_ZDET_ZONE_3(zr)) {
+		zdet_zone = ZL_ZONE3__ZR_ZONE3;
+		*gain = MBHC_ZDET_GAIN_2;
+	} else if (WCD9XXX_IS_IN_ZDET_ZONE_2(zl) &&
+		 WCD9XXX_IS_IN_ZDET_ZONE_1(zr)) {
+		zdet_zone = ZL_ZONE2__ZR_ZONE1;
+		*gain = MBHC_ZDET_GAIN_1;
+	} else if (WCD9XXX_IS_IN_ZDET_ZONE_3(zl) &&
+		 WCD9XXX_IS_IN_ZDET_ZONE_1(zr)) {
+		zdet_zone = ZL_ZONE3__ZR_ZONE1;
+		*gain = MBHC_ZDET_GAIN_2;
+	} else if (WCD9XXX_IS_IN_ZDET_ZONE_1(zl) &&
+		 WCD9XXX_IS_IN_ZDET_ZONE_2(zr)) {
+		zdet_zone = ZL_ZONE1__ZR_ZONE2;
+		*gain = MBHC_ZDET_GAIN_1;
+	} else if (WCD9XXX_IS_IN_ZDET_ZONE_1(zl) &&
+		 WCD9XXX_IS_IN_ZDET_ZONE_3(zr)) {
+		zdet_zone = ZL_ZONE1__ZR_ZONE3;
+		*gain = MBHC_ZDET_GAIN_2;
+	} else {
+		zdet_zone = ZL_ZR_NOT_IN_ZONE1;
+		*gain = MBHC_ZDET_GAIN_1;
+	}
+
+	return zdet_zone;
+}
+
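+/*
+ * Full impedance detection: measure L0/R0 and L1/R1 (without and with
+ * ical) plus L2/R2 through the scaling mux, compute ZL/ZR, and on TomTom
+ * run a gain-adjusted second pass that also classifies mono vs. stereo.
+ */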
+static int wcd9xxx_detect_impedance(struct wcd9xxx_mbhc *mbhc, uint32_t *zl,
+				    uint32_t *zr)
+{
+	int i;
+	int ret = 0;
+	u8 micb_mbhc_val;
+	s16 l[3], r[3];
+	s16 *z[] = {
+		&l[0], &r[0], &r[1], &l[1], &l[2], &r[2],
+	};
+	u32 zl_stereo, zl_mono;
+	u32 zl_diff_1, zl_diff_2;
+	bool override_en;
+	struct snd_soc_codec *codec = mbhc->codec;
+	const int mux_wait_us = 25;
+	const struct wcd9xxx_reg_mask_val reg_set_mux[] = {
+		/* Phase 1 */
+		/* Set MBHC_MUX for HPHL without ical */
+		{WCD9XXX_A_MBHC_SCALING_MUX_2, 0xFF, 0xF0},
+		/* Set MBHC_MUX for HPHR without ical */
+		{WCD9XXX_A_MBHC_SCALING_MUX_1, 0xFF, 0xA0},
+		/* Set MBHC_MUX for HPHR with ical */
+		{WCD9XXX_A_MBHC_SCALING_MUX_2, 0xFF, 0xF8},
+		/* Set MBHC_MUX for HPHL with ical */
+		{WCD9XXX_A_MBHC_SCALING_MUX_1, 0xFF, 0xC0},
+
+		/* Phase 2 */
+		{WCD9XXX_A_MBHC_SCALING_MUX_2, 0xFF, 0xF0},
+		/* Set MBHC_MUX for HPHR without ical and wait for 25us */
+		{WCD9XXX_A_MBHC_SCALING_MUX_1, 0xFF, 0xA0},
+	};
+
+	pr_debug("%s: enter\n", __func__);
+	WCD9XXX_BCL_ASSERT_LOCKED(mbhc->resmgr);
+
+	if (!mbhc->mbhc_cb || !mbhc->mbhc_cb->setup_zdet ||
+	    !mbhc->mbhc_cb->compute_impedance || !zl || !zr) {
+		return -EINVAL;
+	}
+
+	/*
+	 * Impedance detection is intrusive: it mutes the RX paths, enables
+	 * the PAs, and so on. The codec driver, including ALSA, therefore
+	 * must not read or write hardware registers during detection.
+	 */
+	wcd9xxx_onoff_ext_mclk(mbhc, true);
+
+	/*
+	 * For impedance detection, make sure to disable micbias from
+	 * override signal so that override does not cause micbias
+	 * to be enabled. This setting will be undone after completing
+	 * impedance measurement.
+	 */
+	micb_mbhc_val = snd_soc_read(codec, WCD9XXX_A_MAD_ANA_CTRL);
+	snd_soc_update_bits(codec, WCD9XXX_A_MAD_ANA_CTRL,
+			    0x10, 0x00);
+
+	override_en = (snd_soc_read(codec, WCD9XXX_A_CDC_MBHC_B1_CTL) & 0x04) ?
+					true : false;
+	if (!override_en)
+		wcd9xxx_turn_onoff_override(mbhc, true);
+	pr_debug("%s: Setting impedance detection\n", __func__);
+
+	/* Codec specific setup for L0, R0, L1 and R1 measurements */
+	mbhc->mbhc_cb->setup_zdet(mbhc, MBHC_ZDET_PRE_MEASURE);
+
+	pr_debug("%s: Performing impedance detection\n", __func__);
+	for (i = 0; i < ARRAY_SIZE(reg_set_mux) - 2; i++) {
+		snd_soc_update_bits(codec, reg_set_mux[i].reg,
+				    reg_set_mux[i].mask,
+				    reg_set_mux[i].val);
+		if (mbhc->mbhc_cb->get_cdc_type &&
+		    mbhc->mbhc_cb->get_cdc_type() ==
+				WCD9XXX_CDC_TYPE_TOMTOM) {
+			*(z[i]) = wcd9xxx_read_impedance_regs(mbhc);
+		} else {
+			if (mbhc->mbhc_cb->enable_mux_bias_block)
+				mbhc->mbhc_cb->enable_mux_bias_block(codec);
+			else
+				snd_soc_update_bits(codec,
+						WCD9XXX_A_MBHC_SCALING_MUX_1,
+						0x80, 0x80);
+			/* wait 25 us after the mux change to let it settle */
+			usleep_range(mux_wait_us,
+				 mux_wait_us + WCD9XXX_USLEEP_RANGE_MARGIN_US);
+			*(z[i]) = __wcd9xxx_codec_sta_dce(mbhc, 0,
+							  true, false);
+		}
+	}
+
+	/* Codec specific setup for L2 and R2 measurements */
+	mbhc->mbhc_cb->setup_zdet(mbhc, MBHC_ZDET_POST_MEASURE);
+
+	for (; i < ARRAY_SIZE(reg_set_mux); i++) {
+		snd_soc_update_bits(codec, reg_set_mux[i].reg,
+				    reg_set_mux[i].mask,
+				    reg_set_mux[i].val);
+		if (mbhc->mbhc_cb->get_cdc_type &&
+		    mbhc->mbhc_cb->get_cdc_type() ==
+				WCD9XXX_CDC_TYPE_TOMTOM) {
+			*(z[i]) = wcd9xxx_read_impedance_regs(mbhc);
+		} else {
+			if (mbhc->mbhc_cb->enable_mux_bias_block)
+				mbhc->mbhc_cb->enable_mux_bias_block(codec);
+			else
+				snd_soc_update_bits(codec,
+						WCD9XXX_A_MBHC_SCALING_MUX_1,
+						0x80, 0x80);
+			/* wait 25 us after the mux change to let it settle */
+			usleep_range(mux_wait_us,
+				mux_wait_us + WCD9XXX_USLEEP_RANGE_MARGIN_US);
+			*(z[i]) = __wcd9xxx_codec_sta_dce(mbhc, 0,
+							  true, false);
+		}
+	}
+
+	mbhc->mbhc_cb->compute_impedance(mbhc, l, r, zl, zr);
+
+	/*
+	 * For some codecs an additional zdet pass is needed to overcome
+	 * noise and improve the accuracy of the z values.
+	 */
+	if (mbhc->mbhc_cb->get_cdc_type &&
+	     mbhc->mbhc_cb->get_cdc_type() == WCD9XXX_CDC_TYPE_TOMTOM) {
+		uint32_t zl_t = 0, zr_t = 0;
+		s16 *l_p, *r_p;
+		enum mbhc_zdet_zones zdet_zone;
+		int32_t gain;
+
+		zdet_zone = wcd9xxx_assign_zdet_zone(*zl, *zr, &gain);
+		switch (zdet_zone) {
+		case ZL_ZONE1__ZR_ZONE1:
+			l_p = NULL;
+			r_p = NULL;
+			break;
+		case ZL_ZONE2__ZR_ZONE2:
+		case ZL_ZONE3__ZR_ZONE3:
+		case ZL_ZR_NOT_IN_ZONE1:
+			l_p = l;
+			r_p = r;
+			break;
+		case ZL_ZONE2__ZR_ZONE1:
+		case ZL_ZONE3__ZR_ZONE1:
+		/* If ZR falls in Zone 1, further computations with
+		 * gain update are not required
+		 */
+			l_p = l;
+			r_p = NULL;
+			break;
+		case ZL_ZONE1__ZR_ZONE2:
+		case ZL_ZONE1__ZR_ZONE3:
+		/* If ZL falls in Zone 1, further computations with
+		 * gain update are not required
+		 */
+			l_p = NULL;
+			r_p = r;
+			break;
+		}
+		pr_debug("%s:zdet_zone = %d, gain = %d\n", __func__,
+			 zdet_zone, gain);
+		if (gain)
+			mbhc->mbhc_cb->setup_zdet(mbhc, gain);
+
+		wcd9xxx_remeasure_z_values(mbhc, l_p, r_p, &zl_t, &zr_t,
+					   &zl_stereo, &zl_mono);
+
+		*zl = (zl_t) ? zl_t : *zl;
+		*zr = (zr_t) ? zr_t : *zr;
+
+		/* Check for Mono/Stereo Type
+		 * Conditions to classify Mono/Stereo
+		 * i. Difference of zl_stereo and zl_mono > (1/2) of zl_mono
+		 * ii. Absolute difference of zl and zr above a threshold
+		 */
+		zl_diff_1 = (zl_mono > zl_stereo) ? (zl_mono - zl_stereo) :
+						  (zl_stereo - zl_mono);
+		zl_diff_2 = (*zl > *zr) ? (*zl - *zr) : (*zr - *zl);
+
+		mbhc->hph_type = MBHC_HPH_NONE;
+		if (mbhc->current_plug != PLUG_TYPE_HIGH_HPH) {
+			if ((zl_diff_1 > (zl_mono >> 1)) ||
+			    (zl_diff_2 > WCD9XXX_MONO_HS_DIFF_THR) ||
+			    ((*zl < WCD9XXX_MONO_HS_MIN_THR) &&
+			     (*zr > WCD9XXX_MONO_HS_MIN_THR)) ||
+			    ((*zr < WCD9XXX_MONO_HS_MIN_THR) &&
+			     (*zl > WCD9XXX_MONO_HS_MIN_THR))) {
+				pr_debug("%s: MONO plug type detected\n",
+					 __func__);
+				mbhc->hph_type = MBHC_HPH_MONO;
+				*zl = zl_mono;
+			} else {
+				pr_debug("%s: STEREO plug type detected\n",
+					 __func__);
+				mbhc->hph_type = MBHC_HPH_STEREO;
+			}
+		}
+	}
+
+	mbhc->mbhc_cb->setup_zdet(mbhc, MBHC_ZDET_PA_DISABLE);
+
+	/* Calculate z values based on the Q-fuse registers, if used */
+	if (mbhc->mbhc_cb->zdet_error_approx)
+		mbhc->mbhc_cb->zdet_error_approx(mbhc, zl, zr);
+
+	wcd9xxx_onoff_ext_mclk(mbhc, false);
+
+	if (!override_en)
+		wcd9xxx_turn_onoff_override(mbhc, false);
+
+	/* Undo the micbias disable for override */
+	snd_soc_write(codec, WCD9XXX_A_MAD_ANA_CTRL, micb_mbhc_val);
+
+	pr_debug("%s: L0: 0x%x(%d), L1: 0x%x(%d), L2: 0x%x(%d)\n",
+		 __func__,
+		 l[0] & 0xffff, l[0], l[1] & 0xffff, l[1], l[2] & 0xffff, l[2]);
+	pr_debug("%s: R0: 0x%x(%d), R1: 0x%x(%d), R2: 0x%x(%d)\n",
+		 __func__,
+		 r[0] & 0xffff, r[0], r[1] & 0xffff, r[1], r[2] & 0xffff, r[2]);
+	pr_debug("%s: RL %u milliohm, RR %u milliohm\n", __func__, *zl, *zr);
+	pr_debug("%s: Impedance detection completed\n", __func__);
+
+	return ret;
+}
+
+int wcd9xxx_mbhc_get_impedance(struct wcd9xxx_mbhc *mbhc, uint32_t *zl,
+			       uint32_t *zr)
+{
+	*zl = mbhc->zl;
+	*zr = mbhc->zr;
+
+	if (*zl && *zr)
+		return 0;
+	else
+		return -EINVAL;
+}
+
+/*
+ * wcd9xxx_mbhc_init : initialize MBHC internal structures.
+ *
+ * NOTE: mbhc->mbhc_cfg is not configured yet, so it must not be used here.
+ */
+int wcd9xxx_mbhc_init(struct wcd9xxx_mbhc *mbhc, struct wcd9xxx_resmgr *resmgr,
+		      struct snd_soc_codec *codec,
+		      int (*micbias_enable_cb) (struct snd_soc_codec*,  bool,
+						enum wcd9xxx_micbias_num),
+		      const struct wcd9xxx_mbhc_cb *mbhc_cb,
+		      const struct wcd9xxx_mbhc_intr *mbhc_cdc_intr_ids,
+		      int rco_clk_rate,
+		      bool impedance_det_en)
+{
+	int ret;
+	void *core_res;
+
+	pr_debug("%s: enter\n", __func__);
+	memset(&mbhc->mbhc_bias_regs, 0, sizeof(struct mbhc_micbias_regs));
+	memset(&mbhc->mbhc_data, 0, sizeof(struct mbhc_internal_cal_data));
+
+	mbhc->mbhc_data.t_sta_dce = DEFAULT_DCE_STA_WAIT;
+	mbhc->mbhc_data.t_dce = DEFAULT_DCE_WAIT;
+	mbhc->mbhc_data.t_sta = DEFAULT_STA_WAIT;
+	mbhc->mbhc_micbias_switched = false;
+	mbhc->polling_active = false;
+	mbhc->mbhc_state = MBHC_STATE_NONE;
+	mbhc->in_swch_irq_handler = false;
+	mbhc->current_plug = PLUG_TYPE_NONE;
+	mbhc->lpi_enabled = false;
+	mbhc->no_mic_headset_override = false;
+	mbhc->mbhc_last_resume = 0;
+	mbhc->codec = codec;
+	mbhc->resmgr = resmgr;
+	mbhc->resmgr->mbhc = mbhc;
+	mbhc->micbias_enable_cb = micbias_enable_cb;
+	mbhc->rco_clk_rate = rco_clk_rate;
+	mbhc->mbhc_cb = mbhc_cb;
+	mbhc->intr_ids = mbhc_cdc_intr_ids;
+	mbhc->impedance_detect = impedance_det_en;
+	mbhc->hph_type = MBHC_HPH_NONE;
+
+	if (mbhc->intr_ids == NULL) {
+		pr_err("%s: Interrupt mapping not provided\n", __func__);
+		return -EINVAL;
+	}
+
+	if (mbhc->headset_jack.jack == NULL) {
+		ret = snd_soc_card_jack_new(codec->component.card,
+					    "Headset Jack", WCD9XXX_JACK_MASK,
+					    &mbhc->headset_jack, NULL, 0);
+		if (ret) {
+			pr_err("%s: Failed to create new jack\n", __func__);
+			return ret;
+		}
+
+		ret = snd_soc_card_jack_new(codec->component.card,
+					    "Button Jack",
+					    WCD9XXX_JACK_BUTTON_MASK,
+					    &mbhc->button_jack, NULL, 0);
+		if (ret) {
+			pr_err("Failed to create new jack\n");
+			return ret;
+		}
+
+		ret = snd_jack_set_key(mbhc->button_jack.jack,
+				       SND_JACK_BTN_0,
+				       KEY_MEDIA);
+		if (ret) {
+			pr_err("%s: Failed to set code for btn-0\n",
+				__func__);
+			return ret;
+		}
+
+		set_bit(INPUT_PROP_NO_DUMMY_RELEASE,
+			mbhc->button_jack.jack->input_dev->propbit);
+
+		INIT_DELAYED_WORK(&mbhc->mbhc_firmware_dwork,
+				  wcd9xxx_mbhc_fw_read);
+		INIT_DELAYED_WORK(&mbhc->mbhc_btn_dwork, wcd9xxx_btn_lpress_fn);
+		INIT_DELAYED_WORK(&mbhc->mbhc_insert_dwork,
+				  wcd9xxx_mbhc_insert_work);
+	}
+
+	mutex_init(&mbhc->mbhc_lock);
+
+	/* Register event notifier */
+	mbhc->nblock.notifier_call = wcd9xxx_event_notify;
+	ret = wcd9xxx_resmgr_register_notifier(mbhc->resmgr, &mbhc->nblock);
+	if (ret) {
+		pr_err("%s: Failed to register notifier %d\n", __func__, ret);
+		mutex_destroy(&mbhc->mbhc_lock);
+		return ret;
+	}
+
+	wcd9xxx_init_debugfs(mbhc);
+
+	/* Disable Impedance detection by default for certain codec types */
+	if (mbhc->mbhc_cb && mbhc->mbhc_cb->get_cdc_type &&
+	    (mbhc->mbhc_cb->get_cdc_type() == WCD9XXX_CDC_TYPE_HELICON))
+		impedance_detect_en = 0;
+	else
+		impedance_detect_en = impedance_det_en ? 1 : 0;
+
+	core_res = mbhc->resmgr->core_res;
+	ret = wcd9xxx_request_irq(core_res, mbhc->intr_ids->insertion,
+				  wcd9xxx_hs_insert_irq,
+				  "Headset insert detect", mbhc);
+	if (ret) {
+		pr_err("%s: Failed to request irq %d, ret = %d\n", __func__,
+		       mbhc->intr_ids->insertion, ret);
+		goto err_insert_irq;
+	}
+	wcd9xxx_disable_irq(core_res, mbhc->intr_ids->insertion);
+
+	ret = wcd9xxx_request_irq(core_res, mbhc->intr_ids->poll_plug_rem,
+				  wcd9xxx_hs_remove_irq,
+				  "Headset remove detect", mbhc);
+	if (ret) {
+		pr_err("%s: Failed to request irq %d\n", __func__,
+			mbhc->intr_ids->poll_plug_rem);
+		goto err_remove_irq;
+	}
+
+	ret = wcd9xxx_request_irq(core_res, mbhc->intr_ids->dce_est_complete,
+				  wcd9xxx_dce_handler, "DC Estimation detect",
+				  mbhc);
+	if (ret) {
+		pr_err("%s: Failed to request irq %d\n", __func__,
+		       mbhc->intr_ids->dce_est_complete);
+		goto err_potential_irq;
+	}
+
+	ret = wcd9xxx_request_irq(core_res, mbhc->intr_ids->button_release,
+				  wcd9xxx_release_handler,
+				  "Button Release detect", mbhc);
+	if (ret) {
+		pr_err("%s: Failed to request irq %d\n", __func__,
+			mbhc->intr_ids->button_release);
+		goto err_release_irq;
+	}
+
+	ret = wcd9xxx_request_irq(core_res, mbhc->intr_ids->hph_left_ocp,
+				  wcd9xxx_hphl_ocp_irq, "HPH_L OCP detect",
+				  mbhc);
+	if (ret) {
+		pr_err("%s: Failed to request irq %d\n", __func__,
+		       mbhc->intr_ids->hph_left_ocp);
+		goto err_hphl_ocp_irq;
+	}
+	wcd9xxx_disable_irq(core_res, mbhc->intr_ids->hph_left_ocp);
+
+	ret = wcd9xxx_request_irq(core_res, mbhc->intr_ids->hph_right_ocp,
+				  wcd9xxx_hphr_ocp_irq, "HPH_R OCP detect",
+				  mbhc);
+	if (ret) {
+		pr_err("%s: Failed to request irq %d\n", __func__,
+		       mbhc->intr_ids->hph_right_ocp);
+		goto err_hphr_ocp_irq;
+	}
+	wcd9xxx_disable_irq(core_res, mbhc->intr_ids->hph_right_ocp);
+
+	wcd9xxx_regmgr_cond_register(resmgr, 1 << WCD9XXX_COND_HPH_MIC |
+					     1 << WCD9XXX_COND_HPH);
+
+	pr_debug("%s: leave ret %d\n", __func__, ret);
+	return ret;
+
+err_hphr_ocp_irq:
+	wcd9xxx_free_irq(core_res, mbhc->intr_ids->hph_left_ocp, mbhc);
+err_hphl_ocp_irq:
+	wcd9xxx_free_irq(core_res, mbhc->intr_ids->button_release, mbhc);
+err_release_irq:
+	wcd9xxx_free_irq(core_res, mbhc->intr_ids->dce_est_complete, mbhc);
+err_potential_irq:
+	wcd9xxx_free_irq(core_res, mbhc->intr_ids->poll_plug_rem, mbhc);
+err_remove_irq:
+	wcd9xxx_free_irq(core_res, mbhc->intr_ids->insertion, mbhc);
+err_insert_irq:
+	wcd9xxx_resmgr_unregister_notifier(mbhc->resmgr, &mbhc->nblock);
+
+	mutex_destroy(&mbhc->mbhc_lock);
+
+	pr_debug("%s: leave ret %d\n", __func__, ret);
+	return ret;
+}
+EXPORT_SYMBOL(wcd9xxx_mbhc_init);
+
+void wcd9xxx_mbhc_deinit(struct wcd9xxx_mbhc *mbhc)
+{
+	struct wcd9xxx_core_resource *core_res =
+				mbhc->resmgr->core_res;
+
+	wcd9xxx_regmgr_cond_deregister(mbhc->resmgr, 1 << WCD9XXX_COND_HPH_MIC |
+						     1 << WCD9XXX_COND_HPH);
+
+	wcd9xxx_free_irq(core_res, mbhc->intr_ids->button_release, mbhc);
+	wcd9xxx_free_irq(core_res, mbhc->intr_ids->dce_est_complete, mbhc);
+	wcd9xxx_free_irq(core_res, mbhc->intr_ids->poll_plug_rem, mbhc);
+	wcd9xxx_free_irq(core_res, mbhc->intr_ids->insertion, mbhc);
+	wcd9xxx_free_irq(core_res, mbhc->intr_ids->hs_jack_switch, mbhc);
+	wcd9xxx_free_irq(core_res, mbhc->intr_ids->hph_left_ocp, mbhc);
+	wcd9xxx_free_irq(core_res, mbhc->intr_ids->hph_right_ocp, mbhc);
+
+	mutex_destroy(&mbhc->mbhc_lock);
+	wcd9xxx_resmgr_unregister_notifier(mbhc->resmgr, &mbhc->nblock);
+	wcd9xxx_cleanup_debugfs(mbhc);
+}
+EXPORT_SYMBOL(wcd9xxx_mbhc_deinit);
+
+MODULE_DESCRIPTION("wcd9xxx MBHC module");
+MODULE_LICENSE("GPL v2");
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/sound/soc/codecs/wcd9xxx-mbhc.h	2019-01-22 16:16:29.555301211 +0100
@@ -0,0 +1,492 @@
+/* Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#ifndef __WCD9XXX_MBHC_H__
+#define __WCD9XXX_MBHC_H__
+
+#include "wcd9xxx-resmgr.h"
+#include "wcdcal-hwdep.h"
+
+#define WCD9XXX_CFILT_FAST_MODE 0x00
+#define WCD9XXX_CFILT_SLOW_MODE 0x40
+#define WCD9XXX_CFILT_EXT_PRCHG_EN 0x30
+#define WCD9XXX_CFILT_EXT_PRCHG_DSBL 0x00
+
+#define WCD9XXX_USLEEP_RANGE_MARGIN_US 100
+
+struct mbhc_micbias_regs {
+	u16 cfilt_val;
+	u16 cfilt_ctl;
+	u16 mbhc_reg;
+	u16 int_rbias;
+	u16 ctl_reg;
+	u8 cfilt_sel;
+};
+
+enum mbhc_v_index {
+	MBHC_V_IDX_CFILT,
+	MBHC_V_IDX_VDDIO,
+	MBHC_V_IDX_NUM,
+};
+
+enum mbhc_cal_type {
+	MBHC_CAL_MCLK,
+	MBHC_CAL_RCO,
+	MBHC_CAL_NUM,
+};
+
+enum mbhc_impedance_detect_stages {
+	MBHC_ZDET_PRE_MEASURE,
+	MBHC_ZDET_POST_MEASURE,
+	MBHC_ZDET_GAIN_0,
+	MBHC_ZDET_GAIN_1,
+	MBHC_ZDET_GAIN_2,
+	MBHC_ZDET_HPHR_RAMP_DISABLE,
+	MBHC_ZDET_HPHL_RAMP_DISABLE,
+	MBHC_ZDET_RAMP_DISABLE,
+	MBHC_ZDET_HPHR_PA_DISABLE,
+	MBHC_ZDET_PA_DISABLE,
+	MBHC_ZDET_GAIN_UPDATE_1X,
+};
+
+/* Zone assignments used in WCD9330 for Zdet */
+enum mbhc_zdet_zones {
+	ZL_ZONE1__ZR_ZONE1,
+	ZL_ZONE2__ZR_ZONE2,
+	ZL_ZONE3__ZR_ZONE3,
+	ZL_ZONE2__ZR_ZONE1,
+	ZL_ZONE3__ZR_ZONE1,
+	ZL_ZONE1__ZR_ZONE2,
+	ZL_ZONE1__ZR_ZONE3,
+	ZL_ZR_NOT_IN_ZONE1,
+};
+
+/* Data used by MBHC */
+struct mbhc_internal_cal_data {
+	u16 dce_z;
+	u16 dce_nsc_cs_z;
+	u16 dce_mb;
+	u16 sta_z;
+	u16 sta_mb;
+	u32 t_sta_dce;
+	u32 t_dce;
+	u32 t_sta;
+	u32 micb_mv;
+	u16 v_ins_hu[MBHC_V_IDX_NUM];
+	u16 v_ins_h[MBHC_V_IDX_NUM];
+	u16 v_b1_hu[MBHC_V_IDX_NUM];
+	u16 v_b1_h[MBHC_V_IDX_NUM];
+	u16 v_brh[MBHC_V_IDX_NUM];
+	u16 v_brl;
+	u16 v_no_mic;
+	s16 v_inval_ins_low;
+	s16 v_inval_ins_high;
+	u16 v_cs_ins_h;
+};
+
+enum wcd9xxx_mbhc_plug_type {
+	PLUG_TYPE_INVALID = -1,
+	PLUG_TYPE_NONE,
+	PLUG_TYPE_HEADSET,
+	PLUG_TYPE_HEADPHONE,
+	PLUG_TYPE_HIGH_HPH,
+	PLUG_TYPE_GND_MIC_SWAP,
+	PLUG_TYPE_ANC_HEADPHONE,
+};
+
+enum wcd9xxx_mbhc_micbias_type {
+	MBHC_PRIMARY_MIC_MB,
+	MBHC_ANC_MIC_MB,
+};
+
+enum wcd9xxx_micbias_num {
+	MBHC_MICBIAS_INVALID = -1,
+	MBHC_MICBIAS1,
+	MBHC_MICBIAS2,
+	MBHC_MICBIAS3,
+	MBHC_MICBIAS4,
+};
+
+enum hw_jack_type {
+	FOUR_POLE_JACK = 0,
+	FIVE_POLE_JACK,
+	SIX_POLE_JACK,
+};
+
+enum wcd9xx_mbhc_micbias_enable_bits {
+	MBHC_MICBIAS_ENABLE_THRESHOLD_HEADSET,
+	MBHC_MICBIAS_ENABLE_REGULAR_HEADSET,
+};
+
+enum wcd9xx_mbhc_cs_enable_bits {
+	MBHC_CS_ENABLE_POLLING,
+	MBHC_CS_ENABLE_INSERTION,
+	MBHC_CS_ENABLE_REMOVAL,
+	MBHC_CS_ENABLE_DET_ANC,
+};
+
+enum wcd9xxx_mbhc_state {
+	MBHC_STATE_NONE = -1,
+	MBHC_STATE_POTENTIAL,
+	MBHC_STATE_POTENTIAL_RECOVERY,
+	MBHC_STATE_RELEASE,
+};
+
+enum wcd9xxx_mbhc_btn_det_mem {
+	MBHC_BTN_DET_V_BTN_LOW,
+	MBHC_BTN_DET_V_BTN_HIGH,
+	MBHC_BTN_DET_N_READY,
+	MBHC_BTN_DET_N_CIC,
+	MBHC_BTN_DET_GAIN
+};
+
+enum wcd9xxx_mbhc_clk_freq {
+	TAIKO_MCLK_12P2MHZ = 0,
+	TAIKO_MCLK_9P6MHZ,
+	TAIKO_NUM_CLK_FREQS,
+};
+
+enum wcd9xxx_mbhc_event_state {
+	MBHC_EVENT_PA_HPHL,
+	MBHC_EVENT_PA_HPHR,
+	MBHC_EVENT_PRE_TX_3_ON,
+	MBHC_EVENT_POST_TX_3_OFF,
+};
+
+enum mbhc_hph_type {
+	MBHC_HPH_NONE = 0,
+	MBHC_HPH_MONO,
+	MBHC_HPH_STEREO,
+};
+
+struct wcd9xxx_mbhc_general_cfg {
+	u8 t_ldoh;
+	u8 t_bg_fast_settle;
+	u8 t_shutdown_plug_rem;
+	u8 mbhc_nsa;
+	u8 mbhc_navg;
+	u8 v_micbias_l;
+	u8 v_micbias;
+	u8 mbhc_reserved;
+	u16 settle_wait;
+	u16 t_micbias_rampup;
+	u16 t_micbias_rampdown;
+	u16 t_supply_bringup;
+} __packed;
+
+struct wcd9xxx_mbhc_plug_detect_cfg {
+	u32 mic_current;
+	u32 hph_current;
+	u16 t_mic_pid;
+	u16 t_ins_complete;
+	u16 t_ins_retry;
+	u16 v_removal_delta;
+	u8 micbias_slow_ramp;
+	u8 reserved0;
+	u8 reserved1;
+	u8 reserved2;
+} __packed;
+
+struct wcd9xxx_mbhc_plug_type_cfg {
+	u8 av_detect;
+	u8 mono_detect;
+	u8 num_ins_tries;
+	u8 reserved0;
+	s16 v_no_mic;
+	s16 v_av_min;
+	s16 v_av_max;
+	s16 v_hs_min;
+	s16 v_hs_max;
+	u16 reserved1;
+} __packed;
+
+struct wcd9xxx_mbhc_btn_detect_cfg {
+	s8 c[8];
+	u8 nc;
+	u8 n_meas;
+	u8 mbhc_nsc;
+	u8 n_btn_meas;
+	u8 n_btn_con;
+	u8 num_btn;
+	u8 reserved0;
+	u8 reserved1;
+	u16 t_poll;
+	u16 t_bounce_wait;
+	u16 t_rel_timeout;
+	s16 v_btn_press_delta_sta;
+	s16 v_btn_press_delta_cic;
+	u16 t_btn0_timeout;
+	s16 _v_btn_low[0]; /* v_btn_low[num_btn] */
+	s16 _v_btn_high[0]; /* v_btn_high[num_btn] */
+	u8 _n_ready[TAIKO_NUM_CLK_FREQS];
+	u8 _n_cic[TAIKO_NUM_CLK_FREQS];
+	u8 _gain[TAIKO_NUM_CLK_FREQS];
+} __packed;
+
+struct wcd9xxx_mbhc_imped_detect_cfg {
+	u8 _hs_imped_detect;
+	u8 _n_rload;
+	u8 _hph_keep_on;
+	u8 _repeat_rload_calc;
+	u16 _t_dac_ramp_time;
+	u16 _rhph_high;
+	u16 _rhph_low;
+	u16 _rload[0]; /* rload[n_rload] */
+	u16 _alpha[0]; /* alpha[n_rload] */
+	u16 _beta[3];
+} __packed;
+
+struct wcd9xxx_mbhc_config {
+	bool read_fw_bin;
+	/*
+	 * void* calibration contains:
+	 *  struct wcd9xxx_mbhc_general_cfg generic;
+	 *  struct wcd9xxx_mbhc_plug_detect_cfg plug_det;
+	 *  struct wcd9xxx_mbhc_plug_type_cfg plug_type;
+	 *  struct wcd9xxx_mbhc_btn_detect_cfg btn_det;
+	 *  struct wcd9xxx_mbhc_imped_detect_cfg imped_det;
+	 * Note: the overall size varies with btn_det->num_btn
+	 */
+	void *calibration;
+	enum wcd9xxx_micbias_num micbias;
+	enum wcd9xxx_micbias_num anc_micbias;
+	int (*mclk_cb_fn) (struct snd_soc_codec*, int, bool);
+	unsigned int mclk_rate;
+	unsigned int gpio;
+	unsigned int gpio_irq;
+	int gpio_level_insert;
+	bool insert_detect; /* codec has own MBHC_INSERT_DETECT */
+	bool detect_extn_cable;
+	/* bit mask of enum wcd9xx_mbhc_micbias_enable_bits */
+	unsigned long micbias_enable_flags;
+	/* swap_gnd_mic returns true if the external GND/MIC swap switch is toggled */
+	bool (*swap_gnd_mic) (struct snd_soc_codec *);
+	unsigned long cs_enable_flags;
+	bool use_int_rbias;
+	bool do_recalibration;
+	bool use_vddio_meas;
+	bool enable_anc_mic_detect;
+	enum hw_jack_type hw_jack_type;
+	int key_code[8];
+};
+
+struct wcd9xxx_cfilt_mode {
+	u8 reg_mode_val;
+	u8 cur_mode_val;
+	u8 reg_mask;
+};
+
+struct wcd9xxx_mbhc_intr {
+	int poll_plug_rem;
+	int shortavg_complete;
+	int potential_button_press;
+	int button_release;
+	int dce_est_complete;
+	int insertion;
+	int hph_left_ocp;
+	int hph_right_ocp;
+	int hs_jack_switch;
+};
+
+struct wcd9xxx_mbhc_cb {
+	void (*enable_mux_bias_block) (struct snd_soc_codec *);
+	void (*cfilt_fast_mode) (struct snd_soc_codec *, struct wcd9xxx_mbhc *);
+	void (*codec_specific_cal) (struct snd_soc_codec *,
+				    struct wcd9xxx_mbhc *);
+	struct wcd9xxx_cfilt_mode (*switch_cfilt_mode) (struct wcd9xxx_mbhc *,
+							bool);
+	void (*select_cfilt) (struct snd_soc_codec *, struct wcd9xxx_mbhc *);
+	enum wcd9xxx_cdc_type (*get_cdc_type) (void);
+	void (*enable_clock_gate) (struct snd_soc_codec *, bool);
+	int (*setup_zdet) (struct wcd9xxx_mbhc *,
+			   enum mbhc_impedance_detect_stages stage);
+	void (*compute_impedance) (struct wcd9xxx_mbhc *, s16 *, s16 *,
+				   uint32_t *, uint32_t *);
+	void (*zdet_error_approx) (struct wcd9xxx_mbhc *, uint32_t *,
+				    uint32_t *);
+	void (*enable_mbhc_txfe) (struct snd_soc_codec *, bool);
+	int (*enable_mb_source) (struct snd_soc_codec *, bool, bool);
+	void (*setup_int_rbias) (struct snd_soc_codec *, bool);
+	void (*pull_mb_to_vddio) (struct snd_soc_codec *, bool);
+	bool (*insert_rem_status) (struct snd_soc_codec *);
+	void (*micbias_pulldown_ctrl) (struct wcd9xxx_mbhc *, bool);
+	int (*codec_rco_ctrl) (struct snd_soc_codec *, bool);
+	void (*hph_auto_pulldown_ctrl) (struct snd_soc_codec *, bool);
+	struct firmware_cal * (*get_hwdep_fw_cal) (struct snd_soc_codec *,
+				enum wcd_cal_type);
+};
+
+struct wcd9xxx_mbhc {
+	bool polling_active;
+	/* Delayed work to report long button press */
+	struct delayed_work mbhc_btn_dwork;
+	int buttons_pressed;
+	enum wcd9xxx_mbhc_state mbhc_state;
+	struct wcd9xxx_mbhc_config *mbhc_cfg;
+	const struct wcd9xxx_mbhc_cb *mbhc_cb;
+
+	struct mbhc_internal_cal_data mbhc_data;
+
+	struct mbhc_micbias_regs mbhc_bias_regs;
+	struct mbhc_micbias_regs mbhc_anc_bias_regs;
+
+	bool mbhc_micbias_switched;
+
+	u32 hph_status; /* track headphone status */
+	u8 hphlocp_cnt; /* headphone left ocp retry */
+	u8 hphrocp_cnt; /* headphone right ocp retry */
+
+	/* Work to perform MBHC Firmware Read */
+	struct delayed_work mbhc_firmware_dwork;
+	const struct firmware *mbhc_fw;
+	struct firmware_cal *mbhc_cal;
+
+	struct delayed_work mbhc_insert_dwork;
+
+	u8 current_plug;
+	struct work_struct correct_plug_swch;
+	/*
+	 * Work to perform polling on microphone voltage
+	 * in order to correct plug type once plug type
+	 * is detected as headphone
+	 */
+	struct work_struct correct_plug_noswch;
+	bool hs_detect_work_stop;
+
+	bool lpi_enabled; /* low power insertion detection */
+	bool in_swch_irq_handler;
+
+	struct wcd9xxx_resmgr *resmgr;
+	struct snd_soc_codec *codec;
+
+	bool no_mic_headset_override;
+
+	/* track PA/DAC state to sync with userspace */
+	unsigned long hph_pa_dac_state;
+	/*
+	 * save codec's state with resmgr event notification
+	 * bit flags of enum wcd9xxx_mbhc_event_state
+	 */
+	unsigned long event_state;
+
+	unsigned long mbhc_last_resume; /* in jiffies */
+
+	bool insert_detect_level_insert;
+
+	struct snd_soc_jack headset_jack;
+	struct snd_soc_jack button_jack;
+
+	struct notifier_block nblock;
+
+	bool micbias_enable;
+	int (*micbias_enable_cb) (struct snd_soc_codec*,  bool,
+				  enum wcd9xxx_micbias_num);
+
+	bool impedance_detect;
+	/* impedance of hphl and hphr */
+	uint32_t zl, zr;
+
+	u32 rco_clk_rate;
+
+	bool update_z;
+
+	u8   scaling_mux_in;
+	/* Holds codec specific interrupt mapping */
+	const struct wcd9xxx_mbhc_intr *intr_ids;
+
+	/* Indicates status of current source switch */
+	bool is_cs_enabled;
+
+	/* Holds type of Headset - Mono/Stereo */
+	enum mbhc_hph_type hph_type;
+
+#ifdef CONFIG_DEBUG_FS
+	struct dentry *debugfs_poke;
+	struct dentry *debugfs_mbhc;
+#endif
+
+	struct mutex mbhc_lock;
+};
+
+#define WCD9XXX_MBHC_CAL_SIZE(buttons, rload) ( \
+	sizeof(enum wcd9xxx_micbias_num) + \
+	sizeof(struct wcd9xxx_mbhc_general_cfg) + \
+	sizeof(struct wcd9xxx_mbhc_plug_detect_cfg) + \
+	    ((sizeof(s16) + sizeof(s16)) * buttons) + \
+	sizeof(struct wcd9xxx_mbhc_plug_type_cfg) + \
+	sizeof(struct wcd9xxx_mbhc_btn_detect_cfg) + \
+	sizeof(struct wcd9xxx_mbhc_imped_detect_cfg) + \
+	    ((sizeof(u16) + sizeof(u16)) * rload) \
+	)
+
+#define WCD9XXX_MBHC_CAL_GENERAL_PTR(cali) ( \
+	    (struct wcd9xxx_mbhc_general_cfg *) cali)
+#define WCD9XXX_MBHC_CAL_PLUG_DET_PTR(cali) ( \
+	    (struct wcd9xxx_mbhc_plug_detect_cfg *) \
+	    &(WCD9XXX_MBHC_CAL_GENERAL_PTR(cali)[1]))
+#define WCD9XXX_MBHC_CAL_PLUG_TYPE_PTR(cali) ( \
+	    (struct wcd9xxx_mbhc_plug_type_cfg *) \
+	    &(WCD9XXX_MBHC_CAL_PLUG_DET_PTR(cali)[1]))
+#define WCD9XXX_MBHC_CAL_BTN_DET_PTR(cali) ( \
+	    (struct wcd9xxx_mbhc_btn_detect_cfg *) \
+	    &(WCD9XXX_MBHC_CAL_PLUG_TYPE_PTR(cali)[1]))
+#define WCD9XXX_MBHC_CAL_IMPED_DET_PTR(cali) ( \
+	    (struct wcd9xxx_mbhc_imped_detect_cfg *) \
+	    (((void *)&WCD9XXX_MBHC_CAL_BTN_DET_PTR(cali)[1]) + \
+	     (WCD9XXX_MBHC_CAL_BTN_DET_PTR(cali)->num_btn * \
+	      (sizeof(WCD9XXX_MBHC_CAL_BTN_DET_PTR(cali)->_v_btn_low[0]) + \
+	       sizeof(WCD9XXX_MBHC_CAL_BTN_DET_PTR(cali)->_v_btn_high[0])))) \
+	)
+
+/* minimum size of calibration data assuming there is only one button and
+ * one rload.
+ */
+#define WCD9XXX_MBHC_CAL_MIN_SIZE ( \
+	    sizeof(struct wcd9xxx_mbhc_general_cfg) + \
+	    sizeof(struct wcd9xxx_mbhc_plug_detect_cfg) + \
+	    sizeof(struct wcd9xxx_mbhc_plug_type_cfg) + \
+	    sizeof(struct wcd9xxx_mbhc_btn_detect_cfg) + \
+	    sizeof(struct wcd9xxx_mbhc_imped_detect_cfg) + \
+	    (sizeof(u16) * 2) \
+	)
+
+#define WCD9XXX_MBHC_CAL_BTN_SZ(cfg_ptr) ( \
+	    sizeof(struct wcd9xxx_mbhc_btn_detect_cfg) + \
+	    (cfg_ptr->num_btn * (sizeof(cfg_ptr->_v_btn_low[0]) + \
+				 sizeof(cfg_ptr->_v_btn_high[0]))))
+
+#define WCD9XXX_MBHC_CAL_IMPED_MIN_SZ ( \
+	    sizeof(struct wcd9xxx_mbhc_imped_detect_cfg) + sizeof(u16) * 2)
+
+#define WCD9XXX_MBHC_CAL_IMPED_SZ(cfg_ptr) ( \
+	    sizeof(struct wcd9xxx_mbhc_imped_detect_cfg) + \
+	    (cfg_ptr->_n_rload * \
+	     (sizeof(cfg_ptr->_rload[0]) + sizeof(cfg_ptr->_alpha[0]))))
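+
+/*
+ * Illustrative sketch (editor's addition, not part of the original driver):
+ * the calibration blob is a packed sequence of the cfg structs above
+ * followed by variable-length button/rload arrays, so a loader would
+ * typically sanity-check a firmware image before dereferencing the section
+ * pointers (fw, btn_det and imped are hypothetical locals):
+ *
+ *	if (fw->size < WCD9XXX_MBHC_CAL_MIN_SIZE)
+ *		return -EINVAL;
+ *	btn_det = WCD9XXX_MBHC_CAL_BTN_DET_PTR(fw->data);
+ *	imped = WCD9XXX_MBHC_CAL_IMPED_DET_PTR(fw->data);
+ */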
+
+int wcd9xxx_mbhc_set_keycode(struct wcd9xxx_mbhc *mbhc);
+int wcd9xxx_mbhc_start(struct wcd9xxx_mbhc *mbhc,
+		       struct wcd9xxx_mbhc_config *mbhc_cfg);
+void wcd9xxx_mbhc_stop(struct wcd9xxx_mbhc *mbhc);
+int wcd9xxx_mbhc_init(struct wcd9xxx_mbhc *mbhc, struct wcd9xxx_resmgr *resmgr,
+		      struct snd_soc_codec *codec,
+		      int (*micbias_enable_cb) (struct snd_soc_codec*,  bool,
+						enum wcd9xxx_micbias_num),
+		      const struct wcd9xxx_mbhc_cb *mbhc_cb,
+		      const struct wcd9xxx_mbhc_intr *mbhc_cdc_intr_ids,
+		      int rco_clk_rate,
+		      bool impedance_det_en);
+void wcd9xxx_mbhc_deinit(struct wcd9xxx_mbhc *mbhc);
+void *wcd9xxx_mbhc_cal_btn_det_mp(
+			    const struct wcd9xxx_mbhc_btn_detect_cfg *btn_det,
+			    const enum wcd9xxx_mbhc_btn_det_mem mem);
+int wcd9xxx_mbhc_get_impedance(struct wcd9xxx_mbhc *mbhc, uint32_t *zl,
+			       uint32_t *zr);
+#endif /* __WCD9XXX_MBHC_H__ */
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/sound/soc/codecs/wcd9xxx-resmgr.c	2019-01-22 16:16:29.555301211 +0100
@@ -0,0 +1,1099 @@
+/* Copyright (c) 2012-2014, 2016 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/firmware.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+#include <linux/device.h>
+#include <linux/printk.h>
+#include <linux/ratelimit.h>
+#include <linux/debugfs.h>
+#include <linux/mfd/wcd9xxx/core.h>
+#include <linux/mfd/wcd9xxx/wcd9xxx_registers.h>
+#include <uapi/linux/mfd/wcd9xxx/wcd9320_registers.h>
+#include <linux/mfd/wcd9xxx/wcd9330_registers.h>
+#include <linux/mfd/wcd9xxx/pdata.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+#include <sound/soc-dapm.h>
+#include <sound/tlv.h>
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include <linux/pm_runtime.h>
+#include <linux/kernel.h>
+#include <linux/gpio.h>
+#include "wcd9xxx-resmgr.h"
+
+static char wcd9xxx_event_string[][64] = {
+	"WCD9XXX_EVENT_INVALID",
+
+	"WCD9XXX_EVENT_PRE_RCO_ON",
+	"WCD9XXX_EVENT_POST_RCO_ON",
+	"WCD9XXX_EVENT_PRE_RCO_OFF",
+	"WCD9XXX_EVENT_POST_RCO_OFF",
+
+	"WCD9XXX_EVENT_PRE_MCLK_ON",
+	"WCD9XXX_EVENT_POST_MCLK_ON",
+	"WCD9XXX_EVENT_PRE_MCLK_OFF",
+	"WCD9XXX_EVENT_POST_MCLK_OFF",
+
+	"WCD9XXX_EVENT_PRE_BG_OFF",
+	"WCD9XXX_EVENT_POST_BG_OFF",
+	"WCD9XXX_EVENT_PRE_BG_AUDIO_ON",
+	"WCD9XXX_EVENT_POST_BG_AUDIO_ON",
+	"WCD9XXX_EVENT_PRE_BG_MBHC_ON",
+	"WCD9XXX_EVENT_POST_BG_MBHC_ON",
+
+	"WCD9XXX_EVENT_PRE_MICBIAS_1_OFF",
+	"WCD9XXX_EVENT_POST_MICBIAS_1_OFF",
+	"WCD9XXX_EVENT_PRE_MICBIAS_2_OFF",
+	"WCD9XXX_EVENT_POST_MICBIAS_2_OFF",
+	"WCD9XXX_EVENT_PRE_MICBIAS_3_OFF",
+	"WCD9XXX_EVENT_POST_MICBIAS_3_OFF",
+	"WCD9XXX_EVENT_PRE_MICBIAS_4_OFF",
+	"WCD9XXX_EVENT_POST_MICBIAS_4_OFF",
+	"WCD9XXX_EVENT_PRE_MICBIAS_1_ON",
+	"WCD9XXX_EVENT_POST_MICBIAS_1_ON",
+	"WCD9XXX_EVENT_PRE_MICBIAS_2_ON",
+	"WCD9XXX_EVENT_POST_MICBIAS_2_ON",
+	"WCD9XXX_EVENT_PRE_MICBIAS_3_ON",
+	"WCD9XXX_EVENT_POST_MICBIAS_3_ON",
+	"WCD9XXX_EVENT_PRE_MICBIAS_4_ON",
+	"WCD9XXX_EVENT_POST_MICBIAS_4_ON",
+
+	"WCD9XXX_EVENT_PRE_CFILT_1_OFF",
+	"WCD9XXX_EVENT_POST_CFILT_1_OFF",
+	"WCD9XXX_EVENT_PRE_CFILT_2_OFF",
+	"WCD9XXX_EVENT_POST_CFILT_2_OFF",
+	"WCD9XXX_EVENT_PRE_CFILT_3_OFF",
+	"WCD9XXX_EVENT_POST_CFILT_3_OFF",
+	"WCD9XXX_EVENT_PRE_CFILT_1_ON",
+	"WCD9XXX_EVENT_POST_CFILT_1_ON",
+	"WCD9XXX_EVENT_PRE_CFILT_2_ON",
+	"WCD9XXX_EVENT_POST_CFILT_2_ON",
+	"WCD9XXX_EVENT_PRE_CFILT_3_ON",
+	"WCD9XXX_EVENT_POST_CFILT_3_ON",
+
+	"WCD9XXX_EVENT_PRE_HPHL_PA_ON",
+	"WCD9XXX_EVENT_POST_HPHL_PA_OFF",
+	"WCD9XXX_EVENT_PRE_HPHR_PA_ON",
+	"WCD9XXX_EVENT_POST_HPHR_PA_OFF",
+
+	"WCD9XXX_EVENT_POST_RESUME",
+
+	"WCD9XXX_EVENT_PRE_TX_3_ON",
+	"WCD9XXX_EVENT_POST_TX_3_OFF",
+
+	"WCD9XXX_EVENT_LAST",
+};
+
+#define WCD9XXX_RCO_CALIBRATION_RETRY_COUNT 5
+#define WCD9XXX_RCO_CALIBRATION_DELAY_US 5000
+#define WCD9XXX_USLEEP_RANGE_MARGIN_US 100
+#define WCD9XXX_RCO_CALIBRATION_DELAY_INC_US 1000
+
+struct wcd9xxx_resmgr_cond_entry {
+	unsigned short reg;
+	int shift;
+	bool invert;
+	enum wcd9xxx_resmgr_cond cond;
+	struct list_head list;
+};
+
+static enum wcd9xxx_clock_type wcd9xxx_save_clock(struct wcd9xxx_resmgr
+						  *resmgr);
+static void wcd9xxx_restore_clock(struct wcd9xxx_resmgr *resmgr,
+				  enum wcd9xxx_clock_type type);
+
+const char *wcd9xxx_get_event_string(enum wcd9xxx_notify_event type)
+{
+	return wcd9xxx_event_string[type];
+}
+
+void wcd9xxx_resmgr_notifier_call(struct wcd9xxx_resmgr *resmgr,
+				  const enum wcd9xxx_notify_event e)
+{
+	pr_debug("%s: notifier call event %d\n", __func__, e);
+	blocking_notifier_call_chain(&resmgr->notifier, e, resmgr);
+}
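+
+/*
+ * Illustrative sketch (editor's addition): clients such as the MBHC block
+ * subscribe to these events through a standard notifier_block; my_event_cb
+ * and my_nblock are hypothetical names:
+ *
+ *	static int my_event_cb(struct notifier_block *nb,
+ *			       unsigned long event, void *data)
+ *	{
+ *		pr_debug("%s\n", wcd9xxx_get_event_string(event));
+ *		return 0;
+ *	}
+ *
+ *	static struct notifier_block my_nblock = {
+ *		.notifier_call = my_event_cb,
+ *	};
+ *
+ *	wcd9xxx_resmgr_register_notifier(resmgr, &my_nblock);
+ */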
+
+static void wcd9xxx_disable_bg(struct wcd9xxx_resmgr *resmgr)
+{
+	/* Notify bg mode change */
+	wcd9xxx_resmgr_notifier_call(resmgr, WCD9XXX_EVENT_PRE_BG_OFF);
+	/* Disable bg */
+	snd_soc_update_bits(resmgr->codec, WCD9XXX_A_BIAS_CENTRAL_BG_CTL,
+			    0x03, 0x00);
+	usleep_range(100, 110);
+	/* Notify bg mode change */
+	wcd9xxx_resmgr_notifier_call(resmgr, WCD9XXX_EVENT_POST_BG_OFF);
+}
+
+/*
+ * The BG should always be enabled in slow mode.
+ * Fast mode does not need to be enabled here, as fast-mode BG is driven
+ * by the MBHC override.
+ */
+static void wcd9xxx_enable_bg(struct wcd9xxx_resmgr *resmgr)
+{
+	struct snd_soc_codec *codec = resmgr->codec;
+
+	/* Enable BG in slow mode and precharge */
+	snd_soc_update_bits(codec, WCD9XXX_A_BIAS_CENTRAL_BG_CTL, 0x80, 0x80);
+	snd_soc_update_bits(codec, WCD9XXX_A_BIAS_CENTRAL_BG_CTL, 0x04, 0x04);
+	snd_soc_update_bits(codec, WCD9XXX_A_BIAS_CENTRAL_BG_CTL, 0x01, 0x01);
+	usleep_range(1000, 1100);
+	snd_soc_update_bits(codec, WCD9XXX_A_BIAS_CENTRAL_BG_CTL, 0x80, 0x00);
+}
+
+static void wcd9xxx_enable_bg_audio(struct wcd9xxx_resmgr *resmgr)
+{
+	/* Notify bandgap mode change */
+	wcd9xxx_resmgr_notifier_call(resmgr, WCD9XXX_EVENT_PRE_BG_AUDIO_ON);
+	wcd9xxx_enable_bg(resmgr);
+	/* Notify bandgap mode change */
+	wcd9xxx_resmgr_notifier_call(resmgr, WCD9XXX_EVENT_POST_BG_AUDIO_ON);
+}
+
+static void wcd9xxx_enable_bg_mbhc(struct wcd9xxx_resmgr *resmgr)
+{
+	struct snd_soc_codec *codec = resmgr->codec;
+
+	/* Notify bandgap mode change */
+	wcd9xxx_resmgr_notifier_call(resmgr, WCD9XXX_EVENT_PRE_BG_MBHC_ON);
+
+	/*
+	 * MCLK should be off, or the clk buff source shouldn't be VBG;
+	 * always turn MCLK off here.
+	 */
+	WARN_ON(snd_soc_read(codec, WCD9XXX_A_CLK_BUFF_EN2) & (1 << 2));
+
+	wcd9xxx_enable_bg(resmgr);
+	/* Notify bandgap mode change */
+	wcd9xxx_resmgr_notifier_call(resmgr, WCD9XXX_EVENT_POST_BG_MBHC_ON);
+}
+
+static void wcd9xxx_disable_clock_block(struct wcd9xxx_resmgr *resmgr)
+{
+	struct snd_soc_codec *codec = resmgr->codec;
+
+	pr_debug("%s: enter\n", __func__);
+	WCD9XXX_BG_CLK_ASSERT_LOCKED(resmgr);
+
+	/* Notify */
+	if (resmgr->clk_type == WCD9XXX_CLK_RCO)
+		wcd9xxx_resmgr_notifier_call(resmgr, WCD9XXX_EVENT_PRE_RCO_OFF);
+	else
+		wcd9xxx_resmgr_notifier_call(resmgr,
+					     WCD9XXX_EVENT_PRE_MCLK_OFF);
+
+	switch (resmgr->codec_type) {
+	case WCD9XXX_CDC_TYPE_TOMTOM:
+		snd_soc_update_bits(codec, WCD9XXX_A_CLK_BUFF_EN2, 0x04, 0x00);
+		usleep_range(50, 55);
+		snd_soc_update_bits(codec, WCD9XXX_A_CLK_BUFF_EN2, 0x02, 0x02);
+		snd_soc_update_bits(codec, WCD9XXX_A_CLK_BUFF_EN1, 0x40, 0x40);
+		snd_soc_update_bits(codec, WCD9XXX_A_CLK_BUFF_EN1, 0x40, 0x00);
+		snd_soc_update_bits(codec, WCD9XXX_A_CLK_BUFF_EN1, 0x01, 0x00);
+		break;
+	default:
+		snd_soc_update_bits(codec, WCD9XXX_A_CLK_BUFF_EN2, 0x04, 0x00);
+		usleep_range(50, 55);
+		snd_soc_update_bits(codec, WCD9XXX_A_CLK_BUFF_EN2, 0x02, 0x02);
+		snd_soc_update_bits(codec, WCD9XXX_A_CLK_BUFF_EN1, 0x05, 0x00);
+		break;
+	}
+	usleep_range(50, 55);
+	/* Notify */
+	if (resmgr->clk_type == WCD9XXX_CLK_RCO) {
+		wcd9xxx_resmgr_notifier_call(resmgr,
+					     WCD9XXX_EVENT_POST_RCO_OFF);
+	} else {
+		wcd9xxx_resmgr_notifier_call(resmgr,
+					     WCD9XXX_EVENT_POST_MCLK_OFF);
+	}
+	pr_debug("%s: leave\n", __func__);
+}
+
+static void wcd9xxx_resmgr_cdc_specific_get_clk(struct wcd9xxx_resmgr *resmgr,
+						int clk_users)
+{
+	/*
+	 * The caller of this function should have acquired the
+	 * BG_CLK lock.
+	 */
+	WCD9XXX_BG_CLK_UNLOCK(resmgr);
+	if (clk_users) {
+		if (resmgr->resmgr_cb &&
+		    resmgr->resmgr_cb->cdc_rco_ctrl) {
+			while (clk_users--)
+				resmgr->resmgr_cb->cdc_rco_ctrl(resmgr->codec,
+								true);
+		}
+	}
+	/* Acquire BG_CLK lock before return */
+	WCD9XXX_BG_CLK_LOCK(resmgr);
+}
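+
+/*
+ * Editorial note: the BG_CLK lock is dropped around cdc_rco_ctrl() above,
+ * presumably because the codec-specific RCO control path acquires the same
+ * lock internally; this rationale is an assumption, not stated in the
+ * source.
+ */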
+
+void wcd9xxx_resmgr_post_ssr(struct wcd9xxx_resmgr *resmgr)
+{
+	int old_bg_audio_users, old_bg_mbhc_users;
+	int old_clk_rco_users, old_clk_mclk_users;
+
+	pr_debug("%s: enter\n", __func__);
+
+	WCD9XXX_BG_CLK_LOCK(resmgr);
+	old_bg_audio_users = resmgr->bg_audio_users;
+	old_bg_mbhc_users = resmgr->bg_mbhc_users;
+	old_clk_rco_users = resmgr->clk_rco_users;
+	old_clk_mclk_users = resmgr->clk_mclk_users;
+	resmgr->bg_audio_users = 0;
+	resmgr->bg_mbhc_users = 0;
+	resmgr->bandgap_type = WCD9XXX_BANDGAP_OFF;
+	resmgr->clk_rco_users = 0;
+	resmgr->clk_mclk_users = 0;
+	resmgr->clk_type = WCD9XXX_CLK_OFF;
+
+	if (old_bg_audio_users) {
+		while (old_bg_audio_users--)
+			wcd9xxx_resmgr_get_bandgap(resmgr,
+						  WCD9XXX_BANDGAP_AUDIO_MODE);
+	}
+
+	if (old_bg_mbhc_users) {
+		while (old_bg_mbhc_users--)
+			wcd9xxx_resmgr_get_bandgap(resmgr,
+						  WCD9XXX_BANDGAP_MBHC_MODE);
+	}
+
+	if (old_clk_mclk_users) {
+		while (old_clk_mclk_users--)
+			wcd9xxx_resmgr_get_clk_block(resmgr, WCD9XXX_CLK_MCLK);
+	}
+
+	if (resmgr->codec_type == WCD9XXX_CDC_TYPE_TOMTOM) {
+		wcd9xxx_resmgr_cdc_specific_get_clk(resmgr, old_clk_rco_users);
+	} else if (old_clk_rco_users) {
+		while (old_clk_rco_users--)
+			wcd9xxx_resmgr_get_clk_block(resmgr,
+					WCD9XXX_CLK_RCO);
+	}
+	WCD9XXX_BG_CLK_UNLOCK(resmgr);
+	pr_debug("%s: leave\n", __func__);
+}
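+
+/*
+ * Editorial note: after a subsystem restart the codec loses all register
+ * state, so the function above zeroes the vote counters and replays every
+ * previously held bandgap/clock vote through the normal get paths.
+ */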
+
+/*
+ * wcd9xxx_resmgr_get_bandgap : Vote for bandgap ref
+ * choice : WCD9XXX_BANDGAP_AUDIO_MODE, WCD9XXX_BANDGAP_MBHC_MODE
+ */
+void wcd9xxx_resmgr_get_bandgap(struct wcd9xxx_resmgr *resmgr,
+				const enum wcd9xxx_bandgap_type choice)
+{
+	enum wcd9xxx_clock_type clock_save = WCD9XXX_CLK_OFF;
+
+	pr_debug("%s: enter, wants %d\n", __func__, choice);
+
+	WCD9XXX_BG_CLK_ASSERT_LOCKED(resmgr);
+	switch (choice) {
+	case WCD9XXX_BANDGAP_AUDIO_MODE:
+		resmgr->bg_audio_users++;
+		if (resmgr->bg_audio_users == 1 && resmgr->bg_mbhc_users) {
+			/*
+			 * Current bg is MBHC mode, about to switch to
+			 * audio mode.
+			 */
+			WARN_ON(resmgr->bandgap_type !=
+				WCD9XXX_BANDGAP_MBHC_MODE);
+
+			/* BG mode can be changed only with clock off */
+			if (resmgr->codec_type != WCD9XXX_CDC_TYPE_TOMTOM)
+				clock_save = wcd9xxx_save_clock(resmgr);
+			/* Switch BG mode */
+			wcd9xxx_disable_bg(resmgr);
+			wcd9xxx_enable_bg_audio(resmgr);
+			/* restore clock */
+			if (resmgr->codec_type != WCD9XXX_CDC_TYPE_TOMTOM)
+				wcd9xxx_restore_clock(resmgr, clock_save);
+		} else if (resmgr->bg_audio_users == 1) {
+			/* currently off, just enable it */
+			WARN_ON(resmgr->bandgap_type != WCD9XXX_BANDGAP_OFF);
+			wcd9xxx_enable_bg_audio(resmgr);
+		}
+		resmgr->bandgap_type = WCD9XXX_BANDGAP_AUDIO_MODE;
+		break;
+	case WCD9XXX_BANDGAP_MBHC_MODE:
+		resmgr->bg_mbhc_users++;
+		if (resmgr->bandgap_type == WCD9XXX_BANDGAP_MBHC_MODE ||
+		    resmgr->bandgap_type == WCD9XXX_BANDGAP_AUDIO_MODE)
+			/* do nothing */
+			break;
+
+		/* bg mode can be changed only with clock off */
+		clock_save = wcd9xxx_save_clock(resmgr);
+		/* enable bg with MBHC mode */
+		wcd9xxx_enable_bg_mbhc(resmgr);
+		/* restore clock */
+		wcd9xxx_restore_clock(resmgr, clock_save);
+		/* save current mode */
+		resmgr->bandgap_type = WCD9XXX_BANDGAP_MBHC_MODE;
+		break;
+	default:
+		pr_err("%s: Error, Invalid bandgap settings\n", __func__);
+		break;
+	}
+
+	pr_debug("%s: bg users audio %d, mbhc %d\n", __func__,
+		 resmgr->bg_audio_users, resmgr->bg_mbhc_users);
+}
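+
+/*
+ * Usage sketch (editor's addition): bandgap votes are refcounted and must
+ * be made under the BG_CLK lock, with every get paired with a put:
+ *
+ *	WCD9XXX_BG_CLK_LOCK(resmgr);
+ *	wcd9xxx_resmgr_get_bandgap(resmgr, WCD9XXX_BANDGAP_AUDIO_MODE);
+ *	WCD9XXX_BG_CLK_UNLOCK(resmgr);
+ *	...
+ *	WCD9XXX_BG_CLK_LOCK(resmgr);
+ *	wcd9xxx_resmgr_put_bandgap(resmgr, WCD9XXX_BANDGAP_AUDIO_MODE);
+ *	WCD9XXX_BG_CLK_UNLOCK(resmgr);
+ */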
+
+/*
+ * wcd9xxx_resmgr_put_bandgap : Unvote bandgap ref that has been voted
+ * choice : WCD9XXX_BANDGAP_AUDIO_MODE, WCD9XXX_BANDGAP_MBHC_MODE
+ */
+void wcd9xxx_resmgr_put_bandgap(struct wcd9xxx_resmgr *resmgr,
+				enum wcd9xxx_bandgap_type choice)
+{
+	enum wcd9xxx_clock_type clock_save;
+
+	pr_debug("%s: enter choice %d\n", __func__, choice);
+
+	WCD9XXX_BG_CLK_ASSERT_LOCKED(resmgr);
+	switch (choice) {
+	case WCD9XXX_BANDGAP_AUDIO_MODE:
+		if (--resmgr->bg_audio_users == 0) {
+			if (resmgr->bg_mbhc_users) {
+				/* bg mode can be changed only with clock off */
+				clock_save = wcd9xxx_save_clock(resmgr);
+				/* switch to MBHC mode */
+				wcd9xxx_enable_bg_mbhc(resmgr);
+				/* restore clock */
+				wcd9xxx_restore_clock(resmgr, clock_save);
+				resmgr->bandgap_type =
+				    WCD9XXX_BANDGAP_MBHC_MODE;
+			} else {
+				/* turn off */
+				wcd9xxx_disable_bg(resmgr);
+				resmgr->bandgap_type = WCD9XXX_BANDGAP_OFF;
+			}
+		}
+		break;
+	case WCD9XXX_BANDGAP_MBHC_MODE:
+		WARN(resmgr->bandgap_type == WCD9XXX_BANDGAP_OFF,
+		     "Unexpected bandgap type %d\n", resmgr->bandgap_type);
+		if (--resmgr->bg_mbhc_users == 0 &&
+		    resmgr->bandgap_type == WCD9XXX_BANDGAP_MBHC_MODE) {
+			wcd9xxx_disable_bg(resmgr);
+			resmgr->bandgap_type = WCD9XXX_BANDGAP_OFF;
+		}
+		break;
+	default:
+		pr_err("%s: Error, Invalid bandgap settings\n", __func__);
+		break;
+	}
+
+	pr_debug("%s: bg users audio %d, mbhc %d\n", __func__,
+		 resmgr->bg_audio_users, resmgr->bg_mbhc_users);
+}
+
+void wcd9xxx_resmgr_enable_rx_bias(struct wcd9xxx_resmgr *resmgr, u32 enable)
+{
+	struct snd_soc_codec *codec = resmgr->codec;
+
+	if (enable) {
+		resmgr->rx_bias_count++;
+		if (resmgr->rx_bias_count == 1)
+			snd_soc_update_bits(codec, WCD9XXX_A_RX_COM_BIAS,
+					    0x80, 0x80);
+	} else {
+		resmgr->rx_bias_count--;
+		if (!resmgr->rx_bias_count)
+			snd_soc_update_bits(codec, WCD9XXX_A_RX_COM_BIAS,
+					    0x80, 0x00);
+	}
+}
+
+int wcd9xxx_resmgr_enable_config_mode(struct wcd9xxx_resmgr *resmgr, int enable)
+{
+	struct snd_soc_codec *codec = resmgr->codec;
+
+	pr_debug("%s: enable = %d\n", __func__, enable);
+	if (enable) {
+		snd_soc_update_bits(codec, WCD9XXX_A_RC_OSC_FREQ, 0x10, 0);
+		/* bandgap mode to fast */
+		if (resmgr->pdata->mclk_rate == WCD9XXX_MCLK_CLK_12P288MHZ)
+			/* Set current value to 200nA for 12.288MHz clock */
+			snd_soc_write(codec, WCD9XXX_A_BIAS_OSC_BG_CTL, 0x37);
+		else
+			snd_soc_write(codec, WCD9XXX_A_BIAS_OSC_BG_CTL, 0x17);
+
+		usleep_range(5, 10);
+		snd_soc_update_bits(codec, WCD9XXX_A_RC_OSC_FREQ, 0x80, 0x80);
+		snd_soc_update_bits(codec, WCD9XXX_A_RC_OSC_TEST, 0x80, 0x80);
+		usleep_range(10, 20);
+		snd_soc_update_bits(codec, WCD9XXX_A_RC_OSC_TEST, 0x80, 0);
+		usleep_range(10000, 10100);
+
+		if (resmgr->pdata->mclk_rate != WCD9XXX_MCLK_CLK_12P288MHZ)
+			snd_soc_update_bits(codec, WCD9XXX_A_CLK_BUFF_EN1,
+							0x08, 0x08);
+	} else {
+		snd_soc_update_bits(codec, WCD9XXX_A_BIAS_OSC_BG_CTL, 0x1, 0);
+		snd_soc_update_bits(codec, WCD9XXX_A_RC_OSC_FREQ, 0x80, 0);
+	}
+
+	return 0;
+}
+
+static void wcd9xxx_enable_clock_block(struct wcd9xxx_resmgr *resmgr,
+				enum wcd9xxx_clock_config_mode config_mode)
+{
+	struct snd_soc_codec *codec = resmgr->codec;
+	unsigned long delay = WCD9XXX_RCO_CALIBRATION_DELAY_US;
+	int num_retry = 0;
+	unsigned int valr;
+	unsigned int valr1;
+	unsigned int valw[] = {0x01, 0x01, 0x10, 0x00};
+
+	pr_debug("%s: config_mode = %d\n", __func__, config_mode);
+
+	/* transit to RCO requires mclk off */
+	if (resmgr->codec_type != WCD9XXX_CDC_TYPE_TOMTOM)
+		WARN_ON(snd_soc_read(codec, WCD9XXX_A_CLK_BUFF_EN2) & (1 << 2));
+
+	if (config_mode == WCD9XXX_CFG_RCO) {
+		/* Notify */
+		wcd9xxx_resmgr_notifier_call(resmgr, WCD9XXX_EVENT_PRE_RCO_ON);
+		/* enable RCO and switch to it */
+		wcd9xxx_resmgr_enable_config_mode(resmgr, 1);
+		snd_soc_write(codec, WCD9XXX_A_CLK_BUFF_EN2, 0x02);
+		usleep_range(1000, 1100);
+	} else if (config_mode == WCD9XXX_CFG_CAL_RCO) {
+		snd_soc_update_bits(codec, TOMTOM_A_BIAS_OSC_BG_CTL,
+				    0x01, 0x01);
+		/* 1ms sleep required after BG enabled */
+		usleep_range(1000, 1100);
+
+		if (resmgr->pdata->mclk_rate == WCD9XXX_MCLK_CLK_12P288MHZ) {
+			/*
+			 * Set RCO clock rate as 12.288MHz rate explicitly
+			 * as the Qfuse values are incorrect for this rate
+			 */
+			snd_soc_update_bits(codec, TOMTOM_A_RCO_CTRL,
+					0x50, 0x50);
+		} else {
+			snd_soc_update_bits(codec, TOMTOM_A_RCO_CTRL,
+					0x18, 0x10);
+			valr = snd_soc_read(codec,
+					TOMTOM_A_QFUSE_DATA_OUT0) & (0x04);
+			valr1 = snd_soc_read(codec,
+					TOMTOM_A_QFUSE_DATA_OUT1) & (0x08);
+			valr = (valr >> 1) | (valr1 >> 3);
+			snd_soc_update_bits(codec, TOMTOM_A_RCO_CTRL, 0x60,
+					valw[valr] << 5);
+		}
+		snd_soc_update_bits(codec, TOMTOM_A_RCO_CTRL, 0x80, 0x80);
+
+		do {
+			snd_soc_update_bits(codec,
+					    TOMTOM_A_RCO_CALIBRATION_CTRL1,
+					    0x80, 0x80);
+			snd_soc_update_bits(codec,
+					    TOMTOM_A_RCO_CALIBRATION_CTRL1,
+					    0x80, 0x00);
+			/* RCO calibration takes approx. 5ms */
+			usleep_range(delay, delay +
+					    WCD9XXX_USLEEP_RANGE_MARGIN_US);
+			if (!(snd_soc_read(codec,
+				TOMTOM_A_RCO_CALIBRATION_RESULT1) & 0x10))
+				break;
+			if (num_retry >= 3) {
+				delay = delay +
+					WCD9XXX_RCO_CALIBRATION_DELAY_INC_US;
+			}
+		} while (num_retry++ < WCD9XXX_RCO_CALIBRATION_RETRY_COUNT);
+	} else {
+		/* Notify */
+		wcd9xxx_resmgr_notifier_call(resmgr, WCD9XXX_EVENT_PRE_MCLK_ON);
+		/* switch to MCLK */
+
+		switch (resmgr->codec_type) {
+		case WCD9XXX_CDC_TYPE_TOMTOM:
+			snd_soc_update_bits(codec, WCD9XXX_A_CLK_BUFF_EN1,
+					    0x08, 0x00);
+			snd_soc_update_bits(codec, WCD9XXX_A_CLK_BUFF_EN1,
+					    0x40, 0x40);
+			snd_soc_update_bits(codec, WCD9XXX_A_CLK_BUFF_EN1,
+					    0x40, 0x00);
+			/* clk source to ext clk and clk buff ref to VBG */
+			snd_soc_update_bits(codec, WCD9XXX_A_CLK_BUFF_EN1,
+					    0x0C, 0x04);
+			break;
+		default:
+			snd_soc_update_bits(codec, WCD9XXX_A_CLK_BUFF_EN1,
+					    0x08, 0x00);
+			/* if RCO is enabled, switch from it */
+			if (snd_soc_read(codec, WCD9XXX_A_RC_OSC_FREQ) & 0x80) {
+				snd_soc_write(codec, WCD9XXX_A_CLK_BUFF_EN2,
+					      0x02);
+				wcd9xxx_resmgr_enable_config_mode(resmgr, 0);
+			}
+			/* clk source to ext clk and clk buff ref to VBG */
+			snd_soc_update_bits(codec, WCD9XXX_A_CLK_BUFF_EN1,
+					    0x0C, 0x04);
+			break;
+		}
+	}
+
+	if (config_mode != WCD9XXX_CFG_CAL_RCO) {
+		snd_soc_update_bits(codec, WCD9XXX_A_CLK_BUFF_EN1,
+				    0x01, 0x01);
+		/*
+		 * sleep required by codec hardware to
+		 * enable clock buffer
+		 */
+		usleep_range(1000, 1200);
+		snd_soc_update_bits(codec, WCD9XXX_A_CLK_BUFF_EN2,
+				    0x02, 0x00);
+		/* on MCLK */
+		snd_soc_update_bits(codec, WCD9XXX_A_CLK_BUFF_EN2,
+				    0x04, 0x04);
+		snd_soc_update_bits(codec, WCD9XXX_A_CDC_CLK_MCLK_CTL,
+				    0x01, 0x01);
+	}
+	usleep_range(50, 55);
+
+	/* Notify */
+	if (config_mode == WCD9XXX_CFG_RCO)
+		wcd9xxx_resmgr_notifier_call(resmgr,
+					     WCD9XXX_EVENT_POST_RCO_ON);
+	else if (config_mode == WCD9XXX_CFG_MCLK)
+		wcd9xxx_resmgr_notifier_call(resmgr,
+					     WCD9XXX_EVENT_POST_MCLK_ON);
+}
+
+/*
+ * Disable the clock block if it is running and return whether a clock was
+ * previously enabled, so wcd9xxx_restore_clock() can bring it back later.
+ */
+static enum wcd9xxx_clock_type wcd9xxx_save_clock(struct wcd9xxx_resmgr *resmgr)
+{
+	WCD9XXX_BG_CLK_ASSERT_LOCKED(resmgr);
+	if (resmgr->clk_type != WCD9XXX_CLK_OFF)
+		wcd9xxx_disable_clock_block(resmgr);
+	return resmgr->clk_type != WCD9XXX_CLK_OFF;
+}
+
+static void wcd9xxx_restore_clock(struct wcd9xxx_resmgr *resmgr,
+				  enum wcd9xxx_clock_type type)
+{
+	if (type != WCD9XXX_CLK_OFF)
+		wcd9xxx_enable_clock_block(resmgr, type == WCD9XXX_CLK_RCO);
+}
+
+void wcd9xxx_resmgr_get_clk_block(struct wcd9xxx_resmgr *resmgr,
+				  enum wcd9xxx_clock_type type)
+{
+	struct snd_soc_codec *codec = resmgr->codec;
+
+	pr_debug("%s: current %d, requested %d, rco_users %d, mclk_users %d\n",
+		 __func__, resmgr->clk_type, type,
+		 resmgr->clk_rco_users, resmgr->clk_mclk_users);
+	WCD9XXX_BG_CLK_ASSERT_LOCKED(resmgr);
+	switch (type) {
+	case WCD9XXX_CLK_RCO:
+		if (++resmgr->clk_rco_users == 1 &&
+		    resmgr->clk_type == WCD9XXX_CLK_OFF) {
+			/* enable RCO and switch to it */
+			wcd9xxx_enable_clock_block(resmgr, WCD9XXX_CFG_RCO);
+			resmgr->clk_type = WCD9XXX_CLK_RCO;
+		} else if (resmgr->clk_rco_users == 1 &&
+			   resmgr->clk_type == WCD9XXX_CLK_MCLK &&
+			   resmgr->codec_type == WCD9XXX_CDC_TYPE_TOMTOM) {
+			/*
+			 * Enable RCO but do not switch CLK MUX to RCO
+			 * unless ext_clk_users is 1, which indicates
+			 * EXT CLK is enabled for RCO calibration
+			 */
+			wcd9xxx_enable_clock_block(resmgr, WCD9XXX_CFG_CAL_RCO);
+			if (resmgr->ext_clk_users == 1) {
+				/* Notify */
+				wcd9xxx_resmgr_notifier_call(resmgr,
+						WCD9XXX_EVENT_PRE_RCO_ON);
+				/* CLK MUX to RCO */
+				if (resmgr->pdata->mclk_rate !=
+						WCD9XXX_MCLK_CLK_12P288MHZ)
+					snd_soc_update_bits(codec,
+						WCD9XXX_A_CLK_BUFF_EN1,
+						0x08, 0x08);
+				resmgr->clk_type = WCD9XXX_CLK_RCO;
+				wcd9xxx_resmgr_notifier_call(resmgr,
+						WCD9XXX_EVENT_POST_RCO_ON);
+			}
+		}
+		break;
+	case WCD9XXX_CLK_MCLK:
+		if (++resmgr->clk_mclk_users == 1 &&
+		    resmgr->clk_type == WCD9XXX_CLK_OFF) {
+			/* switch to MCLK */
+			wcd9xxx_enable_clock_block(resmgr, WCD9XXX_CFG_MCLK);
+			resmgr->clk_type = WCD9XXX_CLK_MCLK;
+		} else if (resmgr->clk_mclk_users == 1 &&
+			   resmgr->clk_type == WCD9XXX_CLK_RCO) {
+			/* RCO to MCLK switch, with RCO still powered on */
+			if (resmgr->codec_type == WCD9XXX_CDC_TYPE_TOMTOM) {
+				wcd9xxx_resmgr_notifier_call(resmgr,
+						WCD9XXX_EVENT_PRE_MCLK_ON);
+				snd_soc_update_bits(codec,
+						WCD9XXX_A_BIAS_CENTRAL_BG_CTL,
+						0x40, 0x00);
+				/* Enable clock buffer */
+				snd_soc_update_bits(codec,
+						WCD9XXX_A_CLK_BUFF_EN1,
+						0x01, 0x01);
+				snd_soc_update_bits(codec,
+						WCD9XXX_A_CLK_BUFF_EN1,
+						0x08, 0x00);
+				wcd9xxx_resmgr_notifier_call(resmgr,
+						WCD9XXX_EVENT_POST_MCLK_ON);
+			} else {
+				/* if RCO is enabled, switch from it */
+				WARN_ON(!(snd_soc_read(resmgr->codec,
+					WCD9XXX_A_RC_OSC_FREQ) & 0x80));
+				/* disable clock block */
+				wcd9xxx_disable_clock_block(resmgr);
+				/* switch to MCLK */
+				wcd9xxx_enable_clock_block(resmgr,
+							   WCD9XXX_CFG_MCLK);
+			}
+			resmgr->clk_type = WCD9XXX_CLK_MCLK;
+		}
+		break;
+	default:
+		pr_err("%s: Error, Invalid clock get request %d\n", __func__,
+		       type);
+		break;
+	}
+	pr_debug("%s: leave\n", __func__);
+}
+
+void wcd9xxx_resmgr_put_clk_block(struct wcd9xxx_resmgr *resmgr,
+				  enum wcd9xxx_clock_type type)
+{
+	struct snd_soc_codec *codec = resmgr->codec;
+
+	pr_debug("%s: current %d, put %d\n", __func__, resmgr->clk_type, type);
+
+	WCD9XXX_BG_CLK_ASSERT_LOCKED(resmgr);
+	switch (type) {
+	case WCD9XXX_CLK_RCO:
+		if (--resmgr->clk_rco_users == 0 &&
+		    resmgr->clk_type == WCD9XXX_CLK_RCO) {
+			wcd9xxx_disable_clock_block(resmgr);
+			if (resmgr->codec_type == WCD9XXX_CDC_TYPE_TOMTOM) {
+				/* Powerdown RCO */
+				 snd_soc_update_bits(codec, TOMTOM_A_RCO_CTRL,
+						     0x80, 0x00);
+				 snd_soc_update_bits(codec,
+						TOMTOM_A_BIAS_OSC_BG_CTL,
+						0x01, 0x00);
+			} else {
+				/* if RCO is enabled, switch from it */
+				if (snd_soc_read(resmgr->codec,
+						 WCD9XXX_A_RC_OSC_FREQ)
+						 & 0x80) {
+					snd_soc_write(resmgr->codec,
+						WCD9XXX_A_CLK_BUFF_EN2,
+						0x02);
+					wcd9xxx_resmgr_enable_config_mode(
+								resmgr,	0);
+				}
+			}
+			resmgr->clk_type = WCD9XXX_CLK_OFF;
+		}
+		break;
+	case WCD9XXX_CLK_MCLK:
+		if (--resmgr->clk_mclk_users == 0 &&
+		    resmgr->clk_rco_users == 0) {
+			wcd9xxx_disable_clock_block(resmgr);
+
+			if ((resmgr->codec_type == WCD9XXX_CDC_TYPE_TOMTOM) &&
+			    (snd_soc_read(codec, TOMTOM_A_RCO_CTRL) & 0x80)) {
+				/* powerdown RCO */
+				 snd_soc_update_bits(codec, TOMTOM_A_RCO_CTRL,
+						     0x80, 0x00);
+				 snd_soc_update_bits(codec,
+						TOMTOM_A_BIAS_OSC_BG_CTL,
+						0x01, 0x00);
+			}
+			resmgr->clk_type = WCD9XXX_CLK_OFF;
+		} else if (resmgr->clk_mclk_users == 0 &&
+			   resmgr->clk_rco_users) {
+			if (resmgr->codec_type == WCD9XXX_CDC_TYPE_TOMTOM) {
+				if (!(snd_soc_read(codec, TOMTOM_A_RCO_CTRL) &
+				      0x80)) {
+					dev_dbg(codec->dev, "%s: Enabling RCO\n",
+						__func__);
+					wcd9xxx_enable_clock_block(resmgr,
+							WCD9XXX_CFG_CAL_RCO);
+					snd_soc_update_bits(codec,
+							WCD9XXX_A_CLK_BUFF_EN1,
+							0x01, 0x00);
+				} else {
+					wcd9xxx_resmgr_notifier_call(resmgr,
+						WCD9XXX_EVENT_PRE_MCLK_OFF);
+					snd_soc_update_bits(codec,
+							WCD9XXX_A_CLK_BUFF_EN1,
+							0x08, 0x08);
+					snd_soc_update_bits(codec,
+							WCD9XXX_A_CLK_BUFF_EN1,
+							0x01, 0x00);
+					wcd9xxx_resmgr_notifier_call(resmgr,
+						WCD9XXX_EVENT_POST_MCLK_OFF);
+					/* CLK Mux changed to RCO, notify that
+					 * RCO is ON
+					 */
+					wcd9xxx_resmgr_notifier_call(resmgr,
+						WCD9XXX_EVENT_POST_RCO_ON);
+				}
+			} else {
+				/* disable clock */
+				wcd9xxx_disable_clock_block(resmgr);
+				/* switch to RCO */
+				wcd9xxx_enable_clock_block(resmgr,
+							WCD9XXX_CFG_RCO);
+			}
+			resmgr->clk_type = WCD9XXX_CLK_RCO;
+		}
+		break;
+	default:
+		pr_err("%s: Error, Invalid clock get request %d\n", __func__,
+		       type);
+		break;
+	}
+	WARN_ON(resmgr->clk_rco_users < 0);
+	WARN_ON(resmgr->clk_mclk_users < 0);
+
+	pr_debug("%s: new rco_users %d, mclk_users %d\n", __func__,
+		 resmgr->clk_rco_users, resmgr->clk_mclk_users);
+}
+
+/*
+ * wcd9xxx_resmgr_get_clk_type()
+ * Returns clk type that is currently enabled
+ */
+int wcd9xxx_resmgr_get_clk_type(struct wcd9xxx_resmgr *resmgr)
+{
+	return resmgr->clk_type;
+}
+
+static void wcd9xxx_resmgr_update_cfilt_usage(struct wcd9xxx_resmgr *resmgr,
+					      enum wcd9xxx_cfilt_sel cfilt_sel,
+					      bool inc)
+{
+	u16 micb_cfilt_reg;
+	enum wcd9xxx_notify_event e_pre_on, e_post_off;
+	struct snd_soc_codec *codec = resmgr->codec;
+
+	switch (cfilt_sel) {
+	case WCD9XXX_CFILT1_SEL:
+		micb_cfilt_reg = WCD9XXX_A_MICB_CFILT_1_CTL;
+		e_pre_on = WCD9XXX_EVENT_PRE_CFILT_1_ON;
+		e_post_off = WCD9XXX_EVENT_POST_CFILT_1_OFF;
+		break;
+	case WCD9XXX_CFILT2_SEL:
+		micb_cfilt_reg = WCD9XXX_A_MICB_CFILT_2_CTL;
+		e_pre_on = WCD9XXX_EVENT_PRE_CFILT_2_ON;
+		e_post_off = WCD9XXX_EVENT_POST_CFILT_2_OFF;
+		break;
+	case WCD9XXX_CFILT3_SEL:
+		micb_cfilt_reg = WCD9XXX_A_MICB_CFILT_3_CTL;
+		e_pre_on = WCD9XXX_EVENT_PRE_CFILT_3_ON;
+		e_post_off = WCD9XXX_EVENT_POST_CFILT_3_OFF;
+		break;
+	default:
+		WARN(1, "Invalid CFILT selection %d\n", cfilt_sel);
+		return; /* should not happen */
+	}
+
+	if (inc) {
+		if ((resmgr->cfilt_users[cfilt_sel]++) == 0) {
+			/* Notify */
+			wcd9xxx_resmgr_notifier_call(resmgr, e_pre_on);
+			/* Enable CFILT */
+			snd_soc_update_bits(codec, micb_cfilt_reg, 0x80, 0x80);
+		}
+	} else {
+		/*
+		 * Check that the use count is not zero, decrement it, and
+		 * disable the cfilt once it reaches zero.
+		 */
+		WARN(resmgr->cfilt_users[cfilt_sel] == 0,
+		     "Invalid CFILT use count 0\n");
+		if ((--resmgr->cfilt_users[cfilt_sel]) == 0) {
+			/* Disable CFILT */
+			snd_soc_update_bits(codec, micb_cfilt_reg, 0x80, 0);
+			/* Notify MBHC so MBHC can switch CFILT to fast mode */
+			wcd9xxx_resmgr_notifier_call(resmgr, e_post_off);
+		}
+	}
+}
+
+void wcd9xxx_resmgr_cfilt_get(struct wcd9xxx_resmgr *resmgr,
+			      enum wcd9xxx_cfilt_sel cfilt_sel)
+{
+	return wcd9xxx_resmgr_update_cfilt_usage(resmgr, cfilt_sel, true);
+}
+
+void wcd9xxx_resmgr_cfilt_put(struct wcd9xxx_resmgr *resmgr,
+			      enum wcd9xxx_cfilt_sel cfilt_sel)
+{
+	return wcd9xxx_resmgr_update_cfilt_usage(resmgr, cfilt_sel, false);
+}
+
+int wcd9xxx_resmgr_get_k_val(struct wcd9xxx_resmgr *resmgr,
+			     unsigned int cfilt_mv)
+{
+	int rc = -EINVAL;
+	unsigned int ldoh_v = resmgr->micbias_pdata->ldoh_v;
+	unsigned min_mv, max_mv;
+
+	switch (ldoh_v) {
+	case WCD9XXX_LDOH_1P95_V:
+		min_mv = 160;
+		max_mv = 1800;
+		break;
+	case WCD9XXX_LDOH_2P35_V:
+		min_mv = 200;
+		max_mv = 2200;
+		break;
+	case WCD9XXX_LDOH_2P75_V:
+		min_mv = 240;
+		max_mv = 2600;
+		break;
+	case WCD9XXX_LDOH_3P0_V:
+		min_mv = 260;
+		max_mv = 2875;
+		break;
+	default:
+		goto done;
+	}
+
+	if (cfilt_mv < min_mv || cfilt_mv > max_mv)
+		goto done;
+
+	for (rc = 4; rc <= 44; rc++) {
+		min_mv = max_mv * (rc) / 44;
+		if (min_mv >= cfilt_mv) {
+			rc -= 4;
+			break;
+		}
+	}
+done:
+	return rc;
+}
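+
+/*
+ * Worked example (editor's addition): with LDOH at 2.75 V (min 240 mV,
+ * max 2600 mV), a request for cfilt_mv = 1800 stops at the first step
+ * where 2600 * rc / 44 >= 1800, i.e. rc = 31, so the function returns
+ * k = 31 - 4 = 27.
+ */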
+
+static void wcd9xxx_resmgr_cond_trigger_cond(struct wcd9xxx_resmgr *resmgr,
+					     enum wcd9xxx_resmgr_cond cond)
+{
+	struct list_head *l;
+	struct wcd9xxx_resmgr_cond_entry *e;
+	bool set;
+
+	pr_debug("%s: enter\n", __func__);
+	/* update bit if cond isn't available or cond is set */
+	set = !test_bit(cond, &resmgr->cond_avail_flags) ||
+	      !!test_bit(cond, &resmgr->cond_flags);
+	list_for_each(l, &resmgr->update_bit_cond_h) {
+		e = list_entry(l, struct wcd9xxx_resmgr_cond_entry, list);
+		if (e->cond == cond)
+			snd_soc_update_bits(resmgr->codec, e->reg,
+					    1 << e->shift,
+					    (set ? !e->invert : e->invert)
+					    << e->shift);
+	}
+	pr_debug("%s: leave\n", __func__);
+}
+
+/*
+ * wcd9xxx_regmgr_cond_register : notify resmgr that the conditions in
+ *				  condbits are available and trigger them.
+ * condbits : bitmask of enum wcd9xxx_resmgr_cond values
+ */
+void wcd9xxx_regmgr_cond_register(struct wcd9xxx_resmgr *resmgr,
+				  unsigned long condbits)
+{
+	unsigned int cond;
+
+	for_each_set_bit(cond, &condbits, BITS_PER_BYTE * sizeof(condbits)) {
+		mutex_lock(&resmgr->update_bit_cond_lock);
+		WARN(test_bit(cond, &resmgr->cond_avail_flags),
+		     "Condition 0x%0x is already registered\n", cond);
+		set_bit(cond, &resmgr->cond_avail_flags);
+		wcd9xxx_resmgr_cond_trigger_cond(resmgr, cond);
+		mutex_unlock(&resmgr->update_bit_cond_lock);
+		pr_debug("%s: Condition 0x%x is registered\n", __func__, cond);
+	}
+}
+
+void wcd9xxx_regmgr_cond_deregister(struct wcd9xxx_resmgr *resmgr,
+				    unsigned long condbits)
+{
+	unsigned int cond;
+
+	for_each_set_bit(cond, &condbits, BITS_PER_BYTE * sizeof(condbits)) {
+		mutex_lock(&resmgr->update_bit_cond_lock);
+		WARN(!test_bit(cond, &resmgr->cond_avail_flags),
+		     "Condition 0x%0x isn't registered\n", cond);
+		clear_bit(cond, &resmgr->cond_avail_flags);
+		wcd9xxx_resmgr_cond_trigger_cond(resmgr, cond);
+		mutex_unlock(&resmgr->update_bit_cond_lock);
+		pr_debug("%s: Condition 0x%x is deregistered\n", __func__,
+			 cond);
+	}
+}
+
+void wcd9xxx_resmgr_cond_update_cond(struct wcd9xxx_resmgr *resmgr,
+				     enum wcd9xxx_resmgr_cond cond, bool set)
+{
+	mutex_lock(&resmgr->update_bit_cond_lock);
+	if ((set && !test_and_set_bit(cond, &resmgr->cond_flags)) ||
+	    (!set && test_and_clear_bit(cond, &resmgr->cond_flags))) {
+		pr_debug("%s: Resource %d condition changed to %s\n", __func__,
+			 cond, set ? "set" : "clear");
+		wcd9xxx_resmgr_cond_trigger_cond(resmgr, cond);
+	}
+	mutex_unlock(&resmgr->update_bit_cond_lock);
+}
+
+int wcd9xxx_resmgr_add_cond_update_bits(struct wcd9xxx_resmgr *resmgr,
+					enum wcd9xxx_resmgr_cond cond,
+					unsigned short reg, int shift,
+					bool invert)
+{
+	struct wcd9xxx_resmgr_cond_entry *entry;
+
+	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+	if (!entry)
+		return -ENOMEM;
+
+	entry->cond = cond;
+	entry->reg = reg;
+	entry->shift = shift;
+	entry->invert = invert;
+
+	mutex_lock(&resmgr->update_bit_cond_lock);
+	list_add_tail(&entry->list, &resmgr->update_bit_cond_h);
+
+	wcd9xxx_resmgr_cond_trigger_cond(resmgr, cond);
+	mutex_unlock(&resmgr->update_bit_cond_lock);
+
+	return 0;
+}
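+
+/*
+ * Usage sketch (editor's addition): a codec driver can tie a register bit
+ * to a condition so the bit tracks, e.g., headphone plug state; reg and
+ * shift are hypothetical placeholders:
+ *
+ *	wcd9xxx_resmgr_add_cond_update_bits(resmgr, WCD9XXX_COND_HPH,
+ *					    reg, shift, false);
+ *	wcd9xxx_resmgr_cond_update_cond(resmgr, WCD9XXX_COND_HPH, true);
+ */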
+
+/*
+ * wcd9xxx_resmgr_rm_cond_update_bits :
+ * Clear bit and remove from the conditional bit update list
+ */
+int wcd9xxx_resmgr_rm_cond_update_bits(struct wcd9xxx_resmgr *resmgr,
+				       enum wcd9xxx_resmgr_cond cond,
+				       unsigned short reg, int shift,
+				       bool invert)
+{
+	struct list_head *l, *next;
+	struct wcd9xxx_resmgr_cond_entry *e = NULL;
+
+	pr_debug("%s: enter\n", __func__);
+	mutex_lock(&resmgr->update_bit_cond_lock);
+	list_for_each_safe(l, next, &resmgr->update_bit_cond_h) {
+		e = list_entry(l, struct wcd9xxx_resmgr_cond_entry, list);
+		if (e->reg == reg && e->shift == shift && e->invert == invert) {
+			snd_soc_update_bits(resmgr->codec, e->reg,
+					    1 << e->shift,
+					    e->invert << e->shift);
+			list_del(&e->list);
+			mutex_unlock(&resmgr->update_bit_cond_lock);
+			kfree(e);
+			return 0;
+		}
+	}
+	mutex_unlock(&resmgr->update_bit_cond_lock);
+	pr_err("%s: Cannot find update bit entry reg 0x%x, shift %d\n",
+	       __func__, e ? e->reg : 0, e ? e->shift : 0);
+
+	return -EINVAL;
+}
+
+int wcd9xxx_resmgr_register_notifier(struct wcd9xxx_resmgr *resmgr,
+				     struct notifier_block *nblock)
+{
+	return blocking_notifier_chain_register(&resmgr->notifier, nblock);
+}
+
+int wcd9xxx_resmgr_unregister_notifier(struct wcd9xxx_resmgr *resmgr,
+				       struct notifier_block *nblock)
+{
+	return blocking_notifier_chain_unregister(&resmgr->notifier, nblock);
+}
+
+int wcd9xxx_resmgr_init(struct wcd9xxx_resmgr *resmgr,
+			struct snd_soc_codec *codec,
+			struct wcd9xxx_core_resource *core_res,
+			struct wcd9xxx_pdata *pdata,
+			struct wcd9xxx_micbias_setting *micbias_pdata,
+			struct wcd9xxx_reg_address *reg_addr,
+			const struct wcd9xxx_resmgr_cb *resmgr_cb,
+			enum wcd9xxx_cdc_type cdc_type)
+{
+	WARN(ARRAY_SIZE(wcd9xxx_event_string) != WCD9XXX_EVENT_LAST + 1,
+	     "Event string table isn't up to date!, %zd != %d\n",
+	     ARRAY_SIZE(wcd9xxx_event_string), WCD9XXX_EVENT_LAST + 1);
+
+	resmgr->bandgap_type = WCD9XXX_BANDGAP_OFF;
+	resmgr->codec = codec;
+	resmgr->codec_type = cdc_type;
+	/* This gives access of core handle to lock/unlock suspend */
+	resmgr->core_res = core_res;
+	resmgr->pdata = pdata;
+	resmgr->micbias_pdata = micbias_pdata;
+	resmgr->reg_addr = reg_addr;
+	resmgr->resmgr_cb = resmgr_cb;
+
+	INIT_LIST_HEAD(&resmgr->update_bit_cond_h);
+
+	BLOCKING_INIT_NOTIFIER_HEAD(&resmgr->notifier);
+
+	mutex_init(&resmgr->codec_resource_lock);
+	mutex_init(&resmgr->codec_bg_clk_lock);
+	mutex_init(&resmgr->update_bit_cond_lock);
+
+	return 0;
+}
+
+void wcd9xxx_resmgr_deinit(struct wcd9xxx_resmgr *resmgr)
+{
+	mutex_destroy(&resmgr->update_bit_cond_lock);
+	mutex_destroy(&resmgr->codec_bg_clk_lock);
+	mutex_destroy(&resmgr->codec_resource_lock);
+}
+
+void wcd9xxx_resmgr_bcl_lock(struct wcd9xxx_resmgr *resmgr)
+{
+	mutex_lock(&resmgr->codec_resource_lock);
+}
+
+void wcd9xxx_resmgr_bcl_unlock(struct wcd9xxx_resmgr *resmgr)
+{
+	mutex_unlock(&resmgr->codec_resource_lock);
+}
+
+MODULE_DESCRIPTION("wcd9xxx resmgr module");
+MODULE_LICENSE("GPL v2");
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/sound/soc/codecs/wcd9xxx-resmgr.h	2019-01-22 16:16:29.555301211 +0100
@@ -0,0 +1,280 @@
+/* Copyright (c) 2012-2014, 2016 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#ifndef __WCD9XXX_COMMON_H__
+#define __WCD9XXX_COMMON_H__
+
+#include <linux/notifier.h>
+#include <linux/mfd/wcd9xxx/core.h>
+#include <linux/mfd/wcd9xxx/wcd9xxx_registers.h>
+
+enum wcd9xxx_bandgap_type {
+	WCD9XXX_BANDGAP_OFF,
+	WCD9XXX_BANDGAP_AUDIO_MODE,
+	WCD9XXX_BANDGAP_MBHC_MODE,
+};
+
+enum wcd9xxx_cdc_type {
+	WCD9XXX_CDC_TYPE_INVALID = 0,
+	WCD9XXX_CDC_TYPE_TAIKO,
+	WCD9XXX_CDC_TYPE_TAPAN,
+	WCD9XXX_CDC_TYPE_HELICON,
+	WCD9XXX_CDC_TYPE_TOMTOM,
+};
+
+enum wcd9xxx_clock_type {
+	WCD9XXX_CLK_OFF,
+	WCD9XXX_CLK_RCO,
+	WCD9XXX_CLK_MCLK,
+};
+
+enum wcd9xxx_clock_config_mode {
+	WCD9XXX_CFG_MCLK = 0,
+	WCD9XXX_CFG_RCO,
+	WCD9XXX_CFG_CAL_RCO,
+};
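+
+/*
+ * Editorial note: WCD9XXX_CFG_MCLK selects the external master clock,
+ * WCD9XXX_CFG_RCO enables and switches to the on-chip RC oscillator, and
+ * WCD9XXX_CFG_CAL_RCO (TomTom only) calibrates the RCO without switching
+ * the clock mux.
+ */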
+
+enum wcd9xxx_cfilt_sel {
+	WCD9XXX_CFILT1_SEL,
+	WCD9XXX_CFILT2_SEL,
+	WCD9XXX_CFILT3_SEL,
+	WCD9XXX_NUM_OF_CFILT,
+};
+
+struct wcd9xxx_reg_address {
+	u16 micb_4_ctl;
+	u16 micb_4_int_rbias;
+	u16 micb_4_mbhc;
+};
+
+enum wcd9xxx_notify_event {
+	WCD9XXX_EVENT_INVALID,
+
+	WCD9XXX_EVENT_PRE_RCO_ON,
+	WCD9XXX_EVENT_POST_RCO_ON,
+	WCD9XXX_EVENT_PRE_RCO_OFF,
+	WCD9XXX_EVENT_POST_RCO_OFF,
+
+	WCD9XXX_EVENT_PRE_MCLK_ON,
+	WCD9XXX_EVENT_POST_MCLK_ON,
+	WCD9XXX_EVENT_PRE_MCLK_OFF,
+	WCD9XXX_EVENT_POST_MCLK_OFF,
+
+	WCD9XXX_EVENT_PRE_BG_OFF,
+	WCD9XXX_EVENT_POST_BG_OFF,
+	WCD9XXX_EVENT_PRE_BG_AUDIO_ON,
+	WCD9XXX_EVENT_POST_BG_AUDIO_ON,
+	WCD9XXX_EVENT_PRE_BG_MBHC_ON,
+	WCD9XXX_EVENT_POST_BG_MBHC_ON,
+
+	WCD9XXX_EVENT_PRE_MICBIAS_1_OFF,
+	WCD9XXX_EVENT_POST_MICBIAS_1_OFF,
+	WCD9XXX_EVENT_PRE_MICBIAS_2_OFF,
+	WCD9XXX_EVENT_POST_MICBIAS_2_OFF,
+	WCD9XXX_EVENT_PRE_MICBIAS_3_OFF,
+	WCD9XXX_EVENT_POST_MICBIAS_3_OFF,
+	WCD9XXX_EVENT_PRE_MICBIAS_4_OFF,
+	WCD9XXX_EVENT_POST_MICBIAS_4_OFF,
+	WCD9XXX_EVENT_PRE_MICBIAS_1_ON,
+	WCD9XXX_EVENT_POST_MICBIAS_1_ON,
+	WCD9XXX_EVENT_PRE_MICBIAS_2_ON,
+	WCD9XXX_EVENT_POST_MICBIAS_2_ON,
+	WCD9XXX_EVENT_PRE_MICBIAS_3_ON,
+	WCD9XXX_EVENT_POST_MICBIAS_3_ON,
+	WCD9XXX_EVENT_PRE_MICBIAS_4_ON,
+	WCD9XXX_EVENT_POST_MICBIAS_4_ON,
+
+	WCD9XXX_EVENT_PRE_CFILT_1_OFF,
+	WCD9XXX_EVENT_POST_CFILT_1_OFF,
+	WCD9XXX_EVENT_PRE_CFILT_2_OFF,
+	WCD9XXX_EVENT_POST_CFILT_2_OFF,
+	WCD9XXX_EVENT_PRE_CFILT_3_OFF,
+	WCD9XXX_EVENT_POST_CFILT_3_OFF,
+	WCD9XXX_EVENT_PRE_CFILT_1_ON,
+	WCD9XXX_EVENT_POST_CFILT_1_ON,
+	WCD9XXX_EVENT_PRE_CFILT_2_ON,
+	WCD9XXX_EVENT_POST_CFILT_2_ON,
+	WCD9XXX_EVENT_PRE_CFILT_3_ON,
+	WCD9XXX_EVENT_POST_CFILT_3_ON,
+
+	WCD9XXX_EVENT_PRE_HPHL_PA_ON,
+	WCD9XXX_EVENT_POST_HPHL_PA_OFF,
+	WCD9XXX_EVENT_PRE_HPHR_PA_ON,
+	WCD9XXX_EVENT_POST_HPHR_PA_OFF,
+
+	WCD9XXX_EVENT_POST_RESUME,
+
+	WCD9XXX_EVENT_PRE_TX_3_ON,
+	WCD9XXX_EVENT_POST_TX_3_OFF,
+
+	WCD9XXX_EVENT_LAST,
+};
+
+struct wcd9xxx_resmgr_cb {
+	int (*cdc_rco_ctrl)(struct snd_soc_codec *, bool);
+};
+
+struct wcd9xxx_resmgr {
+	struct snd_soc_codec *codec;
+	struct wcd9xxx_core_resource *core_res;
+
+	u32 rx_bias_count;
+
+	/*
+	 * bandgap_type, bg_audio_users and bg_mbhc_users must only be
+	 * read or modified while holding the codec_bg_clk_lock mutex
+	 */
+	enum wcd9xxx_bandgap_type bandgap_type;
+	u16 bg_audio_users;
+	u16 bg_mbhc_users;
+
+	/*
+	 * clk_type, clk_rco_users and clk_mclk_users must only be
+	 * read or modified while holding the codec_bg_clk_lock mutex
+	 */
+	enum wcd9xxx_clock_type clk_type;
+	u16 clk_rco_users;
+	u16 clk_mclk_users;
+	u16 ext_clk_users;
+
+	/* per-cfilt use counts */
+	u16 cfilt_users[WCD9XXX_NUM_OF_CFILT];
+
+	struct wcd9xxx_reg_address *reg_addr;
+
+	struct wcd9xxx_pdata *pdata;
+
+	struct wcd9xxx_micbias_setting *micbias_pdata;
+
+	struct blocking_notifier_head notifier;
+	/* Notifier needs mbhc pointer with resmgr */
+	struct wcd9xxx_mbhc *mbhc;
+
+	unsigned long cond_flags;
+	unsigned long cond_avail_flags;
+	struct list_head update_bit_cond_h;
+	struct mutex update_bit_cond_lock;
+
+	/*
+	 * Currently used only by MBHC, to protect against concurrent
+	 * execution of the MBHC threaded IRQ handlers and to kill the
+	 * race between DAPM and MBHC, but it can serve as a general
+	 * lock to protect codec resources.
+	 */
+	struct mutex codec_resource_lock;
+	struct mutex codec_bg_clk_lock;
+
+	enum wcd9xxx_cdc_type codec_type;
+
+	const struct wcd9xxx_resmgr_cb *resmgr_cb;
+};
+
+int wcd9xxx_resmgr_init(struct wcd9xxx_resmgr *resmgr,
+			struct snd_soc_codec *codec,
+			struct wcd9xxx_core_resource *core_res,
+			struct wcd9xxx_pdata *pdata,
+			struct wcd9xxx_micbias_setting *micbias_pdata,
+			struct wcd9xxx_reg_address *reg_addr,
+			const struct wcd9xxx_resmgr_cb *resmgr_cb,
+			enum wcd9xxx_cdc_type cdc_type);
+void wcd9xxx_resmgr_deinit(struct wcd9xxx_resmgr *resmgr);
+
+int wcd9xxx_resmgr_enable_config_mode(struct wcd9xxx_resmgr *resmgr,
+				int enable);
+
+void wcd9xxx_resmgr_enable_rx_bias(struct wcd9xxx_resmgr *resmgr, u32 enable);
+void wcd9xxx_resmgr_get_clk_block(struct wcd9xxx_resmgr *resmgr,
+				  enum wcd9xxx_clock_type type);
+void wcd9xxx_resmgr_put_clk_block(struct wcd9xxx_resmgr *resmgr,
+				  enum wcd9xxx_clock_type type);
+void wcd9xxx_resmgr_get_bandgap(struct wcd9xxx_resmgr *resmgr,
+				const enum wcd9xxx_bandgap_type choice);
+void wcd9xxx_resmgr_put_bandgap(struct wcd9xxx_resmgr *resmgr,
+				enum wcd9xxx_bandgap_type choice);
+void wcd9xxx_resmgr_cfilt_get(struct wcd9xxx_resmgr *resmgr,
+			      enum wcd9xxx_cfilt_sel cfilt_sel);
+void wcd9xxx_resmgr_cfilt_put(struct wcd9xxx_resmgr *resmgr,
+			      enum wcd9xxx_cfilt_sel cfilt_sel);
+int wcd9xxx_resmgr_get_clk_type(struct wcd9xxx_resmgr *resmgr);
+
+void wcd9xxx_resmgr_bcl_lock(struct wcd9xxx_resmgr *resmgr);
+void wcd9xxx_resmgr_post_ssr(struct wcd9xxx_resmgr *resmgr);
+#define WCD9XXX_BCL_LOCK(resmgr)			\
+{							\
+	pr_debug("%s: Acquiring BCL\n", __func__);	\
+	wcd9xxx_resmgr_bcl_lock(resmgr);			\
+	pr_debug("%s: Acquiring BCL done\n", __func__);	\
+}
+
+void wcd9xxx_resmgr_bcl_unlock(struct wcd9xxx_resmgr *resmgr);
+#define WCD9XXX_BCL_UNLOCK(resmgr)			\
+{							\
+	pr_debug("%s: Release BCL\n", __func__);	\
+	wcd9xxx_resmgr_bcl_unlock(resmgr);			\
+}
+
+#define WCD9XXX_BCL_ASSERT_LOCKED(resmgr)		\
+{							\
+	WARN_ONCE(!mutex_is_locked(&resmgr->codec_resource_lock), \
+		  "%s: BCL should have acquired\n", __func__); \
+}
+
+#define WCD9XXX_BG_CLK_LOCK(resmgr)			\
+{							\
+	struct wcd9xxx_resmgr *__resmgr = resmgr;	\
+	pr_debug("%s: Acquiring BG_CLK\n", __func__);	\
+	mutex_lock(&__resmgr->codec_bg_clk_lock);	\
+	pr_debug("%s: Acquiring BG_CLK done\n", __func__);	\
+}
+
+#define WCD9XXX_BG_CLK_UNLOCK(resmgr)			\
+{							\
+	struct wcd9xxx_resmgr *__resmgr = resmgr;	\
+	pr_debug("%s: Releasing BG_CLK\n", __func__);	\
+	mutex_unlock(&__resmgr->codec_bg_clk_lock);	\
+}
+
+#define WCD9XXX_BG_CLK_ASSERT_LOCKED(resmgr)		\
+{							\
+	WARN_ONCE(!mutex_is_locked(&resmgr->codec_bg_clk_lock), \
+		  "%s: BG_CLK lock should have acquired\n", __func__); \
+}
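+
+/*
+ * Editorial note: the resource-manager helpers assert these locks rather
+ * than acquiring them, so a caller can compose several votes (bandgap plus
+ * clock, for instance) inside a single BG_CLK critical section.
+ */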
+
+const char *wcd9xxx_get_event_string(enum wcd9xxx_notify_event type);
+int wcd9xxx_resmgr_get_k_val(struct wcd9xxx_resmgr *resmgr,
+			     unsigned int cfilt_mv);
+int wcd9xxx_resmgr_register_notifier(struct wcd9xxx_resmgr *resmgr,
+				     struct notifier_block *nblock);
+int wcd9xxx_resmgr_unregister_notifier(struct wcd9xxx_resmgr *resmgr,
+				       struct notifier_block *nblock);
+void wcd9xxx_resmgr_notifier_call(struct wcd9xxx_resmgr *resmgr,
+				  const enum wcd9xxx_notify_event e);
+
+enum wcd9xxx_resmgr_cond {
+	WCD9XXX_COND_HPH = 0x01, /* Headphone */
+	WCD9XXX_COND_HPH_MIC = 0x02, /* Microphone on the headset */
+};
+void wcd9xxx_regmgr_cond_register(struct wcd9xxx_resmgr *resmgr,
+				  unsigned long condbits);
+void wcd9xxx_regmgr_cond_deregister(struct wcd9xxx_resmgr *resmgr,
+				    unsigned long condbits);
+int wcd9xxx_resmgr_rm_cond_update_bits(struct wcd9xxx_resmgr *resmgr,
+				       enum wcd9xxx_resmgr_cond cond,
+				       unsigned short reg, int shift,
+				       bool invert);
+int wcd9xxx_resmgr_add_cond_update_bits(struct wcd9xxx_resmgr *resmgr,
+					enum wcd9xxx_resmgr_cond cond,
+					unsigned short reg, int shift,
+					bool invert);
+void wcd9xxx_resmgr_cond_update_cond(struct wcd9xxx_resmgr *resmgr,
+				     enum wcd9xxx_resmgr_cond cond, bool set);
+
+#endif /* __WCD9XXX_COMMON_H__ */
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/sound/soc/codecs/wcd9xxx-resmgr-v2.c	2019-01-22 16:16:29.555301211 +0100
@@ -0,0 +1,682 @@
+/*
+ * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/mfd/wcd9xxx/core.h>
+#include <linux/mfd/wcd9335/registers.h>
+#include <linux/mfd/wcd934x/registers.h>
+#include <sound/soc.h>
+#include "wcd9xxx-resmgr-v2.h"
+
+#define WCD9XXX_RCO_CALIBRATION_DELAY_INC_US 5000
+#define WCD93XX_ANA_BIAS 0x0601
+#define WCD93XX_CDC_CLK_RST_CTRL_MCLK_CONTROL 0x0d41
+#define WCD93XX_CDC_CLK_RST_CTRL_FS_CNT_CONTROL 0x0d42
+
+
+static const char *wcd_resmgr_clk_type_to_str(enum wcd_clock_type clk_type)
+{
+	if (clk_type == WCD_CLK_OFF)
+		return "WCD_CLK_OFF";
+	else if (clk_type == WCD_CLK_RCO)
+		return "WCD_CLK_RCO";
+	else if (clk_type == WCD_CLK_MCLK)
+		return "WCD_CLK_MCLK";
+	else
+		return "WCD_CLK_UNDEFINED";
+}
+
+static int wcd_resmgr_codec_reg_update_bits(struct wcd9xxx_resmgr_v2 *resmgr,
+					    u16 reg, u8 mask, u8 val)
+{
+	bool change;
+	int ret;
+
+	if (resmgr->codec_type == WCD934X) {
+		/* Tavil does not support ANA_CLK_TOP register */
+		if (reg == WCD9335_ANA_CLK_TOP)
+			return 0;
+	} else {
+		/* Tasha does not support CLK_SYS_MCLK_PRG register */
+		if (reg == WCD934X_CLK_SYS_MCLK_PRG)
+			return 0;
+	}
+	if (resmgr->codec) {
+		ret = snd_soc_update_bits(resmgr->codec, reg, mask, val);
+	} else if (resmgr->core_res->wcd_core_regmap) {
+		ret = regmap_update_bits_check(
+				resmgr->core_res->wcd_core_regmap,
+				reg, mask, val, &change);
+		if (!ret)
+			ret = change;
+	} else {
+		pr_err("%s: codec/regmap not defined\n", __func__);
+		ret = -EINVAL;
+	}
+
+	return ret;
+}
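+
+/*
+ * Editorial note: register access goes through the snd_soc codec when one
+ * is registered and falls back to the raw core regmap otherwise,
+ * presumably so the resource manager can operate before the codec is
+ * registered with ASoC; the fallback rationale is an assumption, not
+ * stated in the source.
+ */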
+
+static int wcd_resmgr_codec_reg_read(struct wcd9xxx_resmgr_v2 *resmgr,
+				     unsigned int reg)
+{
+	int val, ret;
+
+	if (resmgr->codec_type == WCD934X) {
+		if (reg == WCD9335_ANA_CLK_TOP)
+			return 0;
+	} else {
+		if (reg == WCD934X_CLK_SYS_MCLK_PRG)
+			return 0;
+	}
+	if (resmgr->codec) {
+		val = snd_soc_read(resmgr->codec, reg);
+	} else if (resmgr->core_res->wcd_core_regmap) {
+		ret = regmap_read(resmgr->core_res->wcd_core_regmap,
+				  reg, &val);
+		if (ret)
+			val = ret;
+	} else {
+		pr_err("%s: wcd regmap is null\n", __func__);
+		return -EINVAL;
+	}
+
+	return val;
+}
+
+/*
+ * wcd_resmgr_get_clk_type()
+ * Returns clk type that is currently enabled
+ */
+int wcd_resmgr_get_clk_type(struct wcd9xxx_resmgr_v2 *resmgr)
+{
+	if (!resmgr) {
+		pr_err("%s: resmgr not initialized\n", __func__);
+		return -EINVAL;
+	}
+	return resmgr->clk_type;
+}
+
+static void wcd_resmgr_cdc_specific_get_clk(struct wcd9xxx_resmgr_v2 *resmgr,
+						int clk_users)
+{
+	/* Caller of this function should have acquired BG_CLK lock */
+	if (clk_users) {
+		if (resmgr->resmgr_cb &&
+		    resmgr->resmgr_cb->cdc_rco_ctrl) {
+			while (clk_users--)
+				resmgr->resmgr_cb->cdc_rco_ctrl(resmgr->codec,
+								true);
+		}
+	}
+}
+
+void wcd_resmgr_post_ssr_v2(struct wcd9xxx_resmgr_v2 *resmgr)
+{
+	int old_bg_audio_users;
+	int old_clk_rco_users, old_clk_mclk_users;
+
+	WCD9XXX_V2_BG_CLK_LOCK(resmgr);
+
+	old_bg_audio_users = resmgr->master_bias_users;
+	old_clk_mclk_users = resmgr->clk_mclk_users;
+	old_clk_rco_users = resmgr->clk_rco_users;
+	resmgr->master_bias_users = 0;
+	resmgr->clk_mclk_users = 0;
+	resmgr->clk_rco_users = 0;
+	resmgr->clk_type = WCD_CLK_OFF;
+
+	pr_debug("%s: old_bg_audio_users=%d old_clk_mclk_users=%d old_clk_rco_users=%d\n",
+		 __func__, old_bg_audio_users,
+		 old_clk_mclk_users, old_clk_rco_users);
+
+	if (old_bg_audio_users) {
+		while (old_bg_audio_users--)
+			wcd_resmgr_enable_master_bias(resmgr);
+	}
+
+	if (old_clk_mclk_users) {
+		while (old_clk_mclk_users--)
+			wcd_resmgr_enable_clk_block(resmgr, WCD_CLK_MCLK);
+	}
+
+	if (old_clk_rco_users)
+		wcd_resmgr_cdc_specific_get_clk(resmgr, old_clk_rco_users);
+
+	WCD9XXX_V2_BG_CLK_UNLOCK(resmgr);
+}
+
+
+/*
+ * wcd_resmgr_enable_master_bias: enable codec master bias
+ * @resmgr: handle to struct wcd9xxx_resmgr_v2
+ */
+int wcd_resmgr_enable_master_bias(struct wcd9xxx_resmgr_v2 *resmgr)
+{
+	mutex_lock(&resmgr->master_bias_lock);
+
+	resmgr->master_bias_users++;
+	if (resmgr->master_bias_users == 1) {
+		wcd_resmgr_codec_reg_update_bits(resmgr, WCD93XX_ANA_BIAS,
+						 0x80, 0x80);
+		wcd_resmgr_codec_reg_update_bits(resmgr, WCD93XX_ANA_BIAS,
+						 0x40, 0x40);
+		/*
+		 * 1ms delay is required after pre-charge is enabled
+		 * as per HW requirement
+		 */
+		usleep_range(1000, 1100);
+		wcd_resmgr_codec_reg_update_bits(resmgr, WCD93XX_ANA_BIAS,
+						 0x40, 0x00);
+		wcd_resmgr_codec_reg_update_bits(resmgr,
+						WCD93XX_ANA_BIAS, 0x20, 0x00);
+	}
+
+	pr_debug("%s: current master bias users: %d\n", __func__,
+		 resmgr->master_bias_users);
+
+	mutex_unlock(&resmgr->master_bias_lock);
+	return 0;
+}
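+
+/*
+ * Usage sketch (editor's addition): master bias votes are refcounted, and
+ * MCLK cannot be enabled until at least one bias vote is held:
+ *
+ *	wcd_resmgr_enable_master_bias(resmgr);
+ *	wcd_resmgr_enable_clk_block(resmgr, WCD_CLK_MCLK);
+ *	...
+ *	wcd_resmgr_disable_master_bias(resmgr);
+ *
+ * (the matching clock release helper lies outside this hunk and should be
+ * called before the final bias put)
+ */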
+
+/*
+ * wcd_resmgr_disable_master_bias: disable codec master bias
+ * @resmgr: handle to struct wcd9xxx_resmgr_v2
+ */
+int wcd_resmgr_disable_master_bias(struct wcd9xxx_resmgr_v2 *resmgr)
+{
+	mutex_lock(&resmgr->master_bias_lock);
+	if (resmgr->master_bias_users <= 0) {
+		mutex_unlock(&resmgr->master_bias_lock);
+		return -EINVAL;
+	}
+
+	resmgr->master_bias_users--;
+	if (resmgr->master_bias_users == 0) {
+		wcd_resmgr_codec_reg_update_bits(resmgr, WCD93XX_ANA_BIAS,
+						 0x80, 0x00);
+		wcd_resmgr_codec_reg_update_bits(resmgr,
+						WCD93XX_ANA_BIAS, 0x20, 0x00);
+	}
+	mutex_unlock(&resmgr->master_bias_lock);
+	return 0;
+}
+
+static int wcd_resmgr_enable_clk_mclk(struct wcd9xxx_resmgr_v2 *resmgr)
+{
+	/* Enable mclk requires master bias to be enabled first */
+	if (resmgr->master_bias_users <= 0) {
+		pr_err("%s: Cannot turn on MCLK, BG is not enabled\n",
+			__func__);
+		return -EINVAL;
+	}
+
+	if (((resmgr->clk_mclk_users == 0) &&
+	     (resmgr->clk_type == WCD_CLK_MCLK)) ||
+	    ((resmgr->clk_mclk_users > 0) &&
+	    (resmgr->clk_type != WCD_CLK_MCLK))) {
+		pr_err("%s: Error enabling MCLK, clk_type: %s\n",
+			__func__,
+			wcd_resmgr_clk_type_to_str(resmgr->clk_type));
+		return -EINVAL;
+	}
+
+	if (++resmgr->clk_mclk_users == 1) {
+		wcd_resmgr_codec_reg_update_bits(resmgr,
+				WCD9335_ANA_CLK_TOP, 0x80, 0x80);
+		wcd_resmgr_codec_reg_update_bits(resmgr,
+				WCD9335_ANA_CLK_TOP, 0x08, 0x00);
+		wcd_resmgr_codec_reg_update_bits(resmgr,
+				WCD9335_ANA_CLK_TOP, 0x04, 0x04);
+		if (resmgr->codec_type == WCD934X) {
+			/*
+			 * On Tavil the clock control register changed
+			 * to CLK_SYS_MCLK_PRG.
+			 */
+			wcd_resmgr_codec_reg_update_bits(resmgr,
+					WCD934X_CLK_SYS_MCLK_PRG, 0x80, 0x80);
+			wcd_resmgr_codec_reg_update_bits(resmgr,
+					WCD934X_CLK_SYS_MCLK_PRG, 0x30, 0x10);
+			wcd_resmgr_codec_reg_update_bits(resmgr,
+					WCD934X_CLK_SYS_MCLK_PRG, 0x02, 0x00);
+			wcd_resmgr_codec_reg_update_bits(resmgr,
+					WCD934X_CLK_SYS_MCLK_PRG, 0x01, 0x01);
+			wcd_resmgr_codec_reg_update_bits(resmgr,
+					WCD934X_CLK_SYS_MCLK_PRG, 0x02, 0x00);
+			wcd_resmgr_codec_reg_update_bits(resmgr,
+					WCD93XX_CDC_CLK_RST_CTRL_FS_CNT_CONTROL,
+					0x01, 0x01);
+			wcd_resmgr_codec_reg_update_bits(resmgr,
+					WCD93XX_CDC_CLK_RST_CTRL_MCLK_CONTROL,
+					0x01, 0x01);
+			wcd_resmgr_codec_reg_update_bits(resmgr,
+					WCD93XX_CDC_CLK_RST_CTRL_MCLK_CONTROL,
+					0x01, 0x01);
+			wcd_resmgr_codec_reg_update_bits(resmgr,
+					WCD934X_CODEC_RPM_CLK_GATE, 0x03, 0x00);
+		} else {
+			wcd_resmgr_codec_reg_update_bits(resmgr,
+					WCD93XX_CDC_CLK_RST_CTRL_FS_CNT_CONTROL,
+					0x01, 0x01);
+			wcd_resmgr_codec_reg_update_bits(resmgr,
+					WCD93XX_CDC_CLK_RST_CTRL_MCLK_CONTROL,
+					0x01, 0x01);
+		}
+		/*
+		 * 10us sleep is required after clock is enabled
+		 * as per HW requirement
+		 */
+		usleep_range(10, 15);
+	}
+
+	resmgr->clk_type = WCD_CLK_MCLK;
+
+	pr_debug("%s: mclk_users: %d, clk_type: %s\n", __func__,
+		 resmgr->clk_mclk_users,
+		 wcd_resmgr_clk_type_to_str(resmgr->clk_type));
+
+	return 0;
+}
+
+static int wcd_resmgr_disable_clk_mclk(struct wcd9xxx_resmgr_v2 *resmgr)
+{
+	if (resmgr->clk_mclk_users <= 0) {
+		pr_err("%s: No mclk users, cannot disable mclk\n", __func__);
+		return -EINVAL;
+	}
+
+	if (--resmgr->clk_mclk_users == 0) {
+		if (resmgr->clk_rco_users > 0) {
+			/* MCLK to RCO switch */
+			wcd_resmgr_codec_reg_update_bits(resmgr,
+					WCD9335_ANA_CLK_TOP,
+					0x08, 0x08);
+			wcd_resmgr_codec_reg_update_bits(resmgr,
+					WCD934X_CLK_SYS_MCLK_PRG, 0x02, 0x02);
+			/* Disable clock buffer */
+			wcd_resmgr_codec_reg_update_bits(resmgr,
+					WCD934X_CLK_SYS_MCLK_PRG, 0x80, 0x00);
+			resmgr->clk_type = WCD_CLK_RCO;
+		} else {
+			wcd_resmgr_codec_reg_update_bits(resmgr,
+					WCD9335_ANA_CLK_TOP,
+					0x04, 0x00);
+			wcd_resmgr_codec_reg_update_bits(resmgr,
+					WCD934X_CLK_SYS_MCLK_PRG, 0x81, 0x00);
+			resmgr->clk_type = WCD_CLK_OFF;
+		}
+
+		wcd_resmgr_codec_reg_update_bits(resmgr, WCD9335_ANA_CLK_TOP,
+						 0x80, 0x00);
+	}
+
+	if ((resmgr->codec_type == WCD934X) &&
+	    (resmgr->clk_type == WCD_CLK_OFF))
+		wcd_resmgr_set_sido_input_src(resmgr, SIDO_SOURCE_INTERNAL);
+
+	pr_debug("%s: mclk_users: %d, clk_type: %s\n", __func__,
+		 resmgr->clk_mclk_users,
+		 wcd_resmgr_clk_type_to_str(resmgr->clk_type));
+
+	return 0;
+}
+
+static void wcd_resmgr_set_buck_accuracy(struct wcd9xxx_resmgr_v2 *resmgr)
+{
+	wcd_resmgr_codec_reg_update_bits(resmgr, WCD934X_ANA_BUCK_CTL,
+					 0x02, 0x02);
+	/* 100us sleep needed after HIGH_ACCURACY_PRE_EN1 */
+	usleep_range(100, 110);
+	wcd_resmgr_codec_reg_update_bits(resmgr, WCD934X_ANA_BUCK_CTL,
+					 0x01, 0x01);
+	/* 100us sleep needed after HIGH_ACCURACY_PRE_EN2 */
+	usleep_range(100, 110);
+	wcd_resmgr_codec_reg_update_bits(resmgr, WCD934X_ANA_BUCK_CTL,
+					 0x04, 0x04);
+	/* 100us sleep needed after HIGH_ACCURACY_EN */
+	usleep_range(100, 110);
+}
+
+static int wcd_resmgr_enable_clk_rco(struct wcd9xxx_resmgr_v2 *resmgr)
+{
+	bool rco_cal_done = true;
+
+	resmgr->clk_rco_users++;
+	if ((resmgr->clk_rco_users == 1) &&
+	    ((resmgr->clk_type == WCD_CLK_OFF) ||
+	     (resmgr->clk_mclk_users == 0))) {
+		pr_warn("%s: RCO enable requires MCLK to be ON first\n",
+			__func__);
+		resmgr->clk_rco_users--;
+		return -EINVAL;
+	} else if ((resmgr->clk_rco_users == 1) &&
+		   (resmgr->clk_mclk_users)) {
+		/* RCO Enable */
+		if (resmgr->sido_input_src == SIDO_SOURCE_INTERNAL) {
+			wcd_resmgr_codec_reg_update_bits(resmgr,
+							 WCD9335_ANA_RCO,
+							 0x80, 0x80);
+			if (resmgr->codec_type == WCD934X)
+				wcd_resmgr_set_buck_accuracy(resmgr);
+		}
+
+		/*
+		 * 20us required after RCO BG is enabled as per HW
+		 * requirements
+		 */
+		usleep_range(20, 25);
+		wcd_resmgr_codec_reg_update_bits(resmgr, WCD9335_ANA_RCO,
+						 0x40, 0x40);
+		/*
+		 * 20us required after RCO is enabled as per HW
+		 * requirements
+		 */
+		usleep_range(20, 25);
+		/* RCO Calibration */
+		wcd_resmgr_codec_reg_update_bits(resmgr, WCD9335_ANA_RCO,
+						 0x04, 0x04);
+		if (resmgr->codec_type == WCD934X)
+			/*
+			 * For wcd934x codec, 20us sleep is needed
+			 * after enabling RCO calibration
+			 */
+			usleep_range(20, 25);
+
+		wcd_resmgr_codec_reg_update_bits(resmgr, WCD9335_ANA_RCO,
+						 0x04, 0x00);
+		if (resmgr->codec_type == WCD934X)
+			/*
+			 * For wcd934x codec, 20us sleep is needed
+			 * after disabling RCO calibration
+			 */
+			usleep_range(20, 25);
+
+		/* RCO calibration takes approx. 5 ms to complete */
+		usleep_range(WCD9XXX_RCO_CALIBRATION_DELAY_INC_US,
+		       WCD9XXX_RCO_CALIBRATION_DELAY_INC_US + 100);
+		if (wcd_resmgr_codec_reg_read(resmgr, WCD9335_ANA_RCO) & 0x02)
+			rco_cal_done = false;
+
+		WARN((!rco_cal_done), "RCO Calibration failed\n");
+
+		/* Switch MUX to RCO */
+		if (resmgr->clk_mclk_users == 1) {
+			wcd_resmgr_codec_reg_update_bits(resmgr,
+							WCD9335_ANA_CLK_TOP,
+							0x08, 0x08);
+			wcd_resmgr_codec_reg_update_bits(resmgr,
+						 WCD934X_CLK_SYS_MCLK_PRG,
+						 0x02, 0x02);
+			resmgr->clk_type = WCD_CLK_RCO;
+		}
+	}
+	pr_debug("%s: rco clk users: %d, clk_type: %s\n", __func__,
+		 resmgr->clk_rco_users,
+		 wcd_resmgr_clk_type_to_str(resmgr->clk_type));
+
+	return 0;
+}
+
+static int wcd_resmgr_disable_clk_rco(struct wcd9xxx_resmgr_v2 *resmgr)
+{
+	if ((resmgr->clk_rco_users <= 0) ||
+	    (resmgr->clk_type == WCD_CLK_OFF)) {
+		pr_err("%s: rco_clk_users = %d, clk_type = %d, cannot disable\n",
+			__func__, resmgr->clk_rco_users, resmgr->clk_type);
+		return -EINVAL;
+	}
+
+	resmgr->clk_rco_users--;
+
+	if ((resmgr->clk_rco_users == 0) &&
+	    (resmgr->clk_type == WCD_CLK_RCO)) {
+		wcd_resmgr_codec_reg_update_bits(resmgr, WCD9335_ANA_CLK_TOP,
+						 0x08, 0x00);
+		wcd_resmgr_codec_reg_update_bits(resmgr,
+						 WCD934X_CLK_SYS_MCLK_PRG,
+						 0x02, 0x00);
+		wcd_resmgr_codec_reg_update_bits(resmgr, WCD9335_ANA_CLK_TOP,
+						 0x04, 0x00);
+		wcd_resmgr_codec_reg_update_bits(resmgr, WCD9335_ANA_RCO,
+						 0x40, 0x00);
+		if (resmgr->sido_input_src == SIDO_SOURCE_INTERNAL)
+			wcd_resmgr_codec_reg_update_bits(resmgr,
+							 WCD9335_ANA_RCO,
+							 0x80, 0x00);
+		wcd_resmgr_codec_reg_update_bits(resmgr,
+						 WCD934X_CLK_SYS_MCLK_PRG,
+						 0x01, 0x00);
+		resmgr->clk_type = WCD_CLK_OFF;
+	} else if ((resmgr->clk_rco_users == 0) &&
+	      (resmgr->clk_mclk_users)) {
+		/* Disable RCO while MCLK is ON */
+		wcd_resmgr_codec_reg_update_bits(resmgr, WCD9335_ANA_RCO,
+						 0x40, 0x00);
+		if (resmgr->sido_input_src == SIDO_SOURCE_INTERNAL)
+			wcd_resmgr_codec_reg_update_bits(resmgr,
+							 WCD9335_ANA_RCO,
+							 0x80, 0x00);
+	}
+
+	if ((resmgr->codec_type == WCD934X) &&
+	    (resmgr->clk_type == WCD_CLK_OFF))
+		wcd_resmgr_set_sido_input_src(resmgr, SIDO_SOURCE_INTERNAL);
+
+	pr_debug("%s: rco clk users: %d, clk_type: %s\n", __func__,
+		 resmgr->clk_rco_users,
+		 wcd_resmgr_clk_type_to_str(resmgr->clk_type));
+
+	return 0;
+}
+
+/*
+ * wcd_resmgr_enable_clk_block: enable MCLK or RCO
+ * @resmgr: handle to struct wcd9xxx_resmgr_v2
+ * @type: Clock type to enable
+ */
+int wcd_resmgr_enable_clk_block(struct wcd9xxx_resmgr_v2 *resmgr,
+				enum wcd_clock_type type)
+{
+	int ret;
+
+	switch (type) {
+	case WCD_CLK_MCLK:
+		ret = wcd_resmgr_enable_clk_mclk(resmgr);
+		break;
+	case WCD_CLK_RCO:
+		ret = wcd_resmgr_enable_clk_rco(resmgr);
+		break;
+	default:
+		pr_err("%s: Unknown Clock type: %s\n", __func__,
+			wcd_resmgr_clk_type_to_str(type));
+		ret = -EINVAL;
+		break;
+	}
+
+	if (ret)
+		pr_err("%s: Enable clock %s failed\n", __func__,
+			wcd_resmgr_clk_type_to_str(type));
+
+	return ret;
+}
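+
+/*
+ * Typical usage (sketch; mirrors how codec drivers are expected to pair
+ * these calls under the BG_CLK lock):
+ *
+ *	WCD9XXX_V2_BG_CLK_LOCK(resmgr);
+ *	ret = wcd_resmgr_enable_clk_block(resmgr, WCD_CLK_MCLK);
+ *	...
+ *	wcd_resmgr_disable_clk_block(resmgr, WCD_CLK_MCLK);
+ *	WCD9XXX_V2_BG_CLK_UNLOCK(resmgr);
+ */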
+
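+/*
+ * wcd_resmgr_set_sido_input_src: switch the SIDO buck input source
+ * @resmgr: handle to struct wcd9xxx_resmgr_v2
+ * @sido_src: SIDO_SOURCE_INTERNAL or SIDO_SOURCE_RCO_BG
+ *
+ * Callers already in BG_CLK locked context use this directly; others
+ * use wcd_resmgr_set_sido_input_src_locked().
+ */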
+void wcd_resmgr_set_sido_input_src(struct wcd9xxx_resmgr_v2 *resmgr,
+					  int sido_src)
+{
+	if (!resmgr)
+		return;
+
+	if (sido_src == resmgr->sido_input_src)
+		return;
+
+	if (sido_src == SIDO_SOURCE_INTERNAL) {
+		wcd_resmgr_codec_reg_update_bits(resmgr, WCD934X_ANA_BUCK_CTL,
+						 0x04, 0x00);
+		usleep_range(100, 110);
+		wcd_resmgr_codec_reg_update_bits(resmgr, WCD934X_ANA_BUCK_CTL,
+						 0x03, 0x00);
+		usleep_range(100, 110);
+		wcd_resmgr_codec_reg_update_bits(resmgr, WCD934X_ANA_RCO,
+						 0x80, 0x00);
+		usleep_range(100, 110);
+		resmgr->sido_input_src = SIDO_SOURCE_INTERNAL;
+		pr_debug("%s: sido input src to internal\n", __func__);
+	} else if (sido_src == SIDO_SOURCE_RCO_BG) {
+		wcd_resmgr_codec_reg_update_bits(resmgr, WCD934X_ANA_RCO,
+						 0x80, 0x80);
+		usleep_range(100, 110);
+		wcd_resmgr_codec_reg_update_bits(resmgr, WCD934X_ANA_BUCK_CTL,
+						 0x02, 0x02);
+		usleep_range(100, 110);
+		wcd_resmgr_codec_reg_update_bits(resmgr, WCD934X_ANA_BUCK_CTL,
+						 0x01, 0x01);
+		usleep_range(100, 110);
+		wcd_resmgr_codec_reg_update_bits(resmgr, WCD934X_ANA_BUCK_CTL,
+						 0x04, 0x04);
+		usleep_range(100, 110);
+		resmgr->sido_input_src = SIDO_SOURCE_RCO_BG;
+		pr_debug("%s: sido input src to external\n", __func__);
+	}
+}
+EXPORT_SYMBOL(wcd_resmgr_set_sido_input_src);
+
+/*
+ * wcd_resmgr_set_sido_input_src_locked:
+ *   Set SIDO input in BG_CLK locked context
+ *
+ * @resmgr: handle to struct wcd9xxx_resmgr_v2
+ * @sido_src: Select the SIDO input source
+ */
+void wcd_resmgr_set_sido_input_src_locked(struct wcd9xxx_resmgr_v2 *resmgr,
+					  int sido_src)
+{
+	if (!resmgr)
+		return;
+
+	WCD9XXX_V2_BG_CLK_LOCK(resmgr);
+	wcd_resmgr_set_sido_input_src(resmgr, sido_src);
+	WCD9XXX_V2_BG_CLK_UNLOCK(resmgr);
+}
+EXPORT_SYMBOL(wcd_resmgr_set_sido_input_src_locked);
+
+/*
+ * wcd_resmgr_disable_clk_block: disable MCLK or RCO
+ * @resmgr: handle to struct wcd9xxx_resmgr_v2
+ * @type: Clock type to disable
+ */
+int wcd_resmgr_disable_clk_block(struct wcd9xxx_resmgr_v2 *resmgr,
+				enum wcd_clock_type type)
+{
+	int ret;
+
+	switch (type) {
+	case WCD_CLK_MCLK:
+		ret = wcd_resmgr_disable_clk_mclk(resmgr);
+		break;
+	case WCD_CLK_RCO:
+		ret = wcd_resmgr_disable_clk_rco(resmgr);
+		break;
+	default:
+		pr_err("%s: Unknown Clock type: %s\n", __func__,
+			wcd_resmgr_clk_type_to_str(type));
+		ret = -EINVAL;
+		break;
+	}
+
+	if (ret)
+		pr_err("%s: Disable clock %s failed\n", __func__,
+			wcd_resmgr_clk_type_to_str(type));
+
+	return ret;
+}
+
+/*
+ * wcd_resmgr_init: initialize wcd resource manager
+ * @core_res: handle to struct wcd9xxx_core_resource
+ * @codec: handle to struct snd_soc_codec; may be NULL during early
+ *	   init and assigned later through wcd_resmgr_post_init()
+ *
+ * Early init call that may be made without a handle to snd_soc_codec
+ */
+struct wcd9xxx_resmgr_v2 *wcd_resmgr_init(
+		struct wcd9xxx_core_resource *core_res,
+		struct snd_soc_codec *codec)
+{
+	struct wcd9xxx_resmgr_v2 *resmgr;
+	struct wcd9xxx *wcd9xxx;
+
+	resmgr = kzalloc(sizeof(*resmgr), GFP_KERNEL);
+	if (!resmgr)
+		return ERR_PTR(-ENOMEM);
+
+	wcd9xxx = container_of(core_res, struct wcd9xxx, core_res);
+	if (!wcd9xxx) {
+		kfree(resmgr);
+		pr_err("%s: Cannot get wcd9xx pointer\n", __func__);
+		return ERR_PTR(-EINVAL);
+	}
+
+	mutex_init(&resmgr->codec_bg_clk_lock);
+	mutex_init(&resmgr->master_bias_lock);
+	resmgr->master_bias_users = 0;
+	resmgr->clk_mclk_users = 0;
+	resmgr->clk_rco_users = 0;
+	resmgr->codec = codec;
+	resmgr->core_res = core_res;
+	resmgr->sido_input_src = SIDO_SOURCE_INTERNAL;
+	resmgr->codec_type = wcd9xxx->type;
+
+	return resmgr;
+}
+
+/*
+ * wcd_resmgr_remove: Clean-up wcd resource manager
+ * @resmgr: handle to struct wcd9xxx_resmgr_v2
+ */
+void wcd_resmgr_remove(struct wcd9xxx_resmgr_v2 *resmgr)
+{
+	mutex_destroy(&resmgr->codec_bg_clk_lock);
+	mutex_destroy(&resmgr->master_bias_lock);
+	kfree(resmgr);
+}
+
+/*
+ * wcd_resmgr_post_init: post init call to assign codec handle
+ * @resmgr: handle to struct wcd9xxx_resmgr_v2 created during early init
+ * @resmgr_cb: codec callback function for resmgr
+ * @codec: handle to struct snd_soc_codec
+ */
+int wcd_resmgr_post_init(struct wcd9xxx_resmgr_v2 *resmgr,
+			 const struct wcd_resmgr_cb *resmgr_cb,
+			 struct snd_soc_codec *codec)
+{
+	if (!resmgr) {
+		pr_err("%s: resmgr not allocated\n", __func__);
+		return -EINVAL;
+	}
+
+	if (!codec) {
+		pr_err("%s: Codec memory is NULL, nothing to post init\n",
+			__func__);
+		return -EINVAL;
+	}
+
+	resmgr->codec = codec;
+	resmgr->resmgr_cb = resmgr_cb;
+
+	return 0;
+}
+MODULE_DESCRIPTION("wcd9xxx resmgr v2 module");
+MODULE_LICENSE("GPL v2");
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/sound/soc/codecs/wcd9xxx-resmgr-v2.h	2019-01-22 16:16:29.555301211 +0100
@@ -0,0 +1,93 @@
+/*
+ * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#ifndef __WCD9XXX_COMMON_V2_H__
+#define __WCD9XXX_COMMON_V2_H__
+
+#include <linux/mfd/wcd9xxx/core.h>
+#include <linux/mfd/wcd9xxx/wcd9xxx_registers.h>
+
+enum wcd_clock_type {
+	WCD_CLK_OFF,
+	WCD_CLK_RCO,
+	WCD_CLK_MCLK,
+};
+
+enum {
+	SIDO_SOURCE_INTERNAL,
+	SIDO_SOURCE_RCO_BG,
+};
+
+struct wcd_resmgr_cb {
+	int (*cdc_rco_ctrl)(struct snd_soc_codec *, bool);
+};
+
+struct wcd9xxx_resmgr_v2 {
+	struct snd_soc_codec *codec;
+	struct wcd9xxx_core_resource *core_res;
+
+	int master_bias_users;
+	int clk_mclk_users;
+	int clk_rco_users;
+
+	struct mutex codec_bg_clk_lock;
+	struct mutex master_bias_lock;
+
+	enum codec_variant codec_type;
+	enum wcd_clock_type clk_type;
+
+	const struct wcd_resmgr_cb *resmgr_cb;
+	int sido_input_src;
+};
+
+#define WCD9XXX_V2_BG_CLK_LOCK(resmgr)			\
+do {							\
+	struct wcd9xxx_resmgr_v2 *__resmgr = resmgr;	\
+	pr_debug("%s: Acquiring BG_CLK\n", __func__);	\
+	mutex_lock(&__resmgr->codec_bg_clk_lock);	\
+	pr_debug("%s: Acquiring BG_CLK done\n", __func__);	\
+} while (0)
+
+#define WCD9XXX_V2_BG_CLK_UNLOCK(resmgr)			\
+do {							\
+	struct wcd9xxx_resmgr_v2 *__resmgr = resmgr;	\
+	pr_debug("%s: Releasing BG_CLK\n", __func__);	\
+	mutex_unlock(&__resmgr->codec_bg_clk_lock);	\
+} while (0)
+
+#define WCD9XXX_V2_BG_CLK_ASSERT_LOCKED(resmgr)		\
+do {							\
+	WARN_ONCE(!mutex_is_locked(&resmgr->codec_bg_clk_lock), \
+		  "%s: BG_CLK lock should have been acquired\n", __func__); \
+} while (0)
+
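+/*
+ * Helpers that must run with BG_CLK already held can guard themselves
+ * with WCD9XXX_V2_BG_CLK_ASSERT_LOCKED(resmgr), which only warns (once)
+ * and does not take the lock itself.
+ */
+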
+int wcd_resmgr_enable_master_bias(struct wcd9xxx_resmgr_v2 *resmgr);
+int wcd_resmgr_disable_master_bias(struct wcd9xxx_resmgr_v2 *resmgr);
+struct wcd9xxx_resmgr_v2 *wcd_resmgr_init(
+		struct wcd9xxx_core_resource *core_res,
+		struct snd_soc_codec *codec);
+void wcd_resmgr_remove(struct wcd9xxx_resmgr_v2 *resmgr);
+int wcd_resmgr_post_init(struct wcd9xxx_resmgr_v2 *resmgr,
+			 const struct wcd_resmgr_cb *resmgr_cb,
+			 struct snd_soc_codec *codec);
+int wcd_resmgr_enable_clk_block(struct wcd9xxx_resmgr_v2 *resmgr,
+				enum wcd_clock_type type);
+int wcd_resmgr_disable_clk_block(struct wcd9xxx_resmgr_v2 *resmgr,
+				enum wcd_clock_type type);
+int wcd_resmgr_get_clk_type(struct wcd9xxx_resmgr_v2 *resmgr);
+void wcd_resmgr_post_ssr_v2(struct wcd9xxx_resmgr_v2 *resmgr);
+void wcd_resmgr_set_sido_input_src_locked(struct wcd9xxx_resmgr_v2 *resmgr,
+					  int sido_src);
+void wcd_resmgr_set_sido_input_src(struct wcd9xxx_resmgr_v2 *resmgr,
+					  int sido_src);
+
+#endif
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/sound/soc/codecs/wcdcal-hwdep.c	2019-01-22 16:16:29.559301247 +0100
@@ -0,0 +1,228 @@
+/*
+ * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/ioctl.h>
+#include <linux/compat.h>
+#include <linux/bitops.h>
+#include <sound/hwdep.h>
+#include <sound/msmcal-hwdep.h>
+#include <sound/soc.h>
+#include "wcdcal-hwdep.h"
+
+const int cal_size_info[WCD9XXX_MAX_CAL] = {
+	[WCD9XXX_ANC_CAL] = 16384,
+	[WCD9XXX_MBHC_CAL] = 4096,
+	[WCD9XXX_MAD_CAL] = 4096,
+	[WCD9XXX_VBAT_CAL] = 72,
+};
+
+const char *cal_name_info[WCD9XXX_MAX_CAL] = {
+	[WCD9XXX_ANC_CAL] = "anc",
+	[WCD9XXX_MBHC_CAL] = "mbhc",
+	[WCD9XXX_MAD_CAL] = "mad",
+	[WCD9XXX_VBAT_CAL] = "vbat",
+};
+
+struct firmware_cal *wcdcal_get_fw_cal(struct fw_info *fw_data,
+					enum wcd_cal_type type)
+{
+	if (!fw_data) {
+		pr_err("%s: fw_data is NULL\n", __func__);
+		return NULL;
+	}
+	if (type >= WCD9XXX_MAX_CAL ||
+		type < WCD9XXX_MIN_CAL) {
+		pr_err("%s: wrong cal type sent %d\n", __func__, type);
+		return NULL;
+	}
+	mutex_lock(&fw_data->lock);
+	if (!test_bit(WCDCAL_RECIEVED,
+		&fw_data->wcdcal_state[type])) {
+		pr_err("%s: cal not sent by userspace %d\n",
+			__func__, type);
+		mutex_unlock(&fw_data->lock);
+		return NULL;
+	}
+	mutex_unlock(&fw_data->lock);
+	return fw_data->fw[type];
+}
+EXPORT_SYMBOL(wcdcal_get_fw_cal);
+
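+/*
+ * Usage sketch (hypothetical codec-side caller; parse_mbhc_cal() is an
+ * illustrative name, not part of this API):
+ *
+ *	struct firmware_cal *cal;
+ *
+ *	cal = wcdcal_get_fw_cal(fw_data, WCD9XXX_MBHC_CAL);
+ *	if (cal)
+ *		parse_mbhc_cal(cal->data, cal->size);
+ */
+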
+static int wcdcal_hwdep_ioctl_shared(struct snd_hwdep *hw,
+			struct wcdcal_ioctl_buffer fw_user)
+{
+	struct fw_info *fw_data = hw->private_data;
+	struct firmware_cal **fw = fw_data->fw;
+	void *data;
+
+	/* Validate cal_type before using it as an index */
+	if (fw_user.cal_type >= WCD9XXX_MAX_CAL ||
+		fw_user.cal_type < WCD9XXX_MIN_CAL) {
+		pr_err("%s: wrong cal type sent %d\n",
+				__func__, fw_user.cal_type);
+		return -EFAULT;
+	}
+	if (!test_bit(fw_user.cal_type, fw_data->cal_bit)) {
+		pr_err("%s: codec did not register cal type %d\n",
+				__func__, fw_user.cal_type);
+		return -EFAULT;
+	}
+	if (fw_user.size > cal_size_info[fw_user.cal_type] ||
+		fw_user.size <= 0) {
+		pr_err("%s: incorrect firmware size %d for %s\n",
+			__func__, fw_user.size,
+			cal_name_info[fw_user.cal_type]);
+		return -EFAULT;
+	}
+	data = fw[fw_user.cal_type]->data;
+	if (copy_from_user(data, fw_user.buffer, fw_user.size))
+		return -EFAULT;
+	fw[fw_user.cal_type]->size = fw_user.size;
+	mutex_lock(&fw_data->lock);
+	set_bit(WCDCAL_RECIEVED, &fw_data->wcdcal_state[fw_user.cal_type]);
+	mutex_unlock(&fw_data->lock);
+	return 0;
+}
+
+#ifdef CONFIG_COMPAT
+struct wcdcal_ioctl_buffer32 {
+	u32 size;
+	compat_uptr_t buffer;
+	enum wcd_cal_type cal_type;
+};
+
+enum {
+	SNDRV_CTL_IOCTL_HWDEP_CAL_TYPE32 =
+		_IOW('U', 0x1, struct wcdcal_ioctl_buffer32),
+};
+
+static int wcdcal_hwdep_ioctl_compat(struct snd_hwdep *hw, struct file *file,
+		unsigned int cmd, unsigned long arg)
+{
+	struct wcdcal_ioctl_buffer32 __user *argp = (void __user *)arg;
+	struct wcdcal_ioctl_buffer32 fw_user32;
+	struct wcdcal_ioctl_buffer fw_user_compat;
+
+	if (cmd != SNDRV_CTL_IOCTL_HWDEP_CAL_TYPE32) {
+		pr_err("%s: wrong ioctl command sent %u!\n", __func__, cmd);
+		return -ENOIOCTLCMD;
+	}
+	if (copy_from_user(&fw_user32, argp, sizeof(fw_user32))) {
+		pr_err("%s: failed to copy\n", __func__);
+		return -EFAULT;
+	}
+	fw_user_compat.size = fw_user32.size;
+	fw_user_compat.buffer = compat_ptr(fw_user32.buffer);
+	fw_user_compat.cal_type = fw_user32.cal_type;
+	return wcdcal_hwdep_ioctl_shared(hw, fw_user_compat);
+}
+#else
+#define wcdcal_hwdep_ioctl_compat NULL
+#endif
+
+static int wcdcal_hwdep_ioctl(struct snd_hwdep *hw, struct file *file,
+		unsigned int cmd, unsigned long arg)
+{
+	struct wcdcal_ioctl_buffer __user *argp = (void __user *)arg;
+	struct wcdcal_ioctl_buffer fw_user;
+
+	if (cmd != SNDRV_CTL_IOCTL_HWDEP_CAL_TYPE) {
+		pr_err("%s: wrong ioctl command sent %d!\n", __func__, cmd);
+		return -ENOIOCTLCMD;
+	}
+	if (copy_from_user(&fw_user, argp, sizeof(fw_user))) {
+		pr_err("%s: failed to copy\n", __func__);
+		return -EFAULT;
+	}
+	return wcdcal_hwdep_ioctl_shared(hw, fw_user);
+}
+
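+/*
+ * Userspace usage sketch (illustrative only; error handling omitted,
+ * hwdep_fd is assumed to be an open handle to this hwdep node):
+ *
+ *	struct wcdcal_ioctl_buffer cal = {
+ *		.size = blob_size,
+ *		.buffer = blob,
+ *		.cal_type = WCD9XXX_MBHC_CAL,
+ *	};
+ *	ioctl(hwdep_fd, SNDRV_CTL_IOCTL_HWDEP_CAL_TYPE, &cal);
+ */
+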
+static int wcdcal_hwdep_release(struct snd_hwdep *hw, struct file *file)
+{
+	struct fw_info *fw_data = hw->private_data;
+	mutex_lock(&fw_data->lock);
+	/* clear all the calibrations */
+	memset(fw_data->wcdcal_state, 0,
+		sizeof(fw_data->wcdcal_state));
+	mutex_unlock(&fw_data->lock);
+	return 0;
+}
+
+int wcd_cal_create_hwdep(void *data, int node, struct snd_soc_codec *codec)
+{
+	char hwname[40];
+	struct snd_hwdep *hwdep;
+	struct firmware_cal **fw;
+	struct fw_info *fw_data = data;
+	int err, cal_bit;
+
+	if (!fw_data || !codec) {
+		pr_err("%s: wrong arguments passed\n", __func__);
+		return -EINVAL;
+	}
+
+	fw = fw_data->fw;
+	snprintf(hwname, strlen("Codec %s"), "Codec %s",
+		 codec->component.name);
+	err = snd_hwdep_new(codec->component.card->snd_card,
+			    hwname, node, &hwdep);
+	if (err < 0) {
+		dev_err(codec->dev, "%s: new hwdep failed %d\n",
+				__func__, err);
+		return err;
+	}
+	snprintf(hwdep->name, strlen("Codec %s"), "Codec %s",
+		 codec->component.name);
+	hwdep->iface = SNDRV_HWDEP_IFACE_AUDIO_CODEC;
+	hwdep->private_data = fw_data;
+	hwdep->ops.ioctl_compat = wcdcal_hwdep_ioctl_compat;
+	hwdep->ops.ioctl = wcdcal_hwdep_ioctl;
+	hwdep->ops.release = wcdcal_hwdep_release;
+	mutex_init(&fw_data->lock);
+
+	for_each_set_bit(cal_bit, fw_data->cal_bit, WCD9XXX_MAX_CAL) {
+		set_bit(WCDCAL_UNINITIALISED,
+				&fw_data->wcdcal_state[cal_bit]);
+		fw[cal_bit] = kzalloc(sizeof *(fw[cal_bit]), GFP_KERNEL);
+		if (!fw[cal_bit]) {
+			dev_err(codec->dev, "%s: no memory for %s cal\n",
+				__func__, cal_name_info[cal_bit]);
+			goto end;
+		}
+	}
+	for_each_set_bit(cal_bit, fw_data->cal_bit, WCD9XXX_MAX_CAL) {
+		fw[cal_bit]->data = kzalloc(cal_size_info[cal_bit],
+						GFP_KERNEL);
+		if (!fw[cal_bit]->data) {
+			dev_err(codec->dev, "%s: no memory for %s cal data\n",
+				__func__, cal_name_info[cal_bit]);
+			goto exit;
+		}
+		set_bit(WCDCAL_INITIALISED,
+			&fw_data->wcdcal_state[cal_bit]);
+	}
+	return 0;
+exit:
+	for_each_set_bit(cal_bit, fw_data->cal_bit, WCD9XXX_MAX_CAL) {
+		kfree(fw[cal_bit]->data);
+		fw[cal_bit]->data = NULL;
+	}
+end:
+	for_each_set_bit(cal_bit, fw_data->cal_bit, WCD9XXX_MAX_CAL) {
+		kfree(fw[cal_bit]);
+		fw[cal_bit] = NULL;
+	}
+	return -ENOMEM;
+}
+EXPORT_SYMBOL(wcd_cal_create_hwdep);
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/sound/soc/codecs/wcdcal-hwdep.h	2019-01-22 16:16:29.559301247 +0100
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#ifndef __WCD9XXX_HWDEP_H__
+#define __WCD9XXX_HWDEP_H__
+#include <sound/msmcal-hwdep.h>
+
+enum wcd_cal_states {
+	WCDCAL_UNINITIALISED,
+	WCDCAL_INITIALISED,
+	WCDCAL_RECIEVED
+};
+
+struct fw_info {
+	struct firmware_cal *fw[WCD9XXX_MAX_CAL];
+	DECLARE_BITMAP(cal_bit, WCD9XXX_MAX_CAL);
+	/* for calibration tracking */
+	unsigned long wcdcal_state[WCD9XXX_MAX_CAL];
+	struct mutex lock;
+};
+
+struct firmware_cal {
+	u8 *data;
+	size_t size;
+};
+
+struct snd_soc_codec;
+int wcd_cal_create_hwdep(void *fw, int node, struct snd_soc_codec *codec);
+struct firmware_cal *wcdcal_get_fw_cal(struct fw_info *fw_data,
+					enum wcd_cal_type type);
+#endif /* __WCD9XXX_HWDEP_H__ */
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/sound/soc/codecs/wcd_cmi_api.h	2019-01-22 16:16:29.555301211 +0100
@@ -0,0 +1,43 @@
+/* Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __CMI_API__
+#define __CMI_API__
+
+enum cmi_api_result {
+	CMI_API_FAILED = 1,
+	CMI_API_BUSY,
+	CMI_API_NO_MEMORY,
+	CMI_API_NOT_READY,
+};
+
+enum cmi_api_event {
+	CMI_API_MSG = 1,
+	CMI_API_OFFLINE,
+	CMI_API_ONLINE,
+	CMI_API_DEINITIALIZED,
+};
+
+struct cmi_api_notification {
+	enum cmi_api_event event;
+	enum cmi_api_result result;
+	void *message;
+};
+
+void *cmi_register(
+	void notification_callback
+		(const struct cmi_api_notification *parameter),
+	u32 service);
+enum cmi_api_result cmi_deregister(void *reg_handle);
+enum cmi_api_result cmi_send_msg(void *message);
+
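+/*
+ * Usage sketch (hypothetical client; my_cmi_cb() and handle_msg() are
+ * illustrative names, and the service id comes from the CMI definitions
+ * in cpe_cmi.h):
+ *
+ *	static void my_cmi_cb(const struct cmi_api_notification *n)
+ *	{
+ *		if (n->event == CMI_API_MSG)
+ *			handle_msg(n->message);
+ *	}
+ *
+ *	handle = cmi_register(my_cmi_cb, service_id);
+ *	...
+ *	cmi_deregister(handle);
+ */
+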
+#endif /*__CMI_API__*/
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/sound/soc/codecs/wcd_cpe_core.c	2019-01-22 16:16:29.555301211 +0100
@@ -0,0 +1,4614 @@
+/* Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/firmware.h>
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <linux/elf.h>
+#include <linux/wait.h>
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/pm_qos.h>
+#include <linux/dma-mapping.h>
+#include <sound/soc.h>
+#include <sound/info.h>
+#include <sound/lsm_params.h>
+#include <sound/cpe_core.h>
+#include <sound/cpe_cmi.h>
+#include <sound/cpe_err.h>
+#include <soc/qcom/pm.h>
+#include <linux/mfd/wcd9xxx/core.h>
+#include <linux/mfd/wcd9xxx/wcd9xxx-irq.h>
+#include <sound/audio_cal_utils.h>
+#include "wcd_cpe_core.h"
+#include "wcd_cpe_services.h"
+#include "wcd_cmi_api.h"
+
+#define CMI_CMD_TIMEOUT (10 * HZ)
+#define WCD_CPE_LSM_MAX_SESSIONS 2
+#define WCD_CPE_AFE_MAX_PORTS 4
+#define AFE_SVC_EXPLICIT_PORT_START 1
+#define WCD_CPE_EC_PP_BUF_SIZE	480 /* 5 msec buffer */
+
+#define ELF_FLAG_EXECUTE (1 << 0)
+#define ELF_FLAG_WRITE (1 << 1)
+#define ELF_FLAG_READ (1 << 2)
+
+#define ELF_FLAG_RW (ELF_FLAG_READ | ELF_FLAG_WRITE)
+
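+/*
+ * Segment routing sketch: a program header with ELF_FLAG_EXECUTE set is
+ * downloaded to CPE instruction memory, while a non-executable header
+ * with read/write flags (ELF_FLAG_RW) goes to data memory; see
+ * wcd_cpe_load_each_segment() and wcd_cpe_load_fw() below.
+ */
+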
+#define WCD_CPE_GRAB_LOCK(lock, name)		\
+do {						\
+	pr_debug("%s: %s lock acquire\n",	\
+		 __func__, name);		\
+	mutex_lock(lock);			\
+} while (0)
+
+#define WCD_CPE_REL_LOCK(lock, name)		\
+do {						\
+	pr_debug("%s: %s lock release\n",	\
+		 __func__, name);		\
+	mutex_unlock(lock);			\
+} while (0)
+
+#define WCD_CPE_STATE_MAX_LEN 11
+#define CPE_OFFLINE_WAIT_TIMEOUT (2 * HZ)
+#define CPE_READY_WAIT_TIMEOUT (3 * HZ)
+#define WCD_CPE_SYSFS_DIR_MAX_LENGTH 32
+
+#define CPE_ERR_IRQ_CB(core) \
+	((core)->cpe_cdc_cb->cpe_err_irq_control)
+
+/*
+ * AFE output buffer size is always
+ * (sample_rate * number of bytes per sample) / (2 * 1000)
+ */
+#define AFE_OUT_BUF_SIZE(bit_width, sample_rate) \
+	(((sample_rate) * (bit_width / BITS_PER_BYTE))/(2*1000))
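+/* e.g. AFE_OUT_BUF_SIZE(16, 16000) = (16000 * 2) / 2000 = 16 bytes (0.5 ms) */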
+
+enum afe_port_state {
+	AFE_PORT_STATE_DEINIT = 0,
+	AFE_PORT_STATE_INIT,
+	AFE_PORT_STATE_CONFIG,
+	AFE_PORT_STATE_STARTED,
+	AFE_PORT_STATE_SUSPENDED,
+};
+
+struct wcd_cmi_afe_port_data {
+	u8 port_id;
+	struct mutex afe_lock;
+	struct completion afe_cmd_complete;
+	enum afe_port_state port_state;
+	u8 cmd_result;
+	u32 mem_handle;
+};
+
+struct cpe_lsm_ids {
+	u32 module_id;
+	u32 param_id;
+};
+
+static struct wcd_cpe_core *core_d;
+static struct cpe_lsm_session
+		*lsm_sessions[WCD_CPE_LSM_MAX_SESSIONS + 1];
+struct wcd_cpe_core *(*wcd_get_cpe_core)(struct snd_soc_codec *);
+static struct wcd_cmi_afe_port_data afe_ports[WCD_CPE_AFE_MAX_PORTS + 1];
+static void wcd_cpe_svc_event_cb(const struct cpe_svc_notification *param);
+static int wcd_cpe_setup_irqs(struct wcd_cpe_core *core);
+static void wcd_cpe_cleanup_irqs(struct wcd_cpe_core *core);
+static ssize_t cpe_ftm_test_trigger(struct file *file,
+				     const char __user *user_buf,
+				     size_t count, loff_t *ppos);
+static u32 ramdump_enable;
+static u32 cpe_ftm_test_status;
+static const struct file_operations cpe_ftm_test_trigger_fops = {
+	.open = simple_open,
+	.write = cpe_ftm_test_trigger,
+};
+
+static int wcd_cpe_afe_svc_cmd_mode(void *core_handle,
+				    u8 mode);
+struct wcd_cpe_attribute {
+	struct attribute attr;
+	ssize_t (*show)(struct wcd_cpe_core *core, char *buf);
+	ssize_t (*store)(struct wcd_cpe_core *core, const char *buf,
+			 ssize_t count);
+};
+
+#define WCD_CPE_ATTR(_name, _mode, _show, _store) \
+static struct wcd_cpe_attribute cpe_attr_##_name = { \
+	.attr = {.name = __stringify(_name), .mode = _mode}, \
+	.show = _show, \
+	.store = _store, \
+}
+
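+/*
+ * Example (hypothetical attribute; names are illustrative):
+ *	WCD_CPE_ATTR(fw_name, 0660, cpe_fw_name_show, cpe_fw_name_store)
+ * defines cpe_attr_fw_name, exposed as "fw_name" under the CPE kobject.
+ */
+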
+#define to_wcd_cpe_attr(a) \
+	container_of((a), struct wcd_cpe_attribute, attr)
+
+#define kobj_to_cpe_core(kobj) \
+	container_of((kobj), struct wcd_cpe_core, cpe_kobj)
+
+/* wcd_cpe_lsm_session_active: check if any session is active
+ * return true if any session is active.
+ */
+static bool wcd_cpe_lsm_session_active(void)
+{
+	int index;
+
+	/* session starts from index 1 */
+	for (index = 1; index <= WCD_CPE_LSM_MAX_SESSIONS; index++) {
+		if (lsm_sessions[index] != NULL)
+			return true;
+	}
+
+	return false;
+}
+
+static int wcd_cpe_get_sfr_dump(struct wcd_cpe_core *core)
+{
+	struct cpe_svc_mem_segment dump_seg;
+	int rc;
+	u8 *sfr_dump;
+
+	sfr_dump = kzalloc(core->sfr_buf_size, GFP_KERNEL);
+	if (!sfr_dump) {
+		dev_err(core->dev,
+			"%s: No memory for sfr dump\n",
+			__func__);
+		goto done;
+	}
+
+	dump_seg.type = CPE_SVC_DATA_MEM;
+	dump_seg.cpe_addr = core->sfr_buf_addr;
+	dump_seg.size = core->sfr_buf_size;
+	dump_seg.data = sfr_dump;
+	dev_dbg(core->dev,
+		"%s: reading SFR from CPE, size = %zu\n",
+		__func__, core->sfr_buf_size);
+
+	rc = cpe_svc_ramdump(core->cpe_handle, &dump_seg);
+	if (IS_ERR_VALUE(rc)) {
+		dev_err(core->dev,
+			"%s: Failed to read cpe sfr_dump, err = %d\n",
+			__func__, rc);
+		goto free_sfr_dump;
+	}
+
+	dev_info(core->dev,
+		 "%s: cpe_sfr = %s\n", __func__, sfr_dump);
+
+free_sfr_dump:
+	kfree(sfr_dump);
+done:
+	/* Even if SFR dump failed, do not return error */
+	return 0;
+}
+
+static int wcd_cpe_collect_ramdump(struct wcd_cpe_core *core)
+{
+	struct cpe_svc_mem_segment dump_seg;
+	int rc;
+
+	if (!core->cpe_ramdump_dev || !core->cpe_dump_v_addr ||
+	    core->hw_info.dram_size == 0) {
+		dev_err(core->dev,
+			"%s: Ramdump devices not set up, size = %zu\n",
+			__func__, core->hw_info.dram_size);
+		return -EINVAL;
+	}
+
+	dump_seg.type = CPE_SVC_DATA_MEM;
+	dump_seg.cpe_addr = core->hw_info.dram_offset;
+	dump_seg.size = core->hw_info.dram_size;
+	dump_seg.data = core->cpe_dump_v_addr;
+
+	dev_dbg(core->dev,
+		"%s: Reading ramdump from CPE\n",
+		__func__);
+
+	rc = cpe_svc_ramdump(core->cpe_handle, &dump_seg);
+	if (IS_ERR_VALUE(rc)) {
+		dev_err(core->dev,
+			"%s: Failed to read CPE ramdump, err = %d\n",
+			__func__, rc);
+		return rc;
+	}
+
+	dev_dbg(core->dev,
+		"%s: completed reading ramdump from CPE\n",
+		__func__);
+
+	core->cpe_ramdump_seg.address = (unsigned long) core->cpe_dump_addr;
+	core->cpe_ramdump_seg.size = core->hw_info.dram_size;
+	core->cpe_ramdump_seg.v_address = core->cpe_dump_v_addr;
+
+	rc = do_ramdump(core->cpe_ramdump_dev,
+			&core->cpe_ramdump_seg, 1);
+	if (rc)
+		dev_err(core->dev,
+			"%s: fail to dump cpe ram to device, err = %d\n",
+			__func__, rc);
+	return rc;
+}
+
+/* wcd_cpe_is_valid_elf_hdr: check if the ELF header is valid
+ * @core: handle to wcd_cpe_core
+ * @fw_size: size of firmware from request_firmware
+ * @ehdr: the elf header to be checked for
+ * return true if all checks pass, false if any elf check fails
+ */
+static bool wcd_cpe_is_valid_elf_hdr(struct wcd_cpe_core *core, size_t fw_size,
+				     const struct elf32_hdr *ehdr)
+{
+	if (fw_size < sizeof(*ehdr)) {
+		dev_err(core->dev, "%s:Firmware too small\n", __func__);
+		goto elf_check_fail;
+	}
+
+	if (memcmp(ehdr->e_ident, ELFMAG, SELFMAG) != 0) {
+		dev_err(core->dev, "%s: Not an ELF file\n", __func__);
+		goto elf_check_fail;
+	}
+
+	if (ehdr->e_type != ET_EXEC && ehdr->e_type != ET_DYN) {
+		dev_err(core->dev, "%s: Not a executable image\n", __func__);
+		goto elf_check_fail;
+	}
+
+	if (ehdr->e_phnum == 0) {
+		dev_err(core->dev, "%s: no segments to load\n", __func__);
+		goto elf_check_fail;
+	}
+
+	if (sizeof(struct elf32_phdr) * ehdr->e_phnum +
+	    sizeof(struct elf32_hdr) > fw_size) {
+		dev_err(core->dev, "%s: Too small MDT file\n", __func__);
+		goto elf_check_fail;
+	}
+
+	return true;
+
+elf_check_fail:
+	return false;
+}
+
+/*
+ * wcd_cpe_load_each_segment: download segment to CPE
+ * @core: handle to struct wcd_cpe_core
+ * @file_idx: index of split firmware image file name
+ * @phdr: program header from metadata
+ */
+static int wcd_cpe_load_each_segment(struct wcd_cpe_core *core,
+			  int file_idx, const struct elf32_phdr *phdr)
+{
+	const struct firmware *split_fw;
+	char split_fname[32];
+	int ret = 0;
+	struct cpe_svc_mem_segment *segment;
+
+	if (!core || !phdr) {
+		pr_err("%s: Invalid params\n", __func__);
+		return -EINVAL;
+	}
+
+	/* file size can be 0 for bss segments */
+	if (phdr->p_filesz == 0 || phdr->p_memsz == 0)
+		return 0;
+
+	segment = kzalloc(sizeof(struct cpe_svc_mem_segment), GFP_KERNEL);
+	if (!segment) {
+		dev_err(core->dev,
+			"%s: no memory for segment info, file_idx = %d\n"
+			, __func__, file_idx);
+		return -ENOMEM;
+	}
+
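+	/* split images are named <fname>.bNN, e.g. "cpe.b03" for file_idx 3 */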
+	snprintf(split_fname, sizeof(split_fname), "%s.b%02d",
+		 core->fname, file_idx);
+
+	ret = request_firmware(&split_fw, split_fname, core->dev);
+	if (ret) {
+		dev_err(core->dev, "firmware %s not found\n",
+			split_fname);
+		ret = -EIO;
+		goto fw_req_fail;
+	}
+
+	if (phdr->p_flags & ELF_FLAG_EXECUTE)
+		segment->type = CPE_SVC_INSTRUCTION_MEM;
+	else if (phdr->p_flags & ELF_FLAG_RW)
+		segment->type = CPE_SVC_DATA_MEM;
+	else {
+		dev_err(core->dev, "%s: invalid flags 0x%x\n",
+			__func__, phdr->p_flags);
+		ret = -EINVAL;
+		goto done;
+	}
+
+	if (phdr->p_filesz != split_fw->size) {
+		dev_err(core->dev,
+			"%s: %s size mismatch, phdr_size: 0x%x fw_size: 0x%zx",
+			__func__, split_fname, phdr->p_filesz, split_fw->size);
+		ret = -EINVAL;
+		goto done;
+	}
+
+	segment->cpe_addr = phdr->p_paddr;
+	segment->size = phdr->p_filesz;
+	segment->data = (u8 *) split_fw->data;
+
+	dev_dbg(core->dev,
+		"%s: cpe segment type %s read from firmware\n", __func__,
+		(segment->type == CPE_SVC_INSTRUCTION_MEM) ?
+			"INSTRUCTION" : "DATA");
+
+	ret = cpe_svc_download_segment(core->cpe_handle, segment);
+	if (ret) {
+		dev_err(core->dev,
+			"%s: Failed to download %s, error = %d\n",
+			__func__, split_fname, ret);
+		goto done;
+	}
+
+done:
+	release_firmware(split_fw);
+
+fw_req_fail:
+	kfree(segment);
+	return ret;
+}
+
+/*
+ * wcd_cpe_enable_cpe_clks: enable the clocks for CPE
+ * @core: handle to wcd_cpe_core
+ * @enable: flag indicating whether to enable/disable cpe clocks
+ */
+static int wcd_cpe_enable_cpe_clks(struct wcd_cpe_core *core, bool enable)
+{
+	int ret, ret1;
+
+	if (!core || !core->cpe_cdc_cb ||
+	    !core->cpe_cdc_cb->cdc_clk_en ||
+	    !core->cpe_cdc_cb->cpe_clk_en) {
+		pr_err("%s: invalid handle\n",
+			__func__);
+		return -EINVAL;
+	}
+
+	ret = core->cpe_cdc_cb->cdc_clk_en(core->codec, enable);
+	if (ret) {
+		dev_err(core->dev, "%s: Failed to enable RCO\n",
+			__func__);
+		return ret;
+	}
+
+	if (!enable && core->cpe_clk_ref > 0)
+		core->cpe_clk_ref--;
+
+	/*
+	 * CPE clk will be enabled at the first time
+	 * and be disabled at the last time.
+	 */
+	if (core->cpe_clk_ref == 0) {
+		ret = core->cpe_cdc_cb->cpe_clk_en(core->codec, enable);
+		if (ret) {
+			dev_err(core->dev,
+				"%s: cpe_clk_en() failed, err = %d\n",
+				__func__, ret);
+			goto cpe_clk_fail;
+		}
+	}
+
+	if (enable)
+		core->cpe_clk_ref++;
+
+	return 0;
+
+cpe_clk_fail:
+	/* Release the codec clk if CPE clk enable failed */
+	if (enable) {
+		ret1 = core->cpe_cdc_cb->cdc_clk_en(core->codec, !enable);
+		if (ret1)
+			dev_err(core->dev,
+				"%s: Fail to release codec clk, err = %d\n",
+				__func__, ret1);
+	}
+
+	return ret;
+}
+
+/*
+ * wcd_cpe_bus_vote_max_bw: Function to vote for max bandwidth on codec bus
+ * @core: handle to core for cpe
+ * @vote: flag to indicate enable/disable of vote
+ *
+ * This function will try to use the codec provided callback to
+ * vote/unvote for the max bandwidth of the bus that is used by
+ * the codec for register reads/writes.
+ */
+static int wcd_cpe_bus_vote_max_bw(struct wcd_cpe_core *core,
+		bool vote)
+{
+	if (!core || !core->cpe_cdc_cb) {
+		pr_err("%s: Invalid handle to %s\n",
+			__func__,
+			(!core) ? "core" : "codec callbacks");
+		return -EINVAL;
+	}
+
+	if (core->cpe_cdc_cb->bus_vote_bw) {
+		dev_dbg(core->dev, "%s: %s cdc bus max bandwidth\n",
+			 __func__, vote ? "Vote" : "Unvote");
+		core->cpe_cdc_cb->bus_vote_bw(core->codec, vote);
+	}
+
+	return 0;
+}
+
+/*
+ * wcd_cpe_load_fw: Function to load the fw image
+ * @core: cpe core pointer
+ * @load_type: indicates whether to load to data section
+ *	       or the instruction section
+ *
+ * Parse the mdt file to look for program headers, load each
+ * split file corresponding to the program headers.
+ */
+static int wcd_cpe_load_fw(struct wcd_cpe_core *core,
+	unsigned int load_type)
+{
+	int ret, phdr_idx;
+	struct snd_soc_codec *codec = NULL;
+	struct wcd9xxx *wcd9xxx = NULL;
+	const struct elf32_hdr *ehdr;
+	const struct elf32_phdr *phdr;
+	const struct firmware *fw;
+	const u8 *elf_ptr;
+	char mdt_name[64];
+	bool img_dload_fail = false;
+	bool load_segment;
+
+	if (!core || !core->cpe_handle) {
+		pr_err("%s: Error CPE core %pK\n", __func__,
+		       core);
+		return -EINVAL;
+	}
+	codec = core->codec;
+	wcd9xxx = dev_get_drvdata(codec->dev->parent);
+	snprintf(mdt_name, sizeof(mdt_name), "%s.mdt", core->fname);
+	ret = request_firmware(&fw, mdt_name, core->dev);
+	if (IS_ERR_VALUE(ret)) {
+		dev_err(core->dev, "firmware %s not found\n", mdt_name);
+		return ret;
+	}
+
+	ehdr = (struct elf32_hdr *) fw->data;
+	if (!wcd_cpe_is_valid_elf_hdr(core, fw->size, ehdr)) {
+		dev_err(core->dev, "%s: fw mdt %s is invalid\n",
+			__func__, mdt_name);
+		ret = -EINVAL;
+		goto done;
+	}
+
+	elf_ptr = fw->data + sizeof(*ehdr);
+
+	if (load_type == ELF_FLAG_EXECUTE) {
+		/* Reset CPE first */
+		ret = cpe_svc_reset(core->cpe_handle);
+		if (IS_ERR_VALUE(ret)) {
+			dev_err(core->dev,
+				"%s: Failed to reset CPE with error %d\n",
+				__func__, ret);
+			goto done;
+		}
+	}
+
+	dev_dbg(core->dev, "%s: start image dload, name = %s, load_type = 0x%x\n",
+		__func__, core->fname, load_type);
+
+	wcd_cpe_bus_vote_max_bw(core, true);
+
+	/* parse every program header and request corresponding firmware */
+	for (phdr_idx = 0; phdr_idx < ehdr->e_phnum; phdr_idx++) {
+		phdr = (struct elf32_phdr *)elf_ptr;
+		load_segment = false;
+
+		dev_dbg(core->dev,
+			"index = %d, vaddr = 0x%x, paddr = 0x%x, "
+			"filesz = 0x%x, memsz = 0x%x, flags = 0x%x\n"
+			, phdr_idx, phdr->p_vaddr, phdr->p_paddr,
+			phdr->p_filesz, phdr->p_memsz, phdr->p_flags);
+
+		switch (load_type) {
+		case ELF_FLAG_EXECUTE:
+			if (phdr->p_flags & load_type)
+				load_segment = true;
+			break;
+		case ELF_FLAG_RW:
+			if (!(phdr->p_flags & ELF_FLAG_EXECUTE) &&
+			    (phdr->p_flags & load_type))
+				load_segment = true;
+			break;
+		default:
+			pr_err("%s: Invalid load_type 0x%x\n",
+				__func__, load_type);
+			ret = -EINVAL;
+			goto rel_bus_vote;
+		}
+
+		if (load_segment) {
+			ret = wcd_cpe_load_each_segment(core,
+						phdr_idx, phdr);
+			if (IS_ERR_VALUE(ret)) {
+				dev_err(core->dev,
+					"Failed to load segment %d, aborting img dload\n",
+					phdr_idx);
+				img_dload_fail = true;
+				goto rel_bus_vote;
+			}
+		} else {
+			dev_dbg(core->dev,
+				"%s: skipped segment with index %d\n",
+				__func__, phdr_idx);
+		}
+
+		elf_ptr = elf_ptr + sizeof(*phdr);
+	}
+	if (load_type == ELF_FLAG_EXECUTE)
+		core->ssr_type = WCD_CPE_IMEM_DOWNLOADED;
+
+rel_bus_vote:
+	wcd_cpe_bus_vote_max_bw(core, false);
+
+done:
+	release_firmware(fw);
+	return ret;
+}
+
+/*
+ * wcd_cpe_change_online_state - mark cpe online/offline state
+ * @core: core session to mark
+ * @online: whether online or offline
+ *
+ */
+static void wcd_cpe_change_online_state(struct wcd_cpe_core *core,
+			int online)
+{
+	struct wcd_cpe_ssr_entry *ssr_entry = NULL;
+	unsigned long ret;
+
+	if (!core) {
+		pr_err("%s: Invalid core handle\n",
+			__func__);
+		return;
+	}
+
+	ssr_entry = &core->ssr_entry;
+	WCD_CPE_GRAB_LOCK(&core->ssr_lock, "SSR");
+	ssr_entry->offline = !online;
+	wmb(); /* ensure offline state is visible before waking pollers */
+	ret = xchg(&ssr_entry->offline_change, 1);
+	wake_up_interruptible(&ssr_entry->offline_poll_wait);
+	WCD_CPE_REL_LOCK(&core->ssr_lock, "SSR");
+	pr_debug("%s: change state 0x%x offline_change 0x%x\n"
+		 " core->offline 0x%x, ret = %ld\n",
+		 __func__, online,
+		 ssr_entry->offline_change,
+		 core->ssr_entry.offline, ret);
+}
+
+/*
+ * wcd_cpe_load_fw_image: work function to load the fw image
+ * @work: work that is scheduled to perform the image loading
+ *
+ * Parse the mdt file to look for program headers, load each
+ * split file corresponding to the program headers.
+ */
+static void wcd_cpe_load_fw_image(struct work_struct *work)
+{
+	struct wcd_cpe_core *core;
+	int ret = 0;
+	core = container_of(work, struct wcd_cpe_core, load_fw_work);
+	ret = wcd_cpe_load_fw(core, ELF_FLAG_EXECUTE);
+	if (!ret)
+		wcd_cpe_change_online_state(core, 1);
+	else
+		pr_err("%s: failed to load instruction section, err = %d\n",
+			__func__, ret);
+}
+
+/*
+ * wcd_cpe_get_core_handle: get the handle to wcd_cpe_core
+ * @codec: codec from which this handle is to be obtained
+ * Codec driver should provide a callback function to obtain
+ * handle to wcd_cpe_core during initialization of wcd_cpe_core
+ */
+void *wcd_cpe_get_core_handle(
+	struct snd_soc_codec *codec)
+{
+	struct wcd_cpe_core *core = NULL;
+
+	if (!codec) {
+		pr_err("%s: Invalid codec handle\n",
+			__func__);
+		goto done;
+	}
+
+	if (!wcd_get_cpe_core) {
+		dev_err(codec->dev,
+			"%s: codec callback not available\n",
+			__func__);
+		goto done;
+	}
+
+	core = wcd_get_cpe_core(codec);
+
+	if (!core)
+		dev_err(codec->dev,
+			"%s: handle to core not available\n",
+			__func__);
+done:
+	return core;
+}
+
+/*
+ * svass_engine_irq: threaded interrupt handler for svass engine irq
+ * @irq: interrupt number
+ * @data: data pointer passed during irq registration
+ */
+static irqreturn_t svass_engine_irq(int irq, void *data)
+{
+	struct wcd_cpe_core *core = data;
+	int ret = 0;
+
+	if (!core) {
+		pr_err("%s: Invalid data for interrupt handler\n",
+			__func__);
+		goto done;
+	}
+
+	ret = cpe_svc_process_irq(core->cpe_handle, CPE_IRQ_OUTBOX_IRQ);
+	if (IS_ERR_VALUE(ret))
+		dev_err(core->dev,
+			"%s: Error processing irq from cpe_Services\n",
+			__func__);
+done:
+	return IRQ_HANDLED;
+}
+
+/*
+ * wcd_cpe_state_read - update read status in procfs
+ * @entry: snd_info_entry
+ * @buf: buffer where the read status is updated.
+ *
+ */
+static ssize_t wcd_cpe_state_read(struct snd_info_entry *entry,
+			       void *file_private_data, struct file *file,
+			       char __user *buf, size_t count, loff_t pos)
+{
+	int len = 0;
+	char buffer[WCD_CPE_STATE_MAX_LEN];
+	struct wcd_cpe_core *core = NULL;
+	struct wcd_cpe_ssr_entry *ssr_entry = NULL;
+
+	core = (struct wcd_cpe_core *) entry->private_data;
+	if (!core) {
+		pr_err("%s: CPE core NULL\n", __func__);
+		return -EINVAL;
+	}
+	ssr_entry = &core->ssr_entry;
+	rmb(); /* pair with the wmb() in wcd_cpe_change_online_state() */
+	dev_dbg(core->dev,
+		"%s: Offline 0x%x\n", __func__,
+		 ssr_entry->offline);
+
+	WCD_CPE_GRAB_LOCK(&core->ssr_lock, "SSR");
+	len = snprintf(buffer, sizeof(buffer), "%s\n",
+		       ssr_entry->offline ? "OFFLINE" : "ONLINE");
+	WCD_CPE_REL_LOCK(&core->ssr_lock, "SSR");
+
+	return simple_read_from_buffer(buf, count, &pos, buffer, len);
+}
+
+/*
+ * wcd_cpe_state_poll - polls for change state
+ * @entry: snd_info_entry
+ * @wait: wait for duration for poll wait
+ *
+ */
+static unsigned int wcd_cpe_state_poll(struct snd_info_entry *entry,
+					void *private_data, struct file *file,
+					poll_table *wait)
+{
+	struct wcd_cpe_core *core = NULL;
+	struct wcd_cpe_ssr_entry *ssr_entry = NULL;
+	int ret = 0;
+
+	core = (struct wcd_cpe_core *) entry->private_data;
+	if (!core) {
+		pr_err("%s: CPE core NULL\n", __func__);
+		return POLLERR;
+	}
+
+	ssr_entry = &core->ssr_entry;
+
+	dev_dbg(core->dev, "%s: CPE Poll wait\n",
+	       __func__);
+	poll_wait(file, &ssr_entry->offline_poll_wait, wait);
+	dev_dbg(core->dev, "%s: Wake-up Poll wait\n",
+	       __func__);
+	WCD_CPE_GRAB_LOCK(&core->ssr_lock, "SSR");
+
+	if (xchg(&ssr_entry->offline_change, 0))
+		ret = POLLIN | POLLPRI | POLLRDNORM;
+
+	WCD_CPE_REL_LOCK(&core->ssr_lock, "SSR");
+
+	dev_dbg(core->dev, "%s: ret (%d) from poll_wait\n",
+		__func__, ret);
+	return ret;
+}
+
+/*
+ * wcd_cpe_is_online_state - return true if CPE is online
+ * @core_handle: handle to the cpe core whose state is queried
+ */
+static bool wcd_cpe_is_online_state(void *core_handle)
+{
+	struct wcd_cpe_core *core = core_handle;
+
+	if (core_handle)
+		return !core->ssr_entry.offline;
+
+	pr_err("%s: Core handle NULL\n", __func__);
+	/* treat a NULL core handle as offline */
+	return false;
+}
+
+static struct snd_info_entry_ops wcd_cpe_state_proc_ops = {
+	.read = wcd_cpe_state_read,
+	.poll = wcd_cpe_state_poll,
+};
+
+static int wcd_cpe_check_new_image(struct wcd_cpe_core *core)
+{
+	int rc = 0;
+	char temp_img_name[WCD_CPE_IMAGE_FNAME_MAX];
+
+	if (!strcmp(core->fname, core->dyn_fname) &&
+	    core->ssr_type != WCD_CPE_INITIALIZED) {
+		dev_dbg(core->dev,
+			"%s: Firmware unchanged, fname = %s, ssr_type 0x%x\n",
+			__func__, core->fname, core->ssr_type);
+		goto done;
+	}
+
+	/*
+	 * Different firmware name requested,
+	 * Re-load the instruction section
+	 */
+	strlcpy(temp_img_name, core->fname,
+		WCD_CPE_IMAGE_FNAME_MAX);
+	strlcpy(core->fname, core->dyn_fname,
+		WCD_CPE_IMAGE_FNAME_MAX);
+
+	rc = wcd_cpe_load_fw(core, ELF_FLAG_EXECUTE);
+	if (rc) {
+		dev_err(core->dev,
+			"%s: Failed to dload new image %s, err = %d\n",
+			__func__, core->fname, rc);
+		/* If new image download failed, revert back to old image */
+		strlcpy(core->fname, temp_img_name,
+			WCD_CPE_IMAGE_FNAME_MAX);
+		rc = wcd_cpe_load_fw(core, ELF_FLAG_EXECUTE);
+		if (rc)
+			dev_err(core->dev,
+				"%s: Failed to re-dload image %s, err = %d\n",
+				__func__, core->fname, rc);
+	} else {
+		dev_info(core->dev, "%s: fw changed to %s\n",
+			 __func__, core->fname);
+	}
+done:
+	return rc;
+}
+
+static int wcd_cpe_enable(struct wcd_cpe_core *core,
+		bool enable)
+{
+	int ret = 0;
+
+	if (enable) {
+		/* Reset CPE first */
+		ret = cpe_svc_reset(core->cpe_handle);
+		if (IS_ERR_VALUE(ret)) {
+			dev_err(core->dev,
+				"%s: CPE Reset failed, error = %d\n",
+				__func__, ret);
+			goto done;
+		}
+
+		ret = wcd_cpe_setup_irqs(core);
+		if (ret) {
+			dev_err(core->dev,
+				"%s: CPE IRQs setup failed, error = %d\n",
+				__func__, ret);
+			goto done;
+		}
+		ret = wcd_cpe_check_new_image(core);
+		if (ret)
+			goto fail_boot;
+
+		/* Dload data section */
+		ret = wcd_cpe_load_fw(core, ELF_FLAG_RW);
+		if (ret) {
+			dev_err(core->dev,
+				"%s: Failed to dload data section, err = %d\n",
+				__func__, ret);
+			goto fail_boot;
+		}
+
+		ret = wcd_cpe_enable_cpe_clks(core, true);
+		if (IS_ERR_VALUE(ret)) {
+			dev_err(core->dev,
+				"%s: CPE clk enable failed, err = %d\n",
+				__func__, ret);
+			goto fail_boot;
+		}
+
+		ret = cpe_svc_boot(core->cpe_handle,
+				   core->cpe_debug_mode);
+		if (IS_ERR_VALUE(ret)) {
+			dev_err(core->dev,
+				"%s: Failed to boot CPE\n",
+				__func__);
+			goto fail_boot;
+		}
+
+		/* wait for CPE to be online */
+		dev_dbg(core->dev,
+			"%s: waiting for CPE bootup\n",
+			__func__);
+
+		wait_for_completion(&core->online_compl);
+
+		dev_dbg(core->dev,
+			"%s: CPE bootup done\n",
+			__func__);
+
+		core->ssr_type = WCD_CPE_ENABLED;
+	} else {
+		if (core->ssr_type == WCD_CPE_BUS_DOWN_EVENT ||
+		    core->ssr_type == WCD_CPE_SSR_EVENT) {
+			/*
+			 * If this disable vote is when
+			 * SSR is in progress, do not disable CPE here,
+			 * instead SSR handler will control CPE.
+			 */
+			wcd_cpe_enable_cpe_clks(core, false);
+			wcd_cpe_cleanup_irqs(core);
+			goto done;
+		}
+
+		ret = cpe_svc_shutdown(core->cpe_handle);
+		if (IS_ERR_VALUE(ret)) {
+			dev_err(core->dev,
+				"%s: CPE shutdown failed, error %d\n",
+				__func__, ret);
+			goto done;
+		}
+
+		wcd_cpe_enable_cpe_clks(core, false);
+		wcd_cpe_cleanup_irqs(core);
+		core->ssr_type = WCD_CPE_IMEM_DOWNLOADED;
+	}
+
+	return ret;
+
+fail_boot:
+	wcd_cpe_cleanup_irqs(core);
+
+done:
+	return ret;
+}
+
+/*
+ * wcd_cpe_boot_ssr: load the images to CPE after SSR and boot it up
+ * @core: handle to the core
+ */
+static int wcd_cpe_boot_ssr(struct wcd_cpe_core *core)
+{
+	int rc = 0;
+
+	if (!core || !core->cpe_handle) {
+		pr_err("%s: Invalid handle\n", __func__);
+		rc = -EINVAL;
+		goto fail;
+	}
+	/* Load the instruction section and mark CPE as online */
+	rc = wcd_cpe_load_fw(core, ELF_FLAG_EXECUTE);
+	if (rc) {
+		dev_err(core->dev,
+			"%s: Failed to load instruction, err = %d\n",
+			__func__, rc);
+		goto fail;
+	}
+
+	wcd_cpe_change_online_state(core, 1);
+
+fail:
+	return rc;
+}
+
+/*
+ * wcd_cpe_clr_ready_status:
+ *	Clear the value from the ready status for CPE
+ * @core: handle to the core
+ * @value: flag/bitmask that is to be cleared
+ *
+ * This function should not be invoked with ssr_lock acquired
+ */
+static void wcd_cpe_clr_ready_status(struct wcd_cpe_core *core,
+			       u8 value)
+{
+	WCD_CPE_GRAB_LOCK(&core->ssr_lock, "SSR");
+	core->ready_status &= ~(value);
+	dev_dbg(core->dev,
+		"%s: ready_status = 0x%x\n",
+		__func__, core->ready_status);
+	WCD_CPE_REL_LOCK(&core->ssr_lock, "SSR");
+}
+
+/*
+ * wcd_cpe_set_and_complete:
+ *	Set the ready status with the provided value and
+ *	flag the completion object if ready status moves
+ *	to ready to download
+ * @core: handle to the core
+ * @value: flag/bitmask that is to be set
+ */
+static void wcd_cpe_set_and_complete(struct wcd_cpe_core *core,
+				u8 value)
+{
+	WCD_CPE_GRAB_LOCK(&core->ssr_lock, "SSR");
+	core->ready_status |= value;
+	if ((core->ready_status & WCD_CPE_READY_TO_DLOAD) ==
+	    WCD_CPE_READY_TO_DLOAD) {
+		dev_dbg(core->dev,
+			"%s: marking ready, status = 0x%x\n",
+			__func__, core->ready_status);
+		complete(&core->ready_compl);
+	}
+	WCD_CPE_REL_LOCK(&core->ssr_lock, "SSR");
+}
+
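+/*
+ * ready_status handshake (inferred from the mask checks above): the bus
+ * up event sets WCD_CPE_BUS_READY and CMI client deregistration sets
+ * WCD_CPE_BLK_READY; only when both bits are set (WCD_CPE_READY_TO_DLOAD)
+ * does the SSR worker proceed to re-download the CPE image.
+ */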
+
+/*
+ * wcd_cpe_ssr_work: work function to handle CPE SSR
+ * @work: work that is scheduled to perform CPE shutdown
+ *	and restart
+ */
+static void wcd_cpe_ssr_work(struct work_struct *work)
+{
+	int rc = 0;
+	u32 irq = 0;
+	struct wcd_cpe_core *core = NULL;
+	u8 status = 0;
+
+	core = container_of(work, struct wcd_cpe_core, ssr_work);
+	if (!core) {
+		pr_err("%s: Core handle NULL\n", __func__);
+		return;
+	}
+
+	/* Vote against deep CPU idle so SSR handling is not delayed */
+	pm_qos_add_request(&core->pm_qos_req,
+			   PM_QOS_CPU_DMA_LATENCY,
+			   PM_QOS_DEFAULT_VALUE);
+	pm_qos_update_request(&core->pm_qos_req,
+			msm_cpuidle_get_deep_idle_latency());
+
+	dev_dbg(core->dev,
+		"%s: CPE SSR with event %d\n",
+		__func__, core->ssr_type);
+
+	if (core->ssr_type == WCD_CPE_SSR_EVENT) {
+		if (CPE_ERR_IRQ_CB(core))
+			core->cpe_cdc_cb->cpe_err_irq_control(
+					core->codec,
+					CPE_ERR_IRQ_STATUS,
+					&status);
+		if (status & core->irq_info.cpe_fatal_irqs)
+			irq = CPE_IRQ_WDOG_BITE;
+	} else {
+		/* If bus is down, cdc reg cannot be read */
+		irq = CPE_IRQ_WDOG_BITE;
+	}
+
+	if (core->cpe_users > 0) {
+		rc = cpe_svc_process_irq(core->cpe_handle, irq);
+		if (IS_ERR_VALUE(rc))
+			/*
+			 * Even if process_irq fails,
+			 * wait for cpe to move to offline state
+			 */
+			dev_err(core->dev,
+				"%s: irq processing failed, error = %d\n",
+				__func__, rc);
+
+		rc = wait_for_completion_timeout(&core->offline_compl,
+						 CPE_OFFLINE_WAIT_TIMEOUT);
+		if (!rc) {
+			dev_err(core->dev,
+				"%s: wait for cpe offline timed out\n",
+				__func__);
+			goto err_ret;
+		}
+		if (core->ssr_type != WCD_CPE_BUS_DOWN_EVENT) {
+			wcd_cpe_get_sfr_dump(core);
+
+			/*
+			 * Ramdump has to be explicitly enabled
+			 * through debugfs and cannot be collected
+			 * when bus is down.
+			 */
+			if (ramdump_enable)
+				wcd_cpe_collect_ramdump(core);
+		}
+	} else {
+		pr_err("%s: no cpe users, mark as offline\n", __func__);
+		wcd_cpe_change_online_state(core, 0);
+		wcd_cpe_set_and_complete(core,
+					 WCD_CPE_BLK_READY);
+	}
+
+	rc = wait_for_completion_timeout(&core->ready_compl,
+					 CPE_READY_WAIT_TIMEOUT);
+	if (!rc) {
+		dev_err(core->dev,
+			"%s: ready to online timed out, status = %u\n",
+			__func__, core->ready_status);
+		goto err_ret;
+	}
+
+	rc = wcd_cpe_boot_ssr(core);
+
+	/* Once the image is downloaded, make sure all
+	 * error interrupts are cleared
+	 */
+	if (CPE_ERR_IRQ_CB(core))
+		core->cpe_cdc_cb->cpe_err_irq_control(core->codec,
+					CPE_ERR_IRQ_CLEAR, NULL);
+
+err_ret:
+	/* restore the default pm_qos value and remove the request */
+	pm_qos_update_request(&core->pm_qos_req,
+			      PM_QOS_DEFAULT_VALUE);
+	pm_qos_remove_request(&core->pm_qos_req);
+}
+
+/*
+ * wcd_cpe_ssr_event: handle SSR events here.
+ * @core_handle: handle to the cpe core
+ * @event: the SSR event to handle (CPE SSR, bus down or bus up)
+ */
+int wcd_cpe_ssr_event(void *core_handle,
+		      enum wcd_cpe_ssr_state_event event)
+{
+	struct wcd_cpe_core *core = core_handle;
+
+	if (!core) {
+		pr_err("%s: Invalid handle to core\n",
+			__func__);
+		return -EINVAL;
+	}
+
+	/*
+	 * If CPE is not even enabled, the SSR event for
+	 * CPE needs to be ignored
+	 */
+	if (core->ssr_type == WCD_CPE_INITIALIZED) {
+		dev_info(core->dev,
+			"%s: CPE initialized but not enabled, skip CPE ssr\n",
+			 __func__);
+		return 0;
+	}
+
+	dev_dbg(core->dev,
+		"%s: Schedule ssr work, event = %d\n",
+		__func__, core->ssr_type);
+
+	switch (event) {
+	case WCD_CPE_BUS_DOWN_EVENT:
+		/*
+		 * If bus down, then CPE block is also
+		 * treated to be down
+		 */
+		wcd_cpe_clr_ready_status(core, WCD_CPE_READY_TO_DLOAD);
+		core->ssr_type = event;
+		schedule_work(&core->ssr_work);
+		break;
+
+	case WCD_CPE_SSR_EVENT:
+		wcd_cpe_clr_ready_status(core, WCD_CPE_BLK_READY);
+		core->ssr_type = event;
+		schedule_work(&core->ssr_work);
+		break;
+
+	case WCD_CPE_BUS_UP_EVENT:
+		wcd_cpe_set_and_complete(core, WCD_CPE_BUS_READY);
+		/*
+		 * In case of bus up event ssr_type will be changed
+		 * to WCD_CPE_ACTIVE once CPE is online
+		 */
+		break;
+
+	default:
+		dev_err(core->dev,
+			"%s: unhandled SSR event %d\n",
+			__func__, event);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(wcd_cpe_ssr_event);
+
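+/*
+ * Call-flow sketch (bus/codec driver side; the exact notifier hookup is
+ * codec specific):
+ *
+ *	wcd_cpe_ssr_event(core, WCD_CPE_BUS_DOWN_EVENT);   on bus down
+ *	wcd_cpe_ssr_event(core, WCD_CPE_BUS_UP_EVENT);     once bus is back
+ */
+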
+/*
+ * svass_exception_irq: threaded irq handler for sva error interrupts
+ * @irq: interrupt number
+ * @data: data pointer passed during irq registration
+ *
+ * Once an error interrupt is received, it is not cleared, since
+ * clearing this interrupt will raise spurious interrupts unless
+ * CPE is reset.
+ */
+static irqreturn_t svass_exception_irq(int irq, void *data)
+{
+	struct wcd_cpe_core *core = data;
+	u8 status = 0;
+
+	if (!core || !CPE_ERR_IRQ_CB(core)) {
+		pr_err("%s: Invalid %s\n",
+		       __func__,
+		       (!core) ? "core" : "cdc control");
+		return IRQ_HANDLED;
+	}
+
+	core->cpe_cdc_cb->cpe_err_irq_control(core->codec,
+			CPE_ERR_IRQ_STATUS, &status);
+
+	while (status != 0) {
+		if (status & core->irq_info.cpe_fatal_irqs) {
+			dev_err(core->dev,
+				"%s: CPE SSR event,err_status = 0x%02x\n",
+				__func__, status);
+			wcd_cpe_ssr_event(core, WCD_CPE_SSR_EVENT);
+			/*
+			 * If fatal interrupt is received,
+			 * trigger SSR and stop processing
+			 * further interrupts
+			 */
+			break;
+		}
+		/*
+		 * Mask the interrupt that was raised to
+		 * avoid spurious interrupts
+		 */
+		core->cpe_cdc_cb->cpe_err_irq_control(core->codec,
+					CPE_ERR_IRQ_MASK, &status);
+
+		/* Clear only the interrupt that was raised */
+		core->cpe_cdc_cb->cpe_err_irq_control(core->codec,
+					CPE_ERR_IRQ_CLEAR, &status);
+		dev_err(core->dev,
+			"%s: err_interrupt status = 0x%x\n",
+			__func__, status);
+
+		/* Read status for pending interrupts */
+		core->cpe_cdc_cb->cpe_err_irq_control(core->codec,
+					CPE_ERR_IRQ_STATUS, &status);
+	}
+
+	return IRQ_HANDLED;
+}
+
+/*
+ * wcd_cpe_cmi_afe_cb: callback called on response to afe commands
+ * @param: parameter containing the response code, etc
+ *
+ * Process the request to the command sent to CPE and wakeup the
+ * command send wait.
+ */
+static void wcd_cpe_cmi_afe_cb(const struct cmi_api_notification *param)
+{
+	struct cmi_hdr *hdr;
+	struct wcd_cmi_afe_port_data *afe_port_d;
+	u8 port_id;
+
+	if (!param) {
+		pr_err("%s: param is null\n", __func__);
+		return;
+	}
+
+	if (param->event != CMI_API_MSG) {
+		pr_err("%s: unhandled event 0x%x\n",
+			__func__, param->event);
+		return;
+	}
+
+	pr_debug("%s: param->result = %d\n",
+		 __func__, param->result);
+
+	hdr = (struct cmi_hdr *) param->message;
+
+	/*
+	 * for AFE cmd response, port id is
+	 * stored at session id field of header
+	 */
+	port_id = CMI_HDR_GET_SESSION_ID(hdr);
+	if (port_id > WCD_CPE_AFE_MAX_PORTS) {
+		pr_err("%s: invalid port_id %d\n",
+			__func__, port_id);
+		return;
+	}
+
+	afe_port_d = &(afe_ports[port_id]);
+
+	if (hdr->opcode == CPE_CMI_BASIC_RSP_OPCODE) {
+
+		u8 *payload = ((u8 *)param->message) + (sizeof(struct cmi_hdr));
+		u8 result = payload[0];
+		afe_port_d->cmd_result = result;
+		complete(&afe_port_d->afe_cmd_complete);
+
+	} else if (hdr->opcode == CPE_AFE_PORT_CMDRSP_SHARED_MEM_ALLOC) {
+
+		struct cpe_cmdrsp_shmem_alloc *cmdrsp_shmem_alloc =
+			(struct cpe_cmdrsp_shmem_alloc *) param->message;
+
+		if (cmdrsp_shmem_alloc->addr == 0) {
+			pr_err("%s: Failed AFE shared mem alloc\n", __func__);
+			afe_port_d->cmd_result = CMI_SHMEM_ALLOC_FAILED;
+		} else {
+			pr_debug("%s AFE shared mem addr = 0x%x\n",
+				 __func__, cmdrsp_shmem_alloc->addr);
+			afe_port_d->mem_handle = cmdrsp_shmem_alloc->addr;
+			afe_port_d->cmd_result = 0;
+		}
+		complete(&afe_port_d->afe_cmd_complete);
+	}
+}
+
+/*
+ * wcd_cpe_initialize_afe_port_data: Initialize all AFE ports
+ *
+ * Initialize the data for all the afe ports. Assign the
+ * afe port state to INIT state.
+ */
+static void wcd_cpe_initialize_afe_port_data(void)
+{
+	struct wcd_cmi_afe_port_data *afe_port_d;
+	int i;
+
+	for (i = 0; i <= WCD_CPE_AFE_MAX_PORTS; i++) {
+		afe_port_d = &afe_ports[i];
+		afe_port_d->port_id = i;
+		init_completion(&afe_port_d->afe_cmd_complete);
+		afe_port_d->port_state = AFE_PORT_STATE_INIT;
+		mutex_init(&afe_port_d->afe_lock);
+	}
+}
+
+/*
+ * wcd_cpe_deinitialize_afe_port_data: De-initialize all AFE ports
+ *
+ * De-Initialize the data for all the afe ports. Assign the
+ * afe port state to DEINIT state.
+ */
+static void wcd_cpe_deinitialize_afe_port_data(void)
+{
+	struct wcd_cmi_afe_port_data *afe_port_d;
+	int i;
+
+	for (i = 0; i <= WCD_CPE_AFE_MAX_PORTS; i++) {
+		afe_port_d = &afe_ports[i];
+		afe_port_d->port_state = AFE_PORT_STATE_DEINIT;
+		mutex_destroy(&afe_port_d->afe_lock);
+	}
+}
+
+/*
+ * wcd_cpe_svc_event_cb: callback from cpe services, indicating
+ * CPE is online or offline.
+ * @param: parameter / payload for event to be notified
+ */
+static void wcd_cpe_svc_event_cb(const struct cpe_svc_notification *param)
+{
+	struct snd_soc_codec *codec;
+	struct wcd_cpe_core *core;
+	struct cpe_svc_boot_event *boot_data;
+	bool active_sessions;
+
+	if (!param) {
+		pr_err("%s: Invalid event\n", __func__);
+		return;
+	}
+
+	codec = param->private_data;
+	if (!codec) {
+		pr_err("%s: Invalid handle to codec\n",
+			__func__);
+		return;
+	}
+
+	core = wcd_cpe_get_core_handle(codec);
+	if (!core) {
+		pr_err("%s: Invalid handle to core\n",
+			__func__);
+		return;
+	}
+
+	dev_dbg(core->dev,
+		"%s: event = 0x%x, ssr_type = 0x%x\n",
+		__func__, param->event, core->ssr_type);
+
+	switch (param->event) {
+	case CPE_SVC_BOOT:
+		boot_data = (struct cpe_svc_boot_event *)
+				param->payload;
+		core->sfr_buf_addr = boot_data->debug_address;
+		core->sfr_buf_size = boot_data->debug_buffer_size;
+		dev_dbg(core->dev,
+			"%s: CPE booted, sfr_addr = %d, sfr_size = %zu\n",
+			__func__, core->sfr_buf_addr,
+			core->sfr_buf_size);
+		break;
+	case CPE_SVC_ONLINE:
+		core->ssr_type = WCD_CPE_ACTIVE;
+		dev_dbg(core->dev, "%s CPE is now online\n",
+			 __func__);
+		complete(&core->online_compl);
+		break;
+	case CPE_SVC_OFFLINE:
+		/*
+		 * offline can happen during normal shutdown,
+		 * but we are interested in offline only during
+		 * SSR.
+		 */
+		if (core->ssr_type != WCD_CPE_SSR_EVENT &&
+		    core->ssr_type != WCD_CPE_BUS_DOWN_EVENT)
+			break;
+
+		active_sessions = wcd_cpe_lsm_session_active();
+		wcd_cpe_change_online_state(core, 0);
+		complete(&core->offline_compl);
+		dev_err(core->dev, "%s: CPE is now offline\n",
+			 __func__);
+		break;
+	case CPE_SVC_CMI_CLIENTS_DEREG:
+
+		/*
+		 * Only when either CPE SSR is in progress,
+		 * or the bus is down, we need to mark the CPE
+		 * as ready. In all other cases, this event is
+		 * ignored
+		 */
+		if (core->ssr_type == WCD_CPE_SSR_EVENT ||
+		    core->ssr_type == WCD_CPE_BUS_DOWN_EVENT)
+			wcd_cpe_set_and_complete(core,
+						 WCD_CPE_BLK_READY);
+		break;
+	default:
+		dev_err(core->dev,
+			"%s: unhandled notification\n",
+			__func__);
+		break;
+	}
+}
+
+/*
+ * wcd_cpe_cleanup_irqs: free the irq resources required by cpe
+ * @core: handle to the cpe core
+ *
+ * This API will free the IRQs for CPE but does not mask the
+ * CPE interrupts. If masking is needed, it has to be done
+ * explicitly by the caller.
+ */
+static void wcd_cpe_cleanup_irqs(struct wcd_cpe_core *core)
+{
+	struct snd_soc_codec *codec = core->codec;
+	struct wcd9xxx *wcd9xxx = dev_get_drvdata(codec->dev->parent);
+	struct wcd9xxx_core_resource *core_res = &wcd9xxx->core_res;
+
+	wcd9xxx_free_irq(core_res,
+			 core->irq_info.cpe_engine_irq,
+			 core);
+	wcd9xxx_free_irq(core_res,
+			 core->irq_info.cpe_err_irq,
+			 core);
+
+}
+
+/*
+ * wcd_cpe_setup_irqs: setup the irqs for CPE
+ * @core: handle to wcd_cpe_core
+ *
+ * All interrupts needed for CPE are acquired. If any
+ * request_irq fails, then all irqs are freed.
+ */
+static int wcd_cpe_setup_irqs(struct wcd_cpe_core *core)
+{
+	int ret;
+	struct snd_soc_codec *codec = core->codec;
+	struct wcd9xxx *wcd9xxx = dev_get_drvdata(codec->dev->parent);
+	struct wcd9xxx_core_resource *core_res = &wcd9xxx->core_res;
+
+	ret = wcd9xxx_request_irq(core_res,
+				  core->irq_info.cpe_engine_irq,
+				  svass_engine_irq, "SVASS_Engine", core);
+	if (ret) {
+		dev_err(core->dev,
+			"%s: Failed to request svass engine irq\n",
+			__func__);
+		goto fail_engine_irq;
+	}
+
+	/* Make sure all error interrupts are cleared */
+	if (CPE_ERR_IRQ_CB(core))
+		core->cpe_cdc_cb->cpe_err_irq_control(
+					core->codec,
+					CPE_ERR_IRQ_CLEAR,
+					NULL);
+
+	/* Enable required error interrupts */
+	if (CPE_ERR_IRQ_CB(core))
+		core->cpe_cdc_cb->cpe_err_irq_control(
+					core->codec,
+					CPE_ERR_IRQ_UNMASK,
+					NULL);
+
+	ret = wcd9xxx_request_irq(core_res,
+				  core->irq_info.cpe_err_irq,
+				  svass_exception_irq, "SVASS_Exception", core);
+	if (ret) {
+		dev_err(core->dev,
+			"%s: Failed to request svass err irq\n",
+			__func__);
+		goto fail_exception_irq;
+	}
+
+	return 0;
+
+fail_exception_irq:
+	wcd9xxx_free_irq(core_res,
+			 core->irq_info.cpe_engine_irq, core);
+
+fail_engine_irq:
+	return ret;
+}
+
+static int wcd_cpe_get_cal_index(int32_t cal_type)
+{
+	int cal_index = -EINVAL;
+
+	if (cal_type == ULP_AFE_CAL_TYPE)
+		cal_index = WCD_CPE_LSM_CAL_AFE;
+	else if (cal_type == ULP_LSM_CAL_TYPE)
+		cal_index = WCD_CPE_LSM_CAL_LSM;
+	else if (cal_type == ULP_LSM_TOPOLOGY_ID_CAL_TYPE)
+		cal_index = WCD_CPE_LSM_CAL_TOPOLOGY_ID;
+	else
+		pr_err("%s: invalid cal_type %d\n",
+			__func__, cal_type);
+
+	return cal_index;
+}
+
+static int wcd_cpe_alloc_cal(int32_t cal_type, size_t data_size, void *data)
+{
+	int ret = 0;
+	int cal_index;
+
+	cal_index = wcd_cpe_get_cal_index(cal_type);
+	if (cal_index < 0) {
+		pr_err("%s: invalid caltype %d\n",
+			__func__, cal_type);
+		return -EINVAL;
+	}
+
+	ret = cal_utils_alloc_cal(data_size, data,
+				  core_d->cal_data[cal_index],
+				  0, NULL);
+	if (ret < 0)
+		pr_err("%s: cal_utils_alloc_block failed, ret = %d, cal type = %d!\n",
+			__func__, ret, cal_type);
+	return ret;
+}
+
+static int wcd_cpe_dealloc_cal(int32_t cal_type, size_t data_size,
+			   void *data)
+{
+	int ret = 0;
+	int cal_index;
+
+	cal_index = wcd_cpe_get_cal_index(cal_type);
+	if (cal_index < 0) {
+		pr_err("%s: invalid caltype %d\n",
+			__func__, cal_type);
+		return -EINVAL;
+	}
+
+	ret = cal_utils_dealloc_cal(data_size, data,
+				    core_d->cal_data[cal_index]);
+	if (ret < 0)
+		pr_err("%s: cal_utils_dealloc_block failed, ret = %d, cal type = %d!\n",
+			__func__, ret, cal_type);
+	return ret;
+}
+
+static int wcd_cpe_set_cal(int32_t cal_type, size_t data_size, void *data)
+{
+	int ret = 0;
+	int cal_index;
+
+	cal_index = wcd_cpe_get_cal_index(cal_type);
+	if (cal_index < 0) {
+		pr_err("%s: invalid caltype %d\n",
+			__func__, cal_type);
+		return -EINVAL;
+	}
+
+	ret = cal_utils_set_cal(data_size, data,
+				core_d->cal_data[cal_index],
+				0, NULL);
+	if (ret < 0)
+		pr_err("%s: cal_utils_set_cal failed, ret = %d, cal type = %d!\n",
+			__func__, ret, cal_type);
+	return ret;
+}
+
+static int wcd_cpe_cal_init(struct wcd_cpe_core *core)
+{
+	int ret = 0;
+
+	struct cal_type_info cal_type_info[] = {
+		{{ULP_AFE_CAL_TYPE,
+		 {wcd_cpe_alloc_cal, wcd_cpe_dealloc_cal, NULL,
+		  wcd_cpe_set_cal, NULL, NULL} },
+		{NULL, NULL, cal_utils_match_buf_num} },
+
+		{{ULP_LSM_CAL_TYPE,
+		 {wcd_cpe_alloc_cal, wcd_cpe_dealloc_cal, NULL,
+		  wcd_cpe_set_cal, NULL, NULL} },
+		 {NULL, NULL, cal_utils_match_buf_num} },
+
+		{{ULP_LSM_TOPOLOGY_ID_CAL_TYPE,
+		 {wcd_cpe_alloc_cal, wcd_cpe_dealloc_cal, NULL,
+		  wcd_cpe_set_cal, NULL, NULL} },
+		 {NULL, NULL, cal_utils_match_buf_num} },
+	};
+
+	ret = cal_utils_create_cal_types(WCD_CPE_LSM_CAL_MAX,
+					 core->cal_data,
+					 cal_type_info);
+	if (ret < 0)
+		pr_err("%s: could not create cal type!\n",
+		       __func__);
+	return ret;
+}
+
+/*
+ * wcd_cpe_vote: reference counted vote to enable/disable the CPE
+ * @core: handle to cpe core structure
+ * @enable: true to add a vote, false to remove one
+ *
+ * The CPE is enabled only when the first user votes for it and
+ * disabled when the last vote is removed.
+ */
+static int wcd_cpe_vote(struct wcd_cpe_core *core,
+		bool enable)
+{
+	int ret = 0;
+
+	if (!core) {
+		pr_err("%s: Invalid handle to core\n",
+			__func__);
+		ret = -EINVAL;
+		goto done;
+	}
+
+	dev_dbg(core->dev,
+		"%s: enter, enable = %s, cpe_users = %u\n",
+		__func__, (enable ? "true" : "false"),
+		core->cpe_users);
+
+	if (enable) {
+		core->cpe_users++;
+		if (core->cpe_users == 1) {
+			ret = wcd_cpe_enable(core, enable);
+			if (ret) {
+				dev_err(core->dev,
+					"%s: CPE enable failed, err = %d\n",
+					__func__, ret);
+				goto done;
+			}
+		} else {
+			dev_dbg(core->dev,
+				"%s: cpe already enabled, users = %u\n",
+				__func__, core->cpe_users);
+			goto done;
+		}
+	} else {
+		core->cpe_users--;
+		if (core->cpe_users == 0) {
+			ret = wcd_cpe_enable(core, enable);
+			if (ret) {
+				dev_err(core->dev,
+					"%s: CPE disable failed, err = %d\n",
+					__func__, ret);
+				goto done;
+			}
+		} else {
+			dev_dbg(core->dev,
+				"%s: %u valid users on cpe\n",
+				__func__, core->cpe_users);
+			goto done;
+		}
+	}
+
+	dev_dbg(core->dev,
+		"%s: leave, enable = %s, cpe_users = %u\n",
+		__func__, (enable ? "true" : "false"),
+		core->cpe_users);
+
+done:
+	return ret;
+}
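+
+/*
+ * A minimal usage sketch for the voting API above (illustrative
+ * comment only, not driver code): every successful
+ * wcd_cpe_vote(core, true) must be balanced by a
+ * wcd_cpe_vote(core, false) so cpe_users can drop back to zero
+ * and the CPE is actually disabled.
+ *
+ *	ret = wcd_cpe_vote(core, true);
+ *	if (ret)
+ *		return ret;
+ *	... use the CPE ...
+ *	wcd_cpe_vote(core, false);
+ */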
+
+static int wcd_cpe_debugfs_init(struct wcd_cpe_core *core)
+{
+	int rc = 0;
+
+	struct dentry *dir = debugfs_create_dir("wcd_cpe", NULL);
+	if (IS_ERR_OR_NULL(dir)) {
+		dir = NULL;
+		rc = -ENODEV;
+		goto err_create_dir;
+	}
+
+	if (!debugfs_create_u32("ramdump_enable", S_IRUGO | S_IWUSR,
+				dir, &ramdump_enable)) {
+		dev_err(core->dev, "%s: Failed to create debugfs node %s\n",
+			__func__, "ramdump_enable");
+		rc = -ENODEV;
+		goto err_create_entry;
+	}
+
+	if (!debugfs_create_file("cpe_ftm_test_trigger", S_IWUSR,
+				dir, core, &cpe_ftm_test_trigger_fops)) {
+		dev_err(core->dev, "%s: Failed to create debugfs node %s\n",
+			__func__, "cpe_ftm_test_trigger");
+		rc = -ENODEV;
+		goto err_create_entry;
+	}
+
+	if (!debugfs_create_u32("cpe_ftm_test_status", S_IRUGO,
+				dir, &cpe_ftm_test_status)) {
+		dev_err(core->dev, "%s: Failed to create debugfs node %s\n",
+			__func__, "cpe_ftm_test_status");
+		rc = -ENODEV;
+		goto err_create_entry;
+	}
+
+	return 0;
+
+err_create_entry:
+	debugfs_remove(dir);
+
+err_create_dir:
+	return rc;
+}
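+
+/*
+ * Illustrative sketch (assuming debugfs is mounted at the usual
+ * /sys/kernel/debug): the nodes created above can be exercised
+ * from user space as
+ *
+ *	echo 1 > /sys/kernel/debug/wcd_cpe/ramdump_enable
+ *	echo 1 > /sys/kernel/debug/wcd_cpe/cpe_ftm_test_trigger
+ *	cat /sys/kernel/debug/wcd_cpe/cpe_ftm_test_status
+ */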
+
+static ssize_t fw_name_show(struct wcd_cpe_core *core, char *buf)
+{
+	return snprintf(buf, WCD_CPE_IMAGE_FNAME_MAX, "%s",
+			core->dyn_fname);
+}
+
+static ssize_t fw_name_store(struct wcd_cpe_core *core,
+		const char *buf, ssize_t count)
+{
+	int copy_count = count;
+	const char *pos;
+
+	pos = memchr(buf, '\n', count);
+	if (pos)
+		copy_count = pos - buf;
+
+	if (copy_count > (WCD_CPE_IMAGE_FNAME_MAX - 1)) {
+		dev_err(core->dev,
+			"%s: Invalid length %d, max allowed %d\n",
+			__func__, copy_count, WCD_CPE_IMAGE_FNAME_MAX - 1);
+		return -EINVAL;
+	}
+
+	strlcpy(core->dyn_fname, buf, copy_count + 1);
+
+	return count;
+}
+
+WCD_CPE_ATTR(fw_name, 0660, fw_name_show, fw_name_store);
+
+static ssize_t wcd_cpe_sysfs_show(struct kobject *kobj,
+		struct attribute *attr, char *buf)
+{
+	struct wcd_cpe_attribute *cpe_attr = to_wcd_cpe_attr(attr);
+	struct wcd_cpe_core *core = kobj_to_cpe_core(kobj);
+	ssize_t ret = -EINVAL;
+
+	if (core && cpe_attr->show)
+		ret = cpe_attr->show(core, buf);
+
+	return ret;
+}
+
+static ssize_t wcd_cpe_sysfs_store(struct kobject *kobj,
+		struct attribute *attr, const char *buf,
+		size_t count)
+{
+	struct wcd_cpe_attribute *cpe_attr = to_wcd_cpe_attr(attr);
+	struct wcd_cpe_core *core = kobj_to_cpe_core(kobj);
+	ssize_t ret = -EINVAL;
+
+	if (core && cpe_attr->store)
+		ret = cpe_attr->store(core, buf, count);
+
+	return ret;
+}
+
+static const struct sysfs_ops wcd_cpe_sysfs_ops = {
+	.show = wcd_cpe_sysfs_show,
+	.store = wcd_cpe_sysfs_store,
+};
+
+static struct kobj_type wcd_cpe_ktype = {
+	.sysfs_ops = &wcd_cpe_sysfs_ops,
+};
+
+static int wcd_cpe_sysfs_init(struct wcd_cpe_core *core, int id)
+{
+	char sysfs_dir_name[WCD_CPE_SYSFS_DIR_MAX_LENGTH];
+	int rc = 0;
+
+	snprintf(sysfs_dir_name, WCD_CPE_SYSFS_DIR_MAX_LENGTH,
+		 "%s%d", "wcd_cpe", id);
+
+	rc = kobject_init_and_add(&core->cpe_kobj, &wcd_cpe_ktype,
+				  kernel_kobj,
+				  sysfs_dir_name);
+	if (unlikely(rc)) {
+		dev_err(core->dev,
+			"%s: Failed to add kobject %s, err = %d\n",
+			__func__, sysfs_dir_name, rc);
+		goto done;
+	}
+
+	rc = sysfs_create_file(&core->cpe_kobj, &cpe_attr_fw_name.attr);
+	if (rc) {
+		dev_err(core->dev,
+			"%s: Failed to create fw_name sysfs entry in %s\n",
+			__func__, sysfs_dir_name);
+		goto fail_create_file;
+	}
+
+	return 0;
+
+fail_create_file:
+	kobject_put(&core->cpe_kobj);
+done:
+	return rc;
+}
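+
+/*
+ * The kobject is added under kernel_kobj, so for id 0 the attribute
+ * is expected at /sys/kernel/wcd_cpe0/fw_name (illustrative path;
+ * the image name below is a placeholder). fw_name_store strips the
+ * trailing newline that echo appends:
+ *
+ *	echo my_cpe_image.bin > /sys/kernel/wcd_cpe0/fw_name
+ *	cat /sys/kernel/wcd_cpe0/fw_name
+ */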
+
+static ssize_t cpe_ftm_test_trigger(struct file *file,
+				     const char __user *user_buf,
+				     size_t count, loff_t *ppos)
+{
+	struct wcd_cpe_core *core = file->private_data;
+	int ret = 0;
+
+	/* Enable the clks for cpe */
+	ret = wcd_cpe_enable_cpe_clks(core, true);
+	if (IS_ERR_VALUE(ret)) {
+		dev_err(core->dev,
+			"%s: CPE clk enable failed, err = %d\n",
+			__func__, ret);
+		goto done;
+	}
+
+	/* Get the CPE_STATUS */
+	ret = cpe_svc_ftm_test(core->cpe_handle, &cpe_ftm_test_status);
+	if (IS_ERR_VALUE(ret)) {
+		dev_err(core->dev,
+			"%s: CPE FTM test failed, err = %d\n",
+			__func__, ret);
+		if (ret == CPE_SVC_BUSY) {
+			cpe_ftm_test_status = 1;
+			ret = 0;
+		}
+	}
+
+	/* Disable the clks for cpe */
+	ret = wcd_cpe_enable_cpe_clks(core, false);
+	if (IS_ERR_VALUE(ret)) {
+		dev_err(core->dev,
+			"%s: CPE clk disable failed, err = %d\n",
+			__func__, ret);
+	}
+
+done:
+	if (ret < 0)
+		return ret;
+	else
+		return count;
+}
+
+static int wcd_cpe_validate_params(
+	struct snd_soc_codec *codec,
+	struct wcd_cpe_params *params)
+{
+	if (!codec) {
+		pr_err("%s: Invalid codec\n", __func__);
+		return -EINVAL;
+	}
+
+	if (!params) {
+		dev_err(codec->dev,
+			"%s: No params supplied for codec %s\n",
+			__func__, codec->component.name);
+		return -EINVAL;
+	}
+
+	if (!params->codec || !params->get_cpe_core ||
+	    !params->cdc_cb) {
+		dev_err(codec->dev,
+			"%s: Invalid params for codec %s\n",
+			__func__, codec->component.name);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/*
+ * wcd_cpe_init: Initialize CPE related structures
+ * @img_fname: filename for firmware image
+ * @codec: handle to codec requesting for image download
+ * @params: parameter structure passed from caller
+ *
+ * This API will initialize the cpe core but will not
+ * download the image or boot the cpe core.
+ */
+struct wcd_cpe_core *wcd_cpe_init(const char *img_fname,
+	struct snd_soc_codec *codec,
+	struct wcd_cpe_params *params)
+{
+	struct wcd_cpe_core *core;
+	int ret = 0;
+	struct snd_card *card = NULL;
+	struct snd_info_entry *entry = NULL;
+	char proc_name[WCD_CPE_STATE_MAX_LEN];
+	const char *cpe_name = "cpe";
+	const char *state_name = "_state";
+	const struct cpe_svc_hw_cfg *hw_info;
+	int id = 0;
+
+	if (wcd_cpe_validate_params(codec, params))
+		return NULL;
+
+	core = kzalloc(sizeof(struct wcd_cpe_core), GFP_KERNEL);
+	if (!core) {
+		dev_err(codec->dev,
+			"%s: Failed to allocate cpe core data\n",
+			__func__);
+		return NULL;
+	}
+
+	snprintf(core->fname, sizeof(core->fname), "%s", img_fname);
+	strlcpy(core->dyn_fname, core->fname, WCD_CPE_IMAGE_FNAME_MAX);
+
+	wcd_get_cpe_core = params->get_cpe_core;
+
+	core->codec = params->codec;
+	core->dev = params->codec->dev;
+	core->cpe_debug_mode = params->dbg_mode;
+
+	core->cdc_info.major_version = params->cdc_major_ver;
+	core->cdc_info.minor_version = params->cdc_minor_ver;
+	core->cdc_info.id = params->cdc_id;
+
+	core->cpe_cdc_cb = params->cdc_cb;
+
+	memcpy(&core->irq_info, &params->cdc_irq_info,
+	       sizeof(core->irq_info));
+
+	INIT_WORK(&core->load_fw_work, wcd_cpe_load_fw_image);
+	INIT_WORK(&core->ssr_work, wcd_cpe_ssr_work);
+	init_completion(&core->offline_compl);
+	init_completion(&core->ready_compl);
+	init_completion(&core->online_compl);
+	init_waitqueue_head(&core->ssr_entry.offline_poll_wait);
+	mutex_init(&core->ssr_lock);
+	mutex_init(&core->session_lock);
+	core->cpe_users = 0;
+	core->cpe_clk_ref = 0;
+
+	/*
+	 * By default, during probe, it is assumed that
+	 * both CPE hardware block and underlying bus to codec
+	 * are ready
+	 */
+	core->ready_status = WCD_CPE_READY_TO_DLOAD;
+
+	core->cpe_handle = cpe_svc_initialize(NULL, &core->cdc_info,
+					      params->cpe_svc_params);
+	if (!core->cpe_handle) {
+		dev_err(core->dev,
+			"%s: failed to initialize cpe services\n",
+			__func__);
+		goto fail_cpe_initialize;
+	}
+
+	core->cpe_reg_handle = cpe_svc_register(core->cpe_handle,
+					wcd_cpe_svc_event_cb,
+					CPE_SVC_ONLINE | CPE_SVC_OFFLINE |
+					CPE_SVC_BOOT |
+					CPE_SVC_CMI_CLIENTS_DEREG,
+					"codec cpe handler");
+	if (!core->cpe_reg_handle) {
+		dev_err(core->dev,
+			"%s: failed to register cpe service\n",
+			__func__);
+		goto fail_cpe_register;
+	}
+
+	card = codec->component.card->snd_card;
+	snprintf(proc_name, sizeof(proc_name), "%s%d%s",
+		 cpe_name, id, state_name);
+	entry = snd_info_create_card_entry(card, proc_name,
+					   card->proc_root);
+	if (entry) {
+		core->ssr_entry.entry = entry;
+		core->ssr_entry.offline = 1;
+		entry->size = WCD_CPE_STATE_MAX_LEN;
+		entry->content = SNDRV_INFO_CONTENT_DATA;
+		entry->c.ops = &wcd_cpe_state_proc_ops;
+		entry->private_data = core;
+		ret = snd_info_register(entry);
+		if (ret < 0) {
+			dev_err(core->dev,
+				"%s: snd_info_register failed (%d)\n",
+				 __func__, ret);
+			snd_info_free_entry(entry);
+			entry = NULL;
+		}
+	} else {
+		dev_err(core->dev,
+			"%s: Failed to create CPE SSR status entry\n",
+			__func__);
+		/*
+		 * Even if SSR entry creation fails, continue
+		 * with image download
+		 */
+	}
+
+	core_d = core;
+	ret = wcd_cpe_cal_init(core);
+	if (IS_ERR_VALUE(ret)) {
+		dev_err(core->dev,
+			"%s: CPE calibration init failed, err = %d\n",
+			__func__, ret);
+		goto fail_cpe_reset;
+	}
+
+	wcd_cpe_debugfs_init(core);
+
+	wcd_cpe_sysfs_init(core, id);
+
+	hw_info = cpe_svc_get_hw_cfg(core->cpe_handle);
+	if (!hw_info) {
+		dev_err(core->dev,
+			"%s: hw info not available\n",
+			__func__);
+		goto schedule_dload_work;
+	} else {
+		core->hw_info.dram_offset = hw_info->DRAM_offset;
+		core->hw_info.dram_size = hw_info->DRAM_size;
+		core->hw_info.iram_offset = hw_info->IRAM_offset;
+		core->hw_info.iram_size = hw_info->IRAM_size;
+	}
+
+	/* Setup the ramdump device and buffer */
+	core->cpe_ramdump_dev = create_ramdump_device("cpe",
+						      core->dev);
+	if (!core->cpe_ramdump_dev) {
+		dev_err(core->dev,
+			"%s: Failed to create ramdump device\n",
+			__func__);
+		goto schedule_dload_work;
+	}
+
+	arch_setup_dma_ops(core->dev, 0, 0, NULL, 0);
+	core->cpe_dump_v_addr = dma_alloc_coherent(core->dev,
+						   core->hw_info.dram_size,
+						   &core->cpe_dump_addr,
+						   GFP_KERNEL);
+	if (!core->cpe_dump_v_addr) {
+		dev_err(core->dev,
+			"%s: Failed to alloc memory for cpe dump, size = %zd\n",
+			__func__, core->hw_info.dram_size);
+		goto schedule_dload_work;
+	} else {
+		memset(core->cpe_dump_v_addr, 0, core->hw_info.dram_size);
+	}
+
+schedule_dload_work:
+	core->ssr_type = WCD_CPE_INITIALIZED;
+	schedule_work(&core->load_fw_work);
+	return core;
+
+fail_cpe_reset:
+	cpe_svc_deregister(core->cpe_handle, core->cpe_reg_handle);
+
+fail_cpe_register:
+	cpe_svc_deinitialize(core->cpe_handle);
+
+fail_cpe_initialize:
+	kfree(core);
+	return NULL;
+}
+EXPORT_SYMBOL(wcd_cpe_init);
+
+/*
+ * wcd_cpe_cmi_lsm_callback: callback called from cpe services
+ *			     to notify command response for lsm
+ *			     service
+ * @param: param containing the response code and status
+ *
+ * This callback is registered with cpe services while registering
+ * the LSM service
+ */
+static void wcd_cpe_cmi_lsm_callback(const struct cmi_api_notification *param)
+{
+	struct cmi_hdr *hdr;
+	struct cpe_lsm_session *lsm_session;
+	u8 session_id;
+
+	if (!param) {
+		pr_err("%s: param is null\n", __func__);
+		return;
+	}
+
+	if (param->event != CMI_API_MSG) {
+		pr_err("%s: unhandled event 0x%x\n", __func__, param->event);
+		return;
+	}
+
+	hdr = (struct cmi_hdr *) param->message;
+	session_id = CMI_HDR_GET_SESSION_ID(hdr);
+
+	if (session_id > WCD_CPE_LSM_MAX_SESSIONS) {
+		pr_err("%s: invalid lsm session id = %d\n",
+			__func__, session_id);
+		return;
+	}
+
+	lsm_session = lsm_sessions[session_id];
+
+	if (hdr->opcode == CPE_CMI_BASIC_RSP_OPCODE) {
+		u8 *payload = ((u8 *)param->message) + (sizeof(struct cmi_hdr));
+		u8 result = payload[0];
+
+		lsm_session->cmd_err_code = result;
+		complete(&lsm_session->cmd_comp);
+	} else if (hdr->opcode == CPE_LSM_SESSION_CMDRSP_SHARED_MEM_ALLOC) {
+		struct cpe_cmdrsp_shmem_alloc *cmdrsp_shmem_alloc =
+			(struct cpe_cmdrsp_shmem_alloc *) param->message;
+
+		if (cmdrsp_shmem_alloc->addr == 0) {
+			pr_err("%s: Failed LSM shared mem alloc\n", __func__);
+			lsm_session->cmd_err_code = CMI_SHMEM_ALLOC_FAILED;
+		} else {
+			pr_debug("%s: LSM shared mem addr = 0x%x\n",
+				__func__, cmdrsp_shmem_alloc->addr);
+			lsm_session->lsm_mem_handle = cmdrsp_shmem_alloc->addr;
+			lsm_session->cmd_err_code = 0;
+		}
+
+		complete(&lsm_session->cmd_comp);
+	} else if (hdr->opcode == CPE_LSM_SESSION_EVENT_DETECTION_STATUS_V2) {
+		struct cpe_lsm_event_detect_v2 *event_detect_v2 =
+			(struct cpe_lsm_event_detect_v2 *) param->message;
+
+		if (!lsm_session->priv_d) {
+			pr_err("%s: private data is not present\n",
+				__func__);
+			return;
+		}
+
+		pr_debug("%s: event payload, status = %u, size = %u\n",
+			__func__, event_detect_v2->detection_status,
+			event_detect_v2->size);
+
+		if (lsm_session->event_cb)
+			lsm_session->event_cb(
+				lsm_session->priv_d,
+				event_detect_v2->detection_status,
+				event_detect_v2->size,
+				event_detect_v2->payload);
+	}
+}
+
+/*
+ * wcd_cpe_cmi_send_lsm_msg: send a message to lsm service
+ * @core: handle to cpe core
+ * @session: session on which to send the message
+ * @message: actual message containing header and payload
+ *
+ * Sends the message to the lsm service for the specified session
+ * and waits for the response. Must be called with the session
+ * specific mutex held.
+ */
+static int wcd_cpe_cmi_send_lsm_msg(
+			struct wcd_cpe_core *core,
+			struct cpe_lsm_session *session,
+			void *message)
+{
+	int ret = 0;
+	struct cmi_hdr *hdr = message;
+
+	pr_debug("%s: sending message with opcode 0x%x\n",
+		 __func__, hdr->opcode);
+
+	if (unlikely(!wcd_cpe_is_online_state(core))) {
+		dev_err(core->dev,
+			"%s: MSG not sent, CPE offline\n",
+			 __func__);
+		goto done;
+	}
+
+	if (CMI_HDR_GET_OBM_FLAG(hdr))
+		wcd_cpe_bus_vote_max_bw(core, true);
+
+	reinit_completion(&session->cmd_comp);
+	ret = cmi_send_msg(message);
+	if (ret) {
+		pr_err("%s: msg opcode (0x%x) send failed (%d)\n",
+			__func__, hdr->opcode, ret);
+		goto rel_bus_vote;
+	}
+
+	ret = wait_for_completion_timeout(&session->cmd_comp,
+					  CMI_CMD_TIMEOUT);
+	if (ret > 0) {
+		pr_debug("%s: command 0x%x, received response 0x%x\n",
+			__func__, hdr->opcode, session->cmd_err_code);
+		if (session->cmd_err_code == CMI_SHMEM_ALLOC_FAILED)
+			session->cmd_err_code = CPE_ENOMEMORY;
+		if (session->cmd_err_code > 0)
+			pr_err("%s: CPE returned error[%s]\n",
+				__func__, cpe_err_get_err_str(
+				session->cmd_err_code));
+		ret = cpe_err_get_lnx_err_code(session->cmd_err_code);
+		goto rel_bus_vote;
+	} else {
+		pr_err("%s: command (0x%x) send timed out\n",
+			__func__, hdr->opcode);
+		ret = -ETIMEDOUT;
+		goto rel_bus_vote;
+	}
+
+rel_bus_vote:
+	if (CMI_HDR_GET_OBM_FLAG(hdr))
+		wcd_cpe_bus_vote_max_bw(core, false);
+
+done:
+	return ret;
+}
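+
+/*
+ * Typical call pattern for the helper above (sketch, assuming the
+ * session mutex is already held as required): build a message that
+ * begins with a struct cmi_hdr, fill the header, then send and wait
+ * in one call. Out-of-band (OBM) messages additionally trigger the
+ * max bandwidth bus vote seen above.
+ *
+ *	struct cmi_hdr cmd;
+ *
+ *	memset(&cmd, 0, sizeof(cmd));
+ *	if (fill_lsm_cmd_header_v0_inband(&cmd, session->id, 0,
+ *					  CPE_LSM_SESSION_CMD_START))
+ *		return -EINVAL;
+ *	ret = wcd_cpe_cmi_send_lsm_msg(core, session, &cmd);
+ */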
+
+/*
+ * fill_cmi_header: fill the cmi header with specified values
+ *
+ * @hdr: header to be updated with values
+ * @session_id: session id of the header,
+ *		in case of AFE service it is port_id
+ * @service_id: afe/lsm, etc
+ * @version: update the version field in header
+ * @payload_size: size of the payload following after header
+ * @opcode: opcode of the message
+ * @obm_flag: indicates if this header is for obm message
+ *
+ */
+static int fill_cmi_header(struct cmi_hdr *hdr,
+			   u8 session_id, u8 service_id,
+			   bool version, u8 payload_size,
+			   u16 opcode, bool obm_flag)
+{
+	/* sanitize the data */
+	if (!IS_VALID_SESSION_ID(session_id) ||
+	    !IS_VALID_SERVICE_ID(service_id) ||
+	    !IS_VALID_PLD_SIZE(payload_size)) {
+		pr_err("Invalid header creation request\n");
+		return -EINVAL;
+	}
+
+	CMI_HDR_SET_SESSION(hdr, session_id);
+	CMI_HDR_SET_SERVICE(hdr, service_id);
+	if (version)
+		CMI_HDR_SET_VERSION(hdr, 1);
+	else
+		CMI_HDR_SET_VERSION(hdr, 0);
+
+	CMI_HDR_SET_PAYLOAD_SIZE(hdr, payload_size);
+
+	hdr->opcode = opcode;
+
+	if (obm_flag)
+		CMI_HDR_SET_OBM(hdr, CMI_OBM_FLAG_OUT_BAND);
+	else
+		CMI_HDR_SET_OBM(hdr, CMI_OBM_FLAG_IN_BAND);
+
+	return 0;
+}
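+
+/*
+ * Example (sketch): fill an in-band, version-0 header for LSM
+ * session 1 carrying a 4-byte payload. The LSM specific wrapper
+ * below provides exactly this shorthand.
+ *
+ *	struct cmi_hdr hdr;
+ *
+ *	memset(&hdr, 0, sizeof(hdr));
+ *	if (fill_cmi_header(&hdr, 1, CMI_CPE_LSM_SERVICE_ID,
+ *			    false, 4, CPE_LSM_SESSION_CMD_SET_PARAMS,
+ *			    false))
+ *		return -EINVAL;
+ */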
+
+/*
+ * fill_lsm_cmd_header_v0_inband:
+ *	Given the header, fill the header with information
+ *	for lsm service, version 0 and inband message
+ * @hdr: the cmi header to be filled.
+ * @session_id: ID for the lsm session
+ * @payload_size: size for cmi message payload
+ * @opcode: opcode for cmi message
+ */
+static int fill_lsm_cmd_header_v0_inband(struct cmi_hdr *hdr,
+		u8 session_id, u8 payload_size, u16 opcode)
+{
+	return fill_cmi_header(hdr, session_id,
+			       CMI_CPE_LSM_SERVICE_ID, false,
+			       payload_size, opcode, false);
+}
+
+/*
+ * wcd_cpe_is_valid_lsm_session:
+ *	Check the session parameters for validity
+ * @core: handle to cpe core
+ * @session: handle to the lsm session
+ * @func: invoking function to be printed in error logs
+ */
+static int wcd_cpe_is_valid_lsm_session(struct wcd_cpe_core *core,
+		struct cpe_lsm_session *session,
+		const char *func)
+{
+	if (unlikely(IS_ERR_OR_NULL(core))) {
+		pr_err("%s: invalid handle to core\n",
+			func);
+		return -EINVAL;
+	}
+
+	if (unlikely(IS_ERR_OR_NULL(session))) {
+		dev_err(core->dev, "%s: invalid session\n",
+			func);
+		return -EINVAL;
+	}
+
+	if (session->id > WCD_CPE_LSM_MAX_SESSIONS) {
+		dev_err(core->dev, "%s: invalid session id (%u)\n",
+			func, session->id);
+		return -EINVAL;
+	}
+
+	dev_dbg(core->dev, "%s: session_id = %u\n",
+		func, session->id);
+	return 0;
+}
+
+static int wcd_cpe_cmd_lsm_open_tx_v2(
+	struct wcd_cpe_core *core,
+	struct cpe_lsm_session *session)
+{
+	struct cpe_lsm_cmd_open_tx_v2 cmd_open_tx_v2;
+	struct cal_block_data *top_cal = NULL;
+	struct audio_cal_info_lsm_top *lsm_top;
+	int ret = 0;
+
+	ret = wcd_cpe_is_valid_lsm_session(core, session,
+					   __func__);
+	if (ret)
+		return ret;
+
+	if (core->cal_data[WCD_CPE_LSM_CAL_TOPOLOGY_ID] == NULL) {
+		dev_err(core->dev,
+			"%s: LSM_TOPOLOGY cal not allocated!\n",
+			__func__);
+		return -EINVAL;
+	}
+
+	mutex_lock(&core->cal_data[WCD_CPE_LSM_CAL_TOPOLOGY_ID]->lock);
+	top_cal = cal_utils_get_only_cal_block(
+			core->cal_data[WCD_CPE_LSM_CAL_TOPOLOGY_ID]);
+	if (!top_cal) {
+		dev_err(core->dev,
+			"%s: Failed to get LSM TOPOLOGY cal block\n",
+			__func__);
+		ret = -EINVAL;
+		goto unlock_cal_mutex;
+	}
+
+	lsm_top = (struct audio_cal_info_lsm_top *)
+			top_cal->cal_info;
+
+	if (!lsm_top) {
+		dev_err(core->dev,
+			"%s: cal_info for LSM_TOPOLOGY not found\n",
+			__func__);
+		ret = -EINVAL;
+		goto unlock_cal_mutex;
+	}
+
+	dev_dbg(core->dev,
+		"%s: topology_id = 0x%x, acdb_id = 0x%x, app_type = 0x%x\n",
+		__func__, lsm_top->topology, lsm_top->acdb_id,
+		lsm_top->app_type);
+
+	if (lsm_top->topology == 0) {
+		dev_err(core->dev,
+			"%s: topology id not sent for app_type 0x%x\n",
+			__func__, lsm_top->app_type);
+		ret = -EINVAL;
+		goto unlock_cal_mutex;
+	}
+
+	WCD_CPE_GRAB_LOCK(&session->lsm_lock, "lsm");
+
+	memset(&cmd_open_tx_v2, 0, sizeof(struct cpe_lsm_cmd_open_tx_v2));
+	if (fill_lsm_cmd_header_v0_inband(&cmd_open_tx_v2.hdr,
+				session->id, OPEN_V2_CMD_PAYLOAD_SIZE,
+				CPE_LSM_SESSION_CMD_OPEN_TX_V2)) {
+		ret = -EINVAL;
+		goto end_ret;
+	}
+
+	cmd_open_tx_v2.topology_id = lsm_top->topology;
+	ret = wcd_cpe_cmi_send_lsm_msg(core, session, &cmd_open_tx_v2);
+	if (ret)
+		dev_err(core->dev,
+			"%s: failed to send open_tx_v2 cmd, err = %d\n",
+			__func__, ret);
+	else
+		session->is_topology_used = true;
+end_ret:
+	WCD_CPE_REL_LOCK(&session->lsm_lock, "lsm");
+
+unlock_cal_mutex:
+	mutex_unlock(&core->cal_data[WCD_CPE_LSM_CAL_TOPOLOGY_ID]->lock);
+	return ret;
+}
+
+/*
+ * wcd_cpe_cmd_lsm_open_tx: compose and send lsm open command
+ * @core_handle: handle to cpe core
+ * @session: session for which the command needs to be sent
+ * @app_id: application id part of the command
+ * @sample_rate: sample rate for this session
+ */
+static int wcd_cpe_cmd_lsm_open_tx(void *core_handle,
+		struct cpe_lsm_session *session,
+		u16 app_id, u16 sample_rate)
+{
+	struct cpe_lsm_cmd_open_tx cmd_open_tx;
+	struct wcd_cpe_core *core = core_handle;
+	int ret = 0;
+
+	ret = wcd_cpe_is_valid_lsm_session(core, session,
+					   __func__);
+	if (ret)
+		return ret;
+
+	/* Try to open with topology first */
+	ret = wcd_cpe_cmd_lsm_open_tx_v2(core, session);
+	if (!ret)
+		goto done;
+
+	dev_dbg(core->dev, "%s: Try open_tx without topology\n",
+		__func__);
+
+	WCD_CPE_GRAB_LOCK(&session->lsm_lock, "lsm");
+
+	memset(&cmd_open_tx, 0, sizeof(struct cpe_lsm_cmd_open_tx));
+	if (fill_lsm_cmd_header_v0_inband(&cmd_open_tx.hdr,
+				session->id, OPEN_CMD_PAYLOAD_SIZE,
+				CPE_LSM_SESSION_CMD_OPEN_TX)) {
+		ret = -EINVAL;
+		goto end_ret;
+	}
+
+	cmd_open_tx.app_id = app_id;
+	cmd_open_tx.sampling_rate = sample_rate;
+
+	ret = wcd_cpe_cmi_send_lsm_msg(core, session, &cmd_open_tx);
+	if (ret)
+		dev_err(core->dev,
+			"%s: failed to send open_tx cmd, err = %d\n",
+			__func__, ret);
+end_ret:
+	WCD_CPE_REL_LOCK(&session->lsm_lock, "lsm");
+done:
+	return ret;
+}
+
+/*
+ * wcd_cpe_cmd_lsm_close_tx: compose and send lsm close command
+ * @core_handle: handle to cpe core
+ * @session: session for which the command needs to be sent
+ */
+static int wcd_cpe_cmd_lsm_close_tx(void *core_handle,
+			struct cpe_lsm_session *session)
+{
+	struct cmi_hdr cmd_close_tx;
+	struct wcd_cpe_core *core = core_handle;
+	int ret = 0;
+
+	ret = wcd_cpe_is_valid_lsm_session(core, session,
+					   __func__);
+	if (ret)
+		return ret;
+
+	WCD_CPE_GRAB_LOCK(&session->lsm_lock, "lsm");
+
+	memset(&cmd_close_tx, 0, sizeof(cmd_close_tx));
+	if (fill_lsm_cmd_header_v0_inband(&cmd_close_tx, session->id,
+			    0, CPE_LSM_SESSION_CMD_CLOSE_TX)) {
+		ret = -EINVAL;
+		goto end_ret;
+	}
+
+	ret = wcd_cpe_cmi_send_lsm_msg(core, session, &cmd_close_tx);
+	if (ret)
+		dev_err(core->dev,
+			"%s: lsm close_tx cmd failed, err = %d\n",
+			__func__, ret);
+	else
+		session->is_topology_used = false;
+end_ret:
+	WCD_CPE_REL_LOCK(&session->lsm_lock, "lsm");
+	return ret;
+}
+
+/*
+ * wcd_cpe_cmd_shmem_alloc: compose and send lsm shared
+ *			    memory allocation command
+ * @core_handle: handle to cpe core
+ * @session: session for which the command needs to be sent
+ * @size: size of memory to be allocated
+ */
+static int wcd_cpe_cmd_lsm_shmem_alloc(void *core_handle,
+			struct cpe_lsm_session *session,
+			u32 size)
+{
+	struct cpe_cmd_shmem_alloc cmd_shmem_alloc;
+	struct wcd_cpe_core *core = core_handle;
+	int ret = 0;
+
+	ret = wcd_cpe_is_valid_lsm_session(core, session,
+					   __func__);
+	if (ret)
+		return ret;
+
+	WCD_CPE_GRAB_LOCK(&session->lsm_lock, "lsm");
+
+	memset(&cmd_shmem_alloc, 0, sizeof(cmd_shmem_alloc));
+	if (fill_lsm_cmd_header_v0_inband(&cmd_shmem_alloc.hdr, session->id,
+			    SHMEM_ALLOC_CMD_PLD_SIZE,
+			    CPE_LSM_SESSION_CMD_SHARED_MEM_ALLOC)) {
+		ret = -EINVAL;
+		goto end_ret;
+	}
+
+	cmd_shmem_alloc.size = size;
+	ret = wcd_cpe_cmi_send_lsm_msg(core, session, &cmd_shmem_alloc);
+	if (ret)
+		dev_err(core->dev,
+			"%s: lsm_shmem_alloc cmd send fail, %d\n",
+			__func__, ret);
+end_ret:
+	WCD_CPE_REL_LOCK(&session->lsm_lock, "lsm");
+	return ret;
+}
+
+/*
+ * wcd_cpe_cmd_lsm_shmem_dealloc: deallocate the shared memory
+ *				  for the specified session
+ * @core_handle: handle to cpe core
+ * @session: session for which memory needs to be deallocated.
+ */
+static int wcd_cpe_cmd_lsm_shmem_dealloc(void *core_handle,
+		struct cpe_lsm_session *session)
+{
+	struct cpe_cmd_shmem_dealloc cmd_dealloc;
+	struct wcd_cpe_core *core = core_handle;
+	int ret = 0;
+
+	ret = wcd_cpe_is_valid_lsm_session(core, session,
+					   __func__);
+	if (ret)
+		return ret;
+
+	WCD_CPE_GRAB_LOCK(&session->lsm_lock, "lsm");
+
+	memset(&cmd_dealloc, 0, sizeof(cmd_dealloc));
+	if (fill_lsm_cmd_header_v0_inband(&cmd_dealloc.hdr, session->id,
+			    SHMEM_DEALLOC_CMD_PLD_SIZE,
+			    CPE_LSM_SESSION_CMD_SHARED_MEM_DEALLOC)) {
+		ret = -EINVAL;
+		goto end_ret;
+	}
+
+	cmd_dealloc.addr = session->lsm_mem_handle;
+	ret = wcd_cpe_cmi_send_lsm_msg(core, session, &cmd_dealloc);
+	if (ret) {
+		dev_err(core->dev,
+			"%s: lsm_shmem_dealloc cmd failed, rc %d\n",
+			__func__, ret);
+		goto end_ret;
+	}
+
+	memset(&session->lsm_mem_handle, 0,
+	       sizeof(session->lsm_mem_handle));
+
+end_ret:
+	WCD_CPE_REL_LOCK(&session->lsm_lock, "lsm");
+	return ret;
+}
+
+/*
+ * wcd_cpe_send_lsm_cal: send the calibration for lsm service
+ *			      from acdb to the cpe
+ * @core: handle to cpe core
+ * @session: session for which the calibration needs to be set.
+ */
+static int wcd_cpe_send_lsm_cal(
+			struct wcd_cpe_core *core,
+			struct cpe_lsm_session *session)
+{
+	u8 *msg_pld;
+	struct cmi_hdr *hdr;
+	struct cal_block_data *lsm_cal = NULL;
+	void *inb_msg;
+	int rc = 0;
+
+	if (core->cal_data[WCD_CPE_LSM_CAL_LSM] == NULL) {
+		pr_err("%s: LSM cal not allocated!\n", __func__);
+		return -EINVAL;
+	}
+
+	mutex_lock(&core->cal_data[WCD_CPE_LSM_CAL_LSM]->lock);
+	lsm_cal = cal_utils_get_only_cal_block(
+			core->cal_data[WCD_CPE_LSM_CAL_LSM]);
+	if (!lsm_cal) {
+		pr_err("%s: failed to get lsm cal block\n", __func__);
+		rc = -EINVAL;
+		goto unlock_cal_mutex;
+	}
+
+	if (lsm_cal->cal_data.size == 0) {
+		dev_dbg(core->dev, "%s: No LSM cal to send\n",
+			__func__);
+		rc = 0;
+		goto unlock_cal_mutex;
+	}
+
+	inb_msg = kzalloc(sizeof(struct cmi_hdr) + lsm_cal->cal_data.size,
+			  GFP_KERNEL);
+	if (!inb_msg) {
+		pr_err("%s: no memory for lsm acdb cal\n",
+			__func__);
+		rc = -ENOMEM;
+		goto unlock_cal_mutex;
+	}
+
+	hdr = (struct cmi_hdr *) inb_msg;
+
+	rc = fill_lsm_cmd_header_v0_inband(hdr, session->id,
+			lsm_cal->cal_data.size,
+			CPE_LSM_SESSION_CMD_SET_PARAMS);
+	if (rc) {
+		pr_err("%s: invalid params for header, err = %d\n",
+			__func__, rc);
+		goto free_msg;
+	}
+
+	msg_pld = ((u8 *) inb_msg) + sizeof(struct cmi_hdr);
+	memcpy(msg_pld, lsm_cal->cal_data.kvaddr,
+	       lsm_cal->cal_data.size);
+
+	rc = wcd_cpe_cmi_send_lsm_msg(core, session, inb_msg);
+	if (rc)
+		pr_err("%s: acdb lsm_params send failed, err = %d\n",
+			__func__, rc);
+
+free_msg:
+	kfree(inb_msg);
+
+unlock_cal_mutex:
+	mutex_unlock(&core->cal_data[WCD_CPE_LSM_CAL_LSM]->lock);
+	return rc;
+}
+
+static void wcd_cpe_set_param_data(struct cpe_param_data *param_d,
+		struct cpe_lsm_ids *ids, u32 p_size,
+		u32 set_param_cmd)
+{
+	param_d->module_id = ids->module_id;
+	param_d->param_id = ids->param_id;
+
+	switch (set_param_cmd) {
+	case CPE_LSM_SESSION_CMD_SET_PARAMS_V2:
+		param_d->p_size.param_size = p_size;
+		break;
+	case CPE_LSM_SESSION_CMD_SET_PARAMS:
+	default:
+		param_d->p_size.sr.param_size =
+			(u16) p_size;
+		param_d->p_size.sr.reserved = 0;
+		break;
+	}
+}
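+
+/*
+ * The two layouts differ only in how the parameter size is packed:
+ * SET_PARAMS_V2 carries a full 32-bit param_size, while the legacy
+ * SET_PARAMS packs a 16-bit size plus a reserved field. For a
+ * 12-byte parameter (sketch):
+ *
+ *	V2:     param_d->p_size.param_size = 12;
+ *	legacy: param_d->p_size.sr.param_size = 12;
+ *	        param_d->p_size.sr.reserved = 0;
+ */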
+
+static int wcd_cpe_send_param_epd_thres(struct wcd_cpe_core *core,
+		struct cpe_lsm_session *session,
+		void *data, struct cpe_lsm_ids *ids)
+{
+	struct snd_lsm_ep_det_thres *ep_det_data;
+	struct cpe_lsm_param_epd_thres epd_cmd;
+	struct cmi_hdr *msg_hdr = &epd_cmd.hdr;
+	struct cpe_param_data *param_d =
+				&epd_cmd.param;
+	int rc;
+
+	memset(&epd_cmd, 0, sizeof(epd_cmd));
+	ep_det_data = (struct snd_lsm_ep_det_thres *) data;
+	if (fill_lsm_cmd_header_v0_inband(msg_hdr,
+				session->id,
+				CPE_CMD_EPD_THRES_PLD_SIZE,
+				CPE_LSM_SESSION_CMD_SET_PARAMS_V2)) {
+		rc = -EINVAL;
+		goto err_ret;
+	}
+
+	wcd_cpe_set_param_data(param_d, ids,
+			       CPE_EPD_THRES_PARAM_SIZE,
+			       CPE_LSM_SESSION_CMD_SET_PARAMS_V2);
+
+	epd_cmd.minor_version = 1;
+	epd_cmd.epd_begin = ep_det_data->epd_begin;
+	epd_cmd.epd_end = ep_det_data->epd_end;
+
+	WCD_CPE_GRAB_LOCK(&session->lsm_lock, "lsm");
+	rc = wcd_cpe_cmi_send_lsm_msg(core, session, &epd_cmd);
+	if (unlikely(rc))
+		dev_err(core->dev,
+			"%s: set_param(EPD Threshold) failed, rc %d\n",
+			__func__, rc);
+	WCD_CPE_REL_LOCK(&session->lsm_lock, "lsm");
+err_ret:
+	return rc;
+}
+
+static int wcd_cpe_send_param_opmode(struct wcd_cpe_core *core,
+		struct cpe_lsm_session *session,
+		void *data, struct cpe_lsm_ids *ids)
+{
+	struct snd_lsm_detect_mode *opmode_d;
+	struct cpe_lsm_param_opmode opmode_cmd;
+	struct cmi_hdr *msg_hdr = &opmode_cmd.hdr;
+	struct cpe_param_data *param_d =
+				&opmode_cmd.param;
+	int rc;
+
+	memset(&opmode_cmd, 0, sizeof(opmode_cmd));
+	opmode_d = (struct snd_lsm_detect_mode *) data;
+	if (fill_lsm_cmd_header_v0_inband(msg_hdr,
+				session->id,
+				CPE_CMD_OPMODE_PLD_SIZE,
+				CPE_LSM_SESSION_CMD_SET_PARAMS_V2)) {
+		rc = -EINVAL;
+		goto err_ret;
+	}
+
+	wcd_cpe_set_param_data(param_d, ids,
+			       CPE_OPMODE_PARAM_SIZE,
+			       CPE_LSM_SESSION_CMD_SET_PARAMS_V2);
+
+	opmode_cmd.minor_version = 1;
+	if (opmode_d->mode == LSM_MODE_KEYWORD_ONLY_DETECTION)
+		opmode_cmd.mode = 1;
+	else
+		opmode_cmd.mode = 3;
+
+	if (opmode_d->detect_failure)
+		opmode_cmd.mode |= 0x04;
+
+	opmode_cmd.reserved = 0;
+
+	WCD_CPE_GRAB_LOCK(&session->lsm_lock, "lsm");
+	rc = wcd_cpe_cmi_send_lsm_msg(core, session, &opmode_cmd);
+	if (unlikely(rc))
+		dev_err(core->dev,
+			"%s: set_param(operation_mode) failed, rc %d\n",
+			__func__, rc);
+	WCD_CPE_REL_LOCK(&session->lsm_lock, "lsm");
+err_ret:
+	return rc;
+}
+
+static int wcd_cpe_send_param_gain(struct wcd_cpe_core *core,
+		struct cpe_lsm_session *session,
+		void *data, struct cpe_lsm_ids *ids)
+{
+	struct snd_lsm_gain *gain_d;
+	struct cpe_lsm_param_gain gain_cmd;
+	struct cmi_hdr *msg_hdr = &gain_cmd.hdr;
+	struct cpe_param_data *param_d =
+				&gain_cmd.param;
+	int rc;
+
+	memset(&gain_cmd, 0, sizeof(gain_cmd));
+	gain_d = (struct snd_lsm_gain *) data;
+	if (fill_lsm_cmd_header_v0_inband(msg_hdr,
+				session->id,
+				CPE_CMD_GAIN_PLD_SIZE,
+				CPE_LSM_SESSION_CMD_SET_PARAMS_V2)) {
+		rc = -EINVAL;
+		goto err_ret;
+	}
+
+	wcd_cpe_set_param_data(param_d, ids,
+			       CPE_GAIN_PARAM_SIZE,
+			       CPE_LSM_SESSION_CMD_SET_PARAMS_V2);
+
+	gain_cmd.minor_version = 1;
+	gain_cmd.gain = gain_d->gain;
+	gain_cmd.reserved = 0;
+
+	WCD_CPE_GRAB_LOCK(&session->lsm_lock, "lsm");
+	rc = wcd_cpe_cmi_send_lsm_msg(core, session, &gain_cmd);
+	if (unlikely(rc))
+		dev_err(core->dev,
+			"%s: set_param(lsm_gain) failed, rc %d\n",
+			__func__, rc);
+	WCD_CPE_REL_LOCK(&session->lsm_lock, "lsm");
+err_ret:
+	return rc;
+}
+
+static int wcd_cpe_send_param_connectport(struct wcd_cpe_core *core,
+		struct cpe_lsm_session *session,
+		void *data, struct cpe_lsm_ids *ids, u16 port_id)
+{
+	struct cpe_lsm_param_connectport con_port_cmd;
+	struct cmi_hdr *msg_hdr = &con_port_cmd.hdr;
+	struct cpe_param_data *param_d =
+				&con_port_cmd.param;
+	int rc;
+
+	memset(&con_port_cmd, 0, sizeof(con_port_cmd));
+	if (fill_lsm_cmd_header_v0_inband(msg_hdr,
+				session->id,
+				CPE_CMD_CONNECTPORT_PLD_SIZE,
+				CPE_LSM_SESSION_CMD_SET_PARAMS_V2)) {
+		rc = -EINVAL;
+		goto err_ret;
+	}
+
+	wcd_cpe_set_param_data(param_d, ids,
+			       CPE_CONNECTPORT_PARAM_SIZE,
+			       CPE_LSM_SESSION_CMD_SET_PARAMS_V2);
+
+	con_port_cmd.minor_version = 1;
+	con_port_cmd.afe_port_id = port_id;
+	con_port_cmd.reserved = 0;
+
+	WCD_CPE_GRAB_LOCK(&session->lsm_lock, "lsm");
+	rc = wcd_cpe_cmi_send_lsm_msg(core, session, &con_port_cmd);
+	if (unlikely(rc))
+		dev_err(core->dev,
+			"%s: set_param(connect_port) failed, rc %d\n",
+			__func__, rc);
+	WCD_CPE_REL_LOCK(&session->lsm_lock, "lsm");
+err_ret:
+	return rc;
+}
+
+static int wcd_cpe_send_param_conf_levels(
+		struct wcd_cpe_core *core,
+		struct cpe_lsm_session *session,
+		struct cpe_lsm_ids *ids)
+{
+	struct cpe_lsm_conf_level conf_level_data;
+	struct cmi_hdr *hdr = &(conf_level_data.hdr);
+	struct cpe_param_data *param_d = &(conf_level_data.param);
+	u8 pld_size = 0;
+	u8 pad_bytes = 0;
+	void *message;
+	int ret = 0;
+
+	memset(&conf_level_data, 0, sizeof(conf_level_data));
+
+	pld_size = (sizeof(struct cpe_lsm_conf_level) - sizeof(struct cmi_hdr));
+	pld_size += session->num_confidence_levels;
+	pad_bytes = ((4 - (pld_size % 4)) % 4);
+	pld_size += pad_bytes;
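+	/*
+	 * Worked example of the padding math above: if pld_size % 4 == 3,
+	 * pad_bytes == 1; if pld_size is already a multiple of 4, the
+	 * outer "% 4" keeps pad_bytes at 0, so the payload is only ever
+	 * grown up to the next 4-byte boundary.
+	 */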
+
+	fill_cmi_header(hdr, session->id, CMI_CPE_LSM_SERVICE_ID,
+			false, pld_size,
+			CPE_LSM_SESSION_CMD_SET_PARAMS_V2, false);
+
+	wcd_cpe_set_param_data(param_d, ids,
+			       pld_size - sizeof(struct cpe_param_data),
+			       CPE_LSM_SESSION_CMD_SET_PARAMS_V2);
+
+	conf_level_data.num_active_models = session->num_confidence_levels;
+
+	message = kzalloc(sizeof(struct cpe_lsm_conf_level) +
+			   conf_level_data.num_active_models + pad_bytes,
+			   GFP_KERNEL);
+	if (!message) {
+		pr_err("%s: no memory for conf_level\n", __func__);
+		return -ENOMEM;
+	}
+
+	memcpy(message, &conf_level_data,
+	       sizeof(struct cpe_lsm_conf_level));
+	memcpy(((u8 *) message) + sizeof(struct cpe_lsm_conf_level),
+		session->conf_levels, conf_level_data.num_active_models);
+
+	WCD_CPE_GRAB_LOCK(&session->lsm_lock, "lsm");
+	ret = wcd_cpe_cmi_send_lsm_msg(core, session, message);
+	if (ret)
+		pr_err("%s: lsm_set_conf_levels failed, err = %d\n",
+			__func__, ret);
+	kfree(message);
+	WCD_CPE_REL_LOCK(&session->lsm_lock, "lsm");
+	return ret;
+}
+
+static int wcd_cpe_send_param_snd_model(struct wcd_cpe_core *core,
+	struct cpe_lsm_session *session, struct cpe_lsm_ids *ids)
+{
+	int ret = 0;
+	struct cmi_obm_msg obm_msg;
+	struct cpe_param_data *param_d;
+
+	ret = fill_cmi_header(&obm_msg.hdr, session->id,
+			CMI_CPE_LSM_SERVICE_ID, 0, 20,
+			CPE_LSM_SESSION_CMD_SET_PARAMS_V2, true);
+	if (ret) {
+		dev_err(core->dev,
+			"%s: Invalid parameters, rc = %d\n",
+			__func__, ret);
+		goto err_ret;
+	}
+
+	obm_msg.pld.version = 0;
+	obm_msg.pld.size = session->snd_model_size;
+	obm_msg.pld.data_ptr.kvaddr = session->snd_model_data;
+	obm_msg.pld.mem_handle = session->lsm_mem_handle;
+
+	param_d = (struct cpe_param_data *) session->snd_model_data;
+	wcd_cpe_set_param_data(param_d, ids,
+			(session->snd_model_size - sizeof(*param_d)),
+			CPE_LSM_SESSION_CMD_SET_PARAMS_V2);
+
+	WCD_CPE_GRAB_LOCK(&session->lsm_lock, "lsm");
+	ret = wcd_cpe_cmi_send_lsm_msg(core, session, &obm_msg);
+	if (ret)
+		dev_err(core->dev,
+			"%s: snd_model_register failed, %d\n",
+			__func__, ret);
+	WCD_CPE_REL_LOCK(&session->lsm_lock, "lsm");
+
+err_ret:
+	return ret;
+}
+
+static int wcd_cpe_send_param_dereg_model(
+	struct wcd_cpe_core *core,
+	struct cpe_lsm_session *session,
+	struct cpe_lsm_ids *ids)
+{
+	struct cmi_hdr *hdr;
+	struct cpe_param_data *param_d;
+	u8 *message;
+	u32 pld_size;
+	int rc = 0;
+
+	pld_size = sizeof(*hdr) + sizeof(*param_d);
+
+	message = kzalloc(pld_size, GFP_KERNEL);
+	if (!message)
+		return -ENOMEM;
+
+	hdr = (struct cmi_hdr *) message;
+	param_d = (struct cpe_param_data *)
+			(((u8 *) message) + sizeof(*hdr));
+
+	if (fill_lsm_cmd_header_v0_inband(hdr,
+				session->id,
+				sizeof(*param_d),
+				CPE_LSM_SESSION_CMD_SET_PARAMS_V2)) {
+		rc = -EINVAL;
+		goto err_ret;
+	}
+	wcd_cpe_set_param_data(param_d, ids, 0,
+			       CPE_LSM_SESSION_CMD_SET_PARAMS_V2);
+	WCD_CPE_GRAB_LOCK(&session->lsm_lock, "lsm");
+	rc = wcd_cpe_cmi_send_lsm_msg(core, session, message);
+	if (rc)
+		dev_err(core->dev,
+			"%s: snd_model_deregister failed, %d\n",
+			__func__, rc);
+	WCD_CPE_REL_LOCK(&session->lsm_lock, "lsm");
+err_ret:
+	kfree(message);
+	return rc;
+}
+
+static int wcd_cpe_send_custom_param(
+	struct wcd_cpe_core *core,
+	struct cpe_lsm_session *session,
+	void *data, u32 msg_size)
+{
+	u8 *msg;
+	struct cmi_hdr *hdr;
+	u8 *msg_pld;
+	int rc;
+
+	if (msg_size > CMI_INBAND_MESSAGE_SIZE) {
+		dev_err(core->dev,
+			"%s: out of band custom params not supported\n",
+			__func__);
+		return -EINVAL;
+	}
+
+	msg = kzalloc(sizeof(*hdr) + msg_size, GFP_KERNEL);
+	if (!msg)
+		return -ENOMEM;
+
+	hdr = (struct cmi_hdr *) msg;
+	msg_pld = msg + sizeof(struct cmi_hdr);
+
+	if (fill_lsm_cmd_header_v0_inband(hdr,
+				session->id,
+				msg_size,
+				CPE_LSM_SESSION_CMD_SET_PARAMS_V2)) {
+		rc = -EINVAL;
+		goto err_ret;
+	}
+
+	memcpy(msg_pld, data, msg_size);
+	WCD_CPE_GRAB_LOCK(&session->lsm_lock, "lsm");
+	rc = wcd_cpe_cmi_send_lsm_msg(core, session, msg);
+	if (rc)
+		dev_err(core->dev,
+			"%s: custom params send failed, err = %d\n",
+			 __func__, rc);
+	WCD_CPE_REL_LOCK(&session->lsm_lock, "lsm");
+err_ret:
+	kfree(msg);
+	return rc;
+}
+
+static int wcd_cpe_set_one_param(void *core_handle,
+	struct cpe_lsm_session *session, struct lsm_params_info *p_info,
+	void *data, uint32_t param_type)
+{
+	struct wcd_cpe_core *core = core_handle;
+	int rc = 0;
+	struct cpe_lsm_ids ids;
+
+	memset(&ids, 0, sizeof(ids));
+	ids.module_id = p_info->module_id;
+	ids.param_id = p_info->param_id;
+
+	switch (param_type) {
+	case LSM_ENDPOINT_DETECT_THRESHOLD:
+		rc = wcd_cpe_send_param_epd_thres(core, session,
+						data, &ids);
+		break;
+	case LSM_OPERATION_MODE:
+		rc = wcd_cpe_send_param_opmode(core, session, data, &ids);
+		break;
+	case LSM_GAIN:
+		rc = wcd_cpe_send_param_gain(core, session, data, &ids);
+		break;
+	case LSM_MIN_CONFIDENCE_LEVELS:
+		rc = wcd_cpe_send_param_conf_levels(core, session, &ids);
+		break;
+	case LSM_REG_SND_MODEL:
+		rc = wcd_cpe_send_param_snd_model(core, session, &ids);
+		break;
+	case LSM_DEREG_SND_MODEL:
+		rc = wcd_cpe_send_param_dereg_model(core, session, &ids);
+		break;
+	case LSM_CUSTOM_PARAMS:
+		rc = wcd_cpe_send_custom_param(core, session,
+					       data, p_info->param_size);
+		break;
+	default:
+		rc = -EINVAL;
+		pr_err("%s: wrong param_type 0x%x\n",
+			__func__, param_type);
+	}
+
+	if (rc)
+		dev_err(core->dev,
+			"%s: send_param(%d) failed, err %d\n",
+			 __func__, param_type, rc);
+	return rc;
+}
+
+/*
+ * wcd_cpe_lsm_set_params: set the parameters for lsm service
+ * @core: handle to cpe core
+ * @session: session for which the parameters are to be set
+ * @detect_mode: mode for detection
+ * @detect_failure: flag indicating failure detection enabled/disabled
+ *
+ */
+static int wcd_cpe_lsm_set_params(struct wcd_cpe_core *core,
+	struct cpe_lsm_session *session,
+	enum lsm_detection_mode detect_mode, bool detect_failure)
+{
+	struct cpe_lsm_ids ids;
+	struct snd_lsm_detect_mode det_mode;
+
+	int ret = 0;
+
+	/* Send lsm calibration */
+	ret = wcd_cpe_send_lsm_cal(core, session);
+	if (ret) {
+		pr_err("%s: failed to send acdb cal, err = %d\n",
+			__func__, ret);
+		goto err_ret;
+	}
+
+	/* Send operation mode */
+	ids.module_id = CPE_LSM_MODULE_ID_VOICE_WAKEUP;
+	ids.param_id = CPE_LSM_PARAM_ID_OPERATION_MODE;
+	det_mode.mode = detect_mode;
+	det_mode.detect_failure = detect_failure;
+	ret = wcd_cpe_send_param_opmode(core, session,
+					&det_mode, &ids);
+	if (ret)
+		dev_err(core->dev,
+			"%s: Failed to set opmode, err=%d\n",
+			__func__, ret);
+
+err_ret:
+	return ret;
+}
+
+static int wcd_cpe_lsm_set_data(void *core_handle,
+				struct cpe_lsm_session *session,
+				enum lsm_detection_mode detect_mode,
+				bool detect_failure)
+{
+	struct wcd_cpe_core *core = core_handle;
+	struct cpe_lsm_ids ids;
+	int ret = 0;
+
+	if (session->num_confidence_levels > 0) {
+		ret = wcd_cpe_lsm_set_params(core, session, detect_mode,
+				       detect_failure);
+		if (ret) {
+			dev_err(core->dev,
+				"%s: lsm set params failed, rc = %d\n",
+				__func__, ret);
+			goto err_ret;
+		}
+
+		ids.module_id = CPE_LSM_MODULE_ID_VOICE_WAKEUP;
+		ids.param_id = CPE_LSM_PARAM_ID_MIN_CONFIDENCE_LEVELS;
+		ret = wcd_cpe_send_param_conf_levels(core, session, &ids);
+		if (ret) {
+			dev_err(core->dev,
+				"%s: lsm confidence levels failed, rc = %d\n",
+				__func__, ret);
+			goto err_ret;
+		}
+	} else {
+		dev_dbg(core->dev,
+			"%s: no conf levels to set\n",
+			__func__);
+	}
+
+err_ret:
+	return ret;
+}
+
+/*
+ * wcd_cpe_lsm_reg_snd_model: register the sound model for listen
+ * @session: session for which to register the sound model
+ * @detect_mode: detection mode, user dependent/independent
+ * @detect_failure: flag to indicate if failure detection is enabled
+ *
+ * The memory required for sound model should be pre-allocated on CPE
+ * before this function is invoked.
+ */
+static int wcd_cpe_lsm_reg_snd_model(void *core_handle,
+				 struct cpe_lsm_session *session,
+				 enum lsm_detection_mode detect_mode,
+				 bool detect_failure)
+{
+	int ret = 0;
+	struct cmi_obm_msg obm_msg;
+	struct wcd_cpe_core *core = core_handle;
+
+	ret = wcd_cpe_is_valid_lsm_session(core, session,
+					   __func__);
+	if (ret)
+		return ret;
+
+	ret = wcd_cpe_lsm_set_data(core_handle, session,
+				   detect_mode, detect_failure);
+	if (ret) {
+		dev_err(core->dev,
+			"%s: fail to set lsm data, err = %d\n",
+			__func__, ret);
+		return ret;
+	}
+
+	WCD_CPE_GRAB_LOCK(&session->lsm_lock, "lsm");
+
+	ret = fill_cmi_header(&obm_msg.hdr, session->id,
+			CMI_CPE_LSM_SERVICE_ID, 0, 20,
+			CPE_LSM_SESSION_CMD_REGISTER_SOUND_MODEL, true);
+	if (ret) {
+		dev_err(core->dev,
+			"%s: Invalid parameters, rc = %d\n",
+			__func__, ret);
+		goto err_ret;
+	}
+
+	obm_msg.pld.version = 0;
+	obm_msg.pld.size = session->snd_model_size;
+	obm_msg.pld.data_ptr.kvaddr = session->snd_model_data;
+	obm_msg.pld.mem_handle = session->lsm_mem_handle;
+
+	ret = wcd_cpe_cmi_send_lsm_msg(core, session, &obm_msg);
+	if (ret)
+		dev_err(core->dev,
+			"%s: snd_model_register failed, %d\n",
+			__func__, ret);
+err_ret:
+	WCD_CPE_REL_LOCK(&session->lsm_lock, "lsm");
+	return ret;
+}
+
+/*
+ * wcd_cpe_lsm_dereg_snd_model: deregister the sound model for listen
+ * @core_handle: handle to cpe core
+ * @session: session for which to deregister the sound model
+ *
+ */
+static int wcd_cpe_lsm_dereg_snd_model(void *core_handle,
+				struct cpe_lsm_session *session)
+{
+	struct cmi_hdr cmd_dereg_snd_model;
+	struct wcd_cpe_core *core = core_handle;
+	int ret = 0;
+
+	ret = wcd_cpe_is_valid_lsm_session(core, session,
+					   __func__);
+	if (ret)
+		return ret;
+
+	WCD_CPE_GRAB_LOCK(&session->lsm_lock, "lsm");
+
+	memset(&cmd_dereg_snd_model, 0, sizeof(cmd_dereg_snd_model));
+	if (fill_lsm_cmd_header_v0_inband(&cmd_dereg_snd_model, session->id,
+			    0, CPE_LSM_SESSION_CMD_DEREGISTER_SOUND_MODEL)) {
+		ret = -EINVAL;
+		goto end_ret;
+	}
+
+	ret = wcd_cpe_cmi_send_lsm_msg(core, session, &cmd_dereg_snd_model);
+	if (ret)
+		dev_err(core->dev,
+			"%s: failed to send dereg_snd_model cmd\n",
+			__func__);
+end_ret:
+	WCD_CPE_REL_LOCK(&session->lsm_lock, "lsm");
+	return ret;
+}
+
+/*
+ * wcd_cpe_lsm_get_afe_out_port_id: get afe output port id
+ * @core_handle: handle to the CPE core
+ * @session: session for which the afe output port id is needed
+ */
+static int wcd_cpe_lsm_get_afe_out_port_id(void *core_handle,
+					   struct cpe_lsm_session *session)
+{
+	struct wcd_cpe_core *core = core_handle;
+	struct snd_soc_codec *codec;
+	int rc = 0;
+
+	if (!core || !core->codec) {
+		pr_err("%s: Invalid handle to %s\n",
+			__func__,
+			(!core) ? "core" : "codec");
+		rc = -EINVAL;
+		goto done;
+	}
+
+	if (!session) {
+		dev_err(core->dev, "%s: Invalid session\n",
+			__func__);
+		rc = -EINVAL;
+		goto done;
+	}
+
+	if (!core->cpe_cdc_cb ||
+		!core->cpe_cdc_cb->get_afe_out_port_id) {
+		session->afe_out_port_id = WCD_CPE_AFE_OUT_PORT_2;
+		dev_dbg(core->dev,
+			"%s: callback not defined, default port_id = %d\n",
+			__func__, session->afe_out_port_id);
+		goto done;
+	}
+
+	codec = core->codec;
+	rc = core->cpe_cdc_cb->get_afe_out_port_id(codec,
+						   &session->afe_out_port_id);
+	if (rc) {
+		dev_err(core->dev,
+			"%s: failed to get port id, err = %d\n",
+			__func__, rc);
+		goto done;
+	}
+	dev_dbg(core->dev, "%s: port_id: %d\n", __func__,
+		session->afe_out_port_id);
+
+done:
+	return rc;
+}
+
+/*
+ * wcd_cpe_cmd_lsm_start: send the start command to lsm
+ * @core_handle: handle to the CPE core
+ * @session: session for which the start command is to be sent
+ *
+ */
+static int wcd_cpe_cmd_lsm_start(void *core_handle,
+			struct cpe_lsm_session *session)
+{
+	struct cmi_hdr cmd_lsm_start;
+	struct wcd_cpe_core *core = core_handle;
+	int ret = 0;
+
+	ret = wcd_cpe_is_valid_lsm_session(core, session,
+					   __func__);
+	if (ret)
+		return ret;
+
+	WCD_CPE_GRAB_LOCK(&session->lsm_lock, "lsm");
+
+	memset(&cmd_lsm_start, 0, sizeof(struct cmi_hdr));
+	if (fill_lsm_cmd_header_v0_inband(&cmd_lsm_start, session->id, 0,
+					  CPE_LSM_SESSION_CMD_START)) {
+		ret = -EINVAL;
+		goto end_ret;
+	}
+
+	ret = wcd_cpe_cmi_send_lsm_msg(core, session, &cmd_lsm_start);
+	if (ret)
+		dev_err(core->dev, "failed to send lsm_start cmd\n");
+end_ret:
+	WCD_CPE_REL_LOCK(&session->lsm_lock, "lsm");
+	return ret;
+}
+
+/*
+ * wcd_cpe_cmd_lsm_stop: send the stop command for LSM service
+ * @core_handle: handle to the cpe core
+ * @session: session for which the stop command is to be sent
+ *
+ */
+static int wcd_cpe_cmd_lsm_stop(void *core_handle,
+		struct cpe_lsm_session *session)
+{
+	struct cmi_hdr cmd_lsm_stop;
+	struct wcd_cpe_core *core = core_handle;
+	int ret = 0;
+
+	ret = wcd_cpe_is_valid_lsm_session(core, session,
+					   __func__);
+	if (ret)
+		return ret;
+
+	WCD_CPE_GRAB_LOCK(&session->lsm_lock, "lsm");
+
+	memset(&cmd_lsm_stop, 0, sizeof(struct cmi_hdr));
+	if (fill_lsm_cmd_header_v0_inband(&cmd_lsm_stop, session->id, 0,
+					  CPE_LSM_SESSION_CMD_STOP)) {
+		ret = -EINVAL;
+		goto end_ret;
+	}
+
+	ret = wcd_cpe_cmi_send_lsm_msg(core, session, &cmd_lsm_stop);
+	if (ret)
+		dev_err(core->dev,
+			"%s: failed to send lsm_stop cmd\n",
+			__func__);
+end_ret:
+	WCD_CPE_REL_LOCK(&session->lsm_lock, "lsm");
+	return ret;
+}
+
+/*
+ * wcd_cpe_alloc_lsm_session: allocate an lsm session
+ * @core_handle: handle to wcd_cpe_core
+ * @client_data: client's private data, passed to the event callback
+ * @event_cb: callback invoked on detection events for this session
+ */
+static struct cpe_lsm_session *wcd_cpe_alloc_lsm_session(
+	void *core_handle, void *client_data,
+	void (*event_cb) (void *, u8, u8, u8 *))
+{
+	struct cpe_lsm_session *session;
+	int i, session_id = -1;
+	struct wcd_cpe_core *core = core_handle;
+	bool afe_register_service = false;
+	int ret = 0;
+
+	/*
+	 * Even if multiple listen sessions can be
+	 * allocated, the AFE service registration
+	 * should be done only once as CPE can only
+	 * have one instance of AFE service.
+	 *
+	 * If this is the first session to be allocated,
+	 * only then register the afe service.
+	 */
+	WCD_CPE_GRAB_LOCK(&core->session_lock, "session_lock");
+	if (!wcd_cpe_lsm_session_active())
+		afe_register_service = true;
+
+	for (i = 1; i <= WCD_CPE_LSM_MAX_SESSIONS; i++) {
+		if (!lsm_sessions[i]) {
+			session_id = i;
+			break;
+		}
+	}
+
+	if (session_id < 0) {
+		dev_err(core->dev,
+			"%s: max allowed sessions already allocated\n",
+			__func__);
+		WCD_CPE_REL_LOCK(&core->session_lock, "session_lock");
+		return NULL;
+	}
+
+	ret = wcd_cpe_vote(core, true);
+	if (ret) {
+		dev_err(core->dev,
+			"%s: Failed to enable cpe, err = %d\n",
+			__func__, ret);
+		WCD_CPE_REL_LOCK(&core->session_lock, "session_lock");
+		return NULL;
+	}
+
+	session = kzalloc(sizeof(struct cpe_lsm_session), GFP_KERNEL);
+	if (!session) {
+		dev_err(core->dev,
+			"%s: failed to allocate session, no memory\n",
+			__func__);
+		goto err_session_alloc;
+	}
+
+	session->id = session_id;
+	session->event_cb = event_cb;
+	session->cmi_reg_handle = cmi_register(wcd_cpe_cmi_lsm_callback,
+						CMI_CPE_LSM_SERVICE_ID);
+	if (!session->cmi_reg_handle) {
+		dev_err(core->dev,
+			"%s: Failed to register LSM service with CMI\n",
+			__func__);
+		goto err_ret;
+	}
+	session->priv_d = client_data;
+	mutex_init(&session->lsm_lock);
+	if (afe_register_service) {
+		/* Register for AFE Service */
+		core->cmi_afe_handle = cmi_register(wcd_cpe_cmi_afe_cb,
+						CMI_CPE_AFE_SERVICE_ID);
+		wcd_cpe_initialize_afe_port_data();
+		if (!core->cmi_afe_handle) {
+			dev_err(core->dev,
+				"%s: Failed to register AFE service with CMI\n",
+				__func__);
+			goto err_afe_svc_reg;
+		}
+
+		/* Once AFE service is registered, send the mode command */
+		ret = wcd_cpe_afe_svc_cmd_mode(core,
+				AFE_SVC_EXPLICIT_PORT_START);
+		if (ret)
+			goto err_afe_mode_cmd;
+	}
+
+	session->lsm_mem_handle = 0;
+	init_completion(&session->cmd_comp);
+
+	lsm_sessions[session_id] = session;
+
+	WCD_CPE_REL_LOCK(&core->session_lock, "session_lock");
+	return session;
+
+err_afe_mode_cmd:
+	cmi_deregister(core->cmi_afe_handle);
+
+err_afe_svc_reg:
+	cmi_deregister(session->cmi_reg_handle);
+	mutex_destroy(&session->lsm_lock);
+
+err_ret:
+	kfree(session);
+
+err_session_alloc:
+	wcd_cpe_vote(core, false);
+	WCD_CPE_REL_LOCK(&core->session_lock, "session_lock");
+	return NULL;
+}
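+
+/*
+ * Expected session life cycle around the allocator above (sketch;
+ * the exact sequence is driven by the LSM client through the ops
+ * table registered elsewhere in this driver):
+ *
+ *	session = wcd_cpe_alloc_lsm_session(core, priv, cb);
+ *	wcd_cpe_cmd_lsm_open_tx(core, session, app_id, rate);
+ *	... shared mem alloc, sound model register, lsm start ...
+ *	wcd_cpe_cmd_lsm_stop(core, session);
+ *	wcd_cpe_cmd_lsm_close_tx(core, session);
+ *	wcd_cpe_dealloc_lsm_session(core, session);
+ */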
+
+/*
+ * wcd_cpe_lsm_config_lab_latency: send lab latency value
+ * @core: handle to wcd_cpe_core
+ * @session: lsm session
+ * @latency: the value of latency for lab setup in msec
+ */
+static int wcd_cpe_lsm_config_lab_latency(
+		struct wcd_cpe_core *core,
+		struct cpe_lsm_session *session,
+		u32 latency)
+{
+	int ret = 0, pld_size = CPE_PARAM_LSM_LAB_LATENCY_SIZE;
+	struct cpe_lsm_lab_latency_config cpe_lab_latency;
+	struct cpe_lsm_lab_config *lab_lat = &cpe_lab_latency.latency_cfg;
+	struct cpe_param_data *param_d = &lab_lat->param;
+	struct cpe_lsm_ids ids;
+
+	if (fill_lsm_cmd_header_v0_inband(&cpe_lab_latency.hdr, session->id,
+		(u8) pld_size, CPE_LSM_SESSION_CMD_SET_PARAMS_V2)) {
+		pr_err("%s: Failed to create header\n", __func__);
+		return -EINVAL;
+	}
+	if (latency == 0x00 || latency > WCD_CPE_LAB_MAX_LATENCY) {
+		pr_err("%s: Invalid latency %u\n",
+			__func__, latency);
+		return -EINVAL;
+	}
+
+	lab_lat->latency = latency;
+
+	lab_lat->minor_ver = 1;
+	ids.module_id = CPE_LSM_MODULE_ID_LAB;
+	ids.param_id = CPE_LSM_PARAM_ID_LAB_CONFIG;
+	wcd_cpe_set_param_data(param_d, &ids,
+			       PARAM_SIZE_LSM_LATENCY_SIZE,
+			       CPE_LSM_SESSION_CMD_SET_PARAMS_V2);
+
+	pr_debug("%s: Module 0x%x Param 0x%x size %zu pld_size 0x%x\n",
+		  __func__, lab_lat->param.module_id,
+		 lab_lat->param.param_id, PARAM_SIZE_LSM_LATENCY_SIZE,
+		 pld_size);
+
+	WCD_CPE_GRAB_LOCK(&session->lsm_lock, "lsm");
+	ret = wcd_cpe_cmi_send_lsm_msg(core, session, &cpe_lab_latency);
+	if (ret != 0)
+		pr_err("%s: lsm_set_params failed, error = %d\n",
+		       __func__, ret);
+	WCD_CPE_REL_LOCK(&session->lsm_lock, "lsm");
+	return ret;
+}
+
+/*
+ * wcd_cpe_lsm_lab_control: enable/disable lab
+ * @core: handle to wcd_cpe_core
+ * @session: lsm session
+ * @enable: Indicates whether to enable / disable lab
+ */
+static int wcd_cpe_lsm_lab_control(
+		void *core_handle,
+		struct cpe_lsm_session *session,
+		bool enable)
+{
+	struct wcd_cpe_core *core = core_handle;
+	int ret = 0, pld_size = CPE_PARAM_SIZE_LSM_LAB_CONTROL;
+	struct cpe_lsm_control_lab cpe_lab_enable;
+	struct cpe_lsm_lab_enable *lab_enable = &cpe_lab_enable.lab_enable;
+	struct cpe_param_data *param_d = &lab_enable->param;
+	struct cpe_lsm_ids ids;
+
+	pr_debug("%s: enter payload_size = %d Enable %d\n",
+		 __func__, pld_size, enable);
+
+	memset(&cpe_lab_enable, 0, sizeof(cpe_lab_enable));
+
+	if (fill_lsm_cmd_header_v0_inband(&cpe_lab_enable.hdr, session->id,
+		(u8) pld_size, CPE_LSM_SESSION_CMD_SET_PARAMS_V2)) {
+		return -EINVAL;
+	}
+	lab_enable->enable = enable ? 1 : 0;
+
+	ids.module_id = CPE_LSM_MODULE_ID_LAB;
+	ids.param_id = CPE_LSM_PARAM_ID_LAB_ENABLE;
+	wcd_cpe_set_param_data(param_d, &ids,
+			PARAM_SIZE_LSM_CONTROL_SIZE,
+			CPE_LSM_SESSION_CMD_SET_PARAMS_V2);
+
+	pr_debug("%s: Module 0x%x, Param 0x%x size %zu pld_size 0x%x\n",
+		 __func__, lab_enable->param.module_id,
+		 lab_enable->param.param_id, PARAM_SIZE_LSM_CONTROL_SIZE,
+		 pld_size);
+
+	WCD_CPE_GRAB_LOCK(&session->lsm_lock, "lsm");
+	ret = wcd_cpe_cmi_send_lsm_msg(core, session, &cpe_lab_enable);
+	WCD_CPE_REL_LOCK(&session->lsm_lock, "lsm");
+	if (ret != 0) {
+		pr_err("%s: lsm_set_params failed, error = %d\n",
+			__func__, ret);
+		goto done;
+	}
+
+	if (lab_enable->enable)
+		ret = wcd_cpe_lsm_config_lab_latency(core, session,
+					       WCD_CPE_LAB_MAX_LATENCY);
+done:
+	return ret;
+}
+
+/*
+ * wcd_cpe_lsm_eob: send end-of-buffer command to stop lab
+ * @core: handle to wcd_cpe_core
+ * @session: lsm session for which lab is to be stopped
+ */
+static int wcd_cpe_lsm_eob(
+			struct wcd_cpe_core *core,
+			struct cpe_lsm_session *session)
+{
+	int ret = 0;
+	struct cmi_hdr lab_eob;
+
+	if (fill_lsm_cmd_header_v0_inband(&lab_eob, session->id,
+		0, CPE_LSM_SESSION_CMD_EOB)) {
+		return -EINVAL;
+	}
+
+	WCD_CPE_GRAB_LOCK(&session->lsm_lock, "lsm");
+	ret = wcd_cpe_cmi_send_lsm_msg(core, session, &lab_eob);
+	if (ret != 0)
+		pr_err("%s: lsm EOB cmd failed, err = %d\n",
+		       __func__, ret);
+	WCD_CPE_REL_LOCK(&session->lsm_lock, "lsm");
+
+	return ret;
+}
+
+/*
+ * wcd_cpe_dealloc_lsm_session: deallocate lsm session
+ * @core_handle: handle to wcd_cpe_core
+ * @session: lsm session to be deallocated
+ */
+static int wcd_cpe_dealloc_lsm_session(void *core_handle,
+			struct cpe_lsm_session *session)
+{
+	struct wcd_cpe_core *core = core_handle;
+	int ret = 0;
+
+	WCD_CPE_GRAB_LOCK(&core->session_lock, "session_lock");
+	if (!session) {
+		dev_err(core->dev,
+			"%s: Invalid lsm session\n", __func__);
+		WCD_CPE_REL_LOCK(&core->session_lock, "session_lock");
+		return -EINVAL;
+	}
+
+	dev_dbg(core->dev, "%s: session %d being deallocated\n",
+		__func__, session->id);
+	if (session->id > WCD_CPE_LSM_MAX_SESSIONS) {
+		dev_err(core->dev,
+			"%s: Wrong session id %d max allowed = %d\n",
+			__func__, session->id,
+			WCD_CPE_LSM_MAX_SESSIONS);
+		WCD_CPE_REL_LOCK(&core->session_lock, "session_lock");
+		return -EINVAL;
+	}
+
+	cmi_deregister(session->cmi_reg_handle);
+	mutex_destroy(&session->lsm_lock);
+	lsm_sessions[session->id] = NULL;
+	kfree(session);
+
+	if (!wcd_cpe_lsm_session_active()) {
+		cmi_deregister(core->cmi_afe_handle);
+		core->cmi_afe_handle = NULL;
+		wcd_cpe_deinitialize_afe_port_data();
+	}
+
+	ret = wcd_cpe_vote(core, false);
+	if (ret)
+		dev_dbg(core->dev,
+			"%s: Failed to un-vote cpe, err = %d\n",
+			__func__, ret);
+
+	WCD_CPE_REL_LOCK(&core->session_lock, "session_lock");
+	return ret;
+}
+
+static int wcd_cpe_lab_ch_setup(void *core_handle,
+		struct cpe_lsm_session *session,
+		enum wcd_cpe_event event)
+{
+	struct wcd_cpe_core *core = core_handle;
+	struct snd_soc_codec *codec;
+	int rc = 0;
+	u8 cpe_intr_bits;
+
+	if (!core || !core->codec) {
+		pr_err("%s: Invalid handle to %s\n",
+			__func__,
+			(!core) ? "core" : "codec");
+		rc = -EINVAL;
+		goto done;
+	}
+
+	if (!core->cpe_cdc_cb ||
+	    !core->cpe_cdc_cb->cdc_ext_clk ||
+	    !core->cpe_cdc_cb->lab_cdc_ch_ctl) {
+		dev_err(core->dev,
+			"%s: Invalid codec callbacks\n",
+			__func__);
+		rc = -EINVAL;
+		goto done;
+	}
+
+	codec = core->codec;
+	dev_dbg(core->dev,
+		"%s: event = 0x%x\n",
+		__func__, event);
+
+	switch (event) {
+	case WCD_CPE_PRE_ENABLE:
+		rc = core->cpe_cdc_cb->cdc_ext_clk(codec, true, false);
+		if (rc) {
+			dev_err(core->dev,
+				"%s: failed to enable cdc clk, err = %d\n",
+				__func__, rc);
+			goto done;
+		}
+
+		rc = core->cpe_cdc_cb->lab_cdc_ch_ctl(codec,
+						      true);
+		if (rc) {
+			dev_err(core->dev,
+				"%s: failed to enable cdc port, err = %d\n",
+				__func__, rc);
+			rc = core->cpe_cdc_cb->cdc_ext_clk(codec, false, false);
+			goto done;
+		}
+
+		break;
+
+	case WCD_CPE_POST_ENABLE:
+		rc = cpe_svc_toggle_lab(core->cpe_handle, true);
+		if (rc)
+			dev_err(core->dev,
+			"%s: Failed to enable lab\n", __func__);
+		break;
+
+	case WCD_CPE_PRE_DISABLE:
+		/*
+		 * Mask the non-fatal interrupts in CPE as they will
+		 * be generated during lab teardown and may flood.
+		 */
+		cpe_intr_bits = ~(core->irq_info.cpe_fatal_irqs & 0xFF);
+		if (CPE_ERR_IRQ_CB(core))
+			core->cpe_cdc_cb->cpe_err_irq_control(
+						core->codec,
+						CPE_ERR_IRQ_MASK,
+						&cpe_intr_bits);
+
+		rc = core->cpe_cdc_cb->lab_cdc_ch_ctl(codec,
+						      false);
+		if (rc)
+			dev_err(core->dev,
+				"%s: failed to disable cdc port, err = %d\n",
+				__func__, rc);
+		break;
+
+	case WCD_CPE_POST_DISABLE:
+		rc = wcd_cpe_lsm_eob(core, session);
+		if (rc)
+			dev_err(core->dev,
+				"%s: eob send failed, err = %d\n",
+				__func__, rc);
+
+		/* Continue teardown even if eob failed */
+		rc = cpe_svc_toggle_lab(core->cpe_handle, false);
+		if (rc)
+			dev_err(core->dev,
+			"%s: Failed to disable lab\n", __func__);
+
+		/* Continue with disabling even if toggle lab fails */
+		rc = core->cpe_cdc_cb->cdc_ext_clk(codec, false, false);
+		if (rc)
+			dev_err(core->dev,
+				"%s: failed to disable cdc clk, err = %d\n",
+				__func__, rc);
+
+		/* Unmask non-fatal CPE interrupts */
+		cpe_intr_bits = ~(core->irq_info.cpe_fatal_irqs & 0xFF);
+		if (CPE_ERR_IRQ_CB(core))
+			core->cpe_cdc_cb->cpe_err_irq_control(
+						core->codec,
+						CPE_ERR_IRQ_UNMASK,
+						&cpe_intr_bits);
+		break;
+
+	default:
+		dev_err(core->dev,
+			"%s: Invalid event 0x%x\n",
+			__func__, event);
+		rc = -EINVAL;
+		break;
+	}
+
+done:
+	return rc;
+}
+
+static int wcd_cpe_lsm_set_fmt_cfg(void *core_handle,
+			struct cpe_lsm_session *session)
+{
+	int ret;
+	struct cpe_lsm_output_format_cfg out_fmt_cfg;
+	struct wcd_cpe_core *core = core_handle;
+
+	ret = wcd_cpe_is_valid_lsm_session(core, session, __func__);
+	if (ret)
+		goto done;
+
+	WCD_CPE_GRAB_LOCK(&session->lsm_lock, "lsm");
+
+	memset(&out_fmt_cfg, 0, sizeof(out_fmt_cfg));
+	if (fill_lsm_cmd_header_v0_inband(&out_fmt_cfg.hdr,
+			session->id, OUT_FMT_CFG_CMD_PAYLOAD_SIZE,
+			CPE_LSM_SESSION_CMD_TX_BUFF_OUTPUT_CONFIG)) {
+		ret = -EINVAL;
+		goto err_ret;
+	}
+
+	out_fmt_cfg.format = session->out_fmt_cfg.format;
+	out_fmt_cfg.packing = session->out_fmt_cfg.pack_mode;
+	out_fmt_cfg.data_path_events = session->out_fmt_cfg.data_path_events;
+
+	ret = wcd_cpe_cmi_send_lsm_msg(core, session, &out_fmt_cfg);
+	if (ret)
+		dev_err(core->dev,
+			"%s: lsm_set_output_format_cfg failed, err = %d\n",
+			__func__, ret);
+
+err_ret:
+	WCD_CPE_REL_LOCK(&session->lsm_lock, "lsm");
+done:
+	return ret;
+}
+
+static void wcd_cpe_snd_model_offset(void *core_handle,
+		struct cpe_lsm_session *session, size_t *offset)
+{
+	*offset = sizeof(struct cpe_param_data);
+}
+
+static int wcd_cpe_lsm_set_media_fmt_params(void *core_handle,
+					  struct cpe_lsm_session *session,
+					  struct lsm_hw_params *param)
+{
+	struct cpe_lsm_media_fmt_param media_fmt;
+	struct cmi_hdr *msg_hdr = &media_fmt.hdr;
+	struct wcd_cpe_core *core = core_handle;
+	struct cpe_param_data *param_d = &media_fmt.param;
+	struct cpe_lsm_ids ids;
+	int ret;
+
+	memset(&media_fmt, 0, sizeof(media_fmt));
+	if (fill_lsm_cmd_header_v0_inband(msg_hdr,
+				session->id,
+				CPE_MEDIA_FMT_PLD_SIZE,
+				CPE_LSM_SESSION_CMD_SET_PARAMS_V2)) {
+		ret = -EINVAL;
+		goto done;
+	}
+
+	memset(&ids, 0, sizeof(ids));
+	ids.module_id = CPE_LSM_MODULE_FRAMEWORK;
+	ids.param_id = CPE_LSM_PARAM_ID_MEDIA_FMT;
+
+	wcd_cpe_set_param_data(param_d, &ids, CPE_MEDIA_FMT_PARAM_SIZE,
+			       CPE_LSM_SESSION_CMD_SET_PARAMS_V2);
+
+	media_fmt.minor_version = 1;
+	media_fmt.sample_rate = param->sample_rate;
+	media_fmt.num_channels = param->num_chs;
+	media_fmt.bit_width = param->bit_width;
+
+	WCD_CPE_GRAB_LOCK(&session->lsm_lock, "lsm");
+	ret = wcd_cpe_cmi_send_lsm_msg(core, session, &media_fmt);
+	if (ret)
+		dev_err(core->dev,
+			"%s: Set_param(media_format) failed, err=%d\n",
+			__func__, ret);
+	WCD_CPE_REL_LOCK(&session->lsm_lock, "lsm");
+done:
+	return ret;
+}
+
+static int wcd_cpe_lsm_set_port(void *core_handle,
+				struct cpe_lsm_session *session, void *data)
+{
+	u32 port_id;
+	int ret;
+	struct cpe_lsm_ids ids;
+	struct wcd_cpe_core *core = core_handle;
+
+	ret = wcd_cpe_is_valid_lsm_session(core, session, __func__);
+	if (ret)
+		goto done;
+
+	if (!data) {
+		dev_err(core->dev, "%s: data is NULL\n", __func__);
+		ret = -EINVAL;
+		goto done;
+	}
+	port_id = *(u32 *)data;
+	dev_dbg(core->dev, "%s: port_id: %d\n", __func__, port_id);
+
+	memset(&ids, 0, sizeof(ids));
+	ids.module_id = LSM_MODULE_ID_FRAMEWORK;
+	ids.param_id = LSM_PARAM_ID_CONNECT_TO_PORT;
+
+	ret = wcd_cpe_send_param_connectport(core, session, NULL,
+					     &ids, port_id);
+	if (ret)
+		dev_err(core->dev,
+			"%s: send_param_connectport failed, err %d\n",
+			__func__, ret);
+done:
+	return ret;
+}
+
+/*
+ * wcd_cpe_get_lsm_ops: fill the ops table with cpe lsm callbacks
+ * @lsm_ops: structure to be filled with the lsm callbacks
+ */
+int wcd_cpe_get_lsm_ops(struct wcd_cpe_lsm_ops *lsm_ops)
+{
+	lsm_ops->lsm_alloc_session = wcd_cpe_alloc_lsm_session;
+	lsm_ops->lsm_dealloc_session = wcd_cpe_dealloc_lsm_session;
+	lsm_ops->lsm_open_tx = wcd_cpe_cmd_lsm_open_tx;
+	lsm_ops->lsm_close_tx = wcd_cpe_cmd_lsm_close_tx;
+	lsm_ops->lsm_shmem_alloc = wcd_cpe_cmd_lsm_shmem_alloc;
+	lsm_ops->lsm_shmem_dealloc = wcd_cpe_cmd_lsm_shmem_dealloc;
+	lsm_ops->lsm_register_snd_model = wcd_cpe_lsm_reg_snd_model;
+	lsm_ops->lsm_deregister_snd_model = wcd_cpe_lsm_dereg_snd_model;
+	lsm_ops->lsm_get_afe_out_port_id = wcd_cpe_lsm_get_afe_out_port_id;
+	lsm_ops->lsm_start = wcd_cpe_cmd_lsm_start;
+	lsm_ops->lsm_stop = wcd_cpe_cmd_lsm_stop;
+	lsm_ops->lsm_lab_control = wcd_cpe_lsm_lab_control;
+	lsm_ops->lab_ch_setup = wcd_cpe_lab_ch_setup;
+	lsm_ops->lsm_set_data = wcd_cpe_lsm_set_data;
+	lsm_ops->lsm_set_fmt_cfg = wcd_cpe_lsm_set_fmt_cfg;
+	lsm_ops->lsm_set_one_param = wcd_cpe_set_one_param;
+	lsm_ops->lsm_get_snd_model_offset = wcd_cpe_snd_model_offset;
+	lsm_ops->lsm_set_media_fmt_params = wcd_cpe_lsm_set_media_fmt_params;
+	lsm_ops->lsm_set_port = wcd_cpe_lsm_set_port;
+
+	return 0;
+}
+EXPORT_SYMBOL(wcd_cpe_get_lsm_ops);
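+
+/*
+ * Illustrative sketch only (not part of this driver): how a client
+ * such as an LSM platform driver might consume the ops table above.
+ * The names core, priv and event_cb are hypothetical; the exact
+ * argument lists are those declared in struct wcd_cpe_lsm_ops.
+ *
+ *	struct wcd_cpe_lsm_ops lsm_ops;
+ *	struct cpe_lsm_session *session;
+ *
+ *	wcd_cpe_get_lsm_ops(&lsm_ops);
+ *	session = lsm_ops.lsm_alloc_session(core, priv, event_cb);
+ *	if (!session)
+ *		return -EINVAL;
+ *	...
+ *	lsm_ops.lsm_dealloc_session(core, session);
+ */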
+
+static int fill_afe_cmd_header(struct cmi_hdr *hdr, u8 port_id,
+				u16 opcode, u8 pld_size,
+				bool obm_flag)
+{
+	CMI_HDR_SET_SESSION(hdr, port_id);
+	CMI_HDR_SET_SERVICE(hdr, CMI_CPE_AFE_SERVICE_ID);
+
+	CMI_HDR_SET_PAYLOAD_SIZE(hdr, pld_size);
+
+	hdr->opcode = opcode;
+
+	if (obm_flag)
+		CMI_HDR_SET_OBM(hdr, CMI_OBM_FLAG_OUT_BAND);
+	else
+		CMI_HDR_SET_OBM(hdr, CMI_OBM_FLAG_IN_BAND);
+
+	return 0;
+}
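+
+/*
+ * Example, condensed from the port start/stop helpers further below:
+ * an in-band AFE port command with an empty payload is built and sent
+ * like this:
+ *
+ *	struct cmi_hdr hdr;
+ *
+ *	memset(&hdr, 0, sizeof(hdr));
+ *	fill_afe_cmd_header(&hdr, port_id, CPE_AFE_PORT_CMD_START,
+ *			    0, false);
+ *	ret = wcd_cpe_cmi_send_afe_msg(core, afe_port_d, &hdr);
+ */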
+
+/*
+ * wcd_cpe_cmi_send_afe_msg: send message to AFE service
+ * @core: wcd cpe core handle
+ * @port_d: afe port data for the port to which
+ *	      this message is to be sent
+ * @message: actual message with header and payload
+ *
+ * Port specific lock needs to be acquired before this
+ * function can be invoked
+ */
+static int wcd_cpe_cmi_send_afe_msg(
+	struct wcd_cpe_core *core,
+	struct wcd_cmi_afe_port_data *port_d,
+	void *message)
+{
+	int ret = 0;
+	struct cmi_hdr *hdr = message;
+
+	pr_debug("%s: sending message with opcode 0x%x\n",
+		__func__, hdr->opcode);
+
+	if (unlikely(!wcd_cpe_is_online_state(core))) {
+		dev_err(core->dev, "%s: CPE offline\n", __func__);
+		return 0;
+	}
+
+	if (CMI_HDR_GET_OBM_FLAG(hdr))
+		wcd_cpe_bus_vote_max_bw(core, true);
+
+	ret = cmi_send_msg(message);
+	if (ret) {
+		pr_err("%s: cmd 0x%x send failed, err = %d\n",
+			__func__, hdr->opcode, ret);
+		goto rel_bus_vote;
+	}
+
+	ret = wait_for_completion_timeout(&port_d->afe_cmd_complete,
+					  CMI_CMD_TIMEOUT);
+	if (ret > 0) {
+		pr_debug("%s: command 0x%x, received response 0x%x\n",
+			 __func__, hdr->opcode, port_d->cmd_result);
+		if (port_d->cmd_result == CMI_SHMEM_ALLOC_FAILED)
+			port_d->cmd_result = CPE_ENOMEMORY;
+		if (port_d->cmd_result > 0)
+			pr_err("%s: CPE returned error[%s]\n",
+				__func__, cpe_err_get_err_str(
+				port_d->cmd_result));
+		ret = cpe_err_get_lnx_err_code(port_d->cmd_result);
+		goto rel_bus_vote;
+	} else {
+		pr_err("%s: command 0x%x send timed out\n",
+			__func__, hdr->opcode);
+		ret = -ETIMEDOUT;
+		goto rel_bus_vote;
+	}
+
+rel_bus_vote:
+	reinit_completion(&port_d->afe_cmd_complete);
+
+	if (CMI_HDR_GET_OBM_FLAG(hdr))
+		wcd_cpe_bus_vote_max_bw(core, false);
+
+	return ret;
+}
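+
+/*
+ * A minimal sketch of the calling pattern expected by
+ * wcd_cpe_cmi_send_afe_msg(); the per-port lock must be held, as done
+ * by all callers below:
+ *
+ *	WCD_CPE_GRAB_LOCK(&afe_port_d->afe_lock, "afe");
+ *	ret = wcd_cpe_cmi_send_afe_msg(core, afe_port_d, &msg);
+ *	WCD_CPE_REL_LOCK(&afe_port_d->afe_lock, "afe");
+ */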
+
+/*
+ * wcd_cpe_afe_shmem_alloc: allocate the cpe memory for afe service
+ * @core: handle to cpe core
+ * @port_d: afe port data for the port which needs
+ *	      memory to be allocated on CPE
+ * @size: size of the memory to be allocated
+ */
+static int wcd_cpe_afe_shmem_alloc(
+	struct wcd_cpe_core *core,
+	struct wcd_cmi_afe_port_data *port_d,
+	u32 size)
+{
+	struct cpe_cmd_shmem_alloc cmd_shmem_alloc;
+	int ret = 0;
+
+	pr_debug("%s: enter: size = %d\n", __func__, size);
+
+	memset(&cmd_shmem_alloc, 0, sizeof(cmd_shmem_alloc));
+	if (fill_afe_cmd_header(&cmd_shmem_alloc.hdr, port_d->port_id,
+			    CPE_AFE_PORT_CMD_SHARED_MEM_ALLOC,
+			    SHMEM_ALLOC_CMD_PLD_SIZE, false)) {
+		ret = -EINVAL;
+		goto end_ret;
+	}
+
+	cmd_shmem_alloc.size = size;
+
+	ret = wcd_cpe_cmi_send_afe_msg(core, port_d, &cmd_shmem_alloc);
+	if (ret) {
+		pr_err("%s: afe_shmem_alloc failed, ret = %d\n",
+			__func__, ret);
+		goto end_ret;
+	}
+
+	pr_debug("%s: completed %s, mem_handle = 0x%x\n",
+		__func__, "CPE_AFE_CMD_SHARED_MEM_ALLOC",
+		port_d->mem_handle);
+
+end_ret:
+	return ret;
+}
+
+/*
+ * wcd_cpe_afe_shmem_dealloc: deallocate the cpe memory for
+ *			      afe service
+ * @core: handle to cpe core
+ * @port_d: configuration data for the port which needs
+ *	      memory to be deallocated on CPE
+ * The memory handle to be de-allocated is saved in the
+ * port configuration data
+ */
+static int wcd_cpe_afe_shmem_dealloc(
+	struct wcd_cpe_core *core,
+	struct wcd_cmi_afe_port_data *port_d)
+{
+	struct cpe_cmd_shmem_dealloc cmd_dealloc;
+	int ret = 0;
+
+	pr_debug("%s: enter, port_id = %d\n",
+		 __func__, port_d->port_id);
+
+	memset(&cmd_dealloc, 0, sizeof(cmd_dealloc));
+	if (fill_afe_cmd_header(&cmd_dealloc.hdr, port_d->port_id,
+				CPE_AFE_PORT_CMD_SHARED_MEM_DEALLOC,
+				SHMEM_DEALLOC_CMD_PLD_SIZE, false)) {
+		ret = -EINVAL;
+		goto end_ret;
+	}
+
+	cmd_dealloc.addr = port_d->mem_handle;
+	ret = wcd_cpe_cmi_send_afe_msg(core, port_d, &cmd_dealloc);
+	if (ret) {
+		pr_err("%s: failed to send shmem_dealloc cmd\n", __func__);
+		goto end_ret;
+	}
+	memset(&port_d->mem_handle, 0,
+	       sizeof(port_d->mem_handle));
+
+end_ret:
+	return ret;
+}
+
+/*
+ * wcd_cpe_send_afe_cal: send the acdb calibration to AFE port
+ * @core_handle: handle to cpe core
+ * @port_d: afe port data for the port to which the
+ *	      calibration needs to be applied
+ */
+static int wcd_cpe_send_afe_cal(void *core_handle,
+		struct wcd_cmi_afe_port_data *port_d)
+{
+	struct cal_block_data *afe_cal = NULL;
+	struct wcd_cpe_core *core = core_handle;
+	struct cmi_obm_msg obm_msg;
+	void *inb_msg = NULL;
+	void *msg;
+	int rc = 0;
+	bool is_obm_msg;
+
+	if (core->cal_data[WCD_CPE_LSM_CAL_AFE] == NULL) {
+		pr_err("%s: LSM cal not allocated!\n",
+			__func__);
+		return -EINVAL;
+	}
+
+	mutex_lock(&core->cal_data[WCD_CPE_LSM_CAL_AFE]->lock);
+	afe_cal = cal_utils_get_only_cal_block(
+			core->cal_data[WCD_CPE_LSM_CAL_AFE]);
+	if (!afe_cal) {
+		pr_err("%s: failed to get afe cal block\n",
+			__func__);
+		rc = -EINVAL;
+		goto rel_cal_mutex;
+	}
+
+	if (afe_cal->cal_data.size == 0) {
+		dev_dbg(core->dev, "%s: No AFE cal to send\n",
+			__func__);
+		rc = 0;
+		goto rel_cal_mutex;
+	}
+
+	is_obm_msg = afe_cal->cal_data.size > CMI_INBAND_MESSAGE_SIZE;
+
+	if (is_obm_msg) {
+		struct cmi_hdr *hdr = &(obm_msg.hdr);
+		struct cmi_obm *pld = &(obm_msg.pld);
+
+		rc = wcd_cpe_afe_shmem_alloc(core, port_d,
+					afe_cal->cal_data.size);
+		if (rc) {
+			dev_err(core->dev,
+				"%s: AFE shmem alloc fail %d\n",
+				__func__, rc);
+			goto rel_cal_mutex;
+		}
+
+		rc = fill_afe_cmd_header(hdr, port_d->port_id,
+					 CPE_AFE_CMD_SET_PARAM,
+					 CPE_AFE_PARAM_PAYLOAD_SIZE,
+					 true);
+		if (rc) {
+			dev_err(core->dev,
+				"%s: invalid params for header, err = %d\n",
+				__func__, rc);
+			wcd_cpe_afe_shmem_dealloc(core, port_d);
+			goto rel_cal_mutex;
+		}
+
+		pld->version = 0;
+		pld->size = afe_cal->cal_data.size;
+		pld->data_ptr.kvaddr = afe_cal->cal_data.kvaddr;
+		pld->mem_handle = port_d->mem_handle;
+		msg = &obm_msg;
+
+	} else {
+		u8 *msg_pld;
+		struct cmi_hdr *hdr;
+		inb_msg = kzalloc(sizeof(struct cmi_hdr) +
+					afe_cal->cal_data.size,
+				  GFP_KERNEL);
+		if (!inb_msg) {
+			dev_err(core->dev,
+				"%s: no memory for afe cal inband\n",
+				__func__);
+			rc = -ENOMEM;
+			goto rel_cal_mutex;
+		}
+
+		hdr = (struct cmi_hdr *) inb_msg;
+
+		rc = fill_afe_cmd_header(hdr, port_d->port_id,
+					 CPE_AFE_CMD_SET_PARAM,
+					 CPE_AFE_PARAM_PAYLOAD_SIZE,
+					 false);
+		if (rc) {
+			dev_err(core->dev,
+				"%s: invalid params for header, err = %d\n",
+				__func__, rc);
+			kfree(inb_msg);
+			inb_msg = NULL;
+			goto rel_cal_mutex;
+		}
+
+		msg_pld = ((u8 *) inb_msg) + sizeof(struct cmi_hdr);
+		memcpy(msg_pld, afe_cal->cal_data.kvaddr,
+		       afe_cal->cal_data.size);
+
+		msg = inb_msg;
+	}
+
+	rc = wcd_cpe_cmi_send_afe_msg(core, port_d, msg);
+	if (rc)
+		pr_err("%s: afe cal for listen failed, rc = %d\n",
+			__func__, rc);
+
+	if (is_obm_msg) {
+		wcd_cpe_afe_shmem_dealloc(core, port_d);
+		port_d->mem_handle = 0;
+	} else {
+		kfree(inb_msg);
+		inb_msg = NULL;
+	}
+
+rel_cal_mutex:
+	mutex_unlock(&core->cal_data[WCD_CPE_LSM_CAL_AFE]->lock);
+	return rc;
+}
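+
+/*
+ * Note on the in-band vs out-of-band split above: payloads up to
+ * CMI_INBAND_MESSAGE_SIZE travel inside the CMI message itself,
+ * anything larger goes through CPE shared memory. Condensed:
+ *
+ *	if (afe_cal->cal_data.size > CMI_INBAND_MESSAGE_SIZE)
+ *		... wcd_cpe_afe_shmem_alloc() + OBM header ...
+ *	else
+ *		... copy payload right after the cmi_hdr ...
+ */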
+
+/*
+ * wcd_cpe_is_valid_port: check validity of afe port id
+ * @core: handle to core to check for validity
+ * @afe_cfg: client provided afe configuration
+ * @func: function name invoking this validity check,
+ *	  used for logging purposes only.
+ */
+static int wcd_cpe_is_valid_port(struct wcd_cpe_core *core,
+		struct wcd_cpe_afe_port_cfg *afe_cfg,
+		const char *func)
+{
+	if (unlikely(IS_ERR_OR_NULL(core))) {
+		pr_err("%s: Invalid core handle\n", func);
+		return -EINVAL;
+	}
+
+	if (afe_cfg->port_id > WCD_CPE_AFE_MAX_PORTS) {
+		dev_err(core->dev,
+			"%s: invalid afe port (%u)\n",
+			func, afe_cfg->port_id);
+		return -EINVAL;
+	}
+
+	dev_dbg(core->dev,
+		"%s: port_id = %u\n",
+		func, afe_cfg->port_id);
+
+	return 0;
+}
+
+static int wcd_cpe_afe_svc_cmd_mode(void *core_handle,
+				    u8 mode)
+{
+	struct cpe_afe_svc_cmd_mode afe_mode;
+	struct wcd_cpe_core *core = core_handle;
+	struct wcd_cmi_afe_port_data *afe_port_d;
+	int ret;
+
+	afe_port_d = &afe_ports[0];
+	/*
+	 * The AFE SVC mode command applies to the whole service, not
+	 * to a specific port, so use AFE port id 0 to have the command
+	 * applied to all AFE ports on CPE.
+	 */
+	afe_port_d->port_id = 0;
+
+	WCD_CPE_GRAB_LOCK(&afe_port_d->afe_lock, "afe");
+	memset(&afe_mode, 0, sizeof(afe_mode));
+	if (fill_afe_cmd_header(&afe_mode.hdr, afe_port_d->port_id,
+				CPE_AFE_SVC_CMD_LAB_MODE,
+				CPE_AFE_CMD_MODE_PAYLOAD_SIZE,
+				false)) {
+		ret = -EINVAL;
+		goto err_ret;
+	}
+
+	afe_mode.mode = mode;
+
+	ret = wcd_cpe_cmi_send_afe_msg(core, afe_port_d, &afe_mode);
+	if (ret)
+		dev_err(core->dev,
+			"%s: afe_svc_mode cmd failed, err = %d\n",
+			__func__, ret);
+
+err_ret:
+	WCD_CPE_REL_LOCK(&afe_port_d->afe_lock, "afe");
+	return ret;
+}
+
+static int wcd_cpe_afe_cmd_port_cfg(void *core_handle,
+		struct wcd_cpe_afe_port_cfg *afe_cfg)
+{
+	struct cpe_afe_cmd_port_cfg port_cfg_cmd;
+	struct wcd_cpe_core *core = core_handle;
+	struct wcd_cmi_afe_port_data *afe_port_d;
+	int ret;
+
+	ret = wcd_cpe_is_valid_port(core, afe_cfg, __func__);
+	if (ret)
+		goto done;
+
+	afe_port_d = &afe_ports[afe_cfg->port_id];
+	afe_port_d->port_id = afe_cfg->port_id;
+
+	WCD_CPE_GRAB_LOCK(&afe_port_d->afe_lock, "afe");
+	memset(&port_cfg_cmd, 0, sizeof(port_cfg_cmd));
+	if (fill_afe_cmd_header(&port_cfg_cmd.hdr,
+			afe_cfg->port_id,
+			CPE_AFE_PORT_CMD_GENERIC_CONFIG,
+			CPE_AFE_CMD_PORT_CFG_PAYLOAD_SIZE,
+			false)) {
+		ret = -EINVAL;
+		goto err_ret;
+	}
+
+	port_cfg_cmd.bit_width = afe_cfg->bit_width;
+	port_cfg_cmd.num_channels = afe_cfg->num_channels;
+	port_cfg_cmd.sample_rate = afe_cfg->sample_rate;
+
+	if (afe_port_d->port_id == CPE_AFE_PORT_3_TX)
+		port_cfg_cmd.buffer_size = WCD_CPE_EC_PP_BUF_SIZE;
+	else
+		port_cfg_cmd.buffer_size = AFE_OUT_BUF_SIZE(afe_cfg->bit_width,
+							afe_cfg->sample_rate);
+
+	ret = wcd_cpe_cmi_send_afe_msg(core, afe_port_d, &port_cfg_cmd);
+	if (ret)
+		dev_err(core->dev,
+			"%s: afe_port_config failed, err = %d\n",
+			__func__, ret);
+
+err_ret:
+	WCD_CPE_REL_LOCK(&afe_port_d->afe_lock, "afe");
+done:
+	return ret;
+}
+
+/*
+ * wcd_cpe_afe_set_params: set the parameters for afe port
+ * @core_handle: handle to the cpe core
+ * @afe_cfg: configuration data for the port for which the
+ *	      parameters are to be set
+ * @afe_mad_ctl: whether hardware MAD is to be enabled on this port
+ */
+static int wcd_cpe_afe_set_params(void *core_handle,
+		struct wcd_cpe_afe_port_cfg *afe_cfg, bool afe_mad_ctl)
+{
+	struct cpe_afe_params afe_params;
+	struct cpe_afe_hw_mad_ctrl *hw_mad_ctrl = &afe_params.hw_mad_ctrl;
+	struct cpe_afe_port_cfg *port_cfg = &afe_params.port_cfg;
+	struct wcd_cpe_core *core = core_handle;
+	struct wcd_cmi_afe_port_data *afe_port_d;
+	int ret = 0, pld_size = 0;
+
+	ret = wcd_cpe_is_valid_port(core, afe_cfg, __func__);
+	if (ret)
+		return ret;
+
+	afe_port_d = &afe_ports[afe_cfg->port_id];
+	afe_port_d->port_id = afe_cfg->port_id;
+
+	WCD_CPE_GRAB_LOCK(&afe_port_d->afe_lock, "afe");
+
+	ret = wcd_cpe_send_afe_cal(core, afe_port_d);
+	if (ret) {
+		dev_err(core->dev,
+			"%s: afe acdb cal send failed, err = %d\n",
+			__func__, ret);
+		goto err_ret;
+	}
+
+	pld_size = CPE_AFE_PARAM_PAYLOAD_SIZE;
+	memset(&afe_params, 0, sizeof(afe_params));
+
+	if (fill_afe_cmd_header(&afe_params.hdr,
+				afe_cfg->port_id,
+				CPE_AFE_CMD_SET_PARAM,
+				(u8) pld_size, false)) {
+		ret = -EINVAL;
+		goto err_ret;
+	}
+
+	hw_mad_ctrl->param.module_id = CPE_AFE_MODULE_HW_MAD;
+	hw_mad_ctrl->param.param_id = CPE_AFE_PARAM_ID_HW_MAD_CTL;
+	hw_mad_ctrl->param.p_size.sr.param_size = PARAM_SIZE_AFE_HW_MAD_CTRL;
+	hw_mad_ctrl->param.p_size.sr.reserved = 0;
+	hw_mad_ctrl->minor_version = 1;
+	hw_mad_ctrl->mad_type = MAD_TYPE_AUDIO;
+	hw_mad_ctrl->mad_enable = afe_mad_ctl;
+
+	port_cfg->param.module_id = CPE_AFE_MODULE_AUDIO_DEV_INTERFACE;
+	port_cfg->param.param_id = CPE_AFE_PARAM_ID_GENERIC_PORT_CONFIG;
+	port_cfg->param.p_size.sr.param_size = PARAM_SIZE_AFE_PORT_CFG;
+	port_cfg->param.p_size.sr.reserved = 0;
+	port_cfg->minor_version = 1;
+	port_cfg->bit_width = afe_cfg->bit_width;
+	port_cfg->num_channels = afe_cfg->num_channels;
+	port_cfg->sample_rate = afe_cfg->sample_rate;
+
+	ret = wcd_cpe_cmi_send_afe_msg(core, afe_port_d, &afe_params);
+	if (ret)
+		dev_err(core->dev,
+			"%s: afe_port_config failed, err = %d\n",
+			__func__, ret);
+err_ret:
+	WCD_CPE_REL_LOCK(&afe_port_d->afe_lock, "afe");
+	return ret;
+}
+
+/*
+ * wcd_cpe_afe_port_start: send the start command to afe service
+ * @core_handle: handle to the cpe core
+ * @port_cfg: configuration data for the afe port which needs
+ *	      to be started.
+ */
+static int wcd_cpe_afe_port_start(void *core_handle,
+			struct wcd_cpe_afe_port_cfg *port_cfg)
+{
+	struct cmi_hdr hdr;
+	struct wcd_cpe_core *core = core_handle;
+	struct wcd_cmi_afe_port_data *afe_port_d;
+	int ret = 0;
+
+	ret = wcd_cpe_is_valid_port(core, port_cfg, __func__);
+	if (ret)
+		return ret;
+
+	afe_port_d = &afe_ports[port_cfg->port_id];
+	afe_port_d->port_id = port_cfg->port_id;
+
+	WCD_CPE_GRAB_LOCK(&afe_port_d->afe_lock, "afe");
+
+	memset(&hdr, 0, sizeof(struct cmi_hdr));
+	fill_afe_cmd_header(&hdr, port_cfg->port_id,
+			    CPE_AFE_PORT_CMD_START,
+			    0, false);
+	ret = wcd_cpe_cmi_send_afe_msg(core, afe_port_d, &hdr);
+	if (ret)
+		dev_err(core->dev,
+			"%s: afe_port_start cmd failed, err = %d\n",
+			__func__, ret);
+	WCD_CPE_REL_LOCK(&afe_port_d->afe_lock, "afe");
+	return ret;
+}
+
+/*
+ * wcd_cpe_afe_port_stop: send stop command to afe service
+ * @core_handle: handle to the cpe core
+ * @port_cfg: configuration data for the afe port which needs
+ *	      to be stopped.
+ */
+static int wcd_cpe_afe_port_stop(void *core_handle,
+	struct wcd_cpe_afe_port_cfg *port_cfg)
+{
+	struct cmi_hdr hdr;
+	struct wcd_cpe_core *core = core_handle;
+	struct wcd_cmi_afe_port_data *afe_port_d;
+	int ret = 0;
+
+	ret = wcd_cpe_is_valid_port(core, port_cfg, __func__);
+	if (ret)
+		return ret;
+
+	afe_port_d = &afe_ports[port_cfg->port_id];
+	afe_port_d->port_id = port_cfg->port_id;
+
+	WCD_CPE_GRAB_LOCK(&afe_port_d->afe_lock, "afe");
+
+	memset(&hdr, 0, sizeof(hdr));
+	fill_afe_cmd_header(&hdr, port_cfg->port_id,
+			    CPE_AFE_PORT_CMD_STOP,
+			    0, false);
+	ret = wcd_cpe_cmi_send_afe_msg(core, afe_port_d, &hdr);
+	if (ret)
+		dev_err(core->dev,
+			"%s: afe_stop cmd failed, err = %d\n",
+			__func__, ret);
+
+	WCD_CPE_REL_LOCK(&afe_port_d->afe_lock, "afe");
+	return ret;
+}
+
+/*
+ * wcd_cpe_afe_port_suspend: send suspend command to afe service
+ * @core_handle: handle to the cpe core
+ * @port_cfg: configuration data for the afe port which needs
+ *	      to be suspended.
+ */
+static int wcd_cpe_afe_port_suspend(void *core_handle,
+		struct wcd_cpe_afe_port_cfg *port_cfg)
+{
+	struct cmi_hdr hdr;
+	struct wcd_cpe_core *core = core_handle;
+	struct wcd_cmi_afe_port_data *afe_port_d;
+	int ret = 0;
+
+	ret = wcd_cpe_is_valid_port(core, port_cfg, __func__);
+	if (ret)
+		return ret;
+
+	afe_port_d = &afe_ports[port_cfg->port_id];
+	afe_port_d->port_id = port_cfg->port_id;
+
+	WCD_CPE_GRAB_LOCK(&afe_port_d->afe_lock, "afe");
+
+	memset(&hdr, 0, sizeof(struct cmi_hdr));
+	fill_afe_cmd_header(&hdr, port_cfg->port_id,
+			    CPE_AFE_PORT_CMD_SUSPEND,
+			    0, false);
+	ret = wcd_cpe_cmi_send_afe_msg(core, afe_port_d, &hdr);
+	if (ret)
+		dev_err(core->dev,
+			"%s: afe_suspend cmd failed, err = %d\n",
+			__func__, ret);
+	WCD_CPE_REL_LOCK(&afe_port_d->afe_lock, "afe");
+	return ret;
+}
+
+/*
+ * wcd_cpe_afe_port_resume: send the resume command to afe service
+ * @core_handle: handle to the cpe core
+ * @port_cfg: configuration data for the afe port which needs
+ *	      to be resumed.
+ */
+static int wcd_cpe_afe_port_resume(void *core_handle,
+		struct wcd_cpe_afe_port_cfg *port_cfg)
+{
+	struct cmi_hdr hdr;
+	struct wcd_cpe_core *core = core_handle;
+	struct wcd_cmi_afe_port_data *afe_port_d;
+	int ret = 0;
+
+	ret = wcd_cpe_is_valid_port(core, port_cfg, __func__);
+	if (ret)
+		return ret;
+
+	afe_port_d = &afe_ports[port_cfg->port_id];
+	afe_port_d->port_id = port_cfg->port_id;
+
+	WCD_CPE_GRAB_LOCK(&afe_port_d->afe_lock, "afe");
+
+	memset(&hdr, 0, sizeof(hdr));
+	fill_afe_cmd_header(&hdr, port_cfg->port_id,
+			    CPE_AFE_PORT_CMD_RESUME,
+			    0, false);
+	ret = wcd_cpe_cmi_send_afe_msg(core, afe_port_d, &hdr);
+	if (ret)
+		dev_err(core->dev,
+			"%s: afe_resume cmd failed, err = %d\n",
+			__func__, ret);
+	WCD_CPE_REL_LOCK(&afe_port_d->afe_lock, "afe");
+	return ret;
+}
+
+/*
+ * wcd_cpe_get_afe_ops: fill the ops table with cpe afe callbacks
+ * @afe_ops: structure to be filled with the afe callbacks
+ */
+int wcd_cpe_get_afe_ops(struct wcd_cpe_afe_ops *afe_ops)
+{
+	afe_ops->afe_set_params = wcd_cpe_afe_set_params;
+	afe_ops->afe_port_start = wcd_cpe_afe_port_start;
+	afe_ops->afe_port_stop = wcd_cpe_afe_port_stop;
+	afe_ops->afe_port_suspend = wcd_cpe_afe_port_suspend;
+	afe_ops->afe_port_resume = wcd_cpe_afe_port_resume;
+	afe_ops->afe_port_cmd_cfg = wcd_cpe_afe_cmd_port_cfg;
+
+	return 0;
+}
+EXPORT_SYMBOL(wcd_cpe_get_afe_ops);
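+
+/*
+ * Illustrative call sequence (not part of this driver) for a client
+ * driving an AFE port through the ops table above; core, cfg and
+ * enable_mad are hypothetical client-side names:
+ *
+ *	struct wcd_cpe_afe_ops afe_ops;
+ *
+ *	wcd_cpe_get_afe_ops(&afe_ops);
+ *	afe_ops.afe_set_params(core, &cfg, enable_mad);
+ *	afe_ops.afe_port_start(core, &cfg);
+ *	...
+ *	afe_ops.afe_port_stop(core, &cfg);
+ */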
+
+MODULE_DESCRIPTION("WCD CPE Core");
+MODULE_LICENSE("GPL v2");
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/sound/soc/codecs/wcd_cpe_core.h	2019-01-22 16:16:29.555301211 +0100
@@ -0,0 +1,229 @@
+/* Copyright (c) 2013-2016, 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef WCD_CPE_CORE_H
+#define WCD_CPE_CORE_H
+
+#include <soc/qcom/ramdump.h>
+#include <linux/dma-mapping.h>
+#include "wcd_cpe_services.h"
+
+#define WCD_CPE_LAB_MAX_LATENCY 250
+#define WCD_CPE_MAD_SLIM_CHANNEL 140
+
+/* Indicates CPE block is ready for image re-download */
+#define WCD_CPE_BLK_READY  (1 << 0)
+/* Indicates the underlying bus is ready */
+#define WCD_CPE_BUS_READY (1 << 1)
+
+/*
+ * The state is ready for image download only when both the
+ * underlying bus and the CPE block are ready.
+ */
+#define WCD_CPE_READY_TO_DLOAD	\
+	(WCD_CPE_BLK_READY | WCD_CPE_BUS_READY)
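+
+/*
+ * A minimal sketch (assumed usage, not taken verbatim from the
+ * driver) of how the readiness bits combine before a firmware
+ * re-download is scheduled; ready_status and load_fw_work live in
+ * struct wcd_cpe_core below:
+ *
+ *	core->ready_status |= WCD_CPE_BUS_READY;
+ *	if (core->ready_status == WCD_CPE_READY_TO_DLOAD)
+ *		schedule_work(&core->load_fw_work);
+ */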
+
+#define WCD_CPE_LOAD_IMEM (1 << 0)
+#define WCD_CPE_LOAD_DATA (1 << 1)
+#define WCD_CPE_LOAD_ALL \
+	(WCD_CPE_LOAD_IMEM | WCD_CPE_LOAD_DATA)
+
+#define WCD_CPE_IMAGE_FNAME_MAX 64
+
+#define WCD_CPE_AFE_OUT_PORT_2 2
+#define WCD_CPE_AFE_OUT_PORT_4 4
+
+enum {
+	WCD_CPE_LSM_CAL_AFE = 0,
+	WCD_CPE_LSM_CAL_LSM,
+	WCD_CPE_LSM_CAL_TOPOLOGY_ID,
+	WCD_CPE_LSM_CAL_MAX,
+};
+
+enum cpe_err_irq_cntl_type {
+	CPE_ERR_IRQ_MASK = 0,
+	CPE_ERR_IRQ_UNMASK,
+	CPE_ERR_IRQ_CLEAR,
+	CPE_ERR_IRQ_STATUS,
+};
+
+struct wcd_cpe_cdc_cb {
+	/* codec provided callback to enable RCO */
+	int (*cdc_clk_en) (struct snd_soc_codec *, bool);
+
+	/* callback for FLL setup for codec */
+	int (*cpe_clk_en) (struct snd_soc_codec *, bool);
+	int (*cdc_ext_clk)(struct snd_soc_codec *codec, int enable, bool dapm);
+	int (*lab_cdc_ch_ctl)(struct snd_soc_codec *codec, u8 event);
+	int (*get_afe_out_port_id)(struct snd_soc_codec *codec, u16 *port_id);
+	int (*bus_vote_bw)(struct snd_soc_codec *codec,
+			   bool vote);
+
+	/* Callback to control the cpe error interrupt mask/status/clear */
+	int (*cpe_err_irq_control)(struct snd_soc_codec *codec,
+				    enum cpe_err_irq_cntl_type cntl_type,
+				    u8 *status);
+};
+
+enum wcd_cpe_ssr_state_event {
+	/* Indicates CPE is initialized */
+	WCD_CPE_INITIALIZED = 0,
+	/* Indicates that IMEM is downloaded to CPE */
+	WCD_CPE_IMEM_DOWNLOADED,
+	/* Indicates CPE is enabled */
+	WCD_CPE_ENABLED,
+	/* Indicates that CPE is currently active */
+	WCD_CPE_ACTIVE,
+	/* Event from underlying bus notifying bus is down */
+	WCD_CPE_BUS_DOWN_EVENT,
+	/* Event from CPE block, notifying CPE is down */
+	WCD_CPE_SSR_EVENT,
+	/* Event from underlying bus notifying bus is up */
+	WCD_CPE_BUS_UP_EVENT,
+};
+
+struct wcd_cpe_ssr_entry {
+	int offline;
+	u32 offline_change;
+	wait_queue_head_t offline_poll_wait;
+	struct snd_info_entry *entry;
+};
+
+struct wcd_cpe_irq_info {
+	int cpe_engine_irq;
+	int cpe_err_irq;
+	u8 cpe_fatal_irqs;
+};
+
+struct wcd_cpe_hw_info {
+	u32 dram_offset;
+	size_t dram_size;
+	u32 iram_offset;
+	size_t iram_size;
+};
+
+struct wcd_cpe_core {
+	/* handle to cpe services */
+	void *cpe_handle;
+
+	/* registration handle to cpe services */
+	void *cpe_reg_handle;
+
+	/* cmi registration handle for afe service */
+	void *cmi_afe_handle;
+
+	/* handle to codec */
+	struct snd_soc_codec *codec;
+
+	/* codec device */
+	struct device *dev;
+
+	/* firmware image file name */
+	char fname[WCD_CPE_IMAGE_FNAME_MAX];
+
+	/* firmware image file name from sysfs */
+	char dyn_fname[WCD_CPE_IMAGE_FNAME_MAX];
+
+	/* codec information needed by cpe services */
+	struct cpe_svc_codec_info_v1 cdc_info;
+
+	/* work to perform image download */
+	struct work_struct load_fw_work;
+
+	/* flag to indicate mode in which cpe needs to be booted */
+	int cpe_debug_mode;
+
+	/* callbacks for codec specific implementation */
+	const struct wcd_cpe_cdc_cb *cpe_cdc_cb;
+
+	/* work to handle CPE SSR*/
+	struct work_struct ssr_work;
+
+	/* PM handle for suspend mode during SSR */
+	struct pm_qos_request pm_qos_req;
+
+	/* completion event indicating CPE OFFLINE */
+	struct completion offline_compl;
+
+	/* entry into snd card procfs indicating cpe status */
+	struct wcd_cpe_ssr_entry ssr_entry;
+
+	/*
+	 * completion event to signal CPE is
+	 * ready for image re-download
+	 */
+	struct completion ready_compl;
+
+	/* maintains the status for cpe ssr */
+	u8 ready_status;
+
+	/* Indicate SSR type */
+	enum wcd_cpe_ssr_state_event ssr_type;
+
+	/* mutex to protect cpe ssr status variables */
+	struct mutex ssr_lock;
+
+	/* mutex to protect cpe session status variables */
+	struct mutex session_lock;
+
+	/* Store the calibration data needed for cpe */
+	struct cal_type_data *cal_data[WCD_CPE_LSM_CAL_MAX];
+
+	/* completion event to signal CPE is online */
+	struct completion online_compl;
+
+	/* reference counter for cpe usage */
+	u8 cpe_users;
+
+	/* Ramdump support */
+	void *cpe_ramdump_dev;
+	struct ramdump_segment cpe_ramdump_seg;
+	dma_addr_t cpe_dump_addr;
+	void *cpe_dump_v_addr;
+
+	/* SFR support */
+	u32 sfr_buf_addr;
+	size_t sfr_buf_size;
+
+	/* IRQ information for CPE interrupts */
+	struct wcd_cpe_irq_info irq_info;
+
+	/* Kobject for sysfs entry */
+	struct kobject cpe_kobj;
+
+	/* Reference count for cpe clk*/
+	int cpe_clk_ref;
+
+	/* codec based hardware info */
+	struct wcd_cpe_hw_info hw_info;
+};
+
+struct wcd_cpe_params {
+	struct snd_soc_codec *codec;
+	struct wcd_cpe_core * (*get_cpe_core) (
+				struct snd_soc_codec *);
+	const struct wcd_cpe_cdc_cb *cdc_cb;
+	int dbg_mode;
+	u16 cdc_major_ver;
+	u16 cdc_minor_ver;
+	u32 cdc_id;
+
+	struct wcd_cpe_irq_info cdc_irq_info;
+
+	struct cpe_svc_init_param *cpe_svc_params;
+};
+
+int wcd_cpe_ssr_event(void *core_handle,
+		      enum wcd_cpe_ssr_state_event event);
+struct wcd_cpe_core *wcd_cpe_init(const char *,
+				  struct snd_soc_codec *,
+				  struct wcd_cpe_params *params);
+#endif
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/sound/soc/codecs/wcd_cpe_services.c	2019-01-22 16:16:29.555301211 +0100
@@ -0,0 +1,3016 @@
+/* Copyright (c) 2014-2016, 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include <linux/completion.h>
+#include <linux/kthread.h>
+#include <linux/delay.h>
+#include <linux/mfd/wcd9xxx/core.h>
+#include <sound/cpe_cmi.h>
+#include <sound/soc.h>
+#include <linux/mfd/wcd9xxx/wcd9330_registers.h>
+#include <linux/mfd/wcd9335/registers.h>
+#include "wcd_cpe_services.h"
+#include "wcd_cmi_api.h"
+
+#define CPE_MSG_BUFFER_SIZE 132
+#define CPE_NO_SERVICE 0
+
+#define CMI_DRIVER_SUPPORTED_VERSION 0
+#define CMI_API_SUCCESS 0
+#define CMI_MSG_TRANSPORT (0x0002)
+#define CPE_SVC_INACTIVE_STATE_RETRIES_MAX 10
+
+#define TOMTOM_A_SVASS_SPE_DRAM_OFFSET				0x50000
+#define TOMTOM_A_SVASS_SPE_DRAM_SIZE				0x30000
+#define TOMTOM_A_SVASS_SPE_IRAM_OFFSET				0x80000
+#define TOMTOM_A_SVASS_SPE_IRAM_SIZE				0xC000
+#define TOMTOM_A_SVASS_SPE_INBOX_SIZE				12
+#define TOMTOM_A_SVASS_SPE_OUTBOX_SIZE				12
+
+#define MEM_ACCESS_NONE_VAL			0x0
+#define MEM_ACCESS_IRAM_VAL			0x1
+#define MEM_ACCESS_DRAM_VAL			0x2
+#define LISTEN_CTL_SPE_VAL			0x0
+#define LISTEN_CTL_MSM_VAL			0x1
+
+#define TOMTOM_A_SVASS_SPE_INBOX(N)	(TOMTOM_A_SVASS_SPE_INBOX_0 + (N))
+#define TOMTOM_A_SVASS_SPE_OUTBOX(N)	(TOMTOM_A_SVASS_SPE_OUTBOX_0 + (N))
+
+#define WCD9335_CPE_SS_SPE_DRAM_OFFSET		0x48000
+#define WCD9335_CPE_SS_SPE_DRAM_SIZE		0x34000
+#define WCD9335_CPE_SS_SPE_IRAM_OFFSET		0x80000
+#define WCD9335_CPE_SS_SPE_IRAM_SIZE		0x20000
+
+#define WCD9335_CPE_SS_SPE_INBOX_SIZE		16
+#define WCD9335_CPE_SS_SPE_OUTBOX_SIZE		16
+#define WCD9335_CPE_SS_SPE_MEM_BANK_SIZ		16
+
+#define WCD9335_CPE_SS_SPE_INBOX1(N)	(WCD9335_CPE_SS_INBOX1_0 + (N))
+#define WCD9335_CPE_SS_SPE_OUTBOX1(N)	(WCD9335_CPE_SS_OUTBOX1_0 + (N))
+#define WCD9335_CPE_SS_MEM_BANK(N)	(WCD9335_CPE_SS_MEM_BANK_0 + (N))
+
+#define CHUNK_SIZE 16
+
+#define CPE_SVC_GRAB_LOCK(lock, name)		\
+{						\
+	pr_debug("%s: %s lock acquire\n",	\
+		 __func__, name);		\
+	mutex_lock(lock);			\
+}
+
+#define CPE_SVC_REL_LOCK(lock, name)		\
+{						\
+	pr_debug("%s: %s lock release\n",	\
+		 __func__, name);		\
+	mutex_unlock(lock);			\
+}
+
+static const struct cpe_svc_hw_cfg cpe_svc_tomtom_info = {
+	TOMTOM_A_SVASS_SPE_DRAM_SIZE,
+	TOMTOM_A_SVASS_SPE_DRAM_OFFSET,
+	TOMTOM_A_SVASS_SPE_IRAM_SIZE,
+	TOMTOM_A_SVASS_SPE_IRAM_OFFSET,
+	TOMTOM_A_SVASS_SPE_INBOX_SIZE,
+	TOMTOM_A_SVASS_SPE_OUTBOX_SIZE
+};
+
+static const struct cpe_svc_hw_cfg cpe_svc_wcd9335_info = {
+	WCD9335_CPE_SS_SPE_DRAM_SIZE,
+	WCD9335_CPE_SS_SPE_DRAM_OFFSET,
+	WCD9335_CPE_SS_SPE_IRAM_SIZE,
+	WCD9335_CPE_SS_SPE_IRAM_OFFSET,
+	WCD9335_CPE_SS_SPE_INBOX_SIZE,
+	WCD9335_CPE_SS_SPE_OUTBOX_SIZE
+};
+
+enum cpe_state {
+	CPE_STATE_UNINITIALIZED = 0,
+	CPE_STATE_INITIALIZED,
+	CPE_STATE_IDLE,
+	CPE_STATE_DOWNLOADING,
+	CPE_STATE_BOOTING,
+	CPE_STATE_SENDING_MSG,
+	CPE_STATE_OFFLINE,
+	CPE_STATE_BUFFERING,
+	CPE_STATE_BUFFERING_CANCELLED
+};
+
+enum cpe_substate {
+	CPE_SS_IDLE = 0,
+	CPE_SS_MSG_REQUEST_ACCESS,
+	CPE_SS_MSG_SEND_INBOX,
+	CPE_SS_MSG_SENT,
+	CPE_SS_DL_DOWNLOADING,
+	CPE_SS_DL_COMPLETED,
+	CPE_SS_BOOT,
+	CPE_SS_BOOT_INIT,
+	CPE_SS_ONLINE
+};
+
+enum cpe_command {
+	CPE_CMD_KILL_THREAD = 0,
+	CPE_CMD_BOOT,
+	CPE_CMD_BOOT_INITIALIZE,
+	CPE_CMD_BOOT_COMPLETE,
+	CPE_CMD_SEND_MSG,
+	CPE_CMD_SEND_TRANS_MSG,
+	CPE_CMD_SEND_MSG_COMPLETE,
+	CPE_CMD_PROCESS_IRQ,
+	CPE_CMD_RAMDUMP,
+	CPE_CMD_DL_SEGMENT,
+	CPE_CMD_SHUTDOWN,
+	CPE_CMD_RESET,
+	CPE_CMD_DEINITIALIZE,
+	CPE_CMD_READ,
+	CPE_CMD_ENABLE_LAB,
+	CPE_CMD_DISABLE_LAB,
+	CPE_CMD_SWAP_BUFFER,
+	CPE_LAB_CFG_SB,
+	CPE_CMD_CANCEL_MEMACCESS,
+	CPE_CMD_PROC_INCOMING_MSG,
+	CPE_CMD_FTM_TEST,
+};
+
+enum cpe_process_result {
+	CPE_PROC_SUCCESS = 0,
+	CPE_PROC_FAILED,
+	CPE_PROC_KILLED,
+	CPE_PROC_QUEUED,
+};
+
+struct cpe_command_node {
+	enum cpe_command command;
+	enum cpe_svc_result result;
+	void *data;
+	struct list_head list;
+};
+
+struct cpe_info {
+	struct list_head main_queue;
+	struct completion cmd_complete;
+	struct completion thread_comp;
+	void *thread_handler;
+	bool stop_thread;
+	struct mutex msg_lock;
+	enum cpe_state state;
+	enum cpe_substate substate;
+	struct list_head client_list;
+	enum cpe_process_result (*cpe_process_command)
+			(struct cpe_command_node *command_node);
+	enum cpe_svc_result (*cpe_cmd_validate)
+				(const struct cpe_info *i,
+				 enum cpe_command command);
+	enum cpe_svc_result (*cpe_start_notification)
+			     (struct cpe_info *i);
+	u32 initialized;
+	struct cpe_svc_tgt_abstraction *tgt;
+	void *pending;
+	void *data;
+	void *client_context;
+	u32 codec_id;
+	struct work_struct clk_plan_work;
+	struct completion core_svc_cmd_compl;
+};
+
+struct cpe_tgt_waiti_info {
+	u8 tgt_waiti_size;
+	u8 *tgt_waiti_data;
+};
+
+struct cpe_svc_tgt_abstraction {
+	enum cpe_svc_result (*tgt_boot) (int debug_mode);
+
+	u32 (*tgt_cpar_init_done) (void);
+
+	u32 (*tgt_is_active) (void);
+
+	enum cpe_svc_result (*tgt_reset) (void);
+
+	enum cpe_svc_result (*tgt_stop)(void);
+
+	enum cpe_svc_result (*tgt_read_mailbox)
+				(u8 *buffer, size_t size);
+
+	enum cpe_svc_result (*tgt_write_mailbox)
+				(u8 *buffer, size_t size);
+
+	enum cpe_svc_result (*tgt_read_ram)
+				(struct cpe_info *c,
+				 struct cpe_svc_mem_segment *data);
+
+	enum cpe_svc_result (*tgt_write_ram)
+				(struct cpe_info *c,
+				const struct cpe_svc_mem_segment *data);
+
+	enum cpe_svc_result (*tgt_route_notification)
+				(enum cpe_svc_module module,
+				 enum cpe_svc_route_dest dest);
+
+	enum cpe_svc_result (*tgt_set_debug_mode) (u32 enable);
+	const struct cpe_svc_hw_cfg *(*tgt_get_cpe_info) (void);
+	enum cpe_svc_result (*tgt_deinit)
+				(struct cpe_svc_tgt_abstraction *param);
+	enum cpe_svc_result (*tgt_voice_tx_lab)
+				(bool);
+	u8 *inbox;
+	u8 *outbox;
+	struct cpe_tgt_waiti_info *tgt_waiti_info;
+};
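+
+/*
+ * The abstraction above is always dispatched through t_info->tgt once
+ * cpe_svc_tgt_init() has selected a target, e.g. (as done in
+ * cpe_send_msg_to_inbox() below):
+ *
+ *	size = t_info->tgt->tgt_get_cpe_info()->inbox_size;
+ *	rc = t_info->tgt->tgt_write_mailbox(t_info->tgt->inbox, size);
+ */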
+
+static enum cpe_svc_result cpe_tgt_tomtom_init(
+	struct cpe_svc_codec_info_v1 *codec_info,
+	struct cpe_svc_tgt_abstraction *param);
+
+static enum cpe_svc_result cpe_tgt_wcd9335_init(
+	struct cpe_svc_codec_info_v1 *codec_info,
+	struct cpe_svc_tgt_abstraction *param);
+
+struct cpe_send_msg {
+	u8 *payload;
+	u32 isobm;
+	u32 address;
+	size_t size;
+};
+
+struct cpe_read_handle {
+	void *registration;
+	struct cpe_info t_info;
+	struct list_head buffers;
+	void *config;
+};
+
+struct generic_notification {
+	void (*notification)
+		(const struct cpe_svc_notification *parameter);
+	void (*cmi_notification)
+		(const struct cmi_api_notification *parameter);
+};
+
+struct cpe_notif_node {
+	struct generic_notification notif;
+	u32 mask;
+	u32 service;
+	const struct cpe_info *context;
+	const char *name;
+	u32 disabled;
+	struct list_head list;
+};
+
+struct cpe_priv {
+	struct cpe_info *cpe_default_handle;
+	void (*cpe_irq_control_callback)(u32 enable);
+	void (*cpe_query_freq_plans_cb)
+		(void *cdc_priv,
+		 struct cpe_svc_cfg_clk_plan *clk_freq);
+	void (*cpe_change_freq_plan_cb)(void *cdc_priv,
+			u32 clk_freq);
+	u32 cpe_msg_buffer;
+	void *cpe_cmi_handle;
+	struct mutex cpe_api_mutex;
+	struct mutex cpe_svc_lock;
+	struct cpe_svc_boot_event cpe_debug_vector;
+	void *cdc_priv;
+};
+
+static struct cpe_priv cpe_d;
+
+static enum cpe_svc_result __cpe_svc_shutdown(void *cpe_handle);
+
+static enum cpe_svc_result cpe_is_command_valid(
+		const struct cpe_info *t_info,
+		enum cpe_command command);
+
+static int cpe_register_read(u32 reg, u8 *val)
+{
+	*(val) = snd_soc_read(cpe_d.cdc_priv, reg);
+	return 0;
+}
+
+static enum cpe_svc_result cpe_update_bits(u32 reg,
+		u32 mask, u32 value)
+{
+	int ret;
+
+	ret = snd_soc_update_bits(cpe_d.cdc_priv, reg,
+				  mask, value);
+	if (ret < 0)
+		return CPE_SVC_FAILED;
+
+	return CPE_SVC_SUCCESS;
+}
+
+static int cpe_register_write(u32 reg, u32 val)
+{
+	int ret = 0;
+
+	if (reg != TOMTOM_A_SVASS_MEM_BANK &&
+	    reg != WCD9335_CPE_SS_MEM_BANK_0)
+		pr_debug("%s: reg = 0x%x, value = 0x%x\n",
+			  __func__, reg, val);
+
+	ret = snd_soc_write(cpe_d.cdc_priv, reg, val);
+	if (ret < 0)
+		return CPE_SVC_FAILED;
+
+	return CPE_SVC_SUCCESS;
+}
+
+static int cpe_register_write_repeat(u32 reg, u8 *ptr, u32 to_write)
+{
+	struct snd_soc_codec *codec = cpe_d.cdc_priv;
+	struct wcd9xxx *wcd9xxx = dev_get_drvdata(codec->dev->parent);
+	int ret = 0;
+
+	ret = wcd9xxx_slim_write_repeat(wcd9xxx, reg, to_write, ptr);
+	if (ret != 0)
+		pr_err("%s: slim_write_repeat failed\n", __func__);
+
+	if (ret < 0)
+		return CPE_SVC_FAILED;
+
+	return CPE_SVC_SUCCESS;
+}
+
+static bool cpe_register_read_autoinc_supported(void)
+{
+	return true;
+}
+
+
+/* Called under msgq locked context */
+static void cpe_cmd_received(struct cpe_info *t_info)
+{
+	struct cpe_command_node *node = NULL;
+	enum cpe_process_result proc_rc = CPE_PROC_SUCCESS;
+
+	if (!t_info) {
+		pr_err("%s: Invalid thread info\n",
+			__func__);
+		return;
+	}
+
+	while (!list_empty(&t_info->main_queue)) {
+		if (proc_rc != CPE_PROC_SUCCESS)
+			break;
+		node = list_first_entry(&t_info->main_queue,
+					struct cpe_command_node, list);
+		if (!node)
+			break;
+		list_del(&node->list);
+		proc_rc = t_info->cpe_process_command(node);
+		pr_debug("%s: process command return %d\n",
+			 __func__, proc_rc);
+
+		switch (proc_rc) {
+		case CPE_PROC_SUCCESS:
+			kfree(node);
+			break;
+		case CPE_PROC_FAILED:
+			kfree(node);
+			pr_err("%s: cmd failed\n", __func__);
+			break;
+		case CPE_PROC_KILLED:
+			break;
+		default:
+			list_add(&node->list, &(t_info->main_queue));
+		}
+	}
+}
+
+static int cpe_worker_thread(void *context)
+{
+	struct cpe_info *t_info = (struct cpe_info *)context;
+
+	/*
+	 * Thread will run until requested to stop explicitly
+	 * by setting the t_info->stop_thread flag
+	 */
+	while (1) {
+		/* Wait for command to be processed */
+		wait_for_completion(&t_info->cmd_complete);
+
+		CPE_SVC_GRAB_LOCK(&t_info->msg_lock, "msg_lock");
+		cpe_cmd_received(t_info);
+		reinit_completion(&t_info->cmd_complete);
+		/* Check if thread needs to be stopped */
+		if (t_info->stop_thread)
+			goto unlock_and_exit;
+		CPE_SVC_REL_LOCK(&t_info->msg_lock, "msg_lock");
+	}
+
+unlock_and_exit:
+	pr_debug("%s: thread stopped\n", __func__);
+	CPE_SVC_REL_LOCK(&t_info->msg_lock, "msg_lock");
+	complete_and_exit(&t_info->thread_comp, 0);
+}
+
+static void cpe_create_worker_thread(struct cpe_info *t_info)
+{
+	INIT_LIST_HEAD(&t_info->main_queue);
+	init_completion(&t_info->cmd_complete);
+	init_completion(&t_info->thread_comp);
+	t_info->stop_thread = false;
+	t_info->thread_handler = kthread_run(cpe_worker_thread,
+		(void *)t_info, "cpe-worker-thread");
+	pr_debug("%s: Created new worker thread\n",
+		 __func__);
+}
+
+static void cpe_cleanup_worker_thread(struct cpe_info *t_info)
+{
+	if (!t_info->thread_handler) {
+		pr_err("%s: thread not created\n", __func__);
+		return;
+	}
+
+	/*
+	 * Wake up the command handler in case
+	 * it is waiting for a command to be processed.
+	 */
+	CPE_SVC_GRAB_LOCK(&t_info->msg_lock, "msg_lock");
+	t_info->stop_thread = true;
+	complete(&t_info->cmd_complete);
+	CPE_SVC_REL_LOCK(&t_info->msg_lock, "msg_lock");
+
+	/* Wait for the thread to exit */
+	wait_for_completion(&t_info->thread_comp);
+	t_info->thread_handler = NULL;
+
+	pr_debug("%s: Thread cleaned up successfully\n",
+		 __func__);
+}
+
+static enum cpe_svc_result
+cpe_send_cmd_to_thread(struct cpe_info *t_info,
+	enum cpe_command command, void *data,
+	bool high_prio)
+{
+	enum cpe_svc_result rc = CPE_SVC_SUCCESS;
+	struct cpe_command_node *cmd = NULL;
+
+	rc = cpe_is_command_valid(t_info, command);
+	if (rc != CPE_SVC_SUCCESS) {
+		pr_err("%s: Invalid command %d\n",
+			__func__, command);
+		return rc;
+	}
+
+	cmd = kzalloc(sizeof(struct cpe_command_node),
+		      GFP_ATOMIC);
+	if (!cmd) {
+		pr_err("%s: No memory for cmd node, size = %zu\n",
+			__func__, sizeof(struct cpe_command_node));
+		return CPE_SVC_NO_MEMORY;
+	}
+
+	cmd->command = command;
+	cmd->data = data;
+
+	CPE_SVC_GRAB_LOCK(&t_info->msg_lock, "msg_lock");
+	if (high_prio)
+		list_add(&(cmd->list),
+			 &(t_info->main_queue));
+	else
+		list_add_tail(&(cmd->list),
+			      &(t_info->main_queue));
+	complete(&t_info->cmd_complete);
+	CPE_SVC_REL_LOCK(&t_info->msg_lock, "msg_lock");
+
+	return rc;
+}
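+
+/*
+ * Example (illustrative; the data argument here is hypothetical) of
+ * deferring work to cpe_worker_thread() through the command queue:
+ *
+ *	rc = cpe_send_cmd_to_thread(t_info, CPE_CMD_PROCESS_IRQ,
+ *				    data, false);
+ *	if (rc != CPE_SVC_SUCCESS)
+ *		pr_err("%s: failed to queue command\n", __func__);
+ */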
+
+static enum cpe_svc_result cpe_change_state(
+	struct cpe_info *t_info,
+	enum cpe_state state, enum cpe_substate ss)
+{
+	enum cpe_svc_result rc = CPE_SVC_SUCCESS;
+
+	if (!t_info)
+		t_info = cpe_d.cpe_default_handle;
+
+	pr_debug("%s: current state: %d,%d, new_state: %d,%d\n",
+		 __func__, t_info->state, t_info->substate,
+		 state, ss);
+
+	t_info->state = state;
+	t_info->substate = ss;
+
+	return rc;
+}
+
+static enum cpe_svc_result
+cpe_is_command_valid(const struct cpe_info *t_info,
+		enum cpe_command command)
+{
+	enum cpe_svc_result rc = CPE_SVC_INVALID_HANDLE;
+
+	if (t_info && t_info->cpe_cmd_validate)
+		rc = t_info->cpe_cmd_validate(t_info, command);
+	else
+		pr_err("%s: invalid handle or callback\n",
+			__func__);
+	return rc;
+}
+
+static void cpe_notify_client(struct cpe_notif_node *client,
+		struct cpe_svc_notification *payload)
+{
+	if (!client || !payload) {
+		pr_err("%s: invalid client or payload\n",
+			__func__);
+		return;
+	}
+
+	if (!(client->mask & payload->event)) {
+		pr_debug("%s: client mask 0x%x not registered for event 0x%x\n",
+			 __func__, client->mask, payload->event);
+		return;
+	}
+
+	if (client->notif.notification && !client->disabled)
+		client->notif.notification(payload);
+
+	if ((client->mask & CPE_SVC_CMI_MSG) &&
+	     client->notif.cmi_notification)
+		client->notif.cmi_notification(
+			(const struct cmi_api_notification *)payload);
+}
+
+static void cpe_broadcast_notification(const struct cpe_info *t_info,
+		struct cpe_svc_notification *payload)
+{
+	struct cpe_notif_node *n = NULL;
+
+	if (!t_info || !payload) {
+		pr_err("%s: invalid handle\n", __func__);
+		return;
+	}
+
+	pr_debug("%s: notify clients, event = %d\n",
+		 __func__, payload->event);
+	payload->private_data = cpe_d.cdc_priv;
+
+	CPE_SVC_GRAB_LOCK(&cpe_d.cpe_svc_lock, "cpe_svc");
+	list_for_each_entry(n, &t_info->client_list, list) {
+		if (!(n->mask & CPE_SVC_CMI_MSG))
+			cpe_notify_client(n, payload);
+	}
+	CPE_SVC_REL_LOCK(&cpe_d.cpe_svc_lock, "cpe_svc");
+}
+
+static void *cpe_register_generic(struct cpe_info *t_info,
+		void notification_callback(
+			const struct cpe_svc_notification *parameter),
+		void cmi_callback(
+			const struct cmi_api_notification *parameter),
+		u32 mask, u32 service, const char *name)
+{
+	struct cpe_notif_node *n = NULL;
+
+	n = kzalloc(sizeof(struct cpe_notif_node),
+		    GFP_KERNEL);
+	if (!n) {
+		pr_err("%s: No memory for notification, size = %zu\n",
+			__func__, sizeof(struct cpe_notif_node));
+		return NULL;
+	}
+	n->mask = mask;
+	n->service = service;
+	n->notif.notification = notification_callback;
+	n->notif.cmi_notification = cmi_callback;
+	n->context = t_info;
+	n->disabled = false;
+	n->name = name;
+
+	CPE_SVC_GRAB_LOCK(&cpe_d.cpe_svc_lock, "cpe_svc");
+	/* Make sure CPE core service is first */
+	if (service == CMI_CPE_CORE_SERVICE_ID)
+		list_add(&n->list, &t_info->client_list);
+	else
+		list_add_tail(&n->list, &t_info->client_list);
+	CPE_SVC_REL_LOCK(&cpe_d.cpe_svc_lock, "cpe_svc");
+
+	return n;
+}
+
+static enum cpe_svc_result cpe_deregister_generic(struct cpe_info *t_info,
+		void *reg_handle)
+{
+	struct cpe_notif_node *n = (struct cpe_notif_node *)reg_handle;
+
+	if (!t_info || !reg_handle) {
+		pr_err("%s: invalid handle\n", __func__);
+		return CPE_SVC_INVALID_HANDLE;
+	}
+
+	CPE_SVC_GRAB_LOCK(&cpe_d.cpe_svc_lock, "cpe_svc");
+	list_del(&(n->list));
+	kfree(reg_handle);
+	CPE_SVC_REL_LOCK(&cpe_d.cpe_svc_lock, "cpe_svc");
+
+	return CPE_SVC_SUCCESS;
+}
+
+static enum cpe_svc_result cpe_svc_tgt_init(struct cpe_svc_codec_info_v1 *i,
+		struct cpe_svc_tgt_abstraction *abs)
+{
+	if (!i || !abs) {
+		pr_err("%s: Incorrect information provided\n",
+			__func__);
+		return CPE_SVC_FAILED;
+	}
+
+	switch (i->id) {
+	case CPE_SVC_CODEC_TOMTOM:
+		return cpe_tgt_tomtom_init(i, abs);
+	case CPE_SVC_CODEC_WCD9335:
+		return cpe_tgt_wcd9335_init(i, abs);
+	default:
+		pr_err("%s: Codec type %d not supported\n",
+			__func__, i->id);
+		return CPE_SVC_FAILED;
+	}
+
+	return CPE_SVC_SUCCESS;
+}
+
+static void cpe_notify_cmi_client(struct cpe_info *t_info, u8 *payload,
+		enum cpe_svc_result result)
+{
+	struct cpe_notif_node *n = NULL;
+	struct cmi_api_notification notif;
+	struct cmi_hdr *hdr;
+	u8 service = 0;
+
+	if (!t_info || !payload) {
+		pr_err("%s: invalid payload/handle\n",
+			__func__);
+		return;
+	}
+
+	hdr = CMI_GET_HEADER(payload);
+	service = CMI_HDR_GET_SERVICE(hdr);
+
+	notif.event = CPE_SVC_CMI_MSG;
+	notif.result = result;
+	notif.message = payload;
+
+	CPE_SVC_GRAB_LOCK(&cpe_d.cpe_svc_lock, "cpe_svc");
+	list_for_each_entry(n, &t_info->client_list, list) {
+		if ((n->mask & CPE_SVC_CMI_MSG) &&
+		    n->service == service &&
+		    n->notif.cmi_notification) {
+			n->notif.cmi_notification(&notif);
+			break;
+		}
+	}
+	CPE_SVC_REL_LOCK(&cpe_d.cpe_svc_lock, "cpe_svc");
+}
+
+static void cpe_toggle_irq_notification(struct cpe_info *t_info, u32 value)
+{
+	if (cpe_d.cpe_irq_control_callback)
+		cpe_d.cpe_irq_control_callback(value);
+}
+
+static void cpe_command_cleanup(struct cpe_command_node *command_node)
+{
+	switch (command_node->command) {
+	case CPE_CMD_SEND_MSG:
+	case CPE_CMD_SEND_TRANS_MSG:
+	case CPE_CMD_SEND_MSG_COMPLETE:
+	case CPE_CMD_SHUTDOWN:
+	case CPE_CMD_READ:
+		kfree(command_node->data);
+		command_node->data = NULL;
+		break;
+	default:
+		pr_err("%s: unhandled command\n",
+			__func__);
+		break;
+	}
+}
+
+static enum cpe_svc_result cpe_send_msg_to_inbox(
+		struct cpe_info *t_info, u32 opcode,
+		struct cpe_send_msg *msg)
+{
+	size_t bytes = 0;
+	size_t inbox_size =
+		t_info->tgt->tgt_get_cpe_info()->inbox_size;
+	struct cmi_hdr *hdr;
+	enum cpe_svc_result rc = CPE_SVC_SUCCESS;
+
+	memset(t_info->tgt->inbox, 0, inbox_size);
+	hdr = CMI_GET_HEADER(t_info->tgt->inbox);
+	CMI_HDR_SET_SESSION(hdr, 1);
+	CMI_HDR_SET_SERVICE(hdr, CMI_CPE_CORE_SERVICE_ID);
+	CMI_HDR_SET_VERSION(hdr, CMI_DRIVER_SUPPORTED_VERSION);
+	CMI_HDR_SET_OBM(hdr, CMI_OBM_FLAG_IN_BAND);
+
+	switch (opcode) {
+	case CPE_CORE_SVC_CMD_SHARED_MEM_ALLOC: {
+		struct cmi_core_svc_cmd_shared_mem_alloc *m;
+		CMI_HDR_SET_OPCODE(hdr,
+			CPE_CORE_SVC_CMD_SHARED_MEM_ALLOC);
+		CMI_HDR_SET_PAYLOAD_SIZE(hdr,
+			sizeof(struct cmi_core_svc_cmd_shared_mem_alloc));
+		m = (struct cmi_core_svc_cmd_shared_mem_alloc *)
+			CMI_GET_PAYLOAD(t_info->tgt->inbox);
+		m->size = CPE_MSG_BUFFER_SIZE;
+		pr_debug("%s: send shared mem alloc msg to cpe inbox\n",
+			 __func__);
+		}
+		break;
+	case CPE_CORE_SVC_CMD_DRAM_ACCESS_REQ:
+		CMI_HDR_SET_OPCODE(hdr,
+			CPE_CORE_SVC_CMD_DRAM_ACCESS_REQ);
+		CMI_HDR_SET_PAYLOAD_SIZE(hdr, 0);
+		pr_debug("%s: Creating DRAM access request msg\n",
+			 __func__);
+		break;
+
+	case CPE_CMI_BASIC_RSP_OPCODE: {
+		struct cmi_basic_rsp_result *rsp;
+		CMI_HDR_SET_OPCODE(hdr,
+			       CPE_CMI_BASIC_RSP_OPCODE);
+		CMI_HDR_SET_PAYLOAD_SIZE(hdr,
+			sizeof(struct cmi_basic_rsp_result));
+		rsp = (struct cmi_basic_rsp_result *)
+				CMI_GET_PAYLOAD(t_info->tgt->inbox);
+		rsp->status = 0;
+		pr_debug("%s: send basic response\n", __func__);
+		}
+		break;
+
+	default:
+		if (msg->address != 0) {
+			struct cmi_msg_transport *m = NULL;
+			struct cpe_svc_mem_segment mem_seg;
+
+			mem_seg.type = CPE_SVC_DATA_MEM;
+			if (msg->isobm) {
+				struct cmi_obm *obm = (struct cmi_obm *)
+				CMI_GET_PAYLOAD(msg->payload);
+				mem_seg.cpe_addr = obm->mem_handle;
+				mem_seg.data = (u8 *)obm->data_ptr.kvaddr;
+				mem_seg.size = obm->size;
+				t_info->tgt->tgt_write_ram(t_info, &mem_seg);
+			}
+
+			mem_seg.cpe_addr = msg->address;
+			mem_seg.data = msg->payload;
+			mem_seg.size = msg->size;
+			t_info->tgt->tgt_write_ram(t_info, &mem_seg);
+
+			hdr = CMI_GET_HEADER(t_info->tgt->inbox);
+			CMI_HDR_SET_OPCODE(hdr, CMI_MSG_TRANSPORT);
+			m = (struct cmi_msg_transport *)
+				CMI_GET_PAYLOAD(t_info->tgt->inbox);
+			m->addr = msg->address;
+			m->size = msg->size;
+			CMI_HDR_SET_PAYLOAD_SIZE(hdr,
+				sizeof(struct cmi_msg_transport));
+		} else {
+			memcpy(t_info->tgt->inbox, msg->payload,
+			       msg->size);
+		}
+
+		break;
+	}
+
+	pr_debug("%s: sending message to cpe inbox\n",
+		  __func__);
+	bytes = sizeof(struct cmi_hdr);
+	hdr = CMI_GET_HEADER(t_info->tgt->inbox);
+	bytes += CMI_HDR_GET_PAYLOAD_SIZE(hdr);
+	rc = t_info->tgt->tgt_write_mailbox(t_info->tgt->inbox, bytes);
+
+	return rc;
+}
+
+static bool cpe_is_cmd_clk_req(void *cmd)
+{
+	struct cmi_hdr *hdr;
+
+	hdr = CMI_GET_HEADER(cmd);
+
+	if (CMI_HDR_GET_SERVICE(hdr) == CMI_CPE_CORE_SERVICE_ID &&
+	    CMI_GET_OPCODE(cmd) == CPE_CORE_SVC_CMD_CLK_FREQ_REQUEST)
+		return true;
+
+	return false;
+}
+
+static enum cpe_svc_result cpe_process_clk_change_req(
+		struct cpe_info *t_info)
+{
+	struct cmi_core_svc_cmd_clk_freq_request *req;
+
+	req = (struct cmi_core_svc_cmd_clk_freq_request *)
+			CMI_GET_PAYLOAD(t_info->tgt->outbox);
+
+	if (!cpe_d.cpe_change_freq_plan_cb) {
+		pr_err("%s: No support for clk freq change\n",
+			__func__);
+		return CPE_SVC_FAILED;
+	}
+
+	cpe_d.cpe_change_freq_plan_cb(cpe_d.cdc_priv,
+				      req->clk_freq);
+
+	/* send a basic response */
+	cpe_send_msg_to_inbox(t_info,
+		CPE_CMI_BASIC_RSP_OPCODE, NULL);
+
+	return CPE_SVC_SUCCESS;
+}
+
+static void cpe_process_irq_int(u32 irq,
+		struct cpe_info *t_info)
+{
+	struct cpe_command_node temp_node;
+	struct cpe_send_msg *m;
+	u8 size = 0;
+	bool err_irq = false;
+	struct cmi_hdr *hdr;
+
+	pr_debug("%s: irq = %u\n", __func__, irq);
+
+	if (!t_info) {
+		pr_err("%s: Invalid handle\n",
+			__func__);
+		return;
+	}
+
+	CPE_SVC_GRAB_LOCK(&cpe_d.cpe_api_mutex, "cpe_api");
+	switch (irq) {
+	case CPE_IRQ_OUTBOX_IRQ:
+		size = t_info->tgt->tgt_get_cpe_info()->outbox_size;
+		t_info->tgt->tgt_read_mailbox(t_info->tgt->outbox, size);
+		break;
+
+	case CPE_IRQ_MEM_ACCESS_ERROR:
+		err_irq = true;
+		cpe_change_state(t_info, CPE_STATE_OFFLINE, CPE_SS_IDLE);
+		break;
+
+	case CPE_IRQ_WDOG_BITE:
+	case CPE_IRQ_RCO_WDOG_INT:
+		err_irq = true;
+		__cpe_svc_shutdown(t_info);
+		break;
+
+	case CPE_IRQ_FLL_LOCK_LOST:
+	default:
+		err_irq = true;
+		break;
+	}
+
+	if (err_irq) {
+		pr_err("%s: CPE error IRQ %u occurred\n",
+			__func__, irq);
+		CPE_SVC_REL_LOCK(&cpe_d.cpe_api_mutex, "cpe_api");
+		return;
+	}
+
+	switch (t_info->state) {
+	case CPE_STATE_BOOTING:
+
+		switch (t_info->substate) {
+		case CPE_SS_BOOT:
+			temp_node.command = CPE_CMD_BOOT_INITIALIZE;
+			temp_node.result = CPE_SVC_SUCCESS;
+			t_info->substate = CPE_SS_BOOT_INIT;
+			t_info->cpe_process_command(&temp_node);
+			break;
+
+		case CPE_SS_BOOT_INIT:
+			temp_node.command = CPE_CMD_BOOT_COMPLETE;
+			temp_node.result = CPE_SVC_SUCCESS;
+			t_info->substate = CPE_SS_ONLINE;
+			t_info->cpe_process_command(&temp_node);
+			break;
+
+		default:
+			pr_debug("%s: unhandled substate %d for state %d\n",
+				 __func__, t_info->substate, t_info->state);
+			break;
+		}
+		break;
+
+	case CPE_STATE_SENDING_MSG:
+		hdr = CMI_GET_HEADER(t_info->tgt->outbox);
+		if (CMI_GET_OPCODE(t_info->tgt->outbox) ==
+		    CPE_LSM_SESSION_EVENT_DETECTION_STATUS_V2) {
+			pr_debug("%s: session_id: %u, state: %d, substate: %d, event received\n",
+				 __func__, CMI_HDR_GET_SESSION_ID(hdr),
+				t_info->state, t_info->substate);
+			temp_node.command = CPE_CMD_PROC_INCOMING_MSG;
+			temp_node.data = NULL;
+			t_info->cpe_process_command(&temp_node);
+			break;
+		}
+
+		m = (struct cpe_send_msg *)t_info->pending;
+
+		switch (t_info->substate) {
+		case CPE_SS_MSG_REQUEST_ACCESS:
+			cpe_send_cmd_to_thread(t_info,
+				CPE_CMD_SEND_TRANS_MSG, m, true);
+			break;
+
+		case CPE_SS_MSG_SEND_INBOX:
+			if (cpe_is_cmd_clk_req(t_info->tgt->outbox))
+				cpe_process_clk_change_req(t_info);
+			else
+				cpe_send_cmd_to_thread(t_info,
+					CPE_CMD_SEND_MSG_COMPLETE, m, true);
+			break;
+
+		default:
+			pr_debug("%s: unhandled substate %d for state %d\n",
+				 __func__, t_info->substate, t_info->state);
+			break;
+		}
+		break;
+
+	case CPE_STATE_IDLE:
+		pr_debug("%s: Message received, notifying client\n",
+			 __func__);
+		temp_node.command = CPE_CMD_PROC_INCOMING_MSG;
+		temp_node.data = NULL;
+		t_info->cpe_process_command(&temp_node);
+		break;
+
+	default:
+		pr_debug("%s: unhandled state %d\n",
+			 __func__, t_info->state);
+		break;
+	}
+
+	CPE_SVC_REL_LOCK(&cpe_d.cpe_api_mutex, "cpe_api");
+}
+
+static void broadcast_boot_failed(void)
+{
+	struct cpe_info *t_info = cpe_d.cpe_default_handle;
+	struct cpe_svc_notification payload;
+
+	payload.event = CPE_SVC_BOOT_FAILED;
+	payload.result = CPE_SVC_FAILED;
+	payload.payload = NULL;
+	if (t_info)
+		payload.private_data =
+			t_info->client_context;
+	cpe_broadcast_notification(t_info, &payload);
+}
+
+static enum cpe_svc_result broadcast_boot_event(
+		struct cpe_info *t_info)
+{
+	struct cpe_svc_notification payload;
+
+	payload.event = CPE_SVC_ONLINE;
+	payload.result = CPE_SVC_SUCCESS;
+	payload.payload = NULL;
+	if (t_info)
+		payload.private_data =
+			t_info->client_context;
+	cpe_broadcast_notification(t_info, &payload);
+
+	return CPE_SVC_SUCCESS;
+}
+
+static enum cpe_process_result cpe_boot_initialize(struct cpe_info *t_info,
+	enum cpe_svc_result *cpe_rc)
+{
+	enum cpe_process_result rc = CPE_PROC_FAILED;
+	struct cpe_svc_notification payload;
+	struct cmi_core_svc_event_system_boot *p = NULL;
+
+	if (CMI_GET_OPCODE(t_info->tgt->outbox) !=
+		CPE_CORE_SVC_EVENT_SYSTEM_BOOT) {
+		broadcast_boot_failed();
+		return rc;
+	}
+
+	p = (struct cmi_core_svc_event_system_boot *)
+		CMI_GET_PAYLOAD(t_info->tgt->outbox);
+	if (p->status != CPE_BOOT_SUCCESS) {
+		pr_err("%s: cpe boot failed, status = %d\n",
+			__func__, p->status);
+		broadcast_boot_failed();
+		return rc;
+	}
+
+	/* boot was successful */
+	if (p->version ==
+	    CPE_CORE_VERSION_SYSTEM_BOOT_EVENT) {
+		cpe_d.cpe_debug_vector.debug_address =
+				p->sfr_buff_address;
+		cpe_d.cpe_debug_vector.debug_buffer_size =
+				p->sfr_buff_size;
+		cpe_d.cpe_debug_vector.status = p->status;
+		payload.event = CPE_SVC_BOOT;
+		payload.result = CPE_SVC_SUCCESS;
+		payload.payload = (void *)&cpe_d.cpe_debug_vector;
+		payload.private_data = t_info->client_context;
+		cpe_broadcast_notification(t_info, &payload);
+	}
+	cpe_change_state(t_info, CPE_STATE_BOOTING,
+			 CPE_SS_BOOT_INIT);
+	(*cpe_rc) = cpe_send_msg_to_inbox(t_info,
+			CPE_CORE_SVC_CMD_SHARED_MEM_ALLOC, NULL);
+	rc = CPE_PROC_SUCCESS;
+	return rc;
+}
+
+static void cpe_svc_core_cmi_handler(
+		const struct cmi_api_notification *parameter)
+{
+	struct cmi_hdr *hdr;
+
+	if (!parameter)
+		return;
+
+	pr_debug("%s: event = %d\n",
+		 __func__, parameter->event);
+
+	if (parameter->event != CMI_API_MSG)
+		return;
+
+	hdr = (struct cmi_hdr *) parameter->message;
+
+	if (hdr->opcode == CPE_CMI_BASIC_RSP_OPCODE) {
+		struct cmi_basic_rsp_result *result;
+
+		result = (struct cmi_basic_rsp_result *)
+			(((u8 *)parameter->message) + sizeof(*hdr));
+		if (result->status)
+			pr_err("%s: error response, error code = %u\n",
+				__func__, result->status);
+		complete(&cpe_d.cpe_default_handle->core_svc_cmd_compl);
+	}
+}
+
+static void cpe_clk_plan_work(struct work_struct *work)
+{
+	struct cpe_info *t_info = NULL;
+	size_t size = 0;
+	struct cpe_svc_cfg_clk_plan plan;
+	u8 *cmi_msg;
+	struct cmi_hdr *hdr;
+	int rc;
+
+	t_info = container_of(work, struct cpe_info, clk_plan_work);
+	if (!t_info) {
+		pr_err("%s: Invalid handle for cpe_info\n",
+			__func__);
+		return;
+	}
+
+	/* Cannot proceed without the clk plans query callback */
+	if (!cpe_d.cpe_query_freq_plans_cb) {
+		pr_err("%s: No support for querying clk plans\n",
+			__func__);
+		return;
+	}
+
+	/* Register the core service */
+	cpe_d.cpe_cmi_handle = cmi_register(
+					cpe_svc_core_cmi_handler,
+					CMI_CPE_CORE_SERVICE_ID);
+
+	/* Query the clk plans and send them to the CPE */
+	cpe_d.cpe_query_freq_plans_cb(cpe_d.cdc_priv, &plan);
+	size = sizeof(plan.current_clk_feq) +
+		sizeof(plan.num_clk_freqs);
+	size += plan.num_clk_freqs *
+		  sizeof(plan.clk_freqs[0]);
+	cmi_msg = kzalloc(size + sizeof(struct cmi_hdr),
+			  GFP_KERNEL);
+	if (!cmi_msg) {
+		pr_err("%s: no memory for cmi_msg\n",
+			__func__);
+		return;
+	}
+
+	hdr = (struct cmi_hdr *) cmi_msg;
+	CMI_HDR_SET_OPCODE(hdr,
+			   CPE_CORE_SVC_CMD_CFG_CLK_PLAN);
+	CMI_HDR_SET_SERVICE(hdr, CMI_CPE_CORE_SERVICE_ID);
+	CMI_HDR_SET_SESSION(hdr, 1);
+	CMI_HDR_SET_VERSION(hdr, CMI_DRIVER_SUPPORTED_VERSION);
+	CMI_HDR_SET_PAYLOAD_SIZE(hdr, size);
+	memcpy(CMI_GET_PAYLOAD(cmi_msg), &plan,
+	       size);
+	cmi_send_msg(cmi_msg);
+
+	/* Wait for clk plan command to complete */
+	rc = wait_for_completion_timeout(&t_info->core_svc_cmd_compl,
+					 (10 * HZ));
+	if (!rc) {
+		pr_err("%s: clk plan cmd timed out\n",
+			__func__);
+		goto cmd_fail;
+	}
+
+	/* clk plan cmd is successful, send start notification */
+	if (t_info->cpe_start_notification)
+		t_info->cpe_start_notification(t_info);
+	else
+		pr_err("%s: no start notification\n",
+			 __func__);
+
+cmd_fail:
+	kfree(cmi_msg);
+	cmi_deregister(cpe_d.cpe_cmi_handle);
+}
+
+static enum cpe_process_result cpe_boot_complete(
+		struct cpe_info *t_info)
+{
+	struct cmi_core_svc_cmdrsp_shared_mem_alloc *p = NULL;
+
+	if (CMI_GET_OPCODE(t_info->tgt->outbox) !=
+		CPE_CORE_SVC_CMDRSP_SHARED_MEM_ALLOC) {
+		broadcast_boot_failed();
+		return CPE_PROC_FAILED;
+	}
+
+	p = (struct cmi_core_svc_cmdrsp_shared_mem_alloc *)
+		CMI_GET_PAYLOAD(t_info->tgt->outbox);
+	cpe_d.cpe_msg_buffer = p->addr;
+
+	if (cpe_d.cpe_msg_buffer == 0) {
+		pr_err("%s: Invalid cpe buffer for message\n",
+			__func__);
+		broadcast_boot_failed();
+		return CPE_PROC_FAILED;
+	}
+
+	cpe_change_state(t_info, CPE_STATE_IDLE, CPE_SS_IDLE);
+	cpe_create_worker_thread(t_info);
+
+	if (t_info->codec_id != CPE_SVC_CODEC_TOMTOM) {
+		schedule_work(&t_info->clk_plan_work);
+	} else {
+		if (t_info->cpe_start_notification)
+			t_info->cpe_start_notification(t_info);
+		else
+			pr_err("%s: no start notification\n",
+				__func__);
+	}
+
+	pr_debug("%s: boot complete\n", __func__);
+	return CPE_PROC_SUCCESS;
+}
+
+static enum cpe_process_result cpe_process_send_msg(
+	struct cpe_info *t_info,
+	enum cpe_svc_result *cpe_rc,
+	struct cpe_command_node *command_node)
+{
+	enum cpe_process_result rc = CPE_PROC_SUCCESS;
+	struct cpe_send_msg *m =
+		(struct cpe_send_msg *)command_node->data;
+	u32 size = m->size;
+
+	if (t_info->pending) {
+		pr_debug("%s: message queued\n", __func__);
+		*cpe_rc = CPE_SVC_SUCCESS;
+		return CPE_PROC_QUEUED;
+	}
+
+	pr_debug("%s: Send CMI message, size = %u\n",
+		 __func__, size);
+
+	if (size <= t_info->tgt->tgt_get_cpe_info()->inbox_size) {
+		pr_debug("%s: Msg fits mailbox, size %u\n",
+			 __func__, size);
+		cpe_change_state(t_info, CPE_STATE_SENDING_MSG,
+			CPE_SS_MSG_SEND_INBOX);
+		t_info->pending = m;
+		*cpe_rc = cpe_send_msg_to_inbox(t_info, 0, m);
+	} else if (size < CPE_MSG_BUFFER_SIZE) {
+		m->address = cpe_d.cpe_msg_buffer;
+		pr_debug("%s: Message req CMI mem access\n",
+			 __func__);
+		t_info->pending = m;
+		cpe_change_state(t_info, CPE_STATE_SENDING_MSG,
+			CPE_SS_MSG_REQUEST_ACCESS);
+		*cpe_rc = cpe_send_msg_to_inbox(t_info,
+			CPE_CORE_SVC_CMD_DRAM_ACCESS_REQ, m);
+	} else {
+		pr_debug("%s: Invalid msg size %u\n",
+			 __func__, size);
+		cpe_command_cleanup(command_node);
+		rc = CPE_PROC_FAILED;
+		cpe_change_state(t_info, CPE_STATE_IDLE,
+			CPE_SS_IDLE);
+	}
+
+	return rc;
+}
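+
+/*
+ * Routing summary for cpe_process_send_msg(): messages that fit the
+ * hardware inbox are written to the mailbox registers directly, while
+ * larger ones (up to CPE_MSG_BUFFER_SIZE) are staged through the shared
+ * DRAM window granted at boot, preceded by a
+ * CPE_CORE_SVC_CMD_DRAM_ACCESS_REQ handshake.  As pseudo-code:
+ *
+ *	if (size <= inbox_size)
+ *		send via inbox;
+ *	else if (size < CPE_MSG_BUFFER_SIZE)
+ *		request DRAM access, then transport via cpe_msg_buffer;
+ *	else
+ *		fail and return to idle;
+ */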
+
+static enum cpe_process_result cpe_process_incoming(
+		struct cpe_info *t_info)
+{
+	enum cpe_process_result rc = CPE_PROC_FAILED;
+	struct cmi_hdr *hdr;
+
+	hdr = CMI_GET_HEADER(t_info->tgt->outbox);
+
+	if (CMI_HDR_GET_SERVICE(hdr) ==
+	    CMI_CPE_CORE_SERVICE_ID) {
+		pr_debug("%s: core service message received\n",
+			 __func__);
+
+		switch (CMI_GET_OPCODE(t_info->tgt->outbox)) {
+		case CPE_CORE_SVC_CMD_CLK_FREQ_REQUEST:
+			cpe_process_clk_change_req(t_info);
+			rc = CPE_PROC_SUCCESS;
+			break;
+		case CMI_MSG_TRANSPORT:
+			pr_debug("%s: transport msg received\n",
+				 __func__);
+			rc = CPE_PROC_SUCCESS;
+			break;
+		case CPE_CMI_BASIC_RSP_OPCODE:
+			pr_debug("%s: received basic rsp\n",
+				 __func__);
+			rc = CPE_PROC_SUCCESS;
+			break;
+		default:
+			pr_debug("%s: unknown message received\n",
+				 __func__);
+			break;
+		}
+	} else {
+		/* if service id is for a CMI client, notify client */
+		pr_debug("%s: Message received, notifying client\n",
+			 __func__);
+		cpe_notify_cmi_client(t_info,
+			t_info->tgt->outbox, CPE_SVC_SUCCESS);
+		rc = CPE_PROC_SUCCESS;
+	}
+
+	return rc;
+}
+
+static enum cpe_process_result cpe_process_kill_thread(
+	struct cpe_info *t_info,
+	struct cpe_command_node *command_node)
+{
+	struct cpe_svc_notification payload;
+
+	cpe_d.cpe_msg_buffer = 0;
+	payload.result = CPE_SVC_SHUTTING_DOWN;
+	payload.event = CPE_SVC_OFFLINE;
+	payload.payload = NULL;
+	payload.private_data = t_info->client_context;
+	/*
+	 * Make state as offline before broadcasting
+	 * the message to clients.
+	 */
+	cpe_change_state(t_info, CPE_STATE_OFFLINE,
+			 CPE_SS_IDLE);
+	cpe_broadcast_notification(t_info, &payload);
+
+	return CPE_PROC_KILLED;
+}
+
+static enum cpe_process_result cpe_mt_process_cmd(
+		struct cpe_command_node *command_node)
+{
+	struct cpe_info *t_info = cpe_d.cpe_default_handle;
+	enum cpe_svc_result cpe_rc = CPE_SVC_SUCCESS;
+	enum cpe_process_result rc = CPE_PROC_SUCCESS;
+	struct cpe_send_msg *m;
+	struct cmi_hdr *hdr;
+	u8 service = 0;
+	u8 retries = 0;
+
+	if (!t_info || !command_node) {
+		pr_err("%s: Invalid handle/command node\n",
+			__func__);
+		return CPE_PROC_FAILED;
+	}
+
+	pr_debug("%s: cmd = %u\n", __func__, command_node->command);
+
+	cpe_rc = cpe_is_command_valid(t_info, command_node->command);
+
+	if (cpe_rc != CPE_SVC_SUCCESS) {
+		pr_err("%s: Invalid command %d, err = %d\n",
+			__func__, command_node->command, cpe_rc);
+		return CPE_PROC_FAILED;
+	}
+
+	switch (command_node->command) {
+
+	case CPE_CMD_BOOT_INITIALIZE:
+		rc = cpe_boot_initialize(t_info, &cpe_rc);
+		break;
+
+	case CPE_CMD_BOOT_COMPLETE:
+		rc = cpe_boot_complete(t_info);
+		break;
+
+	case CPE_CMD_SEND_MSG:
+		rc = cpe_process_send_msg(t_info, &cpe_rc,
+					  command_node);
+		break;
+
+	case CPE_CMD_SEND_TRANS_MSG:
+		m = (struct cpe_send_msg *)command_node->data;
+
+		while (retries < CPE_SVC_INACTIVE_STATE_RETRIES_MAX) {
+			if (t_info->tgt->tgt_is_active()) {
+				++retries;
+				/* Wait for CPE to be inactive */
+				usleep_range(5000, 5100);
+			} else {
+				break;
+			}
+		}
+
+		pr_debug("%s: cpe inactive after %d attempts\n",
+			 __func__, retries);
+
+		cpe_change_state(t_info, CPE_STATE_SENDING_MSG,
+				CPE_SS_MSG_SEND_INBOX);
+		rc = cpe_send_msg_to_inbox(t_info, 0, m);
+		break;
+
+	case CPE_CMD_SEND_MSG_COMPLETE:
+		hdr = CMI_GET_HEADER(t_info->tgt->outbox);
+		service = CMI_HDR_GET_SERVICE(hdr);
+		pr_debug("%s: msg send success, notifying clients\n",
+			 __func__);
+		cpe_command_cleanup(command_node);
+		t_info->pending = NULL;
+		cpe_change_state(t_info,
+				 CPE_STATE_IDLE, CPE_SS_IDLE);
+		cpe_notify_cmi_client(t_info,
+			t_info->tgt->outbox, CPE_SVC_SUCCESS);
+		break;
+
+	case CPE_CMD_PROC_INCOMING_MSG:
+		rc = cpe_process_incoming(t_info);
+		break;
+
+	case CPE_CMD_KILL_THREAD:
+		rc = cpe_process_kill_thread(t_info, command_node);
+		break;
+
+	default:
+		pr_err("%s: unhandled cpe cmd = %d\n",
+			__func__, command_node->command);
+		break;
+	}
+
+	if (cpe_rc != CPE_SVC_SUCCESS) {
+		pr_err("%s: failed to execute command\n", __func__);
+		if (t_info->pending) {
+			m = (struct cpe_send_msg *)t_info->pending;
+			cpe_notify_cmi_client(t_info, m->payload,
+					      CPE_SVC_FAILED);
+			t_info->pending = NULL;
+		}
+
+		cpe_command_cleanup(command_node);
+		rc = CPE_PROC_FAILED;
+		cpe_change_state(t_info, CPE_STATE_IDLE,
+			CPE_SS_IDLE);
+	}
+
+	return rc;
+}
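+
+/*
+ * Command lifecycle, for reference: CPE_CMD_SEND_MSG either goes straight
+ * to the inbox or, for large payloads, requests DRAM access first; the
+ * outbox IRQ then re-queues CPE_CMD_SEND_TRANS_MSG and finally
+ * CPE_CMD_SEND_MSG_COMPLETE, which notifies the CMI client and returns
+ * the state machine to CPE_STATE_IDLE.
+ */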
+
+static enum cpe_svc_result cpe_mt_validate_cmd(
+		const struct cpe_info *t_info,
+		enum cpe_command command)
+{
+	enum cpe_svc_result rc = CPE_SVC_SUCCESS;
+
+	if (!t_info || !t_info->initialized) {
+		pr_err("%s: cpe service is not ready\n",
+			__func__);
+		return CPE_SVC_NOT_READY;
+	}
+
+	switch (t_info->state) {
+	case CPE_STATE_UNINITIALIZED:
+	case CPE_STATE_INITIALIZED:
+		switch (command) {
+		case CPE_CMD_RESET:
+		case CPE_CMD_DL_SEGMENT:
+		case CPE_CMD_RAMDUMP:
+		case CPE_CMD_PROCESS_IRQ:
+		case CPE_CMD_KILL_THREAD:
+		case CPE_CMD_DEINITIALIZE:
+		case CPE_CMD_FTM_TEST:
+			rc = CPE_SVC_SUCCESS;
+			break;
+		default:
+			rc = CPE_SVC_NOT_READY;
+			break;
+		}
+		break;
+
+	case CPE_STATE_DOWNLOADING:
+		switch (command) {
+		case CPE_CMD_RESET:
+		case CPE_CMD_DL_SEGMENT:
+		case CPE_CMD_BOOT:
+		case CPE_CMD_FTM_TEST:
+			rc = CPE_SVC_SUCCESS;
+			break;
+		default:
+			rc = CPE_SVC_NOT_READY;
+			break;
+		}
+		break;
+
+	case CPE_STATE_BOOTING:
+		switch (command) {
+		case CPE_CMD_PROCESS_IRQ:
+		case CPE_CMD_BOOT_INITIALIZE:
+		case CPE_CMD_BOOT_COMPLETE:
+		case CPE_CMD_SHUTDOWN:
+			rc = CPE_SVC_SUCCESS;
+			break;
+		case CPE_CMD_FTM_TEST:
+			rc = CPE_SVC_BUSY;
+			break;
+		default:
+			rc = CPE_SVC_NOT_READY;
+			break;
+		}
+		break;
+
+	case CPE_STATE_IDLE:
+		switch (command) {
+		case CPE_CMD_SEND_MSG:
+		case CPE_CMD_SEND_TRANS_MSG:
+		case CPE_CMD_SEND_MSG_COMPLETE:
+		case CPE_CMD_PROCESS_IRQ:
+		case CPE_CMD_RESET:
+		case CPE_CMD_SHUTDOWN:
+		case CPE_CMD_KILL_THREAD:
+		case CPE_CMD_PROC_INCOMING_MSG:
+			rc = CPE_SVC_SUCCESS;
+			break;
+		case CPE_CMD_FTM_TEST:
+			rc = CPE_SVC_BUSY;
+			break;
+		default:
+			rc = CPE_SVC_FAILED;
+			break;
+		}
+		break;
+
+	case CPE_STATE_SENDING_MSG:
+		switch (command) {
+		case CPE_CMD_SEND_MSG:
+		case CPE_CMD_SEND_TRANS_MSG:
+		case CPE_CMD_SEND_MSG_COMPLETE:
+		case CPE_CMD_PROCESS_IRQ:
+		case CPE_CMD_SHUTDOWN:
+		case CPE_CMD_KILL_THREAD:
+		case CPE_CMD_PROC_INCOMING_MSG:
+			rc = CPE_SVC_SUCCESS;
+			break;
+		case CPE_CMD_FTM_TEST:
+			rc = CPE_SVC_BUSY;
+			break;
+		default:
+			rc = CPE_SVC_FAILED;
+			break;
+		}
+		break;
+
+	case CPE_STATE_OFFLINE:
+		switch (command) {
+		case CPE_CMD_RESET:
+		case CPE_CMD_RAMDUMP:
+		case CPE_CMD_KILL_THREAD:
+			rc = CPE_SVC_SUCCESS;
+			break;
+		default:
+			rc = CPE_SVC_NOT_READY;
+			break;
+		}
+		break;
+
+	default:
+		pr_debug("%s: unhandled state %d\n",
+			 __func__, t_info->state);
+		break;
+	}
+
+	if (rc != CPE_SVC_SUCCESS)
+		pr_err("%s: invalid command %d, state = %d\n",
+			__func__, command, t_info->state);
+	return rc;
+}
+
+void *cpe_svc_initialize(
+		void irq_control_callback(u32 enable),
+		const void *codec_info, void *context)
+{
+	struct cpe_info *t_info = NULL;
+	const struct cpe_svc_hw_cfg *cap = NULL;
+	enum cpe_svc_result rc = CPE_SVC_SUCCESS;
+	struct cpe_svc_init_param *init_context =
+		(struct cpe_svc_init_param *) context;
+	void *client_context = NULL;
+
+	if (cpe_d.cpe_default_handle &&
+	    cpe_d.cpe_default_handle->initialized == true)
+		return (void *)cpe_d.cpe_default_handle;
+	cpe_d.cpe_query_freq_plans_cb = NULL;
+	cpe_d.cpe_change_freq_plan_cb = NULL;
+
+	if (context) {
+		client_context = init_context->context;
+		switch (init_context->version) {
+		case CPE_SVC_INIT_PARAM_V1:
+			cpe_d.cpe_query_freq_plans_cb =
+				init_context->query_freq_plans_cb;
+			cpe_d.cpe_change_freq_plan_cb =
+				init_context->change_freq_plan_cb;
+			break;
+		default:
+			break;
+		}
+	}
+
+	if (!cpe_d.cpe_default_handle) {
+		cpe_d.cpe_default_handle = kzalloc(sizeof(struct cpe_info),
+					     GFP_KERNEL);
+		if (!cpe_d.cpe_default_handle) {
+			pr_err("%s: no memory for cpe handle, size = %zu\n",
+				__func__, sizeof(struct cpe_info));
+			goto err_register;
+		}
+	}
+
+	t_info = cpe_d.cpe_default_handle;
+	t_info->client_context = client_context;
+
+	INIT_LIST_HEAD(&t_info->client_list);
+	cpe_d.cdc_priv = client_context;
+	INIT_WORK(&t_info->clk_plan_work, cpe_clk_plan_work);
+	init_completion(&t_info->core_svc_cmd_compl);
+
+	t_info->tgt = kzalloc(sizeof(struct cpe_svc_tgt_abstraction),
+			      GFP_KERNEL);
+	if (!t_info->tgt) {
+		pr_err("%s: target allocation failed, size = %zu\n",
+			__func__,
+			sizeof(struct cpe_svc_tgt_abstraction));
+		goto err_tgt_alloc;
+	}
+	t_info->codec_id =
+		((struct cpe_svc_codec_info_v1 *) codec_info)->id;
+
+	rc = cpe_svc_tgt_init((struct cpe_svc_codec_info_v1 *)codec_info,
+			t_info->tgt);
+
+	if (rc != CPE_SVC_SUCCESS) {
+		pr_err("%s: target initialization failed, err = %d\n",
+			__func__, rc);
+		goto err_tgt_init;
+	}
+
+	cap = t_info->tgt->tgt_get_cpe_info();
+
+	memset(t_info->tgt->outbox, 0, cap->outbox_size);
+	memset(t_info->tgt->inbox, 0, cap->inbox_size);
+	mutex_init(&t_info->msg_lock);
+	cpe_d.cpe_irq_control_callback = irq_control_callback;
+	t_info->cpe_process_command = cpe_mt_process_cmd;
+	t_info->cpe_cmd_validate = cpe_mt_validate_cmd;
+	t_info->cpe_start_notification = broadcast_boot_event;
+	mutex_init(&cpe_d.cpe_api_mutex);
+	mutex_init(&cpe_d.cpe_svc_lock);
+	pr_debug("%s: cpe services initialized\n", __func__);
+	t_info->state = CPE_STATE_INITIALIZED;
+	t_info->initialized = true;
+
+	return t_info;
+
+err_tgt_init:
+	kfree(t_info->tgt);
+
+err_tgt_alloc:
+	kfree(cpe_d.cpe_default_handle);
+	cpe_d.cpe_default_handle = NULL;
+
+err_register:
+	return NULL;
+}
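+
+/*
+ * Bring-up order implied by the API (a sketch, not verbatim driver code;
+ * my_irq_ctl, my_notif_cb and the firmware segment list are placeholders,
+ * error handling omitted):
+ *
+ *	void *cpe = cpe_svc_initialize(my_irq_ctl, &codec_info, &init);
+ *	void *reg = cpe_svc_register(cpe, my_notif_cb,
+ *				     CPE_SVC_ONLINE | CPE_SVC_BOOT_FAILED,
+ *				     "MY_DRV");
+ *
+ *	for each firmware segment:
+ *		cpe_svc_download_segment(cpe, &seg);
+ *	cpe_svc_boot(cpe, 0);
+ *	wait for the CPE_SVC_ONLINE notification in my_notif_cb
+ */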
+
+enum cpe_svc_result cpe_svc_deinitialize(void *cpe_handle)
+{
+	enum cpe_svc_result rc = CPE_SVC_SUCCESS;
+	struct cpe_info *t_info = (struct cpe_info *)cpe_handle;
+
+	if (!t_info)
+		t_info = cpe_d.cpe_default_handle;
+
+	rc = cpe_is_command_valid(t_info, CPE_CMD_DEINITIALIZE);
+
+	if (rc != CPE_SVC_SUCCESS) {
+		pr_err("%s: Invalid command %d\n",
+			__func__, CPE_CMD_DEINITIALIZE);
+		return rc;
+	}
+
+	if (cpe_d.cpe_default_handle == t_info)
+		cpe_d.cpe_default_handle = NULL;
+
+	t_info->tgt->tgt_deinit(t_info->tgt);
+	cpe_change_state(t_info, CPE_STATE_UNINITIALIZED,
+			 CPE_SS_IDLE);
+	mutex_destroy(&t_info->msg_lock);
+	kfree(t_info->tgt);
+	kfree(t_info);
+	mutex_destroy(&cpe_d.cpe_api_mutex);
+	mutex_destroy(&cpe_d.cpe_svc_lock);
+
+	return rc;
+}
+
+void *cpe_svc_register(void *cpe_handle,
+		void (*notification_callback)
+			(const struct cpe_svc_notification *parameter),
+		u32 mask, const char *name)
+{
+	void *reg_handle;
+
+	CPE_SVC_GRAB_LOCK(&cpe_d.cpe_api_mutex, "cpe_api");
+	if (!cpe_d.cpe_default_handle) {
+		cpe_d.cpe_default_handle = kzalloc(sizeof(struct cpe_info),
+					     GFP_KERNEL);
+		if (!cpe_d.cpe_default_handle) {
+			pr_err("%s: no memory for cpe handle, sz = %zu\n",
+				__func__, sizeof(struct cpe_info));
+			CPE_SVC_REL_LOCK(&cpe_d.cpe_api_mutex, "cpe_api");
+			return NULL;
+		}
+	}
+
+	if (!cpe_handle)
+		cpe_handle = cpe_d.cpe_default_handle;
+
+	reg_handle = cpe_register_generic((struct cpe_info *)cpe_handle,
+					   notification_callback,
+					   NULL,
+					   mask, CPE_NO_SERVICE, name);
+	CPE_SVC_REL_LOCK(&cpe_d.cpe_api_mutex, "cpe_api");
+
+	return reg_handle;
+}
+
+enum cpe_svc_result cpe_svc_deregister(void *cpe_handle, void *reg_handle)
+{
+	enum cpe_svc_result rc;
+
+	CPE_SVC_GRAB_LOCK(&cpe_d.cpe_api_mutex, "cpe_api");
+	if (!cpe_handle)
+		cpe_handle = cpe_d.cpe_default_handle;
+
+	rc = cpe_deregister_generic((struct cpe_info *)cpe_handle,
+				    reg_handle);
+	CPE_SVC_REL_LOCK(&cpe_d.cpe_api_mutex, "cpe_api");
+
+	return rc;
+}
+
+enum cpe_svc_result cpe_svc_download_segment(void *cpe_handle,
+	const struct cpe_svc_mem_segment *segment)
+{
+	enum cpe_svc_result rc = CPE_SVC_SUCCESS;
+	struct cpe_info *t_info = (struct cpe_info *)cpe_handle;
+
+	CPE_SVC_GRAB_LOCK(&cpe_d.cpe_api_mutex, "cpe_api");
+	if (!t_info)
+		t_info = cpe_d.cpe_default_handle;
+
+	rc = cpe_is_command_valid(t_info, CPE_CMD_DL_SEGMENT);
+
+	if (rc != CPE_SVC_SUCCESS) {
+		pr_err("%s: cmd validation fail, cmd = %d\n",
+			__func__, CPE_CMD_DL_SEGMENT);
+		CPE_SVC_REL_LOCK(&cpe_d.cpe_api_mutex, "cpe_api");
+		return rc;
+	}
+
+	cpe_toggle_irq_notification(t_info, false);
+	t_info->state = CPE_STATE_DOWNLOADING;
+	t_info->substate = CPE_SS_DL_DOWNLOADING;
+	rc = t_info->tgt->tgt_write_ram(t_info, segment);
+	cpe_toggle_irq_notification(t_info, true);
+	CPE_SVC_REL_LOCK(&cpe_d.cpe_api_mutex, "cpe_api");
+
+	return rc;
+}
+
+enum cpe_svc_result cpe_svc_boot(void *cpe_handle, int debug_mode)
+{
+	enum cpe_svc_result rc = CPE_SVC_SUCCESS;
+	struct cpe_info *t_info = (struct cpe_info *)cpe_handle;
+
+	CPE_SVC_GRAB_LOCK(&cpe_d.cpe_api_mutex, "cpe_api");
+	if (!t_info)
+		t_info = cpe_d.cpe_default_handle;
+
+	rc = cpe_is_command_valid(t_info, CPE_CMD_BOOT);
+
+	if (rc != CPE_SVC_SUCCESS) {
+		pr_err("%s: cmd validation fail, cmd = %d\n",
+			__func__, CPE_CMD_BOOT);
+		CPE_SVC_REL_LOCK(&cpe_d.cpe_api_mutex, "cpe_api");
+		return rc;
+	}
+
+	t_info->tgt->tgt_boot(debug_mode);
+	t_info->state = CPE_STATE_BOOTING;
+	t_info->substate = CPE_SS_BOOT;
+	pr_debug("%s: cpe service booting\n",
+		 __func__);
+
+	CPE_SVC_REL_LOCK(&cpe_d.cpe_api_mutex, "cpe_api");
+	return rc;
+}
+
+enum cpe_svc_result cpe_svc_process_irq(void *cpe_handle, u32 cpe_irq)
+{
+	enum cpe_svc_result rc = CPE_SVC_SUCCESS;
+	struct cpe_info *t_info = (struct cpe_info *)cpe_handle;
+
+	if (!t_info)
+		t_info = cpe_d.cpe_default_handle;
+
+	cpe_toggle_irq_notification(t_info, false);
+	cpe_process_irq_int(cpe_irq, t_info);
+	cpe_toggle_irq_notification(t_info, true);
+
+	return rc;
+}
+
+enum cpe_svc_result cpe_svc_route_notification(void *cpe_handle,
+		enum cpe_svc_module module, enum cpe_svc_route_dest dest)
+{
+	struct cpe_info *t_info = (struct cpe_info *)cpe_handle;
+	enum cpe_svc_result rc = CPE_SVC_NOT_READY;
+
+	CPE_SVC_GRAB_LOCK(&cpe_d.cpe_api_mutex, "cpe_api");
+	if (!t_info)
+		t_info = cpe_d.cpe_default_handle;
+
+	if (t_info->tgt)
+		rc = t_info->tgt->tgt_route_notification(module, dest);
+
+	CPE_SVC_REL_LOCK(&cpe_d.cpe_api_mutex, "cpe_api");
+	return rc;
+}
+
+static enum cpe_svc_result __cpe_svc_shutdown(void *cpe_handle)
+{
+	enum cpe_svc_result rc = CPE_SVC_SUCCESS;
+	struct cpe_info *t_info = (struct cpe_info *)cpe_handle;
+	struct cpe_command_node *n = NULL;
+	struct cpe_command_node kill_cmd;
+
+	if (!t_info)
+		t_info = cpe_d.cpe_default_handle;
+
+	rc = cpe_is_command_valid(t_info, CPE_CMD_SHUTDOWN);
+
+	if (rc != CPE_SVC_SUCCESS) {
+		pr_err("%s: cmd validation fail, cmd = %d\n",
+			__func__, CPE_CMD_SHUTDOWN);
+		return rc;
+	}
+
+	while (!list_empty(&t_info->main_queue)) {
+		n = list_first_entry(&t_info->main_queue,
+				     struct cpe_command_node, list);
+
+		if (n->command == CPE_CMD_SEND_MSG) {
+			cpe_notify_cmi_client(t_info, (u8 *)n->data,
+				CPE_SVC_SHUTTING_DOWN);
+		}
+		/*
+		 * Since command cannot be processed,
+		 * delete it from the list and perform cleanup
+		 */
+		list_del(&n->list);
+		cpe_command_cleanup(n);
+		kfree(n);
+	}
+
+	pr_debug("%s: cpe service OFFLINE state\n", __func__);
+
+	t_info->state = CPE_STATE_OFFLINE;
+	t_info->substate = CPE_SS_IDLE;
+
+	memset(&kill_cmd, 0, sizeof(kill_cmd));
+	kill_cmd.command = CPE_CMD_KILL_THREAD;
+
+	if (t_info->pending) {
+		struct cpe_send_msg *m =
+			(struct cpe_send_msg *)t_info->pending;
+		cpe_notify_cmi_client(t_info, m->payload,
+			CPE_SVC_SHUTTING_DOWN);
+		kfree(t_info->pending);
+		t_info->pending = NULL;
+	}
+
+	cpe_cleanup_worker_thread(t_info);
+	t_info->cpe_process_command(&kill_cmd);
+
+	return rc;
+}
+
+enum cpe_svc_result cpe_svc_shutdown(void *cpe_handle)
+{
+	enum cpe_svc_result rc = CPE_SVC_SUCCESS;
+
+	CPE_SVC_GRAB_LOCK(&cpe_d.cpe_api_mutex, "cpe_api");
+	rc = __cpe_svc_shutdown(cpe_handle);
+	CPE_SVC_REL_LOCK(&cpe_d.cpe_api_mutex, "cpe_api");
+	return rc;
+}
+
+enum cpe_svc_result cpe_svc_reset(void *cpe_handle)
+{
+	enum cpe_svc_result rc = CPE_SVC_SUCCESS;
+	struct cpe_info *t_info = (struct cpe_info *)cpe_handle;
+
+	CPE_SVC_GRAB_LOCK(&cpe_d.cpe_api_mutex, "cpe_api");
+	if (!t_info)
+		t_info = cpe_d.cpe_default_handle;
+
+	rc = cpe_is_command_valid(t_info, CPE_CMD_RESET);
+
+	if (rc != CPE_SVC_SUCCESS) {
+		pr_err("%s: cmd validation fail, cmd = %d\n",
+			__func__, CPE_CMD_RESET);
+		CPE_SVC_REL_LOCK(&cpe_d.cpe_api_mutex, "cpe_api");
+		return rc;
+	}
+
+	if (t_info && t_info->tgt) {
+		rc = t_info->tgt->tgt_reset();
+		pr_debug("%s: cpe services in INITIALIZED state\n",
+			 __func__);
+		t_info->state = CPE_STATE_INITIALIZED;
+		t_info->substate = CPE_SS_IDLE;
+	}
+	CPE_SVC_REL_LOCK(&cpe_d.cpe_api_mutex, "cpe_api");
+
+	return rc;
+}
+
+enum cpe_svc_result cpe_svc_ramdump(void *cpe_handle,
+		struct cpe_svc_mem_segment *buffer)
+{
+	enum cpe_svc_result rc = CPE_SVC_SUCCESS;
+	struct cpe_info *t_info = (struct cpe_info *)cpe_handle;
+
+	CPE_SVC_GRAB_LOCK(&cpe_d.cpe_api_mutex, "cpe_api");
+	if (!t_info)
+		t_info = cpe_d.cpe_default_handle;
+
+	rc = cpe_is_command_valid(t_info, CPE_CMD_RAMDUMP);
+	if (rc != CPE_SVC_SUCCESS) {
+		pr_err("%s: cmd validation fail, cmd = %d\n",
+			__func__, CPE_CMD_RAMDUMP);
+		CPE_SVC_REL_LOCK(&cpe_d.cpe_api_mutex, "cpe_api");
+		return rc;
+	}
+
+	if (t_info->tgt) {
+		rc = t_info->tgt->tgt_read_ram(t_info, buffer);
+	} else {
+		pr_err("%s: cpe service not ready\n", __func__);
+		rc = CPE_SVC_NOT_READY;
+	}
+	CPE_SVC_REL_LOCK(&cpe_d.cpe_api_mutex, "cpe_api");
+
+	return rc;
+}
+
+enum cpe_svc_result cpe_svc_set_debug_mode(void *cpe_handle, u32 mode)
+{
+	struct cpe_info *t_info = (struct cpe_info *)cpe_handle;
+	enum cpe_svc_result rc = CPE_SVC_INVALID_HANDLE;
+
+	CPE_SVC_GRAB_LOCK(&cpe_d.cpe_api_mutex, "cpe_api");
+	if (!t_info)
+		t_info = cpe_d.cpe_default_handle;
+
+	if (t_info->tgt)
+		rc = t_info->tgt->tgt_set_debug_mode(mode);
+	CPE_SVC_REL_LOCK(&cpe_d.cpe_api_mutex, "cpe_api");
+
+	return rc;
+}
+
+const struct cpe_svc_hw_cfg *cpe_svc_get_hw_cfg(void *cpe_handle)
+{
+	struct cpe_info *t_info = (struct cpe_info *)cpe_handle;
+
+	if (!t_info)
+		t_info = cpe_d.cpe_default_handle;
+
+	if (t_info->tgt)
+		return t_info->tgt->tgt_get_cpe_info();
+
+	return NULL;
+}
+
+void *cmi_register(
+		void notification_callback(
+			const struct cmi_api_notification *parameter),
+		u32 service)
+{
+	void *reg_handle = NULL;
+
+	CPE_SVC_GRAB_LOCK(&cpe_d.cpe_api_mutex, "cpe_api");
+	reg_handle = cpe_register_generic(cpe_d.cpe_default_handle,
+			NULL,
+			notification_callback,
+			(CPE_SVC_CMI_MSG | CPE_SVC_OFFLINE |
+			 CPE_SVC_ONLINE),
+			service,
+			"CMI_CLIENT");
+	CPE_SVC_REL_LOCK(&cpe_d.cpe_api_mutex, "cpe_api");
+
+	return reg_handle;
+}
+
+enum cmi_api_result cmi_deregister(void *reg_handle)
+{
+	u32 clients = 0;
+	struct cpe_notif_node *n = NULL;
+	enum cmi_api_result rc = CMI_API_SUCCESS;
+	struct cpe_svc_notification payload;
+
+	CPE_SVC_GRAB_LOCK(&cpe_d.cpe_api_mutex, "cpe_api");
+	rc = (enum cmi_api_result) cpe_deregister_generic(
+		cpe_d.cpe_default_handle, reg_handle);
+
+	CPE_SVC_GRAB_LOCK(&cpe_d.cpe_svc_lock, "cpe_svc");
+	list_for_each_entry(n, &cpe_d.cpe_default_handle->client_list, list) {
+		if (n->mask & CPE_SVC_CMI_MSG)
+			clients++;
+	}
+	CPE_SVC_REL_LOCK(&cpe_d.cpe_svc_lock, "cpe_svc");
+
+	if (clients == 0) {
+		payload.event = CPE_SVC_CMI_CLIENTS_DEREG;
+		payload.payload = NULL;
+		payload.result = CPE_SVC_SUCCESS;
+		cpe_broadcast_notification(cpe_d.cpe_default_handle, &payload);
+	}
+
+	CPE_SVC_REL_LOCK(&cpe_d.cpe_api_mutex, "cpe_api");
+	return rc;
+}
+
+enum cmi_api_result cmi_send_msg(void *message)
+{
+	enum cmi_api_result rc = CMI_API_SUCCESS;
+	struct cpe_send_msg *msg = NULL;
+	struct cmi_hdr *hdr;
+
+	CPE_SVC_GRAB_LOCK(&cpe_d.cpe_api_mutex, "cpe_api");
+	hdr = CMI_GET_HEADER(message);
+	msg = kzalloc(sizeof(struct cpe_send_msg),
+		      GFP_ATOMIC);
+	if (!msg) {
+		pr_err("%s: no memory for cmi msg, sz = %zu\n",
+			__func__, sizeof(struct cpe_send_msg));
+		CPE_SVC_REL_LOCK(&cpe_d.cpe_api_mutex, "cpe_api");
+		return CPE_SVC_NO_MEMORY;
+	}
+
+	if (CMI_HDR_GET_OBM_FLAG(hdr) == CMI_OBM_FLAG_OUT_BAND)
+		msg->isobm = 1;
+	else
+		msg->isobm = 0;
+
+	msg->size = sizeof(struct cmi_hdr) +
+			CMI_HDR_GET_PAYLOAD_SIZE(hdr);
+
+	msg->payload = kzalloc(msg->size, GFP_ATOMIC);
+	if (!msg->payload) {
+		pr_err("%s: no memory for cmi payload, sz = %zd\n",
+			__func__, msg->size);
+		kfree(msg);
+		CPE_SVC_REL_LOCK(&cpe_d.cpe_api_mutex, "cpe_api");
+		return CPE_SVC_NO_MEMORY;
+	}
+
+	msg->address = 0;
+	memcpy((void *)msg->payload, message, msg->size);
+
+	rc = (enum cmi_api_result) cpe_send_cmd_to_thread(
+			cpe_d.cpe_default_handle,
+			CPE_CMD_SEND_MSG,
+			(void *)msg, false);
+
+	if (rc != 0) {
+		pr_err("%s: Failed to queue message\n", __func__);
+		kfree(msg->payload);
+		kfree(msg);
+	}
+
+	CPE_SVC_REL_LOCK(&cpe_d.cpe_api_mutex, "cpe_api");
+	return rc;
+}
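+
+/*
+ * A CMI client fills in the header before calling cmi_send_msg(); the
+ * buffer is copied, so the caller keeps ownership.  Minimal sketch,
+ * mirroring cpe_clk_plan_work() above (MY_OPCODE, my_service_id and
+ * my_payload are placeholders):
+ *
+ *	u8 buf[sizeof(struct cmi_hdr) + sizeof(my_payload)];
+ *	struct cmi_hdr *hdr = CMI_GET_HEADER(buf);
+ *
+ *	CMI_HDR_SET_SERVICE(hdr, my_service_id);
+ *	CMI_HDR_SET_SESSION(hdr, 1);
+ *	CMI_HDR_SET_VERSION(hdr, CMI_DRIVER_SUPPORTED_VERSION);
+ *	CMI_HDR_SET_OPCODE(hdr, MY_OPCODE);
+ *	CMI_HDR_SET_PAYLOAD_SIZE(hdr, sizeof(my_payload));
+ *	memcpy(CMI_GET_PAYLOAD(buf), &my_payload, sizeof(my_payload));
+ *	cmi_send_msg(buf);
+ */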
+
+enum cpe_svc_result cpe_svc_ftm_test(void *cpe_handle, u32 *status)
+{
+	enum cpe_svc_result rc = CPE_SVC_SUCCESS;
+	struct cpe_info *t_info = (struct cpe_info *)cpe_handle;
+	struct cpe_svc_mem_segment backup_seg;
+	struct cpe_svc_mem_segment waiti_seg;
+	u8 *backup_data = NULL;
+
+	CPE_SVC_GRAB_LOCK(&cpe_d.cpe_api_mutex, "cpe_api");
+	if (!t_info)
+		t_info = cpe_d.cpe_default_handle;
+
+	rc = cpe_is_command_valid(t_info, CPE_CMD_FTM_TEST);
+	if (rc != CPE_SVC_SUCCESS) {
+		pr_err("%s: cmd validation fail, cmd = %d\n",
+			__func__, CPE_CMD_FTM_TEST);
+		goto fail_cmd;
+	}
+
+	if (t_info && t_info->tgt) {
+		backup_data = kzalloc(
+				t_info->tgt->tgt_waiti_info->tgt_waiti_size,
+				GFP_KERNEL);
+		if (!backup_data) {
+			rc = CPE_SVC_NO_MEMORY;
+			goto err_return;
+		}
+
+		/* CPE reset */
+		rc = t_info->tgt->tgt_reset();
+		if (rc != CPE_SVC_SUCCESS) {
+			pr_err("%s: CPE reset fail! err = %d\n",
+				__func__, rc);
+			goto err_return;
+		}
+
+		/* Back up the 4 byte IRAM data first */
+		backup_seg.type = CPE_SVC_INSTRUCTION_MEM;
+		backup_seg.cpe_addr =
+			t_info->tgt->tgt_get_cpe_info()->IRAM_offset;
+		backup_seg.size = t_info->tgt->tgt_waiti_info->tgt_waiti_size;
+		backup_seg.data = backup_data;
+
+		pr_debug("%s: Backing up IRAM data from CPE\n",
+			__func__);
+
+		rc = t_info->tgt->tgt_read_ram(t_info, &backup_seg);
+		if (rc != CPE_SVC_SUCCESS) {
+			pr_err("%s: Fail to backup CPE IRAM data, err = %d\n",
+				__func__, rc);
+			goto err_return;
+		}
+
+		pr_debug("%s: Complete backing up IRAM data from CPE\n",
+			__func__);
+
+		/* Write the WAITI instruction data */
+		waiti_seg.type = CPE_SVC_INSTRUCTION_MEM;
+		waiti_seg.cpe_addr =
+			t_info->tgt->tgt_get_cpe_info()->IRAM_offset;
+		waiti_seg.size = t_info->tgt->tgt_waiti_info->tgt_waiti_size;
+		waiti_seg.data = t_info->tgt->tgt_waiti_info->tgt_waiti_data;
+
+		rc = t_info->tgt->tgt_write_ram(t_info, &waiti_seg);
+		if (rc != CPE_SVC_SUCCESS) {
+			pr_err("%s: Fail to write the WAITI data, err = %d\n",
+				__func__, rc);
+			goto restore_iram;
+		}
+
+		/* Boot up cpe to execute the WAITI instructions */
+		rc = t_info->tgt->tgt_boot(1);
+		if (rc != CPE_SVC_SUCCESS) {
+			pr_err("%s: Fail to boot CPE, err = %d\n",
+				__func__, rc);
+			goto reset;
+		}
+
+		/*
+		 * 1ms delay is suggested by the hw team to
+		 * wait for cpe to boot up.
+		 */
+		usleep_range(1000, 1100);
+
+		/* Check if the cpe init is done after executing the WAITI */
+		*status = t_info->tgt->tgt_cpar_init_done();
+
+reset:
+		/* Set the cpe back to reset state */
+		rc = t_info->tgt->tgt_reset();
+		if (rc != CPE_SVC_SUCCESS) {
+			pr_err("%s: CPE reset fail! err = %d\n",
+				__func__, rc);
+			goto restore_iram;
+		}
+
+restore_iram:
+		/* Restore the IRAM 4 bytes data */
+		rc = t_info->tgt->tgt_write_ram(t_info, &backup_seg);
+		if (rc != CPE_SVC_SUCCESS) {
+			pr_err("%s: Fail to restore the IRAM data, err = %d\n",
+				__func__, rc);
+			goto err_return;
+		}
+	}
+
+err_return:
+	kfree(backup_data);
+fail_cmd:
+	CPE_SVC_REL_LOCK(&cpe_d.cpe_api_mutex, "cpe_api");
+	return rc;
+}
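+
+/*
+ * FTM test sequence, as implemented above: reset the CPE, back up the
+ * first IRAM word, overwrite it with the WAITI instruction word, boot
+ * the core, give it ~1 ms, read CPAR init-done as the pass/fail status,
+ * then reset again and restore the saved IRAM contents.
+ */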
+
+static enum cpe_svc_result cpe_tgt_tomtom_boot(int debug_mode)
+{
+	enum cpe_svc_result rc = CPE_SVC_SUCCESS;
+
+	if (!debug_mode)
+		rc = cpe_update_bits(TOMTOM_A_SVASS_CPAR_WDOG_CFG,
+				     0x3F, 0x31);
+	else
+		pr_info("%s: CPE in debug mode, WDOG disabled\n",
+			__func__);
+
+	rc = cpe_update_bits(TOMTOM_A_SVASS_CLKRST_CTL,
+			     0x02, 0x00);
+	rc = cpe_update_bits(TOMTOM_A_SVASS_CLKRST_CTL,
+			     0x0C, 0x04);
+	rc = cpe_update_bits(TOMTOM_A_SVASS_CPAR_CFG,
+			     0x01, 0x01);
+
+	return rc;
+}
+
+static u32 cpe_tgt_tomtom_is_cpar_init_done(void)
+{
+	u8 status = 0;
+
+	cpe_register_read(TOMTOM_A_SVASS_STATUS, &status);
+	return status & 0x01;
+}
+
+static u32 cpe_tgt_tomtom_is_active(void)
+{
+	u8 status = 0;
+
+	cpe_register_read(TOMTOM_A_SVASS_STATUS, &status);
+	return status & 0x04;
+}
+
+static enum cpe_svc_result cpe_tgt_tomtom_reset(void)
+{
+	enum cpe_svc_result rc = CPE_SVC_SUCCESS;
+
+	rc = cpe_update_bits(TOMTOM_A_SVASS_CPAR_WDOG_CFG,
+			     0x30, 0x00);
+
+	rc = cpe_update_bits(TOMTOM_A_SVASS_CPAR_CFG,
+			     0x01, 0x00);
+	rc = cpe_update_bits(TOMTOM_A_MEM_LEAKAGE_CTL,
+			     0x07, 0x03);
+	rc = cpe_update_bits(TOMTOM_A_SVASS_CLKRST_CTL,
+			     0x08, 0x08);
+	rc = cpe_update_bits(TOMTOM_A_SVASS_CLKRST_CTL,
+			     0x02, 0x02);
+	return rc;
+}
+
+enum cpe_svc_result cpe_tgt_tomtom_voicetx(bool enable)
+{
+	enum cpe_svc_result rc = CPE_SVC_SUCCESS;
+	u8 val = 0;
+
+	if (enable)
+		val = 0x02;
+	else
+		val = 0x00;
+	rc = cpe_update_bits(TOMTOM_A_SVASS_CFG,
+			     0x02, val);
+	val = 0;
+	cpe_register_read(TOMTOM_A_SVASS_CFG, &val);
+	return rc;
+}
+
+enum cpe_svc_result cpe_svc_toggle_lab(void *cpe_handle, bool enable)
+{
+	struct cpe_info *t_info = (struct cpe_info *)cpe_handle;
+
+	if (!t_info)
+		t_info = cpe_d.cpe_default_handle;
+
+	if (t_info->tgt)
+		return t_info->tgt->tgt_voice_tx_lab(enable);
+	else
+		return CPE_SVC_INVALID_HANDLE;
+}
+
+static enum cpe_svc_result cpe_tgt_tomtom_read_mailbox(u8 *buffer,
+	size_t size)
+{
+	enum cpe_svc_result rc = CPE_SVC_SUCCESS;
+	u32 cnt = 0;
+
+	if (size >= TOMTOM_A_SVASS_SPE_OUTBOX_SIZE)
+		size = TOMTOM_A_SVASS_SPE_OUTBOX_SIZE - 1;
+	for (cnt = 0; (cnt < size) && (rc == CPE_SVC_SUCCESS); cnt++) {
+		rc = cpe_register_read(TOMTOM_A_SVASS_SPE_OUTBOX(cnt),
+			&(buffer[cnt]));
+	}
+	return rc;
+}
+
+static enum cpe_svc_result cpe_tgt_tomtom_write_mailbox(u8 *buffer,
+	size_t size)
+{
+	enum cpe_svc_result rc = CPE_SVC_SUCCESS;
+	u32 cnt = 0;
+
+	if (size >= TOMTOM_A_SVASS_SPE_INBOX_SIZE)
+		size = TOMTOM_A_SVASS_SPE_INBOX_SIZE - 1;
+	for (cnt = 0; (cnt < size) && (rc == CPE_SVC_SUCCESS); cnt++) {
+		rc = cpe_register_write(TOMTOM_A_SVASS_SPE_INBOX(cnt),
+			buffer[cnt]);
+	}
+
+	if (rc == CPE_SVC_SUCCESS)
+		rc = cpe_register_write(TOMTOM_A_SVASS_SPE_INBOX_TRG, 1);
+
+	return rc;
+}
+
+static enum cpe_svc_result cpe_get_mem_addr(struct cpe_info *t_info,
+		const struct cpe_svc_mem_segment *mem_seg,
+		u32 *addr, u8 *mem)
+{
+	enum cpe_svc_result rc = CPE_SVC_SUCCESS;
+	u32 offset, mem_sz, address;
+	u8 mem_type;
+
+	switch (mem_seg->type) {
+
+	case CPE_SVC_DATA_MEM:
+		mem_type = MEM_ACCESS_DRAM_VAL;
+		offset = TOMTOM_A_SVASS_SPE_DRAM_OFFSET;
+		mem_sz = TOMTOM_A_SVASS_SPE_DRAM_SIZE;
+		break;
+
+	case CPE_SVC_INSTRUCTION_MEM:
+		mem_type = MEM_ACCESS_IRAM_VAL;
+		offset = TOMTOM_A_SVASS_SPE_IRAM_OFFSET;
+		mem_sz = TOMTOM_A_SVASS_SPE_IRAM_SIZE;
+		break;
+
+	default:
+		pr_err("%s: Invalid mem type = %u\n",
+			__func__, mem_seg->type);
+		return CPE_SVC_INVALID_HANDLE;
+	}
+
+	if (mem_seg->cpe_addr < offset) {
+		pr_err("%s: Invalid addr %x for mem type %u\n",
+			__func__, mem_seg->cpe_addr, mem_type);
+		return CPE_SVC_INVALID_HANDLE;
+	}
+
+	address = mem_seg->cpe_addr - offset;
+	if (address + mem_seg->size > mem_sz) {
+		pr_err("%s: wrong size %zu, start address %x, mem_type %u\n",
+			__func__, mem_seg->size, address, mem_type);
+		return CPE_SVC_INVALID_HANDLE;
+	}
+
+	(*addr) = address;
+	(*mem) = mem_type;
+
+	return rc;
+}
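+
+/*
+ * Worked example for the translation above (numbers illustrative): a
+ * CPE_SVC_DATA_MEM segment with cpe_addr = TOMTOM_A_SVASS_SPE_DRAM_OFFSET
+ * + 0x100 and size 16 maps to local bank address 0x100, and is accepted
+ * only if 0x100 + 16 does not exceed TOMTOM_A_SVASS_SPE_DRAM_SIZE.
+ */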
+
+static enum cpe_svc_result cpe_tgt_tomtom_read_RAM(struct cpe_info *t_info,
+		struct cpe_svc_mem_segment *mem_seg)
+{
+	enum cpe_svc_result rc = CPE_SVC_SUCCESS;
+	u8 mem_reg_val = 0;
+	u32 cnt = 0;
+	bool autoinc;
+	u8 mem = MEM_ACCESS_NONE_VAL;
+	u32 addr = 0;
+	u32 ptr_update = true;
+
+	if (!mem_seg) {
+		pr_err("%s: Invalid mem segment\n",
+			__func__);
+		return CPE_SVC_INVALID_HANDLE;
+	}
+
+	rc = cpe_get_mem_addr(t_info, mem_seg, &addr, &mem);
+
+	if (rc != CPE_SVC_SUCCESS) {
+		pr_err("%s: Cannot obtain address, mem_type %u\n",
+			__func__, mem_seg->type);
+		return rc;
+	}
+
+	rc = cpe_register_write(TOMTOM_A_SVASS_MEM_CTL, 0);
+	autoinc = cpe_register_read_autoinc_supported();
+	if (autoinc)
+		mem_reg_val |= 0x04;
+
+	mem_reg_val |= 0x08;
+	mem_reg_val |= mem;
+
+	do {
+		if (!autoinc || ptr_update) {
+			rc = cpe_register_write(TOMTOM_A_SVASS_MEM_PTR0,
+				(addr & 0xFF));
+			rc = cpe_register_write(TOMTOM_A_SVASS_MEM_PTR1,
+				((addr >> 8) & 0xFF));
+			rc = cpe_register_write(TOMTOM_A_SVASS_MEM_PTR2,
+				((addr >> 16) & 0xFF));
+
+			rc = cpe_register_write(TOMTOM_A_SVASS_MEM_CTL,
+						mem_reg_val);
+
+			ptr_update = false;
+		}
+		rc = cpe_register_read(TOMTOM_A_SVASS_MEM_BANK,
+			&mem_seg->data[cnt]);
+
+		if (!autoinc)
+			rc = cpe_register_write(TOMTOM_A_SVASS_MEM_CTL, 0);
+	} while (++cnt < mem_seg->size);
+
+	rc = cpe_register_write(TOMTOM_A_SVASS_MEM_CTL, 0);
+
+	return rc;
+}
+
+static enum cpe_svc_result cpe_tgt_tomtom_write_RAM(struct cpe_info *t_info,
+		const struct cpe_svc_mem_segment *mem_seg)
+{
+	enum cpe_svc_result rc = CPE_SVC_SUCCESS;
+	u8 mem_reg_val = 0;
+	u8 mem = MEM_ACCESS_NONE_VAL;
+	u32 addr = 0;
+	u8 *temp_ptr = NULL;
+	u32 temp_size = 0;
+	bool autoinc;
+
+	if (!mem_seg) {
+		pr_err("%s: Invalid mem segment\n",
+			__func__);
+		return CPE_SVC_INVALID_HANDLE;
+	}
+
+	rc = cpe_get_mem_addr(t_info, mem_seg, &addr, &mem);
+
+	if (rc != CPE_SVC_SUCCESS) {
+		pr_err("%s: Cannot obtain address, mem_type %u\n",
+			__func__, mem_seg->type);
+		return rc;
+	}
+
+	autoinc = cpe_register_read_autoinc_supported();
+	if (autoinc)
+		mem_reg_val |= 0x04;
+	mem_reg_val |= mem;
+
+	rc = cpe_update_bits(TOMTOM_A_SVASS_MEM_CTL,
+			     0x0F, mem_reg_val);
+
+	rc = cpe_register_write(TOMTOM_A_SVASS_MEM_PTR0,
+				(addr & 0xFF));
+	rc = cpe_register_write(TOMTOM_A_SVASS_MEM_PTR1,
+				((addr >> 8) & 0xFF));
+
+	rc = cpe_register_write(TOMTOM_A_SVASS_MEM_PTR2,
+				((addr >> 16) & 0xFF));
+
+	temp_size = 0;
+	temp_ptr = mem_seg->data;
+
+	while (temp_size < mem_seg->size) {
+		u32 to_write = (mem_seg->size >= temp_size+CHUNK_SIZE)
+			? CHUNK_SIZE : (mem_seg->size-temp_size);
+
+		if (t_info->state == CPE_STATE_OFFLINE) {
+			pr_err("%s: CPE is offline\n", __func__);
+			return CPE_SVC_FAILED;
+		}
+
+		cpe_register_write_repeat(TOMTOM_A_SVASS_MEM_BANK,
+			temp_ptr, to_write);
+		temp_size += CHUNK_SIZE;
+		temp_ptr += CHUNK_SIZE;
+	}
+
+	rc = cpe_register_write(TOMTOM_A_SVASS_MEM_CTL, 0);
+	return rc;
+}
+
+static enum cpe_svc_result cpe_tgt_tomtom_route_notification(
+		enum cpe_svc_module module,
+		enum cpe_svc_route_dest dest)
+{
+	enum cpe_svc_result rc = CPE_SVC_SUCCESS;
+	u8 ctl_reg_val = 0;
+
+	switch (module) {
+	case CPE_SVC_LISTEN_PROC:
+		switch (dest) {
+		case CPE_SVC_EXTERNAL:
+			ctl_reg_val = LISTEN_CTL_MSM_VAL;
+			break;
+		case CPE_SVC_INTERNAL:
+			ctl_reg_val = LISTEN_CTL_SPE_VAL;
+			break;
+		default:
+			pr_err("%s: Invalid dest %d\n",
+				__func__, dest);
+			return CPE_SVC_FAILED;
+		}
+
+		rc = cpe_update_bits(TOMTOM_A_SVASS_CFG,
+				     0x01, ctl_reg_val);
+		break;
+	default:
+		pr_err("%s: Invalid module %d\n",
+			__func__, module);
+		rc = CPE_SVC_FAILED;
+		break;
+	}
+
+	return rc;
+}
+
+static enum cpe_svc_result cpe_tgt_tomtom_set_debug_mode(u32 enable)
+{
+	enum cpe_svc_result rc = CPE_SVC_SUCCESS;
+	u8 dbg_reg_val = 0x00;
+
+	if (enable)
+		dbg_reg_val = 0x08;
+	rc = cpe_update_bits(TOMTOM_A_SVASS_DEBUG,
+			     0x08, dbg_reg_val);
+	return rc;
+}
+
+static const struct cpe_svc_hw_cfg *cpe_tgt_tomtom_get_cpe_info(void)
+{
+	return &cpe_svc_tomtom_info;
+}
+
+static enum cpe_svc_result cpe_tgt_tomtom_deinit(
+		struct cpe_svc_tgt_abstraction *param)
+{
+	kfree(param->inbox);
+	param->inbox = NULL;
+	kfree(param->outbox);
+	param->outbox = NULL;
+	memset(param, 0, sizeof(struct cpe_svc_tgt_abstraction));
+	return CPE_SVC_SUCCESS;
+}
+
+static u8 cpe_tgt_tomtom_waiti_data[] = {0x00, 0x70, 0x00, 0x00};
+
+static struct cpe_tgt_waiti_info cpe_tgt_tomtom_waiti_info = {
+	.tgt_waiti_size = ARRAY_SIZE(cpe_tgt_tomtom_waiti_data),
+	.tgt_waiti_data = cpe_tgt_tomtom_waiti_data,
+};
+
+static enum cpe_svc_result cpe_tgt_tomtom_init(
+		struct cpe_svc_codec_info_v1 *codec_info,
+		struct cpe_svc_tgt_abstraction *param)
+{
+	if (!codec_info)
+		return CPE_SVC_INVALID_HANDLE;
+	if (!param)
+		return CPE_SVC_INVALID_HANDLE;
+
+	if (codec_info->id == CPE_SVC_CODEC_TOMTOM) {
+		param->tgt_boot      = cpe_tgt_tomtom_boot;
+		param->tgt_cpar_init_done = cpe_tgt_tomtom_is_cpar_init_done;
+		param->tgt_is_active = cpe_tgt_tomtom_is_active;
+		param->tgt_reset = cpe_tgt_tomtom_reset;
+		param->tgt_read_mailbox = cpe_tgt_tomtom_read_mailbox;
+		param->tgt_write_mailbox = cpe_tgt_tomtom_write_mailbox;
+		param->tgt_read_ram = cpe_tgt_tomtom_read_RAM;
+		param->tgt_write_ram = cpe_tgt_tomtom_write_RAM;
+		param->tgt_route_notification =
+			cpe_tgt_tomtom_route_notification;
+		param->tgt_set_debug_mode = cpe_tgt_tomtom_set_debug_mode;
+		param->tgt_get_cpe_info = cpe_tgt_tomtom_get_cpe_info;
+		param->tgt_deinit = cpe_tgt_tomtom_deinit;
+		param->tgt_voice_tx_lab = cpe_tgt_tomtom_voicetx;
+		param->tgt_waiti_info = &cpe_tgt_tomtom_waiti_info;
+
+		param->inbox = kzalloc(TOMTOM_A_SVASS_SPE_INBOX_SIZE,
+				       GFP_KERNEL);
+		if (!param->inbox) {
+			pr_err("%s: no memory for inbox, sz = %d\n",
+				__func__, TOMTOM_A_SVASS_SPE_INBOX_SIZE);
+			return CPE_SVC_NO_MEMORY;
+		}
+
+		param->outbox = kzalloc(TOMTOM_A_SVASS_SPE_OUTBOX_SIZE,
+					GFP_KERNEL);
+		if (!param->outbox) {
+			kfree(param->inbox);
+			pr_err("%s: no memory for outbox, sz = %d\n",
+				__func__, TOMTOM_A_SVASS_SPE_OUTBOX_SIZE);
+			return CPE_SVC_NO_MEMORY;
+		}
+	}
+
+	return CPE_SVC_SUCCESS;
+}
+
+static enum cpe_svc_result cpe_tgt_wcd9335_boot(int debug_mode)
+{
+	enum cpe_svc_result rc = CPE_SVC_SUCCESS;
+
+	if (!debug_mode)
+		rc |= cpe_update_bits(
+				WCD9335_CPE_SS_WDOG_CFG,
+				0x3f, 0x31);
+	else
+		pr_info("%s: CPE in debug mode, WDOG disabled\n",
+			__func__);
+
+	rc |= cpe_register_write(WCD9335_CPE_SS_CPARMAD_BUFRDY_INT_PERIOD, 19);
+	rc |= cpe_update_bits(WCD9335_CPE_SS_CPAR_CTL, 0x04, 0x00);
+	rc |= cpe_update_bits(WCD9335_CPE_SS_CPAR_CTL, 0x02, 0x02);
+	rc |= cpe_update_bits(WCD9335_CPE_SS_CPAR_CTL, 0x01, 0x01);
+
+	if (unlikely(rc)) {
+		pr_err("%s: Failed to boot, err = %d\n",
+			__func__, rc);
+		rc = CPE_SVC_FAILED;
+	}
+
+	return rc;
+}
+
+static u32 cpe_tgt_wcd9335_is_cpar_init_done(void)
+{
+	u8 temp = 0;
+
+	cpe_register_read(WCD9335_CPE_SS_STATUS, &temp);
+	return temp & 0x1;
+}
+
+static u32 cpe_tgt_wcd9335_is_active(void)
+{
+	u8 temp = 0;
+
+	cpe_register_read(WCD9335_CPE_SS_STATUS, &temp);
+	return temp & 0x4;
+}
+
+static enum cpe_svc_result cpe_tgt_wcd9335_reset(void)
+{
+	enum cpe_svc_result rc = CPE_SVC_SUCCESS;
+
+	rc |= cpe_update_bits(WCD9335_CPE_SS_CPAR_CFG, 0x01, 0x00);
+
+	rc |= cpe_register_write(
+		WCD9335_CODEC_RPM_PWR_CPE_IRAM_SHUTDOWN, 0x00);
+	rc |= cpe_register_write(
+		WCD9335_CODEC_RPM_PWR_CPE_DRAM1_SHUTDOWN, 0x00);
+	rc |= cpe_register_write(
+		WCD9335_CODEC_RPM_PWR_CPE_DRAM0_SHUTDOWN_1, 0x00);
+	rc |= cpe_register_write(
+		WCD9335_CODEC_RPM_PWR_CPE_DRAM0_SHUTDOWN_2, 0x00);
+
+	rc |= cpe_update_bits(WCD9335_CPE_SS_CPAR_CTL, 0x04, 0x04);
+
+	if (unlikely(rc)) {
+		pr_err("%s: failed to reset cpe, err = %d\n",
+			__func__, rc);
+		rc = CPE_SVC_FAILED;
+	}
+
+	return rc;
+}
+
+static enum cpe_svc_result cpe_tgt_wcd9335_read_mailbox(u8 *buffer,
+	size_t size)
+{
+	enum cpe_svc_result rc = CPE_SVC_SUCCESS;
+	u32 cnt = 0;
+
+	pr_debug("%s: size=%zu\n", __func__, size);
+
+	if (size > WCD9335_CPE_SS_SPE_OUTBOX_SIZE)
+		size = WCD9335_CPE_SS_SPE_OUTBOX_SIZE;
+
+	for (cnt = 0; (cnt < size) && (rc == CPE_SVC_SUCCESS); cnt++)
+		rc = cpe_register_read(WCD9335_CPE_SS_SPE_OUTBOX1(cnt),
+				       &buffer[cnt]);
+
+	rc = cpe_register_write(WCD9335_CPE_SS_OUTBOX1_ACK, 0x01);
+
+	if (unlikely(rc)) {
+		pr_err("%s: failed to ACK outbox, err = %d\n",
+			__func__, rc);
+		rc = CPE_SVC_FAILED;
+	}
+
+	return rc;
+}
+
+static enum cpe_svc_result cpe_tgt_wcd9335_write_mailbox(u8 *buffer,
+	size_t size)
+{
+	enum cpe_svc_result rc = CPE_SVC_SUCCESS;
+	u32 cnt = 0;
+
+	pr_debug("%s: size = %zu\n", __func__, size);
+	if (size > WCD9335_CPE_SS_SPE_INBOX_SIZE)
+		size = WCD9335_CPE_SS_SPE_INBOX_SIZE;
+	for (cnt = 0; (cnt < size) && (rc == CPE_SVC_SUCCESS); cnt++) {
+		rc |= cpe_register_write(WCD9335_CPE_SS_SPE_INBOX1(cnt),
+			buffer[cnt]);
+	}
+
+	if (unlikely(rc)) {
+		pr_err("%s: Error %d writing mailbox registers\n",
+			__func__, rc);
+		return rc;
+	}
+
+	rc = cpe_register_write(WCD9335_CPE_SS_INBOX1_TRG, 1);
+	return rc;
+}
+
+static enum cpe_svc_result cpe_wcd9335_get_mem_addr(struct cpe_info *t_info,
+		const struct cpe_svc_mem_segment *mem_seg,
+		u32 *addr, u8 *mem)
+{
+	enum cpe_svc_result rc = CPE_SVC_SUCCESS;
+	u32 offset, mem_sz, address;
+	u8 mem_type;
+
+	switch (mem_seg->type) {
+	case CPE_SVC_DATA_MEM:
+		mem_type = MEM_ACCESS_DRAM_VAL;
+		offset = WCD9335_CPE_SS_SPE_DRAM_OFFSET;
+		mem_sz = WCD9335_CPE_SS_SPE_DRAM_SIZE;
+		break;
+
+	case CPE_SVC_INSTRUCTION_MEM:
+		mem_type = MEM_ACCESS_IRAM_VAL;
+		offset = WCD9335_CPE_SS_SPE_IRAM_OFFSET;
+		mem_sz = WCD9335_CPE_SS_SPE_IRAM_SIZE;
+		break;
+
+	default:
+		pr_err("%s: Invalid mem type = %u\n",
+			__func__, mem_seg->type);
+		return CPE_SVC_INVALID_HANDLE;
+	}
+
+	if (mem_seg->cpe_addr < offset) {
+		pr_err("%s: Invalid addr %x for mem type %u\n",
+			__func__, mem_seg->cpe_addr, mem_type);
+		return CPE_SVC_INVALID_HANDLE;
+	}
+
+	address = mem_seg->cpe_addr - offset;
+	if (address + mem_seg->size > mem_sz) {
+		pr_err("%s: wrong size %zu, start address %x, mem_type %u\n",
+			__func__, mem_seg->size, address, mem_type);
+		return CPE_SVC_INVALID_HANDLE;
+	}
+
+	(*addr) = address;
+	(*mem) = mem_type;
+
+	return rc;
+}
+
+static enum cpe_svc_result cpe_tgt_wcd9335_read_RAM(struct cpe_info *t_info,
+		struct cpe_svc_mem_segment *mem_seg)
+{
+	enum cpe_svc_result rc = CPE_SVC_SUCCESS;
+	u8 temp = 0;
+	u32 cnt = 0;
+	u8 mem = 0x0;
+	u32 addr = 0;
+	u32 lastaddr = 0;
+	u32 ptr_update = true;
+	bool autoinc;
+
+	if (!mem_seg) {
+		pr_err("%s: Invalid buffer\n", __func__);
+		return CPE_SVC_INVALID_HANDLE;
+	}
+
+	rc = cpe_wcd9335_get_mem_addr(t_info, mem_seg, &addr, &mem);
+
+	if (rc != CPE_SVC_SUCCESS) {
+		pr_err("%s: Cannot obtain address, mem_type %u\n",
+			__func__, mem_seg->type);
+		return rc;
+	}
+
+	rc |= cpe_register_write(WCD9335_CPE_SS_MEM_CTRL, 0);
+	autoinc = cpe_register_read_autoinc_supported();
+
+	if (autoinc)
+		temp = 0x18;
+	else
+		temp = 0x10;
+
+	temp |= mem;
+
+	lastaddr = ~addr;
+	do {
+		if (!autoinc || (ptr_update)) {
+			/* write LSB only if modified */
+			if ((lastaddr & 0xFF) != (addr & 0xFF))
+				rc |= cpe_register_write(
+						WCD9335_CPE_SS_MEM_PTR_0,
+						(addr & 0xFF));
+			/* write middle byte only if modified */
+			if (((lastaddr >> 8) & 0xFF) != ((addr >> 8) & 0xFF))
+				rc |= cpe_register_write(
+						WCD9335_CPE_SS_MEM_PTR_1,
+						((addr>>8) & 0xFF));
+			/* write MSB only if modified */
+			if (((lastaddr >> 16) & 0xFF) != ((addr >> 16) & 0xFF))
+				rc |= cpe_register_write(
+						WCD9335_CPE_SS_MEM_PTR_2,
+						((addr>>16) & 0xFF));
+
+			rc |= cpe_register_write(WCD9335_CPE_SS_MEM_CTRL, temp);
+			lastaddr = addr;
+			addr++;
+			ptr_update = false;
+		}
+
+		rc |= cpe_register_read(WCD9335_CPE_SS_MEM_BANK_0,
+				       &mem_seg->data[cnt]);
+
+		if (!autoinc)
+			rc |= cpe_register_write(WCD9335_CPE_SS_MEM_CTRL, 0);
+	} while ((++cnt < mem_seg->size) &&
+		 (rc == CPE_SVC_SUCCESS));
+
+	rc |= cpe_register_write(WCD9335_CPE_SS_MEM_CTRL, 0);
+
+	if (rc)
+		pr_err("%s: Failed to read registers, err = %d\n",
+			__func__, rc);
+
+	return rc;
+}
+
+static enum cpe_svc_result cpe_tgt_wcd9335_write_RAM(struct cpe_info *t_info,
+		const struct cpe_svc_mem_segment *mem_seg)
+{
+	enum cpe_svc_result rc = CPE_SVC_SUCCESS;
+	u8 mem_reg_val = 0;
+	u8 mem = MEM_ACCESS_NONE_VAL;
+	u32 addr = 0;
+	u8 *temp_ptr = NULL;
+	u32 temp_size = 0;
+	bool autoinc;
+
+	if (!mem_seg) {
+		pr_err("%s: Invalid mem segment\n",
+			__func__);
+		return CPE_SVC_INVALID_HANDLE;
+	}
+
+	rc = cpe_wcd9335_get_mem_addr(t_info, mem_seg, &addr, &mem);
+
+	if (rc != CPE_SVC_SUCCESS) {
+		pr_err("%s: Cannot obtain address, mem_type %u\n",
+			__func__, mem_seg->type);
+		return rc;
+	}
+
+	autoinc = cpe_register_read_autoinc_supported();
+	if (autoinc)
+		mem_reg_val = 0x18;
+	else
+		mem_reg_val = 0x10;
+
+	mem_reg_val |= mem;
+
+	rc = cpe_update_bits(WCD9335_CPE_SS_MEM_CTRL,
+			     0x0F, mem_reg_val);
+
+	rc = cpe_register_write(WCD9335_CPE_SS_MEM_PTR_0,
+				(addr & 0xFF));
+	rc = cpe_register_write(WCD9335_CPE_SS_MEM_PTR_1,
+				((addr >> 8) & 0xFF));
+
+	rc = cpe_register_write(WCD9335_CPE_SS_MEM_PTR_2,
+				((addr >> 16) & 0xFF));
+
+	temp_size = 0;
+	temp_ptr = mem_seg->data;
+
+	while (temp_size < mem_seg->size) {
+		u32 to_write = (mem_seg->size >= temp_size+CHUNK_SIZE)
+			? CHUNK_SIZE : (mem_seg->size - temp_size);
+
+		if (t_info->state == CPE_STATE_OFFLINE) {
+			pr_err("%s: CPE is offline\n", __func__);
+			return CPE_SVC_FAILED;
+		}
+
+		cpe_register_write_repeat(WCD9335_CPE_SS_MEM_BANK_0,
+			temp_ptr, to_write);
+		temp_size += CHUNK_SIZE;
+		temp_ptr += CHUNK_SIZE;
+	}
+
+	rc = cpe_register_write(WCD9335_CPE_SS_MEM_CTRL, 0);
+
+	if (rc)
+		pr_err("%s: Failed to write registers, err = %d\n",
+			__func__, rc);
+	return rc;
+}
+
+static enum cpe_svc_result cpe_tgt_wcd9335_route_notification(
+		enum cpe_svc_module module,
+		enum cpe_svc_route_dest dest)
+{
+	enum cpe_svc_result rc = CPE_SVC_SUCCESS;
+
+	pr_debug("%s: Module = %d, Destination = %d\n",
+		 __func__, module, dest);
+
+	switch (module) {
+	case CPE_SVC_LISTEN_PROC:
+		switch (dest) {
+		case CPE_SVC_EXTERNAL:
+			rc = cpe_update_bits(WCD9335_CPE_SS_CFG, 0x01, 0x01);
+			break;
+		case CPE_SVC_INTERNAL:
+			rc = cpe_update_bits(WCD9335_CPE_SS_CFG, 0x01, 0x00);
+			break;
+		default:
+			pr_err("%s: Invalid destination %d\n",
+				__func__, dest);
+			return CPE_SVC_FAILED;
+		}
+		break;
+	default:
+		pr_err("%s: Invalid module %d\n",
+			__func__, module);
+		rc = CPE_SVC_FAILED;
+		break;
+	}
+	return rc;
+}
+
+static enum cpe_svc_result cpe_tgt_wcd9335_set_debug_mode(u32 enable)
+{
+	enum cpe_svc_result rc = CPE_SVC_SUCCESS;
+
+	pr_debug("%s: enable = %s\n", __func__,
+		 (enable) ? "true" : "false");
+
+	return rc;
+}
+
+static const struct cpe_svc_hw_cfg *cpe_tgt_wcd9335_get_cpe_info(void)
+{
+	return &cpe_svc_wcd9335_info;
+}
+
+static enum cpe_svc_result
+cpe_tgt_wcd9335_deinit(struct cpe_svc_tgt_abstraction *param)
+{
+	kfree(param->inbox);
+	param->inbox = NULL;
+	kfree(param->outbox);
+	param->outbox = NULL;
+	memset(param, 0, sizeof(struct cpe_svc_tgt_abstraction));
+
+	return CPE_SVC_SUCCESS;
+}
+
+static enum cpe_svc_result
+	cpe_tgt_wcd9335_voicetx(bool enable)
+{
+	enum cpe_svc_result rc = CPE_SVC_SUCCESS;
+	u8 val = 0;
+
+	pr_debug("%s: enable = %u\n", __func__, enable);
+	if (enable)
+		val = 0x02;
+	else
+		val = 0x00;
+
+	rc = cpe_update_bits(WCD9335_CPE_SS_CFG, 0x02, val);
+	val = 0;
+	cpe_register_read(WCD9335_CPE_SS_CFG, &val);
+
+	return rc;
+}
+
+static u8 cpe_tgt_wcd9335_waiti_data[] = {0x00, 0x70, 0x00, 0x00};
+
+static struct cpe_tgt_waiti_info cpe_tgt_wcd9335_waiti_info = {
+	.tgt_waiti_size = ARRAY_SIZE(cpe_tgt_wcd9335_waiti_data),
+	.tgt_waiti_data = cpe_tgt_wcd9335_waiti_data,
+};
+
+static enum cpe_svc_result cpe_tgt_wcd9335_init(
+		struct cpe_svc_codec_info_v1 *codec_info,
+		struct cpe_svc_tgt_abstraction *param)
+{
+	if (!codec_info)
+		return CPE_SVC_INVALID_HANDLE;
+	if (!param)
+		return CPE_SVC_INVALID_HANDLE;
+
+	if (codec_info->id == CPE_SVC_CODEC_WCD9335) {
+		param->tgt_boot = cpe_tgt_wcd9335_boot;
+		param->tgt_cpar_init_done = cpe_tgt_wcd9335_is_cpar_init_done;
+		param->tgt_is_active = cpe_tgt_wcd9335_is_active;
+		param->tgt_reset = cpe_tgt_wcd9335_reset;
+		param->tgt_read_mailbox = cpe_tgt_wcd9335_read_mailbox;
+		param->tgt_write_mailbox = cpe_tgt_wcd9335_write_mailbox;
+		param->tgt_read_ram = cpe_tgt_wcd9335_read_RAM;
+		param->tgt_write_ram = cpe_tgt_wcd9335_write_RAM;
+		param->tgt_route_notification =
+			cpe_tgt_wcd9335_route_notification;
+		param->tgt_set_debug_mode = cpe_tgt_wcd9335_set_debug_mode;
+		param->tgt_get_cpe_info = cpe_tgt_wcd9335_get_cpe_info;
+		param->tgt_deinit = cpe_tgt_wcd9335_deinit;
+		param->tgt_voice_tx_lab = cpe_tgt_wcd9335_voicetx;
+		param->tgt_waiti_info = &cpe_tgt_wcd9335_waiti_info;
+
+		param->inbox = kzalloc(WCD9335_CPE_SS_SPE_INBOX_SIZE,
+				       GFP_KERNEL);
+		if (!param->inbox) {
+			pr_err("%s: no memory for inbox, sz = %d\n",
+				__func__, WCD9335_CPE_SS_SPE_INBOX_SIZE);
+			return CPE_SVC_NO_MEMORY;
+		}
+
+		param->outbox = kzalloc(WCD9335_CPE_SS_SPE_OUTBOX_SIZE,
+					GFP_KERNEL);
+		if (!param->outbox) {
+			kfree(param->inbox);
+			pr_err("%s: no memory for outbox, sz = %d\n",
+				__func__, WCD9335_CPE_SS_SPE_OUTBOX_SIZE);
+			return CPE_SVC_NO_MEMORY;
+		}
+	}
+
+	return CPE_SVC_SUCCESS;
+}
+
+MODULE_DESCRIPTION("WCD CPE Services");
+MODULE_LICENSE("GPL v2");
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/sound/soc/codecs/wcd_cpe_services.h	2019-01-22 16:16:29.555301211 +0100
@@ -0,0 +1,179 @@
+/* Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __CPE_SERVICES__
+#define __CPE_SERVICES__
+
+#define CPE_IRQ_OUTBOX_IRQ		0x01
+#define CPE_IRQ_MEM_ACCESS_ERROR	0x02
+#define CPE_IRQ_WDOG_BITE		0x04
+#define CPE_IRQ_BUFFER_OVERFLOW		0x08
+#define CPE_IRQ_LAB_OVFUNF		0x10
+#define CPE_IRQ_FLL_LOCK_LOST		0x20
+#define CPE_IRQ_RCO_WDOG_INT		0x40
+
+#define EFAILED (MAX_ERRNO - 1)
+#define ENOTREADY (MAX_ERRNO - 2)
+
+#define MAX_SUPPORTED_CLKFREQ 8
+#define CPE_SVC_INIT_PARAM_V1 1
+
+enum cpe_svc_result {
+	CPE_SVC_SUCCESS			= 0,
+	CPE_SVC_FAILED			= -EFAILED,
+	CPE_SVC_NO_MEMORY		= -ENOMEM,
+	CPE_SVC_INVALID_HANDLE		= -EINVAL,
+	CPE_SVC_NOT_READY		= -ENOTREADY,
+	CPE_SVC_SHUTTING_DOWN		= -ESHUTDOWN,
+	CPE_SVC_BUSY			= -EBUSY,
+};
+
+enum cpe_svc_event {
+	CPE_SVC_CMI_MSG			= 0x01,
+	CPE_SVC_OFFLINE			= 0x02,
+	CPE_SVC_ONLINE			= 0x04,
+	CPE_SVC_BOOT_FAILED		= 0x08,
+	CPE_SVC_READ_COMPLETE		= 0x10,
+	CPE_SVC_READ_ERROR		= 0x20,
+	CPE_SVC_BOOT			= 0x40,
+	CPE_SVC_CMI_CLIENTS_DEREG	= 0x100,
+	CPE_SVC_EVENT_ANCHOR		= 0x7FFF
+};
+
+enum cpe_svc_module {
+	CPE_SVC_LISTEN_PROC		= 1,
+	CPE_SVC_MODULE_ANCHOR		= 0x7F
+};
+
+enum cpe_svc_route_dest {
+	CPE_SVC_EXTERNAL		= 1,
+	CPE_SVC_INTERNAL		= 2,
+	CPE_SVC_ROUTE_ANCHOR		= 0x7F
+};
+
+enum cpe_svc_mem_type {
+	CPE_SVC_DATA_MEM		= 1,
+	CPE_SVC_INSTRUCTION_MEM		= 2,
+	CPE_SVC_IPC_MEM			= 3,
+	CPE_SVC_MEM_TYPE_ANCHOR		= 0x7F
+};
+
+enum cpe_svc_codec_id {
+	CPE_SVC_CODEC_TOMTOM		= 5,
+	CPE_SVC_CODEC_WCD9335		= 7,
+	CPE_SVC_CODEC_WCD9326		= 8,
+	CPE_SVC_CODEC_ID_ANCHOR		= 0x7ffffff
+};
+
+enum cpe_svc_codec_version {
+	CPE_SVC_CODEC_V1P0		= 1,
+	CPE_SVC_CODEC_VERSION_ANCHOR	= 0x7fffffff
+};
+
+struct cpe_svc_codec_info_v1 {
+	u16			major_version; /* must be 1 */
+	u16			minor_version; /* must be 0 */
+	u32			id;
+	u32			version;
+	/* Add 1.1 version fields after this line */
+};
+
+struct cpe_svc_notification {
+	enum cpe_svc_event event;
+	enum cpe_svc_result result;
+	void *payload;
+	void *private_data;
+};
+
+struct cpe_svc_msg_payload {
+	u8    *cmi_msg;
+};
+
+struct cpe_svc_read_complete {
+	u8    *buffer;
+	size_t   size;
+};
+
+struct cpe_svc_boot_event {
+	u32 debug_address;
+	size_t debug_buffer_size;
+	u32 status;
+};
+
+struct cpe_svc_mem_segment {
+	enum cpe_svc_mem_type type;
+	u32 cpe_addr;
+	size_t size;
+	u8 *data;
+};
+
+struct cpe_svc_hw_cfg {
+	size_t DRAM_size;
+	u32 DRAM_offset;
+	size_t IRAM_size;
+	u32 IRAM_offset;
+	u8 inbox_size;
+	u8 outbox_size;
+};
+
+struct cpe_svc_cfg_clk_plan {
+	u32 current_clk_feq;
+	u32 num_clk_freqs;
+	u32 clk_freqs[MAX_SUPPORTED_CLKFREQ];
+};
+
+struct cpe_svc_init_param {
+	void *context;
+	u32 version;
+	void (*query_freq_plans_cb)(void *cdc_priv,
+			struct cpe_svc_cfg_clk_plan *clk_freq);
+	void (*change_freq_plan_cb)(void *cdc_priv,
+			u32 clk_freq);
+};
+
+
+void *cpe_svc_initialize(
+		void irq_control_callback(u32 enable),
+		const void *codec_info, void *context);
+enum cpe_svc_result cpe_svc_deinitialize(void *cpe_handle);
+
+void *cpe_svc_register(void *cpe_handle,
+		void (*notification_callback)(
+			const struct cpe_svc_notification *parameter),
+		u32 mask, const char *name);
+
+enum cpe_svc_result cpe_svc_deregister(void *cpe_handle, void *reg_handle);
+
+enum cpe_svc_result cpe_svc_download_segment(void *cpe_handle,
+		const struct cpe_svc_mem_segment *segment);
+
+enum cpe_svc_result cpe_svc_boot(void *cpe_handle, int debug_mode);
+
+enum cpe_svc_result cpe_svc_shutdown(void *cpe_handle);
+
+enum cpe_svc_result cpe_svc_reset(void *cpe_handle);
+
+enum cpe_svc_result cpe_svc_process_irq(void *cpe_handle, u32 cpe_irq);
+
+enum cpe_svc_result
+cpe_svc_route_notification(void *cpe_handle, enum cpe_svc_module module,
+		enum cpe_svc_route_dest dest);
+
+enum cpe_svc_result cpe_svc_ramdump(void *cpe_handle,
+		struct cpe_svc_mem_segment *buffer);
+
+enum cpe_svc_result cpe_svc_set_debug_mode(void *cpe_handle, u32 mode);
+
+const struct cpe_svc_hw_cfg *cpe_svc_get_hw_cfg(void *cpe_handle);
+enum cpe_svc_result cpe_svc_toggle_lab(void *cpe_handle, bool enable);
+enum cpe_svc_result cpe_svc_ftm_test(void *cpe_handle, u32 *status);
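+
+/*
+ * A minimal sketch of the expected call flow for a codec driver using
+ * this service, inferred from the declarations above (error handling
+ * omitted; irq_cb, notify_cb and seg are placeholder names):
+ *
+ *	void *h = cpe_svc_initialize(irq_cb, &codec_info, ctx);
+ *	void *r = cpe_svc_register(h, notify_cb, mask, "client");
+ *	cpe_svc_download_segment(h, &seg);	(repeated per segment)
+ *	cpe_svc_boot(h, debug_mode);
+ *	...
+ *	cpe_svc_shutdown(h);
+ *	cpe_svc_deregister(h, r);
+ *	cpe_svc_deinitialize(h);
+ */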
+#endif /*__CPE_SERVICES__*/
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/sound/soc/codecs/wcd-dsp-mgr.c	2019-01-22 16:16:29.543301102 +0100
@@ -0,0 +1,1231 @@
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/stringify.h>
+#include <linux/of.h>
+#include <linux/debugfs.h>
+#include <linux/component.h>
+#include <linux/dma-mapping.h>
+#include <soc/qcom/ramdump.h>
+#include <sound/wcd-dsp-mgr.h>
+#include "wcd-dsp-utils.h"
+
+/* Forward declarations */
+static char *wdsp_get_cmpnt_type_string(enum wdsp_cmpnt_type);
+
+/* Component related macros */
+#define WDSP_GET_COMPONENT(wdsp, x) (((x) >= WDSP_CMPNT_TYPE_MAX || (x) < 0) ? \
+					NULL : (&((wdsp)->cmpnts[(x)])))
+#define WDSP_GET_CMPNT_TYPE_STR(x) wdsp_get_cmpnt_type_string(x)
+
+/*
+ * These #defines give the bit position of each state in the status
+ * field. A set bit means that step completed; a cleared bit means the
+ * step either failed or has not happened yet.
+ */
+#define WDSP_STATUS_INITIALIZED   BIT(0)
+#define WDSP_STATUS_CODE_DLOADED  BIT(1)
+#define WDSP_STATUS_DATA_DLOADED  BIT(2)
+#define WDSP_STATUS_BOOTED        BIT(3)
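+/*
+ * For example, a fully booted DSP has all four bits set:
+ * WDSP_STATUS_INITIALIZED | WDSP_STATUS_CODE_DLOADED |
+ * WDSP_STATUS_DATA_DLOADED | WDSP_STATUS_BOOTED == 0x0f.
+ */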
+
+/* Helper macros for printing wdsp messages */
+#define WDSP_ERR(wdsp, fmt, ...)		\
+	dev_err(wdsp->mdev, "%s: " fmt "\n", __func__, ##__VA_ARGS__)
+#define WDSP_DBG(wdsp, fmt, ...)	\
+	dev_dbg(wdsp->mdev, "%s: " fmt "\n", __func__, ##__VA_ARGS__)
+
+/* Helper macros for locking */
+#define WDSP_MGR_MUTEX_LOCK(wdsp, lock)         \
+do {                                            \
+	WDSP_DBG(wdsp, "mutex_lock(%s)",        \
+		 __stringify_1(lock));          \
+	mutex_lock(&lock);                      \
+} while (0)
+
+#define WDSP_MGR_MUTEX_UNLOCK(wdsp, lock)       \
+do {                                            \
+	WDSP_DBG(wdsp, "mutex_unlock(%s)",      \
+		 __stringify_1(lock));          \
+	mutex_unlock(&lock);                    \
+} while (0)
+
+/* Helper macros for using status mask */
+#define WDSP_SET_STATUS(wdsp, state)                  \
+do {                                                  \
+	wdsp->status |= state;                        \
+	WDSP_DBG(wdsp, "set 0x%lx, new_state = 0x%x", \
+		 state, wdsp->status);                \
+} while (0)
+
+#define WDSP_CLEAR_STATUS(wdsp, state)                  \
+do {                                                    \
+	wdsp->status &= (~state);                       \
+	WDSP_DBG(wdsp, "clear 0x%lx, new_state = 0x%x", \
+		 state, wdsp->status);                  \
+} while (0)
+
+#define WDSP_STATUS_IS_SET(wdsp, state) (wdsp->status & state)
+
+/* SSR related status macros */
+#define WDSP_SSR_STATUS_WDSP_READY    BIT(0)
+#define WDSP_SSR_STATUS_CDC_READY     BIT(1)
+#define WDSP_SSR_STATUS_READY         \
+	(WDSP_SSR_STATUS_WDSP_READY | WDSP_SSR_STATUS_CDC_READY)
+#define WDSP_SSR_READY_WAIT_TIMEOUT   (10 * HZ)
+
+enum wdsp_ssr_type {
+
+	/* Init value, indicates there is no SSR in progress */
+	WDSP_SSR_TYPE_NO_SSR = 0,
+
+	/*
+	 * Indicates WDSP crashed. The manager driver internally
+	 * decides when to perform WDSP restart based on the
+	 * users of wdsp. Hence there is no explicit WDSP_UP.
+	 */
+	WDSP_SSR_TYPE_WDSP_DOWN,
+
+	/* Indicates codec hardware is down */
+	WDSP_SSR_TYPE_CDC_DOWN,
+
+	/* Indicates codec hardware is up, trigger to restart WDSP */
+	WDSP_SSR_TYPE_CDC_UP,
+};
+
+struct wdsp_cmpnt {
+
+	/* OF node of the phandle */
+	struct device_node *np;
+
+	/*
+	 * Child component's dev_name, should be set in DT for the child's
+	 * phandle if child's dev->of_node does not match the phandle->of_node
+	 */
+	const char *cdev_name;
+
+	/* Child component's device node */
+	struct device *cdev;
+
+	/* Private data that component may want back on callbacks */
+	void *priv_data;
+
+	/* Child ops */
+	struct wdsp_cmpnt_ops *ops;
+};
+
+struct wdsp_ramdump_data {
+
+	/* Ramdump device */
+	void *rd_dev;
+
+	/* DMA address of the dump */
+	dma_addr_t rd_addr;
+
+	/* Virtual address of the dump */
+	void *rd_v_addr;
+
+	/* Data provided through error interrupt */
+	struct wdsp_err_signal_arg err_data;
+};
+
+struct wdsp_mgr_priv {
+
+	/* Manager driver's struct device pointer */
+	struct device *mdev;
+
+	/* Match struct for component framework */
+	struct component_match *match;
+
+	/* Manager's ops/function callbacks */
+	struct wdsp_mgr_ops *ops;
+
+	/* Array to store information for all expected components */
+	struct wdsp_cmpnt cmpnts[WDSP_CMPNT_TYPE_MAX];
+
+	/* The filename of image to be downloaded */
+	const char *img_fname;
+
+	/* Keeps track of current state of manager driver */
+	u32 status;
+
+	/* Work to load the firmware image after component binding */
+	struct work_struct load_fw_work;
+
+	/* List of segments in image to be downloaded */
+	struct list_head *seg_list;
+
+	/* Base address of the image in memory */
+	u32 base_addr;
+
+	/* Instances using dsp */
+	int dsp_users;
+
+	/* Lock for serializing ops called by components */
+	struct mutex api_mutex;
+
+	struct wdsp_ramdump_data dump_data;
+
+	/* SSR related */
+	enum wdsp_ssr_type ssr_type;
+	struct mutex ssr_mutex;
+	struct work_struct ssr_work;
+	u16 ready_status;
+	struct completion ready_compl;
+
+	/* Debugfs related */
+	struct dentry *entry;
+	bool panic_on_error;
+};
+
+static char *wdsp_get_ssr_type_string(enum wdsp_ssr_type type)
+{
+	switch (type) {
+	case WDSP_SSR_TYPE_NO_SSR:
+		return "NO_SSR";
+	case WDSP_SSR_TYPE_WDSP_DOWN:
+		return "WDSP_DOWN";
+	case WDSP_SSR_TYPE_CDC_DOWN:
+		return "CDC_DOWN";
+	case WDSP_SSR_TYPE_CDC_UP:
+		return "CDC_UP";
+	default:
+		pr_err("%s: Invalid ssr_type %d\n",
+			__func__, type);
+		return "Invalid";
+	}
+}
+
+static char *wdsp_get_cmpnt_type_string(enum wdsp_cmpnt_type type)
+{
+	switch (type) {
+	case WDSP_CMPNT_CONTROL:
+		return "control";
+	case WDSP_CMPNT_IPC:
+		return "ipc";
+	case WDSP_CMPNT_TRANSPORT:
+		return "transport";
+	default:
+		pr_err("%s: Invalid component type %d\n",
+			__func__, type);
+		return "Invalid";
+	}
+}
+
+static void __wdsp_clr_ready_locked(struct wdsp_mgr_priv *wdsp,
+				    u16 value)
+{
+	wdsp->ready_status &= ~(value);
+	WDSP_DBG(wdsp, "ready_status = 0x%x", wdsp->ready_status);
+}
+
+static void __wdsp_set_ready_locked(struct wdsp_mgr_priv *wdsp,
+				    u16 value, bool mark_complete)
+{
+	wdsp->ready_status |= value;
+	WDSP_DBG(wdsp, "ready_status = 0x%x", wdsp->ready_status);
+
+	if (mark_complete &&
+	    wdsp->ready_status == WDSP_SSR_STATUS_READY) {
+		WDSP_DBG(wdsp, "marking ready completion");
+		complete(&wdsp->ready_compl);
+	}
+}
+
+static void wdsp_broadcast_event_upseq(struct wdsp_mgr_priv *wdsp,
+				       enum wdsp_event_type event,
+				       void *data)
+{
+	struct wdsp_cmpnt *cmpnt;
+	int i;
+
+	for (i = 0; i < WDSP_CMPNT_TYPE_MAX; i++) {
+		cmpnt = WDSP_GET_COMPONENT(wdsp, i);
+		if (cmpnt && cmpnt->ops && cmpnt->ops->event_handler)
+			cmpnt->ops->event_handler(cmpnt->cdev, cmpnt->priv_data,
+						  event, data);
+	}
+}
+
+static void wdsp_broadcast_event_downseq(struct wdsp_mgr_priv *wdsp,
+					 enum wdsp_event_type event,
+					 void *data)
+{
+	struct wdsp_cmpnt *cmpnt;
+	int i;
+
+	for (i = WDSP_CMPNT_TYPE_MAX - 1; i >= 0; i--) {
+		cmpnt = WDSP_GET_COMPONENT(wdsp, i);
+		if (cmpnt && cmpnt->ops && cmpnt->ops->event_handler)
+			cmpnt->ops->event_handler(cmpnt->cdev, cmpnt->priv_data,
+						  event, data);
+	}
+}
+
+static int wdsp_unicast_event(struct wdsp_mgr_priv *wdsp,
+			      enum wdsp_cmpnt_type type,
+			      enum wdsp_event_type event,
+			      void *data)
+{
+	struct wdsp_cmpnt *cmpnt;
+	int ret;
+
+	cmpnt = WDSP_GET_COMPONENT(wdsp, type);
+	if (cmpnt && cmpnt->ops && cmpnt->ops->event_handler) {
+		ret = cmpnt->ops->event_handler(cmpnt->cdev, cmpnt->priv_data,
+						event, data);
+	} else {
+		WDSP_ERR(wdsp, "no valid event_handler for %s",
+			 WDSP_GET_CMPNT_TYPE_STR(type));
+		ret = -EINVAL;
+	}
+
+	return ret;
+}
+
+static void wdsp_deinit_components(struct wdsp_mgr_priv *wdsp)
+{
+	struct wdsp_cmpnt *cmpnt;
+	int i;
+
+	for (i = WDSP_CMPNT_TYPE_MAX - 1; i >= 0; i--) {
+		cmpnt = WDSP_GET_COMPONENT(wdsp, i);
+		if (cmpnt && cmpnt->ops && cmpnt->ops->deinit)
+			cmpnt->ops->deinit(cmpnt->cdev, cmpnt->priv_data);
+	}
+}
+
+static int wdsp_init_components(struct wdsp_mgr_priv *wdsp)
+{
+	struct wdsp_cmpnt *cmpnt;
+	int fail_idx = WDSP_CMPNT_TYPE_MAX;
+	int i, ret = 0;
+
+	for (i = 0; i < WDSP_CMPNT_TYPE_MAX; i++) {
+
+		cmpnt = WDSP_GET_COMPONENT(wdsp, i);
+
+		/* Init is allowed to be NULL */
+		if (!cmpnt->ops || !cmpnt->ops->init)
+			continue;
+		ret = cmpnt->ops->init(cmpnt->cdev, cmpnt->priv_data);
+		if (ret) {
+			WDSP_ERR(wdsp, "Init failed (%d) for component %s",
+				 ret, WDSP_GET_CMPNT_TYPE_STR(i));
+			fail_idx = i;
+			break;
+		}
+	}
+
+	if (fail_idx < WDSP_CMPNT_TYPE_MAX) {
+		/* Undo init for already initialized components */
+		for (i = fail_idx - 1; i >= 0; i--) {
+			struct wdsp_cmpnt *cmpnt = WDSP_GET_COMPONENT(wdsp, i);
+
+			if (cmpnt->ops && cmpnt->ops->deinit)
+				cmpnt->ops->deinit(cmpnt->cdev,
+						   cmpnt->priv_data);
+		}
+	} else {
+		wdsp_broadcast_event_downseq(wdsp, WDSP_EVENT_POST_INIT, NULL);
+	}
+
+	return ret;
+}
+
+static int wdsp_load_each_segment(struct wdsp_mgr_priv *wdsp,
+				  struct wdsp_img_segment *seg)
+{
+	struct wdsp_img_section img_section;
+	int ret;
+
+	WDSP_DBG(wdsp,
+		 "base_addr 0x%x, split_fname %s, load_addr 0x%x, size 0x%zx",
+		 wdsp->base_addr, seg->split_fname, seg->load_addr, seg->size);
+
+	if (seg->load_addr < wdsp->base_addr) {
+		WDSP_ERR(wdsp, "Invalid addr 0x%x, base_addr = 0x%x",
+			 seg->load_addr, wdsp->base_addr);
+		return -EINVAL;
+	}
+
+	img_section.addr = seg->load_addr - wdsp->base_addr;
+	img_section.size = seg->size;
+	img_section.data = seg->data;
+
+	ret = wdsp_unicast_event(wdsp, WDSP_CMPNT_TRANSPORT,
+				 WDSP_EVENT_DLOAD_SECTION,
+				 &img_section);
+	if (IS_ERR_VALUE(ret))
+		WDSP_ERR(wdsp,
+			 "Failed, err = %d for base_addr = 0x%x split_fname = %s, load_addr = 0x%x, size = 0x%zx",
+			 ret, wdsp->base_addr, seg->split_fname,
+			 seg->load_addr, seg->size);
+	return ret;
+}
+
+static int wdsp_download_segments(struct wdsp_mgr_priv *wdsp,
+				  unsigned int type)
+{
+	struct wdsp_cmpnt *ctl;
+	struct wdsp_img_segment *seg = NULL;
+	enum wdsp_event_type pre, post;
+	long status;
+	int ret;
+
+	ctl = WDSP_GET_COMPONENT(wdsp, WDSP_CMPNT_CONTROL);
+
+	if (type == WDSP_ELF_FLAG_RE) {
+		pre = WDSP_EVENT_PRE_DLOAD_CODE;
+		post = WDSP_EVENT_POST_DLOAD_CODE;
+		status = WDSP_STATUS_CODE_DLOADED;
+	} else if (type == WDSP_ELF_FLAG_WRITE) {
+		pre = WDSP_EVENT_PRE_DLOAD_DATA;
+		post = WDSP_EVENT_POST_DLOAD_DATA;
+		status = WDSP_STATUS_DATA_DLOADED;
+	} else {
+		WDSP_ERR(wdsp, "Invalid type %u", type);
+		return -EINVAL;
+	}
+
+	ret = wdsp_get_segment_list(ctl->cdev, wdsp->img_fname,
+				    type, wdsp->seg_list, &wdsp->base_addr);
+	if (IS_ERR_VALUE(ret) ||
+	    list_empty(wdsp->seg_list)) {
+		WDSP_ERR(wdsp, "Error %d while getting image segments for type %d",
+			 ret, type);
+		wdsp_broadcast_event_downseq(wdsp, WDSP_EVENT_DLOAD_FAILED,
+					     NULL);
+		goto done;
+	}
+
+	/* Notify all components that image is about to be downloaded */
+	wdsp_broadcast_event_upseq(wdsp, pre, NULL);
+
+	/* Go through the list of segments and download one by one */
+	list_for_each_entry(seg, wdsp->seg_list, list) {
+		ret = wdsp_load_each_segment(wdsp, seg);
+		if (ret)
+			goto dload_error;
+	}
+
+	/* Flush the list before setting status and notifying components */
+	wdsp_flush_segment_list(wdsp->seg_list);
+
+	WDSP_SET_STATUS(wdsp, status);
+
+	/* Notify all components that image is downloaded */
+	wdsp_broadcast_event_downseq(wdsp, post, NULL);
+done:
+	return ret;
+
+dload_error:
+	wdsp_flush_segment_list(wdsp->seg_list);
+	wdsp_broadcast_event_downseq(wdsp, WDSP_EVENT_DLOAD_FAILED, NULL);
+
+	return ret;
+}
+
+static int wdsp_init_and_dload_code_sections(struct wdsp_mgr_priv *wdsp)
+{
+	int ret;
+	bool is_initialized;
+
+	is_initialized = WDSP_STATUS_IS_SET(wdsp, WDSP_STATUS_INITIALIZED);
+
+	if (!is_initialized) {
+		/* Components are not initialized yet, initialize them */
+		ret = wdsp_init_components(wdsp);
+		if (IS_ERR_VALUE(ret)) {
+			WDSP_ERR(wdsp, "INIT failed, err = %d", ret);
+			goto done;
+		}
+		WDSP_SET_STATUS(wdsp, WDSP_STATUS_INITIALIZED);
+	}
+
+	/* Download the read-execute sections of image */
+	ret = wdsp_download_segments(wdsp, WDSP_ELF_FLAG_RE);
+	if (IS_ERR_VALUE(ret)) {
+		WDSP_ERR(wdsp, "Error %d while downloading code sections", ret);
+		goto done;
+	}
+done:
+	return ret;
+}
+
+static void wdsp_load_fw_image(struct work_struct *work)
+{
+	struct wdsp_mgr_priv *wdsp;
+	int ret;
+
+	wdsp = container_of(work, struct wdsp_mgr_priv, load_fw_work);
+	if (!wdsp) {
+		pr_err("%s: Invalid private_data\n", __func__);
+		return;
+	}
+
+	ret = wdsp_init_and_dload_code_sections(wdsp);
+	if (IS_ERR_VALUE(ret))
+		WDSP_ERR(wdsp, "dload code sections failed, err = %d", ret);
+}
+
+static int wdsp_enable_dsp(struct wdsp_mgr_priv *wdsp)
+{
+	int ret;
+
+	/* Make sure wdsp is in good state */
+	if (!WDSP_STATUS_IS_SET(wdsp, WDSP_STATUS_CODE_DLOADED)) {
+		WDSP_ERR(wdsp, "WDSP in invalid state 0x%x", wdsp->status);
+		return -EINVAL;
+	}
+
+	/*
+	 * Acquire SSR mutex lock to make sure enablement of DSP
+	 * does not race with SSR handling.
+	 */
+	WDSP_MGR_MUTEX_LOCK(wdsp, wdsp->ssr_mutex);
+	/* Download the read-write sections of image */
+	ret = wdsp_download_segments(wdsp, WDSP_ELF_FLAG_WRITE);
+	if (IS_ERR_VALUE(ret)) {
+		WDSP_ERR(wdsp, "Data section download failed, err = %d", ret);
+		goto done;
+	}
+
+	wdsp_broadcast_event_upseq(wdsp, WDSP_EVENT_PRE_BOOTUP, NULL);
+
+	ret = wdsp_unicast_event(wdsp, WDSP_CMPNT_CONTROL,
+				 WDSP_EVENT_DO_BOOT, NULL);
+	if (IS_ERR_VALUE(ret)) {
+		WDSP_ERR(wdsp, "Failed to boot dsp, err = %d", ret);
+		WDSP_CLEAR_STATUS(wdsp, WDSP_STATUS_DATA_DLOADED);
+		goto done;
+	}
+
+	wdsp_broadcast_event_downseq(wdsp, WDSP_EVENT_POST_BOOTUP, NULL);
+	WDSP_SET_STATUS(wdsp, WDSP_STATUS_BOOTED);
+done:
+	WDSP_MGR_MUTEX_UNLOCK(wdsp, wdsp->ssr_mutex);
+	return ret;
+}
+
+static int wdsp_disable_dsp(struct wdsp_mgr_priv *wdsp)
+{
+	int ret;
+
+	WDSP_MGR_MUTEX_LOCK(wdsp, wdsp->ssr_mutex);
+
+	/*
+	 * If Disable happened while SSR is in progress, then set the SSR
+	 * ready status indicating WDSP is now ready. Ignore the disable
+	 * event here and let the SSR handler go through shutdown.
+	 */
+	if (wdsp->ssr_type != WDSP_SSR_TYPE_NO_SSR) {
+		__wdsp_set_ready_locked(wdsp, WDSP_SSR_STATUS_WDSP_READY, true);
+		WDSP_MGR_MUTEX_UNLOCK(wdsp, wdsp->ssr_mutex);
+		return 0;
+	}
+
+	WDSP_MGR_MUTEX_UNLOCK(wdsp, wdsp->ssr_mutex);
+
+	/* Make sure wdsp is in good state */
+	if (!WDSP_STATUS_IS_SET(wdsp, WDSP_STATUS_BOOTED)) {
+		WDSP_ERR(wdsp, "wdsp in invalid state 0x%x", wdsp->status);
+		ret = -EINVAL;
+		goto done;
+	}
+
+	wdsp_broadcast_event_downseq(wdsp, WDSP_EVENT_PRE_SHUTDOWN, NULL);
+	ret = wdsp_unicast_event(wdsp, WDSP_CMPNT_CONTROL,
+				 WDSP_EVENT_DO_SHUTDOWN, NULL);
+	if (IS_ERR_VALUE(ret)) {
+		WDSP_ERR(wdsp, "Failed to shutdown dsp, err = %d", ret);
+		goto done;
+	}
+
+	wdsp_broadcast_event_downseq(wdsp, WDSP_EVENT_POST_SHUTDOWN, NULL);
+	WDSP_CLEAR_STATUS(wdsp, WDSP_STATUS_BOOTED);
+
+	/* Data sections are to be downloaded per boot */
+	WDSP_CLEAR_STATUS(wdsp, WDSP_STATUS_DATA_DLOADED);
+done:
+	return ret;
+}
+
+static int wdsp_register_cmpnt_ops(struct device *wdsp_dev,
+				   struct device *cdev,
+				   void *priv_data,
+				   struct wdsp_cmpnt_ops *ops)
+{
+	struct wdsp_mgr_priv *wdsp;
+	struct wdsp_cmpnt *cmpnt;
+	int i, ret = 0;
+
+	if (!wdsp_dev || !cdev || !ops)
+		return -EINVAL;
+
+	wdsp = dev_get_drvdata(wdsp_dev);
+
+	WDSP_MGR_MUTEX_LOCK(wdsp, wdsp->api_mutex);
+
+	for (i = 0; i < WDSP_CMPNT_TYPE_MAX; i++) {
+		cmpnt = WDSP_GET_COMPONENT(wdsp, i);
+		if ((cdev->of_node && cdev->of_node == cmpnt->np) ||
+		    (cmpnt->cdev_name &&
+		     !strcmp(dev_name(cdev), cmpnt->cdev_name))) {
+			break;
+		}
+	}
+
+	if (i == WDSP_CMPNT_TYPE_MAX) {
+		WDSP_ERR(wdsp, "Failed to register component dev %s",
+			 dev_name(cdev));
+		ret = -EINVAL;
+		goto done;
+	}
+
+	cmpnt->cdev = cdev;
+	cmpnt->ops = ops;
+	cmpnt->priv_data = priv_data;
+done:
+	WDSP_MGR_MUTEX_UNLOCK(wdsp, wdsp->api_mutex);
+	return ret;
+}
+
+static struct device *wdsp_get_dev_for_cmpnt(struct device *wdsp_dev,
+					     enum wdsp_cmpnt_type type)
+{
+	struct wdsp_mgr_priv *wdsp;
+	struct wdsp_cmpnt *cmpnt;
+
+	if (!wdsp_dev || type >= WDSP_CMPNT_TYPE_MAX)
+		return NULL;
+
+	wdsp = dev_get_drvdata(wdsp_dev);
+	cmpnt = WDSP_GET_COMPONENT(wdsp, type);
+
+	return cmpnt->cdev;
+}
+
+static void wdsp_collect_ramdumps(struct wdsp_mgr_priv *wdsp)
+{
+	struct wdsp_img_section img_section;
+	struct wdsp_err_signal_arg *data = &wdsp->dump_data.err_data;
+	struct ramdump_segment rd_seg;
+	int ret = 0;
+
+	if (wdsp->ssr_type != WDSP_SSR_TYPE_WDSP_DOWN ||
+	    !data->mem_dumps_enabled) {
+		WDSP_DBG(wdsp, "cannot dump memory, ssr_type %s, dumps %s",
+			 wdsp_get_ssr_type_string(wdsp->ssr_type),
+			 !(data->mem_dumps_enabled) ? "disabled" : "enabled");
+		goto done;
+	}
+
+	if (data->dump_size == 0 ||
+	    data->remote_start_addr < wdsp->base_addr) {
+		WDSP_ERR(wdsp, "Invalid start addr 0x%x or dump_size 0x%zx",
+			 data->remote_start_addr, data->dump_size);
+		goto done;
+	}
+
+	if (!wdsp->dump_data.rd_dev) {
+		WDSP_ERR(wdsp, "Ramdump device is not setup");
+		goto done;
+	}
+
+	WDSP_DBG(wdsp, "base_addr 0x%x, dump_start_addr 0x%x, dump_size 0x%zx",
+		 wdsp->base_addr, data->remote_start_addr, data->dump_size);
+
+	/* Allocate memory for dumps */
+	wdsp->dump_data.rd_v_addr = dma_alloc_coherent(wdsp->mdev,
+						       data->dump_size,
+						       &wdsp->dump_data.rd_addr,
+						       GFP_KERNEL);
+	if (!wdsp->dump_data.rd_v_addr)
+		goto done;
+
+	img_section.addr = data->remote_start_addr - wdsp->base_addr;
+	img_section.size = data->dump_size;
+	img_section.data = wdsp->dump_data.rd_v_addr;
+
+	ret = wdsp_unicast_event(wdsp, WDSP_CMPNT_TRANSPORT,
+				 WDSP_EVENT_READ_SECTION,
+				 &img_section);
+	if (IS_ERR_VALUE(ret)) {
+		WDSP_ERR(wdsp, "Failed to read dumps, size 0x%zx at addr 0x%x",
+			 img_section.size, img_section.addr);
+		goto err_read_dumps;
+	}
+
+	/*
+	 * If panic_on_error flag is explicitly set through the debugfs,
+	 * then cause a BUG here to aid debugging.
+	 */
+	BUG_ON(wdsp->panic_on_error);
+
+	rd_seg.address = (unsigned long) wdsp->dump_data.rd_v_addr;
+	rd_seg.size = img_section.size;
+	rd_seg.v_address = wdsp->dump_data.rd_v_addr;
+
+	ret = do_ramdump(wdsp->dump_data.rd_dev, &rd_seg, 1);
+	if (IS_ERR_VALUE(ret))
+		WDSP_ERR(wdsp, "do_ramdump failed with error %d", ret);
+
+err_read_dumps:
+	dma_free_coherent(wdsp->mdev, data->dump_size,
+			  wdsp->dump_data.rd_v_addr, wdsp->dump_data.rd_addr);
+done:
+	return;
+}
+
+static void wdsp_ssr_work_fn(struct work_struct *work)
+{
+	struct wdsp_mgr_priv *wdsp;
+	int ret;
+
+	wdsp = container_of(work, struct wdsp_mgr_priv, ssr_work);
+	if (!wdsp) {
+		pr_err("%s: Invalid private_data\n", __func__);
+		return;
+	}
+
+	WDSP_MGR_MUTEX_LOCK(wdsp, wdsp->ssr_mutex);
+
+	/* Issue ramdumps and shutdown only if DSP is currently booted */
+	if (WDSP_STATUS_IS_SET(wdsp, WDSP_STATUS_BOOTED)) {
+		wdsp_collect_ramdumps(wdsp);
+		ret = wdsp_unicast_event(wdsp, WDSP_CMPNT_CONTROL,
+					 WDSP_EVENT_DO_SHUTDOWN, NULL);
+		if (IS_ERR_VALUE(ret))
+			WDSP_ERR(wdsp, "Failed WDSP shutdown, err = %d", ret);
+
+		wdsp_broadcast_event_downseq(wdsp, WDSP_EVENT_POST_SHUTDOWN,
+					     NULL);
+		WDSP_CLEAR_STATUS(wdsp, WDSP_STATUS_BOOTED);
+	}
+
+	WDSP_MGR_MUTEX_UNLOCK(wdsp, wdsp->ssr_mutex);
+	ret = wait_for_completion_timeout(&wdsp->ready_compl,
+					  WDSP_SSR_READY_WAIT_TIMEOUT);
+	WDSP_MGR_MUTEX_LOCK(wdsp, wdsp->ssr_mutex);
+	if (ret == 0) {
+		WDSP_ERR(wdsp, "wait_for_ready timed out, status = 0x%x",
+			 wdsp->ready_status);
+		goto done;
+	}
+
+	/* Data sections are to be downloaded per WDSP boot */
+	WDSP_CLEAR_STATUS(wdsp, WDSP_STATUS_DATA_DLOADED);
+
+	/*
+	 * Even though the code section could possibly be retained across
+	 * a DSP crash, re-download it anyway to avoid carrying over any
+	 * memory corruption from the previous crash.
+	 */
+	WDSP_CLEAR_STATUS(wdsp, WDSP_STATUS_CODE_DLOADED);
+
+	/* If codec restarted, then all components must be re-initialized */
+	if (wdsp->ssr_type == WDSP_SSR_TYPE_CDC_UP) {
+		wdsp_deinit_components(wdsp);
+		WDSP_CLEAR_STATUS(wdsp, WDSP_STATUS_INITIALIZED);
+	}
+
+	ret = wdsp_init_and_dload_code_sections(wdsp);
+	if (IS_ERR_VALUE(ret)) {
+		WDSP_ERR(wdsp, "Failed to dload code sections err = %d",
+			 ret);
+		goto done;
+	}
+
+	/* SSR handling is finished, mark SSR type as NO_SSR */
+	wdsp->ssr_type = WDSP_SSR_TYPE_NO_SSR;
+done:
+	WDSP_MGR_MUTEX_UNLOCK(wdsp, wdsp->ssr_mutex);
+}
+
+static int wdsp_ssr_handler(struct wdsp_mgr_priv *wdsp, void *arg,
+			    enum wdsp_ssr_type ssr_type)
+{
+	enum wdsp_ssr_type current_ssr_type;
+	struct wdsp_err_signal_arg *err_data;
+
+	WDSP_MGR_MUTEX_LOCK(wdsp, wdsp->ssr_mutex);
+
+	current_ssr_type = wdsp->ssr_type;
+	WDSP_DBG(wdsp, "Current ssr_type %s, handling ssr_type %s",
+		 wdsp_get_ssr_type_string(current_ssr_type),
+		 wdsp_get_ssr_type_string(ssr_type));
+	wdsp->ssr_type = ssr_type;
+
+	if (arg) {
+		err_data = (struct wdsp_err_signal_arg *) arg;
+		memcpy(&wdsp->dump_data.err_data, err_data,
+		       sizeof(*err_data));
+	} else {
+		memset(&wdsp->dump_data.err_data, 0,
+		       sizeof(wdsp->dump_data.err_data));
+	}
+
+	switch (ssr_type) {
+
+	case WDSP_SSR_TYPE_WDSP_DOWN:
+		__wdsp_clr_ready_locked(wdsp, WDSP_SSR_STATUS_WDSP_READY);
+		wdsp_broadcast_event_downseq(wdsp, WDSP_EVENT_PRE_SHUTDOWN,
+					     NULL);
+		schedule_work(&wdsp->ssr_work);
+		break;
+
+	case WDSP_SSR_TYPE_CDC_DOWN:
+		__wdsp_clr_ready_locked(wdsp, WDSP_SSR_STATUS_CDC_READY);
+		/*
+		 * If DSP is booted when CDC_DOWN is received, it needs
+		 * to be shut down.
+		 */
+		if (WDSP_STATUS_IS_SET(wdsp, WDSP_STATUS_BOOTED)) {
+			__wdsp_clr_ready_locked(wdsp,
+						WDSP_SSR_STATUS_WDSP_READY);
+			wdsp_broadcast_event_downseq(wdsp,
+						     WDSP_EVENT_PRE_SHUTDOWN,
+						     NULL);
+		}
+
+		schedule_work(&wdsp->ssr_work);
+		break;
+
+	case WDSP_SSR_TYPE_CDC_UP:
+		__wdsp_set_ready_locked(wdsp, WDSP_SSR_STATUS_CDC_READY, true);
+		break;
+
+	default:
+		WDSP_ERR(wdsp, "undefined ssr_type %d", ssr_type);
+		/* Revert back the ssr_type for undefined events */
+		wdsp->ssr_type = current_ssr_type;
+		break;
+	}
+
+	WDSP_MGR_MUTEX_UNLOCK(wdsp, wdsp->ssr_mutex);
+
+	return 0;
+}
+
+static int wdsp_signal_handler(struct device *wdsp_dev,
+			       enum wdsp_signal signal, void *arg)
+{
+	struct wdsp_mgr_priv *wdsp;
+	int ret;
+
+	if (!wdsp_dev)
+		return -EINVAL;
+
+	wdsp = dev_get_drvdata(wdsp_dev);
+	WDSP_MGR_MUTEX_LOCK(wdsp, wdsp->api_mutex);
+
+	WDSP_DBG(wdsp, "Raised signal %d", signal);
+
+	switch (signal) {
+	case WDSP_IPC1_INTR:
+		ret = wdsp_unicast_event(wdsp, WDSP_CMPNT_IPC,
+					 WDSP_EVENT_IPC1_INTR, NULL);
+		break;
+	case WDSP_ERR_INTR:
+		ret = wdsp_ssr_handler(wdsp, arg, WDSP_SSR_TYPE_WDSP_DOWN);
+		break;
+	case WDSP_CDC_DOWN_SIGNAL:
+		ret = wdsp_ssr_handler(wdsp, arg, WDSP_SSR_TYPE_CDC_DOWN);
+		break;
+	case WDSP_CDC_UP_SIGNAL:
+		ret = wdsp_ssr_handler(wdsp, arg, WDSP_SSR_TYPE_CDC_UP);
+		break;
+	default:
+		ret = -EINVAL;
+		break;
+	}
+
+	if (IS_ERR_VALUE(ret))
+		WDSP_ERR(wdsp, "handling signal %d failed with error %d",
+			 signal, ret);
+	WDSP_MGR_MUTEX_UNLOCK(wdsp, wdsp->api_mutex);
+
+	return ret;
+}
+
+static int wdsp_vote_for_dsp(struct device *wdsp_dev,
+			     bool vote)
+{
+	struct wdsp_mgr_priv *wdsp;
+	int ret = 0;
+
+	if (!wdsp_dev)
+		return -EINVAL;
+
+	wdsp = dev_get_drvdata(wdsp_dev);
+
+	WDSP_MGR_MUTEX_LOCK(wdsp, wdsp->api_mutex);
+	WDSP_DBG(wdsp, "request %s, current users = %d",
+		 vote ? "enable" : "disable", wdsp->dsp_users);
+
+	if (vote) {
+		wdsp->dsp_users++;
+		if (wdsp->dsp_users == 1)
+			ret = wdsp_enable_dsp(wdsp);
+	} else {
+		if (wdsp->dsp_users == 0)
+			goto done;
+
+		wdsp->dsp_users--;
+		if (wdsp->dsp_users == 0)
+			ret = wdsp_disable_dsp(wdsp);
+	}
+
+	if (IS_ERR_VALUE(ret))
+		WDSP_DBG(wdsp, "wdsp %s failed, err = %d",
+			 vote ? "enable" : "disable", ret);
+
+done:
+	WDSP_MGR_MUTEX_UNLOCK(wdsp, wdsp->api_mutex);
+	return ret;
+}
+
+static int wdsp_suspend(struct device *wdsp_dev)
+{
+	struct wdsp_mgr_priv *wdsp;
+	int rc = 0, i;
+
+	if (!wdsp_dev) {
+		pr_err("%s: Invalid handle to device\n", __func__);
+		return -EINVAL;
+	}
+
+	wdsp = dev_get_drvdata(wdsp_dev);
+
+	for (i = WDSP_CMPNT_TYPE_MAX - 1; i >= 0; i--) {
+		rc = wdsp_unicast_event(wdsp, i, WDSP_EVENT_SUSPEND, NULL);
+		if (rc < 0) {
+			WDSP_ERR(wdsp, "component %s failed to suspend",
+				 WDSP_GET_CMPNT_TYPE_STR(i));
+			break;
+		}
+	}
+
+	return rc;
+}
+
+static int wdsp_resume(struct device *wdsp_dev)
+{
+	struct wdsp_mgr_priv *wdsp;
+	int rc = 0, i;
+
+	if (!wdsp_dev) {
+		pr_err("%s: Invalid handle to device\n", __func__);
+		return -EINVAL;
+	}
+
+	wdsp = dev_get_drvdata(wdsp_dev);
+
+	for (i = 0; i < WDSP_CMPNT_TYPE_MAX; i++) {
+		rc = wdsp_unicast_event(wdsp, i, WDSP_EVENT_RESUME, NULL);
+		if (rc < 0) {
+			WDSP_ERR(wdsp, "component %s failed to resume",
+				 WDSP_GET_CMPNT_TYPE_STR(i));
+			break;
+		}
+	}
+
+	return rc;
+}
+
+static struct wdsp_mgr_ops wdsp_ops = {
+	.register_cmpnt_ops = wdsp_register_cmpnt_ops,
+	.get_dev_for_cmpnt = wdsp_get_dev_for_cmpnt,
+	.signal_handler = wdsp_signal_handler,
+	.vote_for_dsp = wdsp_vote_for_dsp,
+	.suspend = wdsp_suspend,
+	.resume = wdsp_resume,
+};
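+
+/*
+ * This ops table is what each child component receives through
+ * component_bind_all() in wdsp_mgr_bind() below; children call back
+ * into the manager (register_cmpnt_ops, vote_for_dsp, signal_handler,
+ * ...) through these pointers.
+ */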
+
+static int wdsp_mgr_compare_of(struct device *dev, void *data)
+{
+	struct wdsp_cmpnt *cmpnt = data;
+
+	/*
+	 * First try to match based on of_node, if of_node is not
+	 * present, try to match on the dev_name
+	 */
+	return ((dev->of_node && dev->of_node == cmpnt->np) ||
+		(cmpnt->cdev_name &&
+		 !strcmp(dev_name(dev), cmpnt->cdev_name)));
+}
+
+static void wdsp_mgr_debugfs_init(struct wdsp_mgr_priv *wdsp)
+{
+	wdsp->entry = debugfs_create_dir("wdsp_mgr", NULL);
+	if (IS_ERR_OR_NULL(wdsp->entry))
+		return;
+
+	debugfs_create_bool("panic_on_error", S_IRUGO | S_IWUSR,
+			    wdsp->entry, &wdsp->panic_on_error);
+}
+
+static void wdsp_mgr_debugfs_remove(struct wdsp_mgr_priv *wdsp)
+{
+	debugfs_remove_recursive(wdsp->entry);
+	wdsp->entry = NULL;
+}
+
+static int wdsp_mgr_bind(struct device *dev)
+{
+	struct wdsp_mgr_priv *wdsp = dev_get_drvdata(dev);
+	struct wdsp_cmpnt *cmpnt;
+	int ret, idx;
+
+	wdsp->ops = &wdsp_ops;
+
+	/* Setup ramdump device */
+	wdsp->dump_data.rd_dev = create_ramdump_device("wdsp", dev);
+	if (!wdsp->dump_data.rd_dev)
+		dev_info(dev, "%s: create_ramdump_device failed\n", __func__);
+
+	ret = component_bind_all(dev, wdsp->ops);
+	if (IS_ERR_VALUE(ret))
+		WDSP_ERR(wdsp, "component_bind_all failed %d", ret);
+
+	/* Make sure all components registered ops */
+	for (idx = 0; idx < WDSP_CMPNT_TYPE_MAX; idx++) {
+		cmpnt = WDSP_GET_COMPONENT(wdsp, idx);
+		if (!cmpnt->cdev || !cmpnt->ops) {
+			WDSP_ERR(wdsp, "%s did not register ops",
+				 WDSP_GET_CMPNT_TYPE_STR(idx));
+			ret = -EINVAL;
+			component_unbind_all(dev, wdsp->ops);
+			break;
+		}
+	}
+
+	wdsp_mgr_debugfs_init(wdsp);
+
+	/* Schedule the work to download image if binding was successful. */
+	if (!ret)
+		schedule_work(&wdsp->load_fw_work);
+
+	return ret;
+}
+
+static void wdsp_mgr_unbind(struct device *dev)
+{
+	struct wdsp_mgr_priv *wdsp = dev_get_drvdata(dev);
+	struct wdsp_cmpnt *cmpnt;
+	int idx;
+
+	component_unbind_all(dev, wdsp->ops);
+
+	wdsp_mgr_debugfs_remove(wdsp);
+
+	if (wdsp->dump_data.rd_dev) {
+		destroy_ramdump_device(wdsp->dump_data.rd_dev);
+		wdsp->dump_data.rd_dev = NULL;
+	}
+
+	/* Clear all status bits */
+	wdsp->status = 0x00;
+
+	/* clean up the components */
+	for (idx = 0; idx < WDSP_CMPNT_TYPE_MAX; idx++) {
+		cmpnt = WDSP_GET_COMPONENT(wdsp, idx);
+		cmpnt->cdev = NULL;
+		cmpnt->ops = NULL;
+		cmpnt->priv_data = NULL;
+	}
+}
+
+static const struct component_master_ops wdsp_master_ops = {
+	.bind = wdsp_mgr_bind,
+	.unbind = wdsp_mgr_unbind,
+};
+
+static void *wdsp_mgr_parse_phandle(struct wdsp_mgr_priv *wdsp,
+				    int index)
+{
+	struct device *mdev = wdsp->mdev;
+	struct device_node *np;
+	struct wdsp_cmpnt *cmpnt = NULL;
+	struct of_phandle_args pargs;
+	u32 value;
+	int ret;
+
+	ret = of_parse_phandle_with_fixed_args(mdev->of_node,
+					      "qcom,wdsp-components", 1,
+					      index, &pargs);
+	if (ret) {
+		WDSP_ERR(wdsp, "parse_phandle at index %d failed %d",
+			 index, ret);
+		return NULL;
+	}
+
+	np = pargs.np;
+	value = pargs.args[0];
+
+	if (value >= WDSP_CMPNT_TYPE_MAX) {
+		WDSP_ERR(wdsp, "invalid phandle_arg to of_node %s", np->name);
+		goto done;
+	}
+
+	cmpnt = WDSP_GET_COMPONENT(wdsp, value);
+	if (cmpnt->np || cmpnt->cdev_name) {
+		WDSP_ERR(wdsp, "cmpnt %d already added", value);
+		cmpnt = NULL;
+		goto done;
+	}
+
+	cmpnt->np = np;
+	of_property_read_string(np, "qcom,wdsp-cmpnt-dev-name",
+				&cmpnt->cdev_name);
+done:
+	of_node_put(np);
+	return cmpnt;
+}
+
+static int wdsp_mgr_parse_dt_entries(struct wdsp_mgr_priv *wdsp)
+{
+	struct device *dev = wdsp->mdev;
+	void *match_data;
+	int ph_idx, ret;
+
+	ret = of_property_read_string(dev->of_node, "qcom,img-filename",
+				      &wdsp->img_fname);
+	if (IS_ERR_VALUE(ret)) {
+		WDSP_ERR(wdsp, "Reading property %s failed, error = %d",
+			 "qcom,img-filename", ret);
+		return ret;
+	}
+
+	ret = of_count_phandle_with_args(dev->of_node,
+					 "qcom,wdsp-components",
+					 NULL);
+	if (ret == -ENOENT) {
+		WDSP_ERR(wdsp, "Property %s not defined in DT",
+			 "qcom,wdsp-components");
+		goto done;
+	} else if (ret != WDSP_CMPNT_TYPE_MAX * 2) {
+		WDSP_ERR(wdsp, "Invalid phandle + arg count %d, expected %d",
+			 ret, WDSP_CMPNT_TYPE_MAX * 2);
+		ret = -EINVAL;
+		goto done;
+	}
+
+	ret = 0;
+
+	for (ph_idx = 0; ph_idx < WDSP_CMPNT_TYPE_MAX; ph_idx++) {
+
+		match_data = wdsp_mgr_parse_phandle(wdsp, ph_idx);
+		if (!match_data) {
+			WDSP_ERR(wdsp, "component not found at idx %d", ph_idx);
+			ret = -EINVAL;
+			goto done;
+		}
+
+		component_match_add(dev, &wdsp->match,
+				    wdsp_mgr_compare_of, match_data);
+	}
+
+done:
+	return ret;
+}
+
+static int wdsp_mgr_probe(struct platform_device *pdev)
+{
+	struct wdsp_mgr_priv *wdsp;
+	struct device *mdev = &pdev->dev;
+	int ret;
+
+	wdsp = devm_kzalloc(mdev, sizeof(*wdsp), GFP_KERNEL);
+	if (!wdsp)
+		return -ENOMEM;
+	wdsp->mdev = mdev;
+	wdsp->seg_list = devm_kzalloc(mdev, sizeof(struct list_head),
+				      GFP_KERNEL);
+	if (!wdsp->seg_list) {
+		devm_kfree(mdev, wdsp);
+		return -ENOMEM;
+	}
+
+	ret = wdsp_mgr_parse_dt_entries(wdsp);
+	if (ret)
+		goto err_dt_parse;
+
+	INIT_WORK(&wdsp->load_fw_work, wdsp_load_fw_image);
+	INIT_LIST_HEAD(wdsp->seg_list);
+	mutex_init(&wdsp->api_mutex);
+	mutex_init(&wdsp->ssr_mutex);
+	wdsp->ssr_type = WDSP_SSR_TYPE_NO_SSR;
+	wdsp->ready_status = WDSP_SSR_STATUS_READY;
+	INIT_WORK(&wdsp->ssr_work, wdsp_ssr_work_fn);
+	init_completion(&wdsp->ready_compl);
+	arch_setup_dma_ops(wdsp->mdev, 0, 0, NULL, 0);
+	dev_set_drvdata(mdev, wdsp);
+
+	ret = component_master_add_with_match(mdev, &wdsp_master_ops,
+					      wdsp->match);
+	if (IS_ERR_VALUE(ret)) {
+		WDSP_ERR(wdsp, "Failed to add master, err = %d", ret);
+		goto err_master_add;
+	}
+
+	return 0;
+
+err_master_add:
+	mutex_destroy(&wdsp->api_mutex);
+	mutex_destroy(&wdsp->ssr_mutex);
+err_dt_parse:
+	devm_kfree(mdev, wdsp->seg_list);
+	devm_kfree(mdev, wdsp);
+	dev_set_drvdata(mdev, NULL);
+
+	return ret;
+}
+
+static int wdsp_mgr_remove(struct platform_device *pdev)
+{
+	struct device *mdev = &pdev->dev;
+	struct wdsp_mgr_priv *wdsp = dev_get_drvdata(mdev);
+
+	component_master_del(mdev, &wdsp_master_ops);
+
+	mutex_destroy(&wdsp->api_mutex);
+	mutex_destroy(&wdsp->ssr_mutex);
+	devm_kfree(mdev, wdsp->seg_list);
+	devm_kfree(mdev, wdsp);
+	dev_set_drvdata(mdev, NULL);
+
+	return 0;
+}
+
+static const struct of_device_id wdsp_mgr_dt_match[] = {
+	{.compatible = "qcom,wcd-dsp-mgr" },
+	{ }
+};
+
+static struct platform_driver wdsp_mgr_driver = {
+	.driver = {
+		.name = "wcd-dsp-mgr",
+		.owner = THIS_MODULE,
+		.of_match_table = of_match_ptr(wdsp_mgr_dt_match),
+	},
+	.probe = wdsp_mgr_probe,
+	.remove = wdsp_mgr_remove,
+};
+module_platform_driver(wdsp_mgr_driver);
+
+MODULE_DESCRIPTION("WCD DSP manager driver");
+MODULE_DEVICE_TABLE(of, wdsp_mgr_dt_match);
+MODULE_LICENSE("GPL v2");
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/sound/soc/codecs/wcd-dsp-utils.c	2019-01-22 16:16:29.543301102 +0100
@@ -0,0 +1,221 @@
+/*
+ * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/firmware.h>
+#include <linux/elf.h>
+#include <linux/slab.h>
+#include <linux/list.h>
+#include "wcd-dsp-utils.h"
+
+static bool wdsp_is_valid_elf_hdr(const struct elf32_hdr *ehdr,
+				  size_t fw_size)
+{
+	if (fw_size < sizeof(*ehdr)) {
+		pr_err("%s: Firmware too small\n", __func__);
+		goto elf_check_fail;
+	}
+
+	if (memcmp(ehdr->e_ident, ELFMAG, SELFMAG) != 0) {
+		pr_err("%s: Not an ELF file\n", __func__);
+		goto elf_check_fail;
+	}
+
+	if (ehdr->e_type != ET_EXEC && ehdr->e_type != ET_DYN) {
+		pr_err("%s: Not an executable image\n", __func__);
+		goto elf_check_fail;
+	}
+
+	if (ehdr->e_phnum == 0) {
+		pr_err("%s: no segments to load\n", __func__);
+		goto elf_check_fail;
+	}
+
+	if (sizeof(struct elf32_phdr) * ehdr->e_phnum +
+	    sizeof(struct elf32_hdr) > fw_size) {
+		pr_err("%s: Too small MDT file\n", __func__);
+		goto elf_check_fail;
+	}
+
+	return true;
+
+elf_check_fail:
+	return false;
+}
+
+static int wdsp_add_segment_to_list(struct device *dev,
+				    const char *img_fname,
+				    const struct elf32_phdr *phdr,
+				    int phdr_idx,
+				    struct list_head *seg_list)
+{
+	struct wdsp_img_segment *seg;
+	int ret = 0;
+
+	/* Do not load segments with zero size */
+	if (phdr->p_filesz == 0 || phdr->p_memsz == 0)
+		goto done;
+
+	seg = kzalloc(sizeof(*seg), GFP_KERNEL);
+	if (!seg) {
+		ret = -ENOMEM;
+		goto done;
+	}
+
+	snprintf(seg->split_fname, sizeof(seg->split_fname),
+		 "%s.b%02d", img_fname, phdr_idx);
+	ret = request_firmware(&seg->split_fw, seg->split_fname, dev);
+	if (IS_ERR_VALUE(ret)) {
+		dev_err(dev, "%s: firmware %s not found\n",
+			__func__, seg->split_fname);
+		goto bad_seg;
+	}
+
+	seg->load_addr = phdr->p_paddr;
+	seg->size = phdr->p_filesz;
+	seg->data = (u8 *) seg->split_fw->data;
+
+	list_add_tail(&seg->list, seg_list);
+done:
+	return ret;
+bad_seg:
+	kfree(seg);
+	return ret;
+}
+
+/*
+ * wdsp_flush_segment_list: Flush the list of segments
+ * @seg_list: List of segments to be flushed
+ * This API will traverse through the list of segments provided in
+ * seg_list, release the firmware for each segment and delete the
+ * segment from the list.
+ */
+void wdsp_flush_segment_list(struct list_head *seg_list)
+{
+	struct wdsp_img_segment *seg, *next;
+
+	list_for_each_entry_safe(seg, next, seg_list, list) {
+		release_firmware(seg->split_fw);
+		list_del(&seg->list);
+		kfree(seg);
+	}
+}
+EXPORT_SYMBOL(wdsp_flush_segment_list);
+
+/*
+ * wdsp_get_segment_list: Get the list of requested segments
+ * @dev: struct device pointer of caller
+ * @img_fname: Image name for the mdt and split firmware files
+ * @segment_type: Requested segment type, should be either
+ *		  WDSP_ELF_FLAG_RE or WDSP_ELF_FLAG_WRITE
+ * @seg_list: An initialized list head to return the list of segments
+ * @entry_point: Pointer to return the entry point of the image
+ * This API will parse the mdt file for img_fname, create a
+ * struct wdsp_img_segment for each segment that matches segment_type,
+ * and add each structure to the list pointed to by seg_list.
+ */
+int wdsp_get_segment_list(struct device *dev,
+			  const char *img_fname,
+			  unsigned int segment_type,
+			  struct list_head *seg_list,
+			  u32 *entry_point)
+{
+	const struct firmware *fw;
+	const struct elf32_hdr *ehdr;
+	const struct elf32_phdr *phdr;
+	const u8 *elf_ptr;
+	char mdt_name[WDSP_IMG_NAME_LEN_MAX];
+	int ret, phdr_idx;
+	bool segment_match;
+
+	if (!dev) {
+		ret = -EINVAL;
+		pr_err("%s: Invalid device handle\n", __func__);
+		goto done;
+	}
+
+	if (!img_fname || !seg_list || !entry_point) {
+		ret = -EINVAL;
+		dev_err(dev, "%s: Invalid input params\n",
+			__func__);
+		goto done;
+	}
+
+	if (segment_type != WDSP_ELF_FLAG_RE &&
+	    segment_type != WDSP_ELF_FLAG_WRITE) {
+		dev_err(dev, "%s: Invalid request for segment_type %d\n",
+			__func__, segment_type);
+		ret = -EINVAL;
+		goto done;
+	}
+
+	snprintf(mdt_name, sizeof(mdt_name), "%s.mdt", img_fname);
+	ret = request_firmware(&fw, mdt_name, dev);
+	if (IS_ERR_VALUE(ret)) {
+		dev_err(dev, "%s: firmware %s not found\n",
+			__func__, mdt_name);
+		goto done;
+	}
+
+	ehdr = (struct elf32_hdr *) fw->data;
+	if (!wdsp_is_valid_elf_hdr(ehdr, fw->size)) {
+		dev_err(dev, "%s: fw mdt %s is invalid\n",
+			__func__, mdt_name);
+		ret = -EINVAL;
+		goto bad_elf;
+	}
+	/* Only dereference header fields once the header is validated */
+	*entry_point = ehdr->e_entry;
+
+	elf_ptr = fw->data + sizeof(*ehdr);
+	for (phdr_idx = 0; phdr_idx < ehdr->e_phnum; phdr_idx++) {
+		phdr = (struct elf32_phdr *) elf_ptr;
+		segment_match = false;
+
+		switch (segment_type) {
+		case WDSP_ELF_FLAG_RE:
+			/*
+			 * Flag can be READ or EXECUTE or both but
+			 * WRITE flag should not be set.
+			 */
+			if ((phdr->p_flags & segment_type) &&
+			    !(phdr->p_flags & WDSP_ELF_FLAG_WRITE))
+				segment_match = true;
+			break;
+		case WDSP_ELF_FLAG_WRITE:
+			/*
+			 * If WRITE flag is set, other flags do not
+			 * matter.
+			 */
+			if (phdr->p_flags & segment_type)
+				segment_match = true;
+			break;
+		}
+
+		if (segment_match) {
+			ret = wdsp_add_segment_to_list(dev, img_fname, phdr,
+						       phdr_idx, seg_list);
+			if (IS_ERR_VALUE(ret)) {
+				wdsp_flush_segment_list(seg_list);
+				goto bad_elf;
+			}
+		}
+		elf_ptr = elf_ptr + sizeof(*phdr);
+	}
+
+bad_elf:
+	release_firmware(fw);
+done:
+	return ret;
+}
+EXPORT_SYMBOL(wdsp_get_segment_list);
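+
+/*
+ * Typical caller pattern, sketched from the usage in wcd-dsp-mgr.c
+ * above ("img" and load() are placeholders, not real helpers):
+ *
+ *	LIST_HEAD(seg_list);
+ *	struct wdsp_img_segment *seg;
+ *	u32 entry;
+ *
+ *	ret = wdsp_get_segment_list(dev, "img", WDSP_ELF_FLAG_RE,
+ *				    &seg_list, &entry);
+ *	if (!ret)
+ *		list_for_each_entry(seg, &seg_list, list)
+ *			load(seg);
+ *	wdsp_flush_segment_list(&seg_list);
+ */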
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/sound/soc/codecs/wcd-dsp-utils.h	2019-01-22 16:16:29.543301102 +0100
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __WCD_DSP_UTILS_H__
+#define __WCD_DSP_UTILS_H__
+
+#define WDSP_IMG_NAME_LEN_MAX    64
+
+#define WDSP_ELF_FLAG_EXECUTE    (1 << 0)
+#define WDSP_ELF_FLAG_WRITE      (1 << 1)
+#define WDSP_ELF_FLAG_READ       (1 << 2)
+
+#define WDSP_ELF_FLAG_RE (WDSP_ELF_FLAG_READ | WDSP_ELF_FLAG_EXECUTE)
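+
+/*
+ * These flag values match the ELF program header p_flags bits
+ * (PF_X = 0x1, PF_W = 0x2, PF_R = 0x4), which is why wcd-dsp-utils.c
+ * can test them directly against phdr->p_flags.
+ */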
+
+struct wdsp_img_segment {
+
+	/* Firmware for the split image */
+	const struct firmware *split_fw;
+
+	/* Name of the split firmware file */
+	char split_fname[WDSP_IMG_NAME_LEN_MAX];
+
+	/* Address where the segment is to be loaded */
+	u32 load_addr;
+
+	/* Buffer to hold the data to be loaded */
+	u8 *data;
+
+	/* Size of the data to be loaded */
+	size_t size;
+
+	/* List node pointing to next segment */
+	struct list_head list;
+};
+
+int wdsp_get_segment_list(struct device *, const char *,
+			  unsigned int, struct list_head *,
+			  u32 *);
+void wdsp_flush_segment_list(struct list_head *);
+
+#endif /* __WCD_DSP_UTILS_H__ */
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/sound/soc/codecs/wcd-mbhc-v2.c	2019-10-29 09:26:26.121227428 +0100
@@ -0,0 +1,3041 @@
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/of_gpio.h>
+#include <linux/platform_device.h>
+#include <linux/device.h>
+#include <linux/printk.h>
+#include <linux/ratelimit.h>
+#include <linux/list.h>
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include <linux/pm_runtime.h>
+#include <linux/kernel.h>
+#include <linux/input.h>
+#include <linux/firmware.h>
+#include <linux/completion.h>
+#include <linux/mfd/msm-cdc-pinctrl.h>
+#include <sound/soc.h>
+#include <sound/jack.h>
+#include "wcd-mbhc-v2.h"
+#include "wcdcal-hwdep.h"
+
+#define WCD_MBHC_JACK_MASK (SND_JACK_HEADSET | SND_JACK_OC_HPHL | \
+			   SND_JACK_OC_HPHR | SND_JACK_LINEOUT | \
+			   SND_JACK_MECHANICAL | SND_JACK_MICROPHONE2 | \
+			   SND_JACK_UNSUPPORTED)
+
+#define WCD_MBHC_JACK_BUTTON_MASK (SND_JACK_BTN_0 | SND_JACK_BTN_1 | \
+				  SND_JACK_BTN_2 | SND_JACK_BTN_3 | \
+				  SND_JACK_BTN_4 | SND_JACK_BTN_5)
+#define OCP_ATTEMPT 20
+#define HS_DETECT_PLUG_TIME_MS (3 * 1000)
+#define SPECIAL_HS_DETECT_TIME_MS (2 * 1000)
+#define MBHC_BUTTON_PRESS_THRESHOLD_MIN 250
+#define GND_MIC_SWAP_THRESHOLD 4
+#define WCD_FAKE_REMOVAL_MIN_PERIOD_MS 100
+#define HS_VREF_MIN_VAL 1400
+#define FW_READ_ATTEMPTS 15
+#define FW_READ_TIMEOUT 4000000
+#define FAKE_REM_RETRY_ATTEMPTS 3
+#define MAX_IMPED 60000
+
+#define WCD_MBHC_BTN_PRESS_COMPL_TIMEOUT_MS  50
+#define ANC_DETECT_RETRY_CNT 7
+#define WCD_MBHC_SPL_HS_CNT  1
+
+static int det_extn_cable_en;
+module_param(det_extn_cable_en, int,
+		S_IRUGO | S_IWUSR | S_IWGRP);
+MODULE_PARM_DESC(det_extn_cable_en, "enable/disable extn cable detect");
+
+enum wcd_mbhc_cs_mb_en_flag {
+	WCD_MBHC_EN_CS = 0,
+	WCD_MBHC_EN_MB,
+	WCD_MBHC_EN_PULLUP,
+	WCD_MBHC_EN_NONE,
+};
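+
+/*
+ * Shorthand used in this file: CS refers to the button-detect current
+ * source (WCD_MBHC_BTN_ISRC_CTL) and MB to the micbias supply
+ * (WCD_MBHC_MICB_CTRL); see wcd_enable_curr_micbias() below.
+ */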
+
+static void wcd_mbhc_jack_report(struct wcd_mbhc *mbhc,
+				struct snd_soc_jack *jack, int status, int mask)
+{
+	snd_soc_jack_report(jack, status, mask);
+}
+
+static void __hphocp_off_report(struct wcd_mbhc *mbhc, u32 jack_status,
+				int irq)
+{
+	struct snd_soc_codec *codec = mbhc->codec;
+
+	dev_dbg(codec->dev, "%s: clear ocp status %x\n",
+		__func__, jack_status);
+
+	if (mbhc->hph_status & jack_status) {
+		mbhc->hph_status &= ~jack_status;
+		wcd_mbhc_jack_report(mbhc, &mbhc->headset_jack,
+				     mbhc->hph_status, WCD_MBHC_JACK_MASK);
+		WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_OCP_FSM_EN, 0);
+		WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_OCP_FSM_EN, 1);
+		/*
+		 * reset retry counter as PA is turned off signifying
+		 * start of new OCP detection session
+		 */
+		if (mbhc->intr_ids->hph_left_ocp)
+			mbhc->hphlocp_cnt = 0;
+		else
+			mbhc->hphrocp_cnt = 0;
+		mbhc->mbhc_cb->irq_control(codec, irq, true);
+	}
+}
+
+static void hphrocp_off_report(struct wcd_mbhc *mbhc, u32 jack_status)
+{
+	__hphocp_off_report(mbhc, SND_JACK_OC_HPHR,
+			    mbhc->intr_ids->hph_right_ocp);
+}
+
+static void hphlocp_off_report(struct wcd_mbhc *mbhc, u32 jack_status)
+{
+	__hphocp_off_report(mbhc, SND_JACK_OC_HPHL,
+			    mbhc->intr_ids->hph_left_ocp);
+}
+
+static void wcd_program_hs_vref(struct wcd_mbhc *mbhc)
+{
+	struct wcd_mbhc_plug_type_cfg *plug_type_cfg;
+	struct snd_soc_codec *codec = mbhc->codec;
+	u32 reg_val;
+
+	plug_type_cfg = WCD_MBHC_CAL_PLUG_TYPE_PTR(mbhc->mbhc_cfg->calibration);
+	reg_val = ((plug_type_cfg->v_hs_max - HS_VREF_MIN_VAL) / 100);
+
+	dev_dbg(codec->dev, "%s: reg_val = %x\n", __func__, reg_val);
+	WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_HS_VREF, reg_val);
+}
+
+static void wcd_program_btn_threshold(const struct wcd_mbhc *mbhc, bool micbias)
+{
+	struct wcd_mbhc_btn_detect_cfg *btn_det;
+	struct snd_soc_codec *codec = mbhc->codec;
+	struct snd_soc_card *card = codec->component.card;
+	s16 *btn_low, *btn_high;
+
+	if (mbhc->mbhc_cfg->calibration == NULL) {
+		dev_err(card->dev, "%s: calibration data is NULL\n", __func__);
+		return;
+	}
+
+	btn_det = WCD_MBHC_CAL_BTN_DET_PTR(mbhc->mbhc_cfg->calibration);
+	btn_low = btn_det->_v_btn_low;
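+	/*
+	 * The high thresholds sit immediately after the num_btn low
+	 * thresholds in the calibration blob, so btn_high is derived by
+	 * stepping past _v_btn_low rather than via a named member.
+	 */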
+	btn_high = ((void *)&btn_det->_v_btn_low) +
+			(sizeof(btn_det->_v_btn_low[0]) * btn_det->num_btn);
+
+	mbhc->mbhc_cb->set_btn_thr(codec, btn_low, btn_high, btn_det->num_btn,
+				   micbias);
+}
+
+static void wcd_enable_curr_micbias(const struct wcd_mbhc *mbhc,
+				const enum wcd_mbhc_cs_mb_en_flag cs_mb_en)
+{
+
+	/*
+	 * Some codecs handle micbias/pullup enablement in codec
+	 * drivers itself and micbias is not needed for regular
+	 * plug type detection. So if micbias_control callback function
+	 * is defined, just return.
+	 */
+	if (mbhc->mbhc_cb->mbhc_micbias_control)
+		return;
+
+	pr_debug("%s: enter, cs_mb_en: %d\n", __func__, cs_mb_en);
+
+	switch (cs_mb_en) {
+	case WCD_MBHC_EN_CS:
+		WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_MICB_CTRL, 0);
+		WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_BTN_ISRC_CTL, 3);
+		/* Program Button threshold registers as per CS */
+		wcd_program_btn_threshold(mbhc, false);
+		break;
+	case WCD_MBHC_EN_MB:
+		WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_BTN_ISRC_CTL, 0);
+		WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_FSM_EN, 1);
+
+		/* Disable PULL_UP_EN & enable MICBIAS */
+		WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_MICB_CTRL, 2);
+		/* Program Button threshold registers as per MICBIAS */
+		wcd_program_btn_threshold(mbhc, true);
+		break;
+	case WCD_MBHC_EN_PULLUP:
+		WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_BTN_ISRC_CTL, 3);
+		WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_FSM_EN, 1);
+		WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_MICB_CTRL, 1);
+		/* Program Button threshold registers as per MICBIAS */
+		wcd_program_btn_threshold(mbhc, true);
+		break;
+	case WCD_MBHC_EN_NONE:
+		WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_BTN_ISRC_CTL, 0);
+		WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_FSM_EN, 1);
+		WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_MICB_CTRL, 0);
+		break;
+	default:
+		pr_debug("%s: Invalid parameter\n", __func__);
+		break;
+	}
+
+	pr_debug("%s: exit\n", __func__);
+}
+
+static const char *wcd_mbhc_get_event_string(int event)
+{
+	switch (event) {
+	case WCD_EVENT_PRE_MICBIAS_2_OFF:
+		return WCD_MBHC_STRINGIFY(WCD_EVENT_PRE_MICBIAS_2_OFF);
+	case WCD_EVENT_POST_MICBIAS_2_OFF:
+		return WCD_MBHC_STRINGIFY(WCD_EVENT_POST_MICBIAS_2_OFF);
+	case WCD_EVENT_PRE_MICBIAS_2_ON:
+		return WCD_MBHC_STRINGIFY(WCD_EVENT_PRE_MICBIAS_2_ON);
+	case WCD_EVENT_POST_MICBIAS_2_ON:
+		return WCD_MBHC_STRINGIFY(WCD_EVENT_POST_MICBIAS_2_ON);
+	case WCD_EVENT_PRE_HPHL_PA_ON:
+		return WCD_MBHC_STRINGIFY(WCD_EVENT_PRE_HPHL_PA_ON);
+	case WCD_EVENT_POST_HPHL_PA_OFF:
+		return WCD_MBHC_STRINGIFY(WCD_EVENT_POST_HPHL_PA_OFF);
+	case WCD_EVENT_PRE_HPHR_PA_ON:
+		return WCD_MBHC_STRINGIFY(WCD_EVENT_PRE_HPHR_PA_ON);
+	case WCD_EVENT_POST_HPHR_PA_OFF:
+		return WCD_MBHC_STRINGIFY(WCD_EVENT_POST_HPHR_PA_OFF);
+	case WCD_EVENT_PRE_HPHR_PA_OFF:
+		return WCD_MBHC_STRINGIFY(WCD_EVENT_PRE_HPHR_PA_OFF);
+	case WCD_EVENT_PRE_HPHL_PA_OFF:
+		return WCD_MBHC_STRINGIFY(WCD_EVENT_PRE_HPHL_PA_OFF);
+	case WCD_EVENT_POST_DAPM_MICBIAS_2_ON:
+		return WCD_MBHC_STRINGIFY(WCD_EVENT_POST_DAPM_MICBIAS_2_ON);
+	case WCD_EVENT_PRE_DAPM_MICBIAS_2_ON:
+		return WCD_MBHC_STRINGIFY(WCD_EVENT_PRE_DAPM_MICBIAS_2_ON);
+	case WCD_EVENT_POST_DAPM_MICBIAS_2_OFF:
+		return WCD_MBHC_STRINGIFY(WCD_EVENT_POST_DAPM_MICBIAS_2_OFF);
+	case WCD_EVENT_PRE_DAPM_MICBIAS_2_OFF:
+		return WCD_MBHC_STRINGIFY(WCD_EVENT_PRE_DAPM_MICBIAS_2_OFF);
+	case WCD_EVENT_OCP_OFF:
+		return WCD_MBHC_STRINGIFY(WCD_EVENT_OCP_OFF);
+	case WCD_EVENT_OCP_ON:
+		return WCD_MBHC_STRINGIFY(WCD_EVENT_OCP_ON);
+	case WCD_EVENT_INVALID:
+	default:
+		return WCD_MBHC_STRINGIFY(WCD_EVENT_INVALID);
+	}
+}
+
+static int wcd_event_notify(struct notifier_block *self, unsigned long val,
+			    void *data)
+{
+	struct wcd_mbhc *mbhc = (struct wcd_mbhc *)data;
+	enum wcd_notify_event event = (enum wcd_notify_event)val;
+	struct snd_soc_codec *codec = mbhc->codec;
+	bool micbias2 = false;
+	bool micbias1 = false;
+	u8 fsm_en = 0;
+
+	pr_debug("%s: event %s (%d)\n", __func__,
+		 wcd_mbhc_get_event_string(event), event);
+	if (mbhc->mbhc_cb->micbias_enable_status) {
+		micbias2 = mbhc->mbhc_cb->micbias_enable_status(mbhc,
+								MIC_BIAS_2);
+		micbias1 = mbhc->mbhc_cb->micbias_enable_status(mbhc,
+								MIC_BIAS_1);
+	}
+	switch (event) {
+	/* MICBIAS usage change */
+	case WCD_EVENT_POST_DAPM_MICBIAS_2_ON:
+		mbhc->is_hs_recording = true;
+		pr_debug("%s: is_capture: %d\n", __func__,
+			  mbhc->is_hs_recording);
+		break;
+	case WCD_EVENT_POST_MICBIAS_2_ON:
+		if (!mbhc->micbias_enable)
+			goto out_micb_en;
+		if (mbhc->mbhc_cb->mbhc_common_micb_ctrl) {
+			mbhc->mbhc_cb->mbhc_common_micb_ctrl(codec,
+					MBHC_COMMON_MICB_PRECHARGE,
+					true);
+			mbhc->mbhc_cb->mbhc_common_micb_ctrl(codec,
+					MBHC_COMMON_MICB_SET_VAL,
+					true);
+			/*
+			 * Special headset needs MICBIAS as 2.7V so wait for
+			 * 50 msec for the MICBIAS to reach 2.7 volts.
+			 */
+			msleep(50);
+		}
+		if (mbhc->mbhc_cb->set_auto_zeroing)
+			mbhc->mbhc_cb->set_auto_zeroing(codec, true);
+		if (mbhc->mbhc_cb->mbhc_common_micb_ctrl)
+			mbhc->mbhc_cb->mbhc_common_micb_ctrl(codec,
+					MBHC_COMMON_MICB_PRECHARGE,
+					false);
+out_micb_en:
+		/* Disable current source if micbias enabled */
+		if (mbhc->mbhc_cb->mbhc_micbias_control) {
+			WCD_MBHC_REG_READ(WCD_MBHC_FSM_EN, fsm_en);
+			if (fsm_en)
+				WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_BTN_ISRC_CTL,
+							 0);
+		} else {
+			mbhc->is_hs_recording = true;
+			wcd_enable_curr_micbias(mbhc, WCD_MBHC_EN_MB);
+		}
+		/* configure cap settings properly when micbias is enabled */
+		if (mbhc->mbhc_cb->set_cap_mode)
+			mbhc->mbhc_cb->set_cap_mode(codec, micbias1, true);
+		break;
+	case WCD_EVENT_PRE_MICBIAS_2_OFF:
+		/*
+		 * Before MICBIAS_2 is turned off, if FSM is enabled,
+		 * make sure current source is enabled so as to detect
+		 * button press/release events
+		 */
+		if (mbhc->mbhc_cb->mbhc_micbias_control &&
+		    !mbhc->micbias_enable) {
+			WCD_MBHC_REG_READ(WCD_MBHC_FSM_EN, fsm_en);
+			if (fsm_en)
+				WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_BTN_ISRC_CTL,
+							 3);
+		}
+		break;
+	/* MICBIAS usage change */
+	case WCD_EVENT_POST_DAPM_MICBIAS_2_OFF:
+		mbhc->is_hs_recording = false;
+		pr_debug("%s: is_capture: %d\n", __func__,
+			  mbhc->is_hs_recording);
+		break;
+	case WCD_EVENT_POST_MICBIAS_2_OFF:
+		if (!mbhc->mbhc_cb->mbhc_micbias_control)
+			mbhc->is_hs_recording = false;
+		if (mbhc->micbias_enable) {
+			wcd_enable_curr_micbias(mbhc, WCD_MBHC_EN_MB);
+			break;
+		}
+
+		if (mbhc->mbhc_cb->set_auto_zeroing)
+			mbhc->mbhc_cb->set_auto_zeroing(codec, false);
+		if (mbhc->mbhc_cb->set_micbias_value && !mbhc->micbias_enable)
+			mbhc->mbhc_cb->set_micbias_value(codec);
+		/* Enable PULL UP if PA's are enabled */
+		if ((test_bit(WCD_MBHC_EVENT_PA_HPHL, &mbhc->event_state)) ||
+				(test_bit(WCD_MBHC_EVENT_PA_HPHR,
+					  &mbhc->event_state)))
+			/* enable pullup and cs, disable mb */
+			wcd_enable_curr_micbias(mbhc, WCD_MBHC_EN_PULLUP);
+		else
+			/* enable current source and disable mb, pullup*/
+			wcd_enable_curr_micbias(mbhc, WCD_MBHC_EN_CS);
+
+		/* configure cap settings properly when micbias is disabled */
+		if (mbhc->mbhc_cb->set_cap_mode)
+			mbhc->mbhc_cb->set_cap_mode(codec, micbias1, false);
+		break;
+	case WCD_EVENT_PRE_HPHL_PA_OFF:
+		mutex_lock(&mbhc->hphl_pa_lock);
+		break;
+	case WCD_EVENT_POST_HPHL_PA_OFF:
+		clear_bit(WCD_MBHC_HPHL_PA_OFF_ACK, &mbhc->hph_pa_dac_state);
+		if (mbhc->hph_status & SND_JACK_OC_HPHL)
+			hphlocp_off_report(mbhc, SND_JACK_OC_HPHL);
+		clear_bit(WCD_MBHC_EVENT_PA_HPHL, &mbhc->event_state);
+		/* check if micbias is enabled */
+		if (micbias2)
+			/* Disable cs, pullup & enable micbias */
+			wcd_enable_curr_micbias(mbhc, WCD_MBHC_EN_MB);
+		else
+			/* Disable micbias, pullup & enable cs */
+			wcd_enable_curr_micbias(mbhc, WCD_MBHC_EN_CS);
+		mutex_unlock(&mbhc->hphl_pa_lock);
+		clear_bit(WCD_MBHC_ANC0_OFF_ACK, &mbhc->hph_anc_state);
+		break;
+	case WCD_EVENT_PRE_HPHR_PA_OFF:
+		mutex_lock(&mbhc->hphr_pa_lock);
+		break;
+	case WCD_EVENT_POST_HPHR_PA_OFF:
+		clear_bit(WCD_MBHC_HPHR_PA_OFF_ACK, &mbhc->hph_pa_dac_state);
+		if (mbhc->hph_status & SND_JACK_OC_HPHR)
+			hphrocp_off_report(mbhc, SND_JACK_OC_HPHR);
+		clear_bit(WCD_MBHC_EVENT_PA_HPHR, &mbhc->event_state);
+		/* check if micbias is enabled */
+		if (micbias2)
+			/* Disable cs, pullup & enable micbias */
+			wcd_enable_curr_micbias(mbhc, WCD_MBHC_EN_MB);
+		else
+			/* Disable micbias, pullup & enable cs */
+			wcd_enable_curr_micbias(mbhc, WCD_MBHC_EN_CS);
+		mutex_unlock(&mbhc->hphr_pa_lock);
+		clear_bit(WCD_MBHC_ANC1_OFF_ACK, &mbhc->hph_anc_state);
+		break;
+	case WCD_EVENT_PRE_HPHL_PA_ON:
+		set_bit(WCD_MBHC_EVENT_PA_HPHL, &mbhc->event_state);
+		/* check if micbias is enabled */
+		if (micbias2)
+			/* Disable cs, pullup & enable micbias */
+			wcd_enable_curr_micbias(mbhc, WCD_MBHC_EN_MB);
+		else
+			/* Disable micbias, enable pullup & cs */
+			wcd_enable_curr_micbias(mbhc, WCD_MBHC_EN_PULLUP);
+		break;
+	case WCD_EVENT_PRE_HPHR_PA_ON:
+		set_bit(WCD_MBHC_EVENT_PA_HPHR, &mbhc->event_state);
+		/* check if micbias is enabled */
+		if (micbias2)
+			/* Disable cs, pullup & enable micbias */
+			wcd_enable_curr_micbias(mbhc, WCD_MBHC_EN_MB);
+		else
+			/* Disable micbias, enable pullup & cs */
+			wcd_enable_curr_micbias(mbhc, WCD_MBHC_EN_PULLUP);
+		break;
+	case WCD_EVENT_OCP_OFF:
+		mbhc->mbhc_cb->irq_control(mbhc->codec,
+					   mbhc->intr_ids->hph_left_ocp,
+					   false);
+		break;
+	case WCD_EVENT_OCP_ON:
+		mbhc->mbhc_cb->irq_control(mbhc->codec,
+					   mbhc->intr_ids->hph_left_ocp,
+					   true);
+		break;
+	default:
+		break;
+	}
+	return 0;
+}
+
+static int wcd_cancel_btn_work(struct wcd_mbhc *mbhc)
+{
+	int r;
+
+	r = cancel_delayed_work_sync(&mbhc->mbhc_btn_dwork);
+	/*
+	 * If the scheduled mbhc_btn_dwork was canceled from here,
+	 * we have to release the sleep lock here instead of in btn_work.
+	 */
+	if (r)
+		mbhc->mbhc_cb->lock_sleep(mbhc, false);
+	return r;
+}
+
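+/*
+ * Read WCD_MBHC_SWCH_LEVEL_REMOVE; a non-zero result means the switch
+ * level indicates the plug has been removed from the jack.
+ */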
+static bool wcd_swch_level_remove(struct wcd_mbhc *mbhc)
+{
+	u16 result2 = 0;
+
+	WCD_MBHC_REG_READ(WCD_MBHC_SWCH_LEVEL_REMOVE, result2);
+	return (result2) ? true : false;
+}
+
+/* should be called from interrupt context that holds suspend */
+static void wcd_schedule_hs_detect_plug(struct wcd_mbhc *mbhc,
+					    struct work_struct *work)
+{
+	pr_debug("%s: scheduling correct_swch_plug\n", __func__);
+	WCD_MBHC_RSC_ASSERT_LOCKED(mbhc);
+	mbhc->hs_detect_work_stop = false;
+	mbhc->mbhc_cb->lock_sleep(mbhc, true);
+	schedule_work(work);
+}
+
+/* called under codec_resource_lock acquisition */
+static void wcd_cancel_hs_detect_plug(struct wcd_mbhc *mbhc,
+					 struct work_struct *work)
+{
+	pr_debug("%s: Canceling correct_plug_swch\n", __func__);
+	mbhc->hs_detect_work_stop = true;
+	WCD_MBHC_RSC_UNLOCK(mbhc);
+	if (cancel_work_sync(work)) {
+		pr_debug("%s: correct_plug_swch is canceled\n",
+			 __func__);
+		mbhc->mbhc_cb->lock_sleep(mbhc, false);
+	}
+	WCD_MBHC_RSC_LOCK(mbhc);
+}
+
+static void wcd_mbhc_clr_and_turnon_hph_padac(struct wcd_mbhc *mbhc)
+{
+	bool pa_turned_on = false;
+	u8 wg_time = 0;
+
+	WCD_MBHC_REG_READ(WCD_MBHC_HPH_CNP_WG_TIME, wg_time);
+	wg_time += 1;
+
+	mutex_lock(&mbhc->hphr_pa_lock);
+	if (test_and_clear_bit(WCD_MBHC_HPHR_PA_OFF_ACK,
+			       &mbhc->hph_pa_dac_state)) {
+		pr_debug("%s: HPHR clear flag and enable PA\n", __func__);
+		WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_HPHR_PA_EN, 1);
+		WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_HPHR_OCP_DET_EN, 1);
+		pa_turned_on = true;
+	}
+	mutex_unlock(&mbhc->hphr_pa_lock);
+	mutex_lock(&mbhc->hphl_pa_lock);
+	if (test_and_clear_bit(WCD_MBHC_HPHL_PA_OFF_ACK,
+			       &mbhc->hph_pa_dac_state)) {
+		pr_debug("%s: HPHL clear flag and enable PA\n", __func__);
+		WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_HPHL_PA_EN, 1);
+		WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_HPHL_OCP_DET_EN, 1);
+		pa_turned_on = true;
+	}
+	mutex_unlock(&mbhc->hphl_pa_lock);
+
+	if (pa_turned_on) {
+		pr_debug("%s: PA was turned on by MBHC and not by DAPM\n",
+			 __func__);
+		usleep_range(wg_time * 1000, wg_time * 1000 + 50);
+	}
+
+	if (test_and_clear_bit(WCD_MBHC_ANC0_OFF_ACK,
+				&mbhc->hph_anc_state)) {
+		usleep_range(20000, 20100);
+		pr_debug("%s: HPHL ANC clear flag and enable ANC_EN\n",
+			__func__);
+		if (mbhc->mbhc_cb->update_anc_state)
+			mbhc->mbhc_cb->update_anc_state(mbhc->codec, true, 0);
+	}
+
+	if (test_and_clear_bit(WCD_MBHC_ANC1_OFF_ACK,
+				&mbhc->hph_anc_state)) {
+		usleep_range(20000, 20100);
+		pr_debug("%s: HPHR ANC clear flag and enable ANC_EN\n",
+			__func__);
+		if (mbhc->mbhc_cb->update_anc_state)
+			mbhc->mbhc_cb->update_anc_state(mbhc->codec, true, 1);
+	}
+}
+
+static bool wcd_mbhc_is_hph_pa_on(struct wcd_mbhc *mbhc)
+{
+	bool hph_pa_on = false;
+
+	WCD_MBHC_REG_READ(WCD_MBHC_HPH_PA_EN, hph_pa_on);
+
+	return (hph_pa_on) ? true : false;
+}
+
+static void wcd_mbhc_set_and_turnoff_hph_padac(struct wcd_mbhc *mbhc)
+{
+	u8 wg_time = 0;
+
+	WCD_MBHC_REG_READ(WCD_MBHC_HPH_CNP_WG_TIME, wg_time);
+	wg_time += 1;
+
+	/*
+	 * If the headphone PA is on, check that userspace receives the
+	 * removal event so the PA state stays in sync.
+	 */
+	if (wcd_mbhc_is_hph_pa_on(mbhc)) {
+		pr_debug("%s PA is on, setting PA_OFF_ACK\n", __func__);
+		set_bit(WCD_MBHC_HPHL_PA_OFF_ACK, &mbhc->hph_pa_dac_state);
+		set_bit(WCD_MBHC_HPHR_PA_OFF_ACK, &mbhc->hph_pa_dac_state);
+		WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_HPHL_OCP_DET_EN, 0);
+		WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_HPHR_OCP_DET_EN, 0);
+	} else {
+		pr_debug("%s PA is off\n", __func__);
+	}
+	WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_HPH_PA_EN, 0);
+	usleep_range(wg_time * 1000, wg_time * 1000 + 50);
+
+	if (mbhc->mbhc_cb->is_anc_on && mbhc->mbhc_cb->is_anc_on(mbhc)) {
+		usleep_range(20000, 20100);
+		pr_debug("%s ANC is on, setting ANC_OFF_ACK\n", __func__);
+		set_bit(WCD_MBHC_ANC0_OFF_ACK, &mbhc->hph_anc_state);
+		set_bit(WCD_MBHC_ANC1_OFF_ACK, &mbhc->hph_anc_state);
+		if (mbhc->mbhc_cb->update_anc_state) {
+			mbhc->mbhc_cb->update_anc_state(mbhc->codec, false, 0);
+			mbhc->mbhc_cb->update_anc_state(mbhc->codec, false, 1);
+		} else {
+			pr_debug("%s ANC is off\n", __func__);
+		}
+	}
+}
+
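+/*
+ * Return the last computed left/right impedance values; -EINVAL means
+ * no valid measurement is available yet. A minimal, hypothetical usage
+ * sketch (the surrounding caller context is illustrative only):
+ *
+ *	uint32_t zl, zr;
+ *
+ *	if (!wcd_mbhc_get_impedance(mbhc, &zl, &zr))
+ *		pr_info("ZL=%u ohm, ZR=%u ohm\n", zl, zr);
+ */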
+int wcd_mbhc_get_impedance(struct wcd_mbhc *mbhc, uint32_t *zl,
+			uint32_t *zr)
+{
+	*zl = mbhc->zl;
+	*zr = mbhc->zr;
+
+	if (*zl && *zr)
+		return 0;
+	else
+		return -EINVAL;
+}
+
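+/*
+ * Enable or disable the electrical insertion/removal interrupt; the
+ * requested state is cached in intr_status so that irq_control() is
+ * only invoked when the state actually changes.
+ */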
+static void wcd_mbhc_hs_elec_irq(struct wcd_mbhc *mbhc, int irq_type,
+				 bool enable)
+{
+	int irq;
+
+	WCD_MBHC_RSC_ASSERT_LOCKED(mbhc);
+
+	if (irq_type == WCD_MBHC_ELEC_HS_INS) {
+		irq = mbhc->intr_ids->mbhc_hs_ins_intr;
+	} else if (irq_type == WCD_MBHC_ELEC_HS_REM) {
+		irq = mbhc->intr_ids->mbhc_hs_rem_intr;
+	} else {
+		pr_debug("%s: unknown irq_type: %d, enable: %d\n",
+			__func__, irq_type, enable);
+		return;
+	}
+
+	pr_debug("%s: irq: %d, enable: %d, intr_status:%lu\n",
+		 __func__, irq, enable, mbhc->intr_status);
+	if ((test_bit(irq_type, &mbhc->intr_status)) != enable) {
+		mbhc->mbhc_cb->irq_control(mbhc->codec, irq, enable);
+		if (enable)
+			set_bit(irq_type, &mbhc->intr_status);
+		else
+			clear_bit(irq_type, &mbhc->intr_status);
+	}
+}
+
+static void wcd_mbhc_report_plug(struct wcd_mbhc *mbhc, int insertion,
+				enum snd_jack_types jack_type)
+{
+	struct snd_soc_codec *codec = mbhc->codec;
+	bool is_pa_on = false;
+
+	WCD_MBHC_RSC_ASSERT_LOCKED(mbhc);
+
+	pr_debug("%s: enter insertion %d hph_status %x\n",
+		 __func__, insertion, mbhc->hph_status);
+	if (!insertion) {
+		/* Report removal */
+		mbhc->hph_status &= ~jack_type;
+		/*
+		 * cancel possibly scheduled btn work and
+		 * report release if we reported button press
+		 */
+		if (wcd_cancel_btn_work(mbhc)) {
+			pr_debug("%s: button press is canceled\n", __func__);
+		} else if (mbhc->buttons_pressed) {
+			pr_debug("%s: release of button press%d\n",
+				 __func__, jack_type);
+			wcd_mbhc_jack_report(mbhc, &mbhc->button_jack, 0,
+					    mbhc->buttons_pressed);
+			mbhc->buttons_pressed &=
+				~WCD_MBHC_JACK_BUTTON_MASK;
+		}
+
+		if (mbhc->micbias_enable) {
+			if (mbhc->mbhc_cb->mbhc_micbias_control)
+				mbhc->mbhc_cb->mbhc_micbias_control(
+						codec, MIC_BIAS_2,
+						MICB_DISABLE);
+			if (mbhc->mbhc_cb->mbhc_micb_ctrl_thr_mic)
+				mbhc->mbhc_cb->mbhc_micb_ctrl_thr_mic(
+						codec,
+						MIC_BIAS_2, false);
+			if (mbhc->mbhc_cb->set_micbias_value) {
+				mbhc->mbhc_cb->set_micbias_value(codec);
+				WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_MICB_CTRL, 0);
+			}
+			mbhc->micbias_enable = false;
+		}
+
+		mbhc->hph_type = WCD_MBHC_HPH_NONE;
+		mbhc->zl = mbhc->zr = 0;
+		pr_debug("%s: Reporting removal %d(%x)\n", __func__,
+			 jack_type, mbhc->hph_status);
+		wcd_mbhc_jack_report(mbhc, &mbhc->headset_jack,
+				mbhc->hph_status, WCD_MBHC_JACK_MASK);
+		wcd_mbhc_set_and_turnoff_hph_padac(mbhc);
+		hphrocp_off_report(mbhc, SND_JACK_OC_HPHR);
+		hphlocp_off_report(mbhc, SND_JACK_OC_HPHL);
+		mbhc->current_plug = MBHC_PLUG_TYPE_NONE;
+		mbhc->force_linein = false;
+	} else {
+		/*
+		 * Report removal of current jack type.
+		 * Headphone to headset shouldn't report headphone
+		 * removal.
+		 */
+		if (mbhc->mbhc_cfg->detect_extn_cable &&
+		    (mbhc->current_plug == MBHC_PLUG_TYPE_HIGH_HPH ||
+		    jack_type == SND_JACK_LINEOUT) &&
+		    (mbhc->hph_status && mbhc->hph_status != jack_type)) {
+			if (mbhc->micbias_enable &&
+			    mbhc->hph_status == SND_JACK_HEADSET) {
+				if (mbhc->mbhc_cb->mbhc_micbias_control)
+					mbhc->mbhc_cb->mbhc_micbias_control(
+						codec, MIC_BIAS_2,
+						MICB_DISABLE);
+				if (mbhc->mbhc_cb->mbhc_micb_ctrl_thr_mic)
+					mbhc->mbhc_cb->mbhc_micb_ctrl_thr_mic(
+						codec,
+						MIC_BIAS_2, false);
+				if (mbhc->mbhc_cb->set_micbias_value) {
+					mbhc->mbhc_cb->set_micbias_value(
+							codec);
+					WCD_MBHC_REG_UPDATE_BITS(
+							WCD_MBHC_MICB_CTRL, 0);
+				}
+				mbhc->micbias_enable = false;
+			}
+			mbhc->hph_type = WCD_MBHC_HPH_NONE;
+			mbhc->zl = mbhc->zr = 0;
+			pr_debug("%s: Reporting removal (%x)\n",
+				 __func__, mbhc->hph_status);
+			wcd_mbhc_jack_report(mbhc, &mbhc->headset_jack,
+					    0, WCD_MBHC_JACK_MASK);
+
+			if (mbhc->hph_status == SND_JACK_LINEOUT) {
+				pr_debug("%s: Enable micbias\n", __func__);
+				/* Disable current source and enable micbias */
+				wcd_enable_curr_micbias(mbhc, WCD_MBHC_EN_MB);
+				pr_debug("%s: set up elec removal detection\n",
+					  __func__);
+				WCD_MBHC_REG_UPDATE_BITS(
+						WCD_MBHC_ELECT_DETECTION_TYPE,
+						0);
+				usleep_range(200, 210);
+				wcd_mbhc_hs_elec_irq(mbhc,
+						     WCD_MBHC_ELEC_HS_REM,
+						     true);
+			}
+			mbhc->hph_status &= ~(SND_JACK_HEADSET |
+						SND_JACK_LINEOUT |
+						SND_JACK_ANC_HEADPHONE |
+						SND_JACK_UNSUPPORTED);
+			mbhc->force_linein = false;
+		}
+
+		if (mbhc->current_plug == MBHC_PLUG_TYPE_HEADSET &&
+			jack_type == SND_JACK_HEADPHONE)
+			mbhc->hph_status &= ~SND_JACK_HEADSET;
+
+		/* Report insertion */
+		if (jack_type == SND_JACK_HEADPHONE)
+			mbhc->current_plug = MBHC_PLUG_TYPE_HEADPHONE;
+		else if (jack_type == SND_JACK_UNSUPPORTED)
+			mbhc->current_plug = MBHC_PLUG_TYPE_GND_MIC_SWAP;
+		else if (jack_type == SND_JACK_HEADSET) {
+			mbhc->current_plug = MBHC_PLUG_TYPE_HEADSET;
+			mbhc->jiffies_atreport = jiffies;
+		} else if (jack_type == SND_JACK_LINEOUT) {
+			mbhc->current_plug = MBHC_PLUG_TYPE_HIGH_HPH;
+		} else if (jack_type == SND_JACK_ANC_HEADPHONE)
+			mbhc->current_plug = MBHC_PLUG_TYPE_ANC_HEADPHONE;
+
+		if (mbhc->mbhc_cb->hph_pa_on_status)
+			is_pa_on = mbhc->mbhc_cb->hph_pa_on_status(codec);
+
+		if (mbhc->impedance_detect &&
+			mbhc->mbhc_cb->compute_impedance &&
+			(mbhc->mbhc_cfg->linein_th != 0) &&
+			(!is_pa_on)) {
+				mbhc->mbhc_cb->compute_impedance(mbhc,
+						&mbhc->zl, &mbhc->zr);
+			if ((mbhc->zl > mbhc->mbhc_cfg->linein_th &&
+				mbhc->zl < MAX_IMPED) &&
+				(mbhc->zr > mbhc->mbhc_cfg->linein_th &&
+				 mbhc->zr < MAX_IMPED) &&
+				(jack_type == SND_JACK_HEADPHONE)) {
+				jack_type = SND_JACK_LINEOUT;
+				mbhc->force_linein = true;
+				mbhc->current_plug = MBHC_PLUG_TYPE_HIGH_HPH;
+				if (mbhc->hph_status) {
+					mbhc->hph_status &= ~(SND_JACK_HEADSET |
+							SND_JACK_LINEOUT |
+							SND_JACK_UNSUPPORTED);
+					wcd_mbhc_jack_report(mbhc,
+							&mbhc->headset_jack,
+							mbhc->hph_status,
+							WCD_MBHC_JACK_MASK);
+				}
+				pr_debug("%s: Marking jack type as SND_JACK_LINEOUT\n",
+				__func__);
+			}
+		}
+
+		mbhc->hph_status |= jack_type;
+
+		pr_debug("%s: Reporting insertion %d(%x)\n", __func__,
+			 jack_type, mbhc->hph_status);
+		wcd_mbhc_jack_report(mbhc, &mbhc->headset_jack,
+				    (mbhc->hph_status | SND_JACK_MECHANICAL),
+				    WCD_MBHC_JACK_MASK);
+		wcd_mbhc_clr_and_turnon_hph_padac(mbhc);
+	}
+	pr_debug("%s: leave hph_status %x\n", __func__, mbhc->hph_status);
+}
+
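+/*
+ * Detect an ANC headphone: with the ANC micbias and detection path
+ * enabled, sample hs_comp_result and the button status over
+ * ANC_DETECT_RETRY_CNT retries. The plug is declared ANC only if most
+ * samples are valid and no button press was seen in the meantime.
+ */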
+static bool wcd_mbhc_detect_anc_plug_type(struct wcd_mbhc *mbhc)
+{
+	bool anc_mic_found = false;
+	u16 val, hs_comp_res, btn_status = 0;
+	unsigned long retry = 0;
+	int valid_plug_cnt = 0, invalid_plug_cnt = 0;
+	int btn_status_cnt = 0;
+	bool is_check_btn_press = false;
+
+	if (mbhc->mbhc_cfg->anc_micbias < MIC_BIAS_1 ||
+	    mbhc->mbhc_cfg->anc_micbias > MIC_BIAS_4)
+		return false;
+
+	if (!mbhc->mbhc_cb->mbhc_micbias_control)
+		return false;
+
+	WCD_MBHC_REG_READ(WCD_MBHC_FSM_EN, val);
+
+	if (val)
+		WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_FSM_EN, 0);
+
+	mbhc->mbhc_cb->mbhc_micbias_control(mbhc->codec,
+					    mbhc->mbhc_cfg->anc_micbias,
+					    MICB_ENABLE);
+	WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_MUX_CTL, 0x2);
+	WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_ANC_DET_EN, 1);
+	WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_FSM_EN, 1);
+	/*
+	 * Wait for the 20 ms button debounce time. If a 4-pole plug is
+	 * inserted into a 5-pole jack, there will be a button press
+	 * interrupt during ANC plug detection. In that case, even though
+	 * hs_comp_res is 0, it should not be declared an ANC plug type.
+	 */
+	usleep_range(20000, 20100);
+
+	/*
+	 * After enabling the FSM, to handle slow insertion scenarios,
+	 * check hs_comp_result a few times to see if the IN3 voltage
+	 * is below the Vref.
+	 */
+	do {
+		if (wcd_swch_level_remove(mbhc)) {
+			pr_debug("%s: Switch level is low\n", __func__);
+			goto exit;
+		}
+		pr_debug("%s: Retry attempt %lu\n", __func__, retry + 1);
+		WCD_MBHC_REG_READ(WCD_MBHC_HS_COMP_RESULT, hs_comp_res);
+
+		if (!hs_comp_res) {
+			valid_plug_cnt++;
+			is_check_btn_press = true;
+		} else
+			invalid_plug_cnt++;
+		/* Wait 1ms before taking another reading */
+		usleep_range(1000, 1100);
+
+		WCD_MBHC_REG_READ(WCD_MBHC_FSM_STATUS, btn_status);
+		if (btn_status)
+			btn_status_cnt++;
+
+		retry++;
+	} while (retry < ANC_DETECT_RETRY_CNT);
+
+	pr_debug("%s: valid: %d, invalid: %d, btn_status_cnt: %d\n",
+		 __func__, valid_plug_cnt, invalid_plug_cnt, btn_status_cnt);
+
+	/* decision logic */
+	if ((valid_plug_cnt > invalid_plug_cnt) && is_check_btn_press &&
+	    (btn_status_cnt == 0))
+		anc_mic_found = true;
+exit:
+	if (!val)
+		WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_FSM_EN, 0);
+
+	WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_ANC_DET_EN, 0);
+
+	mbhc->mbhc_cb->mbhc_micbias_control(mbhc->codec,
+					    mbhc->mbhc_cfg->anc_micbias,
+					    MICB_DISABLE);
+	WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_MUX_CTL, 0x0);
+	pr_debug("%s: anc mic %sfound\n", __func__,
+		 anc_mic_found ? "" : "not ");
+	return anc_mic_found;
+}
+
+static void wcd_mbhc_find_plug_and_report(struct wcd_mbhc *mbhc,
+					 enum wcd_mbhc_plug_type plug_type)
+{
+	bool anc_mic_found = false;
+	enum snd_jack_types jack_type;
+
+	pr_debug("%s: enter current_plug(%d) new_plug(%d)\n",
+		 __func__, mbhc->current_plug, plug_type);
+
+	WCD_MBHC_RSC_ASSERT_LOCKED(mbhc);
+
+	if (mbhc->current_plug == plug_type) {
+		pr_debug("%s: cable already reported, exit\n", __func__);
+		goto exit;
+	}
+
+	if (plug_type == MBHC_PLUG_TYPE_HEADPHONE) {
+		/*
+		 * Nothing was reported previously
+		 * report a headphone or unsupported
+		 */
+		wcd_mbhc_report_plug(mbhc, 1, SND_JACK_HEADPHONE);
+	} else if (plug_type == MBHC_PLUG_TYPE_GND_MIC_SWAP) {
+			if (mbhc->current_plug == MBHC_PLUG_TYPE_HEADPHONE)
+				wcd_mbhc_report_plug(mbhc, 0,
+						SND_JACK_HEADPHONE);
+			if (mbhc->current_plug == MBHC_PLUG_TYPE_HEADSET)
+				wcd_mbhc_report_plug(mbhc, 0, SND_JACK_HEADSET);
+		wcd_mbhc_report_plug(mbhc, 1, SND_JACK_UNSUPPORTED);
+	} else if (plug_type == MBHC_PLUG_TYPE_HEADSET) {
+		if (mbhc->mbhc_cfg->enable_anc_mic_detect)
+			anc_mic_found = wcd_mbhc_detect_anc_plug_type(mbhc);
+
+		jack_type = SND_JACK_HEADSET;
+		if (anc_mic_found)
+			jack_type = SND_JACK_ANC_HEADPHONE;
+
+		/*
+		 * If Headphone was reported previously, this will
+		 * only report the mic line
+		 */
+		wcd_mbhc_report_plug(mbhc, 1, jack_type);
+	} else if (plug_type == MBHC_PLUG_TYPE_HIGH_HPH) {
+		if (mbhc->mbhc_cfg->detect_extn_cable) {
+			/* High impedance device found. Report as LINEOUT */
+			wcd_mbhc_report_plug(mbhc, 1, SND_JACK_LINEOUT);
+			pr_debug("%s: setup mic trigger for further detection\n",
+				 __func__);
+
+			/* Disable HW FSM and current source */
+			WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_FSM_EN, 0);
+			WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_BTN_ISRC_CTL, 0);
+			/* Setup for insertion detection */
+			WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_ELECT_DETECTION_TYPE,
+						 1);
+			/*
+			 * Enable HPHL trigger and MIC Schmitt triggers
+			 * and request for elec insertion interrupts
+			 */
+			WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_ELECT_SCHMT_ISRC,
+						 3);
+			wcd_mbhc_hs_elec_irq(mbhc, WCD_MBHC_ELEC_HS_INS,
+					     true);
+		} else {
+			wcd_mbhc_report_plug(mbhc, 1, SND_JACK_LINEOUT);
+		}
+	} else {
+		WARN(1, "Unexpected current plug_type %d, plug_type %d\n",
+		     mbhc->current_plug, plug_type);
+	}
+exit:
+	pr_debug("%s: leave\n", __func__);
+}
+
+/* To determine if a cross connection occurred */
+static int wcd_check_cross_conn(struct wcd_mbhc *mbhc)
+{
+	u16 swap_res = 0;
+	enum wcd_mbhc_plug_type plug_type = MBHC_PLUG_TYPE_NONE;
+	s16 reg1 = 0;
+	bool hphl_sch_res = 0, hphr_sch_res = 0;
+
+	if (wcd_swch_level_remove(mbhc)) {
+		pr_debug("%s: Switch level is low\n", __func__);
+		return -EINVAL;
+	}
+
+	/* If PA is enabled, don't check for cross-connection */
+	if (mbhc->mbhc_cb->hph_pa_on_status)
+		if (mbhc->mbhc_cb->hph_pa_on_status(mbhc->codec))
+			return false;
+
+	WCD_MBHC_REG_READ(WCD_MBHC_ELECT_SCHMT_ISRC, reg1);
+	/*
+	 * To check for a cross connection, micbias and the HPHL/HPHR
+	 * Schmitt triggers need to be enabled. For some codecs, such as
+	 * wcd9335, the pull-up will already be enabled when this function
+	 * is called for cross-connection identification; there is no
+	 * need to enable micbias in that case.
+	 */
+	wcd_enable_curr_micbias(mbhc, WCD_MBHC_EN_MB);
+	WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_ELECT_SCHMT_ISRC, 2);
+
+	WCD_MBHC_REG_READ(WCD_MBHC_ELECT_RESULT, swap_res);
+	pr_debug("%s: swap_res%x\n", __func__, swap_res);
+
+	/*
+	 * Read the HPHL and HPHR Schmitt trigger results. Both bits read
+	 * "0" in case of a cross connection; otherwise they stay at 1.
+	 */
+	WCD_MBHC_REG_READ(WCD_MBHC_HPHL_SCHMT_RESULT, hphl_sch_res);
+	WCD_MBHC_REG_READ(WCD_MBHC_HPHR_SCHMT_RESULT, hphr_sch_res);
+	if (!(hphl_sch_res || hphr_sch_res)) {
+		plug_type = MBHC_PLUG_TYPE_GND_MIC_SWAP;
+		pr_debug("%s: Cross connection identified\n", __func__);
+	} else {
+		pr_debug("%s: No Cross connection found\n", __func__);
+	}
+
+	/* Disable schmitt trigger and restore micbias */
+	WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_ELECT_SCHMT_ISRC, reg1);
+	pr_debug("%s: leave, plug type: %d\n", __func__,  plug_type);
+
+	return (plug_type == MBHC_PLUG_TYPE_GND_MIC_SWAP) ? true : false;
+}
+
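+/*
+ * A "special" headset is one whose microphone needs a raised micbias
+ * (2.7 V) before the comparator result settles. Poll hs_comp_result
+ * for up to SPECIAL_HS_DETECT_TIME_MS while precharging micbias and,
+ * if such a headset is found, leave micbias_enable set.
+ */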
+static bool wcd_is_special_headset(struct wcd_mbhc *mbhc)
+{
+	struct snd_soc_codec *codec = mbhc->codec;
+	int delay = 0, rc;
+	bool ret = false;
+	u16 hs_comp_res;
+	bool is_spl_hs = false;
+
+	/*
+	 * Increase micbias to 2.7V to detect headsets with
+	 * threshold on microphone
+	 */
+	if (mbhc->mbhc_cb->mbhc_micbias_control &&
+	    !mbhc->mbhc_cb->mbhc_micb_ctrl_thr_mic) {
+		pr_debug("%s: callback fn micb_ctrl_thr_mic not defined\n",
+			 __func__);
+		return false;
+	} else if (mbhc->mbhc_cb->mbhc_micb_ctrl_thr_mic) {
+		rc = mbhc->mbhc_cb->mbhc_micb_ctrl_thr_mic(codec,
+							MIC_BIAS_2, true);
+		if (rc) {
+			pr_err("%s: Micbias control for thr mic failed, rc: %d\n",
+				__func__, rc);
+			return false;
+		}
+	}
+
+	wcd_enable_curr_micbias(mbhc, WCD_MBHC_EN_MB);
+
+	pr_debug("%s: special headset, start register writes\n", __func__);
+
+	WCD_MBHC_REG_READ(WCD_MBHC_HS_COMP_RESULT, hs_comp_res);
+	while (!is_spl_hs)  {
+		if (mbhc->hs_detect_work_stop) {
+			pr_debug("%s: stop requested: %d\n", __func__,
+					mbhc->hs_detect_work_stop);
+			break;
+		}
+		delay = delay + 50;
+		if (mbhc->mbhc_cb->mbhc_common_micb_ctrl) {
+			mbhc->mbhc_cb->mbhc_common_micb_ctrl(codec,
+					MBHC_COMMON_MICB_PRECHARGE,
+					true);
+			mbhc->mbhc_cb->mbhc_common_micb_ctrl(codec,
+					MBHC_COMMON_MICB_SET_VAL,
+					true);
+		}
+		/* Wait for 50msec for MICBIAS to settle down */
+		msleep(50);
+		if (mbhc->mbhc_cb->set_auto_zeroing)
+			mbhc->mbhc_cb->set_auto_zeroing(codec, true);
+		/* Wait for 50msec for FSM to update result values */
+		msleep(50);
+		WCD_MBHC_REG_READ(WCD_MBHC_HS_COMP_RESULT, hs_comp_res);
+		if (!(hs_comp_res)) {
+			pr_debug("%s: Special headset detected in %d msecs\n",
+					__func__, (delay * 2));
+			is_spl_hs = true;
+		}
+		if (delay == SPECIAL_HS_DETECT_TIME_MS) {
+			pr_debug("%s: Spl headset didnt get detect in 4 sec\n",
+					__func__);
+			break;
+		}
+	}
+	if (is_spl_hs) {
+		pr_debug("%s: Headset with threshold found\n",  __func__);
+		mbhc->micbias_enable = true;
+		ret = true;
+	}
+	if (mbhc->mbhc_cb->mbhc_common_micb_ctrl)
+		mbhc->mbhc_cb->mbhc_common_micb_ctrl(codec,
+				MBHC_COMMON_MICB_PRECHARGE,
+				false);
+	if (mbhc->mbhc_cb->set_micbias_value && !mbhc->micbias_enable)
+		mbhc->mbhc_cb->set_micbias_value(codec);
+	if (mbhc->mbhc_cb->set_auto_zeroing)
+		mbhc->mbhc_cb->set_auto_zeroing(codec, false);
+
+	if (mbhc->mbhc_cb->mbhc_micb_ctrl_thr_mic &&
+	    !mbhc->micbias_enable)
+		mbhc->mbhc_cb->mbhc_micb_ctrl_thr_mic(codec, MIC_BIAS_2,
+						      false);
+
+	pr_debug("%s: leave, micb_enable: %d\n", __func__,
+		  mbhc->micbias_enable);
+	return ret;
+}
+
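+/*
+ * Select the button-detection current source for the detected plug
+ * type: headphones always get the current source, headsets only when
+ * no headset recording is active and micbias2 is off.
+ */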
+static void wcd_mbhc_update_fsm_source(struct wcd_mbhc *mbhc,
+				       enum wcd_mbhc_plug_type plug_type)
+{
+	bool micbias2;
+
+	micbias2 = mbhc->mbhc_cb->micbias_enable_status(mbhc,
+							MIC_BIAS_2);
+	switch (plug_type) {
+	case MBHC_PLUG_TYPE_HEADPHONE:
+		WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_BTN_ISRC_CTL, 3);
+		break;
+	case MBHC_PLUG_TYPE_HEADSET:
+	case MBHC_PLUG_TYPE_ANC_HEADPHONE:
+		if (!mbhc->is_hs_recording && !micbias2)
+			WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_BTN_ISRC_CTL, 3);
+		break;
+	default:
+		WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_BTN_ISRC_CTL, 0);
+		break;
+	}
+}
+
+static void wcd_enable_mbhc_supply(struct wcd_mbhc *mbhc,
+			enum wcd_mbhc_plug_type plug_type)
+{
+	struct snd_soc_codec *codec = mbhc->codec;
+
+	/*
+	 * Do not disable micbias if recording is going on or a
+	 * headset is inserted on the other side of the extension
+	 * cable. If a headset has been detected, the current source
+	 * needs to be kept enabled for button detection to work.
+	 * If the accessory type is invalid or unsupported, we
+	 * don't need to enable either of them.
+	 */
+	if (det_extn_cable_en && mbhc->is_extn_cable &&
+		mbhc->mbhc_cb && mbhc->mbhc_cb->extn_use_mb &&
+		mbhc->mbhc_cb->extn_use_mb(codec)) {
+		if (plug_type == MBHC_PLUG_TYPE_HEADPHONE ||
+		    plug_type == MBHC_PLUG_TYPE_HEADSET)
+			wcd_enable_curr_micbias(mbhc, WCD_MBHC_EN_MB);
+	} else {
+		if (plug_type == MBHC_PLUG_TYPE_HEADSET) {
+			if (mbhc->is_hs_recording || mbhc->micbias_enable)
+				wcd_enable_curr_micbias(mbhc,
+							WCD_MBHC_EN_MB);
+			else if ((test_bit(WCD_MBHC_EVENT_PA_HPHL,
+				&mbhc->event_state)) ||
+				(test_bit(WCD_MBHC_EVENT_PA_HPHR,
+				&mbhc->event_state)))
+					wcd_enable_curr_micbias(mbhc,
+							WCD_MBHC_EN_PULLUP);
+			else
+				wcd_enable_curr_micbias(mbhc,
+							WCD_MBHC_EN_CS);
+		} else if (plug_type == MBHC_PLUG_TYPE_HEADPHONE) {
+			wcd_enable_curr_micbias(mbhc, WCD_MBHC_EN_CS);
+		} else {
+			wcd_enable_curr_micbias(mbhc, WCD_MBHC_EN_NONE);
+		}
+	}
+}
+
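+/*
+ * Compare hs_comp_result at 1.8 V and at 2.7 V micbias: if the
+ * comparator trips at 1.8 V but not at 2.7 V, count the plug as a
+ * special headset. Micbias is dropped back to 1.8 V between attempts
+ * until the hit count reaches WCD_MBHC_SPL_HS_CNT.
+ */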
+static bool wcd_mbhc_check_for_spl_headset(struct wcd_mbhc *mbhc,
+					   int *spl_hs_cnt)
+{
+	u16 hs_comp_res_1_8v = 0, hs_comp_res_2_7v = 0;
+	bool spl_hs = false;
+
+	if (!mbhc->mbhc_cb->mbhc_micb_ctrl_thr_mic)
+		goto exit;
+
+	/* Read back hs_comp_res @ 1.8v Micbias */
+	WCD_MBHC_REG_READ(WCD_MBHC_HS_COMP_RESULT, hs_comp_res_1_8v);
+	if (!hs_comp_res_1_8v) {
+		spl_hs = false;
+		goto exit;
+	}
+
+	/* Bump up MB2 to 2.7v */
+	mbhc->mbhc_cb->mbhc_micb_ctrl_thr_mic(mbhc->codec,
+				mbhc->mbhc_cfg->mbhc_micbias, true);
+	WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_FSM_EN, 0);
+	WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_FSM_EN, 1);
+	usleep_range(10000, 10100);
+
+	/* Read back HS_COMP_RESULT */
+	WCD_MBHC_REG_READ(WCD_MBHC_HS_COMP_RESULT, hs_comp_res_2_7v);
+	if (!hs_comp_res_2_7v && hs_comp_res_1_8v)
+		spl_hs = true;
+
+	if (spl_hs && spl_hs_cnt)
+		*spl_hs_cnt += 1;
+
+	/* MB2 back to 1.8v */
+	if (!spl_hs_cnt || *spl_hs_cnt != WCD_MBHC_SPL_HS_CNT) {
+		mbhc->mbhc_cb->mbhc_micb_ctrl_thr_mic(mbhc->codec,
+				mbhc->mbhc_cfg->mbhc_micbias, false);
+		WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_FSM_EN, 0);
+		WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_FSM_EN, 1);
+		usleep_range(10000, 10100);
+	}
+
+	if (spl_hs)
+		pr_debug("%s: Detected special HS (%d)\n", __func__, spl_hs);
+
+exit:
+	return spl_hs;
+}
+
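+/*
+ * Deferred plug-type correction work: make an initial guess from the
+ * button and comparator results, then poll for up to
+ * HS_DETECT_PLUG_TIME_MS, handling cross connection (GND/MIC swap),
+ * special headsets and extension cables, before reporting the final
+ * plug type and restoring the micbias/current-source configuration.
+ */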
+static void wcd_correct_swch_plug(struct work_struct *work)
+{
+	struct wcd_mbhc *mbhc;
+	struct snd_soc_codec *codec;
+	enum wcd_mbhc_plug_type plug_type = MBHC_PLUG_TYPE_INVALID;
+	unsigned long timeout;
+	u16 hs_comp_res = 0, hphl_sch = 0, mic_sch = 0, btn_result = 0;
+	bool wrk_complete = false;
+	int pt_gnd_mic_swap_cnt = 0;
+	int no_gnd_mic_swap_cnt = 0;
+	bool is_pa_on = false, spl_hs = false, spl_hs_reported = false;
+	bool micbias2 = false;
+	bool micbias1 = false;
+	int ret = 0;
+	int rc, spl_hs_count = 0;
+	int cross_conn;
+	int try = 0;
+
+	pr_debug("%s: enter\n", __func__);
+
+	mbhc = container_of(work, struct wcd_mbhc, correct_plug_swch);
+	codec = mbhc->codec;
+
+	/*
+	 * Enable micbias/pullup for detection in the correction work.
+	 * This work gets scheduled from detect_plug_type, which has
+	 * already requested the pullup/micbias. If the pullup/micbias
+	 * is handled with ref-counts by the individual codec drivers,
+	 * there is no need to enable micbias/pullup here.
+	 */
+
+	wcd_enable_curr_micbias(mbhc, WCD_MBHC_EN_MB);
+
+	/* Enable HW FSM */
+	WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_FSM_EN, 1);
+	/*
+	 * Check for any button press interrupts before starting 3-sec
+	 * loop.
+	 */
+	rc = wait_for_completion_timeout(&mbhc->btn_press_compl,
+			msecs_to_jiffies(WCD_MBHC_BTN_PRESS_COMPL_TIMEOUT_MS));
+
+	WCD_MBHC_REG_READ(WCD_MBHC_BTN_RESULT, btn_result);
+	WCD_MBHC_REG_READ(WCD_MBHC_HS_COMP_RESULT, hs_comp_res);
+
+	if (!rc) {
+		pr_debug("%s No btn press interrupt\n", __func__);
+		if (!btn_result && !hs_comp_res)
+			plug_type = MBHC_PLUG_TYPE_HEADSET;
+		else if (!btn_result && hs_comp_res)
+			plug_type = MBHC_PLUG_TYPE_HIGH_HPH;
+		else
+			plug_type = MBHC_PLUG_TYPE_INVALID;
+	} else {
+		if (!btn_result && !hs_comp_res)
+			plug_type = MBHC_PLUG_TYPE_HEADPHONE;
+		else
+			plug_type = MBHC_PLUG_TYPE_INVALID;
+	}
+
+	/*
+	 * Check for cross connection GND_MIC_SWAP_THRESHOLD (4) times;
+	 * consider the result of the final iteration.
+	 */
+	do {
+		cross_conn = wcd_check_cross_conn(mbhc);
+		try++;
+	} while (try < GND_MIC_SWAP_THRESHOLD);
+	if (cross_conn > 0) {
+		pr_debug("%s: cross con found, start polling\n",
+			 __func__);
+		plug_type = MBHC_PLUG_TYPE_GND_MIC_SWAP;
+		pr_debug("%s: Plug found, plug type is %d\n",
+			 __func__, plug_type);
+		goto correct_plug_type;
+	}
+
+	if ((plug_type == MBHC_PLUG_TYPE_HEADSET ||
+	     plug_type == MBHC_PLUG_TYPE_HEADPHONE) &&
+	    (!wcd_swch_level_remove(mbhc))) {
+		WCD_MBHC_RSC_LOCK(mbhc);
+		wcd_mbhc_find_plug_and_report(mbhc, plug_type);
+		WCD_MBHC_RSC_UNLOCK(mbhc);
+	}
+
+correct_plug_type:
+
+	timeout = jiffies + msecs_to_jiffies(HS_DETECT_PLUG_TIME_MS);
+	while (!time_after(jiffies, timeout)) {
+		if (mbhc->hs_detect_work_stop) {
+			pr_debug("%s: stop requested: %d\n", __func__,
+					mbhc->hs_detect_work_stop);
+			wcd_enable_curr_micbias(mbhc,
+						WCD_MBHC_EN_NONE);
+			if (mbhc->mbhc_cb->mbhc_micb_ctrl_thr_mic &&
+				mbhc->micbias_enable) {
+				mbhc->mbhc_cb->mbhc_micb_ctrl_thr_mic(
+					mbhc->codec, MIC_BIAS_2, false);
+				if (mbhc->mbhc_cb->set_micbias_value)
+					mbhc->mbhc_cb->set_micbias_value(
+							mbhc->codec);
+				mbhc->micbias_enable = false;
+			}
+			goto exit;
+		}
+		if (mbhc->btn_press_intr) {
+			wcd_cancel_btn_work(mbhc);
+			mbhc->btn_press_intr = false;
+		}
+		/* Toggle FSM */
+		WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_FSM_EN, 0);
+		WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_FSM_EN, 1);
+
+		/* allow sometime and re-check stop requested again */
+		msleep(20);
+		if (mbhc->hs_detect_work_stop) {
+			pr_debug("%s: stop requested: %d\n", __func__,
+					mbhc->hs_detect_work_stop);
+			wcd_enable_curr_micbias(mbhc,
+						WCD_MBHC_EN_NONE);
+			if (mbhc->mbhc_cb->mbhc_micb_ctrl_thr_mic &&
+				mbhc->micbias_enable) {
+				mbhc->mbhc_cb->mbhc_micb_ctrl_thr_mic(
+					mbhc->codec, MIC_BIAS_2, false);
+				if (mbhc->mbhc_cb->set_micbias_value)
+					mbhc->mbhc_cb->set_micbias_value(
+							mbhc->codec);
+				mbhc->micbias_enable = false;
+			}
+			goto exit;
+		}
+		WCD_MBHC_REG_READ(WCD_MBHC_HS_COMP_RESULT, hs_comp_res);
+
+		pr_debug("%s: hs_comp_res: %x\n", __func__, hs_comp_res);
+		if (mbhc->mbhc_cb->hph_pa_on_status)
+			is_pa_on = mbhc->mbhc_cb->hph_pa_on_status(codec);
+
+		/*
+		 * Instead of hogging the system with continuous polling,
+		 * wait for some time and then re-check the stop request.
+		 */
+		msleep(180);
+		if (hs_comp_res && (spl_hs_count < WCD_MBHC_SPL_HS_CNT)) {
+			spl_hs = wcd_mbhc_check_for_spl_headset(mbhc,
+								&spl_hs_count);
+
+			if (spl_hs_count == WCD_MBHC_SPL_HS_CNT) {
+				hs_comp_res = 0;
+				spl_hs = true;
+				mbhc->micbias_enable = true;
+			}
+		}
+
+		if ((!hs_comp_res) && (!is_pa_on)) {
+			/* Check for cross connection*/
+			ret = wcd_check_cross_conn(mbhc);
+			if (ret < 0) {
+				continue;
+			} else if (ret > 0) {
+				pt_gnd_mic_swap_cnt++;
+				no_gnd_mic_swap_cnt = 0;
+				if (pt_gnd_mic_swap_cnt <
+						GND_MIC_SWAP_THRESHOLD) {
+					continue;
+				} else if (pt_gnd_mic_swap_cnt >
+						GND_MIC_SWAP_THRESHOLD) {
+					/*
+					 * The GND/MIC switch didn't work;
+					 * report an unsupported plug.
+					 */
+					pr_debug("%s: switch didn't work\n",
+						  __func__);
+					plug_type = MBHC_PLUG_TYPE_GND_MIC_SWAP;
+					goto report;
+				} else {
+					plug_type = MBHC_PLUG_TYPE_GND_MIC_SWAP;
+				}
+			} else {
+				no_gnd_mic_swap_cnt++;
+				pt_gnd_mic_swap_cnt = 0;
+				plug_type = MBHC_PLUG_TYPE_HEADSET;
+				if ((no_gnd_mic_swap_cnt <
+				    GND_MIC_SWAP_THRESHOLD) &&
+				    (spl_hs_count != WCD_MBHC_SPL_HS_CNT)) {
+					continue;
+				} else {
+					no_gnd_mic_swap_cnt = 0;
+				}
+			}
+			if ((pt_gnd_mic_swap_cnt == GND_MIC_SWAP_THRESHOLD) &&
+				(plug_type == MBHC_PLUG_TYPE_GND_MIC_SWAP)) {
+				/*
+				 * if switch is toggled, check again,
+				 * otherwise report unsupported plug
+				 */
+				if (mbhc->mbhc_cfg->swap_gnd_mic &&
+					mbhc->mbhc_cfg->swap_gnd_mic(codec)) {
+					pr_debug("%s: US_EU gpio present,flip switch\n"
+						, __func__);
+					continue;
+				}
+			}
+		}
+
+		WCD_MBHC_REG_READ(WCD_MBHC_HPHL_SCHMT_RESULT, hphl_sch);
+		WCD_MBHC_REG_READ(WCD_MBHC_MIC_SCHMT_RESULT, mic_sch);
+		if (hs_comp_res && !(hphl_sch || mic_sch)) {
+			pr_debug("%s: cable is extension cable\n", __func__);
+			plug_type = MBHC_PLUG_TYPE_HIGH_HPH;
+			wrk_complete = true;
+		} else {
+			pr_debug("%s: cable might be headset: %d\n", __func__,
+					plug_type);
+			if (!(plug_type == MBHC_PLUG_TYPE_GND_MIC_SWAP)) {
+				plug_type = MBHC_PLUG_TYPE_HEADSET;
+				if (!spl_hs_reported &&
+				    spl_hs_count == WCD_MBHC_SPL_HS_CNT) {
+					spl_hs_reported = true;
+					WCD_MBHC_RSC_LOCK(mbhc);
+					wcd_mbhc_find_plug_and_report(mbhc,
+								    plug_type);
+					WCD_MBHC_RSC_UNLOCK(mbhc);
+					continue;
+				} else if (spl_hs_reported)
+					continue;
+				/*
+				 * Report a headset only if not already
+				 * reported and there is no button press
+				 * without a release.
+				 */
+				if (((mbhc->current_plug !=
+				      MBHC_PLUG_TYPE_HEADSET) &&
+				     (mbhc->current_plug !=
+				      MBHC_PLUG_TYPE_ANC_HEADPHONE)) &&
+				    !wcd_swch_level_remove(mbhc) &&
+				    !mbhc->btn_press_intr) {
+					pr_debug("%s: cable is %sheadset\n",
+						__func__,
+						((spl_hs_count ==
+							WCD_MBHC_SPL_HS_CNT) ?
+							"special ":""));
+					goto report;
+				}
+			}
+			wrk_complete = false;
+		}
+	}
+	if (!wrk_complete && mbhc->btn_press_intr) {
+		pr_debug("%s: Can be slow insertion of headphone\n", __func__);
+		wcd_cancel_btn_work(mbhc);
+		/*
+		 * Report as headphone only if not previously
+		 * reported as lineout.
+		 */
+		if (!mbhc->force_linein)
+			plug_type = MBHC_PLUG_TYPE_HEADPHONE;
+	}
+	/*
+	 * If plug_type is headset, we might have already reported it either
+	 * in detect_plug_type or in the above while loop; no need to report
+	 * it again.
+	 */
+	if (!wrk_complete && ((plug_type == MBHC_PLUG_TYPE_HEADSET) ||
+	    (plug_type == MBHC_PLUG_TYPE_ANC_HEADPHONE))) {
+		pr_debug("%s: plug_type:0x%x already reported\n",
+			 __func__, mbhc->current_plug);
+		goto enable_supply;
+	}
+
+	if (plug_type == MBHC_PLUG_TYPE_HIGH_HPH &&
+		(!det_extn_cable_en)) {
+		if (wcd_is_special_headset(mbhc)) {
+			pr_debug("%s: Special headset found %d\n",
+					__func__, plug_type);
+			plug_type = MBHC_PLUG_TYPE_HEADSET;
+			goto report;
+		}
+	}
+
+report:
+	if (wcd_swch_level_remove(mbhc)) {
+		pr_debug("%s: Switch level is low\n", __func__);
+		goto exit;
+	}
+	if (plug_type == MBHC_PLUG_TYPE_GND_MIC_SWAP && mbhc->btn_press_intr) {
+		pr_debug("%s: insertion of headphone with swap\n", __func__);
+		wcd_cancel_btn_work(mbhc);
+		plug_type = MBHC_PLUG_TYPE_HEADPHONE;
+	}
+	pr_debug("%s: Valid plug found, plug type %d wrk_cmpt %d btn_intr %d\n",
+			__func__, plug_type, wrk_complete,
+			mbhc->btn_press_intr);
+	WCD_MBHC_RSC_LOCK(mbhc);
+	wcd_mbhc_find_plug_and_report(mbhc, plug_type);
+	WCD_MBHC_RSC_UNLOCK(mbhc);
+enable_supply:
+	if (mbhc->mbhc_cb->mbhc_micbias_control)
+		wcd_mbhc_update_fsm_source(mbhc, plug_type);
+	else
+		wcd_enable_mbhc_supply(mbhc, plug_type);
+exit:
+	if (mbhc->mbhc_cb->mbhc_micbias_control &&
+	    !mbhc->micbias_enable)
+		mbhc->mbhc_cb->mbhc_micbias_control(codec, MIC_BIAS_2,
+						    MICB_DISABLE);
+
+	/*
+	 * If plug type is corrected from special headset to headphone,
+	 * clear the micbias enable flag, set micbias back to 1.8V and
+	 * disable micbias.
+	 */
+	if (plug_type == MBHC_PLUG_TYPE_HEADPHONE &&
+	    mbhc->micbias_enable) {
+		if (mbhc->mbhc_cb->mbhc_micbias_control)
+			mbhc->mbhc_cb->mbhc_micbias_control(
+					codec, MIC_BIAS_2,
+					MICB_DISABLE);
+		if (mbhc->mbhc_cb->mbhc_micb_ctrl_thr_mic)
+			mbhc->mbhc_cb->mbhc_micb_ctrl_thr_mic(
+					codec,
+					MIC_BIAS_2, false);
+		if (mbhc->mbhc_cb->set_micbias_value) {
+			mbhc->mbhc_cb->set_micbias_value(codec);
+			WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_MICB_CTRL, 0);
+		}
+		mbhc->micbias_enable = false;
+	}
+
+	if (mbhc->mbhc_cb->micbias_enable_status) {
+		micbias1 = mbhc->mbhc_cb->micbias_enable_status(mbhc,
+								MIC_BIAS_1);
+		micbias2 = mbhc->mbhc_cb->micbias_enable_status(mbhc,
+								MIC_BIAS_2);
+	}
+
+	if (mbhc->mbhc_cfg->detect_extn_cable &&
+	    ((plug_type == MBHC_PLUG_TYPE_HEADPHONE) ||
+	     (plug_type == MBHC_PLUG_TYPE_HEADSET)) &&
+	    !mbhc->hs_detect_work_stop) {
+		WCD_MBHC_RSC_LOCK(mbhc);
+		wcd_mbhc_hs_elec_irq(mbhc, WCD_MBHC_ELEC_HS_REM, true);
+		WCD_MBHC_RSC_UNLOCK(mbhc);
+	}
+	if (mbhc->mbhc_cb->set_cap_mode)
+		mbhc->mbhc_cb->set_cap_mode(codec, micbias1, micbias2);
+
+	if (mbhc->mbhc_cb->hph_pull_down_ctrl)
+		mbhc->mbhc_cb->hph_pull_down_ctrl(codec, true);
+
+	mbhc->mbhc_cb->lock_sleep(mbhc, false);
+	pr_debug("%s: leave\n", __func__);
+}
+
+/* called under codec_resource_lock acquisition */
+static void wcd_mbhc_detect_plug_type(struct wcd_mbhc *mbhc)
+{
+	struct snd_soc_codec *codec = mbhc->codec;
+	bool micbias1 = false;
+
+	pr_debug("%s: enter\n", __func__);
+	WCD_MBHC_RSC_ASSERT_LOCKED(mbhc);
+
+	if (mbhc->mbhc_cb->hph_pull_down_ctrl)
+		mbhc->mbhc_cb->hph_pull_down_ctrl(codec, false);
+
+	if (mbhc->mbhc_cb->micbias_enable_status)
+		micbias1 = mbhc->mbhc_cb->micbias_enable_status(mbhc,
+								MIC_BIAS_1);
+
+	if (mbhc->mbhc_cb->set_cap_mode)
+		mbhc->mbhc_cb->set_cap_mode(codec, micbias1, true);
+
+	if (mbhc->mbhc_cb->mbhc_micbias_control)
+		mbhc->mbhc_cb->mbhc_micbias_control(codec, MIC_BIAS_2,
+						    MICB_ENABLE);
+	else
+		wcd_enable_curr_micbias(mbhc, WCD_MBHC_EN_MB);
+
+	/* Re-initialize button press completion object */
+	reinit_completion(&mbhc->btn_press_compl);
+	wcd_schedule_hs_detect_plug(mbhc, &mbhc->correct_plug_swch);
+	pr_debug("%s: leave\n", __func__);
+}
+
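+/*
+ * Mechanical switch (plug/unplug) handler: on insertion, power up the
+ * MBHC bias blocks and kick off plug-type detection; on removal, tear
+ * down detection and report removal of whatever plug type was last
+ * reported.
+ */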
+static void wcd_mbhc_swch_irq_handler(struct wcd_mbhc *mbhc)
+{
+	bool detection_type = 0;
+	bool micbias1 = false;
+	struct snd_soc_codec *codec = mbhc->codec;
+
+	dev_dbg(codec->dev, "%s: enter\n", __func__);
+
+	WCD_MBHC_RSC_LOCK(mbhc);
+
+	mbhc->in_swch_irq_handler = true;
+
+	/* cancel pending button press */
+	if (wcd_cancel_btn_work(mbhc))
+		pr_debug("%s: button press is canceled\n", __func__);
+
+	WCD_MBHC_REG_READ(WCD_MBHC_MECH_DETECTION_TYPE, detection_type);
+
+	/* Set the detection type appropriately */
+	WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_MECH_DETECTION_TYPE,
+				 !detection_type);
+
+	pr_debug("%s: mbhc->current_plug: %d detection_type: %d\n", __func__,
+			mbhc->current_plug, detection_type);
+	wcd_cancel_hs_detect_plug(mbhc, &mbhc->correct_plug_swch);
+
+	if (mbhc->mbhc_cb->micbias_enable_status)
+		micbias1 = mbhc->mbhc_cb->micbias_enable_status(mbhc,
+						MIC_BIAS_1);
+
+	if ((mbhc->current_plug == MBHC_PLUG_TYPE_NONE) &&
+	    detection_type) {
+		/* Make sure MASTER_BIAS_CTL is enabled */
+		mbhc->mbhc_cb->mbhc_bias(codec, true);
+
+		if (mbhc->mbhc_cb->mbhc_common_micb_ctrl)
+			mbhc->mbhc_cb->mbhc_common_micb_ctrl(codec,
+					MBHC_COMMON_MICB_TAIL_CURR, true);
+
+		if (!mbhc->mbhc_cfg->hs_ext_micbias &&
+		     mbhc->mbhc_cb->micb_internal)
+			/*
+			 * Enable Tx2 RBias if the headset
+			 * is using internal micbias
+			 */
+			mbhc->mbhc_cb->micb_internal(codec, 1, true);
+
+		/* Remove micbias pulldown */
+		WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_PULLDOWN_CTRL, 0);
+		/* Apply trim if needed on the device */
+		if (mbhc->mbhc_cb->trim_btn_reg)
+			mbhc->mbhc_cb->trim_btn_reg(codec);
+		/* Enable external voltage source to micbias if present */
+		if (mbhc->mbhc_cb->enable_mb_source)
+			mbhc->mbhc_cb->enable_mb_source(mbhc, true);
+		mbhc->btn_press_intr = false;
+		mbhc->is_btn_press = false;
+		wcd_mbhc_detect_plug_type(mbhc);
+	} else if ((mbhc->current_plug != MBHC_PLUG_TYPE_NONE)
+			&& !detection_type) {
+		/* Disable external voltage source to micbias if present */
+		if (mbhc->mbhc_cb->enable_mb_source)
+			mbhc->mbhc_cb->enable_mb_source(mbhc, false);
+		/* Disable HW FSM */
+		WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_FSM_EN, 0);
+		WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_BTN_ISRC_CTL, 0);
+		if (mbhc->mbhc_cb->mbhc_common_micb_ctrl)
+			mbhc->mbhc_cb->mbhc_common_micb_ctrl(codec,
+					MBHC_COMMON_MICB_TAIL_CURR, false);
+
+		if (mbhc->mbhc_cb->set_cap_mode)
+			mbhc->mbhc_cb->set_cap_mode(codec, micbias1, false);
+
+		mbhc->btn_press_intr = false;
+		mbhc->is_btn_press = false;
+		if (mbhc->current_plug == MBHC_PLUG_TYPE_HEADPHONE) {
+			wcd_mbhc_hs_elec_irq(mbhc, WCD_MBHC_ELEC_HS_REM,
+					     false);
+			wcd_mbhc_hs_elec_irq(mbhc, WCD_MBHC_ELEC_HS_INS,
+					     false);
+			WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_ELECT_DETECTION_TYPE,
+						 1);
+			WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_ELECT_SCHMT_ISRC, 0);
+			wcd_mbhc_report_plug(mbhc, 0, SND_JACK_HEADPHONE);
+		} else if (mbhc->current_plug == MBHC_PLUG_TYPE_GND_MIC_SWAP) {
+			wcd_mbhc_report_plug(mbhc, 0, SND_JACK_UNSUPPORTED);
+		} else if (mbhc->current_plug == MBHC_PLUG_TYPE_HEADSET) {
+			/* make sure to turn off Rbias */
+			if (mbhc->mbhc_cb->micb_internal)
+				mbhc->mbhc_cb->micb_internal(codec, 1, false);
+
+			/* Pulldown micbias */
+			WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_PULLDOWN_CTRL, 1);
+			wcd_mbhc_hs_elec_irq(mbhc, WCD_MBHC_ELEC_HS_REM,
+					     false);
+			wcd_mbhc_hs_elec_irq(mbhc, WCD_MBHC_ELEC_HS_INS,
+					     false);
+			WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_ELECT_DETECTION_TYPE,
+						 1);
+			WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_ELECT_SCHMT_ISRC, 0);
+			wcd_mbhc_report_plug(mbhc, 0, SND_JACK_HEADSET);
+		} else if (mbhc->current_plug == MBHC_PLUG_TYPE_HIGH_HPH) {
+			mbhc->is_extn_cable = false;
+			wcd_mbhc_hs_elec_irq(mbhc, WCD_MBHC_ELEC_HS_REM,
+					     false);
+			wcd_mbhc_hs_elec_irq(mbhc, WCD_MBHC_ELEC_HS_INS,
+					     false);
+			WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_ELECT_DETECTION_TYPE,
+						 1);
+			WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_ELECT_SCHMT_ISRC, 0);
+			wcd_mbhc_report_plug(mbhc, 0, SND_JACK_LINEOUT);
+		} else if (mbhc->current_plug == MBHC_PLUG_TYPE_ANC_HEADPHONE) {
+			wcd_mbhc_hs_elec_irq(mbhc, WCD_MBHC_ELEC_HS_REM, false);
+			wcd_mbhc_hs_elec_irq(mbhc, WCD_MBHC_ELEC_HS_INS, false);
+			WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_ELECT_DETECTION_TYPE,
+						 0);
+			WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_ELECT_SCHMT_ISRC, 0);
+			wcd_mbhc_report_plug(mbhc, 0, SND_JACK_ANC_HEADPHONE);
+		}
+	} else if (!detection_type) {
+		/* Disable external voltage source to micbias if present */
+		if (mbhc->mbhc_cb->enable_mb_source)
+			mbhc->mbhc_cb->enable_mb_source(mbhc, false);
+		/* Disable HW FSM */
+		WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_FSM_EN, 0);
+		WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_BTN_ISRC_CTL, 0);
+	}
+
+	mbhc->in_swch_irq_handler = false;
+	WCD_MBHC_RSC_UNLOCK(mbhc);
+	pr_debug("%s: leave\n", __func__);
+}
+
+static irqreturn_t wcd_mbhc_mech_plug_detect_irq(int irq, void *data)
+{
+	int r = IRQ_HANDLED;
+	struct wcd_mbhc *mbhc = data;
+
+	pr_debug("%s: enter\n", __func__);
+	if (unlikely(!mbhc->mbhc_cb->lock_sleep(mbhc, true))) {
+		pr_warn("%s: failed to hold suspend\n", __func__);
+		r = IRQ_NONE;
+	} else {
+		/* Call handler */
+		wcd_mbhc_swch_irq_handler(mbhc);
+		mbhc->mbhc_cb->lock_sleep(mbhc, false);
+	}
+	pr_debug("%s: leave %d\n", __func__, r);
+	return r;
+}
+
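+/* Map the codec-reported button number (0..5) to an SND_JACK_BTN_* mask */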
+static int wcd_mbhc_get_button_mask(struct wcd_mbhc *mbhc)
+{
+	int mask = 0;
+	int btn;
+
+	btn = mbhc->mbhc_cb->map_btn_code_to_num(mbhc->codec);
+
+	switch (btn) {
+	case 0:
+		mask = SND_JACK_BTN_0;
+		break;
+	case 1:
+		mask = SND_JACK_BTN_1;
+		break;
+	case 2:
+		mask = SND_JACK_BTN_2;
+		break;
+	case 3:
+		mask = SND_JACK_BTN_3;
+		break;
+	case 4:
+		mask = SND_JACK_BTN_4;
+		break;
+	case 5:
+		mask = SND_JACK_BTN_5;
+		break;
+	default:
+		break;
+	}
+
+	return mask;
+}
+
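+/*
+ * Electrical insertion interrupt, used only with extension cables:
+ * proceed to plug type determination once both the HPHL and MIC
+ * Schmitt triggers have fired, re-arming the MIC trigger after a
+ * partial hit.
+ */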
+static irqreturn_t wcd_mbhc_hs_ins_irq(int irq, void *data)
+{
+	struct wcd_mbhc *mbhc = data;
+	bool detection_type = 0, hphl_sch = 0, mic_sch = 0;
+	u16 elect_result = 0;
+	static u16 hphl_trigerred;
+	static u16 mic_trigerred;
+
+	pr_debug("%s: enter\n", __func__);
+	if (!mbhc->mbhc_cfg->detect_extn_cable) {
+		pr_debug("%s: Returning as Extension cable feature not enabled\n",
+			__func__);
+		return IRQ_HANDLED;
+	}
+	WCD_MBHC_RSC_LOCK(mbhc);
+
+	WCD_MBHC_REG_READ(WCD_MBHC_ELECT_DETECTION_TYPE, detection_type);
+	WCD_MBHC_REG_READ(WCD_MBHC_ELECT_RESULT, elect_result);
+
+	pr_debug("%s: detection_type %d, elect_result %x\n", __func__,
+				detection_type, elect_result);
+	if (detection_type) {
+		/* check if both Left and MIC Schmitt triggers are triggered */
+		WCD_MBHC_REG_READ(WCD_MBHC_HPHL_SCHMT_RESULT, hphl_sch);
+		WCD_MBHC_REG_READ(WCD_MBHC_MIC_SCHMT_RESULT, mic_sch);
+		if (hphl_sch && mic_sch) {
+			/* Go for plug type determination */
+			pr_debug("%s: Go for plug type determination\n",
+				  __func__);
+			goto determine_plug;
+
+		} else {
+			if (mic_sch) {
+				mic_trigerred++;
+				pr_debug("%s: Insertion MIC trigerred %d\n",
+					 __func__, mic_trigerred);
+				WCD_MBHC_REG_UPDATE_BITS(
+						WCD_MBHC_ELECT_SCHMT_ISRC,
+						0);
+				msleep(20);
+				WCD_MBHC_REG_UPDATE_BITS(
+						WCD_MBHC_ELECT_SCHMT_ISRC,
+						1);
+			}
+			if (hphl_sch) {
+				hphl_trigerred++;
+				pr_debug("%s: Insertion HPHL trigerred %d\n",
+					 __func__, hphl_trigerred);
+			}
+			if (mic_trigerred && hphl_trigerred) {
+				/* Go for plug type determination */
+				pr_debug("%s: Go for plug type determination\n",
+					 __func__);
+				goto determine_plug;
+			}
+		}
+	}
+	WCD_MBHC_RSC_UNLOCK(mbhc);
+	pr_debug("%s: leave\n", __func__);
+	return IRQ_HANDLED;
+
+determine_plug:
+	/*
+	 * Disable the insertion interrupt and the HPHL/MIC Schmitt
+	 * triggers before running plug type determination.
+	 */
+	pr_debug("%s: Disable insertion interrupt\n", __func__);
+	wcd_mbhc_hs_elec_irq(mbhc, WCD_MBHC_ELEC_HS_INS,
+			     false);
+
+	WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_ELECT_SCHMT_ISRC, 0);
+	hphl_trigerred = 0;
+	mic_trigerred = 0;
+	mbhc->is_extn_cable = true;
+	mbhc->btn_press_intr = false;
+	mbhc->is_btn_press = false;
+	wcd_mbhc_detect_plug_type(mbhc);
+	WCD_MBHC_RSC_UNLOCK(mbhc);
+	pr_debug("%s: leave\n", __func__);
+	return IRQ_HANDLED;
+}
+
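+/*
+ * Electrical removal interrupt: debounce for
+ * WCD_FAKE_REMOVAL_MIN_PERIOD_MS by re-reading hs_comp_result, then
+ * use the Schmitt trigger results to decide whether the headset was
+ * really pulled off the extension cable (report LINEOUT and re-arm
+ * insertion detection) or the interrupt was spurious.
+ */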
+static irqreturn_t wcd_mbhc_hs_rem_irq(int irq, void *data)
+{
+	struct wcd_mbhc *mbhc = data;
+	u8 hs_comp_result = 0, hphl_sch = 0, mic_sch = 0;
+	static u16 hphl_trigerred;
+	static u16 mic_trigerred;
+	unsigned long timeout;
+	bool removed = true;
+	int retry = 0;
+
+	pr_debug("%s: enter\n", __func__);
+
+	WCD_MBHC_RSC_LOCK(mbhc);
+
+	timeout = jiffies +
+		  msecs_to_jiffies(WCD_FAKE_REMOVAL_MIN_PERIOD_MS);
+	do {
+		retry++;
+		/*
+		 * read the result register every 10ms to look for
+		 * any change in HS_COMP_RESULT bit
+		 */
+		usleep_range(10000, 10100);
+		WCD_MBHC_REG_READ(WCD_MBHC_HS_COMP_RESULT, hs_comp_result);
+		pr_debug("%s: Check result reg for fake removal: hs_comp_res %x\n",
+			 __func__, hs_comp_result);
+		if ((!hs_comp_result) &&
+		    retry > FAKE_REM_RETRY_ATTEMPTS) {
+			removed = false;
+			break;
+		}
+	} while (!time_after(jiffies, timeout));
+
+	if (wcd_swch_level_remove(mbhc)) {
+		pr_debug("%s: Switch level is low ", __func__);
+		goto exit;
+	}
+	pr_debug("%s: headset %s actually removed\n", __func__,
+		removed ? "" : "not ");
+
+	WCD_MBHC_REG_READ(WCD_MBHC_HPHL_SCHMT_RESULT, hphl_sch);
+	WCD_MBHC_REG_READ(WCD_MBHC_MIC_SCHMT_RESULT, mic_sch);
+	WCD_MBHC_REG_READ(WCD_MBHC_HS_COMP_RESULT, hs_comp_result);
+
+	if (removed) {
+		if (!(hphl_sch && mic_sch && hs_comp_result)) {
+			/*
+			 * extension cable is still plugged in
+			 * report it as LINEOUT device
+			 */
+			goto report_unplug;
+		} else {
+			if (!mic_sch) {
+				mic_trigerred++;
+				pr_debug("%s: Removal MIC trigerred %d\n",
+					 __func__, mic_trigerred);
+			}
+			if (!hphl_sch) {
+				hphl_trigerred++;
+				pr_debug("%s: Removal HPHL trigerred %d\n",
+					 __func__, hphl_trigerred);
+			}
+			if (mic_trigerred && hphl_trigerred) {
+				/*
+				 * extension cable is still plugged in
+				 * report it as LINEOUT device
+				 */
+				goto report_unplug;
+			}
+		}
+	}
+exit:
+	WCD_MBHC_RSC_UNLOCK(mbhc);
+	pr_debug("%s: leave\n", __func__);
+	return IRQ_HANDLED;
+
+report_unplug:
+
+	/* cancel pending button press */
+	if (wcd_cancel_btn_work(mbhc))
+		pr_debug("%s: button press is canceled\n", __func__);
+	/* cancel correct work function */
+	wcd_cancel_hs_detect_plug(mbhc, &mbhc->correct_plug_swch);
+
+	pr_debug("%s: Report extension cable\n", __func__);
+	wcd_mbhc_report_plug(mbhc, 1, SND_JACK_LINEOUT);
+	/*
+	 * If the PA is enabled, the HPHL Schmitt trigger can be
+	 * unreliable; make sure to disable it.
+	 */
+	if (test_bit(WCD_MBHC_EVENT_PA_HPHL,
+		&mbhc->event_state))
+		wcd_mbhc_set_and_turnoff_hph_padac(mbhc);
+	/*
+	 * Disable the removal interrupt and set the HPHL/MIC Schmitt
+	 * triggers up for insertion detection.
+	 */
+	wcd_mbhc_hs_elec_irq(mbhc, WCD_MBHC_ELEC_HS_REM,
+			     false);
+	wcd_enable_curr_micbias(mbhc, WCD_MBHC_EN_NONE);
+	/* Disable HW FSM */
+	WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_FSM_EN, 0);
+	WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_ELECT_SCHMT_ISRC, 3);
+
+	/* Set the detection type appropriately */
+	WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_ELECT_DETECTION_TYPE, 1);
+	wcd_mbhc_hs_elec_irq(mbhc, WCD_MBHC_ELEC_HS_INS,
+			     true);
+	hphl_trigerred = 0;
+	mic_trigerred = 0;
+	WCD_MBHC_RSC_UNLOCK(mbhc);
+	pr_debug("%s: leave\n", __func__);
+	return IRQ_HANDLED;
+}
+
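+/*
+ * Delayed work fired ~400 ms after a button press with no release:
+ * report a long-press event for the buttons still held, then drop the
+ * sleep lock taken by the press handler.
+ */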
+static void wcd_btn_lpress_fn(struct work_struct *work)
+{
+	struct delayed_work *dwork;
+	struct wcd_mbhc *mbhc;
+	s16 btn_result = 0;
+
+	pr_debug("%s: Enter\n", __func__);
+
+	dwork = to_delayed_work(work);
+	mbhc = container_of(dwork, struct wcd_mbhc, mbhc_btn_dwork);
+
+	WCD_MBHC_REG_READ(WCD_MBHC_BTN_RESULT, btn_result);
+	if (mbhc->current_plug == MBHC_PLUG_TYPE_HEADSET) {
+		pr_debug("%s: Reporting long button press event, btn_result: %d\n",
+			 __func__, btn_result);
+		wcd_mbhc_jack_report(mbhc, &mbhc->button_jack,
+				mbhc->buttons_pressed, mbhc->buttons_pressed);
+	}
+	pr_debug("%s: leave\n", __func__);
+	mbhc->mbhc_cb->lock_sleep(mbhc, false);
+}
+
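+/*
+ * Sanity-check a calibration blob: it must be at least
+ * WCD_MBHC_CAL_MIN_SIZE bytes and large enough to hold the button
+ * detection configuration it describes.
+ */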
+static bool wcd_mbhc_fw_validate(const void *data, size_t size)
+{
+	u32 cfg_offset;
+	struct wcd_mbhc_btn_detect_cfg *btn_cfg;
+	struct firmware_cal fw;
+
+	fw.data = (void *)data;
+	fw.size = size;
+
+	if (fw.size < WCD_MBHC_CAL_MIN_SIZE)
+		return false;
+
+	/*
+	 * Previous check guarantees that there is enough fw data up
+	 * to num_btn
+	 */
+	btn_cfg = WCD_MBHC_CAL_BTN_DET_PTR(fw.data);
+	cfg_offset = (u32) ((void *) btn_cfg - (void *) fw.data);
+	if (fw.size < (cfg_offset + WCD_MBHC_CAL_BTN_SZ(btn_cfg)))
+		return false;
+
+	return true;
+}
+
+static irqreturn_t wcd_mbhc_btn_press_handler(int irq, void *data)
+{
+	struct wcd_mbhc *mbhc = data;
+	int mask;
+	unsigned long msec_val;
+
+	pr_debug("%s: enter\n", __func__);
+	complete(&mbhc->btn_press_compl);
+	WCD_MBHC_RSC_LOCK(mbhc);
+	wcd_cancel_btn_work(mbhc);
+	if (wcd_swch_level_remove(mbhc)) {
+		pr_debug("%s: Switch level is low ", __func__);
+		goto done;
+	}
+
+	mbhc->is_btn_press = true;
+	msec_val = jiffies_to_msecs(jiffies - mbhc->jiffies_atreport);
+	pr_debug("%s: msec_val = %ld\n", __func__, msec_val);
+	if (msec_val < MBHC_BUTTON_PRESS_THRESHOLD_MIN) {
+		pr_debug("%s: Too short, ignore button press\n", __func__);
+		goto done;
+	}
+
+	/* If switch interrupt already kicked in, ignore button press */
+	if (mbhc->in_swch_irq_handler) {
+		pr_debug("%s: Swtich level changed, ignore button press\n",
+			 __func__);
+		goto done;
+	}
+	mask = wcd_mbhc_get_button_mask(mbhc);
+	if (mask == SND_JACK_BTN_0)
+		mbhc->btn_press_intr = true;
+
+	if (mbhc->current_plug != MBHC_PLUG_TYPE_HEADSET) {
+		pr_debug("%s: Plug isn't headset, ignore button press\n",
+				__func__);
+		goto done;
+	}
+	mbhc->buttons_pressed |= mask;
+	mbhc->mbhc_cb->lock_sleep(mbhc, true);
+	if (schedule_delayed_work(&mbhc->mbhc_btn_dwork,
+				msecs_to_jiffies(400)) == 0) {
+		WARN(1, "Button pressed twice without release event\n");
+		mbhc->mbhc_cb->lock_sleep(mbhc, false);
+	}
+done:
+	pr_debug("%s: leave\n", __func__);
+	WCD_MBHC_RSC_UNLOCK(mbhc);
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t wcd_mbhc_release_handler(int irq, void *data)
+{
+	struct wcd_mbhc *mbhc = data;
+	int ret;
+
+	pr_debug("%s: enter\n", __func__);
+	WCD_MBHC_RSC_LOCK(mbhc);
+	if (wcd_swch_level_remove(mbhc)) {
+		pr_debug("%s: Switch level is low ", __func__);
+		goto exit;
+	}
+
+	if (mbhc->is_btn_press) {
+		mbhc->is_btn_press = false;
+	} else {
+		pr_debug("%s: This release is for fake btn press\n", __func__);
+		goto exit;
+	}
+
+	/*
+	 * If the current plug is a headphone, there is no chance of
+	 * getting a button release interrupt, so the connected cable
+	 * must be a headset, not a headphone.
+	 */
+	if (mbhc->current_plug == MBHC_PLUG_TYPE_HEADPHONE) {
+		wcd_mbhc_find_plug_and_report(mbhc, MBHC_PLUG_TYPE_HEADSET);
+		goto exit;
+
+	}
+	if (mbhc->buttons_pressed & WCD_MBHC_JACK_BUTTON_MASK) {
+		ret = wcd_cancel_btn_work(mbhc);
+		if (ret == 0) {
+			pr_debug("%s: Reporting long button release event\n",
+				 __func__);
+			wcd_mbhc_jack_report(mbhc, &mbhc->button_jack,
+					0, mbhc->buttons_pressed);
+		} else {
+			if (mbhc->in_swch_irq_handler) {
+				pr_debug("%s: Switch irq kicked in, ignore\n",
+					__func__);
+			} else {
+				pr_debug("%s: Reporting btn press\n",
+					 __func__);
+				wcd_mbhc_jack_report(mbhc,
+						     &mbhc->button_jack,
+						     mbhc->buttons_pressed,
+						     mbhc->buttons_pressed);
+				pr_debug("%s: Reporting btn release\n",
+					 __func__);
+				wcd_mbhc_jack_report(mbhc,
+						&mbhc->button_jack,
+						0, mbhc->buttons_pressed);
+			}
+		}
+		mbhc->buttons_pressed &= ~WCD_MBHC_JACK_BUTTON_MASK;
+	}
+exit:
+	pr_debug("%s: leave\n", __func__);
+	WCD_MBHC_RSC_UNLOCK(mbhc);
+	return IRQ_HANDLED;
+}
+
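+/*
+ * HPHL over-current interrupt: retry by toggling the OCP FSM up to
+ * OCP_ATTEMPT times; after that, mask the interrupt and report
+ * SND_JACK_OC_HPHL to userspace.
+ */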
+static irqreturn_t wcd_mbhc_hphl_ocp_irq(int irq, void *data)
+{
+	struct wcd_mbhc *mbhc = data;
+	int val;
+
+	pr_debug("%s: received HPHL OCP irq\n", __func__);
+	if (mbhc) {
+		if (mbhc->mbhc_cb->hph_register_recovery) {
+			if (mbhc->mbhc_cb->hph_register_recovery(mbhc)) {
+				WCD_MBHC_REG_READ(WCD_MBHC_HPHR_OCP_STATUS,
+						  val);
+				if ((val != -EINVAL) && val)
+					mbhc->is_hph_ocp_pending = true;
+				goto done;
+			}
+		}
+
+		if (mbhc->hphlocp_cnt < OCP_ATTEMPT) {
+			mbhc->hphlocp_cnt++;
+			pr_debug("%s: retry, hphlocp_cnt: %d\n", __func__,
+				 mbhc->hphlocp_cnt);
+			WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_OCP_FSM_EN, 0);
+			WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_OCP_FSM_EN, 1);
+		} else {
+			mbhc->mbhc_cb->irq_control(mbhc->codec,
+						   mbhc->intr_ids->hph_left_ocp,
+						   false);
+			mbhc->hph_status |= SND_JACK_OC_HPHL;
+			wcd_mbhc_jack_report(mbhc, &mbhc->headset_jack,
+					    mbhc->hph_status,
+					    WCD_MBHC_JACK_MASK);
+		}
+	} else {
+		pr_err("%s: Bad wcd9xxx_spmi private data\n", __func__);
+	}
+done:
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t wcd_mbhc_hphr_ocp_irq(int irq, void *data)
+{
+	struct wcd_mbhc *mbhc = data;
+
+	pr_debug("%s: received HPHR OCP irq\n", __func__);
+
+	if (!mbhc) {
+		pr_err("%s: Bad mbhc private data\n", __func__);
+		goto done;
+	}
+
+	if (mbhc->is_hph_ocp_pending) {
+		mbhc->is_hph_ocp_pending = false;
+		goto done;
+	}
+
+	if (mbhc->mbhc_cb->hph_register_recovery) {
+		if (mbhc->mbhc_cb->hph_register_recovery(mbhc))
+			/* register corruption, hence reset registers */
+			goto done;
+	}
+	if (mbhc->hphrocp_cnt < OCP_ATTEMPT) {
+		mbhc->hphrocp_cnt++;
+		pr_debug("%s: retry, hphrocp_cnt: %d\n", __func__,
+			 mbhc->hphrocp_cnt);
+		WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_OCP_FSM_EN, 0);
+		WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_OCP_FSM_EN, 1);
+	} else {
+		mbhc->mbhc_cb->irq_control(mbhc->codec,
+					   mbhc->intr_ids->hph_right_ocp,
+					   false);
+		mbhc->hph_status |= SND_JACK_OC_HPHR;
+		wcd_mbhc_jack_report(mbhc, &mbhc->headset_jack,
+				    mbhc->hph_status, WCD_MBHC_JACK_MASK);
+	}
+done:
+	return IRQ_HANDLED;
+}
+
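+/*
+ * One-time MBHC hardware setup: program the HS_DET pull-up, plug-type
+ * switch levels, insertion/button debounce times, bias, MBHC clock,
+ * HS_VREF and button thresholds, and initialize the plug correction
+ * work.
+ */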
+static int wcd_mbhc_initialise(struct wcd_mbhc *mbhc)
+{
+	int ret = 0;
+	struct snd_soc_codec *codec = mbhc->codec;
+
+	pr_debug("%s: enter\n", __func__);
+	WCD_MBHC_RSC_LOCK(mbhc);
+
+	/* enable HS detection */
+	if (mbhc->mbhc_cb->hph_pull_up_control)
+		mbhc->mbhc_cb->hph_pull_up_control(codec, I_DEFAULT);
+	else
+		WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_HS_L_DET_PULL_UP_CTRL, 3);
+
+	if (mbhc->mbhc_cfg->moisture_en && mbhc->mbhc_cb->mbhc_moisture_config)
+		mbhc->mbhc_cb->mbhc_moisture_config(mbhc);
+
+	/*
+	 * For USB analog we need to override the switch configuration.
+	 * Also, disable hph_l pull-up current source as HS_DET_L is driven
+	 * by an external source
+	 */
+	if (mbhc->mbhc_cfg->enable_usbc_analog) {
+		mbhc->hphl_swh = 1;
+		mbhc->gnd_swh = 1;
+
+		if (mbhc->mbhc_cb->hph_pull_up_control)
+			mbhc->mbhc_cb->hph_pull_up_control(codec, I_OFF);
+		else
+			WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_HS_L_DET_PULL_UP_CTRL,
+						 0);
+	}
+
+	WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_HPHL_PLUG_TYPE, mbhc->hphl_swh);
+	WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_GND_PLUG_TYPE, mbhc->gnd_swh);
+	WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_SW_HPH_LP_100K_TO_GND, 1);
+	if (mbhc->mbhc_cfg->gnd_det_en && mbhc->mbhc_cb->mbhc_gnd_det_ctrl)
+		mbhc->mbhc_cb->mbhc_gnd_det_ctrl(codec, true);
+	WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_HS_L_DET_PULL_UP_COMP_CTRL, 1);
+	WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_L_DET_EN, 1);
+
+	if (mbhc->mbhc_cfg->enable_usbc_analog) {
+		/* Insertion debounce set to 48ms */
+		WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_INSREM_DBNC, 4);
+	} else {
+		/* Insertion debounce set to 96ms */
+		WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_INSREM_DBNC, 6);
+	}
+
+	/* Button Debounce set to 16ms */
+	WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_BTN_DBNC, 2);
+
+	/* Enable micbias ramp */
+	if (mbhc->mbhc_cb->mbhc_micb_ramp_control)
+		mbhc->mbhc_cb->mbhc_micb_ramp_control(codec, true);
+	/* enable bias */
+	mbhc->mbhc_cb->mbhc_bias(codec, true);
+	/* enable MBHC clock */
+	if (mbhc->mbhc_cb->clk_setup)
+		mbhc->mbhc_cb->clk_setup(codec, true);
+
+	/* program HS_VREF value */
+	wcd_program_hs_vref(mbhc);
+
+	wcd_program_btn_threshold(mbhc, false);
+
+	INIT_WORK(&mbhc->correct_plug_swch, wcd_correct_swch_plug);
+
+	reinit_completion(&mbhc->btn_press_compl);
+
+	WCD_MBHC_RSC_UNLOCK(mbhc);
+	pr_debug("%s: leave\n", __func__);
+	return ret;
+}
+
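+/*
+ * Deferred firmware/calibration load: try the hwdep calibration first,
+ * then request_firmware(), retrying up to FW_READ_ATTEMPTS before
+ * falling back to the default calibration; finally run
+ * wcd_mbhc_initialise() with whatever calibration was chosen.
+ */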
+static void wcd_mbhc_fw_read(struct work_struct *work)
+{
+	struct delayed_work *dwork;
+	struct wcd_mbhc *mbhc;
+	struct snd_soc_codec *codec;
+	const struct firmware *fw;
+	struct firmware_cal *fw_data = NULL;
+	int ret = -1, retry = 0;
+	bool use_default_cal = false;
+
+	dwork = to_delayed_work(work);
+	mbhc = container_of(dwork, struct wcd_mbhc, mbhc_firmware_dwork);
+	codec = mbhc->codec;
+
+	while (retry < FW_READ_ATTEMPTS) {
+		retry++;
+		pr_debug("%s:Attempt %d to request MBHC firmware\n",
+			__func__, retry);
+		if (mbhc->mbhc_cb->get_hwdep_fw_cal)
+			fw_data = mbhc->mbhc_cb->get_hwdep_fw_cal(mbhc,
+					WCD9XXX_MBHC_CAL);
+		if (!fw_data)
+			ret = request_firmware(&fw, "wcd9320/wcd9320_mbhc.bin",
+				       codec->dev);
+		/*
+		 * If both request_firmware and the hwdep cal fetch fail,
+		 * sleep for 4 sec to give userspace time to send the data
+		 * to the kernel, and retry a few times before bailing out.
+		 */
+		if ((ret != 0) && !fw_data) {
+			usleep_range(FW_READ_TIMEOUT, FW_READ_TIMEOUT +
+					WCD_MBHC_USLEEP_RANGE_MARGIN_US);
+		} else {
+			pr_debug("%s: MBHC Firmware read succesful\n",
+					__func__);
+			break;
+		}
+	}
+	if (!fw_data)
+		pr_debug("%s: using request_firmware\n", __func__);
+	else
+		pr_debug("%s: using hwdep cal\n", __func__);
+
+	if (ret != 0 && !fw_data) {
+		pr_err("%s: Cannot load MBHC firmware use default cal\n",
+		       __func__);
+		use_default_cal = true;
+	}
+	if (!use_default_cal) {
+		const void *data;
+		size_t size;
+
+		if (fw_data) {
+			data = fw_data->data;
+			size = fw_data->size;
+		} else {
+			data = fw->data;
+			size = fw->size;
+		}
+		if (wcd_mbhc_fw_validate(data, size) == false) {
+			pr_err("%s: Invalid MBHC cal data size use default cal\n",
+				__func__);
+			if (!fw_data)
+				release_firmware(fw);
+		} else {
+			if (fw_data) {
+				mbhc->mbhc_cfg->calibration =
+					(void *)fw_data->data;
+				mbhc->mbhc_cal = fw_data;
+			} else {
+				mbhc->mbhc_cfg->calibration =
+					(void *)fw->data;
+				mbhc->mbhc_fw = fw;
+			}
+		}
+
+	}
+
+	(void) wcd_mbhc_initialise(mbhc);
+}
+
+int wcd_mbhc_set_keycode(struct wcd_mbhc *mbhc)
+{
+	enum snd_jack_types type;
+	int i, ret, result = 0;
+	int *btn_key_code;
+
+	btn_key_code = mbhc->mbhc_cfg->key_code;
+
+	for (i = 0; i < WCD_MBHC_KEYCODE_NUM; i++) {
+		if (btn_key_code[i] != 0) {
+			switch (i) {
+			case 0:
+				type = SND_JACK_BTN_0;
+				break;
+			case 1:
+				type = SND_JACK_BTN_1;
+				break;
+			case 2:
+				type = SND_JACK_BTN_2;
+				break;
+			case 3:
+				type = SND_JACK_BTN_3;
+				break;
+			case 4:
+				type = SND_JACK_BTN_4;
+				break;
+			case 5:
+				type = SND_JACK_BTN_5;
+				break;
+			default:
+				WARN_ONCE(1, "Wrong button number:%d\n", i);
+				result = -1;
+				return result;
+			}
+			ret = snd_jack_set_key(mbhc->button_jack.jack,
+							type,
+							btn_key_code[i]);
+			if (ret) {
+				pr_err("%s: Failed to set code for %d\n",
+					__func__, btn_key_code[i]);
+				result = -1;
+				return result;
+			}
+			input_set_capability(
+				mbhc->button_jack.jack->input_dev,
+				EV_KEY, btn_key_code[i]);
+			pr_debug("%s: set btn%d key code:%d\n", __func__,
+				i, btn_key_code[i]);
+		}
+	}
+	if (btn_key_code[0])
+		mbhc->is_btn_already_regd = true;
+	return result;
+}
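+
+/*
+ * Illustrative sketch (not part of the driver): a machine driver is
+ * expected to fill mbhc_cfg->key_code[] with input event codes before
+ * calling wcd_mbhc_start(); the mapping below is only an example.
+ *
+ *	static struct wcd_mbhc_config mbhc_cfg = {
+ *		.key_code[0] = KEY_MEDIA,
+ *		.key_code[1] = KEY_VOICECOMMAND,
+ *		.key_code[2] = KEY_VOLUMEUP,
+ *		.key_code[3] = KEY_VOLUMEDOWN,
+ *	};
+ */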
+
+static int wcd_mbhc_usb_c_analog_setup_gpios(struct wcd_mbhc *mbhc,
+					     bool active)
+{
+	int rc = 0;
+	struct usbc_ana_audio_config *config =
+		&mbhc->mbhc_cfg->usbc_analog_cfg;
+	union power_supply_propval pval;
+
+	dev_dbg(mbhc->codec->dev, "%s: setting GPIOs active = %d\n",
+		__func__, active);
+
+	memset(&pval, 0, sizeof(pval));
+
+	if (active) {
+		pval.intval = POWER_SUPPLY_TYPEC_PR_SOURCE;
+		if (power_supply_set_property(mbhc->usb_psy,
+				POWER_SUPPLY_PROP_TYPEC_POWER_ROLE, &pval))
+			dev_info(mbhc->codec->dev, "%s: force PR_SOURCE mode unsuccessful\n",
+				 __func__);
+		else
+			mbhc->usbc_force_pr_mode = true;
+
+		if (config->usbc_en1_gpio_p)
+			rc = msm_cdc_pinctrl_select_active_state(
+				config->usbc_en1_gpio_p);
+		if (rc == 0 && config->usbc_en2n_gpio_p)
+			rc = msm_cdc_pinctrl_select_active_state(
+				config->usbc_en2n_gpio_p);
+		if (rc == 0 && config->usbc_force_gpio_p)
+			rc = msm_cdc_pinctrl_select_active_state(
+				config->usbc_force_gpio_p);
+		mbhc->usbc_mode = POWER_SUPPLY_TYPEC_SINK_AUDIO_ADAPTER;
+	} else {
+		/* no delay is required when disabling GPIOs */
+		if (config->usbc_en2n_gpio_p)
+			msm_cdc_pinctrl_select_sleep_state(
+				config->usbc_en2n_gpio_p);
+		if (config->usbc_en1_gpio_p)
+			msm_cdc_pinctrl_select_sleep_state(
+				config->usbc_en1_gpio_p);
+		if (config->usbc_force_gpio_p)
+			msm_cdc_pinctrl_select_sleep_state(
+				config->usbc_force_gpio_p);
+
+		if (mbhc->usbc_force_pr_mode) {
+			pval.intval = POWER_SUPPLY_TYPEC_PR_DUAL;
+			if (power_supply_set_property(mbhc->usb_psy,
+				POWER_SUPPLY_PROP_TYPEC_POWER_ROLE, &pval))
+				dev_info(mbhc->codec->dev, "%s: force PR_DUAL mode unsuccessful\n",
+					 __func__);
+
+			mbhc->usbc_force_pr_mode = false;
+		}
+
+		mbhc->usbc_mode = POWER_SUPPLY_TYPEC_NONE;
+	}
+
+	return rc;
+}
+
+/* workqueue */
+static void wcd_mbhc_usbc_analog_work_fn(struct work_struct *work)
+{
+	struct wcd_mbhc *mbhc =
+		container_of(work, struct wcd_mbhc, usbc_analog_work);
+
+	wcd_mbhc_usb_c_analog_setup_gpios(mbhc,
+			mbhc->usbc_mode != POWER_SUPPLY_TYPEC_NONE);
+}
+
+/* this callback function is used to process PMI notification */
+static int wcd_mbhc_usb_c_event_changed(struct notifier_block *nb,
+					unsigned long evt, void *ptr)
+{
+	int ret;
+	union power_supply_propval mode;
+	struct wcd_mbhc *mbhc = container_of(nb, struct wcd_mbhc, psy_nb);
+	struct snd_soc_codec *codec = mbhc->codec;
+
+	if (ptr != mbhc->usb_psy || evt != PSY_EVENT_PROP_CHANGED)
+		return 0;
+
+	ret = power_supply_get_property(mbhc->usb_psy,
+			POWER_SUPPLY_PROP_TYPEC_MODE, &mode);
+	if (ret) {
+		dev_err(codec->dev, "%s: Unable to read USB TYPEC_MODE: %d\n",
+			__func__, ret);
+		return ret;
+	}
+
+	dev_dbg(codec->dev, "%s: USB change event received\n",
+		__func__);
+	dev_dbg(codec->dev, "%s: supply mode %d, expected %d\n", __func__,
+		mode.intval, POWER_SUPPLY_TYPEC_SINK_AUDIO_ADAPTER);
+
+	switch (mode.intval) {
+	case POWER_SUPPLY_TYPEC_SINK_AUDIO_ADAPTER:
+	case POWER_SUPPLY_TYPEC_NONE:
+		dev_dbg(codec->dev, "%s: usbc_mode: %d; mode.intval: %d\n",
+			__func__, mbhc->usbc_mode, mode.intval);
+
+		if (mbhc->usbc_mode == mode.intval)
+			break; /* filter notifications received before */
+		mbhc->usbc_mode = mode.intval;
+
+		dev_dbg(codec->dev, "%s: queueing usbc_analog_work\n",
+			__func__);
+		schedule_work(&mbhc->usbc_analog_work);
+		break;
+	default:
+		break;
+	}
+	return ret;
+}
+
+/* PMI registration code */
+static int wcd_mbhc_usb_c_analog_init(struct wcd_mbhc *mbhc)
+{
+	int ret = 0;
+	struct snd_soc_codec *codec = mbhc->codec;
+
+	dev_dbg(mbhc->codec->dev, "%s: usb-c analog setup start\n", __func__);
+	INIT_WORK(&mbhc->usbc_analog_work, wcd_mbhc_usbc_analog_work_fn);
+
+	mbhc->usb_psy = power_supply_get_by_name("usb");
+	if (IS_ERR_OR_NULL(mbhc->usb_psy)) {
+		dev_err(codec->dev, "%s: could not get USB psy info\n",
+			__func__);
+		ret = -EPROBE_DEFER;
+		if (IS_ERR(mbhc->usb_psy))
+			ret = PTR_ERR(mbhc->usb_psy);
+		mbhc->usb_psy = NULL;
+		goto err;
+	}
+
+	ret = wcd_mbhc_usb_c_analog_setup_gpios(mbhc, false);
+	if (ret) {
+		dev_err(codec->dev, "%s: error while setting USBC ana gpios\n",
+			__func__);
+		goto err;
+	}
+
+	mbhc->psy_nb.notifier_call = wcd_mbhc_usb_c_event_changed;
+	mbhc->psy_nb.priority = 0;
+	ret = power_supply_reg_notifier(&mbhc->psy_nb);
+	if (ret) {
+		dev_err(codec->dev, "%s: power supply registration failed\n",
+			__func__);
+		goto err;
+	}
+
+	/*
+	 * As part of the init sequence, check whether a USB-C analog
+	 * adapter is already connected.
+	 */
+	dev_dbg(mbhc->codec->dev, "%s: verify if USB adapter is already inserted\n",
+		__func__);
+	ret = wcd_mbhc_usb_c_event_changed(&mbhc->psy_nb,
+					   PSY_EVENT_PROP_CHANGED,
+					   mbhc->usb_psy);
+
+err:
+	return ret;
+}
+
+static int wcd_mbhc_usb_c_analog_deinit(struct wcd_mbhc *mbhc)
+{
+	wcd_mbhc_usb_c_analog_setup_gpios(mbhc, false);
+
+	/* deregister from PMI */
+	power_supply_unreg_notifier(&mbhc->psy_nb);
+
+	return 0;
+}
+
+static int wcd_mbhc_init_gpio(struct wcd_mbhc *mbhc,
+			      struct wcd_mbhc_config *mbhc_cfg,
+			      const char *gpio_dt_str,
+			      int *gpio, struct device_node **gpio_dn)
+{
+	int rc = 0;
+	struct snd_soc_codec *codec = mbhc->codec;
+	struct snd_soc_card *card = codec->component.card;
+
+	dev_dbg(mbhc->codec->dev, "%s: gpio %s\n", __func__, gpio_dt_str);
+
+	*gpio_dn = of_parse_phandle(card->dev->of_node, gpio_dt_str, 0);
+
+	if (!(*gpio_dn)) {
+		*gpio = of_get_named_gpio(card->dev->of_node, gpio_dt_str, 0);
+		if (!gpio_is_valid(*gpio)) {
+			dev_err(card->dev, "%s, property %s not in node %s",
+				__func__, gpio_dt_str,
+				card->dev->of_node->full_name);
+			rc = -EINVAL;
+		}
+	}
+
+	return rc;
+}
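+
+/*
+ * Illustrative sketch (hypothetical device tree snippet): each analog
+ * switch pin may be described either as a pinctrl phandle or as a named
+ * GPIO in the sound card node, e.g.
+ *
+ *	qcom,usbc-analog-en1_gpio = <&wcd_usbc_analog_en1>;
+ *	qcom,usbc-analog-en2_n_gpio = <&tlmm 81 0>;
+ */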
+
+int wcd_mbhc_start(struct wcd_mbhc *mbhc, struct wcd_mbhc_config *mbhc_cfg)
+{
+	int rc = 0;
+	struct usbc_ana_audio_config *config;
+	struct snd_soc_codec *codec;
+	struct snd_soc_card *card;
+	const char *usb_c_dt = "qcom,msm-mbhc-usbc-audio-supported";
+
+	if (!mbhc || !mbhc_cfg)
+		return -EINVAL;
+
+	config = &mbhc_cfg->usbc_analog_cfg;
+	codec = mbhc->codec;
+	card = codec->component.card;
+
+	/* update the mbhc config */
+	mbhc->mbhc_cfg = mbhc_cfg;
+
+	dev_dbg(mbhc->codec->dev, "%s: enter\n", __func__);
+
+	/* check if USB C analog is defined on device tree */
+	mbhc_cfg->enable_usbc_analog = 0;
+	if (of_find_property(card->dev->of_node, usb_c_dt, NULL)) {
+		rc = of_property_read_u32(card->dev->of_node, usb_c_dt,
+				&mbhc_cfg->enable_usbc_analog);
+	}
+	if (mbhc_cfg->enable_usbc_analog == 0 || rc != 0) {
+		dev_info(card->dev,
+				"%s: %s in dt node is missing or false\n",
+				__func__, usb_c_dt);
+		dev_info(card->dev,
+			"%s: skipping USB c analog configuration\n", __func__);
+	}
+
+	/* initialize GPIOs */
+	if (mbhc_cfg->enable_usbc_analog) {
+		dev_dbg(mbhc->codec->dev, "%s: usbc analog enabled\n",
+				__func__);
+		rc = wcd_mbhc_init_gpio(mbhc, mbhc_cfg,
+				"qcom,usbc-analog-en1_gpio",
+				&config->usbc_en1_gpio,
+				&config->usbc_en1_gpio_p);
+		if (rc)
+			goto err;
+
+		rc = wcd_mbhc_init_gpio(mbhc, mbhc_cfg,
+				"qcom,usbc-analog-en2_n_gpio",
+				&config->usbc_en2n_gpio,
+				&config->usbc_en2n_gpio_p);
+		if (rc)
+			goto err;
+
+		if (of_find_property(card->dev->of_node,
+				     "qcom,usbc-analog-force_detect_gpio",
+				     NULL)) {
+			rc = wcd_mbhc_init_gpio(mbhc, mbhc_cfg,
+					"qcom,usbc-analog-force_detect_gpio",
+					&config->usbc_force_gpio,
+					&config->usbc_force_gpio_p);
+			if (rc)
+				goto err;
+		}
+
+		dev_dbg(mbhc->codec->dev, "%s: calling usb_c_analog_init\n",
+			__func__);
+		/* init PMI notifier */
+		rc = wcd_mbhc_usb_c_analog_init(mbhc);
+		if (rc) {
+			rc = -EPROBE_DEFER;
+			goto err;
+		}
+	}
+
+	/* Set btn key code */
+	if ((!mbhc->is_btn_already_regd) && wcd_mbhc_set_keycode(mbhc))
+		pr_err("Set btn key code error!!!\n");
+
+	if (!mbhc->mbhc_cfg->read_fw_bin ||
+	    (mbhc->mbhc_cfg->read_fw_bin && mbhc->mbhc_fw) ||
+	    (mbhc->mbhc_cfg->read_fw_bin && mbhc->mbhc_cal)) {
+		rc = wcd_mbhc_initialise(mbhc);
+	} else {
+		if (!mbhc->mbhc_fw || !mbhc->mbhc_cal)
+			schedule_delayed_work(&mbhc->mbhc_firmware_dwork,
+				      usecs_to_jiffies(FW_READ_TIMEOUT));
+		else
+			pr_err("%s: Skipping to read mbhc fw, 0x%pK %pK\n",
+				 __func__, mbhc->mbhc_fw, mbhc->mbhc_cal);
+	}
+
+	return rc;
+err:
+	if (config->usbc_en1_gpio > 0) {
+		dev_dbg(card->dev, "%s free usb en1 gpio %d\n",
+			__func__, config->usbc_en1_gpio);
+		gpio_free(config->usbc_en1_gpio);
+		config->usbc_en1_gpio = 0;
+	}
+	if (config->usbc_en2n_gpio > 0) {
+		dev_dbg(card->dev, "%s free usb_en2 gpio %d\n",
+			__func__, config->usbc_en2n_gpio);
+		gpio_free(config->usbc_en2n_gpio);
+		config->usbc_en2n_gpio = 0;
+	}
+	if (config->usbc_force_gpio > 0) {
+		dev_dbg(card->dev, "%s free usb_force gpio %d\n",
+			__func__, config->usbc_force_gpio);
+		gpio_free(config->usbc_force_gpio);
+		config->usbc_force_gpio = 0;
+	}
+	if (config->usbc_en1_gpio_p)
+		of_node_put(config->usbc_en1_gpio_p);
+	if (config->usbc_en2n_gpio_p)
+		of_node_put(config->usbc_en2n_gpio_p);
+	if (config->usbc_force_gpio_p)
+		of_node_put(config->usbc_force_gpio_p);
+	dev_dbg(mbhc->codec->dev, "%s: leave %d\n", __func__, rc);
+	return rc;
+}
+EXPORT_SYMBOL(wcd_mbhc_start);
+
+void wcd_mbhc_stop(struct wcd_mbhc *mbhc)
+{
+	struct usbc_ana_audio_config *config = &mbhc->mbhc_cfg->usbc_analog_cfg;
+
+	pr_debug("%s: enter\n", __func__);
+
+	if (mbhc->current_plug != MBHC_PLUG_TYPE_NONE) {
+		if (mbhc->mbhc_cb && mbhc->mbhc_cb->skip_imped_detect)
+			mbhc->mbhc_cb->skip_imped_detect(mbhc->codec);
+	}
+	mbhc->current_plug = MBHC_PLUG_TYPE_NONE;
+	mbhc->hph_status = 0;
+	if (mbhc->mbhc_cb && mbhc->mbhc_cb->irq_control) {
+		mbhc->mbhc_cb->irq_control(mbhc->codec,
+				mbhc->intr_ids->hph_left_ocp,
+				false);
+		mbhc->mbhc_cb->irq_control(mbhc->codec,
+				mbhc->intr_ids->hph_right_ocp,
+				false);
+	}
+	if (mbhc->mbhc_fw || mbhc->mbhc_cal) {
+		cancel_delayed_work_sync(&mbhc->mbhc_firmware_dwork);
+		if (!mbhc->mbhc_cal)
+			release_firmware(mbhc->mbhc_fw);
+		mbhc->mbhc_fw = NULL;
+		mbhc->mbhc_cal = NULL;
+	}
+
+	if (mbhc->mbhc_cfg->enable_usbc_analog) {
+		wcd_mbhc_usb_c_analog_deinit(mbhc);
+		/* free GPIOs */
+		if (config->usbc_en1_gpio > 0)
+			gpio_free(config->usbc_en1_gpio);
+		if (config->usbc_en2n_gpio > 0)
+			gpio_free(config->usbc_en2n_gpio);
+		if (config->usbc_force_gpio)
+			gpio_free(config->usbc_force_gpio);
+
+		if (config->usbc_en1_gpio_p)
+			of_node_put(config->usbc_en1_gpio_p);
+		if (config->usbc_en2n_gpio_p)
+			of_node_put(config->usbc_en2n_gpio_p);
+		if (config->usbc_force_gpio_p)
+			of_node_put(config->usbc_force_gpio_p);
+	}
+
+	pr_debug("%s: leave\n", __func__);
+}
+EXPORT_SYMBOL(wcd_mbhc_stop);
+
+/*
+ * wcd_mbhc_init : initialize MBHC internal structures.
+ *
+ * NOTE: mbhc->mbhc_cfg is not yet configured here, so it must not be used.
+ */
+int wcd_mbhc_init(struct wcd_mbhc *mbhc, struct snd_soc_codec *codec,
+		      const struct wcd_mbhc_cb *mbhc_cb,
+		      const struct wcd_mbhc_intr *mbhc_cdc_intr_ids,
+		      struct wcd_mbhc_register *wcd_mbhc_regs,
+		      bool impedance_det_en)
+{
+	int ret = 0;
+	int hph_swh = 0;
+	int gnd_swh = 0;
+	u32 hph_moist_config[3];
+	struct snd_soc_card *card = codec->component.card;
+	const char *hph_switch = "qcom,msm-mbhc-hphl-swh";
+	const char *gnd_switch = "qcom,msm-mbhc-gnd-swh";
+
+	pr_debug("%s: enter\n", __func__);
+
+	ret = of_property_read_u32(card->dev->of_node, hph_switch, &hph_swh);
+	if (ret) {
+		dev_err(card->dev,
+			"%s: missing %s in dt node\n", __func__, hph_switch);
+		goto err;
+	}
+
+	ret = of_property_read_u32(card->dev->of_node, gnd_switch, &gnd_swh);
+	if (ret) {
+		dev_err(card->dev,
+			"%s: missing %s in dt node\n", __func__, gnd_switch);
+		goto err;
+	}
+
+	ret = of_property_read_u32_array(card->dev->of_node,
+					 "qcom,msm-mbhc-moist-cfg",
+					 hph_moist_config, 3);
+	if (ret) {
+		dev_dbg(card->dev, "%s: no qcom,msm-mbhc-moist-cfg in DT\n",
+			__func__);
+		mbhc->moist_vref = V_45_MV;
+		mbhc->moist_iref = I_3P0_UA;
+		mbhc->moist_rref = R_24_KOHM;
+	} else {
+		mbhc->moist_vref = hph_moist_config[0];
+		mbhc->moist_iref = hph_moist_config[1];
+		mbhc->moist_rref = hph_moist_config[2];
+	}
+
+	mbhc->in_swch_irq_handler = false;
+	mbhc->current_plug = MBHC_PLUG_TYPE_NONE;
+	mbhc->is_btn_press = false;
+	mbhc->codec = codec;
+	mbhc->intr_ids = mbhc_cdc_intr_ids;
+	mbhc->impedance_detect = impedance_det_en;
+	mbhc->hphl_swh = hph_swh;
+	mbhc->gnd_swh = gnd_swh;
+	mbhc->micbias_enable = false;
+	mbhc->mbhc_cb = mbhc_cb;
+	mbhc->btn_press_intr = false;
+	mbhc->is_hs_recording = false;
+	mbhc->is_extn_cable = false;
+	mbhc->hph_type = WCD_MBHC_HPH_NONE;
+	mbhc->wcd_mbhc_regs = wcd_mbhc_regs;
+
+	if (mbhc->intr_ids == NULL) {
+		pr_err("%s: Interrupt mapping not provided\n", __func__);
+		return -EINVAL;
+	}
+	if (!mbhc->wcd_mbhc_regs) {
+		dev_err(codec->dev, "%s: mbhc registers are not defined\n",
+			__func__);
+		return -EINVAL;
+	}
+
+	/* Check if IRQ and other required callbacks are defined or not */
+	if (!mbhc_cb || !mbhc_cb->request_irq || !mbhc_cb->irq_control ||
+	    !mbhc_cb->free_irq || !mbhc_cb->map_btn_code_to_num ||
+	    !mbhc_cb->lock_sleep || !mbhc_cb->mbhc_bias ||
+	    !mbhc_cb->set_btn_thr) {
+		dev_err(codec->dev, "%s: required mbhc callbacks are not defined\n",
+			__func__);
+		return -EINVAL;
+	}
+
+	if (mbhc->headset_jack.jack == NULL) {
+		ret = snd_soc_card_jack_new(codec->component.card,
+					    "Headset Jack", WCD_MBHC_JACK_MASK,
+					    &mbhc->headset_jack, NULL, 0);
+		if (ret) {
+			pr_err("%s: Failed to create new jack\n", __func__);
+			return ret;
+		}
+
+		ret = snd_soc_card_jack_new(codec->component.card,
+					    "Button Jack",
+					    WCD_MBHC_JACK_BUTTON_MASK,
+					    &mbhc->button_jack, NULL, 0);
+		if (ret) {
+			pr_err("Failed to create new jack\n");
+			return ret;
+		}
+
+		ret = snd_jack_set_key(mbhc->button_jack.jack,
+				       SND_JACK_BTN_0,
+				       KEY_MEDIA);
+		if (ret) {
+			pr_err("%s: Failed to set code for btn-0\n",
+				__func__);
+			return ret;
+		}
+
+		set_bit(INPUT_PROP_NO_DUMMY_RELEASE,
+			mbhc->button_jack.jack->input_dev->propbit);
+
+		INIT_DELAYED_WORK(&mbhc->mbhc_firmware_dwork,
+				  wcd_mbhc_fw_read);
+		INIT_DELAYED_WORK(&mbhc->mbhc_btn_dwork, wcd_btn_lpress_fn);
+	}
+	mutex_init(&mbhc->hphl_pa_lock);
+	mutex_init(&mbhc->hphr_pa_lock);
+	init_completion(&mbhc->btn_press_compl);
+
+	/* Register event notifier */
+	mbhc->nblock.notifier_call = wcd_event_notify;
+	if (mbhc->mbhc_cb->register_notifier) {
+		ret = mbhc->mbhc_cb->register_notifier(mbhc, &mbhc->nblock,
+						       true);
+		if (ret) {
+			pr_err("%s: Failed to register notifier %d\n",
+				__func__, ret);
+			return ret;
+		}
+	}
+
+	init_waitqueue_head(&mbhc->wait_btn_press);
+	mutex_init(&mbhc->codec_resource_lock);
+
+	ret = mbhc->mbhc_cb->request_irq(codec, mbhc->intr_ids->mbhc_sw_intr,
+				  wcd_mbhc_mech_plug_detect_irq,
+				  "mbhc sw intr", mbhc);
+	if (ret) {
+		pr_err("%s: Failed to request irq %d, ret = %d\n", __func__,
+		       mbhc->intr_ids->mbhc_sw_intr, ret);
+		goto err_mbhc_sw_irq;
+	}
+
+	ret = mbhc->mbhc_cb->request_irq(codec,
+					 mbhc->intr_ids->mbhc_btn_press_intr,
+					 wcd_mbhc_btn_press_handler,
+					 "Button Press detect",
+					 mbhc);
+	if (ret) {
+		pr_err("%s: Failed to request irq %d\n", __func__,
+		       mbhc->intr_ids->mbhc_btn_press_intr);
+		goto err_btn_press_irq;
+	}
+
+	ret = mbhc->mbhc_cb->request_irq(codec,
+					 mbhc->intr_ids->mbhc_btn_release_intr,
+					 wcd_mbhc_release_handler,
+					 "Button Release detect", mbhc);
+	if (ret) {
+		pr_err("%s: Failed to request irq %d\n", __func__,
+			mbhc->intr_ids->mbhc_btn_release_intr);
+		goto err_btn_release_irq;
+	}
+
+	ret = mbhc->mbhc_cb->request_irq(codec,
+					 mbhc->intr_ids->mbhc_hs_ins_intr,
+					 wcd_mbhc_hs_ins_irq,
+					 "Elect Insert", mbhc);
+	if (ret) {
+		pr_err("%s: Failed to request irq %d\n", __func__,
+		       mbhc->intr_ids->mbhc_hs_ins_intr);
+		goto err_mbhc_hs_ins_irq;
+	}
+	mbhc->mbhc_cb->irq_control(codec, mbhc->intr_ids->mbhc_hs_ins_intr,
+				   false);
+	clear_bit(WCD_MBHC_ELEC_HS_INS, &mbhc->intr_status);
+
+	ret = mbhc->mbhc_cb->request_irq(codec,
+					 mbhc->intr_ids->mbhc_hs_rem_intr,
+					 wcd_mbhc_hs_rem_irq,
+					 "Elect Remove", mbhc);
+	if (ret) {
+		pr_err("%s: Failed to request irq %d\n", __func__,
+		       mbhc->intr_ids->mbhc_hs_rem_intr);
+		goto err_mbhc_hs_rem_irq;
+	}
+	mbhc->mbhc_cb->irq_control(codec, mbhc->intr_ids->mbhc_hs_rem_intr,
+				   false);
+	clear_bit(WCD_MBHC_ELEC_HS_REM, &mbhc->intr_status);
+
+	ret = mbhc->mbhc_cb->request_irq(codec, mbhc->intr_ids->hph_left_ocp,
+				  wcd_mbhc_hphl_ocp_irq, "HPH_L OCP detect",
+				  mbhc);
+	if (ret) {
+		pr_err("%s: Failed to request irq %d\n", __func__,
+		       mbhc->intr_ids->hph_left_ocp);
+		goto err_hphl_ocp_irq;
+	}
+
+	ret = mbhc->mbhc_cb->request_irq(codec, mbhc->intr_ids->hph_right_ocp,
+				  wcd_mbhc_hphr_ocp_irq, "HPH_R OCP detect",
+				  mbhc);
+	if (ret) {
+		pr_err("%s: Failed to request irq %d\n", __func__,
+		       mbhc->intr_ids->hph_right_ocp);
+		goto err_hphr_ocp_irq;
+	}
+
+	pr_debug("%s: leave ret %d\n", __func__, ret);
+	return ret;
+
+err_hphr_ocp_irq:
+	mbhc->mbhc_cb->free_irq(codec, mbhc->intr_ids->hph_left_ocp, mbhc);
+err_hphl_ocp_irq:
+	mbhc->mbhc_cb->free_irq(codec, mbhc->intr_ids->mbhc_hs_rem_intr, mbhc);
+err_mbhc_hs_rem_irq:
+	mbhc->mbhc_cb->free_irq(codec, mbhc->intr_ids->mbhc_hs_ins_intr, mbhc);
+err_mbhc_hs_ins_irq:
+	mbhc->mbhc_cb->free_irq(codec, mbhc->intr_ids->mbhc_btn_release_intr,
+				mbhc);
+err_btn_release_irq:
+	mbhc->mbhc_cb->free_irq(codec, mbhc->intr_ids->mbhc_btn_press_intr,
+				mbhc);
+err_btn_press_irq:
+	mbhc->mbhc_cb->free_irq(codec, mbhc->intr_ids->mbhc_sw_intr, mbhc);
+err_mbhc_sw_irq:
+	if (mbhc->mbhc_cb->register_notifier)
+		mbhc->mbhc_cb->register_notifier(mbhc, &mbhc->nblock, false);
+	mutex_destroy(&mbhc->codec_resource_lock);
+err:
+	pr_debug("%s: leave ret %d\n", __func__, ret);
+	return ret;
+}
+EXPORT_SYMBOL(wcd_mbhc_init);
+
+void wcd_mbhc_deinit(struct wcd_mbhc *mbhc)
+{
+	struct snd_soc_codec *codec = mbhc->codec;
+
+	mbhc->mbhc_cb->free_irq(codec, mbhc->intr_ids->mbhc_sw_intr, mbhc);
+	mbhc->mbhc_cb->free_irq(codec, mbhc->intr_ids->mbhc_btn_press_intr,
+				mbhc);
+	mbhc->mbhc_cb->free_irq(codec, mbhc->intr_ids->mbhc_btn_release_intr,
+				mbhc);
+	mbhc->mbhc_cb->free_irq(codec, mbhc->intr_ids->mbhc_hs_ins_intr, mbhc);
+	mbhc->mbhc_cb->free_irq(codec, mbhc->intr_ids->mbhc_hs_rem_intr, mbhc);
+	mbhc->mbhc_cb->free_irq(codec, mbhc->intr_ids->hph_left_ocp, mbhc);
+	mbhc->mbhc_cb->free_irq(codec, mbhc->intr_ids->hph_right_ocp, mbhc);
+	if (mbhc->mbhc_cb && mbhc->mbhc_cb->register_notifier)
+		mbhc->mbhc_cb->register_notifier(mbhc, &mbhc->nblock, false);
+	wcd_cancel_hs_detect_plug(mbhc, &mbhc->correct_plug_swch);
+	mutex_destroy(&mbhc->codec_resource_lock);
+	mutex_destroy(&mbhc->hphl_pa_lock);
+	mutex_destroy(&mbhc->hphr_pa_lock);
+}
+EXPORT_SYMBOL(wcd_mbhc_deinit);
+
+MODULE_DESCRIPTION("wcd MBHC v2 module");
+MODULE_LICENSE("GPL v2");
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/sound/soc/codecs/wcd-mbhc-v2.h	2019-10-29 09:26:26.121227428 +0100
@@ -0,0 +1,572 @@
+/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#ifndef __WCD_MBHC_V2_H__
+#define __WCD_MBHC_V2_H__
+
+#include <linux/wait.h>
+#include <linux/stringify.h>
+#include <linux/power_supply.h>
+#include "wcdcal-hwdep.h"
+
+#define TOMBAK_MBHC_NC	0
+#define TOMBAK_MBHC_NO	1
+#define WCD_MBHC_DEF_BUTTONS 8
+#define WCD_MBHC_KEYCODE_NUM 8
+#define WCD_MBHC_USLEEP_RANGE_MARGIN_US 100
+#define WCD_MBHC_THR_HS_MICB_MV  2700
+/* z value defined in Ohms */
+#define WCD_MONO_HS_MIN_THR	2
+#define WCD_MBHC_STRINGIFY(s)  __stringify(s)
+
+enum {
+	WCD_MBHC_ELEC_HS_INS,
+	WCD_MBHC_ELEC_HS_REM,
+};
+
+struct wcd_mbhc;
+enum wcd_mbhc_register_function {
+	WCD_MBHC_L_DET_EN,
+	WCD_MBHC_GND_DET_EN,
+	WCD_MBHC_MECH_DETECTION_TYPE,
+	WCD_MBHC_MIC_CLAMP_CTL,
+	WCD_MBHC_ELECT_DETECTION_TYPE,
+	WCD_MBHC_HS_L_DET_PULL_UP_CTRL,
+	WCD_MBHC_HS_L_DET_PULL_UP_COMP_CTRL,
+	WCD_MBHC_HPHL_PLUG_TYPE,
+	WCD_MBHC_GND_PLUG_TYPE,
+	WCD_MBHC_SW_HPH_LP_100K_TO_GND,
+	WCD_MBHC_ELECT_SCHMT_ISRC,
+	WCD_MBHC_FSM_EN,
+	WCD_MBHC_INSREM_DBNC,
+	WCD_MBHC_BTN_DBNC,
+	WCD_MBHC_HS_VREF,
+	WCD_MBHC_HS_COMP_RESULT,
+	WCD_MBHC_MIC_SCHMT_RESULT,
+	WCD_MBHC_HPHL_SCHMT_RESULT,
+	WCD_MBHC_HPHR_SCHMT_RESULT,
+	WCD_MBHC_OCP_FSM_EN,
+	WCD_MBHC_BTN_RESULT,
+	WCD_MBHC_BTN_ISRC_CTL,
+	WCD_MBHC_ELECT_RESULT,
+	WCD_MBHC_MICB_CTRL,    /* Pull-up and micb control */
+	WCD_MBHC_HPH_CNP_WG_TIME,
+	WCD_MBHC_HPHR_PA_EN,
+	WCD_MBHC_HPHL_PA_EN,
+	WCD_MBHC_HPH_PA_EN,
+	WCD_MBHC_SWCH_LEVEL_REMOVE,
+	WCD_MBHC_PULLDOWN_CTRL,
+	WCD_MBHC_ANC_DET_EN,
+	WCD_MBHC_FSM_STATUS,
+	WCD_MBHC_MUX_CTL,
+	WCD_MBHC_HPHL_OCP_DET_EN,
+	WCD_MBHC_HPHR_OCP_DET_EN,
+	WCD_MBHC_HPHL_OCP_STATUS,
+	WCD_MBHC_HPHR_OCP_STATUS,
+	WCD_MBHC_REG_FUNC_MAX,
+};
+
+enum wcd_mbhc_plug_type {
+	MBHC_PLUG_TYPE_INVALID = -1,
+	MBHC_PLUG_TYPE_NONE,
+	MBHC_PLUG_TYPE_HEADSET,
+	MBHC_PLUG_TYPE_HEADPHONE,
+	MBHC_PLUG_TYPE_HIGH_HPH,
+	MBHC_PLUG_TYPE_GND_MIC_SWAP,
+	MBHC_PLUG_TYPE_ANC_HEADPHONE,
+};
+
+enum pa_dac_ack_flags {
+	WCD_MBHC_HPHL_PA_OFF_ACK = 0,
+	WCD_MBHC_HPHR_PA_OFF_ACK,
+};
+
+enum anc_ack_flags {
+	WCD_MBHC_ANC0_OFF_ACK = 0,
+	WCD_MBHC_ANC1_OFF_ACK,
+};
+
+enum wcd_mbhc_btn_det_mem {
+	WCD_MBHC_BTN_DET_V_BTN_LOW,
+	WCD_MBHC_BTN_DET_V_BTN_HIGH
+};
+
+enum {
+	MIC_BIAS_1 = 1,
+	MIC_BIAS_2,
+	MIC_BIAS_3,
+	MIC_BIAS_4
+};
+
+enum {
+	MICB_PULLUP_ENABLE,
+	MICB_PULLUP_DISABLE,
+	MICB_ENABLE,
+	MICB_DISABLE,
+};
+
+enum {
+	MBHC_COMMON_MICB_PRECHARGE,
+	MBHC_COMMON_MICB_SET_VAL,
+	MBHC_COMMON_MICB_TAIL_CURR,
+};
+
+enum wcd_notify_event {
+	WCD_EVENT_INVALID,
+	/* events for micbias ON and OFF */
+	WCD_EVENT_PRE_MICBIAS_2_OFF,
+	WCD_EVENT_POST_MICBIAS_2_OFF,
+	WCD_EVENT_PRE_MICBIAS_2_ON,
+	WCD_EVENT_POST_MICBIAS_2_ON,
+	WCD_EVENT_PRE_DAPM_MICBIAS_2_OFF,
+	WCD_EVENT_POST_DAPM_MICBIAS_2_OFF,
+	WCD_EVENT_PRE_DAPM_MICBIAS_2_ON,
+	WCD_EVENT_POST_DAPM_MICBIAS_2_ON,
+	/* events for PA ON and OFF */
+	WCD_EVENT_PRE_HPHL_PA_ON,
+	WCD_EVENT_POST_HPHL_PA_OFF,
+	WCD_EVENT_PRE_HPHR_PA_ON,
+	WCD_EVENT_POST_HPHR_PA_OFF,
+	WCD_EVENT_PRE_HPHL_PA_OFF,
+	WCD_EVENT_PRE_HPHR_PA_OFF,
+	WCD_EVENT_OCP_OFF,
+	WCD_EVENT_OCP_ON,
+	WCD_EVENT_LAST,
+};
+
+enum wcd_mbhc_event_state {
+	WCD_MBHC_EVENT_PA_HPHL,
+	WCD_MBHC_EVENT_PA_HPHR,
+};
+struct wcd_mbhc_general_cfg {
+	u8 t_ldoh;
+	u8 t_bg_fast_settle;
+	u8 t_shutdown_plug_rem;
+	u8 mbhc_nsa;
+	u8 mbhc_navg;
+	u8 v_micbias_l;
+	u8 v_micbias;
+	u8 mbhc_reserved;
+	u16 settle_wait;
+	u16 t_micbias_rampup;
+	u16 t_micbias_rampdown;
+	u16 t_supply_bringup;
+} __packed;
+
+struct wcd_mbhc_plug_detect_cfg {
+	u32 mic_current;
+	u32 hph_current;
+	u16 t_mic_pid;
+	u16 t_ins_complete;
+	u16 t_ins_retry;
+	u16 v_removal_delta;
+	u8 micbias_slow_ramp;
+	u8 reserved0;
+	u8 reserved1;
+	u8 reserved2;
+} __packed;
+
+struct wcd_mbhc_plug_type_cfg {
+	u8 av_detect;
+	u8 mono_detect;
+	u8 num_ins_tries;
+	u8 reserved0;
+	s16 v_no_mic;
+	s16 v_av_min;
+	s16 v_av_max;
+	s16 v_hs_min;
+	s16 v_hs_max;
+	u16 reserved1;
+} __packed;
+
+struct wcd_mbhc_btn_detect_cfg {
+	s8 c[8];
+	u8 nc;
+	u8 n_meas;
+	u8 mbhc_nsc;
+	u8 n_btn_meas;
+	u8 n_btn_con;
+	u8 num_btn;
+	u8 reserved0;
+	u8 reserved1;
+	u16 t_poll;
+	u16 t_bounce_wait;
+	u16 t_rel_timeout;
+	s16 v_btn_press_delta_sta;
+	s16 v_btn_press_delta_cic;
+	u16 t_btn0_timeout;
+	s16 _v_btn_low[0]; /* v_btn_low[num_btn] */
+	s16 _v_btn_high[0]; /* v_btn_high[num_btn] */
+	u8 _n_ready[2];
+	u8 _n_cic[2];
+	u8 _gain[2];
+} __packed;
+
+struct wcd_mbhc_imped_detect_cfg {
+	u8 _hs_imped_detect;
+	u8 _n_rload;
+	u8 _hph_keep_on;
+	u8 _repeat_rload_calc;
+	u16 _t_dac_ramp_time;
+	u16 _rhph_high;
+	u16 _rhph_low;
+	u16 _rload[0]; /* rload[n_rload] */
+	u16 _alpha[0]; /* alpha[n_rload] */
+	u16 _beta[3];
+} __packed;
+
+enum wcd_mbhc_hph_type {
+	WCD_MBHC_HPH_NONE = 0,
+	WCD_MBHC_HPH_MONO,
+	WCD_MBHC_HPH_STEREO,
+};
+
+/*
+ * These enum definitions are directly mapped to the register
+ * definitions
+ */
+enum mbhc_moisture_vref {
+	V_OFF,
+	V_45_MV,
+	V_100_MV,
+	V_225_MV,
+};
+
+enum mbhc_hs_pullup_iref {
+	I_DEFAULT = -1,
+	I_OFF = 0,
+	I_1P0_UA,
+	I_2P0_UA,
+	I_3P0_UA,
+};
+
+enum mbhc_moisture_rref {
+	R_OFF,
+	R_24_KOHM,
+	R_84_KOHM,
+	R_184_KOHM,
+};
+
+struct usbc_ana_audio_config {
+	int usbc_en1_gpio;
+	int usbc_en2n_gpio;
+	int usbc_force_gpio;
+	struct device_node *usbc_en1_gpio_p; /* used by pinctrl API */
+	struct device_node *usbc_en2n_gpio_p; /* used by pinctrl API */
+	struct device_node *usbc_force_gpio_p; /* used by pinctrl API */
+};
+
+struct wcd_mbhc_config {
+	bool read_fw_bin;
+	void *calibration;
+	bool detect_extn_cable;
+	bool mono_stero_detection;
+	bool (*swap_gnd_mic)(struct snd_soc_codec *codec);
+	bool hs_ext_micbias;
+	bool gnd_det_en;
+	int key_code[WCD_MBHC_KEYCODE_NUM];
+	uint32_t linein_th;
+	bool moisture_en;
+	int mbhc_micbias;
+	int anc_micbias;
+	bool enable_anc_mic_detect;
+	u32 enable_usbc_analog;
+	struct usbc_ana_audio_config usbc_analog_cfg;
+};
+
+struct wcd_mbhc_intr {
+	int mbhc_sw_intr;
+	int mbhc_btn_press_intr;
+	int mbhc_btn_release_intr;
+	int mbhc_hs_ins_intr;
+	int mbhc_hs_rem_intr;
+	int hph_left_ocp;
+	int hph_right_ocp;
+};
+
+struct wcd_mbhc_register {
+	const char *id;
+	u16 reg;
+	u8 mask;
+	u8 offset;
+	u8 invert;
+};
+
+#define WCD_MBHC_REGISTER(rid, rreg, rmask, rshift, rinvert) \
+{ .id = rid, .reg = rreg, .mask = rmask, .offset = rshift, .invert = rinvert }
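+
+/*
+ * Illustrative sketch (hypothetical register addresses and fields): a
+ * codec driver fills a wcd_mbhc_register table indexed by the register
+ * function enum above, e.g.
+ *
+ *	static struct wcd_mbhc_register mbhc_regs[WCD_MBHC_REG_FUNC_MAX] = {
+ *		WCD_MBHC_REGISTER("WCD_MBHC_L_DET_EN", 0x0600, 0x80, 7, 0),
+ *		WCD_MBHC_REGISTER("WCD_MBHC_GND_DET_EN", 0x0600, 0x40, 6, 0),
+ *	};
+ */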
+
+#define WCD_MBHC_RSC_LOCK(mbhc)			\
+{							\
+	pr_debug("%s: Acquiring BCL\n", __func__);	\
+	mutex_lock(&mbhc->codec_resource_lock);		\
+	pr_debug("%s: Acquiring BCL done\n", __func__);	\
+}
+
+#define WCD_MBHC_RSC_UNLOCK(mbhc)			\
+{							\
+	pr_debug("%s: Release BCL\n", __func__);	\
+	mutex_unlock(&mbhc->codec_resource_lock);	\
+}
+
+#define WCD_MBHC_RSC_ASSERT_LOCKED(mbhc)		\
+{							\
+	WARN_ONCE(!mutex_is_locked(&mbhc->codec_resource_lock), \
+		  "%s: BCL should have acquired\n", __func__); \
+}
+
+/*
+ * Macros to update and read MBHC register bits. Check for a zero
+ * register address before updating or reading, because one codec may
+ * implement a given bit while another codec does not.
+ */
+#define WCD_MBHC_REG_UPDATE_BITS(function, val)         \
+do {                                                    \
+	if (mbhc->wcd_mbhc_regs[function].reg) {        \
+		snd_soc_update_bits(mbhc->codec,	\
+		mbhc->wcd_mbhc_regs[function].reg,	\
+		mbhc->wcd_mbhc_regs[function].mask,	\
+		val << (mbhc->wcd_mbhc_regs[function].offset)); \
+	}                                               \
+} while (0)
+
+#define WCD_MBHC_REG_READ(function, val)	        \
+do {                                                    \
+	if (mbhc->wcd_mbhc_regs[function].reg) {        \
+		val = (((snd_soc_read(mbhc->codec,	\
+		mbhc->wcd_mbhc_regs[function].reg)) &	\
+		(mbhc->wcd_mbhc_regs[function].mask)) >> \
+		(mbhc->wcd_mbhc_regs[function].offset)); \
+	} else {                                         \
+		val = -EINVAL;                           \
+	}                                                \
+} while (0)
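+
+/*
+ * Illustrative usage (sketch, assuming "mbhc" is in scope as in the
+ * driver functions): update a field and read a result through the
+ * register table:
+ *
+ *	u8 btn_result = 0;
+ *
+ *	WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_FSM_EN, 1);
+ *	WCD_MBHC_REG_READ(WCD_MBHC_BTN_RESULT, btn_result);
+ */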
+
+struct wcd_mbhc_cb {
+	int (*enable_mb_source)(struct wcd_mbhc *, bool);
+	void (*trim_btn_reg)(struct snd_soc_codec *);
+	void (*compute_impedance)(struct wcd_mbhc *, uint32_t *, uint32_t *);
+	void (*set_micbias_value)(struct snd_soc_codec *);
+	void (*set_auto_zeroing)(struct snd_soc_codec *, bool);
+	struct firmware_cal * (*get_hwdep_fw_cal)(struct wcd_mbhc *,
+			enum wcd_cal_type);
+	void (*set_cap_mode)(struct snd_soc_codec *, bool, bool);
+	int (*register_notifier)(struct wcd_mbhc *,
+				 struct notifier_block *nblock,
+				 bool enable);
+	int (*request_irq)(struct snd_soc_codec *,
+			int, irq_handler_t, const char *, void *);
+	void (*irq_control)(struct snd_soc_codec *,
+			int irq, bool enable);
+	int (*free_irq)(struct snd_soc_codec *,
+			int irq, void *);
+	void (*clk_setup)(struct snd_soc_codec *, bool);
+	int (*map_btn_code_to_num)(struct snd_soc_codec *);
+	bool (*lock_sleep)(struct wcd_mbhc *, bool);
+	bool (*micbias_enable_status)(struct wcd_mbhc *, int);
+	void (*mbhc_bias)(struct snd_soc_codec *, bool);
+	void (*mbhc_common_micb_ctrl)(struct snd_soc_codec *,
+				      int event, bool);
+	void (*micb_internal)(struct snd_soc_codec *,
+			int micb_num, bool);
+	bool (*hph_pa_on_status)(struct snd_soc_codec *);
+	void (*set_btn_thr)(struct snd_soc_codec *, s16 *, s16 *,
+			    int num_btn, bool);
+	void (*hph_pull_up_control)(struct snd_soc_codec *,
+				    enum mbhc_hs_pullup_iref);
+	int (*mbhc_micbias_control)(struct snd_soc_codec *, int, int req);
+	void (*mbhc_micb_ramp_control)(struct snd_soc_codec *, bool);
+	void (*skip_imped_detect)(struct snd_soc_codec *);
+	bool (*extn_use_mb)(struct snd_soc_codec *);
+	int (*mbhc_micb_ctrl_thr_mic)(struct snd_soc_codec *, int, bool);
+	void (*mbhc_gnd_det_ctrl)(struct snd_soc_codec *, bool);
+	void (*hph_pull_down_ctrl)(struct snd_soc_codec *, bool);
+	void (*mbhc_moisture_config)(struct wcd_mbhc *);
+	bool (*hph_register_recovery)(struct wcd_mbhc *);
+	void (*update_anc_state)(struct snd_soc_codec *codec,
+				 bool enable, int anc_num);
+	bool (*is_anc_on)(struct wcd_mbhc *mbhc);
+};
+
+struct wcd_mbhc {
+	/* Delayed work to report long button press */
+	struct delayed_work mbhc_btn_dwork;
+	int buttons_pressed;
+	struct wcd_mbhc_config *mbhc_cfg;
+	const struct wcd_mbhc_cb *mbhc_cb;
+
+	u32 hph_status; /* track headphone status */
+	u8 hphlocp_cnt; /* headphone left ocp retry */
+	u8 hphrocp_cnt; /* headphone right ocp retry */
+
+	wait_queue_head_t wait_btn_press;
+	bool is_btn_press;
+	u8 current_plug;
+	bool in_swch_irq_handler;
+	bool hphl_swh; /* track HPHL switch NC / NO */
+	bool gnd_swh; /* track GND switch NC / NO */
+	u32 moist_vref;
+	u32 moist_iref;
+	u32 moist_rref;
+	u8 micbias1_cap_mode; /* track ext cap setting */
+	u8 micbias2_cap_mode; /* track ext cap setting */
+	bool hs_detect_work_stop;
+	bool micbias_enable;
+	bool btn_press_intr;
+	bool is_hs_recording;
+	bool is_extn_cable;
+	bool skip_imped_detection;
+	bool is_btn_already_regd;
+
+	struct snd_soc_codec *codec;
+	/* Work to perform MBHC Firmware Read */
+	struct delayed_work mbhc_firmware_dwork;
+	const struct firmware *mbhc_fw;
+	struct firmware_cal *mbhc_cal;
+
+	/* track PA/DAC state to sync with userspace */
+	unsigned long hph_pa_dac_state;
+	unsigned long hph_anc_state;
+	unsigned long event_state;
+	unsigned long jiffies_atreport;
+
+	/* impedance of hphl and hphr */
+	uint32_t zl, zr;
+	bool impedance_detect;
+
+	/* Holds type of Headset - Mono/Stereo */
+	enum wcd_mbhc_hph_type hph_type;
+
+	struct snd_soc_jack headset_jack;
+	struct snd_soc_jack button_jack;
+	struct mutex codec_resource_lock;
+
+	/* Holds codec specific interrupt mapping */
+	const struct wcd_mbhc_intr *intr_ids;
+
+	/* Work to correct accessory type */
+	struct work_struct correct_plug_swch;
+	struct notifier_block nblock;
+
+	struct wcd_mbhc_register *wcd_mbhc_regs;
+
+	struct completion btn_press_compl;
+	struct mutex hphl_pa_lock;
+	struct mutex hphr_pa_lock;
+
+	unsigned long intr_status;
+	bool is_hph_ocp_pending;
+
+	bool usbc_force_pr_mode;
+	int usbc_mode;
+	struct notifier_block psy_nb;
+	struct power_supply *usb_psy;
+	struct work_struct usbc_analog_work;
+	bool force_linein;
+};
+#define WCD_MBHC_CAL_SIZE(buttons, rload) ( \
+	sizeof(struct wcd_mbhc_general_cfg) + \
+	sizeof(struct wcd_mbhc_plug_detect_cfg) + \
+	((sizeof(s16) + sizeof(s16)) * buttons) + \
+	sizeof(struct wcd_mbhc_plug_type_cfg) + \
+	sizeof(struct wcd_mbhc_btn_detect_cfg) + \
+	sizeof(struct wcd_mbhc_imped_detect_cfg) + \
+	((sizeof(u16) + sizeof(u16)) * rload) \
+	)
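+
+/*
+ * Illustrative sketch: the calibration blob is a packed concatenation
+ * of the _cfg structures above, so sizing a blob for the default button
+ * count and, say, 2 rload entries could look like:
+ *
+ *	void *cal = kzalloc(WCD_MBHC_CAL_SIZE(WCD_MBHC_DEF_BUTTONS, 2),
+ *			    GFP_KERNEL);
+ */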
+
+
+#define WCD_MBHC_CAL_GENERAL_PTR(cali) ( \
+	(struct wcd_mbhc_general_cfg *) cali)
+#define WCD_MBHC_CAL_PLUG_DET_PTR(cali) ( \
+	(struct wcd_mbhc_plug_detect_cfg *) \
+	&(WCD_MBHC_CAL_GENERAL_PTR(cali)[1]))
+#define WCD_MBHC_CAL_PLUG_TYPE_PTR(cali) ( \
+	(struct wcd_mbhc_plug_type_cfg *) \
+	&(WCD_MBHC_CAL_PLUG_DET_PTR(cali)[1]))
+#define WCD_MBHC_CAL_BTN_DET_PTR(cali) ( \
+	(struct wcd_mbhc_btn_detect_cfg *) \
+	&(WCD_MBHC_CAL_PLUG_TYPE_PTR(cali)[1]))
+#define WCD_MBHC_CAL_IMPED_DET_PTR(cali) ( \
+	(struct wcd_mbhc_imped_detect_cfg *) \
+	(((void *)&WCD_MBHC_CAL_BTN_DET_PTR(cali)[1]) + \
+	(WCD_MBHC_CAL_BTN_DET_PTR(cali)->num_btn * \
+	(sizeof(WCD_MBHC_CAL_BTN_DET_PTR(cali)->_v_btn_low[0]) + \
+	sizeof(WCD_MBHC_CAL_BTN_DET_PTR(cali)->_v_btn_high[0])))) \
+	)
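+
+/*
+ * Illustrative sketch: the *_PTR accessors walk the same packed layout,
+ * e.g. to reach the button-detect section of a blob "cal" allocated as
+ * above:
+ *
+ *	struct wcd_mbhc_btn_detect_cfg *btn_det =
+ *			WCD_MBHC_CAL_BTN_DET_PTR(cal);
+ *
+ *	btn_det->num_btn = WCD_MBHC_DEF_BUTTONS;
+ */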
+
+#define WCD_MBHC_CAL_MIN_SIZE ( \
+	sizeof(struct wcd_mbhc_general_cfg) + \
+	sizeof(struct wcd_mbhc_plug_detect_cfg) + \
+	sizeof(struct wcd_mbhc_plug_type_cfg) + \
+	sizeof(struct wcd_mbhc_btn_detect_cfg) + \
+	sizeof(struct wcd_mbhc_imped_detect_cfg) + \
+	(sizeof(u16)*2)  \
+	)
+
+#define WCD_MBHC_CAL_BTN_SZ(cfg_ptr) ( \
+	sizeof(struct wcd_mbhc_btn_detect_cfg) + \
+	(cfg_ptr->num_btn * (sizeof(cfg_ptr->_v_btn_low[0]) + \
+			sizeof(cfg_ptr->_v_btn_high[0]))))
+
+#define WCD_MBHC_CAL_IMPED_MIN_SZ ( \
+	sizeof(struct wcd_mbhc_imped_detect_cfg) + sizeof(u16) * 2)
+
+#define WCD_MBHC_CAL_IMPED_SZ(cfg_ptr) ( \
+	sizeof(struct wcd_mbhc_imped_detect_cfg) + \
+	(cfg_ptr->_n_rload * \
+	(sizeof(cfg_ptr->_rload[0]) + sizeof(cfg_ptr->_alpha[0]))))
+
+#ifdef CONFIG_SND_SOC_WCD_MBHC
+int wcd_mbhc_set_keycode(struct wcd_mbhc *mbhc);
+int wcd_mbhc_start(struct wcd_mbhc *mbhc,
+		       struct wcd_mbhc_config *mbhc_cfg);
+void wcd_mbhc_stop(struct wcd_mbhc *mbhc);
+int wcd_mbhc_init(struct wcd_mbhc *mbhc, struct snd_soc_codec *codec,
+		      const struct wcd_mbhc_cb *mbhc_cb,
+		      const struct wcd_mbhc_intr *mbhc_cdc_intr_ids,
+		      struct wcd_mbhc_register *mbhc_reg,
+		      bool impedance_det_en);
+int wcd_mbhc_get_impedance(struct wcd_mbhc *mbhc, uint32_t *zl,
+			   uint32_t *zr);
+void wcd_mbhc_deinit(struct wcd_mbhc *mbhc);
+#else
+static inline void wcd_mbhc_stop(struct wcd_mbhc *mbhc)
+{
+	return;
+}
+static inline int wcd_mbhc_init(struct wcd_mbhc *mbhc,
+				struct snd_soc_codec *codec,
+				const struct wcd_mbhc_cb *mbhc_cb,
+				const struct wcd_mbhc_intr *mbhc_cdc_intr_ids,
+				struct wcd_mbhc_register *mbhc_reg,
+				bool impedance_det_en)
+{
+	return 0;
+}
+static inline int wcd_mbhc_start(struct wcd_mbhc *mbhc,
+				 struct wcd_mbhc_config *mbhc_cfg)
+{
+	return 0;
+}
+static inline int wcd_mbhc_get_impedance(struct wcd_mbhc *mbhc,
+					 uint32_t *zl,
+					 uint32_t *zr)
+{
+	*zl = 0;
+	*zr = 0;
+	return -EINVAL;
+}
+static inline void wcd_mbhc_deinit(struct wcd_mbhc *mbhc)
+{
+}
+#endif
+
+#endif /* __WCD_MBHC_V2_H__ */
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/sound/soc/codecs/wcd-spi.c	2019-01-22 16:16:29.547301138 +0100
@@ -0,0 +1,1521 @@
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/bitops.h>
+#include <linux/spi/spi.h>
+#include <linux/regmap.h>
+#include <linux/component.h>
+#include <linux/ratelimit.h>
+#include <sound/wcd-dsp-mgr.h>
+#include <sound/wcd-spi.h>
+#include "wcd-spi-registers.h"
+
+/* Byte manipulations */
+#define SHIFT_1_BYTES    (8)
+#define SHIFT_2_BYTES    (16)
+#define SHIFT_3_BYTES    (24)
+
+/* Command opcodes */
+#define WCD_SPI_CMD_NOP     (0x00)
+#define WCD_SPI_CMD_WREN    (0x06)
+#define WCD_SPI_CMD_CLKREQ  (0xDA)
+#define WCD_SPI_CMD_RDSR    (0x05)
+#define WCD_SPI_CMD_IRR     (0x81)
+#define WCD_SPI_CMD_IRW     (0x82)
+#define WCD_SPI_CMD_MIOR    (0x83)
+#define WCD_SPI_CMD_FREAD   (0x0B)
+#define WCD_SPI_CMD_MIOW    (0x02)
+#define WCD_SPI_WRITE_FRAME_OPCODE \
+	(WCD_SPI_CMD_MIOW << SHIFT_3_BYTES)
+#define WCD_SPI_READ_FRAME_OPCODE \
+	(WCD_SPI_CMD_MIOR << SHIFT_3_BYTES)
+#define WCD_SPI_FREAD_FRAME_OPCODE \
+	(WCD_SPI_CMD_FREAD << SHIFT_3_BYTES)
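+
+/*
+ * A command frame is a 32-bit word with the opcode in the most
+ * significant byte and the 24-bit remote address in the lower three
+ * bytes (see WCD_CMD_ADDR_MASK below); frames are converted to big
+ * endian before transmission so the opcode byte leads on the wire.
+ */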
+
+/* Command lengths */
+#define WCD_SPI_OPCODE_LEN       (0x01)
+#define WCD_SPI_CMD_NOP_LEN      (0x01)
+#define WCD_SPI_CMD_WREN_LEN     (0x01)
+#define WCD_SPI_CMD_CLKREQ_LEN   (0x04)
+#define WCD_SPI_CMD_IRR_LEN      (0x04)
+#define WCD_SPI_CMD_IRW_LEN      (0x06)
+#define WCD_SPI_WRITE_SINGLE_LEN (0x08)
+#define WCD_SPI_READ_SINGLE_LEN  (0x13)
+#define WCD_SPI_CMD_FREAD_LEN    (0x13)
+
+/* Command delays */
+#define WCD_SPI_CLKREQ_DELAY_USECS (500)
+#define WCD_SPI_CLK_OFF_TIMER_MS   (500)
+#define WCD_SPI_RESUME_TIMEOUT_MS 100
+
+/* Command masks */
+#define WCD_CMD_ADDR_MASK            \
+	(0xFF |                      \
+	 (0xFF << SHIFT_1_BYTES) |   \
+	 (0xFF << SHIFT_2_BYTES))
+
+/* Clock ctrl request related */
+#define WCD_SPI_CLK_ENABLE true
+#define WCD_SPI_CLK_DISABLE false
+#define WCD_SPI_CLK_FLAG_DELAYED    (1 << 0)
+#define WCD_SPI_CLK_FLAG_IMMEDIATE  (1 << 1)
+
+/* Internal addresses */
+#define WCD_SPI_ADDR_IPC_CTL_HOST (0x012014)
+
+/* Word sizes and min/max lengths */
+#define WCD_SPI_WORD_BYTE_CNT (4)
+#define WCD_SPI_RW_MULTI_MIN_LEN (16)
+
+/* Max size is 32 bytes less than 64Kbytes */
+#define WCD_SPI_RW_MULTI_MAX_LEN ((64 * 1024) - 32)
+
+/*
+ * Max size for the pre-allocated buffers is the max
+ * possible read/write length + 32 bytes for the SPI
+ * read/write command header itself.
+ */
+#define WCD_SPI_RW_MAX_BUF_SIZE (WCD_SPI_RW_MULTI_MAX_LEN + 32)
+
+/* Alignment requirements */
+#define WCD_SPI_RW_MIN_ALIGN    WCD_SPI_WORD_BYTE_CNT
+#define WCD_SPI_RW_MULTI_ALIGN  (16)
+
+/* Status mask bits */
+#define WCD_SPI_CLK_STATE_ENABLED BIT(0)
+#define WCD_SPI_IS_SUSPENDED BIT(1)
+
+/* Locking related */
+#define WCD_SPI_MUTEX_LOCK(spi, lock)              \
+{                                                  \
+	dev_vdbg(&spi->dev, "%s: mutex_lock(%s)\n", \
+		 __func__, __stringify_1(lock));    \
+	mutex_lock(&lock);                         \
+}
+
+#define WCD_SPI_MUTEX_UNLOCK(spi, lock)              \
+{                                                    \
+	dev_vdbg(&spi->dev, "%s: mutex_unlock(%s)\n", \
+		 __func__, __stringify_1(lock));      \
+	mutex_unlock(&lock);                         \
+}
+
+struct wcd_spi_debug_data {
+	struct dentry *dir;
+	u32 addr;
+	u32 size;
+};
+
+struct wcd_spi_priv {
+	struct spi_device *spi;
+	u32 mem_base_addr;
+
+	struct regmap *regmap;
+
+	/* Message for single transfer */
+	struct spi_message msg1;
+	struct spi_transfer xfer1;
+
+	/* Message for two transfers */
+	struct spi_message msg2;
+	struct spi_transfer xfer2[2];
+
+	/* Register access related */
+	u32 reg_bytes;
+	u32 val_bytes;
+
+	/* Clock requests related */
+	struct mutex clk_mutex;
+	int clk_users;
+	unsigned long status_mask;
+	struct delayed_work clk_dwork;
+
+	/* Transaction related */
+	struct mutex xfer_mutex;
+
+	struct device *m_dev;
+	struct wdsp_mgr_ops *m_ops;
+
+	/* Debugfs related information */
+	struct wcd_spi_debug_data debug_data;
+
+	/* Completion object to indicate system resume completion */
+	struct completion resume_comp;
+
+	/* Buffers to hold memory used for transfers */
+	void *tx_buf;
+	void *rx_buf;
+};
+
+enum xfer_request {
+	WCD_SPI_XFER_WRITE,
+	WCD_SPI_XFER_READ,
+};
+
+static char *wcd_spi_xfer_req_str(enum xfer_request req)
+{
+	if (req == WCD_SPI_XFER_WRITE)
+		return "xfer_write";
+	else if (req == WCD_SPI_XFER_READ)
+		return "xfer_read";
+	else
+		return "xfer_invalid";
+}
+
+static void wcd_spi_reinit_xfer(struct spi_transfer *xfer)
+{
+	xfer->tx_buf = NULL;
+	xfer->rx_buf = NULL;
+	xfer->delay_usecs = 0;
+	xfer->len = 0;
+}
+
+static bool wcd_spi_is_suspended(struct wcd_spi_priv *wcd_spi)
+{
+	return test_bit(WCD_SPI_IS_SUSPENDED, &wcd_spi->status_mask);
+}
+
+static bool wcd_spi_can_suspend(struct wcd_spi_priv *wcd_spi)
+{
+	struct spi_device *spi = wcd_spi->spi;
+
+	if (wcd_spi->clk_users > 0 ||
+	    test_bit(WCD_SPI_CLK_STATE_ENABLED, &wcd_spi->status_mask)) {
+		dev_err(&spi->dev, "%s: cannot suspend, clk_users = %d\n",
+			__func__, wcd_spi->clk_users);
+		return false;
+	}
+
+	return true;
+}
+
+static int wcd_spi_wait_for_resume(struct wcd_spi_priv *wcd_spi)
+{
+	struct spi_device *spi = wcd_spi->spi;
+	int rc = 0;
+
+	WCD_SPI_MUTEX_LOCK(spi, wcd_spi->clk_mutex);
+	/* If the system is already in resumed state, return right away */
+	if (!wcd_spi_is_suspended(wcd_spi))
+		goto done;
+
+	/* If suspended then wait for resume to happen */
+	reinit_completion(&wcd_spi->resume_comp);
+	WCD_SPI_MUTEX_UNLOCK(spi, wcd_spi->clk_mutex);
+	rc = wait_for_completion_timeout(&wcd_spi->resume_comp,
+				msecs_to_jiffies(WCD_SPI_RESUME_TIMEOUT_MS));
+	WCD_SPI_MUTEX_LOCK(spi, wcd_spi->clk_mutex);
+	if (rc == 0) {
+		dev_err(&spi->dev, "%s: failed to resume in %u msec\n",
+			__func__, WCD_SPI_RESUME_TIMEOUT_MS);
+		rc = -EIO;
+		goto done;
+	}
+
+	dev_dbg(&spi->dev, "%s: resume successful\n", __func__);
+	rc = 0;
+done:
+	WCD_SPI_MUTEX_UNLOCK(spi, wcd_spi->clk_mutex);
+	return rc;
+}
+
+static int wcd_spi_read_single(struct spi_device *spi,
+			       u32 remote_addr, u32 *val)
+{
+	struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
+	struct spi_transfer *tx_xfer = &wcd_spi->xfer2[0];
+	struct spi_transfer *rx_xfer = &wcd_spi->xfer2[1];
+	u8 *tx_buf = wcd_spi->tx_buf;
+	u32 frame = 0;
+	int ret;
+
+	dev_dbg(&spi->dev, "%s: remote_addr = 0x%x\n",
+		__func__, remote_addr);
+
+	if (!tx_buf) {
+		dev_err(&spi->dev, "%s: tx_buf not allocated\n",
+			__func__);
+		return -ENOMEM;
+	}
+
+	frame |= WCD_SPI_READ_FRAME_OPCODE;
+	frame |= remote_addr & WCD_CMD_ADDR_MASK;
+
+	wcd_spi_reinit_xfer(tx_xfer);
+	frame = cpu_to_be32(frame);
+	memcpy(tx_buf, &frame, sizeof(frame));
+	tx_xfer->tx_buf = tx_buf;
+	tx_xfer->len = WCD_SPI_READ_SINGLE_LEN;
+
+	wcd_spi_reinit_xfer(rx_xfer);
+	rx_xfer->rx_buf = val;
+	rx_xfer->len = sizeof(*val);
+
+	ret = spi_sync(spi, &wcd_spi->msg2);
+
+	return ret;
+}
+
+static int wcd_spi_read_multi(struct spi_device *spi,
+			      u32 remote_addr, u8 *data,
+			      size_t len)
+{
+	struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
+	struct spi_transfer *xfer = &wcd_spi->xfer1;
+	u8 *tx_buf = wcd_spi->tx_buf;
+	u8 *rx_buf = wcd_spi->rx_buf;
+	u32 frame = 0;
+	int ret;
+
+	dev_dbg(&spi->dev,  "%s: addr 0x%x, len = %zd\n",
+		__func__, remote_addr, len);
+
+	frame |= WCD_SPI_FREAD_FRAME_OPCODE;
+	frame |= remote_addr & WCD_CMD_ADDR_MASK;
+
+	if (!tx_buf || !rx_buf) {
+		dev_err(&spi->dev, "%s: %s not allocated\n", __func__,
+			(!tx_buf) ? "tx_buf" : "rx_buf");
+		return -ENOMEM;
+	}
+
+	wcd_spi_reinit_xfer(xfer);
+	frame = cpu_to_be32(frame);
+	memcpy(tx_buf, &frame, sizeof(frame));
+	xfer->tx_buf = tx_buf;
+	xfer->rx_buf = rx_buf;
+	xfer->len = WCD_SPI_CMD_FREAD_LEN + len;
+
+	ret = spi_sync(spi, &wcd_spi->msg1);
+	if (ret) {
+		dev_err(&spi->dev, "%s: failed, err = %d\n",
+			__func__, ret);
+		goto done;
+	}
+
+	memcpy(data, rx_buf + WCD_SPI_CMD_FREAD_LEN, len);
+done:
+	return ret;
+}
+
+static int wcd_spi_write_single(struct spi_device *spi,
+				u32 remote_addr, u32 val)
+{
+	struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
+	struct spi_transfer *xfer = &wcd_spi->xfer1;
+	u8 buf[WCD_SPI_WRITE_SINGLE_LEN];
+	u32 frame = 0;
+
+	dev_dbg(&spi->dev, "%s: remote_addr = 0x%x, val = 0x%x\n",
+		__func__, remote_addr, val);
+
+	memset(buf, 0, WCD_SPI_WRITE_SINGLE_LEN);
+	frame |= WCD_SPI_WRITE_FRAME_OPCODE;
+	frame |= (remote_addr & WCD_CMD_ADDR_MASK);
+
+	frame = cpu_to_be32(frame);
+	memcpy(buf, &frame, sizeof(frame));
+	memcpy(buf + sizeof(frame), &val, sizeof(val));
+
+	wcd_spi_reinit_xfer(xfer);
+	xfer->tx_buf = buf;
+	xfer->len = WCD_SPI_WRITE_SINGLE_LEN;
+
+	return spi_sync(spi, &wcd_spi->msg1);
+}
+
+static int wcd_spi_write_multi(struct spi_device *spi,
+			       u32 remote_addr, u8 *data,
+			       size_t len)
+{
+	struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
+	struct spi_transfer *xfer = &wcd_spi->xfer1;
+	u32 frame = 0;
+	u8 *tx_buf = wcd_spi->tx_buf;
+	int xfer_len, ret;
+
+	dev_dbg(&spi->dev, "%s: addr = 0x%x len = %zd\n",
+		__func__, remote_addr, len);
+
+	frame |= WCD_SPI_WRITE_FRAME_OPCODE;
+	frame |= (remote_addr & WCD_CMD_ADDR_MASK);
+
+	frame = cpu_to_be32(frame);
+	xfer_len = len + sizeof(frame);
+
+	if (!tx_buf) {
+		dev_err(&spi->dev, "%s: tx_buf not allocated\n",
+			__func__);
+		return -ENOMEM;
+	}
+
+	memcpy(tx_buf, &frame, sizeof(frame));
+	memcpy(tx_buf + sizeof(frame), data, len);
+
+	wcd_spi_reinit_xfer(xfer);
+	xfer->tx_buf = tx_buf;
+	xfer->len = xfer_len;
+
+	ret = spi_sync(spi, &wcd_spi->msg1);
+	if (IS_ERR_VALUE(ret))
+		dev_err(&spi->dev,
+			"%s: Failed, addr = 0x%x, len = %zd\n",
+			__func__, remote_addr, len);
+	return ret;
+}
+
+static int wcd_spi_transfer_split(struct spi_device *spi,
+				  struct wcd_spi_msg *data_msg,
+				  enum xfer_request xfer_req)
+{
+	u32 addr = data_msg->remote_addr;
+	u8 *data = data_msg->data;
+	int remain_size = data_msg->len;
+	int to_xfer, loop_cnt, ret = 0;
+
+	/* Perform single-word transfers until multi-word alignment is met */
+	loop_cnt = 1;
+	while (remain_size &&
+	       !IS_ALIGNED(addr, WCD_SPI_RW_MULTI_ALIGN)) {
+		if (xfer_req == WCD_SPI_XFER_WRITE)
+			ret = wcd_spi_write_single(spi, addr,
+						   (*(u32 *)data));
+		else
+			ret = wcd_spi_read_single(spi, addr,
+						  (u32 *)data);
+		if (IS_ERR_VALUE(ret)) {
+			dev_err(&spi->dev,
+				"%s: %s fail iter(%d) start-word addr (0x%x)\n",
+				__func__, wcd_spi_xfer_req_str(xfer_req),
+				loop_cnt, addr);
+			goto done;
+		}
+
+		addr += WCD_SPI_WORD_BYTE_CNT;
+		data += WCD_SPI_WORD_BYTE_CNT;
+		remain_size -= WCD_SPI_WORD_BYTE_CNT;
+		loop_cnt++;
+	}
+
+	/* Perform multi transfers in chunks of the max allowed length */
+	loop_cnt = 1;
+	while (remain_size >= WCD_SPI_RW_MULTI_MAX_LEN) {
+		if (xfer_req == WCD_SPI_XFER_WRITE)
+			ret = wcd_spi_write_multi(spi, addr, data,
+						  WCD_SPI_RW_MULTI_MAX_LEN);
+		else
+			ret = wcd_spi_read_multi(spi, addr, data,
+						 WCD_SPI_RW_MULTI_MAX_LEN);
+		if (IS_ERR_VALUE(ret)) {
+			dev_err(&spi->dev,
+				"%s: %s fail iter(%d) max-write addr (0x%x)\n",
+				__func__, wcd_spi_xfer_req_str(xfer_req),
+				loop_cnt, addr);
+			goto done;
+		}
+
+		addr += WCD_SPI_RW_MULTI_MAX_LEN;
+		data += WCD_SPI_RW_MULTI_MAX_LEN;
+		remain_size -= WCD_SPI_RW_MULTI_MAX_LEN;
+		loop_cnt++;
+	}
+
+	/*
+	 * Transfer the largest remaining chunk that is a multiple of
+	 * the minimum length for multi transfer commands.
+	 */
+	to_xfer = remain_size - (remain_size % WCD_SPI_RW_MULTI_MIN_LEN);
+	if (remain_size >= WCD_SPI_RW_MULTI_MIN_LEN &&
+	    to_xfer > 0) {
+		if (xfer_req == WCD_SPI_XFER_WRITE)
+			ret = wcd_spi_write_multi(spi, addr, data, to_xfer);
+		else
+			ret = wcd_spi_read_multi(spi, addr, data, to_xfer);
+		if (IS_ERR_VALUE(ret)) {
+			dev_err(&spi->dev,
+				"%s: %s fail write addr (0x%x), size (0x%x)\n",
+				__func__, wcd_spi_xfer_req_str(xfer_req),
+				addr, to_xfer);
+			goto done;
+		}
+
+		addr += to_xfer;
+		data += to_xfer;
+		remain_size -= to_xfer;
+	}
+
+	/* Perform single-word transfers for the last remaining data */
+	loop_cnt = 1;
+	while (remain_size > 0) {
+		if (xfer_req == WCD_SPI_XFER_WRITE)
+			ret = wcd_spi_write_single(spi, addr, (*((u32 *)data)));
+		else
+			ret = wcd_spi_read_single(spi, addr,  (u32 *) data);
+		if (IS_ERR_VALUE(ret)) {
+			dev_err(&spi->dev,
+				"%s: %s fail iter(%d) end-write addr (0x%x)\n",
+				__func__, wcd_spi_xfer_req_str(xfer_req),
+				loop_cnt, addr);
+			goto done;
+		}
+
+		addr += WCD_SPI_WORD_BYTE_CNT;
+		data += WCD_SPI_WORD_BYTE_CNT;
+		remain_size -= WCD_SPI_WORD_BYTE_CNT;
+		loop_cnt++;
+	}
+
+done:
+	return ret;
+}
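+
+/*
+ * Worked example (sketch): for a transfer with remote_addr = 0x1008 and
+ * len = 0x48, the split above issues two single-word transfers (0x1008,
+ * 0x100c) to reach 16-byte alignment, then one 64-byte multi transfer
+ * at 0x1010; a tail shorter than WCD_SPI_RW_MULTI_MIN_LEN would again
+ * fall back to single-word transfers.
+ */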
+
+static int wcd_spi_cmd_nop(struct spi_device *spi)
+{
+	u8 nop = WCD_SPI_CMD_NOP;
+
+	return spi_write(spi, &nop, WCD_SPI_CMD_NOP_LEN);
+}
+
+static int wcd_spi_cmd_clkreq(struct spi_device *spi)
+{
+	struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
+	struct spi_transfer *xfer = &wcd_spi->xfer1;
+	u8 cmd[WCD_SPI_CMD_CLKREQ_LEN] = {
+		WCD_SPI_CMD_CLKREQ,
+		0xBA, 0x80, 0x00};
+
+	wcd_spi_reinit_xfer(xfer);
+	xfer->tx_buf = cmd;
+	xfer->len = WCD_SPI_CMD_CLKREQ_LEN;
+	xfer->delay_usecs = WCD_SPI_CLKREQ_DELAY_USECS;
+
+	return spi_sync(spi, &wcd_spi->msg1);
+}
+
+static int wcd_spi_cmd_wr_en(struct spi_device *spi)
+{
+	u8 wr_en = WCD_SPI_CMD_WREN;
+
+	return spi_write(spi, &wr_en, WCD_SPI_CMD_WREN_LEN);
+}
+
+static int wcd_spi_cmd_rdsr(struct spi_device *spi,
+			    u32 *rdsr_status)
+{
+	struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
+	struct spi_transfer *tx_xfer = &wcd_spi->xfer2[0];
+	struct spi_transfer *rx_xfer = &wcd_spi->xfer2[1];
+	u8 rdsr_cmd;
+	u32 status;
+	int ret;
+
+	rdsr_cmd = WCD_SPI_CMD_RDSR;
+	wcd_spi_reinit_xfer(tx_xfer);
+	tx_xfer->tx_buf = &rdsr_cmd;
+	tx_xfer->len = sizeof(rdsr_cmd);
+
+	wcd_spi_reinit_xfer(rx_xfer);
+	rx_xfer->rx_buf = &status;
+	rx_xfer->len = sizeof(status);
+
+	ret = spi_sync(spi, &wcd_spi->msg2);
+	if (IS_ERR_VALUE(ret)) {
+		dev_err(&spi->dev, "%s: RDSR failed, err = %d\n",
+			__func__, ret);
+		goto done;
+	}
+
+	*rdsr_status = be32_to_cpu(status);
+
+	dev_dbg(&spi->dev, "%s: RDSR success, value = 0x%x\n",
+		 __func__, *rdsr_status);
+done:
+	return ret;
+}
+
+static int wcd_spi_clk_enable(struct spi_device *spi)
+{
+	struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
+	int ret;
+	u32 rd_status = 0;
+
+	ret = wcd_spi_cmd_nop(spi);
+	if (IS_ERR_VALUE(ret)) {
+		dev_err(&spi->dev, "%s: NOP1 failed, err = %d\n",
+			__func__, ret);
+		goto done;
+	}
+
+	ret = wcd_spi_cmd_clkreq(spi);
+	if (IS_ERR_VALUE(ret)) {
+		dev_err(&spi->dev, "%s: CLK_REQ failed, err = %d\n",
+			__func__, ret);
+		goto done;
+	}
+
+	ret = wcd_spi_cmd_nop(spi);
+	if (IS_ERR_VALUE(ret)) {
+		dev_err(&spi->dev, "%s: NOP2 failed, err = %d\n",
+			__func__, ret);
+		goto done;
+	}
+	wcd_spi_cmd_rdsr(spi, &rd_status);
+	/*
+	 * A read status of zero means reads are not
+	 * happening on the bus, possibly because the
+	 * clock request failed.
+	 */
+	if (rd_status) {
+		set_bit(WCD_SPI_CLK_STATE_ENABLED,
+			&wcd_spi->status_mask);
+	} else {
+		dev_err(&spi->dev, "%s: RDSR status is zero\n",
+			__func__);
+		ret = -EIO;
+	}
+done:
+	return ret;
+}
+
+static int wcd_spi_clk_disable(struct spi_device *spi)
+{
+	struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
+	int ret;
+
+	ret = wcd_spi_write_single(spi, WCD_SPI_ADDR_IPC_CTL_HOST, 0x01);
+	if (IS_ERR_VALUE(ret))
+		dev_err(&spi->dev, "%s: Failed, err = %d\n",
+			__func__, ret);
+	else
+		clear_bit(WCD_SPI_CLK_STATE_ENABLED, &wcd_spi->status_mask);
+
+	return ret;
+}
+
+static int wcd_spi_clk_ctrl(struct spi_device *spi,
+			    bool request, u32 flags)
+{
+	struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
+	int ret = 0;
+	const char *delay_str;
+
+	delay_str = (flags == WCD_SPI_CLK_FLAG_DELAYED) ?
+		    "delayed" : "immediate";
+
+	WCD_SPI_MUTEX_LOCK(spi, wcd_spi->clk_mutex);
+
+	/* Reject any unbalanced disable request */
+	if (wcd_spi->clk_users < 0 ||
+	    (!request && wcd_spi->clk_users == 0)) {
+		dev_err(&spi->dev, "%s: Unbalanced clk_users %d for %s\n",
+			 __func__, wcd_spi->clk_users,
+			request ? "enable" : "disable");
+		ret = -EINVAL;
+
+		/* Reset the clk_users to 0 */
+		wcd_spi->clk_users = 0;
+
+		goto done;
+	}
+
+	if (request == WCD_SPI_CLK_ENABLE) {
+		/*
+		 * If the SPI bus is suspended, then return error
+		 * as the transaction cannot be completed.
+		 */
+		if (wcd_spi_is_suspended(wcd_spi)) {
+			dev_err(&spi->dev,
+				"%s: SPI suspended, cannot enable clk\n",
+				__func__);
+			ret = -EIO;
+			goto done;
+		}
+
+		/* Cancel the disable clk work */
+		WCD_SPI_MUTEX_UNLOCK(spi, wcd_spi->clk_mutex);
+		cancel_delayed_work_sync(&wcd_spi->clk_dwork);
+		WCD_SPI_MUTEX_LOCK(spi, wcd_spi->clk_mutex);
+
+		wcd_spi->clk_users++;
+
+		/*
+		 * If the clk state bit is already set,
+		 * the clk wasn't really disabled.
+		 */
+		if (test_bit(WCD_SPI_CLK_STATE_ENABLED, &wcd_spi->status_mask))
+			goto done;
+		else if (wcd_spi->clk_users == 1)
+			ret = wcd_spi_clk_enable(spi);
+
+	} else {
+		wcd_spi->clk_users--;
+
+		/* Clock is still voted for */
+		if (wcd_spi->clk_users > 0)
+			goto done;
+
+		/*
+		 * If we are here, clk_users must be 0 and needs
+		 * to be disabled. Call the disable based on the
+		 * flags.
+		 */
+		if (flags == WCD_SPI_CLK_FLAG_DELAYED) {
+			schedule_delayed_work(&wcd_spi->clk_dwork,
+				msecs_to_jiffies(WCD_SPI_CLK_OFF_TIMER_MS));
+		} else {
+			ret = wcd_spi_clk_disable(spi);
+			if (IS_ERR_VALUE(ret))
+				dev_err(&spi->dev,
+					"%s: Failed to disable clk err = %d\n",
+					__func__, ret);
+		}
+	}
+
+done:
+	dev_dbg(&spi->dev, "%s: updated clk_users = %d, request_%s %s\n",
+		__func__, wcd_spi->clk_users, request ? "enable" : "disable",
+		request ? "" : delay_str);
+	WCD_SPI_MUTEX_UNLOCK(spi, wcd_spi->clk_mutex);
+
+	return ret;
+}
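+
+/*
+ * Illustrative usage (sketch): clock requests are reference counted, so
+ * every transaction brackets its bus access the way wcd_spi_data_xfer()
+ * below does:
+ *
+ *	wcd_spi_clk_ctrl(spi, WCD_SPI_CLK_ENABLE, WCD_SPI_CLK_FLAG_IMMEDIATE);
+ *	... SPI transfers ...
+ *	wcd_spi_clk_ctrl(spi, WCD_SPI_CLK_DISABLE, WCD_SPI_CLK_FLAG_DELAYED);
+ */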
+
+static int wcd_spi_init(struct spi_device *spi)
+{
+	struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
+	int ret;
+
+	ret = wcd_spi_clk_ctrl(spi, WCD_SPI_CLK_ENABLE,
+			       WCD_SPI_CLK_FLAG_IMMEDIATE);
+	if (IS_ERR_VALUE(ret))
+		goto done;
+
+	ret = wcd_spi_cmd_wr_en(spi);
+	if (IS_ERR_VALUE(ret))
+		goto err_wr_en;
+
+	/*
+	 * In case spi_init is called after component deinit,
+	 * it is possible hardware register state is also reset.
+	 * Sync the regcache here so hardware state is updated
+	 * to reflect the cache.
+	 */
+	regcache_sync(wcd_spi->regmap);
+
+	regmap_write(wcd_spi->regmap, WCD_SPI_SLAVE_CONFIG,
+		     0x0F3D0800);
+
+	/* Write the MTU to max allowed size */
+	regmap_update_bits(wcd_spi->regmap,
+			   WCD_SPI_SLAVE_TRNS_LEN,
+			   0xFFFF0000, 0xFFFF0000);
+err_wr_en:
+	wcd_spi_clk_ctrl(spi, WCD_SPI_CLK_DISABLE,
+			 WCD_SPI_CLK_FLAG_IMMEDIATE);
+done:
+	return ret;
+}
+
+static void wcd_spi_clk_work(struct work_struct *work)
+{
+	struct delayed_work *dwork;
+	struct wcd_spi_priv *wcd_spi;
+	struct spi_device *spi;
+	int ret;
+
+	dwork = to_delayed_work(work);
+	wcd_spi = container_of(dwork, struct wcd_spi_priv, clk_dwork);
+	spi = wcd_spi->spi;
+
+	WCD_SPI_MUTEX_LOCK(spi, wcd_spi->clk_mutex);
+	ret = wcd_spi_clk_disable(spi);
+	if (IS_ERR_VALUE(ret))
+		dev_err(&spi->dev,
+			"%s: Failed to disable clk, err = %d\n",
+			__func__, ret);
+	WCD_SPI_MUTEX_UNLOCK(spi, wcd_spi->clk_mutex);
+}
+
+static int __wcd_spi_data_xfer(struct spi_device *spi,
+			       struct wcd_spi_msg *msg,
+			       enum xfer_request xfer_req)
+{
+	struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
+	int ret;
+
+	/* Check for minimum alignment requirements */
+	if (!IS_ALIGNED(msg->remote_addr, WCD_SPI_RW_MIN_ALIGN)) {
+		dev_err(&spi->dev,
+			"%s: addr 0x%x is not aligned to 0x%x\n",
+			__func__, msg->remote_addr, WCD_SPI_RW_MIN_ALIGN);
+		return -EINVAL;
+	} else if (msg->len % WCD_SPI_WORD_BYTE_CNT) {
+		dev_err(&spi->dev,
+			"%s: len 0x%zx is not a multiple of %d\n",
+			__func__, msg->len, WCD_SPI_WORD_BYTE_CNT);
+		return -EINVAL;
+	}
+
+	WCD_SPI_MUTEX_LOCK(spi, wcd_spi->xfer_mutex);
+	if (msg->len == WCD_SPI_WORD_BYTE_CNT) {
+		if (xfer_req == WCD_SPI_XFER_WRITE)
+			ret = wcd_spi_write_single(spi, msg->remote_addr,
+						   (*((u32 *)msg->data)));
+		else
+			ret = wcd_spi_read_single(spi, msg->remote_addr,
+						  (u32 *) msg->data);
+	} else {
+		ret = wcd_spi_transfer_split(spi, msg, xfer_req);
+	}
+	WCD_SPI_MUTEX_UNLOCK(spi, wcd_spi->xfer_mutex);
+
+	return ret;
+}
+
+static int wcd_spi_data_xfer(struct spi_device *spi,
+			     struct wcd_spi_msg *msg,
+			     enum xfer_request req)
+{
+	int ret, ret1;
+
+	if (!msg->len) {
+		dev_err(&spi->dev, "%s: Invalid size %zd\n",
+			__func__, msg->len);
+		return -EINVAL;
+	}
+
+	/* Request for clock */
+	ret = wcd_spi_clk_ctrl(spi, WCD_SPI_CLK_ENABLE,
+			       WCD_SPI_CLK_FLAG_IMMEDIATE);
+	if (IS_ERR_VALUE(ret)) {
+		dev_err(&spi->dev, "%s: clk enable failed %d\n",
+			__func__, ret);
+		goto done;
+	}
+
+	/* Perform the transaction */
+	ret = __wcd_spi_data_xfer(spi, msg, req);
+	if (IS_ERR_VALUE(ret))
+		dev_err(&spi->dev,
+			"%s: Failed %s, addr = 0x%x, size = 0x%zx, err = %d\n",
+			__func__, wcd_spi_xfer_req_str(req),
+			msg->remote_addr, msg->len, ret);
+
+	/* Release the clock even if xfer failed */
+	ret1 = wcd_spi_clk_ctrl(spi, WCD_SPI_CLK_DISABLE,
+				WCD_SPI_CLK_FLAG_DELAYED);
+	if (IS_ERR_VALUE(ret1))
+		dev_err(&spi->dev, "%s: clk disable failed %d\n",
+			__func__, ret1);
+done:
+	return ret;
+}
+
+/*
+ * wcd_spi_data_write: Write data to WCD SPI
+ * @spi: spi_device struct
+ * @msg: msg describing the data to be written to WCD
+ *
+ * This API writes @msg->len bytes of data to the remote address
+ * specified in @msg. The write size must be a multiple of 4 bytes
+ * and the write address must be 4-byte aligned.
+ */
+int wcd_spi_data_write(struct spi_device *spi,
+		       struct wcd_spi_msg *msg)
+{
+	if (!spi || !msg) {
+		pr_err("%s: Invalid %s\n", __func__,
+			(!spi) ? "spi device" : "msg");
+		return -EINVAL;
+	}
+
+	dev_dbg_ratelimited(&spi->dev, "%s: addr = 0x%x, len = %zu\n",
+			    __func__, msg->remote_addr, msg->len);
+	return wcd_spi_data_xfer(spi, msg, WCD_SPI_XFER_WRITE);
+}
+EXPORT_SYMBOL(wcd_spi_data_write);
+
+/*
+ * wcd_spi_data_read: Read data from WCD SPI
+ * @spi: spi_device struct
+ * @msg: msg describing the data to be read from WCD
+ *
+ * This API reads @msg->len bytes of data from the remote address
+ * specified in @msg. The read size must be a multiple of 4 bytes
+ * and the read address must be 4-byte aligned.
+ */
+int wcd_spi_data_read(struct spi_device *spi,
+		      struct wcd_spi_msg *msg)
+{
+	if (!spi || !msg) {
+		pr_err("%s: Invalid %s\n", __func__,
+			(!spi) ? "spi device" : "msg");
+		return -EINVAL;
+	}
+
+	dev_dbg_ratelimited(&spi->dev, "%s: addr = 0x%x, len = %zu\n",
+			    __func__, msg->remote_addr, msg->len);
+	return wcd_spi_data_xfer(spi, msg, WCD_SPI_XFER_READ);
+}
+EXPORT_SYMBOL(wcd_spi_data_read);
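+
+/*
+ * Usage sketch for the exported data API (illustrative only; "fw_buf"
+ * and the remote address are hypothetical). The address must be 4-byte
+ * aligned and the length a multiple of 4:
+ *
+ *	struct wcd_spi_msg msg = {
+ *		.remote_addr = 0x100000,
+ *		.data = fw_buf,
+ *		.len = 4096,
+ *		.flags = 0,
+ *	};
+ *
+ *	ret = wcd_spi_data_write(spi, &msg);
+ */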
+
+static int wdsp_spi_dload_section(struct spi_device *spi,
+				  void *data)
+{
+	struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
+	struct wdsp_img_section *sec = data;
+	struct wcd_spi_msg msg;
+	int ret;
+
+	dev_dbg(&spi->dev, "%s: addr = 0x%x, size = 0x%zx\n",
+		__func__, sec->addr, sec->size);
+
+	msg.remote_addr = sec->addr + wcd_spi->mem_base_addr;
+	msg.data = sec->data;
+	msg.len = sec->size;
+
+	ret = __wcd_spi_data_xfer(spi, &msg, WCD_SPI_XFER_WRITE);
+	if (IS_ERR_VALUE(ret))
+		dev_err(&spi->dev, "%s: fail addr (0x%x) size (0x%zx)\n",
+			__func__, msg.remote_addr, msg.len);
+	return ret;
+}
+
+static int wdsp_spi_read_section(struct spi_device *spi, void *data)
+{
+	struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
+	struct wdsp_img_section *sec = data;
+	struct wcd_spi_msg msg;
+	int ret;
+
+	msg.remote_addr = sec->addr + wcd_spi->mem_base_addr;
+	msg.data = sec->data;
+	msg.len = sec->size;
+
+	dev_dbg(&spi->dev, "%s: addr = 0x%x, size = 0x%zx\n",
+		__func__, msg.remote_addr, msg.len);
+
+	ret = wcd_spi_data_xfer(spi, &msg, WCD_SPI_XFER_READ);
+	if (IS_ERR_VALUE(ret))
+		dev_err(&spi->dev, "%s: fail addr (0x%x) size (0x%zx)\n",
+			__func__, msg.remote_addr, msg.len);
+	return ret;
+}
+
+static int wdsp_spi_event_handler(struct device *dev, void *priv_data,
+				  enum wdsp_event_type event,
+				  void *data)
+{
+	struct spi_device *spi = to_spi_device(dev);
+	struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
+	int ret = 0;
+
+	dev_dbg(&spi->dev, "%s: event type %d\n",
+		__func__, event);
+
+	switch (event) {
+	case WDSP_EVENT_POST_SHUTDOWN:
+		cancel_delayed_work_sync(&wcd_spi->clk_dwork);
+		WCD_SPI_MUTEX_LOCK(spi, wcd_spi->clk_mutex);
+		if (test_bit(WCD_SPI_CLK_STATE_ENABLED, &wcd_spi->status_mask))
+			wcd_spi_clk_disable(spi);
+		wcd_spi->clk_users = 0;
+		WCD_SPI_MUTEX_UNLOCK(spi, wcd_spi->clk_mutex);
+		break;
+
+	case WDSP_EVENT_PRE_DLOAD_CODE:
+	case WDSP_EVENT_PRE_DLOAD_DATA:
+		ret = wcd_spi_clk_ctrl(spi, WCD_SPI_CLK_ENABLE,
+				       WCD_SPI_CLK_FLAG_IMMEDIATE);
+		if (IS_ERR_VALUE(ret))
+			dev_err(&spi->dev, "%s: clk_req failed %d\n",
+				__func__, ret);
+		break;
+
+	case WDSP_EVENT_POST_DLOAD_CODE:
+	case WDSP_EVENT_POST_DLOAD_DATA:
+	case WDSP_EVENT_DLOAD_FAILED:
+
+		ret = wcd_spi_clk_ctrl(spi, WCD_SPI_CLK_DISABLE,
+				       WCD_SPI_CLK_FLAG_IMMEDIATE);
+		if (IS_ERR_VALUE(ret))
+			dev_err(&spi->dev, "%s: clk unvote failed %d\n",
+				__func__, ret);
+		break;
+
+	case WDSP_EVENT_DLOAD_SECTION:
+		ret = wdsp_spi_dload_section(spi, data);
+		break;
+
+	case WDSP_EVENT_READ_SECTION:
+		ret = wdsp_spi_read_section(spi, data);
+		break;
+
+	case WDSP_EVENT_SUSPEND:
+		WCD_SPI_MUTEX_LOCK(spi, wcd_spi->clk_mutex);
+		if (!wcd_spi_can_suspend(wcd_spi))
+			ret = -EBUSY;
+		WCD_SPI_MUTEX_UNLOCK(spi, wcd_spi->clk_mutex);
+		break;
+
+	case WDSP_EVENT_RESUME:
+		ret = wcd_spi_wait_for_resume(wcd_spi);
+		break;
+
+	default:
+		dev_dbg(&spi->dev, "%s: Unhandled event %d\n",
+			__func__, event);
+		break;
+	}
+
+	return ret;
+}
+
+static int wcd_spi_bus_gwrite(void *context, const void *reg,
+			      size_t reg_len, const void *val,
+			      size_t val_len)
+{
+	struct device *dev = context;
+	struct spi_device *spi = to_spi_device(dev);
+	struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
+	u8 tx_buf[WCD_SPI_CMD_IRW_LEN];
+
+	if (!reg || !val || reg_len != wcd_spi->reg_bytes ||
+	    val_len != wcd_spi->val_bytes) {
+		dev_err(&spi->dev,
+			"%s: Invalid input, reg_len = %zd, val_len = %zd\n",
+			__func__, reg_len, val_len);
+		return -EINVAL;
+	}
+
+	tx_buf[0] = WCD_SPI_CMD_IRW;
+	tx_buf[1] = *((u8 *)reg);
+	memcpy(&tx_buf[WCD_SPI_OPCODE_LEN + reg_len],
+	       val, val_len);
+
+	return spi_write(spi, tx_buf, WCD_SPI_CMD_IRW_LEN);
+}
+
+static int wcd_spi_bus_write(void *context, const void *data,
+			     size_t count)
+{
+	struct device *dev = context;
+	struct spi_device *spi = to_spi_device(dev);
+	struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
+
+	if (count < (wcd_spi->reg_bytes + wcd_spi->val_bytes)) {
+		dev_err(&spi->dev, "%s: Invalid size %zd\n",
+			__func__, count);
+		WARN_ON(1);
+		return -EINVAL;
+	}
+
+	return wcd_spi_bus_gwrite(context, data, wcd_spi->reg_bytes,
+				  data + wcd_spi->reg_bytes,
+				  count - wcd_spi->reg_bytes);
+}
+
+static int wcd_spi_bus_read(void *context, const void *reg,
+			    size_t reg_len, void *val,
+			    size_t val_len)
+{
+	struct device *dev = context;
+	struct spi_device *spi = to_spi_device(dev);
+	struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
+	struct spi_transfer *tx_xfer = &wcd_spi->xfer2[0];
+	struct spi_transfer *rx_xfer = &wcd_spi->xfer2[1];
+	u8 tx_buf[WCD_SPI_CMD_IRR_LEN];
+
+	if (!reg || !val || reg_len != wcd_spi->reg_bytes ||
+	    val_len != wcd_spi->val_bytes) {
+		dev_err(&spi->dev,
+			"%s: Invalid input, reg_len = %zd, val_len = %zd\n",
+			__func__, reg_len, val_len);
+		return -EINVAL;
+	}
+
+	memset(tx_buf, 0, sizeof(tx_buf));
+	tx_buf[0] = WCD_SPI_CMD_IRR;
+	tx_buf[1] = *((u8 *)reg);
+
+	wcd_spi_reinit_xfer(tx_xfer);
+	tx_xfer->tx_buf = tx_buf;
+	tx_xfer->rx_buf = NULL;
+	tx_xfer->len = WCD_SPI_CMD_IRR_LEN;
+
+	wcd_spi_reinit_xfer(rx_xfer);
+	rx_xfer->tx_buf = NULL;
+	rx_xfer->rx_buf = val;
+	rx_xfer->len = val_len;
+
+	return spi_sync(spi, &wcd_spi->msg2);
+}
+
+static struct regmap_bus wcd_spi_regmap_bus = {
+	.write = wcd_spi_bus_write,
+	.gather_write = wcd_spi_bus_gwrite,
+	.read = wcd_spi_bus_read,
+	.reg_format_endian_default = REGMAP_ENDIAN_NATIVE,
+	.val_format_endian_default = REGMAP_ENDIAN_BIG,
+};
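+
+/*
+ * Wire framing assumed by the bus ops above, as derived from the code
+ * (not from a datasheet): a register write is sent as one buffer of
+ * [WCD_SPI_CMD_IRW][1-byte reg][val_bytes of value], while a register
+ * read clocks out [WCD_SPI_CMD_IRR][1-byte reg] and then reads the
+ * value in a second transfer of the same spi message.
+ */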
+
+static int wcd_spi_state_show(struct seq_file *f, void *ptr)
+{
+	struct spi_device *spi = f->private;
+	struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
+	const char *clk_state, *clk_mutex, *xfer_mutex;
+
+	if (test_bit(WCD_SPI_CLK_STATE_ENABLED, &wcd_spi->status_mask))
+		clk_state = "enabled";
+	else
+		clk_state = "disabled";
+
+	clk_mutex = mutex_is_locked(&wcd_spi->clk_mutex) ?
+		    "locked" : "unlocked";
+
+	xfer_mutex = mutex_is_locked(&wcd_spi->xfer_mutex) ?
+		     "locked" : "unlocked";
+
+	seq_printf(f, "clk_state = %s\nclk_users = %d\n"
+		   "clk_mutex = %s\nxfer_mutex = %s\n",
+		   clk_state, wcd_spi->clk_users, clk_mutex,
+		   xfer_mutex);
+	return 0;
+}
+
+static int wcd_spi_state_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, wcd_spi_state_show, inode->i_private);
+}
+
+static const struct file_operations state_fops = {
+	.open = wcd_spi_state_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
+
+static ssize_t wcd_spi_debugfs_mem_read(struct file *file, char __user *ubuf,
+					size_t count, loff_t *ppos)
+{
+	struct spi_device *spi = file->private_data;
+	struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
+	struct wcd_spi_debug_data *dbg_data = &wcd_spi->debug_data;
+	struct wcd_spi_msg msg;
+	ssize_t buf_size, read_count = 0;
+	char *buf;
+	int ret;
+
+	if (*ppos < 0 || !count)
+		return -EINVAL;
+
+	if (dbg_data->size == 0 || dbg_data->addr == 0) {
+		dev_err(&spi->dev,
+			"%s: Invalid request, size = %u, addr = 0x%x\n",
+			__func__, dbg_data->size, dbg_data->addr);
+		return 0;
+	}
+
+	buf_size = count < dbg_data->size ? count : dbg_data->size;
+	buf = kzalloc(buf_size, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	msg.data = buf;
+	msg.remote_addr = dbg_data->addr;
+	msg.len = buf_size;
+	msg.flags = 0;
+
+	ret = wcd_spi_data_read(spi, &msg);
+	if (IS_ERR_VALUE(ret)) {
+		dev_err(&spi->dev,
+			"%s: Failed to read %zu bytes from addr 0x%x\n",
+			__func__, buf_size, msg.remote_addr);
+		goto done;
+	}
+
+	read_count = simple_read_from_buffer(ubuf, count, ppos, buf, buf_size);
+
+done:
+	kfree(buf);
+	if (ret < 0)
+		return ret;
+	else
+		return read_count;
+}
+
+static const struct file_operations mem_read_fops = {
+	.open = simple_open,
+	.read = wcd_spi_debugfs_mem_read,
+};
+
+static int wcd_spi_debugfs_init(struct spi_device *spi)
+{
+	struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
+	struct wcd_spi_debug_data *dbg_data = &wcd_spi->debug_data;
+	int rc = 0;
+
+	dbg_data->dir = debugfs_create_dir("wcd_spi", NULL);
+	if (IS_ERR_OR_NULL(dbg_data->dir)) {
+		dbg_data->dir = NULL;
+		rc = -ENODEV;
+		goto done;
+	}
+
+	debugfs_create_file("state", 0444, dbg_data->dir, spi, &state_fops);
+	debugfs_create_u32("addr", 0644, dbg_data->dir,
+			   &dbg_data->addr);
+	debugfs_create_u32("size", 0644, dbg_data->dir,
+			   &dbg_data->size);
+
+	debugfs_create_file("mem_read", 0444, dbg_data->dir,
+			    spi, &mem_read_fops);
+done:
+	return rc;
+}
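+
+/*
+ * Debugfs usage sketch (illustrative; assumes debugfs is mounted at
+ * /sys/kernel/debug and the address/size written are hypothetical):
+ *
+ *	echo 0x100000 > /sys/kernel/debug/wcd_spi/addr
+ *	echo 64 > /sys/kernel/debug/wcd_spi/size
+ *	cat /sys/kernel/debug/wcd_spi/mem_read
+ */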
+
+static const struct reg_default wcd_spi_defaults[] = {
+	{WCD_SPI_SLAVE_SANITY, 0xDEADBEEF},
+	{WCD_SPI_SLAVE_DEVICE_ID, 0x00500000},
+	{WCD_SPI_SLAVE_STATUS, 0x80100000},
+	{WCD_SPI_SLAVE_CONFIG, 0x0F200808},
+	{WCD_SPI_SLAVE_SW_RESET, 0x00000000},
+	{WCD_SPI_SLAVE_IRQ_STATUS, 0x00000000},
+	{WCD_SPI_SLAVE_IRQ_EN, 0x00000000},
+	{WCD_SPI_SLAVE_IRQ_CLR, 0x00000000},
+	{WCD_SPI_SLAVE_IRQ_FORCE, 0x00000000},
+	{WCD_SPI_SLAVE_TX, 0x00000000},
+	{WCD_SPI_SLAVE_TEST_BUS_DATA, 0x00000000},
+	{WCD_SPI_SLAVE_TEST_BUS_CTRL, 0x00000000},
+	{WCD_SPI_SLAVE_SW_RST_IRQ, 0x00000000},
+	{WCD_SPI_SLAVE_CHAR_CFG, 0x00000000},
+	{WCD_SPI_SLAVE_CHAR_DATA_MOSI, 0x00000000},
+	{WCD_SPI_SLAVE_CHAR_DATA_CS_N, 0x00000000},
+	{WCD_SPI_SLAVE_CHAR_DATA_MISO, 0x00000000},
+	{WCD_SPI_SLAVE_TRNS_BYTE_CNT, 0x00000000},
+	{WCD_SPI_SLAVE_TRNS_LEN, 0x00000000},
+	{WCD_SPI_SLAVE_FIFO_LEVEL, 0x00000000},
+	{WCD_SPI_SLAVE_GENERICS, 0x80000000},
+	{WCD_SPI_SLAVE_EXT_BASE_ADDR, 0x00000000},
+};
+
+static bool wcd_spi_is_volatile_reg(struct device *dev,
+				    unsigned int reg)
+{
+	switch (reg) {
+	case WCD_SPI_SLAVE_SANITY:
+	case WCD_SPI_SLAVE_STATUS:
+	case WCD_SPI_SLAVE_IRQ_STATUS:
+	case WCD_SPI_SLAVE_TX:
+	case WCD_SPI_SLAVE_SW_RST_IRQ:
+	case WCD_SPI_SLAVE_TRNS_BYTE_CNT:
+	case WCD_SPI_SLAVE_FIFO_LEVEL:
+	case WCD_SPI_SLAVE_GENERICS:
+		return true;
+	}
+
+	return false;
+}
+
+static bool wcd_spi_is_readable_reg(struct device *dev,
+				    unsigned int reg)
+{
+	switch (reg) {
+	case WCD_SPI_SLAVE_SW_RESET:
+	case WCD_SPI_SLAVE_IRQ_CLR:
+	case WCD_SPI_SLAVE_IRQ_FORCE:
+		return false;
+	}
+
+	return true;
+}
+
+static struct regmap_config wcd_spi_regmap_cfg = {
+	.reg_bits = 8,
+	.val_bits = 32,
+	.cache_type = REGCACHE_RBTREE,
+	.reg_defaults = wcd_spi_defaults,
+	.num_reg_defaults = ARRAY_SIZE(wcd_spi_defaults),
+	.max_register = WCD_SPI_MAX_REGISTER,
+	.volatile_reg = wcd_spi_is_volatile_reg,
+	.readable_reg = wcd_spi_is_readable_reg,
+};
+
+static int wdsp_spi_init(struct device *dev, void *priv_data)
+{
+	struct spi_device *spi = to_spi_device(dev);
+	int ret;
+
+	ret = wcd_spi_init(spi);
+	if (IS_ERR_VALUE(ret))
+		dev_err(&spi->dev, "%s: Init failed, err = %d\n",
+			__func__, ret);
+	return ret;
+}
+
+static int wdsp_spi_deinit(struct device *dev, void *priv_data)
+{
+	struct spi_device *spi = to_spi_device(dev);
+	struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
+
+	/*
+	 * Deinit means the hardware is reset. Mark the cache
+	 * as dirty here, so init will sync the cache
+	 */
+	regcache_mark_dirty(wcd_spi->regmap);
+
+	return 0;
+}
+
+static struct wdsp_cmpnt_ops wdsp_spi_ops = {
+	.init = wdsp_spi_init,
+	.deinit = wdsp_spi_deinit,
+	.event_handler = wdsp_spi_event_handler,
+};
+
+static int wcd_spi_component_bind(struct device *dev,
+				  struct device *master,
+				  void *data)
+{
+	struct spi_device *spi = to_spi_device(dev);
+	struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
+	int ret = 0;
+
+	wcd_spi->m_dev = master;
+	wcd_spi->m_ops = data;
+
+	if (wcd_spi->m_ops &&
+	    wcd_spi->m_ops->register_cmpnt_ops)
+		ret = wcd_spi->m_ops->register_cmpnt_ops(master, dev,
+							 wcd_spi,
+							 &wdsp_spi_ops);
+	if (ret) {
+		dev_err(dev, "%s: register_cmpnt_ops failed, err = %d\n",
+			__func__, ret);
+		goto done;
+	}
+
+	wcd_spi->reg_bytes = DIV_ROUND_UP(wcd_spi_regmap_cfg.reg_bits, 8);
+	wcd_spi->val_bytes = DIV_ROUND_UP(wcd_spi_regmap_cfg.val_bits, 8);
+
+	wcd_spi->regmap = devm_regmap_init(&spi->dev, &wcd_spi_regmap_bus,
+					   &spi->dev, &wcd_spi_regmap_cfg);
+	if (IS_ERR(wcd_spi->regmap)) {
+		ret = PTR_ERR(wcd_spi->regmap);
+		dev_err(&spi->dev, "%s: Failed to allocate regmap, err = %d\n",
+			__func__, ret);
+		goto done;
+	}
+
+	if (wcd_spi_debugfs_init(spi))
+		dev_err(&spi->dev, "%s: Failed debugfs init\n", __func__);
+
+	spi_message_init(&wcd_spi->msg1);
+	spi_message_add_tail(&wcd_spi->xfer1, &wcd_spi->msg1);
+
+	spi_message_init(&wcd_spi->msg2);
+	spi_message_add_tail(&wcd_spi->xfer2[0], &wcd_spi->msg2);
+	spi_message_add_tail(&wcd_spi->xfer2[1], &wcd_spi->msg2);
+
+	/* Pre-allocate the buffers */
+	wcd_spi->tx_buf = kzalloc(WCD_SPI_RW_MAX_BUF_SIZE,
+				  GFP_KERNEL | GFP_DMA);
+	if (!wcd_spi->tx_buf) {
+		ret = -ENOMEM;
+		goto done;
+	}
+
+	wcd_spi->rx_buf = kzalloc(WCD_SPI_RW_MAX_BUF_SIZE,
+				  GFP_KERNEL | GFP_DMA);
+	if (!wcd_spi->rx_buf) {
+		kfree(wcd_spi->tx_buf);
+		wcd_spi->tx_buf = NULL;
+		ret = -ENOMEM;
+		goto done;
+	}
+done:
+	return ret;
+}
+
+static void wcd_spi_component_unbind(struct device *dev,
+				     struct device *master,
+				     void *data)
+{
+	struct spi_device *spi = to_spi_device(dev);
+	struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
+
+	wcd_spi->m_dev = NULL;
+	wcd_spi->m_ops = NULL;
+
+	spi_transfer_del(&wcd_spi->xfer1);
+	spi_transfer_del(&wcd_spi->xfer2[0]);
+	spi_transfer_del(&wcd_spi->xfer2[1]);
+
+	kfree(wcd_spi->tx_buf);
+	kfree(wcd_spi->rx_buf);
+	wcd_spi->tx_buf = NULL;
+	wcd_spi->rx_buf = NULL;
+}
+
+static const struct component_ops wcd_spi_component_ops = {
+	.bind = wcd_spi_component_bind,
+	.unbind = wcd_spi_component_unbind,
+};
+
+static int wcd_spi_probe(struct spi_device *spi)
+{
+	struct wcd_spi_priv *wcd_spi;
+	int ret = 0;
+
+	wcd_spi = devm_kzalloc(&spi->dev, sizeof(*wcd_spi),
+			       GFP_KERNEL);
+	if (!wcd_spi)
+		return -ENOMEM;
+
+	ret = of_property_read_u32(spi->dev.of_node,
+				   "qcom,mem-base-addr",
+				   &wcd_spi->mem_base_addr);
+	if (IS_ERR_VALUE(ret)) {
+		dev_err(&spi->dev, "%s: Missing %s DT entry\n",
+			__func__, "qcom,mem-base-addr");
+		goto err_ret;
+	}
+
+	dev_dbg(&spi->dev,
+		"%s: mem_base_addr 0x%x\n", __func__, wcd_spi->mem_base_addr);
+
+	mutex_init(&wcd_spi->clk_mutex);
+	mutex_init(&wcd_spi->xfer_mutex);
+	INIT_DELAYED_WORK(&wcd_spi->clk_dwork, wcd_spi_clk_work);
+	init_completion(&wcd_spi->resume_comp);
+
+	wcd_spi->spi = spi;
+	spi_set_drvdata(spi, wcd_spi);
+
+	ret = component_add(&spi->dev, &wcd_spi_component_ops);
+	if (ret) {
+		dev_err(&spi->dev, "%s: component_add failed err = %d\n",
+			__func__, ret);
+		goto err_component_add;
+	}
+
+	return ret;
+
+err_component_add:
+	mutex_destroy(&wcd_spi->clk_mutex);
+	mutex_destroy(&wcd_spi->xfer_mutex);
+err_ret:
+	devm_kfree(&spi->dev, wcd_spi);
+	spi_set_drvdata(spi, NULL);
+	return ret;
+}
+
+static int wcd_spi_remove(struct spi_device *spi)
+{
+	struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
+
+	component_del(&spi->dev, &wcd_spi_component_ops);
+
+	mutex_destroy(&wcd_spi->clk_mutex);
+	mutex_destroy(&wcd_spi->xfer_mutex);
+
+	devm_kfree(&spi->dev, wcd_spi);
+	spi_set_drvdata(spi, NULL);
+
+	return 0;
+}
+
+#ifdef CONFIG_PM
+static int wcd_spi_suspend(struct device *dev)
+{
+	struct spi_device *spi = to_spi_device(dev);
+	struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
+	int rc = 0;
+
+	WCD_SPI_MUTEX_LOCK(spi, wcd_spi->clk_mutex);
+	if (!wcd_spi_can_suspend(wcd_spi)) {
+		rc = -EBUSY;
+		goto done;
+	}
+
+	/*
+	 * If we are here, it is okay to let the suspend go
+	 * through for this driver. But, still need to notify
+	 * the master to make sure all other components can suspend
+	 * as well.
+	 */
+	if (wcd_spi->m_dev && wcd_spi->m_ops &&
+	    wcd_spi->m_ops->suspend) {
+		WCD_SPI_MUTEX_UNLOCK(spi, wcd_spi->clk_mutex);
+		rc = wcd_spi->m_ops->suspend(wcd_spi->m_dev);
+		WCD_SPI_MUTEX_LOCK(spi, wcd_spi->clk_mutex);
+	}
+
+	if (rc == 0)
+		set_bit(WCD_SPI_IS_SUSPENDED, &wcd_spi->status_mask);
+	else
+		dev_dbg(&spi->dev, "%s: cannot suspend, err = %d\n",
+			__func__, rc);
+done:
+	WCD_SPI_MUTEX_UNLOCK(spi, wcd_spi->clk_mutex);
+	return rc;
+}
+
+static int wcd_spi_resume(struct device *dev)
+{
+	struct spi_device *spi = to_spi_device(dev);
+	struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
+
+	WCD_SPI_MUTEX_LOCK(spi, wcd_spi->clk_mutex);
+	clear_bit(WCD_SPI_IS_SUSPENDED, &wcd_spi->status_mask);
+	complete(&wcd_spi->resume_comp);
+	WCD_SPI_MUTEX_UNLOCK(spi, wcd_spi->clk_mutex);
+
+	return 0;
+}
+
+static const struct dev_pm_ops wcd_spi_pm_ops = {
+	.suspend = wcd_spi_suspend,
+	.resume = wcd_spi_resume,
+};
+#endif
+
+static const struct of_device_id wcd_spi_of_match[] = {
+	{ .compatible = "qcom,wcd-spi-v2", },
+	{ }
+};
+MODULE_DEVICE_TABLE(of, wcd_spi_of_match);
+
+static struct spi_driver wcd_spi_driver = {
+	.driver = {
+		.name = "wcd-spi-v2",
+		.of_match_table = wcd_spi_of_match,
+#ifdef CONFIG_PM
+		.pm = &wcd_spi_pm_ops,
+#endif
+	},
+	.probe = wcd_spi_probe,
+	.remove = wcd_spi_remove,
+};
+
+module_spi_driver(wcd_spi_driver);
+
+MODULE_DESCRIPTION("WCD SPI driver");
+MODULE_LICENSE("GPL v2");
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/sound/soc/codecs/wcd-spi-registers.h	2019-01-22 16:16:29.547301138 +0100
@@ -0,0 +1,43 @@
+/*
+ * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __WCD_SPI_REGISTERS_H__
+#define __WCD_SPI_REGISTERS_H__
+
+#include <linux/regmap.h>
+
+#define WCD_SPI_SLAVE_SANITY         (0x00)
+#define WCD_SPI_SLAVE_DEVICE_ID      (0x04)
+#define WCD_SPI_SLAVE_STATUS         (0x08)
+#define WCD_SPI_SLAVE_CONFIG         (0x0c)
+#define WCD_SPI_SLAVE_SW_RESET       (0x10)
+#define WCD_SPI_SLAVE_IRQ_STATUS     (0x14)
+#define WCD_SPI_SLAVE_IRQ_EN         (0x18)
+#define WCD_SPI_SLAVE_IRQ_CLR        (0x1c)
+#define WCD_SPI_SLAVE_IRQ_FORCE      (0x20)
+#define WCD_SPI_SLAVE_TX             (0x24)
+#define WCD_SPI_SLAVE_TEST_BUS_DATA  (0x2c)
+#define WCD_SPI_SLAVE_TEST_BUS_CTRL  (0x30)
+#define WCD_SPI_SLAVE_SW_RST_IRQ     (0x34)
+#define WCD_SPI_SLAVE_CHAR_CFG       (0x38)
+#define WCD_SPI_SLAVE_CHAR_DATA_MOSI (0x3c)
+#define WCD_SPI_SLAVE_CHAR_DATA_CS_N (0x40)
+#define WCD_SPI_SLAVE_CHAR_DATA_MISO (0x44)
+#define WCD_SPI_SLAVE_TRNS_BYTE_CNT  (0x4c)
+#define WCD_SPI_SLAVE_TRNS_LEN       (0x50)
+#define WCD_SPI_SLAVE_FIFO_LEVEL     (0x54)
+#define WCD_SPI_SLAVE_GENERICS       (0x58)
+#define WCD_SPI_SLAVE_EXT_BASE_ADDR  (0x5c)
+#define WCD_SPI_MAX_REGISTER         (0x5F)
+
+#endif /* End __WCD_SPI_REGISTERS_H__ */
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/sound/soc/codecs/wsa881x.c	2019-10-29 09:26:26.129227506 +0100
@@ -0,0 +1,1441 @@
+/*
+ * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+#include <linux/device.h>
+#include <linux/printk.h>
+#include <linux/bitops.h>
+#include <linux/regulator/consumer.h>
+#include <linux/pm_runtime.h>
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/gpio.h>
+#include <linux/of_gpio.h>
+#include <linux/regmap.h>
+#include <linux/debugfs.h>
+#include <linux/soundwire/soundwire.h>
+#include <linux/mfd/msm-cdc-pinctrl.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+#include <sound/soc-dapm.h>
+#include <sound/tlv.h>
+#include "wsa881x.h"
+#include "wsa881x-temp-sensor.h"
+
+#define WSA881X_NUM_RETRY	5
+
+enum {
+	G_18DB = 0,
+	G_16P5DB,
+	G_15DB,
+	G_13P5DB,
+	G_12DB,
+	G_10P5DB,
+	G_9DB,
+	G_7P5DB,
+	G_6DB,
+	G_4P5DB,
+	G_3DB,
+	G_1P5DB,
+	G_0DB,
+};
+
+enum {
+	DISABLE = 0,
+	ENABLE,
+};
+
+enum {
+	SWR_DAC_PORT,
+	SWR_COMP_PORT,
+	SWR_BOOST_PORT,
+	SWR_VISENSE_PORT,
+};
+
+struct swr_port {
+	u8 port_id;
+	u8 ch_mask;
+	u32 ch_rate;
+	u8 num_ch;
+};
+
+enum {
+	WSA881X_DEV_DOWN,
+	WSA881X_DEV_UP,
+};
+
+/*
+ * Private data structure for wsa881x. All parameters related to the
+ * WSA881X codec need to be defined here.
+ */
+struct wsa881x_priv {
+	struct regmap *regmap;
+	struct device *dev;
+	struct swr_device *swr_slave;
+	struct snd_soc_codec *codec;
+	bool comp_enable;
+	bool boost_enable;
+	bool visense_enable;
+	u8 pa_gain;
+	struct swr_port port[WSA881X_MAX_SWR_PORTS];
+	int pd_gpio;
+	struct wsa881x_tz_priv tz_pdata;
+	int bg_cnt;
+	int clk_cnt;
+	int version;
+	struct mutex bg_lock;
+	struct mutex res_lock;
+	struct snd_info_entry *entry;
+	struct snd_info_entry *version_entry;
+	int state;
+	struct delayed_work ocp_ctl_work;
+	struct device_node *wsa_rst_np;
+	int pa_mute;
+};
+
+#define SWR_SLV_MAX_REG_ADDR	0x390
+#define SWR_SLV_START_REG_ADDR	0x40
+#define SWR_SLV_MAX_BUF_LEN	20
+#define BYTES_PER_LINE		12
+#define SWR_SLV_RD_BUF_LEN	8
+#define SWR_SLV_WR_BUF_LEN	32
+#define SWR_SLV_MAX_DEVICES	2
+
+#define WSA881X_VERSION_ENTRY_SIZE 27
+#define WSA881X_OCP_CTL_TIMER_SEC 2
+#define WSA881X_OCP_CTL_TEMP_CELSIUS 25
+#define WSA881X_OCP_CTL_POLL_TIMER_SEC 60
+
+static int wsa881x_ocp_poll_timer_sec = WSA881X_OCP_CTL_POLL_TIMER_SEC;
+module_param(wsa881x_ocp_poll_timer_sec, int,
+		S_IRUGO | S_IWUSR | S_IWGRP);
+MODULE_PARM_DESC(wsa881x_ocp_poll_timer_sec, "timer for ocp ctl polling");
+
+static struct wsa881x_priv *dbgwsa881x;
+static struct dentry *debugfs_wsa881x_dent;
+static struct dentry *debugfs_peek;
+static struct dentry *debugfs_poke;
+static struct dentry *debugfs_reg_dump;
+static unsigned int read_data;
+static unsigned int devnum;
+
+static int32_t wsa881x_resource_acquire(struct snd_soc_codec *codec,
+						bool enable);
+
+static const char * const wsa_pa_gain_text[] = {
+	"G_18_DB", "G_16P5_DB", "G_15_DB", "G_13P5_DB", "G_12_DB", "G_10P5_DB",
+	"G_9_DB", "G_7P5_DB", "G_6_DB", "G_4P5_DB", "G_3_DB", "G_1P5_DB",
+	"G_0_DB"
+};
+
+static const struct soc_enum wsa_pa_gain_enum =
+	SOC_ENUM_SINGLE_EXT(ARRAY_SIZE(wsa_pa_gain_text), wsa_pa_gain_text);
+
+static int wsa_pa_gain_get(struct snd_kcontrol *kcontrol,
+			   struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
+	struct wsa881x_priv *wsa881x = snd_soc_codec_get_drvdata(codec);
+
+	ucontrol->value.integer.value[0] = wsa881x->pa_gain;
+
+	dev_dbg(codec->dev, "%s: PA gain = 0x%x\n", __func__, wsa881x->pa_gain);
+
+	return 0;
+}
+
+static int wsa_pa_gain_put(struct snd_kcontrol *kcontrol,
+			   struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
+	struct wsa881x_priv *wsa881x = snd_soc_codec_get_drvdata(codec);
+
+	dev_dbg(codec->dev, "%s: ucontrol->value.integer.value[0] = %ld\n",
+		__func__, ucontrol->value.integer.value[0]);
+
+	wsa881x->pa_gain = ucontrol->value.integer.value[0];
+
+	return 0;
+}
+
+static int wsa881x_get_mute(struct snd_kcontrol *kcontrol,
+			       struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
+	struct wsa881x_priv *wsa881x = snd_soc_codec_get_drvdata(codec);
+
+	ucontrol->value.integer.value[0] = wsa881x->pa_mute;
+
+	return 0;
+}
+
+static int wsa881x_set_mute(struct snd_kcontrol *kcontrol,
+			       struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
+	struct wsa881x_priv *wsa881x = snd_soc_codec_get_drvdata(codec);
+	int value = ucontrol->value.integer.value[0];
+
+	dev_dbg(codec->dev, "%s: mute current %d, new %d\n",
+		__func__, wsa881x->pa_mute, value);
+
+	if (value)
+		snd_soc_update_bits(codec, WSA881X_SPKR_DRV_EN, 0x80, 0x00);
+	wsa881x->pa_mute = value;
+
+	return 0;
+}
+
+static const struct snd_kcontrol_new wsa_snd_controls[] = {
+	SOC_ENUM_EXT("WSA PA Gain", wsa_pa_gain_enum,
+		     wsa_pa_gain_get, wsa_pa_gain_put),
+	SOC_SINGLE_EXT("WSA PA Mute", SND_SOC_NOPM, 0, 1, 0,
+		wsa881x_get_mute, wsa881x_set_mute),
+};
+
+static int codec_debug_open(struct inode *inode, struct file *file)
+{
+	file->private_data = inode->i_private;
+	return 0;
+}
+
+static int get_parameters(char *buf, u32 *param1, int num_of_par)
+{
+	char *token;
+	int base, cnt;
+
+	token = strsep(&buf, " ");
+	for (cnt = 0; cnt < num_of_par; cnt++) {
+		if (token) {
+			if ((token[1] == 'x') || (token[1] == 'X'))
+				base = 16;
+			else
+				base = 10;
+
+			if (kstrtou32(token, base, &param1[cnt]) != 0)
+				return -EINVAL;
+
+			token = strsep(&buf, " ");
+		} else {
+			return -EINVAL;
+		}
+	}
+	return 0;
+}
+
+static ssize_t wsa881x_codec_version_read(struct snd_info_entry *entry,
+			       void *file_private_data, struct file *file,
+			       char __user *buf, size_t count, loff_t pos)
+{
+	struct wsa881x_priv *wsa881x;
+	char buffer[WSA881X_VERSION_ENTRY_SIZE];
+	int len;
+
+	wsa881x = (struct wsa881x_priv *) entry->private_data;
+	if (!wsa881x) {
+		pr_err("%s: wsa881x priv is null\n", __func__);
+		return -EINVAL;
+	}
+
+	len = snprintf(buffer, sizeof(buffer), "WSA881X-SOUNDWIRE_2_0\n");
+
+	return simple_read_from_buffer(buf, count, &pos, buffer, len);
+}
+
+static struct snd_info_entry_ops wsa881x_codec_info_ops = {
+	.read = wsa881x_codec_version_read,
+};
+
+/*
+ * wsa881x_codec_info_create_codec_entry - creates wsa881x module entry
+ * @codec_root: The parent directory
+ * @codec: Codec instance
+ *
+ * Creates the wsa881x module entry and its version entry under the
+ * given parent directory.
+ *
+ * Return: 0 on success or negative error code on failure.
+ */
+int wsa881x_codec_info_create_codec_entry(struct snd_info_entry *codec_root,
+					  struct snd_soc_codec *codec)
+{
+	struct snd_info_entry *version_entry;
+	struct wsa881x_priv *wsa881x;
+	struct snd_soc_card *card;
+	char name[80];
+
+	if (!codec_root || !codec)
+		return -EINVAL;
+
+	wsa881x = snd_soc_codec_get_drvdata(codec);
+	card = codec->component.card;
+	snprintf(name, sizeof(name), "%s.%x", "wsa881x",
+		 (u32)wsa881x->swr_slave->addr);
+
+	wsa881x->entry = snd_register_module_info(codec_root->module,
+						  (const char *)name,
+						  codec_root);
+	if (!wsa881x->entry) {
+		dev_dbg(codec->dev, "%s: failed to create wsa881x entry\n",
+			__func__);
+		return -ENOMEM;
+	}
+
+	version_entry = snd_info_create_card_entry(card->snd_card,
+						   "version",
+						   wsa881x->entry);
+	if (!version_entry) {
+		dev_dbg(codec->dev, "%s: failed to create wsa881x version entry\n",
+			__func__);
+		return -ENOMEM;
+	}
+
+	version_entry->private_data = wsa881x;
+	version_entry->size = WSA881X_VERSION_ENTRY_SIZE;
+	version_entry->content = SNDRV_INFO_CONTENT_DATA;
+	version_entry->c.ops = &wsa881x_codec_info_ops;
+
+	if (snd_info_register(version_entry) < 0) {
+		snd_info_free_entry(version_entry);
+		return -ENOMEM;
+	}
+	wsa881x->version_entry = version_entry;
+
+	return 0;
+}
+EXPORT_SYMBOL(wsa881x_codec_info_create_codec_entry);
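+
+/*
+ * Caller sketch (illustrative): a machine driver holding the parent
+ * "codec_root" snd_info_entry would typically do:
+ *
+ *	ret = wsa881x_codec_info_create_codec_entry(codec_root, codec);
+ *	if (ret < 0)
+ *		dev_dbg(codec->dev, "wsa881x info entry not created\n");
+ */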
+
+static bool is_swr_slv_reg_readable(int reg)
+{
+	bool ret = true;
+
+	if (((reg > 0x46) && (reg < 0x4A)) ||
+	    ((reg > 0x4A) && (reg < 0x50)) ||
+	    ((reg > 0x55) && (reg < 0xE0)) ||
+	    ((reg > 0xE0) && (reg < 0xF0)) ||
+	    ((reg > 0xF0) && (reg < 0x100)) ||
+	    ((reg > 0x105) && (reg < 0x120)) ||
+	    ((reg > 0x128) && (reg < 0x130)) ||
+	    ((reg > 0x138) && (reg < 0x200)) ||
+	    ((reg > 0x205) && (reg < 0x220)) ||
+	    ((reg > 0x228) && (reg < 0x230)) ||
+	    ((reg > 0x238) && (reg < 0x300)) ||
+	    ((reg > 0x305) && (reg < 0x320)) ||
+	    ((reg > 0x328) && (reg < 0x330)) ||
+	    ((reg > 0x338) && (reg < 0x400)) ||
+	    ((reg > 0x405) && (reg < 0x420)))
+		ret = false;
+
+	return ret;
+}
+
+static ssize_t wsa881x_swrslave_reg_show(char __user *ubuf, size_t count,
+					  loff_t *ppos)
+{
+	int i, reg_val, len;
+	ssize_t total = 0;
+	char tmp_buf[SWR_SLV_MAX_BUF_LEN];
+
+	if (!ubuf || !ppos || (devnum == 0))
+		return 0;
+
+	for (i = (((int) *ppos / BYTES_PER_LINE) + SWR_SLV_START_REG_ADDR);
+		i <= SWR_SLV_MAX_REG_ADDR; i++) {
+		if (!is_swr_slv_reg_readable(i))
+			continue;
+		swr_read(dbgwsa881x->swr_slave, devnum,
+			i, &reg_val, 1);
+		len = snprintf(tmp_buf, sizeof(tmp_buf), "0x%.3x: 0x%.2x\n",
+			       i, (reg_val & 0xFF));
+		if ((total + len) >= count - 1)
+			break;
+		if (copy_to_user((ubuf + total), tmp_buf, len)) {
+			pr_err("%s: fail to copy reg dump\n", __func__);
+			total = -EFAULT;
+			goto copy_err;
+		}
+		*ppos += len;
+		total += len;
+	}
+
+copy_err:
+	return total;
+}
+
+static ssize_t codec_debug_read(struct file *file, char __user *ubuf,
+				size_t count, loff_t *ppos)
+{
+	char lbuf[SWR_SLV_RD_BUF_LEN];
+	char *access_str;
+	ssize_t ret_cnt;
+
+	if (!count || !file || !ppos || !ubuf)
+		return -EINVAL;
+
+	access_str = file->private_data;
+	if (*ppos < 0)
+		return -EINVAL;
+
+	if (!strcmp(access_str, "swrslave_peek")) {
+		snprintf(lbuf, sizeof(lbuf), "0x%x\n", (read_data & 0xFF));
+		ret_cnt = simple_read_from_buffer(ubuf, count, ppos, lbuf,
+					       strnlen(lbuf, 7));
+	} else if (!strcmp(access_str, "swrslave_reg_dump")) {
+		ret_cnt = wsa881x_swrslave_reg_show(ubuf, count, ppos);
+	} else {
+		pr_err("%s: %s not permitted to read\n", __func__, access_str);
+		ret_cnt = -EPERM;
+	}
+	return ret_cnt;
+}
+
+static ssize_t codec_debug_write(struct file *filp,
+	const char __user *ubuf, size_t cnt, loff_t *ppos)
+{
+	char lbuf[SWR_SLV_WR_BUF_LEN];
+	int rc;
+	u32 param[5];
+	char *access_str;
+
+	if (!filp || !ppos || !ubuf)
+		return -EINVAL;
+
+	access_str = filp->private_data;
+	if (cnt > sizeof(lbuf) - 1)
+		return -EINVAL;
+
+	rc = copy_from_user(lbuf, ubuf, cnt);
+	if (rc)
+		return -EFAULT;
+
+	lbuf[cnt] = '\0';
+	if (!strcmp(access_str, "swrslave_poke")) {
+		/* write */
+		rc = get_parameters(lbuf, param, 3);
+		if ((param[0] <= SWR_SLV_MAX_REG_ADDR) && (param[1] <= 0xFF) &&
+			(rc == 0))
+			swr_write(dbgwsa881x->swr_slave, param[2],
+				param[0], &param[1]);
+		else
+			rc = -EINVAL;
+	} else if (!strcmp(access_str, "swrslave_peek")) {
+		/* read */
+		rc = get_parameters(lbuf, param, 2);
+		if ((param[0] <= SWR_SLV_MAX_REG_ADDR) && (rc == 0))
+			swr_read(dbgwsa881x->swr_slave, param[1],
+				param[0], &read_data, 1);
+		else
+			rc = -EINVAL;
+	} else if (!strcmp(access_str, "swrslave_reg_dump")) {
+		/* reg dump */
+		rc = get_parameters(lbuf, param, 1);
+		if ((rc == 0) && (param[0] > 0) &&
+		    (param[0] <= SWR_SLV_MAX_DEVICES))
+			devnum = param[0];
+		else
+			rc = -EINVAL;
+	}
+	if (rc == 0)
+		rc = cnt;
+	else
+		pr_err("%s: rc = %d\n", __func__, rc);
+
+	return rc;
+}
+
+static const struct file_operations codec_debug_ops = {
+	.open = codec_debug_open,
+	.write = codec_debug_write,
+	.read = codec_debug_read,
+};
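+
+/*
+ * Debugfs usage sketch for the files registered with codec_debug_ops
+ * (illustrative; the register and device numbers are hypothetical):
+ *
+ *	# select logical device 1, then dump its registers
+ *	echo 1 > /sys/kernel/debug/wsa881x_swr_slave/swrslave_reg_dump
+ *	cat /sys/kernel/debug/wsa881x_swr_slave/swrslave_reg_dump
+ *
+ *	# peek register 0x42 on device 1, then read back the value
+ *	echo 0x42 1 > /sys/kernel/debug/wsa881x_swr_slave/swrslave_peek
+ *	cat /sys/kernel/debug/wsa881x_swr_slave/swrslave_peek
+ */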
+
+static const struct reg_sequence wsa881x_pre_pmu_pa[] = {
+	{WSA881X_SPKR_DRV_GAIN, 0x41, 0},
+	{WSA881X_SPKR_MISC_CTL1, 0x01, 0},
+	{WSA881X_ADC_EN_DET_TEST_I, 0x01, 0},
+	{WSA881X_ADC_EN_MODU_V, 0x02, 0},
+	{WSA881X_ADC_EN_DET_TEST_V, 0x10, 0},
+	{WSA881X_SPKR_PWRSTG_DBG, 0xA0, 0},
+};
+
+static const struct reg_sequence wsa881x_pre_pmu_pa_2_0[] = {
+	{WSA881X_SPKR_DRV_GAIN, 0x41, 0},
+	{WSA881X_SPKR_MISC_CTL1, 0x87, 0},
+};
+
+static const struct reg_sequence wsa881x_post_pmu_pa[] = {
+	{WSA881X_SPKR_PWRSTG_DBG, 0x00, 0},
+	{WSA881X_ADC_EN_DET_TEST_V, 0x00, 0},
+	{WSA881X_ADC_EN_MODU_V, 0x00, 0},
+	{WSA881X_ADC_EN_DET_TEST_I, 0x00, 0},
+};
+
+static const struct reg_sequence wsa881x_vi_txfe_en[] = {
+	{WSA881X_SPKR_PROT_FE_VSENSE_VCM, 0x85, 0},
+	{WSA881X_SPKR_PROT_ATEST2, 0x0A, 0},
+	{WSA881X_SPKR_PROT_FE_GAIN, 0xCF, 0},
+};
+
+static const struct reg_sequence wsa881x_vi_txfe_en_2_0[] = {
+	{WSA881X_SPKR_PROT_FE_VSENSE_VCM, 0x85, 0},
+	{WSA881X_SPKR_PROT_ATEST2, 0x0A, 0},
+	{WSA881X_SPKR_PROT_FE_GAIN, 0x47, 0},
+};
+
+static int wsa881x_boost_ctrl(struct snd_soc_codec *codec, bool enable)
+{
+	dev_dbg(codec->dev, "%s: enable:%d\n", __func__, enable);
+	if (enable)
+		snd_soc_update_bits(codec, WSA881X_BOOST_EN_CTL, 0x80, 0x80);
+	else
+		snd_soc_update_bits(codec, WSA881X_BOOST_EN_CTL, 0x80, 0x00);
+	/*
+	 * 1.5ms sleep is needed after boost enable/disable as per
+	 * HW requirement
+	 */
+	usleep_range(1500, 1510);
+	return 0;
+}
+
+static int wsa881x_visense_txfe_ctrl(struct snd_soc_codec *codec, bool enable,
+				     u8 isense1_gain, u8 isense2_gain,
+				     u8 vsense_gain)
+{
+	struct wsa881x_priv *wsa881x = snd_soc_codec_get_drvdata(codec);
+
+	dev_dbg(codec->dev,
+		"%s: enable:%d, isense1 gain: %d, isense2 gain: %d, vsense_gain %d\n",
+		__func__, enable, isense1_gain, isense2_gain, vsense_gain);
+
+	if (enable) {
+		regmap_multi_reg_write(wsa881x->regmap,
+				wsa881x_vi_txfe_en_2_0,
+				ARRAY_SIZE(wsa881x_vi_txfe_en_2_0));
+	} else {
+		snd_soc_update_bits(codec, WSA881X_SPKR_PROT_FE_VSENSE_VCM,
+				    0x08, 0x08);
+		/*
+		 * 200us sleep is needed after visense txfe disable as per
+		 * HW requirement.
+		 */
+		usleep_range(200, 210);
+		snd_soc_update_bits(codec, WSA881X_SPKR_PROT_FE_GAIN,
+				    0x01, 0x00);
+	}
+	return 0;
+}
+
+static int wsa881x_visense_adc_ctrl(struct snd_soc_codec *codec, bool enable)
+{
+	dev_dbg(codec->dev, "%s: enable:%d\n", __func__, enable);
+	snd_soc_update_bits(codec, WSA881X_ADC_EN_MODU_V, (0x01 << 7),
+			    (enable << 7));
+	snd_soc_update_bits(codec, WSA881X_ADC_EN_MODU_I, (0x01 << 7),
+			    (enable << 7));
+	return 0;
+}
+
+static void wsa881x_bandgap_ctrl(struct snd_soc_codec *codec, bool enable)
+{
+	struct wsa881x_priv *wsa881x = snd_soc_codec_get_drvdata(codec);
+
+	dev_dbg(codec->dev, "%s: enable:%d, bg_count:%d\n", __func__,
+		enable, wsa881x->bg_cnt);
+	mutex_lock(&wsa881x->bg_lock);
+	if (enable) {
+		++wsa881x->bg_cnt;
+		if (wsa881x->bg_cnt == 1) {
+			snd_soc_update_bits(codec, WSA881X_TEMP_OP,
+					    0x08, 0x08);
+			/* 400usec sleep is needed as per HW requirement */
+			usleep_range(400, 410);
+			snd_soc_update_bits(codec, WSA881X_TEMP_OP,
+					    0x04, 0x04);
+		}
+	} else {
+		--wsa881x->bg_cnt;
+		if (wsa881x->bg_cnt <= 0) {
+			WARN_ON(wsa881x->bg_cnt < 0);
+			wsa881x->bg_cnt = 0;
+			snd_soc_update_bits(codec, WSA881X_TEMP_OP, 0x04, 0x00);
+			snd_soc_update_bits(codec, WSA881X_TEMP_OP, 0x08, 0x00);
+		}
+	}
+	mutex_unlock(&wsa881x->bg_lock);
+}
+
+static void wsa881x_clk_ctrl(struct snd_soc_codec *codec, bool enable)
+{
+	struct wsa881x_priv *wsa881x = snd_soc_codec_get_drvdata(codec);
+
+	dev_dbg(codec->dev, "%s: enable:%d, clk_count:%d\n", __func__,
+		enable, wsa881x->clk_cnt);
+	mutex_lock(&wsa881x->res_lock);
+	if (enable) {
+		++wsa881x->clk_cnt;
+		if (wsa881x->clk_cnt == 1) {
+			snd_soc_write(codec, WSA881X_CDC_DIG_CLK_CTL, 0x01);
+			snd_soc_write(codec, WSA881X_CDC_ANA_CLK_CTL, 0x01);
+		}
+	} else {
+		--wsa881x->clk_cnt;
+		if (wsa881x->clk_cnt <= 0) {
+			WARN_ON(wsa881x->clk_cnt < 0);
+			wsa881x->clk_cnt = 0;
+			snd_soc_write(codec, WSA881X_CDC_DIG_CLK_CTL, 0x00);
+			snd_soc_write(codec, WSA881X_CDC_ANA_CLK_CTL, 0x00);
+		}
+	}
+	mutex_unlock(&wsa881x->res_lock);
+}
+
+static int wsa881x_get_compander(struct snd_kcontrol *kcontrol,
+			       struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
+	struct wsa881x_priv *wsa881x = snd_soc_codec_get_drvdata(codec);
+
+	ucontrol->value.integer.value[0] = wsa881x->comp_enable;
+	return 0;
+}
+
+static int wsa881x_set_compander(struct snd_kcontrol *kcontrol,
+			       struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
+	struct wsa881x_priv *wsa881x = snd_soc_codec_get_drvdata(codec);
+	int value = ucontrol->value.integer.value[0];
+
+	dev_dbg(codec->dev, "%s: Compander enable current %d, new %d\n",
+		 __func__, wsa881x->comp_enable, value);
+	wsa881x->comp_enable = value;
+	return 0;
+}
+
+static int wsa881x_get_boost(struct snd_kcontrol *kcontrol,
+			       struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
+	struct wsa881x_priv *wsa881x = snd_soc_codec_get_drvdata(codec);
+
+	ucontrol->value.integer.value[0] = wsa881x->boost_enable;
+	return 0;
+}
+
+static int wsa881x_set_boost(struct snd_kcontrol *kcontrol,
+			       struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
+	struct wsa881x_priv *wsa881x = snd_soc_codec_get_drvdata(codec);
+	int value = ucontrol->value.integer.value[0];
+
+	dev_dbg(codec->dev, "%s: Boost enable current %d, new %d\n",
+		 __func__, wsa881x->boost_enable, value);
+	wsa881x->boost_enable = value;
+	return 0;
+}
+
+static int wsa881x_get_visense(struct snd_kcontrol *kcontrol,
+			       struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
+	struct wsa881x_priv *wsa881x = snd_soc_codec_get_drvdata(codec);
+
+	ucontrol->value.integer.value[0] = wsa881x->visense_enable;
+	return 0;
+}
+
+static int wsa881x_set_visense(struct snd_kcontrol *kcontrol,
+			       struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
+	struct wsa881x_priv *wsa881x = snd_soc_codec_get_drvdata(codec);
+	int value = ucontrol->value.integer.value[0];
+
+	dev_dbg(codec->dev, "%s: VIsense enable current %d, new %d\n",
+		 __func__, wsa881x->visense_enable, value);
+	wsa881x->visense_enable = value;
+	return 0;
+}
+
+static const struct snd_kcontrol_new wsa881x_snd_controls[] = {
+	SOC_SINGLE_EXT("COMP Switch", SND_SOC_NOPM, 0, 1, 0,
+		wsa881x_get_compander, wsa881x_set_compander),
+
+	SOC_SINGLE_EXT("BOOST Switch", SND_SOC_NOPM, 0, 1, 0,
+		wsa881x_get_boost, wsa881x_set_boost),
+
+	SOC_SINGLE_EXT("VISENSE Switch", SND_SOC_NOPM, 0, 1, 0,
+		wsa881x_get_visense, wsa881x_set_visense),
+};
+
+static const struct snd_kcontrol_new swr_dac_port[] = {
+	SOC_DAPM_SINGLE("Switch", SND_SOC_NOPM, 0, 1, 0)
+};
+
+static int wsa881x_set_port(struct snd_soc_codec *codec, int port_idx,
+			u8 *port_id, u8 *num_ch, u8 *ch_mask, u32 *ch_rate)
+{
+	struct wsa881x_priv *wsa881x = snd_soc_codec_get_drvdata(codec);
+
+	*port_id = wsa881x->port[port_idx].port_id;
+	*num_ch = wsa881x->port[port_idx].num_ch;
+	*ch_mask = wsa881x->port[port_idx].ch_mask;
+	*ch_rate = wsa881x->port[port_idx].ch_rate;
+	return 0;
+}
+
+static int wsa881x_enable_swr_dac_port(struct snd_soc_dapm_widget *w,
+	struct snd_kcontrol *kcontrol, int event)
+{
+	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+	struct wsa881x_priv *wsa881x = snd_soc_codec_get_drvdata(codec);
+	u8 port_id[WSA881X_MAX_SWR_PORTS];
+	u8 num_ch[WSA881X_MAX_SWR_PORTS];
+	u8 ch_mask[WSA881X_MAX_SWR_PORTS];
+	u32 ch_rate[WSA881X_MAX_SWR_PORTS];
+	u8 num_port = 0;
+
+	dev_dbg(codec->dev, "%s: event %d name %s\n", __func__,
+		event, w->name);
+	if (!wsa881x)
+		return -EINVAL;
+
+	switch (event) {
+	case SND_SOC_DAPM_PRE_PMU:
+		wsa881x_set_port(codec, SWR_DAC_PORT,
+				&port_id[num_port], &num_ch[num_port],
+				&ch_mask[num_port], &ch_rate[num_port]);
+		++num_port;
+
+		if (wsa881x->comp_enable) {
+			wsa881x_set_port(codec, SWR_COMP_PORT,
+					&port_id[num_port], &num_ch[num_port],
+					&ch_mask[num_port], &ch_rate[num_port]);
+			++num_port;
+		}
+		if (wsa881x->boost_enable) {
+			wsa881x_set_port(codec, SWR_BOOST_PORT,
+					&port_id[num_port], &num_ch[num_port],
+					&ch_mask[num_port], &ch_rate[num_port]);
+			++num_port;
+		}
+		if (wsa881x->visense_enable) {
+			wsa881x_set_port(codec, SWR_VISENSE_PORT,
+					&port_id[num_port], &num_ch[num_port],
+					&ch_mask[num_port], &ch_rate[num_port]);
+			++num_port;
+		}
+		swr_connect_port(wsa881x->swr_slave, &port_id[0], num_port,
+				&ch_mask[0], &ch_rate[0], &num_ch[0]);
+		break;
+	case SND_SOC_DAPM_POST_PMU:
+		break;
+	case SND_SOC_DAPM_PRE_PMD:
+		break;
+	case SND_SOC_DAPM_POST_PMD:
+		port_id[num_port] = wsa881x->port[SWR_DAC_PORT].port_id;
+		++num_port;
+		if (wsa881x->comp_enable) {
+			port_id[num_port] =
+				wsa881x->port[SWR_COMP_PORT].port_id;
+			++num_port;
+		}
+		if (wsa881x->boost_enable) {
+			port_id[num_port] =
+				wsa881x->port[SWR_BOOST_PORT].port_id;
+			++num_port;
+		}
+		if (wsa881x->visense_enable) {
+			port_id[num_port] =
+				wsa881x->port[SWR_VISENSE_PORT].port_id;
+			++num_port;
+		}
+		swr_disconnect_port(wsa881x->swr_slave, &port_id[0], num_port);
+		break;
+	default:
+		break;
+	}
+	return 0;
+}
+
+static int wsa881x_rdac_event(struct snd_soc_dapm_widget *w,
+			struct snd_kcontrol *kcontrol, int event)
+{
+	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+	struct wsa881x_priv *wsa881x = snd_soc_codec_get_drvdata(codec);
+
+	dev_dbg(codec->dev, "%s: %s %d boost %d visense %d\n", __func__,
+		w->name, event, wsa881x->boost_enable,
+		wsa881x->visense_enable);
+
+	switch (event) {
+	case SND_SOC_DAPM_PRE_PMU:
+		wsa881x_resource_acquire(codec, ENABLE);
+		wsa881x_boost_ctrl(codec, ENABLE);
+		break;
+	case SND_SOC_DAPM_POST_PMD:
+		swr_slvdev_datapath_control(wsa881x->swr_slave,
+					    wsa881x->swr_slave->dev_num,
+					    false);
+		wsa881x_boost_ctrl(codec, DISABLE);
+		wsa881x_resource_acquire(codec, DISABLE);
+		break;
+	}
+	return 0;
+}
+
+static int wsa881x_ramp_pa_gain(struct snd_soc_codec *codec,
+				int min_gain, int max_gain, int udelay)
+{
+	int val;
+
+	for (val = min_gain; val >= max_gain; val--) {
+		snd_soc_update_bits(codec, WSA881X_SPKR_DRV_GAIN,
+				    0xF0, val << 4);
+		/*
+		 * 1ms delay is needed for every step change in gain as per
+		 * HW requirement.
+		 */
+		usleep_range(udelay, udelay+10);
+	}
+	return 0;
+}
+
+static void wsa881x_ocp_ctl_work(struct work_struct *work)
+{
+	struct wsa881x_priv *wsa881x;
+	struct delayed_work *dwork;
+	struct snd_soc_codec *codec;
+	int temp_val;
+
+	dwork = to_delayed_work(work);
+	wsa881x = container_of(dwork, struct wsa881x_priv, ocp_ctl_work);
+
+	codec = wsa881x->codec;
+	wsa881x_get_temp(wsa881x->tz_pdata.tz_dev, &temp_val);
+	dev_dbg(codec->dev, "%s: temp = %d\n", __func__, temp_val);
+
+	if (temp_val <= WSA881X_OCP_CTL_TEMP_CELSIUS)
+		snd_soc_update_bits(codec, WSA881X_SPKR_OCP_CTL, 0xC0, 0x00);
+	else
+		snd_soc_update_bits(codec, WSA881X_SPKR_OCP_CTL, 0xC0, 0xC0);
+
+	schedule_delayed_work(&wsa881x->ocp_ctl_work,
+			msecs_to_jiffies(wsa881x_ocp_poll_timer_sec * 1000));
+}
+
+static int wsa881x_spkr_pa_event(struct snd_soc_dapm_widget *w,
+			struct snd_kcontrol *kcontrol, int event)
+{
+	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+	struct wsa881x_priv *wsa881x = snd_soc_codec_get_drvdata(codec);
+	int min_gain, max_gain;
+
+	dev_dbg(codec->dev, "%s: %s %d\n", __func__, w->name, event);
+	switch (event) {
+	case SND_SOC_DAPM_PRE_PMU:
+		snd_soc_update_bits(codec, WSA881X_SPKR_OCP_CTL, 0xC0, 0x80);
+		regmap_multi_reg_write(wsa881x->regmap,
+				wsa881x_pre_pmu_pa_2_0,
+				ARRAY_SIZE(wsa881x_pre_pmu_pa_2_0));
+		swr_slvdev_datapath_control(wsa881x->swr_slave,
+					    wsa881x->swr_slave->dev_num,
+					    true);
+		/* Set register mode if compander is not enabled */
+		if (!wsa881x->comp_enable)
+			snd_soc_update_bits(codec, WSA881X_SPKR_DRV_GAIN,
+					    0x08, 0x08);
+		else
+			snd_soc_update_bits(codec, WSA881X_SPKR_DRV_GAIN,
+					    0x08, 0x00);
+
+		break;
+	case SND_SOC_DAPM_POST_PMU:
+		if (!wsa881x->comp_enable) {
+			max_gain = wsa881x->pa_gain;
+			/*
+			 * Gain has to be set incrementally in 4 steps
+			 * as per HW sequence, e.g. for a G_12DB target
+			 * the ramp runs G_7P5DB, G_9DB, G_10P5DB, G_12DB.
+			 */
+			if (max_gain > G_4P5DB)
+				min_gain = G_0DB;
+			else
+				min_gain = max_gain + 3;
+			/*
+			 * 1ms delay is needed before change in gain
+			 * as per HW requirement.
+			 */
+			usleep_range(1000, 1010);
+			wsa881x_ramp_pa_gain(codec, min_gain, max_gain, 1000);
+		}
+		if (wsa881x->visense_enable) {
+			wsa881x_visense_txfe_ctrl(codec, ENABLE,
+						0x00, 0x03, 0x01);
+			snd_soc_update_bits(codec, WSA881X_ADC_EN_SEL_IBAIS,
+					    0x07, 0x01);
+			wsa881x_visense_adc_ctrl(codec, ENABLE);
+		}
+		schedule_delayed_work(&wsa881x->ocp_ctl_work,
+			msecs_to_jiffies(WSA881X_OCP_CTL_TIMER_SEC * 1000));
+		/* Force remove group */
+		swr_remove_from_group(wsa881x->swr_slave,
+				      wsa881x->swr_slave->dev_num);
+		break;
+	case SND_SOC_DAPM_POST_PMD:
+		if (wsa881x->visense_enable) {
+			wsa881x_visense_adc_ctrl(codec, DISABLE);
+			wsa881x_visense_txfe_ctrl(codec, DISABLE,
+						0x00, 0x01, 0x01);
+		}
+		cancel_delayed_work_sync(&wsa881x->ocp_ctl_work);
+		snd_soc_update_bits(codec, WSA881X_SPKR_OCP_CTL, 0xC0, 0xC0);
+		break;
+	}
+	return 0;
+}
+
+static const struct snd_soc_dapm_widget wsa881x_dapm_widgets[] = {
+	SND_SOC_DAPM_INPUT("IN"),
+
+	SND_SOC_DAPM_MIXER_E("SWR DAC_Port", SND_SOC_NOPM, 0, 0, swr_dac_port,
+		ARRAY_SIZE(swr_dac_port), wsa881x_enable_swr_dac_port,
+		SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
+		SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD),
+
+	SND_SOC_DAPM_DAC_E("RDAC", NULL, WSA881X_SPKR_DAC_CTL, 7, 0,
+		wsa881x_rdac_event,
+		SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+
+	SND_SOC_DAPM_PGA_E("SPKR PGA", WSA881X_SPKR_DRV_EN, 7, 0, NULL, 0,
+			wsa881x_spkr_pa_event, SND_SOC_DAPM_PRE_PMU |
+			SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
+
+	SND_SOC_DAPM_OUTPUT("SPKR"),
+};
+
+static const struct snd_soc_dapm_route wsa881x_audio_map[] = {
+	{"SWR DAC_Port", "Switch", "IN"},
+	{"RDAC", NULL, "SWR DAC_Port"},
+	{"SPKR PGA", NULL, "RDAC"},
+	{"SPKR", NULL, "SPKR PGA"},
+};
+
+int wsa881x_set_channel_map(struct snd_soc_codec *codec, u8 *port, u8 num_port,
+				unsigned int *ch_mask, unsigned int *ch_rate)
+{
+	struct wsa881x_priv *wsa881x = snd_soc_codec_get_drvdata(codec);
+	int i;
+
+	if (!port || !ch_mask || !ch_rate ||
+		(num_port > WSA881X_MAX_SWR_PORTS)) {
+		dev_err(codec->dev,
+			"%s: Invalid port=%pK, ch_mask=%pK, ch_rate=%pK\n",
+			__func__, port, ch_mask, ch_rate);
+		return -EINVAL;
+	}
+	for (i = 0; i < num_port; i++) {
+		wsa881x->port[i].port_id = port[i];
+		wsa881x->port[i].ch_mask = ch_mask[i];
+		wsa881x->port[i].ch_rate = ch_rate[i];
+		wsa881x->port[i].num_ch = __sw_hweight8(ch_mask[i]);
+	}
+	return 0;
+}
+EXPORT_SYMBOL(wsa881x_set_channel_map);
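+
+/*
+ * Channel map sketch (illustrative port/mask/rate values, not taken
+ * from a real machine driver):
+ *
+ *	u8 port[] = {1, 2};
+ *	unsigned int ch_mask[] = {0x1, 0x3};
+ *	unsigned int ch_rate[] = {2400, 600};
+ *
+ *	wsa881x_set_channel_map(codec, port, ARRAY_SIZE(port),
+ *				ch_mask, ch_rate);
+ */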
+
+static void wsa881x_init(struct snd_soc_codec *codec)
+{
+	struct wsa881x_priv *wsa881x = snd_soc_codec_get_drvdata(codec);
+
+	wsa881x->version = snd_soc_read(codec, WSA881X_CHIP_ID1);
+	wsa881x_regmap_defaults(wsa881x->regmap, wsa881x->version);
+	/* Enable software reset output from soundwire slave */
+	snd_soc_update_bits(codec, WSA881X_SWR_RESET_EN, 0x07, 0x07);
+	/* Bring out of analog reset */
+	snd_soc_update_bits(codec, WSA881X_CDC_RST_CTL, 0x02, 0x02);
+	/* Bring out of digital reset */
+	snd_soc_update_bits(codec, WSA881X_CDC_RST_CTL, 0x01, 0x01);
+
+	snd_soc_update_bits(codec, WSA881X_CLOCK_CONFIG, 0x10, 0x10);
+	snd_soc_update_bits(codec, WSA881X_SPKR_OCP_CTL, 0x02, 0x02);
+	snd_soc_update_bits(codec, WSA881X_SPKR_MISC_CTL1, 0xC0, 0x80);
+	snd_soc_update_bits(codec, WSA881X_SPKR_MISC_CTL1, 0x06, 0x06);
+	snd_soc_update_bits(codec, WSA881X_SPKR_BIAS_INT, 0xFF, 0x00);
+	snd_soc_update_bits(codec, WSA881X_SPKR_PA_INT, 0xF0, 0x40);
+	snd_soc_update_bits(codec, WSA881X_SPKR_PA_INT, 0x0E, 0x0E);
+	snd_soc_update_bits(codec, WSA881X_BOOST_LOOP_STABILITY,
+			    0x03, 0x03);
+	snd_soc_update_bits(codec, WSA881X_BOOST_MISC2_CTL, 0xFF, 0x14);
+	snd_soc_update_bits(codec, WSA881X_BOOST_START_CTL, 0x80, 0x80);
+	snd_soc_update_bits(codec, WSA881X_BOOST_START_CTL, 0x03, 0x00);
+	snd_soc_update_bits(codec, WSA881X_BOOST_SLOPE_COMP_ISENSE_FB,
+			    0x0C, 0x04);
+	snd_soc_update_bits(codec, WSA881X_BOOST_SLOPE_COMP_ISENSE_FB,
+			    0x03, 0x00);
+	if (snd_soc_read(codec, WSA881X_OTP_REG_0))
+		snd_soc_update_bits(codec, WSA881X_BOOST_PRESET_OUT1,
+				    0xF0, 0x70);
+	snd_soc_update_bits(codec, WSA881X_BOOST_PRESET_OUT2,
+			    0xF0, 0x30);
+	snd_soc_update_bits(codec, WSA881X_SPKR_DRV_EN, 0x08, 0x08);
+	snd_soc_update_bits(codec, WSA881X_BOOST_CURRENT_LIMIT,
+			    0x0F, 0x08);
+	snd_soc_update_bits(codec, WSA881X_SPKR_OCP_CTL, 0x30, 0x30);
+	snd_soc_update_bits(codec, WSA881X_SPKR_OCP_CTL, 0x0C, 0x00);
+	snd_soc_update_bits(codec, WSA881X_OTP_REG_28, 0x3F, 0x3A);
+	snd_soc_update_bits(codec, WSA881X_BONGO_RESRV_REG1,
+			    0xFF, 0xB2);
+	snd_soc_update_bits(codec, WSA881X_BONGO_RESRV_REG2,
+			    0xFF, 0x05);
+}
+
+static int32_t wsa881x_resource_acquire(struct snd_soc_codec *codec,
+						bool enable)
+{
+	wsa881x_clk_ctrl(codec, enable);
+	wsa881x_bandgap_ctrl(codec, enable);
+	return 0;
+}
+
+static int32_t wsa881x_temp_reg_read(struct snd_soc_codec *codec,
+				     struct wsa_temp_register *wsa_temp_reg)
+{
+	struct wsa881x_priv *wsa881x = snd_soc_codec_get_drvdata(codec);
+	struct swr_device *dev;
+	u8 retry = WSA881X_NUM_RETRY;
+	u8 devnum = 0;
+
+	if (!wsa881x) {
+		dev_err(codec->dev, "%s: wsa881x is NULL\n", __func__);
+		return -EINVAL;
+	}
+	dev = wsa881x->swr_slave;
+	if (dev && (wsa881x->state == WSA881X_DEV_DOWN)) {
+		while (swr_get_logical_dev_num(dev, dev->addr, &devnum) &&
+		       retry--) {
+			/* Retry after 1 msec delay */
+			usleep_range(1000, 1100);
+		}
+		if (retry == 0) {
+			dev_err(codec->dev,
+				"%s get devnum %d for dev addr %lx failed\n",
+				__func__, devnum, dev->addr);
+			return -EINVAL;
+		}
+	}
+	mutex_lock(&wsa881x->res_lock);
+	if (!wsa881x->clk_cnt) {
+		regcache_mark_dirty(wsa881x->regmap);
+		regcache_sync(wsa881x->regmap);
+	}
+	mutex_unlock(&wsa881x->res_lock);
+
+	wsa881x_resource_acquire(codec, ENABLE);
+
+	snd_soc_update_bits(codec, WSA881X_TADC_VALUE_CTL, 0x01, 0x00);
+	wsa_temp_reg->dmeas_msb = snd_soc_read(codec, WSA881X_TEMP_MSB);
+	wsa_temp_reg->dmeas_lsb = snd_soc_read(codec, WSA881X_TEMP_LSB);
+	snd_soc_update_bits(codec, WSA881X_TADC_VALUE_CTL, 0x01, 0x01);
+	wsa_temp_reg->d1_msb = snd_soc_read(codec, WSA881X_OTP_REG_1);
+	wsa_temp_reg->d1_lsb = snd_soc_read(codec, WSA881X_OTP_REG_2);
+	wsa_temp_reg->d2_msb = snd_soc_read(codec, WSA881X_OTP_REG_3);
+	wsa_temp_reg->d2_lsb = snd_soc_read(codec, WSA881X_OTP_REG_4);
+
+	wsa881x_resource_acquire(codec, DISABLE);
+
+	return 0;
+}
+
+static int wsa881x_probe(struct snd_soc_codec *codec)
+{
+	struct wsa881x_priv *wsa881x = snd_soc_codec_get_drvdata(codec);
+	struct swr_device *dev;
+
+	if (!wsa881x)
+		return -EINVAL;
+
+	dev = wsa881x->swr_slave;
+	wsa881x->codec = codec;
+	mutex_init(&wsa881x->bg_lock);
+	mutex_init(&wsa881x->res_lock);
+	wsa881x_init(codec);
+	snprintf(wsa881x->tz_pdata.name, sizeof(wsa881x->tz_pdata.name),
+		"%s.%x", "wsatz", (u8)dev->addr);
+	wsa881x->bg_cnt = 0;
+	wsa881x->clk_cnt = 0;
+	wsa881x->tz_pdata.codec = codec;
+	wsa881x->tz_pdata.wsa_temp_reg_read = wsa881x_temp_reg_read;
+	wsa881x_init_thermal(&wsa881x->tz_pdata);
+	snd_soc_add_codec_controls(codec, wsa_snd_controls,
+				   ARRAY_SIZE(wsa_snd_controls));
+	INIT_DELAYED_WORK(&wsa881x->ocp_ctl_work, wsa881x_ocp_ctl_work);
+	return 0;
+}
+
+static int wsa881x_remove(struct snd_soc_codec *codec)
+{
+	struct wsa881x_priv *wsa881x = snd_soc_codec_get_drvdata(codec);
+
+	if (wsa881x->tz_pdata.tz_dev)
+		wsa881x_deinit_thermal(wsa881x->tz_pdata.tz_dev);
+	mutex_destroy(&wsa881x->bg_lock);
+	mutex_destroy(&wsa881x->res_lock);
+
+	return 0;
+}
+
+static struct regmap *wsa881x_get_regmap(struct device *dev)
+{
+	struct wsa881x_priv *control = swr_get_dev_data(to_swr_device(dev));
+
+	if (!control)
+		return NULL;
+
+	return control->regmap;
+}
+
+static struct snd_soc_codec_driver soc_codec_dev_wsa881x = {
+	.probe = wsa881x_probe,
+	.remove = wsa881x_remove,
+	.controls = wsa881x_snd_controls,
+	.num_controls = ARRAY_SIZE(wsa881x_snd_controls),
+	.dapm_widgets = wsa881x_dapm_widgets,
+	.num_dapm_widgets = ARRAY_SIZE(wsa881x_dapm_widgets),
+	.dapm_routes = wsa881x_audio_map,
+	.num_dapm_routes = ARRAY_SIZE(wsa881x_audio_map),
+	.get_regmap = wsa881x_get_regmap,
+};
+
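+/*
+ * Drive the speaker shutdown/reset line. Two board variants are
+ * handled: newer DTs describe the pin through a pinctrl node
+ * (qcom,spkr-sd-n-node), older ones expose it as a raw GPIO
+ * (qcom,spkr-sd-n-gpio); the pinctrl path takes precedence.
+ */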
+static int wsa881x_gpio_ctrl(struct wsa881x_priv *wsa881x, bool enable)
+{
+	int ret = 0;
+
+	if (wsa881x->pd_gpio < 0) {
+		dev_err(wsa881x->dev, "%s: gpio is not valid %d\n",
+			__func__, wsa881x->pd_gpio);
+		return -EINVAL;
+	}
+
+	if (wsa881x->wsa_rst_np) {
+		if (enable)
+			ret = msm_cdc_pinctrl_select_active_state(
+							wsa881x->wsa_rst_np);
+		else
+			ret = msm_cdc_pinctrl_select_sleep_state(
+							wsa881x->wsa_rst_np);
+		if (ret != 0)
+			dev_err(wsa881x->dev,
+				"%s: Failed to turn state %d; ret=%d\n",
+				__func__, enable, ret);
+	} else {
+		if (gpio_is_valid(wsa881x->pd_gpio))
+			gpio_direction_output(wsa881x->pd_gpio, enable);
+	}
+
+	return ret;
+}
+
+static int wsa881x_gpio_init(struct swr_device *pdev)
+{
+	int ret = 0;
+	struct wsa881x_priv *wsa881x;
+
+	wsa881x = swr_get_dev_data(pdev);
+	if (!wsa881x) {
+		dev_err(&pdev->dev, "%s: wsa881x is NULL\n", __func__);
+		return -EINVAL;
+	}
+	dev_dbg(&pdev->dev, "%s: gpio %d request with name %s\n",
+		__func__, wsa881x->pd_gpio, dev_name(&pdev->dev));
+	ret = gpio_request(wsa881x->pd_gpio, dev_name(&pdev->dev));
+	if (ret) {
+		if (ret == -EBUSY) {
+			/* GPIO was already requested */
+			dev_dbg(&pdev->dev,
+				 "%s: gpio %d is already requested\n",
+				 __func__, wsa881x->pd_gpio);
+			ret = 0;
+		} else {
+			dev_err(&pdev->dev, "%s: Failed to request gpio %d, err: %d\n",
+				__func__, wsa881x->pd_gpio, ret);
+		}
+	}
+	return ret;
+}
+
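+/*
+ * Probe sequence, in order: power up the shutdown/reset line, wait for
+ * soundwire auto-enumeration, fetch the logical device number, create
+ * the regmap, then register the codec. On failure, the pin is put back
+ * to sleep only if it was not already active before probing.
+ */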
+static int wsa881x_swr_probe(struct swr_device *pdev)
+{
+	int ret = 0;
+	struct wsa881x_priv *wsa881x;
+	u8 devnum = 0;
+	bool pin_state_current = false;
+
+	wsa881x = devm_kzalloc(&pdev->dev, sizeof(struct wsa881x_priv),
+			    GFP_KERNEL);
+	if (!wsa881x)
+		return -ENOMEM;
+	wsa881x->wsa_rst_np = of_parse_phandle(pdev->dev.of_node,
+					     "qcom,spkr-sd-n-node", 0);
+	if (!wsa881x->wsa_rst_np) {
+		dev_dbg(&pdev->dev, "%s: Not using pinctrl, fallback to gpio\n",
+			__func__);
+		wsa881x->pd_gpio = of_get_named_gpio(pdev->dev.of_node,
+						     "qcom,spkr-sd-n-gpio", 0);
+		if (wsa881x->pd_gpio < 0) {
+			dev_err(&pdev->dev, "%s: %s property is not found %d\n",
+				__func__, "qcom,spkr-sd-n-gpio",
+				wsa881x->pd_gpio);
+			goto err;
+		}
+		dev_dbg(&pdev->dev, "%s: reset gpio %d\n", __func__,
+			wsa881x->pd_gpio);
+	}
+	swr_set_dev_data(pdev, wsa881x);
+
+	wsa881x->swr_slave = pdev;
+
+	if (!wsa881x->wsa_rst_np) {
+		ret = wsa881x_gpio_init(pdev);
+		if (ret)
+			goto err;
+	}
+	if (wsa881x->wsa_rst_np)
+		pin_state_current = msm_cdc_pinctrl_get_state(
+						wsa881x->wsa_rst_np);
+	wsa881x_gpio_ctrl(wsa881x, true);
+	wsa881x->state = WSA881X_DEV_UP;
+
+	if (!debugfs_wsa881x_dent) {
+		dbgwsa881x = wsa881x;
+		debugfs_wsa881x_dent = debugfs_create_dir(
+						"wsa881x_swr_slave", NULL);
+		if (!IS_ERR(debugfs_wsa881x_dent)) {
+			debugfs_peek = debugfs_create_file("swrslave_peek",
+					S_IFREG | S_IRUGO, debugfs_wsa881x_dent,
+					(void *) "swrslave_peek",
+					&codec_debug_ops);
+
+			debugfs_poke = debugfs_create_file("swrslave_poke",
+					S_IFREG | S_IRUGO, debugfs_wsa881x_dent,
+					(void *) "swrslave_poke",
+					&codec_debug_ops);
+
+			debugfs_reg_dump = debugfs_create_file(
+						"swrslave_reg_dump",
+						S_IFREG | S_IRUGO,
+						debugfs_wsa881x_dent,
+						(void *) "swrslave_reg_dump",
+						&codec_debug_ops);
+		}
+	}
+
+	/*
+	 * Add a 5 msec delay to provide sufficient time for soundwire
+	 * auto-enumeration of slave devices, as per the HW requirement.
+	 */
+	usleep_range(5000, 5010);
+	ret = swr_get_logical_dev_num(pdev, pdev->addr, &devnum);
+	if (ret) {
+		dev_dbg(&pdev->dev,
+			"%s: get devnum %d for dev addr %lx failed\n",
+			__func__, devnum, pdev->addr);
+		goto dev_err;
+	}
+	pdev->dev_num = devnum;
+
+	wsa881x->regmap = devm_regmap_init_swr(pdev,
+					       &wsa881x_regmap_config);
+	if (IS_ERR(wsa881x->regmap)) {
+		ret = PTR_ERR(wsa881x->regmap);
+		dev_err(&pdev->dev, "%s: regmap_init failed %d\n",
+			__func__, ret);
+		goto dev_err;
+	}
+
+	ret = snd_soc_register_codec(&pdev->dev, &soc_codec_dev_wsa881x,
+				     NULL, 0);
+	if (ret) {
+		dev_err(&pdev->dev, "%s: Codec registration failed\n",
+			__func__);
+		goto dev_err;
+	}
+
+	return 0;
+
+dev_err:
+	if (!pin_state_current)
+		wsa881x_gpio_ctrl(wsa881x, false);
+	swr_remove_device(pdev);
+err:
+	return ret;
+}
+
+static int wsa881x_swr_remove(struct swr_device *pdev)
+{
+	struct wsa881x_priv *wsa881x;
+
+	wsa881x = swr_get_dev_data(pdev);
+	if (!wsa881x) {
+		dev_err(&pdev->dev, "%s: wsa881x is NULL\n", __func__);
+		return -EINVAL;
+	}
+	debugfs_remove_recursive(debugfs_wsa881x_dent);
+	snd_soc_unregister_codec(&pdev->dev);
+	if (gpio_is_valid(wsa881x->pd_gpio))
+		gpio_free(wsa881x->pd_gpio);
+	swr_set_dev_data(pdev, NULL);
+	return 0;
+}
+
+static int wsa881x_swr_up(struct swr_device *pdev)
+{
+	int ret;
+	struct wsa881x_priv *wsa881x;
+
+	wsa881x = swr_get_dev_data(pdev);
+	if (!wsa881x) {
+		dev_err(&pdev->dev, "%s: wsa881x is NULL\n", __func__);
+		return -EINVAL;
+	}
+	ret = wsa881x_gpio_ctrl(wsa881x, true);
+	if (ret)
+		dev_err(&pdev->dev, "%s: Failed to enable gpio\n", __func__);
+	else
+		wsa881x->state = WSA881X_DEV_UP;
+
+	return ret;
+}
+
+static int wsa881x_swr_down(struct swr_device *pdev)
+{
+	struct wsa881x_priv *wsa881x;
+	int ret;
+
+	wsa881x = swr_get_dev_data(pdev);
+	if (!wsa881x) {
+		dev_err(&pdev->dev, "%s: wsa881x is NULL\n", __func__);
+		return -EINVAL;
+	}
+	if (delayed_work_pending(&wsa881x->ocp_ctl_work))
+		cancel_delayed_work_sync(&wsa881x->ocp_ctl_work);
+	ret = wsa881x_gpio_ctrl(wsa881x, false);
+	if (ret)
+		dev_err(&pdev->dev, "%s: Failed to disable gpio\n", __func__);
+	else
+		wsa881x->state = WSA881X_DEV_DOWN;
+
+	return ret;
+}
+
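+/*
+ * After a soundwire bus reset the slave is assigned a fresh logical
+ * device number and its registers are back at their reset values.
+ * Marking the regcache dirty and syncing it replays every cached
+ * non-default value to the hardware in one pass.
+ */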
+static int wsa881x_swr_reset(struct swr_device *pdev)
+{
+	struct wsa881x_priv *wsa881x;
+	u8 retry = WSA881X_NUM_RETRY;
+	u8 devnum = 0;
+
+	wsa881x = swr_get_dev_data(pdev);
+	if (!wsa881x) {
+		dev_err(&pdev->dev, "%s: wsa881x is NULL\n", __func__);
+		return -EINVAL;
+	}
+	wsa881x->bg_cnt = 0;
+	wsa881x->clk_cnt = 0;
+	while (swr_get_logical_dev_num(pdev, pdev->addr, &devnum) && retry--) {
+		/* Retry after 1 msec delay */
+		usleep_range(1000, 1100);
+	}
+	pdev->dev_num = devnum;
+	regcache_mark_dirty(wsa881x->regmap);
+	regcache_sync(wsa881x->regmap);
+	return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int wsa881x_swr_suspend(struct device *dev)
+{
+	dev_dbg(dev, "%s: system suspend\n", __func__);
+	return 0;
+}
+
+static int wsa881x_swr_resume(struct device *dev)
+{
+	struct wsa881x_priv *wsa881x = swr_get_dev_data(to_swr_device(dev));
+
+	if (!wsa881x) {
+		dev_err(dev, "%s: wsa881x private data is NULL\n", __func__);
+		return -EINVAL;
+	}
+	dev_dbg(dev, "%s: system resume\n", __func__);
+	return 0;
+}
+#endif /* CONFIG_PM_SLEEP */
+
+static const struct dev_pm_ops wsa881x_swr_pm_ops = {
+	SET_SYSTEM_SLEEP_PM_OPS(wsa881x_swr_suspend, wsa881x_swr_resume)
+};
+
+static const struct swr_device_id wsa881x_swr_id[] = {
+	{"wsa881x", 0},
+	{}
+};
+
+static const struct of_device_id wsa881x_swr_dt_match[] = {
+	{
+		.compatible = "qcom,wsa881x",
+	},
+	{}
+};
+
+static struct swr_driver wsa881x_codec_driver = {
+	.driver = {
+		.name = "wsa881x",
+		.owner = THIS_MODULE,
+		.pm = &wsa881x_swr_pm_ops,
+		.of_match_table = wsa881x_swr_dt_match,
+	},
+	.probe = wsa881x_swr_probe,
+	.remove = wsa881x_swr_remove,
+	.id_table = wsa881x_swr_id,
+	.device_up = wsa881x_swr_up,
+	.device_down = wsa881x_swr_down,
+	.reset_device = wsa881x_swr_reset,
+};
+
+static int __init wsa881x_codec_init(void)
+{
+	return swr_driver_register(&wsa881x_codec_driver);
+}
+
+static void __exit wsa881x_codec_exit(void)
+{
+	swr_driver_unregister(&wsa881x_codec_driver);
+}
+
+module_init(wsa881x_codec_init);
+module_exit(wsa881x_codec_exit);
+
+MODULE_DESCRIPTION("WSA881x Codec driver");
+MODULE_LICENSE("GPL v2");
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/sound/soc/codecs/wsa881x.h	2019-01-22 16:16:29.595301573 +0100
@@ -0,0 +1,34 @@
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _WSA881X_H
+#define _WSA881X_H
+
+#include <linux/regmap.h>
+#include <sound/soc.h>
+#include <sound/info.h>
+#include "wsa881x-registers.h"
+
+#define WSA881X_MAX_SWR_PORTS   4
+
+extern int wsa881x_set_channel_map(struct snd_soc_codec *codec, u8 *port,
+				u8 num_port, unsigned int *ch_mask,
+				unsigned int *ch_rate);
+
+extern const u8 wsa881x_reg_readable[WSA881X_CACHE_SIZE];
+extern struct regmap_config wsa881x_regmap_config;
+extern int wsa881x_codec_info_create_codec_entry(
+					struct snd_info_entry *codec_root,
+					struct snd_soc_codec *codec);
+void wsa881x_regmap_defaults(struct regmap *regmap, u8 version);
+
+#endif /* _WSA881X_H */
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/sound/soc/codecs/wsa881x-registers.h	2019-01-22 16:16:29.595301573 +0100
@@ -0,0 +1,178 @@
+/* Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef WSA881X_REGISTERS_H
+#define WSA881X_REGISTERS_H
+
+#define WSA881X_DIGITAL_BASE		0x3000
+#define WSA881X_ANALOG_BASE		0x3100
+
+/* Digital register address space */
+#define WSA881X_CHIP_ID0			(WSA881X_DIGITAL_BASE+0x0000)
+#define WSA881X_CHIP_ID1			(WSA881X_DIGITAL_BASE+0x0001)
+#define WSA881X_CHIP_ID2			(WSA881X_DIGITAL_BASE+0x0002)
+#define WSA881X_CHIP_ID3			(WSA881X_DIGITAL_BASE+0x0003)
+#define WSA881X_BUS_ID				(WSA881X_DIGITAL_BASE+0x0004)
+#define WSA881X_CDC_RST_CTL			(WSA881X_DIGITAL_BASE+0x0005)
+#define WSA881X_CDC_TOP_CLK_CTL			(WSA881X_DIGITAL_BASE+0x0006)
+#define WSA881X_CDC_ANA_CLK_CTL			(WSA881X_DIGITAL_BASE+0x0007)
+#define WSA881X_CDC_DIG_CLK_CTL			(WSA881X_DIGITAL_BASE+0x0008)
+#define WSA881X_CLOCK_CONFIG			(WSA881X_DIGITAL_BASE+0x0009)
+#define WSA881X_ANA_CTL				(WSA881X_DIGITAL_BASE+0x000A)
+#define WSA881X_SWR_RESET_EN			(WSA881X_DIGITAL_BASE+0x000B)
+#define WSA881X_RESET_CTL			(WSA881X_DIGITAL_BASE+0x000C)
+#define WSA881X_TADC_VALUE_CTL			(WSA881X_DIGITAL_BASE+0x000F)
+#define WSA881X_TEMP_DETECT_CTL			(WSA881X_DIGITAL_BASE+0x0010)
+#define WSA881X_TEMP_MSB			(WSA881X_DIGITAL_BASE+0x0011)
+#define WSA881X_TEMP_LSB			(WSA881X_DIGITAL_BASE+0x0012)
+#define WSA881X_TEMP_CONFIG0			(WSA881X_DIGITAL_BASE+0x0013)
+#define WSA881X_TEMP_CONFIG1			(WSA881X_DIGITAL_BASE+0x0014)
+#define WSA881X_CDC_CLIP_CTL			(WSA881X_DIGITAL_BASE+0x0015)
+#define WSA881X_SDM_PDM9_LSB			(WSA881X_DIGITAL_BASE+0x0016)
+#define WSA881X_SDM_PDM9_MSB			(WSA881X_DIGITAL_BASE+0x0017)
+#define WSA881X_CDC_RX_CTL			(WSA881X_DIGITAL_BASE+0x0018)
+#define WSA881X_DEM_BYPASS_DATA0		(WSA881X_DIGITAL_BASE+0x0019)
+#define WSA881X_DEM_BYPASS_DATA1		(WSA881X_DIGITAL_BASE+0x001A)
+#define WSA881X_DEM_BYPASS_DATA2		(WSA881X_DIGITAL_BASE+0x001B)
+#define WSA881X_DEM_BYPASS_DATA3		(WSA881X_DIGITAL_BASE+0x001C)
+#define WSA881X_OTP_CTRL0			(WSA881X_DIGITAL_BASE+0x001D)
+#define WSA881X_OTP_CTRL1			(WSA881X_DIGITAL_BASE+0x001E)
+#define WSA881X_HDRIVE_CTL_GROUP1		(WSA881X_DIGITAL_BASE+0x001F)
+#define WSA881X_INTR_MODE			(WSA881X_DIGITAL_BASE+0x0020)
+#define WSA881X_INTR_MASK			(WSA881X_DIGITAL_BASE+0x0021)
+#define WSA881X_INTR_STATUS			(WSA881X_DIGITAL_BASE+0x0022)
+#define WSA881X_INTR_CLEAR			(WSA881X_DIGITAL_BASE+0x0023)
+#define WSA881X_INTR_LEVEL			(WSA881X_DIGITAL_BASE+0x0024)
+#define WSA881X_INTR_SET			(WSA881X_DIGITAL_BASE+0x0025)
+#define WSA881X_INTR_TEST			(WSA881X_DIGITAL_BASE+0x0026)
+#define WSA881X_PDM_TEST_MODE			(WSA881X_DIGITAL_BASE+0x0030)
+#define WSA881X_ATE_TEST_MODE			(WSA881X_DIGITAL_BASE+0x0031)
+#define WSA881X_PIN_CTL_MODE			(WSA881X_DIGITAL_BASE+0x0032)
+#define WSA881X_PIN_CTL_OE			(WSA881X_DIGITAL_BASE+0x0033)
+#define WSA881X_PIN_WDATA_IOPAD			(WSA881X_DIGITAL_BASE+0x0034)
+#define WSA881X_PIN_STATUS			(WSA881X_DIGITAL_BASE+0x0035)
+#define WSA881X_DIG_DEBUG_MODE			(WSA881X_DIGITAL_BASE+0x0037)
+#define WSA881X_DIG_DEBUG_SEL			(WSA881X_DIGITAL_BASE+0x0038)
+#define WSA881X_DIG_DEBUG_EN			(WSA881X_DIGITAL_BASE+0x0039)
+#define WSA881X_SWR_HM_TEST1			(WSA881X_DIGITAL_BASE+0x003B)
+#define WSA881X_SWR_HM_TEST2			(WSA881X_DIGITAL_BASE+0x003C)
+#define WSA881X_TEMP_DETECT_DBG_CTL		(WSA881X_DIGITAL_BASE+0x003D)
+#define WSA881X_TEMP_DEBUG_MSB			(WSA881X_DIGITAL_BASE+0x003E)
+#define WSA881X_TEMP_DEBUG_LSB			(WSA881X_DIGITAL_BASE+0x003F)
+#define WSA881X_SAMPLE_EDGE_SEL			(WSA881X_DIGITAL_BASE+0x0044)
+#define WSA881X_IOPAD_CTL			(WSA881X_DIGITAL_BASE+0x0045)
+#define WSA881X_SPARE_0				(WSA881X_DIGITAL_BASE+0x0050)
+#define WSA881X_SPARE_1				(WSA881X_DIGITAL_BASE+0x0051)
+#define WSA881X_SPARE_2				(WSA881X_DIGITAL_BASE+0x0052)
+#define WSA881X_OTP_REG_0			(WSA881X_DIGITAL_BASE+0x0080)
+#define WSA881X_OTP_REG_1			(WSA881X_DIGITAL_BASE+0x0081)
+#define WSA881X_OTP_REG_2			(WSA881X_DIGITAL_BASE+0x0082)
+#define WSA881X_OTP_REG_3			(WSA881X_DIGITAL_BASE+0x0083)
+#define WSA881X_OTP_REG_4			(WSA881X_DIGITAL_BASE+0x0084)
+#define WSA881X_OTP_REG_5			(WSA881X_DIGITAL_BASE+0x0085)
+#define WSA881X_OTP_REG_6			(WSA881X_DIGITAL_BASE+0x0086)
+#define WSA881X_OTP_REG_7			(WSA881X_DIGITAL_BASE+0x0087)
+#define WSA881X_OTP_REG_8			(WSA881X_DIGITAL_BASE+0x0088)
+#define WSA881X_OTP_REG_9			(WSA881X_DIGITAL_BASE+0x0089)
+#define WSA881X_OTP_REG_10			(WSA881X_DIGITAL_BASE+0x008A)
+#define WSA881X_OTP_REG_11			(WSA881X_DIGITAL_BASE+0x008B)
+#define WSA881X_OTP_REG_12			(WSA881X_DIGITAL_BASE+0x008C)
+#define WSA881X_OTP_REG_13			(WSA881X_DIGITAL_BASE+0x008D)
+#define WSA881X_OTP_REG_14			(WSA881X_DIGITAL_BASE+0x008E)
+#define WSA881X_OTP_REG_15			(WSA881X_DIGITAL_BASE+0x008F)
+#define WSA881X_OTP_REG_16			(WSA881X_DIGITAL_BASE+0x0090)
+#define WSA881X_OTP_REG_17			(WSA881X_DIGITAL_BASE+0x0091)
+#define WSA881X_OTP_REG_18			(WSA881X_DIGITAL_BASE+0x0092)
+#define WSA881X_OTP_REG_19			(WSA881X_DIGITAL_BASE+0x0093)
+#define WSA881X_OTP_REG_20			(WSA881X_DIGITAL_BASE+0x0094)
+#define WSA881X_OTP_REG_21			(WSA881X_DIGITAL_BASE+0x0095)
+#define WSA881X_OTP_REG_22			(WSA881X_DIGITAL_BASE+0x0096)
+#define WSA881X_OTP_REG_23			(WSA881X_DIGITAL_BASE+0x0097)
+#define WSA881X_OTP_REG_24			(WSA881X_DIGITAL_BASE+0x0098)
+#define WSA881X_OTP_REG_25			(WSA881X_DIGITAL_BASE+0x0099)
+#define WSA881X_OTP_REG_26			(WSA881X_DIGITAL_BASE+0x009A)
+#define WSA881X_OTP_REG_27			(WSA881X_DIGITAL_BASE+0x009B)
+#define WSA881X_OTP_REG_28			(WSA881X_DIGITAL_BASE+0x009C)
+#define WSA881X_OTP_REG_29			(WSA881X_DIGITAL_BASE+0x009D)
+#define WSA881X_OTP_REG_30			(WSA881X_DIGITAL_BASE+0x009E)
+#define WSA881X_OTP_REG_31			(WSA881X_DIGITAL_BASE+0x009F)
+#define WSA881X_OTP_REG_63			(WSA881X_DIGITAL_BASE+0x00BF)
+
+/* Analog Register address space */
+#define WSA881X_BIAS_REF_CTRL			(WSA881X_ANALOG_BASE+0x0000)
+#define WSA881X_BIAS_TEST			(WSA881X_ANALOG_BASE+0x0001)
+#define WSA881X_BIAS_BIAS			(WSA881X_ANALOG_BASE+0x0002)
+#define WSA881X_TEMP_OP				(WSA881X_ANALOG_BASE+0x0003)
+#define WSA881X_TEMP_IREF_CTRL			(WSA881X_ANALOG_BASE+0x0004)
+#define WSA881X_TEMP_ISENS_CTRL			(WSA881X_ANALOG_BASE+0x0005)
+#define WSA881X_TEMP_CLK_CTRL			(WSA881X_ANALOG_BASE+0x0006)
+#define WSA881X_TEMP_TEST			(WSA881X_ANALOG_BASE+0x0007)
+#define WSA881X_TEMP_BIAS			(WSA881X_ANALOG_BASE+0x0008)
+#define WSA881X_TEMP_ADC_CTRL			(WSA881X_ANALOG_BASE+0x0009)
+#define WSA881X_TEMP_DOUT_MSB			(WSA881X_ANALOG_BASE+0x000A)
+#define WSA881X_TEMP_DOUT_LSB			(WSA881X_ANALOG_BASE+0x000B)
+#define WSA881X_ADC_EN_MODU_V			(WSA881X_ANALOG_BASE+0x0010)
+#define WSA881X_ADC_EN_MODU_I			(WSA881X_ANALOG_BASE+0x0011)
+#define WSA881X_ADC_EN_DET_TEST_V		(WSA881X_ANALOG_BASE+0x0012)
+#define WSA881X_ADC_EN_DET_TEST_I		(WSA881X_ANALOG_BASE+0x0013)
+#define WSA881X_ADC_SEL_IBIAS			(WSA881X_ANALOG_BASE+0x0014)
+#define WSA881X_ADC_EN_SEL_IBAIS		(WSA881X_ANALOG_BASE+0x0015)
+#define WSA881X_SPKR_DRV_EN			(WSA881X_ANALOG_BASE+0x001A)
+#define WSA881X_SPKR_DRV_GAIN			(WSA881X_ANALOG_BASE+0x001B)
+#define WSA881X_SPKR_DAC_CTL			(WSA881X_ANALOG_BASE+0x001C)
+#define WSA881X_SPKR_DRV_DBG			(WSA881X_ANALOG_BASE+0x001D)
+#define WSA881X_SPKR_PWRSTG_DBG			(WSA881X_ANALOG_BASE+0x001E)
+#define WSA881X_SPKR_OCP_CTL			(WSA881X_ANALOG_BASE+0x001F)
+#define WSA881X_SPKR_CLIP_CTL			(WSA881X_ANALOG_BASE+0x0020)
+#define WSA881X_SPKR_BBM_CTL			(WSA881X_ANALOG_BASE+0x0021)
+#define WSA881X_SPKR_MISC_CTL1			(WSA881X_ANALOG_BASE+0x0022)
+#define WSA881X_SPKR_MISC_CTL2			(WSA881X_ANALOG_BASE+0x0023)
+#define WSA881X_SPKR_BIAS_INT			(WSA881X_ANALOG_BASE+0x0024)
+#define WSA881X_SPKR_PA_INT			(WSA881X_ANALOG_BASE+0x0025)
+#define WSA881X_SPKR_BIAS_CAL			(WSA881X_ANALOG_BASE+0x0026)
+#define WSA881X_SPKR_BIAS_PSRR			(WSA881X_ANALOG_BASE+0x0027)
+#define WSA881X_SPKR_STATUS1			(WSA881X_ANALOG_BASE+0x0028)
+#define WSA881X_SPKR_STATUS2			(WSA881X_ANALOG_BASE+0x0029)
+#define WSA881X_BOOST_EN_CTL			(WSA881X_ANALOG_BASE+0x002A)
+#define WSA881X_BOOST_CURRENT_LIMIT		(WSA881X_ANALOG_BASE+0x002B)
+#define WSA881X_BOOST_PS_CTL			(WSA881X_ANALOG_BASE+0x002C)
+#define WSA881X_BOOST_PRESET_OUT1		(WSA881X_ANALOG_BASE+0x002D)
+#define WSA881X_BOOST_PRESET_OUT2		(WSA881X_ANALOG_BASE+0x002E)
+#define WSA881X_BOOST_FORCE_OUT			(WSA881X_ANALOG_BASE+0x002F)
+#define WSA881X_BOOST_LDO_PROG			(WSA881X_ANALOG_BASE+0x0030)
+#define WSA881X_BOOST_SLOPE_COMP_ISENSE_FB	(WSA881X_ANALOG_BASE+0x0031)
+#define WSA881X_BOOST_RON_CTL			(WSA881X_ANALOG_BASE+0x0032)
+#define WSA881X_BOOST_LOOP_STABILITY		(WSA881X_ANALOG_BASE+0x0033)
+#define WSA881X_BOOST_ZX_CTL			(WSA881X_ANALOG_BASE+0x0034)
+#define WSA881X_BOOST_START_CTL			(WSA881X_ANALOG_BASE+0x0035)
+#define WSA881X_BOOST_MISC1_CTL			(WSA881X_ANALOG_BASE+0x0036)
+#define WSA881X_BOOST_MISC2_CTL			(WSA881X_ANALOG_BASE+0x0037)
+#define WSA881X_BOOST_MISC3_CTL			(WSA881X_ANALOG_BASE+0x0038)
+#define WSA881X_BOOST_ATEST_CTL			(WSA881X_ANALOG_BASE+0x0039)
+#define WSA881X_SPKR_PROT_FE_GAIN		(WSA881X_ANALOG_BASE+0x003A)
+#define WSA881X_SPKR_PROT_FE_CM_LDO_SET		(WSA881X_ANALOG_BASE+0x003B)
+#define WSA881X_SPKR_PROT_FE_ISENSE_BIAS_SET1	(WSA881X_ANALOG_BASE+0x003C)
+#define WSA881X_SPKR_PROT_FE_ISENSE_BIAS_SET2	(WSA881X_ANALOG_BASE+0x003D)
+#define WSA881X_SPKR_PROT_ATEST1		(WSA881X_ANALOG_BASE+0x003E)
+#define WSA881X_SPKR_PROT_ATEST2		(WSA881X_ANALOG_BASE+0x003F)
+#define WSA881X_SPKR_PROT_FE_VSENSE_VCM		(WSA881X_ANALOG_BASE+0x0040)
+#define WSA881X_SPKR_PROT_FE_VSENSE_BIAS_SET1	(WSA881X_ANALOG_BASE+0x0041)
+#define WSA881X_BONGO_RESRV_REG1		(WSA881X_ANALOG_BASE+0x0042)
+#define WSA881X_BONGO_RESRV_REG2		(WSA881X_ANALOG_BASE+0x0043)
+#define WSA881X_SPKR_PROT_SAR			(WSA881X_ANALOG_BASE+0x0044)
+#define WSA881X_SPKR_STATUS3			(WSA881X_ANALOG_BASE+0x0045)
+
+#define WSA881X_NUM_REGISTERS			(WSA881X_SPKR_STATUS3+1)
+#define WSA881X_MAX_REGISTER			(WSA881X_NUM_REGISTERS-1)
+#define WSA881X_CACHE_SIZE			WSA881X_NUM_REGISTERS
+
+#endif /* WSA881X_REGISTERS_H */
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/sound/soc/codecs/wsa881x-regmap.c	2019-01-22 16:16:29.595301573 +0100
@@ -0,0 +1,266 @@
+/*
+ * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/regmap.h>
+#include <linux/device.h>
+#include "wsa881x-registers.h"
+#include "wsa881x.h"
+
+/*
+ * Default register reset values that are common across different versions
+ * are defined here. If a register reset value changes between versions,
+ * remove it from this structure and add it to the version-specific
+ * structures.
+ */
+static struct reg_default wsa881x_defaults[] = {
+	{WSA881X_CHIP_ID0, 0x00},
+	{WSA881X_CHIP_ID1, 0x00},
+	{WSA881X_CHIP_ID2, 0x00},
+	{WSA881X_CHIP_ID3, 0x02},
+	{WSA881X_BUS_ID, 0x00},
+	{WSA881X_CDC_RST_CTL, 0x00},
+	{WSA881X_CDC_TOP_CLK_CTL, 0x03},
+	{WSA881X_CDC_ANA_CLK_CTL, 0x00},
+	{WSA881X_CDC_DIG_CLK_CTL, 0x00},
+	{WSA881X_CLOCK_CONFIG, 0x00},
+	{WSA881X_ANA_CTL, 0x08},
+	{WSA881X_SWR_RESET_EN, 0x00},
+	{WSA881X_TEMP_DETECT_CTL, 0x01},
+	{WSA881X_TEMP_MSB, 0x00},
+	{WSA881X_TEMP_LSB, 0x00},
+	{WSA881X_TEMP_CONFIG0, 0x00},
+	{WSA881X_TEMP_CONFIG1, 0x00},
+	{WSA881X_CDC_CLIP_CTL, 0x03},
+	{WSA881X_SDM_PDM9_LSB, 0x00},
+	{WSA881X_SDM_PDM9_MSB, 0x00},
+	{WSA881X_CDC_RX_CTL, 0x7E},
+	{WSA881X_DEM_BYPASS_DATA0, 0x00},
+	{WSA881X_DEM_BYPASS_DATA1, 0x00},
+	{WSA881X_DEM_BYPASS_DATA2, 0x00},
+	{WSA881X_DEM_BYPASS_DATA3, 0x00},
+	{WSA881X_OTP_CTRL0, 0x00},
+	{WSA881X_OTP_CTRL1, 0x00},
+	{WSA881X_HDRIVE_CTL_GROUP1, 0x00},
+	{WSA881X_INTR_MODE, 0x00},
+	{WSA881X_INTR_STATUS, 0x00},
+	{WSA881X_INTR_CLEAR, 0x00},
+	{WSA881X_INTR_LEVEL, 0x00},
+	{WSA881X_INTR_SET, 0x00},
+	{WSA881X_INTR_TEST, 0x00},
+	{WSA881X_PDM_TEST_MODE, 0x00},
+	{WSA881X_ATE_TEST_MODE, 0x00},
+	{WSA881X_PIN_CTL_MODE, 0x00},
+	{WSA881X_PIN_CTL_OE, 0x00},
+	{WSA881X_PIN_WDATA_IOPAD, 0x00},
+	{WSA881X_PIN_STATUS, 0x00},
+	{WSA881X_DIG_DEBUG_MODE, 0x00},
+	{WSA881X_DIG_DEBUG_SEL, 0x00},
+	{WSA881X_DIG_DEBUG_EN, 0x00},
+	{WSA881X_SWR_HM_TEST1, 0x08},
+	{WSA881X_SWR_HM_TEST2, 0x00},
+	{WSA881X_TEMP_DETECT_DBG_CTL, 0x00},
+	{WSA881X_TEMP_DEBUG_MSB, 0x00},
+	{WSA881X_TEMP_DEBUG_LSB, 0x00},
+	{WSA881X_SAMPLE_EDGE_SEL, 0x0C},
+	{WSA881X_SPARE_0, 0x00},
+	{WSA881X_SPARE_1, 0x00},
+	{WSA881X_SPARE_2, 0x00},
+	{WSA881X_OTP_REG_0, 0x01},
+	{WSA881X_OTP_REG_1, 0xFF},
+	{WSA881X_OTP_REG_2, 0xC0},
+	{WSA881X_OTP_REG_3, 0xFF},
+	{WSA881X_OTP_REG_4, 0xC0},
+	{WSA881X_OTP_REG_5, 0xFF},
+	{WSA881X_OTP_REG_6, 0xFF},
+	{WSA881X_OTP_REG_7, 0xFF},
+	{WSA881X_OTP_REG_8, 0xFF},
+	{WSA881X_OTP_REG_9, 0xFF},
+	{WSA881X_OTP_REG_10, 0xFF},
+	{WSA881X_OTP_REG_11, 0xFF},
+	{WSA881X_OTP_REG_12, 0xFF},
+	{WSA881X_OTP_REG_13, 0xFF},
+	{WSA881X_OTP_REG_14, 0xFF},
+	{WSA881X_OTP_REG_15, 0xFF},
+	{WSA881X_OTP_REG_16, 0xFF},
+	{WSA881X_OTP_REG_17, 0xFF},
+	{WSA881X_OTP_REG_18, 0xFF},
+	{WSA881X_OTP_REG_19, 0xFF},
+	{WSA881X_OTP_REG_20, 0xFF},
+	{WSA881X_OTP_REG_21, 0xFF},
+	{WSA881X_OTP_REG_22, 0xFF},
+	{WSA881X_OTP_REG_23, 0xFF},
+	{WSA881X_OTP_REG_24, 0x03},
+	{WSA881X_OTP_REG_25, 0x01},
+	{WSA881X_OTP_REG_26, 0x03},
+	{WSA881X_OTP_REG_27, 0x11},
+	{WSA881X_OTP_REG_63, 0x40},
+	/* WSA881x Analog registers */
+	{WSA881X_BIAS_REF_CTRL, 0x6C},
+	{WSA881X_BIAS_TEST, 0x16},
+	{WSA881X_BIAS_BIAS, 0xF0},
+	{WSA881X_TEMP_OP, 0x00},
+	{WSA881X_TEMP_IREF_CTRL, 0x56},
+	{WSA881X_TEMP_ISENS_CTRL, 0x47},
+	{WSA881X_TEMP_CLK_CTRL, 0x87},
+	{WSA881X_TEMP_TEST, 0x00},
+	{WSA881X_TEMP_BIAS, 0x51},
+	{WSA881X_TEMP_DOUT_MSB, 0x00},
+	{WSA881X_TEMP_DOUT_LSB, 0x00},
+	{WSA881X_ADC_EN_MODU_V, 0x00},
+	{WSA881X_ADC_EN_MODU_I, 0x00},
+	{WSA881X_ADC_EN_DET_TEST_V, 0x00},
+	{WSA881X_ADC_EN_DET_TEST_I, 0x00},
+	{WSA881X_ADC_EN_SEL_IBAIS, 0x10},
+	{WSA881X_SPKR_DRV_EN, 0x74},
+	{WSA881X_SPKR_DRV_DBG, 0x15},
+	{WSA881X_SPKR_PWRSTG_DBG, 0x00},
+	{WSA881X_SPKR_OCP_CTL, 0xD4},
+	{WSA881X_SPKR_CLIP_CTL, 0x90},
+	{WSA881X_SPKR_PA_INT, 0x54},
+	{WSA881X_SPKR_BIAS_CAL, 0xAC},
+	{WSA881X_SPKR_STATUS1, 0x00},
+	{WSA881X_SPKR_STATUS2, 0x00},
+	{WSA881X_BOOST_EN_CTL, 0x18},
+	{WSA881X_BOOST_CURRENT_LIMIT, 0x7A},
+	{WSA881X_BOOST_PRESET_OUT2, 0x70},
+	{WSA881X_BOOST_FORCE_OUT, 0x0E},
+	{WSA881X_BOOST_LDO_PROG, 0x16},
+	{WSA881X_BOOST_SLOPE_COMP_ISENSE_FB, 0x71},
+	{WSA881X_BOOST_RON_CTL, 0x0F},
+	{WSA881X_BOOST_ZX_CTL, 0x34},
+	{WSA881X_BOOST_START_CTL, 0x23},
+	{WSA881X_BOOST_MISC1_CTL, 0x80},
+	{WSA881X_BOOST_MISC2_CTL, 0x00},
+	{WSA881X_BOOST_MISC3_CTL, 0x00},
+	{WSA881X_BOOST_ATEST_CTL, 0x00},
+	{WSA881X_SPKR_PROT_FE_GAIN, 0x46},
+	{WSA881X_SPKR_PROT_FE_CM_LDO_SET, 0x3B},
+	{WSA881X_SPKR_PROT_FE_ISENSE_BIAS_SET1, 0x8D},
+	{WSA881X_SPKR_PROT_FE_ISENSE_BIAS_SET2, 0x8D},
+	{WSA881X_SPKR_PROT_ATEST1, 0x01},
+	{WSA881X_SPKR_PROT_FE_VSENSE_VCM, 0x8D},
+	{WSA881X_SPKR_PROT_FE_VSENSE_BIAS_SET1, 0x4D},
+	{WSA881X_SPKR_PROT_SAR, 0x00},
+	{WSA881X_SPKR_STATUS3, 0x00},
+};
+
+/* Default register reset values for WSA881x rev 2.0 */
+static struct reg_sequence wsa881x_rev_2_0[] = {
+	{WSA881X_RESET_CTL, 0x00, 0x00},
+	{WSA881X_TADC_VALUE_CTL, 0x01, 0x00},
+	{WSA881X_INTR_MASK, 0x1B, 0x00},
+	{WSA881X_IOPAD_CTL, 0x00, 0x00},
+	{WSA881X_OTP_REG_28, 0x3F, 0x00},
+	{WSA881X_OTP_REG_29, 0x3F, 0x00},
+	{WSA881X_OTP_REG_30, 0x01, 0x00},
+	{WSA881X_OTP_REG_31, 0x01, 0x00},
+	{WSA881X_TEMP_ADC_CTRL, 0x03, 0x00},
+	{WSA881X_ADC_SEL_IBIAS, 0x45, 0x00},
+	{WSA881X_SPKR_DRV_GAIN, 0xC1, 0x00},
+	{WSA881X_SPKR_DAC_CTL, 0x42, 0x00},
+	{WSA881X_SPKR_BBM_CTL, 0x02, 0x00},
+	{WSA881X_SPKR_MISC_CTL1, 0x40, 0x00},
+	{WSA881X_SPKR_MISC_CTL2, 0x07, 0x00},
+	{WSA881X_SPKR_BIAS_INT, 0x5F, 0x00},
+	{WSA881X_SPKR_BIAS_PSRR, 0x44, 0x00},
+	{WSA881X_BOOST_PS_CTL, 0xA0, 0x00},
+	{WSA881X_BOOST_PRESET_OUT1, 0xB7, 0x00},
+	{WSA881X_BOOST_LOOP_STABILITY, 0x8D, 0x00},
+	{WSA881X_SPKR_PROT_ATEST2, 0x02, 0x00},
+	{WSA881X_BONGO_RESRV_REG1, 0x5E, 0x00},
+	{WSA881X_BONGO_RESRV_REG2, 0x07, 0x00},
+};
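+
+/*
+ * Note: each reg_sequence entry above is { reg, def, delay_us }; the
+ * third field is a post-write delay and is zero throughout. Since
+ * wsa881x_regmap_defaults() applies this table with the cache in
+ * cache-only mode, the writes only override the cached defaults and
+ * are not sent to the hardware.
+ */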
+
+/*
+ * wsa881x_regmap_defaults - update regmap default register values
+ * @regmap: pointer to regmap structure
+ * @version: wsa881x version id
+ *
+ * Update regmap default register values based on version id
+ *
+ */
+void wsa881x_regmap_defaults(struct regmap *regmap, u8 version)
+{
+	int ret = 0;
+
+	if (!regmap) {
+		pr_debug("%s: regmap structure is NULL\n", __func__);
+		return;
+	}
+
+	regcache_cache_only(regmap, true);
+	ret = regmap_multi_reg_write(regmap, wsa881x_rev_2_0,
+				     ARRAY_SIZE(wsa881x_rev_2_0));
+	regcache_cache_only(regmap, false);
+
+	if (ret)
+		pr_debug("%s: Failed to update regmap defaults, ret = %d\n",
+			 __func__, ret);
+}
+EXPORT_SYMBOL(wsa881x_regmap_defaults);
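+
+/*
+ * Minimal usage sketch (hypothetical caller; the version would come
+ * from the CHIP_ID registers read at device init):
+ *
+ *	u8 version = wsa881x_get_version(wsa881x);	// hypothetical helper
+ *	wsa881x_regmap_defaults(wsa881x->regmap, version);
+ *
+ * Note that in this snapshot the version argument is accepted but not
+ * used: the rev 2.0 overrides are applied unconditionally.
+ */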
+
+static bool wsa881x_readable_register(struct device *dev, unsigned int reg)
+{
+	return wsa881x_reg_readable[reg];
+}
+
+static bool wsa881x_volatile_register(struct device *dev, unsigned int reg)
+{
+	switch (reg) {
+	case WSA881X_CHIP_ID0:
+	case WSA881X_CHIP_ID1:
+	case WSA881X_CHIP_ID2:
+	case WSA881X_CHIP_ID3:
+	case WSA881X_BUS_ID:
+	case WSA881X_TEMP_MSB:
+	case WSA881X_TEMP_LSB:
+	case WSA881X_SDM_PDM9_LSB:
+	case WSA881X_SDM_PDM9_MSB:
+	case WSA881X_OTP_CTRL1:
+	case WSA881X_INTR_STATUS:
+	case WSA881X_ATE_TEST_MODE:
+	case WSA881X_PIN_STATUS:
+	case WSA881X_SWR_HM_TEST2:
+	case WSA881X_SPKR_STATUS1:
+	case WSA881X_SPKR_STATUS2:
+	case WSA881X_SPKR_STATUS3:
+	case WSA881X_OTP_REG_0:
+	case WSA881X_OTP_REG_1:
+	case WSA881X_OTP_REG_2:
+	case WSA881X_OTP_REG_3:
+	case WSA881X_OTP_REG_4:
+	case WSA881X_OTP_REG_5:
+	case WSA881X_OTP_REG_31:
+	case WSA881X_TEMP_DOUT_MSB:
+	case WSA881X_TEMP_DOUT_LSB:
+	case WSA881X_TEMP_OP:
+	case WSA881X_SPKR_PROT_SAR:
+		return true;
+	default:
+		return false;
+	}
+}
+
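+/*
+ * Soundwire register map: 16-bit register addresses (covering the
+ * 0x3000 digital and 0x3100 analog spaces), 8-bit values, rbtree
+ * cache. Volatile registers (identity, status, temperature and some
+ * OTP registers) bypass the cache and are always read from hardware.
+ */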
+struct regmap_config wsa881x_regmap_config = {
+	.reg_bits = 16,
+	.val_bits = 8,
+	.cache_type = REGCACHE_RBTREE,
+	.reg_defaults = wsa881x_defaults,
+	.num_reg_defaults = ARRAY_SIZE(wsa881x_defaults),
+	.max_register = WSA881X_MAX_REGISTER,
+	.volatile_reg = wsa881x_volatile_register,
+	.readable_reg = wsa881x_readable_register,
+	.reg_format_endian = REGMAP_ENDIAN_NATIVE,
+	.val_format_endian = REGMAP_ENDIAN_NATIVE,
+	.can_multi_write = true,
+};
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/sound/soc/codecs/wsa881x-tables.c	2019-01-22 16:16:29.595301573 +0100
@@ -0,0 +1,171 @@
+/*
+ * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/regmap.h>
+#include <linux/device.h>
+#include "wsa881x-registers.h"
+
+const u8 wsa881x_reg_readable[WSA881X_CACHE_SIZE] = {
+	[WSA881X_CHIP_ID0] = 1,
+	[WSA881X_CHIP_ID1] = 1,
+	[WSA881X_CHIP_ID2] = 1,
+	[WSA881X_CHIP_ID3] = 1,
+	[WSA881X_BUS_ID] = 1,
+	[WSA881X_CDC_RST_CTL] = 1,
+	[WSA881X_CDC_TOP_CLK_CTL] = 1,
+	[WSA881X_CDC_ANA_CLK_CTL] = 1,
+	[WSA881X_CDC_DIG_CLK_CTL] = 1,
+	[WSA881X_CLOCK_CONFIG] = 1,
+	[WSA881X_ANA_CTL] = 1,
+	[WSA881X_SWR_RESET_EN] = 1,
+	[WSA881X_RESET_CTL] = 1,
+	[WSA881X_TADC_VALUE_CTL] = 1,
+	[WSA881X_TEMP_DETECT_CTL] = 1,
+	[WSA881X_TEMP_MSB] = 1,
+	[WSA881X_TEMP_LSB] = 1,
+	[WSA881X_TEMP_CONFIG0] = 1,
+	[WSA881X_TEMP_CONFIG1] = 1,
+	[WSA881X_CDC_CLIP_CTL] = 1,
+	[WSA881X_SDM_PDM9_LSB] = 1,
+	[WSA881X_SDM_PDM9_MSB] = 1,
+	[WSA881X_CDC_RX_CTL] = 1,
+	[WSA881X_DEM_BYPASS_DATA0] = 1,
+	[WSA881X_DEM_BYPASS_DATA1] = 1,
+	[WSA881X_DEM_BYPASS_DATA2] = 1,
+	[WSA881X_DEM_BYPASS_DATA3] = 1,
+	[WSA881X_OTP_CTRL0] = 1,
+	[WSA881X_OTP_CTRL1] = 1,
+	[WSA881X_HDRIVE_CTL_GROUP1] = 1,
+	[WSA881X_INTR_MODE] = 1,
+	[WSA881X_INTR_MASK] = 1,
+	[WSA881X_INTR_STATUS] = 1,
+	[WSA881X_INTR_CLEAR] = 1,
+	[WSA881X_INTR_LEVEL] = 1,
+	[WSA881X_INTR_SET] = 1,
+	[WSA881X_INTR_TEST] = 1,
+	[WSA881X_PDM_TEST_MODE] = 1,
+	[WSA881X_ATE_TEST_MODE] = 1,
+	[WSA881X_PIN_CTL_MODE] = 1,
+	[WSA881X_PIN_CTL_OE] = 1,
+	[WSA881X_PIN_WDATA_IOPAD] = 1,
+	[WSA881X_PIN_STATUS] = 1,
+	[WSA881X_DIG_DEBUG_MODE] = 1,
+	[WSA881X_DIG_DEBUG_SEL] = 1,
+	[WSA881X_DIG_DEBUG_EN] = 1,
+	[WSA881X_SWR_HM_TEST1] = 1,
+	[WSA881X_SWR_HM_TEST2] = 1,
+	[WSA881X_TEMP_DETECT_DBG_CTL] = 1,
+	[WSA881X_TEMP_DEBUG_MSB] = 1,
+	[WSA881X_TEMP_DEBUG_LSB] = 1,
+	[WSA881X_SAMPLE_EDGE_SEL] = 1,
+	[WSA881X_IOPAD_CTL] = 1,
+	[WSA881X_SPARE_0] = 1,
+	[WSA881X_SPARE_1] = 1,
+	[WSA881X_SPARE_2] = 1,
+	[WSA881X_OTP_REG_0] = 1,
+	[WSA881X_OTP_REG_1] = 1,
+	[WSA881X_OTP_REG_2] = 1,
+	[WSA881X_OTP_REG_3] = 1,
+	[WSA881X_OTP_REG_4] = 1,
+	[WSA881X_OTP_REG_5] = 1,
+	[WSA881X_OTP_REG_6] = 1,
+	[WSA881X_OTP_REG_7] = 1,
+	[WSA881X_OTP_REG_8] = 1,
+	[WSA881X_OTP_REG_9] = 1,
+	[WSA881X_OTP_REG_10] = 1,
+	[WSA881X_OTP_REG_11] = 1,
+	[WSA881X_OTP_REG_12] = 1,
+	[WSA881X_OTP_REG_13] = 1,
+	[WSA881X_OTP_REG_14] = 1,
+	[WSA881X_OTP_REG_15] = 1,
+	[WSA881X_OTP_REG_16] = 1,
+	[WSA881X_OTP_REG_17] = 1,
+	[WSA881X_OTP_REG_18] = 1,
+	[WSA881X_OTP_REG_19] = 1,
+	[WSA881X_OTP_REG_20] = 1,
+	[WSA881X_OTP_REG_21] = 1,
+	[WSA881X_OTP_REG_22] = 1,
+	[WSA881X_OTP_REG_23] = 1,
+	[WSA881X_OTP_REG_24] = 1,
+	[WSA881X_OTP_REG_25] = 1,
+	[WSA881X_OTP_REG_26] = 1,
+	[WSA881X_OTP_REG_27] = 1,
+	[WSA881X_OTP_REG_28] = 1,
+	[WSA881X_OTP_REG_29] = 1,
+	[WSA881X_OTP_REG_30] = 1,
+	[WSA881X_OTP_REG_31] = 1,
+	[WSA881X_OTP_REG_63] = 1,
+	/* Analog Registers */
+	[WSA881X_BIAS_REF_CTRL] = 1,
+	[WSA881X_BIAS_TEST] = 1,
+	[WSA881X_BIAS_BIAS] = 1,
+	[WSA881X_TEMP_OP] = 1,
+	[WSA881X_TEMP_IREF_CTRL] = 1,
+	[WSA881X_TEMP_ISENS_CTRL] = 1,
+	[WSA881X_TEMP_CLK_CTRL] = 1,
+	[WSA881X_TEMP_TEST] = 1,
+	[WSA881X_TEMP_BIAS] = 1,
+	[WSA881X_TEMP_ADC_CTRL] = 1,
+	[WSA881X_TEMP_DOUT_MSB] = 1,
+	[WSA881X_TEMP_DOUT_LSB] = 1,
+	[WSA881X_ADC_EN_MODU_V] = 1,
+	[WSA881X_ADC_EN_MODU_I] = 1,
+	[WSA881X_ADC_EN_DET_TEST_V] = 1,
+	[WSA881X_ADC_EN_DET_TEST_I] = 1,
+	[WSA881X_ADC_SEL_IBIAS] = 1,
+	[WSA881X_ADC_EN_SEL_IBAIS] = 1,
+	[WSA881X_SPKR_DRV_EN] = 1,
+	[WSA881X_SPKR_DRV_GAIN] = 1,
+	[WSA881X_SPKR_DAC_CTL] = 1,
+	[WSA881X_SPKR_DRV_DBG] = 1,
+	[WSA881X_SPKR_PWRSTG_DBG] = 1,
+	[WSA881X_SPKR_OCP_CTL] = 1,
+	[WSA881X_SPKR_CLIP_CTL] = 1,
+	[WSA881X_SPKR_BBM_CTL] = 1,
+	[WSA881X_SPKR_MISC_CTL1] = 1,
+	[WSA881X_SPKR_MISC_CTL2] = 1,
+	[WSA881X_SPKR_BIAS_INT] = 1,
+	[WSA881X_SPKR_PA_INT] = 1,
+	[WSA881X_SPKR_BIAS_CAL] = 1,
+	[WSA881X_SPKR_BIAS_PSRR] = 1,
+	[WSA881X_SPKR_STATUS1] = 1,
+	[WSA881X_SPKR_STATUS2] = 1,
+	[WSA881X_BOOST_EN_CTL] = 1,
+	[WSA881X_BOOST_CURRENT_LIMIT] = 1,
+	[WSA881X_BOOST_PS_CTL] = 1,
+	[WSA881X_BOOST_PRESET_OUT1] = 1,
+	[WSA881X_BOOST_PRESET_OUT2] = 1,
+	[WSA881X_BOOST_FORCE_OUT] = 1,
+	[WSA881X_BOOST_LDO_PROG] = 1,
+	[WSA881X_BOOST_SLOPE_COMP_ISENSE_FB] = 1,
+	[WSA881X_BOOST_RON_CTL] = 1,
+	[WSA881X_BOOST_LOOP_STABILITY] = 1,
+	[WSA881X_BOOST_ZX_CTL] = 1,
+	[WSA881X_BOOST_START_CTL] = 1,
+	[WSA881X_BOOST_MISC1_CTL] = 1,
+	[WSA881X_BOOST_MISC2_CTL] = 1,
+	[WSA881X_BOOST_MISC3_CTL] = 1,
+	[WSA881X_BOOST_ATEST_CTL] = 1,
+	[WSA881X_SPKR_PROT_FE_GAIN] = 1,
+	[WSA881X_SPKR_PROT_FE_CM_LDO_SET] = 1,
+	[WSA881X_SPKR_PROT_FE_ISENSE_BIAS_SET1] = 1,
+	[WSA881X_SPKR_PROT_FE_ISENSE_BIAS_SET2] = 1,
+	[WSA881X_SPKR_PROT_ATEST1] = 1,
+	[WSA881X_SPKR_PROT_ATEST2] = 1,
+	[WSA881X_SPKR_PROT_FE_VSENSE_VCM] = 1,
+	[WSA881X_SPKR_PROT_FE_VSENSE_BIAS_SET1] = 1,
+	[WSA881X_BONGO_RESRV_REG1] = 1,
+	[WSA881X_BONGO_RESRV_REG2] = 1,
+	[WSA881X_SPKR_PROT_SAR] = 1,
+	[WSA881X_SPKR_STATUS3] = 1,
+};
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/sound/soc/codecs/wsa881x-temp-sensor.c	2019-01-22 16:16:29.595301573 +0100
@@ -0,0 +1,149 @@
+/* Copyright (c) 2015, 2017 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/bitops.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/delay.h>
+#include <linux/thermal.h>
+#include <sound/soc.h>
+#include "wsa881x-temp-sensor.h"
+
+#define T1_TEMP (-10)
+#define T2_TEMP 150
+#define LOW_TEMP_THRESHOLD 5
+#define HIGH_TEMP_THRESHOLD 45
+#define TEMP_INVALID	0xFFFF
+#define WSA881X_TEMP_RETRY 3
+/*
+ * wsa881x_get_temp - get wsa temperature
+ * @thermal: thermal zone device
+ * @temp: temperature value
+ *
+ * Get the temperature of wsa881x.
+ *
+ * Return: 0 on success or negative error code on failure.
+ */
+int wsa881x_get_temp(struct thermal_zone_device *thermal,
+		     int *temp)
+{
+	struct wsa881x_tz_priv *pdata;
+	struct snd_soc_codec *codec;
+	struct wsa_temp_register reg;
+	int dmeas, d1, d2;
+	int ret = 0;
+	int temp_val;
+	int t1 = T1_TEMP;
+	int t2 = T2_TEMP;
+	u8 retry = WSA881X_TEMP_RETRY;
+
+	if (!thermal)
+		return -EINVAL;
+
+	if (thermal->devdata) {
+		pdata = thermal->devdata;
+		if (pdata->codec) {
+			codec = pdata->codec;
+		} else {
+			pr_err("%s: codec is NULL\n", __func__);
+			return -EINVAL;
+		}
+	} else {
+		pr_err("%s: pdata is NULL\n", __func__);
+		return -EINVAL;
+	}
+temp_retry:
+	if (pdata->wsa_temp_reg_read) {
+		ret = pdata->wsa_temp_reg_read(codec, &reg);
+		if (ret) {
+			pr_err("%s: temperature register read failed: %d\n",
+				__func__, ret);
+			return ret;
+		}
+	} else {
+		pr_err("%s: wsa_temp_reg_read is NULL\n", __func__);
+		return -EINVAL;
+	}
+	/*
+	 * Temperature register values are expected to be in the
+	 * following ranges:
+	 * d1_msb = 68 - 92 and d1_lsb = 0, 64, 128, 192
+	 * d2_msb = 185 - 218 and d2_lsb = 0, 64, 128, 192
+	 */
+	if ((reg.d1_msb < 68 || reg.d1_msb > 92) ||
+	    (!(reg.d1_lsb == 0 || reg.d1_lsb == 64 || reg.d1_lsb == 128 ||
+		reg.d1_lsb == 192)) ||
+	    (reg.d2_msb < 185 || reg.d2_msb > 218) ||
+	    (!(reg.d2_lsb == 0 || reg.d2_lsb == 64 || reg.d2_lsb == 128 ||
+		reg.d2_lsb == 192))) {
+		printk_ratelimited("%s: Temperature registers[%d %d %d %d] are out of range\n",
+				   __func__, reg.d1_msb, reg.d1_lsb, reg.d2_msb,
+				   reg.d2_lsb);
+	}
+	dmeas = ((reg.dmeas_msb << 0x8) | reg.dmeas_lsb) >> 0x6;
+	d1 = ((reg.d1_msb << 0x8) | reg.d1_lsb) >> 0x6;
+	d2 = ((reg.d2_msb << 0x8) | reg.d2_lsb) >> 0x6;
+
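+	/*
+	 * Two-point linear interpolation between the OTP calibration
+	 * points (d1, t1) and (d2, t2). Worked example with
+	 * illustrative values (not real calibration data):
+	 * d1 = 321, d2 = 800, dmeas = 400 gives
+	 * temp = -10 + ((400 - 321) * (150 - (-10))) / (800 - 321)
+	 *      = -10 + 12640 / 479 = 16 degC.
+	 */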
+	if (d1 == d2)
+		temp_val = TEMP_INVALID;
+	else
+		temp_val = t1 + (((dmeas - d1) * (t2 - t1))/(d2 - d1));
+
+	if (temp_val <= LOW_TEMP_THRESHOLD ||
+		temp_val >= HIGH_TEMP_THRESHOLD) {
+		printk_ratelimited("%s: T0: %d is out of range[%d, %d]\n",
+				   __func__, temp_val, LOW_TEMP_THRESHOLD,
+				   HIGH_TEMP_THRESHOLD);
+		if (retry--) {
+			msleep(20);
+			goto temp_retry;
+		}
+	}
+	if (temp)
+		*temp = temp_val;
+	pr_debug("%s: t0 measured: %d dmeas = %d, d1 = %d, d2 = %d\n",
+		  __func__, temp_val, dmeas, d1, d2);
+	return ret;
+}
+EXPORT_SYMBOL(wsa881x_get_temp);
+
+static struct thermal_zone_device_ops wsa881x_thermal_ops = {
+	.get_temp = wsa881x_get_temp,
+};
+
+int wsa881x_init_thermal(struct wsa881x_tz_priv *tz_pdata)
+{
+	struct thermal_zone_device *tz_dev;
+
+	if (tz_pdata == NULL) {
+		pr_err("%s: thermal pdata is NULL\n", __func__);
+		return -EINVAL;
+	}
+	/* Register with the thermal zone */
+	tz_dev = thermal_zone_device_register(tz_pdata->name,
+				0, 0, tz_pdata,
+				&wsa881x_thermal_ops, NULL, 0, 0);
+	if (IS_ERR(tz_dev)) {
+		pr_err("%s: thermal device register failed.\n", __func__);
+		return -EINVAL;
+	}
+	tz_pdata->tz_dev = tz_dev;
+	return 0;
+}
+EXPORT_SYMBOL(wsa881x_init_thermal);
+
+void wsa881x_deinit_thermal(struct thermal_zone_device *tz_dev)
+{
+	if (tz_dev)
+		thermal_zone_device_unregister(tz_dev);
+}
+EXPORT_SYMBOL(wsa881x_deinit_thermal);
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/sound/soc/codecs/wsa881x-temp-sensor.h	2019-01-22 16:16:29.595301573 +0100
@@ -0,0 +1,39 @@
+/* Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#ifndef WSA881X_TEMP_SENSOR_H
+#define WSA881X_TEMP_SENSOR_H
+
+#include <linux/thermal.h>
+#include <sound/soc.h>
+
+struct wsa_temp_register {
+	u8 d1_msb;
+	u8 d1_lsb;
+	u8 d2_msb;
+	u8 d2_lsb;
+	u8 dmeas_msb;
+	u8 dmeas_lsb;
+};
+typedef int32_t (*wsa_temp_register_read)(struct snd_soc_codec *codec,
+					struct wsa_temp_register *wsa_temp_reg);
+struct wsa881x_tz_priv {
+	struct thermal_zone_device *tz_dev;
+	struct snd_soc_codec *codec;
+	struct wsa_temp_register *wsa_temp_reg;
+	char name[80];
+	wsa_temp_register_read wsa_temp_reg_read;
+};
+
+int wsa881x_get_temp(struct thermal_zone_device *tz_dev, int *temp);
+int wsa881x_init_thermal(struct wsa881x_tz_priv *tz_pdata);
+void wsa881x_deinit_thermal(struct thermal_zone_device *tz_dev);
+#endif /* WSA881X_TEMP_SENSOR_H */
diff -Nruw linux-4.4.115-fbx/sound/soc/msm./audio_test_mod.c linux-4.4.115-fbx/sound/soc/msm/audio_test_mod.c
--- linux-4.4.115-fbx/sound/soc/msm./audio_test_mod.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/sound/soc/msm/audio_test_mod.c	2019-10-29 09:26:26.141227624 +0100
@@ -0,0 +1,61 @@
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/of_device.h>
+
+static int audio_test_mod_probe(struct platform_device *pdev)
+{
+	pr_debug("%s: dev name %s\n", __func__, dev_name(&pdev->dev));
+	return 0;
+}
+
+static int audio_test_mod_remove(struct platform_device *pdev)
+{
+	return 0;
+}
+
+static const struct of_device_id audio_test_mod_dt_match[] = {
+	{.compatible = "qcom,audio-test-mod"},
+	{}
+};
+
+static struct platform_driver audio_test_mod_driver = {
+	.driver = {
+		.name = "audio-test-mod",
+		.owner = THIS_MODULE,
+		.of_match_table = audio_test_mod_dt_match,
+	},
+	.probe = audio_test_mod_probe,
+	.remove = audio_test_mod_remove,
+};
+
+static int __init audio_test_mod_init(void)
+{
+	return platform_driver_register(&audio_test_mod_driver);
+}
+
+static void __exit audio_test_mod_exit(void)
+{
+	platform_driver_unregister(&audio_test_mod_driver);
+}
+
+module_init(audio_test_mod_init);
+module_exit(audio_test_mod_exit);
+
+MODULE_DESCRIPTION("Audio test module driver");
+MODULE_LICENSE("GPL v2");
diff -Nruw linux-4.4.115-fbx/sound/soc/msm./device_event.h linux-4.4.115-fbx/sound/soc/msm/device_event.h
--- linux-4.4.115-fbx/sound/soc/msm./device_event.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/sound/soc/msm/device_event.h	2019-01-22 16:16:29.619301790 +0100
@@ -0,0 +1,20 @@
+/* Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __DEVICE_EVENT_H
+#define __DEVICE_EVENT_H
+
+#define QC_AUDIO_EXTERNAL_SPK_1_EVENT "qc_ext_spk_1"
+#define QC_AUDIO_EXTERNAL_SPK_2_EVENT "qc_ext_spk_2"
+#define QC_AUDIO_EXTERNAL_MIC_EVENT "qc_ext_mic"
+
+#endif /* __DEVICE_EVENT_H */
diff -Nruw linux-4.4.115-fbx/sound/soc/msm./Kconfig linux-4.4.115-fbx/sound/soc/msm/Kconfig
--- linux-4.4.115-fbx/sound/soc/msm./Kconfig	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/sound/soc/msm/Kconfig	2019-10-29 09:26:26.141227624 +0100
@@ -0,0 +1,238 @@
+menu "MSM SoC Audio support"
+
+config SND_SOC_MSM_HOSTLESS_PCM
+	tristate
+
+config SND_SOC_MSM_QDSP6V2_INTF
+	bool "SoC Q6 audio driver for MSM/APQ"
+	depends on MSM_QDSP6_APRV2_GLINK
+	help
+	 To add support for SoC audio on MSM/APQ.
+	 This will enable all the platform-specific
+	 interactions with the DSP. It includes the asm,
+	 adm and afe interfaces on the DSP.
+
+config SND_SOC_QDSP6V2
+	tristate "SoC ALSA audio driver for QDSP6V2"
+	select SND_SOC_MSM_QDSP6V2_INTF
+	select SND_SOC_COMPRESS
+	help
+	 To add support for MSM QDSP6V2 SoC audio.
+	 This will enable the sound soc platform-specific
+	 audio drivers. This includes the q6asm, q6adm and
+	 q6afe interfaces to the DSP using APR.
+
+config SND_SOC_QDSP_DEBUG
+	bool "QDSP Audio Driver Debug Feature"
+	help
+	 Configuration to enable debugging utilities for
+	 QDSP6-based audio drivers. One such utility
+	 induces a kernel panic upon encountering critical
+	 errors from the DSP audio modules.
+
+config DOLBY_DS2
+	bool "Enable Dolby DS2"
+	depends on SND_SOC_MSM_QDSP6V2_INTF
+	help
+	 To add support for Dolby DAP post processing.
+	 This support is to configure the post processing parameters
+	 to the DSP. The configuration includes sending the end point
+	 device, the end point dependent post processing parameters and
+	 the various post processing parameters.
+
+config DOLBY_LICENSE
+	bool "Enable Dolby LICENSE"
+	depends on SND_SOC_MSM_QDSP6V2_INTF
+	help
+	 To add support for Dolby DAP post processing,
+	 retaining only the DAP license-setting functionality.
+	 This is required by the Dolby GEF implementation, which needs
+	 nothing but the Dolby license validation functionality in the
+	 driver.
+
+config DTS_EAGLE
+	bool "Enable DTS Eagle Support"
+	depends on SND_SOC_MSM_QDSP6V2_INTF
+	select SND_HWDEP
+	help
+	 To add DTS Eagle support on QDSP6 targets.
+	 Eagle is a DTS pre/post processing
+	 package that includes HeadphoneX. The configuration
+	 includes sending tuning parameters of various modules.
+
+config DTS_SRS_TM
+	bool "Enable DTS SRS"
+	depends on SND_SOC_MSM_QDSP6V2_INTF
+	help
+	 To add support for DTS SRS post processing.
+	 This support is to configure the post processing
+	 parameters to DSP. The configuration includes sending
+	 tuning parameters of various modules.
+
+config QTI_PP
+	bool "Enable QTI PP"
+	depends on SND_SOC_MSM_QDSP6V2_INTF
+	help
+	 To add support for default QTI post processing.
+	 This support is to configure the post processing
+	 parameters to DSP. The configuration includes sending
+	 tuning parameters of various modules such as equalizer,
+	 customized mixing.
+
+config QTI_PP_AUDIOSPHERE
+	bool "Enable QTI AUDIOSPHERE PP"
+	depends on SND_SOC_MSM_QDSP6V2_INTF
+	help
+	 To add support for QTI audio sphere post processing.
+	 This support is to configure the post processing
+	 parameters to DSP. The configuration includes sending
+	 tuning parameters of audio sphere module.
+
+config SND_SOC_CPE
+	tristate "CPE drivers"
+	depends on SND_SOC_WCD_CPE
+	help
+	 To add support for Codec Processing Engine. This support
+	 is to enable CPE block on the codec and this config needs
+	 to be added to codecs that contain the CPE hardware block.
+	 The configuration includes the cpe lsm driver to enable
+	 listen on codec.
+
+config SND_SOC_INT_CODEC
+	tristate "SoC Machine driver for SDM660_INT"
+	depends on ARCH_QCOM
+	select SND_SOC_QDSP6V2
+	select SND_SOC_MSM_STUB
+	select SND_SOC_MSM_HOSTLESS_PCM
+	select SND_DYNAMIC_MINORS
+	select MSM_QDSP6_APRV2_GLINK
+	select MSM_QDSP6_SSR
+	select MSM_QDSP6_PDR
+	select MSM_QDSP6_NOTIFIER
+	select MSM_QDSP6V2_CODECS
+	select MSM_CDC_PINCTRL
+	select SND_SOC_MSM_SDW
+	select SND_SOC_SDM660_CDC
+	select SND_SOC_MSM_HDMI_CODEC_RX
+	select QTI_PP
+	select DTS_SRS_TM
+	select DOLBY_LICENSE
+	select SND_HWDEP
+	select MSM_ULTRASOUND
+	select DTS_EAGLE
+	select SND_SOC_SDM660_COMMON
+	select SND_SOC_COMPRESS
+	select PINCTRL_LPI
+	help
+	To add support for SoC audio on MSM_INT.
+	This will enable the sound soc drivers that
+	interface with the DSP, as well as the machine
+	driver and the corresponding DAI-links.
+
+config SND_SOC_EXT_CODEC
+	tristate "SoC Machine driver for SDM660_EXT"
+	depends on ARCH_QCOM
+	select SND_SOC_QDSP6V2
+	select SND_SOC_MSM_STUB
+	select SND_SOC_MSM_HOSTLESS_PCM
+	select SND_DYNAMIC_MINORS
+	select MSM_QDSP6_APRV2_GLINK
+	select MSM_QDSP6_SSR
+	select MSM_QDSP6_PDR
+	select MSM_QDSP6_NOTIFIER
+	select MSM_QDSP6V2_CODECS
+	select SND_SOC_WCD9335
+	select SND_SOC_WCD934X
+	select SND_SOC_WSA881X
+	select SND_SOC_MSM_HDMI_CODEC_RX
+	select MFD_CORE
+	select QTI_PP
+	select DTS_SRS_TM
+	select DOLBY_LICENSE
+	select SND_SOC_CPE
+	select SND_SOC_WCD_CPE
+	select SND_HWDEP
+	select MSM_ULTRASOUND
+	select DTS_EAGLE
+	select SND_SOC_SDM660_COMMON
+	select SND_SOC_COMPRESS
+	select PINCTRL_LPI
+	help
+	To add support for SoC audio on MSM_EXT.
+	This will enable the sound soc drivers that
+	interface with the DSP, as well as the machine
+	driver and the corresponding DAI-links.
+
+config SND_SOC_MSM8996
+	tristate "SoC Machine driver for MSM8996 boards"
+	depends on ARCH_MSM8996
+	select SND_SOC_COMPRESS
+	select SND_SOC_QDSP6V2
+	select SND_SOC_MSM_STUB
+	select SND_SOC_MSM_HOSTLESS_PCM
+	select SND_DYNAMIC_MINORS
+	select MSM_QDSP6_APRV2
+	select MSM_QDSP6V2_CODECS
+	select SND_SOC_WCD9335
+	select SND_SOC_WSA881X
+	select SND_SOC_MSM_HDMI_CODEC_RX
+	select DTS_SRS_TM
+	select QTI_PP
+	select QTI_PP_AUDIOSPHERE
+	select SND_SOC_CPE
+	select MSM_ULTRASOUND
+	select DOLBY_DS2
+	select SND_HWDEP
+	select DTS_EAGLE
+	help
+	 To add support for SoC audio on MSM8996.
+	 This will enable the sound soc drivers that
+	 interface with the DSP, as well as the machine
+	 driver and the corresponding DAI-links.
+
+config SND_SOC_MSM8998
+	tristate "SoC Machine driver for MSM8998 boards"
+	depends on ARCH_QCOM
+	select SND_SOC_COMPRESS
+	select SND_SOC_QDSP6V2
+	select SND_SOC_MSM_STUB
+	select SND_SOC_MSM_HOSTLESS_PCM
+	select SND_DYNAMIC_MINORS
+	select MSM_QDSP6_APRV2_GLINK
+	select MSM_QDSP6_SSR
+	select MSM_QDSP6_PDR
+	select MSM_QDSP6_NOTIFIER
+	select MSM_QDSP6V2_CODECS
+	select SND_SOC_WCD9335
+	select SND_SOC_WCD934X
+	select SND_SOC_WSA881X
+	select SND_SOC_MSM_HDMI_CODEC_RX
+	select DTS_SRS_TM
+	select QTI_PP
+	select SND_SOC_CPE
+	select MSM_ULTRASOUND
+	select DOLBY_LICENSE
+	select SND_HWDEP
+	select DTS_EAGLE
+	help
+	 To add support for SoC audio on MSM8998.
+	 This will enable the sound soc drivers that
+	 interface with the DSP, as well as the machine
+	 driver and the corresponding DAI-links.
+
+config SND_SOC_660
+	tristate "SoC Machine driver for SDM660 boards"
+	depends on ARCH_SDM660
+	select SND_SOC_INT_CODEC
+	select SND_SOC_EXT_CODEC
+	help
+	 To add support for SoC audio on SDM660.
+	 This will enable the sound soc drivers that
+	 interface with the DSP, as well as the machine
+	 driver and the corresponding DAI-links.
+
+endmenu
diff -Nruw linux-4.4.115-fbx/sound/soc/msm./Makefile linux-4.4.115-fbx/sound/soc/msm/Makefile
--- linux-4.4.115-fbx/sound/soc/msm./Makefile	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/sound/soc/msm/Makefile	2019-10-29 09:26:26.141227624 +0100
@@ -0,0 +1,38 @@
+# MSM Machine Support
+
+CFLAGS_msm8998.o = -I$(src)
+
+snd-soc-hostless-pcm-objs := msm-pcm-hostless.o audio_test_mod.o
+obj-$(CONFIG_SND_SOC_MSM_HOSTLESS_PCM) += snd-soc-hostless-pcm.o
+
+obj-$(CONFIG_SND_SOC_MSM_QDSP6V2_INTF) += qdsp6v2/
+
+snd-soc-qdsp6v2-objs := msm-dai-fe.o
+obj-$(CONFIG_SND_SOC_QDSP6V2) += snd-soc-qdsp6v2.o
+
+#for CPE drivers
+snd-soc-cpe-objs := msm-cpe-lsm.o
+obj-$(CONFIG_SND_SOC_CPE) += snd-soc-cpe.o
+
+# for MSM8996 sound card driver
+snd-soc-msm8996-objs := msm8996.o apq8096-auto.o
+obj-$(CONFIG_SND_SOC_MSM8996) += snd-soc-msm8996.o
+
+# for MSM8998 sound card driver
+snd-soc-msm8998-objs := msm8998.o
+obj-$(CONFIG_SND_SOC_MSM8998) += snd-soc-msm8998.o
+
+# for SDM660 sound card driver
+snd-soc-sdm660-common-objs := sdm660-common.o
+obj-$(CONFIG_SND_SOC_SDM660_COMMON) += snd-soc-sdm660-common.o
+
+# for SDM660 sound card driver
+snd-soc-int-codec-objs := sdm660-internal.o
+obj-$(CONFIG_SND_SOC_INT_CODEC) += snd-soc-sdm660-common.o
+obj-$(CONFIG_SND_SOC_INT_CODEC) += snd-soc-int-codec.o
+
+# for SDM660 sound card driver
+snd-soc-ext-codec-objs := sdm660-external.o sdm660-ext-dai-links.o
+obj-$(CONFIG_SND_SOC_EXT_CODEC) += snd-soc-sdm660-common.o
+obj-$(CONFIG_SND_SOC_EXT_CODEC) += snd-soc-ext-codec.o
diff -Nruw linux-4.4.115-fbx/sound/soc/msm./msm8998.c linux-4.4.115-fbx/sound/soc/msm/msm8998.c
--- linux-4.4.115-fbx/sound/soc/msm./msm8998.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/sound/soc/msm/msm8998.c	2019-07-17 21:25:15.849462456 +0200
@@ -0,0 +1,7701 @@
+/*
+ * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/gpio.h>
+#include <linux/of_gpio.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/regulator/consumer.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/switch.h>
+#include <linux/input.h>
+#include <linux/of_device.h>
+#include <linux/mfd/msm-cdc-pinctrl.h>
+#include <sound/core.h>
+#include <sound/soc.h>
+#include <sound/soc-dapm.h>
+#include <sound/pcm.h>
+#include <sound/jack.h>
+#include <sound/q6afe-v2.h>
+#include <sound/q6core.h>
+#include <sound/pcm_params.h>
+#include <sound/info.h>
+#include <linux/qdsp6v2/audio_notifier.h>
+#include "device_event.h"
+#include "qdsp6v2/msm-pcm-routing-v2.h"
+#include "../codecs/wcd9335.h"
+#include "../codecs/wcd934x/wcd934x.h"
+#include "../codecs/wcd934x/wcd934x-mbhc.h"
+#include "../codecs/wsa881x.h"
+#include "../codecs/wm8804.h"
+
+#define DRV_NAME "msm8998-asoc-snd"
+
+#define __CHIPSET__ "MSM8998 "
+#define MSM_DAILINK_NAME(name) (__CHIPSET__#name)
+
+#define SAMPLING_RATE_8KHZ      8000
+#define SAMPLING_RATE_11P025KHZ 11025
+#define SAMPLING_RATE_16KHZ     16000
+#define SAMPLING_RATE_22P05KHZ  22050
+#define SAMPLING_RATE_32KHZ     32000
+#define SAMPLING_RATE_44P1KHZ   44100
+#define SAMPLING_RATE_48KHZ     48000
+#define SAMPLING_RATE_88P2KHZ   88200
+#define SAMPLING_RATE_96KHZ     96000
+#define SAMPLING_RATE_176P4KHZ  176400
+#define SAMPLING_RATE_192KHZ    192000
+#define SAMPLING_RATE_352P8KHZ  352800
+#define SAMPLING_RATE_384KHZ    384000
+
+#define WCD9XXX_MBHC_DEF_BUTTONS    8
+#define WCD9XXX_MBHC_DEF_RLOADS     5
+#define CODEC_EXT_CLK_RATE          9600000
+#define ADSP_STATE_READY_TIMEOUT_MS 3000
+#define DEV_NAME_STR_LEN            32
+
+#define WSA8810_NAME_1 "wsa881x.20170211"
+#define WSA8810_NAME_2 "wsa881x.20170212"
+
+#define WCN_CDC_SLIM_RX_CH_MAX 2
+#define WCN_CDC_SLIM_TX_CH_MAX 3
+
+#define TDM_CHANNEL_MAX 8
+#define TDM_SLOT_OFFSET_MAX 8
+
+#define MSM_HIFI_ON 1
+
+enum {
+	SLIM_RX_0 = 0,
+	SLIM_RX_1,
+	SLIM_RX_2,
+	SLIM_RX_3,
+	SLIM_RX_4,
+	SLIM_RX_5,
+	SLIM_RX_6,
+	SLIM_RX_7,
+	SLIM_RX_MAX,
+};
+
+enum {
+	SLIM_TX_0 = 0,
+	SLIM_TX_1,
+	SLIM_TX_2,
+	SLIM_TX_3,
+	SLIM_TX_4,
+	SLIM_TX_5,
+	SLIM_TX_6,
+	SLIM_TX_7,
+	SLIM_TX_8,
+	SLIM_TX_MAX,
+};
+
+enum {
+	PRIM_MI2S = 0,
+	SEC_MI2S,
+	TERT_MI2S,
+	QUAT_MI2S,
+	MI2S_MAX,
+};
+
+enum {
+	PRIM_AUX_PCM = 0,
+	SEC_AUX_PCM,
+	TERT_AUX_PCM,
+	QUAT_AUX_PCM,
+	AUX_PCM_MAX,
+};
+
+enum {
+	PCM_I2S_SEL_PRIM = 0,
+	PCM_I2S_SEL_SEC,
+	PCM_I2S_SEL_TERT,
+	PCM_I2S_SEL_QUAT,
+	PCM_I2S_SEL_MAX,
+};
+
+struct mi2s_aux_pcm_common_conf {
+	struct mutex lock;
+	void *pcm_i2s_sel_vt_addr;
+};
+
+struct mi2s_conf {
+	struct mutex lock;
+	u32 ref_cnt;
+	u32 msm_is_mi2s_master;
+	u32 msm_is_ext_mclk;
+};
+
+struct auxpcm_conf {
+	struct mutex lock;
+	u32 ref_cnt;
+};
+
+struct dev_config {
+	u32 sample_rate;
+	u32 bit_format;
+	u32 channels;
+};
+
+enum {
+	HDMI_RX_IDX = 0,
+	DP_RX_IDX,
+	EXT_DISP_RX_IDX_MAX,
+};
+
+struct msm_wsa881x_dev_info {
+	struct device_node *of_node;
+	u32 index;
+};
+
+enum pinctrl_pin_state {
+	STATE_DISABLE = 0, /* All pins are in sleep state */
+	STATE_MI2S_ACTIVE, /* I2S = active, TDM = sleep */
+	STATE_TDM_ACTIVE,  /* I2S = sleep, TDM = active */
+};
+
+struct msm_pinctrl_info {
+	struct pinctrl *pinctrl;
+	struct pinctrl_state *mi2s_disable;
+	struct pinctrl_state *tdm_disable;
+	struct pinctrl_state *mi2s_active;
+	struct pinctrl_state *tdm_active;
+	enum pinctrl_pin_state curr_state;
+};
+
+struct msm_asoc_mach_data {
+	u32 mclk_freq;
+	int us_euro_gpio; /* used by gpio driver API */
+	struct device_node *us_euro_gpio_p; /* used by pinctrl API */
+	struct device_node *hph_en1_gpio_p; /* used by pinctrl API */
+	struct device_node *hph_en0_gpio_p; /* used by pinctrl API */
+	struct snd_info_entry *codec_root;
+	struct msm_pinctrl_info pinctrl_info;
+	bool has_fbx_wm8804;
+	bool has_fbx_sil9437;
+};
+
+struct msm_asoc_wcd93xx_codec {
+	void* (*get_afe_config_fn)(struct snd_soc_codec *codec,
+				   enum afe_config_type config_type);
+	void (*mbhc_hs_detect_exit)(struct snd_soc_codec *codec);
+};
+
+static const char *const pin_states[] = {"sleep", "i2s-active",
+					 "tdm-active"};
+
+enum {
+	TDM_0 = 0,
+	TDM_1,
+	TDM_2,
+	TDM_3,
+	TDM_4,
+	TDM_5,
+	TDM_6,
+	TDM_7,
+	TDM_PORT_MAX,
+};
+
+enum {
+	TDM_PRI = 0,
+	TDM_SEC,
+	TDM_TERT,
+	TDM_QUAT,
+	TDM_INTERFACE_MAX,
+};
+
+struct tdm_port {
+	u32 mode;
+	u32 channel;
+};
+
+/* TDM default config */
+static struct dev_config tdm_rx_cfg[TDM_INTERFACE_MAX][TDM_PORT_MAX] = {
+	{ /* PRI TDM */
+		{SAMPLING_RATE_48KHZ, SNDRV_PCM_FORMAT_S16_LE, 1}, /* RX_0 */
+		{SAMPLING_RATE_48KHZ, SNDRV_PCM_FORMAT_S16_LE, 1}, /* RX_1 */
+		{SAMPLING_RATE_48KHZ, SNDRV_PCM_FORMAT_S16_LE, 1}, /* RX_2 */
+		{SAMPLING_RATE_48KHZ, SNDRV_PCM_FORMAT_S16_LE, 1}, /* RX_3 */
+		{SAMPLING_RATE_48KHZ, SNDRV_PCM_FORMAT_S16_LE, 1}, /* RX_4 */
+		{SAMPLING_RATE_48KHZ, SNDRV_PCM_FORMAT_S16_LE, 1}, /* RX_5 */
+		{SAMPLING_RATE_48KHZ, SNDRV_PCM_FORMAT_S16_LE, 1}, /* RX_6 */
+		{SAMPLING_RATE_48KHZ, SNDRV_PCM_FORMAT_S16_LE, 1}, /* RX_7 */
+	},
+	{ /* SEC TDM */
+		{SAMPLING_RATE_48KHZ, SNDRV_PCM_FORMAT_S16_LE, 1}, /* RX_0 */
+		{SAMPLING_RATE_48KHZ, SNDRV_PCM_FORMAT_S16_LE, 1}, /* RX_1 */
+		{SAMPLING_RATE_48KHZ, SNDRV_PCM_FORMAT_S16_LE, 1}, /* RX_2 */
+		{SAMPLING_RATE_48KHZ, SNDRV_PCM_FORMAT_S16_LE, 1}, /* RX_3 */
+		{SAMPLING_RATE_48KHZ, SNDRV_PCM_FORMAT_S16_LE, 1}, /* RX_4 */
+		{SAMPLING_RATE_48KHZ, SNDRV_PCM_FORMAT_S16_LE, 1}, /* RX_5 */
+		{SAMPLING_RATE_48KHZ, SNDRV_PCM_FORMAT_S16_LE, 1}, /* RX_6 */
+		{SAMPLING_RATE_48KHZ, SNDRV_PCM_FORMAT_S16_LE, 1}, /* RX_7 */
+	},
+	{ /* TERT TDM */
+		{SAMPLING_RATE_48KHZ, SNDRV_PCM_FORMAT_S16_LE, 1}, /* RX_0 */
+		{SAMPLING_RATE_48KHZ, SNDRV_PCM_FORMAT_S16_LE, 1}, /* RX_1 */
+		{SAMPLING_RATE_48KHZ, SNDRV_PCM_FORMAT_S16_LE, 1}, /* RX_2 */
+		{SAMPLING_RATE_48KHZ, SNDRV_PCM_FORMAT_S16_LE, 1}, /* RX_3 */
+		{SAMPLING_RATE_48KHZ, SNDRV_PCM_FORMAT_S16_LE, 1}, /* RX_4 */
+		{SAMPLING_RATE_48KHZ, SNDRV_PCM_FORMAT_S16_LE, 1}, /* RX_5 */
+		{SAMPLING_RATE_48KHZ, SNDRV_PCM_FORMAT_S16_LE, 1}, /* RX_6 */
+		{SAMPLING_RATE_48KHZ, SNDRV_PCM_FORMAT_S16_LE, 1}, /* RX_7 */
+	},
+	{ /* QUAT TDM */
+		{SAMPLING_RATE_48KHZ, SNDRV_PCM_FORMAT_S16_LE, 1}, /* RX_0 */
+		{SAMPLING_RATE_48KHZ, SNDRV_PCM_FORMAT_S16_LE, 1}, /* RX_1 */
+		{SAMPLING_RATE_48KHZ, SNDRV_PCM_FORMAT_S16_LE, 1}, /* RX_2 */
+		{SAMPLING_RATE_48KHZ, SNDRV_PCM_FORMAT_S16_LE, 1}, /* RX_3 */
+		{SAMPLING_RATE_48KHZ, SNDRV_PCM_FORMAT_S16_LE, 1}, /* RX_4 */
+		{SAMPLING_RATE_48KHZ, SNDRV_PCM_FORMAT_S16_LE, 1}, /* RX_5 */
+		{SAMPLING_RATE_48KHZ, SNDRV_PCM_FORMAT_S16_LE, 1}, /* RX_6 */
+		{SAMPLING_RATE_48KHZ, SNDRV_PCM_FORMAT_S16_LE, 1}, /* RX_7 */
+	}
+};
+
+/* TDM default config */
+static struct dev_config tdm_tx_cfg[TDM_INTERFACE_MAX][TDM_PORT_MAX] = {
+	{ /* PRI TDM */
+		{SAMPLING_RATE_48KHZ, SNDRV_PCM_FORMAT_S16_LE, 1}, /* TX_0 */
+		{SAMPLING_RATE_48KHZ, SNDRV_PCM_FORMAT_S16_LE, 1}, /* TX_1 */
+		{SAMPLING_RATE_48KHZ, SNDRV_PCM_FORMAT_S16_LE, 1}, /* TX_2 */
+		{SAMPLING_RATE_48KHZ, SNDRV_PCM_FORMAT_S16_LE, 1}, /* TX_3 */
+		{SAMPLING_RATE_48KHZ, SNDRV_PCM_FORMAT_S16_LE, 1}, /* TX_4 */
+		{SAMPLING_RATE_48KHZ, SNDRV_PCM_FORMAT_S16_LE, 1}, /* TX_5 */
+		{SAMPLING_RATE_48KHZ, SNDRV_PCM_FORMAT_S16_LE, 1}, /* TX_6 */
+		{SAMPLING_RATE_48KHZ, SNDRV_PCM_FORMAT_S16_LE, 1}, /* TX_7 */
+	},
+	{ /* SEC TDM */
+		{SAMPLING_RATE_48KHZ, SNDRV_PCM_FORMAT_S16_LE, 1}, /* TX_0 */
+		{SAMPLING_RATE_48KHZ, SNDRV_PCM_FORMAT_S16_LE, 1}, /* TX_1 */
+		{SAMPLING_RATE_48KHZ, SNDRV_PCM_FORMAT_S16_LE, 1}, /* TX_2 */
+		{SAMPLING_RATE_48KHZ, SNDRV_PCM_FORMAT_S16_LE, 1}, /* TX_3 */
+		{SAMPLING_RATE_48KHZ, SNDRV_PCM_FORMAT_S16_LE, 1}, /* TX_4 */
+		{SAMPLING_RATE_48KHZ, SNDRV_PCM_FORMAT_S16_LE, 1}, /* TX_5 */
+		{SAMPLING_RATE_48KHZ, SNDRV_PCM_FORMAT_S16_LE, 1}, /* TX_6 */
+		{SAMPLING_RATE_48KHZ, SNDRV_PCM_FORMAT_S16_LE, 1}, /* TX_7 */
+	},
+	{ /* TERT TDM */
+		{SAMPLING_RATE_48KHZ, SNDRV_PCM_FORMAT_S16_LE, 1}, /* TX_0 */
+		{SAMPLING_RATE_48KHZ, SNDRV_PCM_FORMAT_S16_LE, 1}, /* TX_1 */
+		{SAMPLING_RATE_48KHZ, SNDRV_PCM_FORMAT_S16_LE, 1}, /* TX_2 */
+		{SAMPLING_RATE_48KHZ, SNDRV_PCM_FORMAT_S16_LE, 1}, /* TX_3 */
+		{SAMPLING_RATE_48KHZ, SNDRV_PCM_FORMAT_S16_LE, 1}, /* TX_4 */
+		{SAMPLING_RATE_48KHZ, SNDRV_PCM_FORMAT_S16_LE, 1}, /* TX_5 */
+		{SAMPLING_RATE_48KHZ, SNDRV_PCM_FORMAT_S16_LE, 1}, /* TX_6 */
+		{SAMPLING_RATE_48KHZ, SNDRV_PCM_FORMAT_S16_LE, 1}, /* TX_7 */
+	},
+	{ /* QUAT TDM */
+		{SAMPLING_RATE_48KHZ, SNDRV_PCM_FORMAT_S16_LE, 1}, /* TX_0 */
+		{SAMPLING_RATE_48KHZ, SNDRV_PCM_FORMAT_S16_LE, 1}, /* TX_1 */
+		{SAMPLING_RATE_48KHZ, SNDRV_PCM_FORMAT_S16_LE, 1}, /* TX_2 */
+		{SAMPLING_RATE_48KHZ, SNDRV_PCM_FORMAT_S16_LE, 1}, /* TX_3 */
+		{SAMPLING_RATE_48KHZ, SNDRV_PCM_FORMAT_S16_LE, 1}, /* TX_4 */
+		{SAMPLING_RATE_48KHZ, SNDRV_PCM_FORMAT_S16_LE, 1}, /* TX_5 */
+		{SAMPLING_RATE_48KHZ, SNDRV_PCM_FORMAT_S16_LE, 1}, /* TX_6 */
+		{SAMPLING_RATE_48KHZ, SNDRV_PCM_FORMAT_S16_LE, 1}, /* TX_7 */
+	}
+};
+
+/* TDM default offset; currently only TDM_RX_0 and TDM_TX_0 are supported */
+static unsigned int tdm_slot_offset[TDM_PORT_MAX][TDM_SLOT_OFFSET_MAX] = {
+	{0, 4, 8, 12, 16, 20, 24, 28},/* TX_0 | RX_0 */
+	{AFE_SLOT_MAPPING_OFFSET_INVALID},/* TX_1 | RX_1 */
+	{AFE_SLOT_MAPPING_OFFSET_INVALID},/* TX_2 | RX_2 */
+	{AFE_SLOT_MAPPING_OFFSET_INVALID},/* TX_3 | RX_3 */
+	{AFE_SLOT_MAPPING_OFFSET_INVALID},/* TX_4 | RX_4 */
+	{AFE_SLOT_MAPPING_OFFSET_INVALID},/* TX_5 | RX_5 */
+	{AFE_SLOT_MAPPING_OFFSET_INVALID},/* TX_6 | RX_6 */
+	{AFE_SLOT_MAPPING_OFFSET_INVALID},/* TX_7 | RX_7 */
+};
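+/*
+ * The offsets above appear to be byte offsets into the TDM frame: assuming
+ * 32-bit (4-byte) slots, {0, 4, 8, ..., 28} maps channels 0-7 of
+ * TDM_RX_0/TDM_TX_0 onto consecutive slots.
+ */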
+
+/* Default configuration of slimbus channels */
+static struct dev_config slim_rx_cfg[] = {
+	[SLIM_RX_0] = {SAMPLING_RATE_48KHZ, SNDRV_PCM_FORMAT_S16_LE, 1},
+	[SLIM_RX_1] = {SAMPLING_RATE_48KHZ, SNDRV_PCM_FORMAT_S16_LE, 1},
+	[SLIM_RX_2] = {SAMPLING_RATE_48KHZ, SNDRV_PCM_FORMAT_S16_LE, 1},
+	[SLIM_RX_3] = {SAMPLING_RATE_48KHZ, SNDRV_PCM_FORMAT_S16_LE, 1},
+	[SLIM_RX_4] = {SAMPLING_RATE_48KHZ, SNDRV_PCM_FORMAT_S16_LE, 1},
+	[SLIM_RX_5] = {SAMPLING_RATE_48KHZ, SNDRV_PCM_FORMAT_S16_LE, 1},
+	[SLIM_RX_6] = {SAMPLING_RATE_48KHZ, SNDRV_PCM_FORMAT_S16_LE, 1},
+	[SLIM_RX_7] = {SAMPLING_RATE_8KHZ, SNDRV_PCM_FORMAT_S16_LE, 1},
+};
+
+static struct dev_config slim_tx_cfg[] = {
+	[SLIM_TX_0] = {SAMPLING_RATE_48KHZ, SNDRV_PCM_FORMAT_S16_LE, 1},
+	[SLIM_TX_1] = {SAMPLING_RATE_48KHZ, SNDRV_PCM_FORMAT_S16_LE, 1},
+	[SLIM_TX_2] = {SAMPLING_RATE_48KHZ, SNDRV_PCM_FORMAT_S16_LE, 1},
+	[SLIM_TX_3] = {SAMPLING_RATE_48KHZ, SNDRV_PCM_FORMAT_S16_LE, 1},
+	[SLIM_TX_4] = {SAMPLING_RATE_48KHZ, SNDRV_PCM_FORMAT_S16_LE, 1},
+	[SLIM_TX_5] = {SAMPLING_RATE_48KHZ, SNDRV_PCM_FORMAT_S16_LE, 1},
+	[SLIM_TX_6] = {SAMPLING_RATE_48KHZ, SNDRV_PCM_FORMAT_S16_LE, 1},
+	[SLIM_TX_7] = {SAMPLING_RATE_8KHZ, SNDRV_PCM_FORMAT_S16_LE, 1},
+	[SLIM_TX_8] = {SAMPLING_RATE_48KHZ, SNDRV_PCM_FORMAT_S16_LE, 2},
+};
+
+
+/* Default configuration of external display BE */
+static struct dev_config ext_disp_rx_cfg[] = {
+	[HDMI_RX_IDX] = {SAMPLING_RATE_48KHZ, SNDRV_PCM_FORMAT_S16_LE, 2},
+	[DP_RX_IDX] =   {SAMPLING_RATE_48KHZ, SNDRV_PCM_FORMAT_S16_LE, 2},
+};
+
+static struct dev_config usb_rx_cfg = {
+	.sample_rate = SAMPLING_RATE_48KHZ,
+	.bit_format = SNDRV_PCM_FORMAT_S16_LE,
+	.channels = 2,
+};
+
+static struct dev_config usb_tx_cfg = {
+	.sample_rate = SAMPLING_RATE_48KHZ,
+	.bit_format = SNDRV_PCM_FORMAT_S16_LE,
+	.channels = 1,
+};
+
+static struct dev_config proxy_rx_cfg = {
+	.sample_rate = SAMPLING_RATE_48KHZ,
+	.bit_format = SNDRV_PCM_FORMAT_S16_LE,
+	.channels = 2,
+};
+
+/* Default configuration of MI2S channels */
+static struct dev_config mi2s_rx_cfg[] = {
+	[PRIM_MI2S] = {SAMPLING_RATE_48KHZ, SNDRV_PCM_FORMAT_S16_LE, 2},
+	[SEC_MI2S]  = {SAMPLING_RATE_48KHZ, SNDRV_PCM_FORMAT_S16_LE, 2},
+	[TERT_MI2S] = {SAMPLING_RATE_48KHZ, SNDRV_PCM_FORMAT_S16_LE, 2},
+	[QUAT_MI2S] = {SAMPLING_RATE_48KHZ, SNDRV_PCM_FORMAT_S16_LE, 2},
+};
+
+static struct dev_config mi2s_tx_cfg[] = {
+	[PRIM_MI2S] = {SAMPLING_RATE_48KHZ, SNDRV_PCM_FORMAT_S16_LE, 1},
+	[SEC_MI2S]  = {SAMPLING_RATE_48KHZ, SNDRV_PCM_FORMAT_S16_LE, 1},
+	[TERT_MI2S] = {SAMPLING_RATE_48KHZ, SNDRV_PCM_FORMAT_S16_LE, 1},
+	[QUAT_MI2S] = {SAMPLING_RATE_48KHZ, SNDRV_PCM_FORMAT_S16_LE, 1},
+};
+
+static struct dev_config aux_pcm_rx_cfg[] = {
+	[PRIM_AUX_PCM] = {SAMPLING_RATE_8KHZ, SNDRV_PCM_FORMAT_S16_LE, 1},
+	[SEC_AUX_PCM]  = {SAMPLING_RATE_8KHZ, SNDRV_PCM_FORMAT_S16_LE, 1},
+	[TERT_AUX_PCM] = {SAMPLING_RATE_8KHZ, SNDRV_PCM_FORMAT_S16_LE, 1},
+	[QUAT_AUX_PCM] = {SAMPLING_RATE_8KHZ, SNDRV_PCM_FORMAT_S16_LE, 1},
+};
+
+static struct dev_config aux_pcm_tx_cfg[] = {
+	[PRIM_AUX_PCM] = {SAMPLING_RATE_8KHZ, SNDRV_PCM_FORMAT_S16_LE, 1},
+	[SEC_AUX_PCM]  = {SAMPLING_RATE_8KHZ, SNDRV_PCM_FORMAT_S16_LE, 1},
+	[TERT_AUX_PCM] = {SAMPLING_RATE_8KHZ, SNDRV_PCM_FORMAT_S16_LE, 1},
+	[QUAT_AUX_PCM] = {SAMPLING_RATE_8KHZ, SNDRV_PCM_FORMAT_S16_LE, 1},
+};
+
+static int msm_vi_feed_tx_ch = 2;
+static const char *const slim_rx_ch_text[] = {"One", "Two", "Three", "Four",
+						"Five", "Six", "Seven",
+						"Eight"};
+static const char *const slim_tx_ch_text[] = {"One", "Two", "Three", "Four",
+						"Five", "Six", "Seven",
+						"Eight"};
+static const char *const vi_feed_ch_text[] = {"One", "Two"};
+static char const *bit_format_text[] = {"S16_LE", "S24_LE", "S24_3LE",
+					  "S32_LE"};
+static char const *ext_disp_bit_format_text[] = {"S16_LE", "S24_LE"};
+static char const *slim_sample_rate_text[] = {"KHZ_8", "KHZ_16",
+					"KHZ_32", "KHZ_44P1", "KHZ_48",
+					"KHZ_88P2", "KHZ_96", "KHZ_176P4",
+					"KHZ_192", "KHZ_352P8", "KHZ_384"};
+static char const *bt_sample_rate_text[] = {"KHZ_8", "KHZ_16", "KHZ_48"};
+static const char *const usb_ch_text[] = {"One", "Two", "Three", "Four",
+					   "Five", "Six", "Seven",
+					   "Eight"};
+static char const *ch_text[] = {"Two", "Three", "Four", "Five",
+					"Six", "Seven", "Eight"};
+static char const *usb_sample_rate_text[] = {"KHZ_8", "KHZ_11P025",
+					"KHZ_16", "KHZ_22P05",
+					"KHZ_32", "KHZ_44P1", "KHZ_48",
+					"KHZ_88P2", "KHZ_96", "KHZ_176P4",
+					"KHZ_192", "KHZ_352P8", "KHZ_384"};
+static char const *ext_disp_sample_rate_text[] = {"KHZ_48", "KHZ_96",
+					"KHZ_192", "KHZ_32", "KHZ_44P1",
+					"KHZ_88P2", "KHZ_176P4"};
+static char const *tdm_ch_text[] = {"One", "Two", "Three", "Four",
+				    "Five", "Six", "Seven", "Eight"};
+static char const *tdm_bit_format_text[] = {"S16_LE", "S24_LE", "S32_LE"};
+static char const *tdm_sample_rate_text[] = {"KHZ_8", "KHZ_16", "KHZ_32",
+					     "KHZ_44P1", "KHZ_48", "KHZ_96",
+					     "KHZ_192", "KHZ_352P8", "KHZ_384"};
+static const char *const auxpcm_rate_text[] = {"KHZ_8", "KHZ_16"};
+static char const *mi2s_rate_text[] = {"KHZ_8", "KHZ_16",
+				      "KHZ_32", "KHZ_44P1", "KHZ_48",
+				      "KHZ_88P2", "KHZ_96", "KHZ_176P4",
+				      "KHZ_192"};
+static const char *const mi2s_ch_text[] = {"One", "Two", "Three", "Four",
+					   "Five", "Six", "Seven",
+					   "Eight"};
+static const char *const hifi_text[] = {"Off", "On"};
+
+static SOC_ENUM_SINGLE_EXT_DECL(slim_0_rx_chs, slim_rx_ch_text);
+static SOC_ENUM_SINGLE_EXT_DECL(slim_2_rx_chs, slim_rx_ch_text);
+static SOC_ENUM_SINGLE_EXT_DECL(slim_0_tx_chs, slim_tx_ch_text);
+static SOC_ENUM_SINGLE_EXT_DECL(slim_1_tx_chs, slim_tx_ch_text);
+static SOC_ENUM_SINGLE_EXT_DECL(slim_5_rx_chs, slim_rx_ch_text);
+static SOC_ENUM_SINGLE_EXT_DECL(slim_6_rx_chs, slim_rx_ch_text);
+static SOC_ENUM_SINGLE_EXT_DECL(usb_rx_chs, usb_ch_text);
+static SOC_ENUM_SINGLE_EXT_DECL(usb_tx_chs, usb_ch_text);
+static SOC_ENUM_SINGLE_EXT_DECL(vi_feed_tx_chs, vi_feed_ch_text);
+static SOC_ENUM_SINGLE_EXT_DECL(ext_disp_rx_chs, ch_text);
+static SOC_ENUM_SINGLE_EXT_DECL(proxy_rx_chs, ch_text);
+static SOC_ENUM_SINGLE_EXT_DECL(slim_0_rx_format, bit_format_text);
+static SOC_ENUM_SINGLE_EXT_DECL(slim_5_rx_format, bit_format_text);
+static SOC_ENUM_SINGLE_EXT_DECL(slim_6_rx_format, bit_format_text);
+static SOC_ENUM_SINGLE_EXT_DECL(slim_0_tx_format, bit_format_text);
+static SOC_ENUM_SINGLE_EXT_DECL(usb_rx_format, bit_format_text);
+static SOC_ENUM_SINGLE_EXT_DECL(usb_tx_format, bit_format_text);
+static SOC_ENUM_SINGLE_EXT_DECL(ext_disp_rx_format, ext_disp_bit_format_text);
+static SOC_ENUM_SINGLE_EXT_DECL(slim_0_rx_sample_rate, slim_sample_rate_text);
+static SOC_ENUM_SINGLE_EXT_DECL(slim_2_rx_sample_rate, slim_sample_rate_text);
+static SOC_ENUM_SINGLE_EXT_DECL(slim_0_tx_sample_rate, slim_sample_rate_text);
+static SOC_ENUM_SINGLE_EXT_DECL(slim_5_rx_sample_rate, slim_sample_rate_text);
+static SOC_ENUM_SINGLE_EXT_DECL(slim_6_rx_sample_rate, slim_sample_rate_text);
+static SOC_ENUM_SINGLE_EXT_DECL(bt_sample_rate, bt_sample_rate_text);
+static SOC_ENUM_SINGLE_EXT_DECL(usb_rx_sample_rate, usb_sample_rate_text);
+static SOC_ENUM_SINGLE_EXT_DECL(usb_tx_sample_rate, usb_sample_rate_text);
+static SOC_ENUM_SINGLE_EXT_DECL(ext_disp_rx_sample_rate,
+				ext_disp_sample_rate_text);
+static SOC_ENUM_SINGLE_EXT_DECL(tdm_tx_chs, tdm_ch_text);
+static SOC_ENUM_SINGLE_EXT_DECL(tdm_tx_format, tdm_bit_format_text);
+static SOC_ENUM_SINGLE_EXT_DECL(tdm_tx_sample_rate, tdm_sample_rate_text);
+static SOC_ENUM_SINGLE_EXT_DECL(tdm_rx_chs, tdm_ch_text);
+static SOC_ENUM_SINGLE_EXT_DECL(tdm_rx_format, tdm_bit_format_text);
+static SOC_ENUM_SINGLE_EXT_DECL(tdm_rx_sample_rate, tdm_sample_rate_text);
+static SOC_ENUM_SINGLE_EXT_DECL(prim_aux_pcm_rx_sample_rate, auxpcm_rate_text);
+static SOC_ENUM_SINGLE_EXT_DECL(sec_aux_pcm_rx_sample_rate, auxpcm_rate_text);
+static SOC_ENUM_SINGLE_EXT_DECL(tert_aux_pcm_rx_sample_rate, auxpcm_rate_text);
+static SOC_ENUM_SINGLE_EXT_DECL(quat_aux_pcm_rx_sample_rate, auxpcm_rate_text);
+static SOC_ENUM_SINGLE_EXT_DECL(prim_aux_pcm_tx_sample_rate, auxpcm_rate_text);
+static SOC_ENUM_SINGLE_EXT_DECL(sec_aux_pcm_tx_sample_rate, auxpcm_rate_text);
+static SOC_ENUM_SINGLE_EXT_DECL(tert_aux_pcm_tx_sample_rate, auxpcm_rate_text);
+static SOC_ENUM_SINGLE_EXT_DECL(quat_aux_pcm_tx_sample_rate, auxpcm_rate_text);
+static SOC_ENUM_SINGLE_EXT_DECL(prim_mi2s_rx_sample_rate, mi2s_rate_text);
+static SOC_ENUM_SINGLE_EXT_DECL(sec_mi2s_rx_sample_rate, mi2s_rate_text);
+static SOC_ENUM_SINGLE_EXT_DECL(tert_mi2s_rx_sample_rate, mi2s_rate_text);
+static SOC_ENUM_SINGLE_EXT_DECL(quat_mi2s_rx_sample_rate, mi2s_rate_text);
+static SOC_ENUM_SINGLE_EXT_DECL(prim_mi2s_tx_sample_rate, mi2s_rate_text);
+static SOC_ENUM_SINGLE_EXT_DECL(sec_mi2s_tx_sample_rate, mi2s_rate_text);
+static SOC_ENUM_SINGLE_EXT_DECL(tert_mi2s_tx_sample_rate, mi2s_rate_text);
+static SOC_ENUM_SINGLE_EXT_DECL(quat_mi2s_tx_sample_rate, mi2s_rate_text);
+static SOC_ENUM_SINGLE_EXT_DECL(prim_mi2s_rx_chs, mi2s_ch_text);
+static SOC_ENUM_SINGLE_EXT_DECL(prim_mi2s_tx_chs, mi2s_ch_text);
+static SOC_ENUM_SINGLE_EXT_DECL(sec_mi2s_rx_chs, mi2s_ch_text);
+static SOC_ENUM_SINGLE_EXT_DECL(sec_mi2s_tx_chs, mi2s_ch_text);
+static SOC_ENUM_SINGLE_EXT_DECL(tert_mi2s_rx_chs, mi2s_ch_text);
+static SOC_ENUM_SINGLE_EXT_DECL(tert_mi2s_tx_chs, mi2s_ch_text);
+static SOC_ENUM_SINGLE_EXT_DECL(quat_mi2s_rx_chs, mi2s_ch_text);
+static SOC_ENUM_SINGLE_EXT_DECL(quat_mi2s_tx_chs, mi2s_ch_text);
+static SOC_ENUM_SINGLE_EXT_DECL(mi2s_rx_format, bit_format_text);
+static SOC_ENUM_SINGLE_EXT_DECL(mi2s_tx_format, bit_format_text);
+static SOC_ENUM_SINGLE_EXT_DECL(hifi_function, hifi_text);
+
+static struct platform_device *spdev;
+static int msm_hifi_control;
+
+static bool is_initial_boot;
+static bool codec_reg_done;
+static struct snd_soc_aux_dev *msm_aux_dev;
+static struct snd_soc_codec_conf *msm_codec_conf;
+static struct msm_asoc_wcd93xx_codec msm_codec_fn;
+
+static void *def_tasha_mbhc_cal(void);
+static void *def_tavil_mbhc_cal(void);
+static int msm_snd_enable_codec_ext_clk(struct snd_soc_codec *codec,
+					int enable, bool dapm);
+static int msm_wsa881x_init(struct snd_soc_component *component);
+
+/*
+ * LINEIN must be reported if the R/L channel impedance is
+ * larger than 5 kohm.
+ */
+static struct wcd_mbhc_config wcd_mbhc_cfg = {
+	.read_fw_bin = false,
+	.calibration = NULL,
+	.detect_extn_cable = true,
+	.mono_stero_detection = false,
+	.swap_gnd_mic = NULL,
+	.hs_ext_micbias = true,
+	.key_code[0] = KEY_MEDIA,
+	.key_code[1] = KEY_VOICECOMMAND,
+	.key_code[2] = KEY_VOLUMEUP,
+	.key_code[3] = KEY_VOLUMEDOWN,
+	.key_code[4] = 0,
+	.key_code[5] = 0,
+	.key_code[6] = 0,
+	.key_code[7] = 0,
+	.linein_th = 5000,
+	.moisture_en = true,
+	.mbhc_micbias = MIC_BIAS_2,
+	.anc_micbias = MIC_BIAS_2,
+	.enable_anc_mic_detect = false,
+};
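+/*
+ * The key_code[] entries above map MBHC headset buttons 0-3 onto the input
+ * event codes KEY_MEDIA, KEY_VOICECOMMAND, KEY_VOLUMEUP and KEY_VOLUMEDOWN;
+ * the remaining entries are left at 0 (no key reported).
+ */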
+
+static struct snd_soc_dapm_route wcd_audio_paths_tasha[] = {
+	{"MIC BIAS1", NULL, "MCLK TX"},
+	{"MIC BIAS2", NULL, "MCLK TX"},
+	{"MIC BIAS3", NULL, "MCLK TX"},
+	{"MIC BIAS4", NULL, "MCLK TX"},
+};
+
+static struct snd_soc_dapm_route wcd_audio_paths[] = {
+	{"MIC BIAS1", NULL, "MCLK"},
+	{"MIC BIAS2", NULL, "MCLK"},
+	{"MIC BIAS3", NULL, "MCLK"},
+	{"MIC BIAS4", NULL, "MCLK"},
+};
+
+static u32 mi2s_ebit_clk[MI2S_MAX] = {
+	Q6AFE_LPASS_CLK_ID_PRI_MI2S_EBIT,
+	Q6AFE_LPASS_CLK_ID_SEC_MI2S_EBIT,
+	Q6AFE_LPASS_CLK_ID_TER_MI2S_EBIT,
+	Q6AFE_LPASS_CLK_ID_QUAD_MI2S_EBIT,
+};
+
+static struct afe_clk_set mi2s_clk[MI2S_MAX] = {
+	{
+		AFE_API_VERSION_I2S_CONFIG,
+		Q6AFE_LPASS_CLK_ID_PRI_MI2S_IBIT,
+		Q6AFE_LPASS_IBIT_CLK_1_P536_MHZ,
+		Q6AFE_LPASS_CLK_ATTRIBUTE_COUPLE_NO,
+		Q6AFE_LPASS_CLK_ROOT_DEFAULT,
+		0,
+	},
+	{
+		AFE_API_VERSION_I2S_CONFIG,
+		Q6AFE_LPASS_CLK_ID_SEC_MI2S_IBIT,
+		Q6AFE_LPASS_IBIT_CLK_1_P536_MHZ,
+		Q6AFE_LPASS_CLK_ATTRIBUTE_COUPLE_NO,
+		Q6AFE_LPASS_CLK_ROOT_DEFAULT,
+		0,
+	},
+	{
+		AFE_API_VERSION_I2S_CONFIG,
+		Q6AFE_LPASS_CLK_ID_TER_MI2S_IBIT,
+		Q6AFE_LPASS_IBIT_CLK_1_P536_MHZ,
+		Q6AFE_LPASS_CLK_ATTRIBUTE_COUPLE_NO,
+		Q6AFE_LPASS_CLK_ROOT_DEFAULT,
+		0,
+	},
+	{
+		AFE_API_VERSION_I2S_CONFIG,
+		Q6AFE_LPASS_CLK_ID_QUAD_MI2S_IBIT,
+		Q6AFE_LPASS_IBIT_CLK_1_P536_MHZ,
+		Q6AFE_LPASS_CLK_ATTRIBUTE_COUPLE_NO,
+		Q6AFE_LPASS_CLK_ROOT_DEFAULT,
+		0,
+	}
+};
+
+static struct afe_clk_set mi2s_mclk[MI2S_MAX] = {
+	{
+		AFE_API_VERSION_I2S_CONFIG,
+		Q6AFE_LPASS_CLK_ID_MCLK_1,
+		Q6AFE_LPASS_OSR_CLK_12_P288_MHZ,
+		Q6AFE_LPASS_CLK_ATTRIBUTE_COUPLE_NO,
+		Q6AFE_LPASS_CLK_ROOT_DEFAULT,
+		0,
+	},
+	{
+		AFE_API_VERSION_I2S_CONFIG,
+		Q6AFE_LPASS_CLK_ID_MCLK_2,
+		Q6AFE_LPASS_OSR_CLK_12_P288_MHZ,
+		Q6AFE_LPASS_CLK_ATTRIBUTE_COUPLE_NO,
+		Q6AFE_LPASS_CLK_ROOT_DEFAULT,
+		0,
+	},
+	{
+		AFE_API_VERSION_I2S_CONFIG,
+		Q6AFE_LPASS_CLK_ID_MCLK_3,
+		Q6AFE_LPASS_OSR_CLK_12_P288_MHZ,
+		Q6AFE_LPASS_CLK_ATTRIBUTE_COUPLE_NO,
+		Q6AFE_LPASS_CLK_ROOT_DEFAULT,
+		0,
+	},
+	{
+		AFE_API_VERSION_I2S_CONFIG,
+		Q6AFE_LPASS_CLK_ID_MCLK_4,
+		Q6AFE_LPASS_OSR_CLK_12_P288_MHZ,
+		Q6AFE_LPASS_CLK_ATTRIBUTE_COUPLE_NO,
+		Q6AFE_LPASS_CLK_ROOT_DEFAULT,
+		0,
+	}
+};
+
+static struct mi2s_aux_pcm_common_conf mi2s_auxpcm_conf[PCM_I2S_SEL_MAX];
+static struct mi2s_conf mi2s_intf_conf[MI2S_MAX];
+static struct auxpcm_conf auxpcm_intf_conf[AUX_PCM_MAX];
+
+static int slim_get_sample_rate_val(int sample_rate)
+{
+	int sample_rate_val = 0;
+
+	switch (sample_rate) {
+	case SAMPLING_RATE_8KHZ:
+		sample_rate_val = 0;
+		break;
+	case SAMPLING_RATE_16KHZ:
+		sample_rate_val = 1;
+		break;
+	case SAMPLING_RATE_32KHZ:
+		sample_rate_val = 2;
+		break;
+	case SAMPLING_RATE_44P1KHZ:
+		sample_rate_val = 3;
+		break;
+	case SAMPLING_RATE_48KHZ:
+		sample_rate_val = 4;
+		break;
+	case SAMPLING_RATE_88P2KHZ:
+		sample_rate_val = 5;
+		break;
+	case SAMPLING_RATE_96KHZ:
+		sample_rate_val = 6;
+		break;
+	case SAMPLING_RATE_176P4KHZ:
+		sample_rate_val = 7;
+		break;
+	case SAMPLING_RATE_192KHZ:
+		sample_rate_val = 8;
+		break;
+	case SAMPLING_RATE_352P8KHZ:
+		sample_rate_val = 9;
+		break;
+	case SAMPLING_RATE_384KHZ:
+		sample_rate_val = 10;
+		break;
+	default:
+		sample_rate_val = 4;
+		break;
+	}
+	return sample_rate_val;
+}
+
+static int slim_get_sample_rate(int value)
+{
+	int sample_rate = 0;
+
+	switch (value) {
+	case 0:
+		sample_rate = SAMPLING_RATE_8KHZ;
+		break;
+	case 1:
+		sample_rate = SAMPLING_RATE_16KHZ;
+		break;
+	case 2:
+		sample_rate = SAMPLING_RATE_32KHZ;
+		break;
+	case 3:
+		sample_rate = SAMPLING_RATE_44P1KHZ;
+		break;
+	case 4:
+		sample_rate = SAMPLING_RATE_48KHZ;
+		break;
+	case 5:
+		sample_rate = SAMPLING_RATE_88P2KHZ;
+		break;
+	case 6:
+		sample_rate = SAMPLING_RATE_96KHZ;
+		break;
+	case 7:
+		sample_rate = SAMPLING_RATE_176P4KHZ;
+		break;
+	case 8:
+		sample_rate = SAMPLING_RATE_192KHZ;
+		break;
+	case 9:
+		sample_rate = SAMPLING_RATE_352P8KHZ;
+		break;
+	case 10:
+		sample_rate = SAMPLING_RATE_384KHZ;
+		break;
+	default:
+		sample_rate = SAMPLING_RATE_48KHZ;
+		break;
+	}
+	return sample_rate;
+}
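+/*
+ * slim_get_sample_rate_val() and slim_get_sample_rate() are inverses of
+ * each other; the index values follow the order of slim_sample_rate_text[]
+ * ("KHZ_8" ... "KHZ_384"), so the two switches and the text table must be
+ * kept in sync.
+ */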
+
+static int slim_get_bit_format_val(int bit_format)
+{
+	int val = 0;
+
+	switch (bit_format) {
+	case SNDRV_PCM_FORMAT_S32_LE:
+		val = 3;
+		break;
+	case SNDRV_PCM_FORMAT_S24_3LE:
+		val = 2;
+		break;
+	case SNDRV_PCM_FORMAT_S24_LE:
+		val = 1;
+		break;
+	case SNDRV_PCM_FORMAT_S16_LE:
+	default:
+		val = 0;
+		break;
+	}
+	return val;
+}
+
+static int slim_get_bit_format(int val)
+{
+	int bit_fmt = SNDRV_PCM_FORMAT_S16_LE;
+
+	switch (val) {
+	case 0:
+		bit_fmt = SNDRV_PCM_FORMAT_S16_LE;
+		break;
+	case 1:
+		bit_fmt = SNDRV_PCM_FORMAT_S24_LE;
+		break;
+	case 2:
+		bit_fmt = SNDRV_PCM_FORMAT_S24_3LE;
+		break;
+	case 3:
+		bit_fmt = SNDRV_PCM_FORMAT_S32_LE;
+		break;
+	default:
+		bit_fmt = SNDRV_PCM_FORMAT_S16_LE;
+		break;
+	}
+	return bit_fmt;
+}
+
+static int slim_get_port_idx(struct snd_kcontrol *kcontrol)
+{
+	int port_id = 0;
+
+	if (strnstr(kcontrol->id.name, "SLIM_0_RX", sizeof("SLIM_0_RX")))
+		port_id = SLIM_RX_0;
+	else if (strnstr(kcontrol->id.name, "SLIM_2_RX", sizeof("SLIM_2_RX")))
+		port_id = SLIM_RX_2;
+	else if (strnstr(kcontrol->id.name, "SLIM_5_RX", sizeof("SLIM_5_RX")))
+		port_id = SLIM_RX_5;
+	else if (strnstr(kcontrol->id.name, "SLIM_6_RX", sizeof("SLIM_6_RX")))
+		port_id = SLIM_RX_6;
+	else if (strnstr(kcontrol->id.name, "SLIM_0_TX", sizeof("SLIM_0_TX")))
+		port_id = SLIM_TX_0;
+	else if (strnstr(kcontrol->id.name, "SLIM_1_TX", sizeof("SLIM_1_TX")))
+		port_id = SLIM_TX_1;
+	else {
+		pr_err("%s: unsupported channel: %s\n",
+			__func__, kcontrol->id.name);
+		return -EINVAL;
+	}
+
+	return port_id;
+}
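+/*
+ * Note that strnstr() above is bounded by sizeof("SLIM_x_yX"), i.e. the
+ * pattern length plus the terminating NUL, so a pattern only matches if it
+ * starts within the first byte or two of the control name; effectively
+ * this is a prefix match.
+ */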
+
+static int slim_rx_sample_rate_get(struct snd_kcontrol *kcontrol,
+				   struct snd_ctl_elem_value *ucontrol)
+{
+	int ch_num = slim_get_port_idx(kcontrol);
+
+	if (ch_num < 0)
+		return ch_num;
+
+	ucontrol->value.enumerated.item[0] =
+		slim_get_sample_rate_val(slim_rx_cfg[ch_num].sample_rate);
+
+	pr_debug("%s: slim[%d]_rx_sample_rate = %d, item = %d\n", __func__,
+		 ch_num, slim_rx_cfg[ch_num].sample_rate,
+		 ucontrol->value.enumerated.item[0]);
+
+	return 0;
+}
+
+static int slim_rx_sample_rate_put(struct snd_kcontrol *kcontrol,
+				   struct snd_ctl_elem_value *ucontrol)
+{
+	int ch_num = slim_get_port_idx(kcontrol);
+
+	if (ch_num < 0)
+		return ch_num;
+
+	slim_rx_cfg[ch_num].sample_rate =
+		slim_get_sample_rate(ucontrol->value.enumerated.item[0]);
+
+	pr_debug("%s: slim[%d]_rx_sample_rate = %d, item = %d\n", __func__,
+		 ch_num, slim_rx_cfg[ch_num].sample_rate,
+		 ucontrol->value.enumerated.item[0]);
+
+	return 0;
+}
+
+static int slim_tx_sample_rate_get(struct snd_kcontrol *kcontrol,
+				   struct snd_ctl_elem_value *ucontrol)
+{
+	int ch_num = slim_get_port_idx(kcontrol);
+
+	if (ch_num < 0)
+		return ch_num;
+
+	ucontrol->value.enumerated.item[0] =
+		slim_get_sample_rate_val(slim_tx_cfg[ch_num].sample_rate);
+
+	pr_debug("%s: slim[%d]_tx_sample_rate = %d, item = %d\n", __func__,
+		 ch_num, slim_tx_cfg[ch_num].sample_rate,
+		 ucontrol->value.enumerated.item[0]);
+
+	return 0;
+}
+
+static int slim_tx_sample_rate_put(struct snd_kcontrol *kcontrol,
+				   struct snd_ctl_elem_value *ucontrol)
+{
+	int sample_rate = 0;
+	int ch_num = slim_get_port_idx(kcontrol);
+
+	if (ch_num < 0)
+		return ch_num;
+
+	sample_rate = slim_get_sample_rate(ucontrol->value.enumerated.item[0]);
+	if (sample_rate == SAMPLING_RATE_44P1KHZ) {
+		pr_err("%s: unsupported sample rate %d for Tx path\n",
+			__func__, sample_rate);
+		return -EINVAL;
+	}
+	slim_tx_cfg[ch_num].sample_rate = sample_rate;
+
+	pr_debug("%s: slim[%d]_tx_sample_rate = %d, value = %d\n", __func__,
+		 ch_num, slim_tx_cfg[ch_num].sample_rate,
+		 ucontrol->value.enumerated.item[0]);
+
+	return 0;
+}
+
+static int slim_rx_bit_format_get(struct snd_kcontrol *kcontrol,
+				  struct snd_ctl_elem_value *ucontrol)
+{
+	int ch_num = slim_get_port_idx(kcontrol);
+
+	if (ch_num < 0)
+		return ch_num;
+
+	ucontrol->value.enumerated.item[0] =
+			slim_get_bit_format_val(slim_rx_cfg[ch_num].bit_format);
+
+	pr_debug("%s: slim[%d]_rx_bit_format = %d, ucontrol value = %d\n",
+		 __func__, ch_num, slim_rx_cfg[ch_num].bit_format,
+			ucontrol->value.enumerated.item[0]);
+
+	return 0;
+}
+
+static int slim_rx_bit_format_put(struct snd_kcontrol *kcontrol,
+				  struct snd_ctl_elem_value *ucontrol)
+{
+	int ch_num = slim_get_port_idx(kcontrol);
+
+	if (ch_num < 0)
+		return ch_num;
+
+	slim_rx_cfg[ch_num].bit_format =
+		slim_get_bit_format(ucontrol->value.enumerated.item[0]);
+
+	pr_debug("%s: slim[%d]_rx_bit_format = %d, ucontrol value = %d\n",
+		 __func__, ch_num, slim_rx_cfg[ch_num].bit_format,
+			ucontrol->value.enumerated.item[0]);
+
+	return 0;
+}
+
+static int slim_tx_bit_format_get(struct snd_kcontrol *kcontrol,
+				  struct snd_ctl_elem_value *ucontrol)
+{
+	int ch_num = slim_get_port_idx(kcontrol);
+
+	if (ch_num < 0)
+		return ch_num;
+
+	ucontrol->value.enumerated.item[0] =
+			slim_get_bit_format_val(slim_tx_cfg[ch_num].bit_format);
+
+	pr_debug("%s: slim[%d]_tx_bit_format = %d, ucontrol value = %d\n",
+		 __func__, ch_num, slim_tx_cfg[ch_num].bit_format,
+			ucontrol->value.enumerated.item[0]);
+
+	return 0;
+}
+
+static int slim_tx_bit_format_put(struct snd_kcontrol *kcontrol,
+				  struct snd_ctl_elem_value *ucontrol)
+{
+	int ch_num = slim_get_port_idx(kcontrol);
+
+	if (ch_num < 0)
+		return ch_num;
+
+	slim_tx_cfg[ch_num].bit_format =
+		slim_get_bit_format(ucontrol->value.enumerated.item[0]);
+
+	pr_debug("%s: slim[%d]_tx_bit_format = %d, ucontrol value = %d\n",
+		 __func__, ch_num, slim_tx_cfg[ch_num].bit_format,
+			ucontrol->value.enumerated.item[0]);
+
+	return 0;
+}
+
+static int msm_slim_rx_ch_get(struct snd_kcontrol *kcontrol,
+			      struct snd_ctl_elem_value *ucontrol)
+{
+	int ch_num = slim_get_port_idx(kcontrol);
+
+	if (ch_num < 0)
+		return ch_num;
+
+	pr_debug("%s: msm_slim_[%d]_rx_ch  = %d\n", __func__,
+		 ch_num, slim_rx_cfg[ch_num].channels);
+	ucontrol->value.enumerated.item[0] = slim_rx_cfg[ch_num].channels - 1;
+
+	return 0;
+}
+
+static int msm_slim_rx_ch_put(struct snd_kcontrol *kcontrol,
+			      struct snd_ctl_elem_value *ucontrol)
+{
+	int ch_num = slim_get_port_idx(kcontrol);
+
+	if (ch_num < 0)
+		return ch_num;
+
+	slim_rx_cfg[ch_num].channels = ucontrol->value.enumerated.item[0] + 1;
+	pr_debug("%s: msm_slim_[%d]_rx_ch  = %d\n", __func__,
+		 ch_num, slim_rx_cfg[ch_num].channels);
+
+	return 1;
+}
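+/*
+ * Returning 1 from a kcontrol put handler tells ALSA that the control value
+ * changed (triggering a notification to userspace); the channel-count put
+ * handlers here report a change unconditionally.
+ */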
+
+static int msm_slim_tx_ch_get(struct snd_kcontrol *kcontrol,
+			      struct snd_ctl_elem_value *ucontrol)
+{
+	int ch_num = slim_get_port_idx(kcontrol);
+
+	if (ch_num < 0)
+		return ch_num;
+
+	pr_debug("%s: msm_slim_[%d]_tx_ch  = %d\n", __func__,
+		 ch_num, slim_tx_cfg[ch_num].channels);
+	ucontrol->value.enumerated.item[0] = slim_tx_cfg[ch_num].channels - 1;
+
+	return 0;
+}
+
+static int msm_slim_tx_ch_put(struct snd_kcontrol *kcontrol,
+			      struct snd_ctl_elem_value *ucontrol)
+{
+	int ch_num = slim_get_port_idx(kcontrol);
+
+	if (ch_num < 0)
+		return ch_num;
+
+	slim_tx_cfg[ch_num].channels = ucontrol->value.enumerated.item[0] + 1;
+	pr_debug("%s: msm_slim_[%d]_tx_ch = %d\n", __func__,
+		 ch_num, slim_tx_cfg[ch_num].channels);
+
+	return 1;
+}
+
+static int msm_vi_feed_tx_ch_get(struct snd_kcontrol *kcontrol,
+				 struct snd_ctl_elem_value *ucontrol)
+{
+	ucontrol->value.integer.value[0] = msm_vi_feed_tx_ch - 1;
+	pr_debug("%s: msm_vi_feed_tx_ch = %ld\n", __func__,
+		 ucontrol->value.integer.value[0]);
+	return 0;
+}
+
+static int msm_vi_feed_tx_ch_put(struct snd_kcontrol *kcontrol,
+				 struct snd_ctl_elem_value *ucontrol)
+{
+	msm_vi_feed_tx_ch = ucontrol->value.integer.value[0] + 1;
+
+	pr_debug("%s: msm_vi_feed_tx_ch = %d\n", __func__, msm_vi_feed_tx_ch);
+	return 1;
+}
+
+static int msm_bt_sample_rate_get(struct snd_kcontrol *kcontrol,
+				  struct snd_ctl_elem_value *ucontrol)
+{
+	/*
+	 * The Slimbus_7 Rx and Tx sample rates must always be kept in sync
+	 * for the BT_SCO use case, so returning either the Rx or the Tx
+	 * value is sufficient; the Rx value is used here.
+	 */
+	switch (slim_rx_cfg[SLIM_RX_7].sample_rate) {
+	case SAMPLING_RATE_48KHZ:
+		ucontrol->value.integer.value[0] = 2;
+		break;
+	case SAMPLING_RATE_16KHZ:
+		ucontrol->value.integer.value[0] = 1;
+		break;
+	case SAMPLING_RATE_8KHZ:
+	default:
+		ucontrol->value.integer.value[0] = 0;
+		break;
+	}
+	pr_debug("%s: sample rate = %d\n", __func__,
+		 slim_rx_cfg[SLIM_RX_7].sample_rate);
+
+	return 0;
+}
+
+static int msm_bt_sample_rate_put(struct snd_kcontrol *kcontrol,
+				  struct snd_ctl_elem_value *ucontrol)
+{
+	switch (ucontrol->value.integer.value[0]) {
+	case 1:
+		slim_rx_cfg[SLIM_RX_7].sample_rate = SAMPLING_RATE_16KHZ;
+		slim_tx_cfg[SLIM_TX_7].sample_rate = SAMPLING_RATE_16KHZ;
+		break;
+	case 2:
+		slim_rx_cfg[SLIM_RX_7].sample_rate = SAMPLING_RATE_48KHZ;
+		slim_tx_cfg[SLIM_TX_7].sample_rate = SAMPLING_RATE_48KHZ;
+		break;
+	case 0:
+	default:
+		slim_rx_cfg[SLIM_RX_7].sample_rate = SAMPLING_RATE_8KHZ;
+		slim_tx_cfg[SLIM_TX_7].sample_rate = SAMPLING_RATE_8KHZ;
+		break;
+	}
+	pr_debug("%s: sample rates: slim7_rx = %d, slim7_tx = %d, value = %d\n",
+		 __func__,
+		 slim_rx_cfg[SLIM_RX_7].sample_rate,
+		 slim_tx_cfg[SLIM_TX_7].sample_rate,
+		 ucontrol->value.enumerated.item[0]);
+
+	return 0;
+}
+
+static int usb_audio_rx_ch_get(struct snd_kcontrol *kcontrol,
+			       struct snd_ctl_elem_value *ucontrol)
+{
+	pr_debug("%s: usb_audio_rx_ch  = %d\n", __func__,
+		 usb_rx_cfg.channels);
+	ucontrol->value.integer.value[0] = usb_rx_cfg.channels - 1;
+	return 0;
+}
+
+static int usb_audio_rx_ch_put(struct snd_kcontrol *kcontrol,
+			       struct snd_ctl_elem_value *ucontrol)
+{
+	usb_rx_cfg.channels = ucontrol->value.integer.value[0] + 1;
+
+	pr_debug("%s: usb_audio_rx_ch = %d\n", __func__, usb_rx_cfg.channels);
+	return 1;
+}
+
+static int usb_audio_rx_sample_rate_get(struct snd_kcontrol *kcontrol,
+					struct snd_ctl_elem_value *ucontrol)
+{
+	int sample_rate_val;
+
+	switch (usb_rx_cfg.sample_rate) {
+	case SAMPLING_RATE_384KHZ:
+		sample_rate_val = 12;
+		break;
+	case SAMPLING_RATE_352P8KHZ:
+		sample_rate_val = 11;
+		break;
+	case SAMPLING_RATE_192KHZ:
+		sample_rate_val = 10;
+		break;
+	case SAMPLING_RATE_176P4KHZ:
+		sample_rate_val = 9;
+		break;
+	case SAMPLING_RATE_96KHZ:
+		sample_rate_val = 8;
+		break;
+	case SAMPLING_RATE_88P2KHZ:
+		sample_rate_val = 7;
+		break;
+	case SAMPLING_RATE_48KHZ:
+		sample_rate_val = 6;
+		break;
+	case SAMPLING_RATE_44P1KHZ:
+		sample_rate_val = 5;
+		break;
+	case SAMPLING_RATE_32KHZ:
+		sample_rate_val = 4;
+		break;
+	case SAMPLING_RATE_22P05KHZ:
+		sample_rate_val = 3;
+		break;
+	case SAMPLING_RATE_16KHZ:
+		sample_rate_val = 2;
+		break;
+	case SAMPLING_RATE_11P025KHZ:
+		sample_rate_val = 1;
+		break;
+	case SAMPLING_RATE_8KHZ:
+	default:
+		sample_rate_val = 0;
+		break;
+	}
+
+	ucontrol->value.integer.value[0] = sample_rate_val;
+	pr_debug("%s: usb_audio_rx_sample_rate = %d\n", __func__,
+		 usb_rx_cfg.sample_rate);
+	return 0;
+}
+
+static int usb_audio_rx_sample_rate_put(struct snd_kcontrol *kcontrol,
+					struct snd_ctl_elem_value *ucontrol)
+{
+	switch (ucontrol->value.integer.value[0]) {
+	case 12:
+		usb_rx_cfg.sample_rate = SAMPLING_RATE_384KHZ;
+		break;
+	case 11:
+		usb_rx_cfg.sample_rate = SAMPLING_RATE_352P8KHZ;
+		break;
+	case 10:
+		usb_rx_cfg.sample_rate = SAMPLING_RATE_192KHZ;
+		break;
+	case 9:
+		usb_rx_cfg.sample_rate = SAMPLING_RATE_176P4KHZ;
+		break;
+	case 8:
+		usb_rx_cfg.sample_rate = SAMPLING_RATE_96KHZ;
+		break;
+	case 7:
+		usb_rx_cfg.sample_rate = SAMPLING_RATE_88P2KHZ;
+		break;
+	case 6:
+		usb_rx_cfg.sample_rate = SAMPLING_RATE_48KHZ;
+		break;
+	case 5:
+		usb_rx_cfg.sample_rate = SAMPLING_RATE_44P1KHZ;
+		break;
+	case 4:
+		usb_rx_cfg.sample_rate = SAMPLING_RATE_32KHZ;
+		break;
+	case 3:
+		usb_rx_cfg.sample_rate = SAMPLING_RATE_22P05KHZ;
+		break;
+	case 2:
+		usb_rx_cfg.sample_rate = SAMPLING_RATE_16KHZ;
+		break;
+	case 1:
+		usb_rx_cfg.sample_rate = SAMPLING_RATE_11P025KHZ;
+		break;
+	case 0:
+		usb_rx_cfg.sample_rate = SAMPLING_RATE_8KHZ;
+		break;
+	default:
+		usb_rx_cfg.sample_rate = SAMPLING_RATE_48KHZ;
+		break;
+	}
+
+	pr_debug("%s: control value = %ld, usb_audio_rx_sample_rate = %d\n",
+		__func__, ucontrol->value.integer.value[0],
+		usb_rx_cfg.sample_rate);
+	return 0;
+}
+
+static int usb_audio_rx_format_get(struct snd_kcontrol *kcontrol,
+				   struct snd_ctl_elem_value *ucontrol)
+{
+	switch (usb_rx_cfg.bit_format) {
+	case SNDRV_PCM_FORMAT_S32_LE:
+		ucontrol->value.integer.value[0] = 3;
+		break;
+	case SNDRV_PCM_FORMAT_S24_3LE:
+		ucontrol->value.integer.value[0] = 2;
+		break;
+	case SNDRV_PCM_FORMAT_S24_LE:
+		ucontrol->value.integer.value[0] = 1;
+		break;
+	case SNDRV_PCM_FORMAT_S16_LE:
+	default:
+		ucontrol->value.integer.value[0] = 0;
+		break;
+	}
+
+	pr_debug("%s: usb_audio_rx_format = %d, ucontrol value = %ld\n",
+		 __func__, usb_rx_cfg.bit_format,
+		 ucontrol->value.integer.value[0]);
+	return 0;
+}
+
+static int usb_audio_rx_format_put(struct snd_kcontrol *kcontrol,
+				   struct snd_ctl_elem_value *ucontrol)
+{
+	int rc = 0;
+
+	switch (ucontrol->value.integer.value[0]) {
+	case 3:
+		usb_rx_cfg.bit_format = SNDRV_PCM_FORMAT_S32_LE;
+		break;
+	case 2:
+		usb_rx_cfg.bit_format = SNDRV_PCM_FORMAT_S24_3LE;
+		break;
+	case 1:
+		usb_rx_cfg.bit_format = SNDRV_PCM_FORMAT_S24_LE;
+		break;
+	case 0:
+	default:
+		usb_rx_cfg.bit_format = SNDRV_PCM_FORMAT_S16_LE;
+		break;
+	}
+	pr_debug("%s: usb_audio_rx_format = %d, ucontrol value = %ld\n",
+		 __func__, usb_rx_cfg.bit_format,
+		 ucontrol->value.integer.value[0]);
+
+	return rc;
+}
+
+static int usb_audio_tx_ch_get(struct snd_kcontrol *kcontrol,
+			       struct snd_ctl_elem_value *ucontrol)
+{
+	pr_debug("%s: usb_audio_tx_ch  = %d\n", __func__,
+		 usb_tx_cfg.channels);
+	ucontrol->value.integer.value[0] = usb_tx_cfg.channels - 1;
+	return 0;
+}
+
+static int usb_audio_tx_ch_put(struct snd_kcontrol *kcontrol,
+			       struct snd_ctl_elem_value *ucontrol)
+{
+	usb_tx_cfg.channels = ucontrol->value.integer.value[0] + 1;
+
+	pr_debug("%s: usb_audio_tx_ch = %d\n", __func__, usb_tx_cfg.channels);
+	return 1;
+}
+
+static int usb_audio_tx_sample_rate_get(struct snd_kcontrol *kcontrol,
+					struct snd_ctl_elem_value *ucontrol)
+{
+	int sample_rate_val;
+
+	switch (usb_tx_cfg.sample_rate) {
+	case SAMPLING_RATE_384KHZ:
+		sample_rate_val = 12;
+		break;
+	case SAMPLING_RATE_352P8KHZ:
+		sample_rate_val = 11;
+		break;
+	case SAMPLING_RATE_192KHZ:
+		sample_rate_val = 10;
+		break;
+	case SAMPLING_RATE_176P4KHZ:
+		sample_rate_val = 9;
+		break;
+	case SAMPLING_RATE_96KHZ:
+		sample_rate_val = 8;
+		break;
+	case SAMPLING_RATE_88P2KHZ:
+		sample_rate_val = 7;
+		break;
+	case SAMPLING_RATE_48KHZ:
+		sample_rate_val = 6;
+		break;
+	case SAMPLING_RATE_44P1KHZ:
+		sample_rate_val = 5;
+		break;
+	case SAMPLING_RATE_32KHZ:
+		sample_rate_val = 4;
+		break;
+	case SAMPLING_RATE_22P05KHZ:
+		sample_rate_val = 3;
+		break;
+	case SAMPLING_RATE_16KHZ:
+		sample_rate_val = 2;
+		break;
+	case SAMPLING_RATE_11P025KHZ:
+		sample_rate_val = 1;
+		break;
+	case SAMPLING_RATE_8KHZ:
+		sample_rate_val = 0;
+		break;
+	default:
+		sample_rate_val = 6;
+		break;
+	}
+
+	ucontrol->value.integer.value[0] = sample_rate_val;
+	pr_debug("%s: usb_audio_tx_sample_rate = %d\n", __func__,
+		 usb_tx_cfg.sample_rate);
+	return 0;
+}
+
+static int usb_audio_tx_sample_rate_put(struct snd_kcontrol *kcontrol,
+					struct snd_ctl_elem_value *ucontrol)
+{
+	switch (ucontrol->value.integer.value[0]) {
+	case 12:
+		usb_tx_cfg.sample_rate = SAMPLING_RATE_384KHZ;
+		break;
+	case 11:
+		usb_tx_cfg.sample_rate = SAMPLING_RATE_352P8KHZ;
+		break;
+	case 10:
+		usb_tx_cfg.sample_rate = SAMPLING_RATE_192KHZ;
+		break;
+	case 9:
+		usb_tx_cfg.sample_rate = SAMPLING_RATE_176P4KHZ;
+		break;
+	case 8:
+		usb_tx_cfg.sample_rate = SAMPLING_RATE_96KHZ;
+		break;
+	case 7:
+		usb_tx_cfg.sample_rate = SAMPLING_RATE_88P2KHZ;
+		break;
+	case 6:
+		usb_tx_cfg.sample_rate = SAMPLING_RATE_48KHZ;
+		break;
+	case 5:
+		usb_tx_cfg.sample_rate = SAMPLING_RATE_44P1KHZ;
+		break;
+	case 4:
+		usb_tx_cfg.sample_rate = SAMPLING_RATE_32KHZ;
+		break;
+	case 3:
+		usb_tx_cfg.sample_rate = SAMPLING_RATE_22P05KHZ;
+		break;
+	case 2:
+		usb_tx_cfg.sample_rate = SAMPLING_RATE_16KHZ;
+		break;
+	case 1:
+		usb_tx_cfg.sample_rate = SAMPLING_RATE_11P025KHZ;
+		break;
+	case 0:
+		usb_tx_cfg.sample_rate = SAMPLING_RATE_8KHZ;
+		break;
+	default:
+		usb_tx_cfg.sample_rate = SAMPLING_RATE_48KHZ;
+		break;
+	}
+
+	pr_debug("%s: control value = %ld, usb_audio_tx_sample_rate = %d\n",
+		__func__, ucontrol->value.integer.value[0],
+		usb_tx_cfg.sample_rate);
+	return 0;
+}
+
+static int usb_audio_tx_format_get(struct snd_kcontrol *kcontrol,
+				   struct snd_ctl_elem_value *ucontrol)
+{
+	switch (usb_tx_cfg.bit_format) {
+	case SNDRV_PCM_FORMAT_S32_LE:
+		ucontrol->value.integer.value[0] = 3;
+		break;
+	case SNDRV_PCM_FORMAT_S24_3LE:
+		ucontrol->value.integer.value[0] = 2;
+		break;
+	case SNDRV_PCM_FORMAT_S24_LE:
+		ucontrol->value.integer.value[0] = 1;
+		break;
+	case SNDRV_PCM_FORMAT_S16_LE:
+	default:
+		ucontrol->value.integer.value[0] = 0;
+		break;
+	}
+
+	pr_debug("%s: usb_audio_tx_format = %d, ucontrol value = %ld\n",
+		 __func__, usb_tx_cfg.bit_format,
+		 ucontrol->value.integer.value[0]);
+	return 0;
+}
+
+static int usb_audio_tx_format_put(struct snd_kcontrol *kcontrol,
+				   struct snd_ctl_elem_value *ucontrol)
+{
+	int rc = 0;
+
+	switch (ucontrol->value.integer.value[0]) {
+	case 3:
+		usb_tx_cfg.bit_format = SNDRV_PCM_FORMAT_S32_LE;
+		break;
+	case 2:
+		usb_tx_cfg.bit_format = SNDRV_PCM_FORMAT_S24_3LE;
+		break;
+	case 1:
+		usb_tx_cfg.bit_format = SNDRV_PCM_FORMAT_S24_LE;
+		break;
+	case 0:
+	default:
+		usb_tx_cfg.bit_format = SNDRV_PCM_FORMAT_S16_LE;
+		break;
+	}
+	pr_debug("%s: usb_audio_tx_format = %d, ucontrol value = %ld\n",
+		 __func__, usb_tx_cfg.bit_format,
+		 ucontrol->value.integer.value[0]);
+
+	return rc;
+}
+
+static int ext_disp_get_port_idx(struct snd_kcontrol *kcontrol)
+{
+	int idx;
+
+	if (strnstr(kcontrol->id.name, "HDMI_RX", sizeof("HDMI_RX")))
+		idx = HDMI_RX_IDX;
+	else if (strnstr(kcontrol->id.name, "Display Port RX",
+			 sizeof("Display Port RX")))
+		idx = DP_RX_IDX;
+	else {
+		pr_err("%s: unsupported BE: %s\n",
+			__func__, kcontrol->id.name);
+		idx = -EINVAL;
+	}
+
+	return idx;
+}
+
+static int ext_disp_rx_format_get(struct snd_kcontrol *kcontrol,
+				  struct snd_ctl_elem_value *ucontrol)
+{
+	int idx = ext_disp_get_port_idx(kcontrol);
+
+	if (idx < 0)
+		return idx;
+
+	switch (ext_disp_rx_cfg[idx].bit_format) {
+	case SNDRV_PCM_FORMAT_S24_LE:
+		ucontrol->value.integer.value[0] = 1;
+		break;
+
+	case SNDRV_PCM_FORMAT_S16_LE:
+	default:
+		ucontrol->value.integer.value[0] = 0;
+		break;
+	}
+
+	pr_debug("%s: ext_disp_rx[%d].format = %d, ucontrol value = %ld\n",
+		 __func__, idx, ext_disp_rx_cfg[idx].bit_format,
+		 ucontrol->value.integer.value[0]);
+	return 0;
+}
+
+static int ext_disp_rx_format_put(struct snd_kcontrol *kcontrol,
+				  struct snd_ctl_elem_value *ucontrol)
+{
+	int idx = ext_disp_get_port_idx(kcontrol);
+
+	if (idx < 0)
+		return idx;
+
+	switch (ucontrol->value.integer.value[0]) {
+	case 1:
+		ext_disp_rx_cfg[idx].bit_format = SNDRV_PCM_FORMAT_S24_LE;
+		break;
+	case 0:
+	default:
+		ext_disp_rx_cfg[idx].bit_format = SNDRV_PCM_FORMAT_S16_LE;
+		break;
+	}
+	pr_debug("%s: ext_disp_rx[%d].format = %d, ucontrol value = %ld\n",
+		 __func__, idx, ext_disp_rx_cfg[idx].bit_format,
+		 ucontrol->value.integer.value[0]);
+
+	return 0;
+}
+
+static int ext_disp_rx_ch_get(struct snd_kcontrol *kcontrol,
+			      struct snd_ctl_elem_value *ucontrol)
+{
+	int idx = ext_disp_get_port_idx(kcontrol);
+
+	if (idx < 0)
+		return idx;
+
+	ucontrol->value.integer.value[0] =
+			ext_disp_rx_cfg[idx].channels - 2;
+
+	pr_debug("%s: ext_disp_rx[%d].ch = %d\n", __func__,
+		 idx, ext_disp_rx_cfg[idx].channels);
+
+	return 0;
+}
+
+static int ext_disp_rx_ch_put(struct snd_kcontrol *kcontrol,
+			      struct snd_ctl_elem_value *ucontrol)
+{
+	int idx = ext_disp_get_port_idx(kcontrol);
+
+	if (idx < 0)
+		return idx;
+
+	ext_disp_rx_cfg[idx].channels =
+			ucontrol->value.integer.value[0] + 2;
+
+	pr_debug("%s: ext_disp_rx[%d].ch = %d\n", __func__,
+		 idx, ext_disp_rx_cfg[idx].channels);
+	return 1;
+}
+
+static int ext_disp_rx_sample_rate_get(struct snd_kcontrol *kcontrol,
+				       struct snd_ctl_elem_value *ucontrol)
+{
+	int sample_rate_val;
+	int idx = ext_disp_get_port_idx(kcontrol);
+
+	if (idx < 0)
+		return idx;
+
+	switch (ext_disp_rx_cfg[idx].sample_rate) {
+	case SAMPLING_RATE_176P4KHZ:
+		sample_rate_val = 6;
+		break;
+
+	case SAMPLING_RATE_88P2KHZ:
+		sample_rate_val = 5;
+		break;
+
+	case SAMPLING_RATE_44P1KHZ:
+		sample_rate_val = 4;
+		break;
+
+	case SAMPLING_RATE_32KHZ:
+		sample_rate_val = 3;
+		break;
+
+	case SAMPLING_RATE_192KHZ:
+		sample_rate_val = 2;
+		break;
+
+	case SAMPLING_RATE_96KHZ:
+		sample_rate_val = 1;
+		break;
+
+	case SAMPLING_RATE_48KHZ:
+	default:
+		sample_rate_val = 0;
+		break;
+	}
+
+	ucontrol->value.integer.value[0] = sample_rate_val;
+	pr_debug("%s: ext_disp_rx[%d].sample_rate = %d\n", __func__,
+		 idx, ext_disp_rx_cfg[idx].sample_rate);
+
+	return 0;
+}
+
+static int ext_disp_rx_sample_rate_put(struct snd_kcontrol *kcontrol,
+				       struct snd_ctl_elem_value *ucontrol)
+{
+	int idx = ext_disp_get_port_idx(kcontrol);
+
+	if (idx < 0)
+		return idx;
+
+	switch (ucontrol->value.integer.value[0]) {
+	case 6:
+		ext_disp_rx_cfg[idx].sample_rate = SAMPLING_RATE_176P4KHZ;
+		break;
+	case 5:
+		ext_disp_rx_cfg[idx].sample_rate = SAMPLING_RATE_88P2KHZ;
+		break;
+	case 4:
+		ext_disp_rx_cfg[idx].sample_rate = SAMPLING_RATE_44P1KHZ;
+		break;
+	case 3:
+		ext_disp_rx_cfg[idx].sample_rate = SAMPLING_RATE_32KHZ;
+		break;
+	case 2:
+		ext_disp_rx_cfg[idx].sample_rate = SAMPLING_RATE_192KHZ;
+		break;
+	case 1:
+		ext_disp_rx_cfg[idx].sample_rate = SAMPLING_RATE_96KHZ;
+		break;
+	case 0:
+	default:
+		ext_disp_rx_cfg[idx].sample_rate = SAMPLING_RATE_48KHZ;
+		break;
+	}
+
+	pr_debug("%s: control value = %ld, ext_disp_rx[%d].sample_rate = %d\n",
+		 __func__, ucontrol->value.integer.value[0], idx,
+		 ext_disp_rx_cfg[idx].sample_rate);
+	return 0;
+}
+
+static int proxy_rx_ch_get(struct snd_kcontrol *kcontrol,
+			       struct snd_ctl_elem_value *ucontrol)
+{
+	pr_debug("%s: proxy_rx channels = %d\n",
+		 __func__, proxy_rx_cfg.channels);
+	ucontrol->value.integer.value[0] = proxy_rx_cfg.channels - 2;
+
+	return 0;
+}
+
+static int proxy_rx_ch_put(struct snd_kcontrol *kcontrol,
+			       struct snd_ctl_elem_value *ucontrol)
+{
+	proxy_rx_cfg.channels = ucontrol->value.integer.value[0] + 2;
+	pr_debug("%s: proxy_rx channels = %d\n",
+		 __func__, proxy_rx_cfg.channels);
+
+	return 1;
+}
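+/*
+ * The +/-2 conversions in the ext_disp and proxy channel handlers above
+ * come from ch_text[] starting at "Two": enum item 0 represents a channel
+ * count of 2.
+ */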
+
+static int tdm_get_sample_rate(int value)
+{
+	int sample_rate = 0;
+
+	switch (value) {
+	case 0:
+		sample_rate = SAMPLING_RATE_8KHZ;
+		break;
+	case 1:
+		sample_rate = SAMPLING_RATE_16KHZ;
+		break;
+	case 2:
+		sample_rate = SAMPLING_RATE_32KHZ;
+		break;
+	case 3:
+		sample_rate = SAMPLING_RATE_44P1KHZ;
+		break;
+	case 4:
+		sample_rate = SAMPLING_RATE_48KHZ;
+		break;
+	case 5:
+		sample_rate = SAMPLING_RATE_96KHZ;
+		break;
+	case 6:
+		sample_rate = SAMPLING_RATE_192KHZ;
+		break;
+	case 7:
+		sample_rate = SAMPLING_RATE_352P8KHZ;
+		break;
+	case 8:
+		sample_rate = SAMPLING_RATE_384KHZ;
+		break;
+	default:
+		sample_rate = SAMPLING_RATE_48KHZ;
+		break;
+	}
+	return sample_rate;
+}
+
+static int aux_pcm_get_sample_rate(int value)
+{
+	int sample_rate;
+
+	switch (value) {
+	case 1:
+		sample_rate = SAMPLING_RATE_16KHZ;
+		break;
+	case 0:
+	default:
+		sample_rate = SAMPLING_RATE_8KHZ;
+		break;
+	}
+	return sample_rate;
+}
+
+static int tdm_get_sample_rate_val(int sample_rate)
+{
+	int sample_rate_val = 0;
+
+	switch (sample_rate) {
+	case SAMPLING_RATE_8KHZ:
+		sample_rate_val = 0;
+		break;
+	case SAMPLING_RATE_16KHZ:
+		sample_rate_val = 1;
+		break;
+	case SAMPLING_RATE_32KHZ:
+		sample_rate_val = 2;
+		break;
+	case SAMPLING_RATE_44P1KHZ:
+		sample_rate_val = 3;
+		break;
+	case SAMPLING_RATE_48KHZ:
+		sample_rate_val = 4;
+		break;
+	case SAMPLING_RATE_96KHZ:
+		sample_rate_val = 5;
+		break;
+	case SAMPLING_RATE_192KHZ:
+		sample_rate_val = 6;
+		break;
+	case SAMPLING_RATE_352P8KHZ:
+		sample_rate_val = 7;
+		break;
+	case SAMPLING_RATE_384KHZ:
+		sample_rate_val = 8;
+		break;
+	default:
+		sample_rate_val = 4;
+		break;
+	}
+	return sample_rate_val;
+}
+
+static int aux_pcm_get_sample_rate_val(int sample_rate)
+{
+	int sample_rate_val;
+
+	switch (sample_rate) {
+	case SAMPLING_RATE_16KHZ:
+		sample_rate_val = 1;
+		break;
+	case SAMPLING_RATE_8KHZ:
+	default:
+		sample_rate_val = 0;
+		break;
+	}
+	return sample_rate_val;
+}
+
+static int tdm_get_port_idx(struct snd_kcontrol *kcontrol,
+			    struct tdm_port *port)
+{
+	if (port) {
+		if (strnstr(kcontrol->id.name, "PRI",
+		    sizeof(kcontrol->id.name))) {
+			port->mode = TDM_PRI;
+		} else if (strnstr(kcontrol->id.name, "SEC",
+		    sizeof(kcontrol->id.name))) {
+			port->mode = TDM_SEC;
+		} else if (strnstr(kcontrol->id.name, "TERT",
+		    sizeof(kcontrol->id.name))) {
+			port->mode = TDM_TERT;
+		} else if (strnstr(kcontrol->id.name, "QUAT",
+		    sizeof(kcontrol->id.name))) {
+			port->mode = TDM_QUAT;
+		} else {
+			pr_err("%s: unsupported mode in: %s\n",
+				__func__, kcontrol->id.name);
+			return -EINVAL;
+		}
+
+		if (strnstr(kcontrol->id.name, "RX_0",
+		    sizeof(kcontrol->id.name)) ||
+		    strnstr(kcontrol->id.name, "TX_0",
+		    sizeof(kcontrol->id.name))) {
+			port->channel = TDM_0;
+		} else if (strnstr(kcontrol->id.name, "RX_1",
+			   sizeof(kcontrol->id.name)) ||
+			   strnstr(kcontrol->id.name, "TX_1",
+			   sizeof(kcontrol->id.name))) {
+			port->channel = TDM_1;
+		} else if (strnstr(kcontrol->id.name, "RX_2",
+			   sizeof(kcontrol->id.name)) ||
+			   strnstr(kcontrol->id.name, "TX_2",
+			   sizeof(kcontrol->id.name))) {
+			port->channel = TDM_2;
+		} else if (strnstr(kcontrol->id.name, "RX_3",
+			   sizeof(kcontrol->id.name)) ||
+			   strnstr(kcontrol->id.name, "TX_3",
+			   sizeof(kcontrol->id.name))) {
+			port->channel = TDM_3;
+		} else if (strnstr(kcontrol->id.name, "RX_4",
+			   sizeof(kcontrol->id.name)) ||
+			   strnstr(kcontrol->id.name, "TX_4",
+			   sizeof(kcontrol->id.name))) {
+			port->channel = TDM_4;
+		} else if (strnstr(kcontrol->id.name, "RX_5",
+			   sizeof(kcontrol->id.name)) ||
+			   strnstr(kcontrol->id.name, "TX_5",
+			   sizeof(kcontrol->id.name))) {
+			port->channel = TDM_5;
+		} else if (strnstr(kcontrol->id.name, "RX_6",
+			   sizeof(kcontrol->id.name)) ||
+			   strnstr(kcontrol->id.name, "TX_6",
+			   sizeof(kcontrol->id.name))) {
+			port->channel = TDM_6;
+		} else if (strnstr(kcontrol->id.name, "RX_7",
+			   sizeof(kcontrol->id.name)) ||
+			   strnstr(kcontrol->id.name, "TX_7",
+			   sizeof(kcontrol->id.name))) {
+			port->channel = TDM_7;
+		} else {
+			pr_err("%s: unsupported channel in: %s\n",
+				__func__, kcontrol->id.name);
+			return -EINVAL;
+		}
+	} else {
+		return -EINVAL;
+	}
+
+	return 0;
+}
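+/*
+ * Example, assuming the kcontrol naming used by the TDM mixer controls
+ * (not shown here): a control named "PRI_TDM_RX_0 SampleRate" resolves to
+ * port->mode = TDM_PRI and port->channel = TDM_0.
+ */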
+
+static int tdm_rx_sample_rate_get(struct snd_kcontrol *kcontrol,
+				   struct snd_ctl_elem_value *ucontrol)
+{
+	struct tdm_port port;
+	int ret = tdm_get_port_idx(kcontrol, &port);
+
+	if (ret) {
+		pr_err("%s: unsupported control: %s\n",
+			__func__, kcontrol->id.name);
+	} else {
+		ucontrol->value.enumerated.item[0] = tdm_get_sample_rate_val(
+			tdm_rx_cfg[port.mode][port.channel].sample_rate);
+
+		pr_debug("%s: tdm_rx_sample_rate = %d, item = %d\n", __func__,
+			 tdm_rx_cfg[port.mode][port.channel].sample_rate,
+			 ucontrol->value.enumerated.item[0]);
+	}
+	return ret;
+}
+
+static int tdm_rx_sample_rate_put(struct snd_kcontrol *kcontrol,
+				   struct snd_ctl_elem_value *ucontrol)
+{
+	struct tdm_port port;
+	int ret = tdm_get_port_idx(kcontrol, &port);
+
+	if (ret) {
+		pr_err("%s: unsupported control: %s\n",
+			__func__, kcontrol->id.name);
+	} else {
+		tdm_rx_cfg[port.mode][port.channel].sample_rate =
+			tdm_get_sample_rate(ucontrol->value.enumerated.item[0]);
+
+		pr_debug("%s: tdm_rx_sample_rate = %d, item = %d\n", __func__,
+			 tdm_rx_cfg[port.mode][port.channel].sample_rate,
+			 ucontrol->value.enumerated.item[0]);
+	}
+	return ret;
+}
+
+static int tdm_tx_sample_rate_get(struct snd_kcontrol *kcontrol,
+				   struct snd_ctl_elem_value *ucontrol)
+{
+	struct tdm_port port;
+	int ret = tdm_get_port_idx(kcontrol, &port);
+
+	if (ret) {
+		pr_err("%s: unsupported control: %s\n",
+			__func__, kcontrol->id.name);
+	} else {
+		ucontrol->value.enumerated.item[0] = tdm_get_sample_rate_val(
+			tdm_tx_cfg[port.mode][port.channel].sample_rate);
+
+		pr_debug("%s: tdm_tx_sample_rate = %d, item = %d\n", __func__,
+			 tdm_tx_cfg[port.mode][port.channel].sample_rate,
+			 ucontrol->value.enumerated.item[0]);
+	}
+	return ret;
+}
+
+static int tdm_tx_sample_rate_put(struct snd_kcontrol *kcontrol,
+				   struct snd_ctl_elem_value *ucontrol)
+{
+	struct tdm_port port;
+	int ret = tdm_get_port_idx(kcontrol, &port);
+
+	if (ret) {
+		pr_err("%s: unsupported control: %s\n",
+			__func__, kcontrol->id.name);
+	} else {
+		tdm_tx_cfg[port.mode][port.channel].sample_rate =
+			tdm_get_sample_rate(ucontrol->value.enumerated.item[0]);
+
+		pr_debug("%s: tdm_tx_sample_rate = %d, item = %d\n", __func__,
+			 tdm_tx_cfg[port.mode][port.channel].sample_rate,
+			 ucontrol->value.enumerated.item[0]);
+	}
+	return ret;
+}
+
+static int tdm_get_format(int value)
+{
+	int format = 0;
+
+	switch (value) {
+	case 0:
+		format = SNDRV_PCM_FORMAT_S16_LE;
+		break;
+	case 1:
+		format = SNDRV_PCM_FORMAT_S24_LE;
+		break;
+	case 2:
+		format = SNDRV_PCM_FORMAT_S32_LE;
+		break;
+	default:
+		format = SNDRV_PCM_FORMAT_S16_LE;
+		break;
+	}
+	return format;
+}
+
+static int tdm_get_format_val(int format)
+{
+	int value = 0;
+
+	switch (format) {
+	case SNDRV_PCM_FORMAT_S16_LE:
+		value = 0;
+		break;
+	case SNDRV_PCM_FORMAT_S24_LE:
+		value = 1;
+		break;
+	case SNDRV_PCM_FORMAT_S32_LE:
+		value = 2;
+		break;
+	default:
+		value = 0;
+		break;
+	}
+	return value;
+}
+
+static int tdm_rx_format_get(struct snd_kcontrol *kcontrol,
+			     struct snd_ctl_elem_value *ucontrol)
+{
+	struct tdm_port port;
+	int ret = tdm_get_port_idx(kcontrol, &port);
+
+	if (ret) {
+		pr_err("%s: unsupported control: %s\n",
+			__func__, kcontrol->id.name);
+	} else {
+		ucontrol->value.enumerated.item[0] = tdm_get_format_val(
+				tdm_rx_cfg[port.mode][port.channel].bit_format);
+
+		pr_debug("%s: tdm_rx_bit_format = %d, item = %d\n", __func__,
+			 tdm_rx_cfg[port.mode][port.channel].bit_format,
+			 ucontrol->value.enumerated.item[0]);
+	}
+	return ret;
+}
+
+static int tdm_rx_format_put(struct snd_kcontrol *kcontrol,
+			     struct snd_ctl_elem_value *ucontrol)
+{
+	struct tdm_port port;
+	int ret = tdm_get_port_idx(kcontrol, &port);
+
+	if (ret) {
+		pr_err("%s: unsupported control: %s\n",
+			__func__, kcontrol->id.name);
+	} else {
+		tdm_rx_cfg[port.mode][port.channel].bit_format =
+			tdm_get_format(ucontrol->value.enumerated.item[0]);
+
+		pr_debug("%s: tdm_rx_bit_format = %d, item = %d\n", __func__,
+			 tdm_rx_cfg[port.mode][port.channel].bit_format,
+			 ucontrol->value.enumerated.item[0]);
+	}
+	return ret;
+}
+
+static int tdm_tx_format_get(struct snd_kcontrol *kcontrol,
+			     struct snd_ctl_elem_value *ucontrol)
+{
+	struct tdm_port port;
+	int ret = tdm_get_port_idx(kcontrol, &port);
+
+	if (ret) {
+		pr_err("%s: unsupported control: %s\n",
+			__func__, kcontrol->id.name);
+	} else {
+		ucontrol->value.enumerated.item[0] = tdm_get_format_val(
+				tdm_tx_cfg[port.mode][port.channel].bit_format);
+
+		pr_debug("%s: tdm_tx_bit_format = %d, item = %d\n", __func__,
+			 tdm_tx_cfg[port.mode][port.channel].bit_format,
+			 ucontrol->value.enumerated.item[0]);
+	}
+	return ret;
+}
+
+static int tdm_tx_format_put(struct snd_kcontrol *kcontrol,
+			     struct snd_ctl_elem_value *ucontrol)
+{
+	struct tdm_port port;
+	int ret = tdm_get_port_idx(kcontrol, &port);
+
+	if (ret) {
+		pr_err("%s: unsupported control: %s\n",
+			__func__, kcontrol->id.name);
+	} else {
+		tdm_tx_cfg[port.mode][port.channel].bit_format =
+			tdm_get_format(ucontrol->value.enumerated.item[0]);
+
+		pr_debug("%s: tdm_tx_bit_format = %d, item = %d\n", __func__,
+			 tdm_tx_cfg[port.mode][port.channel].bit_format,
+			 ucontrol->value.enumerated.item[0]);
+	}
+	return ret;
+}
+
+static int tdm_rx_ch_get(struct snd_kcontrol *kcontrol,
+			 struct snd_ctl_elem_value *ucontrol)
+{
+	struct tdm_port port;
+	int ret = tdm_get_port_idx(kcontrol, &port);
+
+	if (ret) {
+		pr_err("%s: unsupported control: %s\n",
+			__func__, kcontrol->id.name);
+	} else {
+		ucontrol->value.enumerated.item[0] =
+			tdm_rx_cfg[port.mode][port.channel].channels - 1;
+
+		pr_debug("%s: tdm_rx_ch = %d, item = %d\n", __func__,
+			 tdm_rx_cfg[port.mode][port.channel].channels - 1,
+			 ucontrol->value.enumerated.item[0]);
+	}
+	return ret;
+}
+
+static int tdm_rx_ch_put(struct snd_kcontrol *kcontrol,
+			 struct snd_ctl_elem_value *ucontrol)
+{
+	struct tdm_port port;
+	int ret = tdm_get_port_idx(kcontrol, &port);
+
+	if (ret) {
+			pr_err("%s: unsupported control: %s\n",
+			__func__, kcontrol->id.name);
+	} else {
+		tdm_rx_cfg[port.mode][port.channel].channels =
+			ucontrol->value.enumerated.item[0] + 1;
+
+		pr_debug("%s: tdm_rx_ch = %d, item = %d\n", __func__,
+			 tdm_rx_cfg[port.mode][port.channel].channels,
+			 ucontrol->value.enumerated.item[0] + 1);
+	}
+	return ret;
+}
+
+static int tdm_tx_ch_get(struct snd_kcontrol *kcontrol,
+				   struct snd_ctl_elem_value *ucontrol)
+{
+	struct tdm_port port;
+	int ret = tdm_get_port_idx(kcontrol, &port);
+
+	if (ret) {
+		pr_err("%s: unsupported control: %s\n",
+			__func__, kcontrol->id.name);
+	} else {
+		ucontrol->value.enumerated.item[0] =
+			tdm_tx_cfg[port.mode][port.channel].channels - 1;
+
+		pr_debug("%s: tdm_tx_ch = %d, item = %d\n", __func__,
+			 tdm_tx_cfg[port.mode][port.channel].channels - 1,
+			 ucontrol->value.enumerated.item[0]);
+	}
+	return ret;
+}
+
+static int tdm_tx_ch_put(struct snd_kcontrol *kcontrol,
+				   struct snd_ctl_elem_value *ucontrol)
+{
+	struct tdm_port port;
+	int ret = tdm_get_port_idx(kcontrol, &port);
+
+	if (ret) {
+		pr_err("%s: unsupported control: %s\n",
+			__func__, kcontrol->id.name);
+	} else {
+		tdm_tx_cfg[port.mode][port.channel].channels =
+			ucontrol->value.enumerated.item[0] + 1;
+
+		pr_debug("%s: tdm_tx_ch = %d, item = %d\n", __func__,
+			 tdm_tx_cfg[port.mode][port.channel].channels,
+			 ucontrol->value.enumerated.item[0] + 1);
+	}
+	return ret;
+}
+
+static int aux_pcm_get_port_idx(struct snd_kcontrol *kcontrol)
+{
+	int idx;
+
+	if (strnstr(kcontrol->id.name, "PRIM_AUX_PCM",
+		    sizeof("PRIM_AUX_PCM")))
+		idx = PRIM_AUX_PCM;
+	else if (strnstr(kcontrol->id.name, "SEC_AUX_PCM",
+			 sizeof("SEC_AUX_PCM")))
+		idx = SEC_AUX_PCM;
+	else if (strnstr(kcontrol->id.name, "TERT_AUX_PCM",
+			 sizeof("TERT_AUX_PCM")))
+		idx = TERT_AUX_PCM;
+	else if (strnstr(kcontrol->id.name, "QUAT_AUX_PCM",
+			 sizeof("QUAT_AUX_PCM")))
+		idx = QUAT_AUX_PCM;
+	else {
+		pr_err("%s: unsupported port: %s\n",
+			__func__, kcontrol->id.name);
+		idx = -EINVAL;
+	}
+
+	return idx;
+}
+
+static int aux_pcm_rx_sample_rate_put(struct snd_kcontrol *kcontrol,
+				      struct snd_ctl_elem_value *ucontrol)
+{
+	int idx = aux_pcm_get_port_idx(kcontrol);
+
+	if (idx < 0)
+		return idx;
+
+	aux_pcm_rx_cfg[idx].sample_rate =
+		aux_pcm_get_sample_rate(ucontrol->value.enumerated.item[0]);
+
+	pr_debug("%s: idx[%d]_rx_sample_rate = %d, item = %d\n", __func__,
+		 idx, aux_pcm_rx_cfg[idx].sample_rate,
+		 ucontrol->value.enumerated.item[0]);
+
+	return 0;
+}
+
+static int aux_pcm_rx_sample_rate_get(struct snd_kcontrol *kcontrol,
+				      struct snd_ctl_elem_value *ucontrol)
+{
+	int idx = aux_pcm_get_port_idx(kcontrol);
+
+	if (idx < 0)
+		return idx;
+
+	ucontrol->value.enumerated.item[0] =
+	     aux_pcm_get_sample_rate_val(aux_pcm_rx_cfg[idx].sample_rate);
+
+	pr_debug("%s: idx[%d]_rx_sample_rate = %d, item = %d\n", __func__,
+		 idx, aux_pcm_rx_cfg[idx].sample_rate,
+		 ucontrol->value.enumerated.item[0]);
+
+	return 0;
+}
+
+static int aux_pcm_tx_sample_rate_put(struct snd_kcontrol *kcontrol,
+				      struct snd_ctl_elem_value *ucontrol)
+{
+	int idx = aux_pcm_get_port_idx(kcontrol);
+
+	if (idx < 0)
+		return idx;
+
+	aux_pcm_tx_cfg[idx].sample_rate =
+		aux_pcm_get_sample_rate(ucontrol->value.enumerated.item[0]);
+
+	pr_debug("%s: idx[%d]_tx_sample_rate = %d, item = %d\n", __func__,
+		 idx, aux_pcm_tx_cfg[idx].sample_rate,
+		 ucontrol->value.enumerated.item[0]);
+
+	return 0;
+}
+
+static int aux_pcm_tx_sample_rate_get(struct snd_kcontrol *kcontrol,
+				   struct snd_ctl_elem_value *ucontrol)
+{
+	int idx = aux_pcm_get_port_idx(kcontrol);
+
+	if (idx < 0)
+		return idx;
+
+	ucontrol->value.enumerated.item[0] =
+	     aux_pcm_get_sample_rate_val(aux_pcm_tx_cfg[idx].sample_rate);
+
+	pr_debug("%s: idx[%d]_tx_sample_rate = %d, item = %d\n", __func__,
+		 idx, aux_pcm_tx_cfg[idx].sample_rate,
+		 ucontrol->value.enumerated.item[0]);
+
+	return 0;
+}
+
+static int mi2s_get_port_idx(struct snd_kcontrol *kcontrol)
+{
+	int idx;
+
+	if (strnstr(kcontrol->id.name, "PRIM_MI2S_RX",
+	    sizeof("PRIM_MI2S_RX")))
+		idx = PRIM_MI2S;
+	else if (strnstr(kcontrol->id.name, "SEC_MI2S_RX",
+		 sizeof("SEC_MI2S_RX")))
+		idx = SEC_MI2S;
+	else if (strnstr(kcontrol->id.name, "TERT_MI2S_RX",
+		 sizeof("TERT_MI2S_RX")))
+		idx = TERT_MI2S;
+	else if (strnstr(kcontrol->id.name, "QUAT_MI2S_RX",
+		 sizeof("QUAT_MI2S_RX")))
+		idx = QUAT_MI2S;
+	else if (strnstr(kcontrol->id.name, "PRIM_MI2S_TX",
+		 sizeof("PRIM_MI2S_TX")))
+		idx = PRIM_MI2S;
+	else if (strnstr(kcontrol->id.name, "SEC_MI2S_TX",
+		 sizeof("SEC_MI2S_TX")))
+		idx = SEC_MI2S;
+	else if (strnstr(kcontrol->id.name, "TERT_MI2S_TX",
+		 sizeof("TERT_MI2S_TX")))
+		idx = TERT_MI2S;
+	else if (strnstr(kcontrol->id.name, "QUAT_MI2S_TX",
+		 sizeof("QUAT_MI2S_TX")))
+		idx = QUAT_MI2S;
+	else {
+		pr_err("%s: unsupported port: %s\n",
+			__func__, kcontrol->id.name);
+		idx = -EINVAL;
+	}
+
+	return idx;
+}
+
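+/*
+ * Translate between the enumerated item exposed to userspace and the
+ * MI2S sample rate in Hz.  The two helpers below are inverses of each
+ * other; out-of-range inputs fall back to 48 kHz (item 4), the driver
+ * default.
+ */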
+static int mi2s_get_sample_rate_val(int sample_rate)
+{
+	int sample_rate_val;
+
+	switch (sample_rate) {
+	case SAMPLING_RATE_8KHZ:
+		sample_rate_val = 0;
+		break;
+	case SAMPLING_RATE_16KHZ:
+		sample_rate_val = 1;
+		break;
+	case SAMPLING_RATE_32KHZ:
+		sample_rate_val = 2;
+		break;
+	case SAMPLING_RATE_44P1KHZ:
+		sample_rate_val = 3;
+		break;
+	case SAMPLING_RATE_48KHZ:
+		sample_rate_val = 4;
+		break;
+	case SAMPLING_RATE_88P2KHZ:
+		sample_rate_val = 5;
+		break;
+	case SAMPLING_RATE_96KHZ:
+		sample_rate_val = 6;
+		break;
+	case SAMPLING_RATE_176P4KHZ:
+		sample_rate_val = 7;
+		break;
+	case SAMPLING_RATE_192KHZ:
+		sample_rate_val = 8;
+		break;
+	default:
+		sample_rate_val = 4;
+		break;
+	}
+	return sample_rate_val;
+}
+
+static int mi2s_get_sample_rate(int value)
+{
+	int sample_rate;
+
+	switch (value) {
+	case 0:
+		sample_rate = SAMPLING_RATE_8KHZ;
+		break;
+	case 1:
+		sample_rate = SAMPLING_RATE_16KHZ;
+		break;
+	case 2:
+		sample_rate = SAMPLING_RATE_32KHZ;
+		break;
+	case 3:
+		sample_rate = SAMPLING_RATE_44P1KHZ;
+		break;
+	case 4:
+		sample_rate = SAMPLING_RATE_48KHZ;
+		break;
+	case 5:
+		sample_rate = SAMPLING_RATE_88P2KHZ;
+		break;
+	case 6:
+		sample_rate = SAMPLING_RATE_96KHZ;
+		break;
+	case 7:
+		sample_rate = SAMPLING_RATE_176P4KHZ;
+		break;
+	case 8:
+		sample_rate = SAMPLING_RATE_192KHZ;
+		break;
+	default:
+		sample_rate = SAMPLING_RATE_48KHZ;
+		break;
+	}
+	return sample_rate;
+}
+
+static int mi2s_get_format(int value)
+{
+	int format;
+
+	switch (value) {
+	case 0:
+		format = SNDRV_PCM_FORMAT_S16_LE;
+		break;
+	case 1:
+		format = SNDRV_PCM_FORMAT_S24_LE;
+		break;
+	case 2:
+		format = SNDRV_PCM_FORMAT_S24_3LE;
+		break;
+	case 3:
+		format = SNDRV_PCM_FORMAT_S32_LE;
+		break;
+	default:
+		format = SNDRV_PCM_FORMAT_S16_LE;
+		break;
+	}
+	return format;
+}
+
+static int mi2s_get_format_value(int format)
+{
+	int value;
+
+	switch (format) {
+	case SNDRV_PCM_FORMAT_S16_LE:
+		value = 0;
+		break;
+	case SNDRV_PCM_FORMAT_S24_LE:
+		value = 1;
+		break;
+	case SNDRV_PCM_FORMAT_S24_3LE:
+		value = 2;
+		break;
+	case SNDRV_PCM_FORMAT_S32_LE:
+		value = 3;
+		break;
+	default:
+		value = 0;
+		break;
+	}
+	return value;
+}
+
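+/*
+ * The MI2S get/put handlers below all follow the same pattern: resolve
+ * the port index from the control name, then translate between the
+ * enumerated item chosen in userspace and the cached sample rate,
+ * channel count or bit format that msm_be_hw_params_fixup() later
+ * applies to the backend DAI.
+ */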
+static int mi2s_rx_sample_rate_put(struct snd_kcontrol *kcontrol,
+				   struct snd_ctl_elem_value *ucontrol)
+{
+	int idx = mi2s_get_port_idx(kcontrol);
+
+	if (idx < 0)
+		return idx;
+
+	mi2s_rx_cfg[idx].sample_rate =
+		mi2s_get_sample_rate(ucontrol->value.enumerated.item[0]);
+
+	pr_debug("%s: idx[%d]_rx_sample_rate = %d, item = %d\n", __func__,
+		 idx, mi2s_rx_cfg[idx].sample_rate,
+		 ucontrol->value.enumerated.item[0]);
+
+	return 0;
+}
+
+static int mi2s_rx_sample_rate_get(struct snd_kcontrol *kcontrol,
+				   struct snd_ctl_elem_value *ucontrol)
+{
+	int idx = mi2s_get_port_idx(kcontrol);
+
+	if (idx < 0)
+		return idx;
+
+	ucontrol->value.enumerated.item[0] =
+		mi2s_get_sample_rate_val(mi2s_rx_cfg[idx].sample_rate);
+
+	pr_debug("%s: idx[%d]_rx_sample_rate = %d, item = %d\n", __func__,
+		 idx, mi2s_rx_cfg[idx].sample_rate,
+		 ucontrol->value.enumerated.item[0]);
+
+	return 0;
+}
+
+static int mi2s_tx_sample_rate_put(struct snd_kcontrol *kcontrol,
+				   struct snd_ctl_elem_value *ucontrol)
+{
+	int idx = mi2s_get_port_idx(kcontrol);
+
+	if (idx < 0)
+		return idx;
+
+	mi2s_tx_cfg[idx].sample_rate =
+		mi2s_get_sample_rate(ucontrol->value.enumerated.item[0]);
+
+	pr_debug("%s: idx[%d]_tx_sample_rate = %d, item = %d\n", __func__,
+		 idx, mi2s_tx_cfg[idx].sample_rate,
+		 ucontrol->value.enumerated.item[0]);
+
+	return 0;
+}
+
+static int mi2s_tx_sample_rate_get(struct snd_kcontrol *kcontrol,
+				   struct snd_ctl_elem_value *ucontrol)
+{
+	int idx = mi2s_get_port_idx(kcontrol);
+
+	if (idx < 0)
+		return idx;
+
+	ucontrol->value.enumerated.item[0] =
+		mi2s_get_sample_rate_val(mi2s_tx_cfg[idx].sample_rate);
+
+	pr_debug("%s: idx[%d]_tx_sample_rate = %d, item = %d\n", __func__,
+		 idx, mi2s_tx_cfg[idx].sample_rate,
+		 ucontrol->value.enumerated.item[0]);
+
+	return 0;
+}
+
+static int msm_mi2s_rx_ch_get(struct snd_kcontrol *kcontrol,
+			      struct snd_ctl_elem_value *ucontrol)
+{
+	int idx = mi2s_get_port_idx(kcontrol);
+
+	if (idx < 0)
+		return idx;
+
+	pr_debug("%s: msm_mi2s_[%d]_rx_ch  = %d\n", __func__,
+		 idx, mi2s_rx_cfg[idx].channels);
+	ucontrol->value.enumerated.item[0] = mi2s_rx_cfg[idx].channels - 1;
+
+	return 0;
+}
+
+static int msm_mi2s_rx_ch_put(struct snd_kcontrol *kcontrol,
+			      struct snd_ctl_elem_value *ucontrol)
+{
+	int idx = mi2s_get_port_idx(kcontrol);
+
+	if (idx < 0)
+		return idx;
+
+	mi2s_rx_cfg[idx].channels = ucontrol->value.enumerated.item[0] + 1;
+	pr_debug("%s: msm_mi2s_[%d]_rx_ch  = %d\n", __func__,
+		 idx, mi2s_rx_cfg[idx].channels);
+
+	return 1;
+}
+
+static int msm_mi2s_tx_ch_get(struct snd_kcontrol *kcontrol,
+			      struct snd_ctl_elem_value *ucontrol)
+{
+	int idx = mi2s_get_port_idx(kcontrol);
+
+	if (idx < 0)
+		return idx;
+
+	pr_debug("%s: msm_mi2s_[%d]_tx_ch  = %d\n", __func__,
+		 idx, mi2s_tx_cfg[idx].channels);
+	ucontrol->value.enumerated.item[0] = mi2s_tx_cfg[idx].channels - 1;
+
+	return 0;
+}
+
+static int msm_mi2s_tx_ch_put(struct snd_kcontrol *kcontrol,
+			      struct snd_ctl_elem_value *ucontrol)
+{
+	int idx = mi2s_get_port_idx(kcontrol);
+
+	if (idx < 0)
+		return idx;
+
+	mi2s_tx_cfg[idx].channels = ucontrol->value.enumerated.item[0] + 1;
+	pr_debug("%s: msm_mi2s_[%d]_tx_ch  = %d\n", __func__,
+		 idx, mi2s_tx_cfg[idx].channels);
+
+	return 1;
+}
+
+static int msm_mi2s_rx_format_get(struct snd_kcontrol *kcontrol,
+				struct snd_ctl_elem_value *ucontrol)
+{
+	int idx = mi2s_get_port_idx(kcontrol);
+
+	if (idx < 0)
+		return idx;
+
+	ucontrol->value.enumerated.item[0] =
+		mi2s_get_format_value(mi2s_rx_cfg[idx].bit_format);
+
+	pr_debug("%s: idx[%d]_rx_format = %d, item = %d\n", __func__,
+		idx, mi2s_rx_cfg[idx].bit_format,
+		ucontrol->value.enumerated.item[0]);
+
+	return 0;
+}
+
+static int msm_mi2s_rx_format_put(struct snd_kcontrol *kcontrol,
+				struct snd_ctl_elem_value *ucontrol)
+{
+	int idx = mi2s_get_port_idx(kcontrol);
+
+	if (idx < 0)
+		return idx;
+
+	mi2s_rx_cfg[idx].bit_format =
+		mi2s_get_format(ucontrol->value.enumerated.item[0]);
+
+	pr_debug("%s: idx[%d]_rx_format = %d, item = %d\n", __func__,
+		  idx, mi2s_rx_cfg[idx].bit_format,
+		  ucontrol->value.enumerated.item[0]);
+
+	return 0;
+}
+
+static int msm_mi2s_tx_format_get(struct snd_kcontrol *kcontrol,
+				struct snd_ctl_elem_value *ucontrol)
+{
+	int idx = mi2s_get_port_idx(kcontrol);
+
+	if (idx < 0)
+		return idx;
+
+	ucontrol->value.enumerated.item[0] =
+		mi2s_get_format_value(mi2s_tx_cfg[idx].bit_format);
+
+	pr_debug("%s: idx[%d]_tx_format = %d, item = %d\n", __func__,
+		idx, mi2s_tx_cfg[idx].bit_format,
+		ucontrol->value.enumerated.item[0]);
+
+	return 0;
+}
+
+static int msm_mi2s_tx_format_put(struct snd_kcontrol *kcontrol,
+				struct snd_ctl_elem_value *ucontrol)
+{
+	int idx = mi2s_get_port_idx(kcontrol);
+
+	if (idx < 0)
+		return idx;
+
+	mi2s_tx_cfg[idx].bit_format =
+		mi2s_get_format(ucontrol->value.enumerated.item[0]);
+
+	pr_debug("%s: idx[%d]_tx_format = %d, item = %d\n", __func__,
+		  idx, mi2s_tx_cfg[idx].bit_format,
+		  ucontrol->value.enumerated.item[0]);
+
+	return 0;
+}
+
+static int msm_hifi_ctrl(struct snd_soc_codec *codec)
+{
+	struct snd_soc_dapm_context *dapm = snd_soc_codec_get_dapm(codec);
+	struct snd_soc_card *card = codec->component.card;
+	struct msm_asoc_mach_data *pdata =
+				snd_soc_card_get_drvdata(card);
+
+	pr_debug("%s: msm_hifi_control = %d\n", __func__,
+		 msm_hifi_control);
+
+	if (!pdata || !pdata->hph_en1_gpio_p) {
+		pr_err("%s: hph_en1_gpio is invalid\n", __func__);
+		return -EINVAL;
+	}
+	if (msm_hifi_control == MSM_HIFI_ON) {
+		msm_cdc_pinctrl_select_active_state(pdata->hph_en1_gpio_p);
+		/* 5msec delay needed as per HW requirement */
+		usleep_range(5000, 5010);
+	} else {
+		msm_cdc_pinctrl_select_sleep_state(pdata->hph_en1_gpio_p);
+	}
+	snd_soc_dapm_sync(dapm);
+
+	return 0;
+}
+
+static int msm_hifi_get(struct snd_kcontrol *kcontrol,
+			struct snd_ctl_elem_value *ucontrol)
+{
+	pr_debug("%s: msm_hifi_control = %d\n",
+		 __func__, msm_hifi_control);
+	ucontrol->value.integer.value[0] = msm_hifi_control;
+
+	return 0;
+}
+
+static int msm_hifi_put(struct snd_kcontrol *kcontrol,
+			struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
+
+	pr_debug("%s() ucontrol->value.integer.value[0] = %ld\n",
+		 __func__, ucontrol->value.integer.value[0]);
+
+	msm_hifi_control = ucontrol->value.integer.value[0];
+	msm_hifi_ctrl(codec);
+
+	return 0;
+}
+
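+/*
+ * Freebox-specific controls for the WM8804 S/PDIF transceiver wired to
+ * the TERT_MI2S_TX backend.  They expose the raw IEC958 channel status
+ * and the receiver status flags read back from the codec registers.
+ */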
+static inline struct snd_soc_pcm_runtime *get_wm8804_runtime(
+	struct snd_soc_card *card)
+{
+	return snd_soc_get_pcm_runtime(card, LPASS_BE_TERT_MI2S_TX);
+}
+
+static int fbx_wm8804_spdif_info(struct snd_kcontrol *kcontrol,
+	struct snd_ctl_elem_info *uinfo)
+{
+	uinfo->type = SNDRV_CTL_ELEM_TYPE_IEC958;
+	uinfo->count = 1;
+	return 0;
+}
+
+static int fbx_wm8804_spdif_capture_get(struct snd_kcontrol *kcontrol,
+	struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_card *card = snd_kcontrol_chip(kcontrol);
+	struct snd_soc_pcm_runtime *rtd = get_wm8804_runtime(card);
+	struct snd_soc_codec *wm8804_codec;
+	unsigned int mask;
+	int i;
+
+	/* Guard against the wm8804 backend link not being up yet */
+	if (!rtd)
+		return -EINVAL;
+	wm8804_codec = rtd->codec;
+
+	for (i = 0; i < 4; i++) {
+		mask = (i == 3) ? 0x3f : 0xff;
+		ucontrol->value.iec958.status[i] =
+			snd_soc_read(wm8804_codec, WM8804_RXCHAN1 + i) & mask;
+	}
+
+	return 0;
+}
+
+static int fbx_wm8804_status_flag_get(struct snd_kcontrol *kcontrol,
+	struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_card *card = snd_kcontrol_chip(kcontrol);
+	struct snd_soc_pcm_runtime *rtd = get_wm8804_runtime(card);
+	unsigned int bit = kcontrol->private_value & 0xff;
+	unsigned int reg = (kcontrol->private_value >> 8) & 0xff;
+	unsigned int invert = (kcontrol->private_value >> 16) & 0xff;
+	bool flag;
+
+	/* Guard against the wm8804 backend link not being up yet */
+	if (!rtd)
+		return -EINVAL;
+
+	flag = snd_soc_read(rtd->codec, reg) & (1 << bit);
+
+	ucontrol->value.integer.value[0] = invert ? !flag : flag;
+
+	return 0;
+}
+
+static int fbx_wm8804_always_valid_get(struct snd_kcontrol *kcontrol,
+	struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_card *card = snd_kcontrol_chip(kcontrol);
+	struct snd_soc_pcm_runtime *rtd = get_wm8804_runtime(card);
+
+	if (!rtd)
+		return -EINVAL;
+
+	/* Normalize the register bit to 0/1 for the boolean control */
+	ucontrol->value.integer.value[0] =
+		!!(snd_soc_read(rtd->codec, WM8804_PLL6) & BIT(6));
+
+	return 0;
+}
+
+static int fbx_wm8804_always_valid_set(struct snd_kcontrol *kcontrol,
+	struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_card *card = snd_kcontrol_chip(kcontrol);
+	struct snd_soc_pcm_runtime *rtd = get_wm8804_runtime(card);
+
+	if (!rtd)
+		return -EINVAL;
+
+	/* snd_soc_update_bits() returns 1 when the register changed */
+	return snd_soc_update_bits(rtd->codec, WM8804_PLL6, BIT(6),
+				   ucontrol->value.integer.value[0] << 6);
+}
+
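+/*
+ * The recovered S/PDIF base rate is reported by the WM8804 as a 2-bit
+ * field in SPDSTAT (bits 5:4, see the shift in the get handler below);
+ * the enum texts follow that encoding.
+ */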
+static const char * const recovered_frequency_texts[] = {
+	"176.4/192 kHz",
+	"88.2/96 kHz",
+	"44.1/48 kHz",
+	"32 kHz"
+};
+
+#define NUM_RECOVERED_FREQUENCIES \
+	ARRAY_SIZE(recovered_frequency_texts)
+
+static int fbx_wm8804_recovered_frequency_info(struct snd_kcontrol *kcontrol,
+	struct snd_ctl_elem_info *uinfo)
+{
+	uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
+	uinfo->count = 1;
+	uinfo->value.enumerated.items = NUM_RECOVERED_FREQUENCIES;
+	if (uinfo->value.enumerated.item >= NUM_RECOVERED_FREQUENCIES)
+		uinfo->value.enumerated.item = NUM_RECOVERED_FREQUENCIES - 1;
+	strcpy(uinfo->value.enumerated.name,
+		recovered_frequency_texts[uinfo->value.enumerated.item]);
+	return 0;
+}
+
+static int fbx_wm8804_recovered_frequency_get(struct snd_kcontrol *kcontrol,
+	struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_card *card = snd_kcontrol_chip(kcontrol);
+	struct snd_soc_pcm_runtime *rtd = get_wm8804_runtime(card);
+	struct snd_soc_codec *wm8804_codec;
+
+	if (!rtd) {
+		dev_err(card->dev,
+			"%s: snd_soc_get_pcm_runtime for wm8804 failed!\n",
+			__func__);
+		return -EINVAL;
+	}
+
+	wm8804_codec = rtd->codec;
+
+	ucontrol->value.enumerated.item[0] =
+		(snd_soc_read(wm8804_codec, WM8804_SPDSTAT) >> 4) & 0x03;
+	return 0;
+}
+
+static const struct snd_kcontrol_new fbx_wm8804_controls[] = {
+	{
+		.access =  SNDRV_CTL_ELEM_ACCESS_READ
+			   | SNDRV_CTL_ELEM_ACCESS_VOLATILE,
+		.iface =   SNDRV_CTL_ELEM_IFACE_MIXER,
+		.name =    SNDRV_CTL_NAME_IEC958("", CAPTURE, DEFAULT),
+		.info =    fbx_wm8804_spdif_info,
+		.get =     fbx_wm8804_spdif_capture_get,
+	},
+
+	{
+		.access =  SNDRV_CTL_ELEM_ACCESS_READ
+			   | SNDRV_CTL_ELEM_ACCESS_VOLATILE,
+		.iface =   SNDRV_CTL_ELEM_IFACE_MIXER,
+		.name =    SNDRV_CTL_NAME_IEC958("", CAPTURE, NONE)
+				"Recovered Frequency",
+		.info =    fbx_wm8804_recovered_frequency_info,
+		.get =     fbx_wm8804_recovered_frequency_get,
+	},
+
+#define SPDIF_FLAG_CTRL(desc, reg, bit, invert) \
+{ \
+		.access =  SNDRV_CTL_ELEM_ACCESS_READ \
+			   | SNDRV_CTL_ELEM_ACCESS_VOLATILE, \
+		.iface =   SNDRV_CTL_ELEM_IFACE_MIXER, \
+		.name =    SNDRV_CTL_NAME_IEC958("", CAPTURE, NONE) \
+				desc " Flag", \
+		.info =    snd_ctl_boolean_mono_info, \
+		.get =     fbx_wm8804_status_flag_get, \
+		.private_value = \
+			(bit) | ((reg) << 8) | ((invert) << 16) \
+}
+
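+/*
+ * private_value packs the flag description decoded by
+ * fbx_wm8804_status_flag_get(): bits 7..0 hold the register bit,
+ * bits 15..8 the register, bits 23..16 the invert flag.  For example,
+ * SPDIF_FLAG_CTRL("Audio", WM8804_SPDSTAT, 0, 1) reports the inverse
+ * of SPDSTAT bit 0.
+ */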
+	SPDIF_FLAG_CTRL("Audio", WM8804_SPDSTAT, 0, 1),
+	SPDIF_FLAG_CTRL("Non-PCM", WM8804_SPDSTAT, 1, 0),
+	SPDIF_FLAG_CTRL("Copyright", WM8804_SPDSTAT, 2, 1),
+	SPDIF_FLAG_CTRL("De-Emphasis", WM8804_SPDSTAT, 3, 0),
+	SPDIF_FLAG_CTRL("Lock", WM8804_SPDSTAT, 6, 1),
+	SPDIF_FLAG_CTRL("Invalid", WM8804_INTSTAT, 1, 0),
+	SPDIF_FLAG_CTRL("TransErr", WM8804_INTSTAT, 3, 0),
+
+	{
+		.access =  SNDRV_CTL_ELEM_ACCESS_READWRITE
+			   | SNDRV_CTL_ELEM_ACCESS_VOLATILE,
+		.iface =   SNDRV_CTL_ELEM_IFACE_MIXER,
+		.name =    SNDRV_CTL_NAME_IEC958("", CAPTURE, NONE)
+				"Always Valid",
+		.info =    snd_ctl_boolean_mono_info,
+		.get =     fbx_wm8804_always_valid_get,
+		.put =     fbx_wm8804_always_valid_set,
+	},
+};
+
+static const struct snd_kcontrol_new msm_snd_controls[] = {
+	SOC_ENUM_EXT("SLIM_0_RX Channels", slim_0_rx_chs,
+			msm_slim_rx_ch_get, msm_slim_rx_ch_put),
+	SOC_ENUM_EXT("SLIM_2_RX Channels", slim_2_rx_chs,
+			msm_slim_rx_ch_get, msm_slim_rx_ch_put),
+	SOC_ENUM_EXT("SLIM_0_TX Channels", slim_0_tx_chs,
+			msm_slim_tx_ch_get, msm_slim_tx_ch_put),
+	SOC_ENUM_EXT("SLIM_1_TX Channels", slim_1_tx_chs,
+			msm_slim_tx_ch_get, msm_slim_tx_ch_put),
+	SOC_ENUM_EXT("SLIM_5_RX Channels", slim_5_rx_chs,
+			msm_slim_rx_ch_get, msm_slim_rx_ch_put),
+	SOC_ENUM_EXT("SLIM_6_RX Channels", slim_6_rx_chs,
+			msm_slim_rx_ch_get, msm_slim_rx_ch_put),
+	SOC_ENUM_EXT("VI_FEED_TX Channels", vi_feed_tx_chs,
+			msm_vi_feed_tx_ch_get, msm_vi_feed_tx_ch_put),
+	SOC_ENUM_EXT("USB_AUDIO_RX Channels", usb_rx_chs,
+			usb_audio_rx_ch_get, usb_audio_rx_ch_put),
+	SOC_ENUM_EXT("USB_AUDIO_TX Channels", usb_tx_chs,
+			usb_audio_tx_ch_get, usb_audio_tx_ch_put),
+	SOC_ENUM_EXT("HDMI_RX Channels", ext_disp_rx_chs,
+			ext_disp_rx_ch_get, ext_disp_rx_ch_put),
+	SOC_ENUM_EXT("Display Port RX Channels", ext_disp_rx_chs,
+			ext_disp_rx_ch_get, ext_disp_rx_ch_put),
+	SOC_ENUM_EXT("PROXY_RX Channels", proxy_rx_chs,
+			proxy_rx_ch_get, proxy_rx_ch_put),
+	SOC_ENUM_EXT("SLIM_0_RX Format", slim_0_rx_format,
+			slim_rx_bit_format_get, slim_rx_bit_format_put),
+	SOC_ENUM_EXT("SLIM_5_RX Format", slim_5_rx_format,
+			slim_rx_bit_format_get, slim_rx_bit_format_put),
+	SOC_ENUM_EXT("SLIM_6_RX Format", slim_6_rx_format,
+			slim_rx_bit_format_get, slim_rx_bit_format_put),
+	SOC_ENUM_EXT("SLIM_0_TX Format", slim_0_tx_format,
+			slim_tx_bit_format_get, slim_tx_bit_format_put),
+	SOC_ENUM_EXT("USB_AUDIO_RX Format", usb_rx_format,
+			usb_audio_rx_format_get, usb_audio_rx_format_put),
+	SOC_ENUM_EXT("USB_AUDIO_TX Format", usb_tx_format,
+			usb_audio_tx_format_get, usb_audio_tx_format_put),
+	SOC_ENUM_EXT("HDMI_RX Bit Format", ext_disp_rx_format,
+			ext_disp_rx_format_get, ext_disp_rx_format_put),
+	SOC_ENUM_EXT("Display Port RX Bit Format", ext_disp_rx_format,
+			ext_disp_rx_format_get, ext_disp_rx_format_put),
+	SOC_ENUM_EXT("SLIM_0_RX SampleRate", slim_0_rx_sample_rate,
+			slim_rx_sample_rate_get, slim_rx_sample_rate_put),
+	SOC_ENUM_EXT("SLIM_2_RX SampleRate", slim_2_rx_sample_rate,
+			slim_rx_sample_rate_get, slim_rx_sample_rate_put),
+	SOC_ENUM_EXT("SLIM_0_TX SampleRate", slim_0_tx_sample_rate,
+			slim_tx_sample_rate_get, slim_tx_sample_rate_put),
+	SOC_ENUM_EXT("SLIM_5_RX SampleRate", slim_5_rx_sample_rate,
+			slim_rx_sample_rate_get, slim_rx_sample_rate_put),
+	SOC_ENUM_EXT("SLIM_6_RX SampleRate", slim_6_rx_sample_rate,
+			slim_rx_sample_rate_get, slim_rx_sample_rate_put),
+	SOC_ENUM_EXT("BT SampleRate", bt_sample_rate,
+			msm_bt_sample_rate_get,
+			msm_bt_sample_rate_put),
+	SOC_ENUM_EXT("USB_AUDIO_RX SampleRate", usb_rx_sample_rate,
+			usb_audio_rx_sample_rate_get,
+			usb_audio_rx_sample_rate_put),
+	SOC_ENUM_EXT("USB_AUDIO_TX SampleRate", usb_tx_sample_rate,
+			usb_audio_tx_sample_rate_get,
+			usb_audio_tx_sample_rate_put),
+	SOC_ENUM_EXT("HDMI_RX SampleRate", ext_disp_rx_sample_rate,
+			ext_disp_rx_sample_rate_get,
+			ext_disp_rx_sample_rate_put),
+	SOC_ENUM_EXT("Display Port RX SampleRate", ext_disp_rx_sample_rate,
+			ext_disp_rx_sample_rate_get,
+			ext_disp_rx_sample_rate_put),
+	SOC_ENUM_EXT("PRI_TDM_RX_0 SampleRate", tdm_rx_sample_rate,
+			tdm_rx_sample_rate_get,
+			tdm_rx_sample_rate_put),
+	SOC_ENUM_EXT("PRI_TDM_TX_0 SampleRate", tdm_tx_sample_rate,
+			tdm_tx_sample_rate_get,
+			tdm_tx_sample_rate_put),
+	SOC_ENUM_EXT("PRI_TDM_RX_0 Format", tdm_rx_format,
+			tdm_rx_format_get,
+			tdm_rx_format_put),
+	SOC_ENUM_EXT("PRI_TDM_TX_0 Format", tdm_tx_format,
+			tdm_tx_format_get,
+			tdm_tx_format_put),
+	SOC_ENUM_EXT("PRI_TDM_RX_0 Channels", tdm_rx_chs,
+			tdm_rx_ch_get,
+			tdm_rx_ch_put),
+	SOC_ENUM_EXT("PRI_TDM_TX_0 Channels", tdm_tx_chs,
+			tdm_tx_ch_get,
+			tdm_tx_ch_put),
+	SOC_ENUM_EXT("SEC_TDM_RX_0 SampleRate", tdm_rx_sample_rate,
+			tdm_rx_sample_rate_get,
+			tdm_rx_sample_rate_put),
+	SOC_ENUM_EXT("SEC_TDM_TX_0 SampleRate", tdm_tx_sample_rate,
+			tdm_tx_sample_rate_get,
+			tdm_tx_sample_rate_put),
+	SOC_ENUM_EXT("SEC_TDM_RX_0 Format", tdm_rx_format,
+			tdm_rx_format_get,
+			tdm_rx_format_put),
+	SOC_ENUM_EXT("SEC_TDM_TX_0 Format", tdm_tx_format,
+			tdm_tx_format_get,
+			tdm_tx_format_put),
+	SOC_ENUM_EXT("SEC_TDM_RX_0 Channels", tdm_rx_chs,
+			tdm_rx_ch_get,
+			tdm_rx_ch_put),
+	SOC_ENUM_EXT("SEC_TDM_TX_0 Channels", tdm_tx_chs,
+			tdm_tx_ch_get,
+			tdm_tx_ch_put),
+	SOC_ENUM_EXT("TERT_TDM_RX_0 SampleRate", tdm_rx_sample_rate,
+			tdm_rx_sample_rate_get,
+			tdm_rx_sample_rate_put),
+	SOC_ENUM_EXT("TERT_TDM_TX_0 SampleRate", tdm_tx_sample_rate,
+			tdm_tx_sample_rate_get,
+			tdm_tx_sample_rate_put),
+	SOC_ENUM_EXT("TERT_TDM_RX_0 Format", tdm_rx_format,
+			tdm_rx_format_get,
+			tdm_rx_format_put),
+	SOC_ENUM_EXT("TERT_TDM_TX_0 Format", tdm_tx_format,
+			tdm_tx_format_get,
+			tdm_tx_format_put),
+	SOC_ENUM_EXT("TERT_TDM_RX_0 Channels", tdm_rx_chs,
+			tdm_rx_ch_get,
+			tdm_rx_ch_put),
+	SOC_ENUM_EXT("TERT_TDM_TX_0 Channels", tdm_tx_chs,
+			tdm_tx_ch_get,
+			tdm_tx_ch_put),
+	SOC_ENUM_EXT("QUAT_TDM_RX_0 SampleRate", tdm_rx_sample_rate,
+			tdm_rx_sample_rate_get,
+			tdm_rx_sample_rate_put),
+	SOC_ENUM_EXT("QUAT_TDM_TX_0 SampleRate", tdm_tx_sample_rate,
+			tdm_tx_sample_rate_get,
+			tdm_tx_sample_rate_put),
+	SOC_ENUM_EXT("QUAT_TDM_RX_0 Format", tdm_rx_format,
+			tdm_rx_format_get,
+			tdm_rx_format_put),
+	SOC_ENUM_EXT("QUAT_TDM_TX_0 Format", tdm_tx_format,
+			tdm_tx_format_get,
+			tdm_tx_format_put),
+	SOC_ENUM_EXT("QUAT_TDM_RX_0 Channels", tdm_rx_chs,
+			tdm_rx_ch_get,
+			tdm_rx_ch_put),
+	SOC_ENUM_EXT("QUAT_TDM_TX_0 Channels", tdm_tx_chs,
+			tdm_tx_ch_get,
+			tdm_tx_ch_put),
+	SOC_ENUM_EXT("PRIM_AUX_PCM_RX SampleRate", prim_aux_pcm_rx_sample_rate,
+			aux_pcm_rx_sample_rate_get,
+			aux_pcm_rx_sample_rate_put),
+	SOC_ENUM_EXT("SEC_AUX_PCM_RX SampleRate", sec_aux_pcm_rx_sample_rate,
+			aux_pcm_rx_sample_rate_get,
+			aux_pcm_rx_sample_rate_put),
+	SOC_ENUM_EXT("TERT_AUX_PCM_RX SampleRate", tert_aux_pcm_rx_sample_rate,
+			aux_pcm_rx_sample_rate_get,
+			aux_pcm_rx_sample_rate_put),
+	SOC_ENUM_EXT("QUAT_AUX_PCM_RX SampleRate", quat_aux_pcm_rx_sample_rate,
+			aux_pcm_rx_sample_rate_get,
+			aux_pcm_rx_sample_rate_put),
+	SOC_ENUM_EXT("PRIM_AUX_PCM_TX SampleRate", prim_aux_pcm_tx_sample_rate,
+			aux_pcm_tx_sample_rate_get,
+			aux_pcm_tx_sample_rate_put),
+	SOC_ENUM_EXT("SEC_AUX_PCM_TX SampleRate", sec_aux_pcm_tx_sample_rate,
+			aux_pcm_tx_sample_rate_get,
+			aux_pcm_tx_sample_rate_put),
+	SOC_ENUM_EXT("TERT_AUX_PCM_TX SampleRate", tert_aux_pcm_tx_sample_rate,
+			aux_pcm_tx_sample_rate_get,
+			aux_pcm_tx_sample_rate_put),
+	SOC_ENUM_EXT("QUAT_AUX_PCM_TX SampleRate", quat_aux_pcm_tx_sample_rate,
+			aux_pcm_tx_sample_rate_get,
+			aux_pcm_tx_sample_rate_put),
+	SOC_ENUM_EXT("PRIM_MI2S_RX SampleRate", prim_mi2s_rx_sample_rate,
+			mi2s_rx_sample_rate_get,
+			mi2s_rx_sample_rate_put),
+	SOC_ENUM_EXT("SEC_MI2S_RX SampleRate", sec_mi2s_rx_sample_rate,
+			mi2s_rx_sample_rate_get,
+			mi2s_rx_sample_rate_put),
+	SOC_ENUM_EXT("TERT_MI2S_RX SampleRate", tert_mi2s_rx_sample_rate,
+			mi2s_rx_sample_rate_get,
+			mi2s_rx_sample_rate_put),
+	SOC_ENUM_EXT("QUAT_MI2S_RX SampleRate", quat_mi2s_rx_sample_rate,
+			mi2s_rx_sample_rate_get,
+			mi2s_rx_sample_rate_put),
+	SOC_ENUM_EXT("PRIM_MI2S_TX SampleRate", prim_mi2s_tx_sample_rate,
+			mi2s_tx_sample_rate_get,
+			mi2s_tx_sample_rate_put),
+	SOC_ENUM_EXT("SEC_MI2S_TX SampleRate", sec_mi2s_tx_sample_rate,
+			mi2s_tx_sample_rate_get,
+			mi2s_tx_sample_rate_put),
+	SOC_ENUM_EXT("TERT_MI2S_TX SampleRate", tert_mi2s_tx_sample_rate,
+			mi2s_tx_sample_rate_get,
+			mi2s_tx_sample_rate_put),
+	SOC_ENUM_EXT("QUAT_MI2S_TX SampleRate", quat_mi2s_tx_sample_rate,
+			mi2s_tx_sample_rate_get,
+			mi2s_tx_sample_rate_put),
+	SOC_ENUM_EXT("PRIM_MI2S_RX Channels", prim_mi2s_rx_chs,
+			msm_mi2s_rx_ch_get, msm_mi2s_rx_ch_put),
+	SOC_ENUM_EXT("PRIM_MI2S_TX Channels", prim_mi2s_tx_chs,
+			msm_mi2s_tx_ch_get, msm_mi2s_tx_ch_put),
+	SOC_ENUM_EXT("SEC_MI2S_RX Channels", sec_mi2s_rx_chs,
+			msm_mi2s_rx_ch_get, msm_mi2s_rx_ch_put),
+	SOC_ENUM_EXT("SEC_MI2S_TX Channels", sec_mi2s_tx_chs,
+			msm_mi2s_tx_ch_get, msm_mi2s_tx_ch_put),
+	SOC_ENUM_EXT("TERT_MI2S_RX Channels", tert_mi2s_rx_chs,
+			msm_mi2s_rx_ch_get, msm_mi2s_rx_ch_put),
+	SOC_ENUM_EXT("TERT_MI2S_TX Channels", tert_mi2s_tx_chs,
+			msm_mi2s_tx_ch_get, msm_mi2s_tx_ch_put),
+	SOC_ENUM_EXT("QUAT_MI2S_RX Channels", quat_mi2s_rx_chs,
+			msm_mi2s_rx_ch_get, msm_mi2s_rx_ch_put),
+	SOC_ENUM_EXT("QUAT_MI2S_TX Channels", quat_mi2s_tx_chs,
+			msm_mi2s_tx_ch_get, msm_mi2s_tx_ch_put),
+	SOC_ENUM_EXT("PRIM_MI2S_RX Format", mi2s_rx_format,
+			msm_mi2s_rx_format_get, msm_mi2s_rx_format_put),
+	SOC_ENUM_EXT("PRIM_MI2S_TX Format", mi2s_tx_format,
+			msm_mi2s_tx_format_get, msm_mi2s_tx_format_put),
+	SOC_ENUM_EXT("SEC_MI2S_RX Format", mi2s_rx_format,
+			msm_mi2s_rx_format_get, msm_mi2s_rx_format_put),
+	SOC_ENUM_EXT("SEC_MI2S_TX Format", mi2s_tx_format,
+			msm_mi2s_tx_format_get, msm_mi2s_tx_format_put),
+	SOC_ENUM_EXT("TERT_MI2S_RX Format", mi2s_rx_format,
+			msm_mi2s_rx_format_get, msm_mi2s_rx_format_put),
+	SOC_ENUM_EXT("TERT_MI2S_TX Format", mi2s_tx_format,
+			msm_mi2s_tx_format_get, msm_mi2s_tx_format_put),
+	SOC_ENUM_EXT("QUAT_MI2S_RX Format", mi2s_rx_format,
+			msm_mi2s_rx_format_get, msm_mi2s_rx_format_put),
+	SOC_ENUM_EXT("QUAT_MI2S_TX Format", mi2s_tx_format,
+			msm_mi2s_tx_format_get, msm_mi2s_tx_format_put),
+	SOC_ENUM_EXT("HiFi Function", hifi_function, msm_hifi_get,
+			msm_hifi_put),
+};
+
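+/*
+ * MCLK handling differs per codec: tasha takes a dapm flag and has a
+ * separate TX clock enable, while tavil exposes a single mclk enable.
+ * The codec is identified by device name; anything else is rejected
+ * with -EINVAL.
+ */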
+static int msm_snd_enable_codec_ext_clk(struct snd_soc_codec *codec,
+					int enable, bool dapm)
+{
+	int ret = 0;
+
+	if (!strcmp(dev_name(codec->dev), "tasha_codec"))
+		ret = tasha_cdc_mclk_enable(codec, enable, dapm);
+	else if (!strcmp(dev_name(codec->dev), "tavil_codec"))
+		ret = tavil_cdc_mclk_enable(codec, enable);
+	else {
+		dev_err(codec->dev, "%s: unknown codec to enable ext clk\n",
+			__func__);
+		ret = -EINVAL;
+	}
+	return ret;
+}
+
+static int msm_snd_enable_codec_ext_tx_clk(struct snd_soc_codec *codec,
+					   int enable, bool dapm)
+{
+	int ret = 0;
+
+	if (!strcmp(dev_name(codec->dev), "tasha_codec"))
+		ret = tasha_cdc_mclk_tx_enable(codec, enable, dapm);
+	else {
+		dev_err(codec->dev, "%s: unknown codec to enable ext clk\n",
+			__func__);
+		ret = -EINVAL;
+	}
+	return ret;
+}
+
+static int msm_mclk_tx_event(struct snd_soc_dapm_widget *w,
+				 struct snd_kcontrol *kcontrol, int event)
+{
+	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+
+	pr_debug("%s: event = %d\n", __func__, event);
+
+	switch (event) {
+	case SND_SOC_DAPM_PRE_PMU:
+		return msm_snd_enable_codec_ext_tx_clk(codec, 1, true);
+	case SND_SOC_DAPM_POST_PMD:
+		return msm_snd_enable_codec_ext_tx_clk(codec, 0, true);
+	}
+	return 0;
+}
+
+static int msm_mclk_event(struct snd_soc_dapm_widget *w,
+				 struct snd_kcontrol *kcontrol, int event)
+{
+	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+
+	pr_debug("%s: event = %d\n", __func__, event);
+
+	switch (event) {
+	case SND_SOC_DAPM_PRE_PMU:
+		return msm_snd_enable_codec_ext_clk(codec, 1, true);
+	case SND_SOC_DAPM_POST_PMD:
+		return msm_snd_enable_codec_ext_clk(codec, 0, true);
+	}
+	return 0;
+}
+
+static int msm_hifi_ctrl_event(struct snd_soc_dapm_widget *w,
+			       struct snd_kcontrol *k, int event)
+{
+	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+	struct snd_soc_card *card = codec->component.card;
+	struct msm_asoc_mach_data *pdata =
+				snd_soc_card_get_drvdata(card);
+
+	pr_debug("%s: msm_hifi_control = %d\n", __func__, msm_hifi_control);
+
+	if (!pdata || !pdata->hph_en0_gpio_p) {
+		pr_err("%s: hph_en0_gpio is invalid\n", __func__);
+		return -EINVAL;
+	}
+
+	if (msm_hifi_control != MSM_HIFI_ON) {
+		pr_debug("%s: HiFi mixer control is not set\n",
+			 __func__);
+		return 0;
+	}
+
+	switch (event) {
+	case SND_SOC_DAPM_POST_PMU:
+		msm_cdc_pinctrl_select_active_state(pdata->hph_en0_gpio_p);
+		break;
+	case SND_SOC_DAPM_PRE_PMD:
+		msm_cdc_pinctrl_select_sleep_state(pdata->hph_en0_gpio_p);
+		break;
+	}
+
+	return 0;
+}
+
+static const struct snd_soc_dapm_widget msm_dapm_widgets[] = {
+
+	SND_SOC_DAPM_SUPPLY("MCLK", SND_SOC_NOPM, 0, 0,
+			    msm_mclk_event,
+			    SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+
+	SND_SOC_DAPM_SUPPLY("MCLK TX", SND_SOC_NOPM, 0, 0,
+			    msm_mclk_tx_event,
+			    SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+
+	SND_SOC_DAPM_SPK("Lineout_1 amp", NULL),
+	SND_SOC_DAPM_SPK("Lineout_3 amp", NULL),
+	SND_SOC_DAPM_SPK("Lineout_2 amp", NULL),
+	SND_SOC_DAPM_SPK("Lineout_4 amp", NULL),
+	SND_SOC_DAPM_SPK("hifi amp", msm_hifi_ctrl_event),
+	SND_SOC_DAPM_MIC("Handset Mic", NULL),
+	SND_SOC_DAPM_MIC("Headset Mic", NULL),
+	SND_SOC_DAPM_MIC("ANCRight Headset Mic", NULL),
+	SND_SOC_DAPM_MIC("ANCLeft Headset Mic", NULL),
+	SND_SOC_DAPM_MIC("Analog Mic5", NULL),
+	SND_SOC_DAPM_MIC("Analog Mic6", NULL),
+
+	SND_SOC_DAPM_MIC("Digital Mic0", NULL),
+	SND_SOC_DAPM_MIC("Digital Mic1", NULL),
+	SND_SOC_DAPM_MIC("Digital Mic2", NULL),
+	SND_SOC_DAPM_MIC("Digital Mic3", NULL),
+	SND_SOC_DAPM_MIC("Digital Mic4", NULL),
+	SND_SOC_DAPM_MIC("Digital Mic5", NULL),
+};
+
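+/*
+ * Helpers for the hw_params fixup below: param_set_mask() clears the
+ * format mask and sets exactly one bit, pinning the backend to the bit
+ * format cached by the mixer controls.
+ */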
+static inline int param_is_mask(int p)
+{
+	return (p >= SNDRV_PCM_HW_PARAM_FIRST_MASK) &&
+			(p <= SNDRV_PCM_HW_PARAM_LAST_MASK);
+}
+
+static inline struct snd_mask *param_to_mask(struct snd_pcm_hw_params *p,
+					     int n)
+{
+	return &(p->masks[n - SNDRV_PCM_HW_PARAM_FIRST_MASK]);
+}
+
+static void param_set_mask(struct snd_pcm_hw_params *p, int n, unsigned bit)
+{
+	if (bit >= SNDRV_MASK_MAX)
+		return;
+	if (param_is_mask(n)) {
+		struct snd_mask *m = param_to_mask(p, n);
+
+		m->bits[0] = 0;
+		m->bits[1] = 0;
+		m->bits[bit >> 5] |= (1 << (bit & 31));
+	}
+}
+
+static int msm_slim_get_ch_from_beid(int32_t be_id)
+{
+	int ch_id = 0;
+
+	switch (be_id) {
+	case MSM_BACKEND_DAI_SLIMBUS_0_RX:
+		ch_id = SLIM_RX_0;
+		break;
+	case MSM_BACKEND_DAI_SLIMBUS_1_RX:
+		ch_id = SLIM_RX_1;
+		break;
+	case MSM_BACKEND_DAI_SLIMBUS_2_RX:
+		ch_id = SLIM_RX_2;
+		break;
+	case MSM_BACKEND_DAI_SLIMBUS_3_RX:
+		ch_id = SLIM_RX_3;
+		break;
+	case MSM_BACKEND_DAI_SLIMBUS_4_RX:
+		ch_id = SLIM_RX_4;
+		break;
+	case MSM_BACKEND_DAI_SLIMBUS_6_RX:
+		ch_id = SLIM_RX_6;
+		break;
+	case MSM_BACKEND_DAI_SLIMBUS_0_TX:
+		ch_id = SLIM_TX_0;
+		break;
+	case MSM_BACKEND_DAI_SLIMBUS_3_TX:
+		ch_id = SLIM_TX_3;
+		break;
+	default:
+		ch_id = SLIM_RX_0;
+		break;
+	}
+
+	return ch_id;
+}
+
+static int msm_ext_disp_get_idx_from_beid(int32_t be_id)
+{
+	int idx;
+
+	switch (be_id) {
+	case MSM_BACKEND_DAI_HDMI_RX:
+		idx = HDMI_RX_IDX;
+		break;
+	case MSM_BACKEND_DAI_DISPLAY_PORT_RX:
+		idx = DP_RX_IDX;
+		break;
+	default:
+		pr_err("%s: Incorrect ext_disp be_id %d\n", __func__, be_id);
+		idx = -EINVAL;
+		break;
+	}
+
+	return idx;
+}
+
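+/*
+ * Backend fixup: called by the ASoC PCM core for each backend DAI link
+ * so the machine driver can override the rate, channel count and format
+ * negotiated on the frontend with the values cached by the mixer
+ * controls above.  Unhandled backends are simply pinned to 48 kHz.
+ */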
+static int msm_be_hw_params_fixup(struct snd_soc_pcm_runtime *rtd,
+				  struct snd_pcm_hw_params *params)
+{
+	struct snd_soc_dai_link *dai_link = rtd->dai_link;
+	struct snd_interval *rate = hw_param_interval(params,
+					SNDRV_PCM_HW_PARAM_RATE);
+	struct snd_interval *channels = hw_param_interval(params,
+					SNDRV_PCM_HW_PARAM_CHANNELS);
+	int rc = 0;
+	int idx;
+	void *config = NULL;
+	struct snd_soc_codec *codec = NULL;
+
+	pr_debug("%s: format = %d, rate = %d\n",
+		  __func__, params_format(params), params_rate(params));
+
+	switch (dai_link->be_id) {
+	case MSM_BACKEND_DAI_SLIMBUS_0_RX:
+	case MSM_BACKEND_DAI_SLIMBUS_1_RX:
+	case MSM_BACKEND_DAI_SLIMBUS_2_RX:
+	case MSM_BACKEND_DAI_SLIMBUS_3_RX:
+	case MSM_BACKEND_DAI_SLIMBUS_4_RX:
+	case MSM_BACKEND_DAI_SLIMBUS_6_RX:
+		idx = msm_slim_get_ch_from_beid(dai_link->be_id);
+		param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT,
+				slim_rx_cfg[idx].bit_format);
+		rate->min = rate->max = slim_rx_cfg[idx].sample_rate;
+		channels->min = channels->max = slim_rx_cfg[idx].channels;
+		break;
+
+	case MSM_BACKEND_DAI_SLIMBUS_0_TX:
+	case MSM_BACKEND_DAI_SLIMBUS_3_TX:
+		idx = msm_slim_get_ch_from_beid(dai_link->be_id);
+		param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT,
+				slim_tx_cfg[idx].bit_format);
+		rate->min = rate->max = slim_tx_cfg[idx].sample_rate;
+		channels->min = channels->max = slim_tx_cfg[idx].channels;
+		break;
+
+	case MSM_BACKEND_DAI_SLIMBUS_1_TX:
+		param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT,
+				slim_tx_cfg[1].bit_format);
+		rate->min = rate->max = slim_tx_cfg[1].sample_rate;
+		channels->min = channels->max = slim_tx_cfg[1].channels;
+		break;
+
+	case MSM_BACKEND_DAI_SLIMBUS_4_TX:
+		param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT,
+			       SNDRV_PCM_FORMAT_S32_LE);
+		rate->min = rate->max = SAMPLING_RATE_8KHZ;
+		channels->min = channels->max = msm_vi_feed_tx_ch;
+		break;
+
+	case MSM_BACKEND_DAI_SLIMBUS_5_RX:
+		param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT,
+				slim_rx_cfg[5].bit_format);
+		rate->min = rate->max = slim_rx_cfg[5].sample_rate;
+		channels->min = channels->max = slim_rx_cfg[5].channels;
+		break;
+
+	case MSM_BACKEND_DAI_SLIMBUS_5_TX:
+		codec = rtd->codec;
+		rate->min = rate->max = SAMPLING_RATE_16KHZ;
+		channels->min = channels->max = 1;
+
+		config = msm_codec_fn.get_afe_config_fn(codec,
+					AFE_SLIMBUS_SLAVE_PORT_CONFIG);
+		if (config) {
+			rc = afe_set_config(AFE_SLIMBUS_SLAVE_PORT_CONFIG,
+					    config, SLIMBUS_5_TX);
+			if (rc)
+				pr_err("%s: Failed to set slimbus slave port config %d\n",
+					__func__, rc);
+		}
+		break;
+
+	case MSM_BACKEND_DAI_SLIMBUS_7_RX:
+		param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT,
+				slim_rx_cfg[SLIM_RX_7].bit_format);
+		rate->min = rate->max = slim_rx_cfg[SLIM_RX_7].sample_rate;
+		channels->min = channels->max =
+			slim_rx_cfg[SLIM_RX_7].channels;
+		break;
+
+	case MSM_BACKEND_DAI_SLIMBUS_7_TX:
+		rate->min = rate->max = slim_tx_cfg[SLIM_TX_7].sample_rate;
+		channels->min = channels->max =
+			slim_tx_cfg[SLIM_TX_7].channels;
+		break;
+
+	case MSM_BACKEND_DAI_SLIMBUS_8_TX:
+		rate->min = rate->max = slim_tx_cfg[SLIM_TX_8].sample_rate;
+		channels->min = channels->max =
+			slim_tx_cfg[SLIM_TX_8].channels;
+		break;
+
+	case MSM_BACKEND_DAI_USB_RX:
+		param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT,
+				usb_rx_cfg.bit_format);
+		rate->min = rate->max = usb_rx_cfg.sample_rate;
+		channels->min = channels->max = usb_rx_cfg.channels;
+		break;
+
+	case MSM_BACKEND_DAI_USB_TX:
+		param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT,
+				usb_tx_cfg.bit_format);
+		rate->min = rate->max = usb_tx_cfg.sample_rate;
+		channels->min = channels->max = usb_tx_cfg.channels;
+		break;
+
+	case MSM_BACKEND_DAI_HDMI_RX:
+	case MSM_BACKEND_DAI_DISPLAY_PORT_RX:
+		idx = msm_ext_disp_get_idx_from_beid(dai_link->be_id);
+		if (idx < 0) {
+			pr_err("%s: Incorrect ext disp idx %d\n",
+			       __func__, idx);
+			rc = idx;
+			goto done;
+		}
+
+		param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT,
+				ext_disp_rx_cfg[idx].bit_format);
+		rate->min = rate->max = ext_disp_rx_cfg[idx].sample_rate;
+		channels->min = channels->max = ext_disp_rx_cfg[idx].channels;
+		break;
+
+	case MSM_BACKEND_DAI_AFE_PCM_RX:
+		channels->min = channels->max = proxy_rx_cfg.channels;
+		rate->min = rate->max = SAMPLING_RATE_48KHZ;
+		break;
+
+	case MSM_BACKEND_DAI_PRI_TDM_RX_0:
+		channels->min = channels->max =
+				tdm_rx_cfg[TDM_PRI][TDM_0].channels;
+		param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT,
+			       tdm_rx_cfg[TDM_PRI][TDM_0].bit_format);
+		rate->min = rate->max = tdm_rx_cfg[TDM_PRI][TDM_0].sample_rate;
+		break;
+
+	case MSM_BACKEND_DAI_PRI_TDM_TX_0:
+		channels->min = channels->max =
+				tdm_tx_cfg[TDM_PRI][TDM_0].channels;
+		param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT,
+			       tdm_tx_cfg[TDM_PRI][TDM_0].bit_format);
+		rate->min = rate->max = tdm_tx_cfg[TDM_PRI][TDM_0].sample_rate;
+		break;
+
+	case MSM_BACKEND_DAI_SEC_TDM_RX_0:
+		channels->min = channels->max =
+				tdm_rx_cfg[TDM_SEC][TDM_0].channels;
+		param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT,
+			       tdm_rx_cfg[TDM_SEC][TDM_0].bit_format);
+		rate->min = rate->max = tdm_rx_cfg[TDM_SEC][TDM_0].sample_rate;
+		break;
+
+	case MSM_BACKEND_DAI_SEC_TDM_TX_0:
+		channels->min = channels->max =
+				tdm_tx_cfg[TDM_SEC][TDM_0].channels;
+		param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT,
+			       tdm_tx_cfg[TDM_SEC][TDM_0].bit_format);
+		rate->min = rate->max = tdm_tx_cfg[TDM_SEC][TDM_0].sample_rate;
+		break;
+
+	case MSM_BACKEND_DAI_TERT_TDM_RX_0:
+		channels->min = channels->max =
+				tdm_rx_cfg[TDM_TERT][TDM_0].channels;
+		param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT,
+			       tdm_rx_cfg[TDM_TERT][TDM_0].bit_format);
+		rate->min = rate->max = tdm_rx_cfg[TDM_TERT][TDM_0].sample_rate;
+		break;
+
+	case MSM_BACKEND_DAI_TERT_TDM_TX_0:
+		channels->min = channels->max =
+				tdm_tx_cfg[TDM_TERT][TDM_0].channels;
+		param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT,
+			       tdm_tx_cfg[TDM_TERT][TDM_0].bit_format);
+		rate->min = rate->max = tdm_tx_cfg[TDM_TERT][TDM_0].sample_rate;
+		break;
+
+	case MSM_BACKEND_DAI_QUAT_TDM_RX_0:
+		channels->min = channels->max =
+				tdm_rx_cfg[TDM_QUAT][TDM_0].channels;
+		param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT,
+			       tdm_rx_cfg[TDM_QUAT][TDM_0].bit_format);
+		rate->min = rate->max = tdm_rx_cfg[TDM_QUAT][TDM_0].sample_rate;
+		break;
+
+	case MSM_BACKEND_DAI_QUAT_TDM_TX_0:
+		channels->min = channels->max =
+				tdm_tx_cfg[TDM_QUAT][TDM_0].channels;
+		param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT,
+			       tdm_tx_cfg[TDM_QUAT][TDM_0].bit_format);
+		rate->min = rate->max = tdm_tx_cfg[TDM_QUAT][TDM_0].sample_rate;
+		break;
+
+	case MSM_BACKEND_DAI_AUXPCM_RX:
+		rate->min = rate->max =
+			aux_pcm_rx_cfg[PRIM_AUX_PCM].sample_rate;
+		channels->min = channels->max =
+			aux_pcm_rx_cfg[PRIM_AUX_PCM].channels;
+		break;
+
+	case MSM_BACKEND_DAI_AUXPCM_TX:
+		rate->min = rate->max =
+			aux_pcm_tx_cfg[PRIM_AUX_PCM].sample_rate;
+		channels->min = channels->max =
+			aux_pcm_tx_cfg[PRIM_AUX_PCM].channels;
+		break;
+
+	case MSM_BACKEND_DAI_SEC_AUXPCM_RX:
+		rate->min = rate->max =
+			aux_pcm_rx_cfg[SEC_AUX_PCM].sample_rate;
+		channels->min = channels->max =
+			aux_pcm_rx_cfg[SEC_AUX_PCM].channels;
+		break;
+
+	case MSM_BACKEND_DAI_SEC_AUXPCM_TX:
+		rate->min = rate->max =
+			aux_pcm_tx_cfg[SEC_AUX_PCM].sample_rate;
+		channels->min = channels->max =
+			aux_pcm_tx_cfg[SEC_AUX_PCM].channels;
+		break;
+
+	case MSM_BACKEND_DAI_TERT_AUXPCM_RX:
+		rate->min = rate->max =
+			aux_pcm_rx_cfg[TERT_AUX_PCM].sample_rate;
+		channels->min = channels->max =
+			aux_pcm_rx_cfg[TERT_AUX_PCM].channels;
+		break;
+
+	case MSM_BACKEND_DAI_TERT_AUXPCM_TX:
+		rate->min = rate->max =
+			aux_pcm_tx_cfg[TERT_AUX_PCM].sample_rate;
+		channels->min = channels->max =
+			aux_pcm_tx_cfg[TERT_AUX_PCM].channels;
+		break;
+
+	case MSM_BACKEND_DAI_QUAT_AUXPCM_RX:
+		rate->min = rate->max =
+			aux_pcm_rx_cfg[QUAT_AUX_PCM].sample_rate;
+		channels->min = channels->max =
+			aux_pcm_rx_cfg[QUAT_AUX_PCM].channels;
+		break;
+
+	case MSM_BACKEND_DAI_QUAT_AUXPCM_TX:
+		rate->min = rate->max =
+			aux_pcm_tx_cfg[QUAT_AUX_PCM].sample_rate;
+		channels->min = channels->max =
+			aux_pcm_tx_cfg[QUAT_AUX_PCM].channels;
+		break;
+
+	case MSM_BACKEND_DAI_PRI_MI2S_RX:
+		param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT,
+			mi2s_rx_cfg[PRIM_MI2S].bit_format);
+		rate->min = rate->max = mi2s_rx_cfg[PRIM_MI2S].sample_rate;
+		channels->min = channels->max =
+			mi2s_rx_cfg[PRIM_MI2S].channels;
+		break;
+
+	case MSM_BACKEND_DAI_PRI_MI2S_TX:
+		param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT,
+			mi2s_tx_cfg[PRIM_MI2S].bit_format);
+		rate->min = rate->max = mi2s_tx_cfg[PRIM_MI2S].sample_rate;
+		channels->min = channels->max =
+			mi2s_tx_cfg[PRIM_MI2S].channels;
+		break;
+
+	case MSM_BACKEND_DAI_SECONDARY_MI2S_RX:
+		param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT,
+			mi2s_rx_cfg[SEC_MI2S].bit_format);
+		rate->min = rate->max = mi2s_rx_cfg[SEC_MI2S].sample_rate;
+		channels->min = channels->max =
+			mi2s_rx_cfg[SEC_MI2S].channels;
+		break;
+
+	case MSM_BACKEND_DAI_SECONDARY_MI2S_TX:
+		param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT,
+			mi2s_tx_cfg[SEC_MI2S].bit_format);
+		rate->min = rate->max = mi2s_tx_cfg[SEC_MI2S].sample_rate;
+		channels->min = channels->max =
+			mi2s_tx_cfg[SEC_MI2S].channels;
+		break;
+
+	case MSM_BACKEND_DAI_TERTIARY_MI2S_RX:
+		param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT,
+			mi2s_rx_cfg[TERT_MI2S].bit_format);
+		rate->min = rate->max = mi2s_rx_cfg[TERT_MI2S].sample_rate;
+		channels->min = channels->max =
+			mi2s_rx_cfg[TERT_MI2S].channels;
+		break;
+
+	case MSM_BACKEND_DAI_TERTIARY_MI2S_TX:
+		param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT,
+			mi2s_tx_cfg[TERT_MI2S].bit_format);
+		rate->min = rate->max = mi2s_tx_cfg[TERT_MI2S].sample_rate;
+		channels->min = channels->max =
+			mi2s_tx_cfg[TERT_MI2S].channels;
+		break;
+
+	case MSM_BACKEND_DAI_QUATERNARY_MI2S_RX:
+		param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT,
+			mi2s_rx_cfg[QUAT_MI2S].bit_format);
+		rate->min = rate->max = mi2s_rx_cfg[QUAT_MI2S].sample_rate;
+		channels->min = channels->max =
+			mi2s_rx_cfg[QUAT_MI2S].channels;
+		break;
+
+	case MSM_BACKEND_DAI_QUATERNARY_MI2S_TX:
+		param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT,
+			mi2s_tx_cfg[QUAT_MI2S].bit_format);
+		rate->min = rate->max = mi2s_tx_cfg[QUAT_MI2S].sample_rate;
+		channels->min = channels->max =
+			mi2s_tx_cfg[QUAT_MI2S].channels;
+		break;
+
+	default:
+		rate->min = rate->max = SAMPLING_RATE_48KHZ;
+		break;
+	}
+
+done:
+	return rc;
+}
+
+static bool msm_swap_gnd_mic(struct snd_soc_codec *codec)
+{
+	struct snd_soc_card *card = codec->component.card;
+	struct msm_asoc_mach_data *pdata =
+				snd_soc_card_get_drvdata(card);
+	int value = 0;
+
+	if (pdata->us_euro_gpio_p) {
+		value = msm_cdc_pinctrl_get_state(pdata->us_euro_gpio_p);
+		if (value)
+			msm_cdc_pinctrl_select_sleep_state(
+							pdata->us_euro_gpio_p);
+		else
+			msm_cdc_pinctrl_select_active_state(
+							pdata->us_euro_gpio_p);
+	} else if (pdata->us_euro_gpio >= 0) {
+		value = gpio_get_value_cansleep(pdata->us_euro_gpio);
+		gpio_set_value_cansleep(pdata->us_euro_gpio, !value);
+	}
+	pr_debug("%s: swap select switch %d to %d\n", __func__, value, !value);
+	return true;
+}
+
+static int msm_afe_set_config(struct snd_soc_codec *codec)
+{
+	int ret = 0;
+	void *config_data = NULL;
+
+	if (!msm_codec_fn.get_afe_config_fn) {
+		dev_err(codec->dev, "%s: codec get afe config not init'ed\n",
+			__func__);
+		return -EINVAL;
+	}
+
+	config_data = msm_codec_fn.get_afe_config_fn(codec,
+			AFE_CDC_REGISTERS_CONFIG);
+	if (config_data) {
+		ret = afe_set_config(AFE_CDC_REGISTERS_CONFIG, config_data, 0);
+		if (ret) {
+			dev_err(codec->dev,
+				"%s: Failed to set codec registers config %d\n",
+				__func__, ret);
+			return ret;
+		}
+	}
+
+	config_data = msm_codec_fn.get_afe_config_fn(codec,
+			AFE_CDC_REGISTER_PAGE_CONFIG);
+	if (config_data) {
+		ret = afe_set_config(AFE_CDC_REGISTER_PAGE_CONFIG, config_data,
+				    0);
+		if (ret)
+			dev_err(codec->dev,
+				"%s: Failed to set cdc register page config\n",
+				__func__);
+	}
+
+	config_data = msm_codec_fn.get_afe_config_fn(codec,
+			AFE_SLIMBUS_SLAVE_CONFIG);
+	if (config_data) {
+		ret = afe_set_config(AFE_SLIMBUS_SLAVE_CONFIG, config_data, 0);
+		if (ret) {
+			dev_err(codec->dev,
+				"%s: Failed to set slimbus slave config %d\n",
+				__func__, ret);
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
+static void msm_afe_clear_config(void)
+{
+	afe_clear_config(AFE_CDC_REGISTERS_CONFIG);
+	afe_clear_config(AFE_SLIMBUS_SLAVE_CONFIG);
+}
+
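+/*
+ * After a subsystem restart the ADSP may still be booting, so poll
+ * q6core_is_adsp_ready() in 50 ms steps until it responds or
+ * ADSP_STATE_READY_TIMEOUT_MS expires, then replay the AFE
+ * configuration.
+ */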
+static int msm_adsp_power_up_config(struct snd_soc_codec *codec)
+{
+	int ret = 0;
+	unsigned long timeout;
+	int adsp_ready = 0;
+
+	timeout = jiffies +
+		msecs_to_jiffies(ADSP_STATE_READY_TIMEOUT_MS);
+
+	do {
+		if (q6core_is_adsp_ready()) {
+			pr_debug("%s: ADSP Audio is ready\n", __func__);
+			adsp_ready = 1;
+			break;
+		} else {
+			/*
+			 * The ADSP comes back up after a subsystem restart
+			 * and might not be fully up when control reaches
+			 * here, so wait 50 ms before rechecking its state.
+			 */
+			msleep(50);
+		}
+	} while (time_after(timeout, jiffies));
+
+	if (!adsp_ready) {
+		pr_err("%s: timed out waiting for ADSP Audio\n", __func__);
+		ret = -ETIMEDOUT;
+		goto err_fail;
+	}
+
+	ret = msm_afe_set_config(codec);
+	if (ret)
+		pr_err("%s: Failed to set AFE config. err %d\n",
+			__func__, ret);
+
+	return ret;
+
+err_fail:
+	return ret;
+}
+
+static int msm8998_notifier_service_cb(struct notifier_block *this,
+					 unsigned long opcode, void *ptr)
+{
+	int ret;
+	struct snd_soc_card *card = NULL;
+	const char *be_dl_name = LPASS_BE_SLIMBUS_0_RX;
+	struct snd_soc_pcm_runtime *rtd;
+	struct snd_soc_codec *codec;
+
+	pr_debug("%s: Service opcode 0x%lx\n", __func__, opcode);
+
+	switch (opcode) {
+	case AUDIO_NOTIFIER_SERVICE_DOWN:
+		/*
+		 * Use the flag to ignore initial boot notifications:
+		 * msm_adsp_power_up_config() already runs at init, so
+		 * there is no need to clear and set the config again.
+		 */
+		if (is_initial_boot)
+			break;
+		msm_afe_clear_config();
+		break;
+	case AUDIO_NOTIFIER_SERVICE_UP:
+		if (is_initial_boot) {
+			is_initial_boot = false;
+			break;
+		}
+		/* Notifier callbacks must return NOTIFY_* codes */
+		if (!spdev)
+			return NOTIFY_BAD;
+
+		card = platform_get_drvdata(spdev);
+		rtd = snd_soc_get_pcm_runtime(card, be_dl_name);
+		if (!rtd) {
+			dev_err(card->dev,
+				"%s: snd_soc_get_pcm_runtime for %s failed!\n",
+				__func__, be_dl_name);
+			ret = -EINVAL;
+			goto done;
+		}
+		codec = rtd->codec;
+
+		ret = msm_adsp_power_up_config(codec);
+		if (ret < 0) {
+			dev_err(card->dev,
+				"%s: msm_adsp_power_up_config failed ret = %d!\n",
+				__func__, ret);
+			goto done;
+		}
+		break;
+	default:
+		break;
+	}
+done:
+	return NOTIFY_OK;
+}
+
+static struct notifier_block service_nb = {
+	.notifier_call  = msm8998_notifier_service_cb,
+	.priority = -INT_MAX,
+};
+
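+/*
+ * Late init for the codec DAI link: registers the board DAPM widgets
+ * and routes, programs the codec SLIMBUS channel maps, pushes the AFE
+ * configuration to the DSP and creates the /proc codec entries.
+ */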
+static int msm_audrx_init(struct snd_soc_pcm_runtime *rtd)
+{
+	int ret = 0;
+	void *config_data;
+	struct snd_soc_codec *codec = rtd->codec;
+	struct snd_soc_dapm_context *dapm = snd_soc_codec_get_dapm(codec);
+	struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
+	struct snd_soc_dai *codec_dai = rtd->codec_dai;
+	struct snd_soc_pcm_runtime *rtd_aux = rtd->card->rtd_aux;
+	struct snd_card *card;
+	struct snd_info_entry *entry;
+	struct msm_asoc_mach_data *pdata =
+				snd_soc_card_get_drvdata(rtd->card);
+
+	/* Codec SLIMBUS configuration
+	 * RX1, RX2, RX3, RX4, RX5, RX6, RX7, RX8, RX9, RX10, RX11, RX12, RX13
+	 * TX1, TX2, TX3, TX4, TX5, TX6, TX7, TX8, TX9, TX10, TX11, TX12, TX13
+	 * TX14, TX15, TX16
+	 */
+	unsigned int rx_ch[TASHA_RX_MAX] = {144, 145, 146, 147, 148, 149, 150,
+					    151, 152, 153, 154, 155, 156};
+	unsigned int tx_ch[TASHA_TX_MAX] = {128, 129, 130, 131, 132, 133,
+					    134, 135, 136, 137, 138, 139,
+					    140, 141, 142, 143};
+
+	/* Tavil Codec SLIMBUS configuration
+	 * RX1, RX2, RX3, RX4, RX5, RX6, RX7, RX8
+	 * TX1, TX2, TX3, TX4, TX5, TX6, TX7, TX8, TX9, TX10, TX11, TX12, TX13
+	 * TX14, TX15, TX16
+	 */
+	unsigned int rx_ch_tavil[WCD934X_RX_MAX] = {144, 145, 146, 147, 148,
+						    149, 150, 151};
+	unsigned int tx_ch_tavil[WCD934X_TX_MAX] = {128, 129, 130, 131, 132,
+						    133, 134, 135, 136, 137,
+						    138, 139, 140, 141, 142,
+						    143};
+
+	pr_info("%s: dev_name %s\n", __func__, dev_name(cpu_dai->dev));
+
+	rtd->pmdown_time = 0;
+
+	snd_soc_dapm_new_controls(dapm, msm_dapm_widgets,
+				ARRAY_SIZE(msm_dapm_widgets));
+
+	if (!strcmp(dev_name(codec_dai->dev), "tasha_codec"))
+		snd_soc_dapm_add_routes(dapm, wcd_audio_paths_tasha,
+					ARRAY_SIZE(wcd_audio_paths_tasha));
+	else
+		snd_soc_dapm_add_routes(dapm, wcd_audio_paths,
+					ARRAY_SIZE(wcd_audio_paths));
+
+	snd_soc_dapm_ignore_suspend(dapm, "Handset Mic");
+	snd_soc_dapm_ignore_suspend(dapm, "Headset Mic");
+	snd_soc_dapm_ignore_suspend(dapm, "ANCRight Headset Mic");
+	snd_soc_dapm_ignore_suspend(dapm, "ANCLeft Headset Mic");
+	snd_soc_dapm_ignore_suspend(dapm, "Digital Mic0");
+	snd_soc_dapm_ignore_suspend(dapm, "Digital Mic1");
+	snd_soc_dapm_ignore_suspend(dapm, "Digital Mic2");
+	snd_soc_dapm_ignore_suspend(dapm, "Digital Mic3");
+	snd_soc_dapm_ignore_suspend(dapm, "Digital Mic4");
+	snd_soc_dapm_ignore_suspend(dapm, "Digital Mic5");
+	snd_soc_dapm_ignore_suspend(dapm, "Analog Mic5");
+	snd_soc_dapm_ignore_suspend(dapm, "Analog Mic6");
+	snd_soc_dapm_ignore_suspend(dapm, "MADINPUT");
+	snd_soc_dapm_ignore_suspend(dapm, "MAD_CPE_INPUT");
+	snd_soc_dapm_ignore_suspend(dapm, "MAD_CPE_OUT1");
+	snd_soc_dapm_ignore_suspend(dapm, "MAD_CPE_OUT2");
+	snd_soc_dapm_ignore_suspend(dapm, "EAR");
+	snd_soc_dapm_ignore_suspend(dapm, "LINEOUT1");
+	snd_soc_dapm_ignore_suspend(dapm, "LINEOUT2");
+	snd_soc_dapm_ignore_suspend(dapm, "ANC EAR");
+	snd_soc_dapm_ignore_suspend(dapm, "SPK1 OUT");
+	snd_soc_dapm_ignore_suspend(dapm, "SPK2 OUT");
+	snd_soc_dapm_ignore_suspend(dapm, "HPHL");
+	snd_soc_dapm_ignore_suspend(dapm, "HPHR");
+	snd_soc_dapm_ignore_suspend(dapm, "AIF4 VI");
+	snd_soc_dapm_ignore_suspend(dapm, "VIINPUT");
+	snd_soc_dapm_ignore_suspend(dapm, "ANC HPHL");
+	snd_soc_dapm_ignore_suspend(dapm, "ANC HPHR");
+
+	if (!strcmp(dev_name(codec_dai->dev), "tasha_codec")) {
+		snd_soc_dapm_ignore_suspend(dapm, "LINEOUT3");
+		snd_soc_dapm_ignore_suspend(dapm, "LINEOUT4");
+		snd_soc_dapm_ignore_suspend(dapm, "ANC LINEOUT1");
+		snd_soc_dapm_ignore_suspend(dapm, "ANC LINEOUT2");
+	}
+
+	snd_soc_dapm_sync(dapm);
+
+	if (!strcmp(dev_name(codec_dai->dev), "tavil_codec")) {
+		snd_soc_dai_set_channel_map(codec_dai, ARRAY_SIZE(tx_ch_tavil),
+					tx_ch_tavil, ARRAY_SIZE(rx_ch_tavil),
+					rx_ch_tavil);
+	} else {
+		snd_soc_dai_set_channel_map(codec_dai, ARRAY_SIZE(tx_ch),
+					tx_ch, ARRAY_SIZE(rx_ch),
+					rx_ch);
+	}
+
+	if (!strcmp(dev_name(codec_dai->dev), "tavil_codec")) {
+		msm_codec_fn.get_afe_config_fn = tavil_get_afe_config;
+	} else {
+		msm_codec_fn.get_afe_config_fn = tasha_get_afe_config;
+		msm_codec_fn.mbhc_hs_detect_exit = tasha_mbhc_hs_detect_exit;
+	}
+
+	ret = msm_adsp_power_up_config(codec);
+	if (ret) {
+		pr_err("%s: Failed to set AFE config %d\n", __func__, ret);
+		goto err_afe_cfg;
+	}
+
+	config_data = msm_codec_fn.get_afe_config_fn(codec,
+						     AFE_AANC_VERSION);
+	if (config_data) {
+		ret = afe_set_config(AFE_AANC_VERSION, config_data, 0);
+		if (ret) {
+			pr_err("%s: Failed to set aanc version %d\n",
+				__func__, ret);
+			goto err_afe_cfg;
+		}
+	}
+
+	if (!strcmp(dev_name(codec_dai->dev), "tasha_codec")) {
+		config_data = msm_codec_fn.get_afe_config_fn(codec,
+						AFE_CDC_CLIP_REGISTERS_CONFIG);
+		if (config_data) {
+			ret = afe_set_config(AFE_CDC_CLIP_REGISTERS_CONFIG,
+						 config_data, 0);
+			if (ret) {
+				pr_err("%s: Failed to set clip registers %d\n",
+					__func__, ret);
+				goto err_afe_cfg;
+			}
+		}
+		config_data = msm_codec_fn.get_afe_config_fn(codec,
+				AFE_CLIP_BANK_SEL);
+		if (config_data) {
+			ret = afe_set_config(AFE_CLIP_BANK_SEL, config_data, 0);
+			if (ret) {
+				pr_err("%s: Failed to set AFE bank selection %d\n",
+					__func__, ret);
+				goto err_afe_cfg;
+			}
+		}
+	}
+
+	/*
+	 * Send the speaker configuration only for WSA8810;
+	 * the default configuration is for WSA8815.
+	 */
+	pr_debug("%s: Number of aux devices: %d\n",
+		__func__, rtd->card->num_aux_devs);
+	if (!strcmp(dev_name(codec_dai->dev), "tavil_codec")) {
+		if (rtd->card->num_aux_devs && rtd_aux && rtd_aux->component) {
+			if (!strcmp(rtd_aux->component->name, WSA8810_NAME_1) ||
+			    !strcmp(rtd_aux->component->name, WSA8810_NAME_2)) {
+				tavil_set_spkr_mode(rtd->codec, SPKR_MODE_1);
+				tavil_set_spkr_gain_offset(rtd->codec,
+							RX_GAIN_OFFSET_M1P5_DB);
+			}
+		}
+		card = rtd->card->snd_card;
+		entry = snd_register_module_info(card->module, "codecs",
+						 card->proc_root);
+		if (!entry) {
+			pr_debug("%s: Cannot create codecs module entry\n",
+				 __func__);
+			pdata->codec_root = NULL;
+			goto done;
+		}
+		pdata->codec_root = entry;
+		tavil_codec_info_create_codec_entry(pdata->codec_root, codec);
+	} else {
+		if (rtd->card->num_aux_devs && rtd_aux && rtd_aux->component) {
+			if (!strcmp(rtd_aux->component->name, WSA8810_NAME_1) ||
+			    !strcmp(rtd_aux->component->name, WSA8810_NAME_2)) {
+				tasha_set_spkr_mode(rtd->codec, SPKR_MODE_1);
+				tasha_set_spkr_gain_offset(rtd->codec,
+							RX_GAIN_OFFSET_M1P5_DB);
+			}
+		}
+		card = rtd->card->snd_card;
+		entry = snd_register_module_info(card->module, "codecs",
+						 card->proc_root);
+		if (!entry) {
+			pr_debug("%s: Cannot create codecs module entry\n",
+				 __func__);
+			ret = 0;
+			goto err_snd_module;
+		}
+		pdata->codec_root = entry;
+		tasha_codec_info_create_codec_entry(pdata->codec_root, codec);
+	}
+done:
+	codec_reg_done = true;
+	return 0;
+
+err_snd_module:
+err_afe_cfg:
+	return ret;
+}
+
+static int msm_wcn_init(struct snd_soc_pcm_runtime *rtd)
+{
+	unsigned int rx_ch[WCN_CDC_SLIM_RX_CH_MAX] = {157, 158};
+	unsigned int tx_ch[WCN_CDC_SLIM_TX_CH_MAX]  = {159, 160, 161};
+	struct snd_soc_dai *codec_dai = rtd->codec_dai;
+
+	return snd_soc_dai_set_channel_map(codec_dai, ARRAY_SIZE(tx_ch),
+					   tx_ch, ARRAY_SIZE(rx_ch), rx_ch);
+}
+
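+/*
+ * Default MBHC (headset/button detect) calibration: 1600 mV maximum
+ * headset voltage and per-button upper voltage thresholds.  Buttons
+ * 3..7 all use the 500 mV ceiling, so the upper buttons effectively
+ * collapse into a single detection range.
+ */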
+static void *def_tasha_mbhc_cal(void)
+{
+	void *tasha_wcd_cal;
+	struct wcd_mbhc_btn_detect_cfg *btn_cfg;
+	u16 *btn_high;
+
+	tasha_wcd_cal = kzalloc(WCD_MBHC_CAL_SIZE(WCD_MBHC_DEF_BUTTONS,
+				WCD9XXX_MBHC_DEF_RLOADS), GFP_KERNEL);
+	if (!tasha_wcd_cal)
+		return NULL;
+
+#define S(X, Y) ((WCD_MBHC_CAL_PLUG_TYPE_PTR(tasha_wcd_cal)->X) = (Y))
+	S(v_hs_max, 1600);
+#undef S
+#define S(X, Y) ((WCD_MBHC_CAL_BTN_DET_PTR(tasha_wcd_cal)->X) = (Y))
+	S(num_btn, WCD_MBHC_DEF_BUTTONS);
+#undef S
+
+	btn_cfg = WCD_MBHC_CAL_BTN_DET_PTR(tasha_wcd_cal);
+	btn_high = ((void *)&btn_cfg->_v_btn_low) +
+		(sizeof(btn_cfg->_v_btn_low[0]) * btn_cfg->num_btn);
+
+	btn_high[0] = 75;
+	btn_high[1] = 150;
+	btn_high[2] = 237;
+	btn_high[3] = 500;
+	btn_high[4] = 500;
+	btn_high[5] = 500;
+	btn_high[6] = 500;
+	btn_high[7] = 500;
+
+	return tasha_wcd_cal;
+}
+
+static void *def_tavil_mbhc_cal(void)
+{
+	void *tavil_wcd_cal;
+	struct wcd_mbhc_btn_detect_cfg *btn_cfg;
+	u16 *btn_high;
+
+	tavil_wcd_cal = kzalloc(WCD_MBHC_CAL_SIZE(WCD_MBHC_DEF_BUTTONS,
+				WCD9XXX_MBHC_DEF_RLOADS), GFP_KERNEL);
+	if (!tavil_wcd_cal)
+		return NULL;
+
+#define S(X, Y) ((WCD_MBHC_CAL_PLUG_TYPE_PTR(tavil_wcd_cal)->X) = (Y))
+	S(v_hs_max, 1600);
+#undef S
+#define S(X, Y) ((WCD_MBHC_CAL_BTN_DET_PTR(tavil_wcd_cal)->X) = (Y))
+	S(num_btn, WCD_MBHC_DEF_BUTTONS);
+#undef S
+
+	btn_cfg = WCD_MBHC_CAL_BTN_DET_PTR(tavil_wcd_cal);
+	btn_high = ((void *)&btn_cfg->_v_btn_low) +
+		(sizeof(btn_cfg->_v_btn_low[0]) * btn_cfg->num_btn);
+
+	btn_high[0] = 75;
+	btn_high[1] = 150;
+	btn_high[2] = 237;
+	btn_high[3] = 500;
+	btn_high[4] = 500;
+	btn_high[5] = 500;
+	btn_high[6] = 500;
+	btn_high[7] = 500;
+
+	return tavil_wcd_cal;
+}
+
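+/*
+ * hw_params for the SLIMBUS codec links: query the codec channel map,
+ * then program the CPU DAI with the user-configured channel count
+ * (RX side for playback, TX side for capture).
+ */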
+static int msm_snd_hw_params(struct snd_pcm_substream *substream,
+			     struct snd_pcm_hw_params *params)
+{
+	struct snd_soc_pcm_runtime *rtd = substream->private_data;
+	struct snd_soc_dai *codec_dai = rtd->codec_dai;
+	struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
+	struct snd_soc_dai_link *dai_link = rtd->dai_link;
+
+	int ret = 0;
+	u32 rx_ch[SLIM_MAX_RX_PORTS], tx_ch[SLIM_MAX_TX_PORTS];
+	u32 rx_ch_cnt = 0, tx_ch_cnt = 0;
+	u32 user_set_tx_ch = 0;
+	u32 rx_ch_count;
+
+	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+		ret = snd_soc_dai_get_channel_map(codec_dai,
+					&tx_ch_cnt, tx_ch, &rx_ch_cnt, rx_ch);
+		if (ret < 0) {
+			pr_err("%s: failed to get codec chan map, err:%d\n",
+				__func__, ret);
+			goto err_ch_map;
+		}
+		if (dai_link->be_id == MSM_BACKEND_DAI_SLIMBUS_5_RX) {
+			pr_debug("%s: rx_5_ch=%d\n", __func__,
+				  slim_rx_cfg[5].channels);
+			rx_ch_count = slim_rx_cfg[5].channels;
+		} else if (dai_link->be_id == MSM_BACKEND_DAI_SLIMBUS_2_RX) {
+			pr_debug("%s: rx_2_ch=%d\n", __func__,
+				 slim_rx_cfg[2].channels);
+			rx_ch_count = slim_rx_cfg[2].channels;
+		} else if (dai_link->be_id == MSM_BACKEND_DAI_SLIMBUS_6_RX) {
+			pr_debug("%s: rx_6_ch=%d\n", __func__,
+				  slim_rx_cfg[6].channels);
+			rx_ch_count = slim_rx_cfg[6].channels;
+		} else {
+			pr_debug("%s: rx_0_ch=%d\n", __func__,
+				  slim_rx_cfg[0].channels);
+			rx_ch_count = slim_rx_cfg[0].channels;
+		}
+		ret = snd_soc_dai_set_channel_map(cpu_dai, 0, 0,
+						  rx_ch_count, rx_ch);
+		if (ret < 0) {
+			pr_err("%s: failed to set cpu chan map, err:%d\n",
+				__func__, ret);
+			goto err_ch_map;
+		}
+	} else {
+
+		pr_debug("%s: %s_tx_dai_id_%d\n", __func__,
+			 codec_dai->name, codec_dai->id);
+		ret = snd_soc_dai_get_channel_map(codec_dai,
+					 &tx_ch_cnt, tx_ch, &rx_ch_cnt, rx_ch);
+		if (ret < 0) {
+			pr_err("%s: failed to get codec chan map, err:%d\n",
+				__func__, ret);
+			goto err_ch_map;
+		}
+		/* For <codec>_tx1 case */
+		if (dai_link->be_id == MSM_BACKEND_DAI_SLIMBUS_0_TX)
+			user_set_tx_ch = slim_tx_cfg[0].channels;
+		/* For <codec>_tx3 case */
+		else if (dai_link->be_id == MSM_BACKEND_DAI_SLIMBUS_1_TX)
+			user_set_tx_ch = slim_tx_cfg[1].channels;
+		else if (dai_link->be_id == MSM_BACKEND_DAI_SLIMBUS_4_TX)
+			user_set_tx_ch = msm_vi_feed_tx_ch;
+		else
+			user_set_tx_ch = tx_ch_cnt;
+
+		pr_debug("%s: msm_slim_0_tx_ch(%d) user_set_tx_ch(%d) tx_ch_cnt(%d), be_id (%d)\n",
+			 __func__,  slim_tx_cfg[0].channels, user_set_tx_ch,
+			 tx_ch_cnt, dai_link->be_id);
+
+		ret = snd_soc_dai_set_channel_map(cpu_dai,
+						  user_set_tx_ch, tx_ch, 0, 0);
+		if (ret < 0)
+			pr_err("%s: failed to set cpu chan map, err:%d\n",
+				__func__, ret);
+	}
+
+err_ch_map:
+	return ret;
+}
+
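+/*
+ * CPE (codec processing engine) listen path is capture only: reject
+ * playback streams, then mirror the codec TX channel map onto the
+ * CPU DAI.
+ */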
+static int msm_snd_cpe_hw_params(struct snd_pcm_substream *substream,
+				 struct snd_pcm_hw_params *params)
+{
+	struct snd_soc_pcm_runtime *rtd = substream->private_data;
+	struct snd_soc_dai *codec_dai = rtd->codec_dai;
+	struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
+	struct snd_soc_dai_link *dai_link = rtd->dai_link;
+
+	int ret = 0;
+	u32 tx_ch[SLIM_MAX_TX_PORTS];
+	u32 tx_ch_cnt = 0;
+	u32 user_set_tx_ch = 0;
+
+	if (substream->stream != SNDRV_PCM_STREAM_CAPTURE) {
+		pr_err("%s: Invalid stream type %d\n",
+			__func__, substream->stream);
+		ret = -EINVAL;
+		goto err_stream_type;
+	}
+
+	pr_debug("%s: %s_tx_dai_id_%d\n", __func__,
+		 codec_dai->name, codec_dai->id);
+	ret = snd_soc_dai_get_channel_map(codec_dai,
+				 &tx_ch_cnt, tx_ch, NULL, NULL);
+	if (ret < 0) {
+		pr_err("%s: failed to get codec chan map, err:%d\n",
+			__func__, ret);
+		goto err_ch_map;
+	}
+
+	user_set_tx_ch = tx_ch_cnt;
+
+	pr_debug("%s: tx_ch_cnt(%d) be_id %d\n",
+		 __func__, tx_ch_cnt, dai_link->be_id);
+
+	ret = snd_soc_dai_set_channel_map(cpu_dai,
+					  user_set_tx_ch, tx_ch, 0, 0);
+	if (ret < 0)
+		pr_err("%s: failed to set cpu chan map, err:%d\n",
+			__func__, ret);
+err_ch_map:
+err_stream_type:
+	return ret;
+}
+
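+/*
+ * SLIMBUS_2 hostless (ultrasound) hw_params: unlike the common
+ * back-end path, the channel count is taken from the requested
+ * hw_params instead of the slim_rx_cfg[]/slim_tx_cfg[] tables.
+ */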
+static int msm_slimbus_2_hw_params(struct snd_pcm_substream *substream,
+					  struct snd_pcm_hw_params *params)
+{
+	struct snd_soc_pcm_runtime *rtd = substream->private_data;
+	struct snd_soc_dai *codec_dai = rtd->codec_dai;
+	struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
+	unsigned int rx_ch[SLIM_MAX_RX_PORTS], tx_ch[SLIM_MAX_TX_PORTS];
+	unsigned int rx_ch_cnt = 0, tx_ch_cnt = 0;
+	unsigned int num_tx_ch = 0;
+	unsigned int num_rx_ch = 0;
+	int ret = 0;
+
+	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+		num_rx_ch = params_channels(params);
+		pr_debug("%s: %s rx_dai_id = %d  num_ch = %d\n", __func__,
+			codec_dai->name, codec_dai->id, num_rx_ch);
+		ret = snd_soc_dai_get_channel_map(codec_dai,
+				&tx_ch_cnt, tx_ch, &rx_ch_cnt, rx_ch);
+		if (ret < 0) {
+			pr_err("%s: failed to get codec chan map, err:%d\n",
+				__func__, ret);
+			goto err_ch_map;
+		}
+		ret = snd_soc_dai_set_channel_map(cpu_dai, 0, 0,
+				num_rx_ch, rx_ch);
+		if (ret < 0) {
+			pr_err("%s: failed to set cpu chan map, err:%d\n",
+				__func__, ret);
+			goto err_ch_map;
+		}
+	} else {
+		num_tx_ch = params_channels(params);
+		pr_debug("%s: %s  tx_dai_id = %d  num_ch = %d\n", __func__,
+			codec_dai->name, codec_dai->id, num_tx_ch);
+		ret = snd_soc_dai_get_channel_map(codec_dai,
+				&tx_ch_cnt, tx_ch, &rx_ch_cnt, rx_ch);
+		if (ret < 0) {
+			pr_err("%s: failed to get codec chan map, err:%d\n",
+				__func__, ret);
+			goto err_ch_map;
+		}
+		ret = snd_soc_dai_set_channel_map(cpu_dai,
+				num_tx_ch, tx_ch, 0, 0);
+		if (ret < 0) {
+			pr_err("%s: failed to set cpu chan map, err:%d\n",
+				__func__, ret);
+			goto err_ch_map;
+		}
+	}
+
+err_ch_map:
+	return ret;
+}
+
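+/*
+ * WCN (BT/FM) SLIMBUS hw_params: the BTFM codec dictates both the RX
+ * and TX channel maps, which are copied verbatim to the CPU DAI.
+ */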
+static int msm_wcn_hw_params(struct snd_pcm_substream *substream,
+			     struct snd_pcm_hw_params *params)
+{
+	struct snd_soc_pcm_runtime *rtd = substream->private_data;
+	struct snd_soc_dai *codec_dai = rtd->codec_dai;
+	struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
+	struct snd_soc_dai_link *dai_link = rtd->dai_link;
+	u32 rx_ch[WCN_CDC_SLIM_RX_CH_MAX], tx_ch[WCN_CDC_SLIM_TX_CH_MAX];
+	u32 rx_ch_cnt = 0, tx_ch_cnt = 0;
+	int ret;
+
+	dev_dbg(rtd->dev, "%s: %s_tx_dai_id_%d\n", __func__,
+		 codec_dai->name, codec_dai->id);
+	ret = snd_soc_dai_get_channel_map(codec_dai,
+				 &tx_ch_cnt, tx_ch, &rx_ch_cnt, rx_ch);
+	if (ret) {
+		dev_err(rtd->dev,
+			"%s: failed to get BTFM codec chan map, err:%d\n",
+			__func__, ret);
+		goto exit;
+	}
+
+	dev_dbg(rtd->dev, "%s: tx_ch_cnt(%d) be_id %d\n",
+		__func__, tx_ch_cnt, dai_link->be_id);
+
+	ret = snd_soc_dai_set_channel_map(cpu_dai,
+					  tx_ch_cnt, tx_ch, rx_ch_cnt, rx_ch);
+	if (ret)
+		dev_err(rtd->dev, "%s: failed to set cpu chan map, err:%d\n",
+			__func__, ret);
+
+exit:
+	return ret;
+}
+
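+/*
+ * AUX PCM startup: the first opener of an interface flips the shared
+ * LPAIF PCM/I2S mux to PCM mode (iowrite32(1)); the ref count keeps
+ * the mux stable while both directions are in use.
+ */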
+static int msm_aux_pcm_snd_startup(struct snd_pcm_substream *substream)
+{
+	int ret = 0;
+	struct snd_soc_pcm_runtime *rtd = substream->private_data;
+	struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
+	int index = cpu_dai->id - 1;
+
+	dev_dbg(rtd->card->dev,
+		"%s: substream = %s  stream = %d, dai name %s, dai ID %d\n",
+		__func__, substream->name, substream->stream,
+		cpu_dai->name, cpu_dai->id);
+
+	if (index < PRIM_AUX_PCM || index > QUAT_AUX_PCM) {
+		ret = -EINVAL;
+		dev_err(rtd->card->dev,
+			"%s: CPU DAI id (%d) out of range\n",
+			__func__, cpu_dai->id);
+		goto done;
+	}
+
+	mutex_lock(&auxpcm_intf_conf[index].lock);
+	if (++auxpcm_intf_conf[index].ref_cnt == 1) {
+		if (mi2s_auxpcm_conf[index].pcm_i2s_sel_vt_addr != NULL) {
+			mutex_lock(&mi2s_auxpcm_conf[index].lock);
+			iowrite32(1,
+				mi2s_auxpcm_conf[index].pcm_i2s_sel_vt_addr);
+			mutex_unlock(&mi2s_auxpcm_conf[index].lock);
+		} else {
+			dev_err(rtd->card->dev,
+				"%s: lpaif_muxsel_virt_addr is NULL\n",
+				__func__);
+			ret = -EINVAL;
+		}
+	}
+	if (IS_ERR_VALUE(ret))
+		auxpcm_intf_conf[index].ref_cnt--;
+
+	mutex_unlock(&auxpcm_intf_conf[index].lock);
+
+done:
+	return ret;
+}
+
+static void msm_aux_pcm_snd_shutdown(struct snd_pcm_substream *substream)
+{
+	struct snd_soc_pcm_runtime *rtd = substream->private_data;
+	int index = rtd->cpu_dai->id - 1;
+
+	dev_dbg(rtd->card->dev,
+		"%s: substream = %s  stream = %d, dai name %s, dai ID %d\n",
+		__func__,
+		substream->name, substream->stream,
+		rtd->cpu_dai->name, rtd->cpu_dai->id);
+
+	if (index < PRIM_AUX_PCM || index > QUAT_AUX_PCM) {
+		dev_err(rtd->card->dev,
+			"%s: CPU DAI id (%d) out of range\n",
+			__func__, rtd->cpu_dai->id);
+		return;
+	}
+
+	mutex_lock(&auxpcm_intf_conf[index].lock);
+	if (--auxpcm_intf_conf[index].ref_cnt == 0) {
+		if (mi2s_auxpcm_conf[index].pcm_i2s_sel_vt_addr != NULL) {
+			mutex_lock(&mi2s_auxpcm_conf[index].lock);
+			iowrite32(0,
+				mi2s_auxpcm_conf[index].pcm_i2s_sel_vt_addr);
+			mutex_unlock(&mi2s_auxpcm_conf[index].lock);
+		} else {
+			dev_err(rtd->card->dev,
+				"%s: lpaif_muxsel_virt_addr is NULL\n",
+				__func__);
+		}
+	}
+	mutex_unlock(&auxpcm_intf_conf[index].lock);
+}
+
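+/* Map a machine-driver MI2S back-end ID to its AFE port ID. */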
+static int msm_get_port_id(int be_id)
+{
+	int afe_port_id;
+
+	switch (be_id) {
+	case MSM_BACKEND_DAI_PRI_MI2S_RX:
+		afe_port_id = AFE_PORT_ID_PRIMARY_MI2S_RX;
+		break;
+	case MSM_BACKEND_DAI_PRI_MI2S_TX:
+		afe_port_id = AFE_PORT_ID_PRIMARY_MI2S_TX;
+		break;
+	case MSM_BACKEND_DAI_SECONDARY_MI2S_RX:
+		afe_port_id = AFE_PORT_ID_SECONDARY_MI2S_RX;
+		break;
+	case MSM_BACKEND_DAI_SECONDARY_MI2S_TX:
+		afe_port_id = AFE_PORT_ID_SECONDARY_MI2S_TX;
+		break;
+	case MSM_BACKEND_DAI_TERTIARY_MI2S_RX:
+		afe_port_id = AFE_PORT_ID_TERTIARY_MI2S_RX;
+		break;
+	case MSM_BACKEND_DAI_TERTIARY_MI2S_TX:
+		afe_port_id = AFE_PORT_ID_TERTIARY_MI2S_TX;
+		break;
+	case MSM_BACKEND_DAI_QUATERNARY_MI2S_RX:
+		afe_port_id = AFE_PORT_ID_QUATERNARY_MI2S_RX;
+		break;
+	case MSM_BACKEND_DAI_QUATERNARY_MI2S_TX:
+		afe_port_id = AFE_PORT_ID_QUATERNARY_MI2S_TX;
+		break;
+	default:
+		pr_err("%s: Invalid be_id: %d\n", __func__, be_id);
+		afe_port_id = -EINVAL;
+	}
+
+	return afe_port_id;
+}
+
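+/*
+ * The MI2S bit clock is derived from the slot width rather than the
+ * sample width: 24- and 32-bit formats travel in 32-bit slots and
+ * 16-bit in 16-bit slots, so BCLK = sample_rate * 2 (channels) *
+ * slot_width.
+ */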
+static u32 get_mi2s_bits_per_sample(u32 bit_format)
+{
+	u32 bit_per_sample;
+
+	switch (bit_format) {
+	case SNDRV_PCM_FORMAT_S32_LE:
+	case SNDRV_PCM_FORMAT_S24_3LE:
+	case SNDRV_PCM_FORMAT_S24_LE:
+		bit_per_sample = 32;
+		break;
+	case SNDRV_PCM_FORMAT_S16_LE:
+	default:
+		bit_per_sample = 16;
+		break;
+	}
+
+	return bit_per_sample;
+}
+
+static void update_mi2s_clk_val(int dai_id, int stream)
+{
+	u32 bit_per_sample;
+
+	if (stream == SNDRV_PCM_STREAM_PLAYBACK) {
+		bit_per_sample =
+		    get_mi2s_bits_per_sample(mi2s_rx_cfg[dai_id].bit_format);
+		mi2s_clk[dai_id].clk_freq_in_hz =
+		    mi2s_rx_cfg[dai_id].sample_rate * 2 * bit_per_sample;
+	} else {
+		bit_per_sample =
+		    get_mi2s_bits_per_sample(mi2s_tx_cfg[dai_id].bit_format);
+		mi2s_clk[dai_id].clk_freq_in_hz =
+		    mi2s_tx_cfg[dai_id].sample_rate * 2 * bit_per_sample;
+	}
+}
+
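+/* Enable or disable the MI2S bit clock via the AFE LPASS clock API. */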
+static int msm_mi2s_set_sclk(struct snd_pcm_substream *substream, bool enable)
+{
+	int ret = 0;
+	struct snd_soc_pcm_runtime *rtd = substream->private_data;
+	struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
+	int port_id = 0;
+	int index = cpu_dai->id;
+
+	port_id = msm_get_port_id(rtd->dai_link->be_id);
+	if (IS_ERR_VALUE(port_id)) {
+		dev_err(rtd->card->dev, "%s: Invalid port_id\n", __func__);
+		ret = port_id;
+		goto done;
+	}
+
+	if (enable) {
+		update_mi2s_clk_val(index, substream->stream);
+		dev_dbg(rtd->card->dev, "%s: clock rate %u\n", __func__,
+			mi2s_clk[index].clk_freq_in_hz);
+	}
+
+	mi2s_clk[index].enable = enable;
+	ret = afe_set_lpass_clock_v2(port_id,
+				     &mi2s_clk[index]);
+	if (ret < 0) {
+		dev_err(rtd->card->dev,
+			"%s: afe lpass clock failed for port 0x%x , err:%d\n",
+			__func__, port_id, ret);
+		goto done;
+	}
+
+done:
+	return ret;
+}
+
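+/*
+ * Move the shared quaternary TLMM pin group between the MI2S and TDM
+ * states. Direct transitions between the two active states are
+ * refused (-EIO); callers must pass through STATE_DISABLE first.
+ */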
+static int msm_set_pinctrl(struct msm_pinctrl_info *pinctrl_info,
+				enum pinctrl_pin_state new_state)
+{
+	int ret = 0;
+	int curr_state = 0;
+
+	if (pinctrl_info == NULL) {
+		pr_err("%s: pinctrl_info is NULL\n", __func__);
+		ret = -EINVAL;
+		goto err;
+	}
+
+	if (pinctrl_info->pinctrl == NULL) {
+		pr_err("%s: pinctrl_info->pinctrl is NULL\n", __func__);
+		ret = -EINVAL;
+		goto err;
+	}
+
+	curr_state = pinctrl_info->curr_state;
+	pinctrl_info->curr_state = new_state;
+	pr_debug("%s: curr_state = %s new_state = %s\n", __func__,
+		 pin_states[curr_state], pin_states[pinctrl_info->curr_state]);
+
+	if (curr_state == pinctrl_info->curr_state) {
+		pr_debug("%s: Already in same state\n", __func__);
+		goto err;
+	}
+
+	if (curr_state != STATE_DISABLE &&
+		pinctrl_info->curr_state != STATE_DISABLE) {
+		pr_debug("%s: state already active, cannot switch\n", __func__);
+		ret = -EIO;
+		goto err;
+	}
+
+	switch (pinctrl_info->curr_state) {
+	case STATE_MI2S_ACTIVE:
+		ret = pinctrl_select_state(pinctrl_info->pinctrl,
+					pinctrl_info->mi2s_active);
+		if (ret) {
+			pr_err("%s: MI2S state select failed with %d\n",
+				__func__, ret);
+			ret = -EIO;
+			goto err;
+		}
+		break;
+	case STATE_TDM_ACTIVE:
+		ret = pinctrl_select_state(pinctrl_info->pinctrl,
+					pinctrl_info->tdm_active);
+		if (ret) {
+			pr_err("%s: TDM state select failed with %d\n",
+				__func__, ret);
+			ret = -EIO;
+			goto err;
+		}
+		break;
+	case STATE_DISABLE:
+		if (curr_state == STATE_MI2S_ACTIVE) {
+			ret = pinctrl_select_state(pinctrl_info->pinctrl,
+					pinctrl_info->mi2s_disable);
+		} else {
+			ret = pinctrl_select_state(pinctrl_info->pinctrl,
+					pinctrl_info->tdm_disable);
+		}
+		if (ret) {
+			pr_err("%s: state disable failed with %d\n",
+				__func__, ret);
+			ret = -EIO;
+			goto err;
+		}
+		break;
+	default:
+		pr_err("%s: TLMM pin state is invalid\n", __func__);
+		return -EINVAL;
+	}
+
+err:
+	return ret;
+}
+
+static void msm_release_pinctrl(struct platform_device *pdev)
+{
+	struct snd_soc_card *card = platform_get_drvdata(pdev);
+	struct msm_asoc_mach_data *pdata = snd_soc_card_get_drvdata(card);
+	struct msm_pinctrl_info *pinctrl_info = &pdata->pinctrl_info;
+
+	if (pinctrl_info->pinctrl) {
+		devm_pinctrl_put(pinctrl_info->pinctrl);
+		pinctrl_info->pinctrl = NULL;
+	}
+}
+
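+/*
+ * Fetch the "quat-mi2s-*" and "quat-tdm-*" pinctrl states from the
+ * device tree and park the pins in the MI2S sleep state until a
+ * stream starts.
+ */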
+static int msm_get_pinctrl(struct platform_device *pdev)
+{
+	struct snd_soc_card *card = platform_get_drvdata(pdev);
+	struct msm_asoc_mach_data *pdata = snd_soc_card_get_drvdata(card);
+	struct msm_pinctrl_info *pinctrl_info = NULL;
+	struct pinctrl *pinctrl;
+	int ret;
+
+	pinctrl_info = &pdata->pinctrl_info;
+
+	if (pinctrl_info == NULL) {
+		pr_err("%s: pinctrl_info is NULL\n", __func__);
+		return -EINVAL;
+	}
+
+	pinctrl = devm_pinctrl_get(&pdev->dev);
+	if (IS_ERR_OR_NULL(pinctrl)) {
+		pr_err("%s: Unable to get pinctrl handle\n", __func__);
+		return -EINVAL;
+	}
+	pinctrl_info->pinctrl = pinctrl;
+
+	/* get all the states handles from Device Tree */
+	pinctrl_info->mi2s_disable = pinctrl_lookup_state(pinctrl,
+						"quat-mi2s-sleep");
+	if (IS_ERR(pinctrl_info->mi2s_disable)) {
+		pr_err("%s: could not get mi2s_disable pinstate\n", __func__);
+		goto err;
+	}
+	pinctrl_info->mi2s_active = pinctrl_lookup_state(pinctrl,
+						"quat-mi2s-active");
+	if (IS_ERR(pinctrl_info->mi2s_active)) {
+		pr_err("%s: could not get mi2s_active pinstate\n", __func__);
+		goto err;
+	}
+	pinctrl_info->tdm_disable = pinctrl_lookup_state(pinctrl,
+						"quat-tdm-sleep");
+	if (IS_ERR(pinctrl_info->tdm_disable)) {
+		pr_err("%s: could not get tdm_disable pinstate\n", __func__);
+		goto err;
+	}
+	pinctrl_info->tdm_active = pinctrl_lookup_state(pinctrl,
+						"quat-tdm-active");
+	if (IS_ERR(pinctrl_info->tdm_active)) {
+		pr_err("%s: could not get tdm_active pinstate\n",
+			__func__);
+		goto err;
+	}
+	/* Reset the TLMM pins to a default state */
+	ret = pinctrl_select_state(pinctrl_info->pinctrl,
+					pinctrl_info->mi2s_disable);
+	if (ret != 0) {
+		pr_err("%s: Disable TLMM pins failed with %d\n",
+			__func__, ret);
+		ret = -EIO;
+		goto err;
+	}
+	pinctrl_info->curr_state = STATE_DISABLE;
+
+	return 0;
+
+err:
+	devm_pinctrl_put(pinctrl);
+	pinctrl_info->pinctrl = NULL;
+	return -EINVAL;
+}
+
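+/*
+ * Fix up quaternary/secondary TDM back-end params from the
+ * user-configurable tdm_rx_cfg[] table (channels, bit format, rate).
+ */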
+static int msm_tdm_be_hw_params_fixup(struct snd_soc_pcm_runtime *rtd,
+				      struct snd_pcm_hw_params *params)
+{
+	struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
+	struct snd_interval *rate = hw_param_interval(params,
+					SNDRV_PCM_HW_PARAM_RATE);
+	struct snd_interval *channels = hw_param_interval(params,
+					SNDRV_PCM_HW_PARAM_CHANNELS);
+
+	if (cpu_dai->id == AFE_PORT_ID_QUATERNARY_TDM_RX) {
+		channels->min = channels->max =
+				tdm_rx_cfg[TDM_QUAT][TDM_0].channels;
+		param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT,
+			       tdm_rx_cfg[TDM_QUAT][TDM_0].bit_format);
+		rate->min = rate->max =
+				tdm_rx_cfg[TDM_QUAT][TDM_0].sample_rate;
+	} else if (cpu_dai->id == AFE_PORT_ID_SECONDARY_TDM_RX) {
+		channels->min = channels->max =
+				tdm_rx_cfg[TDM_SEC][TDM_0].channels;
+		param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT,
+			       tdm_rx_cfg[TDM_SEC][TDM_0].bit_format);
+		rate->min = rate->max = tdm_rx_cfg[TDM_SEC][TDM_0].sample_rate;
+	} else {
+		pr_err("%s: dai id 0x%x not supported\n",
+			__func__, cpu_dai->id);
+		return -EINVAL;
+	}
+
+	pr_debug("%s: dai id = 0x%x channels = %d rate = %d format = 0x%x\n",
+		__func__, cpu_dai->id, channels->max, rate->max,
+		params_format(params));
+
+	return 0;
+}
+
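+/*
+ * Quaternary TDM playback hw_params: fixed 32-bit slots at 4-byte
+ * offsets, slot count taken from tdm_rx_cfg[TDM_QUAT][TDM_0].channels.
+ */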
+static int msm8998_tdm_snd_hw_params(struct snd_pcm_substream *substream,
+				     struct snd_pcm_hw_params *params)
+{
+	struct snd_soc_pcm_runtime *rtd = substream->private_data;
+	struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
+	int ret = 0;
+	int channels, slot_width, slots;
+	unsigned int slot_mask;
+	unsigned int slot_offset[8] = {0, 4, 8, 12, 16, 20, 24, 28};
+
+	pr_debug("%s: dai id = 0x%x\n", __func__, cpu_dai->id);
+
+	slots = tdm_rx_cfg[TDM_QUAT][TDM_0].channels;
+	/* slot mask with the low 'slots' bits set (bits 0 and 1 for two slots) */
+	slot_mask = 0x0000FFFF >> (16-slots);
+	slot_width = 32;
+	channels = slots;
+
+	pr_debug("%s: slot_width %d slots %d\n", __func__, slot_width, slots);
+	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+		pr_debug("%s: slot_width %d\n", __func__, slot_width);
+		ret = snd_soc_dai_set_tdm_slot(cpu_dai, 0, slot_mask,
+			slots, slot_width);
+		if (ret < 0) {
+			pr_err("%s: failed to set tdm slot, err:%d\n",
+				__func__, ret);
+			goto end;
+		}
+
+		ret = snd_soc_dai_set_channel_map(cpu_dai,
+			0, NULL, channels, slot_offset);
+		if (ret < 0) {
+			pr_err("%s: failed to set channel map, err:%d\n",
+				__func__, ret);
+			goto end;
+		}
+	} else {
+		pr_err("%s: invalid use case, stream type %d\n",
+			__func__, substream->stream);
+	}
+
+end:
+	return ret;
+}
+
+static int msm8998_tdm_snd_startup(struct snd_pcm_substream *substream)
+{
+	int ret = 0;
+	struct snd_soc_pcm_runtime *rtd = substream->private_data;
+	struct snd_soc_card *card = rtd->card;
+	struct msm_asoc_mach_data *pdata = snd_soc_card_get_drvdata(card);
+	struct msm_pinctrl_info *pinctrl_info = &pdata->pinctrl_info;
+
+	ret = msm_set_pinctrl(pinctrl_info, STATE_TDM_ACTIVE);
+	if (ret)
+		pr_err("%s: MI2S TLMM pinctrl set failed with %d\n",
+			__func__, ret);
+
+	return ret;
+}
+
+static void msm8998_tdm_snd_shutdown(struct snd_pcm_substream *substream)
+{
+	int ret = 0;
+	struct snd_soc_pcm_runtime *rtd = substream->private_data;
+	struct snd_soc_card *card = rtd->card;
+	struct msm_asoc_mach_data *pdata = snd_soc_card_get_drvdata(card);
+	struct msm_pinctrl_info *pinctrl_info = &pdata->pinctrl_info;
+
+	ret = msm_set_pinctrl(pinctrl_info, STATE_DISABLE);
+	if (ret)
+		pr_err("%s: MI2S TLMM pinctrl set failed with %d\n",
+			__func__, ret);
+}
+
+static struct snd_soc_ops msm8998_tdm_be_ops = {
+	.hw_params = msm8998_tdm_snd_hw_params,
+	.startup = msm8998_tdm_snd_startup,
+	.shutdown = msm8998_tdm_snd_shutdown
+};
+
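+/*
+ * MI2S startup is reference counted per interface so the bit clock is
+ * enabled only once when the same port is opened for both RX and TX.
+ * The first opener also selects I2S mode on the shared PCM/I2S mux,
+ * picks master vs. slave clocking, and optionally turns on the
+ * external MCLK; board-specific codec DAI setup for the fbx wm8804
+ * and sil9437 paths is done here as well.
+ */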
+static int msm_mi2s_snd_startup(struct snd_pcm_substream *substream)
+{
+	int ret = 0;
+	struct snd_soc_pcm_runtime *rtd = substream->private_data;
+	struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
+	int port_id = msm_get_port_id(rtd->dai_link->be_id);
+	int index = cpu_dai->id;
+	unsigned int fmt = SND_SOC_DAIFMT_CBS_CFS;
+	struct snd_soc_card *card = rtd->card;
+	struct msm_asoc_mach_data *pdata = snd_soc_card_get_drvdata(card);
+	struct msm_pinctrl_info *pinctrl_info = &pdata->pinctrl_info;
+	int ret_pinctrl = 0;
+
+	dev_dbg(rtd->card->dev,
+		"%s: substream = %s  stream = %d, dai name %s, dai ID %d\n",
+		__func__, substream->name, substream->stream,
+		cpu_dai->name, cpu_dai->id);
+
+	if (index < PRIM_MI2S || index > QUAT_MI2S) {
+		ret = -EINVAL;
+		dev_err(rtd->card->dev,
+			"%s: CPU DAI id (%d) out of range\n",
+			__func__, cpu_dai->id);
+		goto done;
+	}
+	if (index == QUAT_MI2S) {
+		ret_pinctrl = msm_set_pinctrl(pinctrl_info, STATE_MI2S_ACTIVE);
+		if (ret_pinctrl) {
+			pr_err("%s: MI2S TLMM pinctrl set failed with %d\n",
+				__func__, ret_pinctrl);
+		}
+	}
+
+	if (pdata->has_fbx_wm8804 && index == TERT_MI2S) {
+		struct snd_soc_dai *codec_dai = rtd->codec_dai;
+
+		snd_soc_dai_set_fmt(codec_dai, SND_SOC_DAIFMT_I2S |
+				    SND_SOC_DAIFMT_CBM_CFM |
+				    SND_SOC_DAIFMT_NB_NF);
+		snd_soc_dai_set_clkdiv(codec_dai, WM8804_MCLK_DIV,
+				       WM8804_MCLKDIV_256FS);
+		snd_soc_dai_set_pll(codec_dai, 0, 0, 24000000, 47155200);
+		snd_soc_dai_set_sysclk(codec_dai, WM8804_CLKOUT_SRC_CLK1, 0, 0);
+		snd_soc_dai_set_sysclk(codec_dai, WM8804_TX_CLKSRC_PLL, 0, 0);
+	}
+
+	if (pdata->has_fbx_sil9437 && index == QUAT_MI2S) {
+		struct snd_soc_dai *codec_dai = rtd->codec_dai;
+
+		snd_soc_dai_set_fmt(codec_dai, SND_SOC_DAIFMT_I2S |
+				    SND_SOC_DAIFMT_CBM_CFM |
+				    SND_SOC_DAIFMT_NB_NF);
+	}
+
+	/*
+	 * Mutex protection in case the same MI2S interface is used for
+	 * both TX and RX, so that the same clock won't be enabled twice.
+	 */
+	mutex_lock(&mi2s_intf_conf[index].lock);
+	if (++mi2s_intf_conf[index].ref_cnt == 1) {
+		/* Check if msm needs to provide the clock to the interface */
+		if (!mi2s_intf_conf[index].msm_is_mi2s_master) {
+			fmt = SND_SOC_DAIFMT_CBM_CFM;
+			mi2s_clk[index].clk_id = mi2s_ebit_clk[index];
+		}
+		ret = msm_mi2s_set_sclk(substream, true);
+		if (IS_ERR_VALUE(ret)) {
+			dev_err(rtd->card->dev,
+				"%s: afe lpass clock failed to enable MI2S clock, err:%d\n",
+				__func__, ret);
+			goto clean_up;
+		}
+		if (mi2s_auxpcm_conf[index].pcm_i2s_sel_vt_addr != NULL) {
+			mutex_lock(&mi2s_auxpcm_conf[index].lock);
+			iowrite32(0,
+				mi2s_auxpcm_conf[index].pcm_i2s_sel_vt_addr);
+			mutex_unlock(&mi2s_auxpcm_conf[index].lock);
+		} else {
+			dev_err(rtd->card->dev,
+				"%s lpaif_muxsel_virt_addr is NULL for dai %d\n",
+				__func__, index);
+			ret = -EINVAL;
+			goto clk_off;
+		}
+		ret = snd_soc_dai_set_fmt(cpu_dai, fmt);
+		if (IS_ERR_VALUE(ret)) {
+			pr_err("%s: set fmt cpu dai failed for MI2S (%d), err:%d\n",
+				__func__, index, ret);
+			goto clk_off;
+		}
+		if (mi2s_intf_conf[index].msm_is_ext_mclk) {
+			mi2s_mclk[index].enable = 1;
+			pr_debug("%s: Enabling mclk, clk_freq_in_hz = %u\n",
+				__func__, mi2s_mclk[index].clk_freq_in_hz);
+			ret = afe_set_lpass_clock_v2(port_id,
+						     &mi2s_mclk[index]);
+			if (ret < 0) {
+				pr_err("%s: afe lpass mclk failed, err:%d\n",
+					__func__, ret);
+				goto clk_off;
+			}
+		}
+	}
+clk_off:
+	if (IS_ERR_VALUE(ret))
+		msm_mi2s_set_sclk(substream, false);
+clean_up:
+	if (IS_ERR_VALUE(ret))
+		mi2s_intf_conf[index].ref_cnt--;
+	mutex_unlock(&mi2s_intf_conf[index].lock);
+done:
+	return ret;
+}
+
+static void msm_mi2s_snd_shutdown(struct snd_pcm_substream *substream)
+{
+	int ret;
+	struct snd_soc_pcm_runtime *rtd = substream->private_data;
+	int port_id = msm_get_port_id(rtd->dai_link->be_id);
+	int index = rtd->cpu_dai->id;
+	struct snd_soc_card *card = rtd->card;
+	struct msm_asoc_mach_data *pdata = snd_soc_card_get_drvdata(card);
+	struct msm_pinctrl_info *pinctrl_info = &pdata->pinctrl_info;
+	int ret_pinctrl = 0;
+
+	pr_debug("%s(): substream = %s  stream = %d\n", __func__,
+		 substream->name, substream->stream);
+	if (index < PRIM_MI2S || index > QUAT_MI2S) {
+		pr_err("%s: invalid MI2S DAI (%d)\n", __func__, index);
+		return;
+	}
+
+	mutex_lock(&mi2s_intf_conf[index].lock);
+	if (--mi2s_intf_conf[index].ref_cnt == 0) {
+		ret = msm_mi2s_set_sclk(substream, false);
+		if (ret < 0) {
+			pr_err("%s: clock disable failed for MI2S (%d); ret=%d\n",
+				__func__, index, ret);
+		}
+		if (mi2s_intf_conf[index].msm_is_ext_mclk) {
+			mi2s_mclk[index].enable = 0;
+			pr_debug("%s: Disabling mclk, clk_freq_in_hz = %u\n",
+				 __func__, mi2s_mclk[index].clk_freq_in_hz);
+			ret = afe_set_lpass_clock_v2(port_id,
+						     &mi2s_mclk[index]);
+			if (ret < 0) {
+				pr_err("%s: mclk disable failed for MCLK (%d); ret=%d\n",
+					__func__, index, ret);
+			}
+		}
+	}
+	mutex_unlock(&mi2s_intf_conf[index].lock);
+
+	if (index == QUAT_MI2S) {
+		ret_pinctrl = msm_set_pinctrl(pinctrl_info, STATE_DISABLE);
+		if (ret_pinctrl)
+			pr_err("%s: MI2S TLMM pinctrl set failed with %d\n",
+				__func__, ret_pinctrl);
+	}
+}
+
+static struct snd_soc_ops msm_mi2s_be_ops = {
+	.startup = msm_mi2s_snd_startup,
+	.shutdown = msm_mi2s_snd_shutdown,
+};
+
+static struct snd_soc_ops msm_aux_pcm_be_ops = {
+	.startup = msm_aux_pcm_snd_startup,
+	.shutdown = msm_aux_pcm_snd_shutdown,
+};
+
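+/*
+ * Derive a TDM slot mask from the static slot-offset table: every
+ * valid byte offset maps to slot index (offset * 8) / slot_width,
+ * whose bit is set in the mask.
+ */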
+static unsigned int tdm_param_set_slot_mask(u16 port_id, int slot_width,
+					    int slots)
+{
+	unsigned int slot_mask = 0;
+	int i, j;
+	unsigned int *slot_offset;
+
+	for (i = TDM_0; i < TDM_PORT_MAX; i++) {
+		slot_offset = tdm_slot_offset[i];
+
+		for (j = 0; j < slots; j++) {
+			if (slot_offset[j] != AFE_SLOT_MAPPING_OFFSET_INVALID)
+				slot_mask |=
+				(1 << ((slot_offset[j] * 8) / slot_width));
+			else
+				break;
+		}
+	}
+
+	return slot_mask;
+}
+
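+/*
+ * Generic TDM back-end hw_params (TDM_RX_0/TDM_TX_0 only): always
+ * configures 8 slots of 32 bits and checks that the requested channel
+ * count fits within the valid entries of the slot-offset table.
+ */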
+static int msm_tdm_snd_hw_params(struct snd_pcm_substream *substream,
+				     struct snd_pcm_hw_params *params)
+{
+	struct snd_soc_pcm_runtime *rtd = substream->private_data;
+	struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
+	int ret = 0;
+	int channels, slot_width, slots;
+	unsigned int slot_mask;
+	unsigned int *slot_offset;
+	int offset_channels = 0;
+	int i;
+
+	pr_debug("%s: dai id = 0x%x\n", __func__, cpu_dai->id);
+
+	channels = params_channels(params);
+	switch (channels) {
+	case 1:
+	case 2:
+	case 3:
+	case 4:
+	case 5:
+	case 6:
+	case 7:
+	case 8:
+		switch (params_format(params)) {
+		case SNDRV_PCM_FORMAT_S32_LE:
+		case SNDRV_PCM_FORMAT_S24_LE:
+		case SNDRV_PCM_FORMAT_S16_LE:
+		/*
+		 * Configurations of up to 8 channels use a 32-bit slot
+		 * width so that every supported stream bit width fits
+		 * (slot_width >= bit_width).
+		 */
+			slot_width = 32;
+			break;
+		default:
+			pr_err("%s: invalid param format 0x%x\n",
+				__func__, params_format(params));
+			return -EINVAL;
+		}
+		slots = 8;
+		slot_mask = tdm_param_set_slot_mask(cpu_dai->id,
+						    slot_width,
+						    channels);
+		if (!slot_mask) {
+			pr_err("%s: invalid slot_mask 0x%x\n",
+				__func__, slot_mask);
+			return -EINVAL;
+		}
+		break;
+	default:
+		pr_err("%s: invalid param channels %d\n",
+			__func__, channels);
+		return -EINVAL;
+	}
+	/* currently only supporting TDM_RX_0 and TDM_TX_0 */
+	switch (cpu_dai->id) {
+	case AFE_PORT_ID_PRIMARY_TDM_RX:
+	case AFE_PORT_ID_SECONDARY_TDM_RX:
+	case AFE_PORT_ID_TERTIARY_TDM_RX:
+	case AFE_PORT_ID_QUATERNARY_TDM_RX:
+	case AFE_PORT_ID_PRIMARY_TDM_TX:
+	case AFE_PORT_ID_SECONDARY_TDM_TX:
+	case AFE_PORT_ID_TERTIARY_TDM_TX:
+	case AFE_PORT_ID_QUATERNARY_TDM_TX:
+		slot_offset = tdm_slot_offset[TDM_0];
+		break;
+	default:
+		pr_err("%s: dai id 0x%x not supported\n",
+			__func__, cpu_dai->id);
+		return -EINVAL;
+	}
+
+	for (i = 0; i < TDM_SLOT_OFFSET_MAX; i++) {
+		if (slot_offset[i] != AFE_SLOT_MAPPING_OFFSET_INVALID)
+			offset_channels++;
+		else
+			break;
+	}
+
+	if (offset_channels == 0) {
+		pr_err("%s: slot offset not supported, offset_channels %d\n",
+			__func__, offset_channels);
+		return -EINVAL;
+	}
+
+	if (channels > offset_channels) {
+		pr_err("%s: channels %d exceed offset_channels %d\n",
+			__func__, channels, offset_channels);
+		return -EINVAL;
+	}
+
+	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+		ret = snd_soc_dai_set_tdm_slot(cpu_dai, 0, slot_mask,
+					       slots, slot_width);
+		if (ret < 0) {
+			pr_err("%s: failed to set tdm slot, err:%d\n",
+				__func__, ret);
+			goto end;
+		}
+
+		ret = snd_soc_dai_set_channel_map(cpu_dai, 0, NULL,
+						  channels, slot_offset);
+		if (ret < 0) {
+			pr_err("%s: failed to set channel map, err:%d\n",
+				__func__, ret);
+			goto end;
+		}
+	} else {
+		ret = snd_soc_dai_set_tdm_slot(cpu_dai, slot_mask, 0,
+					       slots, slot_width);
+		if (ret < 0) {
+			pr_err("%s: failed to set tdm slot, err:%d\n",
+				__func__, ret);
+			goto end;
+		}
+
+		ret = snd_soc_dai_set_channel_map(cpu_dai, channels,
+						  slot_offset, 0, NULL);
+		if (ret < 0) {
+			pr_err("%s: failed to set channel map, err:%d\n",
+				__func__, ret);
+			goto end;
+		}
+	}
+end:
+	return ret;
+}
+
+static struct snd_soc_ops msm_be_ops = {
+	.hw_params = msm_snd_hw_params,
+};
+
+static struct snd_soc_ops msm_cpe_ops = {
+	.hw_params = msm_snd_cpe_hw_params,
+};
+
+static struct snd_soc_ops msm_slimbus_2_be_ops = {
+	.hw_params = msm_slimbus_2_hw_params,
+};
+
+static struct snd_soc_ops msm_wcn_ops = {
+	.hw_params = msm_wcn_hw_params,
+};
+
+static struct snd_soc_ops msm_tdm_be_ops = {
+	.hw_params = msm_tdm_snd_hw_params
+};
+
+/* Digital audio interface glue - connects codec <---> CPU */
+static struct snd_soc_dai_link msm_common_dai_links[] = {
+	/* FrontEnd DAI Links */
+	{
+		.name = MSM_DAILINK_NAME(Media1),
+		.stream_name = "MultiMedia1",
+		.cpu_dai_name = "MultiMedia1",
+		.platform_name = "msm-pcm-dsp.0",
+		.dynamic = 1,
+		.async_ops = ASYNC_DPCM_SND_SOC_PREPARE,
+		.dpcm_playback = 1,
+		.dpcm_capture = 1,
+		.trigger = {SND_SOC_DPCM_TRIGGER_POST,
+			SND_SOC_DPCM_TRIGGER_POST},
+		.codec_dai_name = "snd-soc-dummy-dai",
+		.codec_name = "snd-soc-dummy",
+		.ignore_suspend = 1,
+		/* this dailink has playback support */
+		.ignore_pmdown_time = 1,
+		.be_id = MSM_FRONTEND_DAI_MULTIMEDIA1
+	},
+	{
+		.name = MSM_DAILINK_NAME(Media2),
+		.stream_name = "MultiMedia2",
+		.cpu_dai_name = "MultiMedia2",
+		.platform_name = "msm-pcm-dsp.0",
+		.dynamic = 1,
+		.dpcm_playback = 1,
+		.dpcm_capture = 1,
+		.codec_dai_name = "snd-soc-dummy-dai",
+		.codec_name = "snd-soc-dummy",
+		.trigger = {SND_SOC_DPCM_TRIGGER_POST,
+			SND_SOC_DPCM_TRIGGER_POST},
+		.ignore_suspend = 1,
+		/* this dailink has playback support */
+		.ignore_pmdown_time = 1,
+		.be_id = MSM_FRONTEND_DAI_MULTIMEDIA2,
+	},
+	{
+		.name = "VoiceMMode1",
+		.stream_name = "VoiceMMode1",
+		.cpu_dai_name = "VoiceMMode1",
+		.platform_name = "msm-pcm-voice",
+		.dynamic = 1,
+		.dpcm_playback = 1,
+		.dpcm_capture = 1,
+		.trigger = {SND_SOC_DPCM_TRIGGER_POST,
+			    SND_SOC_DPCM_TRIGGER_POST},
+		.no_host_mode = SND_SOC_DAI_LINK_NO_HOST,
+		.ignore_suspend = 1,
+		.ignore_pmdown_time = 1,
+		.codec_dai_name = "snd-soc-dummy-dai",
+		.codec_name = "snd-soc-dummy",
+		.be_id = MSM_FRONTEND_DAI_VOICEMMODE1,
+	},
+	{
+		.name = "MSM VoIP",
+		.stream_name = "VoIP",
+		.cpu_dai_name = "VoIP",
+		.platform_name = "msm-voip-dsp",
+		.dynamic = 1,
+		.dpcm_playback = 1,
+		.dpcm_capture = 1,
+		.trigger = {SND_SOC_DPCM_TRIGGER_POST,
+			SND_SOC_DPCM_TRIGGER_POST},
+		.codec_dai_name = "snd-soc-dummy-dai",
+		.codec_name = "snd-soc-dummy",
+		.ignore_suspend = 1,
+		/* this dailink has playback support */
+		.ignore_pmdown_time = 1,
+		.be_id = MSM_FRONTEND_DAI_VOIP,
+	},
+	{
+		.name = MSM_DAILINK_NAME(ULL),
+		.stream_name = "MultiMedia3",
+		.cpu_dai_name = "MultiMedia3",
+		.platform_name = "msm-pcm-dsp.2",
+		.dynamic = 1,
+		.async_ops = ASYNC_DPCM_SND_SOC_PREPARE,
+		.dpcm_playback = 1,
+		.trigger = {SND_SOC_DPCM_TRIGGER_POST,
+			SND_SOC_DPCM_TRIGGER_POST},
+		.codec_dai_name = "snd-soc-dummy-dai",
+		.codec_name = "snd-soc-dummy",
+		.ignore_suspend = 1,
+		/* this dailink has playback support */
+		.ignore_pmdown_time = 1,
+		.be_id = MSM_FRONTEND_DAI_MULTIMEDIA3,
+	},
+	/* Hostless PCM purpose */
+	{
+		.name = "SLIMBUS_0 Hostless",
+		.stream_name = "SLIMBUS_0 Hostless",
+		.cpu_dai_name = "SLIMBUS0_HOSTLESS",
+		.platform_name = "msm-pcm-hostless",
+		.dynamic = 1,
+		.dpcm_playback = 1,
+		.dpcm_capture = 1,
+		.trigger = {SND_SOC_DPCM_TRIGGER_POST,
+			    SND_SOC_DPCM_TRIGGER_POST},
+		.no_host_mode = SND_SOC_DAI_LINK_NO_HOST,
+		.ignore_suspend = 1,
+		 /* this dailink has playback support */
+		.ignore_pmdown_time = 1,
+		.codec_dai_name = "snd-soc-dummy-dai",
+		.codec_name = "snd-soc-dummy",
+	},
+	{
+		.name = "MSM AFE-PCM RX",
+		.stream_name = "AFE-PROXY RX",
+		.cpu_dai_name = "msm-dai-q6-dev.241",
+		.codec_name = "msm-stub-codec.1",
+		.codec_dai_name = "msm-stub-rx",
+		.platform_name = "msm-pcm-afe",
+		.dpcm_playback = 1,
+		.ignore_suspend = 1,
+		/* this dailink has playback support */
+		.ignore_pmdown_time = 1,
+	},
+	{
+		.name = "MSM AFE-PCM TX",
+		.stream_name = "AFE-PROXY TX",
+		.cpu_dai_name = "msm-dai-q6-dev.240",
+		.codec_name = "msm-stub-codec.1",
+		.codec_dai_name = "msm-stub-tx",
+		.platform_name  = "msm-pcm-afe",
+		.dpcm_capture = 1,
+		.ignore_suspend = 1,
+	},
+	{
+		.name = MSM_DAILINK_NAME(Compress1),
+		.stream_name = "Compress1",
+		.cpu_dai_name = "MultiMedia4",
+		.platform_name = "msm-compress-dsp",
+		.dynamic = 1,
+		.async_ops = ASYNC_DPCM_SND_SOC_HW_PARAMS,
+		.dpcm_playback = 1,
+		.dpcm_capture = 1,
+		.trigger = {SND_SOC_DPCM_TRIGGER_POST,
+			 SND_SOC_DPCM_TRIGGER_POST},
+		.codec_dai_name = "snd-soc-dummy-dai",
+		.codec_name = "snd-soc-dummy",
+		.ignore_suspend = 1,
+		.ignore_pmdown_time = 1,
+		 /* this dailink has playback support */
+		.be_id = MSM_FRONTEND_DAI_MULTIMEDIA4,
+	},
+	{
+		.name = "AUXPCM Hostless",
+		.stream_name = "AUXPCM Hostless",
+		.cpu_dai_name = "AUXPCM_HOSTLESS",
+		.platform_name = "msm-pcm-hostless",
+		.dynamic = 1,
+		.dpcm_playback = 1,
+		.dpcm_capture = 1,
+		.trigger = {SND_SOC_DPCM_TRIGGER_POST,
+			SND_SOC_DPCM_TRIGGER_POST},
+		.no_host_mode = SND_SOC_DAI_LINK_NO_HOST,
+		.ignore_suspend = 1,
+		/* this dailink has playback support */
+		.ignore_pmdown_time = 1,
+		.codec_dai_name = "snd-soc-dummy-dai",
+		.codec_name = "snd-soc-dummy",
+	},
+	{
+		.name = "SLIMBUS_1 Hostless",
+		.stream_name = "SLIMBUS_1 Hostless",
+		.cpu_dai_name = "SLIMBUS1_HOSTLESS",
+		.platform_name = "msm-pcm-hostless",
+		.dynamic = 1,
+		.dpcm_playback = 1,
+		.dpcm_capture = 1,
+		.trigger = {SND_SOC_DPCM_TRIGGER_POST,
+			    SND_SOC_DPCM_TRIGGER_POST},
+		.no_host_mode = SND_SOC_DAI_LINK_NO_HOST,
+		.ignore_suspend = 1,
+		 /* this dailink has playback support */
+		.ignore_pmdown_time = 1,
+		.codec_dai_name = "snd-soc-dummy-dai",
+		.codec_name = "snd-soc-dummy",
+	},
+	{
+		.name = "SLIMBUS_3 Hostless",
+		.stream_name = "SLIMBUS_3 Hostless",
+		.cpu_dai_name = "SLIMBUS3_HOSTLESS",
+		.platform_name = "msm-pcm-hostless",
+		.dynamic = 1,
+		.dpcm_playback = 1,
+		.dpcm_capture = 1,
+		.trigger = {SND_SOC_DPCM_TRIGGER_POST,
+			    SND_SOC_DPCM_TRIGGER_POST},
+		.no_host_mode = SND_SOC_DAI_LINK_NO_HOST,
+		.ignore_suspend = 1,
+		 /* this dailink has playback support */
+		.ignore_pmdown_time = 1,
+		.codec_dai_name = "snd-soc-dummy-dai",
+		.codec_name = "snd-soc-dummy",
+	},
+	{
+		.name = "SLIMBUS_4 Hostless",
+		.stream_name = "SLIMBUS_4 Hostless",
+		.cpu_dai_name = "SLIMBUS4_HOSTLESS",
+		.platform_name = "msm-pcm-hostless",
+		.dynamic = 1,
+		.dpcm_playback = 1,
+		.dpcm_capture = 1,
+		.trigger = {SND_SOC_DPCM_TRIGGER_POST,
+			    SND_SOC_DPCM_TRIGGER_POST},
+		.no_host_mode = SND_SOC_DAI_LINK_NO_HOST,
+		.ignore_suspend = 1,
+		 /* this dailink has playback support */
+		.ignore_pmdown_time = 1,
+		.codec_dai_name = "snd-soc-dummy-dai",
+		.codec_name = "snd-soc-dummy",
+	},
+	{
+		.name = MSM_DAILINK_NAME(LowLatency),
+		.stream_name = "MultiMedia5",
+		.cpu_dai_name = "MultiMedia5",
+		.platform_name = "msm-pcm-dsp.1",
+		.dynamic = 1,
+		.async_ops = ASYNC_DPCM_SND_SOC_PREPARE,
+		.dpcm_playback = 1,
+		.dpcm_capture = 1,
+		.codec_dai_name = "snd-soc-dummy-dai",
+		.codec_name = "snd-soc-dummy",
+		.trigger = {SND_SOC_DPCM_TRIGGER_POST,
+				SND_SOC_DPCM_TRIGGER_POST},
+		.ignore_suspend = 1,
+		/* this dailink has playback support */
+		.ignore_pmdown_time = 1,
+		.be_id = MSM_FRONTEND_DAI_MULTIMEDIA5,
+	},
+	{
+		.name = "Listen 1 Audio Service",
+		.stream_name = "Listen 1 Audio Service",
+		.cpu_dai_name = "LSM1",
+		.platform_name = "msm-lsm-client",
+		.dynamic = 1,
+		.dpcm_capture = 1,
+		.trigger = { SND_SOC_DPCM_TRIGGER_POST,
+			     SND_SOC_DPCM_TRIGGER_POST },
+		.no_host_mode = SND_SOC_DAI_LINK_NO_HOST,
+		.ignore_suspend = 1,
+		.ignore_pmdown_time = 1,
+		.codec_dai_name = "snd-soc-dummy-dai",
+		.codec_name = "snd-soc-dummy",
+		.be_id = MSM_FRONTEND_DAI_LSM1,
+	},
+	/* Multiple Tunnel instances */
+	{
+		.name = MSM_DAILINK_NAME(Compress2),
+		.stream_name = "Compress2",
+		.cpu_dai_name = "MultiMedia7",
+		.platform_name = "msm-compress-dsp",
+		.dynamic = 1,
+		.dpcm_playback = 1,
+		.trigger = {SND_SOC_DPCM_TRIGGER_POST,
+			 SND_SOC_DPCM_TRIGGER_POST},
+		.codec_dai_name = "snd-soc-dummy-dai",
+		.codec_name = "snd-soc-dummy",
+		.ignore_suspend = 1,
+		.ignore_pmdown_time = 1,
+		 /* this dailink has playback support */
+		.be_id = MSM_FRONTEND_DAI_MULTIMEDIA7,
+	},
+	{
+		.name = MSM_DAILINK_NAME(Compress3),
+		.stream_name = "Compress3",
+		.cpu_dai_name = "MultiMedia10",
+		.platform_name = "msm-compress-dsp",
+		.dynamic = 1,
+		.dpcm_playback = 1,
+		.trigger = {SND_SOC_DPCM_TRIGGER_POST,
+			 SND_SOC_DPCM_TRIGGER_POST},
+		.codec_dai_name = "snd-soc-dummy-dai",
+		.codec_name = "snd-soc-dummy",
+		.ignore_suspend = 1,
+		.ignore_pmdown_time = 1,
+		 /* this dailink has playback support */
+		.be_id = MSM_FRONTEND_DAI_MULTIMEDIA10,
+	},
+	{
+		.name = MSM_DAILINK_NAME(ULL_NOIRQ),
+		.stream_name = "MM_NOIRQ",
+		.cpu_dai_name = "MultiMedia8",
+		.platform_name = "msm-pcm-dsp-noirq",
+		.dynamic = 1,
+		.dpcm_playback = 1,
+		.dpcm_capture = 1,
+		.trigger = {SND_SOC_DPCM_TRIGGER_POST,
+			 SND_SOC_DPCM_TRIGGER_POST},
+		.codec_dai_name = "snd-soc-dummy-dai",
+		.codec_name = "snd-soc-dummy",
+		.ignore_suspend = 1,
+		.ignore_pmdown_time = 1,
+		 /* this dailink has playback support */
+		.be_id = MSM_FRONTEND_DAI_MULTIMEDIA8,
+	},
+	/* HDMI Hostless */
+	{
+		.name = "HDMI_RX_HOSTLESS",
+		.stream_name = "HDMI_RX_HOSTLESS",
+		.cpu_dai_name = "HDMI_HOSTLESS",
+		.platform_name = "msm-pcm-hostless",
+		.dynamic = 1,
+		.dpcm_playback = 1,
+		.trigger = {SND_SOC_DPCM_TRIGGER_POST,
+			SND_SOC_DPCM_TRIGGER_POST},
+		.no_host_mode = SND_SOC_DAI_LINK_NO_HOST,
+		.ignore_suspend = 1,
+		.ignore_pmdown_time = 1,
+		.codec_dai_name = "snd-soc-dummy-dai",
+		.codec_name = "snd-soc-dummy",
+	},
+	{
+		.name = "VoiceMMode2",
+		.stream_name = "VoiceMMode2",
+		.cpu_dai_name = "VoiceMMode2",
+		.platform_name = "msm-pcm-voice",
+		.dynamic = 1,
+		.dpcm_playback = 1,
+		.dpcm_capture = 1,
+		.trigger = {SND_SOC_DPCM_TRIGGER_POST,
+			    SND_SOC_DPCM_TRIGGER_POST},
+		.no_host_mode = SND_SOC_DAI_LINK_NO_HOST,
+		.ignore_suspend = 1,
+		.ignore_pmdown_time = 1,
+		.codec_dai_name = "snd-soc-dummy-dai",
+		.codec_name = "snd-soc-dummy",
+		.be_id = MSM_FRONTEND_DAI_VOICEMMODE2,
+	},
+	/* LSM FE */
+	{
+		.name = "Listen 2 Audio Service",
+		.stream_name = "Listen 2 Audio Service",
+		.cpu_dai_name = "LSM2",
+		.platform_name = "msm-lsm-client",
+		.dynamic = 1,
+		.dpcm_capture = 1,
+		.trigger = { SND_SOC_DPCM_TRIGGER_POST,
+				 SND_SOC_DPCM_TRIGGER_POST },
+		.no_host_mode = SND_SOC_DAI_LINK_NO_HOST,
+		.ignore_suspend = 1,
+		.ignore_pmdown_time = 1,
+		.codec_dai_name = "snd-soc-dummy-dai",
+		.codec_name = "snd-soc-dummy",
+		.be_id = MSM_FRONTEND_DAI_LSM2,
+	},
+	{
+		.name = "Listen 3 Audio Service",
+		.stream_name = "Listen 3 Audio Service",
+		.cpu_dai_name = "LSM3",
+		.platform_name = "msm-lsm-client",
+		.dynamic = 1,
+		.dpcm_capture = 1,
+		.trigger = { SND_SOC_DPCM_TRIGGER_POST,
+				 SND_SOC_DPCM_TRIGGER_POST },
+		.no_host_mode = SND_SOC_DAI_LINK_NO_HOST,
+		.ignore_suspend = 1,
+		.ignore_pmdown_time = 1,
+		.codec_dai_name = "snd-soc-dummy-dai",
+		.codec_name = "snd-soc-dummy",
+		.be_id = MSM_FRONTEND_DAI_LSM3,
+	},
+	{
+		.name = "Listen 4 Audio Service",
+		.stream_name = "Listen 4 Audio Service",
+		.cpu_dai_name = "LSM4",
+		.platform_name = "msm-lsm-client",
+		.dynamic = 1,
+		.dpcm_capture = 1,
+		.trigger = { SND_SOC_DPCM_TRIGGER_POST,
+				 SND_SOC_DPCM_TRIGGER_POST },
+		.no_host_mode = SND_SOC_DAI_LINK_NO_HOST,
+		.ignore_suspend = 1,
+		.ignore_pmdown_time = 1,
+		.codec_dai_name = "snd-soc-dummy-dai",
+		.codec_name = "snd-soc-dummy",
+		.be_id = MSM_FRONTEND_DAI_LSM4,
+	},
+	{
+		.name = "Listen 5 Audio Service",
+		.stream_name = "Listen 5 Audio Service",
+		.cpu_dai_name = "LSM5",
+		.platform_name = "msm-lsm-client",
+		.dynamic = 1,
+		.dpcm_capture = 1,
+		.trigger = { SND_SOC_DPCM_TRIGGER_POST,
+				 SND_SOC_DPCM_TRIGGER_POST },
+		.no_host_mode = SND_SOC_DAI_LINK_NO_HOST,
+		.ignore_suspend = 1,
+		.ignore_pmdown_time = 1,
+		.codec_dai_name = "snd-soc-dummy-dai",
+		.codec_name = "snd-soc-dummy",
+		.be_id = MSM_FRONTEND_DAI_LSM5,
+	},
+	{
+		.name = "Listen 6 Audio Service",
+		.stream_name = "Listen 6 Audio Service",
+		.cpu_dai_name = "LSM6",
+		.platform_name = "msm-lsm-client",
+		.dynamic = 1,
+		.dpcm_capture = 1,
+		.trigger = { SND_SOC_DPCM_TRIGGER_POST,
+				 SND_SOC_DPCM_TRIGGER_POST },
+		.no_host_mode = SND_SOC_DAI_LINK_NO_HOST,
+		.ignore_suspend = 1,
+		.ignore_pmdown_time = 1,
+		.codec_dai_name = "snd-soc-dummy-dai",
+		.codec_name = "snd-soc-dummy",
+		.be_id = MSM_FRONTEND_DAI_LSM6,
+	},
+	{
+		.name = "Listen 7 Audio Service",
+		.stream_name = "Listen 7 Audio Service",
+		.cpu_dai_name = "LSM7",
+		.platform_name = "msm-lsm-client",
+		.dynamic = 1,
+		.dpcm_capture = 1,
+		.trigger = { SND_SOC_DPCM_TRIGGER_POST,
+				 SND_SOC_DPCM_TRIGGER_POST },
+		.no_host_mode = SND_SOC_DAI_LINK_NO_HOST,
+		.ignore_suspend = 1,
+		.ignore_pmdown_time = 1,
+		.codec_dai_name = "snd-soc-dummy-dai",
+		.codec_name = "snd-soc-dummy",
+		.be_id = MSM_FRONTEND_DAI_LSM7,
+	},
+	{
+		.name = "Listen 8 Audio Service",
+		.stream_name = "Listen 8 Audio Service",
+		.cpu_dai_name = "LSM8",
+		.platform_name = "msm-lsm-client",
+		.dynamic = 1,
+		.dpcm_capture = 1,
+		.trigger = { SND_SOC_DPCM_TRIGGER_POST,
+				 SND_SOC_DPCM_TRIGGER_POST },
+		.no_host_mode = SND_SOC_DAI_LINK_NO_HOST,
+		.ignore_suspend = 1,
+		.ignore_pmdown_time = 1,
+		.codec_dai_name = "snd-soc-dummy-dai",
+		.codec_name = "snd-soc-dummy",
+		.be_id = MSM_FRONTEND_DAI_LSM8,
+	},
+	{
+		.name = MSM_DAILINK_NAME(Media9),
+		.stream_name = "MultiMedia9",
+		.cpu_dai_name = "MultiMedia9",
+		.platform_name = "msm-pcm-dsp.0",
+		.dynamic = 1,
+		.dpcm_playback = 1,
+		.dpcm_capture = 1,
+		.trigger = {SND_SOC_DPCM_TRIGGER_POST,
+				SND_SOC_DPCM_TRIGGER_POST},
+		.codec_dai_name = "snd-soc-dummy-dai",
+		.codec_name = "snd-soc-dummy",
+		.ignore_suspend = 1,
+		/* this dailink has playback support */
+		.ignore_pmdown_time = 1,
+		.be_id = MSM_FRONTEND_DAI_MULTIMEDIA9,
+	},
+	{
+		.name = MSM_DAILINK_NAME(Compress4),
+		.stream_name = "Compress4",
+		.cpu_dai_name = "MultiMedia11",
+		.platform_name = "msm-compress-dsp",
+		.dynamic = 1,
+		.dpcm_playback = 1,
+		.trigger = {SND_SOC_DPCM_TRIGGER_POST,
+			 SND_SOC_DPCM_TRIGGER_POST},
+		.codec_dai_name = "snd-soc-dummy-dai",
+		.codec_name = "snd-soc-dummy",
+		.ignore_suspend = 1,
+		.ignore_pmdown_time = 1,
+		 /* this dailink has playback support */
+		.be_id = MSM_FRONTEND_DAI_MULTIMEDIA11,
+	},
+	{
+		.name = MSM_DAILINK_NAME(Compress5),
+		.stream_name = "Compress5",
+		.cpu_dai_name = "MultiMedia12",
+		.platform_name = "msm-compress-dsp",
+		.dynamic = 1,
+		.dpcm_playback = 1,
+		.trigger = {SND_SOC_DPCM_TRIGGER_POST,
+			 SND_SOC_DPCM_TRIGGER_POST},
+		.codec_dai_name = "snd-soc-dummy-dai",
+		.codec_name = "snd-soc-dummy",
+		.ignore_suspend = 1,
+		.ignore_pmdown_time = 1,
+		 /* this dailink has playback support */
+		.be_id = MSM_FRONTEND_DAI_MULTIMEDIA12,
+	},
+	{
+		.name = MSM_DAILINK_NAME(Compress6),
+		.stream_name = "Compress6",
+		.cpu_dai_name = "MultiMedia13",
+		.platform_name = "msm-compress-dsp",
+		.dynamic = 1,
+		.dpcm_playback = 1,
+		.trigger = {SND_SOC_DPCM_TRIGGER_POST,
+			 SND_SOC_DPCM_TRIGGER_POST},
+		.codec_dai_name = "snd-soc-dummy-dai",
+		.codec_name = "snd-soc-dummy",
+		.ignore_suspend = 1,
+		.ignore_pmdown_time = 1,
+		 /* this dailink has playback support */
+		.be_id = MSM_FRONTEND_DAI_MULTIMEDIA13,
+	},
+	{
+		.name = MSM_DAILINK_NAME(Compress7),
+		.stream_name = "Compress7",
+		.cpu_dai_name = "MultiMedia14",
+		.platform_name = "msm-compress-dsp",
+		.dynamic = 1,
+		.dpcm_playback = 1,
+		.trigger = {SND_SOC_DPCM_TRIGGER_POST,
+			 SND_SOC_DPCM_TRIGGER_POST},
+		.codec_dai_name = "snd-soc-dummy-dai",
+		.codec_name = "snd-soc-dummy",
+		.ignore_suspend = 1,
+		.ignore_pmdown_time = 1,
+		 /* this dailink has playback support */
+		.be_id = MSM_FRONTEND_DAI_MULTIMEDIA14,
+	},
+	{
+		.name = MSM_DAILINK_NAME(Compress8),
+		.stream_name = "Compress8",
+		.cpu_dai_name = "MultiMedia15",
+		.platform_name = "msm-compress-dsp",
+		.dynamic = 1,
+		.dpcm_playback = 1,
+		.trigger = {SND_SOC_DPCM_TRIGGER_POST,
+			 SND_SOC_DPCM_TRIGGER_POST},
+		.codec_dai_name = "snd-soc-dummy-dai",
+		.codec_name = "snd-soc-dummy",
+		.ignore_suspend = 1,
+		.ignore_pmdown_time = 1,
+		 /* this dailink has playback support */
+		.be_id = MSM_FRONTEND_DAI_MULTIMEDIA15,
+	},
+	{
+		.name = MSM_DAILINK_NAME(ULL_NOIRQ_2),
+		.stream_name = "MM_NOIRQ_2",
+		.cpu_dai_name = "MultiMedia16",
+		.platform_name = "msm-pcm-dsp-noirq",
+		.dynamic = 1,
+		.dpcm_playback = 1,
+		.dpcm_capture = 1,
+		.trigger = {SND_SOC_DPCM_TRIGGER_POST,
+			 SND_SOC_DPCM_TRIGGER_POST},
+		.codec_dai_name = "snd-soc-dummy-dai",
+		.codec_name = "snd-soc-dummy",
+		.ignore_suspend = 1,
+		.ignore_pmdown_time = 1,
+		 /* this dailink has playback support */
+		.be_id = MSM_FRONTEND_DAI_MULTIMEDIA16,
+	},
+	{
+		.name = "SLIMBUS_8 Hostless",
+		.stream_name = "SLIMBUS8_HOSTLESS Capture",
+		.cpu_dai_name = "SLIMBUS8_HOSTLESS",
+		.platform_name = "msm-pcm-hostless",
+		.dynamic = 1,
+		.dpcm_capture = 1,
+		.trigger = {SND_SOC_DPCM_TRIGGER_POST,
+			    SND_SOC_DPCM_TRIGGER_POST},
+		.no_host_mode = SND_SOC_DAI_LINK_NO_HOST,
+		.ignore_suspend = 1,
+		.ignore_pmdown_time = 1,
+		.codec_dai_name = "snd-soc-dummy-dai",
+		.codec_name = "snd-soc-dummy",
+	},
+};
+
+static struct snd_soc_dai_link msm_tasha_fe_dai_links[] = {
+	{
+		.name = LPASS_BE_SLIMBUS_4_TX,
+		.stream_name = "Slimbus4 Capture",
+		.cpu_dai_name = "msm-dai-q6-dev.16393",
+		.platform_name = "msm-pcm-hostless",
+		.codec_name = "tasha_codec",
+		.codec_dai_name = "tasha_vifeedback",
+		.be_id = MSM_BACKEND_DAI_SLIMBUS_4_TX,
+		.be_hw_params_fixup = msm_be_hw_params_fixup,
+		.ops = &msm_be_ops,
+		.no_host_mode = SND_SOC_DAI_LINK_NO_HOST,
+		.ignore_suspend = 1,
+	},
+	/* Ultrasound RX DAI Link */
+	{
+		.name = "SLIMBUS_2 Hostless Playback",
+		.stream_name = "SLIMBUS_2 Hostless Playback",
+		.cpu_dai_name = "msm-dai-q6-dev.16388",
+		.platform_name = "msm-pcm-hostless",
+		.codec_name = "tasha_codec",
+		.codec_dai_name = "tasha_rx2",
+		.ignore_suspend = 1,
+		.no_host_mode = SND_SOC_DAI_LINK_NO_HOST,
+		.ops = &msm_slimbus_2_be_ops,
+	},
+	/* Ultrasound TX DAI Link */
+	{
+		.name = "SLIMBUS_2 Hostless Capture",
+		.stream_name = "SLIMBUS_2 Hostless Capture",
+		.cpu_dai_name = "msm-dai-q6-dev.16389",
+		.platform_name = "msm-pcm-hostless",
+		.codec_name = "tasha_codec",
+		.codec_dai_name = "tasha_tx2",
+		.ignore_suspend = 1,
+		.no_host_mode = SND_SOC_DAI_LINK_NO_HOST,
+		.ops = &msm_slimbus_2_be_ops,
+	},
+	/* CPE LSM direct dai-link */
+	{
+		.name = "CPE Listen service",
+		.stream_name = "CPE Listen Audio Service",
+		.cpu_dai_name = "msm-dai-slim",
+		.platform_name = "msm-cpe-lsm",
+		.trigger = {SND_SOC_DPCM_TRIGGER_POST,
+			    SND_SOC_DPCM_TRIGGER_POST},
+		.no_host_mode = SND_SOC_DAI_LINK_NO_HOST,
+		.ignore_suspend = 1,
+		.ignore_pmdown_time = 1,
+		.codec_dai_name = "tasha_mad1",
+		.codec_name = "tasha_codec",
+		.ops = &msm_cpe_ops,
+	},
+	{
+		.name = "SLIMBUS_6 Hostless Playback",
+		.stream_name = "SLIMBUS_6 Hostless",
+		.cpu_dai_name = "SLIMBUS6_HOSTLESS",
+		.platform_name = "msm-pcm-hostless",
+		.dynamic = 1,
+		.dpcm_playback = 1,
+		.trigger = {SND_SOC_DPCM_TRIGGER_POST,
+			    SND_SOC_DPCM_TRIGGER_POST},
+		.no_host_mode = SND_SOC_DAI_LINK_NO_HOST,
+		.ignore_suspend = 1,
+		 /* this dailink has playback support */
+		.ignore_pmdown_time = 1,
+		.codec_dai_name = "snd-soc-dummy-dai",
+		.codec_name = "snd-soc-dummy",
+	},
+	/* CPE LSM EC PP direct dai-link */
+	{
+		.name = "CPE Listen service ECPP",
+		.stream_name = "CPE Listen Audio Service ECPP",
+		.cpu_dai_name = "CPE_LSM_NOHOST",
+		.platform_name = "msm-cpe-lsm.3",
+		.trigger = {SND_SOC_DPCM_TRIGGER_POST,
+			    SND_SOC_DPCM_TRIGGER_POST},
+		.no_host_mode = SND_SOC_DAI_LINK_NO_HOST,
+		.ignore_suspend = 1,
+		.ignore_pmdown_time = 1,
+		.codec_dai_name = "tasha_cpe",
+		.codec_name = "tasha_codec",
+	},
+};
+
+static struct snd_soc_dai_link msm_tavil_fe_dai_links[] = {
+	{
+		.name = LPASS_BE_SLIMBUS_4_TX,
+		.stream_name = "Slimbus4 Capture",
+		.cpu_dai_name = "msm-dai-q6-dev.16393",
+		.platform_name = "msm-pcm-hostless",
+		.codec_name = "tavil_codec",
+		.codec_dai_name = "tavil_vifeedback",
+		.be_id = MSM_BACKEND_DAI_SLIMBUS_4_TX,
+		.be_hw_params_fixup = msm_be_hw_params_fixup,
+		.ops = &msm_be_ops,
+		.no_host_mode = SND_SOC_DAI_LINK_NO_HOST,
+		.ignore_suspend = 1,
+	},
+	/* Ultrasound RX DAI Link */
+	{
+		.name = "SLIMBUS_2 Hostless Playback",
+		.stream_name = "SLIMBUS_2 Hostless Playback",
+		.cpu_dai_name = "msm-dai-q6-dev.16388",
+		.platform_name = "msm-pcm-hostless",
+		.codec_name = "tavil_codec",
+		.codec_dai_name = "tavil_rx2",
+		.ignore_suspend = 1,
+		.no_host_mode = SND_SOC_DAI_LINK_NO_HOST,
+		.ops = &msm_slimbus_2_be_ops,
+	},
+	/* Ultrasound TX DAI Link */
+	{
+		.name = "SLIMBUS_2 Hostless Capture",
+		.stream_name = "SLIMBUS_2 Hostless Capture",
+		.cpu_dai_name = "msm-dai-q6-dev.16389",
+		.platform_name = "msm-pcm-hostless",
+		.codec_name = "tavil_codec",
+		.codec_dai_name = "tavil_tx2",
+		.ignore_suspend = 1,
+		.no_host_mode = SND_SOC_DAI_LINK_NO_HOST,
+		.ops = &msm_slimbus_2_be_ops,
+	},
+};
+
+static struct snd_soc_dai_link msm_common_misc_fe_dai_links[] = {
+	{
+		.name = MSM_DAILINK_NAME(ASM Loopback),
+		.stream_name = "MultiMedia6",
+		.cpu_dai_name = "MultiMedia6",
+		.platform_name = "msm-pcm-loopback",
+		.dynamic = 1,
+		.dpcm_playback = 1,
+		.dpcm_capture = 1,
+		.codec_dai_name = "snd-soc-dummy-dai",
+		.codec_name = "snd-soc-dummy",
+		.trigger = {SND_SOC_DPCM_TRIGGER_POST,
+			    SND_SOC_DPCM_TRIGGER_POST},
+		.ignore_suspend = 1,
+		.no_host_mode = SND_SOC_DAI_LINK_NO_HOST,
+		.ignore_pmdown_time = 1,
+		.be_id = MSM_FRONTEND_DAI_MULTIMEDIA6,
+	},
+	{
+		.name = "USB Audio Hostless",
+		.stream_name = "USB Audio Hostless",
+		.cpu_dai_name = "USBAUDIO_HOSTLESS",
+		.platform_name = "msm-pcm-hostless",
+		.dynamic = 1,
+		.dpcm_playback = 1,
+		.dpcm_capture = 1,
+		.trigger = {SND_SOC_DPCM_TRIGGER_POST,
+			    SND_SOC_DPCM_TRIGGER_POST},
+		.no_host_mode = SND_SOC_DAI_LINK_NO_HOST,
+		.ignore_suspend = 1,
+		.ignore_pmdown_time = 1,
+		.codec_dai_name = "snd-soc-dummy-dai",
+		.codec_name = "snd-soc-dummy",
+	},
+	{
+		.name = MSM_DAILINK_NAME(Transcode Loopback Playback),
+		.stream_name = "Transcode Loopback Playback",
+		.cpu_dai_name = "MultiMedia26",
+		.platform_name = "msm-transcode-loopback",
+		.dynamic = 1,
+		.dpcm_playback = 1,
+		.trigger = {SND_SOC_DPCM_TRIGGER_POST,
+			 SND_SOC_DPCM_TRIGGER_POST},
+		.codec_dai_name = "snd-soc-dummy-dai",
+		.codec_name = "snd-soc-dummy",
+		.ignore_suspend = 1,
+		.ignore_pmdown_time = 1,
+		 /* this dailink has playback support */
+		.be_id = MSM_FRONTEND_DAI_MULTIMEDIA26,
+	},
+	{
+		.name = MSM_DAILINK_NAME(Transcode Loopback Capture),
+		.stream_name = "Transcode Loopback Capture",
+		.cpu_dai_name = "MultiMedia27",
+		.platform_name = "msm-transcode-loopback",
+		.dynamic = 1,
+		.dpcm_capture = 1,
+		.trigger = {SND_SOC_DPCM_TRIGGER_POST,
+			 SND_SOC_DPCM_TRIGGER_POST},
+		.codec_dai_name = "snd-soc-dummy-dai",
+		.codec_name = "snd-soc-dummy",
+		.ignore_suspend = 1,
+		.ignore_pmdown_time = 1,
+		.be_id = MSM_FRONTEND_DAI_MULTIMEDIA27,
+	},
+	{
+		.name = "MultiMedia21",
+		.stream_name = "MultiMedia21",
+		.cpu_dai_name = "MultiMedia21",
+		.platform_name = "msm-pcm-dsp.0",
+		.dynamic = 1,
+		.async_ops = ASYNC_DPCM_SND_SOC_PREPARE,
+		.dpcm_playback = 1,
+		.codec_dai_name = "snd-soc-dummy-dai",
+		.codec_name = "snd-soc-dummy",
+		.trigger = {SND_SOC_DPCM_TRIGGER_POST,
+			SND_SOC_DPCM_TRIGGER_POST},
+		.ignore_suspend = 1,
+		/* this dailink has playback support */
+		.ignore_pmdown_time = 1,
+		.be_id = MSM_FRONTEND_DAI_MULTIMEDIA21,
+	},
+	{
+		.name = "MultiMedia22",
+		.stream_name = "MultiMedia22",
+		.cpu_dai_name = "MultiMedia22",
+		.platform_name = "msm-pcm-dsp.0",
+		.dynamic = 1,
+		.async_ops = ASYNC_DPCM_SND_SOC_PREPARE,
+		.dpcm_playback = 1,
+		.codec_dai_name = "snd-soc-dummy-dai",
+		.codec_name = "snd-soc-dummy",
+		.trigger = {SND_SOC_DPCM_TRIGGER_POST,
+			SND_SOC_DPCM_TRIGGER_POST},
+		.ignore_suspend = 1,
+		/* this dailink has playback support */
+		.ignore_pmdown_time = 1,
+		.be_id = MSM_FRONTEND_DAI_MULTIMEDIA22,
+	},
+	{
+		.name = "MultiMedia23",
+		.stream_name = "MultiMedia23",
+		.cpu_dai_name = "MultiMedia23",
+		.platform_name = "msm-pcm-dsp.0",
+		.dynamic = 1,
+		.async_ops = ASYNC_DPCM_SND_SOC_PREPARE,
+		.dpcm_playback = 1,
+		.codec_dai_name = "snd-soc-dummy-dai",
+		.codec_name = "snd-soc-dummy",
+		.trigger = {SND_SOC_DPCM_TRIGGER_POST,
+			SND_SOC_DPCM_TRIGGER_POST},
+		.ignore_suspend = 1,
+		/* this dailink has playback support */
+		.ignore_pmdown_time = 1,
+		.be_id = MSM_FRONTEND_DAI_MULTIMEDIA23,
+	},
+	{
+		.name = "MultiMedia24",
+		.stream_name = "MultiMedia24",
+		.cpu_dai_name = "MultiMedia24",
+		.platform_name = "msm-pcm-dsp.0",
+		.dynamic = 1,
+		.async_ops = ASYNC_DPCM_SND_SOC_PREPARE,
+		.dpcm_playback = 1,
+		.codec_dai_name = "snd-soc-dummy-dai",
+		.codec_name = "snd-soc-dummy",
+		.trigger = {SND_SOC_DPCM_TRIGGER_POST,
+			SND_SOC_DPCM_TRIGGER_POST},
+		.ignore_suspend = 1,
+		/* this dailink has playback support */
+		.ignore_pmdown_time = 1,
+		.be_id = MSM_FRONTEND_DAI_MULTIMEDIA24,
+	},
+	{
+		.name = "MultiMedia25",
+		.stream_name = "MultiMedia25",
+		.cpu_dai_name = "MultiMedia25",
+		.platform_name = "msm-pcm-dsp.0",
+		.dynamic = 1,
+		.async_ops = ASYNC_DPCM_SND_SOC_PREPARE,
+		.dpcm_playback = 1,
+		.codec_dai_name = "snd-soc-dummy-dai",
+		.codec_name = "snd-soc-dummy",
+		.trigger = {SND_SOC_DPCM_TRIGGER_POST,
+			SND_SOC_DPCM_TRIGGER_POST},
+		.ignore_suspend = 1,
+		/* this dailink has playback support */
+		.ignore_pmdown_time = 1,
+		.be_id = MSM_FRONTEND_DAI_MULTIMEDIA25,
+	},
+};
+
+static struct snd_soc_dai_link msm_common_be_dai_links[] = {
+	/* Backend AFE DAI Links */
+	{
+		.name = LPASS_BE_AFE_PCM_RX,
+		.stream_name = "AFE Playback",
+		.cpu_dai_name = "msm-dai-q6-dev.224",
+		.platform_name = "msm-pcm-routing",
+		.codec_name = "msm-stub-codec.1",
+		.codec_dai_name = "msm-stub-rx",
+		.no_pcm = 1,
+		.dpcm_playback = 1,
+		.be_id = MSM_BACKEND_DAI_AFE_PCM_RX,
+		.be_hw_params_fixup = msm_be_hw_params_fixup,
+		/* this dailink has playback support */
+		.ignore_pmdown_time = 1,
+		.ignore_suspend = 1,
+	},
+	{
+		.name = LPASS_BE_AFE_PCM_TX,
+		.stream_name = "AFE Capture",
+		.cpu_dai_name = "msm-dai-q6-dev.225",
+		.platform_name = "msm-pcm-routing",
+		.codec_name = "msm-stub-codec.1",
+		.codec_dai_name = "msm-stub-tx",
+		.no_pcm = 1,
+		.dpcm_capture = 1,
+		.be_id = MSM_BACKEND_DAI_AFE_PCM_TX,
+		.be_hw_params_fixup = msm_be_hw_params_fixup,
+		.ignore_suspend = 1,
+	},
+	/* Incall Record Uplink BACK END DAI Link */
+	{
+		.name = LPASS_BE_INCALL_RECORD_TX,
+		.stream_name = "Voice Uplink Capture",
+		.cpu_dai_name = "msm-dai-q6-dev.32772",
+		.platform_name = "msm-pcm-routing",
+		.codec_name = "msm-stub-codec.1",
+		.codec_dai_name = "msm-stub-tx",
+		.no_pcm = 1,
+		.dpcm_capture = 1,
+		.be_id = MSM_BACKEND_DAI_INCALL_RECORD_TX,
+		.be_hw_params_fixup = msm_be_hw_params_fixup,
+		.ignore_suspend = 1,
+	},
+	/* Incall Record Downlink BACK END DAI Link */
+	{
+		.name = LPASS_BE_INCALL_RECORD_RX,
+		.stream_name = "Voice Downlink Capture",
+		.cpu_dai_name = "msm-dai-q6-dev.32771",
+		.platform_name = "msm-pcm-routing",
+		.codec_name = "msm-stub-codec.1",
+		.codec_dai_name = "msm-stub-tx",
+		.no_pcm = 1,
+		.dpcm_capture = 1,
+		.be_id = MSM_BACKEND_DAI_INCALL_RECORD_RX,
+		.be_hw_params_fixup = msm_be_hw_params_fixup,
+		.ignore_suspend = 1,
+	},
+	/* Incall Music BACK END DAI Link */
+	{
+		.name = LPASS_BE_VOICE_PLAYBACK_TX,
+		.stream_name = "Voice Farend Playback",
+		.cpu_dai_name = "msm-dai-q6-dev.32773",
+		.platform_name = "msm-pcm-routing",
+		.codec_name = "msm-stub-codec.1",
+		.codec_dai_name = "msm-stub-rx",
+		.no_pcm = 1,
+		.dpcm_playback = 1,
+		.be_id = MSM_BACKEND_DAI_VOICE_PLAYBACK_TX,
+		.be_hw_params_fixup = msm_be_hw_params_fixup,
+		.ignore_suspend = 1,
+	},
+	/* Incall Music 2 BACK END DAI Link */
+	{
+		.name = LPASS_BE_VOICE2_PLAYBACK_TX,
+		.stream_name = "Voice2 Farend Playback",
+		.cpu_dai_name = "msm-dai-q6-dev.32770",
+		.platform_name = "msm-pcm-routing",
+		.codec_name = "msm-stub-codec.1",
+		.codec_dai_name = "msm-stub-rx",
+		.no_pcm = 1,
+		.dpcm_playback = 1,
+		.be_id = MSM_BACKEND_DAI_VOICE2_PLAYBACK_TX,
+		.be_hw_params_fixup = msm_be_hw_params_fixup,
+		.ignore_suspend = 1,
+	},
+	{
+		.name = LPASS_BE_USB_AUDIO_RX,
+		.stream_name = "USB Audio Playback",
+		.cpu_dai_name = "msm-dai-q6-dev.28672",
+		.platform_name = "msm-pcm-routing",
+		.codec_name = "msm-stub-codec.1",
+		.codec_dai_name = "msm-stub-rx",
+		.no_pcm = 1,
+		.dpcm_playback = 1,
+		.be_id = MSM_BACKEND_DAI_USB_RX,
+		.be_hw_params_fixup = msm_be_hw_params_fixup,
+		.ignore_pmdown_time = 1,
+		.ignore_suspend = 1,
+	},
+	{
+		.name = LPASS_BE_USB_AUDIO_TX,
+		.stream_name = "USB Audio Capture",
+		.cpu_dai_name = "msm-dai-q6-dev.28673",
+		.platform_name = "msm-pcm-routing",
+		.codec_name = "msm-stub-codec.1",
+		.codec_dai_name = "msm-stub-tx",
+		.no_pcm = 1,
+		.dpcm_capture = 1,
+		.be_id = MSM_BACKEND_DAI_USB_TX,
+		.be_hw_params_fixup = msm_be_hw_params_fixup,
+		.ignore_suspend = 1,
+	},
+	{
+		.name = LPASS_BE_PRI_TDM_RX_0,
+		.stream_name = "Primary TDM0 Playback",
+		.cpu_dai_name = "msm-dai-q6-tdm.36864",
+		.platform_name = "msm-pcm-routing",
+		.codec_name = "msm-stub-codec.1",
+		.codec_dai_name = "msm-stub-rx",
+		.no_pcm = 1,
+		.dpcm_playback = 1,
+		.be_id = MSM_BACKEND_DAI_PRI_TDM_RX_0,
+		.be_hw_params_fixup = msm_be_hw_params_fixup,
+		.ops = &msm_tdm_be_ops,
+		.ignore_suspend = 1,
+	},
+	{
+		.name = LPASS_BE_PRI_TDM_TX_0,
+		.stream_name = "Primary TDM0 Capture",
+		.cpu_dai_name = "msm-dai-q6-tdm.36865",
+		.platform_name = "msm-pcm-routing",
+		.codec_name = "msm-stub-codec.1",
+		.codec_dai_name = "msm-stub-tx",
+		.no_pcm = 1,
+		.dpcm_capture = 1,
+		.be_id = MSM_BACKEND_DAI_PRI_TDM_TX_0,
+		.be_hw_params_fixup = msm_be_hw_params_fixup,
+		.ops = &msm_tdm_be_ops,
+		.ignore_suspend = 1,
+	},
+	{
+		.name = LPASS_BE_SEC_TDM_RX_0,
+		.stream_name = "Secondary TDM0 Playback",
+		.cpu_dai_name = "msm-dai-q6-tdm.36880",
+		.platform_name = "msm-pcm-routing",
+		.codec_name = "msm-stub-codec.1",
+		.codec_dai_name = "msm-stub-rx",
+		.no_pcm = 1,
+		.dpcm_playback = 1,
+		.be_id = MSM_BACKEND_DAI_SEC_TDM_RX_0,
+		.be_hw_params_fixup = msm_be_hw_params_fixup,
+		.ops = &msm_tdm_be_ops,
+		.ignore_suspend = 1,
+	},
+	{
+		.name = LPASS_BE_SEC_TDM_TX_0,
+		.stream_name = "Secondary TDM0 Capture",
+		.cpu_dai_name = "msm-dai-q6-tdm.36881",
+		.platform_name = "msm-pcm-routing",
+		.codec_name = "msm-stub-codec.1",
+		.codec_dai_name = "msm-stub-tx",
+		.no_pcm = 1,
+		.dpcm_capture = 1,
+		.be_id = MSM_BACKEND_DAI_SEC_TDM_TX_0,
+		.be_hw_params_fixup = msm_be_hw_params_fixup,
+		.ops = &msm_tdm_be_ops,
+		.ignore_suspend = 1,
+	},
+	{
+		.name = LPASS_BE_TERT_TDM_RX_0,
+		.stream_name = "Tertiary TDM0 Playback",
+		.cpu_dai_name = "msm-dai-q6-tdm.36896",
+		.platform_name = "msm-pcm-routing",
+		.codec_name = "msm-stub-codec.1",
+		.codec_dai_name = "msm-stub-rx",
+		.no_pcm = 1,
+		.dpcm_playback = 1,
+		.be_id = MSM_BACKEND_DAI_TERT_TDM_RX_0,
+		.be_hw_params_fixup = msm_be_hw_params_fixup,
+		.ops = &msm_tdm_be_ops,
+		.ignore_suspend = 1,
+	},
+	{
+		.name = LPASS_BE_TERT_TDM_TX_0,
+		.stream_name = "Tertiary TDM0 Capture",
+		.cpu_dai_name = "msm-dai-q6-tdm.36897",
+		.platform_name = "msm-pcm-routing",
+		.codec_name = "msm-stub-codec.1",
+		.codec_dai_name = "msm-stub-tx",
+		.no_pcm = 1,
+		.dpcm_capture = 1,
+		.be_id = MSM_BACKEND_DAI_TERT_TDM_TX_0,
+		.be_hw_params_fixup = msm_be_hw_params_fixup,
+		.ops = &msm_tdm_be_ops,
+		.ignore_suspend = 1,
+	},
+	{
+		.name = LPASS_BE_QUAT_TDM_RX_0,
+		.stream_name = "Quaternary TDM0 Playback",
+		.cpu_dai_name = "msm-dai-q6-tdm.36912",
+		.platform_name = "msm-pcm-routing",
+		.codec_name = "msm-stub-codec.1",
+		.codec_dai_name = "msm-stub-rx",
+		.no_pcm = 1,
+		.dpcm_playback = 1,
+		.be_id = MSM_BACKEND_DAI_QUAT_TDM_RX_0,
+		.be_hw_params_fixup = msm_tdm_be_hw_params_fixup,
+		.ops = &msm8998_tdm_be_ops,
+		.ignore_suspend = 1,
+	},
+	{
+		.name = LPASS_BE_QUAT_TDM_TX_0,
+		.stream_name = "Quaternary TDM0 Capture",
+		.cpu_dai_name = "msm-dai-q6-tdm.36913",
+		.platform_name = "msm-pcm-routing",
+		.codec_name = "msm-stub-codec.1",
+		.codec_dai_name = "msm-stub-tx",
+		.no_pcm = 1,
+		.dpcm_capture = 1,
+		.be_id = MSM_BACKEND_DAI_QUAT_TDM_TX_0,
+		.be_hw_params_fixup = msm_be_hw_params_fixup,
+		.ops = &msm_tdm_be_ops,
+		.ignore_suspend = 1,
+	},
+};
+
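+/*
+ * Codec-specific BE DAI links. These arrays are not registered on their
+ * own: populate_snd_card_dailinks() below builds the final dai_link array
+ * at probe time by concatenating the common FE/BE link arrays with the
+ * FE/BE set for whichever codec the DT compatible string selects.
+ */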
+static struct snd_soc_dai_link msm_tasha_be_dai_links[] = {
+	{
+		.name = LPASS_BE_SLIMBUS_0_RX,
+		.stream_name = "Slimbus Playback",
+		.cpu_dai_name = "msm-dai-q6-dev.16384",
+		.platform_name = "msm-pcm-routing",
+		.codec_name = "tasha_codec",
+		.codec_dai_name = "tasha_mix_rx1",
+		.no_pcm = 1,
+		.dpcm_playback = 1,
+		.be_id = MSM_BACKEND_DAI_SLIMBUS_0_RX,
+		.init = &msm_audrx_init,
+		.be_hw_params_fixup = msm_be_hw_params_fixup,
+		/* this dai link has playback support */
+		.ignore_pmdown_time = 1,
+		.ignore_suspend = 1,
+		.ops = &msm_be_ops,
+	},
+	{
+		.name = LPASS_BE_SLIMBUS_0_TX,
+		.stream_name = "Slimbus Capture",
+		.cpu_dai_name = "msm-dai-q6-dev.16385",
+		.platform_name = "msm-pcm-routing",
+		.codec_name = "tasha_codec",
+		.codec_dai_name = "tasha_tx1",
+		.no_pcm = 1,
+		.dpcm_capture = 1,
+		.be_id = MSM_BACKEND_DAI_SLIMBUS_0_TX,
+		.be_hw_params_fixup = msm_be_hw_params_fixup,
+		.ignore_suspend = 1,
+		.ops = &msm_be_ops,
+	},
+	{
+		.name = LPASS_BE_SLIMBUS_1_RX,
+		.stream_name = "Slimbus1 Playback",
+		.cpu_dai_name = "msm-dai-q6-dev.16386",
+		.platform_name = "msm-pcm-routing",
+		.codec_name = "tasha_codec",
+		.codec_dai_name = "tasha_mix_rx1",
+		.no_pcm = 1,
+		.dpcm_playback = 1,
+		.be_id = MSM_BACKEND_DAI_SLIMBUS_1_RX,
+		.be_hw_params_fixup = msm_be_hw_params_fixup,
+		.ops = &msm_be_ops,
+		/* dai link has playback support */
+		.ignore_pmdown_time = 1,
+		.ignore_suspend = 1,
+	},
+	{
+		.name = LPASS_BE_SLIMBUS_1_TX,
+		.stream_name = "Slimbus1 Capture",
+		.cpu_dai_name = "msm-dai-q6-dev.16387",
+		.platform_name = "msm-pcm-routing",
+		.codec_name = "tasha_codec",
+		.codec_dai_name = "tasha_tx3",
+		.no_pcm = 1,
+		.dpcm_capture = 1,
+		.be_id = MSM_BACKEND_DAI_SLIMBUS_1_TX,
+		.be_hw_params_fixup = msm_be_hw_params_fixup,
+		.ops = &msm_be_ops,
+		.ignore_suspend = 1,
+	},
+	{
+		.name = LPASS_BE_SLIMBUS_3_RX,
+		.stream_name = "Slimbus3 Playback",
+		.cpu_dai_name = "msm-dai-q6-dev.16390",
+		.platform_name = "msm-pcm-routing",
+		.codec_name = "tasha_codec",
+		.codec_dai_name = "tasha_mix_rx1",
+		.no_pcm = 1,
+		.dpcm_playback = 1,
+		.be_id = MSM_BACKEND_DAI_SLIMBUS_3_RX,
+		.be_hw_params_fixup = msm_be_hw_params_fixup,
+		.ops = &msm_be_ops,
+		/* dai link has playback support */
+		.ignore_pmdown_time = 1,
+		.ignore_suspend = 1,
+	},
+	{
+		.name = LPASS_BE_SLIMBUS_3_TX,
+		.stream_name = "Slimbus3 Capture",
+		.cpu_dai_name = "msm-dai-q6-dev.16391",
+		.platform_name = "msm-pcm-routing",
+		.codec_name = "tasha_codec",
+		.codec_dai_name = "tasha_tx1",
+		.no_pcm = 1,
+		.dpcm_capture = 1,
+		.be_id = MSM_BACKEND_DAI_SLIMBUS_3_TX,
+		.be_hw_params_fixup = msm_be_hw_params_fixup,
+		.ops = &msm_be_ops,
+		.ignore_suspend = 1,
+	},
+	{
+		.name = LPASS_BE_SLIMBUS_4_RX,
+		.stream_name = "Slimbus4 Playback",
+		.cpu_dai_name = "msm-dai-q6-dev.16392",
+		.platform_name = "msm-pcm-routing",
+		.codec_name = "tasha_codec",
+		.codec_dai_name = "tasha_mix_rx1",
+		.no_pcm = 1,
+		.dpcm_playback = 1,
+		.be_id = MSM_BACKEND_DAI_SLIMBUS_4_RX,
+		.be_hw_params_fixup = msm_be_hw_params_fixup,
+		.ops = &msm_be_ops,
+		/* dai link has playback support */
+		.ignore_pmdown_time = 1,
+		.ignore_suspend = 1,
+	},
+	{
+		.name = LPASS_BE_SLIMBUS_5_RX,
+		.stream_name = "Slimbus5 Playback",
+		.cpu_dai_name = "msm-dai-q6-dev.16394",
+		.platform_name = "msm-pcm-routing",
+		.codec_name = "tasha_codec",
+		.codec_dai_name = "tasha_rx3",
+		.no_pcm = 1,
+		.dpcm_playback = 1,
+		.be_id = MSM_BACKEND_DAI_SLIMBUS_5_RX,
+		.be_hw_params_fixup = msm_be_hw_params_fixup,
+		.ops = &msm_be_ops,
+		/* dai link has playback support */
+		.ignore_pmdown_time = 1,
+		.ignore_suspend = 1,
+	},
+	/* MAD BE */
+	{
+		.name = LPASS_BE_SLIMBUS_5_TX,
+		.stream_name = "Slimbus5 Capture",
+		.cpu_dai_name = "msm-dai-q6-dev.16395",
+		.platform_name = "msm-pcm-routing",
+		.codec_name = "tasha_codec",
+		.codec_dai_name = "tasha_mad1",
+		.no_pcm = 1,
+		.dpcm_capture = 1,
+		.be_id = MSM_BACKEND_DAI_SLIMBUS_5_TX,
+		.be_hw_params_fixup = msm_be_hw_params_fixup,
+		.ops = &msm_be_ops,
+		.ignore_suspend = 1,
+	},
+	{
+		.name = LPASS_BE_SLIMBUS_6_RX,
+		.stream_name = "Slimbus6 Playback",
+		.cpu_dai_name = "msm-dai-q6-dev.16396",
+		.platform_name = "msm-pcm-routing",
+		.codec_name = "tasha_codec",
+		.codec_dai_name = "tasha_rx4",
+		.no_pcm = 1,
+		.dpcm_playback = 1,
+		.be_id = MSM_BACKEND_DAI_SLIMBUS_6_RX,
+		.be_hw_params_fixup = msm_be_hw_params_fixup,
+		.ops = &msm_be_ops,
+		/* dai link has playback support */
+		.ignore_pmdown_time = 1,
+		.ignore_suspend = 1,
+	},
+	/* Slimbus VI Recording */
+	{
+		.name = LPASS_BE_SLIMBUS_TX_VI,
+		.stream_name = "Slimbus4 Capture",
+		.cpu_dai_name = "msm-dai-q6-dev.16393",
+		.platform_name = "msm-pcm-routing",
+		.codec_name = "tasha_codec",
+		.codec_dai_name = "tasha_vifeedback",
+		.be_id = MSM_BACKEND_DAI_SLIMBUS_4_TX,
+		.be_hw_params_fixup = msm_be_hw_params_fixup,
+		.ops = &msm_be_ops,
+		.ignore_suspend = 1,
+		.no_pcm = 1,
+		.dpcm_capture = 1,
+		.ignore_pmdown_time = 1,
+	},
+};
+
+static struct snd_soc_dai_link msm_tavil_be_dai_links[] = {
+	{
+		.name = LPASS_BE_SLIMBUS_0_RX,
+		.stream_name = "Slimbus Playback",
+		.cpu_dai_name = "msm-dai-q6-dev.16384",
+		.platform_name = "msm-pcm-routing",
+		.codec_name = "tavil_codec",
+		.codec_dai_name = "tavil_rx1",
+		.no_pcm = 1,
+		.dpcm_playback = 1,
+		.be_id = MSM_BACKEND_DAI_SLIMBUS_0_RX,
+		.init = &msm_audrx_init,
+		.be_hw_params_fixup = msm_be_hw_params_fixup,
+		/* this dai link has playback support */
+		.ignore_pmdown_time = 1,
+		.ignore_suspend = 1,
+		.ops = &msm_be_ops,
+	},
+	{
+		.name = LPASS_BE_SLIMBUS_0_TX,
+		.stream_name = "Slimbus Capture",
+		.cpu_dai_name = "msm-dai-q6-dev.16385",
+		.platform_name = "msm-pcm-routing",
+		.codec_name = "tavil_codec",
+		.codec_dai_name = "tavil_tx1",
+		.no_pcm = 1,
+		.dpcm_capture = 1,
+		.be_id = MSM_BACKEND_DAI_SLIMBUS_0_TX,
+		.be_hw_params_fixup = msm_be_hw_params_fixup,
+		.ignore_suspend = 1,
+		.ops = &msm_be_ops,
+	},
+	{
+		.name = LPASS_BE_SLIMBUS_1_RX,
+		.stream_name = "Slimbus1 Playback",
+		.cpu_dai_name = "msm-dai-q6-dev.16386",
+		.platform_name = "msm-pcm-routing",
+		.codec_name = "tavil_codec",
+		.codec_dai_name = "tavil_rx1",
+		.no_pcm = 1,
+		.dpcm_playback = 1,
+		.be_id = MSM_BACKEND_DAI_SLIMBUS_1_RX,
+		.be_hw_params_fixup = msm_be_hw_params_fixup,
+		.ops = &msm_be_ops,
+		/* dai link has playback support */
+		.ignore_pmdown_time = 1,
+		.ignore_suspend = 1,
+	},
+	{
+		.name = LPASS_BE_SLIMBUS_1_TX,
+		.stream_name = "Slimbus1 Capture",
+		.cpu_dai_name = "msm-dai-q6-dev.16387",
+		.platform_name = "msm-pcm-routing",
+		.codec_name = "tavil_codec",
+		.codec_dai_name = "tavil_tx3",
+		.no_pcm = 1,
+		.dpcm_capture = 1,
+		.be_id = MSM_BACKEND_DAI_SLIMBUS_1_TX,
+		.be_hw_params_fixup = msm_be_hw_params_fixup,
+		.ops = &msm_be_ops,
+		.ignore_suspend = 1,
+	},
+	{
+		.name = LPASS_BE_SLIMBUS_2_RX,
+		.stream_name = "Slimbus2 Playback",
+		.cpu_dai_name = "msm-dai-q6-dev.16388",
+		.platform_name = "msm-pcm-routing",
+		.codec_name = "tavil_codec",
+		.codec_dai_name = "tavil_rx2",
+		.no_pcm = 1,
+		.dpcm_playback = 1,
+		.be_id = MSM_BACKEND_DAI_SLIMBUS_2_RX,
+		.be_hw_params_fixup = msm_be_hw_params_fixup,
+		.ops = &msm_be_ops,
+		.ignore_pmdown_time = 1,
+		.ignore_suspend = 1,
+	},
+	{
+		.name = LPASS_BE_SLIMBUS_3_RX,
+		.stream_name = "Slimbus3 Playback",
+		.cpu_dai_name = "msm-dai-q6-dev.16390",
+		.platform_name = "msm-pcm-routing",
+		.codec_name = "tavil_codec",
+		.codec_dai_name = "tavil_rx1",
+		.no_pcm = 1,
+		.dpcm_playback = 1,
+		.be_id = MSM_BACKEND_DAI_SLIMBUS_3_RX,
+		.be_hw_params_fixup = msm_be_hw_params_fixup,
+		.ops = &msm_be_ops,
+		/* dai link has playback support */
+		.ignore_pmdown_time = 1,
+		.ignore_suspend = 1,
+	},
+	{
+		.name = LPASS_BE_SLIMBUS_3_TX,
+		.stream_name = "Slimbus3 Capture",
+		.cpu_dai_name = "msm-dai-q6-dev.16391",
+		.platform_name = "msm-pcm-routing",
+		.codec_name = "tavil_codec",
+		.codec_dai_name = "tavil_tx1",
+		.no_pcm = 1,
+		.dpcm_capture = 1,
+		.be_id = MSM_BACKEND_DAI_SLIMBUS_3_TX,
+		.be_hw_params_fixup = msm_be_hw_params_fixup,
+		.ops = &msm_be_ops,
+		.ignore_suspend = 1,
+	},
+	{
+		.name = LPASS_BE_SLIMBUS_4_RX,
+		.stream_name = "Slimbus4 Playback",
+		.cpu_dai_name = "msm-dai-q6-dev.16392",
+		.platform_name = "msm-pcm-routing",
+		.codec_name = "tavil_codec",
+		.codec_dai_name = "tavil_rx1",
+		.no_pcm = 1,
+		.dpcm_playback = 1,
+		.be_id = MSM_BACKEND_DAI_SLIMBUS_4_RX,
+		.be_hw_params_fixup = msm_be_hw_params_fixup,
+		.ops = &msm_be_ops,
+		/* dai link has playback support */
+		.ignore_pmdown_time = 1,
+		.ignore_suspend = 1,
+	},
+	{
+		.name = LPASS_BE_SLIMBUS_5_RX,
+		.stream_name = "Slimbus5 Playback",
+		.cpu_dai_name = "msm-dai-q6-dev.16394",
+		.platform_name = "msm-pcm-routing",
+		.codec_name = "tavil_codec",
+		.codec_dai_name = "tavil_rx3",
+		.no_pcm = 1,
+		.dpcm_playback = 1,
+		.be_id = MSM_BACKEND_DAI_SLIMBUS_5_RX,
+		.be_hw_params_fixup = msm_be_hw_params_fixup,
+		.ops = &msm_be_ops,
+		/* dai link has playback support */
+		.ignore_pmdown_time = 1,
+		.ignore_suspend = 1,
+	},
+	/* MAD BE */
+	{
+		.name = LPASS_BE_SLIMBUS_5_TX,
+		.stream_name = "Slimbus5 Capture",
+		.cpu_dai_name = "msm-dai-q6-dev.16395",
+		.platform_name = "msm-pcm-routing",
+		.codec_name = "tavil_codec",
+		.codec_dai_name = "tavil_mad1",
+		.no_pcm = 1,
+		.dpcm_capture = 1,
+		.be_id = MSM_BACKEND_DAI_SLIMBUS_5_TX,
+		.be_hw_params_fixup = msm_be_hw_params_fixup,
+		.ops = &msm_be_ops,
+		.ignore_suspend = 1,
+	},
+	{
+		.name = LPASS_BE_SLIMBUS_6_RX,
+		.stream_name = "Slimbus6 Playback",
+		.cpu_dai_name = "msm-dai-q6-dev.16396",
+		.platform_name = "msm-pcm-routing",
+		.codec_name = "tavil_codec",
+		.codec_dai_name = "tavil_rx4",
+		.no_pcm = 1,
+		.dpcm_playback = 1,
+		.be_id = MSM_BACKEND_DAI_SLIMBUS_6_RX,
+		.be_hw_params_fixup = msm_be_hw_params_fixup,
+		.ops = &msm_be_ops,
+		/* dai link has playback support */
+		.ignore_pmdown_time = 1,
+		.ignore_suspend = 1,
+	},
+	/* Slimbus VI Recording */
+	{
+		.name = LPASS_BE_SLIMBUS_TX_VI,
+		.stream_name = "Slimbus4 Capture",
+		.cpu_dai_name = "msm-dai-q6-dev.16393",
+		.platform_name = "msm-pcm-routing",
+		.codec_name = "tavil_codec",
+		.codec_dai_name = "tavil_vifeedback",
+		.be_id = MSM_BACKEND_DAI_SLIMBUS_4_TX,
+		.be_hw_params_fixup = msm_be_hw_params_fixup,
+		.ops = &msm_be_ops,
+		.ignore_suspend = 1,
+		.no_pcm = 1,
+		.dpcm_capture = 1,
+		.ignore_pmdown_time = 1,
+	},
+};
+
+static struct snd_soc_dai_link msm_wcn_be_dai_links[] = {
+	{
+		.name = LPASS_BE_SLIMBUS_7_RX,
+		.stream_name = "Slimbus7 Playback",
+		.cpu_dai_name = "msm-dai-q6-dev.16398",
+		.platform_name = "msm-pcm-routing",
+		.codec_name = "btfmslim_slave",
+		/* The BT codec driver determines its capabilities from the
+		 * dai name, so the BT codec dai name must always contain
+		 * the supported usecase information.
+		 */
+		.codec_dai_name = "btfm_bt_sco_a2dp_slim_rx",
+		.no_pcm = 1,
+		.dpcm_playback = 1,
+		.be_id = MSM_BACKEND_DAI_SLIMBUS_7_RX,
+		.be_hw_params_fixup = msm_be_hw_params_fixup,
+		.ops = &msm_wcn_ops,
+		/* dai link has playback support */
+		.ignore_pmdown_time = 1,
+		.ignore_suspend = 1,
+	},
+	{
+		.name = LPASS_BE_SLIMBUS_7_TX,
+		.stream_name = "Slimbus7 Capture",
+		.cpu_dai_name = "msm-dai-q6-dev.16399",
+		.platform_name = "msm-pcm-routing",
+		.codec_name = "btfmslim_slave",
+		.codec_dai_name = "btfm_bt_sco_slim_tx",
+		.no_pcm = 1,
+		.dpcm_capture = 1,
+		.be_id = MSM_BACKEND_DAI_SLIMBUS_7_TX,
+		.be_hw_params_fixup = msm_be_hw_params_fixup,
+		.ops = &msm_wcn_ops,
+		.ignore_suspend = 1,
+	},
+	{
+		.name = LPASS_BE_SLIMBUS_8_TX,
+		.stream_name = "Slimbus8 Capture",
+		.cpu_dai_name = "msm-dai-q6-dev.16401",
+		.platform_name = "msm-pcm-routing",
+		.codec_name = "btfmslim_slave",
+		.codec_dai_name = "btfm_fm_slim_tx",
+		.no_pcm = 1,
+		.dpcm_capture = 1,
+		.be_id = MSM_BACKEND_DAI_SLIMBUS_8_TX,
+		.be_hw_params_fixup = msm_be_hw_params_fixup,
+		.init = &msm_wcn_init,
+		.ops = &msm_wcn_ops,
+		.ignore_suspend = 1,
+	},
+};
+
+static struct snd_soc_dai_link ext_disp_be_dai_link[] = {
+	/* HDMI BACK END DAI Link */
+	{
+		.name = LPASS_BE_HDMI,
+		.stream_name = "HDMI Playback",
+		.cpu_dai_name = "msm-dai-q6-hdmi.8",
+		.platform_name = "msm-pcm-routing",
+		.codec_name = "msm-ext-disp-audio-codec-rx",
+		.codec_dai_name = "msm_hdmi_audio_codec_rx_dai",
+		.no_pcm = 1,
+		.dpcm_playback = 1,
+		.be_id = MSM_BACKEND_DAI_HDMI_RX,
+		.be_hw_params_fixup = msm_be_hw_params_fixup,
+		.ignore_pmdown_time = 1,
+		.ignore_suspend = 1,
+	},
+	/* DISP PORT BACK END DAI Link */
+	{
+		.name = LPASS_BE_DISPLAY_PORT,
+		.stream_name = "Display Port Playback",
+		.cpu_dai_name = "msm-dai-q6-dp.24608",
+		.platform_name = "msm-pcm-routing",
+		.codec_name = "msm-ext-disp-audio-codec-rx",
+		.codec_dai_name = "msm_dp_audio_codec_rx_dai",
+		.no_pcm = 1,
+		.dpcm_playback = 1,
+		.be_id = MSM_BACKEND_DAI_DISPLAY_PORT_RX,
+		.be_hw_params_fixup = msm_be_hw_params_fixup,
+		.ignore_pmdown_time = 1,
+		.ignore_suspend = 1,
+	},
+};
+
+static struct snd_soc_dai_link msm_mi2s_be_dai_links[] = {
+	{
+		.name = LPASS_BE_PRI_MI2S_RX,
+		.stream_name = "Primary MI2S Playback",
+		.cpu_dai_name = "msm-dai-q6-mi2s.0",
+		.platform_name = "msm-pcm-routing",
+		.codec_name = "msm-stub-codec.1",
+		.codec_dai_name = "msm-stub-rx",
+		.no_pcm = 1,
+		.dpcm_playback = 1,
+		.be_id = MSM_BACKEND_DAI_PRI_MI2S_RX,
+		.be_hw_params_fixup = msm_be_hw_params_fixup,
+		.ops = &msm_mi2s_be_ops,
+		.ignore_suspend = 1,
+		.ignore_pmdown_time = 1,
+	},
+	{
+		.name = LPASS_BE_PRI_MI2S_TX,
+		.stream_name = "Primary MI2S Capture",
+		.cpu_dai_name = "msm-dai-q6-mi2s.0",
+		.platform_name = "msm-pcm-routing",
+		.codec_name = "msm-stub-codec.1",
+		.codec_dai_name = "msm-stub-tx",
+		.no_pcm = 1,
+		.dpcm_capture = 1,
+		.be_id = MSM_BACKEND_DAI_PRI_MI2S_TX,
+		.be_hw_params_fixup = msm_be_hw_params_fixup,
+		.ops = &msm_mi2s_be_ops,
+		.ignore_suspend = 1,
+	},
+	{
+		.name = LPASS_BE_SEC_MI2S_RX,
+		.stream_name = "Secondary MI2S Playback",
+		.cpu_dai_name = "msm-dai-q6-mi2s.1",
+		.platform_name = "msm-pcm-routing",
+		.codec_name = "msm-stub-codec.1",
+		.codec_dai_name = "msm-stub-rx",
+		.no_pcm = 1,
+		.dpcm_playback = 1,
+		.be_id = MSM_BACKEND_DAI_SECONDARY_MI2S_RX,
+		.be_hw_params_fixup = msm_be_hw_params_fixup,
+		.ops = &msm_mi2s_be_ops,
+		.ignore_suspend = 1,
+		.ignore_pmdown_time = 1,
+	},
+	{
+		.name = LPASS_BE_SEC_MI2S_TX,
+		.stream_name = "Secondary MI2S Capture",
+		.cpu_dai_name = "msm-dai-q6-mi2s.1",
+		.platform_name = "msm-pcm-routing",
+		.codec_name = "msm-stub-codec.1",
+		.codec_dai_name = "msm-stub-tx",
+		.no_pcm = 1,
+		.dpcm_capture = 1,
+		.be_id = MSM_BACKEND_DAI_SECONDARY_MI2S_TX,
+		.be_hw_params_fixup = msm_be_hw_params_fixup,
+		.ops = &msm_mi2s_be_ops,
+		.ignore_suspend = 1,
+	},
+	{
+		.name = LPASS_BE_TERT_MI2S_RX,
+		.stream_name = "Tertiary MI2S Playback",
+		.cpu_dai_name = "msm-dai-q6-mi2s.2",
+		.platform_name = "msm-pcm-routing",
+		.codec_name = "msm-stub-codec.1",
+		.codec_dai_name = "msm-stub-rx",
+		.no_pcm = 1,
+		.dpcm_playback = 1,
+		.be_id = MSM_BACKEND_DAI_TERTIARY_MI2S_RX,
+		.be_hw_params_fixup = msm_be_hw_params_fixup,
+		.ops = &msm_mi2s_be_ops,
+		.ignore_suspend = 1,
+		.ignore_pmdown_time = 1,
+	},
+	{
+		.name = LPASS_BE_TERT_MI2S_TX,
+		.stream_name = "Tertiary MI2S Capture",
+		.cpu_dai_name = "msm-dai-q6-mi2s.2",
+		.platform_name = "msm-pcm-routing",
+		.codec_name = "msm-stub-codec.1",
+		.codec_dai_name = "msm-stub-tx",
+		.no_pcm = 1,
+		.dpcm_capture = 1,
+		.be_id = MSM_BACKEND_DAI_TERTIARY_MI2S_TX,
+		.be_hw_params_fixup = msm_be_hw_params_fixup,
+		.ops = &msm_mi2s_be_ops,
+		.ignore_suspend = 1,
+	},
+	{
+		.name = LPASS_BE_QUAT_MI2S_RX,
+		.stream_name = "Quaternary MI2S Playback",
+		.cpu_dai_name = "msm-dai-q6-mi2s.3",
+		.platform_name = "msm-pcm-routing",
+		.codec_name = "msm-stub-codec.1",
+		.codec_dai_name = "msm-stub-rx",
+		.no_pcm = 1,
+		.dpcm_playback = 1,
+		.be_id = MSM_BACKEND_DAI_QUATERNARY_MI2S_RX,
+		.be_hw_params_fixup = msm_be_hw_params_fixup,
+		.ops = &msm_mi2s_be_ops,
+		.ignore_suspend = 1,
+		.ignore_pmdown_time = 1,
+	},
+	{
+		.name = LPASS_BE_QUAT_MI2S_TX,
+		.stream_name = "Quaternary MI2S Capture",
+		.cpu_dai_name = "msm-dai-q6-mi2s.3",
+		.platform_name = "msm-pcm-routing",
+		.codec_name = "msm-stub-codec.1",
+		.codec_dai_name = "msm-stub-tx",
+		.no_pcm = 1,
+		.dpcm_capture = 1,
+		.be_id = MSM_BACKEND_DAI_QUATERNARY_MI2S_TX,
+		.be_hw_params_fixup = msm_be_hw_params_fixup,
+		.ops = &msm_mi2s_be_ops,
+		.ignore_suspend = 1,
+	},
+};
+
+static struct snd_soc_dai_link msm_auxpcm_be_dai_links[] = {
+	/* Primary AUX PCM Backend DAI Links */
+	{
+		.name = LPASS_BE_AUXPCM_RX,
+		.stream_name = "AUX PCM Playback",
+		.cpu_dai_name = "msm-dai-q6-auxpcm.1",
+		.platform_name = "msm-pcm-routing",
+		.codec_name = "msm-stub-codec.1",
+		.codec_dai_name = "msm-stub-rx",
+		.no_pcm = 1,
+		.dpcm_playback = 1,
+		.be_id = MSM_BACKEND_DAI_AUXPCM_RX,
+		.be_hw_params_fixup = msm_be_hw_params_fixup,
+		.ignore_pmdown_time = 1,
+		.ignore_suspend = 1,
+		.ops = &msm_aux_pcm_be_ops,
+	},
+	{
+		.name = LPASS_BE_AUXPCM_TX,
+		.stream_name = "AUX PCM Capture",
+		.cpu_dai_name = "msm-dai-q6-auxpcm.1",
+		.platform_name = "msm-pcm-routing",
+		.codec_name = "msm-stub-codec.1",
+		.codec_dai_name = "msm-stub-tx",
+		.no_pcm = 1,
+		.dpcm_capture = 1,
+		.be_id = MSM_BACKEND_DAI_AUXPCM_TX,
+		.be_hw_params_fixup = msm_be_hw_params_fixup,
+		.ignore_pmdown_time = 1,
+		.ignore_suspend = 1,
+		.ops = &msm_aux_pcm_be_ops,
+	},
+	/* Secondary AUX PCM Backend DAI Links */
+	{
+		.name = LPASS_BE_SEC_AUXPCM_RX,
+		.stream_name = "Sec AUX PCM Playback",
+		.cpu_dai_name = "msm-dai-q6-auxpcm.2",
+		.platform_name = "msm-pcm-routing",
+		.codec_name = "msm-stub-codec.1",
+		.codec_dai_name = "msm-stub-rx",
+		.no_pcm = 1,
+		.dpcm_playback = 1,
+		.be_id = MSM_BACKEND_DAI_SEC_AUXPCM_RX,
+		.be_hw_params_fixup = msm_be_hw_params_fixup,
+		.ignore_pmdown_time = 1,
+		.ignore_suspend = 1,
+		.ops = &msm_aux_pcm_be_ops,
+	},
+	{
+		.name = LPASS_BE_SEC_AUXPCM_TX,
+		.stream_name = "Sec AUX PCM Capture",
+		.cpu_dai_name = "msm-dai-q6-auxpcm.2",
+		.platform_name = "msm-pcm-routing",
+		.codec_name = "msm-stub-codec.1",
+		.codec_dai_name = "msm-stub-tx",
+		.no_pcm = 1,
+		.dpcm_capture = 1,
+		.be_id = MSM_BACKEND_DAI_SEC_AUXPCM_TX,
+		.be_hw_params_fixup = msm_be_hw_params_fixup,
+		.ignore_suspend = 1,
+		.ignore_pmdown_time = 1,
+		.ops = &msm_aux_pcm_be_ops,
+	},
+	/* Tertiary AUX PCM Backend DAI Links */
+	{
+		.name = LPASS_BE_TERT_AUXPCM_RX,
+		.stream_name = "Tert AUX PCM Playback",
+		.cpu_dai_name = "msm-dai-q6-auxpcm.3",
+		.platform_name = "msm-pcm-routing",
+		.codec_name = "msm-stub-codec.1",
+		.codec_dai_name = "msm-stub-rx",
+		.no_pcm = 1,
+		.dpcm_playback = 1,
+		.be_id = MSM_BACKEND_DAI_TERT_AUXPCM_RX,
+		.be_hw_params_fixup = msm_be_hw_params_fixup,
+		.ignore_pmdown_time = 1,
+		.ignore_suspend = 1,
+		.ops = &msm_aux_pcm_be_ops,
+	},
+	{
+		.name = LPASS_BE_TERT_AUXPCM_TX,
+		.stream_name = "Tert AUX PCM Capture",
+		.cpu_dai_name = "msm-dai-q6-auxpcm.3",
+		.platform_name = "msm-pcm-routing",
+		.codec_name = "msm-stub-codec.1",
+		.codec_dai_name = "msm-stub-tx",
+		.no_pcm = 1,
+		.dpcm_capture = 1,
+		.be_id = MSM_BACKEND_DAI_TERT_AUXPCM_TX,
+		.be_hw_params_fixup = msm_be_hw_params_fixup,
+		.ignore_suspend = 1,
+		.ignore_pmdown_time = 1,
+		.ops = &msm_aux_pcm_be_ops,
+	},
+	/* Quaternary AUX PCM Backend DAI Links */
+	{
+		.name = LPASS_BE_QUAT_AUXPCM_RX,
+		.stream_name = "Quat AUX PCM Playback",
+		.cpu_dai_name = "msm-dai-q6-auxpcm.4",
+		.platform_name = "msm-pcm-routing",
+		.codec_name = "msm-stub-codec.1",
+		.codec_dai_name = "msm-stub-rx",
+		.no_pcm = 1,
+		.dpcm_playback = 1,
+		.be_id = MSM_BACKEND_DAI_QUAT_AUXPCM_RX,
+		.be_hw_params_fixup = msm_be_hw_params_fixup,
+		.ignore_pmdown_time = 1,
+		.ignore_suspend = 1,
+		.ops = &msm_aux_pcm_be_ops,
+	},
+	{
+		.name = LPASS_BE_QUAT_AUXPCM_TX,
+		.stream_name = "Quat AUX PCM Capture",
+		.cpu_dai_name = "msm-dai-q6-auxpcm.4",
+		.platform_name = "msm-pcm-routing",
+		.codec_name = "msm-stub-codec.1",
+		.codec_dai_name = "msm-stub-tx",
+		.no_pcm = 1,
+		.dpcm_capture = 1,
+		.be_id = MSM_BACKEND_DAI_QUAT_AUXPCM_TX,
+		.be_hw_params_fixup = msm_be_hw_params_fixup,
+		.ignore_suspend = 1,
+		.ignore_pmdown_time = 1,
+		.ops = &msm_aux_pcm_be_ops,
+	},
+};
+
+static int msm_snd_card_late_probe(struct snd_soc_card *card)
+{
+	const char *be_dl_name = LPASS_BE_SLIMBUS_0_RX;
+	struct snd_soc_pcm_runtime *rtd;
+	int ret = 0;
+	void *mbhc_calibration;
+
+	rtd = snd_soc_get_pcm_runtime(card, be_dl_name);
+	if (!rtd) {
+		dev_err(card->dev,
+			"%s: snd_soc_get_pcm_runtime for %s failed!\n",
+			__func__, be_dl_name);
+		ret = -EINVAL;
+		goto err_pcm_runtime;
+	}
+
+	mbhc_calibration = def_tasha_mbhc_cal();
+	if (!mbhc_calibration) {
+		ret = -ENOMEM;
+		goto err_mbhc_cal;
+	}
+	wcd_mbhc_cfg.calibration = mbhc_calibration;
+	ret = tasha_mbhc_hs_detect(rtd->codec, &wcd_mbhc_cfg);
+	if (ret) {
+		dev_err(card->dev, "%s: mbhc hs detect failed, err:%d\n",
+			__func__, ret);
+		goto err_hs_detect;
+	}
+	return 0;
+
+err_hs_detect:
+	kfree(mbhc_calibration);
+err_mbhc_cal:
+err_pcm_runtime:
+	return ret;
+}
+
+static int msm_snd_card_tavil_late_probe(struct snd_soc_card *card)
+{
+	const char *be_dl_name = LPASS_BE_SLIMBUS_0_RX;
+	struct snd_soc_pcm_runtime *rtd;
+	int ret = 0;
+	void *mbhc_calibration;
+
+	rtd = snd_soc_get_pcm_runtime(card, be_dl_name);
+	if (!rtd) {
+		dev_err(card->dev,
+			"%s: snd_soc_get_pcm_runtime for %s failed!\n",
+			__func__, be_dl_name);
+		ret = -EINVAL;
+		goto err_pcm_runtime;
+	}
+
+	mbhc_calibration = def_tavil_mbhc_cal();
+	if (!mbhc_calibration) {
+		ret = -ENOMEM;
+		goto err_mbhc_cal;
+	}
+	wcd_mbhc_cfg.calibration = mbhc_calibration;
+	ret = tavil_mbhc_hs_detect(rtd->codec, &wcd_mbhc_cfg);
+	if (ret) {
+		dev_err(card->dev, "%s: mbhc hs detect failed, err:%d\n",
+			__func__, ret);
+		goto err_hs_detect;
+	}
+	return 0;
+
+err_hs_detect:
+	kfree(mbhc_calibration);
+err_mbhc_cal:
+err_pcm_runtime:
+	return ret;
+}
+
+struct snd_soc_card snd_soc_card_tasha_msm = {
+	.name		= "msm8998-tasha-snd-card",
+	.late_probe	= msm_snd_card_late_probe,
+};
+
+struct snd_soc_card snd_soc_card_tavil_msm = {
+	.name		= "msm8998-tavil-snd-card",
+	.late_probe	= msm_snd_card_tavil_late_probe,
+};
+
+static int msm_populate_dai_link_component_of_node(
+					struct snd_soc_card *card)
+{
+	int i, index, ret = 0;
+	struct device *cdev = card->dev;
+	struct snd_soc_dai_link *dai_link = card->dai_link;
+	struct device_node *np;
+	struct msm_asoc_mach_data *pdata =
+				snd_soc_card_get_drvdata(card);
+
+	if (!cdev) {
+		pr_err("%s: Sound card device is NULL\n", __func__);
+		return -ENODEV;
+	}
+
+	for (i = 0; i < card->num_links; i++) {
+		if (dai_link[i].platform_of_node && dai_link[i].cpu_of_node)
+			continue;
+
+		/* populate platform_of_node for snd card dai links */
+		if (dai_link[i].platform_name &&
+		    !dai_link[i].platform_of_node) {
+			index = of_property_match_string(cdev->of_node,
+						"asoc-platform-names",
+						dai_link[i].platform_name);
+			if (index < 0) {
+				pr_err("%s: No match found for platform name: %s\n",
+					__func__, dai_link[i].platform_name);
+				ret = index;
+				goto err;
+			}
+			np = of_parse_phandle(cdev->of_node, "asoc-platform",
+					      index);
+			if (!np) {
+				pr_err("%s: retrieving phandle for platform %s, index %d failed\n",
+					__func__, dai_link[i].platform_name,
+					index);
+				ret = -ENODEV;
+				goto err;
+			}
+			dai_link[i].platform_of_node = np;
+			dai_link[i].platform_name = NULL;
+		}
+
+		/* populate cpu_of_node for snd card dai links */
+		if (dai_link[i].cpu_dai_name && !dai_link[i].cpu_of_node) {
+			index = of_property_match_string(cdev->of_node,
+						 "asoc-cpu-names",
+						 dai_link[i].cpu_dai_name);
+			if (index >= 0) {
+				np = of_parse_phandle(cdev->of_node, "asoc-cpu",
+						index);
+				if (!np) {
+					pr_err("%s: retrieving phandle for cpu dai %s failed\n",
+						__func__,
+						dai_link[i].cpu_dai_name);
+					ret = -ENODEV;
+					goto err;
+				}
+				dai_link[i].cpu_of_node = np;
+				dai_link[i].cpu_dai_name = NULL;
+			}
+		}
+
+		/* ugly hack for freebox codecs */
+		if (pdata->has_fbx_wm8804 &&
+		    !strcmp(dai_link[i].name, LPASS_BE_TERT_MI2S_TX)) {
+			dai_link[i].codec_name = "wm8804.7-003b";
+			dai_link[i].codec_dai_name = "wm8804-spdif";
+		}
+
+		if (pdata->has_fbx_sil9437 &&
+		    !strcmp(dai_link[i].name, LPASS_BE_QUAT_MI2S_TX)) {
+			dai_link[i].codec_name = "sil9437.7-0031";
+			dai_link[i].codec_dai_name = "sil9437-earc";
+		}
+
+		/* populate codec_of_node for snd card dai links */
+		if (dai_link[i].codec_name && !dai_link[i].codec_of_node) {
+			index = of_property_match_string(cdev->of_node,
+						 "asoc-codec-names",
+						 dai_link[i].codec_name);
+			if (index < 0)
+				continue;
+			np = of_parse_phandle(cdev->of_node, "asoc-codec",
+					      index);
+			if (!np) {
+				pr_err("%s: retrieving phandle for codec %s failed\n",
+					__func__, dai_link[i].codec_name);
+				ret = -ENODEV;
+				goto err;
+			}
+			dai_link[i].codec_of_node = np;
+			dai_link[i].codec_name = NULL;
+		}
+	}
+
+err:
+	return ret;
+}
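+
+/*
+ * The loop above resolves DAI link name strings into device-tree phandles.
+ * A minimal sketch of the machine-node properties it expects (the phandle
+ * labels below are illustrative, not taken from a real board file):
+ *
+ *	asoc-platform = <&pcm_routing>;
+ *	asoc-platform-names = "msm-pcm-routing";
+ *	asoc-cpu = <&dai_mi2s0>;
+ *	asoc-cpu-names = "msm-dai-q6-mi2s.0";
+ *	asoc-codec = <&stub_codec>;
+ *	asoc-codec-names = "msm-stub-codec.1";
+ *
+ * of_property_match_string() returns the index of the matching name, and
+ * the phandle at the same index in the companion property supplies the
+ * of_node that replaces the name-based reference.
+ */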
+
+static int msm_prepare_us_euro(struct snd_soc_card *card)
+{
+	struct msm_asoc_mach_data *pdata =
+				snd_soc_card_get_drvdata(card);
+	int ret = 0;
+
+	if (pdata->us_euro_gpio >= 0) {
+		dev_dbg(card->dev, "%s: us_euro gpio request %d\n", __func__,
+			pdata->us_euro_gpio);
+		ret = gpio_request(pdata->us_euro_gpio, "TASHA_CODEC_US_EURO");
+		if (ret) {
+			dev_err(card->dev,
+				"%s: Failed to request codec US/EURO gpio %d error %d\n",
+				__func__, pdata->us_euro_gpio, ret);
+		}
+	}
+
+	return ret;
+}
+
+struct snd_soc_card snd_soc_card_freebox_msm = {
+	.name		= "msm8998-freebox-snd-card",
+};
+
+static const struct of_device_id msm8998_asoc_machine_of_match[]  = {
+	{ .compatible = "qcom,msm8998-asoc-snd-tasha",
+	  .data = "tasha_codec"},
+	{ .compatible = "qcom,msm8998-asoc-snd-tavil",
+	  .data = "tavil_codec"},
+	{ .compatible = "qcom,msm8998-asoc-snd-freebox",
+	  .data = "freebox"},
+	{},
+};
+
+static struct snd_soc_card *populate_snd_card_dailinks(struct device *dev)
+{
+	struct snd_soc_card *card = NULL;
+	struct snd_soc_dai_link *dailink;
+	int max_links, total_links;
+	const struct of_device_id *match;
+
+	match = of_match_node(msm8998_asoc_machine_of_match, dev->of_node);
+	if (!match) {
+		dev_err(dev, "%s: No DT match found for sound card\n",
+			__func__);
+		return NULL;
+	}
+
+	max_links = ARRAY_SIZE(msm_common_dai_links) +
+		ARRAY_SIZE(msm_tasha_fe_dai_links) +
+		ARRAY_SIZE(msm_tavil_fe_dai_links) +
+		ARRAY_SIZE(msm_common_misc_fe_dai_links) +
+		ARRAY_SIZE(msm_common_be_dai_links) +
+		ARRAY_SIZE(msm_tasha_be_dai_links) +
+		ARRAY_SIZE(msm_tavil_be_dai_links) +
+		ARRAY_SIZE(msm_wcn_be_dai_links) +
+		ARRAY_SIZE(ext_disp_be_dai_link) +
+		ARRAY_SIZE(msm_mi2s_be_dai_links) +
+		ARRAY_SIZE(msm_auxpcm_be_dai_links);
+
+	dailink = devm_kcalloc(dev, max_links, sizeof(*dailink), GFP_KERNEL);
+	if (!dailink)
+		return NULL;
+
+	total_links = 0;
+
+	memcpy(dailink, msm_common_dai_links,
+	       sizeof(msm_common_dai_links));
+	total_links += ARRAY_SIZE(msm_common_dai_links);
+
+	memcpy(dailink + total_links, msm_common_misc_fe_dai_links,
+	       sizeof(msm_common_misc_fe_dai_links));
+	total_links += ARRAY_SIZE(msm_common_misc_fe_dai_links);
+
+	memcpy(dailink + total_links, msm_common_be_dai_links,
+	       sizeof(msm_common_be_dai_links));
+	total_links += ARRAY_SIZE(msm_common_be_dai_links);
+
+	if (!strcmp(match->data, "tasha_codec")) {
+		card = &snd_soc_card_tasha_msm;
+
+		memcpy(dailink + total_links, msm_tasha_fe_dai_links,
+		       sizeof(msm_tasha_fe_dai_links));
+		total_links += ARRAY_SIZE(msm_tasha_fe_dai_links);
+
+		memcpy(dailink + total_links, msm_tasha_be_dai_links,
+		       sizeof(msm_tasha_be_dai_links));
+		total_links += ARRAY_SIZE(msm_tasha_be_dai_links);
+
+	} else if (!strcmp(match->data, "tavil_codec")) {
+		card = &snd_soc_card_tavil_msm;
+
+		memcpy(dailink + total_links, msm_tavil_fe_dai_links,
+		       sizeof(msm_tavil_fe_dai_links));
+		total_links += ARRAY_SIZE(msm_tavil_fe_dai_links);
+
+		memcpy(dailink + total_links, msm_tavil_be_dai_links,
+		       sizeof(msm_tavil_be_dai_links));
+		total_links += ARRAY_SIZE(msm_tavil_be_dai_links);
+
+	} else if (!strcmp(match->data, "freebox")) {
+		card = &snd_soc_card_freebox_msm;
+	}
+
+	if (of_property_read_bool(dev->of_node, "qcom,wcn-btfm")) {
+		dev_dbg(dev, "%s(): WCN BTFM support present\n", __func__);
+		memcpy(dailink + total_links, msm_wcn_be_dai_links,
+		       sizeof(msm_wcn_be_dai_links));
+		total_links += ARRAY_SIZE(msm_wcn_be_dai_links);
+	}
+
+	if (of_property_read_bool(dev->of_node, "qcom,ext-disp-audio-rx")) {
+		dev_dbg(dev, "%s(): External display audio support present\n",
+			__func__);
+		memcpy(dailink + total_links, ext_disp_be_dai_link,
+		       sizeof(ext_disp_be_dai_link));
+		total_links += ARRAY_SIZE(ext_disp_be_dai_link);
+	}
+
+	if (of_property_read_bool(dev->of_node, "qcom,mi2s-audio-intf")) {
+		memcpy(dailink + total_links, msm_mi2s_be_dai_links,
+		       sizeof(msm_mi2s_be_dai_links));
+		total_links += ARRAY_SIZE(msm_mi2s_be_dai_links);
+	}
+
+	if (of_property_read_bool(dev->of_node, "qcom,auxpcm-audio-intf")) {
+		memcpy(dailink + total_links, msm_auxpcm_be_dai_links,
+		       sizeof(msm_auxpcm_be_dai_links));
+		total_links += ARRAY_SIZE(msm_auxpcm_be_dai_links);
+	}
+
+	if (card) {
+		card->dai_link = dailink;
+		card->num_links = total_links;
+	}
+
+	return card;
+}
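+
+/*
+ * The optional BE link sets above are appended only when the matching
+ * boolean properties are present in the sound card's DT node, e.g.
+ * (hypothetical node, property names as parsed above):
+ *
+ *	sound {
+ *		compatible = "qcom,msm8998-asoc-snd-tasha";
+ *		qcom,wcn-btfm;
+ *		qcom,ext-disp-audio-rx;
+ *		qcom,mi2s-audio-intf;
+ *		qcom,auxpcm-audio-intf;
+ *	};
+ */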
+
+static int msm_wsa881x_init(struct snd_soc_component *component)
+{
+	u8 spkleft_ports[WSA881X_MAX_SWR_PORTS] = {100, 101, 102, 106};
+	u8 spkright_ports[WSA881X_MAX_SWR_PORTS] = {103, 104, 105, 107};
+	unsigned int ch_rate[WSA881X_MAX_SWR_PORTS] = {2400, 600, 300, 1200};
+	unsigned int ch_mask[WSA881X_MAX_SWR_PORTS] = {0x1, 0xF, 0x3, 0x3};
+	struct snd_soc_codec *codec = snd_soc_component_to_codec(component);
+	struct msm_asoc_mach_data *pdata;
+	struct snd_soc_dapm_context *dapm;
+	int ret = 0;
+
+	if (!codec) {
+		pr_err("%s: codec is NULL\n", __func__);
+		return -EINVAL;
+	}
+
+	dapm = snd_soc_codec_get_dapm(codec);
+
+	if (!strcmp(component->name_prefix, "SpkrLeft")) {
+		dev_dbg(codec->dev, "%s: setting left ch map to codec %s\n",
+			__func__, codec->component.name);
+		wsa881x_set_channel_map(codec, &spkleft_ports[0],
+				WSA881X_MAX_SWR_PORTS, &ch_mask[0],
+				&ch_rate[0]);
+		if (dapm->component) {
+			snd_soc_dapm_ignore_suspend(dapm, "SpkrLeft IN");
+			snd_soc_dapm_ignore_suspend(dapm, "SpkrLeft SPKR");
+		}
+	} else if (!strcmp(component->name_prefix, "SpkrRight")) {
+		dev_dbg(codec->dev, "%s: setting right ch map to codec %s\n",
+			__func__, codec->component.name);
+		wsa881x_set_channel_map(codec, &spkright_ports[0],
+				WSA881X_MAX_SWR_PORTS, &ch_mask[0],
+				&ch_rate[0]);
+		if (dapm->component) {
+			snd_soc_dapm_ignore_suspend(dapm, "SpkrRight IN");
+			snd_soc_dapm_ignore_suspend(dapm, "SpkrRight SPKR");
+		}
+	} else {
+		dev_err(codec->dev, "%s: wrong codec name %s\n", __func__,
+			codec->component.name);
+		ret = -EINVAL;
+		goto err_codec;
+	}
+	pdata = snd_soc_card_get_drvdata(component->card);
+	if (pdata && pdata->codec_root)
+		wsa881x_codec_info_create_codec_entry(pdata->codec_root,
+						      codec);
+
+err_codec:
+	return ret;
+}
+
+static int msm_init_wsa_dev(struct platform_device *pdev,
+				struct snd_soc_card *card)
+{
+	struct device_node *wsa_of_node;
+	u32 wsa_max_devs;
+	/* signed: of_count_phandle_with_args() can return -ENOENT */
+	int wsa_dev_cnt;
+	int i;
+	struct msm_wsa881x_dev_info *wsa881x_dev_info;
+	const char *wsa_auxdev_name_prefix[1];
+	char *dev_name_str = NULL;
+	int found = 0;
+	int ret = 0;
+
+	/* Get maximum WSA device count for this platform */
+	ret = of_property_read_u32(pdev->dev.of_node,
+				   "qcom,wsa-max-devs", &wsa_max_devs);
+	if (ret) {
+		dev_dbg(&pdev->dev,
+			"%s: wsa-max-devs property missing in DT %s, ret = %d\n",
+			__func__, pdev->dev.of_node->full_name, ret);
+		/* a missing property is not fatal; treat as no WSA devices */
+		ret = 0;
+		goto err_dt;
+	}
+	if (wsa_max_devs == 0) {
+		dev_warn(&pdev->dev,
+			 "%s: Max WSA devices is 0 for this target?\n",
+			 __func__);
+		goto err_dt;
+	}
+
+	/* Get count of WSA device phandles for this platform */
+	wsa_dev_cnt = of_count_phandle_with_args(pdev->dev.of_node,
+						 "qcom,wsa-devs", NULL);
+	if (wsa_dev_cnt == -ENOENT) {
+		dev_warn(&pdev->dev, "%s: No wsa device defined in DT.\n",
+			 __func__);
+		goto err_dt;
+	} else if (wsa_dev_cnt <= 0) {
+		dev_err(&pdev->dev,
+			"%s: Error reading wsa device from DT. wsa_dev_cnt = %d\n",
+			__func__, wsa_dev_cnt);
+		ret = -EINVAL;
+		goto err_dt;
+	}
+
+	/*
+	 * The total phandle count is expected to be at least the maximum
+	 * possible WSA count. If it is smaller, clamp the maximum count
+	 * down to the number of phandles actually present.
+	 */
+	if (wsa_dev_cnt < wsa_max_devs) {
+		dev_dbg(&pdev->dev,
+			"%s: wsa_max_devs = %d cannot exceed wsa_dev_cnt = %d\n",
+			__func__, wsa_max_devs, wsa_dev_cnt);
+		wsa_max_devs = wsa_dev_cnt;
+	}
+
+	/* Make sure a prefix string is passed for each WSA device */
+	ret = of_property_count_strings(pdev->dev.of_node,
+					"qcom,wsa-aux-dev-prefix");
+	if (ret != wsa_dev_cnt) {
+		dev_err(&pdev->dev,
+			"%s: expecting %d wsa prefix. Defined only %d in DT\n",
+			__func__, wsa_dev_cnt, ret);
+		ret = -EINVAL;
+		goto err_dt;
+	}
+
+	/*
+	 * Alloc mem to store phandle and index info of WSA device, if already
+	 * registered with ALSA core
+	 */
+	wsa881x_dev_info = devm_kcalloc(&pdev->dev, wsa_max_devs,
+					sizeof(struct msm_wsa881x_dev_info),
+					GFP_KERNEL);
+	if (!wsa881x_dev_info) {
+		ret = -ENOMEM;
+		goto err_mem;
+	}
+
+	/*
+	 * Check whether all WSA devices are already registered with the
+	 * ALSA core. For each node found, store the node and its index
+	 * in a local array for later use.
+	 */
+	for (i = 0; i < wsa_dev_cnt; i++) {
+		wsa_of_node = of_parse_phandle(pdev->dev.of_node,
+					    "qcom,wsa-devs", i);
+		if (unlikely(!wsa_of_node)) {
+			/* we should not be here */
+			dev_err(&pdev->dev,
+				"%s: wsa dev node is not present\n",
+				__func__);
+			ret = -EINVAL;
+			goto err_dev_node;
+		}
+		if (soc_find_component(wsa_of_node, NULL)) {
+			/* WSA device registered with ALSA core */
+			wsa881x_dev_info[found].of_node = wsa_of_node;
+			wsa881x_dev_info[found].index = i;
+			found++;
+			if (found == wsa_max_devs)
+				break;
+		}
+	}
+
+	if (found < wsa_max_devs) {
+		dev_dbg(&pdev->dev,
+			"%s: failed to find %d components. Found only %d\n",
+			__func__, wsa_max_devs, found);
+		return -EPROBE_DEFER;
+	}
+	dev_info(&pdev->dev,
+		"%s: found %d wsa881x devices registered with ALSA core\n",
+		__func__, found);
+
+	card->num_aux_devs = wsa_max_devs;
+	card->num_configs = wsa_max_devs;
+
+	/* Alloc array of AUX devs struct */
+	msm_aux_dev = devm_kcalloc(&pdev->dev, card->num_aux_devs,
+				       sizeof(struct snd_soc_aux_dev),
+				       GFP_KERNEL);
+	if (!msm_aux_dev) {
+		ret = -ENOMEM;
+		goto err_auxdev_mem;
+	}
+
+	/* Alloc array of codec conf struct */
+	msm_codec_conf = devm_kcalloc(&pdev->dev, card->num_aux_devs,
+					  sizeof(struct snd_soc_codec_conf),
+					  GFP_KERNEL);
+	if (!msm_codec_conf) {
+		ret = -ENOMEM;
+		goto err_codec_conf;
+	}
+
+	for (i = 0; i < card->num_aux_devs; i++) {
+		dev_name_str = devm_kzalloc(&pdev->dev, DEV_NAME_STR_LEN,
+					    GFP_KERNEL);
+		if (!dev_name_str) {
+			ret = -ENOMEM;
+			goto err_dev_str;
+		}
+
+		ret = of_property_read_string_index(pdev->dev.of_node,
+						    "qcom,wsa-aux-dev-prefix",
+						    wsa881x_dev_info[i].index,
+						    wsa_auxdev_name_prefix);
+		if (ret) {
+			dev_err(&pdev->dev,
+				"%s: failed to read wsa aux dev prefix, ret = %d\n",
+				__func__, ret);
+			ret = -EINVAL;
+			goto err_dt_prop;
+		}
+
+		/* use the full buffer size so multi-digit indices fit */
+		snprintf(dev_name_str, DEV_NAME_STR_LEN, "wsa881x.%d", i);
+		msm_aux_dev[i].name = dev_name_str;
+		msm_aux_dev[i].codec_name = NULL;
+		msm_aux_dev[i].codec_of_node =
+					wsa881x_dev_info[i].of_node;
+		msm_aux_dev[i].init = msm_wsa881x_init;
+		msm_codec_conf[i].dev_name = NULL;
+		msm_codec_conf[i].name_prefix = wsa_auxdev_name_prefix[0];
+		msm_codec_conf[i].of_node =
+				wsa881x_dev_info[i].of_node;
+	}
+	card->codec_conf = msm_codec_conf;
+	card->aux_dev = msm_aux_dev;
+
+	return 0;
+
+err_dt_prop:
+	devm_kfree(&pdev->dev, dev_name_str);
+err_dev_str:
+	devm_kfree(&pdev->dev, msm_codec_conf);
+err_codec_conf:
+	devm_kfree(&pdev->dev, msm_aux_dev);
+err_auxdev_mem:
+err_dev_node:
+	devm_kfree(&pdev->dev, wsa881x_dev_info);
+err_mem:
+err_dt:
+	return ret;
+}
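+
+/*
+ * Sketch of the WSA speaker amp properties consumed by msm_init_wsa_dev()
+ * above (phandle labels are hypothetical, property names are the ones
+ * actually parsed):
+ *
+ *	qcom,wsa-max-devs = <2>;
+ *	qcom,wsa-devs = <&wsa881x_left>, <&wsa881x_right>;
+ *	qcom,wsa-aux-dev-prefix = "SpkrLeft", "SpkrRight";
+ *
+ * Each device found becomes an aux dev named "wsa881x.<i>", and its prefix
+ * is stored in codec_conf; msm_wsa881x_init() later uses that prefix
+ * (component->name_prefix) to tell SpkrLeft apart from SpkrRight.
+ */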
+
+static void i2s_auxpcm_init(struct platform_device *pdev)
+{
+	struct resource *muxsel;
+	int count;
+	u32 mi2s_master_slave[MI2S_MAX];
+	u32 mi2s_ext_mclk[MI2S_MAX];
+	int ret;
+	char *str[PCM_I2S_SEL_MAX] = {
+		"lpaif_pri_mode_muxsel",
+		"lpaif_sec_mode_muxsel",
+		"lpaif_tert_mode_muxsel",
+		"lpaif_quat_mode_muxsel"
+	};
+
+	for (count = 0; count < MI2S_MAX; count++) {
+		mutex_init(&mi2s_intf_conf[count].lock);
+		mi2s_intf_conf[count].ref_cnt = 0;
+	}
+
+	for (count = 0; count < AUX_PCM_MAX; count++) {
+		mutex_init(&auxpcm_intf_conf[count].lock);
+		auxpcm_intf_conf[count].ref_cnt = 0;
+	}
+
+	for (count = 0; count < PCM_I2S_SEL_MAX; count++) {
+		mutex_init(&mi2s_auxpcm_conf[count].lock);
+		mi2s_auxpcm_conf[count].pcm_i2s_sel_vt_addr = NULL;
+	}
+
+	for (count = 0; count < PCM_I2S_SEL_MAX; count++) {
+		muxsel = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+						      str[count]);
+		if (muxsel) {
+			mi2s_auxpcm_conf[count].pcm_i2s_sel_vt_addr
+				= ioremap(muxsel->start, resource_size(muxsel));
+		}
+	}
+
+	ret = of_property_read_u32_array(pdev->dev.of_node,
+			"qcom,msm-mi2s-master",
+			mi2s_master_slave, MI2S_MAX);
+	if (ret) {
+		dev_dbg(&pdev->dev, "%s: no qcom,msm-mi2s-master in DT node\n",
+			__func__);
+	} else {
+		for (count = 0; count < MI2S_MAX; count++) {
+			mi2s_intf_conf[count].msm_is_mi2s_master =
+				mi2s_master_slave[count];
+		}
+	}
+
+	ret = of_property_read_u32_array(pdev->dev.of_node,
+					 "qcom,msm-mi2s-ext-mclk",
+					 mi2s_ext_mclk, MI2S_MAX);
+	if (ret) {
+		dev_dbg(&pdev->dev, "%s: no qcom,msm-mi2s-ext-mclk in DT node\n",
+			__func__);
+	} else {
+		for (count = 0; count < MI2S_MAX; count++)
+			mi2s_intf_conf[count].msm_is_ext_mclk =
+				mi2s_ext_mclk[count];
+	}
+}
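+
+/*
+ * The two u32 arrays parsed above are indexed by MI2S interface, e.g.
+ * (values are illustrative):
+ *
+ *	qcom,msm-mi2s-master = <1>, <1>, <0>, <1>;
+ *	qcom,msm-mi2s-ext-mclk = <0>, <0>, <0>, <1>;
+ *
+ * marking each of the MI2S_MAX interfaces as bit-clock master or slave
+ * and as driven by an external MCLK or not.
+ */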
+
+static void i2s_auxpcm_deinit(void)
+{
+	int count;
+
+	for (count = 0; count < PCM_I2S_SEL_MAX; count++)
+		if (mi2s_auxpcm_conf[count].pcm_i2s_sel_vt_addr != NULL)
+			iounmap(mi2s_auxpcm_conf[count].pcm_i2s_sel_vt_addr);
+}
+
+static int msm_asoc_machine_probe(struct platform_device *pdev)
+{
+	struct snd_soc_card *card;
+	struct msm_asoc_mach_data *pdata;
+	const char *mbhc_audio_jack_type = NULL;
+	char *mclk_freq_prop_name;
+	const struct of_device_id *match;
+	int ret;
+
+	if (!pdev->dev.of_node) {
+		dev_err(&pdev->dev, "No platform supplied from device tree\n");
+		return -EINVAL;
+	}
+
+	pdata = devm_kzalloc(&pdev->dev,
+			sizeof(struct msm_asoc_mach_data), GFP_KERNEL);
+	if (!pdata)
+		return -ENOMEM;
+
+	card = populate_snd_card_dailinks(&pdev->dev);
+	if (!card) {
+		dev_err(&pdev->dev, "%s: Card uninitialized\n", __func__);
+		ret = -EINVAL;
+		goto err;
+	}
+	card->dev = &pdev->dev;
+	platform_set_drvdata(pdev, card);
+	snd_soc_card_set_drvdata(card, pdata);
+
+	ret = snd_soc_of_parse_card_name(card, "qcom,model");
+	if (ret) {
+		dev_err(&pdev->dev, "parse card name failed, err:%d\n",
+			ret);
+		goto err;
+	}
+
+	if (of_property_read_bool(pdev->dev.of_node, "qcom,audio-routing")) {
+		ret = snd_soc_of_parse_audio_routing(card,
+					"qcom,audio-routing");
+		if (ret) {
+			dev_err(&pdev->dev, "parse audio routing failed, err:%d\n",
+				ret);
+			goto err;
+		}
+	}
+
+	match = of_match_node(msm8998_asoc_machine_of_match,
+			pdev->dev.of_node);
+	if (!match) {
+		dev_err(&pdev->dev, "%s: no matched codec is found.\n",
+			__func__);
+		goto err;
+	}
+
+	if (!strcmp(match->data, "tasha_codec"))
+		mclk_freq_prop_name = "qcom,tasha-mclk-clk-freq";
+	else if (!strcmp(match->data, "tavil_codec"))
+		mclk_freq_prop_name = "qcom,tavil-mclk-clk-freq";
+	else
+		mclk_freq_prop_name = NULL;
+
+	if (mclk_freq_prop_name != NULL) {
+		ret = of_property_read_u32(pdev->dev.of_node,
+				mclk_freq_prop_name, &pdata->mclk_freq);
+		if (ret) {
+			dev_err(&pdev->dev,
+				"Looking up %s property in node %s failed, err%d\n",
+				mclk_freq_prop_name,
+				pdev->dev.of_node->full_name, ret);
+			goto err;
+		}
+
+		if (pdata->mclk_freq != CODEC_EXT_CLK_RATE) {
+			dev_err(&pdev->dev, "unsupported mclk freq %u\n",
+				pdata->mclk_freq);
+			ret = -EINVAL;
+			goto err;
+		}
+	}
+
+	/* Check if Freebox HW codecs are expected from DT */
+	pdata->has_fbx_wm8804 =
+		of_property_read_bool(pdev->dev.of_node, "fbx,spdif-wm8804");
+	pdata->has_fbx_sil9437 =
+		of_property_read_bool(pdev->dev.of_node, "fbx,arc-sil9437");
+
+	ret = msm_populate_dai_link_component_of_node(card);
+	if (ret) {
+		ret = -EPROBE_DEFER;
+		goto err;
+	}
+	ret = msm_init_wsa_dev(pdev, card);
+	if (ret)
+		goto err;
+
+	ret = devm_snd_soc_register_card(&pdev->dev, card);
+	if (ret == -EPROBE_DEFER) {
+		if (codec_reg_done)
+			ret = -EINVAL;
+		goto err;
+	} else if (ret) {
+		dev_err(&pdev->dev, "snd_soc_register_card failed (%d)\n",
+			ret);
+		goto err;
+	}
+	dev_info(&pdev->dev, "Sound card %s registered\n", card->name);
+	spdev = pdev;
+
+	ret = of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev);
+	if (ret) {
+		dev_dbg(&pdev->dev, "%s: failed to add child nodes, ret=%d\n",
+			__func__, ret);
+	} else {
+		pdata->hph_en1_gpio_p = of_parse_phandle(pdev->dev.of_node,
+							"qcom,hph-en1-gpio", 0);
+		if (!pdata->hph_en1_gpio_p) {
+			dev_dbg(&pdev->dev, "property %s not detected in node %s\n",
+				"qcom,hph-en1-gpio",
+				pdev->dev.of_node->full_name);
+		}
+
+		pdata->hph_en0_gpio_p = of_parse_phandle(pdev->dev.of_node,
+							"qcom,hph-en0-gpio", 0);
+		if (!pdata->hph_en0_gpio_p) {
+			dev_dbg(&pdev->dev, "property %s not detected in node %s\n",
+				"qcom,hph-en0-gpio",
+				pdev->dev.of_node->full_name);
+		}
+	}
+
+	ret = of_property_read_string(pdev->dev.of_node,
+		"qcom,mbhc-audio-jack-type", &mbhc_audio_jack_type);
+	if (ret) {
+		dev_dbg(&pdev->dev, "Looking up %s property in node %s failed\n",
+			"qcom,mbhc-audio-jack-type",
+			pdev->dev.of_node->full_name);
+		dev_dbg(&pdev->dev, "Jack type properties set to default\n");
+	} else {
+		if (!strcmp(mbhc_audio_jack_type, "4-pole-jack")) {
+			wcd_mbhc_cfg.enable_anc_mic_detect = false;
+			dev_dbg(&pdev->dev, "This hardware has 4 pole jack\n");
+		} else if (!strcmp(mbhc_audio_jack_type, "5-pole-jack")) {
+			wcd_mbhc_cfg.enable_anc_mic_detect = true;
+			dev_dbg(&pdev->dev, "This hardware has 5 pole jack\n");
+		} else if (!strcmp(mbhc_audio_jack_type, "6-pole-jack")) {
+			wcd_mbhc_cfg.enable_anc_mic_detect = true;
+			dev_dbg(&pdev->dev, "This hardware has 6 pole jack\n");
+		} else {
+			wcd_mbhc_cfg.enable_anc_mic_detect = false;
+			dev_dbg(&pdev->dev, "Unknown value, set to default\n");
+		}
+	}
+	/*
+	 * Parse US-Euro gpio info from DT. Report no error if the us-euro
+	 * entry is not found in the DT, as some targets do not support
+	 * US-Euro detection.
+	 */
+	pdata->us_euro_gpio = of_get_named_gpio(pdev->dev.of_node,
+				"qcom,us-euro-gpios", 0);
+	if (!gpio_is_valid(pdata->us_euro_gpio))
+		pdata->us_euro_gpio_p = of_parse_phandle(pdev->dev.of_node,
+					"qcom,us-euro-gpios", 0);
+	if (!gpio_is_valid(pdata->us_euro_gpio) && (!pdata->us_euro_gpio_p)) {
+		dev_dbg(&pdev->dev, "property %s not detected in node %s\n",
+			"qcom,us-euro-gpios", pdev->dev.of_node->full_name);
+	} else {
+		dev_dbg(&pdev->dev, "%s detected\n", "qcom,us-euro-gpios");
+		wcd_mbhc_cfg.swap_gnd_mic = msm_swap_gnd_mic;
+	}
+
+	ret = msm_prepare_us_euro(card);
+	if (ret)
+		dev_dbg(&pdev->dev, "msm_prepare_us_euro failed (%d)\n",
+			ret);
+
+	/* Parse pinctrl info from devicetree */
+	ret = msm_get_pinctrl(pdev);
+	if (!ret) {
+		pr_debug("%s: pinctrl parsing successful\n", __func__);
+	} else {
+		dev_dbg(&pdev->dev,
+			"%s: Parsing pinctrl failed with %d. Cannot use Ports\n",
+			__func__, ret);
+		ret = 0;
+	}
+
+	i2s_auxpcm_init(pdev);
+
+	ret = snd_soc_add_card_controls(card, msm_snd_controls,
+					ARRAY_SIZE(msm_snd_controls));
+	if (ret < 0) {
+		pr_err("%s: add_card_controls failed, err %d\n",
+			__func__, ret);
+		goto err;
+	}
+
+	if (pdata->has_fbx_wm8804) {
+		ret = snd_soc_add_card_controls(card, fbx_wm8804_controls,
+						ARRAY_SIZE(fbx_wm8804_controls));
+		if (ret < 0) {
+			pr_err("%s: add_card_controls failed, err %d\n",
+			       __func__, ret);
+			goto err;
+		}
+	}
+
+	is_initial_boot = true;
+	ret = audio_notifier_register("msm8998", AUDIO_NOTIFIER_ADSP_DOMAIN,
+				      &service_nb);
+	if (ret < 0)
+		pr_err("%s: Audio notifier register failed ret = %d\n",
+			__func__, ret);
+
+	return 0;
+err:
+	if (pdata->us_euro_gpio > 0) {
+		dev_dbg(&pdev->dev, "%s free us_euro gpio %d\n",
+			__func__, pdata->us_euro_gpio);
+		gpio_free(pdata->us_euro_gpio);
+		pdata->us_euro_gpio = 0;
+	}
+	msm_release_pinctrl(pdev);
+	devm_kfree(&pdev->dev, pdata);
+	return ret;
+}
+
+static int msm_asoc_machine_remove(struct platform_device *pdev)
+{
+	struct snd_soc_card *card = platform_get_drvdata(pdev);
+	struct msm_asoc_mach_data *pdata =
+				snd_soc_card_get_drvdata(card);
+
+	gpio_free(pdata->us_euro_gpio);
+	i2s_auxpcm_deinit();
+
+	snd_soc_unregister_card(card);
+	return 0;
+}
+
+static struct platform_driver msm8998_asoc_machine_driver = {
+	.driver = {
+		.name = DRV_NAME,
+		.owner = THIS_MODULE,
+		.pm = &snd_soc_pm_ops,
+		.of_match_table = msm8998_asoc_machine_of_match,
+	},
+	.probe = msm_asoc_machine_probe,
+	.remove = msm_asoc_machine_remove,
+};
+module_platform_driver(msm8998_asoc_machine_driver);
+
+MODULE_DESCRIPTION("ALSA SoC msm");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:" DRV_NAME);
+MODULE_DEVICE_TABLE(of, msm8998_asoc_machine_of_match);
diff -Nruw linux-4.4.115-fbx/sound/soc/msm./msm-cpe-lsm.c linux-4.4.115-fbx/sound/soc/msm/msm-cpe-lsm.c
--- linux-4.4.115-fbx/sound/soc/msm./msm-cpe-lsm.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/sound/soc/msm/msm-cpe-lsm.c	2019-01-22 16:16:29.619301790 +0100
@@ -0,0 +1,3355 @@
+/*
+ * Copyright (c) 2013-2017, Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/kthread.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/delay.h>
+#include <linux/sched.h>
+#include <linux/freezer.h>
+#include <sound/soc.h>
+#include <sound/cpe_core.h>
+#include <sound/lsm_params.h>
+#include <sound/pcm_params.h>
+#include <sound/msm-slim-dma.h>
+
+#define SAMPLE_RATE_48KHZ 48000
+#define SAMPLE_RATE_16KHZ 16000
+#define LSM_VOICE_WAKEUP_APP_V2 2
+#define AFE_PORT_ID_1 1
+#define AFE_PORT_ID_3 3
+#define AFE_OUT_PORT_2 2
+#define LISTEN_MIN_NUM_PERIODS     2
+#define LISTEN_MAX_NUM_PERIODS     12
+#define LISTEN_MAX_PERIOD_SIZE     61440
+#define LISTEN_MIN_PERIOD_SIZE     320
+#define LISTEN_MAX_STATUS_PAYLOAD_SIZE 256
+#define MSM_CPE_MAX_CUSTOM_PARAM_SIZE 2048
+
+#define MSM_CPE_LAB_THREAD_TIMEOUT (3 * (HZ/10))
+
+/* do-while(0) so the macros expand safely as single statements */
+#define MSM_CPE_LSM_GRAB_LOCK(lock, name)	\
+do {						\
+	pr_debug("%s: %s lock acquire\n",	\
+		 __func__, name);		\
+	mutex_lock(lock);			\
+} while (0)
+
+#define MSM_CPE_LSM_REL_LOCK(lock, name)	\
+do {						\
+	pr_debug("%s: %s lock release\n",	\
+		 __func__, name);		\
+	mutex_unlock(lock);			\
+} while (0)
+
+/* Conventional and unconventional sample rates supported */
+static unsigned int supported_sample_rates[] = {
+	8000, 16000, 48000, 192000, 384000
+};
+
+static struct snd_pcm_hw_constraint_list constraints_sample_rates = {
+	.count = ARRAY_SIZE(supported_sample_rates),
+	.list = supported_sample_rates,
+	.mask = 0,
+};
+
+
+static struct snd_pcm_hardware msm_pcm_hardware_listen = {
+	.info =	(SNDRV_PCM_INFO_BLOCK_TRANSFER |
+		 SNDRV_PCM_INFO_MMAP_VALID |
+		 SNDRV_PCM_INFO_INTERLEAVED |
+		 SNDRV_PCM_INFO_PAUSE |
+		 SNDRV_PCM_INFO_RESUME),
+	.formats = (SNDRV_PCM_FMTBIT_S16_LE |
+		    SNDRV_PCM_FMTBIT_S24_LE |
+		    SNDRV_PCM_FMTBIT_S32_LE),
+	.rates = (SNDRV_PCM_RATE_16000 | SNDRV_PCM_RATE_48000 |
+		  SNDRV_PCM_RATE_192000 | SNDRV_PCM_RATE_384000),
+	.rate_min = 16000,
+	.rate_max = 384000,
+	.channels_min =	1,
+	.channels_max =	1,
+	.buffer_bytes_max = LISTEN_MAX_NUM_PERIODS *
+			    LISTEN_MAX_PERIOD_SIZE,
+	.period_bytes_min = LISTEN_MIN_PERIOD_SIZE,
+	.period_bytes_max = LISTEN_MAX_PERIOD_SIZE,
+	.periods_min = LISTEN_MIN_NUM_PERIODS,
+	.periods_max = LISTEN_MAX_NUM_PERIODS,
+	.fifo_size = 0,
+};
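+
+/*
+ * Worst-case DMA footprint implied by the constraints above:
+ * LISTEN_MAX_NUM_PERIODS * LISTEN_MAX_PERIOD_SIZE = 12 * 61440 bytes,
+ * i.e. 720 KiB per listen substream.
+ */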
+
+enum {
+	AFE_CMD_INVALID = 0,
+	AFE_CMD_PORT_START,
+	AFE_CMD_PORT_SUSPEND,
+	AFE_CMD_PORT_RESUME,
+	AFE_CMD_PORT_STOP,
+};
+
+enum cpe_lab_thread_status {
+	MSM_LSM_LAB_THREAD_STOP,
+	MSM_LSM_LAB_THREAD_RUNNING,
+	MSM_LSM_LAB_THREAD_ERROR,
+};
+
+struct cpe_hw_params {
+	u32 sample_rate;
+	u16 sample_size;
+	u32 buf_sz;
+	u32 period_count;
+	u16 channels;
+};
+
+struct cpe_data_pcm_buf {
+	u8 *mem;
+	phys_addr_t phys;
+};
+
+struct cpe_lsm_lab {
+	atomic_t in_count;
+	atomic_t abort_read;
+	u32 dma_write;
+	u32 buf_idx;
+	u32 pcm_size;
+	enum cpe_lab_thread_status thread_status;
+	struct cpe_data_pcm_buf *pcm_buf;
+	wait_queue_head_t period_wait;
+	struct completion comp;
+	struct completion thread_complete;
+};
+
+struct cpe_priv {
+	void *core_handle;
+	struct snd_soc_codec *codec;
+	struct wcd_cpe_lsm_ops lsm_ops;
+	struct wcd_cpe_afe_ops afe_ops;
+	bool afe_mad_ctl;
+	u32 input_port_id;
+};
+
+struct cpe_lsm_data {
+	struct device *dev;
+	struct cpe_lsm_session *lsm_session;
+	struct mutex lsm_api_lock;
+	struct cpe_lsm_lab lab;
+	struct cpe_hw_params hw_params;
+	struct snd_pcm_substream *substream;
+
+	wait_queue_head_t event_wait;
+	atomic_t event_avail;
+	atomic_t event_stop;
+
+	u8 ev_det_status;
+	u8 ev_det_pld_size;
+	u8 *ev_det_payload;
+
+	bool cpe_prepared;
+};
+
+static int msm_cpe_afe_mad_ctl_get(struct snd_kcontrol *kcontrol,
+					struct snd_ctl_elem_value *ucontrol)
+{
+	struct cpe_priv *cpe = kcontrol->private_data;
+
+	ucontrol->value.integer.value[0] = cpe->afe_mad_ctl;
+	return 0;
+}
+
+static int msm_cpe_afe_mad_ctl_put(struct snd_kcontrol *kcontrol,
+					struct snd_ctl_elem_value *ucontrol)
+{
+	struct cpe_priv *cpe = kcontrol->private_data;
+
+	cpe->afe_mad_ctl = ucontrol->value.integer.value[0];
+	return 0;
+}
+
+static struct snd_kcontrol_new msm_cpe_kcontrols[] = {
+	SOC_SINGLE_EXT("CPE AFE MAD Enable", SND_SOC_NOPM, 0, 1, 0,
+			msm_cpe_afe_mad_ctl_get, msm_cpe_afe_mad_ctl_put),
+};
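+
+/*
+ * The control above just latches a flag in cpe_priv; from userspace it
+ * could be toggled with something like (illustrative):
+ *
+ *	amixer -c <card> cset name='CPE AFE MAD Enable' 1
+ */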
+
+/*
+ * cpe_get_private_data: obtain ASoC platform driver private data
+ * @substream: ASoC substream for which private data to be obtained
+ */
+static struct cpe_priv *cpe_get_private_data(
+	struct snd_pcm_substream *substream)
+{
+	struct snd_soc_pcm_runtime *rtd;
+
+	if (!substream || !substream->private_data) {
+		pr_err("%s: %s is invalid\n",
+			__func__,
+			(!substream) ? "substream" : "private_data");
+		goto err_ret;
+	}
+
+	rtd = substream->private_data;
+
+	if (!rtd || !rtd->platform) {
+		pr_err("%s: %s is invalid\n",
+			 __func__,
+			(!rtd) ? "runtime" : "platform");
+		goto err_ret;
+	}
+
+	return snd_soc_platform_get_drvdata(rtd->platform);
+
+err_ret:
+	return NULL;
+}
+
+/*
+ * cpe_get_lsm_data: obtain the lsm session data given the substream
+ * @substream: ASoC substream for which lsm session data to be obtained
+ */
+static struct cpe_lsm_data *cpe_get_lsm_data(
+	struct snd_pcm_substream *substream)
+{
+	struct snd_pcm_runtime *runtime = substream->runtime;
+
+	return runtime->private_data;
+}
+
+static void msm_cpe_process_event_status(void *data,
+		u8 detect_status, u8 size, u8 *payload)
+{
+	struct cpe_lsm_data *lsm_d = data;
+
+	lsm_d->ev_det_status = detect_status;
+	lsm_d->ev_det_pld_size = size;
+
+	lsm_d->ev_det_payload = kzalloc(size, GFP_KERNEL);
+	if (!lsm_d->ev_det_payload) {
+		pr_err("%s: no memory for event payload, size = %u\n",
+			__func__, size);
+		return;
+	}
+	memcpy(lsm_d->ev_det_payload, payload, size);
+
+	atomic_set(&lsm_d->event_avail, 1);
+	wake_up(&lsm_d->event_wait);
+}
+
+static void msm_cpe_process_event_status_done(struct cpe_lsm_data *lsm_data)
+{
+	kfree(lsm_data->ev_det_payload);
+	lsm_data->ev_det_payload = NULL;
+
+	lsm_data->ev_det_status = 0;
+	lsm_data->ev_det_pld_size = 0;
+}
+
+/*
+ * msm_cpe_afe_port_cntl: Perform the afe port control
+ * @substream: substream for which afe port command to be performed
+ * @core_handle: handle to core
+ * @afe_ops: handle to the afe operations
+ * @afe_cfg: afe port configuration data
+ * @cmd: command to be sent to AFE
+ *
+ */
+static int msm_cpe_afe_port_cntl(
+		struct snd_pcm_substream *substream,
+		void *core_handle,
+		struct wcd_cpe_afe_ops *afe_ops,
+		struct wcd_cpe_afe_port_cfg *afe_cfg,
+		int cmd)
+{
+	struct snd_soc_pcm_runtime *rtd = substream->private_data;
+	int rc = 0;
+
+	if (!afe_cfg->port_id) {
+		/*
+		 * It is possible driver can get closed without prepare,
+		 * in which case afe ports will not be initialized.
+		 */
+		dev_dbg(rtd->dev,
+			"%s: Invalid afe port id\n",
+			__func__);
+		return 0;
+	}
+
+	switch (cmd) {
+	case AFE_CMD_PORT_START:
+		rc = afe_ops->afe_port_start(core_handle, afe_cfg);
+		if (rc != 0)
+			dev_err(rtd->dev,
+				"%s: AFE port start failed\n",
+				__func__);
+		break;
+	case AFE_CMD_PORT_SUSPEND:
+		rc = afe_ops->afe_port_suspend(core_handle, afe_cfg);
+		if (rc != 0)
+			dev_err(rtd->dev,
+				"%s: afe_suspend failed, err = %d\n",
+				__func__, rc);
+		break;
+	case AFE_CMD_PORT_RESUME:
+		rc = afe_ops->afe_port_resume(core_handle, afe_cfg);
+		if (rc != 0)
+			dev_err(rtd->dev,
+				"%s: afe_resume failed, err = %d\n",
+				__func__, rc);
+		break;
+	case AFE_CMD_PORT_STOP:
+		rc = afe_ops->afe_port_stop(core_handle, afe_cfg);
+		if (rc != 0)
+			dev_err(rtd->dev,
+				"%s: afe_stopfailed, err = %d\n",
+				__func__, rc);
+		break;
+	}
+
+	return rc;
+}
+
+static int msm_cpe_lsm_lab_stop(struct snd_pcm_substream *substream)
+{
+	struct snd_soc_pcm_runtime *rtd = substream->private_data;
+	struct cpe_lsm_data *lsm_d = cpe_get_lsm_data(substream);
+	struct cpe_priv *cpe = cpe_get_private_data(substream);
+	struct wcd_cpe_lsm_ops *lsm_ops;
+	struct wcd_cpe_afe_ops *afe_ops;
+	struct cpe_lsm_session *session;
+	struct cpe_lsm_lab *lab_d = &lsm_d->lab;
+	struct msm_slim_dma_data *dma_data = NULL;
+	int rc;
+
+	/*
+	 * The caller is not aware of the LAB status and may try to stop
+	 * LAB even if it is already stopped. Return success right away
+	 * if LAB is already stopped.
+	 */
+	if (lab_d->thread_status == MSM_LSM_LAB_THREAD_STOP) {
+		dev_dbg(rtd->dev,
+			"%s: lab already stopped\n",
+			__func__);
+		return 0;
+	}
+
+	if (!cpe || !cpe->core_handle) {
+		dev_err(rtd->dev,
+			"%s: Invalid private data\n",
+			__func__);
+		return -EINVAL;
+	}
+
+	if (!lsm_d->lsm_session) {
+		dev_err(rtd->dev,
+			"%s: Invalid session data\n",
+			__func__);
+		return -EINVAL;
+	}
+
+	lsm_ops = &cpe->lsm_ops;
+	afe_ops = &cpe->afe_ops;
+	session = lsm_d->lsm_session;
+	if (rtd->cpu_dai)
+		dma_data = snd_soc_dai_get_dma_data(rtd->cpu_dai,
+					substream);
+	if (!dma_data || !dma_data->dai_channel_ctl) {
+		dev_err(rtd->dev,
+			"%s: dma_data is not set\n",
+			__func__);
+		return -EINVAL;
+	}
+
+	if (lab_d->thread_status == MSM_LSM_LAB_THREAD_RUNNING) {
+		dev_dbg(rtd->dev, "%s: stopping lab thread\n",
+			__func__);
+		rc = kthread_stop(session->lsm_lab_thread);
+
+		/*
+		 * kthread_stop returns EINTR if the thread_fn
+		 * was not scheduled before calling kthread_stop.
+		 * In this case, we don't need to wait for lab
+		 * thread to complete as lab thread will not be
+		 * scheduled at all.
+		 */
+		if (rc == -EINTR)
+			goto done;
+
+		/* Wait for the lab thread to exit */
+		rc = wait_for_completion_timeout(
+				&lab_d->thread_complete,
+				MSM_CPE_LAB_THREAD_TIMEOUT);
+		if (!rc) {
+			dev_err(rtd->dev,
+				"%s: Wait for lab thread timedout\n",
+				__func__);
+			return -ETIMEDOUT;
+		}
+	}
+
+	rc = lsm_ops->lab_ch_setup(cpe->core_handle,
+				   session,
+				   WCD_CPE_PRE_DISABLE);
+	if (rc)
+		dev_err(rtd->dev,
+			"%s: PRE ch teardown failed, err = %d\n",
+			__func__, rc);
+	/* continue with teardown even if any intermediate step fails */
+	rc = dma_data->dai_channel_ctl(dma_data, rtd->cpu_dai, false);
+	if (rc)
+		dev_err(rtd->dev,
+			"%s: open data failed %d\n", __func__, rc);
+	dma_data->ph = 0;
+
+	/*
+	 * Even if a previous teardown step failed, the
+	 * output AFE port still needs to be stopped.
+	 */
+	rc = afe_ops->afe_port_stop(cpe->core_handle,
+				    &session->afe_out_port_cfg);
+	if (rc)
+		dev_err(rtd->dev,
+			"%s: AFE out port stop failed, err = %d\n",
+			__func__, rc);
+
+	rc = lsm_ops->lab_ch_setup(cpe->core_handle,
+				   session,
+				   WCD_CPE_POST_DISABLE);
+	if (rc)
+		dev_err(rtd->dev,
+			"%s: POST ch teardown failed, err = %d\n",
+			__func__, rc);
+
+done:
+	lab_d->thread_status = MSM_LSM_LAB_THREAD_STOP;
+	lab_d->buf_idx = 0;
+	atomic_set(&lab_d->in_count, 0);
+	lab_d->dma_write = 0;
+
+	return 0;
+}
+
+static int msm_cpe_lab_buf_alloc(struct snd_pcm_substream *substream,
+		struct cpe_lsm_session *session,
+		struct msm_slim_dma_data *dma_data)
+{
+	struct snd_soc_pcm_runtime *rtd = substream->private_data;
+	struct cpe_lsm_data *lsm_d = cpe_get_lsm_data(substream);
+	struct cpe_lsm_lab *lab_d = &lsm_d->lab;
+	struct cpe_hw_params *hw_params = &lsm_d->hw_params;
+	struct cpe_data_pcm_buf *pcm_buf = NULL;
+	int rc = 0;
+	int dma_alloc = 0;
+	u32 count = 0;
+	u32 bufsz, bufcnt;
+
+	if (lab_d->pcm_buf &&
+	    lab_d->pcm_buf->mem) {
+		dev_dbg(rtd->dev,
+			"%s: LAB buf already allocated\n",
+			__func__);
+		goto exit;
+	}
+
+	bufsz = hw_params->buf_sz;
+	bufcnt = hw_params->period_count;
+
+	dev_dbg(rtd->dev,
+		"%s: Buf size %d, buf count %d\n",
+		__func__, bufsz, bufcnt);
+
+	pcm_buf = kcalloc(bufcnt, sizeof(struct cpe_data_pcm_buf),
+			  GFP_KERNEL);
+	if (!pcm_buf) {
+		rc = -ENOMEM;
+		goto exit;
+	}
+
+	lab_d->pcm_buf = pcm_buf;
+	dma_alloc = bufsz * bufcnt;
+	pcm_buf->mem = dma_alloc_coherent(dma_data->sdev->dev.parent,
+					  dma_alloc,
+					  &(pcm_buf->phys),
+					  GFP_KERNEL);
+	if (!pcm_buf->mem) {
+		dev_err(rtd->dev,
+			"%s:DMA alloc failed size = %x\n",
+			__func__, dma_alloc);
+		rc = -ENOMEM;
+		goto fail;
+	}
+
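+	/*
+	 * A single contiguous coherent allocation is carved into bufcnt
+	 * period-sized slices; entry 0 owns the whole mapping and entries
+	 * 1..bufcnt-1 only alias into it, which is why the dealloc path
+	 * frees through pcm_buf[0] alone.
+	 */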
+	count = 0;
+	while (count < bufcnt) {
+		pcm_buf[count].mem = pcm_buf[0].mem + (count * bufsz);
+		pcm_buf[count].phys = pcm_buf[0].phys + (count * bufsz);
+		dev_dbg(rtd->dev,
+			"%s: pcm_buf[%d].mem %pK pcm_buf[%d].phys %pK\n",
+			 __func__, count,
+			(void *)pcm_buf[count].mem,
+			count, &(pcm_buf[count].phys));
+		count++;
+	}
+
+	return 0;
+fail:
+	if (pcm_buf) {
+		if (pcm_buf->mem)
+			dma_free_coherent(dma_data->sdev->dev.parent, dma_alloc,
+					  pcm_buf->mem, pcm_buf->phys);
+		kfree(pcm_buf);
+		lab_d->pcm_buf = NULL;
+	}
+exit:
+	return rc;
+}
+
+static int msm_cpe_lab_buf_dealloc(struct snd_pcm_substream *substream,
+	struct cpe_lsm_session *session, struct msm_slim_dma_data *dma_data)
+{
+	struct snd_soc_pcm_runtime *rtd = substream->private_data;
+	struct cpe_lsm_data *lsm_d = cpe_get_lsm_data(substream);
+	struct cpe_lsm_lab *lab_d = &lsm_d->lab;
+	struct cpe_hw_params *hw_params = &lsm_d->hw_params;
+	int rc = 0;
+	int dma_alloc = 0;
+	struct cpe_data_pcm_buf *pcm_buf = NULL;
+	int bufsz, bufcnt;
+
+	bufsz = hw_params->buf_sz;
+	bufcnt = hw_params->period_count;
+
+	dev_dbg(rtd->dev,
+		"%s:Buf Size %d Buf count %d\n", __func__,
+		bufsz, bufcnt);
+
+	if (bufcnt <= 0 || bufsz <= 0) {
+		dev_err(rtd->dev,
+			"%s: Invalid params, bufsz = %u, bufcnt = %u\n",
+			__func__, bufsz, bufcnt);
+		return -EINVAL;
+	}
+
+	pcm_buf = lab_d->pcm_buf;
+	dma_alloc = bufsz * bufcnt;
+	if (dma_data && pcm_buf)
+		dma_free_coherent(dma_data->sdev->dev.parent, dma_alloc,
+				  pcm_buf->mem, pcm_buf->phys);
+	kfree(pcm_buf);
+	lab_d->pcm_buf = NULL;
+	return rc;
+}
+
+/*
+ * msm_cpe_lab_thread: Initiated on KW detection
+ * @data: lab data
+ *
+ * Start lab thread and call CPE core API for SLIM
+ * read operations.
+ */
+static int msm_cpe_lab_thread(void *data)
+{
+	struct cpe_lsm_data *lsm_d = data;
+	struct cpe_lsm_session *session = lsm_d->lsm_session;
+	struct snd_pcm_substream *substream = lsm_d->substream;
+	struct cpe_lsm_lab *lab_d = &lsm_d->lab;
+	struct cpe_hw_params *hw_params = &lsm_d->hw_params;
+	struct cpe_priv *cpe = cpe_get_private_data(substream);
+	struct wcd_cpe_lsm_ops *lsm_ops;
+	struct wcd_cpe_afe_ops *afe_ops;
+	struct cpe_data_pcm_buf *cur_buf, *next_buf;
+	struct msm_slim_dma_data *dma_data = NULL;
+	struct snd_soc_pcm_runtime *rtd = NULL;
+	bool wait_timedout = false;
+	int rc = 0;
+	u32 done_len = 0;
+	u32 buf_count = 0;
+	u32 prd_cnt;
+
+	allow_signal(SIGKILL);
+	set_current_state(TASK_INTERRUPTIBLE);
+
+	pr_debug("%s: Lab thread start\n", __func__);
+	init_completion(&lab_d->comp);
+
+	if (PCM_RUNTIME_CHECK(substream)) {
+		rc = -EINVAL;
+		goto done;
+	}
+
+	if (!cpe || !cpe->core_handle) {
+		pr_err("%s: Handle to %s is invalid\n",
+			__func__,
+			(!cpe) ? "cpe" : "core");
+		rc = -EINVAL;
+		goto done;
+	}
+
+	rtd = substream->private_data;
+	if (rtd->cpu_dai)
+		dma_data = snd_soc_dai_get_dma_data(rtd->cpu_dai,
+					substream);
+	if (!dma_data || !dma_data->dai_channel_ctl) {
+		pr_err("%s: dma_data is not set\n", __func__);
+		rc = -EINVAL;
+		goto done;
+	}
+
+	lsm_ops = &cpe->lsm_ops;
+	afe_ops = &cpe->afe_ops;
+
+	rc = lsm_ops->lab_ch_setup(cpe->core_handle,
+				   session,
+				   WCD_CPE_PRE_ENABLE);
+	if (rc) {
+		dev_err(rtd->dev,
+			"%s: PRE ch setup failed, err = %d\n",
+			__func__, rc);
+		goto done;
+	}
+
+	rc = dma_data->dai_channel_ctl(dma_data, rtd->cpu_dai, true);
+	if (rc) {
+		dev_err(rtd->dev,
+			"%s: open data failed %d\n", __func__, rc);
+		goto done;
+	}
+
+	dev_dbg(rtd->dev, "%s: Established data channel\n",
+		__func__);
+
+	init_waitqueue_head(&lab_d->period_wait);
+	memset(lab_d->pcm_buf[0].mem, 0, lab_d->pcm_size);
+
+	rc = slim_port_xfer(dma_data->sdev, dma_data->ph,
+			    lab_d->pcm_buf[0].phys,
+			    hw_params->buf_sz, &lab_d->comp);
+	if (rc) {
+		dev_err(rtd->dev,
+			"%s: buf[0] slim_port_xfer failed, err = %d\n",
+			__func__, rc);
+		goto done;
+	}
+
+	rc = slim_port_xfer(dma_data->sdev, dma_data->ph,
+			    lab_d->pcm_buf[1].phys,
+			    hw_params->buf_sz, &lab_d->comp);
+	if (rc) {
+		dev_err(rtd->dev,
+			"%s: buf[0] slim_port_xfer failed, err = %d\n",
+			__func__, rc);
+		goto done;
+	}
+
+	cur_buf = &lab_d->pcm_buf[0];
+	next_buf = &lab_d->pcm_buf[2];
+	prd_cnt = hw_params->period_count;
+	rc = lsm_ops->lab_ch_setup(cpe->core_handle,
+				   session,
+				   WCD_CPE_POST_ENABLE);
+	if (rc) {
+		dev_err(rtd->dev,
+			"%s: POST ch setup failed, err = %d\n",
+			__func__, rc);
+		goto done;
+	}
+
+	rc = afe_ops->afe_port_start(cpe->core_handle,
+			&session->afe_out_port_cfg);
+	if (rc) {
+		dev_err(rtd->dev,
+			"%s: AFE out port start failed, err = %d\n",
+			__func__, rc);
+		goto done;
+	}
+
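+	/*
+	 * Steady state: two period transfers are always queued on the
+	 * SLIMbus port (buf[0] and buf[1] were queued above). Each loop
+	 * iteration queues next_buf, waits for the oldest transfer
+	 * (cur_buf) to complete, reports a period elapsed to ALSA, then
+	 * advances both pointers through the ring.
+	 */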
+	while (!kthread_should_stop() &&
+	       lab_d->thread_status != MSM_LSM_LAB_THREAD_ERROR) {
+
+		rc = slim_port_xfer(dma_data->sdev, dma_data->ph,
+				    next_buf->phys,
+				    hw_params->buf_sz, &lab_d->comp);
+		if (rc) {
+			dev_err(rtd->dev,
+				"%s: slim_port_xfer failed, err = %d\n",
+				__func__, rc);
+			lab_d->thread_status = MSM_LSM_LAB_THREAD_ERROR;
+		}
+
+		rc = wait_for_completion_timeout(&lab_d->comp, (2 * HZ/10));
+		if (!rc) {
+			dev_err(rtd->dev,
+				"%s: wait timedout for slim buffer\n",
+				__func__);
+			wait_timedout = true;
+		} else {
+			wait_timedout = false;
+		}
+
+		rc = slim_port_get_xfer_status(dma_data->sdev,
+					       dma_data->ph,
+					       &cur_buf->phys, &done_len);
+		if (rc || wait_timedout) {
+			dev_err(rtd->dev,
+				"%s: xfer_status failure, rc = %d, wait_timedout = %s\n",
+				__func__, rc,
+				(wait_timedout ? "true" : "false"));
+			lab_d->thread_status = MSM_LSM_LAB_THREAD_ERROR;
+		}
+
+		if (done_len ||
+		    lab_d->thread_status == MSM_LSM_LAB_THREAD_ERROR) {
+			atomic_inc(&lab_d->in_count);
+			lab_d->dma_write += snd_pcm_lib_period_bytes(substream);
+			snd_pcm_period_elapsed(substream);
+			wake_up(&lab_d->period_wait);
+			buf_count++;
+
+			cur_buf = &lab_d->pcm_buf[buf_count % prd_cnt];
+			next_buf = &lab_d->pcm_buf[(buf_count + 2) % prd_cnt];
+			dev_dbg(rtd->dev,
+				"%s: cur buf.mem = %pK, next buf.mem = %pK, buf count = 0x%x\n",
+				__func__, cur_buf->mem, next_buf->mem,
+				buf_count);
+		} else {
+			dev_err(rtd->dev,
+				"%s: SB get status, invalid len = 0x%x\n",
+				__func__, done_len);
+		}
+		done_len = 0;
+	}
+
+done:
+	if (rc)
+		lab_d->thread_status = MSM_LSM_LAB_THREAD_ERROR;
+	pr_debug("%s: Exit lab_thread, exit_status=%d, thread_status=%d\n",
+		 __func__, rc, lab_d->thread_status);
+	complete(&lab_d->thread_complete);
+
+	return 0;
+}
+
+/*
+ * msm_cpe_lsm_open: ASoC call to open the stream
+ * @substream: substream that is to be opened
+ *
+ * Create session data for lsm session and open the lsm session
+ * on CPE.
+ */
+static int msm_cpe_lsm_open(struct snd_pcm_substream *substream)
+{
+	struct cpe_lsm_data *lsm_d;
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	struct snd_soc_pcm_runtime *rtd = substream->private_data;
+	struct cpe_priv *cpe = cpe_get_private_data(substream);
+	struct wcd_cpe_lsm_ops *lsm_ops;
+	int rc = 0;
+
+	if (!cpe || !cpe->codec) {
+		dev_err(rtd->dev,
+			"%s: Invalid private data\n",
+			__func__);
+		return -EINVAL;
+	}
+
+	runtime->hw = msm_pcm_hardware_listen;
+
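+	/*
+	 * Constrain the runtime to the listen-capable sample rates, to an
+	 * integer number of periods (so the LAB ring divides evenly into
+	 * periods), and to a bounded total buffer size.
+	 */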
+	rc = snd_pcm_hw_constraint_list(runtime, 0,
+				SNDRV_PCM_HW_PARAM_RATE,
+				&constraints_sample_rates);
+	if (rc < 0) {
+		pr_err("snd_pcm_hw_constraint_list failed rc %d\n", rc);
+		return -EINVAL;
+	}
+
+	/* Ensure that buffer size is a multiple of period size */
+	rc = snd_pcm_hw_constraint_integer(runtime,
+					   SNDRV_PCM_HW_PARAM_PERIODS);
+	if (rc < 0) {
+		pr_err("%s: Unable to set pcm_param_periods, rc %d\n",
+			__func__, rc);
+		return -EINVAL;
+	}
+
+	rc = snd_pcm_hw_constraint_minmax(runtime,
+		SNDRV_PCM_HW_PARAM_BUFFER_BYTES,
+		LISTEN_MIN_NUM_PERIODS * LISTEN_MIN_PERIOD_SIZE,
+		LISTEN_MAX_NUM_PERIODS * LISTEN_MAX_PERIOD_SIZE);
+	if (rc < 0) {
+		pr_err("%s: Unable to set pcm constraints, rc %d\n",
+			__func__, rc);
+		return -EINVAL;
+	}
+
+	cpe->core_handle = wcd_cpe_get_core_handle(cpe->codec);
+
+	if (!cpe->core_handle) {
+		dev_err(rtd->dev,
+			"%s: Invalid handle to codec core\n",
+			__func__);
+		return -EINVAL;
+	}
+
+	lsm_ops = &cpe->lsm_ops;
+	lsm_d = kzalloc(sizeof(struct cpe_lsm_data), GFP_KERNEL);
+	if (!lsm_d) {
+		dev_err(rtd->dev,
+			"%s: ENOMEM for lsm session, size = %zd\n",
+			__func__, sizeof(struct cpe_lsm_data));
+		rc = -ENOMEM;
+		goto fail_return;
+	}
+	mutex_init(&lsm_d->lsm_api_lock);
+
+	lsm_d->lsm_session = lsm_ops->lsm_alloc_session(cpe->core_handle,
+					lsm_d, msm_cpe_process_event_status);
+	if (!lsm_d->lsm_session) {
+		dev_err(rtd->dev,
+			"%s: session allocation failed",
+			__func__);
+		rc = -EINVAL;
+		goto fail_session_alloc;
+	}
+	/* Explicitly Assign the LAB thread to STOP state */
+	lsm_d->lab.thread_status = MSM_LSM_LAB_THREAD_STOP;
+	lsm_d->lsm_session->started = false;
+	lsm_d->substream = substream;
+	init_waitqueue_head(&lsm_d->lab.period_wait);
+	lsm_d->cpe_prepared = false;
+
+	dev_dbg(rtd->dev, "%s: allocated session with id = %d\n",
+		__func__, lsm_d->lsm_session->id);
+
+	rc = lsm_ops->lsm_open_tx(cpe->core_handle, lsm_d->lsm_session,
+				   LSM_VOICE_WAKEUP_APP_V2, 16000);
+	if (rc < 0) {
+		dev_err(rtd->dev,
+			"%s: OPEN_TX cmd failed, err = %d\n",
+			__func__, rc);
+		goto fail_open_tx;
+	}
+
+	init_waitqueue_head(&lsm_d->event_wait);
+	atomic_set(&lsm_d->event_avail, 0);
+	atomic_set(&lsm_d->event_stop, 0);
+	runtime->private_data = lsm_d;
+
+	return 0;
+
+fail_open_tx:
+	lsm_ops->lsm_dealloc_session(cpe->core_handle, lsm_d->lsm_session);
+
+fail_session_alloc:
+	mutex_destroy(&lsm_d->lsm_api_lock);
+	kfree(lsm_d);
+fail_return:
+	return rc;
+}
+
+/*
+ * msm_cpe_lsm_close: ASoC call to close/cleanup the stream
+ * @substream: substream that is to be closed
+ *
+ * Deallocate the session and release the AFE port. It is not
+ * required to deregister the sound model as long as we close
+ * the lsm session on CPE.
+ */
+static int msm_cpe_lsm_close(struct snd_pcm_substream *substream)
+{
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	struct snd_soc_pcm_runtime *rtd = substream->private_data;
+	struct cpe_lsm_data *lsm_d = cpe_get_lsm_data(substream);
+	struct cpe_priv *cpe = cpe_get_private_data(substream);
+	struct wcd_cpe_lsm_ops *lsm_ops;
+	struct cpe_lsm_session *session;
+	struct wcd_cpe_afe_ops *afe_ops;
+	struct wcd_cpe_afe_port_cfg *afe_cfg;
+	int rc = 0;
+
+	if (!cpe || !cpe->core_handle) {
+		dev_err(rtd->dev,
+			"%s: Invalid private data\n",
+			__func__);
+		return -EINVAL;
+	}
+
+	if (!lsm_d || !lsm_d->lsm_session) {
+		dev_err(rtd->dev,
+			"%s: Invalid session data\n",
+			__func__);
+		return -EINVAL;
+	}
+
+	lsm_ops = &cpe->lsm_ops;
+	session = lsm_d->lsm_session;
+	afe_ops = &cpe->afe_ops;
+	afe_cfg = &(lsm_d->lsm_session->afe_port_cfg);
+
+	/*
+	 * If driver is closed without stopping LAB,
+	 * explicitly stop LAB before cleaning up the
+	 * driver resources.
+	 */
+	rc = msm_cpe_lsm_lab_stop(substream);
+	if (rc) {
+		dev_err(rtd->dev,
+			"%s: Failed to stop lab, error = %d\n",
+			__func__, rc);
+		return rc;
+	}
+
+	rc = msm_cpe_afe_port_cntl(substream,
+				   cpe->core_handle,
+				   afe_ops, afe_cfg,
+				   AFE_CMD_PORT_STOP);
+
+	lsm_d->cpe_prepared = false;
+
+	rc = lsm_ops->lsm_close_tx(cpe->core_handle, session);
+	if (rc != 0) {
+		dev_err(rtd->dev,
+			"%s: lsm_close fail, err = %d\n",
+			__func__, rc);
+		return rc;
+	}
+
+	lsm_ops->lsm_dealloc_session(cpe->core_handle, session);
+	runtime->private_data = NULL;
+	mutex_destroy(&lsm_d->lsm_api_lock);
+	kfree(lsm_d);
+
+	return rc;
+}
+
+static int msm_cpe_lsm_get_conf_levels(
+		struct cpe_lsm_session *session,
+		u8 *conf_levels_ptr)
+{
+	int rc = 0;
+
+	if (session->num_confidence_levels <= 0) {
+		pr_debug("%s: conf_levels (%u), skip set params\n",
+			 __func__,
+			session->num_confidence_levels);
+		goto done;
+	}
+
+	session->conf_levels = kzalloc(session->num_confidence_levels,
+				       GFP_KERNEL);
+	if (!session->conf_levels) {
+		pr_err("%s: No memory for confidence levels %u\n",
+			__func__, session->num_confidence_levels);
+		rc = -ENOMEM;
+		goto done;
+	}
+
+	if (copy_from_user(session->conf_levels,
+			   conf_levels_ptr,
+			   session->num_confidence_levels)) {
+		pr_err("%s: copy_from_user failed for confidence levels %u\n",
+			__func__, session->num_confidence_levels);
+		kfree(session->conf_levels);
+		session->conf_levels = NULL;
+		rc = -EFAULT;
+		goto done;
+	}
+
+done:
+	return rc;
+}
+
+static int msm_cpe_lsm_validate_out_format(
+	struct snd_pcm_substream *substream,
+	struct snd_lsm_output_format_cfg *cfg)
+{
+	struct snd_soc_pcm_runtime *rtd = substream->private_data;
+	int rc = 0;
+
+	if (!cfg) {
+		dev_err(rtd->dev,
+			"%s: Invalid lsm out cfg\n", __func__);
+		rc = -EINVAL;
+		goto done;
+	}
+
+	if (cfg->format != LSM_OUT_FORMAT_PCM &&
+	    cfg->format != LSM_OUT_FORMAT_ADPCM) {
+		dev_err(rtd->dev,
+			"%s: Invalid format %u\n",
+			__func__, cfg->format);
+		rc = -EINVAL;
+		goto done;
+	}
+
+	if (cfg->packing != LSM_OUT_DATA_RAW &&
+	    cfg->packing != LSM_OUT_DATA_PACKED) {
+		dev_err(rtd->dev,
+			"%s: Invalid packing method %u\n",
+			__func__, cfg->packing);
+		rc = -EINVAL;
+		goto done;
+	}
+
+	if (cfg->events != LSM_OUT_DATA_EVENTS_DISABLED &&
+	    cfg->events != LSM_OUT_DATA_EVENTS_ENABLED) {
+		dev_err(rtd->dev,
+			"%s: Invalid events provided %u\n",
+			__func__, cfg->events);
+		rc = -EINVAL;
+		goto done;
+	}
+
+	if (cfg->mode != LSM_OUT_TRANSFER_MODE_RT &&
+	    cfg->mode != LSM_OUT_TRANSFER_MODE_FTRT) {
+		dev_err(rtd->dev,
+			"%s: Invalid transfer mode %u\n",
+			__func__, cfg->mode);
+		rc = -EINVAL;
+		goto done;
+	}
+
+done:
+	return rc;
+}
+
+/*
+ * msm_cpe_lsm_ioctl_shared: Shared IOCTL for this platform driver
+ * @substream: ASoC substream for which the operation is invoked
+ * @cmd: command for the ioctl
+ * @arg: argument for the ioctl
+ *
+ * Perform dedicated listen functions like register sound model,
+ * deregister sound model, etc
+ * Called with lsm_api_lock acquired.
+ */
+static int msm_cpe_lsm_ioctl_shared(struct snd_pcm_substream *substream,
+			 unsigned int cmd, void *arg)
+{
+	struct snd_lsm_sound_model_v2 snd_model;
+	struct snd_soc_pcm_runtime *rtd = substream->private_data;
+	struct cpe_lsm_data *lsm_d = cpe_get_lsm_data(substream);
+	struct cpe_priv *cpe = cpe_get_private_data(substream);
+	struct cpe_lsm_session *session;
+	struct wcd_cpe_lsm_ops *lsm_ops;
+	struct cpe_lsm_lab *lab_d = &lsm_d->lab;
+	struct snd_dma_buffer *dma_buf = &substream->dma_buffer;
+	struct msm_slim_dma_data *dma_data = NULL;
+	struct snd_lsm_detection_params det_params;
+	int rc = 0;
+
+	if (!cpe || !cpe->core_handle) {
+		dev_err(rtd->dev,
+			"%s: Invalid private data\n",
+			__func__);
+		return -EINVAL;
+	}
+
+	if (!lsm_d || !lsm_d->lsm_session) {
+		dev_err(rtd->dev,
+			"%s: Invalid session data\n",
+			__func__);
+		return -EINVAL;
+	}
+
+	session = lsm_d->lsm_session;
+	lsm_ops = &cpe->lsm_ops;
+
+	switch (cmd) {
+	case SNDRV_LSM_STOP_LAB:
+		dev_dbg(rtd->dev,
+			"%s: %s, lab_enable = %d, lab_thread_ststus = %d\n",
+			__func__, "SNDRV_LSM_STOP_LAB",
+			session->lab_enable,
+			lab_d->thread_status);
+
+		if (session->lab_enable &&
+		    lab_d->thread_status != MSM_LSM_LAB_THREAD_STOP) {
+			atomic_inc(&lab_d->abort_read);
+			wake_up(&lab_d->period_wait);
+			rc = msm_cpe_lsm_lab_stop(substream);
+			if (rc) {
+				dev_err(rtd->dev,
+					"%s: stop LAB failed, error = %d\n",
+					__func__, rc);
+				return rc;
+			}
+		} else if (!session->lab_enable) {
+			dev_dbg(rtd->dev,
+				"%s: LAB already stopped\n",
+				__func__);
+		}
+
+		break;
+
+	case SNDRV_LSM_LAB_CONTROL:
+		if (copy_from_user(&session->lab_enable, (void *)arg,
+				   sizeof(u32))) {
+			dev_err(rtd->dev,
+				"%s: copy_from_user failed, size %zd\n",
+				__func__, sizeof(u32));
+			return -EFAULT;
+		}
+
+		dev_dbg(rtd->dev,
+			"%s: %s, lab_enable = %d\n",
+			__func__, "SNDRV_LSM_LAB_CONTROL",
+			session->lab_enable);
+		if (rtd->cpu_dai)
+			dma_data = snd_soc_dai_get_dma_data(rtd->cpu_dai,
+						substream);
+		if (!dma_data || !dma_data->dai_channel_ctl) {
+			dev_err(rtd->dev,
+				"%s: dma_data is not set\n", __func__);
+			return -EINVAL;
+		}
+
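+		/*
+		 * Enabling LAB points the ALSA runtime buffer directly at
+		 * the coherent LAB ring buffer, so userspace reads capture
+		 * data without an intermediate copy; disabling tears LAB
+		 * down before the buffer is freed.
+		 */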
+		if (session->lab_enable) {
+			rc = msm_cpe_lab_buf_alloc(substream,
+						   session, dma_data);
+			if (IS_ERR_VALUE(rc)) {
+				dev_err(rtd->dev,
+					"%s: lab buffer alloc failed, err = %d\n",
+					__func__, rc);
+				return rc;
+			}
+
+			dma_buf->dev.type = SNDRV_DMA_TYPE_DEV;
+			dma_buf->dev.dev = substream->pcm->card->dev;
+			dma_buf->private_data = NULL;
+			dma_buf->area = lab_d->pcm_buf[0].mem;
+			dma_buf->addr =  lab_d->pcm_buf[0].phys;
+			dma_buf->bytes = (lsm_d->hw_params.buf_sz *
+					lsm_d->hw_params.period_count);
+			init_completion(&lab_d->thread_complete);
+			snd_pcm_set_runtime_buffer(substream,
+						   &substream->dma_buffer);
+			rc = lsm_ops->lsm_lab_control(cpe->core_handle,
+					session, true);
+			if (IS_ERR_VALUE(rc)) {
+				dev_err(rtd->dev,
+					"%s: Lab Enable Failed rc %d\n",
+					__func__, rc);
+				return rc;
+			}
+		} else {
+			/*
+			 * It is possible that lab is still enabled
+			 * when trying to de-allocate the lab buffer.
+			 * Make sure to disable lab before de-allocating
+			 * the lab buffer.
+			 */
+			rc = msm_cpe_lsm_lab_stop(substream);
+			if (IS_ERR_VALUE(rc)) {
+				dev_err(rtd->dev,
+					"%s: LAB stop failed, error = %d\n",
+					__func__, rc);
+				return rc;
+			}
+			/*
+			 * Buffer has to be de-allocated even if
+			 * lab_control failed.
+			 */
+			rc = msm_cpe_lab_buf_dealloc(substream,
+						     session, dma_data);
+			if (IS_ERR_VALUE(rc)) {
+				dev_err(rtd->dev,
+					"%s: lab buffer free failed, err = %d\n",
+					__func__, rc);
+				return rc;
+			}
+		}
+		break;
+	case SNDRV_LSM_REG_SND_MODEL_V2:
+		dev_dbg(rtd->dev,
+			"%s: %s\n",
+			__func__, "SNDRV_LSM_REG_SND_MODEL_V2");
+
+		memcpy(&snd_model, arg,
+			sizeof(struct snd_lsm_sound_model_v2));
+
+		session->num_confidence_levels =
+				snd_model.num_confidence_levels;
+		rc = msm_cpe_lsm_get_conf_levels(session,
+				snd_model.confidence_level);
+		if (rc) {
+			dev_err(rtd->dev,
+				"%s: %s get_conf_levels fail, err = %d\n",
+				__func__, "SNDRV_LSM_REG_SND_MODEL_V2",
+				rc);
+			break;
+		}
+
+		session->snd_model_data = kzalloc(snd_model.data_size,
+						  GFP_KERNEL);
+		if (!session->snd_model_data) {
+			dev_err(rtd->dev, "%s: No memory for sound model\n",
+				__func__);
+			kfree(session->conf_levels);
+			session->conf_levels = NULL;
+			return -ENOMEM;
+		}
+		session->snd_model_size = snd_model.data_size;
+
+		if (copy_from_user(session->snd_model_data,
+				   snd_model.data, snd_model.data_size)) {
+			dev_err(rtd->dev,
+				"%s: copy_from_user failed for snd_model\n",
+				__func__);
+			kfree(session->conf_levels);
+			kfree(session->snd_model_data);
+			session->conf_levels = NULL;
+			session->snd_model_data = NULL;
+			return -EFAULT;
+		}
+
+		rc = lsm_ops->lsm_shmem_alloc(cpe->core_handle, session,
+					       session->snd_model_size);
+		if (rc != 0) {
+			dev_err(rtd->dev,
+				"%s: shared memory allocation failed, err = %d\n",
+			       __func__, rc);
+			kfree(session->snd_model_data);
+			kfree(session->conf_levels);
+			session->snd_model_data = NULL;
+			session->conf_levels = NULL;
+			return rc;
+		}
+
+		rc = lsm_ops->lsm_register_snd_model(cpe->core_handle, session,
+						snd_model.detection_mode,
+						snd_model.detect_failure);
+		if (rc != 0) {
+			dev_err(rtd->dev,
+				"%s: snd_model_reg failed, err = %d\n",
+			       __func__, rc);
+			lsm_ops->lsm_shmem_dealloc(cpe->core_handle, session);
+			kfree(session->snd_model_data);
+			kfree(session->conf_levels);
+			session->snd_model_data = NULL;
+			session->conf_levels = NULL;
+			return rc;
+		}
+
+		break;
+
+	case SNDRV_LSM_DEREG_SND_MODEL:
+		dev_dbg(rtd->dev,
+			"%s: %s\n",
+			__func__, "SNDRV_LSM_DEREG_SND_MODEL");
+
+		if (session->lab_enable) {
+			/*
+			 * It is possible that lab is still enabled
+			 * when trying to deregister sound model.
+			 * Make sure to disable lab before de-allocating
+			 * the lab buffer.
+			 */
+			rc = msm_cpe_lsm_lab_stop(substream);
+			if (rc) {
+				dev_err(rtd->dev,
+					"%s: LAB stop failed, error = %d\n",
+					__func__, rc);
+				return rc;
+			}
+
+			rc = lsm_ops->lsm_lab_control(cpe->core_handle,
+					session, false);
+			if (rc)
+				dev_err(rtd->dev,
+					"%s: Lab Disable Failed rc %d\n",
+				       __func__, rc);
+
+			dma_data = snd_soc_dai_get_dma_data(rtd->cpu_dai,
+							substream);
+			if (!dma_data || !dma_data->dai_channel_ctl)
+				dev_err(rtd->dev,
+					"%s: dma_data is not set\n", __func__);
+
+			/*
+			 * Buffer has to be de-allocated even if
+			 * lab_control failed and/or dma data is invalid.
+			 */
+			rc = msm_cpe_lab_buf_dealloc(substream,
+						session, dma_data);
+			if (IS_ERR_VALUE(rc))
+				dev_err(rtd->dev,
+					"%s: lab buffer free failed, err = %d\n",
+					__func__, rc);
+		}
+
+		rc = lsm_ops->lsm_deregister_snd_model(
+				cpe->core_handle, session);
+		if (rc != 0) {
+			dev_err(rtd->dev,
+				"%s: snd_model de-reg failed, err = %d\n",
+				__func__, rc);
+			return rc;
+		}
+
+		kfree(session->snd_model_data);
+		kfree(session->conf_levels);
+		session->snd_model_data = NULL;
+		session->conf_levels = NULL;
+
+		rc = lsm_ops->lsm_shmem_dealloc(cpe->core_handle, session);
+		if (rc != 0) {
+			dev_err(rtd->dev,
+				"%s: LSM shared memory dealloc failed, err = %d\n",
+				__func__, rc);
+			return rc;
+		}
+
+		break;
+
+	case SNDRV_LSM_EVENT_STATUS:
+	case SNDRV_LSM_EVENT_STATUS_V3: {
+		struct snd_lsm_event_status *user;
+		struct snd_lsm_event_status_v3 *user_v3;
+
+		dev_dbg(rtd->dev,
+			"%s: %s\n",
+			__func__, "SNDRV_LSM_EVENT_STATUS(_V3)");
+		if (!arg) {
+			dev_err(rtd->dev,
+				"%s: Invalid argument to ioctl %s\n",
+				__func__,
+				"SNDRV_LSM_EVENT_STATUS(_V3)");
+			return -EINVAL;
+		}
+
+		/*
+		 * Release the api lock before wait to allow
+		 * other IOCTLs to be invoked while waiting
+		 * for event
+		 */
+		MSM_CPE_LSM_REL_LOCK(&lsm_d->lsm_api_lock,
+				     "lsm_api_lock");
+
+		rc = wait_event_freezable(lsm_d->event_wait,
+				(atomic_read(&lsm_d->event_avail) == 1) ||
+				(atomic_read(&lsm_d->event_stop) == 1));
+
+		MSM_CPE_LSM_GRAB_LOCK(&lsm_d->lsm_api_lock,
+				      "lsm_api_lock");
+
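+		/*
+		 * rc == 0 means the wait condition fired (event available
+		 * or wait aborted); a non-zero rc here is -ERESTARTSYS
+		 * from wait_event_freezable being interrupted by a signal
+		 * and is returned to the caller as-is.
+		 */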
+		if (!rc) {
+			if (atomic_read(&lsm_d->event_avail) == 1) {
+				rc = 0;
+				atomic_set(&lsm_d->event_avail, 0);
+
+				if (cmd == SNDRV_LSM_EVENT_STATUS) {
+					user = arg;
+					if (lsm_d->ev_det_pld_size >
+						user->payload_size) {
+						dev_err(rtd->dev,
+							"%s: avail pld_bytes = %u, needed = %u\n",
+							__func__,
+							user->payload_size,
+							lsm_d->ev_det_pld_size);
+						return -EINVAL;
+					}
+
+					user->status = lsm_d->ev_det_status;
+					user->payload_size =
+							lsm_d->ev_det_pld_size;
+					memcpy(user->payload,
+					       lsm_d->ev_det_payload,
+					       lsm_d->ev_det_pld_size);
+				} else {
+					user_v3 = arg;
+					if (lsm_d->ev_det_pld_size >
+						user_v3->payload_size) {
+						dev_err(rtd->dev,
+							"%s: avail pld_bytes = %u, needed = %u\n",
+							__func__,
+							user_v3->payload_size,
+							lsm_d->ev_det_pld_size);
+						return -EINVAL;
+					}
+					/*
+					 * Event status timestamp is not
+					 * supported in CPE mode; set msw
+					 * and lsw to 0.
+					 */
+					user_v3->timestamp_lsw = 0;
+					user_v3->timestamp_msw = 0;
+					user_v3->status = lsm_d->ev_det_status;
+					user_v3->payload_size =
+							lsm_d->ev_det_pld_size;
+					memcpy(user_v3->payload,
+					       lsm_d->ev_det_payload,
+					       lsm_d->ev_det_pld_size);
+				}
+			} else if (atomic_read(&lsm_d->event_stop) == 1) {
+				dev_dbg(rtd->dev,
+					"%s: wait_aborted\n", __func__);
+				if (cmd == SNDRV_LSM_EVENT_STATUS) {
+					user = arg;
+					user->payload_size = 0;
+				} else {
+					user_v3 = arg;
+					user_v3->payload_size = 0;
+				}
+				rc = 0;
+			}
+		}
+	}
+		break;
+
+	case SNDRV_LSM_ABORT_EVENT:
+		dev_dbg(rtd->dev,
+			"%s: %s\n",
+			__func__, "SNDRV_LSM_ABORT_EVENT");
+		atomic_set(&lsm_d->event_stop, 1);
+		wake_up(&lsm_d->event_wait);
+		break;
+
+	case SNDRV_LSM_START:
+		dev_dbg(rtd->dev,
+			"%s: %s\n",
+			__func__, "SNDRV_LSM_START");
+		rc = lsm_ops->lsm_start(cpe->core_handle, session);
+		if (rc != 0) {
+			dev_err(rtd->dev,
+				"%s: lsm_start fail, err = %d\n",
+				__func__, rc);
+			return rc;
+		}
+		session->started = true;
+		break;
+
+	case SNDRV_LSM_STOP:
+		dev_dbg(rtd->dev,
+			"%s: %s, lab_enable = %d, lab_thread_status = %d\n",
+			__func__, "SNDRV_LSM_STOP",
+			session->lab_enable,
+			lab_d->thread_status);
+		if ((session->lab_enable &&
+		     lab_d->thread_status ==
+		     MSM_LSM_LAB_THREAD_RUNNING)) {
+			/* Explicitly stop LAB */
+			rc = msm_cpe_lsm_lab_stop(substream);
+			if (rc) {
+				dev_err(rtd->dev,
+					"%s: lab_stop failed, err = %d\n",
+					__func__, rc);
+				return rc;
+			}
+		}
+
+		rc = lsm_ops->lsm_stop(cpe->core_handle, session);
+		if (rc != 0) {
+			dev_err(rtd->dev,
+				"%s: lsm_stop fail err = %d\n",
+				__func__, rc);
+
+			return rc;
+		}
+		session->started = false;
+		break;
+
+	case SNDRV_LSM_SET_PARAMS:
+		memcpy(&det_params, arg,
+			sizeof(det_params));
+		if (det_params.num_confidence_levels <= 0) {
+			dev_err(rtd->dev,
+				"%s: %s: Invalid confidence levels %u\n",
+				__func__, "SNDRV_LSM_SET_PARAMS",
+				det_params.num_confidence_levels);
+			return -EINVAL;
+		}
+
+		session->num_confidence_levels =
+				det_params.num_confidence_levels;
+		rc = msm_cpe_lsm_get_conf_levels(session,
+						det_params.conf_level);
+		if (rc) {
+			dev_err(rtd->dev,
+				"%s: %s get_conf_levels fail, err = %d\n",
+				__func__, "SNDRV_LSM_SET_PARAMS",
+				rc);
+			break;
+		}
+
+		rc = lsm_ops->lsm_set_data(cpe->core_handle, session,
+					   det_params.detect_mode,
+					   det_params.detect_failure);
+		if (rc) {
+			dev_err(rtd->dev,
+				"%s: lsm_set_data failed, err = %d\n",
+				__func__, rc);
+			return rc;
+		}
+
+		kfree(session->conf_levels);
+		session->conf_levels = NULL;
+
+		break;
+
+	case SNDRV_LSM_OUT_FORMAT_CFG: {
+		struct snd_lsm_output_format_cfg u_fmt_cfg;
+
+		if (!arg) {
+			dev_err(rtd->dev,
+				"%s: Invalid argument to ioctl %s\n",
+				__func__, "SNDRV_LSM_OUT_FORMAT_CFG");
+			return -EINVAL;
+		}
+
+		if (copy_from_user(&u_fmt_cfg, arg,
+				   sizeof(u_fmt_cfg))) {
+			dev_err(rtd->dev,
+				"%s: copy_from_user failed for out_fmt_cfg\n",
+				__func__);
+			return -EFAULT;
+		}
+
+		if (msm_cpe_lsm_validate_out_format(substream,
+						    &u_fmt_cfg))
+			return -EINVAL;
+
+		session->out_fmt_cfg.format = u_fmt_cfg.format;
+		session->out_fmt_cfg.pack_mode = u_fmt_cfg.packing;
+		session->out_fmt_cfg.data_path_events = u_fmt_cfg.events;
+		session->out_fmt_cfg.transfer_mode = u_fmt_cfg.mode;
+
+		rc = lsm_ops->lsm_set_fmt_cfg(cpe->core_handle,
+					      session);
+		if (rc) {
+			dev_err(rtd->dev,
+				"%s: lsm_set_fmt_cfg failed, err = %d\n",
+				__func__, rc);
+			return rc;
+		}
+	}
+		break;
+
+	case SNDRV_LSM_SET_PORT: {
+		u32 port_id = cpe->input_port_id;
+
+		dev_dbg(rtd->dev, "%s: %s\n", __func__, "SNDRV_LSM_SET_PORT");
+		rc = lsm_ops->lsm_set_port(cpe->core_handle, session, &port_id);
+		if (rc) {
+			dev_err(rtd->dev,
+				"%s: lsm_set_port failed, err = %d\n",
+				__func__, rc);
+			return rc;
+		}
+	}
+		break;
+
+	default:
+		dev_dbg(rtd->dev,
+			"%s: Default snd_lib_ioctl cmd 0x%x\n",
+			__func__, cmd);
+		rc = snd_pcm_lib_ioctl(substream, cmd, arg);
+	}
+
+	return rc;
+}
+
+static int msm_cpe_lsm_lab_start(struct snd_pcm_substream *substream,
+		u16 event_det_status)
+{
+	struct snd_soc_pcm_runtime *rtd;
+	struct cpe_lsm_data *lsm_d = NULL;
+	struct cpe_priv *cpe = NULL;
+	struct cpe_lsm_session *session = NULL;
+	struct cpe_lsm_lab *lab_d = NULL;
+	struct cpe_hw_params *hw_params;
+	struct wcd_cpe_lsm_ops *lsm_ops;
+	struct wcd_cpe_afe_ops *afe_ops;
+	struct wcd_cpe_afe_port_cfg *out_port;
+	int rc;
+
+	if (!substream || !substream->private_data) {
+		pr_err("%s: invalid substream (%pK)\n",
+			__func__, substream);
+		return -EINVAL;
+	}
+
+	rtd = substream->private_data;
+	lsm_d = cpe_get_lsm_data(substream);
+	cpe = cpe_get_private_data(substream);
+
+	if (!cpe || !cpe->core_handle) {
+		dev_err(rtd->dev,
+			"%s: Invalid private data\n",
+			__func__);
+		return -EINVAL;
+	}
+
+	if (!lsm_d || !lsm_d->lsm_session) {
+		dev_err(rtd->dev,
+			"%s: Invalid session data\n",
+			__func__);
+		return -EINVAL;
+	}
+
+	session = lsm_d->lsm_session;
+	lsm_ops = &cpe->lsm_ops;
+	lab_d = &lsm_d->lab;
+	afe_ops = &cpe->afe_ops;
+	hw_params = &lsm_d->hw_params;
+
+	if (!session->started) {
+		dev_dbg(rtd->dev,
+			"%s: Session is stopped, cannot start LAB\n",
+			__func__);
+		return 0;
+	}
+
+	reinit_completion(&lab_d->thread_complete);
+
+	if (session->lab_enable &&
+	    event_det_status ==
+	    LSM_VOICE_WAKEUP_STATUS_DETECTED) {
+		out_port = &session->afe_out_port_cfg;
+		out_port->port_id = session->afe_out_port_id;
+		out_port->bit_width = hw_params->sample_size;
+		out_port->num_channels = hw_params->channels;
+		out_port->sample_rate = hw_params->sample_rate;
+		dev_dbg(rtd->dev, "%s: port_id= %u, bit_width= %u, rate= %u\n",
+			 __func__, out_port->port_id, out_port->bit_width,
+			out_port->sample_rate);
+
+		rc = afe_ops->afe_port_cmd_cfg(cpe->core_handle,
+					       out_port);
+		if (rc) {
+			dev_err(rtd->dev,
+				"%s: Failed afe generic config v2, err = %d\n",
+				__func__, rc);
+			return rc;
+		}
+
+		atomic_set(&lab_d->abort_read, 0);
+		dev_dbg(rtd->dev,
+			"%s: KW detected, scheduling LAB thread\n",
+			__func__);
+
+		/*
+		 * Even though thread might be only scheduled and
+		 * not currently running, mark the internal driver
+		 * status to running so driver can cancel this thread
+		 * if it needs to, before the thread gets a chance to run.
+		 */
+		lab_d->thread_status = MSM_LSM_LAB_THREAD_RUNNING;
+		session->lsm_lab_thread = kthread_run(
+				msm_cpe_lab_thread,
+				lsm_d,
+				"lab_thread");
+		if (IS_ERR(session->lsm_lab_thread)) {
+			rc = PTR_ERR(session->lsm_lab_thread);
+			dev_err(rtd->dev,
+				"%s: LAB thread creation failed, err = %d\n",
+				__func__, rc);
+			lab_d->thread_status = MSM_LSM_LAB_THREAD_STOP;
+			return rc;
+		}
+	}
+
+	return 0;
+}
+
+static bool msm_cpe_lsm_is_valid_stream(struct snd_pcm_substream *substream,
+		const char *func)
+{
+	struct snd_soc_pcm_runtime *rtd;
+	struct cpe_lsm_data *lsm_d = NULL;
+	struct cpe_priv *cpe = NULL;
+	struct cpe_lsm_session *session = NULL;
+	struct wcd_cpe_lsm_ops *lsm_ops;
+
+	if (!substream || !substream->private_data) {
+		pr_err("%s: invalid substream (%pK)\n",
+			func, substream);
+		return false;
+	}
+
+	rtd = substream->private_data;
+	lsm_d = cpe_get_lsm_data(substream);
+	cpe = cpe_get_private_data(substream);
+
+	if (!cpe || !cpe->core_handle) {
+		dev_err(rtd->dev,
+			"%s: Invalid private data\n",
+			func);
+		return false;
+	}
+
+	if (!lsm_d || !lsm_d->lsm_session) {
+		dev_err(rtd->dev,
+			"%s: Invalid session data\n",
+			func);
+		return false;
+	}
+
+	session = lsm_d->lsm_session;
+	lsm_ops = &cpe->lsm_ops;
+
+	if (!lsm_ops) {
+		dev_err(rtd->dev,
+			"%s: Invalid lsm_ops\n", func);
+		return false;
+	}
+
+	return true;
+}
+
+static int msm_cpe_lsm_set_epd(struct snd_pcm_substream *substream,
+		struct lsm_params_info *p_info)
+{
+	struct snd_soc_pcm_runtime *rtd;
+	struct cpe_lsm_data *lsm_d = NULL;
+	struct cpe_priv *cpe = NULL;
+	struct cpe_lsm_session *session = NULL;
+	struct wcd_cpe_lsm_ops *lsm_ops;
+	struct snd_lsm_ep_det_thres epd_thres;
+	int rc;
+
+	if (!msm_cpe_lsm_is_valid_stream(substream, __func__))
+		return -EINVAL;
+
+	rtd = substream->private_data;
+	lsm_d = cpe_get_lsm_data(substream);
+	cpe = cpe_get_private_data(substream);
+	session = lsm_d->lsm_session;
+	lsm_ops = &cpe->lsm_ops;
+
+	if (p_info->param_size != sizeof(epd_thres)) {
+		dev_err(rtd->dev,
+			"%s: Invalid param_size %d\n",
+			__func__, p_info->param_size);
+		rc = -EINVAL;
+		goto done;
+	}
+
+	if (copy_from_user(&epd_thres, p_info->param_data,
+			   p_info->param_size)) {
+		dev_err(rtd->dev,
+			"%s: copy_from_user failed, size = %d\n",
+			__func__, p_info->param_size);
+		rc = -EFAULT;
+		goto done;
+	}
+
+	rc = lsm_ops->lsm_set_one_param(cpe->core_handle,
+				session, p_info, &epd_thres,
+				LSM_ENDPOINT_DETECT_THRESHOLD);
+	if (unlikely(rc))
+		dev_err(rtd->dev,
+			"%s: set_one_param(epd_threshold) failed, rc %d\n",
+			__func__, rc);
+done:
+	return rc;
+}
+
+static int msm_cpe_lsm_set_mode(struct snd_pcm_substream *substream,
+		struct lsm_params_info *p_info)
+{
+	struct snd_soc_pcm_runtime *rtd;
+	struct cpe_lsm_data *lsm_d = NULL;
+	struct cpe_priv *cpe = NULL;
+	struct cpe_lsm_session *session = NULL;
+	struct wcd_cpe_lsm_ops *lsm_ops;
+	struct snd_lsm_detect_mode det_mode;
+	int rc;
+
+	if (!msm_cpe_lsm_is_valid_stream(substream, __func__))
+		return -EINVAL;
+
+	rtd = substream->private_data;
+	lsm_d = cpe_get_lsm_data(substream);
+	cpe = cpe_get_private_data(substream);
+	session = lsm_d->lsm_session;
+	lsm_ops = &cpe->lsm_ops;
+
+	if (p_info->param_size != sizeof(det_mode)) {
+		dev_err(rtd->dev,
+			"%s: Invalid param_size %d\n",
+			__func__, p_info->param_size);
+		rc = -EINVAL;
+		goto done;
+	}
+
+	if (copy_from_user(&det_mode, p_info->param_data,
+			   p_info->param_size)) {
+		dev_err(rtd->dev,
+			"%s: copy_from_user failed, size = %d\n",
+			__func__, p_info->param_size);
+		rc = -EFAULT;
+		goto done;
+	}
+
+	rc = lsm_ops->lsm_set_one_param(cpe->core_handle,
+				session, p_info, &det_mode,
+				LSM_OPERATION_MODE);
+	if (unlikely(rc))
+		dev_err(rtd->dev,
+			"%s: set_one_param(epd_threshold) failed, rc %d\n",
+			__func__, rc);
+done:
+	return rc;
+}
+
+static int msm_cpe_lsm_set_gain(struct snd_pcm_substream *substream,
+		struct lsm_params_info *p_info)
+{
+	struct snd_soc_pcm_runtime *rtd;
+	struct cpe_lsm_data *lsm_d = NULL;
+	struct cpe_priv *cpe = NULL;
+	struct cpe_lsm_session *session = NULL;
+	struct wcd_cpe_lsm_ops *lsm_ops;
+	struct snd_lsm_gain gain;
+	int rc;
+
+	if (!msm_cpe_lsm_is_valid_stream(substream, __func__))
+		return -EINVAL;
+
+	rtd = substream->private_data;
+	lsm_d = cpe_get_lsm_data(substream);
+	cpe = cpe_get_private_data(substream);
+	session = lsm_d->lsm_session;
+	lsm_ops = &cpe->lsm_ops;
+
+	if (p_info->param_size != sizeof(gain)) {
+		dev_err(rtd->dev,
+			"%s: Invalid param_size %d\n",
+			__func__, p_info->param_size);
+		rc = -EINVAL;
+		goto done;
+	}
+
+	if (copy_from_user(&gain, p_info->param_data,
+			   p_info->param_size)) {
+		dev_err(rtd->dev,
+			"%s: copy_from_user failed, size = %d\n",
+			__func__, p_info->param_size);
+		rc = -EFAULT;
+		goto done;
+	}
+
+	rc = lsm_ops->lsm_set_one_param(cpe->core_handle,
+				session, p_info, &gain,
+				LSM_GAIN);
+	if (unlikely(rc))
+		dev_err(rtd->dev,
+			"%s: set_one_param(epd_threshold) failed, rc %d\n",
+			__func__, rc);
+done:
+	return rc;
+}
+
+static int msm_cpe_lsm_set_conf(struct snd_pcm_substream *substream,
+		struct lsm_params_info *p_info)
+{
+	struct snd_soc_pcm_runtime *rtd;
+	struct cpe_lsm_data *lsm_d = NULL;
+	struct cpe_priv *cpe = NULL;
+	struct cpe_lsm_session *session = NULL;
+	struct wcd_cpe_lsm_ops *lsm_ops;
+	int rc;
+
+	if (!msm_cpe_lsm_is_valid_stream(substream, __func__))
+		return -EINVAL;
+
+	rtd = substream->private_data;
+	lsm_d = cpe_get_lsm_data(substream);
+	cpe = cpe_get_private_data(substream);
+	session = lsm_d->lsm_session;
+	lsm_ops = &cpe->lsm_ops;
+
+	session->num_confidence_levels =
+			p_info->param_size;
+	rc = msm_cpe_lsm_get_conf_levels(session,
+			p_info->param_data);
+	if (rc) {
+		dev_err(rtd->dev,
+			"%s: get_conf_levels failed, err = %d\n",
+			__func__, rc);
+		goto done;
+	}
+
+	rc = lsm_ops->lsm_set_one_param(cpe->core_handle,
+				session, p_info, NULL,
+				LSM_MIN_CONFIDENCE_LEVELS);
+	if (unlikely(rc))
+		dev_err(rtd->dev,
+			"%s: set_one_param(conf_levels) failed, rc %d\n",
+			__func__, rc);
+done:
+	return rc;
+}
+
+static int msm_cpe_lsm_reg_model(struct snd_pcm_substream *substream,
+		struct lsm_params_info *p_info)
+{
+	struct snd_soc_pcm_runtime *rtd;
+	struct cpe_lsm_data *lsm_d = NULL;
+	struct cpe_priv *cpe = NULL;
+	struct cpe_lsm_session *session = NULL;
+	struct wcd_cpe_lsm_ops *lsm_ops;
+	int rc;
+	size_t offset;
+	u8 *snd_model_ptr;
+
+	if (!msm_cpe_lsm_is_valid_stream(substream, __func__))
+		return -EINVAL;
+
+	rtd = substream->private_data;
+	lsm_d = cpe_get_lsm_data(substream);
+	cpe = cpe_get_private_data(substream);
+	session = lsm_d->lsm_session;
+	lsm_ops = &cpe->lsm_ops;
+
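+	/*
+	 * The core reports an offset at which the raw sound model must be
+	 * placed inside the shared buffer, presumably leaving headroom for
+	 * a header it prepends; the core side is not part of this hunk.
+	 */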
+	lsm_ops->lsm_get_snd_model_offset(cpe->core_handle,
+			session, &offset);
+	/* Check if 'p_info->param_size + offset' crosses U32_MAX. */
+	if (p_info->param_size > U32_MAX - offset) {
+		dev_err(rtd->dev,
+			"%s: Invalid param_size %d\n",
+			__func__, p_info->param_size);
+		return -EINVAL;
+	}
+	session->snd_model_size = p_info->param_size + offset;
+
+	session->snd_model_data = vzalloc(session->snd_model_size);
+	if (!session->snd_model_data)
+		return -ENOMEM;
+	snd_model_ptr = ((u8 *) session->snd_model_data) + offset;
+
+	if (copy_from_user(snd_model_ptr,
+			   p_info->param_data, p_info->param_size)) {
+		dev_err(rtd->dev,
+			"%s: copy_from_user for snd_model failed\n",
+			__func__);
+		rc = -EFAULT;
+		goto free_snd_model_data;
+	}
+
+	rc = lsm_ops->lsm_shmem_alloc(cpe->core_handle, session,
+				      session->snd_model_size);
+	if (rc != 0) {
+		dev_err(rtd->dev,
+			"%s: shared memory allocation failed, err = %d\n",
+		       __func__, rc);
+		rc = -EINVAL;
+		goto free_snd_model_data;
+	}
+
+	rc = lsm_ops->lsm_set_one_param(cpe->core_handle,
+				session, p_info, NULL,
+				LSM_REG_SND_MODEL);
+	if (unlikely(rc)) {
+		dev_err(rtd->dev,
+			"%s: set_one_param(snd_model) failed, rc %d\n",
+			__func__, rc);
+		goto dealloc_shmem;
+	}
+	return 0;
+
+dealloc_shmem:
+	lsm_ops->lsm_shmem_dealloc(cpe->core_handle, session);
+
+free_snd_model_data:
+	vfree(session->snd_model_data);
+	return rc;
+}
+
+static int msm_cpe_lsm_dereg_model(struct snd_pcm_substream *substream,
+		struct lsm_params_info *p_info)
+{
+	struct snd_soc_pcm_runtime *rtd;
+	struct cpe_lsm_data *lsm_d = NULL;
+	struct cpe_priv *cpe = NULL;
+	struct cpe_lsm_session *session = NULL;
+	struct wcd_cpe_lsm_ops *lsm_ops;
+	int rc;
+
+	if (!msm_cpe_lsm_is_valid_stream(substream, __func__))
+		return -EINVAL;
+
+	rtd = substream->private_data;
+	lsm_d = cpe_get_lsm_data(substream);
+	cpe = cpe_get_private_data(substream);
+	session = lsm_d->lsm_session;
+	lsm_ops = &cpe->lsm_ops;
+
+	rc = lsm_ops->lsm_set_one_param(cpe->core_handle,
+				session, p_info, NULL,
+				LSM_DEREG_SND_MODEL);
+	if (rc)
+		dev_err(rtd->dev,
+			"%s: dereg_snd_model failed\n",
+			__func__);
+	return lsm_ops->lsm_shmem_dealloc(cpe->core_handle, session);
+}
+
+static int msm_cpe_lsm_set_custom(struct snd_pcm_substream *substream,
+		struct lsm_params_info *p_info)
+{
+	struct snd_soc_pcm_runtime *rtd;
+	struct cpe_lsm_data *lsm_d = NULL;
+	struct cpe_priv *cpe = NULL;
+	struct cpe_lsm_session *session = NULL;
+	struct wcd_cpe_lsm_ops *lsm_ops;
+	u8 *data;
+	int rc;
+
+	if (!msm_cpe_lsm_is_valid_stream(substream, __func__))
+		return -EINVAL;
+
+	rtd = substream->private_data;
+	lsm_d = cpe_get_lsm_data(substream);
+	cpe = cpe_get_private_data(substream);
+	session = lsm_d->lsm_session;
+	lsm_ops = &cpe->lsm_ops;
+
+	if (p_info->param_size > MSM_CPE_MAX_CUSTOM_PARAM_SIZE) {
+		dev_err(rtd->dev,
+			"%s: invalid size %d, max allowed %d\n",
+			__func__, p_info->param_size,
+			MSM_CPE_MAX_CUSTOM_PARAM_SIZE);
+		return -EINVAL;
+	}
+
+	data = kzalloc(p_info->param_size, GFP_KERNEL);
+	if (!data)
+		return -ENOMEM;
+
+	if (copy_from_user(data, p_info->param_data,
+			   p_info->param_size)) {
+		dev_err(rtd->dev,
+			"%s: copy_from_user failed for custom params, size = %d\n",
+			__func__, p_info->param_size);
+		rc = -EFAULT;
+		goto err_ret;
+	}
+
+	rc = lsm_ops->lsm_set_one_param(cpe->core_handle,
+				session, p_info, data,
+				LSM_CUSTOM_PARAMS);
+	if (rc)
+		dev_err(rtd->dev,
+			"%s: custom_params failed, err = %d\n",
+			__func__, rc);
+err_ret:
+	kfree(data);
+	return rc;
+}
+
+static int msm_cpe_lsm_process_params(struct snd_pcm_substream *substream,
+		struct snd_lsm_module_params *p_data,
+		void *params)
+{
+	struct snd_soc_pcm_runtime *rtd = substream->private_data;
+	struct lsm_params_info *p_info;
+	int i;
+	int rc = 0;
+
+	p_info = (struct lsm_params_info *) params;
+
+	for (i = 0; i < p_data->num_params; i++) {
+		dev_dbg(rtd->dev,
+			"%s: param (%d), module_id = 0x%x, param_id = 0x%x, param_size = 0x%x, param_type = 0x%x\n",
+			__func__, i, p_info->module_id,
+			p_info->param_id, p_info->param_size,
+			p_info->param_type);
+
+		switch (p_info->param_type) {
+		case LSM_ENDPOINT_DETECT_THRESHOLD:
+			rc = msm_cpe_lsm_set_epd(substream, p_info);
+			break;
+		case LSM_OPERATION_MODE:
+			rc = msm_cpe_lsm_set_mode(substream, p_info);
+			break;
+		case LSM_GAIN:
+			rc = msm_cpe_lsm_set_gain(substream, p_info);
+			break;
+		case LSM_MIN_CONFIDENCE_LEVELS:
+			rc = msm_cpe_lsm_set_conf(substream, p_info);
+			break;
+		case LSM_REG_SND_MODEL:
+			rc = msm_cpe_lsm_reg_model(substream, p_info);
+			break;
+		case LSM_DEREG_SND_MODEL:
+			rc = msm_cpe_lsm_dereg_model(substream, p_info);
+			break;
+		case LSM_CUSTOM_PARAMS:
+			rc = msm_cpe_lsm_set_custom(substream, p_info);
+			break;
+		default:
+			dev_err(rtd->dev,
+				"%s: Invalid param_type %d\n",
+				__func__, p_info->param_type);
+			rc = -EINVAL;
+			break;
+		}
+		if (rc) {
+			pr_err("%s: set_param fail for param_type %d\n",
+				__func__, p_info->param_type);
+			return rc;
+		}
+
+		p_info++;
+	}
+
+	return rc;
+}
+
+static int msm_cpe_lsm_ioctl(struct snd_pcm_substream *substream,
+			 unsigned int cmd, void *arg)
+{
+	int err = 0;
+	struct snd_soc_pcm_runtime *rtd;
+	struct cpe_priv *cpe = NULL;
+	struct cpe_lsm_data *lsm_d = NULL;
+	struct cpe_lsm_session *session = NULL;
+	struct wcd_cpe_lsm_ops *lsm_ops;
+
+	if (!substream || !substream->private_data) {
+		pr_err("%s: invalid substream (%pK)\n",
+			__func__, substream);
+		return -EINVAL;
+	}
+
+	rtd = substream->private_data;
+	lsm_d = cpe_get_lsm_data(substream);
+	cpe = cpe_get_private_data(substream);
+
+	if (!cpe || !cpe->core_handle) {
+		dev_err(rtd->dev,
+			"%s: Invalid private data\n",
+			__func__);
+		return -EINVAL;
+	}
+
+	if (!lsm_d || !lsm_d->lsm_session) {
+		dev_err(rtd->dev,
+			"%s: Invalid session data\n",
+			__func__);
+		return -EINVAL;
+	}
+
+	MSM_CPE_LSM_GRAB_LOCK(&lsm_d->lsm_api_lock,
+			      "lsm_api_lock");
+
+	session = lsm_d->lsm_session;
+	lsm_ops = &cpe->lsm_ops;
+
+	switch (cmd) {
+	case SNDRV_LSM_REG_SND_MODEL_V2: {
+		struct snd_lsm_sound_model_v2 snd_model;
+
+		if (session->is_topology_used) {
+			dev_err(rtd->dev,
+				"%s: %s: not supported if using topology\n",
+				__func__, "LSM_REG_SND_MODEL_V2");
+			err = -EINVAL;
+			goto done;
+		}
+
+		if (copy_from_user(&snd_model, (void *)arg,
+				   sizeof(struct snd_lsm_sound_model_v2))) {
+			dev_err(rtd->dev,
+				"%s: copy from user failed, size %zd\n",
+				__func__,
+				sizeof(struct snd_lsm_sound_model_v2));
+			err = -EFAULT;
+			goto done;
+		}
+
+		err = msm_cpe_lsm_ioctl_shared(substream, cmd,
+					       &snd_model);
+	}
+		break;
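+	/*
+	 * For EVENT_STATUS a kernel buffer is sized from the user-supplied
+	 * payload_size, filled by the shared handler (which blocks until a
+	 * detection event), copied back to userspace, and only then used
+	 * to kick off LAB capture.
+	 */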
+	case SNDRV_LSM_EVENT_STATUS: {
+		struct snd_lsm_event_status u_event_status;
+		struct snd_lsm_event_status *event_status = NULL;
+		int u_pld_size = 0;
+
+		if (copy_from_user(&u_event_status, (void *)arg,
+				   sizeof(struct snd_lsm_event_status))) {
+			dev_err(rtd->dev,
+				"%s: event status copy from user failed, size %zd\n",
+				__func__,
+				sizeof(struct snd_lsm_event_status));
+			err = -EFAULT;
+			goto done;
+		}
+
+		if (u_event_status.payload_size >
+		    LISTEN_MAX_STATUS_PAYLOAD_SIZE) {
+			dev_err(rtd->dev,
+				"%s: payload_size %d is invalid, max allowed = %d\n",
+				__func__, u_event_status.payload_size,
+				LISTEN_MAX_STATUS_PAYLOAD_SIZE);
+			err = -EINVAL;
+			goto done;
+		}
+
+		u_pld_size = sizeof(struct snd_lsm_event_status) +
+				u_event_status.payload_size;
+
+		event_status = kzalloc(u_pld_size, GFP_KERNEL);
+		if (!event_status) {
+			dev_err(rtd->dev,
+				"%s: No memory for event status\n",
+				__func__);
+			err = -ENOMEM;
+			goto done;
+		} else {
+			event_status->payload_size =
+				u_event_status.payload_size;
+			err = msm_cpe_lsm_ioctl_shared(substream,
+						       cmd, event_status);
+		}
+
+		if (!err && copy_to_user(arg, event_status, u_pld_size)) {
+			dev_err(rtd->dev,
+				"%s: copy to user failed\n",
+				__func__);
+			kfree(event_status);
+			err = -EFAULT;
+			goto done;
+		}
+
+		msm_cpe_lsm_lab_start(substream, event_status->status);
+		msm_cpe_process_event_status_done(lsm_d);
+		kfree(event_status);
+	}
+		break;
+	case SNDRV_LSM_EVENT_STATUS_V3: {
+		struct snd_lsm_event_status_v3 u_event_status;
+		struct snd_lsm_event_status_v3 *event_status = NULL;
+		int u_pld_size = 0;
+
+		if (copy_from_user(&u_event_status, (void *)arg,
+				   sizeof(struct snd_lsm_event_status_v3))) {
+			dev_err(rtd->dev,
+				"%s: event status copy from user failed, size %zd\n",
+				__func__,
+				sizeof(struct snd_lsm_event_status_v3));
+			err = -EFAULT;
+			goto done;
+		}
+
+		if (u_event_status.payload_size >
+		    LISTEN_MAX_STATUS_PAYLOAD_SIZE) {
+			dev_err(rtd->dev,
+				"%s: payload_size %d is invalid, max allowed = %d\n",
+				__func__, u_event_status.payload_size,
+				LISTEN_MAX_STATUS_PAYLOAD_SIZE);
+			err = -EINVAL;
+			goto done;
+		}
+
+		u_pld_size = sizeof(struct snd_lsm_event_status_v3) +
+				u_event_status.payload_size;
+
+		event_status = kzalloc(u_pld_size, GFP_KERNEL);
+		if (!event_status) {
+			err = -ENOMEM;
+			goto done;
+		} else {
+			event_status->payload_size =
+				u_event_status.payload_size;
+			err = msm_cpe_lsm_ioctl_shared(substream,
+						       cmd, event_status);
+		}
+
+		if (!err && copy_to_user(arg, event_status, u_pld_size)) {
+			dev_err(rtd->dev,
+				"%s: copy to user failed\n",
+				__func__);
+			kfree(event_status);
+			err = -EFAULT;
+			goto done;
+		}
+
+		msm_cpe_lsm_lab_start(substream, event_status->status);
+		msm_cpe_process_event_status_done(lsm_d);
+		kfree(event_status);
+	}
+		break;
+	case SNDRV_LSM_SET_PARAMS: {
+		struct snd_lsm_detection_params det_params;
+
+		if (session->is_topology_used) {
+			dev_err(rtd->dev,
+				"%s: %s: not supported if using topology\n",
+				__func__, "SNDRV_LSM_SET_PARAMS");
+			err = -EINVAL;
+			goto done;
+		}
+
+		if (copy_from_user(&det_params, (void *) arg,
+				   sizeof(det_params))) {
+			dev_err(rtd->dev,
+				"%s: %s: copy_from_user failed, size = %zd\n",
+				__func__, "SNDRV_LSM_SET_PARAMS",
+				sizeof(det_params));
+			err = -EFAULT;
+			goto done;
+		}
+
+		err = msm_cpe_lsm_ioctl_shared(substream, cmd,
+					       &det_params);
+	}
+		break;
+
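+	/*
+	 * SET_MODULE_PARAMS carries an array of lsm_params_info entries;
+	 * num_params and data_size are cross-checked before the array is
+	 * copied in and dispatched one entry at a time by
+	 * msm_cpe_lsm_process_params().
+	 */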
+	case SNDRV_LSM_SET_MODULE_PARAMS: {
+		struct snd_lsm_module_params p_data;
+		size_t p_size;
+		u8 *params;
+
+		if (!session->is_topology_used) {
+			dev_err(rtd->dev,
+				"%s: %s: not supported if not using topology\n",
+				__func__, "SET_MODULE_PARAMS");
+			err = -EINVAL;
+			goto done;
+		}
+
+		if (!arg) {
+			dev_err(rtd->dev,
+				"%s: %s: No Param data to set\n",
+				__func__, "SET_MODULE_PARAMS");
+			err = -EINVAL;
+			goto done;
+		}
+
+		if (copy_from_user(&p_data, arg,
+				   sizeof(p_data))) {
+			dev_err(rtd->dev,
+				"%s: %s: copy_from_user failed, size = %zd\n",
+				__func__, "p_data", sizeof(p_data));
+			err = -EFAULT;
+			goto done;
+		}
+
+		if (p_data.num_params > LSM_PARAMS_MAX) {
+			dev_err(rtd->dev,
+				"%s: %s: Invalid num_params %d\n",
+				__func__, "SET_MODULE_PARAMS",
+				p_data.num_params);
+			err = -EINVAL;
+			goto done;
+		}
+
+		p_size = p_data.num_params *
+			 sizeof(struct lsm_params_info);
+
+		if (p_data.data_size != p_size) {
+			dev_err(rtd->dev,
+				"%s: %s: Invalid data_size %u, expected %zd\n",
+				__func__, "SET_MODULE_PARAMS",
+				p_data.data_size, p_size);
+			err = -EINVAL;
+			goto done;
+		}
+
+		params = kzalloc(p_size, GFP_KERNEL);
+		if (!params) {
+			err = -ENOMEM;
+			goto done;
+		}
+
+		if (copy_from_user(params, p_data.params,
+				   p_data.data_size)) {
+			dev_err(rtd->dev,
+				"%s: %s: copy_from_user failed, size = %d\n",
+				__func__, "params", p_data.data_size);
+			kfree(params);
+			err = -EFAULT;
+			goto done;
+		}
+
+		err = msm_cpe_lsm_process_params(substream, &p_data, params);
+		if (err)
+			dev_err(rtd->dev,
+				"%s: %s: Failed to set params, err = %d\n",
+				__func__, "SET_MODULE_PARAMS", err);
+		kfree(params);
+		break;
+	}
+	default:
+		err = msm_cpe_lsm_ioctl_shared(substream, cmd, arg);
+		break;
+	}
+
+done:
+	MSM_CPE_LSM_REL_LOCK(&lsm_d->lsm_api_lock,
+			     "lsm_api_lock");
+	return err;
+}
+
+#ifdef CONFIG_COMPAT
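+/*
+ * 32-bit userspace lays these structures out with 32-bit pointers, so
+ * the ioctl numbers computed from their sizes differ from the native
+ * ones; each compat struct below mirrors its native counterpart with
+ * compat_uptr_t in place of the user pointers.
+ */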
+struct snd_lsm_sound_model_v2_32 {
+	compat_uptr_t data;
+	compat_uptr_t confidence_level;
+	u32 data_size;
+	enum lsm_detection_mode detection_mode;
+	u8 num_confidence_levels;
+	bool detect_failure;
+};
+
+struct snd_lsm_detection_params_32 {
+	compat_uptr_t conf_level;
+	enum lsm_detection_mode detect_mode;
+	u8 num_confidence_levels;
+	bool detect_failure;
+};
+
+struct lsm_params_info_32 {
+	u32 module_id;
+	u32 param_id;
+	u32 param_size;
+	compat_uptr_t param_data;
+	uint32_t param_type;
+};
+
+struct snd_lsm_module_params_32 {
+	compat_uptr_t params;
+	u32 num_params;
+	u32 data_size;
+};
+
+enum {
+	SNDRV_LSM_REG_SND_MODEL_V2_32 =
+		_IOW('U', 0x07, struct snd_lsm_sound_model_v2_32),
+	SNDRV_LSM_SET_PARAMS32 =
+		_IOW('U', 0x0A, struct snd_lsm_detection_params_32),
+	SNDRV_LSM_SET_MODULE_PARAMS_32 =
+		_IOW('U', 0x0B, struct snd_lsm_module_params_32),
+};
+
+static int msm_cpe_lsm_ioctl_compat(struct snd_pcm_substream *substream,
+			 unsigned int cmd, void *arg)
+{
+	int err = 0;
+	struct snd_soc_pcm_runtime *rtd;
+	struct cpe_priv *cpe = NULL;
+	struct cpe_lsm_data *lsm_d = NULL;
+	struct cpe_lsm_session *session = NULL;
+	struct wcd_cpe_lsm_ops *lsm_ops;
+
+	if (!substream || !substream->private_data) {
+		pr_err("%s: invalid substream (%pK)\n",
+			__func__, substream);
+		return -EINVAL;
+	}
+
+	rtd = substream->private_data;
+	lsm_d = cpe_get_lsm_data(substream);
+	cpe = cpe_get_private_data(substream);
+
+	if (!cpe || !cpe->core_handle) {
+		dev_err(rtd->dev,
+			"%s: Invalid private data\n",
+			__func__);
+		return -EINVAL;
+	}
+
+	if (!lsm_d || !lsm_d->lsm_session) {
+		dev_err(rtd->dev,
+			"%s: Invalid session data\n",
+			__func__);
+		return -EINVAL;
+	}
+
+	MSM_CPE_LSM_GRAB_LOCK(&lsm_d->lsm_api_lock,
+			      "lsm_api_lock");
+
+	session = lsm_d->lsm_session;
+	lsm_ops = &cpe->lsm_ops;
+
+	switch (cmd) {
+	case SNDRV_LSM_REG_SND_MODEL_V2_32: {
+		struct snd_lsm_sound_model_v2 snd_model;
+		struct snd_lsm_sound_model_v2_32 snd_model32;
+
+		if (session->is_topology_used) {
+			dev_err(rtd->dev,
+				"%s: %s: not supported if using topology\n",
+				__func__, "LSM_REG_SND_MODEL_V2_32");
+			err = -EINVAL;
+			goto done;
+		}
+
+		dev_dbg(rtd->dev,
+			"%s: ioctl %s\n", __func__,
+			"SNDRV_LSM_REG_SND_MODEL_V2_32");
+
+		if (copy_from_user(&snd_model32, (void *)arg,
+				   sizeof(snd_model32))) {
+			dev_err(rtd->dev,
+				"%s: copy from user failed, size %zd\n",
+				__func__,
+				sizeof(snd_model32));
+			err = -EFAULT;
+			goto done;
+		}
+
+		snd_model.data = compat_ptr(snd_model32.data);
+		snd_model.confidence_level =
+			compat_ptr(snd_model32.confidence_level);
+		snd_model.data_size = snd_model32.data_size;
+		snd_model.detect_failure = snd_model32.detect_failure;
+		snd_model.num_confidence_levels =
+			snd_model32.num_confidence_levels;
+		snd_model.detection_mode = snd_model32.detection_mode;
+
+		cmd = SNDRV_LSM_REG_SND_MODEL_V2;
+		err = msm_cpe_lsm_ioctl_shared(substream, cmd, &snd_model);
+		if (err)
+			dev_err(rtd->dev,
+				"%s: %s failed, error = %d\n",
+				__func__,
+				"SNDRV_LSM_REG_SND_MODEL_V2_32",
+				err);
+	}
+		break;
+	case SNDRV_LSM_EVENT_STATUS: {
+		struct snd_lsm_event_status *event_status = NULL;
+		struct snd_lsm_event_status u_event_status32;
+		struct snd_lsm_event_status *udata_32 = NULL;
+		int u_pld_size = 0;
+
+		dev_dbg(rtd->dev,
+			"%s: ioctl %s\n", __func__,
+			"SNDRV_LSM_EVENT_STATUS32");
+
+		if (copy_from_user(&u_event_status32, (void *)arg,
+				   sizeof(struct snd_lsm_event_status))) {
+			dev_err(rtd->dev,
+				"%s: event status copy from user failed, size %zd\n",
+				__func__,
+				sizeof(struct snd_lsm_event_status));
+			err = -EFAULT;
+			goto done;
+		}
+
+		if (u_event_status32.payload_size >
+		   LISTEN_MAX_STATUS_PAYLOAD_SIZE) {
+			dev_err(rtd->dev,
+				"%s: payload_size %d is invalid, max allowed = %d\n",
+				__func__, u_event_status32.payload_size,
+				LISTEN_MAX_STATUS_PAYLOAD_SIZE);
+			err = -EINVAL;
+			goto done;
+		}
+
+		u_pld_size = sizeof(struct snd_lsm_event_status) +
+				u_event_status32.payload_size;
+		event_status = kzalloc(u_pld_size, GFP_KERNEL);
+		if (!event_status) {
+			dev_err(rtd->dev,
+				"%s: No memory for event status\n",
+				__func__);
+			err = -ENOMEM;
+			goto done;
+		} else {
+			event_status->payload_size =
+				u_event_status32.payload_size;
+			err = msm_cpe_lsm_ioctl_shared(substream,
+						       cmd, event_status);
+			if (err)
+				dev_err(rtd->dev,
+					"%s: %s failed, error = %d\n",
+					__func__,
+					"SNDRV_LSM_EVENT_STATUS32",
+					err);
+		}
+
+		if (!err) {
+			udata_32 = kzalloc(u_pld_size, GFP_KERNEL);
+			if (!udata_32) {
+				dev_err(rtd->dev,
+					"%s: nomem for udata\n",
+					__func__);
+				err = -EFAULT;
+			} else {
+				udata_32->status = event_status->status;
+				udata_32->payload_size =
+					event_status->payload_size;
+				memcpy(udata_32->payload,
+				       event_status->payload,
+				       event_status->payload_size);
+			}
+		}
+
+		if (!err && copy_to_user(arg, udata_32,
+					  u_pld_size)) {
+			dev_err(rtd->dev,
+				"%s: copy to user failed\n",
+				__func__);
+			kfree(event_status);
+			kfree(udata_32);
+			err = -EFAULT;
+			goto done;
+		}
+
+		msm_cpe_lsm_lab_start(substream, event_status->status);
+		msm_cpe_process_event_status_done(lsm_d);
+		kfree(event_status);
+		kfree(udata_32);
+	}
+		break;
+	case SNDRV_LSM_EVENT_STATUS_V3: {
+		struct snd_lsm_event_status_v3 *event_status = NULL;
+		struct snd_lsm_event_status_v3 u_event_status32;
+		struct snd_lsm_event_status_v3 *udata_32 = NULL;
+		int u_pld_size = 0;
+
+		dev_dbg(rtd->dev,
+			"%s: ioctl %s\n", __func__,
+			"SNDRV_LSM_EVENT_STATUS_V3_32");
+
+		if (copy_from_user(&u_event_status32, (void *)arg,
+				   sizeof(struct snd_lsm_event_status_v3))) {
+			dev_err(rtd->dev,
+				"%s: event status copy from user failed, size %zd\n",
+				__func__,
+				sizeof(struct snd_lsm_event_status_v3));
+			err = -EFAULT;
+			goto done;
+		}
+
+		if (u_event_status32.payload_size >
+		   LISTEN_MAX_STATUS_PAYLOAD_SIZE) {
+			dev_err(rtd->dev,
+				"%s: payload_size %d is invalid, max allowed = %d\n",
+				__func__, u_event_status32.payload_size,
+				LISTEN_MAX_STATUS_PAYLOAD_SIZE);
+			err = -EINVAL;
+			goto done;
+		}
+
+		u_pld_size = sizeof(struct snd_lsm_event_status_v3) +
+				u_event_status32.payload_size;
+		event_status = kzalloc(u_pld_size, GFP_KERNEL);
+		if (!event_status) {
+			dev_err(rtd->dev,
+				"%s: No memory for event status\n",
+				__func__);
+			err = -ENOMEM;
+			goto done;
+		} else {
+			event_status->payload_size =
+				u_event_status32.payload_size;
+			err = msm_cpe_lsm_ioctl_shared(substream,
+						       cmd, event_status);
+			if (err)
+				dev_err(rtd->dev,
+					"%s: %s failed, error = %d\n",
+					__func__,
+					"SNDRV_LSM_EVENT_STATUS_V3_32",
+					err);
+		}
+
+		if (!err) {
+			udata_32 = kzalloc(u_pld_size, GFP_KERNEL);
+			if (!udata_32) {
+				dev_err(rtd->dev,
+					"%s: nomem for udata\n",
+					__func__);
+				err = -EFAULT;
+			} else {
+				udata_32->timestamp_lsw =
+					event_status->timestamp_lsw;
+				udata_32->timestamp_msw =
+					event_status->timestamp_msw;
+				udata_32->status = event_status->status;
+				udata_32->payload_size =
+					event_status->payload_size;
+				memcpy(udata_32->payload,
+				       event_status->payload,
+				       event_status->payload_size);
+			}
+		}
+
+		if (!err && copy_to_user(arg, udata_32,
+					  u_pld_size)) {
+			dev_err(rtd->dev,
+				"%s: copy to user failed\n",
+				__func__);
+			kfree(event_status);
+			kfree(udata_32);
+			err = -EFAULT;
+			goto done;
+		}
+
+		msm_cpe_lsm_lab_start(substream, event_status->status);
+		msm_cpe_process_event_status_done(lsm_d);
+		kfree(event_status);
+		kfree(udata_32);
+	}
+		break;
+	case SNDRV_LSM_SET_PARAMS32: {
+		struct snd_lsm_detection_params_32 det_params32;
+		struct snd_lsm_detection_params det_params;
+
+		if (session->is_topology_used) {
+			dev_err(rtd->dev,
+				"%s: %s: not supported if using topology\n",
+				__func__, "SNDRV_LSM_SET_PARAMS32");
+
+			err = -EINVAL;
+			goto done;
+		}
+
+		if (copy_from_user(&det_params32, arg,
+				   sizeof(det_params32))) {
+			err = -EFAULT;
+			dev_err(rtd->dev,
+				"%s: %s: copy_from_user failed, size = %zd\n",
+				__func__, "SNDRV_LSM_SET_PARAMS_32",
+				sizeof(det_params32));
+		} else {
+			det_params.conf_level =
+				compat_ptr(det_params32.conf_level);
+			det_params.detect_mode =
+				det_params32.detect_mode;
+			det_params.num_confidence_levels =
+				det_params32.num_confidence_levels;
+			det_params.detect_failure =
+				det_params32.detect_failure;
+			cmd = SNDRV_LSM_SET_PARAMS;
+			err = msm_cpe_lsm_ioctl_shared(substream, cmd,
+						  &det_params);
+			if (err)
+				dev_err(rtd->dev,
+					"%s: ioctl %s failed\n", __func__,
+					"SNDRV_LSM_SET_PARAMS");
+		}
+
+		break;
+	}
+
+	case SNDRV_LSM_SET_MODULE_PARAMS_32: {
+		struct snd_lsm_module_params_32 p_data_32;
+		struct snd_lsm_module_params p_data;
+		u8 *params, *params32;
+		size_t p_size;
+		struct lsm_params_info_32 *p_info_32;
+		struct lsm_params_info *p_info;
+		int i;
+
+		if (!session->is_topology_used) {
+			dev_err(rtd->dev,
+				"%s: %s: not supported if not using topology\n",
+				__func__, "SET_MODULE_PARAMS_32");
+			err = -EINVAL;
+			goto done;
+		}
+
+		if (copy_from_user(&p_data_32, arg,
+				   sizeof(p_data_32))) {
+			dev_err(rtd->dev,
+				"%s: %s: copy_from_user failed, size = %zd\n",
+				__func__, "SET_MODULE_PARAMS_32",
+				sizeof(p_data_32));
+			err = -EFAULT;
+			goto done;
+		}
+
+		p_data.params = compat_ptr(p_data_32.params);
+		p_data.num_params = p_data_32.num_params;
+		p_data.data_size = p_data_32.data_size;
+
+		if (p_data.num_params > LSM_PARAMS_MAX) {
+			dev_err(rtd->dev,
+				"%s: %s: Invalid num_params %d\n",
+				__func__, "SET_MODULE_PARAMS_32",
+				p_data.num_params);
+			err = -EINVAL;
+			goto done;
+		}
+
+		if (p_data.data_size !=
+		    (p_data.num_params * sizeof(struct lsm_params_info_32))) {
+			dev_err(rtd->dev,
+				"%s: %s: Invalid size %d\n",
+				__func__, "SET_MODULE_PARAMS_32",
+				p_data.data_size);
+			err = -EINVAL;
+			goto done;
+		}
+
+		p_size = sizeof(struct lsm_params_info_32) *
+			 p_data.num_params;
+
+		params32 = kzalloc(p_size, GFP_KERNEL);
+		if (!params32) {
+			err = -ENOMEM;
+			goto done;
+		}
+
+		p_size = sizeof(struct lsm_params_info) * p_data.num_params;
+		params = kzalloc(p_size, GFP_KERNEL);
+		if (!params) {
+			kfree(params32);
+			err = -ENOMEM;
+			goto done;
+		}
+
+		if (copy_from_user(params32, p_data.params,
+				   p_data.data_size)) {
+			dev_err(rtd->dev,
+				"%s: %s: copy_from_user failed, size = %d\n",
+				__func__, "params32", p_data.data_size);
+			kfree(params32);
+			kfree(params);
+			err = -EFAULT;
+			goto done;
+		}
+
+		p_info_32 = (struct lsm_params_info_32 *) params32;
+		p_info = (struct lsm_params_info *) params;
+		for (i = 0; i < p_data.num_params; i++) {
+			p_info->module_id = p_info_32->module_id;
+			p_info->param_id = p_info_32->param_id;
+			p_info->param_size = p_info_32->param_size;
+			p_info->param_data = compat_ptr(p_info_32->param_data);
+			p_info->param_type = p_info_32->param_type;
+
+			p_info_32++;
+			p_info++;
+		}
+
+		err = msm_cpe_lsm_process_params(substream,
+					     &p_data, params);
+		if (err)
+			dev_err(rtd->dev,
+				"%s: Failed to process params, err = %d\n",
+				__func__, err);
+		kfree(params);
+		kfree(params32);
+		break;
+	}
+	case SNDRV_LSM_REG_SND_MODEL_V2:
+	case SNDRV_LSM_SET_PARAMS:
+	case SNDRV_LSM_SET_MODULE_PARAMS:
+		/*
+		 * The compat_ioctl path should never be invoked with the
+		 * native (non-compat) ioctl commands above. Print an error
+		 * and fail if it is.
+		 */
+		dev_err(rtd->dev,
+			"%s: Invalid cmd for compat_ioctl\n",
+			__func__);
+		err = -EINVAL;
+		break;
+	default:
+		err = msm_cpe_lsm_ioctl_shared(substream, cmd, arg);
+		break;
+	}
+done:
+	MSM_CPE_LSM_REL_LOCK(&lsm_d->lsm_api_lock,
+			     "lsm_api_lock");
+	return err;
+}
+
+#else
+#define msm_cpe_lsm_ioctl_compat NULL
+#endif
+
+/*
+ * msm_cpe_lsm_prepare: prepare call from ASoC core for this platform
+ * @substream: ASoC substream for which the operation is invoked
+ *
+ * start the AFE port on CPE associated for this listen session
+ */
+static int msm_cpe_lsm_prepare(struct snd_pcm_substream *substream)
+{
+	int rc = 0;
+	struct cpe_priv *cpe = cpe_get_private_data(substream);
+	struct cpe_lsm_data *lsm_d = cpe_get_lsm_data(substream);
+	struct snd_soc_pcm_runtime *rtd = substream->private_data;
+	struct wcd_cpe_afe_ops *afe_ops;
+	struct wcd_cpe_afe_port_cfg *afe_cfg;
+	struct cpe_lsm_session *lsm_session;
+	struct cpe_lsm_lab *lab_d = &lsm_d->lab;
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	struct lsm_hw_params lsm_param;
+	struct wcd_cpe_lsm_ops *lsm_ops;
+
+	if (!cpe || !cpe->core_handle) {
+		dev_err(rtd->dev,
+			"%s: Invalid private data\n",
+			__func__);
+		return -EINVAL;
+	}
+
+	if (!lsm_d || !lsm_d->lsm_session) {
+		dev_err(rtd->dev,
+			"%s: Invalid session data\n",
+			__func__);
+		return -EINVAL;
+	}
+	if (runtime->status->state == SNDRV_PCM_STATE_XRUN ||
+	    runtime->status->state == SNDRV_PCM_STATE_PREPARED) {
+		pr_err("%s: XRUN ignore for now\n", __func__);
+		return 0;
+	}
+
+	lsm_session = lsm_d->lsm_session;
+	lab_d->pcm_size = snd_pcm_lib_buffer_bytes(substream);
+
+	dev_dbg(rtd->dev,
+		"%s: pcm_size 0x%x", __func__, lab_d->pcm_size);
+
+	if (lsm_d->cpe_prepared) {
+		dev_dbg(rtd->dev, "%s: CPE is alredy prepared\n",
+			__func__);
+		return 0;
+	}
+
+	lsm_ops = &cpe->lsm_ops;
+	afe_ops = &cpe->afe_ops;
+	afe_cfg = &(lsm_d->lsm_session->afe_port_cfg);
+
+	switch (cpe->input_port_id) {
+	case AFE_PORT_ID_3:
+		afe_cfg->port_id = AFE_PORT_ID_3;
+		afe_cfg->bit_width = 16;
+		afe_cfg->num_channels = 1;
+		afe_cfg->sample_rate = SAMPLE_RATE_48KHZ;
+		rc = afe_ops->afe_port_cmd_cfg(cpe->core_handle, afe_cfg);
+		break;
+	case AFE_PORT_ID_1:
+	default:
+		afe_cfg->port_id = AFE_PORT_ID_1;
+		afe_cfg->bit_width = 16;
+		afe_cfg->num_channels = 1;
+		afe_cfg->sample_rate = SAMPLE_RATE_16KHZ;
+		rc = afe_ops->afe_set_params(cpe->core_handle,
+					     afe_cfg, cpe->afe_mad_ctl);
+		break;
+	}
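+
+	/*
+	 * The two cases above differ in more than the port number: port 1
+	 * is the 16 kHz input programmed through afe_set_params() together
+	 * with the MAD control, while port 3 is a 48 kHz input programmed
+	 * through afe_port_cmd_cfg(); both are fixed at 16-bit mono here.
+	 */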
+
+	if (rc != 0) {
+		dev_err(rtd->dev,
+			"%s: cpe afe params failed for port = %d, err = %d\n",
+			 __func__, afe_cfg->port_id, rc);
+		return rc;
+	}
+	lsm_param.sample_rate = afe_cfg->sample_rate;
+	lsm_param.num_chs = afe_cfg->num_channels;
+	lsm_param.bit_width = afe_cfg->bit_width;
+	rc = lsm_ops->lsm_set_media_fmt_params(cpe->core_handle, lsm_session,
+					       &lsm_param);
+	if (rc)
+		dev_dbg(rtd->dev,
+			"%s: failed to set lsm media fmt params, err = %d\n",
+			__func__, rc);
+
+	/* Send connect to port (input) */
+	rc = lsm_ops->lsm_set_port(cpe->core_handle, lsm_session,
+				   &cpe->input_port_id);
+	if (rc) {
+		dev_err(rtd->dev,
+			"%s: Failed to set connect input port, err=%d\n",
+			__func__, rc);
+		return rc;
+	}
+
+	if (cpe->input_port_id != AFE_PORT_ID_3) {
+		rc = lsm_ops->lsm_get_afe_out_port_id(cpe->core_handle,
+						      lsm_session);
+		if (rc != 0) {
+			dev_err(rtd->dev,
+				"%s: failed to get port id, err = %d\n",
+				__func__, rc);
+			return rc;
+		}
+		/* Send connect to port (output) */
+		rc = lsm_ops->lsm_set_port(cpe->core_handle, lsm_session,
+					   &lsm_session->afe_out_port_id);
+		if (rc) {
+			dev_err(rtd->dev,
+				"%s: Failed to set connect output port, err=%d\n",
+				__func__, rc);
+			return rc;
+		}
+	}
+	rc = msm_cpe_afe_port_cntl(substream,
+				   cpe->core_handle,
+				   afe_ops, afe_cfg,
+				   AFE_CMD_PORT_START);
+	if (rc)
+		dev_err(rtd->dev,
+			"%s: cpe_afe_port start failed, err = %d\n",
+			__func__, rc);
+	else
+		lsm_d->cpe_prepared = true;
+
+	return rc;
+}
+
+/*
+ * msm_cpe_lsm_trigger: trigger call from ASoC core for this platform
+ * @substream: ASoC substream for which the operation is invoked
+ * @cmd: the trigger command from framework
+ *
+ * suspend/resume the AFE port on CPE associated with listen session
+ */
+static int msm_cpe_lsm_trigger(struct snd_pcm_substream *substream,
+			       int cmd)
+{
+	struct snd_soc_pcm_runtime *rtd = substream->private_data;
+	struct cpe_priv *cpe = cpe_get_private_data(substream);
+	struct cpe_lsm_data *lsm_d = cpe_get_lsm_data(substream);
+	struct wcd_cpe_afe_ops *afe_ops;
+	struct wcd_cpe_afe_port_cfg *afe_cfg;
+	int afe_cmd = AFE_CMD_INVALID;
+	int rc = 0;
+
+	if (!cpe || !cpe->core_handle) {
+		dev_err(rtd->dev,
+			"%s: Invalid private data\n",
+			__func__);
+		return -EINVAL;
+	}
+
+	if (!lsm_d || !lsm_d->lsm_session) {
+		dev_err(rtd->dev,
+			"%s: Invalid session data\n",
+			__func__);
+		return -EINVAL;
+	}
+
+	afe_ops = &cpe->afe_ops;
+	afe_cfg = &(lsm_d->lsm_session->afe_port_cfg);
+
+	switch (cmd) {
+	case SNDRV_PCM_TRIGGER_SUSPEND:
+	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+		afe_cmd = AFE_CMD_PORT_SUSPEND;
+		break;
+
+	case SNDRV_PCM_TRIGGER_RESUME:
+	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+		afe_cmd = AFE_CMD_PORT_RESUME;
+		break;
+
+	default:
+		afe_cmd = AFE_CMD_INVALID;
+		dev_dbg(rtd->dev,
+			"%s: unhandled trigger cmd %d\n",
+			__func__, cmd);
+		break;
+	}
+
+	if (afe_cmd != AFE_CMD_INVALID)
+		rc = msm_cpe_afe_port_cntl(substream,
+					   cpe->core_handle,
+					   afe_ops, afe_cfg,
+					   afe_cmd);
+
+	return rc;
+}
+
+static int msm_cpe_lsm_hwparams(struct snd_pcm_substream *substream,
+					struct snd_pcm_hw_params *params)
+{
+	struct snd_soc_pcm_runtime *rtd = substream->private_data;
+	struct cpe_lsm_data *lsm_d = cpe_get_lsm_data(substream);
+	struct cpe_priv *cpe = cpe_get_private_data(substream);
+	struct cpe_lsm_session *session = NULL;
+	struct cpe_hw_params *hw_params = NULL;
+
+	if (!cpe || !cpe->core_handle) {
+		dev_err(rtd->dev,
+			"%s: Invalid %s\n",
+			__func__,
+			(!cpe) ? "cpe" : "core");
+		return -EINVAL;
+	}
+
+	if (!lsm_d || !lsm_d->lsm_session) {
+		dev_err(rtd->dev,
+			"%s: Invalid %s\n",
+			__func__,
+			(!lsm_d) ? "priv_data" : "session");
+		return -EINVAL;
+	}
+
+	session = lsm_d->lsm_session;
+	hw_params = &lsm_d->hw_params;
+	hw_params->buf_sz = (params_buffer_bytes(params)
+				/ params_periods(params));
+	hw_params->period_count = params_periods(params);
+	hw_params->channels = params_channels(params);
+	hw_params->sample_rate = params_rate(params);
+
+	if (params_format(params) == SNDRV_PCM_FORMAT_S16_LE)
+		hw_params->sample_size = 16;
+	else if (params_format(params) ==
+		 SNDRV_PCM_FORMAT_S24_LE)
+		hw_params->sample_size = 24;
+	else if (params_format(params) ==
+		 SNDRV_PCM_FORMAT_S32_LE)
+		hw_params->sample_size = 32;
+	else {
+		dev_err(rtd->dev,
+			"%s: Invalid Format 0x%x\n",
+			__func__, params_format(params));
+		return -EINVAL;
+	}
+
+	dev_dbg(rtd->dev,
+		"%s: Format %d buffer size(bytes) %d period count %d\n"
+		" Channel %d period in bytes 0x%x Period Size 0x%x rate = %d\n",
+		__func__, params_format(params), params_buffer_bytes(params),
+		params_periods(params), params_channels(params),
+		params_period_bytes(params), params_period_size(params),
+		params_rate(params));
+
+	return 0;
+}
+
+static snd_pcm_uframes_t msm_cpe_lsm_pointer(
+				struct snd_pcm_substream *substream)
+{
+	struct cpe_lsm_data *lsm_d = cpe_get_lsm_data(substream);
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	struct snd_soc_pcm_runtime *rtd = substream->private_data;
+	struct cpe_lsm_session *session;
+	struct cpe_lsm_lab *lab_d = &lsm_d->lab;
+
+	session = lsm_d->lsm_session;
+	if (lab_d->dma_write >= lab_d->pcm_size)
+		lab_d->dma_write = 0;
+	dev_dbg(rtd->dev,
+		"%s: pcm_dma_pos = %d\n",
+		__func__, lab_d->dma_write);
+
+	return bytes_to_frames(runtime, (lab_d->dma_write));
+}
+
+static int msm_cpe_lsm_copy(struct snd_pcm_substream *substream, int channel,
+	 snd_pcm_uframes_t hwoff, void __user *buf, snd_pcm_uframes_t frames)
+{
+	struct cpe_lsm_data *lsm_d = cpe_get_lsm_data(substream);
+	struct snd_soc_pcm_runtime *rtd = substream->private_data;
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	struct cpe_lsm_session *session;
+	struct cpe_lsm_lab *lab_d = &lsm_d->lab;
+	char *pcm_buf;
+	int fbytes = 0;
+	int rc = 0;
+
+	fbytes = frames_to_bytes(runtime, frames);
+	if (runtime->status->state == SNDRV_PCM_STATE_XRUN ||
+	   runtime->status->state == SNDRV_PCM_STATE_PREPARED) {
+		pr_err("%s: XRUN ignore for now\n", __func__);
+		return 0;
+	}
+	session = lsm_d->lsm_session;
+
+	/* Check if buffer reading is already in error state */
+	if (lab_d->thread_status == MSM_LSM_LAB_THREAD_ERROR) {
+		dev_err(rtd->dev,
+			"%s: Bufferring is in error state\n",
+			__func__);
+		/*
+		 * Advance the period so there is no wait in case
+		 * read is invoked even after error is propagated
+		 */
+		atomic_inc(&lab_d->in_count);
+		lab_d->dma_write += snd_pcm_lib_period_bytes(substream);
+		snd_pcm_period_elapsed(substream);
+		return -ENETRESET;
+	} else if (lab_d->thread_status == MSM_LSM_LAB_THREAD_STOP) {
+		dev_err(rtd->dev,
+			"%s: Buferring is in stopped\n",
+			__func__);
+		return -EIO;
+	}
+
+	rc = wait_event_timeout(lab_d->period_wait,
+			(atomic_read(&lab_d->in_count) ||
+			atomic_read(&lab_d->abort_read)),
+			(2 * HZ));
+	if (atomic_read(&lab_d->abort_read)) {
+		pr_debug("%s: LSM LAB Abort read\n", __func__);
+		return -EIO;
+	}
+	if (lab_d->thread_status != MSM_LSM_LAB_THREAD_RUNNING) {
+		pr_err("%s: Lab stopped\n", __func__);
+		return -EIO;
+	}
+	if (!rc) {
+		pr_err("%s:LAB err wait_event_timeout\n", __func__);
+		rc = -EAGAIN;
+		goto fail;
+	}
+	if (lab_d->buf_idx >= (lsm_d->hw_params.period_count))
+		lab_d->buf_idx = 0;
+	pcm_buf = (lab_d->pcm_buf[lab_d->buf_idx].mem);
+	pr_debug("%s: Buf IDX = 0x%x pcm_buf %pK\n",
+		 __func__,  lab_d->buf_idx, pcm_buf);
+	if (pcm_buf) {
+		if (copy_to_user(buf, pcm_buf, fbytes)) {
+			pr_err("Failed to copy buf to user\n");
+			rc = -EFAULT;
+			goto fail;
+		}
+	}
+	lab_d->buf_idx++;
+	atomic_dec(&lab_d->in_count);
+	return 0;
+fail:
+	return rc;
+}
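+
+/*
+ * The .copy op above is reached through an ordinary PCM read. A minimal
+ * alsa-lib sketch (the "hw:0,0" device name and PERIOD_FRAMES are
+ * placeholders for the LSM capture PCM exposed by this platform):
+ *
+ *	snd_pcm_t *pcm;
+ *	int16_t buf[PERIOD_FRAMES];
+ *
+ *	snd_pcm_open(&pcm, "hw:0,0", SND_PCM_STREAM_CAPTURE, 0);
+ *	... configure hw params to match msm_cpe_lsm_hwparams() ...
+ *	snd_pcm_readi(pcm, buf, PERIOD_FRAMES);
+ */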
+
+/*
+ * msm_asoc_cpe_lsm_probe: ASoC framework for lsm platform driver
+ * @platform: platform registered with ASoC core
+ *
+ * Allocate the private data for this platform and obtain the ops for
+ * lsm and afe modules from underlying driver. Also find the codec
+ * for this platform as specified by machine driver for ASoC framework.
+ */
+static int msm_asoc_cpe_lsm_probe(struct snd_soc_platform *platform)
+{
+	struct snd_soc_card *card;
+	struct snd_soc_pcm_runtime *rtd;
+	struct snd_soc_codec *codec;
+	struct cpe_priv *cpe_priv;
+	const struct snd_kcontrol_new *kcontrol;
+	bool found_runtime = false;
+	const char *cpe_dev_id = "qcom,msm-cpe-lsm-id";
+	u32 port_id = 0;
+	int ret = 0;
+	int i;
+
+	if (!platform || !platform->component.card) {
+		pr_err("%s: Invalid platform or card\n",
+			__func__);
+		return -EINVAL;
+	}
+
+	card = platform->component.card;
+
+	/* Match platform to codec */
+	for (i = 0; i < card->num_links; i++) {
+		rtd = &card->rtd[i];
+		if (!rtd->platform)
+			continue;
+		if (!strcmp(rtd->platform->component.name,
+			    platform->component.name)) {
+			found_runtime = true;
+			break;
+		}
+	}
+
+	if (!found_runtime) {
+		dev_err(platform->dev,
+			"%s: Failed to find runtime for platform\n",
+			__func__);
+		return -EINVAL;
+	}
+
+	ret = of_property_read_u32(platform->dev->of_node, cpe_dev_id,
+				  &port_id);
+	if (ret) {
+		dev_dbg(platform->dev,
+			"%s: missing 0x%x in dt node\n", __func__, port_id);
+		port_id = 1;
+	}
+
+	codec = rtd->codec;
+
+	cpe_priv = kzalloc(sizeof(struct cpe_priv),
+			   GFP_KERNEL);
+	if (!cpe_priv) {
+		dev_err(platform->dev,
+			"%s: no memory for priv data, size = %zd\n",
+			__func__, sizeof(struct cpe_priv));
+		return -ENOMEM;
+	}
+
+	cpe_priv->codec = codec;
+	cpe_priv->input_port_id = port_id;
+	wcd_cpe_get_lsm_ops(&cpe_priv->lsm_ops);
+	wcd_cpe_get_afe_ops(&cpe_priv->afe_ops);
+
+	snd_soc_platform_set_drvdata(platform, cpe_priv);
+	kcontrol = &msm_cpe_kcontrols[0];
+	snd_ctl_add(card->snd_card, snd_ctl_new1(kcontrol, cpe_priv));
+	return 0;
+}
+
+static struct snd_pcm_ops msm_cpe_lsm_ops = {
+	.open = msm_cpe_lsm_open,
+	.close = msm_cpe_lsm_close,
+	.ioctl = msm_cpe_lsm_ioctl,
+	.prepare = msm_cpe_lsm_prepare,
+	.trigger = msm_cpe_lsm_trigger,
+	.pointer = msm_cpe_lsm_pointer,
+	.copy = msm_cpe_lsm_copy,
+	.hw_params = msm_cpe_lsm_hwparams,
+	.compat_ioctl = msm_cpe_lsm_ioctl_compat,
+};
+
+static struct snd_soc_platform_driver msm_soc_cpe_platform = {
+	.ops = &msm_cpe_lsm_ops,
+	.probe = msm_asoc_cpe_lsm_probe,
+};
+
+/*
+ * msm_cpe_lsm_probe: platform driver probe
+ * @pdev: platform device
+ *
+ * Register the ASoC platform driver with ASoC core
+ */
+static int msm_cpe_lsm_probe(struct platform_device *pdev)
+{
+	return snd_soc_register_platform(&pdev->dev,
+					 &msm_soc_cpe_platform);
+}
+
+/*
+ * msm_cpe_lsm_remove: platform driver remove
+ * @pdev: platform device
+ *
+ * Deregister the ASoC platform driver
+ */
+static int msm_cpe_lsm_remove(struct platform_device *pdev)
+{
+	snd_soc_unregister_platform(&pdev->dev);
+	return 0;
+}
+
+static const struct of_device_id msm_cpe_lsm_dt_match[] = {
+	{.compatible = "qcom,msm-cpe-lsm" },
+	{ }
+};
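+
+/*
+ * A matching device tree node might look as follows (sketch; the id
+ * value is a placeholder, while the compatible string and the
+ * "qcom,msm-cpe-lsm-id" property are what this driver matches and
+ * reads during probe):
+ *
+ *	msm_cpe_lsm {
+ *		compatible = "qcom,msm-cpe-lsm";
+ *		qcom,msm-cpe-lsm-id = <3>;
+ *	};
+ */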
+
+static struct platform_driver msm_cpe_lsm_driver = {
+	.driver = {
+		.name = "msm-cpe-lsm",
+		.owner = THIS_MODULE,
+		.of_match_table = of_match_ptr(msm_cpe_lsm_dt_match),
+	},
+	.probe = msm_cpe_lsm_probe,
+	.remove = msm_cpe_lsm_remove,
+};
+module_platform_driver(msm_cpe_lsm_driver);
+
+MODULE_DESCRIPTION("CPE LSM platform driver");
+MODULE_DEVICE_TABLE(of, msm_cpe_lsm_dt_match);
+MODULE_LICENSE("GPL v2");
diff -Nruw linux-4.4.115-fbx/sound/soc/msm./msm-dai-fe.c linux-4.4.115-fbx/sound/soc/msm/msm-dai-fe.c
--- linux-4.4.115-fbx/sound/soc/msm./msm-dai-fe.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/sound/soc/msm/msm-dai-fe.c	2019-01-22 16:16:29.619301790 +0100
@@ -0,0 +1,2851 @@
+/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/of_device.h>
+#include <sound/core.h>
+#include <sound/pcm.h>
+#include <sound/soc.h>
+
+static struct snd_soc_dai_ops msm_fe_dai_ops = {};
+
+/* Conventional and unconventional sample rates supported */
+static unsigned int supported_sample_rates[] = {
+	8000, 11025, 12000, 16000, 22050, 24000, 32000, 44100, 48000,
+	88200, 96000, 176400, 192000, 352800, 384000
+};
+
+static struct snd_pcm_hw_constraint_list constraints_sample_rates = {
+	.count = ARRAY_SIZE(supported_sample_rates),
+	.list = supported_sample_rates,
+	.mask = 0,
+};
+
+static int multimedia_startup(struct snd_pcm_substream *substream,
+	struct snd_soc_dai *dai)
+{
+	snd_pcm_hw_constraint_list(substream->runtime, 0,
+		SNDRV_PCM_HW_PARAM_RATE,
+		&constraints_sample_rates);
+	return 0;
+}
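+
+/*
+ * The constraint list applied above is what actually narrows a runtime
+ * to the enumerated rates; the SNDRV_PCM_RATE_KNOT bit in the DAI
+ * tables below only advertises that rates without a dedicated rate bit
+ * (such as the 12000 and 24000 entries) may be supported.
+ */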
+
+static int fe_dai_probe(struct snd_soc_dai *dai)
+{
+	struct snd_soc_dapm_route intercon;
+	struct snd_soc_dapm_context *dapm;
+
+	if (!dai || !dai->driver) {
+		pr_err("%s invalid params\n", __func__);
+		return -EINVAL;
+	}
+	dapm = snd_soc_component_get_dapm(dai->component);
+	memset(&intercon, 0, sizeof(intercon));
+	if (dai->driver->playback.stream_name &&
+		dai->driver->playback.aif_name) {
+		dev_dbg(dai->dev, "%s add route for widget %s",
+			   __func__, dai->driver->playback.stream_name);
+		intercon.source = dai->driver->playback.stream_name;
+		intercon.sink = dai->driver->playback.aif_name;
+		dev_dbg(dai->dev, "%s src %s sink %s\n",
+			   __func__, intercon.source, intercon.sink);
+		snd_soc_dapm_add_routes(dapm, &intercon, 1);
+		snd_soc_dapm_ignore_suspend(dapm, intercon.source);
+	}
+	if (dai->driver->capture.stream_name &&
+	   dai->driver->capture.aif_name) {
+		dev_dbg(dai->dev, "%s add route for widget %s",
+			   __func__, dai->driver->capture.stream_name);
+		intercon.sink = dai->driver->capture.stream_name;
+		intercon.source = dai->driver->capture.aif_name;
+		dev_dbg(dai->dev, "%s src %s sink %s\n",
+			   __func__, intercon.source, intercon.sink);
+		snd_soc_dapm_add_routes(dapm, &intercon, 1);
+		snd_soc_dapm_ignore_suspend(dapm, intercon.sink);
+	}
+	return 0;
+}
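+
+/*
+ * For the "MultiMedia1" DAI below, for example, fe_dai_probe() adds the
+ * DAPM routes "MultiMedia1 Playback" -> "MM_DL1" and
+ * "MM_UL1" -> "MultiMedia1 Capture", and marks both stream widgets as
+ * ignore-suspend.
+ */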
+
+static struct snd_soc_dai_ops msm_fe_Multimedia_dai_ops = {
+	.startup	= multimedia_startup,
+};
+
+static const struct snd_soc_component_driver msm_fe_dai_component = {
+	.name		= "msm-dai-fe",
+};
+
+static struct snd_soc_dai_driver msm_fe_dais[] = {
+	{
+		.playback = {
+			.stream_name = "MultiMedia1 Playback",
+			.aif_name = "MM_DL1",
+			.rates = (SNDRV_PCM_RATE_8000_384000 |
+					SNDRV_PCM_RATE_KNOT),
+			.formats = (SNDRV_PCM_FMTBIT_S16_LE |
+						SNDRV_PCM_FMTBIT_S24_LE |
+						SNDRV_PCM_FMTBIT_S24_3LE |
+						SNDRV_PCM_FMTBIT_S32_LE),
+			.channels_min = 1,
+			.channels_max = 8,
+			.rate_min =     8000,
+			.rate_max =	384000,
+		},
+		.capture = {
+			.stream_name = "MultiMedia1 Capture",
+			.aif_name = "MM_UL1",
+			.rates = (SNDRV_PCM_RATE_8000_384000 |
+					SNDRV_PCM_RATE_KNOT),
+			.formats = (SNDRV_PCM_FMTBIT_S16_LE |
+				    SNDRV_PCM_FMTBIT_S24_LE |
+				    SNDRV_PCM_FMTBIT_S24_3LE |
+				    SNDRV_PCM_FMTBIT_S32_LE),
+			.channels_min = 1,
+			.channels_max = 8,
+			.rate_min =     8000,
+			.rate_max =	48000,
+		},
+		.ops = &msm_fe_Multimedia_dai_ops,
+		.name = "MultiMedia1",
+		.probe = fe_dai_probe,
+	},
+	{
+		.playback = {
+			.stream_name = "MultiMedia2 Playback",
+			.aif_name = "MM_DL2",
+			.rates = (SNDRV_PCM_RATE_8000_384000 |
+					SNDRV_PCM_RATE_KNOT),
+			.formats = (SNDRV_PCM_FMTBIT_S16_LE |
+						SNDRV_PCM_FMTBIT_S24_LE |
+						SNDRV_PCM_FMTBIT_S24_3LE |
+						SNDRV_PCM_FMTBIT_S32_LE),
+			.channels_min = 1,
+			.channels_max = 8,
+			.rate_min =     8000,
+			.rate_max =	384000,
+		},
+		.capture = {
+			.stream_name = "MultiMedia2 Capture",
+			.aif_name = "MM_UL2",
+			.rates = (SNDRV_PCM_RATE_8000_384000 |
+					SNDRV_PCM_RATE_KNOT),
+			.formats = (SNDRV_PCM_FMTBIT_S16_LE |
+				    SNDRV_PCM_FMTBIT_S24_LE |
+				    SNDRV_PCM_FMTBIT_S24_3LE |
+				    SNDRV_PCM_FMTBIT_S32_LE),
+			.channels_min = 1,
+			.channels_max = 8,
+			.rate_min =     8000,
+			.rate_max =	48000,
+		},
+		.ops = &msm_fe_Multimedia_dai_ops,
+		.name = "MultiMedia2",
+		.probe = fe_dai_probe,
+	},
+	{
+		.playback = {
+			.stream_name = "CS-VOICE Playback",
+			.aif_name = "CS-VOICE_DL1",
+			.rates = SNDRV_PCM_RATE_8000_48000,
+			.formats = SNDRV_PCM_FMTBIT_S16_LE,
+			.channels_min = 1,
+			.channels_max = 2,
+			.rate_min =     8000,
+			.rate_max =     48000,
+		},
+		.capture = {
+			.stream_name = "CS-VOICE Capture",
+			.aif_name = "CS-VOICE_UL1",
+			.rates = SNDRV_PCM_RATE_8000_48000,
+			.formats = SNDRV_PCM_FMTBIT_S16_LE,
+			.channels_min = 1,
+			.channels_max = 2,
+			.rate_min =     8000,
+			.rate_max =     48000,
+		},
+		.ops = &msm_fe_dai_ops,
+		.name = "CS-VOICE",
+		.probe = fe_dai_probe,
+	},
+	{
+		.playback = {
+			.stream_name = "VoIP Playback",
+			.aif_name = "VOIP_DL",
+			.rates = SNDRV_PCM_RATE_8000_48000,
+			.formats = SNDRV_PCM_FMTBIT_S16_LE |
+					SNDRV_PCM_FMTBIT_SPECIAL,
+			.channels_min = 1,
+			.channels_max = 2,
+			.rate_min =	8000,
+			.rate_max = 48000,
+		},
+		.capture = {
+			.stream_name = "VoIP Capture",
+			.aif_name = "VOIP_UL",
+			.rates = SNDRV_PCM_RATE_8000_48000,
+			.formats = SNDRV_PCM_FMTBIT_S16_LE |
+					SNDRV_PCM_FMTBIT_SPECIAL,
+			.channels_min = 1,
+			.channels_max = 2,
+			.rate_min =	8000,
+			.rate_max = 48000,
+		},
+		.ops = &msm_fe_dai_ops,
+		.name = "VoIP",
+		.probe = fe_dai_probe,
+	},
+	{
+		.playback = {
+			.stream_name = "MultiMedia3 Playback",
+			.aif_name = "MM_DL3",
+			.rates = (SNDRV_PCM_RATE_8000_384000 |
+					SNDRV_PCM_RATE_KNOT),
+			.formats = (SNDRV_PCM_FMTBIT_S16_LE |
+						SNDRV_PCM_FMTBIT_S24_LE |
+						SNDRV_PCM_FMTBIT_S24_3LE |
+						SNDRV_PCM_FMTBIT_S32_LE),
+			.channels_min = 1,
+			.channels_max = 6,
+			.rate_min =	8000,
+			.rate_max = 384000,
+		},
+		.capture = {
+			.stream_name = "MultiMedia3 Capture",
+			.aif_name = "MM_UL3",
+			.rates = (SNDRV_PCM_RATE_8000_384000 |
+					SNDRV_PCM_RATE_KNOT),
+			.formats = (SNDRV_PCM_FMTBIT_S16_LE |
+						SNDRV_PCM_FMTBIT_S24_LE |
+						SNDRV_PCM_FMTBIT_S24_3LE |
+						SNDRV_PCM_FMTBIT_S32_LE),
+			.channels_min = 1,
+			.channels_max = 8,
+			.rate_min =     8000,
+			.rate_max =	48000,
+		},
+		.ops = &msm_fe_Multimedia_dai_ops,
+		.name = "MultiMedia3",
+		.probe = fe_dai_probe,
+	},
+	{
+		.playback = {
+			.stream_name = "MultiMedia4 Playback",
+			.aif_name = "MM_DL4",
+			.rates = (SNDRV_PCM_RATE_8000_384000 |
+					SNDRV_PCM_RATE_KNOT),
+			.formats = (SNDRV_PCM_FMTBIT_S16_LE |
+						SNDRV_PCM_FMTBIT_S24_LE |
+						SNDRV_PCM_FMTBIT_S24_3LE |
+						SNDRV_PCM_FMTBIT_S32_LE),
+			.channels_min = 1,
+			.channels_max = 8,
+			.rate_min =	8000,
+			.rate_max = 384000,
+		},
+		.ops = &msm_fe_Multimedia_dai_ops,
+		.compress_new = snd_soc_new_compress,
+		.name = "MultiMedia4",
+		.probe = fe_dai_probe,
+	},
+	{
+		.playback = {
+			.stream_name = "MultiMedia5 Playback",
+			.aif_name = "MM_DL5",
+			.rates = (SNDRV_PCM_RATE_8000_384000 |
+					SNDRV_PCM_RATE_KNOT),
+			.formats = (SNDRV_PCM_FMTBIT_S16_LE |
+						SNDRV_PCM_FMTBIT_S24_LE |
+						SNDRV_PCM_FMTBIT_S24_3LE |
+						SNDRV_PCM_FMTBIT_S32_LE),
+			.channels_min = 1,
+			.channels_max = 8,
+			.rate_min =	8000,
+			.rate_max = 384000,
+		},
+		.capture = {
+			.stream_name = "MultiMedia5 Capture",
+			.aif_name = "MM_UL5",
+			.rates = (SNDRV_PCM_RATE_8000_48000 |
+					SNDRV_PCM_RATE_KNOT),
+			.formats = (SNDRV_PCM_FMTBIT_S16_LE |
+				    SNDRV_PCM_FMTBIT_S24_LE |
+				    SNDRV_PCM_FMTBIT_S24_3LE |
+				    SNDRV_PCM_FMTBIT_S32_LE),
+			.channels_min = 1,
+			.channels_max = 8,
+			.rate_min =     8000,
+			.rate_max =	48000,
+		},
+		.ops = &msm_fe_Multimedia_dai_ops,
+		.name = "MultiMedia5",
+		.probe = fe_dai_probe,
+	},
+	{
+		.playback = {
+			.stream_name = "MultiMedia6 Playback",
+			.aif_name = "MM_DL6",
+			.rates = (SNDRV_PCM_RATE_8000_384000 |
+					SNDRV_PCM_RATE_KNOT),
+			.formats = (SNDRV_PCM_FMTBIT_S16_LE |
+						SNDRV_PCM_FMTBIT_S24_LE |
+						SNDRV_PCM_FMTBIT_S24_3LE |
+						SNDRV_PCM_FMTBIT_S32_LE),
+			.channels_min = 1,
+			.channels_max = 8,
+			.rate_min =	8000,
+			.rate_max = 384000,
+		},
+		.capture = {
+			.stream_name = "MultiMedia6 Capture",
+			.aif_name = "MM_UL6",
+			.rates = (SNDRV_PCM_RATE_8000_48000 |
+					SNDRV_PCM_RATE_KNOT),
+			.formats = (SNDRV_PCM_FMTBIT_S16_LE |
+				    SNDRV_PCM_FMTBIT_S24_LE |
+				    SNDRV_PCM_FMTBIT_S24_3LE |
+				    SNDRV_PCM_FMTBIT_S32_LE),
+			.channels_min = 1,
+			.channels_max = 8,
+			.rate_min =     8000,
+			.rate_max =	48000,
+		},
+		.ops = &msm_fe_Multimedia_dai_ops,
+		.name = "MultiMedia6",
+		.probe = fe_dai_probe,
+	},
+	{
+		.playback = {
+			.stream_name = "MultiMedia7 Playback",
+			.aif_name = "MM_DL7",
+			.rates = (SNDRV_PCM_RATE_8000_384000 |
+					SNDRV_PCM_RATE_KNOT),
+			.formats = (SNDRV_PCM_FMTBIT_S16_LE |
+						SNDRV_PCM_FMTBIT_S24_LE |
+						SNDRV_PCM_FMTBIT_S24_3LE |
+						SNDRV_PCM_FMTBIT_S32_LE),
+			.channels_min = 1,
+			.channels_max = 8,
+			.rate_min =	8000,
+			.rate_max = 384000,
+		},
+		.ops = &msm_fe_Multimedia_dai_ops,
+		.compress_new = snd_soc_new_compress,
+		.name = "MultiMedia7",
+		.probe = fe_dai_probe,
+	},
+	{
+		.playback = {
+			.stream_name = "MultiMedia8 Playback",
+			.aif_name = "MM_DL8",
+			.rates = (SNDRV_PCM_RATE_8000_384000 |
+					SNDRV_PCM_RATE_KNOT),
+			.formats = (SNDRV_PCM_FMTBIT_S16_LE |
+						SNDRV_PCM_FMTBIT_S24_LE |
+						SNDRV_PCM_FMTBIT_S24_3LE |
+						SNDRV_PCM_FMTBIT_S32_LE),
+			.channels_min = 1,
+			.channels_max = 8,
+			.rate_min =	8000,
+			.rate_max = 384000,
+		},
+		.capture = {
+			.stream_name = "MultiMedia8 Capture",
+			.aif_name = "MM_UL8",
+			.rates = (SNDRV_PCM_RATE_8000_48000 |
+					SNDRV_PCM_RATE_KNOT),
+			.formats = (SNDRV_PCM_FMTBIT_S16_LE |
+				    SNDRV_PCM_FMTBIT_S24_LE |
+				    SNDRV_PCM_FMTBIT_S24_3LE |
+				    SNDRV_PCM_FMTBIT_S32_LE),
+			.channels_min = 1,
+			.channels_max = 8,
+			.rate_min =     8000,
+			.rate_max =	48000,
+		},
+		.ops = &msm_fe_Multimedia_dai_ops,
+		.name = "MultiMedia8",
+		.probe = fe_dai_probe,
+	},
+	/* FE DAIs created for hostless operation purpose */
+	{
+		.playback = {
+			.stream_name = "SLIMBUS0_HOSTLESS Playback",
+			.aif_name = "SLIM0_DL_HL",
+			.rates = SNDRV_PCM_RATE_8000_384000,
+			.formats = (SNDRV_PCM_FMTBIT_S16_LE |
+				    SNDRV_PCM_FMTBIT_S24_LE),
+			.channels_min = 1,
+			.channels_max = 8,
+			.rate_min =     8000,
+			.rate_max =     384000,
+		},
+		.capture = {
+			.stream_name = "SLIMBUS0_HOSTLESS Capture",
+			.aif_name = "SLIM0_UL_HL",
+			.rates = SNDRV_PCM_RATE_8000_96000,
+			.formats = (SNDRV_PCM_FMTBIT_S16_LE |
+				    SNDRV_PCM_FMTBIT_S24_LE |
+				    SNDRV_PCM_FMTBIT_S32_LE),
+			.channels_min = 1,
+			.channels_max = 8,
+			.rate_min =     8000,
+			.rate_max =     96000,
+		},
+		.ops = &msm_fe_dai_ops,
+		.name = "SLIMBUS0_HOSTLESS",
+		.probe = fe_dai_probe,
+	},
+	{
+		.playback = {
+			.stream_name = "SLIMBUS1_HOSTLESS Playback",
+			.aif_name = "SLIM1_DL_HL",
+			.rates = SNDRV_PCM_RATE_8000_384000,
+			.formats = (SNDRV_PCM_FMTBIT_S16_LE |
+						SNDRV_PCM_FMTBIT_S24_LE),
+			.channels_min = 1,
+			.channels_max = 2,
+			.rate_min =     8000,
+			.rate_max =     384000,
+		},
+		.capture = {
+			.stream_name = "SLIMBUS1_HOSTLESS Capture",
+			.aif_name = "SLIM1_UL_HL",
+			.rates = SNDRV_PCM_RATE_8000_48000,
+			.formats = (SNDRV_PCM_FMTBIT_S16_LE |
+				    SNDRV_PCM_FMTBIT_S24_LE),
+			.channels_min = 1,
+			.channels_max = 2,
+			.rate_min =     8000,
+			.rate_max =     48000,
+		},
+		.ops = &msm_fe_dai_ops,
+		.name = "SLIMBUS1_HOSTLESS",
+		.probe = fe_dai_probe,
+	},
+	{
+		.playback = {
+			.stream_name = "SLIMBUS3_HOSTLESS Playback",
+			.aif_name = "SLIM3_DL_HL",
+			.rates = SNDRV_PCM_RATE_8000_384000,
+			.formats = (SNDRV_PCM_FMTBIT_S16_LE |
+						SNDRV_PCM_FMTBIT_S24_LE),
+			.channels_min = 1,
+			.channels_max = 2,
+			.rate_min =     8000,
+			.rate_max =     384000,
+		},
+		.capture = {
+			.stream_name = "SLIMBUS3_HOSTLESS Capture",
+			.aif_name = "SLIM3_UL_HL",
+			.rates = SNDRV_PCM_RATE_8000_48000,
+			.formats = (SNDRV_PCM_FMTBIT_S16_LE |
+				    SNDRV_PCM_FMTBIT_S24_LE),
+			.channels_min = 1,
+			.channels_max = 2,
+			.rate_min =     8000,
+			.rate_max =     48000,
+		},
+		.ops = &msm_fe_dai_ops,
+		.name = "SLIMBUS3_HOSTLESS",
+		.probe = fe_dai_probe,
+	},
+	{
+		.playback = {
+			.stream_name = "SLIMBUS4_HOSTLESS Playback",
+			.aif_name = "SLIM4_DL_HL",
+			.rates = SNDRV_PCM_RATE_8000_384000,
+			.formats = (SNDRV_PCM_FMTBIT_S16_LE |
+						SNDRV_PCM_FMTBIT_S24_LE),
+			.channels_min = 1,
+			.channels_max = 2,
+			.rate_min =     8000,
+			.rate_max =     384000,
+		},
+		.capture = {
+			.stream_name = "SLIMBUS4_HOSTLESS Capture",
+			.aif_name = "SLIM4_UL_HL",
+			.rates = SNDRV_PCM_RATE_8000_48000,
+			.formats = (SNDRV_PCM_FMTBIT_S16_LE |
+				    SNDRV_PCM_FMTBIT_S24_LE),
+			.channels_min = 1,
+			.channels_max = 2,
+			.rate_min =     8000,
+			.rate_max =     48000,
+		},
+		.ops = &msm_fe_dai_ops,
+		.name = "SLIMBUS4_HOSTLESS",
+		.probe = fe_dai_probe,
+	},
+	{
+		.playback = {
+			.stream_name = "SLIMBUS6_HOSTLESS Playback",
+			.aif_name = "SLIM6_DL_HL",
+			.rates = SNDRV_PCM_RATE_8000_384000,
+			.formats = (SNDRV_PCM_FMTBIT_S16_LE |
+				    SNDRV_PCM_FMTBIT_S24_LE),
+			.channels_min = 1,
+			.channels_max = 8,
+			.rate_min =     8000,
+			.rate_max =     384000,
+		},
+		.ops = &msm_fe_dai_ops,
+		.name = "SLIMBUS6_HOSTLESS",
+		.probe = fe_dai_probe,
+	},
+	{
+		.playback = {
+			.stream_name = "SLIMBUS7_HOSTLESS Playback",
+			.aif_name = "SLIM7_DL_HL",
+			.rates = SNDRV_PCM_RATE_8000_384000,
+			.formats = (SNDRV_PCM_FMTBIT_S16_LE |
+				    SNDRV_PCM_FMTBIT_S24_LE),
+			.channels_min = 1,
+			.channels_max = 8,
+			.rate_min =     8000,
+			.rate_max =     384000,
+		},
+		.capture = {
+			.stream_name = "SLIMBUS7_HOSTLESS Capture",
+			.aif_name = "SLIM7_UL_HL",
+			.rates = SNDRV_PCM_RATE_8000_384000,
+			.formats = (SNDRV_PCM_FMTBIT_S16_LE |
+				    SNDRV_PCM_FMTBIT_S24_LE),
+			.channels_min = 1,
+			.channels_max = 8,
+			.rate_min =     8000,
+			.rate_max =     384000,
+		},
+		.ops = &msm_fe_dai_ops,
+		.name = "SLIMBUS7_HOSTLESS",
+		.probe = fe_dai_probe,
+	},
+	{
+		.playback = {
+			.stream_name = "SLIMBUS8_HOSTLESS Playback",
+			.aif_name = "SLIM8_DL_HL",
+			.rates = SNDRV_PCM_RATE_8000_384000,
+			.formats = (SNDRV_PCM_FMTBIT_S16_LE |
+				    SNDRV_PCM_FMTBIT_S24_LE),
+			.channels_min = 1,
+			.channels_max = 8,
+			.rate_min =     8000,
+			.rate_max =     384000,
+		},
+		.capture = {
+			.stream_name = "SLIMBUS8_HOSTLESS Capture",
+			.aif_name = "SLIM8_UL_HL",
+			.rates = SNDRV_PCM_RATE_8000_384000,
+			.formats = (SNDRV_PCM_FMTBIT_S16_LE |
+				    SNDRV_PCM_FMTBIT_S24_LE),
+			.channels_min = 1,
+			.channels_max = 8,
+			.rate_min =     8000,
+			.rate_max =     384000,
+		},
+		.ops = &msm_fe_dai_ops,
+		.name = "SLIMBUS8_HOSTLESS",
+		.probe = fe_dai_probe,
+	},
+	{
+		.playback = {
+			.stream_name = "INT_FM_HOSTLESS Playback",
+			.aif_name = "INTFM_DL_HL",
+			.rates = SNDRV_PCM_RATE_8000_48000,
+			.formats = SNDRV_PCM_FMTBIT_S16_LE,
+			.channels_min = 1,
+			.channels_max = 2,
+			.rate_min =     8000,
+			.rate_max =     48000,
+		},
+		.capture = {
+			.stream_name = "INT_FM_HOSTLESS Capture",
+			.aif_name = "INTFM_UL_HL",
+			.rates = SNDRV_PCM_RATE_8000_48000,
+			.formats = SNDRV_PCM_FMTBIT_S16_LE,
+			.channels_min = 1,
+			.channels_max = 2,
+			.rate_min =     8000,
+			.rate_max =     48000,
+		},
+		.ops = &msm_fe_dai_ops,
+		.name = "INT_FM_HOSTLESS",
+		.probe = fe_dai_probe,
+	},
+	{
+		.playback = {
+			.stream_name = "INT_HFP_BT Hostless Playback",
+			.aif_name = "INTHFP_DL_HL",
+			.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000,
+			.formats = SNDRV_PCM_FMTBIT_S16_LE,
+			.channels_min = 1,
+			.channels_max = 2,
+			.rate_min =     8000,
+			.rate_max =     16000,
+		},
+		.capture = {
+			.stream_name = "INT_HFP_BT Hostless Capture",
+			.aif_name = "INTHFP_UL_HL",
+			.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000,
+			.formats = SNDRV_PCM_FMTBIT_S16_LE,
+			.channels_min = 1,
+			.channels_max = 2,
+			.rate_min =     8000,
+			.rate_max =     16000,
+		},
+		.ops = &msm_fe_dai_ops,
+		.name = "INT_HFP_BT_HOSTLESS",
+		.probe = fe_dai_probe,
+	},
+	{
+		.playback = {
+			.stream_name = "USBAUDIO_HOSTLESS Playback",
+			.aif_name = "USBAUDIO_DL_HL",
+			.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_11025 |
+				SNDRV_PCM_RATE_16000 | SNDRV_PCM_RATE_22050 |
+				SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_44100 |
+				SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_88200 |
+				SNDRV_PCM_RATE_96000 | SNDRV_PCM_RATE_176400 |
+				SNDRV_PCM_RATE_192000 | SNDRV_PCM_RATE_352800 |
+				SNDRV_PCM_RATE_384000,
+			.formats = SNDRV_PCM_FMTBIT_S16_LE |
+				SNDRV_PCM_FMTBIT_S24_LE |
+				SNDRV_PCM_FMTBIT_S24_3LE |
+				SNDRV_PCM_FMTBIT_S32_LE,
+			.channels_min = 1,
+			.channels_max = 8,
+			.rate_min =     8000,
+			.rate_max =     384000,
+		},
+		.capture = {
+			.stream_name = "USBAUDIO_HOSTLESS Capture",
+			.aif_name = "USBAUDIO_UL_HL",
+			.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_11025 |
+				SNDRV_PCM_RATE_16000 | SNDRV_PCM_RATE_22050 |
+				SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_44100 |
+				SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_88200 |
+				SNDRV_PCM_RATE_96000 | SNDRV_PCM_RATE_176400 |
+				SNDRV_PCM_RATE_192000 | SNDRV_PCM_RATE_352800 |
+				SNDRV_PCM_RATE_384000,
+			.formats = SNDRV_PCM_FMTBIT_S16_LE |
+				SNDRV_PCM_FMTBIT_S24_LE |
+				SNDRV_PCM_FMTBIT_S24_3LE |
+				SNDRV_PCM_FMTBIT_S32_LE,
+			.channels_min = 1,
+			.channels_max = 8,
+			.rate_min =     8000,
+			.rate_max =     384000,
+		},
+		.ops = &msm_fe_dai_ops,
+		.name = "USBAUDIO_HOSTLESS",
+		.probe = fe_dai_probe,
+	},
+	{
+		.playback = {
+			.stream_name = "AFE Playback",
+			.aif_name = "PCM_RX",
+			.rates = (SNDRV_PCM_RATE_8000 |
+				SNDRV_PCM_RATE_16000 |
+				SNDRV_PCM_RATE_48000),
+			.formats = SNDRV_PCM_FMTBIT_S16_LE,
+			.channels_min = 1,
+			.channels_max = 2,
+			.rate_min =     8000,
+			.rate_max =     48000,
+		},
+		.capture = {
+			.stream_name = "AFE Capture",
+			.aif_name = "PCM_TX",
+			.rates = (SNDRV_PCM_RATE_8000 |
+				SNDRV_PCM_RATE_16000 |
+				SNDRV_PCM_RATE_48000),
+			.formats = SNDRV_PCM_FMTBIT_S16_LE,
+			.channels_min = 1,
+			.channels_max = 2,
+			.rate_min =     8000,
+			.rate_max =     48000,
+		},
+		.ops = &msm_fe_dai_ops,
+		.name = "AFE-PROXY",
+		.probe = fe_dai_probe,
+	},
+	{
+		.playback = {
+			.stream_name = "HDMI_HOSTLESS Playback",
+			.aif_name = "HDMI_DL_HL",
+			.rates = SNDRV_PCM_RATE_8000_48000,
+			.formats = (SNDRV_PCM_FMTBIT_S16_LE |
+				    SNDRV_PCM_FMTBIT_S24_LE),
+			.channels_min = 1,
+			.channels_max = 2,
+			.rate_min = 8000,
+			.rate_max = 48000,
+		},
+		.ops = &msm_fe_dai_ops,
+		.name = "HDMI_HOSTLESS",
+		.probe = fe_dai_probe,
+	},
+	{
+		.playback = {
+			.stream_name = "AUXPCM_HOSTLESS Playback",
+			.aif_name = "AUXPCM_DL_HL",
+			.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000,
+			.formats = SNDRV_PCM_FMTBIT_S16_LE,
+			.channels_min = 1,
+			.channels_max = 1,
+			.rate_min =     8000,
+			.rate_max =     16000,
+		},
+		.capture = {
+			.stream_name = "AUXPCM_HOSTLESS Capture",
+			.aif_name = "AUXPCM_UL_HL",
+			.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000,
+			.formats = SNDRV_PCM_FMTBIT_S16_LE,
+			.channels_min = 1,
+			.channels_max = 1,
+			.rate_min =     8000,
+			.rate_max =    16000,
+		},
+		.ops = &msm_fe_dai_ops,
+		.name = "AUXPCM_HOSTLESS",
+		.probe = fe_dai_probe,
+	},
+	{
+		.playback = {
+			.stream_name = "VOICE_STUB Playback",
+			.aif_name = "VOICE_STUB_DL",
+			.rates = SNDRV_PCM_RATE_8000_48000,
+			.formats = SNDRV_PCM_FMTBIT_S16_LE,
+			.channels_min = 1,
+			.channels_max = 2,
+			.rate_min = 8000,
+			.rate_max = 48000,
+		},
+		.capture = {
+			.stream_name = "VOICE_STUB Capture",
+			.aif_name = "VOICE_STUB_UL",
+			.rates = SNDRV_PCM_RATE_8000_48000,
+			.formats = SNDRV_PCM_FMTBIT_S16_LE,
+			.channels_min = 1,
+			.channels_max = 2,
+			.rate_min = 8000,
+			.rate_max = 48000,
+		},
+		.ops = &msm_fe_dai_ops,
+		.name = "VOICE_STUB",
+		.probe = fe_dai_probe,
+	},
+	{
+		.playback = {
+			.stream_name = "VoLTE Playback",
+			.aif_name = "VoLTE_DL",
+			.rates = SNDRV_PCM_RATE_8000_48000,
+			.formats = SNDRV_PCM_FMTBIT_S16_LE,
+			.channels_min = 1,
+			.channels_max = 2,
+			.rate_min =     8000,
+			.rate_max =     48000,
+		},
+		.capture = {
+			.stream_name = "VoLTE Capture",
+			.aif_name = "VoLTE_UL",
+			.rates = SNDRV_PCM_RATE_8000_48000,
+			.formats = SNDRV_PCM_FMTBIT_S16_LE,
+			.channels_min = 1,
+			.channels_max = 2,
+			.rate_min =     8000,
+			.rate_max =     48000,
+		},
+		.ops = &msm_fe_dai_ops,
+		.name = "VoLTE",
+		.probe = fe_dai_probe,
+	},
+	{
+		.playback = {
+			.stream_name = "MI2S_RX_HOSTLESS Playback",
+			.aif_name = "MI2S_DL_HL",
+			.rates = SNDRV_PCM_RATE_8000_48000,
+			.formats = SNDRV_PCM_FMTBIT_S16_LE,
+			.channels_min = 1,
+			.channels_max = 2,
+			.rate_min = 8000,
+			.rate_max = 48000,
+		},
+		.capture = {
+			.stream_name = "MI2S_TX_HOSTLESS Capture",
+			.aif_name = "MI2S_UL_HL",
+			.rates = SNDRV_PCM_RATE_8000_48000,
+			.formats = (SNDRV_PCM_FMTBIT_S16_LE |
+				    SNDRV_PCM_FMTBIT_S24_LE),
+			.channels_min = 1,
+			.channels_max = 2,
+			.rate_min = 8000,
+			.rate_max = 48000,
+		},
+		.ops = &msm_fe_dai_ops,
+		.name = "MI2S_TX_HOSTLESS",
+		.probe = fe_dai_probe,
+	},
+	{
+		.playback = {
+			.stream_name = "SEC_I2S_RX_HOSTLESS Playback",
+			.aif_name = "SEC_I2S_DL_HL",
+			.rates = SNDRV_PCM_RATE_8000_48000,
+			.formats = SNDRV_PCM_FMTBIT_S16_LE,
+			.channels_min = 1,
+			.channels_max = 2,
+			.rate_min =     8000,
+			.rate_max =    48000,
+		},
+		.ops = &msm_fe_dai_ops,
+		.name = "SEC_I2S_RX_HOSTLESS",
+		.probe = fe_dai_probe,
+	},
+	{
+		.capture = {
+			.stream_name = "Primary MI2S_TX Hostless Capture",
+			.aif_name = "PRI_MI2S_UL_HL",
+			.rates = SNDRV_PCM_RATE_8000_48000,
+			.formats = (SNDRV_PCM_FMTBIT_S16_LE |
+				    SNDRV_PCM_FMTBIT_S24_LE),
+			.channels_min = 1,
+			.channels_max = 2,
+			.rate_min = 8000,
+			.rate_max = 48000,
+		},
+		.ops = &msm_fe_dai_ops,
+		.name = "PRI_MI2S_TX_HOSTLESS",
+		.probe = fe_dai_probe,
+	},
+	{
+		.playback = {
+			.stream_name = "Primary MI2S_RX Hostless Playback",
+			.aif_name = "PRI_MI2S_DL_HL",
+			.rates = SNDRV_PCM_RATE_8000_384000,
+			.formats = (SNDRV_PCM_FMTBIT_S16_LE |
+				    SNDRV_PCM_FMTBIT_S24_LE),
+			.channels_min = 1,
+			.channels_max = 2,
+			.rate_min =     8000,
+			.rate_max =    384000,
+		},
+		.ops = &msm_fe_dai_ops,
+		.name = "PRI_MI2S_RX_HOSTLESS",
+		.probe = fe_dai_probe,
+	},
+	{
+		.capture = {
+			.stream_name = "Secondary MI2S_TX Hostless Capture",
+			.aif_name = "SEC_MI2S_UL_HL",
+			.rates = SNDRV_PCM_RATE_8000_48000,
+			.formats = (SNDRV_PCM_FMTBIT_S16_LE |
+				    SNDRV_PCM_FMTBIT_S24_LE),
+			.channels_min = 1,
+			.channels_max = 2,
+			.rate_min = 8000,
+			.rate_max = 48000,
+		},
+		.ops = &msm_fe_dai_ops,
+		.name = "SEC_MI2S_TX_HOSTLESS",
+		.probe = fe_dai_probe,
+	},
+	{
+		.playback = {
+			.stream_name = "Secondary MI2S_RX Hostless Playback",
+			.aif_name = "SEC_MI2S_DL_HL",
+			.rates = SNDRV_PCM_RATE_8000_384000,
+			.formats = SNDRV_PCM_FMTBIT_S16_LE |
+				    SNDRV_PCM_FMTBIT_S24_LE,
+			.channels_min = 1,
+			.channels_max = 2,
+			.rate_min = 8000,
+			.rate_max = 384000,
+		},
+		.ops = &msm_fe_dai_ops,
+		.name = "SEC_MI2S_RX_HOSTLESS",
+		.probe = fe_dai_probe,
+	},
+	{
+		.capture = {
+			.stream_name = "Tertiary MI2S_TX Hostless Capture",
+			.aif_name = "TERT_MI2S_UL_HL",
+			.rates = SNDRV_PCM_RATE_8000_48000,
+			.formats = (SNDRV_PCM_FMTBIT_S16_LE |
+				    SNDRV_PCM_FMTBIT_S24_LE),
+			.channels_min = 1,
+			.channels_max = 2,
+			.rate_min = 8000,
+			.rate_max = 48000,
+		},
+		.ops = &msm_fe_dai_ops,
+		.name = "TERT_MI2S_TX_HOSTLESS",
+		.probe = fe_dai_probe,
+	},
+	{
+		.playback = {
+			.stream_name = "Tertiary MI2S_RX Hostless Playback",
+			.aif_name = "TERT_MI2S_DL_HL",
+			.rates = SNDRV_PCM_RATE_8000_384000,
+			.formats = SNDRV_PCM_FMTBIT_S16_LE |
+				    SNDRV_PCM_FMTBIT_S24_LE,
+			.channels_min = 1,
+			.channels_max = 2,
+			.rate_min =	8000,
+			.rate_max =    384000,
+		},
+		.ops = &msm_fe_dai_ops,
+		.name = "TERT_MI2S_RX_HOSTLESS",
+		.probe = fe_dai_probe,
+	},
+	{
+		.capture = {
+			.stream_name = "Quaternary MI2S_TX Hostless Capture",
+			.aif_name = "QUAT_MI2S_UL_HL",
+			.rates = SNDRV_PCM_RATE_8000_48000,
+			.formats = (SNDRV_PCM_FMTBIT_S16_LE |
+				    SNDRV_PCM_FMTBIT_S24_LE),
+			.channels_min = 1,
+			.channels_max = 2,
+			.rate_min = 8000,
+			.rate_max = 48000,
+		},
+		.ops = &msm_fe_dai_ops,
+		.name = "QUAT_MI2S_TX_HOSTLESS",
+		.probe = fe_dai_probe,
+	},
+	{
+		.playback = {
+			.stream_name = "Quaternary MI2S_RX Hostless Playback",
+			.aif_name = "QUAT_MI2S_DL_HL",
+			.rates = SNDRV_PCM_RATE_8000_384000,
+			.formats = SNDRV_PCM_FMTBIT_S16_LE |
+				    SNDRV_PCM_FMTBIT_S24_LE,
+			.channels_min = 1,
+			.channels_max = 8,
+			.rate_min = 8000,
+			.rate_max = 384000,
+		},
+		.ops = &msm_fe_dai_ops,
+		.name = "QUAT_MI2S_RX_HOSTLESS",
+		.probe = fe_dai_probe,
+	},
+	{
+		.playback = {
+			.stream_name = "INT0 MI2S_RX Hostless Playback",
+			.aif_name = "INT0_MI2S_DL_HL",
+			.rates = SNDRV_PCM_RATE_8000_192000,
+			.formats = (SNDRV_PCM_FMTBIT_S16_LE |
+				    SNDRV_PCM_FMTBIT_S24_LE),
+			.channels_min = 1,
+			.channels_max = 2,
+			.rate_min =     8000,
+			.rate_max =    192000,
+		},
+		.ops = &msm_fe_dai_ops,
+		.name = "INT0_MI2S_RX_HOSTLESS",
+		.probe = fe_dai_probe,
+	},
+	{
+		.playback = {
+			.stream_name = "INT4 MI2S_RX Hostless Playback",
+			.aif_name = "INT4_MI2S_DL_HL",
+			.rates = SNDRV_PCM_RATE_8000_192000,
+			.formats = (SNDRV_PCM_FMTBIT_S16_LE |
+				    SNDRV_PCM_FMTBIT_S24_LE),
+			.channels_min = 1,
+			.channels_max = 4,
+			.rate_min =     8000,
+			.rate_max =    192000,
+		},
+		.ops = &msm_fe_dai_ops,
+		.name = "INT4_MI2S_RX_HOSTLESS",
+		.probe = fe_dai_probe,
+	},
+	{
+		.capture = {
+			.stream_name = "INT3 MI2S_TX Hostless Capture",
+			.aif_name = "INT3_MI2S_UL_HL",
+			.rates = SNDRV_PCM_RATE_8000_48000,
+			.formats = (SNDRV_PCM_FMTBIT_S16_LE |
+				    SNDRV_PCM_FMTBIT_S24_LE),
+			.channels_min = 1,
+			.channels_max = 2,
+			.rate_min = 8000,
+			.rate_max = 48000,
+		},
+		.ops = &msm_fe_dai_ops,
+		.name = "INT3_MI2S_TX_HOSTLESS",
+		.probe = fe_dai_probe,
+	},
+	/* TDM Hostless */
+	{
+		.capture = {
+			.stream_name = "Primary TDM0 Hostless Capture",
+			.aif_name = "PRI_TDM_TX_0_UL_HL",
+			.rates = SNDRV_PCM_RATE_8000_48000,
+			.formats = (SNDRV_PCM_FMTBIT_S16_LE |
+				    SNDRV_PCM_FMTBIT_S24_LE),
+			.channels_min = 1,
+			.channels_max = 8,
+			.rate_min = 8000,
+			.rate_max = 48000,
+		},
+		.ops = &msm_fe_dai_ops,
+		.name = "PRI_TDM_TX_0_HOSTLESS",
+		.probe = fe_dai_probe,
+	},
+	{
+		.playback = {
+			.stream_name = "Primary TDM0 Hostless Playback",
+			.aif_name = "PRI_TDM_RX_0_DL_HL",
+			.rates = SNDRV_PCM_RATE_8000_48000,
+			.formats = (SNDRV_PCM_FMTBIT_S16_LE |
+				    SNDRV_PCM_FMTBIT_S24_LE),
+			.channels_min = 1,
+			.channels_max = 8,
+			.rate_min = 8000,
+			.rate_max = 48000,
+		},
+		.ops = &msm_fe_dai_ops,
+		.name = "PRI_TDM_RX_0_HOSTLESS",
+		.probe = fe_dai_probe,
+	},
+	{
+		.capture = {
+			.stream_name = "Primary TDM1 Hostless Capture",
+			.aif_name = "PRI_TDM_TX_1_UL_HL",
+			.rates = SNDRV_PCM_RATE_8000_48000,
+			.formats = (SNDRV_PCM_FMTBIT_S16_LE |
+				    SNDRV_PCM_FMTBIT_S24_LE),
+			.channels_min = 1,
+			.channels_max = 8,
+			.rate_min = 8000,
+			.rate_max = 48000,
+		},
+		.ops = &msm_fe_dai_ops,
+		.name = "PRI_TDM_TX_1_HOSTLESS",
+		.probe = fe_dai_probe,
+	},
+	{
+		.playback = {
+			.stream_name = "Primary TDM1 Hostless Playback",
+			.aif_name = "PRI_TDM_RX_1_DL_HL",
+			.rates = SNDRV_PCM_RATE_8000_48000,
+			.formats = (SNDRV_PCM_FMTBIT_S16_LE |
+				    SNDRV_PCM_FMTBIT_S24_LE),
+			.channels_min = 1,
+			.channels_max = 8,
+			.rate_min = 8000,
+			.rate_max = 48000,
+		},
+		.ops = &msm_fe_dai_ops,
+		.name = "PRI_TDM_RX_1_HOSTLESS",
+		.probe = fe_dai_probe,
+	},
+	{
+		.capture = {
+			.stream_name = "Primary TDM2 Hostless Capture",
+			.aif_name = "PRI_TDM_TX_2_UL_HL",
+			.rates = SNDRV_PCM_RATE_8000_48000,
+			.formats = (SNDRV_PCM_FMTBIT_S16_LE |
+				    SNDRV_PCM_FMTBIT_S24_LE),
+			.channels_min = 1,
+			.channels_max = 8,
+			.rate_min = 8000,
+			.rate_max = 48000,
+		},
+		.ops = &msm_fe_dai_ops,
+		.name = "PRI_TDM_TX_2_HOSTLESS",
+		.probe = fe_dai_probe,
+	},
+	{
+		.playback = {
+			.stream_name = "Primary TDM2 Hostless Playback",
+			.aif_name = "PRI_TDM_RX_2_DL_HL",
+			.rates = SNDRV_PCM_RATE_8000_48000,
+			.formats = (SNDRV_PCM_FMTBIT_S16_LE |
+				    SNDRV_PCM_FMTBIT_S24_LE),
+			.channels_min = 1,
+			.channels_max = 8,
+			.rate_min = 8000,
+			.rate_max = 48000,
+		},
+		.ops = &msm_fe_dai_ops,
+		.name = "PRI_TDM_RX_2_HOSTLESS",
+		.probe = fe_dai_probe,
+	},
+	{
+		.capture = {
+			.stream_name = "Primary TDM3 Hostless Capture",
+			.aif_name = "PRI_TDM_TX_3_UL_HL",
+			.rates = SNDRV_PCM_RATE_8000_48000,
+			.formats = (SNDRV_PCM_FMTBIT_S16_LE |
+				    SNDRV_PCM_FMTBIT_S24_LE),
+			.channels_min = 1,
+			.channels_max = 8,
+			.rate_min = 8000,
+			.rate_max = 48000,
+		},
+		.ops = &msm_fe_dai_ops,
+		.name = "PRI_TDM_TX_3_HOSTLESS",
+		.probe = fe_dai_probe,
+	},
+	{
+		.playback = {
+			.stream_name = "Primary TDM3 Hostless Playback",
+			.aif_name = "PRI_TDM_RX_3_DL_HL",
+			.rates = SNDRV_PCM_RATE_8000_48000,
+			.formats = (SNDRV_PCM_FMTBIT_S16_LE |
+				    SNDRV_PCM_FMTBIT_S24_LE),
+			.channels_min = 1,
+			.channels_max = 8,
+			.rate_min = 8000,
+			.rate_max = 48000,
+		},
+		.ops = &msm_fe_dai_ops,
+		.name = "PRI_TDM_RX_3_HOSTLESS",
+		.probe = fe_dai_probe,
+	},
+	{
+		.capture = {
+			.stream_name = "Primary TDM4 Hostless Capture",
+			.aif_name = "PRI_TDM_TX_4_UL_HL",
+			.rates = SNDRV_PCM_RATE_8000_48000,
+			.formats = (SNDRV_PCM_FMTBIT_S16_LE |
+				    SNDRV_PCM_FMTBIT_S24_LE),
+			.channels_min = 1,
+			.channels_max = 8,
+			.rate_min = 8000,
+			.rate_max = 48000,
+		},
+		.ops = &msm_fe_dai_ops,
+		.name = "PRI_TDM_TX_4_HOSTLESS",
+		.probe = fe_dai_probe,
+	},
+	{
+		.playback = {
+			.stream_name = "Primary TDM4 Hostless Playback",
+			.aif_name = "PRI_TDM_RX_4_DL_HL",
+			.rates = SNDRV_PCM_RATE_8000_48000,
+			.formats = (SNDRV_PCM_FMTBIT_S16_LE |
+				    SNDRV_PCM_FMTBIT_S24_LE),
+			.channels_min = 1,
+			.channels_max = 8,
+			.rate_min = 8000,
+			.rate_max = 48000,
+		},
+		.ops = &msm_fe_dai_ops,
+		.name = "PRI_TDM_RX_4_HOSTLESS",
+		.probe = fe_dai_probe,
+	},
+	{
+		.capture = {
+			.stream_name = "Primary TDM5 Hostless Capture",
+			.aif_name = "PRI_TDM_TX_5_UL_HL",
+			.rates = SNDRV_PCM_RATE_8000_48000,
+			.formats = (SNDRV_PCM_FMTBIT_S16_LE |
+				    SNDRV_PCM_FMTBIT_S24_LE),
+			.channels_min = 1,
+			.channels_max = 8,
+			.rate_min = 8000,
+			.rate_max = 48000,
+		},
+		.ops = &msm_fe_dai_ops,
+		.name = "PRI_TDM_TX_5_HOSTLESS",
+		.probe = fe_dai_probe,
+	},
+	{
+		.playback = {
+			.stream_name = "Primary TDM5 Hostless Playback",
+			.aif_name = "PRI_TDM_RX_5_DL_HL",
+			.rates = SNDRV_PCM_RATE_8000_48000,
+			.formats = (SNDRV_PCM_FMTBIT_S16_LE |
+				    SNDRV_PCM_FMTBIT_S24_LE),
+			.channels_min = 1,
+			.channels_max = 8,
+			.rate_min = 8000,
+			.rate_max = 48000,
+		},
+		.ops = &msm_fe_dai_ops,
+		.name = "PRI_TDM_RX_5_HOSTLESS",
+		.probe = fe_dai_probe,
+	},
+	{
+		.capture = {
+			.stream_name = "Primary TDM6 Hostless Capture",
+			.aif_name = "PRI_TDM_TX_6_UL_HL",
+			.rates = SNDRV_PCM_RATE_8000_48000,
+			.formats = (SNDRV_PCM_FMTBIT_S16_LE |
+				    SNDRV_PCM_FMTBIT_S24_LE),
+			.channels_min = 1,
+			.channels_max = 8,
+			.rate_min = 8000,
+			.rate_max = 48000,
+		},
+		.ops = &msm_fe_dai_ops,
+		.name = "PRI_TDM_TX_6_HOSTLESS",
+		.probe = fe_dai_probe,
+	},
+	{
+		.playback = {
+			.stream_name = "Primary TDM6 Hostless Playback",
+			.aif_name = "PRI_TDM_RX_6_DL_HL",
+			.rates = SNDRV_PCM_RATE_8000_48000,
+			.formats = (SNDRV_PCM_FMTBIT_S16_LE |
+				    SNDRV_PCM_FMTBIT_S24_LE),
+			.channels_min = 1,
+			.channels_max = 8,
+			.rate_min = 8000,
+			.rate_max = 48000,
+		},
+		.ops = &msm_fe_dai_ops,
+		.name = "PRI_TDM_RX_6_HOSTLESS",
+		.probe = fe_dai_probe,
+	},
+	{
+		.capture = {
+			.stream_name = "Primary TDM7 Hostless Capture",
+			.aif_name = "PRI_TDM_TX_7_UL_HL",
+			.rates = SNDRV_PCM_RATE_8000_48000,
+			.formats = (SNDRV_PCM_FMTBIT_S16_LE |
+				    SNDRV_PCM_FMTBIT_S24_LE),
+			.channels_min = 1,
+			.channels_max = 8,
+			.rate_min = 8000,
+			.rate_max = 48000,
+		},
+		.ops = &msm_fe_dai_ops,
+		.name = "PRI_TDM_TX_7_HOSTLESS",
+		.probe = fe_dai_probe,
+	},
+	{
+		.playback = {
+			.stream_name = "Primary TDM7 Hostless Playback",
+			.aif_name = "PRI_TDM_RX_7_DL_HL",
+			.rates = SNDRV_PCM_RATE_8000_48000,
+			.formats = (SNDRV_PCM_FMTBIT_S16_LE |
+				    SNDRV_PCM_FMTBIT_S24_LE),
+			.channels_min = 1,
+			.channels_max = 8,
+			.rate_min = 8000,
+			.rate_max = 48000,
+		},
+		.ops = &msm_fe_dai_ops,
+		.name = "PRI_TDM_RX_7_HOSTLESS",
+		.probe = fe_dai_probe,
+	},
+	{
+		.capture = {
+			.stream_name = "Secondary TDM0 Hostless Capture",
+			.aif_name = "SEC_TDM_TX_0_UL_HL",
+			.rates = SNDRV_PCM_RATE_8000_48000,
+			.formats = (SNDRV_PCM_FMTBIT_S16_LE |
+				    SNDRV_PCM_FMTBIT_S24_LE),
+			.channels_min = 1,
+			.channels_max = 8,
+			.rate_min = 8000,
+			.rate_max = 48000,
+		},
+		.ops = &msm_fe_dai_ops,
+		.name = "SEC_TDM_TX_0_HOSTLESS",
+		.probe = fe_dai_probe,
+	},
+	{
+		.playback = {
+			.stream_name = "Secondary TDM0 Hostless Playback",
+			.aif_name = "SEC_TDM_RX_0_DL_HL",
+			.rates = SNDRV_PCM_RATE_8000_48000,
+			.formats = (SNDRV_PCM_FMTBIT_S16_LE |
+				    SNDRV_PCM_FMTBIT_S24_LE),
+			.channels_min = 1,
+			.channels_max = 8,
+			.rate_min = 8000,
+			.rate_max = 48000,
+		},
+		.ops = &msm_fe_dai_ops,
+		.name = "SEC_TDM_RX_0_HOSTLESS",
+		.probe = fe_dai_probe,
+	},
+	{
+		.capture = {
+			.stream_name = "Secondary TDM1 Hostless Capture",
+			.aif_name = "SEC_TDM_TX_1_UL_HL",
+			.rates = SNDRV_PCM_RATE_8000_48000,
+			.formats = (SNDRV_PCM_FMTBIT_S16_LE |
+				    SNDRV_PCM_FMTBIT_S24_LE),
+			.channels_min = 1,
+			.channels_max = 8,
+			.rate_min = 8000,
+			.rate_max = 48000,
+		},
+		.ops = &msm_fe_dai_ops,
+		.name = "SEC_TDM_TX_1_HOSTLESS",
+		.probe = fe_dai_probe,
+	},
+	{
+		.playback = {
+			.stream_name = "Secondary TDM1 Hostless Playback",
+			.aif_name = "SEC_TDM_RX_1_DL_HL",
+			.rates = SNDRV_PCM_RATE_8000_48000,
+			.formats = (SNDRV_PCM_FMTBIT_S16_LE |
+				    SNDRV_PCM_FMTBIT_S24_LE),
+			.channels_min = 1,
+			.channels_max = 8,
+			.rate_min = 8000,
+			.rate_max = 48000,
+		},
+		.ops = &msm_fe_dai_ops,
+		.name = "SEC_TDM_RX_1_HOSTLESS",
+		.probe = fe_dai_probe,
+	},
+	{
+		.capture = {
+			.stream_name = "Secondary TDM2 Hostless Capture",
+			.aif_name = "SEC_TDM_TX_2_UL_HL",
+			.rates = SNDRV_PCM_RATE_8000_48000,
+			.formats = (SNDRV_PCM_FMTBIT_S16_LE |
+				    SNDRV_PCM_FMTBIT_S24_LE),
+			.channels_min = 1,
+			.channels_max = 8,
+			.rate_min = 8000,
+			.rate_max = 48000,
+		},
+		.ops = &msm_fe_dai_ops,
+		.name = "SEC_TDM_TX_2_HOSTLESS",
+		.probe = fe_dai_probe,
+	},
+	{
+		.playback = {
+			.stream_name = "Secondary TDM2 Hostless Playback",
+			.aif_name = "SEC_TDM_RX_2_DL_HL",
+			.rates = SNDRV_PCM_RATE_8000_48000,
+			.formats = (SNDRV_PCM_FMTBIT_S16_LE |
+				    SNDRV_PCM_FMTBIT_S24_LE),
+			.channels_min = 1,
+			.channels_max = 8,
+			.rate_min = 8000,
+			.rate_max = 48000,
+		},
+		.ops = &msm_fe_dai_ops,
+		.name = "SEC_TDM_RX_2_HOSTLESS",
+		.probe = fe_dai_probe,
+	},
+	{
+		.capture = {
+			.stream_name = "Secondary TDM3 Hostless Capture",
+			.aif_name = "SEC_TDM_TX_3_UL_HL",
+			.rates = SNDRV_PCM_RATE_8000_48000,
+			.formats = (SNDRV_PCM_FMTBIT_S16_LE |
+				    SNDRV_PCM_FMTBIT_S24_LE),
+			.channels_min = 1,
+			.channels_max = 8,
+			.rate_min = 8000,
+			.rate_max = 48000,
+		},
+		.ops = &msm_fe_dai_ops,
+		.name = "SEC_TDM_TX_3_HOSTLESS",
+		.probe = fe_dai_probe,
+	},
+	{
+		.playback = {
+			.stream_name = "Secondary TDM3 Hostless Playback",
+			.aif_name = "SEC_TDM_RX_3_DL_HL",
+			.rates = SNDRV_PCM_RATE_8000_48000,
+			.formats = (SNDRV_PCM_FMTBIT_S16_LE |
+				    SNDRV_PCM_FMTBIT_S24_LE),
+			.channels_min = 1,
+			.channels_max = 8,
+			.rate_min = 8000,
+			.rate_max = 48000,
+		},
+		.ops = &msm_fe_dai_ops,
+		.name = "SEC_TDM_RX_3_HOSTLESS",
+		.probe = fe_dai_probe,
+	},
+	{
+		.capture = {
+			.stream_name = "Secondary TDM4 Hostless Capture",
+			.aif_name = "SEC_TDM_TX_4_UL_HL",
+			.rates = SNDRV_PCM_RATE_8000_48000,
+			.formats = (SNDRV_PCM_FMTBIT_S16_LE |
+				    SNDRV_PCM_FMTBIT_S24_LE),
+			.channels_min = 1,
+			.channels_max = 8,
+			.rate_min = 8000,
+			.rate_max = 48000,
+		},
+		.ops = &msm_fe_dai_ops,
+		.name = "SEC_TDM_TX_4_HOSTLESS",
+		.probe = fe_dai_probe,
+	},
+	{
+		.playback = {
+			.stream_name = "Secondary TDM4 Hostless Playback",
+			.aif_name = "SEC_TDM_RX_4_DL_HL",
+			.rates = SNDRV_PCM_RATE_8000_48000,
+			.formats = (SNDRV_PCM_FMTBIT_S16_LE |
+				    SNDRV_PCM_FMTBIT_S24_LE),
+			.channels_min = 1,
+			.channels_max = 8,
+			.rate_min = 8000,
+			.rate_max = 48000,
+		},
+		.ops = &msm_fe_dai_ops,
+		.name = "SEC_TDM_RX_4_HOSTLESS",
+		.probe = fe_dai_probe,
+	},
+	{
+		.capture = {
+			.stream_name = "Secondary TDM5 Hostless Capture",
+			.aif_name = "SEC_TDM_TX_5_UL_HL",
+			.rates = SNDRV_PCM_RATE_8000_48000,
+			.formats = (SNDRV_PCM_FMTBIT_S16_LE |
+				    SNDRV_PCM_FMTBIT_S24_LE),
+			.channels_min = 1,
+			.channels_max = 8,
+			.rate_min = 8000,
+			.rate_max = 48000,
+		},
+		.ops = &msm_fe_dai_ops,
+		.name = "SEC_TDM_TX_5_HOSTLESS",
+		.probe = fe_dai_probe,
+	},
+	{
+		.playback = {
+			.stream_name = "Secondary TDM5 Hostless Playback",
+			.aif_name = "SEC_TDM_RX_5_DL_HL",
+			.rates = SNDRV_PCM_RATE_8000_48000,
+			.formats = (SNDRV_PCM_FMTBIT_S16_LE |
+				    SNDRV_PCM_FMTBIT_S24_LE),
+			.channels_min = 1,
+			.channels_max = 8,
+			.rate_min = 8000,
+			.rate_max = 48000,
+		},
+		.ops = &msm_fe_dai_ops,
+		.name = "SEC_TDM_RX_5_HOSTLESS",
+		.probe = fe_dai_probe,
+	},
+	{
+		.capture = {
+			.stream_name = "Secondary TDM6 Hostless Capture",
+			.aif_name = "SEC_TDM_TX_6_UL_HL",
+			.rates = SNDRV_PCM_RATE_8000_48000,
+			.formats = (SNDRV_PCM_FMTBIT_S16_LE |
+				    SNDRV_PCM_FMTBIT_S24_LE),
+			.channels_min = 1,
+			.channels_max = 8,
+			.rate_min = 8000,
+			.rate_max = 48000,
+		},
+		.ops = &msm_fe_dai_ops,
+		.name = "SEC_TDM_TX_6_HOSTLESS",
+		.probe = fe_dai_probe,
+	},
+	{
+		.playback = {
+			.stream_name = "Secondary TDM6 Hostless Playback",
+			.aif_name = "SEC_TDM_RX_6_DL_HL",
+			.rates = SNDRV_PCM_RATE_8000_48000,
+			.formats = (SNDRV_PCM_FMTBIT_S16_LE |
+				    SNDRV_PCM_FMTBIT_S24_LE),
+			.channels_min = 1,
+			.channels_max = 8,
+			.rate_min = 8000,
+			.rate_max = 48000,
+		},
+		.ops = &msm_fe_dai_ops,
+		.name = "SEC_TDM_RX_6_HOSTLESS",
+		.probe = fe_dai_probe,
+	},
+	{
+		.capture = {
+			.stream_name = "Secondary TDM7 Hostless Capture",
+			.aif_name = "SEC_TDM_TX_7_UL_HL",
+			.rates = SNDRV_PCM_RATE_8000_48000,
+			.formats = (SNDRV_PCM_FMTBIT_S16_LE |
+				    SNDRV_PCM_FMTBIT_S24_LE),
+			.channels_min = 1,
+			.channels_max = 8,
+			.rate_min = 8000,
+			.rate_max = 48000,
+		},
+		.ops = &msm_fe_dai_ops,
+		.name = "SEC_TDM_TX_7_HOSTLESS",
+		.probe = fe_dai_probe,
+	},
+	{
+		.playback = {
+			.stream_name = "Secondary TDM7 Hostless Playback",
+			.aif_name = "SEC_TDM_RX_7_DL_HL",
+			.rates = SNDRV_PCM_RATE_8000_48000,
+			.formats = (SNDRV_PCM_FMTBIT_S16_LE |
+				    SNDRV_PCM_FMTBIT_S24_LE),
+			.channels_min = 1,
+			.channels_max = 8,
+			.rate_min = 8000,
+			.rate_max = 48000,
+		},
+		.ops = &msm_fe_dai_ops,
+		.name = "SEC_TDM_RX_7_HOSTLESS",
+		.probe = fe_dai_probe,
+	},
+	{
+		.capture = {
+			.stream_name = "Tertiary TDM0 Hostless Capture",
+			.aif_name = "TERT_TDM_TX_0_UL_HL",
+			.rates = SNDRV_PCM_RATE_8000_48000,
+			.formats = (SNDRV_PCM_FMTBIT_S16_LE |
+				    SNDRV_PCM_FMTBIT_S24_LE),
+			.channels_min = 1,
+			.channels_max = 8,
+			.rate_min = 8000,
+			.rate_max = 48000,
+		},
+		.ops = &msm_fe_dai_ops,
+		.name = "TERT_TDM_TX_0_HOSTLESS",
+		.probe = fe_dai_probe,
+	},
+	{
+		.playback = {
+			.stream_name = "Tertiary TDM0 Hostless Playback",
+			.aif_name = "TERT_TDM_RX_0_DL_HL",
+			.rates = SNDRV_PCM_RATE_8000_48000,
+			.formats = (SNDRV_PCM_FMTBIT_S16_LE |
+				    SNDRV_PCM_FMTBIT_S24_LE),
+			.channels_min = 1,
+			.channels_max = 8,
+			.rate_min = 8000,
+			.rate_max = 48000,
+		},
+		.ops = &msm_fe_dai_ops,
+		.name = "TERT_TDM_RX_0_HOSTLESS",
+		.probe = fe_dai_probe,
+	},
+	{
+		.capture = {
+			.stream_name = "Tertiary TDM1 Hostless Capture",
+			.aif_name = "TERT_TDM_TX_1_UL_HL",
+			.rates = SNDRV_PCM_RATE_8000_48000,
+			.formats = (SNDRV_PCM_FMTBIT_S16_LE |
+				    SNDRV_PCM_FMTBIT_S24_LE),
+			.channels_min = 1,
+			.channels_max = 8,
+			.rate_min = 8000,
+			.rate_max = 48000,
+		},
+		.ops = &msm_fe_dai_ops,
+		.name = "TERT_TDM_TX_1_HOSTLESS",
+		.probe = fe_dai_probe,
+	},
+	{
+		.playback = {
+			.stream_name = "Tertiary TDM1 Hostless Playback",
+			.aif_name = "TERT_TDM_RX_1_DL_HL",
+			.rates = SNDRV_PCM_RATE_8000_48000,
+			.formats = (SNDRV_PCM_FMTBIT_S16_LE |
+				    SNDRV_PCM_FMTBIT_S24_LE),
+			.channels_min = 1,
+			.channels_max = 8,
+			.rate_min = 8000,
+			.rate_max = 48000,
+		},
+		.ops = &msm_fe_dai_ops,
+		.name = "TERT_TDM_RX_1_HOSTLESS",
+		.probe = fe_dai_probe,
+	},
+	{
+		.capture = {
+			.stream_name = "Tertiary TDM2 Hostless Capture",
+			.aif_name = "TERT_TDM_TX_2_UL_HL",
+			.rates = SNDRV_PCM_RATE_8000_48000,
+			.formats = (SNDRV_PCM_FMTBIT_S16_LE |
+				    SNDRV_PCM_FMTBIT_S24_LE),
+			.channels_min = 1,
+			.channels_max = 8,
+			.rate_min = 8000,
+			.rate_max = 48000,
+		},
+		.ops = &msm_fe_dai_ops,
+		.name = "TERT_TDM_TX_2_HOSTLESS",
+		.probe = fe_dai_probe,
+	},
+	{
+		.playback = {
+			.stream_name = "Tertiary TDM2 Hostless Playback",
+			.aif_name = "TERT_TDM_RX_2_DL_HL",
+			.rates = SNDRV_PCM_RATE_8000_48000,
+			.formats = (SNDRV_PCM_FMTBIT_S16_LE |
+				    SNDRV_PCM_FMTBIT_S24_LE),
+			.channels_min = 1,
+			.channels_max = 8,
+			.rate_min = 8000,
+			.rate_max = 48000,
+		},
+		.ops = &msm_fe_dai_ops,
+		.name = "TERT_TDM_RX_2_HOSTLESS",
+		.probe = fe_dai_probe,
+	},
+	{
+		.capture = {
+			.stream_name = "Tertiary TDM3 Hostless Capture",
+			.aif_name = "TERT_TDM_TX_3_UL_HL",
+			.rates = SNDRV_PCM_RATE_8000_48000,
+			.formats = (SNDRV_PCM_FMTBIT_S16_LE |
+				    SNDRV_PCM_FMTBIT_S24_LE),
+			.channels_min = 1,
+			.channels_max = 8,
+			.rate_min = 8000,
+			.rate_max = 48000,
+		},
+		.ops = &msm_fe_dai_ops,
+		.name = "TERT_TDM_TX_3_HOSTLESS",
+		.probe = fe_dai_probe,
+	},
+	{
+		.playback = {
+			.stream_name = "Tertiary TDM3 Hostless Playback",
+			.aif_name = "TERT_TDM_RX_3_DL_HL",
+			.rates = SNDRV_PCM_RATE_8000_48000,
+			.formats = (SNDRV_PCM_FMTBIT_S16_LE |
+				    SNDRV_PCM_FMTBIT_S24_LE),
+			.channels_min = 1,
+			.channels_max = 8,
+			.rate_min = 8000,
+			.rate_max = 48000,
+		},
+		.ops = &msm_fe_dai_ops,
+		.name = "TERT_TDM_RX_3_HOSTLESS",
+		.probe = fe_dai_probe,
+	},
+	{
+		.capture = {
+			.stream_name = "Tertiary TDM4 Hostless Capture",
+			.aif_name = "TERT_TDM_TX_4_UL_HL",
+			.rates = SNDRV_PCM_RATE_8000_48000,
+			.formats = (SNDRV_PCM_FMTBIT_S16_LE |
+				    SNDRV_PCM_FMTBIT_S24_LE),
+			.channels_min = 1,
+			.channels_max = 8,
+			.rate_min = 8000,
+			.rate_max = 48000,
+		},
+		.ops = &msm_fe_dai_ops,
+		.name = "TERT_TDM_TX_4_HOSTLESS",
+		.probe = fe_dai_probe,
+	},
+	{
+		.playback = {
+			.stream_name = "Tertiary TDM4 Hostless Playback",
+			.aif_name = "TERT_TDM_RX_4_DL_HL",
+			.rates = SNDRV_PCM_RATE_8000_48000,
+			.formats = (SNDRV_PCM_FMTBIT_S16_LE |
+				    SNDRV_PCM_FMTBIT_S24_LE),
+			.channels_min = 1,
+			.channels_max = 8,
+			.rate_min = 8000,
+			.rate_max = 48000,
+		},
+		.ops = &msm_fe_dai_ops,
+		.name = "TERT_TDM_RX_4_HOSTLESS",
+		.probe = fe_dai_probe,
+	},
+	{
+		.capture = {
+			.stream_name = "Tertiary TDM5 Hostless Capture",
+			.aif_name = "TERT_TDM_TX_5_UL_HL",
+			.rates = SNDRV_PCM_RATE_8000_48000,
+			.formats = (SNDRV_PCM_FMTBIT_S16_LE |
+				    SNDRV_PCM_FMTBIT_S24_LE),
+			.channels_min = 1,
+			.channels_max = 8,
+			.rate_min = 8000,
+			.rate_max = 48000,
+		},
+		.ops = &msm_fe_dai_ops,
+		.name = "TERT_TDM_TX_5_HOSTLESS",
+		.probe = fe_dai_probe,
+	},
+	{
+		.playback = {
+			.stream_name = "Tertiary TDM5 Hostless Playback",
+			.aif_name = "TERT_TDM_RX_5_DL_HL",
+			.rates = SNDRV_PCM_RATE_8000_48000,
+			.formats = (SNDRV_PCM_FMTBIT_S16_LE |
+				    SNDRV_PCM_FMTBIT_S24_LE),
+			.channels_min = 1,
+			.channels_max = 8,
+			.rate_min = 8000,
+			.rate_max = 48000,
+		},
+		.ops = &msm_fe_dai_ops,
+		.name = "TERT_TDM_RX_5_HOSTLESS",
+		.probe = fe_dai_probe,
+	},
+	{
+		.capture = {
+			.stream_name = "Tertiary TDM6 Hostless Capture",
+			.aif_name = "TERT_TDM_TX_6_UL_HL",
+			.rates = SNDRV_PCM_RATE_8000_48000,
+			.formats = (SNDRV_PCM_FMTBIT_S16_LE |
+				    SNDRV_PCM_FMTBIT_S24_LE),
+			.channels_min = 1,
+			.channels_max = 8,
+			.rate_min = 8000,
+			.rate_max = 48000,
+		},
+		.ops = &msm_fe_dai_ops,
+		.name = "TERT_TDM_TX_6_HOSTLESS",
+		.probe = fe_dai_probe,
+	},
+	{
+		.playback = {
+			.stream_name = "Tertiary TDM6 Hostless Playback",
+			.aif_name = "TERT_TDM_RX_6_DL_HL",
+			.rates = SNDRV_PCM_RATE_8000_48000,
+			.formats = (SNDRV_PCM_FMTBIT_S16_LE |
+				    SNDRV_PCM_FMTBIT_S24_LE),
+			.channels_min = 1,
+			.channels_max = 8,
+			.rate_min = 8000,
+			.rate_max = 48000,
+		},
+		.ops = &msm_fe_dai_ops,
+		.name = "TERT_TDM_RX_6_HOSTLESS",
+		.probe = fe_dai_probe,
+	},
+	{
+		.capture = {
+			.stream_name = "Tertiary TDM7 Hostless Capture",
+			.aif_name = "TERT_TDM_TX_7_UL_HL",
+			.rates = SNDRV_PCM_RATE_8000_48000,
+			.formats = (SNDRV_PCM_FMTBIT_S16_LE |
+				    SNDRV_PCM_FMTBIT_S24_LE),
+			.channels_min = 1,
+			.channels_max = 8,
+			.rate_min = 8000,
+			.rate_max = 48000,
+		},
+		.ops = &msm_fe_dai_ops,
+		.name = "TERT_TDM_TX_7_HOSTLESS",
+		.probe = fe_dai_probe,
+	},
+	{
+		.playback = {
+			.stream_name = "Tertiary TDM7 Hostless Playback",
+			.aif_name = "TERT_TDM_RX_7_DL_HL",
+			.rates = SNDRV_PCM_RATE_8000_48000,
+			.formats = (SNDRV_PCM_FMTBIT_S16_LE |
+				    SNDRV_PCM_FMTBIT_S24_LE),
+			.channels_min = 1,
+			.channels_max = 8,
+			.rate_min = 8000,
+			.rate_max = 48000,
+		},
+		.ops = &msm_fe_dai_ops,
+		.name = "TERT_TDM_RX_7_HOSTLESS",
+		.probe = fe_dai_probe,
+	},
+	{
+		.capture = {
+			.stream_name = "Quaternary TDM0 Hostless Capture",
+			.aif_name = "QUAT_TDM_TX_0_UL_HL",
+			.rates = SNDRV_PCM_RATE_8000_48000,
+			.formats = (SNDRV_PCM_FMTBIT_S16_LE |
+				    SNDRV_PCM_FMTBIT_S24_LE),
+			.channels_min = 1,
+			.channels_max = 8,
+			.rate_min = 8000,
+			.rate_max = 48000,
+		},
+		.ops = &msm_fe_dai_ops,
+		.name = "QUAT_TDM_TX_0_HOSTLESS",
+		.probe = fe_dai_probe,
+	},
+	{
+		.playback = {
+			.stream_name = "Quaternary TDM0 Hostless Playback",
+			.aif_name = "QUAT_TDM_RX_0_DL_HL",
+			.rates = SNDRV_PCM_RATE_8000_48000,
+			.formats = (SNDRV_PCM_FMTBIT_S16_LE |
+				    SNDRV_PCM_FMTBIT_S24_LE),
+			.channels_min = 1,
+			.channels_max = 8,
+			.rate_min = 8000,
+			.rate_max = 48000,
+		},
+		.ops = &msm_fe_dai_ops,
+		.name = "QUAT_TDM_RX_0_HOSTLESS",
+		.probe = fe_dai_probe,
+	},
+	{
+		.capture = {
+			.stream_name = "Quaternary TDM1 Hostless Capture",
+			.aif_name = "QUAT_TDM_TX_1_UL_HL",
+			.rates = SNDRV_PCM_RATE_8000_48000,
+			.formats = (SNDRV_PCM_FMTBIT_S16_LE |
+				    SNDRV_PCM_FMTBIT_S24_LE),
+			.channels_min = 1,
+			.channels_max = 8,
+			.rate_min = 8000,
+			.rate_max = 48000,
+		},
+		.ops = &msm_fe_dai_ops,
+		.name = "QUAT_TDM_TX_1_HOSTLESS",
+		.probe = fe_dai_probe,
+	},
+	{
+		.playback = {
+			.stream_name = "Quaternary TDM1 Hostless Playback",
+			.aif_name = "QUAT_TDM_RX_1_DL_HL",
+			.rates = SNDRV_PCM_RATE_8000_48000,
+			.formats = (SNDRV_PCM_FMTBIT_S16_LE |
+				    SNDRV_PCM_FMTBIT_S24_LE),
+			.channels_min = 1,
+			.channels_max = 8,
+			.rate_min = 8000,
+			.rate_max = 48000,
+		},
+		.ops = &msm_fe_dai_ops,
+		.name = "QUAT_TDM_RX_1_HOSTLESS",
+		.probe = fe_dai_probe,
+	},
+	{
+		.capture = {
+			.stream_name = "Quaternary TDM2 Hostless Capture",
+			.aif_name = "QUAT_TDM_TX_2_UL_HL",
+			.rates = SNDRV_PCM_RATE_8000_48000,
+			.formats = (SNDRV_PCM_FMTBIT_S16_LE |
+				    SNDRV_PCM_FMTBIT_S24_LE),
+			.channels_min = 1,
+			.channels_max = 8,
+			.rate_min = 8000,
+			.rate_max = 48000,
+		},
+		.ops = &msm_fe_dai_ops,
+		.name = "QUAT_TDM_TX_2_HOSTLESS",
+		.probe = fe_dai_probe,
+	},
+	{
+		.playback = {
+			.stream_name = "Quaternary TDM2 Hostless Playback",
+			.aif_name = "QUAT_TDM_RX_2_DL_HL",
+			.rates = SNDRV_PCM_RATE_8000_48000,
+			.formats = (SNDRV_PCM_FMTBIT_S16_LE |
+				    SNDRV_PCM_FMTBIT_S24_LE),
+			.channels_min = 1,
+			.channels_max = 8,
+			.rate_min = 8000,
+			.rate_max = 48000,
+		},
+		.ops = &msm_fe_dai_ops,
+		.name = "QUAT_TDM_RX_2_HOSTLESS",
+		.probe = fe_dai_probe,
+	},
+	{
+		.capture = {
+			.stream_name = "Quaternary TDM3 Hostless Capture",
+			.aif_name = "QUAT_TDM_TX_3_UL_HL",
+			.rates = SNDRV_PCM_RATE_8000_48000,
+			.formats = (SNDRV_PCM_FMTBIT_S16_LE |
+				    SNDRV_PCM_FMTBIT_S24_LE),
+			.channels_min = 1,
+			.channels_max = 8,
+			.rate_min = 8000,
+			.rate_max = 48000,
+		},
+		.ops = &msm_fe_dai_ops,
+		.name = "QUAT_TDM_TX_3_HOSTLESS",
+		.probe = fe_dai_probe,
+	},
+	{
+		.playback = {
+			.stream_name = "Quaternary TDM3 Hostless Playback",
+			.aif_name = "QUAT_TDM_RX_3_DL_HL",
+			.rates = SNDRV_PCM_RATE_8000_48000,
+			.formats = (SNDRV_PCM_FMTBIT_S16_LE |
+				    SNDRV_PCM_FMTBIT_S24_LE),
+			.channels_min = 1,
+			.channels_max = 8,
+			.rate_min = 8000,
+			.rate_max = 48000,
+		},
+		.ops = &msm_fe_dai_ops,
+		.name = "QUAT_TDM_RX_3_HOSTLESS",
+		.probe = fe_dai_probe,
+	},
+	{
+		.capture = {
+			.stream_name = "Quaternary TDM4 Hostless Capture",
+			.aif_name = "QUAT_TDM_TX_4_UL_HL",
+			.rates = SNDRV_PCM_RATE_8000_48000,
+			.formats = (SNDRV_PCM_FMTBIT_S16_LE |
+				    SNDRV_PCM_FMTBIT_S24_LE),
+			.channels_min = 1,
+			.channels_max = 8,
+			.rate_min = 8000,
+			.rate_max = 48000,
+		},
+		.ops = &msm_fe_dai_ops,
+		.name = "QUAT_TDM_TX_4_HOSTLESS",
+		.probe = fe_dai_probe,
+	},
+	{
+		.playback = {
+			.stream_name = "Quaternary TDM4 Hostless Playback",
+			.aif_name = "QUAT_TDM_RX_4_DL_HL",
+			.rates = SNDRV_PCM_RATE_8000_48000,
+			.formats = (SNDRV_PCM_FMTBIT_S16_LE |
+				    SNDRV_PCM_FMTBIT_S24_LE),
+			.channels_min = 1,
+			.channels_max = 8,
+			.rate_min = 8000,
+			.rate_max = 48000,
+		},
+		.ops = &msm_fe_dai_ops,
+		.name = "QUAT_TDM_RX_4_HOSTLESS",
+		.probe = fe_dai_probe,
+	},
+	{
+		.capture = {
+			.stream_name = "Quaternary TDM5 Hostless Capture",
+			.aif_name = "QUAT_TDM_TX_5_UL_HL",
+			.rates = SNDRV_PCM_RATE_8000_48000,
+			.formats = (SNDRV_PCM_FMTBIT_S16_LE |
+				    SNDRV_PCM_FMTBIT_S24_LE),
+			.channels_min = 1,
+			.channels_max = 8,
+			.rate_min = 8000,
+			.rate_max = 48000,
+		},
+		.ops = &msm_fe_dai_ops,
+		.name = "QUAT_TDM_TX_5_HOSTLESS",
+		.probe = fe_dai_probe,
+	},
+	{
+		.playback = {
+			.stream_name = "Quaternary TDM5 Hostless Playback",
+			.aif_name = "QUAT_TDM_RX_5_DL_HL",
+			.rates = SNDRV_PCM_RATE_8000_48000,
+			.formats = (SNDRV_PCM_FMTBIT_S16_LE |
+				    SNDRV_PCM_FMTBIT_S24_LE),
+			.channels_min = 1,
+			.channels_max = 8,
+			.rate_min = 8000,
+			.rate_max = 48000,
+		},
+		.ops = &msm_fe_dai_ops,
+		.name = "QUAT_TDM_RX_5_HOSTLESS",
+		.probe = fe_dai_probe,
+	},
+	{
+		.capture = {
+			.stream_name = "Quaternary TDM6 Hostless Capture",
+			.aif_name = "QUAT_TDM_TX_6_UL_HL",
+			.rates = SNDRV_PCM_RATE_8000_48000,
+			.formats = (SNDRV_PCM_FMTBIT_S16_LE |
+				    SNDRV_PCM_FMTBIT_S24_LE),
+			.channels_min = 1,
+			.channels_max = 8,
+			.rate_min = 8000,
+			.rate_max = 48000,
+		},
+		.ops = &msm_fe_dai_ops,
+		.name = "QUAT_TDM_TX_6_HOSTLESS",
+		.probe = fe_dai_probe,
+	},
+	{
+		.playback = {
+			.stream_name = "Quaternary TDM6 Hostless Playback",
+			.aif_name = "QUAT_TDM_RX_6_DL_HL",
+			.rates = SNDRV_PCM_RATE_8000_48000,
+			.formats = (SNDRV_PCM_FMTBIT_S16_LE |
+				    SNDRV_PCM_FMTBIT_S24_LE),
+			.channels_min = 1,
+			.channels_max = 8,
+			.rate_min = 8000,
+			.rate_max = 48000,
+		},
+		.ops = &msm_fe_dai_ops,
+		.name = "QUAT_TDM_RX_6_HOSTLESS",
+		.probe = fe_dai_probe,
+	},
+	{
+		.capture = {
+			.stream_name = "Quaternary TDM7 Hostless Capture",
+			.aif_name = "QUAT_TDM_TX_7_UL_HL",
+			.rates = SNDRV_PCM_RATE_8000_48000,
+			.formats = (SNDRV_PCM_FMTBIT_S16_LE |
+				    SNDRV_PCM_FMTBIT_S24_LE),
+			.channels_min = 1,
+			.channels_max = 8,
+			.rate_min = 8000,
+			.rate_max = 48000,
+		},
+		.ops = &msm_fe_dai_ops,
+		.name = "QUAT_TDM_TX_7_HOSTLESS",
+		.probe = fe_dai_probe,
+	},
+	{
+		.playback = {
+			.stream_name = "Quaternary TDM7 Hostless Playback",
+			.aif_name = "QUAT_TDM_RX_7_DL_HL",
+			.rates = SNDRV_PCM_RATE_8000_48000,
+			.formats = (SNDRV_PCM_FMTBIT_S16_LE |
+				    SNDRV_PCM_FMTBIT_S24_LE),
+			.channels_min = 1,
+			.channels_max = 8,
+			.rate_min = 8000,
+			.rate_max = 48000,
+		},
+		.ops = &msm_fe_dai_ops,
+		.name = "QUAT_TDM_RX_7_HOSTLESS",
+		.probe = fe_dai_probe,
+	},
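+	/*
+	 * The hostless TDM front ends above follow one naming scheme:
+	 * <PRI|SEC|TERT|QUAT>_TDM_<RX|TX>_<0..7>_HOSTLESS, where each RX
+	 * DAI carries the downlink (playback, *_DL_HL) hostless interface
+	 * and each TX DAI the uplink (capture, *_UL_HL) one.
+	 */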
+	{
+		.playback = {
+			.stream_name = "Voice2 Playback",
+			.aif_name = "VOICE2_DL",
+			.rates = SNDRV_PCM_RATE_8000_48000,
+			.formats = SNDRV_PCM_FMTBIT_S16_LE,
+			.channels_min = 1,
+			.channels_max = 2,
+			.rate_min =     8000,
+			.rate_max =     48000,
+		},
+		.capture = {
+			.stream_name = "Voice2 Capture",
+			.aif_name = "VOICE2_UL",
+			.rates = SNDRV_PCM_RATE_8000_48000,
+			.formats = SNDRV_PCM_FMTBIT_S16_LE,
+			.channels_min = 1,
+			.channels_max = 2,
+			.rate_min =     8000,
+			.rate_max =     48000,
+		},
+		.ops = &msm_fe_dai_ops,
+		.name = "Voice2",
+		.probe = fe_dai_probe,
+	},
+	{
+		.playback = {
+			.stream_name = "Pseudo Playback",
+			.aif_name = "MM_DL9",
+			.rates = (SNDRV_PCM_RATE_8000_48000 |
+					SNDRV_PCM_RATE_KNOT),
+			.formats = SNDRV_PCM_FMTBIT_S16_LE,
+			.channels_min = 1,
+			.channels_max = 8,
+			.rate_min =	8000,
+			.rate_max = 48000,
+		},
+		.capture = {
+			.stream_name = "Pseudo Capture",
+			.aif_name = "MM_UL9",
+			.rates = (SNDRV_PCM_RATE_8000_48000|
+					SNDRV_PCM_RATE_KNOT),
+			.formats = SNDRV_PCM_FMTBIT_S16_LE,
+			.channels_min = 1,
+			.channels_max = 8,
+			.rate_min =     8000,
+			.rate_max =	48000,
+		},
+		.ops = &msm_fe_Multimedia_dai_ops,
+		.name = "Pseudo",
+		.probe = fe_dai_probe,
+	},
+	{
+		.playback = {
+			.stream_name = "DTMF_RX_HOSTLESS Playback",
+			.aif_name = "DTMF_DL_HL",
+			.rates = SNDRV_PCM_RATE_8000_48000,
+			.formats = SNDRV_PCM_FMTBIT_S16_LE,
+			.channels_min = 1,
+			.channels_max = 2,
+			.rate_min =	8000,
+			.rate_max = 48000,
+		},
+		.ops = &msm_fe_dai_ops,
+		.name = "DTMF_RX_HOSTLESS",
+		.probe = fe_dai_probe,
+	},
+	{
+		.capture = {
+			.stream_name = "CPE Listen Audio capture",
+			.aif_name = "CPE_LSM_UL_HL",
+			.rates = SNDRV_PCM_RATE_8000_48000,
+			.formats = (SNDRV_PCM_FMTBIT_S16_LE |
+				    SNDRV_PCM_FMTBIT_S24_LE),
+			.channels_min = 1,
+			.channels_max = 1,
+			.rate_min = 8000,
+			.rate_max = 48000,
+		},
+		.ops = &msm_fe_dai_ops,
+		.name = "CPE_LSM_NOHOST",
+	},
+	{
+		.playback = {
+			.stream_name = "VOLTE_STUB Playback",
+			.aif_name = "VOLTE_STUB_DL",
+			.rates = SNDRV_PCM_RATE_8000_48000,
+			.formats = SNDRV_PCM_FMTBIT_S16_LE,
+			.channels_min = 1,
+			.channels_max = 2,
+			.rate_min = 8000,
+			.rate_max = 48000,
+		},
+		.capture = {
+			.stream_name = "VOLTE_STUB Capture",
+			.aif_name = "VOLTE_STUB_UL",
+			.rates = SNDRV_PCM_RATE_8000_48000,
+			.formats = SNDRV_PCM_FMTBIT_S16_LE,
+			.channels_min = 1,
+			.channels_max = 2,
+			.rate_min = 8000,
+			.rate_max = 48000,
+		},
+		.ops = &msm_fe_dai_ops,
+		.name = "VOLTE_STUB",
+		.probe = fe_dai_probe,
+	},
+	{
+		.playback = {
+			.stream_name = "VOICE2_STUB Playback",
+			.aif_name = "VOICE2_STUB_DL",
+			.rates = SNDRV_PCM_RATE_8000_48000,
+			.formats = SNDRV_PCM_FMTBIT_S16_LE,
+			.channels_min = 1,
+			.channels_max = 2,
+			.rate_min = 8000,
+			.rate_max = 48000,
+		},
+		.capture = {
+			.stream_name = "VOICE2_STUB Capture",
+			.aif_name = "VOICE2_STUB_UL",
+			.rates = SNDRV_PCM_RATE_8000_48000,
+			.formats = SNDRV_PCM_FMTBIT_S16_LE,
+			.channels_min = 1,
+			.channels_max = 2,
+			.rate_min = 8000,
+			.rate_max = 48000,
+		},
+		.ops = &msm_fe_dai_ops,
+		.name = "VOICE2_STUB",
+		.probe = fe_dai_probe,
+	},
+	{
+		.playback = {
+			.stream_name = "MultiMedia9 Playback",
+			.aif_name = "MM_DL9",
+			.rates = (SNDRV_PCM_RATE_8000_384000|
+				  SNDRV_PCM_RATE_KNOT),
+			.formats = (SNDRV_PCM_FMTBIT_S16_LE |
+				    SNDRV_PCM_FMTBIT_S24_LE |
+				    SNDRV_PCM_FMTBIT_S24_3LE),
+			.channels_min = 1,
+			.channels_max = 8,
+			.rate_min =     8000,
+			.rate_max =	384000,
+		},
+		.capture = {
+			.stream_name = "MultiMedia9 Capture",
+			.aif_name = "MM_UL9",
+			.rates = (SNDRV_PCM_RATE_8000_48000|
+				  SNDRV_PCM_RATE_KNOT),
+			.formats = SNDRV_PCM_FMTBIT_S16_LE,
+			.channels_min = 1,
+			.channels_max = 8,
+			.rate_min =     8000,
+			.rate_max =	48000,
+		},
+		.ops = &msm_fe_Multimedia_dai_ops,
+		.name = "MultiMedia9",
+		.probe = fe_dai_probe,
+	},
+	{
+		.playback = {
+			.stream_name = "QCHAT Playback",
+			.aif_name = "QCHAT_DL",
+			.rates = SNDRV_PCM_RATE_8000_48000,
+			.formats = SNDRV_PCM_FMTBIT_S16_LE,
+			.channels_min = 1,
+			.channels_max = 2,
+			.rate_min = 8000,
+			.rate_max = 48000,
+		},
+		.capture = {
+			.stream_name = "QCHAT Capture",
+			.aif_name = "QCHAT_UL",
+			.rates = SNDRV_PCM_RATE_8000_48000,
+			.formats = SNDRV_PCM_FMTBIT_S16_LE,
+			.channels_min = 1,
+			.channels_max = 2,
+			.rate_min = 8000,
+			.rate_max = 48000,
+		},
+		.ops = &msm_fe_dai_ops,
+		.name = "QCHAT",
+		.probe = fe_dai_probe,
+	},
+	{
+		.capture = {
+			.stream_name = "Listen 1 Audio Service Capture",
+			.aif_name = "LSM1_UL_HL",
+			.rates = (SNDRV_PCM_RATE_16000 |
+				  SNDRV_PCM_RATE_48000),
+			.formats = (SNDRV_PCM_FMTBIT_S16_LE |
+				    SNDRV_PCM_FMTBIT_S24_LE),
+			.channels_min = 1,
+			.channels_max = 4,
+			.rate_min = 16000,
+			.rate_max = 48000,
+		},
+		.ops = &msm_fe_dai_ops,
+		.name = "LSM1",
+		.probe = fe_dai_probe,
+	},
+	{
+		.capture = {
+			.stream_name = "Listen 2 Audio Service Capture",
+			.aif_name = "LSM2_UL_HL",
+			.rates = (SNDRV_PCM_RATE_16000 |
+				  SNDRV_PCM_RATE_48000),
+			.formats = (SNDRV_PCM_FMTBIT_S16_LE |
+				    SNDRV_PCM_FMTBIT_S24_LE),
+			.channels_min = 1,
+			.channels_max = 4,
+			.rate_min = 16000,
+			.rate_max = 48000,
+		},
+		.ops = &msm_fe_dai_ops,
+		.name = "LSM2",
+		.probe = fe_dai_probe,
+	},
+	{
+		.capture = {
+			.stream_name = "Listen 3 Audio Service Capture",
+			.aif_name = "LSM3_UL_HL",
+			.rates = (SNDRV_PCM_RATE_16000 |
+				  SNDRV_PCM_RATE_48000),
+			.formats = (SNDRV_PCM_FMTBIT_S16_LE |
+				    SNDRV_PCM_FMTBIT_S24_LE),
+			.channels_min = 1,
+			.channels_max = 4,
+			.rate_min = 16000,
+			.rate_max = 48000,
+		},
+		.ops = &msm_fe_dai_ops,
+		.name = "LSM3",
+		.probe = fe_dai_probe,
+	},
+	{
+		.capture = {
+			.stream_name = "Listen 4 Audio Service Capture",
+			.aif_name = "LSM4_UL_HL",
+			.rates = (SNDRV_PCM_RATE_16000 |
+				  SNDRV_PCM_RATE_48000),
+			.formats = (SNDRV_PCM_FMTBIT_S16_LE |
+				    SNDRV_PCM_FMTBIT_S24_LE),
+			.channels_min = 1,
+			.channels_max = 4,
+			.rate_min = 16000,
+			.rate_max = 48000,
+		},
+		.ops = &msm_fe_dai_ops,
+		.name = "LSM4",
+		.probe = fe_dai_probe,
+	},
+	{
+		.capture = {
+			.stream_name = "Listen 5 Audio Service Capture",
+			.aif_name = "LSM5_UL_HL",
+			.rates = (SNDRV_PCM_RATE_16000 |
+				  SNDRV_PCM_RATE_48000),
+			.formats = (SNDRV_PCM_FMTBIT_S16_LE |
+				    SNDRV_PCM_FMTBIT_S24_LE),
+			.channels_min = 1,
+			.channels_max = 4,
+			.rate_min = 16000,
+			.rate_max = 48000,
+		},
+		.ops = &msm_fe_dai_ops,
+		.name = "LSM5",
+		.probe = fe_dai_probe,
+	},
+	{
+		.capture = {
+			.stream_name = "Listen 6 Audio Service Capture",
+			.aif_name = "LSM6_UL_HL",
+			.rates = (SNDRV_PCM_RATE_16000 |
+				  SNDRV_PCM_RATE_48000),
+			.formats = (SNDRV_PCM_FMTBIT_S16_LE |
+				    SNDRV_PCM_FMTBIT_S24_LE),
+			.channels_min = 1,
+			.channels_max = 4,
+			.rate_min = 16000,
+			.rate_max = 48000,
+		},
+		.ops = &msm_fe_dai_ops,
+		.name = "LSM6",
+		.probe = fe_dai_probe,
+	},
+	{
+		.capture = {
+			.stream_name = "Listen 7 Audio Service Capture",
+			.aif_name = "LSM7_UL_HL",
+			.rates = (SNDRV_PCM_RATE_16000 |
+				  SNDRV_PCM_RATE_48000),
+			.formats = (SNDRV_PCM_FMTBIT_S16_LE |
+				    SNDRV_PCM_FMTBIT_S24_LE),
+			.channels_min = 1,
+			.channels_max = 4,
+			.rate_min = 16000,
+			.rate_max = 48000,
+		},
+		.ops = &msm_fe_dai_ops,
+		.name = "LSM7",
+		.probe = fe_dai_probe,
+	},
+	{
+		.capture = {
+			.stream_name = "Listen 8 Audio Service Capture",
+			.aif_name = "LSM8_UL_HL",
+			.rates = (SNDRV_PCM_RATE_16000 |
+				  SNDRV_PCM_RATE_48000),
+			.formats = (SNDRV_PCM_FMTBIT_S16_LE |
+				    SNDRV_PCM_FMTBIT_S24_LE),
+			.channels_min = 1,
+			.channels_max = 4,
+			.rate_min = 16000,
+			.rate_max = 48000,
+		},
+		.ops = &msm_fe_dai_ops,
+		.name = "LSM8",
+		.probe = fe_dai_probe,
+	},
+	{
+		.playback = {
+			.stream_name = "VoWLAN Playback",
+			.aif_name = "VoWLAN_DL",
+			.rates = SNDRV_PCM_RATE_8000_48000,
+			.formats = SNDRV_PCM_FMTBIT_S16_LE,
+			.channels_min = 1,
+			.channels_max = 2,
+			.rate_min = 8000,
+			.rate_max = 48000,
+		},
+		.capture = {
+			.stream_name = "VoWLAN Capture",
+			.aif_name = "VoWLAN_UL",
+			.rates = SNDRV_PCM_RATE_8000_48000,
+			.formats = SNDRV_PCM_FMTBIT_S16_LE,
+			.channels_min = 1,
+			.channels_max = 2,
+			.rate_min = 8000,
+			.rate_max = 48000,
+		},
+		.ops = &msm_fe_dai_ops,
+		.name = "VoWLAN",
+		.probe = fe_dai_probe,
+	},
+	/* FE DAIs created for multiple instances of offload playback */
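+	/*
+	 * Entries that set .compress_new = snd_soc_new_compress are
+	 * exposed by ASoC as compressed-offload devices rather than
+	 * ordinary PCM streams.
+	 */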
+	{
+		.playback = {
+			.stream_name = "MultiMedia10 Playback",
+			.aif_name = "MM_DL10",
+			.rates = (SNDRV_PCM_RATE_8000_384000 |
+				  SNDRV_PCM_RATE_KNOT),
+			.formats = (SNDRV_PCM_FMTBIT_S16_LE |
+				    SNDRV_PCM_FMTBIT_S24_LE |
+				    SNDRV_PCM_FMTBIT_S24_3LE |
+				    SNDRV_PCM_FMTBIT_S32_LE),
+			.channels_min = 1,
+			.channels_max = 8,
+			.rate_min =	8000,
+			.rate_max = 384000,
+		},
+		.ops = &msm_fe_Multimedia_dai_ops,
+		.compress_new = snd_soc_new_compress,
+		.name = "MultiMedia10",
+		.probe = fe_dai_probe,
+	},
+	{
+		.playback = {
+			.stream_name = "MultiMedia11 Playback",
+			.aif_name = "MM_DL11",
+			.rates = (SNDRV_PCM_RATE_8000_384000 |
+				  SNDRV_PCM_RATE_KNOT),
+			.formats = (SNDRV_PCM_FMTBIT_S16_LE |
+				    SNDRV_PCM_FMTBIT_S24_LE |
+				    SNDRV_PCM_FMTBIT_S24_3LE |
+				    SNDRV_PCM_FMTBIT_S32_LE),
+			.channels_min = 1,
+			.channels_max = 8,
+			.rate_min =	8000,
+			.rate_max = 384000,
+		},
+		.ops = &msm_fe_Multimedia_dai_ops,
+		.compress_new = snd_soc_new_compress,
+		.name = "MultiMedia11",
+		.probe = fe_dai_probe,
+	},
+	{
+		.playback = {
+			.stream_name = "MultiMedia12 Playback",
+			.aif_name = "MM_DL12",
+			.rates = (SNDRV_PCM_RATE_8000_384000 |
+				  SNDRV_PCM_RATE_KNOT),
+			.formats = (SNDRV_PCM_FMTBIT_S16_LE |
+				    SNDRV_PCM_FMTBIT_S24_LE |
+				    SNDRV_PCM_FMTBIT_S24_3LE |
+				    SNDRV_PCM_FMTBIT_S32_LE),
+			.channels_min = 1,
+			.channels_max = 8,
+			.rate_min =	8000,
+			.rate_max = 384000,
+		},
+		.ops = &msm_fe_Multimedia_dai_ops,
+		.compress_new = snd_soc_new_compress,
+		.name = "MultiMedia12",
+		.probe = fe_dai_probe,
+	},
+	{
+		.playback = {
+			.stream_name = "MultiMedia13 Playback",
+			.aif_name = "MM_DL13",
+			.rates = (SNDRV_PCM_RATE_8000_384000 |
+				  SNDRV_PCM_RATE_KNOT),
+			.formats = (SNDRV_PCM_FMTBIT_S16_LE |
+				    SNDRV_PCM_FMTBIT_S24_LE |
+				    SNDRV_PCM_FMTBIT_S24_3LE |
+				    SNDRV_PCM_FMTBIT_S32_LE),
+			.channels_min = 1,
+			.channels_max = 8,
+			.rate_min =	8000,
+			.rate_max = 384000,
+		},
+		.ops = &msm_fe_Multimedia_dai_ops,
+		.compress_new = snd_soc_new_compress,
+		.name = "MultiMedia13",
+		.probe = fe_dai_probe,
+	},
+	{
+		.playback = {
+			.stream_name = "MultiMedia14 Playback",
+			.aif_name = "MM_DL14",
+			.rates = (SNDRV_PCM_RATE_8000_384000 |
+				  SNDRV_PCM_RATE_KNOT),
+			.formats = (SNDRV_PCM_FMTBIT_S16_LE |
+				    SNDRV_PCM_FMTBIT_S24_LE |
+				    SNDRV_PCM_FMTBIT_S24_3LE |
+				    SNDRV_PCM_FMTBIT_S32_LE),
+			.channels_min = 1,
+			.channels_max = 8,
+			.rate_min =	8000,
+			.rate_max = 384000,
+		},
+		.ops = &msm_fe_Multimedia_dai_ops,
+		.compress_new = snd_soc_new_compress,
+		.name = "MultiMedia14",
+		.probe = fe_dai_probe,
+	},
+	{
+		.playback = {
+			.stream_name = "MultiMedia15 Playback",
+			.aif_name = "MM_DL15",
+			.rates = (SNDRV_PCM_RATE_8000_384000 |
+				  SNDRV_PCM_RATE_KNOT),
+			.formats = (SNDRV_PCM_FMTBIT_S16_LE |
+				    SNDRV_PCM_FMTBIT_S24_LE |
+				    SNDRV_PCM_FMTBIT_S24_3LE |
+				    SNDRV_PCM_FMTBIT_S32_LE),
+			.channels_min = 1,
+			.channels_max = 8,
+			.rate_min =	8000,
+			.rate_max = 384000,
+		},
+		.ops = &msm_fe_Multimedia_dai_ops,
+		.compress_new = snd_soc_new_compress,
+		.name = "MultiMedia15",
+		.probe = fe_dai_probe,
+	},
+	{
+		.playback = {
+			.stream_name = "MultiMedia16 Playback",
+			.aif_name = "MM_DL16",
+			.rates = (SNDRV_PCM_RATE_8000_384000 |
+				  SNDRV_PCM_RATE_KNOT),
+			.formats = (SNDRV_PCM_FMTBIT_S16_LE |
+				    SNDRV_PCM_FMTBIT_S24_LE |
+				    SNDRV_PCM_FMTBIT_S24_3LE |
+				    SNDRV_PCM_FMTBIT_S32_LE),
+			.channels_min = 1,
+			.channels_max = 8,
+			.rate_min =	8000,
+			.rate_max = 384000,
+		},
+		.capture = {
+			.stream_name = "MultiMedia16 Capture",
+			.aif_name = "MM_UL16",
+			.rates = (SNDRV_PCM_RATE_8000_48000|
+					SNDRV_PCM_RATE_KNOT),
+			.formats = (SNDRV_PCM_FMTBIT_S16_LE |
+				    SNDRV_PCM_FMTBIT_S24_LE |
+				    SNDRV_PCM_FMTBIT_S24_3LE |
+				    SNDRV_PCM_FMTBIT_S32_LE),
+			.channels_min = 1,
+			.channels_max = 8,
+			.rate_min =     8000,
+			.rate_max =     48000,
+		},
+		.ops = &msm_fe_Multimedia_dai_ops,
+		.name = "MultiMedia16",
+		.probe = fe_dai_probe,
+	},
+	{
+		.playback = {
+			.stream_name = "VoiceMMode1 Playback",
+			.aif_name = "VOICEMMODE1_DL",
+			.rates = SNDRV_PCM_RATE_8000_48000,
+			.formats = SNDRV_PCM_FMTBIT_S16_LE,
+			.channels_min = 1,
+			.channels_max = 2,
+			.rate_min =     8000,
+			.rate_max =     48000,
+		},
+		.capture = {
+			.stream_name = "VoiceMMode1 Capture",
+			.aif_name = "VOICEMMODE1_UL",
+			.rates = SNDRV_PCM_RATE_8000_48000,
+			.formats = SNDRV_PCM_FMTBIT_S16_LE,
+			.channels_min = 1,
+			.channels_max = 2,
+			.rate_min =     8000,
+			.rate_max =     48000,
+		},
+		.ops = &msm_fe_dai_ops,
+		.name = "VoiceMMode1",
+		.probe = fe_dai_probe,
+	},
+	{
+		.playback = {
+			.stream_name = "VoiceMMode2 Playback",
+			.aif_name = "VOICEMMODE2_DL",
+			.rates = SNDRV_PCM_RATE_8000_48000,
+			.formats = SNDRV_PCM_FMTBIT_S16_LE,
+			.channels_min = 1,
+			.channels_max = 2,
+			.rate_min =     8000,
+			.rate_max =     48000,
+		},
+		.capture = {
+			.stream_name = "VoiceMMode2 Capture",
+			.aif_name = "VOICEMMODE2_UL",
+			.rates = SNDRV_PCM_RATE_8000_48000,
+			.formats = SNDRV_PCM_FMTBIT_S16_LE,
+			.channels_min = 1,
+			.channels_max = 2,
+			.rate_min =     8000,
+			.rate_max =     48000,
+		},
+		.ops = &msm_fe_dai_ops,
+		.name = "VoiceMMode2",
+		.probe = fe_dai_probe,
+	},
+	{
+		.capture = {
+			.stream_name = "MultiMedia17 Capture",
+			.aif_name = "MM_UL17",
+			.rates = (SNDRV_PCM_RATE_8000_48000|
+					SNDRV_PCM_RATE_KNOT),
+			.formats = (SNDRV_PCM_FMTBIT_S16_LE |
+				    SNDRV_PCM_FMTBIT_S24_LE |
+				    SNDRV_PCM_FMTBIT_S24_3LE),
+			.channels_min = 1,
+			.channels_max = 8,
+			.rate_min =     8000,
+			.rate_max =     48000,
+		},
+		.ops = &msm_fe_Multimedia_dai_ops,
+		.compress_new = snd_soc_new_compress,
+		.name = "MultiMedia17",
+		.probe = fe_dai_probe,
+	},
+	{
+		.capture = {
+			.stream_name = "MultiMedia18 Capture",
+			.aif_name = "MM_UL18",
+			.rates = (SNDRV_PCM_RATE_8000_192000 |
+					SNDRV_PCM_RATE_KNOT),
+			.formats = (SNDRV_PCM_FMTBIT_S16_LE |
+				    SNDRV_PCM_FMTBIT_S24_LE |
+				    SNDRV_PCM_FMTBIT_S24_3LE),
+			.channels_min = 1,
+			.channels_max = 8,
+			.rate_min =     8000,
+			.rate_max =     192000,
+		},
+		.ops = &msm_fe_Multimedia_dai_ops,
+		.compress_new = snd_soc_new_compress,
+		.name = "MultiMedia18",
+		.probe = fe_dai_probe,
+	},
+	{
+		.capture = {
+			.stream_name = "MultiMedia19 Capture",
+			.aif_name = "MM_UL19",
+			.rates = (SNDRV_PCM_RATE_8000_48000|
+					SNDRV_PCM_RATE_KNOT),
+			.formats = (SNDRV_PCM_FMTBIT_S16_LE |
+				    SNDRV_PCM_FMTBIT_S24_LE |
+				    SNDRV_PCM_FMTBIT_S24_3LE),
+			.channels_min = 1,
+			.channels_max = 8,
+			.rate_min =     8000,
+			.rate_max =     48000,
+		},
+		.ops = &msm_fe_Multimedia_dai_ops,
+		.compress_new = snd_soc_new_compress,
+		.name = "MultiMedia19",
+		.probe = fe_dai_probe,
+	},
+	{
+		.playback = {
+			.stream_name = "MultiMedia20 Playback",
+			.aif_name = "MM_DL20",
+			.rates = (SNDRV_PCM_RATE_8000_384000|
+					SNDRV_PCM_RATE_KNOT),
+			.formats = (SNDRV_PCM_FMTBIT_S16_LE |
+				    SNDRV_PCM_FMTBIT_S24_LE |
+				    SNDRV_PCM_FMTBIT_S24_3LE |
+				    SNDRV_PCM_FMTBIT_S32_LE),
+			.channels_min = 1,
+			.channels_max = 8,
+			.rate_min =     8000,
+			.rate_max =     384000,
+		},
+		.capture = {
+			.stream_name = "MultiMedia20 Capture",
+			.aif_name = "MM_UL20",
+			.rates = (SNDRV_PCM_RATE_8000_48000|
+					SNDRV_PCM_RATE_KNOT),
+			.formats = (SNDRV_PCM_FMTBIT_S16_LE |
+				    SNDRV_PCM_FMTBIT_S24_LE |
+				    SNDRV_PCM_FMTBIT_S24_3LE |
+				    SNDRV_PCM_FMTBIT_S32_LE),
+			.channels_min = 1,
+			.channels_max = 8,
+			.rate_min =     8000,
+			.rate_max =     48000,
+		},
+		.ops = &msm_fe_Multimedia_dai_ops,
+		.name = "MultiMedia20",
+		.probe = fe_dai_probe,
+	},
+	{
+		.playback = {
+			.stream_name = "MultiMedia21 Playback",
+			.aif_name = "MM_DL21",
+			.rates = (SNDRV_PCM_RATE_8000_384000 |
+					SNDRV_PCM_RATE_KNOT),
+			.formats = (SNDRV_PCM_FMTBIT_S16_LE |
+				    SNDRV_PCM_FMTBIT_S24_LE |
+				    SNDRV_PCM_FMTBIT_S24_3LE |
+				    SNDRV_PCM_FMTBIT_S32_LE),
+			.channels_min = 1,
+			.channels_max = 8,
+			.rate_min = 8000,
+			.rate_max = 384000,
+		},
+		.capture = {
+			.stream_name = "MultiMedia21 Capture",
+			.aif_name = "MM_UL21",
+			.rates = (SNDRV_PCM_RATE_8000_48000|
+					SNDRV_PCM_RATE_KNOT),
+			.formats = (SNDRV_PCM_FMTBIT_S16_LE |
+				    SNDRV_PCM_FMTBIT_S24_LE |
+				    SNDRV_PCM_FMTBIT_S24_3LE |
+				    SNDRV_PCM_FMTBIT_S32_LE),
+			.channels_min = 1,
+			.channels_max = 8,
+			.rate_min =     8000,
+			.rate_max =     48000,
+		},
+		.ops = &msm_fe_Multimedia_dai_ops,
+		.name = "MultiMedia21",
+		.probe = fe_dai_probe,
+	},
+	{
+		.playback = {
+			.stream_name = "MultiMedia22 Playback",
+			.aif_name = "MM_DL22",
+			.rates = (SNDRV_PCM_RATE_8000_384000 |
+					SNDRV_PCM_RATE_KNOT),
+			.formats = (SNDRV_PCM_FMTBIT_S16_LE |
+				    SNDRV_PCM_FMTBIT_S24_LE |
+				    SNDRV_PCM_FMTBIT_S24_3LE |
+				    SNDRV_PCM_FMTBIT_S32_LE),
+			.channels_min = 1,
+			.channels_max = 8,
+			.rate_min = 8000,
+			.rate_max = 384000,
+		},
+		.ops = &msm_fe_Multimedia_dai_ops,
+		.name = "MultiMedia22",
+		.probe = fe_dai_probe,
+	},
+	{
+		.playback = {
+			.stream_name = "MultiMedia23 Playback",
+			.aif_name = "MM_DL23",
+			.rates = (SNDRV_PCM_RATE_8000_384000 |
+					SNDRV_PCM_RATE_KNOT),
+			.formats = (SNDRV_PCM_FMTBIT_S16_LE |
+				    SNDRV_PCM_FMTBIT_S24_LE |
+				    SNDRV_PCM_FMTBIT_S24_3LE |
+				    SNDRV_PCM_FMTBIT_S32_LE),
+			.channels_min = 1,
+			.channels_max = 8,
+			.rate_min = 8000,
+			.rate_max = 384000,
+		},
+		.ops = &msm_fe_Multimedia_dai_ops,
+		.name = "MultiMedia23",
+		.probe = fe_dai_probe,
+	},
+	{
+		.playback = {
+			.stream_name = "MultiMedia24 Playback",
+			.aif_name = "MM_DL24",
+			.rates = (SNDRV_PCM_RATE_8000_384000 |
+					SNDRV_PCM_RATE_KNOT),
+			.formats = (SNDRV_PCM_FMTBIT_S16_LE |
+				    SNDRV_PCM_FMTBIT_S24_LE |
+				    SNDRV_PCM_FMTBIT_S24_3LE |
+				    SNDRV_PCM_FMTBIT_S32_LE),
+			.channels_min = 1,
+			.channels_max = 8,
+			.rate_min = 8000,
+			.rate_max = 384000,
+		},
+		.ops = &msm_fe_Multimedia_dai_ops,
+		.name = "MultiMedia24",
+		.probe = fe_dai_probe,
+	},
+	{
+		.playback = {
+			.stream_name = "MultiMedia25 Playback",
+			.aif_name = "MM_DL25",
+			.rates = (SNDRV_PCM_RATE_8000_384000 |
+					SNDRV_PCM_RATE_KNOT),
+			.formats = (SNDRV_PCM_FMTBIT_S16_LE |
+				    SNDRV_PCM_FMTBIT_S24_LE |
+				    SNDRV_PCM_FMTBIT_S24_3LE |
+				    SNDRV_PCM_FMTBIT_S32_LE),
+			.channels_min = 1,
+			.channels_max = 8,
+			.rate_min = 8000,
+			.rate_max = 384000,
+		},
+		.ops = &msm_fe_Multimedia_dai_ops,
+		.name = "MultiMedia25",
+		.probe = fe_dai_probe,
+	},
+	{
+		.playback = {
+			.stream_name = "MultiMedia26 Playback",
+			.aif_name = "MM_DL26",
+			.rates = (SNDRV_PCM_RATE_8000_384000 |
+					SNDRV_PCM_RATE_KNOT),
+			.formats = (SNDRV_PCM_FMTBIT_S16_LE |
+				    SNDRV_PCM_FMTBIT_S24_LE |
+				    SNDRV_PCM_FMTBIT_S24_3LE |
+				    SNDRV_PCM_FMTBIT_S32_LE),
+			.channels_min = 1,
+			.channels_max = 8,
+			.rate_min = 8000,
+			.rate_max = 384000,
+		},
+		.ops = &msm_fe_Multimedia_dai_ops,
+		.compress_new = snd_soc_new_compress,
+		.name = "MultiMedia26",
+		.probe = fe_dai_probe,
+	},
+	{
+		.capture = {
+			.stream_name = "MultiMedia27 Capture",
+			.aif_name = "MM_UL27",
+			.rates = (SNDRV_PCM_RATE_8000_192000|
+					SNDRV_PCM_RATE_KNOT),
+			.formats = (SNDRV_PCM_FMTBIT_S16_LE |
+				    SNDRV_PCM_FMTBIT_S24_LE |
+				    SNDRV_PCM_FMTBIT_S24_3LE),
+			.channels_min = 1,
+			.channels_max = 8,
+			.rate_min = 8000,
+			.rate_max = 192000,
+		},
+		.ops = &msm_fe_Multimedia_dai_ops,
+		.compress_new = snd_soc_new_compress,
+		.name = "MultiMedia27",
+		.probe = fe_dai_probe,
+	},
+};
+
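+/*
+ * The probe below registers every DAI in msm_fe_dais against a single
+ * ASoC component. A device tree sketch instantiating this driver could
+ * look like the following (node name illustrative):
+ *
+ *	qcom,msm-dai-fe {
+ *		compatible = "qcom,msm-dai-fe";
+ *	};
+ */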
+static int msm_fe_dai_dev_probe(struct platform_device *pdev)
+{
+	dev_dbg(&pdev->dev, "%s: dev name %s\n", __func__,
+		dev_name(&pdev->dev));
+	return snd_soc_register_component(&pdev->dev, &msm_fe_dai_component,
+		msm_fe_dais, ARRAY_SIZE(msm_fe_dais));
+}
+
+static int msm_fe_dai_dev_remove(struct platform_device *pdev)
+{
+	snd_soc_unregister_component(&pdev->dev);
+	return 0;
+}
+
+static const struct of_device_id msm_dai_fe_dt_match[] = {
+	{.compatible = "qcom,msm-dai-fe"},
+	{}
+};
+
+static struct platform_driver msm_fe_dai_driver = {
+	.probe  = msm_fe_dai_dev_probe,
+	.remove = msm_fe_dai_dev_remove,
+	.driver = {
+		.name = "msm-dai-fe",
+		.owner = THIS_MODULE,
+		.of_match_table = msm_dai_fe_dt_match,
+	},
+};
+
+static int __init msm_fe_dai_init(void)
+{
+	return platform_driver_register(&msm_fe_dai_driver);
+}
+module_init(msm_fe_dai_init);
+
+static void __exit msm_fe_dai_exit(void)
+{
+	platform_driver_unregister(&msm_fe_dai_driver);
+}
+module_exit(msm_fe_dai_exit);
+
+/* Module information */
+MODULE_DESCRIPTION("MSM Frontend DAI driver");
+MODULE_LICENSE("GPL v2");
diff -Nruw linux-4.4.115-fbx/sound/soc/msm./msm-pcm-hostless.c linux-4.4.115-fbx/sound/soc/msm/msm-pcm-hostless.c
--- linux-4.4.115-fbx/sound/soc/msm./msm-pcm-hostless.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/sound/soc/msm/msm-pcm-hostless.c	2019-10-29 09:26:26.141227624 +0100
@@ -0,0 +1,167 @@
+/* Copyright (c) 2011-2014, 2017 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/of_device.h>
+#include <linux/workqueue.h>
+#include <linux/delay.h>
+#include <sound/core.h>
+#include <sound/soc.h>
+#include <sound/pcm.h>
+#include <linux/qdsp6v2/apr.h>
+
+struct hostless_pdata {
+	struct work_struct msm_test_add_child_dev_work;
+	struct device *dev;
+};
+
+#define AUDIO_TEST_MOD_STRING_LEN 30
+
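+/*
+ * Deferred worker: poll until the ADSP subsystem reports loaded, then
+ * create a child platform device for any "audio_test_mod" node found
+ * under this device's DT node. It runs from schedule_work() at probe
+ * time so probe itself never blocks on ADSP bring-up.
+ */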
+static void msm_test_add_child_dev(struct work_struct *work)
+{
+	struct hostless_pdata *pdata;
+	struct platform_device *pdev;
+	struct device_node *node;
+	int ret;
+	char plat_dev_name[AUDIO_TEST_MOD_STRING_LEN];
+	int adsp_state;
+
+	pdata = container_of(work, struct hostless_pdata,
+			     msm_test_add_child_dev_work);
+	if (!pdata) {
+		pr_err("%s: Memory for pdata does not exist\n",
+			__func__);
+		return;
+	}
+	if (!pdata->dev) {
+		pr_err("%s: pdata dev is not initialized\n", __func__);
+		return;
+	}
+	if (!pdata->dev->of_node) {
+		dev_err(pdata->dev,
+			"%s: DT node for pdata does not exist\n", __func__);
+		return;
+	}
+
+	adsp_state = apr_get_subsys_state();
+	while (adsp_state != APR_SUBSYS_LOADED) {
+		dev_dbg(pdata->dev, "ADSP not loaded yet, state %d\n",
+			adsp_state);
+		msleep(500);
+		adsp_state = apr_get_subsys_state();
+	}
+	msleep(1000);
+	for_each_child_of_node(pdata->dev->of_node, node) {
+		if (!strcmp(node->name, "audio_test_mod"))
+			strlcpy(plat_dev_name, "audio_test_mod",
+				(AUDIO_TEST_MOD_STRING_LEN - 1));
+		else
+			continue;
+
+		pdev = platform_device_alloc(plat_dev_name, -1);
+		if (!pdev) {
+			dev_err(pdata->dev, "%s: pdev memory alloc failed\n",
+				__func__);
+			ret = -ENOMEM;
+			goto err;
+		}
+		pdev->dev.parent = pdata->dev;
+		pdev->dev.of_node = node;
+
+		ret = platform_device_add(pdev);
+		if (ret) {
+			dev_err(&pdev->dev,
+				"%s: Cannot add platform device\n",
+				__func__);
+			goto fail_pdev_add;
+		}
+	}
+	return;
+fail_pdev_add:
+	platform_device_put(pdev);
+err:
+	return;
+}
+
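+/*
+ * Hostless streams move data entirely within the DSP, so the only PCM
+ * op needed is .prepare, which drops any latency PM QoS request left
+ * on the substream, presumably so an idle hostless session does not
+ * hold the system out of low-power states.
+ */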
+static int msm_pcm_hostless_prepare(struct snd_pcm_substream *substream)
+{
+	if (!substream) {
+		pr_err("%s: invalid params\n", __func__);
+		return -EINVAL;
+	}
+	if (pm_qos_request_active(&substream->latency_pm_qos_req))
+		pm_qos_remove_request(&substream->latency_pm_qos_req);
+
+	return 0;
+}
+
+static struct snd_pcm_ops msm_pcm_hostless_ops = {
+	.prepare = msm_pcm_hostless_prepare,
+};
+
+static struct snd_soc_platform_driver msm_soc_hostless_platform = {
+	.ops		= &msm_pcm_hostless_ops,
+};
+
+static int msm_pcm_hostless_probe(struct platform_device *pdev)
+{
+	struct hostless_pdata *pdata;
+
+	pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
+	if (!pdata)
+		return -ENOMEM;
+	pr_debug("%s: dev name %s\n", __func__, dev_name(&pdev->dev));
+	pdata->dev = &pdev->dev;
+	INIT_WORK(&pdata->msm_test_add_child_dev_work,
+		  msm_test_add_child_dev);
+	schedule_work(&pdata->msm_test_add_child_dev_work);
+	return snd_soc_register_platform(&pdev->dev,
+				   &msm_soc_hostless_platform);
+}
+
+static int msm_pcm_hostless_remove(struct platform_device *pdev)
+{
+	snd_soc_unregister_platform(&pdev->dev);
+	return 0;
+}
+
+static const struct of_device_id msm_pcm_hostless_dt_match[] = {
+	{.compatible = "qcom,msm-pcm-hostless"},
+	{}
+};
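+/*
+ * Device tree sketch (the child node name comes from the worker above,
+ * which only matches children called "audio_test_mod"):
+ *
+ *	qcom,msm-pcm-hostless {
+ *		compatible = "qcom,msm-pcm-hostless";
+ *
+ *		audio_test_mod {
+ *		};
+ *	};
+ */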
+
+static struct platform_driver msm_pcm_hostless_driver = {
+	.driver = {
+		.name = "msm-pcm-hostless",
+		.owner = THIS_MODULE,
+		.of_match_table = msm_pcm_hostless_dt_match,
+	},
+	.probe = msm_pcm_hostless_probe,
+	.remove = msm_pcm_hostless_remove,
+};
+
+static int __init msm_soc_platform_init(void)
+{
+	return platform_driver_register(&msm_pcm_hostless_driver);
+}
+module_init(msm_soc_platform_init);
+
+static void __exit msm_soc_platform_exit(void)
+{
+	platform_driver_unregister(&msm_pcm_hostless_driver);
+}
+module_exit(msm_soc_platform_exit);
+
+MODULE_DESCRIPTION("Hostless platform driver");
+MODULE_LICENSE("GPL v2");
diff -Nruw linux-4.4.115-fbx/sound/soc/msm./qdsp6v2/adsp_err.c linux-4.4.115-fbx/sound/soc/msm/qdsp6v2/adsp_err.c
--- linux-4.4.115-fbx/sound/soc/msm./qdsp6v2/adsp_err.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/sound/soc/msm/qdsp6v2/adsp_err.c	2019-01-22 16:16:29.623301827 +0100
@@ -0,0 +1,167 @@
+/*
+ * Copyright (c) 2016, Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/errno.h>
+#include <linux/uaccess.h>
+#include <linux/debugfs.h>
+#include <sound/apr_audio-v2.h>
+
+/* ERROR STRING */
+/* Success. The operation completed with no errors. */
+#define ADSP_EOK_STR          "ADSP_EOK"
+/* General failure. */
+#define ADSP_EFAILED_STR      "ADSP_EFAILED"
+/* Bad operation parameter. */
+#define ADSP_EBADPARAM_STR    "ADSP_EBADPARAM"
+/* Unsupported routine or operation. */
+#define ADSP_EUNSUPPORTED_STR "ADSP_EUNSUPPORTED"
+/* Unsupported version. */
+#define ADSP_EVERSION_STR     "ADSP_EVERSION"
+/* Unexpected problem encountered. */
+#define ADSP_EUNEXPECTED_STR  "ADSP_EUNEXPECTED"
+/* Unhandled problem occurred. */
+#define ADSP_EPANIC_STR       "ADSP_EPANIC"
+/* Unable to allocate resource. */
+#define ADSP_ENORESOURCE_STR  "ADSP_ENORESOURCE"
+/* Invalid handle. */
+#define ADSP_EHANDLE_STR      "ADSP_EHANDLE"
+/* Operation is already processed. */
+#define ADSP_EALREADY_STR     "ADSP_EALREADY"
+/* Operation is not ready to be processed. */
+#define ADSP_ENOTREADY_STR    "ADSP_ENOTREADY"
+/* Operation is pending completion. */
+#define ADSP_EPENDING_STR     "ADSP_EPENDING"
+/* Operation could not be accepted or processed. */
+#define ADSP_EBUSY_STR        "ADSP_EBUSY"
+/* Operation aborted due to an error. */
+#define ADSP_EABORTED_STR     "ADSP_EABORTED"
+/* Operation preempted by a higher priority. */
+#define ADSP_EPREEMPTED_STR   "ADSP_EPREEMPTED"
+/* Operation requests intervention to complete. */
+#define ADSP_ECONTINUE_STR    "ADSP_ECONTINUE"
+/* Operation requests immediate intervention to complete. */
+#define ADSP_EIMMEDIATE_STR   "ADSP_EIMMEDIATE"
+/* Operation is not implemented. */
+#define ADSP_ENOTIMPL_STR     "ADSP_ENOTIMPL"
+/* Operation needs more data or resources. */
+#define ADSP_ENEEDMORE_STR    "ADSP_ENEEDMORE"
+/* Operation does not have memory. */
+#define ADSP_ENOMEMORY_STR    "ADSP_ENOMEMORY"
+/* Item does not exist. */
+#define ADSP_ENOTEXIST_STR    "ADSP_ENOTEXIST"
+/* Unexpected error code. */
+#define ADSP_ERR_MAX_STR      "ADSP_ERR_MAX"
+
+#ifdef CONFIG_SND_SOC_QDSP_DEBUG
+static bool adsp_err_panic;
+
+#ifdef CONFIG_DEBUG_FS
+static struct dentry *debugfs_adsp_err;
+
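+/*
+ * Debugfs control for adsp_err_panic: writing '0' to the
+ * msm_adsp_audio_debug node clears it, any other leading character
+ * sets it, after which adsp_err_check_panic() panics the kernel on
+ * the next ADSP error (ADSP_EALREADY excepted). For example:
+ *
+ *	echo 1 > /sys/kernel/debug/msm_adsp_audio_debug
+ */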
+static ssize_t adsp_err_debug_write(struct file *filp,
+	const char __user *ubuf, size_t cnt, loff_t *ppos)
+{
+	char cmd;
+
+	if (copy_from_user(&cmd, ubuf, 1))
+		return -EFAULT;
+
+	if (cmd == '0')
+		adsp_err_panic = false;
+	else
+		adsp_err_panic = true;
+
+	return cnt;
+}
+
+static const struct file_operations adsp_err_debug_ops = {
+	.write = adsp_err_debug_write,
+};
+#endif
+#endif
+
+struct adsp_err_code {
+	int		lnx_err_code;
+	char	*adsp_err_str;
+};
+
+static struct adsp_err_code adsp_err_code_info[ADSP_ERR_MAX+1] = {
+	{ 0, ADSP_EOK_STR},
+	{ -ENOTRECOVERABLE, ADSP_EFAILED_STR},
+	{ -EINVAL, ADSP_EBADPARAM_STR},
+	{ -ENOSYS, ADSP_EUNSUPPORTED_STR},
+	{ -ENOPROTOOPT, ADSP_EVERSION_STR},
+	{ -ENOTRECOVERABLE, ADSP_EUNEXPECTED_STR},
+	{ -ENOTRECOVERABLE, ADSP_EPANIC_STR},
+	{ -ENOSPC, ADSP_ENORESOURCE_STR},
+	{ -EBADR, ADSP_EHANDLE_STR},
+	{ -EALREADY, ADSP_EALREADY_STR},
+	{ -EPERM, ADSP_ENOTREADY_STR},
+	{ -EINPROGRESS, ADSP_EPENDING_STR},
+	{ -EBUSY, ADSP_EBUSY_STR},
+	{ -ECANCELED, ADSP_EABORTED_STR},
+	{ -EAGAIN, ADSP_EPREEMPTED_STR},
+	{ -EAGAIN, ADSP_ECONTINUE_STR},
+	{ -EAGAIN, ADSP_EIMMEDIATE_STR},
+	{ -EAGAIN, ADSP_ENOTIMPL_STR},
+	{ -ENODATA, ADSP_ENEEDMORE_STR},
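+	/*
+	 * Placeholder for what appears to be an unused ADSP status value
+	 * between ADSP_ENEEDMORE and ADSP_ENOMEMORY; the lookup functions
+	 * below index this array directly by status code, so the slot
+	 * must stay occupied.
+	 */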
+	{ -EADV, ADSP_ERR_MAX_STR},
+	{ -ENOMEM, ADSP_ENOMEMORY_STR},
+	{ -ENODEV, ADSP_ENOTEXIST_STR},
+	{ -EADV, ADSP_ERR_MAX_STR},
+};
+
+#ifdef CONFIG_SND_SOC_QDSP_DEBUG
+static inline void adsp_err_check_panic(u32 adsp_error)
+{
+	if (adsp_err_panic && adsp_error != ADSP_EALREADY)
+		panic("%s: encounter adsp_err=0x%x\n", __func__, adsp_error);
+}
+#else
+static inline void adsp_err_check_panic(u32 adsp_error) {}
+#endif
+
+int adsp_err_get_lnx_err_code(u32 adsp_error)
+{
+	adsp_err_check_panic(adsp_error);
+
+	if (adsp_error > ADSP_ERR_MAX)
+		return adsp_err_code_info[ADSP_ERR_MAX].lnx_err_code;
+	else
+		return adsp_err_code_info[adsp_error].lnx_err_code;
+}
+
+char *adsp_err_get_err_str(u32 adsp_error)
+{
+	if (adsp_error > ADSP_ERR_MAX)
+		return adsp_err_code_info[ADSP_ERR_MAX].adsp_err_str;
+	else
+		return adsp_err_code_info[adsp_error].adsp_err_str;
+}
+
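+/*
+ * Typical caller pattern (sketch; "status" stands for the status word
+ * carried in an APR response, not a variable defined here):
+ *
+ *	if (status != ADSP_EOK) {
+ *		pr_err("cmd failed, adsp err %s\n",
+ *		       adsp_err_get_err_str(status));
+ *		return adsp_err_get_lnx_err_code(status);
+ *	}
+ */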
+#if defined(CONFIG_SND_SOC_QDSP_DEBUG) && defined(CONFIG_DEBUG_FS)
+static int __init adsp_err_init(void)
+{
+	debugfs_adsp_err = debugfs_create_file("msm_adsp_audio_debug",
+					       S_IFREG | S_IRUGO, NULL, NULL,
+					       &adsp_err_debug_ops);
+
+	return 0;
+}
+
+device_initcall(adsp_err_init);
+#endif
diff -Nruw linux-4.4.115-fbx/sound/soc/msm./qdsp6v2/audio_calibration.c linux-4.4.115-fbx/sound/soc/msm/qdsp6v2/audio_calibration.c
--- linux-4.4.115-fbx/sound/soc/msm./qdsp6v2/audio_calibration.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/sound/soc/msm/qdsp6v2/audio_calibration.c	2019-01-22 16:16:29.623301827 +0100
@@ -0,0 +1,628 @@
+/* Copyright (c) 2014, 2016 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/slab.h>
+#include <linux/fs.h>
+#include <linux/module.h>
+#include <linux/miscdevice.h>
+#include <linux/uaccess.h>
+#include <linux/mutex.h>
+#include <linux/msm_ion.h>
+#include <linux/msm_audio_ion.h>
+#include <sound/audio_calibration.h>
+#include <sound/audio_cal_utils.h>
+
+struct audio_cal_client_info {
+	struct list_head		list;
+	struct audio_cal_callbacks	*callbacks;
+};
+
+struct audio_cal_info {
+	struct mutex			common_lock;
+	struct mutex			cal_mutex[MAX_CAL_TYPES];
+	struct list_head		client_info[MAX_CAL_TYPES];
+	int				ref_count;
+};
+
+static struct audio_cal_info	audio_cal;
+
+
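+/*
+ * Two callback sets are considered equal when every function pointer
+ * matches, or when both sets are NULL; deregistration relies on this
+ * to locate the exact client entry to remove.
+ */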
+static bool callbacks_are_equal(struct audio_cal_callbacks *callback1,
+				struct audio_cal_callbacks *callback2)
+{
+	bool				ret = true;
+	struct audio_cal_callbacks	*call1 = callback1;
+	struct audio_cal_callbacks	*call2 = callback2;
+	pr_debug("%s\n", __func__);
+
+	if ((call1 == NULL) && (call2 == NULL))
+		ret = true;
+	else if ((call1 == NULL) || (call2 == NULL))
+		ret = false;
+	else if ((call1->alloc != call2->alloc) ||
+		(call1->dealloc != call2->dealloc) ||
+		(call1->pre_cal != call2->pre_cal) ||
+		(call1->set_cal != call2->set_cal) ||
+		(call1->get_cal != call2->get_cal) ||
+		(call1->post_cal != call2->post_cal))
+		ret = false;
+	return ret;
+}
+
+int audio_cal_deregister(int num_cal_types,
+			 struct audio_cal_reg *reg_data)
+{
+	int				ret = 0;
+	int				i = 0;
+	struct list_head		*ptr, *next;
+	struct audio_cal_client_info	*client_info_node = NULL;
+	pr_debug("%s\n", __func__);
+
+	if (reg_data == NULL) {
+		pr_err("%s: reg_data is NULL!\n", __func__);
+		ret = -EINVAL;
+		goto done;
+	} else if ((num_cal_types <= 0) ||
+		(num_cal_types > MAX_CAL_TYPES)) {
+		pr_err("%s: num_cal_types of %d is Invalid!\n",
+			__func__, num_cal_types);
+		ret = -EINVAL;
+		goto done;
+	}
+
+	for (; i < num_cal_types; i++) {
+		if ((reg_data[i].cal_type < 0) ||
+			(reg_data[i].cal_type >= MAX_CAL_TYPES)) {
+			pr_err("%s: cal type %d at index %d is Invalid!\n",
+				__func__, reg_data[i].cal_type, i);
+			ret = -EINVAL;
+			continue;
+		}
+
+		mutex_lock(&audio_cal.cal_mutex[reg_data[i].cal_type]);
+		list_for_each_safe(ptr, next,
+			&audio_cal.client_info[reg_data[i].cal_type]) {
+
+			client_info_node = list_entry(ptr,
+				struct audio_cal_client_info, list);
+			if (callbacks_are_equal(client_info_node->callbacks,
+				&reg_data[i].callbacks)) {
+				list_del(&client_info_node->list);
+				kfree(client_info_node->callbacks);
+				client_info_node->callbacks = NULL;
+				kfree(client_info_node);
+				client_info_node = NULL;
+				break;
+			}
+		}
+		mutex_unlock(&audio_cal.cal_mutex[reg_data[i].cal_type]);
+	}
+done:
+	return ret;
+}
+
+int audio_cal_register(int num_cal_types,
+			 struct audio_cal_reg *reg_data)
+{
+	int				ret = 0;
+	int				i = 0;
+	struct audio_cal_client_info	*client_info_node = NULL;
+	struct audio_cal_callbacks	*callback_node = NULL;
+	pr_debug("%s\n", __func__);
+
+	if (reg_data == NULL) {
+		pr_err("%s: callbacks are NULL!\n", __func__);
+		ret = -EINVAL;
+		goto done;
+	} else if ((num_cal_types <= 0) ||
+		(num_cal_types > MAX_CAL_TYPES)) {
+		pr_err("%s: num_cal_types of %d is Invalid!\n",
+			__func__, num_cal_types);
+		ret = -EINVAL;
+		goto done;
+	}
+
+	for (; i < num_cal_types; i++) {
+		if ((reg_data[i].cal_type < 0) ||
+			(reg_data[i].cal_type >= MAX_CAL_TYPES)) {
+			pr_err("%s: cal type %d at index %d is Invalid!\n",
+				__func__, reg_data[i].cal_type, i);
+			ret = -EINVAL;
+			goto err;
+		}
+
+		client_info_node = kmalloc(sizeof(*client_info_node),
+			GFP_KERNEL);
+		if (client_info_node == NULL) {
+			pr_err("%s: could not allocate client_info_node!\n",
+				__func__);
+			ret = -ENOMEM;
+			goto err;
+		}
+		INIT_LIST_HEAD(&client_info_node->list);
+
+		callback_node = kmalloc(sizeof(*callback_node),
+			GFP_KERNEL);
+		if (callback_node == NULL) {
+			pr_err("%s: could not allocate callback_node!\n",
+				__func__);
+			ret = -ENOMEM;
+			goto err;
+		}
+
+		memcpy(callback_node, &reg_data[i].callbacks,
+			sizeof(*callback_node));
+		client_info_node->callbacks = callback_node;
+
+		mutex_lock(&audio_cal.cal_mutex[reg_data[i].cal_type]);
+		list_add_tail(&client_info_node->list,
+			&audio_cal.client_info[reg_data[i].cal_type]);
+		mutex_unlock(&audio_cal.cal_mutex[reg_data[i].cal_type]);
+	}
+done:
+	return ret;
+err:
+	audio_cal_deregister(num_cal_types, reg_data);
+	return ret;
+}
+
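+/*
+ * The call_*() helpers below all follow the same pattern: walk every client
+ * registered for the cal type, invoke the matching callback when one is set,
+ * and keep iterating on failure so one failing client cannot block the rest;
+ * the last error seen is returned.
+ */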
+static int call_allocs(int32_t cal_type,
+				size_t cal_type_size, void *data)
+{
+	int				ret = 0;
+	int				ret2 = 0;
+	struct list_head		*ptr, *next;
+	struct audio_cal_client_info	*client_info_node = NULL;
+	pr_debug("%s\n", __func__);
+
+	list_for_each_safe(ptr, next,
+			&audio_cal.client_info[cal_type]) {
+
+		client_info_node = list_entry(ptr,
+			struct audio_cal_client_info, list);
+
+		if (client_info_node->callbacks->alloc == NULL)
+			continue;
+
+		ret2 = client_info_node->callbacks->
+			alloc(cal_type, cal_type_size, data);
+		if (ret2 < 0) {
+			pr_err("%s: alloc failed!\n", __func__);
+			ret = ret2;
+		}
+	}
+	return ret;
+}
+
+static int call_deallocs(int32_t cal_type,
+				size_t cal_type_size, void *data)
+{
+	int				ret = 0;
+	int				ret2 = 0;
+	struct list_head		*ptr, *next;
+	struct audio_cal_client_info	*client_info_node = NULL;
+	pr_debug("%s cal type %d\n", __func__, cal_type);
+
+	list_for_each_safe(ptr, next,
+			&audio_cal.client_info[cal_type]) {
+
+		client_info_node = list_entry(ptr,
+			struct audio_cal_client_info, list);
+
+		if (client_info_node->callbacks->dealloc == NULL)
+			continue;
+
+		ret2 = client_info_node->callbacks->
+			dealloc(cal_type, cal_type_size, data);
+		if (ret2 < 0) {
+			pr_err("%s: dealloc failed!\n", __func__);
+			ret = ret2;
+		}
+	}
+	return ret;
+}
+
+static int call_pre_cals(int32_t cal_type,
+				size_t cal_type_size, void *data)
+{
+	int				ret = 0;
+	int				ret2 = 0;
+	struct list_head		*ptr, *next;
+	struct audio_cal_client_info	*client_info_node = NULL;
+	pr_debug("%s cal type %d\n", __func__, cal_type);
+
+	list_for_each_safe(ptr, next,
+			&audio_cal.client_info[cal_type]) {
+
+		client_info_node = list_entry(ptr,
+			struct audio_cal_client_info, list);
+
+		if (client_info_node->callbacks->pre_cal == NULL)
+			continue;
+
+		ret2 = client_info_node->callbacks->
+			pre_cal(cal_type, cal_type_size, data);
+		if (ret2 < 0) {
+			pr_err("%s: pre_cal failed!\n", __func__);
+			ret = ret2;
+		}
+	}
+	return ret;
+}
+
+static int call_post_cals(int32_t cal_type,
+				size_t cal_type_size, void *data)
+{
+	int				ret = 0;
+	int				ret2 = 0;
+	struct list_head		*ptr, *next;
+	struct audio_cal_client_info	*client_info_node = NULL;
+	pr_debug("%s cal type %d\n", __func__, cal_type);
+
+	list_for_each_safe(ptr, next,
+			&audio_cal.client_info[cal_type]) {
+
+		client_info_node = list_entry(ptr,
+			struct audio_cal_client_info, list);
+
+		if (client_info_node->callbacks->post_cal == NULL)
+			continue;
+
+		ret2 = client_info_node->callbacks->
+			post_cal(cal_type, cal_type_size, data);
+		if (ret2 < 0) {
+			pr_err("%s: post_cal failed!\n", __func__);
+			ret = ret2;
+		}
+	}
+	return ret;
+}
+
+static int call_set_cals(int32_t cal_type,
+				size_t cal_type_size, void *data)
+{
+	int				ret = 0;
+	int				ret2 = 0;
+	struct list_head		*ptr, *next;
+	struct audio_cal_client_info	*client_info_node = NULL;
+	pr_debug("%s cal type %d\n", __func__, cal_type);
+
+	list_for_each_safe(ptr, next,
+			&audio_cal.client_info[cal_type]) {
+
+		client_info_node = list_entry(ptr,
+			struct audio_cal_client_info, list);
+
+		if (client_info_node->callbacks->set_cal == NULL)
+			continue;
+
+		ret2 = client_info_node->callbacks->
+			set_cal(cal_type, cal_type_size, data);
+		if (ret2 < 0) {
+			pr_err("%s: set_cal failed!\n", __func__);
+			ret = ret2;
+		}
+	}
+	return ret;
+}
+
+static int call_get_cals(int32_t cal_type,
+				size_t cal_type_size, void *data)
+{
+	int				ret = 0;
+	int				ret2 = 0;
+	struct list_head		*ptr, *next;
+	struct audio_cal_client_info	*client_info_node = NULL;
+	pr_debug("%s cal type %d\n", __func__, cal_type);
+
+	list_for_each_safe(ptr, next,
+			&audio_cal.client_info[cal_type]) {
+
+		client_info_node = list_entry(ptr,
+			struct audio_cal_client_info, list);
+
+		if (client_info_node->callbacks->get_cal == NULL)
+			continue;
+
+		ret2 = client_info_node->callbacks->
+			get_cal(cal_type, cal_type_size, data);
+		if (ret2 < 0) {
+			pr_err("%s: get_cal failed!\n", __func__);
+			ret = ret2;
+		}
+	}
+	return ret;
+}
+
+static int audio_cal_open(struct inode *inode, struct file *f)
+{
+	int ret = 0;
+	pr_debug("%s\n", __func__);
+
+	mutex_lock(&audio_cal.common_lock);
+	audio_cal.ref_count++;
+	mutex_unlock(&audio_cal.common_lock);
+
+	return ret;
+}
+
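+/*
+ * Issue a wildcard dealloc (mem_handle -1, ALL_CAL_BLOCKS) to every client
+ * of every cal type; used when the last file handle is released.
+ */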
+static void dealloc_all_clients(void)
+{
+	int				i = 0;
+	struct audio_cal_type_dealloc	dealloc_data;
+	pr_debug("%s\n", __func__);
+
+	dealloc_data.cal_hdr.version = VERSION_0_0;
+	dealloc_data.cal_hdr.buffer_number = ALL_CAL_BLOCKS;
+	dealloc_data.cal_data.mem_handle = -1;
+
+	for (; i < MAX_CAL_TYPES; i++)
+		call_deallocs(i, sizeof(dealloc_data), &dealloc_data);
+}
+
+static int audio_cal_release(struct inode *inode, struct file *f)
+{
+	int ret = 0;
+	pr_debug("%s\n", __func__);
+
+	mutex_lock(&audio_cal.common_lock);
+	audio_cal.ref_count--;
+	if (audio_cal.ref_count <= 0) {
+		audio_cal.ref_count = 0;
+		dealloc_all_clients();
+	}
+	mutex_unlock(&audio_cal.common_lock);
+
+	return ret;
+}
+
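+/*
+ * Common ioctl path: the user buffer is read in two steps - the leading
+ * size field first, then the full payload once the size has been
+ * bounds-checked.  All header fields are validated before the per-type
+ * mutex is taken and the matching call_*() helper is dispatched.
+ */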
+static long audio_cal_shared_ioctl(struct file *file, unsigned int cmd,
+							void __user *arg)
+{
+	int				ret = 0;
+	int32_t				size;
+	struct audio_cal_basic		*data = NULL;
+	pr_debug("%s\n", __func__);
+
+	switch (cmd) {
+	case AUDIO_ALLOCATE_CALIBRATION:
+	case AUDIO_DEALLOCATE_CALIBRATION:
+	case AUDIO_PREPARE_CALIBRATION:
+	case AUDIO_SET_CALIBRATION:
+	case AUDIO_GET_CALIBRATION:
+	case AUDIO_POST_CALIBRATION:
+		break;
+	default:
+		pr_err("%s: ioctl not found!\n", __func__);
+		ret = -EFAULT;
+		goto done;
+	}
+
+	if (copy_from_user(&size, arg, sizeof(size))) {
+		pr_err("%s: Could not copy size value from user\n", __func__);
+		ret = -EFAULT;
+		goto done;
+	} else if ((size < (int32_t)sizeof(struct audio_cal_basic))
+		|| (size > MAX_IOCTL_CMD_SIZE)) {
+		pr_err("%s: Invalid size sent to driver: %d, max size is %d, min size is %zd\n",
+			__func__, size, MAX_IOCTL_CMD_SIZE,
+			sizeof(struct audio_cal_basic));
+		ret = -EINVAL;
+		goto done;
+	}
+
+	data = kmalloc(size, GFP_KERNEL);
+	if (data == NULL) {
+		pr_err("%s: Could not allocate memory of size %d for ioctl\n",
+			__func__, size);
+		ret = -ENOMEM;
+		goto done;
+	} else if (copy_from_user(data, arg, size)) {
+		pr_err("%s: Could not copy data from user\n",
+			__func__);
+		ret = -EFAULT;
+		goto done;
+	} else if ((data->hdr.cal_type < 0) ||
+		(data->hdr.cal_type >= MAX_CAL_TYPES)) {
+		pr_err("%s: cal type %d is Invalid!\n",
+			__func__, data->hdr.cal_type);
+		ret = -EINVAL;
+		goto done;
+	} else if ((data->hdr.cal_type_size <
+		sizeof(struct audio_cal_type_basic)) ||
+		(data->hdr.cal_type_size >
+		get_user_cal_type_size(data->hdr.cal_type))) {
+		pr_err("%s: cal type size %d is Invalid! Max is %zd!\n",
+			__func__, data->hdr.cal_type_size,
+			get_user_cal_type_size(data->hdr.cal_type));
+		ret = -EINVAL;
+		goto done;
+	} else if (data->cal_type.cal_hdr.buffer_number < 0) {
+		pr_err("%s: cal type %d Invalid buffer number %d!\n",
+			__func__, data->hdr.cal_type,
+			data->cal_type.cal_hdr.buffer_number);
+		ret = -EINVAL;
+		goto done;
+	} else if ((data->hdr.cal_type_size + sizeof(data->hdr)) > size) {
+		pr_err("%s: cal type hdr size %zd + cal type size %d is greater than user buffer size %d\n",
+			__func__, sizeof(data->hdr), data->hdr.cal_type_size,
+			size);
+		ret = -EFAULT;
+		goto done;
+	}
+
+	mutex_lock(&audio_cal.cal_mutex[data->hdr.cal_type]);
+
+	switch (cmd) {
+	case AUDIO_ALLOCATE_CALIBRATION:
+		ret = call_allocs(data->hdr.cal_type,
+			data->hdr.cal_type_size, &data->cal_type);
+		break;
+	case AUDIO_DEALLOCATE_CALIBRATION:
+		ret = call_deallocs(data->hdr.cal_type,
+			data->hdr.cal_type_size, &data->cal_type);
+		break;
+	case AUDIO_PREPARE_CALIBRATION:
+		ret = call_pre_cals(data->hdr.cal_type,
+			data->hdr.cal_type_size, &data->cal_type);
+		break;
+	case AUDIO_SET_CALIBRATION:
+		ret = call_set_cals(data->hdr.cal_type,
+			data->hdr.cal_type_size, &data->cal_type);
+		break;
+	case AUDIO_GET_CALIBRATION:
+		ret = call_get_cals(data->hdr.cal_type,
+			data->hdr.cal_type_size, &data->cal_type);
+		break;
+	case AUDIO_POST_CALIBRATION:
+		ret = call_post_cals(data->hdr.cal_type,
+			data->hdr.cal_type_size, &data->cal_type);
+		break;
+	}
+
+	if (cmd == AUDIO_GET_CALIBRATION) {
+		if (data->hdr.cal_type_size == 0)
+			goto unlock;
+		if (copy_to_user(arg, data,
+			sizeof(data->hdr) + data->hdr.cal_type_size)) {
+			pr_err("%s: Could not copy cal type to user\n",
+				__func__);
+			ret = -EFAULT;
+			goto unlock;
+		}
+	}
+
+unlock:
+	mutex_unlock(&audio_cal.cal_mutex[data->hdr.cal_type]);
+done:
+	kfree(data);
+	return ret;
+}
+
+static long audio_cal_ioctl(struct file *f,
+		unsigned int cmd, unsigned long arg)
+{
+	return audio_cal_shared_ioctl(f, cmd, (void __user *)arg);
+}
+
+#ifdef CONFIG_COMPAT
+
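+/*
+ * The 32-bit variants carry a compat_uptr_t and map one-to-one onto the
+ * native commands; compat_ptr() converts the user pointer before the shared
+ * ioctl handler is reused.
+ */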
+#define AUDIO_ALLOCATE_CALIBRATION32	_IOWR(CAL_IOCTL_MAGIC, \
+							200, compat_uptr_t)
+#define AUDIO_DEALLOCATE_CALIBRATION32	_IOWR(CAL_IOCTL_MAGIC, \
+							201, compat_uptr_t)
+#define AUDIO_PREPARE_CALIBRATION32	_IOWR(CAL_IOCTL_MAGIC, \
+							202, compat_uptr_t)
+#define AUDIO_SET_CALIBRATION32		_IOWR(CAL_IOCTL_MAGIC, \
+							203, compat_uptr_t)
+#define AUDIO_GET_CALIBRATION32		_IOWR(CAL_IOCTL_MAGIC, \
+							204, compat_uptr_t)
+#define AUDIO_POST_CALIBRATION32	_IOWR(CAL_IOCTL_MAGIC, \
+							205, compat_uptr_t)
+
+static long audio_cal_compat_ioctl(struct file *f,
+		unsigned int cmd, unsigned long arg)
+{
+	unsigned int cmd64;
+	int ret = 0;
+
+	switch (cmd) {
+	case AUDIO_ALLOCATE_CALIBRATION32:
+		cmd64 = AUDIO_ALLOCATE_CALIBRATION;
+		break;
+	case AUDIO_DEALLOCATE_CALIBRATION32:
+		cmd64 = AUDIO_DEALLOCATE_CALIBRATION;
+		break;
+	case AUDIO_PREPARE_CALIBRATION32:
+		cmd64 = AUDIO_PREPARE_CALIBRATION;
+		break;
+	case AUDIO_SET_CALIBRATION32:
+		cmd64 = AUDIO_SET_CALIBRATION;
+		break;
+	case AUDIO_GET_CALIBRATION32:
+		cmd64 = AUDIO_GET_CALIBRATION;
+		break;
+	case AUDIO_POST_CALIBRATION32:
+		cmd64 = AUDIO_POST_CALIBRATION;
+		break;
+	default:
+		pr_err("%s: ioctl not found!\n", __func__);
+		ret = -EFAULT;
+		goto done;
+	}
+
+	ret = audio_cal_shared_ioctl(f, cmd64, compat_ptr(arg));
+done:
+	return ret;
+}
+#endif
+
+static const struct file_operations audio_cal_fops = {
+	.owner = THIS_MODULE,
+	.open = audio_cal_open,
+	.release = audio_cal_release,
+	.unlocked_ioctl = audio_cal_ioctl,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl =   audio_cal_compat_ioctl,
+#endif
+};
+
+struct miscdevice audio_cal_misc = {
+	.minor	= MISC_DYNAMIC_MINOR,
+	.name	= "msm_audio_cal",
+	.fops	= &audio_cal_fops,
+};
+
+static int __init audio_cal_init(void)
+{
+	int i = 0;
+	pr_debug("%s\n", __func__);
+
+	memset(&audio_cal, 0, sizeof(audio_cal));
+	mutex_init(&audio_cal.common_lock);
+	for (; i < MAX_CAL_TYPES; i++) {
+		INIT_LIST_HEAD(&audio_cal.client_info[i]);
+		mutex_init(&audio_cal.cal_mutex[i]);
+	}
+
+	return misc_register(&audio_cal_misc);
+}
+
+static void __exit audio_cal_exit(void)
+{
+	int				i = 0;
+	struct list_head		*ptr, *next;
+	struct audio_cal_client_info	*client_info_node;
+
+	for (; i < MAX_CAL_TYPES; i++) {
+		list_for_each_safe(ptr, next,
+			&audio_cal.client_info[i]) {
+			client_info_node = list_entry(ptr,
+				struct audio_cal_client_info, list);
+			list_del(&client_info_node->list);
+			kfree(client_info_node->callbacks);
+			client_info_node->callbacks = NULL;
+			kfree(client_info_node);
+			client_info_node = NULL;
+		}
+	}
+	misc_deregister(&audio_cal_misc);
+}
+
+subsys_initcall(audio_cal_init);
+module_exit(audio_cal_exit);
+
+MODULE_DESCRIPTION("SoC QDSP6v2 Audio Calibration driver");
+MODULE_LICENSE("GPL v2");
diff -Nruw linux-4.4.115-fbx/sound/soc/msm./qdsp6v2/audio_cal_utils.c linux-4.4.115-fbx/sound/soc/msm/qdsp6v2/audio_cal_utils.c
--- linux-4.4.115-fbx/sound/soc/msm./qdsp6v2/audio_cal_utils.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/sound/soc/msm/qdsp6v2/audio_cal_utils.c	2019-10-29 09:26:26.145227663 +0100
@@ -0,0 +1,976 @@
+/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/slab.h>
+#include <linux/fs.h>
+#include <linux/module.h>
+#include <linux/miscdevice.h>
+#include <linux/uaccess.h>
+#include <linux/mutex.h>
+#include <sound/audio_cal_utils.h>
+
+static int unmap_memory(struct cal_type_data *cal_type,
+			struct cal_block_data *cal_block);
+
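+/*
+ * Size of the kernel-side cal info structure kept per cal block; a size of
+ * zero means the cal type carries no kernel-side info payload.
+ */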
+size_t get_cal_info_size(int32_t cal_type)
+{
+	size_t size = 0;
+
+	switch (cal_type) {
+	case CVP_VOC_RX_TOPOLOGY_CAL_TYPE:
+		size = sizeof(struct audio_cal_info_voc_top);
+		break;
+	case CVP_VOC_TX_TOPOLOGY_CAL_TYPE:
+		size = sizeof(struct audio_cal_info_voc_top);
+		break;
+	case CVP_VOCPROC_STATIC_CAL_TYPE:
+		size = sizeof(struct audio_cal_info_vocproc);
+		break;
+	case CVP_VOCPROC_DYNAMIC_CAL_TYPE:
+		size = sizeof(struct audio_cal_info_vocvol);
+		break;
+	case CVS_VOCSTRM_STATIC_CAL_TYPE:
+		size = 0;
+		break;
+	case CVP_VOCDEV_CFG_CAL_TYPE:
+		size = sizeof(struct audio_cal_info_vocdev_cfg);
+		break;
+	case CVP_VOCPROC_STATIC_COL_CAL_TYPE:
+		size = sizeof(struct audio_cal_info_voc_col);
+		break;
+	case CVP_VOCPROC_DYNAMIC_COL_CAL_TYPE:
+		size = sizeof(struct audio_cal_info_voc_col);
+		break;
+	case CVS_VOCSTRM_STATIC_COL_CAL_TYPE:
+		size = sizeof(struct audio_cal_info_voc_col);
+		break;
+	case ADM_TOPOLOGY_CAL_TYPE:
+	case ADM_LSM_TOPOLOGY_CAL_TYPE:
+		size = sizeof(struct audio_cal_info_adm_top);
+		break;
+	case ADM_CUST_TOPOLOGY_CAL_TYPE:
+	case CORE_CUSTOM_TOPOLOGIES_CAL_TYPE:
+		size = 0;
+		break;
+	case ADM_AUDPROC_CAL_TYPE:
+	case ADM_LSM_AUDPROC_CAL_TYPE:
+		size = sizeof(struct audio_cal_info_audproc);
+		break;
+	case ADM_AUDVOL_CAL_TYPE:
+	case ADM_RTAC_AUDVOL_CAL_TYPE:
+		size = sizeof(struct audio_cal_info_audvol);
+		break;
+	case ASM_TOPOLOGY_CAL_TYPE:
+		size = sizeof(struct audio_cal_info_asm_top);
+		break;
+	case ASM_CUST_TOPOLOGY_CAL_TYPE:
+		size = 0;
+		break;
+	case ASM_AUDSTRM_CAL_TYPE:
+		size = sizeof(struct audio_cal_info_audstrm);
+		break;
+	case AFE_TOPOLOGY_CAL_TYPE:
+	case AFE_LSM_TOPOLOGY_CAL_TYPE:
+		size = sizeof(struct audio_cal_info_afe_top);
+		break;
+	case AFE_CUST_TOPOLOGY_CAL_TYPE:
+		size = 0;
+		break;
+	case AFE_COMMON_RX_CAL_TYPE:
+		size = sizeof(struct audio_cal_info_afe);
+		break;
+	case AFE_COMMON_TX_CAL_TYPE:
+	case AFE_LSM_TX_CAL_TYPE:
+		size = sizeof(struct audio_cal_info_afe);
+		break;
+	case AFE_FB_SPKR_PROT_CAL_TYPE:
+		size = sizeof(struct audio_cal_info_spk_prot_cfg);
+		break;
+	case AFE_FB_SPKR_PROT_TH_VI_CAL_TYPE:
+		/*
+		 * Since get and set parameter structures are different in size
+		 * use the maximum size of get and set parameter structure
+		 */
+		size = max(sizeof(struct audio_cal_info_sp_th_vi_ftm_cfg),
+			   sizeof(struct audio_cal_info_sp_th_vi_param));
+		break;
+	case AFE_FB_SPKR_PROT_EX_VI_CAL_TYPE:
+		/*
+		 * Since get and set parameter structures are different in size
+		 * use the maximum size of get and set parameter structure
+		 */
+		size = max(sizeof(struct audio_cal_info_sp_ex_vi_ftm_cfg),
+			   sizeof(struct audio_cal_info_sp_ex_vi_param));
+		break;
+	case AFE_ANC_CAL_TYPE:
+		size = 0;
+		break;
+	case AFE_AANC_CAL_TYPE:
+		size = sizeof(struct audio_cal_info_aanc);
+		break;
+	case AFE_HW_DELAY_CAL_TYPE:
+		size = sizeof(struct audio_cal_info_hw_delay);
+		break;
+	case AFE_SIDETONE_CAL_TYPE:
+		size = sizeof(struct audio_cal_info_sidetone);
+		break;
+	case AFE_SIDETONE_IIR_CAL_TYPE:
+		size = sizeof(struct audio_cal_info_sidetone_iir);
+		break;
+	case LSM_CUST_TOPOLOGY_CAL_TYPE:
+		size = 0;
+		break;
+	case LSM_TOPOLOGY_CAL_TYPE:
+		size = sizeof(struct audio_cal_info_lsm_top);
+		break;
+	case ULP_LSM_TOPOLOGY_ID_CAL_TYPE:
+		size = sizeof(struct audio_cal_info_lsm_top);
+		break;
+	case LSM_CAL_TYPE:
+		size = sizeof(struct audio_cal_info_lsm);
+		break;
+	case ADM_RTAC_INFO_CAL_TYPE:
+		size = 0;
+		break;
+	case VOICE_RTAC_INFO_CAL_TYPE:
+		size = 0;
+		break;
+	case ADM_RTAC_APR_CAL_TYPE:
+		size = 0;
+		break;
+	case ASM_RTAC_APR_CAL_TYPE:
+		size = 0;
+		break;
+	case VOICE_RTAC_APR_CAL_TYPE:
+		size = 0;
+		break;
+	case MAD_CAL_TYPE:
+		size = 0;
+		break;
+	case ULP_AFE_CAL_TYPE:
+		size = sizeof(struct audio_cal_info_afe);
+		break;
+	case ULP_LSM_CAL_TYPE:
+		size = sizeof(struct audio_cal_info_lsm);
+		break;
+	case AUDIO_CORE_METAINFO_CAL_TYPE:
+		size = sizeof(struct audio_cal_info_metainfo);
+		break;
+	case SRS_TRUMEDIA_CAL_TYPE:
+		size = 0;
+		break;
+	default:
+		pr_err("%s: Invalid cal type %d!\n",
+			__func__, cal_type);
+	}
+	return size;
+}
+
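+/*
+ * Size of the userspace cal type structure; used as the upper bound when
+ * validating the cal_type_size field passed in through the ioctl.
+ */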
+size_t get_user_cal_type_size(int32_t cal_type)
+{
+	size_t size = 0;
+
+	switch (cal_type) {
+	case CVP_VOC_RX_TOPOLOGY_CAL_TYPE:
+		size = sizeof(struct audio_cal_type_voc_top);
+		break;
+	case CVP_VOC_TX_TOPOLOGY_CAL_TYPE:
+		size = sizeof(struct audio_cal_type_voc_top);
+		break;
+	case CVP_VOCPROC_STATIC_CAL_TYPE:
+		size = sizeof(struct audio_cal_type_vocproc);
+		break;
+	case CVP_VOCPROC_DYNAMIC_CAL_TYPE:
+		size = sizeof(struct audio_cal_type_vocvol);
+		break;
+	case CVS_VOCSTRM_STATIC_CAL_TYPE:
+		size = sizeof(struct audio_cal_type_basic);
+		break;
+	case CVP_VOCDEV_CFG_CAL_TYPE:
+		size = sizeof(struct audio_cal_type_vocdev_cfg);
+		break;
+	case CVP_VOCPROC_STATIC_COL_CAL_TYPE:
+	case CVP_VOCPROC_DYNAMIC_COL_CAL_TYPE:
+	case CVS_VOCSTRM_STATIC_COL_CAL_TYPE:
+		size = sizeof(struct audio_cal_type_voc_col);
+		break;
+	case ADM_TOPOLOGY_CAL_TYPE:
+	case ADM_LSM_TOPOLOGY_CAL_TYPE:
+		size = sizeof(struct audio_cal_type_adm_top);
+		break;
+	case ADM_CUST_TOPOLOGY_CAL_TYPE:
+	case CORE_CUSTOM_TOPOLOGIES_CAL_TYPE:
+		size = sizeof(struct audio_cal_type_basic);
+		break;
+	case ADM_AUDPROC_CAL_TYPE:
+	case ADM_LSM_AUDPROC_CAL_TYPE:
+		size = sizeof(struct audio_cal_type_audproc);
+		break;
+	case ADM_AUDVOL_CAL_TYPE:
+	case ADM_RTAC_AUDVOL_CAL_TYPE:
+		size = sizeof(struct audio_cal_type_audvol);
+		break;
+	case ASM_TOPOLOGY_CAL_TYPE:
+		size = sizeof(struct audio_cal_type_asm_top);
+		break;
+	case ASM_CUST_TOPOLOGY_CAL_TYPE:
+		size = sizeof(struct audio_cal_type_basic);
+		break;
+	case ASM_AUDSTRM_CAL_TYPE:
+		size = sizeof(struct audio_cal_type_audstrm);
+		break;
+	case AFE_TOPOLOGY_CAL_TYPE:
+	case AFE_LSM_TOPOLOGY_CAL_TYPE:
+		size = sizeof(struct audio_cal_type_afe_top);
+		break;
+	case AFE_CUST_TOPOLOGY_CAL_TYPE:
+		size = sizeof(struct audio_cal_type_basic);
+		break;
+	case AFE_COMMON_RX_CAL_TYPE:
+		size = sizeof(struct audio_cal_type_afe);
+		break;
+	case AFE_COMMON_TX_CAL_TYPE:
+	case AFE_LSM_TX_CAL_TYPE:
+		size = sizeof(struct audio_cal_type_afe);
+		break;
+	case AFE_FB_SPKR_PROT_CAL_TYPE:
+		size = sizeof(struct audio_cal_type_fb_spk_prot_cfg);
+		break;
+	case AFE_FB_SPKR_PROT_TH_VI_CAL_TYPE:
+		/*
+		 * Since get and set parameter structures are different in size
+		 * use the maximum size of get and set parameter structure
+		 */
+		size = max(sizeof(struct audio_cal_type_sp_th_vi_ftm_cfg),
+			   sizeof(struct audio_cal_type_sp_th_vi_param));
+		break;
+	case AFE_FB_SPKR_PROT_EX_VI_CAL_TYPE:
+		/*
+		 * Since get and set parameter structures are different in size
+		 * use the maximum size of get and set parameter structure
+		 */
+		size = max(sizeof(struct audio_cal_type_sp_ex_vi_ftm_cfg),
+			   sizeof(struct audio_cal_type_sp_ex_vi_param));
+		break;
+	case AFE_ANC_CAL_TYPE:
+		size = 0;
+		break;
+	case AFE_AANC_CAL_TYPE:
+		size = sizeof(struct audio_cal_type_aanc);
+		break;
+	case AFE_HW_DELAY_CAL_TYPE:
+		size = sizeof(struct audio_cal_type_hw_delay);
+		break;
+	case AFE_SIDETONE_CAL_TYPE:
+		size = sizeof(struct audio_cal_type_sidetone);
+		break;
+	case AFE_SIDETONE_IIR_CAL_TYPE:
+		size = sizeof(struct audio_cal_type_sidetone_iir);
+		break;
+	case LSM_CUST_TOPOLOGY_CAL_TYPE:
+		size = sizeof(struct audio_cal_type_basic);
+		break;
+	case LSM_TOPOLOGY_CAL_TYPE:
+		size = sizeof(struct audio_cal_type_lsm_top);
+		break;
+	case ULP_LSM_TOPOLOGY_ID_CAL_TYPE:
+		size = sizeof(struct audio_cal_type_lsm_top);
+		break;
+	case LSM_CAL_TYPE:
+		size = sizeof(struct audio_cal_type_lsm);
+		break;
+	case ADM_RTAC_INFO_CAL_TYPE:
+		size = 0;
+		break;
+	case VOICE_RTAC_INFO_CAL_TYPE:
+		size = 0;
+		break;
+	case ADM_RTAC_APR_CAL_TYPE:
+		size = 0;
+		break;
+	case ASM_RTAC_APR_CAL_TYPE:
+		size = 0;
+		break;
+	case VOICE_RTAC_APR_CAL_TYPE:
+		size = 0;
+		break;
+	case MAD_CAL_TYPE:
+		size = 0;
+		break;
+	case ULP_AFE_CAL_TYPE:
+		size = sizeof(struct audio_cal_type_afe);
+		break;
+	case ULP_LSM_CAL_TYPE:
+		size = sizeof(struct audio_cal_type_lsm);
+		break;
+	case AUDIO_CORE_METAINFO_CAL_TYPE:
+		size = sizeof(struct audio_cal_type_metainfo);
+		break;
+	case SRS_TRUMEDIA_CAL_TYPE:
+		size = 0;
+		break;
+	default:
+		pr_err("%s: Invalid cal type %d!\n",
+			__func__, cal_type);
+	}
+	return size;
+}
+
+int32_t cal_utils_get_cal_type_version(void *cal_type_data)
+{
+	struct audio_cal_type_basic *data = NULL;
+
+	data = (struct audio_cal_type_basic *)cal_type_data;
+
+	return data->cal_hdr.version;
+}
+
+static struct cal_type_data *create_cal_type_data(
+				struct cal_type_info *info)
+{
+	struct cal_type_data	*cal_type = NULL;
+
+	if ((info->reg.cal_type < 0) ||
+		(info->reg.cal_type >= MAX_CAL_TYPES)) {
+		pr_err("%s: cal type %d is Invalid!\n",
+			__func__, info->reg.cal_type);
+		goto done;
+	}
+
+	if (info->cal_util_callbacks.match_block == NULL) {
+		pr_err("%s: cal type %d no method to match blocks!\n",
+			__func__, info->reg.cal_type);
+		goto done;
+	}
+
+	cal_type = kmalloc(sizeof(*cal_type), GFP_KERNEL);
+	if (cal_type == NULL) {
+		pr_err("%s: could not allocate cal_type!\n", __func__);
+		goto done;
+	}
+	INIT_LIST_HEAD(&cal_type->cal_blocks);
+	mutex_init(&cal_type->lock);
+	memcpy(&cal_type->info, info,
+		sizeof(cal_type->info));
+done:
+	return cal_type;
+}
+
+int cal_utils_create_cal_types(int num_cal_types,
+			struct cal_type_data **cal_type,
+			struct cal_type_info *info)
+{
+	int				ret = 0;
+	int				i;
+	pr_debug("%s\n", __func__);
+
+	if (cal_type == NULL) {
+		pr_err("%s: cal_type is NULL!\n", __func__);
+		ret = -EINVAL;
+		goto done;
+	} else if (info == NULL) {
+		pr_err("%s: info is NULL!\n", __func__);
+		ret = -EINVAL;
+		goto done;
+	} else if ((num_cal_types <= 0) ||
+		(num_cal_types > MAX_CAL_TYPES)) {
+		pr_err("%s: num_cal_types of %d is Invalid!\n",
+			__func__, num_cal_types);
+		ret = -EINVAL;
+		goto done;
+	}
+
+	for (i = 0; i < num_cal_types; i++) {
+		if ((info[i].reg.cal_type < 0) ||
+			(info[i].reg.cal_type >= MAX_CAL_TYPES)) {
+			pr_err("%s: cal type %d at index %d is Invalid!\n",
+				__func__, info[i].reg.cal_type, i);
+			ret = -EINVAL;
+			goto done;
+		}
+
+		cal_type[i] = create_cal_type_data(&info[i]);
+		if (cal_type[i] == NULL) {
+			pr_err("%s: Could not allocate cal_type of index %d!\n",
+				__func__, i);
+			ret = -EINVAL;
+			goto done;
+		}
+
+		ret = audio_cal_register(1, &info[i].reg);
+		if (ret < 0) {
+			pr_err("%s: audio_cal_register failed, ret = %d!\n",
+				__func__, ret);
+			ret = -EINVAL;
+			goto done;
+		}
+		pr_debug("%s: cal type %d at index %d!\n",
+			__func__, info[i].reg.cal_type, i);
+	}
+done:
+	return ret;
+}
+
+static void delete_cal_block(struct cal_block_data *cal_block)
+{
+	pr_debug("%s\n", __func__);
+
+	if (cal_block == NULL)
+		goto done;
+
+	list_del(&cal_block->list);
+	kfree(cal_block->client_info);
+	cal_block->client_info = NULL;
+	kfree(cal_block->cal_info);
+	cal_block->cal_info = NULL;
+	if (cal_block->map_data.ion_client != NULL) {
+		msm_audio_ion_free(cal_block->map_data.ion_client,
+			cal_block->map_data.ion_handle);
+		cal_block->map_data.ion_client = NULL;
+		cal_block->map_data.ion_handle = NULL;
+	}
+	kfree(cal_block);
+done:
+	return;
+}
+
+static void destroy_all_cal_blocks(struct cal_type_data *cal_type)
+{
+	int				ret = 0;
+	struct list_head		*ptr, *next;
+	struct cal_block_data		*cal_block;
+
+	list_for_each_safe(ptr, next,
+		&cal_type->cal_blocks) {
+
+		cal_block = list_entry(ptr,
+			struct cal_block_data, list);
+
+		ret = unmap_memory(cal_type, cal_block);
+		if (ret < 0) {
+			pr_err("%s: unmap_memory failed, cal type %d, ret = %d!\n",
+				__func__,
+			       cal_type->info.reg.cal_type,
+				ret);
+		}
+		delete_cal_block(cal_block);
+		cal_block = NULL;
+	}
+
+	return;
+}
+
+static void destroy_cal_type_data(struct cal_type_data *cal_type)
+{
+	if (cal_type == NULL)
+		goto done;
+
+	destroy_all_cal_blocks(cal_type);
+	list_del(&cal_type->cal_blocks);
+	kfree(cal_type);
+done:
+	return;
+}
+
+void cal_utils_destroy_cal_types(int num_cal_types,
+			struct cal_type_data **cal_type)
+{
+	int				i;
+	pr_debug("%s\n", __func__);
+
+	if (cal_type == NULL) {
+		pr_err("%s: cal_type is NULL!\n", __func__);
+		goto done;
+	} else if ((num_cal_types <= 0) ||
+		(num_cal_types > MAX_CAL_TYPES)) {
+		pr_err("%s: num_cal_types of %d is Invalid!\n",
+			__func__, num_cal_types);
+		goto done;
+	}
+
+	for (i = 0; i < num_cal_types; i++) {
+		audio_cal_deregister(1, &cal_type[i]->info.reg);
+		destroy_cal_type_data(cal_type[i]);
+		cal_type[i] = NULL;
+	}
+done:
+	return;
+}
+
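+/* Return the first (and, for its callers, only) cal block of a cal type. */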
+struct cal_block_data *cal_utils_get_only_cal_block(
+			struct cal_type_data *cal_type)
+{
+	struct list_head		*ptr, *next;
+	struct cal_block_data		*cal_block = NULL;
+
+	if (cal_type == NULL)
+		goto done;
+
+	list_for_each_safe(ptr, next,
+		&cal_type->cal_blocks) {
+
+		cal_block = list_entry(ptr,
+			struct cal_block_data, list);
+		break;
+	}
+done:
+	return cal_block;
+}
+
+bool cal_utils_match_buf_num(struct cal_block_data *cal_block,
+					void *user_data)
+{
+	bool ret = false;
+	struct audio_cal_type_basic	*data = user_data;
+
+	if (cal_block->buffer_number == data->cal_hdr.buffer_number)
+		ret = true;
+
+	return ret;
+}
+
+static struct cal_block_data *get_matching_cal_block(
+					struct cal_type_data *cal_type,
+					void *data)
+{
+	struct list_head		*ptr, *next;
+	struct cal_block_data		*cal_block = NULL;
+
+	list_for_each_safe(ptr, next,
+		&cal_type->cal_blocks) {
+
+		cal_block = list_entry(ptr,
+			struct cal_block_data, list);
+
+		if (cal_type->info.cal_util_callbacks.
+			match_block(cal_block, data))
+			return cal_block;
+	}
+
+	return NULL;
+}
+
+static int cal_block_ion_alloc(struct cal_block_data *cal_block)
+{
+	int	ret = 0;
+
+	if (cal_block == NULL) {
+		pr_err("%s: cal_block is NULL!\n", __func__);
+		ret = -EINVAL;
+		goto done;
+	}
+
+	ret = msm_audio_ion_import("audio_cal_client",
+		&cal_block->map_data.ion_client,
+		&cal_block->map_data.ion_handle,
+		cal_block->map_data.ion_map_handle,
+		NULL, 0,
+		&cal_block->cal_data.paddr,
+		&cal_block->map_data.map_size,
+		&cal_block->cal_data.kvaddr);
+	if (ret) {
+		pr_err("%s: audio ION import failed, rc = %d\n",
+			__func__, ret);
+		ret = -ENOMEM;
+		goto done;
+	}
+done:
+	return ret;
+}
+
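+/*
+ * Allocate and link a new cal block: the ION buffer is imported only when a
+ * valid mem_handle (> 0) is supplied, client_info is copied when present,
+ * and cal_info is zero-allocated at the size reported by
+ * get_cal_info_size().
+ */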
+static struct cal_block_data *create_cal_block(struct cal_type_data *cal_type,
+				struct audio_cal_type_basic *basic_cal,
+				size_t client_info_size, void *client_info)
+{
+	struct cal_block_data	*cal_block = NULL;
+
+	if (cal_type == NULL) {
+		pr_err("%s: cal_type is NULL!\n", __func__);
+		goto done;
+	} else if (basic_cal == NULL) {
+		pr_err("%s: basic_cal is NULL!\n", __func__);
+		goto done;
+	}
+
+	cal_block = kzalloc(sizeof(*cal_block),
+		GFP_KERNEL);
+	if (cal_block == NULL) {
+		pr_err("%s: could not allocate cal_block!\n", __func__);
+		goto done;
+	}
+
+	INIT_LIST_HEAD(&cal_block->list);
+
+	cal_block->map_data.ion_map_handle = basic_cal->cal_data.mem_handle;
+	if (basic_cal->cal_data.mem_handle > 0) {
+		if (cal_block_ion_alloc(cal_block)) {
+			pr_err("%s: cal_block_ion_alloc failed!\n",
+				__func__);
+			goto err;
+		}
+	}
+	if (client_info_size > 0) {
+		cal_block->client_info_size = client_info_size;
+		cal_block->client_info = kmalloc(client_info_size, GFP_KERNEL);
+		if (cal_block->client_info == NULL) {
+			pr_err("%s: could not allocate client_info!\n",
+				__func__);
+			goto err;
+		}
+		if (client_info != NULL)
+			memcpy(cal_block->client_info, client_info,
+				client_info_size);
+	}
+
+	cal_block->cal_info = kzalloc(
+		get_cal_info_size(cal_type->info.reg.cal_type),
+		GFP_KERNEL);
+	if (cal_block->cal_info == NULL) {
+		pr_err("%s: could not allocate cal_info!\n",
+			__func__);
+		goto err;
+	}
+	cal_block->buffer_number = basic_cal->cal_hdr.buffer_number;
+	list_add_tail(&cal_block->list, &cal_type->cal_blocks);
+	pr_debug("%s: created block for cal type %d, buf num %d, map handle %d, map size %zd paddr 0x%pK!\n",
+		__func__, cal_type->info.reg.cal_type,
+		cal_block->buffer_number,
+		cal_block->map_data.ion_map_handle,
+		cal_block->map_data.map_size,
+		&cal_block->cal_data.paddr);
+done:
+	return cal_block;
+err:
+	if (cal_block->map_data.ion_client != NULL) {
+		msm_audio_ion_free(cal_block->map_data.ion_client,
+			cal_block->map_data.ion_handle);
+		cal_block->map_data.ion_client = NULL;
+		cal_block->map_data.ion_handle = NULL;
+	}
+	kfree(cal_block->cal_info);
+	cal_block->cal_info = NULL;
+	kfree(cal_block->client_info);
+	cal_block->client_info = NULL;
+	kfree(cal_block);
+	return NULL;
+}
+
+void cal_utils_clear_cal_block_q6maps(int num_cal_types,
+					struct cal_type_data **cal_type)
+{
+	int				i = 0;
+	struct list_head		*ptr, *next;
+	struct cal_block_data		*cal_block;
+	pr_debug("%s\n", __func__);
+
+	if (cal_type == NULL) {
+		pr_err("%s: cal_type is NULL!\n", __func__);
+		goto done;
+	} else if ((num_cal_types <= 0) ||
+		(num_cal_types > MAX_CAL_TYPES)) {
+		pr_err("%s: num_cal_types of %d is Invalid!\n",
+			__func__, num_cal_types);
+		goto done;
+	}
+
+	for (; i < num_cal_types; i++) {
+		if (cal_type[i] == NULL)
+			continue;
+
+		mutex_lock(&cal_type[i]->lock);
+		list_for_each_safe(ptr, next,
+			&cal_type[i]->cal_blocks) {
+
+			cal_block = list_entry(ptr,
+				struct cal_block_data, list);
+
+			cal_block->map_data.q6map_handle = 0;
+		}
+		mutex_unlock(&cal_type[i]->lock);
+	}
+done:
+	return;
+}
+
+static int realloc_memory(struct cal_block_data *cal_block)
+{
+	int ret = 0;
+
+	msm_audio_ion_free(cal_block->map_data.ion_client,
+		cal_block->map_data.ion_handle);
+	cal_block->map_data.ion_client = NULL;
+	cal_block->map_data.ion_handle = NULL;
+	cal_block->cal_data.size = 0;
+
+	ret = cal_block_ion_alloc(cal_block);
+	if (ret < 0)
+		pr_err("%s: realloc_memory failed!\n",
+			__func__);
+	return ret;
+}
+
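+/*
+ * Map/unmap a cal block into the DSP via the cal type's optional callbacks.
+ * Both are no-ops unless the block has a valid ION handle and size;
+ * q6map_handle guards against double mapping or unmapping.
+ */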
+static int map_memory(struct cal_type_data *cal_type,
+			struct cal_block_data *cal_block)
+{
+	int ret = 0;
+
+	if (cal_type->info.cal_util_callbacks.map_cal != NULL) {
+		if ((cal_block->map_data.ion_map_handle < 0) ||
+			(cal_block->map_data.map_size <= 0) ||
+			(cal_block->map_data.q6map_handle != 0)) {
+			goto done;
+		}
+
+		pr_debug("%s: cal type %d call map\n",
+			__func__, cal_type->info.reg.cal_type);
+		ret = cal_type->info.cal_util_callbacks.
+			map_cal(cal_type->info.reg.cal_type, cal_block);
+		if (ret < 0) {
+			pr_err("%s: map_cal failed, cal type %d, ret = %d!\n",
+				__func__, cal_type->info.reg.cal_type,
+				ret);
+			goto done;
+		}
+	}
+done:
+	return ret;
+}
+
+static int unmap_memory(struct cal_type_data *cal_type,
+			struct cal_block_data *cal_block)
+{
+	int ret = 0;
+
+	if (cal_type->info.cal_util_callbacks.unmap_cal != NULL) {
+		if ((cal_block->map_data.ion_map_handle < 0) ||
+			(cal_block->map_data.map_size <= 0) ||
+			(cal_block->map_data.q6map_handle == 0)) {
+			goto done;
+		}
+		pr_debug("%s: cal type %d call unmap\n",
+			__func__, cal_type->info.reg.cal_type);
+		ret = cal_type->info.cal_util_callbacks.
+			unmap_cal(cal_type->info.reg.cal_type, cal_block);
+		if (ret < 0) {
+			pr_err("%s: unmap_cal failed, cal type %d, ret = %d!\n",
+				__func__, cal_type->info.reg.cal_type,
+				ret);
+			goto done;
+		}
+	}
+done:
+	return ret;
+}
+
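+/*
+ * Allocate (or reallocate) the shared memory behind a cal block: an existing
+ * block matching the request is unmapped and its ION buffer reimported,
+ * otherwise a fresh block is created; the result is then mapped to the DSP.
+ */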
+int cal_utils_alloc_cal(size_t data_size, void *data,
+			struct cal_type_data *cal_type,
+			size_t client_info_size, void *client_info)
+{
+	int				ret = 0;
+	struct cal_block_data		*cal_block;
+	struct audio_cal_type_alloc	*alloc_data = data;
+	pr_debug("%s\n", __func__);
+
+	if (cal_type == NULL) {
+		pr_err("%s: cal_type is NULL!\n",
+			__func__);
+		ret = -EINVAL;
+		goto done;
+	}
+	if (data_size < sizeof(struct audio_cal_type_alloc)) {
+		pr_err("%s: data_size of %zd does not equal alloc struct size of %zd!\n",
+			__func__, data_size,
+		       sizeof(struct audio_cal_type_alloc));
+		ret = -EINVAL;
+		goto done;
+	}
+	if ((client_info_size > 0) && (client_info == NULL)) {
+		pr_err("%s: User info pointer is NULL but size is %zd!\n",
+			__func__, client_info_size);
+		ret = -EINVAL;
+		goto done;
+	}
+
+	if (alloc_data->cal_data.mem_handle < 0) {
+		pr_err("%s: mem_handle %d invalid!\n",
+			__func__, alloc_data->cal_data.mem_handle);
+		ret = -EINVAL;
+		goto done;
+	}
+
+	mutex_lock(&cal_type->lock);
+
+	cal_block = get_matching_cal_block(cal_type,
+		data);
+	if (cal_block != NULL) {
+		ret = unmap_memory(cal_type, cal_block);
+		if (ret < 0)
+			goto err;
+		ret = realloc_memory(cal_block);
+		if (ret < 0)
+			goto err;
+	} else {
+		cal_block = create_cal_block(cal_type,
+			(struct audio_cal_type_basic *)alloc_data,
+			client_info_size, client_info);
+		if (cal_block == NULL) {
+			pr_err("%s: create_cal_block failed for %d!\n",
+				__func__, alloc_data->cal_data.mem_handle);
+			ret = -EINVAL;
+			goto err;
+		}
+	}
+
+	ret = map_memory(cal_type, cal_block);
+	if (ret < 0)
+		goto err;
+err:
+	mutex_unlock(&cal_type->lock);
+done:
+	return ret;
+}
+
+int cal_utils_dealloc_cal(size_t data_size, void *data,
+			struct cal_type_data *cal_type)
+{
+	int				ret = 0;
+	struct cal_block_data		*cal_block;
+	struct audio_cal_type_dealloc	*dealloc_data = data;
+	pr_debug("%s\n", __func__);
+
+	if (cal_type == NULL) {
+		pr_err("%s: cal_type is NULL!\n",
+			__func__);
+		ret = -EINVAL;
+		goto done;
+	}
+
+	if (data_size < sizeof(struct audio_cal_type_dealloc)) {
+		pr_err("%s: data_size of %zd does not equal struct size of %zd!\n",
+			__func__, data_size,
+			sizeof(struct audio_cal_type_dealloc));
+		ret = -EINVAL;
+		goto done;
+	}
+
+	if ((dealloc_data->cal_data.mem_handle == -1) &&
+		(dealloc_data->cal_hdr.buffer_number == ALL_CAL_BLOCKS)) {
+		mutex_lock(&cal_type->lock);
+		destroy_all_cal_blocks(cal_type);
+		mutex_unlock(&cal_type->lock);
+		goto done;
+	}
+
+	if (dealloc_data->cal_data.mem_handle < 0) {
+		pr_err("%s: mem_handle %d invalid!\n",
+			__func__, dealloc_data->cal_data.mem_handle);
+		ret = -EINVAL;
+		goto done;
+	}
+
+	mutex_lock(&cal_type->lock);
+	cal_block = get_matching_cal_block(
+		cal_type,
+		data);
+	if (cal_block == NULL) {
+		pr_err("%s: allocation does not exist for %d!\n",
+			__func__, dealloc_data->cal_data.mem_handle);
+		ret = -EINVAL;
+		goto err;
+	}
+
+	ret = unmap_memory(cal_type, cal_block);
+	if (ret < 0)
+		goto err;
+
+	delete_cal_block(cal_block);
+err:
+	mutex_unlock(&cal_type->lock);
+done:
+	return ret;
+}
+
+int cal_utils_set_cal(size_t data_size, void *data,
+			struct cal_type_data *cal_type,
+			size_t client_info_size, void *client_info)
+{
+	int ret = 0;
+	struct cal_block_data		*cal_block;
+	struct audio_cal_type_basic	*basic_data = data;
+	pr_debug("%s\n", __func__);
+
+	if (cal_type == NULL) {
+		pr_err("%s: cal_type is NULL!\n",
+			__func__);
+		ret = -EINVAL;
+		goto done;
+	}
+
+	if ((client_info_size > 0) && (client_info == NULL)) {
+		pr_err("%s: User info pointer is NULL but size is %zd!\n",
+			__func__, client_info_size);
+		ret = -EINVAL;
+		goto done;
+	}
+
+	if (data_size > get_user_cal_type_size(
+		cal_type->info.reg.cal_type)) {
+		pr_err("%s: cal_type %d, data_size of %zd is invalid, expecting %zd!\n",
+			__func__, cal_type->info.reg.cal_type, data_size,
+			get_user_cal_type_size(cal_type->info.reg.cal_type));
+		ret = -EINVAL;
+		goto done;
+	}
+
+	mutex_lock(&cal_type->lock);
+	cal_block = get_matching_cal_block(
+		cal_type,
+		data);
+	if (cal_block == NULL) {
+		if (basic_data->cal_data.mem_handle > 0) {
+			pr_err("%s: allocation does not exist for %d!\n",
+				__func__, basic_data->cal_data.mem_handle);
+			ret = -EINVAL;
+			goto err;
+		} else {
+			cal_block = create_cal_block(
+				cal_type,
+				basic_data,
+				client_info_size, client_info);
+			if (cal_block == NULL) {
+				pr_err("%s: create_cal_block failed for cal type %d!\n",
+					__func__,
+				       cal_type->info.reg.cal_type);
+				ret = -EINVAL;
+				goto err;
+			}
+		}
+	}
+
+	ret = map_memory(cal_type, cal_block);
+	if (ret < 0)
+		goto err;
+
+	cal_block->cal_data.size = basic_data->cal_data.cal_size;
+
+	if (client_info_size > 0) {
+		memcpy(cal_block->client_info,
+			client_info,
+			client_info_size);
+	}
+
+	memcpy(cal_block->cal_info,
+		((uint8_t *)data + sizeof(struct audio_cal_type_basic)),
+		data_size - sizeof(struct audio_cal_type_basic));
+
+err:
+	mutex_unlock(&cal_type->lock);
+done:
+	return ret;
+}
diff -Nruw linux-4.4.115-fbx/sound/soc/msm./qdsp6v2/audio_slimslave.c linux-4.4.115-fbx/sound/soc/msm/qdsp6v2/audio_slimslave.c
--- linux-4.4.115-fbx/sound/soc/msm./qdsp6v2/audio_slimslave.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/sound/soc/msm/qdsp6v2/audio_slimslave.c	2019-01-22 16:16:29.623301827 +0100
@@ -0,0 +1,177 @@
+/* Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/device.h>
+#include <linux/mutex.h>
+#include <linux/miscdevice.h>
+#include <sound/audio_slimslave.h>
+#include <linux/slimbus/slimbus.h>
+#include <linux/pm_runtime.h>
+
+static struct slim_device *slim;
+static int vote_count;
+static struct mutex suspend_lock;
+static bool suspend;
+
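+/*
+ * Runtime-PM vote bookkeeping: open() drops any vote still held so the
+ * client drives voting through the VOTE/UNVOTE ioctls; release() drops a
+ * leftover vote, or re-acquires one when none is held.
+ */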
+static int audio_slim_open(struct inode *inode, struct file *file)
+{
+	pr_debug("%s:\n", __func__);
+
+	if (vote_count) {
+		pr_debug("%s: unvote: vote_count=%d\n", __func__, vote_count);
+		pm_runtime_mark_last_busy(slim->dev.parent);
+		pm_runtime_put(slim->dev.parent);
+		vote_count--;
+	}
+	return 0;
+}
+
+static int audio_slim_release(struct inode *inode, struct file *file)
+{
+	pr_debug("%s:\n", __func__);
+
+	if (vote_count) {
+		pr_debug("%s: unvote: vote_count=%d\n", __func__, vote_count);
+		pm_runtime_mark_last_busy(slim->dev.parent);
+		pm_runtime_put(slim->dev.parent);
+		vote_count--;
+	} else {
+		pr_debug("%s: vote: vote_count=%d\n", __func__, vote_count);
+		pm_runtime_get_sync(slim->dev.parent);
+		vote_count++;
+	}
+	return 0;
+}
+
+static long audio_slim_ioctl(struct file *file, unsigned int cmd,
+			     unsigned long u_arg)
+{
+	switch (cmd) {
+	case AUDIO_SLIMSLAVE_VOTE:
+		mutex_lock(&suspend_lock);
+		if (!vote_count && !suspend) {
+			pr_debug("%s:AUDIO_SLIMSLAVE_VOTE\n", __func__);
+			pm_runtime_get_sync(slim->dev.parent);
+			vote_count++;
+		} else {
+			pr_err("%s:Invalid vote: vote_count=%d suspend=%d\n",
+				 __func__, vote_count, suspend);
+		}
+		mutex_unlock(&suspend_lock);
+		break;
+	case AUDIO_SLIMSLAVE_UNVOTE:
+		mutex_lock(&suspend_lock);
+		if (vote_count && !suspend) {
+			pr_debug("%s:AUDIO_SLIMSLAVE_UNVOTE\n", __func__);
+			pm_runtime_mark_last_busy(slim->dev.parent);
+			pm_runtime_put(slim->dev.parent);
+			vote_count--;
+		} else {
+			pr_err("%s:Invalid unvote: vote_count=%d suspend=%d\n",
+				 __func__, vote_count, suspend);
+		}
+		mutex_unlock(&suspend_lock);
+		break;
+	default:
+		pr_debug("%s: Invalid ioctl cmd: %u\n", __func__, cmd);
+		break;
+	}
+	return 0;
+}
+
+static const struct file_operations audio_slimslave_fops = {
+	.open =                 audio_slim_open,
+	.unlocked_ioctl =       audio_slim_ioctl,
+	.release =              audio_slim_release,
+};
+
+struct miscdevice audio_slimslave_misc = {
+	.minor  =       MISC_DYNAMIC_MINOR,
+	.name   =       AUDIO_SLIMSLAVE_IOCTL_NAME,
+	.fops   =       &audio_slimslave_fops,
+};
+
+static int audio_slimslave_probe(struct slim_device *audio_slim)
+{
+	pr_debug("%s:\n", __func__);
+
+	mutex_init(&suspend_lock);
+	suspend = false;
+	slim = audio_slim;
+	return misc_register(&audio_slimslave_misc);
+}
+
+static int audio_slimslave_remove(struct slim_device *audio_slim)
+{
+	pr_debug("%s:\n", __func__);
+
+	misc_deregister(&audio_slimslave_misc);
+	return 0;
+}
+
+static int audio_slimslave_resume(struct slim_device *audio_slim)
+{
+	pr_debug("%s:\n", __func__);
+
+	mutex_lock(&suspend_lock);
+	suspend = false;
+	mutex_unlock(&suspend_lock);
+	return 0;
+}
+
+static int audio_slimslave_suspend(struct slim_device *audio_slim,
+				   pm_message_t pmesg)
+{
+	pr_debug("%s:\n", __func__);
+
+	mutex_lock(&suspend_lock);
+	suspend = true;
+	mutex_unlock(&suspend_lock);
+	return 0;
+}
+
+static const struct slim_device_id audio_slimslave_dt_match[] = {
+	{"audio-slimslave", 0},
+	{}
+};
+
+static struct slim_driver audio_slimslave_driver = {
+	.driver = {
+		.name = "audio-slimslave",
+		.owner = THIS_MODULE,
+	},
+	.probe = audio_slimslave_probe,
+	.remove = audio_slimslave_remove,
+	.id_table = audio_slimslave_dt_match,
+	.resume = audio_slimslave_resume,
+	.suspend = audio_slimslave_suspend,
+};
+
+static int __init audio_slimslave_init(void)
+{
+	return slim_driver_register(&audio_slimslave_driver);
+}
+module_init(audio_slimslave_init);
+
+static void __exit audio_slimslave_exit(void)
+{
+	slim_driver_unregister(&audio_slimslave_driver);
+}
+module_exit(audio_slimslave_exit);
+
+/* Module information */
+MODULE_DESCRIPTION("Audio side Slimbus slave driver");
+MODULE_LICENSE("GPL v2");
diff -Nruw linux-4.4.115-fbx/sound/soc/msm./qdsp6v2/Makefile linux-4.4.115-fbx/sound/soc/msm/qdsp6v2/Makefile
--- linux-4.4.115-fbx/sound/soc/msm./qdsp6v2/Makefile	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/sound/soc/msm/qdsp6v2/Makefile	2019-10-29 09:26:26.145227663 +0100
@@ -0,0 +1,21 @@
+snd-soc-qdsp6v2-objs += msm-dai-q6-v2.o msm-pcm-q6-v2.o msm-pcm-routing-v2.o \
+			msm-compress-q6-v2.o \
+			msm-pcm-afe-v2.o msm-pcm-voip-v2.o \
+			msm-pcm-voice-v2.o msm-dai-q6-hdmi-v2.o \
+			msm-lsm-client.o msm-pcm-host-voice-v2.o \
+			msm-audio-effects-q6-v2.o msm-pcm-loopback-v2.o \
+			msm-dai-slim.o msm-transcode-loopback-q6-v2.o \
+			adsp_err.o
+obj-$(CONFIG_SND_SOC_QDSP6V2) += snd-soc-qdsp6v2.o msm-pcm-dtmf-v2.o \
+				 msm-dai-stub-v2.o
+obj-$(CONFIG_SND_HWDEP) += msm-pcm-routing-devdep.o
+obj-$(CONFIG_DOLBY_DAP) += msm-dolby-dap-config.o
+obj-$(CONFIG_DOLBY_DS2) += msm-ds2-dap-config.o
+obj-$(CONFIG_DOLBY_LICENSE) += msm-ds2-dap-config.o
+obj-$(CONFIG_DTS_SRS_TM) += msm-dts-srs-tm-config.o
+obj-$(CONFIG_QTI_PP) += msm-qti-pp-config.o
+obj-y += audio_calibration.o audio_cal_utils.o q6adm.o q6afe.o q6asm.o \
+	q6audio-v2.o q6voice.o q6core.o rtac.o q6lsm.o audio_slimslave.o \
+	msm-pcm-q6-noirq.o
+ocmem-audio-objs += audio_ocmem.o
+obj-$(CONFIG_AUDIO_OCMEM) += ocmem-audio.o
diff -Nruw linux-4.4.115-fbx/sound/soc/msm./qdsp6v2/msm-audio-effects-q6-v2.c linux-4.4.115-fbx/sound/soc/msm/qdsp6v2/msm-audio-effects-q6-v2.c
--- linux-4.4.115-fbx/sound/soc/msm./qdsp6v2/msm-audio-effects-q6-v2.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/sound/soc/msm/qdsp6v2/msm-audio-effects-q6-v2.c	2019-10-29 09:26:26.145227663 +0100
@@ -0,0 +1,1383 @@
+/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/slab.h>
+#include <sound/apr_audio-v2.h>
+#include <sound/q6asm-v2.h>
+#include <sound/compress_params.h>
+#include <sound/msm-audio-effects-q6-v2.h>
+#include <sound/devdep_params.h>
+
+#define MAX_ENABLE_CMD_SIZE 32
+
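+/*
+ * Fetch the next value from the userspace parameter list with a bounds
+ * check: evaluates to the value and advances ptr on success, or sets rc to
+ * -EINVAL once the upper limit would be crossed.
+ */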
+#define GET_NEXT(ptr, upper_limit, rc)                                  \
+({                                                                      \
+	if (((ptr) + 1) > (upper_limit)) {                              \
+		pr_err("%s: param list out of boundary\n", __func__);   \
+		(rc) = -EINVAL;                                         \
+	}                                                               \
+	((rc) == 0) ? *(ptr)++ :  -EINVAL;                              \
+})
+
+#define CHECK_PARAM_LEN(len, max_len, tag, rc)                          \
+do {                                                                    \
+	if ((len) > (max_len)) {                                        \
+		pr_err("%s: params length overflows\n", (tag));         \
+		(rc) = -EINVAL;                                         \
+	}                                                               \
+} while (0)
+
+
+bool msm_audio_effects_is_effmodule_supp_in_top(int effect_module,
+						int topology)
+{
+	switch (effect_module) {
+	case VIRTUALIZER_MODULE:
+	case REVERB_MODULE:
+	case BASS_BOOST_MODULE:
+	case PBE_MODULE:
+	case EQ_MODULE:
+		switch (topology) {
+		case ASM_STREAM_POSTPROC_TOPO_ID_SA_PLUS:
+			return true;
+		default:
+			return false;
+		}
+	default:
+		return false;
+	}
+}
+
+int msm_audio_effects_enable_extn(struct audio_client *ac,
+				struct msm_nt_eff_all_config *effects,
+				bool flag)
+{
+	uint32_t updt_params[MAX_ENABLE_CMD_SIZE] = {0};
+	uint32_t params_length;
+	int rc = 0;
+
+	pr_debug("%s\n", __func__);
+	if (!ac) {
+		pr_err("%s: cannot set audio effects\n", __func__);
+		return -EINVAL;
+	}
+	params_length = 0;
+	updt_params[0] = AUDPROC_MODULE_ID_VIRTUALIZER;
+	updt_params[1] = AUDPROC_PARAM_ID_ENABLE;
+	updt_params[2] = VIRTUALIZER_ENABLE_PARAM_SZ;
+	updt_params[3] = flag;
+	params_length += COMMAND_PAYLOAD_SZ + VIRTUALIZER_ENABLE_PARAM_SZ;
+	if (effects->virtualizer.enable_flag)
+		q6asm_send_audio_effects_params(ac, (char *)&updt_params[0],
+					params_length);
+	memset(updt_params, 0, sizeof(updt_params));
+	params_length = 0;
+	updt_params[0] = AUDPROC_MODULE_ID_BASS_BOOST;
+	updt_params[1] = AUDPROC_PARAM_ID_ENABLE;
+	updt_params[2] = BASS_BOOST_ENABLE_PARAM_SZ;
+	updt_params[3] = flag;
+	params_length += COMMAND_PAYLOAD_SZ + BASS_BOOST_ENABLE_PARAM_SZ;
+	if (effects->bass_boost.enable_flag)
+		q6asm_send_audio_effects_params(ac, (char *)&updt_params[0],
+					params_length);
+	memset(updt_params, 0, sizeof(updt_params));
+	params_length = 0;
+	updt_params[0] = AUDPROC_MODULE_ID_POPLESS_EQUALIZER;
+	updt_params[1] = AUDPROC_PARAM_ID_ENABLE;
+	updt_params[2] = EQ_ENABLE_PARAM_SZ;
+	updt_params[3] = flag;
+	params_length += COMMAND_PAYLOAD_SZ + EQ_ENABLE_PARAM_SZ;
+	if (effects->equalizer.enable_flag)
+		q6asm_send_audio_effects_params(ac, (char *)&updt_params[0],
+					params_length);
+	return rc;
+}
+
+int msm_audio_effects_virtualizer_handler(struct audio_client *ac,
+				struct virtualizer_params *virtualizer,
+				long *values)
+{
+	long *param_max_offset = values + MAX_PP_PARAMS_SZ - 1;
+	char *params = NULL;
+	int rc = 0;
+	int devices = GET_NEXT(values, param_max_offset, rc);
+	int num_commands = GET_NEXT(values, param_max_offset, rc);
+	int *updt_params, i, prev_enable_flag;
+	uint32_t params_length = (MAX_INBAND_PARAM_SZ);
+
+	pr_debug("%s\n", __func__);
+	if (!ac || (devices == -EINVAL) || (num_commands == -EINVAL)) {
+		pr_err("%s: cannot set audio effects\n", __func__);
+		return -EINVAL;
+	}
+	params = kzalloc(params_length, GFP_KERNEL);
+	if (!params) {
+		pr_err("%s, params memory alloc failed\n", __func__);
+		return -ENOMEM;
+	}
+	pr_debug("%s: device: %d\n", __func__, devices);
+	updt_params = (int *)params;
+	params_length = 0;
+	for (i = 0; i < num_commands; i++) {
+		uint32_t command_id =
+			GET_NEXT(values, param_max_offset, rc);
+		uint32_t command_config_state =
+			GET_NEXT(values, param_max_offset, rc);
+		uint32_t index_offset =
+			GET_NEXT(values, param_max_offset, rc);
+		uint32_t length =
+			GET_NEXT(values, param_max_offset, rc);
+		switch (command_id) {
+		case VIRTUALIZER_ENABLE:
+			if (length != 1 || index_offset != 0) {
+				pr_err("VIRT ENABLE:invalid params\n");
+				rc = -EINVAL;
+				goto invalid_config;
+			}
+			prev_enable_flag = virtualizer->enable_flag;
+			virtualizer->enable_flag =
+				GET_NEXT(values, param_max_offset, rc);
+			pr_debug("%s:VIRT ENABLE prev:%d, new:%d\n", __func__,
+				prev_enable_flag, virtualizer->enable_flag);
+			if (prev_enable_flag != virtualizer->enable_flag) {
+				params_length += COMMAND_PAYLOAD_SZ +
+					VIRTUALIZER_ENABLE_PARAM_SZ;
+				CHECK_PARAM_LEN(params_length,
+						MAX_INBAND_PARAM_SZ,
+						"VIRT ENABLE", rc);
+				if (rc != 0)
+					goto invalid_config;
+				*updt_params++ =
+				AUDPROC_MODULE_ID_VIRTUALIZER;
+				*updt_params++ =
+				AUDPROC_PARAM_ID_VIRTUALIZER_ENABLE;
+				*updt_params++ =
+				VIRTUALIZER_ENABLE_PARAM_SZ;
+				*updt_params++ =
+				virtualizer->enable_flag;
+			}
+			break;
+		case VIRTUALIZER_STRENGTH:
+			if (length != 1 || index_offset != 0) {
+				pr_err("VIRT STRENGTH:invalid params\n");
+				rc = -EINVAL;
+				goto invalid_config;
+			}
+			virtualizer->strength =
+				GET_NEXT(values, param_max_offset, rc);
+			pr_debug("%s: VIRT STRENGTH val: %d\n",
+					__func__, virtualizer->strength);
+			if (command_config_state == CONFIG_SET) {
+				params_length += COMMAND_PAYLOAD_SZ +
+					VIRTUALIZER_STRENGTH_PARAM_SZ;
+				CHECK_PARAM_LEN(params_length,
+						MAX_INBAND_PARAM_SZ,
+						"VIRT STRENGTH", rc);
+				if (rc != 0)
+					goto invalid_config;
+				*updt_params++ =
+					AUDPROC_MODULE_ID_VIRTUALIZER;
+				*updt_params++ =
+					AUDPROC_PARAM_ID_VIRTUALIZER_STRENGTH;
+				*updt_params++ =
+					VIRTUALIZER_STRENGTH_PARAM_SZ;
+				*updt_params++ =
+					virtualizer->strength;
+			}
+			break;
+		case VIRTUALIZER_OUT_TYPE:
+			if (length != 1 || index_offset != 0) {
+				pr_err("VIRT OUT_TYPE:invalid params\n");
+				rc = -EINVAL;
+				goto invalid_config;
+			}
+			virtualizer->out_type =
+				GET_NEXT(values, param_max_offset, rc);
+			pr_debug("%s: VIRT OUT_TYPE val:%d\n",
+				__func__, virtualizer->out_type);
+			if (command_config_state == CONFIG_SET) {
+				params_length += COMMAND_PAYLOAD_SZ +
+					VIRTUALIZER_OUT_TYPE_PARAM_SZ;
+				CHECK_PARAM_LEN(params_length,
+						MAX_INBAND_PARAM_SZ,
+						"VIRT OUT_TYPE", rc);
+				if (rc != 0)
+					goto invalid_config;
+				*updt_params++ =
+					AUDPROC_MODULE_ID_VIRTUALIZER;
+				*updt_params++ =
+					AUDPROC_PARAM_ID_VIRTUALIZER_OUT_TYPE;
+				*updt_params++ =
+					VIRTUALIZER_OUT_TYPE_PARAM_SZ;
+				*updt_params++ =
+					virtualizer->out_type;
+			}
+			break;
+		case VIRTUALIZER_GAIN_ADJUST:
+			if (length != 1 || index_offset != 0) {
+				pr_err("VIRT GAIN_ADJUST: invalid params\n");
+				rc = -EINVAL;
+				goto invalid_config;
+			}
+			virtualizer->gain_adjust =
+				GET_NEXT(values, param_max_offset, rc);
+			pr_debug("%s: VIRT GAIN_ADJUST val:%d\n",
+				__func__, virtualizer->gain_adjust);
+			if (command_config_state == CONFIG_SET) {
+				params_length += COMMAND_PAYLOAD_SZ +
+					VIRTUALIZER_GAIN_ADJUST_PARAM_SZ;
+				CHECK_PARAM_LEN(params_length,
+						MAX_INBAND_PARAM_SZ,
+						"VIRT GAIN_ADJUST", rc);
+				if (rc != 0)
+					goto invalid_config;
+				*updt_params++ =
+				AUDPROC_MODULE_ID_VIRTUALIZER;
+				*updt_params++ =
+				AUDPROC_PARAM_ID_VIRTUALIZER_GAIN_ADJUST;
+				*updt_params++ =
+				VIRTUALIZER_GAIN_ADJUST_PARAM_SZ;
+				*updt_params++ =
+				virtualizer->gain_adjust;
+			}
+			break;
+		default:
+			pr_err("%s: Invalid command to set config\n", __func__);
+			break;
+		}
+	}
+	if (params_length && (rc == 0))
+		q6asm_send_audio_effects_params(ac, params,
+						params_length);
+	else
+		pr_debug("%s: did not send pp params\n", __func__);
+invalid_config:
+	kfree(params);
+	return rc;
+}
+
+int msm_audio_effects_reverb_handler(struct audio_client *ac,
+				     struct reverb_params *reverb,
+				     long *values)
+{
+	long *param_max_offset = values + MAX_PP_PARAMS_SZ - 1;
+	char *params = NULL;
+	int rc = 0;
+	int devices = GET_NEXT(values, param_max_offset, rc);
+	int num_commands = GET_NEXT(values, param_max_offset, rc);
+	int *updt_params, i, prev_enable_flag;
+	uint32_t params_length = (MAX_INBAND_PARAM_SZ);
+
+	pr_debug("%s\n", __func__);
+	if (!ac || (devices == -EINVAL) || (num_commands == -EINVAL)) {
+		pr_err("%s: cannot set audio effects\n", __func__);
+		return -EINVAL;
+	}
+	params = kzalloc(params_length, GFP_KERNEL);
+	if (!params) {
+		pr_err("%s, params memory alloc failed\n", __func__);
+		return -ENOMEM;
+	}
+	pr_debug("%s: device: %d\n", __func__, devices);
+	updt_params = (int *)params;
+	params_length = 0;
+	for (i = 0; i < num_commands; i++) {
+		uint32_t command_id =
+			GET_NEXT(values, param_max_offset, rc);
+		uint32_t command_config_state =
+			GET_NEXT(values, param_max_offset, rc);
+		uint32_t index_offset =
+			GET_NEXT(values, param_max_offset, rc);
+		uint32_t length =
+			GET_NEXT(values, param_max_offset, rc);
+		switch (command_id) {
+		case REVERB_ENABLE:
+			if (length != 1 || index_offset != 0) {
+				pr_err("REVERB_ENABLE:invalid params\n");
+				rc = -EINVAL;
+				goto invalid_config;
+			}
+			prev_enable_flag = reverb->enable_flag;
+			reverb->enable_flag =
+				GET_NEXT(values, param_max_offset, rc);
+			pr_debug("%s:REVERB_ENABLE prev:%d,new:%d\n", __func__,
+					prev_enable_flag, reverb->enable_flag);
+			if (prev_enable_flag != reverb->enable_flag) {
+				params_length += COMMAND_PAYLOAD_SZ +
+					REVERB_ENABLE_PARAM_SZ;
+				CHECK_PARAM_LEN(params_length,
+						MAX_INBAND_PARAM_SZ,
+						"REVERB_ENABLE", rc);
+				if (rc != 0)
+					goto invalid_config;
+				*updt_params++ =
+					AUDPROC_MODULE_ID_REVERB;
+				*updt_params++ =
+					AUDPROC_PARAM_ID_REVERB_ENABLE;
+				*updt_params++ =
+					REVERB_ENABLE_PARAM_SZ;
+				*updt_params++ =
+					reverb->enable_flag;
+			}
+			break;
+		case REVERB_MODE:
+			if (length != 1 || index_offset != 0) {
+				pr_err("REVERB_MODE:invalid params\n");
+				rc = -EINVAL;
+				goto invalid_config;
+			}
+			reverb->mode =
+				GET_NEXT(values, param_max_offset, rc);
+			pr_debug("%s: REVERB_MODE val:%d\n",
+				__func__, reverb->mode);
+			if (command_config_state == CONFIG_SET) {
+				params_length += COMMAND_PAYLOAD_SZ +
+					REVERB_MODE_PARAM_SZ;
+				CHECK_PARAM_LEN(params_length,
+						MAX_INBAND_PARAM_SZ,
+						"REVERB_MODE", rc);
+				if (rc != 0)
+					goto invalid_config;
+				*updt_params++ =
+					AUDPROC_MODULE_ID_REVERB;
+				*updt_params++ =
+					AUDPROC_PARAM_ID_REVERB_MODE;
+				*updt_params++ =
+					REVERB_MODE_PARAM_SZ;
+				*updt_params++ =
+					reverb->mode;
+			}
+			break;
+		case REVERB_PRESET:
+			if (length != 1 || index_offset != 0) {
+				pr_err("REVERB_PRESET:invalid params\n");
+				rc = -EINVAL;
+				goto invalid_config;
+			}
+			reverb->preset =
+				GET_NEXT(values, param_max_offset, rc);
+			pr_debug("%s: REVERB_PRESET val:%d\n",
+					__func__, reverb->preset);
+			if (command_config_state == CONFIG_SET) {
+				params_length += COMMAND_PAYLOAD_SZ +
+					REVERB_PRESET_PARAM_SZ;
+				CHECK_PARAM_LEN(params_length,
+						MAX_INBAND_PARAM_SZ,
+						"REVERB_PRESET", rc);
+				if (rc != 0)
+					goto invalid_config;
+				*updt_params++ =
+					AUDPROC_MODULE_ID_REVERB;
+				*updt_params++ =
+					AUDPROC_PARAM_ID_REVERB_PRESET;
+				*updt_params++ =
+					REVERB_PRESET_PARAM_SZ;
+				*updt_params++ =
+					reverb->preset;
+			}
+			break;
+		case REVERB_WET_MIX:
+			if (length != 1 || index_offset != 0) {
+				pr_err("REVERB_WET_MIX:invalid params\n");
+				rc = -EINVAL;
+				goto invalid_config;
+			}
+			reverb->wet_mix =
+				GET_NEXT(values, param_max_offset, rc);
+			pr_debug("%s: REVERB_WET_MIX val:%d\n",
+				__func__, reverb->wet_mix);
+			if (command_config_state == CONFIG_SET) {
+				params_length += COMMAND_PAYLOAD_SZ +
+					REVERB_WET_MIX_PARAM_SZ;
+				CHECK_PARAM_LEN(params_length,
+						MAX_INBAND_PARAM_SZ,
+						"REVERB_WET_MIX", rc);
+				if (rc != 0)
+					goto invalid_config;
+				*updt_params++ =
+					AUDPROC_MODULE_ID_REVERB;
+				*updt_params++ =
+					AUDPROC_PARAM_ID_REVERB_WET_MIX;
+				*updt_params++ =
+					REVERB_WET_MIX_PARAM_SZ;
+				*updt_params++ =
+					reverb->wet_mix;
+			}
+			break;
+		case REVERB_GAIN_ADJUST:
+			if (length != 1 || index_offset != 0) {
+				pr_err("REVERB_GAIN_ADJUST:invalid params\n");
+				rc = -EINVAL;
+				goto invalid_config;
+			}
+			reverb->gain_adjust =
+				GET_NEXT(values, param_max_offset, rc);
+			pr_debug("%s: REVERB_GAIN_ADJUST val:%d\n",
+					__func__, reverb->gain_adjust);
+			if (command_config_state == CONFIG_SET) {
+				params_length += COMMAND_PAYLOAD_SZ +
+					REVERB_GAIN_ADJUST_PARAM_SZ;
+				CHECK_PARAM_LEN(params_length,
+						MAX_INBAND_PARAM_SZ,
+						"REVERB_GAIN_ADJUST", rc);
+				if (rc != 0)
+					goto invalid_config;
+				*updt_params++ =
+					AUDPROC_MODULE_ID_REVERB;
+				*updt_params++ =
+					AUDPROC_PARAM_ID_REVERB_GAIN_ADJUST;
+				*updt_params++ =
+					REVERB_GAIN_ADJUST_PARAM_SZ;
+				*updt_params++ =
+					reverb->gain_adjust;
+			}
+			break;
+		case REVERB_ROOM_LEVEL:
+			if (length != 1 || index_offset != 0) {
+				pr_err("REVERB_ROOM_LEVEL:invalid params\n");
+				rc = -EINVAL;
+				goto invalid_config;
+			}
+			reverb->room_level =
+				GET_NEXT(values, param_max_offset, rc);
+			pr_debug("%s: REVERB_ROOM_LEVEL val:%d\n",
+				__func__, reverb->room_level);
+			if (command_config_state == CONFIG_SET) {
+				params_length += COMMAND_PAYLOAD_SZ +
+					REVERB_ROOM_LEVEL_PARAM_SZ;
+				CHECK_PARAM_LEN(params_length,
+						MAX_INBAND_PARAM_SZ,
+						"REVERB_ROOM_LEVEL", rc);
+				if (rc != 0)
+					goto invalid_config;
+				*updt_params++ =
+					AUDPROC_MODULE_ID_REVERB;
+				*updt_params++ =
+					AUDPROC_PARAM_ID_REVERB_ROOM_LEVEL;
+				*updt_params++ =
+					REVERB_ROOM_LEVEL_PARAM_SZ;
+				*updt_params++ =
+					reverb->room_level;
+			}
+			break;
+		case REVERB_ROOM_HF_LEVEL:
+			if (length != 1 || index_offset != 0) {
+				pr_err("REVERB_ROOM_HF_LEVEL:invalid params\n");
+				rc = -EINVAL;
+				goto invalid_config;
+			}
+			reverb->room_hf_level =
+				GET_NEXT(values, param_max_offset, rc);
+			pr_debug("%s: REVERB_ROOM_HF_LEVEL val%d\n",
+				__func__, reverb->room_hf_level);
+			if (command_config_state == CONFIG_SET) {
+				params_length += COMMAND_PAYLOAD_SZ +
+					REVERB_ROOM_HF_LEVEL_PARAM_SZ;
+				CHECK_PARAM_LEN(params_length,
+						MAX_INBAND_PARAM_SZ,
+						"REVERB_ROOM_HF_LEVEL", rc);
+				if (rc != 0)
+					goto invalid_config;
+				*updt_params++ =
+					AUDPROC_MODULE_ID_REVERB;
+				*updt_params++ =
+					AUDPROC_PARAM_ID_REVERB_ROOM_HF_LEVEL;
+				*updt_params++ =
+					REVERB_ROOM_HF_LEVEL_PARAM_SZ;
+				*updt_params++ =
+					reverb->room_hf_level;
+			}
+			break;
+		case REVERB_DECAY_TIME:
+			if (length != 1 || index_offset != 0) {
+				pr_err("REVERB_DECAY_TIME:invalid params\n");
+				rc = -EINVAL;
+				goto invalid_config;
+			}
+			reverb->decay_time =
+				GET_NEXT(values, param_max_offset, rc);
+			pr_debug("%s: REVERB_DECAY_TIME val:%d\n",
+				__func__, reverb->decay_time);
+			if (command_config_state == CONFIG_SET) {
+				params_length += COMMAND_PAYLOAD_SZ +
+					REVERB_DECAY_TIME_PARAM_SZ;
+				CHECK_PARAM_LEN(params_length,
+						MAX_INBAND_PARAM_SZ,
+						"REVERB_DECAY_TIME", rc);
+				if (rc != 0)
+					goto invalid_config;
+				*updt_params++ =
+					AUDPROC_MODULE_ID_REVERB;
+				*updt_params++ =
+					AUDPROC_PARAM_ID_REVERB_DECAY_TIME;
+				*updt_params++ =
+					REVERB_DECAY_TIME_PARAM_SZ;
+				*updt_params++ =
+					reverb->decay_time;
+			}
+			break;
+		case REVERB_DECAY_HF_RATIO:
+			if (length != 1 || index_offset != 0) {
+				pr_err("REVERB_DECAY_HF_RATIOinvalid params\n");
+				rc = -EINVAL;
+				goto invalid_config;
+			}
+			reverb->decay_hf_ratio =
+				GET_NEXT(values, param_max_offset, rc);
+			pr_debug("%s: REVERB_DECAY_HF_RATIO val%d\n",
+				__func__, reverb->decay_hf_ratio);
+			if (command_config_state == CONFIG_SET) {
+				params_length += COMMAND_PAYLOAD_SZ +
+					REVERB_DECAY_HF_RATIO_PARAM_SZ;
+				CHECK_PARAM_LEN(params_length,
+						MAX_INBAND_PARAM_SZ,
+						"REVERB_DECAY_HF_RATIO", rc);
+				if (rc != 0)
+					goto invalid_config;
+				*updt_params++ =
+					AUDPROC_MODULE_ID_REVERB;
+				*updt_params++ =
+					AUDPROC_PARAM_ID_REVERB_DECAY_HF_RATIO;
+				*updt_params++ =
+					REVERB_DECAY_HF_RATIO_PARAM_SZ;
+				*updt_params++ =
+					reverb->decay_hf_ratio;
+			}
+			break;
+		case REVERB_REFLECTIONS_LEVEL:
+			if (length != 1 || index_offset != 0) {
+				pr_err("REVERB_REFLECTION_LVLinvalid params\n");
+				rc = -EINVAL;
+				goto invalid_config;
+			}
+			reverb->reflections_level =
+				GET_NEXT(values, param_max_offset, rc);
+			pr_debug("%s: REVERB_REFLECTIONS_LEVEL val:%d\n",
+				__func__, reverb->reflections_level);
+			if (command_config_state == CONFIG_SET) {
+				params_length += COMMAND_PAYLOAD_SZ +
+					REVERB_REFLECTIONS_LEVEL_PARAM_SZ;
+				CHECK_PARAM_LEN(params_length,
+						MAX_INBAND_PARAM_SZ,
+						"REVERB_REFLECTIONS_LEVEL", rc);
+				if (rc != 0)
+					goto invalid_config;
+				*updt_params++ =
+				AUDPROC_MODULE_ID_REVERB;
+				*updt_params++ =
+				AUDPROC_PARAM_ID_REVERB_REFLECTIONS_LEVEL;
+				*updt_params++ =
+				REVERB_REFLECTIONS_LEVEL_PARAM_SZ;
+				*updt_params++ =
+				reverb->reflections_level;
+			}
+			break;
+		case REVERB_REFLECTIONS_DELAY:
+			if (length != 1 || index_offset != 0) {
+				pr_err("REVERB_REFLECTION_DLYinvalid params\n");
+				rc = -EINVAL;
+				goto invalid_config;
+			}
+			reverb->reflections_delay =
+				GET_NEXT(values, param_max_offset, rc);
+			pr_debug("%s: REVERB_REFLECTIONS_DELAY val:%d\n",
+				__func__, reverb->reflections_delay);
+			if (command_config_state == CONFIG_SET) {
+				params_length += COMMAND_PAYLOAD_SZ +
+					REVERB_REFLECTIONS_DELAY_PARAM_SZ;
+				CHECK_PARAM_LEN(params_length,
+						MAX_INBAND_PARAM_SZ,
+						"REVERB_REFLECTIONS_DELAY", rc);
+				if (rc != 0)
+					goto invalid_config;
+				*updt_params++ =
+				AUDPROC_MODULE_ID_REVERB;
+				*updt_params++ =
+				AUDPROC_PARAM_ID_REVERB_REFLECTIONS_DELAY;
+				*updt_params++ =
+				REVERB_REFLECTIONS_DELAY_PARAM_SZ;
+				*updt_params++ =
+				reverb->reflections_delay;
+			}
+			break;
+		case REVERB_LEVEL:
+			if (length != 1 || index_offset != 0) {
+				pr_err("REVERB_LEVEL:invalid params\n");
+				rc = -EINVAL;
+				goto invalid_config;
+			}
+			reverb->level =
+				GET_NEXT(values, param_max_offset, rc);
+			pr_debug("%s: REVERB_LEVEL val:%d\n",
+				__func__, reverb->level);
+			if (command_config_state == CONFIG_SET) {
+				params_length += COMMAND_PAYLOAD_SZ +
+					REVERB_LEVEL_PARAM_SZ;
+				CHECK_PARAM_LEN(params_length,
+						MAX_INBAND_PARAM_SZ,
+						"REVERB_LEVEL", rc);
+				if (rc != 0)
+					goto invalid_config;
+				*updt_params++ =
+					AUDPROC_MODULE_ID_REVERB;
+				*updt_params++ =
+					AUDPROC_PARAM_ID_REVERB_LEVEL;
+				*updt_params++ =
+					REVERB_LEVEL_PARAM_SZ;
+				*updt_params++ =
+					reverb->level;
+			}
+			break;
+		case REVERB_DELAY:
+			if (length != 1 || index_offset != 0) {
+				pr_err("REVERB_DELAY:invalid params\n");
+				rc = -EINVAL;
+				goto invalid_config;
+			}
+			reverb->delay =
+				GET_NEXT(values, param_max_offset, rc);
+			pr_debug("%s:REVERB_DELAY val:%d\n",
+					__func__, reverb->delay);
+			if (command_config_state == CONFIG_SET) {
+				params_length += COMMAND_PAYLOAD_SZ +
+					REVERB_DELAY_PARAM_SZ;
+				CHECK_PARAM_LEN(params_length,
+						MAX_INBAND_PARAM_SZ,
+						"REVERB_DELAY", rc);
+				if (rc != 0)
+					goto invalid_config;
+				*updt_params++ =
+					AUDPROC_MODULE_ID_REVERB;
+				*updt_params++ =
+					AUDPROC_PARAM_ID_REVERB_DELAY;
+				*updt_params++ =
+					REVERB_DELAY_PARAM_SZ;
+				*updt_params++ =
+					reverb->delay;
+			}
+			break;
+		case REVERB_DIFFUSION:
+			if (length != 1 || index_offset != 0) {
+				pr_err("REVERB_DIFFUSION:invalid params\n");
+				rc = -EINVAL;
+				goto invalid_config;
+			}
+			reverb->diffusion =
+				GET_NEXT(values, param_max_offset, rc);
+			pr_debug("%s: REVERB_DIFFUSION val:%d\n",
+				__func__, reverb->diffusion);
+			if (command_config_state == CONFIG_SET) {
+				params_length += COMMAND_PAYLOAD_SZ +
+					REVERB_DIFFUSION_PARAM_SZ;
+				CHECK_PARAM_LEN(params_length,
+						MAX_INBAND_PARAM_SZ,
+						"REVERB_DIFFUSION", rc);
+				if (rc != 0)
+					goto invalid_config;
+				*updt_params++ =
+					AUDPROC_MODULE_ID_REVERB;
+				*updt_params++ =
+					AUDPROC_PARAM_ID_REVERB_DIFFUSION;
+				*updt_params++ =
+					REVERB_DIFFUSION_PARAM_SZ;
+				*updt_params++ =
+					reverb->diffusion;
+			}
+			break;
+		case REVERB_DENSITY:
+			if (length != 1 || index_offset != 0) {
+				pr_err("REVERB_DENSITY:invalid params\n");
+				rc = -EINVAL;
+				goto invalid_config;
+			}
+			reverb->density =
+				GET_NEXT(values, param_max_offset, rc);
+			pr_debug("%s: REVERB_DENSITY val:%d\n",
+				__func__, reverb->density);
+			if (command_config_state == CONFIG_SET) {
+				params_length += COMMAND_PAYLOAD_SZ +
+					REVERB_DENSITY_PARAM_SZ;
+				CHECK_PARAM_LEN(params_length,
+						MAX_INBAND_PARAM_SZ,
+						"REVERB_DENSITY", rc);
+				if (rc != 0)
+					goto invalid_config;
+				*updt_params++ =
+					AUDPROC_MODULE_ID_REVERB;
+				*updt_params++ =
+					AUDPROC_PARAM_ID_REVERB_DENSITY;
+				*updt_params++ =
+					REVERB_DENSITY_PARAM_SZ;
+				*updt_params++ =
+					reverb->density;
+			}
+			break;
+		default:
+			pr_err("%s: Invalid command to set config\n", __func__);
+			break;
+		}
+	}
+	if (params_length && (rc == 0))
+		q6asm_send_audio_effects_params(ac, params,
+						params_length);
+	else
+		pr_debug("%s: did not send pp params\n", __func__);
+invalid_config:
+	kfree(params);
+	return rc;
+}
+
+int msm_audio_effects_bass_boost_handler(struct audio_client *ac,
+					struct bass_boost_params *bass_boost,
+					long *values)
+{
+	long *param_max_offset = values + MAX_PP_PARAMS_SZ - 1;
+	char *params = NULL;
+	int rc = 0;
+	int devices = GET_NEXT(values, param_max_offset, rc);
+	int num_commands = GET_NEXT(values, param_max_offset, rc);
+	int *updt_params, i, prev_enable_flag;
+	uint32_t params_length = (MAX_INBAND_PARAM_SZ);
+
+	pr_debug("%s\n", __func__);
+	if (!ac || (devices == -EINVAL) || (num_commands == -EINVAL)) {
+		pr_err("%s: cannot set audio effects\n", __func__);
+		return -EINVAL;
+	}
+	params = kzalloc(params_length, GFP_KERNEL);
+	if (!params) {
+		pr_err("%s, params memory alloc failed\n", __func__);
+		return -ENOMEM;
+	}
+	pr_debug("%s: device: %d\n", __func__, devices);
+	updt_params = (int *)params;
+	params_length = 0;
+	for (i = 0; i < num_commands; i++) {
+		uint32_t command_id =
+			GET_NEXT(values, param_max_offset, rc);
+		uint32_t command_config_state =
+			GET_NEXT(values, param_max_offset, rc);
+		uint32_t index_offset =
+			GET_NEXT(values, param_max_offset, rc);
+		uint32_t length =
+			GET_NEXT(values, param_max_offset, rc);
+		switch (command_id) {
+		case BASS_BOOST_ENABLE:
+			if (length != 1 || index_offset != 0) {
+				pr_err("BASS_BOOST_ENABLE:invalid params\n");
+				rc = -EINVAL;
+				goto invalid_config;
+			}
+			prev_enable_flag = bass_boost->enable_flag;
+			bass_boost->enable_flag =
+				GET_NEXT(values, param_max_offset, rc);
+			pr_debug("%s: BASS_BOOST_ENABLE prev:%d new:%d\n",
+				__func__, prev_enable_flag,
+				bass_boost->enable_flag);
+			if (prev_enable_flag != bass_boost->enable_flag) {
+				params_length += COMMAND_PAYLOAD_SZ +
+					BASS_BOOST_ENABLE_PARAM_SZ;
+				CHECK_PARAM_LEN(params_length,
+						MAX_INBAND_PARAM_SZ,
+						"BASS_BOOST_ENABLE", rc);
+				if (rc != 0)
+					goto invalid_config;
+				*updt_params++ =
+					AUDPROC_MODULE_ID_BASS_BOOST;
+				*updt_params++ =
+					AUDPROC_PARAM_ID_BASS_BOOST_ENABLE;
+				*updt_params++ =
+					BASS_BOOST_ENABLE_PARAM_SZ;
+				*updt_params++ =
+					bass_boost->enable_flag;
+			}
+			break;
+		case BASS_BOOST_MODE:
+			if (length != 1 || index_offset != 0) {
+				pr_err("BASS_BOOST_MODE:invalid params\n");
+				rc = -EINVAL;
+				goto invalid_config;
+			}
+			bass_boost->mode =
+				GET_NEXT(values, param_max_offset, rc);
+			pr_debug("%s: BASS_BOOST_MODE val:%d\n",
+				__func__, bass_boost->mode);
+			if (command_config_state == CONFIG_SET) {
+				params_length += COMMAND_PAYLOAD_SZ +
+					BASS_BOOST_MODE_PARAM_SZ;
+				CHECK_PARAM_LEN(params_length,
+						MAX_INBAND_PARAM_SZ,
+						"BASS_BOOST_MODE", rc);
+				if (rc != 0)
+					goto invalid_config;
+				*updt_params++ =
+					AUDPROC_MODULE_ID_BASS_BOOST;
+				*updt_params++ =
+					AUDPROC_PARAM_ID_BASS_BOOST_MODE;
+				*updt_params++ =
+					BASS_BOOST_MODE_PARAM_SZ;
+				*updt_params++ =
+					bass_boost->mode;
+			}
+			break;
+		case BASS_BOOST_STRENGTH:
+			if (length != 1 || index_offset != 0) {
+				pr_err("BASS_BOOST_STRENGTH:invalid params\n");
+				rc = -EINVAL;
+				goto invalid_config;
+			}
+			bass_boost->strength =
+				GET_NEXT(values, param_max_offset, rc);
+			pr_debug("%s: BASS_BOOST_STRENGTH val:%d\n",
+				__func__, bass_boost->strength);
+			if (command_config_state == CONFIG_SET) {
+				params_length += COMMAND_PAYLOAD_SZ +
+					BASS_BOOST_STRENGTH_PARAM_SZ;
+				CHECK_PARAM_LEN(params_length,
+						MAX_INBAND_PARAM_SZ,
+						"BASS_BOOST_STRENGTH", rc);
+				if (rc != 0)
+					goto invalid_config;
+				*updt_params++ =
+					AUDPROC_MODULE_ID_BASS_BOOST;
+				*updt_params++ =
+					AUDPROC_PARAM_ID_BASS_BOOST_STRENGTH;
+				*updt_params++ =
+					BASS_BOOST_STRENGTH_PARAM_SZ;
+				*updt_params++ =
+					bass_boost->strength;
+			}
+			break;
+		default:
+			pr_err("%s: Invalid command to set config\n", __func__);
+			break;
+		}
+	}
+	if (params_length && (rc == 0))
+		q6asm_send_audio_effects_params(ac, params,
+						params_length);
+	else
+		pr_debug("%s: did not send pp params\n", __func__);
+invalid_config:
+	kfree(params);
+	return rc;
+}
+
+int msm_audio_effects_pbe_handler(struct audio_client *ac,
+					struct pbe_params *pbe,
+					long *values)
+{
+	long *param_max_offset = values + MAX_PP_PARAMS_SZ - 1;
+	char *params = NULL;
+	int rc = 0;
+	int devices = GET_NEXT(values, param_max_offset, rc);
+	int num_commands = GET_NEXT(values, param_max_offset, rc);
+	int *updt_params, i, j, prev_enable_flag;
+	uint32_t params_length = (MAX_INBAND_PARAM_SZ);
+
+	pr_debug("%s\n", __func__);
+	if (!ac || (devices == -EINVAL) || (num_commands == -EINVAL)) {
+		pr_err("%s: cannot set audio effects\n", __func__);
+		return -EINVAL;
+	}
+	params = kzalloc(params_length, GFP_KERNEL);
+	if (!params) {
+		pr_err("%s, params memory alloc failed\n", __func__);
+		return -ENOMEM;
+	}
+	pr_debug("%s: device: %d\n", __func__, devices);
+	updt_params = (int *)params;
+	params_length = 0;
+	for (i = 0; i < num_commands; i++) {
+		uint32_t command_id =
+			GET_NEXT(values, param_max_offset, rc);
+		uint32_t command_config_state =
+			GET_NEXT(values, param_max_offset, rc);
+		uint32_t index_offset =
+			GET_NEXT(values, param_max_offset, rc);
+		uint32_t length =
+			GET_NEXT(values, param_max_offset, rc);
+		switch (command_id) {
+		case PBE_ENABLE:
+			pr_debug("%s: PBE_ENABLE\n", __func__);
+			if (length != 1 || index_offset != 0) {
+				pr_err("no valid params\n");
+				rc = -EINVAL;
+				goto invalid_config;
+			}
+			prev_enable_flag = pbe->enable_flag;
+			pbe->enable_flag =
+				GET_NEXT(values, param_max_offset, rc);
+			if (prev_enable_flag != pbe->enable_flag) {
+				params_length += COMMAND_PAYLOAD_SZ +
+					PBE_ENABLE_PARAM_SZ;
+				CHECK_PARAM_LEN(params_length,
+						MAX_INBAND_PARAM_SZ,
+						"PBE_ENABLE", rc);
+				if (rc != 0)
+					goto invalid_config;
+				*updt_params++ =
+					AUDPROC_MODULE_ID_PBE;
+				*updt_params++ =
+					AUDPROC_PARAM_ID_PBE_ENABLE;
+				*updt_params++ =
+					PBE_ENABLE_PARAM_SZ;
+				*updt_params++ =
+					pbe->enable_flag;
+			}
+			break;
+		case PBE_CONFIG:
+			pr_debug("%s: PBE_PARAM length %u\n", __func__, length);
+			if (length > sizeof(struct pbe_config_t) ||
+				length < PBE_CONFIG_PARAM_LEN ||
+				index_offset != 0) {
+				pr_err("no valid params, len %d\n", length);
+				rc = -EINVAL;
+				goto invalid_config;
+			}
+			if (command_config_state == CONFIG_SET) {
+				params_length += COMMAND_PAYLOAD_SZ + length;
+				CHECK_PARAM_LEN(params_length,
+						MAX_INBAND_PARAM_SZ,
+						"PBE_PARAM", rc);
+				if (rc != 0)
+					goto invalid_config;
+				*updt_params++ =
+					AUDPROC_MODULE_ID_PBE;
+				*updt_params++ =
+					AUDPROC_PARAM_ID_PBE_PARAM_CONFIG;
+				*updt_params++ =
+					length;
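+				/*
+				 * Copy the variable-length PBE config
+				 * payload word by word; j advances by
+				 * sizeof(int) per copied word.
+				 */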
+				for (j = 0; j < length; ) {
+					j += sizeof(*updt_params);
+					*updt_params++ =
+						GET_NEXT(
+						values,
+						param_max_offset,
+						rc);
+				}
+			}
+			break;
+		default:
+			pr_err("%s: Invalid command to set config\n", __func__);
+			break;
+		}
+	}
+	if (params_length && (rc == 0))
+		q6asm_send_audio_effects_params(ac, params,
+						params_length);
+invalid_config:
+	kfree(params);
+	return rc;
+}
+
+int msm_audio_effects_popless_eq_handler(struct audio_client *ac,
+					 struct eq_params *eq,
+					 long *values)
+{
+	long *param_max_offset = values + MAX_PP_PARAMS_SZ - 1;
+	char *params = NULL;
+	int rc = 0;
+	int devices = GET_NEXT(values, param_max_offset, rc);
+	int num_commands = GET_NEXT(values, param_max_offset, rc);
+	int *updt_params, i, prev_enable_flag;
+	uint32_t params_length = (MAX_INBAND_PARAM_SZ);
+
+	pr_debug("%s\n", __func__);
+	if (!ac || (devices == -EINVAL) || (num_commands == -EINVAL)) {
+		pr_err("%s: cannot set audio effects\n", __func__);
+		return -EINVAL;
+	}
+	params = kzalloc(params_length, GFP_KERNEL);
+	if (!params) {
+		pr_err("%s, params memory alloc failed\n", __func__);
+		return -ENOMEM;
+	}
+	pr_debug("%s: device: %d\n", __func__, devices);
+	updt_params = (int *)params;
+	params_length = 0;
+	for (i = 0; i < num_commands; i++) {
+		uint32_t command_id =
+			GET_NEXT(values, param_max_offset, rc);
+		uint32_t command_config_state =
+			GET_NEXT(values, param_max_offset, rc);
+		uint32_t index_offset =
+			GET_NEXT(values, param_max_offset, rc);
+		uint32_t length =
+			GET_NEXT(values, param_max_offset, rc);
+		uint32_t idx;
+		int j;
+		switch (command_id) {
+		case EQ_ENABLE:
+			if (length != 1 || index_offset != 0) {
+				pr_err("EQ_ENABLE:invalid params\n");
+				rc = -EINVAL;
+				goto invalid_config;
+			}
+			prev_enable_flag = eq->enable_flag;
+			eq->enable_flag =
+				GET_NEXT(values, param_max_offset, rc);
+			pr_debug("%s: EQ_ENABLE prev:%d new:%d\n", __func__,
+				prev_enable_flag, eq->enable_flag);
+			if (prev_enable_flag != eq->enable_flag) {
+				params_length += COMMAND_PAYLOAD_SZ +
+					EQ_ENABLE_PARAM_SZ;
+				CHECK_PARAM_LEN(params_length,
+						MAX_INBAND_PARAM_SZ,
+						"EQ_ENABLE", rc);
+				if (rc != 0)
+					goto invalid_config;
+				*updt_params++ =
+					AUDPROC_MODULE_ID_POPLESS_EQUALIZER;
+				*updt_params++ =
+					AUDPROC_PARAM_ID_EQ_ENABLE;
+				*updt_params++ =
+					EQ_ENABLE_PARAM_SZ;
+				*updt_params++ =
+					eq->enable_flag;
+			}
+			break;
+		case EQ_CONFIG:
+			if (length < EQ_CONFIG_PARAM_LEN || index_offset != 0) {
+				pr_err("EQ_CONFIG:invalid params\n");
+				rc = -EINVAL;
+				goto invalid_config;
+			}
+			pr_debug("%s: EQ_CONFIG bands:%d, pgain:%d, pset:%d\n",
+				 __func__, eq->config.num_bands,
+				eq->config.eq_pregain, eq->config.preset_id);
+			for (idx = 0; idx < MAX_EQ_BANDS; idx++)
+				eq->per_band_cfg[idx].band_idx = -1;
+			eq->config.eq_pregain =
+				GET_NEXT(values, param_max_offset, rc);
+			eq->config.preset_id =
+				GET_NEXT(values, param_max_offset, rc);
+			eq->config.num_bands =
+				GET_NEXT(values, param_max_offset, rc);
+			if (eq->config.num_bands > MAX_EQ_BANDS) {
+				pr_err("EQ_CONFIG:invalid num of bands\n");
+				rc = -EINVAL;
+				goto invalid_config;
+			}
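+			/*
+			 * The payload is expected to carry
+			 * EQ_CONFIG_PARAM_LEN header words plus
+			 * EQ_CONFIG_PER_BAND_PARAM_LEN words per band.
+			 */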
+			if (eq->config.num_bands &&
+			    (((length - EQ_CONFIG_PARAM_LEN)/
+				EQ_CONFIG_PER_BAND_PARAM_LEN)
+				!= eq->config.num_bands)) {
+				pr_err("EQ_CONFIG:invalid length per band\n");
+				rc = -EINVAL;
+				goto invalid_config;
+			}
+			for (j = 0; j < eq->config.num_bands; j++) {
+				idx = GET_NEXT(values, param_max_offset, rc);
+				if (idx >= MAX_EQ_BANDS) {
+					pr_err("EQ_CONFIG:invalid band index\n");
+					rc = -EINVAL;
+					goto invalid_config;
+				}
+				eq->per_band_cfg[idx].band_idx = idx;
+				eq->per_band_cfg[idx].filter_type =
+					GET_NEXT(values, param_max_offset, rc);
+				eq->per_band_cfg[idx].freq_millihertz =
+					GET_NEXT(values, param_max_offset, rc);
+				eq->per_band_cfg[idx].gain_millibels =
+					GET_NEXT(values, param_max_offset, rc);
+				eq->per_band_cfg[idx].quality_factor =
+					GET_NEXT(values, param_max_offset, rc);
+			}
+			if (command_config_state == CONFIG_SET) {
+				int config_param_length = EQ_CONFIG_PARAM_SZ +
+					(EQ_CONFIG_PER_BAND_PARAM_SZ*
+					 eq->config.num_bands);
+				params_length += COMMAND_PAYLOAD_SZ +
+						config_param_length;
+				CHECK_PARAM_LEN(params_length,
+						MAX_INBAND_PARAM_SZ,
+						"EQ_CONFIG", rc);
+				if (rc != 0)
+					goto invalid_config;
+				*updt_params++ =
+					AUDPROC_MODULE_ID_POPLESS_EQUALIZER;
+				*updt_params++ =
+					AUDPROC_PARAM_ID_EQ_CONFIG;
+				*updt_params++ =
+					config_param_length;
+				*updt_params++ =
+					eq->config.eq_pregain;
+				*updt_params++ =
+					eq->config.preset_id;
+				*updt_params++ =
+					eq->config.num_bands;
+				for (idx = 0; idx < MAX_EQ_BANDS; idx++) {
+					if (eq->per_band_cfg[idx].band_idx < 0)
+						continue;
+					*updt_params++ =
+					eq->per_band_cfg[idx].filter_type;
+					*updt_params++ =
+					eq->per_band_cfg[idx].freq_millihertz;
+					*updt_params++ =
+					eq->per_band_cfg[idx].gain_millibels;
+					*updt_params++ =
+					eq->per_band_cfg[idx].quality_factor;
+					*updt_params++ =
+					eq->per_band_cfg[idx].band_idx;
+				}
+			}
+			break;
+		case EQ_BAND_INDEX:
+			if (length != 1 || index_offset != 0) {
+				pr_err("EQ_BAND_INDEX:invalid params\n");
+				rc = -EINVAL;
+				goto invalid_config;
+			}
+			idx = GET_NEXT(values, param_max_offset, rc);
+			if (idx > MAX_EQ_BANDS) {
+				pr_err("EQ_BAND_INDEX:invalid band index\n");
+				rc = -EINVAL;
+				goto invalid_config;
+			}
+			eq->band_index = idx;
+			pr_debug("%s: EQ_BAND_INDEX val:%d\n",
+				__func__, eq->band_index);
+			if (command_config_state == CONFIG_SET) {
+				params_length += COMMAND_PAYLOAD_SZ +
+					EQ_BAND_INDEX_PARAM_SZ;
+				CHECK_PARAM_LEN(params_length,
+						MAX_INBAND_PARAM_SZ,
+						"EQ_BAND_INDEX", rc);
+				if (rc != 0)
+					goto invalid_config;
+				*updt_params++ =
+					AUDPROC_MODULE_ID_POPLESS_EQUALIZER;
+				*updt_params++ =
+					AUDPROC_PARAM_ID_EQ_BAND_INDEX;
+				*updt_params++ =
+					EQ_BAND_INDEX_PARAM_SZ;
+				*updt_params++ =
+					eq->band_index;
+			}
+			break;
+		case EQ_SINGLE_BAND_FREQ:
+			if (length != 1 || index_offset != 0) {
+				pr_err("EQ_SINGLE_BAND_FREQ:invalid params\n");
+				rc = -EINVAL;
+				goto invalid_config;
+			}
+			if (eq->band_index > MAX_EQ_BANDS) {
+				pr_err("EQ_SINGLE_BAND_FREQ:invalid index\n");
+				break;
+			}
+			eq->freq_millihertz =
+				GET_NEXT(values, param_max_offset, rc);
+			pr_debug("%s: EQ_SINGLE_BAND_FREQ idx:%d, val:%d\n",
+				__func__, eq->band_index, eq->freq_millihertz);
+			if (command_config_state == CONFIG_SET) {
+				params_length += COMMAND_PAYLOAD_SZ +
+					EQ_SINGLE_BAND_FREQ_PARAM_SZ;
+				CHECK_PARAM_LEN(params_length,
+						MAX_INBAND_PARAM_SZ,
+						"EQ_SINGLE_BAND_FREQ", rc);
+				if (rc != 0)
+					goto invalid_config;
+				*updt_params++ =
+					AUDPROC_MODULE_ID_POPLESS_EQUALIZER;
+				*updt_params++ =
+					AUDPROC_PARAM_ID_EQ_SINGLE_BAND_FREQ;
+				*updt_params++ =
+					EQ_SINGLE_BAND_FREQ_PARAM_SZ;
+				*updt_params++ =
+					eq->freq_millihertz;
+			}
+			break;
+		default:
+			pr_err("%s: Invalid command to set config\n", __func__);
+			break;
+		}
+	}
+	if (params_length && (rc == 0))
+		q6asm_send_audio_effects_params(ac, params,
+						params_length);
+	else
+		pr_debug("%s: did not send pp params\n", __func__);
+invalid_config:
+	kfree(params);
+	return rc;
+}
+
+static int __msm_audio_effects_volume_handler(struct audio_client *ac,
+					      struct soft_volume_params *vol,
+					      long *values,
+					      int instance)
+{
+	int devices;
+	int num_commands;
+	char *params = NULL;
+	int *updt_params, i;
+	uint32_t params_length = (MAX_INBAND_PARAM_SZ);
+	long *param_max_offset;
+	int rc = 0;
+
+	pr_debug("%s: instance: %d\n", __func__, instance);
+	if (!values) {
+		pr_err("%s: set audio effects failed, no valid data\n",
+			__func__);
+		return -EINVAL;
+	}
+	param_max_offset = values + MAX_PP_PARAMS_SZ - 1;
+	devices = GET_NEXT(values, param_max_offset, rc);
+	num_commands = GET_NEXT(values, param_max_offset, rc);
+	if (!ac || (devices == -EINVAL) || (num_commands == -EINVAL)) {
+		pr_err("%s: cannot set audio effects\n", __func__);
+		return -EINVAL;
+	}
+	params = kzalloc(params_length, GFP_KERNEL);
+	if (!params) {
+		pr_err("%s, params memory alloc failed\n", __func__);
+		return -ENOMEM;
+	}
+	updt_params = (int *)params;
+	params_length = 0;
+	for (i = 0; i < num_commands; i++) {
+		uint32_t command_id =
+			GET_NEXT(values, param_max_offset, rc);
+		uint32_t command_config_state =
+			GET_NEXT(values, param_max_offset, rc);
+		uint32_t index_offset =
+			GET_NEXT(values, param_max_offset, rc);
+		uint32_t length =
+			GET_NEXT(values, param_max_offset, rc);
+		switch (command_id) {
+		case SOFT_VOLUME_GAIN_2CH:
+		case SOFT_VOLUME2_GAIN_2CH:
+			if (length != 2 || index_offset != 0) {
+				pr_err("VOLUME_GAIN_2CH: invalid params\n");
+				rc = -EINVAL;
+				goto invalid_config;
+			}
+			vol->left_gain = GET_NEXT(values, param_max_offset, rc);
+			vol->right_gain =
+				GET_NEXT(values, param_max_offset, rc);
+			vol->master_gain = 0x2000;
+			if (command_config_state == CONFIG_SET) {
+				params_length += COMMAND_PAYLOAD_SZ +
+						SOFT_VOLUME_GAIN_2CH_PARAM_SZ;
+				params_length += COMMAND_PAYLOAD_SZ +
+					SOFT_VOLUME_GAIN_MASTER_PARAM_SZ;
+				CHECK_PARAM_LEN(params_length,
+						MAX_INBAND_PARAM_SZ,
+						"VOLUME/VOLUME2_GAIN_2CH",
+						rc);
+				if (rc != 0)
+					goto invalid_config;
+				if (instance == SOFT_VOLUME_INSTANCE_2)
+					*updt_params++ =
+						ASM_MODULE_ID_VOL_CTRL2;
+				else
+					*updt_params++ =
+						ASM_MODULE_ID_VOL_CTRL;
+				*updt_params++ =
+					ASM_PARAM_ID_VOL_CTRL_LR_CHANNEL_GAIN;
+				*updt_params++ =
+					SOFT_VOLUME_GAIN_2CH_PARAM_SZ;
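+				/*
+				 * Pack left/right gains into one 32-bit
+				 * word: left in the upper 16 bits, right
+				 * in the lower 16.
+				 */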
+				*updt_params++ =
+					(vol->left_gain << 16) |
+						vol->right_gain;
+				if (instance == SOFT_VOLUME_INSTANCE_2)
+					*updt_params++ =
+						ASM_MODULE_ID_VOL_CTRL2;
+				else
+					*updt_params++ =
+						ASM_MODULE_ID_VOL_CTRL;
+				*updt_params++ =
+					ASM_PARAM_ID_VOL_CTRL_MASTER_GAIN;
+				*updt_params++ =
+					SOFT_VOLUME_GAIN_MASTER_PARAM_SZ;
+				*updt_params++ =
+					vol->master_gain;
+			}
+			break;
+		case SOFT_VOLUME_GAIN_MASTER:
+		case SOFT_VOLUME2_GAIN_MASTER:
+			if (length != 1 || index_offset != 0) {
+				pr_err("VOLUME_GAIN_MASTER: invalid params\n");
+				rc = -EINVAL;
+				goto invalid_config;
+			}
+			vol->left_gain = 0x2000;
+			vol->right_gain = 0x2000;
+			vol->master_gain =
+				GET_NEXT(values, param_max_offset, rc);
+			if (command_config_state == CONFIG_SET) {
+				params_length += COMMAND_PAYLOAD_SZ +
+						SOFT_VOLUME_GAIN_2CH_PARAM_SZ;
+				params_length += COMMAND_PAYLOAD_SZ +
+					SOFT_VOLUME_GAIN_MASTER_PARAM_SZ;
+				CHECK_PARAM_LEN(params_length,
+						MAX_INBAND_PARAM_SZ,
+						"VOLUME/VOLUME2_GAIN_MASTER",
+						rc);
+				if (rc != 0)
+					goto invalid_config;
+				if (instance == SOFT_VOLUME_INSTANCE_2)
+					*updt_params++ =
+						ASM_MODULE_ID_VOL_CTRL2;
+				else
+					*updt_params++ =
+						ASM_MODULE_ID_VOL_CTRL;
+				*updt_params++ =
+					ASM_PARAM_ID_VOL_CTRL_LR_CHANNEL_GAIN;
+				*updt_params++ =
+					SOFT_VOLUME_GAIN_2CH_PARAM_SZ;
+				*updt_params++ =
+					(vol->left_gain << 16) |
+						vol->right_gain;
+				if (instance == SOFT_VOLUME_INSTANCE_2)
+					*updt_params++ =
+						ASM_MODULE_ID_VOL_CTRL2;
+				else
+					*updt_params++ =
+						ASM_MODULE_ID_VOL_CTRL;
+				*updt_params++ =
+					ASM_PARAM_ID_VOL_CTRL_MASTER_GAIN;
+				*updt_params++ =
+					SOFT_VOLUME_GAIN_MASTER_PARAM_SZ;
+				*updt_params++ =
+					vol->master_gain;
+			}
+			break;
+		default:
+			pr_err("%s: Invalid command id: %d to set config\n",
+				__func__, command_id);
+			break;
+		}
+	}
+	if (params_length && (rc == 0))
+		q6asm_send_audio_effects_params(ac, params,
+						params_length);
+invalid_config:
+	kfree(params);
+	return rc;
+}
+
+int msm_audio_effects_volume_handler(struct audio_client *ac,
+				     struct soft_volume_params *vol,
+				     long *values)
+{
+	return __msm_audio_effects_volume_handler(ac, vol, values,
+						  SOFT_VOLUME_INSTANCE_1);
+}
+
+int msm_audio_effects_volume_handler_v2(struct audio_client *ac,
+					struct soft_volume_params *vol,
+					long *values, int instance)
+{
+	return __msm_audio_effects_volume_handler(ac, vol, values, instance);
+}
diff -Nruw linux-4.4.115-fbx/sound/soc/msm./qdsp6v2/msm-compress-q6-v2.c linux-4.4.115-fbx/sound/soc/msm/qdsp6v2/msm-compress-q6-v2.c
--- linux-4.4.115-fbx/sound/soc/msm./qdsp6v2/msm-compress-q6-v2.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/sound/soc/msm/qdsp6v2/msm-compress-q6-v2.c	2019-10-29 09:26:26.145227663 +0100
@@ -0,0 +1,4910 @@
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/time.h>
+#include <linux/math64.h>
+#include <linux/wait.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <sound/core.h>
+#include <sound/soc.h>
+#include <sound/soc-dapm.h>
+#include <sound/pcm.h>
+#include <sound/initval.h>
+#include <sound/control.h>
+#include <sound/q6asm-v2.h>
+#include <sound/pcm_params.h>
+#include <sound/audio_effects.h>
+#include <asm/dma.h>
+#include <linux/dma-mapping.h>
+#include <linux/msm_audio_ion.h>
+#include <linux/msm_audio.h>
+
+#include <sound/timer.h>
+#include <sound/tlv.h>
+
+#include <sound/apr_audio-v2.h>
+#include <sound/q6core.h>
+#include <sound/compress_params.h>
+#include <sound/compress_offload.h>
+#include <sound/compress_driver.h>
+#include <sound/msm-audio-effects-q6-v2.h>
+#include "msm-pcm-routing-v2.h"
+#include "msm-qti-pp-config.h"
+
+#define DSP_PP_BUFFERING_IN_MSEC	25
+#define PARTIAL_DRAIN_ACK_EARLY_BY_MSEC	150
+#define MP3_OUTPUT_FRAME_SZ		1152
+#define AAC_OUTPUT_FRAME_SZ		1024
+#define AC3_OUTPUT_FRAME_SZ		1536
+#define EAC3_OUTPUT_FRAME_SZ		1536
+#define DSP_NUM_OUTPUT_FRAME_BUFFERED	2
+#define FLAC_BLK_SIZE_LIMIT		65535
+
+/* Timestamp mode payload offsets */
+#define CAPTURE_META_DATA_TS_OFFSET_LSW	6
+#define CAPTURE_META_DATA_TS_OFFSET_MSW	7
+
+/* decoder parameter length */
+#define DDP_DEC_MAX_NUM_PARAM		18
+
+/* Default values used if user space does not set them */
+#define COMPR_PLAYBACK_MIN_FRAGMENT_SIZE (8 * 1024)
+#define COMPR_PLAYBACK_MAX_FRAGMENT_SIZE (128 * 1024)
+#define COMPR_PLAYBACK_MIN_NUM_FRAGMENTS (4)
+#define COMPR_PLAYBACK_MAX_NUM_FRAGMENTS (16 * 4)
+
+#define COMPRESSED_LR_VOL_MAX_STEPS	0x2000
+const DECLARE_TLV_DB_LINEAR(msm_compr_vol_gain, 0,
+				COMPRESSED_LR_VOL_MAX_STEPS);
+
+/* Stream id switches between 1 and 2 */
+#define NEXT_STREAM_ID(stream_id) ((stream_id & 1) + 1)
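+/* e.g. NEXT_STREAM_ID(1) == 2 and NEXT_STREAM_ID(2) == 1 */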
+
+#define STREAM_ARRAY_INDEX(stream_id) (stream_id - 1)
+
+#define MAX_NUMBER_OF_STREAMS 2
+
+struct msm_compr_gapless_state {
+	bool set_next_stream_id;
+	int32_t stream_opened[MAX_NUMBER_OF_STREAMS];
+	uint32_t initial_samples_drop;
+	uint32_t trailing_samples_drop;
+	uint32_t gapless_transition;
+	bool use_dsp_gapless_mode;
+	union snd_codec_options codec_options;
+};
+
+static unsigned int supported_sample_rates[] = {
+	8000, 11025, 12000, 16000, 22050, 24000, 32000, 44100, 48000, 64000,
+	88200, 96000, 128000, 144000, 176400, 192000, 352800, 384000, 2822400,
+	5644800
+};
+
+struct msm_compr_pdata {
+	struct snd_compr_stream *cstream[MSM_FRONTEND_DAI_MAX];
+	uint32_t volume[MSM_FRONTEND_DAI_MAX][2]; /* For both L & R */
+	struct msm_compr_audio_effects *audio_effects[MSM_FRONTEND_DAI_MAX];
+	bool use_dsp_gapless_mode;
+	bool use_legacy_api; /* indicates use of older ASM APIs */
+	struct msm_compr_dec_params *dec_params[MSM_FRONTEND_DAI_MAX];
+	struct msm_compr_ch_map *ch_map[MSM_FRONTEND_DAI_MAX];
+	int32_t ion_fd[MSM_FRONTEND_DAI_MAX];
+	bool is_in_use[MSM_FRONTEND_DAI_MAX];
+};
+
+struct msm_compr_audio {
+	struct snd_compr_stream *cstream;
+	struct snd_compr_caps compr_cap;
+	struct snd_compr_codec_caps codec_caps;
+	struct snd_compr_params codec_param;
+	struct audio_client *audio_client;
+
+	uint32_t codec;
+	uint32_t compr_passthr;
+	void    *buffer; /* virtual address */
+	phys_addr_t buffer_paddr; /* physical address */
+	uint32_t app_pointer;
+	uint32_t buffer_size;
+	uint32_t byte_offset;
+	uint64_t copied_total; /* bytes consumed by DSP */
+	uint64_t bytes_received; /* from userspace */
+	uint64_t bytes_sent; /* to DSP */
+
+	uint64_t received_total; /* bytes received from DSP */
+	uint64_t bytes_copied; /* to userspace */
+	uint64_t bytes_read; /* from DSP */
+	uint32_t bytes_read_offset; /* bytes read offset */
+
+	uint32_t ts_header_offset; /* holds the timestamp header offset */
+
+	int32_t first_buffer;
+	int32_t last_buffer;
+	int32_t partial_drain_delay;
+
+	uint16_t session_id;
+
+	uint32_t sample_rate;
+	uint32_t num_channels;
+
+	/*
+	 * convention - commands coming from the same thread
+	 * can use the common cmd_ack var. Others (e.g. drain/EOS)
+	 * must use separate vars to track command status.
+	 */
+	uint32_t cmd_ack;
+	uint32_t cmd_interrupt;
+	uint32_t drain_ready;
+	uint32_t eos_ack;
+
+	uint32_t stream_available;
+	uint32_t next_stream;
+
+	uint32_t run_mode;
+	uint32_t start_delay_lsw;
+	uint32_t start_delay_msw;
+
+	int32_t shm_ion_fd;
+	struct ion_client *lib_ion_client;
+	struct ion_client *shm_ion_client;
+	struct ion_handle *lib_ion_handle;
+	struct ion_handle *shm_ion_handle;
+
+	uint64_t marker_timestamp;
+
+	struct msm_compr_gapless_state gapless_state;
+
+	atomic_t start;
+	atomic_t eos;
+	atomic_t drain;
+	atomic_t xrun;
+	atomic_t close;
+	atomic_t wait_on_close;
+	atomic_t error;
+
+	wait_queue_head_t eos_wait;
+	wait_queue_head_t drain_wait;
+	wait_queue_head_t close_wait;
+	wait_queue_head_t wait_for_stream_avail;
+
+	spinlock_t lock;
+};
+
+const u32 compr_codecs[] = {
+	SND_AUDIOCODEC_AC3, SND_AUDIOCODEC_EAC3, SND_AUDIOCODEC_DTS,
+	SND_AUDIOCODEC_DSD, SND_AUDIOCODEC_TRUEHD, SND_AUDIOCODEC_IEC61937};
+
+struct query_audio_effect {
+	uint32_t mod_id;
+	uint32_t parm_id;
+	uint32_t size;
+	uint32_t offset;
+	uint32_t device;
+};
+
+struct msm_compr_audio_effects {
+	struct bass_boost_params bass_boost;
+	struct pbe_params pbe;
+	struct virtualizer_params virtualizer;
+	struct reverb_params reverb;
+	struct eq_params equalizer;
+	struct soft_volume_params volume;
+	struct query_audio_effect query;
+};
+
+struct msm_compr_dec_params {
+	struct snd_dec_ddp ddp_params;
+};
+
+struct msm_compr_ch_map {
+	bool set_ch_map;
+	char channel_map[PCM_FORMAT_MAX_NUM_CHANNEL];
+};
+
+static int msm_compr_send_dec_params(struct snd_compr_stream *cstream,
+				     struct msm_compr_dec_params *dec_params,
+				     int stream_id);
+
+static int msm_compr_set_render_mode(struct msm_compr_audio *prtd,
+				     uint32_t render_mode) {
+	int ret = -EINVAL;
+	struct audio_client *ac = prtd->audio_client;
+
+	pr_debug("%s, got render mode %u\n", __func__, render_mode);
+
+	if (render_mode == SNDRV_COMPRESS_RENDER_MODE_AUDIO_MASTER) {
+		render_mode = ASM_SESSION_MTMX_STRTR_PARAM_RENDER_DEFAULT;
+	} else if (render_mode == SNDRV_COMPRESS_RENDER_MODE_STC_MASTER) {
+		render_mode = ASM_SESSION_MTMX_STRTR_PARAM_RENDER_LOCAL_STC;
+		prtd->run_mode = ASM_SESSION_CMD_RUN_STARTIME_RUN_WITH_DELAY;
+	} else {
+		pr_err("%s, Invalid render mode %u\n", __func__,
+			render_mode);
+		ret = -EINVAL;
+		goto exit;
+	}
+
+	ret = q6asm_send_mtmx_strtr_render_mode(ac, render_mode);
+	if (ret) {
+		pr_err("%s, Render mode can't be set error %d\n", __func__,
+			ret);
+	}
+exit:
+	return ret;
+}
+
+static int msm_compr_set_clk_rec_mode(struct audio_client *ac,
+				     uint32_t clk_rec_mode) {
+	int ret = -EINVAL;
+
+	pr_debug("%s, got clk rec mode %u\n", __func__, clk_rec_mode);
+
+	if (clk_rec_mode == SNDRV_COMPRESS_CLK_REC_MODE_NONE) {
+		clk_rec_mode = ASM_SESSION_MTMX_STRTR_PARAM_CLK_REC_NONE;
+	} else if (clk_rec_mode == SNDRV_COMPRESS_CLK_REC_MODE_AUTO) {
+		clk_rec_mode = ASM_SESSION_MTMX_STRTR_PARAM_CLK_REC_AUTO;
+	} else {
+		pr_err("%s, Invalid clk rec_mode mode %u\n", __func__,
+			clk_rec_mode);
+		ret = -EINVAL;
+		goto exit;
+	}
+
+	ret = q6asm_send_mtmx_strtr_clk_rec_mode(ac, clk_rec_mode);
+	if (ret) {
+		pr_err("%s, clk rec mode can't be set, error %d\n", __func__,
+			ret);
+	}
+
+exit:
+	return ret;
+}
+
+static int msm_compr_set_render_window(struct audio_client *ac,
+		uint32_t ws_lsw, uint32_t ws_msw,
+		uint32_t we_lsw, uint32_t we_msw)
+{
+	int ret = -EINVAL;
+	struct asm_session_mtmx_strtr_param_window_v2_t asm_mtmx_strtr_window;
+	uint32_t param_id;
+
+	pr_debug("%s, ws_lsw 0x%x ws_msw 0x%x we_lsw 0x%x we_ms 0x%x\n",
+		 __func__, ws_lsw, ws_msw, we_lsw, we_msw);
+
+	memset(&asm_mtmx_strtr_window, 0,
+	       sizeof(struct asm_session_mtmx_strtr_param_window_v2_t));
+	asm_mtmx_strtr_window.window_lsw = ws_lsw;
+	asm_mtmx_strtr_window.window_msw = ws_msw;
+	param_id = ASM_SESSION_MTMX_STRTR_PARAM_RENDER_WINDOW_START_V2;
+	ret = q6asm_send_mtmx_strtr_window(ac, &asm_mtmx_strtr_window,
+					   param_id);
+	if (ret) {
+		pr_err("%s, start window can't be set error %d\n", __func__,
+			ret);
+		goto exit;
+	}
+
+	asm_mtmx_strtr_window.window_lsw = we_lsw;
+	asm_mtmx_strtr_window.window_msw = we_msw;
+	param_id = ASM_SESSION_MTMX_STRTR_PARAM_RENDER_WINDOW_END_V2;
+	ret = q6asm_send_mtmx_strtr_window(ac, &asm_mtmx_strtr_window,
+					   param_id);
+	if (ret) {
+		pr_err("%s, end window can't be set error %d\n", __func__,
+			ret);
+	}
+
+exit:
+	return ret;
+}
+
+static int msm_compr_enable_adjust_session_clock(struct audio_client *ac,
+		bool enable)
+{
+	int ret;
+
+	pr_debug("%s, enable adjust_session %d\n", __func__, enable);
+
+	ret = q6asm_send_mtmx_strtr_enable_adjust_session_clock(ac, enable);
+	if (ret)
+		pr_err("%s, adjust session clock can't be set error %d\n",
+			__func__, ret);
+
+	return ret;
+}
+
+static int msm_compr_adjust_session_clock(struct audio_client *ac,
+		uint32_t adjust_session_lsw, uint32_t adjust_session_msw)
+{
+	int ret;
+
+	pr_debug("%s, adjust_session_time_msw 0x%x adjust_session_time_lsw 0x%x\n",
+		 __func__, adjust_session_msw, adjust_session_lsw);
+
+	ret = q6asm_adjust_session_clock(ac,
+			adjust_session_lsw,
+			adjust_session_msw);
+	if (ret)
+		pr_err("%s, adjust session clock can't be set error %d\n",
+			__func__, ret);
+
+	return ret;
+}
+
+static int msm_compr_set_volume(struct snd_compr_stream *cstream,
+				uint32_t volume_l, uint32_t volume_r)
+{
+	struct msm_compr_audio *prtd;
+	int rc = 0;
+	uint32_t avg_vol, gain_list[VOLUME_CONTROL_MAX_CHANNELS];
+	uint32_t num_channels;
+	struct snd_soc_pcm_runtime *rtd;
+	struct msm_compr_pdata *pdata;
+	bool use_default = true;
+	u8 *chmap = NULL;
+
+	pr_debug("%s: volume_l %d volume_r %d\n",
+		__func__, volume_l, volume_r);
+	if (!cstream || !cstream->runtime) {
+		pr_err("%s: session not active\n", __func__);
+		return -EPERM;
+	}
+	rtd = cstream->private_data;
+	prtd = cstream->runtime->private_data;
+
+	if (!rtd || !rtd->platform || !prtd || !prtd->audio_client) {
+		pr_err("%s: invalid rtd, prtd or audio client", __func__);
+		return rc;
+	}
+	pdata = snd_soc_platform_get_drvdata(rtd->platform);
+
+	if (prtd->compr_passthr != LEGACY_PCM) {
+		pr_debug("%s: No volume config for passthrough %d\n",
+			 __func__, prtd->compr_passthr);
+		return rc;
+	}
+
+	use_default = !(pdata->ch_map[rtd->dai_link->be_id]->set_ch_map);
+	chmap = pdata->ch_map[rtd->dai_link->be_id]->channel_map;
+	num_channels = prtd->num_channels;
+
+	if (prtd->num_channels > 2) {
+		/*
+		 * Currently the left and right gains are averaged and applied
+		 * to all channels. This might not be desirable. But currently,
+		 * there exists no API in userspace to send a list of gains for
+		 * each channel either. If such an API does become available,
+		 * the mixer control must be updated to accept more than 2
+		 * channel gains.
+		 */
+		avg_vol = (volume_l + volume_r) / 2;
+		rc = q6asm_set_volume(prtd->audio_client, avg_vol);
+	} else {
+		gain_list[0] = volume_l;
+		gain_list[1] = volume_r;
+		gain_list[2] = volume_l;
+		num_channels = 3;
+		use_default = true;
+		rc = q6asm_set_multich_gain(prtd->audio_client, num_channels,
+					gain_list, chmap, use_default);
+	}
+
+	if (rc < 0)
+		pr_err("%s: Send vol gain command failed rc=%d\n",
+		       __func__, rc);
+
+	return rc;
+}
+
+static int msm_compr_send_ddp_cfg(struct audio_client *ac,
+				  struct snd_dec_ddp *ddp,
+				  int stream_id)
+{
+	int i, rc;
+	pr_debug("%s\n", __func__);
+	for (i = 0; i < ddp->params_length; i++) {
+		rc = q6asm_ds1_set_stream_endp_params(ac, ddp->params_id[i],
+						      ddp->params_value[i],
+						      stream_id);
+		if (rc) {
+			pr_err("sending params_id: %d failed\n",
+				ddp->params_id[i]);
+			return rc;
+		}
+	}
+	return 0;
+}
+
+static int msm_compr_send_buffer(struct msm_compr_audio *prtd)
+{
+	int buffer_length;
+	uint64_t bytes_available;
+	struct audio_aio_write_param param;
+	struct snd_codec_metadata *buff_addr;
+
+	if (!atomic_read(&prtd->start)) {
+		pr_err("%s: stream is not in started state\n", __func__);
+		return -EINVAL;
+	}
+
+
+	if (atomic_read(&prtd->xrun)) {
+		WARN(1, "%s called while xrun is true", __func__);
+		return -EPERM;
+	}
+
+	pr_debug("%s: bytes_received = %llu copied_total = %llu\n",
+		__func__, prtd->bytes_received, prtd->copied_total);
+	if (prtd->first_buffer && prtd->gapless_state.use_dsp_gapless_mode &&
+		prtd->compr_passthr == LEGACY_PCM)
+		q6asm_stream_send_meta_data(prtd->audio_client,
+				prtd->audio_client->stream_id,
+				prtd->gapless_state.initial_samples_drop,
+				prtd->gapless_state.trailing_samples_drop);
+
+	buffer_length = prtd->codec_param.buffer.fragment_size;
+	bytes_available = prtd->bytes_received - prtd->copied_total;
+	if (bytes_available < prtd->codec_param.buffer.fragment_size)
+		buffer_length = bytes_available;
+
+	if (prtd->byte_offset + buffer_length > prtd->buffer_size) {
+		buffer_length = (prtd->buffer_size - prtd->byte_offset);
+		pr_debug("wrap around situation, send partial data %d now", buffer_length);
+	}
+
+	if (buffer_length) {
+		param.paddr = prtd->buffer_paddr + prtd->byte_offset;
+		WARN(prtd->byte_offset % 32 != 0, "offset %x not multiple of 32",
+		prtd->byte_offset);
+	} else {
+		param.paddr = prtd->buffer_paddr;
+	}
+	param.len	= buffer_length;
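+	/*
+	 * In timestamp mode each fragment is assumed to begin with a
+	 * struct snd_codec_metadata header; its length and 64-bit
+	 * timestamp (split into MSW/LSW) are passed on to the DSP.
+	 */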
+	if (prtd->ts_header_offset) {
+		buff_addr = (struct snd_codec_metadata *)
+					(prtd->buffer + prtd->byte_offset);
+		param.len = buff_addr->length;
+		param.msw_ts = (uint32_t)
+			((buff_addr->timestamp & 0xFFFFFFFF00000000LL) >> 32);
+		param.lsw_ts = (uint32_t) (buff_addr->timestamp & 0xFFFFFFFFLL);
+		param.paddr += prtd->ts_header_offset;
+		param.flags = SET_TIMESTAMP;
+		param.metadata_len = prtd->ts_header_offset;
+	} else {
+		param.msw_ts = 0;
+		param.lsw_ts = 0;
+		param.flags = NO_TIMESTAMP;
+		param.metadata_len = 0;
+	}
+	param.uid	= buffer_length;
+	param.last_buffer = prtd->last_buffer;
+
+	pr_debug("%s: sending %d bytes to DSP byte_offset = %d\n",
+		__func__, param.len, prtd->byte_offset);
+	if (q6asm_async_write(prtd->audio_client, &param) < 0) {
+		pr_err("%s:q6asm_async_write failed\n", __func__);
+	} else {
+		prtd->bytes_sent += buffer_length;
+		if (prtd->first_buffer)
+			prtd->first_buffer = 0;
+	}
+
+	return 0;
+}
+
+static int msm_compr_read_buffer(struct msm_compr_audio *prtd)
+{
+	int buffer_length;
+	uint64_t bytes_available;
+	uint64_t buffer_sent;
+	struct audio_aio_read_param param;
+	int ret;
+
+	if (!atomic_read(&prtd->start)) {
+		pr_err("%s: stream is not in started state\n", __func__);
+		return -EINVAL;
+	}
+
+	buffer_length = prtd->codec_param.buffer.fragment_size -
+						 prtd->ts_header_offset;
+	bytes_available = prtd->received_total - prtd->bytes_copied;
+	buffer_sent = prtd->bytes_read - prtd->bytes_copied;
+	if (buffer_sent + buffer_length + prtd->ts_header_offset
+						> prtd->buffer_size) {
+		pr_debug(" %s : Buffer is Full bytes_available: %llu\n",
+				__func__, bytes_available);
+		return 0;
+	}
+
+	memset(&param, 0x0, sizeof(struct audio_aio_read_param));
+	param.paddr = prtd->buffer_paddr + prtd->bytes_read_offset +
+						prtd->ts_header_offset;
+	param.len = buffer_length;
+	param.uid = buffer_length;
+	param.flags = prtd->codec_param.codec.flags;
+
+	pr_debug("%s: reading %d bytes from DSP byte_offset = %llu\n",
+			__func__, buffer_length, prtd->bytes_read);
+	ret = q6asm_async_read(prtd->audio_client, &param);
+	if (ret < 0) {
+		pr_err("%s: q6asm_async_read failed - %d\n",
+			__func__, ret);
+		return ret;
+	}
+	prtd->bytes_read += buffer_length;
+	prtd->bytes_read_offset += buffer_length;
+	if (prtd->bytes_read_offset >= prtd->buffer_size)
+		prtd->bytes_read_offset -= prtd->buffer_size;
+
+	return 0;
+}
+
+static void compr_event_handler(uint32_t opcode,
+		uint32_t token, uint32_t *payload, void *priv)
+{
+	struct msm_compr_audio *prtd = priv;
+	struct snd_compr_stream *cstream;
+	struct audio_client *ac;
+	uint32_t chan_mode = 0;
+	uint32_t sample_rate = 0;
+	uint64_t bytes_available;
+	int stream_id;
+	uint32_t stream_index;
+	unsigned long flags;
+	uint64_t read_size;
+	uint32_t *buff_addr;
+	struct snd_soc_pcm_runtime *rtd;
+	int ret = 0;
+
+	if (!prtd) {
+		pr_err("%s: prtd is NULL\n", __func__);
+		return;
+	}
+	cstream = prtd->cstream;
+	if (!cstream) {
+		pr_err("%s: cstream is NULL\n", __func__);
+		return;
+	}
+
+	ac = prtd->audio_client;
+
+	/*
+	 * The token for the rest of the compressed commands is used to
+	 * carry the session id, stream id, direction, etc.
+	 */
+	stream_id = q6asm_get_stream_id_from_token(token);
+
+	pr_debug("%s opcode =%08x\n", __func__, opcode);
+	switch (opcode) {
+	case ASM_DATA_EVENT_WRITE_DONE_V2:
+		spin_lock_irqsave(&prtd->lock, flags);
+
+		if (payload[3]) {
+			pr_err("%s WRITE FAILED w/ err 0x%x !, paddr 0x%x, byte_offset=%d,copied_total=%llu,token=%d\n",
+				__func__,
+				payload[3],
+				payload[0],
+				prtd->byte_offset,
+				prtd->copied_total, token);
+
+			if (atomic_read(&prtd->drain) && prtd->last_buffer) {
+				pr_debug("wake up on drain\n");
+				prtd->drain_ready = 1;
+				wake_up(&prtd->drain_wait);
+				atomic_set(&prtd->drain, 0);
+				prtd->last_buffer = 0;
+			} else {
+				atomic_set(&prtd->start, 0);
+			}
+		} else {
+			pr_debug("ASM_DATA_EVENT_WRITE_DONE_V2 offset %d, length %d\n",
+				 prtd->byte_offset, token);
+		}
+
+		/*
+		 * Token for WRITE command represents the amount of data
+		 * written to ADSP in the last write, update offset and
+		 * total copied data accordingly.
+		 */
+		if (prtd->ts_header_offset) {
+			/* Always assume that the data will be sent to DSP on
+			 * frame boundary.
+			 * i.e., one frame of userspace write will result in
+			 * one kernel write to DSP. This is needed as
+			 * timestamp will be sent per frame.
+			 */
+			prtd->byte_offset +=
+					prtd->codec_param.buffer.fragment_size;
+			prtd->copied_total +=
+					prtd->codec_param.buffer.fragment_size;
+		} else {
+			prtd->byte_offset += token;
+			prtd->copied_total += token;
+		}
+		if (prtd->byte_offset >= prtd->buffer_size)
+			prtd->byte_offset -= prtd->buffer_size;
+
+		snd_compr_fragment_elapsed(cstream);
+
+		if (!atomic_read(&prtd->start)) {
+			/* Writes must be restarted from _copy() */
+			pr_debug("write_done received while not started, treat as xrun");
+			atomic_set(&prtd->xrun, 1);
+			spin_unlock_irqrestore(&prtd->lock, flags);
+			break;
+		}
+
+		bytes_available = prtd->bytes_received - prtd->copied_total;
+		if (bytes_available < cstream->runtime->fragment_size) {
+			pr_debug("WRITE_DONE Insufficient data to send. break out\n");
+			atomic_set(&prtd->xrun, 1);
+
+			if (prtd->last_buffer)
+				prtd->last_buffer = 0;
+			if (atomic_read(&prtd->drain)) {
+				pr_debug("wake up on drain\n");
+				prtd->drain_ready = 1;
+				wake_up(&prtd->drain_wait);
+				atomic_set(&prtd->drain, 0);
+			}
+		} else if ((bytes_available == cstream->runtime->fragment_size)
+			   && atomic_read(&prtd->drain)) {
+			prtd->last_buffer = 1;
+			msm_compr_send_buffer(prtd);
+			prtd->last_buffer = 0;
+		} else
+			msm_compr_send_buffer(prtd);
+
+		spin_unlock_irqrestore(&prtd->lock, flags);
+		break;
+
+	case ASM_DATA_EVENT_READ_DONE_V2:
+		spin_lock_irqsave(&prtd->lock, flags);
+
+		pr_debug("ASM_DATA_EVENT_READ_DONE_V2 offset %d, length %d\n",
+				 prtd->byte_offset, payload[4]);
+
+		if (prtd->ts_header_offset) {
+			/* Update the header for received buffer */
+			buff_addr = prtd->buffer + prtd->byte_offset;
+			/* Write the length of the buffer */
+			*buff_addr = prtd->codec_param.buffer.fragment_size
+						 - prtd->ts_header_offset;
+			buff_addr++;
+			/* Write the offset */
+			*buff_addr = prtd->ts_header_offset;
+			buff_addr++;
+			/* Write the TS LSW */
+			*buff_addr = payload[CAPTURE_META_DATA_TS_OFFSET_LSW];
+			buff_addr++;
+			/* Write the TS MSW */
+			*buff_addr = payload[CAPTURE_META_DATA_TS_OFFSET_MSW];
+		}
+		/* Always assume read_size is same as fragment_size */
+		read_size = prtd->codec_param.buffer.fragment_size;
+		prtd->byte_offset += read_size;
+		prtd->received_total += read_size;
+		if (prtd->byte_offset >= prtd->buffer_size)
+			prtd->byte_offset -= prtd->buffer_size;
+
+		snd_compr_fragment_elapsed(cstream);
+
+		if (!atomic_read(&prtd->start)) {
+			pr_debug("read_done received while not started, treat as xrun");
+			atomic_set(&prtd->xrun, 1);
+			spin_unlock_irqrestore(&prtd->lock, flags);
+			break;
+		}
+		msm_compr_read_buffer(prtd);
+
+		spin_unlock_irqrestore(&prtd->lock, flags);
+		break;
+
+	case ASM_DATA_EVENT_RENDERED_EOS:
+		spin_lock_irqsave(&prtd->lock, flags);
+		pr_debug("%s: ASM_DATA_CMDRSP_EOS token 0x%x,stream id %d\n",
+			  __func__, token, stream_id);
+		if (atomic_read(&prtd->eos) &&
+		    !prtd->gapless_state.set_next_stream_id) {
+			pr_debug("ASM_DATA_CMDRSP_EOS wake up\n");
+			prtd->eos_ack = 1;
+			wake_up(&prtd->eos_wait);
+		}
+		atomic_set(&prtd->eos, 0);
+		stream_index = STREAM_ARRAY_INDEX(stream_id);
+		if (stream_index >= MAX_NUMBER_OF_STREAMS ||
+		    stream_index < 0) {
+			pr_err("%s: Invalid stream index %d", __func__,
+				stream_index);
+			spin_unlock_irqrestore(&prtd->lock, flags);
+			break;
+		}
+
+		if (prtd->gapless_state.set_next_stream_id &&
+			prtd->gapless_state.stream_opened[stream_index]) {
+			pr_debug("%s: CMD_CLOSE stream_id %d\n",
+				  __func__, stream_id);
+			q6asm_stream_cmd_nowait(ac, CMD_CLOSE, stream_id);
+			atomic_set(&prtd->close, 1);
+			prtd->gapless_state.stream_opened[stream_index] = 0;
+			prtd->gapless_state.set_next_stream_id = false;
+		}
+		if (prtd->gapless_state.gapless_transition)
+			prtd->gapless_state.gapless_transition = 0;
+		spin_unlock_irqrestore(&prtd->lock, flags);
+		break;
+	case ASM_STREAM_PP_EVENT:
+	case ASM_STREAM_CMD_ENCDEC_EVENTS:
+		pr_debug("%s: ASM_STREAM_EVENT(0x%x)\n", __func__, opcode);
+		rtd = cstream->private_data;
+		if (!rtd) {
+			pr_err("%s: rtd is NULL\n", __func__);
+			return;
+		}
+
+		ret = msm_adsp_inform_mixer_ctl(rtd, payload);
+		if (ret) {
+			pr_err("%s: failed to inform mixer ctrl. err = %d\n",
+				__func__, ret);
+			return;
+		}
+		break;
+	case ASM_DATA_EVENT_SR_CM_CHANGE_NOTIFY:
+	case ASM_DATA_EVENT_ENC_SR_CM_CHANGE_NOTIFY: {
+		pr_debug("ASM_DATA_EVENT_SR_CM_CHANGE_NOTIFY\n");
+		chan_mode = payload[1] >> 16;
+		sample_rate = payload[2] >> 16;
+		if (prtd && (chan_mode != prtd->num_channels ||
+				sample_rate != prtd->sample_rate)) {
+			prtd->num_channels = chan_mode;
+			prtd->sample_rate = sample_rate;
+		}
+	}
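+	/* note: no break here - execution falls through to APR_BASIC_RSP_RESULT */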
+	case APR_BASIC_RSP_RESULT: {
+		switch (payload[0]) {
+		case ASM_SESSION_CMD_RUN_V2:
+			/* check if the first buffer needs to be sent to the DSP */
+			pr_debug("ASM_SESSION_CMD_RUN_V2\n");
+
+			/* FIXME: a state machine would be a better way to deal with this */
+			spin_lock_irqsave(&prtd->lock, flags);
+
+			if (cstream->direction == SND_COMPRESS_CAPTURE) {
+				atomic_set(&prtd->start, 1);
+				msm_compr_read_buffer(prtd);
+				spin_unlock_irqrestore(&prtd->lock, flags);
+				break;
+			}
+
+			if (!prtd->bytes_sent) {
+				bytes_available = prtd->bytes_received - prtd->copied_total;
+				if (bytes_available < cstream->runtime->fragment_size) {
+					pr_debug("CMD_RUN_V2 Insufficient data to send. break out\n");
+					atomic_set(&prtd->xrun, 1);
+				} else
+					msm_compr_send_buffer(prtd);
+			}
+
+			/*
+			 * The condition below ensures playback finishes in the
+			 * following corner case:
+			 * WRITE(last buffer)
+			 * WAIT_FOR_DRAIN
+			 * PAUSE
+			 * WRITE_DONE(X)
+			 * RESUME
+			 */
+			if ((prtd->copied_total == prtd->bytes_sent) &&
+			    atomic_read(&prtd->drain)) {
+				pr_debug("RUN ack, wake up & continue pending drain\n");
+
+				if (prtd->last_buffer)
+					prtd->last_buffer = 0;
+
+				prtd->drain_ready = 1;
+				wake_up(&prtd->drain_wait);
+				atomic_set(&prtd->drain, 0);
+			}
+
+			spin_unlock_irqrestore(&prtd->lock, flags);
+			break;
+		case ASM_STREAM_CMD_FLUSH:
+			pr_debug("%s: ASM_STREAM_CMD_FLUSH:", __func__);
+			pr_debug("token 0x%x, stream id %d\n", token,
+				  stream_id);
+			prtd->cmd_ack = 1;
+			break;
+		case ASM_DATA_CMD_REMOVE_INITIAL_SILENCE:
+			pr_debug("%s: ASM_DATA_CMD_REMOVE_INITIAL_SILENCE:",
+				   __func__);
+			pr_debug("token 0x%x, stream id = %d\n", token,
+				  stream_id);
+			break;
+		case ASM_DATA_CMD_REMOVE_TRAILING_SILENCE:
+			pr_debug("%s: ASM_DATA_CMD_REMOVE_TRAILING_SILENCE:",
+				  __func__);
+			pr_debug("token = 0x%x,	stream id = %d\n", token,
+				  stream_id);
+			break;
+		case ASM_STREAM_CMD_CLOSE:
+			pr_debug("%s: ASM_DATA_CMD_CLOSE:", __func__);
+			pr_debug("token 0x%x, stream id %d\n", token,
+				  stream_id);
+			/*
+			 * wakeup wait for stream avail on stream 3
+			 * after stream 1 ends.
+			 */
+			if (prtd->next_stream) {
+				pr_debug("%s:CLOSE:wakeup wait for stream\n",
+					  __func__);
+				prtd->stream_available = 1;
+				wake_up(&prtd->wait_for_stream_avail);
+				prtd->next_stream = 0;
+			}
+			if (atomic_read(&prtd->close) &&
+			    atomic_read(&prtd->wait_on_close)) {
+				prtd->cmd_ack = 1;
+				wake_up(&prtd->close_wait);
+			}
+			atomic_set(&prtd->close, 0);
+			break;
+		case ASM_STREAM_CMD_REGISTER_PP_EVENTS:
+			pr_debug("%s: ASM_STREAM_CMD_REGISTER_PP_EVENTS:",
+				__func__);
+			break;
+		default:
+			break;
+		}
+		break;
+	}
+	case ASM_SESSION_CMDRSP_GET_SESSIONTIME_V3:
+		pr_debug("%s: ASM_SESSION_CMDRSP_GET_SESSIONTIME_V3\n",
+			  __func__);
+		break;
+	case RESET_EVENTS:
+		pr_err("%s: Received reset events CB, move to error state",
+			__func__);
+		spin_lock_irqsave(&prtd->lock, flags);
+		/*
+		 * Since ADSP is down, let this driver pretend that it copied
+		 * all the bytes received, so that the next write is triggered
+		 */
+		prtd->copied_total = prtd->bytes_received;
+		snd_compr_fragment_elapsed(cstream);
+		atomic_set(&prtd->error, 1);
+		wake_up(&prtd->drain_wait);
+		if (atomic_cmpxchg(&prtd->eos, 1, 0)) {
+			pr_debug("%s:unblock eos wait queues", __func__);
+			wake_up(&prtd->eos_wait);
+		}
+		spin_unlock_irqrestore(&prtd->lock, flags);
+		break;
+	default:
+		pr_debug("%s: Not Supported Event opcode[0x%x]\n",
+			  __func__, opcode);
+		break;
+	}
+}
+
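+/*
+ * Worked example with the constants above: for MP3 (frame_sz 1152) at
+ * 8 kHz, (2 * 1152 * 1000) / 8000 + 25 = 313 ms of output is buffered,
+ * and acking the partial drain 150 ms early leaves a 163 ms delay; at
+ * 44.1 kHz the ~77 ms of buffering is under the 150 ms margin, so the
+ * delay is 0.
+ */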
+static int msm_compr_get_partial_drain_delay(int frame_sz, int sample_rate)
+{
+	int delay_time_ms = 0;
+
+	delay_time_ms = ((DSP_NUM_OUTPUT_FRAME_BUFFERED * frame_sz * 1000) /
+			sample_rate) + DSP_PP_BUFFERING_IN_MSEC;
+	delay_time_ms = delay_time_ms > PARTIAL_DRAIN_ACK_EARLY_BY_MSEC ?
+			delay_time_ms - PARTIAL_DRAIN_ACK_EARLY_BY_MSEC : 0;
+
+	pr_debug("%s: frame_sz %d, sample_rate %d, partial drain delay %d\n",
+		__func__, frame_sz, sample_rate, delay_time_ms);
+	return delay_time_ms;
+}
+
+static void populate_codec_list(struct msm_compr_audio *prtd)
+{
+	pr_debug("%s\n", __func__);
+	prtd->compr_cap.direction = SND_COMPRESS_PLAYBACK;
+	prtd->compr_cap.min_fragment_size =
+			COMPR_PLAYBACK_MIN_FRAGMENT_SIZE;
+	prtd->compr_cap.max_fragment_size =
+			COMPR_PLAYBACK_MAX_FRAGMENT_SIZE;
+	prtd->compr_cap.min_fragments =
+			COMPR_PLAYBACK_MIN_NUM_FRAGMENTS;
+	prtd->compr_cap.max_fragments =
+			COMPR_PLAYBACK_MAX_NUM_FRAGMENTS;
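+	/* num_codecs must stay in sync with the codecs[] entries below */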
+	prtd->compr_cap.num_codecs = 17;
+	prtd->compr_cap.codecs[0] = SND_AUDIOCODEC_MP3;
+	prtd->compr_cap.codecs[1] = SND_AUDIOCODEC_AAC;
+	prtd->compr_cap.codecs[2] = SND_AUDIOCODEC_AC3;
+	prtd->compr_cap.codecs[3] = SND_AUDIOCODEC_EAC3;
+	prtd->compr_cap.codecs[4] = SND_AUDIOCODEC_MP2;
+	prtd->compr_cap.codecs[5] = SND_AUDIOCODEC_PCM;
+	prtd->compr_cap.codecs[6] = SND_AUDIOCODEC_WMA;
+	prtd->compr_cap.codecs[7] = SND_AUDIOCODEC_WMA_PRO;
+	prtd->compr_cap.codecs[8] = SND_AUDIOCODEC_FLAC;
+	prtd->compr_cap.codecs[9] = SND_AUDIOCODEC_VORBIS;
+	prtd->compr_cap.codecs[10] = SND_AUDIOCODEC_ALAC;
+	prtd->compr_cap.codecs[11] = SND_AUDIOCODEC_APE;
+	prtd->compr_cap.codecs[12] = SND_AUDIOCODEC_DTS;
+	prtd->compr_cap.codecs[13] = SND_AUDIOCODEC_DSD;
+	prtd->compr_cap.codecs[14] = SND_AUDIOCODEC_APTX;
+	prtd->compr_cap.codecs[15] = SND_AUDIOCODEC_TRUEHD;
+	prtd->compr_cap.codecs[16] = SND_AUDIOCODEC_IEC61937;
+}
+
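+/*
+ * Translate the codec parameters received from userspace into the ASM
+ * media format block for the given stream id.  For gapless playback the
+ * codec options cached at set_params time can be replayed on the next
+ * stream via use_gapless_codec_options.
+ */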
+static int msm_compr_send_media_format_block(struct snd_compr_stream *cstream,
+					     int stream_id,
+					     bool use_gapless_codec_options)
+{
+	struct snd_compr_runtime *runtime = cstream->runtime;
+	struct msm_compr_audio *prtd = runtime->private_data;
+	struct snd_soc_pcm_runtime *rtd = cstream->private_data;
+	struct msm_compr_pdata *pdata =
+			snd_soc_platform_get_drvdata(rtd->platform);
+	struct asm_aac_cfg aac_cfg;
+	struct asm_wma_cfg wma_cfg;
+	struct asm_wmapro_cfg wma_pro_cfg;
+	struct asm_flac_cfg flac_cfg;
+	struct asm_vorbis_cfg vorbis_cfg;
+	struct asm_alac_cfg alac_cfg;
+	struct asm_ape_cfg ape_cfg;
+	struct asm_dsd_cfg dsd_cfg;
+	struct aptx_dec_bt_addr_cfg aptx_cfg;
+	union snd_codec_options *codec_options;
+
+	int ret = 0;
+	uint16_t bit_width;
+	bool use_default_chmap = true;
+	char *chmap = NULL;
+	uint16_t sample_word_size;
+
+	pr_debug("%s: use_gapless_codec_options %d\n",
+			__func__, use_gapless_codec_options);
+
+	if (use_gapless_codec_options)
+		codec_options = &(prtd->gapless_state.codec_options);
+	else
+		codec_options = &(prtd->codec_param.codec.options);
+
+	if (!codec_options) {
+		pr_err("%s: codec_options is NULL\n", __func__);
+		return -EINVAL;
+	}
+
+	switch (prtd->codec) {
+	case FORMAT_LINEAR_PCM:
+		pr_debug("SND_AUDIOCODEC_PCM\n");
+		if (pdata->ch_map[rtd->dai_link->be_id]) {
+			use_default_chmap =
+			    !(pdata->ch_map[rtd->dai_link->be_id]->set_ch_map);
+			chmap =
+			    pdata->ch_map[rtd->dai_link->be_id]->channel_map;
+		}
+
+		switch (prtd->codec_param.codec.format) {
+		case SNDRV_PCM_FORMAT_S32_LE:
+			bit_width = 32;
+			sample_word_size = 32;
+			break;
+		case SNDRV_PCM_FORMAT_S24_LE:
+			bit_width = 24;
+			sample_word_size = 32;
+			break;
+		case SNDRV_PCM_FORMAT_S24_3LE:
+			bit_width = 24;
+			sample_word_size = 24;
+			break;
+		case SNDRV_PCM_FORMAT_S16_LE:
+		default:
+			bit_width = 16;
+			sample_word_size = 16;
+			break;
+		}
+		ret = q6asm_media_format_block_pcm_format_support_v4(
+							prtd->audio_client,
+							prtd->sample_rate,
+							prtd->num_channels,
+							bit_width, stream_id,
+							use_default_chmap,
+							chmap,
+							sample_word_size,
+							ASM_LITTLE_ENDIAN,
+							DEFAULT_QF);
+		if (ret < 0)
+			pr_err("%s: CMD Format block failed\n", __func__);
+
+		break;
+	case FORMAT_MP3:
+		pr_debug("SND_AUDIOCODEC_MP3\n");
+		/* no media format block needed */
+		break;
+	case FORMAT_MPEG4_AAC:
+		pr_debug("SND_AUDIOCODEC_AAC\n");
+		memset(&aac_cfg, 0x0, sizeof(struct asm_aac_cfg));
+		aac_cfg.aot = AAC_ENC_MODE_EAAC_P;
+		if (prtd->codec_param.codec.format ==
+					SND_AUDIOSTREAMFORMAT_MP4ADTS)
+			aac_cfg.format = 0x0;
+		else if (prtd->codec_param.codec.format ==
+					SND_AUDIOSTREAMFORMAT_MP4LATM)
+			aac_cfg.format = 0x04;
+		else
+			aac_cfg.format = 0x03;
+		aac_cfg.ch_cfg = prtd->num_channels;
+		aac_cfg.sample_rate = prtd->sample_rate;
+		ret = q6asm_stream_media_format_block_aac(prtd->audio_client,
+							  &aac_cfg, stream_id);
+		if (ret < 0)
+			pr_err("%s: CMD Format block failed\n", __func__);
+		break;
+	case FORMAT_AC3:
+		pr_debug("SND_AUDIOCODEC_AC3\n");
+		break;
+	case FORMAT_EAC3:
+		pr_debug("SND_AUDIOCODEC_EAC3\n");
+		break;
+	case FORMAT_WMA_V9:
+		pr_debug("SND_AUDIOCODEC_WMA\n");
+		memset(&wma_cfg, 0x0, sizeof(struct asm_wma_cfg));
+		wma_cfg.format_tag = prtd->codec_param.codec.format;
+		wma_cfg.ch_cfg = prtd->codec_param.codec.ch_in;
+		wma_cfg.sample_rate = prtd->sample_rate;
+		wma_cfg.avg_bytes_per_sec = codec_options->wma.avg_bit_rate/8;
+		wma_cfg.block_align = codec_options->wma.super_block_align;
+		wma_cfg.valid_bits_per_sample =
+			codec_options->wma.bits_per_sample;
+		wma_cfg.ch_mask = codec_options->wma.channelmask;
+		wma_cfg.encode_opt = codec_options->wma.encodeopt;
+		ret = q6asm_media_format_block_wma(prtd->audio_client,
+					&wma_cfg, stream_id);
+		if (ret < 0)
+			pr_err("%s: CMD Format block failed\n", __func__);
+		break;
+	case FORMAT_WMA_V10PRO:
+		pr_debug("SND_AUDIOCODEC_WMA_PRO\n");
+		memset(&wma_pro_cfg, 0x0, sizeof(struct asm_wmapro_cfg));
+		wma_pro_cfg.format_tag = prtd->codec_param.codec.format;
+		wma_pro_cfg.ch_cfg = prtd->codec_param.codec.ch_in;
+		wma_pro_cfg.sample_rate = prtd->sample_rate;
+		wma_pro_cfg.avg_bytes_per_sec =
+			codec_options->wma.avg_bit_rate / 8;
+		wma_pro_cfg.block_align = codec_options->wma.super_block_align;
+		wma_pro_cfg.valid_bits_per_sample =
+			codec_options->wma.bits_per_sample;
+		wma_pro_cfg.ch_mask = codec_options->wma.channelmask;
+		wma_pro_cfg.encode_opt = codec_options->wma.encodeopt;
+		wma_pro_cfg.adv_encode_opt = codec_options->wma.encodeopt1;
+		wma_pro_cfg.adv_encode_opt2 = codec_options->wma.encodeopt2;
+		ret = q6asm_media_format_block_wmapro(prtd->audio_client,
+				&wma_pro_cfg, stream_id);
+		if (ret < 0)
+			pr_err("%s: CMD Format block failed\n", __func__);
+		break;
+	case FORMAT_MP2:
+		pr_debug("%s: SND_AUDIOCODEC_MP2\n", __func__);
+		break;
+	case FORMAT_FLAC:
+		pr_debug("%s: SND_AUDIOCODEC_FLAC\n", __func__);
+		memset(&flac_cfg, 0x0, sizeof(struct asm_flac_cfg));
+		flac_cfg.ch_cfg = prtd->num_channels;
+		flac_cfg.sample_rate = prtd->sample_rate;
+		flac_cfg.stream_info_present = 1;
+		flac_cfg.sample_size = codec_options->flac_dec.sample_size;
+		flac_cfg.min_blk_size = codec_options->flac_dec.min_blk_size;
+		flac_cfg.max_blk_size = codec_options->flac_dec.max_blk_size;
+		flac_cfg.max_frame_size =
+			codec_options->flac_dec.max_frame_size;
+		flac_cfg.min_frame_size =
+			codec_options->flac_dec.min_frame_size;
+
+		ret = q6asm_stream_media_format_block_flac(prtd->audio_client,
+							&flac_cfg, stream_id);
+		if (ret < 0)
+			pr_err("%s: CMD Format block failed ret %d\n",
+				__func__, ret);
+
+		break;
+	case FORMAT_VORBIS:
+		pr_debug("%s: SND_AUDIOCODEC_VORBIS\n", __func__);
+		memset(&vorbis_cfg, 0x0, sizeof(struct asm_vorbis_cfg));
+		vorbis_cfg.bit_stream_fmt =
+			codec_options->vorbis_dec.bit_stream_fmt;
+
+		ret = q6asm_stream_media_format_block_vorbis(
+					prtd->audio_client, &vorbis_cfg,
+					stream_id);
+		if (ret < 0)
+			pr_err("%s: CMD Format block failed ret %d\n",
+					__func__, ret);
+
+		break;
+	case FORMAT_ALAC:
+		pr_debug("%s: SND_AUDIOCODEC_ALAC\n", __func__);
+		memset(&alac_cfg, 0x0, sizeof(struct asm_alac_cfg));
+		alac_cfg.num_channels = prtd->num_channels;
+		alac_cfg.sample_rate = prtd->sample_rate;
+		alac_cfg.frame_length = codec_options->alac.frame_length;
+		alac_cfg.compatible_version =
+			codec_options->alac.compatible_version;
+		alac_cfg.bit_depth = codec_options->alac.bit_depth;
+		alac_cfg.pb = codec_options->alac.pb;
+		alac_cfg.mb = codec_options->alac.mb;
+		alac_cfg.kb = codec_options->alac.kb;
+		alac_cfg.max_run = codec_options->alac.max_run;
+		alac_cfg.max_frame_bytes = codec_options->alac.max_frame_bytes;
+		alac_cfg.avg_bit_rate = codec_options->alac.avg_bit_rate;
+		alac_cfg.channel_layout_tag =
+			codec_options->alac.channel_layout_tag;
+
+		ret = q6asm_media_format_block_alac(prtd->audio_client,
+							&alac_cfg, stream_id);
+		if (ret < 0)
+			pr_err("%s: CMD Format block failed ret %d\n",
+					__func__, ret);
+		break;
+	case FORMAT_APE:
+		pr_debug("%s: SND_AUDIOCODEC_APE\n", __func__);
+		memset(&ape_cfg, 0x0, sizeof(struct asm_ape_cfg));
+		ape_cfg.num_channels = prtd->num_channels;
+		ape_cfg.sample_rate = prtd->sample_rate;
+		ape_cfg.compatible_version =
+			codec_options->ape.compatible_version;
+		ape_cfg.compression_level =
+			codec_options->ape.compression_level;
+		ape_cfg.format_flags = codec_options->ape.format_flags;
+		ape_cfg.blocks_per_frame = codec_options->ape.blocks_per_frame;
+		ape_cfg.final_frame_blocks =
+			codec_options->ape.final_frame_blocks;
+		ape_cfg.total_frames = codec_options->ape.total_frames;
+		ape_cfg.bits_per_sample = codec_options->ape.bits_per_sample;
+		ape_cfg.seek_table_present =
+			codec_options->ape.seek_table_present;
+
+		ret = q6asm_media_format_block_ape(prtd->audio_client,
+							&ape_cfg, stream_id);
+
+		if (ret < 0)
+			pr_err("%s: CMD Format block failed ret %d\n",
+					__func__, ret);
+		break;
+	case FORMAT_DTS:
+		pr_debug("SND_AUDIOCODEC_DTS\n");
+		/* no media format block needed */
+		break;
+	case FORMAT_DSD:
+		pr_debug("%s: SND_AUDIOCODEC_DSD\n", __func__);
+		memset(&dsd_cfg, 0x0, sizeof(struct asm_dsd_cfg));
+		dsd_cfg.num_channels = prtd->num_channels;
+		dsd_cfg.dsd_data_rate = prtd->sample_rate;
+		dsd_cfg.num_version = 0;
+		dsd_cfg.is_bitwise_big_endian = 1;
+		dsd_cfg.dsd_channel_block_size = 1;
+		ret = q6asm_media_format_block_dsd(prtd->audio_client,
+						   &dsd_cfg, stream_id);
+		if (ret < 0)
+			pr_err("%s: CMD DSD Format block failed ret %d\n",
+				__func__, ret);
+		break;
+	case FORMAT_TRUEHD:
+		pr_debug("SND_AUDIOCODEC_TRUEHD\n");
+		/* no media format block needed */
+		break;
+	case FORMAT_IEC61937:
+		pr_debug("SND_AUDIOCODEC_IEC61937\n");
+		ret = q6asm_media_format_block_iec(prtd->audio_client,
+						   prtd->sample_rate,
+						   prtd->num_channels);
+		if (ret < 0)
+			pr_err("%s: CMD IEC61937 Format block failed ret %d\n",
+				__func__, ret);
+		break;
+	case FORMAT_APTX:
+		pr_debug("SND_AUDIOCODEC_APTX\n");
+		memset(&aptx_cfg, 0x0, sizeof(struct aptx_dec_bt_addr_cfg));
+		ret = q6asm_stream_media_format_block_aptx_dec(
+							prtd->audio_client,
+							prtd->sample_rate,
+							stream_id);
+		if (ret >= 0) {
+			aptx_cfg.nap = codec_options->aptx_dec.nap;
+			aptx_cfg.uap = codec_options->aptx_dec.uap;
+			aptx_cfg.lap = codec_options->aptx_dec.lap;
+			q6asm_set_aptx_dec_bt_addr(prtd->audio_client,
+							&aptx_cfg);
+		} else {
+			pr_err("%s: CMD Format block failed ret %d\n",
+					 __func__, ret);
+		}
+		break;
+	default:
+		pr_debug("%s, unsupported format, skip", __func__);
+		break;
+	}
+	return ret;
+}
+
+static int msm_compr_init_pp_params(struct snd_compr_stream *cstream,
+				    struct audio_client *ac)
+{
+	int ret = 0;
+	struct asm_softvolume_params softvol = {
+		.period = SOFT_VOLUME_PERIOD,
+		.step = SOFT_VOLUME_STEP,
+		.rampingcurve = SOFT_VOLUME_CURVE_LINEAR,
+	};
+
+	switch (ac->topology) {
+	default:
+		ret = q6asm_set_softvolume_v2(ac, &softvol,
+					      SOFT_VOLUME_INSTANCE_1);
+		if (ret < 0)
+			pr_err("%s: Send SoftVolume Param failed ret=%d\n",
+			__func__, ret);
+
+		break;
+	}
+	return ret;
+}
+
+static int msm_compr_configure_dsp_for_playback(
+			struct snd_compr_stream *cstream)
+{
+	struct snd_compr_runtime *runtime = cstream->runtime;
+	struct msm_compr_audio *prtd = runtime->private_data;
+	struct snd_soc_pcm_runtime *soc_prtd = cstream->private_data;
+	uint16_t bits_per_sample = 16;
+	int dir = IN, ret = 0;
+	struct audio_client *ac = prtd->audio_client;
+	uint32_t stream_index;
+	union snd_codec_options *codec_options =
+		&(prtd->codec_param.codec.options);
+
+	struct asm_softpause_params softpause = {
+		.enable = SOFT_PAUSE_ENABLE,
+		.period = SOFT_PAUSE_PERIOD,
+		.step = SOFT_PAUSE_STEP,
+		.rampingcurve = SOFT_PAUSE_CURVE_LINEAR,
+	};
+	struct asm_softvolume_params softvol = {
+		.period = SOFT_VOLUME_PERIOD,
+		.step = SOFT_VOLUME_STEP,
+		.rampingcurve = SOFT_VOLUME_CURVE_LINEAR,
+	};
+
+	pr_debug("%s: stream_id %d\n", __func__, ac->stream_id);
+	stream_index = STREAM_ARRAY_INDEX(ac->stream_id);
+	if (stream_index >= MAX_NUMBER_OF_STREAMS) {
+		pr_err("%s: Invalid stream index:%d", __func__, stream_index);
+		return -EINVAL;
+	}
+
+	if ((prtd->codec_param.codec.format == SNDRV_PCM_FORMAT_S24_LE) ||
+		(prtd->codec_param.codec.format == SNDRV_PCM_FORMAT_S24_3LE))
+		bits_per_sample = 24;
+	else if (prtd->codec_param.codec.format == SNDRV_PCM_FORMAT_S32_LE)
+		bits_per_sample = 32;
+	else if (prtd->codec == FORMAT_FLAC && codec_options &&
+		(codec_options->flac_dec.sample_size != 0))
+		bits_per_sample = codec_options->flac_dec.sample_size;
+
+	if (prtd->compr_passthr != LEGACY_PCM) {
+		ret = q6asm_open_write_compressed(ac, prtd->codec,
+						  prtd->compr_passthr);
+		if (ret < 0) {
+			pr_err("%s:ASM open write err[%d] for compr_type[%d]\n",
+				__func__, ret, prtd->compr_passthr);
+			return ret;
+		}
+		prtd->gapless_state.stream_opened[stream_index] = 1;
+
+		ret = msm_pcm_routing_reg_phy_compr_stream(
+				soc_prtd->dai_link->be_id,
+				ac->perf_mode,
+				prtd->session_id,
+				SNDRV_PCM_STREAM_PLAYBACK,
+				prtd->compr_passthr);
+		if (ret) {
+			pr_err("%s: compr stream reg failed:%d\n", __func__,
+				ret);
+			return ret;
+		}
+	} else {
+		pr_debug("%s: stream_id %d bits_per_sample %d\n",
+				__func__, ac->stream_id, bits_per_sample);
+		ret = q6asm_stream_open_write_v4(ac,
+				prtd->codec, bits_per_sample,
+				ac->stream_id,
+				prtd->gapless_state.use_dsp_gapless_mode);
+		if (ret < 0) {
+			pr_err("%s:ASM open write err[%d] for compr type[%d]\n",
+				__func__, ret, prtd->compr_passthr);
+			return -ENOMEM;
+		}
+		prtd->gapless_state.stream_opened[stream_index] = 1;
+
+		pr_debug("%s: be_id %d\n", __func__, soc_prtd->dai_link->be_id);
+		ret = msm_pcm_routing_reg_phy_stream(soc_prtd->dai_link->be_id,
+				ac->perf_mode,
+				prtd->session_id,
+				SNDRV_PCM_STREAM_PLAYBACK);
+		if (ret) {
+			pr_err("%s: stream reg failed:%d\n", __func__, ret);
+			return ret;
+		}
+	}
+
+	ret = msm_compr_set_volume(cstream, 0, 0);
+	if (ret < 0)
+		pr_err("%s : Set Volume failed : %d", __func__, ret);
+
+	if (prtd->compr_passthr != LEGACY_PCM) {
+		pr_debug("%s : Don't send cal and PP params for compress path",
+				__func__);
+	} else {
+		ret = q6asm_send_cal(ac);
+		if (ret < 0)
+			pr_debug("%s : Send cal failed : %d", __func__, ret);
+
+		ret = q6asm_set_softpause(ac, &softpause);
+		if (ret < 0)
+			pr_err("%s: Send SoftPause Param failed ret=%d\n",
+					__func__, ret);
+
+		ret = q6asm_set_softvolume(ac, &softvol);
+		if (ret < 0)
+			pr_err("%s: Send SoftVolume Param failed ret=%d\n",
+					__func__, ret);
+	}
+	ret = q6asm_set_io_mode(ac, (COMPRESSED_STREAM_IO | ASYNC_IO_MODE));
+	if (ret < 0) {
+		pr_err("%s: Set IO mode failed\n", __func__);
+		return -EINVAL;
+	}
+
+	runtime->fragments = prtd->codec_param.buffer.fragments;
+	runtime->fragment_size = prtd->codec_param.buffer.fragment_size;
+	pr_debug("allocate %d buffers each of size %d\n",
+			runtime->fragments,
+			runtime->fragment_size);
+	ret = q6asm_audio_client_buf_alloc_contiguous(dir, ac,
+					runtime->fragment_size,
+					runtime->fragments);
+	if (ret < 0) {
+		pr_err("Audio Start: Buffer Allocation failed rc = %d\n", ret);
+		return -ENOMEM;
+	}
+
+	prtd->byte_offset  = 0;
+	prtd->copied_total = 0;
+	prtd->app_pointer  = 0;
+	prtd->bytes_received = 0;
+	prtd->bytes_sent = 0;
+	prtd->buffer       = ac->port[dir].buf[0].data;
+	prtd->buffer_paddr = ac->port[dir].buf[0].phys;
+	prtd->buffer_size  = runtime->fragments * runtime->fragment_size;
+
+	/* Bit-0 of flags represents timestamp mode */
+	if (prtd->codec_param.codec.flags & COMPRESSED_TIMESTAMP_FLAG)
+		prtd->ts_header_offset = sizeof(struct snd_codec_metadata);
+	else
+		prtd->ts_header_offset = 0;
+
+	ret = msm_compr_send_media_format_block(cstream, ac->stream_id, false);
+	if (ret < 0) {
+		pr_err("%s, failed to send media format block\n", __func__);
+	}
+
+	return ret;
+}
+
+static int msm_compr_configure_dsp_for_capture(struct snd_compr_stream *cstream)
+{
+	struct snd_compr_runtime *runtime = cstream->runtime;
+	struct msm_compr_audio *prtd = runtime->private_data;
+	struct snd_soc_pcm_runtime *soc_prtd = cstream->private_data;
+	uint16_t bits_per_sample;
+	uint16_t sample_word_size;
+	int dir = OUT, ret = 0;
+	struct audio_client *ac = prtd->audio_client;
+	uint32_t stream_index;
+
+	switch (prtd->codec_param.codec.format) {
+	case SNDRV_PCM_FORMAT_S24_LE:
+		bits_per_sample = 24;
+		sample_word_size = 32;
+		break;
+	case SNDRV_PCM_FORMAT_S24_3LE:
+		bits_per_sample = 24;
+		sample_word_size = 24;
+		break;
+	case SNDRV_PCM_FORMAT_S32_LE:
+		bits_per_sample = 32;
+		sample_word_size = 32;
+		break;
+	case SNDRV_PCM_FORMAT_S16_LE:
+	default:
+		bits_per_sample = 16;
+		sample_word_size = 16;
+		break;
+	}
+
+	pr_debug("%s: stream_id %d bits_per_sample %d\n",
+			__func__, ac->stream_id, bits_per_sample);
+
+	if (prtd->codec_param.codec.flags & COMPRESSED_TIMESTAMP_FLAG) {
+		ret = q6asm_open_read_v4(prtd->audio_client, FORMAT_LINEAR_PCM,
+			bits_per_sample, true);
+	} else {
+		ret = q6asm_open_read_v4(prtd->audio_client, FORMAT_LINEAR_PCM,
+			bits_per_sample, false);
+	}
+	if (ret < 0) {
+		pr_err("%s: q6asm_open_read failed:%d\n", __func__, ret);
+		return ret;
+	}
+
+	ret = msm_pcm_routing_reg_phy_stream(soc_prtd->dai_link->be_id,
+			ac->perf_mode,
+			prtd->session_id,
+			SNDRV_PCM_STREAM_CAPTURE);
+	if (ret) {
+		pr_err("%s: stream reg failed:%d\n", __func__, ret);
+		return ret;
+	}
+
+	ret = q6asm_set_io_mode(ac, (COMPRESSED_STREAM_IO | ASYNC_IO_MODE));
+	if (ret < 0) {
+		pr_err("%s: Set IO mode failed\n", __func__);
+		return -EINVAL;
+	}
+
+	stream_index = STREAM_ARRAY_INDEX(ac->stream_id);
+	if (stream_index >= MAX_NUMBER_OF_STREAMS) {
+		pr_err("%s: Invalid stream index:%d", __func__, stream_index);
+		return -EINVAL;
+	}
+
+	runtime->fragments = prtd->codec_param.buffer.fragments;
+	runtime->fragment_size = prtd->codec_param.buffer.fragment_size;
+	pr_debug("%s: allocate %d buffers each of size %d\n",
+			__func__, runtime->fragments,
+			runtime->fragment_size);
+	ret = q6asm_audio_client_buf_alloc_contiguous(dir, ac,
+					runtime->fragment_size,
+					runtime->fragments);
+	if (ret < 0) {
+		pr_err("Audio Start: Buffer Allocation failed rc = %d\n", ret);
+		return -ENOMEM;
+	}
+
+	prtd->byte_offset    = 0;
+	prtd->received_total = 0;
+	prtd->app_pointer    = 0;
+	prtd->bytes_copied   = 0;
+	prtd->bytes_read     = 0;
+	prtd->bytes_read_offset = 0;
+	prtd->buffer         = ac->port[dir].buf[0].data;
+	prtd->buffer_paddr   = ac->port[dir].buf[0].phys;
+	prtd->buffer_size    = runtime->fragments * runtime->fragment_size;
+
+	/* Bit-0 of flags represents timestamp mode */
+	if (prtd->codec_param.codec.flags & COMPRESSED_TIMESTAMP_FLAG)
+		prtd->ts_header_offset = sizeof(struct snd_codec_metadata);
+	else
+		prtd->ts_header_offset = 0;
+
+	pr_debug("%s: sample_rate = %d channels = %d bps = %d sample_word_size = %d\n",
+			__func__, prtd->sample_rate, prtd->num_channels,
+					 bits_per_sample, sample_word_size);
+	ret = q6asm_enc_cfg_blk_pcm_format_support_v3(prtd->audio_client,
+					prtd->sample_rate, prtd->num_channels,
+					bits_per_sample, sample_word_size);
+
+	return ret;
+}
+
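+/*
+ * Hand a userspace ION buffer over to the ADSP: resolve the fd to a
+ * physical address/length and add those pages to the ADSP physical
+ * pool.  On failure the memory is assigned back to the HLOS;
+ * msm_compr_unmap_ion_fd performs the reverse at close time.
+ */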
+static int msm_compr_map_ion_fd(struct msm_compr_audio *prtd, int fd)
+{
+	ion_phys_addr_t paddr;
+	size_t pa_len = 0;
+	int ret = 0;
+
+	ret = msm_audio_ion_phys_assign("audio_lib_mem_client",
+					&prtd->lib_ion_client,
+					&prtd->lib_ion_handle,
+					fd, &paddr, &pa_len, HLOS_TO_ADSP);
+	if (ret) {
+		pr_err("%s: audio lib ION phys failed, rc = %d\n",
+			__func__, ret);
+		goto done;
+	}
+
+	ret = q6core_add_remove_pool_pages(paddr, pa_len,
+				 ADSP_MEMORY_MAP_HLOS_PHYSPOOL, true);
+	if (ret) {
+		pr_err("%s: add remove pages failed, rc = %d\n", __func__, ret);
+		/* Assign back to HLOS if add pages cmd failed */
+		msm_audio_ion_phys_free(prtd->lib_ion_client,
+					prtd->lib_ion_handle,
+					&paddr, &pa_len, ADSP_TO_HLOS);
+	}
+
+done:
+	return ret;
+}
+
+static int msm_compr_unmap_ion_fd(struct msm_compr_audio *prtd)
+{
+	ion_phys_addr_t paddr;
+	size_t pa_len = 0;
+	int ret = 0;
+
+	if (!prtd->lib_ion_client || !prtd->lib_ion_handle) {
+		pr_err("%s: ion_client or ion_handle is NULL", __func__);
+		return -EINVAL;
+	}
+
+	ret = msm_audio_ion_phys_free(prtd->lib_ion_client,
+				      prtd->lib_ion_handle,
+				      &paddr, &pa_len, ADSP_TO_HLOS);
+	if (ret) {
+		pr_err("%s: audio lib ION phys failed, rc = %d\n",
+			__func__, ret);
+		goto done;
+	}
+
+	ret = q6core_add_remove_pool_pages(paddr, pa_len,
+				 ADSP_MEMORY_MAP_HLOS_PHYSPOOL, false);
+	if (ret)
+		pr_err("%s: add remove pages failed, rc = %d\n", __func__, ret);
+
+done:
+	return ret;
+}
+
+static int msm_compr_playback_open(struct snd_compr_stream *cstream)
+{
+	struct snd_compr_runtime *runtime = cstream->runtime;
+	struct snd_soc_pcm_runtime *rtd = cstream->private_data;
+	struct msm_compr_audio *prtd = NULL;
+	struct msm_compr_pdata *pdata =
+			snd_soc_platform_get_drvdata(rtd->platform);
+	int ret = 0;
+
+	pr_debug("%s\n", __func__);
+	if (pdata->is_in_use[rtd->dai_link->be_id]) {
+		pr_err("%s: %s is already in use, err: %d\n",
+			__func__, rtd->dai_link->cpu_dai_name, -EBUSY);
+		return -EBUSY;
+	}
+	prtd = kzalloc(sizeof(struct msm_compr_audio), GFP_KERNEL);
+	if (prtd == NULL) {
+		pr_err("Failed to allocate memory for msm_compr_audio\n");
+		return -ENOMEM;
+	}
+
+	runtime->private_data = NULL;
+	prtd->cstream = cstream;
+	pdata->cstream[rtd->dai_link->be_id] = cstream;
+	pdata->audio_effects[rtd->dai_link->be_id] =
+		 kzalloc(sizeof(struct msm_compr_audio_effects), GFP_KERNEL);
+	if (pdata->audio_effects[rtd->dai_link->be_id] == NULL) {
+		pr_err("%s: Could not allocate memory for effects\n", __func__);
+		ret = -ENOMEM;
+		goto effect_err;
+	}
+	pdata->dec_params[rtd->dai_link->be_id] =
+		 kzalloc(sizeof(struct msm_compr_dec_params), GFP_KERNEL);
+	if (pdata->dec_params[rtd->dai_link->be_id] == NULL) {
+		pr_err("%s: Could not allocate memory for dec params\n",
+			__func__);
+		ret = -ENOMEM;
+		goto param_err;
+	}
+	prtd->codec = FORMAT_MP3;
+	prtd->bytes_received = 0;
+	prtd->bytes_sent = 0;
+	prtd->copied_total = 0;
+	prtd->byte_offset = 0;
+	prtd->sample_rate = 44100;
+	prtd->num_channels = 2;
+	prtd->drain_ready = 0;
+	prtd->last_buffer = 0;
+	prtd->first_buffer = 1;
+	prtd->partial_drain_delay = 0;
+	prtd->next_stream = 0;
+	memset(&prtd->gapless_state, 0, sizeof(struct msm_compr_gapless_state));
+	/*
+	 * Update use_dsp_gapless_mode in the gapless structure with the
+	 * value provided as part of the platform data.
+	 */
+	prtd->gapless_state.use_dsp_gapless_mode = pdata->use_dsp_gapless_mode;
+
+	pr_debug("%s: gapless mode %d", __func__, pdata->use_dsp_gapless_mode);
+
+	spin_lock_init(&prtd->lock);
+
+	atomic_set(&prtd->eos, 0);
+	atomic_set(&prtd->start, 0);
+	atomic_set(&prtd->drain, 0);
+	atomic_set(&prtd->xrun, 0);
+	atomic_set(&prtd->close, 0);
+	atomic_set(&prtd->wait_on_close, 0);
+	atomic_set(&prtd->error, 0);
+
+	init_waitqueue_head(&prtd->eos_wait);
+	init_waitqueue_head(&prtd->drain_wait);
+	init_waitqueue_head(&prtd->close_wait);
+	init_waitqueue_head(&prtd->wait_for_stream_avail);
+
+	runtime->private_data = prtd;
+	populate_codec_list(prtd);
+	prtd->audio_client = q6asm_audio_client_alloc(
+				(app_cb)compr_event_handler, prtd);
+	if (!prtd->audio_client) {
+		pr_err("%s: Could not allocate memory for client\n", __func__);
+		ret = -ENOMEM;
+		goto ac_err;
+	}
+	pr_debug("%s: session ID %d\n", __func__, prtd->audio_client->session);
+	prtd->audio_client->perf_mode = false;
+	prtd->session_id = prtd->audio_client->session;
+	msm_adsp_init_mixer_ctl_pp_event_queue(rtd);
+	if (pdata->ion_fd[rtd->dai_link->be_id] > 0) {
+		ret = msm_compr_map_ion_fd(prtd,
+					pdata->ion_fd[rtd->dai_link->be_id]);
+		if (ret < 0)
+			goto map_err;
+	}
+	pdata->is_in_use[rtd->dai_link->be_id] = true;
+	return 0;
+
+map_err:
+	q6asm_audio_client_free(prtd->audio_client);
+ac_err:
+	kfree(pdata->dec_params[rtd->dai_link->be_id]);
+	pdata->dec_params[rtd->dai_link->be_id] = NULL;
+param_err:
+	kfree(pdata->audio_effects[rtd->dai_link->be_id]);
+	pdata->audio_effects[rtd->dai_link->be_id] = NULL;
+effect_err:
+	pdata->cstream[rtd->dai_link->be_id] = NULL;
+	runtime->private_data = NULL;
+	kfree(prtd);
+	return ret;
+}
+
+static int msm_compr_capture_open(struct snd_compr_stream *cstream)
+{
+	struct snd_compr_runtime *runtime = cstream->runtime;
+	struct snd_soc_pcm_runtime *rtd = cstream->private_data;
+	struct msm_compr_audio *prtd;
+	struct msm_compr_pdata *pdata =
+			snd_soc_platform_get_drvdata(rtd->platform);
+
+	pr_debug("%s\n", __func__);
+	prtd = kzalloc(sizeof(struct msm_compr_audio), GFP_KERNEL);
+	if (prtd == NULL) {
+		pr_err("Failed to allocate memory for msm_compr_audio\n");
+		return -ENOMEM;
+	}
+
+	runtime->private_data = NULL;
+	prtd->cstream = cstream;
+	pdata->cstream[rtd->dai_link->be_id] = cstream;
+
+	prtd->audio_client = q6asm_audio_client_alloc(
+				(app_cb)compr_event_handler, prtd);
+	if (!prtd->audio_client) {
+		pr_err("%s: Could not allocate memory for client\n", __func__);
+		pdata->cstream[rtd->dai_link->be_id] = NULL;
+		kfree(prtd);
+		return -ENOMEM;
+	}
+	pr_debug("%s: session ID %d\n", __func__, prtd->audio_client->session);
+	prtd->audio_client->perf_mode = false;
+	prtd->session_id = prtd->audio_client->session;
+	prtd->codec = FORMAT_LINEAR_PCM;
+	prtd->bytes_copied = 0;
+	prtd->bytes_read = 0;
+	prtd->bytes_read_offset = 0;
+	prtd->received_total = 0;
+	prtd->byte_offset = 0;
+	prtd->sample_rate = 48000;
+	prtd->num_channels = 2;
+	prtd->first_buffer = 0;
+
+	spin_lock_init(&prtd->lock);
+
+	atomic_set(&prtd->eos, 0);
+	atomic_set(&prtd->start, 0);
+	atomic_set(&prtd->drain, 0);
+	atomic_set(&prtd->xrun, 0);
+	atomic_set(&prtd->close, 0);
+	atomic_set(&prtd->wait_on_close, 0);
+	atomic_set(&prtd->error, 0);
+
+	runtime->private_data = prtd;
+
+	return 0;
+}
+
+static int msm_compr_open(struct snd_compr_stream *cstream)
+{
+	int ret = 0;
+
+	if (cstream->direction == SND_COMPRESS_PLAYBACK)
+		ret = msm_compr_playback_open(cstream);
+	else if (cstream->direction == SND_COMPRESS_CAPTURE)
+		ret = msm_compr_capture_open(cstream);
+	return ret;
+}
+
+static int msm_compr_playback_free(struct snd_compr_stream *cstream)
+{
+	struct snd_compr_runtime *runtime;
+	struct msm_compr_audio *prtd;
+	struct snd_soc_pcm_runtime *soc_prtd;
+	struct msm_compr_pdata *pdata;
+	struct audio_client *ac;
+	int dir = IN, ret = 0, stream_id;
+	unsigned long flags;
+	uint32_t stream_index;
+	ion_phys_addr_t paddr;
+	size_t pa_len = 0;
+
+	pr_debug("%s\n", __func__);
+
+	if (!cstream) {
+		pr_err("%s cstream is null\n", __func__);
+		return 0;
+	}
+	runtime = cstream->runtime;
+	soc_prtd = cstream->private_data;
+	if (!runtime || !soc_prtd || !(soc_prtd->platform)) {
+		pr_err("%s runtime or soc_prtd or platform is null\n",
+			__func__);
+		return 0;
+	}
+	prtd = runtime->private_data;
+	if (!prtd) {
+		pr_err("%s prtd is null\n", __func__);
+		return 0;
+	}
+	prtd->cmd_interrupt = 1;
+	wake_up(&prtd->drain_wait);
+	pdata = snd_soc_platform_get_drvdata(soc_prtd->platform);
+	ac = prtd->audio_client;
+	if (!pdata || !ac) {
+		pr_err("%s pdata or ac is null\n", __func__);
+		return 0;
+	}
+	if (atomic_read(&prtd->eos)) {
+		ret = wait_event_timeout(prtd->eos_wait,
+					 prtd->eos_ack, 5 * HZ);
+		if (!ret)
+			pr_err("%s: CMD_EOS failed\n", __func__);
+	}
+	if (atomic_read(&prtd->close)) {
+		prtd->cmd_ack = 0;
+		atomic_set(&prtd->wait_on_close, 1);
+		ret = wait_event_timeout(prtd->close_wait,
+					prtd->cmd_ack, 5 * HZ);
+		if (!ret)
+			pr_err("%s: CMD_CLOSE failed\n", __func__);
+	}
+
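+	/*
+	 * Gapless playback alternates between two ASM stream ids on the
+	 * same session; close the peer (next) stream first if it was
+	 * opened, then the current one.
+	 */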
+	spin_lock_irqsave(&prtd->lock, flags);
+	stream_id = ac->stream_id;
+	stream_index = STREAM_ARRAY_INDEX(NEXT_STREAM_ID(stream_id));
+
+	if (stream_index < MAX_NUMBER_OF_STREAMS &&
+	    prtd->gapless_state.stream_opened[stream_index]) {
+		prtd->gapless_state.stream_opened[stream_index] = 0;
+		spin_unlock_irqrestore(&prtd->lock, flags);
+		pr_debug(" close stream %d", NEXT_STREAM_ID(stream_id));
+		q6asm_stream_cmd(ac, CMD_CLOSE, NEXT_STREAM_ID(stream_id));
+		spin_lock_irqsave(&prtd->lock, flags);
+	}
+
+	stream_index = STREAM_ARRAY_INDEX(stream_id);
+	if (stream_index < MAX_NUMBER_OF_STREAMS &&
+	    prtd->gapless_state.stream_opened[stream_index]) {
+		prtd->gapless_state.stream_opened[stream_index] = 0;
+		spin_unlock_irqrestore(&prtd->lock, flags);
+		pr_debug("close stream %d", stream_id);
+		q6asm_stream_cmd(ac, CMD_CLOSE, stream_id);
+		spin_lock_irqsave(&prtd->lock, flags);
+	}
+	spin_unlock_irqrestore(&prtd->lock, flags);
+
+	pdata->cstream[soc_prtd->dai_link->be_id] = NULL;
+	if (cstream->direction == SND_COMPRESS_PLAYBACK) {
+		msm_pcm_routing_dereg_phy_stream(soc_prtd->dai_link->be_id,
+						SNDRV_PCM_STREAM_PLAYBACK);
+	}
+
+	q6asm_audio_client_buf_free_contiguous(dir, ac);
+	if (prtd->shm_ion_fd > 0)
+		msm_audio_ion_phys_free(prtd->shm_ion_client,
+					prtd->shm_ion_handle,
+					&paddr, &pa_len, ADSP_TO_HLOS);
+	if (pdata->ion_fd[soc_prtd->dai_link->be_id] > 0) {
+		msm_compr_unmap_ion_fd(prtd);
+		pdata->ion_fd[soc_prtd->dai_link->be_id] = 0;
+	}
+
+	q6asm_audio_client_free(ac);
+	msm_adsp_clean_mixer_ctl_pp_event_queue(soc_prtd);
+	if (pdata->audio_effects[soc_prtd->dai_link->be_id] != NULL) {
+		kfree(pdata->audio_effects[soc_prtd->dai_link->be_id]);
+		pdata->audio_effects[soc_prtd->dai_link->be_id] = NULL;
+	}
+	if (pdata->dec_params[soc_prtd->dai_link->be_id] != NULL) {
+		kfree(pdata->dec_params[soc_prtd->dai_link->be_id]);
+		pdata->dec_params[soc_prtd->dai_link->be_id] = NULL;
+	}
+	pdata->is_in_use[soc_prtd->dai_link->be_id] = false;
+	kfree(prtd);
+	runtime->private_data = NULL;
+
+	return 0;
+}
+
+static int msm_compr_capture_free(struct snd_compr_stream *cstream)
+{
+	struct snd_compr_runtime *runtime;
+	struct msm_compr_audio *prtd;
+	struct snd_soc_pcm_runtime *soc_prtd;
+	struct msm_compr_pdata *pdata;
+	struct audio_client *ac;
+	int dir = OUT, stream_id;
+	unsigned long flags;
+	uint32_t stream_index;
+
+	if (!cstream) {
+		pr_err("%s cstream is null\n", __func__);
+		return 0;
+	}
+	runtime = cstream->runtime;
+	soc_prtd = cstream->private_data;
+	if (!runtime || !soc_prtd || !(soc_prtd->platform)) {
+		pr_err("%s runtime or soc_prtd or platform is null\n",
+			__func__);
+		return 0;
+	}
+	prtd = runtime->private_data;
+	if (!prtd) {
+		pr_err("%s prtd is null\n", __func__);
+		return 0;
+	}
+	pdata = snd_soc_platform_get_drvdata(soc_prtd->platform);
+	ac = prtd->audio_client;
+	if (!pdata || !ac) {
+		pr_err("%s pdata or ac is null\n", __func__);
+		return 0;
+	}
+
+	spin_lock_irqsave(&prtd->lock, flags);
+	stream_id = ac->stream_id;
+
+	stream_index = STREAM_ARRAY_INDEX(stream_id);
+	if (stream_index < MAX_NUMBER_OF_STREAMS) {
+		spin_unlock_irqrestore(&prtd->lock, flags);
+		pr_debug("close stream %d", stream_id);
+		q6asm_stream_cmd(ac, CMD_CLOSE, stream_id);
+		spin_lock_irqsave(&prtd->lock, flags);
+	}
+	spin_unlock_irqrestore(&prtd->lock, flags);
+
+	pdata->cstream[soc_prtd->dai_link->be_id] = NULL;
+	msm_pcm_routing_dereg_phy_stream(soc_prtd->dai_link->be_id,
+					SNDRV_PCM_STREAM_CAPTURE);
+
+	q6asm_audio_client_buf_free_contiguous(dir, ac);
+
+	q6asm_audio_client_free(ac);
+
+	kfree(prtd);
+	runtime->private_data = NULL;
+
+	return 0;
+}
+
+static int msm_compr_free(struct snd_compr_stream *cstream)
+{
+	int ret = 0;
+
+	if (cstream->direction == SND_COMPRESS_PLAYBACK)
+		ret = msm_compr_playback_free(cstream);
+	else if (cstream->direction == SND_COMPRESS_CAPTURE)
+		ret = msm_compr_capture_free(cstream);
+	return ret;
+}
+
+static bool msm_compr_validate_codec_compr(__u32 codec_id)
+{
+	int32_t i;
+
+	for (i = 0; i < ARRAY_SIZE(compr_codecs); i++) {
+		if (compr_codecs[i] == codec_id)
+			return true;
+	}
+	return false;
+}
+
+/* compress stream operations */
+static int msm_compr_set_params(struct snd_compr_stream *cstream,
+				struct snd_compr_params *params)
+{
+	struct snd_compr_runtime *runtime = cstream->runtime;
+	struct msm_compr_audio *prtd = runtime->private_data;
+	int ret = 0, frame_sz = 0;
+	int i, num_rates;
+	bool is_format_gapless = false;
+
+	pr_debug("%s\n", __func__);
+
+	num_rates = ARRAY_SIZE(supported_sample_rates);
+	for (i = 0; i < num_rates; i++)
+		if (params->codec.sample_rate == supported_sample_rates[i])
+			break;
+	if (i == num_rates)
+		return -EINVAL;
+
+	memcpy(&prtd->codec_param, params, sizeof(struct snd_compr_params));
+	/* ToDo: remove duplicates */
+	prtd->num_channels = prtd->codec_param.codec.ch_in;
+	prtd->sample_rate = prtd->codec_param.codec.sample_rate;
+	pr_debug("%s: sample_rate %d\n", __func__, prtd->sample_rate);
+
+	if ((prtd->codec_param.codec.compr_passthr >= LEGACY_PCM &&
+	     prtd->codec_param.codec.compr_passthr <=
+	     COMPRESSED_PASSTHROUGH_DSD) ||
+	    (prtd->codec_param.codec.compr_passthr ==
+	     COMPRESSED_PASSTHROUGH_IEC61937))
+		prtd->compr_passthr = prtd->codec_param.codec.compr_passthr;
+	else
+		prtd->compr_passthr = LEGACY_PCM;
+	pr_debug("%s: compr_passthr = %d", __func__, prtd->compr_passthr);
+	if (prtd->compr_passthr != LEGACY_PCM) {
+		pr_debug("%s: Reset gapless mode playback for compr_type[%d]\n",
+			__func__, prtd->compr_passthr);
+		prtd->gapless_state.use_dsp_gapless_mode = 0;
+		if (!msm_compr_validate_codec_compr(params->codec.id)) {
+			pr_err("%s codec not supported in passthrough,id =%d\n",
+				 __func__, params->codec.id);
+			return -EINVAL;
+		}
+	}
+
+	switch (params->codec.id) {
+	case SND_AUDIOCODEC_PCM: {
+		pr_debug("SND_AUDIOCODEC_PCM\n");
+		prtd->codec = FORMAT_LINEAR_PCM;
+		is_format_gapless = true;
+		break;
+	}
+
+	case SND_AUDIOCODEC_MP3: {
+		pr_debug("SND_AUDIOCODEC_MP3\n");
+		prtd->codec = FORMAT_MP3;
+		frame_sz = MP3_OUTPUT_FRAME_SZ;
+		is_format_gapless = true;
+		break;
+	}
+
+	case SND_AUDIOCODEC_AAC: {
+		pr_debug("SND_AUDIOCODEC_AAC\n");
+		prtd->codec = FORMAT_MPEG4_AAC;
+		frame_sz = AAC_OUTPUT_FRAME_SZ;
+		is_format_gapless = true;
+		break;
+	}
+
+	case SND_AUDIOCODEC_AC3: {
+		pr_debug("SND_AUDIOCODEC_AC3\n");
+		prtd->codec = FORMAT_AC3;
+		frame_sz = AC3_OUTPUT_FRAME_SZ;
+		is_format_gapless = true;
+		break;
+	}
+
+	case SND_AUDIOCODEC_EAC3: {
+		pr_debug("SND_AUDIOCODEC_EAC3\n");
+		prtd->codec = FORMAT_EAC3;
+		frame_sz = EAC3_OUTPUT_FRAME_SZ;
+		is_format_gapless = true;
+		break;
+	}
+
+	case SND_AUDIOCODEC_MP2: {
+		pr_debug("SND_AUDIOCODEC_MP2\n");
+		prtd->codec = FORMAT_MP2;
+		break;
+	}
+
+	case SND_AUDIOCODEC_WMA: {
+		pr_debug("SND_AUDIOCODEC_WMA\n");
+		prtd->codec = FORMAT_WMA_V9;
+		break;
+	}
+
+	case SND_AUDIOCODEC_WMA_PRO: {
+		pr_debug("SND_AUDIOCODEC_WMA_PRO\n");
+		prtd->codec = FORMAT_WMA_V10PRO;
+		break;
+	}
+
+	case SND_AUDIOCODEC_FLAC: {
+		pr_debug("%s: SND_AUDIOCODEC_FLAC\n", __func__);
+		prtd->codec = FORMAT_FLAC;
+		/*
+		 * DSP buffering is based on the block size; use the minimum
+		 * block size to rule out any false wait.
+		 */
+		frame_sz =
+			prtd->codec_param.codec.options.flac_dec.min_blk_size;
+		is_format_gapless = true;
+		break;
+	}
+
+	case SND_AUDIOCODEC_VORBIS: {
+		pr_debug("%s: SND_AUDIOCODEC_VORBIS\n", __func__);
+		prtd->codec = FORMAT_VORBIS;
+		break;
+	}
+
+	case SND_AUDIOCODEC_ALAC: {
+		pr_debug("%s: SND_AUDIOCODEC_ALAC\n", __func__);
+		prtd->codec = FORMAT_ALAC;
+		break;
+	}
+
+	case SND_AUDIOCODEC_APE: {
+		pr_debug("%s: SND_AUDIOCODEC_APE\n", __func__);
+		prtd->codec = FORMAT_APE;
+		break;
+	}
+
+	case SND_AUDIOCODEC_DTS: {
+		pr_debug("%s: SND_AUDIOCODEC_DTS\n", __func__);
+		prtd->codec = FORMAT_DTS;
+		break;
+	}
+
+	case SND_AUDIOCODEC_DSD: {
+		pr_debug("%s: SND_AUDIOCODEC_DSD\n", __func__);
+		prtd->codec = FORMAT_DSD;
+		break;
+	}
+
+	case SND_AUDIOCODEC_TRUEHD: {
+		pr_debug("%s: SND_AUDIOCODEC_TRUEHD\n", __func__);
+		prtd->codec = FORMAT_TRUEHD;
+		break;
+	}
+
+	case SND_AUDIOCODEC_IEC61937: {
+		pr_debug("%s: SND_AUDIOCODEC_IEC61937\n", __func__);
+		prtd->codec = FORMAT_IEC61937;
+		break;
+	}
+
+	case SND_AUDIOCODEC_APTX: {
+		pr_debug("%s: SND_AUDIOCODEC_APTX\n", __func__);
+		prtd->codec = FORMAT_APTX;
+		break;
+	}
+
+	default:
+		pr_err("codec not supported, id =%d\n", params->codec.id);
+		return -EINVAL;
+	}
+
+	if (!is_format_gapless)
+		prtd->gapless_state.use_dsp_gapless_mode = false;
+
+	prtd->partial_drain_delay =
+		msm_compr_get_partial_drain_delay(frame_sz, prtd->sample_rate);
+
+	if (cstream->direction == SND_COMPRESS_PLAYBACK)
+		ret = msm_compr_configure_dsp_for_playback(cstream);
+	else if (cstream->direction == SND_COMPRESS_CAPTURE)
+		ret = msm_compr_configure_dsp_for_capture(cstream);
+
+	return ret;
+}
+
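+/*
+ * Block until the DSP drains the queued data.  Called with prtd->lock
+ * held: the lock is dropped across the interruptible wait and
+ * re-acquired before returning, so callers must revalidate stream state
+ * afterwards.
+ */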
+static int msm_compr_drain_buffer(struct msm_compr_audio *prtd,
+				  unsigned long *flags)
+{
+	int rc = 0;
+
+	atomic_set(&prtd->drain, 1);
+	prtd->drain_ready = 0;
+	spin_unlock_irqrestore(&prtd->lock, *flags);
+	pr_debug("%s: wait for buffer to be drained\n",  __func__);
+	rc = wait_event_interruptible(prtd->drain_wait,
+					prtd->drain_ready ||
+					prtd->cmd_interrupt ||
+					atomic_read(&prtd->xrun) ||
+					atomic_read(&prtd->error));
+	pr_debug("%s: out of buffer drain wait with ret %d\n", __func__, rc);
+	spin_lock_irqsave(&prtd->lock, *flags);
+	if (prtd->cmd_interrupt) {
+		pr_debug("%s: buffer drain interrupted by flush)\n", __func__);
+		rc = -EINTR;
+		prtd->cmd_interrupt = 0;
+	}
+	if (atomic_read(&prtd->error)) {
+		pr_err("%s: Got RESET EVENTS notification, return\n",
+			__func__);
+		rc = -ENETRESET;
+	}
+	return rc;
+}
+
+static int msm_compr_wait_for_stream_avail(struct msm_compr_audio *prtd,
+				    unsigned long *flags)
+{
+	int rc = 0;
+
+	pr_debug("next session is already in opened state\n");
+	prtd->next_stream = 1;
+	prtd->cmd_interrupt = 0;
+	spin_unlock_irqrestore(&prtd->lock, *flags);
+	/*
+	 * Wait for stream to be available, or the wait to be interrupted by
+	 * commands like flush or till a timeout of one second.
+	 */
+	rc = wait_event_timeout(prtd->wait_for_stream_avail,
+		prtd->stream_available || prtd->cmd_interrupt, 1 * HZ);
+	pr_err("%s:prtd->stream_available %d, prtd->cmd_interrupt %d rc %d\n",
+		   __func__, prtd->stream_available, prtd->cmd_interrupt, rc);
+
+	spin_lock_irqsave(&prtd->lock, *flags);
+	if (rc == 0) {
+		pr_err("%s: wait_for_stream_avail timed out\n",
+						__func__);
+		rc =  -ETIMEDOUT;
+	} else if (prtd->cmd_interrupt == 1) {
+		/*
+		 * This scenario might not happen as we do not allow
+		 * flush in transition state.
+		 */
+		pr_debug("%s: wait_for_stream_avail interrupted\n", __func__);
+		prtd->cmd_interrupt = 0;
+		prtd->stream_available = 0;
+		rc = -EINTR;
+	} else {
+		prtd->stream_available = 0;
+		rc = 0;
+	}
+	pr_debug("%s : rc = %d",  __func__, rc);
+	return rc;
+}
+
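+/*
+ * Compress trigger handler: START/PAUSE/STOP map onto ASM RUN, PAUSE and
+ * FLUSH; DRAIN sends any residual bytes and waits for the DSP EOS ack,
+ * while PARTIAL_DRAIN switches to the next gapless stream without
+ * waiting for EOS; NEXT_TRACK opens the alternate ASM stream of the
+ * session for gapless playback.
+ */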
+static int msm_compr_trigger(struct snd_compr_stream *cstream, int cmd)
+{
+	struct snd_compr_runtime *runtime = cstream->runtime;
+	struct msm_compr_audio *prtd = runtime->private_data;
+	struct snd_soc_pcm_runtime *rtd = cstream->private_data;
+	struct msm_compr_pdata *pdata =
+			snd_soc_platform_get_drvdata(rtd->platform);
+	uint32_t *volume = pdata->volume[rtd->dai_link->be_id];
+	struct audio_client *ac = prtd->audio_client;
+	unsigned long fe_id = rtd->dai_link->be_id;
+	int rc = 0;
+	int bytes_to_write;
+	unsigned long flags;
+	int stream_id;
+	uint32_t stream_index;
+	uint16_t bits_per_sample = 16;
+	union snd_codec_options *codec_options =
+		&(prtd->codec_param.codec.options);
+
+	spin_lock_irqsave(&prtd->lock, flags);
+	if (atomic_read(&prtd->error)) {
+		pr_err("%s Got RESET EVENTS notification, return immediately",
+			__func__);
+		spin_unlock_irqrestore(&prtd->lock, flags);
+		return 0;
+	}
+	spin_unlock_irqrestore(&prtd->lock, flags);
+
+	switch (cmd) {
+	case SNDRV_PCM_TRIGGER_START:
+		pr_debug("%s: SNDRV_PCM_TRIGGER_START\n", __func__);
+		atomic_set(&prtd->start, 1);
+
+		/*
+		 * compr_set_volume and compr_init_pp_params are used to
+		 * configure ASM volume, hence they are not needed for
+		 * compress passthrough playback.
+		 *
+		 * Compress passthrough volume is controlled in ADM by
+		 * adm_send_compressed_device_mute().
+		 */
+		if (prtd->compr_passthr == LEGACY_PCM &&
+			cstream->direction == SND_COMPRESS_PLAYBACK) {
+			/* set volume for the stream before RUN */
+			rc = msm_compr_set_volume(cstream,
+				volume[0], volume[1]);
+			if (rc)
+				pr_err("%s : Set Volume failed : %d\n",
+					__func__, rc);
+
+			rc = msm_compr_init_pp_params(cstream, ac);
+			if (rc)
+				pr_err("%s : init PP params failed : %d\n",
+					__func__, rc);
+		} else {
+			msm_compr_read_buffer(prtd);
+		}
+		/* issue RUN command for the stream */
+		q6asm_run_nowait(prtd->audio_client, prtd->run_mode,
+				 prtd->start_delay_msw, prtd->start_delay_lsw);
+		break;
+	case SNDRV_PCM_TRIGGER_STOP:
+		spin_lock_irqsave(&prtd->lock, flags);
+		pr_debug("%s: SNDRV_PCM_TRIGGER_STOP transition %d\n", __func__,
+					prtd->gapless_state.gapless_transition);
+		stream_id = ac->stream_id;
+		atomic_set(&prtd->start, 0);
+		if (cstream->direction == SND_COMPRESS_CAPTURE) {
+			q6asm_cmd_nowait(prtd->audio_client, CMD_PAUSE);
+			atomic_set(&prtd->xrun, 0);
+			prtd->received_total = 0;
+			prtd->bytes_copied = 0;
+			prtd->bytes_read = 0;
+			prtd->bytes_read_offset = 0;
+			prtd->byte_offset  = 0;
+			prtd->app_pointer  = 0;
+			spin_unlock_irqrestore(&prtd->lock, flags);
+			break;
+		}
+		if (prtd->next_stream) {
+			pr_debug("%s: interrupt next track wait queues\n",
+								__func__);
+			prtd->cmd_interrupt = 1;
+			wake_up(&prtd->wait_for_stream_avail);
+			prtd->next_stream = 0;
+		}
+		if (atomic_read(&prtd->eos)) {
+			pr_debug("%s: interrupt eos wait queues", __func__);
+			/*
+			 * Gapless playback does not wait for eos, do not set
+			 * cmd_int and do not wake up eos_wait during gapless
+			 * transition
+			 */
+			if (!prtd->gapless_state.gapless_transition) {
+				prtd->cmd_interrupt = 1;
+				wake_up(&prtd->eos_wait);
+			}
+			atomic_set(&prtd->eos, 0);
+		}
+		if (atomic_read(&prtd->drain)) {
+			pr_debug("%s: interrupt drain wait queues", __func__);
+			prtd->cmd_interrupt = 1;
+			prtd->drain_ready = 1;
+			wake_up(&prtd->drain_wait);
+			atomic_set(&prtd->drain, 0);
+		}
+		prtd->last_buffer = 0;
+		prtd->cmd_ack = 0;
+		if (!prtd->gapless_state.gapless_transition) {
+			pr_debug("issue CMD_FLUSH stream_id %d\n", stream_id);
+			spin_unlock_irqrestore(&prtd->lock, flags);
+			q6asm_stream_cmd(
+				prtd->audio_client, CMD_FLUSH, stream_id);
+			spin_lock_irqsave(&prtd->lock, flags);
+		} else {
+			prtd->first_buffer = 0;
+		}
+		/* FIXME. only reset if flush was successful */
+		prtd->byte_offset  = 0;
+		prtd->copied_total = 0;
+		prtd->app_pointer  = 0;
+		prtd->bytes_received = 0;
+		prtd->bytes_sent = 0;
+		prtd->marker_timestamp = 0;
+
+		atomic_set(&prtd->xrun, 0);
+		spin_unlock_irqrestore(&prtd->lock, flags);
+		break;
+	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+		pr_debug("SNDRV_PCM_TRIGGER_PAUSE_PUSH transition %d\n",
+				prtd->gapless_state.gapless_transition);
+		if (!prtd->gapless_state.gapless_transition) {
+			pr_debug("issue CMD_PAUSE stream_id %d\n",
+				  ac->stream_id);
+			q6asm_stream_cmd_nowait(ac, CMD_PAUSE, ac->stream_id);
+			atomic_set(&prtd->start, 0);
+		}
+		break;
+	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+		pr_debug("SNDRV_PCM_TRIGGER_PAUSE_RELEASE transition %d\n",
+				   prtd->gapless_state.gapless_transition);
+		if (!prtd->gapless_state.gapless_transition) {
+			atomic_set(&prtd->start, 1);
+			q6asm_run_nowait(prtd->audio_client, prtd->run_mode,
+					 0, 0);
+		}
+		break;
+	case SND_COMPR_TRIGGER_PARTIAL_DRAIN:
+		pr_debug("%s: SND_COMPR_TRIGGER_PARTIAL_DRAIN\n", __func__);
+		if (!prtd->gapless_state.use_dsp_gapless_mode) {
+			pr_debug("%s: set partial drain as drain\n", __func__);
+			cmd = SND_COMPR_TRIGGER_DRAIN;
+		}
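+		/* fallthrough to the common drain handling below */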
+	case SND_COMPR_TRIGGER_DRAIN:
+		pr_debug("%s: SNDRV_COMPRESS_DRAIN\n", __func__);
+		/* Make sure all the data is sent to DSP before sending EOS */
+		spin_lock_irqsave(&prtd->lock, flags);
+
+		if (!atomic_read(&prtd->start)) {
+			pr_err("%s: stream is not in started state\n",
+				__func__);
+			rc = -EPERM;
+			spin_unlock_irqrestore(&prtd->lock, flags);
+			break;
+		}
+		if (prtd->bytes_received > prtd->copied_total) {
+			pr_debug("%s: wait till all the data is sent to dsp\n",
+				__func__);
+			rc = msm_compr_drain_buffer(prtd, &flags);
+			if (rc || !atomic_read(&prtd->start)) {
+				if (rc != -ENETRESET)
+					rc = -EINTR;
+				spin_unlock_irqrestore(&prtd->lock, flags);
+				break;
+			}
+			/*
+			 * FIXME: Bug.
+			 * Write(32767)
+			 * Start
+			 * Drain <- Indefinite wait
+			 * sol1 : if (prtd->copied_total) then wait?
+			 * sol2 : (prtd->cmd_interrupt || prtd->drain_ready ||
+			 *        atomic_read(xrun))
+			 */
+			bytes_to_write = prtd->bytes_received
+						- prtd->copied_total;
+			WARN(bytes_to_write > runtime->fragment_size,
+			     "last write %d cannot be > than fragment_size",
+			     bytes_to_write);
+
+			if (bytes_to_write > 0) {
+				pr_debug("%s: send %d partial bytes at the end",
+				       __func__, bytes_to_write);
+				atomic_set(&prtd->xrun, 0);
+				prtd->last_buffer = 1;
+				msm_compr_send_buffer(prtd);
+			}
+		}
+
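+		/*
+		 * Partial drain with a queued next stream: drain the last
+		 * buffer, send EOS plus a zero-length buffer so the DSP
+		 * returns a final WRITE_DONE, wait out the residual DSP
+		 * buffering, then switch ac->stream_id to the next gapless
+		 * stream and RUN it.
+		 */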
+		if ((cmd == SND_COMPR_TRIGGER_PARTIAL_DRAIN) &&
+		    (prtd->gapless_state.set_next_stream_id)) {
+			/* wait for the last buffer to be returned */
+
+			if (prtd->last_buffer) {
+				pr_debug("%s: last buffer drain\n", __func__);
+				rc = msm_compr_drain_buffer(prtd, &flags);
+				if (rc || !atomic_read(&prtd->start)) {
+					spin_unlock_irqrestore(&prtd->lock,
+									flags);
+					break;
+				}
+			}
+			/* send EOS */
+			prtd->eos_ack = 0;
+			atomic_set(&prtd->eos, 1);
+			pr_debug("issue CMD_EOS stream_id %d\n", ac->stream_id);
+			q6asm_stream_cmd_nowait(ac, CMD_EOS, ac->stream_id);
+			pr_info("PARTIAL DRAIN, do not wait for EOS ack\n");
+
+			/* send a zero length buffer */
+			atomic_set(&prtd->xrun, 0);
+			msm_compr_send_buffer(prtd);
+
+			/* wait for the zero length buffer to be returned */
+			pr_debug("%s: zero length buffer drain\n", __func__);
+			rc = msm_compr_drain_buffer(prtd, &flags);
+			if (rc || !atomic_read(&prtd->start)) {
+				spin_unlock_irqrestore(&prtd->lock, flags);
+				break;
+			}
+
+			/* sleep for additional duration partial drain */
+			atomic_set(&prtd->drain, 1);
+			prtd->drain_ready = 0;
+			pr_debug("%s, additional sleep: %d\n", __func__,
+				 prtd->partial_drain_delay);
+			spin_unlock_irqrestore(&prtd->lock, flags);
+			rc = wait_event_timeout(prtd->drain_wait,
+				prtd->drain_ready || prtd->cmd_interrupt,
+				msecs_to_jiffies(prtd->partial_drain_delay));
+			pr_debug("%s: out of additional wait for low sample rate\n",
+				 __func__);
+			spin_lock_irqsave(&prtd->lock, flags);
+			if (prtd->cmd_interrupt) {
+				pr_debug("%s: additional wait interrupted by flush)\n",
+					 __func__);
+				rc = -EINTR;
+				prtd->cmd_interrupt = 0;
+				spin_unlock_irqrestore(&prtd->lock, flags);
+				break;
+			}
+
+			/* move to next stream and reset vars */
+			pr_debug("%s: Moving to next stream in gapless\n",
+								__func__);
+			ac->stream_id = NEXT_STREAM_ID(ac->stream_id);
+			prtd->byte_offset = 0;
+			prtd->app_pointer  = 0;
+			prtd->first_buffer = 1;
+			prtd->last_buffer = 0;
+			/*
+			 * Set gapless transition flag only if EOS hasn't been
+			 * acknowledged already.
+			 */
+			if (atomic_read(&prtd->eos))
+				prtd->gapless_state.gapless_transition = 1;
+			prtd->marker_timestamp = 0;
+
+			/*
+			 * Don't reset these, as they map directly to
+			 * total_bytes_transferred and total_bytes_available;
+			 * only total_bytes_transferred will be updated in
+			 * the next avail() ioctl:
+			 *	prtd->copied_total = 0;
+			 *	prtd->bytes_received = 0;
+			 */
+			atomic_set(&prtd->drain, 0);
+			atomic_set(&prtd->xrun, 1);
+			pr_debug("%s: issue CMD_RUN", __func__);
+			q6asm_run_nowait(prtd->audio_client, 0, 0, 0);
+			spin_unlock_irqrestore(&prtd->lock, flags);
+			break;
+		}
+		/*
+		 * Moving to the next stream failed, so reset the gapless
+		 * state and set the next stream id for the same session so
+		 * that the same stream can be used for gapless playback.
+		 */
+		prtd->gapless_state.set_next_stream_id = false;
+		prtd->gapless_state.gapless_transition = 0;
+		pr_debug("%s:CMD_EOS stream_id %d\n", __func__, ac->stream_id);
+
+		prtd->eos_ack = 0;
+		atomic_set(&prtd->eos, 1);
+		q6asm_stream_cmd_nowait(ac, CMD_EOS, ac->stream_id);
+
+		spin_unlock_irqrestore(&prtd->lock, flags);
+
+		/* Wait indefinitely for DRAIN; a flush can also signal this */
+		rc = wait_event_interruptible(prtd->eos_wait,
+						(prtd->eos_ack ||
+						prtd->cmd_interrupt ||
+						atomic_read(&prtd->error)));
+
+		if (rc < 0)
+			pr_err("%s: EOS wait failed\n", __func__);
+
+		pr_debug("%s: SNDRV_COMPRESS_DRAIN  out of wait for EOS\n",
+			  __func__);
+
+		if (prtd->cmd_interrupt)
+			rc = -EINTR;
+
+		if (atomic_read(&prtd->error)) {
+			pr_err("%s: Got RESET EVENTS notification, return\n",
+				__func__);
+			rc = -ENETRESET;
+		}
+
+		/*FIXME : what if a flush comes while PC is here */
+		if (rc == 0) {
+			/*
+			 * Failed to open second stream in DSP for gapless
+			 * so prepare the current stream in session
+			 * for gapless playback
+			 */
+			spin_lock_irqsave(&prtd->lock, flags);
+			pr_debug("%s:issue CMD_PAUSE stream_id %d",
+					  __func__, ac->stream_id);
+			q6asm_stream_cmd_nowait(ac, CMD_PAUSE, ac->stream_id);
+			prtd->cmd_ack = 0;
+			spin_unlock_irqrestore(&prtd->lock, flags);
+
+			/* Cache this time as the last known timestamp */
+			if (pdata->use_legacy_api)
+				q6asm_get_session_time_legacy(
+							prtd->audio_client,
+						       &prtd->marker_timestamp);
+			else
+				q6asm_get_session_time(prtd->audio_client,
+						       &prtd->marker_timestamp);
+
+			spin_lock_irqsave(&prtd->lock, flags);
+			/*
+			 * Don't reset these as these vars map to
+			 * total_bytes_transferred and total_bytes_available.
+			 * Just total_bytes_transferred will be updated
+			 * in the next avail() ioctl.
+			 * prtd->copied_total = 0;
+			 * prtd->bytes_received = 0;
+			 * do not reset prtd->bytes_sent as well as the same
+			 * session is used for gapless playback
+			 */
+			prtd->byte_offset = 0;
+
+			prtd->app_pointer  = 0;
+			prtd->first_buffer = 1;
+			prtd->last_buffer = 0;
+			atomic_set(&prtd->drain, 0);
+			atomic_set(&prtd->xrun, 1);
+			spin_unlock_irqrestore(&prtd->lock, flags);
+
+			pr_debug("%s:issue CMD_FLUSH ac->stream_id %d",
+					      __func__, ac->stream_id);
+			q6asm_stream_cmd(ac, CMD_FLUSH, ac->stream_id);
+
+			q6asm_run_nowait(prtd->audio_client, 0, 0, 0);
+		}
+		prtd->cmd_interrupt = 0;
+		break;
+	case SND_COMPR_TRIGGER_NEXT_TRACK:
+		if (!prtd->gapless_state.use_dsp_gapless_mode) {
+			pr_debug("%s: ignore trigger next track\n", __func__);
+			rc = 0;
+			break;
+		}
+		pr_debug("%s: SND_COMPR_TRIGGER_NEXT_TRACK\n", __func__);
+		spin_lock_irqsave(&prtd->lock, flags);
+		rc = 0;
+		/* next stream in gapless */
+		stream_id = NEXT_STREAM_ID(ac->stream_id);
+		/*
+		 * Wait if stream 1 has not completed before honoring next
+		 * track for stream 3. Scenario happens if second clip is
+		 * small and fills in one buffer so next track will be
+		 * called immediately.
+		 */
+		stream_index = STREAM_ARRAY_INDEX(stream_id);
+		if (stream_index >= MAX_NUMBER_OF_STREAMS) {
+			pr_err("%s: Invalid stream index: %d", __func__,
+				stream_index);
+			spin_unlock_irqrestore(&prtd->lock, flags);
+			rc = -EINVAL;
+			break;
+		}
+
+		if (prtd->gapless_state.stream_opened[stream_index]) {
+			if (prtd->gapless_state.gapless_transition) {
+				rc = msm_compr_wait_for_stream_avail(prtd,
+								    &flags);
+			} else {
+				/*
+				 * If session is already opened break out if
+				 * the state is not gapless transition. This
+				 * is when seek happens after the last buffer
+				 * is sent to the driver. Next track would be
+				 * called again after last buffer is sent.
+				 */
+				pr_debug("next session is in opened state\n");
+				spin_unlock_irqrestore(&prtd->lock, flags);
+				break;
+			}
+		}
+		spin_unlock_irqrestore(&prtd->lock, flags);
+		if (rc < 0) {
+			/*
+			 * If the return code is EINTR, reset it to zero:
+			 * tinycompress treats EINTR as an error and prevents
+			 * PARTIAL DRAIN, but EINTR is not an error here; the
+			 * wait for stream avail was merely interrupted by
+			 * some other command such as FLUSH.
+			 */
+			if (rc == -EINTR) {
+				pr_debug("%s: EINTR reset rc to 0\n", __func__);
+				rc = 0;
+			}
+			break;
+		}
+
+		if (prtd->codec_param.codec.format == SNDRV_PCM_FORMAT_S24_LE)
+			bits_per_sample = 24;
+		else if (prtd->codec_param.codec.format ==
+			 SNDRV_PCM_FORMAT_S32_LE)
+			bits_per_sample = 32;
+		else if (prtd->codec == FORMAT_FLAC && codec_options &&
+			(codec_options->flac_dec.sample_size != 0))
+			bits_per_sample = codec_options->flac_dec.sample_size;
+
+		pr_debug("%s: open_write stream_id %d bits_per_sample %d",
+				__func__, stream_id, bits_per_sample);
+		rc = q6asm_stream_open_write_v4(prtd->audio_client,
+				prtd->codec, bits_per_sample,
+				stream_id,
+				prtd->gapless_state.use_dsp_gapless_mode);
+		if (rc < 0) {
+			pr_err("%s: Session out open failed for gapless\n",
+				 __func__);
+			break;
+		}
+
+		spin_lock_irqsave(&prtd->lock, flags);
+		prtd->gapless_state.stream_opened[stream_index] = 1;
+		prtd->gapless_state.set_next_stream_id = true;
+		spin_unlock_irqrestore(&prtd->lock, flags);
+
+		rc = msm_compr_send_media_format_block(cstream,
+						stream_id, false);
+		if (rc < 0) {
+			pr_err("%s, failed to send media format block\n",
+				__func__);
+			break;
+		}
+		msm_compr_send_dec_params(cstream, pdata->dec_params[fe_id],
+					  stream_id);
+		break;
+	}
+
+	return rc;
+}
+
+static int msm_compr_pointer(struct snd_compr_stream *cstream,
+					struct snd_compr_tstamp *arg)
+{
+	struct snd_compr_runtime *runtime = cstream->runtime;
+	struct snd_soc_pcm_runtime *rtd = cstream->private_data;
+	struct msm_compr_audio *prtd = runtime->private_data;
+	struct msm_compr_pdata *pdata = NULL;
+	struct snd_compr_tstamp tstamp;
+	uint64_t timestamp = 0;
+	int rc = 0, first_buffer;
+	unsigned long flags;
+	uint32_t gapless_transition;
+
+	pdata = snd_soc_platform_get_drvdata(rtd->platform);
+	pr_debug("%s\n", __func__);
+	memset(&tstamp, 0x0, sizeof(struct snd_compr_tstamp));
+
+	spin_lock_irqsave(&prtd->lock, flags);
+	tstamp.sampling_rate = prtd->sample_rate;
+	tstamp.byte_offset = prtd->byte_offset;
+	if (cstream->direction == SND_COMPRESS_PLAYBACK)
+		tstamp.copied_total = prtd->copied_total;
+	else if (cstream->direction == SND_COMPRESS_CAPTURE)
+		tstamp.copied_total = prtd->received_total;
+	first_buffer = prtd->first_buffer;
+	if (atomic_read(&prtd->error)) {
+		pr_err_ratelimited("%s Got RESET EVENTS notification, return error\n",
+				   __func__);
+		if (cstream->direction == SND_COMPRESS_PLAYBACK)
+			runtime->total_bytes_transferred = tstamp.copied_total;
+		else
+			runtime->total_bytes_available = tstamp.copied_total;
+		tstamp.pcm_io_frames = 0;
+		memcpy(arg, &tstamp, sizeof(struct snd_compr_tstamp));
+		spin_unlock_irqrestore(&prtd->lock, flags);
+		return -ENETRESET;
+	}
+	if (cstream->direction == SND_COMPRESS_PLAYBACK) {
+
+		gapless_transition = prtd->gapless_state.gapless_transition;
+		spin_unlock_irqrestore(&prtd->lock, flags);
+		if (gapless_transition)
+			pr_debug("%s session time in gapless transition",
+				__func__);
+		/*
+		 * - Do not query if no buffer has been given.
+		 * - Do not query on a gapless transition: playback for the
+		 *   2nd stream can start (thus returning time starting from
+		 *   0) before the driver knows about the EOS of the first
+		 *   stream.
+		 */
+		if (!first_buffer || gapless_transition) {
+
+			if (pdata->use_legacy_api)
+				rc = q6asm_get_session_time_legacy(
+				prtd->audio_client, &prtd->marker_timestamp);
+			else
+				rc = q6asm_get_session_time(
+				prtd->audio_client, &prtd->marker_timestamp);
+			if (rc < 0) {
+				pr_err("%s: Get Session Time return =%lld\n",
+					__func__, timestamp);
+				if (atomic_read(&prtd->error))
+					return -ENETRESET;
+				else
+					return -EAGAIN;
+			}
+		}
+	} else {
+		spin_unlock_irqrestore(&prtd->lock, flags);
+	}
+	timestamp = prtd->marker_timestamp;
+
+	/* DSP returns timestamp in usec */
+	pr_debug("%s: timestamp = %lld usec\n", __func__, timestamp);
+	timestamp *= prtd->sample_rate;
+	tstamp.pcm_io_frames = (snd_pcm_uframes_t)div64_u64(timestamp, 1000000);
+	memcpy(arg, &tstamp, sizeof(struct snd_compr_tstamp));
+
+	return 0;
+}
+
+static int msm_compr_ack(struct snd_compr_stream *cstream,
+			size_t count)
+{
+	struct snd_compr_runtime *runtime = cstream->runtime;
+	struct msm_compr_audio *prtd = runtime->private_data;
+	void *src, *dstn;
+	size_t copy;
+	unsigned long flags;
+
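+	/*
+	 * This copy path is not exercised by userspace; warn and bail out.
+	 * The code below is unreachable by design and kept for reference.
+	 */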
+	WARN(1, "This path is untested");
+	return -EINVAL;
+
+	pr_debug("%s: count = %zd\n", __func__, count);
+	if (!prtd->buffer) {
+		pr_err("%s: buffer is not allocated yet\n", __func__);
+		return -EINVAL;
+	}
+	src = runtime->buffer + prtd->app_pointer;
+	dstn = prtd->buffer + prtd->app_pointer;
+	if (count < prtd->buffer_size - prtd->app_pointer) {
+		memcpy(dstn, src, count);
+		prtd->app_pointer += count;
+	} else {
+		copy = prtd->buffer_size - prtd->app_pointer;
+		memcpy(dstn, src, copy);
+		memcpy(prtd->buffer, runtime->buffer, count - copy);
+		prtd->app_pointer = count - copy;
+	}
+
+	/*
+	 * If the stream is started and all the bytes received were
+	 * copied to DSP, the newly received bytes should be
+	 * sent right away
+	 */
+	spin_lock_irqsave(&prtd->lock, flags);
+
+	if (atomic_read(&prtd->start) &&
+		prtd->bytes_received == prtd->copied_total) {
+		prtd->bytes_received += count;
+		msm_compr_send_buffer(prtd);
+	} else
+		prtd->bytes_received += count;
+
+	spin_unlock_irqrestore(&prtd->lock, flags);
+
+	return 0;
+}
+
+static int msm_compr_playback_copy(struct snd_compr_stream *cstream,
+				  char __user *buf, size_t count)
+{
+	struct snd_compr_runtime *runtime = cstream->runtime;
+	struct msm_compr_audio *prtd = runtime->private_data;
+	void *dstn;
+	size_t copy;
+	uint64_t bytes_available = 0;
+	unsigned long flags;
+
+	pr_debug("%s: count = %zd\n", __func__, count);
+	if (!prtd->buffer) {
+		pr_err("%s: buffer is not allocated yet\n", __func__);
+		return 0;
+	}
+
+	spin_lock_irqsave(&prtd->lock, flags);
+	if (atomic_read(&prtd->error)) {
+		pr_err("%s: Got RESET EVENTS notification\n", __func__);
+		spin_unlock_irqrestore(&prtd->lock, flags);
+		return -ENETRESET;
+	}
+	spin_unlock_irqrestore(&prtd->lock, flags);
+
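+	/*
+	 * Copy into the shared circular buffer; a write that would run past
+	 * the end is split and wrapped to the start.
+	 */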
+	dstn = prtd->buffer + prtd->app_pointer;
+	if (count < prtd->buffer_size - prtd->app_pointer) {
+		if (copy_from_user(dstn, buf, count))
+			return -EFAULT;
+		prtd->app_pointer += count;
+	} else {
+		copy = prtd->buffer_size - prtd->app_pointer;
+		if (copy_from_user(dstn, buf, copy))
+			return -EFAULT;
+		if (copy_from_user(prtd->buffer, buf + copy, count - copy))
+			return -EFAULT;
+		prtd->app_pointer = count - copy;
+	}
+
+	/*
+	 * If the stream is started and an xrun has occurred, send data to
+	 * the DSP as soon as at least one full fragment is available.
+	 */
+	spin_lock_irqsave(&prtd->lock, flags);
+	prtd->bytes_received += count;
+	if (atomic_read(&prtd->start)) {
+		if (atomic_read(&prtd->xrun)) {
+			pr_debug("%s: in xrun, count = %zd\n", __func__, count);
+			bytes_available = prtd->bytes_received - prtd->copied_total;
+			if (bytes_available >= runtime->fragment_size) {
+				pr_debug("%s: handle xrun, bytes_to_write = %llu\n",
+					 __func__,
+					 bytes_available);
+				atomic_set(&prtd->xrun, 0);
+				msm_compr_send_buffer(prtd);
+			} /* else not sufficient data */
+		} /* writes will continue on the next write_done */
+	}
+
+	spin_unlock_irqrestore(&prtd->lock, flags);
+
+	return count;
+}
+
+static int msm_compr_capture_copy(struct snd_compr_stream *cstream,
+					char __user *buf, size_t count)
+{
+	struct snd_compr_runtime *runtime = cstream->runtime;
+	struct msm_compr_audio *prtd = runtime->private_data;
+	void *source;
+	unsigned long flags;
+
+	pr_debug("%s: count = %zd\n", __func__, count);
+	if (!prtd->buffer) {
+		pr_err("%s: buffer is not allocated yet\n", __func__);
+		return 0;
+	}
+
+	spin_lock_irqsave(&prtd->lock, flags);
+	if (atomic_read(&prtd->error)) {
+		pr_err("%s: Got RESET EVENTS notification\n", __func__);
+		spin_unlock_irqrestore(&prtd->lock, flags);
+		return -ENETRESET;
+	}
+
+	source = prtd->buffer + prtd->app_pointer;
+	/* check if we have the requested amount of data to copy to user */
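+	/*
+	 * copy_to_user() may fault and sleep, so the spinlock is dropped
+	 * around the copy and re-acquired before updating the counters.
+	 */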
+	if (count <= prtd->received_total - prtd->bytes_copied) {
+		spin_unlock_irqrestore(&prtd->lock, flags);
+		if (copy_to_user(buf, source, count)) {
+			pr_err("%s: copy_to_user failed\n", __func__);
+			return -EFAULT;
+		}
+		spin_lock_irqsave(&prtd->lock, flags);
+		prtd->app_pointer += count;
+		if (prtd->app_pointer >= prtd->buffer_size)
+			prtd->app_pointer -= prtd->buffer_size;
+		prtd->bytes_copied += count;
+	}
+	msm_compr_read_buffer(prtd);
+
+	spin_unlock_irqrestore(&prtd->lock, flags);
+	return count;
+}
+
+static int msm_compr_copy(struct snd_compr_stream *cstream,
+				char __user *buf, size_t count)
+{
+	int ret = 0;
+
+	pr_debug("%s\n", __func__);
+	if (cstream->direction == SND_COMPRESS_PLAYBACK)
+		ret = msm_compr_playback_copy(cstream, buf, count);
+	else if (cstream->direction == SND_COMPRESS_CAPTURE)
+		ret = msm_compr_capture_copy(cstream, buf, count);
+	return ret;
+}
+
+static int msm_compr_get_caps(struct snd_compr_stream *cstream,
+				struct snd_compr_caps *arg)
+{
+	struct snd_compr_runtime *runtime = cstream->runtime;
+	struct msm_compr_audio *prtd = runtime->private_data;
+	int ret = 0;
+
+	pr_debug("%s\n", __func__);
+	if ((arg != NULL) && (prtd != NULL)) {
+		memcpy(arg, &prtd->compr_cap, sizeof(struct snd_compr_caps));
+	} else {
+		ret = -EINVAL;
+		pr_err("%s: arg (0x%pK), prtd (0x%pK)\n", __func__, arg, prtd);
+	}
+
+	return ret;
+}
+
+static int msm_compr_get_codec_caps(struct snd_compr_stream *cstream,
+				struct snd_compr_codec_caps *codec)
+{
+	pr_debug("%s\n", __func__);
+
+	switch (codec->codec) {
+	case SND_AUDIOCODEC_MP3:
+		codec->num_descriptors = 2;
+		codec->descriptor[0].max_ch = 2;
+		memcpy(codec->descriptor[0].sample_rates,
+		       supported_sample_rates,
+		       sizeof(supported_sample_rates));
+		codec->descriptor[0].num_sample_rates =
+			ARRAY_SIZE(supported_sample_rates);
+		codec->descriptor[0].bit_rate[0] = 320; /* 320kbps */
+		codec->descriptor[0].bit_rate[1] = 128;
+		codec->descriptor[0].num_bitrates = 2;
+		codec->descriptor[0].profiles = 0;
+		codec->descriptor[0].modes = SND_AUDIOCHANMODE_MP3_STEREO;
+		codec->descriptor[0].formats = 0;
+		break;
+	case SND_AUDIOCODEC_AAC:
+		codec->num_descriptors = 2;
+		codec->descriptor[1].max_ch = 2;
+		memcpy(codec->descriptor[1].sample_rates,
+		       supported_sample_rates,
+		       sizeof(supported_sample_rates));
+		codec->descriptor[1].num_sample_rates =
+			ARRAY_SIZE(supported_sample_rates);
+		codec->descriptor[1].bit_rate[0] = 320; /* 320kbps */
+		codec->descriptor[1].bit_rate[1] = 128;
+		codec->descriptor[1].num_bitrates = 2;
+		codec->descriptor[1].profiles = 0;
+		codec->descriptor[1].modes = 0;
+		codec->descriptor[1].formats =
+			(SND_AUDIOSTREAMFORMAT_MP4ADTS |
+				SND_AUDIOSTREAMFORMAT_RAW);
+		break;
+	case SND_AUDIOCODEC_AC3:
+	case SND_AUDIOCODEC_EAC3:
+	case SND_AUDIOCODEC_FLAC:
+	case SND_AUDIOCODEC_VORBIS:
+	case SND_AUDIOCODEC_ALAC:
+	case SND_AUDIOCODEC_APE:
+	case SND_AUDIOCODEC_DTS:
+	case SND_AUDIOCODEC_DSD:
+	case SND_AUDIOCODEC_TRUEHD:
+	case SND_AUDIOCODEC_IEC61937:
+	case SND_AUDIOCODEC_APTX:
+		break;
+	default:
+		pr_err("%s: Unsupported audio codec %d\n",
+			__func__, codec->codec);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int msm_compr_set_metadata(struct snd_compr_stream *cstream,
+				struct snd_compr_metadata *metadata)
+{
+	struct msm_compr_audio *prtd;
+	struct audio_client *ac;
+
+	pr_debug("%s\n", __func__);
+
+	if (!metadata || !cstream)
+		return -EINVAL;
+
+	prtd = cstream->runtime->private_data;
+	if (!prtd || !prtd->audio_client) {
+		pr_err("%s: prtd or audio client is NULL\n", __func__);
+		return -EINVAL;
+	}
+
+	if (((metadata->key == SNDRV_COMPRESS_ENCODER_PADDING) ||
+	     (metadata->key == SNDRV_COMPRESS_ENCODER_DELAY)) &&
+	     (prtd->compr_passthr != LEGACY_PCM)) {
+		pr_debug("%s: No trailing silence for compress_type[%d]\n",
+			__func__, prtd->compr_passthr);
+		return 0;
+	}
+
+	ac = prtd->audio_client;
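+	/*
+	 * For gapless playback the encoder delay is the number of padding
+	 * samples at the start of a track and the encoder padding the
+	 * number at its end; the DSP drops both so consecutive tracks join
+	 * seamlessly.
+	 */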
+	if (metadata->key == SNDRV_COMPRESS_ENCODER_PADDING) {
+		pr_debug("%s: got encoder padding %u\n",
+			 __func__, metadata->value[0]);
+		prtd->gapless_state.trailing_samples_drop = metadata->value[0];
+	} else if (metadata->key == SNDRV_COMPRESS_ENCODER_DELAY) {
+		pr_debug("%s: got encoder delay %u\n",
+			 __func__, metadata->value[0]);
+		prtd->gapless_state.initial_samples_drop = metadata->value[0];
+	} else if (metadata->key == SNDRV_COMPRESS_RENDER_MODE) {
+		return msm_compr_set_render_mode(prtd, metadata->value[0]);
+	} else if (metadata->key == SNDRV_COMPRESS_CLK_REC_MODE) {
+		return msm_compr_set_clk_rec_mode(ac, metadata->value[0]);
+	} else if (metadata->key == SNDRV_COMPRESS_RENDER_WINDOW) {
+		return msm_compr_set_render_window(
+				ac,
+				metadata->value[0],
+				metadata->value[1],
+				metadata->value[2],
+				metadata->value[3]);
+	} else if (metadata->key == SNDRV_COMPRESS_START_DELAY) {
+		prtd->start_delay_lsw = metadata->value[0];
+		prtd->start_delay_msw = metadata->value[1];
+	} else if (metadata->key ==
+				SNDRV_COMPRESS_ENABLE_ADJUST_SESSION_CLOCK) {
+		return msm_compr_enable_adjust_session_clock(ac,
+				metadata->value[0]);
+	} else if (metadata->key == SNDRV_COMPRESS_ADJUST_SESSION_CLOCK) {
+		return msm_compr_adjust_session_clock(ac,
+				metadata->value[0],
+				metadata->value[1]);
+	}
+
+	return 0;
+}
+
+static int msm_compr_get_metadata(struct snd_compr_stream *cstream,
+				struct snd_compr_metadata *metadata)
+{
+	struct msm_compr_audio *prtd;
+	struct audio_client *ac;
+	int ret = -EINVAL;
+
+	pr_debug("%s\n", __func__);
+
+	if (!metadata || !cstream || !cstream->runtime)
+		return ret;
+
+	if (metadata->key != SNDRV_COMPRESS_PATH_DELAY) {
+		pr_err("%s: unsupported key %d\n", __func__, metadata->key);
+		return ret;
+	}
+
+	prtd = cstream->runtime->private_data;
+	if (!prtd || !prtd->audio_client) {
+		pr_err("%s: prtd or audio client is NULL\n", __func__);
+		return ret;
+	}
+
+	ac = prtd->audio_client;
+	ret = q6asm_get_path_delay(prtd->audio_client);
+	if (ret) {
+		pr_err("%s: get_path_delay failed, ret=%d\n", __func__, ret);
+		return ret;
+	}
+
+	pr_debug("%s: path delay (in us) %u\n", __func__, ac->path_delay);
+
+	metadata->value[0] = ac->path_delay;
+
+	return ret;
+}
+
+
+static int msm_compr_set_next_track_param(struct snd_compr_stream *cstream,
+				union snd_codec_options *codec_options)
+{
+	struct msm_compr_audio *prtd;
+	struct audio_client *ac;
+	int ret = 0;
+
+	if (!codec_options || !cstream)
+		return -EINVAL;
+
+	prtd = cstream->runtime->private_data;
+	if (!prtd || !prtd->audio_client) {
+		pr_err("%s: prtd or audio client is NULL\n", __func__);
+		return -EINVAL;
+	}
+
+	ac = prtd->audio_client;
+
+	pr_debug("%s: got codec options for codec type %u\n",
+		__func__, prtd->codec);
+	switch (prtd->codec) {
+	case FORMAT_WMA_V9:
+	case FORMAT_WMA_V10PRO:
+	case FORMAT_FLAC:
+	case FORMAT_VORBIS:
+	case FORMAT_ALAC:
+	case FORMAT_APE:
+		memcpy(&(prtd->gapless_state.codec_options),
+			codec_options,
+			sizeof(union snd_codec_options));
+		ret = msm_compr_send_media_format_block(cstream,
+						ac->stream_id, true);
+		if (ret < 0) {
+			pr_err("%s: failed to send media format block\n",
+				__func__);
+		}
+		break;
+
+	default:
+		pr_debug("%s: Ignore sending CMD Format block\n",
+			__func__);
+		break;
+	}
+
+	return ret;
+}
+
+static int msm_compr_volume_put(struct snd_kcontrol *kcontrol,
+				struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_component *comp = snd_kcontrol_chip(kcontrol);
+	unsigned long fe_id = kcontrol->private_value;
+	struct msm_compr_pdata *pdata = (struct msm_compr_pdata *)
+			snd_soc_component_get_drvdata(comp);
+	struct snd_compr_stream *cstream = NULL;
+	uint32_t *volume = NULL;
+
+	if (fe_id >= MSM_FRONTEND_DAI_MAX) {
+		pr_err("%s Received out of bounds fe_id %lu\n",
+			__func__, fe_id);
+		return -EINVAL;
+	}
+
+	cstream = pdata->cstream[fe_id];
+	volume = pdata->volume[fe_id];
+
+	volume[0] = ucontrol->value.integer.value[0];
+	volume[1] = ucontrol->value.integer.value[1];
+	pr_debug("%s: fe_id %lu left_vol %d right_vol %d\n",
+		 __func__, fe_id, volume[0], volume[1]);
+	if (cstream)
+		msm_compr_set_volume(cstream, volume[0], volume[1]);
+	return 0;
+}
+
+static int msm_compr_volume_get(struct snd_kcontrol *kcontrol,
+				struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_component *comp = snd_kcontrol_chip(kcontrol);
+	unsigned long fe_id = kcontrol->private_value;
+
+	struct msm_compr_pdata *pdata =
+		snd_soc_component_get_drvdata(comp);
+	uint32_t *volume = NULL;
+
+	if (fe_id >= MSM_FRONTEND_DAI_MAX) {
+		pr_err("%s: Received out of bounds fe_id %lu\n",
+			__func__, fe_id);
+		return -EINVAL;
+	}
+
+	volume = pdata->volume[fe_id];
+	pr_debug("%s: fe_id %lu\n", __func__, fe_id);
+	ucontrol->value.integer.value[0] = volume[0];
+	ucontrol->value.integer.value[1] = volume[1];
+
+	return 0;
+}
+
+static int msm_compr_audio_effects_config_put(struct snd_kcontrol *kcontrol,
+					   struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_component *comp = snd_kcontrol_chip(kcontrol);
+	unsigned long fe_id = kcontrol->private_value;
+	struct msm_compr_pdata *pdata = (struct msm_compr_pdata *)
+			snd_soc_component_get_drvdata(comp);
+	struct msm_compr_audio_effects *audio_effects = NULL;
+	struct snd_compr_stream *cstream = NULL;
+	struct msm_compr_audio *prtd = NULL;
+	long *values = &(ucontrol->value.integer.value[0]);
+	int effects_module;
+
+	pr_debug("%s\n", __func__);
+	if (fe_id >= MSM_FRONTEND_DAI_MAX) {
+		pr_err("%s Received out of bounds fe_id %lu\n",
+			__func__, fe_id);
+		return -EINVAL;
+	}
+	cstream = pdata->cstream[fe_id];
+	audio_effects = pdata->audio_effects[fe_id];
+	if (!cstream || !audio_effects) {
+		pr_debug("%s: stream or effects inactive\n", __func__);
+		return -EINVAL;
+	}
+	prtd = cstream->runtime->private_data;
+	if (!prtd) {
+		pr_err("%s: cannot set audio effects\n", __func__);
+		return -EINVAL;
+	}
+	if (prtd->compr_passthr != LEGACY_PCM) {
+		pr_debug("%s: No effects for compr_type[%d]\n",
+			__func__, prtd->compr_passthr);
+		return 0;
+	} else {
+		pr_debug("%s: Effects supported for compr_type[%d]\n",
+			 __func__, prtd->compr_passthr);
+	}
+	effects_module = *values++;
+	switch (effects_module) {
+	case VIRTUALIZER_MODULE:
+		pr_debug("%s: VIRTUALIZER_MODULE\n", __func__);
+		if (msm_audio_effects_is_effmodule_supp_in_top(effects_module,
+						prtd->audio_client->topology))
+			msm_audio_effects_virtualizer_handler(
+						prtd->audio_client,
+						&(audio_effects->virtualizer),
+						values);
+		break;
+	case REVERB_MODULE:
+		pr_debug("%s: REVERB_MODULE\n", __func__);
+		if (msm_audio_effects_is_effmodule_supp_in_top(effects_module,
+						prtd->audio_client->topology))
+			msm_audio_effects_reverb_handler(prtd->audio_client,
+						 &(audio_effects->reverb),
+						 values);
+		break;
+	case BASS_BOOST_MODULE:
+		pr_debug("%s: BASS_BOOST_MODULE\n", __func__);
+		if (msm_audio_effects_is_effmodule_supp_in_top(effects_module,
+						prtd->audio_client->topology))
+			msm_audio_effects_bass_boost_handler(prtd->audio_client,
+						   &(audio_effects->bass_boost),
+						     values);
+		break;
+	case PBE_MODULE:
+		pr_debug("%s: PBE_MODULE\n", __func__);
+		if (msm_audio_effects_is_effmodule_supp_in_top(effects_module,
+						prtd->audio_client->topology))
+			msm_audio_effects_pbe_handler(prtd->audio_client,
+						   &(audio_effects->pbe),
+						     values);
+		break;
+	case EQ_MODULE:
+		pr_debug("%s: EQ_MODULE\n", __func__);
+		if (msm_audio_effects_is_effmodule_supp_in_top(effects_module,
+						prtd->audio_client->topology))
+			msm_audio_effects_popless_eq_handler(prtd->audio_client,
+						    &(audio_effects->equalizer),
+						     values);
+		break;
+	case SOFT_VOLUME_MODULE:
+		pr_debug("%s: SOFT_VOLUME_MODULE\n", __func__);
+		break;
+	case SOFT_VOLUME2_MODULE:
+		pr_debug("%s: SOFT_VOLUME2_MODULE\n", __func__);
+		if (msm_audio_effects_is_effmodule_supp_in_top(effects_module,
+						prtd->audio_client->topology))
+			msm_audio_effects_volume_handler_v2(prtd->audio_client,
+						&(audio_effects->volume),
+						values, SOFT_VOLUME_INSTANCE_2);
+		break;
+	default:
+		pr_err("%s Invalid effects config module\n", __func__);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static int msm_compr_audio_effects_config_get(struct snd_kcontrol *kcontrol,
+					   struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_component *comp = snd_kcontrol_chip(kcontrol);
+	unsigned long fe_id = kcontrol->private_value;
+	struct msm_compr_pdata *pdata = (struct msm_compr_pdata *)
+			snd_soc_component_get_drvdata(comp);
+	struct msm_compr_audio_effects *audio_effects = NULL;
+	struct snd_compr_stream *cstream = NULL;
+	struct msm_compr_audio *prtd = NULL;
+
+	pr_debug("%s\n", __func__);
+	if (fe_id >= MSM_FRONTEND_DAI_MAX) {
+		pr_err("%s Received out of bounds fe_id %lu\n",
+			__func__, fe_id);
+		return -EINVAL;
+	}
+	cstream = pdata->cstream[fe_id];
+	audio_effects = pdata->audio_effects[fe_id];
+	if (!cstream || !audio_effects) {
+		pr_debug("%s: stream or effects inactive\n", __func__);
+		return -EINVAL;
+	}
+	prtd = cstream->runtime->private_data;
+	if (!prtd) {
+		pr_err("%s: cannot set audio effects\n", __func__);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int msm_compr_query_audio_effect_put(struct snd_kcontrol *kcontrol,
+					   struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_component *comp = snd_kcontrol_chip(kcontrol);
+	unsigned long fe_id = kcontrol->private_value;
+	struct msm_compr_pdata *pdata = (struct msm_compr_pdata *)
+			snd_soc_component_get_drvdata(comp);
+	struct msm_compr_audio_effects *audio_effects = NULL;
+	struct snd_compr_stream *cstream = NULL;
+	struct msm_compr_audio *prtd = NULL;
+	long *values = &(ucontrol->value.integer.value[0]);
+
+	if (fe_id >= MSM_FRONTEND_DAI_MAX) {
+		pr_err("%s Received out of bounds fe_id %lu\n",
+			__func__, fe_id);
+		return -EINVAL;
+	}
+	cstream = pdata->cstream[fe_id];
+	audio_effects = pdata->audio_effects[fe_id];
+	if (!cstream || !audio_effects) {
+		pr_debug("%s: stream or effects inactive\n", __func__);
+		return -EINVAL;
+	}
+	prtd = cstream->runtime->private_data;
+	if (!prtd) {
+		pr_err("%s: cannot set audio effects\n", __func__);
+		return -EINVAL;
+	}
+	if (prtd->compr_passthr != LEGACY_PCM) {
+		pr_err("%s: No effects for compr_type[%d]\n",
+			__func__, prtd->compr_passthr);
+		return -EPERM;
+	}
+	audio_effects->query.mod_id = (u32)*values++;
+	audio_effects->query.parm_id = (u32)*values++;
+	audio_effects->query.size = (u32)*values++;
+	audio_effects->query.offset = (u32)*values++;
+	audio_effects->query.device = (u32)*values++;
+	return 0;
+}
+
+static int msm_compr_query_audio_effect_get(struct snd_kcontrol *kcontrol,
+					   struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_component *comp = snd_kcontrol_chip(kcontrol);
+	unsigned long fe_id = kcontrol->private_value;
+	struct msm_compr_pdata *pdata = (struct msm_compr_pdata *)
+			snd_soc_component_get_drvdata(comp);
+	struct msm_compr_audio_effects *audio_effects = NULL;
+	struct snd_compr_stream *cstream = NULL;
+	struct msm_compr_audio *prtd = NULL;
+	long *values = &(ucontrol->value.integer.value[0]);
+
+	if (fe_id >= MSM_FRONTEND_DAI_MAX) {
+		pr_err("%s Received out of bounds fe_id %lu\n",
+			__func__, fe_id);
+		return -EINVAL;
+	}
+	cstream = pdata->cstream[fe_id];
+	audio_effects = pdata->audio_effects[fe_id];
+	if (!cstream || !audio_effects) {
+		pr_debug("%s: stream or effects inactive\n", __func__);
+		return -EINVAL;
+	}
+	prtd = cstream->runtime->private_data;
+	if (!prtd) {
+		pr_err("%s: cannot set audio effects\n", __func__);
+		return -EINVAL;
+	}
+	values[0] = (long)audio_effects->query.mod_id;
+	values[1] = (long)audio_effects->query.parm_id;
+	values[2] = (long)audio_effects->query.size;
+	values[3] = (long)audio_effects->query.offset;
+	values[4] = (long)audio_effects->query.device;
+	return 0;
+}
+
+static int msm_compr_send_dec_params(struct snd_compr_stream *cstream,
+				     struct msm_compr_dec_params *dec_params,
+				     int stream_id)
+{
+	int rc = 0;
+	struct msm_compr_audio *prtd = NULL;
+	struct snd_dec_ddp *ddp = &dec_params->ddp_params;
+
+	if (!cstream || !dec_params) {
+		pr_err("%s: stream or dec_params inactive\n", __func__);
+		rc = -EINVAL;
+		goto end;
+	}
+	prtd = cstream->runtime->private_data;
+	if (!prtd) {
+		pr_err("%s: cannot set dec_params\n", __func__);
+		rc = -EINVAL;
+		goto end;
+	}
+	switch (prtd->codec) {
+	case FORMAT_MP3:
+	case FORMAT_MPEG4_AAC:
+	case FORMAT_TRUEHD:
+	case FORMAT_IEC61937:
+	case FORMAT_APTX:
+		pr_debug("%s: no runtime parameters for codec: %d\n", __func__,
+			 prtd->codec);
+		break;
+	case FORMAT_AC3:
+	case FORMAT_EAC3:
+		if (prtd->compr_passthr != LEGACY_PCM) {
+			pr_debug("%s: No DDP param for compr_type[%d]\n",
+				 __func__, prtd->compr_passthr);
+			break;
+		}
+		rc = msm_compr_send_ddp_cfg(prtd->audio_client, ddp, stream_id);
+		if (rc < 0)
+			pr_err("%s: DDP CMD CFG failed %d\n", __func__, rc);
+		break;
+	default:
+		break;
+	}
+end:
+	return rc;
+}
+
+static int msm_compr_dec_params_put(struct snd_kcontrol *kcontrol,
+				    struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_component *comp = snd_kcontrol_chip(kcontrol);
+	unsigned long fe_id = kcontrol->private_value;
+	struct msm_compr_pdata *pdata = (struct msm_compr_pdata *)
+			snd_soc_component_get_drvdata(comp);
+	struct msm_compr_dec_params *dec_params = NULL;
+	struct snd_compr_stream *cstream = NULL;
+	struct msm_compr_audio *prtd = NULL;
+	long *values = &(ucontrol->value.integer.value[0]);
+	int rc = 0;
+
+	pr_debug("%s\n", __func__);
+	if (fe_id >= MSM_FRONTEND_DAI_MAX) {
+		pr_err("%s Received out of bounds fe_id %lu\n",
+			__func__, fe_id);
+		rc = -EINVAL;
+		goto end;
+	}
+
+	cstream = pdata->cstream[fe_id];
+	dec_params = pdata->dec_params[fe_id];
+
+	if (!cstream || !dec_params) {
+		pr_err("%s: stream or dec_params inactive\n", __func__);
+		rc = -EINVAL;
+		goto end;
+	}
+	prtd = cstream->runtime->private_data;
+	if (!prtd) {
+		pr_err("%s: cannot set dec_params\n", __func__);
+		rc = -EINVAL;
+		goto end;
+	}
+
+	switch (prtd->codec) {
+	case FORMAT_MP3:
+	case FORMAT_MPEG4_AAC:
+	case FORMAT_FLAC:
+	case FORMAT_VORBIS:
+	case FORMAT_ALAC:
+	case FORMAT_APE:
+	case FORMAT_DTS:
+	case FORMAT_DSD:
+	case FORMAT_TRUEHD:
+	case FORMAT_IEC61937:
+	case FORMAT_APTX:
+		pr_debug("%s: no runtime parameters for codec: %d\n", __func__,
+			 prtd->codec);
+		break;
+	case FORMAT_AC3:
+	case FORMAT_EAC3: {
+		struct snd_dec_ddp *ddp = &dec_params->ddp_params;
+		int cnt;
+		if (prtd->compr_passthr != LEGACY_PCM) {
+			pr_debug("%s: No DDP param for compr_type[%d]\n",
+				__func__, prtd->compr_passthr);
+			break;
+		}
+
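+		/*
+		 * values layout: params_length followed by params_length
+		 * (param_id, param_value) pairs.
+		 */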
+		ddp->params_length = (*values++);
+		if (ddp->params_length > DDP_DEC_MAX_NUM_PARAM) {
+			pr_err("%s: invalid num of params:: %d\n", __func__,
+				ddp->params_length);
+			rc = -EINVAL;
+			goto end;
+		}
+		for (cnt = 0; cnt < ddp->params_length; cnt++) {
+			ddp->params_id[cnt] = *values++;
+			ddp->params_value[cnt] = *values++;
+		}
+		if (prtd->audio_client)
+			rc = msm_compr_send_dec_params(cstream, dec_params,
+						prtd->audio_client->stream_id);
+		break;
+	}
+	default:
+		break;
+	}
+end:
+	pr_debug("%s: ret %d\n", __func__, rc);
+	return rc;
+}
+
+static int msm_compr_dec_params_get(struct snd_kcontrol *kcontrol,
+				    struct snd_ctl_elem_value *ucontrol)
+{
+	/* dummy function */
+	return 0;
+}
+
+static int msm_compr_playback_app_type_cfg_put(struct snd_kcontrol *kcontrol,
+				      struct snd_ctl_elem_value *ucontrol)
+{
+	u64 fe_id = kcontrol->private_value;
+	int session_type = SESSION_TYPE_RX;
+	int be_id = ucontrol->value.integer.value[3];
+	struct msm_pcm_stream_app_type_cfg cfg_data = {0, 0, 48000};
+	int ret = 0;
+
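+	/*
+	 * value layout: [0] app_type, [1] acdb_dev_id, [2] sample_rate,
+	 * [3] be_id.
+	 */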
+	cfg_data.app_type = ucontrol->value.integer.value[0];
+	cfg_data.acdb_dev_id = ucontrol->value.integer.value[1];
+	if (ucontrol->value.integer.value[2] != 0)
+		cfg_data.sample_rate = ucontrol->value.integer.value[2];
+	pr_debug("%s: fe_id- %llu session_type- %d be_id- %d app_type- %d acdb_dev_id- %d sample_rate- %d\n",
+		__func__, fe_id, session_type, be_id,
+		cfg_data.app_type, cfg_data.acdb_dev_id, cfg_data.sample_rate);
+	ret = msm_pcm_routing_reg_stream_app_type_cfg(fe_id, session_type,
+						      be_id, &cfg_data);
+	if (ret < 0)
+		pr_err("%s: msm_pcm_routing_reg_stream_app_type_cfg failed returned %d\n",
+			__func__, ret);
+
+	return ret;
+}
+
+static int msm_compr_playback_app_type_cfg_get(struct snd_kcontrol *kcontrol,
+				      struct snd_ctl_elem_value *ucontrol)
+{
+	u64 fe_id = kcontrol->private_value;
+	int session_type = SESSION_TYPE_RX;
+	int be_id = 0;
+	struct msm_pcm_stream_app_type_cfg cfg_data = {0};
+	int ret = 0;
+
+	ret = msm_pcm_routing_get_stream_app_type_cfg(fe_id, session_type,
+						      &be_id, &cfg_data);
+	if (ret < 0) {
+		pr_err("%s: msm_pcm_routing_get_stream_app_type_cfg failed returned %d\n",
+			__func__, ret);
+		goto done;
+	}
+
+	ucontrol->value.integer.value[0] = cfg_data.app_type;
+	ucontrol->value.integer.value[1] = cfg_data.acdb_dev_id;
+	ucontrol->value.integer.value[2] = cfg_data.sample_rate;
+	ucontrol->value.integer.value[3] = be_id;
+	pr_debug("%s: fedai_id %llu, session_type %d, be_id %d, app_type %d, acdb_dev_id %d, sample_rate %d\n",
+		__func__, fe_id, session_type, be_id,
+		cfg_data.app_type, cfg_data.acdb_dev_id, cfg_data.sample_rate);
+done:
+	return ret;
+}
+
+static int msm_compr_capture_app_type_cfg_put(struct snd_kcontrol *kcontrol,
+					struct snd_ctl_elem_value *ucontrol)
+{
+	u64 fe_id = kcontrol->private_value;
+	int session_type = SESSION_TYPE_TX;
+	int be_id = ucontrol->value.integer.value[3];
+	struct msm_pcm_stream_app_type_cfg cfg_data = {0, 0, 48000};
+	int ret = 0;
+
+	cfg_data.app_type = ucontrol->value.integer.value[0];
+	cfg_data.acdb_dev_id = ucontrol->value.integer.value[1];
+	if (ucontrol->value.integer.value[2] != 0)
+		cfg_data.sample_rate = ucontrol->value.integer.value[2];
+	pr_debug("%s: fe_id- %llu session_type- %d be_id- %d app_type- %d acdb_dev_id- %d sample_rate- %d\n",
+		__func__, fe_id, session_type, be_id,
+		cfg_data.app_type, cfg_data.acdb_dev_id, cfg_data.sample_rate);
+	ret = msm_pcm_routing_reg_stream_app_type_cfg(fe_id, session_type,
+						      be_id, &cfg_data);
+	if (ret < 0)
+		pr_err("%s: msm_pcm_routing_reg_stream_app_type_cfg failed returned %d\n",
+			__func__, ret);
+
+	return ret;
+}
+
+static int msm_compr_capture_app_type_cfg_get(struct snd_kcontrol *kcontrol,
+					struct snd_ctl_elem_value *ucontrol)
+{
+	u64 fe_id = kcontrol->private_value;
+	int session_type = SESSION_TYPE_TX;
+	int be_id = 0;
+	struct msm_pcm_stream_app_type_cfg cfg_data = {0};
+	int ret = 0;
+
+	ret = msm_pcm_routing_get_stream_app_type_cfg(fe_id, session_type,
+						      &be_id, &cfg_data);
+	if (ret < 0) {
+		pr_err("%s: msm_pcm_routing_get_stream_app_type_cfg failed returned %d\n",
+			__func__, ret);
+		goto done;
+	}
+
+	ucontrol->value.integer.value[0] = cfg_data.app_type;
+	ucontrol->value.integer.value[1] = cfg_data.acdb_dev_id;
+	ucontrol->value.integer.value[2] = cfg_data.sample_rate;
+	ucontrol->value.integer.value[3] = be_id;
+	pr_debug("%s: fedai_id %llu, session_type %d, be_id %d, app_type %d, acdb_dev_id %d, sample_rate %d\n",
+		__func__, fe_id, session_type, be_id,
+		cfg_data.app_type, cfg_data.acdb_dev_id, cfg_data.sample_rate);
+done:
+	return ret;
+}
+
+static int msm_compr_channel_map_put(struct snd_kcontrol *kcontrol,
+				     struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_component *comp = snd_kcontrol_chip(kcontrol);
+	u64 fe_id = kcontrol->private_value;
+	struct msm_compr_pdata *pdata = (struct msm_compr_pdata *)
+			snd_soc_component_get_drvdata(comp);
+	int rc = 0, i;
+
+	pr_debug("%s: fe_id- %llu\n", __func__, fe_id);
+
+	if (fe_id >= MSM_FRONTEND_DAI_MAX) {
+		pr_err("%s Received out of bounds fe_id %llu\n",
+			__func__, fe_id);
+		rc = -EINVAL;
+		goto end;
+	}
+
+	if (pdata->ch_map[fe_id]) {
+		pdata->ch_map[fe_id]->set_ch_map = true;
+		for (i = 0; i < PCM_FORMAT_MAX_NUM_CHANNEL; i++)
+			pdata->ch_map[fe_id]->channel_map[i] =
+				(char)(ucontrol->value.integer.value[i]);
+	} else {
+		pr_debug("%s: no memory for ch_map, default will be set\n",
+			__func__);
+	}
+end:
+	pr_debug("%s: ret %d\n", __func__, rc);
+	return rc;
+}
+
+static int msm_compr_channel_map_get(struct snd_kcontrol *kcontrol,
+				     struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_component *comp = snd_kcontrol_chip(kcontrol);
+	u64 fe_id = kcontrol->private_value;
+	struct msm_compr_pdata *pdata = (struct msm_compr_pdata *)
+			snd_soc_component_get_drvdata(comp);
+	int rc = 0, i;
+
+	pr_debug("%s: fe_id- %llu\n", __func__, fe_id);
+	if (fe_id >= MSM_FRONTEND_DAI_MAX) {
+		pr_err("%s: Received out of bounds fe_id %llu\n",
+			__func__, fe_id);
+		rc = -EINVAL;
+		goto end;
+	}
+	if (pdata->ch_map[fe_id]) {
+		for (i = 0; i < PCM_FORMAT_MAX_NUM_CHANNEL; i++)
+			ucontrol->value.integer.value[i] =
+				pdata->ch_map[fe_id]->channel_map[i];
+	}
+end:
+	pr_debug("%s: ret %d\n", __func__, rc);
+	return rc;
+}
+
+static int msm_compr_adsp_stream_cmd_put(struct snd_kcontrol *kcontrol,
+				struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_component *comp = snd_kcontrol_chip(kcontrol);
+	unsigned long fe_id = kcontrol->private_value;
+	struct msm_compr_pdata *pdata = (struct msm_compr_pdata *)
+				snd_soc_component_get_drvdata(comp);
+	struct snd_compr_stream *cstream = NULL;
+	struct msm_compr_audio *prtd;
+	int ret = 0;
+	struct msm_adsp_event_data *event_data = NULL;
+	uint64_t actual_payload_len = 0;
+
+	if (fe_id >= MSM_FRONTEND_DAI_MAX) {
+		pr_err("%s Received invalid fe_id %lu\n",
+			__func__, fe_id);
+		ret = -EINVAL;
+		goto done;
+	}
+
+	cstream = pdata->cstream[fe_id];
+	if (cstream == NULL) {
+		pr_err("%s cstream is null\n", __func__);
+		ret = -EINVAL;
+		goto done;
+	}
+
+	prtd = cstream->runtime->private_data;
+	if (!prtd) {
+		pr_err("%s: prtd is null\n", __func__);
+		ret = -EINVAL;
+		goto done;
+	}
+
+	if (prtd->audio_client == NULL) {
+		pr_err("%s: audio_client is null\n", __func__);
+		ret = -EINVAL;
+		goto done;
+	}
+
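+	/*
+	 * value.bytes.data carries a struct msm_adsp_event_data header
+	 * followed by payload_len bytes of payload.
+	 */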
+	event_data = (struct msm_adsp_event_data *)ucontrol->value.bytes.data;
+	if ((event_data->event_type < ADSP_STREAM_PP_EVENT) ||
+	    (event_data->event_type >= ADSP_STREAM_EVENT_MAX)) {
+		pr_err("%s: invalid event_type=%d",
+			__func__, event_data->event_type);
+		ret = -EINVAL;
+		goto done;
+	}
+
+	actual_payload_len = sizeof(struct msm_adsp_event_data) +
+					event_data->payload_len;
+	if (actual_payload_len >= U32_MAX) {
+		pr_err("%s: payload length 0x%X exceeds limit\n",
+			__func__, event_data->payload_len);
+		ret = -EINVAL;
+		goto done;
+	}
+
+	if (event_data->payload_len > sizeof(ucontrol->value.bytes.data)
+			 - sizeof(struct msm_adsp_event_data)) {
+		pr_err("%s: param length %d exceeds limit\n",
+			__func__, event_data->payload_len);
+		ret = -EINVAL;
+		goto done;
+	}
+
+	ret = q6asm_send_stream_cmd(prtd->audio_client, event_data);
+	if (ret < 0)
+		pr_err("%s: failed to send stream event cmd, err = %d\n",
+			__func__, ret);
+done:
+	return ret;
+}
+
+static int msm_compr_playback_dnmix_ctl_put(struct snd_kcontrol *kcontrol,
+					struct snd_ctl_elem_value *ucontrol)
+{
+	int ret = 0;
+	int len = 0;
+	int i = 0;
+	struct snd_soc_component *comp = snd_kcontrol_chip(kcontrol);
+	unsigned long fe_id = kcontrol->private_value;
+	struct msm_compr_pdata *pdata = (struct msm_compr_pdata *)
+					snd_soc_component_get_drvdata(comp);
+	struct snd_compr_stream *cstream = NULL;
+	struct msm_compr_audio *prtd;
+	struct asm_stream_pan_ctrl_params dnmix_param;
+	int be_id = ucontrol->value.integer.value[len++];
+	int stream_id = 0;
+	/*
+	 * Max index for this mixer control includes below
+	 * be_id			(1)
+	 * num_output_channels		(1)
+	 * num_input_channels		(1)
+	 * output ch map (max)		(8)
+	 * input ch map (max)		(8)
+	 * mix matrix coefficients (max)(64)
+	 */
+	int max_index = 0;
+	int max_mixer_ctrl_value_size = 128;
+
+	if (fe_id >= MSM_FRONTEND_DAI_MAX) {
+		pr_err("%s: Received out of bounds fe_id %lu\n",
+			__func__, fe_id);
+		ret = -EINVAL;
+		goto done;
+	}
+
+	cstream = pdata->cstream[fe_id];
+	if (cstream == NULL) {
+		pr_err("%s cstream is null\n", __func__);
+		ret = -EINVAL;
+		goto done;
+	}
+
+	prtd = cstream->runtime->private_data;
+	if (!prtd) {
+		pr_err("%s: prtd is null\n", __func__);
+		ret = -EINVAL;
+		goto done;
+	}
+
+	if (prtd->audio_client == NULL) {
+		pr_err("%s: audio_client is null\n", __func__);
+		ret = -EINVAL;
+		goto done;
+	}
+
+	stream_id = prtd->audio_client->session;
+	if (len >= max_mixer_ctrl_value_size) {
+		ret = -EINVAL;
+		goto done;
+	}
+	dnmix_param.num_output_channels =
+				ucontrol->value.integer.value[len++];
+	if (dnmix_param.num_output_channels >
+		PCM_FORMAT_MAX_NUM_CHANNEL) {
+		ret = -EINVAL;
+		goto done;
+	}
+	if (len >= max_mixer_ctrl_value_size) {
+		ret = -EINVAL;
+		goto done;
+	}
+	dnmix_param.num_input_channels =
+				ucontrol->value.integer.value[len++];
+	if (dnmix_param.num_input_channels >
+		PCM_FORMAT_MAX_NUM_CHANNEL) {
+		ret = -EINVAL;
+		goto done;
+	}
+	max_index = len + dnmix_param.num_output_channels +
+			dnmix_param.num_input_channels +
+			dnmix_param.num_output_channels *
+			dnmix_param.num_input_channels;
+	if (max_index >= max_mixer_ctrl_value_size) {
+		ret = -EINVAL;
+		goto done;
+	}
+
+	if (ucontrol->value.integer.value[len++]) {
+		for (i = 0; i < dnmix_param.num_output_channels; i++) {
+			dnmix_param.output_channel_map[i] =
+				ucontrol->value.integer.value[len++];
+		}
+	}
+	if (ucontrol->value.integer.value[len++]) {
+		for (i = 0; i < dnmix_param.num_input_channels; i++) {
+			dnmix_param.input_channel_map[i] =
+				ucontrol->value.integer.value[len++];
+		}
+	}
+	if (ucontrol->value.integer.value[len++]) {
+		for (i = 0; i < dnmix_param.num_output_channels *
+				dnmix_param.num_input_channels; i++) {
+			dnmix_param.gain[i] =
+					ucontrol->value.integer.value[len++];
+		}
+	}
+	msm_routing_set_downmix_control_data(be_id,
+						stream_id,
+						&dnmix_param);
+
+done:
+	return ret;
+}
+
+static int msm_compr_shm_ion_fd_map_put(struct snd_kcontrol *kcontrol,
+				    struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_component *comp = snd_kcontrol_chip(kcontrol);
+	unsigned long fe_id = kcontrol->private_value;
+	struct msm_compr_pdata *pdata = (struct msm_compr_pdata *)
+				snd_soc_component_get_drvdata(comp);
+	struct snd_compr_stream *cstream = NULL;
+	struct msm_compr_audio *prtd;
+	int ret = 0;
+
+	if (fe_id >= MSM_FRONTEND_DAI_MAX) {
+		pr_err("%s: Received out of bounds fe_id %lu\n",
+			__func__, fe_id);
+		ret = -EINVAL;
+		goto done;
+	}
+
+	cstream = pdata->cstream[fe_id];
+	if (cstream == NULL) {
+		pr_err("%s cstream is null\n", __func__);
+		ret = -EINVAL;
+		goto done;
+	}
+
+	prtd = cstream->runtime->private_data;
+	if (!prtd) {
+		pr_err("%s: prtd is null\n", __func__);
+		ret = -EINVAL;
+		goto done;
+	}
+
+	if (prtd->audio_client == NULL) {
+		pr_err("%s: audio_client is null\n", __func__);
+		ret = -EINVAL;
+		goto done;
+	}
+
+	memcpy(&prtd->shm_ion_fd, ucontrol->value.bytes.data,
+		sizeof(prtd->shm_ion_fd));
+	ret = q6asm_audio_map_shm_fd(prtd->audio_client,
+				&prtd->shm_ion_client,
+				&prtd->shm_ion_handle, prtd->shm_ion_fd);
+	if (ret < 0)
+		pr_err("%s: failed to map shm mem\n", __func__);
+done:
+	return ret;
+}
+
+static int msm_compr_lib_ion_fd_map_put(struct snd_kcontrol *kcontrol,
+				    struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_component *comp = snd_kcontrol_chip(kcontrol);
+	unsigned long fe_id = kcontrol->private_value;
+	struct msm_compr_pdata *pdata = (struct msm_compr_pdata *)
+				snd_soc_component_get_drvdata(comp);
+	int ret = 0;
+
+	if (fe_id >= MSM_FRONTEND_DAI_MAX) {
+		pr_err("%s: Received out of bounds fe_id %lu\n",
+			__func__, fe_id);
+		ret = -EINVAL;
+		goto done;
+	}
+
+	memcpy(&pdata->ion_fd[fe_id], ucontrol->value.bytes.data,
+		   sizeof(pdata->ion_fd[fe_id]));
+
+done:
+	return ret;
+}
+
+static int msm_compr_rtic_event_ack_put(struct snd_kcontrol *kcontrol,
+					struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_component *comp = snd_kcontrol_chip(kcontrol);
+	unsigned long fe_id = kcontrol->private_value;
+	struct msm_compr_pdata *pdata = (struct msm_compr_pdata *)
+					snd_soc_component_get_drvdata(comp);
+	struct snd_compr_stream *cstream = NULL;
+	struct msm_compr_audio *prtd;
+	int ret = 0;
+	int param_length = 0;
+
+	if (fe_id >= MSM_FRONTEND_DAI_MAX) {
+		pr_err("%s Received invalid fe_id %lu\n",
+			__func__, fe_id);
+		ret = -EINVAL;
+		goto done;
+	}
+
+	cstream = pdata->cstream[fe_id];
+	if (cstream == NULL) {
+		pr_err("%s cstream is null\n", __func__);
+		ret = -EINVAL;
+		goto done;
+	}
+
+	prtd = cstream->runtime->private_data;
+	if (!prtd) {
+		pr_err("%s: prtd is null\n", __func__);
+		ret = -EINVAL;
+		goto done;
+	}
+
+	if (prtd->audio_client == NULL) {
+		pr_err("%s: audio_client is null\n", __func__);
+		ret = -EINVAL;
+		goto done;
+	}
+
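+	/*
+	 * value.bytes.data layout: a leading int param_length followed by
+	 * param_length bytes of payload.
+	 */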
+	memcpy(&param_length, ucontrol->value.bytes.data,
+		sizeof(param_length));
+	if ((param_length + sizeof(param_length))
+		>= sizeof(ucontrol->value.bytes.data)) {
+		pr_err("%s: param length %d exceeds limit\n",
+			__func__, param_length);
+		ret = -EINVAL;
+		goto done;
+	}
+
+	ret = q6asm_send_rtic_event_ack(prtd->audio_client,
+			ucontrol->value.bytes.data + sizeof(param_length),
+			param_length);
+	if (ret < 0)
+		pr_err("%s: failed to send rtic event ack, err = %d\n",
+			__func__, ret);
+done:
+	return ret;
+}
+
+static int msm_compr_gapless_put(struct snd_kcontrol *kcontrol,
+				struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_component *comp = snd_kcontrol_chip(kcontrol);
+	struct msm_compr_pdata *pdata = (struct msm_compr_pdata *)
+		snd_soc_component_get_drvdata(comp);
+
+	pdata->use_dsp_gapless_mode = ucontrol->value.integer.value[0];
+	pr_debug("%s: value: %ld\n", __func__,
+		ucontrol->value.integer.value[0]);
+
+	return 0;
+}
+
+static int msm_compr_gapless_get(struct snd_kcontrol *kcontrol,
+				struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_component *comp = snd_kcontrol_chip(kcontrol);
+	struct msm_compr_pdata *pdata =
+		snd_soc_component_get_drvdata(comp);
+
+	pr_debug("%s: gapless mode %d\n", __func__,
+		 pdata->use_dsp_gapless_mode);
+	ucontrol->value.integer.value[0] = pdata->use_dsp_gapless_mode;
+
+	return 0;
+}
+
+static const struct snd_kcontrol_new msm_compr_gapless_controls[] = {
+	SOC_SINGLE_EXT("Compress Gapless Playback",
+			0, 0, 1, 0,
+			msm_compr_gapless_get,
+			msm_compr_gapless_put),
+};
+
+static int msm_compr_probe(struct snd_soc_platform *platform)
+{
+	struct msm_compr_pdata *pdata;
+	int i;
+	int rc;
+	const char *qdsp_version;
+
+	pr_debug("%s\n", __func__);
+	pdata = (struct msm_compr_pdata *)
+			kzalloc(sizeof(*pdata), GFP_KERNEL);
+	if (!pdata)
+		return -ENOMEM;
+
+	snd_soc_platform_set_drvdata(platform, pdata);
+
+	for (i = 0; i < MSM_FRONTEND_DAI_MAX; i++) {
+		pdata->volume[i][0] = COMPRESSED_LR_VOL_MAX_STEPS;
+		pdata->volume[i][1] = COMPRESSED_LR_VOL_MAX_STEPS;
+		pdata->audio_effects[i] = NULL;
+		pdata->dec_params[i] = NULL;
+		pdata->cstream[i] = NULL;
+		pdata->ch_map[i] = NULL;
+		pdata->is_in_use[i] = false;
+	}
+
+	snd_soc_add_platform_controls(platform, msm_compr_gapless_controls,
+				      ARRAY_SIZE(msm_compr_gapless_controls));
+
+	rc = of_property_read_string(platform->dev->of_node,
+		"qcom,adsp-version", &qdsp_version);
+	if (!rc)
+		pdata->use_legacy_api = !strcmp(qdsp_version, "MDSP 1.2");
+	else
+		pdata->use_legacy_api = false;
+
+	pr_debug("%s: use legacy api %d\n", __func__, pdata->use_legacy_api);
+	/*
+	 * use_dsp_gapless_mode in the platform data (pdata) is updated from
+	 * the HAL through a mixer control before the compress driver is
+	 * opened. That control decides whether DSP gapless mode is enabled;
+	 * gapless is disabled by default.
+	 */
+	pdata->use_dsp_gapless_mode = false;
+	return 0;
+}
+
+static int msm_compr_volume_info(struct snd_kcontrol *kcontrol,
+				 struct snd_ctl_elem_info *uinfo)
+{
+	uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
+	uinfo->count = 2;
+	uinfo->value.integer.min = 0;
+	uinfo->value.integer.max = COMPRESSED_LR_VOL_MAX_STEPS;
+	return 0;
+}
+
+static int msm_compr_audio_effects_config_info(struct snd_kcontrol *kcontrol,
+					       struct snd_ctl_elem_info *uinfo)
+{
+	uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
+	uinfo->count = MAX_PP_PARAMS_SZ;
+	uinfo->value.integer.min = 0;
+	uinfo->value.integer.max = 0xFFFFFFFF;
+	return 0;
+}
+
+static int msm_compr_query_audio_effect_info(struct snd_kcontrol *kcontrol,
+					     struct snd_ctl_elem_info *uinfo)
+{
+	uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
+	uinfo->count = 128;
+	uinfo->value.integer.min = 0;
+	uinfo->value.integer.max = 0xFFFFFFFF;
+	return 0;
+}
+
+static int msm_compr_dec_params_info(struct snd_kcontrol *kcontrol,
+				     struct snd_ctl_elem_info *uinfo)
+{
+	uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
+	uinfo->count = 128;
+	uinfo->value.integer.min = 0;
+	uinfo->value.integer.max = 0xFFFFFFFF;
+	return 0;
+}
+
+static int msm_compr_app_type_cfg_info(struct snd_kcontrol *kcontrol,
+				       struct snd_ctl_elem_info *uinfo)
+{
+	uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
+	uinfo->count = 5;
+	uinfo->value.integer.min = 0;
+	uinfo->value.integer.max = 0xFFFFFFFF;
+	return 0;
+}
+
+static int msm_compr_channel_map_info(struct snd_kcontrol *kcontrol,
+				      struct snd_ctl_elem_info *uinfo)
+{
+	uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
+	uinfo->count = 8;
+	uinfo->value.integer.min = 0;
+	uinfo->value.integer.max = 0xFFFFFFFF;
+	return 0;
+}
+
+static int msm_compr_device_downmix_info(struct snd_kcontrol *kcontrol,
+	struct snd_ctl_elem_info *uinfo)
+{
+	uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
+	uinfo->count = 128;
+	uinfo->value.integer.min = 0;
+	uinfo->value.integer.max = 0xFFFFFFFF;
+	return 0;
+}
+
+static int msm_compr_add_volume_control(struct snd_soc_pcm_runtime *rtd)
+{
+	const char *mixer_ctl_name = "Compress Playback";
+	const char *deviceNo       = "NN";
+	const char *suffix         = "Volume";
+	char *mixer_str = NULL;
+	int ctl_len;
+	struct snd_kcontrol_new fe_volume_control[1] = {
+		{
+		.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+		.name = "?",
+		.access = SNDRV_CTL_ELEM_ACCESS_TLV_READ |
+			  SNDRV_CTL_ELEM_ACCESS_READWRITE,
+		.info = msm_compr_volume_info,
+		.tlv.p = msm_compr_vol_gain,
+		.get = msm_compr_volume_get,
+		.put = msm_compr_volume_put,
+		.private_value = 0,
+		}
+	};
+
+	if (!rtd) {
+		pr_err("%s NULL rtd\n", __func__);
+		return 0;
+	}
+	pr_debug("%s: added new compr FE with name %s, id %d, cpu dai %s, device no %d\n",
+		 __func__, rtd->dai_link->name, rtd->dai_link->be_id,
+		 rtd->dai_link->cpu_dai_name, rtd->pcm->device);
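+	/* "<name> <device> <suffix>": each +1 covers a space or the NUL */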
+	ctl_len = strlen(mixer_ctl_name) + 1 + strlen(deviceNo) + 1 +
+		  strlen(suffix) + 1;
+	mixer_str = kzalloc(ctl_len, GFP_KERNEL);
+	if (!mixer_str) {
+		pr_err("%s: failed to allocate mixer ctrl str of len %d\n",
+			__func__, ctl_len);
+		return 0;
+	}
+	snprintf(mixer_str, ctl_len, "%s %d %s", mixer_ctl_name,
+		 rtd->pcm->device, suffix);
+	fe_volume_control[0].name = mixer_str;
+	fe_volume_control[0].private_value = rtd->dai_link->be_id;
+	pr_debug("Registering new mixer ctl %s\n", mixer_str);
+	snd_soc_add_platform_controls(rtd->platform, fe_volume_control,
+				      ARRAY_SIZE(fe_volume_control));
+	kfree(mixer_str);
+	return 0;
+}
+
+static int msm_compr_add_audio_effects_control(struct snd_soc_pcm_runtime *rtd)
+{
+	const char *mixer_ctl_name = "Audio Effects Config";
+	const char *deviceNo       = "NN";
+	char *mixer_str = NULL;
+	int ctl_len;
+	struct snd_kcontrol_new fe_audio_effects_config_control[1] = {
+		{
+		.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+		.name = "?",
+		.access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+		.info = msm_compr_audio_effects_config_info,
+		.get = msm_compr_audio_effects_config_get,
+		.put = msm_compr_audio_effects_config_put,
+		.private_value = 0,
+		}
+	};
+
+	if (!rtd) {
+		pr_err("%s NULL rtd\n", __func__);
+		return 0;
+	}
+
+	pr_debug("%s: added new compr FE with name %s, id %d, cpu dai %s, device no %d\n",
+		 __func__, rtd->dai_link->name, rtd->dai_link->be_id,
+		 rtd->dai_link->cpu_dai_name, rtd->pcm->device);
+
+	ctl_len = strlen(mixer_ctl_name) + 1 + strlen(deviceNo) + 1;
+	mixer_str = kzalloc(ctl_len, GFP_KERNEL);
+
+	if (!mixer_str) {
+		pr_err("%s: failed to allocate mixer ctrl str of len %d\n",
+			__func__, ctl_len);
+		return 0;
+	}
+
+	snprintf(mixer_str, ctl_len, "%s %d", mixer_ctl_name, rtd->pcm->device);
+
+	fe_audio_effects_config_control[0].name = mixer_str;
+	fe_audio_effects_config_control[0].private_value = rtd->dai_link->be_id;
+	pr_debug("Registering new mixer ctl %s\n", mixer_str);
+	snd_soc_add_platform_controls(rtd->platform,
+				fe_audio_effects_config_control,
+				ARRAY_SIZE(fe_audio_effects_config_control));
+	kfree(mixer_str);
+	return 0;
+}
+
+static int msm_compr_add_query_audio_effect_control(
+					struct snd_soc_pcm_runtime *rtd)
+{
+	const char *mixer_ctl_name = "Query Audio Effect Param";
+	const char *deviceNo       = "NN";
+	char *mixer_str = NULL;
+	int ctl_len;
+	struct snd_kcontrol_new fe_query_audio_effect_control[1] = {
+		{
+		.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+		.name = "?",
+		.access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+		.info = msm_compr_query_audio_effect_info,
+		.get = msm_compr_query_audio_effect_get,
+		.put = msm_compr_query_audio_effect_put,
+		.private_value = 0,
+		}
+	};
+	if (!rtd) {
+		pr_err("%s NULL rtd\n", __func__);
+		return 0;
+	}
+	pr_debug("%s: added new compr FE with name %s, id %d, cpu dai %s, device no %d\n",
+		 __func__, rtd->dai_link->name, rtd->dai_link->be_id,
+		 rtd->dai_link->cpu_dai_name, rtd->pcm->device);
+	ctl_len = strlen(mixer_ctl_name) + 1 + strlen(deviceNo) + 1;
+	mixer_str = kzalloc(ctl_len, GFP_KERNEL);
+	if (!mixer_str) {
+		pr_err("%s: failed to allocate mixer ctrl str of len %d\n",
+			__func__, ctl_len);
+		return 0;
+	}
+	snprintf(mixer_str, ctl_len, "%s %d", mixer_ctl_name, rtd->pcm->device);
+	fe_query_audio_effect_control[0].name = mixer_str;
+	fe_query_audio_effect_control[0].private_value = rtd->dai_link->be_id;
+	pr_debug("%s: registering new mixer ctl %s\n", __func__, mixer_str);
+	snd_soc_add_platform_controls(rtd->platform,
+				fe_query_audio_effect_control,
+				ARRAY_SIZE(fe_query_audio_effect_control));
+	kfree(mixer_str);
+	return 0;
+}
+
+static int msm_compr_add_audio_adsp_stream_cmd_control(
+			struct snd_soc_pcm_runtime *rtd)
+{
+	const char *mixer_ctl_name = DSP_STREAM_CMD;
+	const char *deviceNo = "NN";
+	char *mixer_str = NULL;
+	int ctl_len = 0, ret = 0;
+	struct snd_kcontrol_new fe_audio_adsp_stream_cmd_config_control[1] = {
+		{
+		.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+		.name = "?",
+		.access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+		.info = msm_adsp_stream_cmd_info,
+		.put = msm_compr_adsp_stream_cmd_put,
+		.private_value = 0,
+		}
+	};
+
+	if (!rtd) {
+		pr_err("%s NULL rtd\n", __func__);
+		ret = -EINVAL;
+		goto done;
+	}
+
+	ctl_len = strlen(mixer_ctl_name) + 1 + strlen(deviceNo) + 1;
+	mixer_str = kzalloc(ctl_len, GFP_KERNEL);
+	if (!mixer_str) {
+		ret = -ENOMEM;
+		goto done;
+	}
+
+	snprintf(mixer_str, ctl_len, "%s %d", mixer_ctl_name, rtd->pcm->device);
+	fe_audio_adsp_stream_cmd_config_control[0].name = mixer_str;
+	fe_audio_adsp_stream_cmd_config_control[0].private_value =
+				rtd->dai_link->be_id;
+	pr_debug("%s: Registering new mixer ctl %s\n", __func__, mixer_str);
+	ret = snd_soc_add_platform_controls(rtd->platform,
+		fe_audio_adsp_stream_cmd_config_control,
+		ARRAY_SIZE(fe_audio_adsp_stream_cmd_config_control));
+	if (ret < 0)
+		pr_err("%s: failed to add ctl %s. err = %d\n",
+			__func__, mixer_str, ret);
+
+	kfree(mixer_str);
+done:
+	return ret;
+}
+
+static int msm_compr_add_audio_adsp_stream_callback_control(
+			struct snd_soc_pcm_runtime *rtd)
+{
+	const char *mixer_ctl_name = DSP_STREAM_CALLBACK;
+	const char *deviceNo = "NN";
+	char *mixer_str = NULL;
+	int ctl_len = 0, ret = 0;
+	struct snd_kcontrol *kctl;
+
+	struct snd_kcontrol_new fe_audio_adsp_callback_config_control[1] = {
+		{
+		.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+		.name = "?",
+		.access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+		.info = msm_adsp_stream_callback_info,
+		.get = msm_adsp_stream_callback_get,
+		.private_value = 0,
+		}
+	};
+
+	if (!rtd) {
+		pr_err("%s: rtd is NULL\n", __func__);
+		ret = -EINVAL;
+		goto done;
+	}
+
+	ctl_len = strlen(mixer_ctl_name) + 1 + strlen(deviceNo) + 1;
+	mixer_str = kzalloc(ctl_len, GFP_KERNEL);
+	if (!mixer_str) {
+		ret = -ENOMEM;
+		goto done;
+	}
+
+	snprintf(mixer_str, ctl_len, "%s %d", mixer_ctl_name, rtd->pcm->device);
+	fe_audio_adsp_callback_config_control[0].name = mixer_str;
+	fe_audio_adsp_callback_config_control[0].private_value =
+					rtd->dai_link->be_id;
+	pr_debug("%s: Registering new mixer ctl %s\n", __func__, mixer_str);
+	ret = snd_soc_add_platform_controls(rtd->platform,
+			fe_audio_adsp_callback_config_control,
+			ARRAY_SIZE(fe_audio_adsp_callback_config_control));
+	if (ret < 0) {
+		pr_err("%s: failed to add ctl %s. err = %d\n",
+			__func__, mixer_str, ret);
+		ret = -EINVAL;
+		goto free_mixer_str;
+	}
+
+	kctl = snd_soc_card_get_kcontrol(rtd->card, mixer_str);
+	if (!kctl) {
+		pr_err("%s: failed to get kctl %s.\n", __func__, mixer_str);
+		ret = -EINVAL;
+		goto free_mixer_str;
+	}
+
+	kctl->private_data = NULL;
+
+free_mixer_str:
+	kfree(mixer_str);
+done:
+	return ret;
+}
+
+static int msm_compr_add_dec_runtime_params_control(
+						struct snd_soc_pcm_runtime *rtd)
+{
+	const char *mixer_ctl_name	= "Audio Stream";
+	const char *deviceNo		= "NN";
+	const char *suffix		= "Dec Params";
+	char *mixer_str = NULL;
+	int ctl_len;
+	struct snd_kcontrol_new fe_dec_params_control[1] = {
+		{
+		.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+		.name = "?",
+		.access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+		.info = msm_compr_dec_params_info,
+		.get = msm_compr_dec_params_get,
+		.put = msm_compr_dec_params_put,
+		.private_value = 0,
+		}
+	};
+
+	if (!rtd) {
+		pr_err("%s NULL rtd\n", __func__);
+		return 0;
+	}
+
+	pr_debug("%s: added new compr FE with name %s, id %d, cpu dai %s, device no %d\n",
+		 __func__, rtd->dai_link->name, rtd->dai_link->be_id,
+		 rtd->dai_link->cpu_dai_name, rtd->pcm->device);
+
+	ctl_len = strlen(mixer_ctl_name) + 1 + strlen(deviceNo) + 1 +
+		  strlen(suffix) + 1;
+	mixer_str = kzalloc(ctl_len, GFP_KERNEL);
+
+	if (!mixer_str) {
+		pr_err("%s: failed to allocate mixer ctrl str of len %d\n",
+			__func__, ctl_len);
+		return 0;
+	}
+
+	snprintf(mixer_str, ctl_len, "%s %d %s", mixer_ctl_name,
+		 rtd->pcm->device, suffix);
+
+	fe_dec_params_control[0].name = mixer_str;
+	fe_dec_params_control[0].private_value = rtd->dai_link->be_id;
+	pr_debug("Registering new mixer ctl %s\n", mixer_str);
+	snd_soc_add_platform_controls(rtd->platform,
+				      fe_dec_params_control,
+				      ARRAY_SIZE(fe_dec_params_control));
+	kfree(mixer_str);
+	return 0;
+}
+
+static int msm_compr_add_device_down_mix_controls(
+					struct snd_soc_pcm_runtime *rtd)
+{
+	const char *playback_mixer_ctl_name = "Audio Device";
+	const char *deviceNo = "NN";
+	const char *suffix = "Downmix Control";
+	char *mixer_str = NULL;
+	int ctl_len = 0, ret = 0;
+	struct snd_kcontrol_new device_downmix_control[1] = {
+		{
+			.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+			.name = "?",
+			.access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+			.info = msm_compr_device_downmix_info,
+			.put = msm_compr_playback_dnmix_ctl_put,
+			.private_value = 0,
+		}
+	};
+
+	if (!rtd) {
+		pr_err("%s NULL rtd\n", __func__);
+		ret = -EINVAL;
+		goto done;
+	}
+	ctl_len = strlen(playback_mixer_ctl_name) + 1 +
+			strlen(deviceNo) + 1 + strlen(suffix) + 1;
+	mixer_str = kzalloc(ctl_len, GFP_KERNEL);
+	if (!mixer_str) {
+		ret = -ENOMEM;
+		goto done;
+	}
+
+	snprintf(mixer_str, ctl_len, "%s %d %s",
+		playback_mixer_ctl_name, rtd->pcm->device, suffix);
+	device_downmix_control[0].name = mixer_str;
+	device_downmix_control[0].private_value = rtd->dai_link->be_id;
+	ret = snd_soc_add_platform_controls(rtd->platform,
+					device_downmix_control,
+					ARRAY_SIZE(device_downmix_control));
+	if (ret < 0)
+		pr_err("%s: failed to add ctl %s\n", __func__, mixer_str);
+
+	kfree(mixer_str);
+done:
+	return ret;
+}
+
+static int msm_compr_add_app_type_cfg_control(struct snd_soc_pcm_runtime *rtd)
+{
+	const char *playback_mixer_ctl_name	= "Audio Stream";
+	const char *capture_mixer_ctl_name	= "Audio Stream Capture";
+	const char *deviceNo		= "NN";
+	const char *suffix		= "App Type Cfg";
+	char *mixer_str = NULL;
+	int ctl_len;
+	struct snd_kcontrol_new fe_app_type_cfg_control[1] = {
+		{
+		.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+		.name = "?",
+		.access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+		.info = msm_compr_app_type_cfg_info,
+		.put = msm_compr_playback_app_type_cfg_put,
+		.get = msm_compr_playback_app_type_cfg_get,
+		.private_value = 0,
+		}
+	};
+
+	if (!rtd) {
+		pr_err("%s NULL rtd\n", __func__);
+		return 0;
+	}
+
+	pr_debug("%s: added new compr FE ctl with name %s, id %d, cpu dai %s, device no %d\n",
+		__func__, rtd->dai_link->name, rtd->dai_link->be_id,
+			rtd->dai_link->cpu_dai_name, rtd->pcm->device);
+	if (rtd->compr->direction == SND_COMPRESS_PLAYBACK)
+		ctl_len = strlen(playback_mixer_ctl_name) + 1 + strlen(deviceNo)
+			 + 1 + strlen(suffix) + 1;
+	else
+		ctl_len = strlen(capture_mixer_ctl_name) + 1 + strlen(deviceNo)
+			+ 1 + strlen(suffix) + 1;
+
+	mixer_str = kzalloc(ctl_len, GFP_KERNEL);
+
+	if (!mixer_str) {
+		pr_err("%s: failed to allocate mixer ctrl str of len %d\n",
+			__func__, ctl_len);
+		return 0;
+	}
+
+	if (rtd->compr->direction == SND_COMPRESS_PLAYBACK)
+		snprintf(mixer_str, ctl_len, "%s %d %s",
+			 playback_mixer_ctl_name, rtd->pcm->device, suffix);
+	else
+		snprintf(mixer_str, ctl_len, "%s %d %s",
+			 capture_mixer_ctl_name, rtd->pcm->device, suffix);
+
+	fe_app_type_cfg_control[0].name = mixer_str;
+	fe_app_type_cfg_control[0].private_value = rtd->dai_link->be_id;
+
+	if (rtd->compr->direction == SND_COMPRESS_PLAYBACK) {
+		fe_app_type_cfg_control[0].put =
+					 msm_compr_playback_app_type_cfg_put;
+		fe_app_type_cfg_control[0].get =
+					 msm_compr_playback_app_type_cfg_get;
+	} else {
+		fe_app_type_cfg_control[0].put =
+					 msm_compr_capture_app_type_cfg_put;
+		fe_app_type_cfg_control[0].get =
+					 msm_compr_capture_app_type_cfg_get;
+	}
+	pr_debug("Registering new mixer ctl %s\n", mixer_str);
+	snd_soc_add_platform_controls(rtd->platform,
+				fe_app_type_cfg_control,
+				ARRAY_SIZE(fe_app_type_cfg_control));
+	kfree(mixer_str);
+	return 0;
+}
+
+static int msm_compr_add_channel_map_control(struct snd_soc_pcm_runtime *rtd)
+{
+	const char *mixer_ctl_name = "Playback Channel Map";
+	const char *deviceNo       = "NN";
+	char *mixer_str = NULL;
+	struct msm_compr_pdata *pdata = NULL;
+	int ctl_len;
+	struct snd_kcontrol_new fe_channel_map_control[1] = {
+		{
+		.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+		.name = "?",
+		.access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+		.info = msm_compr_channel_map_info,
+		.get = msm_compr_channel_map_get,
+		.put = msm_compr_channel_map_put,
+		.private_value = 0,
+		}
+	};
+
+	if (!rtd) {
+		pr_err("%s: NULL rtd\n", __func__);
+		return -EINVAL;
+	}
+
+	pr_debug("%s: added new compr FE with name %s, id %d, cpu dai %s, device no %d\n",
+		 __func__, rtd->dai_link->name, rtd->dai_link->be_id,
+		 rtd->dai_link->cpu_dai_name, rtd->pcm->device);
+
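+	/* Name format is "<name><NN>" with no separator, so only the NUL +1 */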
+	ctl_len = strlen(mixer_ctl_name) + strlen(deviceNo) + 1;
+	mixer_str = kzalloc(ctl_len, GFP_KERNEL);
+
+	if (!mixer_str) {
+		pr_err("%s: failed to allocate mixer ctrl str of len %d\n",
+			__func__, ctl_len);
+		return -ENOMEM;
+	}
+
+	snprintf(mixer_str, ctl_len, "%s%d", mixer_ctl_name, rtd->pcm->device);
+
+	fe_channel_map_control[0].name = mixer_str;
+	fe_channel_map_control[0].private_value = rtd->dai_link->be_id;
+	pr_debug("%s: Registering new mixer ctl %s\n", __func__, mixer_str);
+	snd_soc_add_platform_controls(rtd->platform,
+				fe_channel_map_control,
+				ARRAY_SIZE(fe_channel_map_control));
+
+	pdata = snd_soc_platform_get_drvdata(rtd->platform);
+	pdata->ch_map[rtd->dai_link->be_id] =
+		 kzalloc(sizeof(struct msm_compr_ch_map), GFP_KERNEL);
+	if (!pdata->ch_map[rtd->dai_link->be_id]) {
+		pr_err("%s: Could not allocate memory for channel map\n",
+			__func__);
+		kfree(mixer_str);
+		return -ENOMEM;
+	}
+	kfree(mixer_str);
+	return 0;
+}
+
+static int msm_compr_add_shm_ion_fd_cmd_control(struct snd_soc_pcm_runtime *rtd)
+{
+	const char *mixer_ctl_name = "Playback ION FD";
+	const char *deviceNo = "NN";
+	char *mixer_str = NULL;
+	int ctl_len = 0, ret = 0;
+	struct snd_kcontrol_new fe_ion_fd_config_control[1] = {
+		{
+		.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+		.name = "?",
+		.access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+		.info = msm_adsp_stream_cmd_info,
+		.put = msm_compr_shm_ion_fd_map_put,
+		.private_value = 0,
+		}
+	};
+
+	if (!rtd) {
+		pr_err("%s NULL rtd\n", __func__);
+		ret = -EINVAL;
+		goto done;
+	}
+
+	ctl_len = strlen(mixer_ctl_name) + 1 + strlen(deviceNo) + 1;
+	mixer_str = kzalloc(ctl_len, GFP_KERNEL);
+	if (!mixer_str) {
+		ret = -ENOMEM;
+		goto done;
+	}
+
+	snprintf(mixer_str, ctl_len, "%s %d", mixer_ctl_name, rtd->pcm->device);
+	fe_ion_fd_config_control[0].name = mixer_str;
+	fe_ion_fd_config_control[0].private_value = rtd->dai_link->be_id;
+	pr_debug("%s: Registering new mixer ctl %s\n", __func__, mixer_str);
+	ret = snd_soc_add_platform_controls(rtd->platform,
+				fe_ion_fd_config_control,
+				ARRAY_SIZE(fe_ion_fd_config_control));
+	if (ret < 0)
+		pr_err("%s: failed to add ctl %s\n", __func__, mixer_str);
+
+	kfree(mixer_str);
+done:
+	return ret;
+}
+
+static int msm_compr_add_lib_ion_fd_cmd_control(struct snd_soc_pcm_runtime *rtd)
+{
+	const char *mixer_ctl_name = "Playback ION LIB FD";
+	const char *deviceNo = "NN";
+	char *mixer_str = NULL;
+	int ctl_len = 0, ret = 0;
+	struct snd_kcontrol_new fe_ion_fd_config_control[1] = {
+		{
+		.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+		.name = "?",
+		.access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+		.info = msm_adsp_stream_cmd_info,
+		.put = msm_compr_lib_ion_fd_map_put,
+		.private_value = 0,
+		}
+	};
+
+	if (!rtd) {
+		pr_err("%s NULL rtd\n", __func__);
+		ret = -EINVAL;
+		goto done;
+	}
+
+	ctl_len = strlen(mixer_ctl_name) + 1 + strlen(deviceNo) + 1;
+	mixer_str = kzalloc(ctl_len, GFP_KERNEL);
+	if (!mixer_str) {
+		ret = -ENOMEM;
+		goto done;
+	}
+
+	snprintf(mixer_str, ctl_len, "%s %d", mixer_ctl_name, rtd->pcm->device);
+	fe_ion_fd_config_control[0].name = mixer_str;
+	fe_ion_fd_config_control[0].private_value = rtd->dai_link->be_id;
+	pr_debug("%s: Registering new mixer ctl %s\n", __func__, mixer_str);
+	ret = snd_soc_add_platform_controls(rtd->platform,
+				fe_ion_fd_config_control,
+				ARRAY_SIZE(fe_ion_fd_config_control));
+	if (ret < 0)
+		pr_err("%s: failed to add ctl %s\n", __func__, mixer_str);
+
+	kfree(mixer_str);
+done:
+	return ret;
+}
+
+static int msm_compr_add_event_ack_cmd_control(struct snd_soc_pcm_runtime *rtd)
+{
+	const char *mixer_ctl_name = "Playback Event Ack";
+	const char *deviceNo = "NN";
+	char *mixer_str = NULL;
+	int ctl_len = 0, ret = 0;
+	struct snd_kcontrol_new fe_event_ack_config_control[1] = {
+		{
+		.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+		.name = "?",
+		.access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+		.info = msm_adsp_stream_cmd_info,
+		.put = msm_compr_rtic_event_ack_put,
+		.private_value = 0,
+		}
+	};
+
+	if (!rtd) {
+		pr_err("%s NULL rtd\n", __func__);
+		ret = -EINVAL;
+		goto done;
+	}
+
+	ctl_len = strlen(mixer_ctl_name) + 1 + strlen(deviceNo) + 1;
+	mixer_str = kzalloc(ctl_len, GFP_KERNEL);
+	if (!mixer_str) {
+		ret = -ENOMEM;
+		goto done;
+	}
+
+	snprintf(mixer_str, ctl_len, "%s %d", mixer_ctl_name, rtd->pcm->device);
+	fe_event_ack_config_control[0].name = mixer_str;
+	fe_event_ack_config_control[0].private_value = rtd->dai_link->be_id;
+	pr_debug("%s: Registering new mixer ctl %s\n", __func__, mixer_str);
+	ret = snd_soc_add_platform_controls(rtd->platform,
+				fe_event_ack_config_control,
+				ARRAY_SIZE(fe_event_ack_config_control));
+	if (ret < 0)
+		pr_err("%s: failed to add ctl %s\n", __func__, mixer_str);
+
+	kfree(mixer_str);
+done:
+	return ret;
+}
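+
+/*
+ * Editorial sketch (illustrative, not part of the original patch): the
+ * three helpers above differ only in the control name and the .put
+ * handler, so they could be folded into one parameterized helper, e.g.
+ * msm_compr_add_cmd_control(rtd, "Playback ION FD",
+ * msm_compr_shm_ion_fd_map_put). The helper name is hypothetical:
+ */
+#if 0
+static int msm_compr_add_cmd_control(struct snd_soc_pcm_runtime *rtd,
+		const char *mixer_ctl_name,
+		int (*put)(struct snd_kcontrol *, struct snd_ctl_elem_value *))
+{
+	const char *deviceNo = "NN";
+	char *mixer_str;
+	int ctl_len, ret;
+	struct snd_kcontrol_new ctl[1] = {
+		{
+		.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+		.name = "?",
+		.access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+		.info = msm_adsp_stream_cmd_info,
+		.put = put,
+		.private_value = 0,
+		}
+	};
+
+	if (!rtd)
+		return -EINVAL;
+
+	ctl_len = strlen(mixer_ctl_name) + 1 + strlen(deviceNo) + 1;
+	mixer_str = kzalloc(ctl_len, GFP_KERNEL);
+	if (!mixer_str)
+		return -ENOMEM;
+
+	snprintf(mixer_str, ctl_len, "%s %d", mixer_ctl_name,
+		 rtd->pcm->device);
+	ctl[0].name = mixer_str;
+	ctl[0].private_value = rtd->dai_link->be_id;
+	ret = snd_soc_add_platform_controls(rtd->platform, ctl,
+					    ARRAY_SIZE(ctl));
+	if (ret < 0)
+		pr_err("%s: failed to add ctl %s\n", __func__, mixer_str);
+	kfree(mixer_str);
+	return ret;
+}
+#endif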
+
+static int msm_compr_new(struct snd_soc_pcm_runtime *rtd)
+{
+	int rc;
+
+	rc = msm_compr_add_volume_control(rtd);
+	if (rc)
+		pr_err("%s: Could not add Compr Volume Control\n", __func__);
+
+	rc = msm_compr_add_audio_effects_control(rtd);
+	if (rc)
+		pr_err("%s: Could not add Compr Audio Effects Control\n",
+			__func__);
+
+	rc = msm_compr_add_audio_adsp_stream_cmd_control(rtd);
+	if (rc)
+		pr_err("%s: Could not add Compr ADSP Stream Cmd Control\n",
+			__func__);
+
+	rc = msm_compr_add_audio_adsp_stream_callback_control(rtd);
+	if (rc)
+		pr_err("%s: Could not add Compr ADSP Stream Callback Control\n",
+			__func__);
+
+	rc = msm_compr_add_shm_ion_fd_cmd_control(rtd);
+	if (rc)
+		pr_err("%s: Could not add Compr ion fd Control\n",
+			__func__);
+
+	rc = msm_compr_add_lib_ion_fd_cmd_control(rtd);
+	if (rc)
+		pr_err("%s: Could not add Compr ion lib fd Control\n",
+			__func__);
+
+	rc = msm_compr_add_event_ack_cmd_control(rtd);
+	if (rc)
+		pr_err("%s: Could not add Compr event ack Control\n",
+			__func__);
+
+	rc = msm_compr_add_device_down_mix_controls(rtd);
+	if (rc)
+		pr_err("%s: Could not add Compr downmix Control\n",
+			__func__);
+
+	rc = msm_compr_add_query_audio_effect_control(rtd);
+	if (rc)
+		pr_err("%s: Could not add Compr Query Audio Effect Control\n",
+			__func__);
+
+	rc = msm_compr_add_dec_runtime_params_control(rtd);
+	if (rc)
+		pr_err("%s: Could not add Compr Dec runtime params Control\n",
+			__func__);
+	rc = msm_compr_add_app_type_cfg_control(rtd);
+	if (rc)
+		pr_err("%s: Could not add Compr App Type Cfg Control\n",
+			__func__);
+	rc = msm_compr_add_channel_map_control(rtd);
+	if (rc)
+		pr_err("%s: Could not add Compr Channel Map Control\n",
+			__func__);
+	return 0;
+}
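+
+/*
+ * Note (editorial): msm_compr_new() logs each control-registration
+ * failure but still attempts the remaining controls and returns 0
+ * unconditionally, so a single failing control does not abort pcm_new();
+ * the per-call rc values are only used for logging.
+ */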
+
+static struct snd_compr_ops msm_compr_ops = {
+	.open			= msm_compr_open,
+	.free			= msm_compr_free,
+	.trigger		= msm_compr_trigger,
+	.pointer		= msm_compr_pointer,
+	.set_params		= msm_compr_set_params,
+	.set_metadata		= msm_compr_set_metadata,
+	.get_metadata		= msm_compr_get_metadata,
+	.set_next_track_param	= msm_compr_set_next_track_param,
+	.ack			= msm_compr_ack,
+	.copy			= msm_compr_copy,
+	.get_caps		= msm_compr_get_caps,
+	.get_codec_caps		= msm_compr_get_codec_caps,
+};
+
+static struct snd_soc_platform_driver msm_soc_platform = {
+	.probe		= msm_compr_probe,
+	.compr_ops	= &msm_compr_ops,
+	.pcm_new	= msm_compr_new,
+};
+
+static int msm_compr_dev_probe(struct platform_device *pdev)
+{
+	pr_debug("%s: dev name %s\n", __func__, dev_name(&pdev->dev));
+	return snd_soc_register_platform(&pdev->dev,
+					&msm_soc_platform);
+}
+
+static int msm_compr_remove(struct platform_device *pdev)
+{
+	snd_soc_unregister_platform(&pdev->dev);
+	return 0;
+}
+
+static const struct of_device_id msm_compr_dt_match[] = {
+	{.compatible = "qcom,msm-compress-dsp"},
+	{}
+};
+MODULE_DEVICE_TABLE(of, msm_compr_dt_match);
+
+static struct platform_driver msm_compr_driver = {
+	.driver = {
+		.name = "msm-compress-dsp",
+		.owner = THIS_MODULE,
+		.of_match_table = msm_compr_dt_match,
+	},
+	.probe = msm_compr_dev_probe,
+	.remove = msm_compr_remove,
+};
+
+static int __init msm_soc_platform_init(void)
+{
+	return platform_driver_register(&msm_compr_driver);
+}
+module_init(msm_soc_platform_init);
+
+static void __exit msm_soc_platform_exit(void)
+{
+	platform_driver_unregister(&msm_compr_driver);
+}
+module_exit(msm_soc_platform_exit);
+
+MODULE_DESCRIPTION("Compress Offload platform driver");
+MODULE_LICENSE("GPL v2");
diff -Nruw linux-4.4.115-fbx/sound/soc/msm./qdsp6v2/msm-dai-q6-hdmi-v2.c linux-4.4.115-fbx/sound/soc/msm/qdsp6v2/msm-dai-q6-hdmi-v2.c
--- linux-4.4.115-fbx/sound/soc/msm./qdsp6v2/msm-dai-q6-hdmi-v2.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/sound/soc/msm/qdsp6v2/msm-dai-q6-hdmi-v2.c	2019-01-22 16:16:29.623301827 +0100
@@ -0,0 +1,554 @@
+/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/bitops.h>
+#include <linux/slab.h>
+#include <linux/of_device.h>
+#include <sound/core.h>
+#include <sound/pcm.h>
+#include <sound/soc.h>
+#include <sound/apr_audio-v2.h>
+#include <sound/q6afe-v2.h>
+#include <sound/msm-dai-q6-v2.h>
+#include <sound/pcm_params.h>
+
+#define HDMI_RX_CA_MAX 0x32
+
+enum {
+	STATUS_PORT_STARTED, /* track if AFE port has started */
+	STATUS_MAX
+};
+
+struct msm_ext_disp_ca {
+	bool set_ca;
+	u32 ca;
+};
+
+struct msm_dai_q6_hdmi_dai_data {
+	DECLARE_BITMAP(status_mask, STATUS_MAX);
+	u32 rate;
+	u32 channels;
+	struct msm_ext_disp_ca ca;
+	union afe_port_config port_config;
+};
+
+static int msm_dai_q6_ext_disp_format_put(struct snd_kcontrol *kcontrol,
+					  struct snd_ctl_elem_value *ucontrol)
+{
+	struct msm_dai_q6_hdmi_dai_data *dai_data = kcontrol->private_data;
+	int value = ucontrol->value.integer.value[0];
+
+	if (!dai_data) {
+		pr_err("%s: dai_data is NULL\n", __func__);
+		return -EINVAL;
+	}
+
+	dai_data->port_config.hdmi_multi_ch.datatype = value;
+	pr_debug("%s: value = %d\n", __func__, value);
+
+	return 0;
+}
+
+static int msm_dai_q6_ext_disp_format_get(struct snd_kcontrol *kcontrol,
+					  struct snd_ctl_elem_value *ucontrol)
+{
+	struct msm_dai_q6_hdmi_dai_data *dai_data = kcontrol->private_data;
+
+	if (!dai_data) {
+		pr_err("%s: dai_data is NULL\n", __func__);
+		return -EINVAL;
+	}
+
+	ucontrol->value.integer.value[0] =
+		dai_data->port_config.hdmi_multi_ch.datatype;
+	pr_debug("%s: value = %ld\n",
+		 __func__, ucontrol->value.integer.value[0]);
+
+	return 0;
+}
+
+static int msm_dai_q6_ext_disp_ca_put(struct snd_kcontrol *kcontrol,
+				      struct snd_ctl_elem_value *ucontrol)
+{
+	struct msm_dai_q6_hdmi_dai_data *dai_data = kcontrol->private_data;
+
+	if (!dai_data) {
+		pr_err("%s: dai_data is NULL\n", __func__);
+		return -EINVAL;
+	}
+
+	dai_data->ca.ca = ucontrol->value.integer.value[0];
+	dai_data->ca.set_ca = true;
+	pr_debug("%s: ca = %d\n", __func__, dai_data->ca.ca);
+	return 0;
+}
+
+static int msm_dai_q6_ext_disp_ca_get(struct snd_kcontrol *kcontrol,
+				struct snd_ctl_elem_value *ucontrol)
+{
+	struct msm_dai_q6_hdmi_dai_data *dai_data = kcontrol->private_data;
+
+	if (!dai_data) {
+		pr_err("%s: dai_data is NULL\n", __func__);
+		return -EINVAL;
+	}
+
+	ucontrol->value.integer.value[0] = dai_data->ca.ca;
+	pr_debug("%s: ca = %d\n", __func__, dai_data->ca.ca);
+	return 0;
+}
+
+/* HDMI format field for AFE_PORT_MULTI_CHAN_HDMI_AUDIO_IF_CONFIG command
+ *  0: linear PCM
+ *  1: non-linear PCM
+ */
+static const char * const hdmi_format[] = {
+	"LPCM",
+	"Compr"
+};
+
+static const struct soc_enum hdmi_config_enum[] = {
+	SOC_ENUM_SINGLE_EXT(2, hdmi_format),
+};
+
+static int msm_dai_q6_ext_disp_drift_info(struct snd_kcontrol *kcontrol,
+				    struct snd_ctl_elem_info *uinfo)
+{
+	uinfo->type = SNDRV_CTL_ELEM_TYPE_BYTES;
+	uinfo->count = sizeof(struct afe_param_id_dev_timing_stats);
+
+	return 0;
+}
+
+static int msm_dai_q6_ext_disp_drift_get(struct snd_kcontrol *kcontrol,
+		struct snd_ctl_elem_value *ucontrol)
+{
+	int ret = -EINVAL;
+	struct afe_param_id_dev_timing_stats timing_stats;
+	struct snd_soc_dai *dai = kcontrol->private_data;
+	struct msm_dai_q6_hdmi_dai_data *dai_data = dev_get_drvdata(dai->dev);
+
+	if (!test_bit(STATUS_PORT_STARTED, dai_data->status_mask)) {
+		pr_debug("%s:  afe port not started. status_mask = %ld\n",
+			 __func__, *dai_data->status_mask);
+		goto done;
+	}
+
+	memset(&timing_stats, 0, sizeof(struct afe_param_id_dev_timing_stats));
+	ret = afe_get_av_dev_drift(&timing_stats, dai->id);
+	if (ret) {
+		pr_debug("%s: Error getting AFE Drift for port %d, err=%d\n",
+			 __func__, dai->id, ret);
+		ret = -EINVAL;
+		goto done;
+	}
+
+	memcpy(ucontrol->value.bytes.data, (void *)&timing_stats,
+	       sizeof(struct afe_param_id_dev_timing_stats));
+done:
+	return ret;
+}
+
+static const struct snd_kcontrol_new hdmi_config_controls[] = {
+	SOC_ENUM_EXT("HDMI RX Format", hdmi_config_enum[0],
+				 msm_dai_q6_ext_disp_format_get,
+				 msm_dai_q6_ext_disp_format_put),
+	SOC_SINGLE_MULTI_EXT("HDMI RX CA", SND_SOC_NOPM, 0,
+				 HDMI_RX_CA_MAX, 0, 1,
+				 msm_dai_q6_ext_disp_ca_get,
+				 msm_dai_q6_ext_disp_ca_put),
+	{
+		.access = SNDRV_CTL_ELEM_ACCESS_READ,
+		.iface	= SNDRV_CTL_ELEM_IFACE_PCM,
+		.name	= "HDMI DRIFT",
+		.info	= msm_dai_q6_ext_disp_drift_info,
+		.get	= msm_dai_q6_ext_disp_drift_get,
+	},
+};
+
+static const struct snd_kcontrol_new display_port_config_controls[] = {
+	SOC_ENUM_EXT("Display Port RX Format", hdmi_config_enum[0],
+				 msm_dai_q6_ext_disp_format_get,
+				 msm_dai_q6_ext_disp_format_put),
+	SOC_SINGLE_MULTI_EXT("Display Port RX CA", SND_SOC_NOPM, 0,
+				 HDMI_RX_CA_MAX, 0, 1,
+				 msm_dai_q6_ext_disp_ca_get,
+				 msm_dai_q6_ext_disp_ca_put),
+	{
+		.access = SNDRV_CTL_ELEM_ACCESS_READ,
+		.iface	= SNDRV_CTL_ELEM_IFACE_PCM,
+		.name	= "DISPLAY_PORT DRIFT",
+		.info	= msm_dai_q6_ext_disp_drift_info,
+		.get	= msm_dai_q6_ext_disp_drift_get,
+	},
+};
+
+/* The current implementation assumes hw_params is called only once.
+ * That may not hold in practice, and it is an open question how to
+ * handle a parameter change once the ADM and AFE ports are already
+ * open.
+ */
+static int msm_dai_q6_hdmi_hw_params(struct snd_pcm_substream *substream,
+				struct snd_pcm_hw_params *params,
+				struct snd_soc_dai *dai)
+{
+	struct msm_dai_q6_hdmi_dai_data *dai_data = dev_get_drvdata(dai->dev);
+
+	dai_data->channels = params_channels(params);
+	dai_data->rate = params_rate(params);
+	dai_data->port_config.hdmi_multi_ch.reserved = 0;
+	dai_data->port_config.hdmi_multi_ch.hdmi_cfg_minor_version = 1;
+	dai_data->port_config.hdmi_multi_ch.sample_rate = dai_data->rate;
+	switch (params_format(params)) {
+	case SNDRV_PCM_FORMAT_S16_LE:
+		dai_data->port_config.hdmi_multi_ch.bit_width = 16;
+		break;
+	case SNDRV_PCM_FORMAT_S24_LE:
+		dai_data->port_config.hdmi_multi_ch.bit_width = 24;
+		break;
+	}
+
+	/* Refer to HDMI spec CEA-861-E, Table 28: Audio InfoFrame Data Byte 4 */
+	switch (dai_data->channels) {
+	case 2:
+		dai_data->port_config.hdmi_multi_ch.channel_allocation = 0;
+		break;
+	case 3:
+		dai_data->port_config.hdmi_multi_ch.channel_allocation = 0x02;
+		break;
+	case 4:
+		dai_data->port_config.hdmi_multi_ch.channel_allocation = 0x06;
+		break;
+	case 5:
+		dai_data->port_config.hdmi_multi_ch.channel_allocation = 0x0A;
+		break;
+	case 6:
+		dai_data->port_config.hdmi_multi_ch.channel_allocation = 0x0B;
+		break;
+	case 7:
+		dai_data->port_config.hdmi_multi_ch.channel_allocation = 0x12;
+		break;
+	case 8:
+		dai_data->port_config.hdmi_multi_ch.channel_allocation = 0x13;
+		break;
+	default:
+		dev_err(dai->dev, "invalid Channels = %u\n",
+				dai_data->channels);
+		return -EINVAL;
+	}
+	dev_dbg(dai->dev, "%s() minor version: %u samplerate: %u bitwidth: %u\n"
+		"num_ch = %u channel_allocation = %u datatype = %d\n", __func__,
+		dai_data->port_config.hdmi_multi_ch.hdmi_cfg_minor_version,
+		dai_data->port_config.hdmi_multi_ch.sample_rate,
+		dai_data->port_config.hdmi_multi_ch.bit_width,
+		dai_data->channels,
+		dai_data->port_config.hdmi_multi_ch.channel_allocation,
+		dai_data->port_config.hdmi_multi_ch.datatype);
+
+	return 0;
+}
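+
+/*
+ * Editorial sketch (illustrative, not part of the original patch): the
+ * channel-count switch above encodes the CEA-861-E Table 28 default
+ * channel allocations (e.g. 6 channels -> CA 0x0B, i.e. 5.1). The same
+ * mapping can be written as a lookup table indexed by (channels - 2):
+ */
+#if 0
+static const u32 hdmi_default_ca[] = {
+	[0] = 0x00,	/* 2 channels */
+	[1] = 0x02,	/* 3 channels */
+	[2] = 0x06,	/* 4 channels */
+	[3] = 0x0A,	/* 5 channels */
+	[4] = 0x0B,	/* 6 channels */
+	[5] = 0x12,	/* 7 channels */
+	[6] = 0x13,	/* 8 channels */
+};
+
+/* valid only for 2 <= channels <= 8 */
+#define HDMI_DEFAULT_CA(ch)	(hdmi_default_ca[(ch) - 2])
+#endif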
+
+static void msm_dai_q6_hdmi_shutdown(struct snd_pcm_substream *substream,
+				struct snd_soc_dai *dai)
+{
+	struct msm_dai_q6_hdmi_dai_data *dai_data = dev_get_drvdata(dai->dev);
+	int rc = 0;
+
+	if (!test_bit(STATUS_PORT_STARTED, dai_data->status_mask)) {
+		pr_info("%s:  afe port not started. dai_data->status_mask = %ld\n",
+		 __func__, *dai_data->status_mask);
+		return;
+	}
+
+	rc = afe_close(dai->id); /* can block */
+
+	if (IS_ERR_VALUE(rc))
+		dev_err(dai->dev, "fail to close AFE port\n");
+
+	pr_debug("%s: dai_data->status_mask = %ld\n", __func__,
+			*dai_data->status_mask);
+
+	clear_bit(STATUS_PORT_STARTED, dai_data->status_mask);
+	memset(&dai_data->ca, 0, sizeof(dai_data->ca));
+}
+
+static int msm_dai_q6_hdmi_prepare(struct snd_pcm_substream *substream,
+		struct snd_soc_dai *dai)
+{
+	struct msm_dai_q6_hdmi_dai_data *dai_data = dev_get_drvdata(dai->dev);
+	int rc = 0;
+
+	if (dai_data->ca.set_ca)
+		dai_data->port_config.hdmi_multi_ch.channel_allocation =
+							      dai_data->ca.ca;
+
+	if (!test_bit(STATUS_PORT_STARTED, dai_data->status_mask)) {
+		rc = afe_port_start(dai->id, &dai_data->port_config,
+				    dai_data->rate);
+		if (IS_ERR_VALUE(rc))
+			dev_err(dai->dev, "fail to open AFE port %x\n",
+				dai->id);
+		else
+			set_bit(STATUS_PORT_STARTED,
+				dai_data->status_mask);
+	}
+
+	return rc;
+}
+
+static inline void msm_dai_q6_hdmi_set_dai_id(struct snd_soc_dai *dai)
+{
+	if (!dai->driver->id) {
+		dev_warn(dai->dev, "DAI driver id is not set\n");
+		return;
+	}
+	dai->id = dai->driver->id;
+}
+
+static int msm_dai_q6_hdmi_dai_probe(struct snd_soc_dai *dai)
+{
+	struct msm_dai_q6_hdmi_dai_data *dai_data;
+	const struct snd_kcontrol_new *kcontrol;
+	int rc = 0;
+	struct snd_soc_dapm_route intercon;
+	struct snd_soc_dapm_context *dapm;
+
+	if (!dai || !dai->driver) {
+		pr_err("%s: dai or dai->driver is NULL\n", __func__);
+		return -EINVAL;
+	}
+	dai_data = kzalloc(sizeof(*dai_data), GFP_KERNEL);
+	if (!dai_data) {
+		dev_err(dai->dev, "DAI-%d: fail to allocate dai data\n",
+			dai->id);
+		return -ENOMEM;
+	}
+	dev_set_drvdata(dai->dev, dai_data);
+
+	msm_dai_q6_hdmi_set_dai_id(dai);
+
+	if (dai->driver->id == HDMI_RX) {
+		kcontrol = &hdmi_config_controls[0];
+		rc = snd_ctl_add(dai->component->card->snd_card,
+				 snd_ctl_new1(kcontrol, dai_data));
+
+		kcontrol = &hdmi_config_controls[1];
+		rc = snd_ctl_add(dai->component->card->snd_card,
+				 snd_ctl_new1(kcontrol, dai_data));
+
+		kcontrol = &hdmi_config_controls[2];
+		rc = snd_ctl_add(dai->component->card->snd_card,
+				 snd_ctl_new1(kcontrol, dai));
+	} else if (dai->driver->id == DISPLAY_PORT_RX) {
+		kcontrol = &display_port_config_controls[0];
+		rc = snd_ctl_add(dai->component->card->snd_card,
+				 snd_ctl_new1(kcontrol, dai_data));
+
+		kcontrol = &display_port_config_controls[1];
+		rc = snd_ctl_add(dai->component->card->snd_card,
+				 snd_ctl_new1(kcontrol, dai_data));
+
+		kcontrol = &display_port_config_controls[2];
+		rc = snd_ctl_add(dai->component->card->snd_card,
+				snd_ctl_new1(kcontrol, dai));
+	} else {
+		dev_err(dai->dev, "%s: Invalid id:%d\n",
+			__func__, dai->driver->id);
+		kfree(dai_data);
+		dev_set_drvdata(dai->dev, NULL);
+		return -EINVAL;
+	}
+
+	dapm = snd_soc_component_get_dapm(dai->component);
+	memset(&intercon, 0 , sizeof(intercon));
+	if (!rc) {
+		if (dai->driver->playback.stream_name &&
+			dai->driver->playback.aif_name) {
+			dev_dbg(dai->dev, "%s add route for widget %s",
+				   __func__, dai->driver->playback.stream_name);
+			intercon.source = dai->driver->playback.aif_name;
+			intercon.sink = dai->driver->playback.stream_name;
+			dev_dbg(dai->dev, "%s src %s sink %s\n",
+				   __func__, intercon.source, intercon.sink);
+			snd_soc_dapm_add_routes(dapm, &intercon, 1);
+		}
+		if (dai->driver->capture.stream_name &&
+		   dai->driver->capture.aif_name) {
+			dev_dbg(dai->dev, "%s add route for widget %s",
+				   __func__, dai->driver->capture.stream_name);
+			intercon.sink = dai->driver->capture.aif_name;
+			intercon.source = dai->driver->capture.stream_name;
+			dev_dbg(dai->dev, "%s src %s sink %s\n",
+				   __func__, intercon.source, intercon.sink);
+			snd_soc_dapm_add_routes(dapm, &intercon, 1);
+		}
+	}
+	return rc;
+}
+
+static int msm_dai_q6_hdmi_dai_remove(struct snd_soc_dai *dai)
+{
+	struct msm_dai_q6_hdmi_dai_data *dai_data;
+	int rc;
+
+	dai_data = dev_get_drvdata(dai->dev);
+
+	/* If AFE port is still up, close it */
+	if (test_bit(STATUS_PORT_STARTED, dai_data->status_mask)) {
+		rc = afe_close(dai->id); /* can block */
+
+		if (IS_ERR_VALUE(rc))
+			dev_err(dai->dev, "fail to close AFE port\n");
+
+		clear_bit(STATUS_PORT_STARTED, dai_data->status_mask);
+	}
+	kfree(dai_data);
+
+	return 0;
+}
+
+static struct snd_soc_dai_ops msm_dai_q6_hdmi_ops = {
+	.prepare	= msm_dai_q6_hdmi_prepare,
+	.hw_params	= msm_dai_q6_hdmi_hw_params,
+	.shutdown	= msm_dai_q6_hdmi_shutdown,
+};
+
+static struct snd_soc_dai_driver msm_dai_q6_hdmi_hdmi_rx_dai = {
+	.playback = {
+		.stream_name = "HDMI Playback",
+		.aif_name = "HDMI",
+		.rates = SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_44100 |
+			 SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_88200 |
+			 SNDRV_PCM_RATE_96000 | SNDRV_PCM_RATE_176400 |
+			 SNDRV_PCM_RATE_192000,
+		.formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE,
+		.channels_min = 2,
+		.channels_max = 8,
+		.rate_max = 192000,
+		.rate_min = 48000,
+	},
+	.ops = &msm_dai_q6_hdmi_ops,
+	.id = HDMI_RX,
+	.probe = msm_dai_q6_hdmi_dai_probe,
+	.remove = msm_dai_q6_hdmi_dai_remove,
+};
+
+static struct snd_soc_dai_driver msm_dai_q6_display_port_rx_dai[] = {
+	{
+		.playback = {
+			.stream_name = "Display Port Playback",
+			.aif_name = "DISPLAY_PORT",
+			.rates = SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_44100 |
+				 SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_88200 |
+				 SNDRV_PCM_RATE_96000 | SNDRV_PCM_RATE_176400 |
+				 SNDRV_PCM_RATE_192000,
+			.formats = SNDRV_PCM_FMTBIT_S16_LE |
+				   SNDRV_PCM_FMTBIT_S24_LE,
+			.channels_min = 2,
+			.channels_max = 8,
+			.rate_max =     192000,
+			.rate_min =     48000,
+		},
+		.ops = &msm_dai_q6_hdmi_ops,
+		.id = DISPLAY_PORT_RX,
+		.probe = msm_dai_q6_hdmi_dai_probe,
+		.remove = msm_dai_q6_hdmi_dai_remove,
+	},
+};
+
+static const struct snd_soc_component_driver msm_dai_hdmi_q6_component = {
+	.name		= "msm-dai-q6-hdmi",
+};
+
+/* TODO: change to register DAIs as a batch */
+static int msm_dai_q6_hdmi_dev_probe(struct platform_device *pdev)
+{
+	int rc, id;
+	const char *q6_dev_id = "qcom,msm-dai-q6-dev-id";
+
+	rc = of_property_read_u32(pdev->dev.of_node, q6_dev_id, &id);
+	if (rc) {
+		dev_err(&pdev->dev,
+			"%s: missing %s in dt node\n", __func__, q6_dev_id);
+		return rc;
+	}
+
+	pdev->id = id;
+
+	pr_debug("%s: dev name %s, id:%d\n", __func__,
+			dev_name(&pdev->dev), pdev->id);
+
+	switch (pdev->id) {
+	case HDMI_RX:
+		rc = snd_soc_register_component(&pdev->dev,
+			&msm_dai_hdmi_q6_component,
+			&msm_dai_q6_hdmi_hdmi_rx_dai, 1);
+		break;
+	case DISPLAY_PORT_RX:
+		rc = snd_soc_register_component(&pdev->dev,
+			&msm_dai_hdmi_q6_component,
+			&msm_dai_q6_display_port_rx_dai[0],
+			ARRAY_SIZE(msm_dai_q6_display_port_rx_dai));
+		break;
+	default:
+		dev_err(&pdev->dev, "invalid device ID %d\n", pdev->id);
+		rc = -ENODEV;
+		break;
+	}
+	return rc;
+}
+
+static int msm_dai_q6_hdmi_dev_remove(struct platform_device *pdev)
+{
+	snd_soc_unregister_component(&pdev->dev);
+	return 0;
+}
+
+static const struct of_device_id msm_dai_q6_hdmi_dt_match[] = {
+	{.compatible = "qcom,msm-dai-q6-hdmi"},
+	{}
+};
+MODULE_DEVICE_TABLE(of, msm_dai_q6_hdmi_dt_match);
+
+static struct platform_driver msm_dai_q6_hdmi_driver = {
+	.probe  = msm_dai_q6_hdmi_dev_probe,
+	.remove = msm_dai_q6_hdmi_dev_remove,
+	.driver = {
+		.name = "msm-dai-q6-hdmi",
+		.owner = THIS_MODULE,
+		.of_match_table = msm_dai_q6_hdmi_dt_match,
+	},
+};
+
+static int __init msm_dai_q6_hdmi_init(void)
+{
+	return platform_driver_register(&msm_dai_q6_hdmi_driver);
+}
+module_init(msm_dai_q6_hdmi_init);
+
+static void __exit msm_dai_q6_hdmi_exit(void)
+{
+	platform_driver_unregister(&msm_dai_q6_hdmi_driver);
+}
+module_exit(msm_dai_q6_hdmi_exit);
+
+/* Module information */
+MODULE_DESCRIPTION("MSM DSP HDMI DAI driver");
+MODULE_LICENSE("GPL v2");
diff -Nruw linux-4.4.115-fbx/sound/soc/msm./qdsp6v2/msm-dai-q6-v2.c linux-4.4.115-fbx/sound/soc/msm/qdsp6v2/msm-dai-q6-v2.c
--- linux-4.4.115-fbx/sound/soc/msm./qdsp6v2/msm-dai-q6-v2.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/sound/soc/msm/qdsp6v2/msm-dai-q6-v2.c	2019-10-29 09:26:26.149227702 +0100
@@ -0,0 +1,8332 @@
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/mfd/wcd9xxx/core.h>
+#include <linux/bitops.h>
+#include <linux/slab.h>
+#include <linux/clk.h>
+#include <linux/of_device.h>
+#include <linux/clk/msm-clk.h>
+#include <sound/core.h>
+#include <sound/pcm.h>
+#include <sound/soc.h>
+#include <sound/apr_audio-v2.h>
+#include <sound/q6afe-v2.h>
+#include <sound/msm-dai-q6-v2.h>
+#include <sound/pcm_params.h>
+
+#define MSM_DAI_PRI_AUXPCM_DT_DEV_ID 1
+#define MSM_DAI_SEC_AUXPCM_DT_DEV_ID 2
+#define MSM_DAI_TERT_AUXPCM_DT_DEV_ID 3
+#define MSM_DAI_QUAT_AUXPCM_DT_DEV_ID 4
+
+#define spdif_clock_value(rate) (2 * (rate) * 32 * 2)
+#define CHANNEL_STATUS_SIZE 24
+#define CHANNEL_STATUS_MASK_INIT 0x0
+#define CHANNEL_STATUS_MASK 0x4
+#define AFE_API_VERSION_CLOCK_SET 1
+
+#define DAI_FORMATS_S16_S24_S32_LE (SNDRV_PCM_FMTBIT_S16_LE | \
+				    SNDRV_PCM_FMTBIT_S24_LE | \
+				    SNDRV_PCM_FMTBIT_S32_LE)
+
+enum {
+	ENC_FMT_NONE,
+	ENC_FMT_SBC = ASM_MEDIA_FMT_SBC,
+	ENC_FMT_AAC_V2 = ASM_MEDIA_FMT_AAC_V2,
+	ENC_FMT_APTX = ASM_MEDIA_FMT_APTX,
+	ENC_FMT_APTX_HD = ASM_MEDIA_FMT_APTX_HD,
+};
+
+enum {
+	SPKR_1,
+	SPKR_2,
+};
+
+static const struct afe_clk_set lpass_clk_set_default = {
+	AFE_API_VERSION_CLOCK_SET,
+	Q6AFE_LPASS_CLK_ID_PRI_PCM_IBIT,
+	Q6AFE_LPASS_OSR_CLK_2_P048_MHZ,
+	Q6AFE_LPASS_CLK_ATTRIBUTE_COUPLE_NO,
+	Q6AFE_LPASS_CLK_ROOT_DEFAULT,
+	0,
+};
+
+static const struct afe_clk_cfg lpass_clk_cfg_default = {
+	AFE_API_VERSION_I2S_CONFIG,
+	Q6AFE_LPASS_OSR_CLK_2_P048_MHZ,
+	0,
+	Q6AFE_LPASS_CLK_SRC_INTERNAL,
+	Q6AFE_LPASS_CLK_ROOT_DEFAULT,
+	Q6AFE_LPASS_MODE_CLK1_VALID,
+	0,
+};
+enum {
+	STATUS_PORT_STARTED, /* track if AFE port has started */
+	/* track AFE Tx port status for bi-directional transfers */
+	STATUS_TX_PORT,
+	/* track AFE Rx port status for bi-directional transfers */
+	STATUS_RX_PORT,
+	STATUS_MAX
+};
+
+enum {
+	RATE_8KHZ,
+	RATE_16KHZ,
+	RATE_MAX_NUM_OF_AUX_PCM_RATES,
+};
+
+enum {
+	IDX_PRIMARY_TDM_RX_0,
+	IDX_PRIMARY_TDM_RX_1,
+	IDX_PRIMARY_TDM_RX_2,
+	IDX_PRIMARY_TDM_RX_3,
+	IDX_PRIMARY_TDM_RX_4,
+	IDX_PRIMARY_TDM_RX_5,
+	IDX_PRIMARY_TDM_RX_6,
+	IDX_PRIMARY_TDM_RX_7,
+	IDX_PRIMARY_TDM_TX_0,
+	IDX_PRIMARY_TDM_TX_1,
+	IDX_PRIMARY_TDM_TX_2,
+	IDX_PRIMARY_TDM_TX_3,
+	IDX_PRIMARY_TDM_TX_4,
+	IDX_PRIMARY_TDM_TX_5,
+	IDX_PRIMARY_TDM_TX_6,
+	IDX_PRIMARY_TDM_TX_7,
+	IDX_SECONDARY_TDM_RX_0,
+	IDX_SECONDARY_TDM_RX_1,
+	IDX_SECONDARY_TDM_RX_2,
+	IDX_SECONDARY_TDM_RX_3,
+	IDX_SECONDARY_TDM_RX_4,
+	IDX_SECONDARY_TDM_RX_5,
+	IDX_SECONDARY_TDM_RX_6,
+	IDX_SECONDARY_TDM_RX_7,
+	IDX_SECONDARY_TDM_TX_0,
+	IDX_SECONDARY_TDM_TX_1,
+	IDX_SECONDARY_TDM_TX_2,
+	IDX_SECONDARY_TDM_TX_3,
+	IDX_SECONDARY_TDM_TX_4,
+	IDX_SECONDARY_TDM_TX_5,
+	IDX_SECONDARY_TDM_TX_6,
+	IDX_SECONDARY_TDM_TX_7,
+	IDX_TERTIARY_TDM_RX_0,
+	IDX_TERTIARY_TDM_RX_1,
+	IDX_TERTIARY_TDM_RX_2,
+	IDX_TERTIARY_TDM_RX_3,
+	IDX_TERTIARY_TDM_RX_4,
+	IDX_TERTIARY_TDM_RX_5,
+	IDX_TERTIARY_TDM_RX_6,
+	IDX_TERTIARY_TDM_RX_7,
+	IDX_TERTIARY_TDM_TX_0,
+	IDX_TERTIARY_TDM_TX_1,
+	IDX_TERTIARY_TDM_TX_2,
+	IDX_TERTIARY_TDM_TX_3,
+	IDX_TERTIARY_TDM_TX_4,
+	IDX_TERTIARY_TDM_TX_5,
+	IDX_TERTIARY_TDM_TX_6,
+	IDX_TERTIARY_TDM_TX_7,
+	IDX_QUATERNARY_TDM_RX_0,
+	IDX_QUATERNARY_TDM_RX_1,
+	IDX_QUATERNARY_TDM_RX_2,
+	IDX_QUATERNARY_TDM_RX_3,
+	IDX_QUATERNARY_TDM_RX_4,
+	IDX_QUATERNARY_TDM_RX_5,
+	IDX_QUATERNARY_TDM_RX_6,
+	IDX_QUATERNARY_TDM_RX_7,
+	IDX_QUATERNARY_TDM_TX_0,
+	IDX_QUATERNARY_TDM_TX_1,
+	IDX_QUATERNARY_TDM_TX_2,
+	IDX_QUATERNARY_TDM_TX_3,
+	IDX_QUATERNARY_TDM_TX_4,
+	IDX_QUATERNARY_TDM_TX_5,
+	IDX_QUATERNARY_TDM_TX_6,
+	IDX_QUATERNARY_TDM_TX_7,
+	IDX_TDM_MAX,
+};
+
+enum {
+	IDX_GROUP_PRIMARY_TDM_RX,
+	IDX_GROUP_PRIMARY_TDM_TX,
+	IDX_GROUP_SECONDARY_TDM_RX,
+	IDX_GROUP_SECONDARY_TDM_TX,
+	IDX_GROUP_TERTIARY_TDM_RX,
+	IDX_GROUP_TERTIARY_TDM_TX,
+	IDX_GROUP_QUATERNARY_TDM_RX,
+	IDX_GROUP_QUATERNARY_TDM_TX,
+	IDX_GROUP_TDM_MAX,
+};
+
+struct msm_dai_q6_dai_data {
+	DECLARE_BITMAP(status_mask, STATUS_MAX);
+	DECLARE_BITMAP(hwfree_status, STATUS_MAX);
+	u32 rate;
+	u32 channels;
+	u32 bitwidth;
+	u32 cal_mode;
+	u32 afe_in_channels;
+	u16 afe_in_bitformat;
+	struct afe_enc_config enc_config;
+	union afe_port_config port_config;
+	u16 vi_feed_mono;
+};
+
+struct msm_dai_q6_spdif_dai_data {
+	DECLARE_BITMAP(status_mask, STATUS_MAX);
+	u32 rate;
+	u32 channels;
+	u32 bitwidth;
+	struct afe_spdif_port_config spdif_port;
+};
+
+struct msm_dai_q6_mi2s_dai_config {
+	u16 pdata_mi2s_lines;
+	struct msm_dai_q6_dai_data mi2s_dai_data;
+};
+
+struct msm_dai_q6_mi2s_dai_data {
+	struct msm_dai_q6_mi2s_dai_config tx_dai;
+	struct msm_dai_q6_mi2s_dai_config rx_dai;
+};
+
+struct msm_dai_q6_auxpcm_dai_data {
+	/* BITMAP to track Rx and Tx port usage count */
+	DECLARE_BITMAP(auxpcm_port_status, STATUS_MAX);
+	struct mutex rlock; /* auxpcm dev resource lock */
+	u16 rx_pid; /* AUXPCM RX AFE port ID */
+	u16 tx_pid; /* AUXPCM TX AFE port ID */
+	u16 afe_clk_ver;
+	struct afe_clk_cfg clk_cfg; /* hold LPASS clock configuration */
+	struct afe_clk_set clk_set; /* hold LPASS clock configuration */
+	struct msm_dai_q6_dai_data bdai_data; /* incorporate base DAI data */
+};
+
+struct msm_dai_q6_tdm_dai_data {
+	DECLARE_BITMAP(status_mask, STATUS_MAX);
+	u32 rate;
+	u32 channels;
+	u32 bitwidth;
+	u32 num_group_ports;
+	struct afe_clk_set clk_set; /* hold LPASS clock config. */
+	union afe_port_group_config group_cfg; /* hold tdm group config */
+	struct afe_tdm_port_config port_cfg; /* hold tdm config */
+};
+
+/* MI2S format field for AFE_PORT_CMD_I2S_CONFIG command
+ *  0: linear PCM
+ *  1: non-linear PCM
+ *  2: PCM data in IEC 60958 container
+ *  3: compressed data in IEC 60958 container
+ */
+static const char *const mi2s_format[] = {
+	"LPCM",
+	"Compr",
+	"LPCM-60958",
+	"Compr-60958"
+};
+
+static const char *const mi2s_vi_feed_mono[] = {
+	"Left",
+	"Right",
+};
+
+static const struct soc_enum mi2s_config_enum[] = {
+	SOC_ENUM_SINGLE_EXT(4, mi2s_format),
+	SOC_ENUM_SINGLE_EXT(2, mi2s_vi_feed_mono),
+};
+
+static const char *const sb_format[] = {
+	"UNPACKED",
+	"PACKED_16B",
+	"DSD_DOP",
+};
+
+static const struct soc_enum sb_config_enum[] = {
+	SOC_ENUM_SINGLE_EXT(3, sb_format),
+};
+
+static const char *const tdm_data_format[] = {
+	"LPCM",
+	"Compr",
+	"Gen Compr"
+};
+
+static const char *const tdm_header_type[] = {
+	"Invalid",
+	"Default",
+	"Entertainment",
+};
+
+static const struct soc_enum tdm_config_enum[] = {
+	SOC_ENUM_SINGLE_EXT(ARRAY_SIZE(tdm_data_format), tdm_data_format),
+	SOC_ENUM_SINGLE_EXT(ARRAY_SIZE(tdm_header_type), tdm_header_type),
+};
+
+static DEFINE_MUTEX(tdm_mutex);
+
+static atomic_t tdm_group_ref[IDX_GROUP_TDM_MAX];
+
+/* cache of group cfg per parent node */
+static struct afe_param_id_group_device_tdm_cfg tdm_group_cfg = {
+	AFE_API_VERSION_GROUP_DEVICE_TDM_CONFIG,
+	AFE_GROUP_DEVICE_ID_QUATERNARY_TDM_RX,
+	0,
+	{AFE_PORT_ID_QUATERNARY_TDM_RX,
+	AFE_PORT_ID_QUATERNARY_TDM_RX_1,
+	AFE_PORT_ID_QUATERNARY_TDM_RX_2,
+	AFE_PORT_ID_QUATERNARY_TDM_RX_3,
+	AFE_PORT_ID_QUATERNARY_TDM_RX_4,
+	AFE_PORT_ID_QUATERNARY_TDM_RX_5,
+	AFE_PORT_ID_QUATERNARY_TDM_RX_6,
+	AFE_PORT_ID_QUATERNARY_TDM_RX_7},
+	8,
+	48000,
+	32,
+	8,
+	32,
+	0xFF,
+};
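+
+/*
+ * Note (editorial): the positional initializer above depends on the field
+ * order of struct afe_param_id_group_device_tdm_cfg (defined outside this
+ * hunk). Read against the trailing scalars, the defaults are: the eight
+ * quaternary TDM RX ports grouped together, 8 channels at 48000 Hz,
+ * 32-bit samples, 8 slots of 32 bits each, slot mask 0xFF. Designated
+ * initializers would make the table self-documenting.
+ */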
+
+static u32 num_tdm_group_ports;
+
+static struct afe_clk_set tdm_clk_set = {
+	AFE_API_VERSION_CLOCK_SET,
+	Q6AFE_LPASS_CLK_ID_QUAD_TDM_EBIT,
+	Q6AFE_LPASS_IBIT_CLK_DISABLE,
+	Q6AFE_LPASS_CLK_ATTRIBUTE_INVERT_COUPLE_NO,
+	Q6AFE_LPASS_CLK_ROOT_DEFAULT,
+	0,
+};
+
+int msm_dai_q6_get_group_idx(u16 id)
+{
+	switch (id) {
+	case AFE_GROUP_DEVICE_ID_PRIMARY_TDM_RX:
+	case AFE_PORT_ID_PRIMARY_TDM_RX:
+	case AFE_PORT_ID_PRIMARY_TDM_RX_1:
+	case AFE_PORT_ID_PRIMARY_TDM_RX_2:
+	case AFE_PORT_ID_PRIMARY_TDM_RX_3:
+	case AFE_PORT_ID_PRIMARY_TDM_RX_4:
+	case AFE_PORT_ID_PRIMARY_TDM_RX_5:
+	case AFE_PORT_ID_PRIMARY_TDM_RX_6:
+	case AFE_PORT_ID_PRIMARY_TDM_RX_7:
+		return IDX_GROUP_PRIMARY_TDM_RX;
+	case AFE_GROUP_DEVICE_ID_PRIMARY_TDM_TX:
+	case AFE_PORT_ID_PRIMARY_TDM_TX:
+	case AFE_PORT_ID_PRIMARY_TDM_TX_1:
+	case AFE_PORT_ID_PRIMARY_TDM_TX_2:
+	case AFE_PORT_ID_PRIMARY_TDM_TX_3:
+	case AFE_PORT_ID_PRIMARY_TDM_TX_4:
+	case AFE_PORT_ID_PRIMARY_TDM_TX_5:
+	case AFE_PORT_ID_PRIMARY_TDM_TX_6:
+	case AFE_PORT_ID_PRIMARY_TDM_TX_7:
+		return IDX_GROUP_PRIMARY_TDM_TX;
+	case AFE_GROUP_DEVICE_ID_SECONDARY_TDM_RX:
+	case AFE_PORT_ID_SECONDARY_TDM_RX:
+	case AFE_PORT_ID_SECONDARY_TDM_RX_1:
+	case AFE_PORT_ID_SECONDARY_TDM_RX_2:
+	case AFE_PORT_ID_SECONDARY_TDM_RX_3:
+	case AFE_PORT_ID_SECONDARY_TDM_RX_4:
+	case AFE_PORT_ID_SECONDARY_TDM_RX_5:
+	case AFE_PORT_ID_SECONDARY_TDM_RX_6:
+	case AFE_PORT_ID_SECONDARY_TDM_RX_7:
+		return IDX_GROUP_SECONDARY_TDM_RX;
+	case AFE_GROUP_DEVICE_ID_SECONDARY_TDM_TX:
+	case AFE_PORT_ID_SECONDARY_TDM_TX:
+	case AFE_PORT_ID_SECONDARY_TDM_TX_1:
+	case AFE_PORT_ID_SECONDARY_TDM_TX_2:
+	case AFE_PORT_ID_SECONDARY_TDM_TX_3:
+	case AFE_PORT_ID_SECONDARY_TDM_TX_4:
+	case AFE_PORT_ID_SECONDARY_TDM_TX_5:
+	case AFE_PORT_ID_SECONDARY_TDM_TX_6:
+	case AFE_PORT_ID_SECONDARY_TDM_TX_7:
+		return IDX_GROUP_SECONDARY_TDM_TX;
+	case AFE_GROUP_DEVICE_ID_TERTIARY_TDM_RX:
+	case AFE_PORT_ID_TERTIARY_TDM_RX:
+	case AFE_PORT_ID_TERTIARY_TDM_RX_1:
+	case AFE_PORT_ID_TERTIARY_TDM_RX_2:
+	case AFE_PORT_ID_TERTIARY_TDM_RX_3:
+	case AFE_PORT_ID_TERTIARY_TDM_RX_4:
+	case AFE_PORT_ID_TERTIARY_TDM_RX_5:
+	case AFE_PORT_ID_TERTIARY_TDM_RX_6:
+	case AFE_PORT_ID_TERTIARY_TDM_RX_7:
+		return IDX_GROUP_TERTIARY_TDM_RX;
+	case AFE_GROUP_DEVICE_ID_TERTIARY_TDM_TX:
+	case AFE_PORT_ID_TERTIARY_TDM_TX:
+	case AFE_PORT_ID_TERTIARY_TDM_TX_1:
+	case AFE_PORT_ID_TERTIARY_TDM_TX_2:
+	case AFE_PORT_ID_TERTIARY_TDM_TX_3:
+	case AFE_PORT_ID_TERTIARY_TDM_TX_4:
+	case AFE_PORT_ID_TERTIARY_TDM_TX_5:
+	case AFE_PORT_ID_TERTIARY_TDM_TX_6:
+	case AFE_PORT_ID_TERTIARY_TDM_TX_7:
+		return IDX_GROUP_TERTIARY_TDM_TX;
+	case AFE_GROUP_DEVICE_ID_QUATERNARY_TDM_RX:
+	case AFE_PORT_ID_QUATERNARY_TDM_RX:
+	case AFE_PORT_ID_QUATERNARY_TDM_RX_1:
+	case AFE_PORT_ID_QUATERNARY_TDM_RX_2:
+	case AFE_PORT_ID_QUATERNARY_TDM_RX_3:
+	case AFE_PORT_ID_QUATERNARY_TDM_RX_4:
+	case AFE_PORT_ID_QUATERNARY_TDM_RX_5:
+	case AFE_PORT_ID_QUATERNARY_TDM_RX_6:
+	case AFE_PORT_ID_QUATERNARY_TDM_RX_7:
+		return IDX_GROUP_QUATERNARY_TDM_RX;
+	case AFE_GROUP_DEVICE_ID_QUATERNARY_TDM_TX:
+	case AFE_PORT_ID_QUATERNARY_TDM_TX:
+	case AFE_PORT_ID_QUATERNARY_TDM_TX_1:
+	case AFE_PORT_ID_QUATERNARY_TDM_TX_2:
+	case AFE_PORT_ID_QUATERNARY_TDM_TX_3:
+	case AFE_PORT_ID_QUATERNARY_TDM_TX_4:
+	case AFE_PORT_ID_QUATERNARY_TDM_TX_5:
+	case AFE_PORT_ID_QUATERNARY_TDM_TX_6:
+	case AFE_PORT_ID_QUATERNARY_TDM_TX_7:
+		return IDX_GROUP_QUATERNARY_TDM_TX;
+	default:
+		return -EINVAL;
+	}
+}
+
+int msm_dai_q6_get_port_idx(u16 id)
+{
+	switch (id) {
+	case AFE_PORT_ID_PRIMARY_TDM_RX:
+		return IDX_PRIMARY_TDM_RX_0;
+	case AFE_PORT_ID_PRIMARY_TDM_TX:
+		return IDX_PRIMARY_TDM_TX_0;
+	case AFE_PORT_ID_PRIMARY_TDM_RX_1:
+		return IDX_PRIMARY_TDM_RX_1;
+	case AFE_PORT_ID_PRIMARY_TDM_TX_1:
+		return IDX_PRIMARY_TDM_TX_1;
+	case AFE_PORT_ID_PRIMARY_TDM_RX_2:
+		return IDX_PRIMARY_TDM_RX_2;
+	case AFE_PORT_ID_PRIMARY_TDM_TX_2:
+		return IDX_PRIMARY_TDM_TX_2;
+	case AFE_PORT_ID_PRIMARY_TDM_RX_3:
+		return IDX_PRIMARY_TDM_RX_3;
+	case AFE_PORT_ID_PRIMARY_TDM_TX_3:
+		return IDX_PRIMARY_TDM_TX_3;
+	case AFE_PORT_ID_PRIMARY_TDM_RX_4:
+		return IDX_PRIMARY_TDM_RX_4;
+	case AFE_PORT_ID_PRIMARY_TDM_TX_4:
+		return IDX_PRIMARY_TDM_TX_4;
+	case AFE_PORT_ID_PRIMARY_TDM_RX_5:
+		return IDX_PRIMARY_TDM_RX_5;
+	case AFE_PORT_ID_PRIMARY_TDM_TX_5:
+		return IDX_PRIMARY_TDM_TX_5;
+	case AFE_PORT_ID_PRIMARY_TDM_RX_6:
+		return IDX_PRIMARY_TDM_RX_6;
+	case AFE_PORT_ID_PRIMARY_TDM_TX_6:
+		return IDX_PRIMARY_TDM_TX_6;
+	case AFE_PORT_ID_PRIMARY_TDM_RX_7:
+		return IDX_PRIMARY_TDM_RX_7;
+	case AFE_PORT_ID_PRIMARY_TDM_TX_7:
+		return IDX_PRIMARY_TDM_TX_7;
+	case AFE_PORT_ID_SECONDARY_TDM_RX:
+		return IDX_SECONDARY_TDM_RX_0;
+	case AFE_PORT_ID_SECONDARY_TDM_TX:
+		return IDX_SECONDARY_TDM_TX_0;
+	case AFE_PORT_ID_SECONDARY_TDM_RX_1:
+		return IDX_SECONDARY_TDM_RX_1;
+	case AFE_PORT_ID_SECONDARY_TDM_TX_1:
+		return IDX_SECONDARY_TDM_TX_1;
+	case AFE_PORT_ID_SECONDARY_TDM_RX_2:
+		return IDX_SECONDARY_TDM_RX_2;
+	case AFE_PORT_ID_SECONDARY_TDM_TX_2:
+		return IDX_SECONDARY_TDM_TX_2;
+	case AFE_PORT_ID_SECONDARY_TDM_RX_3:
+		return IDX_SECONDARY_TDM_RX_3;
+	case AFE_PORT_ID_SECONDARY_TDM_TX_3:
+		return IDX_SECONDARY_TDM_TX_3;
+	case AFE_PORT_ID_SECONDARY_TDM_RX_4:
+		return IDX_SECONDARY_TDM_RX_4;
+	case AFE_PORT_ID_SECONDARY_TDM_TX_4:
+		return IDX_SECONDARY_TDM_TX_4;
+	case AFE_PORT_ID_SECONDARY_TDM_RX_5:
+		return IDX_SECONDARY_TDM_RX_5;
+	case AFE_PORT_ID_SECONDARY_TDM_TX_5:
+		return IDX_SECONDARY_TDM_TX_5;
+	case AFE_PORT_ID_SECONDARY_TDM_RX_6:
+		return IDX_SECONDARY_TDM_RX_6;
+	case AFE_PORT_ID_SECONDARY_TDM_TX_6:
+		return IDX_SECONDARY_TDM_TX_6;
+	case AFE_PORT_ID_SECONDARY_TDM_RX_7:
+		return IDX_SECONDARY_TDM_RX_7;
+	case AFE_PORT_ID_SECONDARY_TDM_TX_7:
+		return IDX_SECONDARY_TDM_TX_7;
+	case AFE_PORT_ID_TERTIARY_TDM_RX:
+		return IDX_TERTIARY_TDM_RX_0;
+	case AFE_PORT_ID_TERTIARY_TDM_TX:
+		return IDX_TERTIARY_TDM_TX_0;
+	case AFE_PORT_ID_TERTIARY_TDM_RX_1:
+		return IDX_TERTIARY_TDM_RX_1;
+	case AFE_PORT_ID_TERTIARY_TDM_TX_1:
+		return IDX_TERTIARY_TDM_TX_1;
+	case AFE_PORT_ID_TERTIARY_TDM_RX_2:
+		return IDX_TERTIARY_TDM_RX_2;
+	case AFE_PORT_ID_TERTIARY_TDM_TX_2:
+		return IDX_TERTIARY_TDM_TX_2;
+	case AFE_PORT_ID_TERTIARY_TDM_RX_3:
+		return IDX_TERTIARY_TDM_RX_3;
+	case AFE_PORT_ID_TERTIARY_TDM_TX_3:
+		return IDX_TERTIARY_TDM_TX_3;
+	case AFE_PORT_ID_TERTIARY_TDM_RX_4:
+		return IDX_TERTIARY_TDM_RX_4;
+	case AFE_PORT_ID_TERTIARY_TDM_TX_4:
+		return IDX_TERTIARY_TDM_TX_4;
+	case AFE_PORT_ID_TERTIARY_TDM_RX_5:
+		return IDX_TERTIARY_TDM_RX_5;
+	case AFE_PORT_ID_TERTIARY_TDM_TX_5:
+		return IDX_TERTIARY_TDM_TX_5;
+	case AFE_PORT_ID_TERTIARY_TDM_RX_6:
+		return IDX_TERTIARY_TDM_RX_6;
+	case AFE_PORT_ID_TERTIARY_TDM_TX_6:
+		return IDX_TERTIARY_TDM_TX_6;
+	case AFE_PORT_ID_TERTIARY_TDM_RX_7:
+		return IDX_TERTIARY_TDM_RX_7;
+	case AFE_PORT_ID_TERTIARY_TDM_TX_7:
+		return IDX_TERTIARY_TDM_TX_7;
+	case AFE_PORT_ID_QUATERNARY_TDM_RX:
+		return IDX_QUATERNARY_TDM_RX_0;
+	case AFE_PORT_ID_QUATERNARY_TDM_TX:
+		return IDX_QUATERNARY_TDM_TX_0;
+	case AFE_PORT_ID_QUATERNARY_TDM_RX_1:
+		return IDX_QUATERNARY_TDM_RX_1;
+	case AFE_PORT_ID_QUATERNARY_TDM_TX_1:
+		return IDX_QUATERNARY_TDM_TX_1;
+	case AFE_PORT_ID_QUATERNARY_TDM_RX_2:
+		return IDX_QUATERNARY_TDM_RX_2;
+	case AFE_PORT_ID_QUATERNARY_TDM_TX_2:
+		return IDX_QUATERNARY_TDM_TX_2;
+	case AFE_PORT_ID_QUATERNARY_TDM_RX_3:
+		return IDX_QUATERNARY_TDM_RX_3;
+	case AFE_PORT_ID_QUATERNARY_TDM_TX_3:
+		return IDX_QUATERNARY_TDM_TX_3;
+	case AFE_PORT_ID_QUATERNARY_TDM_RX_4:
+		return IDX_QUATERNARY_TDM_RX_4;
+	case AFE_PORT_ID_QUATERNARY_TDM_TX_4:
+		return IDX_QUATERNARY_TDM_TX_4;
+	case AFE_PORT_ID_QUATERNARY_TDM_RX_5:
+		return IDX_QUATERNARY_TDM_RX_5;
+	case AFE_PORT_ID_QUATERNARY_TDM_TX_5:
+		return IDX_QUATERNARY_TDM_TX_5;
+	case AFE_PORT_ID_QUATERNARY_TDM_RX_6:
+		return IDX_QUATERNARY_TDM_RX_6;
+	case AFE_PORT_ID_QUATERNARY_TDM_TX_6:
+		return IDX_QUATERNARY_TDM_TX_6;
+	case AFE_PORT_ID_QUATERNARY_TDM_RX_7:
+		return IDX_QUATERNARY_TDM_RX_7;
+	case AFE_PORT_ID_QUATERNARY_TDM_TX_7:
+		return IDX_QUATERNARY_TDM_TX_7;
+	default:
+		return -EINVAL;
+	}
+}
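+
+/*
+ * Editorial sketch (illustrative, not part of the original patch): both
+ * lookup functions above map eight RX and eight TX lanes per TDM
+ * interface onto contiguous index ranges. If the AFE_PORT_ID_*_TDM_*
+ * values advance by a fixed stride per lane (an assumption that must be
+ * checked against the port-ID header), each switch collapses to
+ * arithmetic:
+ */
+#if 0
+/* idx_0 is the index of lane 0; stride is the port-ID spacing per lane */
+static int tdm_port_idx_from_base(u16 id, u16 base, u16 stride, int idx_0)
+{
+	u16 off = id - base;
+
+	if (id < base || off % stride || off / stride >= 8)
+		return -EINVAL;
+	return idx_0 + off / stride;
+}
+#endif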
+
+static u16 msm_dai_q6_max_num_slot(int frame_rate)
+{
+	/* The maximum number of slots is the number of bits per frame
+	 * divided by the bits per sample, which is 16 (e.g. a 256-bit
+	 * frame yields 256 / 16 = 16 slots).
+	 */
+	switch (frame_rate) {
+	case AFE_PORT_PCM_BITS_PER_FRAME_8:
+		return 0;
+	case AFE_PORT_PCM_BITS_PER_FRAME_16:
+		return 1;
+	case AFE_PORT_PCM_BITS_PER_FRAME_32:
+		return 2;
+	case AFE_PORT_PCM_BITS_PER_FRAME_64:
+		return 4;
+	case AFE_PORT_PCM_BITS_PER_FRAME_128:
+		return 8;
+	case AFE_PORT_PCM_BITS_PER_FRAME_256:
+		return 16;
+	default:
+		pr_err("%s Invalid bits per frame %d\n",
+			__func__, frame_rate);
+		return 0;
+	}
+}
+
+static int msm_dai_q6_dai_add_route(struct snd_soc_dai *dai)
+{
+	struct snd_soc_dapm_route intercon;
+	struct snd_soc_dapm_context *dapm;
+
+	if (!dai) {
+		pr_err("%s: Invalid params dai\n", __func__);
+		return -EINVAL;
+	}
+	if (!dai->driver) {
+		pr_err("%s: Invalid params dai driver\n", __func__);
+		return -EINVAL;
+	}
+	dapm = snd_soc_component_get_dapm(dai->component);
+	memset(&intercon, 0 , sizeof(intercon));
+	if (dai->driver->playback.stream_name &&
+		dai->driver->playback.aif_name) {
+		dev_dbg(dai->dev, "%s: add route for widget %s",
+				__func__, dai->driver->playback.stream_name);
+		intercon.source = dai->driver->playback.aif_name;
+		intercon.sink = dai->driver->playback.stream_name;
+		dev_dbg(dai->dev, "%s: src %s sink %s\n",
+				__func__, intercon.source, intercon.sink);
+		snd_soc_dapm_add_routes(dapm, &intercon, 1);
+	}
+	if (dai->driver->capture.stream_name &&
+		dai->driver->capture.aif_name) {
+		dev_dbg(dai->dev, "%s: add route for widget %s",
+				__func__, dai->driver->capture.stream_name);
+		intercon.sink = dai->driver->capture.aif_name;
+		intercon.source = dai->driver->capture.stream_name;
+		dev_dbg(dai->dev, "%s: src %s sink %s\n",
+				__func__, intercon.source, intercon.sink);
+		snd_soc_dapm_add_routes(dapm, &intercon, 1);
+	}
+	return 0;
+}
+
+static int msm_dai_q6_auxpcm_hw_params(
+				struct snd_pcm_substream *substream,
+				struct snd_pcm_hw_params *params,
+				struct snd_soc_dai *dai)
+{
+	struct msm_dai_q6_auxpcm_dai_data *aux_dai_data =
+			dev_get_drvdata(dai->dev);
+	struct msm_dai_q6_dai_data *dai_data = &aux_dai_data->bdai_data;
+	struct msm_dai_auxpcm_pdata *auxpcm_pdata =
+			(struct msm_dai_auxpcm_pdata *) dai->dev->platform_data;
+	int rc = 0, slot_mapping_copy_len = 0;
+
+	if (params_channels(params) != 1 || (params_rate(params) != 8000 &&
+	    params_rate(params) != 16000)) {
+		dev_err(dai->dev, "%s: invalid param chan %d rate %d\n",
+			__func__, params_channels(params), params_rate(params));
+		return -EINVAL;
+	}
+
+	mutex_lock(&aux_dai_data->rlock);
+
+	if (test_bit(STATUS_TX_PORT, aux_dai_data->auxpcm_port_status) ||
+	    test_bit(STATUS_RX_PORT, aux_dai_data->auxpcm_port_status)) {
+		/* AUXPCM DAI in use */
+		if (dai_data->rate != params_rate(params)) {
+			dev_err(dai->dev, "%s: rate mismatch of running DAI\n",
+			__func__);
+			rc = -EINVAL;
+		}
+		mutex_unlock(&aux_dai_data->rlock);
+		return rc;
+	}
+
+	dai_data->channels = params_channels(params);
+	dai_data->rate = params_rate(params);
+
+	if (dai_data->rate == 8000) {
+		dai_data->port_config.pcm.pcm_cfg_minor_version =
+				AFE_API_VERSION_PCM_CONFIG;
+		dai_data->port_config.pcm.aux_mode = auxpcm_pdata->mode_8k.mode;
+		dai_data->port_config.pcm.sync_src = auxpcm_pdata->mode_8k.sync;
+		dai_data->port_config.pcm.frame_setting =
+					auxpcm_pdata->mode_8k.frame;
+		dai_data->port_config.pcm.quantype =
+					 auxpcm_pdata->mode_8k.quant;
+		dai_data->port_config.pcm.ctrl_data_out_enable =
+					 auxpcm_pdata->mode_8k.data;
+		dai_data->port_config.pcm.sample_rate = dai_data->rate;
+		dai_data->port_config.pcm.num_channels = dai_data->channels;
+		dai_data->port_config.pcm.bit_width = 16;
+		if (ARRAY_SIZE(dai_data->port_config.pcm.slot_number_mapping) <=
+		    auxpcm_pdata->mode_8k.num_slots)
+			slot_mapping_copy_len =
+				ARRAY_SIZE(
+				dai_data->port_config.pcm.slot_number_mapping)
+				 * sizeof(uint16_t);
+		else
+			slot_mapping_copy_len = auxpcm_pdata->mode_8k.num_slots
+				* sizeof(uint16_t);
+
+		if (auxpcm_pdata->mode_8k.slot_mapping) {
+			memcpy(dai_data->port_config.pcm.slot_number_mapping,
+			       auxpcm_pdata->mode_8k.slot_mapping,
+			       slot_mapping_copy_len);
+		} else {
+			dev_err(dai->dev, "%s 8khz slot mapping is NULL\n",
+				__func__);
+			mutex_unlock(&aux_dai_data->rlock);
+			return -EINVAL;
+		}
+	} else {
+		dai_data->port_config.pcm.pcm_cfg_minor_version =
+				AFE_API_VERSION_PCM_CONFIG;
+		dai_data->port_config.pcm.aux_mode =
+					auxpcm_pdata->mode_16k.mode;
+		dai_data->port_config.pcm.sync_src =
+					auxpcm_pdata->mode_16k.sync;
+		dai_data->port_config.pcm.frame_setting =
+					auxpcm_pdata->mode_16k.frame;
+		dai_data->port_config.pcm.quantype =
+					auxpcm_pdata->mode_16k.quant;
+		dai_data->port_config.pcm.ctrl_data_out_enable =
+					auxpcm_pdata->mode_16k.data;
+		dai_data->port_config.pcm.sample_rate = dai_data->rate;
+		dai_data->port_config.pcm.num_channels = dai_data->channels;
+		dai_data->port_config.pcm.bit_width = 16;
+		if (ARRAY_SIZE(dai_data->port_config.pcm.slot_number_mapping) <=
+		    auxpcm_pdata->mode_16k.num_slots)
+			slot_mapping_copy_len =
+				ARRAY_SIZE(
+				dai_data->port_config.pcm.slot_number_mapping)
+				 * sizeof(uint16_t);
+		else
+			slot_mapping_copy_len = auxpcm_pdata->mode_16k.num_slots
+				* sizeof(uint16_t);
+
+		if (auxpcm_pdata->mode_16k.slot_mapping) {
+			memcpy(dai_data->port_config.pcm.slot_number_mapping,
+			       auxpcm_pdata->mode_16k.slot_mapping,
+			       slot_mapping_copy_len);
+		} else {
+			dev_err(dai->dev, "%s 16khz slot mapping is NULL\n",
+				__func__);
+			mutex_unlock(&aux_dai_data->rlock);
+			return -EINVAL;
+		}
+	}
+
+	dev_dbg(dai->dev, "%s: aux_mode 0x%x sync_src 0x%x frame_setting 0x%x\n",
+		__func__, dai_data->port_config.pcm.aux_mode,
+		dai_data->port_config.pcm.sync_src,
+		dai_data->port_config.pcm.frame_setting);
+	dev_dbg(dai->dev, "%s: qtype 0x%x dout 0x%x num_map[0] 0x%x\n"
+		"num_map[1] 0x%x num_map[2] 0x%x num_map[3] 0x%x\n",
+		__func__, dai_data->port_config.pcm.quantype,
+		dai_data->port_config.pcm.ctrl_data_out_enable,
+		dai_data->port_config.pcm.slot_number_mapping[0],
+		dai_data->port_config.pcm.slot_number_mapping[1],
+		dai_data->port_config.pcm.slot_number_mapping[2],
+		dai_data->port_config.pcm.slot_number_mapping[3]);
+
+	mutex_unlock(&aux_dai_data->rlock);
+	return rc;
+}
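+
+/*
+ * Editorial sketch (illustrative, not part of the original patch): the
+ * 8 kHz and 16 kHz branches above fill the same fields from mode_8k or
+ * mode_16k respectively. Since the mode struct's type name is not
+ * visible in this hunk, the shared part can be expressed generically as
+ * a macro over either mode:
+ */
+#if 0
+/* MODE is auxpcm_pdata->mode_8k or auxpcm_pdata->mode_16k */
+#define AUXPCM_FILL_PCM_CFG(pcm, MODE, dai_data)			\
+	do {								\
+		(pcm)->pcm_cfg_minor_version =				\
+				AFE_API_VERSION_PCM_CONFIG;		\
+		(pcm)->aux_mode = (MODE).mode;				\
+		(pcm)->sync_src = (MODE).sync;				\
+		(pcm)->frame_setting = (MODE).frame;			\
+		(pcm)->quantype = (MODE).quant;				\
+		(pcm)->ctrl_data_out_enable = (MODE).data;		\
+		(pcm)->sample_rate = (dai_data)->rate;			\
+		(pcm)->num_channels = (dai_data)->channels;		\
+		(pcm)->bit_width = 16;					\
+	} while (0)
+#endif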
+
+static int msm_dai_q6_auxpcm_set_clk(
+		struct msm_dai_q6_auxpcm_dai_data *aux_dai_data,
+		u16 port_id, bool enable)
+{
+	int rc;
+
+	pr_debug("%s: afe_clk_ver: %d, port_id: %d, enable: %d\n", __func__,
+		 aux_dai_data->afe_clk_ver, port_id, enable);
+	if (aux_dai_data->afe_clk_ver == AFE_CLK_VERSION_V2) {
+		aux_dai_data->clk_set.enable = enable;
+		rc = afe_set_lpass_clock_v2(port_id,
+					&aux_dai_data->clk_set);
+	} else {
+		if (!enable)
+			aux_dai_data->clk_cfg.clk_val1 = 0;
+		rc = afe_set_lpass_clock(port_id,
+					&aux_dai_data->clk_cfg);
+	}
+	return rc;
+}
+
+static void msm_dai_q6_auxpcm_shutdown(struct snd_pcm_substream *substream,
+				struct snd_soc_dai *dai)
+{
+	int rc = 0;
+	struct msm_dai_q6_auxpcm_dai_data *aux_dai_data =
+		dev_get_drvdata(dai->dev);
+
+	mutex_lock(&aux_dai_data->rlock);
+
+	if (!(test_bit(STATUS_TX_PORT, aux_dai_data->auxpcm_port_status) ||
+	      test_bit(STATUS_RX_PORT, aux_dai_data->auxpcm_port_status))) {
+		dev_dbg(dai->dev, "%s(): dai->id %d PCM ports already closed\n",
+				__func__, dai->id);
+		goto exit;
+	}
+
+	if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) {
+		if (test_bit(STATUS_TX_PORT, aux_dai_data->auxpcm_port_status))
+			clear_bit(STATUS_TX_PORT,
+				  aux_dai_data->auxpcm_port_status);
+		else {
+			dev_dbg(dai->dev, "%s: PCM_TX port already closed\n",
+				__func__);
+			goto exit;
+		}
+	} else if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+		if (test_bit(STATUS_RX_PORT, aux_dai_data->auxpcm_port_status))
+			clear_bit(STATUS_RX_PORT,
+				  aux_dai_data->auxpcm_port_status);
+		else {
+			dev_dbg(dai->dev, "%s: PCM_RX port already closed\n",
+				__func__);
+			goto exit;
+		}
+	}
+	if (test_bit(STATUS_TX_PORT, aux_dai_data->auxpcm_port_status) ||
+	    test_bit(STATUS_RX_PORT, aux_dai_data->auxpcm_port_status)) {
+		dev_dbg(dai->dev, "%s: cannot shutdown PCM ports\n",
+			__func__);
+		goto exit;
+	}
+
+	dev_dbg(dai->dev, "%s: dai->id = %d closing PCM AFE ports\n",
+			__func__, dai->id);
+
+	rc = afe_close(aux_dai_data->rx_pid); /* can block */
+	if (IS_ERR_VALUE(rc))
+		dev_err(dai->dev, "fail to close PCM_RX  AFE port\n");
+
+	rc = afe_close(aux_dai_data->tx_pid);
+	if (IS_ERR_VALUE(rc))
+		dev_err(dai->dev, "fail to close AUX PCM TX port\n");
+
+	msm_dai_q6_auxpcm_set_clk(aux_dai_data, aux_dai_data->rx_pid, false);
+	msm_dai_q6_auxpcm_set_clk(aux_dai_data, aux_dai_data->tx_pid, false);
+exit:
+	mutex_unlock(&aux_dai_data->rlock);
+}
+
+static int msm_dai_q6_auxpcm_prepare(struct snd_pcm_substream *substream,
+		struct snd_soc_dai *dai)
+{
+	struct msm_dai_q6_auxpcm_dai_data *aux_dai_data =
+		dev_get_drvdata(dai->dev);
+	struct msm_dai_q6_dai_data *dai_data = &aux_dai_data->bdai_data;
+	struct msm_dai_auxpcm_pdata *auxpcm_pdata = NULL;
+	int rc = 0;
+	u32 pcm_clk_rate;
+
+	auxpcm_pdata = dai->dev->platform_data;
+	mutex_lock(&aux_dai_data->rlock);
+
+	if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) {
+		if (test_bit(STATUS_TX_PORT,
+				aux_dai_data->auxpcm_port_status)) {
+			dev_dbg(dai->dev, "%s: PCM_TX port already ON\n",
+				__func__);
+			goto exit;
+		} else
+			set_bit(STATUS_TX_PORT,
+				  aux_dai_data->auxpcm_port_status);
+	} else if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+		if (test_bit(STATUS_RX_PORT,
+				aux_dai_data->auxpcm_port_status)) {
+			dev_dbg(dai->dev, "%s: PCM_RX port already ON\n",
+				__func__);
+			goto exit;
+		} else
+			set_bit(STATUS_RX_PORT,
+				  aux_dai_data->auxpcm_port_status);
+	}
+	if (test_bit(STATUS_TX_PORT, aux_dai_data->auxpcm_port_status) &&
+	    test_bit(STATUS_RX_PORT, aux_dai_data->auxpcm_port_status)) {
+		dev_dbg(dai->dev, "%s: PCM ports already set\n", __func__);
+		goto exit;
+	}
+
+	dev_dbg(dai->dev, "%s: dai->id:%d  opening afe ports\n",
+			__func__, dai->id);
+
+	rc = afe_q6_interface_prepare();
+	if (IS_ERR_VALUE(rc)) {
+		dev_err(dai->dev, "fail to open AFE APR\n");
+		goto fail;
+	}
+
+	/*
+	 * For AUX PCM Interface the below sequence of clk
+	 * settings and afe_open is a strict requirement.
+	 *
+	 * Also using afe_open instead of afe_port_start_nowait
+	 * to make sure the port is open before deasserting the
+	 * clock line. This is required because pcm register is
+	 * not written before clock deassert. Hence the hw does
+	 * not get updated with new setting if the below clock
+	 * assert/deasset and afe_open sequence is not followed.
+	 */
+
+	if (dai_data->rate == 8000) {
+		pcm_clk_rate = auxpcm_pdata->mode_8k.pcm_clk_rate;
+	} else if (dai_data->rate == 16000) {
+		pcm_clk_rate = auxpcm_pdata->mode_16k.pcm_clk_rate;
+	} else {
+		dev_err(dai->dev, "%s: Invalid AUX PCM rate %d\n", __func__,
+			dai_data->rate);
+		rc = -EINVAL;
+		goto fail;
+	}
+	if (aux_dai_data->afe_clk_ver == AFE_CLK_VERSION_V2) {
+		memcpy(&aux_dai_data->clk_set, &lpass_clk_set_default,
+				sizeof(struct afe_clk_set));
+		aux_dai_data->clk_set.clk_freq_in_hz = pcm_clk_rate;
+
+		switch (dai->id) {
+		case MSM_DAI_PRI_AUXPCM_DT_DEV_ID:
+			if (pcm_clk_rate)
+				aux_dai_data->clk_set.clk_id =
+					Q6AFE_LPASS_CLK_ID_PRI_PCM_IBIT;
+			else
+				aux_dai_data->clk_set.clk_id =
+					Q6AFE_LPASS_CLK_ID_PRI_PCM_EBIT;
+			break;
+		case MSM_DAI_SEC_AUXPCM_DT_DEV_ID:
+			if (pcm_clk_rate)
+				aux_dai_data->clk_set.clk_id =
+					Q6AFE_LPASS_CLK_ID_SEC_PCM_IBIT;
+			else
+				aux_dai_data->clk_set.clk_id =
+					Q6AFE_LPASS_CLK_ID_SEC_PCM_EBIT;
+			break;
+		case MSM_DAI_TERT_AUXPCM_DT_DEV_ID:
+			if (pcm_clk_rate)
+				aux_dai_data->clk_set.clk_id =
+					Q6AFE_LPASS_CLK_ID_TER_PCM_IBIT;
+			else
+				aux_dai_data->clk_set.clk_id =
+					Q6AFE_LPASS_CLK_ID_TER_PCM_EBIT;
+			break;
+		case MSM_DAI_QUAT_AUXPCM_DT_DEV_ID:
+			if (pcm_clk_rate)
+				aux_dai_data->clk_set.clk_id =
+					Q6AFE_LPASS_CLK_ID_QUAD_PCM_IBIT;
+			else
+				aux_dai_data->clk_set.clk_id =
+					Q6AFE_LPASS_CLK_ID_QUAD_PCM_EBIT;
+			break;
+		default:
+			dev_err(dai->dev, "%s: AUXPCM id: %d not supported\n",
+				__func__, dai->id);
+			break;
+		}
+	} else {
+		memcpy(&aux_dai_data->clk_cfg, &lpass_clk_cfg_default,
+				sizeof(struct afe_clk_cfg));
+		aux_dai_data->clk_cfg.clk_val1 = pcm_clk_rate;
+	}
+
+	rc = msm_dai_q6_auxpcm_set_clk(aux_dai_data,
+				       aux_dai_data->rx_pid, true);
+	if (rc < 0) {
+		dev_err(dai->dev,
+			"%s:afe_set_lpass_clock on RX pcm_src_clk failed\n",
+			__func__);
+		goto fail;
+	}
+
+	rc = msm_dai_q6_auxpcm_set_clk(aux_dai_data,
+				       aux_dai_data->tx_pid, true);
+	if (rc < 0) {
+		dev_err(dai->dev,
+			"%s:afe_set_lpass_clock on TX pcm_src_clk failed\n",
+			__func__);
+		goto fail;
+	}
+
+	afe_open(aux_dai_data->rx_pid, &dai_data->port_config, dai_data->rate);
+	afe_open(aux_dai_data->tx_pid, &dai_data->port_config, dai_data->rate);
+	goto exit;
+
+fail:
+	if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
+		clear_bit(STATUS_TX_PORT, aux_dai_data->auxpcm_port_status);
+	else if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+		clear_bit(STATUS_RX_PORT, aux_dai_data->auxpcm_port_status);
+
+exit:
+	mutex_unlock(&aux_dai_data->rlock);
+	return rc;
+}
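+
+/*
+ * Note (editorial): in the clock setup above, a non-zero pcm_clk_rate
+ * selects the Q6AFE_LPASS_CLK_ID_*_PCM_IBIT identifiers and a zero rate
+ * selects *_PCM_EBIT. By the usual Q6AFE naming this distinguishes an
+ * internally generated bit clock (LPASS as master) from an external one
+ * (LPASS as slave); that reading is inferred from the identifiers, not
+ * stated in this hunk.
+ */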
+
+static int msm_dai_q6_auxpcm_trigger(struct snd_pcm_substream *substream,
+		int cmd, struct snd_soc_dai *dai)
+{
+	int rc = 0;
+
+	pr_debug("%s:port:%d  cmd:%d\n",
+		__func__, dai->id, cmd);
+
+	switch (cmd) {
+
+	case SNDRV_PCM_TRIGGER_START:
+	case SNDRV_PCM_TRIGGER_RESUME:
+	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+		/* afe_open will be called from prepare */
+		return 0;
+
+	case SNDRV_PCM_TRIGGER_STOP:
+	case SNDRV_PCM_TRIGGER_SUSPEND:
+	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+		return 0;
+
+	default:
+		pr_err("%s: cmd %d\n", __func__, cmd);
+		rc = -EINVAL;
+	}
+
+	return rc;
+
+}
+
+static int msm_dai_q6_dai_auxpcm_remove(struct snd_soc_dai *dai)
+{
+	struct msm_dai_q6_auxpcm_dai_data *aux_dai_data;
+	int rc;
+
+	aux_dai_data = dev_get_drvdata(dai->dev);
+
+	dev_dbg(dai->dev, "%s: dai->id %d closing afe\n",
+		__func__, dai->id);
+
+	if (test_bit(STATUS_TX_PORT, aux_dai_data->auxpcm_port_status) ||
+	    test_bit(STATUS_RX_PORT, aux_dai_data->auxpcm_port_status)) {
+		rc = afe_close(aux_dai_data->rx_pid); /* can block */
+		if (IS_ERR_VALUE(rc))
+			dev_err(dai->dev, "fail to close AUXPCM RX AFE port\n");
+		rc = afe_close(aux_dai_data->tx_pid);
+		if (IS_ERR_VALUE(rc))
+			dev_err(dai->dev, "fail to close AUXPCM TX AFE port\n");
+		clear_bit(STATUS_TX_PORT, aux_dai_data->auxpcm_port_status);
+		clear_bit(STATUS_RX_PORT, aux_dai_data->auxpcm_port_status);
+	}
+	msm_dai_q6_auxpcm_set_clk(aux_dai_data, aux_dai_data->rx_pid, false);
+	msm_dai_q6_auxpcm_set_clk(aux_dai_data, aux_dai_data->tx_pid, false);
+	return 0;
+}
+
+static int msm_dai_q6_aux_pcm_probe(struct snd_soc_dai *dai)
+{
+	int rc = 0;
+
+	if (!dai) {
+		pr_err("%s: Invalid params dai\n", __func__);
+		return -EINVAL;
+	}
+	if (!dai->dev) {
+		pr_err("%s: Invalid params dai dev\n", __func__);
+		return -EINVAL;
+	}
+	if (!dai->driver->id) {
+		dev_warn(dai->dev, "DAI driver id is not set\n");
+		return -EINVAL;
+	}
+	dai->id = dai->driver->id;
+	rc = msm_dai_q6_dai_add_route(dai);
+	return rc;
+}
+
+static struct snd_soc_dai_ops msm_dai_q6_auxpcm_ops = {
+	.prepare	= msm_dai_q6_auxpcm_prepare,
+	.trigger	= msm_dai_q6_auxpcm_trigger,
+	.hw_params	= msm_dai_q6_auxpcm_hw_params,
+	.shutdown	= msm_dai_q6_auxpcm_shutdown,
+};
+
+static const struct snd_soc_component_driver
+	msm_dai_q6_aux_pcm_dai_component = {
+	.name		= "msm-auxpcm-dev",
+};
+
+static struct snd_soc_dai_driver msm_dai_q6_aux_pcm_dai[] = {
+	{
+		.playback = {
+			.stream_name = "AUX PCM Playback",
+			.aif_name = "AUX_PCM_RX",
+			.rates = (SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000),
+			.formats = SNDRV_PCM_FMTBIT_S16_LE,
+			.channels_min = 1,
+			.channels_max = 1,
+			.rate_max = 16000,
+			.rate_min = 8000,
+		},
+		.capture = {
+			.stream_name = "AUX PCM Capture",
+			.aif_name = "AUX_PCM_TX",
+			.rates = (SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000),
+			.formats = SNDRV_PCM_FMTBIT_S16_LE,
+			.channels_min = 1,
+			.channels_max = 1,
+			.rate_max = 16000,
+			.rate_min = 8000,
+		},
+		.id = MSM_DAI_PRI_AUXPCM_DT_DEV_ID,
+		.ops = &msm_dai_q6_auxpcm_ops,
+		.probe = msm_dai_q6_aux_pcm_probe,
+		.remove = msm_dai_q6_dai_auxpcm_remove,
+	},
+	{
+		.playback = {
+			.stream_name = "Sec AUX PCM Playback",
+			.aif_name = "SEC_AUX_PCM_RX",
+			.rates = (SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000),
+			.formats = SNDRV_PCM_FMTBIT_S16_LE,
+			.channels_min = 1,
+			.channels_max = 1,
+			.rate_max = 16000,
+			.rate_min = 8000,
+		},
+		.capture = {
+			.stream_name = "Sec AUX PCM Capture",
+			.aif_name = "SEC_AUX_PCM_TX",
+			.rates = (SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000),
+			.formats = SNDRV_PCM_FMTBIT_S16_LE,
+			.channels_min = 1,
+			.channels_max = 1,
+			.rate_max = 16000,
+			.rate_min = 8000,
+		},
+		.id = MSM_DAI_SEC_AUXPCM_DT_DEV_ID,
+		.ops = &msm_dai_q6_auxpcm_ops,
+		.probe = msm_dai_q6_aux_pcm_probe,
+		.remove = msm_dai_q6_dai_auxpcm_remove,
+	},
+	{
+		.playback = {
+			.stream_name = "Tert AUX PCM Playback",
+			.aif_name = "TERT_AUX_PCM_RX",
+			.rates = (SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000),
+			.formats = SNDRV_PCM_FMTBIT_S16_LE,
+			.channels_min = 1,
+			.channels_max = 1,
+			.rate_max = 16000,
+			.rate_min = 8000,
+		},
+		.capture = {
+			.stream_name = "Tert AUX PCM Capture",
+			.aif_name = "TERT_AUX_PCM_TX",
+			.rates = (SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000),
+			.formats = SNDRV_PCM_FMTBIT_S16_LE,
+			.channels_min = 1,
+			.channels_max = 1,
+			.rate_max = 16000,
+			.rate_min = 8000,
+		},
+		.id = MSM_DAI_TERT_AUXPCM_DT_DEV_ID,
+		.ops = &msm_dai_q6_auxpcm_ops,
+		.probe = msm_dai_q6_aux_pcm_probe,
+		.remove = msm_dai_q6_dai_auxpcm_remove,
+	},
+	{
+		.playback = {
+			.stream_name = "Quat AUX PCM Playback",
+			.aif_name = "QUAT_AUX_PCM_RX",
+			.rates = (SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000),
+			.formats = SNDRV_PCM_FMTBIT_S16_LE,
+			.channels_min = 1,
+			.channels_max = 1,
+			.rate_max = 16000,
+			.rate_min = 8000,
+		},
+		.capture = {
+			.stream_name = "Quat AUX PCM Capture",
+			.aif_name = "QUAT_AUX_PCM_TX",
+			.rates = (SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000),
+			.formats = SNDRV_PCM_FMTBIT_S16_LE,
+			.channels_min = 1,
+			.channels_max = 1,
+			.rate_max = 16000,
+			.rate_min = 8000,
+		},
+		.id = MSM_DAI_QUAT_AUXPCM_DT_DEV_ID,
+		.ops = &msm_dai_q6_auxpcm_ops,
+		.probe = msm_dai_q6_aux_pcm_probe,
+		.remove = msm_dai_q6_dai_auxpcm_remove,
+	},
+};
+
+static int msm_dai_q6_spdif_format_put(struct snd_kcontrol *kcontrol,
+		struct snd_ctl_elem_value *ucontrol)
+{
+	struct msm_dai_q6_spdif_dai_data *dai_data = kcontrol->private_data;
+	int value = ucontrol->value.integer.value[0];
+
+	dai_data->spdif_port.cfg.data_format = value;
+	pr_debug("%s: value = %d\n", __func__, value);
+	return 0;
+}
+
+static int msm_dai_q6_spdif_format_get(struct snd_kcontrol *kcontrol,
+		struct snd_ctl_elem_value *ucontrol)
+{
+	struct msm_dai_q6_spdif_dai_data *dai_data = kcontrol->private_data;
+
+	ucontrol->value.integer.value[0] =
+		dai_data->spdif_port.cfg.data_format;
+	return 0;
+}
+
+static const char * const spdif_format[] = {
+	"LPCM",
+	"Compr"
+};
+
+static const struct soc_enum spdif_config_enum[] = {
+	SOC_ENUM_SINGLE_EXT(2, spdif_format),
+};
+
+static int msm_dai_q6_spdif_chstatus_put(struct snd_kcontrol *kcontrol,
+		struct snd_ctl_elem_value *ucontrol)
+{
+	struct msm_dai_q6_spdif_dai_data *dai_data = kcontrol->private_data;
+	int ret = 0;
+
+	dai_data->spdif_port.ch_status.status_type =
+		AFE_API_VERSION_SPDIF_CH_STATUS_CONFIG;
+	memset(dai_data->spdif_port.ch_status.status_mask,
+			CHANNEL_STATUS_MASK_INIT, CHANNEL_STATUS_SIZE);
+	dai_data->spdif_port.ch_status.status_mask[0] =
+		CHANNEL_STATUS_MASK;
+
+	memcpy(dai_data->spdif_port.ch_status.status_bits,
+			ucontrol->value.iec958.status, CHANNEL_STATUS_SIZE);
+
+	if (test_bit(STATUS_PORT_STARTED, dai_data->status_mask)) {
+		pr_debug("%s: Port already started. Dynamic update\n",
+				__func__);
+		ret = afe_send_spdif_ch_status_cfg(
+				&dai_data->spdif_port.ch_status,
+				AFE_PORT_ID_SPDIF_RX);
+	}
+	return ret;
+}
+
+static int msm_dai_q6_spdif_chstatus_get(struct snd_kcontrol *kcontrol,
+		struct snd_ctl_elem_value *ucontrol)
+{
+	struct msm_dai_q6_spdif_dai_data *dai_data = kcontrol->private_data;
+
+	memcpy(ucontrol->value.iec958.status,
+			dai_data->spdif_port.ch_status.status_bits,
+			CHANNEL_STATUS_SIZE);
+	return 0;
+}
+
+static int msm_dai_q6_spdif_chstatus_info(struct snd_kcontrol *kcontrol,
+		struct snd_ctl_elem_info *uinfo)
+{
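+	/* The value is a single IEC958 channel status structure */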
+	uinfo->type = SNDRV_CTL_ELEM_TYPE_IEC958;
+	uinfo->count = 1;
+	return 0;
+}
+
+static const struct snd_kcontrol_new spdif_config_controls[] = {
+	{
+		.access = (SNDRV_CTL_ELEM_ACCESS_READWRITE |
+				SNDRV_CTL_ELEM_ACCESS_INACTIVE),
+		.iface  =   SNDRV_CTL_ELEM_IFACE_PCM,
+		.name   =   SNDRV_CTL_NAME_IEC958("", PLAYBACK, PCM_STREAM),
+		.info   =   msm_dai_q6_spdif_chstatus_info,
+		.get    =   msm_dai_q6_spdif_chstatus_get,
+		.put    =   msm_dai_q6_spdif_chstatus_put,
+	},
+	SOC_ENUM_EXT("SPDIF RX Format", spdif_config_enum[0],
+			msm_dai_q6_spdif_format_get,
+			msm_dai_q6_spdif_format_put)
+};
+
+static int msm_dai_q6_spdif_hw_params(struct snd_pcm_substream *substream,
+		struct snd_pcm_hw_params *params,
+		struct snd_soc_dai *dai)
+{
+	struct msm_dai_q6_spdif_dai_data *dai_data = dev_get_drvdata(dai->dev);
+
+	dai->id = AFE_PORT_ID_SPDIF_RX;
+	dai_data->channels = params_channels(params);
+	dai_data->spdif_port.cfg.num_channels = dai_data->channels;
+	switch (params_format(params)) {
+	case SNDRV_PCM_FORMAT_S16_LE:
+		dai_data->spdif_port.cfg.bit_width = 16;
+		break;
+	case SNDRV_PCM_FORMAT_S24_LE:
+	case SNDRV_PCM_FORMAT_S24_3LE:
+		dai_data->spdif_port.cfg.bit_width = 24;
+		break;
+	default:
+		pr_err("%s: unsupported format %d\n",
+			__func__, params_format(params));
+		return -EINVAL;
+	}
+
+	dai_data->rate = params_rate(params);
+	dai_data->bitwidth = dai_data->spdif_port.cfg.bit_width;
+	dai_data->spdif_port.cfg.sample_rate = dai_data->rate;
+	dai_data->spdif_port.cfg.spdif_cfg_minor_version =
+		AFE_API_VERSION_SPDIF_CONFIG;
+	dev_dbg(dai->dev, "channel %d sample rate %d bit width %d\n",
+			dai_data->channels, dai_data->rate,
+			dai_data->spdif_port.cfg.bit_width);
+	dai_data->spdif_port.cfg.reserved = 0;
+	return 0;
+}
+
+static void msm_dai_q6_spdif_shutdown(struct snd_pcm_substream *substream,
+		struct snd_soc_dai *dai)
+{
+	struct msm_dai_q6_spdif_dai_data *dai_data = dev_get_drvdata(dai->dev);
+	int rc = 0;
+
+	if (!test_bit(STATUS_PORT_STARTED, dai_data->status_mask)) {
+		pr_info("%s:  afe port not started. dai_data->status_mask = %ld\n",
+				__func__, *dai_data->status_mask);
+		return;
+	}
+
+	rc = afe_close(dai->id);
+
+	if (IS_ERR_VALUE(rc))
+		dev_err(dai->dev, "fail to close AFE port\n");
+
+	pr_debug("%s: dai_data->status_mask = %ld\n", __func__,
+			*dai_data->status_mask);
+
+	clear_bit(STATUS_PORT_STARTED, dai_data->status_mask);
+}
+
+static int msm_dai_q6_spdif_prepare(struct snd_pcm_substream *substream,
+		struct snd_soc_dai *dai)
+{
+	struct msm_dai_q6_spdif_dai_data *dai_data = dev_get_drvdata(dai->dev);
+	int rc = 0;
+
+	if (!test_bit(STATUS_PORT_STARTED, dai_data->status_mask)) {
+		rc = afe_spdif_port_start(dai->id, &dai_data->spdif_port,
+				dai_data->rate);
+		if (IS_ERR_VALUE(rc))
+			dev_err(dai->dev, "fail to open AFE port 0x%x\n",
+					dai->id);
+		else
+			set_bit(STATUS_PORT_STARTED,
+					dai_data->status_mask);
+	}
+
+	return rc;
+}
+
+static int msm_dai_q6_spdif_dai_probe(struct snd_soc_dai *dai)
+{
+	struct msm_dai_q6_spdif_dai_data *dai_data;
+	const struct snd_kcontrol_new *kcontrol;
+	int rc = 0;
+	struct snd_soc_dapm_route intercon;
+	struct snd_soc_dapm_context *dapm;
+
+	if (!dai) {
+		pr_err("%s: dai not found!!\n", __func__);
+		return -EINVAL;
+	}
+	dai_data = kzalloc(sizeof(struct msm_dai_q6_spdif_dai_data),
+			GFP_KERNEL);
+
+	if (!dai_data) {
+		dev_err(dai->dev, "DAI-%d: fail to allocate dai data\n",
+				AFE_PORT_ID_SPDIF_RX);
+		return -ENOMEM;
+	}
+	dev_set_drvdata(dai->dev, dai_data);
+
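+	/* spdif_config_controls[1] is the "SPDIF RX Format" enum control */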
+	kcontrol = &spdif_config_controls[1];
+	dapm = snd_soc_component_get_dapm(dai->component);
+
+	rc = snd_ctl_add(dai->component->card->snd_card,
+			snd_ctl_new1(kcontrol, dai_data));
+
+	memset(&intercon, 0 , sizeof(intercon));
+	if (!rc && dai->driver) {
+		if (dai->driver->playback.stream_name &&
+				dai->driver->playback.aif_name) {
+			dev_dbg(dai->dev, "%s: add route for widget %s",
+				__func__, dai->driver->playback.stream_name);
+			intercon.source = dai->driver->playback.aif_name;
+			intercon.sink = dai->driver->playback.stream_name;
+			dev_dbg(dai->dev, "%s: src %s sink %s\n",
+				__func__, intercon.source, intercon.sink);
+			snd_soc_dapm_add_routes(dapm, &intercon, 1);
+		}
+		if (dai->driver->capture.stream_name &&
+				dai->driver->capture.aif_name) {
+			dev_dbg(dai->dev, "%s: add route for widget %s",
+				__func__, dai->driver->capture.stream_name);
+			intercon.sink = dai->driver->capture.aif_name;
+			intercon.source = dai->driver->capture.stream_name;
+			dev_dbg(dai->dev, "%s: src %s sink %s\n",
+				__func__, intercon.source, intercon.sink);
+			snd_soc_dapm_add_routes(dapm, &intercon, 1);
+		}
+	}
+	return rc;
+}
+
+static int msm_dai_q6_spdif_dai_remove(struct snd_soc_dai *dai)
+{
+	struct msm_dai_q6_spdif_dai_data *dai_data;
+	int rc;
+
+	dai_data = dev_get_drvdata(dai->dev);
+
+	/* If AFE port is still up, close it */
+	if (test_bit(STATUS_PORT_STARTED, dai_data->status_mask)) {
+		rc = afe_close(dai->id); /* can block */
+
+		if (IS_ERR_VALUE(rc))
+			dev_err(dai->dev, "fail to close AFE port\n");
+
+		clear_bit(STATUS_PORT_STARTED, dai_data->status_mask);
+	}
+	kfree(dai_data);
+
+	return 0;
+}
+
+static struct snd_soc_dai_ops msm_dai_q6_spdif_ops = {
+	.prepare	= msm_dai_q6_spdif_prepare,
+	.hw_params	= msm_dai_q6_spdif_hw_params,
+	.shutdown	= msm_dai_q6_spdif_shutdown,
+};
+
+static struct snd_soc_dai_driver msm_dai_q6_spdif_spdif_rx_dai = {
+	.playback = {
+		.stream_name = "SPDIF Playback",
+		.aif_name = "SPDIF_RX",
+		.rates = SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_8000 |
+			SNDRV_PCM_RATE_16000,
+		.formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE,
+		.channels_min = 1,
+		.channels_max = 4,
+		.rate_min = 8000,
+		.rate_max = 48000,
+	},
+	.ops = &msm_dai_q6_spdif_ops,
+	.probe = msm_dai_q6_spdif_dai_probe,
+	.remove = msm_dai_q6_spdif_dai_remove,
+};
+
+static const struct snd_soc_component_driver msm_dai_spdif_q6_component = {
+	.name		= "msm-dai-q6-spdif",
+};
+
+static int msm_dai_q6_prepare(struct snd_pcm_substream *substream,
+		struct snd_soc_dai *dai)
+{
+	struct msm_dai_q6_dai_data *dai_data = dev_get_drvdata(dai->dev);
+	int rc = 0;
+
+	if (!test_bit(STATUS_PORT_STARTED, dai_data->status_mask)) {
+		if (dai_data->enc_config.format != ENC_FMT_NONE) {
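+			/*
+			 * An encoder config was set (via the "SLIM_7_RX
+			 * Encoder Config" control), so start the port with
+			 * afe_port_start_v2() and pass the encoder config.
+			 */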
+			int bitwidth = 0;
+
+			if (dai_data->afe_in_bitformat ==
+			    SNDRV_PCM_FORMAT_S24_LE)
+				bitwidth = 24;
+			else if (dai_data->afe_in_bitformat ==
+				 SNDRV_PCM_FORMAT_S16_LE)
+				bitwidth = 16;
+			pr_debug("%s: calling AFE_PORT_START_V2 with enc_format: %d\n",
+				 __func__, dai_data->enc_config.format);
+			rc = afe_port_start_v2(dai->id, &dai_data->port_config,
+					       dai_data->rate,
+					       dai_data->afe_in_channels,
+					       bitwidth,
+					       &dai_data->enc_config);
+			if (rc < 0)
+				pr_err("%s: afe_port_start_v2 failed error: %d\n",
+					__func__, rc);
+		} else {
+			rc = afe_port_start(dai->id, &dai_data->port_config,
+						dai_data->rate);
+		}
+		if (IS_ERR_VALUE(rc))
+			dev_err(dai->dev, "fail to open AFE port 0x%x\n",
+				dai->id);
+		else
+			set_bit(STATUS_PORT_STARTED,
+				dai_data->status_mask);
+	}
+	return rc;
+}
+
+static int msm_dai_q6_cdc_hw_params(struct snd_pcm_hw_params *params,
+				    struct snd_soc_dai *dai, int stream)
+{
+	struct msm_dai_q6_dai_data *dai_data = dev_get_drvdata(dai->dev);
+
+	dai_data->channels = params_channels(params);
+	switch (dai_data->channels) {
+	case 2:
+		dai_data->port_config.i2s.mono_stereo = MSM_AFE_STEREO;
+		break;
+	case 1:
+		dai_data->port_config.i2s.mono_stereo = MSM_AFE_MONO;
+		break;
+	default:
+		pr_err("%s: unsupported channel count %d\n",
+			__func__, dai_data->channels);
+		return -EINVAL;
+	}
+
+	switch (params_format(params)) {
+	case SNDRV_PCM_FORMAT_S16_LE:
+	case SNDRV_PCM_FORMAT_SPECIAL:
+		dai_data->port_config.i2s.bit_width = 16;
+		break;
+	case SNDRV_PCM_FORMAT_S24_LE:
+	case SNDRV_PCM_FORMAT_S24_3LE:
+		dai_data->port_config.i2s.bit_width = 24;
+		break;
+	default:
+		pr_err("%s: unsupported format %d\n",
+			__func__, params_format(params));
+		return -EINVAL;
+	}
+
+	dai_data->rate = params_rate(params);
+	dai_data->port_config.i2s.sample_rate = dai_data->rate;
+	dai_data->port_config.i2s.i2s_cfg_minor_version =
+						AFE_API_VERSION_I2S_CONFIG;
+	dai_data->port_config.i2s.data_format =  AFE_LINEAR_PCM_DATA;
+	dev_dbg(dai->dev, "channel %d sample rate %d entered\n",
+		dai_data->channels, dai_data->rate);
+
+	dai_data->port_config.i2s.channel_mode = 1;
+	return 0;
+}
+
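+/*
+ * Count the set bits in the SD line mask using Kernighan's method:
+ * each iteration clears the lowest set bit.
+ */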
+static u8 num_of_bits_set(u8 sd_line_mask)
+{
+	u8 num_bits_set = 0;
+
+	while (sd_line_mask) {
+		num_bits_set++;
+		sd_line_mask = sd_line_mask & (sd_line_mask - 1);
+	}
+	return num_bits_set;
+}
+
+static int msm_dai_q6_i2s_hw_params(struct snd_pcm_hw_params *params,
+				    struct snd_soc_dai *dai, int stream)
+{
+	struct msm_dai_q6_dai_data *dai_data = dev_get_drvdata(dai->dev);
+	struct msm_i2s_data *i2s_pdata =
+			(struct msm_i2s_data *) dai->dev->platform_data;
+
+	dai_data->channels = params_channels(params);
+	if (num_of_bits_set(i2s_pdata->sd_lines) == 1) {
+		switch (dai_data->channels) {
+		case 2:
+			dai_data->port_config.i2s.mono_stereo = MSM_AFE_STEREO;
+			break;
+		case 1:
+			dai_data->port_config.i2s.mono_stereo = MSM_AFE_MONO;
+			break;
+		default:
+			pr_warn("%s: greater than stereo has not been validated %d",
+				__func__, dai_data->channels);
+			break;
+		}
+	}
+	dai_data->rate = params_rate(params);
+	dai_data->port_config.i2s.sample_rate = dai_data->rate;
+	dai_data->port_config.i2s.i2s_cfg_minor_version =
+						AFE_API_VERSION_I2S_CONFIG;
+	dai_data->port_config.i2s.data_format =  AFE_LINEAR_PCM_DATA;
+	/* Q6 only supports 16 bit as of now */
+	dai_data->port_config.i2s.bit_width = 16;
+	dai_data->port_config.i2s.channel_mode = 1;
+
+	return 0;
+}
+
+static int msm_dai_q6_slim_bus_hw_params(struct snd_pcm_hw_params *params,
+				    struct snd_soc_dai *dai, int stream)
+{
+	struct msm_dai_q6_dai_data *dai_data = dev_get_drvdata(dai->dev);
+
+	dai_data->channels = params_channels(params);
+	dai_data->rate = params_rate(params);
+
+	switch (params_format(params)) {
+	case SNDRV_PCM_FORMAT_S16_LE:
+	case SNDRV_PCM_FORMAT_SPECIAL:
+		dai_data->port_config.slim_sch.bit_width = 16;
+		break;
+	case SNDRV_PCM_FORMAT_S24_LE:
+	case SNDRV_PCM_FORMAT_S24_3LE:
+		dai_data->port_config.slim_sch.bit_width = 24;
+		break;
+	case SNDRV_PCM_FORMAT_S32_LE:
+		dai_data->port_config.slim_sch.bit_width = 32;
+		break;
+	default:
+		pr_err("%s: unsupported format %d\n",
+			__func__, params_format(params));
+		return -EINVAL;
+	}
+
+	dai_data->port_config.slim_sch.sb_cfg_minor_version =
+				AFE_API_VERSION_SLIMBUS_CONFIG;
+	dai_data->port_config.slim_sch.sample_rate = dai_data->rate;
+	dai_data->port_config.slim_sch.num_channels = dai_data->channels;
+
+	switch (dai->id) {
+	case SLIMBUS_7_RX:
+	case SLIMBUS_7_TX:
+	case SLIMBUS_8_RX:
+	case SLIMBUS_8_TX:
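+		/* SLIMBUS 7/8 ports sit on the second SLIMBUS device */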
+		dai_data->port_config.slim_sch.slimbus_dev_id =
+			AFE_SLIMBUS_DEVICE_2;
+		break;
+	default:
+		dai_data->port_config.slim_sch.slimbus_dev_id =
+			AFE_SLIMBUS_DEVICE_1;
+		break;
+	}
+
+	dev_dbg(dai->dev, "%s:slimbus_dev_id[%hu] bit_wd[%hu] format[%hu]\n"
+		"num_channel %hu  shared_ch_mapping[0]  %hu\n"
+		"slave_port_mapping[1]  %hu slave_port_mapping[2]  %hu\n"
+		"sample_rate %d\n", __func__,
+		dai_data->port_config.slim_sch.slimbus_dev_id,
+		dai_data->port_config.slim_sch.bit_width,
+		dai_data->port_config.slim_sch.data_format,
+		dai_data->port_config.slim_sch.num_channels,
+		dai_data->port_config.slim_sch.shared_ch_mapping[0],
+		dai_data->port_config.slim_sch.shared_ch_mapping[1],
+		dai_data->port_config.slim_sch.shared_ch_mapping[2],
+		dai_data->rate);
+
+	return 0;
+}
+
+static int msm_dai_q6_usb_audio_hw_params(struct snd_pcm_hw_params *params,
+					  struct snd_soc_dai *dai, int stream)
+{
+	struct msm_dai_q6_dai_data *dai_data = dev_get_drvdata(dai->dev);
+
+	dai_data->channels = params_channels(params);
+	dai_data->rate = params_rate(params);
+
+	switch (params_format(params)) {
+	case SNDRV_PCM_FORMAT_S16_LE:
+	case SNDRV_PCM_FORMAT_SPECIAL:
+		dai_data->port_config.usb_audio.bit_width = 16;
+		break;
+	case SNDRV_PCM_FORMAT_S24_LE:
+	case SNDRV_PCM_FORMAT_S24_3LE:
+		dai_data->port_config.usb_audio.bit_width = 24;
+		break;
+	case SNDRV_PCM_FORMAT_S32_LE:
+		dai_data->port_config.usb_audio.bit_width = 32;
+		break;
+
+	default:
+		dev_err(dai->dev, "%s: invalid format %d\n",
+			__func__, params_format(params));
+		return -EINVAL;
+	}
+	dai_data->port_config.usb_audio.cfg_minor_version =
+					AFE_API_MINIOR_VERSION_USB_AUDIO_CONFIG;
+	dai_data->port_config.usb_audio.num_channels = dai_data->channels;
+	dai_data->port_config.usb_audio.sample_rate = dai_data->rate;
+
+	dev_dbg(dai->dev, "%s: dev_id[0x%x] bit_wd[%hu] format[%hu]\n"
+		"num_channel %hu  sample_rate %d\n", __func__,
+		dai_data->port_config.usb_audio.dev_token,
+		dai_data->port_config.usb_audio.bit_width,
+		dai_data->port_config.usb_audio.data_format,
+		dai_data->port_config.usb_audio.num_channels,
+		dai_data->port_config.usb_audio.sample_rate);
+
+	return 0;
+}
+
+static int msm_dai_q6_bt_fm_hw_params(struct snd_pcm_hw_params *params,
+				struct snd_soc_dai *dai, int stream)
+{
+	struct msm_dai_q6_dai_data *dai_data = dev_get_drvdata(dai->dev);
+
+	dai_data->channels = params_channels(params);
+	dai_data->rate = params_rate(params);
+
+	dev_dbg(dai->dev, "channels %d sample rate %d entered\n",
+		dai_data->channels, dai_data->rate);
+
+	memset(&dai_data->port_config, 0, sizeof(dai_data->port_config));
+
+	pr_debug("%s: setting bt_fm parameters\n", __func__);
+
+	dai_data->port_config.int_bt_fm.bt_fm_cfg_minor_version =
+					AFE_API_VERSION_INTERNAL_BT_FM_CONFIG;
+	dai_data->port_config.int_bt_fm.num_channels = dai_data->channels;
+	dai_data->port_config.int_bt_fm.sample_rate = dai_data->rate;
+	dai_data->port_config.int_bt_fm.bit_width = 16;
+
+	return 0;
+}
+
+static int msm_dai_q6_afe_rtproxy_hw_params(struct snd_pcm_hw_params *params,
+				struct snd_soc_dai *dai)
+{
+	struct msm_dai_q6_dai_data *dai_data = dev_get_drvdata(dai->dev);
+
+	dai_data->rate = params_rate(params);
+	dai_data->port_config.rtproxy.num_channels = params_channels(params);
+	dai_data->port_config.rtproxy.sample_rate = params_rate(params);
+
+	pr_debug("channel %d entered, dai_id: %d, rate: %d\n",
+		 dai_data->port_config.rtproxy.num_channels, dai->id,
+		 dai_data->rate);
+
+	dai_data->port_config.rtproxy.rt_proxy_cfg_minor_version =
+				AFE_API_VERSION_RT_PROXY_CONFIG;
+	dai_data->port_config.rtproxy.bit_width = 16; /* Q6 only supports 16 */
+	dai_data->port_config.rtproxy.interleaved = 1;
+	dai_data->port_config.rtproxy.frame_size = params_period_bytes(params);
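+	/* allow a timing jitter of up to half a frame */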
+	dai_data->port_config.rtproxy.jitter_allowance =
+				dai_data->port_config.rtproxy.frame_size/2;
+	dai_data->port_config.rtproxy.low_water_mark = 0;
+	dai_data->port_config.rtproxy.high_water_mark = 0;
+
+	return 0;
+}
+
+static int msm_dai_q6_psuedo_port_hw_params(struct snd_pcm_hw_params *params,
+				    struct snd_soc_dai *dai, int stream)
+{
+	struct msm_dai_q6_dai_data *dai_data = dev_get_drvdata(dai->dev);
+
+	dai_data->channels = params_channels(params);
+	dai_data->rate = params_rate(params);
+
+	/* Q6 only supports 16 bit as of now */
+	dai_data->port_config.pseudo_port.pseud_port_cfg_minor_version =
+				AFE_API_VERSION_PSEUDO_PORT_CONFIG;
+	dai_data->port_config.pseudo_port.num_channels =
+				params_channels(params);
+	dai_data->port_config.pseudo_port.bit_width = 16;
+	dai_data->port_config.pseudo_port.data_format = 0;
+	dai_data->port_config.pseudo_port.timing_mode =
+				AFE_PSEUDOPORT_TIMING_MODE_TIMER;
+	dai_data->port_config.pseudo_port.sample_rate = params_rate(params);
+
+	dev_dbg(dai->dev, "%s: bit_wd[%hu] num_channels [%hu] format[%hu]\n"
+		"timing Mode %hu sample_rate %d\n", __func__,
+		dai_data->port_config.pseudo_port.bit_width,
+		dai_data->port_config.pseudo_port.num_channels,
+		dai_data->port_config.pseudo_port.data_format,
+		dai_data->port_config.pseudo_port.timing_mode,
+		dai_data->port_config.pseudo_port.sample_rate);
+
+	return 0;
+}
+
+/*
+ * The current implementation assumes hw_params is called only once.
+ * That may not always hold; it is an open question what to do when
+ * the ADM and AFE ports are already opened and a parameter changes.
+ */
+static int msm_dai_q6_hw_params(struct snd_pcm_substream *substream,
+				struct snd_pcm_hw_params *params,
+				struct snd_soc_dai *dai)
+{
+	int rc = 0;
+
+	switch (dai->id) {
+	case PRIMARY_I2S_TX:
+	case PRIMARY_I2S_RX:
+	case SECONDARY_I2S_RX:
+		rc = msm_dai_q6_cdc_hw_params(params, dai, substream->stream);
+		break;
+	case MI2S_RX:
+		rc = msm_dai_q6_i2s_hw_params(params, dai, substream->stream);
+		break;
+	case SLIMBUS_0_RX:
+	case SLIMBUS_1_RX:
+	case SLIMBUS_2_RX:
+	case SLIMBUS_3_RX:
+	case SLIMBUS_4_RX:
+	case SLIMBUS_5_RX:
+	case SLIMBUS_6_RX:
+	case SLIMBUS_7_RX:
+	case SLIMBUS_8_RX:
+	case SLIMBUS_0_TX:
+	case SLIMBUS_1_TX:
+	case SLIMBUS_2_TX:
+	case SLIMBUS_3_TX:
+	case SLIMBUS_4_TX:
+	case SLIMBUS_5_TX:
+	case SLIMBUS_6_TX:
+	case SLIMBUS_7_TX:
+	case SLIMBUS_8_TX:
+		rc = msm_dai_q6_slim_bus_hw_params(params, dai,
+				substream->stream);
+		break;
+	case INT_BT_SCO_RX:
+	case INT_BT_SCO_TX:
+	case INT_BT_A2DP_RX:
+	case INT_FM_RX:
+	case INT_FM_TX:
+		rc = msm_dai_q6_bt_fm_hw_params(params, dai, substream->stream);
+		break;
+	case AFE_PORT_ID_USB_RX:
+	case AFE_PORT_ID_USB_TX:
+		rc = msm_dai_q6_usb_audio_hw_params(params, dai,
+						    substream->stream);
+		break;
+	case RT_PROXY_DAI_001_TX:
+	case RT_PROXY_DAI_001_RX:
+	case RT_PROXY_DAI_002_TX:
+	case RT_PROXY_DAI_002_RX:
+		rc = msm_dai_q6_afe_rtproxy_hw_params(params, dai);
+		break;
+	case VOICE_PLAYBACK_TX:
+	case VOICE2_PLAYBACK_TX:
+	case VOICE_RECORD_RX:
+	case VOICE_RECORD_TX:
+		rc = msm_dai_q6_psuedo_port_hw_params(params,
+						dai, substream->stream);
+		break;
+	default:
+		dev_err(dai->dev, "invalid AFE port ID 0x%x\n", dai->id);
+		rc = -EINVAL;
+		break;
+	}
+
+	return rc;
+}
+
+static void msm_dai_q6_shutdown(struct snd_pcm_substream *substream,
+				struct snd_soc_dai *dai)
+{
+	struct msm_dai_q6_dai_data *dai_data = dev_get_drvdata(dai->dev);
+	int rc = 0;
+
+	if (test_bit(STATUS_PORT_STARTED, dai_data->status_mask)) {
+		pr_debug("%s: stop pseudo port:%d\n", __func__,  dai->id);
+		rc = afe_close(dai->id); /* can block */
+
+		if (IS_ERR_VALUE(rc))
+			dev_err(dai->dev, "fail to close AFE port\n");
+		pr_debug("%s: dai_data->status_mask = %ld\n", __func__,
+			*dai_data->status_mask);
+		clear_bit(STATUS_PORT_STARTED, dai_data->status_mask);
+	}
+}
+
+static int msm_dai_q6_cdc_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
+{
+	struct msm_dai_q6_dai_data *dai_data = dev_get_drvdata(dai->dev);
+
+	switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
+	case SND_SOC_DAIFMT_CBS_CFS:
+		dai_data->port_config.i2s.ws_src = 1; /* CPU is master */
+		break;
+	case SND_SOC_DAIFMT_CBM_CFM:
+		dai_data->port_config.i2s.ws_src = 0; /* CPU is slave */
+		break;
+	default:
+		pr_err("%s: invalid master/slave fmt 0x%x\n",
+			__func__, fmt & SND_SOC_DAIFMT_MASTER_MASK);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int msm_dai_q6_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
+{
+	int rc = 0;
+
+	dev_dbg(dai->dev, "%s: id = %d fmt[%d]\n", __func__,
+		dai->id, fmt);
+	switch (dai->id) {
+	case PRIMARY_I2S_TX:
+	case PRIMARY_I2S_RX:
+	case MI2S_RX:
+	case SECONDARY_I2S_RX:
+		rc = msm_dai_q6_cdc_set_fmt(dai, fmt);
+		break;
+	default:
+		dev_err(dai->dev, "invalid cpu_dai id 0x%x\n", dai->id);
+		rc = -EINVAL;
+		break;
+	}
+
+	return rc;
+}
+
+static int msm_dai_q6_set_channel_map(struct snd_soc_dai *dai,
+				unsigned int tx_num, unsigned int *tx_slot,
+				unsigned int rx_num, unsigned int *rx_slot)
+{
+	int rc = 0;
+	struct msm_dai_q6_dai_data *dai_data = dev_get_drvdata(dai->dev);
+	unsigned int i = 0;
+
+	dev_dbg(dai->dev, "%s: id = %d\n", __func__, dai->id);
+	switch (dai->id) {
+	case SLIMBUS_0_RX:
+	case SLIMBUS_1_RX:
+	case SLIMBUS_2_RX:
+	case SLIMBUS_3_RX:
+	case SLIMBUS_4_RX:
+	case SLIMBUS_5_RX:
+	case SLIMBUS_6_RX:
+	case SLIMBUS_7_RX:
+	case SLIMBUS_8_RX:
+		/*
+		 * Channel numbers must be between 128 and 255.
+		 * For an RX port use channel numbers
+		 * from 138 to 144 for pre-Taiko
+		 * and from 144 to 159 for Taiko.
+		 */
+		if (!rx_slot) {
+			pr_err("%s: rx slot not found\n", __func__);
+			return -EINVAL;
+		}
+		if (rx_num > AFE_PORT_MAX_AUDIO_CHAN_CNT) {
+			pr_err("%s: invalid rx num %d\n", __func__, rx_num);
+			return -EINVAL;
+		}
+
+		for (i = 0; i < rx_num; i++) {
+			dai_data->port_config.slim_sch.shared_ch_mapping[i] =
+			    rx_slot[i];
+			pr_debug("%s: find number of channels[%d] ch[%d]\n",
+			       __func__, i, rx_slot[i]);
+		}
+		dai_data->port_config.slim_sch.num_channels = rx_num;
+		pr_debug("%s: SLIMBUS_%d_RX cnt[%d] ch[%d %d]\n", __func__,
+			(dai->id - SLIMBUS_0_RX) / 2, rx_num,
+			dai_data->port_config.slim_sch.shared_ch_mapping[0],
+			dai_data->port_config.slim_sch.shared_ch_mapping[1]);
+
+		break;
+	case SLIMBUS_0_TX:
+	case SLIMBUS_1_TX:
+	case SLIMBUS_2_TX:
+	case SLIMBUS_3_TX:
+	case SLIMBUS_4_TX:
+	case SLIMBUS_5_TX:
+	case SLIMBUS_6_TX:
+	case SLIMBUS_7_TX:
+	case SLIMBUS_8_TX:
+		/*
+		 * Channel numbers must be between 128 and 255.
+		 * For a TX port use channel numbers
+		 * from 128 to 137 for pre-Taiko
+		 * and from 128 to 143 for Taiko.
+		 */
+		if (!tx_slot) {
+			pr_err("%s: tx slot not found\n", __func__);
+			return -EINVAL;
+		}
+		if (tx_num > AFE_PORT_MAX_AUDIO_CHAN_CNT) {
+			pr_err("%s: invalid tx num %d\n", __func__, tx_num);
+			return -EINVAL;
+		}
+
+		for (i = 0; i < tx_num; i++) {
+			dai_data->port_config.slim_sch.shared_ch_mapping[i] =
+			    tx_slot[i];
+			pr_debug("%s: find number of channels[%d] ch[%d]\n",
+				 __func__, i, tx_slot[i]);
+		}
+		dai_data->port_config.slim_sch.num_channels = tx_num;
+		pr_debug("%s:SLIMBUS_%d_TX cnt[%d] ch[%d %d]\n", __func__,
+			(dai->id - SLIMBUS_0_TX) / 2, tx_num,
+			dai_data->port_config.slim_sch.shared_ch_mapping[0],
+			dai_data->port_config.slim_sch.shared_ch_mapping[1]);
+		break;
+	default:
+		dev_err(dai->dev, "invalid cpu_dai id 0x%x\n", dai->id);
+		rc = -EINVAL;
+		break;
+	}
+	return rc;
+}
+
+static struct snd_soc_dai_ops msm_dai_q6_ops = {
+	.prepare	= msm_dai_q6_prepare,
+	.hw_params	= msm_dai_q6_hw_params,
+	.shutdown	= msm_dai_q6_shutdown,
+	.set_fmt	= msm_dai_q6_set_fmt,
+	.set_channel_map = msm_dai_q6_set_channel_map,
+};
+
+/*
+ * For single CPU DAI registration, the dai id needs to be
+ * set explicitly in the dai probe, as ASoC does not read
+ * the cpu->driver->id field; instead it derives the dai id
+ * from the device name, which has the form %s.%d. This dai
+ * id should be assigned the back-end AFE port id and used
+ * during dai prepare. For multiple dai registration, calling
+ * this function is not required; however, the dai->driver->id
+ * field must be defined and set to the corresponding AFE
+ * port id.
+ */
+static inline void msm_dai_q6_set_dai_id(struct snd_soc_dai *dai)
+{
+	if (!dai->driver->id) {
+		dev_warn(dai->dev, "DAI driver id is not set\n");
+		return;
+	}
+	dai->id = dai->driver->id;
+}
+
+static int msm_dai_q6_cal_info_put(struct snd_kcontrol *kcontrol,
+				    struct snd_ctl_elem_value *ucontrol)
+{
+	struct msm_dai_q6_dai_data *dai_data = kcontrol->private_data;
+	u16 port_id = ((struct soc_enum *)
+					kcontrol->private_value)->reg;
+
+	dai_data->cal_mode = ucontrol->value.integer.value[0];
+	pr_debug("%s: setting cal_mode to %d\n",
+		__func__, dai_data->cal_mode);
+	afe_set_cal_mode(port_id, dai_data->cal_mode);
+
+	return 0;
+}
+
+static int msm_dai_q6_cal_info_get(struct snd_kcontrol *kcontrol,
+				    struct snd_ctl_elem_value *ucontrol)
+{
+	struct msm_dai_q6_dai_data *dai_data = kcontrol->private_data;
+
+	ucontrol->value.integer.value[0] = dai_data->cal_mode;
+	return 0;
+}
+
+static int msm_dai_q6_sb_format_put(struct snd_kcontrol *kcontrol,
+				    struct snd_ctl_elem_value *ucontrol)
+{
+	struct msm_dai_q6_dai_data *dai_data = kcontrol->private_data;
+	int value = ucontrol->value.integer.value[0];
+
+	if (dai_data) {
+		dai_data->port_config.slim_sch.data_format = value;
+		pr_debug("%s: format = %d\n",  __func__, value);
+	}
+
+	return 0;
+}
+
+static int msm_dai_q6_sb_format_get(struct snd_kcontrol *kcontrol,
+				    struct snd_ctl_elem_value *ucontrol)
+{
+	struct msm_dai_q6_dai_data *dai_data = kcontrol->private_data;
+
+	if (dai_data)
+		ucontrol->value.integer.value[0] =
+			dai_data->port_config.slim_sch.data_format;
+
+	return 0;
+}
+
+static int msm_dai_q6_usb_audio_cfg_put(struct snd_kcontrol *kcontrol,
+					struct snd_ctl_elem_value *ucontrol)
+{
+	struct msm_dai_q6_dai_data *dai_data = kcontrol->private_data;
+	u32 val = ucontrol->value.integer.value[0];
+
+	if (dai_data) {
+		dai_data->port_config.usb_audio.dev_token = val;
+		pr_debug("%s: dev_token = 0x%x\n",  __func__,
+				 dai_data->port_config.usb_audio.dev_token);
+	} else {
+		pr_err("%s: dai_data is NULL\n", __func__);
+	}
+
+	return 0;
+}
+
+static int msm_dai_q6_usb_audio_cfg_get(struct snd_kcontrol *kcontrol,
+					struct snd_ctl_elem_value *ucontrol)
+{
+	struct msm_dai_q6_dai_data *dai_data = kcontrol->private_data;
+
+	if (dai_data) {
+		ucontrol->value.integer.value[0] =
+			 dai_data->port_config.usb_audio.dev_token;
+		pr_debug("%s: dev_token = 0x%x\n",  __func__,
+				 dai_data->port_config.usb_audio.dev_token);
+	} else {
+		pr_err("%s: dai_data is NULL\n", __func__);
+	}
+
+	return 0;
+}
+
+static int msm_dai_q6_usb_audio_endian_cfg_put(struct snd_kcontrol *kcontrol,
+					 struct snd_ctl_elem_value *ucontrol)
+{
+	struct msm_dai_q6_dai_data *dai_data = kcontrol->private_data;
+	u32 val = ucontrol->value.integer.value[0];
+
+	if (dai_data) {
+		dai_data->port_config.usb_audio.endian = val;
+		pr_debug("%s: endian = 0x%x\n",  __func__,
+				 dai_data->port_config.usb_audio.endian);
+	} else {
+		pr_err("%s: dai_data is NULL\n", __func__);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int msm_dai_q6_usb_audio_endian_cfg_get(struct snd_kcontrol *kcontrol,
+					struct snd_ctl_elem_value *ucontrol)
+{
+	struct msm_dai_q6_dai_data *dai_data = kcontrol->private_data;
+
+	if (dai_data) {
+		ucontrol->value.integer.value[0] =
+			 dai_data->port_config.usb_audio.endian;
+		pr_debug("%s: endian = 0x%x\n",  __func__,
+				 dai_data->port_config.usb_audio.endian);
+	} else {
+		pr_err("%s: dai_data is NULL\n", __func__);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int  msm_dai_q6_afe_enc_cfg_info(struct snd_kcontrol *kcontrol,
+					struct snd_ctl_elem_info *uinfo)
+{
+	uinfo->type = SNDRV_CTL_ELEM_TYPE_BYTES;
+	uinfo->count = sizeof(struct afe_enc_config);
+
+	return 0;
+}
+
+static int msm_dai_q6_afe_enc_cfg_get(struct snd_kcontrol *kcontrol,
+				      struct snd_ctl_elem_value *ucontrol)
+{
+	int ret = 0;
+	struct msm_dai_q6_dai_data *dai_data = kcontrol->private_data;
+
+	if (dai_data) {
+		int format_size = sizeof(dai_data->enc_config.format);
+
+		pr_debug("%s:encoder config for %d format\n",
+			 __func__, dai_data->enc_config.format);
+		memcpy(ucontrol->value.bytes.data,
+			&dai_data->enc_config.format,
+			format_size);
+		switch (dai_data->enc_config.format) {
+		case ENC_FMT_SBC:
+			memcpy(ucontrol->value.bytes.data + format_size,
+				&dai_data->enc_config.data,
+				sizeof(struct asm_sbc_enc_cfg_t));
+			break;
+		case ENC_FMT_AAC_V2:
+			memcpy(ucontrol->value.bytes.data + format_size,
+				&dai_data->enc_config.data,
+				sizeof(struct asm_aac_enc_cfg_v2_t));
+			break;
+		case ENC_FMT_APTX:
+		case ENC_FMT_APTX_HD:
+			memcpy(ucontrol->value.bytes.data + format_size,
+				&dai_data->enc_config.data,
+				sizeof(struct asm_custom_enc_cfg_aptx_t));
+			break;
+		default:
+			pr_debug("%s: unknown format = %d\n",
+				 __func__, dai_data->enc_config.format);
+			ret = -EINVAL;
+			break;
+		}
+	}
+
+	return ret;
+}
+
+static int msm_dai_q6_afe_enc_cfg_put(struct snd_kcontrol *kcontrol,
+				      struct snd_ctl_elem_value *ucontrol)
+{
+	int ret = 0;
+	struct msm_dai_q6_dai_data *dai_data = kcontrol->private_data;
+
+	if (dai_data) {
+		int format_size = sizeof(dai_data->enc_config.format);
+
+		memset(&dai_data->enc_config, 0x0,
+			sizeof(struct afe_enc_config));
+		memcpy(&dai_data->enc_config.format,
+			ucontrol->value.bytes.data,
+			format_size);
+		pr_debug("%s: Received encoder config for %d format\n",
+			 __func__, dai_data->enc_config.format);
+		switch (dai_data->enc_config.format) {
+		case ENC_FMT_SBC:
+			memcpy(&dai_data->enc_config.data,
+				ucontrol->value.bytes.data + format_size,
+				sizeof(struct asm_sbc_enc_cfg_t));
+			break;
+		case ENC_FMT_AAC_V2:
+			memcpy(&dai_data->enc_config.data,
+				ucontrol->value.bytes.data + format_size,
+				sizeof(struct asm_aac_enc_cfg_v2_t));
+			break;
+		case ENC_FMT_APTX:
+		case ENC_FMT_APTX_HD:
+			memcpy(&dai_data->enc_config.data,
+				ucontrol->value.bytes.data + format_size,
+				sizeof(struct asm_custom_enc_cfg_aptx_t));
+			break;
+		default:
+			pr_debug("%s: Ignore enc config for unknown format = %d\n",
+				 __func__, dai_data->enc_config.format);
+			ret = -EINVAL;
+			break;
+		}
+	} else {
+		ret = -EINVAL;
+	}
+
+	return ret;
+}
+
+static const char *const afe_input_chs_text[] = {"Zero", "One", "Two"};
+
+static const struct soc_enum afe_input_chs_enum[] = {
+	SOC_ENUM_SINGLE_EXT(3, afe_input_chs_text),
+};
+
+static const char *const afe_input_bit_format_text[] = {"S16_LE", "S24_LE"};
+
+static const struct soc_enum afe_input_bit_format_enum[] = {
+	SOC_ENUM_SINGLE_EXT(2, afe_input_bit_format_text),
+};
+
+static int msm_dai_q6_afe_input_channel_get(struct snd_kcontrol *kcontrol,
+			struct snd_ctl_elem_value *ucontrol)
+{
+	struct msm_dai_q6_dai_data *dai_data = kcontrol->private_data;
+
+	if (dai_data) {
+		ucontrol->value.integer.value[0] = dai_data->afe_in_channels;
+		pr_debug("%s:afe input channel = %d\n",
+			  __func__, dai_data->afe_in_channels);
+	}
+
+	return 0;
+}
+
+static int msm_dai_q6_afe_input_channel_put(struct snd_kcontrol *kcontrol,
+			struct snd_ctl_elem_value *ucontrol)
+{
+	struct msm_dai_q6_dai_data *dai_data = kcontrol->private_data;
+
+	if (dai_data) {
+		dai_data->afe_in_channels = ucontrol->value.integer.value[0];
+		pr_debug("%s: updating afe input channel : %d\n",
+			__func__, dai_data->afe_in_channels);
+	}
+
+	return 0;
+}
+
+static int msm_dai_q6_afe_input_bit_format_get(
+			struct snd_kcontrol *kcontrol,
+			struct snd_ctl_elem_value *ucontrol)
+{
+	struct msm_dai_q6_dai_data *dai_data = kcontrol->private_data;
+
+	if (!dai_data) {
+		pr_err("%s: Invalid dai data\n", __func__);
+		return -EINVAL;
+	}
+
+	switch (dai_data->afe_in_bitformat) {
+	case SNDRV_PCM_FORMAT_S24_LE:
+		ucontrol->value.integer.value[0] = 1;
+		break;
+	case SNDRV_PCM_FORMAT_S16_LE:
+	default:
+		ucontrol->value.integer.value[0] = 0;
+		break;
+	}
+	pr_debug("%s: afe input bit format : %ld\n",
+		  __func__, ucontrol->value.integer.value[0]);
+
+	return 0;
+}
+
+static int msm_dai_q6_afe_input_bit_format_put(
+			struct snd_kcontrol *kcontrol,
+			struct snd_ctl_elem_value *ucontrol)
+{
+	struct msm_dai_q6_dai_data *dai_data = kcontrol->private_data;
+
+	if (!dai_data) {
+		pr_err("%s: Invalid dai data\n", __func__);
+		return -EINVAL;
+	}
+	switch (ucontrol->value.integer.value[0]) {
+	case 1:
+		dai_data->afe_in_bitformat = SNDRV_PCM_FORMAT_S24_LE;
+		break;
+	case 0:
+	default:
+		dai_data->afe_in_bitformat = SNDRV_PCM_FORMAT_S16_LE;
+		break;
+	}
+	pr_debug("%s: updating afe input bit format : %d\n",
+		__func__, dai_data->afe_in_bitformat);
+
+	return 0;
+}
+
+static const struct snd_kcontrol_new afe_enc_config_controls[] = {
+	{
+		.access = (SNDRV_CTL_ELEM_ACCESS_READWRITE |
+			   SNDRV_CTL_ELEM_ACCESS_INACTIVE),
+		.iface = SNDRV_CTL_ELEM_IFACE_PCM,
+		.name = "SLIM_7_RX Encoder Config",
+		.info = msm_dai_q6_afe_enc_cfg_info,
+		.get = msm_dai_q6_afe_enc_cfg_get,
+		.put = msm_dai_q6_afe_enc_cfg_put,
+	},
+	SOC_ENUM_EXT("AFE Input Channels", afe_input_chs_enum[0],
+		     msm_dai_q6_afe_input_channel_get,
+		     msm_dai_q6_afe_input_channel_put),
+	SOC_ENUM_EXT("AFE Input Bit Format", afe_input_bit_format_enum[0],
+		     msm_dai_q6_afe_input_bit_format_get,
+		     msm_dai_q6_afe_input_bit_format_put),
+};
+
+static int msm_dai_q6_slim_rx_drift_info(struct snd_kcontrol *kcontrol,
+				    struct snd_ctl_elem_info *uinfo)
+{
+	uinfo->type = SNDRV_CTL_ELEM_TYPE_BYTES;
+	uinfo->count = sizeof(struct afe_param_id_dev_timing_stats);
+
+	return 0;
+}
+
+static int msm_dai_q6_slim_rx_drift_get(struct snd_kcontrol *kcontrol,
+		struct snd_ctl_elem_value *ucontrol)
+{
+	int ret = -EINVAL;
+	struct afe_param_id_dev_timing_stats timing_stats;
+	struct snd_soc_dai *dai = kcontrol->private_data;
+	struct msm_dai_q6_dai_data *dai_data = dev_get_drvdata(dai->dev);
+
+	if (!test_bit(STATUS_PORT_STARTED, dai_data->status_mask)) {
+		pr_err("%s: afe port not started. dai_data->status_mask = %ld\n",
+			__func__, *dai_data->status_mask);
+		goto done;
+	}
+
+	memset(&timing_stats, 0, sizeof(struct afe_param_id_dev_timing_stats));
+	ret = afe_get_av_dev_drift(&timing_stats, dai->id);
+	if (ret) {
+		pr_err("%s: Error getting AFE Drift for port %d, err=%d\n",
+			__func__, dai->id, ret);
+
+		goto done;
+	}
+
+	memcpy(ucontrol->value.bytes.data, (void *)&timing_stats,
+			sizeof(struct afe_param_id_dev_timing_stats));
+done:
+	return ret;
+}
+
+static const char * const afe_cal_mode_text[] = {
+	"CAL_MODE_DEFAULT", "CAL_MODE_NONE"
+};
+
+static const struct soc_enum slim_2_rx_enum =
+	SOC_ENUM_SINGLE(SLIMBUS_2_RX, 0, ARRAY_SIZE(afe_cal_mode_text),
+			afe_cal_mode_text);
+
+static const struct soc_enum rt_proxy_1_rx_enum =
+	SOC_ENUM_SINGLE(RT_PROXY_PORT_001_RX, 0, ARRAY_SIZE(afe_cal_mode_text),
+			afe_cal_mode_text);
+
+static const struct soc_enum rt_proxy_1_tx_enum =
+	SOC_ENUM_SINGLE(RT_PROXY_PORT_001_TX, 0, ARRAY_SIZE(afe_cal_mode_text),
+			afe_cal_mode_text);
+
+static const struct snd_kcontrol_new sb_config_controls[] = {
+	SOC_ENUM_EXT("SLIM_4_TX Format", sb_config_enum[0],
+		     msm_dai_q6_sb_format_get,
+		     msm_dai_q6_sb_format_put),
+	SOC_ENUM_EXT("SLIM_2_RX SetCalMode", slim_2_rx_enum,
+		     msm_dai_q6_cal_info_get,
+		     msm_dai_q6_cal_info_put),
+	SOC_ENUM_EXT("SLIM_2_RX Format", sb_config_enum[0],
+		     msm_dai_q6_sb_format_get,
+		     msm_dai_q6_sb_format_put)
+};
+
+static const struct snd_kcontrol_new rt_proxy_config_controls[] = {
+	SOC_ENUM_EXT("RT_PROXY_1_RX SetCalMode", rt_proxy_1_rx_enum,
+		     msm_dai_q6_cal_info_get,
+		     msm_dai_q6_cal_info_put),
+	SOC_ENUM_EXT("RT_PROXY_1_TX SetCalMode", rt_proxy_1_tx_enum,
+		     msm_dai_q6_cal_info_get,
+		     msm_dai_q6_cal_info_put),
+};
+
+static const struct snd_kcontrol_new usb_audio_cfg_controls[] = {
+	SOC_SINGLE_EXT("USB_AUDIO_RX dev_token", 0, 0, UINT_MAX, 0,
+			msm_dai_q6_usb_audio_cfg_get,
+			msm_dai_q6_usb_audio_cfg_put),
+	SOC_SINGLE_EXT("USB_AUDIO_RX endian", 0, 0, 1, 0,
+			msm_dai_q6_usb_audio_endian_cfg_get,
+			msm_dai_q6_usb_audio_endian_cfg_put),
+	SOC_SINGLE_EXT("USB_AUDIO_TX dev_token", 0, 0, UINT_MAX, 0,
+			msm_dai_q6_usb_audio_cfg_get,
+			msm_dai_q6_usb_audio_cfg_put),
+	SOC_SINGLE_EXT("USB_AUDIO_TX endian", 0, 0, 1, 0,
+			msm_dai_q6_usb_audio_endian_cfg_get,
+			msm_dai_q6_usb_audio_endian_cfg_put),
+};
+
+static const struct snd_kcontrol_new avd_drift_config_controls[] = {
+	{
+		.access = SNDRV_CTL_ELEM_ACCESS_READ,
+		.iface	= SNDRV_CTL_ELEM_IFACE_PCM,
+		.name	= "SLIMBUS_0_RX DRIFT",
+		.info	= msm_dai_q6_slim_rx_drift_info,
+		.get	= msm_dai_q6_slim_rx_drift_get,
+	},
+	{
+		.access = SNDRV_CTL_ELEM_ACCESS_READ,
+		.iface	= SNDRV_CTL_ELEM_IFACE_PCM,
+		.name	= "SLIMBUS_6_RX DRIFT",
+		.info	= msm_dai_q6_slim_rx_drift_info,
+		.get	= msm_dai_q6_slim_rx_drift_get,
+	},
+	{
+		.access = SNDRV_CTL_ELEM_ACCESS_READ,
+		.iface	= SNDRV_CTL_ELEM_IFACE_PCM,
+		.name	= "SLIMBUS_7_RX DRIFT",
+		.info	= msm_dai_q6_slim_rx_drift_info,
+		.get	= msm_dai_q6_slim_rx_drift_get,
+	},
+};
+
+static int msm_dai_q6_dai_probe(struct snd_soc_dai *dai)
+{
+	struct msm_dai_q6_dai_data *dai_data;
+	int rc = 0;
+
+	if (!dai) {
+		pr_err("%s: Invalid params dai\n", __func__);
+		return -EINVAL;
+	}
+	if (!dai->dev) {
+		pr_err("%s: Invalid params dai dev\n", __func__);
+		return -EINVAL;
+	}
+
+	dai_data = kzalloc(sizeof(struct msm_dai_q6_dai_data), GFP_KERNEL);
+
+	if (!dai_data) {
+		dev_err(dai->dev, "DAI-%d: fail to allocate dai data\n",
+			dai->id);
+		return -ENOMEM;
+	}
+	dev_set_drvdata(dai->dev, dai_data);
+
+	msm_dai_q6_set_dai_id(dai);
+
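+	/*
+	 * Register port-specific kcontrols; the drift controls take the
+	 * dai itself as private data, the others take dai_data.
+	 */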
+	switch (dai->id) {
+	case SLIMBUS_4_TX:
+		rc = snd_ctl_add(dai->component->card->snd_card,
+				 snd_ctl_new1(&sb_config_controls[0],
+				 dai_data));
+		break;
+	case SLIMBUS_2_RX:
+		rc = snd_ctl_add(dai->component->card->snd_card,
+				 snd_ctl_new1(&sb_config_controls[1],
+				 dai_data));
+		rc = snd_ctl_add(dai->component->card->snd_card,
+				 snd_ctl_new1(&sb_config_controls[2],
+				 dai_data));
+		break;
+	case SLIMBUS_7_RX:
+		rc = snd_ctl_add(dai->component->card->snd_card,
+				 snd_ctl_new1(&afe_enc_config_controls[0],
+				 dai_data));
+		rc = snd_ctl_add(dai->component->card->snd_card,
+				 snd_ctl_new1(&afe_enc_config_controls[1],
+				 dai_data));
+		rc = snd_ctl_add(dai->component->card->snd_card,
+				 snd_ctl_new1(&afe_enc_config_controls[2],
+				 dai_data));
+		rc = snd_ctl_add(dai->component->card->snd_card,
+				snd_ctl_new1(&avd_drift_config_controls[2],
+					dai));
+		break;
+	case RT_PROXY_DAI_001_RX:
+		rc = snd_ctl_add(dai->component->card->snd_card,
+				 snd_ctl_new1(&rt_proxy_config_controls[0],
+				 dai_data));
+		break;
+	case RT_PROXY_DAI_001_TX:
+		rc = snd_ctl_add(dai->component->card->snd_card,
+				 snd_ctl_new1(&rt_proxy_config_controls[1],
+				 dai_data));
+		break;
+	case AFE_PORT_ID_USB_RX:
+		rc = snd_ctl_add(dai->component->card->snd_card,
+				 snd_ctl_new1(&usb_audio_cfg_controls[0],
+				 dai_data));
+		rc = snd_ctl_add(dai->component->card->snd_card,
+				 snd_ctl_new1(&usb_audio_cfg_controls[1],
+				 dai_data));
+		break;
+	case AFE_PORT_ID_USB_TX:
+		rc = snd_ctl_add(dai->component->card->snd_card,
+				 snd_ctl_new1(&usb_audio_cfg_controls[2],
+				 dai_data));
+		rc = snd_ctl_add(dai->component->card->snd_card,
+				 snd_ctl_new1(&usb_audio_cfg_controls[3],
+				 dai_data));
+		break;
+	case SLIMBUS_0_RX:
+		rc = snd_ctl_add(dai->component->card->snd_card,
+				snd_ctl_new1(&avd_drift_config_controls[0],
+					dai));
+		break;
+	case SLIMBUS_6_RX:
+		rc = snd_ctl_add(dai->component->card->snd_card,
+				snd_ctl_new1(&avd_drift_config_controls[1],
+					dai));
+		break;
+	}
+	if (IS_ERR_VALUE(rc))
+		dev_err(dai->dev, "%s: err add config ctl, DAI = %s\n",
+			__func__, dai->name);
+
+	rc = msm_dai_q6_dai_add_route(dai);
+	return rc;
+}
+
+static int msm_dai_q6_dai_remove(struct snd_soc_dai *dai)
+{
+	struct msm_dai_q6_dai_data *dai_data;
+	int rc;
+
+	dai_data = dev_get_drvdata(dai->dev);
+
+	/* If AFE port is still up, close it */
+	if (test_bit(STATUS_PORT_STARTED, dai_data->status_mask)) {
+		pr_debug("%s: stop pseudo port:%d\n", __func__,  dai->id);
+		rc = afe_close(dai->id); /* can block */
+
+		if (IS_ERR_VALUE(rc))
+			dev_err(dai->dev, "fail to close AFE port\n");
+		clear_bit(STATUS_PORT_STARTED, dai_data->status_mask);
+	}
+	kfree(dai_data);
+
+	return 0;
+}
+
+static struct snd_soc_dai_driver msm_dai_q6_afe_rx_dai[] = {
+	{
+		.playback = {
+			.stream_name = "AFE Playback",
+			.aif_name = "PCM_RX",
+			.rates = SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_8000 |
+			SNDRV_PCM_RATE_16000,
+			.formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE,
+			.channels_min = 1,
+			.channels_max = 2,
+			.rate_min =     8000,
+			.rate_max =	48000,
+		},
+		.ops = &msm_dai_q6_ops,
+		.id = RT_PROXY_DAI_001_RX,
+		.probe = msm_dai_q6_dai_probe,
+		.remove = msm_dai_q6_dai_remove,
+	},
+	{
+		.playback = {
+			 .stream_name = "AFE-PROXY RX",
+			 .rates = SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_8000 |
+			 SNDRV_PCM_RATE_16000,
+			 .formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE,
+			 .channels_min = 1,
+			 .channels_max = 2,
+			 .rate_min =     8000,
+			 .rate_max =	48000,
+		},
+		.ops = &msm_dai_q6_ops,
+		.id = RT_PROXY_DAI_002_RX,
+		.probe = msm_dai_q6_dai_probe,
+		.remove = msm_dai_q6_dai_remove,
+	},
+};
+
+static struct snd_soc_dai_driver msm_dai_q6_afe_tx_dai[] = {
+	{
+		.capture = {
+			.stream_name = "AFE Capture",
+			.aif_name = "PCM_TX",
+			.rates = SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_8000 |
+			SNDRV_PCM_RATE_16000,
+			.formats = SNDRV_PCM_FMTBIT_S16_LE,
+			.channels_min = 1,
+			.channels_max = 8,
+			.rate_min =     8000,
+			.rate_max =	48000,
+		},
+		.ops = &msm_dai_q6_ops,
+		.id = RT_PROXY_DAI_002_TX,
+		.probe = msm_dai_q6_dai_probe,
+		.remove = msm_dai_q6_dai_remove,
+	},
+	{
+		.capture = {
+			.stream_name = "AFE-PROXY TX",
+			.rates = SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_8000 |
+			SNDRV_PCM_RATE_16000,
+			.formats = SNDRV_PCM_FMTBIT_S16_LE,
+			.channels_min = 1,
+			.channels_max = 8,
+			.rate_min =     8000,
+			.rate_max =	48000,
+		},
+		.ops = &msm_dai_q6_ops,
+		.id = RT_PROXY_DAI_001_TX,
+		.probe = msm_dai_q6_dai_probe,
+		.remove = msm_dai_q6_dai_remove,
+	},
+};
+
+static struct snd_soc_dai_driver msm_dai_q6_bt_sco_rx_dai = {
+	.playback = {
+		.stream_name = "Internal BT-SCO Playback",
+		.aif_name = "INT_BT_SCO_RX",
+		.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000,
+		.formats = SNDRV_PCM_FMTBIT_S16_LE,
+		.channels_min = 1,
+		.channels_max = 1,
+		.rate_max = 16000,
+		.rate_min = 8000,
+	},
+	.ops = &msm_dai_q6_ops,
+	.id = INT_BT_SCO_RX,
+	.probe = msm_dai_q6_dai_probe,
+	.remove = msm_dai_q6_dai_remove,
+};
+
+static struct snd_soc_dai_driver msm_dai_q6_bt_a2dp_rx_dai = {
+	.playback = {
+		.stream_name = "Internal BT-A2DP Playback",
+		.aif_name = "INT_BT_A2DP_RX",
+		.rates = SNDRV_PCM_RATE_48000,
+		.formats = SNDRV_PCM_FMTBIT_S16_LE,
+		.channels_min = 1,
+		.channels_max = 2,
+		.rate_max = 48000,
+		.rate_min = 48000,
+	},
+	.ops = &msm_dai_q6_ops,
+	.id = INT_BT_A2DP_RX,
+	.probe = msm_dai_q6_dai_probe,
+	.remove = msm_dai_q6_dai_remove,
+};
+
+static struct snd_soc_dai_driver msm_dai_q6_bt_sco_tx_dai = {
+	.capture = {
+		.stream_name = "Internal BT-SCO Capture",
+		.aif_name = "INT_BT_SCO_TX",
+		.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000,
+		.formats = SNDRV_PCM_FMTBIT_S16_LE,
+		.channels_min = 1,
+		.channels_max = 1,
+		.rate_max = 16000,
+		.rate_min = 8000,
+	},
+	.ops = &msm_dai_q6_ops,
+	.id = INT_BT_SCO_TX,
+	.probe = msm_dai_q6_dai_probe,
+	.remove = msm_dai_q6_dai_remove,
+};
+
+static struct snd_soc_dai_driver msm_dai_q6_fm_rx_dai = {
+	.playback = {
+		.stream_name = "Internal FM Playback",
+		.aif_name = "INT_FM_RX",
+		.rates = SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_8000 |
+		SNDRV_PCM_RATE_16000,
+		.formats = SNDRV_PCM_FMTBIT_S16_LE,
+		.channels_min = 2,
+		.channels_max = 2,
+		.rate_max = 48000,
+		.rate_min = 8000,
+	},
+	.ops = &msm_dai_q6_ops,
+	.id = INT_FM_RX,
+	.probe = msm_dai_q6_dai_probe,
+	.remove = msm_dai_q6_dai_remove,
+};
+
+static struct snd_soc_dai_driver msm_dai_q6_fm_tx_dai = {
+	.capture = {
+		.stream_name = "Internal FM Capture",
+		.aif_name = "INT_FM_TX",
+		.rates = SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_8000 |
+		SNDRV_PCM_RATE_16000,
+		.formats = SNDRV_PCM_FMTBIT_S16_LE,
+		.channels_min = 2,
+		.channels_max = 2,
+		.rate_max = 48000,
+		.rate_min = 8000,
+	},
+	.ops = &msm_dai_q6_ops,
+	.id = INT_FM_TX,
+	.probe = msm_dai_q6_dai_probe,
+	.remove = msm_dai_q6_dai_remove,
+};
+
+static struct snd_soc_dai_driver msm_dai_q6_voc_playback_dai[] = {
+	{
+		.playback = {
+			.stream_name = "Voice Farend Playback",
+			.aif_name = "VOICE_PLAYBACK_TX",
+			.rates = SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_8000 |
+				 SNDRV_PCM_RATE_16000,
+			.formats = SNDRV_PCM_FMTBIT_S16_LE,
+			.channels_min = 1,
+			.channels_max = 2,
+			.rate_min =     8000,
+			.rate_max =     48000,
+		},
+		.ops = &msm_dai_q6_ops,
+		.id = VOICE_PLAYBACK_TX,
+		.probe = msm_dai_q6_dai_probe,
+		.remove = msm_dai_q6_dai_remove,
+	},
+	{
+		.playback = {
+			.stream_name = "Voice2 Farend Playback",
+			.aif_name = "VOICE2_PLAYBACK_TX",
+			.rates = SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_8000 |
+				 SNDRV_PCM_RATE_16000,
+			.formats = SNDRV_PCM_FMTBIT_S16_LE,
+			.channels_min = 1,
+			.channels_max = 2,
+			.rate_min =     8000,
+			.rate_max =     48000,
+		},
+		.ops = &msm_dai_q6_ops,
+		.id = VOICE2_PLAYBACK_TX,
+		.probe = msm_dai_q6_dai_probe,
+		.remove = msm_dai_q6_dai_remove,
+	},
+};
+
+static struct snd_soc_dai_driver msm_dai_q6_incall_record_dai[] = {
+	{
+		.capture = {
+			.stream_name = "Voice Uplink Capture",
+			.aif_name = "INCALL_RECORD_TX",
+			.rates = SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_8000 |
+			SNDRV_PCM_RATE_16000,
+			.formats = SNDRV_PCM_FMTBIT_S16_LE,
+			.channels_min = 1,
+			.channels_max = 2,
+			.rate_min =     8000,
+			.rate_max =     48000,
+		},
+		.ops = &msm_dai_q6_ops,
+		.id = VOICE_RECORD_TX,
+		.probe = msm_dai_q6_dai_probe,
+		.remove = msm_dai_q6_dai_remove,
+	},
+	{
+		.capture = {
+			.stream_name = "Voice Downlink Capture",
+			.aif_name = "INCALL_RECORD_RX",
+			.rates = SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_8000 |
+			SNDRV_PCM_RATE_16000,
+			.formats = SNDRV_PCM_FMTBIT_S16_LE,
+			.channels_min = 1,
+			.channels_max = 2,
+			.rate_min =     8000,
+			.rate_max =     48000,
+		},
+		.ops = &msm_dai_q6_ops,
+		.id = VOICE_RECORD_RX,
+		.probe = msm_dai_q6_dai_probe,
+		.remove = msm_dai_q6_dai_remove,
+	},
+};
+
+static struct snd_soc_dai_driver msm_dai_q6_usb_rx_dai = {
+	.playback = {
+		.stream_name = "USB Audio Playback",
+		.aif_name = "USB_AUDIO_RX",
+		.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_11025 |
+			 SNDRV_PCM_RATE_16000 | SNDRV_PCM_RATE_22050 |
+			 SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_44100 |
+			 SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_88200 |
+			 SNDRV_PCM_RATE_96000 | SNDRV_PCM_RATE_176400 |
+			 SNDRV_PCM_RATE_192000 | SNDRV_PCM_RATE_352800 |
+			 SNDRV_PCM_RATE_384000,
+		.formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE |
+			   SNDRV_PCM_FMTBIT_S24_3LE | SNDRV_PCM_FMTBIT_S32_LE,
+		.channels_min = 1,
+		.channels_max = 8,
+		.rate_max = 384000,
+		.rate_min = 8000,
+	},
+	.ops = &msm_dai_q6_ops,
+	.id = AFE_PORT_ID_USB_RX,
+	.probe = msm_dai_q6_dai_probe,
+	.remove = msm_dai_q6_dai_remove,
+};
+
+static struct snd_soc_dai_driver msm_dai_q6_usb_tx_dai = {
+	.capture = {
+		.stream_name = "USB Audio Capture",
+		.aif_name = "USB_AUDIO_TX",
+		.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_11025 |
+			 SNDRV_PCM_RATE_16000 | SNDRV_PCM_RATE_22050 |
+			 SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_44100 |
+			 SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_88200 |
+			 SNDRV_PCM_RATE_96000 | SNDRV_PCM_RATE_176400 |
+			 SNDRV_PCM_RATE_192000 | SNDRV_PCM_RATE_352800 |
+			 SNDRV_PCM_RATE_384000,
+		.formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE |
+			   SNDRV_PCM_FMTBIT_S24_3LE | SNDRV_PCM_FMTBIT_S32_LE,
+		.channels_min = 1,
+		.channels_max = 8,
+		.rate_max = 384000,
+		.rate_min = 8000,
+	},
+	.ops = &msm_dai_q6_ops,
+	.id = AFE_PORT_ID_USB_TX,
+	.probe = msm_dai_q6_dai_probe,
+	.remove = msm_dai_q6_dai_remove,
+};
+
+static int msm_auxpcm_dev_probe(struct platform_device *pdev)
+{
+	struct msm_dai_q6_auxpcm_dai_data *dai_data;
+	struct msm_dai_auxpcm_pdata *auxpcm_pdata;
+	uint32_t val_array[RATE_MAX_NUM_OF_AUX_PCM_RATES];
+	uint32_t val = 0;
+	const char *intf_name;
+	int rc = 0, i = 0, len = 0;
+	const uint32_t *slot_mapping_array = NULL;
+	u32 array_length = 0;
+
+	dai_data = kzalloc(sizeof(struct msm_dai_q6_auxpcm_dai_data),
+			   GFP_KERNEL);
+	if (!dai_data) {
+		dev_err(&pdev->dev,
+			"Failed to allocate memory for auxpcm DAI data\n");
+		return -ENOMEM;
+	}
+
+	auxpcm_pdata = kzalloc(sizeof(struct msm_dai_auxpcm_pdata),
+				GFP_KERNEL);
+
+	if (!auxpcm_pdata) {
+		dev_err(&pdev->dev, "Failed to allocate memory for platform data\n");
+		rc = -ENOMEM;
+		goto fail_pdata_nomem;
+	}
+
+	dev_dbg(&pdev->dev, "%s: dev %pK, dai_data %pK, auxpcm_pdata %pK\n",
+		__func__, &pdev->dev, dai_data, auxpcm_pdata);
+
+	rc = of_property_read_u32_array(pdev->dev.of_node,
+			"qcom,msm-cpudai-auxpcm-mode",
+			val_array, RATE_MAX_NUM_OF_AUX_PCM_RATES);
+	if (rc) {
+		dev_err(&pdev->dev, "%s: qcom,msm-cpudai-auxpcm-mode missing in DT node\n",
+			__func__);
+		goto fail_invalid_dt;
+	}
+	auxpcm_pdata->mode_8k.mode = (u16)val_array[RATE_8KHZ];
+	auxpcm_pdata->mode_16k.mode = (u16)val_array[RATE_16KHZ];
+
+	rc = of_property_read_u32_array(pdev->dev.of_node,
+			"qcom,msm-cpudai-auxpcm-sync",
+			val_array, RATE_MAX_NUM_OF_AUX_PCM_RATES);
+	if (rc) {
+		dev_err(&pdev->dev, "%s: qcom,msm-cpudai-auxpcm-sync missing in DT node\n",
+			__func__);
+		goto fail_invalid_dt;
+	}
+	auxpcm_pdata->mode_8k.sync = (u16)val_array[RATE_8KHZ];
+	auxpcm_pdata->mode_16k.sync = (u16)val_array[RATE_16KHZ];
+
+	rc = of_property_read_u32_array(pdev->dev.of_node,
+			"qcom,msm-cpudai-auxpcm-frame",
+			val_array, RATE_MAX_NUM_OF_AUX_PCM_RATES);
+
+	if (rc) {
+		dev_err(&pdev->dev, "%s: qcom,msm-cpudai-auxpcm-frame missing in DT node\n",
+			__func__);
+		goto fail_invalid_dt;
+	}
+	auxpcm_pdata->mode_8k.frame = (u16)val_array[RATE_8KHZ];
+	auxpcm_pdata->mode_16k.frame = (u16)val_array[RATE_16KHZ];
+
+	rc = of_property_read_u32_array(pdev->dev.of_node,
+			"qcom,msm-cpudai-auxpcm-quant",
+			val_array, RATE_MAX_NUM_OF_AUX_PCM_RATES);
+	if (rc) {
+		dev_err(&pdev->dev, "%s: qcom,msm-cpudai-auxpcm-quant missing in DT node\n",
+			__func__);
+		goto fail_invalid_dt;
+	}
+	auxpcm_pdata->mode_8k.quant = (u16)val_array[RATE_8KHZ];
+	auxpcm_pdata->mode_16k.quant = (u16)val_array[RATE_16KHZ];
+
+	rc = of_property_read_u32_array(pdev->dev.of_node,
+			"qcom,msm-cpudai-auxpcm-num-slots",
+			val_array, RATE_MAX_NUM_OF_AUX_PCM_RATES);
+	if (rc) {
+		dev_err(&pdev->dev, "%s: qcom,msm-cpudai-auxpcm-num-slots missing in DT node\n",
+			__func__);
+		goto fail_invalid_dt;
+	}
+	auxpcm_pdata->mode_8k.num_slots = (u16)val_array[RATE_8KHZ];
+
+	if (auxpcm_pdata->mode_8k.num_slots >
+	    msm_dai_q6_max_num_slot(auxpcm_pdata->mode_8k.frame)) {
+		dev_err(&pdev->dev, "%s Max slots %d exceeded by DT node value %d\n",
+			 __func__,
+			msm_dai_q6_max_num_slot(auxpcm_pdata->mode_8k.frame),
+			auxpcm_pdata->mode_8k.num_slots);
+		rc = -EINVAL;
+		goto fail_invalid_dt;
+	}
+	auxpcm_pdata->mode_16k.num_slots = (u16)val_array[RATE_16KHZ];
+
+	if (auxpcm_pdata->mode_16k.num_slots >
+	    msm_dai_q6_max_num_slot(auxpcm_pdata->mode_16k.frame)) {
+		dev_err(&pdev->dev, "%s Max slots %d exceeded by DT node value %d\n",
+			__func__,
+			msm_dai_q6_max_num_slot(auxpcm_pdata->mode_16k.frame),
+			auxpcm_pdata->mode_16k.num_slots);
+		rc = -EINVAL;
+		goto fail_invalid_dt;
+	}
+
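+	/*
+	 * The slot-mapping property lists the 8 kHz mode slots first,
+	 * immediately followed by the 16 kHz mode slots.
+	 */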
+	slot_mapping_array = of_get_property(pdev->dev.of_node,
+				"qcom,msm-cpudai-auxpcm-slot-mapping", &len);
+
+	if (slot_mapping_array == NULL) {
+		dev_err(&pdev->dev, "%s slot_mapping_array is not valid\n",
+			__func__);
+		rc = -EINVAL;
+		goto fail_invalid_dt;
+	}
+
+	array_length = auxpcm_pdata->mode_8k.num_slots +
+		       auxpcm_pdata->mode_16k.num_slots;
+
+	if (len != sizeof(uint32_t) * array_length) {
+		dev_err(&pdev->dev, "%s Length is %d and expected is %zd\n",
+			__func__, len, sizeof(uint32_t) * array_length);
+		rc = -EINVAL;
+		goto fail_invalid_dt;
+	}
+
+	auxpcm_pdata->mode_8k.slot_mapping =
+					kzalloc(sizeof(uint16_t) *
+					    auxpcm_pdata->mode_8k.num_slots,
+					    GFP_KERNEL);
+	if (!auxpcm_pdata->mode_8k.slot_mapping) {
+		dev_err(&pdev->dev, "%s No mem for mode_8k slot mapping\n",
+			__func__);
+		rc = -ENOMEM;
+		goto fail_invalid_dt;
+	}
+
+	for (i = 0; i < auxpcm_pdata->mode_8k.num_slots; i++)
+		auxpcm_pdata->mode_8k.slot_mapping[i] =
+				(u16)be32_to_cpu(slot_mapping_array[i]);
+
+	auxpcm_pdata->mode_16k.slot_mapping =
+					kzalloc(sizeof(uint16_t) *
+					     auxpcm_pdata->mode_16k.num_slots,
+					     GFP_KERNEL);
+
+	if (!auxpcm_pdata->mode_16k.slot_mapping) {
+		dev_err(&pdev->dev, "%s No mem for mode_16k slot mapping\n",
+			__func__);
+		rc = -ENOMEM;
+		goto fail_invalid_16k_slot_mapping;
+	}
+
+	for (i = 0; i < auxpcm_pdata->mode_16k.num_slots; i++)
+		auxpcm_pdata->mode_16k.slot_mapping[i] =
+			(u16)be32_to_cpu(slot_mapping_array[i +
+					auxpcm_pdata->mode_8k.num_slots]);
+
+	rc = of_property_read_u32_array(pdev->dev.of_node,
+			"qcom,msm-cpudai-auxpcm-data",
+			val_array, RATE_MAX_NUM_OF_AUX_PCM_RATES);
+	if (rc) {
+		dev_err(&pdev->dev, "%s: qcom,msm-cpudai-auxpcm-data missing in DT node\n",
+			__func__);
+		goto fail_invalid_dt1;
+	}
+	auxpcm_pdata->mode_8k.data = (u16)val_array[RATE_8KHZ];
+	auxpcm_pdata->mode_16k.data = (u16)val_array[RATE_16KHZ];
+
+	rc = of_property_read_u32_array(pdev->dev.of_node,
+			"qcom,msm-cpudai-auxpcm-pcm-clk-rate",
+			val_array, RATE_MAX_NUM_OF_AUX_PCM_RATES);
+	if (rc) {
+		dev_err(&pdev->dev,
+			"%s: qcom,msm-cpudai-auxpcm-pcm-clk-rate missing in DT\n",
+			__func__);
+		goto fail_invalid_dt1;
+	}
+	auxpcm_pdata->mode_8k.pcm_clk_rate = (int)val_array[RATE_8KHZ];
+	auxpcm_pdata->mode_16k.pcm_clk_rate = (int)val_array[RATE_16KHZ];
+
+	rc = of_property_read_string(pdev->dev.of_node,
+			"qcom,msm-auxpcm-interface", &intf_name);
+	if (rc) {
+		dev_err(&pdev->dev,
+			"%s: qcom,msm-auxpcm-interface missing in DT node\n",
+			__func__);
+		goto fail_nodev_intf;
+	}
+
+	if (!strcmp(intf_name, "primary")) {
+		dai_data->rx_pid = AFE_PORT_ID_PRIMARY_PCM_RX;
+		dai_data->tx_pid = AFE_PORT_ID_PRIMARY_PCM_TX;
+		pdev->id = MSM_DAI_PRI_AUXPCM_DT_DEV_ID;
+		i = 0;
+	} else if (!strcmp(intf_name, "secondary")) {
+		dai_data->rx_pid = AFE_PORT_ID_SECONDARY_PCM_RX;
+		dai_data->tx_pid = AFE_PORT_ID_SECONDARY_PCM_TX;
+		pdev->id = MSM_DAI_SEC_AUXPCM_DT_DEV_ID;
+		i = 1;
+	} else if (!strcmp(intf_name, "tertiary")) {
+		dai_data->rx_pid = AFE_PORT_ID_TERTIARY_PCM_RX;
+		dai_data->tx_pid = AFE_PORT_ID_TERTIARY_PCM_TX;
+		pdev->id = MSM_DAI_TERT_AUXPCM_DT_DEV_ID;
+		i = 2;
+	} else if (!strcmp(intf_name, "quaternary")) {
+		dai_data->rx_pid = AFE_PORT_ID_QUATERNARY_PCM_RX;
+		dai_data->tx_pid = AFE_PORT_ID_QUATERNARY_PCM_TX;
+		pdev->id = MSM_DAI_QUAT_AUXPCM_DT_DEV_ID;
+		i = 3;
+	} else {
+		dev_err(&pdev->dev, "%s: invalid DT intf name %s\n",
+			__func__, intf_name);
+		rc = -EINVAL;
+		goto fail_invalid_intf;
+	}
+	rc = of_property_read_u32(pdev->dev.of_node,
+				  "qcom,msm-cpudai-afe-clk-ver", &val);
+	if (rc)
+		dai_data->afe_clk_ver = AFE_CLK_VERSION_V1;
+	else
+		dai_data->afe_clk_ver = val;
+
+	mutex_init(&dai_data->rlock);
+	dev_dbg(&pdev->dev, "dev name %s\n", dev_name(&pdev->dev));
+
+	dev_set_drvdata(&pdev->dev, dai_data);
+	pdev->dev.platform_data = (void *) auxpcm_pdata;
+
+	rc = snd_soc_register_component(&pdev->dev,
+			&msm_dai_q6_aux_pcm_dai_component,
+			&msm_dai_q6_aux_pcm_dai[i], 1);
+	if (rc) {
+		dev_err(&pdev->dev, "%s: auxpcm dai reg failed, rc=%d\n",
+				__func__, rc);
+		goto fail_reg_dai;
+	}
+
+	return rc;
+
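+	/*
+	 * Error labels unwind in reverse order of allocation: later
+	 * failures fall through so the 16 kHz mapping, the 8 kHz
+	 * mapping, the pdata and finally dai_data are freed.
+	 */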
+fail_reg_dai:
+fail_invalid_intf:
+fail_nodev_intf:
+fail_invalid_dt1:
+	kfree(auxpcm_pdata->mode_16k.slot_mapping);
+fail_invalid_16k_slot_mapping:
+	kfree(auxpcm_pdata->mode_8k.slot_mapping);
+fail_invalid_dt:
+	kfree(auxpcm_pdata);
+fail_pdata_nomem:
+	kfree(dai_data);
+	return rc;
+}
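+
+/*
+ * A hypothetical DT node consumed by the probe above; the property
+ * names come from the code, the values are illustrative only:
+ *
+ *	qcom,msm-auxpcm-interface = "primary";
+ *	qcom,msm-cpudai-auxpcm-num-slots = <1 1>;
+ *	qcom,msm-cpudai-auxpcm-slot-mapping = <0 0>;
+ *	qcom,msm-cpudai-auxpcm-data = <0 0>;
+ *	qcom,msm-cpudai-auxpcm-pcm-clk-rate = <2048000 2048000>;
+ */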
+
+static int msm_auxpcm_dev_remove(struct platform_device *pdev)
+{
+	struct msm_dai_q6_auxpcm_dai_data *dai_data;
+
+	dai_data = dev_get_drvdata(&pdev->dev);
+
+	snd_soc_unregister_component(&pdev->dev);
+
+	mutex_destroy(&dai_data->rlock);
+	kfree(dai_data);
+	kfree(pdev->dev.platform_data);
+
+	return 0;
+}
+
+static const struct of_device_id msm_auxpcm_dev_dt_match[] = {
+	{ .compatible = "qcom,msm-auxpcm-dev", },
+	{}
+};
+
+static struct platform_driver msm_auxpcm_dev_driver = {
+	.probe  = msm_auxpcm_dev_probe,
+	.remove = msm_auxpcm_dev_remove,
+	.driver = {
+		.name = "msm-auxpcm-dev",
+		.owner = THIS_MODULE,
+		.of_match_table = msm_auxpcm_dev_dt_match,
+	},
+};
+
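+/*
+ * SLIMBUS RX (playback) DAIs: ports 0, 2, 7 and 8 expose up to eight
+ * channels, the rest two; all advertise 8 kHz to 384 kHz at 16-, 24-
+ * and 32-bit.
+ */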
+static struct snd_soc_dai_driver msm_dai_q6_slimbus_rx_dai[] = {
+	{
+		.playback = {
+			.stream_name = "Slimbus Playback",
+			.aif_name = "SLIMBUS_0_RX",
+			.rates = SNDRV_PCM_RATE_8000_384000,
+			.formats = DAI_FORMATS_S16_S24_S32_LE,
+			.channels_min = 1,
+			.channels_max = 8,
+			.rate_min = 8000,
+			.rate_max = 384000,
+		},
+		.ops = &msm_dai_q6_ops,
+		.id = SLIMBUS_0_RX,
+		.probe = msm_dai_q6_dai_probe,
+		.remove = msm_dai_q6_dai_remove,
+	},
+	{
+		.playback = {
+			.stream_name = "Slimbus1 Playback",
+			.aif_name = "SLIMBUS_1_RX",
+			.rates = SNDRV_PCM_RATE_8000_384000,
+			.formats = DAI_FORMATS_S16_S24_S32_LE,
+			.channels_min = 1,
+			.channels_max = 2,
+			.rate_min = 8000,
+			.rate_max = 384000,
+		},
+		.ops = &msm_dai_q6_ops,
+		.id = SLIMBUS_1_RX,
+		.probe = msm_dai_q6_dai_probe,
+		.remove = msm_dai_q6_dai_remove,
+	},
+	{
+		.playback = {
+			.stream_name = "Slimbus2 Playback",
+			.aif_name = "SLIMBUS_2_RX",
+			.rates = SNDRV_PCM_RATE_8000_384000,
+			.formats = DAI_FORMATS_S16_S24_S32_LE,
+			.channels_min = 1,
+			.channels_max = 8,
+			.rate_min = 8000,
+			.rate_max = 384000,
+		},
+		.ops = &msm_dai_q6_ops,
+		.id = SLIMBUS_2_RX,
+		.probe = msm_dai_q6_dai_probe,
+		.remove = msm_dai_q6_dai_remove,
+	},
+	{
+		.playback = {
+			.stream_name = "Slimbus3 Playback",
+			.aif_name = "SLIMBUS_3_RX",
+			.rates = SNDRV_PCM_RATE_8000_384000,
+			.formats = DAI_FORMATS_S16_S24_S32_LE,
+			.channels_min = 1,
+			.channels_max = 2,
+			.rate_min = 8000,
+			.rate_max = 384000,
+		},
+		.ops = &msm_dai_q6_ops,
+		.id = SLIMBUS_3_RX,
+		.probe = msm_dai_q6_dai_probe,
+		.remove = msm_dai_q6_dai_remove,
+	},
+	{
+		.playback = {
+			.stream_name = "Slimbus4 Playback",
+			.aif_name = "SLIMBUS_4_RX",
+			.rates = SNDRV_PCM_RATE_8000_384000,
+			.formats = DAI_FORMATS_S16_S24_S32_LE,
+			.channels_min = 1,
+			.channels_max = 2,
+			.rate_min = 8000,
+			.rate_max = 384000,
+		},
+		.ops = &msm_dai_q6_ops,
+		.id = SLIMBUS_4_RX,
+		.probe = msm_dai_q6_dai_probe,
+		.remove = msm_dai_q6_dai_remove,
+	},
+	{
+		.playback = {
+			.stream_name = "Slimbus6 Playback",
+			.aif_name = "SLIMBUS_6_RX",
+			.rates = SNDRV_PCM_RATE_8000_384000,
+			.formats = DAI_FORMATS_S16_S24_S32_LE,
+			.channels_min = 1,
+			.channels_max = 2,
+			.rate_min = 8000,
+			.rate_max = 384000,
+		},
+		.ops = &msm_dai_q6_ops,
+		.id = SLIMBUS_6_RX,
+		.probe = msm_dai_q6_dai_probe,
+		.remove = msm_dai_q6_dai_remove,
+	},
+	{
+		.playback = {
+			.stream_name = "Slimbus5 Playback",
+			.aif_name = "SLIMBUS_5_RX",
+			.rates = SNDRV_PCM_RATE_8000_384000,
+			.formats = DAI_FORMATS_S16_S24_S32_LE,
+			.channels_min = 1,
+			.channels_max = 2,
+			.rate_min = 8000,
+			.rate_max = 384000,
+		},
+		.ops = &msm_dai_q6_ops,
+		.id = SLIMBUS_5_RX,
+		.probe = msm_dai_q6_dai_probe,
+		.remove = msm_dai_q6_dai_remove,
+	},
+	{
+		.playback = {
+			.stream_name = "Slimbus7 Playback",
+			.aif_name = "SLIMBUS_7_RX",
+			.rates = SNDRV_PCM_RATE_8000_384000,
+			.formats = DAI_FORMATS_S16_S24_S32_LE,
+			.channels_min = 1,
+			.channels_max = 8,
+			.rate_min = 8000,
+			.rate_max = 384000,
+		},
+		.ops = &msm_dai_q6_ops,
+		.id = SLIMBUS_7_RX,
+		.probe = msm_dai_q6_dai_probe,
+		.remove = msm_dai_q6_dai_remove,
+	},
+	{
+		.playback = {
+			.stream_name = "Slimbus8 Playback",
+			.aif_name = "SLIMBUS_8_RX",
+			.rates = SNDRV_PCM_RATE_8000_384000,
+			.formats = DAI_FORMATS_S16_S24_S32_LE,
+			.channels_min = 1,
+			.channels_max = 8,
+			.rate_min = 8000,
+			.rate_max = 384000,
+		},
+		.ops = &msm_dai_q6_ops,
+		.id = SLIMBUS_8_RX,
+		.probe = msm_dai_q6_dai_probe,
+		.remove = msm_dai_q6_dai_remove,
+	},
+};
+
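+/*
+ * SLIMBUS TX (capture) DAIs: rates are capped at 192 kHz and the
+ * advertised sample formats vary per port.
+ */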
+static struct snd_soc_dai_driver msm_dai_q6_slimbus_tx_dai[] = {
+	{
+		.capture = {
+			.stream_name = "Slimbus Capture",
+			.aif_name = "SLIMBUS_0_TX",
+			.rates = SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_8000 |
+			SNDRV_PCM_RATE_16000 | SNDRV_PCM_RATE_96000 |
+			SNDRV_PCM_RATE_192000,
+			.formats = SNDRV_PCM_FMTBIT_S16_LE |
+				   SNDRV_PCM_FMTBIT_S24_LE |
+				   SNDRV_PCM_FMTBIT_S24_3LE,
+			.channels_min = 1,
+			.channels_max = 8,
+			.rate_min = 8000,
+			.rate_max = 192000,
+		},
+		.ops = &msm_dai_q6_ops,
+		.id = SLIMBUS_0_TX,
+		.probe = msm_dai_q6_dai_probe,
+		.remove = msm_dai_q6_dai_remove,
+	},
+	{
+		.capture = {
+			.stream_name = "Slimbus1 Capture",
+			.aif_name = "SLIMBUS_1_TX",
+			.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |
+			SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_96000 |
+			SNDRV_PCM_RATE_192000,
+			.formats = SNDRV_PCM_FMTBIT_S16_LE |
+				   SNDRV_PCM_FMTBIT_S24_LE |
+				   SNDRV_PCM_FMTBIT_S24_3LE,
+			.channels_min = 1,
+			.channels_max = 2,
+			.rate_min = 8000,
+			.rate_max = 192000,
+		},
+		.ops = &msm_dai_q6_ops,
+		.id = SLIMBUS_1_TX,
+		.probe = msm_dai_q6_dai_probe,
+		.remove = msm_dai_q6_dai_remove,
+	},
+	{
+		.capture = {
+			.stream_name = "Slimbus2 Capture",
+			.aif_name = "SLIMBUS_2_TX",
+			.rates = SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_8000 |
+			SNDRV_PCM_RATE_16000 | SNDRV_PCM_RATE_96000 |
+			SNDRV_PCM_RATE_192000,
+			.formats = SNDRV_PCM_FMTBIT_S16_LE |
+				   SNDRV_PCM_FMTBIT_S24_LE,
+			.channels_min = 1,
+			.channels_max = 8,
+			.rate_min = 8000,
+			.rate_max = 192000,
+		},
+		.ops = &msm_dai_q6_ops,
+		.id = SLIMBUS_2_TX,
+		.probe = msm_dai_q6_dai_probe,
+		.remove = msm_dai_q6_dai_remove,
+	},
+	{
+		.capture = {
+			.stream_name = "Slimbus3 Capture",
+			.aif_name = "SLIMBUS_3_TX",
+			.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |
+			SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_96000 |
+			SNDRV_PCM_RATE_192000,
+			.formats = SNDRV_PCM_FMTBIT_S16_LE |
+				   SNDRV_PCM_FMTBIT_S24_LE,
+			.channels_min = 2,
+			.channels_max = 4,
+			.rate_min = 8000,
+			.rate_max = 192000,
+		},
+		.ops = &msm_dai_q6_ops,
+		.id = SLIMBUS_3_TX,
+		.probe = msm_dai_q6_dai_probe,
+		.remove = msm_dai_q6_dai_remove,
+	},
+	{
+		.capture = {
+			.stream_name = "Slimbus4 Capture",
+			.aif_name = "SLIMBUS_4_TX",
+			.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |
+			SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_96000 |
+			SNDRV_PCM_RATE_192000,
+			.formats = SNDRV_PCM_FMTBIT_S16_LE |
+				   SNDRV_PCM_FMTBIT_S24_LE |
+				   SNDRV_PCM_FMTBIT_S32_LE,
+			.channels_min = 2,
+			.channels_max = 4,
+			.rate_min = 8000,
+			.rate_max = 192000,
+		},
+		.ops = &msm_dai_q6_ops,
+		.id = SLIMBUS_4_TX,
+		.probe = msm_dai_q6_dai_probe,
+		.remove = msm_dai_q6_dai_remove,
+	},
+	{
+		.capture = {
+			.stream_name = "Slimbus5 Capture",
+			.aif_name = "SLIMBUS_5_TX",
+			.rates = SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_8000 |
+			SNDRV_PCM_RATE_16000 | SNDRV_PCM_RATE_96000 |
+			SNDRV_PCM_RATE_192000,
+			.formats = SNDRV_PCM_FMTBIT_S16_LE |
+				   SNDRV_PCM_FMTBIT_S24_LE,
+			.channels_min = 1,
+			.channels_max = 8,
+			.rate_min = 8000,
+			.rate_max = 192000,
+		},
+		.ops = &msm_dai_q6_ops,
+		.id = SLIMBUS_5_TX,
+		.probe = msm_dai_q6_dai_probe,
+		.remove = msm_dai_q6_dai_remove,
+	},
+	{
+		.capture = {
+			.stream_name = "Slimbus6 Capture",
+			.aif_name = "SLIMBUS_6_TX",
+			.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |
+			SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_96000 |
+			SNDRV_PCM_RATE_192000,
+			.formats = SNDRV_PCM_FMTBIT_S16_LE |
+				   SNDRV_PCM_FMTBIT_S24_LE,
+			.channels_min = 1,
+			.channels_max = 2,
+			.rate_min = 8000,
+			.rate_max = 192000,
+		},
+		.ops = &msm_dai_q6_ops,
+		.id = SLIMBUS_6_TX,
+		.probe = msm_dai_q6_dai_probe,
+		.remove = msm_dai_q6_dai_remove,
+	},
+	{
+		.capture = {
+			.stream_name = "Slimbus7 Capture",
+			.aif_name = "SLIMBUS_7_TX",
+			.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |
+			SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_96000 |
+			SNDRV_PCM_RATE_192000,
+			.formats = SNDRV_PCM_FMTBIT_S16_LE |
+				   SNDRV_PCM_FMTBIT_S24_LE |
+				   SNDRV_PCM_FMTBIT_S32_LE,
+			.channels_min = 1,
+			.channels_max = 8,
+			.rate_min = 8000,
+			.rate_max = 192000,
+		},
+		.ops = &msm_dai_q6_ops,
+		.id = SLIMBUS_7_TX,
+		.probe = msm_dai_q6_dai_probe,
+		.remove = msm_dai_q6_dai_remove,
+	},
+	{
+		.capture = {
+			.stream_name = "Slimbus8 Capture",
+			.aif_name = "SLIMBUS_8_TX",
+			.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |
+			SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_96000 |
+			SNDRV_PCM_RATE_192000,
+			.formats = SNDRV_PCM_FMTBIT_S16_LE |
+				   SNDRV_PCM_FMTBIT_S24_LE |
+				   SNDRV_PCM_FMTBIT_S32_LE,
+			.channels_min = 1,
+			.channels_max = 8,
+			.rate_min = 8000,
+			.rate_max = 192000,
+		},
+		.ops = &msm_dai_q6_ops,
+		.id = SLIMBUS_8_TX,
+		.probe = msm_dai_q6_dai_probe,
+		.remove = msm_dai_q6_dai_remove,
+	},
+};
+
+static int msm_dai_q6_mi2s_format_put(struct snd_kcontrol *kcontrol,
+				      struct snd_ctl_elem_value *ucontrol)
+{
+	struct msm_dai_q6_dai_data *dai_data = kcontrol->private_data;
+	int value = ucontrol->value.integer.value[0];
+
+	dai_data->port_config.i2s.data_format = value;
+	pr_debug("%s: value = %d, channel = %d, line = %d\n",
+		 __func__, value, dai_data->port_config.i2s.mono_stereo,
+		 dai_data->port_config.i2s.channel_mode);
+	return 0;
+}
+
+static int msm_dai_q6_mi2s_format_get(struct snd_kcontrol *kcontrol,
+				      struct snd_ctl_elem_value *ucontrol)
+{
+	struct msm_dai_q6_dai_data *dai_data = kcontrol->private_data;
+
+	ucontrol->value.integer.value[0] =
+		dai_data->port_config.i2s.data_format;
+	return 0;
+}
+
+static int msm_dai_q6_mi2s_vi_feed_mono_put(struct snd_kcontrol *kcontrol,
+				      struct snd_ctl_elem_value *ucontrol)
+{
+	struct msm_dai_q6_dai_data *dai_data = kcontrol->private_data;
+	int value = ucontrol->value.integer.value[0];
+
+	dai_data->vi_feed_mono = value;
+	pr_debug("%s: value = %d\n", __func__, value);
+	return 0;
+}
+
+static int msm_dai_q6_mi2s_vi_feed_mono_get(struct snd_kcontrol *kcontrol,
+				      struct snd_ctl_elem_value *ucontrol)
+{
+	struct msm_dai_q6_dai_data *dai_data = kcontrol->private_data;
+
+	ucontrol->value.integer.value[0] = dai_data->vi_feed_mono;
+	return 0;
+}
+
+static const struct snd_kcontrol_new mi2s_config_controls[] = {
+	SOC_ENUM_EXT("PRI MI2S RX Format", mi2s_config_enum[0],
+		     msm_dai_q6_mi2s_format_get,
+		     msm_dai_q6_mi2s_format_put),
+	SOC_ENUM_EXT("SEC MI2S RX Format", mi2s_config_enum[0],
+		     msm_dai_q6_mi2s_format_get,
+		     msm_dai_q6_mi2s_format_put),
+	SOC_ENUM_EXT("TERT MI2S RX Format", mi2s_config_enum[0],
+		     msm_dai_q6_mi2s_format_get,
+		     msm_dai_q6_mi2s_format_put),
+	SOC_ENUM_EXT("QUAT MI2S RX Format", mi2s_config_enum[0],
+		     msm_dai_q6_mi2s_format_get,
+		     msm_dai_q6_mi2s_format_put),
+	SOC_ENUM_EXT("QUIN MI2S RX Format", mi2s_config_enum[0],
+		     msm_dai_q6_mi2s_format_get,
+		     msm_dai_q6_mi2s_format_put),
+	SOC_ENUM_EXT("PRI MI2S TX Format", mi2s_config_enum[0],
+		     msm_dai_q6_mi2s_format_get,
+		     msm_dai_q6_mi2s_format_put),
+	SOC_ENUM_EXT("SEC MI2S TX Format", mi2s_config_enum[0],
+		     msm_dai_q6_mi2s_format_get,
+		     msm_dai_q6_mi2s_format_put),
+	SOC_ENUM_EXT("TERT MI2S TX Format", mi2s_config_enum[0],
+		     msm_dai_q6_mi2s_format_get,
+		     msm_dai_q6_mi2s_format_put),
+	SOC_ENUM_EXT("QUAT MI2S TX Format", mi2s_config_enum[0],
+		     msm_dai_q6_mi2s_format_get,
+		     msm_dai_q6_mi2s_format_put),
+	SOC_ENUM_EXT("QUIN MI2S TX Format", mi2s_config_enum[0],
+		     msm_dai_q6_mi2s_format_get,
+		     msm_dai_q6_mi2s_format_put),
+	SOC_ENUM_EXT("SENARY MI2S TX Format", mi2s_config_enum[0],
+		     msm_dai_q6_mi2s_format_get,
+		     msm_dai_q6_mi2s_format_put),
+	SOC_ENUM_EXT("INT5 MI2S TX Format", mi2s_config_enum[0],
+		     msm_dai_q6_mi2s_format_get,
+		     msm_dai_q6_mi2s_format_put),
+};
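+
+/*
+ * mi2s_config_controls[] is indexed positionally: entries 0-4 are the
+ * RX format controls for PRI..QUIN MI2S and entries 5-11 the TX format
+ * controls (PRI..QUIN, SENARY, INT5); msm_dai_q6_dai_mi2s_probe()
+ * below relies on this ordering.
+ */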
+
+static const struct snd_kcontrol_new mi2s_vi_feed_controls[] = {
+	SOC_ENUM_EXT("INT5 MI2S VI MONO", mi2s_config_enum[1],
+		     msm_dai_q6_mi2s_vi_feed_mono_get,
+		     msm_dai_q6_mi2s_vi_feed_mono_put),
+};
+
+static int msm_dai_q6_dai_mi2s_probe(struct snd_soc_dai *dai)
+{
+	struct msm_dai_q6_mi2s_dai_data *mi2s_dai_data =
+			dev_get_drvdata(dai->dev);
+	struct msm_mi2s_pdata *mi2s_pdata =
+			(struct msm_mi2s_pdata *) dai->dev->platform_data;
+	struct snd_kcontrol *kcontrol = NULL;
+	int rc = 0;
+	const struct snd_kcontrol_new *ctrl = NULL;
+	const struct snd_kcontrol_new *vi_feed_ctrl = NULL;
+
+	dai->id = mi2s_pdata->intf_id;
+
+	if (mi2s_dai_data->rx_dai.mi2s_dai_data.port_config.i2s.channel_mode) {
+		if (dai->id == MSM_PRIM_MI2S)
+			ctrl = &mi2s_config_controls[0];
+		if (dai->id == MSM_SEC_MI2S)
+			ctrl = &mi2s_config_controls[1];
+		if (dai->id == MSM_TERT_MI2S)
+			ctrl = &mi2s_config_controls[2];
+		if (dai->id == MSM_QUAT_MI2S)
+			ctrl = &mi2s_config_controls[3];
+		if (dai->id == MSM_QUIN_MI2S)
+			ctrl = &mi2s_config_controls[4];
+	}
+
+	if (ctrl) {
+		kcontrol = snd_ctl_new1(ctrl,
+					&mi2s_dai_data->rx_dai.mi2s_dai_data);
+		rc = snd_ctl_add(dai->component->card->snd_card, kcontrol);
+
+		if (IS_ERR_VALUE(rc)) {
+			dev_err(dai->dev, "%s: err add RX fmt ctl DAI = %s\n",
+				__func__, dai->name);
+			goto rtn;
+		}
+	}
+
+	ctrl = NULL;
+	if (mi2s_dai_data->tx_dai.mi2s_dai_data.port_config.i2s.channel_mode) {
+		if (dai->id == MSM_PRIM_MI2S)
+			ctrl = &mi2s_config_controls[5];
+		if (dai->id == MSM_SEC_MI2S)
+			ctrl = &mi2s_config_controls[6];
+		if (dai->id == MSM_TERT_MI2S)
+			ctrl = &mi2s_config_controls[7];
+		if (dai->id == MSM_QUAT_MI2S)
+			ctrl = &mi2s_config_controls[8];
+		if (dai->id == MSM_QUIN_MI2S)
+			ctrl = &mi2s_config_controls[9];
+		if (dai->id == MSM_SENARY_MI2S)
+			ctrl = &mi2s_config_controls[10];
+		if (dai->id == MSM_INT5_MI2S)
+			ctrl = &mi2s_config_controls[11];
+	}
+
+	if (ctrl) {
+		rc = snd_ctl_add(dai->component->card->snd_card,
+				snd_ctl_new1(ctrl,
+				&mi2s_dai_data->tx_dai.mi2s_dai_data));
+
+		if (IS_ERR_VALUE(rc)) {
+			if (kcontrol)
+				snd_ctl_remove(dai->component->card->snd_card,
+						kcontrol);
+			dev_err(dai->dev, "%s: err add TX fmt ctl DAI = %s\n",
+				__func__, dai->name);
+		}
+	}
+
+	if (dai->id == MSM_INT5_MI2S)
+		vi_feed_ctrl = &mi2s_vi_feed_controls[0];
+
+	if (vi_feed_ctrl) {
+		rc = snd_ctl_add(dai->component->card->snd_card,
+				snd_ctl_new1(vi_feed_ctrl,
+				&mi2s_dai_data->tx_dai.mi2s_dai_data));
+
+		if (IS_ERR_VALUE(rc)) {
+			dev_err(dai->dev, "%s: err add TX vi feed channel ctl DAI = %s\n",
+				__func__, dai->name);
+		}
+	}
+
+	rc = msm_dai_q6_dai_add_route(dai);
+rtn:
+	return rc;
+}
+
+static int msm_dai_q6_dai_mi2s_remove(struct snd_soc_dai *dai)
+{
+	struct msm_dai_q6_mi2s_dai_data *mi2s_dai_data =
+		dev_get_drvdata(dai->dev);
+	int rc;
+
+	/* If AFE port is still up, close it */
+	if (test_bit(STATUS_PORT_STARTED,
+		     mi2s_dai_data->rx_dai.mi2s_dai_data.status_mask)) {
+		rc = afe_close(MI2S_RX); /* can block */
+		if (IS_ERR_VALUE(rc))
+			dev_err(dai->dev, "fail to close MI2S_RX port\n");
+		clear_bit(STATUS_PORT_STARTED,
+			  mi2s_dai_data->rx_dai.mi2s_dai_data.status_mask);
+	}
+	if (test_bit(STATUS_PORT_STARTED,
+		     mi2s_dai_data->tx_dai.mi2s_dai_data.status_mask)) {
+		rc = afe_close(MI2S_TX); /* can block */
+		if (IS_ERR_VALUE(rc))
+			dev_err(dai->dev, "fail to close MI2S_TX port\n");
+		clear_bit(STATUS_PORT_STARTED,
+			  mi2s_dai_data->tx_dai.mi2s_dai_data.status_mask);
+	}
+	kfree(mi2s_dai_data);
+	return 0;
+}
+
+static int msm_dai_q6_mi2s_startup(struct snd_pcm_substream *substream,
+				   struct snd_soc_dai *dai)
+{
+	return 0;
+}
+
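+/*
+ * Map a logical MI2S interface id and stream direction to its AFE
+ * port id; returns -1 rather than a -errno on an unknown id, and
+ * callers only test the result against zero.
+ */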
+static int msm_mi2s_get_port_id(u32 mi2s_id, int stream, u16 *port_id)
+{
+	int ret = 0;
+
+	switch (stream) {
+	case SNDRV_PCM_STREAM_PLAYBACK:
+		switch (mi2s_id) {
+		case MSM_PRIM_MI2S:
+			*port_id = AFE_PORT_ID_PRIMARY_MI2S_RX;
+			break;
+		case MSM_SEC_MI2S:
+			*port_id = AFE_PORT_ID_SECONDARY_MI2S_RX;
+			break;
+		case MSM_TERT_MI2S:
+			*port_id = AFE_PORT_ID_TERTIARY_MI2S_RX;
+			break;
+		case MSM_QUAT_MI2S:
+			*port_id = AFE_PORT_ID_QUATERNARY_MI2S_RX;
+			break;
+		case MSM_SEC_MI2S_SD1:
+			*port_id = AFE_PORT_ID_SECONDARY_MI2S_RX_SD1;
+			break;
+		case MSM_QUIN_MI2S:
+			*port_id = AFE_PORT_ID_QUINARY_MI2S_RX;
+			break;
+		case MSM_INT0_MI2S:
+			*port_id = AFE_PORT_ID_INT0_MI2S_RX;
+			break;
+		case MSM_INT1_MI2S:
+			*port_id = AFE_PORT_ID_INT1_MI2S_RX;
+			break;
+		case MSM_INT2_MI2S:
+			*port_id = AFE_PORT_ID_INT2_MI2S_RX;
+			break;
+		case MSM_INT3_MI2S:
+			*port_id = AFE_PORT_ID_INT3_MI2S_RX;
+			break;
+		case MSM_INT4_MI2S:
+			*port_id = AFE_PORT_ID_INT4_MI2S_RX;
+			break;
+		case MSM_INT5_MI2S:
+			*port_id = AFE_PORT_ID_INT5_MI2S_RX;
+			break;
+		case MSM_INT6_MI2S:
+			*port_id = AFE_PORT_ID_INT6_MI2S_RX;
+			break;
+		default:
+			pr_err("%s: playback err id 0x%x\n",
+				__func__, mi2s_id);
+			ret = -1;
+			break;
+		}
+	break;
+	case SNDRV_PCM_STREAM_CAPTURE:
+		switch (mi2s_id) {
+		case MSM_PRIM_MI2S:
+			*port_id = AFE_PORT_ID_PRIMARY_MI2S_TX;
+			break;
+		case MSM_SEC_MI2S:
+			*port_id = AFE_PORT_ID_SECONDARY_MI2S_TX;
+			break;
+		case MSM_TERT_MI2S:
+			*port_id = AFE_PORT_ID_TERTIARY_MI2S_TX;
+			break;
+		case MSM_QUAT_MI2S:
+			*port_id = AFE_PORT_ID_QUATERNARY_MI2S_TX;
+			break;
+		case MSM_QUIN_MI2S:
+			*port_id = AFE_PORT_ID_QUINARY_MI2S_TX;
+			break;
+		case MSM_SENARY_MI2S:
+			*port_id = AFE_PORT_ID_SENARY_MI2S_TX;
+			break;
+		case MSM_INT0_MI2S:
+			*port_id = AFE_PORT_ID_INT0_MI2S_TX;
+			break;
+		case MSM_INT1_MI2S:
+			*port_id = AFE_PORT_ID_INT1_MI2S_TX;
+			break;
+		case MSM_INT2_MI2S:
+			*port_id = AFE_PORT_ID_INT2_MI2S_TX;
+			break;
+		case MSM_INT3_MI2S:
+			*port_id = AFE_PORT_ID_INT3_MI2S_TX;
+			break;
+		case MSM_INT4_MI2S:
+			*port_id = AFE_PORT_ID_INT4_MI2S_TX;
+			break;
+		case MSM_INT5_MI2S:
+			*port_id = AFE_PORT_ID_INT5_MI2S_TX;
+			break;
+		case MSM_INT6_MI2S:
+			*port_id = AFE_PORT_ID_INT6_MI2S_TX;
+			break;
+		default:
+			pr_err("%s: capture err id 0x%x\n", __func__, mi2s_id);
+			ret = -1;
+			break;
+		}
+	break;
+	default:
+		pr_err("%s: default err %d\n", __func__, stream);
+		ret = -1;
+	break;
+	}
+	pr_debug("%s: port_id = 0x%x\n", __func__, *port_id);
+	return ret;
+}
+
+static int msm_dai_q6_mi2s_prepare(struct snd_pcm_substream *substream,
+		struct snd_soc_dai *dai)
+{
+	struct msm_dai_q6_mi2s_dai_data *mi2s_dai_data =
+		dev_get_drvdata(dai->dev);
+	struct msm_dai_q6_dai_data *dai_data =
+		(substream->stream == SNDRV_PCM_STREAM_PLAYBACK ?
+		 &mi2s_dai_data->rx_dai.mi2s_dai_data :
+		 &mi2s_dai_data->tx_dai.mi2s_dai_data);
+	u16 port_id = 0;
+	int rc = 0;
+
+	if (msm_mi2s_get_port_id(dai->id, substream->stream,
+				 &port_id) != 0) {
+		dev_err(dai->dev, "%s: Invalid Port ID 0x%x\n",
+				__func__, port_id);
+		return -EINVAL;
+	}
+
+	dev_dbg(dai->dev, "%s: dai id %d, afe port id = 0x%x\n"
+		"dai_data->channels = %u sample_rate = %u\n", __func__,
+		dai->id, port_id, dai_data->channels, dai_data->rate);
+
+	if (!test_bit(STATUS_PORT_STARTED, dai_data->status_mask)) {
+		/* PORT START should be set if prepare called
+		 * in active state.
+		 */
+		rc = afe_port_start(port_id, &dai_data->port_config,
+				    dai_data->rate);
+
+		if (IS_ERR_VALUE(rc))
+			dev_err(dai->dev, "fail to open AFE port 0x%x\n",
+				dai->id);
+		else
+			set_bit(STATUS_PORT_STARTED,
+				dai_data->status_mask);
+	}
+	if (!test_bit(STATUS_PORT_STARTED, dai_data->hwfree_status)) {
+		set_bit(STATUS_PORT_STARTED, dai_data->hwfree_status);
+		dev_dbg(dai->dev, "%s: set hwfree_status to started\n",
+				__func__);
+	}
+	return rc;
+}
+
+static int msm_dai_q6_mi2s_hw_params(struct snd_pcm_substream *substream,
+				struct snd_pcm_hw_params *params,
+				struct snd_soc_dai *dai)
+{
+	struct msm_dai_q6_mi2s_dai_data *mi2s_dai_data =
+		dev_get_drvdata(dai->dev);
+	struct msm_dai_q6_mi2s_dai_config *mi2s_dai_config =
+		(substream->stream == SNDRV_PCM_STREAM_PLAYBACK ?
+		&mi2s_dai_data->rx_dai : &mi2s_dai_data->tx_dai);
+	struct msm_dai_q6_dai_data *dai_data = &mi2s_dai_config->mi2s_dai_data;
+	struct afe_param_id_i2s_cfg *i2s = &dai_data->port_config.i2s;
+
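+	/*
+	 * Pick the narrowest SD-line configuration that can carry the
+	 * requested channel count, bounded by the lines provisioned in
+	 * platform data (pdata_mi2s_lines).
+	 */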
+	dai_data->channels = params_channels(params);
+	switch (dai_data->channels) {
+	case 8:
+	case 7:
+		if (mi2s_dai_config->pdata_mi2s_lines < AFE_PORT_I2S_8CHS)
+			goto error_invalid_data;
+		dai_data->port_config.i2s.channel_mode = AFE_PORT_I2S_8CHS;
+		break;
+	case 6:
+	case 5:
+		if (mi2s_dai_config->pdata_mi2s_lines < AFE_PORT_I2S_6CHS)
+			goto error_invalid_data;
+		dai_data->port_config.i2s.channel_mode = AFE_PORT_I2S_6CHS;
+		break;
+	case 4:
+	case 3:
+		if (mi2s_dai_config->pdata_mi2s_lines < AFE_PORT_I2S_QUAD01)
+			goto error_invalid_data;
+		if (mi2s_dai_config->pdata_mi2s_lines == AFE_PORT_I2S_QUAD23)
+			dai_data->port_config.i2s.channel_mode =
+				mi2s_dai_config->pdata_mi2s_lines;
+		else
+			dai_data->port_config.i2s.channel_mode =
+					AFE_PORT_I2S_QUAD01;
+		break;
+	case 2:
+	case 1:
+		if (mi2s_dai_config->pdata_mi2s_lines < AFE_PORT_I2S_SD0)
+			goto error_invalid_data;
+		switch (mi2s_dai_config->pdata_mi2s_lines) {
+		case AFE_PORT_I2S_SD0:
+		case AFE_PORT_I2S_SD1:
+		case AFE_PORT_I2S_SD2:
+		case AFE_PORT_I2S_SD3:
+			dai_data->port_config.i2s.channel_mode =
+				mi2s_dai_config->pdata_mi2s_lines;
+			break;
+		case AFE_PORT_I2S_QUAD01:
+		case AFE_PORT_I2S_6CHS:
+		case AFE_PORT_I2S_8CHS:
+			if (dai_data->vi_feed_mono == SPKR_1)
+				dai_data->port_config.i2s.channel_mode =
+							AFE_PORT_I2S_SD0;
+			else
+				dai_data->port_config.i2s.channel_mode =
+							AFE_PORT_I2S_SD1;
+			break;
+		case AFE_PORT_I2S_QUAD23:
+			dai_data->port_config.i2s.channel_mode =
+						AFE_PORT_I2S_SD2;
+			break;
+		}
+		if (dai_data->channels == 2)
+			dai_data->port_config.i2s.mono_stereo =
+						MSM_AFE_CH_STEREO;
+		else
+			dai_data->port_config.i2s.mono_stereo = MSM_AFE_MONO;
+		break;
+	default:
+		pr_err("%s: default err channels %d\n",
+			__func__, dai_data->channels);
+		goto error_invalid_data;
+	}
+	dai_data->rate = params_rate(params);
+
+	switch (params_format(params)) {
+	case SNDRV_PCM_FORMAT_S16_LE:
+	case SNDRV_PCM_FORMAT_SPECIAL:
+		dai_data->port_config.i2s.bit_width = 16;
+		dai_data->bitwidth = 16;
+		break;
+	case SNDRV_PCM_FORMAT_S24_LE:
+	case SNDRV_PCM_FORMAT_S24_3LE:
+		dai_data->port_config.i2s.bit_width = 24;
+		dai_data->bitwidth = 24;
+		break;
+	case SNDRV_PCM_FORMAT_S32_LE:
+		dai_data->port_config.i2s.bit_width = 32;
+		dai_data->bitwidth = 32;
+		break;
+	default:
+		pr_err("%s: format %d\n",
+			__func__, params_format(params));
+		return -EINVAL;
+	}
+
+	dai_data->port_config.i2s.i2s_cfg_minor_version =
+			AFE_API_VERSION_I2S_CONFIG;
+	dai_data->port_config.i2s.sample_rate = dai_data->rate;
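+	/*
+	 * RX and TX of one MI2S interface share clocking, so when both
+	 * directions are active their sample rate and bit width must
+	 * match; the cross-check below rejects mismatched sessions.
+	 */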
+	if ((test_bit(STATUS_PORT_STARTED,
+	    mi2s_dai_data->rx_dai.mi2s_dai_data.status_mask) &&
+	    test_bit(STATUS_PORT_STARTED,
+	    mi2s_dai_data->rx_dai.mi2s_dai_data.hwfree_status)) ||
+	    (test_bit(STATUS_PORT_STARTED,
+	    mi2s_dai_data->tx_dai.mi2s_dai_data.status_mask) &&
+	    test_bit(STATUS_PORT_STARTED,
+	    mi2s_dai_data->tx_dai.mi2s_dai_data.hwfree_status))) {
+		if ((mi2s_dai_data->tx_dai.mi2s_dai_data.rate !=
+		    mi2s_dai_data->rx_dai.mi2s_dai_data.rate) ||
+		   (mi2s_dai_data->rx_dai.mi2s_dai_data.bitwidth !=
+		    mi2s_dai_data->tx_dai.mi2s_dai_data.bitwidth)) {
+			dev_err(dai->dev, "%s: Error mismatch in HW params\n"
+				"Tx sample_rate = %u bit_width = %hu\n"
+				"Rx sample_rate = %u bit_width = %hu\n"
+				, __func__,
+				mi2s_dai_data->tx_dai.mi2s_dai_data.rate,
+				mi2s_dai_data->tx_dai.mi2s_dai_data.bitwidth,
+				mi2s_dai_data->rx_dai.mi2s_dai_data.rate,
+				mi2s_dai_data->rx_dai.mi2s_dai_data.bitwidth);
+			return -EINVAL;
+		}
+	}
+	dev_dbg(dai->dev, "%s: dai id %d dai_data->channels = %d\n"
+		"sample_rate = %u i2s_cfg_minor_version = 0x%x\n"
+		"bit_width = %hu  channel_mode = 0x%x mono_stereo = %#x\n"
+		"ws_src = 0x%x sample_rate = %u data_format = 0x%x\n"
+		"reserved = %u\n", __func__, dai->id, dai_data->channels,
+		dai_data->rate, i2s->i2s_cfg_minor_version, i2s->bit_width,
+		i2s->channel_mode, i2s->mono_stereo, i2s->ws_src,
+		i2s->sample_rate, i2s->data_format, i2s->reserved);
+
+	return 0;
+
+error_invalid_data:
+	pr_err("%s: dai_data->channels = %d channel_mode = %d\n", __func__,
+		 dai_data->channels, dai_data->port_config.i2s.channel_mode);
+	return -EINVAL;
+}
+
+static int msm_dai_q6_mi2s_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
+{
+	struct msm_dai_q6_mi2s_dai_data *mi2s_dai_data =
+	dev_get_drvdata(dai->dev);
+
+	if (test_bit(STATUS_PORT_STARTED,
+	    mi2s_dai_data->rx_dai.mi2s_dai_data.status_mask) ||
+	    test_bit(STATUS_PORT_STARTED,
+	    mi2s_dai_data->tx_dai.mi2s_dai_data.status_mask)) {
+		dev_err(dai->dev, "%s: err chg i2s mode while dai running",
+			__func__);
+		return -EPERM;
+	}
+
+	switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
+	case SND_SOC_DAIFMT_CBS_CFS:
+		mi2s_dai_data->rx_dai.mi2s_dai_data.port_config.i2s.ws_src = 1;
+		mi2s_dai_data->tx_dai.mi2s_dai_data.port_config.i2s.ws_src = 1;
+		break;
+	case SND_SOC_DAIFMT_CBM_CFM:
+		mi2s_dai_data->rx_dai.mi2s_dai_data.port_config.i2s.ws_src = 0;
+		mi2s_dai_data->tx_dai.mi2s_dai_data.port_config.i2s.ws_src = 0;
+		break;
+	default:
+		pr_err("%s: fmt %d\n",
+			__func__, fmt & SND_SOC_DAIFMT_MASTER_MASK);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int msm_dai_q6_mi2s_hw_free(struct snd_pcm_substream *substream,
+		struct snd_soc_dai *dai)
+{
+	struct msm_dai_q6_mi2s_dai_data *mi2s_dai_data =
+			dev_get_drvdata(dai->dev);
+	struct msm_dai_q6_dai_data *dai_data =
+		(substream->stream == SNDRV_PCM_STREAM_PLAYBACK ?
+		 &mi2s_dai_data->rx_dai.mi2s_dai_data :
+		 &mi2s_dai_data->tx_dai.mi2s_dai_data);
+
+	if (test_bit(STATUS_PORT_STARTED, dai_data->hwfree_status)) {
+		clear_bit(STATUS_PORT_STARTED, dai_data->hwfree_status);
+		dev_dbg(dai->dev, "%s: clear hwfree_status\n", __func__);
+	}
+	return 0;
+}
+
+static void msm_dai_q6_mi2s_shutdown(struct snd_pcm_substream *substream,
+				     struct snd_soc_dai *dai)
+{
+	struct msm_dai_q6_mi2s_dai_data *mi2s_dai_data =
+			dev_get_drvdata(dai->dev);
+	struct msm_dai_q6_dai_data *dai_data =
+		(substream->stream == SNDRV_PCM_STREAM_PLAYBACK ?
+		 &mi2s_dai_data->rx_dai.mi2s_dai_data :
+		 &mi2s_dai_data->tx_dai.mi2s_dai_data);
+	u16 port_id = 0;
+	int rc = 0;
+
+	if (msm_mi2s_get_port_id(dai->id, substream->stream,
+				 &port_id) != 0) {
+		dev_err(dai->dev, "%s: Invalid Port ID 0x%x\n",
+				__func__, port_id);
+	}
+
+	dev_dbg(dai->dev, "%s: closing afe port id = 0x%x\n",
+			__func__, port_id);
+
+	if (test_bit(STATUS_PORT_STARTED, dai_data->status_mask)) {
+		rc = afe_close(port_id);
+		if (IS_ERR_VALUE(rc))
+			dev_err(dai->dev, "fail to close AFE port\n");
+		clear_bit(STATUS_PORT_STARTED, dai_data->status_mask);
+	}
+	if (test_bit(STATUS_PORT_STARTED, dai_data->hwfree_status))
+		clear_bit(STATUS_PORT_STARTED, dai_data->hwfree_status);
+}
+
+static struct snd_soc_dai_ops msm_dai_q6_mi2s_ops = {
+	.startup	= msm_dai_q6_mi2s_startup,
+	.prepare	= msm_dai_q6_mi2s_prepare,
+	.hw_params	= msm_dai_q6_mi2s_hw_params,
+	.hw_free	= msm_dai_q6_mi2s_hw_free,
+	.set_fmt	= msm_dai_q6_mi2s_set_fmt,
+	.shutdown	= msm_dai_q6_mi2s_shutdown,
+};
+
+/* Channel min and max are initialized base on platform data */
+static struct snd_soc_dai_driver msm_dai_q6_mi2s_dai[] = {
+	{
+		.playback = {
+			.stream_name = "Primary MI2S Playback",
+			.aif_name = "PRI_MI2S_RX",
+			.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_11025 |
+				 SNDRV_PCM_RATE_16000 | SNDRV_PCM_RATE_22050 |
+				 SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_44100 |
+				 SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_96000 |
+				 SNDRV_PCM_RATE_192000,
+			.formats = SNDRV_PCM_FMTBIT_S16_LE |
+				SNDRV_PCM_FMTBIT_S24_LE |
+				SNDRV_PCM_FMTBIT_S24_3LE,
+			.rate_min =     8000,
+			.rate_max =     192000,
+		},
+		.capture = {
+			.stream_name = "Primary MI2S Capture",
+			.aif_name = "PRI_MI2S_TX",
+			.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_11025 |
+				 SNDRV_PCM_RATE_16000 | SNDRV_PCM_RATE_22050 |
+				 SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_44100 |
+				 SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_96000 |
+				 SNDRV_PCM_RATE_192000,
+			.formats = SNDRV_PCM_FMTBIT_S16_LE,
+			.rate_min =     8000,
+			.rate_max =     192000,
+		},
+		.ops = &msm_dai_q6_mi2s_ops,
+		.id = MSM_PRIM_MI2S,
+		.probe = msm_dai_q6_dai_mi2s_probe,
+		.remove = msm_dai_q6_dai_mi2s_remove,
+	},
+	{
+		.playback = {
+			.stream_name = "Secondary MI2S Playback",
+			.aif_name = "SEC_MI2S_RX",
+			.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_11025 |
+				 SNDRV_PCM_RATE_16000 | SNDRV_PCM_RATE_22050 |
+				 SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_44100 |
+				 SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_96000 |
+				 SNDRV_PCM_RATE_192000,
+			.formats = SNDRV_PCM_FMTBIT_S16_LE,
+			.rate_min =     8000,
+			.rate_max =     192000,
+		},
+		.capture = {
+			.stream_name = "Secondary MI2S Capture",
+			.aif_name = "SEC_MI2S_TX",
+			.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_11025 |
+				 SNDRV_PCM_RATE_16000 | SNDRV_PCM_RATE_22050 |
+				 SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_44100 |
+				 SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_96000 |
+				 SNDRV_PCM_RATE_192000,
+			.formats = SNDRV_PCM_FMTBIT_S16_LE,
+			.rate_min =     8000,
+			.rate_max =     192000,
+		},
+		.ops = &msm_dai_q6_mi2s_ops,
+		.id = MSM_SEC_MI2S,
+		.probe = msm_dai_q6_dai_mi2s_probe,
+		.remove = msm_dai_q6_dai_mi2s_remove,
+	},
+	{
+		.playback = {
+			.stream_name = "Tertiary MI2S Playback",
+			.aif_name = "TERT_MI2S_RX",
+			.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_11025 |
+				 SNDRV_PCM_RATE_16000 | SNDRV_PCM_RATE_22050 |
+				 SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_44100 |
+				 SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_96000 |
+				 SNDRV_PCM_RATE_192000,
+			.formats = SNDRV_PCM_FMTBIT_S16_LE,
+			.rate_min =     8000,
+			.rate_max =     192000,
+		},
+		.capture = {
+			.stream_name = "Tertiary MI2S Capture",
+			.aif_name = "TERT_MI2S_TX",
+			.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_11025 |
+				 SNDRV_PCM_RATE_16000 | SNDRV_PCM_RATE_22050 |
+				 SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_44100 |
+				 SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_96000 |
+				 SNDRV_PCM_RATE_192000,
+			.formats = SNDRV_PCM_FMTBIT_S16_LE,
+			.rate_min =     8000,
+			.rate_max =     192000,
+		},
+		.ops = &msm_dai_q6_mi2s_ops,
+		.id = MSM_TERT_MI2S,
+		.probe = msm_dai_q6_dai_mi2s_probe,
+		.remove = msm_dai_q6_dai_mi2s_remove,
+	},
+	{
+		.playback = {
+			.stream_name = "Quaternary MI2S Playback",
+			.aif_name = "QUAT_MI2S_RX",
+			.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_11025 |
+				 SNDRV_PCM_RATE_16000 | SNDRV_PCM_RATE_22050 |
+				 SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_44100 |
+				 SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_96000 |
+				 SNDRV_PCM_RATE_192000,
+			.formats = SNDRV_PCM_FMTBIT_S16_LE,
+			.rate_min =     8000,
+			.rate_max =     192000,
+		},
+		.capture = {
+			.stream_name = "Quaternary MI2S Capture",
+			.aif_name = "QUAT_MI2S_TX",
+			.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_11025 |
+				 SNDRV_PCM_RATE_16000 | SNDRV_PCM_RATE_22050 |
+				 SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_44100 |
+				 SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_88200 |
+				 SNDRV_PCM_RATE_96000 | SNDRV_PCM_RATE_176400 |
+				 SNDRV_PCM_RATE_192000,
+			.formats = SNDRV_PCM_FMTBIT_S16_LE,
+			.rate_min =     8000,
+			.rate_max =     192000,
+		},
+		.ops = &msm_dai_q6_mi2s_ops,
+		.id = MSM_QUAT_MI2S,
+		.probe = msm_dai_q6_dai_mi2s_probe,
+		.remove = msm_dai_q6_dai_mi2s_remove,
+	},
+	{
+		.playback = {
+			.stream_name = "Quinary MI2S Playback",
+			.aif_name = "QUIN_MI2S_RX",
+			.rates = SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_8000 |
+			SNDRV_PCM_RATE_16000 | SNDRV_PCM_RATE_96000 |
+			SNDRV_PCM_RATE_192000,
+			.formats = SNDRV_PCM_FMTBIT_S16_LE,
+			.rate_min =     8000,
+			.rate_max =     192000,
+		},
+		.capture = {
+			.stream_name = "Quinary MI2S Capture",
+			.aif_name = "QUIN_MI2S_TX",
+			.rates = SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_8000 |
+			SNDRV_PCM_RATE_16000,
+			.formats = SNDRV_PCM_FMTBIT_S16_LE,
+			.rate_min =     8000,
+			.rate_max =     48000,
+		},
+		.ops = &msm_dai_q6_mi2s_ops,
+		.id = MSM_QUIN_MI2S,
+		.probe = msm_dai_q6_dai_mi2s_probe,
+		.remove = msm_dai_q6_dai_mi2s_remove,
+	},
+	{
+		.playback = {
+			.stream_name = "Secondary MI2S Playback SD1",
+			.aif_name = "SEC_MI2S_RX_SD1",
+			.rates = SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_8000 |
+			SNDRV_PCM_RATE_16000,
+			.formats = SNDRV_PCM_FMTBIT_S16_LE,
+			.rate_min =     8000,
+			.rate_max =     48000,
+		},
+		.id = MSM_SEC_MI2S_SD1,
+	},
+	{
+		.capture = {
+			.stream_name = "Senary_mi2s Capture",
+			.aif_name = "SENARY_TX",
+			.rates = SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_8000 |
+			SNDRV_PCM_RATE_16000,
+			.formats = SNDRV_PCM_FMTBIT_S16_LE,
+			.rate_min =     8000,
+			.rate_max =     48000,
+		},
+		.ops = &msm_dai_q6_mi2s_ops,
+		.id = MSM_SENARY_MI2S,
+		.probe = msm_dai_q6_dai_mi2s_probe,
+		.remove = msm_dai_q6_dai_mi2s_remove,
+	},
+	{
+		.playback = {
+			.stream_name = "INT0 MI2S Playback",
+			.aif_name = "INT0_MI2S_RX",
+			.rates = SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_8000 |
+			SNDRV_PCM_RATE_16000 | SNDRV_PCM_RATE_44100 |
+			SNDRV_PCM_RATE_96000 | SNDRV_PCM_RATE_192000,
+			.formats = SNDRV_PCM_FMTBIT_S16_LE |
+				SNDRV_PCM_FMTBIT_S24_LE |
+				SNDRV_PCM_FMTBIT_S24_3LE,
+			.rate_min =     8000,
+			.rate_max =     192000,
+		},
+		.capture = {
+			.stream_name = "INT0 MI2S Capture",
+			.aif_name = "INT0_MI2S_TX",
+			.rates = SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_8000 |
+			SNDRV_PCM_RATE_16000,
+			.formats = SNDRV_PCM_FMTBIT_S16_LE,
+			.rate_min =     8000,
+			.rate_max =     48000,
+		},
+		.ops = &msm_dai_q6_mi2s_ops,
+		.id = MSM_INT0_MI2S,
+		.probe = msm_dai_q6_dai_mi2s_probe,
+		.remove = msm_dai_q6_dai_mi2s_remove,
+	},
+	{
+		.playback = {
+			.stream_name = "INT1 MI2S Playback",
+			.aif_name = "INT1_MI2S_RX",
+			.rates = SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_8000 |
+			SNDRV_PCM_RATE_16000,
+			.formats = SNDRV_PCM_FMTBIT_S16_LE |
+				SNDRV_PCM_FMTBIT_S24_LE |
+				SNDRV_PCM_FMTBIT_S24_3LE,
+			.rate_min =     8000,
+			.rate_max =     48000,
+		},
+		.capture = {
+			.stream_name = "INT1 MI2S Capture",
+			.aif_name = "INT1_MI2S_TX",
+			.rates = SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_8000 |
+			SNDRV_PCM_RATE_16000,
+			.formats = SNDRV_PCM_FMTBIT_S16_LE,
+			.rate_min =     8000,
+			.rate_max =     48000,
+		},
+		.ops = &msm_dai_q6_mi2s_ops,
+		.id = MSM_INT1_MI2S,
+		.probe = msm_dai_q6_dai_mi2s_probe,
+		.remove = msm_dai_q6_dai_mi2s_remove,
+	},
+	{
+		.playback = {
+			.stream_name = "INT2 MI2S Playback",
+			.aif_name = "INT2_MI2S_RX",
+			.rates = SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_8000 |
+			SNDRV_PCM_RATE_16000,
+			.formats = SNDRV_PCM_FMTBIT_S16_LE |
+				SNDRV_PCM_FMTBIT_S24_LE |
+				SNDRV_PCM_FMTBIT_S24_3LE,
+			.rate_min =     8000,
+			.rate_max =     48000,
+		},
+		.capture = {
+			.stream_name = "INT2 MI2S Capture",
+			.aif_name = "INT2_MI2S_TX",
+			.rates = SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_8000 |
+			SNDRV_PCM_RATE_16000,
+			.formats = SNDRV_PCM_FMTBIT_S16_LE,
+			.rate_min =     8000,
+			.rate_max =     48000,
+		},
+		.ops = &msm_dai_q6_mi2s_ops,
+		.id = MSM_INT2_MI2S,
+		.probe = msm_dai_q6_dai_mi2s_probe,
+		.remove = msm_dai_q6_dai_mi2s_remove,
+	},
+	{
+		.playback = {
+			.stream_name = "INT3 MI2S Playback",
+			.aif_name = "INT3_MI2S_RX",
+			.rates = SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_8000 |
+			SNDRV_PCM_RATE_16000,
+			.formats = SNDRV_PCM_FMTBIT_S16_LE |
+				SNDRV_PCM_FMTBIT_S24_LE |
+				SNDRV_PCM_FMTBIT_S24_3LE,
+			.rate_min =     8000,
+			.rate_max =     48000,
+		},
+		.capture = {
+			.stream_name = "INT3 MI2S Capture",
+			.aif_name = "INT3_MI2S_TX",
+			.rates = SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_8000 |
+			SNDRV_PCM_RATE_16000,
+			.formats = SNDRV_PCM_FMTBIT_S16_LE,
+			.rate_min =     8000,
+			.rate_max =     48000,
+		},
+		.ops = &msm_dai_q6_mi2s_ops,
+		.id = MSM_INT3_MI2S,
+		.probe = msm_dai_q6_dai_mi2s_probe,
+		.remove = msm_dai_q6_dai_mi2s_remove,
+	},
+	{
+		.playback = {
+			.stream_name = "INT4 MI2S Playback",
+			.aif_name = "INT4_MI2S_RX",
+			.rates = SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_8000 |
+			SNDRV_PCM_RATE_16000 | SNDRV_PCM_RATE_96000 |
+			SNDRV_PCM_RATE_192000,
+			.formats = SNDRV_PCM_FMTBIT_S16_LE |
+				SNDRV_PCM_FMTBIT_S24_LE |
+				SNDRV_PCM_FMTBIT_S24_3LE,
+			.rate_min =     8000,
+			.rate_max =     192000,
+		},
+		.capture = {
+			.stream_name = "INT4 MI2S Capture",
+			.aif_name = "INT4_MI2S_TX",
+			.rates = SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_8000 |
+			SNDRV_PCM_RATE_16000,
+			.formats = SNDRV_PCM_FMTBIT_S16_LE,
+			.rate_min =     8000,
+			.rate_max =     48000,
+		},
+		.ops = &msm_dai_q6_mi2s_ops,
+		.id = MSM_INT4_MI2S,
+		.probe = msm_dai_q6_dai_mi2s_probe,
+		.remove = msm_dai_q6_dai_mi2s_remove,
+	},
+	{
+		.playback = {
+			.stream_name = "INT5 MI2S Playback",
+			.aif_name = "INT5_MI2S_RX",
+			.rates = SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_8000 |
+			SNDRV_PCM_RATE_16000,
+			.formats = SNDRV_PCM_FMTBIT_S16_LE |
+				SNDRV_PCM_FMTBIT_S24_LE |
+				SNDRV_PCM_FMTBIT_S24_3LE,
+			.rate_min =     8000,
+			.rate_max =     48000,
+		},
+		.capture = {
+			.stream_name = "INT5 MI2S Capture",
+			.aif_name = "INT5_MI2S_TX",
+			.rates = SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_8000 |
+			SNDRV_PCM_RATE_16000,
+			.formats = SNDRV_PCM_FMTBIT_S16_LE,
+			.rate_min =     8000,
+			.rate_max =     48000,
+		},
+		.ops = &msm_dai_q6_mi2s_ops,
+		.id = MSM_INT5_MI2S,
+		.probe = msm_dai_q6_dai_mi2s_probe,
+		.remove = msm_dai_q6_dai_mi2s_remove,
+	},
+	{
+		.playback = {
+			.stream_name = "INT6 MI2S Playback",
+			.aif_name = "INT6_MI2S_RX",
+			.rates = SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_8000 |
+			SNDRV_PCM_RATE_16000,
+			.formats = SNDRV_PCM_FMTBIT_S16_LE |
+				SNDRV_PCM_FMTBIT_S24_LE |
+				SNDRV_PCM_FMTBIT_S24_3LE,
+			.rate_min =     8000,
+			.rate_max =     48000,
+		},
+		.capture = {
+			.stream_name = "INT6 MI2S Capture",
+			.aif_name = "INT6_MI2S_TX",
+			.rates = SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_8000 |
+			SNDRV_PCM_RATE_16000,
+			.formats = SNDRV_PCM_FMTBIT_S16_LE,
+			.rate_min =     8000,
+			.rate_max =     48000,
+		},
+		.ops = &msm_dai_q6_mi2s_ops,
+		.id = MSM_INT6_MI2S,
+		.probe = msm_dai_q6_dai_mi2s_probe,
+		.remove = msm_dai_q6_dai_mi2s_remove,
+	},
+};
+
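+/*
+ * Translate a bitmask of SD lines from platform data into the AFE
+ * channel-mode enum; only the line combinations the AFE supports
+ * (SD0|SD1, SD2|SD3, SD0-SD2, SD0-SD3) are legal.
+ */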
+static int msm_dai_q6_mi2s_get_lineconfig(u16 sd_lines, u16 *config_ptr,
+					  unsigned int *ch_cnt)
+{
+	u8 num_of_sd_lines;
+
+	num_of_sd_lines = num_of_bits_set(sd_lines);
+	switch (num_of_sd_lines) {
+	case 0:
+		pr_debug("%s: no line is assigned\n", __func__);
+		break;
+	case 1:
+		switch (sd_lines) {
+		case MSM_MI2S_SD0:
+			*config_ptr = AFE_PORT_I2S_SD0;
+			break;
+		case MSM_MI2S_SD1:
+			*config_ptr = AFE_PORT_I2S_SD1;
+			break;
+		case MSM_MI2S_SD2:
+			*config_ptr = AFE_PORT_I2S_SD2;
+			break;
+		case MSM_MI2S_SD3:
+			*config_ptr = AFE_PORT_I2S_SD3;
+			break;
+		default:
+			pr_err("%s: invalid SD lines %d\n",
+				   __func__, sd_lines);
+			goto error_invalid_data;
+		}
+		break;
+	case 2:
+		switch (sd_lines) {
+		case MSM_MI2S_SD0 | MSM_MI2S_SD1:
+			*config_ptr = AFE_PORT_I2S_QUAD01;
+			break;
+		case MSM_MI2S_SD2 | MSM_MI2S_SD3:
+			*config_ptr = AFE_PORT_I2S_QUAD23;
+			break;
+		default:
+			pr_err("%s: invalid SD lines %d\n",
+				   __func__, sd_lines);
+			goto error_invalid_data;
+		}
+		break;
+	case 3:
+		switch (sd_lines) {
+		case MSM_MI2S_SD0 | MSM_MI2S_SD1 | MSM_MI2S_SD2:
+			*config_ptr = AFE_PORT_I2S_6CHS;
+			break;
+		default:
+			pr_err("%s: invalid SD lines %d\n",
+				   __func__, sd_lines);
+			goto error_invalid_data;
+		}
+		break;
+	case 4:
+		switch (sd_lines) {
+		case MSM_MI2S_SD0 | MSM_MI2S_SD1 | MSM_MI2S_SD2 | MSM_MI2S_SD3:
+			*config_ptr = AFE_PORT_I2S_8CHS;
+			break;
+		default:
+			pr_err("%s: invalid SD lines %d\n",
+				   __func__, sd_lines);
+			goto error_invalid_data;
+		}
+		break;
+	default:
+		pr_err("%s: invalid SD lines %d\n", __func__, num_of_sd_lines);
+		goto error_invalid_data;
+	}
+	*ch_cnt = num_of_sd_lines;
+	return 0;
+
+error_invalid_data:
+	pr_err("%s: invalid data\n", __func__);
+	return -EINVAL;
+}
+
+static int msm_dai_q6_mi2s_platform_data_validation(
+	struct platform_device *pdev, struct snd_soc_dai_driver *dai_driver)
+{
+	struct msm_dai_q6_mi2s_dai_data *dai_data = dev_get_drvdata(&pdev->dev);
+	struct msm_mi2s_pdata *mi2s_pdata =
+			(struct msm_mi2s_pdata *) pdev->dev.platform_data;
+	unsigned int ch_cnt;
+	int rc = 0;
+	u16 sd_line;
+
+	if (mi2s_pdata == NULL) {
+		pr_err("%s: mi2s_pdata NULL", __func__);
+		return -EINVAL;
+	}
+
+	rc = msm_dai_q6_mi2s_get_lineconfig(mi2s_pdata->rx_sd_lines,
+					    &sd_line, &ch_cnt);
+
+	if (IS_ERR_VALUE(rc)) {
+		dev_err(&pdev->dev, "invalid MI2S RX sd line config\n");
+		goto rtn;
+	}
+
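+	/* Each SD line carries a stereo pair, so channels_max = 2 * lines. */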
+	if (ch_cnt) {
+		dai_data->rx_dai.mi2s_dai_data.port_config.i2s.channel_mode =
+		sd_line;
+		dai_data->rx_dai.pdata_mi2s_lines = sd_line;
+		dai_driver->playback.channels_min = 1;
+		dai_driver->playback.channels_max = ch_cnt << 1;
+	} else {
+		dai_driver->playback.channels_min = 0;
+		dai_driver->playback.channels_max = 0;
+	}
+	rc = msm_dai_q6_mi2s_get_lineconfig(mi2s_pdata->tx_sd_lines,
+					    &sd_line, &ch_cnt);
+
+	if (IS_ERR_VALUE(rc)) {
+		dev_err(&pdev->dev, "invalid MI2S TX sd line config\n");
+		goto rtn;
+	}
+
+	if (ch_cnt) {
+		dai_data->tx_dai.mi2s_dai_data.port_config.i2s.channel_mode =
+		sd_line;
+		dai_data->tx_dai.pdata_mi2s_lines = sd_line;
+		dai_driver->capture.channels_min = 1;
+		dai_driver->capture.channels_max = ch_cnt << 1;
+	} else {
+		dai_driver->capture.channels_min = 0;
+		dai_driver->capture.channels_max = 0;
+	}
+
+	dev_dbg(&pdev->dev, "%s: playback sdline 0x%x capture sdline 0x%x\n",
+		__func__, dai_data->rx_dai.pdata_mi2s_lines,
+		dai_data->tx_dai.pdata_mi2s_lines);
+	dev_dbg(&pdev->dev, "%s: playback ch_max %d capture ch_mx %d\n",
+		__func__, dai_driver->playback.channels_max,
+		dai_driver->capture.channels_max);
+rtn:
+	return rc;
+}
+
+static const struct snd_soc_component_driver msm_q6_mi2s_dai_component = {
+	.name		= "msm-dai-q6-mi2s",
+};
+
+static int msm_dai_q6_mi2s_dev_probe(struct platform_device *pdev)
+{
+	struct msm_dai_q6_mi2s_dai_data *dai_data;
+	const char *q6_mi2s_dev_id = "qcom,msm-dai-q6-mi2s-dev-id";
+	u32 tx_line = 0;
+	u32 rx_line = 0;
+	u32 mi2s_intf = 0;
+	struct msm_mi2s_pdata *mi2s_pdata;
+	int rc;
+
+	rc = of_property_read_u32(pdev->dev.of_node, q6_mi2s_dev_id,
+				  &mi2s_intf);
+	if (rc) {
+		dev_err(&pdev->dev,
+			"%s: missing %s in dt node\n", __func__,
+			q6_mi2s_dev_id);
+		goto rtn;
+	}
+
+	dev_dbg(&pdev->dev, "dev name %s dev id 0x%x\n", dev_name(&pdev->dev),
+		mi2s_intf);
+
+	if ((mi2s_intf < MSM_MI2S_MIN || mi2s_intf > MSM_MI2S_MAX)
+		|| (mi2s_intf >= ARRAY_SIZE(msm_dai_q6_mi2s_dai))) {
+		dev_err(&pdev->dev,
+			"%s: Invalid MI2S ID %u from Device Tree\n",
+			__func__, mi2s_intf);
+		rc = -ENXIO;
+		goto rtn;
+	}
+
+	pdev->id = mi2s_intf;
+
+	mi2s_pdata = kzalloc(sizeof(struct msm_mi2s_pdata), GFP_KERNEL);
+	if (!mi2s_pdata) {
+		dev_err(&pdev->dev, "fail to allocate mi2s_pdata data\n");
+		rc = -ENOMEM;
+		goto rtn;
+	}
+
+	rc = of_property_read_u32(pdev->dev.of_node, "qcom,msm-mi2s-rx-lines",
+				  &rx_line);
+	if (rc) {
+		dev_err(&pdev->dev, "%s: Rx line from DT file %s\n", __func__,
+			"qcom,msm-mi2s-rx-lines");
+		goto free_pdata;
+	}
+
+	rc = of_property_read_u32(pdev->dev.of_node, "qcom,msm-mi2s-tx-lines",
+				  &tx_line);
+	if (rc) {
+		dev_err(&pdev->dev, "%s: Tx line from DT file %s\n", __func__,
+			"qcom,msm-mi2s-tx-lines");
+		goto free_pdata;
+	}
+	dev_dbg(&pdev->dev, "dev name %s Rx line 0x%x , Tx ine 0x%x\n",
+		dev_name(&pdev->dev), rx_line, tx_line);
+	mi2s_pdata->rx_sd_lines = rx_line;
+	mi2s_pdata->tx_sd_lines = tx_line;
+	mi2s_pdata->intf_id = mi2s_intf;
+
+	dai_data = kzalloc(sizeof(struct msm_dai_q6_mi2s_dai_data),
+			   GFP_KERNEL);
+	if (!dai_data) {
+		dev_err(&pdev->dev, "fail to allocate dai data\n");
+		rc = -ENOMEM;
+		goto free_pdata;
+	} else
+		dev_set_drvdata(&pdev->dev, dai_data);
+
+	pdev->dev.platform_data = mi2s_pdata;
+
+	rc = msm_dai_q6_mi2s_platform_data_validation(pdev,
+			&msm_dai_q6_mi2s_dai[mi2s_intf]);
+	if (IS_ERR_VALUE(rc))
+		goto free_dai_data;
+
+	rc = snd_soc_register_component(&pdev->dev, &msm_q6_mi2s_dai_component,
+	&msm_dai_q6_mi2s_dai[mi2s_intf], 1);
+
+	if (IS_ERR_VALUE(rc))
+		goto err_register;
+	return 0;
+
+err_register:
+	dev_err(&pdev->dev, "fail to msm_dai_q6_mi2s_dev_probe\n");
+free_dai_data:
+	kfree(dai_data);
+free_pdata:
+	kfree(mi2s_pdata);
+rtn:
+	return rc;
+}
+
+static int msm_dai_q6_mi2s_dev_remove(struct platform_device *pdev)
+{
+	snd_soc_unregister_component(&pdev->dev);
+	return 0;
+}
+
+static const struct snd_soc_component_driver msm_dai_q6_component = {
+	.name		= "msm-dai-q6-dev",
+};
+
+static int msm_dai_q6_dev_probe(struct platform_device *pdev)
+{
+	int rc, id, i, len;
+	const char *q6_dev_id = "qcom,msm-dai-q6-dev-id";
+	char stream_name[80];
+
+	rc = of_property_read_u32(pdev->dev.of_node, q6_dev_id, &id);
+	if (rc) {
+		dev_err(&pdev->dev,
+			"%s: missing %s in dt node\n", __func__, q6_dev_id);
+		return rc;
+	}
+
+	pdev->id = id;
+
+	pr_debug("%s: dev name %s, id:%d\n", __func__,
+		 dev_name(&pdev->dev), pdev->id);
+
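+	/*
+	 * Each AFE device id selects its DAI by stream name; the
+	 * register_* labels below share one lookup-and-register loop
+	 * per DAI table.
+	 */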
+	switch (id) {
+	case SLIMBUS_0_RX:
+		strlcpy(stream_name, "Slimbus Playback", 80);
+		goto register_slim_playback;
+	case SLIMBUS_2_RX:
+		strlcpy(stream_name, "Slimbus2 Playback", 80);
+		goto register_slim_playback;
+	case SLIMBUS_1_RX:
+		strlcpy(stream_name, "Slimbus1 Playback", 80);
+		goto register_slim_playback;
+	case SLIMBUS_3_RX:
+		strlcpy(stream_name, "Slimbus3 Playback", 80);
+		goto register_slim_playback;
+	case SLIMBUS_4_RX:
+		strlcpy(stream_name, "Slimbus4 Playback", 80);
+		goto register_slim_playback;
+	case SLIMBUS_5_RX:
+		strlcpy(stream_name, "Slimbus5 Playback", 80);
+		goto register_slim_playback;
+	case SLIMBUS_6_RX:
+		strlcpy(stream_name, "Slimbus6 Playback", 80);
+		goto register_slim_playback;
+	case SLIMBUS_7_RX:
+		strlcpy(stream_name, "Slimbus7 Playback", sizeof(stream_name));
+		goto register_slim_playback;
+	case SLIMBUS_8_RX:
+		strlcpy(stream_name, "Slimbus8 Playback", sizeof(stream_name));
+		goto register_slim_playback;
+register_slim_playback:
+		rc = -ENODEV;
+		len = strnlen(stream_name, 80);
+		for (i = 0; i < ARRAY_SIZE(msm_dai_q6_slimbus_rx_dai); i++) {
+			if (msm_dai_q6_slimbus_rx_dai[i].playback.stream_name &&
+			    !strncmp(stream_name,
+				     msm_dai_q6_slimbus_rx_dai[i].playback.stream_name,
+				     len)) {
+				rc = snd_soc_register_component(&pdev->dev,
+				&msm_dai_q6_component, &msm_dai_q6_slimbus_rx_dai[i], 1);
+				break;
+			}
+		}
+		if (rc)
+			pr_err("%s: Device not found stream name %s\n",
+				__func__, stream_name);
+		break;
+	case SLIMBUS_0_TX:
+		strlcpy(stream_name, "Slimbus Capture", 80);
+		goto register_slim_capture;
+	case SLIMBUS_1_TX:
+		strlcpy(stream_name, "Slimbus1 Capture", 80);
+		goto register_slim_capture;
+	case SLIMBUS_2_TX:
+		strlcpy(stream_name, "Slimbus2 Capture", 80);
+		goto register_slim_capture;
+	case SLIMBUS_3_TX:
+		strlcpy(stream_name, "Slimbus3 Capture", 80);
+		goto register_slim_capture;
+	case SLIMBUS_4_TX:
+		strlcpy(stream_name, "Slimbus4 Capture", 80);
+		goto register_slim_capture;
+	case SLIMBUS_5_TX:
+		strlcpy(stream_name, "Slimbus5 Capture", 80);
+		goto register_slim_capture;
+	case SLIMBUS_6_TX:
+		strlcpy(stream_name, "Slimbus6 Capture", 80);
+		goto register_slim_capture;
+	case SLIMBUS_7_TX:
+		strlcpy(stream_name, "Slimbus7 Capture", sizeof(stream_name));
+		goto register_slim_capture;
+	case SLIMBUS_8_TX:
+		strlcpy(stream_name, "Slimbus8 Capture", sizeof(stream_name));
+		goto register_slim_capture;
+register_slim_capture:
+		rc = -ENODEV;
+		len = strnlen(stream_name, 80);
+		for (i = 0; i < ARRAY_SIZE(msm_dai_q6_slimbus_tx_dai); i++) {
+			if (msm_dai_q6_slimbus_tx_dai[i].capture.stream_name &&
+			    !strncmp(stream_name,
+				     msm_dai_q6_slimbus_tx_dai[i].capture.stream_name,
+				     len)) {
+				rc = snd_soc_register_component(&pdev->dev,
+				&msm_dai_q6_component, &msm_dai_q6_slimbus_tx_dai[i], 1);
+				break;
+			}
+		}
+		if (rc)
+			pr_err("%s: Device not found stream name %s\n",
+				__func__, stream_name);
+		break;
+	case INT_BT_SCO_RX:
+		rc = snd_soc_register_component(&pdev->dev, &msm_dai_q6_component,
+		&msm_dai_q6_bt_sco_rx_dai, 1);
+		break;
+	case INT_BT_SCO_TX:
+		rc = snd_soc_register_component(&pdev->dev, &msm_dai_q6_component,
+		&msm_dai_q6_bt_sco_tx_dai, 1);
+		break;
+	case INT_BT_A2DP_RX:
+		rc = snd_soc_register_component(&pdev->dev,
+		&msm_dai_q6_component, &msm_dai_q6_bt_a2dp_rx_dai, 1);
+		break;
+	case INT_FM_RX:
+		rc = snd_soc_register_component(&pdev->dev, &msm_dai_q6_component,
+		&msm_dai_q6_fm_rx_dai, 1);
+		break;
+	case INT_FM_TX:
+		rc = snd_soc_register_component(&pdev->dev,
+		&msm_dai_q6_component, &msm_dai_q6_fm_tx_dai, 1);
+		break;
+	case AFE_PORT_ID_USB_RX:
+		rc = snd_soc_register_component(&pdev->dev,
+		&msm_dai_q6_component,
+		&msm_dai_q6_usb_rx_dai, 1);
+		break;
+	case AFE_PORT_ID_USB_TX:
+		rc = snd_soc_register_component(&pdev->dev,
+		&msm_dai_q6_component,
+		&msm_dai_q6_usb_tx_dai, 1);
+		break;
+	case RT_PROXY_DAI_001_RX:
+		strlcpy(stream_name, "AFE Playback", 80);
+		goto register_afe_playback;
+	case RT_PROXY_DAI_002_RX:
+		strlcpy(stream_name, "AFE-PROXY RX", 80);
+register_afe_playback:
+		rc = -ENODEV;
+		len = strnlen(stream_name, 80);
+		for (i = 0; i < ARRAY_SIZE(msm_dai_q6_afe_rx_dai); i++) {
+			if (msm_dai_q6_afe_rx_dai[i].playback.stream_name &&
+				!strncmp(stream_name,
+				msm_dai_q6_afe_rx_dai[i].playback.stream_name,
+				len)) {
+				rc = snd_soc_register_component(&pdev->dev,
+				&msm_dai_q6_component, &msm_dai_q6_afe_rx_dai[i], 1);
+				break;
+			}
+		}
+		if (rc)
+			pr_err("%s: Device not found stream name %s\n",
+			__func__, stream_name);
+		break;
+	case RT_PROXY_DAI_001_TX:
+		strlcpy(stream_name, "AFE-PROXY TX", 80);
+		goto register_afe_capture;
+	case RT_PROXY_DAI_002_TX:
+		strlcpy(stream_name, "AFE Capture", 80);
+register_afe_capture:
+		rc = -ENODEV;
+		len = strnlen(stream_name, 80);
+		for (i = 0; i < ARRAY_SIZE(msm_dai_q6_afe_tx_dai); i++) {
+			if (msm_dai_q6_afe_tx_dai[i].capture.stream_name &&
+				!strncmp(stream_name,
+				msm_dai_q6_afe_tx_dai[i].capture.stream_name,
+				len)) {
+				rc = snd_soc_register_component(&pdev->dev,
+				&msm_dai_q6_component, &msm_dai_q6_afe_tx_dai[i], 1);
+				break;
+			}
+		}
+		if (rc)
+			pr_err("%s: Device not found stream name %s\n",
+			__func__, stream_name);
+		break;
+	case VOICE_PLAYBACK_TX:
+		strlcpy(stream_name, "Voice Farend Playback", 80);
+		goto register_voice_playback;
+	case VOICE2_PLAYBACK_TX:
+		strlcpy(stream_name, "Voice2 Farend Playback", 80);
+register_voice_playback:
+		rc = -ENODEV;
+		len = strnlen(stream_name, 80);
+		for (i = 0; i < ARRAY_SIZE(msm_dai_q6_voc_playback_dai); i++) {
+			if (msm_dai_q6_voc_playback_dai[i].playback.stream_name &&
+			    !strcmp(stream_name,
+				    msm_dai_q6_voc_playback_dai[i].playback.stream_name)) {
+				rc = snd_soc_register_component(&pdev->dev,
+					&msm_dai_q6_component,
+					&msm_dai_q6_voc_playback_dai[i], 1);
+				break;
+			}
+		}
+		if (rc)
+			pr_err("%s Device not found stream name %s\n",
+			       __func__, stream_name);
+		break;
+	case VOICE_RECORD_RX:
+		strlcpy(stream_name, "Voice Downlink Capture", 80);
+		goto register_uplink_capture;
+	case VOICE_RECORD_TX:
+		strlcpy(stream_name, "Voice Uplink Capture", 80);
+register_uplink_capture:
+		rc = -ENODEV;
+		len = strnlen(stream_name, 80);
+		for (i = 0; i < ARRAY_SIZE(msm_dai_q6_incall_record_dai); i++) {
+			if (msm_dai_q6_incall_record_dai[i].capture.stream_name &&
+				!strncmp(stream_name,
+				msm_dai_q6_incall_record_dai[i].capture.stream_name,
+				len)) {
+				rc = snd_soc_register_component(&pdev->dev,
+				&msm_dai_q6_component, &msm_dai_q6_incall_record_dai[i], 1);
+				break;
+			}
+		}
+		if (rc)
+			pr_err("%s: Device not found stream name %s\n",
+			__func__, stream_name);
+		break;
+
+	default:
+		rc = -ENODEV;
+		break;
+	}
+
+	return rc;
+}
+
+static int msm_dai_q6_dev_remove(struct platform_device *pdev)
+{
+	snd_soc_unregister_component(&pdev->dev);
+	return 0;
+}
+
+static const struct of_device_id msm_dai_q6_dev_dt_match[] = {
+	{ .compatible = "qcom,msm-dai-q6-dev", },
+	{ }
+};
+MODULE_DEVICE_TABLE(of, msm_dai_q6_dev_dt_match);
+
+static struct platform_driver msm_dai_q6_dev = {
+	.probe  = msm_dai_q6_dev_probe,
+	.remove = msm_dai_q6_dev_remove,
+	.driver = {
+		.name = "msm-dai-q6-dev",
+		.owner = THIS_MODULE,
+		.of_match_table = msm_dai_q6_dev_dt_match,
+	},
+};
+
+static int msm_dai_q6_probe(struct platform_device *pdev)
+{
+	int rc;
+
+	pr_debug("%s: dev name %s, id:%d\n", __func__,
+		 dev_name(&pdev->dev), pdev->id);
+	rc = of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev);
+	if (rc) {
+		dev_err(&pdev->dev, "%s: failed to add child nodes, rc=%d\n",
+			__func__, rc);
+	} else {
+		dev_dbg(&pdev->dev, "%s: added child node\n", __func__);
+	}
+
+	return rc;
+}
+
+static int msm_dai_q6_remove(struct platform_device *pdev)
+{
+	return 0;
+}
+
+static const struct of_device_id msm_dai_q6_dt_match[] = {
+	{ .compatible = "qcom,msm-dai-q6", },
+	{ }
+};
+MODULE_DEVICE_TABLE(of, msm_dai_q6_dt_match);
+static struct platform_driver msm_dai_q6 = {
+	.probe  = msm_dai_q6_probe,
+	.remove = msm_dai_q6_remove,
+	.driver = {
+		.name = "msm-dai-q6",
+		.owner = THIS_MODULE,
+		.of_match_table = msm_dai_q6_dt_match,
+	},
+};
+
+static int msm_dai_mi2s_q6_probe(struct platform_device *pdev)
+{
+	int rc;
+
+	rc = of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev);
+	if (rc) {
+		dev_err(&pdev->dev, "%s: failed to add child nodes, rc=%d\n",
+			__func__, rc);
+	} else {
+		dev_dbg(&pdev->dev, "%s: added child node\n", __func__);
+	}
+	return rc;
+}
+
+static int msm_dai_mi2s_q6_remove(struct platform_device *pdev)
+{
+	return 0;
+}
+
+static const struct of_device_id msm_dai_mi2s_dt_match[] = {
+	{ .compatible = "qcom,msm-dai-mi2s", },
+	{ }
+};
+
+MODULE_DEVICE_TABLE(of, msm_dai_mi2s_dt_match);
+
+static struct platform_driver msm_dai_mi2s_q6 = {
+	.probe  = msm_dai_mi2s_q6_probe,
+	.remove = msm_dai_mi2s_q6_remove,
+	.driver = {
+		.name = "msm-dai-mi2s",
+		.owner = THIS_MODULE,
+		.of_match_table = msm_dai_mi2s_dt_match,
+	},
+};
+
+static const struct of_device_id msm_dai_q6_mi2s_dev_dt_match[] = {
+	{ .compatible = "qcom,msm-dai-q6-mi2s", },
+	{ }
+};
+
+MODULE_DEVICE_TABLE(of, msm_dai_q6_mi2s_dev_dt_match);
+
+static struct platform_driver msm_dai_q6_mi2s_driver = {
+	.probe  = msm_dai_q6_mi2s_dev_probe,
+	.remove  = msm_dai_q6_mi2s_dev_remove,
+	.driver = {
+		.name = "msm-dai-q6-mi2s",
+		.owner = THIS_MODULE,
+		.of_match_table = msm_dai_q6_mi2s_dev_dt_match,
+	},
+};
+
+static int msm_dai_q6_spdif_dev_probe(struct platform_device *pdev)
+{
+	int rc;
+
+	pdev->id = AFE_PORT_ID_SPDIF_RX;
+
+	pr_debug("%s: dev name %s, id:%d\n", __func__,
+			dev_name(&pdev->dev), pdev->id);
+
+	rc = snd_soc_register_component(&pdev->dev,
+			&msm_dai_spdif_q6_component,
+			&msm_dai_q6_spdif_spdif_rx_dai, 1);
+	return rc;
+}
+
+static int msm_dai_q6_spdif_dev_remove(struct platform_device *pdev)
+{
+	snd_soc_unregister_component(&pdev->dev);
+	return 0;
+}
+
+static const struct of_device_id msm_dai_q6_spdif_dt_match[] = {
+	{.compatible = "qcom,msm-dai-q6-spdif"},
+	{}
+};
+MODULE_DEVICE_TABLE(of, msm_dai_q6_spdif_dt_match);
+
+static struct platform_driver msm_dai_q6_spdif_driver = {
+	.probe  = msm_dai_q6_spdif_dev_probe,
+	.remove = msm_dai_q6_spdif_dev_remove,
+	.driver = {
+		.name = "msm-dai-q6-spdif",
+		.owner = THIS_MODULE,
+		.of_match_table = msm_dai_q6_spdif_dt_match,
+	},
+};
+
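+/*
+ * Note on the helper below: "mode" comes from the
+ * "qcom,msm-cpudai-tdm-clk-internal" DT property parsed in
+ * msm_dai_tdm_q6_probe(). A non-zero mode picks the group's *_IBIT clock
+ * id, zero picks *_EBIT. Reading IBIT/EBIT as internal (LPASS-driven)
+ * versus external bit clock is an assumption based on the property and
+ * macro names, not something this patch states.
+ */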
+static int msm_dai_q6_tdm_set_clk_param(u32 group_id,
+					struct afe_clk_set *clk_set, u32 mode)
+{
+	switch (group_id) {
+	case AFE_GROUP_DEVICE_ID_PRIMARY_TDM_RX:
+	case AFE_GROUP_DEVICE_ID_PRIMARY_TDM_TX:
+		if (mode)
+			clk_set->clk_id = Q6AFE_LPASS_CLK_ID_PRI_TDM_IBIT;
+		else
+			clk_set->clk_id = Q6AFE_LPASS_CLK_ID_PRI_TDM_EBIT;
+		break;
+	case AFE_GROUP_DEVICE_ID_SECONDARY_TDM_RX:
+	case AFE_GROUP_DEVICE_ID_SECONDARY_TDM_TX:
+		if (mode)
+			clk_set->clk_id = Q6AFE_LPASS_CLK_ID_SEC_TDM_IBIT;
+		else
+			clk_set->clk_id = Q6AFE_LPASS_CLK_ID_SEC_TDM_EBIT;
+		break;
+	case AFE_GROUP_DEVICE_ID_TERTIARY_TDM_RX:
+	case AFE_GROUP_DEVICE_ID_TERTIARY_TDM_TX:
+		if (mode)
+			clk_set->clk_id = Q6AFE_LPASS_CLK_ID_TER_TDM_IBIT;
+		else
+			clk_set->clk_id = Q6AFE_LPASS_CLK_ID_TER_TDM_EBIT;
+		break;
+	case AFE_GROUP_DEVICE_ID_QUATERNARY_TDM_RX:
+	case AFE_GROUP_DEVICE_ID_QUATERNARY_TDM_TX:
+		if (mode)
+			clk_set->clk_id = Q6AFE_LPASS_CLK_ID_QUAD_TDM_IBIT;
+		else
+			clk_set->clk_id = Q6AFE_LPASS_CLK_ID_QUAD_TDM_EBIT;
+		break;
+	default:
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static int msm_dai_tdm_q6_probe(struct platform_device *pdev)
+{
+	int rc = 0;
+	const uint32_t *port_id_array = NULL;
+	uint32_t array_length = 0;
+	int i = 0;
+	int group_idx = 0;
+	u32 clk_mode = 0;
+
+	/* extract tdm group info into static */
+	rc = of_property_read_u32(pdev->dev.of_node,
+		"qcom,msm-cpudai-tdm-group-id",
+		(u32 *)&tdm_group_cfg.group_id);
+	if (rc) {
+		dev_err(&pdev->dev, "%s: Group ID from DT file %s\n",
+			__func__, "qcom,msm-cpudai-tdm-group-id");
+		goto rtn;
+	}
+	dev_dbg(&pdev->dev, "%s: Group ID from DT file 0x%x\n",
+		__func__, tdm_group_cfg.group_id);
+
+	dev_info(&pdev->dev, "%s: dev_name: %s group_id: 0x%x\n",
+		__func__, dev_name(&pdev->dev), tdm_group_cfg.group_id);
+
+	rc = of_property_read_u32(pdev->dev.of_node,
+		"qcom,msm-cpudai-tdm-group-num-ports",
+		&num_tdm_group_ports);
+	if (rc) {
+		dev_err(&pdev->dev, "%s: Group Num Ports from DT file %s\n",
+			__func__, "qcom,msm-cpudai-tdm-group-num-ports");
+		goto rtn;
+	}
+	dev_dbg(&pdev->dev, "%s: Group Num Ports from DT file 0x%x\n",
+		__func__, num_tdm_group_ports);
+
+	if (num_tdm_group_ports > AFE_GROUP_DEVICE_NUM_PORTS) {
+		dev_err(&pdev->dev, "%s Group Num Ports %d greater than Max %d\n",
+			__func__, num_tdm_group_ports,
+			AFE_GROUP_DEVICE_NUM_PORTS);
+		rc = -EINVAL;
+		goto rtn;
+	}
+
+	port_id_array = of_get_property(pdev->dev.of_node,
+		"qcom,msm-cpudai-tdm-group-port-id",
+		&array_length);
+	if (port_id_array == NULL) {
+		dev_err(&pdev->dev, "%s port_id_array is not valid\n",
+			__func__);
+		rc = -EINVAL;
+		goto rtn;
+	}
+	if (array_length != sizeof(uint32_t) * num_tdm_group_ports) {
+		dev_err(&pdev->dev, "%s array_length is %d, expected is %zd\n",
+			__func__, array_length,
+			sizeof(uint32_t) * num_tdm_group_ports);
+		rc = -EINVAL;
+		goto rtn;
+	}
+
+	for (i = 0; i < num_tdm_group_ports; i++)
+		tdm_group_cfg.port_id[i] =
+			(u16)be32_to_cpu(port_id_array[i]);
+	/* unused indices are filled with AFE_PORT_INVALID */
+	for (i = num_tdm_group_ports; i < AFE_GROUP_DEVICE_NUM_PORTS; i++)
+		tdm_group_cfg.port_id[i] =
+			AFE_PORT_INVALID;
+
+	/* extract tdm clk info into static */
+	rc = of_property_read_u32(pdev->dev.of_node,
+		"qcom,msm-cpudai-tdm-clk-rate",
+		&tdm_clk_set.clk_freq_in_hz);
+	if (rc) {
+		dev_err(&pdev->dev, "%s: Clk Rate from DT file %s\n",
+			__func__, "qcom,msm-cpudai-tdm-clk-rate");
+		goto rtn;
+	}
+	dev_dbg(&pdev->dev, "%s: Clk Rate from DT file %d\n",
+		__func__, tdm_clk_set.clk_freq_in_hz);
+
+	/* initialize static tdm clk attribute to default value */
+	tdm_clk_set.clk_attri = Q6AFE_LPASS_CLK_ATTRIBUTE_INVERT_COUPLE_NO;
+
+	/* extract tdm clk attribute into static */
+	if (of_find_property(pdev->dev.of_node,
+			"qcom,msm-cpudai-tdm-clk-attribute", NULL)) {
+		rc = of_property_read_u16(pdev->dev.of_node,
+			"qcom,msm-cpudai-tdm-clk-attribute",
+			&tdm_clk_set.clk_attri);
+		if (rc) {
+			dev_err(&pdev->dev, "%s: clk attribute from DT file %s\n",
+				__func__, "qcom,msm-cpudai-tdm-clk-attribute");
+			goto rtn;
+		}
+		dev_dbg(&pdev->dev, "%s: clk attribute from DT file %d\n",
+			__func__, tdm_clk_set.clk_attri);
+	} else {
+		dev_dbg(&pdev->dev, "%s: No optional clk attribute found\n",
+			__func__);
+	}
+
+	/* extract tdm clk src master/slave info into static */
+	rc = of_property_read_u32(pdev->dev.of_node,
+		"qcom,msm-cpudai-tdm-clk-internal",
+		&clk_mode);
+	if (rc) {
+		dev_err(&pdev->dev, "%s: Clk id from DT file %s\n",
+			__func__, "qcom,msm-cpudai-tdm-clk-internal");
+		goto rtn;
+	}
+	dev_dbg(&pdev->dev, "%s: Clk id from DT file %d\n",
+		__func__, clk_mode);
+
+	rc = msm_dai_q6_tdm_set_clk_param(tdm_group_cfg.group_id,
+					  &tdm_clk_set, clk_mode);
+	if (rc) {
+		dev_err(&pdev->dev, "%s: group id not supported 0x%x\n",
+			__func__, tdm_group_cfg.group_id);
+		goto rtn;
+	}
+
+	/* other initializations within device group */
+	group_idx = msm_dai_q6_get_group_idx(tdm_group_cfg.group_id);
+	if (group_idx < 0) {
+		dev_err(&pdev->dev, "%s: group id 0x%x not supported\n",
+			__func__, tdm_group_cfg.group_id);
+		rc = -EINVAL;
+		goto rtn;
+	}
+	atomic_set(&tdm_group_ref[group_idx], 0);
+
+	/* probe child node info */
+	rc = of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev);
+	if (rc) {
+		dev_err(&pdev->dev, "%s: failed to add child nodes, rc=%d\n",
+			__func__, rc);
+		goto rtn;
+	}
+	dev_dbg(&pdev->dev, "%s: added child node\n", __func__);
+
+rtn:
+	return rc;
+}
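+
+/*
+ * Hypothetical device tree fragment for the probe above; only the
+ * property names are taken from the code, every value is an invented
+ * example (a single-port group clocked internally at 12.288 MHz):
+ *
+ *	qcom,msm-dai-tdm-quat-rx {
+ *		compatible = "qcom,msm-dai-tdm";
+ *		qcom,msm-cpudai-tdm-group-id = <37136>;
+ *		qcom,msm-cpudai-tdm-group-num-ports = <1>;
+ *		qcom,msm-cpudai-tdm-group-port-id = <36896>;
+ *		qcom,msm-cpudai-tdm-clk-rate = <12288000>;
+ *		qcom,msm-cpudai-tdm-clk-internal = <1>;
+ *	};
+ */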
+
+static int msm_dai_tdm_q6_remove(struct platform_device *pdev)
+{
+	return 0;
+}
+
+static const struct of_device_id msm_dai_tdm_dt_match[] = {
+	{ .compatible = "qcom,msm-dai-tdm", },
+	{}
+};
+
+MODULE_DEVICE_TABLE(of, msm_dai_tdm_dt_match);
+
+static struct platform_driver msm_dai_tdm_q6 = {
+	.probe  = msm_dai_tdm_q6_probe,
+	.remove = msm_dai_tdm_q6_remove,
+	.driver = {
+		.name = "msm-dai-tdm",
+		.owner = THIS_MODULE,
+		.of_match_table = msm_dai_tdm_dt_match,
+	},
+};
+
+static int msm_dai_q6_tdm_data_format_put(struct snd_kcontrol *kcontrol,
+				      struct snd_ctl_elem_value *ucontrol)
+{
+	struct msm_dai_q6_tdm_dai_data *dai_data = kcontrol->private_data;
+	int value = ucontrol->value.integer.value[0];
+
+	switch (value) {
+	case 0:
+		dai_data->port_cfg.tdm.data_format = AFE_LINEAR_PCM_DATA;
+		break;
+	case 1:
+		dai_data->port_cfg.tdm.data_format = AFE_NON_LINEAR_DATA;
+		break;
+	case 2:
+		dai_data->port_cfg.tdm.data_format = AFE_GENERIC_COMPRESSED;
+		break;
+	default:
+		pr_err("%s: invalid data_format %d\n", __func__, value);
+		break;
+	}
+	pr_debug("%s: data_format = %d\n",
+		__func__, dai_data->port_cfg.tdm.data_format);
+	return 0;
+}
+
+static int msm_dai_q6_tdm_data_format_get(struct snd_kcontrol *kcontrol,
+				      struct snd_ctl_elem_value *ucontrol)
+{
+	struct msm_dai_q6_tdm_dai_data *dai_data = kcontrol->private_data;
+
+	ucontrol->value.integer.value[0] =
+		dai_data->port_cfg.tdm.data_format;
+	pr_debug("%s: data_format = %d\n",
+		__func__, dai_data->port_cfg.tdm.data_format);
+	return 0;
+}
+
+static int msm_dai_q6_tdm_header_type_put(struct snd_kcontrol *kcontrol,
+				      struct snd_ctl_elem_value *ucontrol)
+{
+	struct msm_dai_q6_tdm_dai_data *dai_data = kcontrol->private_data;
+	int value = ucontrol->value.integer.value[0];
+
+	dai_data->port_cfg.custom_tdm_header.header_type = value;
+	pr_debug("%s: header_type = %d\n",
+		__func__,
+		dai_data->port_cfg.custom_tdm_header.header_type);
+	return 0;
+}
+
+static int msm_dai_q6_tdm_header_type_get(struct snd_kcontrol *kcontrol,
+				      struct snd_ctl_elem_value *ucontrol)
+{
+	struct msm_dai_q6_tdm_dai_data *dai_data = kcontrol->private_data;
+
+	ucontrol->value.integer.value[0] =
+		dai_data->port_cfg.custom_tdm_header.header_type;
+	pr_debug("%s: header_type = %d\n",
+		__func__,
+		dai_data->port_cfg.custom_tdm_header.header_type);
+	return 0;
+}
+
+static int msm_dai_q6_tdm_header_put(struct snd_kcontrol *kcontrol,
+				      struct snd_ctl_elem_value *ucontrol)
+{
+	struct msm_dai_q6_tdm_dai_data *dai_data = kcontrol->private_data;
+	int i = 0;
+
+	for (i = 0; i < AFE_CUSTOM_TDM_HEADER_MAX_CNT; i++) {
+		dai_data->port_cfg.custom_tdm_header.header[i] =
+			(u16)ucontrol->value.integer.value[i];
+		pr_debug("%s: header #%d = 0x%x\n",
+			__func__, i,
+			dai_data->port_cfg.custom_tdm_header.header[i]);
+	}
+	return 0;
+}
+
+static int msm_dai_q6_tdm_header_get(struct snd_kcontrol *kcontrol,
+				      struct snd_ctl_elem_value *ucontrol)
+{
+	struct msm_dai_q6_tdm_dai_data *dai_data = kcontrol->private_data;
+	int i = 0;
+
+	for (i = 0; i < AFE_CUSTOM_TDM_HEADER_MAX_CNT; i++) {
+		ucontrol->value.integer.value[i] =
+			dai_data->port_cfg.custom_tdm_header.header[i];
+		pr_debug("%s: header #%d = 0x%x\n",
+			__func__, i,
+			dai_data->port_cfg.custom_tdm_header.header[i]);
+	}
+	return 0;
+}
+
+static const struct snd_kcontrol_new tdm_config_controls_data_format[] = {
+	SOC_ENUM_EXT("PRI_TDM_RX_0 Data Format", tdm_config_enum[0],
+			msm_dai_q6_tdm_data_format_get,
+			msm_dai_q6_tdm_data_format_put),
+	SOC_ENUM_EXT("PRI_TDM_RX_1 Data Format", tdm_config_enum[0],
+			msm_dai_q6_tdm_data_format_get,
+			msm_dai_q6_tdm_data_format_put),
+	SOC_ENUM_EXT("PRI_TDM_RX_2 Data Format", tdm_config_enum[0],
+			msm_dai_q6_tdm_data_format_get,
+			msm_dai_q6_tdm_data_format_put),
+	SOC_ENUM_EXT("PRI_TDM_RX_3 Data Format", tdm_config_enum[0],
+			msm_dai_q6_tdm_data_format_get,
+			msm_dai_q6_tdm_data_format_put),
+	SOC_ENUM_EXT("PRI_TDM_RX_4 Data Format", tdm_config_enum[0],
+			msm_dai_q6_tdm_data_format_get,
+			msm_dai_q6_tdm_data_format_put),
+	SOC_ENUM_EXT("PRI_TDM_RX_5 Data Format", tdm_config_enum[0],
+			msm_dai_q6_tdm_data_format_get,
+			msm_dai_q6_tdm_data_format_put),
+	SOC_ENUM_EXT("PRI_TDM_RX_6 Data Format", tdm_config_enum[0],
+			msm_dai_q6_tdm_data_format_get,
+			msm_dai_q6_tdm_data_format_put),
+	SOC_ENUM_EXT("PRI_TDM_RX_7 Data Format", tdm_config_enum[0],
+			msm_dai_q6_tdm_data_format_get,
+			msm_dai_q6_tdm_data_format_put),
+	SOC_ENUM_EXT("PRI_TDM_TX_0 Data Format", tdm_config_enum[0],
+			msm_dai_q6_tdm_data_format_get,
+			msm_dai_q6_tdm_data_format_put),
+	SOC_ENUM_EXT("PRI_TDM_TX_1 Data Format", tdm_config_enum[0],
+			msm_dai_q6_tdm_data_format_get,
+			msm_dai_q6_tdm_data_format_put),
+	SOC_ENUM_EXT("PRI_TDM_TX_2 Data Format", tdm_config_enum[0],
+			msm_dai_q6_tdm_data_format_get,
+			msm_dai_q6_tdm_data_format_put),
+	SOC_ENUM_EXT("PRI_TDM_TX_3 Data Format", tdm_config_enum[0],
+			msm_dai_q6_tdm_data_format_get,
+			msm_dai_q6_tdm_data_format_put),
+	SOC_ENUM_EXT("PRI_TDM_TX_4 Data Format", tdm_config_enum[0],
+			msm_dai_q6_tdm_data_format_get,
+			msm_dai_q6_tdm_data_format_put),
+	SOC_ENUM_EXT("PRI_TDM_TX_5 Data Format", tdm_config_enum[0],
+			msm_dai_q6_tdm_data_format_get,
+			msm_dai_q6_tdm_data_format_put),
+	SOC_ENUM_EXT("PRI_TDM_TX_6 Data Format", tdm_config_enum[0],
+			msm_dai_q6_tdm_data_format_get,
+			msm_dai_q6_tdm_data_format_put),
+	SOC_ENUM_EXT("PRI_TDM_TX_7 Data Format", tdm_config_enum[0],
+			msm_dai_q6_tdm_data_format_get,
+			msm_dai_q6_tdm_data_format_put),
+	SOC_ENUM_EXT("SEC_TDM_RX_0 Data Format", tdm_config_enum[0],
+			msm_dai_q6_tdm_data_format_get,
+			msm_dai_q6_tdm_data_format_put),
+	SOC_ENUM_EXT("SEC_TDM_RX_1 Data Format", tdm_config_enum[0],
+			msm_dai_q6_tdm_data_format_get,
+			msm_dai_q6_tdm_data_format_put),
+	SOC_ENUM_EXT("SEC_TDM_RX_2 Data Format", tdm_config_enum[0],
+			msm_dai_q6_tdm_data_format_get,
+			msm_dai_q6_tdm_data_format_put),
+	SOC_ENUM_EXT("SEC_TDM_RX_3 Data Format", tdm_config_enum[0],
+			msm_dai_q6_tdm_data_format_get,
+			msm_dai_q6_tdm_data_format_put),
+	SOC_ENUM_EXT("SEC_TDM_RX_4 Data Format", tdm_config_enum[0],
+			msm_dai_q6_tdm_data_format_get,
+			msm_dai_q6_tdm_data_format_put),
+	SOC_ENUM_EXT("SEC_TDM_RX_5 Data Format", tdm_config_enum[0],
+			msm_dai_q6_tdm_data_format_get,
+			msm_dai_q6_tdm_data_format_put),
+	SOC_ENUM_EXT("SEC_TDM_RX_6 Data Format", tdm_config_enum[0],
+			msm_dai_q6_tdm_data_format_get,
+			msm_dai_q6_tdm_data_format_put),
+	SOC_ENUM_EXT("SEC_TDM_RX_7 Data Format", tdm_config_enum[0],
+			msm_dai_q6_tdm_data_format_get,
+			msm_dai_q6_tdm_data_format_put),
+	SOC_ENUM_EXT("SEC_TDM_TX_0 Data Format", tdm_config_enum[0],
+			msm_dai_q6_tdm_data_format_get,
+			msm_dai_q6_tdm_data_format_put),
+	SOC_ENUM_EXT("SEC_TDM_TX_1 Data Format", tdm_config_enum[0],
+			msm_dai_q6_tdm_data_format_get,
+			msm_dai_q6_tdm_data_format_put),
+	SOC_ENUM_EXT("SEC_TDM_TX_2 Data Format", tdm_config_enum[0],
+			msm_dai_q6_tdm_data_format_get,
+			msm_dai_q6_tdm_data_format_put),
+	SOC_ENUM_EXT("SEC_TDM_TX_3 Data Format", tdm_config_enum[0],
+			msm_dai_q6_tdm_data_format_get,
+			msm_dai_q6_tdm_data_format_put),
+	SOC_ENUM_EXT("SEC_TDM_TX_4 Data Format", tdm_config_enum[0],
+			msm_dai_q6_tdm_data_format_get,
+			msm_dai_q6_tdm_data_format_put),
+	SOC_ENUM_EXT("SEC_TDM_TX_5 Data Format", tdm_config_enum[0],
+			msm_dai_q6_tdm_data_format_get,
+			msm_dai_q6_tdm_data_format_put),
+	SOC_ENUM_EXT("SEC_TDM_TX_6 Data Format", tdm_config_enum[0],
+			msm_dai_q6_tdm_data_format_get,
+			msm_dai_q6_tdm_data_format_put),
+	SOC_ENUM_EXT("SEC_TDM_TX_7 Data Format", tdm_config_enum[0],
+			msm_dai_q6_tdm_data_format_get,
+			msm_dai_q6_tdm_data_format_put),
+	SOC_ENUM_EXT("TERT_TDM_RX_0 Data Format", tdm_config_enum[0],
+			msm_dai_q6_tdm_data_format_get,
+			msm_dai_q6_tdm_data_format_put),
+	SOC_ENUM_EXT("TERT_TDM_RX_1 Data Format", tdm_config_enum[0],
+			msm_dai_q6_tdm_data_format_get,
+			msm_dai_q6_tdm_data_format_put),
+	SOC_ENUM_EXT("TERT_TDM_RX_2 Data Format", tdm_config_enum[0],
+			msm_dai_q6_tdm_data_format_get,
+			msm_dai_q6_tdm_data_format_put),
+	SOC_ENUM_EXT("TERT_TDM_RX_3 Data Format", tdm_config_enum[0],
+			msm_dai_q6_tdm_data_format_get,
+			msm_dai_q6_tdm_data_format_put),
+	SOC_ENUM_EXT("TERT_TDM_RX_4 Data Format", tdm_config_enum[0],
+			msm_dai_q6_tdm_data_format_get,
+			msm_dai_q6_tdm_data_format_put),
+	SOC_ENUM_EXT("TERT_TDM_RX_5 Data Format", tdm_config_enum[0],
+			msm_dai_q6_tdm_data_format_get,
+			msm_dai_q6_tdm_data_format_put),
+	SOC_ENUM_EXT("TERT_TDM_RX_6 Data Format", tdm_config_enum[0],
+			msm_dai_q6_tdm_data_format_get,
+			msm_dai_q6_tdm_data_format_put),
+	SOC_ENUM_EXT("TERT_TDM_RX_7 Data Format", tdm_config_enum[0],
+			msm_dai_q6_tdm_data_format_get,
+			msm_dai_q6_tdm_data_format_put),
+	SOC_ENUM_EXT("TERT_TDM_TX_0 Data Format", tdm_config_enum[0],
+			msm_dai_q6_tdm_data_format_get,
+			msm_dai_q6_tdm_data_format_put),
+	SOC_ENUM_EXT("TERT_TDM_TX_1 Data Format", tdm_config_enum[0],
+			msm_dai_q6_tdm_data_format_get,
+			msm_dai_q6_tdm_data_format_put),
+	SOC_ENUM_EXT("TERT_TDM_TX_2 Data Format", tdm_config_enum[0],
+			msm_dai_q6_tdm_data_format_get,
+			msm_dai_q6_tdm_data_format_put),
+	SOC_ENUM_EXT("TERT_TDM_TX_3 Data Format", tdm_config_enum[0],
+			msm_dai_q6_tdm_data_format_get,
+			msm_dai_q6_tdm_data_format_put),
+	SOC_ENUM_EXT("TERT_TDM_TX_4 Data Format", tdm_config_enum[0],
+			msm_dai_q6_tdm_data_format_get,
+			msm_dai_q6_tdm_data_format_put),
+	SOC_ENUM_EXT("TERT_TDM_TX_5 Data Format", tdm_config_enum[0],
+			msm_dai_q6_tdm_data_format_get,
+			msm_dai_q6_tdm_data_format_put),
+	SOC_ENUM_EXT("TERT_TDM_TX_6 Data Format", tdm_config_enum[0],
+			msm_dai_q6_tdm_data_format_get,
+			msm_dai_q6_tdm_data_format_put),
+	SOC_ENUM_EXT("TERT_TDM_TX_7 Data Format", tdm_config_enum[0],
+			msm_dai_q6_tdm_data_format_get,
+			msm_dai_q6_tdm_data_format_put),
+	SOC_ENUM_EXT("QUAT_TDM_RX_0 Data Format", tdm_config_enum[0],
+			msm_dai_q6_tdm_data_format_get,
+			msm_dai_q6_tdm_data_format_put),
+	SOC_ENUM_EXT("QUAT_TDM_RX_1 Data Format", tdm_config_enum[0],
+			msm_dai_q6_tdm_data_format_get,
+			msm_dai_q6_tdm_data_format_put),
+	SOC_ENUM_EXT("QUAT_TDM_RX_2 Data Format", tdm_config_enum[0],
+			msm_dai_q6_tdm_data_format_get,
+			msm_dai_q6_tdm_data_format_put),
+	SOC_ENUM_EXT("QUAT_TDM_RX_3 Data Format", tdm_config_enum[0],
+			msm_dai_q6_tdm_data_format_get,
+			msm_dai_q6_tdm_data_format_put),
+	SOC_ENUM_EXT("QUAT_TDM_RX_4 Data Format", tdm_config_enum[0],
+			msm_dai_q6_tdm_data_format_get,
+			msm_dai_q6_tdm_data_format_put),
+	SOC_ENUM_EXT("QUAT_TDM_RX_5 Data Format", tdm_config_enum[0],
+			msm_dai_q6_tdm_data_format_get,
+			msm_dai_q6_tdm_data_format_put),
+	SOC_ENUM_EXT("QUAT_TDM_RX_6 Data Format", tdm_config_enum[0],
+			msm_dai_q6_tdm_data_format_get,
+			msm_dai_q6_tdm_data_format_put),
+	SOC_ENUM_EXT("QUAT_TDM_RX_7 Data Format", tdm_config_enum[0],
+			msm_dai_q6_tdm_data_format_get,
+			msm_dai_q6_tdm_data_format_put),
+	SOC_ENUM_EXT("QUAT_TDM_TX_0 Data Format", tdm_config_enum[0],
+			msm_dai_q6_tdm_data_format_get,
+			msm_dai_q6_tdm_data_format_put),
+	SOC_ENUM_EXT("QUAT_TDM_TX_1 Data Format", tdm_config_enum[0],
+			msm_dai_q6_tdm_data_format_get,
+			msm_dai_q6_tdm_data_format_put),
+	SOC_ENUM_EXT("QUAT_TDM_TX_2 Data Format", tdm_config_enum[0],
+			msm_dai_q6_tdm_data_format_get,
+			msm_dai_q6_tdm_data_format_put),
+	SOC_ENUM_EXT("QUAT_TDM_TX_3 Data Format", tdm_config_enum[0],
+			msm_dai_q6_tdm_data_format_get,
+			msm_dai_q6_tdm_data_format_put),
+	SOC_ENUM_EXT("QUAT_TDM_TX_4 Data Format", tdm_config_enum[0],
+			msm_dai_q6_tdm_data_format_get,
+			msm_dai_q6_tdm_data_format_put),
+	SOC_ENUM_EXT("QUAT_TDM_TX_5 Data Format", tdm_config_enum[0],
+			msm_dai_q6_tdm_data_format_get,
+			msm_dai_q6_tdm_data_format_put),
+	SOC_ENUM_EXT("QUAT_TDM_TX_6 Data Format", tdm_config_enum[0],
+			msm_dai_q6_tdm_data_format_get,
+			msm_dai_q6_tdm_data_format_put),
+	SOC_ENUM_EXT("QUAT_TDM_TX_7 Data Format", tdm_config_enum[0],
+			msm_dai_q6_tdm_data_format_get,
+			msm_dai_q6_tdm_data_format_put),
+};
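+
+/*
+ * The three tables in this block (data format above, header type and
+ * header below) are all indexed by the per-port index from
+ * msm_dai_q6_get_port_idx(), so every TDM port (PRI..QUAT, RX/TX 0-7)
+ * owns exactly one control of each kind; msm_dai_q6_dai_tdm_probe()
+ * attaches them to the card. Hypothetical userspace usage (control name
+ * from the table, card number invented):
+ *
+ *	amixer -c 0 cset name='QUAT_TDM_RX_0 Data Format' 1
+ */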
+
+static const struct snd_kcontrol_new tdm_config_controls_header_type[] = {
+	SOC_ENUM_EXT("PRI_TDM_RX_0 Header Type", tdm_config_enum[1],
+			msm_dai_q6_tdm_header_type_get,
+			msm_dai_q6_tdm_header_type_put),
+	SOC_ENUM_EXT("PRI_TDM_RX_1 Header Type", tdm_config_enum[1],
+			msm_dai_q6_tdm_header_type_get,
+			msm_dai_q6_tdm_header_type_put),
+	SOC_ENUM_EXT("PRI_TDM_RX_2 Header Type", tdm_config_enum[1],
+			msm_dai_q6_tdm_header_type_get,
+			msm_dai_q6_tdm_header_type_put),
+	SOC_ENUM_EXT("PRI_TDM_RX_3 Header Type", tdm_config_enum[1],
+			msm_dai_q6_tdm_header_type_get,
+			msm_dai_q6_tdm_header_type_put),
+	SOC_ENUM_EXT("PRI_TDM_RX_4 Header Type", tdm_config_enum[1],
+			msm_dai_q6_tdm_header_type_get,
+			msm_dai_q6_tdm_header_type_put),
+	SOC_ENUM_EXT("PRI_TDM_RX_5 Header Type", tdm_config_enum[1],
+			msm_dai_q6_tdm_header_type_get,
+			msm_dai_q6_tdm_header_type_put),
+	SOC_ENUM_EXT("PRI_TDM_RX_6 Header Type", tdm_config_enum[1],
+			msm_dai_q6_tdm_header_type_get,
+			msm_dai_q6_tdm_header_type_put),
+	SOC_ENUM_EXT("PRI_TDM_RX_7 Header Type", tdm_config_enum[1],
+			msm_dai_q6_tdm_header_type_get,
+			msm_dai_q6_tdm_header_type_put),
+	SOC_ENUM_EXT("PRI_TDM_TX_0 Header Type", tdm_config_enum[1],
+			msm_dai_q6_tdm_header_type_get,
+			msm_dai_q6_tdm_header_type_put),
+	SOC_ENUM_EXT("PRI_TDM_TX_1 Header Type", tdm_config_enum[1],
+			msm_dai_q6_tdm_header_type_get,
+			msm_dai_q6_tdm_header_type_put),
+	SOC_ENUM_EXT("PRI_TDM_TX_2 Header Type", tdm_config_enum[1],
+			msm_dai_q6_tdm_header_type_get,
+			msm_dai_q6_tdm_header_type_put),
+	SOC_ENUM_EXT("PRI_TDM_TX_3 Header Type", tdm_config_enum[1],
+			msm_dai_q6_tdm_header_type_get,
+			msm_dai_q6_tdm_header_type_put),
+	SOC_ENUM_EXT("PRI_TDM_TX_4 Header Type", tdm_config_enum[1],
+			msm_dai_q6_tdm_header_type_get,
+			msm_dai_q6_tdm_header_type_put),
+	SOC_ENUM_EXT("PRI_TDM_TX_5 Header Type", tdm_config_enum[1],
+			msm_dai_q6_tdm_header_type_get,
+			msm_dai_q6_tdm_header_type_put),
+	SOC_ENUM_EXT("PRI_TDM_TX_6 Header Type", tdm_config_enum[1],
+			msm_dai_q6_tdm_header_type_get,
+			msm_dai_q6_tdm_header_type_put),
+	SOC_ENUM_EXT("PRI_TDM_TX_7 Header Type", tdm_config_enum[1],
+			msm_dai_q6_tdm_header_type_get,
+			msm_dai_q6_tdm_header_type_put),
+	SOC_ENUM_EXT("SEC_TDM_RX_0 Header Type", tdm_config_enum[1],
+			msm_dai_q6_tdm_header_type_get,
+			msm_dai_q6_tdm_header_type_put),
+	SOC_ENUM_EXT("SEC_TDM_RX_1 Header Type", tdm_config_enum[1],
+			msm_dai_q6_tdm_header_type_get,
+			msm_dai_q6_tdm_header_type_put),
+	SOC_ENUM_EXT("SEC_TDM_RX_2 Header Type", tdm_config_enum[1],
+			msm_dai_q6_tdm_header_type_get,
+			msm_dai_q6_tdm_header_type_put),
+	SOC_ENUM_EXT("SEC_TDM_RX_3 Header Type", tdm_config_enum[1],
+			msm_dai_q6_tdm_header_type_get,
+			msm_dai_q6_tdm_header_type_put),
+	SOC_ENUM_EXT("SEC_TDM_RX_4 Header Type", tdm_config_enum[1],
+			msm_dai_q6_tdm_header_type_get,
+			msm_dai_q6_tdm_header_type_put),
+	SOC_ENUM_EXT("SEC_TDM_RX_5 Header Type", tdm_config_enum[1],
+			msm_dai_q6_tdm_header_type_get,
+			msm_dai_q6_tdm_header_type_put),
+	SOC_ENUM_EXT("SEC_TDM_RX_6 Header Type", tdm_config_enum[1],
+			msm_dai_q6_tdm_header_type_get,
+			msm_dai_q6_tdm_header_type_put),
+	SOC_ENUM_EXT("SEC_TDM_RX_7 Header Type", tdm_config_enum[1],
+			msm_dai_q6_tdm_header_type_get,
+			msm_dai_q6_tdm_header_type_put),
+	SOC_ENUM_EXT("SEC_TDM_TX_0 Header Type", tdm_config_enum[1],
+			msm_dai_q6_tdm_header_type_get,
+			msm_dai_q6_tdm_header_type_put),
+	SOC_ENUM_EXT("SEC_TDM_TX_1 Header Type", tdm_config_enum[1],
+			msm_dai_q6_tdm_header_type_get,
+			msm_dai_q6_tdm_header_type_put),
+	SOC_ENUM_EXT("SEC_TDM_TX_2 Header Type", tdm_config_enum[1],
+			msm_dai_q6_tdm_header_type_get,
+			msm_dai_q6_tdm_header_type_put),
+	SOC_ENUM_EXT("SEC_TDM_TX_3 Header Type", tdm_config_enum[1],
+			msm_dai_q6_tdm_header_type_get,
+			msm_dai_q6_tdm_header_type_put),
+	SOC_ENUM_EXT("SEC_TDM_TX_4 Header Type", tdm_config_enum[1],
+			msm_dai_q6_tdm_header_type_get,
+			msm_dai_q6_tdm_header_type_put),
+	SOC_ENUM_EXT("SEC_TDM_TX_5 Header Type", tdm_config_enum[1],
+			msm_dai_q6_tdm_header_type_get,
+			msm_dai_q6_tdm_header_type_put),
+	SOC_ENUM_EXT("SEC_TDM_TX_6 Header Type", tdm_config_enum[1],
+			msm_dai_q6_tdm_header_type_get,
+			msm_dai_q6_tdm_header_type_put),
+	SOC_ENUM_EXT("SEC_TDM_TX_7 Header Type", tdm_config_enum[1],
+			msm_dai_q6_tdm_header_type_get,
+			msm_dai_q6_tdm_header_type_put),
+	SOC_ENUM_EXT("TERT_TDM_RX_0 Header Type", tdm_config_enum[1],
+			msm_dai_q6_tdm_header_type_get,
+			msm_dai_q6_tdm_header_type_put),
+	SOC_ENUM_EXT("TERT_TDM_RX_1 Header Type", tdm_config_enum[1],
+			msm_dai_q6_tdm_header_type_get,
+			msm_dai_q6_tdm_header_type_put),
+	SOC_ENUM_EXT("TERT_TDM_RX_2 Header Type", tdm_config_enum[1],
+			msm_dai_q6_tdm_header_type_get,
+			msm_dai_q6_tdm_header_type_put),
+	SOC_ENUM_EXT("TERT_TDM_RX_3 Header Type", tdm_config_enum[1],
+			msm_dai_q6_tdm_header_type_get,
+			msm_dai_q6_tdm_header_type_put),
+	SOC_ENUM_EXT("TERT_TDM_RX_4 Header Type", tdm_config_enum[1],
+			msm_dai_q6_tdm_header_type_get,
+			msm_dai_q6_tdm_header_type_put),
+	SOC_ENUM_EXT("TERT_TDM_RX_5 Header Type", tdm_config_enum[1],
+			msm_dai_q6_tdm_header_type_get,
+			msm_dai_q6_tdm_header_type_put),
+	SOC_ENUM_EXT("TERT_TDM_RX_6 Header Type", tdm_config_enum[1],
+			msm_dai_q6_tdm_header_type_get,
+			msm_dai_q6_tdm_header_type_put),
+	SOC_ENUM_EXT("TERT_TDM_RX_7 Header Type", tdm_config_enum[1],
+			msm_dai_q6_tdm_header_type_get,
+			msm_dai_q6_tdm_header_type_put),
+	SOC_ENUM_EXT("TERT_TDM_TX_0 Header Type", tdm_config_enum[1],
+			msm_dai_q6_tdm_header_type_get,
+			msm_dai_q6_tdm_header_type_put),
+	SOC_ENUM_EXT("TERT_TDM_TX_1 Header Type", tdm_config_enum[1],
+			msm_dai_q6_tdm_header_type_get,
+			msm_dai_q6_tdm_header_type_put),
+	SOC_ENUM_EXT("TERT_TDM_TX_2 Header Type", tdm_config_enum[1],
+			msm_dai_q6_tdm_header_type_get,
+			msm_dai_q6_tdm_header_type_put),
+	SOC_ENUM_EXT("TERT_TDM_TX_3 Header Type", tdm_config_enum[1],
+			msm_dai_q6_tdm_header_type_get,
+			msm_dai_q6_tdm_header_type_put),
+	SOC_ENUM_EXT("TERT_TDM_TX_4 Header Type", tdm_config_enum[1],
+			msm_dai_q6_tdm_header_type_get,
+			msm_dai_q6_tdm_header_type_put),
+	SOC_ENUM_EXT("TERT_TDM_TX_5 Header Type", tdm_config_enum[1],
+			msm_dai_q6_tdm_header_type_get,
+			msm_dai_q6_tdm_header_type_put),
+	SOC_ENUM_EXT("TERT_TDM_TX_6 Header Type", tdm_config_enum[1],
+			msm_dai_q6_tdm_header_type_get,
+			msm_dai_q6_tdm_header_type_put),
+	SOC_ENUM_EXT("TERT_TDM_TX_7 Header Type", tdm_config_enum[1],
+			msm_dai_q6_tdm_header_type_get,
+			msm_dai_q6_tdm_header_type_put),
+	SOC_ENUM_EXT("QUAT_TDM_RX_0 Header Type", tdm_config_enum[1],
+			msm_dai_q6_tdm_header_type_get,
+			msm_dai_q6_tdm_header_type_put),
+	SOC_ENUM_EXT("QUAT_TDM_RX_1 Header Type", tdm_config_enum[1],
+			msm_dai_q6_tdm_header_type_get,
+			msm_dai_q6_tdm_header_type_put),
+	SOC_ENUM_EXT("QUAT_TDM_RX_2 Header Type", tdm_config_enum[1],
+			msm_dai_q6_tdm_header_type_get,
+			msm_dai_q6_tdm_header_type_put),
+	SOC_ENUM_EXT("QUAT_TDM_RX_3 Header Type", tdm_config_enum[1],
+			msm_dai_q6_tdm_header_type_get,
+			msm_dai_q6_tdm_header_type_put),
+	SOC_ENUM_EXT("QUAT_TDM_RX_4 Header Type", tdm_config_enum[1],
+			msm_dai_q6_tdm_header_type_get,
+			msm_dai_q6_tdm_header_type_put),
+	SOC_ENUM_EXT("QUAT_TDM_RX_5 Header Type", tdm_config_enum[1],
+			msm_dai_q6_tdm_header_type_get,
+			msm_dai_q6_tdm_header_type_put),
+	SOC_ENUM_EXT("QUAT_TDM_RX_6 Header Type", tdm_config_enum[1],
+			msm_dai_q6_tdm_header_type_get,
+			msm_dai_q6_tdm_header_type_put),
+	SOC_ENUM_EXT("QUAT_TDM_RX_7 Header Type", tdm_config_enum[1],
+			msm_dai_q6_tdm_header_type_get,
+			msm_dai_q6_tdm_header_type_put),
+	SOC_ENUM_EXT("QUAT_TDM_TX_0 Header Type", tdm_config_enum[1],
+			msm_dai_q6_tdm_header_type_get,
+			msm_dai_q6_tdm_header_type_put),
+	SOC_ENUM_EXT("QUAT_TDM_TX_1 Header Type", tdm_config_enum[1],
+			msm_dai_q6_tdm_header_type_get,
+			msm_dai_q6_tdm_header_type_put),
+	SOC_ENUM_EXT("QUAT_TDM_TX_2 Header Type", tdm_config_enum[1],
+			msm_dai_q6_tdm_header_type_get,
+			msm_dai_q6_tdm_header_type_put),
+	SOC_ENUM_EXT("QUAT_TDM_TX_3 Header Type", tdm_config_enum[1],
+			msm_dai_q6_tdm_header_type_get,
+			msm_dai_q6_tdm_header_type_put),
+	SOC_ENUM_EXT("QUAT_TDM_TX_4 Header Type", tdm_config_enum[1],
+			msm_dai_q6_tdm_header_type_get,
+			msm_dai_q6_tdm_header_type_put),
+	SOC_ENUM_EXT("QUAT_TDM_TX_5 Header Type", tdm_config_enum[1],
+			msm_dai_q6_tdm_header_type_get,
+			msm_dai_q6_tdm_header_type_put),
+	SOC_ENUM_EXT("QUAT_TDM_TX_6 Header Type", tdm_config_enum[1],
+			msm_dai_q6_tdm_header_type_get,
+			msm_dai_q6_tdm_header_type_put),
+	SOC_ENUM_EXT("QUAT_TDM_TX_7 Header Type", tdm_config_enum[1],
+			msm_dai_q6_tdm_header_type_get,
+			msm_dai_q6_tdm_header_type_put),
+};
+
+static const struct snd_kcontrol_new tdm_config_controls_header[] = {
+	SOC_SINGLE_MULTI_EXT("PRI_TDM_RX_0 Header",
+			SND_SOC_NOPM, 0, 0xFFFFFFFF, 0, 8,
+			msm_dai_q6_tdm_header_get,
+			msm_dai_q6_tdm_header_put),
+	SOC_SINGLE_MULTI_EXT("PRI_TDM_RX_1 Header",
+			SND_SOC_NOPM, 0, 0xFFFFFFFF, 0, 8,
+			msm_dai_q6_tdm_header_get,
+			msm_dai_q6_tdm_header_put),
+	SOC_SINGLE_MULTI_EXT("PRI_TDM_RX_2 Header",
+			SND_SOC_NOPM, 0, 0xFFFFFFFF, 0, 8,
+			msm_dai_q6_tdm_header_get,
+			msm_dai_q6_tdm_header_put),
+	SOC_SINGLE_MULTI_EXT("PRI_TDM_RX_3 Header",
+			SND_SOC_NOPM, 0, 0xFFFFFFFF, 0, 8,
+			msm_dai_q6_tdm_header_get,
+			msm_dai_q6_tdm_header_put),
+	SOC_SINGLE_MULTI_EXT("PRI_TDM_RX_4 Header",
+			SND_SOC_NOPM, 0, 0xFFFFFFFF, 0, 8,
+			msm_dai_q6_tdm_header_get,
+			msm_dai_q6_tdm_header_put),
+	SOC_SINGLE_MULTI_EXT("PRI_TDM_RX_5 Header",
+			SND_SOC_NOPM, 0, 0xFFFFFFFF, 0, 8,
+			msm_dai_q6_tdm_header_get,
+			msm_dai_q6_tdm_header_put),
+	SOC_SINGLE_MULTI_EXT("PRI_TDM_RX_6 Header",
+			SND_SOC_NOPM, 0, 0xFFFFFFFF, 0, 8,
+			msm_dai_q6_tdm_header_get,
+			msm_dai_q6_tdm_header_put),
+	SOC_SINGLE_MULTI_EXT("PRI_TDM_RX_7 Header",
+			SND_SOC_NOPM, 0, 0xFFFFFFFF, 0, 8,
+			msm_dai_q6_tdm_header_get,
+			msm_dai_q6_tdm_header_put),
+	SOC_SINGLE_MULTI_EXT("PRI_TDM_TX_0 Header",
+			SND_SOC_NOPM, 0, 0xFFFFFFFF, 0, 8,
+			msm_dai_q6_tdm_header_get,
+			msm_dai_q6_tdm_header_put),
+	SOC_SINGLE_MULTI_EXT("PRI_TDM_TX_1 Header",
+			SND_SOC_NOPM, 0, 0xFFFFFFFF, 0, 8,
+			msm_dai_q6_tdm_header_get,
+			msm_dai_q6_tdm_header_put),
+	SOC_SINGLE_MULTI_EXT("PRI_TDM_TX_2 Header",
+			SND_SOC_NOPM, 0, 0xFFFFFFFF, 0, 8,
+			msm_dai_q6_tdm_header_get,
+			msm_dai_q6_tdm_header_put),
+	SOC_SINGLE_MULTI_EXT("PRI_TDM_TX_3 Header",
+			SND_SOC_NOPM, 0, 0xFFFFFFFF, 0, 8,
+			msm_dai_q6_tdm_header_get,
+			msm_dai_q6_tdm_header_put),
+	SOC_SINGLE_MULTI_EXT("PRI_TDM_TX_4 Header",
+			SND_SOC_NOPM, 0, 0xFFFFFFFF, 0, 8,
+			msm_dai_q6_tdm_header_get,
+			msm_dai_q6_tdm_header_put),
+	SOC_SINGLE_MULTI_EXT("PRI_TDM_TX_5 Header",
+			SND_SOC_NOPM, 0, 0xFFFFFFFF, 0, 8,
+			msm_dai_q6_tdm_header_get,
+			msm_dai_q6_tdm_header_put),
+	SOC_SINGLE_MULTI_EXT("PRI_TDM_TX_6 Header",
+			SND_SOC_NOPM, 0, 0xFFFFFFFF, 0, 8,
+			msm_dai_q6_tdm_header_get,
+			msm_dai_q6_tdm_header_put),
+	SOC_SINGLE_MULTI_EXT("PRI_TDM_TX_7 Header",
+			SND_SOC_NOPM, 0, 0xFFFFFFFF, 0, 8,
+			msm_dai_q6_tdm_header_get,
+			msm_dai_q6_tdm_header_put),
+	SOC_SINGLE_MULTI_EXT("SEC_TDM_RX_0 Header",
+			SND_SOC_NOPM, 0, 0xFFFFFFFF, 0, 8,
+			msm_dai_q6_tdm_header_get,
+			msm_dai_q6_tdm_header_put),
+	SOC_SINGLE_MULTI_EXT("SEC_TDM_RX_1 Header",
+			SND_SOC_NOPM, 0, 0xFFFFFFFF, 0, 8,
+			msm_dai_q6_tdm_header_get,
+			msm_dai_q6_tdm_header_put),
+	SOC_SINGLE_MULTI_EXT("SEC_TDM_RX_2 Header",
+			SND_SOC_NOPM, 0, 0xFFFFFFFF, 0, 8,
+			msm_dai_q6_tdm_header_get,
+			msm_dai_q6_tdm_header_put),
+	SOC_SINGLE_MULTI_EXT("SEC_TDM_RX_3 Header",
+			SND_SOC_NOPM, 0, 0xFFFFFFFF, 0, 8,
+			msm_dai_q6_tdm_header_get,
+			msm_dai_q6_tdm_header_put),
+	SOC_SINGLE_MULTI_EXT("SEC_TDM_RX_4 Header",
+			SND_SOC_NOPM, 0, 0xFFFFFFFF, 0, 8,
+			msm_dai_q6_tdm_header_get,
+			msm_dai_q6_tdm_header_put),
+	SOC_SINGLE_MULTI_EXT("SEC_TDM_RX_5 Header",
+			SND_SOC_NOPM, 0, 0xFFFFFFFF, 0, 8,
+			msm_dai_q6_tdm_header_get,
+			msm_dai_q6_tdm_header_put),
+	SOC_SINGLE_MULTI_EXT("SEC_TDM_RX_6 Header",
+			SND_SOC_NOPM, 0, 0xFFFFFFFF, 0, 8,
+			msm_dai_q6_tdm_header_get,
+			msm_dai_q6_tdm_header_put),
+	SOC_SINGLE_MULTI_EXT("SEC_TDM_RX_7 Header",
+			SND_SOC_NOPM, 0, 0xFFFFFFFF, 0, 8,
+			msm_dai_q6_tdm_header_get,
+			msm_dai_q6_tdm_header_put),
+	SOC_SINGLE_MULTI_EXT("SEC_TDM_TX_0 Header",
+			SND_SOC_NOPM, 0, 0xFFFFFFFF, 0, 8,
+			msm_dai_q6_tdm_header_get,
+			msm_dai_q6_tdm_header_put),
+	SOC_SINGLE_MULTI_EXT("SEC_TDM_TX_1 Header",
+			SND_SOC_NOPM, 0, 0xFFFFFFFF, 0, 8,
+			msm_dai_q6_tdm_header_get,
+			msm_dai_q6_tdm_header_put),
+	SOC_SINGLE_MULTI_EXT("SEC_TDM_TX_2 Header",
+			SND_SOC_NOPM, 0, 0xFFFFFFFF, 0, 8,
+			msm_dai_q6_tdm_header_get,
+			msm_dai_q6_tdm_header_put),
+	SOC_SINGLE_MULTI_EXT("SEC_TDM_TX_3 Header",
+			SND_SOC_NOPM, 0, 0xFFFFFFFF, 0, 8,
+			msm_dai_q6_tdm_header_get,
+			msm_dai_q6_tdm_header_put),
+	SOC_SINGLE_MULTI_EXT("SEC_TDM_TX_4 Header",
+			SND_SOC_NOPM, 0, 0xFFFFFFFF, 0, 8,
+			msm_dai_q6_tdm_header_get,
+			msm_dai_q6_tdm_header_put),
+	SOC_SINGLE_MULTI_EXT("SEC_TDM_TX_5 Header",
+			SND_SOC_NOPM, 0, 0xFFFFFFFF, 0, 8,
+			msm_dai_q6_tdm_header_get,
+			msm_dai_q6_tdm_header_put),
+	SOC_SINGLE_MULTI_EXT("SEC_TDM_TX_6 Header",
+			SND_SOC_NOPM, 0, 0xFFFFFFFF, 0, 8,
+			msm_dai_q6_tdm_header_get,
+			msm_dai_q6_tdm_header_put),
+	SOC_SINGLE_MULTI_EXT("SEC_TDM_TX_7 Header",
+			SND_SOC_NOPM, 0, 0xFFFFFFFF, 0, 8,
+			msm_dai_q6_tdm_header_get,
+			msm_dai_q6_tdm_header_put),
+	SOC_SINGLE_MULTI_EXT("TERT_TDM_RX_0 Header",
+			SND_SOC_NOPM, 0, 0xFFFFFFFF, 0, 8,
+			msm_dai_q6_tdm_header_get,
+			msm_dai_q6_tdm_header_put),
+	SOC_SINGLE_MULTI_EXT("TERT_TDM_RX_1 Header",
+			SND_SOC_NOPM, 0, 0xFFFFFFFF, 0, 8,
+			msm_dai_q6_tdm_header_get,
+			msm_dai_q6_tdm_header_put),
+	SOC_SINGLE_MULTI_EXT("TERT_TDM_RX_2 Header",
+			SND_SOC_NOPM, 0, 0xFFFFFFFF, 0, 8,
+			msm_dai_q6_tdm_header_get,
+			msm_dai_q6_tdm_header_put),
+	SOC_SINGLE_MULTI_EXT("TERT_TDM_RX_3 Header",
+			SND_SOC_NOPM, 0, 0xFFFFFFFF, 0, 8,
+			msm_dai_q6_tdm_header_get,
+			msm_dai_q6_tdm_header_put),
+	SOC_SINGLE_MULTI_EXT("TERT_TDM_RX_4 Header",
+			SND_SOC_NOPM, 0, 0xFFFFFFFF, 0, 8,
+			msm_dai_q6_tdm_header_get,
+			msm_dai_q6_tdm_header_put),
+	SOC_SINGLE_MULTI_EXT("TERT_TDM_RX_5 Header",
+			SND_SOC_NOPM, 0, 0xFFFFFFFF, 0, 8,
+			msm_dai_q6_tdm_header_get,
+			msm_dai_q6_tdm_header_put),
+	SOC_SINGLE_MULTI_EXT("TERT_TDM_RX_6 Header",
+			SND_SOC_NOPM, 0, 0xFFFFFFFF, 0, 8,
+			msm_dai_q6_tdm_header_get,
+			msm_dai_q6_tdm_header_put),
+	SOC_SINGLE_MULTI_EXT("TERT_TDM_RX_7 Header",
+			SND_SOC_NOPM, 0, 0xFFFFFFFF, 0, 8,
+			msm_dai_q6_tdm_header_get,
+			msm_dai_q6_tdm_header_put),
+	SOC_SINGLE_MULTI_EXT("TERT_TDM_TX_0 Header",
+			SND_SOC_NOPM, 0, 0xFFFFFFFF, 0, 8,
+			msm_dai_q6_tdm_header_get,
+			msm_dai_q6_tdm_header_put),
+	SOC_SINGLE_MULTI_EXT("TERT_TDM_TX_1 Header",
+			SND_SOC_NOPM, 0, 0xFFFFFFFF, 0, 8,
+			msm_dai_q6_tdm_header_get,
+			msm_dai_q6_tdm_header_put),
+	SOC_SINGLE_MULTI_EXT("TERT_TDM_TX_2 Header",
+			SND_SOC_NOPM, 0, 0xFFFFFFFF, 0, 8,
+			msm_dai_q6_tdm_header_get,
+			msm_dai_q6_tdm_header_put),
+	SOC_SINGLE_MULTI_EXT("TERT_TDM_TX_3 Header",
+			SND_SOC_NOPM, 0, 0xFFFFFFFF, 0, 8,
+			msm_dai_q6_tdm_header_get,
+			msm_dai_q6_tdm_header_put),
+	SOC_SINGLE_MULTI_EXT("TERT_TDM_TX_4 Header",
+			SND_SOC_NOPM, 0, 0xFFFFFFFF, 0, 8,
+			msm_dai_q6_tdm_header_get,
+			msm_dai_q6_tdm_header_put),
+	SOC_SINGLE_MULTI_EXT("TERT_TDM_TX_5 Header",
+			SND_SOC_NOPM, 0, 0xFFFFFFFF, 0, 8,
+			msm_dai_q6_tdm_header_get,
+			msm_dai_q6_tdm_header_put),
+	SOC_SINGLE_MULTI_EXT("TERT_TDM_TX_6 Header",
+			SND_SOC_NOPM, 0, 0xFFFFFFFF, 0, 8,
+			msm_dai_q6_tdm_header_get,
+			msm_dai_q6_tdm_header_put),
+	SOC_SINGLE_MULTI_EXT("TERT_TDM_TX_7 Header",
+			SND_SOC_NOPM, 0, 0xFFFFFFFF, 0, 8,
+			msm_dai_q6_tdm_header_get,
+			msm_dai_q6_tdm_header_put),
+	SOC_SINGLE_MULTI_EXT("QUAT_TDM_RX_0 Header",
+			SND_SOC_NOPM, 0, 0xFFFFFFFF, 0, 8,
+			msm_dai_q6_tdm_header_get,
+			msm_dai_q6_tdm_header_put),
+	SOC_SINGLE_MULTI_EXT("QUAT_TDM_RX_1 Header",
+			SND_SOC_NOPM, 0, 0xFFFFFFFF, 0, 8,
+			msm_dai_q6_tdm_header_get,
+			msm_dai_q6_tdm_header_put),
+	SOC_SINGLE_MULTI_EXT("QUAT_TDM_RX_2 Header",
+			SND_SOC_NOPM, 0, 0xFFFFFFFF, 0, 8,
+			msm_dai_q6_tdm_header_get,
+			msm_dai_q6_tdm_header_put),
+	SOC_SINGLE_MULTI_EXT("QUAT_TDM_RX_3 Header",
+			SND_SOC_NOPM, 0, 0xFFFFFFFF, 0, 8,
+			msm_dai_q6_tdm_header_get,
+			msm_dai_q6_tdm_header_put),
+	SOC_SINGLE_MULTI_EXT("QUAT_TDM_RX_4 Header",
+			SND_SOC_NOPM, 0, 0xFFFFFFFF, 0, 8,
+			msm_dai_q6_tdm_header_get,
+			msm_dai_q6_tdm_header_put),
+	SOC_SINGLE_MULTI_EXT("QUAT_TDM_RX_5 Header",
+			SND_SOC_NOPM, 0, 0xFFFFFFFF, 0, 8,
+			msm_dai_q6_tdm_header_get,
+			msm_dai_q6_tdm_header_put),
+	SOC_SINGLE_MULTI_EXT("QUAT_TDM_RX_6 Header",
+			SND_SOC_NOPM, 0, 0xFFFFFFFF, 0, 8,
+			msm_dai_q6_tdm_header_get,
+			msm_dai_q6_tdm_header_put),
+	SOC_SINGLE_MULTI_EXT("QUAT_TDM_RX_7 Header",
+			SND_SOC_NOPM, 0, 0xFFFFFFFF, 0, 8,
+			msm_dai_q6_tdm_header_get,
+			msm_dai_q6_tdm_header_put),
+	SOC_SINGLE_MULTI_EXT("QUAT_TDM_TX_0 Header",
+			SND_SOC_NOPM, 0, 0xFFFFFFFF, 0, 8,
+			msm_dai_q6_tdm_header_get,
+			msm_dai_q6_tdm_header_put),
+	SOC_SINGLE_MULTI_EXT("QUAT_TDM_TX_1 Header",
+			SND_SOC_NOPM, 0, 0xFFFFFFFF, 0, 8,
+			msm_dai_q6_tdm_header_get,
+			msm_dai_q6_tdm_header_put),
+	SOC_SINGLE_MULTI_EXT("QUAT_TDM_TX_2 Header",
+			SND_SOC_NOPM, 0, 0xFFFFFFFF, 0, 8,
+			msm_dai_q6_tdm_header_get,
+			msm_dai_q6_tdm_header_put),
+	SOC_SINGLE_MULTI_EXT("QUAT_TDM_TX_3 Header",
+			SND_SOC_NOPM, 0, 0xFFFFFFFF, 0, 8,
+			msm_dai_q6_tdm_header_get,
+			msm_dai_q6_tdm_header_put),
+	SOC_SINGLE_MULTI_EXT("QUAT_TDM_TX_4 Header",
+			SND_SOC_NOPM, 0, 0xFFFFFFFF, 0, 8,
+			msm_dai_q6_tdm_header_get,
+			msm_dai_q6_tdm_header_put),
+	SOC_SINGLE_MULTI_EXT("QUAT_TDM_TX_5 Header",
+			SND_SOC_NOPM, 0, 0xFFFFFFFF, 0, 8,
+			msm_dai_q6_tdm_header_get,
+			msm_dai_q6_tdm_header_put),
+	SOC_SINGLE_MULTI_EXT("QUAT_TDM_TX_6 Header",
+			SND_SOC_NOPM, 0, 0xFFFFFFFF, 0, 8,
+			msm_dai_q6_tdm_header_get,
+			msm_dai_q6_tdm_header_put),
+	SOC_SINGLE_MULTI_EXT("QUAT_TDM_TX_7 Header",
+			SND_SOC_NOPM, 0, 0xFFFFFFFF, 0, 8,
+			msm_dai_q6_tdm_header_get,
+			msm_dai_q6_tdm_header_put),
+};
+
+static int msm_dai_q6_tdm_set_clk(
+		struct msm_dai_q6_tdm_dai_data *dai_data,
+		u16 port_id, bool enable)
+{
+	int rc = 0;
+
+	dai_data->clk_set.enable = enable;
+
+	rc = afe_set_lpass_clock_v2(port_id,
+		&dai_data->clk_set);
+	if (rc < 0)
+		pr_err("%s: afe lpass clock failed, err:%d\n",
+			__func__, rc);
+
+	return rc;
+}
+
+static int msm_dai_q6_dai_tdm_probe(struct snd_soc_dai *dai)
+{
+	int rc = 0;
+	struct msm_dai_q6_tdm_dai_data *tdm_dai_data =
+			dev_get_drvdata(dai->dev);
+	struct snd_kcontrol *data_format_kcontrol = NULL;
+	struct snd_kcontrol *header_type_kcontrol = NULL;
+	struct snd_kcontrol *header_kcontrol = NULL;
+	int port_idx = 0;
+	const struct snd_kcontrol_new *data_format_ctrl = NULL;
+	const struct snd_kcontrol_new *header_type_ctrl = NULL;
+	const struct snd_kcontrol_new *header_ctrl = NULL;
+
+	msm_dai_q6_set_dai_id(dai);
+
+	port_idx = msm_dai_q6_get_port_idx(dai->id);
+	if (port_idx < 0) {
+		dev_err(dai->dev, "%s port id 0x%x not supported\n",
+			__func__, dai->id);
+		rc = -EINVAL;
+		goto rtn;
+	}
+
+	data_format_ctrl =
+		&tdm_config_controls_data_format[port_idx];
+	header_type_ctrl =
+		&tdm_config_controls_header_type[port_idx];
+	header_ctrl =
+		&tdm_config_controls_header[port_idx];
+
+	if (data_format_ctrl) {
+		data_format_kcontrol = snd_ctl_new1(data_format_ctrl,
+					tdm_dai_data);
+		rc = snd_ctl_add(dai->component->card->snd_card,
+				 data_format_kcontrol);
+
+		if (IS_ERR_VALUE(rc)) {
+			dev_err(dai->dev, "%s: err add data format ctrl DAI = %s\n",
+				__func__, dai->name);
+			goto rtn;
+		}
+	}
+
+	if (header_type_ctrl) {
+		header_type_kcontrol = snd_ctl_new1(header_type_ctrl,
+					tdm_dai_data);
+		rc = snd_ctl_add(dai->component->card->snd_card,
+				 header_type_kcontrol);
+
+		if (IS_ERR_VALUE(rc)) {
+			if (data_format_kcontrol)
+				snd_ctl_remove(dai->component->card->snd_card,
+					data_format_kcontrol);
+			dev_err(dai->dev, "%s: err add header type ctrl DAI = %s\n",
+				__func__, dai->name);
+			goto rtn;
+		}
+	}
+
+	if (header_ctrl) {
+		header_kcontrol = snd_ctl_new1(header_ctrl,
+					tdm_dai_data);
+		rc = snd_ctl_add(dai->component->card->snd_card,
+				 header_kcontrol);
+
+		if (IS_ERR_VALUE(rc)) {
+			if (header_type_kcontrol)
+				snd_ctl_remove(dai->component->card->snd_card,
+					header_type_kcontrol);
+			if (data_format_kcontrol)
+				snd_ctl_remove(dai->component->card->snd_card,
+					data_format_kcontrol);
+			dev_err(dai->dev, "%s: err add header ctrl DAI = %s\n",
+				__func__, dai->name);
+			goto rtn;
+		}
+	}
+
+	rc = msm_dai_q6_dai_add_route(dai);
+
+rtn:
+	return rc;
+}
+
+static int msm_dai_q6_dai_tdm_remove(struct snd_soc_dai *dai)
+{
+	int rc = 0;
+	struct msm_dai_q6_tdm_dai_data *tdm_dai_data =
+		dev_get_drvdata(dai->dev);
+	u16 group_id = tdm_dai_data->group_cfg.tdm_cfg.group_id;
+	int group_idx = 0;
+	atomic_t *group_ref = NULL;
+
+	group_idx = msm_dai_q6_get_group_idx(dai->id);
+	if (group_idx < 0) {
+		dev_err(dai->dev, "%s port id 0x%x not supported\n",
+			__func__, dai->id);
+		return -EINVAL;
+	}
+
+	group_ref = &tdm_group_ref[group_idx];
+
+	/* If AFE port is still up, close it */
+	if (test_bit(STATUS_PORT_STARTED, tdm_dai_data->status_mask)) {
+		rc = afe_close(dai->id); /* can block */
+		if (IS_ERR_VALUE(rc)) {
+			dev_err(dai->dev, "%s: fail to close AFE port 0x%x\n",
+				__func__, dai->id);
+		}
+		atomic_dec(group_ref);
+		clear_bit(STATUS_PORT_STARTED,
+			  tdm_dai_data->status_mask);
+
+		if (atomic_read(group_ref) == 0) {
+			rc = afe_port_group_enable(group_id,
+				NULL, false);
+			if (IS_ERR_VALUE(rc)) {
+				dev_err(dai->dev, "fail to disable AFE group 0x%x\n",
+					group_id);
+			}
+			rc = msm_dai_q6_tdm_set_clk(tdm_dai_data,
+				dai->id, false);
+			if (IS_ERR_VALUE(rc)) {
+				dev_err(dai->dev, "%s: fail to disable AFE clk 0x%x\n",
+					__func__, dai->id);
+			}
+		}
+	}
+
+	return 0;
+}
+
+static int msm_dai_q6_tdm_set_tdm_slot(struct snd_soc_dai *dai,
+				unsigned int tx_mask,
+				unsigned int rx_mask,
+				int slots, int slot_width)
+{
+	int rc = 0;
+	struct msm_dai_q6_tdm_dai_data *dai_data =
+		dev_get_drvdata(dai->dev);
+	struct afe_param_id_group_device_tdm_cfg *tdm_group =
+		&dai_data->group_cfg.tdm_cfg;
+	unsigned int cap_mask;
+
+	dev_dbg(dai->dev, "%s: dai id = 0x%x\n", __func__, dai->id);
+
+	/* HW only supports 16 and 32 bit slot width configuration */
+	if ((slot_width != 16) && (slot_width != 32)) {
+		dev_err(dai->dev, "%s: invalid slot_width %d\n",
+			__func__, slot_width);
+		return -EINVAL;
+	}
+
+	/* HW only supports 2, 8 and 16 slot configurations */
+	switch (slots) {
+	case 2:
+		cap_mask = 0x03;
+		break;
+	case 8:
+		cap_mask = 0xFF;
+		break;
+	case 16:
+		cap_mask = 0xFFFF;
+		break;
+	default:
+		dev_err(dai->dev, "%s: invalid slots %d\n",
+			__func__, slots);
+		return -EINVAL;
+	}
+
+	switch (dai->id) {
+	case AFE_PORT_ID_PRIMARY_TDM_RX:
+	case AFE_PORT_ID_PRIMARY_TDM_RX_1:
+	case AFE_PORT_ID_PRIMARY_TDM_RX_2:
+	case AFE_PORT_ID_PRIMARY_TDM_RX_3:
+	case AFE_PORT_ID_PRIMARY_TDM_RX_4:
+	case AFE_PORT_ID_PRIMARY_TDM_RX_5:
+	case AFE_PORT_ID_PRIMARY_TDM_RX_6:
+	case AFE_PORT_ID_PRIMARY_TDM_RX_7:
+	case AFE_PORT_ID_SECONDARY_TDM_RX:
+	case AFE_PORT_ID_SECONDARY_TDM_RX_1:
+	case AFE_PORT_ID_SECONDARY_TDM_RX_2:
+	case AFE_PORT_ID_SECONDARY_TDM_RX_3:
+	case AFE_PORT_ID_SECONDARY_TDM_RX_4:
+	case AFE_PORT_ID_SECONDARY_TDM_RX_5:
+	case AFE_PORT_ID_SECONDARY_TDM_RX_6:
+	case AFE_PORT_ID_SECONDARY_TDM_RX_7:
+	case AFE_PORT_ID_TERTIARY_TDM_RX:
+	case AFE_PORT_ID_TERTIARY_TDM_RX_1:
+	case AFE_PORT_ID_TERTIARY_TDM_RX_2:
+	case AFE_PORT_ID_TERTIARY_TDM_RX_3:
+	case AFE_PORT_ID_TERTIARY_TDM_RX_4:
+	case AFE_PORT_ID_TERTIARY_TDM_RX_5:
+	case AFE_PORT_ID_TERTIARY_TDM_RX_6:
+	case AFE_PORT_ID_TERTIARY_TDM_RX_7:
+	case AFE_PORT_ID_QUATERNARY_TDM_RX:
+	case AFE_PORT_ID_QUATERNARY_TDM_RX_1:
+	case AFE_PORT_ID_QUATERNARY_TDM_RX_2:
+	case AFE_PORT_ID_QUATERNARY_TDM_RX_3:
+	case AFE_PORT_ID_QUATERNARY_TDM_RX_4:
+	case AFE_PORT_ID_QUATERNARY_TDM_RX_5:
+	case AFE_PORT_ID_QUATERNARY_TDM_RX_6:
+	case AFE_PORT_ID_QUATERNARY_TDM_RX_7:
+		tdm_group->nslots_per_frame = slots;
+		tdm_group->slot_width = slot_width;
+		tdm_group->slot_mask = rx_mask & cap_mask;
+		break;
+	case AFE_PORT_ID_PRIMARY_TDM_TX:
+	case AFE_PORT_ID_PRIMARY_TDM_TX_1:
+	case AFE_PORT_ID_PRIMARY_TDM_TX_2:
+	case AFE_PORT_ID_PRIMARY_TDM_TX_3:
+	case AFE_PORT_ID_PRIMARY_TDM_TX_4:
+	case AFE_PORT_ID_PRIMARY_TDM_TX_5:
+	case AFE_PORT_ID_PRIMARY_TDM_TX_6:
+	case AFE_PORT_ID_PRIMARY_TDM_TX_7:
+	case AFE_PORT_ID_SECONDARY_TDM_TX:
+	case AFE_PORT_ID_SECONDARY_TDM_TX_1:
+	case AFE_PORT_ID_SECONDARY_TDM_TX_2:
+	case AFE_PORT_ID_SECONDARY_TDM_TX_3:
+	case AFE_PORT_ID_SECONDARY_TDM_TX_4:
+	case AFE_PORT_ID_SECONDARY_TDM_TX_5:
+	case AFE_PORT_ID_SECONDARY_TDM_TX_6:
+	case AFE_PORT_ID_SECONDARY_TDM_TX_7:
+	case AFE_PORT_ID_TERTIARY_TDM_TX:
+	case AFE_PORT_ID_TERTIARY_TDM_TX_1:
+	case AFE_PORT_ID_TERTIARY_TDM_TX_2:
+	case AFE_PORT_ID_TERTIARY_TDM_TX_3:
+	case AFE_PORT_ID_TERTIARY_TDM_TX_4:
+	case AFE_PORT_ID_TERTIARY_TDM_TX_5:
+	case AFE_PORT_ID_TERTIARY_TDM_TX_6:
+	case AFE_PORT_ID_TERTIARY_TDM_TX_7:
+	case AFE_PORT_ID_QUATERNARY_TDM_TX:
+	case AFE_PORT_ID_QUATERNARY_TDM_TX_1:
+	case AFE_PORT_ID_QUATERNARY_TDM_TX_2:
+	case AFE_PORT_ID_QUATERNARY_TDM_TX_3:
+	case AFE_PORT_ID_QUATERNARY_TDM_TX_4:
+	case AFE_PORT_ID_QUATERNARY_TDM_TX_5:
+	case AFE_PORT_ID_QUATERNARY_TDM_TX_6:
+	case AFE_PORT_ID_QUATERNARY_TDM_TX_7:
+		tdm_group->nslots_per_frame = slots;
+		tdm_group->slot_width = slot_width;
+		tdm_group->slot_mask = tx_mask & cap_mask;
+		break;
+	default:
+		dev_err(dai->dev, "%s: invalid dai id 0x%x\n",
+			__func__, dai->id);
+		return -EINVAL;
+	}
+
+	return rc;
+}
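+
+/*
+ * Usage sketch, hypothetical machine-driver code (not part of this
+ * patch): configuring an 8-slot, 32-bit frame with the first two RX
+ * slots active through the standard ASoC call that lands in
+ * msm_dai_q6_tdm_set_tdm_slot() above:
+ *
+ *	ret = snd_soc_dai_set_tdm_slot(cpu_dai, 0, 0x3, 8, 32);
+ *	if (ret < 0)
+ *		dev_err(card->dev, "set_tdm_slot failed %d\n", ret);
+ */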
+
+static int msm_dai_q6_tdm_set_sysclk(struct snd_soc_dai *dai,
+				int clk_id, unsigned int freq, int dir)
+{
+	struct msm_dai_q6_tdm_dai_data *dai_data =
+		dev_get_drvdata(dai->dev);
+
+	switch (dai->id) {
+	case AFE_PORT_ID_PRIMARY_TDM_RX:
+	case AFE_PORT_ID_PRIMARY_TDM_RX_1:
+	case AFE_PORT_ID_PRIMARY_TDM_RX_2:
+	case AFE_PORT_ID_PRIMARY_TDM_RX_3:
+	case AFE_PORT_ID_PRIMARY_TDM_RX_4:
+	case AFE_PORT_ID_PRIMARY_TDM_RX_5:
+	case AFE_PORT_ID_PRIMARY_TDM_RX_6:
+	case AFE_PORT_ID_PRIMARY_TDM_RX_7:
+	case AFE_PORT_ID_PRIMARY_TDM_TX:
+	case AFE_PORT_ID_PRIMARY_TDM_TX_1:
+	case AFE_PORT_ID_PRIMARY_TDM_TX_2:
+	case AFE_PORT_ID_PRIMARY_TDM_TX_3:
+	case AFE_PORT_ID_PRIMARY_TDM_TX_4:
+	case AFE_PORT_ID_PRIMARY_TDM_TX_5:
+	case AFE_PORT_ID_PRIMARY_TDM_TX_6:
+	case AFE_PORT_ID_PRIMARY_TDM_TX_7:
+		dai_data->clk_set.clk_freq_in_hz = freq;
+		break;
+	default:
+		return 0;
+	}
+
+	dev_dbg(dai->dev, "%s: dai id = 0x%x group clk_freq %d\n",
+			__func__, dai->id, freq);
+	return 0;
+}
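+
+/*
+ * Note: only the primary TDM ports take their bit-clock rate from
+ * set_sysclk here; the other groups presumably keep the rate parsed from
+ * "qcom,msm-cpudai-tdm-clk-rate" in msm_dai_tdm_q6_probe() (the
+ * per-device clk_set is initialized elsewhere in this patch).
+ * Hypothetical machine-driver call (clk_id is ignored above):
+ *
+ *	snd_soc_dai_set_sysclk(cpu_dai, 0, 12288000, SND_SOC_CLOCK_OUT);
+ */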
+
+static int msm_dai_q6_tdm_set_channel_map(struct snd_soc_dai *dai,
+				unsigned int tx_num, unsigned int *tx_slot,
+				unsigned int rx_num, unsigned int *rx_slot)
+{
+	int rc = 0;
+	struct msm_dai_q6_tdm_dai_data *dai_data =
+		dev_get_drvdata(dai->dev);
+	struct afe_param_id_slot_mapping_cfg *slot_mapping =
+		&dai_data->port_cfg.slot_mapping;
+	int i = 0;
+
+	dev_dbg(dai->dev, "%s: dai id = 0x%x\n", __func__, dai->id);
+
+	switch (dai->id) {
+	case AFE_PORT_ID_PRIMARY_TDM_RX:
+	case AFE_PORT_ID_PRIMARY_TDM_RX_1:
+	case AFE_PORT_ID_PRIMARY_TDM_RX_2:
+	case AFE_PORT_ID_PRIMARY_TDM_RX_3:
+	case AFE_PORT_ID_PRIMARY_TDM_RX_4:
+	case AFE_PORT_ID_PRIMARY_TDM_RX_5:
+	case AFE_PORT_ID_PRIMARY_TDM_RX_6:
+	case AFE_PORT_ID_PRIMARY_TDM_RX_7:
+	case AFE_PORT_ID_SECONDARY_TDM_RX:
+	case AFE_PORT_ID_SECONDARY_TDM_RX_1:
+	case AFE_PORT_ID_SECONDARY_TDM_RX_2:
+	case AFE_PORT_ID_SECONDARY_TDM_RX_3:
+	case AFE_PORT_ID_SECONDARY_TDM_RX_4:
+	case AFE_PORT_ID_SECONDARY_TDM_RX_5:
+	case AFE_PORT_ID_SECONDARY_TDM_RX_6:
+	case AFE_PORT_ID_SECONDARY_TDM_RX_7:
+	case AFE_PORT_ID_TERTIARY_TDM_RX:
+	case AFE_PORT_ID_TERTIARY_TDM_RX_1:
+	case AFE_PORT_ID_TERTIARY_TDM_RX_2:
+	case AFE_PORT_ID_TERTIARY_TDM_RX_3:
+	case AFE_PORT_ID_TERTIARY_TDM_RX_4:
+	case AFE_PORT_ID_TERTIARY_TDM_RX_5:
+	case AFE_PORT_ID_TERTIARY_TDM_RX_6:
+	case AFE_PORT_ID_TERTIARY_TDM_RX_7:
+	case AFE_PORT_ID_QUATERNARY_TDM_RX:
+	case AFE_PORT_ID_QUATERNARY_TDM_RX_1:
+	case AFE_PORT_ID_QUATERNARY_TDM_RX_2:
+	case AFE_PORT_ID_QUATERNARY_TDM_RX_3:
+	case AFE_PORT_ID_QUATERNARY_TDM_RX_4:
+	case AFE_PORT_ID_QUATERNARY_TDM_RX_5:
+	case AFE_PORT_ID_QUATERNARY_TDM_RX_6:
+	case AFE_PORT_ID_QUATERNARY_TDM_RX_7:
+		if (!rx_slot) {
+			dev_err(dai->dev, "%s: rx slot not found\n", __func__);
+			return -EINVAL;
+		}
+		if (rx_num > AFE_PORT_MAX_AUDIO_CHAN_CNT) {
+			dev_err(dai->dev, "%s: invalid rx num %d\n", __func__,
+				rx_num);
+			return -EINVAL;
+		}
+
+		for (i = 0; i < rx_num; i++)
+			slot_mapping->offset[i] = rx_slot[i];
+		for (i = rx_num; i < AFE_PORT_MAX_AUDIO_CHAN_CNT; i++)
+			slot_mapping->offset[i] =
+				AFE_SLOT_MAPPING_OFFSET_INVALID;
+
+		slot_mapping->num_channel = rx_num;
+		break;
+	case AFE_PORT_ID_PRIMARY_TDM_TX:
+	case AFE_PORT_ID_PRIMARY_TDM_TX_1:
+	case AFE_PORT_ID_PRIMARY_TDM_TX_2:
+	case AFE_PORT_ID_PRIMARY_TDM_TX_3:
+	case AFE_PORT_ID_PRIMARY_TDM_TX_4:
+	case AFE_PORT_ID_PRIMARY_TDM_TX_5:
+	case AFE_PORT_ID_PRIMARY_TDM_TX_6:
+	case AFE_PORT_ID_PRIMARY_TDM_TX_7:
+	case AFE_PORT_ID_SECONDARY_TDM_TX:
+	case AFE_PORT_ID_SECONDARY_TDM_TX_1:
+	case AFE_PORT_ID_SECONDARY_TDM_TX_2:
+	case AFE_PORT_ID_SECONDARY_TDM_TX_3:
+	case AFE_PORT_ID_SECONDARY_TDM_TX_4:
+	case AFE_PORT_ID_SECONDARY_TDM_TX_5:
+	case AFE_PORT_ID_SECONDARY_TDM_TX_6:
+	case AFE_PORT_ID_SECONDARY_TDM_TX_7:
+	case AFE_PORT_ID_TERTIARY_TDM_TX:
+	case AFE_PORT_ID_TERTIARY_TDM_TX_1:
+	case AFE_PORT_ID_TERTIARY_TDM_TX_2:
+	case AFE_PORT_ID_TERTIARY_TDM_TX_3:
+	case AFE_PORT_ID_TERTIARY_TDM_TX_4:
+	case AFE_PORT_ID_TERTIARY_TDM_TX_5:
+	case AFE_PORT_ID_TERTIARY_TDM_TX_6:
+	case AFE_PORT_ID_TERTIARY_TDM_TX_7:
+	case AFE_PORT_ID_QUATERNARY_TDM_TX:
+	case AFE_PORT_ID_QUATERNARY_TDM_TX_1:
+	case AFE_PORT_ID_QUATERNARY_TDM_TX_2:
+	case AFE_PORT_ID_QUATERNARY_TDM_TX_3:
+	case AFE_PORT_ID_QUATERNARY_TDM_TX_4:
+	case AFE_PORT_ID_QUATERNARY_TDM_TX_5:
+	case AFE_PORT_ID_QUATERNARY_TDM_TX_6:
+	case AFE_PORT_ID_QUATERNARY_TDM_TX_7:
+		if (!tx_slot) {
+			dev_err(dai->dev, "%s: tx slot not found\n", __func__);
+			return -EINVAL;
+		}
+		if (tx_num > AFE_PORT_MAX_AUDIO_CHAN_CNT) {
+			dev_err(dai->dev, "%s: invalid tx num %d\n", __func__,
+				tx_num);
+			return -EINVAL;
+		}
+
+		for (i = 0; i < tx_num; i++)
+			slot_mapping->offset[i] = tx_slot[i];
+		for (i = tx_num; i < AFE_PORT_MAX_AUDIO_CHAN_CNT; i++)
+			slot_mapping->offset[i] =
+				AFE_SLOT_MAPPING_OFFSET_INVALID;
+
+		slot_mapping->num_channel = tx_num;
+		break;
+	default:
+		dev_err(dai->dev, "%s: invalid dai id 0x%x\n",
+			__func__, dai->id);
+		return -EINVAL;
+	}
+
+	return rc;
+}
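+
+/*
+ * Usage sketch, hypothetical machine-driver code (not part of this
+ * patch): mapping a 2-channel playback stream onto two slot offsets via
+ * the standard ASoC call handled above; the offsets land in
+ * slot_mapping->offset[] and their exact unit is defined by the AFE:
+ *
+ *	unsigned int rx_slot[2] = { 0, 4 };
+ *
+ *	ret = snd_soc_dai_set_channel_map(cpu_dai, 0, NULL, 2, rx_slot);
+ */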
+
+static int msm_dai_q6_tdm_hw_params(struct snd_pcm_substream *substream,
+				struct snd_pcm_hw_params *params,
+				struct snd_soc_dai *dai)
+{
+	struct msm_dai_q6_tdm_dai_data *dai_data =
+		dev_get_drvdata(dai->dev);
+
+	struct afe_param_id_group_device_tdm_cfg *tdm_group =
+		&dai_data->group_cfg.tdm_cfg;
+	struct afe_param_id_tdm_cfg *tdm =
+		&dai_data->port_cfg.tdm;
+	struct afe_param_id_slot_mapping_cfg *slot_mapping =
+		&dai_data->port_cfg.slot_mapping;
+	struct afe_param_id_custom_tdm_header_cfg *custom_tdm_header =
+		&dai_data->port_cfg.custom_tdm_header;
+
+	pr_debug("%s: dev_name: %s\n",
+		__func__, dev_name(dai->dev));
+
+	if ((params_channels(params) == 0) ||
+		(params_channels(params) > 8)) {
+		dev_err(dai->dev, "%s: invalid param channels %d\n",
+			__func__, params_channels(params));
+		return -EINVAL;
+	}
+	switch (params_format(params)) {
+	case SNDRV_PCM_FORMAT_S16_LE:
+		dai_data->bitwidth = 16;
+		break;
+	case SNDRV_PCM_FORMAT_S24_LE:
+	case SNDRV_PCM_FORMAT_S24_3LE:
+		dai_data->bitwidth = 24;
+		break;
+	case SNDRV_PCM_FORMAT_S32_LE:
+		dai_data->bitwidth = 32;
+		break;
+	default:
+		dev_err(dai->dev, "%s: invalid param format 0x%x\n",
+			__func__, params_format(params));
+		return -EINVAL;
+	}
+	dai_data->channels = params_channels(params);
+	dai_data->rate = params_rate(params);
+
+	/*
+	 * update tdm group config param
+	 * NOTE: group config is set to the same as slot config.
+	 */
+	tdm_group->bit_width = tdm_group->slot_width;
+	tdm_group->num_channels = tdm_group->nslots_per_frame;
+	tdm_group->sample_rate = dai_data->rate;
+
+	pr_debug("%s: TDM GROUP:\n"
+		"num_channels=%d sample_rate=%d bit_width=%d\n"
+		"nslots_per_frame=%d slot_width=%d slot_mask=0x%x\n",
+		__func__,
+		tdm_group->num_channels,
+		tdm_group->sample_rate,
+		tdm_group->bit_width,
+		tdm_group->nslots_per_frame,
+		tdm_group->slot_width,
+		tdm_group->slot_mask);
+	pr_debug("%s: TDM GROUP:\n"
+		"port_id[0]=0x%x port_id[1]=0x%x port_id[2]=0x%x port_id[3]=0x%x\n"
+		"port_id[4]=0x%x port_id[5]=0x%x port_id[6]=0x%x port_id[7]=0x%x\n",
+		__func__,
+		tdm_group->port_id[0],
+		tdm_group->port_id[1],
+		tdm_group->port_id[2],
+		tdm_group->port_id[3],
+		tdm_group->port_id[4],
+		tdm_group->port_id[5],
+		tdm_group->port_id[6],
+		tdm_group->port_id[7]);
+
+	/*
+	 * update tdm config param
+	 * NOTE: channels/rate/bitwidth are per stream property
+	 */
+	tdm->num_channels = dai_data->channels;
+	tdm->sample_rate = dai_data->rate;
+	tdm->bit_width = dai_data->bitwidth;
+	/*
+	 * port slot config is the same as group slot config
+	 * port slot mask should be set according to offset
+	 */
+	tdm->nslots_per_frame = tdm_group->nslots_per_frame;
+	tdm->slot_width = tdm_group->slot_width;
+	tdm->slot_mask = tdm_group->slot_mask;
+
+	pr_debug("%s: TDM:\n"
+		"num_channels=%d sample_rate=%d bit_width=%d\n"
+		"nslots_per_frame=%d slot_width=%d slot_mask=0x%x\n"
+		"data_format=0x%x sync_mode=0x%x sync_src=0x%x\n"
+		"data_out=0x%x invert_sync=0x%x data_delay=0x%x\n",
+		__func__,
+		tdm->num_channels,
+		tdm->sample_rate,
+		tdm->bit_width,
+		tdm->nslots_per_frame,
+		tdm->slot_width,
+		tdm->slot_mask,
+		tdm->data_format,
+		tdm->sync_mode,
+		tdm->sync_src,
+		tdm->ctrl_data_out_enable,
+		tdm->ctrl_invert_sync_pulse,
+		tdm->ctrl_sync_data_delay);
+
+	/*
+	 * update slot mapping config param
+	 * NOTE: channels/rate/bitwidth are per stream property
+	 */
+	slot_mapping->bitwidth = dai_data->bitwidth;
+
+	pr_debug("%s: SLOT MAPPING:\n"
+		"num_channel=%d bitwidth=%d data_align=0x%x\n",
+		__func__,
+		slot_mapping->num_channel,
+		slot_mapping->bitwidth,
+		slot_mapping->data_align_type);
+	pr_debug("%s: SLOT MAPPING:\n"
+		"offset[0]=0x%x offset[1]=0x%x offset[2]=0x%x offset[3]=0x%x\n"
+		"offset[4]=0x%x offset[5]=0x%x offset[6]=0x%x offset[7]=0x%x\n",
+		__func__,
+		slot_mapping->offset[0],
+		slot_mapping->offset[1],
+		slot_mapping->offset[2],
+		slot_mapping->offset[3],
+		slot_mapping->offset[4],
+		slot_mapping->offset[5],
+		slot_mapping->offset[6],
+		slot_mapping->offset[7]);
+
+	/*
+	 * update custom header config param
+	 * NOTE: channels/rate/bitwidth are per playback stream property.
+	 * custom tdm header only applicable to playback stream.
+	 */
+	if (custom_tdm_header->header_type !=
+		AFE_CUSTOM_TDM_HEADER_TYPE_INVALID) {
+		pr_debug("%s: CUSTOM TDM HEADER:\n"
+			"start_offset=0x%x header_width=%d\n"
+			"num_frame_repeat=%d header_type=0x%x\n",
+			__func__,
+			custom_tdm_header->start_offset,
+			custom_tdm_header->header_width,
+			custom_tdm_header->num_frame_repeat,
+			custom_tdm_header->header_type);
+		pr_debug("%s: CUSTOM TDM HEADER:\n"
+			"header[0]=0x%x header[1]=0x%x header[2]=0x%x header[3]=0x%x\n"
+			"header[4]=0x%x header[5]=0x%x header[6]=0x%x header[7]=0x%x\n",
+			__func__,
+			custom_tdm_header->header[0],
+			custom_tdm_header->header[1],
+			custom_tdm_header->header[2],
+			custom_tdm_header->header[3],
+			custom_tdm_header->header[4],
+			custom_tdm_header->header[5],
+			custom_tdm_header->header[6],
+			custom_tdm_header->header[7]);
+	}
+
+	return 0;
+}
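+
+/*
+ * Worked example for the group configuration above (illustrative): an
+ * 8-slot frame with 32-bit slots at 48 kHz needs a bit clock of
+ *
+ *	48000 * 8 * 32 = 12288000 Hz (12.288 MHz)
+ *
+ * which is the sort of value "qcom,msm-cpudai-tdm-clk-rate" (or
+ * set_sysclk on the primary group) is expected to provide.
+ */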
+
+static int msm_dai_q6_tdm_prepare(struct snd_pcm_substream *substream,
+		struct snd_soc_dai *dai)
+{
+	int rc = 0;
+	struct msm_dai_q6_tdm_dai_data *dai_data =
+		dev_get_drvdata(dai->dev);
+	u16 group_id = dai_data->group_cfg.tdm_cfg.group_id;
+	int group_idx = 0;
+	atomic_t *group_ref = NULL;
+
+	group_idx = msm_dai_q6_get_group_idx(dai->id);
+	if (group_idx < 0) {
+		dev_err(dai->dev, "%s port id 0x%x not supported\n",
+			__func__, dai->id);
+		return -EINVAL;
+	}
+
+	mutex_lock(&tdm_mutex);
+
+	group_ref = &tdm_group_ref[group_idx];
+
+	if (!test_bit(STATUS_PORT_STARTED, dai_data->status_mask)) {
+		/*
+		 * PORT START should be set if prepare is called
+		 * in the active state.
+		 */
+		if (atomic_read(group_ref) == 0) {
+			/*
+			 * TX and RX share the same clk. AFE clk is enabled
+			 * per group to simplify the logic. DSP will monitor
+			 * the clk count.
+			 */
+			rc = msm_dai_q6_tdm_set_clk(dai_data,
+				dai->id, true);
+			if (IS_ERR_VALUE(rc)) {
+				dev_err(dai->dev, "%s: fail to enable AFE clk 0x%x\n",
+					__func__, dai->id);
+				goto rtn;
+			}
+
+			/*
+			 * if only one port, don't do group enable as there
+			 * is no group need for only one port
+			 */
+			if (dai_data->num_group_ports > 1) {
+				rc = afe_port_group_enable(group_id,
+					&dai_data->group_cfg, true);
+				if (IS_ERR_VALUE(rc)) {
+					dev_err(dai->dev,
+					"%s: fail to enable AFE group 0x%x\n",
+					__func__, group_id);
+					goto rtn;
+				}
+			}
+		}
+
+		rc = afe_tdm_port_start(dai->id, &dai_data->port_cfg,
+			dai_data->rate, dai_data->num_group_ports);
+		if (IS_ERR_VALUE(rc)) {
+			if (atomic_read(group_ref) == 0) {
+				afe_port_group_enable(group_id,
+					NULL, false);
+				msm_dai_q6_tdm_set_clk(dai_data,
+					dai->id, false);
+			}
+			dev_err(dai->dev, "%s: fail to open AFE port 0x%x\n",
+				__func__, dai->id);
+		} else {
+			set_bit(STATUS_PORT_STARTED,
+				dai_data->status_mask);
+			atomic_inc(group_ref);
+		}
+
+		/* TODO: need to monitor PCM/MI2S/TDM HW status */
+		/* NOTE: AFE should error out on HW resource contention */
+
+	}
+
+rtn:
+	mutex_unlock(&tdm_mutex);
+	return rc;
+}
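+
+/*
+ * tdm_group_ref[] counts the started ports of each TDM group, under
+ * tdm_mutex: msm_dai_q6_tdm_prepare() above brings up the AFE clock (and
+ * the port group, when more than one port shares it) on the 0 -> 1
+ * transition, and msm_dai_q6_tdm_shutdown() below releases both again
+ * once the count drops back to 0.
+ */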
+
+static void msm_dai_q6_tdm_shutdown(struct snd_pcm_substream *substream,
+				     struct snd_soc_dai *dai)
+{
+	int rc = 0;
+	struct msm_dai_q6_tdm_dai_data *dai_data =
+		dev_get_drvdata(dai->dev);
+	u16 group_id = dai_data->group_cfg.tdm_cfg.group_id;
+	int group_idx = 0;
+	atomic_t *group_ref = NULL;
+
+	group_idx = msm_dai_q6_get_group_idx(dai->id);
+	if (group_idx < 0) {
+		dev_err(dai->dev, "%s: port id 0x%x not supported\n",
+			__func__, dai->id);
+		return;
+	}
+
+	mutex_lock(&tdm_mutex);
+
+	group_ref = &tdm_group_ref[group_idx];
+
+	if (test_bit(STATUS_PORT_STARTED, dai_data->status_mask)) {
+		rc = afe_close(dai->id);
+		if (IS_ERR_VALUE(rc)) {
+			dev_err(dai->dev, "%s: fail to close AFE port 0x%x\n",
+				__func__, dai->id);
+		}
+		atomic_dec(group_ref);
+		clear_bit(STATUS_PORT_STARTED,
+			dai_data->status_mask);
+
+		if (atomic_read(group_ref) == 0) {
+			rc = afe_port_group_enable(group_id,
+				NULL, false);
+			if (IS_ERR_VALUE(rc)) {
+				dev_err(dai->dev, "%s: fail to disable AFE group 0x%x\n",
+					__func__, group_id);
+			}
+			rc = msm_dai_q6_tdm_set_clk(dai_data,
+				dai->id, false);
+			if (IS_ERR_VALUE(rc)) {
+				dev_err(dai->dev, "%s: fail to disable AFE clk 0x%x\n",
+					__func__, dai->id);
+			}
+		}
+
+		/* TODO: need to monitor PCM/MI2S/TDM HW status */
+		/* NOTE: AFE should error out on HW resource contention */
+
+	}
+
+	mutex_unlock(&tdm_mutex);
+}
+
+static struct snd_soc_dai_ops msm_dai_q6_tdm_ops = {
+	.prepare          = msm_dai_q6_tdm_prepare,
+	.hw_params        = msm_dai_q6_tdm_hw_params,
+	.set_tdm_slot     = msm_dai_q6_tdm_set_tdm_slot,
+	.set_channel_map  = msm_dai_q6_tdm_set_channel_map,
+	.set_sysclk       = msm_dai_q6_tdm_set_sysclk,
+	.shutdown         = msm_dai_q6_tdm_shutdown,
+};
+
+static struct snd_soc_dai_driver msm_dai_q6_tdm_dai[] = {
+	{
+		.playback = {
+			.stream_name = "Primary TDM0 Playback",
+			.aif_name = "PRI_TDM_RX_0",
+			.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |
+				SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000 |
+				SNDRV_PCM_RATE_176400 | SNDRV_PCM_RATE_352800,
+			.formats = SNDRV_PCM_FMTBIT_S16_LE |
+				   SNDRV_PCM_FMTBIT_S24_LE |
+				   SNDRV_PCM_FMTBIT_S32_LE,
+			.channels_min = 1,
+			.channels_max = 8,
+			.rate_min = 8000,
+			.rate_max = 352800,
+		},
+		.ops = &msm_dai_q6_tdm_ops,
+		.id = AFE_PORT_ID_PRIMARY_TDM_RX,
+		.probe = msm_dai_q6_dai_tdm_probe,
+		.remove = msm_dai_q6_dai_tdm_remove,
+	},
+	{
+		.playback = {
+			.stream_name = "Primary TDM1 Playback",
+			.aif_name = "PRI_TDM_RX_1",
+			.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |
+				SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000 |
+				SNDRV_PCM_RATE_176400 | SNDRV_PCM_RATE_352800,
+			.formats = SNDRV_PCM_FMTBIT_S16_LE |
+				   SNDRV_PCM_FMTBIT_S24_LE |
+				   SNDRV_PCM_FMTBIT_S32_LE,
+			.channels_min = 1,
+			.channels_max = 8,
+			.rate_min = 8000,
+			.rate_max = 352800,
+		},
+		.ops = &msm_dai_q6_tdm_ops,
+		.id = AFE_PORT_ID_PRIMARY_TDM_RX_1,
+		.probe = msm_dai_q6_dai_tdm_probe,
+		.remove = msm_dai_q6_dai_tdm_remove,
+	},
+	{
+		.playback = {
+			.stream_name = "Primary TDM2 Playback",
+			.aif_name = "PRI_TDM_RX_2",
+			.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |
+				SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000 |
+				SNDRV_PCM_RATE_176400 | SNDRV_PCM_RATE_352800,
+			.formats = SNDRV_PCM_FMTBIT_S16_LE |
+				   SNDRV_PCM_FMTBIT_S24_LE |
+				   SNDRV_PCM_FMTBIT_S32_LE,
+			.channels_min = 1,
+			.channels_max = 8,
+			.rate_min = 8000,
+			.rate_max = 352800,
+		},
+		.ops = &msm_dai_q6_tdm_ops,
+		.id = AFE_PORT_ID_PRIMARY_TDM_RX_2,
+		.probe = msm_dai_q6_dai_tdm_probe,
+		.remove = msm_dai_q6_dai_tdm_remove,
+	},
+	{
+		.playback = {
+			.stream_name = "Primary TDM3 Playback",
+			.aif_name = "PRI_TDM_RX_3",
+			.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |
+				SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000 |
+				SNDRV_PCM_RATE_176400 | SNDRV_PCM_RATE_352800,
+			.formats = SNDRV_PCM_FMTBIT_S16_LE |
+				   SNDRV_PCM_FMTBIT_S24_LE |
+				   SNDRV_PCM_FMTBIT_S32_LE,
+			.channels_min = 1,
+			.channels_max = 8,
+			.rate_min = 8000,
+			.rate_max = 352800,
+		},
+		.ops = &msm_dai_q6_tdm_ops,
+		.id = AFE_PORT_ID_PRIMARY_TDM_RX_3,
+		.probe = msm_dai_q6_dai_tdm_probe,
+		.remove = msm_dai_q6_dai_tdm_remove,
+	},
+	{
+		.playback = {
+			.stream_name = "Primary TDM4 Playback",
+			.aif_name = "PRI_TDM_RX_4",
+			.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |
+				SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000 |
+				SNDRV_PCM_RATE_176400 | SNDRV_PCM_RATE_352800,
+			.formats = SNDRV_PCM_FMTBIT_S16_LE |
+				   SNDRV_PCM_FMTBIT_S24_LE |
+				   SNDRV_PCM_FMTBIT_S32_LE,
+			.channels_min = 1,
+			.channels_max = 8,
+			.rate_min = 8000,
+			.rate_max = 352800,
+		},
+		.ops = &msm_dai_q6_tdm_ops,
+		.id = AFE_PORT_ID_PRIMARY_TDM_RX_4,
+		.probe = msm_dai_q6_dai_tdm_probe,
+		.remove = msm_dai_q6_dai_tdm_remove,
+	},
+	{
+		.playback = {
+			.stream_name = "Primary TDM5 Playback",
+			.aif_name = "PRI_TDM_RX_5",
+			.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |
+				SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000 |
+				SNDRV_PCM_RATE_176400 | SNDRV_PCM_RATE_352800,
+			.formats = SNDRV_PCM_FMTBIT_S16_LE |
+				   SNDRV_PCM_FMTBIT_S24_LE |
+				   SNDRV_PCM_FMTBIT_S32_LE,
+			.channels_min = 1,
+			.channels_max = 8,
+			.rate_min = 8000,
+			.rate_max = 352800,
+		},
+		.ops = &msm_dai_q6_tdm_ops,
+		.id = AFE_PORT_ID_PRIMARY_TDM_RX_5,
+		.probe = msm_dai_q6_dai_tdm_probe,
+		.remove = msm_dai_q6_dai_tdm_remove,
+	},
+	{
+		.playback = {
+			.stream_name = "Primary TDM6 Playback",
+			.aif_name = "PRI_TDM_RX_6",
+			.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |
+				SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000 |
+				SNDRV_PCM_RATE_176400 | SNDRV_PCM_RATE_352800,
+			.formats = SNDRV_PCM_FMTBIT_S16_LE |
+				   SNDRV_PCM_FMTBIT_S24_LE |
+				   SNDRV_PCM_FMTBIT_S32_LE,
+			.channels_min = 1,
+			.channels_max = 8,
+			.rate_min = 8000,
+			.rate_max = 352800,
+		},
+		.ops = &msm_dai_q6_tdm_ops,
+		.id = AFE_PORT_ID_PRIMARY_TDM_RX_6,
+		.probe = msm_dai_q6_dai_tdm_probe,
+		.remove = msm_dai_q6_dai_tdm_remove,
+	},
+	{
+		.playback = {
+			.stream_name = "Primary TDM7 Playback",
+			.aif_name = "PRI_TDM_RX_7",
+			.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |
+				SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000 |
+				SNDRV_PCM_RATE_176400 | SNDRV_PCM_RATE_352800,
+			.formats = SNDRV_PCM_FMTBIT_S16_LE |
+				   SNDRV_PCM_FMTBIT_S24_LE |
+				   SNDRV_PCM_FMTBIT_S32_LE,
+			.channels_min = 1,
+			.channels_max = 8,
+			.rate_min = 8000,
+			.rate_max = 352800,
+		},
+		.ops = &msm_dai_q6_tdm_ops,
+		.id = AFE_PORT_ID_PRIMARY_TDM_RX_7,
+		.probe = msm_dai_q6_dai_tdm_probe,
+		.remove = msm_dai_q6_dai_tdm_remove,
+	},
+	{
+		.capture = {
+			.stream_name = "Primary TDM0 Capture",
+			.aif_name = "PRI_TDM_TX_0",
+			.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |
+				SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000 |
+				SNDRV_PCM_RATE_176400 | SNDRV_PCM_RATE_352800,
+			.formats = SNDRV_PCM_FMTBIT_S16_LE |
+				   SNDRV_PCM_FMTBIT_S24_LE |
+				   SNDRV_PCM_FMTBIT_S32_LE,
+			.channels_min = 1,
+			.channels_max = 8,
+			.rate_min = 8000,
+			.rate_max = 352800,
+		},
+		.ops = &msm_dai_q6_tdm_ops,
+		.id = AFE_PORT_ID_PRIMARY_TDM_TX,
+		.probe = msm_dai_q6_dai_tdm_probe,
+		.remove = msm_dai_q6_dai_tdm_remove,
+	},
+	{
+		.capture = {
+			.stream_name = "Primary TDM1 Capture",
+			.aif_name = "PRI_TDM_TX_1",
+			.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |
+				SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000 |
+				SNDRV_PCM_RATE_176400 | SNDRV_PCM_RATE_352800,
+			.formats = SNDRV_PCM_FMTBIT_S16_LE |
+				   SNDRV_PCM_FMTBIT_S24_LE |
+				   SNDRV_PCM_FMTBIT_S32_LE,
+			.channels_min = 1,
+			.channels_max = 8,
+			.rate_min = 8000,
+			.rate_max = 352800,
+		},
+		.ops = &msm_dai_q6_tdm_ops,
+		.id = AFE_PORT_ID_PRIMARY_TDM_TX_1,
+		.probe = msm_dai_q6_dai_tdm_probe,
+		.remove = msm_dai_q6_dai_tdm_remove,
+	},
+	{
+		.capture = {
+			.stream_name = "Primary TDM2 Capture",
+			.aif_name = "PRI_TDM_TX_2",
+			.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |
+				SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000 |
+				SNDRV_PCM_RATE_176400 | SNDRV_PCM_RATE_352800,
+			.formats = SNDRV_PCM_FMTBIT_S16_LE |
+				   SNDRV_PCM_FMTBIT_S24_LE |
+				   SNDRV_PCM_FMTBIT_S32_LE,
+			.channels_min = 1,
+			.channels_max = 8,
+			.rate_min = 8000,
+			.rate_max = 352800,
+		},
+		.ops = &msm_dai_q6_tdm_ops,
+		.id = AFE_PORT_ID_PRIMARY_TDM_TX_2,
+		.probe = msm_dai_q6_dai_tdm_probe,
+		.remove = msm_dai_q6_dai_tdm_remove,
+	},
+	{
+		.capture = {
+			.stream_name = "Primary TDM3 Capture",
+			.aif_name = "PRI_TDM_TX_3",
+			.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |
+				SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000 |
+				SNDRV_PCM_RATE_176400 | SNDRV_PCM_RATE_352800,
+			.formats = SNDRV_PCM_FMTBIT_S16_LE |
+				   SNDRV_PCM_FMTBIT_S24_LE |
+				   SNDRV_PCM_FMTBIT_S32_LE,
+			.channels_min = 1,
+			.channels_max = 8,
+			.rate_min = 8000,
+			.rate_max = 352800,
+		},
+		.ops = &msm_dai_q6_tdm_ops,
+		.id = AFE_PORT_ID_PRIMARY_TDM_TX_3,
+		.probe = msm_dai_q6_dai_tdm_probe,
+		.remove = msm_dai_q6_dai_tdm_remove,
+	},
+	{
+		.capture = {
+			.stream_name = "Primary TDM4 Capture",
+			.aif_name = "PRI_TDM_TX_4",
+			.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |
+				SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000 |
+				SNDRV_PCM_RATE_176400 | SNDRV_PCM_RATE_352800,
+			.formats = SNDRV_PCM_FMTBIT_S16_LE |
+				   SNDRV_PCM_FMTBIT_S24_LE |
+				   SNDRV_PCM_FMTBIT_S32_LE,
+			.channels_min = 1,
+			.channels_max = 8,
+			.rate_min = 8000,
+			.rate_max = 352800,
+		},
+		.ops = &msm_dai_q6_tdm_ops,
+		.id = AFE_PORT_ID_PRIMARY_TDM_TX_4,
+		.probe = msm_dai_q6_dai_tdm_probe,
+		.remove = msm_dai_q6_dai_tdm_remove,
+	},
+	{
+		.capture = {
+			.stream_name = "Primary TDM5 Capture",
+			.aif_name = "PRI_TDM_TX_5",
+			.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |
+				SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000 |
+				SNDRV_PCM_RATE_176400 | SNDRV_PCM_RATE_352800,
+			.formats = SNDRV_PCM_FMTBIT_S16_LE |
+				   SNDRV_PCM_FMTBIT_S24_LE |
+				   SNDRV_PCM_FMTBIT_S32_LE,
+			.channels_min = 1,
+			.channels_max = 8,
+			.rate_min = 8000,
+			.rate_max = 352800,
+		},
+		.ops = &msm_dai_q6_tdm_ops,
+		.id = AFE_PORT_ID_PRIMARY_TDM_TX_5,
+		.probe = msm_dai_q6_dai_tdm_probe,
+		.remove = msm_dai_q6_dai_tdm_remove,
+	},
+	{
+		.capture = {
+			.stream_name = "Primary TDM6 Capture",
+			.aif_name = "PRI_TDM_TX_6",
+			.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |
+				SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000 |
+				SNDRV_PCM_RATE_176400 | SNDRV_PCM_RATE_352800,
+			.formats = SNDRV_PCM_FMTBIT_S16_LE |
+				   SNDRV_PCM_FMTBIT_S24_LE |
+				   SNDRV_PCM_FMTBIT_S32_LE,
+			.channels_min = 1,
+			.channels_max = 8,
+			.rate_min = 8000,
+			.rate_max = 352800,
+		},
+		.ops = &msm_dai_q6_tdm_ops,
+		.id = AFE_PORT_ID_PRIMARY_TDM_TX_6,
+		.probe = msm_dai_q6_dai_tdm_probe,
+		.remove = msm_dai_q6_dai_tdm_remove,
+	},
+	{
+		.capture = {
+			.stream_name = "Primary TDM7 Capture",
+			.aif_name = "PRI_TDM_TX_7",
+			.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |
+				SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000 |
+				SNDRV_PCM_RATE_176400 | SNDRV_PCM_RATE_352800,
+			.formats = SNDRV_PCM_FMTBIT_S16_LE |
+				   SNDRV_PCM_FMTBIT_S24_LE |
+				   SNDRV_PCM_FMTBIT_S32_LE,
+			.channels_min = 1,
+			.channels_max = 8,
+			.rate_min = 8000,
+			.rate_max = 352800,
+		},
+		.ops = &msm_dai_q6_tdm_ops,
+		.id = AFE_PORT_ID_PRIMARY_TDM_TX_7,
+		.probe = msm_dai_q6_dai_tdm_probe,
+		.remove = msm_dai_q6_dai_tdm_remove,
+	},
+	{
+		.playback = {
+			.stream_name = "Secondary TDM0 Playback",
+			.aif_name = "SEC_TDM_RX_0",
+			.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |
+				SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000 |
+				SNDRV_PCM_RATE_176400 | SNDRV_PCM_RATE_352800,
+			.formats = SNDRV_PCM_FMTBIT_S16_LE |
+				   SNDRV_PCM_FMTBIT_S24_LE |
+				   SNDRV_PCM_FMTBIT_S32_LE,
+			.channels_min = 1,
+			.channels_max = 8,
+			.rate_min = 8000,
+			.rate_max = 352800,
+		},
+		.ops = &msm_dai_q6_tdm_ops,
+		.id = AFE_PORT_ID_SECONDARY_TDM_RX,
+		.probe = msm_dai_q6_dai_tdm_probe,
+		.remove = msm_dai_q6_dai_tdm_remove,
+	},
+	{
+		.playback = {
+			.stream_name = "Secondary TDM1 Playback",
+			.aif_name = "SEC_TDM_RX_1",
+			.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |
+				SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000 |
+				SNDRV_PCM_RATE_176400 | SNDRV_PCM_RATE_352800,
+			.formats = SNDRV_PCM_FMTBIT_S16_LE |
+				   SNDRV_PCM_FMTBIT_S24_LE |
+				   SNDRV_PCM_FMTBIT_S32_LE,
+			.channels_min = 1,
+			.channels_max = 8,
+			.rate_min = 8000,
+			.rate_max = 352800,
+		},
+		.ops = &msm_dai_q6_tdm_ops,
+		.id = AFE_PORT_ID_SECONDARY_TDM_RX_1,
+		.probe = msm_dai_q6_dai_tdm_probe,
+		.remove = msm_dai_q6_dai_tdm_remove,
+	},
+	{
+		.playback = {
+			.stream_name = "Secondary TDM2 Playback",
+			.aif_name = "SEC_TDM_RX_2",
+			.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |
+				SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000 |
+				SNDRV_PCM_RATE_176400 | SNDRV_PCM_RATE_352800,
+			.formats = SNDRV_PCM_FMTBIT_S16_LE |
+				   SNDRV_PCM_FMTBIT_S24_LE |
+				   SNDRV_PCM_FMTBIT_S32_LE,
+			.channels_min = 1,
+			.channels_max = 8,
+			.rate_min = 8000,
+			.rate_max = 352800,
+		},
+		.ops = &msm_dai_q6_tdm_ops,
+		.id = AFE_PORT_ID_SECONDARY_TDM_RX_2,
+		.probe = msm_dai_q6_dai_tdm_probe,
+		.remove = msm_dai_q6_dai_tdm_remove,
+	},
+	{
+		.playback = {
+			.stream_name = "Secondary TDM3 Playback",
+			.aif_name = "SEC_TDM_RX_3",
+			.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |
+				SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000 |
+				SNDRV_PCM_RATE_176400 | SNDRV_PCM_RATE_352800,
+			.formats = SNDRV_PCM_FMTBIT_S16_LE |
+				   SNDRV_PCM_FMTBIT_S24_LE |
+				   SNDRV_PCM_FMTBIT_S32_LE,
+			.channels_min = 1,
+			.channels_max = 8,
+			.rate_min = 8000,
+			.rate_max = 352800,
+		},
+		.ops = &msm_dai_q6_tdm_ops,
+		.id = AFE_PORT_ID_SECONDARY_TDM_RX_3,
+		.probe = msm_dai_q6_dai_tdm_probe,
+		.remove = msm_dai_q6_dai_tdm_remove,
+	},
+	{
+		.playback = {
+			.stream_name = "Secondary TDM4 Playback",
+			.aif_name = "SEC_TDM_RX_4",
+			.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |
+				SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000 |
+				SNDRV_PCM_RATE_176400 | SNDRV_PCM_RATE_352800,
+			.formats = SNDRV_PCM_FMTBIT_S16_LE |
+				   SNDRV_PCM_FMTBIT_S24_LE |
+				   SNDRV_PCM_FMTBIT_S32_LE,
+			.channels_min = 1,
+			.channels_max = 8,
+			.rate_min = 8000,
+			.rate_max = 352800,
+		},
+		.ops = &msm_dai_q6_tdm_ops,
+		.id = AFE_PORT_ID_SECONDARY_TDM_RX_4,
+		.probe = msm_dai_q6_dai_tdm_probe,
+		.remove = msm_dai_q6_dai_tdm_remove,
+	},
+	{
+		.playback = {
+			.stream_name = "Secondary TDM5 Playback",
+			.aif_name = "SEC_TDM_RX_5",
+			.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |
+				SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000 |
+				SNDRV_PCM_RATE_176400 | SNDRV_PCM_RATE_352800,
+			.formats = SNDRV_PCM_FMTBIT_S16_LE |
+				   SNDRV_PCM_FMTBIT_S24_LE |
+				   SNDRV_PCM_FMTBIT_S32_LE,
+			.channels_min = 1,
+			.channels_max = 8,
+			.rate_min = 8000,
+			.rate_max = 352800,
+		},
+		.ops = &msm_dai_q6_tdm_ops,
+		.id = AFE_PORT_ID_SECONDARY_TDM_RX_5,
+		.probe = msm_dai_q6_dai_tdm_probe,
+		.remove = msm_dai_q6_dai_tdm_remove,
+	},
+	{
+		.playback = {
+			.stream_name = "Secondary TDM6 Playback",
+			.aif_name = "SEC_TDM_RX_6",
+			.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |
+				SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000 |
+				SNDRV_PCM_RATE_176400 | SNDRV_PCM_RATE_352800,
+			.formats = SNDRV_PCM_FMTBIT_S16_LE |
+				   SNDRV_PCM_FMTBIT_S24_LE |
+				   SNDRV_PCM_FMTBIT_S32_LE,
+			.channels_min = 1,
+			.channels_max = 8,
+			.rate_min = 8000,
+			.rate_max = 352800,
+		},
+		.ops = &msm_dai_q6_tdm_ops,
+		.id = AFE_PORT_ID_SECONDARY_TDM_RX_6,
+		.probe = msm_dai_q6_dai_tdm_probe,
+		.remove = msm_dai_q6_dai_tdm_remove,
+	},
+	{
+		.playback = {
+			.stream_name = "Secondary TDM7 Playback",
+			.aif_name = "SEC_TDM_RX_7",
+			.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |
+				SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000 |
+				SNDRV_PCM_RATE_176400 | SNDRV_PCM_RATE_352800,
+			.formats = SNDRV_PCM_FMTBIT_S16_LE |
+				   SNDRV_PCM_FMTBIT_S24_LE |
+				   SNDRV_PCM_FMTBIT_S32_LE,
+			.channels_min = 1,
+			.channels_max = 8,
+			.rate_min = 8000,
+			.rate_max = 352800,
+		},
+		.ops = &msm_dai_q6_tdm_ops,
+		.id = AFE_PORT_ID_SECONDARY_TDM_RX_7,
+		.probe = msm_dai_q6_dai_tdm_probe,
+		.remove = msm_dai_q6_dai_tdm_remove,
+	},
+	{
+		.capture = {
+			.stream_name = "Secondary TDM0 Capture",
+			.aif_name = "SEC_TDM_TX_0",
+			.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |
+				SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000 |
+				SNDRV_PCM_RATE_176400 | SNDRV_PCM_RATE_352800,
+			.formats = SNDRV_PCM_FMTBIT_S16_LE |
+				   SNDRV_PCM_FMTBIT_S24_LE |
+				   SNDRV_PCM_FMTBIT_S32_LE,
+			.channels_min = 1,
+			.channels_max = 8,
+			.rate_min = 8000,
+			.rate_max = 352800,
+		},
+		.ops = &msm_dai_q6_tdm_ops,
+		.id = AFE_PORT_ID_SECONDARY_TDM_TX,
+		.probe = msm_dai_q6_dai_tdm_probe,
+		.remove = msm_dai_q6_dai_tdm_remove,
+	},
+	{
+		.capture = {
+			.stream_name = "Secondary TDM1 Capture",
+			.aif_name = "SEC_TDM_TX_1",
+			.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |
+				SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000 |
+				SNDRV_PCM_RATE_176400 | SNDRV_PCM_RATE_352800,
+			.formats = SNDRV_PCM_FMTBIT_S16_LE |
+				   SNDRV_PCM_FMTBIT_S24_LE |
+				   SNDRV_PCM_FMTBIT_S32_LE,
+			.channels_min = 1,
+			.channels_max = 8,
+			.rate_min = 8000,
+			.rate_max = 352800,
+		},
+		.ops = &msm_dai_q6_tdm_ops,
+		.id = AFE_PORT_ID_SECONDARY_TDM_TX_1,
+		.probe = msm_dai_q6_dai_tdm_probe,
+		.remove = msm_dai_q6_dai_tdm_remove,
+	},
+	{
+		.capture = {
+			.stream_name = "Secondary TDM2 Capture",
+			.aif_name = "SEC_TDM_TX_2",
+			.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |
+				SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000 |
+				SNDRV_PCM_RATE_176400 | SNDRV_PCM_RATE_352800,
+			.formats = SNDRV_PCM_FMTBIT_S16_LE |
+				   SNDRV_PCM_FMTBIT_S24_LE |
+				   SNDRV_PCM_FMTBIT_S32_LE,
+			.channels_min = 1,
+			.channels_max = 8,
+			.rate_min = 8000,
+			.rate_max = 352800,
+		},
+		.ops = &msm_dai_q6_tdm_ops,
+		.id = AFE_PORT_ID_SECONDARY_TDM_TX_2,
+		.probe = msm_dai_q6_dai_tdm_probe,
+		.remove = msm_dai_q6_dai_tdm_remove,
+	},
+	{
+		.capture = {
+			.stream_name = "Secondary TDM3 Capture",
+			.aif_name = "SEC_TDM_TX_3",
+			.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |
+				SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000 |
+				SNDRV_PCM_RATE_176400 | SNDRV_PCM_RATE_352800,
+			.formats = SNDRV_PCM_FMTBIT_S16_LE |
+				   SNDRV_PCM_FMTBIT_S24_LE |
+				   SNDRV_PCM_FMTBIT_S32_LE,
+			.channels_min = 1,
+			.channels_max = 8,
+			.rate_min = 8000,
+			.rate_max = 352800,
+		},
+		.ops = &msm_dai_q6_tdm_ops,
+		.id = AFE_PORT_ID_SECONDARY_TDM_TX_3,
+		.probe = msm_dai_q6_dai_tdm_probe,
+		.remove = msm_dai_q6_dai_tdm_remove,
+	},
+	{
+		.capture = {
+			.stream_name = "Secondary TDM4 Capture",
+			.aif_name = "SEC_TDM_TX_4",
+			.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |
+				SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000 |
+				SNDRV_PCM_RATE_176400 | SNDRV_PCM_RATE_352800,
+			.formats = SNDRV_PCM_FMTBIT_S16_LE |
+				   SNDRV_PCM_FMTBIT_S24_LE |
+				   SNDRV_PCM_FMTBIT_S32_LE,
+			.channels_min = 1,
+			.channels_max = 8,
+			.rate_min = 8000,
+			.rate_max = 352800,
+		},
+		.ops = &msm_dai_q6_tdm_ops,
+		.id = AFE_PORT_ID_SECONDARY_TDM_TX_4,
+		.probe = msm_dai_q6_dai_tdm_probe,
+		.remove = msm_dai_q6_dai_tdm_remove,
+	},
+	{
+		.capture = {
+			.stream_name = "Secondary TDM5 Capture",
+			.aif_name = "SEC_TDM_TX_5",
+			.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |
+				SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000 |
+				SNDRV_PCM_RATE_176400 | SNDRV_PCM_RATE_352800,
+			.formats = SNDRV_PCM_FMTBIT_S16_LE |
+				   SNDRV_PCM_FMTBIT_S24_LE |
+				   SNDRV_PCM_FMTBIT_S32_LE,
+			.channels_min = 1,
+			.channels_max = 8,
+			.rate_min = 8000,
+			.rate_max = 352800,
+		},
+		.ops = &msm_dai_q6_tdm_ops,
+		.id = AFE_PORT_ID_SECONDARY_TDM_TX_5,
+		.probe = msm_dai_q6_dai_tdm_probe,
+		.remove = msm_dai_q6_dai_tdm_remove,
+	},
+	{
+		.capture = {
+			.stream_name = "Secondary TDM6 Capture",
+			.aif_name = "SEC_TDM_TX_6",
+			.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |
+				SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000 |
+				SNDRV_PCM_RATE_176400 | SNDRV_PCM_RATE_352800,
+			.formats = SNDRV_PCM_FMTBIT_S16_LE |
+				   SNDRV_PCM_FMTBIT_S24_LE |
+				   SNDRV_PCM_FMTBIT_S32_LE,
+			.channels_min = 1,
+			.channels_max = 8,
+			.rate_min = 8000,
+			.rate_max = 352800,
+		},
+		.ops = &msm_dai_q6_tdm_ops,
+		.id = AFE_PORT_ID_SECONDARY_TDM_TX_6,
+		.probe = msm_dai_q6_dai_tdm_probe,
+		.remove = msm_dai_q6_dai_tdm_remove,
+	},
+	{
+		.capture = {
+			.stream_name = "Secondary TDM7 Capture",
+			.aif_name = "SEC_TDM_TX_7",
+			.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |
+				SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000 |
+				SNDRV_PCM_RATE_176400 | SNDRV_PCM_RATE_352800,
+			.formats = SNDRV_PCM_FMTBIT_S16_LE |
+				   SNDRV_PCM_FMTBIT_S24_LE |
+				   SNDRV_PCM_FMTBIT_S32_LE,
+			.channels_min = 1,
+			.channels_max = 8,
+			.rate_min = 8000,
+			.rate_max = 352800,
+		},
+		.ops = &msm_dai_q6_tdm_ops,
+		.id = AFE_PORT_ID_SECONDARY_TDM_TX_7,
+		.probe = msm_dai_q6_dai_tdm_probe,
+		.remove = msm_dai_q6_dai_tdm_remove,
+	},
+	{
+		.playback = {
+			.stream_name = "Tertiary TDM0 Playback",
+			.aif_name = "TERT_TDM_RX_0",
+			.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |
+				SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000 |
+				SNDRV_PCM_RATE_176400 | SNDRV_PCM_RATE_352800,
+			.formats = SNDRV_PCM_FMTBIT_S16_LE |
+				   SNDRV_PCM_FMTBIT_S24_LE |
+				   SNDRV_PCM_FMTBIT_S32_LE,
+			.channels_min = 1,
+			.channels_max = 8,
+			.rate_min = 8000,
+			.rate_max = 352800,
+		},
+		.ops = &msm_dai_q6_tdm_ops,
+		.id = AFE_PORT_ID_TERTIARY_TDM_RX,
+		.probe = msm_dai_q6_dai_tdm_probe,
+		.remove = msm_dai_q6_dai_tdm_remove,
+	},
+	{
+		.playback = {
+			.stream_name = "Tertiary TDM1 Playback",
+			.aif_name = "TERT_TDM_RX_1",
+			.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |
+				SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000 |
+				SNDRV_PCM_RATE_176400 | SNDRV_PCM_RATE_352800,
+			.formats = SNDRV_PCM_FMTBIT_S16_LE |
+				   SNDRV_PCM_FMTBIT_S24_LE |
+				   SNDRV_PCM_FMTBIT_S32_LE,
+			.channels_min = 1,
+			.channels_max = 8,
+			.rate_min = 8000,
+			.rate_max = 352800,
+		},
+		.ops = &msm_dai_q6_tdm_ops,
+		.id = AFE_PORT_ID_TERTIARY_TDM_RX_1,
+		.probe = msm_dai_q6_dai_tdm_probe,
+		.remove = msm_dai_q6_dai_tdm_remove,
+	},
+	{
+		.playback = {
+			.stream_name = "Tertiary TDM2 Playback",
+			.aif_name = "TERT_TDM_RX_2",
+			.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |
+				SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000 |
+				SNDRV_PCM_RATE_176400 | SNDRV_PCM_RATE_352800,
+			.formats = SNDRV_PCM_FMTBIT_S16_LE |
+				   SNDRV_PCM_FMTBIT_S24_LE |
+				   SNDRV_PCM_FMTBIT_S32_LE,
+			.channels_min = 1,
+			.channels_max = 8,
+			.rate_min = 8000,
+			.rate_max = 352800,
+		},
+		.ops = &msm_dai_q6_tdm_ops,
+		.id = AFE_PORT_ID_TERTIARY_TDM_RX_2,
+		.probe = msm_dai_q6_dai_tdm_probe,
+		.remove = msm_dai_q6_dai_tdm_remove,
+	},
+	{
+		.playback = {
+			.stream_name = "Tertiary TDM3 Playback",
+			.aif_name = "TERT_TDM_RX_3",
+			.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |
+				SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000 |
+				SNDRV_PCM_RATE_176400 | SNDRV_PCM_RATE_352800,
+			.formats = SNDRV_PCM_FMTBIT_S16_LE |
+				   SNDRV_PCM_FMTBIT_S24_LE |
+				   SNDRV_PCM_FMTBIT_S32_LE,
+			.channels_min = 1,
+			.channels_max = 8,
+			.rate_min = 8000,
+			.rate_max = 352800,
+		},
+		.ops = &msm_dai_q6_tdm_ops,
+		.id = AFE_PORT_ID_TERTIARY_TDM_RX_3,
+		.probe = msm_dai_q6_dai_tdm_probe,
+		.remove = msm_dai_q6_dai_tdm_remove,
+	},
+	{
+		.playback = {
+			.stream_name = "Tertiary TDM4 Playback",
+			.aif_name = "TERT_TDM_RX_4",
+			.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |
+				SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000 |
+				SNDRV_PCM_RATE_176400 | SNDRV_PCM_RATE_352800,
+			.formats = SNDRV_PCM_FMTBIT_S16_LE |
+				   SNDRV_PCM_FMTBIT_S24_LE |
+				   SNDRV_PCM_FMTBIT_S32_LE,
+			.channels_min = 1,
+			.channels_max = 8,
+			.rate_min = 8000,
+			.rate_max = 352800,
+		},
+		.ops = &msm_dai_q6_tdm_ops,
+		.id = AFE_PORT_ID_TERTIARY_TDM_RX_4,
+		.probe = msm_dai_q6_dai_tdm_probe,
+		.remove = msm_dai_q6_dai_tdm_remove,
+	},
+	{
+		.playback = {
+			.stream_name = "Tertiary TDM5 Playback",
+			.aif_name = "TERT_TDM_RX_5",
+			.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |
+				SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000 |
+				SNDRV_PCM_RATE_176400 | SNDRV_PCM_RATE_352800,
+			.formats = SNDRV_PCM_FMTBIT_S16_LE |
+				   SNDRV_PCM_FMTBIT_S24_LE |
+				   SNDRV_PCM_FMTBIT_S32_LE,
+			.channels_min = 1,
+			.channels_max = 8,
+			.rate_min = 8000,
+			.rate_max = 352800,
+		},
+		.ops = &msm_dai_q6_tdm_ops,
+		.id = AFE_PORT_ID_TERTIARY_TDM_RX_5,
+		.probe = msm_dai_q6_dai_tdm_probe,
+		.remove = msm_dai_q6_dai_tdm_remove,
+	},
+	{
+		.playback = {
+			.stream_name = "Tertiary TDM6 Playback",
+			.aif_name = "TERT_TDM_RX_6",
+			.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |
+				SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000 |
+				SNDRV_PCM_RATE_176400 | SNDRV_PCM_RATE_352800,
+			.formats = SNDRV_PCM_FMTBIT_S16_LE |
+				   SNDRV_PCM_FMTBIT_S24_LE |
+				   SNDRV_PCM_FMTBIT_S32_LE,
+			.channels_min = 1,
+			.channels_max = 8,
+			.rate_min = 8000,
+			.rate_max = 352800,
+		},
+		.ops = &msm_dai_q6_tdm_ops,
+		.id = AFE_PORT_ID_TERTIARY_TDM_RX_6,
+		.probe = msm_dai_q6_dai_tdm_probe,
+		.remove = msm_dai_q6_dai_tdm_remove,
+	},
+	{
+		.playback = {
+			.stream_name = "Tertiary TDM7 Playback",
+			.aif_name = "TERT_TDM_RX_7",
+			.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |
+				SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000 |
+				SNDRV_PCM_RATE_176400 | SNDRV_PCM_RATE_352800,
+			.formats = SNDRV_PCM_FMTBIT_S16_LE |
+				   SNDRV_PCM_FMTBIT_S24_LE |
+				   SNDRV_PCM_FMTBIT_S32_LE,
+			.channels_min = 1,
+			.channels_max = 8,
+			.rate_min = 8000,
+			.rate_max = 352800,
+		},
+		.ops = &msm_dai_q6_tdm_ops,
+		.id = AFE_PORT_ID_TERTIARY_TDM_RX_7,
+		.probe = msm_dai_q6_dai_tdm_probe,
+		.remove = msm_dai_q6_dai_tdm_remove,
+	},
+	{
+		.capture = {
+			.stream_name = "Tertiary TDM0 Capture",
+			.aif_name = "TERT_TDM_TX_0",
+			.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |
+				SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000 |
+				SNDRV_PCM_RATE_176400 | SNDRV_PCM_RATE_352800,
+			.formats = SNDRV_PCM_FMTBIT_S16_LE |
+				   SNDRV_PCM_FMTBIT_S24_LE |
+				   SNDRV_PCM_FMTBIT_S32_LE,
+			.channels_min = 1,
+			.channels_max = 8,
+			.rate_min = 8000,
+			.rate_max = 352800,
+		},
+		.ops = &msm_dai_q6_tdm_ops,
+		.id = AFE_PORT_ID_TERTIARY_TDM_TX,
+		.probe = msm_dai_q6_dai_tdm_probe,
+		.remove = msm_dai_q6_dai_tdm_remove,
+	},
+	{
+		.capture = {
+			.stream_name = "Tertiary TDM1 Capture",
+			.aif_name = "TERT_TDM_TX_1",
+			.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |
+				SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000 |
+				SNDRV_PCM_RATE_176400 | SNDRV_PCM_RATE_352800,
+			.formats = SNDRV_PCM_FMTBIT_S16_LE |
+				   SNDRV_PCM_FMTBIT_S24_LE |
+				   SNDRV_PCM_FMTBIT_S32_LE,
+			.channels_min = 1,
+			.channels_max = 8,
+			.rate_min = 8000,
+			.rate_max = 352800,
+		},
+		.ops = &msm_dai_q6_tdm_ops,
+		.id = AFE_PORT_ID_TERTIARY_TDM_TX_1,
+		.probe = msm_dai_q6_dai_tdm_probe,
+		.remove = msm_dai_q6_dai_tdm_remove,
+	},
+	{
+		.capture = {
+			.stream_name = "Tertiary TDM2 Capture",
+			.aif_name = "TERT_TDM_TX_2",
+			.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |
+				SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000 |
+				SNDRV_PCM_RATE_176400 | SNDRV_PCM_RATE_352800,
+			.formats = SNDRV_PCM_FMTBIT_S16_LE |
+				   SNDRV_PCM_FMTBIT_S24_LE |
+				   SNDRV_PCM_FMTBIT_S32_LE,
+			.channels_min = 1,
+			.channels_max = 8,
+			.rate_min = 8000,
+			.rate_max = 352800,
+		},
+		.ops = &msm_dai_q6_tdm_ops,
+		.id = AFE_PORT_ID_TERTIARY_TDM_TX_2,
+		.probe = msm_dai_q6_dai_tdm_probe,
+		.remove = msm_dai_q6_dai_tdm_remove,
+	},
+	{
+		.capture = {
+			.stream_name = "Tertiary TDM3 Capture",
+			.aif_name = "TERT_TDM_TX_3",
+			.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |
+				SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000 |
+				SNDRV_PCM_RATE_176400 | SNDRV_PCM_RATE_352800,
+			.formats = SNDRV_PCM_FMTBIT_S16_LE |
+				   SNDRV_PCM_FMTBIT_S24_LE |
+				   SNDRV_PCM_FMTBIT_S32_LE,
+			.channels_min = 1,
+			.channels_max = 8,
+			.rate_min = 8000,
+			.rate_max = 352800,
+		},
+		.ops = &msm_dai_q6_tdm_ops,
+		.id = AFE_PORT_ID_TERTIARY_TDM_TX_3,
+		.probe = msm_dai_q6_dai_tdm_probe,
+		.remove = msm_dai_q6_dai_tdm_remove,
+	},
+	{
+		.capture = {
+			.stream_name = "Tertiary TDM4 Capture",
+			.aif_name = "TERT_TDM_TX_4",
+			.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |
+				SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000 |
+				SNDRV_PCM_RATE_176400 | SNDRV_PCM_RATE_352800,
+			.formats = SNDRV_PCM_FMTBIT_S16_LE |
+				   SNDRV_PCM_FMTBIT_S24_LE |
+				   SNDRV_PCM_FMTBIT_S32_LE,
+			.channels_min = 1,
+			.channels_max = 8,
+			.rate_min = 8000,
+			.rate_max = 352800,
+		},
+		.ops = &msm_dai_q6_tdm_ops,
+		.id = AFE_PORT_ID_TERTIARY_TDM_TX_4,
+		.probe = msm_dai_q6_dai_tdm_probe,
+		.remove = msm_dai_q6_dai_tdm_remove,
+	},
+	{
+		.capture = {
+			.stream_name = "Tertiary TDM5 Capture",
+			.aif_name = "TERT_TDM_TX_5",
+			.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |
+				SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000 |
+				SNDRV_PCM_RATE_176400 | SNDRV_PCM_RATE_352800,
+			.formats = SNDRV_PCM_FMTBIT_S16_LE |
+				   SNDRV_PCM_FMTBIT_S24_LE |
+				   SNDRV_PCM_FMTBIT_S32_LE,
+			.channels_min = 1,
+			.channels_max = 8,
+			.rate_min = 8000,
+			.rate_max = 352800,
+		},
+		.ops = &msm_dai_q6_tdm_ops,
+		.id = AFE_PORT_ID_TERTIARY_TDM_TX_5,
+		.probe = msm_dai_q6_dai_tdm_probe,
+		.remove = msm_dai_q6_dai_tdm_remove,
+	},
+	{
+		.capture = {
+			.stream_name = "Tertiary TDM6 Capture",
+			.aif_name = "TERT_TDM_TX_6",
+			.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |
+				SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000 |
+				SNDRV_PCM_RATE_176400 | SNDRV_PCM_RATE_352800,
+			.formats = SNDRV_PCM_FMTBIT_S16_LE |
+				   SNDRV_PCM_FMTBIT_S24_LE |
+				   SNDRV_PCM_FMTBIT_S32_LE,
+			.channels_min = 1,
+			.channels_max = 8,
+			.rate_min = 8000,
+			.rate_max = 352800,
+		},
+		.ops = &msm_dai_q6_tdm_ops,
+		.id = AFE_PORT_ID_TERTIARY_TDM_TX_6,
+		.probe = msm_dai_q6_dai_tdm_probe,
+		.remove = msm_dai_q6_dai_tdm_remove,
+	},
+	{
+		.capture = {
+			.stream_name = "Tertiary TDM7 Capture",
+			.aif_name = "TERT_TDM_TX_7",
+			.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |
+				SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000 |
+				SNDRV_PCM_RATE_176400 | SNDRV_PCM_RATE_352800,
+			.formats = SNDRV_PCM_FMTBIT_S16_LE |
+				   SNDRV_PCM_FMTBIT_S24_LE |
+				   SNDRV_PCM_FMTBIT_S32_LE,
+			.channels_min = 1,
+			.channels_max = 8,
+			.rate_min = 8000,
+			.rate_max = 352800,
+		},
+		.ops = &msm_dai_q6_tdm_ops,
+		.id = AFE_PORT_ID_TERTIARY_TDM_TX_7,
+		.probe = msm_dai_q6_dai_tdm_probe,
+		.remove = msm_dai_q6_dai_tdm_remove,
+	},
+	{
+		.playback = {
+			.stream_name = "Quaternary TDM0 Playback",
+			.aif_name = "QUAT_TDM_RX_0",
+			.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |
+				SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000 |
+				SNDRV_PCM_RATE_176400 | SNDRV_PCM_RATE_352800,
+			.formats = SNDRV_PCM_FMTBIT_S16_LE |
+				   SNDRV_PCM_FMTBIT_S24_LE |
+				   SNDRV_PCM_FMTBIT_S32_LE,
+			.channels_min = 1,
+			.channels_max = 8,
+			.rate_min = 8000,
+			.rate_max = 352800,
+		},
+		.ops = &msm_dai_q6_tdm_ops,
+		.id = AFE_PORT_ID_QUATERNARY_TDM_RX,
+		.probe = msm_dai_q6_dai_tdm_probe,
+		.remove = msm_dai_q6_dai_tdm_remove,
+	},
+	{
+		.playback = {
+			.stream_name = "Quaternary TDM1 Playback",
+			.aif_name = "QUAT_TDM_RX_1",
+			.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |
+				SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000 |
+				SNDRV_PCM_RATE_176400 | SNDRV_PCM_RATE_352800,
+			.formats = SNDRV_PCM_FMTBIT_S16_LE |
+				   SNDRV_PCM_FMTBIT_S24_LE |
+				   SNDRV_PCM_FMTBIT_S32_LE,
+			.channels_min = 1,
+			.channels_max = 8,
+			.rate_min = 8000,
+			.rate_max = 352800,
+		},
+		.ops = &msm_dai_q6_tdm_ops,
+		.id = AFE_PORT_ID_QUATERNARY_TDM_RX_1,
+		.probe = msm_dai_q6_dai_tdm_probe,
+		.remove = msm_dai_q6_dai_tdm_remove,
+	},
+	{
+		.playback = {
+			.stream_name = "Quaternary TDM2 Playback",
+			.aif_name = "QUAT_TDM_RX_2",
+			.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |
+				SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000 |
+				SNDRV_PCM_RATE_176400 | SNDRV_PCM_RATE_352800,
+			.formats = SNDRV_PCM_FMTBIT_S16_LE |
+				   SNDRV_PCM_FMTBIT_S24_LE |
+				   SNDRV_PCM_FMTBIT_S32_LE,
+			.channels_min = 1,
+			.channels_max = 8,
+			.rate_min = 8000,
+			.rate_max = 352800,
+		},
+		.ops = &msm_dai_q6_tdm_ops,
+		.id = AFE_PORT_ID_QUATERNARY_TDM_RX_2,
+		.probe = msm_dai_q6_dai_tdm_probe,
+		.remove = msm_dai_q6_dai_tdm_remove,
+	},
+	{
+		.playback = {
+			.stream_name = "Quaternary TDM3 Playback",
+			.aif_name = "QUAT_TDM_RX_3",
+			.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |
+				SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000 |
+				SNDRV_PCM_RATE_176400 | SNDRV_PCM_RATE_352800,
+			.formats = SNDRV_PCM_FMTBIT_S16_LE |
+				   SNDRV_PCM_FMTBIT_S24_LE |
+				   SNDRV_PCM_FMTBIT_S32_LE,
+			.channels_min = 1,
+			.channels_max = 8,
+			.rate_min = 8000,
+			.rate_max = 352800,
+		},
+		.ops = &msm_dai_q6_tdm_ops,
+		.id = AFE_PORT_ID_QUATERNARY_TDM_RX_3,
+		.probe = msm_dai_q6_dai_tdm_probe,
+		.remove = msm_dai_q6_dai_tdm_remove,
+	},
+	{
+		.playback = {
+			.stream_name = "Quaternary TDM4 Playback",
+			.aif_name = "QUAT_TDM_RX_4",
+			.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |
+				SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000 |
+				SNDRV_PCM_RATE_176400 | SNDRV_PCM_RATE_352800,
+			.formats = SNDRV_PCM_FMTBIT_S16_LE |
+				   SNDRV_PCM_FMTBIT_S24_LE |
+				   SNDRV_PCM_FMTBIT_S32_LE,
+			.channels_min = 1,
+			.channels_max = 8,
+			.rate_min = 8000,
+			.rate_max = 352800,
+		},
+		.ops = &msm_dai_q6_tdm_ops,
+		.id = AFE_PORT_ID_QUATERNARY_TDM_RX_4,
+		.probe = msm_dai_q6_dai_tdm_probe,
+		.remove = msm_dai_q6_dai_tdm_remove,
+	},
+	{
+		.playback = {
+			.stream_name = "Quaternary TDM5 Playback",
+			.aif_name = "QUAT_TDM_RX_5",
+			.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |
+				SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000 |
+				SNDRV_PCM_RATE_176400 | SNDRV_PCM_RATE_352800,
+			.formats = SNDRV_PCM_FMTBIT_S16_LE |
+				   SNDRV_PCM_FMTBIT_S24_LE |
+				   SNDRV_PCM_FMTBIT_S32_LE,
+			.channels_min = 1,
+			.channels_max = 8,
+			.rate_min = 8000,
+			.rate_max = 352800,
+		},
+		.ops = &msm_dai_q6_tdm_ops,
+		.id = AFE_PORT_ID_QUATERNARY_TDM_RX_5,
+		.probe = msm_dai_q6_dai_tdm_probe,
+		.remove = msm_dai_q6_dai_tdm_remove,
+	},
+	{
+		.playback = {
+			.stream_name = "Quaternary TDM6 Playback",
+			.aif_name = "QUAT_TDM_RX_6",
+			.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |
+				SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000 |
+				SNDRV_PCM_RATE_176400 | SNDRV_PCM_RATE_352800,
+			.formats = SNDRV_PCM_FMTBIT_S16_LE |
+				   SNDRV_PCM_FMTBIT_S24_LE |
+				   SNDRV_PCM_FMTBIT_S32_LE,
+			.channels_min = 1,
+			.channels_max = 8,
+			.rate_min = 8000,
+			.rate_max = 352800,
+		},
+		.ops = &msm_dai_q6_tdm_ops,
+		.id = AFE_PORT_ID_QUATERNARY_TDM_RX_6,
+		.probe = msm_dai_q6_dai_tdm_probe,
+		.remove = msm_dai_q6_dai_tdm_remove,
+	},
+	{
+		.playback = {
+			.stream_name = "Quaternary TDM7 Playback",
+			.aif_name = "QUAT_TDM_RX_7",
+			.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |
+				SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000 |
+				SNDRV_PCM_RATE_176400 | SNDRV_PCM_RATE_352800,
+			.formats = SNDRV_PCM_FMTBIT_S16_LE |
+				   SNDRV_PCM_FMTBIT_S24_LE |
+				   SNDRV_PCM_FMTBIT_S32_LE,
+			.channels_min = 1,
+			.channels_max = 8,
+			.rate_min = 8000,
+			.rate_max = 352800,
+		},
+		.ops = &msm_dai_q6_tdm_ops,
+		.id = AFE_PORT_ID_QUATERNARY_TDM_RX_7,
+		.probe = msm_dai_q6_dai_tdm_probe,
+		.remove = msm_dai_q6_dai_tdm_remove,
+	},
+	{
+		.capture = {
+			.stream_name = "Quaternary TDM0 Capture",
+			.aif_name = "QUAT_TDM_TX_0",
+			.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |
+				SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000 |
+				SNDRV_PCM_RATE_176400 | SNDRV_PCM_RATE_352800,
+			.formats = SNDRV_PCM_FMTBIT_S16_LE |
+				   SNDRV_PCM_FMTBIT_S24_LE |
+				   SNDRV_PCM_FMTBIT_S32_LE,
+			.channels_min = 1,
+			.channels_max = 8,
+			.rate_min = 8000,
+			.rate_max = 352800,
+		},
+		.ops = &msm_dai_q6_tdm_ops,
+		.id = AFE_PORT_ID_QUATERNARY_TDM_TX,
+		.probe = msm_dai_q6_dai_tdm_probe,
+		.remove = msm_dai_q6_dai_tdm_remove,
+	},
+	{
+		.capture = {
+			.stream_name = "Quaternary TDM1 Capture",
+			.aif_name = "QUAT_TDM_TX_1",
+			.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |
+				SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000 |
+				SNDRV_PCM_RATE_176400 | SNDRV_PCM_RATE_352800,
+			.formats = SNDRV_PCM_FMTBIT_S16_LE |
+				   SNDRV_PCM_FMTBIT_S24_LE |
+				   SNDRV_PCM_FMTBIT_S32_LE,
+			.channels_min = 1,
+			.channels_max = 8,
+			.rate_min = 8000,
+			.rate_max = 352800,
+		},
+		.ops = &msm_dai_q6_tdm_ops,
+		.id = AFE_PORT_ID_QUATERNARY_TDM_TX_1,
+		.probe = msm_dai_q6_dai_tdm_probe,
+		.remove = msm_dai_q6_dai_tdm_remove,
+	},
+	{
+		.capture = {
+			.stream_name = "Quaternary TDM2 Capture",
+			.aif_name = "QUAT_TDM_TX_2",
+			.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |
+				SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000 |
+				SNDRV_PCM_RATE_176400 | SNDRV_PCM_RATE_352800,
+			.formats = SNDRV_PCM_FMTBIT_S16_LE |
+				   SNDRV_PCM_FMTBIT_S24_LE |
+				   SNDRV_PCM_FMTBIT_S32_LE,
+			.channels_min = 1,
+			.channels_max = 8,
+			.rate_min = 8000,
+			.rate_max = 352800,
+		},
+		.ops = &msm_dai_q6_tdm_ops,
+		.id = AFE_PORT_ID_QUATERNARY_TDM_TX_2,
+		.probe = msm_dai_q6_dai_tdm_probe,
+		.remove = msm_dai_q6_dai_tdm_remove,
+	},
+	{
+		.capture = {
+			.stream_name = "Quaternary TDM3 Capture",
+			.aif_name = "QUAT_TDM_TX_3",
+			.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |
+				SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000 |
+				SNDRV_PCM_RATE_176400 | SNDRV_PCM_RATE_352800,
+			.formats = SNDRV_PCM_FMTBIT_S16_LE |
+				   SNDRV_PCM_FMTBIT_S24_LE |
+				   SNDRV_PCM_FMTBIT_S32_LE,
+			.channels_min = 1,
+			.channels_max = 8,
+			.rate_min = 8000,
+			.rate_max = 352800,
+		},
+		.ops = &msm_dai_q6_tdm_ops,
+		.id = AFE_PORT_ID_QUATERNARY_TDM_TX_3,
+		.probe = msm_dai_q6_dai_tdm_probe,
+		.remove = msm_dai_q6_dai_tdm_remove,
+	},
+	{
+		.capture = {
+			.stream_name = "Quaternary TDM4 Capture",
+			.aif_name = "QUAT_TDM_TX_4",
+			.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |
+				SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000 |
+				SNDRV_PCM_RATE_176400 | SNDRV_PCM_RATE_352800,
+			.formats = SNDRV_PCM_FMTBIT_S16_LE |
+				   SNDRV_PCM_FMTBIT_S24_LE |
+				   SNDRV_PCM_FMTBIT_S32_LE,
+			.channels_min = 1,
+			.channels_max = 8,
+			.rate_min = 8000,
+			.rate_max = 352800,
+		},
+		.ops = &msm_dai_q6_tdm_ops,
+		.id = AFE_PORT_ID_QUATERNARY_TDM_TX_4,
+		.probe = msm_dai_q6_dai_tdm_probe,
+		.remove = msm_dai_q6_dai_tdm_remove,
+	},
+	{
+		.capture = {
+			.stream_name = "Quaternary TDM5 Capture",
+			.aif_name = "QUAT_TDM_TX_5",
+			.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |
+				SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000 |
+				SNDRV_PCM_RATE_176400 | SNDRV_PCM_RATE_352800,
+			.formats = SNDRV_PCM_FMTBIT_S16_LE |
+				   SNDRV_PCM_FMTBIT_S24_LE |
+				   SNDRV_PCM_FMTBIT_S32_LE,
+			.channels_min = 1,
+			.channels_max = 8,
+			.rate_min = 8000,
+			.rate_max = 352800,
+		},
+		.ops = &msm_dai_q6_tdm_ops,
+		.id = AFE_PORT_ID_QUATERNARY_TDM_TX_5,
+		.probe = msm_dai_q6_dai_tdm_probe,
+		.remove = msm_dai_q6_dai_tdm_remove,
+	},
+	{
+		.capture = {
+			.stream_name = "Quaternary TDM6 Capture",
+			.aif_name = "QUAT_TDM_TX_6",
+			.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |
+				SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000 |
+				SNDRV_PCM_RATE_176400 | SNDRV_PCM_RATE_352800,
+			.formats = SNDRV_PCM_FMTBIT_S16_LE |
+				   SNDRV_PCM_FMTBIT_S24_LE |
+				   SNDRV_PCM_FMTBIT_S32_LE,
+			.channels_min = 1,
+			.channels_max = 8,
+			.rate_min = 8000,
+			.rate_max = 352800,
+		},
+		.ops = &msm_dai_q6_tdm_ops,
+		.id = AFE_PORT_ID_QUATERNARY_TDM_TX_6,
+		.probe = msm_dai_q6_dai_tdm_probe,
+		.remove = msm_dai_q6_dai_tdm_remove,
+	},
+	{
+		.capture = {
+			.stream_name = "Quaternary TDM7 Capture",
+			.aif_name = "QUAT_TDM_TX_7",
+			.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |
+				SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000 |
+				SNDRV_PCM_RATE_176400 | SNDRV_PCM_RATE_352800,
+			.formats = SNDRV_PCM_FMTBIT_S16_LE |
+				   SNDRV_PCM_FMTBIT_S24_LE |
+				   SNDRV_PCM_FMTBIT_S32_LE,
+			.channels_min = 1,
+			.channels_max = 8,
+			.rate_min = 8000,
+			.rate_max = 352800,
+		},
+		.ops = &msm_dai_q6_tdm_ops,
+		.id = AFE_PORT_ID_QUATERNARY_TDM_TX_7,
+		.probe = msm_dai_q6_dai_tdm_probe,
+		.remove = msm_dai_q6_dai_tdm_remove,
+	},
+};
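+
+/*
+ * The entries in msm_dai_q6_tdm_dai[] differ only in stream direction,
+ * stream/AIF name and AFE port id.  A generator macro along the lines of
+ * the sketch below could express the same table without the repetition
+ * (macro names here are hypothetical, not part of any existing API):
+ *
+ *	#define MSM_TDM_RATES (SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 | \
+ *			SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000 | \
+ *			SNDRV_PCM_RATE_176400 | SNDRV_PCM_RATE_352800)
+ *
+ *	#define MSM_TDM_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | \
+ *			SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_S32_LE)
+ *
+ *	#define MSM_TDM_PLAYBACK_DAI(sname, aif, port_id) {	\
+ *		.playback = {					\
+ *			.stream_name = sname,			\
+ *			.aif_name = aif,			\
+ *			.rates = MSM_TDM_RATES,			\
+ *			.formats = MSM_TDM_FORMATS,		\
+ *			.channels_min = 1,			\
+ *			.channels_max = 8,			\
+ *			.rate_min = 8000,			\
+ *			.rate_max = 352800,			\
+ *		},						\
+ *		.ops = &msm_dai_q6_tdm_ops,			\
+ *		.id = port_id,					\
+ *		.probe = msm_dai_q6_dai_tdm_probe,		\
+ *		.remove = msm_dai_q6_dai_tdm_remove,		\
+ *	}
+ *
+ * e.g. MSM_TDM_PLAYBACK_DAI("Primary TDM0 Playback", "PRI_TDM_RX_0",
+ *			     AFE_PORT_ID_PRIMARY_TDM_RX)
+ */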
+
+static const struct snd_soc_component_driver msm_q6_tdm_dai_component = {
+	.name		= "msm-dai-q6-tdm",
+};
+
+static int msm_dai_q6_tdm_dev_probe(struct platform_device *pdev)
+{
+	struct msm_dai_q6_tdm_dai_data *dai_data = NULL;
+	struct afe_param_id_custom_tdm_header_cfg *custom_tdm_header = NULL;
+	int rc = 0;
+	u32 tdm_dev_id = 0;
+	int port_idx = 0;
+	struct device_node *tdm_parent_node = NULL;
+
+	/* retrieve device/afe id */
+	rc = of_property_read_u32(pdev->dev.of_node,
+		"qcom,msm-cpudai-tdm-dev-id",
+		&tdm_dev_id);
+	if (rc) {
+		dev_err(&pdev->dev, "%s: Device ID missing in DT file\n",
+			__func__);
+		goto rtn;
+	}
+	if ((tdm_dev_id < AFE_PORT_ID_TDM_PORT_RANGE_START) ||
+		(tdm_dev_id > AFE_PORT_ID_TDM_PORT_RANGE_END)) {
+		dev_err(&pdev->dev, "%s: Invalid TDM Device ID 0x%x in DT file\n",
+			__func__, tdm_dev_id);
+		rc = -ENXIO;
+		goto rtn;
+	}
+	pdev->id = tdm_dev_id;
+
+	dev_info(&pdev->dev, "%s: dev_name: %s dev_id: 0x%x\n",
+		__func__, dev_name(&pdev->dev), tdm_dev_id);
+
+	dai_data = kzalloc(sizeof(struct msm_dai_q6_tdm_dai_data),
+				GFP_KERNEL);
+	if (!dai_data) {
+		rc = -ENOMEM;
+		dev_err(&pdev->dev,
+			"%s: Failed to allocate memory for tdm dai_data\n",
+			__func__);
+		goto rtn;
+	}
+
+	/* TDM CFG */
+	tdm_parent_node = of_get_parent(pdev->dev.of_node);
+	rc = of_property_read_u32(tdm_parent_node,
+		"qcom,msm-cpudai-tdm-sync-mode",
+		(u32 *)&dai_data->port_cfg.tdm.sync_mode);
+	if (rc) {
+		dev_err(&pdev->dev, "%s: failed to read Sync Mode from DT property %s\n",
+			__func__, "qcom,msm-cpudai-tdm-sync-mode");
+		goto free_dai_data;
+	}
+	dev_dbg(&pdev->dev, "%s: Sync Mode from DT file 0x%x\n",
+		__func__, dai_data->port_cfg.tdm.sync_mode);
+
+	rc = of_property_read_u32(tdm_parent_node,
+		"qcom,msm-cpudai-tdm-sync-src",
+		(u32 *)&dai_data->port_cfg.tdm.sync_src);
+	if (rc) {
+		dev_err(&pdev->dev, "%s: failed to read Sync Src from DT property %s\n",
+			__func__, "qcom,msm-cpudai-tdm-sync-src");
+		goto free_dai_data;
+	}
+	dev_dbg(&pdev->dev, "%s: Sync Src from DT file 0x%x\n",
+		__func__, dai_data->port_cfg.tdm.sync_src);
+
+	rc = of_property_read_u32(tdm_parent_node,
+		"qcom,msm-cpudai-tdm-data-out",
+		(u32 *)&dai_data->port_cfg.tdm.ctrl_data_out_enable);
+	if (rc) {
+		dev_err(&pdev->dev, "%s: failed to read Data Out from DT property %s\n",
+			__func__, "qcom,msm-cpudai-tdm-data-out");
+		goto free_dai_data;
+	}
+	dev_dbg(&pdev->dev, "%s: Data Out from DT file 0x%x\n",
+		__func__, dai_data->port_cfg.tdm.ctrl_data_out_enable);
+
+	rc = of_property_read_u32(tdm_parent_node,
+		"qcom,msm-cpudai-tdm-invert-sync",
+		(u32 *)&dai_data->port_cfg.tdm.ctrl_invert_sync_pulse);
+	if (rc) {
+		dev_err(&pdev->dev, "%s: failed to read Invert Sync from DT property %s\n",
+			__func__, "qcom,msm-cpudai-tdm-invert-sync");
+		goto free_dai_data;
+	}
+	dev_dbg(&pdev->dev, "%s: Invert Sync from DT file 0x%x\n",
+		__func__, dai_data->port_cfg.tdm.ctrl_invert_sync_pulse);
+
+	rc = of_property_read_u32(tdm_parent_node,
+		"qcom,msm-cpudai-tdm-data-delay",
+		(u32 *)&dai_data->port_cfg.tdm.ctrl_sync_data_delay);
+	if (rc) {
+		dev_err(&pdev->dev, "%s: failed to read Data Delay from DT property %s\n",
+			__func__, "qcom,msm-cpudai-tdm-data-delay");
+		goto free_dai_data;
+	}
+	dev_dbg(&pdev->dev, "%s: Data Delay from DT file 0x%x\n",
+		__func__, dai_data->port_cfg.tdm.ctrl_sync_data_delay);
+
+	/* TDM CFG -- set default */
+	dai_data->port_cfg.tdm.data_format = AFE_LINEAR_PCM_DATA;
+	dai_data->port_cfg.tdm.tdm_cfg_minor_version =
+		AFE_API_VERSION_TDM_CONFIG;
+
+	/* TDM SLOT MAPPING CFG */
+	rc = of_property_read_u32(pdev->dev.of_node,
+		"qcom,msm-cpudai-tdm-data-align",
+		&dai_data->port_cfg.slot_mapping.data_align_type);
+	if (rc) {
+		dev_err(&pdev->dev, "%s: failed to read Data Align from DT property %s\n",
+			__func__,
+			"qcom,msm-cpudai-tdm-data-align");
+		goto free_dai_data;
+	}
+	dev_dbg(&pdev->dev, "%s: Data Align from DT file 0x%x\n",
+		__func__, dai_data->port_cfg.slot_mapping.data_align_type);
+
+	/* TDM SLOT MAPPING CFG -- set default */
+	dai_data->port_cfg.slot_mapping.minor_version =
+		AFE_API_VERSION_SLOT_MAPPING_CONFIG;
+
+	/* CUSTOM TDM HEADER CFG */
+	custom_tdm_header = &dai_data->port_cfg.custom_tdm_header;
+	if (of_find_property(pdev->dev.of_node,
+			"qcom,msm-cpudai-tdm-header-start-offset", NULL) &&
+		of_find_property(pdev->dev.of_node,
+			"qcom,msm-cpudai-tdm-header-width", NULL) &&
+		of_find_property(pdev->dev.of_node,
+			"qcom,msm-cpudai-tdm-header-num-frame-repeat", NULL)) {
+		/* all three header properties exist */
+		rc = of_property_read_u32(pdev->dev.of_node,
+			"qcom,msm-cpudai-tdm-header-start-offset",
+			(u32 *)&custom_tdm_header->start_offset);
+		if (rc) {
+			dev_err(&pdev->dev, "%s: failed to read Header Start Offset from DT property %s\n",
+				__func__,
+				"qcom,msm-cpudai-tdm-header-start-offset");
+			goto free_dai_data;
+		}
+		dev_dbg(&pdev->dev, "%s: Header Start Offset from DT file 0x%x\n",
+			__func__, custom_tdm_header->start_offset);
+
+		rc = of_property_read_u32(pdev->dev.of_node,
+			"qcom,msm-cpudai-tdm-header-width",
+			(u32 *)&custom_tdm_header->header_width);
+		if (rc) {
+			dev_err(&pdev->dev, "%s: failed to read Header Width from DT property %s\n",
+				__func__, "qcom,msm-cpudai-tdm-header-width");
+			goto free_dai_data;
+		}
+		dev_dbg(&pdev->dev, "%s: Header Width from DT file 0x%x\n",
+			__func__, custom_tdm_header->header_width);
+
+		rc = of_property_read_u32(pdev->dev.of_node,
+			"qcom,msm-cpudai-tdm-header-num-frame-repeat",
+			(u32 *)&custom_tdm_header->num_frame_repeat);
+		if (rc) {
+			dev_err(&pdev->dev, "%s: failed to read Header Num Frame Repeat from DT property %s\n",
+				__func__,
+				"qcom,msm-cpudai-tdm-header-num-frame-repeat");
+			goto free_dai_data;
+		}
+		dev_dbg(&pdev->dev, "%s: Header Num Frame Repeat from DT file 0x%x\n",
+			__func__, custom_tdm_header->num_frame_repeat);
+
+		/* CUSTOM TDM HEADER CFG -- set default */
+		custom_tdm_header->minor_version =
+			AFE_API_VERSION_CUSTOM_TDM_HEADER_CONFIG;
+		custom_tdm_header->header_type =
+			AFE_CUSTOM_TDM_HEADER_TYPE_INVALID;
+	} else {
+		dev_info(&pdev->dev,
+			"%s: Custom tdm header not supported\n", __func__);
+		/* CUSTOM TDM HEADER CFG -- set default */
+		custom_tdm_header->header_type =
+			AFE_CUSTOM_TDM_HEADER_TYPE_INVALID;
+		/* proceed with probe */
+	}
+
+	/* copy static clk per parent node */
+	dai_data->clk_set = tdm_clk_set;
+	/* copy static group cfg per parent node */
+	dai_data->group_cfg.tdm_cfg = tdm_group_cfg;
+	/* copy static num group ports per parent node */
+	dai_data->num_group_ports = num_tdm_group_ports;
+
+	dev_set_drvdata(&pdev->dev, dai_data);
+
+	port_idx = msm_dai_q6_get_port_idx(tdm_dev_id);
+	if (port_idx < 0) {
+		dev_err(&pdev->dev, "%s: Port id 0x%x not supported\n",
+			__func__, tdm_dev_id);
+		rc = -EINVAL;
+		goto free_dai_data;
+	}
+
+	rc = snd_soc_register_component(&pdev->dev,
+		&msm_q6_tdm_dai_component,
+		&msm_dai_q6_tdm_dai[port_idx], 1);
+
+	if (rc) {
+		dev_err(&pdev->dev, "%s: TDM dai 0x%x register failed, rc=%d\n",
+			__func__, tdm_dev_id, rc);
+		goto err_register;
+	}
+
+	return 0;
+
+err_register:
+free_dai_data:
+	kfree(dai_data);
+rtn:
+	return rc;
+}
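+
+/*
+ * For reference, a device-tree layout matched by the probe above might
+ * look like the sketch below (node names and values are illustrative
+ * examples only).  The sync/data properties sit on the parent node, which
+ * is why they are read via of_get_parent(); dev-id and data-align sit on
+ * the child node itself:
+ *
+ *	qcom,msm-dai-tdm-pri-rx {
+ *		qcom,msm-cpudai-tdm-sync-mode = <1>;
+ *		qcom,msm-cpudai-tdm-sync-src = <1>;
+ *		qcom,msm-cpudai-tdm-data-out = <0>;
+ *		qcom,msm-cpudai-tdm-invert-sync = <0>;
+ *		qcom,msm-cpudai-tdm-data-delay = <1>;
+ *
+ *		dai_pri_tdm_rx_0: qcom,msm-dai-q6-tdm-pri-rx-0 {
+ *			compatible = "qcom,msm-dai-q6-tdm";
+ *			qcom,msm-cpudai-tdm-dev-id = <36864>;
+ *			qcom,msm-cpudai-tdm-data-align = <0>;
+ *		};
+ *	};
+ */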
+
+static int msm_dai_q6_tdm_dev_remove(struct platform_device *pdev)
+{
+	struct msm_dai_q6_tdm_dai_data *dai_data =
+		dev_get_drvdata(&pdev->dev);
+
+	snd_soc_unregister_component(&pdev->dev);
+
+	kfree(dai_data);
+
+	return 0;
+}
+
+static const struct of_device_id msm_dai_q6_tdm_dev_dt_match[] = {
+	{ .compatible = "qcom,msm-dai-q6-tdm", },
+	{}
+};
+
+MODULE_DEVICE_TABLE(of, msm_dai_q6_tdm_dev_dt_match);
+
+static struct platform_driver msm_dai_q6_tdm_driver = {
+	.probe  = msm_dai_q6_tdm_dev_probe,
+	.remove  = msm_dai_q6_tdm_dev_remove,
+	.driver = {
+		.name = "msm-dai-q6-tdm",
+		.owner = THIS_MODULE,
+		.of_match_table = msm_dai_q6_tdm_dev_dt_match,
+	},
+};
+
+static int __init msm_dai_q6_init(void)
+{
+	int rc;
+
+	rc = platform_driver_register(&msm_auxpcm_dev_driver);
+	if (rc) {
+		pr_err("%s: fail to register auxpcm dev driver\n", __func__);
+		goto fail;
+	}
+
+	rc = platform_driver_register(&msm_dai_q6);
+	if (rc) {
+		pr_err("%s: fail to register dai q6 driver\n", __func__);
+		goto dai_q6_fail;
+	}
+
+	rc = platform_driver_register(&msm_dai_q6_dev);
+	if (rc) {
+		pr_err("%s: fail to register dai q6 dev driver\n", __func__);
+		goto dai_q6_dev_fail;
+	}
+
+	rc = platform_driver_register(&msm_dai_q6_mi2s_driver);
+	if (rc) {
+		pr_err("%s: fail to register dai MI2S dev drv\n", __func__);
+		goto dai_q6_mi2s_drv_fail;
+	}
+
+	rc = platform_driver_register(&msm_dai_mi2s_q6);
+	if (rc) {
+		pr_err("%s: fail to register dai MI2S\n", __func__);
+		goto dai_mi2s_q6_fail;
+	}
+
+	rc = platform_driver_register(&msm_dai_q6_spdif_driver);
+	if (rc) {
+		pr_err("%s: fail to register dai SPDIF\n", __func__);
+		goto dai_spdif_q6_fail;
+	}
+
+	rc = platform_driver_register(&msm_dai_q6_tdm_driver);
+	if (rc) {
+		pr_err("%s: fail to register dai TDM dev drv\n", __func__);
+		goto dai_q6_tdm_drv_fail;
+	}
+
+	rc = platform_driver_register(&msm_dai_tdm_q6);
+	if (rc) {
+		pr_err("%s: fail to register dai TDM\n", __func__);
+		goto dai_tdm_q6_fail;
+	}
+	return rc;
+
+dai_tdm_q6_fail:
+	platform_driver_unregister(&msm_dai_q6_tdm_driver);
+dai_q6_tdm_drv_fail:
+	platform_driver_unregister(&msm_dai_q6_spdif_driver);
+dai_spdif_q6_fail:
+	platform_driver_unregister(&msm_dai_mi2s_q6);
+dai_mi2s_q6_fail:
+	platform_driver_unregister(&msm_dai_q6_mi2s_driver);
+dai_q6_mi2s_drv_fail:
+	platform_driver_unregister(&msm_dai_q6_dev);
+dai_q6_dev_fail:
+	platform_driver_unregister(&msm_dai_q6);
+dai_q6_fail:
+	platform_driver_unregister(&msm_auxpcm_dev_driver);
+fail:
+	return rc;
+}
+module_init(msm_dai_q6_init);
+
+static void __exit msm_dai_q6_exit(void)
+{
+	/* Unregister in the reverse order of msm_dai_q6_init() */
+	platform_driver_unregister(&msm_dai_tdm_q6);
+	platform_driver_unregister(&msm_dai_q6_tdm_driver);
+	platform_driver_unregister(&msm_dai_q6_spdif_driver);
+	platform_driver_unregister(&msm_dai_mi2s_q6);
+	platform_driver_unregister(&msm_dai_q6_mi2s_driver);
+	platform_driver_unregister(&msm_dai_q6_dev);
+	platform_driver_unregister(&msm_dai_q6);
+	platform_driver_unregister(&msm_auxpcm_dev_driver);
+}
+module_exit(msm_dai_q6_exit);
+
+/* Module information */
+MODULE_DESCRIPTION("MSM DSP DAI driver");
+MODULE_LICENSE("GPL v2");
diff -Nruw linux-4.4.115-fbx/sound/soc/msm./qdsp6v2/msm-dai-slim.c linux-4.4.115-fbx/sound/soc/msm/qdsp6v2/msm-dai-slim.c
--- linux-4.4.115-fbx/sound/soc/msm./qdsp6v2/msm-dai-slim.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/sound/soc/msm/qdsp6v2/msm-dai-slim.c	2019-01-22 16:16:29.627301863 +0100
@@ -0,0 +1,670 @@
+/*
+ * Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/bitops.h>
+#include <linux/slimbus/slimbus.h>
+#include <sound/soc.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/msm-slim-dma.h>
+
+#define SLIM_DEV_NAME "msm-dai-slim"
+
+#define SLIM_DAI_RATES (SNDRV_PCM_RATE_48000 | \
+			SNDRV_PCM_RATE_8000 | \
+			SNDRV_PCM_RATE_16000 | \
+			SNDRV_PCM_RATE_96000 | \
+			SNDRV_PCM_RATE_192000 | \
+			SNDRV_PCM_RATE_384000)
+
+#define SLIM_DAI_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | \
+			  SNDRV_PCM_FMTBIT_S24_LE | \
+			  SNDRV_PCM_FMTBIT_S32_LE)
+
+#define DAI_STATE_INITIALIZED (0x01 << 0)
+#define DAI_STATE_PREPARED (0x01 << 1)
+#define DAI_STATE_RUNNING (0x01 << 2)
+
+#define SET_DAI_STATE(status, state) \
+	(status |= state)
+
+#define CLR_DAI_STATE(status, state) \
+	(status = status & (~state))
+
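+/*
+ * Usage sketch: SET_DAI_STATE(dai_data->status, DAI_STATE_PREPARED) sets
+ * the prepared bit, and CLR_DAI_STATE(dai_data->status, DAI_STATE_RUNNING)
+ * clears the running bit, leaving the other state bits untouched.
+ */
+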
+enum {
+	MSM_DAI_SLIM0 = 0,
+	NUM_SLIM_DAIS,
+};
+
+struct msm_slim_dai_data {
+	unsigned int dai_id;
+	u16 *chan_h;
+	u16 *sh_ch;
+	u16 grph;
+	u32 rate;
+	u16 bits;
+	u16 ch_cnt;
+	u8 status;
+	struct snd_soc_dai_driver *dai_drv;
+	struct msm_slim_dma_data dma_data;
+	struct slim_port_cfg port_cfg;
+};
+
+struct msm_dai_slim_drv_data {
+	struct slim_device *sdev;
+	u16 num_dais;
+	struct msm_slim_dai_data slim_dai_data[NUM_SLIM_DAIS];
+};
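+
+/*
+ * One msm_slim_dai_data entry is kept per DAI; msm_slim_get_dai_data()
+ * below resolves a snd_soc_dai back to its entry by matching dai->id.
+ */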
+
+static struct msm_slim_dai_data *msm_slim_get_dai_data(
+	struct msm_dai_slim_drv_data *drv_data,
+	struct snd_soc_dai *dai)
+{
+	struct msm_slim_dai_data *dai_data_t;
+	int i;
+
+	for (i = 0; i < drv_data->num_dais; i++) {
+		dai_data_t = &drv_data->slim_dai_data[i];
+		if (dai_data_t->dai_id == dai->id)
+			return dai_data_t;
+	}
+
+	dev_err(dai->dev,
+		"%s: no dai data found for dai_id %d\n",
+		__func__, dai->id);
+	return NULL;
+}
+
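+/*
+ * Channel control for a SLIMbus DAI.  On enable: allocate and configure
+ * the SLIMbus manager ports, connect each channel handle as a sink, then
+ * activate the channel group.  On disable: remove the channel group and
+ * deallocate the manager ports.  The error path unwinds the manager port
+ * allocation.
+ */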
+static int msm_dai_slim_ch_ctl(struct msm_slim_dma_data *dma_data,
+	struct snd_soc_dai *dai, bool enable)
+{
+	struct slim_device *sdev;
+	struct msm_dai_slim_drv_data *drv_data;
+	struct msm_slim_dai_data *dai_data;
+	int rc, rc1, i;
+
+	if (!dma_data || !dma_data->sdev) {
+		pr_err("%s: Invalid %s\n", __func__,
+		       (!dma_data) ? "dma_data" : "slim_device");
+		return -EINVAL;
+	}
+
+	sdev = dma_data->sdev;
+	drv_data = dev_get_drvdata(&sdev->dev);
+	dai_data = msm_slim_get_dai_data(drv_data, dai);
+
+	if (!dai_data) {
+		dev_err(dai->dev,
+			"%s: Invalid dai_data for dai_id %d\n",
+			__func__, dai->id);
+		return -EINVAL;
+	}
+
+	dev_dbg(&sdev->dev,
+		"%s: enable = %s, rate = %u\n", __func__,
+		enable ? "true" : "false",
+		dai_data->rate);
+
+	if (enable) {
+		if (!(dai_data->status & DAI_STATE_PREPARED)) {
+			dev_err(&sdev->dev,
+				"%s: dai id (%d) has invalid state 0x%x\n",
+				__func__, dai->id, dai_data->status);
+			return -EINVAL;
+		}
+
+		rc = slim_alloc_mgrports(sdev,
+					 SLIM_REQ_DEFAULT, dai_data->ch_cnt,
+					 &(dma_data->ph),
+					 sizeof(dma_data->ph));
+
+		if (IS_ERR_VALUE(rc)) {
+			dev_err(&sdev->dev,
+				"%s: alloc mgrport failed rc %d\n",
+				__func__, rc);
+			goto done;
+		}
+
+		rc = slim_config_mgrports(sdev, &(dma_data->ph),
+					  dai_data->ch_cnt,
+					  &(dai_data->port_cfg));
+		if (IS_ERR_VALUE(rc)) {
+			dev_err(&sdev->dev,
+				"%s: config mgrport failed rc %d\n",
+				__func__, rc);
+			goto err_done;
+		}
+
+		for (i = 0; i < dai_data->ch_cnt; i++) {
+			rc = slim_connect_sink(sdev,
+					       &dma_data->ph, 1,
+					       dai_data->chan_h[i]);
+			if (IS_ERR_VALUE(rc)) {
+				dev_err(&sdev->dev,
+					"%s: slim_connect_sink failed, ch = %d, err = %d\n",
+					__func__, i, rc);
+				goto err_done;
+			}
+		}
+
+		rc = slim_control_ch(sdev,
+				     dai_data->grph,
+				     SLIM_CH_ACTIVATE, true);
+		if (IS_ERR_VALUE(rc)) {
+			dev_err(&sdev->dev,
+				"%s: slim activate ch failed, err = %d\n",
+				__func__, rc);
+			goto err_done;
+		}
+		/* Mark dai status as running */
+		SET_DAI_STATE(dai_data->status, DAI_STATE_RUNNING);
+	} else {
+		if (!(dai_data->status & DAI_STATE_RUNNING)) {
+			dev_err(&sdev->dev,
+				"%s: dai id (%d) has invalid state 0x%x\n",
+				__func__, dai->id, dai_data->status);
+			return -EINVAL;
+		}
+
+		rc = slim_control_ch(sdev,
+				     dai_data->grph,
+				     SLIM_CH_REMOVE, true);
+		if (IS_ERR_VALUE(rc)) {
+			dev_err(&sdev->dev,
+				"%s: slim activate ch failed, err = %d\n",
+				__func__, rc);
+			goto done;
+		}
+
+		rc = slim_dealloc_mgrports(sdev,
+					   &dma_data->ph, 1);
+		if (IS_ERR_VALUE(rc)) {
+			dev_err(&sdev->dev,
+				"%s: dealloc mgrport failed, err = %d\n",
+				__func__, rc);
+			goto done;
+		}
+		/* clear running state for the dai */
+		CLR_DAI_STATE(dai_data->status, DAI_STATE_RUNNING);
+	}
+
+	return rc;
+
+err_done:
+	rc1 = slim_dealloc_mgrports(sdev,
+				   &dma_data->ph, 1);
+	if (IS_ERR_VALUE(rc1))
+		dev_err(&sdev->dev,
+			"%s: dealloc mgrport failed, err = %d\n",
+			__func__, rc1);
+done:
+	return rc;
+}
+
+static int msm_dai_slim_hw_params(
+		struct snd_pcm_substream *substream,
+		struct snd_pcm_hw_params *params,
+		struct snd_soc_dai *dai)
+{
+	struct msm_dai_slim_drv_data *drv_data = dev_get_drvdata(dai->dev);
+	struct msm_slim_dai_data *dai_data;
+	int rc = 0;
+
+	dai_data = msm_slim_get_dai_data(drv_data, dai);
+	if (!dai_data) {
+		dev_err(dai->dev,
+			"%s: Invalid dai_data for dai_id %d\n",
+			__func__, dai->id);
+		rc = -EINVAL;
+		goto done;
+	}
+
+	if (!dai_data->ch_cnt || dai_data->ch_cnt != params_channels(params)) {
+		dev_err(dai->dev, "%s: invalid ch_cnt %d %d\n",
+			__func__, dai_data->ch_cnt, params_channels(params));
+		rc = -EINVAL;
+		goto done;
+	}
+
+	dai_data->rate = params_rate(params);
+	dai_data->port_cfg.port_opts = SLIM_OPT_NONE;
+	/* dai_data->rate is in Hz, not an SNDRV_PCM_RATE_* bitmask */
+	if (dai_data->rate >= 48000)
+		dai_data->port_cfg.watermark = 16;
+	else
+		dai_data->port_cfg.watermark = 8;
+
+	switch (params_format(params)) {
+	case SNDRV_PCM_FORMAT_S16_LE:
+		dai_data->bits = 16;
+		break;
+	case SNDRV_PCM_FORMAT_S24_LE:
+		dai_data->bits = 24;
+		break;
+	case SNDRV_PCM_FORMAT_S32_LE:
+		dai_data->bits = 32;
+		break;
+	default:
+		dev_err(dai->dev, "%s: invalid format %d\n", __func__,
+			params_format(params));
+		rc = -EINVAL;
+		goto done;
+	}
+
+	dev_dbg(dai->dev, "%s: ch_cnt=%u rate=%u, bit_width = %u\n",
+		__func__, dai_data->ch_cnt, dai_data->rate,
+		dai_data->bits);
+done:
+	return rc;
+}
+
+static int msm_dai_slim_set_channel_map(struct snd_soc_dai *dai,
+	unsigned int tx_num, unsigned int *tx_slot,
+	unsigned int rx_num, unsigned int *rx_slot)
+{
+	struct msm_dai_slim_drv_data *drv_data = dev_get_drvdata(dai->dev);
+	struct msm_slim_dai_data *dai_data;
+	struct snd_soc_dai_driver *dai_drv;
+	u8 i = 0;
+
+	dev_dbg(dai->dev,
+		"%s: tx_num=%u, rx_num=%u\n",
+		__func__, tx_num, rx_num);
+
+	dai_data = msm_slim_get_dai_data(drv_data, dai);
+	if (!dai_data) {
+		dev_err(dai->dev,
+			"%s: Invalid dai_data for dai_id %d\n",
+			__func__, dai->id);
+		return -EINVAL;
+	}
+
+	dai_drv = dai_data->dai_drv;
+
+	if (tx_num > dai_drv->capture.channels_max) {
+		dev_err(dai->dev, "%s: tx_num %u max out master port cnt\n",
+			__func__, tx_num);
+		return -EINVAL;
+	}
+
+	for (i = 0; i < tx_num; i++)
+		dai_data->sh_ch[i] = tx_slot[i];
+
+	dai_data->ch_cnt = tx_num;
+	return 0;
+}
+
+static int msm_dai_slim_prepare(struct snd_pcm_substream *substream,
+				   struct snd_soc_dai *dai)
+{
+	struct msm_dai_slim_drv_data *drv_data = dev_get_drvdata(dai->dev);
+	struct msm_slim_dma_data *dma_data;
+	struct msm_slim_dai_data *dai_data = NULL;
+	struct slim_ch prop;
+	int rc;
+	u8 i, j;
+
+	dai_data = msm_slim_get_dai_data(drv_data, dai);
+	if (!dai_data) {
+		dev_err(dai->dev,
+			"%s: Invalid dai_data for dai %d\n",
+			__func__, dai->id);
+		return -EINVAL;
+	}
+
+	if (!(dai_data->status & DAI_STATE_INITIALIZED)) {
+		dev_err(dai->dev,
+			"%s: dai id (%d) has invalid state 0x%x\n",
+			__func__, dai->id, dai_data->status);
+		return -EINVAL;
+	}
+
+	if (dai_data->status & DAI_STATE_PREPARED) {
+		dev_dbg(dai->dev,
+			"%s: dai id (%d) has already prepared.\n",
+			__func__, dai->id);
+		return 0;
+	}
+
+	dma_data = &dai_data->dma_data;
+	snd_soc_dai_set_dma_data(dai, substream, dma_data);
+
+	for (i = 0; i < dai_data->ch_cnt; i++) {
+		rc = slim_query_ch(drv_data->sdev, dai_data->sh_ch[i],
+				   &dai_data->chan_h[i]);
+		if (rc) {
+			dev_err(dai->dev, "%s:query chan handle failed rc %d\n",
+				__func__ , rc);
+			goto error_chan_query;
+		}
+	}
+
+	prop.prot = SLIM_AUTO_ISO;
+	prop.baser = SLIM_RATE_4000HZ;
+	prop.dataf = SLIM_CH_DATAF_NOT_DEFINED;
+	prop.auxf = SLIM_CH_AUXF_NOT_APPLICABLE;
+	prop.ratem = (dai_data->rate/4000);
+	prop.sampleszbits = dai_data->bits;
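+	/*
+	 * Example: with the SLIM_RATE_4000HZ base rate above, a 48000 Hz
+	 * stream gives ratem = 48000 / 4000 = 12, i.e. a x12 rate
+	 * multiplier relative to the base rate.
+	 */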
+
+	rc = slim_define_ch(drv_data->sdev, &prop, dai_data->chan_h,
+			    dai_data->ch_cnt, true, &dai_data->grph);
+
+	if (rc) {
+		dev_err(dai->dev, "%s:define chan failed rc %d\n",
+				__func__ , rc);
+		goto error_define_chan;
+	}
+
+	/* Mark stream status as prepared */
+	SET_DAI_STATE(dai_data->status, DAI_STATE_PREPARED);
+
+	return rc;
+
+error_define_chan:
+error_chan_query:
+	for (j = 0; j < i; j++)
+		slim_dealloc_ch(drv_data->sdev, dai_data->chan_h[j]);
+	return rc;
+}
+
+static void msm_dai_slim_shutdown(struct snd_pcm_substream *stream,
+		struct snd_soc_dai *dai)
+{
+	struct msm_dai_slim_drv_data *drv_data = dev_get_drvdata(dai->dev);
+	struct msm_slim_dma_data *dma_data = NULL;
+	struct msm_slim_dai_data *dai_data;
+	int i, rc = 0;
+
+	dai_data = msm_slim_get_dai_data(drv_data, dai);
+	dma_data = snd_soc_dai_get_dma_data(dai, stream);
+	if (!dma_data || !dai_data) {
+		dev_err(dai->dev,
+			"%s: Invalid %s\n", __func__,
+			(!dma_data) ? "dma_data" : "dai_data");
+		return;
+	}
+
+	if ((!(dai_data->status & DAI_STATE_PREPARED)) ||
+	     dai_data->status & DAI_STATE_RUNNING) {
+		dev_err(dai->dev,
+			"%s: dai id (%d) has invalid state 0x%x\n",
+			__func__, dai->id, dai_data->status);
+		return;
+	}
+
+	for (i = 0; i < dai_data->ch_cnt; i++) {
+		rc = slim_dealloc_ch(drv_data->sdev, dai_data->chan_h[i]);
+		if (rc) {
+			dev_err(dai->dev,
+				"%s: dealloc_ch failed, err = %d\n",
+				__func__, rc);
+		}
+	}
+
+	snd_soc_dai_set_dma_data(dai, stream, NULL);
+	/* clear prepared state for the dai */
+	CLR_DAI_STATE(dai_data->status, DAI_STATE_PREPARED);
+}
+
+static const struct snd_soc_component_driver msm_dai_slim_component = {
+	.name		= "msm-dai-slim-cmpnt",
+};
+
+static struct snd_soc_dai_ops msm_dai_slim_ops = {
+	.prepare	= msm_dai_slim_prepare,
+	.hw_params	= msm_dai_slim_hw_params,
+	.shutdown	= msm_dai_slim_shutdown,
+	.set_channel_map = msm_dai_slim_set_channel_map,
+};
+
+static struct snd_soc_dai_driver msm_slim_dais[] = {
+	{
+		/*
+		 * The first dai name should be the same as the device name
+		 * to support registering single and multiple dais.
+		 */
+		.name = SLIM_DEV_NAME,
+		.id = MSM_DAI_SLIM0,
+		.capture = {
+			.rates = SLIM_DAI_RATES,
+			.formats = SLIM_DAI_FORMATS,
+			.channels_min = 1,
+			/*
+			 * max channels allowed is
+			 * dependent on platform and
+			 * will be updated before this
+			 * dai driver is registered.
+			 */
+			.channels_max = 1,
+			.rate_min = 8000,
+			.rate_max = 384000,
+			.stream_name = "SLIM_DAI0 Capture",
+		},
+		.ops = &msm_dai_slim_ops,
+	},
+	/*
+	 * If multiple dais are needed,
+	 * add dais here and update the
+	 * dai_id enum.
+	 */
+};
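+
+/*
+ * Illustrative only: a second DAI would mean adding a hypothetical
+ * MSM_DAI_SLIM1 entry before NUM_SLIM_DAIS in the dai_id enum and
+ * appending an entry along the lines of:
+ *
+ *	{
+ *		.name = "msm-dai-slim.1",
+ *		.id = MSM_DAI_SLIM1,
+ *		.capture = { ... same shape as SLIM_DAI0 ... },
+ *		.ops = &msm_dai_slim_ops,
+ *	},
+ */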
+
+static void msm_dai_slim_remove_dai_data(
+		struct device *dev,
+		struct msm_dai_slim_drv_data *drv_data)
+{
+	int i;
+	struct msm_slim_dai_data *dai_data_t;
+
+	for (i = 0; i < drv_data->num_dais; i++) {
+		dai_data_t = &drv_data->slim_dai_data[i];
+
+		devm_kfree(dev, dai_data_t->chan_h);
+		dai_data_t->chan_h = NULL;
+		devm_kfree(dev, dai_data_t->sh_ch);
+		dai_data_t->sh_ch = NULL;
+	}
+}
+
+static int msm_dai_slim_populate_dai_data(struct device *dev,
+		struct msm_dai_slim_drv_data *drv_data)
+{
+	struct snd_soc_dai_driver *dai_drv;
+	struct msm_slim_dai_data *dai_data_t;
+	u8 num_ch;
+	int i, j, rc;
+
+	for (i = 0; i < drv_data->num_dais; i++) {
+		num_ch = 0;
+		dai_drv = &msm_slim_dais[i];
+		num_ch += dai_drv->capture.channels_max;
+		num_ch += dai_drv->playback.channels_max;
+
+		dai_data_t = &drv_data->slim_dai_data[i];
+		dai_data_t->dai_drv = dai_drv;
+		dai_data_t->dai_id = dai_drv->id;
+		dai_data_t->dma_data.sdev = drv_data->sdev;
+		dai_data_t->dma_data.dai_channel_ctl =
+				msm_dai_slim_ch_ctl;
+		SET_DAI_STATE(dai_data_t->status,
+			      DAI_STATE_INITIALIZED);
+
+		dai_data_t->chan_h = devm_kzalloc(dev,
+					sizeof(u16) * num_ch,
+					GFP_KERNEL);
+		if (!dai_data_t->chan_h) {
+			dev_err(dev,
+				"%s: DAI ID %d, Failed to alloc channel handles\n",
+				__func__, i);
+			rc = -ENOMEM;
+			goto err_mem_alloc;
+		}
+
+		dai_data_t->sh_ch = devm_kzalloc(dev,
+					sizeof(u16) * num_ch,
+					GFP_KERNEL);
+		if (!dai_data_t->sh_ch) {
+			dev_err(dev,
+				"%s: DAI ID %d, Failed to alloc sh_ch\n",
+				__func__, i);
+			rc = -ENOMEM;
+			goto err_mem_alloc;
+		}
+	}
+	return 0;
+
+err_mem_alloc:
+	for (j = 0; j < i; j++) {
+		dai_data_t = &drv_data->slim_dai_data[j];
+
+		devm_kfree(dev, dai_data_t->chan_h);
+		dai_data_t->chan_h = NULL;
+
+		devm_kfree(dev, dai_data_t->sh_ch);
+		dai_data_t->sh_ch = NULL;
+	}
+	return rc;
+}
+
+static int msm_dai_slim_dev_probe(struct slim_device *sdev)
+{
+	int rc, i;
+	u8 max_channels;
+	u32 apps_ch_pipes;
+	struct msm_dai_slim_drv_data *drv_data;
+	struct device *dev = &sdev->dev;
+	struct snd_soc_dai_driver *dai_drv;
+
+	if (!dev->of_node ||
+	    !dev->of_node->parent) {
+		dev_err(dev,
+			"%s: Invalid %s\n", __func__,
+			(!dev->of_node) ? "of_node" : "parent_of_node");
+		return -EINVAL;
+	}
+
+	rc = of_property_read_u32(dev->of_node->parent,
+					 "qcom,apps-ch-pipes",
+					 &apps_ch_pipes);
+	if (rc) {
+		dev_err(dev,
+			"%s: Failed to lookup property %s in node %s, err = %d\n",
+			__func__, "qcom,apps-ch-pipes",
+			dev->of_node->parent->full_name, rc);
+		goto err_ret;
+	}
+
+	max_channels = hweight_long(apps_ch_pipes);
+	if (!max_channels) {
+		dev_err(dev,
+			"%s: Invalid apps owned ports %d\n",
+			__func__, max_channels);
+		rc = -EINVAL;
+		goto err_ret;
+	}
+
+	dev_dbg(dev, "%s: max channels = %u\n",
+		__func__, max_channels);
+
+	for (i = 0; i < ARRAY_SIZE(msm_slim_dais); i++) {
+		dai_drv = &msm_slim_dais[i];
+		dai_drv->capture.channels_max = max_channels;
+		dai_drv->playback.channels_max = max_channels;
+	}
+
+	drv_data = devm_kzalloc(dev, sizeof(*drv_data),
+				GFP_KERNEL);
+	if (!drv_data) {
+		dev_err(dev, "%s: dai driver struct alloc failed\n",
+			__func__);
+		rc = -ENOMEM;
+		goto err_ret;
+	}
+
+	drv_data->sdev = sdev;
+	drv_data->num_dais = NUM_SLIM_DAIS;
+
+	rc = msm_dai_slim_populate_dai_data(dev, drv_data);
+	if (rc) {
+		dev_err(dev,
+			"%s: failed to setup dai_data, err = %d\n",
+			__func__, rc);
+		goto err_populate_dai;
+	}
+
+	rc = snd_soc_register_component(&sdev->dev, &msm_dai_slim_component,
+					msm_slim_dais, NUM_SLIM_DAIS);
+
+	if (IS_ERR_VALUE(rc)) {
+		dev_err(dev, "%s: failed to register DAI, err = %d\n",
+			__func__, rc);
+		goto err_reg_comp;
+	}
+
+	dev_set_drvdata(dev, drv_data);
+	return rc;
+
+err_reg_comp:
+	msm_dai_slim_remove_dai_data(dev, drv_data);
+
+err_populate_dai:
+	devm_kfree(dev, drv_data);
+
+err_ret:
+	return rc;
+}
+
+static int msm_dai_slim_dev_remove(struct slim_device *sdev)
+{
+	snd_soc_unregister_component(&sdev->dev);
+	return 0;
+}
+
+static const struct slim_device_id msm_dai_slim_dt_match[] = {
+	{SLIM_DEV_NAME, 0 },
+	{}
+};
+
+static struct slim_driver msm_dai_slim_driver = {
+	.driver = {
+		.name = SLIM_DEV_NAME,
+		.owner = THIS_MODULE,
+	},
+	.probe = msm_dai_slim_dev_probe,
+	.remove = msm_dai_slim_dev_remove,
+	.id_table = msm_dai_slim_dt_match,
+};
+
+static int __init msm_dai_slim_init(void)
+{
+	int rc;
+
+	rc = slim_driver_register(&msm_dai_slim_driver);
+	if (rc)
+		pr_err("%s: failed to register with slimbus driver rc = %d\n",
+			__func__, rc);
+	return rc;
+}
+module_init(msm_dai_slim_init);
+
+static void __exit msm_dai_slim_exit(void)
+{
+	slim_driver_unregister(&msm_dai_slim_driver);
+}
+module_exit(msm_dai_slim_exit);
+
+/* Module information */
+MODULE_DESCRIPTION("Slimbus apps-owned channel handling driver");
+MODULE_LICENSE("GPL v2");
diff -Nruw linux-4.4.115-fbx/sound/soc/msm./qdsp6v2/msm-dai-stub-v2.c linux-4.4.115-fbx/sound/soc/msm/qdsp6v2/msm-dai-stub-v2.c
--- linux-4.4.115-fbx/sound/soc/msm./qdsp6v2/msm-dai-stub-v2.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/sound/soc/msm/qdsp6v2/msm-dai-stub-v2.c	2019-01-22 16:16:29.627301863 +0100
@@ -0,0 +1,394 @@
+/* Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <sound/core.h>
+#include <sound/pcm.h>
+#include <sound/soc.h>
+
+enum {
+	STUB_RX,
+	STUB_TX,
+	STUB_1_RX,
+	STUB_1_TX,
+	STUB_DTMF_TX,
+	STUB_HOST_RX_CAPTURE_TX,
+	STUB_HOST_RX_PLAYBACK_RX,
+	STUB_HOST_TX_CAPTURE_TX,
+	STUB_HOST_TX_PLAYBACK_RX,
+};
+
+static int msm_dai_stub_set_channel_map(struct snd_soc_dai *dai,
+		unsigned int tx_num, unsigned int *tx_slot,
+		unsigned int rx_num, unsigned int *rx_slot)
+{
+	pr_debug("%s:\n", __func__);
+
+	return 0;
+}
+
+static struct snd_soc_dai_ops msm_dai_stub_ops = {
+	.set_channel_map = msm_dai_stub_set_channel_map,
+};
+
+static int msm_dai_stub_add_route(struct snd_soc_dai *dai)
+{
+	struct snd_soc_dapm_route intercon;
+	struct snd_soc_dapm_context *dapm;
+
+	if (!dai || !dai->driver) {
+		pr_err("%s Invalid params\n", __func__);
+		return -EINVAL;
+	}
+	dapm = snd_soc_component_get_dapm(dai->component);
+	memset(&intercon, 0, sizeof(intercon));
+	if (dai->driver->playback.stream_name &&
+		dai->driver->playback.aif_name) {
+		dev_dbg(dai->dev, "%s add route for widget %s",
+			__func__, dai->driver->playback.stream_name);
+		intercon.source = dai->driver->playback.aif_name;
+		intercon.sink = dai->driver->playback.stream_name;
+		dev_dbg(dai->dev, "%s src %s sink %s\n",
+			__func__, intercon.source, intercon.sink);
+		snd_soc_dapm_add_routes(dapm, &intercon, 1);
+	}
+	if (dai->driver->capture.stream_name &&
+		dai->driver->capture.aif_name) {
+		dev_dbg(dai->dev, "%s add route for widget %s",
+			__func__, dai->driver->capture.stream_name);
+		intercon.sink = dai->driver->capture.aif_name;
+		intercon.source = dai->driver->capture.stream_name;
+		dev_dbg(dai->dev, "%s src %s sink %s\n",
+			__func__, intercon.source, intercon.sink);
+		snd_soc_dapm_add_routes(dapm, &intercon, 1);
+	}
+	return 0;
+}
+
+static int msm_dai_stub_dai_probe(struct snd_soc_dai *dai)
+{
+	return msm_dai_stub_add_route(dai);
+}
+
+static int msm_dai_stub_dai_remove(struct snd_soc_dai *dai)
+{
+	pr_debug("%s:\n", __func__);
+	return 0;
+}
+
+static struct snd_soc_dai_driver msm_dai_stub_dai_rx = {
+	.playback = {
+		.stream_name = "Stub Playback",
+		.aif_name = "STUB_RX",
+		.rates = SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_8000 |
+			SNDRV_PCM_RATE_16000,
+		.formats = SNDRV_PCM_FMTBIT_S16_LE,
+		.channels_min = 1,
+		.channels_max = 2,
+		.rate_min = 8000,
+		.rate_max = 48000,
+	},
+	.ops = &msm_dai_stub_ops,
+	.probe = &msm_dai_stub_dai_probe,
+	.remove = &msm_dai_stub_dai_remove,
+};
+
+static struct snd_soc_dai_driver msm_dai_stub_dai_tx[] = {
+	{
+		.capture = {
+			.stream_name = "Stub Capture",
+			.aif_name = "STUB_TX",
+			.rates = SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_8000 |
+				SNDRV_PCM_RATE_16000,
+			.formats = SNDRV_PCM_FMTBIT_S16_LE,
+			.channels_min = 1,
+			.channels_max = 2,
+			.rate_min = 8000,
+			.rate_max = 48000,
+		},
+		.ops = &msm_dai_stub_ops,
+		.probe = &msm_dai_stub_dai_probe,
+		.remove = &msm_dai_stub_dai_remove,
+	},
+	{
+		.capture = {
+			.stream_name = "Stub1 Capture",
+			.aif_name = "STUB_1_TX",
+			.rates = SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_8000 |
+				SNDRV_PCM_RATE_16000,
+			.formats = SNDRV_PCM_FMTBIT_S16_LE,
+			.channels_min = 1,
+			.channels_max = 2,
+			.rate_min = 8000,
+			.rate_max = 48000,
+		},
+		.ops = &msm_dai_stub_ops,
+		.probe = &msm_dai_stub_dai_probe,
+		.remove = &msm_dai_stub_dai_remove,
+	}
+};
+
+static struct snd_soc_dai_driver msm_dai_stub_dtmf_tx_dai = {
+	.capture = {
+		.stream_name = "DTMF TX",
+		.aif_name = "STUB_DTMF_TX",
+		.rates = SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_8000 |
+			 SNDRV_PCM_RATE_16000,
+		.formats = SNDRV_PCM_FMTBIT_S16_LE,
+		.channels_min = 1,
+		.channels_max = 2,
+		.rate_min = 8000,
+		.rate_max = 48000,
+	},
+	.ops = &msm_dai_stub_ops,
+	.probe = &msm_dai_stub_dai_probe,
+	.remove = &msm_dai_stub_dai_remove,
+};
+
+static struct snd_soc_dai_driver msm_dai_stub_host_capture_tx_dai[] = {
+	{
+		.capture = {
+			.stream_name = "CS-VOICE HOST RX CAPTURE",
+			.aif_name = "STUB_HOST_RX_CAPTURE_TX",
+			.rates = SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_8000 |
+				SNDRV_PCM_RATE_16000,
+			.formats = SNDRV_PCM_FMTBIT_S16_LE,
+			.channels_min = 1,
+			.channels_max = 2,
+			.rate_min = 8000,
+			.rate_max = 48000,
+		},
+		.ops = &msm_dai_stub_ops,
+		.probe = &msm_dai_stub_dai_probe,
+		.remove = &msm_dai_stub_dai_remove,
+	},
+	{
+		.capture = {
+			.stream_name = "CS-VOICE HOST TX CAPTURE",
+			.aif_name = "STUB_HOST_TX_CAPTURE_TX",
+			.rates = SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_8000 |
+				SNDRV_PCM_RATE_16000,
+			.formats = SNDRV_PCM_FMTBIT_S16_LE,
+			.channels_min = 1,
+			.channels_max = 2,
+			.rate_min = 8000,
+			.rate_max = 48000,
+		},
+		.ops = &msm_dai_stub_ops,
+		.probe = &msm_dai_stub_dai_probe,
+		.remove = &msm_dai_stub_dai_remove,
+	},
+};
+
+static struct snd_soc_dai_driver msm_dai_stub_host_playback_rx_dai[] = {
+	{
+		.playback = {
+			.stream_name = "CS-VOICE HOST RX PLAYBACK",
+			.aif_name = "STUB_HOST_RX_PLAYBACK_RX",
+			.rates = SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_8000 |
+				 SNDRV_PCM_RATE_16000,
+			.formats = SNDRV_PCM_FMTBIT_S16_LE,
+			.channels_min = 1,
+			.channels_max = 2,
+			.rate_min = 8000,
+			.rate_max = 48000,
+		},
+		.ops = &msm_dai_stub_ops,
+		.probe = &msm_dai_stub_dai_probe,
+		.remove = &msm_dai_stub_dai_remove,
+	},
+	{
+		.playback = {
+			.stream_name = "CS-VOICE HOST TX PLAYBACK",
+			.aif_name = "STUB_HOST_TX_PLAYBACK_RX",
+			.rates = SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_8000 |
+				 SNDRV_PCM_RATE_16000,
+			.formats = SNDRV_PCM_FMTBIT_S16_LE,
+			.channels_min = 1,
+			.channels_max = 2,
+			.rate_min = 8000,
+			.rate_max = 48000,
+		},
+		.ops = &msm_dai_stub_ops,
+		.probe = &msm_dai_stub_dai_probe,
+		.remove = &msm_dai_stub_dai_remove,
+	},
+};
+
+static const struct snd_soc_component_driver msm_dai_stub_component = {
+	.name		= "msm-dai-stub-dev",
+};
+
+static int msm_dai_stub_dev_probe(struct platform_device *pdev)
+{
+	int rc, id = -1;
+	const char *stub_dev_id = "qcom,msm-dai-stub-dev-id";
+
+	rc = of_property_read_u32(pdev->dev.of_node, stub_dev_id, &id);
+	if (rc) {
+		dev_err(&pdev->dev,
+			"%s: missing %s in dt node\n", __func__, stub_dev_id);
+		return rc;
+	}
+
+	pdev->id = id;
+
+	pr_debug("%s: dev name %s, id:%d\n", __func__,
+		 dev_name(&pdev->dev), pdev->id);
+
+	switch (id) {
+	case STUB_RX:
+		rc = snd_soc_register_component(&pdev->dev,
+			&msm_dai_stub_component, &msm_dai_stub_dai_rx, 1);
+		break;
+	case STUB_TX:
+		rc = snd_soc_register_component(&pdev->dev,
+			&msm_dai_stub_component, &msm_dai_stub_dai_tx[0], 1);
+		break;
+	case STUB_1_TX:
+		rc = snd_soc_register_component(&pdev->dev,
+			&msm_dai_stub_component, &msm_dai_stub_dai_tx[1], 1);
+		break;
+	case STUB_DTMF_TX:
+		rc = snd_soc_register_component(&pdev->dev,
+			&msm_dai_stub_component,
+			&msm_dai_stub_dtmf_tx_dai, 1);
+		break;
+	case STUB_HOST_RX_CAPTURE_TX:
+		rc = snd_soc_register_component(&pdev->dev,
+			&msm_dai_stub_component,
+			&msm_dai_stub_host_capture_tx_dai[0], 1);
+		break;
+	case STUB_HOST_TX_CAPTURE_TX:
+		rc = snd_soc_register_component(&pdev->dev,
+			&msm_dai_stub_component,
+			&msm_dai_stub_host_capture_tx_dai[1], 1);
+		break;
+	case STUB_HOST_RX_PLAYBACK_RX:
+		rc = snd_soc_register_component(&pdev->dev,
+			&msm_dai_stub_component,
+			&msm_dai_stub_host_playback_rx_dai[0], 1);
+		break;
+	case STUB_HOST_TX_PLAYBACK_RX:
+		rc = snd_soc_register_component(&pdev->dev,
+			&msm_dai_stub_component,
+			&msm_dai_stub_host_playback_rx_dai[1], 1);
+		break;
+	default:
+		dev_err(&pdev->dev, "%s: invalid id %d\n", __func__, id);
+		rc = -ENODEV;
+		break;
+	}
+
+	return rc;
+}
+
+static int msm_dai_stub_dev_remove(struct platform_device *pdev)
+{
+	snd_soc_unregister_component(&pdev->dev);
+	return 0;
+}
+
+static const struct of_device_id msm_dai_stub_dev_dt_match[] = {
+	{ .compatible = "qcom,msm-dai-stub-dev", },
+	{ }
+};
+MODULE_DEVICE_TABLE(of, msm_dai_stub_dev_dt_match);
+
+static struct platform_driver msm_dai_stub_dev = {
+	.probe  = msm_dai_stub_dev_probe,
+	.remove = msm_dai_stub_dev_remove,
+	.driver = {
+		.name = "msm-dai-stub-dev",
+		.owner = THIS_MODULE,
+		.of_match_table = msm_dai_stub_dev_dt_match,
+	},
+};
+
+static int msm_dai_stub_probe(struct platform_device *pdev)
+{
+	int rc = 0;
+
+	dev_dbg(&pdev->dev, "dev name %s\n", dev_name(&pdev->dev));
+
+	rc = of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev);
+	if (rc) {
+		dev_err(&pdev->dev, "%s: failed to add child nodes, rc=%d\n",
+			__func__, rc);
+	} else {
+		dev_dbg(&pdev->dev, "%s: added child node\n", __func__);
+	}
+
+	return rc;
+}
+
+static int msm_dai_stub_remove(struct platform_device *pdev)
+{
+	pr_debug("%s:\n", __func__);
+
+	return 0;
+}
+
+static const struct of_device_id msm_dai_stub_dt_match[] = {
+	{.compatible = "qcom,msm-dai-stub"},
+	{}
+};
+
+MODULE_DEVICE_TABLE(of, msm_dai_stub_dt_match);
+
+static struct platform_driver msm_dai_stub_driver = {
+	.probe  = msm_dai_stub_probe,
+	.remove = msm_dai_stub_remove,
+	.driver = {
+		.name = "msm-dai-stub",
+		.owner = THIS_MODULE,
+		.of_match_table = msm_dai_stub_dt_match,
+	},
+};
+
+static int __init msm_dai_stub_init(void)
+{
+	int rc = 0;
+
+	pr_debug("%s:\n", __func__);
+
+	rc = platform_driver_register(&msm_dai_stub_driver);
+	if (rc) {
+		pr_err("%s: fail to register dai q6 driver", __func__);
+		goto fail;
+	}
+
+	rc = platform_driver_register(&msm_dai_stub_dev);
+	if (rc) {
+		pr_err("%s: fail to register dai q6 dev driver", __func__);
+		goto dai_stub_dev_fail;
+	}
+	return rc;
+
+dai_stub_dev_fail:
+	platform_driver_unregister(&msm_dai_stub_driver);
+fail:
+	return rc;
+}
+module_init(msm_dai_stub_init);
+
+static void __exit msm_dai_stub_exit(void)
+{
+	pr_debug("%s:\n", __func__);
+
+	platform_driver_unregister(&msm_dai_stub_dev);
+	platform_driver_unregister(&msm_dai_stub_driver);
+}
+module_exit(msm_dai_stub_exit);
+
+/* Module information */
+MODULE_DESCRIPTION("MSM Stub DSP DAI driver");
+MODULE_LICENSE("GPL v2");
diff -Nruw linux-4.4.115-fbx/sound/soc/msm./qdsp6v2/msm-dolby-common.h linux-4.4.115-fbx/sound/soc/msm/qdsp6v2/msm-dolby-common.h
--- linux-4.4.115-fbx/sound/soc/msm./qdsp6v2/msm-dolby-common.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/sound/soc/msm/qdsp6v2/msm-dolby-common.h	2019-01-22 16:16:29.627301863 +0100
@@ -0,0 +1,266 @@
+/* Copyright (c) 2013-2014, 2016 The Linux Foundation. All rights reserved.
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _MSM_DOLBY_COMMON_H_
+#define _MSM_DOLBY_COMMON_H_
+
+#include <sound/soc.h>
+
+
+#define DOLBY_BUNDLE_MODULE_ID		0x00010723
+#define DOLBY_VISUALIZER_MODULE_ID	0x0001072B
+
+#define DOLBY_PARAM_ID_VDHE		0x0001074D
+#define DOLBY_PARAM_ID_VSPE		0x00010750
+#define DOLBY_PARAM_ID_DSSF		0x00010753
+#define DOLBY_PARAM_ID_DVLI		0x0001073E
+#define DOLBY_PARAM_ID_DVLO		0x0001073F
+#define DOLBY_PARAM_ID_DVLE		0x0001073C
+#define DOLBY_PARAM_ID_DVMC		0x00010741
+#define DOLBY_PARAM_ID_DVME		0x00010740
+#define DOLBY_PARAM_ID_IENB		0x00010744
+#define DOLBY_PARAM_ID_IEBF		0x00010745
+#define DOLBY_PARAM_ID_IEON		0x00010743
+#define DOLBY_PARAM_ID_DEON		0x00010738
+#define DOLBY_PARAM_ID_NGON		0x00010736
+#define DOLBY_PARAM_ID_GEON		0x00010748
+#define DOLBY_PARAM_ID_GENB		0x00010749
+#define DOLBY_PARAM_ID_GEBF		0x0001074A
+#define DOLBY_PARAM_ID_AONB		0x0001075B
+#define DOLBY_PARAM_ID_AOBF		0x0001075C
+#define DOLBY_PARAM_ID_AOBG		0x0001075D
+#define DOLBY_PARAM_ID_AOON		0x00010759
+#define DOLBY_PARAM_ID_ARNB		0x0001075F
+#define DOLBY_PARAM_ID_ARBF		0x00010760
+#define DOLBY_PARAM_ID_PLB		0x00010768
+#define DOLBY_PARAM_ID_PLMD		0x00010767
+#define DOLBY_PARAM_ID_DHSB		0x0001074E
+#define DOLBY_PARAM_ID_DHRG		0x0001074F
+#define DOLBY_PARAM_ID_DSSB		0x00010751
+#define DOLBY_PARAM_ID_DSSA		0x00010752
+#define DOLBY_PARAM_ID_DVLA		0x0001073D
+#define DOLBY_PARAM_ID_IEBT		0x00010746
+#define DOLBY_PARAM_ID_IEA		0x0001076A
+#define DOLBY_PARAM_ID_DEA		0x00010739
+#define DOLBY_PARAM_ID_DED		0x0001073A
+#define DOLBY_PARAM_ID_GEBG		0x0001074B
+#define DOLBY_PARAM_ID_AOCC		0x0001075A
+#define DOLBY_PARAM_ID_ARBI		0x00010761
+#define DOLBY_PARAM_ID_ARBL		0x00010762
+#define DOLBY_PARAM_ID_ARBH		0x00010763
+#define DOLBY_PARAM_ID_AROD		0x00010764
+#define DOLBY_PARAM_ID_ARTP		0x00010765
+#define DOLBY_PARAM_ID_VMON		0x00010756
+#define DOLBY_PARAM_ID_VMB		0x00010757
+#define DOLBY_PARAM_ID_VCNB		0x00010733
+#define DOLBY_PARAM_ID_VCBF		0x00010734
+#define DOLBY_PARAM_ID_PREG		0x00010728
+#define DOLBY_PARAM_ID_VEN		0x00010732
+#define DOLBY_PARAM_ID_PSTG		0x00010729
+#define DOLBY_PARAM_ID_INIT_ENDP	0x00010727
+
+/* Not Used with Set Param kcontrol, only to query using Get Param */
+#define DOLBY_PARAM_ID_VER		0x00010726
+
+#define DOLBY_PARAM_ID_VCBG		0x00010730
+#define DOLBY_PARAM_ID_VCBE		0x00010731
+
+/* DOLBY DAP control params */
+#define DOLBY_COMMIT_ALL_TO_DSP		0x70000001
+#define DOLBY_COMMIT_TO_DSP		0x70000002
+#define DOLBY_USE_CACHE			0x70000003
+#define DOLBY_AUTO_ENDP			0x70000004
+#define DOLBY_AUTO_ENDDEP_PARAMS	0x70000005
+#define DOLBY_DAP_BYPASS		0x70000006
+
+#define DOLBY_ENABLE_CUSTOM_STEREO	0x000108c7
+
+/* DOLBY DAP offsets start */
+#define DOLBY_PARAM_VDHE_LENGTH   1
+#define DOLBY_PARAM_VDHE_OFFSET   0
+#define DOLBY_PARAM_VSPE_LENGTH   1
+#define DOLBY_PARAM_VSPE_OFFSET   (DOLBY_PARAM_VDHE_OFFSET + \
+					DOLBY_PARAM_VDHE_LENGTH)
+#define DOLBY_PARAM_DSSF_LENGTH   1
+#define DOLBY_PARAM_DSSF_OFFSET   (DOLBY_PARAM_VSPE_OFFSET + \
+					DOLBY_PARAM_VSPE_LENGTH)
+#define DOLBY_PARAM_DVLI_LENGTH   1
+#define DOLBY_PARAM_DVLI_OFFSET   (DOLBY_PARAM_DSSF_OFFSET + \
+					DOLBY_PARAM_DSSF_LENGTH)
+#define DOLBY_PARAM_DVLO_LENGTH   1
+#define DOLBY_PARAM_DVLO_OFFSET   (DOLBY_PARAM_DVLI_OFFSET + \
+					DOLBY_PARAM_DVLI_LENGTH)
+#define DOLBY_PARAM_DVLE_LENGTH   1
+#define DOLBY_PARAM_DVLE_OFFSET   (DOLBY_PARAM_DVLO_OFFSET + \
+					DOLBY_PARAM_DVLO_LENGTH)
+#define DOLBY_PARAM_DVMC_LENGTH   1
+#define DOLBY_PARAM_DVMC_OFFSET   (DOLBY_PARAM_DVLE_OFFSET + \
+					DOLBY_PARAM_DVLE_LENGTH)
+#define DOLBY_PARAM_DVME_LENGTH   1
+#define DOLBY_PARAM_DVME_OFFSET   (DOLBY_PARAM_DVMC_OFFSET + \
+					DOLBY_PARAM_DVMC_LENGTH)
+#define DOLBY_PARAM_IENB_LENGTH   1
+#define DOLBY_PARAM_IENB_OFFSET   (DOLBY_PARAM_DVME_OFFSET + \
+					DOLBY_PARAM_DVME_LENGTH)
+#define DOLBY_PARAM_IEBF_LENGTH   40
+#define DOLBY_PARAM_IEBF_OFFSET   (DOLBY_PARAM_IENB_OFFSET + \
+					DOLBY_PARAM_IENB_LENGTH)
+#define DOLBY_PARAM_IEON_LENGTH   1
+#define DOLBY_PARAM_IEON_OFFSET   (DOLBY_PARAM_IEBF_OFFSET + \
+					DOLBY_PARAM_IEBF_LENGTH)
+#define DOLBY_PARAM_DEON_LENGTH   1
+#define DOLBY_PARAM_DEON_OFFSET   (DOLBY_PARAM_IEON_OFFSET + \
+					DOLBY_PARAM_IEON_LENGTH)
+#define DOLBY_PARAM_NGON_LENGTH   1
+#define DOLBY_PARAM_NGON_OFFSET   (DOLBY_PARAM_DEON_OFFSET + \
+					DOLBY_PARAM_DEON_LENGTH)
+#define DOLBY_PARAM_GEON_LENGTH   1
+#define DOLBY_PARAM_GEON_OFFSET   (DOLBY_PARAM_NGON_OFFSET + \
+					DOLBY_PARAM_NGON_LENGTH)
+#define DOLBY_PARAM_GENB_LENGTH   1
+#define DOLBY_PARAM_GENB_OFFSET   (DOLBY_PARAM_GEON_OFFSET + \
+					DOLBY_PARAM_GEON_LENGTH)
+#define DOLBY_PARAM_GEBF_LENGTH   40
+#define DOLBY_PARAM_GEBF_OFFSET   (DOLBY_PARAM_GENB_OFFSET + \
+					DOLBY_PARAM_GENB_LENGTH)
+#define DOLBY_PARAM_AONB_LENGTH   1
+#define DOLBY_PARAM_AONB_OFFSET   (DOLBY_PARAM_GEBF_OFFSET + \
+					DOLBY_PARAM_GEBF_LENGTH)
+#define DOLBY_PARAM_AOBF_LENGTH   40
+#define DOLBY_PARAM_AOBF_OFFSET   (DOLBY_PARAM_AONB_OFFSET + \
+					DOLBY_PARAM_AONB_LENGTH)
+#define DOLBY_PARAM_AOBG_LENGTH   329
+#define DOLBY_PARAM_AOBG_OFFSET   (DOLBY_PARAM_AOBF_OFFSET + \
+					DOLBY_PARAM_AOBF_LENGTH)
+#define DOLBY_PARAM_AOON_LENGTH   1
+#define DOLBY_PARAM_AOON_OFFSET   (DOLBY_PARAM_AOBG_OFFSET + \
+					DOLBY_PARAM_AOBG_LENGTH)
+#define DOLBY_PARAM_ARNB_LENGTH   1
+#define DOLBY_PARAM_ARNB_OFFSET   (DOLBY_PARAM_AOON_OFFSET + \
+					DOLBY_PARAM_AOON_LENGTH)
+#define DOLBY_PARAM_ARBF_LENGTH   40
+#define DOLBY_PARAM_ARBF_OFFSET   (DOLBY_PARAM_ARNB_OFFSET + \
+					DOLBY_PARAM_ARNB_LENGTH)
+#define DOLBY_PARAM_PLB_LENGTH    1
+#define DOLBY_PARAM_PLB_OFFSET    (DOLBY_PARAM_ARBF_OFFSET + \
+					DOLBY_PARAM_ARBF_LENGTH)
+#define DOLBY_PARAM_PLMD_LENGTH   1
+#define DOLBY_PARAM_PLMD_OFFSET   (DOLBY_PARAM_PLB_OFFSET + \
+					DOLBY_PARAM_PLB_LENGTH)
+#define DOLBY_PARAM_DHSB_LENGTH   1
+#define DOLBY_PARAM_DHSB_OFFSET   (DOLBY_PARAM_PLMD_OFFSET + \
+					DOLBY_PARAM_PLMD_LENGTH)
+#define DOLBY_PARAM_DHRG_LENGTH   1
+#define DOLBY_PARAM_DHRG_OFFSET   (DOLBY_PARAM_DHSB_OFFSET + \
+					DOLBY_PARAM_DHSB_LENGTH)
+#define DOLBY_PARAM_DSSB_LENGTH   1
+#define DOLBY_PARAM_DSSB_OFFSET   (DOLBY_PARAM_DHRG_OFFSET + \
+					DOLBY_PARAM_DHRG_LENGTH)
+#define DOLBY_PARAM_DSSA_LENGTH   1
+#define DOLBY_PARAM_DSSA_OFFSET   (DOLBY_PARAM_DSSB_OFFSET + \
+					DOLBY_PARAM_DSSB_LENGTH)
+#define DOLBY_PARAM_DVLA_LENGTH   1
+#define DOLBY_PARAM_DVLA_OFFSET   (DOLBY_PARAM_DSSA_OFFSET + \
+					DOLBY_PARAM_DSSA_LENGTH)
+#define DOLBY_PARAM_IEBT_LENGTH   40
+#define DOLBY_PARAM_IEBT_OFFSET   (DOLBY_PARAM_DVLA_OFFSET + \
+					DOLBY_PARAM_DVLA_LENGTH)
+#define DOLBY_PARAM_IEA_LENGTH    1
+#define DOLBY_PARAM_IEA_OFFSET    (DOLBY_PARAM_IEBT_OFFSET + \
+					DOLBY_PARAM_IEBT_LENGTH)
+#define DOLBY_PARAM_DEA_LENGTH    1
+#define DOLBY_PARAM_DEA_OFFSET    (DOLBY_PARAM_IEA_OFFSET + \
+					DOLBY_PARAM_IEA_LENGTH)
+#define DOLBY_PARAM_DED_LENGTH    1
+#define DOLBY_PARAM_DED_OFFSET    (DOLBY_PARAM_DEA_OFFSET + \
+					DOLBY_PARAM_DEA_LENGTH)
+#define DOLBY_PARAM_GEBG_LENGTH   40
+#define DOLBY_PARAM_GEBG_OFFSET   (DOLBY_PARAM_DED_OFFSET + \
+					DOLBY_PARAM_DED_LENGTH)
+#define DOLBY_PARAM_AOCC_LENGTH   1
+#define DOLBY_PARAM_AOCC_OFFSET   (DOLBY_PARAM_GEBG_OFFSET + \
+					DOLBY_PARAM_GEBG_LENGTH)
+#define DOLBY_PARAM_ARBI_LENGTH   40
+#define DOLBY_PARAM_ARBI_OFFSET   (DOLBY_PARAM_AOCC_OFFSET + \
+					DOLBY_PARAM_AOCC_LENGTH)
+#define DOLBY_PARAM_ARBL_LENGTH   40
+#define DOLBY_PARAM_ARBL_OFFSET   (DOLBY_PARAM_ARBI_OFFSET + \
+					DOLBY_PARAM_ARBI_LENGTH)
+#define DOLBY_PARAM_ARBH_LENGTH   40
+#define DOLBY_PARAM_ARBH_OFFSET   (DOLBY_PARAM_ARBL_OFFSET + \
+					DOLBY_PARAM_ARBL_LENGTH)
+#define DOLBY_PARAM_AROD_LENGTH   1
+#define DOLBY_PARAM_AROD_OFFSET   (DOLBY_PARAM_ARBH_OFFSET + \
+					DOLBY_PARAM_ARBH_LENGTH)
+#define DOLBY_PARAM_ARTP_LENGTH   1
+#define DOLBY_PARAM_ARTP_OFFSET   (DOLBY_PARAM_AROD_OFFSET + \
+					DOLBY_PARAM_AROD_LENGTH)
+#define DOLBY_PARAM_VMON_LENGTH   1
+#define DOLBY_PARAM_VMON_OFFSET   (DOLBY_PARAM_ARTP_OFFSET + \
+					DOLBY_PARAM_ARTP_LENGTH)
+#define DOLBY_PARAM_VMB_LENGTH    1
+#define DOLBY_PARAM_VMB_OFFSET    (DOLBY_PARAM_VMON_OFFSET + \
+					DOLBY_PARAM_VMON_LENGTH)
+#define DOLBY_PARAM_VCNB_LENGTH   1
+#define DOLBY_PARAM_VCNB_OFFSET   (DOLBY_PARAM_VMB_OFFSET + \
+					DOLBY_PARAM_VMB_LENGTH)
+#define DOLBY_PARAM_VCBF_LENGTH   20
+#define DOLBY_PARAM_VCBF_OFFSET   (DOLBY_PARAM_VCNB_OFFSET + \
+					DOLBY_PARAM_VCNB_LENGTH)
+#define DOLBY_PARAM_PREG_LENGTH   1
+#define DOLBY_PARAM_PREG_OFFSET   (DOLBY_PARAM_VCBF_OFFSET + \
+					DOLBY_PARAM_VCBF_LENGTH)
+#define DOLBY_PARAM_VEN_LENGTH    1
+#define DOLBY_PARAM_VEN_OFFSET    (DOLBY_PARAM_PREG_OFFSET + \
+					DOLBY_PARAM_PREG_LENGTH)
+#define DOLBY_PARAM_PSTG_LENGTH   1
+#define DOLBY_PARAM_PSTG_OFFSET   (DOLBY_PARAM_VEN_OFFSET + \
+					DOLBY_PARAM_VEN_LENGTH)
+
+#define DOLBY_PARAM_INT_ENDP_LENGTH		1
+#define DOLBY_PARAM_PAYLOAD_SIZE		3
+#define DOLBY_MAX_LENGTH_INDIVIDUAL_PARAM	329
+
+#define TOTAL_LENGTH_DOLBY_PARAM		745
+#define DOLBY_VIS_PARAM_HEADER_SIZE		25
+#define DOLBY_PARAM_VCNB_MAX_LENGTH		40
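+
+/*
+ * Illustrative arithmetic check: the chained offsets above cover 47 params:
+ * 36 single-word params, 9 params of 40 words, AOBG (329) and VCBF (20),
+ * i.e. 36 + 360 + 329 + 20 = 745 = TOTAL_LENGTH_DOLBY_PARAM.
+ */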
+
+#define DOLBY_INVALID_PORT_ID			-1
+
+enum {
+	DEVICE_NONE			= 0x0,
+	/* output devices */
+	EARPIECE			= 0x1,
+	SPEAKER				= 0x2,
+	WIRED_HEADSET			= 0x4,
+	WIRED_HEADPHONE			= 0x8,
+	BLUETOOTH_SCO			= 0x10,
+	BLUETOOTH_SCO_HEADSET		= 0x20,
+	BLUETOOTH_SCO_CARKIT		= 0x40,
+	BLUETOOTH_A2DP			= 0x80,
+	BLUETOOTH_A2DP_HEADPHONES	= 0x100,
+	BLUETOOTH_A2DP_SPEAKER		= 0x200,
+	AUX_DIGITAL			= 0x400,
+	ANLG_DOCK_HEADSET		= 0x800,
+	DGTL_DOCK_HEADSET		= 0x1000,
+	USB_ACCESSORY			= 0x2000,
+	USB_DEVICE			= 0x4000,
+	REMOTE_SUBMIX			= 0x8000,
+	ANC_HEADSET			= 0x10000,
+	ANC_HEADPHONE			= 0x20000,
+	PROXY				= 0x2000000,
+	FM				= 0x100000,
+	FM_TX				= 0x1000000,
+	DEVICE_OUT_DEFAULT		= 0x40000000,
+	DEVICE_OUT_ALL			= 0x403FFFFF,
+};
+#endif
diff -Nruw linux-4.4.115-fbx/sound/soc/msm./qdsp6v2/msm-dolby-dap-config.h linux-4.4.115-fbx/sound/soc/msm/qdsp6v2/msm-dolby-dap-config.h
--- linux-4.4.115-fbx/sound/soc/msm./qdsp6v2/msm-dolby-dap-config.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/sound/soc/msm/qdsp6v2/msm-dolby-dap-config.h	2019-01-22 16:16:29.627301863 +0100
@@ -0,0 +1,85 @@
+/* Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _MSM_DOLBY_DAP_CONFIG_H_
+#define _MSM_DOLBY_DAP_CONFIG_H_
+
+#include <sound/soc.h>
+#include "msm-dolby-common.h"
+
+#ifdef CONFIG_DOLBY_DAP
+/* DOLBY DAP GUIDs */
+#define DOLBY_ADM_COPP_TOPOLOGY_ID	0x0001033B
+#define NUM_DOLBY_ENDP_DEVICE                 23
+
+#define DOLBY_NUM_ENDP_DEPENDENT_PARAMS	  3
+#define DOLBY_ENDDEP_PARAM_DVLO_OFFSET	  0
+#define DOLBY_ENDDEP_PARAM_DVLO_LENGTH	  1
+#define DOLBY_ENDDEP_PARAM_DVLI_OFFSET    (DOLBY_ENDDEP_PARAM_DVLO_OFFSET + \
+						DOLBY_ENDDEP_PARAM_DVLO_LENGTH)
+#define DOLBY_ENDDEP_PARAM_DVLI_LENGTH    1
+#define DOLBY_ENDDEP_PARAM_VMB_OFFSET     (DOLBY_ENDDEP_PARAM_DVLI_OFFSET + \
+						DOLBY_ENDDEP_PARAM_DVLI_LENGTH)
+#define DOLBY_ENDDEP_PARAM_VMB_LENGTH     1
+#define DOLBY_ENDDEP_PARAM_LENGTH         (DOLBY_ENDDEP_PARAM_DVLO_LENGTH + \
+		DOLBY_ENDDEP_PARAM_DVLI_LENGTH + DOLBY_ENDDEP_PARAM_VMB_LENGTH)
+
+#define MAX_DOLBY_PARAMS			47
+#define MAX_DOLBY_CTRL_PARAMS			5
+#define ALL_DOLBY_PARAMS			(MAX_DOLBY_PARAMS + \
+							MAX_DOLBY_CTRL_PARAMS)
+#define DOLBY_COMMIT_ALL_IDX			MAX_DOLBY_PARAMS
+#define DOLBY_COMMIT_IDX			(MAX_DOLBY_PARAMS+1)
+#define DOLBY_USE_CACHE_IDX			(MAX_DOLBY_PARAMS+2)
+#define DOLBY_AUTO_ENDP_IDX			(MAX_DOLBY_PARAMS+3)
+#define DOLBY_AUTO_ENDDEP_IDX			(MAX_DOLBY_PARAMS+4)
+
+/* DOLBY device definitions */
+enum {
+	DOLBY_ENDP_INT_SPEAKERS = 0,
+	DOLBY_ENDP_EXT_SPEAKERS,
+	DOLBY_ENDP_HEADPHONES,
+	DOLBY_ENDP_HDMI,
+	DOLBY_ENDP_SPDIF,
+	DOLBY_ENDP_DLNA,
+	DOLBY_ENDP_ANALOG,
+};
+
+/* DOLBY device definitions end */
+
+struct dolby_dap_params {
+	uint32_t value[TOTAL_LENGTH_DOLBY_PARAM + MAX_DOLBY_PARAMS];
+} __packed;
+
+int msm_dolby_dap_init(int port_id, int copp_idx, int channels,
+		       bool is_custom_stereo_on);
+void msm_dolby_dap_deinit(int port_id);
+void msm_dolby_dap_add_controls(struct snd_soc_platform *platform);
+int dolby_dap_set_custom_stereo_onoff(int port_id, int copp_idx,
+				      bool is_custom_stereo_enabled);
+/* Dolby DAP API end */
+#else
+/* static inline stubs so this header can be included in multiple files */
+static inline int msm_dolby_dap_init(int port_id, int copp_idx, int channels,
+				     bool is_custom_stereo_on)
+{
+	return 0;
+}
+static inline void msm_dolby_dap_deinit(int port_id) { }
+static inline void msm_dolby_dap_add_controls(struct snd_soc_platform *platform)
+{
+}
+static inline int dolby_dap_set_custom_stereo_onoff(int port_id, int copp_idx,
+					bool is_custom_stereo_enabled)
+{
+	return 0;
+}
+#endif
+
+#endif
+
diff -Nruw linux-4.4.115-fbx/sound/soc/msm./qdsp6v2/msm-ds2-dap-config.c linux-4.4.115-fbx/sound/soc/msm/qdsp6v2/msm-ds2-dap-config.c
--- linux-4.4.115-fbx/sound/soc/msm./qdsp6v2/msm-ds2-dap-config.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/sound/soc/msm/qdsp6v2/msm-ds2-dap-config.c	2019-10-29 09:26:26.149227702 +0100
@@ -0,0 +1,2303 @@
+/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/bitops.h>
+#include <sound/control.h>
+#include <sound/q6adm-v2.h>
+
+#include "msm-ds2-dap-config.h"
+#include "msm-pcm-routing-v2.h"
+#include <sound/q6core.h>
+
+
+#if defined(CONFIG_DOLBY_DS2) || defined(CONFIG_DOLBY_LICENSE)
+
+/* soft volume ramp up/down period, in ms */
+#define DOLBY_SOFT_VOLUME_PERIOD	40
+/* soft volume ramp step, in us */
+#define DOLBY_SOFT_VOLUME_STEP		1000
+#define DOLBY_ADDITIONAL_RAMP_WAIT	10
+#define SOFT_VOLUME_PARAM_SIZE		3
+#define PARAM_PAYLOAD_SIZE		3
+
+enum {
+	DOLBY_SOFT_VOLUME_CURVE_LINEAR = 0,
+	DOLBY_SOFT_VOLUME_CURVE_EXP,
+	DOLBY_SOFT_VOLUME_CURVE_LOG,
+};
+
+#define VOLUME_ZERO_GAIN     0x0
+#define VOLUME_UNITY_GAIN    0x2000
+/* Wait time for module enable/disable */
+#define DOLBY_MODULE_ENABLE_PERIOD     50
+
+/* DS2 parameter cache devices */
+enum {
+	DOLBY_OFF_CACHE = 0,
+	DOLBY_SPEAKER_CACHE,
+	DOLBY_HEADPHONE_CACHE,
+	DOLBY_HDMI_CACHE,
+	DOLBY_WFD_CACHE,
+	DOLBY_FM_CACHE,
+	DOLBY_MAX_CACHE,
+};
+
+enum {
+	DAP_SOFT_BYPASS = 0,
+	DAP_HARD_BYPASS,
+};
+
+enum {
+	MODULE_DISABLE = 0,
+	MODULE_ENABLE,
+};
+/* dolby param ids to/from dsp */
+static uint32_t	ds2_dap_params_id[MAX_DS2_PARAMS] = {
+	DOLBY_PARAM_ID_VDHE, DOLBY_PARAM_ID_VSPE, DOLBY_PARAM_ID_DSSF,
+	DOLBY_PARAM_ID_DVLI, DOLBY_PARAM_ID_DVLO, DOLBY_PARAM_ID_DVLE,
+	DOLBY_PARAM_ID_DVMC, DOLBY_PARAM_ID_DVME, DOLBY_PARAM_ID_IENB,
+	DOLBY_PARAM_ID_IEBF, DOLBY_PARAM_ID_IEON, DOLBY_PARAM_ID_DEON,
+	DOLBY_PARAM_ID_NGON, DOLBY_PARAM_ID_GEON, DOLBY_PARAM_ID_GENB,
+	DOLBY_PARAM_ID_GEBF, DOLBY_PARAM_ID_AONB, DOLBY_PARAM_ID_AOBF,
+	DOLBY_PARAM_ID_AOBG, DOLBY_PARAM_ID_AOON, DOLBY_PARAM_ID_ARNB,
+	DOLBY_PARAM_ID_ARBF, DOLBY_PARAM_ID_PLB,  DOLBY_PARAM_ID_PLMD,
+	DOLBY_PARAM_ID_DHSB, DOLBY_PARAM_ID_DHRG, DOLBY_PARAM_ID_DSSB,
+	DOLBY_PARAM_ID_DSSA, DOLBY_PARAM_ID_DVLA, DOLBY_PARAM_ID_IEBT,
+	DOLBY_PARAM_ID_IEA,  DOLBY_PARAM_ID_DEA,  DOLBY_PARAM_ID_DED,
+	DOLBY_PARAM_ID_GEBG, DOLBY_PARAM_ID_AOCC, DOLBY_PARAM_ID_ARBI,
+	DOLBY_PARAM_ID_ARBL, DOLBY_PARAM_ID_ARBH, DOLBY_PARAM_ID_AROD,
+	DOLBY_PARAM_ID_ARTP, DOLBY_PARAM_ID_VMON, DOLBY_PARAM_ID_VMB,
+	DOLBY_PARAM_ID_VCNB, DOLBY_PARAM_ID_VCBF, DOLBY_PARAM_ID_PREG,
+	DOLBY_PARAM_ID_VEN,  DOLBY_PARAM_ID_PSTG, DOLBY_PARAM_ID_INIT_ENDP,
+};
+
+/* modified state:	0x00000000 - Not updated
+ *			> 0x00000000 && < 0x00010000
+ *				Updated and not committed to DSP
+ *			0x00010001 - Updated and committed to DSP
+ *			> 0x00010001 - Modified after being committed to DSP
+ */
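+/*
+ * Worked reading of the encoding above (illustrative): 0x00000000 means the
+ * cached value was never touched; values below 0x00010000 count cache
+ * updates the DSP has not seen yet; exactly 0x00010001 means the cached
+ * value is live on the DSP; anything larger means the cache was modified
+ * again after that commit.
+ */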
+/* param offset */
+static uint32_t	ds2_dap_params_offset[MAX_DS2_PARAMS] = {
+	DOLBY_PARAM_VDHE_OFFSET, DOLBY_PARAM_VSPE_OFFSET,
+	DOLBY_PARAM_DSSF_OFFSET, DOLBY_PARAM_DVLI_OFFSET,
+	DOLBY_PARAM_DVLO_OFFSET, DOLBY_PARAM_DVLE_OFFSET,
+	DOLBY_PARAM_DVMC_OFFSET, DOLBY_PARAM_DVME_OFFSET,
+	DOLBY_PARAM_IENB_OFFSET, DOLBY_PARAM_IEBF_OFFSET,
+	DOLBY_PARAM_IEON_OFFSET, DOLBY_PARAM_DEON_OFFSET,
+	DOLBY_PARAM_NGON_OFFSET, DOLBY_PARAM_GEON_OFFSET,
+	DOLBY_PARAM_GENB_OFFSET, DOLBY_PARAM_GEBF_OFFSET,
+	DOLBY_PARAM_AONB_OFFSET, DOLBY_PARAM_AOBF_OFFSET,
+	DOLBY_PARAM_AOBG_OFFSET, DOLBY_PARAM_AOON_OFFSET,
+	DOLBY_PARAM_ARNB_OFFSET, DOLBY_PARAM_ARBF_OFFSET,
+	DOLBY_PARAM_PLB_OFFSET,  DOLBY_PARAM_PLMD_OFFSET,
+	DOLBY_PARAM_DHSB_OFFSET, DOLBY_PARAM_DHRG_OFFSET,
+	DOLBY_PARAM_DSSB_OFFSET, DOLBY_PARAM_DSSA_OFFSET,
+	DOLBY_PARAM_DVLA_OFFSET, DOLBY_PARAM_IEBT_OFFSET,
+	DOLBY_PARAM_IEA_OFFSET,  DOLBY_PARAM_DEA_OFFSET,
+	DOLBY_PARAM_DED_OFFSET,  DOLBY_PARAM_GEBG_OFFSET,
+	DOLBY_PARAM_AOCC_OFFSET, DOLBY_PARAM_ARBI_OFFSET,
+	DOLBY_PARAM_ARBL_OFFSET, DOLBY_PARAM_ARBH_OFFSET,
+	DOLBY_PARAM_AROD_OFFSET, DOLBY_PARAM_ARTP_OFFSET,
+	DOLBY_PARAM_VMON_OFFSET, DOLBY_PARAM_VMB_OFFSET,
+	DOLBY_PARAM_VCNB_OFFSET, DOLBY_PARAM_VCBF_OFFSET,
+	DOLBY_PARAM_PREG_OFFSET, DOLBY_PARAM_VEN_OFFSET,
+	DOLBY_PARAM_PSTG_OFFSET, DOLBY_PARAM_INT_ENDP_OFFSET,
+};
+/* param_length */
+static uint32_t	ds2_dap_params_length[MAX_DS2_PARAMS] = {
+	DOLBY_PARAM_VDHE_LENGTH, DOLBY_PARAM_VSPE_LENGTH,
+	DOLBY_PARAM_DSSF_LENGTH, DOLBY_PARAM_DVLI_LENGTH,
+	DOLBY_PARAM_DVLO_LENGTH, DOLBY_PARAM_DVLE_LENGTH,
+	DOLBY_PARAM_DVMC_LENGTH, DOLBY_PARAM_DVME_LENGTH,
+	DOLBY_PARAM_IENB_LENGTH, DOLBY_PARAM_IEBF_LENGTH,
+	DOLBY_PARAM_IEON_LENGTH, DOLBY_PARAM_DEON_LENGTH,
+	DOLBY_PARAM_NGON_LENGTH, DOLBY_PARAM_GEON_LENGTH,
+	DOLBY_PARAM_GENB_LENGTH, DOLBY_PARAM_GEBF_LENGTH,
+	DOLBY_PARAM_AONB_LENGTH, DOLBY_PARAM_AOBF_LENGTH,
+	DOLBY_PARAM_AOBG_LENGTH, DOLBY_PARAM_AOON_LENGTH,
+	DOLBY_PARAM_ARNB_LENGTH, DOLBY_PARAM_ARBF_LENGTH,
+	DOLBY_PARAM_PLB_LENGTH,  DOLBY_PARAM_PLMD_LENGTH,
+	DOLBY_PARAM_DHSB_LENGTH, DOLBY_PARAM_DHRG_LENGTH,
+	DOLBY_PARAM_DSSB_LENGTH, DOLBY_PARAM_DSSA_LENGTH,
+	DOLBY_PARAM_DVLA_LENGTH, DOLBY_PARAM_IEBT_LENGTH,
+	DOLBY_PARAM_IEA_LENGTH,  DOLBY_PARAM_DEA_LENGTH,
+	DOLBY_PARAM_DED_LENGTH,  DOLBY_PARAM_GEBG_LENGTH,
+	DOLBY_PARAM_AOCC_LENGTH, DOLBY_PARAM_ARBI_LENGTH,
+	DOLBY_PARAM_ARBL_LENGTH, DOLBY_PARAM_ARBH_LENGTH,
+	DOLBY_PARAM_AROD_LENGTH, DOLBY_PARAM_ARTP_LENGTH,
+	DOLBY_PARAM_VMON_LENGTH, DOLBY_PARAM_VMB_LENGTH,
+	DOLBY_PARAM_VCNB_LENGTH, DOLBY_PARAM_VCBF_LENGTH,
+	DOLBY_PARAM_PREG_LENGTH, DOLBY_PARAM_VEN_LENGTH,
+	DOLBY_PARAM_PSTG_LENGTH, DOLBY_PARAM_INT_ENDP_LENGTH,
+};
+
+struct ds2_dap_params_s {
+	int32_t params_val[TOTAL_LENGTH_DS2_PARAM];
+	int32_t dap_params_modified[MAX_DS2_PARAMS];
+};
+
+struct audio_rx_cal_data {
+	char aud_proc_data[AUD_PROC_BLOCK_SIZE];
+	int32_t  aud_proc_size;
+	char aud_vol_data[AUD_VOL_BLOCK_SIZE];
+	int32_t aud_vol_size;
+};
+
+static struct ds2_dap_params_s ds2_dap_params[DOLBY_MAX_CACHE];
+
+struct ds2_device_mapping {
+	int32_t device_id; /* audio_out_... */
+	int port_id; /* afe port. constant for a target variant. routing-v2 */
+	/* Only one Dolby COPP for a specific port */
+	int copp_idx; /* idx for the copp port on which ds2 is active */
+	int cache_dev; /* idx to a shared parameter array dependent on device */
+	uint32_t stream_ref_count;
+	bool active;
+	void *cal_data;
+};
+
+static struct ds2_device_mapping dev_map[DS2_DEVICES_ALL];
+
+struct ds2_dap_params_states_s {
+	bool use_cache;
+	bool dap_bypass;
+	bool dap_bypass_type;
+	bool node_opened;
+	int32_t  device;
+	bool custom_stereo_onoff;
+};
+
+static struct ds2_dap_params_states_s ds2_dap_params_states = {
+	.use_cache = true,
+	.dap_bypass = false,
+	.dap_bypass_type = false,
+	.device = DEVICE_NONE,
+};
+
+static int all_supported_devices = EARPIECE|SPEAKER|WIRED_HEADSET|
+			WIRED_HEADPHONE|BLUETOOTH_SCO|AUX_DIGITAL|
+			ANLG_DOCK_HEADSET|DGTL_DOCK_HEADSET|
+			REMOTE_SUBMIX|ANC_HEADSET|ANC_HEADPHONE|
+			PROXY|FM|FM_TX|DEVICE_NONE|
+			BLUETOOTH_SCO_HEADSET|BLUETOOTH_SCO_CARKIT;
+
+
+static void msm_ds2_dap_check_and_update_ramp_wait(int port_id, int copp_idx,
+						   int *ramp_wait)
+{
+
+	int32_t *update_params_value = NULL;
+	uint32_t params_length = SOFT_VOLUME_PARAM_SIZE * sizeof(uint32_t);
+	uint32_t param_payload_len = PARAM_PAYLOAD_SIZE * sizeof(uint32_t);
+	int rc = 0;
+
+	update_params_value = kzalloc(params_length + param_payload_len,
+				      GFP_KERNEL);
+	if (!update_params_value) {
+		pr_err("%s: params memory alloc failed\n", __func__);
+		goto end;
+	}
+	rc = adm_get_params(port_id, copp_idx,
+			    AUDPROC_MODULE_ID_VOL_CTRL,
+			    AUDPROC_PARAM_ID_SOFT_VOL_STEPPING_PARAMETERS,
+			    params_length + param_payload_len,
+			    (char *) update_params_value);
+	if (rc == 0) {
+		pr_debug("%s: params_value [0x%x, 0x%x, 0x%x]\n",
+			__func__, update_params_value[0],
+			update_params_value[1],
+			update_params_value[2]);
+		*ramp_wait = update_params_value[0];
+	}
+end:
+	kfree(update_params_value);
+	/*
+	 * No error returned as we do not need to error out from dap on/dap
+	 * bypass. The default ramp parameter will be used to wait during
+	 * ramp down.
+	 */
+	return;
+}
+
+static int msm_ds2_dap_set_vspe_vdhe(int dev_map_idx,
+				     bool is_custom_stereo_enabled)
+{
+	int32_t *update_params_value = NULL;
+	int32_t *param_val = NULL;
+	int idx, i, j, rc = 0, cdev;
+	uint32_t params_length = (TOTAL_LENGTH_DOLBY_PARAM +
+				2 * DOLBY_PARAM_PAYLOAD_SIZE) *
+				sizeof(uint32_t);
+
+	if (dev_map_idx < 0 || dev_map_idx >= DS2_DEVICES_ALL) {
+		pr_err("%s: invalid dev map index %d\n", __func__, dev_map_idx);
+		rc = -EINVAL;
+		goto end;
+	}
+
+	if (dev_map[dev_map_idx].port_id == DOLBY_INVALID_PORT_ID) {
+		pr_err("%s: Invalid port id\n", __func__);
+		rc = -EINVAL;
+		goto end;
+	}
+
+	if ((dev_map[dev_map_idx].copp_idx < 0) ||
+		(dev_map[dev_map_idx].copp_idx >= MAX_COPPS_PER_PORT)) {
+		pr_err("%s: Invalid copp_idx\n", __func__);
+		rc = -EINVAL;
+		goto end;
+	}
+
+	if ((dev_map[dev_map_idx].port_id != SLIMBUS_0_RX) &&
+	     (dev_map[dev_map_idx].port_id != RT_PROXY_PORT_001_RX)) {
+		pr_debug("%s:No Custom stereo for port:0x%x\n",
+			 __func__, dev_map[dev_map_idx].port_id);
+		goto end;
+	}
+
+	update_params_value = kzalloc(params_length, GFP_KERNEL);
+	if (!update_params_value) {
+		pr_err("%s: params memory alloc failed\n", __func__);
+		rc = -ENOMEM;
+		goto end;
+	}
+	params_length = 0;
+	param_val = update_params_value;
+	cdev = dev_map[dev_map_idx].cache_dev;
+	/* for VDHE and VSPE DAP params at index 0 and 1 in table */
+	for (i = 0; i < 2; i++) {
+		*update_params_value++ = DOLBY_BUNDLE_MODULE_ID;
+		*update_params_value++ = ds2_dap_params_id[i];
+		*update_params_value++ = ds2_dap_params_length[i] *
+					sizeof(uint32_t);
+		idx = ds2_dap_params_offset[i];
+		for (j = 0; j < ds2_dap_params_length[i]; j++) {
+			if (is_custom_stereo_enabled)
+				*update_params_value++ = 0;
+			else
+				*update_params_value++ =
+					ds2_dap_params[cdev].params_val[idx+j];
+		}
+		params_length += (DOLBY_PARAM_PAYLOAD_SIZE +
+				  ds2_dap_params_length[i]) *
+				  sizeof(uint32_t);
+	}
+
+	pr_debug("%s: valid param length: %d\n", __func__, params_length);
+	if (params_length) {
+		rc = adm_dolby_dap_send_params(dev_map[dev_map_idx].port_id,
+					       dev_map[dev_map_idx].copp_idx,
+					       (char *)param_val,
+					       params_length);
+		if (rc) {
+			pr_err("%s: send vdhe/vspe params failed with rc=%d\n",
+				__func__, rc);
+			rc = -EINVAL;
+			goto end;
+		}
+	}
+end:
+	kfree(param_val);
+	return rc;
+}
+
+int qti_set_custom_stereo_on(int port_id, int copp_idx,
+			     bool is_custom_stereo_on)
+{
+
+	uint16_t op_FL_ip_FL_weight;
+	uint16_t op_FL_ip_FR_weight;
+	uint16_t op_FR_ip_FL_weight;
+	uint16_t op_FR_ip_FR_weight;
+
+	int32_t *update_params_value32 = NULL, rc = 0;
+	int32_t *param_val = NULL;
+	int16_t *update_params_value16 = 0;
+	uint32_t params_length_bytes = CUSTOM_STEREO_PAYLOAD_SIZE *
+				       sizeof(uint32_t);
+	uint32_t avail_length = params_length_bytes;
+
+	if ((port_id != SLIMBUS_0_RX) &&
+	     (port_id != RT_PROXY_PORT_001_RX)) {
+		pr_debug("%s:No Custom stereo for port:0x%x\n",
+			 __func__, port_id);
+		goto skip_send_cmd;
+	}
+
+	pr_debug("%s: port 0x%x, copp_idx %d, is_custom_stereo_on %d\n",
+		 __func__, port_id, copp_idx, is_custom_stereo_on);
+	if (is_custom_stereo_on) {
+		op_FL_ip_FL_weight =
+			Q14_GAIN_ZERO_POINT_FIVE;
+		op_FL_ip_FR_weight =
+			Q14_GAIN_ZERO_POINT_FIVE;
+		op_FR_ip_FL_weight =
+			Q14_GAIN_ZERO_POINT_FIVE;
+		op_FR_ip_FR_weight =
+			Q14_GAIN_ZERO_POINT_FIVE;
+	} else {
+		op_FL_ip_FL_weight = Q14_GAIN_UNITY;
+		op_FL_ip_FR_weight = 0;
+		op_FR_ip_FL_weight = 0;
+		op_FR_ip_FR_weight = Q14_GAIN_UNITY;
+	}
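+	/*
+	 * The weights are Q14 fixed point, so unity gain is 1 << 14 (0x4000)
+	 * and 0.5 is 0x2000; with custom stereo enabled, both input channels
+	 * are mixed at half gain into each output channel.
+	 */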
+
+	update_params_value32 = kzalloc(params_length_bytes, GFP_KERNEL);
+	if (!update_params_value32) {
+		pr_err("%s, params memory alloc failed\n", __func__);
+		rc = -ENOMEM;
+		goto skip_send_cmd;
+	}
+	param_val = update_params_value32;
+	if (avail_length < 2 * sizeof(uint32_t))
+		goto skip_send_cmd;
+	*update_params_value32++ = MTMX_MODULE_ID_DEFAULT_CHMIXER;
+	*update_params_value32++ = DEFAULT_CHMIXER_PARAM_ID_COEFF;
+	avail_length = avail_length - (2 * sizeof(uint32_t));
+
+	update_params_value16 = (int16_t *)update_params_value32;
+	if (avail_length < 10 * sizeof(uint16_t))
+		goto skip_send_cmd;
+	*update_params_value16++ = CUSTOM_STEREO_CMD_PARAM_SIZE;
+	/* for alignment only*/
+	*update_params_value16++ = 0;
+	/* index is 32-bit param in little endian*/
+	*update_params_value16++ = CUSTOM_STEREO_INDEX_PARAM;
+	*update_params_value16++ = 0;
+	/* for stereo mixing num out ch*/
+	*update_params_value16++ = CUSTOM_STEREO_NUM_OUT_CH;
+	/* for stereo mixing num in ch*/
+	*update_params_value16++ = CUSTOM_STEREO_NUM_IN_CH;
+
+	/* Out ch map FL/FR*/
+	*update_params_value16++ = PCM_CHANNEL_FL;
+	*update_params_value16++ = PCM_CHANNEL_FR;
+
+	/* In ch map FL/FR*/
+	*update_params_value16++ = PCM_CHANNEL_FL;
+	*update_params_value16++ = PCM_CHANNEL_FR;
+	avail_length = avail_length - (10 * sizeof(uint16_t));
+	/* weighting coefficients as name suggests,
+	mixing will be done according to these coefficients*/
+	if (avail_length < 4 * sizeof(uint16_t))
+		goto skip_send_cmd;
+	*update_params_value16++ = op_FL_ip_FL_weight;
+	*update_params_value16++ = op_FL_ip_FR_weight;
+	*update_params_value16++ = op_FR_ip_FL_weight;
+	*update_params_value16++ = op_FR_ip_FR_weight;
+	avail_length = avail_length - (4 * sizeof(uint16_t));
+	if (params_length_bytes != 0) {
+		rc = adm_dolby_dap_send_params(port_id, copp_idx,
+				(char *)param_val,
+				params_length_bytes);
+		if (rc) {
+			pr_err("%s: send params failed rc=%d\n", __func__, rc);
+			rc = -EINVAL;
+			goto skip_send_cmd;
+		}
+	}
+	kfree(param_val);
+	return 0;
+skip_send_cmd:
+		pr_err("%s: insufficient memory, send cmd failed\n",
+			__func__);
+		kfree(param_val);
+		return rc;
+}
+static int dap_set_custom_stereo_onoff(int dev_map_idx,
+					bool is_custom_stereo_enabled)
+{
+
+	int32_t *update_params_value = NULL, rc = 0;
+	int32_t *param_val = NULL;
+	uint32_t params_length_bytes = (TOTAL_LENGTH_DOLBY_PARAM +
+				DOLBY_PARAM_PAYLOAD_SIZE) * sizeof(uint32_t);
+	if ((dev_map[dev_map_idx].port_id != SLIMBUS_0_RX) &&
+	     (dev_map[dev_map_idx].port_id != RT_PROXY_PORT_001_RX)) {
+		pr_debug("%s:No Custom stereo for port:0x%x\n",
+			 __func__, dev_map[dev_map_idx].port_id);
+		goto end;
+	}
+
+	if ((dev_map[dev_map_idx].copp_idx < 0) ||
+		(dev_map[dev_map_idx].copp_idx >= MAX_COPPS_PER_PORT)) {
+		rc = -EINVAL;
+		goto end;
+	}
+
+	/* DAP custom stereo */
+	msm_ds2_dap_set_vspe_vdhe(dev_map_idx,
+				  is_custom_stereo_enabled);
+	update_params_value = kzalloc(params_length_bytes, GFP_KERNEL);
+	if (!update_params_value) {
+		pr_err("%s: params memory alloc failed\n", __func__);
+		rc = -ENOMEM;
+		goto end;
+	}
+	params_length_bytes = 0;
+	param_val = update_params_value;
+	*update_params_value++ = DOLBY_BUNDLE_MODULE_ID;
+	*update_params_value++ = DOLBY_ENABLE_CUSTOM_STEREO;
+	*update_params_value++ = sizeof(uint32_t);
+	if (is_custom_stereo_enabled)
+		*update_params_value++ = 1;
+	else
+		*update_params_value++ = 0;
+	params_length_bytes += (DOLBY_PARAM_PAYLOAD_SIZE + 1) *
+				sizeof(uint32_t);
+	pr_debug("%s: valid param length: %d\n", __func__, params_length_bytes);
+	if (params_length_bytes) {
+		rc = adm_dolby_dap_send_params(dev_map[dev_map_idx].port_id,
+					       dev_map[dev_map_idx].copp_idx,
+					       (char *)param_val,
+					       params_length_bytes);
+		if (rc) {
+			pr_err("%s: custom stereo param failed with rc=%d\n",
+				__func__, rc);
+			rc = -EINVAL;
+			goto end;
+		}
+	}
+end:
+	kfree(param_val);
+	return rc;
+}
+
+static int set_custom_stereo_onoff(int dev_map_idx,
+					bool is_custom_stereo_enabled)
+{
+	int rc = 0;
+	pr_debug("%s: map index %d, custom stereo %d\n", __func__, dev_map_idx,
+		 is_custom_stereo_enabled);
+
+	if (dev_map_idx < 0 || dev_map_idx >= DS2_DEVICES_ALL) {
+		pr_err("%s: invalid dev map index %d\n", __func__, dev_map_idx);
+		rc = -EINVAL;
+		goto end;
+	}
+
+	if (dev_map[dev_map_idx].port_id == DOLBY_INVALID_PORT_ID) {
+		pr_err("%s: invalid port id\n", __func__);
+		rc = -EINVAL;
+		goto end;
+	}
+
+	if ((dev_map[dev_map_idx].copp_idx < 0) ||
+		(dev_map[dev_map_idx].copp_idx >= MAX_COPPS_PER_PORT)) {
+		pr_err("%s: invalid copp idx\n", __func__);
+		rc = -EINVAL;
+		goto end;
+	}
+
+	if (ds2_dap_params_states.dap_bypass == true &&
+		ds2_dap_params_states.dap_bypass_type == DAP_HARD_BYPASS) {
+
+		rc = qti_set_custom_stereo_on(dev_map[dev_map_idx].port_id,
+					      dev_map[dev_map_idx].copp_idx,
+					      is_custom_stereo_enabled);
+		if (rc < 0) {
+			pr_err("%s:qti_set_custom_stereo_on_copp failed C.S %d",
+				__func__, is_custom_stereo_enabled);
+		}
+		goto end;
+
+	}
+
+	if (ds2_dap_params_states.dap_bypass == false) {
+		rc = dap_set_custom_stereo_onoff(dev_map_idx,
+						 is_custom_stereo_enabled);
+		if (rc < 0) {
+			pr_err("%s:qti_set_custom_stereo_on_copp failed C.S %d",
+				__func__, is_custom_stereo_enabled);
+		}
+		goto end;
+	}
+end:
+	return rc;
+}
+
+static int msm_ds2_dap_alloc_and_store_cal_data(int dev_map_idx, int path,
+					    int perf_mode)
+{
+	int rc = 0;
+	struct audio_rx_cal_data *aud_cal_data;
+	pr_debug("%s: path %d, perf_mode %d, dev_map_idx %d\n",
+		__func__, path, perf_mode, dev_map_idx);
+
+	if (dev_map_idx < 0 || dev_map_idx >= DS2_DEVICES_ALL) {
+		pr_err("%s: invalid dev map index %d\n", __func__, dev_map_idx);
+		rc = -EINVAL;
+		goto end;
+	}
+
+	aud_cal_data = kzalloc(sizeof(struct audio_rx_cal_data), GFP_KERNEL);
+	if (!aud_cal_data) {
+		pr_err("%s, param memory alloc failed\n", __func__);
+		rc = -ENOMEM;
+		goto end;
+	}
+
+	rc = adm_store_cal_data(dev_map[dev_map_idx].port_id,
+				dev_map[dev_map_idx].copp_idx, path, perf_mode,
+				ADM_AUDPROC_CAL, aud_cal_data->aud_proc_data,
+				&aud_cal_data->aud_proc_size);
+	if (rc < 0) {
+		pr_err("%s: store cal data err %d\n", __func__, rc);
+		kfree(aud_cal_data);
+		goto end;
+	}
+
+	rc = adm_store_cal_data(dev_map[dev_map_idx].port_id,
+				dev_map[dev_map_idx].copp_idx, path, perf_mode,
+				ADM_AUDVOL_CAL, aud_cal_data->aud_vol_data,
+				&aud_cal_data->aud_vol_size);
+	if (rc < 0) {
+		pr_err("%s: store cal data err %d\n", __func__, rc);
+		kfree(aud_cal_data);
+		goto end;
+	}
+
+	dev_map[dev_map_idx].cal_data = (void *)aud_cal_data;
+
+end:
+	pr_debug("%s: ret %d\n", __func__, rc);
+	return rc;
+}
+
+static int msm_ds2_dap_free_cal_data(int dev_map_idx)
+{
+	int rc = 0;
+	struct audio_rx_cal_data *aud_cal_data;
+
+	pr_debug("%s: dev_map_idx %d\n", __func__, dev_map_idx);
+	if (dev_map_idx < 0 || dev_map_idx >= DS2_DEVICES_ALL) {
+		pr_err("%s: invalid dev map index %d\n", __func__, dev_map_idx);
+		rc = -EINVAL;
+		goto end;
+	}
+	aud_cal_data = (struct audio_rx_cal_data *)
+				dev_map[dev_map_idx].cal_data;
+	kfree(aud_cal_data);
+	dev_map[dev_map_idx].cal_data = NULL;
+
+end:
+	return rc;
+}
+
+static int msm_ds2_dap_send_cal_data(int dev_map_idx)
+{
+	int rc = 0;
+	struct audio_rx_cal_data *aud_cal_data = NULL;
+
+	pr_debug("%s: devmap index %d\n", __func__, dev_map_idx);
+	if (dev_map_idx < 0 || dev_map_idx >= DS2_DEVICES_ALL) {
+		pr_err("%s: invalid dev map index %d\n", __func__, dev_map_idx);
+		rc = -EINVAL;
+		goto end;
+	}
+
+	if (dev_map[dev_map_idx].cal_data == NULL) {
+		pr_err("%s: No valid calibration data stored for idx %d\n",
+			__func__, dev_map_idx);
+		rc = -EINVAL;
+		goto end;
+	}
+
+	/* send aud proc cal */
+	aud_cal_data = (struct audio_rx_cal_data *)
+				dev_map[dev_map_idx].cal_data;
+	rc = adm_send_calibration(dev_map[dev_map_idx].port_id,
+				  dev_map[dev_map_idx].copp_idx,
+				  ADM_PATH_PLAYBACK, 0,
+				  ADM_AUDPROC_CAL,
+				  aud_cal_data->aud_proc_data,
+				  aud_cal_data->aud_proc_size);
+	if (rc < 0) {
+		pr_err("%s: adm_send_calibration failed %d\n", __func__, rc);
+		goto end;
+	}
+
+	/* send aud volume cal*/
+	rc = adm_send_calibration(dev_map[dev_map_idx].port_id,
+				  dev_map[dev_map_idx].copp_idx,
+				  ADM_PATH_PLAYBACK, 0,
+				  ADM_AUDVOL_CAL,
+				  aud_cal_data->aud_vol_data,
+				  aud_cal_data->aud_vol_size);
+	if (rc < 0)
+		pr_err("%s: adm_send_calibration failed %d\n", __func__, rc);
+end:
+	pr_debug("%s: return  %d\n", __func__, rc);
+	return rc;
+}
+
+static inline int msm_ds2_dap_can_enable_module(int32_t module_id)
+{
+	if (module_id == MTMX_MODULE_ID_DEFAULT_CHMIXER ||
+		module_id == AUDPROC_MODULE_ID_RESAMPLER ||
+		module_id == AUDPROC_MODULE_ID_VOL_CTRL) {
+		return false;
+	}
+	return true;
+}
+
+static int msm_ds2_dap_init_modules_in_topology(int dev_map_idx)
+{
+	int rc = 0, i = 0, port_id, copp_idx;
+	/* Account for 32-bit integer allocation */
+	int32_t param_sz = (ADM_GET_TOPO_MODULE_LIST_LENGTH / sizeof(uint32_t));
+	int32_t *update_param_val = NULL;
+
+	if (dev_map_idx < 0 || dev_map_idx >= DS2_DEVICES_ALL) {
+		pr_err("%s: invalid dev map index %d\n", __func__, dev_map_idx);
+		rc = -EINVAL;
+		goto end;
+	}
+
+	port_id = dev_map[dev_map_idx].port_id;
+	copp_idx = dev_map[dev_map_idx].copp_idx;
+	pr_debug("%s: port_id 0x%x copp_idx %d\n", __func__, port_id, copp_idx);
+	update_param_val = kzalloc(ADM_GET_TOPO_MODULE_LIST_LENGTH, GFP_KERNEL);
+	if (!update_param_val) {
+		pr_err("%s: param memory alloc failed\n", __func__);
+		rc = -ENOMEM;
+		goto end;
+	}
+
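+	/*
+	 * DAP active: read the COPP topology from the DSP and disable every
+	 * other toggleable module. DAP bypassed: resend the stored QTI
+	 * calibration instead. Either way, DS2 itself is then switched to
+	 * match the bypass state.
+	 */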
+	if (!ds2_dap_params_states.dap_bypass) {
+		/* get modules from dsp */
+		rc = adm_get_pp_topo_module_list(port_id, copp_idx,
+			ADM_GET_TOPO_MODULE_LIST_LENGTH,
+			(char *)update_param_val);
+		if (rc < 0) {
+			pr_err("%s: topo list port %d, copp_idx %d, err %d\n",
+				__func__, port_id, copp_idx, rc);
+			goto end;
+		}
+
+		if (update_param_val[0] > (param_sz - 1)) {
+			pr_err("%s:max modules exp/ret [%d: %d]\n",
+				__func__, (param_sz - 1),
+				update_param_val[0]);
+			rc = -EINVAL;
+			goto end;
+		}
+		/* Turn off modules */
+		for (i = 1; i < update_param_val[0]; i++) {
+			if (!msm_ds2_dap_can_enable_module(
+				update_param_val[i]) ||
+				(update_param_val[i] == DS2_MODULE_ID)) {
+				pr_debug("%s: Do not enable/disable %d\n",
+					 __func__, update_param_val[i]);
+				continue;
+			}
+
+			pr_debug("%s: param disable %d\n",
+				__func__, update_param_val[i]);
+			adm_param_enable(port_id, copp_idx, update_param_val[i],
+					 MODULE_DISABLE);
+		}
+	} else {
+		msm_ds2_dap_send_cal_data(dev_map_idx);
+	}
+	adm_param_enable(port_id, copp_idx, DS2_MODULE_ID,
+			 !ds2_dap_params_states.dap_bypass);
+end:
+	kfree(update_param_val);
+	return rc;
+}
+
+static bool msm_ds2_dap_check_is_param_modified(int32_t *dap_params_modified,
+				    int32_t idx, int32_t commit)
+{
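+	/*
+	 * The low 16 bits count cache updates for this param; bit 16 marks
+	 * that it was sent to the DSP at least once (the value is reset to
+	 * 0x00010001 after a send). On commit, a param that was already
+	 * sent and updated at most once is treated as unmodified.
+	 */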
+	if ((dap_params_modified[idx] == 0) ||
+		(commit &&
+		((dap_params_modified[idx] & 0x00010000) &&
+		((dap_params_modified[idx] & 0x0000FFFF) <= 1)))) {
+		pr_debug("%s: not modified at idx %d\n", __func__, idx);
+		return false;
+	}
+	pr_debug("%s: modified at idx %d\n", __func__, idx);
+	return true;
+}
+
+static int msm_ds2_dap_map_device_to_dolby_cache_devices(int32_t device_id)
+{
+	int32_t cache_dev = -1;
+	switch (device_id) {
+	case DEVICE_NONE:
+		cache_dev = DOLBY_OFF_CACHE;
+		break;
+	case EARPIECE:
+	case SPEAKER:
+		cache_dev = DOLBY_SPEAKER_CACHE;
+		break;
+	case WIRED_HEADSET:
+	case WIRED_HEADPHONE:
+	case ANLG_DOCK_HEADSET:
+	case DGTL_DOCK_HEADSET:
+	case ANC_HEADSET:
+	case ANC_HEADPHONE:
+	case BLUETOOTH_SCO:
+	case BLUETOOTH_SCO_HEADSET:
+	case BLUETOOTH_SCO_CARKIT:
+		cache_dev = DOLBY_HEADPHONE_CACHE;
+		break;
+	case FM:
+	case FM_TX:
+		cache_dev = DOLBY_FM_CACHE;
+		break;
+	case AUX_DIGITAL:
+		cache_dev = DOLBY_HDMI_CACHE;
+		break;
+	case PROXY:
+	case REMOTE_SUBMIX:
+		cache_dev = DOLBY_WFD_CACHE;
+		break;
+	default:
+		pr_err("%s: invalid cache device\n", __func__);
+	}
+	pr_debug("%s: cache device %d\n", __func__, cache_dev);
+	return cache_dev;
+}
+
+static int msm_ds2_dap_update_num_devices(struct dolby_param_data *dolby_data,
+				      int32_t *num_device, int32_t *dev_arr,
+				      int32_t array_size)
+{
+	int32_t idx = 0;
+	int supported_devices = 0;
+
+	if (!array_size) {
+		pr_err("%s: array size zero\n", __func__);
+		return -EINVAL;
+	}
+
+	if (dolby_data->device_id == DEVICE_OUT_ALL ||
+		dolby_data->device_id == DEVICE_OUT_DEFAULT)
+		supported_devices = all_supported_devices;
+	else
+		supported_devices = dolby_data->device_id;
+
+	if ((idx < array_size) && (supported_devices & EARPIECE))
+		dev_arr[idx++] = EARPIECE;
+	if ((idx < array_size) && (supported_devices & SPEAKER))
+		dev_arr[idx++] = SPEAKER;
+	if ((idx < array_size) && (supported_devices & WIRED_HEADSET))
+		dev_arr[idx++] = WIRED_HEADSET;
+	if ((idx < array_size) && (supported_devices & WIRED_HEADPHONE))
+		dev_arr[idx++] = WIRED_HEADPHONE;
+	if ((idx < array_size) && (supported_devices & BLUETOOTH_SCO))
+		dev_arr[idx++] = BLUETOOTH_SCO;
+	if ((idx < array_size) && (supported_devices & BLUETOOTH_SCO_CARKIT))
+		dev_arr[idx++] = BLUETOOTH_SCO_CARKIT;
+	if ((idx < array_size) && (supported_devices & BLUETOOTH_SCO_HEADSET))
+		dev_arr[idx++] = BLUETOOTH_SCO_HEADSET;
+	if ((idx < array_size) && (supported_devices & AUX_DIGITAL))
+		dev_arr[idx++] = AUX_DIGITAL;
+	if ((idx < array_size) && (supported_devices & ANLG_DOCK_HEADSET))
+		dev_arr[idx++] = ANLG_DOCK_HEADSET;
+	if ((idx < array_size) && (supported_devices & DGTL_DOCK_HEADSET))
+		dev_arr[idx++] = DGTL_DOCK_HEADSET;
+	if ((idx < array_size) && (supported_devices & REMOTE_SUBMIX))
+		dev_arr[idx++] = REMOTE_SUBMIX;
+	if ((idx < array_size) && (supported_devices & ANC_HEADSET))
+		dev_arr[idx++] = ANC_HEADSET;
+	if ((idx < array_size) && (supported_devices & ANC_HEADPHONE))
+		dev_arr[idx++] = ANC_HEADPHONE;
+	if ((idx < array_size) && (supported_devices & PROXY))
+		dev_arr[idx++] = PROXY;
+	if ((idx < array_size) && (supported_devices & FM))
+		dev_arr[idx++] = FM;
+	if ((idx < array_size) && (supported_devices & FM_TX))
+		dev_arr[idx++] = FM_TX;
+	/* Check DEVICE_NONE separately */
+	if ((idx < array_size) && (supported_devices == DEVICE_NONE))
+		dev_arr[idx++] = DEVICE_NONE;
+	pr_debug("%s: dev id 0x%x, idx %d\n", __func__,
+		 supported_devices, idx);
+	*num_device = idx;
+	return 0;
+}
+
+static int msm_ds2_dap_get_port_id(
+		int32_t device_id, int32_t be_id)
+{
+	struct msm_pcm_routing_bdai_data bedais;
+	int port_id = DOLBY_INVALID_PORT_ID;
+	int port_type = 0;
+
+	if (be_id < 0) {
+		port_id = -1;
+		goto end;
+	}
+
+	msm_pcm_routing_get_bedai_info(be_id, &bedais);
+	pr_debug("%s: be port_id %d\n", __func__, bedais.port_id);
+	port_id = bedais.port_id;
+	port_type = afe_get_port_type(bedais.port_id);
+	if (port_type != MSM_AFE_PORT_TYPE_RX)
+		port_id = DOLBY_INVALID_PORT_ID;
+end:
+	pr_debug("%s: device_id 0x%x, be_id %d, port_id %d\n",
+		 __func__, device_id, be_id, port_id);
+	return port_id;
+}
+
+static int msm_ds2_dap_update_dev_map_port_id(int32_t device_id, int port_id)
+{
+	int i;
+	for (i = 0; i < DS2_DEVICES_ALL; i++) {
+		if (dev_map[i].device_id == device_id)
+			dev_map[i].port_id = port_id;
+	}
+	pr_debug("%s: port_id %d, device_id 0x%x\n",
+		 __func__, port_id, device_id);
+	return 0;
+}
+
+static int msm_ds2_dap_handle_bypass_wait(int port_id, int copp_idx,
+					  int wait_time)
+{
+	int ret = 0;
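+	/*
+	 * Drop the routing lock for the duration of the wait, likely so
+	 * other routing operations are not blocked during the ramp; the
+	 * wait state is reset if the timeout expires.
+	 */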
+	adm_set_wait_parameters(port_id, copp_idx);
+	msm_pcm_routing_release_lock();
+	ret = adm_wait_timeout(port_id, copp_idx, wait_time);
+	msm_pcm_routing_acquire_lock();
+	/* Reset the parameters if wait has timed out */
+	if (ret == 0)
+		adm_reset_wait_parameters(port_id, copp_idx);
+	return ret;
+}
+
+static int msm_ds2_dap_handle_bypass(struct dolby_param_data *dolby_data)
+{
+	int rc = 0, i = 0, j = 0;
+	/* Account for 32-bit integer allocation */
+	int32_t param_sz = (ADM_GET_TOPO_MODULE_LIST_LENGTH / sizeof(uint32_t));
+	int32_t *mod_list = NULL;
+	int port_id = 0, copp_idx = -1;
+	bool cs_onoff = ds2_dap_params_states.custom_stereo_onoff;
+	int ramp_wait = DOLBY_SOFT_VOLUME_PERIOD;
+
+	pr_debug("%s: bypass type %d bypass %d custom stereo %d\n", __func__,
+		 ds2_dap_params_states.dap_bypass_type,
+		 ds2_dap_params_states.dap_bypass,
+		 ds2_dap_params_states.custom_stereo_onoff);
+	mod_list = kzalloc(ADM_GET_TOPO_MODULE_LIST_LENGTH, GFP_KERNEL);
+	if (!mod_list) {
+		pr_err("%s: param memory alloc failed\n", __func__);
+		rc = -ENOMEM;
+		goto end;
+	}
+
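+	/*
+	 * For every active device: read the COPP topology, mute, wait for
+	 * the volume ramp, swap the DS2/QTI module set (moving custom
+	 * stereo with it), wait for the module enable period, then restore
+	 * unity gain.
+	 */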
+	for (i = 0; i < DS2_DEVICES_ALL; i++) {
+		pr_debug("%s: active dev %d\n", __func__, dev_map[i].active);
+		if (dev_map[i].active) {
+			port_id = dev_map[i].port_id;
+			copp_idx = dev_map[i].copp_idx;
+
+			if (port_id == DOLBY_INVALID_PORT_ID) {
+				pr_err("%s: invalid port\n", __func__);
+				rc = 0;
+				goto end;
+			}
+
+			if ((copp_idx < 0) ||
+				(copp_idx >= MAX_COPPS_PER_PORT)) {
+				pr_err("%s: Invalid copp_idx\n", __func__);
+				rc = 0;
+				goto end;
+			}
+
+			/* get modules from dsp */
+			rc = adm_get_pp_topo_module_list(port_id, copp_idx,
+				    ADM_GET_TOPO_MODULE_LIST_LENGTH,
+				    (char *)mod_list);
+			if (rc < 0) {
+				pr_err("%s: adm get topo list port %d copp_idx %d, err %d\n",
+					__func__, port_id, copp_idx, rc);
+				goto end;
+			}
+			if (mod_list[0] > (param_sz - 1)) {
+				pr_err("%s:max modules exp/ret [%d: %d]\n",
+					__func__, (param_sz - 1),
+					mod_list[0]);
+				rc = -EINVAL;
+				goto end;
+			}
+			/*
+			 * get ramp parameters
+			 * check for change in ramp parameters
+			 * update ramp wait
+			 */
+			msm_ds2_dap_check_and_update_ramp_wait(port_id,
+							       copp_idx,
+							       &ramp_wait);
+
+			/* Mute before switching modules */
+			rc = adm_set_volume(port_id, copp_idx,
+					    VOLUME_ZERO_GAIN);
+			if (rc < 0) {
+				/*
+				 * Not Fatal can continue bypass operations.
+				 * Do not need to block playback
+				 */
+				pr_info("%s: set volume port_id %d copp_idx %d, error %d\n",
+					__func__, port_id, copp_idx, rc);
+			}
+
+			rc = msm_ds2_dap_handle_bypass_wait(port_id, copp_idx,
+					    (ramp_wait +
+					     DOLBY_ADDITIONAL_RAMP_WAIT));
+			if (rc == -EINTR) {
+				pr_info("%s: bypass interrupted - ignore, port %d copp_idx %d\n",
+					__func__, port_id, copp_idx);
+				rc = 0;
+				continue;
+			}
+
+			/* if dap bypass is set */
+			if (ds2_dap_params_states.dap_bypass) {
+				/* Turn off dap module */
+				adm_param_enable(port_id, copp_idx,
+						 DS2_MODULE_ID, MODULE_DISABLE);
+				/*
+				 * If custom stereo is on at the time of bypass,
+				 * switch off custom stereo on dap and turn on
+				 * custom stereo on qti channel mixer.
+				 */
+				if (cs_onoff) {
+					rc = dap_set_custom_stereo_onoff(i,
+								!cs_onoff);
+					if (rc < 0) {
+						pr_info("%s:D_CS i %d,rc %d\n",
+							__func__, i, rc);
+					}
+					rc = qti_set_custom_stereo_on(port_id,
+								      copp_idx,
+								      cs_onoff);
+					if (rc < 0) {
+						pr_info("%s: Q_CS port id 0x%x copp idx %d, rc %d\n",
+							__func__, port_id,
+							copp_idx, rc);
+					}
+				}
+				/* Turn on qti modules */
+				for (j = 1; j < mod_list[0]; j++) {
+					if (!msm_ds2_dap_can_enable_module(
+						mod_list[j]) ||
+						mod_list[j] ==
+						DS2_MODULE_ID)
+						continue;
+					pr_debug("%s: param enable %d\n",
+						__func__, mod_list[j]);
+					adm_param_enable(port_id, copp_idx,
+							 mod_list[j],
+							 MODULE_ENABLE);
+				}
+
+				/* Resend calibration on this port */
+				rc = msm_ds2_dap_send_cal_data(i);
+				if (rc < 0) {
+					/*
+					 * Not fatal,continue bypass operations.
+					 * Do not need to block playback
+					 */
+					pr_info("%s:send cal err %d index %d\n",
+						__func__, rc, i);
+				}
+			} else {
+				/* Turn off qti modules */
+				for (j = 1; j < mod_list[0]; j++) {
+					if (!msm_ds2_dap_can_enable_module(
+						mod_list[j]) ||
+						mod_list[j] ==
+						DS2_MODULE_ID)
+						continue;
+					pr_debug("%s: param disable %d\n",
+						__func__, mod_list[j]);
+					adm_param_enable(port_id, copp_idx,
+							 mod_list[j],
+							 MODULE_DISABLE);
+				}
+
+				/* Enable DAP modules */
+				pr_debug("%s:DS2 param enable\n", __func__);
+				adm_param_enable(port_id, copp_idx,
+						 DS2_MODULE_ID, MODULE_ENABLE);
+				/*
+				 * If custom stereo is on at the time DAP is
+				 * turned on, switch custom stereo off on the
+				 * QTI channel mixer and turn it on on DAP.
+				 */
+				if (cs_onoff) {
+					rc = qti_set_custom_stereo_on(port_id,
+								copp_idx,
+								!cs_onoff);
+					if (rc < 0) {
+						pr_info("%s: Q_CS port_id 0x%x copp_idx %d, rc %d\n",
+							__func__, port_id,
+							copp_idx, rc);
+					}
+					rc = dap_set_custom_stereo_onoff(i,
+								cs_onoff);
+					if (rc < 0) {
+						pr_info("%s:D_CS i %d,rc %d\n",
+							__func__, i, rc);
+					}
+				}
+			}
+
+			rc = msm_ds2_dap_handle_bypass_wait(port_id, copp_idx,
+				DOLBY_MODULE_ENABLE_PERIOD);
+			if (rc == -EINTR) {
+				pr_info("%s: bypass interrupted port_id %d copp_idx %d\n",
+					__func__, port_id, copp_idx);
+				/* Interrupted; ignore bypass */
+				rc = 0;
+				continue;
+			}
+
+			/* set volume to unity gain after module on/off */
+			rc = adm_set_volume(port_id, copp_idx,
+					    VOLUME_UNITY_GAIN);
+			if (rc < 0) {
+				/*
+				 * Not Fatal can continue bypass operations.
+				 * Do not need to block playback
+				 */
+				pr_info("%s: Set vol port %d copp %d, rc %d\n",
+					__func__, port_id, copp_idx, rc);
+				rc = 0;
+			}
+		}
+	}
+
+end:
+	kfree(mod_list);
+	pr_debug("%s:return rc=%d\n", __func__, rc);
+	return rc;
+}
+
+static int msm_ds2_dap_send_end_point(int dev_map_idx, int endp_idx)
+{
+	int rc = 0;
+	int32_t  *update_params_value = NULL, *params_value = NULL;
+	uint32_t params_length = (DOLBY_PARAM_INT_ENDP_LENGTH +
+				DOLBY_PARAM_PAYLOAD_SIZE) * sizeof(uint32_t);
+	int cache_device = 0;
+	struct ds2_dap_params_s *ds2_ap_params_obj = NULL;
+	int32_t *modified_param = NULL;
+
+	if (dev_map_idx < 0 || dev_map_idx >= DS2_DEVICES_ALL) {
+		pr_err("%s: invalid dev map index %d\n", __func__, dev_map_idx);
+		rc = -EINVAL;
+		goto end;
+	}
+	cache_device = dev_map[dev_map_idx].cache_dev;
+
+	ds2_ap_params_obj = &ds2_dap_params[cache_device];
+	pr_debug("%s: cache dev %d, dev_map_idx %d\n", __func__,
+		 cache_device, dev_map_idx);
+	pr_debug("%s: endp - %pK %pK\n",  __func__,
+		 &ds2_dap_params[cache_device], ds2_ap_params_obj);
+
+	params_value = kzalloc(params_length, GFP_KERNEL);
+	if (!params_value) {
+		pr_err("%s: params memory alloc failed\n", __func__);
+		rc = -ENOMEM;
+		goto end;
+	}
+
+	if (dev_map[dev_map_idx].port_id == DOLBY_INVALID_PORT_ID) {
+		pr_err("%s: invalid port\n", __func__);
+		rc = -EINVAL;
+		goto end;
+	}
+
+	if ((dev_map[dev_map_idx].copp_idx < 0) ||
+		(dev_map[dev_map_idx].copp_idx >= MAX_COPPS_PER_PORT)) {
+		pr_err("%s: Invalid copp_idx\n", __func__);
+		rc = -EINVAL;
+		goto end;
+	}
+
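+	/*
+	 * Payload layout: module id, param id, size in bytes, then the
+	 * endpoint value taken from the device cache.
+	 */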
+	update_params_value = params_value;
+	*update_params_value++ = DOLBY_BUNDLE_MODULE_ID;
+	*update_params_value++ = DOLBY_PARAM_ID_INIT_ENDP;
+	*update_params_value++ = DOLBY_PARAM_INT_ENDP_LENGTH * sizeof(uint32_t);
+	*update_params_value++ = ds2_ap_params_obj->params_val[
+					ds2_dap_params_offset[endp_idx]];
+	pr_debug("%s: off %d, length %d\n", __func__,
+		 ds2_dap_params_offset[endp_idx],
+		 ds2_dap_params_length[endp_idx]);
+	pr_debug("%s: param 0x%x, param val %d\n", __func__,
+		 ds2_dap_params_id[endp_idx], ds2_ap_params_obj->
+		 params_val[ds2_dap_params_offset[endp_idx]]);
+	rc = adm_dolby_dap_send_params(dev_map[dev_map_idx].port_id,
+				       dev_map[dev_map_idx].copp_idx,
+				       (char *)params_value, params_length);
+	if (rc) {
+		pr_err("%s: send dolby params failed rc %d\n", __func__, rc);
+		rc = -EINVAL;
+	}
+	modified_param = ds2_ap_params_obj->dap_params_modified;
+	if (modified_param == NULL) {
+		pr_err("%s: modified param structure invalid\n",
+		       __func__);
+		rc = -EINVAL;
+		goto end;
+	}
+
+	if (msm_ds2_dap_check_is_param_modified(modified_param, endp_idx, 0))
+		ds2_ap_params_obj->dap_params_modified[endp_idx] = 0x00010001;
+
+end:
+	kfree(params_value);
+	return rc;
+}
+
+static int msm_ds2_dap_send_cached_params(int dev_map_idx,
+					  int commit)
+{
+	int32_t *update_params_value = NULL, *params_value = NULL;
+	uint32_t idx, i, j;
+	int ret = 0;
+	uint32_t params_length = (TOTAL_LENGTH_DOLBY_PARAM +
+				(MAX_DS2_PARAMS - 1) *
+				DOLBY_PARAM_PAYLOAD_SIZE) *
+				sizeof(uint32_t);
+	int cache_device = 0;
+	struct ds2_dap_params_s *ds2_ap_params_obj = NULL;
+	int32_t *modified_param = NULL;
+
+	if (dev_map_idx < 0 || dev_map_idx >= DS2_DEVICES_ALL) {
+		pr_err("%s: invalid dev map index %d\n", __func__, dev_map_idx);
+		ret = -EINVAL;
+		goto end;
+	}
+	cache_device = dev_map[dev_map_idx].cache_dev;
+
+	/* Use the off-profile cache only in soft bypass */
+	if (ds2_dap_params_states.dap_bypass_type == DAP_SOFT_BYPASS &&
+		ds2_dap_params_states.dap_bypass == true) {
+		pr_debug("%s: use bypass cache 0\n", __func__);
+		cache_device = dev_map[0].cache_dev;
+	}
+
+	ds2_ap_params_obj = &ds2_dap_params[cache_device];
+	pr_debug("%s: cached param - %pK %pK, cache_device %d\n", __func__,
+		 &ds2_dap_params[cache_device], ds2_ap_params_obj,
+		 cache_device);
+	params_value = kzalloc(params_length, GFP_KERNEL);
+	if (!params_value) {
+		pr_err("%s: params memory alloc failed\n", __func__);
+		ret = -ENOMEM;
+		goto end;
+	}
+
+	if (dev_map[dev_map_idx].port_id == DOLBY_INVALID_PORT_ID) {
+		pr_err("%s: invalid port id\n", __func__);
+		ret = -EINVAL;
+		goto end;
+	}
+
+	if ((dev_map[dev_map_idx].copp_idx < 0) ||
+		(dev_map[dev_map_idx].copp_idx >= MAX_COPPS_PER_PORT)) {
+		pr_err("%s: Invalid copp_idx\n", __func__);
+		ret = -EINVAL;
+		goto end;
+	}
+
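+	/*
+	 * Pack one [module id, param id, size, values...] entry per
+	 * modified param; params_length is rebuilt to the size actually
+	 * used.
+	 */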
+	update_params_value = params_value;
+	params_length = 0;
+	for (i = 0; i < (MAX_DS2_PARAMS-1); i++) {
+		/*get the pointer to the param modified array in the cache*/
+		modified_param = ds2_ap_params_obj->dap_params_modified;
+		if (modified_param == NULL) {
+			pr_err("%s: modified param structure invalid\n",
+			       __func__);
+			ret = -EINVAL;
+			goto end;
+		}
+		if (!msm_ds2_dap_check_is_param_modified(modified_param, i,
+							 commit))
+			continue;
+		*update_params_value++ = DOLBY_BUNDLE_MODULE_ID;
+		*update_params_value++ = ds2_dap_params_id[i];
+		*update_params_value++ = ds2_dap_params_length[i] *
+						sizeof(uint32_t);
+		idx = ds2_dap_params_offset[i];
+		for (j = 0; j < ds2_dap_params_length[i]; j++) {
+			*update_params_value++ =
+					ds2_ap_params_obj->params_val[idx+j];
+			pr_debug("%s: id 0x%x,val %d\n", __func__,
+				 ds2_dap_params_id[i],
+				 ds2_ap_params_obj->params_val[idx+j]);
+		}
+		params_length += (DOLBY_PARAM_PAYLOAD_SIZE +
+				ds2_dap_params_length[i]) * sizeof(uint32_t);
+	}
+
+	pr_debug("%s: valid param length: %d\n", __func__, params_length);
+	if (params_length) {
+		ret = adm_dolby_dap_send_params(dev_map[dev_map_idx].port_id,
+						dev_map[dev_map_idx].copp_idx,
+						(char *)params_value,
+						params_length);
+		if (ret) {
+			pr_err("%s: send dolby params failed ret %d\n",
+				__func__, ret);
+			ret = -EINVAL;
+			goto end;
+		}
+		for (i = 0; i < MAX_DS2_PARAMS-1; i++) {
+			/*get pointer to the param modified array in the cache*/
+			modified_param = ds2_ap_params_obj->dap_params_modified;
+			if (modified_param == NULL) {
+				pr_err("%s: modified param struct invalid\n",
+					__func__);
+				ret = -EINVAL;
+				goto end;
+			}
+			if (!msm_ds2_dap_check_is_param_modified(
+					modified_param, i, commit))
+				continue;
+			ds2_ap_params_obj->dap_params_modified[i] = 0x00010001;
+		}
+	}
+end:
+	kfree(params_value);
+	return ret;
+}
+
+static int msm_ds2_dap_commit_params(struct dolby_param_data *dolby_data,
+				 int commit)
+{
+	int ret = 0, i, idx;
+	struct ds2_dap_params_s *ds2_ap_params_obj =  NULL;
+	int32_t *modified_param = NULL;
+
+	/* Do not commit params if in hard bypass */
+	if (ds2_dap_params_states.dap_bypass_type == DAP_HARD_BYPASS &&
+		ds2_dap_params_states.dap_bypass == true) {
+		pr_debug("%s: called in bypass\n", __func__);
+		ret = -EINVAL;
+		goto end;
+	}
+	for (idx = 0; idx < MAX_DS2_PARAMS; idx++) {
+		if (DOLBY_PARAM_ID_INIT_ENDP == ds2_dap_params_id[idx])
+			break;
+	}
+	if (idx >= MAX_DS2_PARAMS || idx < 0) {
+		pr_err("%s: index of DS2 Param not found idx %d\n",
+			__func__, idx);
+		ret = -EINVAL;
+		goto end;
+	}
+	pr_debug("%s: found endp - idx %d 0x%x\n", __func__, idx,
+		ds2_dap_params_id[idx]);
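+	/*
+	 * Walk all device slots; in soft bypass the device-0 (off) cache
+	 * overrides the per-device cache. The endpoint param is sent first,
+	 * then the remaining cached params.
+	 */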
+	for (i = 0; i < DS2_DEVICES_ALL; i++) {
+		pr_debug("%s:dev[0x%x,0x%x],i:%d,active:%d,bypass:%d,type:%d\n",
+			__func__, dolby_data->device_id, dev_map[i].device_id,
+			i, dev_map[i].active, ds2_dap_params_states.dap_bypass,
+			ds2_dap_params_states.dap_bypass_type);
+
+		if (((dev_map[i].device_id & ds2_dap_params_states.device) ||
+			((ds2_dap_params_states.dap_bypass_type ==
+			DAP_SOFT_BYPASS) &&
+			(ds2_dap_params_states.dap_bypass == true))) &&
+			(dev_map[i].active == true)) {
+
+			/*get ptr to the cache storing the params for device*/
+			if ((ds2_dap_params_states.dap_bypass_type ==
+				DAP_SOFT_BYPASS) &&
+				(ds2_dap_params_states.dap_bypass == true))
+				ds2_ap_params_obj =
+					&ds2_dap_params[dev_map[0].cache_dev];
+			else
+				ds2_ap_params_obj =
+					&ds2_dap_params[dev_map[i].cache_dev];
+
+			/*get the pointer to the param modified array in cache*/
+			modified_param = ds2_ap_params_obj->dap_params_modified;
+			if (modified_param == NULL) {
+				pr_err("%s: modified_param NULL\n", __func__);
+				ret = -EINVAL;
+				goto end;
+			}
+
+			/*
+			 * Send the endp param if use cache is set
+			 * or if param is modified
+			 */
+			if (!commit || msm_ds2_dap_check_is_param_modified(
+					modified_param, idx, commit)) {
+				msm_ds2_dap_send_end_point(i, idx);
+				commit = 0;
+			}
+			ret = msm_ds2_dap_send_cached_params(i, commit);
+			if (ret < 0) {
+				pr_err("%s: send cached param %d\n",
+					__func__, ret);
+				goto end;
+			}
+		}
+	}
+end:
+	return ret;
+}
+
+static int msm_ds2_dap_handle_commands(u32 cmd, void *arg)
+{
+	int ret  = 0, port_id = 0;
+	int32_t data;
+	struct dolby_param_data *dolby_data = (struct dolby_param_data *)arg;
+	if (get_user(data, &dolby_data->data[0])) {
+		pr_debug("%s error getting data\n", __func__);
+		ret = -EFAULT;
+		goto end;
+	}
+
+	pr_debug("%s: param_id %d,be_id %d,device_id 0x%x,length %d,data %d\n",
+		 __func__, dolby_data->param_id, dolby_data->be_id,
+		dolby_data->device_id, dolby_data->length, data);
+
+	switch (dolby_data->param_id) {
+	case DAP_CMD_COMMIT_ALL:
+		msm_ds2_dap_commit_params(dolby_data, 0);
+	break;
+
+	case DAP_CMD_COMMIT_CHANGED:
+		msm_ds2_dap_commit_params(dolby_data, 1);
+	break;
+
+	case DAP_CMD_USE_CACHE_FOR_INIT:
+		ds2_dap_params_states.use_cache = data;
+	break;
+
+	case DAP_CMD_SET_BYPASS:
+		pr_debug("%s: bypass %d bypass type %d, data %d\n", __func__,
+			 ds2_dap_params_states.dap_bypass,
+			 ds2_dap_params_states.dap_bypass_type,
+			 data);
+		/* Do not perform bypass operation if bypass state is same*/
+		if (ds2_dap_params_states.dap_bypass == data)
+			break;
+		ds2_dap_params_states.dap_bypass = data;
+		/* hard bypass: swap the DSP module set in place */
+		if (ds2_dap_params_states.dap_bypass_type == DAP_HARD_BYPASS)
+			msm_ds2_dap_handle_bypass(dolby_data);
+		/* re-commit cached params; a no-op while hard-bypassed */
+		msm_ds2_dap_commit_params(dolby_data, 0);
+	break;
+
+	case DAP_CMD_SET_BYPASS_TYPE:
+		if (data == true)
+			ds2_dap_params_states.dap_bypass_type =
+				DAP_HARD_BYPASS;
+		else
+			ds2_dap_params_states.dap_bypass_type =
+				DAP_SOFT_BYPASS;
+		pr_debug("%s: bypass type %d", __func__,
+			 ds2_dap_params_states.dap_bypass_type);
+	break;
+
+	case DAP_CMD_SET_ACTIVE_DEVICE:
+		pr_debug("%s: DAP_CMD_SET_ACTIVE_DEVICE length %d\n",
+			__func__, dolby_data->length);
+		/* TODO: need to handle multiple instances */
+		ds2_dap_params_states.device |= dolby_data->device_id;
+		port_id = msm_ds2_dap_get_port_id(
+						  dolby_data->device_id,
+						  dolby_data->be_id);
+		pr_debug("%s: device id 0x%x all_dev 0x%x port_id %d\n",
+			__func__, dolby_data->device_id,
+			ds2_dap_params_states.device, port_id);
+		msm_ds2_dap_update_dev_map_port_id(dolby_data->device_id,
+					   port_id);
+		if (port_id == DOLBY_INVALID_PORT_ID) {
+			pr_err("%s: invalid port id %d\n", __func__, port_id);
+			ret = -EINVAL;
+			goto end;
+		}
+	break;
+	}
+end:
+	return ret;
+}
+
+static int msm_ds2_dap_set_param(u32 cmd, void *arg)
+{
+	int rc = 0, idx, i, j, off, port_id = 0, cdev = 0;
+	int32_t num_device = 0;
+	int32_t data = 0;
+	int32_t dev_arr[DS2_DSP_SUPPORTED_ENDP_DEVICE] = {0};
+	struct dolby_param_data *dolby_data =  (struct dolby_param_data *)arg;
+
+	rc = msm_ds2_dap_update_num_devices(dolby_data, &num_device, dev_arr,
+				   DS2_DSP_SUPPORTED_ENDP_DEVICE);
+	if (num_device == 0 || rc < 0) {
+		pr_err("%s: invalid number of devices %d, rc %d\n",
+			__func__, num_device, rc);
+		rc = -EINVAL;
+		goto end;
+	}
+	for (i = 0; i < num_device; i++) {
+		port_id = msm_ds2_dap_get_port_id(dev_arr[i],
+						  dolby_data->be_id);
+		if (port_id != DOLBY_INVALID_PORT_ID)
+			msm_ds2_dap_update_dev_map_port_id(dev_arr[i], port_id);
+
+		cdev = msm_ds2_dap_map_device_to_dolby_cache_devices(
+							  dev_arr[i]);
+		if (cdev < 0 || cdev >= DOLBY_MAX_CACHE) {
+			pr_err("%s: Invalid cache device %d for device 0x%x\n",
+				__func__, cdev, dev_arr[i]);
+			rc = -EINVAL;
+			goto end;
+		}
+		pr_debug("%s:port:%d,be:%d,dev:0x%x,cdev:%d,param:0x%x,len:%d\n"
+			 , __func__, port_id, dolby_data->be_id, dev_arr[i],
+			 cdev, dolby_data->param_id, dolby_data->length);
+		for (idx = 0; idx < MAX_DS2_PARAMS; idx++) {
+			/*paramid from user space*/
+			if (dolby_data->param_id == ds2_dap_params_id[idx])
+				break;
+		}
+		if (idx > MAX_DS2_PARAMS-1) {
+			pr_err("%s: invalid param id 0x%x at idx %d\n",
+				__func__, dolby_data->param_id, idx);
+			rc = -EINVAL;
+			goto end;
+		}
+
+		off = ds2_dap_params_offset[idx];
+		if ((dolby_data->length <= 0) ||
+			(dolby_data->length > TOTAL_LENGTH_DS2_PARAM - off)) {
+			pr_err("%s: invalid length %d at idx %d\n",
+				__func__, dolby_data->length, idx);
+			rc = -EINVAL;
+			goto end;
+		}
+
+		/* cache the parameters */
+		ds2_dap_params[cdev].dap_params_modified[idx] += 1;
+		for (j = 0; j <  dolby_data->length; j++) {
+			if (get_user(data, &dolby_data->data[j])) {
+				pr_debug("%s:error getting data\n", __func__);
+				rc = -EFAULT;
+				goto end;
+			}
+			ds2_dap_params[cdev].params_val[off + j] = data;
+			pr_debug("%s: off %d, val[i/p:o/p] - [%d / %d]\n",
+				 __func__, off, data,
+				 ds2_dap_params[cdev].params_val[off + j]);
+		}
+	}
+end:
+	return rc;
+}
+
+static int msm_ds2_dap_get_param(u32 cmd, void *arg)
+{
+	int rc = 0, i, port_id = 0, copp_idx = -1;
+	struct dolby_param_data *dolby_data = (struct dolby_param_data *)arg;
+	int32_t *update_params_value = NULL, *params_value = NULL;
+	uint32_t params_length = DOLBY_MAX_LENGTH_INDIVIDUAL_PARAM *
+					sizeof(uint32_t);
+	uint32_t param_payload_len =
+			DOLBY_PARAM_PAYLOAD_SIZE * sizeof(uint32_t);
+
+	/* Return error on get param in soft or hard bypass */
+	if (ds2_dap_params_states.dap_bypass == true) {
+		pr_err("%s: called in bypass_type %d bypass %d\n", __func__,
+			ds2_dap_params_states.dap_bypass_type,
+			ds2_dap_params_states.dap_bypass);
+		rc = -EINVAL;
+		goto end;
+	}
+
+	/* Return if invalid length */
+	if ((dolby_data->length >
+	      (DOLBY_MAX_LENGTH_INDIVIDUAL_PARAM - DOLBY_PARAM_PAYLOAD_SIZE)) ||
+	      (dolby_data->length <= 0)) {
+		pr_err("%s: invalid length %d\n", __func__, dolby_data->length);
+		rc = -EINVAL;
+		goto end;
+	}
+
+	for (i = 0; i < DS2_DEVICES_ALL; i++) {
+		if ((dev_map[i].active) &&
+			(dev_map[i].device_id & dolby_data->device_id)) {
+			port_id = dev_map[i].port_id;
+			copp_idx = dev_map[i].copp_idx;
+			break;
+		}
+	}
+
+	if (port_id == DOLBY_INVALID_PORT_ID) {
+		pr_err("%s: Invalid port\n", __func__);
+		rc = -EINVAL;
+		goto end;
+	}
+
+	if ((copp_idx < 0) || (copp_idx >= MAX_COPPS_PER_PORT)) {
+		pr_err("%s: Invalid copp_idx\n", __func__);
+		rc = -EINVAL;
+		goto end;
+	}
+
+	pr_debug("%s: port_id 0x%x, copp_idx %d, dev_map[i].device_id %x\n",
+		 __func__, port_id, copp_idx, dev_map[i].device_id);
+
+	params_value = kzalloc(params_length + param_payload_len,
+				GFP_KERNEL);
+	if (!params_value) {
+		pr_err("%s: params memory alloc failed\n", __func__);
+		rc = -ENOMEM;
+		goto end;
+	}
+
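+	/*
+	 * The version query uses the full buffer; any other param is read
+	 * with the length recorded for it in the DS2 param table.
+	 */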
+	if (dolby_data->param_id == DOLBY_PARAM_ID_VER) {
+		rc = adm_get_params(port_id, copp_idx,
+				    DOLBY_BUNDLE_MODULE_ID,
+				    DOLBY_PARAM_ID_VER,
+				    params_length + param_payload_len,
+				    (char *)params_value);
+	} else {
+		for (i = 0; i < MAX_DS2_PARAMS; i++)
+			if (ds2_dap_params_id[i] ==
+				dolby_data->param_id)
+				break;
+		if (i > MAX_DS2_PARAMS-1) {
+			pr_err("%s: invalid param id 0x%x at id %d\n", __func__,
+				dolby_data->param_id, i);
+			rc = -EINVAL;
+			goto end;
+		} else {
+			params_length =
+			ds2_dap_params_length[i] * sizeof(uint32_t);
+
+			rc = adm_get_params(port_id, copp_idx,
+					    DOLBY_BUNDLE_MODULE_ID,
+					    ds2_dap_params_id[i],
+					    params_length +
+					    param_payload_len,
+					    (char *)params_value);
+		}
+	}
+	if (rc) {
+		pr_err("%s: get parameters failed rc %d\n", __func__, rc);
+		rc = -EINVAL;
+		goto end;
+	}
+	update_params_value = params_value;
+	if (copy_to_user((void *)dolby_data->data,
+			&update_params_value[DOLBY_PARAM_PAYLOAD_SIZE],
+			(dolby_data->length * sizeof(uint32_t)))) {
+		pr_err("%s: error getting param\n", __func__);
+		rc = -EFAULT;
+		goto end;
+	}
+end:
+	kfree(params_value);
+	return rc;
+}
+
+static int msm_ds2_dap_param_visualizer_control_get(u32 cmd, void *arg)
+{
+	int32_t *visualizer_data = NULL;
+	int  i = 0, ret = 0, port_id = -1, cache_dev = -1, copp_idx = -1;
+	int32_t *update_visualizer_data = NULL;
+	struct dolby_param_data *dolby_data = (struct dolby_param_data *)arg;
+	uint32_t offset, length, params_length;
+	uint32_t param_payload_len =
+		DOLBY_PARAM_PAYLOAD_SIZE * sizeof(uint32_t);
+
+	for (i = 0; i < DS2_DEVICES_ALL; i++) {
+		if (dev_map[i].active) {
+			port_id = dev_map[i].port_id;
+			cache_dev = dev_map[i].cache_dev;
+			copp_idx = dev_map[i].copp_idx;
+			break;
+		}
+	}
+
+	if (port_id == DOLBY_INVALID_PORT_ID ||
+		(copp_idx < 0) || (copp_idx >= MAX_COPPS_PER_PORT)) {
+		ret = 0;
+		dolby_data->length = 0;
+		pr_err("%s: no device active\n", __func__);
+		goto end;
+	}
+
+	length = ds2_dap_params[cache_dev].params_val[DOLBY_PARAM_VCNB_OFFSET];
+
+	if (length > DOLBY_PARAM_VCNB_MAX_LENGTH || length <= 0) {
+		ret = 0;
+		dolby_data->length = 0;
+		pr_err("%s: incorrect VCNB length %u\n", __func__, length);
+		return -EINVAL;
+	}
+
+	params_length = (2 * length + DOLBY_VIS_PARAM_HEADER_SIZE) *
+							 sizeof(uint32_t);
+
+	visualizer_data = kzalloc(params_length, GFP_KERNEL);
+	if (!visualizer_data) {
+		pr_err("%s: params memory alloc failed\n", __func__);
+		ret = -ENOMEM;
+		dolby_data->length = 0;
+		goto end;
+	}
+
+	/* Return error on get param in soft or hard bypass */
+	if (ds2_dap_params_states.dap_bypass == true) {
+		pr_debug("%s: visualizer called in bypass, return 0\n",
+			 __func__);
+		ret = 0;
+		dolby_data->length = 0;
+		goto end;
+	}
+
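+	/*
+	 * The visualizer buffer is filled in two chunks of 'length' words:
+	 * VCBG into the first half and VCBE into the second.
+	 */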
+	offset = 0;
+	params_length = length * sizeof(uint32_t);
+	ret = adm_get_params(port_id, copp_idx,
+			    DOLBY_BUNDLE_MODULE_ID,
+			    DOLBY_PARAM_ID_VCBG,
+			    params_length + param_payload_len,
+			    (((char *)(visualizer_data)) + offset));
+	if (ret) {
+		pr_err("%s: get parameters failed ret %d\n", __func__, ret);
+		ret = -EINVAL;
+		dolby_data->length = 0;
+		goto end;
+	}
+	offset = length * sizeof(uint32_t);
+	ret = adm_get_params(port_id, copp_idx,
+			    DOLBY_BUNDLE_MODULE_ID,
+			    DOLBY_PARAM_ID_VCBE,
+			    params_length + param_payload_len,
+			    (((char *)(visualizer_data)) + offset));
+	if (ret) {
+		pr_err("%s: get parameters failed ret %d\n", __func__, ret);
+		ret = -EINVAL;
+		dolby_data->length = 0;
+		goto end;
+	}
+	update_visualizer_data = visualizer_data;
+	dolby_data->length = 2 * length;
+
+	if (copy_to_user((void *)dolby_data->data,
+			(void *)update_visualizer_data,
+			(dolby_data->length * sizeof(uint32_t)))) {
+		pr_err("%s: copy to user failed for data\n", __func__);
+		dolby_data->length = 0;
+		ret = -EFAULT;
+		goto end;
+	}
+
+end:
+	kfree(visualizer_data);
+	return ret;
+}
+
+int msm_ds2_dap_set_security_control(u32 cmd, void *arg)
+{
+	struct dolby_param_license *dolby_license =
+				 ((struct dolby_param_license *)arg);
+	pr_debug("%s: dmid %d license key %d\n", __func__,
+		dolby_license->dmid, dolby_license->license_key);
+	core_set_dolby_manufacturer_id(dolby_license->dmid);
+	core_set_license(dolby_license->license_key, DOLBY_DS1_LICENSE_ID);
+	return 0;
+}
+
+int msm_ds2_dap_update_port_parameters(struct snd_hwdep *hw,  struct file *file,
+				       bool open)
+{
+	int  i = 0, dev_id = 0;
+	pr_debug("%s: open %d\n", __func__, open);
+	ds2_dap_params_states.node_opened = open;
+	ds2_dap_params_states.dap_bypass = true;
+	ds2_dap_params_states.dap_bypass_type = 0;
+	ds2_dap_params_states.use_cache = 0;
+	ds2_dap_params_states.device = 0;
+	ds2_dap_params_states.custom_stereo_onoff = 0;
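+	/*
+	 * Slot 0 is reserved for DEVICE_NONE; slot i maps to device bit
+	 * (1 << (i - 1)) when that device is supported.
+	 */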
+	for (i = 0; i < DS2_DEVICES_ALL; i++) {
+		if (i == 0)
+			dev_map[i].device_id = 0;
+		else {
+			dev_id = (1 << (i-1));
+			if (all_supported_devices & dev_id)
+				dev_map[i].device_id = dev_id;
+			else
+				continue;
+		}
+		dev_map[i].cache_dev =
+			msm_ds2_dap_map_device_to_dolby_cache_devices(
+				    dev_map[i].device_id);
+		if (dev_map[i].cache_dev < 0 ||
+				dev_map[i].cache_dev >= DOLBY_MAX_CACHE)
+			pr_err("%s: Invalid cache device %d for device 0x%x\n",
+						__func__,
+						dev_map[i].cache_dev,
+						dev_map[i].device_id);
+		dev_map[i].port_id = -1;
+		dev_map[i].active = false;
+		dev_map[i].stream_ref_count = 0;
+		dev_map[i].cal_data = NULL;
+		dev_map[i].copp_idx = -1;
+		pr_debug("%s: device_id 0x%x, cache_dev %d act  %d\n", __func__,
+			 dev_map[i].device_id, dev_map[i].cache_dev,
+			 dev_map[i].active);
+	}
+	return 0;
+}
+
+int msm_ds2_dap_ioctl_shared(struct snd_hwdep *hw, struct file *file,
+			     u32 cmd, void *arg)
+{
+	int ret = 0;
+	pr_debug("%s: cmd: 0x%x\n", __func__, cmd);
+	switch (cmd) {
+	case SNDRV_DEVDEP_DAP_IOCTL_SET_PARAM:
+		ret = msm_ds2_dap_set_param(cmd, arg);
+	break;
+	case SNDRV_DEVDEP_DAP_IOCTL_GET_PARAM:
+		ret = msm_ds2_dap_get_param(cmd, arg);
+	break;
+	case SNDRV_DEVDEP_DAP_IOCTL_DAP_COMMAND:
+		ret = msm_ds2_dap_handle_commands(cmd, arg);
+	break;
+	case SNDRV_DEVDEP_DAP_IOCTL_DAP_LICENSE:
+		ret = msm_ds2_dap_set_security_control(cmd, arg);
+	break;
+	case SNDRV_DEVDEP_DAP_IOCTL_GET_VISUALIZER:
+		ret = msm_ds2_dap_param_visualizer_control_get(cmd, arg);
+	break;
+	default:
+		pr_err("%s: called with invalid control 0x%x\n", __func__, cmd);
+		ret = -EINVAL;
+	}
+	return ret;
+}
+
+int msm_ds2_dap_ioctl(struct snd_hwdep *hw, struct file *file,
+		      u32 cmd, void *arg)
+{
+	int ret = 0;
+	pr_debug("%s: cmd: 0x%x\n", __func__, cmd);
+	if (!arg) {
+		pr_err("%s: Invalid params event status\n", __func__);
+		ret = -EINVAL;
+		goto end;
+	}
+	switch (cmd) {
+	case SNDRV_DEVDEP_DAP_IOCTL_SET_PARAM:
+	case SNDRV_DEVDEP_DAP_IOCTL_DAP_COMMAND: {
+		struct dolby_param_data dolby_data;
+		if (copy_from_user((void *)&dolby_data, (void *)arg,
+				sizeof(struct dolby_param_data))) {
+			pr_err("%s: Copy from user failed\n", __func__);
+			ret =  -EFAULT;
+			goto end;
+		}
+		ret = msm_ds2_dap_ioctl_shared(hw, file, cmd, &dolby_data);
+		break;
+	}
+	case SNDRV_DEVDEP_DAP_IOCTL_DAP_LICENSE: {
+		struct dolby_param_license dolby_license;
+		if (copy_from_user((void *)&dolby_license, (void *)arg,
+				sizeof(struct dolby_param_license))) {
+			pr_err("%s: Copy from user failed\n", __func__);
+			ret = -EFAULT;
+			goto end;
+		}
+		ret = msm_ds2_dap_ioctl_shared(hw, file, cmd, &dolby_license);
+		break;
+	}
+	case SNDRV_DEVDEP_DAP_IOCTL_GET_PARAM:
+	case SNDRV_DEVDEP_DAP_IOCTL_GET_VISUALIZER: {
+		struct dolby_param_data dolby_data;
+		if (copy_from_user((void *)&dolby_data, (void *)arg,
+				sizeof(struct dolby_param_data))) {
+			pr_err("%s: Copy from user failed\n", __func__);
+			ret =  -EFAULT;
+			goto end;
+		}
+		ret = msm_ds2_dap_ioctl_shared(hw, file, cmd, &dolby_data);
+		if (ret < 0)
+			pr_err("%s: ioctl cmd %d returned err %d\n",
+				__func__, cmd, ret);
+		if (copy_to_user((void *)arg, &dolby_data,
+			sizeof(struct dolby_param_data))) {
+			pr_err("%s: Copy to user failed\n", __func__);
+			ret = -EFAULT;
+			goto end;
+		}
+		break;
+	}
+	default:
+		pr_err("%s: called with invalid control 0x%x\n", __func__, cmd);
+		ret = -EINVAL;
+	}
+end:
+	return ret;
+}
+#ifdef CONFIG_COMPAT
+int msm_ds2_dap_compat_ioctl(struct snd_hwdep *hw, struct file *file,
+			     u32 cmd, void *arg)
+{
+	int ret = 0;
+	pr_debug("%s: cmd: 0x%x\n", __func__, cmd);
+	switch (cmd) {
+	case SNDRV_DEVDEP_DAP_IOCTL_SET_PARAM32:
+		cmd = SNDRV_DEVDEP_DAP_IOCTL_SET_PARAM;
+		goto handle_set_ioctl;
+	case SNDRV_DEVDEP_DAP_IOCTL_DAP_COMMAND32:
+		cmd = SNDRV_DEVDEP_DAP_IOCTL_DAP_COMMAND;
+handle_set_ioctl:
+	{
+		struct dolby_param_data32 dolby_data32;
+		struct dolby_param_data dolby_data;
+		memset(&dolby_data32, 0, sizeof(dolby_data32));
+		memset(&dolby_data, 0, sizeof(dolby_data));
+		if (copy_from_user(&dolby_data32, (void *)arg,
+				sizeof(struct dolby_param_data32))) {
+			pr_err("%s: Copy from user failed\n", __func__);
+			ret =  -EFAULT;
+			goto end;
+		}
+		dolby_data.version = dolby_data32.version;
+		dolby_data.device_id = dolby_data32.device_id;
+		dolby_data.be_id = dolby_data32.be_id;
+		dolby_data.param_id = dolby_data32.param_id;
+		dolby_data.length = dolby_data32.length;
+		dolby_data.data = compat_ptr(dolby_data32.data);
+
+		ret = msm_ds2_dap_ioctl_shared(hw, file, cmd, &dolby_data);
+		break;
+	}
+	case SNDRV_DEVDEP_DAP_IOCTL_GET_PARAM32:
+		cmd = SNDRV_DEVDEP_DAP_IOCTL_GET_PARAM;
+		goto handle_get_ioctl;
+	case SNDRV_DEVDEP_DAP_IOCTL_GET_VISUALIZER32:
+		cmd = SNDRV_DEVDEP_DAP_IOCTL_GET_VISUALIZER;
+handle_get_ioctl:
+	{
+		struct dolby_param_data32 dolby_data32;
+		struct dolby_param_data dolby_data;
+		memset(&dolby_data32, 0, sizeof(dolby_data32));
+		memset(&dolby_data, 0, sizeof(dolby_data));
+		if (copy_from_user(&dolby_data32, (void *)arg,
+				sizeof(struct dolby_param_data32))) {
+			pr_err("%s: Copy from user failed\n", __func__);
+			ret =  -EFAULT;
+			goto end;
+		}
+		dolby_data.version = dolby_data32.version;
+		dolby_data.device_id = dolby_data32.device_id;
+		dolby_data.be_id = dolby_data32.be_id;
+		dolby_data.param_id = dolby_data32.param_id;
+		dolby_data.length = dolby_data32.length;
+		dolby_data.data = compat_ptr(dolby_data32.data);
+
+		ret = msm_ds2_dap_ioctl_shared(hw, file, cmd, &dolby_data);
+		if (ret < 0)
+			pr_err("%s: ioctl cmd %d, returned err %d\n",
+				__func__, cmd, ret);
+		dolby_data32.length = dolby_data.length;
+		if (copy_to_user((void *)arg, &dolby_data32,
+			sizeof(struct dolby_param_data32))) {
+			pr_err("%s: Copy to user failed\n", __func__);
+			ret = -EFAULT;
+			goto end;
+		}
+		break;
+	}
+	case SNDRV_DEVDEP_DAP_IOCTL_DAP_LICENSE32: {
+		struct dolby_param_license32 dolby_license32;
+		struct dolby_param_license dolby_license;
+		cmd = SNDRV_DEVDEP_DAP_IOCTL_DAP_LICENSE;
+		if (copy_from_user((void *)&dolby_license32, (void *)arg,
+			sizeof(struct dolby_param_license32))) {
+			pr_err("%s: Copy from user failed\n", __func__);
+			ret = -EFAULT;
+			goto end;
+		}
+		dolby_license.dmid = dolby_license32.dmid;
+		dolby_license.license_key = dolby_license32.license_key;
+		ret = msm_ds2_dap_ioctl_shared(hw, file, cmd, &dolby_license);
+		break;
+	}
+	default:
+		pr_err("%s: called with invalid control 0x%x\n",
+			__func__, cmd);
+		ret = -EINVAL;
+	}
+end:
+	return ret;
+}
+#endif
+
+int msm_ds2_dap_init(int port_id, int copp_idx, int channels,
+		     bool is_custom_stereo_on)
+{
+	int ret = 0, idx = -1, i;
+	struct dolby_param_data dolby_data;
+
+	struct audproc_softvolume_params softvol = {
+		.period = DOLBY_SOFT_VOLUME_PERIOD,
+		.step = DOLBY_SOFT_VOLUME_STEP,
+		.rampingcurve = DOLBY_SOFT_VOLUME_CURVE_EXP,
+	};
+
+	pr_debug("%s: port id  %d, copp_idx %d\n", __func__, port_id, copp_idx);
+
+	if (port_id != DOLBY_INVALID_PORT_ID) {
+		for (i = 0; i < DS2_DEVICES_ALL; i++) {
+			if ((dev_map[i].port_id == port_id) &&
+				/* device part of active device */
+				(dev_map[i].device_id &
+				ds2_dap_params_states.device)) {
+				idx = i;
+				/*
+				 * Give priority to headset in case of a
+				 * combo device
+				 */
+				if (dev_map[i].device_id == SPEAKER)
+					continue;
+				else
+					break;
+			}
+		}
+		if (idx < 0) {
+			pr_err("%s: invalid index for port %d\n",
+				__func__, port_id);
+			ret = -EINVAL;
+			goto end;
+		}
+		pr_debug("%s:index %d, dev[0x%x,0x%x]\n", __func__, idx,
+			 dev_map[idx].device_id, ds2_dap_params_states.device);
+		dev_map[idx].active = true;
+		dev_map[idx].copp_idx = copp_idx;
+		dolby_data.param_id = DOLBY_COMMIT_ALL_TO_DSP;
+		dolby_data.length = 0;
+		dolby_data.data = NULL;
+		dolby_data.device_id = dev_map[idx].device_id;
+		pr_debug("%s:  idx  %d, active %d, dev id 0x%x, ref count %d\n",
+			 __func__, idx, dev_map[idx].active,
+			 dev_map[idx].device_id,
+			 dev_map[idx].stream_ref_count);
+		if (dev_map[idx].stream_ref_count == 0) {
+			/* the next 3 calls apply only in hard bypass */
+			if (ds2_dap_params_states.dap_bypass_type ==
+				DAP_HARD_BYPASS) {
+				ret = msm_ds2_dap_alloc_and_store_cal_data(idx,
+						       ADM_PATH_PLAYBACK, 0);
+				if (ret < 0) {
+					pr_err("%s: Failed to alloc and store cal data for idx %d, device %d, copp_idx %d\n",
+					       __func__,
+					       idx, dev_map[idx].device_id,
+					       dev_map[idx].copp_idx);
+					dev_map[idx].active = false;
+					dev_map[idx].copp_idx = -1;
+					goto end;
+				}
+
+				ret = adm_set_softvolume(port_id, copp_idx,
+							 &softvol);
+				if (ret < 0) {
+					pr_err("%s: Soft volume ret error %d\n",
+						__func__, ret);
+					dev_map[idx].active = false;
+					dev_map[idx].copp_idx = -1;
+					goto end;
+				}
+
+				ret = msm_ds2_dap_init_modules_in_topology(
+							idx);
+				if (ret < 0) {
+					pr_err("%s: Failed to init modules in topology for idx %d, device %d, copp_idx %d\n",
+					       __func__, idx,
+					       dev_map[idx].device_id,
+					       dev_map[idx].copp_idx);
+					dev_map[idx].active = false;
+					dev_map[idx].copp_idx = -1;
+					goto end;
+				}
+			}
+
+			ret =  msm_ds2_dap_commit_params(&dolby_data, 0);
+			if (ret < 0) {
+				pr_debug("%s: commit params ret %d\n",
+					__func__, ret);
+				ret = 0;
+			}
+		}
+		dev_map[idx].stream_ref_count++;
+		if (is_custom_stereo_on) {
+			ds2_dap_params_states.custom_stereo_onoff =
+				is_custom_stereo_on;
+			set_custom_stereo_onoff(idx,
+						is_custom_stereo_on);
+		}
+	}
+
+end:
+	return ret;
+}
+
+void msm_ds2_dap_deinit(int port_id)
+{
+	/*
+	 * Get the active port corresponding to the active device
+	 * Check if this is same as incoming port
+	 * Set it to invalid
+	 */
+	int idx = -1, i;
+	pr_debug("%s: port_id %d\n", __func__, port_id);
+	if (port_id != DOLBY_INVALID_PORT_ID) {
+		for (i = 0; i < DS2_DEVICES_ALL; i++) {
+			/* Active port */
+			if ((dev_map[i].port_id == port_id) &&
+				/* device part of active device */
+				(dev_map[i].device_id &
+				ds2_dap_params_states.device) &&
+				/*
+				 * Need this check to avoid race condition of
+				 * active device being set and playback
+				 * instance opened
+				 */
+				/* active device*/
+				dev_map[i].active) {
+				idx = i;
+				if (dev_map[i].device_id == SPEAKER)
+					continue;
+				else
+					break;
+			}
+		}
+		if (idx < 0) {
+			pr_err("%s: invalid index for port %d\n",
+				__func__, port_id);
+			return;
+		}
+		pr_debug("%s:index %d, dev [0x%x, 0x%x]\n", __func__, idx,
+			 dev_map[idx].device_id, ds2_dap_params_states.device);
+		dev_map[idx].stream_ref_count--;
+		if (dev_map[idx].stream_ref_count == 0) {
+			/* free the stored cal data only in hard bypass */
+			if (ds2_dap_params_states.dap_bypass_type ==
+				DAP_HARD_BYPASS) {
+				msm_ds2_dap_free_cal_data(idx);
+			}
+			ds2_dap_params_states.device &= ~dev_map[idx].device_id;
+			dev_map[idx].active = false;
+			dev_map[idx].copp_idx = -1;
+		}
+		pr_debug("%s:idx  %d, active %d, dev id 0x%x ref count %d\n",
+			 __func__, idx, dev_map[idx].active,
+			 dev_map[idx].device_id, dev_map[idx].stream_ref_count);
+	}
+}
+
+int msm_ds2_dap_set_custom_stereo_onoff(int port_id, int copp_idx,
+					bool is_custom_stereo_enabled)
+{
+	int idx = -1, rc = 0, i;
+	pr_debug("%s: port_id %d\n", __func__, port_id);
+	if (port_id != DOLBY_INVALID_PORT_ID) {
+		for (i = 0; i < DS2_DEVICES_ALL; i++) {
+			if ((dev_map[i].port_id == port_id) &&
+				/* device part of active device */
+				(dev_map[i].device_id &
+				ds2_dap_params_states.device)) {
+				idx = i;
+				if (dev_map[i].device_id == SPEAKER)
+					continue;
+				else
+					break;
+			}
+		}
+		if (idx < 0) {
+			pr_err("%s: invalid index for port %d\n",
+				__func__, port_id);
+			return rc;
+		}
+		ds2_dap_params_states.custom_stereo_onoff =
+			is_custom_stereo_enabled;
+		rc = set_custom_stereo_onoff(idx,
+					is_custom_stereo_enabled);
+		if (rc < 0) {
+			pr_err("%s: Custom stereo err %d on port %d\n",
+				__func__, rc, port_id);
+		}
+	}
+	return rc;
+}
+
+#else
+
+static int msm_ds2_dap_alloc_and_store_cal_data(int dev_map_idx, int path,
+					    int perf_mode)
+{
+	return 0;
+}
+
+static int msm_ds2_dap_free_cal_data(int dev_map_idx)
+{
+	return 0;
+}
+
+static int msm_ds2_dap_send_cal_data(int dev_map_idx)
+{
+	return 0;
+}
+
+static int msm_ds2_dap_can_enable_module(int32_t module_id)
+{
+	return 0;
+}
+
+static int msm_ds2_dap_init_modules_in_topology(int dev_map_idx)
+{
+	return 0;
+}
+
+static bool msm_ds2_dap_check_is_param_modified(int32_t *dap_params_modified,
+				    int32_t idx, int32_t commit)
+{
+	return false;
+}
+
+static int msm_ds2_dap_map_device_to_dolby_cache_devices(int32_t device_id)
+{
+	return 0;
+}
+
+static int msm_ds2_dap_update_num_devices(struct dolby_param_data *dolby_data,
+				      int32_t *num_device, int32_t *dev_arr,
+				      int32_t array_size)
+{
+	return 0;
+}
+
+static int msm_ds2_dap_commit_params(struct dolby_param_data *dolby_data,
+				 int commit)
+{
+	return 0;
+}
+
+static int msm_ds2_dap_handle_commands(u32 cmd, void *arg)
+{
+	return 0;
+}
+
+static int msm_ds2_dap_set_param(u32 cmd, void *arg)
+{
+	return 0;
+}
+
+static int msm_ds2_dap_get_param(u32 cmd, void *arg)
+{
+	return 0;
+}
+
+static int msm_ds2_dap_send_end_point(int dev_map_idx, int endp_idx)
+{
+	return 0;
+}
+
+static int msm_ds2_dap_send_cached_params(int dev_map_idx,
+					  int commit)
+{
+	return 0;
+}
+
+static int msm_ds2_dap_set_vspe_vdhe(int dev_map_idx,
+				     bool is_custom_stereo_enabled)
+{
+	return 0;
+}
+
+static int msm_ds2_dap_param_visualizer_control_get(u32 cmd, void *arg)
+{
+	return 0;
+}
+
+static int msm_ds2_dap_set_security_control(u32 cmd, void *arg)
+{
+	return 0;
+}
+
+static int msm_ds2_dap_update_dev_map_port_id(int32_t device_id, int port_id)
+{
+	return 0;
+}
+
+static int msm_ds2_dap_get_port_id(
+		int32_t device_id, int32_t be_id)
+{
+	return 0;
+}
+
+static int msm_ds2_dap_handle_bypass(struct dolby_param_data *dolby_data)
+{
+	return 0;
+}
+
+static int msm_ds2_dap_handle_bypass_wait(int port_id, int copp_idx,
+					  int wait_time)
+{
+	return 0;
+}
+
+static int dap_set_custom_stereo_onoff(int dev_map_idx,
+					bool is_custom_stereo_enabled)
+{
+	return 0;
+}
+
+int qti_set_custom_stereo_on(int port_id, int copp_idx,
+			     bool is_custom_stereo_on)
+{
+	return 0;
+}
+
+int set_custom_stereo_onoff(int dev_map_idx,
+			    bool is_custom_stereo_enabled)
+{
+	return 0;
+}
+
+int msm_ds2_dap_ioctl_shared(struct snd_hwdep *hw, struct file *file,
+			     u32 cmd, void *arg)
+{
+	return 0;
+}
+#endif /* CONFIG_DOLBY_DS2 || CONFIG_DOLBY_LICENSE */
diff -Nruw linux-4.4.115-fbx/sound/soc/msm./qdsp6v2/msm-ds2-dap-config.h linux-4.4.115-fbx/sound/soc/msm/qdsp6v2/msm-ds2-dap-config.h
--- linux-4.4.115-fbx/sound/soc/msm./qdsp6v2/msm-ds2-dap-config.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/sound/soc/msm/qdsp6v2/msm-ds2-dap-config.h	2019-10-29 09:26:26.149227702 +0100
@@ -0,0 +1,122 @@
+/* Copyright (c) 2013-2014, 2016, The Linux Foundation. All rights reserved.
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _MSM_DS2_DAP_CONFIG_H_
+#define _MSM_DS2_DAP_CONFIG_H_
+
+#include <sound/soc.h>
+#include "msm-dolby-common.h"
+#include <sound/hwdep.h>
+#include <uapi/sound/devdep_params.h>
+
+#ifdef CONFIG_COMPAT
+struct dolby_param_data32 {
+	s32 version;
+	s32 device_id;
+	s32 be_id;
+	s32 param_id;
+	s32 length;
+	compat_uptr_t data;
+};
+
+struct dolby_param_license32 {
+	compat_uptr_t dmid;
+	compat_uptr_t license_key;
+};
+
+
+#define SNDRV_DEVDEP_DAP_IOCTL_SET_PARAM32\
+		_IOWR('U', 0x10, struct dolby_param_data32)
+#define SNDRV_DEVDEP_DAP_IOCTL_GET_PARAM32\
+		_IOR('U', 0x11, struct dolby_param_data32)
+#define SNDRV_DEVDEP_DAP_IOCTL_DAP_COMMAND32\
+		_IOWR('U', 0x13, struct dolby_param_data32)
+#define SNDRV_DEVDEP_DAP_IOCTL_DAP_LICENSE32\
+		_IOWR('U', 0x14, struct dolby_param_license32)
+#define SNDRV_DEVDEP_DAP_IOCTL_GET_VISUALIZER32\
+		_IOR('U', 0x15, struct dolby_param_data32)
+#endif
+
+#if defined(CONFIG_DOLBY_DS2) || defined(CONFIG_DOLBY_LICENSE)
+/* Dolby DS2 module GUIDs */
+#define DS2_MODULE_ID			0x00010775
+
+#define DS2_DSP_SUPPORTED_ENDP_DEVICE		17
+#define DS2_DEVICES_ALL				32 /* enum val is 4 bytes */
+
+enum {
+	DAP_CMD_COMMIT_ALL         = 0,
+	DAP_CMD_COMMIT_CHANGED     = 1,
+	DAP_CMD_USE_CACHE_FOR_INIT = 2,
+	DAP_CMD_SET_BYPASS         = 3,
+	DAP_CMD_SET_ACTIVE_DEVICE  = 4,
+	DAP_CMD_SET_BYPASS_TYPE    = 5,
+};
+
+#define DOLBY_PARAM_INT_ENDP_LENGTH             1
+#define DOLBY_PARAM_INT_ENDP_OFFSET		(DOLBY_PARAM_PSTG_OFFSET + \
+							DOLBY_PARAM_PSTG_LENGTH)
+#define MAX_DS2_PARAMS				48
+#define MAX_DS2_CTRL_PARAMS			4
+#define ALL_DS2_PARAMS				(MAX_DS2_PARAMS + \
+							MAX_DS2_CTRL_PARAMS)
+#define TOTAL_LENGTH_DS2_PARAM (TOTAL_LENGTH_DOLBY_PARAM + 1)
+
+int msm_ds2_dap_update_port_parameters(struct snd_hwdep *hw,  struct file *file,
+				       bool open);
+int msm_ds2_dap_ioctl(struct snd_hwdep *hw, struct file *file,
+		      u32 cmd, void *arg);
+int msm_ds2_dap_compat_ioctl(struct snd_hwdep *hw,
+			     struct file *file,
+			     u32 cmd, void *arg);
+int msm_ds2_dap_init(int port_id, int copp_idx, int channels,
+		     bool is_custom_stereo_on);
+void msm_ds2_dap_deinit(int port_id);
+int msm_ds2_dap_set_custom_stereo_onoff(int port_id, int copp_idx,
+					bool is_custom_stereo_enabled);
+/* Dolby DS2 end */
+#else
+
+static inline int msm_ds2_dap_update_port_parameters(struct snd_hwdep *hw,
+					       struct file *file,
+					       bool open)
+{
+	return 0;
+}
+
+static inline int msm_ds2_dap_ioctl(struct snd_hwdep *hw, struct file *file,
+				    u32 cmd, void *arg)
+{
+	return 0;
+}
+
+static inline int msm_ds2_dap_compat_ioctl(struct snd_hwdep *hw,
+					   struct file *file,
+					   u32 cmd, void *arg)
+{
+	return 0;
+}
+static inline int msm_ds2_dap_init(int port_id, int copp_idx, int channels,
+		     bool is_custom_stereo_on)
+{
+	return 0;
+}
+
+static inline void msm_ds2_dap_deinit(int port_id) { }
+
+static inline int msm_ds2_dap_set_custom_stereo_onoff(int port_id, int copp_idx,
+				    bool is_custom_stereo_enabled)
+{
+	return 0;
+}
+#endif
+#endif /* _MSM_DS2_DAP_CONFIG_H_ */
diff -Nruw linux-4.4.115-fbx/sound/soc/msm./qdsp6v2/msm-dts-srs-tm-config.c linux-4.4.115-fbx/sound/soc/msm/qdsp6v2/msm-dts-srs-tm-config.c
--- linux-4.4.115-fbx/sound/soc/msm./qdsp6v2/msm-dts-srs-tm-config.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/sound/soc/msm/qdsp6v2/msm-dts-srs-tm-config.c	2019-01-22 16:16:29.627301863 +0100
@@ -0,0 +1,354 @@
+/* Copyright (c) 2012-2014, 2016-2017, The Linux Foundation. All
+ * rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/bitops.h>
+#include <linux/mutex.h>
+#include <linux/atomic.h>
+#include <linux/msm_audio_ion.h>
+#include <sound/control.h>
+#include <sound/q6adm-v2.h>
+#include <sound/asound.h>
+#include "msm-dts-srs-tm-config.h"
+#include "msm-pcm-routing-v2.h"
+
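+/*
+ * NB: a braced initializer only sets element 0 to -1; the remaining
+ * entries default to 0.
+ */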
+static int srs_port_id[AFE_MAX_PORTS] = {-1};
+static int srs_copp_idx[AFE_MAX_PORTS] = {-1};
+static union srs_trumedia_params_u msm_srs_trumedia_params;
+static struct ion_client *ion_client;
+static struct ion_handle *ion_handle;
+static struct param_outband po;
+static atomic_t ref_cnt;
+#define ION_MEM_SIZE	(8 * 1024)
+
+static int set_port_id(int port_id, int copp_idx)
+{
+	int index = adm_validate_and_get_port_index(port_id);
+	if (index < 0) {
+		pr_err("%s: Invalid port idx %d port_id %#x\n", __func__, index,
+			port_id);
+		return -EINVAL;
+	}
+	srs_port_id[index] = port_id;
+	srs_copp_idx[index] = copp_idx;
+	return 0;
+}
+
+static void msm_dts_srs_tm_send_params(__s32 port_id, __u32 techs)
+{
+	__s32 index = adm_validate_and_get_port_index(port_id);
+	if (index < 0) {
+		pr_err("%s: Invalid port idx %d port_id 0x%x\n",
+			__func__, index, port_id);
+		return;
+	}
+	if ((srs_copp_idx[index] < 0) ||
+	    (srs_copp_idx[index] >= MAX_COPPS_PER_PORT)) {
+		pr_debug("%s: send params called before copp open. so, caching\n",
+			 __func__);
+		return;
+	}
+	pr_debug("SRS %s: called, port_id = %d, techs flags = %u\n",
+		__func__, port_id, techs);
+	/* force all if techs is set to 1 */
+	if (techs == 1)
+		techs = 0xFFFFFFFF;
+
+	if (techs & (1 << SRS_ID_WOWHD))
+		srs_trumedia_open(port_id, srs_copp_idx[index], SRS_ID_WOWHD,
+			(void *)&msm_srs_trumedia_params.srs_params.wowhd);
+	if (techs & (1 << SRS_ID_CSHP))
+		srs_trumedia_open(port_id, srs_copp_idx[index], SRS_ID_CSHP,
+			(void *)&msm_srs_trumedia_params.srs_params.cshp);
+	if (techs & (1 << SRS_ID_HPF))
+		srs_trumedia_open(port_id, srs_copp_idx[index], SRS_ID_HPF,
+			(void *)&msm_srs_trumedia_params.srs_params.hpf);
+	if (techs & (1 << SRS_ID_AEQ))
+		srs_trumedia_open(port_id, srs_copp_idx[index], SRS_ID_AEQ,
+			(void *)&msm_srs_trumedia_params.srs_params.aeq);
+	if (techs & (1 << SRS_ID_HL))
+		srs_trumedia_open(port_id, srs_copp_idx[index], SRS_ID_HL,
+			(void *)&msm_srs_trumedia_params.srs_params.hl);
+	if (techs & (1 << SRS_ID_GEQ))
+		srs_trumedia_open(port_id, srs_copp_idx[index], SRS_ID_GEQ,
+			(void *)&msm_srs_trumedia_params.srs_params.geq);
+	if (techs & (1 << SRS_ID_GLOBAL))
+		srs_trumedia_open(port_id, srs_copp_idx[index], SRS_ID_GLOBAL,
+			(void *)&msm_srs_trumedia_params.srs_params.global);
+}
+
+
+static int msm_dts_srs_trumedia_control_get(struct snd_kcontrol *kcontrol,
+					    struct snd_ctl_elem_value *ucontrol)
+{
+	ucontrol->value.integer.value[0] = 0;
+	return 0;
+}
+
+static int msm_dts_srs_trumedia_control_set_(int port_id,
+					    struct snd_kcontrol *kcontrol,
+					    struct snd_ctl_elem_value *ucontrol)
+{
+
+	__u16 offset, value, max = sizeof(msm_srs_trumedia_params) >> 1;
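+	/*
+	 * Control value encoding: an SRS_CMD_UPLOAD command carries a tech
+	 * bitmask in its low byte; otherwise bits 16..31 hold an offset
+	 * into the raw param image and bits 0..15 the 16-bit value.
+	 */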
+	if (SRS_CMD_UPLOAD ==
+		(ucontrol->value.integer.value[0] & SRS_CMD_UPLOAD)) {
+		__u32 techs = ucontrol->value.integer.value[0] & 0xFF;
+		__s32 index = adm_validate_and_get_port_index(port_id);
+		if (index < 0) {
+			pr_err("%s: Invalid port idx %d port_id 0x%x\n",
+					__func__, index, port_id);
+				return -EINVAL;
+			}
+		pr_debug("SRS %s: send params request, flag = %u\n",
+					__func__, techs);
+		if (srs_port_id[index] >= 0 && techs)
+			msm_dts_srs_tm_send_params(port_id, techs);
+		return 0;
+	}
+	offset = (__u16)((ucontrol->value.integer.value[0] &
+			SRS_PARAM_OFFSET_MASK) >> 16);
+	value = (__u16)(ucontrol->value.integer.value[0] &
+			SRS_PARAM_VALUE_MASK);
+	if (offset < max) {
+		msm_srs_trumedia_params.raw_params[offset] = value;
+		pr_debug("SRS %s: index set... (max %d, requested %d, value 0x%X)\n",
+			 __func__, max, offset, value);
+	} else {
+		pr_err("SRS %s: index out of bounds! (max %d, requested %d)\n",
+		       __func__, max, offset);
+	}
+	return 0;
+}
+
+static int msm_dts_srs_trumedia_control_set(struct snd_kcontrol *kcontrol,
+				      struct snd_ctl_elem_value *ucontrol)
+{
+	int ret, port_id;
+
+	pr_debug("SRS control normal called\n");
+	msm_pcm_routing_acquire_lock();
+	port_id = SLIMBUS_0_RX;
+	ret = msm_dts_srs_trumedia_control_set_(port_id, kcontrol, ucontrol);
+	msm_pcm_routing_release_lock();
+	return ret;
+}
+
+static int msm_dts_srs_trumedia_control_i2s_set(struct snd_kcontrol *kcontrol,
+					  struct snd_ctl_elem_value *ucontrol)
+{
+	int ret, port_id;
+
+	pr_debug("SRS control I2S called\n");
+	msm_pcm_routing_acquire_lock();
+	port_id = PRIMARY_I2S_RX;
+	ret = msm_dts_srs_trumedia_control_set_(port_id, kcontrol, ucontrol);
+	msm_pcm_routing_release_lock();
+	return ret;
+}
+
+static int msm_dts_srs_trumedia_control_mi2s_set(struct snd_kcontrol *kcontrol,
+					  struct snd_ctl_elem_value *ucontrol)
+{
+	int ret, port_id;
+
+	pr_debug("SRS control MI2S called\n");
+	msm_pcm_routing_acquire_lock();
+	port_id = AFE_PORT_ID_PRIMARY_MI2S_RX;
+	ret = msm_dts_srs_trumedia_control_set_(port_id, kcontrol, ucontrol);
+	msm_pcm_routing_release_lock();
+	return ret;
+}
+
+static int msm_dts_srs_trumedia_control_hdmi_set(struct snd_kcontrol *kcontrol,
+					   struct snd_ctl_elem_value *ucontrol)
+{
+	int ret, port_id;
+
+	pr_debug("SRS control HDMI called\n");
+	msm_pcm_routing_acquire_lock();
+	port_id = HDMI_RX;
+	ret = msm_dts_srs_trumedia_control_set_(port_id, kcontrol, ucontrol);
+	msm_pcm_routing_release_lock();
+	return ret;
+}
+
+static const struct snd_kcontrol_new lpa_srs_trumedia_controls[] = {
+	{.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+	.name = "SRS TruMedia",
+	.access = SNDRV_CTL_ELEM_ACCESS_TLV_READ |
+			SNDRV_CTL_ELEM_ACCESS_READWRITE,
+	.info = snd_soc_info_volsw,
+	.get = msm_dts_srs_trumedia_control_get,
+	.put = msm_dts_srs_trumedia_control_set,
+	.private_value = ((unsigned long)&(struct soc_mixer_control)
+	{.reg = SND_SOC_NOPM,
+	.rreg = SND_SOC_NOPM,
+	.shift = 0,
+	.rshift = 0,
+	.max = 0xFFFFFFFF,
+	.platform_max = 0xFFFFFFFF,
+	.invert = 0
+	})
+	}
+};
+
+static const struct snd_kcontrol_new lpa_srs_trumedia_controls_hdmi[] = {
+	{.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+	.name = "SRS TruMedia HDMI",
+	.access = SNDRV_CTL_ELEM_ACCESS_TLV_READ |
+			SNDRV_CTL_ELEM_ACCESS_READWRITE,
+	.info = snd_soc_info_volsw,
+	.get = msm_dts_srs_trumedia_control_get,
+	.put = msm_dts_srs_trumedia_control_hdmi_set,
+	.private_value = ((unsigned long)&(struct soc_mixer_control)
+	{.reg = SND_SOC_NOPM,
+	.rreg = SND_SOC_NOPM,
+	.shift = 0,
+	.rshift = 0,
+	.max = 0xFFFFFFFF,
+	.platform_max = 0xFFFFFFFF,
+	.invert = 0
+	})
+	}
+};
+
+static const struct snd_kcontrol_new lpa_srs_trumedia_controls_i2s[] = {
+	{.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+	.name = "SRS TruMedia I2S",
+	.access = SNDRV_CTL_ELEM_ACCESS_TLV_READ |
+		SNDRV_CTL_ELEM_ACCESS_READWRITE,
+	.info = snd_soc_info_volsw,
+	.get = msm_dts_srs_trumedia_control_get,
+	.put = msm_dts_srs_trumedia_control_i2s_set,
+	.private_value = ((unsigned long)&(struct soc_mixer_control)
+	{.reg = SND_SOC_NOPM,
+	.rreg = SND_SOC_NOPM,
+	.shift = 0,
+	.rshift = 0,
+	.max = 0xFFFFFFFF,
+	.platform_max = 0xFFFFFFFF,
+	.invert = 0
+	})
+	}
+};
+
+static const struct snd_kcontrol_new lpa_srs_trumedia_controls_mi2s[] = {
+	{
+		.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+		.name = "SRS TruMedia MI2S",
+		.access = SNDRV_CTL_ELEM_ACCESS_TLV_READ |
+			SNDRV_CTL_ELEM_ACCESS_READWRITE,
+		.info = snd_soc_info_volsw,
+		.get = msm_dts_srs_trumedia_control_get,
+		.put = msm_dts_srs_trumedia_control_mi2s_set,
+		.private_value = ((unsigned long)&(struct soc_mixer_control)
+		{
+			.reg = SND_SOC_NOPM,
+			.rreg = SND_SOC_NOPM,
+			.shift = 0,
+			.rshift = 0,
+			.max = 0xFFFFFFFF,
+			.platform_max = 0xFFFFFFFF,
+			.invert = 0
+		})
+	}
+};
+
+void msm_dts_srs_tm_add_controls(struct snd_soc_platform *platform)
+{
+	snd_soc_add_platform_controls(platform,
+				lpa_srs_trumedia_controls,
+			ARRAY_SIZE(lpa_srs_trumedia_controls));
+
+	snd_soc_add_platform_controls(platform,
+				lpa_srs_trumedia_controls_hdmi,
+			ARRAY_SIZE(lpa_srs_trumedia_controls_hdmi));
+
+	snd_soc_add_platform_controls(platform,
+				lpa_srs_trumedia_controls_i2s,
+			ARRAY_SIZE(lpa_srs_trumedia_controls_i2s));
+	snd_soc_add_platform_controls(platform,
+				lpa_srs_trumedia_controls_mi2s,
+			ARRAY_SIZE(lpa_srs_trumedia_controls_mi2s));
+}
+
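+/*
+ * Allocate the shared ION region used to pass SRS parameters to the DSP
+ * out of band; "po" keeps the kernel and physical addresses so the
+ * mapping can be handed out via msm_dts_srs_tm_ion_memmap().
+ */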
+static int reg_ion_mem(void)
+{
+	int rc;
+
+	rc = msm_audio_ion_alloc("SRS_TRUMEDIA", &ion_client, &ion_handle,
+				 ION_MEM_SIZE, &po.paddr, (size_t *)&po.size,
+				 &po.kvaddr);
+	if (rc != 0)
+		pr_err("%s: failed to allocate memory\n", __func__);
+
+	pr_debug("%s: exited ion_client = %pK, ion_handle = %pK, phys_addr = %lu, length = %d, vaddr = %pK, rc = 0x%x\n",
+		__func__, ion_client, ion_handle, (long)po.paddr,
+		(unsigned int)po.size, po.kvaddr, rc);
+	return rc;
+}
+
+void msm_dts_srs_tm_ion_memmap(struct param_outband *po_)
+{
+	if (po.kvaddr == NULL) {
+		pr_debug("%s: calling reg_ion_mem()\n", __func__);
+		reg_ion_mem();
+	}
+	po_->size = ION_MEM_SIZE;
+	po_->kvaddr = po.kvaddr;
+	po_->paddr = po.paddr;
+}
+
+static void unreg_ion_mem(void)
+{
+	msm_audio_ion_free(ion_client, ion_handle);
+	po.kvaddr = NULL;
+	po.paddr = 0;
+	po.size = 0;
+}
+
+void msm_dts_srs_tm_deinit(int port_id)
+{
+	set_port_id(port_id, -1);
+	atomic_dec(&ref_cnt);
+	if (po.kvaddr != NULL) {
+		if (!atomic_read(&ref_cnt)) {
+			pr_debug("%s: calling unreg_ion_mem()\n", __func__);
+			unreg_ion_mem();
+		}
+	}
+}
+
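+/*
+ * ref_cnt counts the sessions using the shared ION region: the first
+ * init allocates it, the last deinit releases it.
+ */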
+void msm_dts_srs_tm_init(int port_id, int copp_idx)
+{
+	int cur_ref_cnt = 0;
+
+	if (set_port_id(port_id, copp_idx) < 0) {
+		pr_err("%s: Invalid port_id: %d\n", __func__, port_id);
+		return;
+	}
+
+	cur_ref_cnt = atomic_read(&ref_cnt);
+	atomic_inc(&ref_cnt);
+	if (!cur_ref_cnt && po.kvaddr == NULL) {
+		pr_debug("%s: calling reg_ion_mem()\n", __func__);
+		if (reg_ion_mem() != 0) {
+			atomic_dec(&ref_cnt);
+			po.kvaddr = NULL;
+			return;
+		}
+	}
+	msm_dts_srs_tm_send_params(port_id, 1);
+}
diff -Nruw linux-4.4.115-fbx/sound/soc/msm./qdsp6v2/msm-dts-srs-tm-config.h linux-4.4.115-fbx/sound/soc/msm/qdsp6v2/msm-dts-srs-tm-config.h
--- linux-4.4.115-fbx/sound/soc/msm./qdsp6v2/msm-dts-srs-tm-config.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/sound/soc/msm/qdsp6v2/msm-dts-srs-tm-config.h	2019-01-22 16:16:29.627301863 +0100
@@ -0,0 +1,40 @@
+/* Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _MSM_DTS_SRS_TM_CONFIG_H_
+#define _MSM_DTS_SRS_TM_CONFIG_H_
+
+#include <sound/soc.h>
+
+struct param_outband;
+
+#ifdef CONFIG_DTS_SRS_TM
+
+union srs_trumedia_params_u {
+	struct srs_trumedia_params srs_params;
+	__u16 raw_params[1];
+};
+
+void msm_dts_srs_tm_ion_memmap(struct param_outband *po_);
+void msm_dts_srs_tm_init(int port_id, int copp_idx);
+void msm_dts_srs_tm_deinit(int port_id);
+void msm_dts_srs_tm_add_controls(struct snd_soc_platform *platform);
+#else
+static inline void msm_dts_srs_tm_ion_memmap(struct param_outband *po_) { }
+static inline void msm_dts_srs_tm_init(int port_id, int copp_idx) { }
+static inline void msm_dts_srs_tm_deinit(int port_id) { }
+static inline void msm_dts_srs_tm_add_controls(
+					struct snd_soc_platform *platform) { }
+
+#endif
+
+#endif
+
diff -Nruw linux-4.4.115-fbx/sound/soc/msm./qdsp6v2/msm-lsm-client.c linux-4.4.115-fbx/sound/soc/msm/qdsp6v2/msm-lsm-client.c
--- linux-4.4.115-fbx/sound/soc/msm./qdsp6v2/msm-lsm-client.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/sound/soc/msm/qdsp6v2/msm-lsm-client.c	2019-10-29 09:26:26.149227702 +0100
@@ -0,0 +1,2421 @@
+/*
+ * Copyright (c) 2013-2017, Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/time.h>
+#include <linux/wait.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/dma-mapping.h>
+#include <linux/of.h>
+#include <linux/msm_audio_ion.h>
+#include <linux/freezer.h>
+#include <sound/core.h>
+#include <sound/soc.h>
+#include <sound/soc-dapm.h>
+#include <sound/pcm.h>
+#include <sound/timer.h>
+#include <sound/initval.h>
+#include <sound/control.h>
+#include <sound/q6lsm.h>
+#include <sound/lsm_params.h>
+#include <sound/pcm_params.h>
+#include "msm-pcm-routing-v2.h"
+
+#define CAPTURE_MIN_NUM_PERIODS     2
+#define CAPTURE_MAX_NUM_PERIODS     8
+#define CAPTURE_MAX_PERIOD_SIZE     61440
+#define CAPTURE_MIN_PERIOD_SIZE     320
+#define LISTEN_MAX_STATUS_PAYLOAD_SIZE 256
+
+#define LAB_BUFFER_ALLOC 1
+#define LAB_BUFFER_DEALLOC 0
+
+static struct snd_pcm_hardware msm_pcm_hardware_capture = {
+	.info =                 (SNDRV_PCM_INFO_MMAP |
+				SNDRV_PCM_INFO_BLOCK_TRANSFER |
+				SNDRV_PCM_INFO_INTERLEAVED |
+				SNDRV_PCM_INFO_PAUSE | SNDRV_PCM_INFO_RESUME),
+	.formats =              (SNDRV_PCM_FMTBIT_S16_LE |
+				SNDRV_PCM_FMTBIT_S24_LE),
+	.rates =		(SNDRV_PCM_RATE_16000 |
+				SNDRV_PCM_RATE_48000),
+	.rate_min =             16000,
+	.rate_max =             48000,
+	.channels_min =         1,
+	.channels_max =         4,
+	.buffer_bytes_max =     CAPTURE_MAX_NUM_PERIODS *
+				CAPTURE_MAX_PERIOD_SIZE,
+	.period_bytes_min =	CAPTURE_MIN_PERIOD_SIZE,
+	.period_bytes_max =     CAPTURE_MAX_PERIOD_SIZE,
+	.periods_min =          CAPTURE_MIN_NUM_PERIODS,
+	.periods_max =          CAPTURE_MAX_NUM_PERIODS,
+	.fifo_size =            0,
+};
+
+/* Sample rates supported by the LSM capture path */
+static unsigned int supported_sample_rates[] = {
+	16000, 48000,
+};
+
+static struct snd_pcm_hw_constraint_list constraints_sample_rates = {
+	.count = ARRAY_SIZE(supported_sample_rates),
+	.list = supported_sample_rates,
+	.mask = 0,
+};
+
+struct lsm_priv {
+	struct snd_pcm_substream *substream;
+	struct lsm_client *lsm_client;
+	struct snd_lsm_event_status_v3 *event_status;
+	spinlock_t event_lock;
+	wait_queue_head_t event_wait;
+	unsigned long event_avail;
+	atomic_t event_wait_stop;
+	atomic_t buf_count;
+	atomic_t read_abort;
+	wait_queue_head_t period_wait;
+	struct mutex lsm_api_lock;
+	int appl_cnt;
+	int dma_write;
+};
+
+enum { /* lsm session states */
+	IDLE = 0,
+	RUNNING,
+};
+
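+/*
+ * Re-queue LAB (look-ahead buffer) period "i" to the DSP so capture data
+ * keeps streaming after a detection event.
+ */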
+static int msm_lsm_queue_lab_buffer(struct lsm_priv *prtd, int i)
+{
+	int rc = 0;
+	struct lsm_cmd_read cmd_read;
+	struct snd_soc_pcm_runtime *rtd;
+
+	if (!prtd || !prtd->lsm_client) {
+		pr_err("%s: Invalid params prtd %pK lsm client %pK\n",
+			__func__, prtd, ((!prtd) ? NULL : prtd->lsm_client));
+		return -EINVAL;
+	}
+	if (!prtd->substream || !prtd->substream->private_data) {
+		pr_err("%s: Invalid %s\n", __func__,
+			(!prtd->substream) ? "substream" : "private_data");
+		return -EINVAL;
+	}
+	rtd = prtd->substream->private_data;
+
+	if (!prtd->lsm_client->lab_buffer ||
+		i >= prtd->lsm_client->hw_params.period_count) {
+		dev_err(rtd->dev,
+			"%s: Lab buffer not setup %pK incorrect index %d period count %d\n",
+			__func__, prtd->lsm_client->lab_buffer, i,
+			prtd->lsm_client->hw_params.period_count);
+		return -EINVAL;
+	}
+	cmd_read.buf_addr_lsw =
+		lower_32_bits(prtd->lsm_client->lab_buffer[i].phys);
+	cmd_read.buf_addr_msw =
+		msm_audio_populate_upper_32_bits(
+				prtd->lsm_client->lab_buffer[i].phys);
+	cmd_read.buf_size = prtd->lsm_client->lab_buffer[i].size;
+	cmd_read.mem_map_handle =
+		prtd->lsm_client->lab_buffer[i].mem_map_handle;
+	rc = q6lsm_read(prtd->lsm_client, &cmd_read);
+	if (rc)
+		dev_err(rtd->dev,
+			"%s: error in queuing the lab buffer rc %d\n",
+			__func__, rc);
+	return rc;
+}
+
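+/*
+ * Validate a READ_DONE callback against the allocated LAB buffers:
+ * returns 0 and the matching buffer index only when the address, memmap
+ * handle and size all check out.
+ */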
+static int lsm_lab_buffer_sanity(struct lsm_priv *prtd,
+		struct lsm_cmd_read_done *read_done, int *index)
+{
+	int i = 0, rc = -EINVAL;
+	struct snd_soc_pcm_runtime *rtd;
+
+	if (!prtd || !read_done || !index) {
+		pr_err("%s: Invalid params prtd %pK read_done %pK index %pK\n",
+			__func__, prtd, read_done, index);
+		return -EINVAL;
+	}
+
+	if (!prtd->substream || !prtd->substream->private_data) {
+		pr_err("%s: Invalid %s\n", __func__,
+			(!prtd->substream) ? "substream" : "private_data");
+		return -EINVAL;
+	}
+	rtd = prtd->substream->private_data;
+
+	if (!prtd->lsm_client->lab_enable || !prtd->lsm_client->lab_buffer) {
+		dev_err(rtd->dev,
+			"%s: Lab not enabled %d invalid lab buffer %pK\n",
+			__func__, prtd->lsm_client->lab_enable,
+			prtd->lsm_client->lab_buffer);
+		return -EINVAL;
+	}
+	for (i = 0; i < prtd->lsm_client->hw_params.period_count; i++) {
+		if ((lower_32_bits(prtd->lsm_client->lab_buffer[i].phys) ==
+			read_done->buf_addr_lsw) &&
+			(msm_audio_populate_upper_32_bits
+				(prtd->lsm_client->lab_buffer[i].phys) ==
+			read_done->buf_addr_msw) &&
+			(prtd->lsm_client->lab_buffer[i].mem_map_handle ==
+			read_done->mem_map_handle)) {
+			dev_dbg(rtd->dev,
+				"%s: Buffer found %pK memmap handle %d\n",
+				__func__, &prtd->lsm_client->lab_buffer[i].phys,
+			prtd->lsm_client->lab_buffer[i].mem_map_handle);
+			if (read_done->total_size >
+				prtd->lsm_client->lab_buffer[i].size) {
+				dev_err(rtd->dev,
+					"%s: Size mismatch call back size %d actual size %zd\n",
+					__func__, read_done->total_size,
+				prtd->lsm_client->lab_buffer[i].size);
+				rc = -EINVAL;
+				break;
+			} else {
+				*index = i;
+				rc = 0;
+				break;
+			}
+		}
+	}
+	return rc;
+}
+
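+/*
+ * APR event callback for the LSM session; assumed to run in atomic
+ * context (hence the irqsave spinlock and GFP_ATOMIC below). Detection
+ * events are cached in prtd->event_status and waiters are woken through
+ * event_wait.
+ */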
+static void lsm_event_handler(uint32_t opcode, uint32_t token,
+			      void *payload, void *priv)
+{
+	unsigned long flags;
+	struct lsm_priv *prtd = priv;
+	struct snd_pcm_substream *substream = prtd->substream;
+	struct snd_soc_pcm_runtime *rtd;
+	uint16_t status = 0;
+	uint16_t payload_size = 0;
+	uint16_t index = 0;
+	uint32_t event_ts_lsw = 0;
+	uint32_t event_ts_msw = 0;
+
+	if (!substream || !substream->private_data) {
+		pr_err("%s: Invalid %s\n", __func__,
+			(!substream) ? "substream" : "private_data");
+		return;
+	}
+	rtd = substream->private_data;
+
+	switch (opcode) {
+	case LSM_DATA_EVENT_READ_DONE: {
+		int rc;
+		struct lsm_cmd_read_done *read_done = payload;
+		int buf_index = 0;
+		if (prtd->lsm_client->session != token ||
+		    !read_done) {
+			dev_err(rtd->dev,
+				"%s: EVENT_READ_DONE invalid callback, session %d callback %d payload %pK",
+				__func__, prtd->lsm_client->session,
+				token, read_done);
+			return;
+		}
+		if (atomic_read(&prtd->read_abort)) {
+			dev_dbg(rtd->dev,
+				"%s: read abort set skip data\n", __func__);
+			return;
+		}
+		if (!lsm_lab_buffer_sanity(prtd, read_done, &buf_index)) {
+			dev_dbg(rtd->dev,
+				"%s: process read done index %d\n",
+				__func__, buf_index);
+			if (buf_index >=
+				prtd->lsm_client->hw_params.period_count) {
+				dev_err(rtd->dev,
+					"%s: Invalid index %d buf_index max cnt %d\n",
+					__func__, buf_index,
+				prtd->lsm_client->hw_params.period_count);
+				return;
+			}
+			prtd->dma_write += read_done->total_size;
+			atomic_inc(&prtd->buf_count);
+			snd_pcm_period_elapsed(substream);
+			wake_up(&prtd->period_wait);
+			/* queue the next period buffer */
+			buf_index = (buf_index + 1) %
+			prtd->lsm_client->hw_params.period_count;
+			rc = msm_lsm_queue_lab_buffer(prtd, buf_index);
+			if (rc)
+				dev_err(rtd->dev,
+					"%s: error in queuing the lab buffer rc %d\n",
+					__func__, rc);
+		} else {
+			dev_err(rtd->dev, "%s: Invalid lab buffer returned by dsp\n",
+				__func__);
+		}
+		break;
+	}
+
+	case LSM_SESSION_EVENT_DETECTION_STATUS:
+		status = (uint16_t)((uint8_t *)payload)[0];
+		payload_size = (uint16_t)((uint8_t *)payload)[2];
+		index = 4;
+		dev_dbg(rtd->dev,
+			"%s: event detect status = %d payload size = %d\n",
+			__func__, status, payload_size);
+		break;
+
+	case LSM_SESSION_EVENT_DETECTION_STATUS_V2:
+		status = (uint16_t)((uint8_t *)payload)[0];
+		payload_size = (uint16_t)((uint8_t *)payload)[1];
+		index = 2;
+		dev_dbg(rtd->dev,
+			"%s: event detect status = %d payload size = %d\n",
+			__func__, status, payload_size);
+		break;
+
+	case LSM_SESSION_EVENT_DETECTION_STATUS_V3:
+		event_ts_lsw = ((uint32_t *)payload)[0];
+		event_ts_msw = ((uint32_t *)payload)[1];
+		status = (uint16_t)((uint8_t *)payload)[8];
+		payload_size = (uint16_t)((uint8_t *)payload)[9];
+		index = 10;
+		dev_dbg(rtd->dev,
+			"%s: ts_msw = %u, ts_lsw = %u, event detect status = %d payload size = %d\n",
+			__func__, event_ts_msw, event_ts_lsw, status,
+			payload_size);
+		break;
+
+	default:
+		break;
+	}
+
+	if (opcode == LSM_SESSION_EVENT_DETECTION_STATUS ||
+		opcode == LSM_SESSION_EVENT_DETECTION_STATUS_V2 ||
+		opcode == LSM_SESSION_EVENT_DETECTION_STATUS_V3) {
+		struct snd_lsm_event_status_v3 *new_status;
+
+		spin_lock_irqsave(&prtd->event_lock, flags);
+		/*
+		 * krealloc through a temporary so the old buffer is not
+		 * leaked on allocation failure.
+		 */
+		new_status = krealloc(prtd->event_status,
+				      sizeof(struct snd_lsm_event_status_v3) +
+				      payload_size, GFP_ATOMIC);
+		if (!new_status) {
+			spin_unlock_irqrestore(&prtd->event_lock, flags);
+			dev_err(rtd->dev, "%s: no memory for event status\n",
+				__func__);
+			return;
+		}
+		prtd->event_status = new_status;
+		/*
+		 * event status timestamp will be non-zero and valid if
+		 * opcode is LSM_SESSION_EVENT_DETECTION_STATUS_V3
+		 */
+		prtd->event_status->timestamp_lsw = event_ts_lsw;
+		prtd->event_status->timestamp_msw = event_ts_msw;
+		prtd->event_status->status = status;
+		prtd->event_status->payload_size = payload_size;
+		memcpy(prtd->event_status->payload,
+		       &((uint8_t *)payload)[index], payload_size);
+		prtd->event_avail = 1;
+		spin_unlock_irqrestore(&prtd->event_lock, flags);
+		wake_up(&prtd->event_wait);
+
+		if (substream->timer_running)
+			snd_timer_interrupt(substream->timer, 1);
+	}
+}
+
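+/*
+ * Allocate or free the LAB DMA buffers and (un)install them as the
+ * substream's runtime buffer; "alloc" is LAB_BUFFER_ALLOC or
+ * LAB_BUFFER_DEALLOC.
+ */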
+static int msm_lsm_lab_buffer_alloc(struct lsm_priv *lsm, int alloc)
+{
+	int ret = 0;
+	struct snd_dma_buffer *dma_buf = NULL;
+	if (!lsm) {
+		pr_err("%s: Invalid param lsm %pK\n", __func__, lsm);
+		return -EINVAL;
+	}
+	if (alloc) {
+		if (!lsm->substream) {
+			pr_err("%s: substream is NULL\n", __func__);
+			return -EINVAL;
+		}
+		ret = q6lsm_lab_buffer_alloc(lsm->lsm_client, alloc);
+		if (ret) {
+			pr_err("%s: alloc lab buffer failed ret %d\n",
+				__func__, ret);
+			goto exit;
+		}
+		dma_buf = &lsm->substream->dma_buffer;
+		dma_buf->dev.type = SNDRV_DMA_TYPE_DEV;
+		dma_buf->dev.dev = lsm->substream->pcm->card->dev;
+		dma_buf->private_data = NULL;
+		dma_buf->area = lsm->lsm_client->lab_buffer[0].data;
+		dma_buf->addr = lsm->lsm_client->lab_buffer[0].phys;
+		dma_buf->bytes = lsm->lsm_client->hw_params.buf_sz *
+		lsm->lsm_client->hw_params.period_count;
+		snd_pcm_set_runtime_buffer(lsm->substream, dma_buf);
+	} else {
+		ret = q6lsm_lab_buffer_alloc(lsm->lsm_client, alloc);
+		if (ret)
+			pr_err("%s: free lab buffer failed ret %d\n",
+				__func__, ret);
+		kfree(lsm->lsm_client->lab_buffer);
+		lsm->lsm_client->lab_buffer = NULL;
+	}
+exit:
+	return ret;
+}
+
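+/* Copy the per-keyword confidence levels from userspace into the client. */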
+static int msm_lsm_get_conf_levels(struct lsm_client *client,
+				   u8 *conf_levels_ptr)
+{
+	int rc = 0;
+
+	if (client->num_confidence_levels == 0) {
+		pr_debug("%s: no confidence levels provided\n",
+			__func__);
+		client->confidence_levels = NULL;
+		goto done;
+	}
+
+	client->confidence_levels =
+		kzalloc((sizeof(uint8_t) * client->num_confidence_levels),
+			 GFP_KERNEL);
+	if (!client->confidence_levels) {
+		pr_err("%s: no memory for confidence levels, num from user = %d\n",
+			__func__, client->num_confidence_levels);
+		rc = -ENOMEM;
+		goto done;
+	}
+
+	if (copy_from_user(client->confidence_levels,
+			   conf_levels_ptr,
+			   client->num_confidence_levels)) {
+		pr_err("%s: copy from user failed, size = %d\n",
+		       __func__, client->num_confidence_levels);
+		rc = -EFAULT;
+		goto copy_err;
+	}
+
+	return rc;
+
+copy_err:
+	kfree(client->confidence_levels);
+	client->confidence_levels = NULL;
+done:
+	return rc;
+}
+
+static int msm_lsm_set_epd(struct snd_pcm_substream *substream,
+		struct lsm_params_info *p_info)
+{
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	struct lsm_priv *prtd = runtime->private_data;
+	struct snd_soc_pcm_runtime *rtd = substream->private_data;
+	int rc = 0;
+	struct snd_lsm_ep_det_thres epd_th;
+
+	if (p_info->param_size != sizeof(epd_th)) {
+		dev_err(rtd->dev,
+			"%s: Invalid param_size %d\n",
+			__func__, p_info->param_size);
+		rc = -EINVAL;
+		goto done;
+	}
+
+	if (copy_from_user(&epd_th, p_info->param_data,
+			   p_info->param_size)) {
+		dev_err(rtd->dev,
+			"%s: copy_from_user failed, size = %d\n",
+			__func__, p_info->param_size);
+		rc = -EFAULT;
+		goto done;
+	}
+
+	rc = q6lsm_set_one_param(prtd->lsm_client, p_info,
+				 &epd_th, LSM_ENDPOINT_DETECT_THRESHOLD);
+	if (rc)
+		dev_err(rtd->dev,
+			"%s: Failed to set epd param, err = %d\n",
+			__func__, rc);
+done:
+	return rc;
+}
+
+static int msm_lsm_set_mode(struct snd_pcm_substream *substream,
+		struct lsm_params_info *p_info)
+{
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	struct lsm_priv *prtd = runtime->private_data;
+	struct snd_soc_pcm_runtime *rtd = substream->private_data;
+	struct snd_lsm_detect_mode mode;
+	int rc = 0;
+
+	if (p_info->param_size != sizeof(mode)) {
+		dev_err(rtd->dev,
+			"%s: Invalid param_size %d\n",
+			__func__, p_info->param_size);
+		rc = -EINVAL;
+		goto done;
+	}
+
+	if (copy_from_user(&mode, p_info->param_data,
+			   sizeof(mode))) {
+		dev_err(rtd->dev,
+			"%s: copy_from_user failed, size = %zd\n",
+			__func__, sizeof(mode));
+		rc = -EFAULT;
+		goto done;
+	}
+
+	rc = q6lsm_set_one_param(prtd->lsm_client, p_info,
+				 &mode, LSM_OPERATION_MODE);
+	if (rc)
+		dev_err(rtd->dev,
+			"%s: Failed to set det_mode param, err = %d\n",
+			__func__, rc);
+done:
+	return rc;
+}
+
+static int msm_lsm_set_gain(struct snd_pcm_substream *substream,
+		struct lsm_params_info *p_info)
+{
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	struct lsm_priv *prtd = runtime->private_data;
+	struct snd_soc_pcm_runtime *rtd = substream->private_data;
+	struct snd_lsm_gain gain;
+	int rc = 0;
+
+	if (p_info->param_size != sizeof(gain)) {
+		dev_err(rtd->dev,
+			"%s: Invalid param_size %d\n",
+			__func__, p_info->param_size);
+		rc = -EINVAL;
+		goto done;
+	}
+
+	if (copy_from_user(&gain, p_info->param_data,
+			   sizeof(gain))) {
+		dev_err(rtd->dev,
+			"%s: copy_from_user failed, size = %zd\n",
+			__func__, sizeof(gain));
+		rc = -EFAULT;
+		goto done;
+	}
+
+	rc = q6lsm_set_one_param(prtd->lsm_client, p_info,
+				 &gain, LSM_GAIN);
+	if (rc)
+		dev_err(rtd->dev,
+			"%s: Failed to set det_mode param, err = %d\n",
+			__func__, rc);
+done:
+	return rc;
+}
+
+static int msm_lsm_set_conf(struct snd_pcm_substream *substream,
+		struct lsm_params_info *p_info)
+{
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	struct lsm_priv *prtd = runtime->private_data;
+	struct snd_soc_pcm_runtime *rtd = substream->private_data;
+	int rc = 0;
+
+	if (p_info->param_size > MAX_NUM_CONFIDENCE) {
+		dev_err(rtd->dev,
+			"%s: invalid confidence levels %d\n",
+			__func__, p_info->param_size);
+		return -EINVAL;
+	}
+
+	prtd->lsm_client->num_confidence_levels =
+			p_info->param_size;
+	rc = msm_lsm_get_conf_levels(prtd->lsm_client,
+				     p_info->param_data);
+	if (rc) {
+		dev_err(rtd->dev,
+			"%s: get_conf_levels failed, err = %d\n",
+			__func__, rc);
+		return rc;
+	}
+
+	rc = q6lsm_set_one_param(prtd->lsm_client, p_info,
+				 prtd->lsm_client->confidence_levels,
+				 LSM_MIN_CONFIDENCE_LEVELS);
+	if (rc)
+		dev_err(rtd->dev,
+			"%s: Failed to set min_conf_levels, err = %d\n",
+			__func__, rc);
+
+	return rc;
+}
+
+static int msm_lsm_reg_model(struct snd_pcm_substream *substream,
+		struct lsm_params_info *p_info)
+{
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	struct lsm_priv *prtd = runtime->private_data;
+	struct snd_soc_pcm_runtime *rtd = substream->private_data;
+	int rc = 0;
+	u8 *snd_model_ptr;
+	size_t offset = 0;
+
+	rc = q6lsm_snd_model_buf_alloc(prtd->lsm_client,
+				       p_info->param_size,
+				       true);
+	if (rc) {
+		dev_err(rtd->dev,
+			"%s: snd_model buf alloc failed, size = %d\n",
+			__func__, p_info->param_size);
+		return rc;
+	}
+
+	q6lsm_sm_set_param_data(prtd->lsm_client, p_info, &offset);
+
+	/*
+	 * For set_param, advance the sound model data with the
+	 * number of bytes required by param_data.
+	 */
+	snd_model_ptr = ((u8 *) prtd->lsm_client->sound_model.data) + offset;
+
+	if (copy_from_user(snd_model_ptr,
+			   p_info->param_data, p_info->param_size)) {
+		dev_err(rtd->dev,
+			"%s: copy_from_user for snd_model failed, size = %d\n",
+			__func__, p_info->param_size);
+		rc = -EFAULT;
+		goto err_copy;
+	}
+	rc = q6lsm_set_one_param(prtd->lsm_client, p_info, NULL,
+				 LSM_REG_SND_MODEL);
+	if (rc) {
+		dev_err(rtd->dev,
+			"%s: Failed to set sound_model, err = %d\n",
+			__func__, rc);
+		goto err_copy;
+	}
+	return rc;
+
+err_copy:
+	q6lsm_snd_model_buf_free(prtd->lsm_client);
+	return rc;
+}
+
+static int msm_lsm_dereg_model(struct snd_pcm_substream *substream,
+		struct lsm_params_info *p_info)
+{
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	struct lsm_priv *prtd = runtime->private_data;
+	struct snd_soc_pcm_runtime *rtd = substream->private_data;
+	int rc = 0;
+
+	rc = q6lsm_set_one_param(prtd->lsm_client, p_info,
+				 NULL, LSM_DEREG_SND_MODEL);
+	if (rc)
+		dev_err(rtd->dev,
+			"%s: Failed to set det_mode param, err = %d\n",
+			__func__, rc);
+
+	q6lsm_snd_model_buf_free(prtd->lsm_client);
+
+	return rc;
+}
+
+static int msm_lsm_set_custom(struct snd_pcm_substream *substream,
+		struct lsm_params_info *p_info)
+{
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	struct lsm_priv *prtd = runtime->private_data;
+	struct snd_soc_pcm_runtime *rtd = substream->private_data;
+	u8 *data;
+	int rc = 0;
+
+	data = kzalloc(p_info->param_size, GFP_KERNEL);
+	if (!data) {
+		dev_err(rtd->dev,
+			"%s: no memory for custom data, size = %d\n",
+			__func__, p_info->param_size);
+		return -ENOMEM;
+	}
+
+	if (copy_from_user(data, p_info->param_data,
+			   p_info->param_size)) {
+		dev_err(rtd->dev,
+			"%s: copy_from_user failed for custom params, size = %d\n",
+			__func__, p_info->param_size);
+		rc = -EFAULT;
+		goto err_ret;
+	}
+
+	rc = q6lsm_set_one_param(prtd->lsm_client, p_info,
+				 data, LSM_CUSTOM_PARAMS);
+	if (rc)
+		dev_err(rtd->dev,
+			"%s: Failed to set custom param, err = %d\n",
+			__func__, rc);
+
+err_ret:
+	kfree(data);
+	return rc;
+}
+
+static int msm_lsm_set_poll_enable(struct snd_pcm_substream *substream,
+		struct lsm_params_info *p_info)
+{
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	struct lsm_priv *prtd = runtime->private_data;
+	struct snd_soc_pcm_runtime *rtd = substream->private_data;
+	struct snd_lsm_poll_enable poll_enable;
+	int rc = 0;
+
+	if (p_info->param_size != sizeof(poll_enable)) {
+		dev_err(rtd->dev,
+			"%s: Invalid param_size %d\n",
+			__func__, p_info->param_size);
+		rc = -EINVAL;
+		goto done;
+	}
+
+	if (copy_from_user(&poll_enable, p_info->param_data,
+			   sizeof(poll_enable))) {
+		dev_err(rtd->dev,
+			"%s: copy_from_user failed, size = %zd\n",
+			__func__, sizeof(poll_enable));
+		rc = -EFAULT;
+		goto done;
+	}
+
+	if (prtd->lsm_client->poll_enable == poll_enable.poll_en) {
+		dev_dbg(rtd->dev,
+			"%s: Polling for session %d already %s\n",
+			__func__, prtd->lsm_client->session,
+			(poll_enable.poll_en ? "enabled" : "disabled"));
+		rc = 0;
+		goto done;
+	}
+
+	rc = q6lsm_set_one_param(prtd->lsm_client, p_info,
+				 &poll_enable, LSM_POLLING_ENABLE);
+	if (!rc) {
+		prtd->lsm_client->poll_enable = poll_enable.poll_en;
+	} else {
+		dev_err(rtd->dev,
+			"%s: Failed to set poll enable, err = %d\n",
+			__func__, rc);
+	}
+done:
+	return rc;
+}
+
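+/*
+ * Walk the lsm_params_info array supplied by userspace and dispatch each
+ * entry to its typed setter, stopping at the first failure.
+ */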
+static int msm_lsm_process_params(struct snd_pcm_substream *substream,
+		struct snd_lsm_module_params *p_data,
+		void *params)
+{
+	struct snd_soc_pcm_runtime *rtd = substream->private_data;
+	struct lsm_params_info *p_info;
+	int i;
+	int rc = 0;
+
+	p_info = (struct lsm_params_info *) params;
+
+	for (i = 0; i < p_data->num_params; i++) {
+		dev_dbg(rtd->dev,
+			"%s: param (%d), module_id = 0x%x, param_id = 0x%x, param_size = 0x%x, param_type = 0x%x\n",
+			__func__, i, p_info->module_id,
+			p_info->param_id, p_info->param_size,
+			p_info->param_type);
+
+		switch (p_info->param_type) {
+		case LSM_ENDPOINT_DETECT_THRESHOLD:
+			rc = msm_lsm_set_epd(substream, p_info);
+			break;
+		case LSM_OPERATION_MODE:
+			rc = msm_lsm_set_mode(substream, p_info);
+			break;
+		case LSM_GAIN:
+			rc = msm_lsm_set_gain(substream, p_info);
+			break;
+		case LSM_MIN_CONFIDENCE_LEVELS:
+			rc = msm_lsm_set_conf(substream, p_info);
+			break;
+		case LSM_REG_SND_MODEL:
+			rc = msm_lsm_reg_model(substream, p_info);
+			break;
+		case LSM_DEREG_SND_MODEL:
+			rc = msm_lsm_dereg_model(substream, p_info);
+			break;
+		case LSM_CUSTOM_PARAMS:
+			rc = msm_lsm_set_custom(substream, p_info);
+			break;
+		case LSM_POLLING_ENABLE:
+			rc = msm_lsm_set_poll_enable(substream, p_info);
+			break;
+		default:
+			dev_err(rtd->dev,
+				"%s: Invalid param_type %d\n",
+				__func__, p_info->param_type);
+			rc = -EINVAL;
+			break;
+		}
+		if (rc) {
+			pr_err("%s: set_param fail for param_type %d\n",
+				__func__, p_info->param_type);
+			return rc;
+		}
+
+		p_info++;
+	}
+
+	return rc;
+}
+
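+/*
+ * Core ioctl handler shared by the native and compat entry points. Note
+ * that "arg" is a raw user pointer for some commands (e.g.
+ * SET_SESSION_DATA, LAB_CONTROL, SET_FWK_MODE_CONFIG) but a kernel-space
+ * copy prepared by the wrappers for others (e.g. REG_SND_MODEL_V2,
+ * SET_PARAMS, EVENT_STATUS*).
+ */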
+static int msm_lsm_ioctl_shared(struct snd_pcm_substream *substream,
+			 unsigned int cmd, void *arg)
+{
+	struct snd_soc_pcm_runtime *rtd;
+	unsigned long flags;
+	int ret;
+	struct snd_lsm_sound_model_v2 snd_model_v2;
+	struct snd_lsm_session_data session_data;
+	int rc = 0;
+	int xchg = 0;
+	struct snd_pcm_runtime *runtime;
+	struct lsm_priv *prtd;
+	struct snd_lsm_detection_params det_params;
+	uint8_t *confidence_level = NULL;
+
+	if (!substream || !substream->private_data) {
+		pr_err("%s: Invalid %s\n", __func__,
+			(!substream) ? "substream" : "private_data");
+		return -EINVAL;
+	}
+
+	runtime = substream->runtime;
+	prtd = runtime->private_data;
+	rtd = substream->private_data;
+
+	switch (cmd) {
+	case SNDRV_LSM_SET_SESSION_DATA:
+		dev_dbg(rtd->dev, "%s: set session data\n", __func__);
+		if (copy_from_user(&session_data, arg,
+				   sizeof(session_data))) {
+			dev_err(rtd->dev, "%s: %s: copy_from_user failed\n",
+				__func__, "LSM_SET_SESSION_DATA");
+			return -EFAULT;
+		}
+
+		if (session_data.app_id != LSM_VOICE_WAKEUP_APP_ID_V2) {
+			dev_err(rtd->dev,
+				"%s:Invalid App id %d for Listen client\n",
+			       __func__, session_data.app_id);
+			rc = -EINVAL;
+			break;
+		}
+
+		prtd->lsm_client->app_id = session_data.app_id;
+		ret = q6lsm_open(prtd->lsm_client,
+				 prtd->lsm_client->app_id);
+		if (ret < 0) {
+			dev_err(rtd->dev,
+				"%s: lsm open failed, %d\n",
+				__func__, ret);
+			return ret;
+		}
+		prtd->lsm_client->opened = true;
+		dev_dbg(rtd->dev, "%s: Session_ID = %d, APP ID = %d\n",
+			__func__,
+			prtd->lsm_client->session,
+			prtd->lsm_client->app_id);
+		break;
+	case SNDRV_LSM_REG_SND_MODEL_V2:
+		dev_dbg(rtd->dev, "%s: Registering sound model V2\n",
+			__func__);
+		memcpy(&snd_model_v2, arg,
+		       sizeof(struct snd_lsm_sound_model_v2));
+		if (snd_model_v2.num_confidence_levels >
+		    MAX_NUM_CONFIDENCE) {
+			dev_err(rtd->dev,
+				"%s: Invalid conf_levels = %d, maximum allowed = %d\n",
+				__func__, snd_model_v2.num_confidence_levels,
+				MAX_NUM_CONFIDENCE);
+			rc = -EINVAL;
+			break;
+		}
+		rc = q6lsm_snd_model_buf_alloc(prtd->lsm_client,
+					       snd_model_v2.data_size, false);
+		if (rc) {
+			dev_err(rtd->dev,
+				"%s: q6lsm buffer alloc failed V2, size %d\n",
+			       __func__, snd_model_v2.data_size);
+			break;
+		}
+		if (copy_from_user(prtd->lsm_client->sound_model.data,
+			   snd_model_v2.data, snd_model_v2.data_size)) {
+			dev_err(rtd->dev,
+				"%s: copy from user data failed\n"
+			       "data %pK size %d\n", __func__,
+			       snd_model_v2.data, snd_model_v2.data_size);
+			q6lsm_snd_model_buf_free(prtd->lsm_client);
+			rc = -EFAULT;
+			break;
+		}
+
+		dev_dbg(rtd->dev, "SND Model Magic no byte[0] %x,\n"
+			 "byte[1] %x, byte[2] %x byte[3] %x\n",
+			 snd_model_v2.data[0], snd_model_v2.data[1],
+			 snd_model_v2.data[2], snd_model_v2.data[3]);
+		prtd->lsm_client->num_confidence_levels =
+			snd_model_v2.num_confidence_levels;
+
+		rc = msm_lsm_get_conf_levels(prtd->lsm_client,
+				snd_model_v2.confidence_level);
+		if (rc) {
+			dev_err(rtd->dev,
+				"%s: get_conf_levels failed, err = %d\n",
+				__func__, rc);
+			break;
+		}
+
+		rc = q6lsm_register_sound_model(prtd->lsm_client,
+					snd_model_v2.detection_mode,
+					snd_model_v2.detect_failure);
+		if (rc < 0) {
+			dev_err(rtd->dev,
+				"%s: Register snd Model v2 failed =%d\n",
+			       __func__, rc);
+			kfree(confidence_level);
+			q6lsm_snd_model_buf_free(prtd->lsm_client);
+		}
+
+		kfree(prtd->lsm_client->confidence_levels);
+		prtd->lsm_client->confidence_levels = NULL;
+		break;
+
+	case SNDRV_LSM_SET_PARAMS:
+		dev_dbg(rtd->dev, "%s: set_params\n", __func__);
+		memcpy(&det_params, arg,
+			sizeof(det_params));
+		if (det_params.num_confidence_levels >
+		    MAX_NUM_CONFIDENCE) {
+			rc = -EINVAL;
+			break;
+		}
+
+		prtd->lsm_client->num_confidence_levels =
+			det_params.num_confidence_levels;
+
+		rc = msm_lsm_get_conf_levels(prtd->lsm_client,
+				det_params.conf_level);
+		if (rc) {
+			dev_err(rtd->dev,
+				"%s: Failed to get conf_levels, err = %d\n",
+				__func__, rc);
+			break;
+		}
+
+		rc = q6lsm_set_data(prtd->lsm_client,
+			       det_params.detect_mode,
+			       det_params.detect_failure);
+		if (rc)
+			dev_err(rtd->dev,
+				"%s: Failed to set params, err = %d\n",
+				__func__, rc);
+
+		kfree(prtd->lsm_client->confidence_levels);
+		prtd->lsm_client->confidence_levels = NULL;
+
+		break;
+
+	case SNDRV_LSM_DEREG_SND_MODEL:
+		dev_dbg(rtd->dev, "%s: Deregistering sound model\n",
+			__func__);
+		rc = q6lsm_deregister_sound_model(prtd->lsm_client);
+		if (rc)
+			dev_err(rtd->dev,
+				"%s: Sound model de-register failed, err = %d\n",
+				__func__, rc);
+		break;
+
+	case SNDRV_LSM_EVENT_STATUS:
+	case SNDRV_LSM_EVENT_STATUS_V3: {
+		uint32_t ts_lsw, ts_msw;
+		uint16_t status = 0, payload_size = 0;
+
+		dev_dbg(rtd->dev, "%s: Get event status\n", __func__);
+		atomic_set(&prtd->event_wait_stop, 0);
+
+		/*
+		 * Release the api lock before wait to allow
+		 * other IOCTLs to be invoked while waiting
+		 * for event
+		 */
+		mutex_unlock(&prtd->lsm_api_lock);
+		rc = wait_event_freezable(prtd->event_wait,
+				(cmpxchg(&prtd->event_avail, 1, 0) ||
+				 (xchg = atomic_cmpxchg(&prtd->event_wait_stop,
+							1, 0))));
+		mutex_lock(&prtd->lsm_api_lock);
+		dev_dbg(rtd->dev, "%s: wait_event_freezable %d event_wait_stop %d\n",
+			 __func__, rc, xchg);
+		if (!rc && !xchg) {
+			dev_dbg(rtd->dev, "%s: New event available %ld\n",
+				__func__, prtd->event_avail);
+			spin_lock_irqsave(&prtd->event_lock, flags);
+
+			if (prtd->event_status) {
+				payload_size = prtd->event_status->payload_size;
+				ts_lsw = prtd->event_status->timestamp_lsw;
+				ts_msw = prtd->event_status->timestamp_msw;
+				status = prtd->event_status->status;
+				spin_unlock_irqrestore(&prtd->event_lock,
+						       flags);
+			} else {
+				spin_unlock_irqrestore(&prtd->event_lock,
+						       flags);
+				rc = -EINVAL;
+				dev_err(rtd->dev,
+					"%s: prtd->event_status is NULL\n",
+					__func__);
+				break;
+			}
+
+			if (cmd == SNDRV_LSM_EVENT_STATUS) {
+				struct snd_lsm_event_status *user = arg;
+
+				if (user->payload_size < payload_size) {
+					dev_dbg(rtd->dev,
+						"%s: provided %d bytes isn't enough, needs %d bytes\n",
+						__func__, user->payload_size,
+						payload_size);
+					rc = -ENOMEM;
+				} else {
+					user->status = status;
+					user->payload_size = payload_size;
+					memcpy(user->payload,
+						prtd->event_status->payload,
+						payload_size);
+				}
+			} else {
+				struct snd_lsm_event_status_v3 *user_v3 = arg;
+
+				if (user_v3->payload_size < payload_size) {
+					dev_dbg(rtd->dev,
+						"%s: provided %d bytes isn't enough, needs %d bytes\n",
+						__func__, user_v3->payload_size,
+						payload_size);
+					rc = -ENOMEM;
+				} else {
+					user_v3->timestamp_lsw = ts_lsw;
+					user_v3->timestamp_msw = ts_msw;
+					user_v3->status = status;
+					user_v3->payload_size = payload_size;
+					memcpy(user_v3->payload,
+						prtd->event_status->payload,
+						payload_size);
+				}
+			}
+			if (!rc) {
+				if (prtd->lsm_client->lab_enable
+					&& !prtd->lsm_client->lab_started
+					&& prtd->event_status->status ==
+					LSM_VOICE_WAKEUP_STATUS_DETECTED) {
+					atomic_set(&prtd->read_abort, 0);
+					atomic_set(&prtd->buf_count, 0);
+					prtd->appl_cnt = 0;
+					prtd->dma_write = 0;
+					rc = msm_lsm_queue_lab_buffer(prtd,
+						0);
+					if (rc)
+						dev_err(rtd->dev,
+							"%s: Queue buffer failed for lab rc = %d\n",
+							__func__, rc);
+					else
+						prtd->lsm_client->lab_started
+						= true;
+				}
+			}
+		} else if (xchg) {
+			dev_dbg(rtd->dev, "%s: Wait aborted\n", __func__);
+			rc = 0;
+		}
+		break;
+	}
+
+	case SNDRV_LSM_ABORT_EVENT:
+		dev_dbg(rtd->dev, "%s: Aborting event status wait\n",
+			__func__);
+		atomic_set(&prtd->event_wait_stop, 1);
+		wake_up(&prtd->event_wait);
+		break;
+
+	case SNDRV_LSM_START:
+		dev_dbg(rtd->dev, "%s: Starting LSM client session\n",
+			__func__);
+		if (!prtd->lsm_client->started) {
+			ret = q6lsm_start(prtd->lsm_client, true);
+			if (!ret) {
+				prtd->lsm_client->started = true;
+				dev_dbg(rtd->dev, "%s: LSM client session started\n",
+					 __func__);
+			}
+		}
+		break;
+
+	case SNDRV_LSM_STOP: {
+		dev_dbg(rtd->dev,
+			"%s: Stopping LSM client session\n",
+			__func__);
+		if (prtd->lsm_client->started) {
+			if (prtd->lsm_client->lab_enable) {
+				atomic_set(&prtd->read_abort, 1);
+				if (prtd->lsm_client->lab_started) {
+					ret = q6lsm_stop_lab(prtd->lsm_client);
+					if (ret)
+						dev_err(rtd->dev,
+							"%s: stop lab failed ret %d\n",
+							__func__, ret);
+					prtd->lsm_client->lab_started = false;
+				}
+			}
+			ret = q6lsm_stop(prtd->lsm_client, true);
+			if (!ret)
+				dev_dbg(rtd->dev,
+					"%s: LSM client session stopped %d\n",
+					__func__, ret);
+			prtd->lsm_client->started = false;
+		}
+		break;
+	}
+	case SNDRV_LSM_LAB_CONTROL: {
+		u32 enable;
+
+		if (copy_from_user(&enable, arg, sizeof(enable))) {
+			dev_err(rtd->dev, "%s: %s: copy_from_user failed\n",
+				__func__, "LSM_LAB_CONTROL");
+			return -EFAULT;
+		}
+
+		dev_dbg(rtd->dev, "%s: ioctl %s, enable = %d\n",
+			 __func__, "SNDRV_LSM_LAB_CONTROL", enable);
+		if (!prtd->lsm_client->started) {
+			if (prtd->lsm_client->lab_enable == enable) {
+				dev_dbg(rtd->dev,
+					"%s: Lab for session %d already %s\n",
+					__func__, prtd->lsm_client->session,
+					enable ? "enabled" : "disabled");
+				rc = 0;
+				break;
+			}
+			rc = q6lsm_lab_control(prtd->lsm_client, enable);
+			if (rc) {
+				dev_err(rtd->dev,
+					"%s: ioctl %s failed rc %d to %s lab for session %d\n",
+					__func__, "SNDRV_LAB_CONTROL", rc,
+					enable ? "enable" : "disable",
+					prtd->lsm_client->session);
+			} else {
+				rc = msm_lsm_lab_buffer_alloc(prtd,
+					enable ? LAB_BUFFER_ALLOC
+					: LAB_BUFFER_DEALLOC);
+				if (rc)
+					dev_err(rtd->dev,
+						"%s: msm_lsm_lab_buffer_alloc failed rc %d for %s",
+						__func__, rc,
+						enable ? "ALLOC" : "DEALLOC");
+				if (!rc)
+					prtd->lsm_client->lab_enable = enable;
+			}
+		} else {
+			dev_err(rtd->dev, "%s: ioctl %s issued after start",
+				__func__, "SNDRV_LSM_LAB_CONTROL");
+			rc = -EINVAL;
+		}
+		break;
+	}
+	case SNDRV_LSM_STOP_LAB:
+		dev_dbg(rtd->dev, "%s: stopping LAB\n", __func__);
+		if (prtd->lsm_client->lab_enable &&
+			prtd->lsm_client->lab_started) {
+			atomic_set(&prtd->read_abort, 1);
+			rc = q6lsm_stop_lab(prtd->lsm_client);
+			if (rc)
+				dev_err(rtd->dev,
+					"%s: Lab stop failed for session %d rc %d\n",
+					__func__,
+					prtd->lsm_client->session, rc);
+			prtd->lsm_client->lab_started = false;
+		}
+	break;
+
+	case SNDRV_LSM_SET_PORT:
+		dev_dbg(rtd->dev, "%s: set LSM port\n", __func__);
+		rc = q6lsm_set_port_connected(prtd->lsm_client);
+		break;
+
+	case SNDRV_LSM_SET_FWK_MODE_CONFIG: {
+		u32 mode;
+
+		if (copy_from_user(&mode, (void __user *) arg, sizeof(mode))) {
+			dev_err(rtd->dev, "%s: %s: copy_from_user failed\n",
+				__func__, "LSM_SET_FWK_MODE_CONFIG");
+			return -EFAULT;
+		}
+
+		dev_dbg(rtd->dev, "%s: ioctl %s, enable = %d\n",
+			__func__, "SNDRV_LSM_SET_FWK_MODE_CONFIG", mode);
+		if (prtd->lsm_client->event_mode == mode) {
+			dev_dbg(rtd->dev,
+				"%s: mode for %d already set to %d\n",
+				__func__, prtd->lsm_client->session, mode);
+			rc = 0;
+		} else {
+			dev_dbg(rtd->dev, "%s: Event mode = %d\n",
+				 __func__, mode);
+			rc = q6lsm_set_fwk_mode_cfg(prtd->lsm_client, mode);
+			if (!rc)
+				prtd->lsm_client->event_mode = mode;
+			else
+				dev_err(rtd->dev,
+					"%s: set event mode failed %d\n",
+					__func__, rc);
+		}
+		break;
+	}
+
+	default:
+		dev_dbg(rtd->dev,
+			"%s: Falling into default snd_lib_ioctl cmd 0x%x\n",
+			 __func__, cmd);
+		rc = snd_pcm_lib_ioctl(substream, cmd, arg);
+		break;
+	}
+
+	if (!rc)
+		dev_dbg(rtd->dev, "%s: leave (%d)\n",
+			__func__, rc);
+	else
+		dev_err(rtd->dev, "%s: cmd 0x%x failed %d\n",
+			__func__, cmd, rc);
+
+	return rc;
+}
+#ifdef CONFIG_COMPAT
+
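+/*
+ * 32-bit userspace layouts: pointer members are carried as
+ * compat_uptr_t and converted with compat_ptr() before the native
+ * handlers are reused.
+ */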
+struct snd_lsm_event_status32 {
+	u16 status;
+	u16 payload_size;
+	u8 payload[0];
+};
+
+struct snd_lsm_event_status_v3_32 {
+	u32 timestamp_lsw;
+	u32 timestamp_msw;
+	u16 status;
+	u16 payload_size;
+	u8 payload[0];
+};
+
+struct snd_lsm_sound_model_v2_32 {
+	compat_uptr_t data;
+	compat_uptr_t confidence_level;
+	u32 data_size;
+	enum lsm_detection_mode detection_mode;
+	u8 num_confidence_levels;
+	bool detect_failure;
+};
+
+struct snd_lsm_detection_params_32 {
+	compat_uptr_t conf_level;
+	enum lsm_detection_mode detect_mode;
+	u8 num_confidence_levels;
+	bool detect_failure;
+};
+
+struct lsm_params_info_32 {
+	u32 module_id;
+	u32 param_id;
+	u32 param_size;
+	compat_uptr_t param_data;
+	uint32_t param_type;
+};
+
+struct snd_lsm_module_params_32 {
+	compat_uptr_t params;
+	u32 num_params;
+	u32 data_size;
+};
+
+enum {
+	SNDRV_LSM_REG_SND_MODEL_V2_32 =
+		_IOW('U', 0x07, struct snd_lsm_sound_model_v2_32),
+	SNDRV_LSM_SET_PARAMS_32 =
+		_IOW('U', 0x0A, struct snd_lsm_detection_params_32),
+	SNDRV_LSM_SET_MODULE_PARAMS_32 =
+		_IOW('U', 0x0B, struct snd_lsm_module_params_32),
+	SNDRV_LSM_EVENT_STATUS_V3_32 =
+		_IOW('U', 0x0F, struct snd_lsm_event_status_v3_32),
+};
+
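+/*
+ * Compat ioctl entry point: translate the 32-bit argument layouts above,
+ * then funnel into msm_lsm_ioctl_shared() under lsm_api_lock.
+ */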
+static int msm_lsm_ioctl_compat(struct snd_pcm_substream *substream,
+			  unsigned int cmd, void __user *arg)
+{
+	struct snd_pcm_runtime *runtime;
+	struct lsm_priv *prtd;
+	struct snd_soc_pcm_runtime *rtd;
+	int err = 0;
+	u32 size = 0;
+
+	if (PCM_RUNTIME_CHECK(substream))
+		return -ENXIO;
+
+	if (!substream || !substream->private_data) {
+		pr_err("%s: Invalid %s\n", __func__,
+			(!substream) ? "substream" : "private_data");
+		return -EINVAL;
+	}
+	runtime = substream->runtime;
+	rtd = substream->private_data;
+	prtd = runtime->private_data;
+
+	mutex_lock(&prtd->lsm_api_lock);
+
+	switch (cmd) {
+	case SNDRV_LSM_EVENT_STATUS: {
+		struct snd_lsm_event_status *user = NULL, userarg32;
+		struct snd_lsm_event_status *user32 = NULL;
+		if (copy_from_user(&userarg32, arg, sizeof(userarg32))) {
+			dev_err(rtd->dev, "%s: err copyuser ioctl %s\n",
+				__func__, "SNDRV_LSM_EVENT_STATUS");
+			err = -EFAULT;
+			goto done;
+		}
+
+		if (userarg32.payload_size >
+		    LISTEN_MAX_STATUS_PAYLOAD_SIZE) {
+			pr_err("%s: payload_size %d is invalid, max allowed = %d\n",
+				__func__, userarg32.payload_size,
+				LISTEN_MAX_STATUS_PAYLOAD_SIZE);
+			err = -EINVAL;
+			goto done;
+		}
+
+		size = sizeof(*user) + userarg32.payload_size;
+		user = kzalloc(size, GFP_KERNEL);
+		if (!user) {
+			dev_err(rtd->dev,
+				"%s: Allocation failed event status size %d\n",
+				__func__, size);
+			err = -ENOMEM;
+			goto done;
+		} else {
+			cmd = SNDRV_LSM_EVENT_STATUS;
+			user->payload_size = userarg32.payload_size;
+			err = msm_lsm_ioctl_shared(substream, cmd, user);
+		}
+		/* Update size with actual payload size */
+		size = sizeof(userarg32) + user->payload_size;
+		if (!err && !access_ok(VERIFY_WRITE, arg, size)) {
+			dev_err(rtd->dev,
+				"%s: write verify failed size %d\n",
+				__func__, size);
+			err = -EFAULT;
+		}
+		if (!err) {
+			user32 = kzalloc(size, GFP_KERNEL);
+			if (!user32) {
+				dev_err(rtd->dev,
+					"%s: Allocation event user status size %d\n",
+					__func__, size);
+				err = -ENOMEM;
+			} else {
+				user32->status = user->status;
+				user32->payload_size = user->payload_size;
+				memcpy(user32->payload,
+				user->payload, user32->payload_size);
+			}
+		}
+		if (!err && (copy_to_user(arg, user32, size))) {
+			dev_err(rtd->dev, "%s: failed to copy payload %d",
+				__func__, size);
+			err = -EFAULT;
+		}
+		kfree(user);
+		kfree(user32);
+		if (err)
+			dev_err(rtd->dev, "%s: lsmevent failed %d",
+				__func__, err);
+		break;
+	}
+
+	case SNDRV_LSM_EVENT_STATUS_V3_32: {
+		struct snd_lsm_event_status_v3_32 userarg32, *user32 = NULL;
+		struct snd_lsm_event_status_v3 *user = NULL;
+
+		if (copy_from_user(&userarg32, arg, sizeof(userarg32))) {
+			dev_err(rtd->dev, "%s: err copyuser ioctl %s\n",
+				__func__, "SNDRV_LSM_EVENT_STATUS_V3_32");
+			err = -EFAULT;
+			goto done;
+		}
+
+		if (userarg32.payload_size >
+		    LISTEN_MAX_STATUS_PAYLOAD_SIZE) {
+			pr_err("%s: payload_size %d is invalid, max allowed = %d\n",
+				__func__, userarg32.payload_size,
+				LISTEN_MAX_STATUS_PAYLOAD_SIZE);
+			err = -EINVAL;
+			goto done;
+		}
+
+		size = sizeof(*user) + userarg32.payload_size;
+		user = kzalloc(size, GFP_KERNEL);
+		if (!user) {
+			dev_err(rtd->dev,
+				"%s: Allocation failed event status size %d\n",
+				__func__, size);
+			err = -ENOMEM;
+			goto done;
+		}
+		cmd = SNDRV_LSM_EVENT_STATUS_V3;
+		user->payload_size = userarg32.payload_size;
+		err = msm_lsm_ioctl_shared(substream, cmd, user);
+
+		/* Update size with actual payload size */
+		size = sizeof(userarg32) + user->payload_size;
+		if (!err && !access_ok(VERIFY_WRITE, arg, size)) {
+			dev_err(rtd->dev,
+				"%s: write verify failed size %d\n",
+				__func__, size);
+			err = -EFAULT;
+		}
+		if (!err) {
+			user32 = kzalloc(size, GFP_KERNEL);
+			if (!user32) {
+				dev_err(rtd->dev,
+					"%s: Allocation event user status size %d\n",
+					__func__, size);
+				err = -EFAULT;
+			} else {
+				user32->timestamp_lsw = user->timestamp_lsw;
+				user32->timestamp_msw = user->timestamp_msw;
+				user32->status = user->status;
+				user32->payload_size = user->payload_size;
+				memcpy(user32->payload,
+				user->payload, user32->payload_size);
+			}
+		}
+		if (!err && (copy_to_user(arg, user32, size))) {
+			dev_err(rtd->dev, "%s: failed to copy payload %d",
+				__func__, size);
+			err = -EFAULT;
+		}
+		kfree(user);
+		kfree(user32);
+		if (err)
+			dev_err(rtd->dev, "%s: lsmevent failed %d",
+				__func__, err);
+		break;
+	}
+
+	case SNDRV_LSM_REG_SND_MODEL_V2_32: {
+		struct snd_lsm_sound_model_v2_32 snd_modelv232;
+		struct snd_lsm_sound_model_v2 snd_modelv2;
+
+		if (prtd->lsm_client->use_topology) {
+			dev_err(rtd->dev,
+				"%s: %s: not supported if using topology\n",
+				__func__, "REG_SND_MODEL_V2");
+			err = -EINVAL;
+			goto done;
+		}
+
+		if (copy_from_user(&snd_modelv232, arg,
+			sizeof(snd_modelv232))) {
+			err = -EFAULT;
+			dev_err(rtd->dev,
+				"%s: copy user failed, size %zd %s\n",
+				__func__,
+				sizeof(struct snd_lsm_sound_model_v2_32),
+				"SNDRV_LSM_REG_SND_MODEL_V2_32");
+		} else {
+			snd_modelv2.confidence_level =
+			compat_ptr(snd_modelv232.confidence_level);
+			snd_modelv2.data = compat_ptr(snd_modelv232.data);
+			snd_modelv2.data_size = snd_modelv232.data_size;
+			snd_modelv2.detect_failure =
+			snd_modelv232.detect_failure;
+			snd_modelv2.detection_mode =
+			snd_modelv232.detection_mode;
+			snd_modelv2.num_confidence_levels =
+			snd_modelv232.num_confidence_levels;
+			cmd = SNDRV_LSM_REG_SND_MODEL_V2;
+			err = msm_lsm_ioctl_shared(substream, cmd,
+				&snd_modelv2);
+			if (err)
+				dev_err(rtd->dev,
+					"%s: ioctl %s failed\n", __func__,
+					"SNDRV_LSM_REG_SND_MODEL_V2_32");
+		}
+		break;
+	}
+
+	case SNDRV_LSM_SET_PARAMS_32:{
+		struct snd_lsm_detection_params_32 det_params32;
+		struct snd_lsm_detection_params det_params;
+
+		if (prtd->lsm_client->use_topology) {
+			dev_err(rtd->dev,
+				"%s: %s: not supported if using topology\n",
+				__func__, "SET_PARAMS_32");
+			err = -EINVAL;
+			goto done;
+		}
+
+		if (copy_from_user(&det_params32, arg,
+				   sizeof(det_params32))) {
+			err = -EFAULT;
+			dev_err(rtd->dev,
+				"%s: %s: copy_from_user failed, size = %zd\n",
+				__func__, "SNDRV_LSM_SET_PARAMS_32",
+				sizeof(det_params32));
+		} else {
+			det_params.conf_level =
+				compat_ptr(det_params32.conf_level);
+			det_params.detect_mode =
+				det_params32.detect_mode;
+			det_params.num_confidence_levels =
+				det_params32.num_confidence_levels;
+			det_params.detect_failure =
+				det_params32.detect_failure;
+			cmd = SNDRV_LSM_SET_PARAMS;
+			err = msm_lsm_ioctl_shared(substream, cmd,
+					&det_params);
+			if (err)
+				dev_err(rtd->dev,
+					"%s: ioctl %s failed\n", __func__,
+					"SNDRV_LSM_SET_PARAMS");
+		}
+		break;
+	}
+
+	case SNDRV_LSM_SET_MODULE_PARAMS_32: {
+		struct snd_lsm_module_params_32 p_data_32;
+		struct snd_lsm_module_params p_data;
+		u8 *params, *params32;
+		size_t p_size;
+		struct lsm_params_info_32 *p_info_32;
+		struct lsm_params_info *p_info;
+		int i;
+
+		if (!prtd->lsm_client->use_topology) {
+			dev_err(rtd->dev,
+				"%s: %s: not supported if not using topology\n",
+				__func__, "SET_MODULE_PARAMS_32");
+			err = -EINVAL;
+			goto done;
+		}
+
+		if (copy_from_user(&p_data_32, arg,
+				   sizeof(p_data_32))) {
+			dev_err(rtd->dev,
+				"%s: %s: copy_from_user failed, size = %zd\n",
+				__func__, "SET_MODULE_PARAMS_32",
+				sizeof(p_data_32));
+			err = -EFAULT;
+			goto done;
+		}
+
+		p_data.params = compat_ptr(p_data_32.params);
+		p_data.num_params = p_data_32.num_params;
+		p_data.data_size = p_data_32.data_size;
+
+		if (p_data.num_params > LSM_PARAMS_MAX) {
+			dev_err(rtd->dev,
+				"%s: %s: Invalid num_params %d\n",
+				__func__, "SET_MODULE_PARAMS_32",
+				p_data.num_params);
+			err = -EINVAL;
+			goto done;
+		}
+
+		if (p_data.data_size !=
+		    (p_data.num_params * sizeof(struct lsm_params_info_32))) {
+			dev_err(rtd->dev,
+				"%s: %s: Invalid size %d\n",
+				__func__, "SET_MODULE_PARAMS_32",
+				p_data.data_size);
+			err = -EINVAL;
+			goto done;
+		}
+
+		p_size = sizeof(struct lsm_params_info_32) *
+			 p_data.num_params;
+
+		params32 = kzalloc(p_size, GFP_KERNEL);
+		if (!params32) {
+			dev_err(rtd->dev,
+				"%s: no memory for params32, size = %zd\n",
+				__func__, p_size);
+			err = -ENOMEM;
+			goto done;
+		}
+
+		p_size = sizeof(struct lsm_params_info) * p_data.num_params;
+		params = kzalloc(p_size, GFP_KERNEL);
+		if (!params) {
+			dev_err(rtd->dev,
+				"%s: no memory for params, size = %zd\n",
+				__func__, p_size);
+			kfree(params32);
+			err = -ENOMEM;
+			goto done;
+		}
+
+		if (copy_from_user(params32, p_data.params,
+				   p_data.data_size)) {
+			dev_err(rtd->dev,
+				"%s: %s: copy_from_user failed, size = %d\n",
+				__func__, "params32", p_data.data_size);
+			kfree(params32);
+			kfree(params);
+			err = -EFAULT;
+			goto done;
+		}
+
+		p_info_32 = (struct lsm_params_info_32 *) params32;
+		p_info = (struct lsm_params_info *) params;
+		for (i = 0; i < p_data.num_params; i++) {
+			p_info->module_id = p_info_32->module_id;
+			p_info->param_id = p_info_32->param_id;
+			p_info->param_size = p_info_32->param_size;
+			p_info->param_data = compat_ptr(p_info_32->param_data);
+			p_info->param_type = p_info_32->param_type;
+
+			p_info_32++;
+			p_info++;
+		}
+
+		err = msm_lsm_process_params(substream,
+					     &p_data, params);
+		if (err)
+			dev_err(rtd->dev,
+				"%s: Failed to process params, err = %d\n",
+				__func__, err);
+		kfree(params);
+		kfree(params32);
+		break;
+	}
+	case SNDRV_LSM_REG_SND_MODEL_V2:
+	case SNDRV_LSM_SET_PARAMS:
+	case SNDRV_LSM_SET_MODULE_PARAMS:
+		/*
+		 * In ideal cases, the compat_ioctl should never be called
+		 * with the above unlocked ioctl commands. Print error
+		 * and return error if it does.
+		 */
+		dev_err(rtd->dev,
+			"%s: Invalid cmd for compat_ioctl\n",
+			__func__);
+		err = -EINVAL;
+		break;
+	default:
+		err = msm_lsm_ioctl_shared(substream, cmd, arg);
+		break;
+	}
+done:
+	mutex_unlock(&prtd->lsm_api_lock);
+	return err;
+}
+#else
+#define msm_lsm_ioctl_compat NULL
+#endif
+
+static int msm_lsm_ioctl(struct snd_pcm_substream *substream,
+			 unsigned int cmd, void *arg)
+{
+	int err = 0;
+	u32 size = 0;
+	struct snd_pcm_runtime *runtime;
+	struct snd_soc_pcm_runtime *rtd;
+	struct lsm_priv *prtd;
+
+	if (!substream || !substream->private_data) {
+		pr_err("%s: Invalid %s\n", __func__,
+			(!substream) ? "substream" : "private_data");
+		return -EINVAL;
+	}
+	runtime = substream->runtime;
+	prtd = runtime->private_data;
+	rtd = substream->private_data;
+
+	mutex_lock(&prtd->lsm_api_lock);
+	switch (cmd) {
+	case SNDRV_LSM_REG_SND_MODEL_V2: {
+		struct snd_lsm_sound_model_v2 snd_model_v2;
+
+		if (prtd->lsm_client->use_topology) {
+			dev_err(rtd->dev,
+				"%s: %s: not supported if using topology\n",
+				__func__, "REG_SND_MODEL_V2");
+			err = -EINVAL;
+			goto done;
+		}
+
+		if (copy_from_user(&snd_model_v2, arg, sizeof(snd_model_v2))) {
+			err = -EFAULT;
+			dev_err(rtd->dev,
+				"%s: copy from user failed, size %zd\n",
+				__func__,
+				sizeof(struct snd_lsm_sound_model_v2));
+		}
+		if (!err)
+			err = msm_lsm_ioctl_shared(substream, cmd,
+						   &snd_model_v2);
+		if (err)
+			dev_err(rtd->dev,
+				"%s REG_SND_MODEL failed err %d\n",
+				__func__, err);
+		goto done;
+	}
+	case SNDRV_LSM_SET_PARAMS: {
+		struct snd_lsm_detection_params det_params;
+
+		if (prtd->lsm_client->use_topology) {
+			dev_err(rtd->dev,
+				"%s: %s: not supported if using topology\n",
+				__func__, "SET_PARAMS");
+			err = -EINVAL;
+			goto done;
+		}
+
+		pr_debug("%s: SNDRV_LSM_SET_PARAMS\n", __func__);
+
+		if (copy_from_user(&det_params, arg,
+				   sizeof(det_params))) {
+			dev_err(rtd->dev,
+				"%s: %s: copy_from_user failed, size %zd\n",
+				__func__, "SNDRV_LSM_SET_PARAMS",
+				sizeof(det_params));
+			err = -EFAULT;
+		}
+
+		if (!err)
+			err = msm_lsm_ioctl_shared(substream, cmd,
+						   &det_params);
+		else
+			dev_err(rtd->dev,
+				"%s: LSM_SET_PARAMS failed, err %d\n",
+				__func__, err);
+
+		goto done;
+	}
+
+	case SNDRV_LSM_SET_MODULE_PARAMS: {
+		struct snd_lsm_module_params p_data;
+		size_t p_size;
+		u8 *params;
+
+		if (!prtd->lsm_client->use_topology) {
+			dev_err(rtd->dev,
+				"%s: %s: not supported if not using topology\n",
+				__func__, "SET_MODULE_PARAMS");
+			err = -EINVAL;
+			goto done;
+		}
+
+		if (copy_from_user(&p_data, arg,
+				   sizeof(p_data))) {
+			dev_err(rtd->dev,
+				"%s: %s: copy_from_user failed, size = %zd\n",
+				__func__, "p_data", sizeof(p_data));
+			err = -EFAULT;
+			goto done;
+		}
+
+		if (p_data.num_params > LSM_PARAMS_MAX) {
+			dev_err(rtd->dev,
+				"%s: %s: Invalid num_params %d\n",
+				__func__, "SET_MODULE_PARAMS",
+				p_data.num_params);
+			err = -EINVAL;
+			goto done;
+		}
+
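+		/*
+		 * The user buffer must contain exactly num_params
+		 * contiguous struct lsm_params_info entries; any other
+		 * data_size is rejected below.
+		 */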
+		p_size = p_data.num_params *
+			 sizeof(struct lsm_params_info);
+
+		if (p_data.data_size != p_size) {
+			dev_err(rtd->dev,
+				"%s: %s: Invalid size %zd\n",
+				__func__, "SET_MODULE_PARAMS", p_size);
+
+			err = -EINVAL;
+			goto done;
+		}
+
+		params = kzalloc(p_size, GFP_KERNEL);
+		if (!params) {
+			dev_err(rtd->dev,
+				"%s: no memory for params\n",
+				__func__);
+			err = -ENOMEM;
+			goto done;
+		}
+
+		if (copy_from_user(params, p_data.params,
+				   p_data.data_size)) {
+			dev_err(rtd->dev,
+				"%s: %s: copy_from_user failed, size = %d\n",
+				__func__, "params", p_data.data_size);
+			kfree(params);
+			err = -EFAULT;
+			goto done;
+		}
+
+		err = msm_lsm_process_params(substream, &p_data, params);
+		if (err)
+			dev_err(rtd->dev,
+				"%s: %s: Failed to set params, err = %d\n",
+				__func__, "SET_MODULE_PARAMS", err);
+		kfree(params);
+		break;
+	}
+
+	case SNDRV_LSM_EVENT_STATUS: {
+		struct snd_lsm_event_status *user = NULL;
+		struct snd_lsm_event_status userarg;
+
+		dev_dbg(rtd->dev,
+			"%s: SNDRV_LSM_EVENT_STATUS\n", __func__);
+		if (copy_from_user(&userarg, arg, sizeof(userarg))) {
+			dev_err(rtd->dev,
+				"%s: err copyuser event_status\n",
+				__func__);
+			err = -EFAULT;
+			goto done;
+		}
+
+		if (userarg.payload_size >
+		    LISTEN_MAX_STATUS_PAYLOAD_SIZE) {
+			pr_err("%s: payload_size %d is invalid, max allowed = %d\n",
+				__func__, userarg.payload_size,
+				LISTEN_MAX_STATUS_PAYLOAD_SIZE);
+			err = -EINVAL;
+			goto done;
+		}
+
+		size = sizeof(struct snd_lsm_event_status) +
+		       userarg.payload_size;
+		user = kzalloc(size, GFP_KERNEL);
+		if (!user) {
+			dev_err(rtd->dev,
+				"%s: Allocation failed event status size %d\n",
+				__func__, size);
+			err = -ENOMEM;
+			goto done;
+		}
+		user->payload_size = userarg.payload_size;
+		err = msm_lsm_ioctl_shared(substream, cmd, user);
+		/* Update size with actual payload size */
+		size = sizeof(*user) + user->payload_size;
+		if (!err && !access_ok(VERIFY_WRITE, arg, size)) {
+			dev_err(rtd->dev,
+				"%s: write verify failed size %d\n",
+				__func__, size);
+			err = -EFAULT;
+		}
+		if (!err && (copy_to_user(arg, user, size))) {
+			dev_err(rtd->dev,
+				"%s: failed to copy payload %d\n",
+				__func__, size);
+			err = -EFAULT;
+		}
+		kfree(user);
+		if (err)
+			dev_err(rtd->dev,
+				"%s: lsmevent failed %d\n", __func__, err);
+		goto done;
+	}
+
+	case SNDRV_LSM_EVENT_STATUS_V3: {
+		struct snd_lsm_event_status_v3 *user = NULL;
+		struct snd_lsm_event_status_v3 userarg;
+
+		dev_dbg(rtd->dev,
+			"%s: SNDRV_LSM_EVENT_STATUS_V3\n", __func__);
+		if (!arg) {
+			dev_err(rtd->dev,
+				"%s: Invalid params event_status_v3\n",
+				__func__);
+			err = -EINVAL;
+			goto done;
+		}
+		if (copy_from_user(&userarg, arg, sizeof(userarg))) {
+			dev_err(rtd->dev,
+				"%s: err copyuser event_status_v3\n",
+				__func__);
+			err = -EFAULT;
+			goto done;
+		}
+
+		if (userarg.payload_size >
+		    LISTEN_MAX_STATUS_PAYLOAD_SIZE) {
+			pr_err("%s: payload_size %d is invalid, max allowed = %d\n",
+				__func__, userarg.payload_size,
+				LISTEN_MAX_STATUS_PAYLOAD_SIZE);
+			err = -EINVAL;
+			goto done;
+		}
+
+		size = sizeof(struct snd_lsm_event_status_v3) +
+			userarg.payload_size;
+		user = kzalloc(size, GFP_KERNEL);
+		if (!user) {
+			dev_err(rtd->dev,
+				"%s: Allocation failed event status size %d\n",
+				__func__, size);
+			err = -ENOMEM;
+			goto done;
+		}
+		user->payload_size = userarg.payload_size;
+		err = msm_lsm_ioctl_shared(substream, cmd, user);
+
+		/* Update size with actual payload size */
+		size = sizeof(*user) + user->payload_size;
+		if (!err && !access_ok(VERIFY_WRITE, arg, size)) {
+			dev_err(rtd->dev,
+				"%s: write verify failed size %d\n",
+				__func__, size);
+			err = -EFAULT;
+		}
+		if (!err && (copy_to_user(arg, user, size))) {
+			dev_err(rtd->dev,
+				"%s: failed to copy payload %d\n",
+				__func__, size);
+			err = -EFAULT;
+		}
+		kfree(user);
+		if (err)
+			dev_err(rtd->dev,
+				"%s: lsm_event_v3 failed %d\n", __func__, err);
+		break;
+	}
+
+	default:
+		err = msm_lsm_ioctl_shared(substream, cmd, arg);
+		break;
+	}
+done:
+	mutex_unlock(&prtd->lsm_api_lock);
+	return err;
+}
+
+static int msm_lsm_open(struct snd_pcm_substream *substream)
+{
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	struct lsm_priv *prtd;
+	int ret = 0;
+
+	pr_debug("%s\n", __func__);
+	prtd = kzalloc(sizeof(struct lsm_priv), GFP_KERNEL);
+	if (!prtd) {
+		pr_err("%s: Failed to allocate memory for lsm_priv\n",
+		       __func__);
+		return -ENOMEM;
+	}
+	mutex_init(&prtd->lsm_api_lock);
+	spin_lock_init(&prtd->event_lock);
+	init_waitqueue_head(&prtd->event_wait);
+	init_waitqueue_head(&prtd->period_wait);
+	prtd->substream = substream;
+	runtime->private_data = prtd;
+	runtime->hw = msm_pcm_hardware_capture;
+
+	ret = snd_pcm_hw_constraint_list(runtime, 0,
+				SNDRV_PCM_HW_PARAM_RATE,
+				&constraints_sample_rates);
+	if (ret < 0)
+		pr_info("%s: snd_pcm_hw_constraint_list failed ret %d\n",
+			 __func__, ret);
+	/* Ensure that buffer size is a multiple of period size */
+	ret = snd_pcm_hw_constraint_integer(runtime,
+			    SNDRV_PCM_HW_PARAM_PERIODS);
+	if (ret < 0)
+		pr_info("%s: snd_pcm_hw_constraint_integer failed ret %d\n",
+			__func__, ret);
+
+	ret = snd_pcm_hw_constraint_minmax(runtime,
+		SNDRV_PCM_HW_PARAM_BUFFER_BYTES,
+		CAPTURE_MIN_NUM_PERIODS * CAPTURE_MIN_PERIOD_SIZE,
+		CAPTURE_MAX_NUM_PERIODS * CAPTURE_MAX_PERIOD_SIZE);
+	if (ret < 0)
+		pr_info("%s: constraint for buffer bytes min max ret = %d\n",
+			__func__, ret);
+	ret = snd_pcm_hw_constraint_step(runtime, 0,
+		SNDRV_PCM_HW_PARAM_PERIOD_BYTES, 32);
+	if (ret < 0) {
+		pr_info("%s: constraint for period bytes step ret = %d\n",
+			__func__, ret);
+	}
+	ret = snd_pcm_hw_constraint_step(runtime, 0,
+		SNDRV_PCM_HW_PARAM_BUFFER_BYTES, 32);
+	if (ret < 0)
+		pr_info("%s: constraint for buffer bytes step ret = %d\n",
+			__func__, ret);
+	prtd->lsm_client = q6lsm_client_alloc(
+				(lsm_app_cb)lsm_event_handler, prtd);
+	if (!prtd->lsm_client) {
+		pr_err("%s: Could not allocate memory\n", __func__);
+		kfree(prtd);
+		runtime->private_data = NULL;
+		return -ENOMEM;
+	}
+	prtd->lsm_client->opened = false;
+	prtd->lsm_client->session_state = IDLE;
+	prtd->lsm_client->poll_enable = true;
+	prtd->lsm_client->perf_mode = 0;
+	prtd->lsm_client->event_mode = LSM_EVENT_NON_TIME_STAMP_MODE;
+
+	return 0;
+}
+
+static int msm_lsm_prepare(struct snd_pcm_substream *substream)
+{
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	struct lsm_priv *prtd = runtime->private_data;
+	struct snd_soc_pcm_runtime *rtd;
+	int ret = 0;
+
+	if (!substream->private_data) {
+		pr_err("%s: Invalid private_data\n", __func__);
+		return -EINVAL;
+	}
+
+	rtd = prtd->substream->private_data;
+
+	if (!prtd->lsm_client) {
+		dev_err(rtd->dev,
+			"%s: LSM client data ptr is NULL\n", __func__);
+		return -EINVAL;
+	}
+
+	if (q6lsm_set_media_fmt_params(prtd->lsm_client))
+		dev_dbg(rtd->dev,
+			"%s: failed to set lsm media fmt params\n", __func__);
+
+	if (prtd->lsm_client->session_state == IDLE) {
+		ret = msm_pcm_routing_reg_phy_compr_stream(
+				rtd->dai_link->be_id,
+				prtd->lsm_client->perf_mode,
+				prtd->lsm_client->session,
+				SNDRV_PCM_STREAM_CAPTURE,
+				LISTEN);
+		if (ret) {
+			dev_err(rtd->dev,
+				"%s: register phy compr stream failed %d\n",
+					__func__, ret);
+			return ret;
+		}
+	}
+
+	prtd->lsm_client->session_state = RUNNING;
+	prtd->lsm_client->started = false;
+	runtime->private_data = prtd;
+	return ret;
+}
+
+static int msm_lsm_close(struct snd_pcm_substream *substream)
+{
+	unsigned long flags;
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	struct lsm_priv *prtd = runtime->private_data;
+	struct snd_soc_pcm_runtime *rtd;
+	int ret = 0;
+
+	if (!substream->private_data) {
+		pr_err("%s: Invalid private_data\n", __func__);
+		return -EINVAL;
+	}
+	if (!prtd || !prtd->lsm_client) {
+		pr_err("%s: No LSM session active\n", __func__);
+		return -EINVAL;
+	}
+	rtd = substream->private_data;
+
+	dev_dbg(rtd->dev, "%s\n", __func__);
+	if (prtd->lsm_client->started) {
+		ret = q6lsm_stop(prtd->lsm_client, true);
+		if (ret)
+			dev_err(rtd->dev,
+				"%s: session stop failed, err = %d\n",
+				__func__, ret);
+		else
+			dev_dbg(rtd->dev,
+				"%s: LSM client session stopped %d\n",
+				 __func__, ret);
+
+		/*
+		 * Go ahead and try to deregister the sound model
+		 * even if the stop failed.
+		 */
+		prtd->lsm_client->started = false;
+
+		ret = q6lsm_deregister_sound_model(prtd->lsm_client);
+		if (ret)
+			dev_err(rtd->dev,
+				"%s: dereg_snd_model failed, err = %d\n",
+				__func__, ret);
+		else
+			dev_dbg(rtd->dev, "%s: dereg_snd_model successful\n",
+				 __func__);
+	}
+
+	msm_pcm_routing_dereg_phy_stream(rtd->dai_link->be_id,
+					SNDRV_PCM_STREAM_CAPTURE);
+
+	if (prtd->lsm_client->opened) {
+		q6lsm_close(prtd->lsm_client);
+		prtd->lsm_client->opened = false;
+	}
+	q6lsm_client_free(prtd->lsm_client);
+
+	spin_lock_irqsave(&prtd->event_lock, flags);
+	kfree(prtd->event_status);
+	prtd->event_status = NULL;
+	spin_unlock_irqrestore(&prtd->event_lock, flags);
+	mutex_destroy(&prtd->lsm_api_lock);
+	kfree(prtd);
+	runtime->private_data = NULL;
+
+	return 0;
+}
+
+static int msm_lsm_hw_params(struct snd_pcm_substream *substream,
+				struct snd_pcm_hw_params *params)
+{
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	struct lsm_priv *prtd = runtime->private_data;
+	struct lsm_hw_params *hw_params = NULL;
+	struct snd_soc_pcm_runtime *rtd;
+
+	if (!substream->private_data) {
+		pr_err("%s: Invalid private_data\n", __func__);
+		return -EINVAL;
+	}
+	rtd = substream->private_data;
+
+	if (!prtd || !params) {
+		dev_err(rtd->dev,
+			"%s: invalid params prtd %pK params %pK",
+			 __func__, prtd, params);
+		return -EINVAL;
+	}
+	hw_params = &prtd->lsm_client->hw_params;
+	hw_params->num_chs = params_channels(params);
+	hw_params->period_count = params_periods(params);
+	hw_params->sample_rate = params_rate(params);
+	if (((hw_params->sample_rate != 16000) &&
+		(hw_params->sample_rate != 48000)) ||
+		(hw_params->period_count == 0)) {
+		dev_err(rtd->dev,
+			"%s: Invalid Params sample rate %d period count %d\n",
+			__func__, hw_params->sample_rate,
+			hw_params->period_count);
+		return -EINVAL;
+	}
+
+	if (params_format(params) == SNDRV_PCM_FORMAT_S16_LE) {
+		hw_params->sample_size = 16;
+	} else if (params_format(params) == SNDRV_PCM_FORMAT_S24_LE) {
+		hw_params->sample_size = 24;
+	} else {
+		dev_err(rtd->dev, "%s: Invalid Format 0x%x\n",
+			__func__, params_format(params));
+		return -EINVAL;
+	}
+
+	hw_params->buf_sz = params_buffer_bytes(params) /
+			hw_params->period_count;
+	dev_dbg(rtd->dev,
+		"%s: channels %d sample rate %d sample size %d buffer size %d period count %d\n",
+		__func__, hw_params->num_chs, hw_params->sample_rate,
+		hw_params->sample_size, hw_params->buf_sz,
+		hw_params->period_count);
+	return 0;
+}
+
+static snd_pcm_uframes_t msm_lsm_pcm_pointer(
+	struct snd_pcm_substream *substream)
+{
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	struct lsm_priv *prtd = runtime->private_data;
+	struct snd_soc_pcm_runtime *rtd;
+
+	if (!substream->private_data) {
+		pr_err("%s: Invalid private_data\n", __func__);
+		return -EINVAL;
+	}
+	rtd = substream->private_data;
+
+	if (!prtd) {
+		dev_err(rtd->dev,
+			"%s: Invalid param %pK\n", __func__, prtd);
+		return 0;
+	}
+
+	if (prtd->dma_write >= snd_pcm_lib_buffer_bytes(substream))
+		prtd->dma_write = 0;
+	dev_dbg(rtd->dev,
+		"%s: dma post = %d\n", __func__, prtd->dma_write);
+	return bytes_to_frames(runtime, prtd->dma_write);
+}
+
+static int msm_lsm_pcm_copy(struct snd_pcm_substream *substream, int ch,
+	snd_pcm_uframes_t hwoff, void __user *buf, snd_pcm_uframes_t frames)
+{
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	struct lsm_priv *prtd = runtime->private_data;
+	char *pcm_buf = NULL;
+	int fbytes = 0, rc = 0;
+	struct snd_soc_pcm_runtime *rtd;
+
+	if (!substream->private_data) {
+		pr_err("%s: Invalid private_data\n", __func__);
+		return -EINVAL;
+	}
+	rtd = substream->private_data;
+
+	if (!prtd) {
+		dev_err(rtd->dev,
+			"%s: Invalid param %pK\n", __func__, prtd);
+		return -EINVAL;
+	}
+
+	fbytes = frames_to_bytes(runtime, frames);
+	if (runtime->status->state == SNDRV_PCM_STATE_XRUN ||
+	    runtime->status->state == SNDRV_PCM_STATE_PREPARED) {
+		dev_err(rtd->dev,
+			"%s: runtime state incorrect %d", __func__,
+			runtime->status->state);
+		return 0;
+	}
+	rc = wait_event_timeout(prtd->period_wait,
+		(atomic_read(&prtd->buf_count) |
+		atomic_read(&prtd->read_abort)), (2 * HZ));
+	if (!rc) {
+		dev_err(rtd->dev,
+			"%s: timeout for read retry\n", __func__);
+		return -EAGAIN;
+	}
+	if (atomic_read(&prtd->read_abort)) {
+		dev_err(rtd->dev,
+			"%s: Read abort received\n", __func__);
+		return -EIO;
+	}
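+	/*
+	 * appl_cnt is the application-side read index into the LAB
+	 * buffer period ring filled by the DSP.
+	 */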
+	prtd->appl_cnt = prtd->appl_cnt %
+		prtd->lsm_client->hw_params.period_count;
+	pcm_buf = prtd->lsm_client->lab_buffer[prtd->appl_cnt].data;
+	dev_dbg(rtd->dev,
+		"%s: copy the pcm data size %d\n",
+		__func__, fbytes);
+	if (pcm_buf) {
+		if (copy_to_user(buf, pcm_buf, fbytes)) {
+			dev_err(rtd->dev,
+				"%s: failed to copy bytes %d\n",
+				__func__, fbytes);
+			return -EINVAL;
+		}
+	} else {
+		dev_err(rtd->dev,
+			"%s: Invalid pcm buffer\n", __func__);
+		return -EINVAL;
+	}
+	prtd->appl_cnt = (prtd->appl_cnt + 1) %
+		prtd->lsm_client->hw_params.period_count;
+	atomic_dec(&prtd->buf_count);
+	return 0;
+}
+
+static int msm_lsm_app_type_cfg_ctl_put(struct snd_kcontrol *kcontrol,
+					struct snd_ctl_elem_value *ucontrol)
+{
+	u64 fe_id = kcontrol->private_value;
+	int session_type = SESSION_TYPE_TX;
+	int be_id = ucontrol->value.integer.value[3];
+	struct msm_pcm_stream_app_type_cfg cfg_data = {0};
+	int ret = 0;
+
+	cfg_data.app_type = ucontrol->value.integer.value[0];
+	cfg_data.acdb_dev_id = ucontrol->value.integer.value[1];
+	cfg_data.sample_rate = ucontrol->value.integer.value[2];
+
+	pr_debug("%s: fe_id- %llu session_type- %d be_id- %d app_type- %d acdb_dev_id- %d sample_rate- %d\n",
+		__func__, fe_id, session_type, be_id,
+		cfg_data.app_type, cfg_data.acdb_dev_id, cfg_data.sample_rate);
+	ret = msm_pcm_routing_reg_stream_app_type_cfg(fe_id, session_type,
+						      be_id, &cfg_data);
+	if (ret < 0)
+		pr_err("%s: msm_pcm_routing_reg_stream_app_type_cfg failed returned %d\n",
+			__func__, ret);
+
+	return 0;
+}
+
+static int msm_lsm_app_type_cfg_ctl_get(struct snd_kcontrol *kcontrol,
+					struct snd_ctl_elem_value *ucontrol)
+{
+	u64 fe_id = kcontrol->private_value;
+	int session_type = SESSION_TYPE_TX;
+	int be_id = 0;
+	struct msm_pcm_stream_app_type_cfg cfg_data = {0};
+	int ret = 0;
+
+	ret = msm_pcm_routing_get_stream_app_type_cfg(fe_id, session_type,
+						      &be_id, &cfg_data);
+	if (ret < 0) {
+		pr_err("%s: msm_pcm_routing_get_stream_app_type_cfg failed returned %d\n",
+			__func__, ret);
+		goto done;
+	}
+
+	ucontrol->value.integer.value[0] = cfg_data.app_type;
+	ucontrol->value.integer.value[1] = cfg_data.acdb_dev_id;
+	ucontrol->value.integer.value[2] = cfg_data.sample_rate;
+	ucontrol->value.integer.value[3] = be_id;
+	pr_debug("%s: fedai_id %llu, session_type %d, be_id %d, app_type %d, acdb_dev_id %d, sample_rate %d\n",
+		__func__, fe_id, session_type, be_id,
+		cfg_data.app_type, cfg_data.acdb_dev_id, cfg_data.sample_rate);
+done:
+	return ret;
+}
+
+static int msm_lsm_add_app_type_controls(struct snd_soc_pcm_runtime *rtd)
+{
+	struct snd_pcm *pcm = rtd->pcm;
+	struct snd_pcm_usr *app_type_info;
+	struct snd_kcontrol *kctl;
+	const char *mixer_ctl_name	= "Listen Stream";
+	const char *deviceNo		= "NN";
+	const char *suffix		= "App Type Cfg";
+	int ctl_len, ret = 0;
+
+	ctl_len = strlen(mixer_ctl_name) + 1 +
+			strlen(deviceNo) + 1 + strlen(suffix) + 1;
+	pr_debug("%s: Listen app type control add\n", __func__);
+	ret = snd_pcm_add_usr_ctls(pcm, SNDRV_PCM_STREAM_CAPTURE,
+				NULL, 1, ctl_len, rtd->dai_link->be_id,
+				&app_type_info);
+	if (ret < 0) {
+		pr_err("%s: Listen app type control add failed: %d\n",
+			__func__, ret);
+		return ret;
+	}
+	kctl = app_type_info->kctl;
+	snprintf(kctl->id.name, ctl_len, "%s %d %s",
+		mixer_ctl_name, rtd->pcm->device, suffix);
+	kctl->put = msm_lsm_app_type_cfg_ctl_put;
+	kctl->get = msm_lsm_app_type_cfg_ctl_get;
+	return 0;
+}
+
+static int msm_lsm_add_controls(struct snd_soc_pcm_runtime *rtd)
+{
+	int ret = 0;
+
+	ret = msm_lsm_add_app_type_controls(rtd);
+	if (ret)
+		pr_err("%s: add app type controls failed: %d\n", __func__, ret);
+
+	return ret;
+}
+
+static struct snd_pcm_ops msm_lsm_ops = {
+	.open           = msm_lsm_open,
+	.close          = msm_lsm_close,
+	.ioctl          = msm_lsm_ioctl,
+	.prepare	= msm_lsm_prepare,
+	.compat_ioctl   = msm_lsm_ioctl_compat,
+	.hw_params      = msm_lsm_hw_params,
+	.copy           = msm_lsm_pcm_copy,
+	.pointer        = msm_lsm_pcm_pointer,
+};
+
+static int msm_asoc_lsm_new(struct snd_soc_pcm_runtime *rtd)
+{
+	struct snd_card *card = rtd->card->snd_card;
+	int ret = 0;
+
+	if (!card->dev->coherent_dma_mask)
+		card->dev->coherent_dma_mask = DMA_BIT_MASK(32);
+
+	ret = msm_lsm_add_controls(rtd);
+	if (ret)
+		pr_err("%s: kctl add failed: %d\n", __func__, ret);
+
+	return ret;
+}
+
+static int msm_asoc_lsm_probe(struct snd_soc_platform *platform)
+{
+	pr_debug("enter %s\n", __func__);
+
+	return 0;
+}
+
+static struct snd_soc_platform_driver msm_soc_platform = {
+	.ops		= &msm_lsm_ops,
+	.pcm_new	= msm_asoc_lsm_new,
+	.probe		= msm_asoc_lsm_probe,
+};
+
+static int msm_lsm_probe(struct platform_device *pdev)
+{
+	return snd_soc_register_platform(&pdev->dev, &msm_soc_platform);
+}
+
+static int msm_lsm_remove(struct platform_device *pdev)
+{
+	snd_soc_unregister_platform(&pdev->dev);
+
+	return 0;
+}
+
+static const struct of_device_id msm_lsm_client_dt_match[] = {
+	{.compatible = "qcom,msm-lsm-client" },
+	{ }
+};
+
+static struct platform_driver msm_lsm_driver = {
+	.driver = {
+		.name = "msm-lsm-client",
+		.owner = THIS_MODULE,
+		.of_match_table = of_match_ptr(msm_lsm_client_dt_match),
+	},
+	.probe = msm_lsm_probe,
+	.remove = msm_lsm_remove,
+};
+
+static int __init msm_soc_platform_init(void)
+{
+	return platform_driver_register(&msm_lsm_driver);
+}
+module_init(msm_soc_platform_init);
+
+static void __exit msm_soc_platform_exit(void)
+{
+	platform_driver_unregister(&msm_lsm_driver);
+}
+module_exit(msm_soc_platform_exit);
+
+MODULE_DESCRIPTION("LSM client platform driver");
+MODULE_DEVICE_TABLE(of, msm_lsm_client_dt_match);
+MODULE_LICENSE("GPL v2");
diff -Nruw linux-4.4.115-fbx/sound/soc/msm./qdsp6v2/msm-pcm-afe-v2.c linux-4.4.115-fbx/sound/soc/msm/qdsp6v2/msm-pcm-afe-v2.c
--- linux-4.4.115-fbx/sound/soc/msm./qdsp6v2/msm-pcm-afe-v2.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/sound/soc/msm/qdsp6v2/msm-pcm-afe-v2.c	2019-01-22 16:16:29.627301863 +0100
@@ -0,0 +1,924 @@
+/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/time.h>
+#include <linux/wait.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/dma-mapping.h>
+#include <linux/msm_audio_ion.h>
+
+#include <sound/core.h>
+#include <sound/soc.h>
+#include <sound/soc-dapm.h>
+#include <sound/pcm.h>
+#include <sound/initval.h>
+#include <sound/control.h>
+#include <sound/q6adm-v2.h>
+#include <asm/dma.h>
+#include "msm-pcm-afe-v2.h"
+
+#define MIN_PLAYBACK_PERIOD_SIZE (128 * 2)
+#define MAX_PLAYBACK_PERIOD_SIZE (128 * 2 * 2 * 6)
+#define MIN_PLAYBACK_NUM_PERIODS (4)
+#define MAX_PLAYBACK_NUM_PERIODS (384)
+
+#define MIN_CAPTURE_PERIOD_SIZE (128 * 2)
+#define MAX_CAPTURE_PERIOD_SIZE (192 * 2 * 2 * 8 * 4)
+#define MIN_CAPTURE_NUM_PERIODS (4)
+#define MAX_CAPTURE_NUM_PERIODS (384)
+
+static struct snd_pcm_hardware msm_afe_hardware_playback = {
+	.info =                 (SNDRV_PCM_INFO_MMAP |
+				SNDRV_PCM_INFO_BLOCK_TRANSFER |
+				SNDRV_PCM_INFO_MMAP_VALID |
+				SNDRV_PCM_INFO_INTERLEAVED),
+	.formats =              SNDRV_PCM_FMTBIT_S16_LE |
+				SNDRV_PCM_FMTBIT_S24_LE,
+	.rates =                (SNDRV_PCM_RATE_8000 |
+				SNDRV_PCM_RATE_16000 |
+				SNDRV_PCM_RATE_48000),
+	.rate_min =             8000,
+	.rate_max =             48000,
+	.channels_min =         1,
+	.channels_max =         6,
+	.buffer_bytes_max =     MAX_PLAYBACK_PERIOD_SIZE *
+				MAX_PLAYBACK_NUM_PERIODS,
+	.period_bytes_min =     MIN_PLAYBACK_PERIOD_SIZE,
+	.period_bytes_max =     MAX_PLAYBACK_PERIOD_SIZE,
+	.periods_min =          MIN_PLAYBACK_NUM_PERIODS,
+	.periods_max =          MAX_PLAYBACK_NUM_PERIODS,
+	.fifo_size =            0,
+};
+
+static struct snd_pcm_hardware msm_afe_hardware_capture = {
+	.info =                 (SNDRV_PCM_INFO_MMAP |
+				SNDRV_PCM_INFO_BLOCK_TRANSFER |
+				SNDRV_PCM_INFO_MMAP_VALID |
+				SNDRV_PCM_INFO_INTERLEAVED),
+	.formats =              SNDRV_PCM_FMTBIT_S16_LE |
+				SNDRV_PCM_FMTBIT_S24_LE,
+	.rates =                (SNDRV_PCM_RATE_8000 |
+				SNDRV_PCM_RATE_16000 |
+				SNDRV_PCM_RATE_48000),
+	.rate_min =             8000,
+	.rate_max =             48000,
+	.channels_min =         1,
+	.channels_max =         6,
+	.buffer_bytes_max =     MAX_CAPTURE_PERIOD_SIZE *
+				MAX_CAPTURE_NUM_PERIODS,
+	.period_bytes_min =     MIN_CAPTURE_PERIOD_SIZE,
+	.period_bytes_max =     MAX_CAPTURE_PERIOD_SIZE,
+	.periods_min =          MIN_CAPTURE_NUM_PERIODS,
+	.periods_max =          MAX_CAPTURE_NUM_PERIODS,
+	.fifo_size =            0,
+};
+
+static enum hrtimer_restart afe_hrtimer_callback(struct hrtimer *hrt);
+static enum hrtimer_restart afe_hrtimer_rec_callback(struct hrtimer *hrt);
+
+static enum hrtimer_restart afe_hrtimer_callback(struct hrtimer *hrt)
+{
+	struct pcm_afe_info *prtd =
+		container_of(hrt, struct pcm_afe_info, hrt);
+	struct snd_pcm_substream *substream = prtd->substream;
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	u32 mem_map_handle = 0;
+
+	mem_map_handle = afe_req_mmap_handle(prtd->audio_client);
+	if (!mem_map_handle)
+		pr_err("%s: mem_map_handle is NULL\n", __func__);
+
+	if (prtd->start) {
+		pr_debug("sending frame to DSP: poll_time: %d\n",
+				prtd->poll_time);
+		if (prtd->dsp_cnt == runtime->periods)
+			prtd->dsp_cnt = 0;
+		pr_debug("%s: mem_map_handle 0x%x\n", __func__, mem_map_handle);
+		afe_rt_proxy_port_write(
+		(prtd->dma_addr +
+		(prtd->dsp_cnt *
+		snd_pcm_lib_period_bytes(prtd->substream))), mem_map_handle,
+		snd_pcm_lib_period_bytes(prtd->substream));
+		prtd->dsp_cnt++;
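+		/* poll_time is in microseconds; hrtimer expects nanoseconds */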
+		hrtimer_forward_now(hrt, ns_to_ktime(prtd->poll_time
+					* 1000));
+
+		return HRTIMER_RESTART;
+	}
+
+	return HRTIMER_NORESTART;
+}
+
+static enum hrtimer_restart afe_hrtimer_rec_callback(struct hrtimer *hrt)
+{
+	struct pcm_afe_info *prtd =
+		container_of(hrt, struct pcm_afe_info, hrt);
+	struct snd_pcm_substream *substream = prtd->substream;
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	u32 mem_map_handle = 0;
+	int ret;
+
+	mem_map_handle = afe_req_mmap_handle(prtd->audio_client);
+	if (!mem_map_handle)
+		pr_err("%s: mem_map_handle is NULL\n", __func__);
+
+	if (prtd->start) {
+		if (prtd->dsp_cnt == runtime->periods)
+			prtd->dsp_cnt = 0;
+		pr_debug("%s: mem_map_handle 0x%x\n", __func__, mem_map_handle);
+		ret = afe_rt_proxy_port_read(
+		(prtd->dma_addr + (prtd->dsp_cnt
+		* snd_pcm_lib_period_bytes(prtd->substream))), mem_map_handle,
+		snd_pcm_lib_period_bytes(prtd->substream));
+		if (ret < 0) {
+			pr_err("%s: AFE port read fails: %d\n", __func__, ret);
+			prtd->start = 0;
+			return HRTIMER_NORESTART;
+		}
+		prtd->dsp_cnt++;
+		pr_debug("sending frame rec to DSP: poll_time: %d\n",
+				prtd->poll_time);
+		hrtimer_forward_now(hrt, ns_to_ktime(prtd->poll_time
+				* 1000));
+
+		return HRTIMER_RESTART;
+	}
+
+	return HRTIMER_NORESTART;
+}
+
+static void pcm_afe_process_tx_pkt(uint32_t opcode,
+		uint32_t token, uint32_t *payload,
+		 void *priv)
+{
+	struct pcm_afe_info *prtd = priv;
+	unsigned long dsp_flags;
+	struct snd_pcm_substream *substream = NULL;
+	struct snd_pcm_runtime *runtime = NULL;
+	uint16_t event;
+	uint64_t period_bytes;
+	uint64_t bytes_one_sec;
+
+	if (prtd == NULL)
+		return;
+	substream =  prtd->substream;
+	runtime = substream->runtime;
+	pr_debug("%s\n", __func__);
+	spin_lock_irqsave(&prtd->dsp_lock, dsp_flags);
+	switch (opcode) {
+	case AFE_EVENT_RT_PROXY_PORT_STATUS: {
+		event = (uint16_t)((0xFFFF0000 & payload[0]) >> 0x10);
+		switch (event) {
+		case AFE_EVENT_RTPORT_START: {
+			prtd->dsp_cnt = 0;
+			/* Calculate the poll time, i.e. the time that
+			 * corresponds to one period worth of bytes.
+			 * Split the steps to avoid overflow:
+			 * (sample rate * channel count * sample size) =
+			 * bytes in one second.
+			 * Poll time = (period bytes / bytes in one second) *
+			 * 1000000 microseconds.
+			 * The multiplication by 1000000 is done in two steps
+			 * to keep the accuracy of the poll time.
+			 */
+			if (prtd->mmap_flag) {
+				period_bytes = ((uint64_t)(
+					(snd_pcm_lib_period_bytes(
+						prtd->substream)) *
+						1000));
+				bytes_one_sec = (runtime->rate
+					* runtime->channels * 2);
+				bytes_one_sec =
+					div_u64(bytes_one_sec, 1000);
+				prtd->poll_time =
+					div_u64(period_bytes,
+					bytes_one_sec);
+				pr_debug("prtd->poll_time: %d\n",
+						prtd->poll_time);
+			}
+			break;
+		}
+		case AFE_EVENT_RTPORT_STOP:
+			pr_debug("%s: AFE_EVENT_RTPORT_STOP\n", __func__);
+			prtd->start = 0;
+			snd_pcm_stop(substream, SNDRV_PCM_STATE_SETUP);
+			break;
+		case AFE_EVENT_RTPORT_LOW_WM:
+			pr_debug("%s: Underrun\n", __func__);
+			break;
+		case AFE_EVENT_RTPORT_HI_WM:
+			pr_debug("%s: Overrun\n", __func__);
+			break;
+		default:
+			break;
+		}
+		break;
+	}
+	case APR_BASIC_RSP_RESULT: {
+		switch (payload[0]) {
+		case AFE_PORT_DATA_CMD_RT_PROXY_PORT_WRITE_V2:
+			pr_debug("write done\n");
+			prtd->pcm_irq_pos += snd_pcm_lib_period_bytes
+							(prtd->substream);
+			snd_pcm_period_elapsed(prtd->substream);
+			break;
+		default:
+			break;
+		}
+		break;
+	}
+	case RESET_EVENTS:
+		prtd->pcm_irq_pos += snd_pcm_lib_period_bytes
+						(prtd->substream);
+		prtd->reset_event = true;
+		snd_pcm_period_elapsed(prtd->substream);
+		break;
+	default:
+		break;
+	}
+	spin_unlock_irqrestore(&prtd->dsp_lock, dsp_flags);
+}
+
+static void pcm_afe_process_rx_pkt(uint32_t opcode,
+		uint32_t token, uint32_t *payload,
+		 void *priv)
+{
+	struct pcm_afe_info *prtd = priv;
+	unsigned long dsp_flags;
+	struct snd_pcm_substream *substream = NULL;
+	struct snd_pcm_runtime *runtime = NULL;
+	uint16_t event;
+	uint64_t period_bytes;
+	uint64_t bytes_one_sec;
+	uint32_t mem_map_handle = 0;
+
+	if (prtd == NULL)
+		return;
+	substream =  prtd->substream;
+	runtime = substream->runtime;
+	pr_debug("%s\n", __func__);
+	spin_lock_irqsave(&prtd->dsp_lock, dsp_flags);
+	switch (opcode) {
+	case AFE_EVENT_RT_PROXY_PORT_STATUS: {
+		event = (uint16_t)((0xFFFF0000 & payload[0]) >> 0x10);
+		switch (event) {
+		case AFE_EVENT_RTPORT_START: {
+			prtd->dsp_cnt = 0;
+			/* Calculate the poll time, i.e. the time that
+			 * corresponds to one period worth of bytes.
+			 * Split the steps to avoid overflow:
+			 * (sample rate * channel count * sample size) =
+			 * bytes in one second.
+			 * Poll time = (period bytes / bytes in one second) *
+			 * 1000000 microseconds.
+			 * The multiplication by 1000000 is done in two steps
+			 * to keep the accuracy of the poll time.
+			 */
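+			/* Worked example with illustrative values: a
+			 * 1536-byte period at 48000 Hz, 2 channels,
+			 * 16-bit samples gives 192000 bytes/sec, so
+			 * poll_time = (1536 * 1000) / (192000 / 1000)
+			 * = 8000 microseconds.
+			 */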
+			if (prtd->mmap_flag) {
+				period_bytes = ((uint64_t)(
+					(snd_pcm_lib_period_bytes(
+					prtd->substream)) * 1000));
+				bytes_one_sec = (runtime->rate *
+						runtime->channels * 2);
+				bytes_one_sec = div_u64(bytes_one_sec, 1000);
+				prtd->poll_time =
+					div_u64(period_bytes, bytes_one_sec);
+				pr_debug("prtd->poll_time: %d\n",
+					prtd->poll_time);
+			} else {
+				mem_map_handle =
+					afe_req_mmap_handle(prtd->audio_client);
+				if (!mem_map_handle)
+					pr_err("%s: mem_map_handle is NULL\n",
+							 __func__);
+				/* Do initial read to start transfer */
+				afe_rt_proxy_port_read((prtd->dma_addr +
+					(prtd->dsp_cnt *
+					snd_pcm_lib_period_bytes(
+						prtd->substream))),
+					mem_map_handle,
+					snd_pcm_lib_period_bytes(
+						prtd->substream));
+				prtd->dsp_cnt++;
+			}
+			break;
+		}
+		case AFE_EVENT_RTPORT_STOP:
+			pr_debug("%s: AFE_EVENT_RTPORT_STOP\n", __func__);
+			prtd->start = 0;
+			snd_pcm_stop(substream, SNDRV_PCM_STATE_SETUP);
+			break;
+		case AFE_EVENT_RTPORT_LOW_WM:
+			pr_debug("%s: Underrun\n", __func__);
+			break;
+		case AFE_EVENT_RTPORT_HI_WM:
+			pr_debug("%s: Overrun\n", __func__);
+			break;
+		default:
+			break;
+		}
+		break;
+	}
+	case APR_BASIC_RSP_RESULT: {
+		switch (payload[0]) {
+		case AFE_PORT_DATA_CMD_RT_PROXY_PORT_READ_V2:
+			pr_debug("%s: Read done\n", __func__);
+			prtd->pcm_irq_pos += snd_pcm_lib_period_bytes
+							(prtd->substream);
+			if (!prtd->mmap_flag) {
+				atomic_set(&prtd->rec_bytes_avail, 1);
+				wake_up(&prtd->read_wait);
+			}
+			snd_pcm_period_elapsed(prtd->substream);
+			break;
+		default:
+			break;
+		}
+		break;
+	}
+	case RESET_EVENTS:
+		prtd->pcm_irq_pos += snd_pcm_lib_period_bytes
+							(prtd->substream);
+		prtd->reset_event = true;
+		if (!prtd->mmap_flag) {
+			atomic_set(&prtd->rec_bytes_avail, 1);
+			wake_up(&prtd->read_wait);
+		}
+		snd_pcm_period_elapsed(prtd->substream);
+		break;
+	default:
+		break;
+	}
+	spin_unlock_irqrestore(&prtd->dsp_lock, dsp_flags);
+}
+
+static int msm_afe_playback_prepare(struct snd_pcm_substream *substream)
+{
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	struct pcm_afe_info *prtd = runtime->private_data;
+	struct snd_soc_pcm_runtime *rtd = substream->private_data;
+	struct snd_soc_dai *dai = rtd->cpu_dai;
+	int ret = 0;
+
+	pr_debug("%s: sample_rate=%d\n", __func__, runtime->rate);
+
+	pr_debug("%s: dai->id =%x\n", __func__, dai->id);
+	ret = afe_register_get_events(dai->id,
+			pcm_afe_process_tx_pkt, prtd);
+	if (ret < 0) {
+		pr_err("afe-pcm: register for events failed\n");
+		return ret;
+	}
+	pr_debug("%s: success\n", __func__);
+	prtd->prepared++;
+	return ret;
+}
+
+static int msm_afe_capture_prepare(struct snd_pcm_substream *substream)
+{
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	struct pcm_afe_info *prtd = runtime->private_data;
+	struct snd_soc_pcm_runtime *rtd = substream->private_data;
+	struct snd_soc_dai *dai = rtd->cpu_dai;
+	int ret = 0;
+
+	pr_debug("%s\n", __func__);
+
+	pr_debug("%s: dai->id =%x\n", __func__, dai->id);
+	ret = afe_register_get_events(dai->id,
+			pcm_afe_process_rx_pkt, prtd);
+	if (ret < 0) {
+		pr_err("afe-pcm: register for events failed\n");
+		return ret;
+	}
+	pr_debug("%s: success\n", __func__);
+	prtd->prepared++;
+	return 0;
+}
+
+/* Sample rates supported by the AFE proxy port */
+static unsigned int supported_sample_rates[] = {
+	8000, 16000, 48000
+};
+
+static struct snd_pcm_hw_constraint_list constraints_sample_rates = {
+	.count = ARRAY_SIZE(supported_sample_rates),
+	.list = supported_sample_rates,
+	.mask = 0,
+};
+
+static int msm_afe_open(struct snd_pcm_substream *substream)
+{
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	struct pcm_afe_info *prtd = NULL;
+	int ret = 0;
+
+	prtd = kzalloc(sizeof(struct pcm_afe_info), GFP_KERNEL);
+	if (prtd == NULL) {
+		pr_err("Failed to allocate memory for msm_audio\n");
+		return -ENOMEM;
+	}
+	pr_debug("prtd %pK\n", prtd);
+
+	mutex_init(&prtd->lock);
+	spin_lock_init(&prtd->dsp_lock);
+	prtd->dsp_cnt = 0;
+
+	mutex_lock(&prtd->lock);
+
+	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+		runtime->hw = msm_afe_hardware_playback;
+	else
+		runtime->hw = msm_afe_hardware_capture;
+
+	prtd->substream = substream;
+	runtime->private_data = prtd;
+	prtd->audio_client = q6afe_audio_client_alloc(prtd);
+	if (!prtd->audio_client) {
+		pr_debug("%s: Could not allocate memory\n", __func__);
+		mutex_unlock(&prtd->lock);
+		kfree(prtd);
+		return -ENOMEM;
+	}
+
+	atomic_set(&prtd->rec_bytes_avail, 0);
+	init_waitqueue_head(&prtd->read_wait);
+
+	hrtimer_init(&prtd->hrt, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+		prtd->hrt.function = afe_hrtimer_callback;
+	else if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
+		prtd->hrt.function = afe_hrtimer_rec_callback;
+
+	mutex_unlock(&prtd->lock);
+	ret = snd_pcm_hw_constraint_list(runtime, 0,
+				SNDRV_PCM_HW_PARAM_RATE,
+				&constraints_sample_rates);
+	if (ret < 0)
+		pr_err("snd_pcm_hw_constraint_list failed\n");
+	/* Ensure that buffer size is a multiple of period size */
+	ret = snd_pcm_hw_constraint_integer(runtime,
+					    SNDRV_PCM_HW_PARAM_PERIODS);
+	if (ret < 0)
+		pr_err("snd_pcm_hw_constraint_integer failed\n");
+
+	if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) {
+		ret = snd_pcm_hw_constraint_minmax(runtime,
+			SNDRV_PCM_HW_PARAM_BUFFER_BYTES,
+			MIN_CAPTURE_NUM_PERIODS * MIN_CAPTURE_PERIOD_SIZE,
+			MAX_CAPTURE_NUM_PERIODS * MAX_CAPTURE_PERIOD_SIZE);
+
+		if (ret < 0) {
+			pr_err("constraint for buffer bytes min max ret = %d\n",
+			      ret);
+		}
+	}
+
+	prtd->reset_event = false;
+	return 0;
+}
+
+static int msm_afe_playback_copy(struct snd_pcm_substream *substream,
+				int channel, snd_pcm_uframes_t hwoff,
+				void __user *buf, snd_pcm_uframes_t frames)
+{
+	int ret = 0;
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	struct pcm_afe_info *prtd = runtime->private_data;
+	char *hwbuf = runtime->dma_area + frames_to_bytes(runtime, hwoff);
+	u32 mem_map_handle = 0;
+
+	pr_debug("%s : appl_ptr 0x%lx hw_ptr 0x%lx dest_to_copy 0x%pK\n",
+		__func__,
+		runtime->control->appl_ptr, runtime->status->hw_ptr, hwbuf);
+
+	if (copy_from_user(hwbuf, buf, frames_to_bytes(runtime, frames))) {
+		pr_err("%s: Failed to copy audio from user buffer\n",
+			__func__);
+
+		ret = -EFAULT;
+		goto fail;
+	}
+
+	if (!prtd->mmap_flag) {
+		mem_map_handle = afe_req_mmap_handle(prtd->audio_client);
+		if (!mem_map_handle) {
+			pr_err("%s: mem_map_handle is NULL\n", __func__);
+			ret = -EFAULT;
+			goto fail;
+		}
+
+		pr_debug("%s : prtd-> dma_addr 0x%lx dsp_cnt %d\n", __func__,
+			prtd->dma_addr, prtd->dsp_cnt);
+
+		if (prtd->dsp_cnt == runtime->periods)
+			prtd->dsp_cnt = 0;
+
+		ret = afe_rt_proxy_port_write(
+				(prtd->dma_addr + (prtd->dsp_cnt *
+				snd_pcm_lib_period_bytes(prtd->substream))),
+				mem_map_handle,
+				snd_pcm_lib_period_bytes(prtd->substream));
+
+		if (ret) {
+			pr_err("%s: AFE proxy port write failed %d\n",
+				__func__, ret);
+			goto fail;
+		}
+		prtd->dsp_cnt++;
+	}
+fail:
+	return ret;
+}
+
+static int msm_afe_capture_copy(struct snd_pcm_substream *substream,
+				int channel, snd_pcm_uframes_t hwoff,
+				void __user *buf, snd_pcm_uframes_t frames)
+{
+	int ret = 0;
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	struct pcm_afe_info *prtd = runtime->private_data;
+	char *hwbuf = runtime->dma_area + frames_to_bytes(runtime, hwoff);
+	u32 mem_map_handle = 0;
+
+	if (!prtd->mmap_flag) {
+		mem_map_handle = afe_req_mmap_handle(prtd->audio_client);
+
+		if (!mem_map_handle) {
+			pr_err("%s: mem_map_handle is NULL\n", __func__);
+			ret = -EFAULT;
+			goto fail;
+		}
+
+		if (prtd->dsp_cnt == runtime->periods)
+			prtd->dsp_cnt = 0;
+
+		ret = afe_rt_proxy_port_read((prtd->dma_addr +
+				(prtd->dsp_cnt *
+				snd_pcm_lib_period_bytes(prtd->substream))),
+				mem_map_handle,
+				snd_pcm_lib_period_bytes(prtd->substream));
+
+		if (ret) {
+			pr_err("%s: AFE proxy port read failed %d\n",
+				__func__, ret);
+			goto fail;
+		}
+
+		prtd->dsp_cnt++;
+		ret = wait_event_timeout(prtd->read_wait,
+				atomic_read(&prtd->rec_bytes_avail), 5 * HZ);
+		if (!ret) {
+			/* wait_event_timeout() returns 0 on timeout */
+			pr_err("%s: wait_event_timeout timed out\n", __func__);
+			ret = -ETIMEDOUT;
+			goto fail;
+		}
+		ret = 0;
+		atomic_set(&prtd->rec_bytes_avail, 0);
+	}
+	pr_debug("%s:appl_ptr 0x%lx hw_ptr 0x%lx src_to_copy 0x%pK\n",
+			__func__, runtime->control->appl_ptr,
+			runtime->status->hw_ptr, hwbuf);
+
+	if (copy_to_user(buf, hwbuf, frames_to_bytes(runtime, frames))) {
+		pr_err("%s: copy to user failed\n", __func__);
+		ret = -EFAULT;
+		goto fail;
+	}
+
+fail:
+	return ret;
+}
+
+static int msm_afe_copy(struct snd_pcm_substream *substream, int channel,
+			snd_pcm_uframes_t hwoff, void __user *buf,
+			snd_pcm_uframes_t frames)
+{
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	struct pcm_afe_info *prtd = runtime->private_data;
+
+	int ret = 0;
+
+	if (prtd->reset_event) {
+		pr_debug("%s: reset events received from ADSP, return error\n",
+			__func__);
+		return -ENETRESET;
+	}
+
+	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+		ret = msm_afe_playback_copy(substream, channel, hwoff,
+					buf, frames);
+	else if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
+		ret = msm_afe_capture_copy(substream, channel, hwoff,
+					buf, frames);
+	return ret;
+}
+
+static int msm_afe_close(struct snd_pcm_substream *substream)
+{
+	int rc = 0;
+	struct snd_dma_buffer *dma_buf;
+	struct snd_pcm_runtime *runtime;
+	struct pcm_afe_info *prtd;
+	struct snd_soc_pcm_runtime *rtd = NULL;
+	struct snd_soc_dai *dai = NULL;
+	int dir = IN;
+	int ret = 0;
+
+	pr_debug("%s\n", __func__);
+	if (substream == NULL) {
+		pr_err("substream is NULL\n");
+		return -EINVAL;
+	}
+	rtd = substream->private_data;
+	dai = rtd->cpu_dai;
+	runtime = substream->runtime;
+	prtd = runtime->private_data;
+
+	mutex_lock(&prtd->lock);
+
+	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+		dir = IN;
+		ret =  afe_unregister_get_events(dai->id);
+		if (ret < 0)
+			pr_err("AFE unregister for events failed\n");
+	} else if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) {
+		dir = OUT;
+		ret =  afe_unregister_get_events(dai->id);
+		if (ret < 0)
+			pr_err("AFE unregister for events failed\n");
+	}
+	if (prtd->mmap_flag)
+		hrtimer_cancel(&prtd->hrt);
+
+	rc = afe_cmd_memory_unmap(afe_req_mmap_handle(prtd->audio_client));
+	if (rc < 0)
+		pr_err("AFE memory unmap failed\n");
+
+	pr_debug("release all buffers\n");
+	dma_buf = &substream->dma_buffer;
+	if (dma_buf == NULL) {
+		pr_debug("dma_buf is NULL\n");
+		goto done;
+	}
+
+	if (dma_buf->area)
+		dma_buf->area = NULL;
+	q6afe_audio_client_buf_free_contiguous(dir, prtd->audio_client);
+done:
+	pr_debug("%s: dai->id =%x\n", __func__, dai->id);
+	q6afe_audio_client_free(prtd->audio_client);
+	mutex_unlock(&prtd->lock);
+	prtd->prepared--;
+	kfree(prtd);
+	runtime->private_data = NULL;
+	return 0;
+}
+
+static int msm_afe_prepare(struct snd_pcm_substream *substream)
+{
+	int ret = 0;
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	struct pcm_afe_info *prtd = runtime->private_data;
+
+	prtd->pcm_irq_pos = 0;
+	if (prtd->prepared)
+		return 0;
+	mutex_lock(&prtd->lock);
+	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+		ret = msm_afe_playback_prepare(substream);
+	else if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
+		ret = msm_afe_capture_prepare(substream);
+	mutex_unlock(&prtd->lock);
+	return ret;
+}
+
+static int msm_afe_mmap(struct snd_pcm_substream *substream,
+				struct vm_area_struct *vma)
+{
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	struct pcm_afe_info *prtd = runtime->private_data;
+	struct afe_audio_client *ac = prtd->audio_client;
+	struct afe_audio_port_data *apd = ac->port;
+	struct afe_audio_buffer *ab;
+	int dir = -1;
+
+	pr_debug("%s\n", __func__);
+	prtd->mmap_flag = 1;
+
+	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+		dir = IN;
+	else
+		dir = OUT;
+	ab = &(apd[dir].buf[0]);
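+	/* The periods were allocated as one contiguous region, so
+	 * mapping buf[0] exposes the whole buffer to userspace.
+	 */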
+
+	return msm_audio_ion_mmap((struct audio_buffer *)ab, vma);
+}
+
+static int msm_afe_trigger(struct snd_pcm_substream *substream, int cmd)
+{
+	int ret = 0;
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	struct pcm_afe_info *prtd = runtime->private_data;
+
+	switch (cmd) {
+	case SNDRV_PCM_TRIGGER_START:
+	case SNDRV_PCM_TRIGGER_RESUME:
+	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+		pr_debug("%s: SNDRV_PCM_TRIGGER_START\n", __func__);
+		prtd->start = 1;
+		if (prtd->mmap_flag)
+			hrtimer_start(&prtd->hrt, ns_to_ktime(0),
+					HRTIMER_MODE_REL);
+		break;
+	case SNDRV_PCM_TRIGGER_STOP:
+	case SNDRV_PCM_TRIGGER_SUSPEND:
+	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+		pr_debug("%s: SNDRV_PCM_TRIGGER_STOP\n", __func__);
+		prtd->start = 0;
+		break;
+	default:
+		ret = -EINVAL;
+		break;
+	}
+	return ret;
+}
+
+static int msm_afe_hw_params(struct snd_pcm_substream *substream,
+				struct snd_pcm_hw_params *params)
+{
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	struct snd_dma_buffer *dma_buf = &substream->dma_buffer;
+	struct pcm_afe_info *prtd = runtime->private_data;
+	struct afe_audio_buffer *buf;
+	int dir, rc;
+
+	pr_debug("%s:\n", __func__);
+
+	mutex_lock(&prtd->lock);
+	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+		dir = IN;
+	else
+		dir = OUT;
+
+	rc = q6afe_audio_client_buf_alloc_contiguous(dir,
+		prtd->audio_client,
+		(params_buffer_bytes(params) / params_periods(params)),
+		params_periods(params));
+	pr_debug("params_buffer_bytes(params) = %d\n",
+			(params_buffer_bytes(params)));
+	pr_debug("params_periods(params) = %d\n",
+			(params_periods(params)));
+	pr_debug("params_periodsize(params) = %d\n",
+		(params_buffer_bytes(params) / params_periods(params)));
+
+	if (rc < 0) {
+		pr_err("Audio Start: Buffer Allocation failed rc = %d\n", rc);
+		mutex_unlock(&prtd->lock);
+		return -ENOMEM;
+	}
+	buf = prtd->audio_client->port[dir].buf;
+
+	if (buf == NULL || buf[0].data == NULL) {
+		mutex_unlock(&prtd->lock);
+		return -ENOMEM;
+	}
+
+	pr_debug("%s:buf = %pK\n", __func__, buf);
+	dma_buf->dev.type = SNDRV_DMA_TYPE_DEV;
+	dma_buf->dev.dev = substream->pcm->card->dev;
+	dma_buf->private_data = NULL;
+	dma_buf->area = buf[0].data;
+	dma_buf->addr = buf[0].phys;
+
+	dma_buf->bytes = params_buffer_bytes(params);
+
+	if (!dma_buf->area) {
+		pr_err("%s:MSM AFE physical memory allocation failed\n",
+							__func__);
+		mutex_unlock(&prtd->lock);
+		return -ENOMEM;
+	}
+
+	memset(dma_buf->area, 0,  params_buffer_bytes(params));
+
+	prtd->dma_addr = (phys_addr_t) dma_buf->addr;
+
+	mutex_unlock(&prtd->lock);
+
+	snd_pcm_set_runtime_buffer(substream, &substream->dma_buffer);
+
+	rc = afe_memory_map(dma_buf->addr, dma_buf->bytes, prtd->audio_client);
+	if (rc < 0)
+		pr_err("fail to map memory to DSP\n");
+
+	return rc;
+}
+
+static snd_pcm_uframes_t msm_afe_pointer(struct snd_pcm_substream *substream)
+{
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	struct pcm_afe_info *prtd = runtime->private_data;
+
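+	/* pcm_irq_pos is advanced by the APR callbacks; wrap it at the
+	 * buffer boundary before converting bytes to frames.
+	 */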
+	if (prtd->pcm_irq_pos >= snd_pcm_lib_buffer_bytes(substream))
+		prtd->pcm_irq_pos = 0;
+
+	if (prtd->reset_event) {
+		pr_debug("%s: reset events received from ADSP, return XRUN\n",
+			__func__);
+		return SNDRV_PCM_POS_XRUN;
+	}
+
+	pr_debug("pcm_irq_pos = %d\n", prtd->pcm_irq_pos);
+	return bytes_to_frames(runtime, (prtd->pcm_irq_pos));
+}
+
+static struct snd_pcm_ops msm_afe_ops = {
+	.open           = msm_afe_open,
+	.copy           = msm_afe_copy,
+	.hw_params	= msm_afe_hw_params,
+	.trigger	= msm_afe_trigger,
+	.close          = msm_afe_close,
+	.prepare        = msm_afe_prepare,
+	.mmap		= msm_afe_mmap,
+	.pointer	= msm_afe_pointer,
+};
+
+static int msm_asoc_pcm_new(struct snd_soc_pcm_runtime *rtd)
+{
+	struct snd_card *card = rtd->card->snd_card;
+	int ret = 0;
+
+	pr_debug("%s\n", __func__);
+	if (!card->dev->coherent_dma_mask)
+		card->dev->coherent_dma_mask = DMA_BIT_MASK(32);
+	return ret;
+}
+
+static int msm_afe_afe_probe(struct snd_soc_platform *platform)
+{
+	pr_debug("%s\n", __func__);
+	return 0;
+}
+
+static struct snd_soc_platform_driver msm_soc_platform = {
+	.ops		= &msm_afe_ops,
+	.pcm_new	= msm_asoc_pcm_new,
+	.probe		= msm_afe_afe_probe,
+};
+
+static int msm_afe_probe(struct platform_device *pdev)
+{
+	pr_debug("%s: dev name %s\n", __func__, dev_name(&pdev->dev));
+	return snd_soc_register_platform(&pdev->dev,
+				   &msm_soc_platform);
+}
+
+static int msm_afe_remove(struct platform_device *pdev)
+{
+	pr_debug("%s\n", __func__);
+	snd_soc_unregister_platform(&pdev->dev);
+	return 0;
+}
+
+static const struct of_device_id msm_pcm_afe_dt_match[] = {
+	{.compatible = "qcom,msm-pcm-afe"},
+	{}
+};
+MODULE_DEVICE_TABLE(of, msm_pcm_afe_dt_match);
+
+static struct platform_driver msm_afe_driver = {
+	.driver = {
+		.name = "msm-pcm-afe",
+		.owner = THIS_MODULE,
+		.of_match_table = msm_pcm_afe_dt_match,
+	},
+	.probe = msm_afe_probe,
+	.remove = msm_afe_remove,
+};
+
+static int __init msm_soc_platform_init(void)
+{
+	pr_debug("%s\n", __func__);
+	return platform_driver_register(&msm_afe_driver);
+}
+module_init(msm_soc_platform_init);
+
+static void __exit msm_soc_platform_exit(void)
+{
+	pr_debug("%s\n", __func__);
+	platform_driver_unregister(&msm_afe_driver);
+}
+module_exit(msm_soc_platform_exit);
+
+MODULE_DESCRIPTION("AFE PCM module platform driver");
+MODULE_LICENSE("GPL v2");
diff -Nruw linux-4.4.115-fbx/sound/soc/msm./qdsp6v2/msm-pcm-afe-v2.h linux-4.4.115-fbx/sound/soc/msm/qdsp6v2/msm-pcm-afe-v2.h
--- linux-4.4.115-fbx/sound/soc/msm./qdsp6v2/msm-pcm-afe-v2.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/sound/soc/msm/qdsp6v2/msm-pcm-afe-v2.h	2019-01-22 16:16:29.627301863 +0100
@@ -0,0 +1,49 @@
+/* Copyright (c) 2012,2015-2016 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#ifndef _MSM_PCM_AFE_H
+#define _MSM_PCM_AFE_H
+#include <sound/apr_audio-v2.h>
+#include <sound/q6afe-v2.h>
+
+struct pcm_afe_info {
+	unsigned long dma_addr;
+	struct snd_pcm_substream *substream;
+	unsigned int pcm_irq_pos;       /* IRQ position */
+	struct mutex lock;
+	spinlock_t dsp_lock;
+	uint32_t samp_rate;
+	uint32_t channel_mode;
+	uint8_t start;
+	uint32_t dsp_cnt;
+	uint32_t buf_phys;
+	int32_t mmap_flag;
+	int prepared;
+	struct hrtimer hrt;
+	int poll_time;
+	struct afe_audio_client *audio_client;
+	wait_queue_head_t read_wait;
+	atomic_t rec_bytes_avail;
+	bool reset_event;
+};
+
+#define MSM_EXT(xname, fp_info, fp_get, fp_put, addr) \
+	{.iface = SNDRV_CTL_ELEM_IFACE_MIXER, \
+	.access = SNDRV_CTL_ELEM_ACCESS_READWRITE, \
+	.name = xname, \
+	.info = fp_info,\
+	.get = fp_get, .put = fp_put, \
+	.private_value = addr, \
+	}
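+/*
+ * Illustrative (hypothetical) use from a mixer control table; the
+ * callback names below are placeholders, not symbols defined here:
+ *   MSM_EXT("AFE Example Control", example_info, example_get,
+ *	     example_put, 0)
+ */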
+
+#endif /*_MSM_PCM_AFE_H*/
diff -Nruw linux-4.4.115-fbx/sound/soc/msm./qdsp6v2/msm-pcm-dtmf-v2.c linux-4.4.115-fbx/sound/soc/msm/qdsp6v2/msm-pcm-dtmf-v2.c
--- linux-4.4.115-fbx/sound/soc/msm./qdsp6v2/msm-pcm-dtmf-v2.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/sound/soc/msm/qdsp6v2/msm-pcm-dtmf-v2.c	2019-01-22 16:16:29.631301899 +0100
@@ -0,0 +1,596 @@
+/* Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/time.h>
+#include <linux/wait.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/dma-mapping.h>
+#include <sound/core.h>
+#include <sound/soc.h>
+#include <sound/pcm.h>
+#include <sound/q6afe-v2.h>
+
+#include "msm-pcm-q6-v2.h"
+#include "msm-pcm-routing-v2.h"
+#include "q6voice.h"
+
+enum {
+	DTMF_IN_RX,
+	DTMF_IN_TX,
+};
+
+enum format {
+	FORMAT_S16_LE = 2
+};
+
+struct dtmf_det_info {
+	char     session[MAX_SESSION_NAME_LEN];
+	uint8_t  dir;
+	uint16_t high_freq;
+	uint16_t low_freq;
+};
+
+struct dtmf_buf_node {
+	struct list_head list;
+	struct dtmf_det_info dtmf_det_pkt;
+};
+
+enum dtmf_state {
+	DTMF_GEN_RX_STOPPED,
+	DTMF_GEN_RX_STARTED,
+};
+
+#define DTMF_MAX_Q_LEN 10
+#define DTMF_PKT_SIZE sizeof(struct dtmf_det_info)
+
+struct dtmf_drv_info {
+	enum  dtmf_state state;
+	struct snd_pcm_substream *capture_substream;
+
+	struct list_head out_queue;
+	struct list_head free_out_queue;
+
+	wait_queue_head_t out_wait;
+
+	struct mutex lock;
+	spinlock_t dsp_lock;
+
+	uint8_t capture_start;
+	uint8_t capture_instance;
+
+	unsigned int pcm_capture_size;
+	unsigned int pcm_capture_count;
+	unsigned int pcm_capture_irq_pos;
+	unsigned int pcm_capture_buf_pos;
+};
+
+static struct snd_pcm_hardware msm_pcm_hardware = {
+	.info =                 (SNDRV_PCM_INFO_MMAP |
+				 SNDRV_PCM_INFO_BLOCK_TRANSFER |
+				 SNDRV_PCM_INFO_MMAP_VALID |
+				 SNDRV_PCM_INFO_INTERLEAVED),
+	.formats =              SNDRV_PCM_FMTBIT_S16_LE,
+	.channels_min =         1,
+	.channels_max =         1,
+	.buffer_bytes_max =	(sizeof(struct dtmf_buf_node) * DTMF_MAX_Q_LEN),
+	.period_bytes_min =	DTMF_PKT_SIZE,
+	.period_bytes_max =	DTMF_PKT_SIZE,
+	.periods_min =		DTMF_MAX_Q_LEN,
+	.periods_max =		DTMF_MAX_Q_LEN,
+	.fifo_size =            0,
+};
+
+static int msm_dtmf_rx_generate_put(struct snd_kcontrol *kcontrol,
+				    struct snd_ctl_elem_value *ucontrol)
+{
+	uint16_t low_freq = ucontrol->value.integer.value[0];
+	uint16_t high_freq = ucontrol->value.integer.value[1];
+	int64_t duration = ucontrol->value.integer.value[2];
+	uint16_t gain = ucontrol->value.integer.value[3];
+
+	pr_debug("%s: low_freq=%d high_freq=%d duration=%d gain=%d\n",
+		 __func__, low_freq, high_freq, (int)duration, gain);
+	afe_dtmf_generate_rx(duration, high_freq, low_freq, gain);
+	return 0;
+}
+
+static int msm_dtmf_rx_generate_get(struct  snd_kcontrol *kcontrol,
+				    struct snd_ctl_elem_value *ucontrol)
+{
+	pr_debug("%s:\n", __func__);
+	ucontrol->value.integer.value[0] = 0;
+	return 0;
+}
+
+static int msm_dtmf_detect_voice_rx_put(struct snd_kcontrol *kcontrol,
+					struct snd_ctl_elem_value *ucontrol)
+{
+	int enable = ucontrol->value.integer.value[0];
+
+	pr_debug("%s: enable=%d\n", __func__, enable);
+	voc_enable_dtmf_rx_detection(voc_get_session_id(VOICE_SESSION_NAME),
+				     enable);
+
+	return 0;
+}
+
+static int msm_dtmf_detect_voice_rx_get(struct snd_kcontrol *kcontrol,
+				struct snd_ctl_elem_value *ucontrol)
+{
+	ucontrol->value.integer.value[0] = 0;
+	return 0;
+}
+
+static int msm_dtmf_detect_volte_rx_put(struct snd_kcontrol *kcontrol,
+					struct snd_ctl_elem_value *ucontrol)
+{
+	int enable = ucontrol->value.integer.value[0];
+
+	pr_debug("%s: enable=%d\n", __func__, enable);
+	voc_enable_dtmf_rx_detection(voc_get_session_id(VOLTE_SESSION_NAME),
+				     enable);
+
+	return 0;
+}
+
+static int msm_dtmf_detect_volte_rx_get(struct snd_kcontrol *kcontrol,
+					struct snd_ctl_elem_value *ucontrol)
+{
+	ucontrol->value.integer.value[0] = 0;
+	return 0;
+}
+
+static struct snd_kcontrol_new msm_dtmf_controls[] = {
+	SOC_SINGLE_MULTI_EXT("DTMF_Generate Rx Low High Duration Gain",
+			     SND_SOC_NOPM, 0, 5000, 0, 4,
+			     msm_dtmf_rx_generate_get,
+			     msm_dtmf_rx_generate_put),
+	SOC_SINGLE_EXT("DTMF_Detect Rx Voice enable", SND_SOC_NOPM, 0, 1, 0,
+				msm_dtmf_detect_voice_rx_get,
+				msm_dtmf_detect_voice_rx_put),
+	SOC_SINGLE_EXT("DTMF_Detect Rx VoLTE enable", SND_SOC_NOPM, 0, 1, 0,
+				msm_dtmf_detect_volte_rx_get,
+				msm_dtmf_detect_volte_rx_put),
+};
+
+static int msm_pcm_dtmf_probe(struct snd_soc_platform *platform)
+{
+	snd_soc_add_platform_controls(platform, msm_dtmf_controls,
+				      ARRAY_SIZE(msm_dtmf_controls));
+	return 0;
+}
+
+static void dtmf_rx_detected_cb(uint8_t *pkt,
+				char *session,
+				void *private_data)
+{
+	struct dtmf_buf_node *buf_node = NULL;
+	struct vss_istream_evt_rx_dtmf_detected *dtmf_det_pkt =
+		(struct vss_istream_evt_rx_dtmf_detected *)pkt;
+	struct dtmf_drv_info *prtd = private_data;
+	unsigned long dsp_flags;
+
+	pr_debug("%s\n", __func__);
+	if (prtd->capture_substream == NULL)
+		return;
+
+	/* Copy dtmf detected info into out_queue. */
+	spin_lock_irqsave(&prtd->dsp_lock, dsp_flags);
+	/* discarding dtmf detection info till start is received */
+	if (!list_empty(&prtd->free_out_queue) && prtd->capture_start) {
+		buf_node = list_first_entry(&prtd->free_out_queue,
+					    struct dtmf_buf_node, list);
+		list_del(&buf_node->list);
+		buf_node->dtmf_det_pkt.high_freq = dtmf_det_pkt->high_freq;
+		buf_node->dtmf_det_pkt.low_freq = dtmf_det_pkt->low_freq;
+		if (session != NULL)
+			strlcpy(buf_node->dtmf_det_pkt.session,
+				session, MAX_SESSION_NAME_LEN);
+
+		buf_node->dtmf_det_pkt.dir = DTMF_IN_RX;
+		pr_debug("high=%d, low=%d session=%s\n",
+			 buf_node->dtmf_det_pkt.high_freq,
+			 buf_node->dtmf_det_pkt.low_freq,
+			 buf_node->dtmf_det_pkt.session);
+		list_add_tail(&buf_node->list, &prtd->out_queue);
+		prtd->pcm_capture_irq_pos += prtd->pcm_capture_count;
+		spin_unlock_irqrestore(&prtd->dsp_lock, dsp_flags);
+		snd_pcm_period_elapsed(prtd->capture_substream);
+	} else {
+		spin_unlock_irqrestore(&prtd->dsp_lock, dsp_flags);
+		pr_err("DTMF detection pkt in Rx dropped, no free node available\n");
+	}
+
+	wake_up(&prtd->out_wait);
+}
+
+static int msm_pcm_capture_copy(struct snd_pcm_substream *substream,
+				int channel, snd_pcm_uframes_t hwoff,
+				void __user *buf, snd_pcm_uframes_t frames)
+{
+	int ret = 0;
+	int count = 0;
+	struct dtmf_buf_node *buf_node = NULL;
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	struct dtmf_drv_info *prtd = runtime->private_data;
+	unsigned long dsp_flags;
+
+	count = frames_to_bytes(runtime, frames);
+
+	ret = wait_event_interruptible_timeout(prtd->out_wait,
+				(!list_empty(&prtd->out_queue)),
+				1 * HZ);
+
+	if (ret > 0) {
+		if (count <= DTMF_PKT_SIZE) {
+			spin_lock_irqsave(&prtd->dsp_lock, dsp_flags);
+			buf_node = list_first_entry(&prtd->out_queue,
+					struct dtmf_buf_node, list);
+			list_del(&buf_node->list);
+			spin_unlock_irqrestore(&prtd->dsp_lock, dsp_flags);
+			ret = copy_to_user(buf,
+					   &buf_node->dtmf_det_pkt,
+					   count);
+			if (ret) {
+				pr_err("%s: Copy to user returned %d\n",
+					__func__, ret);
+				ret = -EFAULT;
+			}
+			spin_lock_irqsave(&prtd->dsp_lock, dsp_flags);
+			list_add_tail(&buf_node->list,
+				      &prtd->free_out_queue);
+			spin_unlock_irqrestore(&prtd->dsp_lock, dsp_flags);
+
+		} else {
+			pr_err("%s: Read count %d > DTMF_PKT_SIZE\n",
+				__func__, count);
+			ret = -ENOMEM;
+		}
+	} else if (ret == 0) {
+		pr_err("%s: No UL data available\n", __func__);
+		ret = -ETIMEDOUT;
+	} else {
+		pr_err("%s: Read was interrupted\n", __func__);
+		ret = -ERESTARTSYS;
+	}
+	return ret;
+}
+
+static int msm_pcm_copy(struct snd_pcm_substream *substream, int channel,
+	 snd_pcm_uframes_t hwoff, void __user *buf, snd_pcm_uframes_t frames)
+{
+	int ret = 0;
+
+	pr_debug("%s() DTMF\n", __func__);
+
+	if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
+		ret = msm_pcm_capture_copy(substream, channel, hwoff,
+					   buf, frames);
+
+	return ret;
+}
+
+static int msm_pcm_open(struct snd_pcm_substream *substream)
+{
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	struct dtmf_drv_info *prtd = NULL;
+	int ret = 0;
+
+	if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) {
+		prtd = kzalloc(sizeof(struct dtmf_drv_info), GFP_KERNEL);
+
+		if (prtd == NULL) {
+			pr_err("Failed to allocate memory for msm_audio\n");
+			ret = -ENOMEM;
+			goto done;
+		}
+
+		mutex_init(&prtd->lock);
+		spin_lock_init(&prtd->dsp_lock);
+		init_waitqueue_head(&prtd->out_wait);
+		INIT_LIST_HEAD(&prtd->out_queue);
+		INIT_LIST_HEAD(&prtd->free_out_queue);
+
+		runtime->hw = msm_pcm_hardware;
+
+		ret = snd_pcm_hw_constraint_integer(runtime,
+						    SNDRV_PCM_HW_PARAM_PERIODS);
+		if (ret < 0)
+			pr_info("snd_pcm_hw_constraint_integer failed\n");
+
+		prtd->capture_substream = substream;
+		prtd->capture_instance++;
+		runtime->private_data = prtd;
+	}
+
+done:
+	return ret;
+}
+
+static int msm_pcm_close(struct snd_pcm_substream *substream)
+{
+	int ret = 0;
+	struct list_head *ptr = NULL;
+	struct list_head *next = NULL;
+	struct dtmf_buf_node *buf_node = NULL;
+	struct snd_dma_buffer *c_dma_buf;
+	struct snd_pcm_substream *c_substream;
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	struct dtmf_drv_info *prtd = runtime->private_data;
+	unsigned long dsp_flags;
+
+	pr_debug("%s() DTMF\n", __func__);
+
+	if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) {
+		mutex_lock(&prtd->lock);
+		wake_up(&prtd->out_wait);
+
+		prtd->capture_instance--;
+
+		if (!prtd->capture_instance) {
+			if (prtd->state == DTMF_GEN_RX_STARTED) {
+				prtd->state = DTMF_GEN_RX_STOPPED;
+				voc_disable_dtmf_det_on_active_sessions();
+				voc_register_dtmf_rx_detection_cb(NULL, NULL);
+			}
+			/* Release all buffers on out_queue and free_out_queue */
+			pr_debug("release all buffers\n");
+			c_substream = prtd->capture_substream;
+			if (c_substream == NULL) {
+				pr_debug("c_substream is NULL\n");
+				mutex_unlock(&prtd->lock);
+				return -EINVAL;
+			}
+
+			c_dma_buf = &c_substream->dma_buffer;
+			if (c_dma_buf == NULL) {
+				pr_debug("c_dma_buf is NULL.\n");
+				mutex_unlock(&prtd->lock);
+				return -EINVAL;
+			}
+
+			if (c_dma_buf->area != NULL) {
+				spin_lock_irqsave(&prtd->dsp_lock, dsp_flags);
+				list_for_each_safe(ptr, next,
+							&prtd->out_queue) {
+					buf_node = list_entry(ptr,
+						   struct dtmf_buf_node, list);
+					list_del(&buf_node->list);
+				}
+
+				list_for_each_safe(ptr, next,
+						   &prtd->free_out_queue) {
+					buf_node = list_entry(ptr,
+						   struct dtmf_buf_node, list);
+					list_del(&buf_node->list);
+				}
+
+				spin_unlock_irqrestore(&prtd->dsp_lock,
+						       dsp_flags);
+				dma_free_coherent(c_substream->pcm->card->dev,
+						  runtime->hw.buffer_bytes_max,
+						  c_dma_buf->area,
+						  c_dma_buf->addr);
+				c_dma_buf->area = NULL;
+			}
+		}
+		prtd->capture_substream = NULL;
+		mutex_unlock(&prtd->lock);
+	}
+
+	return ret;
+}
+
+static int msm_pcm_hw_params(struct snd_pcm_substream *substream,
+			     struct snd_pcm_hw_params *params)
+{
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	struct dtmf_drv_info *prtd = runtime->private_data;
+	struct snd_dma_buffer *dma_buf = &substream->dma_buffer;
+	struct dtmf_buf_node *buf_node = NULL;
+	int i = 0, offset = 0;
+	int ret = 0;
+
+	pr_debug("%s: DTMF\n", __func__);
+	if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) {
+		mutex_lock(&prtd->lock);
+		dma_buf->dev.type = SNDRV_DMA_TYPE_DEV;
+		dma_buf->dev.dev = substream->pcm->card->dev;
+		dma_buf->private_data = NULL;
+
+		dma_buf->area = dma_alloc_coherent(substream->pcm->card->dev,
+						runtime->hw.buffer_bytes_max,
+						&dma_buf->addr, GFP_KERNEL);
+		if (!dma_buf->area) {
+			pr_err("%s:MSM DTMF dma_alloc failed\n", __func__);
+			mutex_unlock(&prtd->lock);
+			return -ENOMEM;
+		}
+
+		dma_buf->bytes = runtime->hw.buffer_bytes_max;
+		memset(dma_buf->area, 0, runtime->hw.buffer_bytes_max);
+
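+		/*
+		 * Carve the coherent DMA buffer into DTMF_MAX_Q_LEN
+		 * contiguous struct dtmf_buf_node slots; each node embeds
+		 * its own list_head, so no extra allocation is needed.
+		 */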
+		for (i = 0; i < DTMF_MAX_Q_LEN; i++) {
+			pr_debug("node =%d\n", i);
+			buf_node = (void *) dma_buf->area + offset;
+			list_add_tail(&buf_node->list,
+				      &prtd->free_out_queue);
+			offset = offset + sizeof(struct dtmf_buf_node);
+		}
+
+		snd_pcm_set_runtime_buffer(substream, &substream->dma_buffer);
+		mutex_unlock(&prtd->lock);
+	}
+
+	return ret;
+}
+
+static int msm_pcm_capture_prepare(struct snd_pcm_substream *substream)
+{
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	struct dtmf_drv_info *prtd = runtime->private_data;
+
+	pr_debug("%s: DTMF\n", __func__);
+	prtd->pcm_capture_size  = snd_pcm_lib_buffer_bytes(substream);
+	prtd->pcm_capture_count = snd_pcm_lib_period_bytes(substream);
+	prtd->pcm_capture_irq_pos = 0;
+	prtd->pcm_capture_buf_pos = 0;
+	return 0;
+}
+
+static int msm_pcm_prepare(struct snd_pcm_substream *substream)
+{
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	struct dtmf_drv_info *prtd = runtime->private_data;
+
+	pr_debug("%s: DTMF\n", __func__);
+
+	if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) {
+		mutex_lock(&prtd->lock);
+
+		msm_pcm_capture_prepare(substream);
+
+		if (runtime->format != FORMAT_S16_LE) {
+			pr_err("format:%u doesnt match %d\n",
+			       (uint32_t)runtime->format, FORMAT_S16_LE);
+			mutex_unlock(&prtd->lock);
+			return -EINVAL;
+		}
+
+		if (prtd->capture_instance &&
+			(prtd->state != DTMF_GEN_RX_STARTED)) {
+			voc_register_dtmf_rx_detection_cb(dtmf_rx_detected_cb,
+							  prtd);
+			prtd->state = DTMF_GEN_RX_STARTED;
+		}
+		mutex_unlock(&prtd->lock);
+	}
+
+	return 0;
+}
+
+static int msm_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
+{
+	int ret = 0;
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	struct dtmf_drv_info *prtd = runtime->private_data;
+
+	switch (cmd) {
+	case SNDRV_PCM_TRIGGER_START:
+	case SNDRV_PCM_TRIGGER_RESUME:
+		pr_debug("%s: Trigger start\n", __func__);
+		if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
+			prtd->capture_start = 1;
+		break;
+	case SNDRV_PCM_TRIGGER_STOP:
+		pr_debug("SNDRV_PCM_TRIGGER_STOP\n");
+		if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
+			prtd->capture_start = 0;
+		break;
+	default:
+		ret = -EINVAL;
+		break;
+	}
+
+	return ret;
+}
+
+static snd_pcm_uframes_t msm_pcm_pointer(struct snd_pcm_substream *substream)
+{
+	snd_pcm_uframes_t ret = 0;
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	struct dtmf_drv_info *prtd = runtime->private_data;
+
+	if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) {
+		if (prtd->pcm_capture_irq_pos >= prtd->pcm_capture_size)
+			prtd->pcm_capture_irq_pos = 0;
+		ret = bytes_to_frames(runtime, (prtd->pcm_capture_irq_pos));
+	}
+
+	return ret;
+}
+
+static struct snd_pcm_ops msm_pcm_ops = {
+	.open           = msm_pcm_open,
+	.copy		= msm_pcm_copy,
+	.hw_params	= msm_pcm_hw_params,
+	.close          = msm_pcm_close,
+	.prepare        = msm_pcm_prepare,
+	.trigger        = msm_pcm_trigger,
+	.pointer        = msm_pcm_pointer,
+};
+
+static int msm_asoc_pcm_new(struct snd_soc_pcm_runtime *rtd)
+{
+	struct snd_card *card = rtd->card->snd_card;
+	int ret = 0;
+
+	if (!card->dev->coherent_dma_mask)
+		card->dev->coherent_dma_mask = DMA_BIT_MASK(32);
+	return ret;
+}
+
+static struct snd_soc_platform_driver msm_soc_platform = {
+	.ops		= &msm_pcm_ops,
+	.pcm_new	= msm_asoc_pcm_new,
+	.probe		= msm_pcm_dtmf_probe,
+};
+
+static int msm_pcm_probe(struct platform_device *pdev)
+{
+	pr_debug("%s: dev name %s\n", __func__, dev_name(&pdev->dev));
+
+	return snd_soc_register_platform(&pdev->dev,
+					 &msm_soc_platform);
+}
+
+static int msm_pcm_remove(struct platform_device *pdev)
+{
+	snd_soc_unregister_platform(&pdev->dev);
+	return 0;
+}
+
+static const struct of_device_id msm_pcm_dtmf_dt_match[] = {
+	{.compatible = "qcom,msm-pcm-dtmf"},
+	{}
+};
+
+MODULE_DEVICE_TABLE(of, msm_pcm_dtmf_dt_match);
+
+static struct platform_driver msm_pcm_driver = {
+	.driver = {
+		.name = "msm-pcm-dtmf",
+		.owner = THIS_MODULE,
+		.of_match_table = msm_pcm_dtmf_dt_match,
+	},
+	.probe = msm_pcm_probe,
+	.remove = msm_pcm_remove,
+};
+
+static int __init msm_soc_platform_init(void)
+{
+	return platform_driver_register(&msm_pcm_driver);
+}
+module_init(msm_soc_platform_init);
+
+static void __exit msm_soc_platform_exit(void)
+{
+	platform_driver_unregister(&msm_pcm_driver);
+}
+module_exit(msm_soc_platform_exit);
+
+MODULE_DESCRIPTION("DTMF platform driver");
+MODULE_LICENSE("GPL v2");
diff -Nruw linux-4.4.115-fbx/sound/soc/msm./qdsp6v2/msm-pcm-host-voice-v2.c linux-4.4.115-fbx/sound/soc/msm/qdsp6v2/msm-pcm-host-voice-v2.c
--- linux-4.4.115-fbx/sound/soc/msm./qdsp6v2/msm-pcm-host-voice-v2.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/sound/soc/msm/qdsp6v2/msm-pcm-host-voice-v2.c	2019-01-22 16:16:29.631301899 +0100
@@ -0,0 +1,1552 @@
+/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/time.h>
+#include <linux/wait.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/dma-mapping.h>
+#include <sound/core.h>
+#include <sound/soc.h>
+#include <sound/soc-dapm.h>
+#include <sound/pcm.h>
+#include <sound/initval.h>
+#include <sound/control.h>
+#include <asm/dma.h>
+#include <linux/msm_audio_ion.h>
+#include "q6voice.h"
+
+#define HPCM_MAX_Q_LEN 2
+#define HPCM_MIN_VOC_PKT_SIZE 320
+#define HPCM_MAX_VOC_PKT_SIZE 640
+#define VHPCM_BLOCK_SIZE 4096
+#define CACHE_ALIGNMENT_SIZE 128
+#define CACHE_ALIGNMENT_MASK 0xFFFFFF80
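+/* 0xFFFFFF80 == ~(CACHE_ALIGNMENT_SIZE - 1) truncated to 32 bits */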
+
+#define VOICE_TX_CAPTURE_DAI_ID  "CS-VOICE HOST TX CAPTURE"
+#define VOICE_TX_PLAYBACK_DAI_ID "CS-VOICE HOST TX PLAYBACK"
+#define VOICE_RX_CAPTURE_DAI_ID  "CS-VOICE HOST RX CAPTURE"
+#define VOICE_RX_PLAYBACK_DAI_ID "CS-VOICE HOST RX PLAYBACK"
+
+#define VOLTE_TX_CAPTURE_DAI_ID  "VOLTE HOST TX CAPTURE"
+#define VOLTE_TX_PLAYBACK_DAI_ID "VOLTE HOST TX PLAYBACK"
+#define VOLTE_RX_CAPTURE_DAI_ID  "VOLTE HOST RX CAPTURE"
+#define VOLTE_RX_PLAYBACK_DAI_ID "VOLTE HOST RX PLAYBACK"
+
+#define VoMMode1_TX_CAPTURE_DAI_ID  "VoiceMMode1 HOST TX CAPTURE"
+#define VoMMode1_TX_PLAYBACK_DAI_ID "VoiceMMode1 HOST TX PLAYBACK"
+#define VoMMode1_RX_CAPTURE_DAI_ID  "VoiceMMode1 HOST RX CAPTURE"
+#define VoMMode1_RX_PLAYBACK_DAI_ID "VoiceMMode1 HOST RX PLAYBACK"
+
+#define VoMMode2_TX_CAPTURE_DAI_ID  "VoiceMMode2 HOST TX CAPTURE"
+#define VoMMode2_TX_PLAYBACK_DAI_ID "VoiceMMode2 HOST TX PLAYBACK"
+#define VoMMode2_RX_CAPTURE_DAI_ID  "VoiceMMode2 HOST RX CAPTURE"
+#define VoMMode2_RX_PLAYBACK_DAI_ID "VoiceMMode2 HOST RX PLAYBACK"
+
+enum {
+	RX = 1,
+	TX,
+};
+
+enum {
+	VOICE_INDEX = 0,
+	VOLTE_INDEX,
+	VOMMODE1_INDEX,
+	VOMMODE2_INDEX,
+	MAX_SESSION
+};
+
+enum hpcm_state {
+	HPCM_STOPPED = 1,
+	HPCM_CLOSED,
+	HPCM_PREPARED,
+	HPCM_STARTED,
+};
+
+struct hpcm_frame {
+	uint32_t len;
+	uint8_t voc_pkt[HPCM_MAX_VOC_PKT_SIZE];
+};
+
+struct hpcm_buf_node {
+	struct list_head list;
+	struct hpcm_frame frame;
+};
+
+struct vocpcm_ion_buffer {
+	/* Physical address */
+	phys_addr_t paddr;
+	/* Kernel virtual address */
+	void *kvaddr;
+};
+
+struct dai_data {
+	enum  hpcm_state state;
+	struct snd_pcm_substream *substream;
+	struct list_head filled_queue;
+	struct list_head free_queue;
+	wait_queue_head_t queue_wait;
+	spinlock_t dsp_lock;
+	uint32_t pcm_size;
+	uint32_t pcm_count;
+	/* IRQ position */
+	uint32_t pcm_irq_pos;
+	/* Position in buffer */
+	uint32_t pcm_buf_pos;
+	struct vocpcm_ion_buffer vocpcm_ion_buffer;
+};
+
+struct tap_point {
+	struct dai_data playback_dai_data;
+	struct dai_data capture_dai_data;
+};
+
+struct session {
+	struct tap_point tx_tap_point;
+	struct tap_point rx_tap_point;
+	phys_addr_t sess_paddr;
+	void *sess_kvaddr;
+	struct ion_handle *ion_handle;
+	struct mem_map_table tp_mem_table;
+};
+
+struct tappnt_mxr_data {
+	bool enable;
+	uint16_t direction;
+	uint16_t sample_rate;
+};
+
+/* Values from mixer ctl are cached in this structure */
+struct mixer_conf {
+	int8_t sess_indx;
+	struct tappnt_mxr_data rx;
+	struct tappnt_mxr_data tx;
+};
+
+struct start_cmd {
+	struct vss_ivpcm_tap_point tap_pnt[2];
+	uint32_t no_of_tapoints;
+};
+
+struct hpcm_drv {
+	struct mutex lock;
+	struct session session[MAX_SESSION];
+	struct mixer_conf mixer_conf;
+	struct ion_client *ion_client;
+	struct start_cmd start_cmd;
+};
+
+static struct hpcm_drv hpcm_drv;
+
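+/*
+ * One ALSA period carries one vocoder packet (320 to 640 bytes) and the
+ * ring holds exactly HPCM_MAX_Q_LEN periods, backed by one hpcm_buf_node
+ * each.
+ */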
+static struct snd_pcm_hardware msm_pcm_hardware = {
+	.info =                 (SNDRV_PCM_INFO_MMAP |
+				SNDRV_PCM_INFO_BLOCK_TRANSFER |
+				SNDRV_PCM_INFO_MMAP_VALID |
+				SNDRV_PCM_INFO_INTERLEAVED),
+	.formats =              SNDRV_PCM_FMTBIT_S16_LE |
+				SNDRV_PCM_FMTBIT_SPECIAL,
+	.rates =                SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000,
+	.rate_min =             8000,
+	.rate_max =             16000,
+	.channels_min =         1,
+	.channels_max =         1,
+	.buffer_bytes_max =	sizeof(struct hpcm_buf_node) * HPCM_MAX_Q_LEN,
+	.period_bytes_min =	HPCM_MIN_VOC_PKT_SIZE,
+	.period_bytes_max =	HPCM_MAX_VOC_PKT_SIZE,
+	.periods_min =		HPCM_MAX_Q_LEN,
+	.periods_max =		HPCM_MAX_Q_LEN,
+	.fifo_size =            0,
+};
+
+static char *hpcm_get_sess_name(int sess_indx)
+{
+	char *sess_name = NULL;
+
+	if (sess_indx == VOICE_INDEX)
+		sess_name = VOICE_SESSION_NAME;
+	else if (sess_indx == VOLTE_INDEX)
+		sess_name = VOLTE_SESSION_NAME;
+	else if (sess_indx == VOMMODE1_INDEX)
+		sess_name = VOICEMMODE1_NAME;
+	else if (sess_indx == VOMMODE2_INDEX)
+		sess_name = VOICEMMODE2_NAME;
+	else
+		pr_err("%s:, Invalid sess_index\n", __func__);
+
+	return sess_name;
+}
+
+static void hpcm_reset_mixer_config(struct hpcm_drv *prtd)
+{
+	prtd->mixer_conf.sess_indx = -1;
+	prtd->mixer_conf.rx.enable = false;
+	prtd->mixer_conf.rx.direction = -1;
+	prtd->mixer_conf.rx.sample_rate = 0;
+
+	prtd->mixer_conf.tx.enable = false;
+	prtd->mixer_conf.tx.direction = -1;
+	prtd->mixer_conf.tx.sample_rate = 0;
+}
+
+/* Check for valid mixer control values */
+static bool hpcm_is_valid_config(int sess_indx, int tap_point,
+				 uint16_t direction, uint16_t samplerate)
+{
+	if (sess_indx < VOICE_INDEX || sess_indx > VOMMODE2_INDEX) {
+		pr_err("%s: invalid sess_indx :%d\n", __func__, sess_indx);
+		goto error;
+	}
+
+	if (samplerate != VSS_IVPCM_SAMPLING_RATE_8K &&
+	    samplerate != VSS_IVPCM_SAMPLING_RATE_16K) {
+		pr_err("%s: invalid sample rate :%d\n", __func__, samplerate);
+		goto error;
+	}
+
+	if ((tap_point != RX) && (tap_point != TX)) {
+		pr_err("%s: invalid tappoint :%d\n", __func__, tap_point);
+		goto error;
+	}
+
+	if ((direction != VSS_IVPCM_TAP_POINT_DIR_IN) &&
+	    (direction != VSS_IVPCM_TAP_POINT_DIR_OUT) &&
+	    (direction != VSS_IVPCM_TAP_POINT_DIR_OUT_IN)) {
+		pr_err("%s: invalid direction :%d\n", __func__, direction);
+		goto error;
+	}
+
+	return true;
+
+error:
+	return false;
+}
+
+static struct dai_data *hpcm_get_dai_data(char *pcm_id, struct hpcm_drv *prtd)
+{
+	struct dai_data *dai_data = NULL;
+	size_t size = 0;
+
+	if (pcm_id) {
+		size = strlen(pcm_id);
+		/* Check for Voice DAI */
+		if (strnstr(pcm_id, VOICE_TX_CAPTURE_DAI_ID, size)) {
+			dai_data =
+		&prtd->session[VOICE_INDEX].tx_tap_point.capture_dai_data;
+		} else if (strnstr(pcm_id, VOICE_TX_PLAYBACK_DAI_ID, size)) {
+			dai_data =
+		&prtd->session[VOICE_INDEX].tx_tap_point.playback_dai_data;
+		} else if (strnstr(pcm_id, VOICE_RX_CAPTURE_DAI_ID, size)) {
+			dai_data =
+		&prtd->session[VOICE_INDEX].rx_tap_point.capture_dai_data;
+		} else if (strnstr(pcm_id, VOICE_RX_PLAYBACK_DAI_ID, size)) {
+			dai_data =
+		&prtd->session[VOICE_INDEX].rx_tap_point.playback_dai_data;
+		/* Check for VoLTE DAI */
+		} else if (strnstr(pcm_id, VOLTE_TX_CAPTURE_DAI_ID, size)) {
+			dai_data =
+		&prtd->session[VOLTE_INDEX].tx_tap_point.capture_dai_data;
+		} else if (strnstr(pcm_id, VOLTE_TX_PLAYBACK_DAI_ID, size)) {
+			dai_data =
+		&prtd->session[VOLTE_INDEX].tx_tap_point.playback_dai_data;
+		} else if (strnstr(pcm_id, VOLTE_RX_CAPTURE_DAI_ID, size)) {
+			dai_data =
+		&prtd->session[VOLTE_INDEX].rx_tap_point.capture_dai_data;
+		} else if (strnstr(pcm_id, VOLTE_RX_PLAYBACK_DAI_ID, size)) {
+			dai_data =
+		&prtd->session[VOLTE_INDEX].rx_tap_point.playback_dai_data;
+		/* check for VoiceMMode1 DAI */
+		} else if (strnstr(pcm_id, VoMMode1_TX_CAPTURE_DAI_ID, size)) {
+			dai_data =
+		&prtd->session[VOMMODE1_INDEX].tx_tap_point.capture_dai_data;
+		} else if (strnstr(pcm_id, VoMMode1_TX_PLAYBACK_DAI_ID, size)) {
+			dai_data =
+		&prtd->session[VOMMODE1_INDEX].tx_tap_point.playback_dai_data;
+		} else if (strnstr(pcm_id, VoMMode1_RX_CAPTURE_DAI_ID, size)) {
+			dai_data =
+		&prtd->session[VOMMODE1_INDEX].rx_tap_point.capture_dai_data;
+		} else if (strnstr(pcm_id, VoMMode1_RX_PLAYBACK_DAI_ID, size)) {
+			dai_data =
+		&prtd->session[VOMMODE1_INDEX].rx_tap_point.playback_dai_data;
+		/* check for VOiceMMode2 DAI */
+		} else if (strnstr(pcm_id, VoMMode2_TX_CAPTURE_DAI_ID, size)) {
+			dai_data =
+		&prtd->session[VOMMODE2_INDEX].tx_tap_point.capture_dai_data;
+		} else if (strnstr(pcm_id, VoMMode2_TX_PLAYBACK_DAI_ID, size)) {
+			dai_data =
+		&prtd->session[VOMMODE2_INDEX].tx_tap_point.playback_dai_data;
+		} else if (strnstr(pcm_id, VoMMode2_RX_CAPTURE_DAI_ID, size)) {
+			dai_data =
+		&prtd->session[VOMMODE2_INDEX].rx_tap_point.capture_dai_data;
+		} else if (strnstr(pcm_id, VoMMode2_RX_PLAYBACK_DAI_ID, size)) {
+			dai_data =
+		&prtd->session[VOMMODE2_INDEX].rx_tap_point.playback_dai_data;
+
+		} else {
+			pr_err("%s: Wrong dai id\n", __func__);
+		}
+	}
+
+	return dai_data;
+}
+
+static struct tap_point *hpcm_get_tappoint_data(char *pcm_id,
+						struct hpcm_drv *prtd)
+{
+	struct tap_point *tp = NULL;
+	size_t size = 0;
+
+	if (pcm_id) {
+		size = strlen(pcm_id);
+		/* Check for Voice DAI */
+		if (strnstr(pcm_id, VOICE_TX_CAPTURE_DAI_ID, size)) {
+			tp = &prtd->session[VOICE_INDEX].tx_tap_point;
+		} else if (strnstr(pcm_id, VOICE_TX_PLAYBACK_DAI_ID, size)) {
+			tp = &prtd->session[VOICE_INDEX].tx_tap_point;
+		} else if (strnstr(pcm_id, VOICE_RX_CAPTURE_DAI_ID, size)) {
+			tp = &prtd->session[VOICE_INDEX].rx_tap_point;
+		} else if (strnstr(pcm_id, VOICE_RX_PLAYBACK_DAI_ID, size)) {
+			tp = &prtd->session[VOICE_INDEX].rx_tap_point;
+		/* Check for VoLTE DAI */
+		} else if (strnstr(pcm_id, VOLTE_TX_CAPTURE_DAI_ID, size)) {
+			tp = &prtd->session[VOLTE_INDEX].tx_tap_point;
+		} else if (strnstr(pcm_id, VOLTE_TX_PLAYBACK_DAI_ID, size)) {
+			tp = &prtd->session[VOLTE_INDEX].tx_tap_point;
+		} else if (strnstr(pcm_id, VOLTE_RX_CAPTURE_DAI_ID, size)) {
+			tp = &prtd->session[VOLTE_INDEX].rx_tap_point;
+		} else if (strnstr(pcm_id, VOLTE_RX_PLAYBACK_DAI_ID, size)) {
+			tp = &prtd->session[VOLTE_INDEX].rx_tap_point;
+		/* check for VoiceMMode1 */
+		} else if (strnstr(pcm_id, VoMMode1_TX_CAPTURE_DAI_ID, size)) {
+			tp = &prtd->session[VOMMODE1_INDEX].tx_tap_point;
+		} else if (strnstr(pcm_id, VoMMode1_TX_PLAYBACK_DAI_ID, size)) {
+			tp = &prtd->session[VOMMODE1_INDEX].tx_tap_point;
+		} else if (strnstr(pcm_id, VoMMode1_RX_CAPTURE_DAI_ID, size)) {
+			tp = &prtd->session[VOMMODE1_INDEX].rx_tap_point;
+		} else if (strnstr(pcm_id, VoMMode1_RX_PLAYBACK_DAI_ID, size)) {
+			tp = &prtd->session[VOMMODE1_INDEX].rx_tap_point;
+		/* check for VoiceMMode2 */
+		} else if (strnstr(pcm_id, VoMMode2_TX_CAPTURE_DAI_ID, size)) {
+			tp = &prtd->session[VOMMODE2_INDEX].tx_tap_point;
+		} else if (strnstr(pcm_id, VoMMode2_TX_PLAYBACK_DAI_ID, size)) {
+			tp = &prtd->session[VOMMODE2_INDEX].tx_tap_point;
+		} else if (strnstr(pcm_id, VoMMode2_RX_CAPTURE_DAI_ID, size)) {
+			tp = &prtd->session[VOMMODE2_INDEX].rx_tap_point;
+		} else if (strnstr(pcm_id, VoMMode2_RX_PLAYBACK_DAI_ID, size)) {
+			tp = &prtd->session[VOMMODE2_INDEX].rx_tap_point;
+		} else {
+			pr_err("%s: wrong dai id\n", __func__);
+		}
+	}
+
+	return tp;
+}
+
+static struct tappnt_mxr_data *hpcm_get_tappnt_mixer_data(char *pcm_id,
+						struct hpcm_drv *prtd)
+{
+	if (strnstr(pcm_id, VOICE_TX_CAPTURE_DAI_ID, strlen(pcm_id)) ||
+	    strnstr(pcm_id, VOICE_TX_PLAYBACK_DAI_ID, strlen(pcm_id)) ||
+	    strnstr(pcm_id, VOLTE_TX_CAPTURE_DAI_ID, strlen(pcm_id)) ||
+	    strnstr(pcm_id, VOLTE_TX_PLAYBACK_DAI_ID, strlen(pcm_id)) ||
+	    strnstr(pcm_id, VoMMode1_TX_CAPTURE_DAI_ID, strlen(pcm_id)) ||
+	    strnstr(pcm_id, VoMMode1_TX_PLAYBACK_DAI_ID, strlen(pcm_id)) ||
+	    strnstr(pcm_id, VoMMode2_TX_CAPTURE_DAI_ID, strlen(pcm_id)) ||
+	    strnstr(pcm_id, VoMMode2_TX_PLAYBACK_DAI_ID, strlen(pcm_id))) {
+		return &prtd->mixer_conf.tx;
+	} else {
+		return &prtd->mixer_conf.rx;
+	}
+}
+
+static int get_tappnt_value(char *pcm_id)
+{
+	if (strnstr(pcm_id, VOICE_TX_CAPTURE_DAI_ID, strlen(pcm_id)) ||
+	    strnstr(pcm_id, VOICE_TX_PLAYBACK_DAI_ID, strlen(pcm_id)) ||
+	    strnstr(pcm_id, VOLTE_TX_CAPTURE_DAI_ID, strlen(pcm_id)) ||
+	    strnstr(pcm_id, VOLTE_TX_PLAYBACK_DAI_ID, strlen(pcm_id)) ||
+	    strnstr(pcm_id, VoMMode1_TX_CAPTURE_DAI_ID, strlen(pcm_id)) ||
+	    strnstr(pcm_id, VoMMode1_TX_PLAYBACK_DAI_ID, strlen(pcm_id)) ||
+	    strnstr(pcm_id, VoMMode2_TX_CAPTURE_DAI_ID, strlen(pcm_id)) ||
+	    strnstr(pcm_id, VoMMode2_TX_PLAYBACK_DAI_ID, strlen(pcm_id))) {
+		return TX;
+	} else {
+		return RX;
+	}
+}
+
+static bool hpcm_all_dais_are_ready(uint16_t direction, struct tap_point *tp,
+				    enum hpcm_state state)
+{
+	bool dais_started = false;
+
+	/*
+	 * Based on the direction set per tap point in the mixer control,
+	 * all the DAIs of that tap point must have reached the required
+	 * state before commands such as vpcm_map_memory/vpcm_start are
+	 * executed.
+	 */
+	switch (direction) {
+	case VSS_IVPCM_TAP_POINT_DIR_OUT_IN:
+		if ((tp->playback_dai_data.state >= state) &&
+		    (tp->capture_dai_data.state >= state)) {
+			dais_started = true;
+		}
+		break;
+
+	case VSS_IVPCM_TAP_POINT_DIR_IN:
+		if (tp->playback_dai_data.state >= state)
+			dais_started = true;
+		break;
+
+	case VSS_IVPCM_TAP_POINT_DIR_OUT:
+		if (tp->capture_dai_data.state >= state)
+			dais_started = true;
+		break;
+
+	default:
+		pr_err("invalid direction\n");
+	}
+
+	return dais_started;
+}
+
+static void hpcm_create_free_queue(struct snd_dma_buffer *dma_buf,
+				   struct dai_data *dai_data)
+{
+	struct hpcm_buf_node *buf_node = NULL;
+	int i = 0, offset = 0;
+
+	for (i = 0; i < HPCM_MAX_Q_LEN; i++) {
+		buf_node = (void *)dma_buf->area + offset;
+		list_add_tail(&buf_node->list,
+			      &dai_data->free_queue);
+		offset = offset + sizeof(struct hpcm_buf_node);
+	}
+}
+
+static void hpcm_free_allocated_mem(struct hpcm_drv *prtd)
+{
+	phys_addr_t paddr = 0;
+	struct tap_point *txtp = NULL;
+	struct tap_point *rxtp = NULL;
+	struct session *sess = NULL;
+
+	sess = &prtd->session[prtd->mixer_conf.sess_indx];
+	txtp = &sess->tx_tap_point;
+	rxtp = &sess->rx_tap_point;
+	paddr = sess->sess_paddr;
+
+	if (paddr) {
+		msm_audio_ion_free(prtd->ion_client, sess->ion_handle);
+		prtd->ion_client = NULL;
+		sess->ion_handle = NULL;
+		msm_audio_ion_free(sess->tp_mem_table.client,
+				   sess->tp_mem_table.handle);
+		sess->tp_mem_table.client = NULL;
+		sess->tp_mem_table.handle = NULL;
+		sess->sess_paddr = 0;
+		sess->sess_kvaddr = NULL;
+
+		txtp->capture_dai_data.vocpcm_ion_buffer.paddr = 0;
+		txtp->capture_dai_data.vocpcm_ion_buffer.kvaddr = 0;
+
+		txtp->playback_dai_data.vocpcm_ion_buffer.paddr = 0;
+		txtp->playback_dai_data.vocpcm_ion_buffer.kvaddr = 0;
+
+		rxtp->capture_dai_data.vocpcm_ion_buffer.paddr = 0;
+		rxtp->capture_dai_data.vocpcm_ion_buffer.kvaddr = 0;
+
+		rxtp->playback_dai_data.vocpcm_ion_buffer.paddr = 0;
+		rxtp->playback_dai_data.vocpcm_ion_buffer.kvaddr = 0;
+	} else {
+		pr_debug("%s, paddr = 0, nothing to free\n", __func__);
+	}
+}
+
+static void hpcm_unmap_and_free_shared_memory(struct hpcm_drv *prtd)
+{
+	phys_addr_t paddr = 0;
+	char *sess_name = hpcm_get_sess_name(prtd->mixer_conf.sess_indx);
+
+	if (prtd->mixer_conf.sess_indx >= 0)
+		paddr = prtd->session[prtd->mixer_conf.sess_indx].sess_paddr;
+	else
+		paddr = 0;
+
+	if (paddr) {
+		voc_send_cvp_unmap_vocpcm_memory(voc_get_session_id(sess_name));
+		hpcm_free_allocated_mem(prtd);
+	} else {
+		pr_debug("%s, paddr = 0, nothing to unmap/free\n", __func__);
+	}
+}
+
+static int hpcm_map_vocpcm_memory(struct hpcm_drv *prtd)
+{
+	int ret = 0;
+	char *sess_name = hpcm_get_sess_name(prtd->mixer_conf.sess_indx);
+	struct session *sess = &prtd->session[prtd->mixer_conf.sess_indx];
+
+	ret = voc_send_cvp_map_vocpcm_memory(voc_get_session_id(sess_name),
+					     &sess->tp_mem_table,
+					     sess->sess_paddr,
+					     VHPCM_BLOCK_SIZE);
+
+	return ret;
+}
+
+static int hpcm_allocate_shared_memory(struct hpcm_drv *prtd)
+{
+	int result;
+	int ret = 0;
+	size_t mem_len;
+	size_t len;
+	struct tap_point *txtp = NULL;
+	struct tap_point *rxtp = NULL;
+	struct session *sess = NULL;
+
+	sess = &prtd->session[prtd->mixer_conf.sess_indx];
+	txtp = &sess->tx_tap_point;
+	rxtp = &sess->rx_tap_point;
+
+	result = msm_audio_ion_alloc("host_pcm_buffer",
+				     &prtd->ion_client,
+				     &sess->ion_handle,
+				     VHPCM_BLOCK_SIZE,
+				     &sess->sess_paddr,
+				     &mem_len,
+				     &sess->sess_kvaddr);
+	if (result) {
+		pr_err("%s: msm_audio_ion_alloc error, rc = %d\n",
+			__func__, result);
+		sess->sess_paddr = 0;
+		sess->sess_kvaddr = 0;
+		ret = -ENOMEM;
+		goto done;
+	}
+	pr_debug("%s: Host PCM memory block allocated\n", __func__);
+
+	/* Allocate mem_map_table for tap point */
+	result = msm_audio_ion_alloc("host_pcm_table",
+			&sess->tp_mem_table.client,
+			&sess->tp_mem_table.handle,
+			sizeof(struct vss_imemory_table_t),
+			&sess->tp_mem_table.phys,
+			&len,
+			&sess->tp_mem_table.data);
+
+	if (result) {
+		pr_err("%s: msm_audio_ion_alloc error, rc = %d\n",
+			__func__, result);
+		msm_audio_ion_free(prtd->ion_client, sess->ion_handle);
+		prtd->ion_client = NULL;
+		sess->ion_handle = NULL;
+		sess->sess_paddr = 0;
+		sess->sess_kvaddr = 0;
+		ret = -ENOMEM;
+		goto done;
+	}
+	pr_debug("%s:  Host PCM memory table allocated\n", __func__);
+
+	memset(sess->tp_mem_table.data, 0,
+	       sizeof(struct vss_imemory_table_t));
+
+	sess->tp_mem_table.size = sizeof(struct vss_imemory_table_t);
+
+	pr_debug("%s: data %pK phys %pK\n", __func__,
+		 sess->tp_mem_table.data, &sess->tp_mem_table.phys);
+
+	/* Split 4096 block into four 1024 byte blocks for each dai */
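+	/*
+	 * Resulting layout (1024 bytes per region):
+	 *   offset 0    - TX capture,   offset 1024 - TX playback,
+	 *   offset 2048 - RX capture,   offset 3072 - RX playback.
+	 */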
+	txtp->capture_dai_data.vocpcm_ion_buffer.paddr =
+	sess->sess_paddr;
+	txtp->capture_dai_data.vocpcm_ion_buffer.kvaddr =
+	sess->sess_kvaddr;
+
+	txtp->playback_dai_data.vocpcm_ion_buffer.paddr =
+	sess->sess_paddr + VHPCM_BLOCK_SIZE/4;
+	txtp->playback_dai_data.vocpcm_ion_buffer.kvaddr =
+	sess->sess_kvaddr + VHPCM_BLOCK_SIZE/4;
+
+	rxtp->capture_dai_data.vocpcm_ion_buffer.paddr =
+	sess->sess_paddr + (VHPCM_BLOCK_SIZE/4) * 2;
+	rxtp->capture_dai_data.vocpcm_ion_buffer.kvaddr =
+	sess->sess_kvaddr + (VHPCM_BLOCK_SIZE/4) * 2;
+
+	rxtp->playback_dai_data.vocpcm_ion_buffer.paddr =
+	sess->sess_paddr + (VHPCM_BLOCK_SIZE/4) * 3;
+	rxtp->playback_dai_data.vocpcm_ion_buffer.kvaddr =
+	sess->sess_kvaddr + (VHPCM_BLOCK_SIZE/4) * 3;
+
+done:
+	return ret;
+}
+
+static int hpcm_start_vocpcm(char *pcm_id, struct hpcm_drv *prtd,
+			     struct tap_point *tp)
+{
+	int indx = prtd->mixer_conf.sess_indx;
+	uint32_t *no_of_tp = &prtd->start_cmd.no_of_tapoints;
+	struct vss_ivpcm_tap_point *tap_pnt = &prtd->start_cmd.tap_pnt[0];
+	uint32_t no_of_tp_req = 0;
+	char *sess_name = hpcm_get_sess_name(indx);
+
+	if (prtd->mixer_conf.rx.enable)
+		no_of_tp_req++;
+	if (prtd->mixer_conf.tx.enable)
+		no_of_tp_req++;
+
+	if (prtd->mixer_conf.rx.enable && (get_tappnt_value(pcm_id) == RX)) {
+		if (hpcm_all_dais_are_ready(prtd->mixer_conf.rx.direction,
+					    tp, HPCM_PREPARED)) {
+			pr_debug("%s: RX conditions met\n", __func__);
+			tap_pnt[*no_of_tp].tap_point =
+					VSS_IVPCM_TAP_POINT_RX_DEFAULT;
+			tap_pnt[*no_of_tp].direction =
+					prtd->mixer_conf.rx.direction;
+			tap_pnt[*no_of_tp].sampling_rate =
+					prtd->mixer_conf.rx.sample_rate;
+			(*no_of_tp)++;
+		}
+	}
+
+	if (prtd->mixer_conf.tx.enable && (get_tappnt_value(pcm_id) == TX)) {
+		if (hpcm_all_dais_are_ready(prtd->mixer_conf.tx.direction,
+					    tp, HPCM_PREPARED)) {
+			pr_debug("%s: TX conditions met\n", __func__);
+			tap_pnt[*no_of_tp].tap_point =
+						VSS_IVPCM_TAP_POINT_TX_DEFAULT;
+			tap_pnt[*no_of_tp].direction =
+						prtd->mixer_conf.tx.direction;
+			tap_pnt[*no_of_tp].sampling_rate =
+						prtd->mixer_conf.tx.sample_rate;
+			(*no_of_tp)++;
+		}
+	}
+
+	if ((prtd->mixer_conf.tx.enable || prtd->mixer_conf.rx.enable) &&
+	    *no_of_tp == no_of_tp_req) {
+		voc_send_cvp_start_vocpcm(voc_get_session_id(sess_name),
+					  tap_pnt, *no_of_tp);
+		/* Reset the start command so that it is not called twice */
+		memset(&prtd->start_cmd, 0, sizeof(struct start_cmd));
+	} else {
+		pr_debug("%s: required pcm handles not opened yet\n", __func__);
+	}
+
+	return 0;
+}
+
+/* Playback path*/
+static void hpcm_copy_playback_data_from_queue(struct dai_data *dai_data,
+					       uint32_t *len)
+{
+	struct hpcm_buf_node *buf_node = NULL;
+	unsigned long dsp_flags;
+
+	if (dai_data->substream == NULL)
+		return;
+
+	spin_lock_irqsave(&dai_data->dsp_lock, dsp_flags);
+
+	if (!list_empty(&dai_data->filled_queue)) {
+		buf_node = list_first_entry(&dai_data->filled_queue,
+				struct hpcm_buf_node, list);
+		list_del(&buf_node->list);
+		*len = buf_node->frame.len;
+		memcpy((u8 *)dai_data->vocpcm_ion_buffer.kvaddr,
+		       &buf_node->frame.voc_pkt[0],
+		       buf_node->frame.len);
+
+		list_add_tail(&buf_node->list, &dai_data->free_queue);
+		dai_data->pcm_irq_pos += dai_data->pcm_count;
+		spin_unlock_irqrestore(&dai_data->dsp_lock, dsp_flags);
+		snd_pcm_period_elapsed(dai_data->substream);
+	} else {
+		*len = 0;
+		spin_unlock_irqrestore(&dai_data->dsp_lock, dsp_flags);
+		pr_err("IN data not available\n");
+	}
+
+	wake_up(&dai_data->queue_wait);
+}
+
+/* Capture path*/
+static void hpcm_copy_capture_data_to_queue(struct dai_data *dai_data,
+					    uint32_t len)
+{
+	struct hpcm_buf_node *buf_node = NULL;
+	unsigned long dsp_flags;
+
+	if (dai_data->substream == NULL)
+		return;
+
+	/* Copy out buffer packet into free_queue */
+	spin_lock_irqsave(&dai_data->dsp_lock, dsp_flags);
+
+	if (!list_empty(&dai_data->free_queue)) {
+		buf_node = list_first_entry(&dai_data->free_queue,
+					struct hpcm_buf_node, list);
+		list_del(&buf_node->list);
+		buf_node->frame.len = len;
+		memcpy(&buf_node->frame.voc_pkt[0],
+		       (uint8_t *)dai_data->vocpcm_ion_buffer.kvaddr,
+		       buf_node->frame.len);
+		list_add_tail(&buf_node->list, &dai_data->filled_queue);
+		dai_data->pcm_irq_pos += dai_data->pcm_count;
+		spin_unlock_irqrestore(&dai_data->dsp_lock, dsp_flags);
+		snd_pcm_period_elapsed(dai_data->substream);
+	} else {
+		spin_unlock_irqrestore(&dai_data->dsp_lock, dsp_flags);
+		pr_err("OUTPUT data dropped\n");
+	}
+
+	wake_up(&dai_data->queue_wait);
+}
+
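+/*
+ * Per-timetick event handler invoked by the voice driver. On every
+ * VSS_IVPCM_NOTIFY_MASK_TIMETICK the ADSP output buffer is drained into
+ * the capture filled_queue and, for IN directions, the next playback
+ * packet is copied from the playback filled_queue into the mapped ION
+ * buffer, before a push-buffer event is sent back to the DSP.
+ */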
+void hpcm_notify_evt_processing(uint8_t *data, char *session,
+				void *private_data)
+{
+	struct hpcm_drv *prtd = (struct hpcm_drv *)private_data;
+	struct vss_ivpcm_evt_notify_v2_t *notify_evt =
+				(struct vss_ivpcm_evt_notify_v2_t *)data;
+	struct vss_ivpcm_evt_push_buffer_v2_t push_buff_event;
+	struct tap_point *tp = NULL;
+	uint32_t in_buf_len = 0;
+	struct tappnt_mxr_data *tmd = NULL;
+	char *sess_name = hpcm_get_sess_name(prtd->mixer_conf.sess_indx);
+
+	/* If it's not a timetick, it's an error notification; drop the event */
+	if ((notify_evt->notify_mask & VSS_IVPCM_NOTIFY_MASK_TIMETICK) == 0) {
+		pr_err("%s: Error notification. mask=%d\n", __func__,
+			notify_evt->notify_mask);
+		return;
+	}
+
+	if (notify_evt->tap_point == VSS_IVPCM_TAP_POINT_TX_DEFAULT) {
+		tp = &prtd->session[prtd->mixer_conf.sess_indx].tx_tap_point;
+		tmd = &prtd->mixer_conf.tx;
+	} else if (notify_evt->tap_point == VSS_IVPCM_TAP_POINT_RX_DEFAULT) {
+		tp = &prtd->session[prtd->mixer_conf.sess_indx].rx_tap_point;
+		tmd = &prtd->mixer_conf.rx;
+	}
+
+	if (tp == NULL || tmd == NULL) {
+		pr_err("%s: tp = %pK or tmd = %pK is null\n", __func__,
+		       tp, tmd);
+
+		return;
+	}
+
+	if (notify_evt->notify_mask & VSS_IVPCM_NOTIFY_MASK_OUTPUT_BUFFER) {
+		hpcm_copy_capture_data_to_queue(&tp->capture_dai_data,
+						notify_evt->filled_out_size);
+	}
+
+	if (notify_evt->notify_mask & VSS_IVPCM_NOTIFY_MASK_INPUT_BUFFER) {
+		hpcm_copy_playback_data_from_queue(&tp->playback_dai_data,
+						   &in_buf_len);
+	}
+
+	switch (tmd->direction) {
+	/*
+	 * When the dir is OUT_IN, for the first notify event the push buf
+	 * mask should be set to VSS_IVPCM_PUSH_BUFFER_MASK_OUTPUT_BUFFER,
+	 * since we need at least one buffer's worth of data before we can
+	 * send an IN buffer. For consecutive notify events, the push buf
+	 * mask will be set for both
+	 * VSS_IVPCM_PUSH_BUFFER_MASK_OUTPUT_BUFFER and
+	 * VSS_IVPCM_PUSH_BUFFER_MASK_INPUT_BUFFER.
+	 */
+	case VSS_IVPCM_TAP_POINT_DIR_OUT_IN:
+		if (notify_evt->notify_mask ==
+		    VSS_IVPCM_NOTIFY_MASK_TIMETICK) {
+			push_buff_event.push_buf_mask =
+				VSS_IVPCM_PUSH_BUFFER_MASK_OUTPUT_BUFFER;
+		} else {
+			push_buff_event.push_buf_mask =
+			   VSS_IVPCM_PUSH_BUFFER_MASK_OUTPUT_BUFFER |
+			   VSS_IVPCM_PUSH_BUFFER_MASK_INPUT_BUFFER;
+		}
+		break;
+
+	case VSS_IVPCM_TAP_POINT_DIR_IN:
+		push_buff_event.push_buf_mask =
+			VSS_IVPCM_PUSH_BUFFER_MASK_INPUT_BUFFER;
+		break;
+
+	case VSS_IVPCM_TAP_POINT_DIR_OUT:
+		push_buff_event.push_buf_mask =
+			 VSS_IVPCM_PUSH_BUFFER_MASK_OUTPUT_BUFFER;
+		break;
+	}
+
+	push_buff_event.tap_point = notify_evt->tap_point;
+	push_buff_event.out_buf_mem_address =
+		      tp->capture_dai_data.vocpcm_ion_buffer.paddr;
+	push_buff_event.in_buf_mem_address =
+		      tp->playback_dai_data.vocpcm_ion_buffer.paddr;
+	push_buff_event.sampling_rate = notify_evt->sampling_rate;
+	push_buff_event.num_in_channels = 1;
+
+	/*
+	 * ADSP must read and write from a cache aligned (128 byte) location,
+	 * and in blocks of the cache alignment size. The 128 byte cache
+	 * alignment requirement is guaranteed due to the 4096 byte memory
+	 * alignment requirement during memory allocation/mapping. The output
+	 * buffer (ADSP write) size mask ensures that a 128 byte multiple
+	 * worth of data will be written. Internally, the input buffer (ADSP
+	 * read) size will also be a multiple of 128 bytes. However it is the
+	 * application's responsibility to ensure no other data is written in
+	 * the specified length of memory.
+	 */
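+	/*
+	 * Note that adding the full CACHE_ALIGNMENT_SIZE before masking
+	 * (rather than the usual (size + 127) & ~127 round-up idiom) bumps
+	 * an already aligned request_buf_size up by a further 128 bytes,
+	 * e.g. 512 becomes 640.
+	 */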
+	push_buff_event.out_buf_mem_size = ((notify_evt->request_buf_size) +
+				CACHE_ALIGNMENT_SIZE) & CACHE_ALIGNMENT_MASK;
+	push_buff_event.in_buf_mem_size = in_buf_len;
+
+	voc_send_cvp_vocpcm_push_buf_evt(voc_get_session_id(sess_name),
+					 &push_buff_event);
+}
+
+static int msm_hpcm_configure_voice_put(struct snd_kcontrol *kcontrol,
+					struct snd_ctl_elem_value *ucontrol)
+{
+	int tap_point = ucontrol->value.integer.value[0];
+	uint16_t direction = ucontrol->value.integer.value[1];
+	uint16_t sample_rate = ucontrol->value.integer.value[2];
+	struct tappnt_mxr_data *tmd = NULL;
+	int ret = 0;
+
+	mutex_lock(&hpcm_drv.lock);
+	pr_debug("%s: tap_point = %d direction = %d sample_rate = %d\n",
+		 __func__, tap_point, direction, sample_rate);
+
+	if (!hpcm_is_valid_config(VOICE_INDEX, tap_point, direction,
+				  sample_rate)) {
+		pr_err("Invalid vpcm mixer control voice values\n");
+		ret = -EINVAL;
+		goto done;
+	}
+
+	if (tap_point == RX)
+		tmd = &hpcm_drv.mixer_conf.rx;
+	else
+		tmd = &hpcm_drv.mixer_conf.tx;
+
+	tmd->enable = true;
+	tmd->direction = direction;
+	tmd->sample_rate = sample_rate;
+	hpcm_drv.mixer_conf.sess_indx = VOICE_INDEX;
+
+done:
+	mutex_unlock(&hpcm_drv.lock);
+	return ret;
+}
+
+static int msm_hpcm_configure_vmmode1_put(struct snd_kcontrol *kcontrol,
+					  struct snd_ctl_elem_value *ucontrol)
+{
+	int tap_point = ucontrol->value.integer.value[0];
+	uint16_t direction = ucontrol->value.integer.value[1];
+	uint16_t sample_rate = ucontrol->value.integer.value[2];
+	struct tappnt_mxr_data *tmd = NULL;
+	int ret = 0;
+
+	mutex_lock(&hpcm_drv.lock);
+	pr_debug("%s: tap_point = %d direction = %d sample_rate = %d\n",
+		 __func__, tap_point, direction, sample_rate);
+
+	if (!hpcm_is_valid_config(VOMMODE1_INDEX, tap_point, direction,
+				  sample_rate)) {
+		pr_err("Invalid vpcm mixer control voice values\n");
+		ret = -EINVAL;
+		goto done;
+	}
+
+	if (tap_point == RX)
+		tmd = &hpcm_drv.mixer_conf.rx;
+	else
+		tmd = &hpcm_drv.mixer_conf.tx;
+
+	tmd->enable = true;
+	tmd->direction = direction;
+	tmd->sample_rate = sample_rate;
+	hpcm_drv.mixer_conf.sess_indx = VOMMODE1_INDEX;
+
+done:
+	mutex_unlock(&hpcm_drv.lock);
+	return ret;
+}
+
+static int msm_hpcm_configure_vmmode2_put(struct snd_kcontrol *kcontrol,
+					  struct snd_ctl_elem_value *ucontrol)
+{
+	int tap_point = ucontrol->value.integer.value[0];
+	uint16_t direction = ucontrol->value.integer.value[1];
+	uint16_t sample_rate = ucontrol->value.integer.value[2];
+	struct tappnt_mxr_data *tmd = NULL;
+	int ret = 0;
+
+	mutex_lock(&hpcm_drv.lock);
+	pr_debug("%s: tap_point = %d direction = %d sample_rate = %d\n",
+		 __func__, tap_point, direction, sample_rate);
+
+	if (!hpcm_is_valid_config(VOMMODE2_INDEX, tap_point, direction,
+				  sample_rate)) {
+		pr_err("Invalid vpcm mixer control voice values\n");
+		ret = -EINVAL;
+		goto done;
+	}
+
+	if (tap_point == RX)
+		tmd = &hpcm_drv.mixer_conf.rx;
+	else
+		tmd = &hpcm_drv.mixer_conf.tx;
+
+	tmd->enable = true;
+	tmd->direction = direction;
+	tmd->sample_rate = sample_rate;
+	hpcm_drv.mixer_conf.sess_indx = VOMMODE2_INDEX;
+
+done:
+	mutex_unlock(&hpcm_drv.lock);
+	return ret;
+}
+
+static int msm_hpcm_configure_volte_put(struct snd_kcontrol *kcontrol,
+					struct snd_ctl_elem_value *ucontrol)
+{
+	int tap_point = ucontrol->value.integer.value[0];
+	uint16_t direction = ucontrol->value.integer.value[1];
+	uint16_t sample_rate = ucontrol->value.integer.value[2];
+	struct tappnt_mxr_data *tmd = NULL;
+	int ret = 0;
+
+	mutex_lock(&hpcm_drv.lock);
+	pr_debug("%s: tap_point=%d direction=%d sample_rate=%d\n",
+		 __func__, tap_point, direction, sample_rate);
+
+	if (!hpcm_is_valid_config(VOLTE_INDEX, tap_point, direction,
+				  sample_rate)) {
+		pr_err("Invalid vpcm mixer control volte values\n");
+		ret = -EINVAL;
+		goto done;
+	}
+
+	if (tap_point == RX)
+		tmd = &hpcm_drv.mixer_conf.rx;
+	else
+		tmd = &hpcm_drv.mixer_conf.tx;
+
+	tmd->enable = true;
+	tmd->direction = direction;
+	tmd->sample_rate = sample_rate;
+	hpcm_drv.mixer_conf.sess_indx = VOLTE_INDEX;
+
+done:
+	mutex_unlock(&hpcm_drv.lock);
+	return ret;
+}
+
+static struct snd_kcontrol_new msm_hpcm_controls[] = {
+	SOC_SINGLE_MULTI_EXT("HPCM_Voice tappoint direction samplerate",
+			     SND_SOC_NOPM, 0, 16000, 0, 3,
+			     NULL, msm_hpcm_configure_voice_put),
+	SOC_SINGLE_MULTI_EXT("HPCM_VoLTE tappoint direction samplerate",
+			     SND_SOC_NOPM, 0, 16000, 0, 3,
+			     NULL, msm_hpcm_configure_volte_put),
+	SOC_SINGLE_MULTI_EXT("HPCM_VMMode1 tappoint direction samplerate",
+			     SND_SOC_NOPM, 0, 16000, 0, 3,
+			     NULL, msm_hpcm_configure_vmmode1_put),
+	SOC_SINGLE_MULTI_EXT("HPCM_VMMode2 tappoint direction samplerate",
+			     SND_SOC_NOPM, 0, 16000, 0, 3,
+			     NULL, msm_hpcm_configure_vmmode2_put),
+};
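+
+/*
+ * Illustrative usage (values are examples only): each control takes the
+ * three integers <tap_point, direction, sample_rate>, e.g. from userspace
+ *   amixer cset name='HPCM_Voice tappoint direction samplerate' 1,2,8000
+ * where 1 selects the RX tap point (see the RX/TX enum above), 2 is a
+ * VSS_IVPCM_TAP_POINT_DIR_* value and 8000 is the sampling rate in Hz.
+ */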
+
+/* Sample rates supported */
+static unsigned int supported_sample_rates[] = {8000, 16000};
+
+static struct snd_pcm_hw_constraint_list constraints_sample_rates = {
+	.count = ARRAY_SIZE(supported_sample_rates),
+	.list = supported_sample_rates,
+	.mask = 0,
+};
+
+static int msm_pcm_close(struct snd_pcm_substream *substream)
+{
+	int ret = 0;
+	struct list_head *ptr = NULL;
+	struct list_head *next = NULL;
+	struct hpcm_buf_node *buf_node = NULL;
+	struct snd_dma_buffer *dma_buf;
+	struct snd_pcm_runtime *runtime;
+	struct hpcm_drv *prtd;
+	unsigned long dsp_flags;
+	struct dai_data *dai_data = NULL;
+	struct tap_point *tp = NULL;
+	struct tappnt_mxr_data *tmd = NULL;
+	char *sess_name = NULL;
+
+	if (substream == NULL) {
+		pr_err("substream is NULL\n");
+		return -EINVAL;
+	}
+
+	pr_debug("%s, %s\n", __func__, substream->pcm->id);
+	runtime = substream->runtime;
+	prtd = runtime->private_data;
+	sess_name = hpcm_get_sess_name(prtd->mixer_conf.sess_indx);
+	dai_data = hpcm_get_dai_data(substream->pcm->id, prtd);
+
+	if (dai_data == NULL) {
+		pr_err("%s, dai_data is NULL\n", __func__);
+
+		ret = -EINVAL;
+		goto done;
+	}
+
+	wake_up(&dai_data->queue_wait);
+	mutex_lock(&prtd->lock);
+
+	tmd = hpcm_get_tappnt_mixer_data(substream->pcm->id, prtd);
+
+	tp = hpcm_get_tappoint_data(substream->pcm->id, prtd);
+	/* Send stop command */
+	voc_send_cvp_stop_vocpcm(voc_get_session_id(sess_name));
+	/* Memory unmap/free takes place only when called the first time */
+	hpcm_unmap_and_free_shared_memory(prtd);
+	/* Unregister host PCM event callback function */
+	voc_deregister_hpcm_evt_cb();
+	/* Reset the cached start cmd */
+	memset(&prtd->start_cmd, 0, sizeof(struct start_cmd));
+	/* Release all buffer */
+	pr_debug("%s: Release all buffer\n", __func__);
+	substream = dai_data->substream;
+	if (substream == NULL) {
+		pr_debug("%s: substream is NULL\n", __func__);
+		goto done;
+	}
+	dma_buf = &substream->dma_buffer;
+	if (dma_buf == NULL) {
+		pr_debug("%s: dma_buf is NULL\n", __func__);
+		goto done;
+	}
+	if (dma_buf->area != NULL) {
+		spin_lock_irqsave(&dai_data->dsp_lock, dsp_flags);
+		list_for_each_safe(ptr, next, &dai_data->filled_queue) {
+			buf_node = list_entry(ptr,
+					struct hpcm_buf_node, list);
+			list_del(&buf_node->list);
+		}
+		list_for_each_safe(ptr, next, &dai_data->free_queue) {
+			buf_node = list_entry(ptr,
+					struct hpcm_buf_node, list);
+			list_del(&buf_node->list);
+		}
+		spin_unlock_irqrestore(&dai_data->dsp_lock, dsp_flags);
+		dma_free_coherent(substream->pcm->card->dev,
+			runtime->hw.buffer_bytes_max, dma_buf->area,
+			dma_buf->addr);
+		dma_buf->area = NULL;
+	}
+	dai_data->substream = NULL;
+	dai_data->pcm_buf_pos = 0;
+	dai_data->pcm_count = 0;
+	dai_data->pcm_irq_pos = 0;
+	dai_data->pcm_size = 0;
+	dai_data->state = HPCM_CLOSED;
+	hpcm_reset_mixer_config(prtd);
+
+done:
+	mutex_unlock(&prtd->lock);
+	return ret;
+}
+
+static int msm_pcm_playback_copy(struct snd_pcm_substream *substream,
+				 int channel, snd_pcm_uframes_t hwoff,
+				 void __user *buf, snd_pcm_uframes_t frames)
+{
+	int ret = 0;
+	struct hpcm_buf_node *buf_node = NULL;
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	struct hpcm_drv *prtd = runtime->private_data;
+	struct dai_data *dai_data = hpcm_get_dai_data(substream->pcm->id, prtd);
+	unsigned long dsp_flags;
+
+	int count = frames_to_bytes(runtime, frames);
+
+	if (dai_data == NULL) {
+		pr_err("%s, dai_data is null\n", __func__);
+
+		ret = -EINVAL;
+		goto done;
+	}
+
+	ret = wait_event_interruptible_timeout(dai_data->queue_wait,
+				(!list_empty(&dai_data->free_queue) ||
+				dai_data->state == HPCM_STOPPED),
+				1 * HZ);
+	if (ret > 0) {
+		if (count <= HPCM_MAX_VOC_PKT_SIZE) {
+			spin_lock_irqsave(&dai_data->dsp_lock, dsp_flags);
+			buf_node =
+				list_first_entry(&dai_data->free_queue,
+						struct hpcm_buf_node, list);
+			list_del(&buf_node->list);
+			spin_unlock_irqrestore(&dai_data->dsp_lock, dsp_flags);
+			ret = copy_from_user(&buf_node->frame.voc_pkt, buf,
+					     count);
+			if (ret)
+				ret = -EFAULT;
+			buf_node->frame.len = count;
+			spin_lock_irqsave(&dai_data->dsp_lock, dsp_flags);
+			list_add_tail(&buf_node->list, &dai_data->filled_queue);
+			spin_unlock_irqrestore(&dai_data->dsp_lock, dsp_flags);
+		} else {
+			pr_err("%s: Write cnt %d is > HPCM_MAX_VOC_PKT_SIZE\n",
+				__func__, count);
+			ret = -ENOMEM;
+		}
+	} else if (ret == 0) {
+		pr_err("%s: No free Playback buffer\n", __func__);
+		ret = -ETIMEDOUT;
+	} else {
+		pr_err("%s: playback copy  was interrupted\n", __func__);
+	}
+
+done:
+	return ret;
+}
+
+static int msm_pcm_capture_copy(struct snd_pcm_substream *substream,
+				int channel, snd_pcm_uframes_t hwoff,
+				void __user *buf, snd_pcm_uframes_t frames)
+{
+	int ret = 0;
+	int count = 0;
+	struct hpcm_buf_node *buf_node = NULL;
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	struct hpcm_drv *prtd = runtime->private_data;
+	struct dai_data *dai_data = hpcm_get_dai_data(substream->pcm->id, prtd);
+	unsigned long dsp_flags;
+
+	if (dai_data == NULL) {
+		pr_err("%s, dai_data is null\n", __func__);
+
+		ret = -EINVAL;
+		goto done;
+	}
+
+	count = frames_to_bytes(runtime, frames);
+
+	ret = wait_event_interruptible_timeout(dai_data->queue_wait,
+				(!list_empty(&dai_data->filled_queue) ||
+				dai_data->state == HPCM_STOPPED),
+				1 * HZ);
+
+	if (ret > 0) {
+		if (count <= HPCM_MAX_VOC_PKT_SIZE) {
+			spin_lock_irqsave(&dai_data->dsp_lock, dsp_flags);
+			buf_node = list_first_entry(&dai_data->filled_queue,
+					struct hpcm_buf_node, list);
+			list_del(&buf_node->list);
+			spin_unlock_irqrestore(&dai_data->dsp_lock, dsp_flags);
+			ret = copy_to_user(buf, &buf_node->frame.voc_pkt,
+					   buf_node->frame.len);
+			if (ret) {
+				pr_err("%s: Copy to user retuned %d\n",
+					__func__, ret);
+				ret = -EFAULT;
+			}
+			spin_lock_irqsave(&dai_data->dsp_lock, dsp_flags);
+			list_add_tail(&buf_node->list, &dai_data->free_queue);
+			spin_unlock_irqrestore(&dai_data->dsp_lock, dsp_flags);
+
+		} else {
+			pr_err("%s: Read count %d > HPCM_MAX_VOC_PKT_SIZE\n",
+				__func__, count);
+			ret = -ENOMEM;
+		}
+
+	} else if (ret == 0) {
+		pr_err("%s: No Caputre data available\n", __func__);
+		ret = -ETIMEDOUT;
+	} else {
+		pr_err("%s: Read was interrupted\n", __func__);
+		ret = -ERESTARTSYS;
+	}
+
+done:
+	return ret;
+}
+
+static int msm_pcm_copy(struct snd_pcm_substream *substream, int channel,
+			snd_pcm_uframes_t hwoff, void __user *buf,
+			snd_pcm_uframes_t frames)
+{
+	int ret = 0;
+
+	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+		ret = msm_pcm_playback_copy(substream, channel,
+					    hwoff, buf, frames);
+	else if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
+		ret = msm_pcm_capture_copy(substream, channel,
+					   hwoff, buf, frames);
+
+	return ret;
+}
+
+static snd_pcm_uframes_t msm_pcm_pointer(struct snd_pcm_substream *substream)
+{
+	struct dai_data *dai_data = NULL;
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	struct hpcm_drv *prtd = runtime->private_data;
+	snd_pcm_uframes_t ret;
+
+	dai_data = hpcm_get_dai_data(substream->pcm->id, prtd);
+
+	if (dai_data == NULL) {
+		pr_err("%s, dai_data is null\n", __func__);
+
+		ret = 0;
+		goto done;
+	}
+
+	if (dai_data->pcm_irq_pos >= dai_data->pcm_size)
+		dai_data->pcm_irq_pos = 0;
+
+	ret = bytes_to_frames(runtime, (dai_data->pcm_irq_pos));
+
+done:
+	return ret;
+}
+
+static int msm_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
+{
+	int ret = 0;
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	struct hpcm_drv *prtd = runtime->private_data;
+	struct dai_data *dai_data =
+			hpcm_get_dai_data(substream->pcm->id, prtd);
+
+	if (dai_data == NULL) {
+		pr_err("%s, dai_data is null\n", __func__);
+
+		ret = -EINVAL;
+		goto done;
+	}
+
+	pr_debug("%s, %s\n", __func__, substream->pcm->id);
+
+	switch (cmd) {
+	case SNDRV_PCM_TRIGGER_START:
+		pr_debug("SNDRV_PCM_TRIGGER_START\n");
+		dai_data->state = HPCM_STARTED;
+		break;
+
+	case SNDRV_PCM_TRIGGER_STOP:
+		pr_debug("SNDRV_PCM_TRIGGER_STOP\n");
+		dai_data->state = HPCM_STOPPED;
+		break;
+
+	default:
+		ret = -EINVAL;
+		break;
+	}
+
+done:
+	return ret;
+}
+
+static int msm_pcm_prepare(struct snd_pcm_substream *substream)
+{
+	int ret = 0;
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	struct hpcm_drv *prtd = runtime->private_data;
+	struct dai_data *dai_data = NULL;
+	struct tap_point *tp = NULL;
+
+	pr_debug("%s, %s\n", __func__, substream->pcm->id);
+	mutex_lock(&prtd->lock);
+
+	dai_data = hpcm_get_dai_data(substream->pcm->id, prtd);
+
+	if (dai_data == NULL) {
+		pr_err("%s, dai_data is null\n", __func__);
+
+		ret = -EINVAL;
+		goto done;
+	}
+
+	dai_data->pcm_size  = snd_pcm_lib_buffer_bytes(substream);
+	dai_data->pcm_count = snd_pcm_lib_period_bytes(substream);
+	dai_data->pcm_irq_pos = 0;
+	dai_data->pcm_buf_pos = 0;
+	dai_data->state = HPCM_PREPARED;
+
+	/* Register event notify processing callback in prepare instead of
+	 * init() as q6voice module's init() can be called at a later point
+	 */
+	voc_register_hpcm_evt_cb(hpcm_notify_evt_processing, &hpcm_drv);
+
+	tp = hpcm_get_tappoint_data(substream->pcm->id, prtd);
+	if (tp != NULL) {
+		ret = hpcm_start_vocpcm(substream->pcm->id, prtd, tp);
+		if (ret) {
+			pr_err("error sending start cmd err=%d\n", ret);
+			goto done;
+		}
+	} else {
+		pr_err("%s tp is NULL\n", __func__);
+	}
+done:
+	mutex_unlock(&prtd->lock);
+	return ret;
+}
+
+static int msm_pcm_hw_params(struct snd_pcm_substream *substream,
+			     struct snd_pcm_hw_params *params)
+{
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	struct snd_dma_buffer *dma_buf = &substream->dma_buffer;
+	struct hpcm_drv *prtd = (struct hpcm_drv *)runtime->private_data;
+	int ret = 0;
+
+	pr_debug("%s: %s\n", __func__, substream->pcm->id);
+	mutex_lock(&prtd->lock);
+
+	/* Allocate and map voice host PCM ion buffer */
+	if (prtd->session[prtd->mixer_conf.sess_indx].sess_paddr == 0) {
+		ret = hpcm_allocate_shared_memory(prtd);
+		if (ret) {
+			pr_err("error creating shared memory err=%d\n", ret);
+			goto done;
+		}
+
+		ret = hpcm_map_vocpcm_memory(prtd);
+		if (ret) {
+			pr_err("error mapping shared memory err=%d\n", ret);
+			hpcm_free_allocated_mem(prtd);
+			goto done;
+		}
+	} else {
+		pr_debug("%s, VHPCM memory allocation/mapping not performed\n"
+			 , __func__);
+	}
+
+	dma_buf->dev.type = SNDRV_DMA_TYPE_DEV;
+	dma_buf->dev.dev = substream->pcm->card->dev;
+	dma_buf->private_data = NULL;
+
+	dma_buf->area = dma_alloc_coherent(substream->pcm->card->dev,
+			runtime->hw.buffer_bytes_max,
+			&dma_buf->addr, GFP_KERNEL);
+
+	if (!dma_buf->area) {
+		pr_err("%s:MSM dma_alloc failed\n", __func__);
+		ret = -ENOMEM;
+		goto done;
+	}
+
+	dma_buf->bytes = runtime->hw.buffer_bytes_max;
+	memset(dma_buf->area, 0, runtime->hw.buffer_bytes_max);
+
+	hpcm_create_free_queue(dma_buf,
+		hpcm_get_dai_data(substream->pcm->id, prtd));
+
+	snd_pcm_set_runtime_buffer(substream, &substream->dma_buffer);
+
+done:
+	mutex_unlock(&prtd->lock);
+	return ret;
+}
+
+static int msm_pcm_open(struct snd_pcm_substream *substream)
+{
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	struct hpcm_drv *prtd = &hpcm_drv;
+	struct tappnt_mxr_data *tmd = NULL;
+	struct dai_data *dai_data = NULL;
+	int ret = 0;
+	int tp_val = 0;
+
+	pr_debug("%s, %s\n", __func__, substream->pcm->id);
+	mutex_lock(&prtd->lock);
+
+	dai_data = hpcm_get_dai_data(substream->pcm->id, prtd);
+
+	if (dai_data == NULL) {
+		pr_err("%s, dai_data is null\n", __func__);
+
+		ret = -EINVAL;
+		goto done;
+	}
+
+	runtime->hw = msm_pcm_hardware;
+
+	ret = snd_pcm_hw_constraint_list(runtime, 0, SNDRV_PCM_HW_PARAM_RATE,
+					 &constraints_sample_rates);
+	if (ret < 0)
+		pr_debug("snd_pcm_hw_constraint_list failed\n");
+
+	ret = snd_pcm_hw_constraint_integer(runtime,
+					    SNDRV_PCM_HW_PARAM_PERIODS);
+	if (ret < 0) {
+		pr_debug("snd_pcm_hw_constraint_integer failed\n");
+		goto done;
+	}
+
+	tp_val = get_tappnt_value(substream->pcm->id);
+	tmd = hpcm_get_tappnt_mixer_data(substream->pcm->id, prtd);
+
+	/* Check whether the kcontrol values set are valid */
+	if (!tmd ||
+	    !(tmd->enable) ||
+	    !hpcm_is_valid_config(prtd->mixer_conf.sess_indx,
+				  tp_val, tmd->direction,
+				  tmd->sample_rate)) {
+		ret = -EINVAL;
+		goto done;
+	}
+
+	dai_data->substream = substream;
+	runtime->private_data = prtd;
+
+done:
+	mutex_unlock(&prtd->lock);
+	return ret;
+}
+
+static struct snd_pcm_ops msm_pcm_ops = {
+	.open           = msm_pcm_open,
+	.hw_params      = msm_pcm_hw_params,
+	.prepare        = msm_pcm_prepare,
+	.trigger        = msm_pcm_trigger,
+	.pointer        = msm_pcm_pointer,
+	.copy           = msm_pcm_copy,
+	.close          = msm_pcm_close,
+};
+
+static int msm_asoc_pcm_new(struct snd_soc_pcm_runtime *rtd)
+{
+	struct snd_card *card = rtd->card->snd_card;
+
+	pr_debug("%s:\n", __func__);
+	if (!card->dev->coherent_dma_mask)
+		card->dev->coherent_dma_mask = DMA_BIT_MASK(32);
+
+	return 0;
+}
+
+static int msm_pcm_hpcm_probe(struct snd_soc_platform *platform)
+{
+	snd_soc_add_platform_controls(platform, msm_hpcm_controls,
+				ARRAY_SIZE(msm_hpcm_controls));
+
+	return 0;
+}
+
+static struct snd_soc_platform_driver msm_soc_platform = {
+	.ops		= &msm_pcm_ops,
+	.pcm_new	= msm_asoc_pcm_new,
+	.probe		= msm_pcm_hpcm_probe,
+};
+
+static int msm_pcm_probe(struct platform_device *pdev)
+{
+	pr_info("%s: dev name %s\n", __func__, dev_name(&pdev->dev));
+	return snd_soc_register_platform(&pdev->dev, &msm_soc_platform);
+}
+
+static int msm_pcm_remove(struct platform_device *pdev)
+{
+	snd_soc_unregister_platform(&pdev->dev);
+	return 0;
+}
+
+static const struct of_device_id msm_voice_host_pcm_dt_match[] = {
+	{.compatible = "qcom,msm-voice-host-pcm"},
+	{}
+};
+MODULE_DEVICE_TABLE(of, msm_voice_host_pcm_dt_match);
+
+static struct platform_driver msm_pcm_driver = {
+	.driver = {
+		.name = "msm-voice-host-pcm",
+		.owner = THIS_MODULE,
+		.of_match_table = msm_voice_host_pcm_dt_match,
+	},
+	.probe = msm_pcm_probe,
+	.remove = msm_pcm_remove,
+};
+
+static int __init msm_soc_platform_init(void)
+{
+	int i = 0;
+	struct session *s = NULL;
+
+	memset(&hpcm_drv, 0, sizeof(hpcm_drv));
+	mutex_init(&hpcm_drv.lock);
+
+	for (i = 0; i < MAX_SESSION; i++) {
+		s = &hpcm_drv.session[i];
+		spin_lock_init(&s->rx_tap_point.capture_dai_data.dsp_lock);
+		spin_lock_init(&s->rx_tap_point.playback_dai_data.dsp_lock);
+		spin_lock_init(&s->tx_tap_point.capture_dai_data.dsp_lock);
+		spin_lock_init(&s->tx_tap_point.playback_dai_data.dsp_lock);
+
+		init_waitqueue_head(
+			&s->rx_tap_point.capture_dai_data.queue_wait);
+		init_waitqueue_head(
+			&s->rx_tap_point.playback_dai_data.queue_wait);
+		init_waitqueue_head(
+			&s->tx_tap_point.capture_dai_data.queue_wait);
+		init_waitqueue_head(
+			&s->tx_tap_point.playback_dai_data.queue_wait);
+
+		INIT_LIST_HEAD(&s->rx_tap_point.capture_dai_data.filled_queue);
+		INIT_LIST_HEAD(&s->rx_tap_point.capture_dai_data.free_queue);
+		INIT_LIST_HEAD(&s->rx_tap_point.playback_dai_data.filled_queue);
+		INIT_LIST_HEAD(&s->rx_tap_point.playback_dai_data.free_queue);
+
+		INIT_LIST_HEAD(&s->tx_tap_point.capture_dai_data.filled_queue);
+		INIT_LIST_HEAD(&s->tx_tap_point.capture_dai_data.free_queue);
+		INIT_LIST_HEAD(&s->tx_tap_point.playback_dai_data.filled_queue);
+		INIT_LIST_HEAD(&s->tx_tap_point.playback_dai_data.free_queue);
+	}
+
+	return platform_driver_register(&msm_pcm_driver);
+}
+module_init(msm_soc_platform_init);
+
+static void __exit msm_soc_platform_exit(void)
+{
+	platform_driver_unregister(&msm_pcm_driver);
+}
+module_exit(msm_soc_platform_exit);
+
+MODULE_DESCRIPTION("PCM module platform driver");
+MODULE_LICENSE("GPL v2");
diff -Nruw linux-4.4.115-fbx/sound/soc/msm./qdsp6v2/msm-pcm-loopback-v2.c linux-4.4.115-fbx/sound/soc/msm/qdsp6v2/msm-pcm-loopback-v2.c
--- linux-4.4.115-fbx/sound/soc/msm./qdsp6v2/msm-pcm-loopback-v2.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/sound/soc/msm/qdsp6v2/msm-pcm-loopback-v2.c	2019-10-29 09:26:26.149227702 +0100
@@ -0,0 +1,800 @@
+/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/dma-mapping.h>
+#include <sound/apr_audio-v2.h>
+#include <sound/core.h>
+#include <sound/soc.h>
+#include <sound/q6asm-v2.h>
+#include <sound/pcm.h>
+#include <sound/initval.h>
+#include <sound/control.h>
+#include <sound/tlv.h>
+#include <asm/dma.h>
+#include <sound/q6audio-v2.h>
+
+#include "msm-pcm-routing-v2.h"
+
+#define LOOPBACK_VOL_MAX_STEPS 0x2000
+#define LOOPBACK_SESSION_MAX 4
+
+static DEFINE_MUTEX(loopback_session_lock);
+static const DECLARE_TLV_DB_LINEAR(loopback_rx_vol_gain, 0,
+				LOOPBACK_VOL_MAX_STEPS);
+
+struct msm_pcm_loopback {
+	struct snd_pcm_substream *playback_substream;
+	struct snd_pcm_substream *capture_substream;
+
+	int instance;
+
+	struct mutex lock;
+
+	uint32_t samp_rate;
+	uint32_t channel_mode;
+
+	int playback_start;
+	int capture_start;
+	int session_id;
+	struct audio_client *audio_client;
+	uint32_t volume;
+};
+
+struct fe_dai_session_map {
+	char stream_name[32];
+	struct msm_pcm_loopback *loopback_priv;
+};
+
+static struct fe_dai_session_map session_map[LOOPBACK_SESSION_MAX] = {
+	{ {}, NULL},
+	{ {}, NULL},
+	{ {}, NULL},
+	{ {}, NULL},
+};
+
+static u32 hfp_tx_mute;
+
+struct msm_pcm_pdata {
+	int perf_mode;
+};
+
+static void stop_pcm(struct msm_pcm_loopback *pcm);
+static int msm_pcm_loopback_get_session(struct snd_soc_pcm_runtime *rtd,
+					struct msm_pcm_loopback **pcm);
+
+static void msm_pcm_route_event_handler(enum msm_pcm_routing_event event,
+					void *priv_data)
+{
+	struct msm_pcm_loopback *pcm = priv_data;
+
+	BUG_ON(!pcm);
+
+	pr_debug("%s: event 0x%x\n", __func__, event);
+
+	switch (event) {
+	case MSM_PCM_RT_EVT_DEVSWITCH:
+		q6asm_cmd(pcm->audio_client, CMD_PAUSE);
+		q6asm_cmd(pcm->audio_client, CMD_FLUSH);
+		q6asm_run(pcm->audio_client, 0, 0, 0);
+		break;
+	default:
+		pr_err("%s: default event 0x%x\n", __func__, event);
+		break;
+	}
+}
+
+static void msm_pcm_loopback_event_handler(uint32_t opcode, uint32_t token,
+					   uint32_t *payload, void *priv)
+{
+	pr_debug("%s:\n", __func__);
+	switch (opcode) {
+	case APR_BASIC_RSP_RESULT:
+		/* No command-specific handling is needed for loopback. */
+		break;
+	default:
+		pr_err("%s: Not Supported Event opcode[0x%x]\n",
+			__func__, opcode);
+		break;
+	}
+}
+
+static int msm_loopback_session_mute_get(struct snd_kcontrol *kcontrol,
+					 struct snd_ctl_elem_value *ucontrol)
+{
+	ucontrol->value.integer.value[0] = hfp_tx_mute;
+	return 0;
+}
+
+static int msm_loopback_session_mute_put(struct snd_kcontrol *kcontrol,
+					 struct snd_ctl_elem_value *ucontrol)
+{
+	int ret = 0, n = 0;
+	int mute = ucontrol->value.integer.value[0];
+	struct msm_pcm_loopback *pcm = NULL;
+
+	if ((mute < 0) || (mute > 1)) {
+		pr_err(" %s Invalid arguments", __func__);
+		ret = -EINVAL;
+		goto done;
+	}
+
+	pr_debug("%s: mute=%d\n", __func__, mute);
+	hfp_tx_mute = mute;
+	for (n = 0; n < LOOPBACK_SESSION_MAX; n++) {
+		if (!strcmp(session_map[n].stream_name, "MultiMedia6"))
+			pcm = session_map[n].loopback_priv;
+	}
+	if (pcm && pcm->audio_client) {
+		ret = q6asm_set_mute(pcm->audio_client, mute);
+		if (ret < 0)
+			pr_err("%s: Send mute command failed rc=%d\n",
+				__func__, ret);
+	}
+done:
+	return ret;
+}
+
+static struct snd_kcontrol_new msm_loopback_controls[] = {
+	SOC_SINGLE_EXT("HFP TX Mute", SND_SOC_NOPM, 0, 1, 0,
+			msm_loopback_session_mute_get,
+			msm_loopback_session_mute_put),
+};
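+
+/*
+ * Usage sketch (card number assumed): the mute control registered above
+ * can be toggled from userspace with, e.g.,
+ *   amixer -c 0 cset name='HFP TX Mute' 1
+ */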
+
+static int msm_pcm_loopback_probe(struct snd_soc_platform *platform)
+{
+	snd_soc_add_platform_controls(platform, msm_loopback_controls,
+				      ARRAY_SIZE(msm_loopback_controls));
+
+	return 0;
+}
+static int pcm_loopback_set_volume(struct msm_pcm_loopback *prtd,
+				   uint32_t volume)
+{
+	int rc = -EINVAL;
+
+	pr_debug("%s: Setting volume 0x%x\n", __func__, volume);
+
+	if (prtd && prtd->audio_client) {
+		rc = q6asm_set_volume(prtd->audio_client, volume);
+		if (rc < 0) {
+			pr_err("%s: Send Volume command failed rc = %d\n",
+				__func__, rc);
+			return rc;
+		}
+		prtd->volume = volume;
+	}
+	return rc;
+}
+
+static int msm_pcm_loopback_get_session(struct snd_soc_pcm_runtime *rtd,
+					struct msm_pcm_loopback **pcm)
+{
+	int ret = 0;
+	int n, index = -1;
+
+	dev_dbg(rtd->platform->dev, "%s: stream %s\n", __func__,
+		rtd->dai_link->stream_name);
+
+	mutex_lock(&loopback_session_lock);
+	for (n = 0; n < LOOPBACK_SESSION_MAX; n++) {
+		if (!strcmp(rtd->dai_link->stream_name,
+		    session_map[n].stream_name)) {
+			*pcm = session_map[n].loopback_priv;
+			goto exit;
+		}
+		/*
+		 * Remember the lowest free slot: if the stream name is not
+		 * found in any existing entry once the loop completes, the
+		 * new session is allocated at this index.
+		 */
+		if (!(session_map[n].stream_name[0]) && (index < 0))
+			index = n;
+	}
+
+	if (index < 0) {
+		dev_err(rtd->platform->dev, "%s: Max Sessions allocated\n",
+				 __func__);
+		ret = -EAGAIN;
+		goto exit;
+	}
+
+	session_map[index].loopback_priv = kzalloc(
+		sizeof(struct msm_pcm_loopback), GFP_KERNEL);
+	if (!session_map[index].loopback_priv) {
+		ret = -ENOMEM;
+		goto exit;
+	}
+
+	strlcpy(session_map[index].stream_name,
+		rtd->dai_link->stream_name,
+		sizeof(session_map[index].stream_name));
+	dev_dbg(rtd->platform->dev, "%s: stream %s index %d\n",
+		__func__, session_map[index].stream_name, index);
+
+	mutex_init(&session_map[index].loopback_priv->lock);
+	*pcm = session_map[index].loopback_priv;
+exit:
+	mutex_unlock(&loopback_session_lock);
+	return ret;
+}
+
+static int msm_pcm_open(struct snd_pcm_substream *substream)
+{
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	struct snd_soc_pcm_runtime *rtd = snd_pcm_substream_chip(substream);
+	struct msm_pcm_loopback *pcm = NULL;
+	int ret = 0;
+	uint16_t bits_per_sample = 16;
+	struct msm_pcm_routing_evt event;
+	struct asm_session_mtmx_strtr_param_window_v2_t asm_mtmx_strtr_window;
+	uint32_t param_id;
+	struct msm_pcm_pdata *pdata;
+
+	ret = msm_pcm_loopback_get_session(rtd, &pcm);
+	if (ret)
+		return ret;
+
+	mutex_lock(&pcm->lock);
+
+	pcm->volume = 0x2000;
+
+	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+		pcm->playback_substream = substream;
+	else if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
+		pcm->capture_substream = substream;
+
+	pcm->instance++;
+	dev_dbg(rtd->platform->dev, "%s: pcm out open: %d,%d\n", __func__,
+			pcm->instance, substream->stream);
+	if (pcm->instance == 2) {
+		struct snd_soc_pcm_runtime *soc_pcm_rx =
+				pcm->playback_substream->private_data;
+		struct snd_soc_pcm_runtime *soc_pcm_tx =
+				pcm->capture_substream->private_data;
+		if (pcm->audio_client != NULL)
+			stop_pcm(pcm);
+
+		pdata = (struct msm_pcm_pdata *)
+			dev_get_drvdata(rtd->platform->dev);
+		if (!pdata) {
+			dev_err(rtd->platform->dev,
+				"%s: platform data not populated\n", __func__);
+			mutex_unlock(&pcm->lock);
+			return -EINVAL;
+		}
+
+		pcm->audio_client = q6asm_audio_client_alloc(
+				(app_cb)msm_pcm_loopback_event_handler, pcm);
+		if (!pcm->audio_client) {
+			dev_err(rtd->platform->dev,
+				"%s: Could not allocate memory\n", __func__);
+			mutex_unlock(&pcm->lock);
+			return -ENOMEM;
+		}
+		pcm->session_id = pcm->audio_client->session;
+		pcm->audio_client->perf_mode = pdata->perf_mode;
+		ret = q6asm_open_loopback_v2(pcm->audio_client,
+					     bits_per_sample);
+		if (ret < 0) {
+			dev_err(rtd->platform->dev,
+				"%s: pcm out open failed\n", __func__);
+			q6asm_audio_client_free(pcm->audio_client);
+			mutex_unlock(&pcm->lock);
+			return ret;
+		}
+		event.event_func = msm_pcm_route_event_handler;
+		event.priv_data = (void *) pcm;
+		msm_pcm_routing_reg_phy_stream(soc_pcm_tx->dai_link->be_id,
+			pcm->audio_client->perf_mode,
+			pcm->session_id, pcm->capture_substream->stream);
+		msm_pcm_routing_reg_phy_stream_v2(soc_pcm_rx->dai_link->be_id,
+			pcm->audio_client->perf_mode,
+			pcm->session_id, pcm->playback_substream->stream,
+			event);
+		if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+			pcm->playback_substream = substream;
+			ret = pcm_loopback_set_volume(pcm, pcm->volume);
+			if (ret < 0)
+				dev_err(rtd->platform->dev,
+					"Error %d setting volume", ret);
+		}
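+		/*
+		 * The two render-window commands below span the full signed
+		 * 64-bit timestamp range (LSW/MSW halves of INT64_MIN and
+		 * INT64_MAX), which effectively disables render-window
+		 * checks so loopback samples are never held back or dropped.
+		 */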
+		/* Set to largest negative value */
+		asm_mtmx_strtr_window.window_lsw = 0x00000000;
+		asm_mtmx_strtr_window.window_msw = 0x80000000;
+		param_id = ASM_SESSION_MTMX_STRTR_PARAM_RENDER_WINDOW_START_V2;
+		q6asm_send_mtmx_strtr_window(pcm->audio_client,
+					     &asm_mtmx_strtr_window,
+					     param_id);
+		/* Set to largest positive value */
+		asm_mtmx_strtr_window.window_lsw = 0xffffffff;
+		asm_mtmx_strtr_window.window_msw = 0x7fffffff;
+		param_id = ASM_SESSION_MTMX_STRTR_PARAM_RENDER_WINDOW_END_V2;
+		q6asm_send_mtmx_strtr_window(pcm->audio_client,
+					     &asm_mtmx_strtr_window,
+					     param_id);
+	}
+	dev_info(rtd->platform->dev, "%s: Instance = %d, Stream ID = %s\n",
+			__func__, pcm->instance, substream->pcm->id);
+	runtime->private_data = pcm;
+
+	mutex_unlock(&pcm->lock);
+
+	return 0;
+}
+
+static void stop_pcm(struct msm_pcm_loopback *pcm)
+{
+	struct snd_soc_pcm_runtime *soc_pcm_rx;
+	struct snd_soc_pcm_runtime *soc_pcm_tx;
+
+	if (pcm->audio_client == NULL)
+		return;
+	q6asm_cmd(pcm->audio_client, CMD_CLOSE);
+
+	if (pcm->playback_substream != NULL) {
+		soc_pcm_rx = pcm->playback_substream->private_data;
+		msm_pcm_routing_dereg_phy_stream(soc_pcm_rx->dai_link->be_id,
+				SNDRV_PCM_STREAM_PLAYBACK);
+	}
+	if (pcm->capture_substream != NULL) {
+		soc_pcm_tx = pcm->capture_substream->private_data;
+		msm_pcm_routing_dereg_phy_stream(soc_pcm_tx->dai_link->be_id,
+				SNDRV_PCM_STREAM_CAPTURE);
+	}
+	q6asm_audio_client_free(pcm->audio_client);
+	pcm->audio_client = NULL;
+}
+
+static int msm_pcm_close(struct snd_pcm_substream *substream)
+{
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	struct msm_pcm_loopback *pcm = runtime->private_data;
+	struct snd_soc_pcm_runtime *rtd = snd_pcm_substream_chip(substream);
+	int ret = 0, n;
+	bool found = false;
+
+	mutex_lock(&pcm->lock);
+
+	dev_dbg(rtd->platform->dev, "%s: end pcm call:%d\n",
+		__func__, substream->stream);
+	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+		pcm->playback_start = 0;
+	else if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
+		pcm->capture_start = 0;
+
+	pcm->instance--;
+	if (!pcm->playback_start || !pcm->capture_start) {
+		dev_dbg(rtd->platform->dev, "%s: end pcm call\n", __func__);
+		stop_pcm(pcm);
+	}
+
+	if (!pcm->instance) {
+		mutex_lock(&loopback_session_lock);
+		for (n = 0; n < LOOPBACK_SESSION_MAX; n++) {
+			if (!strcmp(rtd->dai_link->stream_name,
+					session_map[n].stream_name)) {
+				found = true;
+				break;
+			}
+		}
+		if (found) {
+			memset(session_map[n].stream_name, 0,
+				sizeof(session_map[n].stream_name));
+			mutex_unlock(&pcm->lock);
+			mutex_destroy(&session_map[n].loopback_priv->lock);
+			session_map[n].loopback_priv = NULL;
+			kfree(pcm);
+			dev_dbg(rtd->platform->dev, "%s: stream freed %s\n",
+				__func__, rtd->dai_link->stream_name);
+			mutex_unlock(&loopback_session_lock);
+			return 0;
+		}
+		mutex_unlock(&loopback_session_lock);
+	}
+	mutex_unlock(&pcm->lock);
+	return ret;
+}
+
+static int msm_pcm_prepare(struct snd_pcm_substream *substream)
+{
+	int ret = 0;
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	struct msm_pcm_loopback *pcm = runtime->private_data;
+	struct snd_soc_pcm_runtime *rtd = snd_pcm_substream_chip(substream);
+
+	mutex_lock(&pcm->lock);
+
+	dev_dbg(rtd->platform->dev, "%s: ASM loopback stream:%d\n",
+		__func__, substream->stream);
+	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+		if (!pcm->playback_start)
+			pcm->playback_start = 1;
+	} else if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) {
+		if (!pcm->capture_start)
+			pcm->capture_start = 1;
+	}
+	mutex_unlock(&pcm->lock);
+
+	return ret;
+}
+
+static int msm_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
+{
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	struct msm_pcm_loopback *pcm = runtime->private_data;
+	struct snd_soc_pcm_runtime *rtd = snd_pcm_substream_chip(substream);
+
+	switch (cmd) {
+	case SNDRV_PCM_TRIGGER_START:
+	case SNDRV_PCM_TRIGGER_RESUME:
+	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+		dev_dbg(rtd->platform->dev,
+			"%s: playback_start:%d,capture_start:%d\n", __func__,
+			pcm->playback_start, pcm->capture_start);
+		if (pcm->playback_start && pcm->capture_start)
+			q6asm_run_nowait(pcm->audio_client, 0, 0, 0);
+		break;
+	case SNDRV_PCM_TRIGGER_SUSPEND:
+	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+	case SNDRV_PCM_TRIGGER_STOP:
+		dev_dbg(rtd->platform->dev,
+			"%s:Pause/Stop - playback_start:%d,capture_start:%d\n",
+			__func__, pcm->playback_start, pcm->capture_start);
+		if (pcm->playback_start && pcm->capture_start)
+			q6asm_cmd_nowait(pcm->audio_client, CMD_PAUSE);
+		break;
+	default:
+		pr_err("%s: default cmd %d\n", __func__, cmd);
+		break;
+	}
+
+	return 0;
+}
+
+static struct snd_pcm_ops msm_pcm_ops = {
+	.open           = msm_pcm_open,
+	.close          = msm_pcm_close,
+	.prepare        = msm_pcm_prepare,
+	.trigger        = msm_pcm_trigger,
+};
+
+static int msm_pcm_volume_ctl_put(struct snd_kcontrol *kcontrol,
+				  struct snd_ctl_elem_value *ucontrol)
+{
+	int rc = 0;
+	struct snd_pcm_volume *vol = snd_kcontrol_chip(kcontrol);
+	struct snd_pcm_substream *substream =
+		vol->pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream;
+	struct msm_pcm_loopback *prtd;
+	int volume = ucontrol->value.integer.value[0];
+
+	pr_debug("%s: volume : 0x%x\n", __func__, volume);
+	if ((!substream) || (!substream->runtime)) {
+		pr_err("%s substream or runtime not found\n", __func__);
+		rc = -ENODEV;
+		goto exit;
+	}
+	prtd = substream->runtime->private_data;
+	if (!prtd) {
+		rc = -ENODEV;
+		goto exit;
+	}
+	rc = pcm_loopback_set_volume(prtd, volume);
+
+exit:
+	return rc;
+}
+
+static int msm_pcm_volume_ctl_get(struct snd_kcontrol *kcontrol,
+				  struct snd_ctl_elem_value *ucontrol)
+{
+	int rc = 0;
+	struct snd_pcm_volume *vol = snd_kcontrol_chip(kcontrol);
+	struct snd_pcm_substream *substream =
+		vol->pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream;
+	struct msm_pcm_loopback *prtd;
+
+	pr_debug("%s\n", __func__);
+	if ((!substream) || (!substream->runtime)) {
+		pr_debug("%s substream or runtime not found\n", __func__);
+		rc = -ENODEV;
+		goto exit;
+	}
+	prtd = substream->runtime->private_data;
+	if (!prtd) {
+		rc = -ENODEV;
+		goto exit;
+	}
+	ucontrol->value.integer.value[0] = prtd->volume;
+
+exit:
+	return rc;
+}
+
+static int msm_pcm_add_volume_controls(struct snd_soc_pcm_runtime *rtd)
+{
+	struct snd_pcm *pcm = rtd->pcm->streams[0].pcm;
+	struct snd_pcm_volume *volume_info;
+	struct snd_kcontrol *kctl;
+	int ret = 0;
+
+	dev_dbg(rtd->dev, "%s, Volume cntrl add\n", __func__);
+	ret = snd_pcm_add_volume_ctls(pcm, SNDRV_PCM_STREAM_PLAYBACK,
+				      NULL, 1,
+				      rtd->dai_link->be_id,
+				      &volume_info);
+	if (ret < 0)
+		return ret;
+	kctl = volume_info->kctl;
+	kctl->put = msm_pcm_volume_ctl_put;
+	kctl->get = msm_pcm_volume_ctl_get;
+	kctl->tlv.p = loopback_rx_vol_gain;
+	return 0;
+}
+
+static int msm_pcm_playback_app_type_cfg_ctl_put(struct snd_kcontrol *kcontrol,
+					struct snd_ctl_elem_value *ucontrol)
+{
+	u64 fe_id = kcontrol->private_value;
+	int session_type = SESSION_TYPE_RX;
+	int be_id = ucontrol->value.integer.value[3];
+	struct msm_pcm_stream_app_type_cfg cfg_data = {0, 0, 48000};
+	int ret = 0;
+
+	cfg_data.app_type = ucontrol->value.integer.value[0];
+	cfg_data.acdb_dev_id = ucontrol->value.integer.value[1];
+	if (ucontrol->value.integer.value[2] != 0)
+		cfg_data.sample_rate = ucontrol->value.integer.value[2];
+	pr_debug("%s: fe_id- %llu session_type- %d be_id- %d app_type- %d acdb_dev_id- %d sample_rate- %d\n",
+		__func__, fe_id, session_type, be_id,
+		cfg_data.app_type, cfg_data.acdb_dev_id, cfg_data.sample_rate);
+	ret = msm_pcm_routing_reg_stream_app_type_cfg(fe_id, session_type,
+						      be_id, &cfg_data);
+	if (ret < 0)
+		pr_err("%s: msm_pcm_routing_reg_stream_app_type_cfg failed returned %d\n",
+			__func__, ret);
+
+	return ret;
+}
+
+static int msm_pcm_playback_app_type_cfg_ctl_get(struct snd_kcontrol *kcontrol,
+					struct snd_ctl_elem_value *ucontrol)
+{
+	u64 fe_id = kcontrol->private_value;
+	int session_type = SESSION_TYPE_RX;
+	int be_id = 0;
+	struct msm_pcm_stream_app_type_cfg cfg_data = {0};
+	int ret = 0;
+
+	ret = msm_pcm_routing_get_stream_app_type_cfg(fe_id, session_type,
+						      &be_id, &cfg_data);
+	if (ret < 0) {
+		pr_err("%s: msm_pcm_routing_get_stream_app_type_cfg failed returned %d\n",
+			__func__, ret);
+		goto done;
+	}
+
+	ucontrol->value.integer.value[0] = cfg_data.app_type;
+	ucontrol->value.integer.value[1] = cfg_data.acdb_dev_id;
+	ucontrol->value.integer.value[2] = cfg_data.sample_rate;
+	ucontrol->value.integer.value[3] = be_id;
+	pr_debug("%s: fedai_id %llu, session_type %d, be_id %d, app_type %d, acdb_dev_id %d, sample_rate %d\n",
+		__func__, fe_id, session_type, be_id,
+		cfg_data.app_type, cfg_data.acdb_dev_id, cfg_data.sample_rate);
+done:
+	return ret;
+}
+
+static int msm_pcm_capture_app_type_cfg_ctl_put(struct snd_kcontrol *kcontrol,
+					struct snd_ctl_elem_value *ucontrol)
+{
+	u64 fe_id = kcontrol->private_value;
+	int session_type = SESSION_TYPE_TX;
+	int be_id = ucontrol->value.integer.value[3];
+	struct msm_pcm_stream_app_type_cfg cfg_data = {0, 0, 48000};
+	int ret = 0;
+
+	cfg_data.app_type = ucontrol->value.integer.value[0];
+	cfg_data.acdb_dev_id = ucontrol->value.integer.value[1];
+	if (ucontrol->value.integer.value[2] != 0)
+		cfg_data.sample_rate = ucontrol->value.integer.value[2];
+	pr_debug("%s: fe_id- %llu session_type- %d be_id- %d app_type- %d acdb_dev_id- %d sample_rate- %d\n",
+		__func__, fe_id, session_type, be_id,
+		cfg_data.app_type, cfg_data.acdb_dev_id, cfg_data.sample_rate);
+	ret = msm_pcm_routing_reg_stream_app_type_cfg(fe_id, session_type,
+						      be_id, &cfg_data);
+	if (ret < 0)
+		pr_err("%s: msm_pcm_routing_reg_stream_app_type_cfg failed returned %d\n",
+			__func__, ret);
+
+	return ret;
+}
+
+static int msm_pcm_capture_app_type_cfg_ctl_get(struct snd_kcontrol *kcontrol,
+					struct snd_ctl_elem_value *ucontrol)
+{
+	u64 fe_id = kcontrol->private_value;
+	int session_type = SESSION_TYPE_TX;
+	int be_id = 0;
+	struct msm_pcm_stream_app_type_cfg cfg_data = {0};
+	int ret = 0;
+
+	ret = msm_pcm_routing_get_stream_app_type_cfg(fe_id, session_type,
+						      &be_id, &cfg_data);
+	if (ret < 0) {
+		pr_err("%s: msm_pcm_routing_get_stream_app_type_cfg failed returned %d\n",
+			__func__, ret);
+		goto done;
+	}
+
+	ucontrol->value.integer.value[0] = cfg_data.app_type;
+	ucontrol->value.integer.value[1] = cfg_data.acdb_dev_id;
+	ucontrol->value.integer.value[2] = cfg_data.sample_rate;
+	ucontrol->value.integer.value[3] = be_id;
+	pr_debug("%s: fedai_id %llu, session_type %d, be_id %d, app_type %d, acdb_dev_id %d, sample_rate %d\n",
+		__func__, fe_id, session_type, be_id,
+		cfg_data.app_type, cfg_data.acdb_dev_id, cfg_data.sample_rate);
+done:
+	return ret;
+}
+
+static int msm_pcm_add_app_type_controls(struct snd_soc_pcm_runtime *rtd)
+{
+	struct snd_pcm *pcm = rtd->pcm->streams[0].pcm;
+	struct snd_pcm_usr *app_type_info;
+	struct snd_kcontrol *kctl;
+	const char *playback_mixer_ctl_name	= "Audio Stream";
+	const char *capture_mixer_ctl_name	= "Audio Stream Capture";
+	const char *deviceNo		= "NN";
+	const char *suffix		= "App Type Cfg";
+	int ctl_len, ret = 0;
+
+	if (pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream) {
+		ctl_len = strlen(playback_mixer_ctl_name) + 1 +
+				strlen(deviceNo) + 1 + strlen(suffix) + 1;
+		pr_debug("%s: Playback app type cntrl add\n", __func__);
+		ret = snd_pcm_add_usr_ctls(pcm, SNDRV_PCM_STREAM_PLAYBACK,
+					NULL, 1, ctl_len, rtd->dai_link->be_id,
+					&app_type_info);
+		if (ret < 0)
+			return ret;
+		kctl = app_type_info->kctl;
+		snprintf(kctl->id.name, ctl_len, "%s %d %s",
+			playback_mixer_ctl_name, rtd->pcm->device, suffix);
+		kctl->put = msm_pcm_playback_app_type_cfg_ctl_put;
+		kctl->get = msm_pcm_playback_app_type_cfg_ctl_get;
+	}
+
+	if (pcm->streams[SNDRV_PCM_STREAM_CAPTURE].substream) {
+		ctl_len = strlen(capture_mixer_ctl_name) + 1 +
+				strlen(deviceNo) + 1 + strlen(suffix) + 1;
+		pr_debug("%s: Capture app type cntrl add\n", __func__);
+		ret = snd_pcm_add_usr_ctls(pcm, SNDRV_PCM_STREAM_CAPTURE,
+					NULL, 1, ctl_len, rtd->dai_link->be_id,
+					&app_type_info);
+		if (ret < 0)
+			return ret;
+		kctl = app_type_info->kctl;
+		snprintf(kctl->id.name, ctl_len, "%s %d %s",
+			capture_mixer_ctl_name, rtd->pcm->device, suffix);
+		kctl->put = msm_pcm_capture_app_type_cfg_ctl_put;
+		kctl->get = msm_pcm_capture_app_type_cfg_ctl_get;
+	}
+
+	return 0;
+}
+
+static int msm_pcm_add_controls(struct snd_soc_pcm_runtime *rtd)
+{
+	int ret = 0;
+
+	pr_debug("%s\n", __func__);
+	ret = msm_pcm_add_volume_controls(rtd);
+	if (ret)
+		pr_err("%s: pcm add volume controls failed:%d\n",
+			__func__, ret);
+	ret = msm_pcm_add_app_type_controls(rtd);
+	if (ret)
+		pr_err("%s: pcm add app type controls failed:%d\n",
+			__func__, ret);
+	return ret;
+}
+
+static int msm_asoc_pcm_new(struct snd_soc_pcm_runtime *rtd)
+{
+	struct snd_card *card = rtd->card->snd_card;
+	int ret = 0;
+
+	if (!card->dev->coherent_dma_mask)
+		card->dev->coherent_dma_mask = DMA_BIT_MASK(32);
+
+	ret = msm_pcm_add_controls(rtd);
+	if (ret)
+		dev_err(rtd->dev, "%s, kctl add failed\n", __func__);
+	return ret;
+}
+
+static struct snd_soc_platform_driver msm_soc_platform = {
+	.ops            = &msm_pcm_ops,
+	.pcm_new        = msm_asoc_pcm_new,
+	.probe          = msm_pcm_loopback_probe,
+};
+
+static int msm_pcm_probe(struct platform_device *pdev)
+{
+	struct msm_pcm_pdata *pdata;
+
+	dev_dbg(&pdev->dev, "%s: dev name %s\n",
+		__func__, dev_name(&pdev->dev));
+
+	pdata = kzalloc(sizeof(struct msm_pcm_pdata), GFP_KERNEL);
+	if (!pdata)
+		return -ENOMEM;
+
+	if (of_property_read_bool(pdev->dev.of_node,
+				"qcom,msm-pcm-loopback-low-latency"))
+		pdata->perf_mode = LOW_LATENCY_PCM_MODE;
+	else
+		pdata->perf_mode = LEGACY_PCM_MODE;
+
+	dev_set_drvdata(&pdev->dev, pdata);
+
+	return snd_soc_register_platform(&pdev->dev,
+				   &msm_soc_platform);
+}
+
+static int msm_pcm_remove(struct platform_device *pdev)
+{
+	snd_soc_unregister_platform(&pdev->dev);
+	return 0;
+}
+
+static const struct of_device_id msm_pcm_loopback_dt_match[] = {
+	{.compatible = "qcom,msm-pcm-loopback"},
+	{}
+};
+
+static struct platform_driver msm_pcm_driver = {
+	.driver = {
+		.name = "msm-pcm-loopback",
+		.owner = THIS_MODULE,
+		.of_match_table = msm_pcm_loopback_dt_match,
+	},
+	.probe = msm_pcm_probe,
+	.remove = msm_pcm_remove,
+};
+
+static int __init msm_soc_platform_init(void)
+{
+	return platform_driver_register(&msm_pcm_driver);
+}
+module_init(msm_soc_platform_init);
+
+static void __exit msm_soc_platform_exit(void)
+{
+	platform_driver_unregister(&msm_pcm_driver);
+}
+module_exit(msm_soc_platform_exit);
+
+MODULE_DESCRIPTION("PCM loopback platform driver");
+MODULE_LICENSE("GPL v2");
diff -Nruw linux-4.4.115-fbx/sound/soc/msm./qdsp6v2/msm-pcm-q6-noirq.c linux-4.4.115-fbx/sound/soc/msm/qdsp6v2/msm-pcm-q6-noirq.c
--- linux-4.4.115-fbx/sound/soc/msm./qdsp6v2/msm-pcm-q6-noirq.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/sound/soc/msm/qdsp6v2/msm-pcm-q6-noirq.c	2019-10-29 09:26:26.149227702 +0100
@@ -0,0 +1,1284 @@
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/time.h>
+#include <linux/wait.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/of_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/msm_audio_ion.h>
+
+#include <sound/core.h>
+#include <sound/soc.h>
+#include <sound/soc-dapm.h>
+#include <sound/pcm.h>
+#include <sound/initval.h>
+#include <sound/control.h>
+#include <sound/q6audio-v2.h>
+#include <sound/timer.h>
+#include <sound/hwdep.h>
+
+#include <asm/dma.h>
+#include <sound/tlv.h>
+#include <sound/pcm_params.h>
+#include <sound/devdep_params.h>
+
+#include "msm-pcm-q6-v2.h"
+#include "msm-pcm-routing-v2.h"
+
+#define PCM_MASTER_VOL_MAX_STEPS	0x2000
+static const DECLARE_TLV_DB_LINEAR(msm_pcm_vol_gain, 0,
+			PCM_MASTER_VOL_MAX_STEPS);
+
+struct snd_msm {
+	struct snd_card *card;
+	struct snd_pcm *pcm;
+};
+
+#define CMD_EOS_MIN_TIMEOUT_LENGTH  50
+#define CMD_EOS_TIMEOUT_MULTIPLIER  (HZ * 50)
+
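+/*
+ * The ATRACE_* macros below emit Android systrace-format markers
+ * ("B|pid|name", "E", "C|pid|name|value") via trace_printk(), so
+ * begin/end and counter events show up in systrace captures.
+ */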
+#define ATRACE_END() \
+	trace_printk("tracing_mark_write: E\n")
+#define ATRACE_BEGIN(name) \
+	trace_printk("tracing_mark_write: B|%d|%s\n", current->tgid, name)
+#define ATRACE_FUNC() ATRACE_BEGIN(__func__)
+#define ATRACE_INT(name, value) \
+	trace_printk("tracing_mark_write: C|%d|%s|%d\n", \
+			current->tgid, name, (int)(value))
+
+#define SIO_PLAYBACK_MAX_PERIOD_SIZE PLAYBACK_MAX_PERIOD_SIZE
+#define SIO_PLAYBACK_MIN_PERIOD_SIZE 48
+#define SIO_PLAYBACK_MAX_NUM_PERIODS 512
+#define SIO_PLAYBACK_MIN_NUM_PERIODS PLAYBACK_MIN_NUM_PERIODS
+#define SIO_PLAYBACK_MIN_BYTES (SIO_PLAYBACK_MIN_NUM_PERIODS *	\
+				SIO_PLAYBACK_MIN_PERIOD_SIZE)
+
+#define SIO_PLAYBACK_MAX_BYTES ((SIO_PLAYBACK_MAX_NUM_PERIODS) *	\
+				(SIO_PLAYBACK_MAX_PERIOD_SIZE))
+
+#define SIO_CAPTURE_MAX_PERIOD_SIZE CAPTURE_MAX_PERIOD_SIZE
+#define SIO_CAPTURE_MIN_PERIOD_SIZE 48
+#define SIO_CAPTURE_MAX_NUM_PERIODS 512
+#define SIO_CAPTURE_MIN_NUM_PERIODS CAPTURE_MIN_NUM_PERIODS
+
+#define SIO_CAPTURE_MIN_BYTES (SIO_CAPTURE_MIN_NUM_PERIODS *	\
+			       SIO_CAPTURE_MIN_PERIOD_SIZE)
+
+#define SIO_CAPTURE_MAX_BYTES (SIO_CAPTURE_MAX_NUM_PERIODS *	\
+				SIO_CAPTURE_MAX_PERIOD_SIZE)
+
+static struct snd_pcm_hardware msm_pcm_hardware_playback = {
+	.info =                 (SNDRV_PCM_INFO_MMAP |
+				SNDRV_PCM_INFO_BLOCK_TRANSFER |
+				SNDRV_PCM_INFO_MMAP_VALID |
+				SNDRV_PCM_INFO_INTERLEAVED |
+				SNDRV_PCM_INFO_NO_PERIOD_WAKEUP |
+				SNDRV_PCM_INFO_PAUSE | SNDRV_PCM_INFO_RESUME),
+	.formats =              (SNDRV_PCM_FMTBIT_S16_LE |
+				SNDRV_PCM_FMTBIT_S24_LE |
+				SNDRV_PCM_FMTBIT_S24_3LE),
+	.rates =                SNDRV_PCM_RATE_8000_192000,
+	.rate_min =             8000,
+	.rate_max =             192000,
+	.channels_min =         1,
+	.channels_max =         8,
+	.buffer_bytes_max =     SIO_PLAYBACK_MAX_NUM_PERIODS *
+				SIO_PLAYBACK_MAX_PERIOD_SIZE,
+	.period_bytes_min =	SIO_PLAYBACK_MIN_PERIOD_SIZE,
+	.period_bytes_max =     SIO_PLAYBACK_MAX_PERIOD_SIZE,
+	.periods_min =          SIO_PLAYBACK_MIN_NUM_PERIODS,
+	.periods_max =          SIO_PLAYBACK_MAX_NUM_PERIODS,
+	.fifo_size =            0,
+};
+
+static struct snd_pcm_hardware msm_pcm_hardware_capture = {
+	.info =                 (SNDRV_PCM_INFO_MMAP |
+				SNDRV_PCM_INFO_BLOCK_TRANSFER |
+				SNDRV_PCM_INFO_MMAP_VALID |
+				SNDRV_PCM_INFO_INTERLEAVED |
+				SNDRV_PCM_INFO_NO_PERIOD_WAKEUP |
+				SNDRV_PCM_INFO_PAUSE | SNDRV_PCM_INFO_RESUME),
+	.formats =              (SNDRV_PCM_FMTBIT_S16_LE |
+				SNDRV_PCM_FMTBIT_S24_LE |
+				SNDRV_PCM_FMTBIT_S24_3LE),
+	.rates =                SNDRV_PCM_RATE_8000_48000,
+	.rate_min =             8000,
+	.rate_max =             48000,
+	.channels_min =         1,
+	.channels_max =         4,
+	.buffer_bytes_max =     SIO_CAPTURE_MAX_NUM_PERIODS *
+				SIO_CAPTURE_MAX_PERIOD_SIZE,
+	.period_bytes_min =	SIO_CAPTURE_MIN_PERIOD_SIZE,
+	.period_bytes_max =     SIO_CAPTURE_MAX_PERIOD_SIZE,
+	.periods_min =          SIO_CAPTURE_MIN_NUM_PERIODS,
+	.periods_max =          SIO_CAPTURE_MAX_NUM_PERIODS,
+	.fifo_size =            0,
+};
+
+/* Conventional and unconventional sample rates supported */
+static unsigned int supported_sample_rates[] = {
+	8000, 11025, 12000, 16000, 22050, 24000, 32000, 44100, 48000,
+	88200, 96000, 176400, 192000
+};
+
+static struct snd_pcm_hw_constraint_list constraints_sample_rates = {
+	.count = ARRAY_SIZE(supported_sample_rates),
+	.list = supported_sample_rates,
+	.mask = 0,
+};
+
+static unsigned long msm_pcm_fe_topology[MSM_FRONTEND_DAI_MAX];
+
+/* The default is "DTS", i.e. the topology is read from the device tree. */
+static char const *msm_pcm_fe_topology_text[] = {
+	"DTS", "ULL", "ULL_PP", "LL" };
+
+static const struct soc_enum msm_pcm_fe_topology_enum[] = {
+	SOC_ENUM_SINGLE_EXT(ARRAY_SIZE(msm_pcm_fe_topology_text),
+			    msm_pcm_fe_topology_text),
+};
+
+static void event_handler(uint32_t opcode,
+		uint32_t token, uint32_t *payload, void *priv)
+{
+	uint32_t *ptrmem = (uint32_t *)payload;
+
+	switch (opcode) {
+	case ASM_DATA_EVENT_WATERMARK:
+		pr_debug("%s: Watermark level = 0x%08x\n", __func__, *ptrmem);
+		break;
+	case APR_BASIC_RSP_RESULT:
+		pr_debug("%s: Payload = [0x%x]stat[0x%x]\n",
+				__func__, payload[0], payload[1]);
+		switch (payload[0]) {
+		case ASM_SESSION_CMD_RUN_V2:
+		case ASM_SESSION_CMD_PAUSE:
+		case ASM_STREAM_CMD_FLUSH:
+			break;
+		default:
+			break;
+		}
+		break;
+	default:
+		pr_debug("Not Supported Event opcode[0x%x]\n", opcode);
+		break;
+	}
+}
+
+static int msm_pcm_open(struct snd_pcm_substream *substream)
+{
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	struct msm_audio *prtd;
+	int ret = 0;
+
+	prtd = kzalloc(sizeof(struct msm_audio), GFP_KERNEL);
+
+	if (prtd == NULL)
+		return -ENOMEM;
+
+	prtd->substream = substream;
+	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+		runtime->hw = msm_pcm_hardware_playback;
+	else
+		runtime->hw = msm_pcm_hardware_capture;
+
+	ret = snd_pcm_hw_constraint_list(runtime, 0,
+				SNDRV_PCM_HW_PARAM_RATE,
+				&constraints_sample_rates);
+	if (ret)
+		pr_info("snd_pcm_hw_constraint_list failed\n");
+
+	ret = snd_pcm_hw_constraint_integer(runtime,
+					    SNDRV_PCM_HW_PARAM_PERIODS);
+	if (ret)
+		pr_info("snd_pcm_hw_constraint_integer failed\n");
+
+	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+		ret = snd_pcm_hw_constraint_minmax(runtime,
+			SNDRV_PCM_HW_PARAM_BUFFER_BYTES,
+			SIO_PLAYBACK_MIN_BYTES,
+			SIO_PLAYBACK_MAX_BYTES);
+		if (ret) {
+			pr_info("%s: P buffer bytes minmax constraint ret %d\n",
+			       __func__, ret);
+		}
+	} else if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) {
+		ret = snd_pcm_hw_constraint_minmax(runtime,
+			   SNDRV_PCM_HW_PARAM_BUFFER_BYTES,
+			   SIO_CAPTURE_MIN_BYTES,
+			   SIO_CAPTURE_MAX_BYTES);
+		if (ret) {
+			pr_info("%s: C buffer bytes minmax constraint ret %d\n",
+			       __func__, ret);
+		}
+	}
+
+	ret = snd_pcm_hw_constraint_step(runtime, 0,
+		SNDRV_PCM_HW_PARAM_PERIOD_BYTES, 32);
+	if (ret) {
+		pr_err("%s: Constraint for period bytes step ret = %d\n",
+				__func__, ret);
+	}
+	ret = snd_pcm_hw_constraint_step(runtime, 0,
+		SNDRV_PCM_HW_PARAM_BUFFER_BYTES, 32);
+	if (ret) {
+		pr_err("%s: Constraint for buffer bytes step ret = %d\n",
+				__func__, ret);
+	}
+	prtd->audio_client = q6asm_audio_client_alloc(
+				(app_cb)event_handler, prtd);
+	if (!prtd->audio_client) {
+		pr_err("%s: client alloc failed\n", __func__);
+		ret = -ENOMEM;
+		goto fail_cmd;
+	}
+	prtd->dsp_cnt = 0;
+	prtd->set_channel_map = false;
+	runtime->private_data = prtd;
+	return 0;
+
+fail_cmd:
+	kfree(prtd);
+	return ret;
+}
+
+static int msm_pcm_hw_params(struct snd_pcm_substream *substream,
+				struct snd_pcm_hw_params *params)
+
+{
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	struct snd_soc_pcm_runtime *soc_prtd = substream->private_data;
+	struct msm_audio *prtd = runtime->private_data;
+	struct msm_plat_data *pdata;
+	struct snd_dma_buffer *dma_buf = &substream->dma_buffer;
+	struct audio_buffer *buf;
+	struct shared_io_config config;
+	uint16_t sample_word_size;
+	uint16_t bits_per_sample;
+	int ret;
+	int dir = (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) ? IN : OUT;
+	unsigned long topology;
+	int perf_mode;
+
+	pdata = (struct msm_plat_data *)
+		dev_get_drvdata(soc_prtd->platform->dev);
+	if (!pdata) {
+		ret = -EINVAL;
+		pr_err("%s: platform data not populated ret: %d\n", __func__,
+		       ret);
+		return ret;
+	}
+
+	topology = msm_pcm_fe_topology[soc_prtd->dai_link->be_id];
+
+	if (!strcmp(msm_pcm_fe_topology_text[topology], "ULL_PP"))
+		perf_mode = ULL_POST_PROCESSING_PCM_MODE;
+	else if (!strcmp(msm_pcm_fe_topology_text[topology], "ULL"))
+		perf_mode = ULTRA_LOW_LATENCY_PCM_MODE;
+	else if (!strcmp(msm_pcm_fe_topology_text[topology], "LL"))
+		perf_mode = LOW_LATENCY_PCM_MODE;
+	else
+		/* use the default from the device tree */
+		perf_mode = pdata->perf_mode;
+
+
+	/*
+	 * Capture must use LOW_LATENCY_PCM_MODE, since push mode does not
+	 * support ULL.
+	 */
+	prtd->audio_client->perf_mode = (dir == IN) ?
+					perf_mode :
+					LOW_LATENCY_PCM_MODE;
+
+	/* rate and channels are sent to audio driver */
+	prtd->samp_rate = params_rate(params);
+	prtd->channel_mode = params_channels(params);
+	if (prtd->enabled)
+		return 0;
+
+	switch (runtime->format) {
+	case SNDRV_PCM_FORMAT_S24_LE:
+		bits_per_sample = 24;
+		sample_word_size = 32;
+		break;
+	case SNDRV_PCM_FORMAT_S24_3LE:
+		bits_per_sample = 24;
+		sample_word_size = 24;
+		break;
+	case SNDRV_PCM_FORMAT_S16_LE:
+	default:
+		bits_per_sample = 16;
+		sample_word_size = 16;
+		break;
+	}
+
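+	/*
+	 * Shared-IO ring layout: one buffer per period, so bufsz is the
+	 * period size in bytes and bufcnt the period count.
+	 */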
+	config.format = FORMAT_LINEAR_PCM;
+	config.bits_per_sample = bits_per_sample;
+	config.rate = params_rate(params);
+	config.channels = params_channels(params);
+	config.sample_word_size = sample_word_size;
+	config.bufsz = params_buffer_bytes(params) / params_periods(params);
+	config.bufcnt = params_periods(params);
+
+	ret = q6asm_open_shared_io(prtd->audio_client, &config, dir);
+	if (ret) {
+		pr_err("%s: q6asm_open_write_shared_io failed ret: %d\n",
+		       __func__, ret);
+		return ret;
+	}
+
+	prtd->pcm_size = params_buffer_bytes(params);
+	prtd->pcm_count = params_buffer_bytes(params);
+	prtd->pcm_irq_pos = 0;
+
+	buf = prtd->audio_client->port[dir].buf;
+	dma_buf->dev.type = SNDRV_DMA_TYPE_DEV;
+	dma_buf->dev.dev = substream->pcm->card->dev;
+	dma_buf->private_data = NULL;
+	dma_buf->area = buf->data;
+	dma_buf->addr = buf->phys;
+	dma_buf->bytes = prtd->pcm_size;
+
+	snd_pcm_set_runtime_buffer(substream, &substream->dma_buffer);
+
+	pr_debug("%s: session ID %d, perf %d\n", __func__,
+	       prtd->audio_client->session,
+		prtd->audio_client->perf_mode);
+	prtd->session_id = prtd->audio_client->session;
+
+	pr_debug("msm_pcm_routing_reg_phy_stream w/ id %d\n",
+		 soc_prtd->dai_link->be_id);
+	ret = msm_pcm_routing_reg_phy_stream(soc_prtd->dai_link->be_id,
+				       prtd->audio_client->perf_mode,
+				       prtd->session_id, substream->stream);
+
+	if (ret) {
+		pr_err("%s: stream reg failed ret:%d\n", __func__, ret);
+		return ret;
+	}
+
+	atomic_set(&prtd->out_count, runtime->periods);
+	prtd->enabled = 1;
+	prtd->cmd_pending = 0;
+	prtd->cmd_interrupt = 0;
+
+	return 0;
+}
+
+static int msm_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
+{
+	int ret = 0;
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	struct msm_audio *prtd = runtime->private_data;
+	int dir = (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) ? 0 : 1;
+	struct audio_buffer *buf;
+
+	switch (cmd) {
+	case SNDRV_PCM_TRIGGER_START:
+	case SNDRV_PCM_TRIGGER_RESUME:
+	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+		pr_debug("%s: %s Trigger start\n", __func__,
+			 dir == 0 ? "P" : "C");
+		ret = q6asm_run(prtd->audio_client, 0, 0, 0);
+		if (ret)
+			break;
+		atomic_set(&prtd->start, 1);
+		break;
+	case SNDRV_PCM_TRIGGER_STOP:
+		pr_debug("%s: SNDRV_PCM_TRIGGER_STOP\n", __func__);
+		atomic_set(&prtd->start, 0);
+		q6asm_cmd(prtd->audio_client, CMD_PAUSE);
+		q6asm_cmd(prtd->audio_client, CMD_FLUSH);
+		buf = q6asm_shared_io_buf(prtd->audio_client, dir);
+		if (buf == NULL) {
+			pr_err("%s: shared IO buffer is null\n", __func__);
+			ret = -EINVAL;
+			break;
+		}
+		memset(buf->data, 0, buf->actual_size);
+		break;
+	case SNDRV_PCM_TRIGGER_SUSPEND:
+	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+		pr_debug("%s: SNDRV_PCM_TRIGGER_PAUSE\n", __func__);
+		ret = q6asm_cmd_nowait(prtd->audio_client, CMD_PAUSE);
+		atomic_set(&prtd->start, 0);
+		break;
+	default:
+		ret = -EINVAL;
+		break;
+	}
+	return ret;
+}
+
+
+static int msm_pcm_mmap_fd(struct snd_pcm_substream *substream,
+			   struct snd_pcm_mmap_fd *mmap_fd)
+{
+	struct msm_audio *prtd;
+	struct audio_port_data *apd;
+	struct audio_buffer *ab;
+	int dir = -1;
+
+	if (!substream->runtime) {
+		pr_err("%s substream runtime not found\n", __func__);
+		return -EFAULT;
+	}
+
+	prtd = substream->runtime->private_data;
+	if (!prtd || !prtd->audio_client || !prtd->mmap_flag) {
+		pr_err("%s no audio client or not an mmap session\n", __func__);
+		return -EINVAL;
+	}
+
+	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+		dir = IN;
+	else
+		dir = OUT;
+
+	apd = prtd->audio_client->port;
+	ab = &(apd[dir].buf[0]);
+	mmap_fd->fd = ion_share_dma_buf_fd(ab->client, ab->handle);
+	if (mmap_fd->fd >= 0) {
+		mmap_fd->dir = dir;
+		mmap_fd->actual_size = ab->actual_size;
+		mmap_fd->size = ab->size;
+	}
+	return mmap_fd->fd < 0 ? -EFAULT : 0;
+}
+
+static int msm_pcm_ioctl(struct snd_pcm_substream *substream,
+			 unsigned int cmd, void *arg)
+{
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	struct msm_audio *prtd = runtime->private_data;
+	int dir = (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) ? 0 : 1;
+	struct audio_buffer *buf;
+
+	switch (cmd) {
+	case SNDRV_PCM_IOCTL1_RESET:
+		pr_debug("%s: %s SNDRV_PCM_IOCTL1_RESET\n", __func__,
+		       dir == 0 ? "P" : "C");
+		buf = q6asm_shared_io_buf(prtd->audio_client, dir);
+
+		if (buf && buf->data)
+			memset(buf->data, 0, buf->actual_size);
+		break;
+	default:
+		break;
+	}
+
+	return snd_pcm_lib_ioctl(substream, cmd, arg);
+}
+
+#ifdef CONFIG_COMPAT
+static int msm_pcm_compat_ioctl(struct snd_pcm_substream *substream,
+				unsigned int cmd, void *arg)
+{
+	/* we only handle RESET which is common for both modes */
+	return msm_pcm_ioctl(substream, cmd, arg);
+}
+#endif
+
+static snd_pcm_uframes_t msm_pcm_pointer(struct snd_pcm_substream *substream)
+{
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	uint32_t read_index, wall_clk_msw, wall_clk_lsw;
+	/* These are offsets, unlike ASoC's full values. */
+	snd_pcm_sframes_t hw_ptr;
+	snd_pcm_sframes_t period_size;
+	int ret;
+	int retries = 10;
+	struct msm_audio *prtd = runtime->private_data;
+
+	period_size = runtime->period_size;
+
+	do {
+		ret = q6asm_get_shared_pos(prtd->audio_client,
+					   &read_index, &wall_clk_msw,
+					   &wall_clk_lsw);
+	} while (ret == -EAGAIN && --retries);
+
+	if (ret || !period_size) {
+		pr_err("get_shared_pos error or zero period size\n");
+		return 0;
+	}
+
+	hw_ptr = bytes_to_frames(substream->runtime,
+				 read_index);
+
+	if (runtime->control->appl_ptr == 0) {
+		pr_debug("ptr(%s): appl(0), hw = %lu read_index = %u\n",
+			 prtd->substream->stream == SNDRV_PCM_STREAM_PLAYBACK ?
+			 "P" : "C",
+			 hw_ptr, read_index);
+	}
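+	/*
+	 * Round down to a period boundary; read_index can advance
+	 * mid-period.
+	 */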
+	return (hw_ptr/period_size) * period_size;
+}
+
+static int msm_pcm_copy(struct snd_pcm_substream *substream, int a,
+	 snd_pcm_uframes_t hwoff, void __user *buf, snd_pcm_uframes_t frames)
+{
+	return -EINVAL;
+}
+
+static int msm_pcm_mmap(struct snd_pcm_substream *substream,
+				struct vm_area_struct *vma)
+{
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	struct msm_audio *prtd = runtime->private_data;
+	struct audio_client *ac = prtd->audio_client;
+	struct audio_port_data *apd = ac->port;
+	struct audio_buffer *ab;
+	int dir = -1;
+	int ret;
+
+	pr_debug("%s: mmap begin\n", __func__);
+	prtd->mmap_flag = 1;
+
+	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+		dir = IN;
+	else
+		dir = OUT;
+
+	ab = &(apd[dir].buf[0]);
+
+	ret = msm_audio_ion_mmap(ab, vma);
+
+	if (ret)
+		prtd->mmap_flag = 0;
+
+	return ret;
+}
+
+static int msm_pcm_prepare(struct snd_pcm_substream *substream)
+{
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	struct msm_audio *prtd = runtime->private_data;
+
+	if (!prtd || !prtd->mmap_flag)
+		return -EIO;
+
+	return 0;
+}
+
+static int msm_pcm_close(struct snd_pcm_substream *substream)
+{
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	struct snd_soc_pcm_runtime *soc_prtd = substream->private_data;
+	struct msm_audio *prtd = runtime->private_data;
+	struct audio_client *ac = prtd->audio_client;
+	uint32_t timeout;
+	int dir = 0;
+	int ret = 0;
+
+	if (ac) {
+		if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+			dir = IN;
+		else if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
+			dir = OUT;
+
+		/* determine timeout length */
+		if (runtime->frame_bits == 0 || runtime->rate == 0) {
+			timeout = CMD_EOS_MIN_TIMEOUT_LENGTH;
+		} else {
+			timeout = (runtime->period_size *
+					CMD_EOS_TIMEOUT_MULTIPLIER) /
+					((runtime->frame_bits / 8) *
+					 runtime->rate);
+			if (timeout < CMD_EOS_MIN_TIMEOUT_LENGTH)
+				timeout = CMD_EOS_MIN_TIMEOUT_LENGTH;
+		}
+
+		q6asm_cmd(ac, CMD_CLOSE);
+
+		ret = q6asm_shared_io_free(ac, dir);
+
+		if (ret) {
+			pr_err("%s: Failed to close pull mode, ret %d\n",
+					__func__, ret);
+		}
+		q6asm_audio_client_free(ac);
+	}
+	msm_pcm_routing_dereg_phy_stream(soc_prtd->dai_link->be_id,
+					 dir == IN ?
+					 SNDRV_PCM_STREAM_PLAYBACK :
+					 SNDRV_PCM_STREAM_CAPTURE);
+	kfree(prtd);
+	runtime->private_data = NULL;
+
+	return 0;
+}
+
+static int msm_pcm_set_volume(struct msm_audio *prtd, uint32_t volume)
+{
+	int rc = 0;
+
+	if (prtd && prtd->audio_client) {
+		pr_debug("%s: channels %d volume 0x%x\n", __func__,
+				prtd->channel_mode, volume);
+		rc = q6asm_set_volume(prtd->audio_client, volume);
+		if (rc < 0) {
+			pr_err("%s: Send Volume command failed rc=%d\n",
+					__func__, rc);
+		}
+	}
+	return rc;
+}
+
+static int msm_pcm_volume_ctl_get(struct snd_kcontrol *kcontrol,
+		      struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_pcm_volume *vol = snd_kcontrol_chip(kcontrol);
+	struct snd_pcm_substream *substream =
+		vol->pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream;
+	struct msm_audio *prtd;
+
+	pr_debug("%s\n", __func__);
+	if (!substream) {
+		pr_debug("%s substream not found\n", __func__);
+		return -ENODEV;
+	}
+	if (!substream->runtime) {
+		pr_debug("%s substream runtime not found\n", __func__);
+		return 0;
+	}
+	prtd = substream->runtime->private_data;
+	if (prtd)
+		ucontrol->value.integer.value[0] = prtd->volume;
+	return 0;
+}
+
+static int msm_pcm_volume_ctl_put(struct snd_kcontrol *kcontrol,
+				struct snd_ctl_elem_value *ucontrol)
+{
+	int rc = 0;
+	struct snd_pcm_volume *vol = snd_kcontrol_chip(kcontrol);
+	struct snd_pcm_substream *substream =
+		vol->pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream;
+	struct msm_audio *prtd;
+	int volume = ucontrol->value.integer.value[0];
+
+	pr_debug("%s: volume : 0x%x\n", __func__, volume);
+	if (!substream) {
+		pr_err("%s substream not found\n", __func__);
+		return -ENODEV;
+	}
+	if (!substream->runtime) {
+		pr_err("%s substream runtime not found\n", __func__);
+		return 0;
+	}
+	prtd = substream->runtime->private_data;
+	if (prtd) {
+		rc = msm_pcm_set_volume(prtd, volume);
+		prtd->volume = volume;
+	}
+	return rc;
+}
+
+static int msm_pcm_add_volume_control(struct snd_soc_pcm_runtime *rtd)
+{
+	int ret = 0;
+	struct snd_pcm *pcm = rtd->pcm;
+	struct snd_pcm_volume *volume_info;
+	struct snd_kcontrol *kctl;
+
+	dev_dbg(rtd->dev, "%s, Volume control add\n", __func__);
+	ret = snd_pcm_add_volume_ctls(pcm, SNDRV_PCM_STREAM_PLAYBACK,
+			NULL, 1, rtd->dai_link->be_id,
+			&volume_info);
+	if (ret < 0) {
+		pr_err("%s volume control failed ret %d\n", __func__, ret);
+		return ret;
+	}
+	kctl = volume_info->kctl;
+	kctl->put = msm_pcm_volume_ctl_put;
+	kctl->get = msm_pcm_volume_ctl_get;
+	kctl->tlv.p = msm_pcm_vol_gain;
+	return 0;
+}
+
+static int msm_pcm_chmap_ctl_put(struct snd_kcontrol *kcontrol,
+				struct snd_ctl_elem_value *ucontrol)
+{
+	int i;
+	struct snd_pcm_chmap *info = snd_kcontrol_chip(kcontrol);
+	unsigned int idx = snd_ctl_get_ioffidx(kcontrol, &ucontrol->id);
+	struct snd_pcm_substream *substream;
+	struct msm_audio *prtd;
+
+	pr_debug("%s", __func__);
+	substream = snd_pcm_chmap_substream(info, idx);
+	if (!substream)
+		return -ENODEV;
+	if (!substream->runtime)
+		return 0;
+
+	prtd = substream->runtime->private_data;
+	if (prtd) {
+		prtd->set_channel_map = true;
+		for (i = 0; i < PCM_FORMAT_MAX_NUM_CHANNEL; i++)
+			prtd->channel_map[i] =
+				(char)(ucontrol->value.integer.value[i]);
+	}
+	return 0;
+}
+
+static int msm_pcm_chmap_ctl_get(struct snd_kcontrol *kcontrol,
+				struct snd_ctl_elem_value *ucontrol)
+{
+	int i;
+	struct snd_pcm_chmap *info = snd_kcontrol_chip(kcontrol);
+	unsigned int idx = snd_ctl_get_ioffidx(kcontrol, &ucontrol->id);
+	struct snd_pcm_substream *substream;
+	struct msm_audio *prtd;
+
+	pr_debug("%s", __func__);
+	substream = snd_pcm_chmap_substream(info, idx);
+	if (!substream)
+		return -ENODEV;
+	memset(ucontrol->value.integer.value, 0,
+		sizeof(ucontrol->value.integer.value));
+	if (!substream->runtime)
+		return 0; /* no channels set */
+
+	prtd = substream->runtime->private_data;
+
+	if (prtd && prtd->set_channel_map) {
+		for (i = 0; i < PCM_FORMAT_MAX_NUM_CHANNEL; i++)
+			ucontrol->value.integer.value[i] =
+					(int)prtd->channel_map[i];
+	} else {
+		for (i = 0; i < PCM_FORMAT_MAX_NUM_CHANNEL; i++)
+			ucontrol->value.integer.value[i] = 0;
+	}
+
+	return 0;
+}
+
+static int msm_pcm_add_chmap_control(struct snd_soc_pcm_runtime *rtd)
+{
+	struct snd_pcm *pcm = rtd->pcm;
+	struct snd_pcm_chmap *chmap_info;
+	struct snd_kcontrol *kctl;
+	char device_num[12];
+	int i, ret;
+
+	pr_debug("%s, Channel map cntrl add\n", __func__);
+	ret = snd_pcm_add_chmap_ctls(pcm, SNDRV_PCM_STREAM_PLAYBACK,
+				     snd_pcm_std_chmaps,
+				     PCM_FORMAT_MAX_NUM_CHANNEL, 0,
+				     &chmap_info);
+	if (ret)
+		return ret;
+
+	kctl = chmap_info->kctl;
+	for (i = 0; i < kctl->count; i++)
+		kctl->vd[i].access |= SNDRV_CTL_ELEM_ACCESS_WRITE;
+	snprintf(device_num, sizeof(device_num), "%d", pcm->device);
+	strlcat(kctl->id.name, device_num, sizeof(kctl->id.name));
+	pr_debug("%s, Overwriting channel map control name to: %s",
+		__func__, kctl->id.name);
+	kctl->put = msm_pcm_chmap_ctl_put;
+	kctl->get = msm_pcm_chmap_ctl_get;
+	return 0;
+}
+
+static int msm_pcm_fe_topology_info(struct snd_kcontrol *kcontrol,
+				    struct snd_ctl_elem_info *uinfo)
+{
+	const struct soc_enum *e = &msm_pcm_fe_topology_enum[0];
+
+	return snd_ctl_enum_info(uinfo, 1, e->items, e->texts);
+}
+
+static int msm_pcm_fe_topology_get(struct snd_kcontrol *kcontrol,
+				   struct snd_ctl_elem_value *ucontrol)
+{
+	unsigned long fe_id = kcontrol->private_value;
+
+	if (fe_id >= MSM_FRONTEND_DAI_MAX) {
+		pr_err("%s Received out of bound fe_id %lu\n", __func__, fe_id);
+		return -EINVAL;
+	}
+
+	pr_debug("%s: %lu topology %s\n", __func__, fe_id,
+		 msm_pcm_fe_topology_text[msm_pcm_fe_topology[fe_id]]);
+	ucontrol->value.enumerated.item[0] = msm_pcm_fe_topology[fe_id];
+	return 0;
+}
+
+static int msm_pcm_fe_topology_put(struct snd_kcontrol *kcontrol,
+				   struct snd_ctl_elem_value *ucontrol)
+{
+	unsigned long fe_id = kcontrol->private_value;
+	unsigned int item;
+
+	if (fe_id >= MSM_FRONTEND_DAI_MAX) {
+		pr_err("%s Received out of bound fe_id %lu\n", __func__, fe_id);
+		return -EINVAL;
+	}
+
+	item = ucontrol->value.enumerated.item[0];
+	if (item >= ARRAY_SIZE(msm_pcm_fe_topology_text)) {
+		pr_err("%s Received out of bound topology %lu\n", __func__,
+		       fe_id);
+		return -EINVAL;
+	}
+
+	pr_debug("%s: %lu new topology %s\n", __func__, fe_id,
+		 msm_pcm_fe_topology_text[item]);
+	msm_pcm_fe_topology[fe_id] = item;
+	return 0;
+}
+
+static int msm_pcm_add_fe_topology_control(struct snd_soc_pcm_runtime *rtd)
+{
+	const char *mixer_ctl_name = "PCM_Dev";
+	const char *deviceNo       = "NN";
+	const char *topo_text      = "Topology";
+	char *mixer_str = NULL;
+	int ctl_len;
+	int ret;
+	struct snd_kcontrol_new topology_control[1] = {
+		{
+			.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+			.access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+			.name =  "?",
+			.info =  msm_pcm_fe_topology_info,
+			.get = msm_pcm_fe_topology_get,
+			.put = msm_pcm_fe_topology_put,
+			.private_value = 0,
+		},
+	};
+
+	ctl_len = strlen(mixer_ctl_name) + 1 + strlen(deviceNo) + 1 +
+		  strlen(topo_text) + 1;
+	mixer_str = kzalloc(ctl_len, GFP_KERNEL);
+
+	if (!mixer_str)
+		return -ENOMEM;
+
+	snprintf(mixer_str, ctl_len, "%s %d %s", mixer_ctl_name,
+		 rtd->pcm->device, topo_text);
+
+	topology_control[0].name = mixer_str;
+	topology_control[0].private_value = rtd->dai_link->be_id;
+	ret = snd_soc_add_platform_controls(rtd->platform, topology_control,
+					    ARRAY_SIZE(topology_control));
+	msm_pcm_fe_topology[rtd->dai_link->be_id] = 0;
+	kfree(mixer_str);
+	return ret;
+}
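+
+/*
+ * Usage sketch (device number assumed): the control registered above is
+ * named "PCM_Dev <N> Topology", so e.g.
+ *   amixer -c 0 cset name='PCM_Dev 36 Topology' ULL
+ * overrides the front-end topology for that PCM device.
+ */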
+
+static int msm_pcm_playback_app_type_cfg_ctl_put(struct snd_kcontrol *kcontrol,
+				      struct snd_ctl_elem_value *ucontrol)
+{
+	u64 fe_id = kcontrol->private_value;
+	int session_type = SESSION_TYPE_RX;
+	int be_id = ucontrol->value.integer.value[3];
+	struct msm_pcm_stream_app_type_cfg cfg_data = {0, 0, 48000};
+	int ret = 0;
+
+	cfg_data.app_type = ucontrol->value.integer.value[0];
+	cfg_data.acdb_dev_id = ucontrol->value.integer.value[1];
+	if (ucontrol->value.integer.value[2] != 0)
+		cfg_data.sample_rate = ucontrol->value.integer.value[2];
+	pr_debug("%s: fe_id- %llu session_type- %d be_id- %d app_type- %d acdb_dev_id- %d sample_rate- %d\n",
+		__func__, fe_id, session_type, be_id,
+		cfg_data.app_type, cfg_data.acdb_dev_id, cfg_data.sample_rate);
+	ret = msm_pcm_routing_reg_stream_app_type_cfg(fe_id, session_type,
+						      be_id, &cfg_data);
+	if (ret < 0)
+		pr_err("%s: msm_pcm_routing_reg_stream_app_type_cfg failed returned %d\n",
+		       __func__, ret);
+	return ret;
+}
+
+static int msm_pcm_playback_app_type_cfg_ctl_get(struct snd_kcontrol *kcontrol,
+				      struct snd_ctl_elem_value *ucontrol)
+{
+	u64 fe_id = kcontrol->private_value;
+	int session_type = SESSION_TYPE_RX;
+	int be_id = 0;
+	struct msm_pcm_stream_app_type_cfg cfg_data = {0};
+	int ret = 0;
+
+	ret = msm_pcm_routing_get_stream_app_type_cfg(fe_id, session_type,
+						      &be_id, &cfg_data);
+	if (ret < 0) {
+		pr_err("%s: msm_pcm_routing_get_stream_app_type_cfg failed returned %d\n",
+		       __func__, ret);
+		goto done;
+	}
+
+	ucontrol->value.integer.value[0] = cfg_data.app_type;
+	ucontrol->value.integer.value[1] = cfg_data.acdb_dev_id;
+	ucontrol->value.integer.value[2] = cfg_data.sample_rate;
+	ucontrol->value.integer.value[3] = be_id;
+	pr_debug("%s: fedai_id %llu, session_type %d, be_id %d, app_type %d, acdb_dev_id %d, sample_rate %d\n",
+		__func__, fe_id, session_type, be_id,
+		cfg_data.app_type, cfg_data.acdb_dev_id, cfg_data.sample_rate);
+done:
+	return ret;
+}
+
+static int msm_pcm_capture_app_type_cfg_ctl_put(struct snd_kcontrol *kcontrol,
+					struct snd_ctl_elem_value *ucontrol)
+{
+	u64 fe_id = kcontrol->private_value;
+	int session_type = SESSION_TYPE_TX;
+	int be_id = ucontrol->value.integer.value[3];
+	struct msm_pcm_stream_app_type_cfg cfg_data = {0, 0, 48000};
+	int ret = 0;
+
+	cfg_data.app_type = ucontrol->value.integer.value[0];
+	cfg_data.acdb_dev_id = ucontrol->value.integer.value[1];
+	if (ucontrol->value.integer.value[2] != 0)
+		cfg_data.sample_rate = ucontrol->value.integer.value[2];
+	pr_debug("%s: fe_id- %llu session_type- %d be_id- %d app_type- %d acdb_dev_id- %d sample_rate- %d\n",
+		__func__, fe_id, session_type, be_id,
+		cfg_data.app_type, cfg_data.acdb_dev_id, cfg_data.sample_rate);
+	ret = msm_pcm_routing_reg_stream_app_type_cfg(fe_id, session_type,
+						      be_id, &cfg_data);
+	if (ret < 0)
+		pr_err("%s: msm_pcm_routing_reg_stream_app_type_cfg failed returned %d\n",
+		       __func__, ret);
+
+	return ret;
+}
+
+static int msm_pcm_capture_app_type_cfg_ctl_get(struct snd_kcontrol *kcontrol,
+					struct snd_ctl_elem_value *ucontrol)
+{
+	u64 fe_id = kcontrol->private_value;
+	int session_type = SESSION_TYPE_TX;
+	int be_id = 0;
+	struct msm_pcm_stream_app_type_cfg cfg_data = {0};
+	int ret = 0;
+
+	ret = msm_pcm_routing_get_stream_app_type_cfg(fe_id, session_type,
+						      &be_id, &cfg_data);
+	if (ret < 0) {
+		pr_err("%s: msm_pcm_routing_get_stream_app_type_cfg failed returned %d\n",
+		       __func__, ret);
+		goto done;
+	}
+
+	ucontrol->value.integer.value[0] = cfg_data.app_type;
+	ucontrol->value.integer.value[1] = cfg_data.acdb_dev_id;
+	ucontrol->value.integer.value[2] = cfg_data.sample_rate;
+	ucontrol->value.integer.value[3] = be_id;
+	pr_debug("%s: fedai_id %llu, session_type %d, be_id %d, app_type %d, acdb_dev_id %d, sample_rate %d\n",
+		__func__, fe_id, session_type, be_id,
+		cfg_data.app_type, cfg_data.acdb_dev_id, cfg_data.sample_rate);
+done:
+	return ret;
+}
+
+static int msm_pcm_add_app_type_controls(struct snd_soc_pcm_runtime *rtd)
+{
+	struct snd_pcm *pcm = rtd->pcm;
+	struct snd_pcm_usr *app_type_info;
+	struct snd_kcontrol *kctl;
+	const char *playback_mixer_ctl_name = "Audio Stream";
+	const char *capture_mixer_ctl_name = "Audio Stream Capture";
+	const char *deviceNo = "NN";
+	const char *suffix = "App Type Cfg";
+	int ctl_len, ret = 0;
+
+	if (pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream) {
+		ctl_len = strlen(playback_mixer_ctl_name) + 1 +
+				strlen(deviceNo) + 1 +
+				strlen(suffix) + 1;
+		pr_debug("%s: Playback app type cntrl add\n", __func__);
+		ret = snd_pcm_add_usr_ctls(pcm, SNDRV_PCM_STREAM_PLAYBACK,
+				NULL, 1, ctl_len, rtd->dai_link->be_id,
+				&app_type_info);
+		if (ret < 0) {
+			pr_err("%s: playback app type cntrl add failed, err: %d\n",
+				__func__, ret);
+			return ret;
+		}
+		kctl = app_type_info->kctl;
+		snprintf(kctl->id.name, ctl_len, "%s %d %s",
+			     playback_mixer_ctl_name, rtd->pcm->device, suffix);
+		kctl->put = msm_pcm_playback_app_type_cfg_ctl_put;
+		kctl->get = msm_pcm_playback_app_type_cfg_ctl_get;
+	}
+
+	if (pcm->streams[SNDRV_PCM_STREAM_CAPTURE].substream) {
+		ctl_len = strlen(capture_mixer_ctl_name) + 1 +
+				strlen(deviceNo) + 1 + strlen(suffix) + 1;
+		pr_debug("%s: Capture app type cntrl add\n", __func__);
+		ret = snd_pcm_add_usr_ctls(pcm, SNDRV_PCM_STREAM_CAPTURE,
+				NULL, 1, ctl_len, rtd->dai_link->be_id,
+				&app_type_info);
+		if (ret < 0) {
+			pr_err("%s: capture app type cntrl add failed, err: %d\n",
+				__func__, ret);
+			return ret;
+		}
+		kctl = app_type_info->kctl;
+		snprintf(kctl->id.name, ctl_len, "%s %d %s",
+		 capture_mixer_ctl_name, rtd->pcm->device, suffix);
+		kctl->put = msm_pcm_capture_app_type_cfg_ctl_put;
+		kctl->get = msm_pcm_capture_app_type_cfg_ctl_get;
+	}
+
+	return 0;
+}
+
+static int msm_pcm_hwdep_ioctl(struct snd_hwdep *hw, struct file *file,
+			       unsigned int cmd, unsigned long arg)
+{
+	int ret = 0;
+	struct snd_pcm *pcm = hw->private_data;
+	struct snd_pcm_mmap_fd __user *_mmap_fd = NULL;
+	struct snd_pcm_mmap_fd mmap_fd;
+	struct snd_pcm_substream *substream = NULL;
+	int32_t dir = -1;
+
+	switch (cmd) {
+	case SNDRV_PCM_IOCTL_MMAP_DATA_FD:
+		_mmap_fd = (struct snd_pcm_mmap_fd __user *)arg;
+		if (get_user(dir, (int32_t __user *)&(_mmap_fd->dir))) {
+			pr_err("%s: error copying mmap_fd from user\n",
+			       __func__);
+			ret = -EFAULT;
+			break;
+		}
+		if (dir != OUT && dir != IN) {
+			pr_err("%s invalid stream dir\n", __func__);
+			ret = -EINVAL;
+			break;
+		}
+		substream = pcm->streams[dir].substream;
+		if (!substream) {
+			pr_err("%s substream not found\n", __func__);
+			ret = -ENODEV;
+			break;
+		}
+		pr_debug("%s : %s MMAP Data fd\n", __func__,
+		       dir == 0 ? "P" : "C");
+		if (msm_pcm_mmap_fd(substream, &mmap_fd) < 0) {
+			pr_err("%s: error getting fd\n",
+			       __func__);
+			ret = -EFAULT;
+			break;
+		}
+		if (put_user(mmap_fd.fd, &_mmap_fd->fd) ||
+		    put_user(mmap_fd.size, &_mmap_fd->size) ||
+		    put_user(mmap_fd.actual_size, &_mmap_fd->actual_size)) {
+			pr_err("%s: error copying fd\n", __func__);
+			return -EFAULT;
+		}
+		break;
+	default:
+		ret = -EINVAL;
+		break;
+	}
+	return ret;
+}
+
+#ifdef CONFIG_COMPAT
+static int msm_pcm_hwdep_compat_ioctl(struct snd_hwdep *hw,
+				      struct file *file,
+				      unsigned int cmd,
+				      unsigned long arg)
+{
+	/* we only support mmap fd. Handling is common in both modes */
+	return msm_pcm_hwdep_ioctl(hw, file, cmd, arg);
+}
+#else
+static int msm_pcm_hwdep_compat_ioctl(struct snd_hwdep *hw,
+				      struct file *file,
+				      unsigned int cmd,
+				      unsigned long arg)
+{
+	return -EINVAL;
+}
+#endif
+
+static int msm_pcm_add_hwdep_dev(struct snd_soc_pcm_runtime *runtime)
+{
+	struct snd_hwdep *hwdep;
+	int rc;
+	char id[] = "NOIRQ_NN";
+
+	snprintf(id, sizeof(id), "NOIRQ_%d", runtime->pcm->device);
+	pr_debug("%s: pcm dev %d\n", __func__, runtime->pcm->device);
+	rc = snd_hwdep_new(runtime->card->snd_card,
+			   &id[0],
+			   HWDEP_FE_BASE + runtime->pcm->device,
+			   &hwdep);
+	if (!hwdep || rc < 0) {
+		pr_err("%s: hwdep intf failed to create %s - hwdep\n", __func__,
+		       id);
+		return rc;
+	}
+
+	hwdep->iface = SNDRV_HWDEP_IFACE_AUDIO_BE; /* for lack of a FE iface */
+	hwdep->private_data = runtime->pcm; /* of type struct snd_pcm */
+	hwdep->ops.ioctl = msm_pcm_hwdep_ioctl;
+	hwdep->ops.ioctl_compat = msm_pcm_hwdep_compat_ioctl;
+	return 0;
+}
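+
+/*
+ * Userspace sketch (node naming assumed): the hwdep device created above
+ * appears as /dev/snd/hwC<card>D<HWDEP_FE_BASE + pcm device>; issuing
+ * SNDRV_PCM_IOCTL_MMAP_DATA_FD on it with dir set to IN (playback) or
+ * OUT (capture) returns an ion dma-buf fd for the shared audio buffer.
+ */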
+
+static int msm_asoc_pcm_new(struct snd_soc_pcm_runtime *rtd)
+{
+	struct snd_card *card = rtd->card->snd_card;
+	struct snd_pcm *pcm = rtd->pcm;
+	int ret;
+
+	pr_debug("%s , register new control\n", __func__);
+	if (!card->dev->coherent_dma_mask)
+		card->dev->coherent_dma_mask = DMA_BIT_MASK(32);
+
+	ret = msm_pcm_add_chmap_control(rtd);
+	if (ret) {
+		pr_err("%s failed to add chmap cntls\n", __func__);
+		goto exit;
+	}
+	ret = msm_pcm_add_volume_control(rtd);
+	if (ret) {
+		pr_err("%s: Could not add pcm Volume Control %d\n",
+			__func__, ret);
+	}
+
+	ret = msm_pcm_add_fe_topology_control(rtd);
+	if (ret) {
+		pr_err("%s: Could not add pcm topology control %d\n",
+			__func__, ret);
+	}
+
+	ret = msm_pcm_add_app_type_controls(rtd);
+	if (ret) {
+		pr_err("%s: Could not add app type controls %d\n",
+			__func__, ret);
+	}
+	ret = msm_pcm_add_hwdep_dev(rtd);
+	if (ret)
+		pr_err("%s: Could not add hw dep node\n", __func__);
+	pcm->nonatomic = true;
+exit:
+	return ret;
+}
+
+static struct snd_pcm_ops msm_pcm_ops = {
+	.open           = msm_pcm_open,
+	.prepare        = msm_pcm_prepare,
+	.copy           = msm_pcm_copy,
+	.hw_params	= msm_pcm_hw_params,
+	.ioctl          = msm_pcm_ioctl,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl   = msm_pcm_compat_ioctl,
+#endif
+	.trigger        = msm_pcm_trigger,
+	.pointer        = msm_pcm_pointer,
+	.mmap           = msm_pcm_mmap,
+	.close          = msm_pcm_close,
+};
+
+static struct snd_soc_platform_driver msm_soc_platform = {
+	.ops		= &msm_pcm_ops,
+	.pcm_new	= msm_asoc_pcm_new,
+};
+
+static int msm_pcm_probe(struct platform_device *pdev)
+{
+	int rc;
+	struct msm_plat_data *pdata;
+	const char *latency_level;
+	int perf_mode = LOW_LATENCY_PCM_MODE;
+
+	dev_dbg(&pdev->dev, "Pull mode driver probe\n");
+
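+	/*
+	 * Latency defaults to low-latency PCM; a DT node tagged
+	 * qcom,msm-pcm-low-latency may request "ultra" or "ull-pp"
+	 * via the qcom,latency-level property.
+	 */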
+	if (of_property_read_bool(pdev->dev.of_node,
+				  "qcom,msm-pcm-low-latency")) {
+
+		rc = of_property_read_string(pdev->dev.of_node,
+			"qcom,latency-level", &latency_level);
+		if (!rc) {
+			if (!strcmp(latency_level, "ultra"))
+				perf_mode = ULTRA_LOW_LATENCY_PCM_MODE;
+			else if (!strcmp(latency_level, "ull-pp"))
+				perf_mode = ULL_POST_PROCESSING_PCM_MODE;
+		}
+	}
+
+	pdata = devm_kzalloc(&pdev->dev,
+			     sizeof(struct msm_plat_data), GFP_KERNEL);
+	if (!pdata)
+		return -ENOMEM;
+
+	pdata->perf_mode = perf_mode;
+
+	dev_set_drvdata(&pdev->dev, pdata);
+
+	dev_dbg(&pdev->dev, "%s: dev name %s\n",
+				__func__, dev_name(&pdev->dev));
+	dev_dbg(&pdev->dev, "Pull mode driver register\n");
+	rc = snd_soc_register_platform(&pdev->dev,
+				       &msm_soc_platform);
+
+	if (rc)
+		dev_err(&pdev->dev, "Failed to register pull mode driver\n");
+
+	return rc;
+}
+
+static int msm_pcm_remove(struct platform_device *pdev)
+{
+	struct msm_plat_data *pdata;
+
+	dev_dbg(&pdev->dev, "Pull mode remove\n");
+	pdata = dev_get_drvdata(&pdev->dev);
+	devm_kfree(&pdev->dev, pdata);
+	snd_soc_unregister_platform(&pdev->dev);
+	return 0;
+}
+
+static const struct of_device_id msm_pcm_dt_match[] = {
+	{.compatible = "qcom,msm-pcm-dsp-noirq"},
+	{}
+};
+MODULE_DEVICE_TABLE(of, msm_pcm_dt_match);
+
+static struct platform_driver msm_pcm_driver_noirq = {
+	.driver = {
+		.name = "msm-pcm-dsp-noirq",
+		.owner = THIS_MODULE,
+		.of_match_table = msm_pcm_dt_match,
+	},
+	.probe = msm_pcm_probe,
+	.remove = msm_pcm_remove,
+};
+
+static int __init msm_soc_platform_init(void)
+{
+	return platform_driver_register(&msm_pcm_driver_noirq);
+}
+module_init(msm_soc_platform_init);
+
+static void __exit msm_soc_platform_exit(void)
+{
+	platform_driver_unregister(&msm_pcm_driver_noirq);
+}
+module_exit(msm_soc_platform_exit);
+
+MODULE_DESCRIPTION("PCM NOIRQ module platform driver");
+MODULE_LICENSE("GPL v2");
diff -Nruw linux-4.4.115/sound/soc/msm/qdsp6v2/msm-pcm-q6-v2.c linux-4.4.115-fbx/sound/soc/msm/qdsp6v2/msm-pcm-q6-v2.c
--- linux-4.4.115/sound/soc/msm/qdsp6v2/msm-pcm-q6-v2.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/sound/soc/msm/qdsp6v2/msm-pcm-q6-v2.c	2019-10-29 09:26:26.149227702 +0100
@@ -0,0 +1,2282 @@
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/time.h>
+#include <linux/wait.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <sound/core.h>
+#include <sound/soc.h>
+#include <sound/soc-dapm.h>
+#include <sound/pcm.h>
+#include <sound/initval.h>
+#include <sound/control.h>
+#include <sound/q6audio-v2.h>
+#include <sound/timer.h>
+#include <asm/dma.h>
+#include <linux/dma-mapping.h>
+#include <linux/msm_audio_ion.h>
+#include <linux/msm_audio.h>
+
+#include <linux/of_device.h>
+#include <sound/tlv.h>
+#include <sound/pcm_params.h>
+
+#include "msm-pcm-q6-v2.h"
+#include "msm-pcm-routing-v2.h"
+#include "msm-qti-pp-config.h"
+
+enum stream_state {
+	IDLE = 0,
+	STOPPED,
+	RUNNING,
+};
+
+static struct audio_locks the_locks;
+
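+/*
+ * Master volume is exported as a linear TLV over 0..0x2000 steps;
+ * 0x2000 is used as the full-scale gain passed to q6asm_set_volume().
+ */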
+#define PCM_MASTER_VOL_MAX_STEPS	0x2000
+static const DECLARE_TLV_DB_LINEAR(msm_pcm_vol_gain, 0,
+			PCM_MASTER_VOL_MAX_STEPS);
+
+struct snd_msm {
+	struct snd_card *card;
+	struct snd_pcm *pcm;
+};
+
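+/*
+ * CMD_EOS handling: playback close scales its EOS wait with the period
+ * duration, clamped to at least CMD_EOS_MIN_TIMEOUT_LENGTH jiffies.
+ */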
+#define CMD_EOS_MIN_TIMEOUT_LENGTH  50
+#define CMD_EOS_TIMEOUT_MULTIPLIER  (HZ * 50)
+#define MAX_PB_COPY_RETRIES         3
+
+static struct snd_pcm_hardware msm_pcm_hardware_capture = {
+	.info =                 (SNDRV_PCM_INFO_MMAP |
+				SNDRV_PCM_INFO_BLOCK_TRANSFER |
+				SNDRV_PCM_INFO_MMAP_VALID |
+				SNDRV_PCM_INFO_INTERLEAVED |
+				SNDRV_PCM_INFO_PAUSE | SNDRV_PCM_INFO_RESUME),
+	.formats =              (SNDRV_PCM_FMTBIT_S16_LE |
+				SNDRV_PCM_FMTBIT_S24_LE |
+				SNDRV_PCM_FMTBIT_S24_3LE |
+				SNDRV_PCM_FMTBIT_S32_LE),
+	.rates =                SNDRV_PCM_RATE_8000_384000,
+	.rate_min =             8000,
+	.rate_max =             384000,
+	.channels_min =         1,
+	.channels_max =         4,
+	.buffer_bytes_max =     CAPTURE_MAX_NUM_PERIODS *
+				CAPTURE_MAX_PERIOD_SIZE,
+	.period_bytes_min =	CAPTURE_MIN_PERIOD_SIZE,
+	.period_bytes_max =     CAPTURE_MAX_PERIOD_SIZE,
+	.periods_min =          CAPTURE_MIN_NUM_PERIODS,
+	.periods_max =          CAPTURE_MAX_NUM_PERIODS,
+	.fifo_size =            0,
+};
+
+static struct snd_pcm_hardware msm_pcm_hardware_playback = {
+	.info =                 (SNDRV_PCM_INFO_MMAP |
+				SNDRV_PCM_INFO_BLOCK_TRANSFER |
+				SNDRV_PCM_INFO_MMAP_VALID |
+				SNDRV_PCM_INFO_INTERLEAVED |
+				SNDRV_PCM_INFO_PAUSE | SNDRV_PCM_INFO_RESUME),
+	.formats =              (SNDRV_PCM_FMTBIT_S16_LE |
+				SNDRV_PCM_FMTBIT_S24_LE |
+				SNDRV_PCM_FMTBIT_S24_3LE |
+				SNDRV_PCM_FMTBIT_S32_LE),
+	.rates =                SNDRV_PCM_RATE_8000_384000,
+	.rate_min =             8000,
+	.rate_max =             384000,
+	.channels_min =         1,
+	.channels_max =         8,
+	.buffer_bytes_max =     PLAYBACK_MAX_NUM_PERIODS *
+				PLAYBACK_MAX_PERIOD_SIZE,
+	.period_bytes_min =	PLAYBACK_MIN_PERIOD_SIZE,
+	.period_bytes_max =     PLAYBACK_MAX_PERIOD_SIZE,
+	.periods_min =          PLAYBACK_MIN_NUM_PERIODS,
+	.periods_max =          PLAYBACK_MAX_NUM_PERIODS,
+	.fifo_size =            0,
+};
+
+/* Conventional and unconventional sample rates supported */
+static unsigned int supported_sample_rates[] = {
+	8000, 11025, 12000, 16000, 22050, 24000, 32000, 44100, 48000,
+	88200, 96000, 176400, 192000, 352800, 384000
+};
+
+static struct snd_pcm_hw_constraint_list constraints_sample_rates = {
+	.count = ARRAY_SIZE(supported_sample_rates),
+	.list = supported_sample_rates,
+	.mask = 0,
+};
+
+static void msm_pcm_route_event_handler(enum msm_pcm_routing_event event,
+					void *priv_data)
+{
+	struct msm_audio *prtd = priv_data;
+
+	BUG_ON(!prtd);
+
+	pr_debug("%s: event %x\n", __func__, event);
+
+	switch (event) {
+	case MSM_PCM_RT_EVT_BUF_RECFG:
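+		/*
+		 * Routing asked for a buffer reconfig: pause the session,
+		 * flush whatever is queued on the DSP, then run it again
+		 * so fresh buffers are picked up.
+		 */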
+		q6asm_cmd(prtd->audio_client, CMD_PAUSE);
+		q6asm_cmd(prtd->audio_client, CMD_FLUSH);
+		q6asm_run(prtd->audio_client, 0, 0, 0);
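+		/* fall through */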
+	default:
+		break;
+	}
+}
+
+static void event_handler(uint32_t opcode,
+		uint32_t token, uint32_t *payload, void *priv)
+{
+	struct msm_audio *prtd = priv;
+	struct snd_pcm_substream *substream = prtd->substream;
+	uint32_t *ptrmem = (uint32_t *)payload;
+	uint32_t idx = 0;
+	uint32_t size = 0;
+	uint8_t buf_index;
+	struct snd_soc_pcm_runtime *rtd;
+	int ret = 0;
+
+	switch (opcode) {
+	case ASM_DATA_EVENT_WRITE_DONE_V2: {
+		pr_debug("ASM_DATA_EVENT_WRITE_DONE_V2\n");
+		pr_debug("Buffer Consumed = 0x%08x\n", *ptrmem);
+		prtd->pcm_irq_pos += prtd->pcm_count;
+		if (atomic_read(&prtd->start))
+			snd_pcm_period_elapsed(substream);
+		atomic_inc(&prtd->out_count);
+		wake_up(&the_locks.write_wait);
+		if (!atomic_read(&prtd->start))
+			break;
+		if (!prtd->mmap_flag || prtd->reset_event)
+			break;
+		if (q6asm_is_cpu_buf_avail_nolock(IN,
+				prtd->audio_client,
+				&size, &idx)) {
+			pr_debug("%s:writing %d bytes of buffer to dsp 2\n",
+					__func__, prtd->pcm_count);
+			q6asm_write_nolock(prtd->audio_client,
+				prtd->pcm_count, 0, 0, NO_TIMESTAMP);
+		}
+		break;
+	}
+	case ASM_DATA_EVENT_RENDERED_EOS:
+		pr_debug("ASM_DATA_EVENT_RENDERED_EOS\n");
+		clear_bit(CMD_EOS, &prtd->cmd_pending);
+		wake_up(&the_locks.eos_wait);
+		break;
+	case ASM_DATA_EVENT_READ_DONE_V2: {
+		pr_debug("ASM_DATA_EVENT_READ_DONE_V2\n");
+		buf_index = q6asm_get_buf_index_from_token(token);
+		if (buf_index >= CAPTURE_MAX_NUM_PERIODS) {
+			pr_err("%s: buffer index %u is out of range.\n",
+				__func__, buf_index);
+			return;
+		}
+		pr_debug("%s: token=0x%08x buf_index=0x%08x\n",
+			 __func__, token, buf_index);
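+		/* payload words 4 and 5 carry the captured frame's size
+		 * and its offset within the buffer */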
+		prtd->in_frame_info[buf_index].size = payload[4];
+		prtd->in_frame_info[buf_index].offset = payload[5];
+		/* assume data size = 0 during flushing */
+		if (prtd->in_frame_info[buf_index].size) {
+			prtd->pcm_irq_pos +=
+				prtd->in_frame_info[buf_index].size;
+			pr_debug("pcm_irq_pos=%d\n", prtd->pcm_irq_pos);
+			if (atomic_read(&prtd->start))
+				snd_pcm_period_elapsed(substream);
+			if (atomic_read(&prtd->in_count) <= prtd->periods)
+				atomic_inc(&prtd->in_count);
+			wake_up(&the_locks.read_wait);
+			if (prtd->mmap_flag &&
+			    q6asm_is_cpu_buf_avail_nolock(OUT,
+				prtd->audio_client,
+				&size, &idx) &&
+			    (substream->runtime->status->state ==
+			     SNDRV_PCM_STATE_RUNNING))
+				q6asm_read_nolock(prtd->audio_client);
+		} else {
+			pr_debug("%s: reclaim flushed buf in_count %x\n",
+				__func__, atomic_read(&prtd->in_count));
+			prtd->pcm_irq_pos += prtd->pcm_count;
+			if (prtd->mmap_flag) {
+				if (q6asm_is_cpu_buf_avail_nolock(OUT,
+				    prtd->audio_client,
+				    &size, &idx) &&
+				    (substream->runtime->status->state ==
+				    SNDRV_PCM_STATE_RUNNING))
+					q6asm_read_nolock(prtd->audio_client);
+			} else {
+				atomic_inc(&prtd->in_count);
+			}
+			if (atomic_read(&prtd->in_count) == prtd->periods) {
+				pr_debug("%s: reclaimed all bufs\n", __func__);
+				if (atomic_read(&prtd->start))
+					snd_pcm_period_elapsed(substream);
+				wake_up(&the_locks.read_wait);
+			}
+		}
+		break;
+	}
+	case ASM_STREAM_PP_EVENT:
+	case ASM_STREAM_CMD_ENCDEC_EVENTS: {
+		pr_debug("%s: ASM_STREAM_EVENT (0x%x)\n", __func__, opcode);
+		if (!substream) {
+			pr_err("%s: substream is NULL.\n", __func__);
+			return;
+		}
+
+		rtd = substream->private_data;
+		if (!rtd) {
+			pr_err("%s: rtd is NULL\n", __func__);
+			return;
+		}
+
+		ret = msm_adsp_inform_mixer_ctl(rtd, payload);
+		if (ret) {
+			pr_err("%s: failed to inform mixer ctl. err = %d\n",
+				__func__, ret);
+			return;
+		}
+
+		break;
+	}
+	case APR_BASIC_RSP_RESULT: {
+		switch (payload[0]) {
+		case ASM_SESSION_CMD_RUN_V2:
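+			/*
+			 * Run acked: for mmap playback, prime the DSP with
+			 * one period; for non-mmap, queue every period
+			 * userspace already committed (out_needed).
+			 */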
+			if (substream->stream
+				!= SNDRV_PCM_STREAM_PLAYBACK) {
+				atomic_set(&prtd->start, 1);
+				break;
+			}
+			if (prtd->mmap_flag) {
+				pr_debug("%s:writing %d bytes of buffer to dsp\n",
+					__func__,
+					prtd->pcm_count);
+				q6asm_write_nolock(prtd->audio_client,
+					prtd->pcm_count,
+					0, 0, NO_TIMESTAMP);
+			} else {
+				while (atomic_read(&prtd->out_needed)) {
+					pr_debug("%s:writing %d bytes of buffer to dsp\n",
+						__func__,
+						prtd->pcm_count);
+					q6asm_write_nolock(prtd->audio_client,
+						prtd->pcm_count,
+						0, 0, NO_TIMESTAMP);
+					atomic_dec(&prtd->out_needed);
+					wake_up(&the_locks.write_wait);
+				}
+			}
+			atomic_set(&prtd->start, 1);
+			break;
+		case ASM_STREAM_CMD_REGISTER_PP_EVENTS:
+			pr_debug("%s: ASM_STREAM_CMD_REGISTER_PP_EVENTS:",
+				__func__);
+			break;
+		default:
+			pr_debug("%s:Payload = [0x%x]stat[0x%x]\n",
+				__func__, payload[0], payload[1]);
+			break;
+		}
+	}
+	break;
+	case RESET_EVENTS:
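+		/*
+		 * ADSP restart (SSR): flag the stream and wake every
+		 * waiter so blocked reads/writes/EOS waits can bail out
+		 * with -ENETRESET.
+		 */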
+		pr_debug("%s RESET_EVENTS\n", __func__);
+		prtd->pcm_irq_pos += prtd->pcm_count;
+		atomic_inc(&prtd->out_count);
+		atomic_inc(&prtd->in_count);
+		prtd->reset_event = true;
+		if (atomic_read(&prtd->start))
+			snd_pcm_period_elapsed(substream);
+		wake_up(&the_locks.eos_wait);
+		wake_up(&the_locks.write_wait);
+		wake_up(&the_locks.read_wait);
+		break;
+	default:
+		pr_debug("Unsupported event opcode[0x%x]\n", opcode);
+		break;
+	}
+}
+
+static int msm_pcm_playback_prepare(struct snd_pcm_substream *substream)
+{
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	struct snd_soc_pcm_runtime *soc_prtd = substream->private_data;
+	struct msm_audio *prtd = runtime->private_data;
+	struct msm_plat_data *pdata;
+	struct snd_pcm_hw_params *params;
+	int ret;
+	uint32_t fmt_type = FORMAT_LINEAR_PCM;
+	uint16_t bits_per_sample;
+	uint16_t sample_word_size;
+
+	pdata = (struct msm_plat_data *)
+		dev_get_drvdata(soc_prtd->platform->dev);
+	if (!pdata) {
+		pr_err("%s: platform data not populated\n", __func__);
+		return -EINVAL;
+	}
+	if (!prtd || !prtd->audio_client) {
+		pr_err("%s: private data null or audio client freed\n",
+			__func__);
+		return -EINVAL;
+	}
+	params = &soc_prtd->dpcm[substream->stream].hw_params;
+
+	pr_debug("%s\n", __func__);
+	prtd->pcm_size = snd_pcm_lib_buffer_bytes(substream);
+	prtd->pcm_count = snd_pcm_lib_period_bytes(substream);
+	prtd->pcm_irq_pos = 0;
+	/* rate and channels are sent to audio driver */
+	prtd->samp_rate = runtime->rate;
+	prtd->channel_mode = runtime->channels;
+	if (prtd->enabled)
+		return 0;
+
+	prtd->audio_client->perf_mode = pdata->perf_mode;
+	pr_debug("%s: perf: %x\n", __func__, pdata->perf_mode);
+
+	switch (params_format(params)) {
+	case SNDRV_PCM_FORMAT_S32_LE:
+		bits_per_sample = 32;
+		sample_word_size = 32;
+		break;
+	case SNDRV_PCM_FORMAT_S24_LE:
+		bits_per_sample = 24;
+		sample_word_size = 32;
+		break;
+	case SNDRV_PCM_FORMAT_S24_3LE:
+		bits_per_sample = 24;
+		sample_word_size = 24;
+		break;
+	case SNDRV_PCM_FORMAT_S16_LE:
+	default:
+		bits_per_sample = 16;
+		sample_word_size = 16;
+		break;
+	}
+	if (prtd->compress_enable) {
+		fmt_type = FORMAT_GEN_COMPR;
+		pr_debug("%s: Compressed enabled!\n", __func__);
+		ret = q6asm_open_write_compressed(prtd->audio_client, fmt_type,
+				COMPRESSED_PASSTHROUGH_GEN);
+		if (ret < 0) {
+			pr_err("%s: q6asm_open_write_compressed failed (%d)\n",
+			__func__, ret);
+			q6asm_audio_client_free(prtd->audio_client);
+			prtd->audio_client = NULL;
+			return -ENOMEM;
+		}
+	} else {
+		ret = q6asm_open_write_v4(prtd->audio_client,
+			fmt_type, bits_per_sample);
+
+		if (ret < 0) {
+			pr_err("%s: q6asm_open_write_v4 failed (%d)\n",
+			__func__, ret);
+			q6asm_audio_client_free(prtd->audio_client);
+			prtd->audio_client = NULL;
+			return -ENOMEM;
+		}
+
+		ret = q6asm_send_cal(prtd->audio_client);
+		if (ret < 0)
+			pr_debug("%s: Send cal failed: %d\n", __func__, ret);
+	}
+	pr_debug("%s: session ID %d\n", __func__,
+			prtd->audio_client->session);
+	prtd->session_id = prtd->audio_client->session;
+
+	if (prtd->compress_enable) {
+		ret = msm_pcm_routing_reg_phy_compr_stream(
+				soc_prtd->dai_link->be_id,
+				prtd->audio_client->perf_mode,
+				prtd->session_id,
+				SNDRV_PCM_STREAM_PLAYBACK,
+				COMPRESSED_PASSTHROUGH_GEN);
+	} else {
+		ret = msm_pcm_routing_reg_phy_stream(soc_prtd->dai_link->be_id,
+			prtd->audio_client->perf_mode,
+			prtd->session_id, substream->stream);
+	}
+	if (ret) {
+		pr_err("%s: stream reg failed ret:%d\n", __func__, ret);
+		return ret;
+	}
+	if (prtd->compress_enable) {
+		ret = q6asm_media_format_block_gen_compr(
+			prtd->audio_client, runtime->rate,
+			runtime->channels, !prtd->set_channel_map,
+			prtd->channel_map, bits_per_sample);
+	} else {
+		ret = q6asm_media_format_block_multi_ch_pcm_v4(
+				prtd->audio_client, runtime->rate,
+				runtime->channels, !prtd->set_channel_map,
+				prtd->channel_map, bits_per_sample,
+				sample_word_size, ASM_LITTLE_ENDIAN,
+				DEFAULT_QF);
+	}
+	if (ret < 0)
+		pr_info("%s: CMD Format block failed\n", __func__);
+
+	atomic_set(&prtd->out_count, runtime->periods);
+
+	prtd->enabled = 1;
+	prtd->cmd_pending = 0;
+	prtd->cmd_interrupt = 0;
+
+	return 0;
+}
+
+static int msm_pcm_capture_prepare(struct snd_pcm_substream *substream)
+{
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	struct msm_audio *prtd = runtime->private_data;
+	struct snd_soc_pcm_runtime *soc_prtd = substream->private_data;
+	struct msm_plat_data *pdata;
+	struct snd_pcm_hw_params *params;
+	struct msm_pcm_routing_evt event;
+	int ret = 0;
+	int i = 0;
+	uint16_t bits_per_sample = 16;
+	uint16_t sample_word_size;
+
+	pdata = (struct msm_plat_data *)
+		dev_get_drvdata(soc_prtd->platform->dev);
+	if (!pdata) {
+		pr_err("%s: platform data not populated\n", __func__);
+		return -EINVAL;
+	}
+	if (!prtd || !prtd->audio_client) {
+		pr_err("%s: private data null or audio client freed\n",
+			__func__);
+		return -EINVAL;
+	}
+
+	if (prtd->enabled == IDLE) {
+		pr_debug("%s:perf_mode=%d periods=%d\n", __func__,
+			pdata->perf_mode, runtime->periods);
+		params = &soc_prtd->dpcm[substream->stream].hw_params;
+		if ((params_format(params) == SNDRV_PCM_FORMAT_S24_LE) ||
+			(params_format(params) == SNDRV_PCM_FORMAT_S24_3LE))
+			bits_per_sample = 24;
+		else if (params_format(params) == SNDRV_PCM_FORMAT_S32_LE)
+			bits_per_sample = 32;
+
+		/* ULL mode is not supported in capture path */
+		if (pdata->perf_mode == LEGACY_PCM_MODE)
+			prtd->audio_client->perf_mode = LEGACY_PCM_MODE;
+		else
+			prtd->audio_client->perf_mode = LOW_LATENCY_PCM_MODE;
+
+		pr_debug("%s Opening %d-ch PCM read stream, perf_mode %d\n",
+				__func__, params_channels(params),
+				prtd->audio_client->perf_mode);
+
+		ret = q6asm_open_read_v4(prtd->audio_client, FORMAT_LINEAR_PCM,
+				bits_per_sample, false);
+		if (ret < 0) {
+			pr_err("%s: q6asm_open_read failed\n", __func__);
+			q6asm_audio_client_free(prtd->audio_client);
+			prtd->audio_client = NULL;
+			return -ENOMEM;
+		}
+
+		ret = q6asm_send_cal(prtd->audio_client);
+		if (ret < 0)
+			pr_debug("%s: Send cal failed: %d\n", __func__, ret);
+
+		pr_debug("%s: session ID %d\n",
+				__func__, prtd->audio_client->session);
+		prtd->session_id = prtd->audio_client->session;
+		event.event_func = msm_pcm_route_event_handler;
+		event.priv_data = (void *) prtd;
+		ret = msm_pcm_routing_reg_phy_stream_v2(
+				soc_prtd->dai_link->be_id,
+				prtd->audio_client->perf_mode,
+				prtd->session_id, substream->stream,
+				event);
+		if (ret) {
+			pr_err("%s: stream reg failed ret:%d\n", __func__, ret);
+			return ret;
+		}
+	}
+
+	prtd->pcm_size = snd_pcm_lib_buffer_bytes(substream);
+	prtd->pcm_count = snd_pcm_lib_period_bytes(substream);
+	prtd->pcm_irq_pos = 0;
+	/* rate and channels are sent to audio driver */
+	prtd->samp_rate = runtime->rate;
+	prtd->channel_mode = runtime->channels;
+
+	if (prtd->enabled == IDLE || prtd->enabled == STOPPED) {
+		for (i = 0; i < runtime->periods; i++)
+			q6asm_read(prtd->audio_client);
+		prtd->periods = runtime->periods;
+	}
+
+	if (prtd->enabled != IDLE)
+		return 0;
+
+	switch (runtime->format) {
+	case SNDRV_PCM_FORMAT_S32_LE:
+		bits_per_sample = 32;
+		sample_word_size = 32;
+		break;
+	case SNDRV_PCM_FORMAT_S24_LE:
+		bits_per_sample = 24;
+		sample_word_size = 32;
+		break;
+	case SNDRV_PCM_FORMAT_S24_3LE:
+		bits_per_sample = 24;
+		sample_word_size = 24;
+		break;
+	case SNDRV_PCM_FORMAT_S16_LE:
+	default:
+		bits_per_sample = 16;
+		sample_word_size = 16;
+		break;
+	}
+
+	pr_debug("%s: Samp_rate = %d Channel = %d bit width = %d, word size = %d\n",
+			__func__, prtd->samp_rate, prtd->channel_mode,
+			bits_per_sample, sample_word_size);
+	ret = q6asm_enc_cfg_blk_pcm_format_support_v4(prtd->audio_client,
+						      prtd->samp_rate,
+						      prtd->channel_mode,
+						      bits_per_sample,
+						      sample_word_size,
+						      ASM_LITTLE_ENDIAN,
+						      DEFAULT_QF);
+	if (ret < 0)
+		pr_debug("%s: cmd cfg pcm format block failed\n", __func__);
+
+	prtd->enabled = RUNNING;
+
+	return ret;
+}
+
+static int msm_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
+{
+	int ret = 0;
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	struct msm_audio *prtd = runtime->private_data;
+
+	switch (cmd) {
+	case SNDRV_PCM_TRIGGER_START:
+	case SNDRV_PCM_TRIGGER_RESUME:
+	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+		pr_debug("%s: Trigger start\n", __func__);
+		ret = q6asm_run_nowait(prtd->audio_client, 0, 0, 0);
+		break;
+	case SNDRV_PCM_TRIGGER_STOP:
+		pr_debug("SNDRV_PCM_TRIGGER_STOP\n");
+		atomic_set(&prtd->start, 0);
+		if (substream->stream != SNDRV_PCM_STREAM_PLAYBACK) {
+			prtd->enabled = STOPPED;
+			ret = q6asm_cmd_nowait(prtd->audio_client, CMD_PAUSE);
+			break;
+		}
+		/* pending CMD_EOS isn't expected */
+		WARN_ON_ONCE(test_bit(CMD_EOS, &prtd->cmd_pending));
+		set_bit(CMD_EOS, &prtd->cmd_pending);
+		ret = q6asm_cmd_nowait(prtd->audio_client, CMD_EOS);
+		if (ret)
+			clear_bit(CMD_EOS, &prtd->cmd_pending);
+		break;
+	case SNDRV_PCM_TRIGGER_SUSPEND:
+	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+		pr_debug("SNDRV_PCM_TRIGGER_PAUSE\n");
+		ret = q6asm_cmd_nowait(prtd->audio_client, CMD_PAUSE);
+		atomic_set(&prtd->start, 0);
+		break;
+	default:
+		ret = -EINVAL;
+		break;
+	}
+
+	return ret;
+}
+
+static int msm_pcm_open(struct snd_pcm_substream *substream)
+{
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	struct snd_soc_pcm_runtime *soc_prtd = substream->private_data;
+	struct msm_audio *prtd;
+	int ret = 0;
+
+	prtd = kzalloc(sizeof(struct msm_audio), GFP_KERNEL);
+	if (prtd == NULL) {
+		pr_err("Failed to allocate memory for msm_audio\n");
+		return -ENOMEM;
+	}
+	prtd->substream = substream;
+	prtd->audio_client = q6asm_audio_client_alloc(
+				(app_cb)event_handler, prtd);
+	if (!prtd->audio_client) {
+		pr_err("%s: Could not allocate memory\n", __func__);
+		kfree(prtd);
+		return -ENOMEM;
+	}
+
+	prtd->audio_client->dev = soc_prtd->platform->dev;
+
+	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+		runtime->hw = msm_pcm_hardware_playback;
+	} else if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) {
+		/* Capture path */
+		runtime->hw = msm_pcm_hardware_capture;
+	} else {
+		pr_err("Invalid Stream type %d\n", substream->stream);
+		q6asm_audio_client_free(prtd->audio_client);
+		kfree(prtd);
+		return -EINVAL;
+	}
+
+	ret = snd_pcm_hw_constraint_list(runtime, 0,
+				SNDRV_PCM_HW_PARAM_RATE,
+				&constraints_sample_rates);
+	if (ret < 0)
+		pr_info("snd_pcm_hw_constraint_list failed\n");
+	/* Ensure that buffer size is a multiple of period size */
+	ret = snd_pcm_hw_constraint_integer(runtime,
+					    SNDRV_PCM_HW_PARAM_PERIODS);
+	if (ret < 0)
+		pr_info("snd_pcm_hw_constraint_integer failed\n");
+
+	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+		ret = snd_pcm_hw_constraint_minmax(runtime,
+			SNDRV_PCM_HW_PARAM_BUFFER_BYTES,
+			PLAYBACK_MIN_NUM_PERIODS * PLAYBACK_MIN_PERIOD_SIZE,
+			PLAYBACK_MAX_NUM_PERIODS * PLAYBACK_MAX_PERIOD_SIZE);
+		if (ret < 0) {
+			pr_err("constraint for buffer bytes min max ret = %d\n",
+									ret);
+		}
+	}
+
+	if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) {
+		ret = snd_pcm_hw_constraint_minmax(runtime,
+			SNDRV_PCM_HW_PARAM_BUFFER_BYTES,
+			CAPTURE_MIN_NUM_PERIODS * CAPTURE_MIN_PERIOD_SIZE,
+			CAPTURE_MAX_NUM_PERIODS * CAPTURE_MAX_PERIOD_SIZE);
+		if (ret < 0) {
+			pr_err("constraint for buffer bytes min max ret = %d\n",
+									ret);
+		}
+	}
+	ret = snd_pcm_hw_constraint_step(runtime, 0,
+		SNDRV_PCM_HW_PARAM_PERIOD_BYTES, 32);
+	if (ret < 0) {
+		pr_err("constraint for period bytes step ret = %d\n",
+								ret);
+	}
+	ret = snd_pcm_hw_constraint_step(runtime, 0,
+		SNDRV_PCM_HW_PARAM_BUFFER_BYTES, 32);
+	if (ret < 0) {
+		pr_err("constraint for buffer bytes step ret = %d\n",
+								ret);
+	}
+
+	prtd->enabled = IDLE;
+	prtd->dsp_cnt = 0;
+	prtd->set_channel_map = false;
+	prtd->reset_event = false;
+	runtime->private_data = prtd;
+	msm_adsp_init_mixer_ctl_pp_event_queue(soc_prtd);
+
+	return 0;
+}
+
+static int msm_pcm_playback_copy(struct snd_pcm_substream *substream, int a,
+	snd_pcm_uframes_t hwoff, void __user *buf, snd_pcm_uframes_t frames)
+{
+	int ret = 0;
+	int fbytes = 0;
+	int xfer = 0;
+	char *bufptr = NULL;
+	void *data = NULL;
+	uint32_t idx = 0;
+	uint32_t size = 0;
+	uint32_t retries = 0;
+
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	struct msm_audio *prtd = runtime->private_data;
+
+	fbytes = frames_to_bytes(runtime, frames);
+	pr_debug("%s: prtd->out_count = %d\n",
+				__func__, atomic_read(&prtd->out_count));
+
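+	/*
+	 * Hand the user data to the DSP one shared buffer at a time:
+	 * wait for a free buffer, copy into it, then queue it.  Give up
+	 * after MAX_PB_COPY_RETRIES consecutive failures to obtain one.
+	 */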
+	while ((fbytes > 0) && (retries < MAX_PB_COPY_RETRIES)) {
+		if (prtd->reset_event) {
+			pr_err("%s: In SSR return ENETRESET before wait\n",
+				__func__);
+			return -ENETRESET;
+		}
+
+		ret = wait_event_timeout(the_locks.write_wait,
+				(atomic_read(&prtd->out_count)), 5 * HZ);
+		if (!ret) {
+			pr_err("%s: wait_event_timeout failed\n", __func__);
+			ret = -ETIMEDOUT;
+			goto fail;
+		}
+		ret = 0;
+
+		if (prtd->reset_event) {
+			pr_err("%s: In SSR return ENETRESET after wait\n",
+				__func__);
+			return -ENETRESET;
+		}
+
+		if (!atomic_read(&prtd->out_count)) {
+			pr_err("%s: pcm stopped out_count 0\n", __func__);
+			return 0;
+		}
+
+		data = q6asm_is_cpu_buf_avail(IN, prtd->audio_client, &size,
+			&idx);
+		if (data == NULL) {
+			retries++;
+			continue;
+		} else {
+			retries = 0;
+		}
+
+		if (fbytes > size)
+			xfer = size;
+		else
+			xfer = fbytes;
+
+		bufptr = data;
+		if (bufptr) {
+			pr_debug("%s:fbytes =%d: xfer=%d size=%d\n",
+						__func__, fbytes, xfer, size);
+			if (copy_from_user(bufptr, buf, xfer)) {
+				ret = -EFAULT;
+				pr_err("%s: copy_from_user failed\n",
+					__func__);
+				q6asm_cpu_buf_release(IN, prtd->audio_client);
+				goto fail;
+			}
+			buf += xfer;
+			fbytes -= xfer;
+			pr_debug("%s:fbytes = %d: xfer=%d\n", __func__, fbytes,
+				xfer);
+			if (atomic_read(&prtd->start)) {
+				pr_debug("%s:writing %d bytes of buffer to dsp\n",
+						__func__, xfer);
+				ret = q6asm_write(prtd->audio_client, xfer,
+							0, 0, NO_TIMESTAMP);
+				if (ret < 0) {
+					ret = -EFAULT;
+					q6asm_cpu_buf_release(IN,
+						prtd->audio_client);
+					goto fail;
+				}
+			} else
+				atomic_inc(&prtd->out_needed);
+			atomic_dec(&prtd->out_count);
+		}
+	}
+fail:
+	if (retries >= MAX_PB_COPY_RETRIES)
+		ret = -ENOMEM;
+
+	return  ret;
+}
+
+static int msm_pcm_playback_close(struct snd_pcm_substream *substream)
+{
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	struct snd_soc_pcm_runtime *soc_prtd = substream->private_data;
+	struct msm_audio *prtd = runtime->private_data;
+	uint32_t timeout;
+	int dir = 0;
+	int ret = 0;
+
+	pr_debug("%s: cmd_pending 0x%lx\n", __func__, prtd->cmd_pending);
+
+	if (prtd->audio_client) {
+		dir = IN;
+
+		/* determine timeout length */
+		if (runtime->frame_bits == 0 || runtime->rate == 0) {
+			timeout = CMD_EOS_MIN_TIMEOUT_LENGTH;
+		} else {
+			timeout = (runtime->period_size *
+					CMD_EOS_TIMEOUT_MULTIPLIER) /
+					((runtime->frame_bits / 8) *
+					 runtime->rate);
+			if (timeout < CMD_EOS_MIN_TIMEOUT_LENGTH)
+				timeout = CMD_EOS_MIN_TIMEOUT_LENGTH;
+		}
+		pr_debug("%s: CMD_EOS timeout is %d\n", __func__, timeout);
+
+		ret = wait_event_timeout(the_locks.eos_wait,
+					 !test_bit(CMD_EOS, &prtd->cmd_pending),
+					 timeout);
+		if (!ret)
+			pr_err("%s: CMD_EOS failed, cmd_pending 0x%lx\n",
+			       __func__, prtd->cmd_pending);
+		q6asm_cmd(prtd->audio_client, CMD_CLOSE);
+		q6asm_audio_client_buf_free_contiguous(dir,
+					prtd->audio_client);
+		q6asm_audio_client_free(prtd->audio_client);
+	}
+	msm_pcm_routing_dereg_phy_stream(soc_prtd->dai_link->be_id,
+						SNDRV_PCM_STREAM_PLAYBACK);
+	msm_adsp_clean_mixer_ctl_pp_event_queue(soc_prtd);
+	kfree(prtd);
+	runtime->private_data = NULL;
+
+	return 0;
+}
+
+static int msm_pcm_capture_copy(struct snd_pcm_substream *substream,
+		 int channel, snd_pcm_uframes_t hwoff, void __user *buf,
+						 snd_pcm_uframes_t frames)
+{
+	int ret = 0;
+	int fbytes = 0;
+	int xfer;
+	char *bufptr;
+	void *data = NULL;
+	uint32_t idx = 0;
+	uint32_t size = 0;
+	uint32_t offset = 0;
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	struct msm_audio *prtd = substream->runtime->private_data;
+
+	pr_debug("%s\n", __func__);
+	fbytes = frames_to_bytes(runtime, frames);
+
+	pr_debug("appl_ptr %d\n", (int)runtime->control->appl_ptr);
+	pr_debug("hw_ptr %d\n", (int)runtime->status->hw_ptr);
+	pr_debug("avail_min %d\n", (int)runtime->control->avail_min);
+
+	if (prtd->reset_event) {
+		pr_err("%s: In SSR return ENETRESET before wait\n", __func__);
+		return -ENETRESET;
+	}
+	ret = wait_event_timeout(the_locks.read_wait,
+			(atomic_read(&prtd->in_count)), 5 * HZ);
+	if (!ret) {
+		pr_debug("%s: wait_event_timeout failed\n", __func__);
+		ret = -ETIMEDOUT;
+		goto fail;
+	}
+	ret = 0;
+	if (prtd->reset_event) {
+		pr_err("%s: In SSR return ENETRESET after wait\n", __func__);
+		return -ENETRESET;
+	}
+	if (!atomic_read(&prtd->in_count)) {
+		pr_debug("%s: pcm stopped in_count 0\n", __func__);
+		return 0;
+	}
+	data = q6asm_is_cpu_buf_avail(OUT, prtd->audio_client, &size, &idx);
+	pr_debug("Checking if valid buffer is available...%pK\n", data);
+	bufptr = data;
+	pr_debug("Size = %d\n", size);
+	pr_debug("fbytes = %d\n", fbytes);
+	pr_debug("idx = %d\n", idx);
+	if (bufptr) {
+		xfer = fbytes;
+		if (xfer > size)
+			xfer = size;
+		offset = prtd->in_frame_info[idx].offset;
+		pr_debug("Offset value = %d\n", offset);
+		if (copy_to_user(buf, bufptr+offset, xfer)) {
+			pr_err("Failed to copy buf to user\n");
+			ret = -EFAULT;
+			q6asm_cpu_buf_release(OUT, prtd->audio_client);
+			goto fail;
+		}
+		fbytes -= xfer;
+		size -= xfer;
+		prtd->in_frame_info[idx].offset += xfer;
+		pr_debug("%s:fbytes = %d: size=%d: xfer=%d\n",
+					__func__, fbytes, size, xfer);
+		pr_debug(" Sending next buffer to dsp\n");
+		memset(&prtd->in_frame_info[idx], 0,
+		       sizeof(struct msm_audio_in_frame_info));
+		atomic_dec(&prtd->in_count);
+		ret = q6asm_read(prtd->audio_client);
+		if (ret < 0) {
+			pr_err("q6asm read failed\n");
+			ret = -EFAULT;
+			q6asm_cpu_buf_release(OUT, prtd->audio_client);
+			goto fail;
+		}
+	} else
+		pr_err("No valid buffer\n");
+
+	pr_debug("Returning from capture_copy... %d\n", ret);
+fail:
+	return ret;
+}
+
+static int msm_pcm_capture_close(struct snd_pcm_substream *substream)
+{
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	struct snd_soc_pcm_runtime *soc_prtd = substream->private_data;
+	struct msm_audio *prtd = runtime->private_data;
+	int dir = OUT;
+
+	pr_debug("%s\n", __func__);
+	if (prtd->audio_client) {
+		q6asm_cmd(prtd->audio_client, CMD_CLOSE);
+		q6asm_audio_client_buf_free_contiguous(dir,
+				prtd->audio_client);
+		q6asm_audio_client_free(prtd->audio_client);
+	}
+
+	msm_pcm_routing_dereg_phy_stream(soc_prtd->dai_link->be_id,
+		SNDRV_PCM_STREAM_CAPTURE);
+	kfree(prtd);
+	runtime->private_data = NULL;
+
+	return 0;
+}
+
+static int msm_pcm_copy(struct snd_pcm_substream *substream, int a,
+	 snd_pcm_uframes_t hwoff, void __user *buf, snd_pcm_uframes_t frames)
+{
+	int ret = 0;
+
+	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+		ret = msm_pcm_playback_copy(substream, a, hwoff, buf, frames);
+	else if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
+		ret = msm_pcm_capture_copy(substream, a, hwoff, buf, frames);
+	return ret;
+}
+
+static int msm_pcm_close(struct snd_pcm_substream *substream)
+{
+	int ret = 0;
+
+	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+		ret = msm_pcm_playback_close(substream);
+	else if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
+		ret = msm_pcm_capture_close(substream);
+	return ret;
+}
+
+static int msm_pcm_prepare(struct snd_pcm_substream *substream)
+{
+	int ret = 0;
+
+	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+		ret = msm_pcm_playback_prepare(substream);
+	else if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
+		ret = msm_pcm_capture_prepare(substream);
+	return ret;
+}
+
+static snd_pcm_uframes_t msm_pcm_pointer(struct snd_pcm_substream *substream)
+{
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	struct msm_audio *prtd = runtime->private_data;
+
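+	/* pcm_irq_pos advances in bytes as the DSP consumes/produces
+	 * periods and wraps at the ring buffer size. */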
+	if (prtd->pcm_irq_pos >= prtd->pcm_size)
+		prtd->pcm_irq_pos = 0;
+
+	pr_debug("pcm_irq_pos = %d\n", prtd->pcm_irq_pos);
+	return bytes_to_frames(runtime, (prtd->pcm_irq_pos));
+}
+
+static int msm_pcm_mmap(struct snd_pcm_substream *substream,
+				struct vm_area_struct *vma)
+{
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	struct msm_audio *prtd = runtime->private_data;
+	struct audio_client *ac = prtd->audio_client;
+	struct audio_port_data *apd = ac->port;
+	struct audio_buffer *ab;
+	int dir = -1;
+
+	prtd->mmap_flag = 1;
+
+	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+		dir = IN;
+	else
+		dir = OUT;
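+	/*
+	 * All periods live in one contiguous allocation (see
+	 * msm_pcm_hw_params), so mapping the first audio_buffer exposes
+	 * the entire ring to userspace.
+	 */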
+	ab = &(apd[dir].buf[0]);
+
+	return msm_audio_ion_mmap(ab, vma);
+}
+
+static int msm_pcm_hw_params(struct snd_pcm_substream *substream,
+				struct snd_pcm_hw_params *params)
+{
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	struct msm_audio *prtd = runtime->private_data;
+	struct snd_dma_buffer *dma_buf = &substream->dma_buffer;
+	struct audio_buffer *buf;
+	int dir, ret;
+
+	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+		dir = IN;
+	else
+		dir = OUT;
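+	/* Allocate all periods as one contiguous region shared with the
+	 * DSP and publish it to ALSA as a single DMA buffer. */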
+	ret = q6asm_audio_client_buf_alloc_contiguous(dir,
+			prtd->audio_client,
+			(params_buffer_bytes(params) / params_periods(params)),
+			 params_periods(params));
+	if (ret < 0) {
+		pr_err("Audio Start: Buffer Allocation failed rc = %d\n",
+							ret);
+		return -ENOMEM;
+	}
+	buf = prtd->audio_client->port[dir].buf;
+	if (buf == NULL || buf[0].data == NULL)
+		return -ENOMEM;
+
+	pr_debug("%s:buf = %pK\n", __func__, buf);
+	dma_buf->dev.type = SNDRV_DMA_TYPE_DEV;
+	dma_buf->dev.dev = substream->pcm->card->dev;
+	dma_buf->private_data = NULL;
+	dma_buf->area = buf[0].data;
+	dma_buf->addr =  buf[0].phys;
+	dma_buf->bytes = params_buffer_bytes(params);
+	if (!dma_buf->area)
+		return -ENOMEM;
+
+	snd_pcm_set_runtime_buffer(substream, &substream->dma_buffer);
+	return 0;
+}
+
+static struct snd_pcm_ops msm_pcm_ops = {
+	.open           = msm_pcm_open,
+	.copy		= msm_pcm_copy,
+	.hw_params	= msm_pcm_hw_params,
+	.close          = msm_pcm_close,
+	.ioctl          = snd_pcm_lib_ioctl,
+	.prepare        = msm_pcm_prepare,
+	.trigger        = msm_pcm_trigger,
+	.pointer        = msm_pcm_pointer,
+	.mmap		= msm_pcm_mmap,
+};
+
+static int msm_pcm_adsp_stream_cmd_put(struct snd_kcontrol *kcontrol,
+				struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_component *pcm = snd_kcontrol_chip(kcontrol);
+	struct snd_soc_platform *platform = snd_soc_component_to_platform(pcm);
+	struct msm_plat_data *pdata = dev_get_drvdata(platform->dev);
+	struct snd_pcm_substream *substream;
+	struct msm_audio *prtd;
+	int ret = 0;
+	struct msm_adsp_event_data *event_data = NULL;
+	uint64_t actual_payload_len = 0;
+
+	if (!pdata) {
+		pr_err("%s pdata is NULL\n", __func__);
+		ret = -ENODEV;
+		goto done;
+	}
+
+	substream = pdata->pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream;
+	if (!substream) {
+		pr_err("%s substream not found\n", __func__);
+		ret = -EINVAL;
+		goto done;
+	}
+
+	if (!substream->runtime) {
+		pr_err("%s substream runtime not found\n", __func__);
+		ret = -EINVAL;
+		goto done;
+	}
+
+	prtd = substream->runtime->private_data;
+	if (!prtd || !prtd->audio_client) {
+		pr_err("%s prtd or audio client is null\n", __func__);
+		ret = -EINVAL;
+		goto done;
+	}
+
+	event_data = (struct msm_adsp_event_data *)ucontrol->value.bytes.data;
+	if ((event_data->event_type < ADSP_STREAM_PP_EVENT) ||
+	    (event_data->event_type >= ADSP_STREAM_EVENT_MAX)) {
+		pr_err("%s: invalid event_type=%d\n",
+			__func__, event_data->event_type);
+		ret = -EINVAL;
+		goto done;
+	}
+
+	actual_payload_len = sizeof(struct msm_adsp_event_data) +
+							event_data->payload_len;
+	if (actual_payload_len >= U32_MAX) {
+		pr_err("%s: payload length 0x%X exceeds limit\n",
+			__func__, event_data->payload_len);
+		ret = -EINVAL;
+		goto done;
+	}
+
+	if (event_data->payload_len > sizeof(ucontrol->value.bytes.data)
+			- sizeof(struct msm_adsp_event_data)) {
+		pr_err("%s: param length=%d exceeds limit\n",
+			__func__, event_data->payload_len);
+		ret = -EINVAL;
+		goto done;
+	}
+
+	ret = q6asm_send_stream_cmd(prtd->audio_client, event_data);
+	if (ret < 0)
+		pr_err("%s: failed to send stream event cmd, err = %d\n",
+			__func__, ret);
+done:
+	return ret;
+}
+
+static int msm_pcm_add_audio_adsp_stream_cmd_control(
+			struct snd_soc_pcm_runtime *rtd)
+{
+	const char *mixer_ctl_name = DSP_STREAM_CMD;
+	const char *deviceNo = "NN";
+	char *mixer_str = NULL;
+	int ctl_len = 0, ret = 0;
+	struct snd_kcontrol_new fe_audio_adsp_stream_cmd_config_control[1] = {
+		{
+		.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+		.name = "?",
+		.access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+		.info = msm_adsp_stream_cmd_info,
+		.put = msm_pcm_adsp_stream_cmd_put,
+		.private_value = 0,
+		}
+	};
+
+	if (!rtd) {
+		pr_err("%s rtd is NULL\n", __func__);
+		ret = -EINVAL;
+		goto done;
+	}
+
+	ctl_len = strlen(mixer_ctl_name) + 1 + strlen(deviceNo) + 1;
+	mixer_str = kzalloc(ctl_len, GFP_KERNEL);
+	if (!mixer_str) {
+		ret = -ENOMEM;
+		goto done;
+	}
+
+	snprintf(mixer_str, ctl_len, "%s %d", mixer_ctl_name, rtd->pcm->device);
+	fe_audio_adsp_stream_cmd_config_control[0].name = mixer_str;
+	fe_audio_adsp_stream_cmd_config_control[0].private_value =
+		rtd->dai_link->be_id;
+	pr_debug("Registering new mixer ctl %s\n", mixer_str);
+	ret = snd_soc_add_platform_controls(rtd->platform,
+		fe_audio_adsp_stream_cmd_config_control,
+		ARRAY_SIZE(fe_audio_adsp_stream_cmd_config_control));
+	if (ret < 0)
+		pr_err("%s: failed add ctl %s. err = %d\n",
+			__func__, mixer_str, ret);
+
+	kfree(mixer_str);
+done:
+	return ret;
+}
+
+static int msm_pcm_add_audio_adsp_stream_callback_control(
+			struct snd_soc_pcm_runtime *rtd)
+{
+	const char *mixer_ctl_name = DSP_STREAM_CALLBACK;
+	const char *deviceNo = "NN";
+	char *mixer_str = NULL;
+	int ctl_len = 0, ret = 0;
+	struct snd_kcontrol *kctl;
+
+	struct snd_kcontrol_new fe_audio_adsp_callback_config_control[1] = {
+		{
+		.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+		.name = "?",
+		.access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+		.info = msm_adsp_stream_callback_info,
+		.get = msm_adsp_stream_callback_get,
+		.private_value = 0,
+		}
+	};
+
+	if (!rtd) {
+		pr_err("%s NULL rtd\n", __func__);
+		ret = -EINVAL;
+		goto done;
+	}
+
+	pr_debug("%s: added new pcm FE with name %s, id %d, cpu dai %s, device no %d\n",
+		 __func__, rtd->dai_link->name, rtd->dai_link->be_id,
+		 rtd->dai_link->cpu_dai_name, rtd->pcm->device);
+	ctl_len = strlen(mixer_ctl_name) + 1 + strlen(deviceNo) + 1;
+	mixer_str = kzalloc(ctl_len, GFP_KERNEL);
+	if (!mixer_str) {
+		ret = -ENOMEM;
+		goto done;
+	}
+
+	snprintf(mixer_str, ctl_len, "%s %d", mixer_ctl_name, rtd->pcm->device);
+	fe_audio_adsp_callback_config_control[0].name = mixer_str;
+	fe_audio_adsp_callback_config_control[0].private_value =
+		rtd->dai_link->be_id;
+	pr_debug("%s: Registering new mixer ctl %s\n", __func__, mixer_str);
+	ret = snd_soc_add_platform_controls(rtd->platform,
+			fe_audio_adsp_callback_config_control,
+			ARRAY_SIZE(fe_audio_adsp_callback_config_control));
+	if (ret < 0) {
+		pr_err("%s: failed to add ctl %s. err = %d\n",
+			__func__, mixer_str, ret);
+		ret = -EINVAL;
+		goto free_mixer_str;
+	}
+
+	kctl = snd_soc_card_get_kcontrol(rtd->card, mixer_str);
+	if (!kctl) {
+		pr_err("%s: failed to get kctl %s.\n", __func__, mixer_str);
+		ret = -EINVAL;
+		goto free_mixer_str;
+	}
+
+	kctl->private_data = NULL;
+
+free_mixer_str:
+	kfree(mixer_str);
+done:
+	return ret;
+}
+
+static int msm_pcm_set_volume(struct msm_audio *prtd, uint32_t volume)
+{
+	int rc = 0;
+
+	if (prtd && prtd->audio_client) {
+		pr_debug("%s: channels %d volume 0x%x\n", __func__,
+				prtd->channel_mode, volume);
+		rc = q6asm_set_volume(prtd->audio_client, volume);
+		if (rc < 0) {
+			pr_err("%s: Send Volume command failed rc=%d\n",
+					__func__, rc);
+		}
+	}
+	return rc;
+}
+
+static int msm_pcm_volume_ctl_get(struct snd_kcontrol *kcontrol,
+		      struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_pcm_volume *vol = snd_kcontrol_chip(kcontrol);
+	struct snd_pcm_substream *substream =
+		vol->pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream;
+	struct msm_audio *prtd;
+
+	pr_debug("%s\n", __func__);
+	if (!substream) {
+		pr_debug("%s substream not found\n", __func__);
+		return -ENODEV;
+	}
+	if (!substream->runtime) {
+		pr_debug("%s substream runtime not found\n", __func__);
+		return 0;
+	}
+	prtd = substream->runtime->private_data;
+	if (prtd)
+		ucontrol->value.integer.value[0] = prtd->volume;
+	return 0;
+}
+
+static int msm_pcm_volume_ctl_put(struct snd_kcontrol *kcontrol,
+				struct snd_ctl_elem_value *ucontrol)
+{
+	int rc = 0;
+	struct snd_pcm_volume *vol = snd_kcontrol_chip(kcontrol);
+	struct snd_pcm_substream *substream =
+		vol->pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream;
+	struct msm_audio *prtd;
+	int volume = ucontrol->value.integer.value[0];
+
+	pr_debug("%s: volume : 0x%x\n", __func__, volume);
+	if (!substream) {
+		pr_err("%s substream not found\n", __func__);
+		return -ENODEV;
+	}
+	if (!substream->runtime) {
+		pr_err("%s substream runtime not found\n", __func__);
+		return 0;
+	}
+	prtd = substream->runtime->private_data;
+	if (prtd) {
+		rc = msm_pcm_set_volume(prtd, volume);
+		prtd->volume = volume;
+	}
+	return rc;
+}
+
+static int msm_pcm_add_volume_control(struct snd_soc_pcm_runtime *rtd)
+{
+	int ret = 0;
+	struct snd_pcm *pcm = rtd->pcm;
+	struct snd_pcm_volume *volume_info;
+	struct snd_kcontrol *kctl;
+
+	dev_dbg(rtd->dev, "%s, Volume control add\n", __func__);
+	ret = snd_pcm_add_volume_ctls(pcm, SNDRV_PCM_STREAM_PLAYBACK,
+			NULL, 1, rtd->dai_link->be_id,
+			&volume_info);
+	if (ret < 0) {
+		pr_err("%s volume control failed ret %d\n", __func__, ret);
+		return ret;
+	}
+	kctl = volume_info->kctl;
+	kctl->put = msm_pcm_volume_ctl_put;
+	kctl->get = msm_pcm_volume_ctl_get;
+	kctl->tlv.p = msm_pcm_vol_gain;
+	return 0;
+}
+
+static int msm_pcm_compress_ctl_info(struct snd_kcontrol *kcontrol,
+				struct snd_ctl_elem_info *uinfo)
+{
+	uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
+	uinfo->count = 1;
+	uinfo->value.integer.min = 0;
+	uinfo->value.integer.max = 0x2000;
+	return 0;
+}
+
+static int msm_pcm_compress_ctl_get(struct snd_kcontrol *kcontrol,
+		      struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_component *comp = snd_kcontrol_chip(kcontrol);
+	struct snd_soc_platform *platform = snd_soc_component_to_platform(comp);
+	struct msm_plat_data *pdata = dev_get_drvdata(platform->dev);
+	struct snd_pcm_substream *substream;
+	struct msm_audio *prtd;
+
+	if (!pdata) {
+		pr_err("%s pdata is NULL\n", __func__);
+		return -ENODEV;
+	}
+	substream = pdata->pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream;
+	if (!substream) {
+		pr_err("%s substream not found\n", __func__);
+		return -EINVAL;
+	}
+	if (!substream->runtime) {
+		pr_err("%s substream runtime not found\n", __func__);
+		return 0;
+	}
+	prtd = substream->runtime->private_data;
+	if (prtd)
+		ucontrol->value.integer.value[0] = prtd->compress_enable;
+	return 0;
+}
+
+static int msm_pcm_compress_ctl_put(struct snd_kcontrol *kcontrol,
+				struct snd_ctl_elem_value *ucontrol)
+{
+	int rc = 0;
+	struct snd_soc_component *comp = snd_kcontrol_chip(kcontrol);
+	struct snd_soc_platform *platform = snd_soc_component_to_platform(comp);
+	struct msm_plat_data *pdata = dev_get_drvdata(platform->dev);
+	struct snd_pcm_substream *substream;
+	struct msm_audio *prtd;
+	int compress = ucontrol->value.integer.value[0];
+
+	if (!pdata) {
+		pr_err("%s pdata is NULL\n", __func__);
+		return -ENODEV;
+	}
+	substream = pdata->pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream;
+	pr_debug("%s: compress : 0x%x\n", __func__, compress);
+	if (!substream) {
+		pr_err("%s substream not found\n", __func__);
+		return -EINVAL;
+	}
+	if (!substream->runtime) {
+		pr_err("%s substream runtime not found\n", __func__);
+		return 0;
+	}
+	prtd = substream->runtime->private_data;
+	if (prtd) {
+		pr_debug("%s: setting compress flag to 0x%x\n",
+		__func__, compress);
+		prtd->compress_enable = compress;
+	}
+	return rc;
+}
+
+static int msm_pcm_add_compress_control(struct snd_soc_pcm_runtime *rtd)
+{
+	const char *mixer_ctl_name = "Playback ";
+	const char *mixer_ctl_end_name = " Compress";
+	const char *deviceNo = "NN";
+	char *mixer_str = NULL;
+	int ctl_len;
+	int ret = 0;
+	struct msm_plat_data *pdata;
+	struct snd_kcontrol_new pcm_compress_control[1] = {
+		{
+		.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+		.name = "?",
+		.access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+		.info = msm_pcm_compress_ctl_info,
+		.get = msm_pcm_compress_ctl_get,
+		.put = msm_pcm_compress_ctl_put,
+		.private_value = 0,
+		}
+	};
+
+	if (!rtd) {
+		pr_err("%s: NULL rtd\n", __func__);
+		return -EINVAL;
+	}
+
+	ctl_len = strlen(mixer_ctl_name) + strlen(deviceNo) +
+		  strlen(mixer_ctl_end_name) + 1;
+	mixer_str = kzalloc(ctl_len, GFP_KERNEL);
+
+	if (!mixer_str)
+		return -ENOMEM;
+
+	snprintf(mixer_str, ctl_len, "%s%d%s", mixer_ctl_name,
+			rtd->pcm->device, mixer_ctl_end_name);
+
+	pcm_compress_control[0].name = mixer_str;
+	pcm_compress_control[0].private_value = rtd->dai_link->be_id;
+	pr_debug("%s: Registering new mixer ctl %s\n", __func__, mixer_str);
+	pdata = dev_get_drvdata(rtd->platform->dev);
+	if (pdata) {
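+		/*
+		 * pdata->pcm doubles as a "control registered" flag so
+		 * the Compress mixer control is only added for the first
+		 * PCM device on this platform.
+		 */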
+		if (!pdata->pcm) {
+			pdata->pcm = rtd->pcm;
+			ret = snd_soc_add_platform_controls(rtd->platform,
+							pcm_compress_control,
+							ARRAY_SIZE
+							(pcm_compress_control));
+			if (ret < 0)
+				pr_err("%s: failed add ctl %s. err = %d\n",
+					__func__, mixer_str, ret);
+		}
+	} else {
+		pr_err("%s: NULL pdata\n", __func__);
+		ret = -EINVAL;
+	}
+	kfree(mixer_str);
+	return ret;
+}
+
+static int msm_pcm_chmap_ctl_put(struct snd_kcontrol *kcontrol,
+				struct snd_ctl_elem_value *ucontrol)
+{
+	int i;
+	struct snd_pcm_chmap *info = snd_kcontrol_chip(kcontrol);
+	unsigned int idx = snd_ctl_get_ioffidx(kcontrol, &ucontrol->id);
+	struct snd_pcm_substream *substream;
+	struct msm_audio *prtd;
+
+	pr_debug("%s\n", __func__);
+	substream = snd_pcm_chmap_substream(info, idx);
+	if (!substream)
+		return -ENODEV;
+	if (!substream->runtime)
+		return 0;
+
+	prtd = substream->runtime->private_data;
+	if (prtd) {
+		prtd->set_channel_map = true;
+		for (i = 0; i < PCM_FORMAT_MAX_NUM_CHANNEL; i++)
+			prtd->channel_map[i] =
+				(char)(ucontrol->value.integer.value[i]);
+	}
+	return 0;
+}
+
+static int msm_pcm_chmap_ctl_get(struct snd_kcontrol *kcontrol,
+				struct snd_ctl_elem_value *ucontrol)
+{
+	int i;
+	struct snd_pcm_chmap *info = snd_kcontrol_chip(kcontrol);
+	unsigned int idx = snd_ctl_get_ioffidx(kcontrol, &ucontrol->id);
+	struct snd_pcm_substream *substream;
+	struct msm_audio *prtd;
+
+	pr_debug("%s\n", __func__);
+	substream = snd_pcm_chmap_substream(info, idx);
+	if (!substream)
+		return -ENODEV;
+	memset(ucontrol->value.integer.value, 0,
+		sizeof(ucontrol->value.integer.value));
+	if (!substream->runtime)
+		return 0; /* no channels set */
+
+	prtd = substream->runtime->private_data;
+
+	if (prtd && prtd->set_channel_map) {
+		for (i = 0; i < PCM_FORMAT_MAX_NUM_CHANNEL; i++)
+			ucontrol->value.integer.value[i] =
+					(int)prtd->channel_map[i];
+	} else {
+		for (i = 0; i < PCM_FORMAT_MAX_NUM_CHANNEL; i++)
+			ucontrol->value.integer.value[i] = 0;
+	}
+
+	return 0;
+}
+
+static int msm_pcm_add_chmap_controls(struct snd_soc_pcm_runtime *rtd)
+{
+	struct snd_pcm *pcm = rtd->pcm;
+	struct snd_pcm_chmap *chmap_info;
+	struct snd_kcontrol *kctl;
+	char device_num[12];
+	int i, ret = 0;
+
+	pr_debug("%s, Channel map cntrl add\n", __func__);
+	ret = snd_pcm_add_chmap_ctls(pcm, SNDRV_PCM_STREAM_PLAYBACK,
+				     snd_pcm_std_chmaps,
+				     PCM_FORMAT_MAX_NUM_CHANNEL, 0,
+				     &chmap_info);
+	if (ret < 0) {
+		pr_err("%s, channel map cntrl add failed\n", __func__);
+		return ret;
+	}
+	kctl = chmap_info->kctl;
+	for (i = 0; i < kctl->count; i++)
+		kctl->vd[i].access |= SNDRV_CTL_ELEM_ACCESS_WRITE;
+	snprintf(device_num, sizeof(device_num), "%d", pcm->device);
+	strlcat(kctl->id.name, device_num, sizeof(kctl->id.name));
+	pr_debug("%s, Overwriting channel map control name to: %s\n",
+		__func__, kctl->id.name);
+	kctl->put = msm_pcm_chmap_ctl_put;
+	kctl->get = msm_pcm_chmap_ctl_get;
+	return 0;
+}
+
+static int msm_pcm_playback_app_type_cfg_ctl_put(struct snd_kcontrol *kcontrol,
+					struct snd_ctl_elem_value *ucontrol)
+{
+	u64 fe_id = kcontrol->private_value;
+	int session_type = SESSION_TYPE_RX;
+	int be_id = ucontrol->value.integer.value[3];
+	struct msm_pcm_stream_app_type_cfg cfg_data = {0, 0, 48000};
+	int ret = 0;
+
+	cfg_data.app_type = ucontrol->value.integer.value[0];
+	cfg_data.acdb_dev_id = ucontrol->value.integer.value[1];
+	if (ucontrol->value.integer.value[2] != 0)
+		cfg_data.sample_rate = ucontrol->value.integer.value[2];
+	pr_debug("%s: fe_id- %llu session_type- %d be_id- %d app_type- %d acdb_dev_id- %d sample_rate- %d\n",
+		__func__, fe_id, session_type, be_id,
+		cfg_data.app_type, cfg_data.acdb_dev_id, cfg_data.sample_rate);
+	ret = msm_pcm_routing_reg_stream_app_type_cfg(fe_id, session_type,
+						      be_id, &cfg_data);
+	if (ret < 0)
+		pr_err("%s: msm_pcm_routing_reg_stream_app_type_cfg failed returned %d\n",
+			__func__, ret);
+
+	return ret;
+}
+
+static int msm_pcm_playback_app_type_cfg_ctl_get(struct snd_kcontrol *kcontrol,
+					struct snd_ctl_elem_value *ucontrol)
+{
+	u64 fe_id = kcontrol->private_value;
+	int session_type = SESSION_TYPE_RX;
+	int be_id = 0;
+	struct msm_pcm_stream_app_type_cfg cfg_data = {0};
+	int ret = 0;
+
+	ret = msm_pcm_routing_get_stream_app_type_cfg(fe_id, session_type,
+						      &be_id, &cfg_data);
+	if (ret < 0) {
+		pr_err("%s: msm_pcm_routing_get_stream_app_type_cfg failed returned %d\n",
+			__func__, ret);
+		goto done;
+	}
+
+	ucontrol->value.integer.value[0] = cfg_data.app_type;
+	ucontrol->value.integer.value[1] = cfg_data.acdb_dev_id;
+	ucontrol->value.integer.value[2] = cfg_data.sample_rate;
+	ucontrol->value.integer.value[3] = be_id;
+	pr_debug("%s: fedai_id %llu, session_type %d, be_id %d, app_type %d, acdb_dev_id %d, sample_rate %d\n",
+		__func__, fe_id, session_type, be_id,
+		cfg_data.app_type, cfg_data.acdb_dev_id, cfg_data.sample_rate);
+done:
+	return ret;
+}
+
+static int msm_pcm_playback_pan_scale_ctl_info(struct snd_kcontrol *kcontrol,
+					struct snd_ctl_elem_info *uinfo)
+{
+	uinfo->type = SNDRV_CTL_ELEM_TYPE_BYTES;
+	uinfo->count = sizeof(struct asm_stream_pan_ctrl_params);
+	return 0;
+}
+
+static int msm_pcm_playback_pan_scale_ctl_put(struct snd_kcontrol *kcontrol,
+					struct snd_ctl_elem_value *ucontrol)
+{
+	int ret = 0;
+	int len = 0;
+	int i = 0;
+	struct snd_soc_component *usr_info = snd_kcontrol_chip(kcontrol);
+	struct snd_soc_platform *platform;
+	struct msm_plat_data *pdata;
+	struct snd_pcm_substream *substream;
+	struct msm_audio *prtd;
+	struct asm_stream_pan_ctrl_params pan_param;
+	char *usr_value = NULL;
+	uint32_t *gain_ptr = NULL;
+
+	if (!usr_info) {
+		pr_err("%s: usr_info is null\n", __func__);
+		ret = -EINVAL;
+		goto done;
+	}
+
+	platform = snd_soc_component_to_platform(usr_info);
+	if (!platform) {
+		pr_err("%s: platform is null\n", __func__);
+		ret = -EINVAL;
+		goto done;
+	}
+	pdata = dev_get_drvdata(platform->dev);
+	if (!pdata) {
+		pr_err("%s: pdata is null\n", __func__);
+		ret = -EINVAL;
+		goto done;
+	}
+	substream = pdata->pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream;
+	if (!substream) {
+		pr_err("%s substream not found\n", __func__);
+		ret = -EINVAL;
+		goto done;
+	}
+
+	if (!substream->runtime) {
+		pr_err("%s substream runtime not found\n", __func__);
+		ret = -EINVAL;
+		goto done;
+	}
+	prtd = substream->runtime->private_data;
+	if (!prtd) {
+		ret = -EINVAL;
+		goto done;
+	}
+	usr_value = (char *) ucontrol->value.bytes.data;
+	if (!usr_value) {
+		pr_err("%s ucontrol data is null\n", __func__);
+		ret = -EINVAL;
+		goto done;
+	}
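+	/*
+	 * The byte payload is packed as: output channel count, input
+	 * channel count, then three optional sections (output map, input
+	 * map, gain matrix), each preceded by a one-byte presence flag.
+	 */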
+	memcpy(&pan_param.num_output_channels, &usr_value[len],
+				sizeof(pan_param.num_output_channels));
+	len += sizeof(pan_param.num_output_channels);
+	if (pan_param.num_output_channels >
+		PCM_FORMAT_MAX_NUM_CHANNEL) {
+		ret = -EINVAL;
+		goto done;
+	}
+	memcpy(&pan_param.num_input_channels, &usr_value[len],
+				sizeof(pan_param.num_input_channels));
+	len += sizeof(pan_param.num_input_channels);
+	if (pan_param.num_input_channels >
+		PCM_FORMAT_MAX_NUM_CHANNEL) {
+		ret = -EINVAL;
+		goto done;
+	}
+
+	if (usr_value[len++]) {
+		memcpy(pan_param.output_channel_map, &usr_value[len],
+			(pan_param.num_output_channels *
+			sizeof(pan_param.output_channel_map[0])));
+		len += (pan_param.num_output_channels *
+			sizeof(pan_param.output_channel_map[0]));
+	}
+	if (usr_value[len++]) {
+		memcpy(pan_param.input_channel_map, &usr_value[len],
+			(pan_param.num_input_channels *
+			sizeof(pan_param.input_channel_map[0])));
+		len += (pan_param.num_input_channels *
+			sizeof(pan_param.input_channel_map[0]));
+	}
+	if (usr_value[len++]) {
+		gain_ptr = (uint32_t *) &usr_value[len];
+		for (i = 0; i < pan_param.num_output_channels *
+			pan_param.num_input_channels; i++) {
+			pan_param.gain[i] =
+				gain_ptr[i] > 0 ? (2 << 13) : 0;
+			len += sizeof(pan_param.gain[i]);
+		}
+		len += (pan_param.num_input_channels *
+		pan_param.num_output_channels * sizeof(pan_param.gain[0]));
+	}
+
+	ret = q6asm_set_mfc_panning_params(prtd->audio_client,
+					   &pan_param);
+	len -= pan_param.num_output_channels *
+		pan_param.num_input_channels * sizeof(pan_param.gain[0]);
+	if (gain_ptr) {
+		for (i = 0; i < pan_param.num_output_channels *
+			pan_param.num_input_channels; i++) {
+			/*
+			 * The data userspace passes is already in Q14 format.
+			 * For volume gain is in Q28.
+			 */
+			pan_param.gain[i] =
+				(gain_ptr[i]) << 14;
+			len += sizeof(pan_param.gain[i]);
+		}
+	}
+	ret = q6asm_set_vol_ctrl_gain_pair(prtd->audio_client,
+					   &pan_param);
+
+done:
+	return ret;
+}
+
+static int msm_pcm_playback_pan_scale_ctl_get(struct snd_kcontrol *kcontrol,
+					struct snd_ctl_elem_value *ucontrol)
+{
+	return 0;
+}
+
+static int msm_add_stream_pan_scale_controls(struct snd_soc_pcm_runtime *rtd)
+{
+	const char *playback_mixer_ctl_name = "Audio Stream";
+	const char *deviceNo = "NN";
+	const char *suffix = "Pan Scale Control";
+	char *mixer_str = NULL;
+	int ctl_len;
+	int ret = 0;
+	struct msm_plat_data *pdata;
+	struct snd_kcontrol_new pan_scale_control[1] = {
+		{
+		.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+		.name = "?",
+		.access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+		.info = msm_pcm_playback_pan_scale_ctl_info,
+		.get = msm_pcm_playback_pan_scale_ctl_get,
+		.put = msm_pcm_playback_pan_scale_ctl_put,
+		.private_value = 0,
+		}
+	};
+
+	if (!rtd) {
+		pr_err("%s: NULL rtd\n", __func__);
+		return -EINVAL;
+	}
+
+	ctl_len = strlen(playback_mixer_ctl_name) + 1 +
+		  strlen(deviceNo) + 1 + strlen(suffix) + 1;
+	mixer_str = kzalloc(ctl_len, GFP_KERNEL);
+	if (!mixer_str) {
+		ret = -ENOMEM;
+		goto done;
+	}
+
+	snprintf(mixer_str, ctl_len, "%s %d %s",
+		 playback_mixer_ctl_name, rtd->pcm->device, suffix);
+	pan_scale_control[0].name = mixer_str;
+	pan_scale_control[0].private_value = rtd->dai_link->be_id;
+	pr_debug("%s: Registering new mixer ctl %s\n", __func__, mixer_str);
+	pdata = dev_get_drvdata(rtd->platform->dev);
+	if (pdata) {
+		if (!pdata->pcm)
+			pdata->pcm = rtd->pcm;
+		ret = snd_soc_add_platform_controls(rtd->platform,
+							pan_scale_control,
+							ARRAY_SIZE
+							(pan_scale_control));
+		if (ret < 0)
+			pr_err("%s: failed add ctl %s. err = %d\n",
+					__func__, mixer_str, ret);
+	} else {
+		pr_err("%s: NULL pdata\n", __func__);
+		ret = -EINVAL;
+	}
+
+	kfree(mixer_str);
+done:
+	return ret;
+}
+
+static int msm_pcm_playback_dnmix_ctl_get(struct snd_kcontrol *kcontrol,
+					struct snd_ctl_elem_value *ucontrol)
+{
+	return 0;
+}
+
+static int msm_pcm_playback_dnmix_ctl_info(struct snd_kcontrol *kcontrol,
+					struct snd_ctl_elem_info *uinfo)
+{
+	uinfo->type = SNDRV_CTL_ELEM_TYPE_BYTES;
+	uinfo->count = sizeof(struct asm_stream_pan_ctrl_params);
+	return 0;
+}
+
+static int msm_pcm_playback_dnmix_ctl_put(struct snd_kcontrol *kcontrol,
+					struct snd_ctl_elem_value *ucontrol)
+{
+	int ret = 0;
+	int len = 0;
+
+	struct snd_soc_component *usr_info = snd_kcontrol_chip(kcontrol);
+	struct snd_soc_platform *platform;
+	struct msm_plat_data *pdata;
+	struct snd_pcm_substream *substream;
+	struct msm_audio *prtd;
+	struct asm_stream_pan_ctrl_params dnmix_param;
+	char *usr_value;
+	int be_id = 0;
+	int stream_id = 0;
+
+	if (!usr_info) {
+		pr_err("%s usr_info is null\n", __func__);
+		ret = -EINVAL;
+		goto done;
+	}
+	platform = snd_soc_component_to_platform(usr_info);
+	if (!platform) {
+		pr_err("%s platform is null\n", __func__);
+		ret = -EINVAL;
+		goto done;
+	}
+	pdata = dev_get_drvdata(platform->dev);
+	if (!pdata) {
+		pr_err("%s pdata is null\n", __func__);
+		ret = -EINVAL;
+		goto done;
+	}
+	substream = pdata->pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream;
+	if (!substream) {
+		pr_err("%s substream not found\n", __func__);
+		ret = -EINVAL;
+		goto done;
+	}
+	if (!substream->runtime) {
+		pr_err("%s substream runtime not found\n", __func__);
+		ret = -EINVAL;
+		goto done;
+	}
+	prtd = substream->runtime->private_data;
+	if (!prtd) {
+		ret = -EINVAL;
+		goto done;
+	}
+	usr_value = (char *) ucontrol->value.bytes.data;
+	if (!usr_value) {
+		pr_err("%s: usr_value is null\n", __func__);
+		ret = -EINVAL;
+		goto done;
+	}
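+	/*
+	 * Expected userspace byte-stream layout: be_id, number of output
+	 * channels, number of input channels, then three flag-prefixed
+	 * optional sections (output channel map, input channel map and
+	 * the gain matrix).
+	 */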
+	memcpy(&be_id, usr_value, sizeof(be_id));
+	len += sizeof(be_id);
+	stream_id = prtd->audio_client->session;
+	memcpy(&dnmix_param.num_output_channels, &usr_value[len],
+				sizeof(dnmix_param.num_output_channels));
+	len += sizeof(dnmix_param.num_output_channels);
+	if (dnmix_param.num_output_channels >
+		PCM_FORMAT_MAX_NUM_CHANNEL) {
+		ret = -EINVAL;
+		goto done;
+	}
+	memcpy(&dnmix_param.num_input_channels, &usr_value[len],
+				sizeof(dnmix_param.num_input_channels));
+	len += sizeof(dnmix_param.num_input_channels);
+	if (dnmix_param.num_input_channels >
+		PCM_FORMAT_MAX_NUM_CHANNEL) {
+		ret = -EINVAL;
+		goto done;
+	}
+	if (usr_value[len++]) {
+		memcpy(dnmix_param.output_channel_map, &usr_value[len],
+			(dnmix_param.num_output_channels *
+			sizeof(dnmix_param.output_channel_map[0])));
+		len += (dnmix_param.num_output_channels *
+				sizeof(dnmix_param.output_channel_map[0]));
+	}
+	if (usr_value[len++]) {
+		memcpy(dnmix_param.input_channel_map, &usr_value[len],
+			(dnmix_param.num_input_channels *
+			sizeof(dnmix_param.input_channel_map[0])));
+		len += (dnmix_param.num_input_channels *
+				sizeof(dnmix_param.input_channel_map[0]));
+	}
+	if (usr_value[len++]) {
+		memcpy(dnmix_param.gain, (uint32_t *) &usr_value[len],
+			(dnmix_param.num_input_channels *
+			dnmix_param.num_output_channels *
+			sizeof(dnmix_param.gain[0])));
+		len += (dnmix_param.num_input_channels *
+		dnmix_param.num_output_channels * sizeof(dnmix_param.gain[0]));
+	}
+	msm_routing_set_downmix_control_data(be_id,
+					     stream_id,
+					     &dnmix_param);
+
+done:
+	return ret;
+}
+
+static int msm_add_device_down_mix_controls(struct snd_soc_pcm_runtime *rtd)
+{
+	const char *playback_mixer_ctl_name = "Audio Device";
+	const char *deviceNo = "NN";
+	const char *suffix = "Downmix Control";
+	char *mixer_str = NULL;
+	int ctl_len = 0, ret = 0;
+	struct msm_plat_data *pdata;
+	struct snd_kcontrol_new device_downmix_control[1] = {
+		{
+			.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+			.name = "?",
+			.access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+			.info = msm_pcm_playback_dnmix_ctl_info,
+			.get = msm_pcm_playback_dnmix_ctl_get,
+			.put = msm_pcm_playback_dnmix_ctl_put,
+			.private_value = 0,
+		}
+	};
+
+	if (!rtd) {
+		pr_err("%s NULL rtd\n", __func__);
+		ret = -EINVAL;
+		goto done;
+	}
+	ctl_len = strlen(playback_mixer_ctl_name) + 1 +
+		  strlen(deviceNo) + 1 + strlen(suffix) + 1;
+	mixer_str = kzalloc(ctl_len, GFP_KERNEL);
+	if (!mixer_str) {
+		ret = -ENOMEM;
+		goto done;
+	}
+
+	snprintf(mixer_str, ctl_len, "%s %d %s",
+		 playback_mixer_ctl_name, rtd->pcm->device, suffix);
+	device_downmix_control[0].name = mixer_str;
+	device_downmix_control[0].private_value = rtd->dai_link->be_id;
+	pr_debug("%s: Registering new mixer ctl %s\n", __func__, mixer_str);
+	pdata = dev_get_drvdata(rtd->platform->dev);
+	if (pdata) {
+		if (!pdata->pcm)
+			pdata->pcm = rtd->pcm;
+		ret = snd_soc_add_platform_controls(rtd->platform,
+					device_downmix_control,
+					ARRAY_SIZE(device_downmix_control));
+		if (ret < 0)
+			pr_err("%s: failed add ctl %s. err = %d\n",
+				 __func__, mixer_str, ret);
+	} else {
+		pr_err("%s: NULL pdata\n", __func__);
+		ret = -EINVAL;
+	}
+	kfree(mixer_str);
+done:
+	return ret;
+}
+
+static int msm_pcm_capture_app_type_cfg_ctl_put(struct snd_kcontrol *kcontrol,
+					struct snd_ctl_elem_value *ucontrol)
+{
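+	/* value[]: 0 = app_type, 1 = acdb_dev_id, 2 = sample_rate, 3 = be_id */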
+	u64 fe_id = kcontrol->private_value;
+	int session_type = SESSION_TYPE_TX;
+	int be_id = ucontrol->value.integer.value[3];
+	struct msm_pcm_stream_app_type_cfg cfg_data = {0, 0, 48000};
+	int ret = 0;
+
+	cfg_data.app_type = ucontrol->value.integer.value[0];
+	cfg_data.acdb_dev_id = ucontrol->value.integer.value[1];
+	if (ucontrol->value.integer.value[2] != 0)
+		cfg_data.sample_rate = ucontrol->value.integer.value[2];
+	pr_debug("%s: fe_id- %llu session_type- %d be_id- %d app_type- %d acdb_dev_id- %d sample_rate- %d\n",
+		__func__, fe_id, session_type, be_id,
+		cfg_data.app_type, cfg_data.acdb_dev_id, cfg_data.sample_rate);
+	ret = msm_pcm_routing_reg_stream_app_type_cfg(fe_id, session_type,
+						      be_id, &cfg_data);
+	if (ret < 0)
+		pr_err("%s: msm_pcm_routing_reg_stream_app_type_cfg failed returned %d\n",
+			__func__, ret);
+
+	return ret;
+}
+
+static int msm_pcm_capture_app_type_cfg_ctl_get(struct snd_kcontrol *kcontrol,
+					struct snd_ctl_elem_value *ucontrol)
+{
+	u64 fe_id = kcontrol->private_value;
+	int session_type = SESSION_TYPE_TX;
+	int be_id = 0;
+	struct msm_pcm_stream_app_type_cfg cfg_data = {0};
+	int ret = 0;
+
+	ret = msm_pcm_routing_get_stream_app_type_cfg(fe_id, session_type,
+						      &be_id, &cfg_data);
+	if (ret < 0) {
+		pr_err("%s: msm_pcm_routing_get_stream_app_type_cfg failed returned %d\n",
+			__func__, ret);
+		goto done;
+	}
+
+	ucontrol->value.integer.value[0] = cfg_data.app_type;
+	ucontrol->value.integer.value[1] = cfg_data.acdb_dev_id;
+	ucontrol->value.integer.value[2] = cfg_data.sample_rate;
+	ucontrol->value.integer.value[3] = be_id;
+	pr_debug("%s: fedai_id %llu, session_type %d, be_id %d, app_type %d, acdb_dev_id %d, sample_rate %d\n",
+		__func__, fe_id, session_type, be_id,
+		cfg_data.app_type, cfg_data.acdb_dev_id, cfg_data.sample_rate);
+done:
+	return ret;
+}
+
+static int msm_pcm_add_app_type_controls(struct snd_soc_pcm_runtime *rtd)
+{
+	struct snd_pcm *pcm = rtd->pcm;
+	struct snd_pcm_usr *app_type_info;
+	struct snd_kcontrol *kctl;
+	const char *playback_mixer_ctl_name	= "Audio Stream";
+	const char *capture_mixer_ctl_name	= "Audio Stream Capture";
+	const char *deviceNo		= "NN";
+	const char *suffix		= "App Type Cfg";
+	int ctl_len, ret = 0;
+
+	if (pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream) {
+		ctl_len = strlen(playback_mixer_ctl_name) + 1 +
+				strlen(deviceNo) + 1 + strlen(suffix) + 1;
+		pr_debug("%s: Playback app type cntrl add\n", __func__);
+		ret = snd_pcm_add_usr_ctls(pcm, SNDRV_PCM_STREAM_PLAYBACK,
+					NULL, 1, ctl_len, rtd->dai_link->be_id,
+					&app_type_info);
+		if (ret < 0) {
+			pr_err("%s: playback app type cntrl add failed: %d\n",
+				__func__, ret);
+			return ret;
+		}
+		kctl = app_type_info->kctl;
+		snprintf(kctl->id.name, ctl_len, "%s %d %s",
+			playback_mixer_ctl_name, rtd->pcm->device, suffix);
+		kctl->put = msm_pcm_playback_app_type_cfg_ctl_put;
+		kctl->get = msm_pcm_playback_app_type_cfg_ctl_get;
+	}
+
+	if (pcm->streams[SNDRV_PCM_STREAM_CAPTURE].substream) {
+		ctl_len = strlen(capture_mixer_ctl_name) + 1 +
+				strlen(deviceNo) + 1 + strlen(suffix) + 1;
+		pr_debug("%s: Capture app type cntrl add\n", __func__);
+		ret = snd_pcm_add_usr_ctls(pcm, SNDRV_PCM_STREAM_CAPTURE,
+					NULL, 1, ctl_len, rtd->dai_link->be_id,
+					&app_type_info);
+		if (ret < 0) {
+			pr_err("%s: capture app type cntrl add failed: %d\n",
+				__func__, ret);
+			return ret;
+		}
+		kctl = app_type_info->kctl;
+		snprintf(kctl->id.name, ctl_len, "%s %d %s",
+			capture_mixer_ctl_name, rtd->pcm->device, suffix);
+		kctl->put = msm_pcm_capture_app_type_cfg_ctl_put;
+		kctl->get = msm_pcm_capture_app_type_cfg_ctl_get;
+	}
+
+	return 0;
+}
+
+static int msm_pcm_add_controls(struct snd_soc_pcm_runtime *rtd)
+{
+	int ret = 0;
+
+	pr_debug("%s\n", __func__);
+	ret = msm_pcm_add_chmap_controls(rtd);
+	if (ret)
+		pr_err("%s: pcm add controls failed:%d\n", __func__, ret);
+	ret = msm_pcm_add_app_type_controls(rtd);
+	if (ret)
+		pr_err("%s: pcm add app type controls failed:%d\n",
+			__func__, ret);
+	ret = msm_add_stream_pan_scale_controls(rtd);
+	if (ret)
+		pr_err("%s: pcm add pan scale controls failed:%d\n",
+			__func__, ret);
+	ret = msm_add_device_down_mix_controls(rtd);
+	if (ret)
+		pr_err("%s: pcm add dnmix controls failed:%d\n",
+			__func__, ret);
+	return ret;
+}
+
+static int msm_asoc_pcm_new(struct snd_soc_pcm_runtime *rtd)
+{
+	struct snd_card *card = rtd->card->snd_card;
+	int ret = 0;
+
+	if (!card->dev->coherent_dma_mask)
+		card->dev->coherent_dma_mask = DMA_BIT_MASK(32);
+
+	ret = msm_pcm_add_controls(rtd);
+	if (ret) {
+		pr_err("%s, kctl add failed:%d\n", __func__, ret);
+		return ret;
+	}
+
+	ret = msm_pcm_add_volume_control(rtd);
+	if (ret)
+		pr_err("%s: Could not add pcm Volume Control %d\n",
+			__func__, ret);
+
+	ret = msm_pcm_add_compress_control(rtd);
+	if (ret)
+		pr_err("%s: Could not add pcm Compress Control %d\n",
+			__func__, ret);
+
+	ret = msm_pcm_add_audio_adsp_stream_cmd_control(rtd);
+	if (ret)
+		pr_err("%s: Could not add pcm ADSP Stream Cmd Control\n",
+			__func__);
+
+	ret = msm_pcm_add_audio_adsp_stream_callback_control(rtd);
+	if (ret)
+		pr_err("%s: Could not add pcm ADSP Stream Callback Control\n",
+			__func__);
+
+	return ret;
+}
+
+static snd_pcm_sframes_t msm_pcm_delay_blk(struct snd_pcm_substream *substream,
+		struct snd_soc_dai *dai)
+{
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	struct msm_audio *prtd = runtime->private_data;
+	struct audio_client *ac = prtd->audio_client;
+	snd_pcm_sframes_t frames;
+	int ret;
+
+	ret = q6asm_get_path_delay(prtd->audio_client);
+	if (ret) {
+		pr_err("%s: get_path_delay failed, ret=%d\n", __func__, ret);
+		return 0;
+	}
+
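+	/*
+	 * Worked example (hypothetical numbers): path_delay = 21333 us at
+	 * rate = 48000 Hz gives 21 ms * 48 frames/ms = 1008 frames, plus
+	 * 333 us * 48000 / 1000000 = 15 frames from the remainder, i.e.
+	 * 1023 frames before the empirical correction below.
+	 */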
+	/* convert microseconds to frames */
+	frames = ac->path_delay / 1000 * runtime->rate / 1000;
+
+	/* also convert the remainder from the initial division */
+	frames += ac->path_delay % 1000 * runtime->rate / 1000000;
+
+	/* overcompensate for the loss of precision (empirical) */
+	frames += 2;
+
+	return frames;
+}
+
+static struct snd_soc_platform_driver msm_soc_platform = {
+	.ops		= &msm_pcm_ops,
+	.pcm_new	= msm_asoc_pcm_new,
+	.delay_blk      = msm_pcm_delay_blk,
+};
+
+static int msm_pcm_probe(struct platform_device *pdev)
+{
+	int rc;
+	int id;
+	struct msm_plat_data *pdata;
+	const char *latency_level;
+
+	rc = of_property_read_u32(pdev->dev.of_node,
+				"qcom,msm-pcm-dsp-id", &id);
+	if (rc) {
+		dev_err(&pdev->dev, "%s: qcom,msm-pcm-dsp-id missing in DT node\n",
+					__func__);
+		return rc;
+	}
+
+	pdata = kzalloc(sizeof(struct msm_plat_data), GFP_KERNEL);
+	if (!pdata) {
+		dev_err(&pdev->dev, "Failed to allocate memory for platform data\n");
+		return -ENOMEM;
+	}
+
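+	/*
+	 * perf_mode selection: LEGACY_PCM_MODE by default; the
+	 * qcom,msm-pcm-low-latency property selects low latency, which the
+	 * optional qcom,latency-level string refines to "ultra" or "ull-pp".
+	 */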
+	if (of_property_read_bool(pdev->dev.of_node,
+				"qcom,msm-pcm-low-latency")) {
+		pdata->perf_mode = LOW_LATENCY_PCM_MODE;
+		rc = of_property_read_string(pdev->dev.of_node,
+			"qcom,latency-level", &latency_level);
+		if (!rc) {
+			if (!strcmp(latency_level, "ultra"))
+				pdata->perf_mode = ULTRA_LOW_LATENCY_PCM_MODE;
+			else if (!strcmp(latency_level, "ull-pp"))
+				pdata->perf_mode =
+					ULL_POST_PROCESSING_PCM_MODE;
+		}
+	} else {
+		pdata->perf_mode = LEGACY_PCM_MODE;
+	}
+
+	dev_set_drvdata(&pdev->dev, pdata);
+
+	dev_dbg(&pdev->dev, "%s: dev name %s\n",
+				__func__, dev_name(&pdev->dev));
+	return snd_soc_register_platform(&pdev->dev,
+				   &msm_soc_platform);
+}
+
+static int msm_pcm_remove(struct platform_device *pdev)
+{
+	struct msm_plat_data *pdata;
+
+	pdata = dev_get_drvdata(&pdev->dev);
+	kfree(pdata);
+	snd_soc_unregister_platform(&pdev->dev);
+	return 0;
+}
+
+static const struct of_device_id msm_pcm_dt_match[] = {
+	{.compatible = "qcom,msm-pcm-dsp"},
+	{}
+};
+MODULE_DEVICE_TABLE(of, msm_pcm_dt_match);
+
+static struct platform_driver msm_pcm_driver = {
+	.driver = {
+		.name = "msm-pcm-dsp",
+		.owner = THIS_MODULE,
+		.of_match_table = msm_pcm_dt_match,
+	},
+	.probe = msm_pcm_probe,
+	.remove = msm_pcm_remove,
+};
+
+static int __init msm_soc_platform_init(void)
+{
+	init_waitqueue_head(&the_locks.enable_wait);
+	init_waitqueue_head(&the_locks.eos_wait);
+	init_waitqueue_head(&the_locks.write_wait);
+	init_waitqueue_head(&the_locks.read_wait);
+
+	return platform_driver_register(&msm_pcm_driver);
+}
+module_init(msm_soc_platform_init);
+
+static void __exit msm_soc_platform_exit(void)
+{
+	platform_driver_unregister(&msm_pcm_driver);
+}
+module_exit(msm_soc_platform_exit);
+
+MODULE_DESCRIPTION("PCM module platform driver");
+MODULE_LICENSE("GPL v2");
diff -Nruw linux-4.4.115-fbx/sound/soc/msm./qdsp6v2/msm-pcm-q6-v2.h linux-4.4.115-fbx/sound/soc/msm/qdsp6v2/msm-pcm-q6-v2.h
--- linux-4.4.115-fbx/sound/soc/msm./qdsp6v2/msm-pcm-q6-v2.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/sound/soc/msm/qdsp6v2/msm-pcm-q6-v2.h	2019-01-22 16:16:29.631301899 +0100
@@ -0,0 +1,130 @@
+/*
+ * Copyright (C) 2008 Google, Inc.
+ * Copyright (C) 2008 HTC Corporation
+ * Copyright (c) 2012-2017 The Linux Foundation. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * See the GNU General Public License for more details.
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can find it at http://www.fsf.org.
+ */
+
+#ifndef _MSM_PCM_H
+#define _MSM_PCM_H
+#include <sound/apr_audio-v2.h>
+#include <sound/q6asm-v2.h>
+
+/* Support unconventional sample rates 12000, 24000 as well */
+#define USE_RATE                \
+			(SNDRV_PCM_RATE_8000_48000 | SNDRV_PCM_RATE_KNOT)
+
+extern int copy_count;
+
+struct buffer {
+	void *data;
+	unsigned size;
+	unsigned used;
+	unsigned addr;
+};
+
+struct buffer_rec {
+	void *data;
+	unsigned int size;
+	unsigned int read;
+	unsigned int addr;
+};
+
+struct audio_locks {
+	spinlock_t event_lock;
+	wait_queue_head_t read_wait;
+	wait_queue_head_t write_wait;
+	wait_queue_head_t eos_wait;
+	wait_queue_head_t enable_wait;
+	wait_queue_head_t flush_wait;
+};
+
+struct msm_audio_in_frame_info {
+	uint32_t size;
+	uint32_t offset;
+};
+
+#define PLAYBACK_MIN_NUM_PERIODS    2
+#define PLAYBACK_MAX_NUM_PERIODS    8
+#define PLAYBACK_MAX_PERIOD_SIZE    122880
+#define PLAYBACK_MIN_PERIOD_SIZE    128
+#define CAPTURE_MIN_NUM_PERIODS     2
+#define CAPTURE_MAX_NUM_PERIODS     8
+#define CAPTURE_MAX_PERIOD_SIZE     122880
+#define CAPTURE_MIN_PERIOD_SIZE     320
+
+struct msm_audio {
+	struct snd_pcm_substream *substream;
+	unsigned int pcm_size;
+	unsigned int pcm_count;
+	unsigned int pcm_irq_pos;       /* IRQ position */
+	uint16_t source; /* Encoding source bit mask */
+
+	struct audio_client *audio_client;
+
+	uint16_t session_id;
+
+	uint32_t samp_rate;
+	uint32_t channel_mode;
+	uint32_t dsp_cnt;
+
+	int abort; /* set when error, like sample rate mismatch */
+
+	bool reset_event;
+	int enabled;
+	int close_ack;
+	int cmd_ack;
+	/*
+	 * cmd_ack doesn't tell whether a particular command has been sent,
+	 * so it can't be used to decide whether to wait for completion.
+	 * Use cmd_pending instead when checking whether a command has been
+	 * sent or not.
+	 */
+	unsigned long cmd_pending;
+	atomic_t start;
+	atomic_t stop;
+	atomic_t out_count;
+	atomic_t in_count;
+	atomic_t out_needed;
+	atomic_t eos;
+	int out_head;
+	int periods;
+	int mmap_flag;
+	atomic_t pending_buffer;
+	bool set_channel_map;
+	char channel_map[8];
+	int cmd_interrupt;
+	bool meta_data_mode;
+	uint32_t volume;
+	bool compress_enable;
+	/* array of frame info */
+	struct msm_audio_in_frame_info in_frame_info[CAPTURE_MAX_NUM_PERIODS];
+};
+
+struct output_meta_data_st {
+	uint32_t meta_data_length;
+	uint32_t frame_size;
+	uint32_t timestamp_lsw;
+	uint32_t timestamp_msw;
+	uint32_t reserved[12];
+};
+
+struct msm_plat_data {
+	int perf_mode;
+	struct snd_pcm *pcm;
+};
+
+#endif /*_MSM_PCM_H*/
diff -Nruw linux-4.4.115-fbx/sound/soc/msm./qdsp6v2/msm-pcm-routing-devdep.c linux-4.4.115-fbx/sound/soc/msm/qdsp6v2/msm-pcm-routing-devdep.c
--- linux-4.4.115-fbx/sound/soc/msm./qdsp6v2/msm-pcm-routing-devdep.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/sound/soc/msm/qdsp6v2/msm-pcm-routing-devdep.c	2019-01-22 16:16:29.631301899 +0100
@@ -0,0 +1,137 @@
+/* Copyright (c) 2014, 2017 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <sound/hwdep.h>
+#include <sound/devdep_params.h>
+#include "msm-pcm-routing-devdep.h"
+#include "msm-ds2-dap-config.h"
+
+#ifdef CONFIG_SND_HWDEP
+static int msm_pcm_routing_hwdep_open(struct snd_hwdep *hw, struct file *file)
+{
+	pr_debug("%s\n", __func__);
+	msm_ds2_dap_update_port_parameters(hw, file, true);
+	return 0;
+}
+
+static int msm_pcm_routing_hwdep_release(struct snd_hwdep *hw,
+					 struct file *file)
+{
+	pr_debug("%s\n", __func__);
+	msm_ds2_dap_update_port_parameters(hw, file, false);
+	return 0;
+}
+
+static int msm_pcm_routing_hwdep_ioctl(struct snd_hwdep *hw, struct file *file,
+				       unsigned int cmd, unsigned long arg)
+{
+	int ret = 0;
+	void __user *argp = (void __user *)arg;
+
+	pr_debug("%s: cmd %x\n", __func__, cmd);
+	switch (cmd) {
+	case SNDRV_DEVDEP_DAP_IOCTL_SET_PARAM:
+	case SNDRV_DEVDEP_DAP_IOCTL_GET_PARAM:
+	case SNDRV_DEVDEP_DAP_IOCTL_DAP_COMMAND:
+	case SNDRV_DEVDEP_DAP_IOCTL_DAP_LICENSE:
+		msm_pcm_routing_acquire_lock();
+		ret = msm_ds2_dap_ioctl(hw, file, cmd, argp);
+		msm_pcm_routing_release_lock();
+		break;
+	case SNDRV_DEVDEP_DAP_IOCTL_GET_VISUALIZER:
+		ret = msm_ds2_dap_ioctl(hw, file, cmd, argp);
+		break;
+	default:
+		pr_err("%s called with invalid control 0x%X\n", __func__, cmd);
+		ret = -EINVAL;
+		break;
+	}
+	return ret;
+}
+
+void msm_pcm_routing_hwdep_free(struct snd_pcm *pcm)
+{
+	pr_debug("%s\n", __func__);
+}
+
+#ifdef CONFIG_COMPAT
+static int msm_pcm_routing_hwdep_compat_ioctl(struct snd_hwdep *hw,
+					      struct file *file,
+					      unsigned int cmd,
+					      unsigned long arg)
+{
+	int ret = 0;
+	void __user *argp = (void __user *)arg;
+
+	pr_debug("%s: cmd %x\n", __func__, cmd);
+	switch (cmd) {
+	case SNDRV_DEVDEP_DAP_IOCTL_SET_PARAM32:
+	case SNDRV_DEVDEP_DAP_IOCTL_GET_PARAM32:
+	case SNDRV_DEVDEP_DAP_IOCTL_DAP_COMMAND32:
+	case SNDRV_DEVDEP_DAP_IOCTL_DAP_LICENSE32:
+		msm_pcm_routing_acquire_lock();
+		ret = msm_ds2_dap_compat_ioctl(hw, file, cmd, argp);
+		msm_pcm_routing_release_lock();
+		break;
+	case SNDRV_DEVDEP_DAP_IOCTL_GET_VISUALIZER32:
+		ret = msm_ds2_dap_compat_ioctl(hw, file, cmd, argp);
+		break;
+	default:
+		pr_err("%s called with invalid control 0x%X\n", __func__, cmd);
+		ret = -EINVAL;
+		break;
+	}
+	return ret;
+}
+#endif
+
+int msm_pcm_routing_hwdep_new(struct snd_soc_pcm_runtime *runtime,
+			      struct msm_pcm_routing_bdai_data *msm_bedais)
+{
+	struct snd_hwdep *hwdep;
+	struct snd_soc_dai_link *dai_link = runtime->dai_link;
+	int rc;
+
+	if (dai_link->be_id < 0 ||
+		dai_link->be_id >= MSM_BACKEND_DAI_MAX) {
+		pr_err("%s:be_id %d invalid index\n",
+			__func__, dai_link->be_id);
+		return -EINVAL;
+	}
+	pr_debug("%s be_id %d\n", __func__, dai_link->be_id);
+	rc = snd_hwdep_new(runtime->card->snd_card,
+			   msm_bedais[dai_link->be_id].name,
+			   dai_link->be_id, &hwdep);
+	if (hwdep == NULL) {
+		pr_err("%s: hwdep intf failed to create %s- hwdep NULL\n",
+			__func__, msm_bedais[dai_link->be_id].name);
+		return rc;
+	}
+	if (IS_ERR_VALUE(rc)) {
+		pr_err("%s: hwdep intf failed to create %s rc %d\n", __func__,
+			msm_bedais[dai_link->be_id].name, rc);
+		return rc;
+	}
+
+	hwdep->iface = SNDRV_HWDEP_IFACE_AUDIO_BE;
+	hwdep->private_data = &msm_bedais[dai_link->be_id];
+	hwdep->ops.open = msm_pcm_routing_hwdep_open;
+	hwdep->ops.ioctl = msm_pcm_routing_hwdep_ioctl;
+	hwdep->ops.release = msm_pcm_routing_hwdep_release;
+#ifdef CONFIG_COMPAT
+	hwdep->ops.ioctl_compat = msm_pcm_routing_hwdep_compat_ioctl;
+#endif
+	return rc;
+}
+#endif
diff -Nruw linux-4.4.115-fbx/sound/soc/msm./qdsp6v2/msm-pcm-routing-devdep.h linux-4.4.115-fbx/sound/soc/msm/qdsp6v2/msm-pcm-routing-devdep.h
--- linux-4.4.115-fbx/sound/soc/msm./qdsp6v2/msm-pcm-routing-devdep.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/sound/soc/msm/qdsp6v2/msm-pcm-routing-devdep.h	2019-01-22 16:16:29.631301899 +0100
@@ -0,0 +1,34 @@
+/* Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
+* This program is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License version 2 and
+* only version 2 as published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+* GNU General Public License for more details.
+*/
+
+#ifndef _MSM_PCM_ROUTING_DEVDEP_H_
+#define _MSM_PCM_ROUTING_DEVDEP_H_
+
+#include <sound/soc.h>
+#include "msm-pcm-routing-v2.h"
+
+#ifdef CONFIG_SND_HWDEP
+int msm_pcm_routing_hwdep_new(struct snd_soc_pcm_runtime *runtime,
+			      struct msm_pcm_routing_bdai_data *msm_bedais);
+void msm_pcm_routing_hwdep_free(struct snd_pcm *pcm);
+#else
+static inline int msm_pcm_routing_hwdep_new(struct snd_soc_pcm_runtime *runtime,
+				struct msm_pcm_routing_bdai_data *msm_bedais)
+{
+	return 0;
+}
+
+static inline void msm_pcm_routing_hwdep_free(struct snd_pcm *pcm)
+{
+	return;
+}
+#endif
+#endif
diff -Nruw linux-4.4.115-fbx/sound/soc/msm./qdsp6v2/msm-pcm-routing-v2.c linux-4.4.115-fbx/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c
--- linux-4.4.115-fbx/sound/soc/msm./qdsp6v2/msm-pcm-routing-v2.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c	2019-10-29 09:26:26.157227780 +0100
@@ -0,0 +1,16547 @@
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/platform_device.h>
+#include <linux/bitops.h>
+#include <linux/mutex.h>
+#include <linux/of_device.h>
+#include <linux/slab.h>
+#include <sound/core.h>
+#include <sound/soc.h>
+#include <sound/soc-dapm.h>
+#include <sound/pcm.h>
+#include <sound/initval.h>
+#include <sound/control.h>
+#include <sound/q6adm-v2.h>
+#include <sound/q6asm-v2.h>
+#include <sound/q6afe-v2.h>
+#include <sound/tlv.h>
+#include <sound/asound.h>
+#include <sound/pcm_params.h>
+#include <sound/q6core.h>
+#include <sound/audio_cal_utils.h>
+#include <sound/audio_effects.h>
+#include <sound/hwdep.h>
+#include <sound/q6adm-v2.h>
+#include <sound/apr_audio-v2.h>
+
+#include "msm-pcm-routing-v2.h"
+#include "msm-pcm-routing-devdep.h"
+#include "msm-qti-pp-config.h"
+#include "msm-dts-srs-tm-config.h"
+#include "msm-dolby-dap-config.h"
+#include "msm-ds2-dap-config.h"
+#include "q6voice.h"
+#include "sound/q6lsm.h"
+
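+/*
+ * When the Dolby DAP/DS2 features are compiled out, the topology IDs are
+ * redefined to values that cannot match a real topology, so the switch
+ * cases referencing them can never be taken.
+ */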
+#ifndef CONFIG_DOLBY_DAP
+#undef DOLBY_ADM_COPP_TOPOLOGY_ID
+#define DOLBY_ADM_COPP_TOPOLOGY_ID 0xFFFFFFFE
+#endif
+
+#ifndef CONFIG_DOLBY_DS2
+#undef DS2_ADM_COPP_TOPOLOGY_ID
+#define DS2_ADM_COPP_TOPOLOGY_ID 0xFFFFFFFF
+#endif
+
+static struct mutex routing_lock;
+
+static struct cal_type_data *cal_data[MAX_ROUTING_CAL_TYPES];
+
+static int fm_switch_enable;
+static int hfp_switch_enable;
+static int int0_mi2s_switch_enable;
+static int int4_mi2s_switch_enable;
+static int pri_mi2s_switch_enable;
+static int sec_mi2s_switch_enable;
+static int tert_mi2s_switch_enable;
+static int quat_mi2s_switch_enable;
+static int fm_pcmrx_switch_enable;
+static int usb_switch_enable;
+static int lsm_port_index;
+static int slim0_rx_aanc_fb_port;
+static int msm_route_ec_ref_rx;
+static int msm_ec_ref_ch = 4;
+static int msm_ec_ref_bit_format = SNDRV_PCM_FORMAT_S16_LE;
+static int msm_ec_ref_sampling_rate = 48000;
+static uint32_t voc_session_id = ALL_SESSION_VSID;
+static int msm_route_ext_ec_ref;
+static bool is_custom_stereo_on;
+static bool is_ds2_on;
+static bool swap_ch;
+
+#define WEIGHT_0_DB 0x4000
+/* all the FEs which can support channel mixer */
+static struct msm_pcm_channel_mixer channel_mixer[MSM_FRONTEND_DAI_MM_SIZE];
+/* input BE for each FE */
+static int channel_input[MSM_FRONTEND_DAI_MM_SIZE][ADM_MAX_CHANNELS];
+
+enum {
+	MADNONE,
+	MADAUDIO,
+	MADBEACON,
+	MADULTRASOUND,
+	MADSWAUDIO,
+};
+
+#define ADM_LSM_PORT_INDEX 9
+
+#define SLIMBUS_0_TX_TEXT "SLIMBUS_0_TX"
+#define SLIMBUS_1_TX_TEXT "SLIMBUS_1_TX"
+#define SLIMBUS_2_TX_TEXT "SLIMBUS_2_TX"
+#define SLIMBUS_3_TX_TEXT "SLIMBUS_3_TX"
+#define SLIMBUS_4_TX_TEXT "SLIMBUS_4_TX"
+#define SLIMBUS_5_TX_TEXT "SLIMBUS_5_TX"
+#define TERT_MI2S_TX_TEXT "TERT_MI2S_TX"
+#define QUAT_MI2S_TX_TEXT "QUAT_MI2S_TX"
+#define ADM_LSM_TX_TEXT "ADM_LSM_TX"
+#define INT3_MI2S_TX_TEXT "INT3_MI2S_TX"
+
+#define LSM_FUNCTION_TEXT "LSM Function"
+static const char * const lsm_port_text[] = {
+	"None",
+	SLIMBUS_0_TX_TEXT, SLIMBUS_1_TX_TEXT, SLIMBUS_2_TX_TEXT,
+	SLIMBUS_3_TX_TEXT, SLIMBUS_4_TX_TEXT, SLIMBUS_5_TX_TEXT,
+	TERT_MI2S_TX_TEXT, QUAT_MI2S_TX_TEXT, ADM_LSM_TX_TEXT,
+	INT3_MI2S_TX_TEXT
+};
+
+struct msm_pcm_route_bdai_pp_params {
+	unsigned long pp_params_config;
+	bool mute_on;
+	int latency;
+};
+
+static struct msm_pcm_route_bdai_pp_params
+	msm_bedais_pp_params[MSM_BACKEND_DAI_MAX];
+
+/*
+ * The be_dai_name_table is passed to HAL so that it can specify the
+ * BE ID for the BE it wants to enable based on the name. Thus there
+ * is a matching table and structure in HAL that need to be updated
+ * if any changes to these are made.
+ */
+struct msm_pcm_route_bdai_name {
+	unsigned int be_id;
+	char be_name[LPASS_BE_NAME_MAX_LENGTH];
+};
+static struct msm_pcm_route_bdai_name be_dai_name_table[MSM_BACKEND_DAI_MAX];
+
+static bool msm_pcm_routing_test_pp_param(int be_idx, long param_bit);
+static int msm_routing_send_device_pp_params(int port_id,  int copp_idx,
+					     int fe_id);
+
+static int msm_routing_get_bit_width(unsigned int format)
+{
+	int bit_width;
+
+	switch (format) {
+	case SNDRV_PCM_FORMAT_S32_LE:
+		bit_width = 32;
+		break;
+	case SNDRV_PCM_FORMAT_S24_LE:
+	case SNDRV_PCM_FORMAT_S24_3LE:
+		bit_width = 24;
+		break;
+	case SNDRV_PCM_FORMAT_S16_LE:
+	default:
+		bit_width = 16;
+	}
+	return bit_width;
+}
+
+static bool msm_is_resample_needed(int input_sr, int output_sr)
+{
+	bool rc = false;
+
+	if (input_sr != output_sr)
+		rc = true;
+
+	pr_debug("perform resampling (%s) for copp rate (%d) afe rate (%d)\n",
+		(rc ? "oh yes" : "not really"),
+		input_sr, output_sr);
+
+	return rc;
+}
+
+static void msm_pcm_routing_cfg_pp(int port_id, int copp_idx, int topology,
+				   int channels)
+{
+	int rc = 0;
+
+	switch (topology) {
+	case SRS_TRUMEDIA_TOPOLOGY_ID:
+		pr_debug("%s: SRS_TRUMEDIA_TOPOLOGY_ID\n", __func__);
+		msm_dts_srs_tm_init(port_id, copp_idx);
+		break;
+	case DS2_ADM_COPP_TOPOLOGY_ID:
+		pr_debug("%s: DS2_ADM_COPP_TOPOLOGY %d\n",
+			 __func__, DS2_ADM_COPP_TOPOLOGY_ID);
+		rc = msm_ds2_dap_init(port_id, copp_idx, channels,
+				      is_custom_stereo_on);
+		if (rc < 0)
+			pr_err("%s: DS2 topo_id 0x%x, port %d, CS %d rc %d\n",
+				__func__, topology, port_id,
+				is_custom_stereo_on, rc);
+		break;
+	case DOLBY_ADM_COPP_TOPOLOGY_ID:
+		if (is_ds2_on) {
+			pr_debug("%s: DS2_ADM_COPP_TOPOLOGY\n", __func__);
+			rc = msm_ds2_dap_init(port_id, copp_idx, channels,
+				is_custom_stereo_on);
+			if (rc < 0)
+				pr_err("%s:DS2 topo_id 0x%x, port %d, rc %d\n",
+					__func__, topology, port_id, rc);
+		} else {
+			pr_debug("%s: DOLBY_ADM_COPP_TOPOLOGY_ID\n", __func__);
+			rc = msm_dolby_dap_init(port_id, copp_idx, channels,
+						is_custom_stereo_on);
+			if (rc < 0)
+				pr_err("%s: DS1 topo_id 0x%x, port %d, rc %d\n",
+					__func__, topology, port_id, rc);
+		}
+		break;
+	case ADM_CMD_COPP_OPEN_TOPOLOGY_ID_AUDIOSPHERE:
+		pr_debug("%s: TOPOLOGY_ID_AUDIOSPHERE\n", __func__);
+		rc = msm_qti_pp_asphere_init(port_id, copp_idx);
+		if (rc < 0)
+			pr_err("%s: topo_id 0x%x, port %d, copp %d, rc %d\n",
+				__func__, topology, port_id, copp_idx, rc);
+		break;
+	default:
+		/* custom topology specific feature param handlers */
+		break;
+	}
+}
+
+static void msm_pcm_routing_deinit_pp(int port_id, int topology)
+{
+	switch (topology) {
+	case SRS_TRUMEDIA_TOPOLOGY_ID:
+		pr_debug("%s: SRS_TRUMEDIA_TOPOLOGY_ID\n", __func__);
+		msm_dts_srs_tm_deinit(port_id);
+		break;
+	case DS2_ADM_COPP_TOPOLOGY_ID:
+		pr_debug("%s: DS2_ADM_COPP_TOPOLOGY_ID %d\n",
+			 __func__, DS2_ADM_COPP_TOPOLOGY_ID);
+		msm_ds2_dap_deinit(port_id);
+		break;
+	case DOLBY_ADM_COPP_TOPOLOGY_ID:
+		if (is_ds2_on) {
+			pr_debug("%s: DS2_ADM_COPP_TOPOLOGY_ID\n", __func__);
+			msm_ds2_dap_deinit(port_id);
+		} else {
+			pr_debug("%s: DOLBY_ADM_COPP_TOPOLOGY_ID\n", __func__);
+			msm_dolby_dap_deinit(port_id);
+		}
+		break;
+	case ADM_CMD_COPP_OPEN_TOPOLOGY_ID_AUDIOSPHERE:
+		pr_debug("%s: TOPOLOGY_ID_AUDIOSPHERE\n", __func__);
+		msm_qti_pp_asphere_deinit(port_id);
+		break;
+	default:
+		/* custom topology specific feature deinit handlers */
+		break;
+	}
+}
+
+static void msm_pcm_routng_cfg_matrix_map_pp(struct route_payload payload,
+					     int path_type, int perf_mode)
+{
+	int itr = 0, rc = 0;
+
+	if ((path_type == ADM_PATH_PLAYBACK) &&
+	    (perf_mode == LEGACY_PCM_MODE) &&
+	    is_custom_stereo_on) {
+		for (itr = 0; itr < payload.num_copps; itr++) {
+			if ((payload.port_id[itr] != SLIMBUS_0_RX) &&
+			    (payload.port_id[itr] != RT_PROXY_PORT_001_RX)) {
+				continue;
+			}
+
+			rc = msm_qti_pp_send_stereo_to_custom_stereo_cmd(
+				payload.port_id[itr],
+				payload.copp_idx[itr],
+				payload.session_id,
+				Q14_GAIN_ZERO_POINT_FIVE,
+				Q14_GAIN_ZERO_POINT_FIVE,
+				Q14_GAIN_ZERO_POINT_FIVE,
+				Q14_GAIN_ZERO_POINT_FIVE);
+			if (rc < 0)
+				pr_err("%s: err setting custom stereo\n",
+					__func__);
+		}
+	}
+}
+
+#define SLIMBUS_EXTPROC_RX AFE_PORT_INVALID
+struct msm_pcm_routing_bdai_data msm_bedais[MSM_BACKEND_DAI_MAX] = {
+	{ PRIMARY_I2S_RX, 0, {0}, {0}, 0, 0, 0, 0, {0}, LPASS_BE_PRI_I2S_RX},
+	{ PRIMARY_I2S_TX, 0, {0}, {0}, 0, 0, 0, 0, {0}, LPASS_BE_PRI_I2S_TX},
+	{ SLIMBUS_0_RX, 0, {0}, {0}, 0, 0, 0, 0, {0}, LPASS_BE_SLIMBUS_0_RX},
+	{ SLIMBUS_0_TX, 0, {0}, {0}, 0, 0, 0, 0, {0}, LPASS_BE_SLIMBUS_0_TX},
+	{ HDMI_RX, 0, {0}, {0}, 0, 0, 0, 0, {0}, LPASS_BE_HDMI},
+	{ INT_BT_SCO_RX, 0, {0}, {0}, 0, 0, 0, 0, {0}, LPASS_BE_INT_BT_SCO_RX},
+	{ INT_BT_SCO_TX, 0, {0}, {0}, 0, 0, 0, 0, {0}, LPASS_BE_INT_BT_SCO_TX},
+	{ INT_FM_RX, 0, {0}, {0}, 0, 0, 0, 0, {0}, LPASS_BE_INT_FM_RX},
+	{ INT_FM_TX, 0, {0}, {0}, 0, 0, 0, 0, {0}, LPASS_BE_INT_FM_TX},
+	{ RT_PROXY_PORT_001_RX, 0, {0}, {0}, 0, 0, 0, 0, {0},
+	  LPASS_BE_AFE_PCM_RX},
+	{ RT_PROXY_PORT_001_TX, 0, {0}, {0}, 0, 0, 0, 0, {0},
+	  LPASS_BE_AFE_PCM_TX},
+	{ AFE_PORT_ID_PRIMARY_PCM_RX, 0, {0}, {0}, 0, 0, 0, 0, {0},
+	  LPASS_BE_AUXPCM_RX},
+	{ AFE_PORT_ID_PRIMARY_PCM_TX, 0, {0}, {0}, 0, 0, 0, 0, {0},
+	  LPASS_BE_AUXPCM_TX},
+	{ VOICE_PLAYBACK_TX, 0, {0}, {0}, 0, 0, 0, 0, {0},
+	  LPASS_BE_VOICE_PLAYBACK_TX},
+	{ VOICE2_PLAYBACK_TX, 0, {0}, {0}, 0, 0, 0, 0, {0},
+	  LPASS_BE_VOICE2_PLAYBACK_TX},
+	{ VOICE_RECORD_RX, 0, {0}, {0}, 0, 0, 0, 0, {0},
+	  LPASS_BE_INCALL_RECORD_RX},
+	{ VOICE_RECORD_TX, 0, {0}, {0}, 0, 0, 0, 0, {0},
+	  LPASS_BE_INCALL_RECORD_TX},
+	{ MI2S_RX, 0, {0}, {0}, 0, 0, 0, 0, {0}, LPASS_BE_MI2S_RX},
+	{ MI2S_TX, 0, {0}, {0}, 0, 0, 0, 0, {0}, LPASS_BE_MI2S_TX},
+	{ SECONDARY_I2S_RX, 0, {0}, {0}, 0, 0, 0, 0, {0}, LPASS_BE_SEC_I2S_RX},
+	{ SLIMBUS_1_RX, 0, {0}, {0}, 0, 0, 0, 0, {0}, LPASS_BE_SLIMBUS_1_RX},
+	{ SLIMBUS_1_TX, 0, {0}, {0}, 0, 0, 0, 0, {0}, LPASS_BE_SLIMBUS_1_TX},
+	{ SLIMBUS_2_RX, 0, {0}, {0}, 0, 0, 0, 0, {0}, LPASS_BE_SLIMBUS_2_RX},
+	{ SLIMBUS_2_TX, 0, {0}, {0}, 0, 0, 0, 0, {0}, LPASS_BE_SLIMBUS_2_TX},
+	{ SLIMBUS_3_RX, 0, {0}, {0}, 0, 0, 0, 0, {0}, LPASS_BE_SLIMBUS_3_RX},
+	{ SLIMBUS_3_TX, 0, {0}, {0}, 0, 0, 0, 0, {0}, LPASS_BE_SLIMBUS_3_TX},
+	{ SLIMBUS_4_RX, 0, {0}, {0}, 0, 0, 0, 0, {0}, LPASS_BE_SLIMBUS_4_RX},
+	{ SLIMBUS_4_TX, 0, {0}, {0}, 0, 0, 0, 0, {0}, LPASS_BE_SLIMBUS_4_TX},
+	{ SLIMBUS_5_RX, 0, {0}, {0}, 0, 0, 0, 0, {0}, LPASS_BE_SLIMBUS_5_RX},
+	{ SLIMBUS_5_TX, 0, {0}, {0}, 0, 0, 0, 0, {0}, LPASS_BE_SLIMBUS_5_TX},
+	{ SLIMBUS_6_RX, 0, {0}, {0}, 0, 0, 0, 0, {0}, LPASS_BE_SLIMBUS_6_RX},
+	{ SLIMBUS_6_TX, 0, {0}, {0}, 0, 0, 0, 0, {0}, LPASS_BE_SLIMBUS_6_TX},
+	{ SLIMBUS_7_RX, 0, {0}, {0}, 0, 0, 0, 0, {0}, LPASS_BE_SLIMBUS_7_RX},
+	{ SLIMBUS_7_TX, 0, {0}, {0}, 0, 0, 0, 0, {0}, LPASS_BE_SLIMBUS_7_TX},
+	{ SLIMBUS_8_RX, 0, {0}, {0}, 0, 0, 0, 0, {0}, LPASS_BE_SLIMBUS_8_RX},
+	{ SLIMBUS_8_TX, 0, {0}, {0}, 0, 0, 0, 0, {0}, LPASS_BE_SLIMBUS_8_TX},
+	{ SLIMBUS_EXTPROC_RX, 0, {0}, {0}, 0, 0, 0, 0, {0}, LPASS_BE_STUB_RX},
+	{ SLIMBUS_EXTPROC_RX, 0, {0}, {0}, 0, 0, 0, 0, {0}, LPASS_BE_STUB_TX},
+	{ SLIMBUS_EXTPROC_RX, 0, {0}, {0}, 0, 0, 0, 0, {0}, LPASS_BE_STUB_1_TX},
+	{ AFE_PORT_ID_QUATERNARY_MI2S_RX, 0, {0}, {0}, 0, 0, 0, 0, {0},
+	  LPASS_BE_QUAT_MI2S_RX},
+	{ AFE_PORT_ID_QUATERNARY_MI2S_TX, 0, {0}, {0}, 0, 0, 0, 0, {0},
+	  LPASS_BE_QUAT_MI2S_TX},
+	{ AFE_PORT_ID_SECONDARY_MI2S_RX,  0, {0}, {0}, 0, 0, 0, 0, {0},
+	  LPASS_BE_SEC_MI2S_RX},
+	{ AFE_PORT_ID_SECONDARY_MI2S_TX,  0, {0}, {0}, 0, 0, 0, 0, {0},
+	  LPASS_BE_SEC_MI2S_TX},
+	{ AFE_PORT_ID_PRIMARY_MI2S_RX,    0, {0}, {0}, 0, 0, 0, 0, {0},
+	  LPASS_BE_PRI_MI2S_RX},
+	{ AFE_PORT_ID_PRIMARY_MI2S_TX,    0, {0}, {0}, 0, 0, 0, 0, {0},
+	  LPASS_BE_PRI_MI2S_TX},
+	{ AFE_PORT_ID_TERTIARY_MI2S_RX,   0, {0}, {0}, 0, 0, 0, 0, {0},
+	  LPASS_BE_TERT_MI2S_RX},
+	{ AFE_PORT_ID_TERTIARY_MI2S_TX,   0, {0}, {0}, 0, 0, 0, 0, {0},
+	  LPASS_BE_TERT_MI2S_TX},
+	{ AUDIO_PORT_ID_I2S_RX,           0, {0}, {0}, 0, 0, 0, 0, {0},
+	  LPASS_BE_AUDIO_I2S_RX},
+	{ AFE_PORT_ID_SECONDARY_PCM_RX,	  0, {0}, {0}, 0, 0, 0, 0, {0},
+	  LPASS_BE_SEC_AUXPCM_RX},
+	{ AFE_PORT_ID_SECONDARY_PCM_TX,   0, {0}, {0}, 0, 0, 0, 0, {0},
+	  LPASS_BE_SEC_AUXPCM_TX},
+	{ AFE_PORT_ID_SPDIF_RX, 0, {0}, {0}, 0, 0, 0, 0, {0},
+	  LPASS_BE_SPDIF_RX},
+	{ AFE_PORT_ID_SECONDARY_MI2S_RX_SD1, 0, {0}, {0}, 0, 0, 0, 0, {0},
+	  LPASS_BE_SEC_MI2S_RX_SD1},
+	{ AFE_PORT_ID_QUINARY_MI2S_RX, 0, {0}, {0}, 0, 0, 0, 0, {0},
+	  LPASS_BE_QUIN_MI2S_RX},
+	{ AFE_PORT_ID_QUINARY_MI2S_TX, 0, {0}, {0}, 0, 0, 0, 0, {0},
+	  LPASS_BE_QUIN_MI2S_TX},
+	{ AFE_PORT_ID_SENARY_MI2S_TX,   0, {0}, {0}, 0, 0, 0, 0, {0},
+	  LPASS_BE_SENARY_MI2S_TX},
+	{ AFE_PORT_ID_PRIMARY_TDM_RX, 0, {0}, {0}, 0, 0, 0, 0, {0},
+	  LPASS_BE_PRI_TDM_RX_0},
+	{ AFE_PORT_ID_PRIMARY_TDM_TX, 0, {0}, {0}, 0, 0, 0, 0, {0},
+	  LPASS_BE_PRI_TDM_TX_0},
+	{ AFE_PORT_ID_PRIMARY_TDM_RX_1, 0, {0}, {0}, 0, 0, 0, 0, {0},
+	  LPASS_BE_PRI_TDM_RX_1},
+	{ AFE_PORT_ID_PRIMARY_TDM_TX_1, 0, {0}, {0}, 0, 0, 0, 0, {0},
+	  LPASS_BE_PRI_TDM_TX_1},
+	{ AFE_PORT_ID_PRIMARY_TDM_RX_2, 0, {0}, {0}, 0, 0, 0, 0, {0},
+	  LPASS_BE_PRI_TDM_RX_2},
+	{ AFE_PORT_ID_PRIMARY_TDM_TX_2, 0, {0}, {0}, 0, 0, 0, 0, {0},
+	  LPASS_BE_PRI_TDM_TX_2},
+	{ AFE_PORT_ID_PRIMARY_TDM_RX_3, 0, {0}, {0}, 0, 0, 0, 0, {0},
+	  LPASS_BE_PRI_TDM_RX_3},
+	{ AFE_PORT_ID_PRIMARY_TDM_TX_3, 0, {0}, {0}, 0, 0, 0, 0, {0},
+	  LPASS_BE_PRI_TDM_TX_3},
+	{ AFE_PORT_ID_PRIMARY_TDM_RX_4, 0, {0}, {0}, 0, 0, 0, 0, {0},
+	  LPASS_BE_PRI_TDM_RX_4},
+	{ AFE_PORT_ID_PRIMARY_TDM_TX_4, 0, {0}, {0}, 0, 0, 0, 0, {0},
+	  LPASS_BE_PRI_TDM_TX_4},
+	{ AFE_PORT_ID_PRIMARY_TDM_RX_5, 0, {0}, {0}, 0, 0, 0, 0, {0},
+	  LPASS_BE_PRI_TDM_RX_5},
+	{ AFE_PORT_ID_PRIMARY_TDM_TX_5, 0, {0}, {0}, 0, 0, 0, 0, {0},
+	  LPASS_BE_PRI_TDM_TX_5},
+	{ AFE_PORT_ID_PRIMARY_TDM_RX_6, 0, {0}, {0}, 0, 0, 0, 0, {0},
+	  LPASS_BE_PRI_TDM_RX_6},
+	{ AFE_PORT_ID_PRIMARY_TDM_TX_6, 0, {0}, {0}, 0, 0, 0, 0, {0},
+	  LPASS_BE_PRI_TDM_TX_6},
+	{ AFE_PORT_ID_PRIMARY_TDM_RX_7, 0, {0}, {0}, 0, 0, 0, 0, {0},
+	  LPASS_BE_PRI_TDM_RX_7},
+	{ AFE_PORT_ID_PRIMARY_TDM_TX_7, 0, {0}, {0}, 0, 0, 0, 0, {0},
+	  LPASS_BE_PRI_TDM_TX_7},
+	{ AFE_PORT_ID_SECONDARY_TDM_RX, 0, {0}, {0}, 0, 0, 0, 0, {0},
+	  LPASS_BE_SEC_TDM_RX_0},
+	{ AFE_PORT_ID_SECONDARY_TDM_TX, 0, {0}, {0}, 0, 0, 0, 0, {0},
+	  LPASS_BE_SEC_TDM_TX_0},
+	{ AFE_PORT_ID_SECONDARY_TDM_RX_1, 0, {0}, {0}, 0, 0, 0, 0, {0},
+	  LPASS_BE_SEC_TDM_RX_1},
+	{ AFE_PORT_ID_SECONDARY_TDM_TX_1, 0, {0}, {0}, 0, 0, 0, 0, {0},
+	  LPASS_BE_SEC_TDM_TX_1},
+	{ AFE_PORT_ID_SECONDARY_TDM_RX_2, 0, {0}, {0}, 0, 0, 0, 0, {0},
+	  LPASS_BE_SEC_TDM_RX_2},
+	{ AFE_PORT_ID_SECONDARY_TDM_TX_2, 0, {0}, {0}, 0, 0, 0, 0, {0},
+	  LPASS_BE_SEC_TDM_TX_2},
+	{ AFE_PORT_ID_SECONDARY_TDM_RX_3, 0, {0}, {0}, 0, 0, 0, 0, {0},
+	  LPASS_BE_SEC_TDM_RX_3},
+	{ AFE_PORT_ID_SECONDARY_TDM_TX_3, 0, {0}, {0}, 0, 0, 0, 0, {0},
+	  LPASS_BE_SEC_TDM_TX_3},
+	{ AFE_PORT_ID_SECONDARY_TDM_RX_4, 0, {0}, {0}, 0, 0, 0, 0, {0},
+	  LPASS_BE_SEC_TDM_RX_4},
+	{ AFE_PORT_ID_SECONDARY_TDM_TX_4, 0, {0}, {0}, 0, 0, 0, 0, {0},
+	  LPASS_BE_SEC_TDM_TX_4},
+	{ AFE_PORT_ID_SECONDARY_TDM_RX_5, 0, {0}, {0}, 0, 0, 0, 0, {0},
+	  LPASS_BE_SEC_TDM_RX_5},
+	{ AFE_PORT_ID_SECONDARY_TDM_TX_5, 0, {0}, {0}, 0, 0, 0, 0, {0},
+	  LPASS_BE_SEC_TDM_TX_5},
+	{ AFE_PORT_ID_SECONDARY_TDM_RX_6, 0, {0}, {0}, 0, 0, 0, 0, {0},
+	  LPASS_BE_SEC_TDM_RX_6},
+	{ AFE_PORT_ID_SECONDARY_TDM_TX_6, 0, {0}, {0}, 0, 0, 0, 0, {0},
+	  LPASS_BE_SEC_TDM_TX_6},
+	{ AFE_PORT_ID_SECONDARY_TDM_RX_7, 0, {0}, {0}, 0, 0, 0, 0, {0},
+	  LPASS_BE_SEC_TDM_RX_7},
+	{ AFE_PORT_ID_SECONDARY_TDM_TX_7, 0, {0}, {0}, 0, 0, 0, 0, {0},
+	  LPASS_BE_SEC_TDM_TX_7},
+	{ AFE_PORT_ID_TERTIARY_TDM_RX, 0, {0}, {0}, 0, 0, 0, 0, {0},
+	  LPASS_BE_TERT_TDM_RX_0},
+	{ AFE_PORT_ID_TERTIARY_TDM_TX, 0, {0}, {0}, 0, 0, 0, 0, {0},
+	  LPASS_BE_TERT_TDM_TX_0},
+	{ AFE_PORT_ID_TERTIARY_TDM_RX_1, 0, {0}, {0}, 0, 0, 0, 0, {0},
+	  LPASS_BE_TERT_TDM_RX_1},
+	{ AFE_PORT_ID_TERTIARY_TDM_TX_1, 0, {0}, {0}, 0, 0, 0, 0, {0},
+	  LPASS_BE_TERT_TDM_TX_1},
+	{ AFE_PORT_ID_TERTIARY_TDM_RX_2, 0, {0}, {0}, 0, 0, 0, 0, {0},
+	  LPASS_BE_TERT_TDM_RX_2},
+	{ AFE_PORT_ID_TERTIARY_TDM_TX_2, 0, {0}, {0}, 0, 0, 0, 0, {0},
+	  LPASS_BE_TERT_TDM_TX_2},
+	{ AFE_PORT_ID_TERTIARY_TDM_RX_3, 0, {0}, {0}, 0, 0, 0, 0, {0},
+	  LPASS_BE_TERT_TDM_RX_3},
+	{ AFE_PORT_ID_TERTIARY_TDM_TX_3, 0, {0}, {0}, 0, 0, 0, 0, {0},
+	  LPASS_BE_TERT_TDM_TX_3},
+	{ AFE_PORT_ID_TERTIARY_TDM_RX_4, 0, {0}, {0}, 0, 0, 0, 0, {0},
+	  LPASS_BE_TERT_TDM_RX_4},
+	{ AFE_PORT_ID_TERTIARY_TDM_TX_4, 0, {0}, {0}, 0, 0, 0, 0, {0},
+	  LPASS_BE_TERT_TDM_TX_4},
+	{ AFE_PORT_ID_TERTIARY_TDM_RX_5, 0, {0}, {0}, 0, 0, 0, 0, {0},
+	  LPASS_BE_TERT_TDM_RX_5},
+	{ AFE_PORT_ID_TERTIARY_TDM_TX_5, 0, {0}, {0}, 0, 0, 0, 0, {0},
+	  LPASS_BE_TERT_TDM_TX_5},
+	{ AFE_PORT_ID_TERTIARY_TDM_RX_6, 0, {0}, {0}, 0, 0, 0, 0, {0},
+	  LPASS_BE_TERT_TDM_RX_6},
+	{ AFE_PORT_ID_TERTIARY_TDM_TX_6, 0, {0}, {0}, 0, 0, 0, 0, {0},
+	  LPASS_BE_TERT_TDM_TX_6},
+	{ AFE_PORT_ID_TERTIARY_TDM_RX_7, 0, {0}, {0}, 0, 0, 0, 0, {0},
+	  LPASS_BE_TERT_TDM_RX_7},
+	{ AFE_PORT_ID_TERTIARY_TDM_TX_7, 0, {0}, {0}, 0, 0, 0, 0, {0},
+	  LPASS_BE_TERT_TDM_TX_7},
+	{ AFE_PORT_ID_QUATERNARY_TDM_RX, 0, {0}, {0}, 0, 0, 0, 0, {0},
+	  LPASS_BE_QUAT_TDM_RX_0},
+	{ AFE_PORT_ID_QUATERNARY_TDM_TX, 0, {0}, {0}, 0, 0, 0, 0, {0},
+	  LPASS_BE_QUAT_TDM_TX_0},
+	{ AFE_PORT_ID_QUATERNARY_TDM_RX_1, 0, {0}, {0}, 0, 0, 0, 0, {0},
+	  LPASS_BE_QUAT_TDM_RX_1},
+	{ AFE_PORT_ID_QUATERNARY_TDM_TX_1, 0, {0}, {0}, 0, 0, 0, 0, {0},
+	  LPASS_BE_QUAT_TDM_TX_1},
+	{ AFE_PORT_ID_QUATERNARY_TDM_RX_2, 0, {0}, {0}, 0, 0, 0, 0, {0},
+	  LPASS_BE_QUAT_TDM_RX_2},
+	{ AFE_PORT_ID_QUATERNARY_TDM_TX_2, 0, {0}, {0}, 0, 0, 0, 0, {0},
+	  LPASS_BE_QUAT_TDM_TX_2},
+	{ AFE_PORT_ID_QUATERNARY_TDM_RX_3, 0, {0}, {0}, 0, 0, 0, 0, {0},
+	  LPASS_BE_QUAT_TDM_RX_3},
+	{ AFE_PORT_ID_QUATERNARY_TDM_TX_3, 0, {0}, {0}, 0, 0, 0, 0, {0},
+	  LPASS_BE_QUAT_TDM_TX_3},
+	{ AFE_PORT_ID_QUATERNARY_TDM_RX_4, 0, {0}, {0}, 0, 0, 0, 0, {0},
+	  LPASS_BE_QUAT_TDM_RX_4},
+	{ AFE_PORT_ID_QUATERNARY_TDM_TX_4, 0, {0}, {0}, 0, 0, 0, 0, {0},
+	  LPASS_BE_QUAT_TDM_TX_4},
+	{ AFE_PORT_ID_QUATERNARY_TDM_RX_5, 0, {0}, {0}, 0, 0, 0, 0, {0},
+	  LPASS_BE_QUAT_TDM_RX_5},
+	{ AFE_PORT_ID_QUATERNARY_TDM_TX_5, 0, {0}, {0}, 0, 0, 0, 0, {0},
+	  LPASS_BE_QUAT_TDM_TX_5},
+	{ AFE_PORT_ID_QUATERNARY_TDM_RX_6, 0, {0}, {0}, 0, 0, 0, 0, {0},
+	  LPASS_BE_QUAT_TDM_RX_6},
+	{ AFE_PORT_ID_QUATERNARY_TDM_TX_6, 0, {0}, {0}, 0, 0, 0, 0, {0},
+	  LPASS_BE_QUAT_TDM_TX_6},
+	{ AFE_PORT_ID_QUATERNARY_TDM_RX_7, 0, {0}, {0}, 0, 0, 0, 0, {0},
+	  LPASS_BE_QUAT_TDM_RX_7},
+	{ AFE_PORT_ID_QUATERNARY_TDM_TX_7, 0, {0}, {0}, 0, 0, 0, 0, {0},
+	  LPASS_BE_QUAT_TDM_TX_7},
+	{ INT_BT_A2DP_RX, 0, {0}, {0}, 0, 0, 0, 0, {0},
+	  LPASS_BE_INT_BT_A2DP_RX},
+	{ AFE_PORT_ID_USB_RX, 0, {0}, {0}, 0, 0, 0, 0, {0},
+	  LPASS_BE_USB_AUDIO_RX},
+	{ AFE_PORT_ID_USB_TX, 0, {0}, {0}, 0, 0, 0, 0, {0},
+	  LPASS_BE_USB_AUDIO_TX},
+	{ DISPLAY_PORT_RX, 0, {0}, {0}, 0, 0, 0, 0, {0},
+	  LPASS_BE_DISPLAY_PORT},
+	{ AFE_PORT_ID_TERTIARY_PCM_RX, 0, {0}, {0}, 0, 0, 0, 0, {0},
+	  LPASS_BE_TERT_AUXPCM_RX},
+	{ AFE_PORT_ID_TERTIARY_PCM_TX, 0, {0}, {0}, 0, 0, 0, 0, {0},
+	  LPASS_BE_TERT_AUXPCM_TX},
+	{ AFE_PORT_ID_QUATERNARY_PCM_RX, 0, {0}, {0}, 0, 0, 0, 0, {0},
+	  LPASS_BE_QUAT_AUXPCM_RX},
+	{ AFE_PORT_ID_QUATERNARY_PCM_TX, 0, {0}, {0}, 0, 0, 0, 0, {0},
+	  LPASS_BE_QUAT_AUXPCM_TX},
+	{ AFE_PORT_ID_INT0_MI2S_RX, 0, {0}, {0}, 0, 0, 0, 0, {0},
+	  LPASS_BE_INT0_MI2S_RX},
+	{ AFE_PORT_ID_INT0_MI2S_TX, 0, {0}, {0}, 0, 0, 0, 0, {0},
+	  LPASS_BE_INT0_MI2S_TX},
+	{ AFE_PORT_ID_INT1_MI2S_RX, 0, {0}, {0}, 0, 0, 0, 0, {0},
+	  LPASS_BE_INT1_MI2S_RX},
+	{ AFE_PORT_ID_INT1_MI2S_TX, 0, {0}, {0}, 0, 0, 0, 0, {0},
+	  LPASS_BE_INT1_MI2S_TX},
+	{ AFE_PORT_ID_INT2_MI2S_RX, 0, {0}, {0}, 0, 0, 0, 0, {0},
+	  LPASS_BE_INT2_MI2S_RX},
+	{ AFE_PORT_ID_INT2_MI2S_TX, 0, {0}, {0}, 0, 0, 0, 0, {0},
+	  LPASS_BE_INT2_MI2S_TX},
+	{ AFE_PORT_ID_INT3_MI2S_RX, 0, {0}, {0}, 0, 0, 0, 0, {0},
+	  LPASS_BE_INT3_MI2S_RX},
+	{ AFE_PORT_ID_INT3_MI2S_TX, 0, {0}, {0}, 0, 0, 0, 0, {0},
+	  LPASS_BE_INT3_MI2S_TX},
+	{ AFE_PORT_ID_INT4_MI2S_RX, 0, {0}, {0}, 0, 0, 0, 0, {0},
+	  LPASS_BE_INT4_MI2S_RX},
+	{ AFE_PORT_ID_INT4_MI2S_TX, 0, {0}, {0}, 0, 0, 0, 0, {0},
+	  LPASS_BE_INT4_MI2S_TX},
+	{ AFE_PORT_ID_INT5_MI2S_RX, 0, {0}, {0}, 0, 0, 0, 0, {0},
+	  LPASS_BE_INT5_MI2S_RX},
+	{ AFE_PORT_ID_INT5_MI2S_TX, 0, {0}, {0}, 0, 0, 0, 0, {0},
+	  LPASS_BE_INT5_MI2S_TX},
+	{ AFE_PORT_ID_INT6_MI2S_RX, 0, {0}, {0}, 0, 0, 0, 0, {0},
+	  LPASS_BE_INT6_MI2S_RX},
+	{ AFE_PORT_ID_INT6_MI2S_TX, 0, {0}, {0}, 0, 0, 0, 0, {0},
+	  LPASS_BE_INT6_MI2S_TX},
+};
+
+/*
+ * Track ASM playback & capture sessions of each DAI.
+ * Track LSM listen sessions.
+ */
+static struct msm_pcm_routing_fdai_data
+	fe_dai_map[MSM_FRONTEND_DAI_MAX][2] = {
+	/* MULTIMEDIA1 */
+	{{0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} },
+	 {0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} } },
+	/* MULTIMEDIA2 */
+	{{0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} },
+	 {0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} } },
+	/* MULTIMEDIA3 */
+	{{0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} },
+	 {0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} } },
+	/* MULTIMEDIA4 */
+	{{0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} },
+	 {0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} } },
+	/* MULTIMEDIA5 */
+	{{0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} },
+	 {0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} } },
+	/* MULTIMEDIA6 */
+	{{0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} },
+	 {0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} } },
+	/* MULTIMEDIA7*/
+	{{0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} },
+	 {0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} } },
+	/* MULTIMEDIA7 */
+	{{0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} },
+	 {0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} } },
+	/* MULTIMEDIA9 */
+	{{0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} },
+	 {0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} } },
+	/* MULTIMEDIA10 */
+	{{0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} },
+	 {0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} } },
+	/* MULTIMEDIA11 */
+	{{0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} },
+	 {0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} } },
+	/* MULTIMEDIA12 */
+	{{0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} },
+	 {0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} } },
+	/* MULTIMEDIA13 */
+	{{0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} },
+	 {0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} } },
+	/* MULTIMEDIA14 */
+	{{0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} },
+	 {0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} } },
+	/* MULTIMEDIA15 */
+	{{0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} },
+	 {0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} } },
+	/* MULTIMEDIA16 */
+	{{0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} },
+	 {0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} } },
+	/* MULTIMEDIA17 */
+	{{0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} },
+	 {0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} } },
+	/* MULTIMEDIA18 */
+	{{0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} },
+	 {0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} } },
+	/* MULTIMEDIA19 */
+	{{0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} },
+	 {0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} } },
+	/* MULTIMEDIA20 */
+	{{0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} },
+	 {0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} } },
+	/* MULTIMEDIA21 */
+	{{0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} },
+	 {0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} } },
+	/* MULTIMEDIA22 */
+	{{0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} },
+	 {0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} } },
+	/* MULTIMEDIA23 */
+	{{0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} },
+	 {0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} } },
+	/* MULTIMEDIA24 */
+	{{0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} },
+	 {0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} } },
+	/* MULTIMEDIA25 */
+	{{0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} },
+	 {0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} } },
+	/* MULTIMEDIA26 */
+	{{0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} },
+	 {0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} } },
+	/* MULTIMEDIA27 */
+	{{0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} },
+	 {0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} } },
+	/* CS_VOICE */
+	{{0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} },
+	 {0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} } },
+	/* VOIP */
+	{{0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} },
+	 {0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} } },
+	/* AFE_RX */
+	{{0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} },
+	 {0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} } },
+	/* AFE_TX */
+	{{0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} },
+	 {0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} } },
+	/* VOICE_STUB */
+	{{0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} },
+	 {0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} } },
+	/* VOLTE */
+	{{0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} },
+	 {0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} } },
+	/* DTMF_RX */
+	{{0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} },
+	 {0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} } },
+	/* VOICE2 */
+	{{0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} },
+	 {0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} } },
+	/* QCHAT */
+	{{0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} },
+	 {0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} } },
+	/* VOLTE_STUB */
+	{{0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} },
+	 {0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} } },
+	/* LSM1 */
+	{{0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} },
+	 {0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} } },
+	/* LSM2 */
+	{{0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} },
+	 {0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} } },
+	/* LSM3 */
+	{{0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} },
+	 {0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} } },
+	/* LSM4 */
+	{{0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} },
+	 {0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} } },
+	/* LSM5 */
+	{{0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} },
+	 {0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} } },
+	/* LSM6 */
+	{{0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} },
+	 {0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} } },
+	/* LSM7 */
+	{{0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} },
+	 {0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} } },
+	/* LSM8 */
+	{{0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} },
+	 {0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} } },
+	/* VOICE2_STUB */
+	{{0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} },
+	 {0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} } },
+	/* VOWLAN */
+	{{0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} },
+	 {0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} } },
+	/* VOICEMMODE1 */
+	{{0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} },
+	 {0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} } },
+	/* VOICEMMODE2 */
+	{{0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} },
+	 {0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} } },
+};
+
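+/* Bitmask of opened COPP indices per {FE DAI, session type, BE DAI} */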
+static unsigned long session_copp_map[MSM_FRONTEND_DAI_MAX][2]
+				     [MSM_BACKEND_DAI_MAX];
+static struct msm_pcm_routing_app_type_data app_type_cfg[MAX_APP_TYPES];
+static struct msm_pcm_routing_app_type_data lsm_app_type_cfg[MAX_APP_TYPES];
+static struct msm_pcm_stream_app_type_cfg
+	fe_dai_app_type_cfg[MSM_FRONTEND_DAI_MAX][2][MSM_BACKEND_DAI_MAX];
+
+static int last_be_id_configured[MSM_FRONTEND_DAI_MAX][MAX_SESSION_TYPES];
+
+/* The caller of this should acquire the routing lock */
+void msm_pcm_routing_get_bedai_info(int be_idx,
+				    struct msm_pcm_routing_bdai_data *be_dai)
+{
+	if (be_idx >= 0 && be_idx < MSM_BACKEND_DAI_MAX)
+		memcpy(be_dai, &msm_bedais[be_idx],
+		       sizeof(struct msm_pcm_routing_bdai_data));
+}
+
+/* The caller of this should acquire the routing lock */
+void msm_pcm_routing_get_fedai_info(int fe_idx, int sess_type,
+				    struct msm_pcm_routing_fdai_data *fe_dai)
+{
+	if ((sess_type == SESSION_TYPE_TX) || (sess_type == SESSION_TYPE_RX))
+		memcpy(fe_dai, &fe_dai_map[fe_idx][sess_type],
+		       sizeof(struct msm_pcm_routing_fdai_data));
+}
+
+void msm_pcm_routing_acquire_lock(void)
+{
+	mutex_lock(&routing_lock);
+}
+
+void msm_pcm_routing_release_lock(void)
+{
+	mutex_unlock(&routing_lock);
+}
+
+static int msm_pcm_routing_get_app_type_idx(int app_type)
+{
+	int idx;
+
+	pr_debug("%s: app_type: %d\n", __func__, app_type);
+	for (idx = 0; idx < MAX_APP_TYPES; idx++) {
+		if (app_type_cfg[idx].app_type == app_type)
+			return idx;
+	}
+	pr_info("%s: App type not available, fallback to default\n", __func__);
+	return 0;
+}
+
+static int msm_pcm_routing_get_lsm_app_type_idx(int app_type)
+{
+	int idx;
+
+	pr_debug("%s: app_type: %d\n", __func__, app_type);
+	for (idx = 0; idx < MAX_APP_TYPES; idx++) {
+		if (lsm_app_type_cfg[idx].app_type == app_type)
+			return idx;
+	}
+	pr_debug("%s: App type not available, fallback to default\n", __func__);
+	return 0;
+}
+
+static bool is_mm_lsm_fe_id(int fe_id)
+{
+	bool rc = true;
+
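+	/* Valid IDs are the multimedia front ends and LSM1..LSM8 */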
+	if (fe_id > MSM_FRONTEND_DAI_MM_MAX_ID &&
+		((fe_id < MSM_FRONTEND_DAI_LSM1) ||
+		 (fe_id > MSM_FRONTEND_DAI_LSM8))) {
+		rc = false;
+	}
+	return rc;
+}
+
+int msm_pcm_routing_reg_stream_app_type_cfg(
+	int fedai_id, int session_type, int be_id,
+	struct msm_pcm_stream_app_type_cfg *cfg_data)
+{
+	int ret = 0;
+
+	if (cfg_data == NULL) {
+		pr_err("%s: Received NULL pointer for cfg_data\n", __func__);
+		ret = -EINVAL;
+		goto done;
+	}
+
+	pr_debug("%s: fedai_id %d, session_type %d, be_id %d, app_type %d, acdb_dev_id %d, sample_rate %d\n",
+		__func__, fedai_id, session_type, be_id,
+		cfg_data->app_type, cfg_data->acdb_dev_id,
+		cfg_data->sample_rate);
+
+	if (!is_mm_lsm_fe_id(fedai_id)) {
+		pr_err("%s: Invalid machine driver ID %d\n",
+			__func__, fedai_id);
+		ret = -EINVAL;
+		goto done;
+	}
+	if (session_type != SESSION_TYPE_RX &&
+		session_type != SESSION_TYPE_TX) {
+		pr_err("%s: Invalid session type %d\n",
+			__func__, session_type);
+		ret = -EINVAL;
+		goto done;
+	}
+	if (be_id < 0 || be_id >= MSM_BACKEND_DAI_MAX) {
+		pr_err("%s: Received out of bounds be_id %d\n",
+			__func__, be_id);
+		ret = -EINVAL;
+		goto done;
+	}
+
+	fe_dai_app_type_cfg[fedai_id][session_type][be_id] = *cfg_data;
+
+	/*
+	 * Record this BE ID as the most recently configured one so the
+	 * get mixer control knows which configuration to return.
+	 */
+	last_be_id_configured[fedai_id][session_type] = be_id;
+
+done:
+	return ret;
+}
+EXPORT_SYMBOL(msm_pcm_routing_reg_stream_app_type_cfg);
+
+/**
+ * msm_pcm_routing_get_stream_app_type_cfg
+ *
+ * Receives fedai_id, session_type, be_id, and populates app_type,
+ * acdb_dev_id, & sample rate. Returns 0 on success. On failure returns
+ * -EINVAL and does not alter passed values.
+ *
+ * fedai_id - Passed value, front end ID for which app type config is wanted
+ * session_type - Passed value, session type for which app type config
+ *                is wanted
+ * bedai_id - Returned value, back end device id the app type config data is for
+ * cfg_data - Returned value, configuration data used by app type config
+ */
+int msm_pcm_routing_get_stream_app_type_cfg(
+	int fedai_id, int session_type, int *bedai_id,
+	struct msm_pcm_stream_app_type_cfg *cfg_data)
+{
+	int be_id;
+	int ret = 0;
+
+	if (bedai_id == NULL) {
+		pr_err("%s: Received NULL pointer for backend ID\n", __func__);
+		ret = -EINVAL;
+		goto done;
+	} else if (cfg_data == NULL) {
+		pr_err("%s: NULL pointer sent for cfg_data\n", __func__);
+		ret = -EINVAL;
+		goto done;
+	} else if (!is_mm_lsm_fe_id(fedai_id)) {
+		pr_err("%s: Invalid FE ID %d\n", __func__, fedai_id);
+		ret = -EINVAL;
+		goto done;
+	} else if (session_type != SESSION_TYPE_RX &&
+		   session_type != SESSION_TYPE_TX) {
+		pr_err("%s: Invalid session type %d\n", __func__, session_type);
+		ret = -EINVAL;
+		goto done;
+	}
+
+	be_id = last_be_id_configured[fedai_id][session_type];
+	if (be_id < 0 || be_id >= MSM_BACKEND_DAI_MAX) {
+		pr_err("%s: Invalid BE ID %d\n", __func__, be_id);
+		ret = -EINVAL;
+		goto done;
+	}
+
+	*bedai_id = be_id;
+	*cfg_data = fe_dai_app_type_cfg[fedai_id][session_type][be_id];
+	pr_debug("%s: fedai_id %d, session_type %d, be_id %d, app_type %d, acdb_dev_id %d, sample_rate %d\n",
+		__func__, fedai_id, session_type, *bedai_id,
+		cfg_data->app_type, cfg_data->acdb_dev_id,
+		cfg_data->sample_rate);
+done:
+	return ret;
+}
+EXPORT_SYMBOL(msm_pcm_routing_get_stream_app_type_cfg);
+
+static struct cal_block_data *msm_routing_find_topology_by_path(int path,
+								int cal_index)
+{
+	struct list_head		*ptr, *next;
+	struct cal_block_data		*cal_block = NULL;
+
+	pr_debug("%s\n", __func__);
+
+	list_for_each_safe(ptr, next,
+		&cal_data[cal_index]->cal_blocks) {
+
+		cal_block = list_entry(ptr,
+			struct cal_block_data, list);
+
+		if (((struct audio_cal_info_adm_top *)cal_block
+			->cal_info)->path == path) {
+			return cal_block;
+		}
+	}
+	pr_debug("%s: Can't find topology for path %d\n", __func__, path);
+	return NULL;
+}
+
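+/*
+ * Find a calibration block matching path, app type and ACDB ID for the
+ * given cal type; fall back to a path-only search when there is no
+ * exact match.
+ */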
+static struct cal_block_data *msm_routing_find_topology(int path,
+							int app_type,
+							int acdb_id,
+							int cal_index)
+{
+	struct list_head		*ptr, *next;
+	struct cal_block_data		*cal_block = NULL;
+	struct audio_cal_info_adm_top	*cal_info;
+	pr_debug("%s\n", __func__);
+
+	list_for_each_safe(ptr, next,
+		&cal_data[cal_index]->cal_blocks) {
+
+		cal_block = list_entry(ptr,
+			struct cal_block_data, list);
+
+		cal_info = (struct audio_cal_info_adm_top *)
+			cal_block->cal_info;
+		if ((cal_info->path == path)  &&
+			(cal_info->app_type == app_type) &&
+			(cal_info->acdb_id == acdb_id)) {
+			return cal_block;
+		}
+	}
+	pr_debug("%s: Can't find topology for path %d, app %d, acdb_id %d; defaulting to search by path\n",
+		__func__, path, app_type, acdb_id);
+	return msm_routing_find_topology_by_path(path, cal_index);
+}
+
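+/*
+ * Resolve the ADM COPP topology for an FE/BE pairing from its
+ * registered app type configuration. ADM calibration data is searched
+ * first, then LSM calibration data; NULL_COPP_TOPOLOGY is returned if
+ * neither holds a match.
+ */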
+static int msm_routing_get_adm_topology(int fedai_id, int session_type,
+					int be_id)
+{
+	int topology = NULL_COPP_TOPOLOGY;
+	struct cal_block_data *cal_block = NULL;
+	int app_type = 0, acdb_dev_id = 0;
+
+	pr_debug("%s: fedai_id %d, session_type %d, be_id %d\n",
+	       __func__, fedai_id, session_type, be_id);
+
+	if (cal_data == NULL)
+		goto done;
+
+	app_type = fe_dai_app_type_cfg[fedai_id][session_type][be_id].app_type;
+	acdb_dev_id =
+		fe_dai_app_type_cfg[fedai_id][session_type][be_id].acdb_dev_id;
+
+	mutex_lock(&cal_data[ADM_TOPOLOGY_CAL_TYPE_IDX]->lock);
+	cal_block = msm_routing_find_topology(session_type, app_type,
+					      acdb_dev_id,
+					      ADM_TOPOLOGY_CAL_TYPE_IDX);
+	if (cal_block != NULL)
+		topology = ((struct audio_cal_info_adm_top *)
+			cal_block->cal_info)->topology;
+	mutex_unlock(&cal_data[ADM_TOPOLOGY_CAL_TYPE_IDX]->lock);
+
+	if (cal_block == NULL) {
+		pr_debug("%s: Check for LSM topology\n", __func__);
+		mutex_lock(&cal_data[ADM_LSM_TOPOLOGY_CAL_TYPE_IDX]->lock);
+		cal_block = msm_routing_find_topology(session_type, app_type,
+						acdb_dev_id,
+						ADM_LSM_TOPOLOGY_CAL_TYPE_IDX);
+		if (cal_block != NULL)
+			topology = ((struct audio_cal_info_adm_top *)
+				cal_block->cal_info)->topology;
+		mutex_unlock(&cal_data[ADM_LSM_TOPOLOGY_CAL_TYPE_IDX]->lock);
+	}
+
+done:
+	pr_debug("%s: Using topology %d\n", __func__, topology);
+	return topology;
+}
+
+static uint8_t is_be_dai_extproc(int be_dai)
+{
+	if (be_dai == MSM_BACKEND_DAI_EXTPROC_RX ||
+	   be_dai == MSM_BACKEND_DAI_EXTPROC_TX ||
+	   be_dai == MSM_BACKEND_DAI_EXTPROC_EC_TX)
+		return 1;
+	else
+		return 0;
+}
+
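+/*
+ * Rebuild ADM matrix routing for a front end session: collect every
+ * COPP opened on matching active back ends into a route_payload, then
+ * map the stream to those COPPs and reconfigure post processing.
+ */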
+static void msm_pcm_routing_build_matrix(int fedai_id, int sess_type,
+					 int path_type, int perf_mode,
+					 uint32_t passthr_mode)
+{
+	int i, port_type, j, num_copps = 0;
+	struct route_payload payload = { {0} };
+
+	port_type = ((path_type == ADM_PATH_PLAYBACK ||
+		      path_type == ADM_PATH_COMPRESSED_RX) ?
+		MSM_AFE_PORT_TYPE_RX : MSM_AFE_PORT_TYPE_TX);
+
+	for (i = 0; i < MSM_BACKEND_DAI_MAX; i++) {
+		if (!is_be_dai_extproc(i) &&
+		   (afe_get_port_type(msm_bedais[i].port_id) == port_type) &&
+		   (msm_bedais[i].active) &&
+		   (test_bit(fedai_id, &msm_bedais[i].fe_sessions[0]))) {
+			for (j = 0; j < MAX_COPPS_PER_PORT; j++) {
+				unsigned long copp =
+				      session_copp_map[fedai_id][sess_type][i];
+				if (test_bit(j, &copp)) {
+					payload.port_id[num_copps] =
+							msm_bedais[i].port_id;
+					payload.copp_idx[num_copps] = j;
+					payload.app_type[num_copps] =
+						fe_dai_app_type_cfg
+							[fedai_id][sess_type][i]
+								.app_type;
+					payload.acdb_dev_id[num_copps] =
+						fe_dai_app_type_cfg
+							[fedai_id][sess_type][i]
+								.acdb_dev_id;
+					payload.sample_rate[num_copps] =
+						fe_dai_app_type_cfg
+							[fedai_id][sess_type][i]
+								.sample_rate;
+					if (msm_pcm_routing_test_pp_param(i,
+					    ADM_PP_PARAM_LIMITER_BIT))
+						set_bit(ADM_STATUS_LIMITER,
+							&payload.route_status
+							[num_copps]);
+					num_copps++;
+				}
+			}
+		}
+	}
+
+	if (num_copps) {
+		payload.num_copps = num_copps;
+		payload.session_id = fe_dai_map[fedai_id][sess_type].strm_id;
+		adm_matrix_map(path_type, payload, perf_mode, passthr_mode);
+		msm_pcm_routng_cfg_matrix_map_pp(payload, path_type, perf_mode);
+	}
+}
+
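+/*
+ * Register a pass-through stream: record the DSP stream ID for the
+ * front end and connect the first matching active back end AFE port to
+ * the stream via ADM.
+ */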
+void msm_pcm_routing_reg_psthr_stream(int fedai_id, int dspst_id,
+				      int stream_type)
+{
+	int i, session_type, path_type, port_type;
+	u32 mode = 0;
+
+	if (fedai_id > MSM_FRONTEND_DAI_MM_MAX_ID) {
+		/* bad ID assigned in machine driver */
+		pr_err("%s: bad MM ID\n", __func__);
+		return;
+	}
+
+	if (stream_type == SNDRV_PCM_STREAM_PLAYBACK) {
+		session_type = SESSION_TYPE_RX;
+		path_type = ADM_PATH_PLAYBACK;
+		port_type = MSM_AFE_PORT_TYPE_RX;
+	} else {
+		session_type = SESSION_TYPE_TX;
+		path_type = ADM_PATH_LIVE_REC;
+		port_type = MSM_AFE_PORT_TYPE_TX;
+	}
+
+	mutex_lock(&routing_lock);
+
+	fe_dai_map[fedai_id][session_type].strm_id = dspst_id;
+	for (i = 0; i < MSM_BACKEND_DAI_MAX; i++) {
+		if (!is_be_dai_extproc(i) &&
+		    (afe_get_port_type(msm_bedais[i].port_id) == port_type) &&
+		    (msm_bedais[i].active) &&
+		    (test_bit(fedai_id, &msm_bedais[i].fe_sessions[0]))) {
+			mode = afe_get_port_type(msm_bedais[i].port_id);
+			adm_connect_afe_port(mode, dspst_id,
+					     msm_bedais[i].port_id);
+			break;
+		}
+	}
+	mutex_unlock(&routing_lock);
+}
+
+static bool route_check_fe_id_adm_support(int fe_id)
+{
+	bool rc = true;
+
+	if ((fe_id >= MSM_FRONTEND_DAI_LSM1) &&
+		 (fe_id <= MSM_FRONTEND_DAI_LSM8)) {
+		/*
+		 * FE is an LSM (listen) session but the LSM mux is not
+		 * routed through ADM.
+		 */
+		if (lsm_port_index != ADM_LSM_PORT_INDEX) {
+			pr_debug("%s: fe_id %d, lsm mux slim port %d\n",
+				__func__, fe_id, lsm_port_index);
+			rc = false;
+		}
+	}
+
+	return rc;
+}
+
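+/*
+ * Register a physical compressed stream: open an ADM COPP on every
+ * matching active back end, honouring pass-through mode, app type
+ * configuration and channel overrides, then map the stream to the
+ * opened COPPs.
+ */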
+int msm_pcm_routing_reg_phy_compr_stream(int fe_id, int perf_mode,
+					  int dspst_id, int stream_type,
+					  uint32_t passthr_mode)
+{
+	int i, j, session_type, path_type, port_type, topology;
+	int num_copps = 0;
+	struct route_payload payload = { {0} };
+	u32 channels, sample_rate;
+	u16 bit_width = 16;
+	bool is_lsm;
+
+	pr_debug("%s: fe_id[%d] perf_mode[%d] id[%d] stream_type[%d] passt[%d]\n",
+		 __func__, fe_id, perf_mode, dspst_id,
+		 stream_type, passthr_mode);
+	if (!is_mm_lsm_fe_id(fe_id)) {
+		/* bad ID assigned in machine driver */
+		pr_err("%s: bad MM ID %d\n", __func__, fe_id);
+		return -EINVAL;
+	}
+
+	if (!route_check_fe_id_adm_support(fe_id)) {
+		/* ignore adm open if not supported for fe_id */
+		pr_debug("%s: No ADM support for fe id %d\n", __func__, fe_id);
+		return 0;
+	}
+
+	if (stream_type == SNDRV_PCM_STREAM_PLAYBACK) {
+		session_type = SESSION_TYPE_RX;
+		if (passthr_mode != LEGACY_PCM)
+			path_type = ADM_PATH_COMPRESSED_RX;
+		else
+			path_type = ADM_PATH_PLAYBACK;
+		port_type = MSM_AFE_PORT_TYPE_RX;
+	} else if (stream_type == SNDRV_PCM_STREAM_CAPTURE) {
+		session_type = SESSION_TYPE_TX;
+		if ((passthr_mode != LEGACY_PCM) && (passthr_mode != LISTEN))
+			path_type = ADM_PATH_COMPRESSED_TX;
+		else
+			path_type = ADM_PATH_LIVE_REC;
+		port_type = MSM_AFE_PORT_TYPE_TX;
+	} else {
+		pr_err("%s: invalid stream type %d\n", __func__, stream_type);
+		return -EINVAL;
+	}
+
+	is_lsm = (fe_id >= MSM_FRONTEND_DAI_LSM1) &&
+			 (fe_id <= MSM_FRONTEND_DAI_LSM8);
+	mutex_lock(&routing_lock);
+
+	payload.num_copps = 0; /* only RX needs to use payload */
+	fe_dai_map[fe_id][session_type].strm_id = dspst_id;
+	/* re-enable EQ if active */
+	msm_qti_pp_send_eq_values(fe_id);
+	for (i = 0; i < MSM_BACKEND_DAI_MAX; i++) {
+		if (test_bit(fe_id, &msm_bedais[i].fe_sessions[0]))
+			msm_bedais[i].passthr_mode[fe_id] = passthr_mode;
+
+		if (!is_be_dai_extproc(i) &&
+			(afe_get_port_type(msm_bedais[i].port_id) ==
+			port_type) &&
+			(msm_bedais[i].active) &&
+			(test_bit(fe_id, &msm_bedais[i].fe_sessions[0]))) {
+			int app_type, app_type_idx, copp_idx, acdb_dev_id;
+
+			/*
+			 * check if ADM needs to be configured with a
+			 * different channel mapping than the backend
+			 */
+			if (!msm_bedais[i].adm_override_ch)
+				channels = msm_bedais[i].channel;
+			else
+				channels = msm_bedais[i].adm_override_ch;
+
+			bit_width = msm_routing_get_bit_width(
+						msm_bedais[i].format);
+			app_type =
+			fe_dai_app_type_cfg[fe_id][session_type][i].app_type;
+			if (app_type && is_lsm) {
+				app_type_idx =
+				msm_pcm_routing_get_lsm_app_type_idx(app_type);
+				sample_rate =
+				fe_dai_app_type_cfg[fe_id][session_type][i]
+					.sample_rate;
+				bit_width =
+				lsm_app_type_cfg[app_type_idx].bit_width;
+			} else if (app_type) {
+				app_type_idx =
+					msm_pcm_routing_get_app_type_idx(
+						app_type);
+				sample_rate =
+			fe_dai_app_type_cfg[fe_id][session_type][i].sample_rate;
+				bit_width =
+					app_type_cfg[app_type_idx].bit_width;
+			} else {
+				sample_rate = msm_bedais[i].sample_rate;
+			}
+			acdb_dev_id =
+			fe_dai_app_type_cfg[fe_id][session_type][i].acdb_dev_id;
+			topology = msm_routing_get_adm_topology(fe_id,
+								session_type,
+								i);
+			if ((passthr_mode == COMPRESSED_PASSTHROUGH_DSD)
+			     || (passthr_mode ==
+			     COMPRESSED_PASSTHROUGH_GEN))
+				topology = COMPRESSED_PASSTHROUGH_NONE_TOPOLOGY;
+			pr_debug("%s: Before adm open topology %d\n", __func__,
+				topology);
+
+			copp_idx =
+				adm_open(msm_bedais[i].port_id,
+					 path_type, sample_rate, channels,
+					 topology, perf_mode, bit_width,
+					 app_type, acdb_dev_id);
+			if ((copp_idx < 0) ||
+				(copp_idx >= MAX_COPPS_PER_PORT)) {
+				pr_err("%s:adm open failed coppid:%d\n",
+				__func__, copp_idx);
+				mutex_unlock(&routing_lock);
+				return -EINVAL;
+			}
+			pr_debug("%s: set idx bit of fe:%d, type: %d, be:%d\n",
+				 __func__, fe_id, session_type, i);
+			set_bit(copp_idx,
+				&session_copp_map[fe_id][session_type][i]);
+
+			if (msm_is_resample_needed(
+				sample_rate,
+				msm_bedais[i].sample_rate))
+				adm_copp_mfc_cfg(
+					msm_bedais[i].port_id, copp_idx,
+					msm_bedais[i].sample_rate);
+
+			for (j = 0; j < MAX_COPPS_PER_PORT; j++) {
+				unsigned long copp =
+				session_copp_map[fe_id][session_type][i];
+				if (test_bit(j, &copp)) {
+					payload.port_id[num_copps] =
+					msm_bedais[i].port_id;
+					payload.copp_idx[num_copps] = j;
+					payload.app_type[num_copps] =
+						fe_dai_app_type_cfg
+							[fe_id][session_type][i]
+								.app_type;
+					payload.acdb_dev_id[num_copps] =
+						fe_dai_app_type_cfg
+							[fe_id][session_type][i]
+								.acdb_dev_id;
+					payload.sample_rate[num_copps] =
+						fe_dai_app_type_cfg
+							[fe_id][session_type][i]
+								.sample_rate;
+					if (msm_pcm_routing_test_pp_param(i,
+					    ADM_PP_PARAM_LIMITER_BIT))
+						set_bit(ADM_STATUS_LIMITER,
+							&payload.route_status
+							[num_copps]);
+					num_copps++;
+				}
+			}
+			if (passthr_mode != COMPRESSED_PASSTHROUGH_DSD
+			    && passthr_mode !=
+			    COMPRESSED_PASSTHROUGH_GEN) {
+				msm_routing_send_device_pp_params(
+				msm_bedais[i].port_id,
+				copp_idx, fe_id);
+			}
+		}
+	}
+	if (num_copps) {
+		payload.num_copps = num_copps;
+		payload.session_id = fe_dai_map[fe_id][session_type].strm_id;
+		adm_matrix_map(path_type, payload, perf_mode, passthr_mode);
+		msm_pcm_routng_cfg_matrix_map_pp(payload, path_type, perf_mode);
+	}
+	mutex_unlock(&routing_lock);
+	return 0;
+}
+
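+/* Map a voice front end DAI ID to its voice driver session ID. */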
+static u32 msm_pcm_routing_get_voc_sessionid(u16 val)
+{
+	u32 session_id;
+
+	switch (val) {
+	case MSM_FRONTEND_DAI_CS_VOICE:
+		session_id = voc_get_session_id(VOICE_SESSION_NAME);
+		break;
+	case MSM_FRONTEND_DAI_VOLTE:
+		session_id = voc_get_session_id(VOLTE_SESSION_NAME);
+		break;
+	case MSM_FRONTEND_DAI_VOWLAN:
+		session_id = voc_get_session_id(VOWLAN_SESSION_NAME);
+		break;
+	case MSM_FRONTEND_DAI_VOICE2:
+		session_id = voc_get_session_id(VOICE2_SESSION_NAME);
+		break;
+	case MSM_FRONTEND_DAI_QCHAT:
+		session_id = voc_get_session_id(QCHAT_SESSION_NAME);
+		break;
+	case MSM_FRONTEND_DAI_VOIP:
+		session_id = voc_get_session_id(VOIP_SESSION_NAME);
+		break;
+	case MSM_FRONTEND_DAI_VOICEMMODE1:
+		session_id = voc_get_session_id(VOICEMMODE1_NAME);
+		break;
+	case MSM_FRONTEND_DAI_VOICEMMODE2:
+		session_id = voc_get_session_id(VOICEMMODE2_NAME);
+		break;
+	default:
+		session_id = 0;
+	}
+
+	pr_debug("%s: session_id 0x%x\n", __func__, session_id);
+	return session_id;
+}
+
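+/*
+ * Program the ADM channel mixer for a front end: record the channel
+ * count of every back end listed in channel_input and, where the back
+ * end is active for this session, push the mixer configuration to the
+ * first COPP opened on it.
+ */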
+static int msm_pcm_routing_channel_mixer(int fe_id, bool perf_mode,
+				int dspst_id, int stream_type)
+{
+	int copp_idx = 0;
+	int sess_type = 0;
+	int i = 0, j = 0, be_id;
+	int ret = 0;
+
+	if (fe_id >= MSM_FRONTEND_DAI_MM_SIZE) {
+		pr_err("%s: invalid FE %d\n", __func__, fe_id);
+		return 0;
+	}
+
+	if (!(channel_mixer[fe_id].enable)) {
+		pr_debug("%s: channel mixer not enabled for FE %d\n",
+			__func__, fe_id);
+		return 0;
+	}
+
+	if (stream_type == SNDRV_PCM_STREAM_PLAYBACK)
+		sess_type = SESSION_TYPE_RX;
+	else
+		sess_type = SESSION_TYPE_TX;
+
+	for (i = 0; i < ADM_MAX_CHANNELS && channel_input[fe_id][i] > 0;
+		++i) {
+		be_id = channel_input[fe_id][i] - 1;
+		channel_mixer[fe_id].input_channels[i] =
+						msm_bedais[be_id].channel;
+
+		if ((msm_bedais[be_id].active) &&
+			test_bit(fe_id,
+			&msm_bedais[be_id].fe_sessions[0])) {
+			unsigned long copp =
+				session_copp_map[fe_id][sess_type][be_id];
+			for (j = 0; j < MAX_COPPS_PER_PORT; j++) {
+				if (test_bit(j, &copp)) {
+					copp_idx = j;
+					break;
+				}
+			}
+
+			pr_debug("%s: fe %d, be %d, channel %d, copp %d\n",
+				__func__,
+				fe_id, be_id, msm_bedais[be_id].channel,
+				copp_idx);
+			ret = adm_programable_channel_mixer(
+					msm_bedais[be_id].port_id,
+					copp_idx, dspst_id, sess_type,
+					channel_mixer + fe_id, i);
+		}
+	}
+
+	return ret;
+}
+
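+/*
+ * Register a physical PCM stream: open an ADM COPP on every matching
+ * active back end, set up resampling and post processing as needed,
+ * map the stream to the opened COPPs and apply any channel mixer
+ * configuration.
+ */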
+int msm_pcm_routing_reg_phy_stream(int fedai_id, int perf_mode,
+					int dspst_id, int stream_type)
+{
+	int i, j, session_type, path_type, port_type, topology, num_copps = 0;
+	struct route_payload payload = { {0} };
+	u32 channels, sample_rate;
+	uint16_t bits_per_sample = 16;
+	uint32_t passthr_mode = LEGACY_PCM;
+	int ret = 0;
+
+	if (fedai_id > MSM_FRONTEND_DAI_MM_MAX_ID) {
+		/* bad ID assigned in machine driver */
+		pr_err("%s: bad MM ID %d\n", __func__, fedai_id);
+		return -EINVAL;
+	}
+
+	if (stream_type == SNDRV_PCM_STREAM_PLAYBACK) {
+		session_type = SESSION_TYPE_RX;
+		path_type = ADM_PATH_PLAYBACK;
+		port_type = MSM_AFE_PORT_TYPE_RX;
+	} else {
+		session_type = SESSION_TYPE_TX;
+		path_type = ADM_PATH_LIVE_REC;
+		port_type = MSM_AFE_PORT_TYPE_TX;
+	}
+
+	mutex_lock(&routing_lock);
+
+	payload.num_copps = 0; /* only RX needs to use payload */
+	fe_dai_map[fedai_id][session_type].strm_id = dspst_id;
+	fe_dai_map[fedai_id][session_type].perf_mode = perf_mode;
+
+	/* re-enable EQ if active */
+	msm_qti_pp_send_eq_values(fedai_id);
+	for (i = 0; i < MSM_BACKEND_DAI_MAX; i++) {
+		if (!is_be_dai_extproc(i) &&
+		   (afe_get_port_type(msm_bedais[i].port_id) == port_type) &&
+		   (msm_bedais[i].active) &&
+		   (test_bit(fedai_id, &msm_bedais[i].fe_sessions[0]))) {
+			int app_type, app_type_idx, copp_idx, acdb_dev_id;
+			/*
+			 * check if ADM needs to be configured with a
+			 * different channel mapping than the backend
+			 */
+			if (!msm_bedais[i].adm_override_ch)
+				channels = msm_bedais[i].channel;
+			else
+				channels = msm_bedais[i].adm_override_ch;
+			msm_bedais[i].passthr_mode[fedai_id] =
+				LEGACY_PCM;
+
+			bits_per_sample = msm_routing_get_bit_width(
+						msm_bedais[i].format);
+
+			app_type =
+			fe_dai_app_type_cfg[fedai_id][session_type][i].app_type;
+			if (app_type) {
+				app_type_idx =
+				msm_pcm_routing_get_app_type_idx(app_type);
+				sample_rate =
+				fe_dai_app_type_cfg[fedai_id][session_type][i]
+					.sample_rate;
+				bits_per_sample =
+					app_type_cfg[app_type_idx].bit_width;
+			} else
+				sample_rate = msm_bedais[i].sample_rate;
+
+			acdb_dev_id =
+			fe_dai_app_type_cfg[fedai_id][session_type][i]
+				.acdb_dev_id;
+			topology = msm_routing_get_adm_topology(fedai_id,
+								session_type,
+								i);
+			copp_idx = adm_open(msm_bedais[i].port_id, path_type,
+					    sample_rate, channels, topology,
+					    perf_mode, bits_per_sample,
+					    app_type, acdb_dev_id);
+			if ((copp_idx < 0) ||
+				(copp_idx >= MAX_COPPS_PER_PORT)) {
+				pr_err("%s: adm open failed copp_idx:%d\n",
+					__func__, copp_idx);
+				mutex_unlock(&routing_lock);
+				return -EINVAL;
+			}
+			pr_debug("%s: setting idx bit of fe:%d, type: %d, be:%d\n",
+				 __func__, fedai_id, session_type, i);
+			set_bit(copp_idx,
+				&session_copp_map[fedai_id][session_type][i]);
+
+			if (msm_is_resample_needed(
+				sample_rate,
+				msm_bedais[i].sample_rate))
+				adm_copp_mfc_cfg(
+					msm_bedais[i].port_id, copp_idx,
+					msm_bedais[i].sample_rate);
+
+			for (j = 0; j < MAX_COPPS_PER_PORT; j++) {
+				unsigned long copp =
+				    session_copp_map[fedai_id][session_type][i];
+				if (test_bit(j, &copp)) {
+					payload.port_id[num_copps] =
+							msm_bedais[i].port_id;
+					payload.copp_idx[num_copps] = j;
+					payload.app_type[num_copps] =
+						fe_dai_app_type_cfg
+							[fedai_id][session_type]
+							[i].app_type;
+					payload.acdb_dev_id[num_copps] =
+						fe_dai_app_type_cfg
+							[fedai_id][session_type]
+							[i].acdb_dev_id;
+					payload.sample_rate[num_copps] =
+						fe_dai_app_type_cfg
+							[fedai_id][session_type]
+							[i].sample_rate;
+					if (msm_pcm_routing_test_pp_param(i,
+					    ADM_PP_PARAM_LIMITER_BIT))
+						set_bit(ADM_STATUS_LIMITER,
+							&payload.route_status
+							[num_copps]);
+					num_copps++;
+				}
+			}
+			if ((perf_mode == LEGACY_PCM_MODE) &&
+				(msm_bedais[i].passthr_mode[fedai_id] ==
+				LEGACY_PCM))
+				msm_pcm_routing_cfg_pp(msm_bedais[i].port_id,
+						       copp_idx, topology,
+						       channels);
+		}
+	}
+	if (num_copps) {
+		payload.num_copps = num_copps;
+		payload.session_id = fe_dai_map[fedai_id][session_type].strm_id;
+		adm_matrix_map(path_type, payload, perf_mode, passthr_mode);
+		msm_pcm_routng_cfg_matrix_map_pp(payload, path_type, perf_mode);
+	}
+
+	ret = msm_pcm_routing_channel_mixer(fedai_id, perf_mode,
+				dspst_id, stream_type);
+	mutex_unlock(&routing_lock);
+	return ret;
+}
+
+int msm_pcm_routing_reg_phy_stream_v2(int fedai_id, int perf_mode,
+				      int dspst_id, int stream_type,
+				      struct msm_pcm_routing_evt event_info)
+{
+	if (msm_pcm_routing_reg_phy_stream(fedai_id, perf_mode, dspst_id,
+				       stream_type)) {
+		pr_err("%s: failed to reg phy stream\n", __func__);
+		return -EINVAL;
+	}
+
+	if (stream_type == SNDRV_PCM_STREAM_PLAYBACK)
+		fe_dai_map[fedai_id][SESSION_TYPE_RX].event_info = event_info;
+	else
+		fe_dai_map[fedai_id][SESSION_TYPE_TX].event_info = event_info;
+	return 0;
+}
+
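+/*
+ * Tear down a physical PCM stream: close every COPP opened for the
+ * front end on matching back ends, de-initialize Dolby post processing
+ * where it was applied, and invalidate the stream mapping.
+ */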
+void msm_pcm_routing_dereg_phy_stream(int fedai_id, int stream_type)
+{
+	int i, port_type, session_type, path_type, topology;
+	struct msm_pcm_routing_fdai_data *fdai;
+
+	if (!is_mm_lsm_fe_id(fedai_id)) {
+		/* bad ID assigned in machine driver */
+		pr_err("%s: bad MM ID\n", __func__);
+		return;
+	}
+
+	if (stream_type == SNDRV_PCM_STREAM_PLAYBACK) {
+		port_type = MSM_AFE_PORT_TYPE_RX;
+		session_type = SESSION_TYPE_RX;
+		path_type = ADM_PATH_PLAYBACK;
+	} else {
+		port_type = MSM_AFE_PORT_TYPE_TX;
+		session_type = SESSION_TYPE_TX;
+		path_type = ADM_PATH_LIVE_REC;
+	}
+
+	mutex_lock(&routing_lock);
+	for (i = 0; i < MSM_BACKEND_DAI_MAX; i++) {
+		if (!is_be_dai_extproc(i) &&
+		   (afe_get_port_type(msm_bedais[i].port_id) == port_type) &&
+		   (msm_bedais[i].active) &&
+		   (test_bit(fedai_id, &msm_bedais[i].fe_sessions[0]))) {
+			int idx;
+			unsigned long copp =
+				session_copp_map[fedai_id][session_type][i];
+			fdai = &fe_dai_map[fedai_id][session_type];
+
+			for (idx = 0; idx < MAX_COPPS_PER_PORT; idx++)
+				if (test_bit(idx, &copp))
+					break;
+
+			if (idx >= MAX_COPPS_PER_PORT || idx < 0) {
+				pr_debug("%s: copp idx is invalid, exiting\n",
+								__func__);
+				continue;
+			}
+			topology = adm_get_topology_for_port_copp_idx(
+					msm_bedais[i].port_id, idx);
+			adm_close(msm_bedais[i].port_id, fdai->perf_mode, idx);
+			pr_debug("%s:copp:%ld,idx bit fe:%d,type:%d,be:%d\n",
+				 __func__, copp, fedai_id, session_type, i);
+			clear_bit(idx,
+				  &session_copp_map[fedai_id][session_type][i]);
+			if ((DOLBY_ADM_COPP_TOPOLOGY_ID == topology ||
+				DS2_ADM_COPP_TOPOLOGY_ID == topology) &&
+			    (fdai->perf_mode == LEGACY_PCM_MODE) &&
+			    (msm_bedais[i].passthr_mode[fedai_id] ==
+					LEGACY_PCM))
+				msm_pcm_routing_deinit_pp(msm_bedais[i].port_id,
+							  topology);
+		}
+	}
+
+	fe_dai_map[fedai_id][session_type].strm_id = INVALID_SESSION;
+	fe_dai_map[fedai_id][session_type].be_srate = 0;
+	mutex_unlock(&routing_lock);
+}
+
+/* Check if FE/BE route is set */
+static bool msm_pcm_routing_route_is_set(u16 be_id, u16 fe_id)
+{
+	bool rc = false;
+
+	if (!is_mm_lsm_fe_id(fe_id)) {
+		/* recheck FE ID in the mixer control defined in this file */
+		pr_err("%s: bad MM ID\n", __func__);
+		return rc;
+	}
+
+	if (test_bit(fe_id, &msm_bedais[be_id].fe_sessions[0]))
+		rc = true;
+
+	return rc;
+}
+
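+/*
+ * Handle an audio mixer route update: on set, mark the FE/BE
+ * connection, open a COPP for an already running session and rebuild
+ * the routing matrix; on clear, close the COPP and rebuild the matrix.
+ * Voice playback and incall record ports are started or stopped as a
+ * side effect.
+ */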
+static void msm_pcm_routing_process_audio(u16 reg, u16 val, int set)
+{
+	int session_type, path_type, topology;
+	u32 channels, sample_rate;
+	uint16_t bits_per_sample = 16;
+	struct msm_pcm_routing_fdai_data *fdai;
+	uint32_t passthr_mode;
+	bool is_lsm;
+
+	pr_debug("%s: reg %x val %x set %x\n", __func__, reg, val, set);
+
+	if (!is_mm_lsm_fe_id(val)) {
+		/* recheck FE ID in the mixer control defined in this file */
+		pr_err("%s: bad MM ID\n", __func__);
+		return;
+	}
+
+	if (!route_check_fe_id_adm_support(val)) {
+		/* ignore adm open if not supported for fe_id */
+		pr_debug("%s: No ADM support for fe id %d\n", __func__, val);
+		return;
+	}
+
+	passthr_mode = msm_bedais[reg].passthr_mode[val];
+	if (afe_get_port_type(msm_bedais[reg].port_id) ==
+		MSM_AFE_PORT_TYPE_RX) {
+		session_type = SESSION_TYPE_RX;
+		if (passthr_mode != LEGACY_PCM)
+			path_type = ADM_PATH_COMPRESSED_RX;
+		else
+			path_type = ADM_PATH_PLAYBACK;
+	} else {
+		session_type = SESSION_TYPE_TX;
+		if (passthr_mode != LEGACY_PCM)
+			path_type = ADM_PATH_COMPRESSED_TX;
+		else
+			path_type = ADM_PATH_LIVE_REC;
+	}
+	is_lsm = (val >= MSM_FRONTEND_DAI_LSM1) &&
+			 (val <= MSM_FRONTEND_DAI_LSM8);
+
+	mutex_lock(&routing_lock);
+	if (set) {
+		if (!test_bit(val, &msm_bedais[reg].fe_sessions[0]) &&
+			((msm_bedais[reg].port_id == VOICE_PLAYBACK_TX) ||
+			(msm_bedais[reg].port_id == VOICE2_PLAYBACK_TX)))
+			voc_start_playback(set, msm_bedais[reg].port_id);
+
+		set_bit(val, &msm_bedais[reg].fe_sessions[0]);
+		fdai = &fe_dai_map[val][session_type];
+		if (msm_bedais[reg].active && fdai->strm_id !=
+			INVALID_SESSION) {
+			int app_type, app_type_idx, copp_idx, acdb_dev_id;
+			/*
+			 * check if ADM needs to be configured with a
+			 * different channel mapping than the backend
+			 */
+			if (!msm_bedais[reg].adm_override_ch)
+				channels = msm_bedais[reg].channel;
+			else
+				channels = msm_bedais[reg].adm_override_ch;
+			if (session_type == SESSION_TYPE_TX &&
+			    fdai->be_srate &&
+			    (fdai->be_srate != msm_bedais[reg].sample_rate)) {
+				pr_debug("%s: flush strm %d diff BE rates\n",
+					__func__, fdai->strm_id);
+
+				if (fdai->event_info.event_func)
+					fdai->event_info.event_func(
+						MSM_PCM_RT_EVT_BUF_RECFG,
+						fdai->event_info.priv_data);
+				fdai->be_srate = 0; /* might not need it */
+			}
+
+			bits_per_sample = msm_routing_get_bit_width(
+						msm_bedais[reg].format);
+
+			app_type =
+			fe_dai_app_type_cfg[val][session_type][reg].app_type;
+			if (app_type && is_lsm) {
+				app_type_idx =
+				msm_pcm_routing_get_lsm_app_type_idx(app_type);
+				sample_rate =
+				fe_dai_app_type_cfg[val][session_type][reg]
+					.sample_rate;
+				bits_per_sample =
+				lsm_app_type_cfg[app_type_idx].bit_width;
+			} else if (app_type) {
+				app_type_idx =
+				msm_pcm_routing_get_app_type_idx(app_type);
+				sample_rate =
+				fe_dai_app_type_cfg[val][session_type][reg]
+					.sample_rate;
+				bits_per_sample =
+					app_type_cfg[app_type_idx].bit_width;
+			} else
+				sample_rate = msm_bedais[reg].sample_rate;
+
+			topology = msm_routing_get_adm_topology(val,
+								session_type,
+								reg);
+			acdb_dev_id =
+			fe_dai_app_type_cfg[val][session_type][reg].acdb_dev_id;
+			copp_idx = adm_open(msm_bedais[reg].port_id, path_type,
+					    sample_rate, channels, topology,
+					    fdai->perf_mode, bits_per_sample,
+					    app_type, acdb_dev_id);
+			if ((copp_idx < 0) ||
+			    (copp_idx >= MAX_COPPS_PER_PORT)) {
+				pr_err("%s: adm open failed\n", __func__);
+				mutex_unlock(&routing_lock);
+				return;
+			}
+			pr_debug("%s: setting idx bit of fe:%d, type: %d, be:%d\n",
+				 __func__, val, session_type, reg);
+			set_bit(copp_idx,
+				&session_copp_map[val][session_type][reg]);
+
+			if (msm_is_resample_needed(
+				sample_rate,
+				msm_bedais[reg].sample_rate))
+				adm_copp_mfc_cfg(
+					msm_bedais[reg].port_id, copp_idx,
+					msm_bedais[reg].sample_rate);
+
+			if (session_type == SESSION_TYPE_RX &&
+			    fdai->event_info.event_func)
+				fdai->event_info.event_func(
+					MSM_PCM_RT_EVT_DEVSWITCH,
+					fdai->event_info.priv_data);
+
+			msm_pcm_routing_build_matrix(val, session_type,
+						     path_type,
+						     fdai->perf_mode,
+						     passthr_mode);
+			if ((fdai->perf_mode == LEGACY_PCM_MODE) &&
+				(passthr_mode == LEGACY_PCM))
+				msm_pcm_routing_cfg_pp(msm_bedais[reg].port_id,
+						       copp_idx, topology,
+						       channels);
+		}
+	} else {
+		if (test_bit(val, &msm_bedais[reg].fe_sessions[0]) &&
+			((msm_bedais[reg].port_id == VOICE_PLAYBACK_TX) ||
+			(msm_bedais[reg].port_id == VOICE2_PLAYBACK_TX)))
+			voc_start_playback(set, msm_bedais[reg].port_id);
+		clear_bit(val, &msm_bedais[reg].fe_sessions[0]);
+		fdai = &fe_dai_map[val][session_type];
+		if (msm_bedais[reg].active && fdai->strm_id !=
+			INVALID_SESSION) {
+			int idx;
+			int port_id;
+			unsigned long copp =
+				session_copp_map[val][session_type][reg];
+			for (idx = 0; idx < MAX_COPPS_PER_PORT; idx++)
+				if (test_bit(idx, &copp))
+					break;
+
+			if (idx >= MAX_COPPS_PER_PORT) {
+				pr_debug("%s: copp idx not found, exiting\n",
+					 __func__);
+				mutex_unlock(&routing_lock);
+				return;
+			}
+
+			port_id = msm_bedais[reg].port_id;
+			topology = adm_get_topology_for_port_copp_idx(port_id,
+								      idx);
+			adm_close(msm_bedais[reg].port_id, fdai->perf_mode,
+				  idx);
+			pr_debug("%s: copp: %ld, reset idx bit fe:%d, type: %d, be:%d topology=0x%x\n",
+				 __func__, copp, val, session_type, reg,
+				 topology);
+			clear_bit(idx,
+				  &session_copp_map[val][session_type][reg]);
+			if ((DOLBY_ADM_COPP_TOPOLOGY_ID == topology ||
+				DS2_ADM_COPP_TOPOLOGY_ID == topology) &&
+			    (fdai->perf_mode == LEGACY_PCM_MODE) &&
+			    (passthr_mode == LEGACY_PCM))
+				msm_pcm_routing_deinit_pp(
+						msm_bedais[reg].port_id,
+						topology);
+			msm_pcm_routing_build_matrix(val, session_type,
+						     path_type,
+						     fdai->perf_mode,
+						     passthr_mode);
+		}
+	}
+	if ((msm_bedais[reg].port_id == VOICE_RECORD_RX)
+			|| (msm_bedais[reg].port_id == VOICE_RECORD_TX))
+		voc_start_record(msm_bedais[reg].port_id, set, voc_session_id);
+
+	mutex_unlock(&routing_lock);
+}
+
+static int msm_routing_get_audio_mixer(struct snd_kcontrol *kcontrol,
+				struct snd_ctl_elem_value *ucontrol)
+{
+	struct soc_mixer_control *mc =
+	(struct soc_mixer_control *)kcontrol->private_value;
+
+	if (test_bit(mc->shift, &msm_bedais[mc->reg].fe_sessions[0]))
+		ucontrol->value.integer.value[0] = 1;
+	else
+		ucontrol->value.integer.value[0] = 0;
+
+	pr_debug("%s: reg %x shift %x val %ld\n", __func__, mc->reg, mc->shift,
+	ucontrol->value.integer.value[0]);
+
+	return 0;
+}
+
+static int msm_routing_put_audio_mixer(struct snd_kcontrol *kcontrol,
+			struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_dapm_widget_list *wlist =
+					dapm_kcontrol_get_wlist(kcontrol);
+	struct snd_soc_dapm_widget *widget = wlist->widgets[0];
+	struct soc_mixer_control *mc =
+		(struct soc_mixer_control *)kcontrol->private_value;
+	struct snd_soc_dapm_update *update = NULL;
+
+	if (ucontrol->value.integer.value[0] &&
+	   msm_pcm_routing_route_is_set(mc->reg, mc->shift) == false) {
+		msm_pcm_routing_process_audio(mc->reg, mc->shift, 1);
+		snd_soc_dapm_mixer_update_power(widget->dapm, kcontrol, 1, update);
+	} else if (!ucontrol->value.integer.value[0] &&
+		  msm_pcm_routing_route_is_set(mc->reg, mc->shift) == true) {
+		msm_pcm_routing_process_audio(mc->reg, mc->shift, 0);
+		snd_soc_dapm_mixer_update_power(widget->dapm, kcontrol, 0, update);
+	}
+
+	return 1;
+}
+
+static int msm_routing_get_listen_mixer(struct snd_kcontrol *kcontrol,
+				struct snd_ctl_elem_value *ucontrol)
+{
+	struct soc_mixer_control *mc =
+	(struct soc_mixer_control *)kcontrol->private_value;
+
+	if (test_bit(mc->shift, &msm_bedais[mc->reg].fe_sessions[0]))
+		ucontrol->value.integer.value[0] = 1;
+	else
+		ucontrol->value.integer.value[0] = 0;
+
+	pr_debug("%s: reg %x shift %x val %ld\n", __func__, mc->reg, mc->shift,
+		ucontrol->value.integer.value[0]);
+
+	return 0;
+}
+
+static int msm_routing_put_listen_mixer(struct snd_kcontrol *kcontrol,
+			struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_dapm_widget_list *wlist =
+					dapm_kcontrol_get_wlist(kcontrol);
+	struct snd_soc_dapm_widget *widget = wlist->widgets[0];
+	struct soc_mixer_control *mc =
+		(struct soc_mixer_control *)kcontrol->private_value;
+	struct snd_soc_dapm_update *update = NULL;
+
+	pr_debug("%s: reg %x shift %x val %ld\n", __func__, mc->reg, mc->shift,
+		ucontrol->value.integer.value[0]);
+
+	if (ucontrol->value.integer.value[0]) {
+		if (msm_pcm_routing_route_is_set(mc->reg, mc->shift) == false)
+			msm_pcm_routing_process_audio(mc->reg, mc->shift, 1);
+		snd_soc_dapm_mixer_update_power(widget->dapm,
+						kcontrol, 1, update);
+	} else if (!ucontrol->value.integer.value[0]) {
+		if (msm_pcm_routing_route_is_set(mc->reg, mc->shift) == true)
+			msm_pcm_routing_process_audio(mc->reg, mc->shift, 0);
+		snd_soc_dapm_mixer_update_power(widget->dapm,
+						kcontrol, 0, update);
+	}
+
+	return 1;
+}
+
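+/*
+ * Handle a voice mixer route update: track the FE/BE connection,
+ * configure DTMF generation where applicable, push the back end media
+ * format to the voice driver, and enable the voice device once both RX
+ * and TX paths are routed (or disable it when a path is cleared).
+ */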
+static void msm_pcm_routing_process_voice(u16 reg, u16 val, int set)
+{
+	u32 session_id = 0;
+	u16 path_type;
+	struct media_format_info voc_be_media_format;
+
+	pr_debug("%s: reg %x val %x set %x\n", __func__, reg, val, set);
+
+	session_id = msm_pcm_routing_get_voc_sessionid(val);
+
+	pr_debug("%s: FE DAI 0x%x session_id 0x%x\n",
+		__func__, val, session_id);
+
+	mutex_lock(&routing_lock);
+
+	if (set)
+		set_bit(val, &msm_bedais[reg].fe_sessions[0]);
+	else
+		clear_bit(val, &msm_bedais[reg].fe_sessions[0]);
+
+	if (val == MSM_FRONTEND_DAI_DTMF_RX &&
+	    afe_get_port_type(msm_bedais[reg].port_id) ==
+						MSM_AFE_PORT_TYPE_RX) {
+		pr_debug("%s(): set=%d port id=0x%x for dtmf generation\n",
+			 __func__, set, msm_bedais[reg].port_id);
+		afe_set_dtmf_gen_rx_portid(msm_bedais[reg].port_id, set);
+	}
+
+	if (afe_get_port_type(msm_bedais[reg].port_id) ==
+						MSM_AFE_PORT_TYPE_RX)
+		path_type = RX_PATH;
+	else
+		path_type = TX_PATH;
+
+	if (set) {
+		if (msm_bedais[reg].active) {
+			voc_set_route_flag(session_id, path_type, 1);
+
+			memset(&voc_be_media_format, 0,
+			       sizeof(struct media_format_info));
+
+			voc_be_media_format.port_id = msm_bedais[reg].port_id;
+			voc_be_media_format.num_channels =
+						msm_bedais[reg].channel;
+			voc_be_media_format.sample_rate =
+						msm_bedais[reg].sample_rate;
+			voc_be_media_format.bits_per_sample =
+						msm_bedais[reg].format;
+			/* Defaulting this to 1 for voice call usecases */
+			voc_be_media_format.channel_mapping[0] = 1;
+
+			voc_set_device_config(session_id, path_type,
+					      &voc_be_media_format);
+
+			if (voc_get_route_flag(session_id, TX_PATH) &&
+				voc_get_route_flag(session_id, RX_PATH))
+				voc_enable_device(session_id);
+		} else {
+			pr_debug("%s BE is not active\n", __func__);
+		}
+	} else {
+		voc_set_route_flag(session_id, path_type, 0);
+		voc_disable_device(session_id);
+	}
+
+	mutex_unlock(&routing_lock);
+}
+
+static int msm_routing_get_voice_mixer(struct snd_kcontrol *kcontrol,
+				struct snd_ctl_elem_value *ucontrol)
+{
+	struct soc_mixer_control *mc =
+	(struct soc_mixer_control *)kcontrol->private_value;
+
+	mutex_lock(&routing_lock);
+
+	if (test_bit(mc->shift, &msm_bedais[mc->reg].fe_sessions[0]))
+		ucontrol->value.integer.value[0] = 1;
+	else
+		ucontrol->value.integer.value[0] = 0;
+
+	mutex_unlock(&routing_lock);
+
+	pr_debug("%s: reg %x shift %x val %ld\n", __func__, mc->reg, mc->shift,
+			ucontrol->value.integer.value[0]);
+
+	return 0;
+}
+
+static int msm_routing_put_voice_mixer(struct snd_kcontrol *kcontrol,
+				struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_dapm_widget_list *wlist =
+					dapm_kcontrol_get_wlist(kcontrol);
+	struct snd_soc_dapm_widget *widget = wlist->widgets[0];
+	struct soc_mixer_control *mc =
+		(struct soc_mixer_control *)kcontrol->private_value;
+	struct snd_soc_dapm_update *update = NULL;
+
+	if (ucontrol->value.integer.value[0]) {
+		msm_pcm_routing_process_voice(mc->reg, mc->shift, 1);
+		snd_soc_dapm_mixer_update_power(widget->dapm, kcontrol, 1, update);
+	} else {
+		msm_pcm_routing_process_voice(mc->reg, mc->shift, 0);
+		snd_soc_dapm_mixer_update_power(widget->dapm, kcontrol, 0, update);
+	}
+
+	return 1;
+}
+
+static int msm_routing_get_voice_stub_mixer(struct snd_kcontrol *kcontrol,
+		struct snd_ctl_elem_value *ucontrol)
+{
+	struct soc_mixer_control *mc =
+		(struct soc_mixer_control *)kcontrol->private_value;
+
+	mutex_lock(&routing_lock);
+
+	if (test_bit(mc->shift, &msm_bedais[mc->reg].fe_sessions[0]))
+		ucontrol->value.integer.value[0] = 1;
+	else
+		ucontrol->value.integer.value[0] = 0;
+
+	mutex_unlock(&routing_lock);
+
+	pr_debug("%s: reg %x shift %x val %ld\n", __func__, mc->reg, mc->shift,
+		ucontrol->value.integer.value[0]);
+
+	return 0;
+}
+
+static int msm_routing_put_voice_stub_mixer(struct snd_kcontrol *kcontrol,
+		struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_dapm_widget_list *wlist =
+					dapm_kcontrol_get_wlist(kcontrol);
+	struct snd_soc_dapm_widget *widget = wlist->widgets[0];
+	struct soc_mixer_control *mc =
+		(struct soc_mixer_control *)kcontrol->private_value;
+	struct snd_soc_dapm_update *update = NULL;
+
+	if (ucontrol->value.integer.value[0]) {
+		mutex_lock(&routing_lock);
+		set_bit(mc->shift, &msm_bedais[mc->reg].fe_sessions[0]);
+		mutex_unlock(&routing_lock);
+
+		snd_soc_dapm_mixer_update_power(widget->dapm, kcontrol, 1, update);
+	} else {
+		mutex_lock(&routing_lock);
+		clear_bit(mc->shift, &msm_bedais[mc->reg].fe_sessions[0]);
+		mutex_unlock(&routing_lock);
+
+		snd_soc_dapm_mixer_update_power(widget->dapm, kcontrol, 0, update);
+	}
+
+	pr_debug("%s: reg %x shift %x val %ld\n", __func__, mc->reg, mc->shift,
+		ucontrol->value.integer.value[0]);
+
+	return 1;
+}
+
+/*
+ * Return the back end DAI ID mapped to the given AFE port ID so the
+ * AFE callback can derive the acdb_dev_id from the port ID.
+ */
+int msm_pcm_get_be_id_from_port_id(int port_id)
+{
+	int i;
+	int be_id = -EINVAL;
+
+	for (i = 0; i < MSM_BACKEND_DAI_MAX; i++) {
+		if (msm_bedais[i].port_id == port_id) {
+			be_id = i;
+			break;
+		}
+	}
+
+	return be_id;
+}
+
+/*
+ * Return the acdb_dev_id registered for the given port ID so the
+ * correct AFE calibration can be identified from its header
+ * information.
+ */
+static int msm_pcm_get_dev_acdb_id_by_port_id(int port_id)
+{
+	int acdb_id = -EINVAL;
+	int i = 0;
+	int session;
+	int port_type = afe_get_port_type(port_id);
+	int be_id = msm_pcm_get_be_id_from_port_id(port_id);
+
+	pr_debug("%s:port_id %d be_id %d, port_type 0x%x\n",
+		  __func__, port_id, be_id, port_type);
+
+	if (port_type == MSM_AFE_PORT_TYPE_TX) {
+		session = SESSION_TYPE_TX;
+	} else if (port_type == MSM_AFE_PORT_TYPE_RX) {
+		session = SESSION_TYPE_RX;
+	} else {
+		pr_err("%s: Invalid port type %d\n", __func__, port_type);
+		acdb_id = -EINVAL;
+		goto exit;
+	}
+
+	if (be_id < 0) {
+		pr_err("%s: Error getting backend id %d\n", __func__, be_id);
+		goto exit;
+	}
+
+	mutex_lock(&routing_lock);
+	i = find_first_bit(&msm_bedais[be_id].fe_sessions[0],
+			   MSM_FRONTEND_DAI_MAX);
+	if (i < MSM_FRONTEND_DAI_MAX)
+		acdb_id = fe_dai_app_type_cfg[i][session][be_id].acdb_dev_id;
+
+	pr_debug("%s: FE[%d] session[%d] BE[%d] acdb_id(%d)\n",
+		 __func__, i, session, be_id, acdb_id);
+	mutex_unlock(&routing_lock);
+exit:
+	return acdb_id;
+}
+
+static int msm_routing_get_switch_mixer(struct snd_kcontrol *kcontrol,
+				struct snd_ctl_elem_value *ucontrol)
+{
+	ucontrol->value.integer.value[0] = fm_switch_enable;
+	pr_debug("%s: FM Switch enable %ld\n", __func__,
+		ucontrol->value.integer.value[0]);
+	return 0;
+}
+
+static int msm_routing_put_switch_mixer(struct snd_kcontrol *kcontrol,
+				struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_dapm_widget_list *wlist =
+					dapm_kcontrol_get_wlist(kcontrol);
+	struct snd_soc_dapm_widget *widget = wlist->widgets[0];
+	struct snd_soc_dapm_update *update = NULL;
+
+	pr_debug("%s: FM Switch enable %ld\n", __func__,
+			ucontrol->value.integer.value[0]);
+	if (ucontrol->value.integer.value[0])
+		snd_soc_dapm_mixer_update_power(widget->dapm, kcontrol, 1, update);
+	else
+		snd_soc_dapm_mixer_update_power(widget->dapm, kcontrol, 0, update);
+	fm_switch_enable = ucontrol->value.integer.value[0];
+	return 1;
+}
+
+static int msm_routing_get_hfp_switch_mixer(struct snd_kcontrol *kcontrol,
+				struct snd_ctl_elem_value *ucontrol)
+{
+	ucontrol->value.integer.value[0] = hfp_switch_enable;
+	pr_debug("%s: HFP Switch enable %ld\n", __func__,
+		ucontrol->value.integer.value[0]);
+	return 0;
+}
+
+static int msm_routing_put_hfp_switch_mixer(struct snd_kcontrol *kcontrol,
+				struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_dapm_widget_list *wlist =
+					dapm_kcontrol_get_wlist(kcontrol);
+	struct snd_soc_dapm_widget *widget = wlist->widgets[0];
+	struct snd_soc_dapm_update *update = NULL;
+
+	pr_debug("%s: HFP Switch enable %ld\n", __func__,
+			ucontrol->value.integer.value[0]);
+	if (ucontrol->value.integer.value[0])
+		snd_soc_dapm_mixer_update_power(widget->dapm, kcontrol,
+						1, update);
+	else
+		snd_soc_dapm_mixer_update_power(widget->dapm, kcontrol,
+						0, update);
+	hfp_switch_enable = ucontrol->value.integer.value[0];
+	return 1;
+}
+
+static int msm_routing_get_int0_mi2s_switch_mixer(struct snd_kcontrol *kcontrol,
+				struct snd_ctl_elem_value *ucontrol)
+{
+	ucontrol->value.integer.value[0] = int0_mi2s_switch_enable;
+	pr_debug("%s: INT0 MI2S Switch enable %ld\n", __func__,
+		ucontrol->value.integer.value[0]);
+	return 0;
+}
+
+static int msm_routing_put_int0_mi2s_switch_mixer(struct snd_kcontrol *kcontrol,
+				struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_dapm_widget_list *wlist =
+					dapm_kcontrol_get_wlist(kcontrol);
+	struct snd_soc_dapm_widget *widget = wlist->widgets[0];
+	struct snd_soc_dapm_update *update = NULL;
+
+	pr_debug("%s: INT0 MI2S Switch enable %ld\n", __func__,
+			ucontrol->value.integer.value[0]);
+	if (ucontrol->value.integer.value[0])
+		snd_soc_dapm_mixer_update_power(widget->dapm, kcontrol, 1,
+						update);
+	else
+		snd_soc_dapm_mixer_update_power(widget->dapm, kcontrol, 0,
+						update);
+	int0_mi2s_switch_enable = ucontrol->value.integer.value[0];
+	return 1;
+}
+
+static int msm_routing_get_int4_mi2s_switch_mixer(struct snd_kcontrol *kcontrol,
+				struct snd_ctl_elem_value *ucontrol)
+{
+	ucontrol->value.integer.value[0] = int4_mi2s_switch_enable;
+	pr_debug("%s: INT4 MI2S Switch enable %ld\n", __func__,
+		ucontrol->value.integer.value[0]);
+	return 0;
+}
+
+static int msm_routing_put_int4_mi2s_switch_mixer(struct snd_kcontrol *kcontrol,
+				struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_dapm_widget_list *wlist =
+					dapm_kcontrol_get_wlist(kcontrol);
+	struct snd_soc_dapm_widget *widget = wlist->widgets[0];
+	struct snd_soc_dapm_update *update = NULL;
+
+	pr_debug("%s: INT4 MI2S Switch enable %ld\n", __func__,
+			ucontrol->value.integer.value[0]);
+	if (ucontrol->value.integer.value[0])
+		snd_soc_dapm_mixer_update_power(widget->dapm, kcontrol, 1,
+						update);
+	else
+		snd_soc_dapm_mixer_update_power(widget->dapm, kcontrol, 0,
+						update);
+	int4_mi2s_switch_enable = ucontrol->value.integer.value[0];
+	return 1;
+}
+
+static int msm_routing_get_usb_switch_mixer(struct snd_kcontrol *kcontrol,
+				struct snd_ctl_elem_value *ucontrol)
+{
+	ucontrol->value.integer.value[0] = usb_switch_enable;
+	pr_debug("%s: USB Switch enable %ld\n", __func__,
+		ucontrol->value.integer.value[0]);
+	return 0;
+}
+
+static int msm_routing_put_usb_switch_mixer(struct snd_kcontrol *kcontrol,
+				struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_dapm_widget_list *wlist =
+					dapm_kcontrol_get_wlist(kcontrol);
+	struct snd_soc_dapm_widget *widget = wlist->widgets[0];
+	struct snd_soc_dapm_update *update = NULL;
+
+	pr_debug("%s: USB Switch enable %ld\n", __func__,
+			ucontrol->value.integer.value[0]);
+	if (ucontrol->value.integer.value[0])
+		snd_soc_dapm_mixer_update_power(widget->dapm, kcontrol,
+						1, update);
+	else
+		snd_soc_dapm_mixer_update_power(widget->dapm, kcontrol,
+						0, update);
+	usb_switch_enable = ucontrol->value.integer.value[0];
+	return 1;
+}
+
+static int msm_routing_get_pri_mi2s_switch_mixer(struct snd_kcontrol *kcontrol,
+				struct snd_ctl_elem_value *ucontrol)
+{
+	ucontrol->value.integer.value[0] = pri_mi2s_switch_enable;
+	pr_debug("%s: PRI MI2S Switch enable %ld\n", __func__,
+		ucontrol->value.integer.value[0]);
+	return 0;
+}
+
+static int msm_routing_put_pri_mi2s_switch_mixer(struct snd_kcontrol *kcontrol,
+				struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_dapm_widget_list *wlist =
+					dapm_kcontrol_get_wlist(kcontrol);
+	struct snd_soc_dapm_widget *widget = wlist->widgets[0];
+	struct snd_soc_dapm_update *update = NULL;
+
+	pr_debug("%s: PRI MI2S Switch enable %ld\n", __func__,
+			ucontrol->value.integer.value[0]);
+	if (ucontrol->value.integer.value[0])
+		snd_soc_dapm_mixer_update_power(widget->dapm, kcontrol, 1,
+						update);
+	else
+		snd_soc_dapm_mixer_update_power(widget->dapm, kcontrol, 0,
+						update);
+	pri_mi2s_switch_enable = ucontrol->value.integer.value[0];
+	return 1;
+}
+
+static int msm_routing_get_sec_mi2s_switch_mixer(struct snd_kcontrol *kcontrol,
+				struct snd_ctl_elem_value *ucontrol)
+{
+	ucontrol->value.integer.value[0] = sec_mi2s_switch_enable;
+	pr_debug("%s: SEC MI2S Switch enable %ld\n", __func__,
+		ucontrol->value.integer.value[0]);
+	return 0;
+}
+
+static int msm_routing_put_sec_mi2s_switch_mixer(struct snd_kcontrol *kcontrol,
+				struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_dapm_widget_list *wlist =
+					dapm_kcontrol_get_wlist(kcontrol);
+	struct snd_soc_dapm_widget *widget = wlist->widgets[0];
+	struct snd_soc_dapm_update *update = NULL;
+
+	pr_debug("%s: SEC MI2S Switch enable %ld\n", __func__,
+			ucontrol->value.integer.value[0]);
+	if (ucontrol->value.integer.value[0])
+		snd_soc_dapm_mixer_update_power(widget->dapm, kcontrol, 1,
+						update);
+	else
+		snd_soc_dapm_mixer_update_power(widget->dapm, kcontrol, 0,
+						update);
+	sec_mi2s_switch_enable = ucontrol->value.integer.value[0];
+	return 1;
+}
+
+static int msm_routing_get_tert_mi2s_switch_mixer(
+				struct snd_kcontrol *kcontrol,
+				struct snd_ctl_elem_value *ucontrol)
+{
+	ucontrol->value.integer.value[0] = tert_mi2s_switch_enable;
+	pr_debug("%s: TERT MI2S Switch enable %ld\n", __func__,
+		ucontrol->value.integer.value[0]);
+	return 0;
+}
+
+static int msm_routing_put_tert_mi2s_switch_mixer(
+				struct snd_kcontrol *kcontrol,
+				struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_dapm_widget_list *wlist =
+					dapm_kcontrol_get_wlist(kcontrol);
+	struct snd_soc_dapm_widget *widget = wlist->widgets[0];
+	struct snd_soc_dapm_update *update = NULL;
+
+	pr_debug("%s: TERT MI2S Switch enable %ld\n", __func__,
+			ucontrol->value.integer.value[0]);
+	if (ucontrol->value.integer.value[0])
+		snd_soc_dapm_mixer_update_power(widget->dapm, kcontrol, 1,
+						update);
+	else
+		snd_soc_dapm_mixer_update_power(widget->dapm, kcontrol, 0,
+						update);
+	tert_mi2s_switch_enable = ucontrol->value.integer.value[0];
+	return 1;
+}
+
+static int msm_routing_get_quat_mi2s_switch_mixer(
+				struct snd_kcontrol *kcontrol,
+				struct snd_ctl_elem_value *ucontrol)
+{
+	ucontrol->value.integer.value[0] = quat_mi2s_switch_enable;
+	pr_debug("%s: QUAT MI2S Switch enable %ld\n", __func__,
+		ucontrol->value.integer.value[0]);
+	return 0;
+}
+
+static int msm_routing_put_quat_mi2s_switch_mixer(
+				struct snd_kcontrol *kcontrol,
+				struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_dapm_widget_list *wlist =
+					dapm_kcontrol_get_wlist(kcontrol);
+	struct snd_soc_dapm_widget *widget = wlist->widgets[0];
+	struct snd_soc_dapm_update *update = NULL;
+
+	pr_debug("%s: QUAT MI2S Switch enable %ld\n", __func__,
+			ucontrol->value.integer.value[0]);
+	if (ucontrol->value.integer.value[0])
+		snd_soc_dapm_mixer_update_power(widget->dapm, kcontrol, 1,
+						update);
+	else
+		snd_soc_dapm_mixer_update_power(widget->dapm, kcontrol, 0,
+						update);
+	quat_mi2s_switch_enable = ucontrol->value.integer.value[0];
+	return 1;
+}
+
+static int msm_routing_get_fm_pcmrx_switch_mixer(struct snd_kcontrol *kcontrol,
+				struct snd_ctl_elem_value *ucontrol)
+{
+	ucontrol->value.integer.value[0] = fm_pcmrx_switch_enable;
+	pr_debug("%s: FM Switch enable %ld\n", __func__,
+		ucontrol->value.integer.value[0]);
+	return 0;
+}
+
+static int msm_routing_put_fm_pcmrx_switch_mixer(struct snd_kcontrol *kcontrol,
+				struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_dapm_widget_list *wlist =
+					dapm_kcontrol_get_wlist(kcontrol);
+	struct snd_soc_dapm_widget *widget = wlist->widgets[0];
+	struct snd_soc_dapm_update *update = NULL;
+
+	pr_debug("%s: FM Switch enable %ld\n", __func__,
+			ucontrol->value.integer.value[0]);
+	if (ucontrol->value.integer.value[0])
+		snd_soc_dapm_mixer_update_power(widget->dapm, kcontrol, 1, update);
+	else
+		snd_soc_dapm_mixer_update_power(widget->dapm, kcontrol, 0, update);
+	fm_pcmrx_switch_enable = ucontrol->value.integer.value[0];
+	return 1;
+}
+
+static int msm_routing_lsm_port_get(struct snd_kcontrol *kcontrol,
+				   struct snd_ctl_elem_value *ucontrol)
+{
+	ucontrol->value.integer.value[0] = lsm_port_index;
+	return 0;
+}
+
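+/*
+ * Select the AFE port used for listen (LSM): translate the mux index
+ * chosen by userspace into an AFE port ID and remember the index so
+ * route_check_fe_id_adm_support() can gate ADM opens for LSM front
+ * ends.
+ */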
+static int msm_routing_lsm_port_put(struct snd_kcontrol *kcontrol,
+				   struct snd_ctl_elem_value *ucontrol)
+{
+	struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
+	int mux = ucontrol->value.enumerated.item[0];
+	int lsm_port = AFE_PORT_ID_SLIMBUS_MULTI_CHAN_5_TX;
+
+	if (mux >= e->items) {
+		pr_err("%s: Invalid mux value %d\n", __func__, mux);
+		return -EINVAL;
+	}
+
+	pr_debug("%s: LSM enable %ld\n", __func__,
+			ucontrol->value.integer.value[0]);
+	switch (ucontrol->value.integer.value[0]) {
+	case 1:
+		lsm_port = AFE_PORT_ID_SLIMBUS_MULTI_CHAN_0_TX;
+		break;
+	case 2:
+		lsm_port = AFE_PORT_ID_SLIMBUS_MULTI_CHAN_1_TX;
+		break;
+	case 3:
+		lsm_port = AFE_PORT_ID_SLIMBUS_MULTI_CHAN_2_TX;
+		break;
+	case 4:
+		lsm_port = AFE_PORT_ID_SLIMBUS_MULTI_CHAN_3_TX;
+		break;
+	case 5:
+		lsm_port = AFE_PORT_ID_SLIMBUS_MULTI_CHAN_4_TX;
+		break;
+	case 6:
+		lsm_port = AFE_PORT_ID_SLIMBUS_MULTI_CHAN_5_TX;
+		break;
+	case 7:
+		lsm_port = AFE_PORT_ID_TERTIARY_MI2S_TX;
+		break;
+	case 8:
+		lsm_port = AFE_PORT_ID_QUATERNARY_MI2S_TX;
+		break;
+	case 9:
+		lsm_port = ADM_LSM_PORT_ID;
+		break;
+	case 10:
+		lsm_port = AFE_PORT_ID_INT3_MI2S_TX;
+		break;
+	default:
+		pr_err("%s: Using default LSM port\n", __func__);
+		break;
+	}
+	set_lsm_port(lsm_port);
+	lsm_port_index = ucontrol->value.integer.value[0];
+
+	return 0;
+}
+
+static int msm_routing_lsm_func_get(struct snd_kcontrol *kcontrol,
+				    struct snd_ctl_elem_value *ucontrol)
+{
+	int i;
+	u16 port_id;
+	enum afe_mad_type mad_type;
+
+	pr_debug("%s: enter\n", __func__);
+	for (i = 0; i < ARRAY_SIZE(lsm_port_text); i++)
+		if (strnstr(kcontrol->id.name, lsm_port_text[i],
+			    strlen(lsm_port_text[i])))
+			break;
+
+	if (i-- == ARRAY_SIZE(lsm_port_text)) {
+		WARN(1, "Invalid id name %s\n", kcontrol->id.name);
+		return -EINVAL;
+	}
+
+	port_id = i * 2 + 1 + SLIMBUS_0_RX;
+
+	/* Check for Tertiary/Quaternary/INT3 TX port */
+	if (strnstr(kcontrol->id.name, lsm_port_text[7],
+			strlen(lsm_port_text[7])))
+		port_id = AFE_PORT_ID_TERTIARY_MI2S_TX;
+
+	if (strnstr(kcontrol->id.name, lsm_port_text[8],
+			strlen(lsm_port_text[8])))
+		port_id = AFE_PORT_ID_QUATERNARY_MI2S_TX;
+
+	if (strnstr(kcontrol->id.name, lsm_port_text[10],
+			strlen(lsm_port_text[10])))
+		port_id = AFE_PORT_ID_INT3_MI2S_TX;
+
+	mad_type = afe_port_get_mad_type(port_id);
+	pr_debug("%s: port_id 0x%x, mad_type %d\n", __func__, port_id,
+		 mad_type);
+	switch (mad_type) {
+	case MAD_HW_NONE:
+		ucontrol->value.integer.value[0] = MADNONE;
+		break;
+	case MAD_HW_AUDIO:
+		ucontrol->value.integer.value[0] = MADAUDIO;
+		break;
+	case MAD_HW_BEACON:
+		ucontrol->value.integer.value[0] = MADBEACON;
+		break;
+	case MAD_HW_ULTRASOUND:
+		ucontrol->value.integer.value[0] = MADULTRASOUND;
+		break;
+	case MAD_SW_AUDIO:
+		ucontrol->value.integer.value[0] = MADSWAUDIO;
+	break;
+	default:
+		WARN(1, "Unknown\n");
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static int msm_routing_lsm_func_put(struct snd_kcontrol *kcontrol,
+				    struct snd_ctl_elem_value *ucontrol)
+{
+	int i;
+	u16 port_id;
+	enum afe_mad_type mad_type;
+
+	pr_debug("%s: enter\n", __func__);
+	for (i = 0; i < ARRAY_SIZE(lsm_port_text); i++)
+		if (strnstr(kcontrol->id.name, lsm_port_text[i],
+			    strlen(lsm_port_text[i])))
+			break;
+
+	if (i-- == ARRAY_SIZE(lsm_port_text)) {
+		WARN(1, "Invalid id name %s\n", kcontrol->id.name);
+		return -EINVAL;
+	}
+
+	port_id = i * 2 + 1 + SLIMBUS_0_RX;
+	switch (ucontrol->value.integer.value[0]) {
+	case MADNONE:
+		mad_type = MAD_HW_NONE;
+		break;
+	case MADAUDIO:
+		mad_type = MAD_HW_AUDIO;
+		break;
+	case MADBEACON:
+		mad_type = MAD_HW_BEACON;
+		break;
+	case MADULTRASOUND:
+		mad_type = MAD_HW_ULTRASOUND;
+		break;
+	case MADSWAUDIO:
+		mad_type = MAD_SW_AUDIO;
+		break;
+	default:
+		WARN(1, "Unknown\n");
+		return -EINVAL;
+	}
+
+	/* Check for Tertiary/Quaternary/INT3 TX port */
+	if (strnstr(kcontrol->id.name, lsm_port_text[7],
+			strlen(lsm_port_text[7])))
+		port_id = AFE_PORT_ID_TERTIARY_MI2S_TX;
+
+	if (strnstr(kcontrol->id.name, lsm_port_text[8],
+			strlen(lsm_port_text[8])))
+		port_id = AFE_PORT_ID_QUATERNARY_MI2S_TX;
+
+	if (strnstr(kcontrol->id.name, lsm_port_text[10],
+			strlen(lsm_port_text[10])))
+		port_id = AFE_PORT_ID_INT3_MI2S_TX;
+
+	pr_debug("%s: port_id 0x%x, mad_type %d\n", __func__, port_id,
+		 mad_type);
+	return afe_port_set_mad_type(port_id, mad_type);
+}
+
+static const char *const adm_override_chs_text[] = {
+	"Zero", "One", "Two", "Three", "Four", "Five", "Six"
+};
+
+static SOC_ENUM_SINGLE_EXT_DECL(slim_7_rx_adm_override_chs,
+				adm_override_chs_text);
+
+/* FFV solution */
+static SOC_ENUM_SINGLE_EXT_DECL(sec_tdm_rx_0_adm_override_chs,
+				adm_override_chs_text);
+static SOC_ENUM_SINGLE_EXT_DECL(prim_mi2s_rx_adm_override_chs,
+				adm_override_chs_text);
+
+static int msm_routing_adm_get_backend_idx(struct snd_kcontrol *kcontrol)
+{
+	int backend_id;
+
+	if (strnstr(kcontrol->id.name, "SLIM7_RX", sizeof("SLIM7_RX"))) {
+		backend_id = MSM_BACKEND_DAI_SLIMBUS_7_RX;
+	} else if (strnstr(kcontrol->id.name, "SEC_TDM_RX_0",
+			   sizeof("SEC_TDM_RX_0"))) {
+		backend_id = MSM_BACKEND_DAI_SEC_TDM_RX_0;
+	} else if (strnstr(kcontrol->id.name, "PRIM_MI2S_RX",
+			   sizeof("PRIM_MI2S_RX"))) {
+		backend_id = MSM_BACKEND_DAI_PRI_MI2S_RX;
+	} else {
+		pr_err("%s: unsupported backend id: %s",
+			__func__, kcontrol->id.name);
+		return -EINVAL;
+	}
+
+	return backend_id;
+}
+
+static int msm_routing_adm_channel_config_get(
+					struct snd_kcontrol *kcontrol,
+					struct snd_ctl_elem_value *ucontrol)
+{
+	int backend_id = msm_routing_adm_get_backend_idx(kcontrol);
+
+	if (backend_id >= 0) {
+		mutex_lock(&routing_lock);
+		ucontrol->value.integer.value[0] =
+			 msm_bedais[backend_id].adm_override_ch;
+		pr_debug("%s: adm channel count %ld for BE:%d\n", __func__,
+			 ucontrol->value.integer.value[0], backend_id);
+		mutex_unlock(&routing_lock);
+	}
+
+	return 0;
+}
+
+static int msm_routing_adm_channel_config_put(
+					struct snd_kcontrol *kcontrol,
+					struct snd_ctl_elem_value *ucontrol)
+{
+	int backend_id = msm_routing_adm_get_backend_idx(kcontrol);
+
+	if (backend_id >= 0) {
+		mutex_lock(&routing_lock);
+		msm_bedais[backend_id].adm_override_ch =
+				 ucontrol->value.integer.value[0];
+		pr_debug("%s:updating BE :%d  adm channels: %d\n",
+			  __func__, backend_id,
+			  msm_bedais[backend_id].adm_override_ch);
+		mutex_unlock(&routing_lock);
+	}
+
+	return 0;
+}
+
+static const struct snd_kcontrol_new adm_channel_config_controls[] = {
+	SOC_ENUM_EXT("SLIM7_RX ADM Channels", slim_7_rx_adm_override_chs,
+			msm_routing_adm_channel_config_get,
+			msm_routing_adm_channel_config_put),
+	SOC_ENUM_EXT("SEC_TDM_RX_0 ADM Channels", sec_tdm_rx_0_adm_override_chs,
+			msm_routing_adm_channel_config_get,
+			msm_routing_adm_channel_config_put),
+	SOC_ENUM_EXT("PRIM_MI2S_RX ADM Channels", prim_mi2s_rx_adm_override_chs,
+			msm_routing_adm_channel_config_get,
+			msm_routing_adm_channel_config_put),
+};
+
+static int msm_routing_slim_0_rx_aanc_mux_get(struct snd_kcontrol *kcontrol,
+	struct snd_ctl_elem_value *ucontrol)
+{
+	mutex_lock(&routing_lock);
+	ucontrol->value.integer.value[0] = slim0_rx_aanc_fb_port;
+	mutex_unlock(&routing_lock);
+	pr_debug("%s: AANC Mux Port %ld\n", __func__,
+		ucontrol->value.integer.value[0]);
+	return 0;
+}
+
+static int msm_routing_slim_0_rx_aanc_mux_put(struct snd_kcontrol *kcontrol,
+	struct snd_ctl_elem_value *ucontrol)
+{
+	struct aanc_data aanc_info;
+
+	mutex_lock(&routing_lock);
+	memset(&aanc_info, 0x00, sizeof(aanc_info));
+	pr_debug("%s: AANC Mux Port %ld\n", __func__,
+		ucontrol->value.integer.value[0]);
+	slim0_rx_aanc_fb_port = ucontrol->value.integer.value[0];
+	if (ucontrol->value.integer.value[0] == 0) {
+		aanc_info.aanc_active = false;
+		aanc_info.aanc_tx_port = 0;
+		aanc_info.aanc_rx_port = 0;
+	} else {
+		aanc_info.aanc_active = true;
+		aanc_info.aanc_rx_port = SLIMBUS_0_RX;
+		aanc_info.aanc_tx_port =
+			(SLIMBUS_0_RX - 1 + (slim0_rx_aanc_fb_port * 2));
+	}
+	afe_set_aanc_info(&aanc_info);
+	mutex_unlock(&routing_lock);
+	return 0;
+}
+
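+/*
+ * Port-to-port (BE-to-BE) loopback mixer: mc->reg is the destination
+ * back end and mc->shift the source back end. The source index doubles
+ * as a bit position spread across the port_sessions[] words, so it is
+ * split into a word index (idx) and a bit within that word (shift).
+ */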
+static int msm_routing_get_port_mixer(struct snd_kcontrol *kcontrol,
+				struct snd_ctl_elem_value *ucontrol)
+{
+	int idx = 0, shift = 0;
+	struct soc_mixer_control *mc =
+	(struct soc_mixer_control *)kcontrol->private_value;
+
+	idx = mc->shift/(sizeof(msm_bedais[mc->reg].port_sessions[0]) * 8);
+	shift = mc->shift%(sizeof(msm_bedais[mc->reg].port_sessions[0]) * 8);
+
+	if (idx >= BE_DAI_PORT_SESSIONS_IDX_MAX) {
+		pr_err("%s: Invalid idx = %d\n", __func__, idx);
+		return -EINVAL;
+	}
+
+	if (test_bit(shift,
+		(unsigned long *)&msm_bedais[mc->reg].port_sessions[idx]))
+		ucontrol->value.integer.value[0] = 1;
+	else
+		ucontrol->value.integer.value[0] = 0;
+
+	pr_debug("%s: reg %x shift %x val %ld\n", __func__, mc->reg, mc->shift,
+	ucontrol->value.integer.value[0]);
+
+	return 0;
+}
+
+static int msm_routing_put_port_mixer(struct snd_kcontrol *kcontrol,
+				struct snd_ctl_elem_value *ucontrol)
+{
+	int idx = 0, shift = 0;
+	struct soc_mixer_control *mc =
+		(struct soc_mixer_control *)kcontrol->private_value;
+
+	idx = mc->shift/(sizeof(msm_bedais[mc->reg].port_sessions[0]) * 8);
+	shift = mc->shift%(sizeof(msm_bedais[mc->reg].port_sessions[0]) * 8);
+
+	if (idx >= BE_DAI_PORT_SESSIONS_IDX_MAX) {
+		pr_err("%s: Invalid idx = %d\n", __func__, idx);
+		return -EINVAL;
+	}
+
+	pr_debug("%s: reg 0x%x shift 0x%x val %ld idx %d reminder shift %d\n",
+		 __func__, mc->reg, mc->shift,
+		 ucontrol->value.integer.value[0], idx, shift);
+
+	if (ucontrol->value.integer.value[0]) {
+		afe_loopback(1, msm_bedais[mc->reg].port_id,
+			    msm_bedais[mc->shift].port_id);
+		set_bit(shift,
+		(unsigned long *)&msm_bedais[mc->reg].port_sessions[idx]);
+	} else {
+		afe_loopback(0, msm_bedais[mc->reg].port_id,
+			    msm_bedais[mc->shift].port_id);
+		clear_bit(shift,
+		(unsigned long *)&msm_bedais[mc->reg].port_sessions[idx]);
+	}
+
+	return 1;
+}
+
+static int msm_pcm_get_channel_rule_index(struct snd_kcontrol *kcontrol,
+					struct snd_ctl_elem_value *ucontrol)
+{
+	u16 fe_id = 0;
+
+	fe_id = ((struct soc_mixer_control *)
+			kcontrol->private_value)->shift;
+	if (fe_id >= MSM_FRONTEND_DAI_MM_SIZE) {
+		pr_err("%s: invalid FE %d\n", __func__, fe_id);
+		return -EINVAL;
+	}
+
+	ucontrol->value.integer.value[0] = channel_mixer[fe_id].rule;
+
+	return 0;
+}
+
+static int msm_pcm_put_channel_rule_index(struct snd_kcontrol *kcontrol,
+					struct snd_ctl_elem_value *ucontrol)
+{
+	u16 fe_id = 0;
+
+	fe_id = ((struct soc_mixer_control *)
+			kcontrol->private_value)->shift;
+	if (fe_id >= MSM_FRONTEND_DAI_MM_SIZE) {
+		pr_err("%s: invalid FE %d\n", __func__, fe_id);
+		return -EINVAL;
+	}
+
+	channel_mixer[fe_id].rule = ucontrol->value.integer.value[0];
+
+	return 1;
+}
+
+static int msm_pcm_get_out_chs(struct snd_kcontrol *kcontrol,
+				struct snd_ctl_elem_value *ucontrol)
+{
+	u16 fe_id = 0;
+
+	fe_id = ((struct soc_multi_mixer_control *)
+			kcontrol->private_value)->shift;
+	if (fe_id >= MSM_FRONTEND_DAI_MM_SIZE) {
+		pr_err("%s: invalid FE %d\n", __func__, fe_id);
+		return -EINVAL;
+	}
+
+	ucontrol->value.integer.value[0] =
+		channel_mixer[fe_id].output_channel;
+	return 0;
+}
+
+static int msm_pcm_put_out_chs(struct snd_kcontrol *kcontrol,
+				struct snd_ctl_elem_value *ucontrol)
+{
+	u16 fe_id = 0;
+
+	fe_id = ((struct soc_multi_mixer_control *)
+			kcontrol->private_value)->shift;
+	if (fe_id >= MSM_FRONTEND_DAI_MM_SIZE) {
+		pr_err("%s: invalid FE %d\n", __func__, fe_id);
+		return -EINVAL;
+	}
+
+	pr_debug("%s: fe_id is %d, output channels = %d\n", __func__,
+			fe_id,
+			(unsigned int)(ucontrol->value.integer.value[0]));
+	channel_mixer[fe_id].output_channel =
+			(unsigned int)(ucontrol->value.integer.value[0]);
+
+	return 1;
+}
+
+static const char *const ch_mixer[] = {"Disable", "Enable"};
+
+/* Must match the MSM_BACKEND_DAI_* order; update when adding a back end. */
+static const char *const be_name[] = {
+"ZERO", "PRI_I2S_RX", "PRI_I2S_TX", "SLIM_0_RX",
+"SLIM_0_TX", "HDMI_RX", "INT_BT_SCO_RX", "INT_BT_SCO_TX",
+"INT_FM_RX", "INT_FM_TX", "AFE_PCM_RX", "AFE_PCM_TX",
+"AUXPCM_RX", "AUXPCM_TX", "VOICE_PLAYBACK_TX", "VOICE2_PLAYBACK_TX",
+"INCALL_RECORD_RX", "INCALL_RECORD_TX", "MI2S_RX", "MI2S_TX",
+"SEC_I2S_RX", "SLIM_1_RX", "SLIM_1_TX", "SLIM_2_RX",
+"SLIM_2_TX", "SLIM_3_RX", "SLIM_3_TX", "SLIM_4_RX",
+"SLIM_4_TX", "SLIM_5_RX", "SLIM_5_TX", "SLIM_6_RX",
+"SLIM_6_TX", "SLIM_7_RX", "SLIM_7_TX", "SLIM_8_RX",
+"SLIM_8_TX", "EXTPROC_RX", "EXTPROC_TX", "EXPROC_EC_TX",
+"QUAT_MI2S_RX", "QUAT_MI2S_TX", "SECOND_MI2S_RX", "SECOND_MI2S_TX",
+"PRI_MI2S_RX", "PRI_MI2S_TX", "TERT_MI2S_RX", "TERT_MI2S_TX",
+"AUDIO_I2S_RX", "SEC_AUXPCM_RX", "SEC_AUXPCM_TX", "SPDIF_RX",
+"SECOND_MI2S_RX_SD1", "QUIN_MI2S_RX", "QUIN_MI2S_TX", "SENARY_MI2S_TX",
+"PRI_TDM_RX_0", "PRI_TDM_TX_0", "PRI_TDM_RX_1", "PRI_TDM_TX_1",
+"PRI_TDM_RX_2", "PRI_TDM_TX_2", "PRI_TDM_RX_3", "PRI_TDM_TX_3",
+"PRI_TDM_RX_4", "PRI_TDM_TX_4", "PRI_TDM_RX_5", "PRI_TDM_TX_5",
+"PRI_TDM_RX_6", "PRI_TDM_TX_6", "PRI_TDM_RX_7", "PRI_TDM_TX_7",
+"SEC_TDM_RX_0", "SEC_TDM_TX_0", "SEC_TDM_RX_1", "SEC_TDM_TX_1",
+"SEC_TDM_RX_2", "SEC_TDM_TX_2", "SEC_TDM_RX_3", "SEC_TDM_TX_3",
+"SEC_TDM_RX_4", "SEC_TDM_TX_4", "SEC_TDM_RX_5", "SEC_TDM_TX_5",
+"SEC_TDM_RX_6", "SEC_TDM_TX_6", "SEC_TDM_RX_7", "SEC_TDM_TX_7",
+"TERT_TDM_RX_0", "TERT_TDM_TX_0", "TERT_TDM_RX_1", "TERT_TDM_TX_1",
+"TERT_TDM_RX_2", "TERT_TDM_TX_2", "TERT_TDM_RX_3", "TERT_TDM_TX_3",
+"TERT_TDM_RX_4", "TERT_TDM_TX_4", "TERT_TDM_RX_5", "TERT_TDM_TX_5",
+"TERT_TDM_RX_6", "TERT_TDM_TX_6", "TERT_TDM_RX_7", "TERT_TDM_TX_7",
+"QUAT_TDM_RX_0", "QUAT_TDM_TX_0", "QUAT_TDM_RX_1", "QUAT_TDM_TX_1",
+"QUAT_TDM_RX_2", "QUAT_TDM_TX_2", "QUAT_TDM_RX_3", "QUAT_TDM_TX_3",
+"QUAT_TDM_RX_4", "QUAT_TDM_TX_4", "QUAT_TDM_RX_5", "QUAT_TDM_TX_5",
+"QUAT_TDM_RX_6", "QUAT_TDM_TX_6", "QUAT_TDM_RX_7", "QUAT_TDM_TX_7",
+"INT_BT_A2DP_RX", "USB_RX", "USB_TX", "DISPLAY_PORT_RX",
+"TERT_AUXPCM_RX", "TERT_AUXPCM_TX", "QUAT_AUXPCM_RX", "QUAT_AUXPCM_TX",
+"INT0_MI2S_RX", "INT0_MI2S_TX", "INT1_MI2S_RX", "INT1_MI2S_TX",
+"INT2_MI2S_RX", "INT2_MI2S_TX", "INT3_MI2S_RX", "INT3_MI2S_TX",
+"INT4_MI2S_RX", "INT4_MI2S_TX", "INT5_MI2S_RX", "INT5_MI2S_TX",
+"INT6_MI2S_RX", "INT6_MI2S_TX"
+};
+
+static SOC_ENUM_SINGLE_DECL(mm1_channel_mux,
+	SND_SOC_NOPM, MSM_FRONTEND_DAI_MULTIMEDIA1, ch_mixer);
+static SOC_ENUM_SINGLE_DECL(mm2_channel_mux,
+	SND_SOC_NOPM, MSM_FRONTEND_DAI_MULTIMEDIA2, ch_mixer);
+static SOC_ENUM_SINGLE_DECL(mm3_channel_mux,
+	SND_SOC_NOPM, MSM_FRONTEND_DAI_MULTIMEDIA3, ch_mixer);
+static SOC_ENUM_SINGLE_DECL(mm4_channel_mux,
+	SND_SOC_NOPM, MSM_FRONTEND_DAI_MULTIMEDIA4, ch_mixer);
+
+static SOC_ENUM_DOUBLE_DECL(mm1_ch1_enum,
+	SND_SOC_NOPM, MSM_FRONTEND_DAI_MULTIMEDIA1, 0, be_name);
+static SOC_ENUM_DOUBLE_DECL(mm1_ch2_enum,
+	SND_SOC_NOPM, MSM_FRONTEND_DAI_MULTIMEDIA1, 1, be_name);
+static SOC_ENUM_DOUBLE_DECL(mm1_ch3_enum,
+	SND_SOC_NOPM, MSM_FRONTEND_DAI_MULTIMEDIA1, 2, be_name);
+static SOC_ENUM_DOUBLE_DECL(mm1_ch4_enum,
+	SND_SOC_NOPM, MSM_FRONTEND_DAI_MULTIMEDIA1, 3, be_name);
+static SOC_ENUM_DOUBLE_DECL(mm1_ch5_enum,
+	SND_SOC_NOPM, MSM_FRONTEND_DAI_MULTIMEDIA1, 4, be_name);
+static SOC_ENUM_DOUBLE_DECL(mm1_ch6_enum,
+	SND_SOC_NOPM, MSM_FRONTEND_DAI_MULTIMEDIA1, 5, be_name);
+static SOC_ENUM_DOUBLE_DECL(mm1_ch7_enum,
+	SND_SOC_NOPM, MSM_FRONTEND_DAI_MULTIMEDIA1, 6, be_name);
+static SOC_ENUM_DOUBLE_DECL(mm1_ch8_enum,
+	SND_SOC_NOPM, MSM_FRONTEND_DAI_MULTIMEDIA1, 7, be_name);
+
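+/*
+ * Shared info() helper for the enum controls below: clamps the requested
+ * item index and copies the matching name, warning if it cannot fit in
+ * the fixed-size ALSA name buffer.
+ */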
+static int msm_pcm_get_ctl_enum_info(struct snd_ctl_elem_info *uinfo,
+		unsigned int channels,
+		unsigned int items, const char *const names[])
+{
+	if (uinfo->value.enumerated.item >= items)
+		uinfo->value.enumerated.item = items - 1;
+
+	WARN(strlen(names[uinfo->value.enumerated.item]) >=
+		sizeof(uinfo->value.enumerated.name),
+		"ALSA: too long item name '%s'\n",
+		names[uinfo->value.enumerated.item]);
+	strlcpy(uinfo->value.enumerated.name,
+		names[uinfo->value.enumerated.item],
+		sizeof(uinfo->value.enumerated.name));
+	return 0;
+}
+
+static int msm_pcm_channel_mixer_info(struct snd_kcontrol *kcontrol,
+				struct snd_ctl_elem_info *uinfo)
+{
+	struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
+
+	uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
+	uinfo->count = 1;
+
+	uinfo->value.enumerated.items = ARRAY_SIZE(ch_mixer);
+	msm_pcm_get_ctl_enum_info(uinfo, 1, e->items, e->texts);
+
+	return 0;
+}
+
+static int msm_pcm_channel_mixer_get(struct snd_kcontrol *kcontrol,
+				struct snd_ctl_elem_value *ucontrol)
+{
+	u16 fe_id = 0;
+
+	fe_id = ((struct soc_enum *)
+			kcontrol->private_value)->shift_l;
+	if (fe_id >= MSM_FRONTEND_DAI_MM_SIZE) {
+		pr_err("%s: invalid FE %d\n", __func__, fe_id);
+		return -EINVAL;
+	}
+
+	pr_debug("%s: FE %d %s\n", __func__,
+		fe_id,
+		channel_mixer[fe_id].enable ? "Enabled" : "Disabled");
+	ucontrol->value.enumerated.item[0] = channel_mixer[fe_id].enable;
+	return 0;
+}
+
+static int msm_pcm_channel_mixer_put(struct snd_kcontrol *kcontrol,
+				struct snd_ctl_elem_value *ucontrol)
+{
+	u16 fe_id = 0;
+
+	fe_id = ((struct soc_enum *)
+			kcontrol->private_value)->shift_l;
+	if (fe_id >= MSM_FRONTEND_DAI_MM_SIZE) {
+		pr_err("%s: invalid FE %d\n", __func__, fe_id);
+		return -EINVAL;
+	}
+
+	channel_mixer[fe_id].enable = ucontrol->value.enumerated.item[0];
+	pr_debug("%s: %s FE %d\n", __func__,
+		channel_mixer[fe_id].enable ? "Enable" : "Disable",
+		fe_id);
+	return 0;
+}
+
+static int msm_pcm_channel_input_be_info(struct snd_kcontrol *kcontrol,
+				struct snd_ctl_elem_info *uinfo)
+{
+	struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
+
+	uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
+	uinfo->count = 1;
+
+	uinfo->value.enumerated.items = ARRAY_SIZE(be_name);
+	msm_pcm_get_ctl_enum_info(uinfo, 1, e->items, e->texts);
+
+	return 0;
+}
+
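+/*
+ * "MultiMediaN ChannelM" controls: shift_l of the double enum encodes the
+ * front-end DAI and shift_r the input channel, so each control selects
+ * the back end feeding one input channel of the channel mixer.
+ */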
+static int msm_pcm_channel_input_be_put(struct snd_kcontrol *kcontrol,
+				struct snd_ctl_elem_value *ucontrol)
+{
+	struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
+	u16 fe_id = 0, in_ch = 0;
+
+	fe_id = e->shift_l;
+	in_ch = e->shift_r;
+	if (fe_id >= MSM_FRONTEND_DAI_MM_SIZE) {
+		pr_err("%s: invalid FE %d\n", __func__, fe_id);
+		return -EINVAL;
+	}
+	if (in_ch >= ADM_MAX_CHANNELS) {
+		pr_err("%s: invalid input channel %d\n", __func__, in_ch);
+		return -EINVAL;
+	}
+
+	channel_input[fe_id][in_ch] = ucontrol->value.enumerated.item[0];
+	return 1;
+}
+
+static int msm_pcm_channel_input_be_get(struct snd_kcontrol *kcontrol,
+				struct snd_ctl_elem_value *ucontrol)
+{
+	struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
+	u16 fe_id = 0, in_ch = 0;
+
+	fe_id = e->shift_l;
+	in_ch = e->shift_r;
+	if (fe_id >= MSM_FRONTEND_DAI_MM_SIZE) {
+		pr_err("%s: invalid FE %d\n", __func__, fe_id);
+		return -EINVAL;
+	}
+	if (in_ch >= ADM_MAX_CHANNELS) {
+		pr_err("%s: invalid input channel %d\n", __func__, in_ch);
+		return -EINVAL;
+	}
+
+	ucontrol->value.enumerated.item[0] = channel_input[fe_id][in_ch];
+	return 0;
+}
+
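+/*
+ * "MultiMediaN Output ChannelM" controls: each carries ADM_MAX_CHANNELS
+ * weights in the range 0..WEIGHT_0_DB describing how much of every input
+ * channel is mixed into output channel M of front end N.
+ */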
+static int msm_pcm_channel_weight_info(struct snd_kcontrol *kcontrol,
+				struct snd_ctl_elem_info *uinfo)
+{
+	uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
+	uinfo->count = ADM_MAX_CHANNELS;
+	uinfo->value.integer.min = 0;
+	uinfo->value.integer.max = WEIGHT_0_DB;
+
+	return 0;
+}
+
+static int msm_pcm_channel_weight_put(struct snd_kcontrol *kcontrol,
+				struct snd_ctl_elem_value *ucontrol)
+{
+	u16 fe_id = 0, out_ch = 0;
+	int i, weight;
+
+	fe_id = ((struct soc_multi_mixer_control *)
+			kcontrol->private_value)->shift;
+	out_ch = ((struct soc_multi_mixer_control *)
+			kcontrol->private_value)->rshift;
+	if (fe_id >= MSM_FRONTEND_DAI_MM_SIZE) {
+		pr_err("%s: invalid FE %d\n", __func__, fe_id);
+		return -EINVAL;
+	}
+	if (out_ch >= ADM_MAX_CHANNELS) {
+		pr_err("%s: invalid input channel %d\n", __func__, out_ch);
+		return -EINVAL;
+	}
+
+	pr_debug("%s: FE_ID: %d, channel weight %ld, %ld, %ld, %ld, %ld, %ld, %ld, %ld\n",
+		__func__, fe_id,
+		ucontrol->value.integer.value[0],
+		ucontrol->value.integer.value[1],
+		ucontrol->value.integer.value[2],
+		ucontrol->value.integer.value[3],
+		ucontrol->value.integer.value[4],
+		ucontrol->value.integer.value[5],
+		ucontrol->value.integer.value[6],
+		ucontrol->value.integer.value[7]);
+
+	for (i = 0; i < ADM_MAX_CHANNELS; ++i) {
+		weight = ucontrol->value.integer.value[i];
+		channel_mixer[fe_id].channel_weight[out_ch][i] = weight;
+		pr_debug("%s: FE_ID %d, output %d input %d weight %d\n",
+			__func__, fe_id, out_ch, i,
+			channel_mixer[fe_id].channel_weight[out_ch][i]);
+	}
+
+	return 0;
+}
+
+static int msm_pcm_channel_weight_get(struct snd_kcontrol *kcontrol,
+				struct snd_ctl_elem_value *ucontrol)
+{
+	u16 fe_id = 0, out_ch = 0;
+	int i;
+
+	fe_id = ((struct soc_multi_mixer_control *)
+			kcontrol->private_value)->shift;
+	out_ch = ((struct soc_multi_mixer_control *)
+			kcontrol->private_value)->rshift;
+	if (fe_id >= MSM_FRONTEND_DAI_MM_SIZE) {
+		pr_err("%s: invalid FE %d\n", __func__, fe_id);
+		return -EINVAL;
+	}
+	if (out_ch >= ADM_MAX_CHANNELS) {
+		pr_err("%s: invalid input channel %d\n", __func__, out_ch);
+		return -EINVAL;
+	}
+
+	for (i = 0; i < ADM_MAX_CHANNELS; ++i)
+		ucontrol->value.integer.value[i] =
+			channel_mixer[fe_id].channel_weight[out_ch][i];
+
+	pr_debug("%s: FE_ID: %d, weight  %ld, %ld, %ld, %ld, %ld, %ld, %ld, %ld",
+		__func__, fe_id,
+		ucontrol->value.integer.value[0],
+		ucontrol->value.integer.value[1],
+		ucontrol->value.integer.value[2],
+		ucontrol->value.integer.value[3],
+		ucontrol->value.integer.value[4],
+		ucontrol->value.integer.value[5],
+		ucontrol->value.integer.value[6],
+		ucontrol->value.integer.value[7]);
+
+	return 0;
+}
+
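+/*
+ * Channel mixer control set: per-front-end mixing rule and output channel
+ * count, an enable switch, per-output-channel weight vectors and
+ * per-input-channel back-end selectors, all backed by channel_mixer[] and
+ * channel_input[].
+ */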
+static const struct snd_kcontrol_new channel_mixer_controls[] = {
+	SOC_SINGLE_EXT("MultiMedia1 Channel Rule", SND_SOC_NOPM,
+			MSM_FRONTEND_DAI_MULTIMEDIA1, 8, 0,
+			msm_pcm_get_channel_rule_index,
+			msm_pcm_put_channel_rule_index),
+	SOC_SINGLE_EXT("MultiMedia2 Channel Rule", SND_SOC_NOPM,
+			MSM_FRONTEND_DAI_MULTIMEDIA2, 8, 0,
+			msm_pcm_get_channel_rule_index,
+			msm_pcm_put_channel_rule_index),
+	SOC_SINGLE_EXT("MultiMedia3 Channel Rule", SND_SOC_NOPM,
+			MSM_FRONTEND_DAI_MULTIMEDIA3, 8, 0,
+			msm_pcm_get_channel_rule_index,
+			msm_pcm_put_channel_rule_index),
+	SOC_SINGLE_EXT("MultiMedia4 Channel Rule", SND_SOC_NOPM,
+			MSM_FRONTEND_DAI_MULTIMEDIA4, 8, 0,
+			msm_pcm_get_channel_rule_index,
+			msm_pcm_put_channel_rule_index),
+	SOC_SINGLE_EXT("MultiMedia5 Channel Rule", SND_SOC_NOPM,
+			MSM_FRONTEND_DAI_MULTIMEDIA5, 8, 0,
+			msm_pcm_get_channel_rule_index,
+			msm_pcm_put_channel_rule_index),
+	SOC_SINGLE_EXT("MultiMedia6 Channel Rule", SND_SOC_NOPM,
+			MSM_FRONTEND_DAI_MULTIMEDIA6, 8, 0,
+			msm_pcm_get_channel_rule_index,
+			msm_pcm_put_channel_rule_index),
+
+	SOC_SINGLE_EXT("MultiMedia1 Channels", SND_SOC_NOPM,
+			MSM_FRONTEND_DAI_MULTIMEDIA1, 8, 0,
+			msm_pcm_get_out_chs,
+			msm_pcm_put_out_chs),
+	SOC_SINGLE_EXT("MultiMedia2 Channels", SND_SOC_NOPM,
+			MSM_FRONTEND_DAI_MULTIMEDIA2, 8, 0,
+			msm_pcm_get_out_chs,
+			msm_pcm_put_out_chs),
+	SOC_SINGLE_EXT("MultiMedia3 Channels", SND_SOC_NOPM,
+			MSM_FRONTEND_DAI_MULTIMEDIA3, 8, 0,
+			msm_pcm_get_out_chs,
+			msm_pcm_put_out_chs),
+	SOC_SINGLE_EXT("MultiMedia4 Channels", SND_SOC_NOPM,
+			MSM_FRONTEND_DAI_MULTIMEDIA4, 8, 0,
+			msm_pcm_get_out_chs,
+			msm_pcm_put_out_chs),
+	SOC_SINGLE_EXT("MultiMedia5 Channels", SND_SOC_NOPM,
+			MSM_FRONTEND_DAI_MULTIMEDIA5, 8, 0,
+			msm_pcm_get_out_chs,
+			msm_pcm_put_out_chs),
+	SOC_SINGLE_EXT("MultiMedia6 Channels", SND_SOC_NOPM,
+			MSM_FRONTEND_DAI_MULTIMEDIA6, 8, 0,
+			msm_pcm_get_out_chs,
+			msm_pcm_put_out_chs),
+	{
+	.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+	.access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+	.name = "MultiMedia1 Channel Mixer",
+	.info = msm_pcm_channel_mixer_info,
+	.get = msm_pcm_channel_mixer_get,
+	.put = msm_pcm_channel_mixer_put,
+	.private_value = (unsigned long)&(mm1_channel_mux)
+	},
+	{
+	.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+	.access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+	.name = "MultiMedia2 Channel Mixer",
+	.info = msm_pcm_channel_mixer_info,
+	.get = msm_pcm_channel_mixer_get,
+	.put = msm_pcm_channel_mixer_put,
+	.private_value = (unsigned long)&(mm2_channel_mux)
+	},
+	{
+	.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+	.access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+	.name = "MultiMedia3 Channel Mixer",
+	.info = msm_pcm_channel_mixer_info,
+	.get = msm_pcm_channel_mixer_get,
+	.put = msm_pcm_channel_mixer_put,
+	.private_value = (unsigned long)&(mm3_channel_mux)
+	},
+	{
+	.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+	.access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+	.name = "MultiMedia4 Channel Mixer",
+	.info = msm_pcm_channel_mixer_info,
+	.get = msm_pcm_channel_mixer_get,
+	.put = msm_pcm_channel_mixer_put,
+	.private_value = (unsigned long)&(mm4_channel_mux)
+	},
+	{
+	.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+	.access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+	.name = "MultiMedia1 Output Channel1",
+	.info = msm_pcm_channel_weight_info,
+	.get = msm_pcm_channel_weight_get,
+	.put = msm_pcm_channel_weight_put,
+	.private_value = (unsigned long)&(struct soc_multi_mixer_control)
+		{ .shift = MSM_FRONTEND_DAI_MULTIMEDIA1, .rshift = 0,}
+	},
+	{
+	.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+	.access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+	.name = "MultiMedia1 Output Channel2",
+	.info = msm_pcm_channel_weight_info,
+	.get = msm_pcm_channel_weight_get,
+	.put = msm_pcm_channel_weight_put,
+	.private_value = (unsigned long)&(struct soc_multi_mixer_control)
+		{ .shift = MSM_FRONTEND_DAI_MULTIMEDIA1, .rshift = 1, }
+	},
+	{
+	.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+	.access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+	.name = "MultiMedia1 Output Channel3",
+	.info = msm_pcm_channel_weight_info,
+	.get = msm_pcm_channel_weight_get,
+	.put = msm_pcm_channel_weight_put,
+	.private_value = (unsigned long)&(struct soc_multi_mixer_control)
+		{ .shift = MSM_FRONTEND_DAI_MULTIMEDIA1, .rshift = 2,}
+	},
+	{
+	.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+	.access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+	.name = "MultiMedia1 Output Channel4",
+	.info = msm_pcm_channel_weight_info,
+	.get = msm_pcm_channel_weight_get,
+	.put = msm_pcm_channel_weight_put,
+	.private_value = (unsigned long)&(struct soc_multi_mixer_control)
+		{ .shift = MSM_FRONTEND_DAI_MULTIMEDIA1, .rshift = 3,}
+	},
+	{
+	.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+	.access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+	.name = "MultiMedia1 Output Channel5",
+	.info = msm_pcm_channel_weight_info,
+	.get = msm_pcm_channel_weight_get,
+	.put = msm_pcm_channel_weight_put,
+	.private_value = (unsigned long)&(struct soc_multi_mixer_control)
+		{ .shift = MSM_FRONTEND_DAI_MULTIMEDIA1, .rshift = 4,}
+	},
+	{
+	.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+	.access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+	.name = "MultiMedia1 Output Channel6",
+	.info = msm_pcm_channel_weight_info,
+	.get = msm_pcm_channel_weight_get,
+	.put = msm_pcm_channel_weight_put,
+	.private_value = (unsigned long)&(struct soc_multi_mixer_control)
+		{ .shift = MSM_FRONTEND_DAI_MULTIMEDIA1, .rshift = 5,}
+	},
+	{
+	.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+	.access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+	.name = "MultiMedia1 Output Channel7",
+	.info = msm_pcm_channel_weight_info,
+	.get = msm_pcm_channel_weight_get,
+	.put = msm_pcm_channel_weight_put,
+	.private_value = (unsigned long)&(struct soc_multi_mixer_control)
+		{ .shift = MSM_FRONTEND_DAI_MULTIMEDIA1, .rshift = 6,}
+	},
+	{
+	.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+	.access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+	.name = "MultiMedia1 Output Channel8",
+	.info = msm_pcm_channel_weight_info,
+	.get = msm_pcm_channel_weight_get,
+	.put = msm_pcm_channel_weight_put,
+	.private_value = (unsigned long)&(struct soc_multi_mixer_control)
+		{ .shift = MSM_FRONTEND_DAI_MULTIMEDIA1, .rshift = 7,}
+	},
+	{
+	.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+	.access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+	.name = "MultiMedia2 Output Channel1",
+	.info = msm_pcm_channel_weight_info,
+	.get = msm_pcm_channel_weight_get,
+	.put = msm_pcm_channel_weight_put,
+	.private_value = (unsigned long)&(struct soc_multi_mixer_control)
+		{.shift = MSM_FRONTEND_DAI_MULTIMEDIA2, .rshift = 0,}
+	},
+	{
+	.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+	.access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+	.name = "MultiMedia2 Output Channel2",
+	.info = msm_pcm_channel_weight_info,
+	.get = msm_pcm_channel_weight_get,
+	.put = msm_pcm_channel_weight_put,
+	.private_value = (unsigned long)&(struct soc_multi_mixer_control)
+		{.shift = MSM_FRONTEND_DAI_MULTIMEDIA2, .rshift = 1,}
+	},
+	{
+	.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+	.access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+	.name = "MultiMedia2 Output Channel3",
+	.info = msm_pcm_channel_weight_info,
+	.get = msm_pcm_channel_weight_get,
+	.put = msm_pcm_channel_weight_put,
+	.private_value = (unsigned long)&(struct soc_multi_mixer_control)
+		{.shift = MSM_FRONTEND_DAI_MULTIMEDIA2, .rshift = 2,}
+	},
+	{
+	.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+	.access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+	.name = "MultiMedia3 Output Channel1",
+	.info = msm_pcm_channel_weight_info,
+	.get = msm_pcm_channel_weight_get,
+	.put = msm_pcm_channel_weight_put,
+	.private_value = (unsigned long)&(struct soc_multi_mixer_control)
+		{.shift = MSM_FRONTEND_DAI_MULTIMEDIA3, .rshift = 0,}
+	},
+	{
+	.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+	.access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+	.name = "MultiMedia3 Output Channel2",
+	.info = msm_pcm_channel_weight_info,
+	.get = msm_pcm_channel_weight_get,
+	.put = msm_pcm_channel_weight_put,
+	.private_value = (unsigned long)&(struct soc_multi_mixer_control)
+		{.shift = MSM_FRONTEND_DAI_MULTIMEDIA3, .rshift = 1,}
+	},
+	{
+	.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+	.access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+	.name = "MultiMedia1 Channel1",
+	.info = msm_pcm_channel_input_be_info,
+	.get = msm_pcm_channel_input_be_get,
+	.put = msm_pcm_channel_input_be_put,
+	.private_value = (unsigned long)&(mm1_ch1_enum)
+	},
+	{
+	.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+	.access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+	.name = "MultiMedia1 Channel2",
+	.info = msm_pcm_channel_input_be_info,
+	.get = msm_pcm_channel_input_be_get,
+	.put = msm_pcm_channel_input_be_put,
+	.private_value = (unsigned long)&(mm1_ch2_enum)
+	},
+	{
+	.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+	.access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+	.name = "MultiMedia1 Channel3",
+	.info = msm_pcm_channel_input_be_info,
+	.get = msm_pcm_channel_input_be_get,
+	.put = msm_pcm_channel_input_be_put,
+	.private_value = (unsigned long)&(mm1_ch3_enum)
+	},
+	{
+	.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+	.access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+	.name = "MultiMedia1 Channel4",
+	.info = msm_pcm_channel_input_be_info,
+	.get = msm_pcm_channel_input_be_get,
+	.put = msm_pcm_channel_input_be_put,
+	.private_value = (unsigned long)&(mm1_ch4_enum)
+	},
+	{
+	.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+	.access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+	.name = "MultiMedia1 Channel5",
+	.info = msm_pcm_channel_input_be_info,
+	.get = msm_pcm_channel_input_be_get,
+	.put = msm_pcm_channel_input_be_put,
+	.private_value = (unsigned long)&(mm1_ch5_enum)
+	},
+	{
+	.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+	.access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+	.name = "MultiMedia1 Channel6",
+	.info = msm_pcm_channel_input_be_info,
+	.get = msm_pcm_channel_input_be_get,
+	.put = msm_pcm_channel_input_be_put,
+	.private_value = (unsigned long)&(mm1_ch6_enum)
+	},
+	{
+	.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+	.access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+	.name = "MultiMedia1 Channel7",
+	.info = msm_pcm_channel_input_be_info,
+	.get = msm_pcm_channel_input_be_get,
+	.put = msm_pcm_channel_input_be_put,
+	.private_value = (unsigned long)&(mm1_ch7_enum)
+	},
+	{
+	.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+	.access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+	.name = "MultiMedia1 Channel8",
+	.info = msm_pcm_channel_input_be_info,
+	.get = msm_pcm_channel_input_be_get,
+	.put = msm_pcm_channel_input_be_put,
+	.private_value = (unsigned long)&(mm1_ch8_enum)
+	},
+};
+
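+/*
+ * EC (echo cancellation) reference configuration: channel count, bit
+ * format and sample rate are forwarded to the ADM as they change.
+ */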
+static int msm_ec_ref_ch_get(struct snd_kcontrol *kcontrol,
+			       struct snd_ctl_elem_value *ucontrol)
+{
+	ucontrol->value.integer.value[0] = msm_ec_ref_ch;
+	pr_debug("%s: msm_ec_ref_ch = %ld\n", __func__,
+		ucontrol->value.integer.value[0]);
+	return 0;
+}
+
+static int msm_ec_ref_ch_put(struct snd_kcontrol *kcontrol,
+			       struct snd_ctl_elem_value *ucontrol)
+{
+	msm_ec_ref_ch = ucontrol->value.integer.value[0];
+	pr_debug("%s: msm_ec_ref_ch = %d\n", __func__, msm_ec_ref_ch);
+	adm_num_ec_ref_rx_chans(msm_ec_ref_ch);
+	return 0;
+}
+
+static const char *const ec_ref_ch_text[] = {"Zero", "One", "Two", "Three",
+	"Four", "Five", "Six", "Seven", "Eight"};
+
+static int msm_ec_ref_bit_format_get(struct snd_kcontrol *kcontrol,
+			       struct snd_ctl_elem_value *ucontrol)
+{
+	switch (msm_ec_ref_bit_format) {
+	case SNDRV_PCM_FORMAT_S24_LE:
+		ucontrol->value.integer.value[0] = 2;
+		break;
+	case SNDRV_PCM_FORMAT_S16_LE:
+		ucontrol->value.integer.value[0] = 1;
+		break;
+	default:
+		ucontrol->value.integer.value[0] = 0;
+		break;
+	}
+	pr_debug("%s: msm_ec_ref_bit_format = %ld\n",
+		 __func__, ucontrol->value.integer.value[0]);
+	return 0;
+}
+
+static int msm_ec_ref_bit_format_put(struct snd_kcontrol *kcontrol,
+			       struct snd_ctl_elem_value *ucontrol)
+{
+	u16 bit_width = 0;
+
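+	/* Mapping follows ec_ref_bit_format_text[]: 0 (unset), S16_LE, S24_LE. */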
+	switch (ucontrol->value.integer.value[0]) {
+	case 2:
+		msm_ec_ref_bit_format = SNDRV_PCM_FORMAT_S24_LE;
+		break;
+	case 1:
+		msm_ec_ref_bit_format = SNDRV_PCM_FORMAT_S16_LE;
+		break;
+	default:
+		msm_ec_ref_bit_format = 0;
+		break;
+	}
+
+	if (msm_ec_ref_bit_format == SNDRV_PCM_FORMAT_S16_LE)
+		bit_width = 16;
+	else if (msm_ec_ref_bit_format == SNDRV_PCM_FORMAT_S24_LE)
+		bit_width = 24;
+
+	pr_debug("%s: msm_ec_ref_bit_format = %d\n",
+		 __func__, msm_ec_ref_bit_format);
+	adm_ec_ref_rx_bit_width(bit_width);
+	return 0;
+}
+
+static const char *const ec_ref_bit_format_text[] = {"0", "S16_LE", "S24_LE"};
+
+static int msm_ec_ref_rate_get(struct snd_kcontrol *kcontrol,
+				      struct snd_ctl_elem_value *ucontrol)
+{
+	ucontrol->value.integer.value[0] = msm_ec_ref_sampling_rate;
+	pr_debug("%s: msm_ec_ref_sampling_rate = %ld\n",
+		 __func__, ucontrol->value.integer.value[0]);
+	return 0;
+}
+
+static int msm_ec_ref_rate_put(struct snd_kcontrol *kcontrol,
+				      struct snd_ctl_elem_value *ucontrol)
+{
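+	/* Index follows ec_ref_rate_text[]; unknown values default to 48000. */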
+	switch (ucontrol->value.integer.value[0]) {
+	case 0:
+		msm_ec_ref_sampling_rate = 0;
+		break;
+	case 1:
+		msm_ec_ref_sampling_rate = 8000;
+		break;
+	case 2:
+		msm_ec_ref_sampling_rate = 16000;
+		break;
+	case 3:
+		msm_ec_ref_sampling_rate = 32000;
+		break;
+	case 4:
+		msm_ec_ref_sampling_rate = 44100;
+		break;
+	case 5:
+		msm_ec_ref_sampling_rate = 48000;
+		break;
+	case 6:
+		msm_ec_ref_sampling_rate = 96000;
+		break;
+	case 7:
+		msm_ec_ref_sampling_rate = 192000;
+		break;
+	case 8:
+		msm_ec_ref_sampling_rate = 384000;
+		break;
+	default:
+		msm_ec_ref_sampling_rate = 48000;
+		break;
+	}
+	pr_debug("%s: msm_ec_ref_sampling_rate = %d\n",
+		 __func__, msm_ec_ref_sampling_rate);
+	adm_ec_ref_rx_sampling_rate(msm_ec_ref_sampling_rate);
+	return 0;
+}
+
+static const char *const ec_ref_rate_text[] = {"0", "8000", "16000",
+	"32000", "44100", "48000", "96000", "192000", "384000"};
+
+static const struct soc_enum msm_route_ec_ref_params_enum[] = {
+	SOC_ENUM_SINGLE_EXT(9, ec_ref_ch_text),
+	SOC_ENUM_SINGLE_EXT(3, ec_ref_bit_format_text),
+	SOC_ENUM_SINGLE_EXT(9, ec_ref_rate_text),
+};
+
+static const struct snd_kcontrol_new ec_ref_param_controls[] = {
+	SOC_ENUM_EXT("EC Reference Channels", msm_route_ec_ref_params_enum[0],
+		msm_ec_ref_ch_get, msm_ec_ref_ch_put),
+	SOC_ENUM_EXT("EC Reference Bit Format", msm_route_ec_ref_params_enum[1],
+		msm_ec_ref_bit_format_get, msm_ec_ref_bit_format_put),
+	SOC_ENUM_EXT("EC Reference SampleRate", msm_route_ec_ref_params_enum[2],
+		msm_ec_ref_rate_get, msm_ec_ref_rate_put),
+};
+
+static int msm_routing_ec_ref_rx_get(struct snd_kcontrol *kcontrol,
+				struct snd_ctl_elem_value *ucontrol)
+{
+	pr_debug("%s: ec_ref_rx  = %d", __func__, msm_route_ec_ref_rx);
+	mutex_lock(&routing_lock);
+	ucontrol->value.integer.value[0] = msm_route_ec_ref_rx;
+	mutex_unlock(&routing_lock);
+	return 0;
+}
+
+static int msm_routing_ec_ref_rx_put(struct snd_kcontrol *kcontrol,
+				struct snd_ctl_elem_value *ucontrol)
+{
+	int ec_ref_port_id;
+	struct snd_soc_dapm_widget_list *wlist =
+					dapm_kcontrol_get_wlist(kcontrol);
+	struct snd_soc_dapm_widget *widget = wlist->widgets[0];
+	struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
+	struct snd_soc_dapm_update *update = NULL;
+
+	mutex_lock(&routing_lock);
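+	/*
+	 * Index follows ec_ref_rx[]; index 8 ("PROXY_RX") has no case of its
+	 * own and so falls through to the default error path.
+	 */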
+	switch (ucontrol->value.integer.value[0]) {
+	case 0:
+		msm_route_ec_ref_rx = 0;
+		ec_ref_port_id = AFE_PORT_INVALID;
+		break;
+	case 1:
+		msm_route_ec_ref_rx = 1;
+		ec_ref_port_id = SLIMBUS_0_RX;
+		break;
+	case 2:
+		msm_route_ec_ref_rx = 2;
+		ec_ref_port_id = AFE_PORT_ID_PRIMARY_MI2S_RX;
+		break;
+	case 3:
+		msm_route_ec_ref_rx = 3;
+		ec_ref_port_id = AFE_PORT_ID_PRIMARY_MI2S_TX;
+		break;
+	case 4:
+		msm_route_ec_ref_rx = 4;
+		ec_ref_port_id = AFE_PORT_ID_SECONDARY_MI2S_TX;
+		break;
+	case 5:
+		msm_route_ec_ref_rx = 5;
+		ec_ref_port_id = AFE_PORT_ID_TERTIARY_MI2S_TX;
+		break;
+	case 6:
+		msm_route_ec_ref_rx = 6;
+		ec_ref_port_id = AFE_PORT_ID_QUATERNARY_MI2S_TX;
+		break;
+	case 7:
+		msm_route_ec_ref_rx = 7;
+		ec_ref_port_id = AFE_PORT_ID_SECONDARY_MI2S_RX;
+		break;
+	case 9:
+		msm_route_ec_ref_rx = 9;
+		ec_ref_port_id = SLIMBUS_5_RX;
+		break;
+	case 10:
+		msm_route_ec_ref_rx = 10;
+		ec_ref_port_id = SLIMBUS_1_TX;
+		break;
+	case 11:
+		msm_route_ec_ref_rx = 11;
+		ec_ref_port_id = AFE_PORT_ID_QUATERNARY_TDM_TX_1;
+		break;
+	case 12:
+		msm_route_ec_ref_rx = 12;
+		ec_ref_port_id = AFE_PORT_ID_QUATERNARY_TDM_RX;
+		break;
+	case 13:
+		msm_route_ec_ref_rx = 13;
+		ec_ref_port_id = AFE_PORT_ID_QUATERNARY_TDM_RX_1;
+		break;
+	case 14:
+		msm_route_ec_ref_rx = 14;
+		ec_ref_port_id = AFE_PORT_ID_QUATERNARY_TDM_RX_2;
+		break;
+	case 15:
+		msm_route_ec_ref_rx = 15;
+		ec_ref_port_id = SLIMBUS_6_RX;
+		break;
+	case 16:
+		msm_route_ec_ref_rx = 16;
+		ec_ref_port_id = AFE_PORT_ID_TERTIARY_MI2S_RX;
+		break;
+	case 17:
+		msm_route_ec_ref_rx = 17;
+		ec_ref_port_id = AFE_PORT_ID_QUATERNARY_MI2S_RX;
+		break;
+	case 18:
+		msm_route_ec_ref_rx = 18;
+		ec_ref_port_id = AFE_PORT_ID_TERTIARY_TDM_TX;
+		break;
+	case 19:
+		msm_route_ec_ref_rx = 19;
+		ec_ref_port_id = AFE_PORT_ID_USB_RX;
+		break;
+	case 20:
+		msm_route_ec_ref_rx = 20;
+		ec_ref_port_id = AFE_PORT_ID_INT0_MI2S_RX;
+		break;
+	case 21:
+		msm_route_ec_ref_rx = 21;
+		ec_ref_port_id = AFE_PORT_ID_INT4_MI2S_RX;
+		break;
+	case 22:
+		msm_route_ec_ref_rx = 22;
+		ec_ref_port_id = AFE_PORT_ID_INT3_MI2S_TX;
+		break;
+	default:
+		msm_route_ec_ref_rx = 0; /* NONE */
+		pr_err("%s EC ref rx %ld not valid\n",
+			__func__, ucontrol->value.integer.value[0]);
+		ec_ref_port_id = AFE_PORT_INVALID;
+		break;
+	}
+	adm_ec_ref_rx_id(ec_ref_port_id);
+	pr_debug("%s: msm_route_ec_ref_rx = %d\n",
+	    __func__, msm_route_ec_ref_rx);
+	mutex_unlock(&routing_lock);
+	snd_soc_dapm_mux_update_power(widget->dapm, kcontrol,
+					msm_route_ec_ref_rx, e, update);
+	return 0;
+}
+
+static const char *const ec_ref_rx[] = { "None", "SLIM_RX", "I2S_RX",
+	"PRI_MI2S_TX", "SEC_MI2S_TX",
+	"TERT_MI2S_TX", "QUAT_MI2S_TX", "SEC_I2S_RX", "PROXY_RX",
+	"SLIM_5_RX", "SLIM_1_TX", "QUAT_TDM_TX_1",
+	"QUAT_TDM_RX_0", "QUAT_TDM_RX_1", "QUAT_TDM_RX_2", "SLIM_6_RX",
+	"TERT_MI2S_RX", "QUAT_MI2S_RX", "TERT_TDM_TX_0", "USB_AUDIO_RX",
+	"INT0_MI2S_RX", "INT4_MI2S_RX", "INT3_MI2S_TX"};
+
+static const struct soc_enum msm_route_ec_ref_rx_enum[] = {
+	SOC_ENUM_SINGLE_EXT(ARRAY_SIZE(ec_ref_rx), ec_ref_rx),
+};
+
+static const struct snd_kcontrol_new ext_ec_ref_mux_ul1 =
+	SOC_DAPM_ENUM_EXT("AUDIO_REF_EC_UL1 MUX Mux",
+		msm_route_ec_ref_rx_enum[0],
+		msm_routing_ec_ref_rx_get, msm_routing_ec_ref_rx_put);
+
+static const struct snd_kcontrol_new ext_ec_ref_mux_ul2 =
+	SOC_DAPM_ENUM_EXT("AUDIO_REF_EC_UL2 MUX Mux",
+		msm_route_ec_ref_rx_enum[0],
+		msm_routing_ec_ref_rx_get, msm_routing_ec_ref_rx_put);
+
+static const struct snd_kcontrol_new ext_ec_ref_mux_ul3 =
+	SOC_DAPM_ENUM_EXT("AUDIO_REF_EC_UL3 MUX Mux",
+		msm_route_ec_ref_rx_enum[0],
+		msm_routing_ec_ref_rx_get, msm_routing_ec_ref_rx_put);
+
+static const struct snd_kcontrol_new ext_ec_ref_mux_ul4 =
+	SOC_DAPM_ENUM_EXT("AUDIO_REF_EC_UL4 MUX Mux",
+		msm_route_ec_ref_rx_enum[0],
+		msm_routing_ec_ref_rx_get, msm_routing_ec_ref_rx_put);
+
+static const struct snd_kcontrol_new ext_ec_ref_mux_ul5 =
+	SOC_DAPM_ENUM_EXT("AUDIO_REF_EC_UL5 MUX Mux",
+		msm_route_ec_ref_rx_enum[0],
+		msm_routing_ec_ref_rx_get, msm_routing_ec_ref_rx_put);
+
+static const struct snd_kcontrol_new ext_ec_ref_mux_ul6 =
+	SOC_DAPM_ENUM_EXT("AUDIO_REF_EC_UL6 MUX Mux",
+		msm_route_ec_ref_rx_enum[0],
+		msm_routing_ec_ref_rx_get, msm_routing_ec_ref_rx_put);
+
+static const struct snd_kcontrol_new ext_ec_ref_mux_ul8 =
+	SOC_DAPM_ENUM_EXT("AUDIO_REF_EC_UL8 MUX Mux",
+		msm_route_ec_ref_rx_enum[0],
+		msm_routing_ec_ref_rx_get, msm_routing_ec_ref_rx_put);
+
+static const struct snd_kcontrol_new ext_ec_ref_mux_ul9 =
+	SOC_DAPM_ENUM_EXT("AUDIO_REF_EC_UL9 MUX Mux",
+		msm_route_ec_ref_rx_enum[0],
+		msm_routing_ec_ref_rx_get, msm_routing_ec_ref_rx_put);
+
+static const struct snd_kcontrol_new ext_ec_ref_mux_ul16 =
+	SOC_DAPM_ENUM_EXT("AUDIO_REF_EC_UL16 MUX Mux",
+		msm_route_ec_ref_rx_enum[0],
+		msm_routing_ec_ref_rx_get, msm_routing_ec_ref_rx_put);
+
+static const struct snd_kcontrol_new ext_ec_ref_mux_ul17 =
+	SOC_DAPM_ENUM_EXT("AUDIO_REF_EC_UL17 MUX Mux",
+		msm_route_ec_ref_rx_enum[0],
+		msm_routing_ec_ref_rx_get, msm_routing_ec_ref_rx_put);
+
+static const struct snd_kcontrol_new ext_ec_ref_mux_ul18 =
+	SOC_DAPM_ENUM_EXT("AUDIO_REF_EC_UL18 MUX Mux",
+		msm_route_ec_ref_rx_enum[0],
+		msm_routing_ec_ref_rx_get, msm_routing_ec_ref_rx_put);
+
+static const struct snd_kcontrol_new ext_ec_ref_mux_ul19 =
+	SOC_DAPM_ENUM_EXT("AUDIO_REF_EC_UL19 MUX Mux",
+		msm_route_ec_ref_rx_enum[0],
+		msm_routing_ec_ref_rx_get, msm_routing_ec_ref_rx_put);
+
+static int msm_routing_ext_ec_get(struct snd_kcontrol *kcontrol,
+				  struct snd_ctl_elem_value *ucontrol)
+{
+	pr_debug("%s: ext_ec_ref_rx  = %x\n", __func__, msm_route_ext_ec_ref);
+
+	mutex_lock(&routing_lock);
+	ucontrol->value.integer.value[0] = msm_route_ext_ec_ref;
+	mutex_unlock(&routing_lock);
+	return 0;
+}
+
+static int msm_routing_ext_ec_put(struct snd_kcontrol *kcontrol,
+				  struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_dapm_widget_list *wlist =
+					dapm_kcontrol_get_wlist(kcontrol);
+	struct snd_soc_dapm_widget *widget = wlist->widgets[0];
+	int mux = ucontrol->value.enumerated.item[0];
+	struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
+	int ret = 1;
+	bool state = true;
+	uint16_t ext_ec_ref_port_id;
+	struct snd_soc_dapm_update *update = NULL;
+
+	if (mux >= e->items) {
+		pr_err("%s: Invalid mux value %d\n", __func__, mux);
+		return -EINVAL;
+	}
+
+	mutex_lock(&routing_lock);
+	msm_route_ext_ec_ref = mux;
+
+	switch (msm_route_ext_ec_ref) {
+	case EXT_EC_REF_PRI_MI2S_TX:
+		ext_ec_ref_port_id = AFE_PORT_ID_PRIMARY_MI2S_TX;
+		break;
+	case EXT_EC_REF_SEC_MI2S_TX:
+		ext_ec_ref_port_id = AFE_PORT_ID_SECONDARY_MI2S_TX;
+		break;
+	case EXT_EC_REF_TERT_MI2S_TX:
+		ext_ec_ref_port_id = AFE_PORT_ID_TERTIARY_MI2S_TX;
+		break;
+	case EXT_EC_REF_QUAT_MI2S_TX:
+		ext_ec_ref_port_id = AFE_PORT_ID_QUATERNARY_MI2S_TX;
+		break;
+	case EXT_EC_REF_QUIN_MI2S_TX:
+		ext_ec_ref_port_id = AFE_PORT_ID_QUINARY_MI2S_TX;
+		break;
+	case EXT_EC_REF_SLIM_1_TX:
+		ext_ec_ref_port_id = SLIMBUS_1_TX;
+		break;
+	case EXT_EC_REF_NONE:
+	default:
+		ext_ec_ref_port_id = AFE_PORT_INVALID;
+		state = false;
+		break;
+	}
+
+	pr_debug("%s: val = %d ext_ec_ref_port_id = 0x%0x state = %d\n",
+		 __func__, msm_route_ext_ec_ref, ext_ec_ref_port_id, state);
+
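+	/* A zero return from voc_set_ext_ec_ref_port_id() means success. */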
+	if (!voc_set_ext_ec_ref_port_id(ext_ec_ref_port_id, state)) {
+		mutex_unlock(&routing_lock);
+		snd_soc_dapm_mux_update_power(widget->dapm, kcontrol, mux,
+					      e, update);
+	} else {
+		ret = -EINVAL;
+		mutex_unlock(&routing_lock);
+	}
+	return ret;
+}
+
+static const char * const ext_ec_ref_rx[] = {"NONE", "PRI_MI2S_TX",
+					"SEC_MI2S_TX", "TERT_MI2S_TX",
+					"QUAT_MI2S_TX", "QUIN_MI2S_TX",
+					"SLIM_1_TX"};
+
+static const struct soc_enum msm_route_ext_ec_ref_rx_enum[] = {
+	SOC_ENUM_SINGLE_EXT(ARRAY_SIZE(ext_ec_ref_rx), ext_ec_ref_rx),
+};
+
+static const struct snd_kcontrol_new voc_ext_ec_mux =
+	SOC_DAPM_ENUM_EXT("VOC_EXT_EC MUX Mux", msm_route_ext_ec_ref_rx_enum[0],
+			  msm_routing_ext_ec_get, msm_routing_ext_ec_put);
+
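+/*
+ * Per back-end audio mixer controls: one switch per MultiMedia front end.
+ * The back-end DAI id is carried in .reg and the front-end DAI id in
+ * .shift, which the msm_routing_{get,put}_audio_mixer() handlers use to
+ * connect or disconnect the stream.
+ */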
+static const struct snd_kcontrol_new pri_i2s_rx_mixer_controls[] = {
+	SOC_SINGLE_EXT("MultiMedia1", MSM_BACKEND_DAI_PRI_I2S_RX ,
+	MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia2", MSM_BACKEND_DAI_PRI_I2S_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA2, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia3", MSM_BACKEND_DAI_PRI_I2S_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA3, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia4", MSM_BACKEND_DAI_PRI_I2S_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA4, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia5", MSM_BACKEND_DAI_PRI_I2S_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA5, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia6", MSM_BACKEND_DAI_PRI_I2S_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA6, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia7", MSM_BACKEND_DAI_PRI_I2S_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA7, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia8", MSM_BACKEND_DAI_PRI_I2S_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA8, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia9", MSM_BACKEND_DAI_PRI_I2S_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA9, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia10", MSM_BACKEND_DAI_PRI_I2S_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA10, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia11", MSM_BACKEND_DAI_PRI_I2S_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA11, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia12", MSM_BACKEND_DAI_PRI_I2S_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA12, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia13", MSM_BACKEND_DAI_PRI_I2S_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA13, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia14", MSM_BACKEND_DAI_PRI_I2S_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA14, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia15", MSM_BACKEND_DAI_PRI_I2S_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA15, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia16", MSM_BACKEND_DAI_PRI_I2S_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia17", MSM_BACKEND_DAI_PRI_I2S_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA17, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia18", MSM_BACKEND_DAI_PRI_I2S_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA18, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia19", MSM_BACKEND_DAI_PRI_I2S_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA19, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia26", MSM_BACKEND_DAI_PRI_I2S_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA26, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+};
+
+static const struct snd_kcontrol_new sec_i2s_rx_mixer_controls[] = {
+	SOC_SINGLE_EXT("MultiMedia1", MSM_BACKEND_DAI_SEC_I2S_RX ,
+	MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia2", MSM_BACKEND_DAI_SEC_I2S_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA2, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia3", MSM_BACKEND_DAI_SEC_I2S_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA3, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia4", MSM_BACKEND_DAI_SEC_I2S_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA4, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia5", MSM_BACKEND_DAI_SEC_I2S_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA5, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia6", MSM_BACKEND_DAI_SEC_I2S_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA6, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia7", MSM_BACKEND_DAI_SEC_I2S_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA7, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia8", MSM_BACKEND_DAI_SEC_I2S_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA8, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia9", MSM_BACKEND_DAI_SEC_I2S_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA9, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia10", MSM_BACKEND_DAI_SEC_I2S_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA10, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia11", MSM_BACKEND_DAI_SEC_I2S_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA11, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia12", MSM_BACKEND_DAI_SEC_I2S_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA12, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia13", MSM_BACKEND_DAI_SEC_I2S_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA13, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia14", MSM_BACKEND_DAI_SEC_I2S_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA14, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia15", MSM_BACKEND_DAI_SEC_I2S_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA15, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia16", MSM_BACKEND_DAI_SEC_I2S_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia17", MSM_BACKEND_DAI_SEC_I2S_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA17, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia18", MSM_BACKEND_DAI_SEC_I2S_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA18, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia19", MSM_BACKEND_DAI_SEC_I2S_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA19, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia26", MSM_BACKEND_DAI_SEC_I2S_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA26, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+};
+
+static const struct snd_kcontrol_new spdif_rx_mixer_controls[] = {
+	SOC_SINGLE_EXT("MultiMedia1", MSM_BACKEND_DAI_SPDIF_RX ,
+	MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia2", MSM_BACKEND_DAI_SPDIF_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA2, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia3", MSM_BACKEND_DAI_SPDIF_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA3, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia4", MSM_BACKEND_DAI_SPDIF_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA4, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia5", MSM_BACKEND_DAI_SPDIF_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA5, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia6", MSM_BACKEND_DAI_SPDIF_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA6, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia7", MSM_BACKEND_DAI_SPDIF_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA7, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia8", MSM_BACKEND_DAI_SPDIF_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA8, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia9", MSM_BACKEND_DAI_SPDIF_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA9, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia10", MSM_BACKEND_DAI_SPDIF_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA10, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia11", MSM_BACKEND_DAI_SPDIF_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA11, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia12", MSM_BACKEND_DAI_SPDIF_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA12, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia13", MSM_BACKEND_DAI_SPDIF_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA13, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia14", MSM_BACKEND_DAI_SPDIF_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA14, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia15", MSM_BACKEND_DAI_SPDIF_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA15, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia16", MSM_BACKEND_DAI_SPDIF_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia17", MSM_BACKEND_DAI_SPDIF_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA17, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia18", MSM_BACKEND_DAI_SPDIF_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA18, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia19", MSM_BACKEND_DAI_SPDIF_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA19, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia26", MSM_BACKEND_DAI_SPDIF_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA26, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+};
+
+static const struct snd_kcontrol_new slimbus_2_rx_mixer_controls[] = {
+	SOC_SINGLE_EXT("MultiMedia1", MSM_BACKEND_DAI_SLIMBUS_2_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia2", MSM_BACKEND_DAI_SLIMBUS_2_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA2, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia3", MSM_BACKEND_DAI_SLIMBUS_2_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA3, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia4", MSM_BACKEND_DAI_SLIMBUS_2_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA4, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia5", MSM_BACKEND_DAI_SLIMBUS_2_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA5, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia6", MSM_BACKEND_DAI_SLIMBUS_2_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA6, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia7", MSM_BACKEND_DAI_SLIMBUS_2_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA7, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia8", MSM_BACKEND_DAI_SLIMBUS_2_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA8, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia9", MSM_BACKEND_DAI_SLIMBUS_2_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA9, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia10", MSM_BACKEND_DAI_SLIMBUS_2_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA10, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia11", MSM_BACKEND_DAI_SLIMBUS_2_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA11, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia12", MSM_BACKEND_DAI_SLIMBUS_2_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA12, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia13", MSM_BACKEND_DAI_SLIMBUS_2_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA13, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia14", MSM_BACKEND_DAI_SLIMBUS_2_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA14, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia15", MSM_BACKEND_DAI_SLIMBUS_2_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA15, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia16", MSM_BACKEND_DAI_SLIMBUS_2_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia26", MSM_BACKEND_DAI_SLIMBUS_2_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA26, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+};
+
+static const struct snd_kcontrol_new slimbus_5_rx_mixer_controls[] = {
+	SOC_SINGLE_EXT("MultiMedia1", MSM_BACKEND_DAI_SLIMBUS_5_RX ,
+	MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia2", MSM_BACKEND_DAI_SLIMBUS_5_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA2, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia3", MSM_BACKEND_DAI_SLIMBUS_5_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA3, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia4", MSM_BACKEND_DAI_SLIMBUS_5_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA4, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia5", MSM_BACKEND_DAI_SLIMBUS_5_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA5, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia6", MSM_BACKEND_DAI_SLIMBUS_5_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA6, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia7", MSM_BACKEND_DAI_SLIMBUS_5_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA7, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia8", MSM_BACKEND_DAI_SLIMBUS_5_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA8, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia9", MSM_BACKEND_DAI_SLIMBUS_5_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA9, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia10", MSM_BACKEND_DAI_SLIMBUS_5_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA10, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia11", MSM_BACKEND_DAI_SLIMBUS_5_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA11, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia12", MSM_BACKEND_DAI_SLIMBUS_5_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA12, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia13", MSM_BACKEND_DAI_SLIMBUS_5_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA13, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia14", MSM_BACKEND_DAI_SLIMBUS_5_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA14, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia15", MSM_BACKEND_DAI_SLIMBUS_5_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA15, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia16", MSM_BACKEND_DAI_SLIMBUS_5_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia17", MSM_BACKEND_DAI_SLIMBUS_5_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA17, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia18", MSM_BACKEND_DAI_SLIMBUS_5_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA18, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia19", MSM_BACKEND_DAI_SLIMBUS_5_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA19, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia26", MSM_BACKEND_DAI_SLIMBUS_5_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA26, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+};
+
+static const struct snd_kcontrol_new slimbus_rx_mixer_controls[] = {
+	SOC_SINGLE_EXT("MultiMedia1", MSM_BACKEND_DAI_SLIMBUS_0_RX ,
+	MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia2", MSM_BACKEND_DAI_SLIMBUS_0_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA2, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia3", MSM_BACKEND_DAI_SLIMBUS_0_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA3, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia4", MSM_BACKEND_DAI_SLIMBUS_0_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA4, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia5", MSM_BACKEND_DAI_SLIMBUS_0_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA5, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia6", MSM_BACKEND_DAI_SLIMBUS_0_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA6, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia7", MSM_BACKEND_DAI_SLIMBUS_0_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA7, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia8", MSM_BACKEND_DAI_SLIMBUS_0_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA8, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia9", MSM_BACKEND_DAI_SLIMBUS_0_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA9, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia10", MSM_BACKEND_DAI_SLIMBUS_0_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA10, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia11", MSM_BACKEND_DAI_SLIMBUS_0_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA11, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia12", MSM_BACKEND_DAI_SLIMBUS_0_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA12, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia13", MSM_BACKEND_DAI_SLIMBUS_0_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA13, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia14", MSM_BACKEND_DAI_SLIMBUS_0_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA14, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia15", MSM_BACKEND_DAI_SLIMBUS_0_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA15, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia16", MSM_BACKEND_DAI_SLIMBUS_0_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia17", MSM_BACKEND_DAI_SLIMBUS_0_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA17, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia18", MSM_BACKEND_DAI_SLIMBUS_0_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA18, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia19", MSM_BACKEND_DAI_SLIMBUS_0_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA19, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia21", MSM_BACKEND_DAI_SLIMBUS_0_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA21, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia22", MSM_BACKEND_DAI_SLIMBUS_0_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA22, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia23", MSM_BACKEND_DAI_SLIMBUS_0_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA23, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia24", MSM_BACKEND_DAI_SLIMBUS_0_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA24, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia25", MSM_BACKEND_DAI_SLIMBUS_0_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA25, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia26", MSM_BACKEND_DAI_SLIMBUS_0_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA26, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+};
+
+static const struct snd_kcontrol_new mi2s_rx_mixer_controls[] = {
+	SOC_SINGLE_EXT("MultiMedia1", MSM_BACKEND_DAI_MI2S_RX ,
+	MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia2", MSM_BACKEND_DAI_MI2S_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA2, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia3", MSM_BACKEND_DAI_MI2S_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA3, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia4", MSM_BACKEND_DAI_MI2S_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA4, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia5", MSM_BACKEND_DAI_MI2S_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA5, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia6", MSM_BACKEND_DAI_MI2S_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA6, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia7", MSM_BACKEND_DAI_MI2S_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA7, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia8", MSM_BACKEND_DAI_MI2S_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA8, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia9", MSM_BACKEND_DAI_MI2S_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA9, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia10", MSM_BACKEND_DAI_MI2S_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA10, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia11", MSM_BACKEND_DAI_MI2S_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA11, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia12", MSM_BACKEND_DAI_MI2S_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA12, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia13", MSM_BACKEND_DAI_MI2S_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA13, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia14", MSM_BACKEND_DAI_MI2S_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA14, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia15", MSM_BACKEND_DAI_MI2S_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA15, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia16", MSM_BACKEND_DAI_MI2S_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia17", MSM_BACKEND_DAI_MI2S_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA17, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia18", MSM_BACKEND_DAI_MI2S_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA18, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia19", MSM_BACKEND_DAI_MI2S_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA19, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia26", MSM_BACKEND_DAI_MI2S_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA26, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+};
+
+static const struct snd_kcontrol_new quaternary_mi2s_rx_mixer_controls[] = {
+	SOC_SINGLE_EXT("MultiMedia1", MSM_BACKEND_DAI_QUATERNARY_MI2S_RX ,
+	MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia2", MSM_BACKEND_DAI_QUATERNARY_MI2S_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA2, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia3", MSM_BACKEND_DAI_QUATERNARY_MI2S_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA3, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia4", MSM_BACKEND_DAI_QUATERNARY_MI2S_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA4, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia5", MSM_BACKEND_DAI_QUATERNARY_MI2S_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA5, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia6", MSM_BACKEND_DAI_QUATERNARY_MI2S_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA6, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia7", MSM_BACKEND_DAI_QUATERNARY_MI2S_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA7, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia8", MSM_BACKEND_DAI_QUATERNARY_MI2S_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA8, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia9", MSM_BACKEND_DAI_QUATERNARY_MI2S_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA9, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia10", MSM_BACKEND_DAI_QUATERNARY_MI2S_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA10, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia11", MSM_BACKEND_DAI_QUATERNARY_MI2S_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA11, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia12", MSM_BACKEND_DAI_QUATERNARY_MI2S_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA12, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia13", MSM_BACKEND_DAI_QUATERNARY_MI2S_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA13, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia14", MSM_BACKEND_DAI_QUATERNARY_MI2S_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA14, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia15", MSM_BACKEND_DAI_QUATERNARY_MI2S_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA15, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia16", MSM_BACKEND_DAI_QUATERNARY_MI2S_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia17", MSM_BACKEND_DAI_QUATERNARY_MI2S_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA17, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia18", MSM_BACKEND_DAI_QUATERNARY_MI2S_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA18, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia19", MSM_BACKEND_DAI_QUATERNARY_MI2S_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA19, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia26", MSM_BACKEND_DAI_QUATERNARY_MI2S_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA26, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+};
+
+static const struct snd_kcontrol_new quinary_mi2s_rx_mixer_controls[] = {
+	SOC_SINGLE_EXT("MultiMedia1", MSM_BACKEND_DAI_QUINARY_MI2S_RX ,
+	MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia2", MSM_BACKEND_DAI_QUINARY_MI2S_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA2, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia3", MSM_BACKEND_DAI_QUINARY_MI2S_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA3, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia4", MSM_BACKEND_DAI_QUINARY_MI2S_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA4, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia5", MSM_BACKEND_DAI_QUINARY_MI2S_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA5, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia6", MSM_BACKEND_DAI_QUINARY_MI2S_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA6, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia7", MSM_BACKEND_DAI_QUINARY_MI2S_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA7, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia8", MSM_BACKEND_DAI_QUINARY_MI2S_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA8, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia9", MSM_BACKEND_DAI_QUINARY_MI2S_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA9, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia10", MSM_BACKEND_DAI_QUINARY_MI2S_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA10, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia11", MSM_BACKEND_DAI_QUINARY_MI2S_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA11, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia12", MSM_BACKEND_DAI_QUINARY_MI2S_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA12, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia13", MSM_BACKEND_DAI_QUINARY_MI2S_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA13, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia14", MSM_BACKEND_DAI_QUINARY_MI2S_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA14, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia15", MSM_BACKEND_DAI_QUINARY_MI2S_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA15, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia16", MSM_BACKEND_DAI_QUINARY_MI2S_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia17", MSM_BACKEND_DAI_QUINARY_MI2S_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA17, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia18", MSM_BACKEND_DAI_QUINARY_MI2S_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA18, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia19", MSM_BACKEND_DAI_QUINARY_MI2S_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA19, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia26", MSM_BACKEND_DAI_QUINARY_MI2S_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA26, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+};
+
+static const struct snd_kcontrol_new tertiary_mi2s_rx_mixer_controls[] = {
+	SOC_SINGLE_EXT("MultiMedia1", MSM_BACKEND_DAI_TERTIARY_MI2S_RX ,
+	MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia2", MSM_BACKEND_DAI_TERTIARY_MI2S_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA2, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia3", MSM_BACKEND_DAI_TERTIARY_MI2S_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA3, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia4", MSM_BACKEND_DAI_TERTIARY_MI2S_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA4, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia5", MSM_BACKEND_DAI_TERTIARY_MI2S_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA5, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia7", MSM_BACKEND_DAI_TERTIARY_MI2S_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA7, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia8", MSM_BACKEND_DAI_TERTIARY_MI2S_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA8, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia10", MSM_BACKEND_DAI_TERTIARY_MI2S_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA10, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia11", MSM_BACKEND_DAI_TERTIARY_MI2S_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA11, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia12", MSM_BACKEND_DAI_TERTIARY_MI2S_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA12, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia13", MSM_BACKEND_DAI_TERTIARY_MI2S_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA13, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia14", MSM_BACKEND_DAI_TERTIARY_MI2S_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA14, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia15", MSM_BACKEND_DAI_TERTIARY_MI2S_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA15, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia16", MSM_BACKEND_DAI_TERTIARY_MI2S_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia17", MSM_BACKEND_DAI_TERTIARY_MI2S_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA17, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia18", MSM_BACKEND_DAI_TERTIARY_MI2S_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA18, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia19", MSM_BACKEND_DAI_TERTIARY_MI2S_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA19, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia26", MSM_BACKEND_DAI_TERTIARY_MI2S_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA26, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+};
+
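+/*
+ * Mixer for the second data line (SD1) of the secondary MI2S RX port;
+ * only the MultiMedia6 frontend is exposed here.
+ */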
+static const struct snd_kcontrol_new secondary_mi2s_rx2_mixer_controls[] = {
+	SOC_SINGLE_EXT("MultiMedia6", MSM_BACKEND_DAI_SECONDARY_MI2S_RX_SD1,
+	MSM_FRONTEND_DAI_MULTIMEDIA6, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+};
+
+static const struct snd_kcontrol_new secondary_mi2s_rx_mixer_controls[] = {
+	SOC_SINGLE_EXT("MultiMedia1", MSM_BACKEND_DAI_SECONDARY_MI2S_RX ,
+	MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia2", MSM_BACKEND_DAI_SECONDARY_MI2S_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA2, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia3", MSM_BACKEND_DAI_SECONDARY_MI2S_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA3, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia4", MSM_BACKEND_DAI_SECONDARY_MI2S_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA4, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia5", MSM_BACKEND_DAI_SECONDARY_MI2S_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA5, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia6", MSM_BACKEND_DAI_SECONDARY_MI2S_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA6, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia7", MSM_BACKEND_DAI_SECONDARY_MI2S_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA7, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia8", MSM_BACKEND_DAI_SECONDARY_MI2S_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA8, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia9", MSM_BACKEND_DAI_SECONDARY_MI2S_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA9, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia10", MSM_BACKEND_DAI_SECONDARY_MI2S_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA10, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia11", MSM_BACKEND_DAI_SECONDARY_MI2S_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA11, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia12", MSM_BACKEND_DAI_SECONDARY_MI2S_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA12, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia13", MSM_BACKEND_DAI_SECONDARY_MI2S_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA13, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia14", MSM_BACKEND_DAI_SECONDARY_MI2S_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA14, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia15", MSM_BACKEND_DAI_SECONDARY_MI2S_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA15, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia16", MSM_BACKEND_DAI_SECONDARY_MI2S_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia17", MSM_BACKEND_DAI_SECONDARY_MI2S_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA17, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia18", MSM_BACKEND_DAI_SECONDARY_MI2S_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA18, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia19", MSM_BACKEND_DAI_SECONDARY_MI2S_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA19, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia26", MSM_BACKEND_DAI_SECONDARY_MI2S_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA26, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+};
+
+static const struct snd_kcontrol_new primary_mi2s_rx_mixer_controls[] = {
+	SOC_SINGLE_EXT("MultiMedia1", MSM_BACKEND_DAI_PRI_MI2S_RX ,
+	MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia2", MSM_BACKEND_DAI_PRI_MI2S_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA2, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia3", MSM_BACKEND_DAI_PRI_MI2S_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA3, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia4", MSM_BACKEND_DAI_PRI_MI2S_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA4, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia5", MSM_BACKEND_DAI_PRI_MI2S_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA5, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia6", MSM_BACKEND_DAI_PRI_MI2S_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA6, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia7", MSM_BACKEND_DAI_PRI_MI2S_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA7, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia8", MSM_BACKEND_DAI_PRI_MI2S_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA8, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia9", MSM_BACKEND_DAI_PRI_MI2S_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA9, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia10", MSM_BACKEND_DAI_PRI_MI2S_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA10, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia11", MSM_BACKEND_DAI_PRI_MI2S_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA11, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia12", MSM_BACKEND_DAI_PRI_MI2S_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA12, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia13", MSM_BACKEND_DAI_PRI_MI2S_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA13, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia14", MSM_BACKEND_DAI_PRI_MI2S_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA14, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia15", MSM_BACKEND_DAI_PRI_MI2S_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA15, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia16", MSM_BACKEND_DAI_PRI_MI2S_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia17", MSM_BACKEND_DAI_PRI_MI2S_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA17, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia18", MSM_BACKEND_DAI_PRI_MI2S_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA18, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia19", MSM_BACKEND_DAI_PRI_MI2S_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA19, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia26", MSM_BACKEND_DAI_PRI_MI2S_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA26, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+};
+
+static const struct snd_kcontrol_new int0_mi2s_rx_mixer_controls[] = {
+	SOC_SINGLE_EXT("MultiMedia1", MSM_BACKEND_DAI_INT0_MI2S_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia2", MSM_BACKEND_DAI_INT0_MI2S_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA2, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia3", MSM_BACKEND_DAI_INT0_MI2S_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA3, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia4", MSM_BACKEND_DAI_INT0_MI2S_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA4, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia5", MSM_BACKEND_DAI_INT0_MI2S_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA5, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia6", MSM_BACKEND_DAI_INT0_MI2S_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA6, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia7", MSM_BACKEND_DAI_INT0_MI2S_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA7, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia8", MSM_BACKEND_DAI_INT0_MI2S_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA8, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia9", MSM_BACKEND_DAI_INT0_MI2S_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA9, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia10", MSM_BACKEND_DAI_INT0_MI2S_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA10, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia11", MSM_BACKEND_DAI_INT0_MI2S_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA11, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia12", MSM_BACKEND_DAI_INT0_MI2S_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA12, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia13", MSM_BACKEND_DAI_INT0_MI2S_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA13, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia14", MSM_BACKEND_DAI_INT0_MI2S_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA14, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia15", MSM_BACKEND_DAI_INT0_MI2S_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA15, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia16", MSM_BACKEND_DAI_INT0_MI2S_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia26", MSM_BACKEND_DAI_INT0_MI2S_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA26, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+};
+
+static const struct snd_kcontrol_new int4_mi2s_rx_mixer_controls[] = {
+	SOC_SINGLE_EXT("MultiMedia1", MSM_BACKEND_DAI_INT4_MI2S_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia2", MSM_BACKEND_DAI_INT4_MI2S_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA2, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia3", MSM_BACKEND_DAI_INT4_MI2S_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA3, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia4", MSM_BACKEND_DAI_INT4_MI2S_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA4, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia5", MSM_BACKEND_DAI_INT4_MI2S_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA5, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia6", MSM_BACKEND_DAI_INT4_MI2S_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA6, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia7", MSM_BACKEND_DAI_INT4_MI2S_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA7, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia8", MSM_BACKEND_DAI_INT4_MI2S_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA8, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia9", MSM_BACKEND_DAI_INT4_MI2S_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA9, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia10", MSM_BACKEND_DAI_INT4_MI2S_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA10, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia11", MSM_BACKEND_DAI_INT4_MI2S_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA11, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia12", MSM_BACKEND_DAI_INT4_MI2S_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA12, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia13", MSM_BACKEND_DAI_INT4_MI2S_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA13, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia14", MSM_BACKEND_DAI_INT4_MI2S_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA14, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia15", MSM_BACKEND_DAI_INT4_MI2S_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA15, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia16", MSM_BACKEND_DAI_INT4_MI2S_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia26", MSM_BACKEND_DAI_INT4_MI2S_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA26, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+};
+
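+/*
+ * HDMI RX mixer: like the SLIMBUS_0 mixer it also exposes the
+ * MultiMedia21-25 frontends that the MI2S mixers above omit.
+ */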
+static const struct snd_kcontrol_new hdmi_mixer_controls[] = {
+	SOC_SINGLE_EXT("MultiMedia1", MSM_BACKEND_DAI_HDMI_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia2", MSM_BACKEND_DAI_HDMI_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA2, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia3", MSM_BACKEND_DAI_HDMI_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA3, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia4", MSM_BACKEND_DAI_HDMI_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA4, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia5", MSM_BACKEND_DAI_HDMI_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA5, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia6", MSM_BACKEND_DAI_HDMI_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA6, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia7", MSM_BACKEND_DAI_HDMI_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA7, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia8", MSM_BACKEND_DAI_HDMI_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA8, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia9", MSM_BACKEND_DAI_HDMI_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA9, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia10", MSM_BACKEND_DAI_HDMI_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA10, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia11", MSM_BACKEND_DAI_HDMI_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA11, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia12", MSM_BACKEND_DAI_HDMI_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA12, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia13", MSM_BACKEND_DAI_HDMI_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA13, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia14", MSM_BACKEND_DAI_HDMI_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA14, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia15", MSM_BACKEND_DAI_HDMI_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA15, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia16", MSM_BACKEND_DAI_HDMI_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia17", MSM_BACKEND_DAI_HDMI_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA17, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia18", MSM_BACKEND_DAI_HDMI_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA18, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia19", MSM_BACKEND_DAI_HDMI_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA19, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia21", MSM_BACKEND_DAI_HDMI_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA21, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia22", MSM_BACKEND_DAI_HDMI_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA22, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia23", MSM_BACKEND_DAI_HDMI_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA23, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia24", MSM_BACKEND_DAI_HDMI_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA24, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia25", MSM_BACKEND_DAI_HDMI_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA25, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia26", MSM_BACKEND_DAI_HDMI_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA26, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+};
+
+static const struct snd_kcontrol_new display_port_mixer_controls[] = {
+	SOC_SINGLE_EXT("MultiMedia1", MSM_BACKEND_DAI_DISPLAY_PORT_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia2", MSM_BACKEND_DAI_DISPLAY_PORT_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA2, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia3", MSM_BACKEND_DAI_DISPLAY_PORT_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA3, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia4", MSM_BACKEND_DAI_DISPLAY_PORT_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA4, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia5", MSM_BACKEND_DAI_DISPLAY_PORT_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA5, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia6", MSM_BACKEND_DAI_DISPLAY_PORT_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA6, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia7", MSM_BACKEND_DAI_DISPLAY_PORT_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA7, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia8", MSM_BACKEND_DAI_DISPLAY_PORT_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA8, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia9", MSM_BACKEND_DAI_DISPLAY_PORT_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA9, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia10", MSM_BACKEND_DAI_DISPLAY_PORT_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA10, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia11", MSM_BACKEND_DAI_DISPLAY_PORT_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA11, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia12", MSM_BACKEND_DAI_DISPLAY_PORT_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA12, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia13", MSM_BACKEND_DAI_DISPLAY_PORT_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA13, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia14", MSM_BACKEND_DAI_DISPLAY_PORT_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA14, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia15", MSM_BACKEND_DAI_DISPLAY_PORT_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA15, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia16", MSM_BACKEND_DAI_DISPLAY_PORT_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia26", MSM_BACKEND_DAI_DISPLAY_PORT_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA26, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+};
+
+/* incall music delivery mixer */
+static const struct snd_kcontrol_new incall_music_delivery_mixer_controls[] = {
+	SOC_SINGLE_EXT("MultiMedia1", MSM_BACKEND_DAI_VOICE_PLAYBACK_TX,
+	MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia2", MSM_BACKEND_DAI_VOICE_PLAYBACK_TX,
+	MSM_FRONTEND_DAI_MULTIMEDIA2, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia5", MSM_BACKEND_DAI_VOICE_PLAYBACK_TX,
+	MSM_FRONTEND_DAI_MULTIMEDIA5, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia9", MSM_BACKEND_DAI_VOICE_PLAYBACK_TX,
+	MSM_FRONTEND_DAI_MULTIMEDIA9, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+};
+
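+/* incall music delivery mixer for the second voice session (Voice2) */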
+static const struct snd_kcontrol_new incall_music2_delivery_mixer_controls[] = {
+	SOC_SINGLE_EXT("MultiMedia1", MSM_BACKEND_DAI_VOICE2_PLAYBACK_TX,
+	MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia2", MSM_BACKEND_DAI_VOICE2_PLAYBACK_TX,
+	MSM_FRONTEND_DAI_MULTIMEDIA2, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia5", MSM_BACKEND_DAI_VOICE2_PLAYBACK_TX,
+	MSM_FRONTEND_DAI_MULTIMEDIA5, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia9", MSM_BACKEND_DAI_VOICE2_PLAYBACK_TX,
+	MSM_FRONTEND_DAI_MULTIMEDIA9, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+};
+
+static const struct snd_kcontrol_new slimbus_4_rx_mixer_controls[] = {
+	SOC_SINGLE_EXT("MultiMedia1", MSM_BACKEND_DAI_SLIMBUS_4_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia2", MSM_BACKEND_DAI_SLIMBUS_4_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA2, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia5", MSM_BACKEND_DAI_SLIMBUS_4_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA5, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia9", MSM_BACKEND_DAI_SLIMBUS_4_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA9, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+};
+
+static const struct snd_kcontrol_new slimbus_6_rx_mixer_controls[] = {
+	SOC_SINGLE_EXT("MultiMedia1", MSM_BACKEND_DAI_SLIMBUS_6_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia2", MSM_BACKEND_DAI_SLIMBUS_6_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA2, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia3", MSM_BACKEND_DAI_SLIMBUS_6_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA3, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia4", MSM_BACKEND_DAI_SLIMBUS_6_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA4, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia5", MSM_BACKEND_DAI_SLIMBUS_6_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA5, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia6", MSM_BACKEND_DAI_SLIMBUS_6_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA6, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia7", MSM_BACKEND_DAI_SLIMBUS_6_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA7, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia8", MSM_BACKEND_DAI_SLIMBUS_6_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA8, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia9", MSM_BACKEND_DAI_SLIMBUS_6_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA9, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia10", MSM_BACKEND_DAI_SLIMBUS_6_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA10, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia11", MSM_BACKEND_DAI_SLIMBUS_6_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA11, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia12", MSM_BACKEND_DAI_SLIMBUS_6_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA12, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia13", MSM_BACKEND_DAI_SLIMBUS_6_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA13, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia14", MSM_BACKEND_DAI_SLIMBUS_6_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA14, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia15", MSM_BACKEND_DAI_SLIMBUS_6_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA15, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia16", MSM_BACKEND_DAI_SLIMBUS_6_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia21", MSM_BACKEND_DAI_SLIMBUS_6_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA21, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia22", MSM_BACKEND_DAI_SLIMBUS_6_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA22, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia23", MSM_BACKEND_DAI_SLIMBUS_6_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA23, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia24", MSM_BACKEND_DAI_SLIMBUS_6_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA24, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia25", MSM_BACKEND_DAI_SLIMBUS_6_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA25, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia26", MSM_BACKEND_DAI_SLIMBUS_6_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA26, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+};
+
+static const struct snd_kcontrol_new slimbus_7_rx_mixer_controls[] = {
+	SOC_SINGLE_EXT("MultiMedia1", MSM_BACKEND_DAI_SLIMBUS_7_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia2", MSM_BACKEND_DAI_SLIMBUS_7_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA2, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia3", MSM_BACKEND_DAI_SLIMBUS_7_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA3, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia4", MSM_BACKEND_DAI_SLIMBUS_7_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA4, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia5", MSM_BACKEND_DAI_SLIMBUS_7_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA5, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia6", MSM_BACKEND_DAI_SLIMBUS_7_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA6, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia7", MSM_BACKEND_DAI_SLIMBUS_7_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA7, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia8", MSM_BACKEND_DAI_SLIMBUS_7_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA8, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia9", MSM_BACKEND_DAI_SLIMBUS_7_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA9, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia10", MSM_BACKEND_DAI_SLIMBUS_7_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA10, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia11", MSM_BACKEND_DAI_SLIMBUS_7_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA11, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia12", MSM_BACKEND_DAI_SLIMBUS_7_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA12, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia13", MSM_BACKEND_DAI_SLIMBUS_7_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA13, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia14", MSM_BACKEND_DAI_SLIMBUS_7_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA14, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia15", MSM_BACKEND_DAI_SLIMBUS_7_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA15, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia16", MSM_BACKEND_DAI_SLIMBUS_7_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia21", MSM_BACKEND_DAI_SLIMBUS_7_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA21, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia22", MSM_BACKEND_DAI_SLIMBUS_7_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA22, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia23", MSM_BACKEND_DAI_SLIMBUS_7_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA23, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia24", MSM_BACKEND_DAI_SLIMBUS_7_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA24, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia25", MSM_BACKEND_DAI_SLIMBUS_7_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA25, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia26", MSM_BACKEND_DAI_SLIMBUS_7_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA26, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+};
+
+static const struct snd_kcontrol_new usb_audio_rx_mixer_controls[] = {
+	SOC_SINGLE_EXT("MultiMedia1", MSM_BACKEND_DAI_USB_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia2", MSM_BACKEND_DAI_USB_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA2, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia3", MSM_BACKEND_DAI_USB_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA3, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia4", MSM_BACKEND_DAI_USB_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA4, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia5", MSM_BACKEND_DAI_USB_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA5, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia6", MSM_BACKEND_DAI_USB_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA6, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia7", MSM_BACKEND_DAI_USB_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA7, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia8", MSM_BACKEND_DAI_USB_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA8, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia9", MSM_BACKEND_DAI_USB_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA9, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia10", MSM_BACKEND_DAI_USB_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA10, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia11", MSM_BACKEND_DAI_USB_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA11, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia12", MSM_BACKEND_DAI_USB_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA12, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia13", MSM_BACKEND_DAI_USB_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA13, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia14", MSM_BACKEND_DAI_USB_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA14, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia15", MSM_BACKEND_DAI_USB_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA15, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia16", MSM_BACKEND_DAI_USB_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia26", MSM_BACKEND_DAI_USB_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA26, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+};
+
+static const struct snd_kcontrol_new int_bt_sco_rx_mixer_controls[] = {
+	SOC_SINGLE_EXT("MultiMedia1", MSM_BACKEND_DAI_INT_BT_SCO_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia2", MSM_BACKEND_DAI_INT_BT_SCO_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA2, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia3", MSM_BACKEND_DAI_INT_BT_SCO_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA3, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia4", MSM_BACKEND_DAI_INT_BT_SCO_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA4, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia5", MSM_BACKEND_DAI_INT_BT_SCO_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA5, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia6", MSM_BACKEND_DAI_INT_BT_SCO_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA6, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia7", MSM_BACKEND_DAI_INT_BT_SCO_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA7, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia8", MSM_BACKEND_DAI_INT_BT_SCO_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA8, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia9", MSM_BACKEND_DAI_INT_BT_SCO_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA9, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia10", MSM_BACKEND_DAI_INT_BT_SCO_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA10, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia11", MSM_BACKEND_DAI_INT_BT_SCO_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA11, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia12", MSM_BACKEND_DAI_INT_BT_SCO_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA12, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia13", MSM_BACKEND_DAI_INT_BT_SCO_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA13, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia14", MSM_BACKEND_DAI_INT_BT_SCO_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA14, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia15", MSM_BACKEND_DAI_INT_BT_SCO_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA15, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia16", MSM_BACKEND_DAI_INT_BT_SCO_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia17", MSM_BACKEND_DAI_INT_BT_SCO_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA17, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia18", MSM_BACKEND_DAI_INT_BT_SCO_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA18, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia19", MSM_BACKEND_DAI_INT_BT_SCO_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA19, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia26", MSM_BACKEND_DAI_INT_BT_SCO_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA26, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+};
+
+static const struct snd_kcontrol_new int_bt_a2dp_rx_mixer_controls[] = {
+	SOC_SINGLE_EXT("MultiMedia1", MSM_BACKEND_DAI_INT_BT_A2DP_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia2", MSM_BACKEND_DAI_INT_BT_A2DP_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA2, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia3", MSM_BACKEND_DAI_INT_BT_A2DP_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA3, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia4", MSM_BACKEND_DAI_INT_BT_A2DP_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA4, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia5", MSM_BACKEND_DAI_INT_BT_A2DP_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA5, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia6", MSM_BACKEND_DAI_INT_BT_A2DP_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA6, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia7", MSM_BACKEND_DAI_INT_BT_A2DP_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA7, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia8", MSM_BACKEND_DAI_INT_BT_A2DP_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA8, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia9", MSM_BACKEND_DAI_INT_BT_A2DP_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA9, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia10", MSM_BACKEND_DAI_INT_BT_A2DP_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA10, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia11", MSM_BACKEND_DAI_INT_BT_A2DP_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA11, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia12", MSM_BACKEND_DAI_INT_BT_A2DP_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA12, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia13", MSM_BACKEND_DAI_INT_BT_A2DP_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA13, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia14", MSM_BACKEND_DAI_INT_BT_A2DP_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA14, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia15", MSM_BACKEND_DAI_INT_BT_A2DP_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA15, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia16", MSM_BACKEND_DAI_INT_BT_A2DP_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia26", MSM_BACKEND_DAI_INT_BT_A2DP_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA26, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+};
+
+static const struct snd_kcontrol_new int_fm_rx_mixer_controls[] = {
+	SOC_SINGLE_EXT("MultiMedia1", MSM_BACKEND_DAI_INT_FM_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia2", MSM_BACKEND_DAI_INT_FM_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA2, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia3", MSM_BACKEND_DAI_INT_FM_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA3, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia4", MSM_BACKEND_DAI_INT_FM_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA4, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia5", MSM_BACKEND_DAI_INT_FM_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA5, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia6", MSM_BACKEND_DAI_INT_FM_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA6, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia7", MSM_BACKEND_DAI_INT_FM_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA7, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia8", MSM_BACKEND_DAI_INT_FM_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA8, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia9", MSM_BACKEND_DAI_INT_FM_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA9, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia10", MSM_BACKEND_DAI_INT_FM_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA10, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia11", MSM_BACKEND_DAI_INT_FM_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA11, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia12", MSM_BACKEND_DAI_INT_FM_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA12, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia13", MSM_BACKEND_DAI_INT_FM_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA13, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia14", MSM_BACKEND_DAI_INT_FM_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA14, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia15", MSM_BACKEND_DAI_INT_FM_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA15, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia16", MSM_BACKEND_DAI_INT_FM_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia17", MSM_BACKEND_DAI_INT_FM_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA17, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia18", MSM_BACKEND_DAI_INT_FM_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA18, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia19", MSM_BACKEND_DAI_INT_FM_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA19, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia26", MSM_BACKEND_DAI_INT_FM_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA26, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+};
+
+static const struct snd_kcontrol_new afe_pcm_rx_mixer_controls[] = {
+	SOC_SINGLE_EXT("MultiMedia1", MSM_BACKEND_DAI_AFE_PCM_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia2", MSM_BACKEND_DAI_AFE_PCM_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA2, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia3", MSM_BACKEND_DAI_AFE_PCM_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA3, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia4", MSM_BACKEND_DAI_AFE_PCM_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA4, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia5", MSM_BACKEND_DAI_AFE_PCM_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA5, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia6", MSM_BACKEND_DAI_AFE_PCM_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA6, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia7", MSM_BACKEND_DAI_AFE_PCM_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA7, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia8", MSM_BACKEND_DAI_AFE_PCM_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA8, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia9", MSM_BACKEND_DAI_AFE_PCM_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA9, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia10", MSM_BACKEND_DAI_AFE_PCM_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA10, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia11", MSM_BACKEND_DAI_AFE_PCM_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA11, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia12", MSM_BACKEND_DAI_AFE_PCM_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA12, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia13", MSM_BACKEND_DAI_AFE_PCM_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA13, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia14", MSM_BACKEND_DAI_AFE_PCM_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA14, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia15", MSM_BACKEND_DAI_AFE_PCM_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA15, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia16", MSM_BACKEND_DAI_AFE_PCM_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia17", MSM_BACKEND_DAI_AFE_PCM_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA17, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia18", MSM_BACKEND_DAI_AFE_PCM_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA18, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia19", MSM_BACKEND_DAI_AFE_PCM_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA19, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia26", MSM_BACKEND_DAI_AFE_PCM_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA26, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+};
+
+static const struct snd_kcontrol_new auxpcm_rx_mixer_controls[] = {
+	SOC_SINGLE_EXT("MultiMedia1", MSM_BACKEND_DAI_AUXPCM_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia2", MSM_BACKEND_DAI_AUXPCM_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA2, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia3", MSM_BACKEND_DAI_AUXPCM_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA3, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia4", MSM_BACKEND_DAI_AUXPCM_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA4, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia5", MSM_BACKEND_DAI_AUXPCM_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA5, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia6", MSM_BACKEND_DAI_AUXPCM_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA6, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia7", MSM_BACKEND_DAI_AUXPCM_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA7, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia8", MSM_BACKEND_DAI_AUXPCM_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA8, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia9", MSM_BACKEND_DAI_AUXPCM_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA9, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia10", MSM_BACKEND_DAI_AUXPCM_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA10, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia11", MSM_BACKEND_DAI_AUXPCM_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA11, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia12", MSM_BACKEND_DAI_AUXPCM_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA12, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia13", MSM_BACKEND_DAI_AUXPCM_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA13, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia14", MSM_BACKEND_DAI_AUXPCM_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA14, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia15", MSM_BACKEND_DAI_AUXPCM_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA15, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia16", MSM_BACKEND_DAI_AUXPCM_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia17", MSM_BACKEND_DAI_AUXPCM_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA17, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia18", MSM_BACKEND_DAI_AUXPCM_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA18, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia19", MSM_BACKEND_DAI_AUXPCM_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA19, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia21", MSM_BACKEND_DAI_AUXPCM_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA21, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia26", MSM_BACKEND_DAI_AUXPCM_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA26, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+};
+
+static const struct snd_kcontrol_new sec_auxpcm_rx_mixer_controls[] = {
+	SOC_SINGLE_EXT("MultiMedia1", MSM_BACKEND_DAI_SEC_AUXPCM_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia2", MSM_BACKEND_DAI_SEC_AUXPCM_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA2, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia3", MSM_BACKEND_DAI_SEC_AUXPCM_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA3, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia4", MSM_BACKEND_DAI_SEC_AUXPCM_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA4, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia5", MSM_BACKEND_DAI_SEC_AUXPCM_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA5, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia6", MSM_BACKEND_DAI_SEC_AUXPCM_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA6, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia7", MSM_BACKEND_DAI_SEC_AUXPCM_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA7, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia8", MSM_BACKEND_DAI_SEC_AUXPCM_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA8, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia9", MSM_BACKEND_DAI_SEC_AUXPCM_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA9, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia10", MSM_BACKEND_DAI_SEC_AUXPCM_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA10, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia11", MSM_BACKEND_DAI_SEC_AUXPCM_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA11, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia12", MSM_BACKEND_DAI_SEC_AUXPCM_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA12, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia13", MSM_BACKEND_DAI_SEC_AUXPCM_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA13, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia14", MSM_BACKEND_DAI_SEC_AUXPCM_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA14, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia15", MSM_BACKEND_DAI_SEC_AUXPCM_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA15, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia16", MSM_BACKEND_DAI_SEC_AUXPCM_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia17", MSM_BACKEND_DAI_SEC_AUXPCM_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA17, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia18", MSM_BACKEND_DAI_SEC_AUXPCM_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA18, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia19", MSM_BACKEND_DAI_SEC_AUXPCM_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA19, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia21", MSM_BACKEND_DAI_SEC_AUXPCM_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA21, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia26", MSM_BACKEND_DAI_SEC_AUXPCM_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA26, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+};
+
+static const struct snd_kcontrol_new tert_auxpcm_rx_mixer_controls[] = {
+	SOC_SINGLE_EXT("MultiMedia1", MSM_BACKEND_DAI_TERT_AUXPCM_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia2", MSM_BACKEND_DAI_TERT_AUXPCM_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA2, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia3", MSM_BACKEND_DAI_TERT_AUXPCM_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA3, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia4", MSM_BACKEND_DAI_TERT_AUXPCM_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA4, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia5", MSM_BACKEND_DAI_TERT_AUXPCM_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA5, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia6", MSM_BACKEND_DAI_TERT_AUXPCM_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA6, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia7", MSM_BACKEND_DAI_TERT_AUXPCM_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA7, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia8", MSM_BACKEND_DAI_TERT_AUXPCM_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA8, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia9", MSM_BACKEND_DAI_TERT_AUXPCM_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA9, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia10", MSM_BACKEND_DAI_TERT_AUXPCM_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA10, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia11", MSM_BACKEND_DAI_TERT_AUXPCM_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA11, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia12", MSM_BACKEND_DAI_TERT_AUXPCM_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA12, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia13", MSM_BACKEND_DAI_TERT_AUXPCM_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA13, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia14", MSM_BACKEND_DAI_TERT_AUXPCM_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA14, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia15", MSM_BACKEND_DAI_TERT_AUXPCM_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA15, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia16", MSM_BACKEND_DAI_TERT_AUXPCM_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia26", MSM_BACKEND_DAI_TERT_AUXPCM_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA26, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+};
+
+static const struct snd_kcontrol_new quat_auxpcm_rx_mixer_controls[] = {
+	SOC_SINGLE_EXT("MultiMedia1", MSM_BACKEND_DAI_QUAT_AUXPCM_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia2", MSM_BACKEND_DAI_QUAT_AUXPCM_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA2, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia3", MSM_BACKEND_DAI_QUAT_AUXPCM_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA3, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia4", MSM_BACKEND_DAI_QUAT_AUXPCM_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA4, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia5", MSM_BACKEND_DAI_QUAT_AUXPCM_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA5, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia6", MSM_BACKEND_DAI_QUAT_AUXPCM_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA6, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia7", MSM_BACKEND_DAI_QUAT_AUXPCM_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA7, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia8", MSM_BACKEND_DAI_QUAT_AUXPCM_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA8, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia9", MSM_BACKEND_DAI_QUAT_AUXPCM_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA9, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia10", MSM_BACKEND_DAI_QUAT_AUXPCM_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA10, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia11", MSM_BACKEND_DAI_QUAT_AUXPCM_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA11, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia12", MSM_BACKEND_DAI_QUAT_AUXPCM_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA12, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia13", MSM_BACKEND_DAI_QUAT_AUXPCM_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA13, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia14", MSM_BACKEND_DAI_QUAT_AUXPCM_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA14, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia15", MSM_BACKEND_DAI_QUAT_AUXPCM_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA15, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia16", MSM_BACKEND_DAI_QUAT_AUXPCM_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia26", MSM_BACKEND_DAI_QUAT_AUXPCM_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA26, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+};
+
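+/*
+ * TDM port mixers: same MultiMedia switch pattern, one control array
+ * per TDM back-end DAI (primary/secondary/tertiary/quaternary, RX and
+ * TX channels).
+ */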
+static const struct snd_kcontrol_new pri_tdm_rx_0_mixer_controls[] = {
+	SOC_SINGLE_EXT("MultiMedia1", MSM_BACKEND_DAI_PRI_TDM_RX_0,
+	MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia2", MSM_BACKEND_DAI_PRI_TDM_RX_0,
+	MSM_FRONTEND_DAI_MULTIMEDIA2, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia3", MSM_BACKEND_DAI_PRI_TDM_RX_0,
+	MSM_FRONTEND_DAI_MULTIMEDIA3, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia4", MSM_BACKEND_DAI_PRI_TDM_RX_0,
+	MSM_FRONTEND_DAI_MULTIMEDIA4, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia5", MSM_BACKEND_DAI_PRI_TDM_RX_0,
+	MSM_FRONTEND_DAI_MULTIMEDIA5, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia6", MSM_BACKEND_DAI_PRI_TDM_RX_0,
+	MSM_FRONTEND_DAI_MULTIMEDIA6, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia7", MSM_BACKEND_DAI_PRI_TDM_RX_0,
+	MSM_FRONTEND_DAI_MULTIMEDIA7, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia8", MSM_BACKEND_DAI_PRI_TDM_RX_0,
+	MSM_FRONTEND_DAI_MULTIMEDIA8, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia9", MSM_BACKEND_DAI_PRI_TDM_RX_0,
+	MSM_FRONTEND_DAI_MULTIMEDIA9, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia10", MSM_BACKEND_DAI_PRI_TDM_RX_0,
+	MSM_FRONTEND_DAI_MULTIMEDIA10, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia11", MSM_BACKEND_DAI_PRI_TDM_RX_0,
+	MSM_FRONTEND_DAI_MULTIMEDIA11, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia12", MSM_BACKEND_DAI_PRI_TDM_RX_0,
+	MSM_FRONTEND_DAI_MULTIMEDIA12, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia13", MSM_BACKEND_DAI_PRI_TDM_RX_0,
+	MSM_FRONTEND_DAI_MULTIMEDIA13, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia14", MSM_BACKEND_DAI_PRI_TDM_RX_0,
+	MSM_FRONTEND_DAI_MULTIMEDIA14, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia15", MSM_BACKEND_DAI_PRI_TDM_RX_0,
+	MSM_FRONTEND_DAI_MULTIMEDIA15, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia16", MSM_BACKEND_DAI_PRI_TDM_RX_0,
+	MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia21", MSM_BACKEND_DAI_PRI_TDM_RX_0,
+	MSM_FRONTEND_DAI_MULTIMEDIA21, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia26", MSM_BACKEND_DAI_PRI_TDM_RX_0,
+	MSM_FRONTEND_DAI_MULTIMEDIA26, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+};
+
+static const struct snd_kcontrol_new pri_tdm_rx_1_mixer_controls[] = {
+	SOC_SINGLE_EXT("MultiMedia1", MSM_BACKEND_DAI_PRI_TDM_RX_1,
+	MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia2", MSM_BACKEND_DAI_PRI_TDM_RX_1,
+	MSM_FRONTEND_DAI_MULTIMEDIA2, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia3", MSM_BACKEND_DAI_PRI_TDM_RX_1,
+	MSM_FRONTEND_DAI_MULTIMEDIA3, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia4", MSM_BACKEND_DAI_PRI_TDM_RX_1,
+	MSM_FRONTEND_DAI_MULTIMEDIA4, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia5", MSM_BACKEND_DAI_PRI_TDM_RX_1,
+	MSM_FRONTEND_DAI_MULTIMEDIA5, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia6", MSM_BACKEND_DAI_PRI_TDM_RX_1,
+	MSM_FRONTEND_DAI_MULTIMEDIA6, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia7", MSM_BACKEND_DAI_PRI_TDM_RX_1,
+	MSM_FRONTEND_DAI_MULTIMEDIA7, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia8", MSM_BACKEND_DAI_PRI_TDM_RX_1,
+	MSM_FRONTEND_DAI_MULTIMEDIA8, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia9", MSM_BACKEND_DAI_PRI_TDM_RX_1,
+	MSM_FRONTEND_DAI_MULTIMEDIA9, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia10", MSM_BACKEND_DAI_PRI_TDM_RX_1,
+	MSM_FRONTEND_DAI_MULTIMEDIA10, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia11", MSM_BACKEND_DAI_PRI_TDM_RX_1,
+	MSM_FRONTEND_DAI_MULTIMEDIA11, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia12", MSM_BACKEND_DAI_PRI_TDM_RX_1,
+	MSM_FRONTEND_DAI_MULTIMEDIA12, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia13", MSM_BACKEND_DAI_PRI_TDM_RX_1,
+	MSM_FRONTEND_DAI_MULTIMEDIA13, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia14", MSM_BACKEND_DAI_PRI_TDM_RX_1,
+	MSM_FRONTEND_DAI_MULTIMEDIA14, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia15", MSM_BACKEND_DAI_PRI_TDM_RX_1,
+	MSM_FRONTEND_DAI_MULTIMEDIA15, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia16", MSM_BACKEND_DAI_PRI_TDM_RX_1,
+	MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia21", MSM_BACKEND_DAI_PRI_TDM_RX_1,
+	MSM_FRONTEND_DAI_MULTIMEDIA21, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia26", MSM_BACKEND_DAI_PRI_TDM_RX_1,
+	MSM_FRONTEND_DAI_MULTIMEDIA26, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+};
+
+static const struct snd_kcontrol_new pri_tdm_rx_2_mixer_controls[] = {
+	SOC_SINGLE_EXT("MultiMedia1", MSM_BACKEND_DAI_PRI_TDM_RX_2,
+	MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia2", MSM_BACKEND_DAI_PRI_TDM_RX_2,
+	MSM_FRONTEND_DAI_MULTIMEDIA2, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia3", MSM_BACKEND_DAI_PRI_TDM_RX_2,
+	MSM_FRONTEND_DAI_MULTIMEDIA3, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia4", MSM_BACKEND_DAI_PRI_TDM_RX_2,
+	MSM_FRONTEND_DAI_MULTIMEDIA4, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia5", MSM_BACKEND_DAI_PRI_TDM_RX_2,
+	MSM_FRONTEND_DAI_MULTIMEDIA5, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia6", MSM_BACKEND_DAI_PRI_TDM_RX_2,
+	MSM_FRONTEND_DAI_MULTIMEDIA6, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia7", MSM_BACKEND_DAI_PRI_TDM_RX_2,
+	MSM_FRONTEND_DAI_MULTIMEDIA7, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia8", MSM_BACKEND_DAI_PRI_TDM_RX_2,
+	MSM_FRONTEND_DAI_MULTIMEDIA8, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia9", MSM_BACKEND_DAI_PRI_TDM_RX_2,
+	MSM_FRONTEND_DAI_MULTIMEDIA9, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia10", MSM_BACKEND_DAI_PRI_TDM_RX_2,
+	MSM_FRONTEND_DAI_MULTIMEDIA10, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia11", MSM_BACKEND_DAI_PRI_TDM_RX_2,
+	MSM_FRONTEND_DAI_MULTIMEDIA11, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia12", MSM_BACKEND_DAI_PRI_TDM_RX_2,
+	MSM_FRONTEND_DAI_MULTIMEDIA12, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia13", MSM_BACKEND_DAI_PRI_TDM_RX_2,
+	MSM_FRONTEND_DAI_MULTIMEDIA13, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia14", MSM_BACKEND_DAI_PRI_TDM_RX_2,
+	MSM_FRONTEND_DAI_MULTIMEDIA14, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia15", MSM_BACKEND_DAI_PRI_TDM_RX_2,
+	MSM_FRONTEND_DAI_MULTIMEDIA15, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia16", MSM_BACKEND_DAI_PRI_TDM_RX_2,
+	MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia21", MSM_BACKEND_DAI_PRI_TDM_RX_2,
+	MSM_FRONTEND_DAI_MULTIMEDIA21, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia26", MSM_BACKEND_DAI_PRI_TDM_RX_2,
+	MSM_FRONTEND_DAI_MULTIMEDIA26, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+};
+
+static const struct snd_kcontrol_new pri_tdm_rx_3_mixer_controls[] = {
+	SOC_SINGLE_EXT("MultiMedia1", MSM_BACKEND_DAI_PRI_TDM_RX_3,
+	MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia2", MSM_BACKEND_DAI_PRI_TDM_RX_3,
+	MSM_FRONTEND_DAI_MULTIMEDIA2, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia3", MSM_BACKEND_DAI_PRI_TDM_RX_3,
+	MSM_FRONTEND_DAI_MULTIMEDIA3, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia4", MSM_BACKEND_DAI_PRI_TDM_RX_3,
+	MSM_FRONTEND_DAI_MULTIMEDIA4, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia5", MSM_BACKEND_DAI_PRI_TDM_RX_3,
+	MSM_FRONTEND_DAI_MULTIMEDIA5, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia6", MSM_BACKEND_DAI_PRI_TDM_RX_3,
+	MSM_FRONTEND_DAI_MULTIMEDIA6, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia7", MSM_BACKEND_DAI_PRI_TDM_RX_3,
+	MSM_FRONTEND_DAI_MULTIMEDIA7, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia8", MSM_BACKEND_DAI_PRI_TDM_RX_3,
+	MSM_FRONTEND_DAI_MULTIMEDIA8, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia9", MSM_BACKEND_DAI_PRI_TDM_RX_3,
+	MSM_FRONTEND_DAI_MULTIMEDIA9, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia10", MSM_BACKEND_DAI_PRI_TDM_RX_3,
+	MSM_FRONTEND_DAI_MULTIMEDIA10, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia11", MSM_BACKEND_DAI_PRI_TDM_RX_3,
+	MSM_FRONTEND_DAI_MULTIMEDIA11, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia12", MSM_BACKEND_DAI_PRI_TDM_RX_3,
+	MSM_FRONTEND_DAI_MULTIMEDIA12, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia13", MSM_BACKEND_DAI_PRI_TDM_RX_3,
+	MSM_FRONTEND_DAI_MULTIMEDIA13, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia14", MSM_BACKEND_DAI_PRI_TDM_RX_3,
+	MSM_FRONTEND_DAI_MULTIMEDIA14, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia15", MSM_BACKEND_DAI_PRI_TDM_RX_3,
+	MSM_FRONTEND_DAI_MULTIMEDIA15, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia16", MSM_BACKEND_DAI_PRI_TDM_RX_3,
+	MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia21", MSM_BACKEND_DAI_PRI_TDM_RX_3,
+	MSM_FRONTEND_DAI_MULTIMEDIA21, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia26", MSM_BACKEND_DAI_PRI_TDM_RX_3,
+	MSM_FRONTEND_DAI_MULTIMEDIA26, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+};
+
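+/* Capture direction: MultiMedia switches for the PRI_TDM_TX_0 back end. */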
+static const struct snd_kcontrol_new pri_tdm_tx_0_mixer_controls[] = {
+	SOC_SINGLE_EXT("MultiMedia1", MSM_BACKEND_DAI_PRI_TDM_TX_0,
+	MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia2", MSM_BACKEND_DAI_PRI_TDM_TX_0,
+	MSM_FRONTEND_DAI_MULTIMEDIA2, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia3", MSM_BACKEND_DAI_PRI_TDM_TX_0,
+	MSM_FRONTEND_DAI_MULTIMEDIA3, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia4", MSM_BACKEND_DAI_PRI_TDM_TX_0,
+	MSM_FRONTEND_DAI_MULTIMEDIA4, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia5", MSM_BACKEND_DAI_PRI_TDM_TX_0,
+	MSM_FRONTEND_DAI_MULTIMEDIA5, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia6", MSM_BACKEND_DAI_PRI_TDM_TX_0,
+	MSM_FRONTEND_DAI_MULTIMEDIA6, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia7", MSM_BACKEND_DAI_PRI_TDM_TX_0,
+	MSM_FRONTEND_DAI_MULTIMEDIA7, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia8", MSM_BACKEND_DAI_PRI_TDM_TX_0,
+	MSM_FRONTEND_DAI_MULTIMEDIA8, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia9", MSM_BACKEND_DAI_PRI_TDM_TX_0,
+	MSM_FRONTEND_DAI_MULTIMEDIA9, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia10", MSM_BACKEND_DAI_PRI_TDM_TX_0,
+	MSM_FRONTEND_DAI_MULTIMEDIA10, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia11", MSM_BACKEND_DAI_PRI_TDM_TX_0,
+	MSM_FRONTEND_DAI_MULTIMEDIA11, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia12", MSM_BACKEND_DAI_PRI_TDM_TX_0,
+	MSM_FRONTEND_DAI_MULTIMEDIA12, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia13", MSM_BACKEND_DAI_PRI_TDM_TX_0,
+	MSM_FRONTEND_DAI_MULTIMEDIA13, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia14", MSM_BACKEND_DAI_PRI_TDM_TX_0,
+	MSM_FRONTEND_DAI_MULTIMEDIA14, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia15", MSM_BACKEND_DAI_PRI_TDM_TX_0,
+	MSM_FRONTEND_DAI_MULTIMEDIA15, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia16", MSM_BACKEND_DAI_PRI_TDM_TX_0,
+	MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+};
+
+static const struct snd_kcontrol_new sec_tdm_rx_0_mixer_controls[] = {
+	SOC_SINGLE_EXT("MultiMedia1", MSM_BACKEND_DAI_SEC_TDM_RX_0,
+	MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia2", MSM_BACKEND_DAI_SEC_TDM_RX_0,
+	MSM_FRONTEND_DAI_MULTIMEDIA2, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia3", MSM_BACKEND_DAI_SEC_TDM_RX_0,
+	MSM_FRONTEND_DAI_MULTIMEDIA3, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia4", MSM_BACKEND_DAI_SEC_TDM_RX_0,
+	MSM_FRONTEND_DAI_MULTIMEDIA4, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia5", MSM_BACKEND_DAI_SEC_TDM_RX_0,
+	MSM_FRONTEND_DAI_MULTIMEDIA5, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia6", MSM_BACKEND_DAI_SEC_TDM_RX_0,
+	MSM_FRONTEND_DAI_MULTIMEDIA6, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia7", MSM_BACKEND_DAI_SEC_TDM_RX_0,
+	MSM_FRONTEND_DAI_MULTIMEDIA7, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia8", MSM_BACKEND_DAI_SEC_TDM_RX_0,
+	MSM_FRONTEND_DAI_MULTIMEDIA8, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia9", MSM_BACKEND_DAI_SEC_TDM_RX_0,
+	MSM_FRONTEND_DAI_MULTIMEDIA9, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia10", MSM_BACKEND_DAI_SEC_TDM_RX_0,
+	MSM_FRONTEND_DAI_MULTIMEDIA10, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia11", MSM_BACKEND_DAI_SEC_TDM_RX_0,
+	MSM_FRONTEND_DAI_MULTIMEDIA11, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia12", MSM_BACKEND_DAI_SEC_TDM_RX_0,
+	MSM_FRONTEND_DAI_MULTIMEDIA12, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia13", MSM_BACKEND_DAI_SEC_TDM_RX_0,
+	MSM_FRONTEND_DAI_MULTIMEDIA13, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia14", MSM_BACKEND_DAI_SEC_TDM_RX_0,
+	MSM_FRONTEND_DAI_MULTIMEDIA14, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia15", MSM_BACKEND_DAI_SEC_TDM_RX_0,
+	MSM_FRONTEND_DAI_MULTIMEDIA15, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia16", MSM_BACKEND_DAI_SEC_TDM_RX_0,
+	MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia21", MSM_BACKEND_DAI_SEC_TDM_RX_0,
+	MSM_FRONTEND_DAI_MULTIMEDIA21, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia26", MSM_BACKEND_DAI_SEC_TDM_RX_0,
+	MSM_FRONTEND_DAI_MULTIMEDIA26, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+};
+
+static const struct snd_kcontrol_new sec_tdm_rx_1_mixer_controls[] = {
+	SOC_SINGLE_EXT("MultiMedia1", MSM_BACKEND_DAI_SEC_TDM_RX_1,
+	MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia2", MSM_BACKEND_DAI_SEC_TDM_RX_1,
+	MSM_FRONTEND_DAI_MULTIMEDIA2, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia3", MSM_BACKEND_DAI_SEC_TDM_RX_1,
+	MSM_FRONTEND_DAI_MULTIMEDIA3, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia4", MSM_BACKEND_DAI_SEC_TDM_RX_1,
+	MSM_FRONTEND_DAI_MULTIMEDIA4, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia5", MSM_BACKEND_DAI_SEC_TDM_RX_1,
+	MSM_FRONTEND_DAI_MULTIMEDIA5, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia6", MSM_BACKEND_DAI_SEC_TDM_RX_1,
+	MSM_FRONTEND_DAI_MULTIMEDIA6, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia7", MSM_BACKEND_DAI_SEC_TDM_RX_1,
+	MSM_FRONTEND_DAI_MULTIMEDIA7, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia8", MSM_BACKEND_DAI_SEC_TDM_RX_1,
+	MSM_FRONTEND_DAI_MULTIMEDIA8, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia9", MSM_BACKEND_DAI_SEC_TDM_RX_1,
+	MSM_FRONTEND_DAI_MULTIMEDIA9, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia10", MSM_BACKEND_DAI_SEC_TDM_RX_1,
+	MSM_FRONTEND_DAI_MULTIMEDIA10, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia11", MSM_BACKEND_DAI_SEC_TDM_RX_1,
+	MSM_FRONTEND_DAI_MULTIMEDIA11, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia12", MSM_BACKEND_DAI_SEC_TDM_RX_1,
+	MSM_FRONTEND_DAI_MULTIMEDIA12, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia13", MSM_BACKEND_DAI_SEC_TDM_RX_1,
+	MSM_FRONTEND_DAI_MULTIMEDIA13, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia14", MSM_BACKEND_DAI_SEC_TDM_RX_1,
+	MSM_FRONTEND_DAI_MULTIMEDIA14, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia15", MSM_BACKEND_DAI_SEC_TDM_RX_1,
+	MSM_FRONTEND_DAI_MULTIMEDIA15, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia16", MSM_BACKEND_DAI_SEC_TDM_RX_1,
+	MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia21", MSM_BACKEND_DAI_SEC_TDM_RX_1,
+	MSM_FRONTEND_DAI_MULTIMEDIA21, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia26", MSM_BACKEND_DAI_SEC_TDM_RX_1,
+	MSM_FRONTEND_DAI_MULTIMEDIA26, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+};
+
+static const struct snd_kcontrol_new sec_tdm_rx_2_mixer_controls[] = {
+	SOC_SINGLE_EXT("MultiMedia1", MSM_BACKEND_DAI_SEC_TDM_RX_2,
+	MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia2", MSM_BACKEND_DAI_SEC_TDM_RX_2,
+	MSM_FRONTEND_DAI_MULTIMEDIA2, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia3", MSM_BACKEND_DAI_SEC_TDM_RX_2,
+	MSM_FRONTEND_DAI_MULTIMEDIA3, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia4", MSM_BACKEND_DAI_SEC_TDM_RX_2,
+	MSM_FRONTEND_DAI_MULTIMEDIA4, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia5", MSM_BACKEND_DAI_SEC_TDM_RX_2,
+	MSM_FRONTEND_DAI_MULTIMEDIA5, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia6", MSM_BACKEND_DAI_SEC_TDM_RX_2,
+	MSM_FRONTEND_DAI_MULTIMEDIA6, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia7", MSM_BACKEND_DAI_SEC_TDM_RX_2,
+	MSM_FRONTEND_DAI_MULTIMEDIA7, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia8", MSM_BACKEND_DAI_SEC_TDM_RX_2,
+	MSM_FRONTEND_DAI_MULTIMEDIA8, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia9", MSM_BACKEND_DAI_SEC_TDM_RX_2,
+	MSM_FRONTEND_DAI_MULTIMEDIA9, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia10", MSM_BACKEND_DAI_SEC_TDM_RX_2,
+	MSM_FRONTEND_DAI_MULTIMEDIA10, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia11", MSM_BACKEND_DAI_SEC_TDM_RX_2,
+	MSM_FRONTEND_DAI_MULTIMEDIA11, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia12", MSM_BACKEND_DAI_SEC_TDM_RX_2,
+	MSM_FRONTEND_DAI_MULTIMEDIA12, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia13", MSM_BACKEND_DAI_SEC_TDM_RX_2,
+	MSM_FRONTEND_DAI_MULTIMEDIA13, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia14", MSM_BACKEND_DAI_SEC_TDM_RX_2,
+	MSM_FRONTEND_DAI_MULTIMEDIA14, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia15", MSM_BACKEND_DAI_SEC_TDM_RX_2,
+	MSM_FRONTEND_DAI_MULTIMEDIA15, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia16", MSM_BACKEND_DAI_SEC_TDM_RX_2,
+	MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia21", MSM_BACKEND_DAI_SEC_TDM_RX_2,
+	MSM_FRONTEND_DAI_MULTIMEDIA21, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia26", MSM_BACKEND_DAI_SEC_TDM_RX_2,
+	MSM_FRONTEND_DAI_MULTIMEDIA26, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+};
+
+static const struct snd_kcontrol_new sec_tdm_rx_3_mixer_controls[] = {
+	SOC_SINGLE_EXT("MultiMedia1", MSM_BACKEND_DAI_SEC_TDM_RX_3,
+	MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia2", MSM_BACKEND_DAI_SEC_TDM_RX_3,
+	MSM_FRONTEND_DAI_MULTIMEDIA2, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia3", MSM_BACKEND_DAI_SEC_TDM_RX_3,
+	MSM_FRONTEND_DAI_MULTIMEDIA3, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia4", MSM_BACKEND_DAI_SEC_TDM_RX_3,
+	MSM_FRONTEND_DAI_MULTIMEDIA4, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia5", MSM_BACKEND_DAI_SEC_TDM_RX_3,
+	MSM_FRONTEND_DAI_MULTIMEDIA5, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia6", MSM_BACKEND_DAI_SEC_TDM_RX_3,
+	MSM_FRONTEND_DAI_MULTIMEDIA6, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia7", MSM_BACKEND_DAI_SEC_TDM_RX_3,
+	MSM_FRONTEND_DAI_MULTIMEDIA7, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia8", MSM_BACKEND_DAI_SEC_TDM_RX_3,
+	MSM_FRONTEND_DAI_MULTIMEDIA8, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia9", MSM_BACKEND_DAI_SEC_TDM_RX_3,
+	MSM_FRONTEND_DAI_MULTIMEDIA9, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia10", MSM_BACKEND_DAI_SEC_TDM_RX_3,
+	MSM_FRONTEND_DAI_MULTIMEDIA10, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia11", MSM_BACKEND_DAI_SEC_TDM_RX_3,
+	MSM_FRONTEND_DAI_MULTIMEDIA11, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia12", MSM_BACKEND_DAI_SEC_TDM_RX_3,
+	MSM_FRONTEND_DAI_MULTIMEDIA12, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia13", MSM_BACKEND_DAI_SEC_TDM_RX_3,
+	MSM_FRONTEND_DAI_MULTIMEDIA13, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia14", MSM_BACKEND_DAI_SEC_TDM_RX_3,
+	MSM_FRONTEND_DAI_MULTIMEDIA14, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia15", MSM_BACKEND_DAI_SEC_TDM_RX_3,
+	MSM_FRONTEND_DAI_MULTIMEDIA15, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia16", MSM_BACKEND_DAI_SEC_TDM_RX_3,
+	MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia21", MSM_BACKEND_DAI_SEC_TDM_RX_3,
+	MSM_FRONTEND_DAI_MULTIMEDIA21, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia26", MSM_BACKEND_DAI_SEC_TDM_RX_3,
+	MSM_FRONTEND_DAI_MULTIMEDIA26, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+};
+
+static const struct snd_kcontrol_new sec_tdm_tx_0_mixer_controls[] = {
+	SOC_SINGLE_EXT("MultiMedia1", MSM_BACKEND_DAI_SEC_TDM_TX_0,
+	MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia2", MSM_BACKEND_DAI_SEC_TDM_TX_0,
+	MSM_FRONTEND_DAI_MULTIMEDIA2, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia3", MSM_BACKEND_DAI_SEC_TDM_TX_0,
+	MSM_FRONTEND_DAI_MULTIMEDIA3, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia4", MSM_BACKEND_DAI_SEC_TDM_TX_0,
+	MSM_FRONTEND_DAI_MULTIMEDIA4, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia5", MSM_BACKEND_DAI_SEC_TDM_TX_0,
+	MSM_FRONTEND_DAI_MULTIMEDIA5, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia6", MSM_BACKEND_DAI_SEC_TDM_TX_0,
+	MSM_FRONTEND_DAI_MULTIMEDIA6, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia7", MSM_BACKEND_DAI_SEC_TDM_TX_0,
+	MSM_FRONTEND_DAI_MULTIMEDIA7, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia8", MSM_BACKEND_DAI_SEC_TDM_TX_0,
+	MSM_FRONTEND_DAI_MULTIMEDIA8, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia9", MSM_BACKEND_DAI_SEC_TDM_TX_0,
+	MSM_FRONTEND_DAI_MULTIMEDIA9, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia10", MSM_BACKEND_DAI_SEC_TDM_TX_0,
+	MSM_FRONTEND_DAI_MULTIMEDIA10, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia11", MSM_BACKEND_DAI_SEC_TDM_TX_0,
+	MSM_FRONTEND_DAI_MULTIMEDIA11, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia12", MSM_BACKEND_DAI_SEC_TDM_TX_0,
+	MSM_FRONTEND_DAI_MULTIMEDIA12, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia13", MSM_BACKEND_DAI_SEC_TDM_TX_0,
+	MSM_FRONTEND_DAI_MULTIMEDIA13, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia14", MSM_BACKEND_DAI_SEC_TDM_TX_0,
+	MSM_FRONTEND_DAI_MULTIMEDIA14, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia15", MSM_BACKEND_DAI_SEC_TDM_TX_0,
+	MSM_FRONTEND_DAI_MULTIMEDIA15, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia16", MSM_BACKEND_DAI_SEC_TDM_TX_0,
+	MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+};
+
+static const struct snd_kcontrol_new tert_tdm_rx_0_mixer_controls[] = {
+	SOC_SINGLE_EXT("MultiMedia1", MSM_BACKEND_DAI_TERT_TDM_RX_0 ,
+	MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia2", MSM_BACKEND_DAI_TERT_TDM_RX_0,
+	MSM_FRONTEND_DAI_MULTIMEDIA2, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia3", MSM_BACKEND_DAI_TERT_TDM_RX_0,
+	MSM_FRONTEND_DAI_MULTIMEDIA3, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia4", MSM_BACKEND_DAI_TERT_TDM_RX_0,
+	MSM_FRONTEND_DAI_MULTIMEDIA4, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia5", MSM_BACKEND_DAI_TERT_TDM_RX_0,
+	MSM_FRONTEND_DAI_MULTIMEDIA5, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia6", MSM_BACKEND_DAI_TERT_TDM_RX_0,
+	MSM_FRONTEND_DAI_MULTIMEDIA6, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia7", MSM_BACKEND_DAI_TERT_TDM_RX_0,
+	MSM_FRONTEND_DAI_MULTIMEDIA7, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia8", MSM_BACKEND_DAI_TERT_TDM_RX_0,
+	MSM_FRONTEND_DAI_MULTIMEDIA8, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia9", MSM_BACKEND_DAI_TERT_TDM_RX_0,
+	MSM_FRONTEND_DAI_MULTIMEDIA9, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia10", MSM_BACKEND_DAI_TERT_TDM_RX_0,
+	MSM_FRONTEND_DAI_MULTIMEDIA10, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia11", MSM_BACKEND_DAI_TERT_TDM_RX_0,
+	MSM_FRONTEND_DAI_MULTIMEDIA11, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia12", MSM_BACKEND_DAI_TERT_TDM_RX_0,
+	MSM_FRONTEND_DAI_MULTIMEDIA12, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia13", MSM_BACKEND_DAI_TERT_TDM_RX_0,
+	MSM_FRONTEND_DAI_MULTIMEDIA13, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia14", MSM_BACKEND_DAI_TERT_TDM_RX_0,
+	MSM_FRONTEND_DAI_MULTIMEDIA14, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia15", MSM_BACKEND_DAI_TERT_TDM_RX_0,
+	MSM_FRONTEND_DAI_MULTIMEDIA15, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia16", MSM_BACKEND_DAI_TERT_TDM_RX_0,
+	MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia21", MSM_BACKEND_DAI_TERT_TDM_RX_0,
+	MSM_FRONTEND_DAI_MULTIMEDIA21, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia26", MSM_BACKEND_DAI_TERT_TDM_RX_0,
+	MSM_FRONTEND_DAI_MULTIMEDIA26, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+};
+
+static const struct snd_kcontrol_new tert_tdm_tx_0_mixer_controls[] = {
+	SOC_SINGLE_EXT("MultiMedia1", MSM_BACKEND_DAI_TERT_TDM_TX_0,
+	MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia2", MSM_BACKEND_DAI_TERT_TDM_TX_0,
+	MSM_FRONTEND_DAI_MULTIMEDIA2, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia3", MSM_BACKEND_DAI_TERT_TDM_TX_0,
+	MSM_FRONTEND_DAI_MULTIMEDIA3, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia4", MSM_BACKEND_DAI_TERT_TDM_TX_0,
+	MSM_FRONTEND_DAI_MULTIMEDIA4, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia5", MSM_BACKEND_DAI_TERT_TDM_TX_0,
+	MSM_FRONTEND_DAI_MULTIMEDIA5, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia6", MSM_BACKEND_DAI_TERT_TDM_TX_0,
+	MSM_FRONTEND_DAI_MULTIMEDIA6, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia7", MSM_BACKEND_DAI_TERT_TDM_TX_0,
+	MSM_FRONTEND_DAI_MULTIMEDIA7, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia8", MSM_BACKEND_DAI_TERT_TDM_TX_0,
+	MSM_FRONTEND_DAI_MULTIMEDIA8, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia9", MSM_BACKEND_DAI_TERT_TDM_TX_0,
+	MSM_FRONTEND_DAI_MULTIMEDIA9, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia10", MSM_BACKEND_DAI_TERT_TDM_TX_0,
+	MSM_FRONTEND_DAI_MULTIMEDIA10, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia11", MSM_BACKEND_DAI_TERT_TDM_TX_0,
+	MSM_FRONTEND_DAI_MULTIMEDIA11, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia12", MSM_BACKEND_DAI_TERT_TDM_TX_0,
+	MSM_FRONTEND_DAI_MULTIMEDIA12, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia13", MSM_BACKEND_DAI_TERT_TDM_TX_0,
+	MSM_FRONTEND_DAI_MULTIMEDIA13, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia14", MSM_BACKEND_DAI_TERT_TDM_TX_0,
+	MSM_FRONTEND_DAI_MULTIMEDIA14, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia15", MSM_BACKEND_DAI_TERT_TDM_TX_0,
+	MSM_FRONTEND_DAI_MULTIMEDIA15, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia16", MSM_BACKEND_DAI_TERT_TDM_TX_0,
+	MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+};
+
+static const struct snd_kcontrol_new tert_tdm_rx_1_mixer_controls[] = {
+	SOC_SINGLE_EXT("MultiMedia1", MSM_BACKEND_DAI_TERT_TDM_RX_1 ,
+	MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia2", MSM_BACKEND_DAI_TERT_TDM_RX_1,
+	MSM_FRONTEND_DAI_MULTIMEDIA2, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia3", MSM_BACKEND_DAI_TERT_TDM_RX_1,
+	MSM_FRONTEND_DAI_MULTIMEDIA3, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia4", MSM_BACKEND_DAI_TERT_TDM_RX_1,
+	MSM_FRONTEND_DAI_MULTIMEDIA4, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia5", MSM_BACKEND_DAI_TERT_TDM_RX_1,
+	MSM_FRONTEND_DAI_MULTIMEDIA5, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia6", MSM_BACKEND_DAI_TERT_TDM_RX_1,
+	MSM_FRONTEND_DAI_MULTIMEDIA6, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia7", MSM_BACKEND_DAI_TERT_TDM_RX_1,
+	MSM_FRONTEND_DAI_MULTIMEDIA7, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia8", MSM_BACKEND_DAI_TERT_TDM_RX_1,
+	MSM_FRONTEND_DAI_MULTIMEDIA8, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia9", MSM_BACKEND_DAI_TERT_TDM_RX_1,
+	MSM_FRONTEND_DAI_MULTIMEDIA9, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia10", MSM_BACKEND_DAI_TERT_TDM_RX_1,
+	MSM_FRONTEND_DAI_MULTIMEDIA10, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia11", MSM_BACKEND_DAI_TERT_TDM_RX_1,
+	MSM_FRONTEND_DAI_MULTIMEDIA11, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia12", MSM_BACKEND_DAI_TERT_TDM_RX_1,
+	MSM_FRONTEND_DAI_MULTIMEDIA12, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia13", MSM_BACKEND_DAI_TERT_TDM_RX_1,
+	MSM_FRONTEND_DAI_MULTIMEDIA13, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia14", MSM_BACKEND_DAI_TERT_TDM_RX_1,
+	MSM_FRONTEND_DAI_MULTIMEDIA14, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia15", MSM_BACKEND_DAI_TERT_TDM_RX_1,
+	MSM_FRONTEND_DAI_MULTIMEDIA15, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia16", MSM_BACKEND_DAI_TERT_TDM_RX_1,
+	MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia21", MSM_BACKEND_DAI_TERT_TDM_RX_1,
+	MSM_FRONTEND_DAI_MULTIMEDIA21, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia26", MSM_BACKEND_DAI_TERT_TDM_RX_1,
+	MSM_FRONTEND_DAI_MULTIMEDIA26, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+};
+
+static const struct snd_kcontrol_new tert_tdm_rx_2_mixer_controls[] = {
+	SOC_SINGLE_EXT("MultiMedia1", MSM_BACKEND_DAI_TERT_TDM_RX_2 ,
+	MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia2", MSM_BACKEND_DAI_TERT_TDM_RX_2,
+	MSM_FRONTEND_DAI_MULTIMEDIA2, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia3", MSM_BACKEND_DAI_TERT_TDM_RX_2,
+	MSM_FRONTEND_DAI_MULTIMEDIA3, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia4", MSM_BACKEND_DAI_TERT_TDM_RX_2,
+	MSM_FRONTEND_DAI_MULTIMEDIA4, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia5", MSM_BACKEND_DAI_TERT_TDM_RX_2,
+	MSM_FRONTEND_DAI_MULTIMEDIA5, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia6", MSM_BACKEND_DAI_TERT_TDM_RX_2,
+	MSM_FRONTEND_DAI_MULTIMEDIA6, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia7", MSM_BACKEND_DAI_TERT_TDM_RX_2,
+	MSM_FRONTEND_DAI_MULTIMEDIA7, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia8", MSM_BACKEND_DAI_TERT_TDM_RX_2,
+	MSM_FRONTEND_DAI_MULTIMEDIA8, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia9", MSM_BACKEND_DAI_TERT_TDM_RX_2,
+	MSM_FRONTEND_DAI_MULTIMEDIA9, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia10", MSM_BACKEND_DAI_TERT_TDM_RX_2,
+	MSM_FRONTEND_DAI_MULTIMEDIA10, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia11", MSM_BACKEND_DAI_TERT_TDM_RX_2,
+	MSM_FRONTEND_DAI_MULTIMEDIA11, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia12", MSM_BACKEND_DAI_TERT_TDM_RX_2,
+	MSM_FRONTEND_DAI_MULTIMEDIA12, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia13", MSM_BACKEND_DAI_TERT_TDM_RX_2,
+	MSM_FRONTEND_DAI_MULTIMEDIA13, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia14", MSM_BACKEND_DAI_TERT_TDM_RX_2,
+	MSM_FRONTEND_DAI_MULTIMEDIA14, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia15", MSM_BACKEND_DAI_TERT_TDM_RX_2,
+	MSM_FRONTEND_DAI_MULTIMEDIA15, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia16", MSM_BACKEND_DAI_TERT_TDM_RX_2,
+	MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia21", MSM_BACKEND_DAI_TERT_TDM_RX_2,
+	MSM_FRONTEND_DAI_MULTIMEDIA21, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia26", MSM_BACKEND_DAI_TERT_TDM_RX_2,
+	MSM_FRONTEND_DAI_MULTIMEDIA26, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+};
+
+static const struct snd_kcontrol_new tert_tdm_rx_3_mixer_controls[] = {
+	SOC_SINGLE_EXT("MultiMedia1", MSM_BACKEND_DAI_TERT_TDM_RX_3 ,
+	MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia2", MSM_BACKEND_DAI_TERT_TDM_RX_3,
+	MSM_FRONTEND_DAI_MULTIMEDIA2, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia3", MSM_BACKEND_DAI_TERT_TDM_RX_3,
+	MSM_FRONTEND_DAI_MULTIMEDIA3, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia4", MSM_BACKEND_DAI_TERT_TDM_RX_3,
+	MSM_FRONTEND_DAI_MULTIMEDIA4, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia5", MSM_BACKEND_DAI_TERT_TDM_RX_3,
+	MSM_FRONTEND_DAI_MULTIMEDIA5, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia6", MSM_BACKEND_DAI_TERT_TDM_RX_3,
+	MSM_FRONTEND_DAI_MULTIMEDIA6, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia7", MSM_BACKEND_DAI_TERT_TDM_RX_3,
+	MSM_FRONTEND_DAI_MULTIMEDIA7, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia8", MSM_BACKEND_DAI_TERT_TDM_RX_3,
+	MSM_FRONTEND_DAI_MULTIMEDIA8, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia9", MSM_BACKEND_DAI_TERT_TDM_RX_3,
+	MSM_FRONTEND_DAI_MULTIMEDIA9, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia10", MSM_BACKEND_DAI_TERT_TDM_RX_3,
+	MSM_FRONTEND_DAI_MULTIMEDIA10, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia11", MSM_BACKEND_DAI_TERT_TDM_RX_3,
+	MSM_FRONTEND_DAI_MULTIMEDIA11, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia12", MSM_BACKEND_DAI_TERT_TDM_RX_3,
+	MSM_FRONTEND_DAI_MULTIMEDIA12, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia13", MSM_BACKEND_DAI_TERT_TDM_RX_3,
+	MSM_FRONTEND_DAI_MULTIMEDIA13, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia14", MSM_BACKEND_DAI_TERT_TDM_RX_3,
+	MSM_FRONTEND_DAI_MULTIMEDIA14, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia15", MSM_BACKEND_DAI_TERT_TDM_RX_3,
+	MSM_FRONTEND_DAI_MULTIMEDIA15, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia16", MSM_BACKEND_DAI_TERT_TDM_RX_3,
+	MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia21", MSM_BACKEND_DAI_TERT_TDM_RX_3,
+	MSM_FRONTEND_DAI_MULTIMEDIA21, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia26", MSM_BACKEND_DAI_TERT_TDM_RX_3,
+	MSM_FRONTEND_DAI_MULTIMEDIA26, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+};
+
+static const struct snd_kcontrol_new tert_tdm_rx_4_mixer_controls[] = {
+	SOC_SINGLE_EXT("MultiMedia1", MSM_BACKEND_DAI_TERT_TDM_RX_4,
+	MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia2", MSM_BACKEND_DAI_TERT_TDM_RX_4,
+	MSM_FRONTEND_DAI_MULTIMEDIA2, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia3", MSM_BACKEND_DAI_TERT_TDM_RX_4,
+	MSM_FRONTEND_DAI_MULTIMEDIA3, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia4", MSM_BACKEND_DAI_TERT_TDM_RX_4,
+	MSM_FRONTEND_DAI_MULTIMEDIA4, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia5", MSM_BACKEND_DAI_TERT_TDM_RX_4,
+	MSM_FRONTEND_DAI_MULTIMEDIA5, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia6", MSM_BACKEND_DAI_TERT_TDM_RX_4,
+	MSM_FRONTEND_DAI_MULTIMEDIA6, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia7", MSM_BACKEND_DAI_TERT_TDM_RX_4,
+	MSM_FRONTEND_DAI_MULTIMEDIA7, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia8", MSM_BACKEND_DAI_TERT_TDM_RX_4,
+	MSM_FRONTEND_DAI_MULTIMEDIA8, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia9", MSM_BACKEND_DAI_TERT_TDM_RX_4,
+	MSM_FRONTEND_DAI_MULTIMEDIA9, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia10", MSM_BACKEND_DAI_TERT_TDM_RX_4,
+	MSM_FRONTEND_DAI_MULTIMEDIA10, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia11", MSM_BACKEND_DAI_TERT_TDM_RX_4,
+	MSM_FRONTEND_DAI_MULTIMEDIA11, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia12", MSM_BACKEND_DAI_TERT_TDM_RX_4,
+	MSM_FRONTEND_DAI_MULTIMEDIA12, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia13", MSM_BACKEND_DAI_TERT_TDM_RX_4,
+	MSM_FRONTEND_DAI_MULTIMEDIA13, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia14", MSM_BACKEND_DAI_TERT_TDM_RX_4,
+	MSM_FRONTEND_DAI_MULTIMEDIA14, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia15", MSM_BACKEND_DAI_TERT_TDM_RX_4,
+	MSM_FRONTEND_DAI_MULTIMEDIA15, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia16", MSM_BACKEND_DAI_TERT_TDM_RX_4,
+	MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia21", MSM_BACKEND_DAI_TERT_TDM_RX_4,
+	MSM_FRONTEND_DAI_MULTIMEDIA21, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia26", MSM_BACKEND_DAI_TERT_TDM_RX_4,
+	MSM_FRONTEND_DAI_MULTIMEDIA26, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+};
+
+static const struct snd_kcontrol_new quat_tdm_rx_0_mixer_controls[] = {
+	SOC_SINGLE_EXT("MultiMedia1", MSM_BACKEND_DAI_QUAT_TDM_RX_0 ,
+	MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia2", MSM_BACKEND_DAI_QUAT_TDM_RX_0,
+	MSM_FRONTEND_DAI_MULTIMEDIA2, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia3", MSM_BACKEND_DAI_QUAT_TDM_RX_0,
+	MSM_FRONTEND_DAI_MULTIMEDIA3, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia4", MSM_BACKEND_DAI_QUAT_TDM_RX_0,
+	MSM_FRONTEND_DAI_MULTIMEDIA4, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia5", MSM_BACKEND_DAI_QUAT_TDM_RX_0,
+	MSM_FRONTEND_DAI_MULTIMEDIA5, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia6", MSM_BACKEND_DAI_QUAT_TDM_RX_0,
+	MSM_FRONTEND_DAI_MULTIMEDIA6, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia7", MSM_BACKEND_DAI_QUAT_TDM_RX_0,
+	MSM_FRONTEND_DAI_MULTIMEDIA7, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia8", MSM_BACKEND_DAI_QUAT_TDM_RX_0,
+	MSM_FRONTEND_DAI_MULTIMEDIA8, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia9", MSM_BACKEND_DAI_QUAT_TDM_RX_0,
+	MSM_FRONTEND_DAI_MULTIMEDIA9, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia10", MSM_BACKEND_DAI_QUAT_TDM_RX_0,
+	MSM_FRONTEND_DAI_MULTIMEDIA10, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia11", MSM_BACKEND_DAI_QUAT_TDM_RX_0,
+	MSM_FRONTEND_DAI_MULTIMEDIA11, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia12", MSM_BACKEND_DAI_QUAT_TDM_RX_0,
+	MSM_FRONTEND_DAI_MULTIMEDIA12, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia13", MSM_BACKEND_DAI_QUAT_TDM_RX_0,
+	MSM_FRONTEND_DAI_MULTIMEDIA13, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia14", MSM_BACKEND_DAI_QUAT_TDM_RX_0,
+	MSM_FRONTEND_DAI_MULTIMEDIA14, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia15", MSM_BACKEND_DAI_QUAT_TDM_RX_0,
+	MSM_FRONTEND_DAI_MULTIMEDIA15, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia16", MSM_BACKEND_DAI_QUAT_TDM_RX_0,
+	MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia20", MSM_BACKEND_DAI_QUAT_TDM_RX_0,
+	MSM_FRONTEND_DAI_MULTIMEDIA20, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia21", MSM_BACKEND_DAI_QUAT_TDM_RX_0,
+	MSM_FRONTEND_DAI_MULTIMEDIA21, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia26", MSM_BACKEND_DAI_QUAT_TDM_RX_0,
+	MSM_FRONTEND_DAI_MULTIMEDIA26, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+};
+
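+/* Same pattern, but keyed on the quaternary TDM TX port instead of RX. */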
+static const struct snd_kcontrol_new quat_tdm_tx_0_mixer_controls[] = {
+	SOC_SINGLE_EXT("MultiMedia1", MSM_BACKEND_DAI_QUAT_TDM_TX_0,
+	MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia2", MSM_BACKEND_DAI_QUAT_TDM_TX_0,
+	MSM_FRONTEND_DAI_MULTIMEDIA2, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia3", MSM_BACKEND_DAI_QUAT_TDM_TX_0,
+	MSM_FRONTEND_DAI_MULTIMEDIA3, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia4", MSM_BACKEND_DAI_QUAT_TDM_TX_0,
+	MSM_FRONTEND_DAI_MULTIMEDIA4, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia5", MSM_BACKEND_DAI_QUAT_TDM_TX_0,
+	MSM_FRONTEND_DAI_MULTIMEDIA5, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia6", MSM_BACKEND_DAI_QUAT_TDM_TX_0,
+	MSM_FRONTEND_DAI_MULTIMEDIA6, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia7", MSM_BACKEND_DAI_QUAT_TDM_TX_0,
+	MSM_FRONTEND_DAI_MULTIMEDIA7, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia8", MSM_BACKEND_DAI_QUAT_TDM_TX_0,
+	MSM_FRONTEND_DAI_MULTIMEDIA8, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia9", MSM_BACKEND_DAI_QUAT_TDM_TX_0,
+	MSM_FRONTEND_DAI_MULTIMEDIA9, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia10", MSM_BACKEND_DAI_QUAT_TDM_TX_0,
+	MSM_FRONTEND_DAI_MULTIMEDIA10, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia11", MSM_BACKEND_DAI_QUAT_TDM_TX_0,
+	MSM_FRONTEND_DAI_MULTIMEDIA11, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia12", MSM_BACKEND_DAI_QUAT_TDM_TX_0,
+	MSM_FRONTEND_DAI_MULTIMEDIA12, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia13", MSM_BACKEND_DAI_QUAT_TDM_TX_0,
+	MSM_FRONTEND_DAI_MULTIMEDIA13, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia14", MSM_BACKEND_DAI_QUAT_TDM_TX_0,
+	MSM_FRONTEND_DAI_MULTIMEDIA14, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia15", MSM_BACKEND_DAI_QUAT_TDM_TX_0,
+	MSM_FRONTEND_DAI_MULTIMEDIA15, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia16", MSM_BACKEND_DAI_QUAT_TDM_TX_0,
+	MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+};
+
+static const struct snd_kcontrol_new quat_tdm_rx_1_mixer_controls[] = {
+	SOC_SINGLE_EXT("MultiMedia1", MSM_BACKEND_DAI_QUAT_TDM_RX_1 ,
+	MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia2", MSM_BACKEND_DAI_QUAT_TDM_RX_1,
+	MSM_FRONTEND_DAI_MULTIMEDIA2, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia3", MSM_BACKEND_DAI_QUAT_TDM_RX_1,
+	MSM_FRONTEND_DAI_MULTIMEDIA3, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia4", MSM_BACKEND_DAI_QUAT_TDM_RX_1,
+	MSM_FRONTEND_DAI_MULTIMEDIA4, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia5", MSM_BACKEND_DAI_QUAT_TDM_RX_1,
+	MSM_FRONTEND_DAI_MULTIMEDIA5, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia6", MSM_BACKEND_DAI_QUAT_TDM_RX_1,
+	MSM_FRONTEND_DAI_MULTIMEDIA6, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia7", MSM_BACKEND_DAI_QUAT_TDM_RX_1,
+	MSM_FRONTEND_DAI_MULTIMEDIA7, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia8", MSM_BACKEND_DAI_QUAT_TDM_RX_1,
+	MSM_FRONTEND_DAI_MULTIMEDIA8, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia9", MSM_BACKEND_DAI_QUAT_TDM_RX_1,
+	MSM_FRONTEND_DAI_MULTIMEDIA9, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia10", MSM_BACKEND_DAI_QUAT_TDM_RX_1,
+	MSM_FRONTEND_DAI_MULTIMEDIA10, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia11", MSM_BACKEND_DAI_QUAT_TDM_RX_1,
+	MSM_FRONTEND_DAI_MULTIMEDIA11, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia12", MSM_BACKEND_DAI_QUAT_TDM_RX_1,
+	MSM_FRONTEND_DAI_MULTIMEDIA12, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia13", MSM_BACKEND_DAI_QUAT_TDM_RX_1,
+	MSM_FRONTEND_DAI_MULTIMEDIA13, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia14", MSM_BACKEND_DAI_QUAT_TDM_RX_1,
+	MSM_FRONTEND_DAI_MULTIMEDIA14, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia15", MSM_BACKEND_DAI_QUAT_TDM_RX_1,
+	MSM_FRONTEND_DAI_MULTIMEDIA15, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia16", MSM_BACKEND_DAI_QUAT_TDM_RX_1,
+	MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia20", MSM_BACKEND_DAI_QUAT_TDM_RX_1,
+	MSM_FRONTEND_DAI_MULTIMEDIA20, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia21", MSM_BACKEND_DAI_QUAT_TDM_RX_1,
+	MSM_FRONTEND_DAI_MULTIMEDIA21, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia26", MSM_BACKEND_DAI_QUAT_TDM_RX_1,
+	MSM_FRONTEND_DAI_MULTIMEDIA26, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+};
+
+static const struct snd_kcontrol_new quat_tdm_rx_2_mixer_controls[] = {
+	SOC_SINGLE_EXT("MultiMedia1", MSM_BACKEND_DAI_QUAT_TDM_RX_2 ,
+	MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia2", MSM_BACKEND_DAI_QUAT_TDM_RX_2,
+	MSM_FRONTEND_DAI_MULTIMEDIA2, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia3", MSM_BACKEND_DAI_QUAT_TDM_RX_2,
+	MSM_FRONTEND_DAI_MULTIMEDIA3, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia4", MSM_BACKEND_DAI_QUAT_TDM_RX_2,
+	MSM_FRONTEND_DAI_MULTIMEDIA4, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia5", MSM_BACKEND_DAI_QUAT_TDM_RX_2,
+	MSM_FRONTEND_DAI_MULTIMEDIA5, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia6", MSM_BACKEND_DAI_QUAT_TDM_RX_2,
+	MSM_FRONTEND_DAI_MULTIMEDIA6, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia7", MSM_BACKEND_DAI_QUAT_TDM_RX_2,
+	MSM_FRONTEND_DAI_MULTIMEDIA7, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia8", MSM_BACKEND_DAI_QUAT_TDM_RX_2,
+	MSM_FRONTEND_DAI_MULTIMEDIA8, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia9", MSM_BACKEND_DAI_QUAT_TDM_RX_2,
+	MSM_FRONTEND_DAI_MULTIMEDIA9, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia10", MSM_BACKEND_DAI_QUAT_TDM_RX_2,
+	MSM_FRONTEND_DAI_MULTIMEDIA10, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia11", MSM_BACKEND_DAI_QUAT_TDM_RX_2,
+	MSM_FRONTEND_DAI_MULTIMEDIA11, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia12", MSM_BACKEND_DAI_QUAT_TDM_RX_2,
+	MSM_FRONTEND_DAI_MULTIMEDIA12, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia13", MSM_BACKEND_DAI_QUAT_TDM_RX_2,
+	MSM_FRONTEND_DAI_MULTIMEDIA13, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia14", MSM_BACKEND_DAI_QUAT_TDM_RX_2,
+	MSM_FRONTEND_DAI_MULTIMEDIA14, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia15", MSM_BACKEND_DAI_QUAT_TDM_RX_2,
+	MSM_FRONTEND_DAI_MULTIMEDIA15, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia16", MSM_BACKEND_DAI_QUAT_TDM_RX_2,
+	MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia20", MSM_BACKEND_DAI_QUAT_TDM_RX_2,
+	MSM_FRONTEND_DAI_MULTIMEDIA20, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia21", MSM_BACKEND_DAI_QUAT_TDM_RX_2,
+	MSM_FRONTEND_DAI_MULTIMEDIA21, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia26", MSM_BACKEND_DAI_QUAT_TDM_RX_2,
+	MSM_FRONTEND_DAI_MULTIMEDIA26, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+};
+
+static const struct snd_kcontrol_new quat_tdm_rx_3_mixer_controls[] = {
+	SOC_SINGLE_EXT("MultiMedia1", MSM_BACKEND_DAI_QUAT_TDM_RX_3 ,
+	MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia2", MSM_BACKEND_DAI_QUAT_TDM_RX_3,
+	MSM_FRONTEND_DAI_MULTIMEDIA2, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia3", MSM_BACKEND_DAI_QUAT_TDM_RX_3,
+	MSM_FRONTEND_DAI_MULTIMEDIA3, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia4", MSM_BACKEND_DAI_QUAT_TDM_RX_3,
+	MSM_FRONTEND_DAI_MULTIMEDIA4, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia5", MSM_BACKEND_DAI_QUAT_TDM_RX_3,
+	MSM_FRONTEND_DAI_MULTIMEDIA5, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia6", MSM_BACKEND_DAI_QUAT_TDM_RX_3,
+	MSM_FRONTEND_DAI_MULTIMEDIA6, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia7", MSM_BACKEND_DAI_QUAT_TDM_RX_3,
+	MSM_FRONTEND_DAI_MULTIMEDIA7, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia8", MSM_BACKEND_DAI_QUAT_TDM_RX_3,
+	MSM_FRONTEND_DAI_MULTIMEDIA8, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia9", MSM_BACKEND_DAI_QUAT_TDM_RX_3,
+	MSM_FRONTEND_DAI_MULTIMEDIA9, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia10", MSM_BACKEND_DAI_QUAT_TDM_RX_3,
+	MSM_FRONTEND_DAI_MULTIMEDIA10, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia11", MSM_BACKEND_DAI_QUAT_TDM_RX_3,
+	MSM_FRONTEND_DAI_MULTIMEDIA11, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia12", MSM_BACKEND_DAI_QUAT_TDM_RX_3,
+	MSM_FRONTEND_DAI_MULTIMEDIA12, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia13", MSM_BACKEND_DAI_QUAT_TDM_RX_3,
+	MSM_FRONTEND_DAI_MULTIMEDIA13, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia14", MSM_BACKEND_DAI_QUAT_TDM_RX_3,
+	MSM_FRONTEND_DAI_MULTIMEDIA14, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia15", MSM_BACKEND_DAI_QUAT_TDM_RX_3,
+	MSM_FRONTEND_DAI_MULTIMEDIA15, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia16", MSM_BACKEND_DAI_QUAT_TDM_RX_3,
+	MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia20", MSM_BACKEND_DAI_QUAT_TDM_RX_3,
+	MSM_FRONTEND_DAI_MULTIMEDIA20, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia21", MSM_BACKEND_DAI_QUAT_TDM_RX_3,
+	MSM_FRONTEND_DAI_MULTIMEDIA21, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia26", MSM_BACKEND_DAI_QUAT_TDM_RX_3,
+	MSM_FRONTEND_DAI_MULTIMEDIA26, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+};
+
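+/*
+ * MultiMediaN uplink (capture) mixers: direction is reversed relative to
+ * the arrays above. Each entry names a backend TX port as a capture
+ * source for the MULTIMEDIAn record session; presumably these attach to
+ * per-frontend "MultiMediaN Mixer" DAPM widgets defined elsewhere in
+ * this file.
+ */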
+static const struct snd_kcontrol_new mmul1_mixer_controls[] = {
+	SOC_SINGLE_EXT("PRI_TX", MSM_BACKEND_DAI_PRI_I2S_TX,
+		MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_audio_mixer,
+		msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MI2S_TX", MSM_BACKEND_DAI_MI2S_TX,
+		MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_audio_mixer,
+		msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("PRI_MI2S_TX", MSM_BACKEND_DAI_PRI_MI2S_TX,
+		MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_audio_mixer,
+		msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("QUAT_MI2S_TX", MSM_BACKEND_DAI_QUATERNARY_MI2S_TX,
+		MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_audio_mixer,
+		msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("TERT_MI2S_TX", MSM_BACKEND_DAI_TERTIARY_MI2S_TX,
+		MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_audio_mixer,
+		msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("INT2_MI2S_TX", MSM_BACKEND_DAI_INT2_MI2S_TX,
+		MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_audio_mixer,
+		msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("INT3_MI2S_TX", MSM_BACKEND_DAI_INT3_MI2S_TX,
+		MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_audio_mixer,
+		msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("SEC_MI2S_TX", MSM_BACKEND_DAI_SECONDARY_MI2S_TX,
+		MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_audio_mixer,
+		msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("SLIM_0_TX", MSM_BACKEND_DAI_SLIMBUS_0_TX,
+		MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_audio_mixer,
+		msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("AUX_PCM_UL_TX", MSM_BACKEND_DAI_AUXPCM_TX,
+		MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_audio_mixer,
+		msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("SEC_AUX_PCM_UL_TX", MSM_BACKEND_DAI_SEC_AUXPCM_TX,
+		MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_audio_mixer,
+		msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("TERT_AUXPCM_UL_TX", MSM_BACKEND_DAI_TERT_AUXPCM_TX,
+		MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_audio_mixer,
+		msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("QUAT_AUXPCM_UL_TX", MSM_BACKEND_DAI_QUAT_AUXPCM_TX,
+		MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_audio_mixer,
+		msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("INTERNAL_BT_SCO_TX", MSM_BACKEND_DAI_INT_BT_SCO_TX,
+		MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_audio_mixer,
+		msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("INTERNAL_FM_TX", MSM_BACKEND_DAI_INT_FM_TX,
+		MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_audio_mixer,
+		msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("AFE_PCM_TX", MSM_BACKEND_DAI_AFE_PCM_TX,
+		MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_audio_mixer,
+		msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("VOC_REC_DL", MSM_BACKEND_DAI_INCALL_RECORD_RX,
+		MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_audio_mixer,
+		msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("VOC_REC_UL", MSM_BACKEND_DAI_INCALL_RECORD_TX,
+		MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_audio_mixer,
+		msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("SLIM_4_TX", MSM_BACKEND_DAI_SLIMBUS_4_TX,
+		MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_audio_mixer,
+		msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("SLIM_6_TX", MSM_BACKEND_DAI_SLIMBUS_6_TX,
+		MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_audio_mixer,
+		msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("QUIN_MI2S_TX", MSM_BACKEND_DAI_QUINARY_MI2S_TX,
+		MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_audio_mixer,
+		msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("PRI_TDM_TX_0", MSM_BACKEND_DAI_PRI_TDM_TX_0,
+		MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_audio_mixer,
+		msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("PRI_TDM_TX_1", MSM_BACKEND_DAI_PRI_TDM_TX_1,
+		MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_audio_mixer,
+		msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("PRI_TDM_TX_2", MSM_BACKEND_DAI_PRI_TDM_TX_2,
+		MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_audio_mixer,
+		msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("PRI_TDM_TX_3", MSM_BACKEND_DAI_PRI_TDM_TX_3,
+		MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_audio_mixer,
+		msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("SEC_TDM_TX_0", MSM_BACKEND_DAI_SEC_TDM_TX_0,
+		MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_audio_mixer,
+		msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("SEC_TDM_TX_1", MSM_BACKEND_DAI_SEC_TDM_TX_1,
+		MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_audio_mixer,
+		msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("SEC_TDM_TX_2", MSM_BACKEND_DAI_SEC_TDM_TX_2,
+		MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_audio_mixer,
+		msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("SEC_TDM_TX_3", MSM_BACKEND_DAI_SEC_TDM_TX_3,
+		MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_audio_mixer,
+		msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("TERT_TDM_TX_0", MSM_BACKEND_DAI_TERT_TDM_TX_0,
+		MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_audio_mixer,
+		msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("TERT_TDM_TX_1", MSM_BACKEND_DAI_TERT_TDM_TX_1,
+		MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_audio_mixer,
+		msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("TERT_TDM_TX_2", MSM_BACKEND_DAI_TERT_TDM_TX_2,
+		MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_audio_mixer,
+		msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("TERT_TDM_TX_3", MSM_BACKEND_DAI_TERT_TDM_TX_3,
+		MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_audio_mixer,
+		msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("QUAT_TDM_TX_0", MSM_BACKEND_DAI_QUAT_TDM_TX_0,
+		MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_audio_mixer,
+		msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("QUAT_TDM_TX_1", MSM_BACKEND_DAI_QUAT_TDM_TX_1,
+		MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_audio_mixer,
+		msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("QUAT_TDM_TX_2", MSM_BACKEND_DAI_QUAT_TDM_TX_2,
+		MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_audio_mixer,
+		msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("QUAT_TDM_TX_3", MSM_BACKEND_DAI_QUAT_TDM_TX_3,
+		MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_audio_mixer,
+		msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("SLIM_7_TX", MSM_BACKEND_DAI_SLIMBUS_7_TX,
+		MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_audio_mixer,
+		msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("SLIM_8_TX", MSM_BACKEND_DAI_SLIMBUS_8_TX,
+		MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_audio_mixer,
+		msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("USB_AUDIO_TX", MSM_BACKEND_DAI_USB_TX,
+		MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_audio_mixer,
+		msm_routing_put_audio_mixer),
+};
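+
+/*
+ * Userspace view (assumed names, following the usual ASoC convention of
+ * exposing a DAPM mixer control as "<widget> <control>"):
+ *
+ *   amixer -c 0 cset name='MultiMedia1 Mixer SLIM_0_TX' 1
+ *
+ * would route SLIMBUS_0_TX capture into the MULTIMEDIA1 session. The
+ * actual widget names are defined where these arrays are wired into the
+ * DAPM graph, outside this hunk.
+ */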
+
+static const struct snd_kcontrol_new mmul2_mixer_controls[] = {
+	SOC_SINGLE_EXT("INTERNAL_FM_TX", MSM_BACKEND_DAI_INT_FM_TX,
+	MSM_FRONTEND_DAI_MULTIMEDIA2, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MI2S_TX", MSM_BACKEND_DAI_MI2S_TX,
+	MSM_FRONTEND_DAI_MULTIMEDIA2, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("PRI_MI2S_TX", MSM_BACKEND_DAI_PRI_MI2S_TX,
+	MSM_FRONTEND_DAI_MULTIMEDIA2, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("SEC_MI2S_TX", MSM_BACKEND_DAI_SECONDARY_MI2S_TX,
+	MSM_FRONTEND_DAI_MULTIMEDIA2, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("TERT_MI2S_TX", MSM_BACKEND_DAI_TERTIARY_MI2S_TX,
+	MSM_FRONTEND_DAI_MULTIMEDIA2, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("INT2_MI2S_TX", MSM_BACKEND_DAI_INT2_MI2S_TX,
+	MSM_FRONTEND_DAI_MULTIMEDIA2, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("INT3_MI2S_TX", MSM_BACKEND_DAI_INT3_MI2S_TX,
+	MSM_FRONTEND_DAI_MULTIMEDIA2, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("QUAT_MI2S_TX", MSM_BACKEND_DAI_QUATERNARY_MI2S_TX,
+	MSM_FRONTEND_DAI_MULTIMEDIA2, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("SLIM_0_TX", MSM_BACKEND_DAI_SLIMBUS_0_TX,
+	MSM_FRONTEND_DAI_MULTIMEDIA2, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("SLIM_6_TX", MSM_BACKEND_DAI_SLIMBUS_6_TX,
+	MSM_FRONTEND_DAI_MULTIMEDIA2, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("SLIM_1_TX", MSM_BACKEND_DAI_SLIMBUS_1_TX,
+	MSM_FRONTEND_DAI_MULTIMEDIA2, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("QUIN_MI2S_TX", MSM_BACKEND_DAI_QUINARY_MI2S_TX,
+	MSM_FRONTEND_DAI_MULTIMEDIA2, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("PRI_TDM_TX_0", MSM_BACKEND_DAI_PRI_TDM_TX_0,
+	MSM_FRONTEND_DAI_MULTIMEDIA2, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("PRI_TDM_TX_1", MSM_BACKEND_DAI_PRI_TDM_TX_1,
+	MSM_FRONTEND_DAI_MULTIMEDIA2, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("PRI_TDM_TX_2", MSM_BACKEND_DAI_PRI_TDM_TX_2,
+	MSM_FRONTEND_DAI_MULTIMEDIA2, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("PRI_TDM_TX_3", MSM_BACKEND_DAI_PRI_TDM_TX_3,
+	MSM_FRONTEND_DAI_MULTIMEDIA2, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("SEC_TDM_TX_0", MSM_BACKEND_DAI_SEC_TDM_TX_0,
+	MSM_FRONTEND_DAI_MULTIMEDIA2, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("SEC_TDM_TX_1", MSM_BACKEND_DAI_SEC_TDM_TX_1,
+	MSM_FRONTEND_DAI_MULTIMEDIA2, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("SEC_TDM_TX_2", MSM_BACKEND_DAI_SEC_TDM_TX_2,
+	MSM_FRONTEND_DAI_MULTIMEDIA2, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("SEC_TDM_TX_3", MSM_BACKEND_DAI_SEC_TDM_TX_3,
+	MSM_FRONTEND_DAI_MULTIMEDIA2, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("TERT_TDM_TX_0", MSM_BACKEND_DAI_TERT_TDM_TX_0,
+	MSM_FRONTEND_DAI_MULTIMEDIA2, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("TERT_TDM_TX_1", MSM_BACKEND_DAI_TERT_TDM_TX_1,
+	MSM_FRONTEND_DAI_MULTIMEDIA2, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("TERT_TDM_TX_2", MSM_BACKEND_DAI_TERT_TDM_TX_2,
+	MSM_FRONTEND_DAI_MULTIMEDIA2, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("TERT_TDM_TX_3", MSM_BACKEND_DAI_TERT_TDM_TX_3,
+	MSM_FRONTEND_DAI_MULTIMEDIA2, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("QUAT_TDM_TX_0", MSM_BACKEND_DAI_QUAT_TDM_TX_0,
+	MSM_FRONTEND_DAI_MULTIMEDIA2, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("QUAT_TDM_TX_1", MSM_BACKEND_DAI_QUAT_TDM_TX_1,
+	MSM_FRONTEND_DAI_MULTIMEDIA2, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("QUAT_TDM_TX_2", MSM_BACKEND_DAI_QUAT_TDM_TX_2,
+	MSM_FRONTEND_DAI_MULTIMEDIA2, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("QUAT_TDM_TX_3", MSM_BACKEND_DAI_QUAT_TDM_TX_3,
+	MSM_FRONTEND_DAI_MULTIMEDIA2, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("SLIM_8_TX", MSM_BACKEND_DAI_SLIMBUS_8_TX,
+	MSM_FRONTEND_DAI_MULTIMEDIA2, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("USB_AUDIO_TX", MSM_BACKEND_DAI_USB_TX,
+	MSM_FRONTEND_DAI_MULTIMEDIA2, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+};
+
+static const struct snd_kcontrol_new mmul3_mixer_controls[] = {
+	SOC_SINGLE_EXT("SLIM_0_TX", MSM_BACKEND_DAI_SLIMBUS_0_TX,
+	MSM_FRONTEND_DAI_MULTIMEDIA3, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("INTERNAL_FM_TX", MSM_BACKEND_DAI_INT_FM_TX,
+	MSM_FRONTEND_DAI_MULTIMEDIA3, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MI2S_TX", MSM_BACKEND_DAI_MI2S_TX,
+	MSM_FRONTEND_DAI_MULTIMEDIA3, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("AFE_PCM_TX", MSM_BACKEND_DAI_AFE_PCM_TX,
+	MSM_FRONTEND_DAI_MULTIMEDIA3, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("INTERNAL_BT_SCO_TX", MSM_BACKEND_DAI_INT_BT_SCO_TX,
+	MSM_FRONTEND_DAI_MULTIMEDIA3, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("AUX_PCM_TX", MSM_BACKEND_DAI_AUXPCM_TX,
+	MSM_FRONTEND_DAI_MULTIMEDIA3, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("SEC_AUX_PCM_TX", MSM_BACKEND_DAI_SEC_AUXPCM_TX,
+	MSM_FRONTEND_DAI_MULTIMEDIA3, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("TERT_AUX_PCM_TX", MSM_BACKEND_DAI_TERT_AUXPCM_TX,
+	MSM_FRONTEND_DAI_MULTIMEDIA3, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("QUAT_AUX_PCM_TX", MSM_BACKEND_DAI_QUAT_AUXPCM_TX,
+	MSM_FRONTEND_DAI_MULTIMEDIA3, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("PRI_MI2S_TX", MSM_BACKEND_DAI_PRI_MI2S_TX,
+	MSM_FRONTEND_DAI_MULTIMEDIA3, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("TERT_MI2S_TX", MSM_BACKEND_DAI_TERTIARY_MI2S_TX,
+	MSM_FRONTEND_DAI_MULTIMEDIA3, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("INT2_MI2S_TX", MSM_BACKEND_DAI_INT2_MI2S_TX,
+	MSM_FRONTEND_DAI_MULTIMEDIA3, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("INT3_MI2S_TX", MSM_BACKEND_DAI_INT3_MI2S_TX,
+	MSM_FRONTEND_DAI_MULTIMEDIA3, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("PRI_TDM_TX_0", MSM_BACKEND_DAI_PRI_TDM_TX_0,
+	MSM_FRONTEND_DAI_MULTIMEDIA3, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("PRI_TDM_TX_1", MSM_BACKEND_DAI_PRI_TDM_TX_1,
+	MSM_FRONTEND_DAI_MULTIMEDIA3, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("PRI_TDM_TX_2", MSM_BACKEND_DAI_PRI_TDM_TX_2,
+	MSM_FRONTEND_DAI_MULTIMEDIA3, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("PRI_TDM_TX_3", MSM_BACKEND_DAI_PRI_TDM_TX_3,
+	MSM_FRONTEND_DAI_MULTIMEDIA3, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("SEC_TDM_TX_0", MSM_BACKEND_DAI_SEC_TDM_TX_0,
+	MSM_FRONTEND_DAI_MULTIMEDIA3, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("SEC_TDM_TX_1", MSM_BACKEND_DAI_SEC_TDM_TX_1,
+	MSM_FRONTEND_DAI_MULTIMEDIA3, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("SEC_TDM_TX_2", MSM_BACKEND_DAI_SEC_TDM_TX_2,
+	MSM_FRONTEND_DAI_MULTIMEDIA3, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("SEC_TDM_TX_3", MSM_BACKEND_DAI_SEC_TDM_TX_3,
+	MSM_FRONTEND_DAI_MULTIMEDIA3, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("TERT_TDM_TX_0", MSM_BACKEND_DAI_TERT_TDM_TX_0,
+	MSM_FRONTEND_DAI_MULTIMEDIA3, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("TERT_TDM_TX_1", MSM_BACKEND_DAI_TERT_TDM_TX_1,
+	MSM_FRONTEND_DAI_MULTIMEDIA3, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("TERT_TDM_TX_2", MSM_BACKEND_DAI_TERT_TDM_TX_2,
+	MSM_FRONTEND_DAI_MULTIMEDIA3, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("TERT_TDM_TX_3", MSM_BACKEND_DAI_TERT_TDM_TX_3,
+	MSM_FRONTEND_DAI_MULTIMEDIA3, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("QUAT_TDM_TX_0", MSM_BACKEND_DAI_QUAT_TDM_TX_0,
+	MSM_FRONTEND_DAI_MULTIMEDIA3, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("QUAT_TDM_TX_1", MSM_BACKEND_DAI_QUAT_TDM_TX_1,
+	MSM_FRONTEND_DAI_MULTIMEDIA3, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("QUAT_TDM_TX_2", MSM_BACKEND_DAI_QUAT_TDM_TX_2,
+	MSM_FRONTEND_DAI_MULTIMEDIA3, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("QUAT_TDM_TX_3", MSM_BACKEND_DAI_QUAT_TDM_TX_3,
+	MSM_FRONTEND_DAI_MULTIMEDIA3, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+};
+
+static const struct snd_kcontrol_new mmul4_mixer_controls[] = {
+	SOC_SINGLE_EXT("SLIM_0_TX", MSM_BACKEND_DAI_SLIMBUS_0_TX,
+	MSM_FRONTEND_DAI_MULTIMEDIA4, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("PRI_MI2S_TX", MSM_BACKEND_DAI_PRI_MI2S_TX,
+	MSM_FRONTEND_DAI_MULTIMEDIA4, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("INTERNAL_FM_TX", MSM_BACKEND_DAI_INT_FM_TX,
+	MSM_FRONTEND_DAI_MULTIMEDIA4, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("INTERNAL_BT_SCO_TX", MSM_BACKEND_DAI_INT_BT_SCO_TX,
+	MSM_FRONTEND_DAI_MULTIMEDIA4, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("AFE_PCM_TX", MSM_BACKEND_DAI_AFE_PCM_TX,
+	MSM_FRONTEND_DAI_MULTIMEDIA4, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("VOC_REC_DL", MSM_BACKEND_DAI_INCALL_RECORD_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA4, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("VOC_REC_UL", MSM_BACKEND_DAI_INCALL_RECORD_TX,
+	MSM_FRONTEND_DAI_MULTIMEDIA4, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("TERT_TDM_TX_0", MSM_BACKEND_DAI_TERT_TDM_TX_0,
+	MSM_FRONTEND_DAI_MULTIMEDIA4, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("PRI_TDM_TX_0", MSM_BACKEND_DAI_PRI_TDM_TX_0,
+	MSM_FRONTEND_DAI_MULTIMEDIA4, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("PRI_TDM_TX_1", MSM_BACKEND_DAI_PRI_TDM_TX_1,
+	MSM_FRONTEND_DAI_MULTIMEDIA4, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("PRI_TDM_TX_2", MSM_BACKEND_DAI_PRI_TDM_TX_2,
+	MSM_FRONTEND_DAI_MULTIMEDIA4, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("PRI_TDM_TX_3", MSM_BACKEND_DAI_PRI_TDM_TX_3,
+	MSM_FRONTEND_DAI_MULTIMEDIA4, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("SEC_TDM_TX_0", MSM_BACKEND_DAI_SEC_TDM_TX_0,
+	MSM_FRONTEND_DAI_MULTIMEDIA4, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("SEC_TDM_TX_1", MSM_BACKEND_DAI_SEC_TDM_TX_1,
+	MSM_FRONTEND_DAI_MULTIMEDIA4, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("SEC_TDM_TX_2", MSM_BACKEND_DAI_SEC_TDM_TX_2,
+	MSM_FRONTEND_DAI_MULTIMEDIA4, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("SEC_TDM_TX_3", MSM_BACKEND_DAI_SEC_TDM_TX_3,
+	MSM_FRONTEND_DAI_MULTIMEDIA4, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("TERT_TDM_TX_1", MSM_BACKEND_DAI_TERT_TDM_TX_1,
+	MSM_FRONTEND_DAI_MULTIMEDIA4, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("TERT_TDM_TX_2", MSM_BACKEND_DAI_TERT_TDM_TX_2,
+	MSM_FRONTEND_DAI_MULTIMEDIA4, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("TERT_TDM_TX_3", MSM_BACKEND_DAI_TERT_TDM_TX_3,
+	MSM_FRONTEND_DAI_MULTIMEDIA4, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("QUAT_TDM_TX_0", MSM_BACKEND_DAI_QUAT_TDM_TX_0,
+	MSM_FRONTEND_DAI_MULTIMEDIA4, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("QUAT_TDM_TX_1", MSM_BACKEND_DAI_QUAT_TDM_TX_1,
+	MSM_FRONTEND_DAI_MULTIMEDIA4, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("QUAT_TDM_TX_2", MSM_BACKEND_DAI_QUAT_TDM_TX_2,
+	MSM_FRONTEND_DAI_MULTIMEDIA4, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("QUAT_TDM_TX_3", MSM_BACKEND_DAI_QUAT_TDM_TX_3,
+	MSM_FRONTEND_DAI_MULTIMEDIA4, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("SEC_MI2S_TX", MSM_BACKEND_DAI_SECONDARY_MI2S_TX,
+	MSM_FRONTEND_DAI_MULTIMEDIA4, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("TERT_MI2S_TX", MSM_BACKEND_DAI_TERTIARY_MI2S_TX,
+	MSM_FRONTEND_DAI_MULTIMEDIA4, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("INT2_MI2S_TX", MSM_BACKEND_DAI_INT2_MI2S_TX,
+	MSM_FRONTEND_DAI_MULTIMEDIA4, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("INT3_MI2S_TX", MSM_BACKEND_DAI_INT3_MI2S_TX,
+	MSM_FRONTEND_DAI_MULTIMEDIA4, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("QUAT_MI2S_TX", MSM_BACKEND_DAI_QUATERNARY_MI2S_TX,
+	MSM_FRONTEND_DAI_MULTIMEDIA4, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("USB_AUDIO_TX", MSM_BACKEND_DAI_USB_TX,
+	MSM_FRONTEND_DAI_MULTIMEDIA4, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+};
+
+static const struct snd_kcontrol_new mmul5_mixer_controls[] = {
+	SOC_SINGLE_EXT("SLIM_0_TX", MSM_BACKEND_DAI_SLIMBUS_0_TX,
+	MSM_FRONTEND_DAI_MULTIMEDIA5, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("INTERNAL_FM_TX", MSM_BACKEND_DAI_INT_FM_TX,
+	MSM_FRONTEND_DAI_MULTIMEDIA5, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MI2S_TX", MSM_BACKEND_DAI_MI2S_TX,
+	MSM_FRONTEND_DAI_MULTIMEDIA5, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("AFE_PCM_TX", MSM_BACKEND_DAI_AFE_PCM_TX,
+	MSM_FRONTEND_DAI_MULTIMEDIA5, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("INTERNAL_BT_SCO_TX", MSM_BACKEND_DAI_INT_BT_SCO_TX,
+	MSM_FRONTEND_DAI_MULTIMEDIA5, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("AUX_PCM_UL_TX", MSM_BACKEND_DAI_AUXPCM_TX,
+	MSM_FRONTEND_DAI_MULTIMEDIA5, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("SEC_AUX_PCM_TX", MSM_BACKEND_DAI_SEC_AUXPCM_TX,
+	MSM_FRONTEND_DAI_MULTIMEDIA5, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("TERT_AUX_PCM_TX", MSM_BACKEND_DAI_TERT_AUXPCM_TX,
+	MSM_FRONTEND_DAI_MULTIMEDIA5, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("QUAT_AUX_PCM_TX", MSM_BACKEND_DAI_QUAT_AUXPCM_TX,
+	MSM_FRONTEND_DAI_MULTIMEDIA5, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("PRI_MI2S_TX", MSM_BACKEND_DAI_PRI_MI2S_TX,
+	MSM_FRONTEND_DAI_MULTIMEDIA5, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("TERT_MI2S_TX", MSM_BACKEND_DAI_TERTIARY_MI2S_TX,
+	MSM_FRONTEND_DAI_MULTIMEDIA5, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("INT2_MI2S_TX", MSM_BACKEND_DAI_INT2_MI2S_TX,
+	MSM_FRONTEND_DAI_MULTIMEDIA5, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("INT3_MI2S_TX", MSM_BACKEND_DAI_INT3_MI2S_TX,
+	MSM_FRONTEND_DAI_MULTIMEDIA5, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("PRI_TDM_TX_0", MSM_BACKEND_DAI_PRI_TDM_TX_0,
+	MSM_FRONTEND_DAI_MULTIMEDIA5, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("PRI_TDM_TX_1", MSM_BACKEND_DAI_PRI_TDM_TX_1,
+	MSM_FRONTEND_DAI_MULTIMEDIA5, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("PRI_TDM_TX_2", MSM_BACKEND_DAI_PRI_TDM_TX_2,
+	MSM_FRONTEND_DAI_MULTIMEDIA5, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("PRI_TDM_TX_3", MSM_BACKEND_DAI_PRI_TDM_TX_3,
+	MSM_FRONTEND_DAI_MULTIMEDIA5, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("SEC_TDM_TX_0", MSM_BACKEND_DAI_SEC_TDM_TX_0,
+	MSM_FRONTEND_DAI_MULTIMEDIA5, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("SEC_TDM_TX_1", MSM_BACKEND_DAI_SEC_TDM_TX_1,
+	MSM_FRONTEND_DAI_MULTIMEDIA5, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("SEC_TDM_TX_2", MSM_BACKEND_DAI_SEC_TDM_TX_2,
+	MSM_FRONTEND_DAI_MULTIMEDIA5, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("SEC_TDM_TX_3", MSM_BACKEND_DAI_SEC_TDM_TX_3,
+	MSM_FRONTEND_DAI_MULTIMEDIA5, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("TERT_TDM_TX_0", MSM_BACKEND_DAI_TERT_TDM_TX_0,
+	MSM_FRONTEND_DAI_MULTIMEDIA5, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("TERT_TDM_TX_1", MSM_BACKEND_DAI_TERT_TDM_TX_1,
+	MSM_FRONTEND_DAI_MULTIMEDIA5, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("TERT_TDM_TX_2", MSM_BACKEND_DAI_TERT_TDM_TX_2,
+	MSM_FRONTEND_DAI_MULTIMEDIA5, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("TERT_TDM_TX_3", MSM_BACKEND_DAI_TERT_TDM_TX_3,
+	MSM_FRONTEND_DAI_MULTIMEDIA5, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("QUAT_TDM_TX_0", MSM_BACKEND_DAI_QUAT_TDM_TX_0,
+	MSM_FRONTEND_DAI_MULTIMEDIA5, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("QUAT_TDM_TX_1", MSM_BACKEND_DAI_QUAT_TDM_TX_1,
+	MSM_FRONTEND_DAI_MULTIMEDIA5, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("QUAT_TDM_TX_2", MSM_BACKEND_DAI_QUAT_TDM_TX_2,
+	MSM_FRONTEND_DAI_MULTIMEDIA5, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("QUAT_TDM_TX_3", MSM_BACKEND_DAI_QUAT_TDM_TX_3,
+	MSM_FRONTEND_DAI_MULTIMEDIA5, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("SEC_MI2S_TX", MSM_BACKEND_DAI_SECONDARY_MI2S_TX,
+	MSM_FRONTEND_DAI_MULTIMEDIA5, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("QUAT_MI2S_TX", MSM_BACKEND_DAI_QUATERNARY_MI2S_TX,
+	MSM_FRONTEND_DAI_MULTIMEDIA5, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("SLIM_7_TX", MSM_BACKEND_DAI_SLIMBUS_7_TX,
+	MSM_FRONTEND_DAI_MULTIMEDIA5, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("SLIM_8_TX", MSM_BACKEND_DAI_SLIMBUS_8_TX,
+	MSM_FRONTEND_DAI_MULTIMEDIA5, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("USB_AUDIO_TX", MSM_BACKEND_DAI_USB_TX,
+	MSM_FRONTEND_DAI_MULTIMEDIA5, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+};
+
+static const struct snd_kcontrol_new mmul6_mixer_controls[] = {
+	SOC_SINGLE_EXT("INTERNAL_FM_TX", MSM_BACKEND_DAI_INT_FM_TX,
+	MSM_FRONTEND_DAI_MULTIMEDIA6, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("SLIM_0_TX", MSM_BACKEND_DAI_SLIMBUS_0_TX,
+	MSM_FRONTEND_DAI_MULTIMEDIA6, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("PRI_MI2S_TX", MSM_BACKEND_DAI_PRI_MI2S_TX,
+	MSM_FRONTEND_DAI_MULTIMEDIA6, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("SEC_MI2S_TX", MSM_BACKEND_DAI_SECONDARY_MI2S_TX,
+	MSM_FRONTEND_DAI_MULTIMEDIA6, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("TERT_MI2S_TX", MSM_BACKEND_DAI_TERTIARY_MI2S_TX,
+	MSM_FRONTEND_DAI_MULTIMEDIA6, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("INT2_MI2S_TX", MSM_BACKEND_DAI_INT2_MI2S_TX,
+	MSM_FRONTEND_DAI_MULTIMEDIA6, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("INT3_MI2S_TX", MSM_BACKEND_DAI_INT3_MI2S_TX,
+	MSM_FRONTEND_DAI_MULTIMEDIA6, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("QUAT_MI2S_TX", MSM_BACKEND_DAI_QUATERNARY_MI2S_TX,
+	MSM_FRONTEND_DAI_MULTIMEDIA6, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("AUX_PCM_UL_TX", MSM_BACKEND_DAI_AUXPCM_TX,
+	MSM_FRONTEND_DAI_MULTIMEDIA6, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("SEC_AUX_PCM_UL_TX", MSM_BACKEND_DAI_SEC_AUXPCM_TX,
+	MSM_FRONTEND_DAI_MULTIMEDIA6, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("TERT_AUXPCM_UL_TX", MSM_BACKEND_DAI_TERT_AUXPCM_TX,
+	MSM_FRONTEND_DAI_MULTIMEDIA6, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("QUAT_AUXPCM_UL_TX", MSM_BACKEND_DAI_QUAT_AUXPCM_TX,
+	MSM_FRONTEND_DAI_MULTIMEDIA6, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("PRI_TDM_TX_0", MSM_BACKEND_DAI_PRI_TDM_TX_0,
+	MSM_FRONTEND_DAI_MULTIMEDIA6, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("PRI_TDM_TX_1", MSM_BACKEND_DAI_PRI_TDM_TX_1,
+	MSM_FRONTEND_DAI_MULTIMEDIA6, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("PRI_TDM_TX_2", MSM_BACKEND_DAI_PRI_TDM_TX_2,
+	MSM_FRONTEND_DAI_MULTIMEDIA6, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("PRI_TDM_TX_3", MSM_BACKEND_DAI_PRI_TDM_TX_3,
+	MSM_FRONTEND_DAI_MULTIMEDIA6, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("SEC_TDM_TX_0", MSM_BACKEND_DAI_SEC_TDM_TX_0,
+	MSM_FRONTEND_DAI_MULTIMEDIA6, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("SEC_TDM_TX_1", MSM_BACKEND_DAI_SEC_TDM_TX_1,
+	MSM_FRONTEND_DAI_MULTIMEDIA6, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("SEC_TDM_TX_2", MSM_BACKEND_DAI_SEC_TDM_TX_2,
+	MSM_FRONTEND_DAI_MULTIMEDIA6, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("SEC_TDM_TX_3", MSM_BACKEND_DAI_SEC_TDM_TX_3,
+	MSM_FRONTEND_DAI_MULTIMEDIA6, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("TERT_TDM_TX_0", MSM_BACKEND_DAI_TERT_TDM_TX_0,
+	MSM_FRONTEND_DAI_MULTIMEDIA6, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("TERT_TDM_TX_1", MSM_BACKEND_DAI_TERT_TDM_TX_1,
+	MSM_FRONTEND_DAI_MULTIMEDIA6, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("TERT_TDM_TX_2", MSM_BACKEND_DAI_TERT_TDM_TX_2,
+	MSM_FRONTEND_DAI_MULTIMEDIA6, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("TERT_TDM_TX_3", MSM_BACKEND_DAI_TERT_TDM_TX_3,
+	MSM_FRONTEND_DAI_MULTIMEDIA6, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("QUAT_TDM_TX_0", MSM_BACKEND_DAI_QUAT_TDM_TX_0,
+	MSM_FRONTEND_DAI_MULTIMEDIA6, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("QUAT_TDM_TX_1", MSM_BACKEND_DAI_QUAT_TDM_TX_1,
+	MSM_FRONTEND_DAI_MULTIMEDIA6, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("QUAT_TDM_TX_2", MSM_BACKEND_DAI_QUAT_TDM_TX_2,
+	MSM_FRONTEND_DAI_MULTIMEDIA6, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("QUAT_TDM_TX_3", MSM_BACKEND_DAI_QUAT_TDM_TX_3,
+	MSM_FRONTEND_DAI_MULTIMEDIA6, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("USB_AUDIO_TX", MSM_BACKEND_DAI_USB_TX,
+	MSM_FRONTEND_DAI_MULTIMEDIA6, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+};
+
+static const struct snd_kcontrol_new mmul8_mixer_controls[] = {
+	SOC_SINGLE_EXT("SLIM_0_TX", MSM_BACKEND_DAI_SLIMBUS_0_TX,
+	MSM_FRONTEND_DAI_MULTIMEDIA8, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("PRI_MI2S_TX", MSM_BACKEND_DAI_PRI_MI2S_TX,
+	MSM_FRONTEND_DAI_MULTIMEDIA8, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("SEC_MI2S_TX", MSM_BACKEND_DAI_SECONDARY_MI2S_TX,
+	MSM_FRONTEND_DAI_MULTIMEDIA8, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("TERT_MI2S_TX", MSM_BACKEND_DAI_TERTIARY_MI2S_TX,
+	MSM_FRONTEND_DAI_MULTIMEDIA8, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("INT2_MI2S_TX", MSM_BACKEND_DAI_INT2_MI2S_TX,
+	MSM_FRONTEND_DAI_MULTIMEDIA8, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("INT3_MI2S_TX", MSM_BACKEND_DAI_INT3_MI2S_TX,
+	MSM_FRONTEND_DAI_MULTIMEDIA8, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("QUAT_MI2S_TX", MSM_BACKEND_DAI_QUATERNARY_MI2S_TX,
+	MSM_FRONTEND_DAI_MULTIMEDIA8, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("INTERNAL_FM_TX", MSM_BACKEND_DAI_INT_FM_TX,
+	MSM_FRONTEND_DAI_MULTIMEDIA8, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("INTERNAL_BT_SCO_TX", MSM_BACKEND_DAI_INT_BT_SCO_TX,
+	MSM_FRONTEND_DAI_MULTIMEDIA8, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("AFE_PCM_TX", MSM_BACKEND_DAI_AFE_PCM_TX,
+	MSM_FRONTEND_DAI_MULTIMEDIA8, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("VOC_REC_DL", MSM_BACKEND_DAI_INCALL_RECORD_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA8, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("VOC_REC_UL", MSM_BACKEND_DAI_INCALL_RECORD_TX,
+	MSM_FRONTEND_DAI_MULTIMEDIA8, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("SLIM_6_TX", MSM_BACKEND_DAI_SLIMBUS_6_TX,
+	MSM_FRONTEND_DAI_MULTIMEDIA8, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("PRI_TDM_TX_0", MSM_BACKEND_DAI_PRI_TDM_TX_0,
+	MSM_FRONTEND_DAI_MULTIMEDIA8, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("PRI_TDM_TX_1", MSM_BACKEND_DAI_PRI_TDM_TX_1,
+	MSM_FRONTEND_DAI_MULTIMEDIA8, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("PRI_TDM_TX_2", MSM_BACKEND_DAI_PRI_TDM_TX_2,
+	MSM_FRONTEND_DAI_MULTIMEDIA8, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("PRI_TDM_TX_3", MSM_BACKEND_DAI_PRI_TDM_TX_3,
+	MSM_FRONTEND_DAI_MULTIMEDIA8, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("SEC_TDM_TX_0", MSM_BACKEND_DAI_SEC_TDM_TX_0,
+	MSM_FRONTEND_DAI_MULTIMEDIA8, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("SEC_TDM_TX_1", MSM_BACKEND_DAI_SEC_TDM_TX_1,
+	MSM_FRONTEND_DAI_MULTIMEDIA8, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("SEC_TDM_TX_2", MSM_BACKEND_DAI_SEC_TDM_TX_2,
+	MSM_FRONTEND_DAI_MULTIMEDIA8, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("SEC_TDM_TX_3", MSM_BACKEND_DAI_SEC_TDM_TX_3,
+	MSM_FRONTEND_DAI_MULTIMEDIA8, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("TERT_TDM_TX_0", MSM_BACKEND_DAI_TERT_TDM_TX_0,
+	MSM_FRONTEND_DAI_MULTIMEDIA8, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("TERT_TDM_TX_1", MSM_BACKEND_DAI_TERT_TDM_TX_1,
+	MSM_FRONTEND_DAI_MULTIMEDIA8, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("TERT_TDM_TX_2", MSM_BACKEND_DAI_TERT_TDM_TX_2,
+	MSM_FRONTEND_DAI_MULTIMEDIA8, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("TERT_TDM_TX_3", MSM_BACKEND_DAI_TERT_TDM_TX_3,
+	MSM_FRONTEND_DAI_MULTIMEDIA8, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("QUAT_TDM_TX_0", MSM_BACKEND_DAI_QUAT_TDM_TX_0,
+	MSM_FRONTEND_DAI_MULTIMEDIA8, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("QUAT_TDM_TX_1", MSM_BACKEND_DAI_QUAT_TDM_TX_1,
+	MSM_FRONTEND_DAI_MULTIMEDIA8, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("QUAT_TDM_TX_2", MSM_BACKEND_DAI_QUAT_TDM_TX_2,
+	MSM_FRONTEND_DAI_MULTIMEDIA8, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("QUAT_TDM_TX_3", MSM_BACKEND_DAI_QUAT_TDM_TX_3,
+	MSM_FRONTEND_DAI_MULTIMEDIA8, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("SLIM_7_TX", MSM_BACKEND_DAI_SLIMBUS_7_TX,
+	MSM_FRONTEND_DAI_MULTIMEDIA8, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("USB_AUDIO_TX", MSM_BACKEND_DAI_USB_TX,
+	MSM_FRONTEND_DAI_MULTIMEDIA8, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+};
+
+static const struct snd_kcontrol_new mmul16_mixer_controls[] = {
+	SOC_SINGLE_EXT("SLIM_0_TX", MSM_BACKEND_DAI_SLIMBUS_0_TX,
+	MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("PRI_MI2S_TX", MSM_BACKEND_DAI_PRI_MI2S_TX,
+	MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("SEC_MI2S_TX", MSM_BACKEND_DAI_SECONDARY_MI2S_TX,
+	MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("TERT_MI2S_TX", MSM_BACKEND_DAI_TERTIARY_MI2S_TX,
+	MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("INT2_MI2S_TX", MSM_BACKEND_DAI_INT2_MI2S_TX,
+	MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("INT3_MI2S_TX", MSM_BACKEND_DAI_INT3_MI2S_TX,
+	MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("QUAT_MI2S_TX", MSM_BACKEND_DAI_QUATERNARY_MI2S_TX,
+	MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("INTERNAL_FM_TX", MSM_BACKEND_DAI_INT_FM_TX,
+	MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("INTERNAL_BT_SCO_TX", MSM_BACKEND_DAI_INT_BT_SCO_TX,
+	MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("AFE_PCM_TX", MSM_BACKEND_DAI_AFE_PCM_TX,
+	MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("VOC_REC_DL", MSM_BACKEND_DAI_INCALL_RECORD_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("VOC_REC_UL", MSM_BACKEND_DAI_INCALL_RECORD_TX,
+	MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("SLIM_6_TX", MSM_BACKEND_DAI_SLIMBUS_6_TX,
+	MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("PRI_TDM_TX_0", MSM_BACKEND_DAI_PRI_TDM_TX_0,
+	MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("PRI_TDM_TX_1", MSM_BACKEND_DAI_PRI_TDM_TX_1,
+	MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("PRI_TDM_TX_2", MSM_BACKEND_DAI_PRI_TDM_TX_2,
+	MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("PRI_TDM_TX_3", MSM_BACKEND_DAI_PRI_TDM_TX_3,
+	MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("SEC_TDM_TX_0", MSM_BACKEND_DAI_SEC_TDM_TX_0,
+	MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("SEC_TDM_TX_1", MSM_BACKEND_DAI_SEC_TDM_TX_1,
+	MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("SEC_TDM_TX_2", MSM_BACKEND_DAI_SEC_TDM_TX_2,
+	MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("SEC_TDM_TX_3", MSM_BACKEND_DAI_SEC_TDM_TX_3,
+	MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("TERT_TDM_TX_0", MSM_BACKEND_DAI_TERT_TDM_TX_0,
+	MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("TERT_TDM_TX_1", MSM_BACKEND_DAI_TERT_TDM_TX_1,
+	MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("TERT_TDM_TX_2", MSM_BACKEND_DAI_TERT_TDM_TX_2,
+	MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("TERT_TDM_TX_3", MSM_BACKEND_DAI_TERT_TDM_TX_3,
+	MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("QUAT_TDM_TX_0", MSM_BACKEND_DAI_QUAT_TDM_TX_0,
+	MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("QUAT_TDM_TX_1", MSM_BACKEND_DAI_QUAT_TDM_TX_1,
+	MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("QUAT_TDM_TX_2", MSM_BACKEND_DAI_QUAT_TDM_TX_2,
+	MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("QUAT_TDM_TX_3", MSM_BACKEND_DAI_QUAT_TDM_TX_3,
+	MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("SLIM_7_TX", MSM_BACKEND_DAI_SLIMBUS_7_TX,
+	MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("USB_AUDIO_TX", MSM_BACKEND_DAI_USB_TX,
+	MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MI2S_TX", MSM_BACKEND_DAI_MI2S_TX,
+	MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("AUX_PCM_TX", MSM_BACKEND_DAI_AUXPCM_TX,
+	MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("SEC_AUX_PCM_TX", MSM_BACKEND_DAI_SEC_AUXPCM_TX,
+	MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("QUAT_AUX_PCM_TX", MSM_BACKEND_DAI_QUAT_AUXPCM_TX,
+	MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+};
+
+static const struct snd_kcontrol_new mmul9_mixer_controls[] = {
+	SOC_SINGLE_EXT("SLIM_0_TX", MSM_BACKEND_DAI_SLIMBUS_0_TX,
+	MSM_FRONTEND_DAI_MULTIMEDIA9, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("PRI_MI2S_TX", MSM_BACKEND_DAI_PRI_MI2S_TX,
+	MSM_FRONTEND_DAI_MULTIMEDIA9, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("INTERNAL_FM_TX", MSM_BACKEND_DAI_INT_FM_TX,
+	MSM_FRONTEND_DAI_MULTIMEDIA9, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("INTERNAL_BT_SCO_TX", MSM_BACKEND_DAI_INT_BT_SCO_TX,
+	MSM_FRONTEND_DAI_MULTIMEDIA9, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("AFE_PCM_TX", MSM_BACKEND_DAI_AFE_PCM_TX,
+	MSM_FRONTEND_DAI_MULTIMEDIA9, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("VOC_REC_DL", MSM_BACKEND_DAI_INCALL_RECORD_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA9, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("VOC_REC_UL", MSM_BACKEND_DAI_INCALL_RECORD_TX,
+	MSM_FRONTEND_DAI_MULTIMEDIA9, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("SLIM_6_TX", MSM_BACKEND_DAI_SLIMBUS_6_TX,
+	MSM_FRONTEND_DAI_MULTIMEDIA9, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("TERT_TDM_TX_0", MSM_BACKEND_DAI_TERT_TDM_TX_0,
+	MSM_FRONTEND_DAI_MULTIMEDIA9, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("TERT_TDM_TX_1", MSM_BACKEND_DAI_TERT_TDM_TX_1,
+	MSM_FRONTEND_DAI_MULTIMEDIA9, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("TERT_TDM_TX_2", MSM_BACKEND_DAI_TERT_TDM_TX_2,
+	MSM_FRONTEND_DAI_MULTIMEDIA9, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("TERT_TDM_TX_3", MSM_BACKEND_DAI_TERT_TDM_TX_3,
+	MSM_FRONTEND_DAI_MULTIMEDIA9, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("QUAT_TDM_TX_0", MSM_BACKEND_DAI_QUAT_TDM_TX_0,
+	MSM_FRONTEND_DAI_MULTIMEDIA9, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("QUAT_TDM_TX_1", MSM_BACKEND_DAI_QUAT_TDM_TX_1,
+	MSM_FRONTEND_DAI_MULTIMEDIA9, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("QUAT_TDM_TX_2", MSM_BACKEND_DAI_QUAT_TDM_TX_2,
+	MSM_FRONTEND_DAI_MULTIMEDIA9, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("QUAT_TDM_TX_3", MSM_BACKEND_DAI_QUAT_TDM_TX_3,
+	MSM_FRONTEND_DAI_MULTIMEDIA9, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+};
+
+static const struct snd_kcontrol_new mmul17_mixer_controls[] = {
+	SOC_SINGLE_EXT("SLIM_0_TX", MSM_BACKEND_DAI_SLIMBUS_0_TX,
+	MSM_FRONTEND_DAI_MULTIMEDIA17, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("PRI_MI2S_TX", MSM_BACKEND_DAI_PRI_MI2S_TX,
+	MSM_FRONTEND_DAI_MULTIMEDIA17, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("INTERNAL_FM_TX", MSM_BACKEND_DAI_INT_FM_TX,
+	MSM_FRONTEND_DAI_MULTIMEDIA17, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("INTERNAL_BT_SCO_TX", MSM_BACKEND_DAI_INT_BT_SCO_TX,
+	MSM_FRONTEND_DAI_MULTIMEDIA17, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("AFE_PCM_TX", MSM_BACKEND_DAI_AFE_PCM_TX,
+	MSM_FRONTEND_DAI_MULTIMEDIA17, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("VOC_REC_DL", MSM_BACKEND_DAI_INCALL_RECORD_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA17, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("VOC_REC_UL", MSM_BACKEND_DAI_INCALL_RECORD_TX,
+	MSM_FRONTEND_DAI_MULTIMEDIA17, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+};
+
+static const struct snd_kcontrol_new mmul18_mixer_controls[] = {
+	SOC_SINGLE_EXT("SLIM_0_TX", MSM_BACKEND_DAI_SLIMBUS_0_TX,
+	MSM_FRONTEND_DAI_MULTIMEDIA18, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("PRI_MI2S_TX", MSM_BACKEND_DAI_PRI_MI2S_TX,
+	MSM_FRONTEND_DAI_MULTIMEDIA18, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("QUAT_MI2S_TX", MSM_BACKEND_DAI_QUATERNARY_MI2S_TX,
+	MSM_FRONTEND_DAI_MULTIMEDIA18, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("INTERNAL_FM_TX", MSM_BACKEND_DAI_INT_FM_TX,
+	MSM_FRONTEND_DAI_MULTIMEDIA18, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("INTERNAL_BT_SCO_TX", MSM_BACKEND_DAI_INT_BT_SCO_TX,
+	MSM_FRONTEND_DAI_MULTIMEDIA18, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("AFE_PCM_TX", MSM_BACKEND_DAI_AFE_PCM_TX,
+	MSM_FRONTEND_DAI_MULTIMEDIA18, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("VOC_REC_DL", MSM_BACKEND_DAI_INCALL_RECORD_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA18, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("VOC_REC_UL", MSM_BACKEND_DAI_INCALL_RECORD_TX,
+	MSM_FRONTEND_DAI_MULTIMEDIA18, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+};
+
+static const struct snd_kcontrol_new mmul19_mixer_controls[] = {
+	SOC_SINGLE_EXT("SLIM_0_TX", MSM_BACKEND_DAI_SLIMBUS_0_TX,
+	MSM_FRONTEND_DAI_MULTIMEDIA19, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("PRI_MI2S_TX", MSM_BACKEND_DAI_PRI_MI2S_TX,
+	MSM_FRONTEND_DAI_MULTIMEDIA19, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("INTERNAL_FM_TX", MSM_BACKEND_DAI_INT_FM_TX,
+	MSM_FRONTEND_DAI_MULTIMEDIA19, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("INTERNAL_BT_SCO_TX", MSM_BACKEND_DAI_INT_BT_SCO_TX,
+	MSM_FRONTEND_DAI_MULTIMEDIA19, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("AFE_PCM_TX", MSM_BACKEND_DAI_AFE_PCM_TX,
+	MSM_FRONTEND_DAI_MULTIMEDIA19, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("VOC_REC_DL", MSM_BACKEND_DAI_INCALL_RECORD_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA19, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("VOC_REC_UL", MSM_BACKEND_DAI_INCALL_RECORD_TX,
+	MSM_FRONTEND_DAI_MULTIMEDIA19, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+};
+
+static const struct snd_kcontrol_new mmul20_mixer_controls[] = {
+	SOC_SINGLE_EXT("PRI_MI2S_TX", MSM_BACKEND_DAI_PRI_MI2S_TX,
+	MSM_FRONTEND_DAI_MULTIMEDIA20, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("SEC_MI2S_TX", MSM_BACKEND_DAI_SECONDARY_MI2S_TX,
+	MSM_FRONTEND_DAI_MULTIMEDIA20, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("TERT_MI2S_TX", MSM_BACKEND_DAI_TERTIARY_MI2S_TX,
+	MSM_FRONTEND_DAI_MULTIMEDIA20, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("QUAT_MI2S_TX", MSM_BACKEND_DAI_QUATERNARY_MI2S_TX,
+	MSM_FRONTEND_DAI_MULTIMEDIA20, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("PRI_TDM_TX_0", MSM_BACKEND_DAI_PRI_TDM_TX_0,
+	MSM_FRONTEND_DAI_MULTIMEDIA20, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("PRI_TDM_TX_1", MSM_BACKEND_DAI_PRI_TDM_TX_1,
+	MSM_FRONTEND_DAI_MULTIMEDIA20, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("PRI_TDM_TX_2", MSM_BACKEND_DAI_PRI_TDM_TX_2,
+	MSM_FRONTEND_DAI_MULTIMEDIA20, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("PRI_TDM_TX_3", MSM_BACKEND_DAI_PRI_TDM_TX_3,
+	MSM_FRONTEND_DAI_MULTIMEDIA20, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("SEC_TDM_TX_0", MSM_BACKEND_DAI_SEC_TDM_TX_0,
+	MSM_FRONTEND_DAI_MULTIMEDIA20, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("SEC_TDM_TX_1", MSM_BACKEND_DAI_SEC_TDM_TX_1,
+	MSM_FRONTEND_DAI_MULTIMEDIA20, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("SEC_TDM_TX_2", MSM_BACKEND_DAI_SEC_TDM_TX_2,
+	MSM_FRONTEND_DAI_MULTIMEDIA20, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("SEC_TDM_TX_3", MSM_BACKEND_DAI_SEC_TDM_TX_3,
+	MSM_FRONTEND_DAI_MULTIMEDIA20, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("TERT_TDM_TX_0", MSM_BACKEND_DAI_TERT_TDM_TX_0,
+	MSM_FRONTEND_DAI_MULTIMEDIA20, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("TERT_TDM_TX_1", MSM_BACKEND_DAI_TERT_TDM_TX_1,
+	MSM_FRONTEND_DAI_MULTIMEDIA20, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("TERT_TDM_TX_2", MSM_BACKEND_DAI_TERT_TDM_TX_2,
+	MSM_FRONTEND_DAI_MULTIMEDIA20, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("TERT_TDM_TX_3", MSM_BACKEND_DAI_TERT_TDM_TX_3,
+	MSM_FRONTEND_DAI_MULTIMEDIA20, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("QUAT_TDM_TX_0", MSM_BACKEND_DAI_QUAT_TDM_TX_0,
+	MSM_FRONTEND_DAI_MULTIMEDIA20, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("QUAT_TDM_TX_1", MSM_BACKEND_DAI_QUAT_TDM_TX_1,
+	MSM_FRONTEND_DAI_MULTIMEDIA20, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("QUAT_TDM_TX_2", MSM_BACKEND_DAI_QUAT_TDM_TX_2,
+	MSM_FRONTEND_DAI_MULTIMEDIA20, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("QUAT_TDM_TX_3", MSM_BACKEND_DAI_QUAT_TDM_TX_3,
+	MSM_FRONTEND_DAI_MULTIMEDIA20, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+};
+
+static const struct snd_kcontrol_new mmul21_mixer_controls[] = {
+	SOC_SINGLE_EXT("AUX_PCM_UL_TX", MSM_BACKEND_DAI_AUXPCM_TX,
+	MSM_FRONTEND_DAI_MULTIMEDIA21, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("SEC_AUX_PCM_UL_TX", MSM_BACKEND_DAI_SEC_AUXPCM_TX,
+	MSM_FRONTEND_DAI_MULTIMEDIA21, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("PRI_TDM_TX_0", MSM_BACKEND_DAI_PRI_TDM_TX_0,
+	MSM_FRONTEND_DAI_MULTIMEDIA21, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("PRI_TDM_TX_1", MSM_BACKEND_DAI_PRI_TDM_TX_1,
+	MSM_FRONTEND_DAI_MULTIMEDIA21, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("PRI_TDM_TX_2", MSM_BACKEND_DAI_PRI_TDM_TX_2,
+	MSM_FRONTEND_DAI_MULTIMEDIA21, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("PRI_TDM_TX_3", MSM_BACKEND_DAI_PRI_TDM_TX_3,
+	MSM_FRONTEND_DAI_MULTIMEDIA21, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("SEC_TDM_TX_0", MSM_BACKEND_DAI_SEC_TDM_TX_0,
+	MSM_FRONTEND_DAI_MULTIMEDIA21, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("SEC_TDM_TX_1", MSM_BACKEND_DAI_SEC_TDM_TX_1,
+	MSM_FRONTEND_DAI_MULTIMEDIA21, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("SEC_TDM_TX_2", MSM_BACKEND_DAI_SEC_TDM_TX_2,
+	MSM_FRONTEND_DAI_MULTIMEDIA21, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("SEC_TDM_TX_3", MSM_BACKEND_DAI_SEC_TDM_TX_3,
+	MSM_FRONTEND_DAI_MULTIMEDIA21, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("TERT_TDM_TX_0", MSM_BACKEND_DAI_TERT_TDM_TX_0,
+	MSM_FRONTEND_DAI_MULTIMEDIA21, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("TERT_TDM_TX_1", MSM_BACKEND_DAI_TERT_TDM_TX_1,
+	MSM_FRONTEND_DAI_MULTIMEDIA21, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("TERT_TDM_TX_2", MSM_BACKEND_DAI_TERT_TDM_TX_2,
+	MSM_FRONTEND_DAI_MULTIMEDIA21, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("TERT_TDM_TX_3", MSM_BACKEND_DAI_TERT_TDM_TX_3,
+	MSM_FRONTEND_DAI_MULTIMEDIA21, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("QUAT_TDM_TX_0", MSM_BACKEND_DAI_QUAT_TDM_TX_0,
+	MSM_FRONTEND_DAI_MULTIMEDIA21, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("QUAT_TDM_TX_1", MSM_BACKEND_DAI_QUAT_TDM_TX_1,
+	MSM_FRONTEND_DAI_MULTIMEDIA21, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("QUAT_TDM_TX_2", MSM_BACKEND_DAI_QUAT_TDM_TX_2,
+	MSM_FRONTEND_DAI_MULTIMEDIA21, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("QUAT_TDM_TX_3", MSM_BACKEND_DAI_QUAT_TDM_TX_3,
+	MSM_FRONTEND_DAI_MULTIMEDIA21, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+};
+
+static const struct snd_kcontrol_new mmul27_mixer_controls[] = {
+	SOC_SINGLE_EXT("SLIM_0_TX", MSM_BACKEND_DAI_SLIMBUS_0_TX,
+	MSM_FRONTEND_DAI_MULTIMEDIA27, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("PRI_MI2S_TX", MSM_BACKEND_DAI_PRI_MI2S_TX,
+	MSM_FRONTEND_DAI_MULTIMEDIA27, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("TERT_MI2S_TX", MSM_BACKEND_DAI_TERTIARY_MI2S_TX,
+	MSM_FRONTEND_DAI_MULTIMEDIA27, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("QUAT_MI2S_TX", MSM_BACKEND_DAI_QUATERNARY_MI2S_TX,
+	MSM_FRONTEND_DAI_MULTIMEDIA27, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+};
+
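+/*
+ * Downlink voice mixer controls: one array per RX backend. Each switch
+ * admits a voice frontend (CSVoice, Voice2, VoIP, VoLTE, VoWLAN,
+ * VoiceMMode1/2, DTMF, QCHAT) into that backend's playback mix through
+ * msm_routing_get/put_voice_mixer; the "* Stub" entries go through the
+ * *_voice_stub_mixer handlers instead.
+ */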
+static const struct snd_kcontrol_new pri_rx_voice_mixer_controls[] = {
+	SOC_SINGLE_EXT("CSVoice", MSM_BACKEND_DAI_PRI_I2S_RX,
+	MSM_FRONTEND_DAI_CS_VOICE, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("Voice2", MSM_BACKEND_DAI_PRI_I2S_RX,
+	MSM_FRONTEND_DAI_VOICE2, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("Voip", MSM_BACKEND_DAI_PRI_I2S_RX,
+	MSM_FRONTEND_DAI_VOIP, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("VoLTE", MSM_BACKEND_DAI_PRI_I2S_RX,
+	MSM_FRONTEND_DAI_VOLTE, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("VoWLAN", MSM_BACKEND_DAI_PRI_I2S_RX,
+	MSM_FRONTEND_DAI_VOWLAN, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("VoiceMMode1", MSM_BACKEND_DAI_PRI_I2S_RX,
+	MSM_FRONTEND_DAI_VOICEMMODE1, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("VoiceMMode2", MSM_BACKEND_DAI_PRI_I2S_RX,
+	MSM_FRONTEND_DAI_VOICEMMODE2, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("DTMF", MSM_BACKEND_DAI_PRI_I2S_RX,
+	MSM_FRONTEND_DAI_DTMF_RX, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("QCHAT", MSM_BACKEND_DAI_PRI_I2S_RX,
+	MSM_FRONTEND_DAI_QCHAT, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+};
+
+static const struct snd_kcontrol_new sec_i2s_rx_voice_mixer_controls[] = {
+	SOC_SINGLE_EXT("CSVoice", MSM_BACKEND_DAI_SEC_I2S_RX,
+	MSM_FRONTEND_DAI_CS_VOICE, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("Voice2", MSM_BACKEND_DAI_SEC_I2S_RX,
+	MSM_FRONTEND_DAI_VOICE2, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("Voip", MSM_BACKEND_DAI_SEC_I2S_RX,
+	MSM_FRONTEND_DAI_VOIP, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("VoLTE", MSM_BACKEND_DAI_SEC_I2S_RX,
+	MSM_FRONTEND_DAI_VOLTE, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("VoWLAN", MSM_BACKEND_DAI_SEC_I2S_RX,
+	MSM_FRONTEND_DAI_VOWLAN, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("DTMF", MSM_BACKEND_DAI_SEC_I2S_RX,
+	MSM_FRONTEND_DAI_DTMF_RX, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("QCHAT", MSM_BACKEND_DAI_SEC_I2S_RX,
+	MSM_FRONTEND_DAI_QCHAT, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+};
+
+static const struct snd_kcontrol_new sec_mi2s_rx_voice_mixer_controls[] = {
+	SOC_SINGLE_EXT("CSVoice", MSM_BACKEND_DAI_SECONDARY_MI2S_RX,
+	MSM_FRONTEND_DAI_CS_VOICE, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("Voice2", MSM_BACKEND_DAI_SECONDARY_MI2S_RX,
+	MSM_FRONTEND_DAI_VOICE2, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("Voip", MSM_BACKEND_DAI_SECONDARY_MI2S_RX,
+	MSM_FRONTEND_DAI_VOIP, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("VoLTE", MSM_BACKEND_DAI_SECONDARY_MI2S_RX,
+	MSM_FRONTEND_DAI_VOLTE, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("VoWLAN", MSM_BACKEND_DAI_SECONDARY_MI2S_RX,
+	MSM_FRONTEND_DAI_VOWLAN, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("DTMF", MSM_BACKEND_DAI_SECONDARY_MI2S_RX,
+	MSM_FRONTEND_DAI_DTMF_RX, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("QCHAT", MSM_BACKEND_DAI_SECONDARY_MI2S_RX,
+	MSM_FRONTEND_DAI_QCHAT, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("VoiceMMode1", MSM_BACKEND_DAI_SECONDARY_MI2S_RX,
+	MSM_FRONTEND_DAI_VOICEMMODE1, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("VoiceMMode2", MSM_BACKEND_DAI_SECONDARY_MI2S_RX,
+	MSM_FRONTEND_DAI_VOICEMMODE2, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+};
+
+static const struct snd_kcontrol_new slimbus_rx_voice_mixer_controls[] = {
+	SOC_SINGLE_EXT("CSVoice", MSM_BACKEND_DAI_SLIMBUS_0_RX,
+	MSM_FRONTEND_DAI_CS_VOICE, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("Voice2", MSM_BACKEND_DAI_SLIMBUS_0_RX,
+	MSM_FRONTEND_DAI_VOICE2, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("Voip", MSM_BACKEND_DAI_SLIMBUS_0_RX ,
+	MSM_FRONTEND_DAI_VOIP, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("Voice Stub", MSM_BACKEND_DAI_SLIMBUS_0_RX,
+	MSM_FRONTEND_DAI_VOICE_STUB, 1, 0, msm_routing_get_voice_stub_mixer,
+	msm_routing_put_voice_stub_mixer),
+	SOC_SINGLE_EXT("Voice2 Stub", MSM_BACKEND_DAI_SLIMBUS_0_RX,
+	MSM_FRONTEND_DAI_VOICE2_STUB, 1, 0, msm_routing_get_voice_stub_mixer,
+	msm_routing_put_voice_stub_mixer),
+	SOC_SINGLE_EXT("VoLTE", MSM_BACKEND_DAI_SLIMBUS_0_RX ,
+	MSM_FRONTEND_DAI_VOLTE, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("VoLTE Stub", MSM_BACKEND_DAI_SLIMBUS_0_RX,
+	MSM_FRONTEND_DAI_VOLTE_STUB, 1, 0, msm_routing_get_voice_stub_mixer,
+	msm_routing_put_voice_stub_mixer),
+	SOC_SINGLE_EXT("VoWLAN", MSM_BACKEND_DAI_SLIMBUS_0_RX ,
+	MSM_FRONTEND_DAI_VOWLAN, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("DTMF", MSM_BACKEND_DAI_SLIMBUS_0_RX ,
+	MSM_FRONTEND_DAI_DTMF_RX, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("QCHAT", MSM_BACKEND_DAI_SLIMBUS_0_RX ,
+	MSM_FRONTEND_DAI_QCHAT, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("VoiceMMode1", MSM_BACKEND_DAI_SLIMBUS_0_RX,
+	MSM_FRONTEND_DAI_VOICEMMODE1, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("VoiceMMode2", MSM_BACKEND_DAI_SLIMBUS_0_RX,
+	MSM_FRONTEND_DAI_VOICEMMODE2, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+};
+
+static const struct snd_kcontrol_new slimbus_6_rx_voice_mixer_controls[] = {
+	SOC_SINGLE_EXT("CSVoice", MSM_BACKEND_DAI_SLIMBUS_6_RX,
+	MSM_FRONTEND_DAI_CS_VOICE, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("Voice2", MSM_BACKEND_DAI_SLIMBUS_6_RX,
+	MSM_FRONTEND_DAI_VOICE2, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("Voip", MSM_BACKEND_DAI_SLIMBUS_6_RX ,
+	MSM_FRONTEND_DAI_VOIP, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("Voice Stub", MSM_BACKEND_DAI_SLIMBUS_6_RX,
+	MSM_FRONTEND_DAI_VOICE_STUB, 1, 0, msm_routing_get_voice_stub_mixer,
+	msm_routing_put_voice_stub_mixer),
+	SOC_SINGLE_EXT("Voice2 Stub", MSM_BACKEND_DAI_SLIMBUS_6_RX,
+	MSM_FRONTEND_DAI_VOICE2_STUB, 1, 0, msm_routing_get_voice_stub_mixer,
+	msm_routing_put_voice_stub_mixer),
+	SOC_SINGLE_EXT("VoLTE", MSM_BACKEND_DAI_SLIMBUS_6_RX ,
+	MSM_FRONTEND_DAI_VOLTE, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("VoLTE Stub", MSM_BACKEND_DAI_SLIMBUS_6_RX,
+	MSM_FRONTEND_DAI_VOLTE_STUB, 1, 0, msm_routing_get_voice_stub_mixer,
+	msm_routing_put_voice_stub_mixer),
+	SOC_SINGLE_EXT("VoWLAN", MSM_BACKEND_DAI_SLIMBUS_6_RX ,
+	MSM_FRONTEND_DAI_VOWLAN, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("DTMF", MSM_BACKEND_DAI_SLIMBUS_6_RX ,
+	MSM_FRONTEND_DAI_DTMF_RX, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("QCHAT", MSM_BACKEND_DAI_SLIMBUS_6_RX ,
+	MSM_FRONTEND_DAI_QCHAT, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("VoiceMMode1", MSM_BACKEND_DAI_SLIMBUS_6_RX,
+	MSM_FRONTEND_DAI_VOICEMMODE1, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("VoiceMMode2", MSM_BACKEND_DAI_SLIMBUS_6_RX,
+	MSM_FRONTEND_DAI_VOICEMMODE2, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+};
+
+static const struct snd_kcontrol_new usb_audio_rx_voice_mixer_controls[] = {
+	SOC_SINGLE_EXT("CSVoice", MSM_BACKEND_DAI_USB_RX,
+	MSM_FRONTEND_DAI_CS_VOICE, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("Voice2", MSM_BACKEND_DAI_USB_RX,
+	MSM_FRONTEND_DAI_VOICE2, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("Voip", MSM_BACKEND_DAI_USB_RX,
+	MSM_FRONTEND_DAI_VOIP, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("Voice Stub", MSM_BACKEND_DAI_USB_RX,
+	MSM_FRONTEND_DAI_VOICE_STUB, 1, 0, msm_routing_get_voice_stub_mixer,
+	msm_routing_put_voice_stub_mixer),
+	SOC_SINGLE_EXT("Voice2 Stub", MSM_BACKEND_DAI_USB_RX,
+	MSM_FRONTEND_DAI_VOICE2_STUB, 1, 0, msm_routing_get_voice_stub_mixer,
+	msm_routing_put_voice_stub_mixer),
+	SOC_SINGLE_EXT("VoLTE", MSM_BACKEND_DAI_USB_RX,
+	MSM_FRONTEND_DAI_VOLTE, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("VoLTE Stub", MSM_BACKEND_DAI_USB_RX,
+	MSM_FRONTEND_DAI_VOLTE_STUB, 1, 0, msm_routing_get_voice_stub_mixer,
+	msm_routing_put_voice_stub_mixer),
+	SOC_SINGLE_EXT("VoWLAN", MSM_BACKEND_DAI_USB_RX,
+	MSM_FRONTEND_DAI_VOWLAN, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("DTMF", MSM_BACKEND_DAI_USB_RX,
+	MSM_FRONTEND_DAI_DTMF_RX, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("QCHAT", MSM_BACKEND_DAI_USB_RX,
+	MSM_FRONTEND_DAI_QCHAT, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("VoiceMMode1", MSM_BACKEND_DAI_USB_RX,
+	MSM_FRONTEND_DAI_VOICEMMODE1, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("VoiceMMode2", MSM_BACKEND_DAI_USB_RX,
+	MSM_FRONTEND_DAI_VOICEMMODE2, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+};
+
+static const struct snd_kcontrol_new bt_sco_rx_voice_mixer_controls[] = {
+	SOC_SINGLE_EXT("CSVoice", MSM_BACKEND_DAI_INT_BT_SCO_RX,
+	MSM_FRONTEND_DAI_CS_VOICE, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("Voice2", MSM_BACKEND_DAI_INT_BT_SCO_RX,
+	MSM_FRONTEND_DAI_VOICE2, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("Voip", MSM_BACKEND_DAI_INT_BT_SCO_RX ,
+	MSM_FRONTEND_DAI_VOIP, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("Voice Stub", MSM_BACKEND_DAI_INT_BT_SCO_RX,
+	MSM_FRONTEND_DAI_VOICE_STUB, 1, 0, msm_routing_get_voice_stub_mixer,
+	msm_routing_put_voice_stub_mixer),
+	SOC_SINGLE_EXT("Voice2 Stub", MSM_BACKEND_DAI_INT_BT_SCO_RX,
+	MSM_FRONTEND_DAI_VOICE2_STUB, 1, 0, msm_routing_get_voice_stub_mixer,
+	msm_routing_put_voice_stub_mixer),
+	SOC_SINGLE_EXT("VoLTE", MSM_BACKEND_DAI_INT_BT_SCO_RX ,
+	MSM_FRONTEND_DAI_VOLTE, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("VoWLAN", MSM_BACKEND_DAI_INT_BT_SCO_RX ,
+	MSM_FRONTEND_DAI_VOWLAN, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("DTMF", MSM_BACKEND_DAI_INT_BT_SCO_RX ,
+	MSM_FRONTEND_DAI_DTMF_RX, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("QCHAT", MSM_BACKEND_DAI_INT_BT_SCO_RX ,
+	MSM_FRONTEND_DAI_QCHAT, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("VoiceMMode1", MSM_BACKEND_DAI_INT_BT_SCO_RX,
+	MSM_FRONTEND_DAI_VOICEMMODE1, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("VoiceMMode2", MSM_BACKEND_DAI_INT_BT_SCO_RX,
+	MSM_FRONTEND_DAI_VOICEMMODE2, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+};
+
+static const struct snd_kcontrol_new mi2s_rx_voice_mixer_controls[] = {
+	SOC_SINGLE_EXT("CSVoice", MSM_BACKEND_DAI_MI2S_RX,
+	MSM_FRONTEND_DAI_CS_VOICE, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("Voice2", MSM_BACKEND_DAI_MI2S_RX,
+	MSM_FRONTEND_DAI_VOICE2, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("Voip", MSM_BACKEND_DAI_MI2S_RX,
+	MSM_FRONTEND_DAI_VOIP, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("Voice Stub", MSM_BACKEND_DAI_MI2S_RX,
+	MSM_FRONTEND_DAI_VOICE_STUB, 1, 0, msm_routing_get_voice_stub_mixer,
+	msm_routing_put_voice_stub_mixer),
+	SOC_SINGLE_EXT("VoLTE", MSM_BACKEND_DAI_MI2S_RX,
+	MSM_FRONTEND_DAI_VOLTE, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("VoWLAN", MSM_BACKEND_DAI_MI2S_RX,
+	MSM_FRONTEND_DAI_VOWLAN, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("DTMF", MSM_BACKEND_DAI_MI2S_RX,
+	MSM_FRONTEND_DAI_DTMF_RX, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("QCHAT", MSM_BACKEND_DAI_MI2S_RX,
+	MSM_FRONTEND_DAI_QCHAT, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("VoiceMMode1", MSM_BACKEND_DAI_MI2S_RX,
+	MSM_FRONTEND_DAI_VOICEMMODE1, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("VoiceMMode2", MSM_BACKEND_DAI_MI2S_RX,
+	MSM_FRONTEND_DAI_VOICEMMODE2, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+};
+
+static const struct snd_kcontrol_new pri_mi2s_rx_voice_mixer_controls[] = {
+	SOC_SINGLE_EXT("CSVoice", MSM_BACKEND_DAI_PRI_MI2S_RX,
+	MSM_FRONTEND_DAI_CS_VOICE, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("Voice2", MSM_BACKEND_DAI_PRI_MI2S_RX,
+	MSM_FRONTEND_DAI_VOICE2, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("Voip", MSM_BACKEND_DAI_PRI_MI2S_RX,
+	MSM_FRONTEND_DAI_VOIP, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("Voice Stub", MSM_BACKEND_DAI_PRI_MI2S_RX,
+	MSM_FRONTEND_DAI_VOICE_STUB, 1, 0, msm_routing_get_voice_stub_mixer,
+	msm_routing_put_voice_stub_mixer),
+	SOC_SINGLE_EXT("Voice2 Stub", MSM_BACKEND_DAI_PRI_MI2S_RX,
+	MSM_FRONTEND_DAI_VOICE2_STUB, 1, 0, msm_routing_get_voice_stub_mixer,
+	msm_routing_put_voice_stub_mixer),
+	SOC_SINGLE_EXT("VoLTE", MSM_BACKEND_DAI_PRI_MI2S_RX,
+	MSM_FRONTEND_DAI_VOLTE, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("VoLTE Stub", MSM_BACKEND_DAI_PRI_MI2S_RX,
+	MSM_FRONTEND_DAI_VOLTE_STUB, 1, 0, msm_routing_get_voice_stub_mixer,
+	msm_routing_put_voice_stub_mixer),
+	SOC_SINGLE_EXT("VoWLAN", MSM_BACKEND_DAI_PRI_MI2S_RX,
+	MSM_FRONTEND_DAI_VOWLAN, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("DTMF", MSM_BACKEND_DAI_PRI_MI2S_RX,
+	MSM_FRONTEND_DAI_DTMF_RX, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("QCHAT", MSM_BACKEND_DAI_PRI_MI2S_RX,
+	MSM_FRONTEND_DAI_QCHAT, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("VoiceMMode1", MSM_BACKEND_DAI_PRI_MI2S_RX,
+	MSM_FRONTEND_DAI_VOICEMMODE1, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("VoiceMMode2", MSM_BACKEND_DAI_PRI_MI2S_RX,
+	MSM_FRONTEND_DAI_VOICEMMODE2, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+};
+
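+/*
+ * The INT0/INT4 MI2S (internal codec) backends only expose the VoIP,
+ * DTMF, QCHAT and VoiceMMode1/2 sessions; the remaining voice frontends
+ * are not routable to them.
+ */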
+static const struct snd_kcontrol_new int0_mi2s_rx_voice_mixer_controls[] = {
+	SOC_SINGLE_EXT("Voip", MSM_BACKEND_DAI_INT0_MI2S_RX,
+	MSM_FRONTEND_DAI_VOIP, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("DTMF", MSM_BACKEND_DAI_INT0_MI2S_RX,
+	MSM_FRONTEND_DAI_DTMF_RX, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("QCHAT", MSM_BACKEND_DAI_INT0_MI2S_RX,
+	MSM_FRONTEND_DAI_QCHAT, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("VoiceMMode1", MSM_BACKEND_DAI_INT0_MI2S_RX,
+	MSM_FRONTEND_DAI_VOICEMMODE1, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("VoiceMMode2", MSM_BACKEND_DAI_INT0_MI2S_RX,
+	MSM_FRONTEND_DAI_VOICEMMODE2, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+};
+
+static const struct snd_kcontrol_new int4_mi2s_rx_voice_mixer_controls[] = {
+	SOC_SINGLE_EXT("Voip", MSM_BACKEND_DAI_INT4_MI2S_RX,
+	MSM_FRONTEND_DAI_VOIP, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("DTMF", MSM_BACKEND_DAI_INT4_MI2S_RX,
+	MSM_FRONTEND_DAI_DTMF_RX, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("QCHAT", MSM_BACKEND_DAI_INT4_MI2S_RX,
+	MSM_FRONTEND_DAI_QCHAT, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("VoiceMMode1", MSM_BACKEND_DAI_INT4_MI2S_RX,
+	MSM_FRONTEND_DAI_VOICEMMODE1, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("VoiceMMode2", MSM_BACKEND_DAI_INT4_MI2S_RX,
+	MSM_FRONTEND_DAI_VOICEMMODE2, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+};
+
+static const struct snd_kcontrol_new tert_mi2s_rx_voice_mixer_controls[] = {
+	SOC_SINGLE_EXT("CSVoice", MSM_BACKEND_DAI_TERTIARY_MI2S_RX,
+	MSM_FRONTEND_DAI_CS_VOICE, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("Voice2", MSM_BACKEND_DAI_TERTIARY_MI2S_RX,
+	MSM_FRONTEND_DAI_VOICE2, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("Voip", MSM_BACKEND_DAI_TERTIARY_MI2S_RX,
+	MSM_FRONTEND_DAI_VOIP, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("Voice Stub", MSM_BACKEND_DAI_TERTIARY_MI2S_RX,
+	MSM_FRONTEND_DAI_VOICE_STUB, 1, 0, msm_routing_get_voice_stub_mixer,
+	msm_routing_put_voice_stub_mixer),
+	SOC_SINGLE_EXT("Voice2 Stub", MSM_BACKEND_DAI_TERTIARY_MI2S_RX,
+	MSM_FRONTEND_DAI_VOICE2_STUB, 1, 0, msm_routing_get_voice_stub_mixer,
+	msm_routing_put_voice_stub_mixer),
+	SOC_SINGLE_EXT("VoLTE", MSM_BACKEND_DAI_TERTIARY_MI2S_RX,
+	MSM_FRONTEND_DAI_VOLTE, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("VoLTE Stub", MSM_BACKEND_DAI_TERTIARY_MI2S_RX,
+	MSM_FRONTEND_DAI_VOLTE_STUB, 1, 0, msm_routing_get_voice_stub_mixer,
+	msm_routing_put_voice_stub_mixer),
+	SOC_SINGLE_EXT("VoWLAN", MSM_BACKEND_DAI_TERTIARY_MI2S_RX,
+	MSM_FRONTEND_DAI_VOWLAN, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("DTMF", MSM_BACKEND_DAI_TERTIARY_MI2S_RX,
+	MSM_FRONTEND_DAI_DTMF_RX, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("QCHAT", MSM_BACKEND_DAI_TERTIARY_MI2S_RX,
+	MSM_FRONTEND_DAI_QCHAT, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("VoiceMMode1", MSM_BACKEND_DAI_TERTIARY_MI2S_RX,
+	MSM_FRONTEND_DAI_VOICEMMODE1, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("VoiceMMode2", MSM_BACKEND_DAI_TERTIARY_MI2S_RX,
+	MSM_FRONTEND_DAI_VOICEMMODE2, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+};
+
+static const struct snd_kcontrol_new quat_mi2s_rx_voice_mixer_controls[] = {
+	SOC_SINGLE_EXT("CSVoice", MSM_BACKEND_DAI_QUATERNARY_MI2S_RX,
+	MSM_FRONTEND_DAI_CS_VOICE, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("Voice2", MSM_BACKEND_DAI_QUATERNARY_MI2S_RX,
+	MSM_FRONTEND_DAI_VOICE2, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("Voip", MSM_BACKEND_DAI_QUATERNARY_MI2S_RX,
+	MSM_FRONTEND_DAI_VOIP, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("Voice Stub", MSM_BACKEND_DAI_QUATERNARY_MI2S_RX,
+	MSM_FRONTEND_DAI_VOICE_STUB, 1, 0, msm_routing_get_voice_stub_mixer,
+	msm_routing_put_voice_stub_mixer),
+	SOC_SINGLE_EXT("Voice2 Stub", MSM_BACKEND_DAI_QUATERNARY_MI2S_RX,
+	MSM_FRONTEND_DAI_VOICE2_STUB, 1, 0, msm_routing_get_voice_stub_mixer,
+	msm_routing_put_voice_stub_mixer),
+	SOC_SINGLE_EXT("VoLTE", MSM_BACKEND_DAI_QUATERNARY_MI2S_RX,
+	MSM_FRONTEND_DAI_VOLTE, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("VoLTE Stub", MSM_BACKEND_DAI_QUATERNARY_MI2S_RX,
+	MSM_FRONTEND_DAI_VOLTE_STUB, 1, 0, msm_routing_get_voice_stub_mixer,
+	msm_routing_put_voice_stub_mixer),
+	SOC_SINGLE_EXT("VoWLAN", MSM_BACKEND_DAI_QUATERNARY_MI2S_RX,
+	MSM_FRONTEND_DAI_VOWLAN, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("DTMF", MSM_BACKEND_DAI_QUATERNARY_MI2S_RX,
+	MSM_FRONTEND_DAI_DTMF_RX, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("QCHAT", MSM_BACKEND_DAI_QUATERNARY_MI2S_RX,
+	MSM_FRONTEND_DAI_QCHAT, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("VoiceMMode1", MSM_BACKEND_DAI_QUATERNARY_MI2S_RX,
+	MSM_FRONTEND_DAI_VOICEMMODE1, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("VoiceMMode2", MSM_BACKEND_DAI_QUATERNARY_MI2S_RX,
+	MSM_FRONTEND_DAI_VOICEMMODE2, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+};
+
+static const struct snd_kcontrol_new quin_mi2s_rx_voice_mixer_controls[] = {
+	SOC_SINGLE_EXT("CSVoice", MSM_BACKEND_DAI_QUINARY_MI2S_RX,
+	MSM_FRONTEND_DAI_CS_VOICE, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("Voice2", MSM_BACKEND_DAI_QUINARY_MI2S_RX,
+	MSM_FRONTEND_DAI_VOICE2, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("Voip", MSM_BACKEND_DAI_QUINARY_MI2S_RX,
+	MSM_FRONTEND_DAI_VOIP, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("Voice Stub", MSM_BACKEND_DAI_QUINARY_MI2S_RX,
+	MSM_FRONTEND_DAI_VOICE_STUB, 1, 0, msm_routing_get_voice_stub_mixer,
+	msm_routing_put_voice_stub_mixer),
+	SOC_SINGLE_EXT("Voice2 Stub", MSM_BACKEND_DAI_QUINARY_MI2S_RX,
+	MSM_FRONTEND_DAI_VOICE2_STUB, 1, 0, msm_routing_get_voice_stub_mixer,
+	msm_routing_put_voice_stub_mixer),
+	SOC_SINGLE_EXT("VoLTE", MSM_BACKEND_DAI_QUINARY_MI2S_RX,
+	MSM_FRONTEND_DAI_VOLTE, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("VoLTE Stub", MSM_BACKEND_DAI_QUINARY_MI2S_RX,
+	MSM_FRONTEND_DAI_VOLTE_STUB, 1, 0, msm_routing_get_voice_stub_mixer,
+	msm_routing_put_voice_stub_mixer),
+	SOC_SINGLE_EXT("VoWLAN", MSM_BACKEND_DAI_QUINARY_MI2S_RX,
+	MSM_FRONTEND_DAI_VOWLAN, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("DTMF", MSM_BACKEND_DAI_QUINARY_MI2S_RX,
+	MSM_FRONTEND_DAI_DTMF_RX, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("QCHAT", MSM_BACKEND_DAI_QUINARY_MI2S_RX,
+	MSM_FRONTEND_DAI_QCHAT, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("VoiceMMode1", MSM_BACKEND_DAI_QUINARY_MI2S_RX,
+	MSM_FRONTEND_DAI_VOICEMMODE1, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("VoiceMMode2", MSM_BACKEND_DAI_QUINARY_MI2S_RX,
+	MSM_FRONTEND_DAI_VOICEMMODE2, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+};
+
+static const struct snd_kcontrol_new afe_pcm_rx_voice_mixer_controls[] = {
+	SOC_SINGLE_EXT("CSVoice", MSM_BACKEND_DAI_AFE_PCM_RX,
+	MSM_FRONTEND_DAI_CS_VOICE, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("Voice2", MSM_BACKEND_DAI_AFE_PCM_RX,
+	MSM_FRONTEND_DAI_VOICE2, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("Voip", MSM_BACKEND_DAI_AFE_PCM_RX,
+	MSM_FRONTEND_DAI_VOIP, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("Voice Stub", MSM_BACKEND_DAI_AFE_PCM_RX,
+	MSM_FRONTEND_DAI_VOICE_STUB, 1, 0, msm_routing_get_voice_stub_mixer,
+	msm_routing_put_voice_stub_mixer),
+	SOC_SINGLE_EXT("Voice2 Stub", MSM_BACKEND_DAI_AFE_PCM_RX,
+	MSM_FRONTEND_DAI_VOICE2_STUB, 1, 0, msm_routing_get_voice_stub_mixer,
+	msm_routing_put_voice_stub_mixer),
+	SOC_SINGLE_EXT("VoLTE", MSM_BACKEND_DAI_AFE_PCM_RX,
+	MSM_FRONTEND_DAI_VOLTE, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("VoLTE Stub", MSM_BACKEND_DAI_AFE_PCM_RX,
+	MSM_FRONTEND_DAI_VOLTE_STUB, 1, 0, msm_routing_get_voice_stub_mixer,
+	msm_routing_put_voice_stub_mixer),
+	SOC_SINGLE_EXT("VoWLAN", MSM_BACKEND_DAI_AFE_PCM_RX,
+	MSM_FRONTEND_DAI_VOWLAN, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("DTMF", MSM_BACKEND_DAI_AFE_PCM_RX,
+	MSM_FRONTEND_DAI_DTMF_RX, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("QCHAT", MSM_BACKEND_DAI_AFE_PCM_RX,
+	MSM_FRONTEND_DAI_QCHAT, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("VoiceMMode1", MSM_BACKEND_DAI_AFE_PCM_RX,
+	MSM_FRONTEND_DAI_VOICEMMODE1, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("VoiceMMode2", MSM_BACKEND_DAI_AFE_PCM_RX,
+	MSM_FRONTEND_DAI_VOICEMMODE2, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+};
+
+static const struct snd_kcontrol_new aux_pcm_rx_voice_mixer_controls[] = {
+	SOC_SINGLE_EXT("CSVoice", MSM_BACKEND_DAI_AUXPCM_RX,
+	MSM_FRONTEND_DAI_CS_VOICE, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("Voice2", MSM_BACKEND_DAI_AUXPCM_RX,
+	MSM_FRONTEND_DAI_VOICE2, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("Voip", MSM_BACKEND_DAI_AUXPCM_RX,
+	MSM_FRONTEND_DAI_VOIP, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("Voice Stub", MSM_BACKEND_DAI_AUXPCM_RX,
+	MSM_FRONTEND_DAI_VOICE_STUB, 1, 0, msm_routing_get_voice_stub_mixer,
+	msm_routing_put_voice_stub_mixer),
+	SOC_SINGLE_EXT("Voice2 Stub", MSM_BACKEND_DAI_AUXPCM_RX,
+	MSM_FRONTEND_DAI_VOICE2_STUB, 1, 0, msm_routing_get_voice_stub_mixer,
+	msm_routing_put_voice_stub_mixer),
+	SOC_SINGLE_EXT("VoLTE", MSM_BACKEND_DAI_AUXPCM_RX,
+	MSM_FRONTEND_DAI_VOLTE, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("VoLTE Stub", MSM_BACKEND_DAI_AUXPCM_RX,
+	MSM_FRONTEND_DAI_VOLTE_STUB, 1, 0, msm_routing_get_voice_stub_mixer,
+	msm_routing_put_voice_stub_mixer),
+	SOC_SINGLE_EXT("VoWLAN", MSM_BACKEND_DAI_AUXPCM_RX,
+	MSM_FRONTEND_DAI_VOWLAN, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("DTMF", MSM_BACKEND_DAI_AUXPCM_RX,
+	MSM_FRONTEND_DAI_DTMF_RX, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("QCHAT", MSM_BACKEND_DAI_AUXPCM_RX,
+	MSM_FRONTEND_DAI_QCHAT, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("VoiceMMode1", MSM_BACKEND_DAI_AUXPCM_RX,
+	MSM_FRONTEND_DAI_VOICEMMODE1, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("VoiceMMode2", MSM_BACKEND_DAI_AUXPCM_RX,
+	MSM_FRONTEND_DAI_VOICEMMODE2, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+};
+
+static const struct snd_kcontrol_new sec_aux_pcm_rx_voice_mixer_controls[] = {
+	SOC_SINGLE_EXT("CSVoice", MSM_BACKEND_DAI_SEC_AUXPCM_RX,
+	MSM_FRONTEND_DAI_CS_VOICE, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("Voice2", MSM_BACKEND_DAI_SEC_AUXPCM_RX,
+	MSM_FRONTEND_DAI_VOICE2, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("Voip", MSM_BACKEND_DAI_SEC_AUXPCM_RX,
+	MSM_FRONTEND_DAI_VOIP, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("Voice Stub", MSM_BACKEND_DAI_SEC_AUXPCM_RX,
+	MSM_FRONTEND_DAI_VOICE_STUB, 1, 0, msm_routing_get_voice_stub_mixer,
+	msm_routing_put_voice_stub_mixer),
+	SOC_SINGLE_EXT("VoLTE", MSM_BACKEND_DAI_SEC_AUXPCM_RX,
+	MSM_FRONTEND_DAI_VOLTE, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("VoWLAN", MSM_BACKEND_DAI_SEC_AUXPCM_RX,
+	MSM_FRONTEND_DAI_VOWLAN, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("DTMF", MSM_BACKEND_DAI_SEC_AUXPCM_RX,
+	MSM_FRONTEND_DAI_DTMF_RX, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("QCHAT", MSM_BACKEND_DAI_SEC_AUXPCM_RX,
+	MSM_FRONTEND_DAI_QCHAT, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("VoiceMMode1", MSM_BACKEND_DAI_SEC_AUXPCM_RX,
+	MSM_FRONTEND_DAI_VOICEMMODE1, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("VoiceMMode2", MSM_BACKEND_DAI_SEC_AUXPCM_RX,
+	MSM_FRONTEND_DAI_VOICEMMODE2, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+};
+
+static const struct snd_kcontrol_new tert_aux_pcm_rx_voice_mixer_controls[] = {
+	SOC_SINGLE_EXT("CSVoice", MSM_BACKEND_DAI_TERT_AUXPCM_RX,
+	MSM_FRONTEND_DAI_CS_VOICE, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("Voice2", MSM_BACKEND_DAI_TERT_AUXPCM_RX,
+	MSM_FRONTEND_DAI_VOICE2, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("Voip", MSM_BACKEND_DAI_TERT_AUXPCM_RX,
+	MSM_FRONTEND_DAI_VOIP, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("Voice Stub", MSM_BACKEND_DAI_TERT_AUXPCM_RX,
+	MSM_FRONTEND_DAI_VOICE_STUB, 1, 0, msm_routing_get_voice_stub_mixer,
+	msm_routing_put_voice_stub_mixer),
+	SOC_SINGLE_EXT("VoLTE", MSM_BACKEND_DAI_TERT_AUXPCM_RX,
+	MSM_FRONTEND_DAI_VOLTE, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("VoWLAN", MSM_BACKEND_DAI_TERT_AUXPCM_RX,
+	MSM_FRONTEND_DAI_VOWLAN, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("DTMF", MSM_BACKEND_DAI_TERT_AUXPCM_RX,
+	MSM_FRONTEND_DAI_DTMF_RX, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("QCHAT", MSM_BACKEND_DAI_TERT_AUXPCM_RX,
+	MSM_FRONTEND_DAI_QCHAT, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("VoiceMMode1", MSM_BACKEND_DAI_TERT_AUXPCM_RX,
+	MSM_FRONTEND_DAI_VOICEMMODE1, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("VoiceMMode2", MSM_BACKEND_DAI_TERT_AUXPCM_RX,
+	MSM_FRONTEND_DAI_VOICEMMODE2, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+};
+
+static const struct snd_kcontrol_new quat_aux_pcm_rx_voice_mixer_controls[] = {
+	SOC_SINGLE_EXT("CSVoice", MSM_BACKEND_DAI_QUAT_AUXPCM_RX,
+	MSM_FRONTEND_DAI_CS_VOICE, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("Voice2", MSM_BACKEND_DAI_QUAT_AUXPCM_RX,
+	MSM_FRONTEND_DAI_VOICE2, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("Voip", MSM_BACKEND_DAI_QUAT_AUXPCM_RX,
+	MSM_FRONTEND_DAI_VOIP, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("Voice Stub", MSM_BACKEND_DAI_QUAT_AUXPCM_RX,
+	MSM_FRONTEND_DAI_VOICE_STUB, 1, 0, msm_routing_get_voice_stub_mixer,
+	msm_routing_put_voice_stub_mixer),
+	SOC_SINGLE_EXT("VoLTE", MSM_BACKEND_DAI_QUAT_AUXPCM_RX,
+	MSM_FRONTEND_DAI_VOLTE, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("VoWLAN", MSM_BACKEND_DAI_QUAT_AUXPCM_RX,
+	MSM_FRONTEND_DAI_VOWLAN, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("DTMF", MSM_BACKEND_DAI_QUAT_AUXPCM_RX,
+	MSM_FRONTEND_DAI_DTMF_RX, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("QCHAT", MSM_BACKEND_DAI_QUAT_AUXPCM_RX,
+	MSM_FRONTEND_DAI_QCHAT, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("VoiceMMode1", MSM_BACKEND_DAI_QUAT_AUXPCM_RX,
+	MSM_FRONTEND_DAI_VOICEMMODE1, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("VoiceMMode2", MSM_BACKEND_DAI_QUAT_AUXPCM_RX,
+	MSM_FRONTEND_DAI_VOICEMMODE2, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+};
+
+static const struct snd_kcontrol_new hdmi_rx_voice_mixer_controls[] = {
+	SOC_SINGLE_EXT("CSVoice", MSM_BACKEND_DAI_HDMI_RX,
+	MSM_FRONTEND_DAI_CS_VOICE, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("Voice2", MSM_BACKEND_DAI_HDMI_RX,
+	MSM_FRONTEND_DAI_VOICE2, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("Voip", MSM_BACKEND_DAI_HDMI_RX,
+	MSM_FRONTEND_DAI_VOIP, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("VoLTE", MSM_BACKEND_DAI_HDMI_RX,
+	MSM_FRONTEND_DAI_VOLTE, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("VoWLAN", MSM_BACKEND_DAI_HDMI_RX,
+	MSM_FRONTEND_DAI_VOWLAN, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("Voice Stub", MSM_BACKEND_DAI_HDMI_RX,
+	MSM_FRONTEND_DAI_VOICE_STUB, 1, 0, msm_routing_get_voice_stub_mixer,
+	msm_routing_put_voice_stub_mixer),
+	SOC_SINGLE_EXT("Voice2 Stub", MSM_BACKEND_DAI_HDMI_RX,
+	MSM_FRONTEND_DAI_VOICE2_STUB, 1, 0, msm_routing_get_voice_stub_mixer,
+	msm_routing_put_voice_stub_mixer),
+	SOC_SINGLE_EXT("VoLTE Stub", MSM_BACKEND_DAI_HDMI_RX,
+	MSM_FRONTEND_DAI_VOLTE_STUB, 1, 0, msm_routing_get_voice_stub_mixer,
+	msm_routing_put_voice_stub_mixer),
+	SOC_SINGLE_EXT("DTMF", MSM_BACKEND_DAI_HDMI_RX,
+	MSM_FRONTEND_DAI_DTMF_RX, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("QCHAT", MSM_BACKEND_DAI_HDMI_RX,
+	MSM_FRONTEND_DAI_QCHAT, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("VoiceMMode1", MSM_BACKEND_DAI_HDMI_RX,
+	MSM_FRONTEND_DAI_VOICEMMODE1, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("VoiceMMode2", MSM_BACKEND_DAI_HDMI_RX,
+	MSM_FRONTEND_DAI_VOICEMMODE2, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+};
+
+static const struct snd_kcontrol_new slimbus_7_rx_voice_mixer_controls[] = {
+	SOC_SINGLE_EXT("CSVoice", MSM_BACKEND_DAI_SLIMBUS_7_RX,
+	MSM_FRONTEND_DAI_CS_VOICE, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("Voice2", MSM_BACKEND_DAI_SLIMBUS_7_RX,
+	MSM_FRONTEND_DAI_VOICE2, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("Voip", MSM_BACKEND_DAI_SLIMBUS_7_RX,
+	MSM_FRONTEND_DAI_VOIP, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("Voice Stub", MSM_BACKEND_DAI_SLIMBUS_7_RX,
+	MSM_FRONTEND_DAI_VOICE_STUB, 1, 0, msm_routing_get_voice_stub_mixer,
+	msm_routing_put_voice_stub_mixer),
+	SOC_SINGLE_EXT("Voice2 Stub", MSM_BACKEND_DAI_SLIMBUS_7_RX,
+	MSM_FRONTEND_DAI_VOICE2_STUB, 1, 0, msm_routing_get_voice_stub_mixer,
+	msm_routing_put_voice_stub_mixer),
+	SOC_SINGLE_EXT("VoLTE", MSM_BACKEND_DAI_SLIMBUS_7_RX,
+	MSM_FRONTEND_DAI_VOLTE, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("VoLTE Stub", MSM_BACKEND_DAI_SLIMBUS_7_RX,
+	MSM_FRONTEND_DAI_VOLTE_STUB, 1, 0, msm_routing_get_voice_stub_mixer,
+	msm_routing_put_voice_stub_mixer),
+	SOC_SINGLE_EXT("VoWLAN", MSM_BACKEND_DAI_SLIMBUS_7_RX,
+	MSM_FRONTEND_DAI_VOWLAN, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("DTMF", MSM_BACKEND_DAI_SLIMBUS_7_RX,
+	MSM_FRONTEND_DAI_DTMF_RX, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("QCHAT", MSM_BACKEND_DAI_SLIMBUS_7_RX,
+	MSM_FRONTEND_DAI_QCHAT, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("VoiceMMode1", MSM_BACKEND_DAI_SLIMBUS_7_RX,
+	MSM_FRONTEND_DAI_VOICEMMODE1, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("VoiceMMode2", MSM_BACKEND_DAI_SLIMBUS_7_RX,
+	MSM_FRONTEND_DAI_VOICEMMODE2, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+};
+
+static const struct snd_kcontrol_new slimbus_8_rx_voice_mixer_controls[] = {
+	SOC_SINGLE_EXT("CSVoice", MSM_BACKEND_DAI_SLIMBUS_8_RX,
+	MSM_FRONTEND_DAI_CS_VOICE, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("Voice2", MSM_BACKEND_DAI_SLIMBUS_8_RX,
+	MSM_FRONTEND_DAI_VOICE2, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("Voip", MSM_BACKEND_DAI_SLIMBUS_8_RX,
+	MSM_FRONTEND_DAI_VOIP, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("Voice Stub", MSM_BACKEND_DAI_SLIMBUS_8_RX,
+	MSM_FRONTEND_DAI_VOICE_STUB, 1, 0, msm_routing_get_voice_stub_mixer,
+	msm_routing_put_voice_stub_mixer),
+	SOC_SINGLE_EXT("Voice2 Stub", MSM_BACKEND_DAI_SLIMBUS_8_RX,
+	MSM_FRONTEND_DAI_VOICE2_STUB, 1, 0, msm_routing_get_voice_stub_mixer,
+	msm_routing_put_voice_stub_mixer),
+	SOC_SINGLE_EXT("VoLTE", MSM_BACKEND_DAI_SLIMBUS_8_RX,
+	MSM_FRONTEND_DAI_VOLTE, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("VoLTE Stub", MSM_BACKEND_DAI_SLIMBUS_8_RX,
+	MSM_FRONTEND_DAI_VOLTE_STUB, 1, 0, msm_routing_get_voice_stub_mixer,
+	msm_routing_put_voice_stub_mixer),
+	SOC_SINGLE_EXT("VoWLAN", MSM_BACKEND_DAI_SLIMBUS_8_RX,
+	MSM_FRONTEND_DAI_VOWLAN, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("DTMF", MSM_BACKEND_DAI_SLIMBUS_8_RX,
+	MSM_FRONTEND_DAI_DTMF_RX, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("QCHAT", MSM_BACKEND_DAI_SLIMBUS_8_RX,
+	MSM_FRONTEND_DAI_QCHAT, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("VoiceMMode1", MSM_BACKEND_DAI_SLIMBUS_8_RX,
+	MSM_FRONTEND_DAI_VOICEMMODE1, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("VoiceMMode2", MSM_BACKEND_DAI_SLIMBUS_8_RX,
+	MSM_FRONTEND_DAI_VOICEMMODE2, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+};
+
+static const struct snd_kcontrol_new quat_tdm_rx_2_voice_mixer_controls[] = {
+	SOC_SINGLE_EXT("VoiceMMode1", MSM_BACKEND_DAI_QUAT_TDM_RX_2,
+	MSM_FRONTEND_DAI_VOICEMMODE1, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+};
+
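+/*
+ * Stub-only RX mixers (EXTPROC, SLIMBUS_1/3): these backends accept just
+ * the Voice/Voice2/VoLTE Stub frontends, which appear to carry voice
+ * paths terminated on an external processor rather than the local DSP.
+ */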
+static const struct snd_kcontrol_new stub_rx_mixer_controls[] = {
+	SOC_SINGLE_EXT("Voice Stub", MSM_BACKEND_DAI_EXTPROC_RX,
+	MSM_FRONTEND_DAI_VOICE_STUB, 1, 0, msm_routing_get_voice_stub_mixer,
+	msm_routing_put_voice_stub_mixer),
+	SOC_SINGLE_EXT("Voice2 Stub", MSM_BACKEND_DAI_EXTPROC_RX,
+	MSM_FRONTEND_DAI_VOICE2_STUB, 1, 0, msm_routing_get_voice_stub_mixer,
+	msm_routing_put_voice_stub_mixer),
+	SOC_SINGLE_EXT("VoLTE Stub", MSM_BACKEND_DAI_EXTPROC_RX,
+	MSM_FRONTEND_DAI_VOLTE_STUB, 1, 0, msm_routing_get_voice_stub_mixer,
+	msm_routing_put_voice_stub_mixer),
+};
+
+static const struct snd_kcontrol_new slimbus_1_rx_mixer_controls[] = {
+	SOC_SINGLE_EXT("Voice Stub", MSM_BACKEND_DAI_SLIMBUS_1_RX,
+	MSM_FRONTEND_DAI_VOICE_STUB, 1, 0, msm_routing_get_voice_stub_mixer,
+	msm_routing_put_voice_stub_mixer),
+	SOC_SINGLE_EXT("Voice2 Stub", MSM_BACKEND_DAI_SLIMBUS_1_RX,
+	MSM_FRONTEND_DAI_VOICE2_STUB, 1, 0, msm_routing_get_voice_stub_mixer,
+	msm_routing_put_voice_stub_mixer),
+	SOC_SINGLE_EXT("VoLTE Stub", MSM_BACKEND_DAI_SLIMBUS_1_RX,
+	MSM_FRONTEND_DAI_VOLTE_STUB, 1, 0, msm_routing_get_voice_stub_mixer,
+	msm_routing_put_voice_stub_mixer),
+};
+
+static const struct snd_kcontrol_new slimbus_3_rx_mixer_controls[] = {
+	SOC_SINGLE_EXT("Voice Stub", MSM_BACKEND_DAI_SLIMBUS_3_RX,
+	MSM_FRONTEND_DAI_VOICE_STUB, 1, 0, msm_routing_get_voice_stub_mixer,
+	msm_routing_put_voice_stub_mixer),
+	SOC_SINGLE_EXT("Voice2 Stub", MSM_BACKEND_DAI_SLIMBUS_3_RX,
+	MSM_FRONTEND_DAI_VOICE2_STUB, 1, 0, msm_routing_get_voice_stub_mixer,
+	msm_routing_put_voice_stub_mixer),
+	SOC_SINGLE_EXT("VoLTE Stub", MSM_BACKEND_DAI_SLIMBUS_3_RX,
+	MSM_FRONTEND_DAI_VOLTE_STUB, 1, 0, msm_routing_get_voice_stub_mixer,
+	msm_routing_put_voice_stub_mixer),
+};
+
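+/*
+ * Uplink voice mixer controls: one array per voice session, mirroring
+ * the RX arrays above. Each switch selects which capture backend (TX
+ * DAI) feeds that session's uplink.
+ */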
+static const struct snd_kcontrol_new tx_voice_mixer_controls[] = {
+	SOC_SINGLE_EXT("PRI_TX_Voice", MSM_BACKEND_DAI_PRI_I2S_TX,
+	MSM_FRONTEND_DAI_CS_VOICE, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("MI2S_TX_Voice", MSM_BACKEND_DAI_MI2S_TX,
+	MSM_FRONTEND_DAI_CS_VOICE, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("SLIM_0_TX_Voice", MSM_BACKEND_DAI_SLIMBUS_0_TX,
+	MSM_FRONTEND_DAI_CS_VOICE, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("INTERNAL_BT_SCO_TX_Voice",
+	MSM_BACKEND_DAI_INT_BT_SCO_TX, MSM_FRONTEND_DAI_CS_VOICE, 1, 0,
+	msm_routing_get_voice_mixer, msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("AFE_PCM_TX_Voice", MSM_BACKEND_DAI_AFE_PCM_TX,
+	MSM_FRONTEND_DAI_CS_VOICE, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("AUX_PCM_TX_Voice", MSM_BACKEND_DAI_AUXPCM_TX,
+	MSM_FRONTEND_DAI_CS_VOICE, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("SEC_AUX_PCM_TX_Voice", MSM_BACKEND_DAI_SEC_AUXPCM_TX,
+	MSM_FRONTEND_DAI_CS_VOICE, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("TERT_AUX_PCM_TX_Voice", MSM_BACKEND_DAI_TERT_AUXPCM_TX,
+	MSM_FRONTEND_DAI_CS_VOICE, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("QUAT_AUX_PCM_TX_Voice", MSM_BACKEND_DAI_QUAT_AUXPCM_TX,
+	MSM_FRONTEND_DAI_CS_VOICE, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("PRI_MI2S_TX_Voice", MSM_BACKEND_DAI_PRI_MI2S_TX,
+	MSM_FRONTEND_DAI_CS_VOICE, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("SEC_MI2S_TX_Voice", MSM_BACKEND_DAI_SECONDARY_MI2S_TX,
+	MSM_FRONTEND_DAI_CS_VOICE, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("TERT_MI2S_TX_Voice", MSM_BACKEND_DAI_TERTIARY_MI2S_TX,
+	MSM_FRONTEND_DAI_CS_VOICE, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("SLIM_7_TX_Voice", MSM_BACKEND_DAI_SLIMBUS_7_TX,
+	MSM_FRONTEND_DAI_CS_VOICE, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("SLIM_8_TX_Voice", MSM_BACKEND_DAI_SLIMBUS_8_TX,
+	MSM_FRONTEND_DAI_CS_VOICE, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("USB_AUDIO_TX_Voice", MSM_BACKEND_DAI_USB_TX,
+	MSM_FRONTEND_DAI_CS_VOICE, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+};
+
+static const struct snd_kcontrol_new tx_voice2_mixer_controls[] = {
+	SOC_SINGLE_EXT("PRI_TX_Voice2", MSM_BACKEND_DAI_PRI_I2S_TX,
+	MSM_FRONTEND_DAI_VOICE2, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("MI2S_TX_Voice2", MSM_BACKEND_DAI_MI2S_TX,
+	MSM_FRONTEND_DAI_VOICE2, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("SLIM_0_TX_Voice2", MSM_BACKEND_DAI_SLIMBUS_0_TX,
+	MSM_FRONTEND_DAI_VOICE2, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("INTERNAL_BT_SCO_TX_Voice2",
+	MSM_BACKEND_DAI_INT_BT_SCO_TX, MSM_FRONTEND_DAI_VOICE2, 1, 0,
+	msm_routing_get_voice_mixer, msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("AFE_PCM_TX_Voice2", MSM_BACKEND_DAI_AFE_PCM_TX,
+	MSM_FRONTEND_DAI_VOICE2, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("AUX_PCM_TX_Voice2", MSM_BACKEND_DAI_AUXPCM_TX,
+	MSM_FRONTEND_DAI_VOICE2, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("SEC_AUX_PCM_TX_Voice2", MSM_BACKEND_DAI_SEC_AUXPCM_TX,
+	MSM_FRONTEND_DAI_VOICE2, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("TERT_AUX_PCM_TX_Voice2", MSM_BACKEND_DAI_TERT_AUXPCM_TX,
+	MSM_FRONTEND_DAI_VOICE2, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("QUAT_AUX_PCM_TX_Voice2", MSM_BACKEND_DAI_QUAT_AUXPCM_TX,
+	MSM_FRONTEND_DAI_VOICE2, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("PRI_MI2S_TX_Voice2", MSM_BACKEND_DAI_PRI_MI2S_TX,
+	MSM_FRONTEND_DAI_VOICE2, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("TERT_MI2S_TX_Voice2", MSM_BACKEND_DAI_TERTIARY_MI2S_TX,
+	MSM_FRONTEND_DAI_VOICE2, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("SLIM_7_TX_Voice2", MSM_BACKEND_DAI_SLIMBUS_7_TX,
+	MSM_FRONTEND_DAI_VOICE2, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("SLIM_8_TX_Voice2", MSM_BACKEND_DAI_SLIMBUS_8_TX,
+	MSM_FRONTEND_DAI_VOICE2, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("USB_AUDIO_TX_Voice2", MSM_BACKEND_DAI_USB_TX,
+	MSM_FRONTEND_DAI_VOICE2, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+};
+
+static const struct snd_kcontrol_new tx_volte_mixer_controls[] = {
+	SOC_SINGLE_EXT("PRI_TX_VoLTE", MSM_BACKEND_DAI_PRI_I2S_TX,
+	MSM_FRONTEND_DAI_VOLTE, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("SLIM_0_TX_VoLTE", MSM_BACKEND_DAI_SLIMBUS_0_TX,
+	MSM_FRONTEND_DAI_VOLTE, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("INTERNAL_BT_SCO_TX_VoLTE",
+	MSM_BACKEND_DAI_INT_BT_SCO_TX, MSM_FRONTEND_DAI_VOLTE, 1, 0,
+	msm_routing_get_voice_mixer, msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("AFE_PCM_TX_VoLTE", MSM_BACKEND_DAI_AFE_PCM_TX,
+	MSM_FRONTEND_DAI_VOLTE, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("AUX_PCM_TX_VoLTE", MSM_BACKEND_DAI_AUXPCM_TX,
+	MSM_FRONTEND_DAI_VOLTE, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("SEC_AUX_PCM_TX_VoLTE", MSM_BACKEND_DAI_SEC_AUXPCM_TX,
+	MSM_FRONTEND_DAI_VOLTE, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("TERT_AUX_PCM_TX_VoLTE", MSM_BACKEND_DAI_TERT_AUXPCM_TX,
+	MSM_FRONTEND_DAI_VOLTE, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("QUAT_AUX_PCM_TX_VoLTE", MSM_BACKEND_DAI_QUAT_AUXPCM_TX,
+	MSM_FRONTEND_DAI_VOLTE, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("MI2S_TX_VoLTE", MSM_BACKEND_DAI_MI2S_TX,
+	MSM_FRONTEND_DAI_VOLTE, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("PRI_MI2S_TX_VoLTE", MSM_BACKEND_DAI_PRI_MI2S_TX,
+	MSM_FRONTEND_DAI_VOLTE, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("TERT_MI2S_TX_VoLTE", MSM_BACKEND_DAI_TERTIARY_MI2S_TX,
+	MSM_FRONTEND_DAI_VOLTE, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("SLIM_7_TX_VoLTE", MSM_BACKEND_DAI_SLIMBUS_7_TX,
+	MSM_FRONTEND_DAI_VOLTE, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("SLIM_8_TX_VoLTE", MSM_BACKEND_DAI_SLIMBUS_8_TX,
+	MSM_FRONTEND_DAI_VOLTE, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("USB_AUDIO_TX_VoLTE", MSM_BACKEND_DAI_USB_TX,
+	MSM_FRONTEND_DAI_VOLTE, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+};
+
+static const struct snd_kcontrol_new tx_vowlan_mixer_controls[] = {
+	SOC_SINGLE_EXT("PRI_TX_VoWLAN", MSM_BACKEND_DAI_PRI_I2S_TX,
+	MSM_FRONTEND_DAI_VOWLAN, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("SLIM_0_TX_VoWLAN", MSM_BACKEND_DAI_SLIMBUS_0_TX,
+	MSM_FRONTEND_DAI_VOWLAN, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("INTERNAL_BT_SCO_TX_VoWLAN",
+	MSM_BACKEND_DAI_INT_BT_SCO_TX, MSM_FRONTEND_DAI_VOWLAN, 1, 0,
+	msm_routing_get_voice_mixer, msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("AFE_PCM_TX_VoWLAN", MSM_BACKEND_DAI_AFE_PCM_TX,
+	MSM_FRONTEND_DAI_VOWLAN, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("AUX_PCM_TX_VoWLAN", MSM_BACKEND_DAI_AUXPCM_TX,
+	MSM_FRONTEND_DAI_VOWLAN, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("SEC_AUX_PCM_TX_VoWLAN", MSM_BACKEND_DAI_SEC_AUXPCM_TX,
+	MSM_FRONTEND_DAI_VOWLAN, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("TERT_AUX_PCM_TX_VoWLAN", MSM_BACKEND_DAI_TERT_AUXPCM_TX,
+	MSM_FRONTEND_DAI_VOWLAN, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("QUAT_AUX_PCM_TX_VoWLAN", MSM_BACKEND_DAI_QUAT_AUXPCM_TX,
+	MSM_FRONTEND_DAI_VOWLAN, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("MI2S_TX_VoWLAN", MSM_BACKEND_DAI_MI2S_TX,
+	MSM_FRONTEND_DAI_VOWLAN, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("PRI_MI2S_TX_VoWLAN", MSM_BACKEND_DAI_PRI_MI2S_TX,
+	MSM_FRONTEND_DAI_VOWLAN, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("TERT_MI2S_TX_VoWLAN", MSM_BACKEND_DAI_TERTIARY_MI2S_TX,
+	MSM_FRONTEND_DAI_VOWLAN, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("SLIM_7_TX_VoWLAN", MSM_BACKEND_DAI_SLIMBUS_7_TX,
+	MSM_FRONTEND_DAI_VOWLAN, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("SLIM_8_TX_VoWLAN", MSM_BACKEND_DAI_SLIMBUS_8_TX,
+	MSM_FRONTEND_DAI_VOWLAN, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("USB_AUDIO_TX_VoWLAN", MSM_BACKEND_DAI_USB_TX,
+	MSM_FRONTEND_DAI_VOWLAN, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+};
+
+static const struct snd_kcontrol_new tx_voicemmode1_mixer_controls[] = {
+	SOC_SINGLE_EXT("PRI_TX_MMode1", MSM_BACKEND_DAI_PRI_I2S_TX,
+	MSM_FRONTEND_DAI_VOICEMMODE1, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("MI2S_TX_MMode1", MSM_BACKEND_DAI_MI2S_TX,
+	MSM_FRONTEND_DAI_VOICEMMODE1, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("SLIM_0_TX_MMode1",
+	MSM_BACKEND_DAI_SLIMBUS_0_TX, MSM_FRONTEND_DAI_VOICEMMODE1, 1,
+	0, msm_routing_get_voice_mixer, msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("INT_BT_SCO_TX_MMode1",
+	MSM_BACKEND_DAI_INT_BT_SCO_TX, MSM_FRONTEND_DAI_VOICEMMODE1, 1,
+	0, msm_routing_get_voice_mixer, msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("AFE_PCM_TX_MMode1",
+	MSM_BACKEND_DAI_AFE_PCM_TX, MSM_FRONTEND_DAI_VOICEMMODE1, 1, 0,
+	msm_routing_get_voice_mixer, msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("AUX_PCM_TX_MMode1",
+	MSM_BACKEND_DAI_AUXPCM_TX, MSM_FRONTEND_DAI_VOICEMMODE1, 1, 0,
+	msm_routing_get_voice_mixer, msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("SEC_AUX_PCM_TX_MMode1",
+	MSM_BACKEND_DAI_SEC_AUXPCM_TX, MSM_FRONTEND_DAI_VOICEMMODE1, 1,
+	0, msm_routing_get_voice_mixer, msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("TERT_AUX_PCM_TX_MMode1",
+	MSM_BACKEND_DAI_TERT_AUXPCM_TX, MSM_FRONTEND_DAI_VOICEMMODE1, 1,
+	0, msm_routing_get_voice_mixer, msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("QUAT_AUX_PCM_TX_MMode1",
+	MSM_BACKEND_DAI_QUAT_AUXPCM_TX, MSM_FRONTEND_DAI_VOICEMMODE1, 1,
+	0, msm_routing_get_voice_mixer, msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("PRI_MI2S_TX_MMode1",
+	MSM_BACKEND_DAI_PRI_MI2S_TX, MSM_FRONTEND_DAI_VOICEMMODE1, 1, 0,
+	msm_routing_get_voice_mixer, msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("TERT_MI2S_TX_MMode1",
+	MSM_BACKEND_DAI_TERTIARY_MI2S_TX, MSM_FRONTEND_DAI_VOICEMMODE1,
+	1, 0, msm_routing_get_voice_mixer, msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("INT3_MI2S_TX_MMode1",
+	MSM_BACKEND_DAI_INT3_MI2S_TX, MSM_FRONTEND_DAI_VOICEMMODE1,
+	1, 0, msm_routing_get_voice_mixer, msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("SLIM_7_TX_MMode1",
+	MSM_BACKEND_DAI_SLIMBUS_7_TX, MSM_FRONTEND_DAI_VOICEMMODE1, 1,
+	0, msm_routing_get_voice_mixer, msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("SLIM_8_TX_MMode1",
+	MSM_BACKEND_DAI_SLIMBUS_8_TX, MSM_FRONTEND_DAI_VOICEMMODE1, 1,
+	0, msm_routing_get_voice_mixer, msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("USB_AUDIO_TX_MMode1", MSM_BACKEND_DAI_USB_TX,
+	MSM_FRONTEND_DAI_VOICEMMODE1, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("QUAT_TDM_TX_0_MMode1",
+	MSM_BACKEND_DAI_QUAT_TDM_TX_0, MSM_FRONTEND_DAI_VOICEMMODE1,
+	1, 0, msm_routing_get_voice_mixer, msm_routing_put_voice_mixer),
+};
+
+static const struct snd_kcontrol_new tx_voicemmode2_mixer_controls[] = {
+	SOC_SINGLE_EXT("PRI_TX_MMode2", MSM_BACKEND_DAI_PRI_I2S_TX,
+	MSM_FRONTEND_DAI_VOICEMMODE2, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("MI2S_TX_MMode2", MSM_BACKEND_DAI_MI2S_TX,
+	MSM_FRONTEND_DAI_VOICEMMODE2, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("SLIM_0_TX_MMode2",
+	MSM_BACKEND_DAI_SLIMBUS_0_TX, MSM_FRONTEND_DAI_VOICEMMODE2, 1,
+	0, msm_routing_get_voice_mixer, msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("INT_BT_SCO_TX_MMode2",
+	MSM_BACKEND_DAI_INT_BT_SCO_TX, MSM_FRONTEND_DAI_VOICEMMODE2, 1,
+	0, msm_routing_get_voice_mixer, msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("AFE_PCM_TX_MMode2",
+	MSM_BACKEND_DAI_AFE_PCM_TX, MSM_FRONTEND_DAI_VOICEMMODE2, 1, 0,
+	msm_routing_get_voice_mixer, msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("AUX_PCM_TX_MMode2",
+	MSM_BACKEND_DAI_AUXPCM_TX, MSM_FRONTEND_DAI_VOICEMMODE2, 1, 0,
+	msm_routing_get_voice_mixer, msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("SEC_AUX_PCM_TX_MMode2",
+	MSM_BACKEND_DAI_SEC_AUXPCM_TX, MSM_FRONTEND_DAI_VOICEMMODE2, 1,
+	0, msm_routing_get_voice_mixer, msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("TERT_AUX_PCM_TX_MMode2",
+	MSM_BACKEND_DAI_TERT_AUXPCM_TX, MSM_FRONTEND_DAI_VOICEMMODE2, 1,
+	0, msm_routing_get_voice_mixer, msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("QUAT_AUX_PCM_TX_MMode2",
+	MSM_BACKEND_DAI_QUAT_AUXPCM_TX, MSM_FRONTEND_DAI_VOICEMMODE2, 1,
+	0, msm_routing_get_voice_mixer, msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("PRI_MI2S_TX_MMode2",
+	MSM_BACKEND_DAI_PRI_MI2S_TX, MSM_FRONTEND_DAI_VOICEMMODE2, 1, 0,
+	msm_routing_get_voice_mixer, msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("TERT_MI2S_TX_MMode2",
+	MSM_BACKEND_DAI_TERTIARY_MI2S_TX, MSM_FRONTEND_DAI_VOICEMMODE2,
+	1, 0, msm_routing_get_voice_mixer, msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("INT3_MI2S_TX_MMode2",
+	MSM_BACKEND_DAI_INT3_MI2S_TX, MSM_FRONTEND_DAI_VOICEMMODE2,
+	1, 0, msm_routing_get_voice_mixer, msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("SLIM_7_TX_MMode2",
+	MSM_BACKEND_DAI_SLIMBUS_7_TX, MSM_FRONTEND_DAI_VOICEMMODE2, 1,
+	0, msm_routing_get_voice_mixer, msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("SLIM_8_TX_MMode2",
+	MSM_BACKEND_DAI_SLIMBUS_8_TX, MSM_FRONTEND_DAI_VOICEMMODE2, 1,
+	0, msm_routing_get_voice_mixer, msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("USB_AUDIO_TX_MMode2",
+	MSM_BACKEND_DAI_USB_TX, MSM_FRONTEND_DAI_VOICEMMODE2, 1,
+	0, msm_routing_get_voice_mixer, msm_routing_put_voice_mixer),
+};
+
+static const struct snd_kcontrol_new tx_voip_mixer_controls[] = {
+	SOC_SINGLE_EXT("PRI_TX_Voip", MSM_BACKEND_DAI_PRI_I2S_TX,
+	MSM_FRONTEND_DAI_VOIP, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("MI2S_TX_Voip", MSM_BACKEND_DAI_MI2S_TX,
+	MSM_FRONTEND_DAI_VOIP, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("SLIM_0_TX_Voip", MSM_BACKEND_DAI_SLIMBUS_0_TX,
+	MSM_FRONTEND_DAI_VOIP, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("INTERNAL_BT_SCO_TX_Voip", MSM_BACKEND_DAI_INT_BT_SCO_TX,
+	MSM_FRONTEND_DAI_VOIP, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("AFE_PCM_TX_Voip", MSM_BACKEND_DAI_AFE_PCM_TX,
+	MSM_FRONTEND_DAI_VOIP, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("AUX_PCM_TX_Voip", MSM_BACKEND_DAI_AUXPCM_TX,
+	MSM_FRONTEND_DAI_VOIP, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("SEC_AUX_PCM_TX_Voip", MSM_BACKEND_DAI_SEC_AUXPCM_TX,
+	MSM_FRONTEND_DAI_VOIP, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("TERT_AUX_PCM_TX_Voip", MSM_BACKEND_DAI_TERT_AUXPCM_TX,
+	MSM_FRONTEND_DAI_VOIP, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("QUAT_AUX_PCM_TX_Voip", MSM_BACKEND_DAI_QUAT_AUXPCM_TX,
+	MSM_FRONTEND_DAI_VOIP, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("PRI_MI2S_TX_Voip", MSM_BACKEND_DAI_PRI_MI2S_TX,
+	MSM_FRONTEND_DAI_VOIP, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("TERT_MI2S_TX_Voip", MSM_BACKEND_DAI_TERTIARY_MI2S_TX,
+	MSM_FRONTEND_DAI_VOIP, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("INT3_MI2S_TX_Voip", MSM_BACKEND_DAI_INT3_MI2S_TX,
+	MSM_FRONTEND_DAI_VOIP, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("SLIM_7_TX_Voip", MSM_BACKEND_DAI_SLIMBUS_7_TX,
+	MSM_FRONTEND_DAI_VOIP, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("SLIM_8_TX_Voip", MSM_BACKEND_DAI_SLIMBUS_8_TX,
+	MSM_FRONTEND_DAI_VOIP, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("USB_AUDIO_TX_Voip", MSM_BACKEND_DAI_USB_TX,
+	MSM_FRONTEND_DAI_VOIP, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+};
+
+static const struct snd_kcontrol_new tx_voice_stub_mixer_controls[] = {
+	SOC_SINGLE_EXT("STUB_TX_HL", MSM_BACKEND_DAI_EXTPROC_TX,
+	MSM_FRONTEND_DAI_VOICE_STUB, 1, 0, msm_routing_get_voice_stub_mixer,
+	msm_routing_put_voice_stub_mixer),
+	SOC_SINGLE_EXT("INTERNAL_BT_SCO_TX", MSM_BACKEND_DAI_INT_BT_SCO_TX,
+	MSM_FRONTEND_DAI_VOICE_STUB, 1, 0, msm_routing_get_voice_stub_mixer,
+	msm_routing_put_voice_stub_mixer),
+	SOC_SINGLE_EXT("SLIM_1_TX", MSM_BACKEND_DAI_SLIMBUS_1_TX,
+	MSM_FRONTEND_DAI_VOICE_STUB, 1, 0, msm_routing_get_voice_stub_mixer,
+	msm_routing_put_voice_stub_mixer),
+	SOC_SINGLE_EXT("STUB_1_TX_HL", MSM_BACKEND_DAI_EXTPROC_EC_TX,
+	MSM_FRONTEND_DAI_VOICE_STUB, 1, 0, msm_routing_get_voice_stub_mixer,
+	msm_routing_put_voice_stub_mixer),
+	SOC_SINGLE_EXT("MI2S_TX", MSM_BACKEND_DAI_MI2S_TX,
+	MSM_FRONTEND_DAI_VOICE_STUB, 1, 0, msm_routing_get_voice_stub_mixer,
+	msm_routing_put_voice_stub_mixer),
+	SOC_SINGLE_EXT("PRI_MI2S_TX", MSM_BACKEND_DAI_PRI_MI2S_TX,
+	MSM_FRONTEND_DAI_VOICE_STUB, 1, 0, msm_routing_get_voice_stub_mixer,
+	msm_routing_put_voice_stub_mixer),
+	SOC_SINGLE_EXT("TERT_MI2S_TX", MSM_BACKEND_DAI_TERTIARY_MI2S_TX,
+	MSM_FRONTEND_DAI_VOICE_STUB, 1, 0, msm_routing_get_voice_stub_mixer,
+	msm_routing_put_voice_stub_mixer),
+	SOC_SINGLE_EXT("INT3_MI2S_TX", MSM_BACKEND_DAI_INT3_MI2S_TX,
+	MSM_FRONTEND_DAI_VOICE_STUB, 1, 0, msm_routing_get_voice_stub_mixer,
+	msm_routing_put_voice_stub_mixer),
+	SOC_SINGLE_EXT("QUAT_MI2S_TX", MSM_BACKEND_DAI_QUATERNARY_MI2S_TX,
+	MSM_FRONTEND_DAI_VOICE_STUB, 1, 0, msm_routing_get_voice_stub_mixer,
+	msm_routing_put_voice_stub_mixer),
+	SOC_SINGLE_EXT("AUX_PCM_UL_TX", MSM_BACKEND_DAI_AUXPCM_TX,
+	MSM_FRONTEND_DAI_VOICE_STUB, 1, 0, msm_routing_get_voice_stub_mixer,
+	msm_routing_put_voice_stub_mixer),
+	SOC_SINGLE_EXT("SEC_AUX_PCM_UL_TX", MSM_BACKEND_DAI_SEC_AUXPCM_TX,
+	MSM_FRONTEND_DAI_VOICE_STUB, 1, 0, msm_routing_get_voice_stub_mixer,
+	msm_routing_put_voice_stub_mixer),
+	SOC_SINGLE_EXT("TERT_AUXPCM_UL_TX", MSM_BACKEND_DAI_TERT_AUXPCM_TX,
+	MSM_FRONTEND_DAI_VOICE_STUB, 1, 0, msm_routing_get_voice_stub_mixer,
+	msm_routing_put_voice_stub_mixer),
+	SOC_SINGLE_EXT("QUAT_AUXPCM_UL_TX", MSM_BACKEND_DAI_QUAT_AUXPCM_TX,
+	MSM_FRONTEND_DAI_VOICE_STUB, 1, 0, msm_routing_get_voice_stub_mixer,
+	msm_routing_put_voice_stub_mixer),
+	SOC_SINGLE_EXT("SLIM_0_TX", MSM_BACKEND_DAI_SLIMBUS_0_TX,
+	MSM_FRONTEND_DAI_VOICE_STUB, 1, 0, msm_routing_get_voice_stub_mixer,
+	msm_routing_put_voice_stub_mixer),
+	SOC_SINGLE_EXT("SLIM_3_TX", MSM_BACKEND_DAI_SLIMBUS_3_TX,
+	MSM_FRONTEND_DAI_VOICE_STUB, 1, 0, msm_routing_get_voice_stub_mixer,
+	msm_routing_put_voice_stub_mixer),
+	SOC_SINGLE_EXT("AFE_PCM_TX", MSM_BACKEND_DAI_AFE_PCM_TX,
+	MSM_FRONTEND_DAI_VOICE_STUB, 1, 0, msm_routing_get_voice_stub_mixer,
+	msm_routing_put_voice_stub_mixer),
+	SOC_SINGLE_EXT("SLIM_7_TX", MSM_BACKEND_DAI_SLIMBUS_7_TX,
+	MSM_FRONTEND_DAI_VOICE_STUB, 1, 0, msm_routing_get_voice_stub_mixer,
+	msm_routing_put_voice_stub_mixer),
+	SOC_SINGLE_EXT("SLIM_8_TX", MSM_BACKEND_DAI_SLIMBUS_8_TX,
+	MSM_FRONTEND_DAI_VOICE_STUB, 1, 0, msm_routing_get_voice_stub_mixer,
+	msm_routing_put_voice_stub_mixer),
+};
+
+static const struct snd_kcontrol_new tx_voice2_stub_mixer_controls[] = {
+	SOC_SINGLE_EXT("STUB_TX_HL", MSM_BACKEND_DAI_EXTPROC_TX,
+	MSM_FRONTEND_DAI_VOICE2_STUB, 1, 0, msm_routing_get_voice_stub_mixer,
+	msm_routing_put_voice_stub_mixer),
+	SOC_SINGLE_EXT("SLIM_1_TX", MSM_BACKEND_DAI_SLIMBUS_1_TX,
+	MSM_FRONTEND_DAI_VOICE2_STUB, 1, 0, msm_routing_get_voice_stub_mixer,
+	msm_routing_put_voice_stub_mixer),
+	SOC_SINGLE_EXT("STUB_1_TX_HL", MSM_BACKEND_DAI_EXTPROC_EC_TX,
+	MSM_FRONTEND_DAI_VOICE2_STUB, 1, 0, msm_routing_get_voice_stub_mixer,
+	msm_routing_put_voice_stub_mixer),
+	SOC_SINGLE_EXT("AUX_PCM_UL_TX", MSM_BACKEND_DAI_AUXPCM_TX,
+	MSM_FRONTEND_DAI_VOICE2_STUB, 1, 0, msm_routing_get_voice_stub_mixer,
+	msm_routing_put_voice_stub_mixer),
+	SOC_SINGLE_EXT("SEC_AUX_PCM_UL_TX", MSM_BACKEND_DAI_SEC_AUXPCM_TX,
+	MSM_FRONTEND_DAI_VOICE2_STUB, 1, 0, msm_routing_get_voice_stub_mixer,
+	msm_routing_put_voice_stub_mixer),
+	SOC_SINGLE_EXT("TERT_AUXPCM_UL_TX", MSM_BACKEND_DAI_TERT_AUXPCM_TX,
+	MSM_FRONTEND_DAI_VOICE2_STUB, 1, 0, msm_routing_get_voice_stub_mixer,
+	msm_routing_put_voice_stub_mixer),
+	SOC_SINGLE_EXT("QUAT_AUXPCM_UL_TX", MSM_BACKEND_DAI_QUAT_AUXPCM_TX,
+	MSM_FRONTEND_DAI_VOICE2_STUB, 1, 0, msm_routing_get_voice_stub_mixer,
+	msm_routing_put_voice_stub_mixer),
+	SOC_SINGLE_EXT("SLIM_0_TX", MSM_BACKEND_DAI_SLIMBUS_0_TX,
+	MSM_FRONTEND_DAI_VOICE2_STUB, 1, 0, msm_routing_get_voice_stub_mixer,
+	msm_routing_put_voice_stub_mixer),
+	SOC_SINGLE_EXT("SLIM_3_TX", MSM_BACKEND_DAI_SLIMBUS_3_TX,
+	MSM_FRONTEND_DAI_VOICE2_STUB, 1, 0, msm_routing_get_voice_stub_mixer,
+	msm_routing_put_voice_stub_mixer),
+	SOC_SINGLE_EXT("AFE_PCM_TX", MSM_BACKEND_DAI_AFE_PCM_TX,
+	MSM_FRONTEND_DAI_VOICE2_STUB, 1, 0, msm_routing_get_voice_stub_mixer,
+	msm_routing_put_voice_stub_mixer),
+	SOC_SINGLE_EXT("PRI_MI2S_TX", MSM_BACKEND_DAI_PRI_MI2S_TX,
+	MSM_FRONTEND_DAI_VOICE2_STUB, 1, 0, msm_routing_get_voice_stub_mixer,
+	msm_routing_put_voice_stub_mixer),
+	SOC_SINGLE_EXT("QUAT_MI2S_TX", MSM_BACKEND_DAI_QUATERNARY_MI2S_TX,
+	MSM_FRONTEND_DAI_VOICE2_STUB, 1, 0, msm_routing_get_voice_stub_mixer,
+	msm_routing_put_voice_stub_mixer),
+	SOC_SINGLE_EXT("SLIM_7_TX", MSM_BACKEND_DAI_SLIMBUS_7_TX,
+	MSM_FRONTEND_DAI_VOICE2_STUB, 1, 0, msm_routing_get_voice_stub_mixer,
+	msm_routing_put_voice_stub_mixer),
+	SOC_SINGLE_EXT("SLIM_8_TX", MSM_BACKEND_DAI_SLIMBUS_8_TX,
+	MSM_FRONTEND_DAI_VOICE2_STUB, 1, 0, msm_routing_get_voice_stub_mixer,
+	msm_routing_put_voice_stub_mixer),
+};
+
+static const struct snd_kcontrol_new tx_volte_stub_mixer_controls[] = {
+	SOC_SINGLE_EXT("STUB_TX_HL", MSM_BACKEND_DAI_EXTPROC_TX,
+	MSM_FRONTEND_DAI_VOLTE_STUB, 1, 0, msm_routing_get_voice_stub_mixer,
+	msm_routing_put_voice_stub_mixer),
+	SOC_SINGLE_EXT("SLIM_1_TX", MSM_BACKEND_DAI_SLIMBUS_1_TX,
+	MSM_FRONTEND_DAI_VOLTE_STUB, 1, 0, msm_routing_get_voice_stub_mixer,
+	msm_routing_put_voice_stub_mixer),
+	SOC_SINGLE_EXT("STUB_1_TX_HL", MSM_BACKEND_DAI_EXTPROC_EC_TX,
+	MSM_FRONTEND_DAI_VOLTE_STUB, 1, 0, msm_routing_get_voice_stub_mixer,
+	msm_routing_put_voice_stub_mixer),
+	SOC_SINGLE_EXT("AUX_PCM_UL_TX", MSM_BACKEND_DAI_AUXPCM_TX,
+	MSM_FRONTEND_DAI_VOLTE_STUB, 1, 0, msm_routing_get_voice_stub_mixer,
+	msm_routing_put_voice_stub_mixer),
+	SOC_SINGLE_EXT("SEC_AUX_PCM_UL_TX", MSM_BACKEND_DAI_SEC_AUXPCM_TX,
+	MSM_FRONTEND_DAI_VOLTE_STUB, 1, 0, msm_routing_get_voice_stub_mixer,
+	msm_routing_put_voice_stub_mixer),
+	SOC_SINGLE_EXT("TERT_AUXPCM_UL_TX", MSM_BACKEND_DAI_TERT_AUXPCM_TX,
+	MSM_FRONTEND_DAI_VOLTE_STUB, 1, 0, msm_routing_get_voice_stub_mixer,
+	msm_routing_put_voice_stub_mixer),
+	SOC_SINGLE_EXT("QUAT_AUXPCM_UL_TX", MSM_BACKEND_DAI_QUAT_AUXPCM_TX,
+	MSM_FRONTEND_DAI_VOLTE_STUB, 1, 0, msm_routing_get_voice_stub_mixer,
+	msm_routing_put_voice_stub_mixer),
+	SOC_SINGLE_EXT("SLIM_0_TX", MSM_BACKEND_DAI_SLIMBUS_0_TX,
+	MSM_FRONTEND_DAI_VOLTE_STUB, 1, 0, msm_routing_get_voice_stub_mixer,
+	msm_routing_put_voice_stub_mixer),
+	SOC_SINGLE_EXT("SLIM_3_TX", MSM_BACKEND_DAI_SLIMBUS_3_TX,
+	MSM_FRONTEND_DAI_VOLTE_STUB, 1, 0, msm_routing_get_voice_stub_mixer,
+	msm_routing_put_voice_stub_mixer),
+	SOC_SINGLE_EXT("AFE_PCM_TX", MSM_BACKEND_DAI_AFE_PCM_TX,
+	MSM_FRONTEND_DAI_VOLTE_STUB, 1, 0, msm_routing_get_voice_stub_mixer,
+	msm_routing_put_voice_stub_mixer),
+	SOC_SINGLE_EXT("PRI_MI2S_TX", MSM_BACKEND_DAI_PRI_MI2S_TX,
+	MSM_FRONTEND_DAI_VOLTE_STUB, 1, 0, msm_routing_get_voice_stub_mixer,
+	msm_routing_put_voice_stub_mixer),
+	SOC_SINGLE_EXT("QUAT_MI2S_TX", MSM_BACKEND_DAI_QUATERNARY_MI2S_TX,
+	MSM_FRONTEND_DAI_VOLTE_STUB, 1, 0, msm_routing_get_voice_stub_mixer,
+	msm_routing_put_voice_stub_mixer),
+	SOC_SINGLE_EXT("SLIM_7_TX", MSM_BACKEND_DAI_SLIMBUS_7_TX,
+	MSM_FRONTEND_DAI_VOLTE_STUB, 1, 0, msm_routing_get_voice_stub_mixer,
+	msm_routing_put_voice_stub_mixer),
+	SOC_SINGLE_EXT("SLIM_8_TX", MSM_BACKEND_DAI_SLIMBUS_8_TX,
+	MSM_FRONTEND_DAI_VOLTE_STUB, 1, 0, msm_routing_get_voice_stub_mixer,
+	msm_routing_put_voice_stub_mixer),
+};
+
+static const struct snd_kcontrol_new tx_qchat_mixer_controls[] = {
+	SOC_SINGLE_EXT("PRI_TX_QCHAT", MSM_BACKEND_DAI_PRI_I2S_TX,
+	MSM_FRONTEND_DAI_QCHAT, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("SLIM_0_TX_QCHAT", MSM_BACKEND_DAI_SLIMBUS_0_TX,
+	MSM_FRONTEND_DAI_QCHAT, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("INTERNAL_BT_SCO_TX_QCHAT",
+	MSM_BACKEND_DAI_INT_BT_SCO_TX, MSM_FRONTEND_DAI_QCHAT, 1, 0,
+	msm_routing_get_voice_mixer, msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("AFE_PCM_TX_QCHAT", MSM_BACKEND_DAI_AFE_PCM_TX,
+	MSM_FRONTEND_DAI_QCHAT, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("AUX_PCM_TX_QCHAT", MSM_BACKEND_DAI_AUXPCM_TX,
+	MSM_FRONTEND_DAI_QCHAT, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("SEC_AUX_PCM_TX_QCHAT", MSM_BACKEND_DAI_SEC_AUXPCM_TX,
+	MSM_FRONTEND_DAI_QCHAT, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("TERT_AUX_PCM_TX_QCHAT", MSM_BACKEND_DAI_TERT_AUXPCM_TX,
+	MSM_FRONTEND_DAI_QCHAT, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("QUAT_AUX_PCM_TX_QCHAT", MSM_BACKEND_DAI_QUAT_AUXPCM_TX,
+	MSM_FRONTEND_DAI_QCHAT, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("MI2S_TX_QCHAT", MSM_BACKEND_DAI_MI2S_TX,
+	MSM_FRONTEND_DAI_QCHAT, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("PRI_MI2S_TX_QCHAT", MSM_BACKEND_DAI_PRI_MI2S_TX,
+	MSM_FRONTEND_DAI_QCHAT, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("TERT_MI2S_TX_QCHAT", MSM_BACKEND_DAI_TERTIARY_MI2S_TX,
+	MSM_FRONTEND_DAI_QCHAT, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("INT3_MI2S_TX_QCHAT", MSM_BACKEND_DAI_INT3_MI2S_TX,
+	MSM_FRONTEND_DAI_QCHAT, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("SLIM_7_TX_QCHAT", MSM_BACKEND_DAI_SLIMBUS_7_TX,
+	MSM_FRONTEND_DAI_QCHAT, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("SLIM_8_TX_QCHAT", MSM_BACKEND_DAI_SLIMBUS_8_TX,
+	MSM_FRONTEND_DAI_QCHAT, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("USB_AUDIO_TX_QCHAT", MSM_BACKEND_DAI_USB_TX,
+	MSM_FRONTEND_DAI_QCHAT, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+};
+
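+/*
+ * Backend-to-backend port mixers: reg carries the sink (RX) backend DAI id
+ * and shift the source (TX) backend DAI id handed to the
+ * msm_routing_{get,put}_port_mixer handlers.
+ */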
+static const struct snd_kcontrol_new int0_mi2s_rx_port_mixer_controls[] = {
+	SOC_SINGLE_EXT("PRI_MI2S_TX", MSM_BACKEND_DAI_INT0_MI2S_RX,
+	MSM_BACKEND_DAI_PRI_MI2S_TX, 1, 0, msm_routing_get_port_mixer,
+	msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("SEC_MI2S_TX", MSM_BACKEND_DAI_INT0_MI2S_RX,
+	MSM_BACKEND_DAI_SECONDARY_MI2S_TX, 1, 0, msm_routing_get_port_mixer,
+	msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("TERT_MI2S_TX", MSM_BACKEND_DAI_INT0_MI2S_RX,
+	MSM_BACKEND_DAI_TERTIARY_MI2S_TX, 1, 0, msm_routing_get_port_mixer,
+	msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("QUAT_MI2S_TX", MSM_BACKEND_DAI_INT0_MI2S_RX,
+	MSM_BACKEND_DAI_QUATERNARY_MI2S_TX, 1, 0, msm_routing_get_port_mixer,
+	msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("INT3_MI2S_TX", MSM_BACKEND_DAI_INT0_MI2S_RX,
+	MSM_BACKEND_DAI_INT3_MI2S_TX, 1, 0, msm_routing_get_port_mixer,
+	msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("INTERNAL_FM_TX", MSM_BACKEND_DAI_INT0_MI2S_RX,
+	MSM_BACKEND_DAI_INT_FM_TX, 1, 0, msm_routing_get_port_mixer,
+	msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("INTERNAL_BT_SCO_TX", MSM_BACKEND_DAI_INT0_MI2S_RX,
+	MSM_BACKEND_DAI_INT_BT_SCO_TX, 1, 0, msm_routing_get_port_mixer,
+	msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("SLIM_7_TX", MSM_BACKEND_DAI_INT0_MI2S_RX,
+	MSM_BACKEND_DAI_SLIMBUS_7_TX, 1, 0, msm_routing_get_port_mixer,
+	msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("SLIM_8_TX", MSM_BACKEND_DAI_INT0_MI2S_RX,
+	MSM_BACKEND_DAI_SLIMBUS_8_TX, 1, 0, msm_routing_get_port_mixer,
+	msm_routing_put_port_mixer),
+};
+
+static const struct snd_kcontrol_new int4_mi2s_rx_port_mixer_controls[] = {
+	SOC_SINGLE_EXT("PRI_MI2S_TX", MSM_BACKEND_DAI_INT4_MI2S_RX,
+	MSM_BACKEND_DAI_PRI_MI2S_TX, 1, 0, msm_routing_get_port_mixer,
+	msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("SEC_MI2S_TX", MSM_BACKEND_DAI_INT4_MI2S_RX,
+	MSM_BACKEND_DAI_SECONDARY_MI2S_TX, 1, 0, msm_routing_get_port_mixer,
+	msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("TERT_MI2S_TX", MSM_BACKEND_DAI_INT4_MI2S_RX,
+	MSM_BACKEND_DAI_TERTIARY_MI2S_TX, 1, 0, msm_routing_get_port_mixer,
+	msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("QUAT_MI2S_TX", MSM_BACKEND_DAI_INT4_MI2S_RX,
+	MSM_BACKEND_DAI_QUATERNARY_MI2S_TX, 1, 0, msm_routing_get_port_mixer,
+	msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("INT3_MI2S_TX", MSM_BACKEND_DAI_INT4_MI2S_RX,
+	MSM_BACKEND_DAI_INT3_MI2S_TX, 1, 0, msm_routing_get_port_mixer,
+	msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("INTERNAL_FM_TX", MSM_BACKEND_DAI_INT4_MI2S_RX,
+	MSM_BACKEND_DAI_INT_FM_TX, 1, 0, msm_routing_get_port_mixer,
+	msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("INTERNAL_BT_SCO_TX", MSM_BACKEND_DAI_INT4_MI2S_RX,
+	MSM_BACKEND_DAI_INT_BT_SCO_TX, 1, 0, msm_routing_get_port_mixer,
+	msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("SLIM_7_TX", MSM_BACKEND_DAI_INT4_MI2S_RX,
+	MSM_BACKEND_DAI_SLIMBUS_7_TX, 1, 0, msm_routing_get_port_mixer,
+	msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("SLIM_8_TX", MSM_BACKEND_DAI_INT4_MI2S_RX,
+	MSM_BACKEND_DAI_SLIMBUS_8_TX, 1, 0, msm_routing_get_port_mixer,
+	msm_routing_put_port_mixer),
+};
+
+static const struct snd_kcontrol_new sbus_0_rx_port_mixer_controls[] = {
+	SOC_SINGLE_EXT("INTERNAL_FM_TX", MSM_BACKEND_DAI_SLIMBUS_0_RX,
+	MSM_BACKEND_DAI_INT_FM_TX, 1, 0, msm_routing_get_port_mixer,
+	msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("SLIM_0_TX", MSM_BACKEND_DAI_SLIMBUS_0_RX,
+	MSM_BACKEND_DAI_SLIMBUS_0_TX, 1, 0, msm_routing_get_port_mixer,
+	msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("SLIM_1_TX", MSM_BACKEND_DAI_SLIMBUS_0_RX,
+	MSM_BACKEND_DAI_SLIMBUS_1_TX, 1, 0, msm_routing_get_port_mixer,
+	msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("SLIM_7_TX", MSM_BACKEND_DAI_SLIMBUS_0_RX,
+	MSM_BACKEND_DAI_SLIMBUS_7_TX, 1, 0, msm_routing_get_port_mixer,
+	msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("SLIM_8_TX", MSM_BACKEND_DAI_SLIMBUS_0_RX,
+	MSM_BACKEND_DAI_SLIMBUS_8_TX, 1, 0, msm_routing_get_port_mixer,
+	msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("AUX_PCM_UL_TX", MSM_BACKEND_DAI_SLIMBUS_0_RX,
+	MSM_BACKEND_DAI_AUXPCM_TX, 1, 0, msm_routing_get_port_mixer,
+	msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("SEC_AUX_PCM_UL_TX", MSM_BACKEND_DAI_SLIMBUS_0_RX,
+	MSM_BACKEND_DAI_SEC_AUXPCM_TX, 1, 0, msm_routing_get_port_mixer,
+	msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("TERT_AUXPCM_UL_TX", MSM_BACKEND_DAI_SLIMBUS_0_RX,
+	MSM_BACKEND_DAI_TERT_AUXPCM_TX, 1, 0, msm_routing_get_port_mixer,
+	msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("QUAT_AUXPCM_UL_TX", MSM_BACKEND_DAI_SLIMBUS_0_RX,
+	MSM_BACKEND_DAI_QUAT_AUXPCM_TX, 1, 0, msm_routing_get_port_mixer,
+	msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("MI2S_TX", MSM_BACKEND_DAI_SLIMBUS_0_RX,
+	MSM_BACKEND_DAI_MI2S_TX, 1, 0, msm_routing_get_port_mixer,
+	msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("PRI_MI2S_TX", MSM_BACKEND_DAI_SLIMBUS_0_RX,
+	MSM_BACKEND_DAI_PRI_MI2S_TX, 1, 0, msm_routing_get_port_mixer,
+	msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("SEC_MI2S_TX", MSM_BACKEND_DAI_SLIMBUS_0_RX,
+	MSM_BACKEND_DAI_SECONDARY_MI2S_TX, 1, 0, msm_routing_get_port_mixer,
+	msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("TERT_MI2S_TX", MSM_BACKEND_DAI_SLIMBUS_0_RX,
+	MSM_BACKEND_DAI_TERTIARY_MI2S_TX, 1, 0, msm_routing_get_port_mixer,
+	msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("QUAT_MI2S_TX", MSM_BACKEND_DAI_SLIMBUS_0_RX,
+	MSM_BACKEND_DAI_QUATERNARY_MI2S_TX, 1, 0, msm_routing_get_port_mixer,
+	msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("INTERNAL_BT_SCO_TX", MSM_BACKEND_DAI_SLIMBUS_0_RX,
+	MSM_BACKEND_DAI_INT_BT_SCO_TX, 1, 0, msm_routing_get_port_mixer,
+	msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("PRI_MI2S_RX", MSM_BACKEND_DAI_SLIMBUS_0_RX,
+	MSM_BACKEND_DAI_PRI_MI2S_RX, 1, 0, msm_routing_get_port_mixer,
+	msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("SEC_MI2S_RX", MSM_BACKEND_DAI_SLIMBUS_0_RX,
+	MSM_BACKEND_DAI_SECONDARY_MI2S_RX, 1, 0, msm_routing_get_port_mixer,
+	msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("TERT_MI2S_RX", MSM_BACKEND_DAI_SLIMBUS_0_RX,
+	MSM_BACKEND_DAI_TERTIARY_MI2S_RX, 1, 0, msm_routing_get_port_mixer,
+	msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("QUAT_MI2S_RX", MSM_BACKEND_DAI_SLIMBUS_0_RX,
+	MSM_BACKEND_DAI_QUATERNARY_MI2S_RX, 1, 0, msm_routing_get_port_mixer,
+	msm_routing_put_port_mixer),
+};
+
+static const struct snd_kcontrol_new aux_pcm_rx_port_mixer_controls[] = {
+	SOC_SINGLE_EXT("AUX_PCM_UL_TX", MSM_BACKEND_DAI_AUXPCM_RX,
+	MSM_BACKEND_DAI_AUXPCM_TX, 1, 0, msm_routing_get_port_mixer,
+	msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("SLIM_0_TX", MSM_BACKEND_DAI_AUXPCM_RX,
+	MSM_BACKEND_DAI_SLIMBUS_0_TX, 1, 0, msm_routing_get_port_mixer,
+	msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("SLIM_1_TX", MSM_BACKEND_DAI_AUXPCM_RX,
+	MSM_BACKEND_DAI_SLIMBUS_1_TX, 1, 0, msm_routing_get_port_mixer,
+	msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("SEC_AUX_PCM_UL_TX", MSM_BACKEND_DAI_AUXPCM_RX,
+	MSM_BACKEND_DAI_SEC_AUXPCM_TX, 1, 0, msm_routing_get_port_mixer,
+	msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("QUAT_TDM_TX_0", MSM_BACKEND_DAI_AUXPCM_RX,
+	MSM_BACKEND_DAI_QUAT_TDM_TX_0, 1, 0, msm_routing_get_port_mixer,
+	msm_routing_put_port_mixer),
+};
+
+static const struct snd_kcontrol_new sec_auxpcm_rx_port_mixer_controls[] = {
+	SOC_SINGLE_EXT("SEC_AUX_PCM_UL_TX", MSM_BACKEND_DAI_SEC_AUXPCM_RX,
+	MSM_BACKEND_DAI_SEC_AUXPCM_TX, 1, 0, msm_routing_get_port_mixer,
+	msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("SLIM_0_TX", MSM_BACKEND_DAI_SEC_AUXPCM_RX,
+	MSM_BACKEND_DAI_SLIMBUS_0_TX, 1, 0, msm_routing_get_port_mixer,
+	msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("AUX_PCM_UL_TX", MSM_BACKEND_DAI_SEC_AUXPCM_RX,
+	MSM_BACKEND_DAI_AUXPCM_TX, 1, 0, msm_routing_get_port_mixer,
+	msm_routing_put_port_mixer),
+};
+
+static const struct snd_kcontrol_new tert_auxpcm_rx_port_mixer_controls[] = {
+	SOC_SINGLE_EXT("TERT_AUXPCM_UL_TX", MSM_BACKEND_DAI_TERT_AUXPCM_RX,
+	MSM_BACKEND_DAI_TERT_AUXPCM_TX, 1, 0, msm_routing_get_port_mixer,
+	msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("SLIM_0_TX", MSM_BACKEND_DAI_TERT_AUXPCM_RX,
+	MSM_BACKEND_DAI_SLIMBUS_0_TX, 1, 0, msm_routing_get_port_mixer,
+	msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("AUX_PCM_UL_TX", MSM_BACKEND_DAI_TERT_AUXPCM_RX,
+	MSM_BACKEND_DAI_AUXPCM_TX, 1, 0, msm_routing_get_port_mixer,
+	msm_routing_put_port_mixer),
+};
+
+static const struct snd_kcontrol_new quat_auxpcm_rx_port_mixer_controls[] = {
+	SOC_SINGLE_EXT("QUAT_AUXPCM_UL_TX", MSM_BACKEND_DAI_QUAT_AUXPCM_RX,
+	MSM_BACKEND_DAI_QUAT_AUXPCM_TX, 1, 0, msm_routing_get_port_mixer,
+	msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("SLIM_0_TX", MSM_BACKEND_DAI_QUAT_AUXPCM_RX,
+	MSM_BACKEND_DAI_SLIMBUS_0_TX, 1, 0, msm_routing_get_port_mixer,
+	msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("AUX_PCM_UL_TX", MSM_BACKEND_DAI_QUAT_AUXPCM_RX,
+	MSM_BACKEND_DAI_AUXPCM_TX, 1, 0, msm_routing_get_port_mixer,
+	msm_routing_put_port_mixer),
+};
+
+static const struct snd_kcontrol_new sbus_1_rx_port_mixer_controls[] = {
+	SOC_SINGLE_EXT("INTERNAL_BT_SCO_TX", MSM_BACKEND_DAI_SLIMBUS_1_RX,
+	MSM_BACKEND_DAI_INT_BT_SCO_TX, 1, 0, msm_routing_get_port_mixer,
+	msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("AFE_PCM_TX", MSM_BACKEND_DAI_SLIMBUS_1_RX,
+	MSM_BACKEND_DAI_AFE_PCM_TX, 1, 0, msm_routing_get_port_mixer,
+	msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("AUX_PCM_UL_TX", MSM_BACKEND_DAI_SLIMBUS_1_RX,
+	MSM_BACKEND_DAI_AUXPCM_TX, 1, 0, msm_routing_get_port_mixer,
+	msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("SEC_AUX_PCM_UL_TX", MSM_BACKEND_DAI_SLIMBUS_1_RX,
+	MSM_BACKEND_DAI_SEC_AUXPCM_TX, 1, 0, msm_routing_get_port_mixer,
+	msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("TERT_AUXPCM_UL_TX", MSM_BACKEND_DAI_SLIMBUS_1_RX,
+	MSM_BACKEND_DAI_TERT_AUXPCM_TX, 1, 0, msm_routing_get_port_mixer,
+	msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("QUAT_AUXPCM_UL_TX", MSM_BACKEND_DAI_SLIMBUS_1_RX,
+	MSM_BACKEND_DAI_QUAT_AUXPCM_TX, 1, 0, msm_routing_get_port_mixer,
+	msm_routing_put_port_mixer),
+};
+
+static const struct snd_kcontrol_new sbus_3_rx_port_mixer_controls[] = {
+	SOC_SINGLE_EXT("INTERNAL_BT_SCO_RX", MSM_BACKEND_DAI_SLIMBUS_3_RX,
+	MSM_BACKEND_DAI_INT_BT_SCO_RX, 1, 0, msm_routing_get_port_mixer,
+	msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("MI2S_TX", MSM_BACKEND_DAI_SLIMBUS_3_RX,
+	MSM_BACKEND_DAI_MI2S_TX, 1, 0, msm_routing_get_port_mixer,
+	msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("AFE_PCM_RX", MSM_BACKEND_DAI_SLIMBUS_3_RX,
+	MSM_BACKEND_DAI_AFE_PCM_RX, 1, 0, msm_routing_get_port_mixer,
+	msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("AUX_PCM_RX", MSM_BACKEND_DAI_SLIMBUS_3_RX,
+	MSM_BACKEND_DAI_AUXPCM_RX, 1, 0, msm_routing_get_port_mixer,
+	msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("SLIM_0_RX", MSM_BACKEND_DAI_SLIMBUS_3_RX,
+	MSM_BACKEND_DAI_SLIMBUS_0_RX, 1, 0, msm_routing_get_port_mixer,
+	msm_routing_put_port_mixer),
+};
+
+static const struct snd_kcontrol_new sbus_6_rx_port_mixer_controls[] = {
+	SOC_SINGLE_EXT("INTERNAL_FM_TX", MSM_BACKEND_DAI_SLIMBUS_6_RX,
+	MSM_BACKEND_DAI_INT_FM_TX, 1, 0, msm_routing_get_port_mixer,
+	msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("SLIM_0_TX", MSM_BACKEND_DAI_SLIMBUS_6_RX,
+	MSM_BACKEND_DAI_SLIMBUS_0_TX, 1, 0, msm_routing_get_port_mixer,
+	msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("SLIM_1_TX", MSM_BACKEND_DAI_SLIMBUS_6_RX,
+	MSM_BACKEND_DAI_SLIMBUS_1_TX, 1, 0, msm_routing_get_port_mixer,
+	msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("SLIM_7_TX", MSM_BACKEND_DAI_SLIMBUS_6_RX,
+	MSM_BACKEND_DAI_SLIMBUS_7_TX, 1, 0, msm_routing_get_port_mixer,
+	msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("SLIM_8_TX", MSM_BACKEND_DAI_SLIMBUS_6_RX,
+	MSM_BACKEND_DAI_SLIMBUS_8_TX, 1, 0, msm_routing_get_port_mixer,
+	msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("AUX_PCM_UL_TX", MSM_BACKEND_DAI_SLIMBUS_6_RX,
+	MSM_BACKEND_DAI_AUXPCM_TX, 1, 0, msm_routing_get_port_mixer,
+	msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("SEC_AUX_PCM_UL_TX", MSM_BACKEND_DAI_SLIMBUS_6_RX,
+	MSM_BACKEND_DAI_SEC_AUXPCM_TX, 1, 0, msm_routing_get_port_mixer,
+	msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("MI2S_TX", MSM_BACKEND_DAI_SLIMBUS_6_RX,
+	MSM_BACKEND_DAI_MI2S_TX, 1, 0, msm_routing_get_port_mixer,
+	msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("PRI_MI2S_TX", MSM_BACKEND_DAI_SLIMBUS_6_RX,
+	MSM_BACKEND_DAI_PRI_MI2S_TX, 1, 0, msm_routing_get_port_mixer,
+	msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("TERT_MI2S_TX", MSM_BACKEND_DAI_SLIMBUS_6_RX,
+	MSM_BACKEND_DAI_TERTIARY_MI2S_TX, 1, 0, msm_routing_get_port_mixer,
+	msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("QUAT_MI2S_TX", MSM_BACKEND_DAI_SLIMBUS_6_RX,
+	MSM_BACKEND_DAI_QUATERNARY_MI2S_TX, 1, 0, msm_routing_get_port_mixer,
+	msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("INTERNAL_BT_SCO_TX", MSM_BACKEND_DAI_SLIMBUS_6_RX,
+	MSM_BACKEND_DAI_INT_BT_SCO_TX, 1, 0, msm_routing_get_port_mixer,
+	msm_routing_put_port_mixer),
+};
+
+static const struct snd_kcontrol_new bt_sco_rx_port_mixer_controls[] = {
+	SOC_SINGLE_EXT("SLIM_1_TX", MSM_BACKEND_DAI_INT_BT_SCO_RX,
+	MSM_BACKEND_DAI_SLIMBUS_1_TX, 1, 0, msm_routing_get_port_mixer,
+	msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("SLIM_0_TX", MSM_BACKEND_DAI_INT_BT_SCO_RX,
+	MSM_BACKEND_DAI_SLIMBUS_0_TX, 1, 0, msm_routing_get_port_mixer,
+	msm_routing_put_port_mixer),
+};
+
+static const struct snd_kcontrol_new afe_pcm_rx_port_mixer_controls[] = {
+	SOC_SINGLE_EXT("INTERNAL_FM_TX", MSM_BACKEND_DAI_AFE_PCM_RX,
+	MSM_BACKEND_DAI_INT_FM_TX, 1, 0, msm_routing_get_port_mixer,
+	msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("SLIM_1_TX", MSM_BACKEND_DAI_AFE_PCM_RX,
+	MSM_BACKEND_DAI_SLIMBUS_1_TX, 1, 0, msm_routing_get_port_mixer,
+	msm_routing_put_port_mixer),
+};
+
+static const struct snd_kcontrol_new hdmi_rx_port_mixer_controls[] = {
+	SOC_SINGLE_EXT("MI2S_TX", MSM_BACKEND_DAI_HDMI_RX,
+	MSM_BACKEND_DAI_MI2S_TX, 1, 0, msm_routing_get_port_mixer,
+	msm_routing_put_port_mixer),
+};
+
+static const struct snd_kcontrol_new display_port_rx_port_mixer_controls[] = {
+	SOC_SINGLE_EXT("MI2S_TX", MSM_BACKEND_DAI_DISPLAY_PORT_RX,
+	MSM_BACKEND_DAI_MI2S_TX, 1, 0, msm_routing_get_port_mixer,
+	msm_routing_put_port_mixer),
+};
+
+static const struct snd_kcontrol_new sec_i2s_rx_port_mixer_controls[] = {
+	SOC_SINGLE_EXT("MI2S_TX", MSM_BACKEND_DAI_SEC_I2S_RX,
+	MSM_BACKEND_DAI_MI2S_TX, 1, 0, msm_routing_get_port_mixer,
+	msm_routing_put_port_mixer),
+};
+
+static const struct snd_kcontrol_new mi2s_rx_port_mixer_controls[] = {
+	SOC_SINGLE_EXT("SLIM_1_TX", MSM_BACKEND_DAI_MI2S_RX,
+	MSM_BACKEND_DAI_SLIMBUS_1_TX, 1, 0, msm_routing_get_port_mixer,
+	msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("MI2S_TX", MSM_BACKEND_DAI_MI2S_RX,
+	MSM_BACKEND_DAI_MI2S_TX, 1, 0, msm_routing_get_port_mixer,
+	msm_routing_put_port_mixer),
+};
+
+static const struct snd_kcontrol_new primary_mi2s_rx_port_mixer_controls[] = {
+	SOC_SINGLE_EXT("SEC_MI2S_TX", MSM_BACKEND_DAI_PRI_MI2S_RX,
+	MSM_BACKEND_DAI_SECONDARY_MI2S_TX, 1, 0, msm_routing_get_port_mixer,
+	msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("TERT_MI2S_TX", MSM_BACKEND_DAI_PRI_MI2S_RX,
+	MSM_BACKEND_DAI_TERTIARY_MI2S_TX, 1, 0, msm_routing_get_port_mixer,
+	msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("INTERNAL_FM_TX", MSM_BACKEND_DAI_PRI_MI2S_RX,
+	MSM_BACKEND_DAI_INT_FM_TX, 1, 0, msm_routing_get_port_mixer,
+	msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("QUAT_MI2S_TX", MSM_BACKEND_DAI_PRI_MI2S_RX,
+	MSM_BACKEND_DAI_QUATERNARY_MI2S_TX, 1, 0, msm_routing_get_port_mixer,
+	msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("INTERNAL_BT_SCO_TX", MSM_BACKEND_DAI_PRI_MI2S_RX,
+	MSM_BACKEND_DAI_INT_BT_SCO_TX, 1, 0, msm_routing_get_port_mixer,
+	msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("PRI_MI2S_TX", MSM_BACKEND_DAI_PRI_MI2S_RX,
+	MSM_BACKEND_DAI_PRI_MI2S_TX, 1, 0, msm_routing_get_port_mixer,
+	msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("QUIN_MI2S_TX", MSM_BACKEND_DAI_PRI_MI2S_RX,
+	MSM_BACKEND_DAI_QUINARY_MI2S_TX, 1, 0, msm_routing_get_port_mixer,
+	msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("SLIM_0_TX", MSM_BACKEND_DAI_PRI_MI2S_RX,
+	MSM_BACKEND_DAI_SLIMBUS_0_TX, 1, 0, msm_routing_get_port_mixer,
+	msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("SLIM_8_TX", MSM_BACKEND_DAI_PRI_MI2S_RX,
+	MSM_BACKEND_DAI_SLIMBUS_8_TX, 1, 0, msm_routing_get_port_mixer,
+	msm_routing_put_port_mixer),
+};
+
+static const struct snd_kcontrol_new usb_rx_port_mixer_controls[] = {
+	SOC_SINGLE_EXT("USB_AUDIO_TX", MSM_BACKEND_DAI_USB_RX,
+	MSM_BACKEND_DAI_USB_TX, 1, 0, msm_routing_get_port_mixer,
+	msm_routing_put_port_mixer),
+};
+
+static const struct snd_kcontrol_new quat_mi2s_rx_port_mixer_controls[] = {
+	SOC_SINGLE_EXT("PRI_MI2S_TX", MSM_BACKEND_DAI_QUATERNARY_MI2S_RX,
+	MSM_BACKEND_DAI_PRI_MI2S_TX, 1, 0, msm_routing_get_port_mixer,
+	msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("TERT_MI2S_TX", MSM_BACKEND_DAI_QUATERNARY_MI2S_RX,
+	MSM_BACKEND_DAI_TERTIARY_MI2S_TX, 1, 0, msm_routing_get_port_mixer,
+	msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("INTERNAL_FM_TX", MSM_BACKEND_DAI_QUATERNARY_MI2S_RX,
+	MSM_BACKEND_DAI_INT_FM_TX, 1, 0, msm_routing_get_port_mixer,
+	msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("AUX_PCM_UL_TX", MSM_BACKEND_DAI_QUATERNARY_MI2S_RX,
+	MSM_BACKEND_DAI_AUXPCM_TX, 1, 0, msm_routing_get_port_mixer,
+	msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("SLIM_0_TX", MSM_BACKEND_DAI_QUATERNARY_MI2S_RX,
+	MSM_BACKEND_DAI_SLIMBUS_0_TX, 1, 0, msm_routing_get_port_mixer,
+	msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("SEC_MI2S_TX", MSM_BACKEND_DAI_QUATERNARY_MI2S_RX,
+	MSM_BACKEND_DAI_SECONDARY_MI2S_TX, 1, 0, msm_routing_get_port_mixer,
+	msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("QUAT_MI2S_TX", MSM_BACKEND_DAI_QUATERNARY_MI2S_RX,
+	MSM_BACKEND_DAI_QUATERNARY_MI2S_TX, 1, 0, msm_routing_get_port_mixer,
+	msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("SLIM_8_TX", MSM_BACKEND_DAI_QUATERNARY_MI2S_RX,
+	MSM_BACKEND_DAI_SLIMBUS_8_TX, 1, 0, msm_routing_get_port_mixer,
+	msm_routing_put_port_mixer),
+};
+
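+/* The PRI_TDM_RX_0..3 port mixers below expose an identical set of TX sources. */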
+static const struct snd_kcontrol_new pri_tdm_rx_0_port_mixer_controls[] = {
+	SOC_SINGLE_EXT("PRI_MI2S_TX", MSM_BACKEND_DAI_PRI_TDM_RX_0,
+		MSM_BACKEND_DAI_PRI_MI2S_TX, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("SEC_MI2S_TX", MSM_BACKEND_DAI_PRI_TDM_RX_0,
+		MSM_BACKEND_DAI_SECONDARY_MI2S_TX, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("QUAT_MI2S_TX", MSM_BACKEND_DAI_PRI_TDM_RX_0,
+		MSM_BACKEND_DAI_QUATERNARY_MI2S_TX, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("INTERNAL_FM_TX", MSM_BACKEND_DAI_PRI_TDM_RX_0,
+		MSM_BACKEND_DAI_INT_FM_TX, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("INTERNAL_BT_SCO_TX", MSM_BACKEND_DAI_PRI_TDM_RX_0,
+		MSM_BACKEND_DAI_INT_BT_SCO_TX, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("AFE_PCM_TX", MSM_BACKEND_DAI_PRI_TDM_RX_0,
+		MSM_BACKEND_DAI_AFE_PCM_TX, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("AUX_PCM_UL_TX", MSM_BACKEND_DAI_PRI_TDM_RX_0,
+		MSM_BACKEND_DAI_AUXPCM_TX, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("SEC_AUX_PCM_UL_TX", MSM_BACKEND_DAI_PRI_TDM_RX_0,
+		MSM_BACKEND_DAI_SEC_AUXPCM_TX, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("PRI_TDM_TX_0", MSM_BACKEND_DAI_PRI_TDM_RX_0,
+		MSM_BACKEND_DAI_PRI_TDM_TX_0, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("PRI_TDM_TX_1", MSM_BACKEND_DAI_PRI_TDM_RX_0,
+		MSM_BACKEND_DAI_PRI_TDM_TX_1, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("PRI_TDM_TX_2", MSM_BACKEND_DAI_PRI_TDM_RX_0,
+		MSM_BACKEND_DAI_PRI_TDM_TX_2, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("PRI_TDM_TX_3", MSM_BACKEND_DAI_PRI_TDM_RX_0,
+		MSM_BACKEND_DAI_PRI_TDM_TX_3, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("QUAT_TDM_TX_0", MSM_BACKEND_DAI_PRI_TDM_RX_0,
+		MSM_BACKEND_DAI_QUAT_TDM_TX_0, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("QUAT_TDM_TX_1", MSM_BACKEND_DAI_PRI_TDM_RX_0,
+		MSM_BACKEND_DAI_QUAT_TDM_TX_1, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("QUAT_TDM_TX_2", MSM_BACKEND_DAI_PRI_TDM_RX_0,
+		MSM_BACKEND_DAI_QUAT_TDM_TX_2, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("QUAT_TDM_TX_3", MSM_BACKEND_DAI_PRI_TDM_RX_0,
+		MSM_BACKEND_DAI_QUAT_TDM_TX_3, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+};
+
+static const struct snd_kcontrol_new pri_tdm_rx_1_port_mixer_controls[] = {
+	SOC_SINGLE_EXT("PRI_MI2S_TX", MSM_BACKEND_DAI_PRI_TDM_RX_1,
+		MSM_BACKEND_DAI_PRI_MI2S_TX, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("SEC_MI2S_TX", MSM_BACKEND_DAI_PRI_TDM_RX_1,
+		MSM_BACKEND_DAI_SECONDARY_MI2S_TX, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("QUAT_MI2S_TX", MSM_BACKEND_DAI_PRI_TDM_RX_1,
+		MSM_BACKEND_DAI_QUATERNARY_MI2S_TX, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("INTERNAL_FM_TX", MSM_BACKEND_DAI_PRI_TDM_RX_1,
+		MSM_BACKEND_DAI_INT_FM_TX, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("INTERNAL_BT_SCO_TX", MSM_BACKEND_DAI_PRI_TDM_RX_1,
+		MSM_BACKEND_DAI_INT_BT_SCO_TX, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("AFE_PCM_TX", MSM_BACKEND_DAI_PRI_TDM_RX_1,
+		MSM_BACKEND_DAI_AFE_PCM_TX, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("AUX_PCM_UL_TX", MSM_BACKEND_DAI_PRI_TDM_RX_1,
+		MSM_BACKEND_DAI_AUXPCM_TX, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("SEC_AUX_PCM_UL_TX", MSM_BACKEND_DAI_PRI_TDM_RX_1,
+		MSM_BACKEND_DAI_SEC_AUXPCM_TX, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("PRI_TDM_TX_0", MSM_BACKEND_DAI_PRI_TDM_RX_1,
+		MSM_BACKEND_DAI_PRI_TDM_TX_0, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("PRI_TDM_TX_1", MSM_BACKEND_DAI_PRI_TDM_RX_1,
+		MSM_BACKEND_DAI_PRI_TDM_TX_1, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("PRI_TDM_TX_2", MSM_BACKEND_DAI_PRI_TDM_RX_1,
+		MSM_BACKEND_DAI_PRI_TDM_TX_2, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("PRI_TDM_TX_3", MSM_BACKEND_DAI_PRI_TDM_RX_1,
+		MSM_BACKEND_DAI_PRI_TDM_TX_3, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("QUAT_TDM_TX_0", MSM_BACKEND_DAI_PRI_TDM_RX_1,
+		MSM_BACKEND_DAI_QUAT_TDM_TX_0, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("QUAT_TDM_TX_1", MSM_BACKEND_DAI_PRI_TDM_RX_1,
+		MSM_BACKEND_DAI_QUAT_TDM_TX_1, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("QUAT_TDM_TX_2", MSM_BACKEND_DAI_PRI_TDM_RX_1,
+		MSM_BACKEND_DAI_QUAT_TDM_TX_2, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("QUAT_TDM_TX_3", MSM_BACKEND_DAI_PRI_TDM_RX_1,
+		MSM_BACKEND_DAI_QUAT_TDM_TX_3, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+};
+
+static const struct snd_kcontrol_new pri_tdm_rx_2_port_mixer_controls[] = {
+	SOC_SINGLE_EXT("PRI_MI2S_TX", MSM_BACKEND_DAI_PRI_TDM_RX_2,
+		MSM_BACKEND_DAI_PRI_MI2S_TX, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("SEC_MI2S_TX", MSM_BACKEND_DAI_PRI_TDM_RX_2,
+		MSM_BACKEND_DAI_SECONDARY_MI2S_TX, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("QUAT_MI2S_TX", MSM_BACKEND_DAI_PRI_TDM_RX_2,
+		MSM_BACKEND_DAI_QUATERNARY_MI2S_TX, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("INTERNAL_FM_TX", MSM_BACKEND_DAI_PRI_TDM_RX_2,
+		MSM_BACKEND_DAI_INT_FM_TX, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("INTERNAL_BT_SCO_TX", MSM_BACKEND_DAI_PRI_TDM_RX_2,
+		MSM_BACKEND_DAI_INT_BT_SCO_TX, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("AFE_PCM_TX", MSM_BACKEND_DAI_PRI_TDM_RX_2,
+		MSM_BACKEND_DAI_AFE_PCM_TX, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("AUX_PCM_UL_TX", MSM_BACKEND_DAI_PRI_TDM_RX_2,
+		MSM_BACKEND_DAI_AUXPCM_TX, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("SEC_AUX_PCM_UL_TX", MSM_BACKEND_DAI_PRI_TDM_RX_2,
+		MSM_BACKEND_DAI_SEC_AUXPCM_TX, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("PRI_TDM_TX_0", MSM_BACKEND_DAI_PRI_TDM_RX_2,
+		MSM_BACKEND_DAI_PRI_TDM_TX_0, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("PRI_TDM_TX_1", MSM_BACKEND_DAI_PRI_TDM_RX_2,
+		MSM_BACKEND_DAI_PRI_TDM_TX_1, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("PRI_TDM_TX_2", MSM_BACKEND_DAI_PRI_TDM_RX_2,
+		MSM_BACKEND_DAI_PRI_TDM_TX_2, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("PRI_TDM_TX_3", MSM_BACKEND_DAI_PRI_TDM_RX_2,
+		MSM_BACKEND_DAI_PRI_TDM_TX_3, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("QUAT_TDM_TX_0", MSM_BACKEND_DAI_PRI_TDM_RX_2,
+		MSM_BACKEND_DAI_QUAT_TDM_TX_0, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("QUAT_TDM_TX_1", MSM_BACKEND_DAI_PRI_TDM_RX_2,
+		MSM_BACKEND_DAI_QUAT_TDM_TX_1, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("QUAT_TDM_TX_2", MSM_BACKEND_DAI_PRI_TDM_RX_2,
+		MSM_BACKEND_DAI_QUAT_TDM_TX_2, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("QUAT_TDM_TX_3", MSM_BACKEND_DAI_PRI_TDM_RX_2,
+		MSM_BACKEND_DAI_QUAT_TDM_TX_3, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+};
+
+static const struct snd_kcontrol_new pri_tdm_rx_3_port_mixer_controls[] = {
+	SOC_SINGLE_EXT("PRI_MI2S_TX", MSM_BACKEND_DAI_PRI_TDM_RX_3,
+		MSM_BACKEND_DAI_PRI_MI2S_TX, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("SEC_MI2S_TX", MSM_BACKEND_DAI_PRI_TDM_RX_3,
+		MSM_BACKEND_DAI_SECONDARY_MI2S_TX, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("QUAT_MI2S_TX", MSM_BACKEND_DAI_PRI_TDM_RX_3,
+		MSM_BACKEND_DAI_QUATERNARY_MI2S_TX, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("INTERNAL_FM_TX", MSM_BACKEND_DAI_PRI_TDM_RX_3,
+		MSM_BACKEND_DAI_INT_FM_TX, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("INTERNAL_BT_SCO_TX", MSM_BACKEND_DAI_PRI_TDM_RX_3,
+		MSM_BACKEND_DAI_INT_BT_SCO_TX, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("AFE_PCM_TX", MSM_BACKEND_DAI_PRI_TDM_RX_3,
+		MSM_BACKEND_DAI_AFE_PCM_TX, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("AUX_PCM_UL_TX", MSM_BACKEND_DAI_PRI_TDM_RX_3,
+		MSM_BACKEND_DAI_AUXPCM_TX, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("SEC_AUX_PCM_UL_TX", MSM_BACKEND_DAI_PRI_TDM_RX_3,
+		MSM_BACKEND_DAI_SEC_AUXPCM_TX, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("PRI_TDM_TX_0", MSM_BACKEND_DAI_PRI_TDM_RX_3,
+		MSM_BACKEND_DAI_PRI_TDM_TX_0, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("PRI_TDM_TX_1", MSM_BACKEND_DAI_PRI_TDM_RX_3,
+		MSM_BACKEND_DAI_PRI_TDM_TX_1, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("PRI_TDM_TX_2", MSM_BACKEND_DAI_PRI_TDM_RX_3,
+		MSM_BACKEND_DAI_PRI_TDM_TX_2, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("PRI_TDM_TX_3", MSM_BACKEND_DAI_PRI_TDM_RX_3,
+		MSM_BACKEND_DAI_PRI_TDM_TX_3, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("QUAT_TDM_TX_0", MSM_BACKEND_DAI_PRI_TDM_RX_3,
+		MSM_BACKEND_DAI_QUAT_TDM_TX_0, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("QUAT_TDM_TX_1", MSM_BACKEND_DAI_PRI_TDM_RX_3,
+		MSM_BACKEND_DAI_QUAT_TDM_TX_1, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("QUAT_TDM_TX_2", MSM_BACKEND_DAI_PRI_TDM_RX_3,
+		MSM_BACKEND_DAI_QUAT_TDM_TX_2, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("QUAT_TDM_TX_3", MSM_BACKEND_DAI_PRI_TDM_RX_3,
+		MSM_BACKEND_DAI_QUAT_TDM_TX_3, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+};
+
+static const struct snd_kcontrol_new sec_tdm_rx_0_port_mixer_controls[] = {
+	SOC_SINGLE_EXT("PRI_MI2S_TX", MSM_BACKEND_DAI_SEC_TDM_RX_0,
+		MSM_BACKEND_DAI_PRI_MI2S_TX, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("SEC_MI2S_TX", MSM_BACKEND_DAI_SEC_TDM_RX_0,
+		MSM_BACKEND_DAI_SECONDARY_MI2S_TX, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("TERT_MI2S_TX", MSM_BACKEND_DAI_SEC_TDM_RX_3,
+		MSM_BACKEND_DAI_SECONDARY_MI2S_TX, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("QUAT_MI2S_TX", MSM_BACKEND_DAI_SEC_TDM_RX_0,
+		MSM_BACKEND_DAI_QUATERNARY_MI2S_TX, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("INTERNAL_FM_TX", MSM_BACKEND_DAI_SEC_TDM_RX_0,
+		MSM_BACKEND_DAI_INT_FM_TX, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("INTERNAL_BT_SCO_TX", MSM_BACKEND_DAI_SEC_TDM_RX_0,
+		MSM_BACKEND_DAI_INT_BT_SCO_TX, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("AFE_PCM_TX", MSM_BACKEND_DAI_SEC_TDM_RX_0,
+		MSM_BACKEND_DAI_AFE_PCM_TX, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("AUX_PCM_UL_TX", MSM_BACKEND_DAI_SEC_TDM_RX_0,
+		MSM_BACKEND_DAI_AUXPCM_TX, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("SEC_AUX_PCM_UL_TX", MSM_BACKEND_DAI_SEC_TDM_RX_0,
+		MSM_BACKEND_DAI_SEC_AUXPCM_TX, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("SEC_TDM_TX_0", MSM_BACKEND_DAI_SEC_TDM_RX_0,
+		MSM_BACKEND_DAI_SEC_TDM_TX_0, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("SEC_TDM_TX_1", MSM_BACKEND_DAI_SEC_TDM_RX_0,
+		MSM_BACKEND_DAI_SEC_TDM_TX_1, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("SEC_TDM_TX_2", MSM_BACKEND_DAI_SEC_TDM_RX_0,
+		MSM_BACKEND_DAI_SEC_TDM_TX_2, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("SEC_TDM_TX_3", MSM_BACKEND_DAI_SEC_TDM_RX_0,
+		MSM_BACKEND_DAI_SEC_TDM_TX_3, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("QUAT_TDM_TX_0", MSM_BACKEND_DAI_SEC_TDM_RX_0,
+		MSM_BACKEND_DAI_QUAT_TDM_TX_0, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("QUAT_TDM_TX_1", MSM_BACKEND_DAI_SEC_TDM_RX_0,
+		MSM_BACKEND_DAI_QUAT_TDM_TX_1, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("QUAT_TDM_TX_2", MSM_BACKEND_DAI_SEC_TDM_RX_0,
+		MSM_BACKEND_DAI_QUAT_TDM_TX_2, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("QUAT_TDM_TX_3", MSM_BACKEND_DAI_SEC_TDM_RX_0,
+		MSM_BACKEND_DAI_QUAT_TDM_TX_3, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+};
+
+static const struct snd_kcontrol_new sec_tdm_rx_1_port_mixer_controls[] = {
+	SOC_SINGLE_EXT("PRI_MI2S_TX", MSM_BACKEND_DAI_SEC_TDM_RX_1,
+		MSM_BACKEND_DAI_PRI_MI2S_TX, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("SEC_MI2S_TX", MSM_BACKEND_DAI_SEC_TDM_RX_1,
+		MSM_BACKEND_DAI_SECONDARY_MI2S_TX, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("TERT_MI2S_TX", MSM_BACKEND_DAI_SEC_TDM_RX_3,
+		MSM_BACKEND_DAI_SECONDARY_MI2S_TX, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("QUAT_MI2S_TX", MSM_BACKEND_DAI_SEC_TDM_RX_1,
+		MSM_BACKEND_DAI_QUATERNARY_MI2S_TX, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("INTERNAL_FM_TX", MSM_BACKEND_DAI_SEC_TDM_RX_1,
+		MSM_BACKEND_DAI_INT_FM_TX, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("INTERNAL_BT_SCO_TX", MSM_BACKEND_DAI_SEC_TDM_RX_1,
+		MSM_BACKEND_DAI_INT_BT_SCO_TX, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("AFE_PCM_TX", MSM_BACKEND_DAI_SEC_TDM_RX_1,
+		MSM_BACKEND_DAI_AFE_PCM_TX, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("AUX_PCM_UL_TX", MSM_BACKEND_DAI_SEC_TDM_RX_1,
+		MSM_BACKEND_DAI_AUXPCM_TX, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("SEC_AUX_PCM_UL_TX", MSM_BACKEND_DAI_SEC_TDM_RX_1,
+		MSM_BACKEND_DAI_SEC_AUXPCM_TX, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("SEC_TDM_TX_0", MSM_BACKEND_DAI_SEC_TDM_RX_1,
+		MSM_BACKEND_DAI_SEC_TDM_TX_0, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("SEC_TDM_TX_1", MSM_BACKEND_DAI_SEC_TDM_RX_1,
+		MSM_BACKEND_DAI_SEC_TDM_TX_1, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("SEC_TDM_TX_2", MSM_BACKEND_DAI_SEC_TDM_RX_1,
+		MSM_BACKEND_DAI_SEC_TDM_TX_2, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("SEC_TDM_TX_3", MSM_BACKEND_DAI_SEC_TDM_RX_1,
+		MSM_BACKEND_DAI_SEC_TDM_TX_3, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("QUAT_TDM_TX_0", MSM_BACKEND_DAI_SEC_TDM_RX_1,
+		MSM_BACKEND_DAI_QUAT_TDM_TX_0, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("QUAT_TDM_TX_1", MSM_BACKEND_DAI_SEC_TDM_RX_1,
+		MSM_BACKEND_DAI_QUAT_TDM_TX_1, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("QUAT_TDM_TX_2", MSM_BACKEND_DAI_SEC_TDM_RX_1,
+		MSM_BACKEND_DAI_QUAT_TDM_TX_2, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("QUAT_TDM_TX_3", MSM_BACKEND_DAI_SEC_TDM_RX_1,
+		MSM_BACKEND_DAI_QUAT_TDM_TX_3, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+};
+
+static const struct snd_kcontrol_new sec_tdm_rx_2_port_mixer_controls[] = {
+	SOC_SINGLE_EXT("PRI_MI2S_TX", MSM_BACKEND_DAI_SEC_TDM_RX_2,
+		MSM_BACKEND_DAI_PRI_MI2S_TX, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("SEC_MI2S_TX", MSM_BACKEND_DAI_SEC_TDM_RX_2,
+		MSM_BACKEND_DAI_SECONDARY_MI2S_TX, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("TERT_MI2S_TX", MSM_BACKEND_DAI_SEC_TDM_RX_3,
+		MSM_BACKEND_DAI_SECONDARY_MI2S_TX, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("QUAT_MI2S_TX", MSM_BACKEND_DAI_SEC_TDM_RX_2,
+		MSM_BACKEND_DAI_QUATERNARY_MI2S_TX, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("INTERNAL_FM_TX", MSM_BACKEND_DAI_SEC_TDM_RX_2,
+		MSM_BACKEND_DAI_INT_FM_TX, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("INTERNAL_BT_SCO_TX", MSM_BACKEND_DAI_SEC_TDM_RX_2,
+		MSM_BACKEND_DAI_INT_BT_SCO_TX, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("AFE_PCM_TX", MSM_BACKEND_DAI_SEC_TDM_RX_2,
+		MSM_BACKEND_DAI_AFE_PCM_TX, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("AUX_PCM_UL_TX", MSM_BACKEND_DAI_SEC_TDM_RX_2,
+		MSM_BACKEND_DAI_AUXPCM_TX, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("SEC_AUX_PCM_UL_TX", MSM_BACKEND_DAI_SEC_TDM_RX_2,
+		MSM_BACKEND_DAI_SEC_AUXPCM_TX, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("SEC_TDM_TX_0", MSM_BACKEND_DAI_SEC_TDM_RX_2,
+		MSM_BACKEND_DAI_SEC_TDM_TX_0, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("SEC_TDM_TX_1", MSM_BACKEND_DAI_SEC_TDM_RX_2,
+		MSM_BACKEND_DAI_SEC_TDM_TX_1, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("SEC_TDM_TX_2", MSM_BACKEND_DAI_SEC_TDM_RX_2,
+		MSM_BACKEND_DAI_SEC_TDM_TX_2, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("SEC_TDM_TX_3", MSM_BACKEND_DAI_SEC_TDM_RX_2,
+		MSM_BACKEND_DAI_SEC_TDM_TX_3, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("QUAT_TDM_TX_0", MSM_BACKEND_DAI_SEC_TDM_RX_2,
+		MSM_BACKEND_DAI_QUAT_TDM_TX_0, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("QUAT_TDM_TX_1", MSM_BACKEND_DAI_SEC_TDM_RX_2,
+		MSM_BACKEND_DAI_QUAT_TDM_TX_1, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("QUAT_TDM_TX_2", MSM_BACKEND_DAI_SEC_TDM_RX_2,
+		MSM_BACKEND_DAI_QUAT_TDM_TX_2, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("QUAT_TDM_TX_3", MSM_BACKEND_DAI_SEC_TDM_RX_2,
+		MSM_BACKEND_DAI_QUAT_TDM_TX_3, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+};
+
+static const struct snd_kcontrol_new sec_tdm_rx_3_port_mixer_controls[] = {
+	SOC_SINGLE_EXT("PRI_MI2S_TX", MSM_BACKEND_DAI_SEC_TDM_RX_3,
+		MSM_BACKEND_DAI_PRI_MI2S_TX, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("SEC_MI2S_TX", MSM_BACKEND_DAI_SEC_TDM_RX_3,
+		MSM_BACKEND_DAI_SECONDARY_MI2S_TX, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("TERT_MI2S_TX", MSM_BACKEND_DAI_SEC_TDM_RX_3,
+		MSM_BACKEND_DAI_TERTIARY_MI2S_TX, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("QUAT_MI2S_TX", MSM_BACKEND_DAI_SEC_TDM_RX_3,
+		MSM_BACKEND_DAI_QUATERNARY_MI2S_TX, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("INTERNAL_FM_TX", MSM_BACKEND_DAI_SEC_TDM_RX_3,
+		MSM_BACKEND_DAI_INT_FM_TX, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("INTERNAL_BT_SCO_TX", MSM_BACKEND_DAI_SEC_TDM_RX_3,
+		MSM_BACKEND_DAI_INT_BT_SCO_TX, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("AFE_PCM_TX", MSM_BACKEND_DAI_SEC_TDM_RX_3,
+		MSM_BACKEND_DAI_AFE_PCM_TX, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("AUX_PCM_UL_TX", MSM_BACKEND_DAI_SEC_TDM_RX_3,
+		MSM_BACKEND_DAI_AUXPCM_TX, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("SEC_AUX_PCM_UL_TX", MSM_BACKEND_DAI_SEC_TDM_RX_3,
+		MSM_BACKEND_DAI_SEC_AUXPCM_TX, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("SEC_TDM_TX_0", MSM_BACKEND_DAI_SEC_TDM_RX_3,
+		MSM_BACKEND_DAI_SEC_TDM_TX_0, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("SEC_TDM_TX_1", MSM_BACKEND_DAI_SEC_TDM_RX_3,
+		MSM_BACKEND_DAI_SEC_TDM_TX_1, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("SEC_TDM_TX_2", MSM_BACKEND_DAI_SEC_TDM_RX_3,
+		MSM_BACKEND_DAI_SEC_TDM_TX_2, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("SEC_TDM_TX_3", MSM_BACKEND_DAI_SEC_TDM_RX_3,
+		MSM_BACKEND_DAI_SEC_TDM_TX_3, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("QUAT_TDM_TX_0", MSM_BACKEND_DAI_SEC_TDM_RX_3,
+		MSM_BACKEND_DAI_QUAT_TDM_TX_0, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("QUAT_TDM_TX_1", MSM_BACKEND_DAI_SEC_TDM_RX_3,
+		MSM_BACKEND_DAI_QUAT_TDM_TX_1, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("QUAT_TDM_TX_2", MSM_BACKEND_DAI_SEC_TDM_RX_3,
+		MSM_BACKEND_DAI_QUAT_TDM_TX_2, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("QUAT_TDM_TX_3", MSM_BACKEND_DAI_SEC_TDM_RX_3,
+		MSM_BACKEND_DAI_QUAT_TDM_TX_3, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+};
+
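+/*
+ * Tertiary TDM RX port mixers: same pattern as above with
+ * TERT_TDM_RX_0..3 as the destinations.
+ */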
+static const struct snd_kcontrol_new tert_tdm_rx_0_port_mixer_controls[] = {
+	SOC_SINGLE_EXT("PRI_MI2S_TX", MSM_BACKEND_DAI_TERT_TDM_RX_0,
+		MSM_BACKEND_DAI_PRI_MI2S_TX, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("SEC_MI2S_TX", MSM_BACKEND_DAI_TERT_TDM_RX_0,
+		MSM_BACKEND_DAI_SECONDARY_MI2S_TX, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("QUAT_MI2S_TX", MSM_BACKEND_DAI_TERT_TDM_RX_0,
+		MSM_BACKEND_DAI_QUATERNARY_MI2S_TX, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("INTERNAL_FM_TX", MSM_BACKEND_DAI_TERT_TDM_RX_0,
+		MSM_BACKEND_DAI_INT_FM_TX, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("INTERNAL_BT_SCO_TX", MSM_BACKEND_DAI_TERT_TDM_RX_0,
+		MSM_BACKEND_DAI_INT_BT_SCO_TX, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("AFE_PCM_TX", MSM_BACKEND_DAI_TERT_TDM_RX_0,
+		MSM_BACKEND_DAI_AFE_PCM_TX, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("AUX_PCM_UL_TX", MSM_BACKEND_DAI_TERT_TDM_RX_0,
+		MSM_BACKEND_DAI_AUXPCM_TX, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("SEC_AUX_PCM_UL_TX", MSM_BACKEND_DAI_TERT_TDM_RX_0,
+		MSM_BACKEND_DAI_SEC_AUXPCM_TX, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("TERT_TDM_TX_0", MSM_BACKEND_DAI_TERT_TDM_RX_0,
+		MSM_BACKEND_DAI_TERT_TDM_TX_0, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("TERT_TDM_TX_1", MSM_BACKEND_DAI_TERT_TDM_RX_0,
+		MSM_BACKEND_DAI_TERT_TDM_TX_1, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("TERT_TDM_TX_2", MSM_BACKEND_DAI_TERT_TDM_RX_0,
+		MSM_BACKEND_DAI_TERT_TDM_TX_2, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("TERT_TDM_TX_3", MSM_BACKEND_DAI_TERT_TDM_RX_0,
+		MSM_BACKEND_DAI_TERT_TDM_TX_3, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("QUAT_TDM_TX_0", MSM_BACKEND_DAI_TERT_TDM_RX_0,
+		MSM_BACKEND_DAI_QUAT_TDM_TX_0, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("QUAT_TDM_TX_1", MSM_BACKEND_DAI_TERT_TDM_RX_0,
+		MSM_BACKEND_DAI_QUAT_TDM_TX_1, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("QUAT_TDM_TX_2", MSM_BACKEND_DAI_TERT_TDM_RX_0,
+		MSM_BACKEND_DAI_QUAT_TDM_TX_2, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("QUAT_TDM_TX_3", MSM_BACKEND_DAI_TERT_TDM_RX_0,
+		MSM_BACKEND_DAI_QUAT_TDM_TX_3, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+};
+
+static const struct snd_kcontrol_new tert_tdm_rx_1_port_mixer_controls[] = {
+	SOC_SINGLE_EXT("PRI_MI2S_TX", MSM_BACKEND_DAI_TERT_TDM_RX_1,
+		MSM_BACKEND_DAI_PRI_MI2S_TX, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("SEC_MI2S_TX", MSM_BACKEND_DAI_TERT_TDM_RX_1,
+		MSM_BACKEND_DAI_SECONDARY_MI2S_TX, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("QUAT_MI2S_TX", MSM_BACKEND_DAI_TERT_TDM_RX_1,
+		MSM_BACKEND_DAI_QUATERNARY_MI2S_TX, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("INTERNAL_FM_TX", MSM_BACKEND_DAI_TERT_TDM_RX_1,
+		MSM_BACKEND_DAI_INT_FM_TX, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("INTERNAL_BT_SCO_TX", MSM_BACKEND_DAI_TERT_TDM_RX_1,
+		MSM_BACKEND_DAI_INT_BT_SCO_TX, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("AFE_PCM_TX", MSM_BACKEND_DAI_TERT_TDM_RX_1,
+		MSM_BACKEND_DAI_AFE_PCM_TX, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("AUX_PCM_UL_TX", MSM_BACKEND_DAI_TERT_TDM_RX_1,
+		MSM_BACKEND_DAI_AUXPCM_TX, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("SEC_AUX_PCM_UL_TX", MSM_BACKEND_DAI_TERT_TDM_RX_1,
+		MSM_BACKEND_DAI_SEC_AUXPCM_TX, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("TERT_TDM_TX_0", MSM_BACKEND_DAI_TERT_TDM_RX_1,
+		MSM_BACKEND_DAI_TERT_TDM_TX_0, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("TERT_TDM_TX_1", MSM_BACKEND_DAI_TERT_TDM_RX_1,
+		MSM_BACKEND_DAI_TERT_TDM_TX_1, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("TERT_TDM_TX_2", MSM_BACKEND_DAI_TERT_TDM_RX_1,
+		MSM_BACKEND_DAI_TERT_TDM_TX_2, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("TERT_TDM_TX_3", MSM_BACKEND_DAI_TERT_TDM_RX_1,
+		MSM_BACKEND_DAI_TERT_TDM_TX_3, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("QUAT_TDM_TX_0", MSM_BACKEND_DAI_TERT_TDM_RX_1,
+		MSM_BACKEND_DAI_QUAT_TDM_TX_0, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("QUAT_TDM_TX_1", MSM_BACKEND_DAI_TERT_TDM_RX_1,
+		MSM_BACKEND_DAI_QUAT_TDM_TX_1, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("QUAT_TDM_TX_2", MSM_BACKEND_DAI_TERT_TDM_RX_1,
+		MSM_BACKEND_DAI_QUAT_TDM_TX_2, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("QUAT_TDM_TX_3", MSM_BACKEND_DAI_TERT_TDM_RX_1,
+		MSM_BACKEND_DAI_QUAT_TDM_TX_3, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+};
+
+static const struct snd_kcontrol_new tert_tdm_rx_2_port_mixer_controls[] = {
+	SOC_SINGLE_EXT("PRI_MI2S_TX", MSM_BACKEND_DAI_TERT_TDM_RX_2,
+		MSM_BACKEND_DAI_PRI_MI2S_TX, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("SEC_MI2S_TX", MSM_BACKEND_DAI_TERT_TDM_RX_2,
+		MSM_BACKEND_DAI_SECONDARY_MI2S_TX, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("QUAT_MI2S_TX", MSM_BACKEND_DAI_TERT_TDM_RX_2,
+		MSM_BACKEND_DAI_QUATERNARY_MI2S_TX, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("INTERNAL_FM_TX", MSM_BACKEND_DAI_TERT_TDM_RX_2,
+		MSM_BACKEND_DAI_INT_FM_TX, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("INTERNAL_BT_SCO_TX", MSM_BACKEND_DAI_TERT_TDM_RX_2,
+		MSM_BACKEND_DAI_INT_BT_SCO_TX, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("AFE_PCM_TX", MSM_BACKEND_DAI_TERT_TDM_RX_2,
+		MSM_BACKEND_DAI_AFE_PCM_TX, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("AUX_PCM_UL_TX", MSM_BACKEND_DAI_TERT_TDM_RX_2,
+		MSM_BACKEND_DAI_AUXPCM_TX, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("SEC_AUX_PCM_UL_TX", MSM_BACKEND_DAI_TERT_TDM_RX_2,
+		MSM_BACKEND_DAI_SEC_AUXPCM_TX, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("TERT_TDM_TX_0", MSM_BACKEND_DAI_TERT_TDM_RX_2,
+		MSM_BACKEND_DAI_TERT_TDM_TX_0, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("TERT_TDM_TX_1", MSM_BACKEND_DAI_TERT_TDM_RX_2,
+		MSM_BACKEND_DAI_TERT_TDM_TX_1, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("TERT_TDM_TX_2", MSM_BACKEND_DAI_TERT_TDM_RX_2,
+		MSM_BACKEND_DAI_TERT_TDM_TX_2, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("TERT_TDM_TX_3", MSM_BACKEND_DAI_TERT_TDM_RX_2,
+		MSM_BACKEND_DAI_TERT_TDM_TX_3, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("QUAT_TDM_TX_0", MSM_BACKEND_DAI_TERT_TDM_RX_2,
+		MSM_BACKEND_DAI_QUAT_TDM_TX_0, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("QUAT_TDM_TX_1", MSM_BACKEND_DAI_TERT_TDM_RX_2,
+		MSM_BACKEND_DAI_QUAT_TDM_TX_1, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("QUAT_TDM_TX_2", MSM_BACKEND_DAI_TERT_TDM_RX_2,
+		MSM_BACKEND_DAI_QUAT_TDM_TX_2, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("QUAT_TDM_TX_3", MSM_BACKEND_DAI_TERT_TDM_RX_2,
+		MSM_BACKEND_DAI_QUAT_TDM_TX_3, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+};
+
+static const struct snd_kcontrol_new tert_tdm_rx_3_port_mixer_controls[] = {
+	SOC_SINGLE_EXT("PRI_MI2S_TX", MSM_BACKEND_DAI_TERT_TDM_RX_3,
+		MSM_BACKEND_DAI_PRI_MI2S_TX, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("SEC_MI2S_TX", MSM_BACKEND_DAI_TERT_TDM_RX_3,
+		MSM_BACKEND_DAI_SECONDARY_MI2S_TX, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("QUAT_MI2S_TX", MSM_BACKEND_DAI_TERT_TDM_RX_3,
+		MSM_BACKEND_DAI_QUATERNARY_MI2S_TX, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("INTERNAL_FM_TX", MSM_BACKEND_DAI_TERT_TDM_RX_3,
+		MSM_BACKEND_DAI_INT_FM_TX, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("INTERNAL_BT_SCO_TX", MSM_BACKEND_DAI_TERT_TDM_RX_3,
+		MSM_BACKEND_DAI_INT_BT_SCO_TX, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("AFE_PCM_TX", MSM_BACKEND_DAI_TERT_TDM_RX_3,
+		MSM_BACKEND_DAI_AFE_PCM_TX, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("AUX_PCM_UL_TX", MSM_BACKEND_DAI_TERT_TDM_RX_3,
+		MSM_BACKEND_DAI_AUXPCM_TX, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("SEC_AUX_PCM_UL_TX", MSM_BACKEND_DAI_TERT_TDM_RX_3,
+		MSM_BACKEND_DAI_SEC_AUXPCM_TX, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("TERT_TDM_TX_0", MSM_BACKEND_DAI_TERT_TDM_RX_3,
+		MSM_BACKEND_DAI_TERT_TDM_TX_0, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("TERT_TDM_TX_1", MSM_BACKEND_DAI_TERT_TDM_RX_3,
+		MSM_BACKEND_DAI_TERT_TDM_TX_1, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("TERT_TDM_TX_2", MSM_BACKEND_DAI_TERT_TDM_RX_3,
+		MSM_BACKEND_DAI_TERT_TDM_TX_2, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("TERT_TDM_TX_3", MSM_BACKEND_DAI_TERT_TDM_RX_3,
+		MSM_BACKEND_DAI_TERT_TDM_TX_3, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("QUAT_TDM_TX_0", MSM_BACKEND_DAI_TERT_TDM_RX_3,
+		MSM_BACKEND_DAI_QUAT_TDM_TX_0, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("QUAT_TDM_TX_1", MSM_BACKEND_DAI_TERT_TDM_RX_3,
+		MSM_BACKEND_DAI_QUAT_TDM_TX_1, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("QUAT_TDM_TX_2", MSM_BACKEND_DAI_TERT_TDM_RX_3,
+		MSM_BACKEND_DAI_QUAT_TDM_TX_2, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("QUAT_TDM_TX_3", MSM_BACKEND_DAI_TERT_TDM_RX_3,
+		MSM_BACKEND_DAI_QUAT_TDM_TX_3, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+};
+
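+/*
+ * Quaternary TDM RX port mixers: destinations QUAT_TDM_RX_0..3, with
+ * the primary and tertiary TDM TX ports among the selectable sources.
+ */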
+static const struct snd_kcontrol_new quat_tdm_rx_0_port_mixer_controls[] = {
+	SOC_SINGLE_EXT("PRI_MI2S_TX", MSM_BACKEND_DAI_QUAT_TDM_RX_0,
+		MSM_BACKEND_DAI_PRI_MI2S_TX, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("SEC_MI2S_TX", MSM_BACKEND_DAI_QUAT_TDM_RX_0,
+		MSM_BACKEND_DAI_SECONDARY_MI2S_TX, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("TERT_MI2S_TX", MSM_BACKEND_DAI_QUAT_TDM_RX_0,
+		MSM_BACKEND_DAI_TERTIARY_MI2S_TX, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("INTERNAL_FM_TX", MSM_BACKEND_DAI_QUAT_TDM_RX_0,
+		MSM_BACKEND_DAI_INT_FM_TX, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("INTERNAL_BT_SCO_TX", MSM_BACKEND_DAI_QUAT_TDM_RX_0,
+		MSM_BACKEND_DAI_INT_BT_SCO_TX, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("AFE_PCM_TX", MSM_BACKEND_DAI_QUAT_TDM_RX_0,
+		MSM_BACKEND_DAI_AFE_PCM_TX, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("AUX_PCM_UL_TX", MSM_BACKEND_DAI_QUAT_TDM_RX_0,
+		MSM_BACKEND_DAI_AUXPCM_TX, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("SEC_AUX_PCM_UL_TX", MSM_BACKEND_DAI_QUAT_TDM_RX_0,
+		MSM_BACKEND_DAI_SEC_AUXPCM_TX, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("PRI_TDM_TX_0", MSM_BACKEND_DAI_QUAT_TDM_RX_0,
+		MSM_BACKEND_DAI_PRI_TDM_TX_0, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("PRI_TDM_TX_1", MSM_BACKEND_DAI_QUAT_TDM_RX_0,
+		MSM_BACKEND_DAI_PRI_TDM_TX_1, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("PRI_TDM_TX_2", MSM_BACKEND_DAI_QUAT_TDM_RX_0,
+		MSM_BACKEND_DAI_PRI_TDM_TX_2, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("PRI_TDM_TX_3", MSM_BACKEND_DAI_QUAT_TDM_RX_0,
+		MSM_BACKEND_DAI_PRI_TDM_TX_3, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("TERT_TDM_TX_0", MSM_BACKEND_DAI_QUAT_TDM_RX_0,
+		MSM_BACKEND_DAI_TERT_TDM_TX_0, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("TERT_TDM_TX_1", MSM_BACKEND_DAI_QUAT_TDM_RX_0,
+		MSM_BACKEND_DAI_TERT_TDM_TX_1, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("TERT_TDM_TX_2", MSM_BACKEND_DAI_QUAT_TDM_RX_0,
+		MSM_BACKEND_DAI_TERT_TDM_TX_2, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("TERT_TDM_TX_3", MSM_BACKEND_DAI_QUAT_TDM_RX_0,
+		MSM_BACKEND_DAI_TERT_TDM_TX_3, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("QUAT_TDM_TX_0", MSM_BACKEND_DAI_QUAT_TDM_RX_0,
+		MSM_BACKEND_DAI_QUAT_TDM_TX_0, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("QUAT_TDM_TX_1", MSM_BACKEND_DAI_QUAT_TDM_RX_0,
+		MSM_BACKEND_DAI_QUAT_TDM_TX_1, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("QUAT_TDM_TX_2", MSM_BACKEND_DAI_QUAT_TDM_RX_0,
+		MSM_BACKEND_DAI_QUAT_TDM_TX_2, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("QUAT_TDM_TX_3", MSM_BACKEND_DAI_QUAT_TDM_RX_0,
+		MSM_BACKEND_DAI_QUAT_TDM_TX_3, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+};
+
+static const struct snd_kcontrol_new quat_tdm_rx_1_port_mixer_controls[] = {
+	SOC_SINGLE_EXT("PRI_MI2S_TX", MSM_BACKEND_DAI_QUAT_TDM_RX_1,
+		MSM_BACKEND_DAI_PRI_MI2S_TX, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("SEC_MI2S_TX", MSM_BACKEND_DAI_QUAT_TDM_RX_1,
+		MSM_BACKEND_DAI_SECONDARY_MI2S_TX, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("TERT_MI2S_TX", MSM_BACKEND_DAI_QUAT_TDM_RX_1,
+		MSM_BACKEND_DAI_TERTIARY_MI2S_TX, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("INTERNAL_FM_TX", MSM_BACKEND_DAI_QUAT_TDM_RX_1,
+		MSM_BACKEND_DAI_INT_FM_TX, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("INTERNAL_BT_SCO_TX", MSM_BACKEND_DAI_QUAT_TDM_RX_1,
+		MSM_BACKEND_DAI_INT_BT_SCO_TX, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("AFE_PCM_TX", MSM_BACKEND_DAI_QUAT_TDM_RX_1,
+		MSM_BACKEND_DAI_AFE_PCM_TX, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("AUX_PCM_UL_TX", MSM_BACKEND_DAI_QUAT_TDM_RX_1,
+		MSM_BACKEND_DAI_AUXPCM_TX, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("SEC_AUX_PCM_UL_TX", MSM_BACKEND_DAI_QUAT_TDM_RX_1,
+		MSM_BACKEND_DAI_SEC_AUXPCM_TX, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("PRI_TDM_TX_0", MSM_BACKEND_DAI_QUAT_TDM_RX_1,
+		MSM_BACKEND_DAI_PRI_TDM_TX_0, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("PRI_TDM_TX_1", MSM_BACKEND_DAI_QUAT_TDM_RX_1,
+		MSM_BACKEND_DAI_PRI_TDM_TX_1, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("PRI_TDM_TX_2", MSM_BACKEND_DAI_QUAT_TDM_RX_1,
+		MSM_BACKEND_DAI_PRI_TDM_TX_2, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("PRI_TDM_TX_3", MSM_BACKEND_DAI_QUAT_TDM_RX_1,
+		MSM_BACKEND_DAI_PRI_TDM_TX_3, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("TERT_TDM_TX_0", MSM_BACKEND_DAI_QUAT_TDM_RX_1,
+		MSM_BACKEND_DAI_TERT_TDM_TX_0, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("TERT_TDM_TX_1", MSM_BACKEND_DAI_QUAT_TDM_RX_1,
+		MSM_BACKEND_DAI_TERT_TDM_TX_1, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("TERT_TDM_TX_2", MSM_BACKEND_DAI_QUAT_TDM_RX_1,
+		MSM_BACKEND_DAI_TERT_TDM_TX_2, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("TERT_TDM_TX_3", MSM_BACKEND_DAI_QUAT_TDM_RX_1,
+		MSM_BACKEND_DAI_TERT_TDM_TX_3, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("QUAT_TDM_TX_0", MSM_BACKEND_DAI_QUAT_TDM_RX_1,
+		MSM_BACKEND_DAI_QUAT_TDM_TX_0, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("QUAT_TDM_TX_1", MSM_BACKEND_DAI_QUAT_TDM_RX_1,
+		MSM_BACKEND_DAI_QUAT_TDM_TX_1, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("QUAT_TDM_TX_2", MSM_BACKEND_DAI_QUAT_TDM_RX_1,
+		MSM_BACKEND_DAI_QUAT_TDM_TX_2, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("QUAT_TDM_TX_3", MSM_BACKEND_DAI_QUAT_TDM_RX_1,
+		MSM_BACKEND_DAI_QUAT_TDM_TX_3, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+};
+
+static const struct snd_kcontrol_new quat_tdm_rx_2_port_mixer_controls[] = {
+	SOC_SINGLE_EXT("PRI_MI2S_TX", MSM_BACKEND_DAI_QUAT_TDM_RX_2,
+		MSM_BACKEND_DAI_PRI_MI2S_TX, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("SEC_MI2S_TX", MSM_BACKEND_DAI_QUAT_TDM_RX_2,
+		MSM_BACKEND_DAI_SECONDARY_MI2S_TX, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("TERT_MI2S_TX", MSM_BACKEND_DAI_QUAT_TDM_RX_2,
+		MSM_BACKEND_DAI_TERTIARY_MI2S_TX, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("INTERNAL_FM_TX", MSM_BACKEND_DAI_QUAT_TDM_RX_2,
+		MSM_BACKEND_DAI_INT_FM_TX, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("INTERNAL_BT_SCO_TX", MSM_BACKEND_DAI_QUAT_TDM_RX_2,
+		MSM_BACKEND_DAI_INT_BT_SCO_TX, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("AFE_PCM_TX", MSM_BACKEND_DAI_QUAT_TDM_RX_2,
+		MSM_BACKEND_DAI_AFE_PCM_TX, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("AUX_PCM_UL_TX", MSM_BACKEND_DAI_QUAT_TDM_RX_2,
+		MSM_BACKEND_DAI_AUXPCM_TX, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("SEC_AUX_PCM_UL_TX", MSM_BACKEND_DAI_QUAT_TDM_RX_2,
+		MSM_BACKEND_DAI_SEC_AUXPCM_TX, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("PRI_TDM_TX_0", MSM_BACKEND_DAI_QUAT_TDM_RX_2,
+		MSM_BACKEND_DAI_PRI_TDM_TX_0, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("PRI_TDM_TX_1", MSM_BACKEND_DAI_QUAT_TDM_RX_2,
+		MSM_BACKEND_DAI_PRI_TDM_TX_1, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("PRI_TDM_TX_2", MSM_BACKEND_DAI_QUAT_TDM_RX_2,
+		MSM_BACKEND_DAI_PRI_TDM_TX_2, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("PRI_TDM_TX_3", MSM_BACKEND_DAI_QUAT_TDM_RX_2,
+		MSM_BACKEND_DAI_PRI_TDM_TX_3, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("TERT_TDM_TX_0", MSM_BACKEND_DAI_QUAT_TDM_RX_2,
+		MSM_BACKEND_DAI_TERT_TDM_TX_0, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("TERT_TDM_TX_1", MSM_BACKEND_DAI_QUAT_TDM_RX_2,
+		MSM_BACKEND_DAI_TERT_TDM_TX_1, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("TERT_TDM_TX_2", MSM_BACKEND_DAI_QUAT_TDM_RX_2,
+		MSM_BACKEND_DAI_TERT_TDM_TX_2, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("TERT_TDM_TX_3", MSM_BACKEND_DAI_QUAT_TDM_RX_2,
+		MSM_BACKEND_DAI_TERT_TDM_TX_3, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("QUAT_TDM_TX_0", MSM_BACKEND_DAI_QUAT_TDM_RX_2,
+		MSM_BACKEND_DAI_QUAT_TDM_TX_0, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("QUAT_TDM_TX_1", MSM_BACKEND_DAI_QUAT_TDM_RX_2,
+		MSM_BACKEND_DAI_QUAT_TDM_TX_1, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("QUAT_TDM_TX_2", MSM_BACKEND_DAI_QUAT_TDM_RX_2,
+		MSM_BACKEND_DAI_QUAT_TDM_TX_2, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("QUAT_TDM_TX_3", MSM_BACKEND_DAI_QUAT_TDM_RX_2,
+		MSM_BACKEND_DAI_QUAT_TDM_TX_3, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+};
+
+static const struct snd_kcontrol_new quat_tdm_rx_3_port_mixer_controls[] = {
+	SOC_SINGLE_EXT("PRI_MI2S_TX", MSM_BACKEND_DAI_QUAT_TDM_RX_3,
+		MSM_BACKEND_DAI_PRI_MI2S_TX, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("SEC_MI2S_TX", MSM_BACKEND_DAI_QUAT_TDM_RX_3,
+		MSM_BACKEND_DAI_SECONDARY_MI2S_TX, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("TERT_MI2S_TX", MSM_BACKEND_DAI_QUAT_TDM_RX_3,
+		MSM_BACKEND_DAI_TERTIARY_MI2S_TX, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("INTERNAL_FM_TX", MSM_BACKEND_DAI_QUAT_TDM_RX_3,
+		MSM_BACKEND_DAI_INT_FM_TX, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("INTERNAL_BT_SCO_TX", MSM_BACKEND_DAI_QUAT_TDM_RX_3,
+		MSM_BACKEND_DAI_INT_BT_SCO_TX, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("AFE_PCM_TX", MSM_BACKEND_DAI_QUAT_TDM_RX_3,
+		MSM_BACKEND_DAI_AFE_PCM_TX, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("AUX_PCM_UL_TX", MSM_BACKEND_DAI_QUAT_TDM_RX_3,
+		MSM_BACKEND_DAI_AUXPCM_TX, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("SEC_AUX_PCM_UL_TX", MSM_BACKEND_DAI_QUAT_TDM_RX_3,
+		MSM_BACKEND_DAI_SEC_AUXPCM_TX, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("PRI_TDM_TX_0", MSM_BACKEND_DAI_QUAT_TDM_RX_3,
+		MSM_BACKEND_DAI_PRI_TDM_TX_0, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("PRI_TDM_TX_1", MSM_BACKEND_DAI_QUAT_TDM_RX_3,
+		MSM_BACKEND_DAI_PRI_TDM_TX_1, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("PRI_TDM_TX_2", MSM_BACKEND_DAI_QUAT_TDM_RX_3,
+		MSM_BACKEND_DAI_PRI_TDM_TX_2, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("PRI_TDM_TX_3", MSM_BACKEND_DAI_QUAT_TDM_RX_3,
+		MSM_BACKEND_DAI_PRI_TDM_TX_3, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("TERT_TDM_TX_0", MSM_BACKEND_DAI_QUAT_TDM_RX_3,
+		MSM_BACKEND_DAI_TERT_TDM_TX_0, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("TERT_TDM_TX_1", MSM_BACKEND_DAI_QUAT_TDM_RX_3,
+		MSM_BACKEND_DAI_TERT_TDM_TX_1, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("TERT_TDM_TX_2", MSM_BACKEND_DAI_QUAT_TDM_RX_3,
+		MSM_BACKEND_DAI_TERT_TDM_TX_2, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("TERT_TDM_TX_3", MSM_BACKEND_DAI_QUAT_TDM_RX_3,
+		MSM_BACKEND_DAI_TERT_TDM_TX_3, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("QUAT_TDM_TX_0", MSM_BACKEND_DAI_QUAT_TDM_RX_3,
+		MSM_BACKEND_DAI_QUAT_TDM_TX_0, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("QUAT_TDM_TX_1", MSM_BACKEND_DAI_QUAT_TDM_RX_3,
+		MSM_BACKEND_DAI_QUAT_TDM_TX_1, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("QUAT_TDM_TX_2", MSM_BACKEND_DAI_QUAT_TDM_RX_3,
+		MSM_BACKEND_DAI_QUAT_TDM_TX_2, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("QUAT_TDM_TX_3", MSM_BACKEND_DAI_QUAT_TDM_RX_3,
+		MSM_BACKEND_DAI_QUAT_TDM_TX_3, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+};
+
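+/*
+ * MI2S RX port mixers: feed the tertiary and secondary MI2S playback
+ * ports from MI2S and SLIMBUS capture back ends (the secondary port
+ * also accepts internal FM).
+ */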
+static const struct snd_kcontrol_new tert_mi2s_rx_port_mixer_controls[] = {
+	SOC_SINGLE_EXT("PRI_MI2S_TX", MSM_BACKEND_DAI_TERTIARY_MI2S_RX,
+	MSM_BACKEND_DAI_PRI_MI2S_TX, 1, 0, msm_routing_get_port_mixer,
+	msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("TERT_MI2S_TX", MSM_BACKEND_DAI_TERTIARY_MI2S_RX,
+	MSM_BACKEND_DAI_TERTIARY_MI2S_TX, 1, 0, msm_routing_get_port_mixer,
+	msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("SLIM_0_TX", MSM_BACKEND_DAI_TERTIARY_MI2S_RX,
+	MSM_BACKEND_DAI_SLIMBUS_0_TX, 1, 0, msm_routing_get_port_mixer,
+	msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("QUAT_MI2S_TX", MSM_BACKEND_DAI_TERTIARY_MI2S_RX,
+	MSM_BACKEND_DAI_QUATERNARY_MI2S_TX, 1, 0, msm_routing_get_port_mixer,
+	msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("SEC_MI2S_TX", MSM_BACKEND_DAI_TERTIARY_MI2S_RX,
+	MSM_BACKEND_DAI_SECONDARY_MI2S_TX, 1, 0, msm_routing_get_port_mixer,
+	msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("SLIM_8_TX", MSM_BACKEND_DAI_TERTIARY_MI2S_RX,
+	MSM_BACKEND_DAI_SLIMBUS_8_TX, 1, 0, msm_routing_get_port_mixer,
+	msm_routing_put_port_mixer),
+};
+
+static const struct snd_kcontrol_new sec_mi2s_rx_port_mixer_controls[] = {
+	SOC_SINGLE_EXT("PRI_MI2S_TX", MSM_BACKEND_DAI_SECONDARY_MI2S_RX,
+	MSM_BACKEND_DAI_PRI_MI2S_TX, 1, 0, msm_routing_get_port_mixer,
+	msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("SEC_MI2S_TX", MSM_BACKEND_DAI_SECONDARY_MI2S_RX,
+	MSM_BACKEND_DAI_SECONDARY_MI2S_TX, 1, 0, msm_routing_get_port_mixer,
+	msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("TERT_MI2S_TX", MSM_BACKEND_DAI_SECONDARY_MI2S_RX,
+	MSM_BACKEND_DAI_TERTIARY_MI2S_TX, 1, 0, msm_routing_get_port_mixer,
+	msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("QUAT_MI2S_TX", MSM_BACKEND_DAI_SECONDARY_MI2S_RX,
+	MSM_BACKEND_DAI_QUATERNARY_MI2S_TX, 1, 0, msm_routing_get_port_mixer,
+	msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("SLIM_0_TX", MSM_BACKEND_DAI_SECONDARY_MI2S_RX,
+	MSM_BACKEND_DAI_SLIMBUS_0_TX, 1, 0, msm_routing_get_port_mixer,
+	msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("INTERNAL_FM_TX", MSM_BACKEND_DAI_SECONDARY_MI2S_RX,
+	MSM_BACKEND_DAI_INT_FM_TX, 1, 0, msm_routing_get_port_mixer,
+	msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("SLIM_8_TX", MSM_BACKEND_DAI_SECONDARY_MI2S_RX,
+	MSM_BACKEND_DAI_SLIMBUS_8_TX, 1, 0, msm_routing_get_port_mixer,
+	msm_routing_put_port_mixer),
+};
+
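+/*
+ * LSM ("listen") mixer controls: connect a capture back end (SLIMBUS
+ * or MI2S TX) to one of the eight LSM front ends used for low-power
+ * listen/sound-trigger capture.
+ */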
+static const struct snd_kcontrol_new lsm1_mixer_controls[] = {
+	SOC_SINGLE_EXT("SLIMBUS_0_TX", MSM_BACKEND_DAI_SLIMBUS_0_TX,
+		MSM_FRONTEND_DAI_LSM1, 1, 0, msm_routing_get_listen_mixer,
+		msm_routing_put_listen_mixer),
+	SOC_SINGLE_EXT("SLIMBUS_1_TX", MSM_BACKEND_DAI_SLIMBUS_1_TX,
+		MSM_FRONTEND_DAI_LSM1, 1, 0, msm_routing_get_listen_mixer,
+		msm_routing_put_listen_mixer),
+	SOC_SINGLE_EXT("SLIMBUS_3_TX", MSM_BACKEND_DAI_SLIMBUS_3_TX,
+		MSM_FRONTEND_DAI_LSM1, 1, 0, msm_routing_get_listen_mixer,
+		msm_routing_put_listen_mixer),
+	SOC_SINGLE_EXT("SLIMBUS_4_TX", MSM_BACKEND_DAI_SLIMBUS_4_TX,
+		MSM_FRONTEND_DAI_LSM1, 1, 0, msm_routing_get_listen_mixer,
+		msm_routing_put_listen_mixer),
+	SOC_SINGLE_EXT("SLIMBUS_5_TX", MSM_BACKEND_DAI_SLIMBUS_5_TX,
+		MSM_FRONTEND_DAI_LSM1, 1, 0, msm_routing_get_listen_mixer,
+		msm_routing_put_listen_mixer),
+	SOC_SINGLE_EXT("TERT_MI2S_TX", MSM_BACKEND_DAI_TERTIARY_MI2S_TX,
+		MSM_FRONTEND_DAI_LSM1, 1, 0, msm_routing_get_listen_mixer,
+		msm_routing_put_listen_mixer),
+	SOC_SINGLE_EXT("QUAT_MI2S_TX", MSM_BACKEND_DAI_QUATERNARY_MI2S_TX,
+		MSM_FRONTEND_DAI_LSM1, 1, 0, msm_routing_get_listen_mixer,
+		msm_routing_put_listen_mixer),
+	SOC_SINGLE_EXT("INT3_MI2S_TX", MSM_BACKEND_DAI_INT3_MI2S_TX,
+		MSM_FRONTEND_DAI_LSM1, 1, 0, msm_routing_get_listen_mixer,
+		msm_routing_put_listen_mixer),
+};
+
+static const struct snd_kcontrol_new lsm2_mixer_controls[] = {
+	SOC_SINGLE_EXT("SLIMBUS_0_TX", MSM_BACKEND_DAI_SLIMBUS_0_TX,
+		MSM_FRONTEND_DAI_LSM2, 1, 0, msm_routing_get_listen_mixer,
+		msm_routing_put_listen_mixer),
+	SOC_SINGLE_EXT("SLIMBUS_1_TX", MSM_BACKEND_DAI_SLIMBUS_1_TX,
+		MSM_FRONTEND_DAI_LSM2, 1, 0, msm_routing_get_listen_mixer,
+		msm_routing_put_listen_mixer),
+	SOC_SINGLE_EXT("SLIMBUS_3_TX", MSM_BACKEND_DAI_SLIMBUS_3_TX,
+		MSM_FRONTEND_DAI_LSM2, 1, 0, msm_routing_get_listen_mixer,
+		msm_routing_put_listen_mixer),
+	SOC_SINGLE_EXT("SLIMBUS_4_TX", MSM_BACKEND_DAI_SLIMBUS_4_TX,
+		MSM_FRONTEND_DAI_LSM2, 1, 0, msm_routing_get_listen_mixer,
+		msm_routing_put_listen_mixer),
+	SOC_SINGLE_EXT("SLIMBUS_5_TX", MSM_BACKEND_DAI_SLIMBUS_5_TX,
+		MSM_FRONTEND_DAI_LSM2, 1, 0, msm_routing_get_listen_mixer,
+		msm_routing_put_listen_mixer),
+	SOC_SINGLE_EXT("TERT_MI2S_TX", MSM_BACKEND_DAI_TERTIARY_MI2S_TX,
+		MSM_FRONTEND_DAI_LSM2, 1, 0, msm_routing_get_listen_mixer,
+		msm_routing_put_listen_mixer),
+	SOC_SINGLE_EXT("QUAT_MI2S_TX", MSM_BACKEND_DAI_QUATERNARY_MI2S_TX,
+		MSM_FRONTEND_DAI_LSM2, 1, 0, msm_routing_get_listen_mixer,
+		msm_routing_put_listen_mixer),
+	SOC_SINGLE_EXT("INT3_MI2S_TX", MSM_BACKEND_DAI_INT3_MI2S_TX,
+		MSM_FRONTEND_DAI_LSM2, 1, 0, msm_routing_get_listen_mixer,
+		msm_routing_put_listen_mixer),
+};
+
+static const struct snd_kcontrol_new lsm3_mixer_controls[] = {
+	SOC_SINGLE_EXT("SLIMBUS_0_TX", MSM_BACKEND_DAI_SLIMBUS_0_TX,
+		MSM_FRONTEND_DAI_LSM3, 1, 0, msm_routing_get_listen_mixer,
+		msm_routing_put_listen_mixer),
+	SOC_SINGLE_EXT("SLIMBUS_1_TX", MSM_BACKEND_DAI_SLIMBUS_1_TX,
+		MSM_FRONTEND_DAI_LSM3, 1, 0, msm_routing_get_listen_mixer,
+		msm_routing_put_listen_mixer),
+	SOC_SINGLE_EXT("SLIMBUS_3_TX", MSM_BACKEND_DAI_SLIMBUS_3_TX,
+		MSM_FRONTEND_DAI_LSM3, 1, 0, msm_routing_get_listen_mixer,
+		msm_routing_put_listen_mixer),
+	SOC_SINGLE_EXT("SLIMBUS_4_TX", MSM_BACKEND_DAI_SLIMBUS_4_TX,
+		MSM_FRONTEND_DAI_LSM3, 1, 0, msm_routing_get_listen_mixer,
+		msm_routing_put_listen_mixer),
+	SOC_SINGLE_EXT("SLIMBUS_5_TX", MSM_BACKEND_DAI_SLIMBUS_5_TX,
+		MSM_FRONTEND_DAI_LSM3, 1, 0, msm_routing_get_listen_mixer,
+		msm_routing_put_listen_mixer),
+	SOC_SINGLE_EXT("TERT_MI2S_TX", MSM_BACKEND_DAI_TERTIARY_MI2S_TX,
+		MSM_FRONTEND_DAI_LSM3, 1, 0, msm_routing_get_listen_mixer,
+		msm_routing_put_listen_mixer),
+	SOC_SINGLE_EXT("QUAT_MI2S_TX", MSM_BACKEND_DAI_QUATERNARY_MI2S_TX,
+		MSM_FRONTEND_DAI_LSM3, 1, 0, msm_routing_get_listen_mixer,
+		msm_routing_put_listen_mixer),
+	SOC_SINGLE_EXT("INT3_MI2S_TX", MSM_BACKEND_DAI_INT3_MI2S_TX,
+		MSM_FRONTEND_DAI_LSM3, 1, 0, msm_routing_get_listen_mixer,
+		msm_routing_put_listen_mixer),
+};
+
+static const struct snd_kcontrol_new lsm4_mixer_controls[] = {
+	SOC_SINGLE_EXT("SLIMBUS_0_TX", MSM_BACKEND_DAI_SLIMBUS_0_TX,
+		MSM_FRONTEND_DAI_LSM4, 1, 0, msm_routing_get_listen_mixer,
+		msm_routing_put_listen_mixer),
+	SOC_SINGLE_EXT("SLIMBUS_1_TX", MSM_BACKEND_DAI_SLIMBUS_1_TX,
+		MSM_FRONTEND_DAI_LSM4, 1, 0, msm_routing_get_listen_mixer,
+		msm_routing_put_listen_mixer),
+	SOC_SINGLE_EXT("SLIMBUS_3_TX", MSM_BACKEND_DAI_SLIMBUS_3_TX,
+		MSM_FRONTEND_DAI_LSM4, 1, 0, msm_routing_get_listen_mixer,
+		msm_routing_put_listen_mixer),
+	SOC_SINGLE_EXT("SLIMBUS_4_TX", MSM_BACKEND_DAI_SLIMBUS_4_TX,
+		MSM_FRONTEND_DAI_LSM4, 1, 0, msm_routing_get_listen_mixer,
+		msm_routing_put_listen_mixer),
+	SOC_SINGLE_EXT("SLIMBUS_5_TX", MSM_BACKEND_DAI_SLIMBUS_5_TX,
+		MSM_FRONTEND_DAI_LSM4, 1, 0, msm_routing_get_listen_mixer,
+		msm_routing_put_listen_mixer),
+	SOC_SINGLE_EXT("TERT_MI2S_TX", MSM_BACKEND_DAI_TERTIARY_MI2S_TX,
+		MSM_FRONTEND_DAI_LSM4, 1, 0, msm_routing_get_listen_mixer,
+		msm_routing_put_listen_mixer),
+	SOC_SINGLE_EXT("QUAT_MI2S_TX", MSM_BACKEND_DAI_QUATERNARY_MI2S_TX,
+		MSM_FRONTEND_DAI_LSM4, 1, 0, msm_routing_get_listen_mixer,
+		msm_routing_put_listen_mixer),
+	SOC_SINGLE_EXT("INT3_MI2S_TX", MSM_BACKEND_DAI_INT3_MI2S_TX,
+		MSM_FRONTEND_DAI_LSM4, 1, 0, msm_routing_get_listen_mixer,
+		msm_routing_put_listen_mixer),
+};
+
+static const struct snd_kcontrol_new lsm5_mixer_controls[] = {
+	SOC_SINGLE_EXT("SLIMBUS_0_TX", MSM_BACKEND_DAI_SLIMBUS_0_TX,
+		MSM_FRONTEND_DAI_LSM5, 1, 0, msm_routing_get_listen_mixer,
+		msm_routing_put_listen_mixer),
+	SOC_SINGLE_EXT("SLIMBUS_1_TX", MSM_BACKEND_DAI_SLIMBUS_1_TX,
+		MSM_FRONTEND_DAI_LSM5, 1, 0, msm_routing_get_listen_mixer,
+		msm_routing_put_listen_mixer),
+	SOC_SINGLE_EXT("SLIMBUS_3_TX", MSM_BACKEND_DAI_SLIMBUS_3_TX,
+		MSM_FRONTEND_DAI_LSM5, 1, 0, msm_routing_get_listen_mixer,
+		msm_routing_put_listen_mixer),
+	SOC_SINGLE_EXT("SLIMBUS_4_TX", MSM_BACKEND_DAI_SLIMBUS_4_TX,
+		MSM_FRONTEND_DAI_LSM5, 1, 0, msm_routing_get_listen_mixer,
+		msm_routing_put_listen_mixer),
+	SOC_SINGLE_EXT("SLIMBUS_5_TX", MSM_BACKEND_DAI_SLIMBUS_5_TX,
+		MSM_FRONTEND_DAI_LSM5, 1, 0, msm_routing_get_listen_mixer,
+		msm_routing_put_listen_mixer),
+	SOC_SINGLE_EXT("TERT_MI2S_TX", MSM_BACKEND_DAI_TERTIARY_MI2S_TX,
+		MSM_FRONTEND_DAI_LSM5, 1, 0, msm_routing_get_listen_mixer,
+		msm_routing_put_listen_mixer),
+	SOC_SINGLE_EXT("QUAT_MI2S_TX", MSM_BACKEND_DAI_QUATERNARY_MI2S_TX,
+		MSM_FRONTEND_DAI_LSM5, 1, 0, msm_routing_get_listen_mixer,
+		msm_routing_put_listen_mixer),
+	SOC_SINGLE_EXT("INT3_MI2S_TX", MSM_BACKEND_DAI_INT3_MI2S_TX,
+		MSM_FRONTEND_DAI_LSM5, 1, 0, msm_routing_get_listen_mixer,
+		msm_routing_put_listen_mixer),
+};
+
+static const struct snd_kcontrol_new lsm6_mixer_controls[] = {
+	SOC_SINGLE_EXT("SLIMBUS_0_TX", MSM_BACKEND_DAI_SLIMBUS_0_TX,
+		MSM_FRONTEND_DAI_LSM6, 1, 0, msm_routing_get_listen_mixer,
+		msm_routing_put_listen_mixer),
+	SOC_SINGLE_EXT("SLIMBUS_1_TX", MSM_BACKEND_DAI_SLIMBUS_1_TX,
+		MSM_FRONTEND_DAI_LSM6, 1, 0, msm_routing_get_listen_mixer,
+		msm_routing_put_listen_mixer),
+	SOC_SINGLE_EXT("SLIMBUS_3_TX", MSM_BACKEND_DAI_SLIMBUS_3_TX,
+		MSM_FRONTEND_DAI_LSM6, 1, 0, msm_routing_get_listen_mixer,
+		msm_routing_put_listen_mixer),
+	SOC_SINGLE_EXT("SLIMBUS_4_TX", MSM_BACKEND_DAI_SLIMBUS_4_TX,
+		MSM_FRONTEND_DAI_LSM6, 1, 0, msm_routing_get_listen_mixer,
+		msm_routing_put_listen_mixer),
+	SOC_SINGLE_EXT("SLIMBUS_5_TX", MSM_BACKEND_DAI_SLIMBUS_5_TX,
+		MSM_FRONTEND_DAI_LSM6, 1, 0, msm_routing_get_listen_mixer,
+		msm_routing_put_listen_mixer),
+	SOC_SINGLE_EXT("TERT_MI2S_TX", MSM_BACKEND_DAI_TERTIARY_MI2S_TX,
+		MSM_FRONTEND_DAI_LSM6, 1, 0, msm_routing_get_listen_mixer,
+		msm_routing_put_listen_mixer),
+	SOC_SINGLE_EXT("QUAT_MI2S_TX", MSM_BACKEND_DAI_QUATERNARY_MI2S_TX,
+		MSM_FRONTEND_DAI_LSM6, 1, 0, msm_routing_get_listen_mixer,
+		msm_routing_put_listen_mixer),
+	SOC_SINGLE_EXT("INT3_MI2S_TX", MSM_BACKEND_DAI_INT3_MI2S_TX,
+		MSM_FRONTEND_DAI_LSM6, 1, 0, msm_routing_get_listen_mixer,
+		msm_routing_put_listen_mixer),
+};
+
+static const struct snd_kcontrol_new lsm7_mixer_controls[] = {
+	SOC_SINGLE_EXT("SLIMBUS_0_TX", MSM_BACKEND_DAI_SLIMBUS_0_TX,
+		MSM_FRONTEND_DAI_LSM7, 1, 0, msm_routing_get_listen_mixer,
+		msm_routing_put_listen_mixer),
+	SOC_SINGLE_EXT("SLIMBUS_1_TX", MSM_BACKEND_DAI_SLIMBUS_1_TX,
+		MSM_FRONTEND_DAI_LSM7, 1, 0, msm_routing_get_listen_mixer,
+		msm_routing_put_listen_mixer),
+	SOC_SINGLE_EXT("SLIMBUS_3_TX", MSM_BACKEND_DAI_SLIMBUS_3_TX,
+		MSM_FRONTEND_DAI_LSM7, 1, 0, msm_routing_get_listen_mixer,
+		msm_routing_put_listen_mixer),
+	SOC_SINGLE_EXT("SLIMBUS_4_TX", MSM_BACKEND_DAI_SLIMBUS_4_TX,
+		MSM_FRONTEND_DAI_LSM7, 1, 0, msm_routing_get_listen_mixer,
+		msm_routing_put_listen_mixer),
+	SOC_SINGLE_EXT("SLIMBUS_5_TX", MSM_BACKEND_DAI_SLIMBUS_5_TX,
+		MSM_FRONTEND_DAI_LSM7, 1, 0, msm_routing_get_listen_mixer,
+		msm_routing_put_listen_mixer),
+	SOC_SINGLE_EXT("TERT_MI2S_TX", MSM_BACKEND_DAI_TERTIARY_MI2S_TX,
+		MSM_FRONTEND_DAI_LSM7, 1, 0, msm_routing_get_listen_mixer,
+		msm_routing_put_listen_mixer),
+	SOC_SINGLE_EXT("QUAT_MI2S_TX", MSM_BACKEND_DAI_QUATERNARY_MI2S_TX,
+		MSM_FRONTEND_DAI_LSM7, 1, 0, msm_routing_get_listen_mixer,
+		msm_routing_put_listen_mixer),
+	SOC_SINGLE_EXT("INT3_MI2S_TX", MSM_BACKEND_DAI_INT3_MI2S_TX,
+		MSM_FRONTEND_DAI_LSM7, 1, 0, msm_routing_get_listen_mixer,
+		msm_routing_put_listen_mixer),
+};
+
+static const struct snd_kcontrol_new lsm8_mixer_controls[] = {
+	SOC_SINGLE_EXT("SLIMBUS_0_TX", MSM_BACKEND_DAI_SLIMBUS_0_TX,
+		MSM_FRONTEND_DAI_LSM8, 1, 0, msm_routing_get_listen_mixer,
+		msm_routing_put_listen_mixer),
+	SOC_SINGLE_EXT("SLIMBUS_1_TX", MSM_BACKEND_DAI_SLIMBUS_1_TX,
+		MSM_FRONTEND_DAI_LSM8, 1, 0, msm_routing_get_listen_mixer,
+		msm_routing_put_listen_mixer),
+	SOC_SINGLE_EXT("SLIMBUS_3_TX", MSM_BACKEND_DAI_SLIMBUS_3_TX,
+		MSM_FRONTEND_DAI_LSM8, 1, 0, msm_routing_get_listen_mixer,
+		msm_routing_put_listen_mixer),
+	SOC_SINGLE_EXT("SLIMBUS_4_TX", MSM_BACKEND_DAI_SLIMBUS_4_TX,
+		MSM_FRONTEND_DAI_LSM8, 1, 0, msm_routing_get_listen_mixer,
+		msm_routing_put_listen_mixer),
+	SOC_SINGLE_EXT("SLIMBUS_5_TX", MSM_BACKEND_DAI_SLIMBUS_5_TX,
+		MSM_FRONTEND_DAI_LSM8, 1, 0, msm_routing_get_listen_mixer,
+		msm_routing_put_listen_mixer),
+	SOC_SINGLE_EXT("TERT_MI2S_TX", MSM_BACKEND_DAI_TERTIARY_MI2S_TX,
+		MSM_FRONTEND_DAI_LSM8, 1, 0, msm_routing_get_listen_mixer,
+		msm_routing_put_listen_mixer),
+	SOC_SINGLE_EXT("QUAT_MI2S_TX", MSM_BACKEND_DAI_QUATERNARY_MI2S_TX,
+		MSM_FRONTEND_DAI_LSM8, 1, 0, msm_routing_get_listen_mixer,
+		msm_routing_put_listen_mixer),
+	SOC_SINGLE_EXT("INT3_MI2S_TX", MSM_BACKEND_DAI_INT3_MI2S_TX,
+		MSM_FRONTEND_DAI_LSM8, 1, 0, msm_routing_get_listen_mixer,
+		msm_routing_put_listen_mixer),
+};
+
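+/*
+ * On/off "Switch" controls (SND_SOC_NOPM, so no register is touched):
+ * each gates one loopback-style path - FM to SLIMBUS RX, FM to PCM RX,
+ * internal/primary/secondary/tertiary/quaternary MI2S, HFP and USB -
+ * through its dedicated get/put handler pair.
+ */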
+static const struct snd_kcontrol_new slim_fm_switch_mixer_controls =
+	SOC_SINGLE_EXT("Switch", SND_SOC_NOPM,
+	0, 1, 0, msm_routing_get_switch_mixer,
+	msm_routing_put_switch_mixer);
+
+static const struct snd_kcontrol_new slim1_fm_switch_mixer_controls =
+	SOC_SINGLE_EXT("Switch", SND_SOC_NOPM,
+	0, 1, 0, msm_routing_get_switch_mixer,
+	msm_routing_put_switch_mixer);
+
+static const struct snd_kcontrol_new slim3_fm_switch_mixer_controls =
+	SOC_SINGLE_EXT("Switch", SND_SOC_NOPM,
+	0, 1, 0, msm_routing_get_switch_mixer,
+	msm_routing_put_switch_mixer);
+
+static const struct snd_kcontrol_new slim4_fm_switch_mixer_controls =
+	SOC_SINGLE_EXT("Switch", SND_SOC_NOPM,
+	0, 1, 0, msm_routing_get_switch_mixer,
+	msm_routing_put_switch_mixer);
+
+static const struct snd_kcontrol_new slim6_fm_switch_mixer_controls =
+	SOC_SINGLE_EXT("Switch", SND_SOC_NOPM,
+	0, 1, 0, msm_routing_get_switch_mixer,
+	msm_routing_put_switch_mixer);
+
+static const struct snd_kcontrol_new pcm_rx_switch_mixer_controls =
+	SOC_SINGLE_EXT("Switch", SND_SOC_NOPM,
+	0, 1, 0, msm_routing_get_fm_pcmrx_switch_mixer,
+	msm_routing_put_fm_pcmrx_switch_mixer);
+
+static const struct snd_kcontrol_new int0_mi2s_rx_switch_mixer_controls =
+	SOC_SINGLE_EXT("Switch", SND_SOC_NOPM,
+	0, 1, 0, msm_routing_get_int0_mi2s_switch_mixer,
+	msm_routing_put_int0_mi2s_switch_mixer);
+
+static const struct snd_kcontrol_new int4_mi2s_rx_switch_mixer_controls =
+	SOC_SINGLE_EXT("Switch", SND_SOC_NOPM,
+	0, 1, 0, msm_routing_get_int4_mi2s_switch_mixer,
+	msm_routing_put_int4_mi2s_switch_mixer);
+
+static const struct snd_kcontrol_new pri_mi2s_rx_switch_mixer_controls =
+	SOC_SINGLE_EXT("Switch", SND_SOC_NOPM,
+	0, 1, 0, msm_routing_get_pri_mi2s_switch_mixer,
+	msm_routing_put_pri_mi2s_switch_mixer);
+
+static const struct snd_kcontrol_new sec_mi2s_rx_switch_mixer_controls =
+	SOC_SINGLE_EXT("Switch", SND_SOC_NOPM,
+	0, 1, 0, msm_routing_get_sec_mi2s_switch_mixer,
+	msm_routing_put_sec_mi2s_switch_mixer);
+
+static const struct snd_kcontrol_new tert_mi2s_rx_switch_mixer_controls =
+	SOC_SINGLE_EXT("Switch", SND_SOC_NOPM,
+	0, 1, 0, msm_routing_get_tert_mi2s_switch_mixer,
+	msm_routing_put_tert_mi2s_switch_mixer);
+
+static const struct snd_kcontrol_new quat_mi2s_rx_switch_mixer_controls =
+	SOC_SINGLE_EXT("Switch", SND_SOC_NOPM,
+	0, 1, 0, msm_routing_get_quat_mi2s_switch_mixer,
+	msm_routing_put_quat_mi2s_switch_mixer);
+
+static const struct snd_kcontrol_new hfp_pri_aux_switch_mixer_controls =
+	SOC_SINGLE_EXT("Switch", SND_SOC_NOPM,
+	0, 1, 0, msm_routing_get_hfp_switch_mixer,
+	msm_routing_put_hfp_switch_mixer);
+
+static const struct snd_kcontrol_new hfp_aux_switch_mixer_controls =
+	SOC_SINGLE_EXT("Switch", SND_SOC_NOPM,
+	0, 1, 0, msm_routing_get_hfp_switch_mixer,
+	msm_routing_put_hfp_switch_mixer);
+
+static const struct snd_kcontrol_new hfp_int_switch_mixer_controls =
+	SOC_SINGLE_EXT("Switch", SND_SOC_NOPM,
+	0, 1, 0, msm_routing_get_hfp_switch_mixer,
+	msm_routing_put_hfp_switch_mixer);
+
+static const struct snd_kcontrol_new hfp_slim7_switch_mixer_controls =
+	SOC_SINGLE_EXT("Switch", SND_SOC_NOPM,
+	0, 1, 0, msm_routing_get_hfp_switch_mixer,
+	msm_routing_put_hfp_switch_mixer);
+
+static const struct snd_kcontrol_new usb_switch_mixer_controls =
+	SOC_SINGLE_EXT("Switch", SND_SOC_NOPM,
+	0, 1, 0, msm_routing_get_usb_switch_mixer,
+	msm_routing_put_usb_switch_mixer);
+
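+/*
+ * Run-time LSM configuration controls: an LSM function enum per
+ * capture back end (None/AUDIO/BEACON/ULTRASOUND/SWAUDIO) plus an
+ * "LSMn Port" enum per front end choosing its input port from
+ * lsm_port_text (defined earlier in this file).
+ */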
+static const struct soc_enum lsm_port_enum =
+	SOC_ENUM_SINGLE_EXT(ARRAY_SIZE(lsm_port_text), lsm_port_text);
+
+static const char * const lsm_func_text[] = {
+	"None", "AUDIO", "BEACON", "ULTRASOUND", "SWAUDIO",
+};
+static const struct soc_enum lsm_func_enum =
+	SOC_ENUM_SINGLE_EXT(ARRAY_SIZE(lsm_func_text), lsm_func_text);
+
+static const struct snd_kcontrol_new lsm_controls[] = {
+	/* kcontrol of lsm_function */
+	SOC_ENUM_EXT(SLIMBUS_0_TX_TEXT" "LSM_FUNCTION_TEXT, lsm_func_enum,
+		     msm_routing_lsm_func_get, msm_routing_lsm_func_put),
+	SOC_ENUM_EXT(SLIMBUS_1_TX_TEXT" "LSM_FUNCTION_TEXT, lsm_func_enum,
+		     msm_routing_lsm_func_get, msm_routing_lsm_func_put),
+	SOC_ENUM_EXT(SLIMBUS_2_TX_TEXT" "LSM_FUNCTION_TEXT, lsm_func_enum,
+		     msm_routing_lsm_func_get, msm_routing_lsm_func_put),
+	SOC_ENUM_EXT(SLIMBUS_3_TX_TEXT" "LSM_FUNCTION_TEXT, lsm_func_enum,
+		     msm_routing_lsm_func_get, msm_routing_lsm_func_put),
+	SOC_ENUM_EXT(SLIMBUS_4_TX_TEXT" "LSM_FUNCTION_TEXT, lsm_func_enum,
+		     msm_routing_lsm_func_get, msm_routing_lsm_func_put),
+	SOC_ENUM_EXT(SLIMBUS_5_TX_TEXT" "LSM_FUNCTION_TEXT, lsm_func_enum,
+		     msm_routing_lsm_func_get, msm_routing_lsm_func_put),
+	SOC_ENUM_EXT(TERT_MI2S_TX_TEXT" "LSM_FUNCTION_TEXT, lsm_func_enum,
+		    msm_routing_lsm_func_get, msm_routing_lsm_func_put),
+	SOC_ENUM_EXT(QUAT_MI2S_TX_TEXT" "LSM_FUNCTION_TEXT, lsm_func_enum,
+		    msm_routing_lsm_func_get, msm_routing_lsm_func_put),
+	SOC_ENUM_EXT(INT3_MI2S_TX_TEXT" "LSM_FUNCTION_TEXT, lsm_func_enum,
+		    msm_routing_lsm_func_get, msm_routing_lsm_func_put),
+	/* kcontrol of lsm_port */
+	SOC_ENUM_EXT("LSM1 Port", lsm_port_enum,
+			  msm_routing_lsm_port_get,
+			  msm_routing_lsm_port_put),
+	SOC_ENUM_EXT("LSM2 Port", lsm_port_enum,
+			  msm_routing_lsm_port_get,
+			  msm_routing_lsm_port_put),
+	SOC_ENUM_EXT("LSM3 Port", lsm_port_enum,
+			  msm_routing_lsm_port_get,
+			  msm_routing_lsm_port_put),
+	SOC_ENUM_EXT("LSM4 Port", lsm_port_enum,
+			  msm_routing_lsm_port_get,
+			  msm_routing_lsm_port_put),
+	SOC_ENUM_EXT("LSM5 Port", lsm_port_enum,
+			  msm_routing_lsm_port_get,
+			  msm_routing_lsm_port_put),
+	SOC_ENUM_EXT("LSM6 Port", lsm_port_enum,
+			  msm_routing_lsm_port_get,
+			  msm_routing_lsm_port_put),
+	SOC_ENUM_EXT("LSM7 Port", lsm_port_enum,
+			  msm_routing_lsm_port_get,
+			  msm_routing_lsm_port_put),
+	SOC_ENUM_EXT("LSM8 Port", lsm_port_enum,
+			  msm_routing_lsm_port_get,
+			  msm_routing_lsm_port_put),
+};
+
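+/*
+ * AANC (adaptive ANC) reference mux: selects which SLIMBUS TX port
+ * supplies the noise reference for SLIMBUS_0_RX, or ZERO for none.
+ */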
+static const char * const aanc_slim_0_rx_text[] = {
+	"ZERO", "SLIMBUS_0_TX", "SLIMBUS_1_TX", "SLIMBUS_2_TX", "SLIMBUS_3_TX",
+	"SLIMBUS_4_TX", "SLIMBUS_5_TX", "SLIMBUS_6_TX"
+};
+
+static const struct soc_enum aanc_slim_0_rx_enum =
+	SOC_ENUM_SINGLE_EXT(ARRAY_SIZE(aanc_slim_0_rx_text),
+				aanc_slim_0_rx_text);
+
+static const struct snd_kcontrol_new aanc_slim_0_rx_mux[] = {
+	SOC_ENUM_EXT("AANC_SLIM_0_RX MUX", aanc_slim_0_rx_enum,
+		msm_routing_slim_0_rx_aanc_mux_get,
+		msm_routing_slim_0_rx_aanc_mux_put)
+};
+
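+/*
+ * "Set Custom Stereo OnOff": for every active legacy-mode RX session on
+ * SLIMBUS_0_RX, the RT proxy port or the primary/INT4 MI2S RX ports,
+ * enabling the control blends both input channels into each output
+ * channel with 0.5/0.5 Q14 weights; disabling restores the unity
+ * FL->FL / FR->FR mapping.
+ */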
+static int msm_routing_get_stereo_to_custom_stereo_control(
+					struct snd_kcontrol *kcontrol,
+					struct snd_ctl_elem_value *ucontrol)
+{
+	ucontrol->value.integer.value[0] = is_custom_stereo_on;
+	return 0;
+}
+
+static int msm_routing_put_stereo_to_custom_stereo_control(
+					struct snd_kcontrol *kcontrol,
+					struct snd_ctl_elem_value *ucontrol)
+{
+	int flag = 0, i = 0, rc = 0, idx = 0;
+	int be_index = 0, port_id, topo_id;
+	unsigned int session_id = 0;
+	uint16_t op_FL_ip_FL_weight = 0;
+	uint16_t op_FL_ip_FR_weight = 0;
+	uint16_t op_FR_ip_FL_weight = 0;
+	uint16_t op_FR_ip_FR_weight = 0;
+	flag = ucontrol->value.integer.value[0];
+	pr_debug("%s E flag %d\n", __func__, flag);
+
+	if ((is_custom_stereo_on && flag) || (!is_custom_stereo_on && !flag)) {
+		pr_err("%s: is_custom_stereo_on %d, flag %d\n",
+			__func__, is_custom_stereo_on, flag);
+		return 0;
+	}
+	is_custom_stereo_on = flag ? true : false;
+	pr_debug("%s:is_custom_stereo_on %d\n", __func__, is_custom_stereo_on);
+	for (be_index = 0; be_index < MSM_BACKEND_DAI_MAX; be_index++) {
+		port_id = msm_bedais[be_index].port_id;
+		if (!msm_bedais[be_index].active)
+			continue;
+		if ((port_id != SLIMBUS_0_RX) &&
+		     (port_id != RT_PROXY_PORT_001_RX) &&
+			(port_id != AFE_PORT_ID_PRIMARY_MI2S_RX) &&
+			(port_id != AFE_PORT_ID_INT4_MI2S_RX))
+			continue;
+
+		for_each_set_bit(i, &msm_bedais[be_index].fe_sessions[0],
+				MSM_FRONTEND_DAI_MM_SIZE) {
+			if (fe_dai_map[i][SESSION_TYPE_RX].perf_mode !=
+			    LEGACY_PCM_MODE)
+				goto skip_send_custom_stereo;
+			session_id =
+				fe_dai_map[i][SESSION_TYPE_RX].strm_id;
+			if (is_custom_stereo_on) {
+				op_FL_ip_FL_weight =
+					Q14_GAIN_ZERO_POINT_FIVE;
+				op_FL_ip_FR_weight =
+					Q14_GAIN_ZERO_POINT_FIVE;
+				op_FR_ip_FL_weight =
+					Q14_GAIN_ZERO_POINT_FIVE;
+				op_FR_ip_FR_weight =
+					Q14_GAIN_ZERO_POINT_FIVE;
+			} else {
+				op_FL_ip_FL_weight = Q14_GAIN_UNITY;
+				op_FL_ip_FR_weight = 0;
+				op_FR_ip_FL_weight = 0;
+				op_FR_ip_FR_weight = Q14_GAIN_UNITY;
+			}
+			for (idx = 0; idx < MAX_COPPS_PER_PORT; idx++) {
+				unsigned long copp =
+					session_copp_map[i]
+					[SESSION_TYPE_RX][be_index];
+				if (!test_bit(idx, &copp))
+					goto skip_send_custom_stereo;
+				topo_id = adm_get_topology_for_port_copp_idx(
+					msm_bedais[be_index].port_id, idx);
+				if (topo_id < 0)
+					pr_debug("%s:Err:custom stereo topo %d",
+						 __func__, topo_id);
+					pr_debug("idx %d\n", idx);
+				if (topo_id == DS2_ADM_COPP_TOPOLOGY_ID)
+					rc = msm_ds2_dap_set_custom_stereo_onoff
+						(msm_bedais[be_index].port_id,
+						idx, is_custom_stereo_on);
+				else if (topo_id == DOLBY_ADM_COPP_TOPOLOGY_ID)
+					rc = dolby_dap_set_custom_stereo_onoff(
+						msm_bedais[be_index].port_id,
+						idx, is_custom_stereo_on);
+				else
+					rc = msm_qti_pp_send_stereo_to_custom_stereo_cmd
+						(msm_bedais[be_index].port_id,
+						idx, session_id,
+						op_FL_ip_FL_weight,
+						op_FL_ip_FR_weight,
+						op_FR_ip_FL_weight,
+						op_FR_ip_FR_weight);
+				if (rc < 0) {
+					/* also reached via goto on earlier failures */
+skip_send_custom_stereo:
+					pr_err("%s: err setting custom stereo\n",
+						__func__);
+				}
+			}
+
+		}
+	}
+	return 0;
+}
+
+static const struct snd_kcontrol_new stereo_to_custom_stereo_controls[] = {
+	SOC_SINGLE_EXT("Set Custom Stereo OnOff", SND_SOC_NOPM, 0,
+	1, 0, msm_routing_get_stereo_to_custom_stereo_control,
+	msm_routing_put_stereo_to_custom_stereo_control),
+};
+
+static int msm_routing_get_app_type_cfg_control(struct snd_kcontrol *kcontrol,
+					  struct snd_ctl_elem_value *ucontrol)
+{
+	return 0;
+}
+
+static int msm_routing_put_app_type_cfg_control(struct snd_kcontrol *kcontrol,
+					  struct snd_ctl_elem_value *ucontrol)
+{
+	int i = 0, j;
+	int num_app_types = ucontrol->value.integer.value[i++];
+
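+	/*
+	 * ucontrol layout: value[0] is the entry count, followed by one
+	 * (app_type, sample_rate, bit_width) triplet per app type.
+	 */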
+	pr_debug("%s\n", __func__);
+
+	if (num_app_types < 0 || num_app_types > MAX_APP_TYPES) {
+		pr_err("%s: number of app types exceeds the max supported\n",
+			__func__);
+		return -EINVAL;
+	}
+	memset(app_type_cfg, 0, MAX_APP_TYPES *
+				sizeof(struct msm_pcm_routing_app_type_data));
+	for (j = 0; j < num_app_types; j++) {
+		app_type_cfg[j].app_type =
+				ucontrol->value.integer.value[i++];
+		app_type_cfg[j].sample_rate =
+				ucontrol->value.integer.value[i++];
+		app_type_cfg[j].bit_width =
+				ucontrol->value.integer.value[i++];
+	}
+
+	return 0;
+}
+
+static const struct snd_kcontrol_new app_type_cfg_controls[] = {
+	SOC_SINGLE_MULTI_EXT("App Type Config", SND_SOC_NOPM, 0,
+	0x7FFFFFFF, 0, 128, msm_routing_get_app_type_cfg_control,
+	msm_routing_put_app_type_cfg_control),
+};
+
+static int msm_routing_get_lsm_app_type_cfg_control(
+					struct snd_kcontrol *kcontrol,
+					struct snd_ctl_elem_value *ucontrol)
+{
+	return 0;
+}
+
+static int msm_routing_put_lsm_app_type_cfg_control(
+					struct snd_kcontrol *kcontrol,
+					struct snd_ctl_elem_value *ucontrol)
+{
+	int i = 0, j;
+	int num_app_types = ucontrol->value.integer.value[i++];
+
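+	/* Same ucontrol layout as "App Type Config": count, then triplets */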
+	if (num_app_types < 0 || num_app_types > MAX_APP_TYPES) {
+		pr_err("%s: number of app types exceeds the max supported\n",
+			__func__);
+		return -EINVAL;
+	}
+	memset(lsm_app_type_cfg, 0, MAX_APP_TYPES *
+				sizeof(struct msm_pcm_routing_app_type_data));
+	for (j = 0; j < num_app_types; j++) {
+		lsm_app_type_cfg[j].app_type =
+				ucontrol->value.integer.value[i++];
+		lsm_app_type_cfg[j].sample_rate =
+				ucontrol->value.integer.value[i++];
+		lsm_app_type_cfg[j].bit_width =
+				ucontrol->value.integer.value[i++];
+	}
+
+	return 0;
+}
+
+static const struct snd_kcontrol_new lsm_app_type_cfg_controls[] = {
+	SOC_SINGLE_MULTI_EXT("Listen App Type Config", SND_SOC_NOPM, 0,
+	0xFFFFFFFF, 0, 128, msm_routing_get_lsm_app_type_cfg_control,
+	msm_routing_put_lsm_app_type_cfg_control),
+};
+
+static int msm_routing_get_use_ds1_or_ds2_control(
+					struct snd_kcontrol *kcontrol,
+					struct snd_ctl_elem_value *ucontrol)
+{
+	ucontrol->value.integer.value[0] = is_ds2_on;
+	return 0;
+}
+
+static int msm_routing_put_use_ds1_or_ds2_control(
+					struct snd_kcontrol *kcontrol,
+					struct snd_ctl_elem_value *ucontrol)
+{
+	is_ds2_on = ucontrol->value.integer.value[0];
+	return 0;
+}
+
+static const struct snd_kcontrol_new use_ds1_or_ds2_controls[] = {
+	SOC_SINGLE_EXT("DS2 OnOff", SND_SOC_NOPM, 0,
+	1, 0, msm_routing_get_use_ds1_or_ds2_control,
+	msm_routing_put_use_ds1_or_ds2_control),
+};
+
+int msm_routing_get_rms_value_control(struct snd_kcontrol *kcontrol,
+				struct snd_ctl_elem_value *ucontrol)
+{
+	int rc = 0;
+	int be_idx = 0;
+	char *param_value;
+	int *update_param_value;
+	uint32_t param_length = sizeof(uint32_t);
+	uint32_t param_payload_len = RMS_PAYLOAD_LEN * sizeof(uint32_t);
+
+	param_value = kzalloc(param_length + param_payload_len, GFP_KERNEL);
+	if (!param_value) {
+		pr_err("%s: param memory alloc failed\n", __func__);
+		return -ENOMEM;
+	}
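+	/* RMS is only reported for the SLIMBUS_0_TX backend, and only while active */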
+	for (be_idx = 0; be_idx < MSM_BACKEND_DAI_MAX; be_idx++)
+		if (msm_bedais[be_idx].port_id == SLIMBUS_0_TX)
+			break;
+	if ((be_idx < MSM_BACKEND_DAI_MAX) && msm_bedais[be_idx].active) {
+		rc = adm_get_params(SLIMBUS_0_TX, 0,
+				RMS_MODULEID_APPI_PASSTHRU,
+				RMS_PARAM_FIRST_SAMPLE,
+				param_length + param_payload_len,
+				param_value);
+		if (rc) {
+			pr_err("%s: get parameters failed:%d\n", __func__, rc);
+			kfree(param_value);
+			return -EINVAL;
+		}
+		update_param_value = (int *)param_value;
+		ucontrol->value.integer.value[0] = update_param_value[0];
+
+		pr_debug("%s: FROM DSP value[0] 0x%x\n",
+			  __func__, update_param_value[0]);
+	}
+	kfree(param_value);
+	return 0;
+}
+
+static int msm_voc_session_id_put(struct snd_kcontrol *kcontrol,
+				  struct snd_ctl_elem_value *ucontrol)
+{
+	voc_session_id = ucontrol->value.integer.value[0];
+
+	pr_debug("%s: voc_session_id=%u\n", __func__, voc_session_id);
+
+	return 0;
+}
+
+static int msm_voc_session_id_get(struct snd_kcontrol *kcontrol,
+				  struct snd_ctl_elem_value *ucontrol)
+{
+	ucontrol->value.integer.value[0] = voc_session_id;
+
+	return 0;
+}
+
+static struct snd_kcontrol_new msm_voc_session_controls[] = {
+	SOC_SINGLE_MULTI_EXT("Voc VSID", SND_SOC_NOPM, 0,
+			     0xFFFFFFFF, 0, 1, msm_voc_session_id_get,
+			     msm_voc_session_id_put),
+};
+
+static int msm_sound_focus_info(struct snd_kcontrol *kcontrol,
+				struct snd_ctl_elem_info *uinfo)
+{
+	uinfo->type = SNDRV_CTL_ELEM_TYPE_BYTES;
+	uinfo->count = sizeof(struct sound_focus_param);
+
+	return 0;
+}
+
+static int msm_voice_sound_focus_put(struct snd_kcontrol *kcontrol,
+				     struct snd_ctl_elem_value *ucontrol)
+{
+	int ret = 0;
+	struct sound_focus_param soundFocusData;
+
+	memcpy((void *)&soundFocusData, ucontrol->value.bytes.data,
+		sizeof(struct sound_focus_param));
+	ret = voc_set_sound_focus(soundFocusData);
+	if (ret)
+		ret = -EINVAL;
+
+	return ret;
+}
+
+static int msm_voice_sound_focus_get(struct snd_kcontrol *kcontrol,
+				     struct snd_ctl_elem_value *ucontrol)
+{
+	int ret = 0;
+	struct sound_focus_param soundFocusData;
+
+	memset(&soundFocusData, 0, sizeof(struct sound_focus_param));
+
+	ret = voc_get_sound_focus(&soundFocusData);
+	if (ret) {
+		ret = -EINVAL;
+		goto done;
+	}
+	memcpy(ucontrol->value.bytes.data, (void *)&soundFocusData,
+		sizeof(struct sound_focus_param));
+
+done:
+	return ret;
+}
+
+static int msm_source_tracking_info(struct snd_kcontrol *kcontrol,
+				    struct snd_ctl_elem_info *uinfo)
+{
+	uinfo->type = SNDRV_CTL_ELEM_TYPE_BYTES;
+	uinfo->count = sizeof(struct source_tracking_param);
+
+	return 0;
+}
+
+static int msm_voice_source_tracking_get(struct snd_kcontrol *kcontrol,
+					 struct snd_ctl_elem_value *ucontrol)
+{
+	int ret = 0;
+	struct source_tracking_param sourceTrackingData;
+
+	memset(&sourceTrackingData, 0, sizeof(struct source_tracking_param));
+
+	ret = voc_get_source_tracking(&sourceTrackingData);
+	if (ret) {
+		ret = -EINVAL;
+		goto done;
+	}
+	memcpy(ucontrol->value.bytes.data, (void *)&sourceTrackingData,
+		sizeof(struct source_tracking_param));
+
+done:
+	return ret;
+}
+
+static int msm_audio_get_copp_idx_from_port_id(int port_id, int session_type,
+					 int *copp_idx)
+{
+	int i, idx, be_idx;
+	int ret = 0;
+	unsigned long copp;
+
+	pr_debug("%s: Enter, port_id=%d\n", __func__, port_id);
+
+	ret = q6audio_validate_port(port_id);
+	if (ret < 0) {
+		pr_debug("%s: port validation failed id 0x%x ret %d\n",
+			 __func__, port_id, ret);
+		ret = -EINVAL;
+		goto done;
+	}
+
+	for (be_idx = 0; be_idx < MSM_BACKEND_DAI_MAX; be_idx++) {
+		if (msm_bedais[be_idx].port_id == port_id)
+			break;
+	}
+	if (be_idx >= MSM_BACKEND_DAI_MAX) {
+		pr_debug("%s: Invalid be id %d\n", __func__, be_idx);
+		ret = -EINVAL;
+		goto done;
+	}
+
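+	/*
+	 * Scan the front-end sessions routed to this back-end and return the
+	 * first COPP index found for the requested session type.
+	 */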
+	for_each_set_bit(i, &msm_bedais[be_idx].fe_sessions[0],
+			 MSM_FRONTEND_DAI_MM_SIZE) {
+		for (idx = 0; idx < MAX_COPPS_PER_PORT; idx++) {
+			copp = session_copp_map[i]
+				[session_type][be_idx];
+			if (test_bit(idx, &copp))
+				break;
+		}
+		if (idx < MAX_COPPS_PER_PORT)
+			break;
+	}
+	if (i >= MSM_FRONTEND_DAI_MM_SIZE) {
+		pr_debug("%s: Invalid FE, exiting\n", __func__);
+		ret = -EINVAL;
+		goto done;
+	}
+	*copp_idx = idx;
+	pr_debug("%s: copp_idx=%d\n", __func__, *copp_idx);
+
+done:
+	return ret;
+}
+
+static int msm_audio_sound_focus_derive_port_id(struct snd_kcontrol *kcontrol,
+					    const char *prefix, int *port_id)
+{
+	int ret = 0;
+
+	pr_debug("%s: Enter, prefix:%s\n", __func__, prefix);
+
+	/*
+	 * Mixer control name will be like "Sound Focus Audio Tx SLIMBUS_0"
+	 * where the prefix is "Sound Focus Audio Tx ". Skip the prefix
+	 * and compare the string with the backend name to derive the port id.
+	 */
+	if (!strcmp(kcontrol->id.name + strlen(prefix),
+					"SLIMBUS_0")) {
+		*port_id = SLIMBUS_0_TX;
+	} else if (!strcmp(kcontrol->id.name + strlen(prefix),
+					"TERT_MI2S")) {
+		*port_id = AFE_PORT_ID_TERTIARY_MI2S_TX;
+	} else if (!strcmp(kcontrol->id.name + strlen(prefix),
+					"INT3_MI2S")) {
+		*port_id = AFE_PORT_ID_INT3_MI2S_TX;
+	} else {
+		ret = -EINVAL;
+		goto done;
+	}
+	pr_debug("%s: mixer ctl name=%s, derived port_id=%d\n",
+		  __func__, kcontrol->id.name, *port_id);
+
+done:
+	return ret;
+}
+
+static int msm_audio_sound_focus_put(struct snd_kcontrol *kcontrol,
+				     struct snd_ctl_elem_value *ucontrol)
+{
+	int ret = 0;
+	struct sound_focus_param soundFocusData;
+	int port_id, copp_idx;
+
+	ret = msm_audio_sound_focus_derive_port_id(kcontrol,
+				"Sound Focus Audio Tx ", &port_id);
+	if (ret != 0) {
+		ret = -EINVAL;
+		goto done;
+	}
+
+	ret = msm_audio_get_copp_idx_from_port_id(port_id, SESSION_TYPE_TX,
+					    &copp_idx);
+	if (ret) {
+		ret = -EINVAL;
+		goto done;
+	}
+
+	memcpy((void *)&soundFocusData, ucontrol->value.bytes.data,
+		sizeof(struct sound_focus_param));
+
+	ret = adm_set_sound_focus(port_id, copp_idx, soundFocusData);
+	if (ret) {
+		ret = -EINVAL;
+		goto done;
+	}
+
+done:
+	return ret;
+}
+
+static int msm_audio_sound_focus_get(struct snd_kcontrol *kcontrol,
+					struct snd_ctl_elem_value *ucontrol)
+{
+	int ret = 0;
+	struct sound_focus_param soundFocusData;
+	int port_id, copp_idx;
+
+	ret = msm_audio_sound_focus_derive_port_id(kcontrol,
+				"Sound Focus Audio Tx ", &port_id);
+	if (ret) {
+		ret = -EINVAL;
+		goto done;
+	}
+
+	ret = msm_audio_get_copp_idx_from_port_id(port_id, SESSION_TYPE_TX,
+					    &copp_idx);
+	if (ret) {
+		ret = -EINVAL;
+		goto done;
+	}
+
+	ret = adm_get_sound_focus(port_id, copp_idx, &soundFocusData);
+	if (ret) {
+		ret = -EINVAL;
+		goto done;
+	}
+
+	memcpy(ucontrol->value.bytes.data, (void *)&soundFocusData,
+		sizeof(struct sound_focus_param));
+
+done:
+	return ret;
+}
+
+static int msm_audio_source_tracking_get(struct snd_kcontrol *kcontrol,
+					struct snd_ctl_elem_value *ucontrol)
+{
+	int ret = 0;
+	struct source_tracking_param sourceTrackingData;
+	int port_id, copp_idx;
+
+	ret = msm_audio_sound_focus_derive_port_id(kcontrol,
+				"Source Tracking Audio Tx ", &port_id);
+	if (ret) {
+		ret = -EINVAL;
+		goto done;
+	}
+
+	ret = msm_audio_get_copp_idx_from_port_id(port_id, SESSION_TYPE_TX,
+					    &copp_idx);
+	if (ret) {
+		ret = -EINVAL;
+		goto done;
+	}
+
+	ret = adm_get_source_tracking(port_id, copp_idx, &sourceTrackingData);
+	if (ret) {
+		ret = -EINVAL;
+		goto done;
+	}
+
+	memcpy(ucontrol->value.bytes.data, (void *)&sourceTrackingData,
+		sizeof(struct source_tracking_param));
+
+done:
+	return ret;
+}
+
+static const struct snd_kcontrol_new msm_source_tracking_controls[] = {
+	{
+		.access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+		.iface	= SNDRV_CTL_ELEM_IFACE_MIXER,
+		.name	= "Sound Focus Voice Tx SLIMBUS_0",
+		.info	= msm_sound_focus_info,
+		.get	= msm_voice_sound_focus_get,
+		.put	= msm_voice_sound_focus_put,
+	},
+	{
+		.access = SNDRV_CTL_ELEM_ACCESS_READ,
+		.iface	= SNDRV_CTL_ELEM_IFACE_MIXER,
+		.name	= "Source Tracking Voice Tx SLIMBUS_0",
+		.info	= msm_source_tracking_info,
+		.get	= msm_voice_source_tracking_get,
+	},
+	{
+		.access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+		.iface	= SNDRV_CTL_ELEM_IFACE_MIXER,
+		.name	= "Sound Focus Audio Tx SLIMBUS_0",
+		.info	= msm_sound_focus_info,
+		.get	= msm_audio_sound_focus_get,
+		.put	= msm_audio_sound_focus_put,
+	},
+	{
+		.access = SNDRV_CTL_ELEM_ACCESS_READ,
+		.iface	= SNDRV_CTL_ELEM_IFACE_MIXER,
+		.name	= "Source Tracking Audio Tx SLIMBUS_0",
+		.info	= msm_source_tracking_info,
+		.get	= msm_audio_source_tracking_get,
+	},
+	{
+		.access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+		.iface	= SNDRV_CTL_ELEM_IFACE_MIXER,
+		.name	= "Sound Focus Voice Tx TERT_MI2S",
+		.info	= msm_sound_focus_info,
+		.get	= msm_voice_sound_focus_get,
+		.put	= msm_voice_sound_focus_put,
+	},
+	{
+		.access = SNDRV_CTL_ELEM_ACCESS_READ,
+		.iface	= SNDRV_CTL_ELEM_IFACE_MIXER,
+		.name	= "Source Tracking Voice Tx TERT_MI2S",
+		.info	= msm_source_tracking_info,
+		.get	= msm_voice_source_tracking_get,
+	},
+	{
+		.access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+		.iface	= SNDRV_CTL_ELEM_IFACE_MIXER,
+		.name	= "Sound Focus Audio Tx TERT_MI2S",
+		.info	= msm_sound_focus_info,
+		.get	= msm_audio_sound_focus_get,
+		.put	= msm_audio_sound_focus_put,
+	},
+	{
+		.access = SNDRV_CTL_ELEM_ACCESS_READ,
+		.iface	= SNDRV_CTL_ELEM_IFACE_MIXER,
+		.name	= "Source Tracking Audio Tx TERT_MI2S",
+		.info	= msm_source_tracking_info,
+		.get	= msm_audio_source_tracking_get,
+	},
+	{
+		.access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+		.iface	= SNDRV_CTL_ELEM_IFACE_MIXER,
+		.name	= "Sound Focus Voice Tx INT3_MI2S",
+		.info	= msm_sound_focus_info,
+		.get	= msm_voice_sound_focus_get,
+		.put	= msm_voice_sound_focus_put,
+	},
+	{
+		.access = SNDRV_CTL_ELEM_ACCESS_READ,
+		.iface	= SNDRV_CTL_ELEM_IFACE_MIXER,
+		.name	= "Source Tracking Voice Tx INT3_MI2S",
+		.info	= msm_source_tracking_info,
+		.get	= msm_voice_source_tracking_get,
+	},
+	{
+		.access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+		.iface	= SNDRV_CTL_ELEM_IFACE_MIXER,
+		.name	= "Sound Focus Audio Tx INT3_MI2S",
+		.info	= msm_sound_focus_info,
+		.get	= msm_audio_sound_focus_get,
+		.put	= msm_audio_sound_focus_put,
+	},
+	{
+		.access = SNDRV_CTL_ELEM_ACCESS_READ,
+		.iface	= SNDRV_CTL_ELEM_IFACE_MIXER,
+		.name	= "Source Tracking Audio Tx INT3_MI2S",
+		.info	= msm_source_tracking_info,
+		.get	= msm_audio_source_tracking_get,
+	},
+};
+
+static int spkr_prot_put_vi_lch_port(struct snd_kcontrol *kcontrol,
+	struct snd_ctl_elem_value *ucontrol)
+{
+	int ret = 0;
+	int item;
+	struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
+
+	pr_debug("%s: item is %d\n", __func__,
+		   ucontrol->value.enumerated.item[0]);
+	mutex_lock(&routing_lock);
+	item = ucontrol->value.enumerated.item[0];
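+	/*
+	 * shift_l carries the RX (playback) back-end DAI id; the selected enum
+	 * value carries the TX back-end used for the VI feedback path.
+	 */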
+	if (item < e->items) {
+		pr_debug("%s: RX DAI ID %d TX DAI ID %d\n",
+			__func__, e->shift_l, e->values[item]);
+		if (e->shift_l < MSM_BACKEND_DAI_MAX &&
+			e->values[item] < MSM_BACKEND_DAI_MAX)
+			/* Enable feedback TX path */
+			ret = afe_spk_prot_feed_back_cfg(
+			   msm_bedais[e->values[item]].port_id,
+			   msm_bedais[e->shift_l].port_id, 1, 0, 1);
+		else {
+			pr_debug("%s: value %d is out of range\n",
+			__func__, e->values[item]);
+			/* Disable feedback TX path */
+			if (e->values[item] == MSM_BACKEND_DAI_MAX)
+				ret = afe_spk_prot_feed_back_cfg(0, 0, 0, 0, 0);
+			else
+				ret = -EINVAL;
+		}
+	} else {
+		pr_err("%s: item value is out of range\n", __func__);
+		ret = -EINVAL;
+	}
+	mutex_unlock(&routing_lock);
+	return ret;
+}
+
+static int spkr_prot_put_vi_rch_port(struct snd_kcontrol *kcontrol,
+		struct snd_ctl_elem_value *ucontrol)
+{
+	int ret = 0;
+	int item;
+	struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
+
+	pr_debug("%s: item is %d\n", __func__,
+			ucontrol->value.enumerated.item[0]);
+	mutex_lock(&routing_lock);
+	item = ucontrol->value.enumerated.item[0];
+	if (item < e->items) {
+		pr_debug("%s: RX DAI ID %d TX DAI ID %d\n",
+				__func__, e->shift_l, e->values[item]);
+		if (e->shift_l < MSM_BACKEND_DAI_MAX &&
+				e->values[item] < MSM_BACKEND_DAI_MAX)
+			/* Enable feedback TX path */
+			ret = afe_spk_prot_feed_back_cfg(
+					msm_bedais[e->values[item]].port_id,
+					msm_bedais[e->shift_l].port_id,
+					1, 1, 1);
+		else {
+			pr_debug("%s: value %d is out of range\n",
+					__func__, e->values[item]);
+			/* Disable feedback TX path */
+			if (e->values[item] == MSM_BACKEND_DAI_MAX)
+				ret = afe_spk_prot_feed_back_cfg(0,
+						0, 0, 0, 0);
+			else
+				ret = -EINVAL;
+		}
+	} else {
+		pr_err("%s: item value is out of range\n", __func__);
+		ret = -EINVAL;
+	}
+	mutex_unlock(&routing_lock);
+	return ret;
+}
+
+static int spkr_prot_get_vi_lch_port(struct snd_kcontrol *kcontrol,
+	struct snd_ctl_elem_value *ucontrol)
+{
+	pr_debug("%s\n", __func__);
+	return 0;
+}
+
+static int spkr_prot_get_vi_rch_port(struct snd_kcontrol *kcontrol,
+		struct snd_ctl_elem_value *ucontrol)
+{
+	pr_debug("%s\n", __func__);
+	ucontrol->value.enumerated.item[0] = 0;
+	return 0;
+}
+
+static const char * const slim0_rx_vi_fb_tx_lch_mux_text[] = {
+	"ZERO", "SLIM4_TX"
+};
+
+static const char * const slim0_rx_vi_fb_tx_rch_mux_text[] = {
+	"ZERO", "SLIM4_TX"
+};
+
+static const char * const mi2s_rx_vi_fb_tx_mux_text[] = {
+	"ZERO", "SENARY_TX"
+};
+
+static const char * const int4_mi2s_rx_vi_fb_tx_mono_mux_text[] = {
+	"ZERO", "INT5_MI2S_TX"
+};
+
+static const char * const int4_mi2s_rx_vi_fb_tx_stereo_mux_text[] = {
+	"ZERO", "INT5_MI2S_TX"
+};
+
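+/* MSM_BACKEND_DAI_MAX pairs with the "ZERO" text entry and disables feedback */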
+static const int slim0_rx_vi_fb_tx_lch_value[] = {
+	MSM_BACKEND_DAI_MAX, MSM_BACKEND_DAI_SLIMBUS_4_TX
+};
+
+static const int slim0_rx_vi_fb_tx_rch_value[] = {
+	MSM_BACKEND_DAI_MAX, MSM_BACKEND_DAI_SLIMBUS_4_TX
+};
+
+static const int mi2s_rx_vi_fb_tx_value[] = {
+	MSM_BACKEND_DAI_MAX, MSM_BACKEND_DAI_SENARY_MI2S_TX
+};
+
+static const int int4_mi2s_rx_vi_fb_tx_mono_ch_value[] = {
+	MSM_BACKEND_DAI_MAX, MSM_BACKEND_DAI_INT5_MI2S_TX
+};
+
+static const int int4_mi2s_rx_vi_fb_tx_stereo_ch_value[] = {
+	MSM_BACKEND_DAI_MAX, MSM_BACKEND_DAI_INT5_MI2S_TX
+};
+
+static const struct soc_enum slim0_rx_vi_fb_lch_mux_enum =
+	SOC_VALUE_ENUM_DOUBLE(0, MSM_BACKEND_DAI_SLIMBUS_0_RX, 0, 0,
+	ARRAY_SIZE(slim0_rx_vi_fb_tx_lch_mux_text),
+	slim0_rx_vi_fb_tx_lch_mux_text, slim0_rx_vi_fb_tx_lch_value);
+
+static const struct soc_enum slim0_rx_vi_fb_rch_mux_enum =
+	SOC_VALUE_ENUM_DOUBLE(0, MSM_BACKEND_DAI_SLIMBUS_0_RX, 0, 0,
+	ARRAY_SIZE(slim0_rx_vi_fb_tx_rch_mux_text),
+	slim0_rx_vi_fb_tx_rch_mux_text, slim0_rx_vi_fb_tx_rch_value);
+
+static const struct soc_enum mi2s_rx_vi_fb_mux_enum =
+	SOC_VALUE_ENUM_DOUBLE(0, MSM_BACKEND_DAI_PRI_MI2S_RX, 0, 0,
+	ARRAY_SIZE(mi2s_rx_vi_fb_tx_mux_text),
+	mi2s_rx_vi_fb_tx_mux_text, mi2s_rx_vi_fb_tx_value);
+
+static const struct soc_enum int4_mi2s_rx_vi_fb_mono_ch_mux_enum =
+	SOC_VALUE_ENUM_DOUBLE(0, MSM_BACKEND_DAI_INT4_MI2S_RX, 0, 0,
+	ARRAY_SIZE(int4_mi2s_rx_vi_fb_tx_mono_mux_text),
+	int4_mi2s_rx_vi_fb_tx_mono_mux_text,
+	int4_mi2s_rx_vi_fb_tx_mono_ch_value);
+
+static const struct soc_enum int4_mi2s_rx_vi_fb_stereo_ch_mux_enum =
+	SOC_VALUE_ENUM_DOUBLE(0, MSM_BACKEND_DAI_INT4_MI2S_RX, 0, 0,
+	ARRAY_SIZE(int4_mi2s_rx_vi_fb_tx_stereo_mux_text),
+	int4_mi2s_rx_vi_fb_tx_stereo_mux_text,
+	int4_mi2s_rx_vi_fb_tx_stereo_ch_value);
+
+static const struct snd_kcontrol_new slim0_rx_vi_fb_lch_mux =
+	SOC_DAPM_ENUM_EXT("SLIM0_RX_VI_FB_LCH_MUX",
+	slim0_rx_vi_fb_lch_mux_enum, spkr_prot_get_vi_lch_port,
+	spkr_prot_put_vi_lch_port);
+
+static const struct snd_kcontrol_new slim0_rx_vi_fb_rch_mux =
+	SOC_DAPM_ENUM_EXT("SLIM0_RX_VI_FB_RCH_MUX",
+	slim0_rx_vi_fb_rch_mux_enum, spkr_prot_get_vi_rch_port,
+	spkr_prot_put_vi_rch_port);
+
+static const struct snd_kcontrol_new mi2s_rx_vi_fb_mux =
+	SOC_DAPM_ENUM_EXT("PRI_MI2S_RX_VI_FB_MUX",
+	mi2s_rx_vi_fb_mux_enum, spkr_prot_get_vi_lch_port,
+	spkr_prot_put_vi_lch_port);
+
+static const struct snd_kcontrol_new int4_mi2s_rx_vi_fb_mono_ch_mux =
+	SOC_DAPM_ENUM_EXT("INT4_MI2S_RX_VI_FB_MONO_CH_MUX",
+	int4_mi2s_rx_vi_fb_mono_ch_mux_enum, spkr_prot_get_vi_lch_port,
+	spkr_prot_put_vi_lch_port);
+
+static const struct snd_kcontrol_new int4_mi2s_rx_vi_fb_stereo_ch_mux =
+	SOC_DAPM_ENUM_EXT("INT4_MI2S_RX_VI_FB_STEREO_CH_MUX",
+	int4_mi2s_rx_vi_fb_stereo_ch_mux_enum, spkr_prot_get_vi_rch_port,
+	spkr_prot_put_vi_rch_port);
+
+static const struct snd_soc_dapm_widget msm_qdsp6_widgets[] = {
+	/* Frontend AIF */
+	/* Widget name equals the front-end DAI name (to be confirmed);
+	 * the stream name must contain a substring of the front-end DAI name.
+	 */
+	SND_SOC_DAPM_AIF_IN("MM_DL1", "MultiMedia1 Playback", 0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_IN("MM_DL2", "MultiMedia2 Playback", 0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_IN("MM_DL3", "MultiMedia3 Playback", 0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_IN("MM_DL4", "MultiMedia4 Playback", 0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_IN("MM_DL5", "MultiMedia5 Playback", 0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_IN("MM_DL6", "MultiMedia6 Playback", 0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_IN("MM_DL7", "MultiMedia7 Playback", 0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_IN("MM_DL8", "MultiMedia8 Playback", 0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_IN("MM_DL9", "MultiMedia9 Playback", 0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_IN("MM_DL10", "MultiMedia10 Playback", 0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_IN("MM_DL11", "MultiMedia11 Playback", 0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_IN("MM_DL12", "MultiMedia12 Playback", 0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_IN("MM_DL13", "MultiMedia13 Playback", 0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_IN("MM_DL14", "MultiMedia14 Playback", 0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_IN("MM_DL15", "MultiMedia15 Playback", 0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_IN("MM_DL16", "MultiMedia16 Playback", 0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_IN("MM_DL20", "MultiMedia20 Playback", 0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_IN("MM_DL21", "MultiMedia21 Playback", 0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_IN("MM_DL22", "MultiMedia22 Playback", 0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_IN("MM_DL23", "MultiMedia23 Playback", 0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_IN("MM_DL24", "MultiMedia24 Playback", 0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_IN("MM_DL25", "MultiMedia25 Playback", 0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_IN("MM_DL26", "MultiMedia26 Playback", 0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_IN("VOIP_DL", "VoIP Playback", 0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_OUT("MM_UL1", "MultiMedia1 Capture", 0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_OUT("MM_UL2", "MultiMedia2 Capture", 0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_OUT("MM_UL3", "MultiMedia3 Capture", 0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_OUT("MM_UL4", "MultiMedia4 Capture", 0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_OUT("MM_UL5", "MultiMedia5 Capture", 0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_OUT("MM_UL6", "MultiMedia6 Capture", 0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_OUT("MM_UL8", "MultiMedia8 Capture", 0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_OUT("MM_UL9", "MultiMedia9 Capture", 0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_OUT("MM_UL16", "MultiMedia16 Capture", 0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_OUT("MM_UL17", "MultiMedia17 Capture", 0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_OUT("MM_UL18", "MultiMedia18 Capture", 0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_OUT("MM_UL19", "MultiMedia19 Capture", 0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_OUT("MM_UL20", "MultiMedia20 Capture", 0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_OUT("MM_UL21", "MultiMedia21 Capture", 0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_OUT("MM_UL27", "MultiMedia27 Capture", 0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_IN("CS-VOICE_DL1", "CS-VOICE Playback", 0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_OUT("CS-VOICE_UL1", "CS-VOICE Capture", 0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_IN("VOICE2_DL", "Voice2 Playback", 0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_OUT("VOICE2_UL", "Voice2 Capture", 0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_IN("VoLTE_DL", "VoLTE Playback", 0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_OUT("VoLTE_UL", "VoLTE Capture", 0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_IN("VoWLAN_DL", "VoWLAN Playback", 0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_OUT("VoWLAN_UL", "VoWLAN Capture", 0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_OUT("VOIP_UL", "VoIP Capture", 0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_IN("VOICEMMODE1_DL",
+		"VoiceMMode1 Playback", 0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_OUT("VOICEMMODE1_UL",
+		"VoiceMMode1 Capture", 0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_IN("VOICEMMODE2_DL",
+		"VoiceMMode2 Playback", 0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_OUT("VOICEMMODE2_UL",
+		"VoiceMMode2 Capture", 0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_IN("SLIM0_DL_HL", "SLIMBUS0_HOSTLESS Playback",
+		0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_OUT("SLIM0_UL_HL", "SLIMBUS0_HOSTLESS Capture",
+		0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_OUT("CPE_LSM_UL_HL", "CPE LSM capture",
+		0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_IN("SLIM1_DL_HL", "SLIMBUS1_HOSTLESS Playback",
+		0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_OUT("SLIM1_UL_HL", "SLIMBUS1_HOSTLESS Capture",
+		0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_IN("SLIM3_DL_HL", "SLIMBUS3_HOSTLESS Playback",
+		0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_OUT("SLIM3_UL_HL", "SLIMBUS3_HOSTLESS Capture",
+		0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_IN("SLIM4_DL_HL", "SLIMBUS4_HOSTLESS Playback",
+		0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_OUT("SLIM4_UL_HL", "SLIMBUS4_HOSTLESS Capture",
+		0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_IN("SLIM6_DL_HL", "SLIMBUS6_HOSTLESS Playback",
+		0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_OUT("SLIM6_UL_HL", "SLIMBUS6_HOSTLESS Capture",
+		0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_IN("SLIM7_DL_HL", "SLIMBUS7_HOSTLESS Playback",
+		0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_OUT("SLIM7_UL_HL", "SLIMBUS7_HOSTLESS Capture",
+		0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_IN("SLIM8_DL_HL", "SLIMBUS8_HOSTLESS Playback",
+		0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_OUT("SLIM8_UL_HL", "SLIMBUS8_HOSTLESS Capture",
+		0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_IN("INTFM_DL_HL", "INT_FM_HOSTLESS Playback",
+		0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_OUT("INTFM_UL_HL", "INT_FM_HOSTLESS Capture",
+		0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_IN("INTHFP_DL_HL", "INT_HFP_BT_HOSTLESS Playback",
+		0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_OUT("INTHFP_UL_HL", "INT_HFP_BT_HOSTLESS Capture",
+		0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_IN("USBAUDIO_DL_HL", "USBAUDIO_HOSTLESS Playback",
+		0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_OUT("USBAUDIO_UL_HL", "USBAUDIO_HOSTLESS Capture",
+		0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_IN("HDMI_DL_HL", "HDMI_HOSTLESS Playback", 0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_IN("SEC_I2S_DL_HL", "SEC_I2S_RX_HOSTLESS Playback",
+		0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_IN("INT0_MI2S_DL_HL",
+		"INT0 MI2S_RX Hostless Playback",
+		0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_IN("INT4_MI2S_DL_HL",
+		"INT4 MI2S_RX Hostless Playback",
+		0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_IN("PRI_MI2S_DL_HL",
+		"Primary MI2S_RX Hostless Playback",
+		0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_IN("SEC_MI2S_DL_HL",
+		"Secondary MI2S_RX Hostless Playback",
+		0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_IN("TERT_MI2S_DL_HL",
+		"Tertiary MI2S_RX Hostless Playback",
+		0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_IN("QUAT_MI2S_DL_HL",
+		"Quaternary MI2S_RX Hostless Playback",
+		0, 0, 0, 0),
+
+	SND_SOC_DAPM_AIF_IN("AUXPCM_DL_HL", "AUXPCM_HOSTLESS Playback",
+		0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_OUT("AUXPCM_UL_HL", "AUXPCM_HOSTLESS Capture",
+		0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_OUT("MI2S_UL_HL", "MI2S_TX_HOSTLESS Capture",
+		0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_OUT("INT3_MI2S_UL_HL",
+		"INT3 MI2S_TX Hostless Capture",
+		0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_OUT("TERT_MI2S_UL_HL",
+		"Tertiary MI2S_TX Hostless Capture",
+		0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_OUT("SEC_MI2S_UL_HL",
+		"Secondary MI2S_TX Hostless Capture",
+		0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_OUT("PRI_MI2S_UL_HL",
+		"Primary MI2S_TX Hostless Capture",
+		0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_OUT("MI2S_DL_HL", "MI2S_RX_HOSTLESS Playback",
+		0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_IN("DTMF_DL_HL", "DTMF_RX_HOSTLESS Playback",
+		0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_OUT("QUAT_MI2S_UL_HL",
+		"Quaternary MI2S_TX Hostless Capture",
+		0, 0, 0, 0),
+
+	SND_SOC_DAPM_AIF_IN("PRI_TDM_RX_0_DL_HL",
+		"Primary TDM0 Hostless Playback",
+		0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_OUT("PRI_TDM_TX_0_UL_HL",
+		"Primary TDM0 Hostless Capture",
+		0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_IN("PRI_TDM_RX_1_DL_HL",
+		"Primary TDM1 Hostless Playback",
+		0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_OUT("PRI_TDM_TX_1_UL_HL",
+		"Primary TDM1 Hostless Capture",
+		0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_IN("PRI_TDM_RX_2_DL_HL",
+		"Primary TDM2 Hostless Playback",
+		0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_OUT("PRI_TDM_TX_2_UL_HL",
+		"Primary TDM2 Hostless Capture",
+		0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_IN("PRI_TDM_RX_3_DL_HL",
+		"Primary TDM3 Hostless Playback",
+		0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_OUT("PRI_TDM_TX_3_UL_HL",
+		"Primary TDM3 Hostless Capture",
+		0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_IN("PRI_TDM_RX_4_DL_HL",
+		"Primary TDM4 Hostless Playback",
+		0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_OUT("PRI_TDM_TX_4_UL_HL",
+		"Primary TDM4 Hostless Capture",
+		0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_IN("PRI_TDM_RX_5_DL_HL",
+		"Primary TDM5 Hostless Playback",
+		0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_OUT("PRI_TDM_TX_5_UL_HL",
+		"Primary TDM5 Hostless Capture",
+		0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_IN("PRI_TDM_RX_6_DL_HL",
+		"Primary TDM6 Hostless Playback",
+		0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_OUT("PRI_TDM_TX_6_UL_HL",
+		"Primary TDM6 Hostless Capture",
+		0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_IN("PRI_TDM_RX_7_DL_HL",
+		"Primary TDM7 Hostless Playback",
+		0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_OUT("PRI_TDM_TX_7_UL_HL",
+		"Primary TDM7 Hostless Capture",
+		0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_IN("SEC_TDM_RX_0_DL_HL",
+		"Secondary TDM0 Hostless Playback",
+		0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_OUT("SEC_TDM_TX_0_UL_HL",
+		"Secondary TDM0 Hostless Capture",
+		0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_IN("SEC_TDM_RX_1_DL_HL",
+		"Secondary TDM1 Hostless Playback",
+		0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_OUT("SEC_TDM_TX_1_UL_HL",
+		"Secondary TDM1 Hostless Capture",
+		0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_IN("SEC_TDM_RX_2_DL_HL",
+		"Secondary TDM2 Hostless Playback",
+		0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_OUT("SEC_TDM_TX_2_UL_HL",
+		"Secondary TDM2 Hostless Capture",
+		0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_IN("SEC_TDM_RX_3_DL_HL",
+		"Secondary TDM3 Hostless Playback",
+		0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_OUT("SEC_TDM_TX_3_UL_HL",
+		"Secondary TDM3 Hostless Capture",
+		0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_IN("SEC_TDM_RX_4_DL_HL",
+		"Secondary TDM4 Hostless Playback",
+		0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_OUT("SEC_TDM_TX_4_UL_HL",
+		"Secondary TDM4 Hostless Capture",
+		0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_IN("SEC_TDM_RX_5_DL_HL",
+		"Secondary TDM5 Hostless Playback",
+		0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_OUT("SEC_TDM_TX_5_UL_HL",
+		"Secondary TDM5 Hostless Capture",
+		0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_IN("SEC_TDM_RX_6_DL_HL",
+		"Secondary TDM6 Hostless Playback",
+		0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_OUT("SEC_TDM_TX_6_UL_HL",
+		"Secondary TDM6 Hostless Capture",
+		0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_IN("SEC_TDM_RX_7_DL_HL",
+		"Secondary TDM7 Hostless Playback",
+		0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_OUT("SEC_TDM_TX_7_UL_HL",
+		"Secondary TDM7 Hostless Capture",
+		0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_IN("TERT_TDM_RX_0_DL_HL",
+		"Tertiary TDM0 Hostless Playback",
+		0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_OUT("TERT_TDM_TX_0_UL_HL",
+		"Tertiary TDM0 Hostless Capture",
+		0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_IN("TERT_TDM_RX_1_DL_HL",
+		"Tertiary TDM1 Hostless Playback",
+		0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_OUT("TERT_TDM_TX_1_UL_HL",
+		"Tertiary TDM1 Hostless Capture",
+		0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_IN("TERT_TDM_RX_2_DL_HL",
+		"Tertiary TDM2 Hostless Playback",
+		0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_OUT("TERT_TDM_TX_2_UL_HL",
+		"Tertiary TDM2 Hostless Capture",
+		0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_IN("TERT_TDM_RX_3_DL_HL",
+		"Tertiary TDM3 Hostless Playback",
+		0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_OUT("TERT_TDM_TX_3_UL_HL",
+		"Tertiary TDM3 Hostless Capture",
+		0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_IN("TERT_TDM_RX_4_DL_HL",
+		"Tertiary TDM4 Hostless Playback",
+		0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_OUT("TERT_TDM_TX_4_UL_HL",
+		"Tertiary TDM4 Hostless Capture",
+		0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_IN("TERT_TDM_RX_5_DL_HL",
+		"Tertiary TDM5 Hostless Playback",
+		0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_OUT("TERT_TDM_TX_5_UL_HL",
+		"Tertiary TDM5 Hostless Capture",
+		0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_IN("TERT_TDM_RX_6_DL_HL",
+		"Tertiary TDM6 Hostless Playback",
+		0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_OUT("TERT_TDM_TX_6_UL_HL",
+		"Tertiary TDM6 Hostless Capture",
+		0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_IN("TERT_TDM_RX_7_DL_HL",
+		"Tertiary TDM7 Hostless Playback",
+		0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_OUT("TERT_TDM_TX_7_UL_HL",
+		"Tertiary TDM7 Hostless Capture",
+		0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_IN("QUAT_TDM_RX_0_DL_HL",
+		"Quaternary TDM0 Hostless Playback",
+		0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_OUT("QUAT_TDM_TX_0_UL_HL",
+		"Quaternary TDM0 Hostless Capture",
+		0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_IN("QUAT_TDM_RX_1_DL_HL",
+		"Quaternary TDM1 Hostless Playback",
+		0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_OUT("QUAT_TDM_TX_1_UL_HL",
+		"Quaternary TDM1 Hostless Capture",
+		0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_IN("QUAT_TDM_RX_2_DL_HL",
+		"Quaternary TDM2 Hostless Playback",
+		0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_OUT("QUAT_TDM_TX_2_UL_HL",
+		"Quaternary TDM2 Hostless Capture",
+		0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_IN("QUAT_TDM_RX_3_DL_HL",
+		"Quaternary TDM3 Hostless Playback",
+		0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_OUT("QUAT_TDM_TX_3_UL_HL",
+		"Quaternary TDM3 Hostless Capture",
+		0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_IN("QUAT_TDM_RX_4_DL_HL",
+		"Quaternary TDM4 Hostless Playback",
+		0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_OUT("QUAT_TDM_TX_4_UL_HL",
+		"Quaternary TDM4 Hostless Capture",
+		0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_IN("QUAT_TDM_RX_5_DL_HL",
+		"Quaternary TDM5 Hostless Playback",
+		0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_OUT("QUAT_TDM_TX_5_UL_HL",
+		"Quaternary TDM5 Hostless Capture",
+		0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_IN("QUAT_TDM_RX_6_DL_HL",
+		"Quaternary TDM6 Hostless Playback",
+		0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_OUT("QUAT_TDM_TX_6_UL_HL",
+		"Quaternary TDM6 Hostless Capture",
+		0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_IN("QUAT_TDM_RX_7_DL_HL",
+		"Quaternary TDM7 Hostless Playback",
+		0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_OUT("QUAT_TDM_TX_7_UL_HL",
+		"Quaternary TDM7 Hostless Capture",
+		0, 0, 0, 0),
+
+	/* LSM */
+	SND_SOC_DAPM_AIF_OUT("LSM1_UL_HL", "Listen 1 Audio Service Capture",
+			     0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_OUT("LSM2_UL_HL", "Listen 2 Audio Service Capture",
+			     0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_OUT("LSM3_UL_HL", "Listen 3 Audio Service Capture",
+				 0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_OUT("LSM4_UL_HL", "Listen 4 Audio Service Capture",
+						 0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_OUT("LSM5_UL_HL", "Listen 5 Audio Service Capture",
+				 0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_OUT("LSM6_UL_HL", "Listen 6 Audio Service Capture",
+				 0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_OUT("LSM7_UL_HL", "Listen 7 Audio Service Capture",
+				 0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_OUT("LSM8_UL_HL", "Listen 8 Audio Service Capture",
+			     0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_IN("QCHAT_DL", "QCHAT Playback", 0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_OUT("QCHAT_UL", "QCHAT Capture", 0, 0, 0, 0),
+	/* Backend AIF */
+	/* Stream name equals the back-end DAI link stream name */
+	SND_SOC_DAPM_AIF_OUT("PRI_I2S_RX", "Primary I2S Playback", 0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_OUT("SEC_I2S_RX", "Secondary I2S Playback",
+				0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_OUT("SPDIF_RX", "SPDIF Playback", 0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_OUT("SLIMBUS_0_RX", "Slimbus Playback", 0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_OUT("SLIMBUS_2_RX", "Slimbus2 Playback", 0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_OUT("SLIMBUS_5_RX", "Slimbus5 Playback", 0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_OUT("HDMI", "HDMI Playback", 0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_OUT("DISPLAY_PORT", "Display Port Playback",
+			     0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_OUT("MI2S_RX", "MI2S Playback", 0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_OUT("QUAT_MI2S_RX", "Quaternary MI2S Playback",
+						0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_OUT("TERT_MI2S_RX", "Tertiary MI2S Playback",
+						0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_OUT("SEC_MI2S_RX", "Secondary MI2S Playback",
+			     0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_OUT("SEC_MI2S_RX_SD1",
+			"Secondary MI2S Playback SD1",
+			0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_OUT("PRI_MI2S_RX", "Primary MI2S Playback",
+			     0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_OUT("INT0_MI2S_RX", "INT0 MI2S Playback",
+			     0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_OUT("INT2_MI2S_RX", "INT2 MI2S Playback",
+			     0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_OUT("INT3_MI2S_RX", "INT3 MI2S Playback",
+			     0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_OUT("INT5_MI2S_RX", "INT5 MI2S Playback",
+			     0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_OUT("INT4_MI2S_RX", "INT4 MI2S Playback",
+			     0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_OUT("INT4_MI2S_TX", "INT4 MI2S Capture",
+			     0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_OUT("QUIN_MI2S_RX", "Quinary MI2S Playback",
+						0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_IN("PRI_I2S_TX", "Primary I2S Capture", 0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_IN("MI2S_TX", "MI2S Capture", 0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_IN("QUAT_MI2S_TX", "Quaternary MI2S Capture",
+						0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_IN("PRI_MI2S_TX", "Primary MI2S Capture",
+			    0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_IN("TERT_MI2S_TX", "Tertiary MI2S Capture",
+						0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_IN("INT0_MI2S_TX", "INT0 MI2S Capture",
+						0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_IN("INT2_MI2S_TX", "INT2 MI2S Capture",
+						0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_IN("INT3_MI2S_TX", "INT3 MI2S Capture",
+						0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_IN("SEC_MI2S_TX", "Secondary MI2S Capture",
+			    0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_IN("SLIMBUS_0_TX", "Slimbus Capture", 0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_IN("SLIMBUS_2_TX", "Slimbus2 Capture", 0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_IN("QUIN_MI2S_TX", "Quinary MI2S Capture",
+						0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_IN("SENARY_MI2S_TX", "Senary MI2S Capture",
+						0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_OUT("INT_BT_SCO_RX", "Internal BT-SCO Playback",
+				0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_IN("INT_BT_SCO_TX", "Internal BT-SCO Capture",
+				0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_OUT("INT_BT_A2DP_RX", "Internal BT-A2DP Playback",
+				0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_OUT("INT_FM_RX", "Internal FM Playback",
+				0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_IN("INT_FM_TX", "Internal FM Capture",
+				0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_OUT("PCM_RX", "AFE Playback",
+				0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_IN("PCM_TX", "AFE Capture",
+				0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_OUT("PRI_TDM_RX_0", "Primary TDM0 Playback",
+				0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_IN("PRI_TDM_TX_0", "Primary TDM0 Capture",
+				0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_OUT("PRI_TDM_RX_1", "Primary TDM1 Playback",
+				0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_IN("PRI_TDM_TX_1", "Primary TDM1 Capture",
+				0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_OUT("PRI_TDM_RX_2", "Primary TDM2 Playback",
+				0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_IN("PRI_TDM_TX_2", "Primary TDM2 Capture",
+				0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_OUT("PRI_TDM_RX_3", "Primary TDM3 Playback",
+				0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_IN("PRI_TDM_TX_3", "Primary TDM3 Capture",
+				0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_OUT("PRI_TDM_RX_4", "Primary TDM4 Playback",
+				0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_IN("PRI_TDM_TX_4", "Primary TDM4 Capture",
+				0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_OUT("PRI_TDM_RX_5", "Primary TDM5 Playback",
+				0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_IN("PRI_TDM_TX_5", "Primary TDM5 Capture",
+				0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_OUT("PRI_TDM_RX_6", "Primary TDM6 Playback",
+				0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_IN("PRI_TDM_TX_6", "Primary TDM6 Capture",
+				0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_OUT("PRI_TDM_RX_7", "Primary TDM7 Playback",
+				0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_IN("PRI_TDM_TX_7", "Primary TDM7 Capture",
+				0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_OUT("SEC_TDM_RX_0", "Secondary TDM0 Playback",
+				0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_IN("SEC_TDM_TX_0", "Secondary TDM0 Capture",
+				0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_OUT("SEC_TDM_RX_1", "Secondary TDM1 Playback",
+				0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_IN("SEC_TDM_TX_1", "Secondary TDM1 Capture",
+				0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_OUT("SEC_TDM_RX_2", "Secondary TDM2 Playback",
+				0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_IN("SEC_TDM_TX_2", "Secondary TDM2 Capture",
+				0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_OUT("SEC_TDM_RX_3", "Secondary TDM3 Playback",
+				0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_IN("SEC_TDM_TX_3", "Secondary TDM3 Capture",
+				0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_OUT("SEC_TDM_RX_4", "Secondary TDM4 Playback",
+				0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_IN("SEC_TDM_TX_4", "Secondary TDM4 Capture",
+				0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_OUT("SEC_TDM_RX_5", "Secondary TDM5 Playback",
+				0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_IN("SEC_TDM_TX_5", "Secondary TDM5 Capture",
+				0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_OUT("SEC_TDM_RX_6", "Secondary TDM6 Playback",
+				0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_IN("SEC_TDM_TX_6", "Secondary TDM6 Capture",
+				0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_OUT("SEC_TDM_RX_7", "Secondary TDM7 Playback",
+				0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_IN("SEC_TDM_TX_7", "Secondary TDM7 Capture",
+				0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_OUT("TERT_TDM_RX_0", "Tertiary TDM0 Playback",
+				0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_IN("TERT_TDM_TX_0", "Tertiary TDM0 Capture",
+				0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_OUT("TERT_TDM_RX_1", "Tertiary TDM1 Playback",
+				0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_IN("TERT_TDM_TX_1", "Tertiary TDM1 Capture",
+				0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_OUT("TERT_TDM_RX_2", "Tertiary TDM2 Playback",
+				0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_IN("TERT_TDM_TX_2", "Tertiary TDM2 Capture",
+				0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_OUT("TERT_TDM_RX_3", "Tertiary TDM3 Playback",
+				0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_IN("TERT_TDM_TX_3", "Tertiary TDM3 Capture",
+				0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_OUT("TERT_TDM_RX_4", "Tertiary TDM4 Playback",
+				0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_IN("TERT_TDM_TX_4", "Tertiary TDM4 Capture",
+				0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_OUT("TERT_TDM_RX_5", "Tertiary TDM5 Playback",
+				0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_IN("TERT_TDM_TX_5", "Tertiary TDM5 Capture",
+				0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_OUT("TERT_TDM_RX_6", "Tertiary TDM6 Playback",
+				0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_IN("TERT_TDM_TX_6", "Tertiary TDM6 Capture",
+				0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_OUT("TERT_TDM_RX_7", "Tertiary TDM7 Playback",
+				0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_IN("TERT_TDM_TX_7", "Tertiary TDM7 Capture",
+				0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_OUT("QUAT_TDM_RX_0", "Quaternary TDM0 Playback",
+				0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_IN("QUAT_TDM_TX_0", "Quaternary TDM0 Capture",
+				0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_OUT("QUAT_TDM_RX_1", "Quaternary TDM1 Playback",
+				0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_IN("QUAT_TDM_TX_1", "Quaternary TDM1 Capture",
+				0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_OUT("QUAT_TDM_RX_2", "Quaternary TDM2 Playback",
+				0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_IN("QUAT_TDM_TX_2", "Quaternary TDM2 Capture",
+				0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_OUT("QUAT_TDM_RX_3", "Quaternary TDM3 Playback",
+				0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_IN("QUAT_TDM_TX_3", "Quaternary TDM3 Capture",
+				0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_OUT("QUAT_TDM_RX_4", "Quaternary TDM4 Playback",
+				0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_IN("QUAT_TDM_TX_4", "Quaternary TDM4 Capture",
+				0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_OUT("QUAT_TDM_RX_5", "Quaternary TDM5 Playback",
+				0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_IN("QUAT_TDM_TX_5", "Quaternary TDM5 Capture",
+				0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_OUT("QUAT_TDM_RX_6", "Quaternary TDM6 Playback",
+				0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_IN("QUAT_TDM_TX_6", "Quaternary TDM6 Capture",
+				0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_OUT("QUAT_TDM_RX_7", "Quaternary TDM7 Playback",
+				0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_IN("QUAT_TDM_TX_7", "Quaternary TDM7 Capture",
+				0, 0, 0, 0),
+	/* incall */
+	SND_SOC_DAPM_AIF_OUT("VOICE_PLAYBACK_TX", "Voice Farend Playback",
+				0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_OUT("VOICE2_PLAYBACK_TX", "Voice2 Farend Playback",
+				0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_OUT("SLIMBUS_4_RX", "Slimbus4 Playback",
+				0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_IN("INCALL_RECORD_TX", "Voice Uplink Capture",
+				0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_IN("INCALL_RECORD_RX", "Voice Downlink Capture",
+				0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_IN("SLIMBUS_4_TX", "Slimbus4 Capture",
+				0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_IN("SENARY_TX", "Senary_mi2s Capture",
+				0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_IN("INT5_MI2S_TX", "INT5 MI2S Capture",
+				0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_IN("SLIMBUS_5_TX", "Slimbus5 Capture", 0, 0, 0, 0),
+
+	SND_SOC_DAPM_AIF_OUT("AUX_PCM_RX", "AUX PCM Playback", 0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_IN("AUX_PCM_TX", "AUX PCM Capture", 0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_OUT("SEC_AUX_PCM_RX", "Sec AUX PCM Playback",
+				0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_IN("SEC_AUX_PCM_TX", "Sec AUX PCM Capture",
+				0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_OUT("TERT_AUX_PCM_RX", "Tert AUX PCM Playback",
+				0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_IN("TERT_AUX_PCM_TX", "Tert AUX PCM Capture",
+				0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_OUT("QUAT_AUX_PCM_RX", "Quat AUX PCM Playback",
+				0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_IN("QUAT_AUX_PCM_TX", "Quat AUX PCM Capture",
+				0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_IN("VOICE_STUB_DL", "VOICE_STUB Playback", 0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_OUT("VOICE_STUB_UL", "VOICE_STUB Capture", 0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_IN("VOICE2_STUB_DL", "VOICE2_STUB Playback",
+			    0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_OUT("VOICE2_STUB_UL", "VOICE2_STUB Capture",
+			    0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_IN("VOLTE_STUB_DL", "VOLTE_STUB Playback", 0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_OUT("VOLTE_STUB_UL", "VOLTE_STUB Capture", 0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_OUT("STUB_RX", "Stub Playback", 0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_IN("STUB_TX", "Stub Capture", 0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_OUT("SLIMBUS_1_RX", "Slimbus1 Playback", 0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_IN("SLIMBUS_1_TX", "Slimbus1 Capture", 0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_IN("STUB_1_TX", "Stub1 Capture", 0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_OUT("SLIMBUS_3_RX", "Slimbus3 Playback", 0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_IN("SLIMBUS_3_TX", "Slimbus3 Capture", 0, 0, 0, 0),
+	/* In- call recording */
+	SND_SOC_DAPM_AIF_OUT("SLIMBUS_6_RX", "Slimbus6 Playback", 0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_IN("SLIMBUS_6_TX", "Slimbus6 Capture", 0, 0, 0, 0),
+
+	SND_SOC_DAPM_AIF_OUT("SLIMBUS_7_RX", "Slimbus7 Playback", 0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_IN("SLIMBUS_7_TX", "Slimbus7 Capture", 0, 0, 0, 0),
+
+	SND_SOC_DAPM_AIF_OUT("SLIMBUS_8_RX", "Slimbus8 Playback", 0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_IN("SLIMBUS_8_TX", "Slimbus8 Capture", 0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_OUT("USB_AUDIO_RX", "USB Audio Playback", 0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_IN("USB_AUDIO_TX", "USB Audio Capture", 0, 0, 0, 0),
+
+	/* Switch Definitions */
+	SND_SOC_DAPM_SWITCH("SLIMBUS_DL_HL", SND_SOC_NOPM, 0, 0,
+				&slim_fm_switch_mixer_controls),
+	SND_SOC_DAPM_SWITCH("SLIMBUS1_DL_HL", SND_SOC_NOPM, 0, 0,
+				&slim1_fm_switch_mixer_controls),
+	SND_SOC_DAPM_SWITCH("SLIMBUS3_DL_HL", SND_SOC_NOPM, 0, 0,
+				&slim3_fm_switch_mixer_controls),
+	SND_SOC_DAPM_SWITCH("SLIMBUS4_DL_HL", SND_SOC_NOPM, 0, 0,
+				&slim4_fm_switch_mixer_controls),
+	SND_SOC_DAPM_SWITCH("SLIMBUS6_DL_HL", SND_SOC_NOPM, 0, 0,
+				&slim6_fm_switch_mixer_controls),
+	SND_SOC_DAPM_SWITCH("PCM_RX_DL_HL", SND_SOC_NOPM, 0, 0,
+				&pcm_rx_switch_mixer_controls),
+	SND_SOC_DAPM_SWITCH("INT0_MI2S_RX_DL_HL", SND_SOC_NOPM, 0, 0,
+				&int0_mi2s_rx_switch_mixer_controls),
+	SND_SOC_DAPM_SWITCH("INT4_MI2S_RX_DL_HL", SND_SOC_NOPM, 0, 0,
+				&int4_mi2s_rx_switch_mixer_controls),
+	SND_SOC_DAPM_SWITCH("PRI_MI2S_RX_DL_HL", SND_SOC_NOPM, 0, 0,
+				&pri_mi2s_rx_switch_mixer_controls),
+	SND_SOC_DAPM_SWITCH("SEC_MI2S_RX_DL_HL", SND_SOC_NOPM, 0, 0,
+				&sec_mi2s_rx_switch_mixer_controls),
+	SND_SOC_DAPM_SWITCH("TERT_MI2S_RX_DL_HL", SND_SOC_NOPM, 0, 0,
+				&tert_mi2s_rx_switch_mixer_controls),
+	SND_SOC_DAPM_SWITCH("QUAT_MI2S_RX_DL_HL", SND_SOC_NOPM, 0, 0,
+				&quat_mi2s_rx_switch_mixer_controls),
+	SND_SOC_DAPM_SWITCH("HFP_PRI_AUX_UL_HL", SND_SOC_NOPM, 0, 0,
+				&hfp_pri_aux_switch_mixer_controls),
+	SND_SOC_DAPM_SWITCH("HFP_AUX_UL_HL", SND_SOC_NOPM, 0, 0,
+				&hfp_aux_switch_mixer_controls),
+	SND_SOC_DAPM_SWITCH("HFP_INT_UL_HL", SND_SOC_NOPM, 0, 0,
+				&hfp_int_switch_mixer_controls),
+	SND_SOC_DAPM_SWITCH("HFP_SLIM7_UL_HL", SND_SOC_NOPM, 0, 0,
+				&hfp_slim7_switch_mixer_controls),
+	SND_SOC_DAPM_SWITCH("USB_DL_HL", SND_SOC_NOPM, 0, 0,
+				&usb_switch_mixer_controls),
+
+	/* Mixer definitions */
+	SND_SOC_DAPM_MIXER("PRI_RX Audio Mixer", SND_SOC_NOPM, 0, 0,
+	pri_i2s_rx_mixer_controls, ARRAY_SIZE(pri_i2s_rx_mixer_controls)),
+	SND_SOC_DAPM_MIXER("SEC_RX Audio Mixer", SND_SOC_NOPM, 0, 0,
+	sec_i2s_rx_mixer_controls, ARRAY_SIZE(sec_i2s_rx_mixer_controls)),
+	SND_SOC_DAPM_MIXER("SLIMBUS_0_RX Audio Mixer", SND_SOC_NOPM, 0, 0,
+	slimbus_rx_mixer_controls, ARRAY_SIZE(slimbus_rx_mixer_controls)),
+	SND_SOC_DAPM_MIXER("SLIMBUS_2_RX Audio Mixer", SND_SOC_NOPM, 0, 0,
+	slimbus_2_rx_mixer_controls, ARRAY_SIZE(slimbus_2_rx_mixer_controls)),
+	SND_SOC_DAPM_MIXER("SLIMBUS_5_RX Audio Mixer", SND_SOC_NOPM, 0, 0,
+	slimbus_5_rx_mixer_controls, ARRAY_SIZE(slimbus_5_rx_mixer_controls)),
+	SND_SOC_DAPM_MIXER("SLIMBUS_7_RX Audio Mixer", SND_SOC_NOPM, 0, 0,
+	slimbus_7_rx_mixer_controls, ARRAY_SIZE(slimbus_7_rx_mixer_controls)),
+	SND_SOC_DAPM_MIXER("HDMI Mixer", SND_SOC_NOPM, 0, 0,
+	hdmi_mixer_controls, ARRAY_SIZE(hdmi_mixer_controls)),
+	SND_SOC_DAPM_MIXER("DISPLAY_PORT Mixer", SND_SOC_NOPM, 0, 0,
+	display_port_mixer_controls, ARRAY_SIZE(display_port_mixer_controls)),
+	SND_SOC_DAPM_MIXER("SPDIF_RX Audio Mixer", SND_SOC_NOPM, 0, 0,
+	spdif_rx_mixer_controls, ARRAY_SIZE(spdif_rx_mixer_controls)),
+	SND_SOC_DAPM_MIXER("MI2S_RX Audio Mixer", SND_SOC_NOPM, 0, 0,
+	mi2s_rx_mixer_controls, ARRAY_SIZE(mi2s_rx_mixer_controls)),
+	SND_SOC_DAPM_MIXER("QUAT_MI2S_RX Audio Mixer", SND_SOC_NOPM, 0, 0,
+				quaternary_mi2s_rx_mixer_controls,
+				ARRAY_SIZE(quaternary_mi2s_rx_mixer_controls)),
+	SND_SOC_DAPM_MIXER("TERT_MI2S_RX Audio Mixer", SND_SOC_NOPM, 0, 0,
+				tertiary_mi2s_rx_mixer_controls,
+				ARRAY_SIZE(tertiary_mi2s_rx_mixer_controls)),
+	SND_SOC_DAPM_MIXER("SEC_MI2S_RX Audio Mixer", SND_SOC_NOPM, 0, 0,
+			   secondary_mi2s_rx_mixer_controls,
+			   ARRAY_SIZE(secondary_mi2s_rx_mixer_controls)),
+	SND_SOC_DAPM_MIXER("SEC_MI2S_RX_SD1 Audio Mixer", SND_SOC_NOPM, 0, 0,
+			   secondary_mi2s_rx2_mixer_controls,
+			   ARRAY_SIZE(secondary_mi2s_rx2_mixer_controls)),
+	SND_SOC_DAPM_MIXER("PRI_MI2S_RX Audio Mixer", SND_SOC_NOPM, 0, 0,
+			   primary_mi2s_rx_mixer_controls,
+			   ARRAY_SIZE(primary_mi2s_rx_mixer_controls)),
+	SND_SOC_DAPM_MIXER("INT0_MI2S_RX Audio Mixer", SND_SOC_NOPM, 0, 0,
+			   int0_mi2s_rx_mixer_controls,
+			   ARRAY_SIZE(int0_mi2s_rx_mixer_controls)),
+	SND_SOC_DAPM_MIXER("INT4_MI2S_RX Audio Mixer", SND_SOC_NOPM, 0, 0,
+			   int4_mi2s_rx_mixer_controls,
+			   ARRAY_SIZE(int4_mi2s_rx_mixer_controls)),
+	SND_SOC_DAPM_MIXER("QUIN_MI2S_RX Audio Mixer", SND_SOC_NOPM, 0, 0,
+				quinary_mi2s_rx_mixer_controls,
+				ARRAY_SIZE(quinary_mi2s_rx_mixer_controls)),
+	SND_SOC_DAPM_MIXER("PRI_TDM_RX_0 Audio Mixer", SND_SOC_NOPM, 0, 0,
+				pri_tdm_rx_0_mixer_controls,
+				ARRAY_SIZE(pri_tdm_rx_0_mixer_controls)),
+	SND_SOC_DAPM_MIXER("PRI_TDM_RX_1 Audio Mixer", SND_SOC_NOPM, 0, 0,
+				pri_tdm_rx_1_mixer_controls,
+				ARRAY_SIZE(pri_tdm_rx_1_mixer_controls)),
+	SND_SOC_DAPM_MIXER("PRI_TDM_RX_2 Audio Mixer", SND_SOC_NOPM, 0, 0,
+				pri_tdm_rx_2_mixer_controls,
+				ARRAY_SIZE(pri_tdm_rx_2_mixer_controls)),
+	SND_SOC_DAPM_MIXER("PRI_TDM_RX_3 Audio Mixer", SND_SOC_NOPM, 0, 0,
+				pri_tdm_rx_3_mixer_controls,
+				ARRAY_SIZE(pri_tdm_rx_3_mixer_controls)),
+	SND_SOC_DAPM_MIXER("PRI_TDM_TX_0 Audio Mixer", SND_SOC_NOPM, 0, 0,
+				pri_tdm_tx_0_mixer_controls,
+				ARRAY_SIZE(pri_tdm_tx_0_mixer_controls)),
+	SND_SOC_DAPM_MIXER("SEC_TDM_RX_0 Audio Mixer", SND_SOC_NOPM, 0, 0,
+				sec_tdm_rx_0_mixer_controls,
+				ARRAY_SIZE(sec_tdm_rx_0_mixer_controls)),
+	SND_SOC_DAPM_MIXER("SEC_TDM_RX_1 Audio Mixer", SND_SOC_NOPM, 0, 0,
+				sec_tdm_rx_1_mixer_controls,
+				ARRAY_SIZE(sec_tdm_rx_1_mixer_controls)),
+	SND_SOC_DAPM_MIXER("SEC_TDM_RX_2 Audio Mixer", SND_SOC_NOPM, 0, 0,
+				sec_tdm_rx_2_mixer_controls,
+				ARRAY_SIZE(sec_tdm_rx_2_mixer_controls)),
+	SND_SOC_DAPM_MIXER("SEC_TDM_RX_3 Audio Mixer", SND_SOC_NOPM, 0, 0,
+				sec_tdm_rx_3_mixer_controls,
+				ARRAY_SIZE(sec_tdm_rx_3_mixer_controls)),
+	SND_SOC_DAPM_MIXER("SEC_TDM_TX_0 Audio Mixer", SND_SOC_NOPM, 0, 0,
+				sec_tdm_tx_0_mixer_controls,
+				ARRAY_SIZE(sec_tdm_tx_0_mixer_controls)),
+	SND_SOC_DAPM_MIXER("TERT_TDM_RX_0 Audio Mixer", SND_SOC_NOPM, 0, 0,
+				tert_tdm_rx_0_mixer_controls,
+				ARRAY_SIZE(tert_tdm_rx_0_mixer_controls)),
+	SND_SOC_DAPM_MIXER("TERT_TDM_TX_0 Audio Mixer", SND_SOC_NOPM, 0, 0,
+				tert_tdm_tx_0_mixer_controls,
+				ARRAY_SIZE(tert_tdm_tx_0_mixer_controls)),
+	SND_SOC_DAPM_MIXER("TERT_TDM_RX_1 Audio Mixer", SND_SOC_NOPM, 0, 0,
+				tert_tdm_rx_1_mixer_controls,
+				ARRAY_SIZE(tert_tdm_rx_1_mixer_controls)),
+	SND_SOC_DAPM_MIXER("TERT_TDM_RX_2 Audio Mixer", SND_SOC_NOPM, 0, 0,
+				tert_tdm_rx_2_mixer_controls,
+				ARRAY_SIZE(tert_tdm_rx_2_mixer_controls)),
+	SND_SOC_DAPM_MIXER("TERT_TDM_RX_3 Audio Mixer", SND_SOC_NOPM, 0, 0,
+				tert_tdm_rx_3_mixer_controls,
+				ARRAY_SIZE(tert_tdm_rx_3_mixer_controls)),
+	SND_SOC_DAPM_MIXER("TERT_TDM_RX_4 Audio Mixer", SND_SOC_NOPM, 0, 0,
+				tert_tdm_rx_4_mixer_controls,
+				ARRAY_SIZE(tert_tdm_rx_4_mixer_controls)),
+	SND_SOC_DAPM_MIXER("QUAT_TDM_RX_0 Audio Mixer", SND_SOC_NOPM, 0, 0,
+				quat_tdm_rx_0_mixer_controls,
+				ARRAY_SIZE(quat_tdm_rx_0_mixer_controls)),
+	SND_SOC_DAPM_MIXER("QUAT_TDM_TX_0 Audio Mixer", SND_SOC_NOPM, 0, 0,
+				quat_tdm_tx_0_mixer_controls,
+				ARRAY_SIZE(quat_tdm_tx_0_mixer_controls)),
+	SND_SOC_DAPM_MIXER("QUAT_TDM_RX_1 Audio Mixer", SND_SOC_NOPM, 0, 0,
+				quat_tdm_rx_1_mixer_controls,
+				ARRAY_SIZE(quat_tdm_rx_1_mixer_controls)),
+	SND_SOC_DAPM_MIXER("QUAT_TDM_RX_2 Audio Mixer", SND_SOC_NOPM, 0, 0,
+				quat_tdm_rx_2_mixer_controls,
+				ARRAY_SIZE(quat_tdm_rx_2_mixer_controls)),
+	SND_SOC_DAPM_MIXER("QUAT_TDM_RX_3 Audio Mixer", SND_SOC_NOPM, 0, 0,
+				quat_tdm_rx_3_mixer_controls,
+				ARRAY_SIZE(quat_tdm_rx_3_mixer_controls)),
+	SND_SOC_DAPM_MIXER("MultiMedia1 Mixer", SND_SOC_NOPM, 0, 0,
+	mmul1_mixer_controls, ARRAY_SIZE(mmul1_mixer_controls)),
+	SND_SOC_DAPM_MIXER("MultiMedia2 Mixer", SND_SOC_NOPM, 0, 0,
+	mmul2_mixer_controls, ARRAY_SIZE(mmul2_mixer_controls)),
+	SND_SOC_DAPM_MIXER("MultiMedia3 Mixer", SND_SOC_NOPM, 0, 0,
+	mmul3_mixer_controls, ARRAY_SIZE(mmul3_mixer_controls)),
+	SND_SOC_DAPM_MIXER("MultiMedia4 Mixer", SND_SOC_NOPM, 0, 0,
+	mmul4_mixer_controls, ARRAY_SIZE(mmul4_mixer_controls)),
+	SND_SOC_DAPM_MIXER("MultiMedia5 Mixer", SND_SOC_NOPM, 0, 0,
+	mmul5_mixer_controls, ARRAY_SIZE(mmul5_mixer_controls)),
+	SND_SOC_DAPM_MIXER("MultiMedia6 Mixer", SND_SOC_NOPM, 0, 0,
+	mmul6_mixer_controls, ARRAY_SIZE(mmul6_mixer_controls)),
+	SND_SOC_DAPM_MIXER("MultiMedia8 Mixer", SND_SOC_NOPM, 0, 0,
+	mmul8_mixer_controls, ARRAY_SIZE(mmul8_mixer_controls)),
+	SND_SOC_DAPM_MIXER("MultiMedia9 Mixer", SND_SOC_NOPM, 0, 0,
+	mmul9_mixer_controls, ARRAY_SIZE(mmul9_mixer_controls)),
+	SND_SOC_DAPM_MIXER("MultiMedia16 Mixer", SND_SOC_NOPM, 0, 0,
+	mmul16_mixer_controls, ARRAY_SIZE(mmul16_mixer_controls)),
+	SND_SOC_DAPM_MIXER("MultiMedia17 Mixer", SND_SOC_NOPM, 0, 0,
+	mmul17_mixer_controls, ARRAY_SIZE(mmul17_mixer_controls)),
+	SND_SOC_DAPM_MIXER("MultiMedia18 Mixer", SND_SOC_NOPM, 0, 0,
+	mmul18_mixer_controls, ARRAY_SIZE(mmul18_mixer_controls)),
+	SND_SOC_DAPM_MIXER("MultiMedia19 Mixer", SND_SOC_NOPM, 0, 0,
+	mmul19_mixer_controls, ARRAY_SIZE(mmul19_mixer_controls)),
+	SND_SOC_DAPM_MIXER("MultiMedia20 Mixer", SND_SOC_NOPM, 0, 0,
+	mmul20_mixer_controls, ARRAY_SIZE(mmul20_mixer_controls)),
+	SND_SOC_DAPM_MIXER("MultiMedia21 Mixer", SND_SOC_NOPM, 0, 0,
+	mmul21_mixer_controls, ARRAY_SIZE(mmul21_mixer_controls)),
+	SND_SOC_DAPM_MIXER("MultiMedia27 Mixer", SND_SOC_NOPM, 0, 0,
+	mmul27_mixer_controls, ARRAY_SIZE(mmul27_mixer_controls)),
+	SND_SOC_DAPM_MIXER("AUX_PCM_RX Audio Mixer", SND_SOC_NOPM, 0, 0,
+	auxpcm_rx_mixer_controls, ARRAY_SIZE(auxpcm_rx_mixer_controls)),
+	SND_SOC_DAPM_MIXER("SEC_AUX_PCM_RX Audio Mixer", SND_SOC_NOPM, 0, 0,
+	sec_auxpcm_rx_mixer_controls, ARRAY_SIZE(sec_auxpcm_rx_mixer_controls)),
+	SND_SOC_DAPM_MIXER("TERT_AUX_PCM_RX Audio Mixer", SND_SOC_NOPM, 0, 0,
+	tert_auxpcm_rx_mixer_controls,
+	ARRAY_SIZE(tert_auxpcm_rx_mixer_controls)),
+	SND_SOC_DAPM_MIXER("QUAT_AUX_PCM_RX Audio Mixer", SND_SOC_NOPM, 0, 0,
+	quat_auxpcm_rx_mixer_controls,
+	ARRAY_SIZE(quat_auxpcm_rx_mixer_controls)),
+	/* incall */
+	SND_SOC_DAPM_MIXER("Incall_Music Audio Mixer", SND_SOC_NOPM, 0, 0,
+	incall_music_delivery_mixer_controls,
+	ARRAY_SIZE(incall_music_delivery_mixer_controls)),
+	SND_SOC_DAPM_MIXER("Incall_Music_2 Audio Mixer", SND_SOC_NOPM, 0, 0,
+	incall_music2_delivery_mixer_controls,
+	ARRAY_SIZE(incall_music2_delivery_mixer_controls)),
+	SND_SOC_DAPM_MIXER("SLIMBUS_4_RX Audio Mixer", SND_SOC_NOPM, 0, 0,
+	slimbus_4_rx_mixer_controls,
+	ARRAY_SIZE(slimbus_4_rx_mixer_controls)),
+	SND_SOC_DAPM_MIXER("SLIMBUS_6_RX Audio Mixer", SND_SOC_NOPM, 0, 0,
+	slimbus_6_rx_mixer_controls,
+	ARRAY_SIZE(slimbus_6_rx_mixer_controls)),
+	SND_SOC_DAPM_MIXER("USB_AUDIO_RX Audio Mixer", SND_SOC_NOPM, 0, 0,
+	usb_audio_rx_mixer_controls,
+	ARRAY_SIZE(usb_audio_rx_mixer_controls)),
+	/* Voice Mixer */
+	SND_SOC_DAPM_MIXER("PRI_RX_Voice Mixer",
+				SND_SOC_NOPM, 0, 0, pri_rx_voice_mixer_controls,
+				ARRAY_SIZE(pri_rx_voice_mixer_controls)),
+	SND_SOC_DAPM_MIXER("SEC_RX_Voice Mixer",
+				SND_SOC_NOPM, 0, 0,
+				sec_i2s_rx_voice_mixer_controls,
+				ARRAY_SIZE(sec_i2s_rx_voice_mixer_controls)),
+	SND_SOC_DAPM_MIXER("SEC_MI2S_RX_Voice Mixer",
+				SND_SOC_NOPM, 0, 0,
+				sec_mi2s_rx_voice_mixer_controls,
+				ARRAY_SIZE(sec_mi2s_rx_voice_mixer_controls)),
+	SND_SOC_DAPM_MIXER("SLIM_0_RX_Voice Mixer",
+				SND_SOC_NOPM, 0, 0,
+				slimbus_rx_voice_mixer_controls,
+				ARRAY_SIZE(slimbus_rx_voice_mixer_controls)),
+	SND_SOC_DAPM_MIXER("INTERNAL_BT_SCO_RX_Voice Mixer",
+				SND_SOC_NOPM, 0, 0,
+				bt_sco_rx_voice_mixer_controls,
+				ARRAY_SIZE(bt_sco_rx_voice_mixer_controls)),
+	SND_SOC_DAPM_MIXER("AFE_PCM_RX_Voice Mixer",
+				SND_SOC_NOPM, 0, 0,
+				afe_pcm_rx_voice_mixer_controls,
+				ARRAY_SIZE(afe_pcm_rx_voice_mixer_controls)),
+	SND_SOC_DAPM_MIXER("AUX_PCM_RX_Voice Mixer",
+				SND_SOC_NOPM, 0, 0,
+				aux_pcm_rx_voice_mixer_controls,
+				ARRAY_SIZE(aux_pcm_rx_voice_mixer_controls)),
+	SND_SOC_DAPM_MIXER("SEC_AUX_PCM_RX_Voice Mixer",
+			      SND_SOC_NOPM, 0, 0,
+			      sec_aux_pcm_rx_voice_mixer_controls,
+			      ARRAY_SIZE(sec_aux_pcm_rx_voice_mixer_controls)),
+	SND_SOC_DAPM_MIXER("TERT_AUX_PCM_RX_Voice Mixer",
+			      SND_SOC_NOPM, 0, 0,
+			      tert_aux_pcm_rx_voice_mixer_controls,
+			      ARRAY_SIZE(tert_aux_pcm_rx_voice_mixer_controls)),
+	SND_SOC_DAPM_MIXER("QUAT_AUX_PCM_RX_Voice Mixer",
+			      SND_SOC_NOPM, 0, 0,
+			      quat_aux_pcm_rx_voice_mixer_controls,
+			      ARRAY_SIZE(quat_aux_pcm_rx_voice_mixer_controls)),
+	SND_SOC_DAPM_MIXER("HDMI_RX_Voice Mixer",
+				SND_SOC_NOPM, 0, 0,
+				hdmi_rx_voice_mixer_controls,
+				ARRAY_SIZE(hdmi_rx_voice_mixer_controls)),
+	SND_SOC_DAPM_MIXER("MI2S_RX_Voice Mixer",
+				SND_SOC_NOPM, 0, 0,
+				mi2s_rx_voice_mixer_controls,
+				ARRAY_SIZE(mi2s_rx_voice_mixer_controls)),
+	SND_SOC_DAPM_MIXER("PRI_MI2S_RX_Voice Mixer",
+				SND_SOC_NOPM, 0, 0,
+				pri_mi2s_rx_voice_mixer_controls,
+				ARRAY_SIZE(pri_mi2s_rx_voice_mixer_controls)),
+	SND_SOC_DAPM_MIXER("INT0_MI2S_RX_Voice Mixer",
+				SND_SOC_NOPM, 0, 0,
+				int0_mi2s_rx_voice_mixer_controls,
+				ARRAY_SIZE(int0_mi2s_rx_voice_mixer_controls)),
+	SND_SOC_DAPM_MIXER("INT4_MI2S_RX_Voice Mixer",
+				SND_SOC_NOPM, 0, 0,
+				int4_mi2s_rx_voice_mixer_controls,
+				ARRAY_SIZE(int4_mi2s_rx_voice_mixer_controls)),
+	SND_SOC_DAPM_MIXER("TERT_MI2S_RX_Voice Mixer",
+				SND_SOC_NOPM, 0, 0,
+				tert_mi2s_rx_voice_mixer_controls,
+				ARRAY_SIZE(tert_mi2s_rx_voice_mixer_controls)),
+	SND_SOC_DAPM_MIXER("QUAT_MI2S_RX_Voice Mixer",
+				SND_SOC_NOPM, 0, 0,
+				quat_mi2s_rx_voice_mixer_controls,
+				ARRAY_SIZE(quat_mi2s_rx_voice_mixer_controls)),
+	SND_SOC_DAPM_MIXER("QUIN_MI2S_RX_Voice Mixer",
+				SND_SOC_NOPM, 0, 0,
+				quin_mi2s_rx_voice_mixer_controls,
+				ARRAY_SIZE(quin_mi2s_rx_voice_mixer_controls)),
+	SND_SOC_DAPM_MIXER("QUAT_TDM_RX_2_Voice Mixer",
+				SND_SOC_NOPM, 0, 0,
+				quat_tdm_rx_2_voice_mixer_controls,
+				ARRAY_SIZE(quat_tdm_rx_2_voice_mixer_controls)),
+	SND_SOC_DAPM_MIXER("Voice_Tx Mixer",
+				SND_SOC_NOPM, 0, 0, tx_voice_mixer_controls,
+				ARRAY_SIZE(tx_voice_mixer_controls)),
+	SND_SOC_DAPM_MIXER("Voice2_Tx Mixer",
+			   SND_SOC_NOPM, 0, 0, tx_voice2_mixer_controls,
+			   ARRAY_SIZE(tx_voice2_mixer_controls)),
+	SND_SOC_DAPM_MIXER("Voip_Tx Mixer",
+				SND_SOC_NOPM, 0, 0, tx_voip_mixer_controls,
+				ARRAY_SIZE(tx_voip_mixer_controls)),
+	SND_SOC_DAPM_MIXER("VoLTE_Tx Mixer",
+				SND_SOC_NOPM, 0, 0, tx_volte_mixer_controls,
+				ARRAY_SIZE(tx_volte_mixer_controls)),
+	SND_SOC_DAPM_MIXER("VoWLAN_Tx Mixer",
+				SND_SOC_NOPM, 0, 0, tx_vowlan_mixer_controls,
+				ARRAY_SIZE(tx_vowlan_mixer_controls)),
+	SND_SOC_DAPM_MIXER("VoiceMMode1_Tx Mixer",
+			   SND_SOC_NOPM, 0, 0, tx_voicemmode1_mixer_controls,
+			   ARRAY_SIZE(tx_voicemmode1_mixer_controls)),
+	SND_SOC_DAPM_MIXER("VoiceMMode2_Tx Mixer",
+			   SND_SOC_NOPM, 0, 0, tx_voicemmode2_mixer_controls,
+			   ARRAY_SIZE(tx_voicemmode2_mixer_controls)),
+	SND_SOC_DAPM_MIXER("INTERNAL_BT_SCO_RX Audio Mixer", SND_SOC_NOPM, 0, 0,
+	int_bt_sco_rx_mixer_controls, ARRAY_SIZE(int_bt_sco_rx_mixer_controls)),
+	SND_SOC_DAPM_MIXER("INTERNAL_A2DP_RX Audio Mixer", SND_SOC_NOPM, 0, 0,
+			int_bt_a2dp_rx_mixer_controls,
+			ARRAY_SIZE(int_bt_a2dp_rx_mixer_controls)),
+	SND_SOC_DAPM_MIXER("INTERNAL_FM_RX Audio Mixer", SND_SOC_NOPM, 0, 0,
+	int_fm_rx_mixer_controls, ARRAY_SIZE(int_fm_rx_mixer_controls)),
+	SND_SOC_DAPM_MIXER("AFE_PCM_RX Audio Mixer", SND_SOC_NOPM, 0, 0,
+	afe_pcm_rx_mixer_controls, ARRAY_SIZE(afe_pcm_rx_mixer_controls)),
+	SND_SOC_DAPM_MIXER("Voice Stub Tx Mixer", SND_SOC_NOPM, 0, 0,
+	tx_voice_stub_mixer_controls, ARRAY_SIZE(tx_voice_stub_mixer_controls)),
+	SND_SOC_DAPM_MIXER("Voice2 Stub Tx Mixer", SND_SOC_NOPM, 0, 0,
+			   tx_voice2_stub_mixer_controls,
+			   ARRAY_SIZE(tx_voice2_stub_mixer_controls)),
+	SND_SOC_DAPM_MIXER("VoLTE Stub Tx Mixer", SND_SOC_NOPM, 0, 0,
+	tx_volte_stub_mixer_controls, ARRAY_SIZE(tx_volte_stub_mixer_controls)),
+	SND_SOC_DAPM_MIXER("STUB_RX Mixer", SND_SOC_NOPM, 0, 0,
+	stub_rx_mixer_controls, ARRAY_SIZE(stub_rx_mixer_controls)),
+	SND_SOC_DAPM_MIXER("SLIMBUS_1_RX Mixer", SND_SOC_NOPM, 0, 0,
+	slimbus_1_rx_mixer_controls, ARRAY_SIZE(slimbus_1_rx_mixer_controls)),
+	SND_SOC_DAPM_MIXER("SLIMBUS_3_RX_Voice Mixer", SND_SOC_NOPM, 0, 0,
+	slimbus_3_rx_mixer_controls, ARRAY_SIZE(slimbus_3_rx_mixer_controls)),
+	SND_SOC_DAPM_MIXER("SLIM_6_RX_Voice Mixer",
+			SND_SOC_NOPM, 0, 0,
+			slimbus_6_rx_voice_mixer_controls,
+			ARRAY_SIZE(slimbus_6_rx_voice_mixer_controls)),
+	SND_SOC_DAPM_MIXER("SLIM_7_RX_Voice Mixer", SND_SOC_NOPM, 0, 0,
+			   slimbus_7_rx_voice_mixer_controls,
+			   ARRAY_SIZE(slimbus_7_rx_voice_mixer_controls)),
+	SND_SOC_DAPM_MIXER("SLIM_8_RX_Voice Mixer", SND_SOC_NOPM, 0, 0,
+			   slimbus_8_rx_voice_mixer_controls,
+			   ARRAY_SIZE(slimbus_8_rx_voice_mixer_controls)),
+	/* port mixers (back-end TX ports into back-end RX ports) */
+	SND_SOC_DAPM_MIXER("SLIMBUS_0_RX Port Mixer",
+	SND_SOC_NOPM, 0, 0, sbus_0_rx_port_mixer_controls,
+	ARRAY_SIZE(sbus_0_rx_port_mixer_controls)),
+	SND_SOC_DAPM_MIXER("AUX_PCM_RX Port Mixer",
+	SND_SOC_NOPM, 0, 0, aux_pcm_rx_port_mixer_controls,
+	ARRAY_SIZE(aux_pcm_rx_port_mixer_controls)),
+	SND_SOC_DAPM_MIXER("SEC_AUXPCM_RX Port Mixer",
+	SND_SOC_NOPM, 0, 0, sec_auxpcm_rx_port_mixer_controls,
+	ARRAY_SIZE(sec_auxpcm_rx_port_mixer_controls)),
+	SND_SOC_DAPM_MIXER("TERT_AUXPCM_RX Port Mixer",
+	SND_SOC_NOPM, 0, 0, tert_auxpcm_rx_port_mixer_controls,
+	ARRAY_SIZE(tert_auxpcm_rx_port_mixer_controls)),
+	SND_SOC_DAPM_MIXER("QUAT_AUXPCM_RX Port Mixer",
+	SND_SOC_NOPM, 0, 0, quat_auxpcm_rx_port_mixer_controls,
+	ARRAY_SIZE(quat_auxpcm_rx_port_mixer_controls)),
+	SND_SOC_DAPM_MIXER("SLIMBUS_1_RX Port Mixer", SND_SOC_NOPM, 0, 0,
+	sbus_1_rx_port_mixer_controls,
+	ARRAY_SIZE(sbus_1_rx_port_mixer_controls)),
+	SND_SOC_DAPM_MIXER("INTERNAL_BT_SCO_RX Port Mixer", SND_SOC_NOPM, 0, 0,
+	bt_sco_rx_port_mixer_controls,
+	ARRAY_SIZE(bt_sco_rx_port_mixer_controls)),
+	SND_SOC_DAPM_MIXER("AFE_PCM_RX Port Mixer",
+	SND_SOC_NOPM, 0, 0, afe_pcm_rx_port_mixer_controls,
+	ARRAY_SIZE(afe_pcm_rx_port_mixer_controls)),
+	SND_SOC_DAPM_MIXER("HDMI_RX Port Mixer",
+	SND_SOC_NOPM, 0, 0, hdmi_rx_port_mixer_controls,
+	ARRAY_SIZE(hdmi_rx_port_mixer_controls)),
+	SND_SOC_DAPM_MIXER("DISPLAY_PORT_RX Port Mixer",
+	SND_SOC_NOPM, 0, 0, display_port_rx_port_mixer_controls,
+	ARRAY_SIZE(display_port_rx_port_mixer_controls)),
+	SND_SOC_DAPM_MIXER("SEC_I2S_RX Port Mixer",
+	SND_SOC_NOPM, 0, 0, sec_i2s_rx_port_mixer_controls,
+	ARRAY_SIZE(sec_i2s_rx_port_mixer_controls)),
+	SND_SOC_DAPM_MIXER("SLIMBUS_3_RX Port Mixer",
+	SND_SOC_NOPM, 0, 0, sbus_3_rx_port_mixer_controls,
+	ARRAY_SIZE(sbus_3_rx_port_mixer_controls)),
+	SND_SOC_DAPM_MIXER("SLIMBUS_6_RX Port Mixer",
+	SND_SOC_NOPM, 0, 0, sbus_6_rx_port_mixer_controls,
+	ARRAY_SIZE(sbus_6_rx_port_mixer_controls)),
+	SND_SOC_DAPM_MIXER("MI2S_RX Port Mixer", SND_SOC_NOPM, 0, 0,
+	mi2s_rx_port_mixer_controls, ARRAY_SIZE(mi2s_rx_port_mixer_controls)),
+	SND_SOC_DAPM_MIXER("PRI_MI2S_RX Port Mixer", SND_SOC_NOPM, 0, 0,
+	primary_mi2s_rx_port_mixer_controls,
+	ARRAY_SIZE(primary_mi2s_rx_port_mixer_controls)),
+	SND_SOC_DAPM_MIXER("SEC_MI2S_RX Port Mixer", SND_SOC_NOPM, 0, 0,
+	sec_mi2s_rx_port_mixer_controls,
+	ARRAY_SIZE(sec_mi2s_rx_port_mixer_controls)),
+	SND_SOC_DAPM_MIXER("TERT_MI2S_RX Port Mixer", SND_SOC_NOPM, 0, 0,
+	tert_mi2s_rx_port_mixer_controls,
+	ARRAY_SIZE(tert_mi2s_rx_port_mixer_controls)),
+	SND_SOC_DAPM_MIXER("QUAT_MI2S_RX Port Mixer", SND_SOC_NOPM, 0, 0,
+	quat_mi2s_rx_port_mixer_controls,
+	ARRAY_SIZE(quat_mi2s_rx_port_mixer_controls)),
+	SND_SOC_DAPM_MIXER("PRI_TDM_RX_0 Port Mixer", SND_SOC_NOPM, 0, 0,
+	pri_tdm_rx_0_port_mixer_controls,
+	ARRAY_SIZE(pri_tdm_rx_0_port_mixer_controls)),
+	SND_SOC_DAPM_MIXER("PRI_TDM_RX_1 Port Mixer", SND_SOC_NOPM, 0, 0,
+	pri_tdm_rx_1_port_mixer_controls,
+	ARRAY_SIZE(pri_tdm_rx_1_port_mixer_controls)),
+	SND_SOC_DAPM_MIXER("PRI_TDM_RX_2 Port Mixer", SND_SOC_NOPM, 0, 0,
+	pri_tdm_rx_2_port_mixer_controls,
+	ARRAY_SIZE(pri_tdm_rx_2_port_mixer_controls)),
+	SND_SOC_DAPM_MIXER("PRI_TDM_RX_3 Port Mixer", SND_SOC_NOPM, 0, 0,
+	pri_tdm_rx_3_port_mixer_controls,
+	ARRAY_SIZE(pri_tdm_rx_3_port_mixer_controls)),
+	SND_SOC_DAPM_MIXER("SEC_TDM_RX_0 Port Mixer", SND_SOC_NOPM, 0, 0,
+	sec_tdm_rx_0_port_mixer_controls,
+	ARRAY_SIZE(sec_tdm_rx_0_port_mixer_controls)),
+	SND_SOC_DAPM_MIXER("SEC_TDM_RX_1 Port Mixer", SND_SOC_NOPM, 0, 0,
+	sec_tdm_rx_1_port_mixer_controls,
+	ARRAY_SIZE(sec_tdm_rx_1_port_mixer_controls)),
+	SND_SOC_DAPM_MIXER("SEC_TDM_RX_2 Port Mixer", SND_SOC_NOPM, 0, 0,
+	sec_tdm_rx_2_port_mixer_controls,
+	ARRAY_SIZE(sec_tdm_rx_2_port_mixer_controls)),
+	SND_SOC_DAPM_MIXER("SEC_TDM_RX_3 Port Mixer", SND_SOC_NOPM, 0, 0,
+	sec_tdm_rx_3_port_mixer_controls,
+	ARRAY_SIZE(sec_tdm_rx_3_port_mixer_controls)),
+	SND_SOC_DAPM_MIXER("TERT_TDM_RX_0 Port Mixer", SND_SOC_NOPM, 0, 0,
+	tert_tdm_rx_0_port_mixer_controls,
+	ARRAY_SIZE(tert_tdm_rx_0_port_mixer_controls)),
+	SND_SOC_DAPM_MIXER("TERT_TDM_RX_1 Port Mixer", SND_SOC_NOPM, 0, 0,
+	tert_tdm_rx_1_port_mixer_controls,
+	ARRAY_SIZE(tert_tdm_rx_1_port_mixer_controls)),
+	SND_SOC_DAPM_MIXER("TERT_TDM_RX_2 Port Mixer", SND_SOC_NOPM, 0, 0,
+	tert_tdm_rx_2_port_mixer_controls,
+	ARRAY_SIZE(tert_tdm_rx_2_port_mixer_controls)),
+	SND_SOC_DAPM_MIXER("TERT_TDM_RX_3 Port Mixer", SND_SOC_NOPM, 0, 0,
+	tert_tdm_rx_3_port_mixer_controls,
+	ARRAY_SIZE(tert_tdm_rx_3_port_mixer_controls)),
+	SND_SOC_DAPM_MIXER("QUAT_TDM_RX_0 Port Mixer", SND_SOC_NOPM, 0, 0,
+	quat_tdm_rx_0_port_mixer_controls,
+	ARRAY_SIZE(quat_tdm_rx_0_port_mixer_controls)),
+	SND_SOC_DAPM_MIXER("QUAT_TDM_RX_1 Port Mixer", SND_SOC_NOPM, 0, 0,
+	quat_tdm_rx_1_port_mixer_controls,
+	ARRAY_SIZE(quat_tdm_rx_1_port_mixer_controls)),
+	SND_SOC_DAPM_MIXER("QUAT_TDM_RX_2 Port Mixer", SND_SOC_NOPM, 0, 0,
+	quat_tdm_rx_2_port_mixer_controls,
+	ARRAY_SIZE(quat_tdm_rx_2_port_mixer_controls)),
+	SND_SOC_DAPM_MIXER("QUAT_TDM_RX_3 Port Mixer", SND_SOC_NOPM, 0, 0,
+	quat_tdm_rx_3_port_mixer_controls,
+	ARRAY_SIZE(quat_tdm_rx_3_port_mixer_controls)),
+	SND_SOC_DAPM_MIXER("INT0_MI2S_RX Port Mixer", SND_SOC_NOPM, 0, 0,
+	int0_mi2s_rx_port_mixer_controls,
+	ARRAY_SIZE(int0_mi2s_rx_port_mixer_controls)),
+	SND_SOC_DAPM_MIXER("INT4_MI2S_RX Port Mixer", SND_SOC_NOPM, 0, 0,
+	int4_mi2s_rx_port_mixer_controls,
+	ARRAY_SIZE(int4_mi2s_rx_port_mixer_controls)),
+	SND_SOC_DAPM_MIXER("QCHAT_Tx Mixer",
+	SND_SOC_NOPM, 0, 0, tx_qchat_mixer_controls,
+	ARRAY_SIZE(tx_qchat_mixer_controls)),
+	SND_SOC_DAPM_MIXER("USB_AUDIO_RX_Voice Mixer",
+	SND_SOC_NOPM, 0, 0, usb_audio_rx_voice_mixer_controls,
+	ARRAY_SIZE(usb_audio_rx_voice_mixer_controls)),
+	SND_SOC_DAPM_MIXER("USB_AUDIO_RX Port Mixer",
+	SND_SOC_NOPM, 0, 0, usb_rx_port_mixer_controls,
+	ARRAY_SIZE(usb_rx_port_mixer_controls)),
+	/* LSM (low-power listen / sound-trigger) mixer definitions */
+	SND_SOC_DAPM_MIXER("LSM1 Mixer", SND_SOC_NOPM, 0, 0,
+	lsm1_mixer_controls, ARRAY_SIZE(lsm1_mixer_controls)),
+	SND_SOC_DAPM_MIXER("LSM2 Mixer", SND_SOC_NOPM, 0, 0,
+	lsm2_mixer_controls, ARRAY_SIZE(lsm2_mixer_controls)),
+	SND_SOC_DAPM_MIXER("LSM3 Mixer", SND_SOC_NOPM, 0, 0,
+	lsm3_mixer_controls, ARRAY_SIZE(lsm3_mixer_controls)),
+	SND_SOC_DAPM_MIXER("LSM4 Mixer", SND_SOC_NOPM, 0, 0,
+	lsm4_mixer_controls, ARRAY_SIZE(lsm4_mixer_controls)),
+	SND_SOC_DAPM_MIXER("LSM5 Mixer", SND_SOC_NOPM, 0, 0,
+	lsm5_mixer_controls, ARRAY_SIZE(lsm5_mixer_controls)),
+	SND_SOC_DAPM_MIXER("LSM6 Mixer", SND_SOC_NOPM, 0, 0,
+	lsm6_mixer_controls, ARRAY_SIZE(lsm6_mixer_controls)),
+	SND_SOC_DAPM_MIXER("LSM7 Mixer", SND_SOC_NOPM, 0, 0,
+	lsm7_mixer_controls, ARRAY_SIZE(lsm7_mixer_controls)),
+	SND_SOC_DAPM_MIXER("LSM8 Mixer", SND_SOC_NOPM, 0, 0,
+	lsm8_mixer_controls, ARRAY_SIZE(lsm8_mixer_controls)),
+	/* Virtual pins to force back-ends on, for now */
+	SND_SOC_DAPM_OUTPUT("BE_OUT"),
+	SND_SOC_DAPM_INPUT("BE_IN"),
+
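+	/*
+	 * VI feedback muxes: judging by the widget names, these appear to
+	 * route voltage/current (VI) sense data back from the RX port,
+	 * presumably for speaker-protection processing (assumption, not
+	 * confirmed by this patch).
+	 */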
+	SND_SOC_DAPM_MUX("SLIM0_RX_VI_FB_LCH_MUX", SND_SOC_NOPM, 0, 0,
+				&slim0_rx_vi_fb_lch_mux),
+	SND_SOC_DAPM_MUX("SLIM0_RX_VI_FB_RCH_MUX", SND_SOC_NOPM, 0, 0,
+				&slim0_rx_vi_fb_rch_mux),
+	SND_SOC_DAPM_MUX("PRI_MI2S_RX_VI_FB_MUX", SND_SOC_NOPM, 0, 0,
+				&mi2s_rx_vi_fb_mux),
+	SND_SOC_DAPM_MUX("INT4_MI2S_RX_VI_FB_MONO_CH_MUX", SND_SOC_NOPM, 0, 0,
+				&int4_mi2s_rx_vi_fb_mono_ch_mux),
+	SND_SOC_DAPM_MUX("INT4_MI2S_RX_VI_FB_STEREO_CH_MUX", SND_SOC_NOPM, 0, 0,
+				&int4_mi2s_rx_vi_fb_stereo_ch_mux),
+
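+	/*
+	 * Echo-cancellation reference muxes: VOC_EXT_EC MUX selects the
+	 * external EC reference for voice, and each AUDIO_REF_EC_ULx MUX
+	 * selects which back-end TX port feeds the EC reference of the
+	 * corresponding uplink (capture) session.
+	 */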
+	SND_SOC_DAPM_MUX("VOC_EXT_EC MUX", SND_SOC_NOPM, 0, 0,
+			 &voc_ext_ec_mux),
+	SND_SOC_DAPM_MUX("AUDIO_REF_EC_UL1 MUX", SND_SOC_NOPM, 0, 0,
+		&ext_ec_ref_mux_ul1),
+	SND_SOC_DAPM_MUX("AUDIO_REF_EC_UL2 MUX", SND_SOC_NOPM, 0, 0,
+		&ext_ec_ref_mux_ul2),
+	SND_SOC_DAPM_MUX("AUDIO_REF_EC_UL3 MUX", SND_SOC_NOPM, 0, 0,
+		&ext_ec_ref_mux_ul3),
+	SND_SOC_DAPM_MUX("AUDIO_REF_EC_UL4 MUX", SND_SOC_NOPM, 0, 0,
+		&ext_ec_ref_mux_ul4),
+	SND_SOC_DAPM_MUX("AUDIO_REF_EC_UL5 MUX", SND_SOC_NOPM, 0, 0,
+		&ext_ec_ref_mux_ul5),
+	SND_SOC_DAPM_MUX("AUDIO_REF_EC_UL6 MUX", SND_SOC_NOPM, 0, 0,
+		&ext_ec_ref_mux_ul6),
+	SND_SOC_DAPM_MUX("AUDIO_REF_EC_UL8 MUX", SND_SOC_NOPM, 0, 0,
+		&ext_ec_ref_mux_ul8),
+	SND_SOC_DAPM_MUX("AUDIO_REF_EC_UL9 MUX", SND_SOC_NOPM, 0, 0,
+		&ext_ec_ref_mux_ul9),
+	SND_SOC_DAPM_MUX("AUDIO_REF_EC_UL16 MUX", SND_SOC_NOPM, 0, 0,
+		&ext_ec_ref_mux_ul16),
+	SND_SOC_DAPM_MUX("AUDIO_REF_EC_UL17 MUX", SND_SOC_NOPM, 0, 0,
+		&ext_ec_ref_mux_ul17),
+	SND_SOC_DAPM_MUX("AUDIO_REF_EC_UL18 MUX", SND_SOC_NOPM, 0, 0,
+		&ext_ec_ref_mux_ul18),
+	SND_SOC_DAPM_MUX("AUDIO_REF_EC_UL19 MUX", SND_SOC_NOPM, 0, 0,
+		&ext_ec_ref_mux_ul19),
+};
+
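+/*
+ * Each route below is a {sink, control, source} triple: enabling the
+ * named mixer control connects the source widget (e.g. the front-end
+ * stream MM_DLx) to the sink back-end port widget.
+ */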
+static const struct snd_soc_dapm_route intercon[] = {
+	{"PRI_RX Audio Mixer", "MultiMedia1", "MM_DL1"},
+	{"PRI_RX Audio Mixer", "MultiMedia2", "MM_DL2"},
+	{"PRI_RX Audio Mixer", "MultiMedia3", "MM_DL3"},
+	{"PRI_RX Audio Mixer", "MultiMedia4", "MM_DL4"},
+	{"PRI_RX Audio Mixer", "MultiMedia5", "MM_DL5"},
+	{"PRI_RX Audio Mixer", "MultiMedia6", "MM_DL6"},
+	{"PRI_RX Audio Mixer", "MultiMedia7", "MM_DL7"},
+	{"PRI_RX Audio Mixer", "MultiMedia8", "MM_DL8"},
+	{"PRI_RX Audio Mixer", "MultiMedia9", "MM_DL9"},
+	{"PRI_RX Audio Mixer", "MultiMedia10", "MM_DL10"},
+	{"PRI_RX Audio Mixer", "MultiMedia11", "MM_DL11"},
+	{"PRI_RX Audio Mixer", "MultiMedia12", "MM_DL12"},
+	{"PRI_RX Audio Mixer", "MultiMedia13", "MM_DL13"},
+	{"PRI_RX Audio Mixer", "MultiMedia14", "MM_DL14"},
+	{"PRI_RX Audio Mixer", "MultiMedia15", "MM_DL15"},
+	{"PRI_RX Audio Mixer", "MultiMedia16", "MM_DL16"},
+	{"PRI_RX Audio Mixer", "MultiMedia26", "MM_DL26"},
+	{"PRI_I2S_RX", NULL, "PRI_RX Audio Mixer"},
+
+	{"SEC_RX Audio Mixer", "MultiMedia1", "MM_DL1"},
+	{"SEC_RX Audio Mixer", "MultiMedia2", "MM_DL2"},
+	{"SEC_RX Audio Mixer", "MultiMedia3", "MM_DL3"},
+	{"SEC_RX Audio Mixer", "MultiMedia4", "MM_DL4"},
+	{"SEC_RX Audio Mixer", "MultiMedia5", "MM_DL5"},
+	{"SEC_RX Audio Mixer", "MultiMedia6", "MM_DL6"},
+	{"SEC_RX Audio Mixer", "MultiMedia7", "MM_DL7"},
+	{"SEC_RX Audio Mixer", "MultiMedia8", "MM_DL8"},
+	{"SEC_RX Audio Mixer", "MultiMedia9", "MM_DL9"},
+	{"SEC_RX Audio Mixer", "MultiMedia10", "MM_DL10"},
+	{"SEC_RX Audio Mixer", "MultiMedia11", "MM_DL11"},
+	{"SEC_RX Audio Mixer", "MultiMedia12", "MM_DL12"},
+	{"SEC_RX Audio Mixer", "MultiMedia13", "MM_DL13"},
+	{"SEC_RX Audio Mixer", "MultiMedia14", "MM_DL14"},
+	{"SEC_RX Audio Mixer", "MultiMedia15", "MM_DL15"},
+	{"SEC_RX Audio Mixer", "MultiMedia16", "MM_DL16"},
+	{"SEC_RX Audio Mixer", "MultiMedia26", "MM_DL26"},
+	{"SEC_I2S_RX", NULL, "SEC_RX Audio Mixer"},
+
+	{"SLIMBUS_0_RX Audio Mixer", "MultiMedia1", "MM_DL1"},
+	{"SLIMBUS_0_RX Audio Mixer", "MultiMedia2", "MM_DL2"},
+	{"SLIMBUS_0_RX Audio Mixer", "MultiMedia3", "MM_DL3"},
+	{"SLIMBUS_0_RX Audio Mixer", "MultiMedia4", "MM_DL4"},
+	{"SLIMBUS_0_RX Audio Mixer", "MultiMedia5", "MM_DL5"},
+	{"SLIMBUS_0_RX Audio Mixer", "MultiMedia6", "MM_DL6"},
+	{"SLIMBUS_0_RX Audio Mixer", "MultiMedia7", "MM_DL7"},
+	{"SLIMBUS_0_RX Audio Mixer", "MultiMedia8", "MM_DL8"},
+	{"SLIMBUS_0_RX Audio Mixer", "MultiMedia9", "MM_DL9"},
+	{"SLIMBUS_0_RX Audio Mixer", "MultiMedia10", "MM_DL10"},
+	{"SLIMBUS_0_RX Audio Mixer", "MultiMedia11", "MM_DL11"},
+	{"SLIMBUS_0_RX Audio Mixer", "MultiMedia12", "MM_DL12"},
+	{"SLIMBUS_0_RX Audio Mixer", "MultiMedia13", "MM_DL13"},
+	{"SLIMBUS_0_RX Audio Mixer", "MultiMedia14", "MM_DL14"},
+	{"SLIMBUS_0_RX Audio Mixer", "MultiMedia15", "MM_DL15"},
+	{"SLIMBUS_0_RX Audio Mixer", "MultiMedia16", "MM_DL16"},
+	{"SLIMBUS_0_RX Audio Mixer", "MultiMedia21", "MM_DL21"},
+	{"SLIMBUS_0_RX Audio Mixer", "MultiMedia22", "MM_DL22"},
+	{"SLIMBUS_0_RX Audio Mixer", "MultiMedia23", "MM_DL23"},
+	{"SLIMBUS_0_RX Audio Mixer", "MultiMedia24", "MM_DL24"},
+	{"SLIMBUS_0_RX Audio Mixer", "MultiMedia25", "MM_DL25"},
+	{"SLIMBUS_0_RX Audio Mixer", "MultiMedia26", "MM_DL26"},
+	{"SLIMBUS_0_RX", NULL, "SLIMBUS_0_RX Audio Mixer"},
+
+	{"SLIMBUS_2_RX Audio Mixer", "MultiMedia1", "MM_DL1"},
+	{"SLIMBUS_2_RX Audio Mixer", "MultiMedia2", "MM_DL2"},
+	{"SLIMBUS_2_RX Audio Mixer", "MultiMedia3", "MM_DL3"},
+	{"SLIMBUS_2_RX Audio Mixer", "MultiMedia4", "MM_DL4"},
+	{"SLIMBUS_2_RX Audio Mixer", "MultiMedia5", "MM_DL5"},
+	{"SLIMBUS_2_RX Audio Mixer", "MultiMedia6", "MM_DL6"},
+	{"SLIMBUS_2_RX Audio Mixer", "MultiMedia7", "MM_DL7"},
+	{"SLIMBUS_2_RX Audio Mixer", "MultiMedia8", "MM_DL8"},
+	{"SLIMBUS_2_RX Audio Mixer", "MultiMedia9", "MM_DL9"},
+	{"SLIMBUS_2_RX Audio Mixer", "MultiMedia10", "MM_DL10"},
+	{"SLIMBUS_2_RX Audio Mixer", "MultiMedia11", "MM_DL11"},
+	{"SLIMBUS_2_RX Audio Mixer", "MultiMedia12", "MM_DL12"},
+	{"SLIMBUS_2_RX Audio Mixer", "MultiMedia13", "MM_DL13"},
+	{"SLIMBUS_2_RX Audio Mixer", "MultiMedia14", "MM_DL14"},
+	{"SLIMBUS_2_RX Audio Mixer", "MultiMedia15", "MM_DL15"},
+	{"SLIMBUS_2_RX Audio Mixer", "MultiMedia16", "MM_DL16"},
+	{"SLIMBUS_2_RX Audio Mixer", "MultiMedia26", "MM_DL26"},
+	{"SLIMBUS_2_RX", NULL, "SLIMBUS_2_RX Audio Mixer"},
+
+	{"SLIMBUS_5_RX Audio Mixer", "MultiMedia1", "MM_DL1"},
+	{"SLIMBUS_5_RX Audio Mixer", "MultiMedia2", "MM_DL2"},
+	{"SLIMBUS_5_RX Audio Mixer", "MultiMedia3", "MM_DL3"},
+	{"SLIMBUS_5_RX Audio Mixer", "MultiMedia4", "MM_DL4"},
+	{"SLIMBUS_5_RX Audio Mixer", "MultiMedia5", "MM_DL5"},
+	{"SLIMBUS_5_RX Audio Mixer", "MultiMedia6", "MM_DL6"},
+	{"SLIMBUS_5_RX Audio Mixer", "MultiMedia7", "MM_DL7"},
+	{"SLIMBUS_5_RX Audio Mixer", "MultiMedia8", "MM_DL8"},
+	{"SLIMBUS_5_RX Audio Mixer", "MultiMedia9", "MM_DL9"},
+	{"SLIMBUS_5_RX Audio Mixer", "MultiMedia10", "MM_DL10"},
+	{"SLIMBUS_5_RX Audio Mixer", "MultiMedia11", "MM_DL11"},
+	{"SLIMBUS_5_RX Audio Mixer", "MultiMedia12", "MM_DL12"},
+	{"SLIMBUS_5_RX Audio Mixer", "MultiMedia13", "MM_DL13"},
+	{"SLIMBUS_5_RX Audio Mixer", "MultiMedia14", "MM_DL14"},
+	{"SLIMBUS_5_RX Audio Mixer", "MultiMedia15", "MM_DL15"},
+	{"SLIMBUS_5_RX Audio Mixer", "MultiMedia16", "MM_DL16"},
+	{"SLIMBUS_5_RX Audio Mixer", "MultiMedia26", "MM_DL26"},
+	{"SLIMBUS_5_RX", NULL, "SLIMBUS_5_RX Audio Mixer"},
+
+	{"HDMI Mixer", "MultiMedia1", "MM_DL1"},
+	{"HDMI Mixer", "MultiMedia2", "MM_DL2"},
+	{"HDMI Mixer", "MultiMedia3", "MM_DL3"},
+	{"HDMI Mixer", "MultiMedia4", "MM_DL4"},
+	{"HDMI Mixer", "MultiMedia5", "MM_DL5"},
+	{"HDMI Mixer", "MultiMedia6", "MM_DL6"},
+	{"HDMI Mixer", "MultiMedia7", "MM_DL7"},
+	{"HDMI Mixer", "MultiMedia8", "MM_DL8"},
+	{"HDMI Mixer", "MultiMedia9", "MM_DL9"},
+	{"HDMI Mixer", "MultiMedia10", "MM_DL10"},
+	{"HDMI Mixer", "MultiMedia11", "MM_DL11"},
+	{"HDMI Mixer", "MultiMedia12", "MM_DL12"},
+	{"HDMI Mixer", "MultiMedia13", "MM_DL13"},
+	{"HDMI Mixer", "MultiMedia14", "MM_DL14"},
+	{"HDMI Mixer", "MultiMedia15", "MM_DL15"},
+	{"HDMI Mixer", "MultiMedia16", "MM_DL16"},
+	{"HDMI Mixer", "MultiMedia21", "MM_DL21"},
+	{"HDMI Mixer", "MultiMedia22", "MM_DL22"},
+	{"HDMI Mixer", "MultiMedia23", "MM_DL23"},
+	{"HDMI Mixer", "MultiMedia24", "MM_DL24"},
+	{"HDMI Mixer", "MultiMedia25", "MM_DL25"},
+	{"HDMI Mixer", "MultiMedia26", "MM_DL26"},
+	{"HDMI", NULL, "HDMI Mixer"},
+
+	{"DISPLAY_PORT Mixer", "MultiMedia1", "MM_DL1"},
+	{"DISPLAY_PORT Mixer", "MultiMedia2", "MM_DL2"},
+	{"DISPLAY_PORT Mixer", "MultiMedia3", "MM_DL3"},
+	{"DISPLAY_PORT Mixer", "MultiMedia4", "MM_DL4"},
+	{"DISPLAY_PORT Mixer", "MultiMedia5", "MM_DL5"},
+	{"DISPLAY_PORT Mixer", "MultiMedia6", "MM_DL6"},
+	{"DISPLAY_PORT Mixer", "MultiMedia7", "MM_DL7"},
+	{"DISPLAY_PORT Mixer", "MultiMedia8", "MM_DL8"},
+	{"DISPLAY_PORT Mixer", "MultiMedia9", "MM_DL9"},
+	{"DISPLAY_PORT Mixer", "MultiMedia10", "MM_DL10"},
+	{"DISPLAY_PORT Mixer", "MultiMedia11", "MM_DL11"},
+	{"DISPLAY_PORT Mixer", "MultiMedia12", "MM_DL12"},
+	{"DISPLAY_PORT Mixer", "MultiMedia13", "MM_DL13"},
+	{"DISPLAY_PORT Mixer", "MultiMedia14", "MM_DL14"},
+	{"DISPLAY_PORT Mixer", "MultiMedia15", "MM_DL15"},
+	{"DISPLAY_PORT Mixer", "MultiMedia16", "MM_DL16"},
+	{"DISPLAY_PORT Mixer", "MultiMedia26", "MM_DL26"},
+	{"DISPLAY_PORT", NULL, "DISPLAY_PORT Mixer"},
+
+	{"SPDIF_RX Audio Mixer", "MultiMedia1", "MM_DL1"},
+	{"SPDIF_RX Audio Mixer", "MultiMedia2", "MM_DL2"},
+	{"SPDIF_RX Audio Mixer", "MultiMedia3", "MM_DL3"},
+	{"SPDIF_RX Audio Mixer", "MultiMedia4", "MM_DL4"},
+	{"SPDIF_RX Audio Mixer", "MultiMedia5", "MM_DL5"},
+	{"SPDIF_RX Audio Mixer", "MultiMedia6", "MM_DL6"},
+	{"SPDIF_RX Audio Mixer", "MultiMedia7", "MM_DL7"},
+	{"SPDIF_RX Audio Mixer", "MultiMedia8", "MM_DL8"},
+	{"SPDIF_RX Audio Mixer", "MultiMedia9", "MM_DL9"},
+	{"SPDIF_RX Audio Mixer", "MultiMedia10", "MM_DL10"},
+	{"SPDIF_RX Audio Mixer", "MultiMedia11", "MM_DL11"},
+	{"SPDIF_RX Audio Mixer", "MultiMedia12", "MM_DL12"},
+	{"SPDIF_RX Audio Mixer", "MultiMedia13", "MM_DL13"},
+	{"SPDIF_RX Audio Mixer", "MultiMedia14", "MM_DL14"},
+	{"SPDIF_RX Audio Mixer", "MultiMedia15", "MM_DL15"},
+	{"SPDIF_RX Audio Mixer", "MultiMedia16", "MM_DL16"},
+	{"SPDIF_RX Audio Mixer", "MultiMedia26", "MM_DL26"},
+	{"SPDIF_RX", NULL, "SPDIF_RX Audio Mixer"},
+
+	/* in-call music delivery routes */
+	{"Incall_Music Audio Mixer", "MultiMedia1", "MM_DL1"},
+	{"Incall_Music Audio Mixer", "MultiMedia2", "MM_DL2"},
+	{"Incall_Music Audio Mixer", "MultiMedia5", "MM_DL5"},
+	{"Incall_Music Audio Mixer", "MultiMedia9", "MM_DL9"},
+	{"VOICE_PLAYBACK_TX", NULL, "Incall_Music Audio Mixer"},
+	{"Incall_Music_2 Audio Mixer", "MultiMedia1", "MM_DL1"},
+	{"Incall_Music_2 Audio Mixer", "MultiMedia2", "MM_DL2"},
+	{"Incall_Music_2 Audio Mixer", "MultiMedia5", "MM_DL5"},
+	{"Incall_Music_2 Audio Mixer", "MultiMedia9", "MM_DL9"},
+	{"VOICE2_PLAYBACK_TX", NULL, "Incall_Music_2 Audio Mixer"},
+	{"SLIMBUS_4_RX Audio Mixer", "MultiMedia1", "MM_DL1"},
+	{"SLIMBUS_4_RX Audio Mixer", "MultiMedia2", "MM_DL2"},
+	{"SLIMBUS_4_RX Audio Mixer", "MultiMedia5", "MM_DL5"},
+	{"SLIMBUS_4_RX Audio Mixer", "MultiMedia9", "MM_DL9"},
+	{"SLIMBUS_4_RX", NULL, "SLIMBUS_4_RX Audio Mixer"},
+
+	{"SLIMBUS_6_RX Audio Mixer", "MultiMedia1", "MM_DL1"},
+	{"SLIMBUS_6_RX Audio Mixer", "MultiMedia2", "MM_DL2"},
+	{"SLIMBUS_6_RX Audio Mixer", "MultiMedia3", "MM_DL3"},
+	{"SLIMBUS_6_RX Audio Mixer", "MultiMedia4", "MM_DL4"},
+	{"SLIMBUS_6_RX Audio Mixer", "MultiMedia5", "MM_DL5"},
+	{"SLIMBUS_6_RX Audio Mixer", "MultiMedia6", "MM_DL6"},
+	{"SLIMBUS_6_RX Audio Mixer", "MultiMedia7", "MM_DL7"},
+	{"SLIMBUS_6_RX Audio Mixer", "MultiMedia8", "MM_DL8"},
+	{"SLIMBUS_6_RX Audio Mixer", "MultiMedia9", "MM_DL9"},
+	{"SLIMBUS_6_RX Audio Mixer", "MultiMedia10", "MM_DL10"},
+	{"SLIMBUS_6_RX Audio Mixer", "MultiMedia11", "MM_DL11"},
+	{"SLIMBUS_6_RX Audio Mixer", "MultiMedia12", "MM_DL12"},
+	{"SLIMBUS_6_RX Audio Mixer", "MultiMedia13", "MM_DL13"},
+	{"SLIMBUS_6_RX Audio Mixer", "MultiMedia14", "MM_DL14"},
+	{"SLIMBUS_6_RX Audio Mixer", "MultiMedia15", "MM_DL15"},
+	{"SLIMBUS_6_RX Audio Mixer", "MultiMedia16", "MM_DL16"},
+	{"SLIMBUS_6_RX Audio Mixer", "MultiMedia21", "MM_DL21"},
+	{"SLIMBUS_6_RX Audio Mixer", "MultiMedia22", "MM_DL22"},
+	{"SLIMBUS_6_RX Audio Mixer", "MultiMedia23", "MM_DL23"},
+	{"SLIMBUS_6_RX Audio Mixer", "MultiMedia24", "MM_DL24"},
+	{"SLIMBUS_6_RX Audio Mixer", "MultiMedia25", "MM_DL25"},
+	{"SLIMBUS_6_RX Audio Mixer", "MultiMedia26", "MM_DL26"},
+	{"SLIMBUS_6_RX", NULL, "SLIMBUS_6_RX Audio Mixer"},
+
+	{"SLIMBUS_7_RX Audio Mixer", "MultiMedia1", "MM_DL1"},
+	{"SLIMBUS_7_RX Audio Mixer", "MultiMedia2", "MM_DL2"},
+	{"SLIMBUS_7_RX Audio Mixer", "MultiMedia3", "MM_DL3"},
+	{"SLIMBUS_7_RX Audio Mixer", "MultiMedia4", "MM_DL4"},
+	{"SLIMBUS_7_RX Audio Mixer", "MultiMedia5", "MM_DL5"},
+	{"SLIMBUS_7_RX Audio Mixer", "MultiMedia6", "MM_DL6"},
+	{"SLIMBUS_7_RX Audio Mixer", "MultiMedia7", "MM_DL7"},
+	{"SLIMBUS_7_RX Audio Mixer", "MultiMedia8", "MM_DL8"},
+	{"SLIMBUS_7_RX Audio Mixer", "MultiMedia9", "MM_DL9"},
+	{"SLIMBUS_7_RX Audio Mixer", "MultiMedia10", "MM_DL10"},
+	{"SLIMBUS_7_RX Audio Mixer", "MultiMedia11", "MM_DL11"},
+	{"SLIMBUS_7_RX Audio Mixer", "MultiMedia12", "MM_DL12"},
+	{"SLIMBUS_7_RX Audio Mixer", "MultiMedia13", "MM_DL13"},
+	{"SLIMBUS_7_RX Audio Mixer", "MultiMedia14", "MM_DL14"},
+	{"SLIMBUS_7_RX Audio Mixer", "MultiMedia15", "MM_DL15"},
+	{"SLIMBUS_7_RX Audio Mixer", "MultiMedia16", "MM_DL16"},
+	{"SLIMBUS_7_RX Audio Mixer", "MultiMedia21", "MM_DL21"},
+	{"SLIMBUS_7_RX Audio Mixer", "MultiMedia22", "MM_DL22"},
+	{"SLIMBUS_7_RX Audio Mixer", "MultiMedia23", "MM_DL23"},
+	{"SLIMBUS_7_RX Audio Mixer", "MultiMedia24", "MM_DL24"},
+	{"SLIMBUS_7_RX Audio Mixer", "MultiMedia25", "MM_DL25"},
+	{"SLIMBUS_7_RX Audio Mixer", "MultiMedia26", "MM_DL26"},
+	{"SLIMBUS_7_RX", NULL, "SLIMBUS_7_RX Audio Mixer"},
+
+	{"USB_AUDIO_RX Audio Mixer", "MultiMedia1", "MM_DL1"},
+	{"USB_AUDIO_RX Audio Mixer", "MultiMedia2", "MM_DL2"},
+	{"USB_AUDIO_RX Audio Mixer", "MultiMedia3", "MM_DL3"},
+	{"USB_AUDIO_RX Audio Mixer", "MultiMedia4", "MM_DL4"},
+	{"USB_AUDIO_RX Audio Mixer", "MultiMedia5", "MM_DL5"},
+	{"USB_AUDIO_RX Audio Mixer", "MultiMedia6", "MM_DL6"},
+	{"USB_AUDIO_RX Audio Mixer", "MultiMedia7", "MM_DL7"},
+	{"USB_AUDIO_RX Audio Mixer", "MultiMedia8", "MM_DL8"},
+	{"USB_AUDIO_RX Audio Mixer", "MultiMedia9", "MM_DL9"},
+	{"USB_AUDIO_RX Audio Mixer", "MultiMedia10", "MM_DL10"},
+	{"USB_AUDIO_RX Audio Mixer", "MultiMedia11", "MM_DL11"},
+	{"USB_AUDIO_RX Audio Mixer", "MultiMedia12", "MM_DL12"},
+	{"USB_AUDIO_RX Audio Mixer", "MultiMedia13", "MM_DL13"},
+	{"USB_AUDIO_RX Audio Mixer", "MultiMedia14", "MM_DL14"},
+	{"USB_AUDIO_RX Audio Mixer", "MultiMedia15", "MM_DL15"},
+	{"USB_AUDIO_RX Audio Mixer", "MultiMedia16", "MM_DL16"},
+	{"USB_AUDIO_RX Audio Mixer", "MultiMedia26", "MM_DL26"},
+	{"USB_AUDIO_RX", NULL, "USB_AUDIO_RX Audio Mixer"},
+
+	{"MultiMedia1 Mixer", "VOC_REC_UL", "INCALL_RECORD_TX"},
+	{"MultiMedia4 Mixer", "VOC_REC_UL", "INCALL_RECORD_TX"},
+	{"MultiMedia8 Mixer", "VOC_REC_UL", "INCALL_RECORD_TX"},
+	{"MultiMedia1 Mixer", "VOC_REC_DL", "INCALL_RECORD_RX"},
+	{"MultiMedia4 Mixer", "VOC_REC_DL", "INCALL_RECORD_RX"},
+	{"MultiMedia8 Mixer", "VOC_REC_DL", "INCALL_RECORD_RX"},
+	{"MultiMedia1 Mixer", "SLIM_4_TX", "SLIMBUS_4_TX"},
+	{"MultiMedia1 Mixer", "SLIM_6_TX", "SLIMBUS_6_TX"},
+	{"MultiMedia1 Mixer", "SLIM_7_TX", "SLIMBUS_7_TX"},
+	{"MultiMedia1 Mixer", "SLIM_8_TX", "SLIMBUS_8_TX"},
+	{"MultiMedia8 Mixer", "SLIM_6_TX", "SLIMBUS_6_TX"},
+	{"MultiMedia8 Mixer", "SLIM_7_TX", "SLIMBUS_7_TX"},
+	{"MultiMedia4 Mixer", "SLIM_0_TX", "SLIMBUS_0_TX"},
+	{"MultiMedia17 Mixer", "SLIM_0_TX", "SLIMBUS_0_TX"},
+	{"MultiMedia18 Mixer", "SLIM_0_TX", "SLIMBUS_0_TX"},
+	{"MultiMedia19 Mixer", "SLIM_0_TX", "SLIMBUS_0_TX"},
+	{"MultiMedia8 Mixer", "SLIM_0_TX", "SLIMBUS_0_TX"},
+	{"MultiMedia2 Mixer", "PRI_MI2S_TX", "PRI_MI2S_TX"},
+	{"MultiMedia4 Mixer", "PRI_MI2S_TX", "PRI_MI2S_TX"},
+	{"MultiMedia17 Mixer", "PRI_MI2S_TX", "PRI_MI2S_TX"},
+	{"MultiMedia18 Mixer", "PRI_MI2S_TX", "PRI_MI2S_TX"},
+	{"MultiMedia19 Mixer", "PRI_MI2S_TX", "PRI_MI2S_TX"},
+	{"MultiMedia8 Mixer", "PRI_MI2S_TX", "PRI_MI2S_TX"},
+	{"MultiMedia18 Mixer", "QUAT_MI2S_TX", "QUAT_MI2S_TX"},
+	{"MultiMedia8 Mixer", "INT3_MI2S_TX", "INT3_MI2S_TX"},
+	{"MultiMedia3 Mixer", "SLIM_0_TX", "SLIMBUS_0_TX"},
+	{"MultiMedia5 Mixer", "SLIM_0_TX", "SLIMBUS_0_TX"},
+	{"MultiMedia16 Mixer", "SLIM_0_TX", "SLIMBUS_0_TX"},
+	{"MultiMedia5 Mixer", "SLIM_7_TX", "SLIMBUS_7_TX"},
+	{"MultiMedia5 Mixer", "SLIM_8_TX", "SLIMBUS_8_TX"},
+	{"MI2S_RX Audio Mixer", "MultiMedia1", "MM_DL1"},
+	{"MI2S_RX Audio Mixer", "MultiMedia2", "MM_DL2"},
+	{"MI2S_RX Audio Mixer", "MultiMedia3", "MM_DL3"},
+	{"MI2S_RX Audio Mixer", "MultiMedia4", "MM_DL4"},
+	{"MI2S_RX Audio Mixer", "MultiMedia5", "MM_DL5"},
+	{"MI2S_RX Audio Mixer", "MultiMedia6", "MM_DL6"},
+	{"MI2S_RX Audio Mixer", "MultiMedia7", "MM_DL7"},
+	{"MI2S_RX Audio Mixer", "MultiMedia8", "MM_DL8"},
+	{"MI2S_RX Audio Mixer", "MultiMedia9", "MM_DL9"},
+	{"MI2S_RX Audio Mixer", "MultiMedia10", "MM_DL10"},
+	{"MI2S_RX Audio Mixer", "MultiMedia11", "MM_DL11"},
+	{"MI2S_RX Audio Mixer", "MultiMedia12", "MM_DL12"},
+	{"MI2S_RX Audio Mixer", "MultiMedia13", "MM_DL13"},
+	{"MI2S_RX Audio Mixer", "MultiMedia14", "MM_DL14"},
+	{"MI2S_RX Audio Mixer", "MultiMedia15", "MM_DL15"},
+	{"MI2S_RX Audio Mixer", "MultiMedia16", "MM_DL16"},
+	{"MI2S_RX Audio Mixer", "MultiMedia26", "MM_DL26"},
+	{"MI2S_RX", NULL, "MI2S_RX Audio Mixer"},
+
+	{"QUAT_MI2S_RX Audio Mixer", "MultiMedia1", "MM_DL1"},
+	{"QUAT_MI2S_RX Audio Mixer", "MultiMedia2", "MM_DL2"},
+	{"QUAT_MI2S_RX Audio Mixer", "MultiMedia3", "MM_DL3"},
+	{"QUAT_MI2S_RX Audio Mixer", "MultiMedia4", "MM_DL4"},
+	{"QUAT_MI2S_RX Audio Mixer", "MultiMedia5", "MM_DL5"},
+	{"QUAT_MI2S_RX Audio Mixer", "MultiMedia6", "MM_DL6"},
+	{"QUAT_MI2S_RX Audio Mixer", "MultiMedia7", "MM_DL7"},
+	{"QUAT_MI2S_RX Audio Mixer", "MultiMedia8", "MM_DL8"},
+	{"QUAT_MI2S_RX Audio Mixer", "MultiMedia10", "MM_DL10"},
+	{"QUAT_MI2S_RX Audio Mixer", "MultiMedia11", "MM_DL11"},
+	{"QUAT_MI2S_RX Audio Mixer", "MultiMedia12", "MM_DL12"},
+	{"QUAT_MI2S_RX Audio Mixer", "MultiMedia13", "MM_DL13"},
+	{"QUAT_MI2S_RX Audio Mixer", "MultiMedia14", "MM_DL14"},
+	{"QUAT_MI2S_RX Audio Mixer", "MultiMedia15", "MM_DL15"},
+	{"QUAT_MI2S_RX Audio Mixer", "MultiMedia16", "MM_DL16"},
+	{"QUAT_MI2S_RX Audio Mixer", "MultiMedia26", "MM_DL26"},
+	{"QUAT_MI2S_RX", NULL, "QUAT_MI2S_RX Audio Mixer"},
+
+	{"TERT_MI2S_RX Audio Mixer", "MultiMedia1", "MM_DL1"},
+	{"TERT_MI2S_RX Audio Mixer", "MultiMedia2", "MM_DL2"},
+	{"TERT_MI2S_RX Audio Mixer", "MultiMedia3", "MM_DL3"},
+	{"TERT_MI2S_RX Audio Mixer", "MultiMedia4", "MM_DL4"},
+	{"TERT_MI2S_RX Audio Mixer", "MultiMedia5", "MM_DL5"},
+	{"TERT_MI2S_RX Audio Mixer", "MultiMedia7", "MM_DL7"},
+	{"TERT_MI2S_RX Audio Mixer", "MultiMedia8", "MM_DL8"},
+	{"TERT_MI2S_RX Audio Mixer", "MultiMedia10", "MM_DL10"},
+	{"TERT_MI2S_RX Audio Mixer", "MultiMedia11", "MM_DL11"},
+	{"TERT_MI2S_RX Audio Mixer", "MultiMedia12", "MM_DL12"},
+	{"TERT_MI2S_RX Audio Mixer", "MultiMedia13", "MM_DL13"},
+	{"TERT_MI2S_RX Audio Mixer", "MultiMedia14", "MM_DL14"},
+	{"TERT_MI2S_RX Audio Mixer", "MultiMedia15", "MM_DL15"},
+	{"TERT_MI2S_RX Audio Mixer", "MultiMedia16", "MM_DL16"},
+	{"TERT_MI2S_RX Audio Mixer", "MultiMedia26", "MM_DL26"},
+	{"TERT_MI2S_RX", NULL, "TERT_MI2S_RX Audio Mixer"},
+
+	{"SEC_MI2S_RX Audio Mixer", "MultiMedia1", "MM_DL1"},
+	{"SEC_MI2S_RX Audio Mixer", "MultiMedia2", "MM_DL2"},
+	{"SEC_MI2S_RX Audio Mixer", "MultiMedia3", "MM_DL3"},
+	{"SEC_MI2S_RX Audio Mixer", "MultiMedia4", "MM_DL4"},
+	{"SEC_MI2S_RX Audio Mixer", "MultiMedia5", "MM_DL5"},
+	{"SEC_MI2S_RX Audio Mixer", "MultiMedia7", "MM_DL7"},
+	{"SEC_MI2S_RX Audio Mixer", "MultiMedia8", "MM_DL8"},
+	{"SEC_MI2S_RX Audio Mixer", "MultiMedia10", "MM_DL10"},
+	{"SEC_MI2S_RX Audio Mixer", "MultiMedia11", "MM_DL11"},
+	{"SEC_MI2S_RX Audio Mixer", "MultiMedia12", "MM_DL12"},
+	{"SEC_MI2S_RX Audio Mixer", "MultiMedia13", "MM_DL13"},
+	{"SEC_MI2S_RX Audio Mixer", "MultiMedia14", "MM_DL14"},
+	{"SEC_MI2S_RX Audio Mixer", "MultiMedia15", "MM_DL15"},
+	{"SEC_MI2S_RX Audio Mixer", "MultiMedia16", "MM_DL16"},
+	{"SEC_MI2S_RX Audio Mixer", "MultiMedia26", "MM_DL26"},
+	{"SEC_MI2S_RX", NULL, "SEC_MI2S_RX Audio Mixer"},
+
+	{"SEC_MI2S_RX_SD1 Audio Mixer", "MultiMedia6", "MM_DL6"},
+	{"SEC_MI2S_RX_SD1", NULL, "SEC_MI2S_RX_SD1 Audio Mixer"},
+
+	{"SEC_MI2S_RX Port Mixer", "PRI_MI2S_TX", "PRI_MI2S_TX"},
+	{"SEC_MI2S_RX Port Mixer", "INTERNAL_FM_TX", "INT_FM_TX"},
+
+	{"PRI_MI2S_RX Audio Mixer", "MultiMedia1", "MM_DL1"},
+	{"PRI_MI2S_RX Audio Mixer", "MultiMedia2", "MM_DL2"},
+	{"PRI_MI2S_RX Audio Mixer", "MultiMedia3", "MM_DL3"},
+	{"PRI_MI2S_RX Audio Mixer", "MultiMedia4", "MM_DL4"},
+	{"PRI_MI2S_RX Audio Mixer", "MultiMedia5", "MM_DL5"},
+	{"PRI_MI2S_RX Audio Mixer", "MultiMedia6", "MM_DL6"},
+	{"PRI_MI2S_RX Audio Mixer", "MultiMedia7", "MM_DL7"},
+	{"PRI_MI2S_RX Audio Mixer", "MultiMedia8", "MM_DL8"},
+	{"PRI_MI2S_RX Audio Mixer", "MultiMedia10", "MM_DL10"},
+	{"PRI_MI2S_RX Audio Mixer", "MultiMedia11", "MM_DL11"},
+	{"PRI_MI2S_RX Audio Mixer", "MultiMedia12", "MM_DL12"},
+	{"PRI_MI2S_RX Audio Mixer", "MultiMedia13", "MM_DL13"},
+	{"PRI_MI2S_RX Audio Mixer", "MultiMedia14", "MM_DL14"},
+	{"PRI_MI2S_RX Audio Mixer", "MultiMedia15", "MM_DL15"},
+	{"PRI_MI2S_RX Audio Mixer", "MultiMedia16", "MM_DL16"},
+	{"PRI_MI2S_RX Audio Mixer", "MultiMedia26", "MM_DL26"},
+	{"PRI_MI2S_RX", NULL, "PRI_MI2S_RX Audio Mixer"},
+
+	{"INT0_MI2S_RX Audio Mixer", "MultiMedia1", "MM_DL1"},
+	{"INT0_MI2S_RX Audio Mixer", "MultiMedia2", "MM_DL2"},
+	{"INT0_MI2S_RX Audio Mixer", "MultiMedia3", "MM_DL3"},
+	{"INT0_MI2S_RX Audio Mixer", "MultiMedia4", "MM_DL4"},
+	{"INT0_MI2S_RX Audio Mixer", "MultiMedia5", "MM_DL5"},
+	{"INT0_MI2S_RX Audio Mixer", "MultiMedia6", "MM_DL6"},
+	{"INT0_MI2S_RX Audio Mixer", "MultiMedia7", "MM_DL7"},
+	{"INT0_MI2S_RX Audio Mixer", "MultiMedia8", "MM_DL8"},
+	{"INT0_MI2S_RX Audio Mixer", "MultiMedia10", "MM_DL10"},
+	{"INT0_MI2S_RX Audio Mixer", "MultiMedia11", "MM_DL11"},
+	{"INT0_MI2S_RX Audio Mixer", "MultiMedia12", "MM_DL12"},
+	{"INT0_MI2S_RX Audio Mixer", "MultiMedia13", "MM_DL13"},
+	{"INT0_MI2S_RX Audio Mixer", "MultiMedia14", "MM_DL14"},
+	{"INT0_MI2S_RX Audio Mixer", "MultiMedia15", "MM_DL15"},
+	{"INT0_MI2S_RX Audio Mixer", "MultiMedia16", "MM_DL16"},
+	{"INT0_MI2S_RX Audio Mixer", "MultiMedia26", "MM_DL26"},
+	{"INT0_MI2S_RX", NULL, "INT0_MI2S_RX Audio Mixer"},
+
+	{"INT4_MI2S_RX Audio Mixer", "MultiMedia1", "MM_DL1"},
+	{"INT4_MI2S_RX Audio Mixer", "MultiMedia2", "MM_DL2"},
+	{"INT4_MI2S_RX Audio Mixer", "MultiMedia3", "MM_DL3"},
+	{"INT4_MI2S_RX Audio Mixer", "MultiMedia4", "MM_DL4"},
+	{"INT4_MI2S_RX Audio Mixer", "MultiMedia5", "MM_DL5"},
+	{"INT4_MI2S_RX Audio Mixer", "MultiMedia6", "MM_DL6"},
+	{"INT4_MI2S_RX Audio Mixer", "MultiMedia7", "MM_DL7"},
+	{"INT4_MI2S_RX Audio Mixer", "MultiMedia8", "MM_DL8"},
+	{"INT4_MI2S_RX Audio Mixer", "MultiMedia10", "MM_DL10"},
+	{"INT4_MI2S_RX Audio Mixer", "MultiMedia11", "MM_DL11"},
+	{"INT4_MI2S_RX Audio Mixer", "MultiMedia12", "MM_DL12"},
+	{"INT4_MI2S_RX Audio Mixer", "MultiMedia13", "MM_DL13"},
+	{"INT4_MI2S_RX Audio Mixer", "MultiMedia14", "MM_DL14"},
+	{"INT4_MI2S_RX Audio Mixer", "MultiMedia15", "MM_DL15"},
+	{"INT4_MI2S_RX Audio Mixer", "MultiMedia16", "MM_DL16"},
+	{"INT4_MI2S_RX Audio Mixer", "MultiMedia26", "MM_DL26"},
+	{"INT4_MI2S_RX", NULL, "INT4_MI2S_RX Audio Mixer"},
+
+	{"QUIN_MI2S_RX Audio Mixer", "MultiMedia1", "MM_DL1"},
+	{"QUIN_MI2S_RX Audio Mixer", "MultiMedia1", "MM_DL1"},
+	{"QUIN_MI2S_RX Audio Mixer", "MultiMedia1", "MM_DL1"},
+	{"QUIN_MI2S_RX Audio Mixer", "MultiMedia2", "MM_DL2"},
+	{"QUIN_MI2S_RX Audio Mixer", "MultiMedia3", "MM_DL3"},
+	{"QUIN_MI2S_RX Audio Mixer", "MultiMedia4", "MM_DL4"},
+	{"QUIN_MI2S_RX Audio Mixer", "MultiMedia5", "MM_DL5"},
+	{"QUIN_MI2S_RX Audio Mixer", "MultiMedia6", "MM_DL6"},
+	{"QUIN_MI2S_RX Audio Mixer", "MultiMedia7", "MM_DL7"},
+	{"QUIN_MI2S_RX Audio Mixer", "MultiMedia8", "MM_DL8"},
+	{"QUIN_MI2S_RX Audio Mixer", "MultiMedia10", "MM_DL10"},
+	{"QUIN_MI2S_RX Audio Mixer", "MultiMedia11", "MM_DL11"},
+	{"QUIN_MI2S_RX Audio Mixer", "MultiMedia12", "MM_DL12"},
+	{"QUIN_MI2S_RX Audio Mixer", "MultiMedia13", "MM_DL13"},
+	{"QUIN_MI2S_RX Audio Mixer", "MultiMedia14", "MM_DL14"},
+	{"QUIN_MI2S_RX Audio Mixer", "MultiMedia15", "MM_DL15"},
+	{"QUIN_MI2S_RX Audio Mixer", "MultiMedia16", "MM_DL16"},
+	{"QUIN_MI2S_RX Audio Mixer", "MultiMedia26", "MM_DL26"},
+	{"QUIN_MI2S_RX", NULL, "QUIN_MI2S_RX Audio Mixer"},
+
+	{"PRI_TDM_RX_0 Audio Mixer", "MultiMedia1", "MM_DL1"},
+	{"PRI_TDM_RX_0 Audio Mixer", "MultiMedia2", "MM_DL2"},
+	{"PRI_TDM_RX_0 Audio Mixer", "MultiMedia3", "MM_DL3"},
+	{"PRI_TDM_RX_0 Audio Mixer", "MultiMedia4", "MM_DL4"},
+	{"PRI_TDM_RX_0 Audio Mixer", "MultiMedia5", "MM_DL5"},
+	{"PRI_TDM_RX_0 Audio Mixer", "MultiMedia6", "MM_DL6"},
+	{"PRI_TDM_RX_0 Audio Mixer", "MultiMedia7", "MM_DL7"},
+	{"PRI_TDM_RX_0 Audio Mixer", "MultiMedia8", "MM_DL8"},
+	{"PRI_TDM_RX_0 Audio Mixer", "MultiMedia9", "MM_DL9"},
+	{"PRI_TDM_RX_0 Audio Mixer", "MultiMedia10", "MM_DL10"},
+	{"PRI_TDM_RX_0 Audio Mixer", "MultiMedia11", "MM_DL11"},
+	{"PRI_TDM_RX_0 Audio Mixer", "MultiMedia12", "MM_DL12"},
+	{"PRI_TDM_RX_0 Audio Mixer", "MultiMedia13", "MM_DL13"},
+	{"PRI_TDM_RX_0 Audio Mixer", "MultiMedia14", "MM_DL14"},
+	{"PRI_TDM_RX_0 Audio Mixer", "MultiMedia15", "MM_DL15"},
+	{"PRI_TDM_RX_0 Audio Mixer", "MultiMedia16", "MM_DL16"},
+	{"PRI_TDM_RX_0 Audio Mixer", "MultiMedia21", "MM_DL21"},
+	{"PRI_TDM_RX_0 Audio Mixer", "MultiMedia26", "MM_DL26"},
+	{"PRI_TDM_RX_0", NULL, "PRI_TDM_RX_0 Audio Mixer"},
+
+	{"PRI_TDM_RX_1 Audio Mixer", "MultiMedia1", "MM_DL1"},
+	{"PRI_TDM_RX_1 Audio Mixer", "MultiMedia2", "MM_DL2"},
+	{"PRI_TDM_RX_1 Audio Mixer", "MultiMedia3", "MM_DL3"},
+	{"PRI_TDM_RX_1 Audio Mixer", "MultiMedia4", "MM_DL4"},
+	{"PRI_TDM_RX_1 Audio Mixer", "MultiMedia5", "MM_DL5"},
+	{"PRI_TDM_RX_1 Audio Mixer", "MultiMedia6", "MM_DL6"},
+	{"PRI_TDM_RX_1 Audio Mixer", "MultiMedia7", "MM_DL7"},
+	{"PRI_TDM_RX_1 Audio Mixer", "MultiMedia8", "MM_DL8"},
+	{"PRI_TDM_RX_1 Audio Mixer", "MultiMedia9", "MM_DL9"},
+	{"PRI_TDM_RX_1 Audio Mixer", "MultiMedia10", "MM_DL10"},
+	{"PRI_TDM_RX_1 Audio Mixer", "MultiMedia11", "MM_DL11"},
+	{"PRI_TDM_RX_1 Audio Mixer", "MultiMedia12", "MM_DL12"},
+	{"PRI_TDM_RX_1 Audio Mixer", "MultiMedia13", "MM_DL13"},
+	{"PRI_TDM_RX_1 Audio Mixer", "MultiMedia14", "MM_DL14"},
+	{"PRI_TDM_RX_1 Audio Mixer", "MultiMedia15", "MM_DL15"},
+	{"PRI_TDM_RX_1 Audio Mixer", "MultiMedia16", "MM_DL16"},
+	{"PRI_TDM_RX_1 Audio Mixer", "MultiMedia21", "MM_DL21"},
+	{"PRI_TDM_RX_1 Audio Mixer", "MultiMedia26", "MM_DL26"},
+	{"PRI_TDM_RX_1", NULL, "PRI_TDM_RX_1 Audio Mixer"},
+
+	{"PRI_TDM_RX_2 Audio Mixer", "MultiMedia1", "MM_DL1"},
+	{"PRI_TDM_RX_2 Audio Mixer", "MultiMedia2", "MM_DL2"},
+	{"PRI_TDM_RX_2 Audio Mixer", "MultiMedia3", "MM_DL3"},
+	{"PRI_TDM_RX_2 Audio Mixer", "MultiMedia4", "MM_DL4"},
+	{"PRI_TDM_RX_2 Audio Mixer", "MultiMedia5", "MM_DL5"},
+	{"PRI_TDM_RX_2 Audio Mixer", "MultiMedia6", "MM_DL6"},
+	{"PRI_TDM_RX_2 Audio Mixer", "MultiMedia7", "MM_DL7"},
+	{"PRI_TDM_RX_2 Audio Mixer", "MultiMedia8", "MM_DL8"},
+	{"PRI_TDM_RX_2 Audio Mixer", "MultiMedia9", "MM_DL9"},
+	{"PRI_TDM_RX_2 Audio Mixer", "MultiMedia10", "MM_DL10"},
+	{"PRI_TDM_RX_2 Audio Mixer", "MultiMedia11", "MM_DL11"},
+	{"PRI_TDM_RX_2 Audio Mixer", "MultiMedia12", "MM_DL12"},
+	{"PRI_TDM_RX_2 Audio Mixer", "MultiMedia13", "MM_DL13"},
+	{"PRI_TDM_RX_2 Audio Mixer", "MultiMedia14", "MM_DL14"},
+	{"PRI_TDM_RX_2 Audio Mixer", "MultiMedia15", "MM_DL15"},
+	{"PRI_TDM_RX_2 Audio Mixer", "MultiMedia16", "MM_DL16"},
+	{"PRI_TDM_RX_2 Audio Mixer", "MultiMedia21", "MM_DL21"},
+	{"PRI_TDM_RX_2 Audio Mixer", "MultiMedia26", "MM_DL26"},
+	{"PRI_TDM_RX_2", NULL, "PRI_TDM_RX_2 Audio Mixer"},
+
+	{"PRI_TDM_RX_3 Audio Mixer", "MultiMedia1", "MM_DL1"},
+	{"PRI_TDM_RX_3 Audio Mixer", "MultiMedia2", "MM_DL2"},
+	{"PRI_TDM_RX_3 Audio Mixer", "MultiMedia3", "MM_DL3"},
+	{"PRI_TDM_RX_3 Audio Mixer", "MultiMedia4", "MM_DL4"},
+	{"PRI_TDM_RX_3 Audio Mixer", "MultiMedia5", "MM_DL5"},
+	{"PRI_TDM_RX_3 Audio Mixer", "MultiMedia6", "MM_DL6"},
+	{"PRI_TDM_RX_3 Audio Mixer", "MultiMedia7", "MM_DL7"},
+	{"PRI_TDM_RX_3 Audio Mixer", "MultiMedia8", "MM_DL8"},
+	{"PRI_TDM_RX_3 Audio Mixer", "MultiMedia9", "MM_DL9"},
+	{"PRI_TDM_RX_3 Audio Mixer", "MultiMedia10", "MM_DL10"},
+	{"PRI_TDM_RX_3 Audio Mixer", "MultiMedia11", "MM_DL11"},
+	{"PRI_TDM_RX_3 Audio Mixer", "MultiMedia12", "MM_DL12"},
+	{"PRI_TDM_RX_3 Audio Mixer", "MultiMedia13", "MM_DL13"},
+	{"PRI_TDM_RX_3 Audio Mixer", "MultiMedia14", "MM_DL14"},
+	{"PRI_TDM_RX_3 Audio Mixer", "MultiMedia15", "MM_DL15"},
+	{"PRI_TDM_RX_3 Audio Mixer", "MultiMedia16", "MM_DL16"},
+	{"PRI_TDM_RX_3 Audio Mixer", "MultiMedia21", "MM_DL21"},
+	{"PRI_TDM_RX_3 Audio Mixer", "MultiMedia26", "MM_DL26"},
+	{"PRI_TDM_RX_3", NULL, "PRI_TDM_RX_3 Audio Mixer"},
+
+	{"PRI_TDM_TX_0 Audio Mixer", "MultiMedia1", "MM_DL1"},
+	{"PRI_TDM_TX_0 Audio Mixer", "MultiMedia2", "MM_DL2"},
+	{"PRI_TDM_TX_0 Audio Mixer", "MultiMedia3", "MM_DL3"},
+	{"PRI_TDM_TX_0 Audio Mixer", "MultiMedia4", "MM_DL4"},
+	{"PRI_TDM_TX_0 Audio Mixer", "MultiMedia5", "MM_DL5"},
+	{"PRI_TDM_TX_0 Audio Mixer", "MultiMedia6", "MM_DL6"},
+	{"PRI_TDM_TX_0 Audio Mixer", "MultiMedia7", "MM_DL7"},
+	{"PRI_TDM_TX_0 Audio Mixer", "MultiMedia8", "MM_DL8"},
+	{"PRI_TDM_TX_0 Audio Mixer", "MultiMedia9", "MM_DL9"},
+	{"PRI_TDM_TX_0 Audio Mixer", "MultiMedia10", "MM_DL10"},
+	{"PRI_TDM_TX_0 Audio Mixer", "MultiMedia11", "MM_DL11"},
+	{"PRI_TDM_TX_0 Audio Mixer", "MultiMedia12", "MM_DL12"},
+	{"PRI_TDM_TX_0 Audio Mixer", "MultiMedia13", "MM_DL13"},
+	{"PRI_TDM_TX_0 Audio Mixer", "MultiMedia14", "MM_DL14"},
+	{"PRI_TDM_TX_0 Audio Mixer", "MultiMedia15", "MM_DL15"},
+	{"PRI_TDM_TX_0 Audio Mixer", "MultiMedia16", "MM_DL16"},
+	{"PRI_TDM_TX_0", NULL, "PRI_TDM_TX_0 Audio Mixer"},
+
+	{"SEC_TDM_RX_0 Audio Mixer", "MultiMedia1", "MM_DL1"},
+	{"SEC_TDM_RX_0 Audio Mixer", "MultiMedia2", "MM_DL2"},
+	{"SEC_TDM_RX_0 Audio Mixer", "MultiMedia3", "MM_DL3"},
+	{"SEC_TDM_RX_0 Audio Mixer", "MultiMedia4", "MM_DL4"},
+	{"SEC_TDM_RX_0 Audio Mixer", "MultiMedia5", "MM_DL5"},
+	{"SEC_TDM_RX_0 Audio Mixer", "MultiMedia6", "MM_DL6"},
+	{"SEC_TDM_RX_0 Audio Mixer", "MultiMedia7", "MM_DL7"},
+	{"SEC_TDM_RX_0 Audio Mixer", "MultiMedia8", "MM_DL8"},
+	{"SEC_TDM_RX_0 Audio Mixer", "MultiMedia9", "MM_DL9"},
+	{"SEC_TDM_RX_0 Audio Mixer", "MultiMedia10", "MM_DL10"},
+	{"SEC_TDM_RX_0 Audio Mixer", "MultiMedia11", "MM_DL11"},
+	{"SEC_TDM_RX_0 Audio Mixer", "MultiMedia12", "MM_DL12"},
+	{"SEC_TDM_RX_0 Audio Mixer", "MultiMedia13", "MM_DL13"},
+	{"SEC_TDM_RX_0 Audio Mixer", "MultiMedia14", "MM_DL14"},
+	{"SEC_TDM_RX_0 Audio Mixer", "MultiMedia15", "MM_DL15"},
+	{"SEC_TDM_RX_0 Audio Mixer", "MultiMedia16", "MM_DL16"},
+	{"SEC_TDM_RX_0 Audio Mixer", "MultiMedia21", "MM_DL21"},
+	{"SEC_TDM_RX_0 Audio Mixer", "MultiMedia26", "MM_DL26"},
+	{"SEC_TDM_RX_0", NULL, "SEC_TDM_RX_0 Audio Mixer"},
+
+	{"SEC_TDM_RX_1 Audio Mixer", "MultiMedia1", "MM_DL1"},
+	{"SEC_TDM_RX_1 Audio Mixer", "MultiMedia2", "MM_DL2"},
+	{"SEC_TDM_RX_1 Audio Mixer", "MultiMedia3", "MM_DL3"},
+	{"SEC_TDM_RX_1 Audio Mixer", "MultiMedia4", "MM_DL4"},
+	{"SEC_TDM_RX_1 Audio Mixer", "MultiMedia5", "MM_DL5"},
+	{"SEC_TDM_RX_1 Audio Mixer", "MultiMedia6", "MM_DL6"},
+	{"SEC_TDM_RX_1 Audio Mixer", "MultiMedia7", "MM_DL7"},
+	{"SEC_TDM_RX_1 Audio Mixer", "MultiMedia8", "MM_DL8"},
+	{"SEC_TDM_RX_1 Audio Mixer", "MultiMedia9", "MM_DL9"},
+	{"SEC_TDM_RX_1 Audio Mixer", "MultiMedia10", "MM_DL10"},
+	{"SEC_TDM_RX_1 Audio Mixer", "MultiMedia11", "MM_DL11"},
+	{"SEC_TDM_RX_1 Audio Mixer", "MultiMedia12", "MM_DL12"},
+	{"SEC_TDM_RX_1 Audio Mixer", "MultiMedia13", "MM_DL13"},
+	{"SEC_TDM_RX_1 Audio Mixer", "MultiMedia14", "MM_DL14"},
+	{"SEC_TDM_RX_1 Audio Mixer", "MultiMedia15", "MM_DL15"},
+	{"SEC_TDM_RX_1 Audio Mixer", "MultiMedia16", "MM_DL16"},
+	{"SEC_TDM_RX_1 Audio Mixer", "MultiMedia21", "MM_DL21"},
+	{"SEC_TDM_RX_1 Audio Mixer", "MultiMedia26", "MM_DL26"},
+	{"SEC_TDM_RX_1", NULL, "SEC_TDM_RX_1 Audio Mixer"},
+
+	{"SEC_TDM_RX_2 Audio Mixer", "MultiMedia1", "MM_DL1"},
+	{"SEC_TDM_RX_2 Audio Mixer", "MultiMedia2", "MM_DL2"},
+	{"SEC_TDM_RX_2 Audio Mixer", "MultiMedia3", "MM_DL3"},
+	{"SEC_TDM_RX_2 Audio Mixer", "MultiMedia4", "MM_DL4"},
+	{"SEC_TDM_RX_2 Audio Mixer", "MultiMedia5", "MM_DL5"},
+	{"SEC_TDM_RX_2 Audio Mixer", "MultiMedia6", "MM_DL6"},
+	{"SEC_TDM_RX_2 Audio Mixer", "MultiMedia7", "MM_DL7"},
+	{"SEC_TDM_RX_2 Audio Mixer", "MultiMedia8", "MM_DL8"},
+	{"SEC_TDM_RX_2 Audio Mixer", "MultiMedia9", "MM_DL9"},
+	{"SEC_TDM_RX_2 Audio Mixer", "MultiMedia10", "MM_DL10"},
+	{"SEC_TDM_RX_2 Audio Mixer", "MultiMedia11", "MM_DL11"},
+	{"SEC_TDM_RX_2 Audio Mixer", "MultiMedia12", "MM_DL12"},
+	{"SEC_TDM_RX_2 Audio Mixer", "MultiMedia13", "MM_DL13"},
+	{"SEC_TDM_RX_2 Audio Mixer", "MultiMedia14", "MM_DL14"},
+	{"SEC_TDM_RX_2 Audio Mixer", "MultiMedia15", "MM_DL15"},
+	{"SEC_TDM_RX_2 Audio Mixer", "MultiMedia16", "MM_DL16"},
+	{"SEC_TDM_RX_2 Audio Mixer", "MultiMedia21", "MM_DL21"},
+	{"SEC_TDM_RX_2 Audio Mixer", "MultiMedia26", "MM_DL26"},
+	{"SEC_TDM_RX_2", NULL, "SEC_TDM_RX_2 Audio Mixer"},
+
+	{"SEC_TDM_RX_3 Audio Mixer", "MultiMedia1", "MM_DL1"},
+	{"SEC_TDM_RX_3 Audio Mixer", "MultiMedia2", "MM_DL2"},
+	{"SEC_TDM_RX_3 Audio Mixer", "MultiMedia3", "MM_DL3"},
+	{"SEC_TDM_RX_3 Audio Mixer", "MultiMedia4", "MM_DL4"},
+	{"SEC_TDM_RX_3 Audio Mixer", "MultiMedia5", "MM_DL5"},
+	{"SEC_TDM_RX_3 Audio Mixer", "MultiMedia6", "MM_DL6"},
+	{"SEC_TDM_RX_3 Audio Mixer", "MultiMedia7", "MM_DL7"},
+	{"SEC_TDM_RX_3 Audio Mixer", "MultiMedia8", "MM_DL8"},
+	{"SEC_TDM_RX_3 Audio Mixer", "MultiMedia9", "MM_DL9"},
+	{"SEC_TDM_RX_3 Audio Mixer", "MultiMedia10", "MM_DL10"},
+	{"SEC_TDM_RX_3 Audio Mixer", "MultiMedia11", "MM_DL11"},
+	{"SEC_TDM_RX_3 Audio Mixer", "MultiMedia12", "MM_DL12"},
+	{"SEC_TDM_RX_3 Audio Mixer", "MultiMedia13", "MM_DL13"},
+	{"SEC_TDM_RX_3 Audio Mixer", "MultiMedia14", "MM_DL14"},
+	{"SEC_TDM_RX_3 Audio Mixer", "MultiMedia15", "MM_DL15"},
+	{"SEC_TDM_RX_3 Audio Mixer", "MultiMedia16", "MM_DL16"},
+	{"SEC_TDM_RX_3 Audio Mixer", "MultiMedia21", "MM_DL21"},
+	{"SEC_TDM_RX_3 Audio Mixer", "MultiMedia26", "MM_DL26"},
+	{"SEC_TDM_RX_3", NULL, "SEC_TDM_RX_3 Audio Mixer"},
+
+	{"SEC_TDM_TX_0 Audio Mixer", "MultiMedia1", "MM_DL1"},
+	{"SEC_TDM_TX_0 Audio Mixer", "MultiMedia2", "MM_DL2"},
+	{"SEC_TDM_TX_0 Audio Mixer", "MultiMedia3", "MM_DL3"},
+	{"SEC_TDM_TX_0 Audio Mixer", "MultiMedia4", "MM_DL4"},
+	{"SEC_TDM_TX_0 Audio Mixer", "MultiMedia5", "MM_DL5"},
+	{"SEC_TDM_TX_0 Audio Mixer", "MultiMedia6", "MM_DL6"},
+	{"SEC_TDM_TX_0 Audio Mixer", "MultiMedia7", "MM_DL7"},
+	{"SEC_TDM_TX_0 Audio Mixer", "MultiMedia8", "MM_DL8"},
+	{"SEC_TDM_TX_0 Audio Mixer", "MultiMedia9", "MM_DL9"},
+	{"SEC_TDM_TX_0 Audio Mixer", "MultiMedia10", "MM_DL10"},
+	{"SEC_TDM_TX_0 Audio Mixer", "MultiMedia11", "MM_DL11"},
+	{"SEC_TDM_TX_0 Audio Mixer", "MultiMedia12", "MM_DL12"},
+	{"SEC_TDM_TX_0 Audio Mixer", "MultiMedia13", "MM_DL13"},
+	{"SEC_TDM_TX_0 Audio Mixer", "MultiMedia14", "MM_DL14"},
+	{"SEC_TDM_TX_0 Audio Mixer", "MultiMedia15", "MM_DL15"},
+	{"SEC_TDM_TX_0 Audio Mixer", "MultiMedia16", "MM_DL16"},
+	{"SEC_TDM_TX_0", NULL, "SEC_TDM_TX_0 Audio Mixer"},
+
+	{"TERT_TDM_RX_0 Audio Mixer", "MultiMedia1", "MM_DL1"},
+	{"TERT_TDM_RX_0 Audio Mixer", "MultiMedia2", "MM_DL2"},
+	{"TERT_TDM_RX_0 Audio Mixer", "MultiMedia3", "MM_DL3"},
+	{"TERT_TDM_RX_0 Audio Mixer", "MultiMedia4", "MM_DL4"},
+	{"TERT_TDM_RX_0 Audio Mixer", "MultiMedia5", "MM_DL5"},
+	{"TERT_TDM_RX_0 Audio Mixer", "MultiMedia6", "MM_DL6"},
+	{"TERT_TDM_RX_0 Audio Mixer", "MultiMedia7", "MM_DL7"},
+	{"TERT_TDM_RX_0 Audio Mixer", "MultiMedia8", "MM_DL8"},
+	{"TERT_TDM_RX_0 Audio Mixer", "MultiMedia9", "MM_DL9"},
+	{"TERT_TDM_RX_0 Audio Mixer", "MultiMedia10", "MM_DL10"},
+	{"TERT_TDM_RX_0 Audio Mixer", "MultiMedia11", "MM_DL11"},
+	{"TERT_TDM_RX_0 Audio Mixer", "MultiMedia12", "MM_DL12"},
+	{"TERT_TDM_RX_0 Audio Mixer", "MultiMedia13", "MM_DL13"},
+	{"TERT_TDM_RX_0 Audio Mixer", "MultiMedia14", "MM_DL14"},
+	{"TERT_TDM_RX_0 Audio Mixer", "MultiMedia15", "MM_DL15"},
+	{"TERT_TDM_RX_0 Audio Mixer", "MultiMedia16", "MM_DL16"},
+	{"TERT_TDM_RX_0 Audio Mixer", "MultiMedia21", "MM_DL21"},
+	{"TERT_TDM_RX_0 Audio Mixer", "MultiMedia26", "MM_DL26"},
+	{"TERT_TDM_RX_0", NULL, "TERT_TDM_RX_0 Audio Mixer"},
+
+	{"TERT_TDM_TX_0 Audio Mixer", "MultiMedia1", "MM_DL1"},
+	{"TERT_TDM_TX_0 Audio Mixer", "MultiMedia2", "MM_DL2"},
+	{"TERT_TDM_TX_0 Audio Mixer", "MultiMedia3", "MM_DL3"},
+	{"TERT_TDM_TX_0 Audio Mixer", "MultiMedia4", "MM_DL4"},
+	{"TERT_TDM_TX_0 Audio Mixer", "MultiMedia5", "MM_DL5"},
+	{"TERT_TDM_TX_0 Audio Mixer", "MultiMedia6", "MM_DL6"},
+	{"TERT_TDM_TX_0 Audio Mixer", "MultiMedia7", "MM_DL7"},
+	{"TERT_TDM_TX_0 Audio Mixer", "MultiMedia8", "MM_DL8"},
+	{"TERT_TDM_TX_0 Audio Mixer", "MultiMedia9", "MM_DL9"},
+	{"TERT_TDM_TX_0 Audio Mixer", "MultiMedia10", "MM_DL10"},
+	{"TERT_TDM_TX_0 Audio Mixer", "MultiMedia11", "MM_DL11"},
+	{"TERT_TDM_TX_0 Audio Mixer", "MultiMedia12", "MM_DL12"},
+	{"TERT_TDM_TX_0 Audio Mixer", "MultiMedia13", "MM_DL13"},
+	{"TERT_TDM_TX_0 Audio Mixer", "MultiMedia14", "MM_DL14"},
+	{"TERT_TDM_TX_0 Audio Mixer", "MultiMedia15", "MM_DL15"},
+	{"TERT_TDM_TX_0 Audio Mixer", "MultiMedia16", "MM_DL16"},
+	{"TERT_TDM_TX_0", NULL, "TERT_TDM_TX_0 Audio Mixer"},
+
+	{"TERT_TDM_RX_1 Audio Mixer", "MultiMedia1", "MM_DL1"},
+	{"TERT_TDM_RX_1 Audio Mixer", "MultiMedia2", "MM_DL2"},
+	{"TERT_TDM_RX_1 Audio Mixer", "MultiMedia3", "MM_DL3"},
+	{"TERT_TDM_RX_1 Audio Mixer", "MultiMedia4", "MM_DL4"},
+	{"TERT_TDM_RX_1 Audio Mixer", "MultiMedia5", "MM_DL5"},
+	{"TERT_TDM_RX_1 Audio Mixer", "MultiMedia6", "MM_DL6"},
+	{"TERT_TDM_RX_1 Audio Mixer", "MultiMedia7", "MM_DL7"},
+	{"TERT_TDM_RX_1 Audio Mixer", "MultiMedia8", "MM_DL8"},
+	{"TERT_TDM_RX_1 Audio Mixer", "MultiMedia9", "MM_DL9"},
+	{"TERT_TDM_RX_1 Audio Mixer", "MultiMedia10", "MM_DL10"},
+	{"TERT_TDM_RX_1 Audio Mixer", "MultiMedia11", "MM_DL11"},
+	{"TERT_TDM_RX_1 Audio Mixer", "MultiMedia12", "MM_DL12"},
+	{"TERT_TDM_RX_1 Audio Mixer", "MultiMedia13", "MM_DL13"},
+	{"TERT_TDM_RX_1 Audio Mixer", "MultiMedia14", "MM_DL14"},
+	{"TERT_TDM_RX_1 Audio Mixer", "MultiMedia15", "MM_DL15"},
+	{"TERT_TDM_RX_1 Audio Mixer", "MultiMedia16", "MM_DL16"},
+	{"TERT_TDM_RX_1 Audio Mixer", "MultiMedia21", "MM_DL21"},
+	{"TERT_TDM_RX_1 Audio Mixer", "MultiMedia26", "MM_DL26"},
+	{"TERT_TDM_RX_1", NULL, "TERT_TDM_RX_1 Audio Mixer"},
+
+	{"TERT_TDM_RX_2 Audio Mixer", "MultiMedia1", "MM_DL1"},
+	{"TERT_TDM_RX_2 Audio Mixer", "MultiMedia2", "MM_DL2"},
+	{"TERT_TDM_RX_2 Audio Mixer", "MultiMedia3", "MM_DL3"},
+	{"TERT_TDM_RX_2 Audio Mixer", "MultiMedia4", "MM_DL4"},
+	{"TERT_TDM_RX_2 Audio Mixer", "MultiMedia5", "MM_DL5"},
+	{"TERT_TDM_RX_2 Audio Mixer", "MultiMedia6", "MM_DL6"},
+	{"TERT_TDM_RX_2 Audio Mixer", "MultiMedia7", "MM_DL7"},
+	{"TERT_TDM_RX_2 Audio Mixer", "MultiMedia8", "MM_DL8"},
+	{"TERT_TDM_RX_2 Audio Mixer", "MultiMedia9", "MM_DL9"},
+	{"TERT_TDM_RX_2 Audio Mixer", "MultiMedia10", "MM_DL10"},
+	{"TERT_TDM_RX_2 Audio Mixer", "MultiMedia11", "MM_DL11"},
+	{"TERT_TDM_RX_2 Audio Mixer", "MultiMedia12", "MM_DL12"},
+	{"TERT_TDM_RX_2 Audio Mixer", "MultiMedia13", "MM_DL13"},
+	{"TERT_TDM_RX_2 Audio Mixer", "MultiMedia14", "MM_DL14"},
+	{"TERT_TDM_RX_2 Audio Mixer", "MultiMedia15", "MM_DL15"},
+	{"TERT_TDM_RX_2 Audio Mixer", "MultiMedia16", "MM_DL16"},
+	{"TERT_TDM_RX_2 Audio Mixer", "MultiMedia21", "MM_DL21"},
+	{"TERT_TDM_RX_2 Audio Mixer", "MultiMedia26", "MM_DL26"},
+	{"TERT_TDM_RX_2", NULL, "TERT_TDM_RX_2 Audio Mixer"},
+
+	{"TERT_TDM_RX_3 Audio Mixer", "MultiMedia1", "MM_DL1"},
+	{"TERT_TDM_RX_3 Audio Mixer", "MultiMedia2", "MM_DL2"},
+	{"TERT_TDM_RX_3 Audio Mixer", "MultiMedia3", "MM_DL3"},
+	{"TERT_TDM_RX_3 Audio Mixer", "MultiMedia4", "MM_DL4"},
+	{"TERT_TDM_RX_3 Audio Mixer", "MultiMedia5", "MM_DL5"},
+	{"TERT_TDM_RX_3 Audio Mixer", "MultiMedia6", "MM_DL6"},
+	{"TERT_TDM_RX_3 Audio Mixer", "MultiMedia7", "MM_DL7"},
+	{"TERT_TDM_RX_3 Audio Mixer", "MultiMedia8", "MM_DL8"},
+	{"TERT_TDM_RX_3 Audio Mixer", "MultiMedia9", "MM_DL9"},
+	{"TERT_TDM_RX_3 Audio Mixer", "MultiMedia10", "MM_DL10"},
+	{"TERT_TDM_RX_3 Audio Mixer", "MultiMedia11", "MM_DL11"},
+	{"TERT_TDM_RX_3 Audio Mixer", "MultiMedia12", "MM_DL12"},
+	{"TERT_TDM_RX_3 Audio Mixer", "MultiMedia13", "MM_DL13"},
+	{"TERT_TDM_RX_3 Audio Mixer", "MultiMedia14", "MM_DL14"},
+	{"TERT_TDM_RX_3 Audio Mixer", "MultiMedia15", "MM_DL15"},
+	{"TERT_TDM_RX_3 Audio Mixer", "MultiMedia16", "MM_DL16"},
+	{"TERT_TDM_RX_3 Audio Mixer", "MultiMedia21", "MM_DL21"},
+	{"TERT_TDM_RX_3 Audio Mixer", "MultiMedia26", "MM_DL26"},
+	{"TERT_TDM_RX_3", NULL, "TERT_TDM_RX_3 Audio Mixer"},
+
+	{"TERT_TDM_RX_4 Audio Mixer", "MultiMedia1", "MM_DL1"},
+	{"TERT_TDM_RX_4 Audio Mixer", "MultiMedia2", "MM_DL2"},
+	{"TERT_TDM_RX_4 Audio Mixer", "MultiMedia3", "MM_DL3"},
+	{"TERT_TDM_RX_4 Audio Mixer", "MultiMedia4", "MM_DL4"},
+	{"TERT_TDM_RX_4 Audio Mixer", "MultiMedia5", "MM_DL5"},
+	{"TERT_TDM_RX_4 Audio Mixer", "MultiMedia6", "MM_DL6"},
+	{"TERT_TDM_RX_4 Audio Mixer", "MultiMedia7", "MM_DL7"},
+	{"TERT_TDM_RX_4 Audio Mixer", "MultiMedia8", "MM_DL8"},
+	{"TERT_TDM_RX_4 Audio Mixer", "MultiMedia9", "MM_DL9"},
+	{"TERT_TDM_RX_4 Audio Mixer", "MultiMedia10", "MM_DL10"},
+	{"TERT_TDM_RX_4 Audio Mixer", "MultiMedia11", "MM_DL11"},
+	{"TERT_TDM_RX_4 Audio Mixer", "MultiMedia12", "MM_DL12"},
+	{"TERT_TDM_RX_4 Audio Mixer", "MultiMedia13", "MM_DL13"},
+	{"TERT_TDM_RX_4 Audio Mixer", "MultiMedia14", "MM_DL14"},
+	{"TERT_TDM_RX_4 Audio Mixer", "MultiMedia15", "MM_DL15"},
+	{"TERT_TDM_RX_4 Audio Mixer", "MultiMedia16", "MM_DL16"},
+	{"TERT_TDM_RX_4 Audio Mixer", "MultiMedia21", "MM_DL21"},
+	{"TERT_TDM_RX_4 Audio Mixer", "MultiMedia26", "MM_DL26"},
+	{"TERT_TDM_RX_4", NULL, "TERT_TDM_RX_4 Audio Mixer"},
+
+	{"QUAT_TDM_RX_0 Audio Mixer", "MultiMedia1", "MM_DL1"},
+	{"QUAT_TDM_RX_0 Audio Mixer", "MultiMedia2", "MM_DL2"},
+	{"QUAT_TDM_RX_0 Audio Mixer", "MultiMedia3", "MM_DL3"},
+	{"QUAT_TDM_RX_0 Audio Mixer", "MultiMedia4", "MM_DL4"},
+	{"QUAT_TDM_RX_0 Audio Mixer", "MultiMedia5", "MM_DL5"},
+	{"QUAT_TDM_RX_0 Audio Mixer", "MultiMedia6", "MM_DL6"},
+	{"QUAT_TDM_RX_0 Audio Mixer", "MultiMedia7", "MM_DL7"},
+	{"QUAT_TDM_RX_0 Audio Mixer", "MultiMedia8", "MM_DL8"},
+	{"QUAT_TDM_RX_0 Audio Mixer", "MultiMedia9", "MM_DL9"},
+	{"QUAT_TDM_RX_0 Audio Mixer", "MultiMedia10", "MM_DL10"},
+	{"QUAT_TDM_RX_0 Audio Mixer", "MultiMedia11", "MM_DL11"},
+	{"QUAT_TDM_RX_0 Audio Mixer", "MultiMedia12", "MM_DL12"},
+	{"QUAT_TDM_RX_0 Audio Mixer", "MultiMedia13", "MM_DL13"},
+	{"QUAT_TDM_RX_0 Audio Mixer", "MultiMedia14", "MM_DL14"},
+	{"QUAT_TDM_RX_0 Audio Mixer", "MultiMedia15", "MM_DL15"},
+	{"QUAT_TDM_RX_0 Audio Mixer", "MultiMedia16", "MM_DL16"},
+	{"QUAT_TDM_RX_0 Audio Mixer", "MultiMedia20", "MM_DL20"},
+	{"QUAT_TDM_RX_0 Audio Mixer", "MultiMedia21", "MM_DL21"},
+	{"QUAT_TDM_RX_0 Audio Mixer", "MultiMedia26", "MM_DL26"},
+	{"QUAT_TDM_RX_0", NULL, "QUAT_TDM_RX_0 Audio Mixer"},
+
+	{"QUAT_TDM_TX_0 Audio Mixer", "MultiMedia1", "MM_DL1"},
+	{"QUAT_TDM_TX_0 Audio Mixer", "MultiMedia2", "MM_DL2"},
+	{"QUAT_TDM_TX_0 Audio Mixer", "MultiMedia3", "MM_DL3"},
+	{"QUAT_TDM_TX_0 Audio Mixer", "MultiMedia4", "MM_DL4"},
+	{"QUAT_TDM_TX_0 Audio Mixer", "MultiMedia5", "MM_DL5"},
+	{"QUAT_TDM_TX_0 Audio Mixer", "MultiMedia6", "MM_DL6"},
+	{"QUAT_TDM_TX_0 Audio Mixer", "MultiMedia7", "MM_DL7"},
+	{"QUAT_TDM_TX_0 Audio Mixer", "MultiMedia8", "MM_DL8"},
+	{"QUAT_TDM_TX_0 Audio Mixer", "MultiMedia9", "MM_DL9"},
+	{"QUAT_TDM_TX_0 Audio Mixer", "MultiMedia10", "MM_DL10"},
+	{"QUAT_TDM_TX_0 Audio Mixer", "MultiMedia11", "MM_DL11"},
+	{"QUAT_TDM_TX_0 Audio Mixer", "MultiMedia12", "MM_DL12"},
+	{"QUAT_TDM_TX_0 Audio Mixer", "MultiMedia13", "MM_DL13"},
+	{"QUAT_TDM_TX_0 Audio Mixer", "MultiMedia14", "MM_DL14"},
+	{"QUAT_TDM_TX_0 Audio Mixer", "MultiMedia15", "MM_DL15"},
+	{"QUAT_TDM_TX_0 Audio Mixer", "MultiMedia16", "MM_DL16"},
+	{"QUAT_TDM_TX_0", NULL, "QUAT_TDM_TX_0 Audio Mixer"},
+
+	{"QUAT_TDM_RX_1 Audio Mixer", "MultiMedia1", "MM_DL1"},
+	{"QUAT_TDM_RX_1 Audio Mixer", "MultiMedia2", "MM_DL2"},
+	{"QUAT_TDM_RX_1 Audio Mixer", "MultiMedia3", "MM_DL3"},
+	{"QUAT_TDM_RX_1 Audio Mixer", "MultiMedia4", "MM_DL4"},
+	{"QUAT_TDM_RX_1 Audio Mixer", "MultiMedia5", "MM_DL5"},
+	{"QUAT_TDM_RX_1 Audio Mixer", "MultiMedia6", "MM_DL6"},
+	{"QUAT_TDM_RX_1 Audio Mixer", "MultiMedia7", "MM_DL7"},
+	{"QUAT_TDM_RX_1 Audio Mixer", "MultiMedia8", "MM_DL8"},
+	{"QUAT_TDM_RX_1 Audio Mixer", "MultiMedia9", "MM_DL9"},
+	{"QUAT_TDM_RX_1 Audio Mixer", "MultiMedia10", "MM_DL10"},
+	{"QUAT_TDM_RX_1 Audio Mixer", "MultiMedia11", "MM_DL11"},
+	{"QUAT_TDM_RX_1 Audio Mixer", "MultiMedia12", "MM_DL12"},
+	{"QUAT_TDM_RX_1 Audio Mixer", "MultiMedia13", "MM_DL13"},
+	{"QUAT_TDM_RX_1 Audio Mixer", "MultiMedia14", "MM_DL14"},
+	{"QUAT_TDM_RX_1 Audio Mixer", "MultiMedia15", "MM_DL15"},
+	{"QUAT_TDM_RX_1 Audio Mixer", "MultiMedia16", "MM_DL16"},
+	{"QUAT_TDM_RX_1 Audio Mixer", "MultiMedia20", "MM_DL20"},
+	{"QUAT_TDM_RX_1 Audio Mixer", "MultiMedia21", "MM_DL21"},
+	{"QUAT_TDM_RX_1 Audio Mixer", "MultiMedia26", "MM_DL26"},
+	{"QUAT_TDM_RX_1", NULL, "QUAT_TDM_RX_1 Audio Mixer"},
+
+	{"QUAT_TDM_RX_2 Audio Mixer", "MultiMedia1", "MM_DL1"},
+	{"QUAT_TDM_RX_2 Audio Mixer", "MultiMedia2", "MM_DL2"},
+	{"QUAT_TDM_RX_2 Audio Mixer", "MultiMedia3", "MM_DL3"},
+	{"QUAT_TDM_RX_2 Audio Mixer", "MultiMedia4", "MM_DL4"},
+	{"QUAT_TDM_RX_2 Audio Mixer", "MultiMedia5", "MM_DL5"},
+	{"QUAT_TDM_RX_2 Audio Mixer", "MultiMedia6", "MM_DL6"},
+	{"QUAT_TDM_RX_2 Audio Mixer", "MultiMedia7", "MM_DL7"},
+	{"QUAT_TDM_RX_2 Audio Mixer", "MultiMedia8", "MM_DL8"},
+	{"QUAT_TDM_RX_2 Audio Mixer", "MultiMedia9", "MM_DL9"},
+	{"QUAT_TDM_RX_2 Audio Mixer", "MultiMedia10", "MM_DL10"},
+	{"QUAT_TDM_RX_2 Audio Mixer", "MultiMedia11", "MM_DL11"},
+	{"QUAT_TDM_RX_2 Audio Mixer", "MultiMedia12", "MM_DL12"},
+	{"QUAT_TDM_RX_2 Audio Mixer", "MultiMedia13", "MM_DL13"},
+	{"QUAT_TDM_RX_2 Audio Mixer", "MultiMedia14", "MM_DL14"},
+	{"QUAT_TDM_RX_2 Audio Mixer", "MultiMedia15", "MM_DL15"},
+	{"QUAT_TDM_RX_2 Audio Mixer", "MultiMedia16", "MM_DL16"},
+	{"QUAT_TDM_RX_2 Audio Mixer", "MultiMedia20", "MM_DL20"},
+	{"QUAT_TDM_RX_2 Audio Mixer", "MultiMedia21", "MM_DL21"},
+	{"QUAT_TDM_RX_2 Audio Mixer", "MultiMedia26", "MM_DL26"},
+	{"QUAT_TDM_RX_2", NULL, "QUAT_TDM_RX_2 Audio Mixer"},
+
+	{"QUAT_TDM_RX_3 Audio Mixer", "MultiMedia1", "MM_DL1"},
+	{"QUAT_TDM_RX_3 Audio Mixer", "MultiMedia2", "MM_DL2"},
+	{"QUAT_TDM_RX_3 Audio Mixer", "MultiMedia3", "MM_DL3"},
+	{"QUAT_TDM_RX_3 Audio Mixer", "MultiMedia4", "MM_DL4"},
+	{"QUAT_TDM_RX_3 Audio Mixer", "MultiMedia5", "MM_DL5"},
+	{"QUAT_TDM_RX_3 Audio Mixer", "MultiMedia6", "MM_DL6"},
+	{"QUAT_TDM_RX_3 Audio Mixer", "MultiMedia7", "MM_DL7"},
+	{"QUAT_TDM_RX_3 Audio Mixer", "MultiMedia8", "MM_DL8"},
+	{"QUAT_TDM_RX_3 Audio Mixer", "MultiMedia9", "MM_DL9"},
+	{"QUAT_TDM_RX_3 Audio Mixer", "MultiMedia10", "MM_DL10"},
+	{"QUAT_TDM_RX_3 Audio Mixer", "MultiMedia11", "MM_DL11"},
+	{"QUAT_TDM_RX_3 Audio Mixer", "MultiMedia12", "MM_DL12"},
+	{"QUAT_TDM_RX_3 Audio Mixer", "MultiMedia13", "MM_DL13"},
+	{"QUAT_TDM_RX_3 Audio Mixer", "MultiMedia14", "MM_DL14"},
+	{"QUAT_TDM_RX_3 Audio Mixer", "MultiMedia15", "MM_DL15"},
+	{"QUAT_TDM_RX_3 Audio Mixer", "MultiMedia16", "MM_DL16"},
+	{"QUAT_TDM_RX_3 Audio Mixer", "MultiMedia20", "MM_DL20"},
+	{"QUAT_TDM_RX_3 Audio Mixer", "MultiMedia21", "MM_DL21"},
+	{"QUAT_TDM_RX_3 Audio Mixer", "MultiMedia26", "MM_DL26"},
+	{"QUAT_TDM_RX_3", NULL, "QUAT_TDM_RX_3 Audio Mixer"},
+
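+	/*
+	 * Capture side: routes below are {sink, control, source} DAPM triples
+	 * letting a backend TX (capture) port feed a "MultiMediaN Mixer" and
+	 * hence the MM_ULn frontend uplink stream.
+	 */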
+	{"MultiMedia1 Mixer", "PRI_TX", "PRI_I2S_TX"},
+	{"MultiMedia1 Mixer", "MI2S_TX", "MI2S_TX"},
+	{"MultiMedia2 Mixer", "MI2S_TX", "MI2S_TX"},
+	{"MultiMedia3 Mixer", "MI2S_TX", "MI2S_TX"},
+	{"MultiMedia5 Mixer", "MI2S_TX", "MI2S_TX"},
+	{"MultiMedia16 Mixer", "MI2S_TX", "MI2S_TX"},
+	{"MultiMedia1 Mixer", "QUAT_MI2S_TX", "QUAT_MI2S_TX"},
+	{"MultiMedia2 Mixer", "QUAT_MI2S_TX", "QUAT_MI2S_TX"},
+	{"MultiMedia6 Mixer", "QUAT_MI2S_TX", "QUAT_MI2S_TX"},
+	{"MultiMedia27 Mixer", "QUAT_MI2S_TX", "QUAT_MI2S_TX"},
+	{"MultiMedia1 Mixer", "QUIN_MI2S_TX", "QUIN_MI2S_TX"},
+	{"MultiMedia2 Mixer", "QUIN_MI2S_TX", "QUIN_MI2S_TX"},
+	{"MultiMedia1 Mixer", "TERT_MI2S_TX", "TERT_MI2S_TX"},
+	{"MultiMedia2 Mixer", "TERT_MI2S_TX", "TERT_MI2S_TX"},
+	{"MultiMedia27 Mixer", "TERT_MI2S_TX", "TERT_MI2S_TX"},
+	{"MultiMedia1 Mixer", "INT2_MI2S_TX", "INT2_MI2S_TX"},
+	{"MultiMedia2 Mixer", "INT2_MI2S_TX", "INT2_MI2S_TX"},
+	{"MultiMedia1 Mixer", "INT3_MI2S_TX", "INT3_MI2S_TX"},
+	{"MultiMedia2 Mixer", "INT3_MI2S_TX", "INT3_MI2S_TX"},
+	{"MultiMedia1 Mixer", "SLIM_0_TX", "SLIMBUS_0_TX"},
+	{"MultiMedia27 Mixer", "SLIM_0_TX", "SLIMBUS_0_TX"},
+	{"MultiMedia1 Mixer", "AUX_PCM_UL_TX", "AUX_PCM_TX"},
+	{"MultiMedia3 Mixer", "AUX_PCM_TX", "AUX_PCM_TX"},
+	{"MultiMedia5 Mixer", "AUX_PCM_UL_TX", "AUX_PCM_TX"},
+	{"MultiMedia1 Mixer", "SEC_AUX_PCM_UL_TX", "SEC_AUX_PCM_TX"},
+	{"MultiMedia3 Mixer", "SEC_AUX_PCM_TX", "SEC_AUX_PCM_TX"},
+	{"MultiMedia5 Mixer", "SEC_AUX_PCM_TX", "SEC_AUX_PCM_TX"},
+	{"MultiMedia16 Mixer", "AUX_PCM_TX", "AUX_PCM_TX"},
+	{"MultiMedia16 Mixer", "SEC_AUX_PCM_TX", "SEC_AUX_PCM_TX"},
+	{"MultiMedia1 Mixer", "TERT_AUXPCM_UL_TX", "TERT_AUX_PCM_TX"},
+	{"MultiMedia3 Mixer", "TERT_AUX_PCM_TX", "TERT_AUX_PCM_TX"},
+	{"MultiMedia5 Mixer", "TERT_AUX_PCM_TX", "TERT_AUX_PCM_TX"},
+	{"MultiMedia1 Mixer", "QUAT_AUXPCM_UL_TX", "QUAT_AUX_PCM_TX"},
+	{"MultiMedia3 Mixer", "QUAT_AUX_PCM_TX", "QUAT_AUX_PCM_TX"},
+	{"MultiMedia5 Mixer", "QUAT_AUX_PCM_TX", "QUAT_AUX_PCM_TX"},
+	{"MultiMedia16 Mixer", "QUAT_AUX_PCM_TX", "QUAT_AUX_PCM_TX"},
+	{"MultiMedia2 Mixer", "SLIM_0_TX", "SLIMBUS_0_TX"},
+	{"MultiMedia2 Mixer", "SLIM_6_TX", "SLIMBUS_6_TX"},
+	{"MultiMedia2 Mixer", "SLIM_1_TX", "SLIMBUS_1_TX"},
+	{"MultiMedia2 Mixer", "SLIM_8_TX", "SLIMBUS_8_TX"},
+	{"MultiMedia1 Mixer", "SEC_MI2S_TX", "SEC_MI2S_TX"},
+	{"MultiMedia1 Mixer", "PRI_MI2S_TX", "PRI_MI2S_TX"},
+	{"MultiMedia27 Mixer", "PRI_MI2S_TX", "PRI_MI2S_TX"},
+	{"MultiMedia2 Mixer", "SEC_MI2S_TX", "SEC_MI2S_TX"},
+	{"MultiMedia6 Mixer", "SLIM_0_TX", "SLIMBUS_0_TX"},
+	{"MultiMedia6 Mixer", "TERT_MI2S_TX", "TERT_MI2S_TX"},
+	{"MultiMedia3 Mixer", "TERT_MI2S_TX", "TERT_MI2S_TX"},
+	{"MultiMedia5 Mixer", "TERT_MI2S_TX", "TERT_MI2S_TX"},
+	{"MultiMedia6 Mixer", "INT2_MI2S_TX", "INT2_MI2S_TX"},
+	{"MultiMedia3 Mixer", "INT2_MI2S_TX", "INT2_MI2S_TX"},
+	{"MultiMedia5 Mixer", "INT2_MI2S_TX", "INT2_MI2S_TX"},
+	{"MultiMedia16 Mixer", "INT2_MI2S_TX", "INT2_MI2S_TX"},
+	{"MultiMedia6 Mixer", "INT3_MI2S_TX", "INT3_MI2S_TX"},
+	{"MultiMedia3 Mixer", "INT3_MI2S_TX", "INT3_MI2S_TX"},
+	{"MultiMedia5 Mixer", "INT3_MI2S_TX", "INT3_MI2S_TX"},
+	{"MultiMedia16 Mixer", "INT3_MI2S_TX", "INT3_MI2S_TX"},
+	{"MultiMedia6 Mixer", "PRI_MI2S_TX", "PRI_MI2S_TX"},
+	{"MultiMedia6 Mixer", "AUX_PCM_UL_TX", "AUX_PCM_TX"},
+	{"MultiMedia6 Mixer", "SEC_AUX_PCM_UL_TX", "SEC_AUX_PCM_TX"},
+	{"MultiMedia6 Mixer", "SEC_MI2S_TX", "SEC_MI2S_TX"},
+
+	{"MultiMedia1 Mixer", "PRI_TDM_TX_0", "PRI_TDM_TX_0"},
+	{"MultiMedia1 Mixer", "PRI_TDM_TX_1", "PRI_TDM_TX_1"},
+	{"MultiMedia1 Mixer", "PRI_TDM_TX_2", "PRI_TDM_TX_2"},
+	{"MultiMedia1 Mixer", "PRI_TDM_TX_3", "PRI_TDM_TX_3"},
+	{"MultiMedia1 Mixer", "SEC_TDM_TX_0", "SEC_TDM_TX_0"},
+	{"MultiMedia1 Mixer", "SEC_TDM_TX_1", "SEC_TDM_TX_1"},
+	{"MultiMedia1 Mixer", "SEC_TDM_TX_2", "SEC_TDM_TX_2"},
+	{"MultiMedia1 Mixer", "SEC_TDM_TX_3", "SEC_TDM_TX_3"},
+	{"MultiMedia1 Mixer", "TERT_TDM_TX_0", "TERT_TDM_TX_0"},
+	{"MultiMedia1 Mixer", "TERT_TDM_TX_1", "TERT_TDM_TX_1"},
+	{"MultiMedia1 Mixer", "TERT_TDM_TX_2", "TERT_TDM_TX_2"},
+	{"MultiMedia1 Mixer", "TERT_TDM_TX_3", "TERT_TDM_TX_3"},
+	{"MultiMedia1 Mixer", "QUAT_TDM_TX_0", "QUAT_TDM_TX_0"},
+	{"MultiMedia1 Mixer", "QUAT_TDM_TX_1", "QUAT_TDM_TX_1"},
+	{"MultiMedia1 Mixer", "QUAT_TDM_TX_2", "QUAT_TDM_TX_2"},
+	{"MultiMedia1 Mixer", "QUAT_TDM_TX_3", "QUAT_TDM_TX_3"},
+
+	{"MultiMedia2 Mixer", "PRI_TDM_TX_0", "PRI_TDM_TX_0"},
+	{"MultiMedia2 Mixer", "PRI_TDM_TX_1", "PRI_TDM_TX_1"},
+	{"MultiMedia2 Mixer", "PRI_TDM_TX_2", "PRI_TDM_TX_2"},
+	{"MultiMedia2 Mixer", "PRI_TDM_TX_3", "PRI_TDM_TX_3"},
+	{"MultiMedia2 Mixer", "SEC_TDM_TX_0", "SEC_TDM_TX_0"},
+	{"MultiMedia2 Mixer", "SEC_TDM_TX_1", "SEC_TDM_TX_1"},
+	{"MultiMedia2 Mixer", "SEC_TDM_TX_2", "SEC_TDM_TX_2"},
+	{"MultiMedia2 Mixer", "SEC_TDM_TX_3", "SEC_TDM_TX_3"},
+	{"MultiMedia2 Mixer", "TERT_TDM_TX_0", "TERT_TDM_TX_0"},
+	{"MultiMedia2 Mixer", "TERT_TDM_TX_1", "TERT_TDM_TX_1"},
+	{"MultiMedia2 Mixer", "TERT_TDM_TX_2", "TERT_TDM_TX_2"},
+	{"MultiMedia2 Mixer", "TERT_TDM_TX_3", "TERT_TDM_TX_3"},
+	{"MultiMedia2 Mixer", "QUAT_TDM_TX_0", "QUAT_TDM_TX_0"},
+	{"MultiMedia2 Mixer", "QUAT_TDM_TX_1", "QUAT_TDM_TX_1"},
+	{"MultiMedia2 Mixer", "QUAT_TDM_TX_2", "QUAT_TDM_TX_2"},
+	{"MultiMedia2 Mixer", "QUAT_TDM_TX_3", "QUAT_TDM_TX_3"},
+
+	{"MultiMedia3 Mixer", "PRI_TDM_TX_0", "PRI_TDM_TX_0"},
+	{"MultiMedia3 Mixer", "PRI_TDM_TX_1", "PRI_TDM_TX_1"},
+	{"MultiMedia3 Mixer", "PRI_TDM_TX_2", "PRI_TDM_TX_2"},
+	{"MultiMedia3 Mixer", "PRI_TDM_TX_3", "PRI_TDM_TX_3"},
+	{"MultiMedia3 Mixer", "SEC_TDM_TX_0", "SEC_TDM_TX_0"},
+	{"MultiMedia3 Mixer", "SEC_TDM_TX_1", "SEC_TDM_TX_1"},
+	{"MultiMedia3 Mixer", "SEC_TDM_TX_2", "SEC_TDM_TX_2"},
+	{"MultiMedia3 Mixer", "SEC_TDM_TX_3", "SEC_TDM_TX_3"},
+	{"MultiMedia3 Mixer", "TERT_TDM_TX_0", "TERT_TDM_TX_0"},
+	{"MultiMedia3 Mixer", "TERT_TDM_TX_1", "TERT_TDM_TX_1"},
+	{"MultiMedia3 Mixer", "TERT_TDM_TX_2", "TERT_TDM_TX_2"},
+	{"MultiMedia3 Mixer", "TERT_TDM_TX_3", "TERT_TDM_TX_3"},
+	{"MultiMedia3 Mixer", "QUAT_TDM_TX_0", "QUAT_TDM_TX_0"},
+	{"MultiMedia3 Mixer", "QUAT_TDM_TX_1", "QUAT_TDM_TX_1"},
+	{"MultiMedia3 Mixer", "QUAT_TDM_TX_2", "QUAT_TDM_TX_2"},
+	{"MultiMedia3 Mixer", "QUAT_TDM_TX_3", "QUAT_TDM_TX_3"},
+
+	{"MultiMedia4 Mixer", "PRI_TDM_TX_0", "PRI_TDM_TX_0"},
+	{"MultiMedia4 Mixer", "PRI_TDM_TX_1", "PRI_TDM_TX_1"},
+	{"MultiMedia4 Mixer", "PRI_TDM_TX_2", "PRI_TDM_TX_2"},
+	{"MultiMedia4 Mixer", "PRI_TDM_TX_3", "PRI_TDM_TX_3"},
+	{"MultiMedia4 Mixer", "SEC_TDM_TX_0", "SEC_TDM_TX_0"},
+	{"MultiMedia4 Mixer", "SEC_TDM_TX_1", "SEC_TDM_TX_1"},
+	{"MultiMedia4 Mixer", "SEC_TDM_TX_2", "SEC_TDM_TX_2"},
+	{"MultiMedia4 Mixer", "SEC_TDM_TX_3", "SEC_TDM_TX_3"},
+	{"MultiMedia4 Mixer", "TERT_TDM_TX_0", "TERT_TDM_TX_0"},
+	{"MultiMedia4 Mixer", "TERT_TDM_TX_1", "TERT_TDM_TX_1"},
+	{"MultiMedia4 Mixer", "TERT_TDM_TX_2", "TERT_TDM_TX_2"},
+	{"MultiMedia4 Mixer", "TERT_TDM_TX_3", "TERT_TDM_TX_3"},
+	{"MultiMedia4 Mixer", "QUAT_TDM_TX_0", "QUAT_TDM_TX_0"},
+	{"MultiMedia4 Mixer", "QUAT_TDM_TX_1", "QUAT_TDM_TX_1"},
+	{"MultiMedia4 Mixer", "QUAT_TDM_TX_2", "QUAT_TDM_TX_2"},
+	{"MultiMedia4 Mixer", "QUAT_TDM_TX_3", "QUAT_TDM_TX_3"},
+
+	{"MultiMedia5 Mixer", "PRI_TDM_TX_0", "PRI_TDM_TX_0"},
+	{"MultiMedia5 Mixer", "PRI_TDM_TX_1", "PRI_TDM_TX_1"},
+	{"MultiMedia5 Mixer", "PRI_TDM_TX_2", "PRI_TDM_TX_2"},
+	{"MultiMedia5 Mixer", "PRI_TDM_TX_3", "PRI_TDM_TX_3"},
+	{"MultiMedia5 Mixer", "SEC_TDM_TX_0", "SEC_TDM_TX_0"},
+	{"MultiMedia5 Mixer", "SEC_TDM_TX_1", "SEC_TDM_TX_1"},
+	{"MultiMedia5 Mixer", "SEC_TDM_TX_2", "SEC_TDM_TX_2"},
+	{"MultiMedia5 Mixer", "SEC_TDM_TX_3", "SEC_TDM_TX_3"},
+	{"MultiMedia5 Mixer", "TERT_TDM_TX_0", "TERT_TDM_TX_0"},
+	{"MultiMedia5 Mixer", "TERT_TDM_TX_1", "TERT_TDM_TX_1"},
+	{"MultiMedia5 Mixer", "TERT_TDM_TX_2", "TERT_TDM_TX_2"},
+	{"MultiMedia5 Mixer", "TERT_TDM_TX_3", "TERT_TDM_TX_3"},
+	{"MultiMedia5 Mixer", "QUAT_TDM_TX_0", "QUAT_TDM_TX_0"},
+	{"MultiMedia5 Mixer", "QUAT_TDM_TX_1", "QUAT_TDM_TX_1"},
+	{"MultiMedia5 Mixer", "QUAT_TDM_TX_2", "QUAT_TDM_TX_2"},
+	{"MultiMedia5 Mixer", "QUAT_TDM_TX_3", "QUAT_TDM_TX_3"},
+
+	{"MultiMedia6 Mixer", "PRI_TDM_TX_0", "PRI_TDM_TX_0"},
+	{"MultiMedia6 Mixer", "PRI_TDM_TX_1", "PRI_TDM_TX_1"},
+	{"MultiMedia6 Mixer", "PRI_TDM_TX_2", "PRI_TDM_TX_2"},
+	{"MultiMedia6 Mixer", "PRI_TDM_TX_3", "PRI_TDM_TX_3"},
+	{"MultiMedia6 Mixer", "SEC_TDM_TX_0", "SEC_TDM_TX_0"},
+	{"MultiMedia6 Mixer", "SEC_TDM_TX_1", "SEC_TDM_TX_1"},
+	{"MultiMedia6 Mixer", "SEC_TDM_TX_2", "SEC_TDM_TX_2"},
+	{"MultiMedia6 Mixer", "SEC_TDM_TX_3", "SEC_TDM_TX_3"},
+	{"MultiMedia6 Mixer", "TERT_TDM_TX_0", "TERT_TDM_TX_0"},
+	{"MultiMedia6 Mixer", "TERT_TDM_TX_1", "TERT_TDM_TX_1"},
+	{"MultiMedia6 Mixer", "TERT_TDM_TX_2", "TERT_TDM_TX_2"},
+	{"MultiMedia6 Mixer", "TERT_TDM_TX_3", "TERT_TDM_TX_3"},
+	{"MultiMedia6 Mixer", "QUAT_TDM_TX_0", "QUAT_TDM_TX_0"},
+	{"MultiMedia6 Mixer", "QUAT_TDM_TX_1", "QUAT_TDM_TX_1"},
+	{"MultiMedia6 Mixer", "QUAT_TDM_TX_2", "QUAT_TDM_TX_2"},
+	{"MultiMedia6 Mixer", "QUAT_TDM_TX_3", "QUAT_TDM_TX_3"},
+
+	{"MultiMedia8 Mixer", "PRI_TDM_TX_0", "PRI_TDM_TX_0"},
+	{"MultiMedia8 Mixer", "PRI_TDM_TX_1", "PRI_TDM_TX_1"},
+	{"MultiMedia8 Mixer", "PRI_TDM_TX_2", "PRI_TDM_TX_2"},
+	{"MultiMedia8 Mixer", "PRI_TDM_TX_3", "PRI_TDM_TX_3"},
+	{"MultiMedia8 Mixer", "SEC_TDM_TX_0", "SEC_TDM_TX_0"},
+	{"MultiMedia8 Mixer", "SEC_TDM_TX_1", "SEC_TDM_TX_1"},
+	{"MultiMedia8 Mixer", "SEC_TDM_TX_2", "SEC_TDM_TX_2"},
+	{"MultiMedia8 Mixer", "SEC_TDM_TX_3", "SEC_TDM_TX_3"},
+	{"MultiMedia8 Mixer", "TERT_TDM_TX_0", "TERT_TDM_TX_0"},
+	{"MultiMedia8 Mixer", "TERT_TDM_TX_1", "TERT_TDM_TX_1"},
+	{"MultiMedia8 Mixer", "TERT_TDM_TX_2", "TERT_TDM_TX_2"},
+	{"MultiMedia8 Mixer", "TERT_TDM_TX_3", "TERT_TDM_TX_3"},
+	{"MultiMedia8 Mixer", "QUAT_TDM_TX_0", "QUAT_TDM_TX_0"},
+	{"MultiMedia8 Mixer", "QUAT_TDM_TX_1", "QUAT_TDM_TX_1"},
+	{"MultiMedia8 Mixer", "QUAT_TDM_TX_2", "QUAT_TDM_TX_2"},
+	{"MultiMedia8 Mixer", "QUAT_TDM_TX_3", "QUAT_TDM_TX_3"},
+
+	{"MultiMedia9 Mixer", "TERT_TDM_TX_0", "TERT_TDM_TX_0"},
+	{"MultiMedia9 Mixer", "TERT_TDM_TX_1", "TERT_TDM_TX_1"},
+	{"MultiMedia9 Mixer", "TERT_TDM_TX_2", "TERT_TDM_TX_2"},
+	{"MultiMedia9 Mixer", "TERT_TDM_TX_3", "TERT_TDM_TX_3"},
+	{"MultiMedia9 Mixer", "QUAT_TDM_TX_0", "QUAT_TDM_TX_0"},
+	{"MultiMedia9 Mixer", "QUAT_TDM_TX_1", "QUAT_TDM_TX_1"},
+	{"MultiMedia9 Mixer", "QUAT_TDM_TX_2", "QUAT_TDM_TX_2"},
+	{"MultiMedia9 Mixer", "QUAT_TDM_TX_3", "QUAT_TDM_TX_3"},
+
+	{"MultiMedia20 Mixer", "PRI_MI2S_TX", "PRI_MI2S_TX"},
+	{"MultiMedia20 Mixer", "SEC_MI2S_TX", "SEC_MI2S_TX"},
+	{"MultiMedia20 Mixer", "TERT_MI2S_TX", "TERT_MI2S_TX"},
+	{"MultiMedia20 Mixer", "QUAT_MI2S_TX", "QUAT_MI2S_TX"},
+	{"MultiMedia20 Mixer", "PRI_TDM_TX_0", "PRI_TDM_TX_0"},
+	{"MultiMedia20 Mixer", "PRI_TDM_TX_1", "PRI_TDM_TX_1"},
+	{"MultiMedia20 Mixer", "PRI_TDM_TX_2", "PRI_TDM_TX_2"},
+	{"MultiMedia20 Mixer", "PRI_TDM_TX_3", "PRI_TDM_TX_3"},
+	{"MultiMedia20 Mixer", "SEC_TDM_TX_0", "SEC_TDM_TX_0"},
+	{"MultiMedia20 Mixer", "SEC_TDM_TX_1", "SEC_TDM_TX_1"},
+	{"MultiMedia20 Mixer", "SEC_TDM_TX_2", "SEC_TDM_TX_2"},
+	{"MultiMedia20 Mixer", "SEC_TDM_TX_3", "SEC_TDM_TX_3"},
+	{"MultiMedia20 Mixer", "TERT_TDM_TX_0", "TERT_TDM_TX_0"},
+	{"MultiMedia20 Mixer", "TERT_TDM_TX_1", "TERT_TDM_TX_1"},
+	{"MultiMedia20 Mixer", "TERT_TDM_TX_2", "TERT_TDM_TX_2"},
+	{"MultiMedia20 Mixer", "TERT_TDM_TX_3", "TERT_TDM_TX_3"},
+	{"MultiMedia20 Mixer", "QUAT_TDM_TX_0", "QUAT_TDM_TX_0"},
+	{"MultiMedia20 Mixer", "QUAT_TDM_TX_1", "QUAT_TDM_TX_1"},
+	{"MultiMedia20 Mixer", "QUAT_TDM_TX_2", "QUAT_TDM_TX_2"},
+	{"MultiMedia20 Mixer", "QUAT_TDM_TX_3", "QUAT_TDM_TX_3"},
+
+	{"MultiMedia21 Mixer", "PRI_TDM_TX_0", "PRI_TDM_TX_0"},
+	{"MultiMedia21 Mixer", "PRI_TDM_TX_1", "PRI_TDM_TX_1"},
+	{"MultiMedia21 Mixer", "PRI_TDM_TX_2", "PRI_TDM_TX_2"},
+	{"MultiMedia21 Mixer", "PRI_TDM_TX_3", "PRI_TDM_TX_3"},
+	{"MultiMedia21 Mixer", "SEC_TDM_TX_0", "SEC_TDM_TX_0"},
+	{"MultiMedia21 Mixer", "SEC_TDM_TX_1", "SEC_TDM_TX_1"},
+	{"MultiMedia21 Mixer", "SEC_TDM_TX_2", "SEC_TDM_TX_2"},
+	{"MultiMedia21 Mixer", "SEC_TDM_TX_3", "SEC_TDM_TX_3"},
+	{"MultiMedia21 Mixer", "TERT_TDM_TX_0", "TERT_TDM_TX_0"},
+	{"MultiMedia21 Mixer", "TERT_TDM_TX_1", "TERT_TDM_TX_1"},
+	{"MultiMedia21 Mixer", "TERT_TDM_TX_2", "TERT_TDM_TX_2"},
+	{"MultiMedia21 Mixer", "TERT_TDM_TX_3", "TERT_TDM_TX_3"},
+	{"MultiMedia21 Mixer", "QUAT_TDM_TX_0", "QUAT_TDM_TX_0"},
+	{"MultiMedia21 Mixer", "QUAT_TDM_TX_1", "QUAT_TDM_TX_1"},
+	{"MultiMedia21 Mixer", "QUAT_TDM_TX_2", "QUAT_TDM_TX_2"},
+	{"MultiMedia21 Mixer", "QUAT_TDM_TX_3", "QUAT_TDM_TX_3"},
+	{"MultiMedia21 Mixer", "AUX_PCM_UL_TX", "AUX_PCM_TX"},
+	{"MultiMedia21 Mixer", "SEC_AUX_PCM_UL_TX", "SEC_AUX_PCM_TX"},
+
+	{"MultiMedia1 Mixer", "USB_AUDIO_TX", "USB_AUDIO_TX"},
+	{"MultiMedia2 Mixer", "USB_AUDIO_TX", "USB_AUDIO_TX"},
+	{"MultiMedia4 Mixer", "USB_AUDIO_TX", "USB_AUDIO_TX"},
+	{"MultiMedia5 Mixer", "USB_AUDIO_TX", "USB_AUDIO_TX"},
+	{"MultiMedia6 Mixer", "USB_AUDIO_TX", "USB_AUDIO_TX"},
+	{"MultiMedia8 Mixer", "USB_AUDIO_TX", "USB_AUDIO_TX"},
+
+	{"MultiMedia16 Mixer", "PRI_TDM_TX_0", "PRI_TDM_TX_0"},
+	{"MultiMedia16 Mixer", "PRI_TDM_TX_1", "PRI_TDM_TX_1"},
+	{"MultiMedia16 Mixer", "PRI_TDM_TX_2", "PRI_TDM_TX_2"},
+	{"MultiMedia16 Mixer", "PRI_TDM_TX_3", "PRI_TDM_TX_3"},
+	{"MultiMedia16 Mixer", "SEC_TDM_TX_0", "SEC_TDM_TX_0"},
+	{"MultiMedia16 Mixer", "SEC_TDM_TX_1", "SEC_TDM_TX_1"},
+	{"MultiMedia16 Mixer", "SEC_TDM_TX_2", "SEC_TDM_TX_2"},
+	{"MultiMedia16 Mixer", "SEC_TDM_TX_3", "SEC_TDM_TX_3"},
+	{"MultiMedia16 Mixer", "TERT_TDM_TX_0", "TERT_TDM_TX_0"},
+	{"MultiMedia16 Mixer", "TERT_TDM_TX_1", "TERT_TDM_TX_1"},
+	{"MultiMedia16 Mixer", "TERT_TDM_TX_2", "TERT_TDM_TX_2"},
+	{"MultiMedia16 Mixer", "TERT_TDM_TX_3", "TERT_TDM_TX_3"},
+	{"MultiMedia16 Mixer", "QUAT_TDM_TX_0", "QUAT_TDM_TX_0"},
+	{"MultiMedia16 Mixer", "QUAT_TDM_TX_1", "QUAT_TDM_TX_1"},
+	{"MultiMedia16 Mixer", "QUAT_TDM_TX_2", "QUAT_TDM_TX_2"},
+	{"MultiMedia16 Mixer", "QUAT_TDM_TX_3", "QUAT_TDM_TX_3"},
+	{"MultiMedia16 Mixer", "USB_AUDIO_TX", "USB_AUDIO_TX"},
+
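+	/*
+	 * Playback mixers for the internal (AFE pseudo) ports: BT SCO,
+	 * BT A2DP, FM and the AFE PCM proxy. Each MM_DLn frontend fans out
+	 * through the port's "Audio Mixer" widget.
+	 */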
+	{"INTERNAL_BT_SCO_RX Audio Mixer", "MultiMedia1", "MM_DL1"},
+	{"INTERNAL_BT_SCO_RX Audio Mixer", "MultiMedia2", "MM_DL2"},
+	{"INTERNAL_BT_SCO_RX Audio Mixer", "MultiMedia3", "MM_DL3"},
+	{"INTERNAL_BT_SCO_RX Audio Mixer", "MultiMedia4", "MM_DL4"},
+	{"INTERNAL_BT_SCO_RX Audio Mixer", "MultiMedia5", "MM_DL5"},
+	{"INTERNAL_BT_SCO_RX Audio Mixer", "MultiMedia6", "MM_DL6"},
+	{"INTERNAL_BT_SCO_RX Audio Mixer", "MultiMedia7", "MM_DL7"},
+	{"INTERNAL_BT_SCO_RX Audio Mixer", "MultiMedia8", "MM_DL8"},
+	{"INTERNAL_BT_SCO_RX Audio Mixer", "MultiMedia9", "MM_DL9"},
+	{"INTERNAL_BT_SCO_RX Audio Mixer", "MultiMedia10", "MM_DL10"},
+	{"INTERNAL_BT_SCO_RX Audio Mixer", "MultiMedia11", "MM_DL11"},
+	{"INTERNAL_BT_SCO_RX Audio Mixer", "MultiMedia12", "MM_DL12"},
+	{"INTERNAL_BT_SCO_RX Audio Mixer", "MultiMedia13", "MM_DL13"},
+	{"INTERNAL_BT_SCO_RX Audio Mixer", "MultiMedia14", "MM_DL14"},
+	{"INTERNAL_BT_SCO_RX Audio Mixer", "MultiMedia15", "MM_DL15"},
+	{"INTERNAL_BT_SCO_RX Audio Mixer", "MultiMedia16", "MM_DL16"},
+	{"INTERNAL_BT_SCO_RX Audio Mixer", "MultiMedia6", "MM_UL6"},
+	{"INT_BT_SCO_RX", NULL, "INTERNAL_BT_SCO_RX Audio Mixer"},
+
+	{"INTERNAL_A2DP_RX Audio Mixer", "MultiMedia1", "MM_DL1"},
+	{"INTERNAL_A2DP_RX Audio Mixer", "MultiMedia2", "MM_DL2"},
+	{"INTERNAL_A2DP_RX Audio Mixer", "MultiMedia3", "MM_DL3"},
+	{"INTERNAL_A2DP_RX Audio Mixer", "MultiMedia4", "MM_DL4"},
+	{"INTERNAL_A2DP_RX Audio Mixer", "MultiMedia5", "MM_DL5"},
+	{"INTERNAL_A2DP_RX Audio Mixer", "MultiMedia6", "MM_DL6"},
+	{"INTERNAL_A2DP_RX Audio Mixer", "MultiMedia7", "MM_DL7"},
+	{"INTERNAL_A2DP_RX Audio Mixer", "MultiMedia8", "MM_DL8"},
+	{"INTERNAL_A2DP_RX Audio Mixer", "MultiMedia9", "MM_DL9"},
+	{"INTERNAL_A2DP_RX Audio Mixer", "MultiMedia10", "MM_DL10"},
+	{"INTERNAL_A2DP_RX Audio Mixer", "MultiMedia11", "MM_DL11"},
+	{"INTERNAL_A2DP_RX Audio Mixer", "MultiMedia12", "MM_DL12"},
+	{"INTERNAL_A2DP_RX Audio Mixer", "MultiMedia13", "MM_DL13"},
+	{"INTERNAL_A2DP_RX Audio Mixer", "MultiMedia14", "MM_DL14"},
+	{"INTERNAL_A2DP_RX Audio Mixer", "MultiMedia15", "MM_DL15"},
+	{"INTERNAL_A2DP_RX Audio Mixer", "MultiMedia16", "MM_DL16"},
+	{"INTERNAL_A2DP_RX Audio Mixer", "MultiMedia6", "MM_UL6"},
+	{"INT_BT_A2DP_RX", NULL, "INTERNAL_A2DP_RX Audio Mixer"},
+
+	{"INTERNAL_FM_RX Audio Mixer", "MultiMedia1", "MM_DL1"},
+	{"INTERNAL_FM_RX Audio Mixer", "MultiMedia2", "MM_DL2"},
+	{"INTERNAL_FM_RX Audio Mixer", "MultiMedia3", "MM_DL3"},
+	{"INTERNAL_FM_RX Audio Mixer", "MultiMedia4", "MM_DL4"},
+	{"INTERNAL_FM_RX Audio Mixer", "MultiMedia5", "MM_DL5"},
+	{"INTERNAL_FM_RX Audio Mixer", "MultiMedia6", "MM_DL6"},
+	{"INTERNAL_FM_RX Audio Mixer", "MultiMedia7", "MM_DL7"},
+	{"INTERNAL_FM_RX Audio Mixer", "MultiMedia8", "MM_DL8"},
+	{"INTERNAL_FM_RX Audio Mixer", "MultiMedia9", "MM_DL9"},
+	{"INTERNAL_FM_RX Audio Mixer", "MultiMedia10", "MM_DL10"},
+	{"INTERNAL_FM_RX Audio Mixer", "MultiMedia11", "MM_DL11"},
+	{"INTERNAL_FM_RX Audio Mixer", "MultiMedia12", "MM_DL12"},
+	{"INTERNAL_FM_RX Audio Mixer", "MultiMedia13", "MM_DL13"},
+	{"INTERNAL_FM_RX Audio Mixer", "MultiMedia14", "MM_DL14"},
+	{"INTERNAL_FM_RX Audio Mixer", "MultiMedia15", "MM_DL15"},
+	{"INTERNAL_FM_RX Audio Mixer", "MultiMedia16", "MM_DL16"},
+	{"INT_FM_RX", NULL, "INTERNAL_FM_RX Audio Mixer"},
+
+	{"AFE_PCM_RX Audio Mixer", "MultiMedia1", "MM_DL1"},
+	{"AFE_PCM_RX Audio Mixer", "MultiMedia2", "MM_DL2"},
+	{"AFE_PCM_RX Audio Mixer", "MultiMedia3", "MM_DL3"},
+	{"AFE_PCM_RX Audio Mixer", "MultiMedia4", "MM_DL4"},
+	{"AFE_PCM_RX Audio Mixer", "MultiMedia5", "MM_DL5"},
+	{"AFE_PCM_RX Audio Mixer", "MultiMedia6", "MM_DL6"},
+	{"AFE_PCM_RX Audio Mixer", "MultiMedia7", "MM_DL7"},
+	{"AFE_PCM_RX Audio Mixer", "MultiMedia8", "MM_DL8"},
+	{"AFE_PCM_RX Audio Mixer", "MultiMedia9", "MM_DL9"},
+	{"AFE_PCM_RX Audio Mixer", "MultiMedia10", "MM_DL10"},
+	{"AFE_PCM_RX Audio Mixer", "MultiMedia11", "MM_DL11"},
+	{"AFE_PCM_RX Audio Mixer", "MultiMedia12", "MM_DL12"},
+	{"AFE_PCM_RX Audio Mixer", "MultiMedia13", "MM_DL13"},
+	{"AFE_PCM_RX Audio Mixer", "MultiMedia14", "MM_DL14"},
+	{"AFE_PCM_RX Audio Mixer", "MultiMedia15", "MM_DL15"},
+	{"AFE_PCM_RX Audio Mixer", "MultiMedia16", "MM_DL16"},
+	{"PCM_RX", NULL, "AFE_PCM_RX Audio Mixer"},
+
+	{"MultiMedia1 Mixer", "INTERNAL_BT_SCO_TX", "INT_BT_SCO_TX"},
+	{"MultiMedia3 Mixer", "INTERNAL_BT_SCO_TX", "INT_BT_SCO_TX"},
+	{"MultiMedia4 Mixer", "INTERNAL_BT_SCO_TX", "INT_BT_SCO_TX"},
+	{"MultiMedia17 Mixer", "INTERNAL_BT_SCO_TX", "INT_BT_SCO_TX"},
+	{"MultiMedia18 Mixer", "INTERNAL_BT_SCO_TX", "INT_BT_SCO_TX"},
+	{"MultiMedia19 Mixer", "INTERNAL_BT_SCO_TX", "INT_BT_SCO_TX"},
+	{"MultiMedia5 Mixer", "INTERNAL_BT_SCO_TX", "INT_BT_SCO_TX"},
+	{"MultiMedia8 Mixer", "INTERNAL_BT_SCO_TX", "INT_BT_SCO_TX"},
+	{"MultiMedia16 Mixer", "INTERNAL_BT_SCO_TX", "INT_BT_SCO_TX"},
+	{"MultiMedia1 Mixer", "INTERNAL_FM_TX", "INT_FM_TX"},
+	{"MultiMedia4 Mixer", "INTERNAL_FM_TX", "INT_FM_TX"},
+	{"MultiMedia16 Mixer", "INTERNAL_FM_TX", "INT_FM_TX"},
+	{"MultiMedia17 Mixer", "INTERNAL_FM_TX", "INT_FM_TX"},
+	{"MultiMedia18 Mixer", "INTERNAL_FM_TX", "INT_FM_TX"},
+	{"MultiMedia19 Mixer", "INTERNAL_FM_TX", "INT_FM_TX"},
+	{"MultiMedia5 Mixer", "INTERNAL_FM_TX", "INT_FM_TX"},
+	{"MultiMedia6 Mixer", "INTERNAL_FM_TX", "INT_FM_TX"},
+	{"MultiMedia8 Mixer", "INTERNAL_FM_TX", "INT_FM_TX"},
+
+	{"MultiMedia1 Mixer", "AFE_PCM_TX", "PCM_TX"},
+	{"MultiMedia3 Mixer", "AFE_PCM_TX", "PCM_TX"},
+	{"MultiMedia4 Mixer", "AFE_PCM_TX", "PCM_TX"},
+	{"MultiMedia17 Mixer", "AFE_PCM_TX", "PCM_TX"},
+	{"MultiMedia18 Mixer", "AFE_PCM_TX", "PCM_TX"},
+	{"MultiMedia19 Mixer", "AFE_PCM_TX", "PCM_TX"},
+	{"MultiMedia5 Mixer", "AFE_PCM_TX", "PCM_TX"},
+	{"MultiMedia8 Mixer", "AFE_PCM_TX", "PCM_TX"},
+	{"MultiMedia16 Mixer", "AFE_PCM_TX", "PCM_TX"},
+	{"MM_UL1", NULL, "MultiMedia1 Mixer"},
+	{"MultiMedia2 Mixer", "INTERNAL_FM_TX", "INT_FM_TX"},
+	{"MM_UL2", NULL, "MultiMedia2 Mixer"},
+	{"MM_UL3", NULL, "MultiMedia3 Mixer"},
+	{"MM_UL4", NULL, "MultiMedia4 Mixer"},
+	{"MM_UL5", NULL, "MultiMedia5 Mixer"},
+	{"MM_UL6", NULL, "MultiMedia6 Mixer"},
+	{"MM_UL8", NULL, "MultiMedia8 Mixer"},
+	{"MM_UL9", NULL, "MultiMedia9 Mixer"},
+	{"MM_UL16", NULL, "MultiMedia16 Mixer"},
+	{"MM_UL17", NULL, "MultiMedia17 Mixer"},
+	{"MM_UL18", NULL, "MultiMedia18 Mixer"},
+	{"MM_UL19", NULL, "MultiMedia19 Mixer"},
+	{"MM_UL20", NULL, "MultiMedia20 Mixer"},
+	{"MM_UL21", NULL, "MultiMedia21 Mixer"},
+	{"MM_UL27", NULL, "MultiMedia27 Mixer"},
+
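+	/* AUX PCM playback mixers, primary through quaternary instances. */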
+	{"AUX_PCM_RX Audio Mixer", "MultiMedia1", "MM_DL1"},
+	{"AUX_PCM_RX Audio Mixer", "MultiMedia2", "MM_DL2"},
+	{"AUX_PCM_RX Audio Mixer", "MultiMedia3", "MM_DL3"},
+	{"AUX_PCM_RX Audio Mixer", "MultiMedia4", "MM_DL4"},
+	{"AUX_PCM_RX Audio Mixer", "MultiMedia5", "MM_DL5"},
+	{"AUX_PCM_RX Audio Mixer", "MultiMedia6", "MM_DL6"},
+	{"AUX_PCM_RX Audio Mixer", "MultiMedia7", "MM_DL7"},
+	{"AUX_PCM_RX Audio Mixer", "MultiMedia8", "MM_DL8"},
+	{"AUX_PCM_RX Audio Mixer", "MultiMedia9", "MM_DL9"},
+	{"AUX_PCM_RX Audio Mixer", "MultiMedia10", "MM_DL10"},
+	{"AUX_PCM_RX Audio Mixer", "MultiMedia11", "MM_DL11"},
+	{"AUX_PCM_RX Audio Mixer", "MultiMedia12", "MM_DL12"},
+	{"AUX_PCM_RX Audio Mixer", "MultiMedia13", "MM_DL13"},
+	{"AUX_PCM_RX Audio Mixer", "MultiMedia14", "MM_DL14"},
+	{"AUX_PCM_RX Audio Mixer", "MultiMedia15", "MM_DL15"},
+	{"AUX_PCM_RX Audio Mixer", "MultiMedia16", "MM_DL16"},
+	{"AUX_PCM_RX Audio Mixer", "MultiMedia21", "MM_DL21"},
+	{"AUX_PCM_RX Audio Mixer", "MultiMedia6", "MM_UL6"},
+	{"AUX_PCM_RX Audio Mixer", "MultiMedia21", "MM_UL21"},
+	{"AUX_PCM_RX", NULL, "AUX_PCM_RX Audio Mixer"},
+
+	{"SEC_AUX_PCM_RX Audio Mixer", "MultiMedia1", "MM_DL1"},
+	{"SEC_AUX_PCM_RX Audio Mixer", "MultiMedia2", "MM_DL2"},
+	{"SEC_AUX_PCM_RX Audio Mixer", "MultiMedia3", "MM_DL3"},
+	{"SEC_AUX_PCM_RX Audio Mixer", "MultiMedia4", "MM_DL4"},
+	{"SEC_AUX_PCM_RX Audio Mixer", "MultiMedia5", "MM_DL5"},
+	{"SEC_AUX_PCM_RX Audio Mixer", "MultiMedia6", "MM_DL6"},
+	{"SEC_AUX_PCM_RX Audio Mixer", "MultiMedia7", "MM_DL7"},
+	{"SEC_AUX_PCM_RX Audio Mixer", "MultiMedia8", "MM_DL8"},
+	{"SEC_AUX_PCM_RX Audio Mixer", "MultiMedia9", "MM_DL9"},
+	{"SEC_AUX_PCM_RX Audio Mixer", "MultiMedia10", "MM_DL10"},
+	{"SEC_AUX_PCM_RX Audio Mixer", "MultiMedia11", "MM_DL11"},
+	{"SEC_AUX_PCM_RX Audio Mixer", "MultiMedia12", "MM_DL12"},
+	{"SEC_AUX_PCM_RX Audio Mixer", "MultiMedia13", "MM_DL13"},
+	{"SEC_AUX_PCM_RX Audio Mixer", "MultiMedia14", "MM_DL14"},
+	{"SEC_AUX_PCM_RX Audio Mixer", "MultiMedia15", "MM_DL15"},
+	{"SEC_AUX_PCM_RX Audio Mixer", "MultiMedia16", "MM_DL16"},
+	{"SEC_AUX_PCM_RX Audio Mixer", "MultiMedia21", "MM_DL21"},
+	{"SEC_AUX_PCM_RX Audio Mixer", "MultiMedia6", "MM_UL6"},
+	{"SEC_AUX_PCM_RX Audio Mixer", "MultiMedia21", "MM_UL21"},
+	{"SEC_AUX_PCM_RX", NULL, "SEC_AUX_PCM_RX Audio Mixer"},
+
+	{"TERT_AUX_PCM_RX Audio Mixer", "MultiMedia1", "MM_DL1"},
+	{"TERT_AUX_PCM_RX Audio Mixer", "MultiMedia2", "MM_DL2"},
+	{"TERT_AUX_PCM_RX Audio Mixer", "MultiMedia3", "MM_DL3"},
+	{"TERT_AUX_PCM_RX Audio Mixer", "MultiMedia4", "MM_DL4"},
+	{"TERT_AUX_PCM_RX Audio Mixer", "MultiMedia5", "MM_DL5"},
+	{"TERT_AUX_PCM_RX Audio Mixer", "MultiMedia6", "MM_DL6"},
+	{"TERT_AUX_PCM_RX Audio Mixer", "MultiMedia7", "MM_DL7"},
+	{"TERT_AUX_PCM_RX Audio Mixer", "MultiMedia8", "MM_DL8"},
+	{"TERT_AUX_PCM_RX Audio Mixer", "MultiMedia9", "MM_DL9"},
+	{"TERT_AUX_PCM_RX Audio Mixer", "MultiMedia10", "MM_DL10"},
+	{"TERT_AUX_PCM_RX Audio Mixer", "MultiMedia11", "MM_DL11"},
+	{"TERT_AUX_PCM_RX Audio Mixer", "MultiMedia12", "MM_DL12"},
+	{"TERT_AUX_PCM_RX Audio Mixer", "MultiMedia13", "MM_DL13"},
+	{"TERT_AUX_PCM_RX Audio Mixer", "MultiMedia14", "MM_DL14"},
+	{"TERT_AUX_PCM_RX Audio Mixer", "MultiMedia15", "MM_DL15"},
+	{"TERT_AUX_PCM_RX Audio Mixer", "MultiMedia16", "MM_DL16"},
+	{"TERT_AUX_PCM_RX", NULL, "TERT_AUX_PCM_RX Audio Mixer"},
+
+	{"QUAT_AUX_PCM_RX Audio Mixer", "MultiMedia1", "MM_DL1"},
+	{"QUAT_AUX_PCM_RX Audio Mixer", "MultiMedia2", "MM_DL2"},
+	{"QUAT_AUX_PCM_RX Audio Mixer", "MultiMedia3", "MM_DL3"},
+	{"QUAT_AUX_PCM_RX Audio Mixer", "MultiMedia4", "MM_DL4"},
+	{"QUAT_AUX_PCM_RX Audio Mixer", "MultiMedia5", "MM_DL5"},
+	{"QUAT_AUX_PCM_RX Audio Mixer", "MultiMedia6", "MM_DL6"},
+	{"QUAT_AUX_PCM_RX Audio Mixer", "MultiMedia7", "MM_DL7"},
+	{"QUAT_AUX_PCM_RX Audio Mixer", "MultiMedia8", "MM_DL8"},
+	{"QUAT_AUX_PCM_RX Audio Mixer", "MultiMedia9", "MM_DL9"},
+	{"QUAT_AUX_PCM_RX Audio Mixer", "MultiMedia10", "MM_DL10"},
+	{"QUAT_AUX_PCM_RX Audio Mixer", "MultiMedia11", "MM_DL11"},
+	{"QUAT_AUX_PCM_RX Audio Mixer", "MultiMedia12", "MM_DL12"},
+	{"QUAT_AUX_PCM_RX Audio Mixer", "MultiMedia13", "MM_DL13"},
+	{"QUAT_AUX_PCM_RX Audio Mixer", "MultiMedia14", "MM_DL14"},
+	{"QUAT_AUX_PCM_RX Audio Mixer", "MultiMedia15", "MM_DL15"},
+	{"QUAT_AUX_PCM_RX Audio Mixer", "MultiMedia16", "MM_DL16"},
+	{"QUAT_AUX_PCM_RX", NULL, "QUAT_AUX_PCM_RX Audio Mixer"},
+
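+	/*
+	 * Voice downlink: one "<backend>_Voice Mixer" per playback port, fed
+	 * by the voice session DL streams (CS voice, Voice2, VoLTE, VoWLAN,
+	 * VoIP, MMode1/2, QCHAT, DTMF and the stub sessions where supported).
+	 */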
+	{"MI2S_RX_Voice Mixer", "CSVoice", "CS-VOICE_DL1"},
+	{"MI2S_RX_Voice Mixer", "Voice2", "VOICE2_DL"},
+	{"MI2S_RX_Voice Mixer", "Voip", "VOIP_DL"},
+	{"MI2S_RX_Voice Mixer", "Voice Stub", "VOICE_STUB_DL"},
+	{"MI2S_RX_Voice Mixer", "DTMF", "DTMF_DL_HL"},
+	{"MI2S_RX_Voice Mixer", "QCHAT", "QCHAT_DL"},
+	{"MI2S_RX_Voice Mixer", "VoiceMMode1", "VOICEMMODE1_DL"},
+	{"MI2S_RX_Voice Mixer", "VoiceMMode2", "VOICEMMODE2_DL"},
+	{"MI2S_RX", NULL, "MI2S_RX_Voice Mixer"},
+
+	{"PRI_RX_Voice Mixer", "CSVoice", "CS-VOICE_DL1"},
+	{"PRI_RX_Voice Mixer", "Voice2", "VOICE2_DL"},
+	{"PRI_RX_Voice Mixer", "VoLTE", "VoLTE_DL"},
+	{"PRI_RX_Voice Mixer", "VoWLAN", "VoWLAN_DL"},
+	{"PRI_RX_Voice Mixer", "Voip", "VOIP_DL"},
+	{"PRI_RX_Voice Mixer", "DTMF", "DTMF_DL_HL"},
+	{"PRI_RX_Voice Mixer", "QCHAT", "QCHAT_DL"},
+	{"PRI_RX_Voice Mixer", "VoiceMMode1", "VOICEMMODE1_DL"},
+	{"PRI_RX_Voice Mixer", "VoiceMMode2", "VOICEMMODE2_DL"},
+	{"PRI_I2S_RX", NULL, "PRI_RX_Voice Mixer"},
+
+	{"SEC_RX_Voice Mixer", "CSVoice", "CS-VOICE_DL1"},
+	{"SEC_RX_Voice Mixer", "Voice2", "VOICE2_DL"},
+	{"SEC_RX_Voice Mixer", "VoLTE", "VoLTE_DL"},
+	{"SEC_RX_Voice Mixer", "VoWLAN", "VoWLAN_DL"},
+	{"SEC_RX_Voice Mixer", "Voip", "VOIP_DL"},
+	{"SEC_RX_Voice Mixer", "DTMF", "DTMF_DL_HL"},
+	{"SEC_RX_Voice Mixer", "QCHAT", "QCHAT_DL"},
+	{"SEC_I2S_RX", NULL, "SEC_RX_Voice Mixer"},
+
+	{"SEC_MI2S_RX_Voice Mixer", "CSVoice", "CS-VOICE_DL1"},
+	{"SEC_MI2S_RX_Voice Mixer", "Voice2", "VOICE2_DL"},
+	{"SEC_MI2S_RX_Voice Mixer", "VoLTE", "VoLTE_DL"},
+	{"SEC_MI2S_RX_Voice Mixer", "VoWLAN", "VoWLAN_DL"},
+	{"SEC_MI2S_RX_Voice Mixer", "Voip", "VOIP_DL"},
+	{"SEC_MI2S_RX_Voice Mixer", "DTMF", "DTMF_DL_HL"},
+	{"SEC_MI2S_RX_Voice Mixer", "QCHAT", "QCHAT_DL"},
+	{"SEC_MI2S_RX_Voice Mixer", "VoiceMMode1", "VOICEMMODE1_DL"},
+	{"SEC_MI2S_RX_Voice Mixer", "VoiceMMode2", "VOICEMMODE2_DL"},
+	{"SEC_MI2S_RX", NULL, "SEC_MI2S_RX_Voice Mixer"},
+
+	{"SLIM_0_RX_Voice Mixer", "CSVoice", "CS-VOICE_DL1"},
+	{"SLIM_0_RX_Voice Mixer", "Voice2", "VOICE2_DL"},
+	{"SLIM_0_RX_Voice Mixer", "VoLTE", "VoLTE_DL"},
+	{"SLIM_0_RX_Voice Mixer", "VoWLAN", "VoWLAN_DL"},
+	{"SLIM_0_RX_Voice Mixer", "Voip", "VOIP_DL"},
+	{"SLIM_0_RX_Voice Mixer", "DTMF", "DTMF_DL_HL"},
+	{"SLIM_0_RX_Voice Mixer", "Voice Stub", "VOICE_STUB_DL"},
+	{"SLIM_0_RX_Voice Mixer", "Voice2 Stub", "VOICE2_STUB_DL"},
+	{"SLIM_0_RX_Voice Mixer", "VoLTE Stub", "VOLTE_STUB_DL"},
+	{"SLIM_0_RX_Voice Mixer", "QCHAT", "QCHAT_DL"},
+	{"SLIM_0_RX_Voice Mixer", "VoiceMMode1", "VOICEMMODE1_DL"},
+	{"SLIM_0_RX_Voice Mixer", "VoiceMMode2", "VOICEMMODE2_DL"},
+	{"SLIMBUS_0_RX", NULL, "SLIM_0_RX_Voice Mixer"},
+
+	{"SLIM_6_RX_Voice Mixer", "CSVoice", "CS-VOICE_DL1"},
+	{"SLIM_6_RX_Voice Mixer", "Voice2", "VOICE2_DL"},
+	{"SLIM_6_RX_Voice Mixer", "VoLTE", "VoLTE_DL"},
+	{"SLIM_6_RX_Voice Mixer", "VoWLAN", "VoWLAN_DL"},
+	{"SLIM_6_RX_Voice Mixer", "Voip", "VOIP_DL"},
+	{"SLIM_6_RX_Voice Mixer", "DTMF", "DTMF_DL_HL"},
+	{"SLIM_6_RX_Voice Mixer", "Voice Stub", "VOICE_STUB_DL"},
+	{"SLIM_6_RX_Voice Mixer", "Voice2 Stub", "VOICE2_STUB_DL"},
+	{"SLIM_6_RX_Voice Mixer", "VoLTE Stub", "VOLTE_STUB_DL"},
+	{"SLIM_6_RX_Voice Mixer", "QCHAT", "QCHAT_DL"},
+	{"SLIM_6_RX_Voice Mixer", "VoiceMMode1", "VOICEMMODE1_DL"},
+	{"SLIM_6_RX_Voice Mixer", "VoiceMMode2", "VOICEMMODE2_DL"},
+	{"SLIMBUS_6_RX", NULL, "SLIM_6_RX_Voice Mixer"},
+
+	{"USB_AUDIO_RX_Voice Mixer", "CSVoice", "CS-VOICE_DL1"},
+	{"USB_AUDIO_RX_Voice Mixer", "Voice2", "VOICE2_DL"},
+	{"USB_AUDIO_RX_Voice Mixer", "VoLTE", "VoLTE_DL"},
+	{"USB_AUDIO_RX_Voice Mixer", "VoWLAN", "VoWLAN_DL"},
+	{"USB_AUDIO_RX_Voice Mixer", "Voip", "VOIP_DL"},
+	{"USB_AUDIO_RX_Voice Mixer", "DTMF", "DTMF_DL_HL"},
+	{"USB_AUDIO_RX_Voice Mixer", "Voice Stub", "VOICE_STUB_DL"},
+	{"USB_AUDIO_RX_Voice Mixer", "Voice2 Stub", "VOICE2_STUB_DL"},
+	{"USB_AUDIO_RX_Voice Mixer", "VoLTE Stub", "VOLTE_STUB_DL"},
+	{"USB_AUDIO_RX_Voice Mixer", "QCHAT", "QCHAT_DL"},
+	{"USB_AUDIO_RX_Voice Mixer", "VoiceMMode1", "VOICEMMODE1_DL"},
+	{"USB_AUDIO_RX_Voice Mixer", "VoiceMMode2", "VOICEMMODE2_DL"},
+	{"USB_AUDIO_RX", NULL, "USB_AUDIO_RX_Voice Mixer"},
+
+	{"INTERNAL_BT_SCO_RX_Voice Mixer", "CSVoice", "CS-VOICE_DL1"},
+	{"INTERNAL_BT_SCO_RX_Voice Mixer", "Voice2", "VOICE2_DL"},
+	{"INTERNAL_BT_SCO_RX_Voice Mixer", "VoLTE", "VoLTE_DL"},
+	{"INTERNAL_BT_SCO_RX_Voice Mixer", "VoWLAN", "VoWLAN_DL"},
+	{"INTERNAL_BT_SCO_RX_Voice Mixer", "Voip", "VOIP_DL"},
+	{"INTERNAL_BT_SCO_RX_Voice Mixer", "DTMF", "DTMF_DL_HL"},
+	{"INTERNAL_BT_SCO_RX_Voice Mixer", "QCHAT", "QCHAT_DL"},
+	{"INTERNAL_BT_SCO_RX_Voice Mixer", "VoiceMMode1", "VOICEMMODE1_DL"},
+	{"INTERNAL_BT_SCO_RX_Voice Mixer", "VoiceMMode2", "VOICEMMODE2_DL"},
+	{"INTERNAL_BT_SCO_RX_Voice Mixer", "Voice Stub", "VOICE_STUB_DL"},
+	{"INTERNAL_BT_SCO_RX_Voice Mixer", "Voice2 Stub", "VOICE2_STUB_DL"},
+	{"INT_BT_SCO_RX", NULL, "INTERNAL_BT_SCO_RX_Voice Mixer"},
+
+	{"AFE_PCM_RX_Voice Mixer", "CSVoice", "CS-VOICE_DL1"},
+	{"AFE_PCM_RX_Voice Mixer", "Voice2", "VOICE2_DL"},
+	{"AFE_PCM_RX_Voice Mixer", "VoLTE", "VoLTE_DL"},
+	{"AFE_PCM_RX_Voice Mixer", "VoWLAN", "VoWLAN_DL"},
+	{"AFE_PCM_RX_Voice Mixer", "Voip", "VOIP_DL"},
+	{"AFE_PCM_RX_Voice Mixer", "DTMF", "DTMF_DL_HL"},
+	{"AFE_PCM_RX_Voice Mixer", "QCHAT", "QCHAT_DL"},
+	{"AFE_PCM_RX_Voice Mixer", "VoiceMMode1", "VOICEMMODE1_DL"},
+	{"AFE_PCM_RX_Voice Mixer", "VoiceMMode2", "VOICEMMODE2_DL"},
+	{"PCM_RX", NULL, "AFE_PCM_RX_Voice Mixer"},
+
+	{"AUX_PCM_RX_Voice Mixer", "CSVoice", "CS-VOICE_DL1"},
+	{"AUX_PCM_RX_Voice Mixer", "Voice2", "VOICE2_DL"},
+	{"AUX_PCM_RX_Voice Mixer", "VoLTE", "VoLTE_DL"},
+	{"AUX_PCM_RX_Voice Mixer", "VoWLAN", "VoWLAN_DL"},
+	{"AUX_PCM_RX_Voice Mixer", "Voip", "VOIP_DL"},
+	{"AUX_PCM_RX_Voice Mixer", "DTMF", "DTMF_DL_HL"},
+	{"AUX_PCM_RX_Voice Mixer", "Voice Stub", "VOICE_STUB_DL"},
+	{"AUX_PCM_RX_Voice Mixer", "Voice2 Stub", "VOICE2_STUB_DL"},
+	{"AUX_PCM_RX_Voice Mixer", "VoLTE Stub", "VOLTE_STUB_DL"},
+	{"AUX_PCM_RX_Voice Mixer", "QCHAT", "QCHAT_DL"},
+	{"AUX_PCM_RX_Voice Mixer", "VoiceMMode1", "VOICEMMODE1_DL"},
+	{"AUX_PCM_RX_Voice Mixer", "VoiceMMode2", "VOICEMMODE2_DL"},
+	{"AUX_PCM_RX", NULL, "AUX_PCM_RX_Voice Mixer"},
+
+	{"SEC_AUX_PCM_RX_Voice Mixer", "CSVoice", "CS-VOICE_DL1"},
+	{"SEC_AUX_PCM_RX_Voice Mixer", "Voice2", "VOICE2_DL"},
+	{"SEC_AUX_PCM_RX_Voice Mixer", "VoLTE", "VoLTE_DL"},
+	{"SEC_AUX_PCM_RX_Voice Mixer", "VoWLAN", "VoWLAN_DL"},
+	{"SEC_AUX_PCM_RX_Voice Mixer", "Voip", "VOIP_DL"},
+	{"SEC_AUX_PCM_RX_Voice Mixer", "DTMF", "DTMF_DL_HL"},
+	{"SEC_AUX_PCM_RX_Voice Mixer", "Voice Stub", "VOICE_STUB_DL"},
+	{"SEC_AUX_PCM_RX_Voice Mixer", "QCHAT", "QCHAT_DL"},
+	{"SEC_AUX_PCM_RX_Voice Mixer", "VoiceMMode1", "VOICEMMODE1_DL"},
+	{"SEC_AUX_PCM_RX_Voice Mixer", "VoiceMMode2", "VOICEMMODE2_DL"},
+	{"SEC_AUX_PCM_RX", NULL, "SEC_AUX_PCM_RX_Voice Mixer"},
+
+	{"TERT_AUX_PCM_RX_Voice Mixer", "CSVoice", "CS-VOICE_DL1"},
+	{"TERT_AUX_PCM_RX_Voice Mixer", "Voice2", "VOICE2_DL"},
+	{"TERT_AUX_PCM_RX_Voice Mixer", "VoLTE", "VoLTE_DL"},
+	{"TERT_AUX_PCM_RX_Voice Mixer", "VoWLAN", "VoWLAN_DL"},
+	{"TERT_AUX_PCM_RX_Voice Mixer", "Voip", "VOIP_DL"},
+	{"TERT_AUX_PCM_RX_Voice Mixer", "DTMF", "DTMF_DL_HL"},
+	{"TERT_AUX_PCM_RX_Voice Mixer", "Voice Stub", "VOICE_STUB_DL"},
+	{"TERT_AUX_PCM_RX_Voice Mixer", "QCHAT", "QCHAT_DL"},
+	{"TERT_AUX_PCM_RX_Voice Mixer", "VoiceMMode1", "VOICEMMODE1_DL"},
+	{"TERT_AUX_PCM_RX_Voice Mixer", "VoiceMMode2", "VOICEMMODE2_DL"},
+	{"TERT_AUX_PCM_RX", NULL, "TERT_AUX_PCM_RX_Voice Mixer"},
+
+	{"QUAT_AUX_PCM_RX_Voice Mixer", "CSVoice", "CS-VOICE_DL1"},
+	{"QUAT_AUX_PCM_RX_Voice Mixer", "Voice2", "VOICE2_DL"},
+	{"QUAT_AUX_PCM_RX_Voice Mixer", "VoLTE", "VoLTE_DL"},
+	{"QUAT_AUX_PCM_RX_Voice Mixer", "VoWLAN", "VoWLAN_DL"},
+	{"QUAT_AUX_PCM_RX_Voice Mixer", "Voip", "VOIP_DL"},
+	{"QUAT_AUX_PCM_RX_Voice Mixer", "DTMF", "DTMF_DL_HL"},
+	{"QUAT_AUX_PCM_RX_Voice Mixer", "Voice Stub", "VOICE_STUB_DL"},
+	{"QUAT_AUX_PCM_RX_Voice Mixer", "QCHAT", "QCHAT_DL"},
+	{"QUAT_AUX_PCM_RX_Voice Mixer", "VoiceMMode1", "VOICEMMODE1_DL"},
+	{"QUAT_AUX_PCM_RX_Voice Mixer", "VoiceMMode2", "VOICEMMODE2_DL"},
+	{"QUAT_AUX_PCM_RX", NULL, "QUAT_AUX_PCM_RX_Voice Mixer"},
+
+	{"HDMI_RX_Voice Mixer", "CSVoice", "CS-VOICE_DL1"},
+	{"HDMI_RX_Voice Mixer", "Voice2", "VOICE2_DL"},
+	{"HDMI_RX_Voice Mixer", "VoLTE", "VoLTE_DL"},
+	{"HDMI_RX_Voice Mixer", "VoWLAN", "VoWLAN_DL"},
+	{"HDMI_RX_Voice Mixer", "Voip", "VOIP_DL"},
+	{"HDMI_RX_Voice Mixer", "DTMF", "DTMF_DL_HL"},
+	{"HDMI_RX_Voice Mixer", "QCHAT", "QCHAT_DL"},
+	{"HDMI_RX_Voice Mixer", "VoiceMMode1", "VOICEMMODE1_DL"},
+	{"HDMI_RX_Voice Mixer", "VoiceMMode2", "VOICEMMODE2_DL"},
+	{"HDMI", NULL, "HDMI_RX_Voice Mixer"},
+	{"HDMI", NULL, "HDMI_DL_HL"},
+
+	{"MI2S_RX_Voice Mixer", "CSVoice", "CS-VOICE_DL1"},
+	{"MI2S_RX_Voice Mixer", "Voice2", "VOICE2_DL"},
+	{"MI2S_RX_Voice Mixer", "Voip", "VOIP_DL"},
+	{"MI2S_RX_Voice Mixer", "VoLTE", "VoLTE_DL"},
+	{"MI2S_RX_Voice Mixer", "VoWLAN", "VoWLAN_DL"},
+	{"MI2S_RX_Voice Mixer", "Voice Stub", "VOICE_STUB_DL"},
+	{"MI2S_RX_Voice Mixer", "QCHAT", "QCHAT_DL"},
+	{"MI2S_RX_Voice Mixer", "VoiceMMode1", "VOICEMMODE1_DL"},
+	{"MI2S_RX_Voice Mixer", "VoiceMMode2", "VOICEMMODE2_DL"},
+	{"MI2S_RX", NULL, "MI2S_RX_Voice Mixer"},
+
+	{"PRI_MI2S_RX_Voice Mixer", "CSVoice", "CS-VOICE_DL1"},
+	{"PRI_MI2S_RX_Voice Mixer", "Voice2", "VOICE2_DL"},
+	{"PRI_MI2S_RX_Voice Mixer", "Voip", "VOIP_DL"},
+	{"PRI_MI2S_RX_Voice Mixer", "VoLTE", "VoLTE_DL"},
+	{"PRI_MI2S_RX_Voice Mixer", "VoWLAN", "VoWLAN_DL"},
+	{"PRI_MI2S_RX_Voice Mixer", "VoLTE Stub", "VOLTE_STUB_DL"},
+	{"PRI_MI2S_RX_Voice Mixer", "Voice Stub", "VOICE_STUB_DL"},
+	{"PRI_MI2S_RX_Voice Mixer", "Voice2 Stub", "VOICE2_STUB_DL"},
+	{"PRI_MI2S_RX_Voice Mixer", "QCHAT", "QCHAT_DL"},
+	{"PRI_MI2S_RX_Voice Mixer", "DTMF", "DTMF_DL_HL"},
+	{"PRI_MI2S_RX_Voice Mixer", "VoiceMMode1", "VOICEMMODE1_DL"},
+	{"PRI_MI2S_RX_Voice Mixer", "VoiceMMode2", "VOICEMMODE2_DL"},
+	{"PRI_MI2S_RX", NULL, "PRI_MI2S_RX_Voice Mixer"},
+
+	{"INT0_MI2S_RX_Voice Mixer", "Voip", "VOIP_DL"},
+	{"INT0_MI2S_RX_Voice Mixer", "QCHAT", "QCHAT_DL"},
+	{"INT0_MI2S_RX_Voice Mixer", "DTMF", "DTMF_DL_HL"},
+	{"INT0_MI2S_RX_Voice Mixer", "VoiceMMode1", "VOICEMMODE1_DL"},
+	{"INT0_MI2S_RX_Voice Mixer", "VoiceMMode2", "VOICEMMODE2_DL"},
+	{"INT0_MI2S_RX", NULL, "INT0_MI2S_RX_Voice Mixer"},
+
+	{"INT4_MI2S_RX_Voice Mixer", "Voip", "VOIP_DL"},
+	{"INT4_MI2S_RX_Voice Mixer", "QCHAT", "QCHAT_DL"},
+	{"INT4_MI2S_RX_Voice Mixer", "DTMF", "DTMF_DL_HL"},
+	{"INT4_MI2S_RX_Voice Mixer", "VoiceMMode1", "VOICEMMODE1_DL"},
+	{"INT4_MI2S_RX_Voice Mixer", "VoiceMMode2", "VOICEMMODE2_DL"},
+	{"INT4_MI2S_RX", NULL, "INT4_MI2S_RX_Voice Mixer"},
+
+	{"TERT_MI2S_RX_Voice Mixer", "CSVoice", "CS-VOICE_DL1"},
+	{"TERT_MI2S_RX_Voice Mixer", "Voice2", "VOICE2_DL"},
+	{"TERT_MI2S_RX_Voice Mixer", "Voip", "VOIP_DL"},
+	{"TERT_MI2S_RX_Voice Mixer", "VoLTE", "VoLTE_DL"},
+	{"TERT_MI2S_RX_Voice Mixer", "VoWLAN", "VoWLAN_DL"},
+	{"TERT_MI2S_RX_Voice Mixer", "VoLTE Stub", "VOLTE_STUB_DL"},
+	{"TERT_MI2S_RX_Voice Mixer", "Voice Stub", "VOICE_STUB_DL"},
+	{"TERT_MI2S_RX_Voice Mixer", "Voice2 Stub", "VOICE2_STUB_DL"},
+	{"TERT_MI2S_RX_Voice Mixer", "QCHAT", "QCHAT_DL"},
+	{"TERT_MI2S_RX_Voice Mixer", "DTMF", "DTMF_DL_HL"},
+	{"TERT_MI2S_RX_Voice Mixer", "VoiceMMode1", "VOICEMMODE1_DL"},
+	{"TERT_MI2S_RX_Voice Mixer", "VoiceMMode2", "VOICEMMODE2_DL"},
+	{"TERT_MI2S_RX", NULL, "TERT_MI2S_RX_Voice Mixer"},
+
+	{"QUAT_MI2S_RX_Voice Mixer", "CSVoice", "CS-VOICE_DL1"},
+	{"QUAT_MI2S_RX_Voice Mixer", "Voice2", "VOICE2_DL"},
+	{"QUAT_MI2S_RX_Voice Mixer", "Voip", "VOIP_DL"},
+	{"QUAT_MI2S_RX_Voice Mixer", "VoLTE", "VoLTE_DL"},
+	{"QUAT_MI2S_RX_Voice Mixer", "VoWLAN", "VoWLAN_DL"},
+	{"QUAT_MI2S_RX_Voice Mixer", "VoLTE Stub", "VOLTE_STUB_DL"},
+	{"QUAT_MI2S_RX_Voice Mixer", "Voice Stub", "VOICE_STUB_DL"},
+	{"QUAT_MI2S_RX_Voice Mixer", "Voice2 Stub", "VOICE2_STUB_DL"},
+	{"QUAT_MI2S_RX_Voice Mixer", "QCHAT", "QCHAT_DL"},
+	{"QUAT_MI2S_RX_Voice Mixer", "VoiceMMode1", "VOICEMMODE1_DL"},
+	{"QUAT_MI2S_RX_Voice Mixer", "VoiceMMode2", "VOICEMMODE2_DL"},
+	{"QUAT_MI2S_RX", NULL, "QUAT_MI2S_RX_Voice Mixer"},
+
+	{"QUIN_MI2S_RX_Voice Mixer", "CSVoice", "CS-VOICE_DL1"},
+	{"QUIN_MI2S_RX_Voice Mixer", "Voice2", "VOICE2_DL"},
+	{"QUIN_MI2S_RX_Voice Mixer", "Voip", "VOIP_DL"},
+	{"QUIN_MI2S_RX_Voice Mixer", "VoLTE", "VoLTE_DL"},
+	{"QUIN_MI2S_RX_Voice Mixer", "VoWLAN", "VoWLAN_DL"},
+	{"QUIN_MI2S_RX_Voice Mixer", "VoLTE Stub", "VOLTE_STUB_DL"},
+	{"QUIN_MI2S_RX_Voice Mixer", "Voice Stub", "VOICE_STUB_DL"},
+	{"QUIN_MI2S_RX_Voice Mixer", "Voice2 Stub", "VOICE2_STUB_DL"},
+	{"QUIN_MI2S_RX_Voice Mixer", "QCHAT", "QCHAT_DL"},
+	{"QUIN_MI2S_RX_Voice Mixer", "VoiceMMode1", "VOICEMMODE1_DL"},
+	{"QUIN_MI2S_RX_Voice Mixer", "VoiceMMode2", "VOICEMMODE2_DL"},
+	{"QUIN_MI2S_RX", NULL, "QUIN_MI2S_RX_Voice Mixer"},
+
+	{"QUAT_TDM_RX_2_Voice Mixer", "VoiceMMode1", "VOICEMMODE1_DL"},
+	{"QUAT_TDM_RX_2", NULL, "QUAT_TDM_RX_2_Voice Mixer"},
+
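+	/*
+	 * VOC_EXT_EC MUX selects which TX port supplies the external
+	 * echo-cancellation reference for the voice uplink sessions below.
+	 */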
+	{"VOC_EXT_EC MUX", "PRI_MI2S_TX" , "PRI_MI2S_TX"},
+	{"VOC_EXT_EC MUX", "SEC_MI2S_TX" , "SEC_MI2S_TX"},
+	{"VOC_EXT_EC MUX", "TERT_MI2S_TX" , "TERT_MI2S_TX"},
+	{"VOC_EXT_EC MUX", "QUAT_MI2S_TX" , "QUAT_MI2S_TX"},
+	{"VOC_EXT_EC MUX", "SLIM_1_TX" ,    "SLIMBUS_1_TX"},
+	{"CS-VOICE_UL1", NULL, "VOC_EXT_EC MUX"},
+	{"VOIP_UL", NULL, "VOC_EXT_EC MUX"},
+	{"VoLTE_UL", NULL, "VOC_EXT_EC MUX"},
+	{"VOICE2_UL", NULL, "VOC_EXT_EC MUX"},
+	{"VoWLAN_UL", NULL, "VOC_EXT_EC MUX"},
+	{"VOICEMMODE1_UL", NULL, "VOC_EXT_EC MUX"},
+	{"VOICEMMODE2_UL", NULL, "VOC_EXT_EC MUX"},
+
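+	/*
+	 * AUDIO_REF_EC_ULn MUXes: per-frontend echo-cancellation reference,
+	 * mixed into the corresponding MM_ULn capture stream.
+	 */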
+	{"AUDIO_REF_EC_UL1 MUX", "PRI_MI2S_TX" , "PRI_MI2S_TX"},
+	{"AUDIO_REF_EC_UL1 MUX", "SEC_MI2S_TX" , "SEC_MI2S_TX"},
+	{"AUDIO_REF_EC_UL1 MUX", "TERT_MI2S_TX" , "TERT_MI2S_TX"},
+	{"AUDIO_REF_EC_UL1 MUX", "QUAT_MI2S_TX" , "QUAT_MI2S_TX"},
+	{"AUDIO_REF_EC_UL1 MUX", "SLIM_1_TX" , "SLIMBUS_1_TX"},
+	{"AUDIO_REF_EC_UL1 MUX", "QUAT_TDM_TX_1" , "QUAT_TDM_TX_1"},
+	{"AUDIO_REF_EC_UL1 MUX", "QUAT_TDM_RX_0" , "QUAT_TDM_RX_0"},
+	{"AUDIO_REF_EC_UL1 MUX", "QUAT_TDM_RX_1" , "QUAT_TDM_RX_1"},
+	{"AUDIO_REF_EC_UL1 MUX", "QUAT_TDM_RX_2" , "QUAT_TDM_RX_2"},
+	{"AUDIO_REF_EC_UL1 MUX", "TERT_TDM_TX_0" , "TERT_TDM_TX_0"},
+
+	{"AUDIO_REF_EC_UL2 MUX", "PRI_MI2S_TX" , "PRI_MI2S_TX"},
+	{"AUDIO_REF_EC_UL2 MUX", "SEC_MI2S_TX" , "SEC_MI2S_TX"},
+	{"AUDIO_REF_EC_UL2 MUX", "TERT_MI2S_TX" , "TERT_MI2S_TX"},
+	{"AUDIO_REF_EC_UL2 MUX", "QUAT_MI2S_TX" , "QUAT_MI2S_TX"},
+
+	{"AUDIO_REF_EC_UL3 MUX", "PRI_MI2S_TX", "PRI_MI2S_TX"},
+	{"AUDIO_REF_EC_UL3 MUX", "SEC_MI2S_TX", "SEC_MI2S_TX"},
+	{"AUDIO_REF_EC_UL3 MUX", "TERT_MI2S_TX", "TERT_MI2S_TX"},
+	{"AUDIO_REF_EC_UL3 MUX", "QUAT_MI2S_TX", "QUAT_MI2S_TX"},
+
+	{"AUDIO_REF_EC_UL4 MUX", "PRI_MI2S_TX" , "PRI_MI2S_TX"},
+	{"AUDIO_REF_EC_UL4 MUX", "SEC_MI2S_TX" , "SEC_MI2S_TX"},
+	{"AUDIO_REF_EC_UL4 MUX", "TERT_MI2S_TX" , "TERT_MI2S_TX"},
+	{"AUDIO_REF_EC_UL4 MUX", "QUAT_MI2S_TX" , "QUAT_MI2S_TX"},
+
+	{"AUDIO_REF_EC_UL5 MUX", "PRI_MI2S_TX" , "PRI_MI2S_TX"},
+	{"AUDIO_REF_EC_UL5 MUX", "SEC_MI2S_TX" , "SEC_MI2S_TX"},
+	{"AUDIO_REF_EC_UL5 MUX", "TERT_MI2S_TX" , "TERT_MI2S_TX"},
+	{"AUDIO_REF_EC_UL5 MUX", "QUAT_MI2S_TX" , "QUAT_MI2S_TX"},
+
+	{"AUDIO_REF_EC_UL6 MUX", "PRI_MI2S_TX" , "PRI_MI2S_TX"},
+	{"AUDIO_REF_EC_UL6 MUX", "SEC_MI2S_TX" , "SEC_MI2S_TX"},
+	{"AUDIO_REF_EC_UL6 MUX", "TERT_MI2S_TX" , "TERT_MI2S_TX"},
+	{"AUDIO_REF_EC_UL6 MUX", "QUAT_MI2S_TX" , "QUAT_MI2S_TX"},
+
+	{"AUDIO_REF_EC_UL8 MUX", "PRI_MI2S_TX" , "PRI_MI2S_TX"},
+	{"AUDIO_REF_EC_UL8 MUX", "SEC_MI2S_TX" , "SEC_MI2S_TX"},
+	{"AUDIO_REF_EC_UL8 MUX", "TERT_MI2S_TX" , "TERT_MI2S_TX"},
+	{"AUDIO_REF_EC_UL8 MUX", "QUAT_MI2S_TX" , "QUAT_MI2S_TX"},
+
+	{"AUDIO_REF_EC_UL9 MUX", "PRI_MI2S_TX" , "PRI_MI2S_TX"},
+	{"AUDIO_REF_EC_UL9 MUX", "SEC_MI2S_TX" , "SEC_MI2S_TX"},
+	{"AUDIO_REF_EC_UL9 MUX", "TERT_MI2S_TX" , "TERT_MI2S_TX"},
+	{"AUDIO_REF_EC_UL9 MUX", "QUAT_MI2S_TX" , "QUAT_MI2S_TX"},
+
+	{"AUDIO_REF_EC_UL17 MUX", "PRI_MI2S_TX", "PRI_MI2S_TX"},
+	{"AUDIO_REF_EC_UL17 MUX", "SEC_MI2S_TX", "SEC_MI2S_TX"},
+	{"AUDIO_REF_EC_UL17 MUX", "TERT_MI2S_TX", "TERT_MI2S_TX"},
+	{"AUDIO_REF_EC_UL17 MUX", "QUAT_MI2S_TX", "QUAT_MI2S_TX"},
+
+	{"AUDIO_REF_EC_UL18 MUX", "PRI_MI2S_TX", "PRI_MI2S_TX"},
+	{"AUDIO_REF_EC_UL18 MUX", "SEC_MI2S_TX", "SEC_MI2S_TX"},
+	{"AUDIO_REF_EC_UL18 MUX", "TERT_MI2S_TX", "TERT_MI2S_TX"},
+	{"AUDIO_REF_EC_UL18 MUX", "QUAT_MI2S_TX", "QUAT_MI2S_TX"},
+
+	{"AUDIO_REF_EC_UL19 MUX", "PRI_MI2S_TX", "PRI_MI2S_TX"},
+	{"AUDIO_REF_EC_UL19 MUX", "SEC_MI2S_TX", "SEC_MI2S_TX"},
+	{"AUDIO_REF_EC_UL19 MUX", "TERT_MI2S_TX", "TERT_MI2S_TX"},
+	{"AUDIO_REF_EC_UL19 MUX", "QUAT_MI2S_TX", "QUAT_MI2S_TX"},
+
+	{"MM_UL1", NULL, "AUDIO_REF_EC_UL1 MUX"},
+	{"MM_UL2", NULL, "AUDIO_REF_EC_UL2 MUX"},
+	{"MM_UL3", NULL, "AUDIO_REF_EC_UL3 MUX"},
+	{"MM_UL4", NULL, "AUDIO_REF_EC_UL4 MUX"},
+	{"MM_UL5", NULL, "AUDIO_REF_EC_UL5 MUX"},
+	{"MM_UL6", NULL, "AUDIO_REF_EC_UL6 MUX"},
+	{"MM_UL8", NULL, "AUDIO_REF_EC_UL8 MUX"},
+	{"MM_UL9", NULL, "AUDIO_REF_EC_UL9 MUX"},
+	{"MM_UL16", NULL, "AUDIO_REF_EC_UL16 MUX"},
+	{"MM_UL17", NULL, "AUDIO_REF_EC_UL17 MUX"},
+	{"MM_UL18", NULL, "AUDIO_REF_EC_UL18 MUX"},
+	{"MM_UL19", NULL, "AUDIO_REF_EC_UL19 MUX"},
+
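+	/*
+	 * Voice uplink: one "<session>_Tx Mixer" per voice session, selecting
+	 * the TX port that feeds that session's UL stream.
+	 */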
+	{"Voice_Tx Mixer", "PRI_TX_Voice", "PRI_I2S_TX"},
+	{"Voice_Tx Mixer", "PRI_MI2S_TX_Voice", "PRI_MI2S_TX"},
+	{"Voice_Tx Mixer", "MI2S_TX_Voice", "MI2S_TX"},
+	{"Voice_Tx Mixer", "TERT_MI2S_TX_Voice", "TERT_MI2S_TX"},
+	{"Voice_Tx Mixer", "SLIM_0_TX_Voice", "SLIMBUS_0_TX"},
+	{"Voice_Tx Mixer", "SLIM_7_TX_Voice", "SLIMBUS_7_TX"},
+	{"Voice_Tx Mixer", "SLIM_8_TX_Voice", "SLIMBUS_8_TX"},
+	{"Voice_Tx Mixer", "USB_AUDIO_TX_Voice", "USB_AUDIO_TX"},
+	{"Voice_Tx Mixer", "INTERNAL_BT_SCO_TX_Voice", "INT_BT_SCO_TX"},
+	{"Voice_Tx Mixer", "AFE_PCM_TX_Voice", "PCM_TX"},
+	{"Voice_Tx Mixer", "AUX_PCM_TX_Voice", "AUX_PCM_TX"},
+	{"Voice_Tx Mixer", "SEC_AUX_PCM_TX_Voice", "SEC_AUX_PCM_TX"},
+	{"Voice_Tx Mixer", "TERT_AUX_PCM_TX_Voice", "TERT_AUX_PCM_TX"},
+	{"Voice_Tx Mixer", "QUAT_AUX_PCM_TX_Voice", "QUAT_AUX_PCM_TX"},
+	{"Voice_Tx Mixer", "SEC_MI2S_TX_Voice", "SEC_MI2S_TX"},
+	{"CS-VOICE_UL1", NULL, "Voice_Tx Mixer"},
+
+	{"Voice2_Tx Mixer", "PRI_TX_Voice2", "PRI_I2S_TX"},
+	{"Voice2_Tx Mixer", "PRI_MI2S_TX_Voice2", "PRI_MI2S_TX"},
+	{"Voice2_Tx Mixer", "MI2S_TX_Voice2", "MI2S_TX"},
+	{"Voice2_Tx Mixer", "TERT_MI2S_TX_Voice2", "TERT_MI2S_TX"},
+	{"Voice2_Tx Mixer", "SLIM_0_TX_Voice2", "SLIMBUS_0_TX"},
+	{"Voice2_Tx Mixer", "SLIM_7_TX_Voice2", "SLIMBUS_7_TX"},
+	{"Voice2_Tx Mixer", "SLIM_8_TX_Voice2", "SLIMBUS_8_TX"},
+	{"Voice2_Tx Mixer", "USB_AUDIO_TX_Voice2", "USB_AUDIO_TX"},
+	{"Voice2_Tx Mixer", "INTERNAL_BT_SCO_TX_Voice2", "INT_BT_SCO_TX"},
+	{"Voice2_Tx Mixer", "AFE_PCM_TX_Voice2", "PCM_TX"},
+	{"Voice2_Tx Mixer", "AUX_PCM_TX_Voice2", "AUX_PCM_TX"},
+	{"Voice2_Tx Mixer", "SEC_AUX_PCM_TX_Voice2", "SEC_AUX_PCM_TX"},
+	{"Voice2_Tx Mixer", "TERT_AUX_PCM_TX_Voice2", "TERT_AUX_PCM_TX"},
+	{"Voice2_Tx Mixer", "QUAT_AUX_PCM_TX_Voice2", "QUAT_AUX_PCM_TX"},
+	{"VOICE2_UL", NULL, "Voice2_Tx Mixer"},
+
+	{"VoLTE_Tx Mixer", "PRI_TX_VoLTE", "PRI_I2S_TX"},
+	{"VoLTE_Tx Mixer", "SLIM_0_TX_VoLTE", "SLIMBUS_0_TX"},
+	{"VoLTE_Tx Mixer", "SLIM_7_TX_VoLTE", "SLIMBUS_7_TX"},
+	{"VoLTE_Tx Mixer", "SLIM_8_TX_VoLTE", "SLIMBUS_8_TX"},
+	{"VoLTE_Tx Mixer", "USB_AUDIO_TX_VoLTE", "USB_AUDIO_TX"},
+	{"VoLTE_Tx Mixer", "INTERNAL_BT_SCO_TX_VoLTE", "INT_BT_SCO_TX"},
+	{"VoLTE_Tx Mixer", "AFE_PCM_TX_VoLTE", "PCM_TX"},
+	{"VoLTE_Tx Mixer", "AUX_PCM_TX_VoLTE", "AUX_PCM_TX"},
+	{"VoLTE_Tx Mixer", "SEC_AUX_PCM_TX_VoLTE", "SEC_AUX_PCM_TX"},
+	{"VoLTE_Tx Mixer", "TERT_AUX_PCM_TX_VoLTE", "TERT_AUX_PCM_TX"},
+	{"VoLTE_Tx Mixer", "QUAT_AUX_PCM_TX_VoLTE", "QUAT_AUX_PCM_TX"},
+	{"VoLTE_Tx Mixer", "MI2S_TX_VoLTE", "MI2S_TX"},
+	{"VoLTE_Tx Mixer", "PRI_MI2S_TX_VoLTE", "PRI_MI2S_TX"},
+	{"VoLTE_Tx Mixer", "TERT_MI2S_TX_VoLTE", "TERT_MI2S_TX"},
+	{"VoLTE_UL", NULL, "VoLTE_Tx Mixer"},
+
+	{"VoWLAN_Tx Mixer", "PRI_TX_VoWLAN", "PRI_I2S_TX"},
+	{"VoWLAN_Tx Mixer", "SLIM_0_TX_VoWLAN", "SLIMBUS_0_TX"},
+	{"VoWLAN_Tx Mixer", "SLIM_7_TX_VoWLAN", "SLIMBUS_7_TX"},
+	{"VoWLAN_Tx Mixer", "SLIM_8_TX_VoWLAN", "SLIMBUS_8_TX"},
+	{"VoWLAN_Tx Mixer", "USB_AUDIO_TX_VoWLAN", "USB_AUDIO_TX"},
+	{"VoWLAN_Tx Mixer", "INTERNAL_BT_SCO_TX_VoWLAN", "INT_BT_SCO_TX"},
+	{"VoWLAN_Tx Mixer", "AFE_PCM_TX_VoWLAN", "PCM_TX"},
+	{"VoWLAN_Tx Mixer", "AUX_PCM_TX_VoWLAN", "AUX_PCM_TX"},
+	{"VoWLAN_Tx Mixer", "SEC_AUX_PCM_TX_VoWLAN", "SEC_AUX_PCM_TX"},
+	{"VoWLAN_Tx Mixer", "TERT_AUX_PCM_TX_VoWLAN", "TERT_AUX_PCM_TX"},
+	{"VoWLAN_Tx Mixer", "QUAT_AUX_PCM_TX_VoWLAN", "QUAT_AUX_PCM_TX"},
+	{"VoWLAN_Tx Mixer", "MI2S_TX_VoWLAN", "MI2S_TX"},
+	{"VoWLAN_Tx Mixer", "PRI_MI2S_TX_VoWLAN", "PRI_MI2S_TX"},
+	{"VoWLAN_Tx Mixer", "TERT_MI2S_TX_VoWLAN", "TERT_MI2S_TX"},
+	{"VoWLAN_UL", NULL, "VoWLAN_Tx Mixer"},
+
+	{"VoiceMMode1_Tx Mixer", "PRI_TX_MMode1", "PRI_I2S_TX"},
+	{"VoiceMMode1_Tx Mixer", "PRI_MI2S_TX_MMode1", "PRI_MI2S_TX"},
+	{"VoiceMMode1_Tx Mixer", "MI2S_TX_MMode1", "MI2S_TX"},
+	{"VoiceMMode1_Tx Mixer", "TERT_MI2S_TX_MMode1", "TERT_MI2S_TX"},
+	{"VoiceMMode1_Tx Mixer", "INT3_MI2S_TX_MMode1", "INT3_MI2S_TX"},
+	{"VoiceMMode1_Tx Mixer", "SLIM_0_TX_MMode1", "SLIMBUS_0_TX"},
+	{"VoiceMMode1_Tx Mixer", "SLIM_7_TX_MMode1", "SLIMBUS_7_TX"},
+	{"VoiceMMode1_Tx Mixer", "SLIM_8_TX_MMode1", "SLIMBUS_8_TX"},
+	{"VoiceMMode1_Tx Mixer", "USB_AUDIO_TX_MMode1", "USB_AUDIO_TX"},
+	{"VoiceMMode1_Tx Mixer", "INT_BT_SCO_TX_MMode1", "INT_BT_SCO_TX"},
+	{"VoiceMMode1_Tx Mixer", "AFE_PCM_TX_MMode1", "PCM_TX"},
+	{"VoiceMMode1_Tx Mixer", "AUX_PCM_TX_MMode1", "AUX_PCM_TX"},
+	{"VoiceMMode1_Tx Mixer", "SEC_AUX_PCM_TX_MMode1", "SEC_AUX_PCM_TX"},
+	{"VoiceMMode1_Tx Mixer", "TERT_AUX_PCM_TX_MMode1", "TERT_AUX_PCM_TX"},
+	{"VoiceMMode1_Tx Mixer", "QUAT_AUX_PCM_TX_MMode1", "QUAT_AUX_PCM_TX"},
+	{"VoiceMMode1_Tx Mixer", "QUAT_TDM_TX_0_MMode1", "QUAT_TDM_TX_0"},
+	{"VOICEMMODE1_UL", NULL, "VoiceMMode1_Tx Mixer"},
+
+	{"VoiceMMode2_Tx Mixer", "PRI_TX_MMode2", "PRI_I2S_TX"},
+	{"VoiceMMode2_Tx Mixer", "PRI_MI2S_TX_MMode2", "PRI_MI2S_TX"},
+	{"VoiceMMode2_Tx Mixer", "MI2S_TX_MMode2", "MI2S_TX"},
+	{"VoiceMMode2_Tx Mixer", "TERT_MI2S_TX_MMode2", "TERT_MI2S_TX"},
+	{"VoiceMMode2_Tx Mixer", "INT3_MI2S_TX_MMode2", "INT3_MI2S_TX"},
+	{"VoiceMMode2_Tx Mixer", "SLIM_0_TX_MMode2", "SLIMBUS_0_TX"},
+	{"VoiceMMode2_Tx Mixer", "SLIM_7_TX_MMode2", "SLIMBUS_7_TX"},
+	{"VoiceMMode2_Tx Mixer", "SLIM_8_TX_MMode2", "SLIMBUS_8_TX"},
+	{"VoiceMMode2_Tx Mixer", "USB_AUDIO_TX_MMode2", "USB_AUDIO_TX"},
+	{"VoiceMMode2_Tx Mixer", "INT_BT_SCO_TX_MMode2", "INT_BT_SCO_TX"},
+	{"VoiceMMode2_Tx Mixer", "AFE_PCM_TX_MMode2", "PCM_TX"},
+	{"VoiceMMode2_Tx Mixer", "AUX_PCM_TX_MMode2", "AUX_PCM_TX"},
+	{"VoiceMMode2_Tx Mixer", "SEC_AUX_PCM_TX_MMode2", "SEC_AUX_PCM_TX"},
+	{"VoiceMMode2_Tx Mixer", "TERT_AUX_PCM_TX_MMode2", "TERT_AUX_PCM_TX"},
+	{"VoiceMMode2_Tx Mixer", "QUAT_AUX_PCM_TX_MMode2", "QUAT_AUX_PCM_TX"},
+	{"VOICEMMODE2_UL", NULL, "VoiceMMode2_Tx Mixer"},
+
+	{"Voip_Tx Mixer", "PRI_TX_Voip", "PRI_I2S_TX"},
+	{"Voip_Tx Mixer", "MI2S_TX_Voip", "MI2S_TX"},
+	{"Voip_Tx Mixer", "TERT_MI2S_TX_Voip", "TERT_MI2S_TX"},
+	{"Voip_Tx Mixer", "INT3_MI2S_TX_Voip", "INT3_MI2S_TX"},
+	{"Voip_Tx Mixer", "SLIM_0_TX_Voip", "SLIMBUS_0_TX"},
+	{"Voip_Tx Mixer", "SLIM_7_TX_Voip", "SLIMBUS_7_TX"},
+	{"Voip_Tx Mixer", "SLIM_8_TX_Voip", "SLIMBUS_8_TX"},
+	{"Voip_Tx Mixer", "USB_AUDIO_TX_Voip", "USB_AUDIO_TX"},
+	{"Voip_Tx Mixer", "INTERNAL_BT_SCO_TX_Voip", "INT_BT_SCO_TX"},
+	{"Voip_Tx Mixer", "AFE_PCM_TX_Voip", "PCM_TX"},
+	{"Voip_Tx Mixer", "AUX_PCM_TX_Voip", "AUX_PCM_TX"},
+	{"Voip_Tx Mixer", "SEC_AUX_PCM_TX_Voip", "SEC_AUX_PCM_TX"},
+	{"Voip_Tx Mixer", "TERT_AUX_PCM_TX_Voip", "TERT_AUX_PCM_TX"},
+	{"Voip_Tx Mixer", "QUAT_AUX_PCM_TX_Voip", "QUAT_AUX_PCM_TX"},
+	{"Voip_Tx Mixer", "PRI_MI2S_TX_Voip", "PRI_MI2S_TX"},
+	{"VOIP_UL", NULL, "Voip_Tx Mixer"},
+
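+	/* Host-less (DL_HL/UL_HL) loopback paths over SLIMBUS. */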
+	{"SLIMBUS_DL_HL", "Switch", "SLIM0_DL_HL"},
+	{"SLIMBUS_0_RX", NULL, "SLIMBUS_DL_HL"},
+	{"SLIMBUS1_DL_HL", "Switch", "SLIM1_DL_HL"},
+	{"SLIMBUS_1_RX", NULL, "SLIMBUS1_DL_HL"},
+	{"SLIMBUS3_DL_HL", "Switch", "SLIM3_DL_HL"},
+	{"SLIMBUS_3_RX", NULL, "SLIMBUS3_DL_HL"},
+	{"SLIMBUS4_DL_HL", "Switch", "SLIM4_DL_HL"},
+	{"SLIMBUS_4_RX", NULL, "SLIMBUS4_DL_HL"},
+	{"SLIMBUS6_DL_HL", "Switch", "SLIM0_DL_HL"},
+	{"SLIMBUS_6_RX", NULL, "SLIMBUS6_DL_HL"},
+	{"SLIM0_UL_HL", NULL, "SLIMBUS_0_TX"},
+	{"SLIM1_UL_HL", NULL, "SLIMBUS_1_TX"},
+	{"SLIM3_UL_HL", NULL, "SLIMBUS_3_TX"},
+	{"SLIM4_UL_HL", NULL, "SLIMBUS_4_TX"},
+	{"SLIM8_UL_HL", NULL, "SLIMBUS_8_TX"},
+
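+	/*
+	 * LSM (listen/sound-trigger) capture mixers, feeding the LSMn_UL_HL
+	 * host-less uplinks.
+	 */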
+	{"LSM1 Mixer", "SLIMBUS_0_TX", "SLIMBUS_0_TX"},
+	{"LSM1 Mixer", "SLIMBUS_1_TX", "SLIMBUS_1_TX"},
+	{"LSM1 Mixer", "SLIMBUS_3_TX", "SLIMBUS_3_TX"},
+	{"LSM1 Mixer", "SLIMBUS_4_TX", "SLIMBUS_4_TX"},
+	{"LSM1 Mixer", "SLIMBUS_5_TX", "SLIMBUS_5_TX"},
+	{"LSM1 Mixer", "TERT_MI2S_TX", "TERT_MI2S_TX"},
+	{"LSM1 Mixer", "QUAT_MI2S_TX", "QUAT_MI2S_TX"},
+	{"LSM1 Mixer", "INT3_MI2S_TX", "INT3_MI2S_TX"},
+	{"LSM1_UL_HL", NULL, "LSM1 Mixer"},
+
+	{"LSM2 Mixer", "SLIMBUS_0_TX", "SLIMBUS_0_TX"},
+	{"LSM2 Mixer", "SLIMBUS_1_TX", "SLIMBUS_1_TX"},
+	{"LSM2 Mixer", "SLIMBUS_3_TX", "SLIMBUS_3_TX"},
+	{"LSM2 Mixer", "SLIMBUS_4_TX", "SLIMBUS_4_TX"},
+	{"LSM2 Mixer", "SLIMBUS_5_TX", "SLIMBUS_5_TX"},
+	{"LSM2 Mixer", "TERT_MI2S_TX", "TERT_MI2S_TX"},
+	{"LSM2 Mixer", "QUAT_MI2S_TX", "QUAT_MI2S_TX"},
+	{"LSM2 Mixer", "INT3_MI2S_TX", "INT3_MI2S_TX"},
+	{"LSM2_UL_HL", NULL, "LSM2 Mixer"},
+
+	{"LSM3 Mixer", "SLIMBUS_0_TX", "SLIMBUS_0_TX"},
+	{"LSM3 Mixer", "SLIMBUS_1_TX", "SLIMBUS_1_TX"},
+	{"LSM3 Mixer", "SLIMBUS_3_TX", "SLIMBUS_3_TX"},
+	{"LSM3 Mixer", "SLIMBUS_4_TX", "SLIMBUS_4_TX"},
+	{"LSM3 Mixer", "SLIMBUS_5_TX", "SLIMBUS_5_TX"},
+	{"LSM3 Mixer", "TERT_MI2S_TX", "TERT_MI2S_TX"},
+	{"LSM3 Mixer", "QUAT_MI2S_TX", "QUAT_MI2S_TX"},
+	{"LSM3 Mixer", "INT3_MI2S_TX", "INT3_MI2S_TX"},
+	{"LSM3_UL_HL", NULL, "LSM3 Mixer"},
+
+	{"LSM4 Mixer", "SLIMBUS_0_TX", "SLIMBUS_0_TX"},
+	{"LSM4 Mixer", "SLIMBUS_1_TX", "SLIMBUS_1_TX"},
+	{"LSM4 Mixer", "SLIMBUS_3_TX", "SLIMBUS_3_TX"},
+	{"LSM4 Mixer", "SLIMBUS_4_TX", "SLIMBUS_4_TX"},
+	{"LSM4 Mixer", "SLIMBUS_5_TX", "SLIMBUS_5_TX"},
+	{"LSM4 Mixer", "TERT_MI2S_TX", "TERT_MI2S_TX"},
+	{"LSM4 Mixer", "QUAT_MI2S_TX", "QUAT_MI2S_TX"},
+	{"LSM4 Mixer", "INT3_MI2S_TX", "INT3_MI2S_TX"},
+	{"LSM4_UL_HL", NULL, "LSM4 Mixer"},
+
+	{"LSM5 Mixer", "SLIMBUS_0_TX", "SLIMBUS_0_TX"},
+	{"LSM5 Mixer", "SLIMBUS_1_TX", "SLIMBUS_1_TX"},
+	{"LSM5 Mixer", "SLIMBUS_3_TX", "SLIMBUS_3_TX"},
+	{"LSM5 Mixer", "SLIMBUS_4_TX", "SLIMBUS_4_TX"},
+	{"LSM5 Mixer", "SLIMBUS_5_TX", "SLIMBUS_5_TX"},
+	{"LSM5 Mixer", "TERT_MI2S_TX", "TERT_MI2S_TX"},
+	{"LSM5 Mixer", "QUAT_MI2S_TX", "QUAT_MI2S_TX"},
+	{"LSM5 Mixer", "INT3_MI2S_TX", "INT3_MI2S_TX"},
+	{"LSM5_UL_HL", NULL, "LSM5 Mixer"},
+
+	{"LSM6 Mixer", "SLIMBUS_0_TX", "SLIMBUS_0_TX"},
+	{"LSM6 Mixer", "SLIMBUS_1_TX", "SLIMBUS_1_TX"},
+	{"LSM6 Mixer", "SLIMBUS_3_TX", "SLIMBUS_3_TX"},
+	{"LSM6 Mixer", "SLIMBUS_4_TX", "SLIMBUS_4_TX"},
+	{"LSM6 Mixer", "SLIMBUS_5_TX", "SLIMBUS_5_TX"},
+	{"LSM6 Mixer", "QUAT_MI2S_TX", "QUAT_MI2S_TX"},
+	{"LSM6_UL_HL", NULL, "LSM6 Mixer"},
+
+	{"LSM7 Mixer", "SLIMBUS_0_TX", "SLIMBUS_0_TX"},
+	{"LSM7 Mixer", "SLIMBUS_1_TX", "SLIMBUS_1_TX"},
+	{"LSM7 Mixer", "SLIMBUS_3_TX", "SLIMBUS_3_TX"},
+	{"LSM7 Mixer", "SLIMBUS_4_TX", "SLIMBUS_4_TX"},
+	{"LSM7 Mixer", "SLIMBUS_5_TX", "SLIMBUS_5_TX"},
+	{"LSM7 Mixer", "QUAT_MI2S_TX", "QUAT_MI2S_TX"},
+	{"LSM7_UL_HL", NULL, "LSM7 Mixer"},
+
+	{"LSM8 Mixer", "SLIMBUS_0_TX", "SLIMBUS_0_TX"},
+	{"LSM8 Mixer", "SLIMBUS_1_TX", "SLIMBUS_1_TX"},
+	{"LSM8 Mixer", "SLIMBUS_3_TX", "SLIMBUS_3_TX"},
+	{"LSM8 Mixer", "SLIMBUS_4_TX", "SLIMBUS_4_TX"},
+	{"LSM8 Mixer", "SLIMBUS_5_TX", "SLIMBUS_5_TX"},
+	{"LSM8 Mixer", "QUAT_MI2S_TX", "QUAT_MI2S_TX"},
+	{"LSM8_UL_HL", NULL, "LSM8 Mixer"},
+
+	{"CPE_LSM_UL_HL", NULL, "BE_IN"},
+	{"QCHAT_Tx Mixer", "PRI_TX_QCHAT", "PRI_I2S_TX"},
+	{"QCHAT_Tx Mixer", "SLIM_0_TX_QCHAT", "SLIMBUS_0_TX"},
+	{"QCHAT_Tx Mixer", "SLIM_7_TX_QCHAT", "SLIMBUS_7_TX"},
+	{"QCHAT_Tx Mixer", "SLIM_8_TX_QCHAT", "SLIMBUS_8_TX"},
+	{"QCHAT_Tx Mixer", "INTERNAL_BT_SCO_TX_QCHAT", "INT_BT_SCO_TX"},
+	{"QCHAT_Tx Mixer", "AFE_PCM_TX_QCHAT", "PCM_TX"},
+	{"QCHAT_Tx Mixer", "AUX_PCM_TX_QCHAT", "AUX_PCM_TX"},
+	{"QCHAT_Tx Mixer", "SEC_AUX_PCM_TX_QCHAT", "SEC_AUX_PCM_TX"},
+	{"QCHAT_Tx Mixer", "TERT_AUX_PCM_TX_QCHAT", "TERT_AUX_PCM_TX"},
+	{"QCHAT_Tx Mixer", "QUAT_AUX_PCM_TX_QCHAT", "QUAT_AUX_PCM_TX"},
+	{"QCHAT_Tx Mixer", "MI2S_TX_QCHAT", "MI2S_TX"},
+	{"QCHAT_Tx Mixer", "PRI_MI2S_TX_QCHAT", "PRI_MI2S_TX"},
+	{"QCHAT_Tx Mixer", "TERT_MI2S_TX_QCHAT", "TERT_MI2S_TX"},
+	{"QCHAT_Tx Mixer", "INT3_MI2S_TX_QCHAT", "INT3_MI2S_TX"},
+	{"QCHAT_Tx Mixer", "USB_AUDIO_TX_QCHAT", "USB_AUDIO_TX"},
+	{"QCHAT_UL", NULL, "QCHAT_Tx Mixer"},
+
+	{"INT_FM_RX", NULL, "INTFM_DL_HL"},
+	{"INTFM_UL_HL", NULL, "INT_FM_TX"},
+	{"INTHFP_UL_HL", NULL, "HFP_PRI_AUX_UL_HL"},
+	{"HFP_PRI_AUX_UL_HL", "Switch", "AUX_PCM_TX"},
+	{"INTHFP_UL_HL", NULL, "HFP_AUX_UL_HL"},
+	{"HFP_AUX_UL_HL", "Switch", "SEC_AUX_PCM_TX"},
+	{"INTHFP_UL_HL", NULL, "HFP_INT_UL_HL"},
+	{"HFP_INT_UL_HL", "Switch", "INT_BT_SCO_TX"},
+	{"SLIM7_UL_HL", NULL, "HFP_SLIM7_UL_HL"},
+	{"HFP_SLIM7_UL_HL", "Switch", "SLIMBUS_7_TX"},
+	{"AUX_PCM_RX", NULL, "AUXPCM_DL_HL"},
+	{"AUX_PCM_RX", NULL, "INTHFP_DL_HL"},
+	{"AUXPCM_UL_HL", NULL, "AUX_PCM_TX"},
+	{"MI2S_RX", NULL, "MI2S_DL_HL"},
+	{"MI2S_UL_HL", NULL, "MI2S_TX"},
+	{"PCM_RX_DL_HL", "Switch", "SLIM0_DL_HL"},
+	{"PCM_RX", NULL, "PCM_RX_DL_HL"},
+
+	/* connect to INT4_MI2S_DL_HL since same pcm_id */
+	{"INT0_MI2S_RX_DL_HL", "Switch", "INT4_MI2S_DL_HL"},
+	{"INT0_MI2S_RX", NULL, "INT0_MI2S_RX_DL_HL"},
+	{"INT4_MI2S_RX_DL_HL", "Switch", "INT4_MI2S_DL_HL"},
+	{"INT4_MI2S_RX", NULL, "INT4_MI2S_RX_DL_HL"},
+	{"PRI_MI2S_RX_DL_HL", "Switch", "PRI_MI2S_DL_HL"},
+	{"PRI_MI2S_RX", NULL, "PRI_MI2S_RX_DL_HL"},
+	{"SEC_MI2S_RX_DL_HL", "Switch", "SEC_MI2S_DL_HL"},
+	{"SEC_MI2S_RX", NULL, "SEC_MI2S_RX_DL_HL"},
+	{"TERT_MI2S_RX_DL_HL", "Switch", "TERT_MI2S_DL_HL"},
+	{"TERT_MI2S_RX", NULL, "TERT_MI2S_RX_DL_HL"},
+
+	{"QUAT_MI2S_RX_DL_HL", "Switch", "QUAT_MI2S_DL_HL"},
+	{"QUAT_MI2S_RX", NULL, "QUAT_MI2S_RX_DL_HL"},
+	{"MI2S_UL_HL", NULL, "TERT_MI2S_TX"},
+	{"INT3_MI2S_UL_HL", NULL, "INT3_MI2S_TX"},
+	{"TERT_MI2S_UL_HL", NULL, "TERT_MI2S_TX"},
+	{"SEC_I2S_RX", NULL, "SEC_I2S_DL_HL"},
+	{"PRI_MI2S_UL_HL", NULL, "PRI_MI2S_TX"},
+	{"SEC_MI2S_UL_HL", NULL, "SEC_MI2S_TX"},
+	{"SEC_MI2S_RX", NULL, "SEC_MI2S_DL_HL"},
+	{"PRI_MI2S_RX", NULL, "PRI_MI2S_DL_HL"},
+	{"TERT_MI2S_RX", NULL, "TERT_MI2S_DL_HL"},
+	{"QUAT_MI2S_UL_HL", NULL, "QUAT_MI2S_TX"},
+
+	{"PRI_TDM_TX_0_UL_HL", NULL, "PRI_TDM_TX_0"},
+	{"PRI_TDM_TX_1_UL_HL", NULL, "PRI_TDM_TX_1"},
+	{"PRI_TDM_TX_2_UL_HL", NULL, "PRI_TDM_TX_2"},
+	{"PRI_TDM_TX_3_UL_HL", NULL, "PRI_TDM_TX_3"},
+	{"PRI_TDM_RX_0", NULL, "PRI_TDM_RX_0_DL_HL"},
+	{"PRI_TDM_RX_1", NULL, "PRI_TDM_RX_1_DL_HL"},
+	{"PRI_TDM_RX_2", NULL, "PRI_TDM_RX_2_DL_HL"},
+	{"PRI_TDM_RX_3", NULL, "PRI_TDM_RX_3_DL_HL"},
+	{"SEC_TDM_TX_0_UL_HL", NULL, "SEC_TDM_TX_0"},
+	{"SEC_TDM_TX_1_UL_HL", NULL, "SEC_TDM_TX_1"},
+	{"SEC_TDM_TX_2_UL_HL", NULL, "SEC_TDM_TX_2"},
+	{"SEC_TDM_TX_3_UL_HL", NULL, "SEC_TDM_TX_3"},
+	{"SEC_TDM_RX_0", NULL, "SEC_TDM_RX_0_DL_HL"},
+	{"SEC_TDM_RX_1", NULL, "SEC_TDM_RX_1_DL_HL"},
+	{"SEC_TDM_RX_2", NULL, "SEC_TDM_RX_2_DL_HL"},
+	{"SEC_TDM_RX_3", NULL, "SEC_TDM_RX_3_DL_HL"},
+	{"TERT_TDM_TX_0_UL_HL", NULL, "TERT_TDM_TX_0"},
+	{"TERT_TDM_TX_1_UL_HL", NULL, "TERT_TDM_TX_1"},
+	{"TERT_TDM_TX_2_UL_HL", NULL, "TERT_TDM_TX_2"},
+	{"TERT_TDM_TX_3_UL_HL", NULL, "TERT_TDM_TX_3"},
+	{"TERT_TDM_RX_0", NULL, "TERT_TDM_RX_0_DL_HL"},
+	{"TERT_TDM_RX_1", NULL, "TERT_TDM_RX_1_DL_HL"},
+	{"TERT_TDM_RX_2", NULL, "TERT_TDM_RX_2_DL_HL"},
+	{"TERT_TDM_RX_3", NULL, "TERT_TDM_RX_3_DL_HL"},
+	{"QUAT_TDM_TX_0_UL_HL", NULL, "QUAT_TDM_TX_0"},
+	{"QUAT_TDM_TX_1_UL_HL", NULL, "QUAT_TDM_TX_1"},
+	{"QUAT_TDM_TX_2_UL_HL", NULL, "QUAT_TDM_TX_2"},
+	{"QUAT_TDM_TX_3_UL_HL", NULL, "QUAT_TDM_TX_3"},
+	{"QUAT_TDM_RX_0", NULL, "QUAT_TDM_RX_0_DL_HL"},
+	{"QUAT_TDM_RX_1", NULL, "QUAT_TDM_RX_1_DL_HL"},
+	{"QUAT_TDM_RX_2", NULL, "QUAT_TDM_RX_2_DL_HL"},
+	{"QUAT_TDM_RX_3", NULL, "QUAT_TDM_RX_3_DL_HL"},
+
+	{"PRI_TDM_RX_0 Port Mixer", "PRI_MI2S_TX", "PRI_MI2S_TX"},
+	{"PRI_TDM_RX_0 Port Mixer", "SEC_MI2S_TX", "SEC_MI2S_TX"},
+	{"PRI_TDM_RX_0 Port Mixer", "QUAT_MI2S_TX", "QUAT_MI2S_TX"},
+	{"PRI_TDM_RX_0 Port Mixer", "INTERNAL_FM_TX", "INT_FM_TX"},
+	{"PRI_TDM_RX_0 Port Mixer", "INTERNAL_BT_SCO_TX", "INT_BT_SCO_TX"},
+	{"PRI_TDM_RX_0 Port Mixer", "AFE_PCM_TX", "PCM_TX"},
+	{"PRI_TDM_RX_0 Port Mixer", "AUX_PCM_UL_TX", "AUX_PCM_TX"},
+	{"PRI_TDM_RX_0 Port Mixer", "SEC_AUX_PCM_UL_TX", "SEC_AUX_PCM_TX"},
+	{"PRI_TDM_RX_0 Port Mixer", "PRI_TDM_TX_0", "PRI_TDM_TX_0"},
+	{"PRI_TDM_RX_0 Port Mixer", "PRI_TDM_TX_1", "PRI_TDM_TX_1"},
+	{"PRI_TDM_RX_0 Port Mixer", "PRI_TDM_TX_2", "PRI_TDM_TX_2"},
+	{"PRI_TDM_RX_0 Port Mixer", "PRI_TDM_TX_3", "PRI_TDM_TX_3"},
+	{"PRI_TDM_RX_0 Port Mixer", "QUAT_TDM_TX_0", "QUAT_TDM_TX_0"},
+	{"PRI_TDM_RX_0 Port Mixer", "QUAT_TDM_TX_1", "QUAT_TDM_TX_1"},
+	{"PRI_TDM_RX_0 Port Mixer", "QUAT_TDM_TX_2", "QUAT_TDM_TX_2"},
+	{"PRI_TDM_RX_0 Port Mixer", "QUAT_TDM_TX_3", "QUAT_TDM_TX_3"},
+	{"PRI_TDM_RX_0", NULL, "PRI_TDM_RX_0 Port Mixer"},
+
+	{"PRI_TDM_RX_1 Port Mixer", "PRI_MI2S_TX", "PRI_MI2S_TX"},
+	{"PRI_TDM_RX_1 Port Mixer", "SEC_MI2S_TX", "SEC_MI2S_TX"},
+	{"PRI_TDM_RX_1 Port Mixer", "QUAT_MI2S_TX", "QUAT_MI2S_TX"},
+	{"PRI_TDM_RX_1 Port Mixer", "INTERNAL_FM_TX", "INT_FM_TX"},
+	{"PRI_TDM_RX_1 Port Mixer", "INTERNAL_BT_SCO_TX", "INT_BT_SCO_TX"},
+	{"PRI_TDM_RX_1 Port Mixer", "AFE_PCM_TX", "PCM_TX"},
+	{"PRI_TDM_RX_1 Port Mixer", "AUX_PCM_UL_TX", "AUX_PCM_TX"},
+	{"PRI_TDM_RX_1 Port Mixer", "SEC_AUX_PCM_UL_TX", "SEC_AUX_PCM_TX"},
+	{"PRI_TDM_RX_1 Port Mixer", "PRI_TDM_TX_0", "PRI_TDM_TX_0"},
+	{"PRI_TDM_RX_1 Port Mixer", "PRI_TDM_TX_1", "PRI_TDM_TX_1"},
+	{"PRI_TDM_RX_1 Port Mixer", "PRI_TDM_TX_2", "PRI_TDM_TX_2"},
+	{"PRI_TDM_RX_1 Port Mixer", "PRI_TDM_TX_3", "PRI_TDM_TX_3"},
+	{"PRI_TDM_RX_1 Port Mixer", "QUAT_TDM_TX_0", "QUAT_TDM_TX_0"},
+	{"PRI_TDM_RX_1 Port Mixer", "QUAT_TDM_TX_1", "QUAT_TDM_TX_1"},
+	{"PRI_TDM_RX_1 Port Mixer", "QUAT_TDM_TX_2", "QUAT_TDM_TX_2"},
+	{"PRI_TDM_RX_1 Port Mixer", "QUAT_TDM_TX_3", "QUAT_TDM_TX_3"},
+	{"PRI_TDM_RX_1", NULL, "PRI_TDM_RX_1 Port Mixer"},
+
+	{"PRI_TDM_RX_2 Port Mixer", "PRI_MI2S_TX", "PRI_MI2S_TX"},
+	{"PRI_TDM_RX_2 Port Mixer", "SEC_MI2S_TX", "SEC_MI2S_TX"},
+	{"PRI_TDM_RX_2 Port Mixer", "QUAT_MI2S_TX", "QUAT_MI2S_TX"},
+	{"PRI_TDM_RX_2 Port Mixer", "INTERNAL_FM_TX", "INT_FM_TX"},
+	{"PRI_TDM_RX_2 Port Mixer", "INTERNAL_BT_SCO_TX", "INT_BT_SCO_TX"},
+	{"PRI_TDM_RX_2 Port Mixer", "AFE_PCM_TX", "PCM_TX"},
+	{"PRI_TDM_RX_2 Port Mixer", "AUX_PCM_UL_TX", "AUX_PCM_TX"},
+	{"PRI_TDM_RX_2 Port Mixer", "SEC_AUX_PCM_UL_TX", "SEC_AUX_PCM_TX"},
+	{"PRI_TDM_RX_2 Port Mixer", "PRI_TDM_TX_0", "PRI_TDM_TX_0"},
+	{"PRI_TDM_RX_2 Port Mixer", "PRI_TDM_TX_1", "PRI_TDM_TX_1"},
+	{"PRI_TDM_RX_2 Port Mixer", "PRI_TDM_TX_2", "PRI_TDM_TX_2"},
+	{"PRI_TDM_RX_2 Port Mixer", "PRI_TDM_TX_3", "PRI_TDM_TX_3"},
+	{"PRI_TDM_RX_2 Port Mixer", "QUAT_TDM_TX_0", "QUAT_TDM_TX_0"},
+	{"PRI_TDM_RX_2 Port Mixer", "QUAT_TDM_TX_1", "QUAT_TDM_TX_1"},
+	{"PRI_TDM_RX_2 Port Mixer", "QUAT_TDM_TX_2", "QUAT_TDM_TX_2"},
+	{"PRI_TDM_RX_2 Port Mixer", "QUAT_TDM_TX_3", "QUAT_TDM_TX_3"},
+	{"PRI_TDM_RX_2", NULL, "PRI_TDM_RX_2 Port Mixer"},
+
+	{"PRI_TDM_RX_3 Port Mixer", "PRI_MI2S_TX", "PRI_MI2S_TX"},
+	{"PRI_TDM_RX_3 Port Mixer", "SEC_MI2S_TX", "SEC_MI2S_TX"},
+	{"PRI_TDM_RX_3 Port Mixer", "QUAT_MI2S_TX", "QUAT_MI2S_TX"},
+	{"PRI_TDM_RX_3 Port Mixer", "INTERNAL_FM_TX", "INT_FM_TX"},
+	{"PRI_TDM_RX_3 Port Mixer", "INTERNAL_BT_SCO_TX", "INT_BT_SCO_TX"},
+	{"PRI_TDM_RX_3 Port Mixer", "AFE_PCM_TX", "PCM_TX"},
+	{"PRI_TDM_RX_3 Port Mixer", "AUX_PCM_UL_TX", "AUX_PCM_TX"},
+	{"PRI_TDM_RX_3 Port Mixer", "SEC_AUX_PCM_UL_TX", "SEC_AUX_PCM_TX"},
+	{"PRI_TDM_RX_3 Port Mixer", "PRI_TDM_TX_0", "PRI_TDM_TX_0"},
+	{"PRI_TDM_RX_3 Port Mixer", "PRI_TDM_TX_1", "PRI_TDM_TX_1"},
+	{"PRI_TDM_RX_3 Port Mixer", "PRI_TDM_TX_2", "PRI_TDM_TX_2"},
+	{"PRI_TDM_RX_3 Port Mixer", "PRI_TDM_TX_3", "PRI_TDM_TX_3"},
+	{"PRI_TDM_RX_3 Port Mixer", "QUAT_TDM_TX_0", "QUAT_TDM_TX_0"},
+	{"PRI_TDM_RX_3 Port Mixer", "QUAT_TDM_TX_1", "QUAT_TDM_TX_1"},
+	{"PRI_TDM_RX_3 Port Mixer", "QUAT_TDM_TX_2", "QUAT_TDM_TX_2"},
+	{"PRI_TDM_RX_3 Port Mixer", "QUAT_TDM_TX_3", "QUAT_TDM_TX_3"},
+	{"PRI_TDM_RX_3", NULL, "PRI_TDM_RX_3 Port Mixer"},
+
+	{"SEC_TDM_RX_0 Port Mixer", "PRI_MI2S_TX", "PRI_MI2S_TX"},
+	{"SEC_TDM_RX_0 Port Mixer", "SEC_MI2S_TX", "SEC_MI2S_TX"},
+	{"SEC_TDM_RX_0 Port Mixer", "TERT_MI2S_TX", "TERT_MI2S_TX"},
+	{"SEC_TDM_RX_0 Port Mixer", "QUAT_MI2S_TX", "QUAT_MI2S_TX"},
+	{"SEC_TDM_RX_0 Port Mixer", "INTERNAL_FM_TX", "INT_FM_TX"},
+	{"SEC_TDM_RX_0 Port Mixer", "INTERNAL_BT_SCO_TX", "INT_BT_SCO_TX"},
+	{"SEC_TDM_RX_0 Port Mixer", "AFE_PCM_TX", "PCM_TX"},
+	{"SEC_TDM_RX_0 Port Mixer", "AUX_PCM_UL_TX", "AUX_PCM_TX"},
+	{"SEC_TDM_RX_0 Port Mixer", "SEC_AUX_PCM_UL_TX", "SEC_AUX_PCM_TX"},
+	{"SEC_TDM_RX_0 Port Mixer", "SEC_TDM_TX_0", "SEC_TDM_TX_0"},
+	{"SEC_TDM_RX_0 Port Mixer", "SEC_TDM_TX_1", "SEC_TDM_TX_1"},
+	{"SEC_TDM_RX_0 Port Mixer", "SEC_TDM_TX_2", "SEC_TDM_TX_2"},
+	{"SEC_TDM_RX_0 Port Mixer", "SEC_TDM_TX_3", "SEC_TDM_TX_3"},
+	{"SEC_TDM_RX_0 Port Mixer", "QUAT_TDM_TX_0", "QUAT_TDM_TX_0"},
+	{"SEC_TDM_RX_0 Port Mixer", "QUAT_TDM_TX_1", "QUAT_TDM_TX_1"},
+	{"SEC_TDM_RX_0 Port Mixer", "QUAT_TDM_TX_2", "QUAT_TDM_TX_2"},
+	{"SEC_TDM_RX_0 Port Mixer", "QUAT_TDM_TX_3", "QUAT_TDM_TX_3"},
+	{"SEC_TDM_RX_0", NULL, "SEC_TDM_RX_0 Port Mixer"},
+
+	{"SEC_TDM_RX_1 Port Mixer", "PRI_MI2S_TX", "PRI_MI2S_TX"},
+	{"SEC_TDM_RX_1 Port Mixer", "SEC_MI2S_TX", "SEC_MI2S_TX"},
+	{"SEC_TDM_RX_1 Port Mixer", "TERT_MI2S_TX", "TERT_MI2S_TX"},
+	{"SEC_TDM_RX_1 Port Mixer", "QUAT_MI2S_TX", "QUAT_MI2S_TX"},
+	{"SEC_TDM_RX_1 Port Mixer", "INTERNAL_FM_TX", "INT_FM_TX"},
+	{"SEC_TDM_RX_1 Port Mixer", "INTERNAL_BT_SCO_TX", "INT_BT_SCO_TX"},
+	{"SEC_TDM_RX_1 Port Mixer", "AFE_PCM_TX", "PCM_TX"},
+	{"SEC_TDM_RX_1 Port Mixer", "AUX_PCM_UL_TX", "AUX_PCM_TX"},
+	{"SEC_TDM_RX_1 Port Mixer", "SEC_AUX_PCM_UL_TX", "SEC_AUX_PCM_TX"},
+	{"SEC_TDM_RX_1 Port Mixer", "SEC_TDM_TX_0", "SEC_TDM_TX_0"},
+	{"SEC_TDM_RX_1 Port Mixer", "SEC_TDM_TX_1", "SEC_TDM_TX_1"},
+	{"SEC_TDM_RX_1 Port Mixer", "SEC_TDM_TX_2", "SEC_TDM_TX_2"},
+	{"SEC_TDM_RX_1 Port Mixer", "SEC_TDM_TX_3", "SEC_TDM_TX_3"},
+	{"SEC_TDM_RX_1 Port Mixer", "QUAT_TDM_TX_0", "QUAT_TDM_TX_0"},
+	{"SEC_TDM_RX_1 Port Mixer", "QUAT_TDM_TX_1", "QUAT_TDM_TX_1"},
+	{"SEC_TDM_RX_1 Port Mixer", "QUAT_TDM_TX_2", "QUAT_TDM_TX_2"},
+	{"SEC_TDM_RX_1 Port Mixer", "QUAT_TDM_TX_3", "QUAT_TDM_TX_3"},
+	{"SEC_TDM_RX_1", NULL, "SEC_TDM_RX_1 Port Mixer"},
+
+	{"SEC_TDM_RX_2 Port Mixer", "PRI_MI2S_TX", "PRI_MI2S_TX"},
+	{"SEC_TDM_RX_2 Port Mixer", "SEC_MI2S_TX", "SEC_MI2S_TX"},
+	{"SEC_TDM_RX_2 Port Mixer", "TERT_MI2S_TX", "TERT_MI2S_TX"},
+	{"SEC_TDM_RX_2 Port Mixer", "QUAT_MI2S_TX", "QUAT_MI2S_TX"},
+	{"SEC_TDM_RX_2 Port Mixer", "INTERNAL_FM_TX", "INT_FM_TX"},
+	{"SEC_TDM_RX_2 Port Mixer", "INTERNAL_BT_SCO_TX", "INT_BT_SCO_TX"},
+	{"SEC_TDM_RX_2 Port Mixer", "AFE_PCM_TX", "PCM_TX"},
+	{"SEC_TDM_RX_2 Port Mixer", "AUX_PCM_UL_TX", "AUX_PCM_TX"},
+	{"SEC_TDM_RX_2 Port Mixer", "SEC_AUX_PCM_UL_TX", "SEC_AUX_PCM_TX"},
+	{"SEC_TDM_RX_2 Port Mixer", "SEC_TDM_TX_0", "SEC_TDM_TX_0"},
+	{"SEC_TDM_RX_2 Port Mixer", "SEC_TDM_TX_1", "SEC_TDM_TX_1"},
+	{"SEC_TDM_RX_2 Port Mixer", "SEC_TDM_TX_2", "SEC_TDM_TX_2"},
+	{"SEC_TDM_RX_2 Port Mixer", "SEC_TDM_TX_3", "SEC_TDM_TX_3"},
+	{"SEC_TDM_RX_2 Port Mixer", "QUAT_TDM_TX_0", "QUAT_TDM_TX_0"},
+	{"SEC_TDM_RX_2 Port Mixer", "QUAT_TDM_TX_1", "QUAT_TDM_TX_1"},
+	{"SEC_TDM_RX_2 Port Mixer", "QUAT_TDM_TX_2", "QUAT_TDM_TX_2"},
+	{"SEC_TDM_RX_2 Port Mixer", "QUAT_TDM_TX_3", "QUAT_TDM_TX_3"},
+	{"SEC_TDM_RX_2", NULL, "SEC_TDM_RX_2 Port Mixer"},
+
+	{"SEC_TDM_RX_3 Port Mixer", "PRI_MI2S_TX", "PRI_MI2S_TX"},
+	{"SEC_TDM_RX_3 Port Mixer", "SEC_MI2S_TX", "SEC_MI2S_TX"},
+	{"SEC_TDM_RX_3 Port Mixer", "TERT_MI2S_TX", "TERT_MI2S_TX"},
+	{"SEC_TDM_RX_3 Port Mixer", "QUAT_MI2S_TX", "QUAT_MI2S_TX"},
+	{"SEC_TDM_RX_3 Port Mixer", "INTERNAL_FM_TX", "INT_FM_TX"},
+	{"SEC_TDM_RX_3 Port Mixer", "INTERNAL_BT_SCO_TX", "INT_BT_SCO_TX"},
+	{"SEC_TDM_RX_3 Port Mixer", "AFE_PCM_TX", "PCM_TX"},
+	{"SEC_TDM_RX_3 Port Mixer", "AUX_PCM_UL_TX", "AUX_PCM_TX"},
+	{"SEC_TDM_RX_3 Port Mixer", "SEC_AUX_PCM_UL_TX", "SEC_AUX_PCM_TX"},
+	{"SEC_TDM_RX_3 Port Mixer", "SEC_TDM_TX_0", "SEC_TDM_TX_0"},
+	{"SEC_TDM_RX_3 Port Mixer", "SEC_TDM_TX_1", "SEC_TDM_TX_1"},
+	{"SEC_TDM_RX_3 Port Mixer", "SEC_TDM_TX_2", "SEC_TDM_TX_2"},
+	{"SEC_TDM_RX_3 Port Mixer", "SEC_TDM_TX_3", "SEC_TDM_TX_3"},
+	{"SEC_TDM_RX_3 Port Mixer", "QUAT_TDM_TX_0", "QUAT_TDM_TX_0"},
+	{"SEC_TDM_RX_3 Port Mixer", "QUAT_TDM_TX_1", "QUAT_TDM_TX_1"},
+	{"SEC_TDM_RX_3 Port Mixer", "QUAT_TDM_TX_2", "QUAT_TDM_TX_2"},
+	{"SEC_TDM_RX_3 Port Mixer", "QUAT_TDM_TX_3", "QUAT_TDM_TX_3"},
+	{"SEC_TDM_RX_3", NULL, "SEC_TDM_RX_3 Port Mixer"},
+
+	{"TERT_TDM_RX_0 Port Mixer", "PRI_MI2S_TX", "PRI_MI2S_TX"},
+	{"TERT_TDM_RX_0 Port Mixer", "SEC_MI2S_TX", "SEC_MI2S_TX"},
+	{"TERT_TDM_RX_0 Port Mixer", "QUAT_MI2S_TX", "QUAT_MI2S_TX"},
+	{"TERT_TDM_RX_0 Port Mixer", "INTERNAL_FM_TX", "INT_FM_TX"},
+	{"TERT_TDM_RX_0 Port Mixer", "INTERNAL_BT_SCO_TX", "INT_BT_SCO_TX"},
+	{"TERT_TDM_RX_0 Port Mixer", "AFE_PCM_TX", "PCM_TX"},
+	{"TERT_TDM_RX_0 Port Mixer", "AUX_PCM_UL_TX", "AUX_PCM_TX"},
+	{"TERT_TDM_RX_0 Port Mixer", "SEC_AUX_PCM_UL_TX", "SEC_AUX_PCM_TX"},
+	{"TERT_TDM_RX_0 Port Mixer", "TERT_TDM_TX_0", "TERT_TDM_TX_0"},
+	{"TERT_TDM_RX_0 Port Mixer", "TERT_TDM_TX_1", "TERT_TDM_TX_1"},
+	{"TERT_TDM_RX_0 Port Mixer", "TERT_TDM_TX_2", "TERT_TDM_TX_2"},
+	{"TERT_TDM_RX_0 Port Mixer", "TERT_TDM_TX_3", "TERT_TDM_TX_3"},
+	{"TERT_TDM_RX_0 Port Mixer", "QUAT_TDM_TX_0", "QUAT_TDM_TX_0"},
+	{"TERT_TDM_RX_0 Port Mixer", "QUAT_TDM_TX_1", "QUAT_TDM_TX_1"},
+	{"TERT_TDM_RX_0 Port Mixer", "QUAT_TDM_TX_2", "QUAT_TDM_TX_2"},
+	{"TERT_TDM_RX_0 Port Mixer", "QUAT_TDM_TX_3", "QUAT_TDM_TX_3"},
+	{"TERT_TDM_RX_0", NULL, "TERT_TDM_RX_0 Port Mixer"},
+
+	{"TERT_TDM_RX_1 Port Mixer", "PRI_MI2S_TX", "PRI_MI2S_TX"},
+	{"TERT_TDM_RX_1 Port Mixer", "SEC_MI2S_TX", "SEC_MI2S_TX"},
+	{"TERT_TDM_RX_1 Port Mixer", "QUAT_MI2S_TX", "QUAT_MI2S_TX"},
+	{"TERT_TDM_RX_1 Port Mixer", "INTERNAL_FM_TX", "INT_FM_TX"},
+	{"TERT_TDM_RX_1 Port Mixer", "INTERNAL_BT_SCO_TX", "INT_BT_SCO_TX"},
+	{"TERT_TDM_RX_1 Port Mixer", "AFE_PCM_TX", "PCM_TX"},
+	{"TERT_TDM_RX_1 Port Mixer", "AUX_PCM_UL_TX", "AUX_PCM_TX"},
+	{"TERT_TDM_RX_1 Port Mixer", "SEC_AUX_PCM_UL_TX", "SEC_AUX_PCM_TX"},
+	{"TERT_TDM_RX_1 Port Mixer", "TERT_TDM_TX_0", "TERT_TDM_TX_0"},
+	{"TERT_TDM_RX_1 Port Mixer", "TERT_TDM_TX_1", "TERT_TDM_TX_1"},
+	{"TERT_TDM_RX_1 Port Mixer", "TERT_TDM_TX_2", "TERT_TDM_TX_2"},
+	{"TERT_TDM_RX_1 Port Mixer", "TERT_TDM_TX_3", "TERT_TDM_TX_3"},
+	{"TERT_TDM_RX_1 Port Mixer", "QUAT_TDM_TX_0", "QUAT_TDM_TX_0"},
+	{"TERT_TDM_RX_1 Port Mixer", "QUAT_TDM_TX_1", "QUAT_TDM_TX_1"},
+	{"TERT_TDM_RX_1 Port Mixer", "QUAT_TDM_TX_2", "QUAT_TDM_TX_2"},
+	{"TERT_TDM_RX_1 Port Mixer", "QUAT_TDM_TX_3", "QUAT_TDM_TX_3"},
+	{"TERT_TDM_RX_1", NULL, "TERT_TDM_RX_1 Port Mixer"},
+
+	{"TERT_TDM_RX_2 Port Mixer", "PRI_MI2S_TX", "PRI_MI2S_TX"},
+	{"TERT_TDM_RX_2 Port Mixer", "SEC_MI2S_TX", "SEC_MI2S_TX"},
+	{"TERT_TDM_RX_2 Port Mixer", "QUAT_MI2S_TX", "QUAT_MI2S_TX"},
+	{"TERT_TDM_RX_2 Port Mixer", "INTERNAL_FM_TX", "INT_FM_TX"},
+	{"TERT_TDM_RX_2 Port Mixer", "INTERNAL_BT_SCO_TX", "INT_BT_SCO_TX"},
+	{"TERT_TDM_RX_2 Port Mixer", "AFE_PCM_TX", "PCM_TX"},
+	{"TERT_TDM_RX_2 Port Mixer", "AUX_PCM_UL_TX", "AUX_PCM_TX"},
+	{"TERT_TDM_RX_2 Port Mixer", "SEC_AUX_PCM_UL_TX", "SEC_AUX_PCM_TX"},
+	{"TERT_TDM_RX_2 Port Mixer", "TERT_TDM_TX_0", "TERT_TDM_TX_0"},
+	{"TERT_TDM_RX_2 Port Mixer", "TERT_TDM_TX_1", "TERT_TDM_TX_1"},
+	{"TERT_TDM_RX_2 Port Mixer", "TERT_TDM_TX_2", "TERT_TDM_TX_2"},
+	{"TERT_TDM_RX_2 Port Mixer", "TERT_TDM_TX_3", "TERT_TDM_TX_3"},
+	{"TERT_TDM_RX_2 Port Mixer", "QUAT_TDM_TX_0", "QUAT_TDM_TX_0"},
+	{"TERT_TDM_RX_2 Port Mixer", "QUAT_TDM_TX_1", "QUAT_TDM_TX_1"},
+	{"TERT_TDM_RX_2 Port Mixer", "QUAT_TDM_TX_2", "QUAT_TDM_TX_2"},
+	{"TERT_TDM_RX_2 Port Mixer", "QUAT_TDM_TX_3", "QUAT_TDM_TX_3"},
+	{"TERT_TDM_RX_2", NULL, "TERT_TDM_RX_2 Port Mixer"},
+
+	{"TERT_TDM_RX_3 Port Mixer", "PRI_MI2S_TX", "PRI_MI2S_TX"},
+	{"TERT_TDM_RX_3 Port Mixer", "SEC_MI2S_TX", "SEC_MI2S_TX"},
+	{"TERT_TDM_RX_3 Port Mixer", "QUAT_MI2S_TX", "QUAT_MI2S_TX"},
+	{"TERT_TDM_RX_3 Port Mixer", "INTERNAL_FM_TX", "INT_FM_TX"},
+	{"TERT_TDM_RX_3 Port Mixer", "INTERNAL_BT_SCO_TX", "INT_BT_SCO_TX"},
+	{"TERT_TDM_RX_3 Port Mixer", "AFE_PCM_TX", "PCM_TX"},
+	{"TERT_TDM_RX_3 Port Mixer", "AUX_PCM_UL_TX", "AUX_PCM_TX"},
+	{"TERT_TDM_RX_3 Port Mixer", "SEC_AUX_PCM_UL_TX", "SEC_AUX_PCM_TX"},
+	{"TERT_TDM_RX_3 Port Mixer", "TERT_TDM_TX_0", "TERT_TDM_TX_0"},
+	{"TERT_TDM_RX_3 Port Mixer", "TERT_TDM_TX_1", "TERT_TDM_TX_1"},
+	{"TERT_TDM_RX_3 Port Mixer", "TERT_TDM_TX_2", "TERT_TDM_TX_2"},
+	{"TERT_TDM_RX_3 Port Mixer", "TERT_TDM_TX_3", "TERT_TDM_TX_3"},
+	{"TERT_TDM_RX_3 Port Mixer", "QUAT_TDM_TX_0", "QUAT_TDM_TX_0"},
+	{"TERT_TDM_RX_3 Port Mixer", "QUAT_TDM_TX_1", "QUAT_TDM_TX_1"},
+	{"TERT_TDM_RX_3 Port Mixer", "QUAT_TDM_TX_2", "QUAT_TDM_TX_2"},
+	{"TERT_TDM_RX_3 Port Mixer", "QUAT_TDM_TX_3", "QUAT_TDM_TX_3"},
+	{"TERT_TDM_RX_3", NULL, "TERT_TDM_RX_3 Port Mixer"},
+
+	{"QUAT_TDM_RX_0 Port Mixer", "PRI_MI2S_TX", "PRI_MI2S_TX"},
+	{"QUAT_TDM_RX_0 Port Mixer", "SEC_MI2S_TX", "SEC_MI2S_TX"},
+	{"QUAT_TDM_RX_0 Port Mixer", "TERT_MI2S_TX", "TERT_MI2S_TX"},
+	{"QUAT_TDM_RX_0 Port Mixer", "INTERNAL_FM_TX", "INT_FM_TX"},
+	{"QUAT_TDM_RX_0 Port Mixer", "INTERNAL_BT_SCO_TX", "INT_BT_SCO_TX"},
+	{"QUAT_TDM_RX_0 Port Mixer", "AFE_PCM_TX", "PCM_TX"},
+	{"QUAT_TDM_RX_0 Port Mixer", "AUX_PCM_UL_TX", "AUX_PCM_TX"},
+	{"QUAT_TDM_RX_0 Port Mixer", "SEC_AUX_PCM_UL_TX", "SEC_AUX_PCM_TX"},
+	{"QUAT_TDM_RX_0 Port Mixer", "PRI_TDM_TX_0", "PRI_TDM_TX_0"},
+	{"QUAT_TDM_RX_0 Port Mixer", "PRI_TDM_TX_1", "PRI_TDM_TX_1"},
+	{"QUAT_TDM_RX_0 Port Mixer", "PRI_TDM_TX_2", "PRI_TDM_TX_2"},
+	{"QUAT_TDM_RX_0 Port Mixer", "PRI_TDM_TX_3", "PRI_TDM_TX_3"},
+	{"QUAT_TDM_RX_0 Port Mixer", "TERT_TDM_TX_0", "TERT_TDM_TX_0"},
+	{"QUAT_TDM_RX_0 Port Mixer", "TERT_TDM_TX_1", "TERT_TDM_TX_1"},
+	{"QUAT_TDM_RX_0 Port Mixer", "TERT_TDM_TX_2", "TERT_TDM_TX_2"},
+	{"QUAT_TDM_RX_0 Port Mixer", "TERT_TDM_TX_3", "TERT_TDM_TX_3"},
+	{"QUAT_TDM_RX_0 Port Mixer", "QUAT_TDM_TX_0", "QUAT_TDM_TX_0"},
+	{"QUAT_TDM_RX_0 Port Mixer", "QUAT_TDM_TX_1", "QUAT_TDM_TX_1"},
+	{"QUAT_TDM_RX_0 Port Mixer", "QUAT_TDM_TX_2", "QUAT_TDM_TX_2"},
+	{"QUAT_TDM_RX_0 Port Mixer", "QUAT_TDM_TX_3", "QUAT_TDM_TX_3"},
+	{"QUAT_TDM_RX_0", NULL, "QUAT_TDM_RX_0 Port Mixer"},
+
+	{"QUAT_TDM_RX_1 Port Mixer", "PRI_MI2S_TX", "PRI_MI2S_TX"},
+	{"QUAT_TDM_RX_1 Port Mixer", "SEC_MI2S_TX", "SEC_MI2S_TX"},
+	{"QUAT_TDM_RX_1 Port Mixer", "TERT_MI2S_TX", "TERT_MI2S_TX"},
+	{"QUAT_TDM_RX_1 Port Mixer", "INTERNAL_FM_TX", "INT_FM_TX"},
+	{"QUAT_TDM_RX_1 Port Mixer", "INTERNAL_BT_SCO_TX", "INT_BT_SCO_TX"},
+	{"QUAT_TDM_RX_1 Port Mixer", "AFE_PCM_TX", "PCM_TX"},
+	{"QUAT_TDM_RX_1 Port Mixer", "AUX_PCM_UL_TX", "AUX_PCM_TX"},
+	{"QUAT_TDM_RX_1 Port Mixer", "SEC_AUX_PCM_UL_TX", "SEC_AUX_PCM_TX"},
+	{"QUAT_TDM_RX_1 Port Mixer", "PRI_TDM_TX_0", "PRI_TDM_TX_0"},
+	{"QUAT_TDM_RX_1 Port Mixer", "PRI_TDM_TX_1", "PRI_TDM_TX_1"},
+	{"QUAT_TDM_RX_1 Port Mixer", "PRI_TDM_TX_2", "PRI_TDM_TX_2"},
+	{"QUAT_TDM_RX_1 Port Mixer", "PRI_TDM_TX_3", "PRI_TDM_TX_3"},
+	{"QUAT_TDM_RX_1 Port Mixer", "TERT_TDM_TX_0", "TERT_TDM_TX_0"},
+	{"QUAT_TDM_RX_1 Port Mixer", "TERT_TDM_TX_1", "TERT_TDM_TX_1"},
+	{"QUAT_TDM_RX_1 Port Mixer", "TERT_TDM_TX_2", "TERT_TDM_TX_2"},
+	{"QUAT_TDM_RX_1 Port Mixer", "TERT_TDM_TX_3", "TERT_TDM_TX_3"},
+	{"QUAT_TDM_RX_1 Port Mixer", "QUAT_TDM_TX_0", "QUAT_TDM_TX_0"},
+	{"QUAT_TDM_RX_1 Port Mixer", "QUAT_TDM_TX_1", "QUAT_TDM_TX_1"},
+	{"QUAT_TDM_RX_1 Port Mixer", "QUAT_TDM_TX_2", "QUAT_TDM_TX_2"},
+	{"QUAT_TDM_RX_1 Port Mixer", "QUAT_TDM_TX_3", "QUAT_TDM_TX_3"},
+	{"QUAT_TDM_RX_1", NULL, "QUAT_TDM_RX_1 Port Mixer"},
+
+	{"QUAT_TDM_RX_2 Port Mixer", "PRI_MI2S_TX", "PRI_MI2S_TX"},
+	{"QUAT_TDM_RX_2 Port Mixer", "SEC_MI2S_TX", "SEC_MI2S_TX"},
+	{"QUAT_TDM_RX_2 Port Mixer", "TERT_MI2S_TX", "TERT_MI2S_TX"},
+	{"QUAT_TDM_RX_2 Port Mixer", "INTERNAL_FM_TX", "INT_FM_TX"},
+	{"QUAT_TDM_RX_2 Port Mixer", "INTERNAL_BT_SCO_TX", "INT_BT_SCO_TX"},
+	{"QUAT_TDM_RX_2 Port Mixer", "AFE_PCM_TX", "PCM_TX"},
+	{"QUAT_TDM_RX_2 Port Mixer", "AUX_PCM_UL_TX", "AUX_PCM_TX"},
+	{"QUAT_TDM_RX_2 Port Mixer", "SEC_AUX_PCM_UL_TX", "SEC_AUX_PCM_TX"},
+	{"QUAT_TDM_RX_2 Port Mixer", "PRI_TDM_TX_0", "PRI_TDM_TX_0"},
+	{"QUAT_TDM_RX_2 Port Mixer", "PRI_TDM_TX_1", "PRI_TDM_TX_1"},
+	{"QUAT_TDM_RX_2 Port Mixer", "PRI_TDM_TX_2", "PRI_TDM_TX_2"},
+	{"QUAT_TDM_RX_2 Port Mixer", "PRI_TDM_TX_3", "PRI_TDM_TX_3"},
+	{"QUAT_TDM_RX_2 Port Mixer", "TERT_TDM_TX_0", "TERT_TDM_TX_0"},
+	{"QUAT_TDM_RX_2 Port Mixer", "TERT_TDM_TX_1", "TERT_TDM_TX_1"},
+	{"QUAT_TDM_RX_2 Port Mixer", "TERT_TDM_TX_2", "TERT_TDM_TX_2"},
+	{"QUAT_TDM_RX_2 Port Mixer", "TERT_TDM_TX_3", "TERT_TDM_TX_3"},
+	{"QUAT_TDM_RX_2 Port Mixer", "QUAT_TDM_TX_0", "QUAT_TDM_TX_0"},
+	{"QUAT_TDM_RX_2 Port Mixer", "QUAT_TDM_TX_1", "QUAT_TDM_TX_1"},
+	{"QUAT_TDM_RX_2 Port Mixer", "QUAT_TDM_TX_2", "QUAT_TDM_TX_2"},
+	{"QUAT_TDM_RX_2 Port Mixer", "QUAT_TDM_TX_3", "QUAT_TDM_TX_3"},
+	{"QUAT_TDM_RX_2", NULL, "QUAT_TDM_RX_2 Port Mixer"},
+
+	{"QUAT_TDM_RX_3 Port Mixer", "PRI_MI2S_TX", "PRI_MI2S_TX"},
+	{"QUAT_TDM_RX_3 Port Mixer", "SEC_MI2S_TX", "SEC_MI2S_TX"},
+	{"QUAT_TDM_RX_3 Port Mixer", "TERT_MI2S_TX", "TERT_MI2S_TX"},
+	{"QUAT_TDM_RX_3 Port Mixer", "INTERNAL_FM_TX", "INT_FM_TX"},
+	{"QUAT_TDM_RX_3 Port Mixer", "INTERNAL_BT_SCO_TX", "INT_BT_SCO_TX"},
+	{"QUAT_TDM_RX_3 Port Mixer", "AFE_PCM_TX", "PCM_TX"},
+	{"QUAT_TDM_RX_3 Port Mixer", "AUX_PCM_UL_TX", "AUX_PCM_TX"},
+	{"QUAT_TDM_RX_3 Port Mixer", "SEC_AUX_PCM_UL_TX", "SEC_AUX_PCM_TX"},
+	{"QUAT_TDM_RX_3 Port Mixer", "PRI_TDM_TX_0", "PRI_TDM_TX_0"},
+	{"QUAT_TDM_RX_3 Port Mixer", "PRI_TDM_TX_1", "PRI_TDM_TX_1"},
+	{"QUAT_TDM_RX_3 Port Mixer", "PRI_TDM_TX_2", "PRI_TDM_TX_2"},
+	{"QUAT_TDM_RX_3 Port Mixer", "PRI_TDM_TX_3", "PRI_TDM_TX_3"},
+	{"QUAT_TDM_RX_3 Port Mixer", "TERT_TDM_TX_0", "TERT_TDM_TX_0"},
+	{"QUAT_TDM_RX_3 Port Mixer", "TERT_TDM_TX_1", "TERT_TDM_TX_1"},
+	{"QUAT_TDM_RX_3 Port Mixer", "TERT_TDM_TX_2", "TERT_TDM_TX_2"},
+	{"QUAT_TDM_RX_3 Port Mixer", "TERT_TDM_TX_3", "TERT_TDM_TX_3"},
+	{"QUAT_TDM_RX_3 Port Mixer", "QUAT_TDM_TX_0", "QUAT_TDM_TX_0"},
+	{"QUAT_TDM_RX_3 Port Mixer", "QUAT_TDM_TX_1", "QUAT_TDM_TX_1"},
+	{"QUAT_TDM_RX_3 Port Mixer", "QUAT_TDM_TX_2", "QUAT_TDM_TX_2"},
+	{"QUAT_TDM_RX_3 Port Mixer", "QUAT_TDM_TX_3", "QUAT_TDM_TX_3"},
+	{"QUAT_TDM_RX_3", NULL, "QUAT_TDM_RX_3 Port Mixer"},
+
+	{"INT0_MI2S_RX Port Mixer", "PRI_MI2S_TX", "PRI_MI2S_TX"},
+	{"INT0_MI2S_RX Port Mixer", "SEC_MI2S_TX", "SEC_MI2S_TX"},
+	{"INT0_MI2S_RX Port Mixer", "TERT_MI2S_TX", "TERT_MI2S_TX"},
+	{"INT0_MI2S_RX Port Mixer", "QUAT_MI2S_TX", "QUAT_MI2S_TX"},
+	{"INT0_MI2S_RX Port Mixer", "INT3_MI2S_TX", "INT3_MI2S_TX"},
+	{"INT0_MI2S_RX Port Mixer", "SLIM_7_TX", "SLIMBUS_7_TX"},
+	{"INT0_MI2S_RX Port Mixer", "SLIM_8_TX", "SLIMBUS_8_TX"},
+	{"INT0_MI2S_RX Port Mixer", "INTERNAL_FM_TX", "INT_FM_TX"},
+	{"INT0_MI2S_RX Port Mixer", "INTERNAL_BT_SCO_TX", "INT_BT_SCO_TX"},
+	{"INT0_MI2S_RX", NULL, "INT0_MI2S_RX Port Mixer"},
+
+	{"INT4_MI2S_RX Port Mixer", "PRI_MI2S_TX", "PRI_MI2S_TX"},
+	{"INT4_MI2S_RX Port Mixer", "SEC_MI2S_TX", "SEC_MI2S_TX"},
+	{"INT4_MI2S_RX Port Mixer", "TERT_MI2S_TX", "TERT_MI2S_TX"},
+	{"INT4_MI2S_RX Port Mixer", "QUAT_MI2S_TX", "QUAT_MI2S_TX"},
+	{"INT4_MI2S_RX Port Mixer", "INT3_MI2S_TX", "INT3_MI2S_TX"},
+	{"INT4_MI2S_RX Port Mixer", "SLIM_7_TX", "SLIMBUS_7_TX"},
+	{"INT4_MI2S_RX Port Mixer", "SLIM_8_TX", "SLIMBUS_8_TX"},
+	{"INT4_MI2S_RX Port Mixer", "INTERNAL_FM_TX", "INT_FM_TX"},
+	{"INT4_MI2S_RX Port Mixer", "INTERNAL_BT_SCO_TX", "INT_BT_SCO_TX"},
+	{"INT4_MI2S_RX", NULL, "INT4_MI2S_RX Port Mixer"},
+
+	{"SLIMBUS_0_RX Port Mixer", "INTERNAL_FM_TX", "INT_FM_TX"},
+	{"SLIMBUS_0_RX Port Mixer", "SLIM_0_TX", "SLIMBUS_0_TX"},
+	{"SLIMBUS_0_RX Port Mixer", "SLIM_1_TX", "SLIMBUS_1_TX"},
+	{"SLIMBUS_0_RX Port Mixer", "SLIM_7_TX", "SLIMBUS_7_TX"},
+	{"SLIMBUS_0_RX Port Mixer", "SLIM_8_TX", "SLIMBUS_8_TX"},
+	{"SLIMBUS_0_RX Port Mixer", "AUX_PCM_UL_TX", "AUX_PCM_TX"},
+	{"SLIMBUS_0_RX Port Mixer", "SEC_AUX_PCM_UL_TX", "SEC_AUX_PCM_TX"},
+	{"SLIMBUS_0_RX Port Mixer", "TERT_AUXPCM_UL_TX", "TERT_AUX_PCM_TX"},
+	{"SLIMBUS_0_RX Port Mixer", "QUAT_AUXPCM_UL_TX", "QUAT_AUX_PCM_TX"},
+	{"SLIMBUS_0_RX Port Mixer", "MI2S_TX", "MI2S_TX"},
+	{"SLIMBUS_0_RX Port Mixer", "PRI_MI2S_TX", "PRI_MI2S_TX"},
+	{"SLIMBUS_0_RX Port Mixer", "SEC_MI2S_TX", "SEC_MI2S_TX"},
+	{"SLIMBUS_0_RX Port Mixer", "TERT_MI2S_TX", "TERT_MI2S_TX"},
+	{"SLIMBUS_0_RX Port Mixer", "QUAT_MI2S_TX", "QUAT_MI2S_TX"},
+	{"SLIMBUS_0_RX Port Mixer", "INTERNAL_BT_SCO_TX", "INT_BT_SCO_TX"},
+	{"SLIMBUS_0_RX", NULL, "SLIMBUS_0_RX Port Mixer"},
+	{"AFE_PCM_RX Port Mixer", "INTERNAL_FM_TX", "INT_FM_TX"},
+	{"AFE_PCM_RX Port Mixer", "SLIM_1_TX", "SLIMBUS_1_TX"},
+	{"PCM_RX", NULL, "AFE_PCM_RX Port Mixer"},
+	{"USB_AUDIO_RX Port Mixer", "USB_AUDIO_TX", "USB_AUDIO_TX"},
+	{"USB_AUDIO_RX", NULL, "USB_AUDIO_RX Port Mixer"},
+	{"USB_DL_HL", "Switch", "USBAUDIO_DL_HL"},
+	{"USB_AUDIO_RX", NULL, "USB_DL_HL"},
+	{"USBAUDIO_UL_HL", NULL, "USB_AUDIO_TX"},
+
+	{"AUX_PCM_RX Port Mixer", "AUX_PCM_UL_TX", "AUX_PCM_TX"},
+	{"AUX_PCM_RX Port Mixer", "SLIM_0_TX", "SLIMBUS_0_TX"},
+	{"AUX_PCM_RX Port Mixer", "SLIM_1_TX", "SLIMBUS_1_TX"},
+	{"AUX_PCM_RX Port Mixer", "SEC_AUX_PCM_UL_TX", "SEC_AUX_PCM_TX"},
+	{"AUX_PCM_RX Port Mixer", "QUAT_TDM_TX_0", "QUAT_TDM_TX_0"},
+	{"AUX_PCM_RX", NULL, "AUX_PCM_RX Port Mixer"},
+
+	{"SEC_AUXPCM_RX Port Mixer", "AUX_PCM_UL_TX", "AUX_PCM_TX"},
+	{"SEC_AUXPCM_RX Port Mixer", "SEC_AUX_PCM_UL_TX", "SEC_AUX_PCM_TX"},
+	{"SEC_AUXPCM_RX Port Mixer", "SLIM_0_TX", "SLIMBUS_0_TX"},
+	{"SEC_AUX_PCM_RX", NULL, "SEC_AUXPCM_RX Port Mixer"},
+
+	{"TERT_AUXPCM_RX Port Mixer", "AUX_PCM_UL_TX", "AUX_PCM_TX"},
+	{"TERT_AUXPCM_RX Port Mixer", "TERT_AUXPCM_UL_TX", "TERT_AUX_PCM_TX"},
+	{"TERT_AUXPCM_RX Port Mixer", "SLIM_0_TX", "SLIMBUS_0_TX"},
+	{"TERT_AUX_PCM_RX", NULL, "TERT_AUXPCM_RX Port Mixer"},
+
+	{"QUAT_AUXPCM_RX Port Mixer", "AUX_PCM_UL_TX", "AUX_PCM_TX"},
+	{"QUAT_AUXPCM_RX Port Mixer", "QUAT_AUXPCM_UL_TX", "QUAT_AUX_PCM_TX"},
+	{"QUAT_AUXPCM_RX Port Mixer", "SLIM_0_TX", "SLIMBUS_0_TX"},
+	{"QUAT_AUX_PCM_RX", NULL, "QUAT_AUXPCM_RX Port Mixer"},
+
+	{"Voice Stub Tx Mixer", "STUB_TX_HL", "STUB_TX"},
+	{"Voice Stub Tx Mixer", "SLIM_1_TX", "SLIMBUS_1_TX"},
+	{"Voice Stub Tx Mixer", "INTERNAL_BT_SCO_TX", "INT_BT_SCO_TX"},
+	{"Voice Stub Tx Mixer", "STUB_1_TX_HL", "STUB_1_TX"},
+	{"Voice Stub Tx Mixer", "AUX_PCM_UL_TX", "AUX_PCM_TX"},
+	{"Voice Stub Tx Mixer", "SEC_AUX_PCM_UL_TX", "SEC_AUX_PCM_TX"},
+	{"Voice Stub Tx Mixer", "TERT_AUXPCM_UL_TX", "TERT_AUX_PCM_TX"},
+	{"Voice Stub Tx Mixer", "QUAT_AUXPCM_UL_TX", "QUAT_AUX_PCM_TX"},
+	{"Voice Stub Tx Mixer", "MI2S_TX", "MI2S_TX"},
+	{"Voice Stub Tx Mixer", "PRI_MI2S_TX", "PRI_MI2S_TX"},
+	{"Voice Stub Tx Mixer", "TERT_MI2S_TX", "TERT_MI2S_TX"},
+	{"Voice Stub Tx Mixer", "INT3_MI2S_TX", "INT3_MI2S_TX"},
+	{"Voice Stub Tx Mixer", "QUAT_MI2S_TX", "QUAT_MI2S_TX"},
+	{"Voice Stub Tx Mixer", "SLIM_0_TX", "SLIMBUS_0_TX"},
+	{"Voice Stub Tx Mixer", "SLIM_3_TX", "SLIMBUS_3_TX"},
+	{"Voice Stub Tx Mixer", "SLIM_7_TX", "SLIMBUS_7_TX"},
+	{"Voice Stub Tx Mixer", "SLIM_8_TX", "SLIMBUS_8_TX"},
+	{"Voice Stub Tx Mixer", "AFE_PCM_TX", "PCM_TX"},
+	{"VOICE_STUB_UL", NULL, "Voice Stub Tx Mixer"},
+
+	{"VoLTE Stub Tx Mixer", "STUB_TX_HL", "STUB_TX"},
+	{"VoLTE Stub Tx Mixer", "SLIM_1_TX", "SLIMBUS_1_TX"},
+	{"VoLTE Stub Tx Mixer", "STUB_1_TX_HL", "STUB_1_TX"},
+	{"VoLTE Stub Tx Mixer", "AUX_PCM_UL_TX", "AUX_PCM_TX"},
+	{"VoLTE Stub Tx Mixer", "SLIM_0_TX", "SLIMBUS_0_TX"},
+	{"VoLTE Stub Tx Mixer", "SLIM_3_TX", "SLIMBUS_3_TX"},
+	{"VoLTE Stub Tx Mixer", "SLIM_7_TX", "SLIMBUS_7_TX"},
+	{"VoLTE Stub Tx Mixer", "SLIM_8_TX", "SLIMBUS_8_TX"},
+	{"VoLTE Stub Tx Mixer", "AFE_PCM_TX", "PCM_TX"},
+	{"VoLTE Stub Tx Mixer", "PRI_MI2S_TX", "PRI_MI2S_TX"},
+	{"VoLTE Stub Tx Mixer", "QUAT_MI2S_TX", "QUAT_MI2S_TX"},
+	{"VOLTE_STUB_UL", NULL, "VoLTE Stub Tx Mixer"},
+
+	{"Voice2 Stub Tx Mixer", "STUB_TX_HL", "STUB_TX"},
+	{"Voice2 Stub Tx Mixer", "SLIM_1_TX", "SLIMBUS_1_TX"},
+	{"Voice2 Stub Tx Mixer", "STUB_1_TX_HL", "STUB_1_TX"},
+	{"Voice2 Stub Tx Mixer", "AUX_PCM_UL_TX", "AUX_PCM_TX"},
+	{"Voice2 Stub Tx Mixer", "SLIM_0_TX", "SLIMBUS_0_TX"},
+	{"Voice2 Stub Tx Mixer", "SLIM_3_TX", "SLIMBUS_3_TX"},
+	{"Voice2 Stub Tx Mixer", "SLIM_7_TX", "SLIMBUS_7_TX"},
+	{"Voice2 Stub Tx Mixer", "SLIM_8_TX", "SLIMBUS_8_TX"},
+	{"Voice2 Stub Tx Mixer", "AFE_PCM_TX", "PCM_TX"},
+	{"Voice2 Stub Tx Mixer", "PRI_MI2S_TX", "PRI_MI2S_TX"},
+	{"Voice2 Stub Tx Mixer", "QUAT_MI2S_TX", "QUAT_MI2S_TX"},
+	{"VOICE2_STUB_UL", NULL, "Voice2 Stub Tx Mixer"},
+
+	{"STUB_RX Mixer", "Voice Stub", "VOICE_STUB_DL"},
+	{"STUB_RX Mixer", "Voice2 Stub", "VOICE2_STUB_DL"},
+	{"STUB_RX Mixer", "VoLTE Stub", "VOLTE_STUB_DL"},
+	{"STUB_RX", NULL, "STUB_RX Mixer"},
+	{"SLIMBUS_1_RX Mixer", "Voice Stub", "VOICE_STUB_DL"},
+	{"SLIMBUS_1_RX Mixer", "Voice2 Stub", "VOICE2_STUB_DL"},
+	{"SLIMBUS_1_RX Mixer", "VoLTE Stub", "VOLTE_STUB_DL"},
+	{"SLIMBUS_1_RX", NULL, "SLIMBUS_1_RX Mixer"},
+	{"AFE_PCM_RX_Voice Mixer", "Voice Stub", "VOICE_STUB_DL"},
+	{"AFE_PCM_RX_Voice Mixer", "Voice2 Stub", "VOICE2_STUB_DL"},
+	{"AFE_PCM_RX_Voice Mixer", "VoLTE Stub", "VOLTE_STUB_DL"},
+	{"SLIMBUS_3_RX_Voice Mixer", "Voice Stub", "VOICE_STUB_DL"},
+	{"SLIMBUS_3_RX_Voice Mixer", "Voice2 Stub", "VOICE2_STUB_DL"},
+	{"SLIMBUS_3_RX_Voice Mixer", "VoLTE Stub", "VOLTE_STUB_DL"},
+	{"SLIMBUS_3_RX", NULL, "SLIMBUS_3_RX_Voice Mixer"},
+
+	{"SLIM_7_RX_Voice Mixer", "CSVoice", "CS-VOICE_DL1"},
+	{"SLIM_7_RX_Voice Mixer", "Voice2", "VOICE2_DL"},
+	{"SLIM_7_RX_Voice Mixer", "VoLTE", "VoLTE_DL"},
+	{"SLIM_7_RX_Voice Mixer", "VoWLAN", "VoWLAN_DL"},
+	{"SLIM_7_RX_Voice Mixer", "Voip", "VOIP_DL"},
+	{"SLIM_7_RX_Voice Mixer", "DTMF", "DTMF_DL_HL"},
+	{"SLIM_7_RX_Voice Mixer", "Voice Stub", "VOICE_STUB_DL"},
+	{"SLIM_7_RX_Voice Mixer", "Voice2 Stub", "VOICE2_STUB_DL"},
+	{"SLIM_7_RX_Voice Mixer", "VoLTE Stub", "VOLTE_STUB_DL"},
+	{"SLIM_7_RX_Voice Mixer", "QCHAT", "QCHAT_DL"},
+	{"SLIM_7_RX_Voice Mixer", "VoiceMMode1", "VOICEMMODE1_DL"},
+	{"SLIM_7_RX_Voice Mixer", "VoiceMMode2", "VOICEMMODE2_DL"},
+	{"SLIMBUS_7_RX", NULL, "SLIM_7_RX_Voice Mixer"},
+
+	{"SLIM_8_RX_Voice Mixer", "CSVoice", "CS-VOICE_DL1"},
+	{"SLIM_8_RX_Voice Mixer", "Voice2", "VOICE2_DL"},
+	{"SLIM_8_RX_Voice Mixer", "VoLTE", "VoLTE_DL"},
+	{"SLIM_8_RX_Voice Mixer", "VoWLAN", "VoWLAN_DL"},
+	{"SLIM_8_RX_Voice Mixer", "Voip", "VOIP_DL"},
+	{"SLIM_8_RX_Voice Mixer", "DTMF", "DTMF_DL_HL"},
+	{"SLIM_8_RX_Voice Mixer", "Voice Stub", "VOICE_STUB_DL"},
+	{"SLIM_8_RX_Voice Mixer", "Voice2 Stub", "VOICE2_STUB_DL"},
+	{"SLIM_8_RX_Voice Mixer", "VoLTE Stub", "VOLTE_STUB_DL"},
+	{"SLIM_8_RX_Voice Mixer", "QCHAT", "QCHAT_DL"},
+	{"SLIM_8_RX_Voice Mixer", "VoiceMMode1", "VOICEMMODE1_DL"},
+	{"SLIM_8_RX_Voice Mixer", "VoiceMMode2", "VOICEMMODE2_DL"},
+	{"SLIMBUS_8_RX", NULL, "SLIM_8_RX_Voice Mixer"},
+
+	{"SLIMBUS_1_RX Port Mixer", "INTERNAL_BT_SCO_TX", "INT_BT_SCO_TX"},
+	{"SLIMBUS_1_RX Port Mixer", "AFE_PCM_TX", "PCM_TX"},
+	{"SLIMBUS_1_RX Port Mixer", "AUX_PCM_UL_TX", "AUX_PCM_TX"},
+	{"SLIMBUS_1_RX", NULL, "SLIMBUS_1_RX Port Mixer"},
+	{"INTERNAL_BT_SCO_RX Port Mixer", "SLIM_1_TX", "SLIMBUS_1_TX"},
+	{"INTERNAL_BT_SCO_RX Port Mixer", "SLIM_0_TX", "SLIMBUS_0_TX"},
+	{"INT_BT_SCO_RX", NULL, "INTERNAL_BT_SCO_RX Port Mixer"},
+	{"SLIMBUS_3_RX Port Mixer", "INTERNAL_BT_SCO_RX", "INT_BT_SCO_RX"},
+	{"SLIMBUS_3_RX Port Mixer", "MI2S_TX", "MI2S_TX"},
+	{"SLIMBUS_3_RX Port Mixer", "AFE_PCM_RX", "PCM_RX"},
+	{"SLIMBUS_3_RX Port Mixer", "AUX_PCM_RX", "AUX_PCM_RX"},
+	{"SLIMBUS_3_RX Port Mixer", "SLIM_0_RX", "SLIMBUS_0_RX"},
+	{"SLIMBUS_3_RX", NULL, "SLIMBUS_3_RX Port Mixer"},
+
+	{"SLIMBUS_6_RX Port Mixer", "INTERNAL_FM_TX", "INT_FM_TX"},
+	{"SLIMBUS_6_RX Port Mixer", "SLIM_0_TX", "SLIMBUS_0_TX"},
+	{"SLIMBUS_6_RX Port Mixer", "SLIM_1_TX", "SLIMBUS_1_TX"},
+	{"SLIMBUS_6_RX Port Mixer", "SLIM_7_TX", "SLIMBUS_7_TX"},
+	{"SLIMBUS_6_RX Port Mixer", "SLIM_8_TX", "SLIMBUS_8_TX"},
+	{"SLIMBUS_6_RX Port Mixer", "AUX_PCM_UL_TX", "AUX_PCM_TX"},
+	{"SLIMBUS_6_RX Port Mixer", "SEC_AUX_PCM_UL_TX", "SEC_AUX_PCM_TX"},
+	{"SLIMBUS_6_RX Port Mixer", "MI2S_TX", "MI2S_TX"},
+	{"SLIMBUS_6_RX Port Mixer", "PRI_MI2S_TX", "PRI_MI2S_TX"},
+	{"SLIMBUS_6_RX Port Mixer", "TERT_MI2S_TX", "TERT_MI2S_TX"},
+	{"SLIMBUS_6_RX Port Mixer", "QUAT_MI2S_TX", "QUAT_MI2S_TX"},
+	{"SLIMBUS_6_RX Port Mixer", "INTERNAL_BT_SCO_TX", "INT_BT_SCO_TX"},
+	{"SLIMBUS_6_RX", NULL, "SLIMBUS_6_RX Port Mixer"},
+
+	{"HDMI_RX Port Mixer", "MI2S_TX", "MI2S_TX"},
+	{"HDMI", NULL, "HDMI_RX Port Mixer"},
+
+	{"DISPLAY_PORT_RX Port Mixer", "MI2S_TX", "MI2S_TX"},
+	{"DISPLAY_PORT", NULL, "DISPLAY_PORT_RX Port Mixer"},
+
+	{"SEC_I2S_RX Port Mixer", "MI2S_TX", "MI2S_TX"},
+	{"SEC_I2S_RX", NULL, "SEC_I2S_RX Port Mixer"},
+
+	{"MI2S_RX Port Mixer", "SLIM_1_TX", "SLIMBUS_1_TX"},
+	{"MI2S_RX Port Mixer", "MI2S_TX", "MI2S_TX"},
+	{"MI2S_RX", NULL, "MI2S_RX Port Mixer"},
+
+	{"PRI_MI2S_RX Port Mixer", "PRI_MI2S_TX", "PRI_MI2S_TX"},
+	{"PRI_MI2S_RX Port Mixer", "SEC_MI2S_TX", "SEC_MI2S_TX"},
+	{"PRI_MI2S_RX Port Mixer", "QUAT_MI2S_TX", "QUAT_MI2S_TX"},
+	{"PRI_MI2S_RX Port Mixer", "TERT_MI2S_TX", "TERT_MI2S_TX"},
+	{"PRI_MI2S_RX Port Mixer", "SLIM_0_TX", "SLIMBUS_0_TX"},
+	{"PRI_MI2S_RX Port Mixer", "INTERNAL_FM_TX", "INT_FM_TX"},
+	{"PRI_MI2S_RX Port Mixer", "INTERNAL_BT_SCO_TX", "INT_BT_SCO_TX"},
+	{"PRI_MI2S_RX Port Mixer", "SLIM_8_TX", "SLIMBUS_8_TX"},
+	{"PRI_MI2S_RX", NULL, "PRI_MI2S_RX Port Mixer"},
+
+	{"SEC_MI2S_RX Port Mixer", "PRI_MI2S_TX", "PRI_MI2S_TX"},
+	{"SEC_MI2S_RX Port Mixer", "SEC_MI2S_TX", "SEC_MI2S_TX"},
+	{"SEC_MI2S_RX Port Mixer", "TERT_MI2S_TX", "TERT_MI2S_TX"},
+	{"SEC_MI2S_RX Port Mixer", "QUAT_MI2S_TX", "QUAT_MI2S_TX"},
+	{"SEC_MI2S_RX Port Mixer", "SLIM_0_TX", "SLIMBUS_0_TX"},
+	{"SEC_MI2S_RX Port Mixer", "INTERNAL_FM_TX", "INT_FM_TX"},
+	{"SEC_MI2S_RX Port Mixer", "SLIM_8_TX", "SLIMBUS_8_TX"},
+	{"SEC_MI2S_RX", NULL, "SEC_MI2S_RX Port Mixer"},
+
+	{"TERT_MI2S_RX Port Mixer", "PRI_MI2S_TX", "PRI_MI2S_TX"},
+	{"TERT_MI2S_RX Port Mixer", "SEC_MI2S_TX", "SEC_MI2S_TX"},
+	{"TERT_MI2S_RX Port Mixer", "TERT_MI2S_TX", "TERT_MI2S_TX"},
+	{"TERT_MI2S_RX Port Mixer", "QUAT_MI2S_TX", "QUAT_MI2S_TX"},
+	{"TERT_MI2S_RX Port Mixer", "SLIM_0_TX", "SLIMBUS_0_TX"},
+	{"TERT_MI2S_RX Port Mixer", "SLIM_8_TX", "SLIMBUS_8_TX"},
+	{"TERT_MI2S_RX", NULL, "TERT_MI2S_RX Port Mixer"},
+
+	{"QUAT_MI2S_RX Port Mixer", "PRI_MI2S_TX", "PRI_MI2S_TX"},
+	{"QUAT_MI2S_RX Port Mixer", "SEC_MI2S_TX", "SEC_MI2S_TX"},
+	{"QUAT_MI2S_RX Port Mixer", "TERT_MI2S_TX", "TERT_MI2S_TX"},
+	{"QUAT_MI2S_RX Port Mixer", "QUAT_MI2S_TX", "QUAT_MI2S_TX"},
+	{"QUAT_MI2S_RX Port Mixer", "SLIM_0_TX", "SLIMBUS_0_TX"},
+	{"QUAT_MI2S_RX Port Mixer", "INTERNAL_FM_TX", "INT_FM_TX"},
+	{"QUAT_MI2S_RX Port Mixer", "AUX_PCM_UL_TX", "AUX_PCM_TX"},
+	{"QUAT_MI2S_RX Port Mixer", "SLIM_8_TX", "SLIMBUS_8_TX"},
+	{"QUAT_MI2S_RX", NULL, "QUAT_MI2S_RX Port Mixer"},
+
+	/* Backend Enablement */
+
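+	/*
+	 * BE_OUT and BE_IN are virtual end-point widgets; no audio flows
+	 * through them. They complete the DAPM graph so that a back-end
+	 * DAI powers up once any route to or from it becomes active.
+	 */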
+	{"BE_OUT", NULL, "PRI_I2S_RX"},
+	{"BE_OUT", NULL, "SEC_I2S_RX"},
+	{"BE_OUT", NULL, "SLIMBUS_0_RX"},
+	{"BE_OUT", NULL, "SLIMBUS_1_RX"},
+	{"BE_OUT", NULL, "SLIMBUS_2_RX"},
+	{"BE_OUT", NULL, "SLIMBUS_3_RX"},
+	{"BE_OUT", NULL, "SLIMBUS_4_RX"},
+	{"BE_OUT", NULL, "SLIMBUS_5_RX"},
+	{"BE_OUT", NULL, "SLIMBUS_6_RX"},
+	{"BE_OUT", NULL, "SLIMBUS_7_RX"},
+	{"BE_OUT", NULL, "SLIMBUS_8_RX"},
+	{"BE_OUT", NULL, "USB_AUDIO_RX"},
+	{"BE_OUT", NULL, "HDMI"},
+	{"BE_OUT", NULL, "DISPLAY_PORT"},
+	{"BE_OUT", NULL, "SPDIF_RX"},
+	{"BE_OUT", NULL, "MI2S_RX"},
+	{"BE_OUT", NULL, "QUAT_MI2S_RX"},
+	{"BE_OUT", NULL, "QUIN_MI2S_RX"},
+	{"BE_OUT", NULL, "TERT_MI2S_RX"},
+	{"BE_OUT", NULL, "SEC_MI2S_RX"},
+	{"BE_OUT", NULL, "SEC_MI2S_RX_SD1"},
+	{"BE_OUT", NULL, "PRI_MI2S_RX"},
+	{"BE_OUT", NULL, "INT0_MI2S_RX"},
+	{"BE_OUT", NULL, "INT4_MI2S_RX"},
+	{"BE_OUT", NULL, "INT2_MI2S_RX"},
+	{"BE_OUT", NULL, "INT3_MI2S_RX"},
+	{"BE_OUT", NULL, "INT5_MI2S_RX"},
+	{"BE_OUT", NULL, "INT_BT_SCO_RX"},
+	{"BE_OUT", NULL, "INT_BT_A2DP_RX"},
+	{"BE_OUT", NULL, "INT_FM_RX"},
+	{"BE_OUT", NULL, "PCM_RX"},
+	{"BE_OUT", NULL, "SLIMBUS_3_RX"},
+	{"BE_OUT", NULL, "AUX_PCM_RX"},
+	{"BE_OUT", NULL, "SEC_AUX_PCM_RX"},
+	{"BE_OUT", NULL, "TERT_AUX_PCM_RX"},
+	{"BE_OUT", NULL, "QUAT_AUX_PCM_RX"},
+	{"BE_OUT", NULL, "INT_BT_SCO_RX"},
+	{"BE_OUT", NULL, "INT_FM_RX"},
+	{"BE_OUT", NULL, "PCM_RX"},
+	{"BE_OUT", NULL, "SLIMBUS_3_RX"},
+	{"BE_OUT", NULL, "VOICE_PLAYBACK_TX"},
+	{"BE_OUT", NULL, "VOICE2_PLAYBACK_TX"},
+	{"BE_OUT", NULL, "PRI_TDM_RX_0"},
+	{"BE_OUT", NULL, "PRI_TDM_RX_1"},
+	{"BE_OUT", NULL, "PRI_TDM_RX_2"},
+	{"BE_OUT", NULL, "PRI_TDM_RX_3"},
+	{"BE_OUT", NULL, "SEC_TDM_RX_0"},
+	{"BE_OUT", NULL, "SEC_TDM_RX_1"},
+	{"BE_OUT", NULL, "SEC_TDM_RX_2"},
+	{"BE_OUT", NULL, "SEC_TDM_RX_3"},
+	{"BE_OUT", NULL, "TERT_TDM_RX_0"},
+	{"BE_OUT", NULL, "TERT_TDM_RX_1"},
+	{"BE_OUT", NULL, "TERT_TDM_RX_2"},
+	{"BE_OUT", NULL, "TERT_TDM_RX_3"},
+	{"BE_OUT", NULL, "TERT_TDM_RX_4"},
+	{"BE_OUT", NULL, "QUAT_TDM_RX_0"},
+	{"BE_OUT", NULL, "QUAT_TDM_RX_1"},
+	{"BE_OUT", NULL, "QUAT_TDM_RX_2"},
+	{"BE_OUT", NULL, "QUAT_TDM_RX_3"},
+
+	{"PRI_I2S_TX", NULL, "BE_IN"},
+	{"MI2S_TX", NULL, "BE_IN"},
+	{"QUAT_MI2S_TX", NULL, "BE_IN"},
+	{"QUIN_MI2S_TX", NULL, "BE_IN"},
+	{"PRI_MI2S_TX", NULL, "BE_IN"},
+	{"TERT_MI2S_TX", NULL, "BE_IN"},
+	{"INT0_MI2S_TX", NULL, "BE_IN"},
+	{"INT2_MI2S_TX", NULL, "BE_IN"},
+	{"INT3_MI2S_TX", NULL, "BE_IN"},
+	{"INT4_MI2S_TX", NULL, "BE_IN"},
+	{"INT5_MI2S_TX", NULL, "BE_IN"},
+	{"SEC_MI2S_TX", NULL, "BE_IN"},
+	{"SENARY_MI2S_TX", NULL, "BE_IN" },
+	{"SLIMBUS_0_TX", NULL, "BE_IN" },
+	{"SLIMBUS_1_TX", NULL, "BE_IN" },
+	{"SLIMBUS_3_TX", NULL, "BE_IN" },
+	{"SLIMBUS_4_TX", NULL, "BE_IN" },
+	{"SLIMBUS_5_TX", NULL, "BE_IN" },
+	{"SLIMBUS_6_TX", NULL, "BE_IN" },
+	{"SLIMBUS_7_TX", NULL, "BE_IN" },
+	{"SLIMBUS_8_TX", NULL, "BE_IN" },
+	{"USB_AUDIO_TX", NULL, "BE_IN" },
+	{"INT_BT_SCO_TX", NULL, "BE_IN"},
+	{"INT_FM_TX", NULL, "BE_IN"},
+	{"PCM_TX", NULL, "BE_IN"},
+	{"BE_OUT", NULL, "SLIMBUS_3_RX"},
+	{"BE_OUT", NULL, "STUB_RX"},
+	{"STUB_TX", NULL, "BE_IN"},
+	{"STUB_1_TX", NULL, "BE_IN"},
+	{"BE_OUT", NULL, "AUX_PCM_RX"},
+	{"AUX_PCM_TX", NULL, "BE_IN"},
+	{"SEC_AUX_PCM_TX", NULL, "BE_IN"},
+	{"TERT_AUX_PCM_TX", NULL, "BE_IN"},
+	{"QUAT_AUX_PCM_TX", NULL, "BE_IN"},
+	{"INCALL_RECORD_TX", NULL, "BE_IN"},
+	{"INCALL_RECORD_RX", NULL, "BE_IN"},
+	{"SLIM0_RX_VI_FB_LCH_MUX", "SLIM4_TX", "SLIMBUS_4_TX"},
+	{"SLIM0_RX_VI_FB_RCH_MUX", "SLIM4_TX", "SLIMBUS_4_TX"},
+	{"PRI_MI2S_RX_VI_FB_MUX", "SENARY_TX", "SENARY_TX"},
+	{"INT4_MI2S_RX_VI_FB_MONO_CH_MUX", "INT5_MI2S_TX", "INT5_MI2S_TX"},
+	{"INT4_MI2S_RX_VI_FB_STEREO_CH_MUX", "INT5_MI2S_TX", "INT5_MI2S_TX"},
+	{"SLIMBUS_0_RX", NULL, "SLIM0_RX_VI_FB_LCH_MUX"},
+	{"SLIMBUS_0_RX", NULL, "SLIM0_RX_VI_FB_RCH_MUX"},
+	{"PRI_MI2S_RX", NULL, "PRI_MI2S_RX_VI_FB_MUX"},
+	{"INT4_MI2S_RX", NULL, "INT4_MI2S_RX_VI_FB_MONO_CH_MUX"},
+	{"INT4_MI2S_RX", NULL, "INT4_MI2S_RX_VI_FB_STEREO_CH_MUX"},
+	{"PRI_TDM_TX_0", NULL, "BE_IN"},
+	{"PRI_TDM_TX_1", NULL, "BE_IN"},
+	{"PRI_TDM_TX_2", NULL, "BE_IN"},
+	{"PRI_TDM_TX_3", NULL, "BE_IN"},
+	{"SEC_TDM_TX_0", NULL, "BE_IN"},
+	{"SEC_TDM_TX_1", NULL, "BE_IN"},
+	{"SEC_TDM_TX_2", NULL, "BE_IN"},
+	{"SEC_TDM_TX_3", NULL, "BE_IN"},
+	{"TERT_TDM_TX_0", NULL, "BE_IN"},
+	{"TERT_TDM_TX_1", NULL, "BE_IN"},
+	{"TERT_TDM_TX_2", NULL, "BE_IN"},
+	{"TERT_TDM_TX_3", NULL, "BE_IN"},
+	{"QUAT_TDM_TX_0", NULL, "BE_IN"},
+	{"QUAT_TDM_TX_1", NULL, "BE_IN"},
+	{"QUAT_TDM_TX_2", NULL, "BE_IN"},
+	{"QUAT_TDM_TX_3", NULL, "BE_IN"},
+};
+
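+/*
+ * Cache the back end's hw_params (sample rate, channel count and format)
+ * so ADM sessions opened later from prepare() can be configured to match.
+ */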
+static int msm_pcm_routing_hw_params(struct snd_pcm_substream *substream,
+				struct snd_pcm_hw_params *params)
+{
+	struct snd_soc_pcm_runtime *rtd = substream->private_data;
+	unsigned int be_id = rtd->dai_link->be_id;
+
+	if (be_id >= MSM_BACKEND_DAI_MAX) {
+		pr_err("%s: unexpected be_id %d\n", __func__, be_id);
+		return -EINVAL;
+	}
+
+	mutex_lock(&routing_lock);
+	msm_bedais[be_id].sample_rate = params_rate(params);
+	msm_bedais[be_id].channel = params_channels(params);
+	msm_bedais[be_id].format = params_format(params);
+	pr_debug("%s: BE Sample Rate (%d) format (%d) be_id %d\n",
+		__func__, msm_bedais[be_id].sample_rate,
+		msm_bedais[be_id].format, be_id);
+	mutex_unlock(&routing_lock);
+	return 0;
+}
+
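+/*
+ * Tear down routing for a back end: close the ADM COPP of every front-end
+ * session still mapped to this BE, then clear its cached configuration.
+ */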
+static int msm_pcm_routing_close(struct snd_pcm_substream *substream)
+{
+	struct snd_soc_pcm_runtime *rtd = substream->private_data;
+	unsigned int be_id = rtd->dai_link->be_id;
+	int i, session_type, path_type, topology;
+	struct msm_pcm_routing_bdai_data *bedai;
+	struct msm_pcm_routing_fdai_data *fdai;
+
+	pr_debug("%s: substream->pcm->id:%s\n",
+		 __func__, substream->pcm->id);
+
+	if (be_id >= MSM_BACKEND_DAI_MAX) {
+		pr_err("%s: unexpected be_id %d\n", __func__, be_id);
+		return -EINVAL;
+	}
+
+	bedai = &msm_bedais[be_id];
+	session_type = (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) ?
+		SESSION_TYPE_RX : SESSION_TYPE_TX;
+	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+		path_type = ADM_PATH_PLAYBACK;
+	else
+		path_type = ADM_PATH_LIVE_REC;
+
+	mutex_lock(&routing_lock);
+	for_each_set_bit(i, &bedai->fe_sessions[0], MSM_FRONTEND_DAI_MAX) {
+		if (!is_mm_lsm_fe_id(i))
+			continue;
+		fdai = &fe_dai_map[i][session_type];
+		if (fdai->strm_id != INVALID_SESSION) {
+			int idx;
+			int port_id;
+			unsigned long copp =
+				session_copp_map[i][session_type][be_id];
+			for (idx = 0; idx < MAX_COPPS_PER_PORT; idx++)
+				if (test_bit(idx, &copp))
+					break;
+			fdai->be_srate = bedai->sample_rate;
+			port_id = bedai->port_id;
+			topology = adm_get_topology_for_port_copp_idx(port_id,
+								     idx);
+			adm_close(bedai->port_id, fdai->perf_mode, idx);
+			pr_debug("%s: copp:%ld,idx bit fe:%d, type:%d,be:%d topology=0x%x\n",
+				 __func__, copp, i, session_type, be_id,
+				 topology);
+			clear_bit(idx,
+				  &session_copp_map[i][session_type][be_id]);
+			if ((fdai->perf_mode == LEGACY_PCM_MODE) &&
+				(bedai->passthr_mode[i] == LEGACY_PCM))
+				msm_pcm_routing_deinit_pp(bedai->port_id,
+							  topology);
+		}
+	}
+
+	bedai->active = 0;
+	bedai->sample_rate = 0;
+	bedai->channel = 0;
+	mutex_unlock(&routing_lock);
+
+	return 0;
+}
+
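+/*
+ * Bring up routing for a back end: open an ADM COPP for every front end
+ * routed to it, using the cached BE configuration, then configure any
+ * active voice sessions and the external EC reference port if needed.
+ */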
+static int msm_pcm_routing_prepare(struct snd_pcm_substream *substream)
+{
+	struct snd_soc_pcm_runtime *rtd = substream->private_data;
+	unsigned int be_id = rtd->dai_link->be_id;
+	int i, path_type, session_type, topology;
+	struct msm_pcm_routing_bdai_data *bedai;
+	u32 channels, sample_rate;
+	uint16_t bits_per_sample = 16, voc_path_type;
+	struct msm_pcm_routing_fdai_data *fdai;
+	u32 session_id;
+	struct media_format_info voc_be_media_format;
+	bool is_lsm;
+
+	pr_debug("%s: substream->pcm->id:%s\n",
+		 __func__, substream->pcm->id);
+
+	if (be_id >= MSM_BACKEND_DAI_MAX) {
+		pr_err("%s: unexpected be_id %d\n", __func__, be_id);
+		return -EINVAL;
+	}
+
+	bedai = &msm_bedais[be_id];
+
+	mutex_lock(&routing_lock);
+	if (bedai->active == 1)
+		goto done; /* Ignore prepare if back-end already active */
+
+	/*
+	 * The AFE port is not active at this point, but set the active
+	 * flag anyway, on the assumption that QDSP6 can handle the ADM
+	 * session starting before the AFE port is up.
+	 */
+	bedai->active = 1;
+
+	for_each_set_bit(i, &bedai->fe_sessions[0], MSM_FRONTEND_DAI_MAX) {
+		if (!(is_mm_lsm_fe_id(i) &&
+				route_check_fe_id_adm_support(i)))
+			continue;
+
+		if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+			if (bedai->passthr_mode[i] != LEGACY_PCM)
+				path_type = ADM_PATH_COMPRESSED_RX;
+			else
+				path_type = ADM_PATH_PLAYBACK;
+			session_type = SESSION_TYPE_RX;
+		} else {
+			path_type = ADM_PATH_LIVE_REC;
+			session_type = SESSION_TYPE_TX;
+		}
+
+		is_lsm = (i >= MSM_FRONTEND_DAI_LSM1) &&
+				 (i <= MSM_FRONTEND_DAI_LSM8);
+		fdai = &fe_dai_map[i][session_type];
+		if (fdai->strm_id != INVALID_SESSION) {
+			int app_type, app_type_idx, copp_idx, acdb_dev_id;
+			if (session_type == SESSION_TYPE_TX &&
+			    fdai->be_srate &&
+			    (fdai->be_srate != bedai->sample_rate)) {
+				pr_debug("%s: flush strm %d diff BE rates\n",
+					__func__,
+					fdai->strm_id);
+
+				if (fdai->event_info.event_func)
+					fdai->event_info.event_func(
+						MSM_PCM_RT_EVT_BUF_RECFG,
+						fdai->event_info.priv_data);
+				fdai->be_srate = 0; /* clear stale BE rate; possibly redundant */
+			}
+			bits_per_sample = msm_routing_get_bit_width(
+						bedai->format);
+
+			app_type =
+			fe_dai_app_type_cfg[i][session_type][be_id].app_type;
+			if (app_type && is_lsm) {
+				app_type_idx =
+				msm_pcm_routing_get_lsm_app_type_idx(app_type);
+				sample_rate =
+				fe_dai_app_type_cfg[i][session_type][be_id]
+					.sample_rate;
+				bits_per_sample =
+				lsm_app_type_cfg[app_type_idx].bit_width;
+			} else if (app_type) {
+				app_type_idx =
+				msm_pcm_routing_get_app_type_idx(app_type);
+				sample_rate =
+					fe_dai_app_type_cfg[i][session_type]
+							   [be_id].sample_rate;
+				bits_per_sample =
+					app_type_cfg[app_type_idx].bit_width;
+			} else
+				sample_rate = bedai->sample_rate;
+			/*
+			 * check if ADM needs to be configured with different
+			 * channel mapping than backend
+			 */
+			if (!bedai->adm_override_ch)
+				channels = bedai->channel;
+			else
+				channels = bedai->adm_override_ch;
+			acdb_dev_id =
+			fe_dai_app_type_cfg[i][session_type][be_id].acdb_dev_id;
+			topology = msm_routing_get_adm_topology(i, session_type,
+								be_id);
+			copp_idx = adm_open(bedai->port_id, path_type,
+					    sample_rate, channels, topology,
+					    fdai->perf_mode, bits_per_sample,
+					    app_type, acdb_dev_id);
+			if ((copp_idx < 0) ||
+				(copp_idx >= MAX_COPPS_PER_PORT)) {
+				pr_err("%s: adm open failed\n", __func__);
+				mutex_unlock(&routing_lock);
+				return -EINVAL;
+			}
+			pr_debug("%s: setting idx bit of fe:%d, type: %d, be:%d\n",
+				 __func__, i, session_type, be_id);
+			set_bit(copp_idx,
+				&session_copp_map[i][session_type][be_id]);
+
+			if (msm_is_resample_needed(
+				sample_rate,
+				bedai->sample_rate))
+				adm_copp_mfc_cfg(
+					bedai->port_id, copp_idx,
+					bedai->sample_rate);
+
+			msm_pcm_routing_build_matrix(i, session_type, path_type,
+						     fdai->perf_mode,
+						     bedai->passthr_mode[i]);
+			if ((fdai->perf_mode == LEGACY_PCM_MODE) &&
+				(bedai->passthr_mode[i] == LEGACY_PCM))
+				msm_pcm_routing_cfg_pp(bedai->port_id, copp_idx,
+						       topology, channels);
+		}
+	}
+
+	for_each_set_bit(i, &bedai->fe_sessions[0], MSM_FRONTEND_DAI_MAX) {
+		session_id = msm_pcm_routing_get_voc_sessionid(i);
+		if (session_id) {
+			pr_debug("%s voice session_id: 0x%x\n", __func__,
+				 session_id);
+
+			if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+				voc_path_type = RX_PATH;
+			else
+				voc_path_type = TX_PATH;
+
+			voc_set_route_flag(session_id, voc_path_type, 1);
+
+			memset(&voc_be_media_format, 0,
+			       sizeof(struct media_format_info));
+
+			voc_be_media_format.port_id = bedai->port_id;
+			voc_be_media_format.num_channels = bedai->channel;
+			voc_be_media_format.sample_rate = bedai->sample_rate;
+			voc_be_media_format.bits_per_sample = bedai->format;
+			/* Default to 1 for voice call use cases */
+			voc_be_media_format.channel_mapping[0] = 1;
+
+			voc_set_device_config(session_id, voc_path_type,
+					      &voc_be_media_format);
+
+			if (voc_get_route_flag(session_id, RX_PATH) &&
+				voc_get_route_flag(session_id, TX_PATH))
+					voc_enable_device(session_id);
+		}
+	}
+
+	/* Check if backend is an external ec ref port and set as needed */
+	if (unlikely(bedai->port_id == voc_get_ext_ec_ref_port_id())) {
+
+		memset(&voc_be_media_format, 0,
+		       sizeof(struct media_format_info));
+
+		/* Get format info for ec ref port from msm_bedais[] */
+		voc_be_media_format.port_id = bedai->port_id;
+		voc_be_media_format.num_channels = bedai->channel;
+		voc_be_media_format.bits_per_sample = bedai->format;
+		voc_be_media_format.sample_rate = bedai->sample_rate;
+		/* Default to 1 for voice call use cases */
+		voc_be_media_format.channel_mapping[0] = 1;
+		voc_set_ext_ec_ref_media_fmt_info(&voc_be_media_format);
+		pr_debug("%s: EC Ref media format info set to port_id=%d, num_channels=%d, bits_per_sample=%d, sample_rate=%d\n",
+			 __func__, voc_be_media_format.port_id,
+			 voc_be_media_format.num_channels,
+			 voc_be_media_format.bits_per_sample,
+			 voc_be_media_format.sample_rate);
+	}
+
+done:
+	mutex_unlock(&routing_lock);
+
+	return 0;
+}
+
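+/*
+ * Apply cached device post-processing parameters (mute, latency) to a
+ * compressed passthrough COPP; only HDMI and Display Port support this.
+ */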
+static int msm_routing_send_device_pp_params(int port_id, int copp_idx,
+					     int fe_id)
+{
+	int topo_id, be_idx;
+	unsigned long pp_config = 0;
+	bool mute_on;
+	int latency;
+	bool compr_passthr_mode = true;
+
+	pr_debug("%s: port_id %d, copp_idx %d\n", __func__, port_id, copp_idx);
+
+	if (port_id != HDMI_RX && port_id != DISPLAY_PORT_RX) {
+		pr_err("%s: Device pp params on invalid port %d\n",
+			__func__, port_id);
+		return  -EINVAL;
+	}
+
+	for (be_idx = 0; be_idx < MSM_BACKEND_DAI_MAX; be_idx++) {
+		if (port_id == msm_bedais[be_idx].port_id)
+			break;
+	}
+
+	if (be_idx >= MSM_BACKEND_DAI_MAX) {
+		pr_debug("%s: Invalid be id %d\n", __func__, be_idx);
+		return  -EINVAL;
+	}
+
+	topo_id = adm_get_topology_for_port_copp_idx(port_id, copp_idx);
+	if (topo_id != COMPRESSED_PASSTHROUGH_DEFAULT_TOPOLOGY) {
+		pr_err("%s: Invalid passthrough topology 0x%x\n",
+			__func__, topo_id);
+		return -EINVAL;
+	}
+
+	if ((msm_bedais[be_idx].passthr_mode[fe_id] == LEGACY_PCM) ||
+		(msm_bedais[be_idx].passthr_mode[fe_id] == LISTEN))
+		compr_passthr_mode = false;
+
+	pp_config = msm_bedais_pp_params[be_idx].pp_params_config;
+	if (test_bit(ADM_PP_PARAM_MUTE_BIT, &pp_config)) {
+		pr_debug("%s: ADM_PP_PARAM_MUTE\n", __func__);
+		clear_bit(ADM_PP_PARAM_MUTE_BIT, &pp_config);
+		mute_on = msm_bedais_pp_params[be_idx].mute_on;
+		if ((msm_bedais[be_idx].active) && compr_passthr_mode)
+			adm_send_compressed_device_mute(port_id,
+								copp_idx,
+								mute_on);
+	}
+	if (test_bit(ADM_PP_PARAM_LATENCY_BIT, &pp_config)) {
+		pr_debug("%s: ADM_PP_PARAM_LATENCY\n", __func__);
+		clear_bit(ADM_PP_PARAM_LATENCY_BIT,
+			  &pp_config);
+		latency = msm_bedais_pp_params[be_idx].latency;
+		if ((msm_bedais[be_idx].active) && compr_passthr_mode)
+			adm_send_compressed_device_latency(port_id,
+							   copp_idx,
+							   latency);
+	}
+	return 0;
+}
+
+static bool msm_pcm_routing_test_pp_param(int be_idx, long param_bit)
+{
+	return test_bit(param_bit,
+		&msm_bedais_pp_params[be_idx].pp_params_config);
+}
+
+static void msm_routing_set_pp_param(long param_bit, int value)
+{
+	int be_idx;
+
+	if (value) {
+		for (be_idx = 0; be_idx < MSM_BACKEND_DAI_MAX; be_idx++)
+			set_bit(param_bit,
+				&msm_bedais_pp_params[be_idx].
+				pp_params_config);
+	} else {
+		for (be_idx = 0; be_idx < MSM_BACKEND_DAI_MAX; be_idx++)
+			clear_bit(param_bit,
+				&msm_bedais_pp_params[be_idx].
+				pp_params_config);
+	}
+}
+
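+/*
+ * "Device PP Params" mixer control: value[0] carries the parameter id,
+ * value[1] the parameter value. Parameters are cached per back end and
+ * sent immediately when the BE is active in compressed passthrough mode.
+ */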
+static int msm_routing_put_device_pp_params_mixer(struct snd_kcontrol *kcontrol,
+					struct snd_ctl_elem_value *ucontrol)
+{
+	int pp_id = ucontrol->value.integer.value[0];
+	int value = ucontrol->value.integer.value[1];
+	int port_id = 0;
+	int be_idx, i, topo_id, idx;
+	bool mute;
+	int latency;
+	bool compr_passthr_mode = true;
+
+	pr_debug("%s: pp_id: 0x%x\n", __func__, pp_id);
+
+	if (pp_id == ADM_PP_PARAM_LIMITER_ID) {
+		msm_routing_set_pp_param(ADM_PP_PARAM_LIMITER_BIT, value);
+		goto done;
+	}
+
+	for (be_idx = 0; be_idx < MSM_BACKEND_DAI_MAX; be_idx++) {
+		port_id = msm_bedais[be_idx].port_id;
+		if (port_id == HDMI_RX || port_id == DISPLAY_PORT_RX)
+			break;
+	}
+
+	if (be_idx >= MSM_BACKEND_DAI_MAX) {
+		pr_debug("%s: Invalid be id %d\n", __func__, be_idx);
+		return  -EINVAL;
+	}
+
+	for_each_set_bit(i, &msm_bedais[be_idx].fe_sessions[0],
+				MSM_FRONTEND_DAI_MM_SIZE) {
+		if ((msm_bedais[be_idx].passthr_mode[i] == LEGACY_PCM) ||
+			(msm_bedais[be_idx].passthr_mode[i] == LISTEN))
+			compr_passthr_mode = false;
+
+		for (idx = 0; idx < MAX_COPPS_PER_PORT; idx++) {
+			unsigned long copp =
+				session_copp_map[i]
+				[SESSION_TYPE_RX][be_idx];
+			if (!test_bit(idx, &copp))
+				continue;
+			topo_id = adm_get_topology_for_port_copp_idx(port_id,
+								     idx);
+			if (topo_id != COMPRESSED_PASSTHROUGH_DEFAULT_TOPOLOGY)
+				continue;
+		pr_debug("%s: port: 0x%x, copp %ld, be active: %d, passt: %d\n",
+			 __func__, port_id, copp, msm_bedais[be_idx].active,
+			 msm_bedais[be_idx].passthr_mode[i]);
+		switch (pp_id) {
+		case ADM_PP_PARAM_MUTE_ID:
+			pr_debug("%s: ADM_PP_PARAM_MUTE\n", __func__);
+			mute = value ? true : false;
+			msm_bedais_pp_params[be_idx].mute_on = mute;
+			set_bit(ADM_PP_PARAM_MUTE_BIT,
+				&msm_bedais_pp_params[be_idx].pp_params_config);
+			if ((msm_bedais[be_idx].active) && compr_passthr_mode)
+				adm_send_compressed_device_mute(port_id,
+					idx, mute);
+			break;
+		case ADM_PP_PARAM_LATENCY_ID:
+			pr_debug("%s: ADM_PP_PARAM_LATENCY\n", __func__);
+			msm_bedais_pp_params[be_idx].latency = value;
+			set_bit(ADM_PP_PARAM_LATENCY_BIT,
+				&msm_bedais_pp_params[be_idx].pp_params_config);
+			latency = msm_bedais_pp_params[be_idx].latency;
+			if ((msm_bedais[be_idx].active) && compr_passthr_mode)
+				adm_send_compressed_device_latency(port_id,
+					idx, latency);
+			break;
+		default:
+			pr_info("%s, device pp param %d not supported\n",
+				__func__, pp_id);
+			break;
+		}
+		}
+	}
+done:
+	return 0;
+}
+
+static int msm_routing_get_device_pp_params_mixer(struct snd_kcontrol *kcontrol,
+					struct snd_ctl_elem_value *ucontrol)
+{
+	pr_debug("%s:msm_routing_get_device_pp_params_mixer", __func__);
+	return 0;
+}
+
+static const struct snd_kcontrol_new device_pp_params_mixer_controls[] = {
+	SOC_SINGLE_MULTI_EXT("Device PP Params", SND_SOC_NOPM, 0, 0xFFFFFFFF,
+	0, 3, msm_routing_get_device_pp_params_mixer,
+	msm_routing_put_device_pp_params_mixer),
+};
+
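+/* Query and set the aptX decoder license status in the ADSP core service. */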
+static int msm_aptx_dec_license_control_get(struct snd_kcontrol *kcontrol,
+				struct snd_ctl_elem_value *ucontrol)
+{
+	ucontrol->value.integer.value[0] =
+			core_get_license_status(ASM_MEDIA_FMT_APTX);
+	pr_debug("%s: status %ld\n", __func__,
+			ucontrol->value.integer.value[0]);
+	return 0;
+}
+
+static int msm_aptx_dec_license_control_put(struct snd_kcontrol *kcontrol,
+				struct snd_ctl_elem_value *ucontrol)
+{
+	int32_t status = 0;
+
+	status = core_set_license(ucontrol->value.integer.value[0],
+				APTX_CLASSIC_DEC_LICENSE_ID);
+	pr_debug("%s: status %d\n", __func__, status);
+	return status;
+}
+
+static const struct snd_kcontrol_new aptx_dec_license_controls[] = {
+	SOC_SINGLE_EXT("APTX Dec License", SND_SOC_NOPM, 0,
+	0xFFFF, 0, msm_aptx_dec_license_control_get,
+	msm_aptx_dec_license_control_put),
+};
+
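+/*
+ * "Backend DAI Name Table": read-only TLV byte control exporting the
+ * be_id to DAI-name mapping so userspace can look up back ends by name.
+ */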
+static int msm_routing_be_dai_name_table_info(struct snd_kcontrol *kcontrol,
+					      struct snd_ctl_elem_info *uinfo)
+{
+	uinfo->type = SNDRV_CTL_ELEM_TYPE_BYTES;
+	uinfo->count = sizeof(be_dai_name_table);
+	return 0;
+}
+
+static int msm_routing_be_dai_name_table_tlv_get(struct snd_kcontrol *kcontrol,
+						 unsigned int __user *bytes,
+						 unsigned int size)
+{
+	int i;
+	int ret;
+
+	if (size < sizeof(be_dai_name_table)) {
+		pr_err("%s: invalid size %d requested, returning\n",
+			__func__, size);
+		ret = -EINVAL;
+		goto done;
+	}
+
+	/*
+	 * Fill be_dai_name_table from msm_bedais table to reduce code changes
+	 * needed when adding new backends
+	 */
+	for (i = 0; i < MSM_BACKEND_DAI_MAX; i++) {
+		be_dai_name_table[i].be_id = i;
+		strlcpy(be_dai_name_table[i].be_name,
+			msm_bedais[i].name,
+			LPASS_BE_NAME_MAX_LENGTH);
+	}
+
+	ret = copy_to_user(bytes, &be_dai_name_table,
+			   sizeof(be_dai_name_table));
+	if (ret) {
+		pr_err("%s: failed to copy be_dai_name_table\n", __func__);
+		ret = -EFAULT;
+	}
+
+done:
+	return ret;
+}
+
+static const struct snd_kcontrol_new
+	msm_routing_be_dai_name_table_mixer_controls[] = {
+	{
+		.access = SNDRV_CTL_ELEM_ACCESS_TLV_READ |
+			  SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK,
+		.info = msm_routing_be_dai_name_table_info,
+		.name = "Backend DAI Name Table",
+		.tlv.c = snd_soc_bytes_tlv_callback,
+		.private_value = (unsigned long) &(struct soc_bytes_ext) {
+			.max = sizeof(be_dai_name_table),
+			.get = msm_routing_be_dai_name_table_tlv_get,
+		}
+	},
+};
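+
+/*
+ * Userspace reads the table exposed above through the control's TLV
+ * interface. A minimal alsa-lib sketch (card "hw:0" and the buffer size
+ * are assumptions; error handling elided):
+ *
+ *	snd_ctl_t *ctl;
+ *	snd_ctl_elem_id_t *id;
+ *	unsigned int buf[4096];
+ *
+ *	snd_ctl_open(&ctl, "hw:0", 0);
+ *	snd_ctl_elem_id_alloca(&id);
+ *	snd_ctl_elem_id_set_interface(id, SND_CTL_ELEM_IFACE_MIXER);
+ *	snd_ctl_elem_id_set_name(id, "Backend DAI Name Table");
+ *	snd_ctl_elem_tlv_read(ctl, id, buf, sizeof(buf));
+ *
+ * On success the payload holds one (be_id, be_name) record per backend,
+ * as filled in by msm_routing_be_dai_name_table_tlv_get() above.
+ */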
+
+static int msm_routing_stereo_channel_reverse_control_get(
+			struct snd_kcontrol *kcontrol,
+			struct snd_ctl_elem_value *ucontrol)
+{
+	ucontrol->value.integer.value[0] = swap_ch;
+	pr_debug("%s: Swap channel value: %ld\n", __func__,
+				ucontrol->value.integer.value[0]);
+	return 0;
+}
+
+static int msm_routing_stereo_channel_reverse_control_put(
+			struct snd_kcontrol *kcontrol,
+			struct snd_ctl_elem_value *ucontrol)
+{
+	int i, idx, be_index, port_id;
+	int ret = 0;
+	unsigned long copp;
+
+	pr_debug("%s Swap channel value:%ld\n", __func__,
+				ucontrol->value.integer.value[0]);
+
+	swap_ch = ucontrol->value.integer.value[0];
+
+	mutex_lock(&routing_lock);
+	for (be_index = 0; be_index < MSM_BACKEND_DAI_MAX; be_index++) {
+		port_id = msm_bedais[be_index].port_id;
+		if (!msm_bedais[be_index].active)
+			continue;
+
+		for_each_set_bit(i, &msm_bedais[be_index].fe_sessions[0],
+				MSM_FRONTEND_DAI_MM_SIZE) {
+			copp = session_copp_map[i][SESSION_TYPE_RX][be_index];
+			for (idx = 0; idx < MAX_COPPS_PER_PORT; idx++) {
+				if (!test_bit(idx, &copp))
+					continue;
+
+				pr_debug("%s: swap channel control of portid:%d, coppid:%d\n",
+					 __func__, port_id, idx);
+				ret = adm_swap_speaker_channels(
+					port_id, idx,
+					msm_bedais[be_index].sample_rate,
+					swap_ch);
+				if (ret) {
+					pr_err("%s:Swap_channel failed, err=%d\n",
+						 __func__, ret);
+					goto done;
+				}
+			}
+		}
+	}
+done:
+	mutex_unlock(&routing_lock);
+	return ret;
+}
+
+static const struct snd_kcontrol_new stereo_channel_reverse_control[] = {
+	SOC_SINGLE_EXT("Swap channel", SND_SOC_NOPM, 0,
+	1, 0, msm_routing_stereo_channel_reverse_control_get,
+	msm_routing_stereo_channel_reverse_control_put),
+};
+
+static struct snd_pcm_ops msm_routing_pcm_ops = {
+	.hw_params	= msm_pcm_routing_hw_params,
+	.close		= msm_pcm_routing_close,
+	.prepare	= msm_pcm_routing_prepare,
+};
+
+int msm_routing_set_downmix_control_data(int be_id, int session_id,
+			struct asm_stream_pan_ctrl_params *dnmix_param)
+{
+	int i, rc = 0;
+	struct adm_pspd_param_data_t data;
+	struct audproc_chmixer_param_coeff dnmix_cfg = {0,};
+	uint16_t variable_payload = 0;
+	char *adm_params = NULL;
+	int port_id, copp_idx = 0;
+	uint32_t params_length = 0;
+	uint16_t ii;
+	uint16_t *dst_gain_ptr = NULL;
+
+	if (be_id < MSM_BACKEND_DAI_PRI_I2S_RX ||
+	    be_id >= MSM_BACKEND_DAI_MAX) {
+		rc = -EINVAL;
+		return rc;
+	}
+	port_id = msm_bedais[be_id].port_id;
+	copp_idx = adm_get_default_copp_idx(port_id);
+	pr_debug("%s: port_id - %d, copp_idx %d session id - %d\n",
+		 __func__, port_id, copp_idx, session_id);
+
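+	/*
+	 * The variable part of the payload below is laid out as:
+	 * output channel map | input channel map | out x in gain matrix,
+	 * all uint16_t entries, and is rounded up to a multiple of
+	 * sizeof(uint32_t) before being appended to the two fixed headers.
+	 */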
+	variable_payload = dnmix_param->num_output_channels * sizeof(uint16_t) +
+			   dnmix_param->num_input_channels * sizeof(uint16_t) +
+			   dnmix_param->num_output_channels *
+			   dnmix_param->num_input_channels * sizeof(uint16_t);
+	i = (variable_payload % sizeof(uint32_t));
+	variable_payload += (i == 0) ? 0 : sizeof(uint32_t) - i;
+
+	params_length = variable_payload +
+			sizeof(struct adm_pspd_param_data_t) +
+			sizeof(struct audproc_chmixer_param_coeff);
+	adm_params = kzalloc(params_length, GFP_KERNEL);
+	if (!adm_params) {
+		rc = -ENOMEM;
+		goto end;
+	}
+
+	data.module_id  = AUDPROC_MODULE_ID_CHMIXER;
+	data.param_id   = AUDPROC_CHMIXER_PARAM_ID_COEFF;
+	data.param_size = sizeof(struct audproc_chmixer_param_coeff) +
+			  variable_payload;
+	data.reserved   = 0;
+	memcpy((u8 *)adm_params, &data, sizeof(struct adm_pspd_param_data_t));
+
+	dnmix_cfg.index               = 0;
+	dnmix_cfg.num_output_channels = dnmix_param->num_output_channels;
+	dnmix_cfg.num_input_channels  = dnmix_param->num_input_channels;
+	memcpy(((u8 *)adm_params +
+		sizeof(struct adm_pspd_param_data_t)),
+		&dnmix_cfg, sizeof(struct audproc_chmixer_param_coeff));
+
+	memcpy(((u8 *)adm_params +
+		sizeof(struct adm_pspd_param_data_t) +
+		sizeof(struct audproc_chmixer_param_coeff)),
+		dnmix_param->output_channel_map,
+		dnmix_param->num_output_channels * sizeof(uint16_t));
+	memcpy(((u8 *)adm_params +
+		sizeof(struct adm_pspd_param_data_t) +
+		sizeof(struct audproc_chmixer_param_coeff) +
+		dnmix_param->num_output_channels * sizeof(uint16_t)),
+		dnmix_param->input_channel_map,
+		dnmix_param->num_input_channels * sizeof(uint16_t));
+
+	dst_gain_ptr = (uint16_t *) ((u8 *)adm_params +
+		sizeof(struct adm_pspd_param_data_t) +
+		sizeof(struct audproc_chmixer_param_coeff) +
+		(dnmix_param->num_output_channels * sizeof(uint16_t)) +
+		(dnmix_param->num_input_channels * sizeof(uint16_t)));
+	for (ii = 0; ii < dnmix_param->num_output_channels *
+			dnmix_param->num_input_channels; ii++)
+		dst_gain_ptr[ii] = (uint16_t) dnmix_param->gain[ii];
+
+	if (params_length) {
+		rc = adm_set_pspd_matrix_params(port_id,
+						copp_idx,
+						session_id,
+						adm_params,
+						params_length);
+		if (rc) {
+			pr_err("%s: send params failed rc=%d\n", __func__, rc);
+			rc = -EINVAL;
+		}
+	}
+end:
+	kfree(adm_params);
+	return rc;
+}
+
+/* Not used but the framework seems to require it */
+static int msm_routing_probe(struct snd_soc_platform *platform)
+{
+	snd_soc_dapm_new_controls(&platform->component.dapm, msm_qdsp6_widgets,
+			   ARRAY_SIZE(msm_qdsp6_widgets));
+	snd_soc_dapm_add_routes(&platform->component.dapm, intercon,
+		ARRAY_SIZE(intercon));
+
+	snd_soc_dapm_new_widgets(platform->component.dapm.card);
+
+	snd_soc_add_platform_controls(platform, lsm_controls,
+				      ARRAY_SIZE(lsm_controls));
+
+	snd_soc_add_platform_controls(platform, aanc_slim_0_rx_mux,
+				      ARRAY_SIZE(aanc_slim_0_rx_mux));
+
+	snd_soc_add_platform_controls(platform, msm_voc_session_controls,
+				      ARRAY_SIZE(msm_voc_session_controls));
+
+	snd_soc_add_platform_controls(platform, app_type_cfg_controls,
+				      ARRAY_SIZE(app_type_cfg_controls));
+
+	snd_soc_add_platform_controls(platform, lsm_app_type_cfg_controls,
+				      ARRAY_SIZE(lsm_app_type_cfg_controls));
+
+	snd_soc_add_platform_controls(platform,
+				stereo_to_custom_stereo_controls,
+			ARRAY_SIZE(stereo_to_custom_stereo_controls));
+
+	snd_soc_add_platform_controls(platform, ec_ref_param_controls,
+				ARRAY_SIZE(ec_ref_param_controls));
+
+	snd_soc_add_platform_controls(platform, channel_mixer_controls,
+				ARRAY_SIZE(channel_mixer_controls));
+
+	msm_qti_pp_add_controls(platform);
+
+	msm_dts_srs_tm_add_controls(platform);
+
+	msm_dolby_dap_add_controls(platform);
+
+	snd_soc_add_platform_controls(platform,
+			use_ds1_or_ds2_controls,
+			ARRAY_SIZE(use_ds1_or_ds2_controls));
+
+	snd_soc_add_platform_controls(platform,
+				device_pp_params_mixer_controls,
+				ARRAY_SIZE(device_pp_params_mixer_controls));
+
+	snd_soc_add_platform_controls(platform,
+		msm_routing_be_dai_name_table_mixer_controls,
+		ARRAY_SIZE(msm_routing_be_dai_name_table_mixer_controls));
+
+	snd_soc_add_platform_controls(platform, msm_source_tracking_controls,
+				ARRAY_SIZE(msm_source_tracking_controls));
+	snd_soc_add_platform_controls(platform, adm_channel_config_controls,
+				ARRAY_SIZE(adm_channel_config_controls));
+
+	snd_soc_add_platform_controls(platform, aptx_dec_license_controls,
+					ARRAY_SIZE(aptx_dec_license_controls));
+	snd_soc_add_platform_controls(platform, stereo_channel_reverse_control,
+				ARRAY_SIZE(stereo_channel_reverse_control));
+	return 0;
+}
+
+int msm_routing_pcm_new(struct snd_soc_pcm_runtime *runtime)
+{
+	return msm_pcm_routing_hwdep_new(runtime, msm_bedais);
+}
+
+void msm_routing_pcm_free(struct snd_pcm *pcm)
+{
+	msm_pcm_routing_hwdep_free(pcm);
+}
+
+static struct snd_soc_platform_driver msm_soc_routing_platform = {
+	.ops		= &msm_routing_pcm_ops,
+	.probe		= msm_routing_probe,
+	.pcm_new	= msm_routing_pcm_new,
+	.pcm_free	= msm_routing_pcm_free,
+};
+
+static int msm_routing_pcm_probe(struct platform_device *pdev)
+{
+	dev_dbg(&pdev->dev, "dev name %s\n", dev_name(&pdev->dev));
+	return snd_soc_register_platform(&pdev->dev,
+				  &msm_soc_routing_platform);
+}
+
+static int msm_routing_pcm_remove(struct platform_device *pdev)
+{
+	snd_soc_unregister_platform(&pdev->dev);
+	return 0;
+}
+
+static const struct of_device_id msm_pcm_routing_dt_match[] = {
+	{.compatible = "qcom,msm-pcm-routing"},
+	{}
+};
+MODULE_DEVICE_TABLE(of, msm_pcm_routing_dt_match);
+
+static struct platform_driver msm_routing_pcm_driver = {
+	.driver = {
+		.name = "msm-pcm-routing",
+		.owner = THIS_MODULE,
+		.of_match_table = msm_pcm_routing_dt_match,
+	},
+	.probe = msm_routing_pcm_probe,
+	.remove = msm_routing_pcm_remove,
+};
+
+int msm_routing_check_backend_enabled(int fedai_id)
+{
+	int i;
+
+	if (fedai_id > MSM_FRONTEND_DAI_MM_MAX_ID) {
+		/* bad ID assigned in machine driver */
+		pr_err("%s: bad MM ID\n", __func__);
+		return 0;
+	}
+	for (i = 0; i < MSM_BACKEND_DAI_MAX; i++) {
+		if (test_bit(fedai_id, &msm_bedais[i].fe_sessions[0]))
+			return msm_bedais[i].active;
+	}
+	return 0;
+}
+
+static int get_cal_type_index(int32_t cal_type)
+{
+	int ret = -EINVAL;
+
+	switch (cal_type) {
+	case ADM_TOPOLOGY_CAL_TYPE:
+		ret = ADM_TOPOLOGY_CAL_TYPE_IDX;
+		break;
+	case ADM_LSM_TOPOLOGY_CAL_TYPE:
+		ret = ADM_LSM_TOPOLOGY_CAL_TYPE_IDX;
+		break;
+	default:
+		pr_err("%s: Invalid cal type %d\n", __func__, cal_type);
+	}
+	return ret;
+}
+
+static int msm_routing_set_cal(int32_t cal_type,
+					size_t data_size, void *data)
+{
+	int				ret = 0;
+	int				cal_index;
+	pr_debug("%s\n", __func__);
+
+	cal_index = get_cal_type_index(cal_type);
+	if (cal_index < 0) {
+		pr_err("%s: Could not get cal index %d\n",
+			__func__, cal_index);
+		ret = -EINVAL;
+		goto done;
+	}
+
+	ret = cal_utils_set_cal(data_size, data, cal_data[cal_index], 0, NULL);
+	if (ret < 0) {
+		pr_err("%s: cal_utils_set_cal failed, ret = %d, cal type = %d!\n",
+			__func__, ret, cal_type);
+		ret = -EINVAL;
+		goto done;
+	}
+done:
+	return ret;
+}
+
+static void msm_routing_delete_cal_data(void)
+{
+	pr_debug("%s\n", __func__);
+
+	cal_utils_destroy_cal_types(MAX_ROUTING_CAL_TYPES, &cal_data[0]);
+
+	return;
+}
+
+static int msm_routing_init_cal_data(void)
+{
+	int				ret = 0;
+	struct cal_type_info		cal_type_info[] = {
+		{{ADM_TOPOLOGY_CAL_TYPE,
+		{NULL, NULL, NULL,
+		msm_routing_set_cal, NULL, NULL} },
+		{NULL, NULL, cal_utils_match_buf_num} },
+
+		{{ADM_LSM_TOPOLOGY_CAL_TYPE,
+		{NULL, NULL, NULL,
+		msm_routing_set_cal, NULL, NULL} },
+		{NULL, NULL, cal_utils_match_buf_num} },
+	};
+	pr_debug("%s\n", __func__);
+
+	ret = cal_utils_create_cal_types(MAX_ROUTING_CAL_TYPES, &cal_data[0],
+		&cal_type_info[0]);
+	if (ret < 0) {
+		pr_err("%s: could not create cal type!\n",
+			__func__);
+		ret = -EINVAL;
+		goto err;
+	}
+
+	return ret;
+err:
+	msm_routing_delete_cal_data();
+	return ret;
+}
+
+static int __init msm_soc_routing_platform_init(void)
+{
+	mutex_init(&routing_lock);
+	if (msm_routing_init_cal_data())
+		pr_err("%s: could not init cal data!\n", __func__);
+
+	afe_set_routing_callback(
+		(routing_cb)msm_pcm_get_dev_acdb_id_by_port_id);
+
+	memset(&be_dai_name_table, 0, sizeof(be_dai_name_table));
+	memset(&last_be_id_configured, 0, sizeof(last_be_id_configured));
+
+	return platform_driver_register(&msm_routing_pcm_driver);
+}
+module_init(msm_soc_routing_platform_init);
+
+static void __exit msm_soc_routing_platform_exit(void)
+{
+	msm_routing_delete_cal_data();
+	memset(&be_dai_name_table, 0, sizeof(be_dai_name_table));
+	mutex_destroy(&routing_lock);
+	platform_driver_unregister(&msm_routing_pcm_driver);
+}
+module_exit(msm_soc_routing_platform_exit);
+
+MODULE_DESCRIPTION("MSM routing platform driver");
+MODULE_LICENSE("GPL v2");
diff -Nruw linux-4.4.115-fbx/sound/soc/msm./qdsp6v2/msm-pcm-routing-v2.h linux-4.4.115-fbx/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.h
--- linux-4.4.115-fbx/sound/soc/msm./qdsp6v2/msm-pcm-routing-v2.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.h	2019-10-29 09:26:26.157227780 +0100
@@ -0,0 +1,509 @@
+/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#ifndef _MSM_PCM_ROUTING_H
+#define _MSM_PCM_ROUTING_H
+#include <sound/apr_audio-v2.h>
+
+/*
+ * These names are used by the HAL to specify the BE. If any changes
+ * are made to the string names or the max name length, corresponding
+ * changes need to be made in the HAL so they still match.
+ */
+#define LPASS_BE_NAME_MAX_LENGTH 24
+#define LPASS_BE_PRI_I2S_RX "PRIMARY_I2S_RX"
+#define LPASS_BE_PRI_I2S_TX "PRIMARY_I2S_TX"
+#define LPASS_BE_SLIMBUS_0_RX "SLIMBUS_0_RX"
+#define LPASS_BE_SLIMBUS_0_TX "SLIMBUS_0_TX"
+#define LPASS_BE_HDMI "HDMI"
+#define LPASS_BE_DISPLAY_PORT "DISPLAY_PORT"
+#define LPASS_BE_INT_BT_SCO_RX "INT_BT_SCO_RX"
+#define LPASS_BE_INT_BT_SCO_TX "INT_BT_SCO_TX"
+#define LPASS_BE_INT_BT_A2DP_RX "INT_BT_A2DP_RX"
+#define LPASS_BE_INT_FM_RX "INT_FM_RX"
+#define LPASS_BE_INT_FM_TX "INT_FM_TX"
+#define LPASS_BE_AFE_PCM_RX "RT_PROXY_DAI_001_RX"
+#define LPASS_BE_AFE_PCM_TX "RT_PROXY_DAI_002_TX"
+#define LPASS_BE_AUXPCM_RX "AUX_PCM_RX"
+#define LPASS_BE_AUXPCM_TX "AUX_PCM_TX"
+#define LPASS_BE_SEC_AUXPCM_RX "SEC_AUX_PCM_RX"
+#define LPASS_BE_SEC_AUXPCM_TX "SEC_AUX_PCM_TX"
+#define LPASS_BE_TERT_AUXPCM_RX "TERT_AUX_PCM_RX"
+#define LPASS_BE_TERT_AUXPCM_TX "TERT_AUX_PCM_TX"
+#define LPASS_BE_QUAT_AUXPCM_RX "QUAT_AUX_PCM_RX"
+#define LPASS_BE_QUAT_AUXPCM_TX "QUAT_AUX_PCM_TX"
+#define LPASS_BE_VOICE_PLAYBACK_TX "VOICE_PLAYBACK_TX"
+#define LPASS_BE_VOICE2_PLAYBACK_TX "VOICE2_PLAYBACK_TX"
+#define LPASS_BE_INCALL_RECORD_RX "INCALL_RECORD_RX"
+#define LPASS_BE_INCALL_RECORD_TX "INCALL_RECORD_TX"
+#define LPASS_BE_SEC_I2S_RX "SECONDARY_I2S_RX"
+#define LPASS_BE_SPDIF_RX "SPDIF_RX"
+
+#define LPASS_BE_MI2S_RX "MI2S_RX"
+#define LPASS_BE_MI2S_TX "MI2S_TX"
+#define LPASS_BE_QUAT_MI2S_RX "QUAT_MI2S_RX"
+#define LPASS_BE_QUAT_MI2S_TX "QUAT_MI2S_TX"
+#define LPASS_BE_SEC_MI2S_RX "SEC_MI2S_RX"
+#define LPASS_BE_SEC_MI2S_RX_SD1 "SEC_MI2S_RX_SD1"
+#define LPASS_BE_SEC_MI2S_TX "SEC_MI2S_TX"
+#define LPASS_BE_PRI_MI2S_RX "PRI_MI2S_RX"
+#define LPASS_BE_PRI_MI2S_TX "PRI_MI2S_TX"
+#define LPASS_BE_TERT_MI2S_RX "TERT_MI2S_RX"
+#define LPASS_BE_TERT_MI2S_TX "TERT_MI2S_TX"
+#define LPASS_BE_AUDIO_I2S_RX "AUDIO_I2S_RX"
+#define LPASS_BE_STUB_RX "STUB_RX"
+#define LPASS_BE_STUB_TX "STUB_TX"
+#define LPASS_BE_SLIMBUS_1_RX "SLIMBUS_1_RX"
+#define LPASS_BE_SLIMBUS_1_TX "SLIMBUS_1_TX"
+#define LPASS_BE_STUB_1_TX "STUB_1_TX"
+#define LPASS_BE_SLIMBUS_2_RX "SLIMBUS_2_RX"
+#define LPASS_BE_SLIMBUS_2_TX "SLIMBUS_2_TX"
+#define LPASS_BE_SLIMBUS_3_RX "SLIMBUS_3_RX"
+#define LPASS_BE_SLIMBUS_3_TX "SLIMBUS_3_TX"
+#define LPASS_BE_SLIMBUS_4_RX "SLIMBUS_4_RX"
+#define LPASS_BE_SLIMBUS_4_TX "SLIMBUS_4_TX"
+#define LPASS_BE_SLIMBUS_TX_VI "SLIMBUS_TX_VI"
+#define LPASS_BE_SLIMBUS_5_RX "SLIMBUS_5_RX"
+#define LPASS_BE_SLIMBUS_5_TX "SLIMBUS_5_TX"
+#define LPASS_BE_SLIMBUS_6_RX "SLIMBUS_6_RX"
+#define LPASS_BE_SLIMBUS_6_TX "SLIMBUS_6_TX"
+#define LPASS_BE_QUIN_MI2S_RX "QUIN_MI2S_RX"
+#define LPASS_BE_QUIN_MI2S_TX "QUIN_MI2S_TX"
+#define LPASS_BE_SENARY_MI2S_TX "SENARY_MI2S_TX"
+
+#define LPASS_BE_PRI_TDM_RX_0 "PRI_TDM_RX_0"
+#define LPASS_BE_PRI_TDM_TX_0 "PRI_TDM_TX_0"
+#define LPASS_BE_PRI_TDM_RX_1 "PRI_TDM_RX_1"
+#define LPASS_BE_PRI_TDM_TX_1 "PRI_TDM_TX_1"
+#define LPASS_BE_PRI_TDM_RX_2 "PRI_TDM_RX_2"
+#define LPASS_BE_PRI_TDM_TX_2 "PRI_TDM_TX_2"
+#define LPASS_BE_PRI_TDM_RX_3 "PRI_TDM_RX_3"
+#define LPASS_BE_PRI_TDM_TX_3 "PRI_TDM_TX_3"
+#define LPASS_BE_PRI_TDM_RX_4 "PRI_TDM_RX_4"
+#define LPASS_BE_PRI_TDM_TX_4 "PRI_TDM_TX_4"
+#define LPASS_BE_PRI_TDM_RX_5 "PRI_TDM_RX_5"
+#define LPASS_BE_PRI_TDM_TX_5 "PRI_TDM_TX_5"
+#define LPASS_BE_PRI_TDM_RX_6 "PRI_TDM_RX_6"
+#define LPASS_BE_PRI_TDM_TX_6 "PRI_TDM_TX_6"
+#define LPASS_BE_PRI_TDM_RX_7 "PRI_TDM_RX_7"
+#define LPASS_BE_PRI_TDM_TX_7 "PRI_TDM_TX_7"
+#define LPASS_BE_SEC_TDM_RX_0 "SEC_TDM_RX_0"
+#define LPASS_BE_SEC_TDM_TX_0 "SEC_TDM_TX_0"
+#define LPASS_BE_SEC_TDM_RX_1 "SEC_TDM_RX_1"
+#define LPASS_BE_SEC_TDM_TX_1 "SEC_TDM_TX_1"
+#define LPASS_BE_SEC_TDM_RX_2 "SEC_TDM_RX_2"
+#define LPASS_BE_SEC_TDM_TX_2 "SEC_TDM_TX_2"
+#define LPASS_BE_SEC_TDM_RX_3 "SEC_TDM_RX_3"
+#define LPASS_BE_SEC_TDM_TX_3 "SEC_TDM_TX_3"
+#define LPASS_BE_SEC_TDM_RX_4 "SEC_TDM_RX_4"
+#define LPASS_BE_SEC_TDM_TX_4 "SEC_TDM_TX_4"
+#define LPASS_BE_SEC_TDM_RX_5 "SEC_TDM_RX_5"
+#define LPASS_BE_SEC_TDM_TX_5 "SEC_TDM_TX_5"
+#define LPASS_BE_SEC_TDM_RX_6 "SEC_TDM_RX_6"
+#define LPASS_BE_SEC_TDM_TX_6 "SEC_TDM_TX_6"
+#define LPASS_BE_SEC_TDM_RX_7 "SEC_TDM_RX_7"
+#define LPASS_BE_SEC_TDM_TX_7 "SEC_TDM_TX_7"
+#define LPASS_BE_TERT_TDM_RX_0 "TERT_TDM_RX_0"
+#define LPASS_BE_TERT_TDM_TX_0 "TERT_TDM_TX_0"
+#define LPASS_BE_TERT_TDM_RX_1 "TERT_TDM_RX_1"
+#define LPASS_BE_TERT_TDM_TX_1 "TERT_TDM_TX_1"
+#define LPASS_BE_TERT_TDM_RX_2 "TERT_TDM_RX_2"
+#define LPASS_BE_TERT_TDM_TX_2 "TERT_TDM_TX_2"
+#define LPASS_BE_TERT_TDM_RX_3 "TERT_TDM_RX_3"
+#define LPASS_BE_TERT_TDM_TX_3 "TERT_TDM_TX_3"
+#define LPASS_BE_TERT_TDM_RX_4 "TERT_TDM_RX_4"
+#define LPASS_BE_TERT_TDM_TX_4 "TERT_TDM_TX_4"
+#define LPASS_BE_TERT_TDM_RX_5 "TERT_TDM_RX_5"
+#define LPASS_BE_TERT_TDM_TX_5 "TERT_TDM_TX_5"
+#define LPASS_BE_TERT_TDM_RX_6 "TERT_TDM_RX_6"
+#define LPASS_BE_TERT_TDM_TX_6 "TERT_TDM_TX_6"
+#define LPASS_BE_TERT_TDM_RX_7 "TERT_TDM_RX_7"
+#define LPASS_BE_TERT_TDM_TX_7 "TERT_TDM_TX_7"
+#define LPASS_BE_QUAT_TDM_RX_0 "QUAT_TDM_RX_0"
+#define LPASS_BE_QUAT_TDM_TX_0 "QUAT_TDM_TX_0"
+#define LPASS_BE_QUAT_TDM_RX_1 "QUAT_TDM_RX_1"
+#define LPASS_BE_QUAT_TDM_TX_1 "QUAT_TDM_TX_1"
+#define LPASS_BE_QUAT_TDM_RX_2 "QUAT_TDM_RX_2"
+#define LPASS_BE_QUAT_TDM_TX_2 "QUAT_TDM_TX_2"
+#define LPASS_BE_QUAT_TDM_RX_3 "QUAT_TDM_RX_3"
+#define LPASS_BE_QUAT_TDM_TX_3 "QUAT_TDM_TX_3"
+#define LPASS_BE_QUAT_TDM_RX_4 "QUAT_TDM_RX_4"
+#define LPASS_BE_QUAT_TDM_TX_4 "QUAT_TDM_TX_4"
+#define LPASS_BE_QUAT_TDM_RX_5 "QUAT_TDM_RX_5"
+#define LPASS_BE_QUAT_TDM_TX_5 "QUAT_TDM_TX_5"
+#define LPASS_BE_QUAT_TDM_RX_6 "QUAT_TDM_RX_6"
+#define LPASS_BE_QUAT_TDM_TX_6 "QUAT_TDM_TX_6"
+#define LPASS_BE_QUAT_TDM_RX_7 "QUAT_TDM_RX_7"
+#define LPASS_BE_QUAT_TDM_TX_7 "QUAT_TDM_TX_7"
+
+#define LPASS_BE_SLIMBUS_7_RX "SLIMBUS_7_RX"
+#define LPASS_BE_SLIMBUS_7_TX "SLIMBUS_7_TX"
+#define LPASS_BE_SLIMBUS_8_RX "SLIMBUS_8_RX"
+#define LPASS_BE_SLIMBUS_8_TX "SLIMBUS_8_TX"
+
+#define LPASS_BE_USB_AUDIO_RX "USB_AUDIO_RX"
+#define LPASS_BE_USB_AUDIO_TX "USB_AUDIO_TX"
+
+#define LPASS_BE_INT0_MI2S_RX "INT0_MI2S_RX"
+#define LPASS_BE_INT0_MI2S_TX "INT0_MI2S_TX"
+#define LPASS_BE_INT1_MI2S_RX "INT1_MI2S_RX"
+#define LPASS_BE_INT1_MI2S_TX "INT1_MI2S_TX"
+#define LPASS_BE_INT2_MI2S_RX "INT2_MI2S_RX"
+#define LPASS_BE_INT2_MI2S_TX "INT2_MI2S_TX"
+#define LPASS_BE_INT3_MI2S_RX "INT3_MI2S_RX"
+#define LPASS_BE_INT3_MI2S_TX "INT3_MI2S_TX"
+#define LPASS_BE_INT4_MI2S_RX "INT4_MI2S_RX"
+#define LPASS_BE_INT4_MI2S_TX "INT4_MI2S_TX"
+#define LPASS_BE_INT5_MI2S_RX "INT5_MI2S_RX"
+#define LPASS_BE_INT5_MI2S_TX "INT5_MI2S_TX"
+#define LPASS_BE_INT6_MI2S_RX "INT6_MI2S_RX"
+#define LPASS_BE_INT6_MI2S_TX "INT6_MI2S_TX"
+/*
+ * For multimedia front-ends, the ASM session is allocated dynamically, so
+ * the ASM session to multimedia front-end mapping has to be maintained.
+ * For this reason, any additional multimedia front-ends must be placed
+ * before the non-multimedia front-ends.
+ */
+
+enum {
+	MSM_FRONTEND_DAI_MULTIMEDIA1 = 0,
+	MSM_FRONTEND_DAI_MULTIMEDIA2,
+	MSM_FRONTEND_DAI_MULTIMEDIA3,
+	MSM_FRONTEND_DAI_MULTIMEDIA4,
+	MSM_FRONTEND_DAI_MULTIMEDIA5,
+	MSM_FRONTEND_DAI_MULTIMEDIA6,
+	MSM_FRONTEND_DAI_MULTIMEDIA7,
+	MSM_FRONTEND_DAI_MULTIMEDIA8,
+	MSM_FRONTEND_DAI_MULTIMEDIA9,
+	MSM_FRONTEND_DAI_MULTIMEDIA10,
+	MSM_FRONTEND_DAI_MULTIMEDIA11,
+	MSM_FRONTEND_DAI_MULTIMEDIA12,
+	MSM_FRONTEND_DAI_MULTIMEDIA13,
+	MSM_FRONTEND_DAI_MULTIMEDIA14,
+	MSM_FRONTEND_DAI_MULTIMEDIA15,
+	MSM_FRONTEND_DAI_MULTIMEDIA16,
+	MSM_FRONTEND_DAI_MULTIMEDIA17,
+	MSM_FRONTEND_DAI_MULTIMEDIA18,
+	MSM_FRONTEND_DAI_MULTIMEDIA19,
+	MSM_FRONTEND_DAI_MULTIMEDIA20,
+	MSM_FRONTEND_DAI_MULTIMEDIA21,
+	MSM_FRONTEND_DAI_MULTIMEDIA22,
+	MSM_FRONTEND_DAI_MULTIMEDIA23,
+	MSM_FRONTEND_DAI_MULTIMEDIA24,
+	MSM_FRONTEND_DAI_MULTIMEDIA25,
+	MSM_FRONTEND_DAI_MULTIMEDIA26,
+	MSM_FRONTEND_DAI_MULTIMEDIA27,
+	MSM_FRONTEND_DAI_CS_VOICE,
+	MSM_FRONTEND_DAI_VOIP,
+	MSM_FRONTEND_DAI_AFE_RX,
+	MSM_FRONTEND_DAI_AFE_TX,
+	MSM_FRONTEND_DAI_VOICE_STUB,
+	MSM_FRONTEND_DAI_VOLTE,
+	MSM_FRONTEND_DAI_DTMF_RX,
+	MSM_FRONTEND_DAI_VOICE2,
+	MSM_FRONTEND_DAI_QCHAT,
+	MSM_FRONTEND_DAI_VOLTE_STUB,
+	MSM_FRONTEND_DAI_LSM1,
+	MSM_FRONTEND_DAI_LSM2,
+	MSM_FRONTEND_DAI_LSM3,
+	MSM_FRONTEND_DAI_LSM4,
+	MSM_FRONTEND_DAI_LSM5,
+	MSM_FRONTEND_DAI_LSM6,
+	MSM_FRONTEND_DAI_LSM7,
+	MSM_FRONTEND_DAI_LSM8,
+	MSM_FRONTEND_DAI_VOICE2_STUB,
+	MSM_FRONTEND_DAI_VOWLAN,
+	MSM_FRONTEND_DAI_VOICEMMODE1,
+	MSM_FRONTEND_DAI_VOICEMMODE2,
+	MSM_FRONTEND_DAI_MAX,
+};
+
+#define MSM_FRONTEND_DAI_MM_SIZE (MSM_FRONTEND_DAI_MULTIMEDIA27 + 1)
+#define MSM_FRONTEND_DAI_MM_MAX_ID MSM_FRONTEND_DAI_MULTIMEDIA27
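+
+/*
+ * MSM_FRONTEND_DAI_MM_SIZE is the number of multimedia front-ends; it is
+ * used as the bit count when the routing code walks the per-backend
+ * fe_sessions bitmaps with for_each_set_bit().
+ */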
+
+enum {
+	MSM_BACKEND_DAI_PRI_I2S_RX = 0,
+	MSM_BACKEND_DAI_PRI_I2S_TX,
+	MSM_BACKEND_DAI_SLIMBUS_0_RX,
+	MSM_BACKEND_DAI_SLIMBUS_0_TX,
+	MSM_BACKEND_DAI_HDMI_RX,
+	MSM_BACKEND_DAI_INT_BT_SCO_RX,
+	MSM_BACKEND_DAI_INT_BT_SCO_TX,
+	MSM_BACKEND_DAI_INT_FM_RX,
+	MSM_BACKEND_DAI_INT_FM_TX,
+	MSM_BACKEND_DAI_AFE_PCM_RX,
+	MSM_BACKEND_DAI_AFE_PCM_TX,
+	MSM_BACKEND_DAI_AUXPCM_RX,
+	MSM_BACKEND_DAI_AUXPCM_TX,
+	MSM_BACKEND_DAI_VOICE_PLAYBACK_TX,
+	MSM_BACKEND_DAI_VOICE2_PLAYBACK_TX,
+	MSM_BACKEND_DAI_INCALL_RECORD_RX,
+	MSM_BACKEND_DAI_INCALL_RECORD_TX,
+	MSM_BACKEND_DAI_MI2S_RX,
+	MSM_BACKEND_DAI_MI2S_TX,
+	MSM_BACKEND_DAI_SEC_I2S_RX,
+	MSM_BACKEND_DAI_SLIMBUS_1_RX,
+	MSM_BACKEND_DAI_SLIMBUS_1_TX,
+	MSM_BACKEND_DAI_SLIMBUS_2_RX,
+	MSM_BACKEND_DAI_SLIMBUS_2_TX,
+	MSM_BACKEND_DAI_SLIMBUS_3_RX,
+	MSM_BACKEND_DAI_SLIMBUS_3_TX,
+	MSM_BACKEND_DAI_SLIMBUS_4_RX,
+	MSM_BACKEND_DAI_SLIMBUS_4_TX,
+	MSM_BACKEND_DAI_SLIMBUS_5_RX,
+	MSM_BACKEND_DAI_SLIMBUS_5_TX,
+	MSM_BACKEND_DAI_SLIMBUS_6_RX,
+	MSM_BACKEND_DAI_SLIMBUS_6_TX,
+	MSM_BACKEND_DAI_SLIMBUS_7_RX,
+	MSM_BACKEND_DAI_SLIMBUS_7_TX,
+	MSM_BACKEND_DAI_SLIMBUS_8_RX,
+	MSM_BACKEND_DAI_SLIMBUS_8_TX,
+	MSM_BACKEND_DAI_EXTPROC_RX,
+	MSM_BACKEND_DAI_EXTPROC_TX,
+	MSM_BACKEND_DAI_EXTPROC_EC_TX,
+	MSM_BACKEND_DAI_QUATERNARY_MI2S_RX,
+	MSM_BACKEND_DAI_QUATERNARY_MI2S_TX,
+	MSM_BACKEND_DAI_SECONDARY_MI2S_RX,
+	MSM_BACKEND_DAI_SECONDARY_MI2S_TX,
+	MSM_BACKEND_DAI_PRI_MI2S_RX,
+	MSM_BACKEND_DAI_PRI_MI2S_TX,
+	MSM_BACKEND_DAI_TERTIARY_MI2S_RX,
+	MSM_BACKEND_DAI_TERTIARY_MI2S_TX,
+	MSM_BACKEND_DAI_AUDIO_I2S_RX,
+	MSM_BACKEND_DAI_SEC_AUXPCM_RX,
+	MSM_BACKEND_DAI_SEC_AUXPCM_TX,
+	MSM_BACKEND_DAI_SPDIF_RX,
+	MSM_BACKEND_DAI_SECONDARY_MI2S_RX_SD1,
+	MSM_BACKEND_DAI_QUINARY_MI2S_RX,
+	MSM_BACKEND_DAI_QUINARY_MI2S_TX,
+	MSM_BACKEND_DAI_SENARY_MI2S_TX,
+	MSM_BACKEND_DAI_PRI_TDM_RX_0,
+	MSM_BACKEND_DAI_PRI_TDM_TX_0,
+	MSM_BACKEND_DAI_PRI_TDM_RX_1,
+	MSM_BACKEND_DAI_PRI_TDM_TX_1,
+	MSM_BACKEND_DAI_PRI_TDM_RX_2,
+	MSM_BACKEND_DAI_PRI_TDM_TX_2,
+	MSM_BACKEND_DAI_PRI_TDM_RX_3,
+	MSM_BACKEND_DAI_PRI_TDM_TX_3,
+	MSM_BACKEND_DAI_PRI_TDM_RX_4,
+	MSM_BACKEND_DAI_PRI_TDM_TX_4,
+	MSM_BACKEND_DAI_PRI_TDM_RX_5,
+	MSM_BACKEND_DAI_PRI_TDM_TX_5,
+	MSM_BACKEND_DAI_PRI_TDM_RX_6,
+	MSM_BACKEND_DAI_PRI_TDM_TX_6,
+	MSM_BACKEND_DAI_PRI_TDM_RX_7,
+	MSM_BACKEND_DAI_PRI_TDM_TX_7,
+	MSM_BACKEND_DAI_SEC_TDM_RX_0,
+	MSM_BACKEND_DAI_SEC_TDM_TX_0,
+	MSM_BACKEND_DAI_SEC_TDM_RX_1,
+	MSM_BACKEND_DAI_SEC_TDM_TX_1,
+	MSM_BACKEND_DAI_SEC_TDM_RX_2,
+	MSM_BACKEND_DAI_SEC_TDM_TX_2,
+	MSM_BACKEND_DAI_SEC_TDM_RX_3,
+	MSM_BACKEND_DAI_SEC_TDM_TX_3,
+	MSM_BACKEND_DAI_SEC_TDM_RX_4,
+	MSM_BACKEND_DAI_SEC_TDM_TX_4,
+	MSM_BACKEND_DAI_SEC_TDM_RX_5,
+	MSM_BACKEND_DAI_SEC_TDM_TX_5,
+	MSM_BACKEND_DAI_SEC_TDM_RX_6,
+	MSM_BACKEND_DAI_SEC_TDM_TX_6,
+	MSM_BACKEND_DAI_SEC_TDM_RX_7,
+	MSM_BACKEND_DAI_SEC_TDM_TX_7,
+	MSM_BACKEND_DAI_TERT_TDM_RX_0,
+	MSM_BACKEND_DAI_TERT_TDM_TX_0,
+	MSM_BACKEND_DAI_TERT_TDM_RX_1,
+	MSM_BACKEND_DAI_TERT_TDM_TX_1,
+	MSM_BACKEND_DAI_TERT_TDM_RX_2,
+	MSM_BACKEND_DAI_TERT_TDM_TX_2,
+	MSM_BACKEND_DAI_TERT_TDM_RX_3,
+	MSM_BACKEND_DAI_TERT_TDM_TX_3,
+	MSM_BACKEND_DAI_TERT_TDM_RX_4,
+	MSM_BACKEND_DAI_TERT_TDM_TX_4,
+	MSM_BACKEND_DAI_TERT_TDM_RX_5,
+	MSM_BACKEND_DAI_TERT_TDM_TX_5,
+	MSM_BACKEND_DAI_TERT_TDM_RX_6,
+	MSM_BACKEND_DAI_TERT_TDM_TX_6,
+	MSM_BACKEND_DAI_TERT_TDM_RX_7,
+	MSM_BACKEND_DAI_TERT_TDM_TX_7,
+	MSM_BACKEND_DAI_QUAT_TDM_RX_0,
+	MSM_BACKEND_DAI_QUAT_TDM_TX_0,
+	MSM_BACKEND_DAI_QUAT_TDM_RX_1,
+	MSM_BACKEND_DAI_QUAT_TDM_TX_1,
+	MSM_BACKEND_DAI_QUAT_TDM_RX_2,
+	MSM_BACKEND_DAI_QUAT_TDM_TX_2,
+	MSM_BACKEND_DAI_QUAT_TDM_RX_3,
+	MSM_BACKEND_DAI_QUAT_TDM_TX_3,
+	MSM_BACKEND_DAI_QUAT_TDM_RX_4,
+	MSM_BACKEND_DAI_QUAT_TDM_TX_4,
+	MSM_BACKEND_DAI_QUAT_TDM_RX_5,
+	MSM_BACKEND_DAI_QUAT_TDM_TX_5,
+	MSM_BACKEND_DAI_QUAT_TDM_RX_6,
+	MSM_BACKEND_DAI_QUAT_TDM_TX_6,
+	MSM_BACKEND_DAI_QUAT_TDM_RX_7,
+	MSM_BACKEND_DAI_QUAT_TDM_TX_7,
+	MSM_BACKEND_DAI_INT_BT_A2DP_RX,
+	MSM_BACKEND_DAI_USB_RX,
+	MSM_BACKEND_DAI_USB_TX,
+	MSM_BACKEND_DAI_DISPLAY_PORT_RX,
+	MSM_BACKEND_DAI_TERT_AUXPCM_RX,
+	MSM_BACKEND_DAI_TERT_AUXPCM_TX,
+	MSM_BACKEND_DAI_QUAT_AUXPCM_RX,
+	MSM_BACKEND_DAI_QUAT_AUXPCM_TX,
+	MSM_BACKEND_DAI_INT0_MI2S_RX,
+	MSM_BACKEND_DAI_INT0_MI2S_TX,
+	MSM_BACKEND_DAI_INT1_MI2S_RX,
+	MSM_BACKEND_DAI_INT1_MI2S_TX,
+	MSM_BACKEND_DAI_INT2_MI2S_RX,
+	MSM_BACKEND_DAI_INT2_MI2S_TX,
+	MSM_BACKEND_DAI_INT3_MI2S_RX,
+	MSM_BACKEND_DAI_INT3_MI2S_TX,
+	MSM_BACKEND_DAI_INT4_MI2S_RX,
+	MSM_BACKEND_DAI_INT4_MI2S_TX,
+	MSM_BACKEND_DAI_INT5_MI2S_RX,
+	MSM_BACKEND_DAI_INT5_MI2S_TX,
+	MSM_BACKEND_DAI_INT6_MI2S_RX,
+	MSM_BACKEND_DAI_INT6_MI2S_TX,
+	MSM_BACKEND_DAI_MAX,
+};
+
+enum msm_pcm_routing_event {
+	MSM_PCM_RT_EVT_BUF_RECFG,
+	MSM_PCM_RT_EVT_DEVSWITCH,
+	MSM_PCM_RT_EVT_MAX,
+};
+
+enum {
+	EXT_EC_REF_NONE = 0,
+	EXT_EC_REF_PRI_MI2S_TX,
+	EXT_EC_REF_SEC_MI2S_TX,
+	EXT_EC_REF_TERT_MI2S_TX,
+	EXT_EC_REF_QUAT_MI2S_TX,
+	EXT_EC_REF_QUIN_MI2S_TX,
+	EXT_EC_REF_SLIM_1_TX,
+};
+
+#define INVALID_SESSION (-1)
+#define SESSION_TYPE_RX 0
+#define SESSION_TYPE_TX 1
+#define MAX_SESSION_TYPES 2
+#define INT_RX_VOL_MAX_STEPS 0x2000
+#define INT_RX_VOL_GAIN 0x2000
+
+#define RELEASE_LOCK	0
+#define ACQUIRE_LOCK	1
+
+#define HDMI_RX_ID				0x8001
+
+enum {
+	ADM_PP_PARAM_MUTE_ID,
+	ADM_PP_PARAM_LATENCY_ID,
+	ADM_PP_PARAM_LIMITER_ID
+};
+
+enum {
+	ADM_PP_PARAM_MUTE_BIT		= 0x1,
+	ADM_PP_PARAM_LATENCY_BIT	= 0x2,
+	ADM_PP_PARAM_LIMITER_BIT	= 0x4
+};
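+
+/*
+ * pp_params_config in the per-backend msm_bedais_pp_params state is used
+ * as a bitmask of the parameters above, recording which device PP values
+ * (mute, latency, ...) have been cached for a backend; see
+ * msm_routing_put_device_pp_params_mixer() in msm-pcm-routing-v2.c.
+ */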
+
+#define BE_DAI_PORT_SESSIONS_IDX_MAX		4
+#define BE_DAI_FE_SESSIONS_IDX_MAX		2
+
+enum {
+	ADM_TOPOLOGY_CAL_TYPE_IDX = 0,
+	ADM_LSM_TOPOLOGY_CAL_TYPE_IDX,
+	MAX_ROUTING_CAL_TYPES
+};
+
+struct msm_pcm_routing_evt {
+	void (*event_func)(enum msm_pcm_routing_event, void *);
+	void *priv_data;
+};
+
+struct msm_pcm_routing_bdai_data {
+	u16 port_id; /* AFE port ID */
+	u8 active; /* track if this backend is enabled */
+
+	/* Front-end sessions */
+	unsigned long fe_sessions[BE_DAI_FE_SESSIONS_IDX_MAX];
+	/*
+	 * Track Tx BE ports -> Rx BE ports.
+	 * port_sessions[0] used to track BE 0 to BE 63.
+	 * port_sessions[1] used to track BE 64 to BE 127.
+	 * port_sessions[2] used to track BE 128 to BE 191.
+	 * port_sessions[3] used to track BE 192 to BE 255.
+	 */
+	u64 port_sessions[BE_DAI_PORT_SESSIONS_IDX_MAX];
+
+	unsigned int  sample_rate;
+	unsigned int  channel;
+	unsigned int  format;
+	unsigned int  adm_override_ch;
+	u32 passthr_mode[MSM_FRONTEND_DAI_MAX];
+	char *name;
+};
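+
+/*
+ * A minimal sketch of the port_sessions encoding documented above:
+ * backend id N lives in bit (N % 64) of element (N / 64). The helpers
+ * below are illustrative only and are not part of the driver:
+ *
+ *	static inline void be_port_session_set(u64 *sessions, int be_id)
+ *	{
+ *		sessions[be_id / 64] |= 1ULL << (be_id % 64);
+ *	}
+ *
+ *	static inline bool be_port_session_test(const u64 *sessions, int be_id)
+ *	{
+ *		return sessions[be_id / 64] & (1ULL << (be_id % 64));
+ *	}
+ */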
+
+struct msm_pcm_routing_fdai_data {
+	u16 be_srate; /* track prior backend sample rate for flushing purpose */
+	int strm_id; /* ASM stream ID */
+	int perf_mode;
+	struct msm_pcm_routing_evt event_info;
+};
+
+#define MAX_APP_TYPES	16
+struct msm_pcm_routing_app_type_data {
+	int app_type;
+	u32 sample_rate;
+	int bit_width;
+};
+
+struct msm_pcm_stream_app_type_cfg {
+	int app_type;
+	int acdb_dev_id;
+	int sample_rate;
+};
+
+/*
+ * dai_id: front-end ID
+ * dspst_id: DSP audio stream ID
+ * stream_type: playback or capture
+ */
+int msm_pcm_routing_reg_phy_stream(int fedai_id, int perf_mode, int dspst_id,
+				   int stream_type);
+void msm_pcm_routing_reg_psthr_stream(int fedai_id, int dspst_id,
+		int stream_type);
+int msm_pcm_routing_reg_phy_compr_stream(int fedai_id, int perf_mode,
+					  int dspst_id, int stream_type,
+					  uint32_t compr_passthr);
+
+int msm_pcm_routing_reg_phy_stream_v2(int fedai_id, int perf_mode,
+				      int dspst_id, int stream_type,
+				      struct msm_pcm_routing_evt event_info);
+
+void msm_pcm_routing_dereg_phy_stream(int fedai_id, int stream_type);
+
+int msm_routing_check_backend_enabled(int fedai_id);
+
+void msm_pcm_routing_get_bedai_info(int be_idx,
+				    struct msm_pcm_routing_bdai_data *bedai);
+void msm_pcm_routing_get_fedai_info(int fe_idx, int sess_type,
+				    struct msm_pcm_routing_fdai_data *fe_dai);
+void msm_pcm_routing_acquire_lock(void);
+void msm_pcm_routing_release_lock(void);
+
+int msm_pcm_routing_reg_stream_app_type_cfg(
+	int fedai_id, int session_type, int be_id,
+	struct msm_pcm_stream_app_type_cfg *cfg_data);
+int msm_pcm_routing_get_stream_app_type_cfg(
+	int fedai_id, int session_type, int *be_id,
+	struct msm_pcm_stream_app_type_cfg *cfg_data);
+int msm_routing_set_downmix_control_data(int be_id, int session_id,
+				 struct asm_stream_pan_ctrl_params *pan_param);
+#endif /* _MSM_PCM_ROUTING_H */
diff -Nruw linux-4.4.115-fbx/sound/soc/msm./qdsp6v2/msm-pcm-voice-v2.c linux-4.4.115-fbx/sound/soc/msm/qdsp6v2/msm-pcm-voice-v2.c
--- linux-4.4.115-fbx/sound/soc/msm./qdsp6v2/msm-pcm-voice-v2.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/sound/soc/msm/qdsp6v2/msm-pcm-voice-v2.c	2019-10-29 09:26:26.157227780 +0100
@@ -0,0 +1,782 @@
+/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/time.h>
+#include <linux/wait.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/dma-mapping.h>
+#include <sound/core.h>
+#include <sound/soc.h>
+#include <sound/soc-dapm.h>
+#include <sound/pcm.h>
+#include <sound/initval.h>
+#include <sound/control.h>
+#include <asm/dma.h>
+#include <linux/of_device.h>
+
+#include "msm-pcm-voice-v2.h"
+#include "q6voice.h"
+
+static struct msm_voice voice_info[VOICE_SESSION_INDEX_MAX];
+
+static struct snd_pcm_hardware msm_pcm_hardware = {
+	.info =                 (SNDRV_PCM_INFO_INTERLEAVED |
+				SNDRV_PCM_INFO_PAUSE |
+				SNDRV_PCM_INFO_RESUME),
+	.formats =              SNDRV_PCM_FMTBIT_S16_LE,
+	.rates =                SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000,
+	.rate_min =             8000,
+	.rate_max =             16000,
+	.channels_min =         1,
+	.channels_max =         1,
+
+	.buffer_bytes_max =     4096 * 2,
+	.period_bytes_min =     2048,
+	.period_bytes_max =     4096,
+	.periods_min =          2,
+	.periods_max =          4,
+
+	.fifo_size =            0,
+};
+
+static bool is_volte(struct msm_voice *pvolte)
+{
+	return pvolte == &voice_info[VOLTE_SESSION_INDEX];
+}
+
+static bool is_voice2(struct msm_voice *pvoice2)
+{
+	return pvoice2 == &voice_info[VOICE2_SESSION_INDEX];
+}
+
+static bool is_qchat(struct msm_voice *pqchat)
+{
+	return pqchat == &voice_info[QCHAT_SESSION_INDEX];
+}
+
+static bool is_vowlan(struct msm_voice *pvowlan)
+{
+	return pvowlan == &voice_info[VOWLAN_SESSION_INDEX];
+}
+
+static bool is_voicemmode1(struct msm_voice *pvoicemmode1)
+{
+	return pvoicemmode1 == &voice_info[VOICEMMODE1_INDEX];
+}
+
+static bool is_voicemmode2(struct msm_voice *pvoicemmode2)
+{
+	return pvoicemmode2 == &voice_info[VOICEMMODE2_INDEX];
+}
+
+static uint32_t get_session_id(struct msm_voice *pvoc)
+{
+	uint32_t session_id = 0;
+
+	if (is_volte(pvoc))
+		session_id = voc_get_session_id(VOLTE_SESSION_NAME);
+	else if (is_voice2(pvoc))
+		session_id = voc_get_session_id(VOICE2_SESSION_NAME);
+	else if (is_qchat(pvoc))
+		session_id = voc_get_session_id(QCHAT_SESSION_NAME);
+	else if (is_vowlan(pvoc))
+		session_id = voc_get_session_id(VOWLAN_SESSION_NAME);
+	else if (is_voicemmode1(pvoc))
+		session_id = voc_get_session_id(VOICEMMODE1_NAME);
+	else if (is_voicemmode2(pvoc))
+		session_id = voc_get_session_id(VOICEMMODE2_NAME);
+	else
+		session_id = voc_get_session_id(VOICE_SESSION_NAME);
+
+	return session_id;
+}
+
+static int msm_pcm_playback_prepare(struct snd_pcm_substream *substream)
+{
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	struct msm_voice *prtd = runtime->private_data;
+
+	pr_debug("%s\n", __func__);
+
+	if (!prtd->playback_start)
+		prtd->playback_start = 1;
+
+	return 0;
+}
+
+static int msm_pcm_capture_prepare(struct snd_pcm_substream *substream)
+{
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	struct msm_voice *prtd = runtime->private_data;
+
+	pr_debug("%s\n", __func__);
+
+	if (!prtd->capture_start)
+		prtd->capture_start = 1;
+
+	return 0;
+}
+
+static int msm_pcm_open(struct snd_pcm_substream *substream)
+{
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	struct msm_voice *voice;
+
+	if (!strncmp("VoLTE", substream->pcm->id, 5)) {
+		voice = &voice_info[VOLTE_SESSION_INDEX];
+		pr_debug("%s: Open VoLTE Substream Id=%s\n",
+			 __func__, substream->pcm->id);
+	} else if (!strncmp("Voice2", substream->pcm->id, 6)) {
+		voice = &voice_info[VOICE2_SESSION_INDEX];
+		pr_debug("%s: Open Voice2 Substream Id=%s\n",
+			 __func__, substream->pcm->id);
+	} else if (!strncmp("QCHAT", substream->pcm->id, 5)) {
+		voice = &voice_info[QCHAT_SESSION_INDEX];
+		pr_debug("%s: Open QCHAT Substream Id=%s\n",
+			 __func__, substream->pcm->id);
+	} else if (!strncmp("VoWLAN", substream->pcm->id, 6)) {
+		voice = &voice_info[VOWLAN_SESSION_INDEX];
+		pr_debug("%s: Open VoWLAN Substream Id=%s\n",
+			 __func__, substream->pcm->id);
+	} else if (!strncmp("VoiceMMode1", substream->pcm->id, 11)) {
+		voice = &voice_info[VOICEMMODE1_INDEX];
+		pr_debug("%s: Open VoiceMMode1 Substream Id=%s\n",
+			 __func__, substream->pcm->id);
+	} else if (!strncmp("VoiceMMode2", substream->pcm->id, 11)) {
+		voice = &voice_info[VOICEMMODE2_INDEX];
+		pr_debug("%s: Open VoiceMMode2 Substream Id=%s\n",
+			 __func__, substream->pcm->id);
+	} else {
+		voice = &voice_info[VOICE_SESSION_INDEX];
+		pr_debug("%s: Open VOICE Substream Id=%s\n",
+			 __func__, substream->pcm->id);
+	}
+	mutex_lock(&voice->lock);
+
+	runtime->hw = msm_pcm_hardware;
+
+	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+		voice->playback_substream = substream;
+	else if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
+		voice->capture_substream = substream;
+
+	voice->instance++;
+	pr_debug("%s: Instance = %d, Stream ID = %s\n",
+			__func__, voice->instance, substream->pcm->id);
+	runtime->private_data = voice;
+
+	mutex_unlock(&voice->lock);
+
+	return 0;
+}
+
+static int msm_pcm_playback_close(struct snd_pcm_substream *substream)
+{
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	struct msm_voice *prtd = runtime->private_data;
+
+	pr_debug("%s\n", __func__);
+
+	if (prtd->playback_start)
+		prtd->playback_start = 0;
+
+	prtd->playback_substream = NULL;
+
+	return 0;
+}
+
+static int msm_pcm_capture_close(struct snd_pcm_substream *substream)
+{
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	struct msm_voice *prtd = runtime->private_data;
+
+	pr_debug("%s\n", __func__);
+
+	if (prtd->capture_start)
+		prtd->capture_start = 0;
+	prtd->capture_substream = NULL;
+
+	return 0;
+}
+
+static int msm_pcm_close(struct snd_pcm_substream *substream)
+{
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	struct msm_voice *prtd = runtime->private_data;
+	uint32_t session_id = 0;
+	int ret = 0;
+
+	mutex_lock(&prtd->lock);
+	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+		ret = msm_pcm_playback_close(substream);
+	else if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
+		ret = msm_pcm_capture_close(substream);
+
+	prtd->instance--;
+	if (!prtd->playback_start && !prtd->capture_start) {
+		pr_debug("end voice call\n");
+
+		session_id = get_session_id(prtd);
+		if (session_id)
+			voc_end_voice_call(session_id);
+	}
+	mutex_unlock(&prtd->lock);
+
+	return ret;
+}
+
+static int msm_pcm_prepare(struct snd_pcm_substream *substream)
+{
+	int ret = 0;
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	struct msm_voice *prtd = runtime->private_data;
+	uint32_t session_id = 0;
+
+	mutex_lock(&prtd->lock);
+
+	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+		ret = msm_pcm_playback_prepare(substream);
+	else if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
+		ret = msm_pcm_capture_prepare(substream);
+
+	if (prtd->playback_start && prtd->capture_start) {
+		session_id = get_session_id(prtd);
+		if (session_id)
+			voc_start_voice_call(session_id);
+	}
+	mutex_unlock(&prtd->lock);
+
+	return ret;
+}
+
+static int msm_pcm_hw_params(struct snd_pcm_substream *substream,
+				struct snd_pcm_hw_params *params)
+{
+	pr_debug("%s: Voice\n", __func__);
+
+	snd_pcm_set_runtime_buffer(substream, &substream->dma_buffer);
+
+	return 0;
+}
+
+static int msm_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
+{
+	int ret = 0;
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	struct msm_voice *prtd = runtime->private_data;
+	uint32_t session_id = 0;
+
+	pr_debug("%s: cmd = %d\n", __func__, cmd);
+
+	session_id = get_session_id(prtd);
+
+	switch (cmd) {
+	case SNDRV_PCM_TRIGGER_START:
+	case SNDRV_PCM_TRIGGER_STOP:
+		pr_debug("Start & Stop Voice call not handled in Trigger.\n");
+		break;
+	case SNDRV_PCM_TRIGGER_RESUME:
+	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+		pr_debug("%s: resume call session_id = %d\n", __func__,
+			 session_id);
+		if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+			ret = msm_pcm_playback_prepare(substream);
+		else if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
+			ret = msm_pcm_capture_prepare(substream);
+		if (prtd->playback_start && prtd->capture_start) {
+			if (session_id)
+				voc_resume_voice_call(session_id);
+		}
+		break;
+	case SNDRV_PCM_TRIGGER_SUSPEND:
+	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+		pr_debug("%s: pause call session_id=%d\n",
+			 __func__, session_id);
+		if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+			if (prtd->playback_start)
+				prtd->playback_start = 0;
+		} else if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) {
+			if (prtd->capture_start)
+				prtd->capture_start = 0;
+		}
+		if (session_id)
+			voc_standby_voice_call(session_id);
+		break;
+	default:
+		ret = -EINVAL;
+		break;
+	}
+	return ret;
+}
+
+static int msm_pcm_ioctl(struct snd_pcm_substream *substream,
+			 unsigned int cmd, void *arg)
+{
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	struct msm_voice *prtd = runtime->private_data;
+	uint32_t session_id = get_session_id(prtd);
+	enum voice_lch_mode lch_mode;
+	int ret = 0;
+
+	switch (cmd) {
+	case SNDRV_VOICE_IOCTL_LCH:
+		if (copy_from_user(&lch_mode, (void *)arg,
+				   sizeof(enum voice_lch_mode))) {
+			pr_err("%s: Copy from user failed, size %zd\n",
+				__func__, sizeof(enum voice_lch_mode));
+
+			ret = -EFAULT;
+			break;
+		}
+
+		pr_debug("%s: %s lch_mode:%d\n",
+			 __func__, substream->pcm->id, lch_mode);
+
+		switch (lch_mode) {
+		case VOICE_LCH_START:
+		case VOICE_LCH_STOP:
+			ret = voc_set_lch(session_id, lch_mode);
+			break;
+
+		default:
+			pr_err("%s: Invalid LCH MODE %d\n", __func__, lch_mode);
+
+			ret = -EFAULT;
+		}
+
+		break;
+	default:
+		pr_debug("%s: Falling into default snd_lib_ioctl cmd 0x%x\n",
+			 __func__, cmd);
+
+		ret = snd_pcm_lib_ioctl(substream, cmd, arg);
+		break;
+	}
+
+	if (!ret)
+		pr_debug("%s: ret %d\n", __func__, ret);
+	else
+		pr_err("%s: cmd 0x%x failed %d\n", __func__, cmd, ret);
+
+	return ret;
+}
+
+static int msm_voice_sidetone_put(struct snd_kcontrol *kcontrol,
+					struct snd_ctl_elem_value *ucontrol)
+{
+	int ret;
+	bool sidetone_enable = ucontrol->value.integer.value[0];
+	uint32_t session_id = ALL_SESSION_VSID;
+
+	ret = voc_set_afe_sidetone(session_id, sidetone_enable);
+	pr_debug("%s: AFE Sidetone enable=%d session_id=0x%x ret=%d\n",
+		 __func__, sidetone_enable, session_id, ret);
+	return ret;
+}
+
+static int msm_voice_sidetone_get(struct snd_kcontrol *kcontrol,
+					struct snd_ctl_elem_value *ucontrol)
+{
+	ucontrol->value.integer.value[0] = voc_get_afe_sidetone();
+	return 0;
+}
+
+static int msm_voice_gain_put(struct snd_kcontrol *kcontrol,
+			      struct snd_ctl_elem_value *ucontrol)
+{
+	int ret = 0;
+	int volume = ucontrol->value.integer.value[0];
+	uint32_t session_id = ucontrol->value.integer.value[1];
+	int ramp_duration = ucontrol->value.integer.value[2];
+
+	if ((volume < 0) || (ramp_duration < 0)
+		|| (ramp_duration > MAX_RAMP_DURATION)) {
+		pr_err(" %s Invalid arguments", __func__);
+
+		ret = -EINVAL;
+		goto done;
+	}
+
+	pr_debug("%s: volume: %d session_id: %#x ramp_duration: %d\n", __func__,
+		volume, session_id, ramp_duration);
+
+	voc_set_rx_vol_step(session_id, RX_PATH, volume, ramp_duration);
+
+done:
+	return ret;
+}
+
+static int msm_voice_mute_put(struct snd_kcontrol *kcontrol,
+			      struct snd_ctl_elem_value *ucontrol)
+{
+	int ret = 0;
+	int mute = ucontrol->value.integer.value[0];
+	uint32_t session_id = ucontrol->value.integer.value[1];
+	int ramp_duration = ucontrol->value.integer.value[2];
+
+	if ((mute < 0) || (mute > 1) || (ramp_duration < 0)
+		|| (ramp_duration > MAX_RAMP_DURATION)) {
+		pr_err(" %s Invalid arguments", __func__);
+
+		ret = -EINVAL;
+		goto done;
+	}
+
+	pr_debug("%s: mute=%d session_id=%#x ramp_duration=%d\n", __func__,
+		mute, session_id, ramp_duration);
+
+	ret = voc_set_tx_mute(session_id, TX_PATH, mute, ramp_duration);
+
+done:
+	return ret;
+}
+
+static int msm_voice_tx_device_mute_put(struct snd_kcontrol *kcontrol,
+					struct snd_ctl_elem_value *ucontrol)
+{
+	int ret = 0;
+	int mute = ucontrol->value.integer.value[0];
+	uint32_t session_id = ucontrol->value.integer.value[1];
+	int ramp_duration = ucontrol->value.integer.value[2];
+
+	if ((mute < 0) || (mute > 1) || (ramp_duration < 0) ||
+	    (ramp_duration > MAX_RAMP_DURATION)) {
+		pr_err(" %s Invalid arguments", __func__);
+
+		ret = -EINVAL;
+		goto done;
+	}
+
+	pr_debug("%s: mute=%d session_id=%#x ramp_duration=%d\n", __func__,
+		 mute, session_id, ramp_duration);
+
+	ret = voc_set_device_mute(session_id, VSS_IVOLUME_DIRECTION_TX,
+				  mute, ramp_duration);
+
+done:
+	return ret;
+}
+
+static int msm_voice_rx_device_mute_put(struct snd_kcontrol *kcontrol,
+					struct snd_ctl_elem_value *ucontrol)
+{
+	int ret = 0;
+	int mute = ucontrol->value.integer.value[0];
+	uint32_t session_id = ucontrol->value.integer.value[1];
+	int ramp_duration = ucontrol->value.integer.value[2];
+
+	if ((mute < 0) || (mute > 1) || (ramp_duration < 0) ||
+	    (ramp_duration > MAX_RAMP_DURATION)) {
+		pr_err(" %s Invalid arguments", __func__);
+
+		ret = -EINVAL;
+		goto done;
+	}
+
+	pr_debug("%s: mute=%d session_id=%#x ramp_duration=%d\n", __func__,
+		 mute, session_id, ramp_duration);
+
+	voc_set_device_mute(session_id, VSS_IVOLUME_DIRECTION_RX,
+			    mute, ramp_duration);
+
+done:
+	return ret;
+}
+
+
+static const char * const tty_mode[] = {"OFF", "HCO", "VCO", "FULL"};
+static const struct soc_enum msm_tty_mode_enum[] = {
+		SOC_ENUM_SINGLE_EXT(4, tty_mode),
+};
+
+static int msm_voice_tty_mode_get(struct snd_kcontrol *kcontrol,
+				struct snd_ctl_elem_value *ucontrol)
+{
+	ucontrol->value.integer.value[0] =
+		voc_get_tty_mode(voc_get_session_id(VOICE_SESSION_NAME));
+	return 0;
+}
+
+static int msm_voice_tty_mode_put(struct snd_kcontrol *kcontrol,
+				struct snd_ctl_elem_value *ucontrol)
+{
+	int tty_mode = ucontrol->value.integer.value[0];
+
+	pr_debug("%s: tty_mode=%d\n", __func__, tty_mode);
+
+	voc_set_tty_mode(voc_get_session_id(VOICE_SESSION_NAME), tty_mode);
+	voc_set_tty_mode(voc_get_session_id(VOICE2_SESSION_NAME), tty_mode);
+	voc_set_tty_mode(voc_get_session_id(VOLTE_SESSION_NAME), tty_mode);
+	voc_set_tty_mode(voc_get_session_id(VOWLAN_SESSION_NAME), tty_mode);
+	voc_set_tty_mode(voc_get_session_id(VOICEMMODE1_NAME), tty_mode);
+	voc_set_tty_mode(voc_get_session_id(VOICEMMODE2_NAME), tty_mode);
+
+	return 0;
+}
+
+static int msm_voice_slowtalk_put(struct snd_kcontrol *kcontrol,
+			struct snd_ctl_elem_value *ucontrol)
+{
+	int st_enable = ucontrol->value.integer.value[0];
+	uint32_t session_id = ucontrol->value.integer.value[1];
+
+	pr_debug("%s: st enable=%d session_id=%#x\n", __func__, st_enable,
+		 session_id);
+
+	voc_set_pp_enable(session_id,
+			  MODULE_ID_VOICE_MODULE_ST, st_enable);
+
+	return 0;
+}
+
+static int msm_voice_hd_voice_put(struct snd_kcontrol *kcontrol,
+				  struct snd_ctl_elem_value *ucontrol)
+{
+	int ret = 0;
+	uint32_t hd_enable = ucontrol->value.integer.value[0];
+	uint32_t session_id = ucontrol->value.integer.value[1];
+
+	pr_debug("%s: HD Voice enable=%d session_id=%#x\n", __func__, hd_enable,
+		 session_id);
+
+	ret = voc_set_hd_enable(session_id, hd_enable);
+
+	return ret;
+}
+
+static int msm_voice_topology_disable_put(struct snd_kcontrol *kcontrol,
+					  struct snd_ctl_elem_value *ucontrol)
+{
+	int ret = 0;
+	int disable = ucontrol->value.integer.value[0];
+	uint32_t session_id = ucontrol->value.integer.value[1];
+
+	if ((disable < 0) || (disable > 1)) {
+		pr_err(" %s Invalid arguments: %d\n", __func__, disable);
+
+		ret = -EINVAL;
+		goto done;
+	}
+	pr_debug("%s: disable = %d, session_id = %d\n", __func__, disable,
+		 session_id);
+
+	ret = voc_disable_topology(session_id, disable);
+
+done:
+	return ret;
+}
+
+static int msm_voice_cvd_version_info(struct snd_kcontrol *kcontrol,
+				      struct snd_ctl_elem_info *uinfo)
+{
+	int ret = 0;
+
+	pr_debug("%s:\n", __func__);
+
+	uinfo->type = SNDRV_CTL_ELEM_TYPE_BYTES;
+	uinfo->count = CVD_VERSION_STRING_MAX_SIZE;
+
+	return ret;
+}
+
+static int msm_voice_cvd_version_get(struct snd_kcontrol *kcontrol,
+				     struct snd_ctl_elem_value *ucontrol)
+{
+	char cvd_version[CVD_VERSION_STRING_MAX_SIZE] = CVD_VERSION_DEFAULT;
+	int ret;
+
+	pr_debug("%s:\n", __func__);
+
+	ret = voc_get_cvd_version(cvd_version);
+
+	if (ret)
+		pr_err("%s: Error retrieving CVD version, error:%d\n",
+			__func__, ret);
+
+	memcpy(ucontrol->value.bytes.data, cvd_version, sizeof(cvd_version));
+
+	return 0;
+}
+
+static struct snd_kcontrol_new msm_voice_controls[] = {
+	SOC_SINGLE_MULTI_EXT("Voice Rx Device Mute", SND_SOC_NOPM, 0, VSID_MAX,
+				0, 3, NULL, msm_voice_rx_device_mute_put),
+	SOC_SINGLE_MULTI_EXT("Voice Tx Device Mute", SND_SOC_NOPM, 0, VSID_MAX,
+				0, 3, NULL, msm_voice_tx_device_mute_put),
+	SOC_SINGLE_MULTI_EXT("Voice Tx Mute", SND_SOC_NOPM, 0, VSID_MAX,
+				0, 3, NULL, msm_voice_mute_put),
+	SOC_SINGLE_MULTI_EXT("Voice Rx Gain", SND_SOC_NOPM, 0, VSID_MAX, 0, 3,
+				NULL, msm_voice_gain_put),
+	SOC_ENUM_EXT("TTY Mode", msm_tty_mode_enum[0], msm_voice_tty_mode_get,
+				msm_voice_tty_mode_put),
+	SOC_SINGLE_MULTI_EXT("Slowtalk Enable", SND_SOC_NOPM, 0, VSID_MAX, 0, 2,
+				NULL, msm_voice_slowtalk_put),
+	SOC_SINGLE_MULTI_EXT("Voice Topology Disable", SND_SOC_NOPM, 0,
+			     VSID_MAX, 0, 2, NULL,
+			     msm_voice_topology_disable_put),
+	SOC_SINGLE_MULTI_EXT("HD Voice Enable", SND_SOC_NOPM, 0, VSID_MAX, 0, 2,
+			     NULL, msm_voice_hd_voice_put),
+	{
+		.access = SNDRV_CTL_ELEM_ACCESS_READ,
+		.iface	= SNDRV_CTL_ELEM_IFACE_MIXER,
+		.name	= "CVD Version",
+		.info	= msm_voice_cvd_version_info,
+		.get	= msm_voice_cvd_version_get,
+	},
+	SOC_SINGLE_MULTI_EXT("Voice Sidetone Enable", SND_SOC_NOPM, 0, 1, 0, 1,
+			     msm_voice_sidetone_get, msm_voice_sidetone_put),
+};
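+
+/*
+ * The SOC_SINGLE_MULTI_EXT controls above take their values positionally,
+ * e.g. "Voice Rx Gain" expects (volume, session VSID, ramp duration) as
+ * consumed by msm_voice_gain_put(). A hedged tinyalsa sketch of driving it
+ * from userspace (the card number and the volume/vsid/ramp_duration values
+ * are assumptions):
+ *
+ *	struct mixer *mixer = mixer_open(0);
+ *	struct mixer_ctl *c = mixer_get_ctl_by_name(mixer, "Voice Rx Gain");
+ *
+ *	mixer_ctl_set_value(c, 0, volume);
+ *	mixer_ctl_set_value(c, 1, vsid);
+ *	mixer_ctl_set_value(c, 2, ramp_duration);
+ *	mixer_close(mixer);
+ */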
+
+static struct snd_pcm_ops msm_pcm_ops = {
+	.open			= msm_pcm_open,
+	.hw_params		= msm_pcm_hw_params,
+	.close			= msm_pcm_close,
+	.prepare		= msm_pcm_prepare,
+	.trigger		= msm_pcm_trigger,
+	.ioctl			= msm_pcm_ioctl,
+	.compat_ioctl		= msm_pcm_ioctl,
+};
+
+static int msm_asoc_pcm_new(struct snd_soc_pcm_runtime *rtd)
+{
+	struct snd_card *card = rtd->card->snd_card;
+	int ret = 0;
+
+	if (!card->dev->coherent_dma_mask)
+		card->dev->coherent_dma_mask = DMA_BIT_MASK(32);
+	return ret;
+}
+
+static int msm_pcm_voice_probe(struct snd_soc_platform *platform)
+{
+	snd_soc_add_platform_controls(platform, msm_voice_controls,
+					ARRAY_SIZE(msm_voice_controls));
+
+	return 0;
+}
+
+static struct snd_soc_platform_driver msm_soc_platform = {
+	.ops		= &msm_pcm_ops,
+	.pcm_new	= msm_asoc_pcm_new,
+	.probe		= msm_pcm_voice_probe,
+};
+
+static int msm_pcm_probe(struct platform_device *pdev)
+{
+	int rc;
+	bool destroy_cvd = false;
+	bool vote_bms = false;
+	const char *is_destroy_cvd = "qcom,destroy-cvd";
+	const char *is_vote_bms = "qcom,vote-bms";
+
+	if (!is_voc_initialized()) {
+		pr_debug("%s: voice module not initialized yet, deferring probe()\n",
+		       __func__);
+
+		rc = -EPROBE_DEFER;
+		goto done;
+	}
+
+	rc = voc_alloc_cal_shared_memory();
+	if (rc == -EPROBE_DEFER) {
+		pr_debug("%s: memory allocation for calibration deferred %d\n",
+			 __func__, rc);
+
+		goto done;
+	} else if (rc < 0) {
+		pr_err("%s: memory allocation for calibration failed %d\n",
+		       __func__, rc);
+	}
+
+	pr_debug("%s: dev name %s\n",
+			__func__, dev_name(&pdev->dev));
+	destroy_cvd = of_property_read_bool(pdev->dev.of_node,
+						is_destroy_cvd);
+	voc_set_destroy_cvd_flag(destroy_cvd);
+
+	vote_bms = of_property_read_bool(pdev->dev.of_node,
+					 is_vote_bms);
+	voc_set_vote_bms_flag(vote_bms);
+
+	rc = snd_soc_register_platform(&pdev->dev,
+				       &msm_soc_platform);
+
+done:
+	return rc;
+}
+
+static int msm_pcm_remove(struct platform_device *pdev)
+{
+	snd_soc_unregister_platform(&pdev->dev);
+	return 0;
+}
+
+static const struct of_device_id msm_voice_dt_match[] = {
+	{.compatible = "qcom,msm-pcm-voice"},
+	{}
+};
+MODULE_DEVICE_TABLE(of, msm_voice_dt_match);
+
+static struct platform_driver msm_pcm_driver = {
+	.driver = {
+		.name = "msm-pcm-voice",
+		.owner = THIS_MODULE,
+		.of_match_table = msm_voice_dt_match,
+	},
+	.probe = msm_pcm_probe,
+	.remove = msm_pcm_remove,
+};
+
+static int __init msm_soc_platform_init(void)
+{
+	int i = 0;
+
+	memset(&voice_info, 0, sizeof(voice_info));
+
+	for (i = 0; i < VOICE_SESSION_INDEX_MAX; i++)
+		mutex_init(&voice_info[i].lock);
+
+	return platform_driver_register(&msm_pcm_driver);
+}
+module_init(msm_soc_platform_init);
+
+static void __exit msm_soc_platform_exit(void)
+{
+	platform_driver_unregister(&msm_pcm_driver);
+}
+module_exit(msm_soc_platform_exit);
+
+MODULE_DESCRIPTION("Voice PCM module platform driver");
+MODULE_LICENSE("GPL v2");
diff -Nruw linux-4.4.115-fbx/sound/soc/msm./qdsp6v2/msm-pcm-voice-v2.h linux-4.4.115-fbx/sound/soc/msm/qdsp6v2/msm-pcm-voice-v2.h
--- linux-4.4.115-fbx/sound/soc/msm./qdsp6v2/msm-pcm-voice-v2.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/sound/soc/msm/qdsp6v2/msm-pcm-voice-v2.h	2019-01-22 16:16:29.635301935 +0100
@@ -0,0 +1,42 @@
+/* Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#ifndef _MSM_PCM_VOICE_H
+#define _MSM_PCM_VOICE_H
+#include <sound/apr_audio-v2.h>
+
+enum {
+	VOICE_SESSION_INDEX,
+	VOLTE_SESSION_INDEX,
+	VOICE2_SESSION_INDEX,
+	QCHAT_SESSION_INDEX,
+	VOWLAN_SESSION_INDEX,
+	VOICEMMODE1_INDEX,
+	VOICEMMODE2_INDEX,
+	VOICE_SESSION_INDEX_MAX,
+};
+
+struct msm_voice {
+	struct snd_pcm_substream *playback_substream;
+	struct snd_pcm_substream *capture_substream;
+
+	int instance;
+
+	struct mutex lock;
+
+	uint32_t samp_rate;
+	uint32_t channel_mode;
+
+	int playback_start;
+	int capture_start;
+};
+
+#endif /*_MSM_PCM_VOICE_H*/
diff -Nruw linux-4.4.115-fbx/sound/soc/msm./qdsp6v2/msm-pcm-voip-v2.c linux-4.4.115-fbx/sound/soc/msm/qdsp6v2/msm-pcm-voip-v2.c
--- linux-4.4.115-fbx/sound/soc/msm./qdsp6v2/msm-pcm-voip-v2.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/sound/soc/msm/qdsp6v2/msm-pcm-voip-v2.c	2019-01-22 16:16:29.639301972 +0100
@@ -0,0 +1,1713 @@
+/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/time.h>
+#include <linux/wait.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/dma-mapping.h>
+#include <linux/of_device.h>
+#include <sound/core.h>
+#include <sound/soc.h>
+#include <sound/soc-dapm.h>
+#include <sound/pcm.h>
+#include <sound/initval.h>
+#include <sound/control.h>
+#include <asm/dma.h>
+
+#include "msm-pcm-q6-v2.h"
+#include "msm-pcm-routing-v2.h"
+#include "q6voice.h"
+
+#define SHARED_MEM_BUF 2
+#define VOIP_MAX_Q_LEN 10
+#define VOIP_MAX_VOC_PKT_SIZE 4096
+#define VOIP_MIN_VOC_PKT_SIZE 320
+
+/* Length of the DSP frame info header added to the voc packet. */
+#define DSP_FRAME_HDR_LEN 1
+
+#define MODE_IS127		0x2
+#define MODE_4GV_NB		0x3
+#define MODE_4GV_WB		0x4
+#define MODE_AMR		0x5
+#define MODE_AMR_WB		0xD
+#define MODE_PCM		0xC
+#define MODE_4GV_NW		0xE
+#define MODE_G711		0xA
+#define MODE_G711A		0xF
+
+enum msm_audio_g711a_frame_type {
+	MVS_G711A_SPEECH_GOOD,
+	MVS_G711A_SID,
+	MVS_G711A_NO_DATA,
+	MVS_G711A_ERASURE
+};
+
+enum msm_audio_g711a_mode {
+	MVS_G711A_MODE_MULAW,
+	MVS_G711A_MODE_ALAW
+};
+
+enum msm_audio_g711_mode {
+	MVS_G711_MODE_MULAW,
+	MVS_G711_MODE_ALAW
+};
+
+#define VOIP_MODE_MAX		MODE_G711A
+#define VOIP_RATE_MAX		23850
+
+enum format {
+	FORMAT_S16_LE = 2,
+	FORMAT_SPECIAL = 31,
+};
+
+
+enum amr_rate_type {
+	AMR_RATE_4750, /* AMR 4.75 kbps */
+	AMR_RATE_5150, /* AMR 5.15 kbps */
+	AMR_RATE_5900, /* AMR 5.90 kbps */
+	AMR_RATE_6700, /* AMR 6.70 kbps */
+	AMR_RATE_7400, /* AMR 7.40 kbps */
+	AMR_RATE_7950, /* AMR 7.95 kbps */
+	AMR_RATE_10200, /* AMR 10.20 kbps */
+	AMR_RATE_12200, /* AMR 12.20 kbps */
+	AMR_RATE_6600, /* AMR-WB 6.60 kbps */
+	AMR_RATE_8850, /* AMR-WB 8.85 kbps */
+	AMR_RATE_12650, /* AMR-WB 12.65 kbps */
+	AMR_RATE_14250, /* AMR-WB 14.25 kbps */
+	AMR_RATE_15850, /* AMR-WB 15.85 kbps */
+	AMR_RATE_18250, /* AMR-WB 18.25 kbps */
+	AMR_RATE_19850, /* AMR-WB 19.85 kbps */
+	AMR_RATE_23050, /* AMR-WB 23.05 kbps */
+	AMR_RATE_23850, /* AMR-WB 23.85 kbps */
+	AMR_RATE_UNDEF
+};
+
+enum voip_state {
+	VOIP_STOPPED,
+	VOIP_STARTED,
+};
+
+struct voip_frame_hdr {
+	uint32_t timestamp;
+	union {
+		/*
+		 * Bits 0-3: Frame type
+		 * [optional] Bits 16-19: Frame rate
+		 */
+		uint32_t frame_type;
+		uint32_t packet_rate;
+	};
+};
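+
+/*
+ * In-memory frame layout exchanged with userspace in non-PCM modes:
+ * msm_pcm_capture_copy()/msm_pcm_playback_copy() move frm_hdr and
+ * pktlen together with the payload.
+ */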
+struct voip_frame {
+	struct voip_frame_hdr frm_hdr;
+	uint32_t pktlen;
+	uint8_t voc_pkt[VOIP_MAX_VOC_PKT_SIZE];
+};
+
+struct voip_buf_node {
+	struct list_head list;
+	struct voip_frame frame;
+};
+
+struct voip_drv_info {
+	enum  voip_state state;
+
+	struct snd_pcm_substream *playback_substream;
+	struct snd_pcm_substream *capture_substream;
+
+	struct list_head in_queue;
+	struct list_head free_in_queue;
+
+	struct list_head out_queue;
+	struct list_head free_out_queue;
+
+	wait_queue_head_t out_wait;
+	wait_queue_head_t in_wait;
+
+	struct mutex lock;
+
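+	/* dsp_lock guards the DL (playback) queues, dsp_ul_lock the UL ones */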
+	spinlock_t dsp_lock;
+	spinlock_t dsp_ul_lock;
+
+	bool voip_reset;
+	uint32_t mode;
+	uint32_t rate_type;
+	uint32_t rate;
+	uint32_t dtx_mode;
+
+	uint8_t capture_start;
+	uint8_t playback_start;
+
+	uint8_t playback_prepare;
+	uint8_t capture_prepare;
+
+	unsigned int play_samp_rate;
+	unsigned int cap_samp_rate;
+
+	unsigned int pcm_size;
+	unsigned int pcm_count;
+	unsigned int pcm_playback_irq_pos;      /* IRQ position */
+	unsigned int pcm_playback_buf_pos;      /* position in buffer */
+
+	unsigned int pcm_capture_size;
+	unsigned int pcm_capture_count;
+	unsigned int pcm_capture_irq_pos;       /* IRQ position */
+	unsigned int pcm_capture_buf_pos;       /* position in buffer */
+
+	uint32_t evrc_min_rate;
+	uint32_t evrc_max_rate;
+};
+
+static int voip_get_media_type(uint32_t mode, uint32_t rate_type,
+				unsigned int samp_rate,
+				unsigned int *media_type);
+static int voip_get_rate_type(uint32_t mode,
+				uint32_t rate,
+				uint32_t *rate_type);
+static int voip_config_vocoder(struct snd_pcm_substream *substream);
+static int msm_voip_mode_config_put(struct snd_kcontrol *kcontrol,
+				    struct snd_ctl_elem_value *ucontrol);
+static int msm_voip_mode_config_get(struct snd_kcontrol *kcontrol,
+				    struct snd_ctl_elem_value *ucontrol);
+static int msm_voip_rate_config_put(struct snd_kcontrol *kcontrol,
+				    struct snd_ctl_elem_value *ucontrol);
+static int msm_voip_evrc_min_max_rate_config_put(struct snd_kcontrol *kcontrol,
+					 struct snd_ctl_elem_value *ucontrol);
+static int msm_voip_evrc_min_max_rate_config_get(struct snd_kcontrol *kcontrol,
+					 struct snd_ctl_elem_value *ucontrol);
+
+static struct voip_drv_info voip_info;
+
+static struct snd_pcm_hardware msm_pcm_hardware = {
+	.info =                 (SNDRV_PCM_INFO_MMAP |
+				SNDRV_PCM_INFO_BLOCK_TRANSFER |
+				SNDRV_PCM_INFO_MMAP_VALID |
+				SNDRV_PCM_INFO_INTERLEAVED),
+	.formats =              SNDRV_PCM_FMTBIT_S16_LE |
+				SNDRV_PCM_FMTBIT_SPECIAL,
+	.rates =                SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |
+				SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000,
+	.rate_min =             8000,
+	.rate_max =             48000,
+	.channels_min =         1,
+	.channels_max =         1,
+	.buffer_bytes_max =	sizeof(struct voip_buf_node) * VOIP_MAX_Q_LEN,
+	.period_bytes_min =	VOIP_MIN_VOC_PKT_SIZE,
+	.period_bytes_max =	VOIP_MAX_VOC_PKT_SIZE,
+	.periods_min =		VOIP_MAX_Q_LEN,
+	.periods_max =		VOIP_MAX_Q_LEN,
+	.fifo_size =            0,
+};
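+
+/*
+ * The buffer size and period count are pinned so the DMA area divides
+ * exactly into VOIP_MAX_Q_LEN voip_buf_node slots; msm_pcm_hw_params()
+ * links each slot into the corresponding free queue.
+ */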
+
+static int msm_voip_mute_put(struct snd_kcontrol *kcontrol,
+			     struct snd_ctl_elem_value *ucontrol)
+{
+	int ret = 0;
+	int mute = ucontrol->value.integer.value[0];
+	int ramp_duration = ucontrol->value.integer.value[1];
+
+	if ((mute < 0) || (mute > 1) || (ramp_duration < 0)) {
+		pr_err(" %s Invalid arguments", __func__);
+
+		ret = -EINVAL;
+		goto done;
+	}
+
+	pr_debug("%s: mute=%d ramp_duration=%d\n", __func__, mute,
+		ramp_duration);
+
+	voc_set_tx_mute(voc_get_session_id(VOIP_SESSION_NAME), TX_PATH, mute,
+					ramp_duration);
+
+done:
+	return ret;
+}
+
+static int msm_voip_gain_put(struct snd_kcontrol *kcontrol,
+			     struct snd_ctl_elem_value *ucontrol)
+{
+	int ret = 0;
+	int volume = ucontrol->value.integer.value[0];
+	int ramp_duration = ucontrol->value.integer.value[1];
+
+	if ((volume < 0) || (ramp_duration < 0)) {
+		pr_err(" %s Invalid arguments", __func__);
+
+		ret = -EINVAL;
+		goto done;
+	}
+
+	pr_debug("%s: volume: %d ramp_duration: %d\n", __func__, volume,
+		ramp_duration);
+
+	voc_set_rx_vol_step(voc_get_session_id(VOIP_SESSION_NAME),
+						RX_PATH,
+						volume,
+						ramp_duration);
+
+done:
+	return ret;
+}
+
+static int msm_voip_dtx_mode_put(struct snd_kcontrol *kcontrol,
+				struct snd_ctl_elem_value *ucontrol)
+{
+	mutex_lock(&voip_info.lock);
+
+	voip_info.dtx_mode  = ucontrol->value.integer.value[0];
+
+	pr_debug("%s: dtx: %d\n", __func__, voip_info.dtx_mode);
+
+	mutex_unlock(&voip_info.lock);
+
+	return 0;
+}
+
+static int msm_voip_dtx_mode_get(struct snd_kcontrol *kcontrol,
+				struct snd_ctl_elem_value *ucontrol)
+{
+	mutex_lock(&voip_info.lock);
+
+	ucontrol->value.integer.value[0] = voip_info.dtx_mode;
+
+	mutex_unlock(&voip_info.lock);
+
+	return 0;
+}
+
+static struct snd_kcontrol_new msm_voip_controls[] = {
+	SOC_SINGLE_MULTI_EXT("Voip Tx Mute", SND_SOC_NOPM, 0,
+			     MAX_RAMP_DURATION,
+			     0, 2, NULL, msm_voip_mute_put),
+	SOC_SINGLE_MULTI_EXT("Voip Rx Gain", SND_SOC_NOPM, 0,
+			     MAX_RAMP_DURATION,
+			     0, 2, NULL, msm_voip_gain_put),
+	SOC_SINGLE_EXT("Voip Mode Config", SND_SOC_NOPM, 0, VOIP_MODE_MAX, 0,
+		       msm_voip_mode_config_get, msm_voip_mode_config_put),
+	SOC_SINGLE_EXT("Voip Rate Config", SND_SOC_NOPM, 0, VOIP_RATE_MAX, 0,
+		       NULL, msm_voip_rate_config_put),
+	SOC_SINGLE_MULTI_EXT("Voip Evrc Min Max Rate Config", SND_SOC_NOPM,
+			     0, VOC_1_RATE, 0, 2,
+			     msm_voip_evrc_min_max_rate_config_get,
+			     msm_voip_evrc_min_max_rate_config_put),
+	SOC_SINGLE_EXT("Voip Dtx Mode", SND_SOC_NOPM, 0, 1, 0,
+		       msm_voip_dtx_mode_get, msm_voip_dtx_mode_put),
+};
+
+static int msm_pcm_voip_probe(struct snd_soc_platform *platform)
+{
+	snd_soc_add_platform_controls(platform, msm_voip_controls,
+					ARRAY_SIZE(msm_voip_controls));
+
+	return 0;
+}
+
+/* sample rate supported */
+static unsigned int supported_sample_rates[] = {8000, 16000, 32000, 48000};
+
+static void voip_ssr_cb_fn(uint32_t opcode, void *private_data)
+{
+	/* Notify ASoC to send next playback/capture to unblock write/read */
+	struct voip_drv_info *prtd = private_data;
+
+	if (opcode == 0xFFFFFFFF) {
+		prtd->voip_reset = true;
+		pr_debug("%s: Notify ASoC to send next playback/capture\n",
+			__func__);
+
+		prtd->pcm_playback_irq_pos += prtd->pcm_count;
+		if (prtd->state == VOIP_STARTED)
+			snd_pcm_period_elapsed(prtd->playback_substream);
+		wake_up(&prtd->out_wait);
+
+		prtd->pcm_capture_irq_pos += prtd->pcm_capture_count;
+		if (prtd->state == VOIP_STARTED)
+			snd_pcm_period_elapsed(prtd->capture_substream);
+		wake_up(&prtd->in_wait);
+
+	} else {
+		pr_err("%s: Invalid opcode during reset : %d\n",
+			__func__, opcode);
+	}
+}
+
+/* capture path */
+static void voip_process_ul_pkt(uint8_t *voc_pkt,
+				uint32_t pkt_len,
+				uint32_t timestamp,
+				void *private_data)
+{
+	struct voip_buf_node *buf_node = NULL;
+	struct voip_drv_info *prtd = private_data;
+	unsigned long dsp_flags;
+
+	if (prtd->capture_substream == NULL)
+		return;
+
+	/* Copy up-link packet into out_queue. */
+	spin_lock_irqsave(&prtd->dsp_ul_lock, dsp_flags);
+
+	/* Discard UL packets until capture start is received. */
+	if (!list_empty(&prtd->free_out_queue) && prtd->capture_start) {
+		buf_node = list_first_entry(&prtd->free_out_queue,
+					struct voip_buf_node, list);
+		list_del(&buf_node->list);
+		switch (prtd->mode) {
+		case MODE_AMR_WB:
+		case MODE_AMR: {
+			/* Remove the DSP frame info header. Header format:
+			 * Bits 0-3: Frame rate
+			 * Bits 4-7: Frame type
+			 */
+			buf_node->frame.frm_hdr.timestamp = timestamp;
+			buf_node->frame.frm_hdr.frame_type =
+						((*voc_pkt) & 0xF0) >> 4;
+			voc_pkt = voc_pkt + DSP_FRAME_HDR_LEN;
+			buf_node->frame.pktlen = pkt_len - DSP_FRAME_HDR_LEN;
+			memcpy(&buf_node->frame.voc_pkt[0],
+				voc_pkt,
+				buf_node->frame.pktlen);
+
+			list_add_tail(&buf_node->list, &prtd->out_queue);
+			break;
+		}
+		case MODE_IS127:
+		case MODE_4GV_NB:
+		case MODE_4GV_WB:
+		case MODE_4GV_NW: {
+			/* Remove the DSP frame info header.
+			 * Header format:
+			 * Bits 0-3: frame rate
+			 */
+			buf_node->frame.frm_hdr.timestamp = timestamp;
+			buf_node->frame.frm_hdr.packet_rate = (*voc_pkt) & 0x0F;
+			voc_pkt = voc_pkt + DSP_FRAME_HDR_LEN;
+			buf_node->frame.pktlen = pkt_len - DSP_FRAME_HDR_LEN;
+
+			memcpy(&buf_node->frame.voc_pkt[0],
+				voc_pkt,
+				buf_node->frame.pktlen);
+
+			list_add_tail(&buf_node->list, &prtd->out_queue);
+			break;
+		}
+		case MODE_G711:
+		case MODE_G711A:{
+			/* G711 frames are 10ms each, but the DSP works with
+			 * 20ms frames and sends two 10ms frames per buffer.
+			 * Extract the two frames and put them in separate
+			 * buffers.
+			 */
+			/* Remove the first DSP frame info header.
+			 * Header format: G711A
+			 * Bits 0-1: Frame type
+			 * Bits 2-3: Frame rate
+			 *
+			 * Header format: G711
+			 * Bits 2-3: Frame rate
+			 */
+			if (prtd->mode == MODE_G711A)
+				buf_node->frame.frm_hdr.frame_type =
+							(*voc_pkt) & 0x03;
+			buf_node->frame.frm_hdr.timestamp = timestamp;
+			voc_pkt = voc_pkt + DSP_FRAME_HDR_LEN;
+
+			/* There are two frames in the buffer. Length of the
+			 * first frame:
+			 */
+			buf_node->frame.pktlen = (pkt_len -
+						  2 * DSP_FRAME_HDR_LEN) / 2;
+
+			memcpy(&buf_node->frame.voc_pkt[0],
+			       voc_pkt,
+			       buf_node->frame.pktlen);
+			voc_pkt = voc_pkt + buf_node->frame.pktlen;
+
+			list_add_tail(&buf_node->list, &prtd->out_queue);
+
+			/* Get another buffer from the free Q and fill in the
+			 * second frame.
+			 */
+			if (!list_empty(&prtd->free_out_queue)) {
+				buf_node =
+					list_first_entry(&prtd->free_out_queue,
+							 struct voip_buf_node,
+							 list);
+				list_del(&buf_node->list);
+
+				/* Remove the second DSP frame info header.
+				 * Header format:
+				 * Bits 0-1: Frame type
+				 * Bits 2-3: Frame rate
+				 */
+
+				if (prtd->mode == MODE_G711A)
+					buf_node->frame.frm_hdr.frame_type =
+							(*voc_pkt) & 0x03;
+				buf_node->frame.frm_hdr.timestamp = timestamp;
+				voc_pkt = voc_pkt + DSP_FRAME_HDR_LEN;
+
+				/* There are two frames in the buffer. Length
+				 * of the second frame:
+				 */
+				buf_node->frame.pktlen = (pkt_len -
+						2 * DSP_FRAME_HDR_LEN) / 2;
+
+				memcpy(&buf_node->frame.voc_pkt[0],
+				       voc_pkt,
+				       buf_node->frame.pktlen);
+
+				list_add_tail(&buf_node->list,
+					      &prtd->out_queue);
+			} else {
+				/* Drop the second frame */
+				pr_err("%s: UL data dropped, read is slow\n",
+				       __func__);
+			}
+			break;
+		}
+		default: {
+			buf_node->frame.frm_hdr.timestamp = timestamp;
+			buf_node->frame.pktlen = pkt_len;
+			memcpy(&buf_node->frame.voc_pkt[0],
+			       voc_pkt,
+			       buf_node->frame.pktlen);
+			list_add_tail(&buf_node->list, &prtd->out_queue);
+		}
+		}
+		pr_debug("%s: pkt_len =%d, frame.pktlen=%d, timestamp=%d\n",
+			 __func__, pkt_len, buf_node->frame.pktlen, timestamp);
+
+		if (prtd->mode == MODE_PCM)
+			prtd->pcm_capture_irq_pos += buf_node->frame.pktlen;
+		else
+			prtd->pcm_capture_irq_pos += prtd->pcm_capture_count;
+
+		spin_unlock_irqrestore(&prtd->dsp_ul_lock, dsp_flags);
+		snd_pcm_period_elapsed(prtd->capture_substream);
+	} else {
+		spin_unlock_irqrestore(&prtd->dsp_ul_lock, dsp_flags);
+		pr_err("UL data dropped\n");
+	}
+
+	wake_up(&prtd->out_wait);
+}
+
+/* playback path */
+static void voip_process_dl_pkt(uint8_t *voc_pkt, void *private_data)
+{
+	struct voip_buf_node *buf_node = NULL;
+	struct voip_drv_info *prtd = private_data;
+	unsigned long dsp_flags;
+	uint32_t rate_type;
+	uint32_t frame_rate;
+	u32 pkt_len;
+	u8 *voc_addr = NULL;
+
+	if (prtd->playback_substream == NULL)
+		return;
+
+	spin_lock_irqsave(&prtd->dsp_lock, dsp_flags);
+
+	if (!list_empty(&prtd->in_queue) && prtd->playback_start) {
+		buf_node = list_first_entry(&prtd->in_queue,
+				struct voip_buf_node, list);
+		list_del(&buf_node->list);
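+		/*
+		 * Each DSP buffer begins with a 32-bit total-length word,
+		 * followed (in non-PCM modes) by the one-byte frame info
+		 * header and the vocoder payload.
+		 */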
+		switch (prtd->mode) {
+		case MODE_AMR:
+		case MODE_AMR_WB: {
+			*((uint32_t *)voc_pkt) = buf_node->frame.pktlen +
+							DSP_FRAME_HDR_LEN;
+			/* Advance to the header of voip packet */
+			voc_pkt = voc_pkt + sizeof(uint32_t);
+			/*
+			 * Add the DSP frame info header. Header format:
+			 * Bits 0-3: Frame rate
+			 * Bits 4-7: Frame type
+			 */
+			*voc_pkt = ((buf_node->frame.frm_hdr.frame_type &
+				   0x0F) << 4);
+			frame_rate = (buf_node->frame.frm_hdr.frame_type &
+				     0xFFFF0000) >> 16;
+			if (frame_rate) {
+				if (voip_get_rate_type(prtd->mode, frame_rate,
+						       &rate_type)) {
+					pr_err("%s(): failed to get rate_type\n",
+						__func__);
+				} else {
+					prtd->rate_type = rate_type;
+				}
+			}
+			*voc_pkt |= prtd->rate_type & 0x0F;
+
+			voc_pkt = voc_pkt + DSP_FRAME_HDR_LEN;
+			memcpy(voc_pkt,
+				&buf_node->frame.voc_pkt[0],
+				buf_node->frame.pktlen);
+			list_add_tail(&buf_node->list, &prtd->free_in_queue);
+			break;
+		}
+		case MODE_IS127:
+		case MODE_4GV_NB:
+		case MODE_4GV_WB:
+		case MODE_4GV_NW: {
+			*((uint32_t *)voc_pkt) = buf_node->frame.pktlen +
+							 DSP_FRAME_HDR_LEN;
+			/* Advance to the header of voip packet */
+			voc_pkt = voc_pkt + sizeof(uint32_t);
+			/*
+			 * Add the DSP frame info header. Header format:
+			 * Bits 0-3 : Frame rate
+			 */
+			*voc_pkt = buf_node->frame.frm_hdr.packet_rate & 0x0F;
+			voc_pkt = voc_pkt + DSP_FRAME_HDR_LEN;
+
+			memcpy(voc_pkt,
+				&buf_node->frame.voc_pkt[0],
+				buf_node->frame.pktlen);
+
+			list_add_tail(&buf_node->list, &prtd->free_in_queue);
+			break;
+		}
+		case MODE_G711:
+		case MODE_G711A:{
+			/* G711 frames are 10ms each but the DSP expects 20ms
+			 * worth of data, so send two 10ms frames per buffer.
+			 */
+			/* Add the first DSP frame info header. Header format:
+			 * Bits 0-1: Frame type
+			 * Bits 2-3: Frame rate
+			 */
+			voc_addr = voc_pkt;
+			voc_pkt = voc_pkt + sizeof(uint32_t);
+
+			*voc_pkt = ((prtd->rate_type  & 0x0F) << 2) |
+				    (buf_node->frame.frm_hdr.frame_type & 0x03);
+			voc_pkt = voc_pkt + DSP_FRAME_HDR_LEN;
+
+			pkt_len = buf_node->frame.pktlen + DSP_FRAME_HDR_LEN;
+
+			memcpy(voc_pkt,
+			       &buf_node->frame.voc_pkt[0],
+			       buf_node->frame.pktlen);
+			voc_pkt = voc_pkt + buf_node->frame.pktlen;
+
+			list_add_tail(&buf_node->list, &prtd->free_in_queue);
+
+			if (!list_empty(&prtd->in_queue)) {
+				/* Get the second buffer. */
+				buf_node = list_first_entry(&prtd->in_queue,
+							struct voip_buf_node,
+							list);
+				list_del(&buf_node->list);
+
+				/* Add the second DSP frame info header.
+				 * Header format:
+				 * Bits 0-1: Frame type
+				 * Bits 2-3: Frame rate
+				 */
+				*voc_pkt = ((prtd->rate_type & 0x0F) << 2) |
+				(buf_node->frame.frm_hdr.frame_type & 0x03);
+				voc_pkt = voc_pkt + DSP_FRAME_HDR_LEN;
+
+				pkt_len = pkt_len + buf_node->frame.pktlen +
+					   DSP_FRAME_HDR_LEN;
+
+				memcpy(voc_pkt,
+				       &buf_node->frame.voc_pkt[0],
+				       buf_node->frame.pktlen);
+
+				list_add_tail(&buf_node->list,
+					      &prtd->free_in_queue);
+			} else {
+				/* Only 10ms worth of data is available, signal
+				 * erasure frame.
+				 */
+				*voc_pkt = ((prtd->rate_type & 0x0F) << 2) |
+					    (MVS_G711A_ERASURE & 0x03);
+
+				pkt_len = pkt_len + DSP_FRAME_HDR_LEN;
+				pr_debug("%s, Only 10ms read, erase 2nd frame\n",
+					 __func__);
+			}
+			*((uint32_t *)voc_addr) = pkt_len;
+			break;
+		}
+		default: {
+			*((uint32_t *)voc_pkt) = buf_node->frame.pktlen;
+			voc_pkt = voc_pkt + sizeof(uint32_t);
+			memcpy(voc_pkt,
+			       &buf_node->frame.voc_pkt[0],
+			       buf_node->frame.pktlen);
+			list_add_tail(&buf_node->list, &prtd->free_in_queue);
+		}
+		}
+		pr_debug("%s: frame.pktlen=%d\n", __func__,
+			 buf_node->frame.pktlen);
+
+		if (prtd->mode == MODE_PCM)
+			prtd->pcm_playback_irq_pos += buf_node->frame.pktlen;
+		else
+			prtd->pcm_playback_irq_pos += prtd->pcm_count;
+
+		spin_unlock_irqrestore(&prtd->dsp_lock, dsp_flags);
+		snd_pcm_period_elapsed(prtd->playback_substream);
+	} else {
+		*((uint32_t *)voc_pkt) = 0;
+		spin_unlock_irqrestore(&prtd->dsp_lock, dsp_flags);
+		pr_err_ratelimited("DL data not available\n");
+	}
+	wake_up(&prtd->in_wait);
+}
+
+static struct snd_pcm_hw_constraint_list constraints_sample_rates = {
+	.count = ARRAY_SIZE(supported_sample_rates),
+	.list = supported_sample_rates,
+	.mask = 0,
+};
+
+static int msm_pcm_playback_prepare(struct snd_pcm_substream *substream)
+{
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	struct voip_drv_info *prtd = runtime->private_data;
+
+	prtd->play_samp_rate = runtime->rate;
+	prtd->pcm_size = snd_pcm_lib_buffer_bytes(substream);
+	prtd->pcm_count = snd_pcm_lib_period_bytes(substream);
+	prtd->pcm_playback_irq_pos = 0;
+	prtd->pcm_playback_buf_pos = 0;
+	prtd->playback_prepare = 1;
+
+	return 0;
+}
+
+static int msm_pcm_capture_prepare(struct snd_pcm_substream *substream)
+{
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	struct voip_drv_info *prtd = runtime->private_data;
+	int ret = 0;
+
+	prtd->cap_samp_rate = runtime->rate;
+	prtd->pcm_capture_size  = snd_pcm_lib_buffer_bytes(substream);
+	prtd->pcm_capture_count = snd_pcm_lib_period_bytes(substream);
+	prtd->pcm_capture_irq_pos = 0;
+	prtd->pcm_capture_buf_pos = 0;
+	prtd->capture_prepare = 1;
+	return ret;
+}
+
+static int msm_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
+{
+	int ret = 0;
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	struct voip_drv_info *prtd = runtime->private_data;
+
+	switch (cmd) {
+	case SNDRV_PCM_TRIGGER_START:
+	case SNDRV_PCM_TRIGGER_RESUME:
+		pr_debug("%s: Trigger start\n", __func__);
+		if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
+			prtd->capture_start = 1;
+		else
+			prtd->playback_start = 1;
+		break;
+	case SNDRV_PCM_TRIGGER_STOP:
+		pr_debug("SNDRV_PCM_TRIGGER_STOP\n");
+		if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+			prtd->playback_start = 0;
+		else
+			prtd->capture_start = 0;
+		break;
+	default:
+		ret = -EINVAL;
+		break;
+	}
+
+	return ret;
+}
+
+static int msm_pcm_open(struct snd_pcm_substream *substream)
+{
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	struct voip_drv_info *prtd = &voip_info;
+	int ret = 0;
+
+	pr_debug("%s, VoIP\n", __func__);
+	mutex_lock(&prtd->lock);
+
+	runtime->hw = msm_pcm_hardware;
+
+	ret = snd_pcm_hw_constraint_list(runtime, 0,
+					SNDRV_PCM_HW_PARAM_RATE,
+					&constraints_sample_rates);
+	if (ret < 0)
+		pr_debug("snd_pcm_hw_constraint_list failed\n");
+
+	ret = snd_pcm_hw_constraint_integer(runtime,
+					SNDRV_PCM_HW_PARAM_PERIODS);
+	if (ret < 0) {
+		pr_debug("snd_pcm_hw_constraint_integer failed\n");
+		goto err;
+	}
+
+	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+		prtd->playback_substream = substream;
+	else
+		prtd->capture_substream = substream;
+	runtime->private_data = prtd;
+err:
+	mutex_unlock(&prtd->lock);
+
+	return ret;
+}
+
+static int msm_pcm_playback_copy(struct snd_pcm_substream *substream, int a,
+	snd_pcm_uframes_t hwoff, void __user *buf, snd_pcm_uframes_t frames)
+{
+	int ret = 0;
+	struct voip_buf_node *buf_node = NULL;
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	struct voip_drv_info *prtd = runtime->private_data;
+	unsigned long dsp_flags;
+
+	int count = frames_to_bytes(runtime, frames);
+
+	pr_debug("%s: count = %d, frames=%d\n", __func__, count, (int)frames);
+
+	if (prtd->voip_reset) {
+		pr_debug("%s: RESET event happened during VoIP\n", __func__);
+		return -ENETRESET;
+	}
+
+	ret = wait_event_interruptible_timeout(prtd->in_wait,
+				(!list_empty(&prtd->free_in_queue) ||
+				prtd->state == VOIP_STOPPED),
+				1 * HZ);
+	if (prtd->voip_reset) {
+		pr_debug("%s: RESET event happened during VoIP\n", __func__);
+		return -ENETRESET;
+	}
+
+	if (ret > 0) {
+		if (count <= VOIP_MAX_VOC_PKT_SIZE) {
+			spin_lock_irqsave(&prtd->dsp_lock, dsp_flags);
+			buf_node =
+				list_first_entry(&prtd->free_in_queue,
+						struct voip_buf_node, list);
+			list_del(&buf_node->list);
+			spin_unlock_irqrestore(&prtd->dsp_lock, dsp_flags);
+			if (prtd->mode == MODE_PCM) {
+				ret = copy_from_user(&buf_node->frame.voc_pkt,
+							buf, count);
+				if (ret) {
+					pr_err("%s: copy from user failed %d\n",
+					       __func__, ret);
+					return -EFAULT;
+				}
+				buf_node->frame.pktlen = count;
+			} else {
+				ret = copy_from_user(&buf_node->frame,
+							buf, count);
+				if (ret) {
+					pr_err("%s: copy from user failed %d\n",
+					       __func__, ret);
+					return -EFAULT;
+				}
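+				/*
+				 * Clamp a bogus user-supplied pktlen so
+				 * header plus payload never exceeds the
+				 * bytes actually copied in.
+				 */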
+				if (buf_node->frame.pktlen >= count)
+					buf_node->frame.pktlen = count -
+					(sizeof(buf_node->frame.frm_hdr) +
+					 sizeof(buf_node->frame.pktlen));
+			}
+			spin_lock_irqsave(&prtd->dsp_lock, dsp_flags);
+			list_add_tail(&buf_node->list, &prtd->in_queue);
+			spin_unlock_irqrestore(&prtd->dsp_lock, dsp_flags);
+		} else {
+			pr_err("%s: Write cnt %d is > VOIP_MAX_VOC_PKT_SIZE\n",
+				__func__, count);
+			ret = -ENOMEM;
+		}
+
+	} else if (ret == 0) {
+		pr_err("%s: No free DL buffs\n", __func__);
+		ret = -ETIMEDOUT;
+	} else {
+		pr_err("%s: playback copy was interrupted %d\n", __func__, ret);
+	}
+
+	return  ret;
+}
+
+static int msm_pcm_capture_copy(struct snd_pcm_substream *substream,
+		int channel, snd_pcm_uframes_t hwoff, void __user *buf,
+						snd_pcm_uframes_t frames)
+{
+	int ret = 0;
+	int count = 0;
+	struct voip_buf_node *buf_node = NULL;
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	struct voip_drv_info *prtd = runtime->private_data;
+	unsigned long dsp_flags;
+	int size;
+
+	count = frames_to_bytes(runtime, frames);
+
+	pr_debug("%s: count = %d\n", __func__, count);
+
+	if (prtd->voip_reset) {
+		pr_debug("%s: RESET event happened during VoIP\n", __func__);
+		return -ENETRESET;
+	}
+
+	ret = wait_event_interruptible_timeout(prtd->out_wait,
+				(!list_empty(&prtd->out_queue) ||
+				prtd->state == VOIP_STOPPED),
+				1 * HZ);
+
+	if (prtd->voip_reset) {
+		pr_debug("%s: RESET event happened during VoIP\n", __func__);
+		return -ENETRESET;
+	}
+
+	if (ret > 0) {
+		if (count <= VOIP_MAX_VOC_PKT_SIZE) {
+			spin_lock_irqsave(&prtd->dsp_ul_lock, dsp_flags);
+			buf_node = list_first_entry(&prtd->out_queue,
+					struct voip_buf_node, list);
+			list_del(&buf_node->list);
+			spin_unlock_irqrestore(&prtd->dsp_ul_lock, dsp_flags);
+			if (prtd->mode == MODE_PCM) {
+				ret = copy_to_user(buf,
+						   &buf_node->frame.voc_pkt,
+						   buf_node->frame.pktlen);
+			} else {
+				size = sizeof(buf_node->frame.frm_hdr) +
+				       sizeof(buf_node->frame.pktlen) +
+				       buf_node->frame.pktlen;
+
+				ret = copy_to_user(buf,
+						   &buf_node->frame,
+						   size);
+			}
+			if (ret) {
+				pr_err("%s: Copy to user retuned %d\n",
+					__func__, ret);
+				ret = -EFAULT;
+			}
+			spin_lock_irqsave(&prtd->dsp_ul_lock, dsp_flags);
+			list_add_tail(&buf_node->list,
+						&prtd->free_out_queue);
+			spin_unlock_irqrestore(&prtd->dsp_ul_lock, dsp_flags);
+		} else {
+			pr_err("%s: Read count %d > VOIP_MAX_VOC_PKT_SIZE\n",
+				__func__, count);
+			ret = -ENOMEM;
+		}
+
+	} else if (ret == 0) {
+		pr_err_ratelimited("%s: No UL data available\n", __func__);
+		ret = -ETIMEDOUT;
+	} else {
+		pr_err("%s: Read was interrupted\n", __func__);
+		ret = -ERESTARTSYS;
+	}
+	return ret;
+}
+
+static int msm_pcm_copy(struct snd_pcm_substream *substream, int a,
+	 snd_pcm_uframes_t hwoff, void __user *buf, snd_pcm_uframes_t frames)
+{
+	int ret = 0;
+
+	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+		ret = msm_pcm_playback_copy(substream, a, hwoff, buf, frames);
+	else if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
+		ret = msm_pcm_capture_copy(substream, a, hwoff, buf, frames);
+
+	return ret;
+}
+
+static int msm_pcm_close(struct snd_pcm_substream *substream)
+{
+	int ret = 0;
+	struct list_head *ptr = NULL;
+	struct list_head *next = NULL;
+	struct voip_buf_node *buf_node = NULL;
+	struct snd_dma_buffer *p_dma_buf, *c_dma_buf;
+	struct snd_pcm_substream *p_substream, *c_substream;
+	struct snd_pcm_runtime *runtime;
+	struct voip_drv_info *prtd;
+	unsigned long dsp_flags;
+
+	if (substream == NULL) {
+		pr_err("substream is NULL\n");
+		return -EINVAL;
+	}
+	runtime = substream->runtime;
+	prtd = runtime->private_data;
+
+	wake_up(&prtd->out_wait);
+
+	mutex_lock(&prtd->lock);
+
+	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+		prtd->playback_prepare = 0;
+	else if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
+		prtd->capture_prepare = 0;
+
+	if (!prtd->playback_prepare && !prtd->capture_prepare) {
+		if (prtd->state == VOIP_STARTED) {
+			prtd->voip_reset = false;
+			prtd->state = VOIP_STOPPED;
+			voc_end_voice_call(
+					voc_get_session_id(VOIP_SESSION_NAME));
+			voc_register_mvs_cb(NULL, NULL, NULL, prtd);
+		}
+		/* Release all buffers: in_queue/free_in_queue first. */
+		pr_debug("release all buffers\n");
+		p_substream = prtd->playback_substream;
+		if (p_substream == NULL) {
+			pr_debug("p_substream is NULL\n");
+			goto capt;
+		}
+		p_dma_buf = &p_substream->dma_buffer;
+		if (p_dma_buf == NULL) {
+			pr_debug("p_dma_buf is NULL\n");
+			goto capt;
+		}
+		if (p_dma_buf->area != NULL) {
+			spin_lock_irqsave(&prtd->dsp_lock, dsp_flags);
+			list_for_each_safe(ptr, next, &prtd->in_queue) {
+				buf_node = list_entry(ptr,
+						struct voip_buf_node, list);
+				list_del(&buf_node->list);
+			}
+			list_for_each_safe(ptr, next, &prtd->free_in_queue) {
+				buf_node = list_entry(ptr,
+						struct voip_buf_node, list);
+				list_del(&buf_node->list);
+			}
+			spin_unlock_irqrestore(&prtd->dsp_lock, dsp_flags);
+			dma_free_coherent(p_substream->pcm->card->dev,
+				runtime->hw.buffer_bytes_max, p_dma_buf->area,
+				p_dma_buf->addr);
+			p_dma_buf->area = NULL;
+		}
+		/* release out_queue and free_out_queue */
+capt:		c_substream = prtd->capture_substream;
+		if (c_substream == NULL) {
+			pr_debug("c_substream is NULL\n");
+			goto done;
+		}
+		c_dma_buf = &c_substream->dma_buffer;
+		if (c_dma_buf == NULL) {
+			pr_debug("c_dma_buf is NULL\n");
+			goto done;
+		}
+		if (c_dma_buf->area != NULL) {
+			spin_lock_irqsave(&prtd->dsp_ul_lock, dsp_flags);
+			list_for_each_safe(ptr, next, &prtd->out_queue) {
+				buf_node = list_entry(ptr,
+						struct voip_buf_node, list);
+				list_del(&buf_node->list);
+			}
+			list_for_each_safe(ptr, next, &prtd->free_out_queue) {
+				buf_node = list_entry(ptr,
+						struct voip_buf_node, list);
+				list_del(&buf_node->list);
+			}
+			spin_unlock_irqrestore(&prtd->dsp_ul_lock, dsp_flags);
+			dma_free_coherent(c_substream->pcm->card->dev,
+				runtime->hw.buffer_bytes_max, c_dma_buf->area,
+				c_dma_buf->addr);
+			c_dma_buf->area = NULL;
+		}
+done:
+		prtd->capture_substream = NULL;
+		prtd->playback_substream = NULL;
+	}
+	mutex_unlock(&prtd->lock);
+
+	return ret;
+}
+
+static int voip_config_vocoder(struct snd_pcm_substream *substream)
+{
+	int ret = 0;
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	struct voip_drv_info *prtd = runtime->private_data;
+	uint32_t media_type = 0;
+	uint32_t rate_type = 0;
+	uint32_t evrc_min_rate_type = 0;
+	uint32_t evrc_max_rate_type = 0;
+
+	pr_debug("%s(): mode=%d, playback rate=%d, capture rate=%d\n",
+		 __func__, prtd->mode, prtd->play_samp_rate,
+		 prtd->cap_samp_rate);
+
+	if ((runtime->format != FORMAT_S16_LE &&
+	     runtime->format != FORMAT_SPECIAL) &&
+	    ((prtd->mode == MODE_AMR) || (prtd->mode == MODE_AMR_WB) ||
+	    (prtd->mode == MODE_IS127) || (prtd->mode == MODE_4GV_NB) ||
+	    (prtd->mode == MODE_4GV_WB) || (prtd->mode == MODE_4GV_NW) ||
+	    (prtd->mode == MODE_G711) || (prtd->mode == MODE_G711A))) {
+		pr_err("%s(): mode:%d and format:%u are not matched\n",
+			__func__, prtd->mode, (uint32_t)runtime->format);
+
+		ret =  -EINVAL;
+		goto done;
+	}
+
+	if (runtime->format != FORMAT_S16_LE && (prtd->mode == MODE_PCM)) {
+		pr_err("%s(): mode:%d and format:%u are not matched\n",
+		       __func__, prtd->mode, runtime->format);
+
+		ret =  -EINVAL;
+		goto done;
+	}
+
+	if ((prtd->mode == MODE_PCM) ||
+	    (prtd->mode == MODE_AMR) ||
+	    (prtd->mode == MODE_AMR_WB) ||
+	    (prtd->mode == MODE_G711) ||
+	    (prtd->mode == MODE_G711A)) {
+		ret = voip_get_rate_type(prtd->mode,
+					 prtd->rate,
+					 &rate_type);
+		if (ret < 0) {
+			pr_err("%s(): fail at getting rate_type, ret=%d\n",
+				__func__, ret);
+
+			ret = -EINVAL;
+			goto done;
+		}
+		prtd->rate_type = rate_type;
+		pr_debug("rate_type=%d\n", rate_type);
+
+	} else if ((prtd->mode == MODE_IS127) ||
+		   (prtd->mode == MODE_4GV_NB) ||
+		   (prtd->mode == MODE_4GV_WB) ||
+		   (prtd->mode == MODE_4GV_NW)) {
+		ret = voip_get_rate_type(prtd->mode,
+					 prtd->evrc_min_rate,
+					 &evrc_min_rate_type);
+		if (ret < 0) {
+			pr_err("%s(): fail at getting min rate, ret=%d\n",
+				__func__, ret);
+
+			ret = -EINVAL;
+			goto done;
+		}
+		if (evrc_min_rate_type == VOC_0_RATE)
+			evrc_min_rate_type = VOC_8_RATE;
+
+		ret = voip_get_rate_type(prtd->mode,
+					 prtd->evrc_max_rate,
+					 &evrc_max_rate_type);
+		if (ret < 0) {
+			pr_err("%s(): fail at getting max rate, ret=%d\n",
+				__func__, ret);
+
+			ret = -EINVAL;
+			goto done;
+		}
+		if (evrc_max_rate_type == VOC_0_RATE)
+			evrc_max_rate_type = VOC_1_RATE;
+
+		if (evrc_max_rate_type < evrc_min_rate_type) {
+			pr_err("%s(): Invalid EVRC min max rates: %d, %d\n",
+				__func__, evrc_min_rate_type,
+				evrc_max_rate_type);
+
+			ret = -EINVAL;
+			goto done;
+		}
+		pr_debug("%s(): min rate=%d, max rate=%d\n",
+			  __func__, evrc_min_rate_type, evrc_max_rate_type);
+	}
+	ret = voip_get_media_type(prtd->mode,
+				  prtd->rate_type,
+				  prtd->play_samp_rate,
+				  &media_type);
+	if (ret < 0) {
+		pr_err("%s(): fail at getting media_type, ret=%d\n",
+		       __func__, ret);
+
+		ret = -EINVAL;
+		goto done;
+	}
+	pr_debug("%s(): media_type=%d\n", __func__, media_type);
+
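+	/*
+	 * Playback and capture must run at the same sample rate; only then
+	 * is the vocoder configured.
+	 */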
+	if ((prtd->play_samp_rate == 8000 && prtd->cap_samp_rate == 8000) ||
+	    (prtd->play_samp_rate == 16000 && prtd->cap_samp_rate == 16000) ||
+	    (prtd->play_samp_rate == 32000 && prtd->cap_samp_rate == 32000) ||
+	    (prtd->play_samp_rate == 48000 && prtd->cap_samp_rate == 48000)) {
+		voc_config_vocoder(media_type, rate_type,
+				   VSS_NETWORK_ID_VOIP,
+				   voip_info.dtx_mode,
+				   evrc_min_rate_type,
+				   evrc_max_rate_type);
+	} else {
+		pr_debug("%s: Invalid rate playback %d, capture %d\n",
+			 __func__, prtd->play_samp_rate,
+			 prtd->cap_samp_rate);
+
+		ret = -EINVAL;
+	}
+done:
+	return ret;
+}
+
+static int msm_pcm_prepare(struct snd_pcm_substream *substream)
+{
+	int ret = 0;
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	struct voip_drv_info *prtd = runtime->private_data;
+
+	mutex_lock(&prtd->lock);
+
+	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+		ret = msm_pcm_playback_prepare(substream);
+	else if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
+		ret = msm_pcm_capture_prepare(substream);
+
+	if (prtd->playback_prepare && prtd->capture_prepare
+	    && (prtd->state != VOIP_STARTED)) {
+		ret = voip_config_vocoder(substream);
+		if (ret < 0) {
+			pr_err("%s(): fail at configuring vocoder for voip, ret=%d\n",
+				__func__, ret);
+
+			goto done;
+		}
+
+		/* Initializing callback variables */
+		voc_register_mvs_cb(voip_process_ul_pkt,
+				    voip_process_dl_pkt,
+				    voip_ssr_cb_fn, prtd);
+
+		ret = voc_start_voice_call(
+				voc_get_session_id(VOIP_SESSION_NAME));
+
+		if (ret < 0) {
+			pr_err("%s: voc_start_voice_call() failed err %d",
+			       __func__, ret);
+
+			goto done;
+		}
+		prtd->state = VOIP_STARTED;
+	}
+done:
+	mutex_unlock(&prtd->lock);
+
+	return ret;
+}
+
+static snd_pcm_uframes_t
+msm_pcm_playback_pointer(struct snd_pcm_substream *substream)
+{
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	struct voip_drv_info *prtd = runtime->private_data;
+
+	pr_debug("%s\n", __func__);
+	if (prtd->pcm_playback_irq_pos >= prtd->pcm_size)
+		prtd->pcm_playback_irq_pos = 0;
+	return bytes_to_frames(runtime, (prtd->pcm_playback_irq_pos));
+}
+
+static snd_pcm_uframes_t
+msm_pcm_capture_pointer(struct snd_pcm_substream *substream)
+{
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	struct voip_drv_info *prtd = runtime->private_data;
+
+	if (prtd->pcm_capture_irq_pos >= prtd->pcm_capture_size)
+		prtd->pcm_capture_irq_pos = 0;
+	return bytes_to_frames(runtime, (prtd->pcm_capture_irq_pos));
+}
+
+static snd_pcm_uframes_t msm_pcm_pointer(struct snd_pcm_substream *substream)
+{
+	snd_pcm_uframes_t ret = 0;
+	pr_debug("%s\n", __func__);
+	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+		ret = msm_pcm_playback_pointer(substream);
+	else if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
+		ret = msm_pcm_capture_pointer(substream);
+	return ret;
+}
+
+static int msm_pcm_mmap(struct snd_pcm_substream *substream,
+			struct vm_area_struct *vma)
+{
+	struct snd_pcm_runtime *runtime = substream->runtime;
+
+	pr_debug("%s\n", __func__);
+	return dma_mmap_coherent(substream->pcm->card->dev, vma,
+				 runtime->dma_area,
+				 runtime->dma_addr,
+				 runtime->dma_bytes);
+}
+
+static int msm_pcm_hw_params(struct snd_pcm_substream *substream,
+				struct snd_pcm_hw_params *params)
+{
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	struct snd_dma_buffer *dma_buf = &substream->dma_buffer;
+	struct voip_buf_node *buf_node = NULL;
+	int i = 0, offset = 0;
+
+	pr_debug("%s: voip\n", __func__);
+
+	mutex_lock(&voip_info.lock);
+
+	dma_buf->dev.type = SNDRV_DMA_TYPE_DEV;
+	dma_buf->dev.dev = substream->pcm->card->dev;
+	dma_buf->private_data = NULL;
+
+	dma_buf->area = dma_alloc_coherent(substream->pcm->card->dev,
+			runtime->hw.buffer_bytes_max,
+			&dma_buf->addr, GFP_KERNEL);
+	if (!dma_buf->area) {
+		pr_err("%s:MSM VOIP dma_alloc failed\n", __func__);
+		mutex_unlock(&voip_info.lock);
+		return -ENOMEM;
+	}
+
+	dma_buf->bytes = runtime->hw.buffer_bytes_max;
+	memset(dma_buf->area, 0, runtime->hw.buffer_bytes_max);
+
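+	/*
+	 * Carve the coherent DMA area into VOIP_MAX_Q_LEN voip_buf_node
+	 * slots and seed the matching free queue; the DSP callbacks and
+	 * copy ops then move nodes between the free and filled queues.
+	 */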
+	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+		for (i = 0; i < VOIP_MAX_Q_LEN; i++) {
+			buf_node = (void *)dma_buf->area + offset;
+
+			list_add_tail(&buf_node->list,
+					&voip_info.free_in_queue);
+			offset = offset + sizeof(struct voip_buf_node);
+		}
+	} else {
+		for (i = 0; i < VOIP_MAX_Q_LEN; i++) {
+			buf_node = (void *) dma_buf->area + offset;
+			list_add_tail(&buf_node->list,
+					&voip_info.free_out_queue);
+			offset = offset + sizeof(struct voip_buf_node);
+		}
+	}
+
+	mutex_unlock(&voip_info.lock);
+
+	snd_pcm_set_runtime_buffer(substream, &substream->dma_buffer);
+
+	return 0;
+}
+
+static int msm_voip_mode_config_get(struct snd_kcontrol *kcontrol,
+				    struct snd_ctl_elem_value *ucontrol)
+{
+	mutex_lock(&voip_info.lock);
+
+	ucontrol->value.integer.value[0] = voip_info.mode;
+
+	mutex_unlock(&voip_info.lock);
+
+	return 0;
+}
+
+static int msm_voip_mode_config_put(struct snd_kcontrol *kcontrol,
+				    struct snd_ctl_elem_value *ucontrol)
+{
+	mutex_lock(&voip_info.lock);
+
+	voip_info.mode = ucontrol->value.integer.value[0];
+
+	pr_debug("%s: mode=%d\n", __func__, voip_info.mode);
+
+	mutex_unlock(&voip_info.lock);
+
+	return 0;
+}
+
+static int msm_voip_rate_config_put(struct snd_kcontrol *kcontrol,
+				    struct snd_ctl_elem_value *ucontrol)
+{
+	int ret = 0;
+	int rate = ucontrol->value.integer.value[0];
+
+	mutex_lock(&voip_info.lock);
+
+	if (voip_info.rate != rate) {
+		voip_info.rate = rate;
+		pr_debug("%s: rate=%d\n", __func__, voip_info.rate);
+
+		if (voip_info.state == VOIP_STARTED &&
+		   (voip_info.mode == MODE_AMR ||
+		    voip_info.mode == MODE_AMR_WB)) {
+			ret = voip_config_vocoder(
+					voip_info.capture_substream);
+			if (ret) {
+				pr_err("%s:Failed to configure vocoder, ret=%d\n",
+					__func__, ret);
+
+				goto done;
+			}
+
+			ret = voc_update_amr_vocoder_rate(
+					voc_get_session_id(VOIP_SESSION_NAME));
+			if (ret) {
+				pr_err("%s:Failed to update AMR rate, ret=%d\n",
+					__func__, ret);
+			}
+		}
+	}
+
+done:
+	mutex_unlock(&voip_info.lock);
+
+	return ret;
+}
+
+static int msm_voip_evrc_min_max_rate_config_get(struct snd_kcontrol *kcontrol,
+					 struct snd_ctl_elem_value *ucontrol)
+{
+	mutex_lock(&voip_info.lock);
+
+	ucontrol->value.integer.value[0] = voip_info.evrc_min_rate;
+	ucontrol->value.integer.value[1] = voip_info.evrc_max_rate;
+
+	mutex_unlock(&voip_info.lock);
+
+	return 0;
+}
+
+static int msm_voip_evrc_min_max_rate_config_put(struct snd_kcontrol *kcontrol,
+					 struct snd_ctl_elem_value *ucontrol)
+{
+	mutex_lock(&voip_info.lock);
+
+	voip_info.evrc_min_rate = ucontrol->value.integer.value[0];
+	voip_info.evrc_max_rate = ucontrol->value.integer.value[1];
+
+	pr_debug("%s(): evrc_min_rate=%d,evrc_max_rate=%d\n", __func__,
+		  voip_info.evrc_min_rate, voip_info.evrc_max_rate);
+
+	mutex_unlock(&voip_info.lock);
+
+	return 0;
+}
+
+static int voip_get_rate_type(uint32_t mode, uint32_t rate,
+				 uint32_t *rate_type)
+{
+	int ret = 0;
+
+	switch (mode) {
+	case MODE_AMR: {
+		switch (rate) {
+		case 4750:
+			*rate_type = AMR_RATE_4750;
+			break;
+		case 5150:
+			*rate_type = AMR_RATE_5150;
+			break;
+		case 5900:
+			*rate_type = AMR_RATE_5900;
+			break;
+		case 6700:
+			*rate_type = AMR_RATE_6700;
+			break;
+		case 7400:
+			*rate_type = AMR_RATE_7400;
+			break;
+		case 7950:
+			*rate_type = AMR_RATE_7950;
+			break;
+		case 10200:
+			*rate_type = AMR_RATE_10200;
+			break;
+		case 12200:
+			*rate_type = AMR_RATE_12200;
+			break;
+		default:
+			pr_err("wrong rate for AMR NB.\n");
+			ret = -EINVAL;
+			break;
+		}
+		break;
+	}
+	case MODE_AMR_WB: {
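+		/*
+		 * WB rate types are zero-based: amr_rate_type lists the NB
+		 * rates first, so rebase WB entries by AMR_RATE_6600.
+		 */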
+		switch (rate) {
+		case 6600:
+			*rate_type = AMR_RATE_6600 - AMR_RATE_6600;
+			break;
+		case 8850:
+			*rate_type = AMR_RATE_8850 - AMR_RATE_6600;
+			break;
+		case 12650:
+			*rate_type = AMR_RATE_12650 - AMR_RATE_6600;
+			break;
+		case 14250:
+			*rate_type = AMR_RATE_14250 - AMR_RATE_6600;
+			break;
+		case 15850:
+			*rate_type = AMR_RATE_15850 - AMR_RATE_6600;
+			break;
+		case 18250:
+			*rate_type = AMR_RATE_18250 - AMR_RATE_6600;
+			break;
+		case 19850:
+			*rate_type = AMR_RATE_19850 - AMR_RATE_6600;
+			break;
+		case 23050:
+			*rate_type = AMR_RATE_23050 - AMR_RATE_6600;
+			break;
+		case 23850:
+			*rate_type = AMR_RATE_23850 - AMR_RATE_6600;
+			break;
+		default:
+			pr_err("wrong rate for AMR_WB.\n");
+			ret = -EINVAL;
+			break;
+		}
+		break;
+	}
+	case MODE_PCM: {
+		*rate_type = 0;
+		break;
+	}
+	case MODE_IS127:
+	case MODE_4GV_NB:
+	case MODE_4GV_WB: {
+		switch (rate) {
+		case VOC_0_RATE:
+		case VOC_8_RATE:
+		case VOC_4_RATE:
+		case VOC_2_RATE:
+		case VOC_1_RATE:
+			*rate_type = rate;
+			break;
+		default:
+			pr_err("wrong rate for IS127/4GV_NB/WB.\n");
+			ret = -EINVAL;
+			break;
+		}
+		break;
+	}
+	case MODE_4GV_NW: {
+		switch (rate) {
+		case VOC_0_RATE:
+		case VOC_8_RATE:
+		case VOC_4_RATE:
+		case VOC_2_RATE:
+		case VOC_1_RATE:
+		case VOC_8_RATE_NC:
+			*rate_type = rate;
+			break;
+		default:
+			pr_err("wrong rate for 4GV_NW.\n");
+			ret = -EINVAL;
+			break;
+		}
+		break;
+	}
+	case MODE_G711:
+	case MODE_G711A:
+		*rate_type = rate;
+		break;
+	default:
+		pr_err("wrong mode type.\n");
+		ret = -EINVAL;
+	}
+	pr_debug("%s, mode=%d, rate=%u, rate_type=%d\n",
+		__func__, mode, rate, *rate_type);
+	return ret;
+}
+
+static int voip_get_media_type(uint32_t mode, uint32_t rate_type,
+			       unsigned int samp_rate,
+			       unsigned int *media_type)
+{
+	int ret = 0;
+
+	pr_debug("%s: mode=%d, samp_rate=%d\n", __func__,
+		mode, samp_rate);
+	switch (mode) {
+	case MODE_AMR:
+		*media_type = VSS_MEDIA_ID_AMR_NB_MODEM;
+		break;
+	case MODE_AMR_WB:
+		*media_type = VSS_MEDIA_ID_AMR_WB_MODEM;
+		break;
+	case MODE_PCM:
+		if (samp_rate == 8000)
+			*media_type = VSS_MEDIA_ID_PCM_8_KHZ;
+		else if (samp_rate == 16000)
+			*media_type = VSS_MEDIA_ID_PCM_16_KHZ;
+		else if (samp_rate == 32000)
+			*media_type = VSS_MEDIA_ID_PCM_32_KHZ;
+		else
+			*media_type = VSS_MEDIA_ID_PCM_48_KHZ;
+		break;
+	case MODE_IS127: /* EVRC-A */
+		*media_type = VSS_MEDIA_ID_EVRC_MODEM;
+		break;
+	case MODE_4GV_NB: /* EVRC-B */
+		*media_type = VSS_MEDIA_ID_4GV_NB_MODEM;
+		break;
+	case MODE_4GV_WB: /* EVRC-WB */
+		*media_type = VSS_MEDIA_ID_4GV_WB_MODEM;
+		break;
+	case MODE_4GV_NW: /* EVRC-NW */
+		*media_type = VSS_MEDIA_ID_4GV_NW_MODEM;
+		break;
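+	/* For G711, rate_type carries the mu-law/A-law selection */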
+	case MODE_G711:
+	case MODE_G711A:
+		if (rate_type == MVS_G711A_MODE_MULAW)
+			*media_type = VSS_MEDIA_ID_G711_MULAW;
+		else
+			*media_type = VSS_MEDIA_ID_G711_ALAW;
+		break;
+	default:
+		pr_debug(" input mode is not supported\n");
+		ret = -EINVAL;
+	}
+
+	pr_debug("%s: media_type is 0x%x\n", __func__, *media_type);
+
+	return ret;
+}
+
+static struct snd_pcm_ops msm_pcm_ops = {
+	.open           = msm_pcm_open,
+	.copy		= msm_pcm_copy,
+	.hw_params	= msm_pcm_hw_params,
+	.close          = msm_pcm_close,
+	.prepare        = msm_pcm_prepare,
+	.trigger        = msm_pcm_trigger,
+	.pointer        = msm_pcm_pointer,
+	.mmap		= msm_pcm_mmap,
+};
+
+static int msm_asoc_pcm_new(struct snd_soc_pcm_runtime *rtd)
+{
+	struct snd_card *card = rtd->card->snd_card;
+	int ret = 0;
+
+	pr_debug("msm_asoc_pcm_new\n");
+	if (!card->dev->coherent_dma_mask)
+		card->dev->coherent_dma_mask = DMA_BIT_MASK(32);
+	return ret;
+}
+
+static struct snd_soc_platform_driver msm_soc_platform = {
+	.ops		= &msm_pcm_ops,
+	.pcm_new	= msm_asoc_pcm_new,
+	.probe		= msm_pcm_voip_probe,
+};
+
+static int msm_pcm_probe(struct platform_device *pdev)
+{
+	int rc;
+
+	if (!is_voc_initialized()) {
+		pr_debug("%s: voice module not initialized yet, deferring probe()\n",
+		       __func__);
+
+		rc = -EPROBE_DEFER;
+		goto done;
+	}
+
+	rc = voc_alloc_cal_shared_memory();
+	if (rc == -EPROBE_DEFER) {
+		pr_debug("%s: memory allocation for calibration deferred %d\n",
+			 __func__, rc);
+
+		goto done;
+	} else if (rc < 0) {
+		pr_err("%s: memory allocation for calibration failed %d\n",
+		       __func__, rc);
+	}
+
+	rc = voc_alloc_voip_shared_memory();
+	if (rc < 0) {
+		pr_err("%s: error allocating shared mem err %d\n",
+		       __func__, rc);
+	}
+
+	pr_debug("%s: dev name %s\n", __func__, dev_name(&pdev->dev));
+	rc = snd_soc_register_platform(&pdev->dev,
+				       &msm_soc_platform);
+
+done:
+	return rc;
+}
+
+static int msm_pcm_remove(struct platform_device *pdev)
+{
+	snd_soc_unregister_platform(&pdev->dev);
+	return 0;
+}
+
+static const struct of_device_id msm_voip_dt_match[] = {
+	{.compatible = "qcom,msm-voip-dsp"},
+	{}
+};
+MODULE_DEVICE_TABLE(of, msm_voip_dt_match);
+
+static struct platform_driver msm_pcm_driver = {
+	.driver = {
+		.name = "msm-voip-dsp",
+		.owner = THIS_MODULE,
+		.of_match_table = msm_voip_dt_match,
+	},
+	.probe = msm_pcm_probe,
+	.remove = msm_pcm_remove,
+};
+
+static int __init msm_soc_platform_init(void)
+{
+	memset(&voip_info, 0, sizeof(voip_info));
+	voip_info.mode = MODE_PCM;
+	mutex_init(&voip_info.lock);
+
+	spin_lock_init(&voip_info.dsp_lock);
+	spin_lock_init(&voip_info.dsp_ul_lock);
+
+	init_waitqueue_head(&voip_info.out_wait);
+	init_waitqueue_head(&voip_info.in_wait);
+
+	INIT_LIST_HEAD(&voip_info.in_queue);
+	INIT_LIST_HEAD(&voip_info.free_in_queue);
+	INIT_LIST_HEAD(&voip_info.out_queue);
+	INIT_LIST_HEAD(&voip_info.free_out_queue);
+
+	return platform_driver_register(&msm_pcm_driver);
+}
+module_init(msm_soc_platform_init);
+
+static void __exit msm_soc_platform_exit(void)
+{
+	platform_driver_unregister(&msm_pcm_driver);
+}
+module_exit(msm_soc_platform_exit);
+
+MODULE_DESCRIPTION("PCM module platform driver");
+MODULE_LICENSE("GPL v2");
diff -Nruw linux-4.4.115-fbx/sound/soc/msm./qdsp6v2/msm-qti-pp-config.c linux-4.4.115-fbx/sound/soc/msm/qdsp6v2/msm-qti-pp-config.c
--- linux-4.4.115-fbx/sound/soc/msm./qdsp6v2/msm-qti-pp-config.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/sound/soc/msm/qdsp6v2/msm-qti-pp-config.c	2019-10-29 09:26:26.157227780 +0100
@@ -0,0 +1,1401 @@
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/bitops.h>
+#include <linux/mutex.h>
+#include <sound/control.h>
+#include <sound/q6adm-v2.h>
+#include <sound/q6asm-v2.h>
+#include <sound/q6afe-v2.h>
+#include <sound/asound.h>
+#include <sound/q6audio-v2.h>
+#include <sound/tlv.h>
+
+#include "msm-qti-pp-config.h"
+#include "msm-pcm-routing-v2.h"
+
+/* EQUALIZER */
+/* Equals the frontend DAI index just after the last MULTIMEDIA session */
+#define MAX_EQ_SESSIONS		MSM_FRONTEND_DAI_CS_VOICE
+
+enum {
+	EQ_BAND1 = 0,
+	EQ_BAND2,
+	EQ_BAND3,
+	EQ_BAND4,
+	EQ_BAND5,
+	EQ_BAND6,
+	EQ_BAND7,
+	EQ_BAND8,
+	EQ_BAND9,
+	EQ_BAND10,
+	EQ_BAND11,
+	EQ_BAND12,
+	EQ_BAND_MAX,
+};
+
+/* Audio Sphere data structures */
+struct msm_audio_pp_asphere_state_s {
+	uint32_t enabled;
+	uint32_t strength;
+	uint32_t mode;
+	uint32_t version;
+	int  port_id[AFE_MAX_PORTS];
+	int  copp_idx[AFE_MAX_PORTS];
+	bool  initialized;
+	uint32_t enabled_prev;
+	uint32_t strength_prev;
+};
+
+static struct msm_audio_pp_asphere_state_s asphere_state;
+
+struct msm_audio_eq_stream_config	eq_data[MAX_EQ_SESSIONS];
+
+static int msm_route_hfp_vol_control;
+static const DECLARE_TLV_DB_LINEAR(hfp_rx_vol_gain, 0,
+				INT_RX_VOL_MAX_STEPS);
+
+static int msm_route_icc_vol_control;
+static const DECLARE_TLV_DB_LINEAR(icc_rx_vol_gain, 0,
+				INT_RX_VOL_MAX_STEPS);
+
+static int msm_route_pri_auxpcm_lb_vol_ctrl;
+static const DECLARE_TLV_DB_LINEAR(pri_auxpcm_lb_vol_gain, 0,
+				INT_RX_VOL_MAX_STEPS);
+
+static int msm_route_sec_auxpcm_lb_vol_ctrl;
+static const DECLARE_TLV_DB_LINEAR(sec_auxpcm_lb_vol_gain, 0,
+				INT_RX_VOL_MAX_STEPS);
+
+static int msm_multichannel_ec_primary_mic_ch;
+
+static void msm_qti_pp_send_eq_values_(int eq_idx)
+{
+	int result;
+	struct msm_pcm_routing_fdai_data fe_dai;
+	struct audio_client *ac = NULL;
+
+	msm_pcm_routing_get_fedai_info(eq_idx, SESSION_TYPE_RX, &fe_dai);
+	ac = q6asm_get_audio_client(fe_dai.strm_id);
+
+	if (ac == NULL) {
+		pr_err("%s: Could not get audio client for session: %d\n",
+		      __func__, fe_dai.strm_id);
+		goto done;
+	}
+
+	result = q6asm_equalizer(ac, &eq_data[eq_idx]);
+
+	if (result < 0)
+		pr_err("%s: Call to ASM equalizer failed, returned = %d\n",
+		      __func__, result);
+done:
+	return;
+}
+
+static int msm_qti_pp_get_eq_enable_mixer(struct snd_kcontrol *kcontrol,
+					  struct snd_ctl_elem_value *ucontrol)
+{
+	int eq_idx = ((struct soc_multi_mixer_control *)
+					kcontrol->private_value)->reg;
+
+	if ((eq_idx < 0) || (eq_idx >= MAX_EQ_SESSIONS))
+		return -EINVAL;
+
+	ucontrol->value.integer.value[0] = eq_data[eq_idx].enable;
+
+	pr_debug("%s: EQ #%d enable %d\n", __func__,
+		eq_idx, eq_data[eq_idx].enable);
+	return 0;
+}
+
+static int msm_qti_pp_put_eq_enable_mixer(struct snd_kcontrol *kcontrol,
+					  struct snd_ctl_elem_value *ucontrol)
+{
+	int eq_idx = ((struct soc_multi_mixer_control *)
+					kcontrol->private_value)->reg;
+	int value = ucontrol->value.integer.value[0];
+
+	if ((eq_idx < 0) || (eq_idx >= MAX_EQ_SESSIONS))
+		return -EINVAL;
+	pr_debug("%s: EQ #%d enable %d\n", __func__,
+		eq_idx, value);
+	eq_data[eq_idx].enable = value;
+	msm_pcm_routing_acquire_lock();
+	msm_qti_pp_send_eq_values_(eq_idx);
+	msm_pcm_routing_release_lock();
+	return 0;
+}
+
+static int msm_qti_pp_get_eq_band_count_audio_mixer(
+					struct snd_kcontrol *kcontrol,
+					struct snd_ctl_elem_value *ucontrol)
+{
+	int eq_idx = ((struct soc_multi_mixer_control *)
+					kcontrol->private_value)->reg;
+
+	if ((eq_idx < 0) || (eq_idx >= MAX_EQ_SESSIONS))
+		return -EINVAL;
+	ucontrol->value.integer.value[0] = eq_data[eq_idx].num_bands;
+
+	pr_debug("%s: EQ #%d bands %d\n", __func__,
+		eq_idx, eq_data[eq_idx].num_bands);
+	return 0;
+}
+
+static int msm_qti_pp_put_eq_band_count_audio_mixer(
+					struct snd_kcontrol *kcontrol,
+					struct snd_ctl_elem_value *ucontrol)
+{
+	int eq_idx = ((struct soc_multi_mixer_control *)
+					kcontrol->private_value)->reg;
+	int value = ucontrol->value.integer.value[0];
+
+	if ((eq_idx < 0) || (eq_idx >= MAX_EQ_SESSIONS))
+		return -EINVAL;
+
+	pr_debug("%s: EQ #%d bands %d\n", __func__,
+		eq_idx, value);
+	eq_data[eq_idx].num_bands = value;
+	return 0;
+}
+
+static int msm_qti_pp_get_eq_band_audio_mixer(struct snd_kcontrol *kcontrol,
+					    struct snd_ctl_elem_value *ucontrol)
+{
+	int eq_idx = ((struct soc_multi_mixer_control *)
+					kcontrol->private_value)->reg;
+	int band_idx = ((struct soc_multi_mixer_control *)
+					kcontrol->private_value)->shift;
+
+	if ((eq_idx < 0) || (eq_idx >= MAX_EQ_SESSIONS) ||
+	    (band_idx < EQ_BAND1) || (band_idx >= EQ_BAND_MAX))
+		return -EINVAL;
+
+	ucontrol->value.integer.value[0] =
+			eq_data[eq_idx].eq_bands[band_idx].band_idx;
+	ucontrol->value.integer.value[1] =
+			eq_data[eq_idx].eq_bands[band_idx].filter_type;
+	ucontrol->value.integer.value[2] =
+			eq_data[eq_idx].eq_bands[band_idx].center_freq_hz;
+	ucontrol->value.integer.value[3] =
+			eq_data[eq_idx].eq_bands[band_idx].filter_gain;
+	ucontrol->value.integer.value[4] =
+			eq_data[eq_idx].eq_bands[band_idx].q_factor;
+
+	pr_debug("%s: band_idx = %d\n", __func__,
+			eq_data[eq_idx].eq_bands[band_idx].band_idx);
+	pr_debug("%s: filter_type = %d\n", __func__,
+			eq_data[eq_idx].eq_bands[band_idx].filter_type);
+	pr_debug("%s: center_freq_hz = %d\n", __func__,
+			eq_data[eq_idx].eq_bands[band_idx].center_freq_hz);
+	pr_debug("%s: filter_gain = %d\n", __func__,
+			eq_data[eq_idx].eq_bands[band_idx].filter_gain);
+	pr_debug("%s: q_factor = %d\n", __func__,
+			eq_data[eq_idx].eq_bands[band_idx].q_factor);
+	return 0;
+}
+
+static int msm_qti_pp_put_eq_band_audio_mixer(struct snd_kcontrol *kcontrol,
+				   struct snd_ctl_elem_value *ucontrol)
+{
+	int eq_idx = ((struct soc_multi_mixer_control *)
+					kcontrol->private_value)->reg;
+	int band_idx = ((struct soc_multi_mixer_control *)
+					kcontrol->private_value)->shift;
+
+	if ((eq_idx < 0) || (eq_idx >= MAX_EQ_SESSIONS) ||
+	    (band_idx < EQ_BAND1) || (band_idx >= EQ_BAND_MAX))
+		return -EINVAL;
+
+	eq_data[eq_idx].eq_bands[band_idx].band_idx =
+					ucontrol->value.integer.value[0];
+	eq_data[eq_idx].eq_bands[band_idx].filter_type =
+					ucontrol->value.integer.value[1];
+	eq_data[eq_idx].eq_bands[band_idx].center_freq_hz =
+					ucontrol->value.integer.value[2];
+	eq_data[eq_idx].eq_bands[band_idx].filter_gain =
+					ucontrol->value.integer.value[3];
+	eq_data[eq_idx].eq_bands[band_idx].q_factor =
+					ucontrol->value.integer.value[4];
+	return 0;
+}
+
+#ifdef CONFIG_QTI_PP
+void msm_qti_pp_send_eq_values(int fedai_id)
+{
+	if (eq_data[fedai_id].enable)
+		msm_qti_pp_send_eq_values_(fedai_id);
+}
+
+/* CUSTOM MIXING */
+int msm_qti_pp_send_stereo_to_custom_stereo_cmd(int port_id, int copp_idx,
+						unsigned int session_id,
+						uint16_t op_FL_ip_FL_weight,
+						uint16_t op_FL_ip_FR_weight,
+						uint16_t op_FR_ip_FL_weight,
+						uint16_t op_FR_ip_FR_weight)
+{
+	char *params_value;
+	int *update_params_value32, rc = 0;
+	int16_t *update_params_value16 = 0;
+	uint32_t params_length = CUSTOM_STEREO_PAYLOAD_SIZE * sizeof(uint32_t);
+	uint32_t avail_length = params_length;
+	pr_debug("%s: port_id - %d, session id - %d\n", __func__, port_id,
+		 session_id);
+	params_value = kzalloc(params_length, GFP_KERNEL);
+	if (!params_value) {
+		pr_err("%s, params memory alloc failed\n", __func__);
+		return -ENOMEM;
+	}
+	update_params_value32 = (int *)params_value;
+	if (avail_length < 2 * sizeof(uint32_t))
+		goto skip_send_cmd;
+	*update_params_value32++ = MTMX_MODULE_ID_DEFAULT_CHMIXER;
+	*update_params_value32++ = DEFAULT_CHMIXER_PARAM_ID_COEFF;
+	avail_length = avail_length - (2 * sizeof(uint32_t));
+
+	update_params_value16 = (int16_t *)update_params_value32;
+	if (avail_length < 10 * sizeof(uint16_t))
+		goto skip_send_cmd;
+	*update_params_value16++ = CUSTOM_STEREO_CMD_PARAM_SIZE;
+	/* for alignment only */
+	*update_params_value16++ = 0;
+	/* index is a 32-bit param in little endian */
+	*update_params_value16++ = CUSTOM_STEREO_INDEX_PARAM;
+	*update_params_value16++ = 0;
+	/* number of output channels for stereo mixing */
+	*update_params_value16++ = CUSTOM_STEREO_NUM_OUT_CH;
+	/* number of input channels for stereo mixing */
+	*update_params_value16++ = CUSTOM_STEREO_NUM_IN_CH;
+
+	/* Out ch map FL/FR */
+	*update_params_value16++ = PCM_CHANNEL_FL;
+	*update_params_value16++ = PCM_CHANNEL_FR;
+
+	/* In ch map FL/FR */
+	*update_params_value16++ = PCM_CHANNEL_FL;
+	*update_params_value16++ = PCM_CHANNEL_FR;
+	avail_length = avail_length - (10 * sizeof(uint16_t));
+	/*
+	 * Weighting coefficients: mixing is done according to these
+	 * coefficients.
+	 */
+	if (avail_length < 4 * sizeof(uint16_t))
+		goto skip_send_cmd;
+	*update_params_value16++ = op_FL_ip_FL_weight;
+	*update_params_value16++ = op_FL_ip_FR_weight;
+	*update_params_value16++ = op_FR_ip_FL_weight;
+	*update_params_value16++ = op_FR_ip_FR_weight;
+	avail_length = avail_length - (4 * sizeof(uint16_t));
+	if (params_length) {
+		rc = adm_set_pspd_matrix_params(port_id,
+						copp_idx,
+						session_id,
+						params_value,
+						params_length);
+		if (rc) {
+			pr_err("%s: send params failed rc=%d\n", __func__, rc);
+			kfree(params_value);
+			return -EINVAL;
+		}
+	}
+	kfree(params_value);
+	return 0;
+skip_send_cmd:
+	pr_err("%s: insufficient memory, send cmd failed\n", __func__);
+	kfree(params_value);
+	return -ENOMEM;
+}
+#endif /* CONFIG_QTI_PP */
+
+/* RMS */
+static int msm_qti_pp_get_rms_value_control(struct snd_kcontrol *kcontrol,
+					    struct snd_ctl_elem_value *ucontrol)
+{
+	int rc = 0;
+	int be_idx = 0, copp_idx;
+	char *param_value;
+	int *update_param_value;
+	uint32_t param_length = sizeof(uint32_t);
+	uint32_t param_payload_len = RMS_PAYLOAD_LEN * sizeof(uint32_t);
+	struct msm_pcm_routing_bdai_data msm_bedai;
+
+	param_value = kzalloc(param_length + param_payload_len, GFP_KERNEL);
+	if (!param_value)
+		return -ENOMEM;
+	msm_pcm_routing_acquire_lock();
+	for (be_idx = 0; be_idx < MSM_BACKEND_DAI_MAX; be_idx++) {
+		msm_pcm_routing_get_bedai_info(be_idx, &msm_bedai);
+		if (msm_bedai.port_id == SLIMBUS_0_TX)
+			break;
+	}
+	if ((be_idx >= MSM_BACKEND_DAI_MAX) || !msm_bedai.active) {
+		pr_debug("%s, back not active to query rms be_idx:%d\n",
+			 __func__, be_idx);
+		rc = -EINVAL;
+		goto get_rms_value_err;
+	}
+	copp_idx = adm_get_default_copp_idx(SLIMBUS_0_TX);
+	if ((copp_idx < 0) || (copp_idx > MAX_COPPS_PER_PORT)) {
+		pr_debug("%s, no active copp to query rms copp_idx:%d\n",
+			 __func__ , copp_idx);
+		rc = -EINVAL;
+		goto get_rms_value_err;
+	}
+	rc = adm_get_params(SLIMBUS_0_TX, copp_idx,
+			RMS_MODULEID_APPI_PASSTHRU,
+			RMS_PARAM_FIRST_SAMPLE,
+			param_length + param_payload_len,
+			param_value);
+	if (rc) {
+		pr_err("%s: get parameters failed rc=%d\n", __func__, rc);
+		rc = -EINVAL;
+		goto get_rms_value_err;
+	}
+	update_param_value = (int *)param_value;
+	ucontrol->value.integer.value[0] = update_param_value[0];
+
+	pr_debug("%s: FROM DSP value[0] 0x%x\n",
+		__func__, update_param_value[0]);
+get_rms_value_err:
+	msm_pcm_routing_release_lock();
+	kfree(param_value);
+	return rc;
+}
+
+static int msm_qti_pp_put_rms_value_control(struct snd_kcontrol *kcontrol,
+					    struct snd_ctl_elem_value *ucontrol)
+{
+	/* not used */
+	return 0;
+}
+
+/* VOLUME */
+static int msm_route_fm_vol_control;
+static int msm_afe_lb_vol_ctrl;
+static int msm_afe_sec_mi2s_lb_vol_ctrl;
+static int msm_afe_tert_mi2s_lb_vol_ctrl;
+static int msm_afe_quat_mi2s_lb_vol_ctrl;
+static int msm_afe_slimbus_7_lb_vol_ctrl;
+static int msm_afe_slimbus_8_lb_vol_ctrl;
+static const DECLARE_TLV_DB_LINEAR(fm_rx_vol_gain, 0, INT_RX_VOL_MAX_STEPS);
+static const DECLARE_TLV_DB_LINEAR(afe_lb_vol_gain, 0, INT_RX_VOL_MAX_STEPS);
+
+static int msm_qti_pp_get_fm_vol_mixer(struct snd_kcontrol *kcontrol,
+				       struct snd_ctl_elem_value *ucontrol)
+{
+	ucontrol->value.integer.value[0] = msm_route_fm_vol_control;
+	return 0;
+}
+
+static int msm_qti_pp_set_fm_vol_mixer(struct snd_kcontrol *kcontrol,
+			    struct snd_ctl_elem_value *ucontrol)
+{
+	afe_loopback_gain(INT_FM_TX, ucontrol->value.integer.value[0]);
+
+	msm_route_fm_vol_control = ucontrol->value.integer.value[0];
+
+	return 0;
+}
+
+static int msm_qti_pp_get_pri_mi2s_lb_vol_mixer(struct snd_kcontrol *kcontrol,
+				       struct snd_ctl_elem_value *ucontrol)
+{
+	ucontrol->value.integer.value[0] = msm_afe_lb_vol_ctrl;
+	return 0;
+}
+
+static int msm_qti_pp_set_pri_mi2s_lb_vol_mixer(struct snd_kcontrol *kcontrol,
+			    struct snd_ctl_elem_value *ucontrol)
+{
+	afe_loopback_gain(AFE_PORT_ID_PRIMARY_MI2S_TX,
+			  ucontrol->value.integer.value[0]);
+
+	msm_afe_lb_vol_ctrl = ucontrol->value.integer.value[0];
+
+	return 0;
+}
+
+static int msm_qti_pp_get_sec_mi2s_lb_vol_mixer(struct snd_kcontrol *kcontrol,
+				       struct snd_ctl_elem_value *ucontrol)
+{
+	ucontrol->value.integer.value[0] = msm_afe_sec_mi2s_lb_vol_ctrl;
+	return 0;
+}
+
+static int msm_qti_pp_set_sec_mi2s_lb_vol_mixer(struct snd_kcontrol *kcontrol,
+			    struct snd_ctl_elem_value *ucontrol)
+{
+	afe_loopback_gain(AFE_PORT_ID_SECONDARY_MI2S_TX,
+			  ucontrol->value.integer.value[0]);
+	msm_afe_sec_mi2s_lb_vol_ctrl = ucontrol->value.integer.value[0];
+
+	return 0;
+}
+
+static int msm_qti_pp_get_tert_mi2s_lb_vol_mixer(struct snd_kcontrol *kcontrol,
+				       struct snd_ctl_elem_value *ucontrol)
+{
+	ucontrol->value.integer.value[0] = msm_afe_tert_mi2s_lb_vol_ctrl;
+	return 0;
+}
+
+static int msm_qti_pp_set_tert_mi2s_lb_vol_mixer(struct snd_kcontrol *kcontrol,
+			    struct snd_ctl_elem_value *ucontrol)
+{
+	afe_loopback_gain(AFE_PORT_ID_TERTIARY_MI2S_TX,
+			  ucontrol->value.integer.value[0]);
+	msm_afe_tert_mi2s_lb_vol_ctrl = ucontrol->value.integer.value[0];
+	return 0;
+}
+
+static int msm_qti_pp_get_slimbus_7_lb_vol_mixer(struct snd_kcontrol *kcontrol,
+				       struct snd_ctl_elem_value *ucontrol)
+{
+	ucontrol->value.integer.value[0] = msm_afe_slimbus_7_lb_vol_ctrl;
+	return 0;
+}
+
+static int msm_qti_pp_set_slimbus_7_lb_vol_mixer(struct snd_kcontrol *kcontrol,
+			    struct snd_ctl_elem_value *ucontrol)
+{
+	int ret = afe_loopback_gain(SLIMBUS_7_TX,
+				ucontrol->value.integer.value[0]);
+
+	if (ret)
+		pr_err("%s: failed to set LB vol for SLIMBUS_7_TX, err %d\n",
+			__func__, ret);
+	else
+		msm_afe_slimbus_7_lb_vol_ctrl =
+				ucontrol->value.integer.value[0];
+
+	return ret;
+}
+
+static int msm_qti_pp_get_slimbus_8_lb_vol_mixer(struct snd_kcontrol *kcontrol,
+				       struct snd_ctl_elem_value *ucontrol)
+{
+	ucontrol->value.integer.value[0] = msm_afe_slimbus_8_lb_vol_ctrl;
+	return 0;
+}
+
+static int msm_qti_pp_set_slimbus_8_lb_vol_mixer(struct snd_kcontrol *kcontrol,
+			    struct snd_ctl_elem_value *ucontrol)
+{
+	int ret = 0;
+
+	ret = afe_loopback_gain(SLIMBUS_8_TX,
+				ucontrol->value.integer.value[0]);
+
+	if (ret)
+		pr_err("%s: failed to set LB vol for SLIMBUS_8_TX, err %d\n",
+			__func__, ret);
+	else
+		msm_afe_slimbus_8_lb_vol_ctrl =
+				ucontrol->value.integer.value[0];
+
+	return ret;
+}
+
+static int msm_qti_pp_get_icc_vol_mixer(struct snd_kcontrol *kcontrol,
+				       struct snd_ctl_elem_value *ucontrol)
+{
+	ucontrol->value.integer.value[0] = msm_route_icc_vol_control;
+	return 0;
+}
+
+static int msm_qti_pp_set_icc_vol_mixer(struct snd_kcontrol *kcontrol,
+			    struct snd_ctl_elem_value *ucontrol)
+{
+	adm_set_mic_gain(AFE_PORT_ID_QUATERNARY_TDM_TX,
+		adm_get_default_copp_idx(AFE_PORT_ID_QUATERNARY_TDM_TX),
+		ucontrol->value.integer.value[0]);
+	msm_route_icc_vol_control = ucontrol->value.integer.value[0];
+	return 0;
+}
+
+static int msm_qti_pp_get_quat_mi2s_fm_vol_mixer(struct snd_kcontrol *kcontrol,
+				       struct snd_ctl_elem_value *ucontrol)
+{
+	ucontrol->value.integer.value[0] = msm_afe_quat_mi2s_lb_vol_ctrl;
+	return 0;
+}
+
+static int msm_qti_pp_set_quat_mi2s_fm_vol_mixer(struct snd_kcontrol *kcontrol,
+			    struct snd_ctl_elem_value *ucontrol)
+{
+	afe_loopback_gain(AFE_PORT_ID_QUATERNARY_MI2S_TX,
+			  ucontrol->value.integer.value[0]);
+
+	msm_afe_quat_mi2s_lb_vol_ctrl = ucontrol->value.integer.value[0];
+
+	return 0;
+}
+
+static int msm_qti_pp_get_hfp_vol_mixer(struct snd_kcontrol *kcontrol,
+					struct snd_ctl_elem_value *ucontrol)
+{
+	ucontrol->value.integer.value[0] = msm_route_hfp_vol_control;
+	return 0;
+}
+
+static int msm_qti_pp_set_hfp_vol_mixer(struct snd_kcontrol *kcontrol,
+					struct snd_ctl_elem_value *ucontrol)
+{
+	afe_loopback_gain(INT_BT_SCO_TX, ucontrol->value.integer.value[0]);
+
+	msm_route_hfp_vol_control = ucontrol->value.integer.value[0];
+
+	return 0;
+}
+
+static int msm_qti_pp_get_pri_auxpcm_lb_vol_mixer(
+					struct snd_kcontrol *kcontrol,
+					struct snd_ctl_elem_value *ucontrol)
+{
+	ucontrol->value.integer.value[0] = msm_route_pri_auxpcm_lb_vol_ctrl;
+	pr_debug("%s: Volume = %ld\n", __func__,
+		ucontrol->value.integer.value[0]);
+	return 0;
+}
+
+static int msm_qti_pp_set_pri_auxpcm_lb_vol_mixer(
+					struct snd_kcontrol *kcontrol,
+					struct snd_ctl_elem_value *ucontrol)
+{
+	struct soc_mixer_control *mc =
+	(struct soc_mixer_control *)kcontrol->private_value;
+
+	afe_loopback_gain(mc->reg, ucontrol->value.integer.value[0]);
+
+	msm_route_pri_auxpcm_lb_vol_ctrl = ucontrol->value.integer.value[0];
+
+	return 0;
+}
+
+static int msm_qti_pp_get_sec_auxpcm_lb_vol_mixer(
+					struct snd_kcontrol *kcontrol,
+					struct snd_ctl_elem_value *ucontrol)
+{
+	ucontrol->value.integer.value[0] = msm_route_sec_auxpcm_lb_vol_ctrl;
+	pr_debug("%s: Volume = %ld\n", __func__,
+		ucontrol->value.integer.value[0]);
+	return 0;
+}
+
+static int msm_qti_pp_set_sec_auxpcm_lb_vol_mixer(
+					struct snd_kcontrol *kcontrol,
+					struct snd_ctl_elem_value *ucontrol)
+{
+	struct soc_mixer_control *mc =
+	(struct soc_mixer_control *)kcontrol->private_value;
+
+	afe_loopback_gain(mc->reg, ucontrol->value.integer.value[0]);
+
+	msm_route_sec_auxpcm_lb_vol_ctrl = ucontrol->value.integer.value[0];
+
+	return 0;
+}
+
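+/*
+ * "Playback Device Channel Map" control: reads and writes the
+ * multichannel map applied on the ADM playback path.
+ */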
+static int msm_qti_pp_get_channel_map_mixer(struct snd_kcontrol *kcontrol,
+					    struct snd_ctl_elem_value *ucontrol)
+{
+	char channel_map[PCM_FORMAT_MAX_NUM_CHANNEL] = {0};
+	int i;
+
+	adm_get_multi_ch_map(channel_map, ADM_PATH_PLAYBACK);
+	for (i = 0; i < PCM_FORMAT_MAX_NUM_CHANNEL; i++)
+		ucontrol->value.integer.value[i] = (unsigned)channel_map[i];
+	return 0;
+}
+
+static int msm_qti_pp_put_channel_map_mixer(struct snd_kcontrol *kcontrol,
+					    struct snd_ctl_elem_value *ucontrol)
+{
+	char channel_map[PCM_FORMAT_MAX_NUM_CHANNEL];
+	int i;
+
+	for (i = 0; i < PCM_FORMAT_MAX_NUM_CHANNEL; i++)
+		channel_map[i] = (char)(ucontrol->value.integer.value[i]);
+	adm_set_multi_ch_map(channel_map, ADM_PATH_PLAYBACK);
+
+	return 0;
+}
+
+/* Audio Sphere functions */
+
+static void msm_qti_pp_asphere_init_state(void)
+{
+	int i;
+
+	if (asphere_state.initialized)
+		return;
+	asphere_state.initialized = true;
+	for (i = 0; i < AFE_MAX_PORTS; i++) {
+		asphere_state.port_id[i] = -1;
+		asphere_state.copp_idx[i] = -1;
+	}
+	asphere_state.enabled = 0;
+	asphere_state.strength = 0;
+	asphere_state.mode = 0;
+	asphere_state.version = 0;
+	asphere_state.enabled_prev = 0;
+	asphere_state.strength_prev = 0;
+}
+
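+/*
+ * Pack and send the Audio Sphere enable/strength parameters. Each
+ * parameter is an adm_param_data_v5 header followed by one uint32_t
+ * payload; only parameters whose value changed are included, unless
+ * @force requests a full refresh.
+ */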
+static int msm_qti_pp_asphere_send_params(int port_id, int copp_idx, bool force)
+{
+	char *params_value = NULL;
+	uint32_t *update_params_value = NULL;
+	uint32_t param_size = sizeof(uint32_t) +
+			sizeof(struct adm_param_data_v5);
+	int params_length = 0, param_count = 0, ret = 0;
+	bool set_enable = force ||
+			(asphere_state.enabled != asphere_state.enabled_prev);
+	bool set_strength = asphere_state.enabled == 1 && (set_enable ||
+		(asphere_state.strength != asphere_state.strength_prev));
+
+	if (set_enable)
+		param_count++;
+	if (set_strength)
+		param_count++;
+	params_length = param_count * param_size;
+
+	pr_debug("%s: port_id %d, copp_id %d, forced %d, param_count %d\n",
+			__func__, port_id, copp_idx, force, param_count);
+	pr_debug("%s: enable prev:%u cur:%u, strength prev:%u cur:%u\n",
+		__func__, asphere_state.enabled_prev, asphere_state.enabled,
+		asphere_state.strength_prev, asphere_state.strength);
+
+	if (!params_length)
+		return 0;
+
+	params_value = kzalloc(params_length, GFP_KERNEL);
+	if (!params_value) {
+		pr_err("%s, params memory alloc failed\n", __func__);
+		return -ENOMEM;
+	}
+	update_params_value = (uint32_t *)params_value;
+	params_length = 0;
+	if (set_strength) {
+		/* add strength command */
+		*update_params_value++ = AUDPROC_MODULE_ID_AUDIOSPHERE;
+		*update_params_value++ = AUDPROC_PARAM_ID_AUDIOSPHERE_STRENGTH;
+		*update_params_value++ = sizeof(uint32_t);
+		*update_params_value++ = asphere_state.strength;
+		params_length += param_size;
+	}
+	if (set_enable) {
+		/* add enable command */
+		*update_params_value++ = AUDPROC_MODULE_ID_AUDIOSPHERE;
+		*update_params_value++ = AUDPROC_PARAM_ID_AUDIOSPHERE_ENABLE;
+		*update_params_value++ = sizeof(uint32_t);
+		*update_params_value++ = asphere_state.enabled;
+		params_length += param_size;
+	}
+	pr_debug("%s, param length: %d\n", __func__, params_length);
+	if (params_length) {
+		ret = adm_send_params_v5(port_id, copp_idx,
+					params_value, params_length);
+		if (ret) {
+			pr_err("%s: setting param failed with err=%d\n",
+				__func__, ret);
+			kfree(params_value);
+			return -EINVAL;
+		}
+	}
+	kfree(params_value);
+	return 0;
+}
+
+#if defined(CONFIG_QTI_PP) && defined(CONFIG_QTI_PP_AUDIOSPHERE)
+int msm_qti_pp_asphere_init(int port_id, int copp_idx)
+{
+	int index = adm_validate_and_get_port_index(port_id);
+
+	pr_debug("%s, port_id %d, copp_id %d\n", __func__, port_id, copp_idx);
+	if (index < 0) {
+		pr_err("%s: Invalid port idx %d port_id %#x\n", __func__, index,
+			port_id);
+		return -EINVAL;
+	}
+	msm_qti_pp_asphere_init_state();
+
+	asphere_state.port_id[index] = port_id;
+	asphere_state.copp_idx[index] = copp_idx;
+
+	if (asphere_state.enabled)
+		msm_qti_pp_asphere_send_params(port_id, copp_idx, true);
+
+	return 0;
+}
+
+void msm_qti_pp_asphere_deinit(int port_id)
+{
+	int index = adm_validate_and_get_port_index(port_id);
+
+	pr_debug("%s, port_id %d\n", __func__, port_id);
+	if (index < 0) {
+		pr_err("%s: Invalid port idx %d port_id %#x\n", __func__, index,
+			port_id);
+		return;
+	}
+
+	if (asphere_state.port_id[index] == port_id) {
+		asphere_state.port_id[index] = -1;
+		asphere_state.copp_idx[index] = -1;
+	}
+}
+#endif
+
+static int msm_qti_pp_asphere_get(struct snd_kcontrol *kcontrol,
+			struct snd_ctl_elem_value *ucontrol)
+{
+	if (!asphere_state.initialized)
+		return -EAGAIN;
+	ucontrol->value.integer.value[0] = asphere_state.enabled;
+	ucontrol->value.integer.value[1] = asphere_state.strength;
+	pr_debug("%s, enable %u, strength %u\n", __func__,
+			asphere_state.enabled, asphere_state.strength);
+	return 0;
+}
+
+static int msm_qti_pp_asphere_set(struct snd_kcontrol *kcontrol,
+			struct snd_ctl_elem_value *ucontrol)
+{
+	int32_t enable = ucontrol->value.integer.value[0];
+	int32_t strength = ucontrol->value.integer.value[1];
+	int i;
+
+	pr_debug("%s, enable %d, strength %d\n", __func__, enable, strength);
+
+	msm_qti_pp_asphere_init_state();
+
+	if (enable == 0 || enable == 1) {
+		asphere_state.enabled_prev = asphere_state.enabled;
+		asphere_state.enabled = enable;
+	}
+
+	if (strength >= 0 && strength <= 1000) {
+		asphere_state.strength_prev = asphere_state.strength;
+		asphere_state.strength = strength;
+	}
+
+	if (asphere_state.strength != asphere_state.strength_prev ||
+		asphere_state.enabled != asphere_state.enabled_prev) {
+		for (i = 0; i < AFE_MAX_PORTS; i++) {
+			if (asphere_state.port_id[i] >= 0)
+				msm_qti_pp_asphere_send_params(
+					asphere_state.port_id[i],
+					asphere_state.copp_idx[i],
+					false);
+		}
+	}
+	return 0;
+}
+
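+/*
+ * ADSP PP event queue: events raised by the DSP are queued per
+ * "DSP Stream Callback" mixer control and drained one event per read
+ * by msm_adsp_stream_callback_get().
+ */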
+int msm_adsp_init_mixer_ctl_pp_event_queue(struct snd_soc_pcm_runtime *rtd)
+{
+	struct snd_kcontrol *kctl;
+	const char *deviceNo = "NN";
+	char *mixer_str = NULL;
+	int ctl_len = 0, ret = 0;
+	const char *mixer_ctl_name = DSP_STREAM_CALLBACK;
+	struct dsp_stream_callback_prtd *kctl_prtd = NULL;
+
+	if (!rtd) {
+		pr_err("%s: rtd is NULL\n", __func__);
+		ret = -EINVAL;
+		goto done;
+	}
+
+	ctl_len = strlen(mixer_ctl_name) + 1 + strlen(deviceNo) + 1;
+	mixer_str = kzalloc(ctl_len, GFP_KERNEL);
+	if (!mixer_str) {
+		ret = -ENOMEM;
+		goto done;
+	}
+
+	snprintf(mixer_str, ctl_len, "%s %d", mixer_ctl_name,
+		rtd->pcm->device);
+	kctl = snd_soc_card_get_kcontrol(rtd->card, mixer_str);
+	kfree(mixer_str);
+	if (!kctl) {
+		pr_err("%s: failed to get kctl.\n", __func__);
+		ret = -EINVAL;
+		goto done;
+	}
+
+	if (kctl->private_data != NULL) {
+		pr_err("%s: kctl_prtd is not NULL at initialization.\n",
+			__func__);
+		ret = -EINVAL;
+		goto done;
+	}
+
+	kctl_prtd = kzalloc(sizeof(struct dsp_stream_callback_prtd),
+			GFP_KERNEL);
+	if (!kctl_prtd) {
+		ret = -ENOMEM;
+		goto done;
+	}
+
+	spin_lock_init(&kctl_prtd->prtd_spin_lock);
+	INIT_LIST_HEAD(&kctl_prtd->event_queue);
+	kctl_prtd->event_count = 0;
+	kctl->private_data = kctl_prtd;
+
+done:
+	return ret;
+}
+
+int msm_adsp_clean_mixer_ctl_pp_event_queue(struct snd_soc_pcm_runtime *rtd)
+{
+	struct snd_kcontrol *kctl;
+	const char *deviceNo = "NN";
+	char *mixer_str = NULL;
+	int ctl_len = 0, ret = 0;
+	struct dsp_stream_callback_list *node, *n;
+	unsigned long spin_flags;
+	const char *mixer_ctl_name = DSP_STREAM_CALLBACK;
+	struct dsp_stream_callback_prtd *kctl_prtd = NULL;
+
+	if (!rtd) {
+		pr_err("%s: rtd is NULL\n", __func__);
+		ret = -EINVAL;
+		goto done;
+	}
+
+	ctl_len = strlen(mixer_ctl_name) + 1 + strlen(deviceNo) + 1;
+	mixer_str = kzalloc(ctl_len, GFP_KERNEL);
+	if (!mixer_str) {
+		ret = -ENOMEM;
+		goto done;
+	}
+
+	snprintf(mixer_str, ctl_len, "%s %d", mixer_ctl_name,
+		rtd->pcm->device);
+	kctl = snd_soc_card_get_kcontrol(rtd->card, mixer_str);
+	kfree(mixer_str);
+	if (!kctl) {
+		pr_err("%s: failed to get kctl.\n", __func__);
+		ret = -EINVAL;
+		goto done;
+	}
+
+	kctl_prtd = (struct dsp_stream_callback_prtd *)
+			kctl->private_data;
+	if (kctl_prtd != NULL) {
+		spin_lock_irqsave(&kctl_prtd->prtd_spin_lock, spin_flags);
+		/* clean the queue */
+		list_for_each_entry_safe(node, n,
+				&kctl_prtd->event_queue, list) {
+			list_del(&node->list);
+			kctl_prtd->event_count--;
+			pr_debug("%s: %d remaining events after del.\n",
+				__func__, kctl_prtd->event_count);
+			kfree(node);
+		}
+		spin_unlock_irqrestore(&kctl_prtd->prtd_spin_lock, spin_flags);
+	}
+
+	kfree(kctl_prtd);
+	kctl->private_data = NULL;
+
+done:
+	return ret;
+}
+
+int msm_adsp_inform_mixer_ctl(struct snd_soc_pcm_runtime *rtd,
+			uint32_t *payload)
+{
+	/* adsp pp event notifier */
+	struct snd_kcontrol *kctl;
+	struct snd_ctl_elem_value control;
+	const char *deviceNo = "NN";
+	char *mixer_str = NULL;
+	int ctl_len = 0, ret = 0;
+	struct dsp_stream_callback_list *new_event;
+	struct dsp_stream_callback_list *oldest_event;
+	unsigned long spin_flags;
+	struct dsp_stream_callback_prtd *kctl_prtd = NULL;
+	struct msm_adsp_event_data *event_data = NULL;
+	const char *mixer_ctl_name = DSP_STREAM_CALLBACK;
+	struct snd_ctl_elem_info kctl_info;
+
+	if (!rtd || !payload) {
+		pr_err("%s: %s is NULL\n", __func__,
+			(!rtd) ? "rtd" : "payload");
+		ret = -EINVAL;
+		goto done;
+	}
+
+	if (rtd->card->snd_card == NULL) {
+		pr_err("%s: snd_card is null.\n", __func__);
+		ret = -EINVAL;
+		goto done;
+	}
+
+	ctl_len = strlen(mixer_ctl_name) + 1 + strlen(deviceNo) + 1;
+	mixer_str = kzalloc(ctl_len, GFP_ATOMIC);
+	if (!mixer_str) {
+		ret = -ENOMEM;
+		goto done;
+	}
+
+	snprintf(mixer_str, ctl_len, "%s %d", mixer_ctl_name,
+		rtd->pcm->device);
+	kctl = snd_soc_card_get_kcontrol(rtd->card, mixer_str);
+	kfree(mixer_str);
+	if (!kctl) {
+		pr_err("%s: failed to get kctl.\n", __func__);
+		ret = -EINVAL;
+		goto done;
+	}
+
+	event_data = (struct msm_adsp_event_data *)payload;
+	kctl->info(kctl, &kctl_info);
+
+	if (event_data->payload_len >
+		kctl_info.count - sizeof(struct msm_adsp_event_data)) {
+		pr_err("%s: payload length exceeds limit of %u bytes.\n",
+			__func__, kctl_info.count);
+		ret = -EINVAL;
+		goto done;
+	}
+
+	kctl_prtd = (struct dsp_stream_callback_prtd *)
+			kctl->private_data;
+	if (kctl_prtd == NULL) {
+		/* queue is not initialized */
+		ret = -EINVAL;
+		pr_err("%s: event queue is not initialized.\n", __func__);
+		goto done;
+	}
+
+	new_event = kzalloc(sizeof(struct dsp_stream_callback_list)
+			+ event_data->payload_len,
+			GFP_ATOMIC);
+	if (new_event == NULL) {
+		ret = -ENOMEM;
+		goto done;
+	}
+	memcpy((void *)&new_event->event, (void *)payload,
+		   event_data->payload_len
+		   + sizeof(struct msm_adsp_event_data));
+
+	spin_lock_irqsave(&kctl_prtd->prtd_spin_lock, spin_flags);
+	while (kctl_prtd->event_count >= DSP_STREAM_CALLBACK_QUEUE_SIZE) {
+		pr_info("%s: queue of size %d is full. delete oldest one.\n",
+			__func__, DSP_STREAM_CALLBACK_QUEUE_SIZE);
+		oldest_event = list_first_entry(&kctl_prtd->event_queue,
+				struct dsp_stream_callback_list, list);
+		pr_info("%s: event deleted: type %d length %d\n",
+			__func__, oldest_event->event.event_type,
+			oldest_event->event.payload_len);
+		list_del(&oldest_event->list);
+		kctl_prtd->event_count--;
+		kfree(oldest_event);
+	}
+
+	list_add_tail(&new_event->list, &kctl_prtd->event_queue);
+	kctl_prtd->event_count++;
+	spin_unlock_irqrestore(&kctl_prtd->prtd_spin_lock, spin_flags);
+
+	control.id = kctl->id;
+	snd_ctl_notify(rtd->card->snd_card,
+			SNDRV_CTL_EVENT_MASK_INFO,
+			&control.id);
+
+done:
+	return ret;
+}
+
+int msm_adsp_stream_cmd_info(struct snd_kcontrol *kcontrol,
+			struct snd_ctl_elem_info *uinfo)
+{
+	uinfo->type = SNDRV_CTL_ELEM_TYPE_BYTES;
+	uinfo->count =
+		sizeof(((struct snd_ctl_elem_value *)0)->value.bytes.data);
+
+	return 0;
+}
+
+int msm_adsp_stream_callback_get(struct snd_kcontrol *kcontrol,
+			struct snd_ctl_elem_value *ucontrol)
+{
+	uint32_t payload_size = 0;
+	struct dsp_stream_callback_list *oldest_event;
+	unsigned long spin_flags;
+	struct dsp_stream_callback_prtd *kctl_prtd = NULL;
+	int ret = 0;
+
+	kctl_prtd = (struct dsp_stream_callback_prtd *)
+			kcontrol->private_data;
+	if (kctl_prtd == NULL) {
+		pr_debug("%s: ASM Stream PP event queue is not initialized.\n",
+			 __func__);
+		ret = -EINVAL;
+		goto done;
+	}
+
+	spin_lock_irqsave(&kctl_prtd->prtd_spin_lock, spin_flags);
+	if (list_empty(&kctl_prtd->event_queue)) {
+		pr_debug("%s: ASM Stream PP event queue is empty.\n", __func__);
+		ret = -EINVAL;
+		spin_unlock_irqrestore(&kctl_prtd->prtd_spin_lock, spin_flags);
+		goto done;
+	}
+
+	oldest_event = list_first_entry(&kctl_prtd->event_queue,
+			struct dsp_stream_callback_list, list);
+	list_del(&oldest_event->list);
+	kctl_prtd->event_count--;
+	spin_unlock_irqrestore(&kctl_prtd->prtd_spin_lock, spin_flags);
+
+	payload_size = oldest_event->event.payload_len;
+	pr_debug("%s: event fetched: type %d length %d\n",
+			__func__, oldest_event->event.event_type,
+			oldest_event->event.payload_len);
+	memcpy(ucontrol->value.bytes.data, &oldest_event->event,
+		sizeof(struct msm_adsp_event_data) + payload_size);
+	kfree(oldest_event);
+
+done:
+	return ret;
+}
+
+int msm_adsp_stream_callback_info(struct snd_kcontrol *kcontrol,
+			struct snd_ctl_elem_info *uinfo)
+{
+	uinfo->type = SNDRV_CTL_ELEM_TYPE_BYTES;
+	uinfo->count =
+		sizeof(((struct snd_ctl_elem_value *)0)->value.bytes.data);
+
+	return 0;
+}
+
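+/*
+ * Select which microphone channel the multichannel echo canceller on
+ * the quaternary TDM TX port treats as primary.
+ */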
+static int msm_multichannel_ec_primary_mic_ch_put(struct snd_kcontrol *kcontrol,
+			struct snd_ctl_elem_value *ucontrol)
+{
+	int ret = 0;
+	int copp_idx = 0;
+	int port_id = AFE_PORT_ID_QUATERNARY_TDM_TX;
+
+	msm_multichannel_ec_primary_mic_ch = ucontrol->value.integer.value[0];
+	pr_debug("%s: msm_multichannel_ec_primary_mic_ch = %u\n",
+		__func__, msm_multichannel_ec_primary_mic_ch);
+	copp_idx = adm_get_default_copp_idx(port_id);
+	if ((copp_idx < 0) || (copp_idx >= MAX_COPPS_PER_PORT)) {
+		pr_err("%s: no active copp to query multichannel ec, copp_idx: %d\n",
+			__func__, copp_idx);
+		return -EINVAL;
+	}
+	adm_send_set_multichannel_ec_primary_mic_ch(port_id, copp_idx,
+		msm_multichannel_ec_primary_mic_ch);
+
+	return ret;
+}
+
+static int msm_multichannel_ec_primary_mic_ch_get(struct snd_kcontrol *kcontrol,
+			struct snd_ctl_elem_value *ucontrol)
+{
+	ucontrol->value.integer.value[0] = msm_multichannel_ec_primary_mic_ch;
+	pr_debug("%s: msm_multichannel_ec_primary_mic_ch = %lu\n",
+		__func__, ucontrol->value.integer.value[0]);
+	return 0;
+}
+
+static const struct snd_kcontrol_new msm_multichannel_ec_controls[] = {
+	SOC_SINGLE_EXT("Multichannel EC Primary Mic Ch", SND_SOC_NOPM, 0,
+		0xFFFFFFFF, 0, msm_multichannel_ec_primary_mic_ch_get,
+		msm_multichannel_ec_primary_mic_ch_put),
+};
+
+static const struct snd_kcontrol_new int_fm_vol_mixer_controls[] = {
+	SOC_SINGLE_EXT_TLV("Internal FM RX Volume", SND_SOC_NOPM, 0,
+	INT_RX_VOL_GAIN, 0, msm_qti_pp_get_fm_vol_mixer,
+	msm_qti_pp_set_fm_vol_mixer, fm_rx_vol_gain),
+	SOC_SINGLE_EXT_TLV("Quat MI2S FM RX Volume", SND_SOC_NOPM, 0,
+	INT_RX_VOL_GAIN, 0, msm_qti_pp_get_quat_mi2s_fm_vol_mixer,
+	msm_qti_pp_set_quat_mi2s_fm_vol_mixer, fm_rx_vol_gain),
+};
+
+static const struct snd_kcontrol_new pri_mi2s_lb_vol_mixer_controls[] = {
+	SOC_SINGLE_EXT_TLV("PRI MI2S LOOPBACK Volume", SND_SOC_NOPM, 0,
+	INT_RX_VOL_GAIN, 0, msm_qti_pp_get_pri_mi2s_lb_vol_mixer,
+	msm_qti_pp_set_pri_mi2s_lb_vol_mixer, afe_lb_vol_gain),
+};
+
+static const struct snd_kcontrol_new sec_mi2s_lb_vol_mixer_controls[] = {
+	SOC_SINGLE_EXT_TLV("SEC MI2S LOOPBACK Volume", SND_SOC_NOPM, 0,
+	INT_RX_VOL_GAIN, 0, msm_qti_pp_get_sec_mi2s_lb_vol_mixer,
+	msm_qti_pp_set_sec_mi2s_lb_vol_mixer, afe_lb_vol_gain),
+};
+
+static const struct snd_kcontrol_new tert_mi2s_lb_vol_mixer_controls[] = {
+	SOC_SINGLE_EXT_TLV("Tert MI2S LOOPBACK Volume", SND_SOC_NOPM, 0,
+	INT_RX_VOL_GAIN, 0, msm_qti_pp_get_tert_mi2s_lb_vol_mixer,
+	msm_qti_pp_set_tert_mi2s_lb_vol_mixer, afe_lb_vol_gain),
+};
+
+static const struct snd_kcontrol_new slimbus_7_lb_vol_mixer_controls[] = {
+	SOC_SINGLE_EXT_TLV("SLIMBUS_7 LOOPBACK Volume", SND_SOC_NOPM, 0,
+				INT_RX_VOL_GAIN, 0,
+				msm_qti_pp_get_slimbus_7_lb_vol_mixer,
+				msm_qti_pp_set_slimbus_7_lb_vol_mixer,
+				afe_lb_vol_gain),
+};
+
+static const struct snd_kcontrol_new slimbus_8_lb_vol_mixer_controls[] = {
+	SOC_SINGLE_EXT_TLV("SLIMBUS_8 LOOPBACK Volume", SND_SOC_NOPM, 0,
+	INT_RX_VOL_GAIN, 0, msm_qti_pp_get_slimbus_8_lb_vol_mixer,
+	msm_qti_pp_set_slimbus_8_lb_vol_mixer, afe_lb_vol_gain),
+};
+
+static const struct snd_kcontrol_new int_hfp_vol_mixer_controls[] = {
+	SOC_SINGLE_EXT_TLV("Internal HFP RX Volume", SND_SOC_NOPM, 0,
+	INT_RX_VOL_GAIN, 0, msm_qti_pp_get_hfp_vol_mixer,
+	msm_qti_pp_set_hfp_vol_mixer, hfp_rx_vol_gain),
+};
+
+static const struct snd_kcontrol_new int_icc_vol_mixer_controls[] = {
+	SOC_SINGLE_EXT_TLV("Internal ICC Volume", SND_SOC_NOPM, 0,
+	INT_RX_VOL_GAIN, 0, msm_qti_pp_get_icc_vol_mixer,
+	msm_qti_pp_set_icc_vol_mixer, icc_rx_vol_gain),
+};
+
+static const struct snd_kcontrol_new pri_auxpcm_lb_vol_mixer_controls[] = {
+	SOC_SINGLE_EXT_TLV("PRI AUXPCM LOOPBACK Volume",
+	AFE_PORT_ID_PRIMARY_PCM_TX, 0, INT_RX_VOL_GAIN, 0,
+	msm_qti_pp_get_pri_auxpcm_lb_vol_mixer,
+	msm_qti_pp_set_pri_auxpcm_lb_vol_mixer,
+	pri_auxpcm_lb_vol_gain),
+};
+
+static const struct snd_kcontrol_new sec_auxpcm_lb_vol_mixer_controls[] = {
+	SOC_SINGLE_EXT_TLV("SEC AUXPCM LOOPBACK Volume",
+	AFE_PORT_ID_SECONDARY_PCM_TX, 0, INT_RX_VOL_GAIN, 0,
+	msm_qti_pp_get_sec_auxpcm_lb_vol_mixer,
+	msm_qti_pp_set_sec_auxpcm_lb_vol_mixer,
+	sec_auxpcm_lb_vol_gain),
+};
+
+static const struct snd_kcontrol_new multi_ch_channel_map_mixer_controls[] = {
+	SOC_SINGLE_MULTI_EXT("Playback Device Channel Map", SND_SOC_NOPM, 0, 16,
+	0, 8, msm_qti_pp_get_channel_map_mixer,
+	msm_qti_pp_put_channel_map_mixer),
+};
+
+static const struct snd_kcontrol_new get_rms_controls[] = {
+	SOC_SINGLE_EXT("Get RMS", SND_SOC_NOPM, 0, 0xFFFFFFFF,
+	0, msm_qti_pp_get_rms_value_control, msm_qti_pp_put_rms_value_control),
+};
+
+static const struct snd_kcontrol_new eq_enable_mixer_controls[] = {
+	SOC_SINGLE_EXT("MultiMedia1 EQ Enable", SND_SOC_NOPM,
+	MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_qti_pp_get_eq_enable_mixer,
+	msm_qti_pp_put_eq_enable_mixer),
+	SOC_SINGLE_EXT("MultiMedia2 EQ Enable", SND_SOC_NOPM,
+	MSM_FRONTEND_DAI_MULTIMEDIA2, 1, 0, msm_qti_pp_get_eq_enable_mixer,
+	msm_qti_pp_put_eq_enable_mixer),
+	SOC_SINGLE_EXT("MultiMedia3 EQ Enable", SND_SOC_NOPM,
+	MSM_FRONTEND_DAI_MULTIMEDIA3, 1, 0, msm_qti_pp_get_eq_enable_mixer,
+	msm_qti_pp_put_eq_enable_mixer),
+};
+
+static const struct snd_kcontrol_new eq_band_mixer_controls[] = {
+	SOC_SINGLE_EXT("MultiMedia1 EQ Band Count", SND_SOC_NOPM,
+	MSM_FRONTEND_DAI_MULTIMEDIA1, 11, 0,
+	msm_qti_pp_get_eq_band_count_audio_mixer,
+	msm_qti_pp_put_eq_band_count_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia2 EQ Band Count", SND_SOC_NOPM,
+	MSM_FRONTEND_DAI_MULTIMEDIA2, 11, 0,
+	msm_qti_pp_get_eq_band_count_audio_mixer,
+	msm_qti_pp_put_eq_band_count_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia3 EQ Band Count", SND_SOC_NOPM,
+	MSM_FRONTEND_DAI_MULTIMEDIA3, 11, 0,
+	msm_qti_pp_get_eq_band_count_audio_mixer,
+	msm_qti_pp_put_eq_band_count_audio_mixer),
+};
+
+static const struct snd_kcontrol_new eq_coeff_mixer_controls[] = {
+	SOC_SINGLE_MULTI_EXT("MultiMedia1 EQ Band1", EQ_BAND1,
+	MSM_FRONTEND_DAI_MULTIMEDIA1, 255, 0, 5,
+	msm_qti_pp_get_eq_band_audio_mixer, msm_qti_pp_put_eq_band_audio_mixer),
+	SOC_SINGLE_MULTI_EXT("MultiMedia1 EQ Band2", EQ_BAND2,
+	MSM_FRONTEND_DAI_MULTIMEDIA1, 255, 0, 5,
+	msm_qti_pp_get_eq_band_audio_mixer, msm_qti_pp_put_eq_band_audio_mixer),
+	SOC_SINGLE_MULTI_EXT("MultiMedia1 EQ Band3", EQ_BAND3,
+	MSM_FRONTEND_DAI_MULTIMEDIA1, 255, 0, 5,
+	msm_qti_pp_get_eq_band_audio_mixer, msm_qti_pp_put_eq_band_audio_mixer),
+	SOC_SINGLE_MULTI_EXT("MultiMedia1 EQ Band4", EQ_BAND4,
+	MSM_FRONTEND_DAI_MULTIMEDIA1, 255, 0, 5,
+	msm_qti_pp_get_eq_band_audio_mixer, msm_qti_pp_put_eq_band_audio_mixer),
+	SOC_SINGLE_MULTI_EXT("MultiMedia1 EQ Band5", EQ_BAND5,
+	MSM_FRONTEND_DAI_MULTIMEDIA1, 255, 0, 5,
+	msm_qti_pp_get_eq_band_audio_mixer, msm_qti_pp_put_eq_band_audio_mixer),
+	SOC_SINGLE_MULTI_EXT("MultiMedia1 EQ Band6", EQ_BAND6,
+	MSM_FRONTEND_DAI_MULTIMEDIA1, 255, 0, 5,
+	msm_qti_pp_get_eq_band_audio_mixer, msm_qti_pp_put_eq_band_audio_mixer),
+	SOC_SINGLE_MULTI_EXT("MultiMedia1 EQ Band7", EQ_BAND7,
+	MSM_FRONTEND_DAI_MULTIMEDIA1, 255, 0, 5,
+	msm_qti_pp_get_eq_band_audio_mixer, msm_qti_pp_put_eq_band_audio_mixer),
+	SOC_SINGLE_MULTI_EXT("MultiMedia1 EQ Band8", EQ_BAND8,
+	MSM_FRONTEND_DAI_MULTIMEDIA1, 255, 0, 5,
+	msm_qti_pp_get_eq_band_audio_mixer, msm_qti_pp_put_eq_band_audio_mixer),
+	SOC_SINGLE_MULTI_EXT("MultiMedia1 EQ Band9", EQ_BAND9,
+	MSM_FRONTEND_DAI_MULTIMEDIA1, 255, 0, 5,
+	msm_qti_pp_get_eq_band_audio_mixer, msm_qti_pp_put_eq_band_audio_mixer),
+	SOC_SINGLE_MULTI_EXT("MultiMedia1 EQ Band10", EQ_BAND10,
+	MSM_FRONTEND_DAI_MULTIMEDIA1, 255, 0, 5,
+	msm_qti_pp_get_eq_band_audio_mixer, msm_qti_pp_put_eq_band_audio_mixer),
+	SOC_SINGLE_MULTI_EXT("MultiMedia1 EQ Band11", EQ_BAND11,
+	MSM_FRONTEND_DAI_MULTIMEDIA1, 255, 0, 5,
+	msm_qti_pp_get_eq_band_audio_mixer, msm_qti_pp_put_eq_band_audio_mixer),
+	SOC_SINGLE_MULTI_EXT("MultiMedia1 EQ Band12", EQ_BAND12,
+	MSM_FRONTEND_DAI_MULTIMEDIA1, 255, 0, 5,
+	msm_qti_pp_get_eq_band_audio_mixer, msm_qti_pp_put_eq_band_audio_mixer),
+	SOC_SINGLE_MULTI_EXT("MultiMedia2 EQ Band1", EQ_BAND1,
+	MSM_FRONTEND_DAI_MULTIMEDIA2, 255, 0, 5,
+	msm_qti_pp_get_eq_band_audio_mixer, msm_qti_pp_put_eq_band_audio_mixer),
+	SOC_SINGLE_MULTI_EXT("MultiMedia2 EQ Band2", EQ_BAND2,
+	MSM_FRONTEND_DAI_MULTIMEDIA2, 255, 0, 5,
+	msm_qti_pp_get_eq_band_audio_mixer, msm_qti_pp_put_eq_band_audio_mixer),
+	SOC_SINGLE_MULTI_EXT("MultiMedia2 EQ Band3", EQ_BAND3,
+	MSM_FRONTEND_DAI_MULTIMEDIA2, 255, 0, 5,
+	msm_qti_pp_get_eq_band_audio_mixer, msm_qti_pp_put_eq_band_audio_mixer),
+	SOC_SINGLE_MULTI_EXT("MultiMedia2 EQ Band4", EQ_BAND4,
+	MSM_FRONTEND_DAI_MULTIMEDIA2, 255, 0, 5,
+	msm_qti_pp_get_eq_band_audio_mixer, msm_qti_pp_put_eq_band_audio_mixer),
+	SOC_SINGLE_MULTI_EXT("MultiMedia2 EQ Band5", EQ_BAND5,
+	MSM_FRONTEND_DAI_MULTIMEDIA2, 255, 0, 5,
+	msm_qti_pp_get_eq_band_audio_mixer, msm_qti_pp_put_eq_band_audio_mixer),
+	SOC_SINGLE_MULTI_EXT("MultiMedia2 EQ Band6", EQ_BAND6,
+	MSM_FRONTEND_DAI_MULTIMEDIA2, 255, 0, 5,
+	msm_qti_pp_get_eq_band_audio_mixer, msm_qti_pp_put_eq_band_audio_mixer),
+	SOC_SINGLE_MULTI_EXT("MultiMedia2 EQ Band7", EQ_BAND7,
+	MSM_FRONTEND_DAI_MULTIMEDIA2, 255, 0, 5,
+	msm_qti_pp_get_eq_band_audio_mixer, msm_qti_pp_put_eq_band_audio_mixer),
+	SOC_SINGLE_MULTI_EXT("MultiMedia2 EQ Band8", EQ_BAND8,
+	MSM_FRONTEND_DAI_MULTIMEDIA2, 255, 0, 5,
+	msm_qti_pp_get_eq_band_audio_mixer, msm_qti_pp_put_eq_band_audio_mixer),
+	SOC_SINGLE_MULTI_EXT("MultiMedia2 EQ Band9", EQ_BAND9,
+	MSM_FRONTEND_DAI_MULTIMEDIA2, 255, 0, 5,
+	msm_qti_pp_get_eq_band_audio_mixer, msm_qti_pp_put_eq_band_audio_mixer),
+	SOC_SINGLE_MULTI_EXT("MultiMedia2 EQ Band10", EQ_BAND10,
+	MSM_FRONTEND_DAI_MULTIMEDIA2, 255, 0, 5,
+	msm_qti_pp_get_eq_band_audio_mixer, msm_qti_pp_put_eq_band_audio_mixer),
+	SOC_SINGLE_MULTI_EXT("MultiMedia2 EQ Band11", EQ_BAND11,
+	MSM_FRONTEND_DAI_MULTIMEDIA2, 255, 0, 5,
+	msm_qti_pp_get_eq_band_audio_mixer, msm_qti_pp_put_eq_band_audio_mixer),
+	SOC_SINGLE_MULTI_EXT("MultiMedia2 EQ Band12", EQ_BAND12,
+	MSM_FRONTEND_DAI_MULTIMEDIA2, 255, 0, 5,
+	msm_qti_pp_get_eq_band_audio_mixer, msm_qti_pp_put_eq_band_audio_mixer),
+	SOC_SINGLE_MULTI_EXT("MultiMedia3 EQ Band1", EQ_BAND1,
+	MSM_FRONTEND_DAI_MULTIMEDIA3, 255, 0, 5,
+	msm_qti_pp_get_eq_band_audio_mixer, msm_qti_pp_put_eq_band_audio_mixer),
+	SOC_SINGLE_MULTI_EXT("MultiMedia3 EQ Band2", EQ_BAND2,
+	MSM_FRONTEND_DAI_MULTIMEDIA3, 255, 0, 5,
+	msm_qti_pp_get_eq_band_audio_mixer, msm_qti_pp_put_eq_band_audio_mixer),
+	SOC_SINGLE_MULTI_EXT("MultiMedia3 EQ Band3", EQ_BAND3,
+	MSM_FRONTEND_DAI_MULTIMEDIA3, 255, 0, 5,
+	msm_qti_pp_get_eq_band_audio_mixer, msm_qti_pp_put_eq_band_audio_mixer),
+	SOC_SINGLE_MULTI_EXT("MultiMedia3 EQ Band4", EQ_BAND4,
+	MSM_FRONTEND_DAI_MULTIMEDIA3, 255, 0, 5,
+	msm_qti_pp_get_eq_band_audio_mixer, msm_qti_pp_put_eq_band_audio_mixer),
+	SOC_SINGLE_MULTI_EXT("MultiMedia3 EQ Band5", EQ_BAND5,
+	MSM_FRONTEND_DAI_MULTIMEDIA3, 255, 0, 5,
+	msm_qti_pp_get_eq_band_audio_mixer, msm_qti_pp_put_eq_band_audio_mixer),
+	SOC_SINGLE_MULTI_EXT("MultiMedia3 EQ Band6", EQ_BAND6,
+	MSM_FRONTEND_DAI_MULTIMEDIA3, 255, 0, 5,
+	msm_qti_pp_get_eq_band_audio_mixer, msm_qti_pp_put_eq_band_audio_mixer),
+	SOC_SINGLE_MULTI_EXT("MultiMedia3 EQ Band7", EQ_BAND7,
+	MSM_FRONTEND_DAI_MULTIMEDIA3, 255, 0, 5,
+	msm_qti_pp_get_eq_band_audio_mixer, msm_qti_pp_put_eq_band_audio_mixer),
+	SOC_SINGLE_MULTI_EXT("MultiMedia3 EQ Band8", EQ_BAND8,
+	MSM_FRONTEND_DAI_MULTIMEDIA3, 255, 0, 5,
+	msm_qti_pp_get_eq_band_audio_mixer, msm_qti_pp_put_eq_band_audio_mixer),
+	SOC_SINGLE_MULTI_EXT("MultiMedia3 EQ Band9", EQ_BAND9,
+	MSM_FRONTEND_DAI_MULTIMEDIA3, 255, 0, 5,
+	msm_qti_pp_get_eq_band_audio_mixer, msm_qti_pp_put_eq_band_audio_mixer),
+	SOC_SINGLE_MULTI_EXT("MultiMedia3 EQ Band10", EQ_BAND10,
+	MSM_FRONTEND_DAI_MULTIMEDIA3, 255, 0, 5,
+	msm_qti_pp_get_eq_band_audio_mixer, msm_qti_pp_put_eq_band_audio_mixer),
+	SOC_SINGLE_MULTI_EXT("MultiMedia3 EQ Band11", EQ_BAND11,
+	MSM_FRONTEND_DAI_MULTIMEDIA3, 255, 0, 5,
+	msm_qti_pp_get_eq_band_audio_mixer, msm_qti_pp_put_eq_band_audio_mixer),
+	SOC_SINGLE_MULTI_EXT("MultiMedia3 EQ Band12", EQ_BAND12,
+	MSM_FRONTEND_DAI_MULTIMEDIA3, 255, 0, 5,
+	msm_qti_pp_get_eq_band_audio_mixer, msm_qti_pp_put_eq_band_audio_mixer),
+};
+
+static const struct snd_kcontrol_new asphere_mixer_controls[] = {
+	SOC_SINGLE_MULTI_EXT("MSM ASphere Set Param", SND_SOC_NOPM, 0,
+	0xFFFFFFFF, 0, 2, msm_qti_pp_asphere_get, msm_qti_pp_asphere_set),
+};
+
+#ifdef CONFIG_QTI_PP
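+/*
+ * Register all QTI post-processing mixer controls with the platform
+ * driver.
+ */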
+void msm_qti_pp_add_controls(struct snd_soc_platform *platform)
+{
+	snd_soc_add_platform_controls(platform, int_fm_vol_mixer_controls,
+			ARRAY_SIZE(int_fm_vol_mixer_controls));
+
+	snd_soc_add_platform_controls(platform, pri_mi2s_lb_vol_mixer_controls,
+			ARRAY_SIZE(pri_mi2s_lb_vol_mixer_controls));
+
+	snd_soc_add_platform_controls(platform, sec_mi2s_lb_vol_mixer_controls,
+			ARRAY_SIZE(sec_mi2s_lb_vol_mixer_controls));
+
+	snd_soc_add_platform_controls(platform, tert_mi2s_lb_vol_mixer_controls,
+			ARRAY_SIZE(tert_mi2s_lb_vol_mixer_controls));
+
+	snd_soc_add_platform_controls(platform, slimbus_7_lb_vol_mixer_controls,
+			ARRAY_SIZE(slimbus_7_lb_vol_mixer_controls));
+
+	snd_soc_add_platform_controls(platform, slimbus_8_lb_vol_mixer_controls,
+			ARRAY_SIZE(slimbus_8_lb_vol_mixer_controls));
+
+	snd_soc_add_platform_controls(platform, int_hfp_vol_mixer_controls,
+			ARRAY_SIZE(int_hfp_vol_mixer_controls));
+
+	snd_soc_add_platform_controls(platform, int_icc_vol_mixer_controls,
+			ARRAY_SIZE(int_icc_vol_mixer_controls));
+
+	snd_soc_add_platform_controls(platform,
+			pri_auxpcm_lb_vol_mixer_controls,
+			ARRAY_SIZE(pri_auxpcm_lb_vol_mixer_controls));
+
+	snd_soc_add_platform_controls(platform,
+				sec_auxpcm_lb_vol_mixer_controls,
+			ARRAY_SIZE(sec_auxpcm_lb_vol_mixer_controls));
+
+	snd_soc_add_platform_controls(platform,
+				multi_ch_channel_map_mixer_controls,
+			ARRAY_SIZE(multi_ch_channel_map_mixer_controls));
+
+	snd_soc_add_platform_controls(platform, get_rms_controls,
+			ARRAY_SIZE(get_rms_controls));
+
+	snd_soc_add_platform_controls(platform, eq_enable_mixer_controls,
+			ARRAY_SIZE(eq_enable_mixer_controls));
+
+	snd_soc_add_platform_controls(platform, eq_band_mixer_controls,
+			ARRAY_SIZE(eq_band_mixer_controls));
+
+	snd_soc_add_platform_controls(platform, eq_coeff_mixer_controls,
+			ARRAY_SIZE(eq_coeff_mixer_controls));
+
+	snd_soc_add_platform_controls(platform, asphere_mixer_controls,
+			ARRAY_SIZE(asphere_mixer_controls));
+
+	snd_soc_add_platform_controls(platform, msm_multichannel_ec_controls,
+			ARRAY_SIZE(msm_multichannel_ec_controls));
+}
+#endif /* CONFIG_QTI_PP */
diff -Nruw linux-4.4.115-fbx/sound/soc/msm./qdsp6v2/msm-qti-pp-config.h linux-4.4.115-fbx/sound/soc/msm/qdsp6v2/msm-qti-pp-config.h
--- linux-4.4.115-fbx/sound/soc/msm./qdsp6v2/msm-qti-pp-config.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/sound/soc/msm/qdsp6v2/msm-qti-pp-config.h	2019-01-22 16:16:29.639301972 +0100
@@ -0,0 +1,53 @@
+/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _MSM_QTI_PP_H_
+#define _MSM_QTI_PP_H_
+
+#include <sound/soc.h>
+int msm_adsp_inform_mixer_ctl(struct snd_soc_pcm_runtime *rtd,
+			uint32_t *payload);
+int msm_adsp_init_mixer_ctl_pp_event_queue(struct snd_soc_pcm_runtime *rtd);
+int msm_adsp_clean_mixer_ctl_pp_event_queue(struct snd_soc_pcm_runtime *rtd);
+int msm_adsp_stream_cmd_info(struct snd_kcontrol *kcontrol,
+			struct snd_ctl_elem_info *uinfo);
+int msm_adsp_stream_callback_get(struct snd_kcontrol *kcontrol,
+			struct snd_ctl_elem_value *ucontrol);
+int msm_adsp_stream_callback_info(struct snd_kcontrol *kcontrol,
+			struct snd_ctl_elem_info *uinfo);
+#ifdef CONFIG_QTI_PP
+void msm_qti_pp_send_eq_values(int fedai_id);
+int msm_qti_pp_send_stereo_to_custom_stereo_cmd(int port_id, int copp_idx,
+						unsigned int session_id,
+						uint16_t op_FL_ip_FL_weight,
+						uint16_t op_FL_ip_FR_weight,
+						uint16_t op_FR_ip_FL_weight,
+						uint16_t op_FR_ip_FR_weight);
+void msm_qti_pp_add_controls(struct snd_soc_platform *platform);
+#else /* CONFIG_QTI_PP */
+#define msm_qti_pp_send_eq_values(fedai_id) do {} while (0)
+#define msm_qti_pp_send_stereo_to_custom_stereo_cmd(port_id, copp_idx, \
+			session_id, op_FL_ip_FL_weight, op_FL_ip_FR_weight, \
+			op_FR_ip_FL_weight, op_FR_ip_FR_weight) (0)
+#define msm_qti_pp_add_controls(platform) do {} while (0)
+#endif /* CONFIG_QTI_PP */
+
+
+#if defined(CONFIG_QTI_PP) && defined(CONFIG_QTI_PP_AUDIOSPHERE)
+int msm_qti_pp_asphere_init(int port_id, int copp_idx);
+void msm_qti_pp_asphere_deinit(int port_id);
+#else
+#define msm_qti_pp_asphere_init(port_id, copp_idx) (0)
+#define msm_qti_pp_asphere_deinit(port_id) do {} while (0)
+#endif
+
+#endif /* _MSM_QTI_PP_H_ */
+
diff -Nruw linux-4.4.115-fbx/sound/soc/msm./qdsp6v2/msm-transcode-loopback-q6-v2.c linux-4.4.115-fbx/sound/soc/msm/qdsp6v2/msm-transcode-loopback-q6-v2.c
--- linux-4.4.115-fbx/sound/soc/msm./qdsp6v2/msm-transcode-loopback-q6-v2.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/sound/soc/msm/qdsp6v2/msm-transcode-loopback-q6-v2.c	2019-10-29 09:26:26.161227819 +0100
@@ -0,0 +1,1445 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/time.h>
+#include <linux/math64.h>
+#include <linux/wait.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/msm_audio_ion.h>
+#include <sound/core.h>
+#include <sound/soc.h>
+#include <sound/soc-dapm.h>
+#include <sound/pcm.h>
+#include <sound/initval.h>
+#include <sound/control.h>
+#include <sound/q6asm-v2.h>
+#include <sound/q6core.h>
+#include <sound/q6audio-v2.h>
+#include <sound/pcm_params.h>
+#include <sound/timer.h>
+#include <sound/tlv.h>
+#include <sound/apr_audio-v2.h>
+#include <sound/compress_params.h>
+#include <sound/compress_offload.h>
+#include <sound/compress_driver.h>
+#include <linux/msm_audio.h>
+
+#include "msm-pcm-routing-v2.h"
+#include "msm-qti-pp-config.h"
+
+#define LOOPBACK_SESSION_MAX_NUM_STREAMS 2
+/* Max volume corresponding to 24dB */
+#define TRANSCODE_LR_VOL_MAX_STEPS 0xFFFF
+
+#define APP_TYPE_CONFIG_IDX_APP_TYPE 0
+#define APP_TYPE_CONFIG_IDX_ACDB_ID 1
+#define APP_TYPE_CONFIG_IDX_SAMPLE_RATE 2
+#define APP_TYPE_CONFIG_IDX_BE_ID 3
+
+static DEFINE_MUTEX(transcode_loopback_session_lock);
+
+struct trans_loopback_pdata {
+	struct snd_compr_stream *cstream[MSM_FRONTEND_DAI_MAX];
+	int32_t ion_fd[MSM_FRONTEND_DAI_MAX];
+	uint32_t master_gain;
+	int perf_mode;
+};
+
+struct loopback_stream {
+	struct snd_compr_stream *cstream;
+	uint32_t codec_format;
+	bool start;
+};
+
+enum loopback_session_state {
+	/* One or both streams not opened */
+	LOOPBACK_SESSION_CLOSE = 0,
+	/* Loopback streams opened */
+	LOOPBACK_SESSION_READY,
+	/* Loopback streams opened and formats configured */
+	LOOPBACK_SESSION_START,
+	/* Trigger issued on either of streams when in START state */
+	LOOPBACK_SESSION_RUN
+};
+
+struct msm_transcode_loopback {
+	struct loopback_stream source;
+	struct loopback_stream sink;
+
+	struct snd_compr_caps source_compr_cap;
+	struct snd_compr_caps sink_compr_cap;
+
+	uint32_t instance;
+	uint32_t num_streams;
+	int session_state;
+
+	struct mutex lock;
+
+	int session_id;
+	struct audio_client *audio_client;
+	int32_t shm_ion_fd;
+	struct ion_client *lib_ion_client;
+	struct ion_client *shm_ion_client;
+	struct ion_handle *lib_ion_handle;
+	struct ion_handle *shm_ion_handle;
+};
+
+/* Transcode loopback global info struct */
+static struct msm_transcode_loopback transcode_info;
+
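+/*
+ * ASM callback for the loopback session: stream PP events are
+ * forwarded to the "DSP Stream Callback" mixer control; basic command
+ * responses are only logged.
+ */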
+static void loopback_event_handler(uint32_t opcode,
+		uint32_t token, uint32_t *payload, void *priv)
+{
+	struct msm_transcode_loopback *trans =
+			(struct msm_transcode_loopback *)priv;
+	struct snd_soc_pcm_runtime *rtd;
+	struct snd_compr_stream *cstream;
+	struct audio_client *ac;
+	int stream_id;
+	int ret;
+
+	if (!trans || !payload) {
+		pr_err("%s: trans or payload is NULL\n", __func__);
+		return;
+	}
+
+	cstream = trans->sink.cstream;
+	ac = trans->audio_client;
+
+	/*
+	 * The token of subsequent compressed commands encodes the
+	 * session id, stream id, direction, etc.
+	 */
+	stream_id = q6asm_get_stream_id_from_token(token);
+
+	switch (opcode) {
+	case ASM_STREAM_CMD_ENCDEC_EVENTS:
+	case ASM_IEC_61937_MEDIA_FMT_EVENT:
+		pr_debug("%s: Handling stream event : 0X%x\n",
+			__func__, opcode);
+		rtd = cstream->private_data;
+		if (!rtd) {
+			pr_err("%s: rtd is NULL\n", __func__);
+			return;
+		}
+
+		ret = msm_adsp_inform_mixer_ctl(rtd, payload);
+		if (ret) {
+			pr_err("%s: failed to inform mixer ctrl. err = %d\n",
+				__func__, ret);
+			return;
+		}
+		break;
+	case APR_BASIC_RSP_RESULT: {
+		switch (payload[0]) {
+		case ASM_SESSION_CMD_RUN_V2:
+			pr_debug("%s: ASM_SESSION_CMD_RUN_V2: token 0x%x, stream id %d\n",
+				 __func__, token, stream_id);
+			break;
+		case ASM_STREAM_CMD_CLOSE:
+			pr_debug("%s: ASM_STREAM_CMD_CLOSE: token 0x%x, stream id %d\n",
+				 __func__, token, stream_id);
+			break;
+		default:
+			break;
+		}
+		break;
+	}
+	default:
+		pr_debug("%s: Unsupported event opcode[0x%x]\n",
+			  __func__, opcode);
+		break;
+	}
+}
+
+static void populate_codec_list(struct msm_transcode_loopback *trans,
+				struct snd_compr_stream *cstream)
+{
+	struct snd_compr_caps compr_cap;
+
+	pr_debug("%s\n", __func__);
+
+	memset(&compr_cap, 0, sizeof(struct snd_compr_caps));
+
+	if (cstream->direction == SND_COMPRESS_CAPTURE) {
+		compr_cap.direction = SND_COMPRESS_CAPTURE;
+		compr_cap.num_codecs = 3;
+		compr_cap.codecs[0] = SND_AUDIOCODEC_PCM;
+		compr_cap.codecs[1] = SND_AUDIOCODEC_AC3;
+		compr_cap.codecs[2] = SND_AUDIOCODEC_EAC3;
+		memcpy(&trans->source_compr_cap, &compr_cap,
+				sizeof(struct snd_compr_caps));
+	}
+
+	if (cstream->direction == SND_COMPRESS_PLAYBACK) {
+		compr_cap.direction = SND_COMPRESS_PLAYBACK;
+		compr_cap.num_codecs = 1;
+		compr_cap.codecs[0] = SND_AUDIOCODEC_PCM;
+		memcpy(&trans->sink_compr_cap, &compr_cap,
+				sizeof(struct snd_compr_caps));
+	}
+}
+
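+/*
+ * Assign the ION buffer backing the DSP library to the ADSP and add
+ * its pages to the ADSP HLOS physical pool; on failure the buffer is
+ * handed back to the HLOS.
+ */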
+static int msm_transcode_map_ion_fd(struct msm_transcode_loopback *trans,
+				    int fd)
+{
+	ion_phys_addr_t paddr;
+	size_t pa_len = 0;
+	int ret = 0;
+
+	ret = msm_audio_ion_phys_assign("audio_lib_mem_client",
+					&trans->lib_ion_client,
+					&trans->lib_ion_handle, fd,
+					&paddr, &pa_len, HLOS_TO_ADSP);
+	if (ret) {
+		pr_err("%s: audio lib ION phys failed, rc = %d\n", __func__,
+			ret);
+		goto done;
+	}
+
+	ret = q6core_add_remove_pool_pages(paddr, pa_len,
+				 ADSP_MEMORY_MAP_HLOS_PHYSPOOL, true);
+	if (ret) {
+		pr_err("%s: add pages failed, rc = %d\n", __func__, ret);
+		/* Assign back to HLOS if add pages cmd failed */
+		msm_audio_ion_phys_free(trans->lib_ion_client,
+					trans->lib_ion_handle,
+					&paddr, &pa_len, ADSP_TO_HLOS);
+	}
+
+done:
+	return ret;
+}
+
+static int msm_transcode_unmap_ion_fd(struct msm_transcode_loopback *trans)
+{
+	ion_phys_addr_t paddr;
+	size_t pa_len = 0;
+	int ret = 0;
+
+	if (!trans->lib_ion_client || !trans->lib_ion_handle) {
+		pr_err("%s: ion_client or ion_handle is NULL\n", __func__);
+		return -EINVAL;
+	}
+	ret = msm_audio_ion_phys_free(trans->lib_ion_client,
+				      trans->lib_ion_handle,
+				      &paddr, &pa_len, ADSP_TO_HLOS);
+	if (ret) {
+		pr_err("%s: audio lib ION phys failed, rc = %d\n", __func__,
+			ret);
+		goto done;
+	}
+
+	ret = q6core_add_remove_pool_pages(paddr, pa_len,
+					ADSP_MEMORY_MAP_HLOS_PHYSPOOL, false);
+	if (ret)
+		pr_err("%s: remove pages failed, rc = %d\n", __func__, ret);
+
+done:
+	return ret;
+}
+
+static int msm_transcode_loopback_open(struct snd_compr_stream *cstream)
+{
+	int ret = 0;
+	struct snd_compr_runtime *runtime;
+	struct snd_soc_pcm_runtime *rtd;
+	struct msm_transcode_loopback *trans = &transcode_info;
+	struct trans_loopback_pdata *pdata;
+
+	if (cstream == NULL) {
+		pr_err("%s: Invalid substream\n", __func__);
+		return -EINVAL;
+	}
+	runtime = cstream->runtime;
+	rtd = snd_pcm_substream_chip(cstream);
+	pdata = snd_soc_platform_get_drvdata(rtd->platform);
+	pdata->cstream[rtd->dai_link->be_id] = cstream;
+
+	mutex_lock(&trans->lock);
+	if (trans->num_streams >= LOOPBACK_SESSION_MAX_NUM_STREAMS) {
+		pr_err("%s: failed, too many streams open\n", __func__);
+		ret = -EINVAL;
+		goto exit;
+	}
+
+	if (cstream->direction == SND_COMPRESS_CAPTURE) {
+		if (trans->source.cstream == NULL) {
+			trans->source.cstream = cstream;
+			trans->num_streams++;
+		} else {
+			pr_err("%s: capture stream already opened\n",
+				__func__);
+			ret = -EINVAL;
+			goto exit;
+		}
+	} else if (cstream->direction == SND_COMPRESS_PLAYBACK) {
+		if (trans->sink.cstream == NULL) {
+			trans->sink.cstream = cstream;
+			trans->num_streams++;
+		} else {
+			pr_err("%s: playback stream already opened\n",
+				__func__);
+			ret = -EINVAL;
+			goto exit;
+		}
+		msm_adsp_init_mixer_ctl_pp_event_queue(rtd);
+		if (pdata->ion_fd[rtd->dai_link->be_id] > 0) {
+			ret = msm_transcode_map_ion_fd(trans,
+					pdata->ion_fd[rtd->dai_link->be_id]);
+			if (ret < 0)
+				goto exit;
+		}
+	}
+
+	pr_debug("%s: num streams %d, stream name %s\n", __func__,
+		 trans->num_streams, cstream->name);
+
+	populate_codec_list(trans, cstream);
+
+	if (trans->num_streams == LOOPBACK_SESSION_MAX_NUM_STREAMS) {
+		pr_debug("%s: Moving loopback session to READY state %d\n",
+			 __func__, trans->session_state);
+		trans->session_state = LOOPBACK_SESSION_READY;
+	}
+
+	runtime->private_data = trans;
+
+exit:
+	mutex_unlock(&trans->lock);
+	return ret;
+}
+
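+/*
+ * Close the ASM session and deregister the routing for both sink and
+ * source streams before freeing the audio client.
+ */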
+static void stop_transcoding(struct msm_transcode_loopback *trans)
+{
+	struct snd_soc_pcm_runtime *soc_pcm_rx;
+	struct snd_soc_pcm_runtime *soc_pcm_tx;
+
+	if (trans->audio_client != NULL) {
+		q6asm_cmd(trans->audio_client, CMD_CLOSE);
+
+		if (trans->sink.cstream != NULL) {
+			soc_pcm_rx = trans->sink.cstream->private_data;
+			msm_pcm_routing_dereg_phy_stream(
+					soc_pcm_rx->dai_link->be_id,
+					SND_COMPRESS_PLAYBACK);
+		}
+		if (trans->source.cstream != NULL) {
+			soc_pcm_tx = trans->source.cstream->private_data;
+			msm_pcm_routing_dereg_phy_stream(
+					soc_pcm_tx->dai_link->be_id,
+					SND_COMPRESS_CAPTURE);
+		}
+		q6asm_audio_client_free(trans->audio_client);
+		trans->audio_client = NULL;
+	}
+}
+
+static int msm_transcode_loopback_free(struct snd_compr_stream *cstream)
+{
+	struct snd_compr_runtime *runtime = cstream->runtime;
+	struct msm_transcode_loopback *trans = runtime->private_data;
+	struct snd_soc_pcm_runtime *rtd = snd_pcm_substream_chip(cstream);
+	struct trans_loopback_pdata *pdata = snd_soc_platform_get_drvdata(
+								rtd->platform);
+	int ret = 0;
+	ion_phys_addr_t paddr;
+	size_t pa_len = 0;
+
+	mutex_lock(&trans->lock);
+
+	pr_debug("%s: Transcode loopback end:%d, streams %d\n", __func__,
+		  cstream->direction, trans->num_streams);
+	trans->num_streams--;
+	stop_transcoding(trans);
+
+	if (cstream->direction == SND_COMPRESS_PLAYBACK) {
+		memset(&trans->sink, 0, sizeof(struct loopback_stream));
+		msm_adsp_clean_mixer_ctl_pp_event_queue(rtd);
+		if (trans->shm_ion_fd > 0) {
+			msm_audio_ion_phys_free(trans->shm_ion_client,
+						trans->shm_ion_handle,
+						&paddr, &pa_len, ADSP_TO_HLOS);
+			trans->shm_ion_fd = 0;
+		}
+		if (pdata->ion_fd[rtd->dai_link->be_id] > 0) {
+			msm_transcode_unmap_ion_fd(trans);
+			pdata->ion_fd[rtd->dai_link->be_id] = 0;
+		}
+	} else if (cstream->direction == SND_COMPRESS_CAPTURE) {
+		memset(&trans->source, 0, sizeof(struct loopback_stream));
+	}
+
+	trans->session_state = LOOPBACK_SESSION_CLOSE;
+	mutex_unlock(&trans->lock);
+	return ret;
+}
+
+static int msm_transcode_loopback_trigger(struct snd_compr_stream *cstream,
+					  int cmd)
+{
+	struct snd_compr_runtime *runtime = cstream->runtime;
+	struct msm_transcode_loopback *trans = runtime->private_data;
+
+	switch (cmd) {
+	case SNDRV_PCM_TRIGGER_START:
+	case SNDRV_PCM_TRIGGER_RESUME:
+	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+
+		if (trans->session_state == LOOPBACK_SESSION_START) {
+			pr_debug("%s: Issue Loopback session %d RUN\n",
+				  __func__, trans->instance);
+			q6asm_run_nowait(trans->audio_client, 0, 0, 0);
+			trans->session_state = LOOPBACK_SESSION_RUN;
+		}
+		break;
+	case SNDRV_PCM_TRIGGER_SUSPEND:
+	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+	case SNDRV_PCM_TRIGGER_STOP:
+		pr_debug("%s: Issue Loopback session %d STOP\n", __func__,
+			  trans->instance);
+		if (trans->session_state == LOOPBACK_SESSION_RUN)
+			q6asm_cmd_nowait(trans->audio_client, CMD_PAUSE);
+		trans->session_state = LOOPBACK_SESSION_START;
+		break;
+
+	default:
+		break;
+	}
+	return 0;
+}
+
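+/*
+ * Configure the source/sink formats. Once both streams are configured
+ * the session moves to START: the ASM transcode loopback session is
+ * opened and both backends are registered with the routing driver.
+ */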
+static int msm_transcode_loopback_set_params(struct snd_compr_stream *cstream,
+				struct snd_compr_params *codec_param)
+{
+	struct snd_compr_runtime *runtime = cstream->runtime;
+	struct msm_transcode_loopback *trans = runtime->private_data;
+	struct snd_soc_pcm_runtime *soc_pcm_rx;
+	struct snd_soc_pcm_runtime *soc_pcm_tx;
+	struct snd_soc_pcm_runtime *rtd;
+	struct trans_loopback_pdata *pdata;
+	uint32_t bit_width = 16;
+	int ret = 0;
+
+	if (trans == NULL) {
+		pr_err("%s: Invalid param\n", __func__);
+		return -EINVAL;
+	}
+
+	mutex_lock(&trans->lock);
+
+	rtd = snd_pcm_substream_chip(cstream);
+	pdata = snd_soc_platform_get_drvdata(rtd->platform);
+
+	if (cstream->direction == SND_COMPRESS_PLAYBACK) {
+		if (codec_param->codec.id == SND_AUDIOCODEC_PCM) {
+			trans->sink.codec_format =
+				FORMAT_LINEAR_PCM;
+			switch (codec_param->codec.format) {
+			case SNDRV_PCM_FORMAT_S32_LE:
+				bit_width = 32;
+				break;
+			case SNDRV_PCM_FORMAT_S24_LE:
+				bit_width = 24;
+				break;
+			case SNDRV_PCM_FORMAT_S24_3LE:
+				bit_width = 24;
+				break;
+			case SNDRV_PCM_FORMAT_S16_LE:
+			default:
+				bit_width = 16;
+				break;
+			}
+		} else {
+			pr_debug("%s: unknown sink codec\n", __func__);
+			ret = -EINVAL;
+			goto exit;
+		}
+		trans->sink.start = true;
+	}
+
+	if (cstream->direction == SND_COMPRESS_CAPTURE) {
+		switch (codec_param->codec.id) {
+		case SND_AUDIOCODEC_PCM:
+			pr_debug("Source SND_AUDIOCODEC_PCM\n");
+			trans->source.codec_format =
+				FORMAT_LINEAR_PCM;
+			break;
+		case SND_AUDIOCODEC_AC3:
+			pr_debug("Source SND_AUDIOCODEC_AC3\n");
+			trans->source.codec_format =
+				FORMAT_AC3;
+			break;
+		case SND_AUDIOCODEC_EAC3:
+			pr_debug("Source SND_AUDIOCODEC_EAC3\n");
+			trans->source.codec_format =
+				FORMAT_EAC3;
+			break;
+		default:
+			pr_debug("%s: unknown source codec\n", __func__);
+			ret = -EINVAL;
+			goto exit;
+		}
+		trans->source.start = true;
+	}
+
+	pr_debug("%s: trans->source.start %d trans->sink.start %d trans->source.cstream %pK trans->sink.cstream %pK trans->session_state %d\n",
+			__func__, trans->source.start, trans->sink.start,
+			trans->source.cstream, trans->sink.cstream,
+			trans->session_state);
+
+	if ((trans->session_state == LOOPBACK_SESSION_READY) &&
+			trans->source.start && trans->sink.start) {
+		pr_debug("%s: Moving loopback session to start state\n",
+			  __func__);
+		trans->session_state = LOOPBACK_SESSION_START;
+	}
+
+	if (trans->session_state == LOOPBACK_SESSION_START) {
+		if (trans->audio_client != NULL) {
+			pr_debug("%s: ASM client already opened, closing\n",
+				 __func__);
+			stop_transcoding(trans);
+		}
+
+		trans->audio_client = q6asm_audio_client_alloc(
+				(app_cb)loopback_event_handler, trans);
+		if (!trans->audio_client) {
+			pr_err("%s: Could not allocate memory\n", __func__);
+			ret = -EINVAL;
+			goto exit;
+		}
+		pr_debug("%s: ASM client allocated, callback %pK\n", __func__,
+						loopback_event_handler);
+		trans->session_id = trans->audio_client->session;
+		trans->audio_client->perf_mode = LEGACY_PCM_MODE;
+		ret = q6asm_open_transcode_loopback(trans->audio_client,
+					bit_width,
+					trans->source.codec_format,
+					trans->sink.codec_format);
+		if (ret < 0) {
+			pr_err("%s: Session transcode loopback open failed\n",
+				__func__);
+			q6asm_audio_client_free(trans->audio_client);
+			trans->audio_client = NULL;
+			goto exit;
+		}
+
+		pr_debug("%s: Starting ADM open for loopback, bw=%d\n",
+			 __func__, bit_width);
+		soc_pcm_rx = trans->sink.cstream->private_data;
+		soc_pcm_tx = trans->source.cstream->private_data;
+		if (trans->source.codec_format != FORMAT_LINEAR_PCM)
+			msm_pcm_routing_reg_phy_compr_stream(
+					soc_pcm_tx->dai_link->be_id,
+					false,
+					trans->session_id,
+					SNDRV_PCM_STREAM_CAPTURE,
+					COMPRESSED_PASSTHROUGH_GEN);
+		else
+			msm_pcm_routing_reg_phy_stream(
+					soc_pcm_tx->dai_link->be_id,
+					trans->audio_client->perf_mode,
+					trans->session_id,
+					SNDRV_PCM_STREAM_CAPTURE);
+		/* Opening Rx ADM in LOW_LATENCY mode by default */
+		msm_pcm_routing_reg_phy_stream(
+					soc_pcm_rx->dai_link->be_id,
+					trans->audio_client->perf_mode,
+					trans->session_id,
+					SNDRV_PCM_STREAM_PLAYBACK);
+		pr_debug("%s: Successfully opened ADM sessions\n", __func__);
+	}
+exit:
+	mutex_unlock(&trans->lock);
+	return ret;
+}
+
+static int msm_transcode_loopback_get_caps(struct snd_compr_stream *cstream,
+				struct snd_compr_caps *arg)
+{
+	struct snd_compr_runtime *runtime;
+	struct msm_transcode_loopback *trans;
+
+	if (!arg || !cstream) {
+		pr_err("%s: Invalid arguments\n", __func__);
+		return -EINVAL;
+	}
+
+	runtime = cstream->runtime;
+	trans = runtime->private_data;
+	pr_debug("%s\n", __func__);
+	if (cstream->direction == SND_COMPRESS_CAPTURE)
+		memcpy(arg, &trans->source_compr_cap,
+		       sizeof(struct snd_compr_caps));
+	else
+		memcpy(arg, &trans->sink_compr_cap,
+		       sizeof(struct snd_compr_caps));
+	return 0;
+}
+
+static int msm_transcode_loopback_set_metadata(struct snd_compr_stream *cstream,
+				struct snd_compr_metadata *metadata)
+{
+	struct snd_soc_pcm_runtime *rtd;
+	struct trans_loopback_pdata *pdata;
+
+	if (!metadata || !cstream) {
+		pr_err("%s: Invalid arguments\n", __func__);
+		return -EINVAL;
+	}
+
+	rtd = snd_pcm_substream_chip(cstream);
+	pdata = snd_soc_platform_get_drvdata(rtd->platform);
+
+	switch (metadata->key) {
+	case SNDRV_COMPRESS_LATENCY_MODE:
+	{
+		switch (metadata->value[0]) {
+		case SNDRV_COMPRESS_LEGACY_LATENCY_MODE:
+			pdata->perf_mode = LEGACY_PCM_MODE;
+			break;
+		case SNDRV_COMPRESS_LOW_LATENCY_MODE:
+			pdata->perf_mode = LOW_LATENCY_PCM_MODE;
+			break;
+		default:
+			pr_debug("%s: Unsupported latency mode %d, default to Legacy\n",
+					__func__, metadata->value[0]);
+			pdata->perf_mode = LEGACY_PCM_MODE;
+			break;
+		}
+	}
+		break;
+	default:
+		pr_debug("%s: Unsupported metadata %d\n",
+				__func__, metadata->key);
+		break;
+	}
+	return 0;
+}
+
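+/*
+ * Forward a stream event command from userspace to the ASM session
+ * after validating the event type and payload length.
+ */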
+static int msm_transcode_stream_cmd_put(struct snd_kcontrol *kcontrol,
+				struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_component *comp = snd_kcontrol_chip(kcontrol);
+	unsigned long fe_id = kcontrol->private_value;
+	struct trans_loopback_pdata *pdata = (struct trans_loopback_pdata *)
+				snd_soc_component_get_drvdata(comp);
+	struct snd_compr_stream *cstream = NULL;
+	struct msm_transcode_loopback *prtd;
+	int ret = 0;
+	struct msm_adsp_event_data *event_data = NULL;
+	uint64_t actual_payload_len = 0;
+
+	if (fe_id >= MSM_FRONTEND_DAI_MAX) {
+		pr_err("%s Received invalid fe_id %lu\n",
+			__func__, fe_id);
+		ret = -EINVAL;
+		goto done;
+	}
+
+	cstream = pdata->cstream[fe_id];
+	if (cstream == NULL) {
+		pr_err("%s cstream is null.\n", __func__);
+		ret = -EINVAL;
+		goto done;
+	}
+
+	prtd = cstream->runtime->private_data;
+	if (!prtd) {
+		pr_err("%s: prtd is null.\n", __func__);
+		ret = -EINVAL;
+		goto done;
+	}
+
+	if (prtd->audio_client == NULL) {
+		pr_err("%s: audio_client is null.\n", __func__);
+		ret = -EINVAL;
+		goto done;
+	}
+
+	event_data = (struct msm_adsp_event_data *)ucontrol->value.bytes.data;
+	if ((event_data->event_type < ADSP_STREAM_PP_EVENT) ||
+	    (event_data->event_type >= ADSP_STREAM_EVENT_MAX)) {
+		pr_err("%s: invalid event_type=%d\n",
+			 __func__, event_data->event_type);
+		ret = -EINVAL;
+		goto done;
+	}
+
+	actual_payload_len = sizeof(struct msm_adsp_event_data) +
+		event_data->payload_len;
+	if (actual_payload_len >= U32_MAX) {
+		pr_err("%s: payload length 0x%X exceeds limit\n",
+				__func__, event_data->payload_len);
+		ret = -EINVAL;
+		goto done;
+	}
+
+	if (event_data->payload_len > sizeof(ucontrol->value.bytes.data)
+		- sizeof(struct msm_adsp_event_data)) {
+		pr_err("%s: param length=%d exceeds limit\n",
+			 __func__, event_data->payload_len);
+		ret = -EINVAL;
+		goto done;
+	}
+
+	ret = q6asm_send_stream_cmd(prtd->audio_client, event_data);
+	if (ret < 0)
+		pr_err("%s: failed to send stream event cmd, err = %d\n",
+			__func__, ret);
+done:
+	return ret;
+}
+
+static int msm_transcode_shm_ion_fd_map_put(struct snd_kcontrol *kcontrol,
+				    struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_component *comp = snd_kcontrol_chip(kcontrol);
+	unsigned long fe_id = kcontrol->private_value;
+	struct trans_loopback_pdata *pdata = (struct trans_loopback_pdata *)
+				snd_soc_component_get_drvdata(comp);
+	struct snd_compr_stream *cstream = NULL;
+	struct msm_transcode_loopback *prtd;
+	int ret = 0;
+
+	if (fe_id >= MSM_FRONTEND_DAI_MAX) {
+		pr_err("%s Received out of bounds invalid fe_id %lu\n",
+			__func__, fe_id);
+		ret = -EINVAL;
+		goto done;
+	}
+
+	cstream = pdata->cstream[fe_id];
+	if (cstream == NULL) {
+		pr_err("%s cstream is null\n", __func__);
+		ret = -EINVAL;
+		goto done;
+	}
+
+	prtd = cstream->runtime->private_data;
+	if (!prtd) {
+		pr_err("%s: prtd is null\n", __func__);
+		ret = -EINVAL;
+		goto done;
+	}
+
+	if (prtd->audio_client == NULL) {
+		pr_err("%s: audio_client is null\n", __func__);
+		ret = -EINVAL;
+		goto done;
+	}
+
+	memcpy(&prtd->shm_ion_fd, ucontrol->value.bytes.data,
+		sizeof(prtd->shm_ion_fd));
+	ret = q6asm_audio_map_shm_fd(prtd->audio_client,
+				&prtd->shm_ion_client,
+				&prtd->shm_ion_handle, prtd->shm_ion_fd);
+	if (ret < 0)
+		pr_err("%s: failed to map shm mem\n", __func__);
+done:
+	return ret;
+}
+
+static int msm_transcode_lib_ion_fd_map_put(struct snd_kcontrol *kcontrol,
+				    struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_component *comp = snd_kcontrol_chip(kcontrol);
+	unsigned long fe_id = kcontrol->private_value;
+	struct trans_loopback_pdata *pdata = (struct trans_loopback_pdata *)
+				snd_soc_component_get_drvdata(comp);
+	int ret = 0;
+
+	if (fe_id >= MSM_FRONTEND_DAI_MAX) {
+		pr_err("%s Received out of bounds invalid fe_id %lu\n",
+			__func__, fe_id);
+		ret = -EINVAL;
+		goto done;
+	}
+
+	memcpy(&pdata->ion_fd[fe_id], ucontrol->value.bytes.data,
+		   sizeof(pdata->ion_fd[fe_id]));
+done:
+	return ret;
+}
+
+static int msm_transcode_rtic_event_ack_put(struct snd_kcontrol *kcontrol,
+					struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_component *comp = snd_kcontrol_chip(kcontrol);
+	unsigned long fe_id = kcontrol->private_value;
+	struct trans_loopback_pdata *pdata = (struct trans_loopback_pdata *)
+					snd_soc_component_get_drvdata(comp);
+	struct snd_compr_stream *cstream = NULL;
+	struct msm_transcode_loopback *prtd;
+	int ret = 0;
+	int param_length = 0;
+
+	if (fe_id >= MSM_FRONTEND_DAI_MAX) {
+		pr_err("%s Received invalid fe_id %lu\n",
+			__func__, fe_id);
+		ret = -EINVAL;
+		goto done;
+	}
+
+	cstream = pdata->cstream[fe_id];
+	if (cstream == NULL) {
+		pr_err("%s cstream is null\n", __func__);
+		ret = -EINVAL;
+		goto done;
+	}
+
+	prtd = cstream->runtime->private_data;
+	if (!prtd) {
+		pr_err("%s: prtd is null\n", __func__);
+		ret = -EINVAL;
+		goto done;
+	}
+
+	if (prtd->audio_client == NULL) {
+		pr_err("%s: audio_client is null\n", __func__);
+		ret = -EINVAL;
+		goto done;
+	}
+
+	memcpy(&param_length, ucontrol->value.bytes.data,
+		sizeof(param_length));
+	if ((param_length + sizeof(param_length))
+		>= sizeof(ucontrol->value.bytes.data)) {
+		pr_err("%s param length=%d  exceeds limit",
+			__func__, param_length);
+		ret = -EINVAL;
+		goto done;
+	}
+
+	ret = q6asm_send_rtic_event_ack(prtd->audio_client,
+			ucontrol->value.bytes.data + sizeof(param_length),
+			param_length);
+	if (ret < 0)
+		pr_err("%s: failed to send rtic event ack, err = %d\n",
+			__func__, ret);
+done:
+	return ret;
+}
+
+static int msm_transcode_playback_app_type_cfg_put(
+			struct snd_kcontrol *kcontrol,
+			struct snd_ctl_elem_value *ucontrol)
+{
+	u64 fe_id = kcontrol->private_value;
+	int session_type = SESSION_TYPE_RX;
+	int be_id = ucontrol->value.integer.value[APP_TYPE_CONFIG_IDX_BE_ID];
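+	/* sample rate defaults to 48 kHz unless userspace supplies one */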
+	struct msm_pcm_stream_app_type_cfg cfg_data = {0, 0, 48000};
+	int ret = 0;
+
+	cfg_data.app_type = ucontrol->value.integer.value[
+			    APP_TYPE_CONFIG_IDX_APP_TYPE];
+	cfg_data.acdb_dev_id = ucontrol->value.integer.value[
+			       APP_TYPE_CONFIG_IDX_ACDB_ID];
+	if (ucontrol->value.integer.value[APP_TYPE_CONFIG_IDX_SAMPLE_RATE] != 0)
+		cfg_data.sample_rate = ucontrol->value.integer.value[
+				       APP_TYPE_CONFIG_IDX_SAMPLE_RATE];
+	pr_debug("%s: fe_id %llu session_type %d be_id %d app_type %d acdb_dev_id %d sample_rate- %d\n",
+		__func__, fe_id, session_type, be_id,
+		cfg_data.app_type, cfg_data.acdb_dev_id, cfg_data.sample_rate);
+	ret = msm_pcm_routing_reg_stream_app_type_cfg(fe_id, session_type,
+						      be_id, &cfg_data);
+	if (ret < 0)
+		pr_err("%s: msm_transcode_playback_stream_app_type_cfg set failed returned %d\n",
+			__func__, ret);
+
+	return ret;
+}
+
+static int msm_transcode_playback_app_type_cfg_get(
+			struct snd_kcontrol *kcontrol,
+			struct snd_ctl_elem_value *ucontrol)
+{
+	u64 fe_id = kcontrol->private_value;
+	int session_type = SESSION_TYPE_RX;
+	int be_id = 0;
+	struct msm_pcm_stream_app_type_cfg cfg_data = {0};
+	int ret = 0;
+
+	ret = msm_pcm_routing_get_stream_app_type_cfg(fe_id, session_type,
+						      &be_id, &cfg_data);
+	if (ret < 0) {
+		pr_err("%s: msm_transcode_playback_stream_app_type_cfg get failed returned %d\n",
+			__func__, ret);
+		goto done;
+	}
+
+	ucontrol->value.integer.value[APP_TYPE_CONFIG_IDX_APP_TYPE] =
+					cfg_data.app_type;
+	ucontrol->value.integer.value[APP_TYPE_CONFIG_IDX_ACDB_ID] =
+					cfg_data.acdb_dev_id;
+	ucontrol->value.integer.value[APP_TYPE_CONFIG_IDX_SAMPLE_RATE] =
+					cfg_data.sample_rate;
+	ucontrol->value.integer.value[APP_TYPE_CONFIG_IDX_BE_ID] = be_id;
+	pr_debug("%s: fedai_id %llu, session_type %d, be_id %d, app_type %d, acdb_dev_id %d, sample_rate %d\n",
+		__func__, fe_id, session_type, be_id,
+		cfg_data.app_type, cfg_data.acdb_dev_id, cfg_data.sample_rate);
+done:
+	return ret;
+}
+
+static int msm_transcode_set_volume(struct snd_compr_stream *cstream,
+				uint32_t master_gain)
+{
+	int rc = 0;
+	struct msm_transcode_loopback *prtd;
+	struct snd_soc_pcm_runtime *rtd;
+
+	pr_debug("%s: master_gain %d\n", __func__, master_gain);
+	if (!cstream || !cstream->runtime) {
+		pr_err("%s: session not active\n", __func__);
+		return -EPERM;
+	}
+	rtd = cstream->private_data;
+	prtd = cstream->runtime->private_data;
+
+	if (!rtd || !rtd->platform || !prtd || !prtd->audio_client) {
+		pr_err("%s: invalid rtd, prtd or audio client", __func__);
+		return -EINVAL;
+	}
+
+	rc = q6asm_set_volume(prtd->audio_client, master_gain);
+	if (rc < 0)
+		pr_err("%s: Send vol gain command failed rc=%d\n",
+		       __func__, rc);
+
+	return rc;
+}
+
+static int msm_transcode_volume_put(struct snd_kcontrol *kcontrol,
+				struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_component *comp = snd_kcontrol_chip(kcontrol);
+	unsigned long fe_id = kcontrol->private_value;
+	struct trans_loopback_pdata *pdata = (struct trans_loopback_pdata *)
+			snd_soc_component_get_drvdata(comp);
+	struct snd_compr_stream *cstream = NULL;
+	int ret = 0;
+
+	if (fe_id >= MSM_FRONTEND_DAI_MAX) {
+		pr_err("%s Received out of bounds fe_id %lu\n",
+			__func__, fe_id);
+		return -EINVAL;
+	}
+
+	cstream = pdata->cstream[fe_id];
+	pdata->master_gain = ucontrol->value.integer.value[0];
+
+	pr_debug("%s: fe_id %lu master_gain %d\n",
+		 __func__, fe_id, pdata->master_gain);
+	if (cstream)
+		ret = msm_transcode_set_volume(cstream, pdata->master_gain);
+	return ret;
+}
+
+static int msm_transcode_volume_get(struct snd_kcontrol *kcontrol,
+				struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_component *comp = snd_kcontrol_chip(kcontrol);
+	unsigned long fe_id = kcontrol->private_value;
+
+	struct trans_loopback_pdata *pdata = (struct trans_loopback_pdata *)
+			snd_soc_component_get_drvdata(comp);
+
+	if (fe_id >= MSM_FRONTEND_DAI_MAX) {
+		pr_err("%s Received out of bound fe_id %lu\n", __func__, fe_id);
+		return -EINVAL;
+	}
+
+	pr_debug("%s: fe_id %lu\n", __func__, fe_id);
+	ucontrol->value.integer.value[0] = pdata->master_gain;
+
+	return 0;
+}
+
+static int msm_transcode_stream_cmd_control(
+			struct snd_soc_pcm_runtime *rtd)
+{
+	const char *mixer_ctl_name = DSP_STREAM_CMD;
+	const char *deviceNo = "NN";
+	char *mixer_str = NULL;
+	int ctl_len = 0, ret = 0;
+	struct snd_kcontrol_new fe_loopback_stream_cmd_config_control[1] = {
+		{
+		.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+		.name = "?",
+		.access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+		.info = msm_adsp_stream_cmd_info,
+		.put = msm_transcode_stream_cmd_put,
+		.private_value = 0,
+		}
+	};
+
+	if (!rtd) {
+		pr_err("%s NULL rtd\n", __func__);
+		ret = -EINVAL;
+		goto done;
+	}
+
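+	/*
+	 * "NN" reserves room for a two-digit device number; the two "+ 1"s
+	 * account for the separating space and the trailing NUL.
+	 */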
+	ctl_len = strlen(mixer_ctl_name) + 1 + strlen(deviceNo) + 1;
+	mixer_str = kzalloc(ctl_len, GFP_KERNEL);
+	if (!mixer_str) {
+		ret = -ENOMEM;
+		goto done;
+	}
+
+	snprintf(mixer_str, ctl_len, "%s %d", mixer_ctl_name, rtd->pcm->device);
+	fe_loopback_stream_cmd_config_control[0].name = mixer_str;
+	fe_loopback_stream_cmd_config_control[0].private_value =
+				rtd->dai_link->be_id;
+	pr_debug("%s: Registering new mixer ctl %s\n", __func__, mixer_str);
+	ret = snd_soc_add_platform_controls(rtd->platform,
+		fe_loopback_stream_cmd_config_control,
+		ARRAY_SIZE(fe_loopback_stream_cmd_config_control));
+	if (ret < 0)
+		pr_err("%s: failed to add ctl %s. err = %d\n",
+			__func__, mixer_str, ret);
+
+	kfree(mixer_str);
+done:
+	return ret;
+}
+
+static int msm_transcode_stream_callback_control(
+			struct snd_soc_pcm_runtime *rtd)
+{
+	const char *mixer_ctl_name = DSP_STREAM_CALLBACK;
+	const char *deviceNo = "NN";
+	char *mixer_str = NULL;
+	int ctl_len = 0, ret = 0;
+	struct snd_kcontrol *kctl;
+
+	struct snd_kcontrol_new fe_loopback_callback_config_control[1] = {
+		{
+		.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+		.name = "?",
+		.access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+		.info = msm_adsp_stream_callback_info,
+		.get = msm_adsp_stream_callback_get,
+		.private_value = 0,
+		}
+	};
+
+	if (!rtd) {
+		pr_err("%s: rtd is  NULL\n", __func__);
+		ret = -EINVAL;
+		goto done;
+	}
+
+	ctl_len = strlen(mixer_ctl_name) + 1 + strlen(deviceNo) + 1;
+	mixer_str = kzalloc(ctl_len, GFP_KERNEL);
+	if (!mixer_str) {
+		ret = -ENOMEM;
+		goto done;
+	}
+
+	snprintf(mixer_str, ctl_len, "%s %d", mixer_ctl_name, rtd->pcm->device);
+	fe_loopback_callback_config_control[0].name = mixer_str;
+	fe_loopback_callback_config_control[0].private_value =
+					rtd->dai_link->be_id;
+	pr_debug("%s: Registering new mixer ctl %s\n", __func__, mixer_str);
+	ret = snd_soc_add_platform_controls(rtd->platform,
+			fe_loopback_callback_config_control,
+			ARRAY_SIZE(fe_loopback_callback_config_control));
+	if (ret < 0) {
+		pr_err("%s: failed to add ctl %s. err = %d\n",
+			__func__, mixer_str, ret);
+		ret = -EINVAL;
+		goto free_mixer_str;
+	}
+
+	kctl = snd_soc_card_get_kcontrol(rtd->card, mixer_str);
+	if (!kctl) {
+		pr_err("%s: failed to get kctl %s.\n", __func__, mixer_str);
+		ret = -EINVAL;
+		goto free_mixer_str;
+	}
+
+	kctl->private_data = NULL;
+free_mixer_str:
+	kfree(mixer_str);
+done:
+	return ret;
+}
+
+static int msm_transcode_add_shm_ion_fd_cmd_control(
+					struct snd_soc_pcm_runtime *rtd)
+{
+	const char *mixer_ctl_name = "Playback ION FD";
+	const char *deviceNo = "NN";
+	char *mixer_str = NULL;
+	int ctl_len = 0, ret = 0;
+	struct snd_kcontrol_new fe_ion_fd_config_control[1] = {
+		{
+		.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+		.name = "?",
+		.access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+		.info = msm_adsp_stream_cmd_info,
+		.put = msm_transcode_shm_ion_fd_map_put,
+		.private_value = 0,
+		}
+	};
+
+	if (!rtd) {
+		pr_err("%s NULL rtd\n", __func__);
+		ret = -EINVAL;
+		goto done;
+	}
+
+	ctl_len = strlen(mixer_ctl_name) + 1 + strlen(deviceNo) + 1;
+	mixer_str = kzalloc(ctl_len, GFP_KERNEL);
+	if (!mixer_str) {
+		ret = -ENOMEM;
+		goto done;
+	}
+
+	snprintf(mixer_str, ctl_len, "%s %d", mixer_ctl_name, rtd->pcm->device);
+	fe_ion_fd_config_control[0].name = mixer_str;
+	fe_ion_fd_config_control[0].private_value = rtd->dai_link->be_id;
+	pr_debug("%s: Registering new mixer ctl %s\n", __func__, mixer_str);
+	ret = snd_soc_add_platform_controls(rtd->platform,
+				fe_ion_fd_config_control,
+				ARRAY_SIZE(fe_ion_fd_config_control));
+	if (ret < 0)
+		pr_err("%s: failed to add ctl %s\n", __func__, mixer_str);
+
+	kfree(mixer_str);
+done:
+	return ret;
+}
+
+static int msm_transcode_add_lib_ion_fd_cmd_control(
+					struct snd_soc_pcm_runtime *rtd)
+{
+	const char *mixer_ctl_name = "Playback ION LIB FD";
+	const char *deviceNo = "NN";
+	char *mixer_str = NULL;
+	int ctl_len = 0, ret = 0;
+	struct snd_kcontrol_new fe_ion_fd_config_control[1] = {
+		{
+		.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+		.name = "?",
+		.access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+		.info = msm_adsp_stream_cmd_info,
+		.put = msm_transcode_lib_ion_fd_map_put,
+		.private_value = 0,
+		}
+	};
+
+	if (!rtd) {
+		pr_err("%s NULL rtd\n", __func__);
+		ret = -EINVAL;
+		goto done;
+	}
+
+	ctl_len = strlen(mixer_ctl_name) + 1 + strlen(deviceNo) + 1;
+	mixer_str = kzalloc(ctl_len, GFP_KERNEL);
+	if (!mixer_str) {
+		ret = -ENOMEM;
+		goto done;
+	}
+
+	snprintf(mixer_str, ctl_len, "%s %d", mixer_ctl_name, rtd->pcm->device);
+	fe_ion_fd_config_control[0].name = mixer_str;
+	fe_ion_fd_config_control[0].private_value = rtd->dai_link->be_id;
+	pr_debug("%s: Registering new mixer ctl %s\n", __func__, mixer_str);
+	ret = snd_soc_add_platform_controls(rtd->platform,
+				fe_ion_fd_config_control,
+				ARRAY_SIZE(fe_ion_fd_config_control));
+	if (ret < 0)
+		pr_err("%s: failed to add ctl %s\n", __func__, mixer_str);
+
+	kfree(mixer_str);
+done:
+	return ret;
+}
+
+static int msm_transcode_add_event_ack_cmd_control(
+					struct snd_soc_pcm_runtime *rtd)
+{
+	const char *mixer_ctl_name = "Playback Event Ack";
+	const char *deviceNo = "NN";
+	char *mixer_str = NULL;
+	int ctl_len = 0, ret = 0;
+	struct snd_kcontrol_new fe_event_ack_config_control[1] = {
+		{
+		.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+		.name = "?",
+		.access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+		.info = msm_adsp_stream_cmd_info,
+		.put = msm_transcode_rtic_event_ack_put,
+		.private_value = 0,
+		}
+	};
+
+	if (!rtd) {
+		pr_err("%s NULL rtd\n", __func__);
+		ret = -EINVAL;
+		goto done;
+	}
+
+	ctl_len = strlen(mixer_ctl_name) + 1 + strlen(deviceNo) + 1;
+	mixer_str = kzalloc(ctl_len, GFP_KERNEL);
+	if (!mixer_str) {
+		ret = -ENOMEM;
+		goto done;
+	}
+
+	snprintf(mixer_str, ctl_len, "%s %d", mixer_ctl_name, rtd->pcm->device);
+	fe_event_ack_config_control[0].name = mixer_str;
+	fe_event_ack_config_control[0].private_value = rtd->dai_link->be_id;
+	pr_debug("%s: Registering new mixer ctl %s\n", __func__, mixer_str);
+	ret = snd_soc_add_platform_controls(rtd->platform,
+				fe_event_ack_config_control,
+				ARRAY_SIZE(fe_event_ack_config_control));
+	if (ret < 0)
+		pr_err("%s: failed to add ctl %s\n", __func__, mixer_str);
+
+	kfree(mixer_str);
+done:
+	return ret;
+}
+
+static int msm_transcode_app_type_cfg_info(struct snd_kcontrol *kcontrol,
+				       struct snd_ctl_elem_info *uinfo)
+{
+	uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
+	uinfo->count = 5;
+	uinfo->value.integer.min = 0;
+	uinfo->value.integer.max = 0xFFFFFFFF;
+	return 0;
+}
+
+static int msm_transcode_add_app_type_cfg_control(
+			struct snd_soc_pcm_runtime *rtd)
+{
+	char mixer_str[32];
+	int rc = 0;
+	struct snd_kcontrol_new fe_app_type_cfg_control[1] = {
+		{
+		.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+		.access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+		.info = msm_transcode_app_type_cfg_info,
+		.put = msm_transcode_playback_app_type_cfg_put,
+		.get = msm_transcode_playback_app_type_cfg_get,
+		.private_value = 0,
+		}
+	};
+
+	if (!rtd) {
+		pr_err("%s NULL rtd\n", __func__);
+
+		return -EINVAL;
+	}
+
+	if (rtd->compr->direction == SND_COMPRESS_PLAYBACK) {
+
+		snprintf(mixer_str, sizeof(mixer_str),
+			"Audio Stream %d App Type Cfg",
+			 rtd->pcm->device);
+
+		fe_app_type_cfg_control[0].name = mixer_str;
+		fe_app_type_cfg_control[0].private_value = rtd->dai_link->be_id;
+
+		fe_app_type_cfg_control[0].put =
+				msm_transcode_playback_app_type_cfg_put;
+		fe_app_type_cfg_control[0].get =
+				msm_transcode_playback_app_type_cfg_get;
+
+		pr_debug("Registering new mixer ctl %s", mixer_str);
+		snd_soc_add_platform_controls(rtd->platform,
+					fe_app_type_cfg_control,
+					ARRAY_SIZE(fe_app_type_cfg_control));
+	}
+
+	return rc;
+}
+
+static int msm_transcode_volume_info(struct snd_kcontrol *kcontrol,
+				 struct snd_ctl_elem_info *uinfo)
+{
+	uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
+	uinfo->count = 1;
+	uinfo->value.integer.min = 0;
+	uinfo->value.integer.max = TRANSCODE_LR_VOL_MAX_STEPS;
+	return 0;
+}
+
+static int msm_transcode_add_volume_control(struct snd_soc_pcm_runtime *rtd)
+{
+	struct snd_kcontrol_new fe_volume_control[1] = {
+		{
+		.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+		.name = "Transcode Loopback Rx Volume",
+		.access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+		.info = msm_transcode_volume_info,
+		.get = msm_transcode_volume_get,
+		.put = msm_transcode_volume_put,
+		.private_value = 0,
+		}
+	};
+
+	if (!rtd) {
+		pr_err("%s NULL rtd\n", __func__);
+		return -EINVAL;
+	}
+	if (rtd->compr->direction == SND_COMPRESS_PLAYBACK) {
+		fe_volume_control[0].private_value = rtd->dai_link->be_id;
+		pr_debug("Registering new mixer ctl %s",
+			     fe_volume_control[0].name);
+		snd_soc_add_platform_controls(rtd->platform, fe_volume_control,
+						ARRAY_SIZE(fe_volume_control));
+	}
+	return 0;
+}
+
+static int msm_transcode_loopback_new(struct snd_soc_pcm_runtime *rtd)
+{
+	int rc;
+
+	rc = msm_transcode_stream_cmd_control(rtd);
+	if (rc)
+		pr_err("%s: ADSP Stream Cmd Control open failed\n", __func__);
+
+	rc = msm_transcode_stream_callback_control(rtd);
+	if (rc)
+		pr_err("%s: ADSP Stream callback Control open failed\n",
+			__func__);
+
+	rc = msm_transcode_add_shm_ion_fd_cmd_control(rtd);
+	if (rc)
+		pr_err("%s: Could not add transcode shm ion fd Control\n",
+			__func__);
+
+	rc = msm_transcode_add_lib_ion_fd_cmd_control(rtd);
+	if (rc)
+		pr_err("%s: Could not add transcode lib ion fd Control\n",
+			__func__);
+
+	rc = msm_transcode_add_event_ack_cmd_control(rtd);
+	if (rc)
+		pr_err("%s: Could not add transcode event ack Control\n",
+			__func__);
+
+	rc = msm_transcode_add_app_type_cfg_control(rtd);
+	if (rc)
+		pr_err("%s: Could not add Compr App Type Cfg Control\n",
+			__func__);
+
+	rc = msm_transcode_add_volume_control(rtd);
+	if (rc)
+		pr_err("%s: Could not add transcode volume Control\n",
+			__func__);
+
+	return 0;
+}
+
+static struct snd_compr_ops msm_transcode_loopback_ops = {
+	.open			= msm_transcode_loopback_open,
+	.free			= msm_transcode_loopback_free,
+	.trigger		= msm_transcode_loopback_trigger,
+	.set_params		= msm_transcode_loopback_set_params,
+	.get_caps		= msm_transcode_loopback_get_caps,
+	.set_metadata		= msm_transcode_loopback_set_metadata,
+};
+
+static int msm_transcode_loopback_probe(struct snd_soc_platform *platform)
+{
+	struct trans_loopback_pdata *pdata = NULL;
+
+	pr_debug("%s\n", __func__);
+	pdata = (struct trans_loopback_pdata *)
+			kzalloc(sizeof(struct trans_loopback_pdata),
+			GFP_KERNEL);
+	if (!pdata)
+		return -ENOMEM;
+
+	pdata->perf_mode = LOW_LATENCY_PCM_MODE;
+	snd_soc_platform_set_drvdata(platform, pdata);
+	return 0;
+}
+
+static int msm_transcode_loopback_remove(struct snd_soc_platform *platform)
+{
+	struct trans_loopback_pdata *pdata = NULL;
+
+	pdata = (struct trans_loopback_pdata *)
+			snd_soc_platform_get_drvdata(platform);
+	kfree(pdata);
+	return 0;
+}
+
+static struct snd_soc_platform_driver msm_soc_platform = {
+	.probe		= msm_transcode_loopback_probe,
+	.compr_ops	= &msm_transcode_loopback_ops,
+	.pcm_new	= msm_transcode_loopback_new,
+	.remove		= msm_transcode_loopback_remove,
+};
+
+static int msm_transcode_dev_probe(struct platform_device *pdev)
+{
+	return snd_soc_register_platform(&pdev->dev,
+					&msm_soc_platform);
+}
+
+static int msm_transcode_remove(struct platform_device *pdev)
+{
+	snd_soc_unregister_platform(&pdev->dev);
+	return 0;
+}
+
+static const struct of_device_id msm_transcode_loopback_dt_match[] = {
+	{.compatible = "qcom,msm-transcode-loopback"},
+	{}
+};
+MODULE_DEVICE_TABLE(of, msm_transcode_loopback_dt_match);
+
+static struct platform_driver msm_transcode_loopback_driver = {
+	.driver = {
+		.name = "msm-transcode-loopback",
+		.owner = THIS_MODULE,
+		.of_match_table = msm_transcode_loopback_dt_match,
+	},
+	.probe = msm_transcode_dev_probe,
+	.remove = msm_transcode_remove,
+};
+
+static int __init msm_soc_platform_init(void)
+{
+	memset(&transcode_info, 0, sizeof(struct msm_transcode_loopback));
+	mutex_init(&transcode_info.lock);
+	return platform_driver_register(&msm_transcode_loopback_driver);
+}
+module_init(msm_soc_platform_init);
+
+static void __exit msm_soc_platform_exit(void)
+{
+	mutex_destroy(&transcode_info.lock);
+	platform_driver_unregister(&msm_transcode_loopback_driver);
+}
+module_exit(msm_soc_platform_exit);
+
+MODULE_DESCRIPTION("Transcode loopback platform driver");
+MODULE_LICENSE("GPL v2");
diff -Nruw linux-4.4.115-fbx/sound/soc/msm./qdsp6v2/q6adm.c linux-4.4.115-fbx/sound/soc/msm/qdsp6v2/q6adm.c
--- linux-4.4.115-fbx/sound/soc/msm./qdsp6v2/q6adm.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/sound/soc/msm/qdsp6v2/q6adm.c	2019-10-29 09:26:26.161227819 +0100
@@ -0,0 +1,4956 @@
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/wait.h>
+#include <linux/sched.h>
+#include <linux/jiffies.h>
+#include <linux/uaccess.h>
+#include <linux/atomic.h>
+#include <linux/qdsp6v2/apr.h>
+#include <sound/apr_audio-v2.h>
+#include <sound/q6adm-v2.h>
+#include <sound/q6audio-v2.h>
+#include <sound/q6afe-v2.h>
+#include <sound/audio_cal_utils.h>
+#include <sound/asound.h>
+#include "msm-dts-srs-tm-config.h"
+#include <sound/adsp_err.h>
+
+#define TIMEOUT_MS 1000
+
+#define RESET_COPP_ID 99
+#define INVALID_COPP_ID 0xFF
+/* Used for inband payload copy, max size is 4k */
+/* 2 is to account for module & param ID in payload */
+#define ADM_GET_PARAMETER_LENGTH  (4096 - APR_HDR_SIZE - 2 * sizeof(uint32_t))
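+/*
+ * Illustrative arithmetic (ours, not from the source): assuming the usual
+ * 20-byte struct apr_hdr, this works out to 4096 - 20 - 2 * 4 = 4068 bytes
+ * of inband payload. Note that adm_get_parameters below reserves this many
+ * ints, not bytes, per COPP.
+ */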
+
+#define ULL_SUPPORTED_BITS_PER_SAMPLE 16
+#define ULL_SUPPORTED_SAMPLE_RATE 48000
+
+#ifndef CONFIG_DOLBY_DAP
+#undef DOLBY_ADM_COPP_TOPOLOGY_ID
+#define DOLBY_ADM_COPP_TOPOLOGY_ID 0xFFFFFFFE
+#endif
+
+#ifndef CONFIG_DOLBY_DS2
+#undef DS2_ADM_COPP_TOPOLOGY_ID
+#define DS2_ADM_COPP_TOPOLOGY_ID 0xFFFFFFFF
+#endif
+
+struct adm_copp {
+
+	atomic_t id[AFE_MAX_PORTS][MAX_COPPS_PER_PORT];
+	atomic_t cnt[AFE_MAX_PORTS][MAX_COPPS_PER_PORT];
+	atomic_t topology[AFE_MAX_PORTS][MAX_COPPS_PER_PORT];
+	atomic_t mode[AFE_MAX_PORTS][MAX_COPPS_PER_PORT];
+	atomic_t stat[AFE_MAX_PORTS][MAX_COPPS_PER_PORT];
+	atomic_t rate[AFE_MAX_PORTS][MAX_COPPS_PER_PORT];
+	atomic_t bit_width[AFE_MAX_PORTS][MAX_COPPS_PER_PORT];
+	atomic_t channels[AFE_MAX_PORTS][MAX_COPPS_PER_PORT];
+	atomic_t app_type[AFE_MAX_PORTS][MAX_COPPS_PER_PORT];
+	atomic_t acdb_id[AFE_MAX_PORTS][MAX_COPPS_PER_PORT];
+	wait_queue_head_t wait[AFE_MAX_PORTS][MAX_COPPS_PER_PORT];
+	wait_queue_head_t adm_delay_wait[AFE_MAX_PORTS][MAX_COPPS_PER_PORT];
+	atomic_t adm_delay_stat[AFE_MAX_PORTS][MAX_COPPS_PER_PORT];
+	uint32_t adm_delay[AFE_MAX_PORTS][MAX_COPPS_PER_PORT];
+	unsigned long adm_status[AFE_MAX_PORTS][MAX_COPPS_PER_PORT];
+};
+
+struct source_tracking_data {
+	struct ion_client *ion_client;
+	struct ion_handle *ion_handle;
+	struct param_outband memmap;
+	int apr_cmd_status;
+};
+
+struct adm_ctl {
+	void *apr;
+
+	struct adm_copp copp;
+
+	atomic_t matrix_map_stat;
+	wait_queue_head_t matrix_map_wait;
+
+	atomic_t adm_stat;
+	wait_queue_head_t adm_wait;
+
+	struct cal_type_data *cal_data[ADM_MAX_CAL_TYPES];
+
+	atomic_t mem_map_handles[ADM_MEM_MAP_INDEX_MAX];
+	atomic_t mem_map_index;
+
+	struct param_outband outband_memmap;
+	struct source_tracking_data sourceTrackingData;
+
+	int set_custom_topology;
+	int ec_ref_rx;
+	int num_ec_ref_rx_chans;
+	int ec_ref_rx_bit_width;
+	int ec_ref_rx_sampling_rate;
+};
+
+static struct adm_ctl			this_adm;
+
+struct adm_multi_ch_map {
+	bool set_channel_map;
+	char channel_mapping[PCM_FORMAT_MAX_NUM_CHANNEL];
+};
+
+#define ADM_MCH_MAP_IDX_PLAYBACK 0
+#define ADM_MCH_MAP_IDX_REC 1
+static struct adm_multi_ch_map multi_ch_maps[2] = {
+							{ false,
+							{0, 0, 0, 0, 0, 0, 0, 0}
+							},
+							{ false,
+							{0, 0, 0, 0, 0, 0, 0, 0}
+							}
+};
+
+static int adm_get_parameters[MAX_COPPS_PER_PORT * ADM_GET_PARAMETER_LENGTH];
+static int adm_module_topo_list[
+	MAX_COPPS_PER_PORT * ADM_GET_TOPO_MODULE_LIST_LENGTH];
+
+int adm_validate_and_get_port_index(int port_id)
+{
+	int index;
+	int ret;
+
+	ret = q6audio_validate_port(port_id);
+	if (ret < 0) {
+		pr_err("%s: port validation failed id 0x%x ret %d\n",
+			__func__, port_id, ret);
+		return -EINVAL;
+	}
+
+	index = afe_get_port_index(port_id);
+	if (index < 0 || index >= AFE_MAX_PORTS) {
+		pr_err("%s: Invalid port idx %d port_id 0x%x\n",
+			__func__, index,
+			port_id);
+		return -EINVAL;
+	}
+	pr_debug("%s: port_idx- %d\n", __func__, index);
+	return index;
+}
+
+int adm_get_default_copp_idx(int port_id)
+{
+	int port_idx = adm_validate_and_get_port_index(port_id), idx;
+
+	if (port_idx < 0) {
+		pr_err("%s: Invalid port id: 0x%x", __func__, port_id);
+		return -EINVAL;
+	}
+	pr_debug("%s: port_idx:%d\n", __func__, port_idx);
+	for (idx = 0; idx < MAX_COPPS_PER_PORT; idx++) {
+		if (atomic_read(&this_adm.copp.id[port_idx][idx]) !=
+			RESET_COPP_ID)
+			return idx;
+	}
+	return -EINVAL;
+}
+
+int adm_get_topology_for_port_from_copp_id(int port_id, int copp_id)
+{
+	int port_idx = adm_validate_and_get_port_index(port_id), idx;
+
+	if (port_idx < 0) {
+		pr_err("%s: Invalid port id: 0x%x", __func__, port_id);
+		return 0;
+	}
+	for (idx = 0; idx < MAX_COPPS_PER_PORT; idx++)
+		if (atomic_read(&this_adm.copp.id[port_idx][idx]) == copp_id)
+			return atomic_read(&this_adm.copp.topology[port_idx]
+								  [idx]);
+	pr_err("%s: Invalid copp_id %d port_id 0x%x\n",
+		__func__, copp_id, port_id);
+	return 0;
+}
+
+int adm_get_topology_for_port_copp_idx(int port_id, int copp_idx)
+{
+	int port_idx = adm_validate_and_get_port_index(port_id);
+
+	if (port_idx < 0 || copp_idx >= MAX_COPPS_PER_PORT) {
+		pr_err("%s: Invalid port: 0x%x copp id: 0x%x",
+				__func__, port_id, copp_idx);
+		return 0;
+	}
+	return atomic_read(&this_adm.copp.topology[port_idx][copp_idx]);
+}
+
+int adm_get_indexes_from_copp_id(int copp_id, int *copp_idx, int *port_idx)
+{
+	int p_idx, c_idx;
+
+	for (p_idx = 0; p_idx < AFE_MAX_PORTS; p_idx++) {
+		for (c_idx = 0; c_idx < MAX_COPPS_PER_PORT; c_idx++) {
+			if (atomic_read(&this_adm.copp.id[p_idx][c_idx])
+								== copp_id) {
+				if (copp_idx != NULL)
+					*copp_idx = c_idx;
+				if (port_idx != NULL)
+					*port_idx = p_idx;
+				return 0;
+			}
+		}
+	}
+	return -EINVAL;
+}
+
+static int adm_get_copp_id(int port_idx, int copp_idx)
+{
+	pr_debug("%s: port_idx:%d copp_idx:%d\n", __func__, port_idx, copp_idx);
+
+	if (copp_idx < 0 || copp_idx >= MAX_COPPS_PER_PORT) {
+		pr_err("%s: Invalid copp_num: %d\n", __func__, copp_idx);
+		return -EINVAL;
+	}
+	return atomic_read(&this_adm.copp.id[port_idx][copp_idx]);
+}
+
+static int adm_get_idx_if_copp_exists(int port_idx, int topology, int mode,
+				 int rate, int bit_width, int app_type)
+{
+	int idx;
+
+	pr_debug("%s: port_idx-%d, topology-0x%x, mode-%d, rate-%d, bit_width-%d\n",
+		 __func__, port_idx, topology, mode, rate, bit_width);
+
+	for (idx = 0; idx < MAX_COPPS_PER_PORT; idx++)
+		if ((topology ==
+			atomic_read(&this_adm.copp.topology[port_idx][idx])) &&
+		    (mode == atomic_read(&this_adm.copp.mode[port_idx][idx])) &&
+		    (rate == atomic_read(&this_adm.copp.rate[port_idx][idx])) &&
+		    (bit_width ==
+			atomic_read(&this_adm.copp.bit_width[port_idx][idx])) &&
+		    (app_type ==
+			atomic_read(&this_adm.copp.app_type[port_idx][idx])))
+			return idx;
+	return -EINVAL;
+}
+
+static int adm_get_next_available_copp(int port_idx)
+{
+	int idx;
+
+	pr_debug("%s:\n", __func__);
+	for (idx = 0; idx < MAX_COPPS_PER_PORT; idx++) {
+		pr_debug("%s: copp_id:0x%x port_idx:%d idx:%d\n", __func__,
+			 atomic_read(&this_adm.copp.id[port_idx][idx]),
+			 port_idx, idx);
+		if (atomic_read(&this_adm.copp.id[port_idx][idx]) ==
+								RESET_COPP_ID)
+			break;
+	}
+	return idx;
+}
+
+int srs_trumedia_open(int port_id, int copp_idx, __s32 srs_tech_id,
+		      void *srs_params)
+{
+	struct adm_cmd_set_pp_params_inband_v5 *adm_params = NULL;
+	struct adm_cmd_set_pp_params_v5 *adm_params_ = NULL;
+	__s32 sz = 0, param_id, module_id = SRS_TRUMEDIA_MODULE_ID, outband = 0;
+	int ret = 0, port_idx;
+
+	pr_debug("SRS - %s", __func__);
+
+	port_id = afe_convert_virtual_to_portid(port_id);
+	port_idx = adm_validate_and_get_port_index(port_id);
+	if (port_idx < 0) {
+		pr_err("%s: Invalid port_id %#x\n", __func__, port_id);
+		return -EINVAL;
+	}
+	switch (srs_tech_id) {
+	case SRS_ID_GLOBAL: {
+		struct srs_trumedia_params_GLOBAL *glb_params = NULL;
+		sz = sizeof(struct adm_cmd_set_pp_params_inband_v5) +
+			sizeof(struct srs_trumedia_params_GLOBAL);
+		adm_params = kzalloc(sz, GFP_KERNEL);
+		if (!adm_params) {
+			pr_err("%s, adm params memory alloc failed\n",
+				__func__);
+			return -ENOMEM;
+		}
+		adm_params->payload_size =
+			sizeof(struct srs_trumedia_params_GLOBAL) +
+			sizeof(struct adm_param_data_v5);
+		param_id = SRS_TRUMEDIA_PARAMS;
+		adm_params->params.param_size =
+				sizeof(struct srs_trumedia_params_GLOBAL);
+		glb_params = (struct srs_trumedia_params_GLOBAL *)
+			((u8 *)adm_params +
+			sizeof(struct adm_cmd_set_pp_params_inband_v5));
+		memcpy(glb_params, srs_params,
+			sizeof(struct srs_trumedia_params_GLOBAL));
+		break;
+	}
+	case SRS_ID_WOWHD: {
+		struct srs_trumedia_params_WOWHD *whd_params = NULL;
+		sz = sizeof(struct adm_cmd_set_pp_params_inband_v5) +
+			sizeof(struct srs_trumedia_params_WOWHD);
+		adm_params = kzalloc(sz, GFP_KERNEL);
+		if (!adm_params) {
+			pr_err("%s, adm params memory alloc failed\n",
+				__func__);
+			return -ENOMEM;
+		}
+		adm_params->payload_size =
+			sizeof(struct srs_trumedia_params_WOWHD) +
+			sizeof(struct adm_param_data_v5);
+		param_id = SRS_TRUMEDIA_PARAMS_WOWHD;
+		adm_params->params.param_size =
+				sizeof(struct srs_trumedia_params_WOWHD);
+		whd_params = (struct srs_trumedia_params_WOWHD *)
+			((u8 *)adm_params +
+			sizeof(struct adm_cmd_set_pp_params_inband_v5));
+		memcpy(whd_params, srs_params,
+				sizeof(struct srs_trumedia_params_WOWHD));
+		break;
+	}
+	case SRS_ID_CSHP: {
+		struct srs_trumedia_params_CSHP *chp_params = NULL;
+		sz = sizeof(struct adm_cmd_set_pp_params_inband_v5) +
+			sizeof(struct srs_trumedia_params_CSHP);
+		adm_params = kzalloc(sz, GFP_KERNEL);
+		if (!adm_params) {
+			pr_err("%s, adm params memory alloc failed\n",
+				__func__);
+			return -ENOMEM;
+		}
+		adm_params->payload_size =
+			sizeof(struct srs_trumedia_params_CSHP) +
+			sizeof(struct adm_param_data_v5);
+		param_id = SRS_TRUMEDIA_PARAMS_CSHP;
+		adm_params->params.param_size =
+				sizeof(struct srs_trumedia_params_CSHP);
+		chp_params = (struct srs_trumedia_params_CSHP *)
+			((u8 *)adm_params +
+			sizeof(struct adm_cmd_set_pp_params_inband_v5));
+		memcpy(chp_params, srs_params,
+				sizeof(struct srs_trumedia_params_CSHP));
+		break;
+	}
+	case SRS_ID_HPF: {
+		struct srs_trumedia_params_HPF *hpf_params = NULL;
+		sz = sizeof(struct adm_cmd_set_pp_params_inband_v5) +
+			sizeof(struct srs_trumedia_params_HPF);
+		adm_params = kzalloc(sz, GFP_KERNEL);
+		if (!adm_params) {
+			pr_err("%s, adm params memory alloc failed\n",
+				__func__);
+			return -ENOMEM;
+		}
+		adm_params->payload_size =
+			sizeof(struct srs_trumedia_params_HPF) +
+			sizeof(struct adm_param_data_v5);
+		param_id = SRS_TRUMEDIA_PARAMS_HPF;
+		adm_params->params.param_size =
+				sizeof(struct srs_trumedia_params_HPF);
+		hpf_params = (struct srs_trumedia_params_HPF *)
+			((u8 *)adm_params +
+			sizeof(struct adm_cmd_set_pp_params_inband_v5));
+		memcpy(hpf_params, srs_params,
+			sizeof(struct srs_trumedia_params_HPF));
+		break;
+	}
+	case SRS_ID_AEQ: {
+		int *update_params_ptr = (int *)this_adm.outband_memmap.kvaddr;
+		outband = 1;
+		adm_params = kzalloc(sizeof(struct adm_cmd_set_pp_params_v5),
+				     GFP_KERNEL);
+		adm_params_ = (struct adm_cmd_set_pp_params_v5 *)adm_params;
+		if (!adm_params_) {
+			pr_err("%s, adm params memory alloc failed\n",
+				__func__);
+			return -ENOMEM;
+		}
+
+		sz = sizeof(struct srs_trumedia_params_AEQ);
+		if (update_params_ptr == NULL) {
+			pr_err("ADM_SRS_TRUMEDIA - %s: null memmap for AEQ params\n",
+				__func__);
+			ret = -EINVAL;
+			goto fail_cmd;
+		}
+		param_id = SRS_TRUMEDIA_PARAMS_AEQ;
+		*update_params_ptr++ = module_id;
+		*update_params_ptr++ = param_id;
+		*update_params_ptr++ = sz;
+		memcpy(update_params_ptr, srs_params, sz);
+
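+		/*
+		 * The extra 12 bytes cover the three u32 words (module id,
+		 * param id, size) written ahead of the AEQ payload above.
+		 */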
+		adm_params_->payload_size = sz + 12;
+
+		break;
+	}
+	case SRS_ID_HL: {
+		struct srs_trumedia_params_HL *hl_params = NULL;
+		sz = sizeof(struct adm_cmd_set_pp_params_inband_v5) +
+			sizeof(struct srs_trumedia_params_HL);
+		adm_params = kzalloc(sz, GFP_KERNEL);
+		if (!adm_params) {
+			pr_err("%s, adm params memory alloc failed\n",
+				__func__);
+			return -ENOMEM;
+		}
+		adm_params->payload_size =
+			sizeof(struct srs_trumedia_params_HL) +
+			sizeof(struct adm_param_data_v5);
+		param_id = SRS_TRUMEDIA_PARAMS_HL;
+		adm_params->params.param_size =
+			sizeof(struct srs_trumedia_params_HL);
+		hl_params = (struct srs_trumedia_params_HL *)
+			((u8 *)adm_params +
+			sizeof(struct adm_cmd_set_pp_params_inband_v5));
+		memcpy(hl_params, srs_params,
+				sizeof(struct srs_trumedia_params_HL));
+		break;
+	}
+	case SRS_ID_GEQ: {
+		struct srs_trumedia_params_GEQ *geq_params = NULL;
+		sz = sizeof(struct adm_cmd_set_pp_params_inband_v5) +
+			sizeof(struct srs_trumedia_params_GEQ);
+		adm_params = kzalloc(sz, GFP_KERNEL);
+		if (!adm_params) {
+			pr_err("%s, adm params memory alloc failed\n",
+				__func__);
+			return -ENOMEM;
+		}
+		adm_params->payload_size =
+			sizeof(struct srs_trumedia_params_GEQ) +
+			sizeof(struct adm_param_data_v5);
+		param_id = SRS_TRUMEDIA_PARAMS_GEQ;
+		adm_params->params.param_size =
+			sizeof(struct srs_trumedia_params_GEQ);
+		geq_params = (struct srs_trumedia_params_GEQ *)
+			((u8 *)adm_params +
+			sizeof(struct adm_cmd_set_pp_params_inband_v5));
+		memcpy(geq_params, srs_params,
+			sizeof(struct srs_trumedia_params_GEQ));
+		pr_debug("SRS - %s: GEQ params prepared\n", __func__);
+		break;
+	}
+	default:
+		/* unknown srs_tech_id: fail rather than silently return 0 */
+		ret = -EINVAL;
+		goto fail_cmd;
+	}
+
+	adm_params->hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+				APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
+	adm_params->hdr.src_svc = APR_SVC_ADM;
+	adm_params->hdr.src_domain = APR_DOMAIN_APPS;
+	adm_params->hdr.src_port = port_id;
+	adm_params->hdr.dest_svc = APR_SVC_ADM;
+	adm_params->hdr.dest_domain = APR_DOMAIN_ADSP;
+	adm_params->hdr.dest_port =
+			atomic_read(&this_adm.copp.id[port_idx][copp_idx]);
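+	/*
+	 * Pack both indexes into the token so the APR callback can locate
+	 * this copp's stat/wait entries.
+	 */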
+	adm_params->hdr.token = port_idx << 16 | copp_idx;
+	adm_params->hdr.opcode = ADM_CMD_SET_PP_PARAMS_V5;
+	if (outband && this_adm.outband_memmap.paddr) {
+		adm_params->hdr.pkt_size =
+					sizeof(struct adm_cmd_set_pp_params_v5);
+		adm_params->payload_addr_lsw = lower_32_bits(
+						this_adm.outband_memmap.paddr);
+		adm_params->payload_addr_msw = msm_audio_populate_upper_32_bits(
+						this_adm.outband_memmap.paddr);
+		adm_params->mem_map_handle = atomic_read(&this_adm.
+					mem_map_handles[ADM_SRS_TRUMEDIA]);
+	} else {
+		adm_params->hdr.pkt_size = sz;
+		adm_params->payload_addr_lsw = 0;
+		adm_params->payload_addr_msw = 0;
+		adm_params->mem_map_handle = 0;
+
+		adm_params->params.module_id = module_id;
+		adm_params->params.param_id = param_id;
+		adm_params->params.reserved = 0;
+	}
+
+	pr_debug("SRS - %s: Command was sent now check Q6 - port id = %d, size %d, module id %x, param id %x.\n",
+			__func__, adm_params->hdr.dest_port,
+			adm_params->payload_size, module_id, param_id);
+
+	atomic_set(&this_adm.copp.stat[port_idx][copp_idx], -1);
+	ret = apr_send_pkt(this_adm.apr, (uint32_t *)adm_params);
+	if (ret < 0) {
+		pr_err("SRS - %s: ADM enable for port %d failed\n", __func__,
+			port_id);
+		ret = -EINVAL;
+		goto fail_cmd;
+	}
+	/* Wait for the callback with copp id */
+	ret = wait_event_timeout(this_adm.copp.wait[port_idx][copp_idx],
+			atomic_read(&this_adm.copp.stat
+			[port_idx][copp_idx]) >= 0,
+			msecs_to_jiffies(TIMEOUT_MS));
+	if (!ret) {
+		pr_err("%s: SRS set params timed out port = %d\n",
+			__func__, port_id);
+		ret = -EINVAL;
+		goto fail_cmd;
+	} else if (atomic_read(&this_adm.copp.stat
+				[port_idx][copp_idx]) > 0) {
+		pr_err("%s: DSP returned error[%s]\n",
+				__func__, adsp_err_get_err_str(
+				atomic_read(&this_adm.copp.stat
+				[port_idx][copp_idx])));
+		ret = adsp_err_get_lnx_err_code(
+				atomic_read(&this_adm.copp.stat
+					[port_idx][copp_idx]));
+		goto fail_cmd;
+	}
+
+fail_cmd:
+	kfree(adm_params);
+	return ret;
+}
+
+static int adm_populate_channel_weight(u16 *ptr,
+					struct msm_pcm_channel_mixer *ch_mixer,
+					int channel_index)
+{
+	u16 i, j, start_index = 0;
+
+	if (channel_index > ch_mixer->output_channel) {
+		pr_err("%s: channel index %d is larger than output_channel %d\n",
+			 __func__, channel_index, ch_mixer->output_channel);
+		return -EINVAL;
+	}
+
+	for (i = 0; i < ch_mixer->output_channel; i++) {
+		pr_debug("%s: weight for output %d:", __func__, i);
+		for (j = 0; j < ADM_MAX_CHANNELS; j++)
+			pr_debug(" %d",
+				ch_mixer->channel_weight[i][j]);
+		pr_debug("\n");
+	}
+
+	for (i = 0; i < channel_index; ++i)
+		start_index += ch_mixer->input_channels[i];
+
+	for (i = 0; i < ch_mixer->output_channel; ++i) {
+		for (j = start_index;
+			j < start_index +
+			ch_mixer->input_channels[channel_index]; j++) {
+			*ptr = ch_mixer->channel_weight[i][j];
+			 pr_debug("%s: ptr[%d][%d] = %d\n",
+				__func__, i, j, *ptr);
+			 ptr++;
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * adm_programable_channel_mixer
+ *
+ * Builds and sends the ADM command that programs the channel mixer
+ * for one COPP, based on the given session and mixer description.
+ *
+ * port_id - port for which the backend COPP is wanted
+ * copp_idx - index of the COPP on that port
+ * session_id - session the command applies to
+ * session_type - session type, RX or TX
+ * ch_mixer - channel mixer configuration to program
+ * channel_index - index of the input stream within ch_mixer
+ *
+ * A usage sketch follows the function body below.
+ */
+int adm_programable_channel_mixer(int port_id, int copp_idx, int session_id,
+				  int session_type,
+				  struct msm_pcm_channel_mixer *ch_mixer,
+				  int channel_index)
+{
+	struct adm_cmd_set_pspd_mtmx_strtr_params_v5 *adm_params = NULL;
+	struct adm_param_data_v5 data_v5 = {0,};
+	int ret = 0, port_idx, sz = 0, param_size = 0;
+	u16 *adm_pspd_params;
+	u16 *ptr;
+	int index = 0;
+
+	pr_debug("%s: port_id = %d\n", __func__, port_id);
+	port_id = afe_convert_virtual_to_portid(port_id);
+	port_idx = adm_validate_and_get_port_index(port_id);
+	if (port_idx < 0) {
+		pr_err("%s: Invalid port_id %#x\n", __func__, port_id);
+		return -EINVAL;
+	}
+	/*
+	 * First 8 bytes are 4 bytes as rule number, 2 bytes as output
+	 * channel and 2 bytes as input channel.
+	 * 2 * ch_mixer->output_channel means output channel mapping.
+	 * 2 * ch_mixer->input_channels[channel_index]) means input
+	 * channel mapping.
+	 * 2 * ch_mixer->input_channels[channel_index] *
+	 * ch_mixer->output_channel) means the channel mixer weighting
+	 * coefficients.
+	 * param_size needs to be a multiple of 4 bytes.
+	 */
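+	/*
+	 * Worked example (ours): one stereo input mixed into a stereo output
+	 * gives param_size = 2 * (4 + 2 + 2 + 2 * 2) = 24 bytes, already a
+	 * multiple of 4.
+	 */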
+
+	param_size = 2 * (4 + ch_mixer->output_channel +
+			ch_mixer->input_channels[channel_index] +
+			ch_mixer->input_channels[channel_index] *
+			ch_mixer->output_channel);
+	param_size = roundup(param_size, 4);
+
+	sz = sizeof(struct adm_cmd_set_pspd_mtmx_strtr_params_v5) +
+			sizeof(struct default_chmixer_param_id_coeff) +
+			sizeof(struct adm_param_data_v5) + param_size;
+	pr_debug("%s: sz = %d\n", __func__, sz);
+	adm_params = kzalloc(sz, GFP_KERNEL);
+	if (!adm_params)
+		return -ENOMEM;
+
+	adm_params->payload_addr_lsw = 0;
+	adm_params->payload_addr_msw = 0;
+	adm_params->mem_map_handle = 0;
+	adm_params->direction = session_type;
+	adm_params->sessionid = session_id;
+	pr_debug("%s: copp_id = %d, session id  %d\n", __func__,
+		atomic_read(&this_adm.copp.id[port_idx][copp_idx]),
+			session_id);
+	adm_params->deviceid = atomic_read(
+				&this_adm.copp.id[port_idx][copp_idx]);
+	adm_params->reserved = 0;
+
+	data_v5.module_id = MTMX_MODULE_ID_DEFAULT_CHMIXER;
+	data_v5.param_id =  DEFAULT_CHMIXER_PARAM_ID_COEFF;
+	data_v5.reserved = 0;
+	data_v5.param_size = param_size;
+	adm_params->payload_size =
+			sizeof(struct default_chmixer_param_id_coeff) +
+			sizeof(struct adm_param_data_v5) + data_v5.param_size;
+	adm_pspd_params = (u16 *)((u8 *)adm_params +
+			sizeof(struct adm_cmd_set_pspd_mtmx_strtr_params_v5));
+	memcpy(adm_pspd_params, &data_v5, sizeof(data_v5));
+
+	adm_pspd_params = (u16 *)((u8 *)adm_params +
+			sizeof(struct adm_cmd_set_pspd_mtmx_strtr_params_v5)
+			+ sizeof(data_v5));
+
+	adm_pspd_params[0] = ch_mixer->rule;
+	adm_pspd_params[2] = ch_mixer->output_channel;
+	adm_pspd_params[3] = ch_mixer->input_channels[channel_index];
+	index = 4;
+
+	if (ch_mixer->output_channel == 1) {
+		adm_pspd_params[index] = PCM_CHANNEL_FC;
+	} else if (ch_mixer->output_channel == 2) {
+		adm_pspd_params[index] = PCM_CHANNEL_FL;
+		adm_pspd_params[index + 1] = PCM_CHANNEL_FR;
+	} else if (ch_mixer->output_channel == 3) {
+		adm_pspd_params[index] = PCM_CHANNEL_FL;
+		adm_pspd_params[index + 1] = PCM_CHANNEL_FR;
+		adm_pspd_params[index + 2] = PCM_CHANNEL_FC;
+	} else if (ch_mixer->output_channel == 4) {
+		adm_pspd_params[index] = PCM_CHANNEL_FL;
+		adm_pspd_params[index + 1] = PCM_CHANNEL_FR;
+		adm_pspd_params[index + 2] = PCM_CHANNEL_LS;
+		adm_pspd_params[index + 3] = PCM_CHANNEL_RS;
+	} else if (ch_mixer->output_channel == 5) {
+		adm_pspd_params[index] = PCM_CHANNEL_FL;
+		adm_pspd_params[index + 1] = PCM_CHANNEL_FR;
+		adm_pspd_params[index + 2] = PCM_CHANNEL_FC;
+		adm_pspd_params[index + 3] = PCM_CHANNEL_LS;
+		adm_pspd_params[index + 4] = PCM_CHANNEL_RS;
+	} else if (ch_mixer->output_channel == 6) {
+		adm_pspd_params[index] = PCM_CHANNEL_FL;
+		adm_pspd_params[index + 1] = PCM_CHANNEL_FR;
+		adm_pspd_params[index + 2] = PCM_CHANNEL_LFE;
+		adm_pspd_params[index + 3] = PCM_CHANNEL_FC;
+		adm_pspd_params[index + 4] = PCM_CHANNEL_LS;
+		adm_pspd_params[index + 5] = PCM_CHANNEL_RS;
+	} else if (ch_mixer->output_channel == 8) {
+		adm_pspd_params[index] = PCM_CHANNEL_FL;
+		adm_pspd_params[index + 1] = PCM_CHANNEL_FR;
+		adm_pspd_params[index + 2] = PCM_CHANNEL_LFE;
+		adm_pspd_params[index + 3] = PCM_CHANNEL_FC;
+		adm_pspd_params[index + 4] = PCM_CHANNEL_LS;
+		adm_pspd_params[index + 5] = PCM_CHANNEL_RS;
+		adm_pspd_params[index + 6] = PCM_CHANNEL_LB;
+		adm_pspd_params[index + 7] = PCM_CHANNEL_RB;
+	}
+
+	index = index + ch_mixer->output_channel;
+	if (ch_mixer->input_channels[channel_index] == 1) {
+		adm_pspd_params[index] = PCM_CHANNEL_FC;
+	} else if (ch_mixer->input_channels[channel_index] == 2) {
+		adm_pspd_params[index] = PCM_CHANNEL_FL;
+		adm_pspd_params[index + 1] = PCM_CHANNEL_FR;
+	} else if (ch_mixer->input_channels[channel_index] == 3) {
+		adm_pspd_params[index] = PCM_CHANNEL_FL;
+		adm_pspd_params[index + 1] = PCM_CHANNEL_FR;
+		adm_pspd_params[index + 2] = PCM_CHANNEL_FC;
+	} else if (ch_mixer->input_channels[channel_index] == 4) {
+		adm_pspd_params[index] = PCM_CHANNEL_FL;
+		adm_pspd_params[index + 1] = PCM_CHANNEL_FR;
+		adm_pspd_params[index + 2] = PCM_CHANNEL_LS;
+		adm_pspd_params[index + 3] = PCM_CHANNEL_RS;
+	} else if (ch_mixer->input_channels[channel_index] == 5) {
+		adm_pspd_params[index] = PCM_CHANNEL_FL;
+		adm_pspd_params[index + 1] = PCM_CHANNEL_FR;
+		adm_pspd_params[index + 2] = PCM_CHANNEL_FC;
+		adm_pspd_params[index + 3] = PCM_CHANNEL_LS;
+		adm_pspd_params[index + 4] = PCM_CHANNEL_RS;
+	} else if (ch_mixer->input_channels[channel_index] == 6) {
+		adm_pspd_params[index] = PCM_CHANNEL_FL;
+		adm_pspd_params[index + 1] = PCM_CHANNEL_FR;
+		adm_pspd_params[index + 2] = PCM_CHANNEL_LFE;
+		adm_pspd_params[index + 3] = PCM_CHANNEL_FC;
+		adm_pspd_params[index + 4] = PCM_CHANNEL_LS;
+		adm_pspd_params[index + 5] = PCM_CHANNEL_RS;
+	} else if (ch_mixer->input_channels[channel_index] == 8) {
+		adm_pspd_params[index] = PCM_CHANNEL_FL;
+		adm_pspd_params[index + 1] = PCM_CHANNEL_FR;
+		adm_pspd_params[index + 2] = PCM_CHANNEL_LFE;
+		adm_pspd_params[index + 3] = PCM_CHANNEL_FC;
+		adm_pspd_params[index + 4] = PCM_CHANNEL_LS;
+		adm_pspd_params[index + 5] = PCM_CHANNEL_RS;
+		adm_pspd_params[index + 6] = PCM_CHANNEL_LB;
+		adm_pspd_params[index + 7] = PCM_CHANNEL_RB;
+	}
+
+	index = index + ch_mixer->input_channels[channel_index];
+	ret = adm_populate_channel_weight(&adm_pspd_params[index],
+					ch_mixer, channel_index);
+	if (ret) {
+		pr_err("%s: fail to get channel weight with error %d\n",
+			__func__, ret);
+		goto fail_cmd;
+	}
+
+	adm_params->hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+				APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
+	adm_params->hdr.src_svc = APR_SVC_ADM;
+	adm_params->hdr.src_domain = APR_DOMAIN_APPS;
+	adm_params->hdr.src_port = port_id;
+	adm_params->hdr.dest_svc = APR_SVC_ADM;
+	adm_params->hdr.dest_domain = APR_DOMAIN_ADSP;
+	adm_params->hdr.dest_port =
+			atomic_read(&this_adm.copp.id[port_idx][copp_idx]);
+	adm_params->hdr.token = port_idx << 16 | copp_idx;
+	adm_params->hdr.opcode = ADM_CMD_SET_PSPD_MTMX_STRTR_PARAMS_V5;
+	adm_params->hdr.pkt_size = sz;
+	adm_params->payload_addr_lsw = 0;
+	adm_params->payload_addr_msw = 0;
+	adm_params->mem_map_handle = 0;
+	adm_params->reserved = 0;
+
+	ptr = (u16 *)adm_params;
+	for (index = 0; index < (sz / 2); index++)
+		pr_debug("%s: adm_params[%d] = 0x%x\n",
+			__func__, index, (unsigned int)ptr[index]);
+
+	atomic_set(&this_adm.copp.stat[port_idx][copp_idx], -1);
+	ret = apr_send_pkt(this_adm.apr, (uint32_t *)adm_params);
+	if (ret < 0) {
+		pr_err("%s: Set params failed port %d rc %d\n", __func__,
+			port_id, ret);
+		ret = -EINVAL;
+		goto fail_cmd;
+	}
+
+	ret = wait_event_timeout(this_adm.copp.wait[port_idx][copp_idx],
+			atomic_read(
+			&this_adm.copp.stat[port_idx][copp_idx]) >= 0,
+			msecs_to_jiffies(TIMEOUT_MS));
+	if (!ret) {
+		pr_err("%s: set params timed out port = %d\n",
+			__func__, port_id);
+		ret = -ETIMEDOUT;
+		goto fail_cmd;
+	}
+	ret = 0;
+fail_cmd:
+	kfree(adm_params);
+
+	return ret;
+}
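+
+/*
+ * Usage sketch for adm_programable_channel_mixer(), illustrative only: the
+ * weight value w and all indexes below are hypothetical, and the fixed-point
+ * format of the weights is whatever the DSP's default channel mixer expects.
+ *
+ *	struct msm_pcm_channel_mixer ch_mixer = {
+ *		.rule = 0,
+ *		.output_channel = 2,
+ *		.input_channels = { 2 },
+ *		.channel_weight = {
+ *			{ w, 0 },	pass input channel 0 to output FL
+ *			{ 0, w },	pass input channel 1 to output FR
+ *		},
+ *	};
+ *	rc = adm_programable_channel_mixer(port_id, copp_idx, session_id,
+ *					   SESSION_TYPE_RX, &ch_mixer, 0);
+ */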
+
+int adm_set_pspd_matrix_params(int port_id, int copp_idx,
+				unsigned int session_id, char *params,
+				uint32_t params_length)
+{
+	struct adm_cmd_set_pspd_mtmx_strtr_params_v5 *adm_params = NULL;
+	int sz, rc = 0, port_idx;
+
+	pr_debug("%s:\n", __func__);
+	port_id = afe_convert_virtual_to_portid(port_id);
+	port_idx = adm_validate_and_get_port_index(port_id);
+	if (port_idx < 0) {
+		pr_err("%s: Invalid port_id 0x%x\n", __func__, port_id);
+		return -EINVAL;
+	}
+
+	sz = sizeof(struct adm_cmd_set_pspd_mtmx_strtr_params_v5) +
+		params_length;
+	adm_params = kzalloc(sz, GFP_KERNEL);
+	if (!adm_params) {
+		pr_err("%s, adm params memory alloc failed\n", __func__);
+		return -ENOMEM;
+	}
+
+	memcpy(((u8 *)adm_params +
+		sizeof(struct adm_cmd_set_pspd_mtmx_strtr_params_v5)),
+		params, params_length);
+	adm_params->hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+					APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
+	adm_params->hdr.pkt_size = sz;
+	adm_params->hdr.src_svc = APR_SVC_ADM;
+	adm_params->hdr.src_domain = APR_DOMAIN_APPS;
+	adm_params->hdr.src_port = port_id;
+	adm_params->hdr.dest_svc = APR_SVC_ADM;
+	adm_params->hdr.dest_domain = APR_DOMAIN_ADSP;
+	adm_params->hdr.dest_port = 0; /* Ignored */
+	adm_params->hdr.token = port_idx << 16 | copp_idx;
+	adm_params->hdr.opcode = ADM_CMD_SET_PSPD_MTMX_STRTR_PARAMS_V5;
+	adm_params->payload_addr_lsw = 0;
+	adm_params->payload_addr_msw = 0;
+	adm_params->mem_map_handle = 0;
+	adm_params->payload_size = params_length;
+	/* direction RX as 0 */
+	adm_params->direction = ADM_MATRIX_ID_AUDIO_RX;
+	/* session id for this cmd to be applied on */
+	adm_params->sessionid = session_id;
+	adm_params->deviceid =
+			atomic_read(&this_adm.copp.id[port_idx][copp_idx]);
+	adm_params->reserved = 0;
+	pr_debug("%s: deviceid %d, session_id %d, src_port %d, dest_port %d\n",
+		__func__, adm_params->deviceid, adm_params->sessionid,
+		adm_params->hdr.src_port, adm_params->hdr.dest_port);
+	atomic_set(&this_adm.copp.stat[port_idx][copp_idx], -1);
+	rc = apr_send_pkt(this_adm.apr, (uint32_t *)adm_params);
+	if (rc < 0) {
+		pr_err("%s: Set params failed port = 0x%x rc %d\n",
+			__func__, port_id, rc);
+		rc = -EINVAL;
+		goto set_stereo_to_custom_stereo_return;
+	}
+	/* Wait for the callback */
+	rc = wait_event_timeout(this_adm.copp.wait[port_idx][copp_idx],
+				atomic_read(&this_adm.copp.stat
+				[port_idx][copp_idx]) >= 0,
+				msecs_to_jiffies(TIMEOUT_MS));
+	if (!rc) {
+		pr_err("%s: Set params timed out port = 0x%x\n", __func__,
+			port_id);
+		rc = -EINVAL;
+		goto set_stereo_to_custom_stereo_return;
+	} else if (atomic_read(&this_adm.copp.stat
+				[port_idx][copp_idx]) > 0) {
+		pr_err("%s: DSP returned error[%s]\n", __func__,
+			adsp_err_get_err_str(atomic_read(
+			&this_adm.copp.stat
+			[port_idx][copp_idx])));
+		rc = adsp_err_get_lnx_err_code(
+				atomic_read(&this_adm.copp.stat
+					[port_idx][copp_idx]));
+		goto set_stereo_to_custom_stereo_return;
+	}
+	rc = 0;
+set_stereo_to_custom_stereo_return:
+	kfree(adm_params);
+	return rc;
+}
+
+int adm_dolby_dap_send_params(int port_id, int copp_idx, char *params,
+			      uint32_t params_length)
+{
+	struct adm_cmd_set_pp_params_v5	*adm_params = NULL;
+	int sz, rc = 0;
+	int port_idx;
+
+	pr_debug("%s:\n", __func__);
+	port_id = afe_convert_virtual_to_portid(port_id);
+	port_idx = adm_validate_and_get_port_index(port_id);
+	if (port_idx < 0) {
+		pr_err("%s: Invalid port_id 0x%x\n", __func__, port_id);
+		return -EINVAL;
+	}
+
+	sz = sizeof(struct adm_cmd_set_pp_params_v5) + params_length;
+	adm_params = kzalloc(sz, GFP_KERNEL);
+	if (!adm_params) {
+		pr_err("%s, adm params memory alloc failed", __func__);
+		return -ENOMEM;
+	}
+
+	memcpy(((u8 *)adm_params + sizeof(struct adm_cmd_set_pp_params_v5)),
+			params, params_length);
+	adm_params->hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+				APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
+	adm_params->hdr.pkt_size = sz;
+	adm_params->hdr.src_svc = APR_SVC_ADM;
+	adm_params->hdr.src_domain = APR_DOMAIN_APPS;
+	adm_params->hdr.src_port = port_id;
+	adm_params->hdr.dest_svc = APR_SVC_ADM;
+	adm_params->hdr.dest_domain = APR_DOMAIN_ADSP;
+	adm_params->hdr.dest_port =
+			atomic_read(&this_adm.copp.id[port_idx][copp_idx]);
+	adm_params->hdr.token = port_idx << 16 | copp_idx;
+	adm_params->hdr.opcode = ADM_CMD_SET_PP_PARAMS_V5;
+	adm_params->payload_addr_lsw = 0;
+	adm_params->payload_addr_msw = 0;
+	adm_params->mem_map_handle = 0;
+	adm_params->payload_size = params_length;
+
+	atomic_set(&this_adm.copp.stat[port_idx][copp_idx], -1);
+	rc = apr_send_pkt(this_adm.apr, (uint32_t *)adm_params);
+	if (rc < 0) {
+		pr_err("%s: Set params failed port = 0x%x rc %d\n",
+			__func__, port_id, rc);
+		rc = -EINVAL;
+		goto dolby_dap_send_param_return;
+	}
+	/* Wait for the callback */
+	rc = wait_event_timeout(this_adm.copp.wait[port_idx][copp_idx],
+		atomic_read(&this_adm.copp.stat[port_idx][copp_idx]) >= 0,
+		msecs_to_jiffies(TIMEOUT_MS));
+	if (!rc) {
+		pr_err("%s: Set params timed out port = 0x%x\n",
+			 __func__, port_id);
+		rc = -EINVAL;
+		goto dolby_dap_send_param_return;
+	} else if (atomic_read(&this_adm.copp.stat
+				[port_idx][copp_idx]) > 0) {
+		pr_err("%s: DSP returned error[%s]\n",
+				__func__, adsp_err_get_err_str(
+				atomic_read(&this_adm.copp.stat
+				[port_idx][copp_idx])));
+		rc = adsp_err_get_lnx_err_code(
+				atomic_read(&this_adm.copp.stat
+					[port_idx][copp_idx]));
+		goto dolby_dap_send_param_return;
+	}
+	rc = 0;
+dolby_dap_send_param_return:
+	kfree(adm_params);
+	return rc;
+}
+
+int adm_send_params_v5(int port_id, int copp_idx, char *params,
+			      uint32_t params_length)
+{
+	struct adm_cmd_set_pp_params_v5	*adm_params = NULL;
+	int rc = 0;
+	int sz, port_idx;
+
+	pr_debug("%s:\n", __func__);
+	port_id = afe_convert_virtual_to_portid(port_id);
+	port_idx = adm_validate_and_get_port_index(port_id);
+	if (port_idx < 0) {
+		pr_err("%s: Invalid port_id 0x%x\n", __func__, port_id);
+		return -EINVAL;
+	}
+
+	sz = sizeof(struct adm_cmd_set_pp_params_v5) + params_length;
+	adm_params = kzalloc(sz, GFP_KERNEL);
+	if (!adm_params) {
+		pr_err("%s, adm params memory alloc failed", __func__);
+		return -ENOMEM;
+	}
+
+	memcpy(((u8 *)adm_params + sizeof(struct adm_cmd_set_pp_params_v5)),
+			params, params_length);
+	adm_params->hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+				APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
+	adm_params->hdr.pkt_size = sz;
+	adm_params->hdr.src_svc = APR_SVC_ADM;
+	adm_params->hdr.src_domain = APR_DOMAIN_APPS;
+	adm_params->hdr.src_port = port_id;
+	adm_params->hdr.dest_svc = APR_SVC_ADM;
+	adm_params->hdr.dest_domain = APR_DOMAIN_ADSP;
+	adm_params->hdr.dest_port =
+			atomic_read(&this_adm.copp.id[port_idx][copp_idx]);
+	adm_params->hdr.token = port_idx << 16 | copp_idx;
+	adm_params->hdr.opcode = ADM_CMD_SET_PP_PARAMS_V5;
+	adm_params->payload_addr_lsw = 0;
+	adm_params->payload_addr_msw = 0;
+	adm_params->mem_map_handle = 0;
+	adm_params->payload_size = params_length;
+
+	atomic_set(&this_adm.copp.stat[port_idx][copp_idx], -1);
+	rc = apr_send_pkt(this_adm.apr, (uint32_t *)adm_params);
+	if (rc < 0) {
+		pr_err("%s: Set params failed port = 0x%x rc %d\n",
+			__func__, port_id, rc);
+		rc = -EINVAL;
+		goto send_param_return;
+	}
+	/* Wait for the callback */
+	rc = wait_event_timeout(this_adm.copp.wait[port_idx][copp_idx],
+		atomic_read(&this_adm.copp.stat[port_idx][copp_idx]) >= 0,
+		msecs_to_jiffies(TIMEOUT_MS));
+	if (!rc) {
+		pr_err("%s: Set params timed out port = 0x%x\n",
+			 __func__, port_id);
+		rc = -EINVAL;
+		goto send_param_return;
+	} else if (atomic_read(&this_adm.copp.stat
+				[port_idx][copp_idx]) > 0) {
+		pr_err("%s: DSP returned error[%s]\n",
+				__func__, adsp_err_get_err_str(
+				atomic_read(&this_adm.copp.stat
+				[port_idx][copp_idx])));
+		rc = adsp_err_get_lnx_err_code(
+				atomic_read(&this_adm.copp.stat
+					[port_idx][copp_idx]));
+		goto send_param_return;
+	}
+	rc = 0;
+send_param_return:
+	kfree(adm_params);
+	return rc;
+}
+
+int adm_get_params_v2(int port_id, int copp_idx, uint32_t module_id,
+		      uint32_t param_id, uint32_t params_length,
+		      char *params, uint32_t client_id)
+{
+	struct adm_cmd_get_pp_params_v5 *adm_params = NULL;
+	int rc = 0, i = 0;
+	int port_idx, idx;
+	int *params_data = (int *)params;
+	uint64_t sz = 0;
+
+	port_id = afe_convert_virtual_to_portid(port_id);
+	port_idx = adm_validate_and_get_port_index(port_id);
+	if (port_idx < 0) {
+		pr_err("%s: Invalid port_id 0x%x\n", __func__, port_id);
+		return -EINVAL;
+	}
+
+	sz = (uint64_t)sizeof(struct adm_cmd_get_pp_params_v5) +
+				(uint64_t)params_length;
+	/*
+	 * Check if the value of "sz" (which is ultimately assigned to
+	 * "hdr.pkt_size") crosses U16_MAX.
+	 */
+	if (sz > U16_MAX) {
+		pr_err("%s: Invalid params_length\n", __func__);
+		return -EINVAL;
+	}
+	adm_params = kzalloc(sz, GFP_KERNEL);
+	if (!adm_params) {
+		pr_err("%s: adm params memory alloc failed", __func__);
+		return -ENOMEM;
+	}
+
+	memcpy(((u8 *)adm_params + sizeof(struct adm_cmd_get_pp_params_v5)),
+		params, params_length);
+	adm_params->hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+	APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
+	adm_params->hdr.pkt_size = sz;
+	adm_params->hdr.src_svc = APR_SVC_ADM;
+	adm_params->hdr.src_domain = APR_DOMAIN_APPS;
+	adm_params->hdr.src_port = port_id;
+	adm_params->hdr.dest_svc = APR_SVC_ADM;
+	adm_params->hdr.dest_domain = APR_DOMAIN_ADSP;
+	adm_params->hdr.dest_port =
+			atomic_read(&this_adm.copp.id[port_idx][copp_idx]);
+	adm_params->hdr.token = port_idx << 16 | client_id << 8 | copp_idx;
+	adm_params->hdr.opcode = ADM_CMD_GET_PP_PARAMS_V5;
+	adm_params->data_payload_addr_lsw = 0;
+	adm_params->data_payload_addr_msw = 0;
+	adm_params->mem_map_handle = 0;
+	adm_params->module_id = module_id;
+	adm_params->param_id = param_id;
+	adm_params->param_max_size = params_length;
+	adm_params->reserved = 0;
+
+	atomic_set(&this_adm.copp.stat[port_idx][copp_idx], -1);
+	rc = apr_send_pkt(this_adm.apr, (uint32_t *)adm_params);
+	if (rc < 0) {
+		pr_err("%s: Failed to Get Params on port_id 0x%x %d\n",
+			__func__, port_id, rc);
+		rc = -EINVAL;
+		goto adm_get_param_return;
+	}
+	/* Wait for the callback with copp id */
+	rc = wait_event_timeout(this_adm.copp.wait[port_idx][copp_idx],
+	atomic_read(&this_adm.copp.stat[port_idx][copp_idx]) >= 0,
+		msecs_to_jiffies(TIMEOUT_MS));
+	if (!rc) {
+		pr_err("%s: get params timed out port_id = 0x%x\n", __func__,
+			port_id);
+		rc = -EINVAL;
+		goto adm_get_param_return;
+	} else if (atomic_read(&this_adm.copp.stat
+				[port_idx][copp_idx]) > 0) {
+		pr_err("%s: DSP returned error[%s]\n",
+				__func__, adsp_err_get_err_str(
+				atomic_read(&this_adm.copp.stat
+				[port_idx][copp_idx])));
+		rc = adsp_err_get_lnx_err_code(
+				atomic_read(&this_adm.copp.stat
+					[port_idx][copp_idx]));
+		goto adm_get_param_return;
+	}
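+	/*
+	 * adm_get_parameters[] is a per-copp scratch area: slot idx
+	 * holds the number of returned 32-bit words (written by
+	 * adm_callback() on ADM_CMDRSP_GET_PP_PARAMS_V5, or -1 on
+	 * failure), followed by the words themselves.
+	 */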
+	idx = ADM_GET_PARAMETER_LENGTH * copp_idx;
+
+	if (adm_get_parameters[idx] < 0) {
+		pr_err("%s: Size is invalid %d\n", __func__,
+			adm_get_parameters[idx]);
+		rc = -EINVAL;
+		goto adm_get_param_return;
+	}
+	if ((params_data) &&
+		(ARRAY_SIZE(adm_get_parameters) >
+		idx) &&
+		(ARRAY_SIZE(adm_get_parameters) >=
+		1+adm_get_parameters[idx]+idx) &&
+		(params_length/sizeof(uint32_t) >=
+		adm_get_parameters[idx])) {
+		for (i = 0; i < adm_get_parameters[idx]; i++)
+			params_data[i] = adm_get_parameters[1+i+idx];
+
+	} else {
+		pr_err("%s: Get param data not copied! get_param array size %zd, index %d, params array size %zd, index %d\n",
+		__func__, ARRAY_SIZE(adm_get_parameters),
+		(1+adm_get_parameters[idx]+idx),
+		params_length/sizeof(int),
+		adm_get_parameters[idx]);
+	}
+	rc = 0;
+adm_get_param_return:
+	kfree(adm_params);
+
+	return rc;
+}
+
+int adm_get_params(int port_id, int copp_idx, uint32_t module_id,
+		   uint32_t param_id, uint32_t params_length, char *params)
+{
+	return adm_get_params_v2(port_id, copp_idx, module_id, param_id,
+				 params_length, params, 0);
+}
+
+int adm_get_pp_topo_module_list(int port_id, int copp_idx, int32_t param_length,
+				char *params)
+{
+	struct adm_cmd_get_pp_topo_module_list_t *adm_pp_module_list = NULL;
+	int sz, rc = 0, i = 0;
+	int port_idx, idx;
+	int32_t *params_data = (int32_t *)params;
+	int *topo_list;
+
+	pr_debug("%s: port_id 0x%x\n", __func__, port_id);
+	port_id = afe_convert_virtual_to_portid(port_id);
+	port_idx = adm_validate_and_get_port_index(port_id);
+	if (port_idx < 0) {
+		pr_err("%s: Invalid port_id 0x%x\n", __func__, port_id);
+		return -EINVAL;
+	}
+
+	if (copp_idx < 0 || copp_idx >= MAX_COPPS_PER_PORT) {
+		pr_err("%s: Invalid copp_num: %d\n", __func__, copp_idx);
+		return -EINVAL;
+	}
+
+	sz = sizeof(struct adm_cmd_get_pp_topo_module_list_t) + param_length;
+	adm_pp_module_list = kzalloc(sz, GFP_KERNEL);
+	if (!adm_pp_module_list) {
+		pr_err("%s: adm params memory alloc failed\n", __func__);
+		return -ENOMEM;
+	}
+
+	memcpy(((u8 *)adm_pp_module_list +
+		sizeof(struct adm_cmd_get_pp_topo_module_list_t)),
+		params, param_length);
+	adm_pp_module_list->hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+	APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
+	adm_pp_module_list->hdr.pkt_size = sz;
+	adm_pp_module_list->hdr.src_svc = APR_SVC_ADM;
+	adm_pp_module_list->hdr.src_domain = APR_DOMAIN_APPS;
+	adm_pp_module_list->hdr.src_port = port_id;
+	adm_pp_module_list->hdr.dest_svc = APR_SVC_ADM;
+	adm_pp_module_list->hdr.dest_domain = APR_DOMAIN_ADSP;
+	adm_pp_module_list->hdr.dest_port =
+			atomic_read(&this_adm.copp.id[port_idx][copp_idx]);
+	adm_pp_module_list->hdr.token =  port_idx << 16 | copp_idx;
+	adm_pp_module_list->hdr.opcode = ADM_CMD_GET_PP_TOPO_MODULE_LIST;
+	adm_pp_module_list->param_max_size = param_length;
+	/* Payload address and mmap handle set to zero by kzalloc */
+
+	atomic_set(&this_adm.copp.stat[port_idx][copp_idx], -1);
+
+	rc = apr_send_pkt(this_adm.apr, (uint32_t *)adm_pp_module_list);
+	if (rc < 0) {
+		pr_err("%s: Failed to Get Params on port %d\n", __func__,
+			port_id);
+		rc = -EINVAL;
+		goto adm_pp_module_list_l;
+	}
+	/* Wait for the callback with copp id */
+	rc = wait_event_timeout(this_adm.copp.wait[port_idx][copp_idx],
+		atomic_read(&this_adm.copp.stat[port_idx][copp_idx]) >= 0,
+		msecs_to_jiffies(TIMEOUT_MS));
+	if (!rc) {
+		pr_err("%s: get params timed out port = %d\n", __func__,
+			port_id);
+		rc = -EINVAL;
+		goto adm_pp_module_list_l;
+	} else if (atomic_read(&this_adm.copp.stat
+				[port_idx][copp_idx]) > 0) {
+		pr_err("%s: DSP returned error[%s]\n",
+				__func__, adsp_err_get_err_str(
+				atomic_read(&this_adm.copp.stat
+				[port_idx][copp_idx])));
+		rc = adsp_err_get_lnx_err_code(
+				atomic_read(&this_adm.copp.stat
+					[port_idx][copp_idx]));
+		goto adm_pp_module_list_l;
+	}
+	if (params_data) {
+		idx = ADM_GET_TOPO_MODULE_LIST_LENGTH * copp_idx;
+		topo_list = (int *)(adm_module_topo_list + idx);
+		if (param_length <= ADM_GET_TOPO_MODULE_LIST_LENGTH &&
+			idx <
+			(MAX_COPPS_PER_PORT * ADM_GET_TOPO_MODULE_LIST_LENGTH))
+			memcpy(params_data, topo_list, param_length);
+		else
+			pr_debug("%s: i/p size:%d > MAX param size:%d\n",
+				 __func__, param_length,
+				 (int)ADM_GET_TOPO_MODULE_LIST_LENGTH);
+		for (i = 1; i <= params_data[0]; i++)
+			pr_debug("module = 0x%x\n", params_data[i]);
+	}
+	rc = 0;
+adm_pp_module_list_l:
+	kfree(adm_pp_module_list);
+	pr_debug("%s: rc = %d\n", __func__, rc);
+	return rc;
+}
+
+static void adm_callback_debug_print(struct apr_client_data *data)
+{
+	uint32_t *payload = data->payload;
+
+	if (data->payload_size >= 8)
+		pr_debug("%s: code = 0x%x PL#0[0x%x], PL#1[0x%x], size = %d\n",
+			__func__, data->opcode, payload[0], payload[1],
+			data->payload_size);
+	else if (data->payload_size >= 4)
+		pr_debug("%s: code = 0x%x PL#0[0x%x], size = %d\n",
+			__func__, data->opcode, payload[0],
+			data->payload_size);
+	else
+		pr_debug("%s: code = 0x%x, size = %d\n",
+			__func__, data->opcode, data->payload_size);
+}
+
+int adm_set_multi_ch_map(char *channel_map, int path)
+{
+	int idx;
+
+	if (path == ADM_PATH_PLAYBACK) {
+		idx = ADM_MCH_MAP_IDX_PLAYBACK;
+	} else if (path == ADM_PATH_LIVE_REC) {
+		idx = ADM_MCH_MAP_IDX_REC;
+	} else {
+		pr_err("%s: invalid attempt to set path %d\n", __func__, path);
+		return -EINVAL;
+	}
+
+	memcpy(multi_ch_maps[idx].channel_mapping, channel_map,
+		PCM_FORMAT_MAX_NUM_CHANNEL);
+	multi_ch_maps[idx].set_channel_map = true;
+
+	return 0;
+}
+
+int adm_get_multi_ch_map(char *channel_map, int path)
+{
+	int idx;
+
+	if (path == ADM_PATH_PLAYBACK) {
+		idx = ADM_MCH_MAP_IDX_PLAYBACK;
+	} else if (path == ADM_PATH_LIVE_REC) {
+		idx = ADM_MCH_MAP_IDX_REC;
+	} else {
+		pr_err("%s: invalid attempt to get path %d\n", __func__, path);
+		return -EINVAL;
+	}
+
+	if (multi_ch_maps[idx].set_channel_map) {
+		memcpy(channel_map, multi_ch_maps[idx].channel_mapping,
+		       PCM_FORMAT_MAX_NUM_CHANNEL);
+	}
+
+	return 0;
+}
+
+static int32_t adm_callback(struct apr_client_data *data, void *priv)
+{
+	uint32_t *payload;
+	int i, j, port_idx, copp_idx, idx, client_id;
+
+	if (data == NULL) {
+		pr_err("%s: data parameter is NULL\n", __func__);
+		return -EINVAL;
+	}
+
+	payload = data->payload;
+
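+	/*
+	 * RESET_EVENTS arrives on an ADSP restart: every copp slot is
+	 * reinitialized, the APR handle is dropped, calibration mappings
+	 * are invalidated and the source tracking ION buffer is freed,
+	 * since all DSP-side handles are now stale.
+	 */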
+	if (data->opcode == RESET_EVENTS) {
+		pr_debug("%s: Reset event is received: %d %d apr[%pK]\n",
+			__func__,
+			data->reset_event, data->reset_proc, this_adm.apr);
+		if (this_adm.apr) {
+			apr_reset(this_adm.apr);
+			for (i = 0; i < AFE_MAX_PORTS; i++) {
+				for (j = 0; j < MAX_COPPS_PER_PORT; j++) {
+					atomic_set(&this_adm.copp.id[i][j],
+						   RESET_COPP_ID);
+					atomic_set(&this_adm.copp.cnt[i][j], 0);
+					atomic_set(
+					   &this_adm.copp.topology[i][j], 0);
+					atomic_set(&this_adm.copp.mode[i][j],
+						   0);
+					atomic_set(&this_adm.copp.stat[i][j],
+						   0);
+					atomic_set(&this_adm.copp.rate[i][j],
+						   0);
+					atomic_set(
+					&this_adm.copp.channels[i][j],
+						   0);
+					atomic_set(
+					    &this_adm.copp.bit_width[i][j], 0);
+					atomic_set(
+					    &this_adm.copp.app_type[i][j], 0);
+					atomic_set(
+					   &this_adm.copp.acdb_id[i][j], 0);
+					this_adm.copp.adm_status[i][j] =
+						ADM_STATUS_CALIBRATION_REQUIRED;
+				}
+			}
+			this_adm.apr = NULL;
+			cal_utils_clear_cal_block_q6maps(ADM_MAX_CAL_TYPES,
+				this_adm.cal_data);
+			mutex_lock(&this_adm.cal_data
+				[ADM_CUSTOM_TOP_CAL]->lock);
+			this_adm.set_custom_topology = 1;
+			mutex_unlock(&this_adm.cal_data[
+				ADM_CUSTOM_TOP_CAL]->lock);
+			rtac_clear_mapping(ADM_RTAC_CAL);
+			/*
+			 * Free the ION memory and clear the map handles
+			 * for Source Tracking
+			 */
+			if (this_adm.sourceTrackingData.memmap.paddr != 0) {
+				msm_audio_ion_free(
+					this_adm.sourceTrackingData.ion_client,
+					this_adm.sourceTrackingData.ion_handle);
+				this_adm.sourceTrackingData.ion_client = NULL;
+				this_adm.sourceTrackingData.ion_handle = NULL;
+				this_adm.sourceTrackingData.memmap.size = 0;
+				this_adm.sourceTrackingData.memmap.kvaddr =
+									 NULL;
+				this_adm.sourceTrackingData.memmap.paddr = 0;
+				this_adm.sourceTrackingData.apr_cmd_status = -1;
+				atomic_set(&this_adm.mem_map_handles[
+					ADM_MEM_MAP_INDEX_SOURCE_TRACKING], 0);
+			}
+		}
+		return 0;
+	}
+
+	adm_callback_debug_print(data);
+	if (data->payload_size) {
+		copp_idx = (data->token) & 0xFF;
+		port_idx = ((data->token) >> 16) & 0xFF;
+		client_id = ((data->token) >> 8) & 0xFF;
+		if (port_idx < 0 || port_idx >= AFE_MAX_PORTS) {
+			pr_err("%s: Invalid port idx %d token %d\n",
+				__func__, port_idx, data->token);
+			return 0;
+		}
+		if (copp_idx < 0 || copp_idx >= MAX_COPPS_PER_PORT) {
+			pr_err("%s: Invalid copp idx %d token %d\n",
+				__func__, copp_idx, data->token);
+			return 0;
+		}
+		if (client_id < 0 || client_id >= ADM_CLIENT_ID_MAX) {
+			pr_err("%s: Invalid client id %d\n", __func__,
+				client_id);
+			return 0;
+		}
+		if (data->opcode == APR_BASIC_RSP_RESULT) {
+			pr_debug("%s: APR_BASIC_RSP_RESULT id 0x%x\n",
+				__func__, payload[0]);
+			if (payload[1] != 0) {
+				pr_err("%s: cmd = 0x%x returned error = 0x%x\n",
+					__func__, payload[0], payload[1]);
+			}
+			switch (payload[0]) {
+			case ADM_CMD_SET_PP_PARAMS_V5:
+				pr_debug("%s: ADM_CMD_SET_PP_PARAMS_V5\n",
+					__func__);
+				if (client_id == ADM_CLIENT_ID_SOURCE_TRACKING)
+					this_adm.sourceTrackingData.
+						apr_cmd_status = payload[1];
+				else if (rtac_make_adm_callback(payload,
+							data->payload_size))
+					break;
+				/*
+				 * if soft volume is called and already
+				 * interrupted break out of the sequence here
+				 */
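+				/* fall through: wake the waiter below */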
+			case ADM_CMD_DEVICE_OPEN_V5:
+			case ADM_CMD_DEVICE_CLOSE_V5:
+			case ADM_CMD_DEVICE_OPEN_V6:
+			case ADM_CMD_SET_MTMX_STRTR_DEV_PARAMS_V1:
+				pr_debug("%s: Basic callback received, wake up.\n",
+					__func__);
+				atomic_set(&this_adm.copp.stat[port_idx]
+						[copp_idx], payload[1]);
+				wake_up(
+				&this_adm.copp.wait[port_idx][copp_idx]);
+				break;
+			case ADM_CMD_ADD_TOPOLOGIES:
+				pr_debug("%s: callback received, ADM_CMD_ADD_TOPOLOGIES.\n",
+					__func__);
+				atomic_set(&this_adm.adm_stat, payload[1]);
+				wake_up(&this_adm.adm_wait);
+				break;
+			case ADM_CMD_MATRIX_MAP_ROUTINGS_V5:
+			case ADM_CMD_STREAM_DEVICE_MAP_ROUTINGS_V5:
+				pr_debug("%s: Basic callback received, wake up.\n",
+					__func__);
+				atomic_set(&this_adm.matrix_map_stat,
+					payload[1]);
+				wake_up(&this_adm.matrix_map_wait);
+				break;
+			case ADM_CMD_SHARED_MEM_UNMAP_REGIONS:
+				pr_debug("%s: ADM_CMD_SHARED_MEM_UNMAP_REGIONS\n",
+					__func__);
+				atomic_set(&this_adm.adm_stat, payload[1]);
+				wake_up(&this_adm.adm_wait);
+				break;
+			case ADM_CMD_SHARED_MEM_MAP_REGIONS:
+				pr_debug("%s: ADM_CMD_SHARED_MEM_MAP_REGIONS\n",
+					__func__);
+				/*
+				 * Should only come here if there is an APR
+				 * error or malformed APR packet. Otherwise
+				 * the response is returned as
+				 * ADM_CMDRSP_SHARED_MEM_MAP_REGIONS.
+				 */
+				if (payload[1] != 0) {
+					pr_err("%s: ADM map error, resuming\n",
+						__func__);
+					atomic_set(&this_adm.adm_stat,
+						payload[1]);
+					wake_up(&this_adm.adm_wait);
+				}
+				break;
+			case ADM_CMD_GET_PP_PARAMS_V5:
+				pr_debug("%s: ADM_CMD_GET_PP_PARAMS_V5\n",
+					__func__);
+				/*
+				 * Should only come here if there is an APR
+				 * error or malformed APR packet. Otherwise
+				 * the response is returned as
+				 * ADM_CMDRSP_GET_PP_PARAMS_V5.
+				 */
+				if (client_id ==
+					ADM_CLIENT_ID_SOURCE_TRACKING) {
+					this_adm.sourceTrackingData.
+						apr_cmd_status = payload[1];
+					if (payload[1] != 0)
+						pr_err("%s: ADM get param error = %d\n",
+							__func__, payload[1]);
+
+					atomic_set(&this_adm.copp.stat
+						[port_idx][copp_idx],
+						payload[1]);
+					wake_up(&this_adm.copp.wait
+							[port_idx][copp_idx]);
+				} else {
+					if (payload[1] != 0) {
+						pr_err("%s: ADM get param error = %d, resuming\n",
+							__func__, payload[1]);
+
+						rtac_make_adm_callback(payload,
+							data->payload_size);
+					}
+				}
+				break;
+			case ADM_CMD_SET_PSPD_MTMX_STRTR_PARAMS_V5:
+				pr_debug("%s: ADM_CMD_SET_PSPD_MTMX_STRTR_PARAMS_V5\n",
+					__func__);
+				atomic_set(&this_adm.copp.stat[port_idx]
+						[copp_idx], payload[1]);
+				wake_up(
+				&this_adm.copp.wait[port_idx][copp_idx]);
+				break;
+			case ADM_CMD_GET_PP_TOPO_MODULE_LIST:
+				pr_debug("%s:ADM_CMD_GET_PP_TOPO_MODULE_LIST\n",
+					 __func__);
+				if (payload[1] != 0)
+					pr_err("%s: ADM get topo list error = %d\n",
+						__func__, payload[1]);
+				break;
+			default:
+				pr_err("%s: Unknown Cmd: 0x%x\n", __func__,
+								payload[0]);
+				break;
+			}
+			return 0;
+		}
+
+		switch (data->opcode) {
+		case ADM_CMDRSP_DEVICE_OPEN_V5:
+		case ADM_CMDRSP_DEVICE_OPEN_V6: {
+			struct adm_cmd_rsp_device_open_v5 *open =
+			(struct adm_cmd_rsp_device_open_v5 *)data->payload;
+
+			if (open->copp_id == INVALID_COPP_ID) {
+				pr_err("%s: invalid coppid rxed %d\n",
+					__func__, open->copp_id);
+				atomic_set(&this_adm.copp.stat[port_idx]
+						[copp_idx], ADSP_EBADPARAM);
+				wake_up(
+				&this_adm.copp.wait[port_idx][copp_idx]);
+				break;
+			}
+			atomic_set(&this_adm.copp.stat
+				[port_idx][copp_idx], payload[0]);
+			atomic_set(&this_adm.copp.id[port_idx][copp_idx],
+				   open->copp_id);
+			pr_debug("%s: coppid rxed=%d\n", __func__,
+				 open->copp_id);
+			wake_up(&this_adm.copp.wait[port_idx][copp_idx]);
+			}
+			break;
+		case ADM_CMDRSP_GET_PP_PARAMS_V5:
+			pr_debug("%s: ADM_CMDRSP_GET_PP_PARAMS_V5\n", __func__);
+			if (payload[0] != 0)
+				pr_err("%s: ADM_CMDRSP_GET_PP_PARAMS_V5 returned error = 0x%x\n",
+					__func__, payload[0]);
+			if (client_id == ADM_CLIENT_ID_SOURCE_TRACKING)
+				this_adm.sourceTrackingData.apr_cmd_status =
+								payload[0];
+			else if (rtac_make_adm_callback(payload,
+					data->payload_size))
+				break;
+
+			idx = ADM_GET_PARAMETER_LENGTH * copp_idx;
+			if ((payload[0] == 0) && (data->payload_size >
+				(4 * sizeof(*payload))) &&
+				(data->payload_size - 4 >=
+				payload[3]) &&
+				(ARRAY_SIZE(adm_get_parameters) >
+				idx) &&
+				(ARRAY_SIZE(adm_get_parameters)-idx-1 >=
+				payload[3])) {
+				adm_get_parameters[idx] = payload[3] /
+							sizeof(uint32_t);
+				/*
+				 * payload[3] is param_size which is
+				 * expressed in number of bytes
+				 */
+				pr_debug("%s: GET_PP PARAM:received parameter length: 0x%x\n",
+					__func__, adm_get_parameters[idx]);
+				/* storing param size then params */
+				for (i = 0; i < payload[3] /
+						sizeof(uint32_t); i++)
+					adm_get_parameters[idx+1+i] =
+							payload[4+i];
+			} else if (payload[0] == 0) {
+				adm_get_parameters[idx] = -1;
+				pr_err("%s: Out of band case, setting size to %d\n",
+					__func__, adm_get_parameters[idx]);
+			} else {
+				adm_get_parameters[idx] = -1;
+				pr_err("%s: GET_PP_PARAMS failed, setting size to %d\n",
+					__func__, adm_get_parameters[idx]);
+			}
+			atomic_set(&this_adm.copp.stat
+				[port_idx][copp_idx], payload[0]);
+			wake_up(&this_adm.copp.wait[port_idx][copp_idx]);
+			break;
+		case ADM_CMDRSP_GET_PP_TOPO_MODULE_LIST:
+			pr_debug("%s: ADM_CMDRSP_GET_PP_TOPO_MODULE_LIST\n",
+				 __func__);
+			if (payload[0] != 0) {
+				pr_err("%s: ADM_CMDRSP_GET_PP_TOPO_MODULE_LIST err = 0x%x\n",
+					 __func__, payload[0]);
+			} else if (payload[1] >
+				   ((ADM_GET_TOPO_MODULE_LIST_LENGTH /
+				   sizeof(uint32_t)) - 1)) {
+				pr_err("%s: ADM_CMDRSP_GET_PP_TOPO_MODULE_LIST size = %d\n",
+					 __func__, payload[1]);
+			} else {
+				idx = ADM_GET_TOPO_MODULE_LIST_LENGTH *
+					copp_idx;
+				pr_debug("%s:Num modules payload[1] %d\n",
+					 __func__, payload[1]);
+				adm_module_topo_list[idx] = payload[1];
+				for (i = 1; i <= payload[1]; i++) {
+					adm_module_topo_list[idx+i] =
+						payload[1+i];
+					pr_debug("%s:payload[%d] = %x\n",
+						 __func__, (i+1), payload[1+i]);
+				}
+			}
+			atomic_set(&this_adm.copp.stat
+				[port_idx][copp_idx], payload[0]);
+			wake_up(&this_adm.copp.wait[port_idx][copp_idx]);
+			break;
+		case ADM_CMDRSP_SHARED_MEM_MAP_REGIONS:
+			pr_debug("%s: ADM_CMDRSP_SHARED_MEM_MAP_REGIONS\n",
+				__func__);
+			atomic_set(&this_adm.mem_map_handles[
+				   atomic_read(&this_adm.mem_map_index)],
+				   *payload);
+			atomic_set(&this_adm.adm_stat, 0);
+			wake_up(&this_adm.adm_wait);
+			break;
+		default:
+			pr_err("%s: Unknown cmd:0x%x\n", __func__,
+				data->opcode);
+			break;
+		}
+	}
+	return 0;
+}
+
+static int adm_memory_map_regions(phys_addr_t *buf_add, uint32_t mempool_id,
+			   uint32_t *bufsz, uint32_t bufcnt)
+{
+	struct  avs_cmd_shared_mem_map_regions *mmap_regions = NULL;
+	struct  avs_shared_map_region_payload *mregions = NULL;
+	void    *mmap_region_cmd = NULL;
+	void    *payload = NULL;
+	int     ret = 0;
+	int     i = 0;
+	int     cmd_size = 0;
+
+	pr_debug("%s:\n", __func__);
+	if (this_adm.apr == NULL) {
+		this_adm.apr = apr_register("ADSP", "ADM", adm_callback,
+						0xFFFFFFFF, &this_adm);
+		if (this_adm.apr == NULL) {
+			pr_err("%s: Unable to register ADM\n", __func__);
+			ret = -ENODEV;
+			return ret;
+		}
+		rtac_set_adm_handle(this_adm.apr);
+	}
+
+	cmd_size = sizeof(struct avs_cmd_shared_mem_map_regions)
+			+ sizeof(struct avs_shared_map_region_payload)
+			* bufcnt;
+
+	mmap_region_cmd = kzalloc(cmd_size, GFP_KERNEL);
+	if (!mmap_region_cmd) {
+		pr_err("%s: allocate mmap_region_cmd failed\n", __func__);
+		return -ENOMEM;
+	}
+	mmap_regions = (struct avs_cmd_shared_mem_map_regions *)mmap_region_cmd;
+	mmap_regions->hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+						APR_HDR_LEN(APR_HDR_SIZE),
+								APR_PKT_VER);
+	mmap_regions->hdr.pkt_size = cmd_size;
+	mmap_regions->hdr.src_port = 0;
+
+	mmap_regions->hdr.dest_port = 0;
+	mmap_regions->hdr.token = 0;
+	mmap_regions->hdr.opcode = ADM_CMD_SHARED_MEM_MAP_REGIONS;
+	mmap_regions->mem_pool_id = ADSP_MEMORY_MAP_SHMEM8_4K_POOL & 0x00ff;
+	mmap_regions->num_regions = bufcnt & 0x00ff;
+	mmap_regions->property_flag = 0x00;
+
+	pr_debug("%s: map_regions->num_regions = %d\n", __func__,
+				mmap_regions->num_regions);
+	payload = ((u8 *) mmap_region_cmd +
+				sizeof(struct avs_cmd_shared_mem_map_regions));
+	mregions = (struct avs_shared_map_region_payload *)payload;
+
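+	/*
+	 * One region descriptor per buffer; 64-bit physical addresses
+	 * are split into lsw/msw halves for the 32-bit APR fields.
+	 */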
+	for (i = 0; i < bufcnt; i++) {
+		mregions->shm_addr_lsw = lower_32_bits(buf_add[i]);
+		mregions->shm_addr_msw =
+				msm_audio_populate_upper_32_bits(buf_add[i]);
+		mregions->mem_size_bytes = bufsz[i];
+		++mregions;
+	}
+
+	atomic_set(&this_adm.adm_stat, -1);
+	ret = apr_send_pkt(this_adm.apr, (uint32_t *) mmap_region_cmd);
+	if (ret < 0) {
+		pr_err("%s: mmap_regions op[0x%x]rc[%d]\n", __func__,
+					mmap_regions->hdr.opcode, ret);
+		ret = -EINVAL;
+		goto fail_cmd;
+	}
+
+	ret = wait_event_timeout(this_adm.adm_wait,
+				 atomic_read(&this_adm.adm_stat) >= 0,
+				 5 * HZ);
+	if (!ret) {
+		pr_err("%s: timeout. waited for memory_map\n", __func__);
+		ret = -EINVAL;
+		goto fail_cmd;
+	} else if (atomic_read(&this_adm.adm_stat) > 0) {
+		pr_err("%s: DSP returned error[%s]\n",
+				__func__, adsp_err_get_err_str(
+				atomic_read(&this_adm.adm_stat)));
+		ret = adsp_err_get_lnx_err_code(
+				atomic_read(&this_adm.adm_stat));
+		goto fail_cmd;
+	}
+fail_cmd:
+	kfree(mmap_region_cmd);
+	return ret;
+}
+
+static int adm_memory_unmap_regions(void)
+{
+	struct  avs_cmd_shared_mem_unmap_regions unmap_regions;
+	int     ret = 0;
+
+	pr_debug("%s:\n", __func__);
+	if (this_adm.apr == NULL) {
+		pr_err("%s: APR handle NULL\n", __func__);
+		return -EINVAL;
+	}
+
+	unmap_regions.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+						APR_HDR_LEN(APR_HDR_SIZE),
+							APR_PKT_VER);
+	unmap_regions.hdr.pkt_size = sizeof(unmap_regions);
+	unmap_regions.hdr.src_port = 0;
+	unmap_regions.hdr.dest_port = 0;
+	unmap_regions.hdr.token = 0;
+	unmap_regions.hdr.opcode = ADM_CMD_SHARED_MEM_UNMAP_REGIONS;
+	unmap_regions.mem_map_handle = atomic_read(&this_adm.
+		mem_map_handles[atomic_read(&this_adm.mem_map_index)]);
+	atomic_set(&this_adm.adm_stat, -1);
+	ret = apr_send_pkt(this_adm.apr, (uint32_t *) &unmap_regions);
+	if (ret < 0) {
+		pr_err("%s: mmap_regions op[0x%x]rc[%d]\n", __func__,
+				unmap_regions.hdr.opcode, ret);
+		ret = -EINVAL;
+		goto fail_cmd;
+	}
+
+	ret = wait_event_timeout(this_adm.adm_wait,
+				 atomic_read(&this_adm.adm_stat) >= 0,
+				 5 * HZ);
+	if (!ret) {
+		pr_err("%s: timeout. waited for memory_unmap\n",
+		       __func__);
+		ret = -EINVAL;
+		goto fail_cmd;
+	} else if (atomic_read(&this_adm.adm_stat) > 0) {
+		pr_err("%s: DSP returned error[%s]\n",
+				__func__, adsp_err_get_err_str(
+				atomic_read(&this_adm.adm_stat)));
+		ret = adsp_err_get_lnx_err_code(
+				atomic_read(&this_adm.adm_stat));
+		goto fail_cmd;
+	} else {
+		pr_debug("%s: Unmap handle 0x%x succeeded\n", __func__,
+			 unmap_regions.mem_map_handle);
+	}
+fail_cmd:
+	return ret;
+}
+
+static int remap_cal_data(struct cal_block_data *cal_block, int cal_index)
+{
+	int ret = 0;
+
+	if (cal_block->map_data.ion_client == NULL) {
+		pr_err("%s: No ION allocation for cal index %d!\n",
+			__func__, cal_index);
+		ret = -EINVAL;
+		goto done;
+	}
+
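+	/*
+	 * Map each cal block only once: a non-zero q6map_handle means it
+	 * is already mapped on the DSP and the cached handle is reused.
+	 */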
+	if ((cal_block->map_data.map_size > 0) &&
+		(cal_block->map_data.q6map_handle == 0)) {
+		atomic_set(&this_adm.mem_map_index, cal_index);
+		ret = adm_memory_map_regions(&cal_block->cal_data.paddr, 0,
+				(uint32_t *)&cal_block->map_data.map_size, 1);
+		if (ret < 0) {
+			pr_err("%s: ADM mmap did not work! size = %zd ret %d\n",
+				__func__,
+				cal_block->map_data.map_size, ret);
+			pr_debug("%s: ADM mmap did not work! addr = 0x%pK, size = %zd ret %d\n",
+				__func__,
+				&cal_block->cal_data.paddr,
+				cal_block->map_data.map_size, ret);
+			goto done;
+		}
+		cal_block->map_data.q6map_handle = atomic_read(&this_adm.
+			mem_map_handles[cal_index]);
+	}
+done:
+	return ret;
+}
+
+static void send_adm_custom_topology(void)
+{
+	struct cal_block_data		*cal_block = NULL;
+	struct cmd_set_topologies	adm_top;
+	int				cal_index = ADM_CUSTOM_TOP_CAL;
+	int				result;
+
+	if (this_adm.cal_data[cal_index] == NULL)
+		goto done;
+
+	mutex_lock(&this_adm.cal_data[cal_index]->lock);
+	if (!this_adm.set_custom_topology)
+		goto unlock;
+	this_adm.set_custom_topology = 0;
+
+	cal_block = cal_utils_get_only_cal_block(this_adm.cal_data[cal_index]);
+	if (cal_block == NULL)
+		goto unlock;
+
+	pr_debug("%s: Sending cal_index %d\n", __func__, cal_index);
+
+	result = remap_cal_data(cal_block, cal_index);
+	if (result) {
+		pr_err("%s: Remap_cal_data failed for cal %d!\n",
+			__func__, cal_index);
+		goto unlock;
+	}
+	atomic_set(&this_adm.mem_map_index, cal_index);
+	atomic_set(&this_adm.mem_map_handles[cal_index],
+		cal_block->map_data.q6map_handle);
+
+	if (cal_block->cal_data.size == 0) {
+		pr_debug("%s: No ADM cal to send\n", __func__);
+		goto unlock;
+	}
+
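+	/*
+	 * Unlike the in-band set-params path, the topology payload stays
+	 * in the mapped calibration buffer and is referenced out-of-band
+	 * through its physical address and q6map_handle.
+	 */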
+	adm_top.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+		APR_HDR_LEN(20), APR_PKT_VER);
+	adm_top.hdr.pkt_size = sizeof(adm_top);
+	adm_top.hdr.src_svc = APR_SVC_ADM;
+	adm_top.hdr.src_domain = APR_DOMAIN_APPS;
+	adm_top.hdr.src_port = 0;
+	adm_top.hdr.dest_svc = APR_SVC_ADM;
+	adm_top.hdr.dest_domain = APR_DOMAIN_ADSP;
+	adm_top.hdr.dest_port = 0;
+	adm_top.hdr.token = 0;
+	adm_top.hdr.opcode = ADM_CMD_ADD_TOPOLOGIES;
+	adm_top.payload_addr_lsw = lower_32_bits(cal_block->cal_data.paddr);
+	adm_top.payload_addr_msw = msm_audio_populate_upper_32_bits(
+						cal_block->cal_data.paddr);
+	adm_top.mem_map_handle = cal_block->map_data.q6map_handle;
+	adm_top.payload_size = cal_block->cal_data.size;
+
+	atomic_set(&this_adm.adm_stat, -1);
+	pr_debug("%s: Sending ADM_CMD_ADD_TOPOLOGIES payload = 0x%pK, size = %d\n",
+		__func__, &cal_block->cal_data.paddr,
+		adm_top.payload_size);
+	result = apr_send_pkt(this_adm.apr, (uint32_t *)&adm_top);
+	if (result < 0) {
+		pr_err("%s: Set topologies failed payload size = 0x%zd result %d\n",
+			__func__, cal_block->cal_data.size, result);
+		goto unlock;
+	}
+	/* Wait for the callback */
+	result = wait_event_timeout(this_adm.adm_wait,
+				    atomic_read(&this_adm.adm_stat) >= 0,
+				    msecs_to_jiffies(TIMEOUT_MS));
+	if (!result) {
+		pr_err("%s: Set topologies timed out payload size = 0x%zd\n",
+			__func__, cal_block->cal_data.size);
+		goto unlock;
+	} else if (atomic_read(&this_adm.adm_stat) > 0) {
+		pr_err("%s: DSP returned error[%s]\n",
+				__func__, adsp_err_get_err_str(
+				atomic_read(&this_adm.adm_stat)));
+		result = adsp_err_get_lnx_err_code(
+				atomic_read(&this_adm.adm_stat));
+		goto unlock;
+	}
+unlock:
+	mutex_unlock(&this_adm.cal_data[cal_index]->lock);
+done:
+	return;
+}
+
+static int send_adm_cal_block(int port_id, int copp_idx,
+			      struct cal_block_data *cal_block, int perf_mode,
+			      int app_type, int acdb_id, int sample_rate)
+{
+	s32				result = 0;
+	struct adm_cmd_set_pp_params_v5	adm_params;
+	int port_idx;
+
+	pr_debug("%s: Port id 0x%x sample_rate %d\n", __func__,
+			port_id, sample_rate);
+	port_id = afe_convert_virtual_to_portid(port_id);
+	port_idx = adm_validate_and_get_port_index(port_id);
+	if (port_idx < 0) {
+		pr_err("%s: Invalid port_id 0x%x\n", __func__, port_id);
+		return -EINVAL;
+	}
+	if (!cal_block) {
+		pr_debug("%s: No ADM cal to send for port_id = 0x%x!\n",
+			__func__, port_id);
+		result = -EINVAL;
+		goto done;
+	}
+	if (cal_block->cal_data.size <= 0) {
+		pr_debug("%s: No ADM cal to send for port_id = 0x%x!\n",
+			__func__, port_id);
+		result = -EINVAL;
+		goto done;
+	}
+
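+	/*
+	 * Legacy PCM sessions on the DS2 topology do not take the common
+	 * ADM cal path; presumably the DS2 datapath applies its own
+	 * calibration.
+	 */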
+	if (perf_mode == LEGACY_PCM_MODE &&
+		((atomic_read(&this_adm.copp.topology[port_idx][copp_idx])) ==
+			DS2_ADM_COPP_TOPOLOGY_ID)) {
+		pr_err("%s: perf_mode %d, topology 0x%x\n", __func__, perf_mode,
+			atomic_read(
+				&this_adm.copp.topology[port_idx][copp_idx]));
+		goto done;
+	}
+
+	adm_params.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+		APR_HDR_LEN(20), APR_PKT_VER);
+	adm_params.hdr.pkt_size = sizeof(adm_params);
+	adm_params.hdr.src_svc = APR_SVC_ADM;
+	adm_params.hdr.src_domain = APR_DOMAIN_APPS;
+	adm_params.hdr.src_port = port_id;
+	adm_params.hdr.dest_svc = APR_SVC_ADM;
+	adm_params.hdr.dest_domain = APR_DOMAIN_ADSP;
+
+	adm_params.hdr.token = port_idx << 16 | copp_idx;
+	adm_params.hdr.dest_port =
+			atomic_read(&this_adm.copp.id[port_idx][copp_idx]);
+	adm_params.hdr.opcode = ADM_CMD_SET_PP_PARAMS_V5;
+	adm_params.payload_addr_lsw = lower_32_bits(cal_block->cal_data.paddr);
+	adm_params.payload_addr_msw = msm_audio_populate_upper_32_bits(
+						cal_block->cal_data.paddr);
+	adm_params.mem_map_handle = cal_block->map_data.q6map_handle;
+	adm_params.payload_size = cal_block->cal_data.size;
+
+	atomic_set(&this_adm.copp.stat[port_idx][copp_idx], -1);
+	pr_debug("%s: Sending SET_PARAMS payload = 0x%pK, size = %d\n",
+		__func__, &cal_block->cal_data.paddr,
+		adm_params.payload_size);
+	result = apr_send_pkt(this_adm.apr, (uint32_t *)&adm_params);
+	if (result < 0) {
+		pr_err("%s: Set params failed port 0x%x result %d\n",
+				__func__, port_id, result);
+		pr_debug("%s: Set params failed port = 0x%x payload = 0x%pK result %d\n",
+			__func__, port_id, &cal_block->cal_data.paddr, result);
+		result = -EINVAL;
+		goto done;
+	}
+	/* Wait for the callback */
+	result = wait_event_timeout(this_adm.copp.wait[port_idx][copp_idx],
+		atomic_read(&this_adm.copp.stat[port_idx][copp_idx]) >= 0,
+		msecs_to_jiffies(TIMEOUT_MS));
+	if (!result) {
+		pr_err("%s: Set params timed out port = 0x%x\n",
+				__func__, port_id);
+		pr_debug("%s: Set params timed out port = 0x%x, payload = 0x%pK\n",
+			__func__, port_id, &cal_block->cal_data.paddr);
+		result = -EINVAL;
+		goto done;
+	} else if (atomic_read(&this_adm.copp.stat
+				[port_idx][copp_idx]) > 0) {
+		pr_err("%s: DSP returned error[%s]\n",
+				__func__, adsp_err_get_err_str(
+				atomic_read(&this_adm.copp.stat
+				[port_idx][copp_idx])));
+		result = adsp_err_get_lnx_err_code(
+				atomic_read(&this_adm.copp.stat
+					[port_idx][copp_idx]));
+		goto done;
+	}
+
+done:
+	return result;
+}
+
+static struct cal_block_data *adm_find_cal_by_path(int cal_index, int path)
+{
+	struct list_head		*ptr, *next;
+	struct cal_block_data		*cal_block = NULL;
+	struct audio_cal_info_audproc	*audproc_cal_info = NULL;
+	struct audio_cal_info_audvol	*audvol_cal_info = NULL;
+
+	pr_debug("%s:\n", __func__);
+
+	list_for_each_safe(ptr, next,
+		&this_adm.cal_data[cal_index]->cal_blocks) {
+
+		cal_block = list_entry(ptr,
+			struct cal_block_data, list);
+
+		if (cal_index == ADM_AUDPROC_CAL ||
+		    cal_index == ADM_LSM_AUDPROC_CAL) {
+			audproc_cal_info = cal_block->cal_info;
+			if (audproc_cal_info->path == path)
+				return cal_block;
+		} else if (cal_index == ADM_AUDVOL_CAL) {
+			audvol_cal_info = cal_block->cal_info;
+			if (audvol_cal_info->path == path)
+				return cal_block;
+		}
+	}
+	pr_debug("%s: Can't find ADM cal for cal_index %d, path %d\n",
+		__func__, cal_index, path);
+	return NULL;
+}
+
+static struct cal_block_data *adm_find_cal_by_app_type(int cal_index, int path,
+								int app_type)
+{
+	struct list_head		*ptr, *next;
+	struct cal_block_data		*cal_block = NULL;
+	struct audio_cal_info_audproc	*audproc_cal_info = NULL;
+	struct audio_cal_info_audvol	*audvol_cal_info = NULL;
+
+	pr_debug("%s:\n", __func__);
+
+	list_for_each_safe(ptr, next,
+		&this_adm.cal_data[cal_index]->cal_blocks) {
+
+		cal_block = list_entry(ptr,
+			struct cal_block_data, list);
+
+		if (cal_index == ADM_AUDPROC_CAL ||
+		    cal_index == ADM_LSM_AUDPROC_CAL) {
+			audproc_cal_info = cal_block->cal_info;
+			if ((audproc_cal_info->path == path) &&
+			    (audproc_cal_info->app_type == app_type))
+				return cal_block;
+		} else if (cal_index == ADM_AUDVOL_CAL) {
+			audvol_cal_info = cal_block->cal_info;
+			if ((audvol_cal_info->path == path) &&
+			    (audvol_cal_info->app_type == app_type))
+				return cal_block;
+		}
+	}
+	pr_debug("%s: Can't find ADM cal for cal_index %d, path %d, app %d, defaulting to search by path\n",
+		__func__, cal_index, path, app_type);
+	return adm_find_cal_by_path(cal_index, path);
+}
+
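+/*
+ * Calibration lookup falls back progressively: adm_find_cal() matches
+ * (path, app_type, acdb_id, sample_rate) exactly, then defers to
+ * adm_find_cal_by_app_type() for (path, app_type), which in turn falls
+ * back to adm_find_cal_by_path().
+ */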
+static struct cal_block_data *adm_find_cal(int cal_index, int path,
+					   int app_type, int acdb_id,
+					   int sample_rate)
+{
+	struct list_head		*ptr, *next;
+	struct cal_block_data		*cal_block = NULL;
+	struct audio_cal_info_audproc	*audproc_cal_info = NULL;
+	struct audio_cal_info_audvol	*audvol_cal_info = NULL;
+
+	pr_debug("%s:\n", __func__);
+
+	list_for_each_safe(ptr, next,
+		&this_adm.cal_data[cal_index]->cal_blocks) {
+
+		cal_block = list_entry(ptr,
+			struct cal_block_data, list);
+
+		if (cal_index == ADM_AUDPROC_CAL ||
+		    cal_index == ADM_LSM_AUDPROC_CAL) {
+			audproc_cal_info = cal_block->cal_info;
+			if ((audproc_cal_info->path == path) &&
+			    (audproc_cal_info->app_type == app_type) &&
+			    (audproc_cal_info->acdb_id == acdb_id) &&
+			    (audproc_cal_info->sample_rate == sample_rate))
+				return cal_block;
+		} else if (cal_index == ADM_AUDVOL_CAL) {
+			audvol_cal_info = cal_block->cal_info;
+			if ((audvol_cal_info->path == path) &&
+			    (audvol_cal_info->app_type == app_type) &&
+			    (audvol_cal_info->acdb_id == acdb_id))
+				return cal_block;
+		}
+	}
+	pr_debug("%s: Can't find ADM cal for cal_index %d, path %d, app %d, acdb_id %d sample_rate %d defaulting to search by app type\n",
+		__func__, cal_index, path, app_type, acdb_id, sample_rate);
+	return adm_find_cal_by_app_type(cal_index, path, app_type);
+}
+
+static int adm_remap_and_send_cal_block(int cal_index, int port_id,
+	int copp_idx, struct cal_block_data *cal_block, int perf_mode,
+	int app_type, int acdb_id, int sample_rate)
+{
+	int ret = 0;
+
+	pr_debug("%s: Sending cal_index cal %d\n", __func__, cal_index);
+	ret = remap_cal_data(cal_block, cal_index);
+	if (ret) {
+		pr_err("%s: Remap_cal_data failed for cal %d!\n",
+			__func__, cal_index);
+		goto done;
+	}
+	ret = send_adm_cal_block(port_id, copp_idx, cal_block, perf_mode,
+				app_type, acdb_id, sample_rate);
+	if (ret < 0)
+		pr_debug("%s: No cal sent for cal_index %d, port_id = 0x%x! ret %d sample_rate %d\n",
+			__func__, cal_index, port_id, ret, sample_rate);
+done:
+	return ret;
+}
+
+static void send_adm_cal_type(int cal_index, int path, int port_id,
+			      int copp_idx, int perf_mode, int app_type,
+			      int acdb_id, int sample_rate)
+{
+	struct cal_block_data		*cal_block = NULL;
+	int ret;
+
+	pr_debug("%s: cal index %d\n", __func__, cal_index);
+
+	if (this_adm.cal_data[cal_index] == NULL) {
+		pr_debug("%s: cal_index %d not allocated!\n",
+			__func__, cal_index);
+		goto done;
+	}
+
+	mutex_lock(&this_adm.cal_data[cal_index]->lock);
+	cal_block = adm_find_cal(cal_index, path, app_type, acdb_id,
+				sample_rate);
+	if (cal_block == NULL)
+		goto unlock;
+
+	ret = adm_remap_and_send_cal_block(cal_index, port_id, copp_idx,
+		cal_block, perf_mode, app_type, acdb_id, sample_rate);
+unlock:
+	mutex_unlock(&this_adm.cal_data[cal_index]->lock);
+done:
+	return;
+}
+
+static int get_cal_path(int path)
+{
+	if (path == 0x1)
+		return RX_DEVICE;
+	else
+		return TX_DEVICE;
+}
+
+static void send_adm_cal(int port_id, int copp_idx, int path, int perf_mode,
+			 int app_type, int acdb_id, int sample_rate,
+			 int passthr_mode)
+{
+	pr_debug("%s: port id 0x%x copp_idx %d\n", __func__, port_id, copp_idx);
+
+	if (passthr_mode != LISTEN)
+		send_adm_cal_type(ADM_AUDPROC_CAL, path, port_id, copp_idx,
+				perf_mode, app_type, acdb_id, sample_rate);
+	else
+		send_adm_cal_type(ADM_LSM_AUDPROC_CAL, path, port_id, copp_idx,
+				  perf_mode, app_type, acdb_id, sample_rate);
+	send_adm_cal_type(ADM_AUDVOL_CAL, path, port_id, copp_idx, perf_mode,
+			  app_type, acdb_id, sample_rate);
+	return;
+}
+
+int adm_connect_afe_port(int mode, int session_id, int port_id)
+{
+	struct adm_cmd_connect_afe_port_v5	cmd;
+	int ret = 0;
+	int port_idx, copp_idx = 0;
+
+	pr_debug("%s: port_id: 0x%x session id:%d mode:%d\n", __func__,
+				port_id, session_id, mode);
+
+	port_id = afe_convert_virtual_to_portid(port_id);
+	port_idx = adm_validate_and_get_port_index(port_id);
+	if (port_idx < 0) {
+		pr_err("%s: Invalid port_id 0x%x\n", __func__, port_id);
+		return -EINVAL;
+	}
+
+	if (this_adm.apr == NULL) {
+		this_adm.apr = apr_register("ADSP", "ADM", adm_callback,
+						0xFFFFFFFF, &this_adm);
+		if (this_adm.apr == NULL) {
+			pr_err("%s: Unable to register ADM\n", __func__);
+			ret = -ENODEV;
+			return ret;
+		}
+		rtac_set_adm_handle(this_adm.apr);
+	}
+	pr_debug("%s: Port ID 0x%x, index %d\n", __func__, port_id, port_idx);
+
+	cmd.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+			APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
+	cmd.hdr.pkt_size = sizeof(cmd);
+	cmd.hdr.src_svc = APR_SVC_ADM;
+	cmd.hdr.src_domain = APR_DOMAIN_APPS;
+	cmd.hdr.src_port = port_id;
+	cmd.hdr.dest_svc = APR_SVC_ADM;
+	cmd.hdr.dest_domain = APR_DOMAIN_ADSP;
+	cmd.hdr.dest_port = 0; /* Ignored */
+	cmd.hdr.token = port_idx << 16 | copp_idx;
+	cmd.hdr.opcode = ADM_CMD_CONNECT_AFE_PORT_V5;
+
+	cmd.mode = mode;
+	cmd.session_id = session_id;
+	cmd.afe_port_id = port_id;
+
+	atomic_set(&this_adm.copp.stat[port_idx][copp_idx], -1);
+	ret = apr_send_pkt(this_adm.apr, (uint32_t *)&cmd);
+	if (ret < 0) {
+		pr_err("%s: ADM enable for port_id: 0x%x failed ret %d\n",
+					__func__, port_id, ret);
+		ret = -EINVAL;
+		goto fail_cmd;
+	}
+	/* Wait for the callback with copp id */
+	ret = wait_event_timeout(this_adm.copp.wait[port_idx][copp_idx],
+		atomic_read(&this_adm.copp.stat[port_idx][copp_idx]) >= 0,
+		msecs_to_jiffies(TIMEOUT_MS));
+	if (!ret) {
+		pr_err("%s: ADM connect timedout for port_id: 0x%x\n",
+			__func__, port_id);
+		ret = -EINVAL;
+		goto fail_cmd;
+	} else if (atomic_read(&this_adm.copp.stat
+				[port_idx][copp_idx]) > 0) {
+		pr_err("%s: DSP returned error[%s]\n",
+				__func__, adsp_err_get_err_str(
+				atomic_read(&this_adm.copp.stat
+				[port_idx][copp_idx])));
+		ret = adsp_err_get_lnx_err_code(
+				atomic_read(&this_adm.copp.stat
+					[port_idx][copp_idx]));
+		goto fail_cmd;
+	}
+	atomic_inc(&this_adm.copp.cnt[port_idx][copp_idx]);
+	return 0;
+
+fail_cmd:
+
+	return ret;
+}
+
+int adm_arrange_mch_map(struct adm_cmd_device_open_v5 *open, int path,
+			 int channel_mode)
+{
+	int rc = 0, idx;
+
+	memset(open->dev_channel_mapping, 0, PCM_FORMAT_MAX_NUM_CHANNEL);
+	switch (path) {
+	case ADM_PATH_PLAYBACK:
+		idx = ADM_MCH_MAP_IDX_PLAYBACK;
+		break;
+	case ADM_PATH_LIVE_REC:
+	case ADM_PATH_NONLIVE_REC:
+		idx = ADM_MCH_MAP_IDX_REC;
+		break;
+	default:
+		goto non_mch_path;
+	}
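+	/*
+	 * For more than two channels honour a map programmed through
+	 * adm_set_multi_ch_map(); otherwise fall back to the default
+	 * layout for the channel count.
+	 */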
+	if ((open->dev_num_channel > 2) && multi_ch_maps[idx].set_channel_map) {
+		memcpy(open->dev_channel_mapping,
+			multi_ch_maps[idx].channel_mapping,
+			PCM_FORMAT_MAX_NUM_CHANNEL);
+	} else {
+		if (channel_mode == 1) {
+			open->dev_channel_mapping[0] = PCM_CHANNEL_FC;
+		} else if (channel_mode == 2) {
+			open->dev_channel_mapping[0] = PCM_CHANNEL_FL;
+			open->dev_channel_mapping[1] = PCM_CHANNEL_FR;
+		} else if (channel_mode == 3) {
+			open->dev_channel_mapping[0] = PCM_CHANNEL_FL;
+			open->dev_channel_mapping[1] = PCM_CHANNEL_FR;
+			open->dev_channel_mapping[2] = PCM_CHANNEL_FC;
+		} else if (channel_mode == 4) {
+			open->dev_channel_mapping[0] = PCM_CHANNEL_FL;
+			open->dev_channel_mapping[1] = PCM_CHANNEL_FR;
+			open->dev_channel_mapping[2] = PCM_CHANNEL_LS;
+			open->dev_channel_mapping[3] = PCM_CHANNEL_RS;
+		} else if (channel_mode == 5) {
+			open->dev_channel_mapping[0] = PCM_CHANNEL_FL;
+			open->dev_channel_mapping[1] = PCM_CHANNEL_FR;
+			open->dev_channel_mapping[2] = PCM_CHANNEL_FC;
+			open->dev_channel_mapping[3] = PCM_CHANNEL_LS;
+			open->dev_channel_mapping[4] = PCM_CHANNEL_RS;
+		} else if (channel_mode == 6) {
+			open->dev_channel_mapping[0] = PCM_CHANNEL_FL;
+			open->dev_channel_mapping[1] = PCM_CHANNEL_FR;
+			open->dev_channel_mapping[2] = PCM_CHANNEL_LFE;
+			open->dev_channel_mapping[3] = PCM_CHANNEL_FC;
+			open->dev_channel_mapping[4] = PCM_CHANNEL_LS;
+			open->dev_channel_mapping[5] = PCM_CHANNEL_RS;
+		} else if (channel_mode == 7) {
+			open->dev_channel_mapping[0] = PCM_CHANNEL_FL;
+			open->dev_channel_mapping[1] = PCM_CHANNEL_FR;
+			open->dev_channel_mapping[2] = PCM_CHANNEL_FC;
+			open->dev_channel_mapping[3] = PCM_CHANNEL_LFE;
+			open->dev_channel_mapping[4] = PCM_CHANNEL_LB;
+			open->dev_channel_mapping[5] = PCM_CHANNEL_RB;
+			open->dev_channel_mapping[6] = PCM_CHANNEL_CS;
+		} else if (channel_mode == 8) {
+			open->dev_channel_mapping[0] = PCM_CHANNEL_FL;
+			open->dev_channel_mapping[1] = PCM_CHANNEL_FR;
+			open->dev_channel_mapping[2] = PCM_CHANNEL_LFE;
+			open->dev_channel_mapping[3] = PCM_CHANNEL_FC;
+			open->dev_channel_mapping[4] = PCM_CHANNEL_LS;
+			open->dev_channel_mapping[5] = PCM_CHANNEL_RS;
+			open->dev_channel_mapping[6] = PCM_CHANNEL_LB;
+			open->dev_channel_mapping[7] = PCM_CHANNEL_RB;
+		} else {
+			pr_err("%s: invalid num_chan %d\n", __func__,
+				channel_mode);
+			rc = -EINVAL;
+			goto inval_ch_mod;
+		}
+	}
+
+non_mch_path:
+inval_ch_mod:
+	return rc;
+}
+
+int adm_arrange_mch_ep2_map(struct adm_cmd_device_open_v6 *open_v6,
+			 int channel_mode)
+{
+	int rc = 0;
+
+	memset(open_v6->dev_channel_mapping_eid2, 0,
+	       PCM_FORMAT_MAX_NUM_CHANNEL);
+
+	if (channel_mode == 1)	{
+		open_v6->dev_channel_mapping_eid2[0] = PCM_CHANNEL_FC;
+	} else if (channel_mode == 2) {
+		open_v6->dev_channel_mapping_eid2[0] = PCM_CHANNEL_FL;
+		open_v6->dev_channel_mapping_eid2[1] = PCM_CHANNEL_FR;
+	} else if (channel_mode == 3)	{
+		open_v6->dev_channel_mapping_eid2[0] = PCM_CHANNEL_FL;
+		open_v6->dev_channel_mapping_eid2[1] = PCM_CHANNEL_FR;
+		open_v6->dev_channel_mapping_eid2[2] = PCM_CHANNEL_FC;
+	} else if (channel_mode == 4) {
+		open_v6->dev_channel_mapping_eid2[0] = PCM_CHANNEL_FL;
+		open_v6->dev_channel_mapping_eid2[1] = PCM_CHANNEL_FR;
+		open_v6->dev_channel_mapping_eid2[2] = PCM_CHANNEL_LS;
+		open_v6->dev_channel_mapping_eid2[3] = PCM_CHANNEL_RS;
+	} else if (channel_mode == 5) {
+		open_v6->dev_channel_mapping_eid2[0] = PCM_CHANNEL_FL;
+		open_v6->dev_channel_mapping_eid2[1] = PCM_CHANNEL_FR;
+		open_v6->dev_channel_mapping_eid2[2] = PCM_CHANNEL_FC;
+		open_v6->dev_channel_mapping_eid2[3] = PCM_CHANNEL_LS;
+		open_v6->dev_channel_mapping_eid2[4] = PCM_CHANNEL_RS;
+	} else if (channel_mode == 6) {
+		open_v6->dev_channel_mapping_eid2[0] = PCM_CHANNEL_FL;
+		open_v6->dev_channel_mapping_eid2[1] = PCM_CHANNEL_FR;
+		open_v6->dev_channel_mapping_eid2[2] = PCM_CHANNEL_LFE;
+		open_v6->dev_channel_mapping_eid2[3] = PCM_CHANNEL_FC;
+		open_v6->dev_channel_mapping_eid2[4] = PCM_CHANNEL_LS;
+		open_v6->dev_channel_mapping_eid2[5] = PCM_CHANNEL_RS;
+	} else if (channel_mode == 8) {
+		open_v6->dev_channel_mapping_eid2[0] = PCM_CHANNEL_FL;
+		open_v6->dev_channel_mapping_eid2[1] = PCM_CHANNEL_FR;
+		open_v6->dev_channel_mapping_eid2[2] = PCM_CHANNEL_LFE;
+		open_v6->dev_channel_mapping_eid2[3] = PCM_CHANNEL_FC;
+		open_v6->dev_channel_mapping_eid2[4] = PCM_CHANNEL_LS;
+		open_v6->dev_channel_mapping_eid2[5] = PCM_CHANNEL_RS;
+		open_v6->dev_channel_mapping_eid2[6] = PCM_CHANNEL_LB;
+		open_v6->dev_channel_mapping_eid2[7] = PCM_CHANNEL_RB;
+	} else {
+		pr_err("%s: invalid num_chan %d\n", __func__,
+			channel_mode);
+		rc = -EINVAL;
+	}
+
+	return rc;
+}
+
+int adm_open(int port_id, int path, int rate, int channel_mode, int topology,
+	     int perf_mode, uint16_t bit_width, int app_type, int acdb_id)
+{
+	struct adm_cmd_device_open_v5	open;
+	struct adm_cmd_device_open_v6	open_v6;
+	int ret = 0;
+	int port_idx, flags;
+	int copp_idx = -1;
+	int tmp_port = q6audio_get_port_id(port_id);
+
+	pr_debug("%s: port %#x path:%d rate:%d mode:%d perf_mode:%d topo_id:%d\n",
+		 __func__, port_id, path, rate, channel_mode, perf_mode,
+		 topology);
+
+	port_id = q6audio_convert_virtual_to_portid(port_id);
+	port_idx = adm_validate_and_get_port_index(port_id);
+	if (port_idx < 0) {
+		pr_err("%s: Invalid port_id 0x%x\n", __func__, port_id);
+		return -EINVAL;
+	}
+
+	if (this_adm.apr == NULL) {
+		this_adm.apr = apr_register("ADSP", "ADM", adm_callback,
+						0xFFFFFFFF, &this_adm);
+		if (this_adm.apr == NULL) {
+			pr_err("%s: Unable to register ADM\n", __func__);
+			return -ENODEV;
+		}
+		rtac_set_adm_handle(this_adm.apr);
+	}
+
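+	/*
+	 * Map perf_mode to the DSP session flags. Topologies that are
+	 * not supported in the low-latency modes fall back to
+	 * DEFAULT_COPP_TOPOLOGY; ULL additionally forces the NULL
+	 * topology at the fixed ULL sample rate and bit width.
+	 */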
+	if (perf_mode == ULL_POST_PROCESSING_PCM_MODE) {
+		flags = ADM_ULL_POST_PROCESSING_DEVICE_SESSION;
+		if ((topology == DOLBY_ADM_COPP_TOPOLOGY_ID) ||
+		    (topology == DS2_ADM_COPP_TOPOLOGY_ID) ||
+		    (topology == SRS_TRUMEDIA_TOPOLOGY_ID))
+			topology = DEFAULT_COPP_TOPOLOGY;
+	} else if (perf_mode == ULTRA_LOW_LATENCY_PCM_MODE) {
+		flags = ADM_ULTRA_LOW_LATENCY_DEVICE_SESSION;
+		topology = NULL_COPP_TOPOLOGY;
+		rate = ULL_SUPPORTED_SAMPLE_RATE;
+		bit_width = ULL_SUPPORTED_BITS_PER_SAMPLE;
+	} else if (perf_mode == LOW_LATENCY_PCM_MODE) {
+		flags = ADM_LOW_LATENCY_DEVICE_SESSION;
+		if ((topology == DOLBY_ADM_COPP_TOPOLOGY_ID) ||
+		    (topology == DS2_ADM_COPP_TOPOLOGY_ID) ||
+		    (topology == SRS_TRUMEDIA_TOPOLOGY_ID))
+			topology = DEFAULT_COPP_TOPOLOGY;
+	} else {
+		if ((path == ADM_PATH_COMPRESSED_RX) ||
+		    (path == ADM_PATH_COMPRESSED_TX))
+			flags = 0;
+		else
+			flags = ADM_LEGACY_DEVICE_SESSION;
+	}
+
+	if ((topology == VPM_TX_SM_ECNS_COPP_TOPOLOGY) ||
+	    (topology == VPM_TX_DM_FLUENCE_COPP_TOPOLOGY) ||
+	    (topology == VPM_TX_DM_RFECNS_COPP_TOPOLOGY))
+		rate = 16000;
+
+	/*
+	 * Routing driver reuses the same adm for streams with the same
+	 * app_type, sample_rate etc.
+	 * This isn't allowed for ULL streams as per the DSP interface
+	 */
+	if (perf_mode != ULTRA_LOW_LATENCY_PCM_MODE)
+		copp_idx = adm_get_idx_if_copp_exists(port_idx, topology,
+						      perf_mode,
+						      rate, bit_width,
+						      app_type);
+
+	if (copp_idx < 0) {
+		copp_idx = adm_get_next_available_copp(port_idx);
+		if (copp_idx >= MAX_COPPS_PER_PORT) {
+			pr_err("%s: exceeded copp id %d\n",
+				 __func__, copp_idx);
+			return -EINVAL;
+		} else {
+			atomic_set(&this_adm.copp.cnt[port_idx][copp_idx], 0);
+			atomic_set(&this_adm.copp.topology[port_idx][copp_idx],
+				   topology);
+			atomic_set(&this_adm.copp.mode[port_idx][copp_idx],
+				   perf_mode);
+			atomic_set(&this_adm.copp.rate[port_idx][copp_idx],
+				   rate);
+			atomic_set(&this_adm.copp.channels[port_idx][copp_idx],
+				   channel_mode);
+			atomic_set(&this_adm.copp.bit_width[port_idx][copp_idx],
+				   bit_width);
+			atomic_set(&this_adm.copp.app_type[port_idx][copp_idx],
+				   app_type);
+			atomic_set(&this_adm.copp.acdb_id[port_idx][copp_idx],
+				   acdb_id);
+			set_bit(ADM_STATUS_CALIBRATION_REQUIRED,
+			(void *)&this_adm.copp.adm_status[port_idx][copp_idx]);
+			if ((path != ADM_PATH_COMPRESSED_RX) &&
+			    (path != ADM_PATH_COMPRESSED_TX))
+				send_adm_custom_topology();
+		}
+	}
+
+	if (this_adm.copp.adm_delay[port_idx][copp_idx] &&
+		perf_mode == LEGACY_PCM_MODE) {
+		atomic_set(&this_adm.copp.adm_delay_stat[port_idx][copp_idx],
+			   1);
+		this_adm.copp.adm_delay[port_idx][copp_idx] = 0;
+		wake_up(&this_adm.copp.adm_delay_wait[port_idx][copp_idx]);
+	}
+
+	/* Open the COPP if this port/copp slot is not already active */
+	if (atomic_read(&this_adm.copp.cnt[port_idx][copp_idx]) == 0) {
+		pr_debug("%s: open ADM: port_idx: %d, copp_idx: %d\n", __func__,
+			 port_idx, copp_idx);
+		if ((topology == SRS_TRUMEDIA_TOPOLOGY_ID) &&
+		    perf_mode == LEGACY_PCM_MODE) {
+			int res;
+
+			atomic_set(&this_adm.mem_map_index, ADM_SRS_TRUMEDIA);
+			msm_dts_srs_tm_ion_memmap(&this_adm.outband_memmap);
+			res = adm_memory_map_regions(
+				&this_adm.outband_memmap.paddr, 0,
+				(uint32_t *)&this_adm.outband_memmap.size, 1);
+			if (res < 0) {
+				pr_err("%s: SRS adm_memory_map_regions failed! addr = 0x%pK, size = %d\n",
+					__func__,
+					(void *)this_adm.outband_memmap.paddr,
+					(uint32_t)this_adm.outband_memmap.size);
+			}
+		}
+		open.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+						   APR_HDR_LEN(APR_HDR_SIZE),
+						   APR_PKT_VER);
+		open.hdr.pkt_size = sizeof(open);
+		open.hdr.src_svc = APR_SVC_ADM;
+		open.hdr.src_domain = APR_DOMAIN_APPS;
+		open.hdr.src_port = tmp_port;
+		open.hdr.dest_svc = APR_SVC_ADM;
+		open.hdr.dest_domain = APR_DOMAIN_ADSP;
+		open.hdr.dest_port = tmp_port;
+		open.hdr.token = port_idx << 16 | copp_idx;
+		open.hdr.opcode = ADM_CMD_DEVICE_OPEN_V5;
+		open.flags = flags;
+		open.mode_of_operation = path;
+		open.endpoint_id_1 = tmp_port;
+		open.endpoint_id_2 = 0xFFFF;
+
+		if (this_adm.ec_ref_rx && (path != 1)) {
+			open.endpoint_id_2 = this_adm.ec_ref_rx;
+			this_adm.ec_ref_rx = -1;
+		}
+
+		open.topology_id = topology;
+
+		open.dev_num_channel = channel_mode & 0x00FF;
+		open.bit_width = bit_width;
+		WARN_ON((perf_mode == ULTRA_LOW_LATENCY_PCM_MODE) &&
+			(rate != ULL_SUPPORTED_SAMPLE_RATE));
+		open.sample_rate  = rate;
+
+		ret = adm_arrange_mch_map(&open, path, channel_mode);
+
+		if (ret)
+			return ret;
+
+		pr_debug("%s: port_id=0x%x rate=%d topology_id=0x%X\n",
+			__func__, open.endpoint_id_1, open.sample_rate,
+			open.topology_id);
+
+		atomic_set(&this_adm.copp.stat[port_idx][copp_idx], -1);
+
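+		/*
+		 * When an echo-reference endpoint is configured,
+		 * DEVICE_OPEN_V6 is used so the second endpoint's channel
+		 * count, bit width and sample rate can be described; the
+		 * cached ec_ref_* settings are one-shot and cleared here.
+		 */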
+		if ((this_adm.num_ec_ref_rx_chans != 0) && (path != 1) &&
+			(open.endpoint_id_2 != 0xFFFF)) {
+			memset(&open_v6, 0,
+				sizeof(struct adm_cmd_device_open_v6));
+			memcpy(&open_v6, &open,
+				sizeof(struct adm_cmd_device_open_v5));
+			open_v6.hdr.opcode = ADM_CMD_DEVICE_OPEN_V6;
+			open_v6.hdr.pkt_size = sizeof(open_v6);
+			open_v6.dev_num_channel_eid2 =
+				this_adm.num_ec_ref_rx_chans;
+			this_adm.num_ec_ref_rx_chans = 0;
+
+			if (this_adm.ec_ref_rx_bit_width != 0) {
+				open_v6.bit_width_eid2 =
+					this_adm.ec_ref_rx_bit_width;
+				this_adm.ec_ref_rx_bit_width = 0;
+			} else {
+				open_v6.bit_width_eid2 = bit_width;
+			}
+
+			if (this_adm.ec_ref_rx_sampling_rate != 0) {
+				open_v6.sample_rate_eid2 =
+					this_adm.ec_ref_rx_sampling_rate;
+				this_adm.ec_ref_rx_sampling_rate = 0;
+			} else {
+				open_v6.sample_rate_eid2 = rate;
+			}
+
+			pr_debug("%s: eid2_channels=%d eid2_bit_width=%d eid2_rate=%d\n",
+				__func__, open_v6.dev_num_channel_eid2,
+				open_v6.bit_width_eid2,
+				open_v6.sample_rate_eid2);
+
+			ret = adm_arrange_mch_ep2_map(&open_v6,
+				open_v6.dev_num_channel_eid2);
+
+			if (ret)
+				return ret;
+
+			ret = apr_send_pkt(this_adm.apr, (uint32_t *)&open_v6);
+		} else {
+			ret = apr_send_pkt(this_adm.apr, (uint32_t *)&open);
+		}
+		if (ret < 0) {
+			pr_err("%s: ADM open failed on port 0x%x (port_id 0x%x), err %d\n",
+				__func__, tmp_port, port_id, ret);
+			return -EINVAL;
+		}
+		/* Wait for the callback with copp id */
+		ret = wait_event_timeout(this_adm.copp.wait[port_idx][copp_idx],
+			atomic_read(&this_adm.copp.stat
+			[port_idx][copp_idx]) >= 0,
+			msecs_to_jiffies(TIMEOUT_MS));
+		if (!ret) {
+			pr_err("%s: ADM open timedout for port_id: 0x%x for [0x%x]\n",
+						__func__, tmp_port, port_id);
+			return -EINVAL;
+		} else if (atomic_read(&this_adm.copp.stat
+					[port_idx][copp_idx]) > 0) {
+			pr_err("%s: DSP returned error[%s]\n",
+				__func__, adsp_err_get_err_str(
+				atomic_read(&this_adm.copp.stat
+				[port_idx][copp_idx])));
+			return adsp_err_get_lnx_err_code(
+					atomic_read(&this_adm.copp.stat
+						[port_idx][copp_idx]));
+		}
+	}
+	atomic_inc(&this_adm.copp.cnt[port_idx][copp_idx]);
+	return copp_idx;
+}
+
+void adm_copp_mfc_cfg(int port_id, int copp_idx, int dst_sample_rate)
+{
+	struct audproc_mfc_output_media_fmt mfc_cfg;
+	struct adm_cmd_device_open_v5 open;
+	int port_idx;
+	int sz = 0;
+	int rc = 0;
+	int i = 0;
+
+	port_id = q6audio_convert_virtual_to_portid(port_id);
+	port_idx = adm_validate_and_get_port_index(port_id);
+
+	if (port_idx < 0) {
+		pr_err("%s: Invalid port_id %#x\n", __func__, port_id);
+		goto fail_cmd;
+	}
+
+	if (copp_idx < 0 || copp_idx >= MAX_COPPS_PER_PORT) {
+		pr_err("%s: Invalid copp_num: %d\n", __func__, copp_idx);
+		goto fail_cmd;
+	}
+
+	sz = sizeof(struct audproc_mfc_output_media_fmt);
+
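+	/*
+	 * Build an in-band ADM_CMD_SET_PP_PARAMS_V5 carrying a single
+	 * MFC output media format parameter: payload_size covers
+	 * everything after the APR header, param_size the bytes after
+	 * the param header.
+	 */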
+	mfc_cfg.params.hdr.hdr_field =
+				APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+				APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
+	mfc_cfg.params.hdr.pkt_size = sz;
+	mfc_cfg.params.hdr.src_svc = APR_SVC_ADM;
+	mfc_cfg.params.hdr.src_domain = APR_DOMAIN_APPS;
+	mfc_cfg.params.hdr.src_port = port_id;
+	mfc_cfg.params.hdr.dest_svc = APR_SVC_ADM;
+	mfc_cfg.params.hdr.dest_domain = APR_DOMAIN_ADSP;
+	mfc_cfg.params.hdr.dest_port =
+			atomic_read(&this_adm.copp.id[port_idx][copp_idx]);
+	mfc_cfg.params.hdr.token = port_idx << 16 | copp_idx;
+	mfc_cfg.params.hdr.opcode = ADM_CMD_SET_PP_PARAMS_V5;
+	mfc_cfg.params.payload_addr_lsw = 0;
+	mfc_cfg.params.payload_addr_msw = 0;
+	mfc_cfg.params.mem_map_handle = 0;
+	mfc_cfg.params.payload_size = sizeof(mfc_cfg) -
+				sizeof(mfc_cfg.params);
+	mfc_cfg.data.module_id = AUDPROC_MODULE_ID_MFC;
+	mfc_cfg.data.param_id =
+			AUDPROC_PARAM_ID_MFC_OUTPUT_MEDIA_FORMAT;
+	mfc_cfg.data.param_size = mfc_cfg.params.payload_size -
+				sizeof(mfc_cfg.data);
+	mfc_cfg.data.reserved = 0;
+	mfc_cfg.sampling_rate = dst_sample_rate;
+	mfc_cfg.bits_per_sample =
+		atomic_read(&this_adm.copp.bit_width[port_idx][copp_idx]);
+	open.dev_num_channel = mfc_cfg.num_channels =
+		atomic_read(&this_adm.copp.channels[port_idx][copp_idx]);
+
+	rc = adm_arrange_mch_map(&open, ADM_PATH_PLAYBACK,
+		mfc_cfg.num_channels);
+	if (rc < 0) {
+		pr_err("%s: unable to get channel map\n", __func__);
+		goto fail_cmd;
+	}
+
+	for (i = 0; i < mfc_cfg.num_channels; i++)
+		mfc_cfg.channel_type[i] =
+			(uint16_t) open.dev_channel_mapping[i];
+
+	atomic_set(&this_adm.copp.stat[port_idx][copp_idx], -1);
+
+	pr_debug("%s: mfc config: port_idx %d copp_idx  %d copp SR %d copp BW %d copp chan %d o/p SR %d\n",
+			__func__, port_idx, copp_idx,
+			atomic_read(&this_adm.copp.rate[port_idx][copp_idx]),
+			mfc_cfg.bits_per_sample, mfc_cfg.num_channels,
+			mfc_cfg.sampling_rate);
+
+	rc = apr_send_pkt(this_adm.apr, (uint32_t *)&mfc_cfg);
+
+	if (rc < 0) {
+		pr_err("%s: mfc_cfg set params failed for port_id 0x%x, err %d\n",
+			__func__, port_id, rc);
+		goto fail_cmd;
+	}
+	/* Wait for the callback with copp id */
+	rc = wait_event_timeout(this_adm.copp.wait[port_idx][copp_idx],
+		atomic_read(&this_adm.copp.stat
+		[port_idx][copp_idx]) >= 0,
+		msecs_to_jiffies(TIMEOUT_MS));
+	if (!rc) {
+		pr_err("%s: mfc_cfg Set params timed out for port_id 0x%x\n",
+			__func__, port_id);
+		goto fail_cmd;
+	} else if (atomic_read(&this_adm.copp.stat
+				[port_idx][copp_idx]) > 0) {
+		pr_err("%s: DSP returned error[%s]\n",
+			__func__, adsp_err_get_err_str(
+			atomic_read(&this_adm.copp.stat
+			[port_idx][copp_idx])));
+		goto fail_cmd;
+	}
+	rc = 0;
+fail_cmd:
+	return;
+}
+
+static int adm_set_mtmx_params_v1(int port_idx, int copp_idx,
+				  int params_length, void *params)
+{
+	struct adm_cmd_set_mtmx_params_v1 *adm_params = NULL;
+	int rc = 0;
+	int sz;
+
+	sz = sizeof(*adm_params) + params_length;
+	adm_params = kzalloc(sz, GFP_KERNEL);
+	if (!adm_params)
+		return -ENOMEM;
+
+	memcpy(((u8 *)adm_params + sizeof(*adm_params)),
+			params, params_length);
+	adm_params->hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+				APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
+	adm_params->hdr.pkt_size = sz;
+	adm_params->hdr.src_svc = APR_SVC_ADM;
+	adm_params->hdr.src_domain = APR_DOMAIN_APPS;
+	adm_params->hdr.src_port = 0;
+	adm_params->hdr.dest_svc = APR_SVC_ADM;
+	adm_params->hdr.dest_domain = APR_DOMAIN_ADSP;
+	adm_params->hdr.dest_port =
+			atomic_read(&this_adm.copp.id[port_idx][copp_idx]);
+	adm_params->hdr.token = port_idx << 16 | copp_idx;
+	adm_params->hdr.opcode = ADM_CMD_SET_MTMX_STRTR_DEV_PARAMS_V1;
+	adm_params->payload_addr_lsw = 0;
+	adm_params->payload_addr_msw = 0;
+	adm_params->mem_map_handle = 0;
+	adm_params->payload_size = params_length;
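+	/*
+	 * MTMX/StRtr params address the copp twice: through the APR
+	 * dest_port and through an explicit copp_id field in the
+	 * payload.
+	 */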
+	adm_params->copp_id = atomic_read(&this_adm.copp.
+					  id[port_idx][copp_idx]);
+
+	atomic_set(&this_adm.copp.stat[port_idx][copp_idx], -1);
+	rc = apr_send_pkt(this_adm.apr, (uint32_t *)adm_params);
+	if (rc < 0) {
+		pr_err("%s: Set params failed port_idx = 0x%x rc %d\n",
+			__func__, port_idx, rc);
+		rc = -EINVAL;
+		goto send_param_return;
+	}
+	/* Wait for the callback */
+	rc = wait_event_timeout(this_adm.copp.wait[port_idx][copp_idx],
+		atomic_read(&this_adm.copp.stat[port_idx][copp_idx]) >= 0,
+		msecs_to_jiffies(TIMEOUT_MS));
+	if (!rc) {
+		pr_err("%s: Set params timed out port_idx = 0x%x\n",
+			 __func__, port_idx);
+		rc = -EINVAL;
+		goto send_param_return;
+	} else if (atomic_read(&this_adm.copp.stat
+				[port_idx][copp_idx]) > 0) {
+		pr_err("%s: DSP returned error[%s]\n",
+				__func__, adsp_err_get_err_str(
+				atomic_read(&this_adm.copp.stat
+				[port_idx][copp_idx])));
+		rc = adsp_err_get_lnx_err_code(
+				atomic_read(&this_adm.copp.stat
+					[port_idx][copp_idx]));
+		goto send_param_return;
+	}
+	rc = 0;
+send_param_return:
+	kfree(adm_params);
+	return rc;
+}
+
+static void adm_enable_mtmx_limiter(int port_idx, int copp_idx)
+{
+	int rc;
+	struct enable_param_v6 adm_param = { {0} };
+
+	adm_param.param.module_id = ADM_MTMX_MODULE_STREAM_LIMITER;
+	adm_param.param.param_id = AUDPROC_PARAM_ID_ENABLE;
+	adm_param.param.param_size = sizeof(adm_param.enable);
+	adm_param.enable = 1;
+
+	rc = adm_set_mtmx_params_v1(port_idx, copp_idx,
+				    sizeof(adm_param), &adm_param);
+	if (rc < 0) {
+		pr_err("%s: adm_set_mtmx_params_v1 failed port_idx = 0x%x rc %d\n",
+			__func__, port_idx, rc);
+		goto done;
+	}
+	set_bit(ADM_STATUS_LIMITER,
+		(void *)&this_adm.copp.adm_status[port_idx][copp_idx]);
+done:
+	return;
+}
+
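+/*
+ * Select the routing opcode and matrix ID for the given ADM path:
+ * playback and ordinary capture use the audio matrix map, while listen
+ * capture and the compressed paths use stream/device map routings.
+ */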
+static void route_set_opcode_matrix_id(
+			struct adm_cmd_matrix_map_routings_v5 **route_addr,
+			int path, uint32_t passthr_mode)
+{
+	struct adm_cmd_matrix_map_routings_v5 *route = *route_addr;
+
+	switch (path) {
+	case ADM_PATH_PLAYBACK:
+		route->hdr.opcode = ADM_CMD_MATRIX_MAP_ROUTINGS_V5;
+		route->matrix_id = ADM_MATRIX_ID_AUDIO_RX;
+		break;
+	case ADM_PATH_LIVE_REC:
+		if (passthr_mode == LISTEN) {
+			route->hdr.opcode =
+				ADM_CMD_STREAM_DEVICE_MAP_ROUTINGS_V5;
+			route->matrix_id = ADM_MATRIX_ID_LISTEN_TX;
+			break;
+		}
+		/* fall through to set matrix id for non-listen case */
+	case ADM_PATH_NONLIVE_REC:
+		route->hdr.opcode = ADM_CMD_MATRIX_MAP_ROUTINGS_V5;
+		route->matrix_id = ADM_MATRIX_ID_AUDIO_TX;
+		break;
+	case ADM_PATH_COMPRESSED_RX:
+		route->hdr.opcode = ADM_CMD_STREAM_DEVICE_MAP_ROUTINGS_V5;
+		route->matrix_id = ADM_MATRIX_ID_COMPRESSED_AUDIO_RX;
+		break;
+	case ADM_PATH_COMPRESSED_TX:
+		route->hdr.opcode = ADM_CMD_STREAM_DEVICE_MAP_ROUTINGS_V5;
+		route->matrix_id = ADM_MATRIX_ID_COMPRESSED_AUDIO_TX;
+		break;
+	default:
+		pr_err("%s: Wrong path set[%d]\n", __func__, path);
+		break;
+	}
+	pr_debug("%s: opcode 0x%x, matrix id %d\n",
+		 __func__, route->hdr.opcode, route->matrix_id);
+}
+
+int adm_matrix_map(int path, struct route_payload payload_map, int perf_mode,
+			uint32_t passthr_mode)
+{
+	struct adm_cmd_matrix_map_routings_v5	*route;
+	struct adm_session_map_node_v5 *node;
+	uint16_t *copps_list;
+	int cmd_size = 0;
+	int ret = 0, i = 0;
+	void *payload = NULL;
+	void *matrix_map = NULL;
+	int port_idx, copp_idx;
+
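+	/*
+	 * Packet layout: routing header, one session map node, then the
+	 * per-session list of copp IDs. cmd_size reserves 32 bits per copp
+	 * even though the IDs are written as 16-bit values.
+	 */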
+	/* Assumes port_ids have already been validated during adm_open */
+	cmd_size = (sizeof(struct adm_cmd_matrix_map_routings_v5) +
+			sizeof(struct adm_session_map_node_v5) +
+			(sizeof(uint32_t) * payload_map.num_copps));
+	matrix_map = kzalloc(cmd_size, GFP_KERNEL);
+	if (matrix_map == NULL) {
+		pr_err("%s: Mem alloc failed\n", __func__);
+		return -ENOMEM;
+	}
+	route = (struct adm_cmd_matrix_map_routings_v5 *)matrix_map;
+
+	route->hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+				APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
+	route->hdr.pkt_size = cmd_size;
+	route->hdr.src_svc = 0;
+	route->hdr.src_domain = APR_DOMAIN_APPS;
+	route->hdr.src_port = 0; /* Ignored */
+	route->hdr.dest_svc = APR_SVC_ADM;
+	route->hdr.dest_domain = APR_DOMAIN_ADSP;
+	route->hdr.dest_port = 0; /* Ignored */
+	route->hdr.token = 0;
+	route->num_sessions = 1;
+	route_set_opcode_matrix_id(&route, path, passthr_mode);
+
+	payload = ((u8 *)matrix_map +
+			sizeof(struct adm_cmd_matrix_map_routings_v5));
+	node = (struct adm_session_map_node_v5 *)payload;
+
+	node->session_id = payload_map.session_id;
+	node->num_copps = payload_map.num_copps;
+	payload = (u8 *)node + sizeof(struct adm_session_map_node_v5);
+	copps_list = (uint16_t *)payload;
+	for (i = 0; i < payload_map.num_copps; i++) {
+		port_idx =
+		adm_validate_and_get_port_index(payload_map.port_id[i]);
+		if (port_idx < 0) {
+			pr_err("%s: Invalid port_id 0x%x\n", __func__,
+				payload_map.port_id[i]);
+			ret = -EINVAL;
+			goto fail_cmd;
+		}
+		copp_idx = payload_map.copp_idx[i];
+		copps_list[i] = atomic_read(&this_adm.copp.id[port_idx]
+							     [copp_idx]);
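+		/* Enable the MTMX limiter on playback paths when requested */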
+		if (test_bit(ADM_STATUS_LIMITER,
+		    (void *)&payload_map.route_status) &&
+		    ((path == ADM_PATH_PLAYBACK) ||
+		     (path == ADM_PATH_COMPRESSED_RX)))
+			adm_enable_mtmx_limiter(port_idx, copp_idx);
+	}
+	atomic_set(&this_adm.matrix_map_stat, -1);
+
+	ret = apr_send_pkt(this_adm.apr, (uint32_t *)matrix_map);
+	if (ret < 0) {
+		pr_err("%s: routing for stream %d failed ret %d\n",
+			__func__, payload_map.session_id, ret);
+		ret = -EINVAL;
+		goto fail_cmd;
+	}
+	ret = wait_event_timeout(this_adm.matrix_map_wait,
+				atomic_read(&this_adm.matrix_map_stat) >= 0,
+				msecs_to_jiffies(TIMEOUT_MS));
+	if (!ret) {
+		pr_err("%s: routing for stream %d failed\n", __func__,
+			payload_map.session_id);
+		ret = -EINVAL;
+		goto fail_cmd;
+	} else if (atomic_read(&this_adm.matrix_map_stat) > 0) {
+		pr_err("%s: DSP returned error[%s]\n", __func__,
+			adsp_err_get_err_str(atomic_read(
+			&this_adm.matrix_map_stat)));
+		ret = adsp_err_get_lnx_err_code(
+				atomic_read(&this_adm.matrix_map_stat));
+		goto fail_cmd;
+	}
+
+	if ((perf_mode != ULTRA_LOW_LATENCY_PCM_MODE) &&
+		 (path != ADM_PATH_COMPRESSED_RX)) {
+		for (i = 0; i < payload_map.num_copps; i++) {
+			port_idx = afe_get_port_index(payload_map.port_id[i]);
+			copp_idx = payload_map.copp_idx[i];
+			if (port_idx < 0 || copp_idx < 0 ||
+			    (copp_idx > MAX_COPPS_PER_PORT - 1)) {
+				pr_err("%s: Invalid idx port_idx %d copp_idx %d\n",
+					__func__, port_idx, copp_idx);
+				continue;
+			}
+			rtac_add_adm_device(payload_map.port_id[i],
+					    atomic_read(&this_adm.copp.id
+							[port_idx][copp_idx]),
+					    get_cal_path(path),
+					    payload_map.session_id,
+					    payload_map.app_type[i],
+					    payload_map.acdb_dev_id[i]);
+
+			if (!test_bit(ADM_STATUS_CALIBRATION_REQUIRED,
+				(void *)&this_adm.copp.adm_status[port_idx]
+								[copp_idx])) {
+				pr_debug("%s: adm copp[0x%x][%d] already sent",
+						__func__, port_idx, copp_idx);
+				continue;
+			}
+			send_adm_cal(payload_map.port_id[i], copp_idx,
+				     get_cal_path(path), perf_mode,
+				     payload_map.app_type[i],
+				     payload_map.acdb_dev_id[i],
+				     payload_map.sample_rate[i],
+				     passthr_mode);
+			/* ADM COPP calibration is already sent */
+			clear_bit(ADM_STATUS_CALIBRATION_REQUIRED,
+				(void *)&this_adm.copp.
+				adm_status[port_idx][copp_idx]);
+			pr_debug("%s: copp_id: %d\n", __func__,
+				 atomic_read(&this_adm.copp.id[port_idx]
+							      [copp_idx]));
+		}
+	}
+
+fail_cmd:
+	kfree(matrix_map);
+	return ret;
+}
+
+void adm_ec_ref_rx_id(int port_id)
+{
+	this_adm.ec_ref_rx = port_id;
+	pr_debug("%s: ec_ref_rx:%d\n", __func__, this_adm.ec_ref_rx);
+}
+
+void adm_num_ec_ref_rx_chans(int num_chans)
+{
+	this_adm.num_ec_ref_rx_chans = num_chans;
+	pr_debug("%s: num_ec_ref_rx_chans:%d\n",
+		__func__, this_adm.num_ec_ref_rx_chans);
+}
+
+void adm_ec_ref_rx_bit_width(int bit_width)
+{
+	this_adm.ec_ref_rx_bit_width = bit_width;
+	pr_debug("%s: ec_ref_rx_bit_width:%d\n",
+		__func__, this_adm.ec_ref_rx_bit_width);
+}
+
+void adm_ec_ref_rx_sampling_rate(int sampling_rate)
+{
+	this_adm.ec_ref_rx_sampling_rate = sampling_rate;
+	pr_debug("%s: ec_ref_rx_sampling_rate:%d\n",
+		__func__, this_adm.ec_ref_rx_sampling_rate);
+}
+
+int adm_close(int port_id, int perf_mode, int copp_idx)
+{
+	struct apr_hdr close;
+
+	int ret = 0, port_idx;
+	int copp_id = RESET_COPP_ID;
+
+	pr_debug("%s: port_id=0x%x perf_mode: %d copp_idx: %d\n", __func__,
+		 port_id, perf_mode, copp_idx);
+
+	port_id = q6audio_convert_virtual_to_portid(port_id);
+	port_idx = adm_validate_and_get_port_index(port_id);
+	if (port_idx < 0) {
+		pr_err("%s: Invalid port_id 0x%x\n",
+			__func__, port_id);
+		return -EINVAL;
+	}
+
+	if ((copp_idx < 0) || (copp_idx >= MAX_COPPS_PER_PORT)) {
+		pr_err("%s: Invalid copp idx: %d\n", __func__, copp_idx);
+		return -EINVAL;
+	}
+
+	if (this_adm.copp.adm_delay[port_idx][copp_idx] && perf_mode
+		== LEGACY_PCM_MODE) {
+		atomic_set(&this_adm.copp.adm_delay_stat[port_idx][copp_idx],
+			   1);
+		this_adm.copp.adm_delay[port_idx][copp_idx] = 0;
+		wake_up(&this_adm.copp.adm_delay_wait[port_idx][copp_idx]);
+	}
+
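+	/*
+	 * Copps are reference counted: drop this user's reference and only
+	 * tear down state and send ADM_CMD_DEVICE_CLOSE_V5 once the count
+	 * reaches zero.
+	 */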
+	atomic_dec(&this_adm.copp.cnt[port_idx][copp_idx]);
+	if (!(atomic_read(&this_adm.copp.cnt[port_idx][copp_idx]))) {
+		copp_id = adm_get_copp_id(port_idx, copp_idx);
+		pr_debug("%s: Closing ADM port_idx:%d copp_idx:%d copp_id:0x%x\n",
+			 __func__, port_idx, copp_idx, copp_id);
+		if ((!perf_mode) && (this_adm.outband_memmap.paddr != 0) &&
+		    (atomic_read(&this_adm.copp.topology[port_idx][copp_idx]) ==
+			SRS_TRUMEDIA_TOPOLOGY_ID)) {
+			atomic_set(&this_adm.mem_map_index,
+				ADM_SRS_TRUMEDIA);
+			ret = adm_memory_unmap_regions();
+			if (ret < 0) {
+				pr_err("%s: adm mem unmap err %d\n",
+					__func__, ret);
+			} else {
+				atomic_set(&this_adm.mem_map_handles
+					   [ADM_SRS_TRUMEDIA], 0);
+			}
+		}
+
+
+		if ((afe_get_port_type(port_id) == MSM_AFE_PORT_TYPE_TX) &&
+		    this_adm.sourceTrackingData.memmap.paddr) {
+			atomic_set(&this_adm.mem_map_index,
+				   ADM_MEM_MAP_INDEX_SOURCE_TRACKING);
+			ret = adm_memory_unmap_regions();
+			if (ret < 0) {
+				pr_err("%s: adm mem unmap err %d\n",
+					__func__, ret);
+			}
+			msm_audio_ion_free(
+				this_adm.sourceTrackingData.ion_client,
+				this_adm.sourceTrackingData.ion_handle);
+			this_adm.sourceTrackingData.ion_client = NULL;
+			this_adm.sourceTrackingData.ion_handle = NULL;
+			this_adm.sourceTrackingData.memmap.size = 0;
+			this_adm.sourceTrackingData.memmap.kvaddr = NULL;
+			this_adm.sourceTrackingData.memmap.paddr = 0;
+			this_adm.sourceTrackingData.apr_cmd_status = -1;
+			atomic_set(&this_adm.mem_map_handles[
+					ADM_MEM_MAP_INDEX_SOURCE_TRACKING], 0);
+		}
+
+		close.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+						APR_HDR_LEN(APR_HDR_SIZE),
+						APR_PKT_VER);
+		close.pkt_size = sizeof(close);
+		close.src_svc = APR_SVC_ADM;
+		close.src_domain = APR_DOMAIN_APPS;
+		close.src_port = port_id;
+		close.dest_svc = APR_SVC_ADM;
+		close.dest_domain = APR_DOMAIN_ADSP;
+		close.dest_port = copp_id;
+		close.token = port_idx << 16 | copp_idx;
+		close.opcode = ADM_CMD_DEVICE_CLOSE_V5;
+
+		atomic_set(&this_adm.copp.id[port_idx][copp_idx],
+			   RESET_COPP_ID);
+		atomic_set(&this_adm.copp.cnt[port_idx][copp_idx], 0);
+		atomic_set(&this_adm.copp.topology[port_idx][copp_idx], 0);
+		atomic_set(&this_adm.copp.mode[port_idx][copp_idx], 0);
+		atomic_set(&this_adm.copp.stat[port_idx][copp_idx], -1);
+		atomic_set(&this_adm.copp.rate[port_idx][copp_idx], 0);
+		atomic_set(&this_adm.copp.channels[port_idx][copp_idx], 0);
+		atomic_set(&this_adm.copp.bit_width[port_idx][copp_idx], 0);
+		atomic_set(&this_adm.copp.app_type[port_idx][copp_idx], 0);
+
+		clear_bit(ADM_STATUS_CALIBRATION_REQUIRED,
+			(void *)&this_adm.copp.adm_status[port_idx][copp_idx]);
+		clear_bit(ADM_STATUS_LIMITER,
+			(void *)&this_adm.copp.adm_status[port_idx][copp_idx]);
+
+		ret = apr_send_pkt(this_adm.apr, (uint32_t *)&close);
+		if (ret < 0) {
+			pr_err("%s: ADM close failed %d\n", __func__, ret);
+			return -EINVAL;
+		}
+
+		ret = wait_event_timeout(this_adm.copp.wait[port_idx][copp_idx],
+			atomic_read(&this_adm.copp.stat
+			[port_idx][copp_idx]) >= 0,
+			msecs_to_jiffies(TIMEOUT_MS));
+		if (!ret) {
+			pr_err("%s: ADM close timed out for port 0x%x\n",
+				__func__, port_id);
+			return -EINVAL;
+		} else if (atomic_read(&this_adm.copp.stat
+					[port_idx][copp_idx]) > 0) {
+			pr_err("%s: DSP returned error[%s]\n",
+				__func__, adsp_err_get_err_str(
+				atomic_read(&this_adm.copp.stat
+				[port_idx][copp_idx])));
+			return adsp_err_get_lnx_err_code(
+					atomic_read(&this_adm.copp.stat
+						[port_idx][copp_idx]));
+		}
+	}
+
+	if (perf_mode != ULTRA_LOW_LATENCY_PCM_MODE) {
+		pr_debug("%s: remove adm device from rtac\n", __func__);
+		rtac_remove_adm_device(port_id, copp_id);
+	}
+	return 0;
+}
+
+int send_rtac_audvol_cal(void)
+{
+	int ret = 0;
+	int ret2 = 0;
+	int i = 0;
+	int copp_idx, port_idx, acdb_id, app_id, path;
+	struct cal_block_data *cal_block = NULL;
+	struct audio_cal_info_audvol *audvol_cal_info = NULL;
+	struct rtac_adm rtac_adm_data;
+
+	mutex_lock(&this_adm.cal_data[ADM_RTAC_AUDVOL_CAL]->lock);
+
+	cal_block = cal_utils_get_only_cal_block(
+		this_adm.cal_data[ADM_RTAC_AUDVOL_CAL]);
+	if (cal_block == NULL) {
+		pr_err("%s: can't find cal block!\n", __func__);
+		goto unlock;
+	}
+
+	audvol_cal_info = cal_block->cal_info;
+	if (audvol_cal_info == NULL) {
+		pr_err("%s: audvol_cal_info is NULL!\n", __func__);
+		goto unlock;
+	}
+
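+	/*
+	 * Walk the active RTAC ADM devices and resend the volume cal block
+	 * to every copp whose acdb id, app type and path match it.
+	 */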
+	get_rtac_adm_data(&rtac_adm_data);
+	for (; i < rtac_adm_data.num_of_dev; i++) {
+
+		acdb_id = rtac_adm_data.device[i].acdb_dev_id;
+		if (acdb_id == 0)
+			acdb_id = audvol_cal_info->acdb_id;
+
+		app_id = rtac_adm_data.device[i].app_type;
+		if (app_id == 0)
+			app_id = audvol_cal_info->app_type;
+
+		path = afe_get_port_type(rtac_adm_data.device[i].afe_port);
+		if ((acdb_id == audvol_cal_info->acdb_id) &&
+			(app_id == audvol_cal_info->app_type) &&
+			(path == audvol_cal_info->path)) {
+
+			if (adm_get_indexes_from_copp_id(rtac_adm_data.
+				device[i].copp, &copp_idx, &port_idx) != 0) {
+				pr_debug("%s: Copp Id %d is not active\n",
+					__func__,
+					rtac_adm_data.device[i].copp);
+				continue;
+			}
+
+			ret2 = adm_remap_and_send_cal_block(ADM_RTAC_AUDVOL_CAL,
+				rtac_adm_data.device[i].afe_port,
+				copp_idx, cal_block,
+				atomic_read(&this_adm.copp.
+				mode[port_idx][copp_idx]),
+				audvol_cal_info->app_type,
+				audvol_cal_info->acdb_id,
+				atomic_read(&this_adm.copp.
+				rate[port_idx][copp_idx]));
+			if (ret2 < 0) {
+				pr_debug("%s: remap and send failed for copp Id %d, acdb id %d, app type %d, path %d\n",
+					__func__, rtac_adm_data.device[i].copp,
+					audvol_cal_info->acdb_id,
+					audvol_cal_info->app_type,
+					audvol_cal_info->path);
+				ret = ret2;
+			}
+		}
+	}
+unlock:
+	mutex_unlock(&this_adm.cal_data[ADM_RTAC_AUDVOL_CAL]->lock);
+	return ret;
+}
+
+int adm_map_rtac_block(struct rtac_cal_block_data *cal_block)
+{
+	int	result = 0;
+	pr_debug("%s:\n", __func__);
+
+	if (cal_block == NULL) {
+		pr_err("%s: cal_block is NULL!\n",
+			__func__);
+		result = -EINVAL;
+		goto done;
+	}
+
+	if (cal_block->cal_data.paddr == 0) {
+		pr_debug("%s: No address to map!\n",
+			__func__);
+		result = -EINVAL;
+		goto done;
+	}
+
+	if (cal_block->map_data.map_size == 0) {
+		pr_debug("%s: map size is 0!\n",
+			__func__);
+		result = -EINVAL;
+		goto done;
+	}
+
+	/* A valid port ID is needed for the callback; use primary I2S */
+	atomic_set(&this_adm.mem_map_index, ADM_RTAC_APR_CAL);
+	result = adm_memory_map_regions(&cal_block->cal_data.paddr, 0,
+					&cal_block->map_data.map_size, 1);
+	if (result < 0) {
+		pr_err("%s: RTAC mmap did not work! size = %d result %d\n",
+			__func__,
+			cal_block->map_data.map_size, result);
+		pr_debug("%s: RTAC mmap did not work! addr = 0x%pK, size = %d\n",
+			__func__,
+			&cal_block->cal_data.paddr,
+			cal_block->map_data.map_size);
+		goto done;
+	}
+
+	cal_block->map_data.map_handle = atomic_read(
+		&this_adm.mem_map_handles[ADM_RTAC_APR_CAL]);
+done:
+	return result;
+}
+
+int adm_unmap_rtac_block(uint32_t *mem_map_handle)
+{
+	int	result = 0;
+	pr_debug("%s:\n", __func__);
+
+	if (mem_map_handle == NULL) {
+		pr_debug("%s: Map handle is NULL, nothing to unmap\n",
+			__func__);
+		goto done;
+	}
+
+	if (*mem_map_handle == 0) {
+		pr_debug("%s: Map handle is 0, nothing to unmap\n",
+			__func__);
+		goto done;
+	}
+
+	if (*mem_map_handle != atomic_read(
+			&this_adm.mem_map_handles[ADM_RTAC_APR_CAL])) {
+		pr_err("%s: Map handles do not match! Unmapping RTAC, RTAC map 0x%x, ADM map 0x%x\n",
+			__func__, *mem_map_handle, atomic_read(
+			&this_adm.mem_map_handles[ADM_RTAC_APR_CAL]));
+
+		/* On mismatch, unmap using the handle that was passed in */
+		atomic_set(&this_adm.mem_map_handles[ADM_RTAC_APR_CAL],
+			   *mem_map_handle);
+	}
+
+	/* A valid port ID is needed for the callback; use primary I2S */
+	atomic_set(&this_adm.mem_map_index, ADM_RTAC_APR_CAL);
+	result = adm_memory_unmap_regions();
+	if (result < 0) {
+		pr_debug("%s: adm_memory_unmap_regions failed, error %d\n",
+			__func__, result);
+	} else {
+		atomic_set(&this_adm.mem_map_handles[ADM_RTAC_APR_CAL], 0);
+		*mem_map_handle = 0;
+	}
+done:
+	return result;
+}
+
+static int get_cal_type_index(int32_t cal_type)
+{
+	int ret = -EINVAL;
+
+	switch (cal_type) {
+	case ADM_AUDPROC_CAL_TYPE:
+		ret = ADM_AUDPROC_CAL;
+		break;
+	case ADM_LSM_AUDPROC_CAL_TYPE:
+		ret = ADM_LSM_AUDPROC_CAL;
+		break;
+	case ADM_AUDVOL_CAL_TYPE:
+		ret = ADM_AUDVOL_CAL;
+		break;
+	case ADM_CUST_TOPOLOGY_CAL_TYPE:
+		ret = ADM_CUSTOM_TOP_CAL;
+		break;
+	case ADM_RTAC_INFO_CAL_TYPE:
+		ret = ADM_RTAC_INFO_CAL;
+		break;
+	case ADM_RTAC_APR_CAL_TYPE:
+		ret = ADM_RTAC_APR_CAL;
+		break;
+	case ADM_RTAC_AUDVOL_CAL_TYPE:
+		ret = ADM_RTAC_AUDVOL_CAL;
+		break;
+	default:
+		pr_err("%s: invalid cal type %d!\n", __func__, cal_type);
+	}
+	return ret;
+}
+
+static int adm_alloc_cal(int32_t cal_type, size_t data_size, void *data)
+{
+	int				ret = 0;
+	int				cal_index;
+	pr_debug("%s:\n", __func__);
+
+	cal_index = get_cal_type_index(cal_type);
+	if (cal_index < 0) {
+		pr_err("%s: could not get cal index %d!\n",
+			__func__, cal_index);
+		ret = -EINVAL;
+		goto done;
+	}
+
+	ret = cal_utils_alloc_cal(data_size, data,
+		this_adm.cal_data[cal_index], 0, NULL);
+	if (ret < 0) {
+		pr_err("%s: cal_utils_alloc_cal failed, ret = %d, cal type = %d!\n",
+			__func__, ret, cal_type);
+		ret = -EINVAL;
+		goto done;
+	}
+done:
+	return ret;
+}
+
+static int adm_dealloc_cal(int32_t cal_type, size_t data_size, void *data)
+{
+	int				ret = 0;
+	int				cal_index;
+	pr_debug("%s:\n", __func__);
+
+	cal_index = get_cal_type_index(cal_type);
+	if (cal_index < 0) {
+		pr_err("%s: could not get cal index %d!\n",
+			__func__, cal_index);
+		ret = -EINVAL;
+		goto done;
+	}
+
+	ret = cal_utils_dealloc_cal(data_size, data,
+		this_adm.cal_data[cal_index]);
+	if (ret < 0) {
+		pr_err("%s: cal_utils_dealloc_cal failed, ret = %d, cal type = %d!\n",
+			__func__, ret, cal_type);
+		ret = -EINVAL;
+		goto done;
+	}
+done:
+	return ret;
+}
+
+static int adm_set_cal(int32_t cal_type, size_t data_size, void *data)
+{
+	int				ret = 0;
+	int				cal_index;
+	pr_debug("%s:\n", __func__);
+
+	cal_index = get_cal_type_index(cal_type);
+	if (cal_index < 0) {
+		pr_err("%s: could not get cal index %d!\n",
+			__func__, cal_index);
+		ret = -EINVAL;
+		goto done;
+	}
+
+	ret = cal_utils_set_cal(data_size, data,
+		this_adm.cal_data[cal_index], 0, NULL);
+	if (ret < 0) {
+		pr_err("%s: cal_utils_set_cal failed, ret = %d, cal type = %d!\n",
+			__func__, ret, cal_type);
+		ret = -EINVAL;
+		goto done;
+	}
+
+	if (cal_index == ADM_CUSTOM_TOP_CAL) {
+		mutex_lock(&this_adm.cal_data[ADM_CUSTOM_TOP_CAL]->lock);
+		this_adm.set_custom_topology = 1;
+		mutex_unlock(&this_adm.cal_data[ADM_CUSTOM_TOP_CAL]->lock);
+	} else if (cal_index == ADM_RTAC_AUDVOL_CAL) {
+		send_rtac_audvol_cal();
+	}
+done:
+	return ret;
+}
+
+static int adm_map_cal_data(int32_t cal_type,
+			struct cal_block_data *cal_block)
+{
+	int ret = 0;
+	int cal_index;
+	pr_debug("%s:\n", __func__);
+
+	cal_index = get_cal_type_index(cal_type);
+	if (cal_index < 0) {
+		pr_err("%s: could not get cal index %d!\n",
+			__func__, cal_index);
+		ret = -EINVAL;
+		goto done;
+	}
+
+	atomic_set(&this_adm.mem_map_index, cal_index);
+	ret = adm_memory_map_regions(&cal_block->cal_data.paddr, 0,
+		(uint32_t *)&cal_block->map_data.map_size, 1);
+	if (ret < 0) {
+		pr_err("%s: map did not work! cal_type %i ret %d\n",
+			__func__, cal_index, ret);
+		ret = -ENODEV;
+		goto done;
+	}
+	cal_block->map_data.q6map_handle = atomic_read(&this_adm.
+		mem_map_handles[cal_index]);
+done:
+	return ret;
+}
+
+static int adm_unmap_cal_data(int32_t cal_type,
+			struct cal_block_data *cal_block)
+{
+	int ret = 0;
+	int cal_index;
+	pr_debug("%s:\n", __func__);
+
+	cal_index = get_cal_type_index(cal_type);
+	if (cal_index < 0) {
+		pr_err("%s: could not get cal index %d!\n",
+			__func__, cal_index);
+		ret = -EINVAL;
+		goto done;
+	}
+
+	if (cal_block == NULL) {
+		pr_err("%s: Cal block is NULL!\n",
+						__func__);
+		goto done;
+	}
+
+	if (cal_block->map_data.q6map_handle == 0) {
+		pr_err("%s: Map handle is 0, nothing to unmap\n",
+				__func__);
+		goto done;
+	}
+
+	atomic_set(&this_adm.mem_map_handles[cal_index],
+		cal_block->map_data.q6map_handle);
+	atomic_set(&this_adm.mem_map_index, cal_index);
+	ret = adm_memory_unmap_regions();
+	if (ret < 0) {
+		pr_err("%s: unmap did not work! cal_type %i ret %d\n",
+			__func__, cal_index, ret);
+		ret = -ENODEV;
+		goto done;
+	}
+	cal_block->map_data.q6map_handle = 0;
+done:
+	return ret;
+}
+
+static void adm_delete_cal_data(void)
+{
+	pr_debug("%s:\n", __func__);
+
+	cal_utils_destroy_cal_types(ADM_MAX_CAL_TYPES, this_adm.cal_data);
+
+	return;
+}
+
+static int adm_init_cal_data(void)
+{
+	int ret = 0;
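+	/*
+	 * One entry per ADM cal type; entries with NULL callbacks (RTAC
+	 * info/APR and SRS TruMedia) register the cal type without any
+	 * alloc, set or map handling of their own.
+	 */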
+	struct cal_type_info	cal_type_info[] = {
+		{{ADM_CUST_TOPOLOGY_CAL_TYPE,
+		{adm_alloc_cal, adm_dealloc_cal, NULL,
+		adm_set_cal, NULL, NULL} },
+		{adm_map_cal_data, adm_unmap_cal_data,
+		cal_utils_match_buf_num} },
+
+		{{ADM_AUDPROC_CAL_TYPE,
+		{adm_alloc_cal, adm_dealloc_cal, NULL,
+		adm_set_cal, NULL, NULL} },
+		{adm_map_cal_data, adm_unmap_cal_data,
+		cal_utils_match_buf_num} },
+
+		{{ADM_LSM_AUDPROC_CAL_TYPE,
+		{adm_alloc_cal, adm_dealloc_cal, NULL,
+		adm_set_cal, NULL, NULL} },
+		{adm_map_cal_data, adm_unmap_cal_data,
+		cal_utils_match_buf_num} },
+
+		{{ADM_AUDVOL_CAL_TYPE,
+		{adm_alloc_cal, adm_dealloc_cal, NULL,
+		adm_set_cal, NULL, NULL} },
+		{adm_map_cal_data, adm_unmap_cal_data,
+		cal_utils_match_buf_num} },
+
+		{{ADM_RTAC_INFO_CAL_TYPE,
+		{NULL, NULL, NULL, NULL, NULL, NULL} },
+		{NULL, NULL, cal_utils_match_buf_num} },
+
+		{{ADM_RTAC_APR_CAL_TYPE,
+		{NULL, NULL, NULL, NULL, NULL, NULL} },
+		{NULL, NULL, cal_utils_match_buf_num} },
+
+		{{SRS_TRUMEDIA_CAL_TYPE,
+		{NULL, NULL, NULL, NULL, NULL, NULL} },
+		{NULL, NULL, cal_utils_match_buf_num} },
+
+		{{ADM_RTAC_AUDVOL_CAL_TYPE,
+		{adm_alloc_cal, adm_dealloc_cal, NULL,
+		adm_set_cal, NULL, NULL} },
+		{adm_map_cal_data, adm_unmap_cal_data,
+		cal_utils_match_buf_num} },
+	};
+	pr_debug("%s:\n", __func__);
+
+	ret = cal_utils_create_cal_types(ADM_MAX_CAL_TYPES, this_adm.cal_data,
+		cal_type_info);
+	if (ret < 0) {
+		pr_err("%s: could not create cal type! ret %d\n",
+			__func__, ret);
+		ret = -EINVAL;
+		goto err;
+	}
+
+	return ret;
+err:
+	adm_delete_cal_data();
+	return ret;
+}
+
+int adm_set_volume(int port_id, int copp_idx, int volume)
+{
+	struct audproc_volume_ctrl_master_gain audproc_vol;
+	int sz = 0;
+	int rc  = 0;
+	int port_idx;
+
+	pr_debug("%s: port_id %d, volume %d\n", __func__, port_id, volume);
+	port_id = afe_convert_virtual_to_portid(port_id);
+	port_idx = adm_validate_and_get_port_index(port_id);
+	if (port_idx < 0) {
+		pr_err("%s: Invalid port_id %#x\n", __func__, port_id);
+		rc = -EINVAL;
+		goto fail_cmd;
+	}
+
+	if (copp_idx < 0 || copp_idx >= MAX_COPPS_PER_PORT) {
+		pr_err("%s: Invalid copp_num: %d\n", __func__, copp_idx);
+		return -EINVAL;
+	}
+
+	sz = sizeof(struct audproc_volume_ctrl_master_gain);
+	audproc_vol.params.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+				APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
+	audproc_vol.params.hdr.pkt_size = sz;
+	audproc_vol.params.hdr.src_svc = APR_SVC_ADM;
+	audproc_vol.params.hdr.src_domain = APR_DOMAIN_APPS;
+	audproc_vol.params.hdr.src_port = port_id;
+	audproc_vol.params.hdr.dest_svc = APR_SVC_ADM;
+	audproc_vol.params.hdr.dest_domain = APR_DOMAIN_ADSP;
+	audproc_vol.params.hdr.dest_port =
+			atomic_read(&this_adm.copp.id[port_idx][copp_idx]);
+	audproc_vol.params.hdr.token = port_idx << 16 | copp_idx;
+	audproc_vol.params.hdr.opcode = ADM_CMD_SET_PP_PARAMS_V5;
+	audproc_vol.params.payload_addr_lsw = 0;
+	audproc_vol.params.payload_addr_msw = 0;
+	audproc_vol.params.mem_map_handle = 0;
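+	/* In-band payload: the param data header plus the master gain word */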
+	audproc_vol.params.payload_size = sizeof(audproc_vol) -
+				sizeof(audproc_vol.params);
+	audproc_vol.data.module_id = AUDPROC_MODULE_ID_VOL_CTRL;
+	audproc_vol.data.param_id = AUDPROC_PARAM_ID_VOL_CTRL_MASTER_GAIN;
+	audproc_vol.data.param_size = audproc_vol.params.payload_size -
+						sizeof(audproc_vol.data);
+	audproc_vol.data.reserved = 0;
+	audproc_vol.master_gain = volume;
+
+	atomic_set(&this_adm.copp.stat[port_idx][copp_idx], -1);
+	rc = apr_send_pkt(this_adm.apr, (uint32_t *)&audproc_vol);
+	if (rc < 0) {
+		pr_err("%s: Set params failed port = %#x\n",
+			__func__, port_id);
+		rc = -EINVAL;
+		goto fail_cmd;
+	}
+	/* Wait for the callback */
+	rc = wait_event_timeout(this_adm.copp.wait[port_idx][copp_idx],
+		atomic_read(&this_adm.copp.stat[port_idx][copp_idx]) >= 0,
+		msecs_to_jiffies(TIMEOUT_MS));
+	if (!rc) {
+		pr_err("%s: Vol cntrl Set params timed out port = %#x\n",
+			 __func__, port_id);
+		rc = -EINVAL;
+		goto fail_cmd;
+	} else if (atomic_read(&this_adm.copp.stat
+				[port_idx][copp_idx]) > 0) {
+		pr_err("%s: DSP returned error[%s]\n",
+				__func__, adsp_err_get_err_str(
+				atomic_read(&this_adm.copp.stat
+				[port_idx][copp_idx])));
+		rc = adsp_err_get_lnx_err_code(
+				atomic_read(&this_adm.copp.stat
+					[port_idx][copp_idx]));
+		goto fail_cmd;
+	}
+	rc = 0;
+fail_cmd:
+	return rc;
+}
+
+int adm_set_softvolume(int port_id, int copp_idx,
+			struct audproc_softvolume_params *softvol_param)
+{
+	struct audproc_soft_step_volume_params audproc_softvol;
+	int sz = 0;
+	int rc  = 0;
+	int port_idx;
+
+	pr_debug("%s: period %d step %d curve %d\n", __func__,
+		 softvol_param->period, softvol_param->step,
+		 softvol_param->rampingcurve);
+
+	port_id = afe_convert_virtual_to_portid(port_id);
+	port_idx = adm_validate_and_get_port_index(port_id);
+	if (port_idx < 0) {
+		pr_err("%s: Invalid port_id %#x\n", __func__, port_id);
+		rc = -EINVAL;
+		goto fail_cmd;
+	}
+
+	if (copp_idx < 0 || copp_idx >= MAX_COPPS_PER_PORT) {
+		pr_err("%s: Invalid copp_num: %d\n", __func__, copp_idx);
+		return -EINVAL;
+	}
+
+	sz = sizeof(struct audproc_soft_step_volume_params);
+
+	audproc_softvol.params.hdr.hdr_field =
+				APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+				APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
+	audproc_softvol.params.hdr.pkt_size = sz;
+	audproc_softvol.params.hdr.src_svc = APR_SVC_ADM;
+	audproc_softvol.params.hdr.src_domain = APR_DOMAIN_APPS;
+	audproc_softvol.params.hdr.src_port = port_id;
+	audproc_softvol.params.hdr.dest_svc = APR_SVC_ADM;
+	audproc_softvol.params.hdr.dest_domain = APR_DOMAIN_ADSP;
+	audproc_softvol.params.hdr.dest_port =
+			atomic_read(&this_adm.copp.id[port_idx][copp_idx]);
+	audproc_softvol.params.hdr.token = port_idx << 16 | copp_idx;
+	audproc_softvol.params.hdr.opcode = ADM_CMD_SET_PP_PARAMS_V5;
+	audproc_softvol.params.payload_addr_lsw = 0;
+	audproc_softvol.params.payload_addr_msw = 0;
+	audproc_softvol.params.mem_map_handle = 0;
+	audproc_softvol.params.payload_size = sizeof(audproc_softvol) -
+				sizeof(audproc_softvol.params);
+	audproc_softvol.data.module_id = AUDPROC_MODULE_ID_VOL_CTRL;
+	audproc_softvol.data.param_id =
+			AUDPROC_PARAM_ID_SOFT_VOL_STEPPING_PARAMETERS;
+	audproc_softvol.data.param_size = audproc_softvol.params.payload_size -
+				sizeof(audproc_softvol.data);
+	audproc_softvol.data.reserved = 0;
+	audproc_softvol.period = softvol_param->period;
+	audproc_softvol.step = softvol_param->step;
+	audproc_softvol.ramping_curve = softvol_param->rampingcurve;
+
+	pr_debug("%s: period %d, step %d, curve %d\n", __func__,
+		 audproc_softvol.period, audproc_softvol.step,
+		 audproc_softvol.ramping_curve);
+
+	atomic_set(&this_adm.copp.stat[port_idx][copp_idx], -1);
+	rc = apr_send_pkt(this_adm.apr, (uint32_t *)&audproc_softvol);
+	if (rc < 0) {
+		pr_err("%s: Set params failed port = %#x\n",
+			__func__, port_id);
+		rc = -EINVAL;
+		goto fail_cmd;
+	}
+	/* Wait for the callback */
+	rc = wait_event_timeout(this_adm.copp.wait[port_idx][copp_idx],
+		atomic_read(&this_adm.copp.stat[port_idx][copp_idx]) >= 0,
+		msecs_to_jiffies(TIMEOUT_MS));
+	if (!rc) {
+		pr_err("%s: Soft volume Set params timed out port = %#x\n",
+			 __func__, port_id);
+		rc = -EINVAL;
+		goto fail_cmd;
+	} else if (atomic_read(&this_adm.copp.stat
+				[port_idx][copp_idx]) > 0) {
+		pr_err("%s: DSP returned error[%s]\n",
+				__func__, adsp_err_get_err_str(
+				atomic_read(&this_adm.copp.stat
+				[port_idx][copp_idx])));
+		rc = adsp_err_get_lnx_err_code(
+				atomic_read(&this_adm.copp.stat
+					[port_idx][copp_idx]));
+		goto fail_cmd;
+	}
+	rc = 0;
+fail_cmd:
+	return rc;
+}
+
+int adm_set_mic_gain(int port_id, int copp_idx, int volume)
+{
+	struct adm_set_mic_gain_params	mic_gain_params;
+	int rc = 0;
+	int sz, port_idx;
+
+	pr_debug("%s:\n", __func__);
+	port_id = afe_convert_virtual_to_portid(port_id);
+	port_idx = adm_validate_and_get_port_index(port_id);
+	if (port_idx < 0) {
+		pr_err("%s: Invalid port_id 0x%x\n", __func__, port_id);
+		return -EINVAL;
+	}
+
+	sz = sizeof(struct adm_set_mic_gain_params);
+
+	mic_gain_params.params.hdr.hdr_field =
+				APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+				APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
+	mic_gain_params.params.hdr.pkt_size = sz;
+	mic_gain_params.params.hdr.src_svc = APR_SVC_ADM;
+	mic_gain_params.params.hdr.src_domain = APR_DOMAIN_APPS;
+	mic_gain_params.params.hdr.src_port = port_id;
+	mic_gain_params.params.hdr.dest_svc = APR_SVC_ADM;
+	mic_gain_params.params.hdr.dest_domain = APR_DOMAIN_ADSP;
+	mic_gain_params.params.hdr.dest_port =
+			atomic_read(&this_adm.copp.id[port_idx][copp_idx]);
+	mic_gain_params.params.hdr.token = port_idx << 16 | copp_idx;
+	mic_gain_params.params.hdr.opcode = ADM_CMD_SET_PP_PARAMS_V5;
+	mic_gain_params.params.payload_addr_lsw = 0;
+	mic_gain_params.params.payload_addr_msw = 0;
+	mic_gain_params.params.mem_map_handle = 0;
+	mic_gain_params.params.payload_size =
+		sizeof(struct adm_param_data_v5) +
+		sizeof(struct admx_mic_gain);
+	mic_gain_params.data.module_id = ADM_MODULE_IDX_MIC_GAIN_CTRL;
+	mic_gain_params.data.param_id = ADM_PARAM_IDX_MIC_GAIN;
+	mic_gain_params.data.param_size =
+		sizeof(struct admx_mic_gain);
+	mic_gain_params.data.reserved = 0;
+	mic_gain_params.mic_gain_data.tx_mic_gain = volume;
+	mic_gain_params.mic_gain_data.reserved = 0;
+	pr_debug("%s: Mic Gain set to %d at port_id 0x%x\n",
+		__func__, volume, port_id);
+
+	atomic_set(&this_adm.copp.stat[port_idx][copp_idx], -1);
+	rc = apr_send_pkt(this_adm.apr, (uint32_t *)&mic_gain_params);
+	if (rc < 0) {
+		pr_err("%s: Set params failed port = %#x\n",
+			__func__, port_id);
+		rc = -EINVAL;
+		goto fail_cmd;
+	}
+	/* Wait for the callback */
+	rc = wait_event_timeout(this_adm.copp.wait[port_idx][copp_idx],
+		atomic_read(&this_adm.copp.stat[port_idx][copp_idx]) >= 0,
+		msecs_to_jiffies(TIMEOUT_MS));
+	if (!rc) {
+		pr_err("%s: Mic Gain Set params timed out port = %#x\n",
+			 __func__, port_id);
+		rc = -EINVAL;
+		goto fail_cmd;
+	} else if (atomic_read(&this_adm.copp.stat
+				[port_idx][copp_idx]) > 0) {
+		pr_err("%s: DSP returned error[%s]\n",
+				__func__, adsp_err_get_err_str(
+				atomic_read(&this_adm.copp.stat
+				[port_idx][copp_idx])));
+		rc = adsp_err_get_lnx_err_code(
+				atomic_read(&this_adm.copp.stat
+					[port_idx][copp_idx]));
+		goto fail_cmd;
+	}
+	rc = 0;
+fail_cmd:
+	return rc;
+}
+
+int adm_send_set_multichannel_ec_primary_mic_ch(int port_id, int copp_idx,
+			int primary_mic_ch)
+{
+	struct adm_set_sec_primary_ch_params sec_primary_ch_params;
+	int rc = 0;
+	int sz, port_idx;
+
+	pr_debug("%s port_id 0x%x, copp_idx 0x%x, primary_mic_ch %d\n",
+			__func__, port_id,  copp_idx,  primary_mic_ch);
+	port_id = afe_convert_virtual_to_portid(port_id);
+	port_idx = adm_validate_and_get_port_index(port_id);
+	if (port_idx < 0) {
+		pr_err("%s: Invalid port_id 0x%x\n", __func__, port_id);
+		return -EINVAL;
+	}
+
+	if (copp_idx < 0 || copp_idx >= MAX_COPPS_PER_PORT) {
+		pr_err("%s: Invalid copp_idx 0x%x\n", __func__, copp_idx);
+		return -EINVAL;
+	}
+
+	sz = sizeof(struct adm_set_sec_primary_ch_params);
+
+	sec_primary_ch_params.params.hdr.hdr_field =
+			APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+			APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
+	sec_primary_ch_params.params.hdr.pkt_size = sz;
+	sec_primary_ch_params.params.hdr.src_svc = APR_SVC_ADM;
+	sec_primary_ch_params.params.hdr.src_domain = APR_DOMAIN_APPS;
+	sec_primary_ch_params.params.hdr.src_port = port_id;
+	sec_primary_ch_params.params.hdr.dest_svc = APR_SVC_ADM;
+	sec_primary_ch_params.params.hdr.dest_domain = APR_DOMAIN_ADSP;
+	sec_primary_ch_params.params.hdr.dest_port =
+			atomic_read(&this_adm.copp.id[port_idx][copp_idx]);
+	sec_primary_ch_params.params.hdr.token = port_idx << 16 | copp_idx;
+	sec_primary_ch_params.params.hdr.opcode = ADM_CMD_SET_PP_PARAMS_V5;
+	sec_primary_ch_params.params.payload_addr_lsw = 0;
+	sec_primary_ch_params.params.payload_addr_msw = 0;
+	sec_primary_ch_params.params.mem_map_handle = 0;
+	sec_primary_ch_params.params.payload_size =
+			sizeof(struct adm_param_data_v5) +
+			sizeof(struct admx_sec_primary_mic_ch);
+	sec_primary_ch_params.data.module_id =
+			AUDPROC_MODULE_ID_VOICE_TX_SECNS;
+	sec_primary_ch_params.data.param_id =
+			AUDPROC_PARAM_IDX_SEC_PRIMARY_MIC_CH;
+	sec_primary_ch_params.data.param_size =
+			sizeof(struct admx_sec_primary_mic_ch);
+	sec_primary_ch_params.data.reserved = 0;
+	sec_primary_ch_params.sec_primary_mic_ch_data.version = 0;
+	sec_primary_ch_params.sec_primary_mic_ch_data.reserved = 0;
+	sec_primary_ch_params.sec_primary_mic_ch_data.sec_primary_mic_ch =
+			primary_mic_ch;
+	sec_primary_ch_params.sec_primary_mic_ch_data.reserved1 = 0;
+
+	atomic_set(&this_adm.copp.stat[port_idx][copp_idx], -1);
+	rc = apr_send_pkt(this_adm.apr, (uint32_t *)&sec_primary_ch_params);
+	if (rc < 0) {
+		pr_err("%s: Set params failed port = %#x\n",
+				__func__, port_id);
+		rc = -EINVAL;
+		goto fail_cmd;
+	}
+	/* Wait for the callback */
+	rc = wait_event_timeout(this_adm.copp.wait[port_idx][copp_idx],
+		atomic_read(&this_adm.copp.stat[port_idx][copp_idx]) >= 0,
+		msecs_to_jiffies(TIMEOUT_MS));
+	if (!rc) {
+		pr_err("%s: Mic Set params timed out port = %#x\n",
+				__func__, port_id);
+		rc = -EINVAL;
+		goto fail_cmd;
+	} else if (atomic_read(&this_adm.copp.stat
+				[port_idx][copp_idx]) > 0) {
+		pr_err("%s: DSP returned error[%s]\n",
+				__func__, adsp_err_get_err_str(
+				atomic_read(&this_adm.copp.stat
+				[port_idx][copp_idx])));
+		rc = adsp_err_get_lnx_err_code(
+				atomic_read(&this_adm.copp.stat
+					[port_idx][copp_idx]));
+		goto fail_cmd;
+	}
+	rc = 0;
+fail_cmd:
+	return rc;
+}
+
+int adm_param_enable(int port_id, int copp_idx, int module_id,  int enable)
+{
+	struct audproc_enable_param_t adm_mod_enable;
+	int sz = 0;
+	int rc  = 0;
+	int port_idx;
+
+	pr_debug("%s port_id %d, module_id 0x%x, enable %d\n",
+		 __func__, port_id,  module_id,  enable);
+	port_id = afe_convert_virtual_to_portid(port_id);
+	port_idx = adm_validate_and_get_port_index(port_id);
+	if (port_idx < 0) {
+		pr_err("%s: Invalid port_id %#x\n", __func__, port_id);
+		rc = -EINVAL;
+		goto fail_cmd;
+	}
+
+	if (copp_idx < 0 || copp_idx >= MAX_COPPS_PER_PORT) {
+		pr_err("%s: Invalid copp_num: %d\n", __func__, copp_idx);
+		return -EINVAL;
+	}
+
+	sz = sizeof(struct audproc_enable_param_t);
+
+	adm_mod_enable.pp_params.hdr.hdr_field =
+				APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+				APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
+	adm_mod_enable.pp_params.hdr.pkt_size = sz;
+	adm_mod_enable.pp_params.hdr.src_svc = APR_SVC_ADM;
+	adm_mod_enable.pp_params.hdr.src_domain = APR_DOMAIN_APPS;
+	adm_mod_enable.pp_params.hdr.src_port = port_id;
+	adm_mod_enable.pp_params.hdr.dest_svc = APR_SVC_ADM;
+	adm_mod_enable.pp_params.hdr.dest_domain = APR_DOMAIN_ADSP;
+	adm_mod_enable.pp_params.hdr.dest_port =
+			atomic_read(&this_adm.copp.id[port_idx][copp_idx]);
+	adm_mod_enable.pp_params.hdr.token =  port_idx << 16 | copp_idx;
+	adm_mod_enable.pp_params.hdr.opcode = ADM_CMD_SET_PP_PARAMS_V5;
+	adm_mod_enable.pp_params.payload_addr_lsw = 0;
+	adm_mod_enable.pp_params.payload_addr_msw = 0;
+	adm_mod_enable.pp_params.mem_map_handle = 0;
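+	/* Payload covers the generic param header plus the enable flag */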
+	adm_mod_enable.pp_params.payload_size = sizeof(adm_mod_enable) -
+				sizeof(adm_mod_enable.pp_params) +
+				sizeof(adm_mod_enable.pp_params.params);
+	adm_mod_enable.pp_params.params.module_id = module_id;
+	adm_mod_enable.pp_params.params.param_id = AUDPROC_PARAM_ID_ENABLE;
+	adm_mod_enable.pp_params.params.param_size =
+		adm_mod_enable.pp_params.payload_size -
+		sizeof(adm_mod_enable.pp_params.params);
+	adm_mod_enable.pp_params.params.reserved = 0;
+	adm_mod_enable.enable = enable;
+
+	atomic_set(&this_adm.copp.stat[port_idx][copp_idx], -1);
+
+	rc = apr_send_pkt(this_adm.apr, (uint32_t *)&adm_mod_enable);
+	if (rc < 0) {
+		pr_err("%s: Set params failed port = %#x\n",
+			__func__, port_id);
+		rc = -EINVAL;
+		goto fail_cmd;
+	}
+	/* Wait for the callback */
+	rc = wait_event_timeout(this_adm.copp.wait[port_idx][copp_idx],
+		atomic_read(&this_adm.copp.stat[port_idx][copp_idx]) >= 0,
+		msecs_to_jiffies(TIMEOUT_MS));
+	if (!rc) {
+		pr_err("%s:  module %x  enable %d timed out on port = %#x\n",
+			 __func__, module_id, enable, port_id);
+		rc = -EINVAL;
+		goto fail_cmd;
+	} else if (atomic_read(&this_adm.copp.stat
+				[port_idx][copp_idx]) > 0) {
+		pr_err("%s: DSP returned error[%s]\n",
+				__func__, adsp_err_get_err_str(
+				atomic_read(&this_adm.copp.stat
+				[port_idx][copp_idx])));
+		rc = adsp_err_get_lnx_err_code(
+				atomic_read(&this_adm.copp.stat
+					[port_idx][copp_idx]));
+		goto fail_cmd;
+	}
+	rc = 0;
+fail_cmd:
+	return rc;
+
+}
+
+int adm_send_calibration(int port_id, int copp_idx, int path, int perf_mode,
+			 int cal_type, char *params, int size)
+{
+
+	struct adm_cmd_set_pp_params_v5	*adm_params = NULL;
+	int sz, rc = 0;
+	int port_idx;
+
+	pr_debug("%s:port_id %d, path %d, perf_mode %d, cal_type %d, size %d\n",
+		 __func__, port_id, path, perf_mode, cal_type, size);
+
+	port_id = afe_convert_virtual_to_portid(port_id);
+	port_idx = adm_validate_and_get_port_index(port_id);
+	if (port_idx < 0) {
+		pr_err("%s: Invalid port_id %#x\n", __func__, port_id);
+		rc = -EINVAL;
+		goto end;
+	}
+
+	if (copp_idx < 0 || copp_idx >= MAX_COPPS_PER_PORT) {
+		pr_err("%s: Invalid copp_num: %d\n", __func__, copp_idx);
+		return -EINVAL;
+	}
+
+	/* Maps audio_dev_ctrl path definition to ACDB definition */
+	if (get_cal_path(path) != RX_DEVICE) {
+		pr_err("%s: acdb_path %d\n", __func__, path);
+		rc = -EINVAL;
+		goto end;
+	}
+
+	sz = sizeof(struct adm_cmd_set_pp_params_v5) + size;
+	adm_params = kzalloc(sz, GFP_KERNEL);
+	if (!adm_params) {
+		pr_err("%s: adm params memory alloc failed\n", __func__);
+		rc = -ENOMEM;
+		goto end;
+	}
+
+	memcpy(((u8 *)adm_params + sizeof(struct adm_cmd_set_pp_params_v5)),
+			params, size);
+
+	adm_params->hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+				APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
+	adm_params->hdr.pkt_size = sz;
+	adm_params->hdr.src_svc = APR_SVC_ADM;
+	adm_params->hdr.src_domain = APR_DOMAIN_APPS;
+	adm_params->hdr.src_port = port_id;
+	adm_params->hdr.dest_svc = APR_SVC_ADM;
+	adm_params->hdr.dest_domain = APR_DOMAIN_ADSP;
+	adm_params->hdr.dest_port =
+			atomic_read(&this_adm.copp.id[port_idx][copp_idx]);
+	adm_params->hdr.token = port_idx << 16 | copp_idx;
+	adm_params->hdr.opcode = ADM_CMD_SET_PP_PARAMS_V5;
+	/* payload address and mmap handle initialized to zero by kzalloc */
+	adm_params->payload_size = size;
+
+	atomic_set(&this_adm.copp.stat[port_idx][copp_idx], -1);
+	rc = apr_send_pkt(this_adm.apr, (uint32_t *)adm_params);
+	if (rc < 0) {
+		pr_err("%s: Set params failed port = %#x\n",
+			__func__, port_id);
+		rc = -EINVAL;
+		goto end;
+	}
+	/* Wait for the callback */
+	rc = wait_event_timeout(this_adm.copp.wait[port_idx][copp_idx],
+		atomic_read(&this_adm.copp.stat[port_idx][copp_idx]) >= 0,
+		msecs_to_jiffies(TIMEOUT_MS));
+	if (!rc) {
+		pr_err("%s: Set params timed out port = %#x\n",
+			 __func__, port_id);
+		rc = -EINVAL;
+		goto end;
+	} else if (atomic_read(&this_adm.copp.stat
+				[port_idx][copp_idx]) > 0) {
+		pr_err("%s: DSP returned error[%s]\n",
+				__func__, adsp_err_get_err_str(
+				atomic_read(&this_adm.copp.stat
+				[port_idx][copp_idx])));
+		rc = adsp_err_get_lnx_err_code(
+				atomic_read(&this_adm.copp.stat
+					[port_idx][copp_idx]));
+		goto end;
+	}
+	rc = 0;
+
+end:
+	kfree(adm_params);
+	return rc;
+}
+
+/*
+ * adm_set_wait_parameters must be called with routing driver locks.
+ * adm_reset_wait_parameters must be called with routing driver locks.
+ * Set and reset parameters are separated to make sure each is always
+ * called under the routing driver lock.
+ * adm_wait_timeout blocks until timeout or interruption. Timeout is
+ * not an error.
+ */
+int adm_set_wait_parameters(int port_id, int copp_idx)
+{
+
+	int ret = 0, port_idx;
+	pr_debug("%s: port_id 0x%x, copp_idx %d\n", __func__, port_id,
+		 copp_idx);
+	port_id = afe_convert_virtual_to_portid(port_id);
+	port_idx = adm_validate_and_get_port_index(port_id);
+	if (port_idx < 0) {
+		pr_err("%s: Invalid port_id %#x\n", __func__, port_id);
+		ret = -EINVAL;
+		goto end;
+	}
+
+	if (copp_idx < 0 || copp_idx >= MAX_COPPS_PER_PORT) {
+		pr_err("%s: Invalid copp_num: %d\n", __func__, copp_idx);
+		return -EINVAL;
+	}
+
+	this_adm.copp.adm_delay[port_idx][copp_idx] = 1;
+	atomic_set(&this_adm.copp.adm_delay_stat[port_idx][copp_idx], 0);
+
+end:
+	return ret;
+
+}
+
+int adm_reset_wait_parameters(int port_id, int copp_idx)
+{
+	int ret = 0, port_idx;
+
+	pr_debug("%s: port_id 0x%x copp_idx %d\n", __func__, port_id,
+		 copp_idx);
+	port_id = afe_convert_virtual_to_portid(port_id);
+	port_idx = adm_validate_and_get_port_index(port_id);
+	if (port_idx < 0) {
+		pr_err("%s: Invalid port_id %#x\n", __func__, port_id);
+		ret = -EINVAL;
+		goto end;
+	}
+
+	if (copp_idx < 0 || copp_idx >= MAX_COPPS_PER_PORT) {
+		pr_err("%s: Invalid copp_num: %d\n", __func__, copp_idx);
+		return -EINVAL;
+	}
+
+	atomic_set(&this_adm.copp.adm_delay_stat[port_idx][copp_idx], 1);
+	this_adm.copp.adm_delay[port_idx][copp_idx] = 0;
+
+end:
+	return ret;
+}
+
+int adm_wait_timeout(int port_id, int copp_idx, int wait_time)
+{
+	int ret = 0, port_idx;
+
+	pr_debug("%s: port_id 0x%x, copp_idx %d, wait_time %d\n", __func__,
+		 port_id, copp_idx, wait_time);
+	port_id = afe_convert_virtual_to_portid(port_id);
+	port_idx = adm_validate_and_get_port_index(port_id);
+	if (port_idx < 0) {
+		pr_err("%s: Invalid port_id %#x\n", __func__, port_id);
+		ret = -EINVAL;
+		goto end;
+	}
+
+	if (copp_idx < 0 || copp_idx >= MAX_COPPS_PER_PORT) {
+		pr_err("%s: Invalid copp_num: %d\n", __func__, copp_idx);
+		return -EINVAL;
+	}
+
+	ret = wait_event_timeout(
+		this_adm.copp.adm_delay_wait[port_idx][copp_idx],
+		atomic_read(&this_adm.copp.adm_delay_stat[port_idx][copp_idx]),
+		msecs_to_jiffies(wait_time));
+	pr_debug("%s: return %d\n", __func__, ret);
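+	/*
+	 * A nonzero return means the wait was satisfied before the timeout
+	 * (the delay was cancelled), so report -EINTR; a timeout returns 0
+	 * and is deliberately not treated as an error.
+	 */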
+	if (ret != 0)
+		ret = -EINTR;
+end:
+	pr_debug("%s: return %d--\n", __func__, ret);
+	return ret;
+}
+
+int adm_store_cal_data(int port_id, int copp_idx, int path, int perf_mode,
+		       int cal_index, char *params, int *size)
+{
+	int rc = 0;
+	struct cal_block_data		*cal_block = NULL;
+	int app_type, acdb_id, port_idx, sample_rate;
+
+	if (this_adm.cal_data[cal_index] == NULL) {
+		pr_debug("%s: cal_index %d not allocated!\n",
+			__func__, cal_index);
+		goto end;
+	}
+
+	if (get_cal_path(path) != RX_DEVICE) {
+		pr_debug("%s: Invalid path to store calibration %d\n",
+			 __func__, path);
+		rc = -EINVAL;
+		goto end;
+	}
+
+	port_id = afe_convert_virtual_to_portid(port_id);
+	port_idx = adm_validate_and_get_port_index(port_id);
+	if (port_idx < 0) {
+		pr_err("%s: Invalid port_id 0x%x\n", __func__, port_id);
+		rc = -EINVAL;
+		goto end;
+	}
+
+	if (copp_idx < 0 || copp_idx >= MAX_COPPS_PER_PORT) {
+		pr_err("%s: Invalid copp_num: %d\n", __func__, copp_idx);
+		return -EINVAL;
+	}
+
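+	/* Look up the cal block matching this copp and copy it to the caller */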
+	acdb_id = atomic_read(&this_adm.copp.acdb_id[port_idx][copp_idx]);
+	app_type = atomic_read(&this_adm.copp.app_type[port_idx][copp_idx]);
+	sample_rate = atomic_read(&this_adm.copp.rate[port_idx][copp_idx]);
+
+	mutex_lock(&this_adm.cal_data[cal_index]->lock);
+	cal_block = adm_find_cal(cal_index, get_cal_path(path), app_type,
+				acdb_id, sample_rate);
+	if (cal_block == NULL)
+		goto unlock;
+
+	if (cal_block->cal_data.size <= 0) {
+		pr_debug("%s: No ADM cal send for port_id = 0x%x!\n",
+			__func__, port_id);
+		rc = -EINVAL;
+		goto unlock;
+	}
+
+	if (cal_index == ADM_AUDPROC_CAL || cal_index == ADM_LSM_AUDPROC_CAL) {
+		if (cal_block->cal_data.size > AUD_PROC_BLOCK_SIZE) {
+			pr_err("%s:audproc:invalid size exp/actual[%zd, %d]\n",
+				__func__, cal_block->cal_data.size, *size);
+			rc = -ENOMEM;
+			goto unlock;
+		}
+	} else if (cal_index == ADM_AUDVOL_CAL) {
+		if (cal_block->cal_data.size > AUD_VOL_BLOCK_SIZE) {
+			pr_err("%s:aud_vol:invalid size exp/actual[%zd, %d]\n",
+				__func__, cal_block->cal_data.size, *size);
+			rc = -ENOMEM;
+			goto unlock;
+		}
+	} else {
+		pr_debug("%s: Not valid calibration for dolby topology\n",
+			 __func__);
+		rc = -EINVAL;
+		goto unlock;
+	}
+	memcpy(params, cal_block->cal_data.kvaddr, cal_block->cal_data.size);
+	*size = cal_block->cal_data.size;
+
+	pr_debug("%s: port_id %d, copp_idx %d, path %d, perf_mode %d, cal_index %d, size %d\n",
+		 __func__, port_id, copp_idx, path, perf_mode, cal_index, *size);
+
+unlock:
+	mutex_unlock(&this_adm.cal_data[cal_index]->lock);
+end:
+	return rc;
+}
+
+int adm_send_compressed_device_mute(int port_id, int copp_idx, bool mute_on)
+{
+	struct adm_set_compressed_device_mute mute_params;
+	int ret = 0;
+	int port_idx;
+
+	pr_debug("%s port_id: 0x%x, copp_idx %d, mute_on: %d\n",
+		 __func__, port_id, copp_idx, mute_on);
+	port_id = afe_convert_virtual_to_portid(port_id);
+	port_idx = adm_validate_and_get_port_index(port_id);
+	if (port_idx < 0 || port_idx >= AFE_MAX_PORTS) {
+		pr_err("%s: Invalid port_id %#x copp_idx %d\n",
+			__func__, port_id, copp_idx);
+		ret = -EINVAL;
+		goto end;
+	}
+
+	mute_params.command.hdr.hdr_field =
+			APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+			APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
+	mute_params.command.hdr.pkt_size =
+			sizeof(struct adm_set_compressed_device_mute);
+	mute_params.command.hdr.src_svc = APR_SVC_ADM;
+	mute_params.command.hdr.src_domain = APR_DOMAIN_APPS;
+	mute_params.command.hdr.src_port = port_id;
+	mute_params.command.hdr.dest_svc = APR_SVC_ADM;
+	mute_params.command.hdr.dest_domain = APR_DOMAIN_ADSP;
+	mute_params.command.hdr.dest_port =
+			atomic_read(&this_adm.copp.id[port_idx][copp_idx]);
+	mute_params.command.hdr.token = port_idx << 16 | copp_idx;
+	mute_params.command.hdr.opcode = ADM_CMD_SET_PP_PARAMS_V5;
+	mute_params.command.payload_addr_lsw = 0;
+	mute_params.command.payload_addr_msw = 0;
+	mute_params.command.mem_map_handle = 0;
+	mute_params.command.payload_size = sizeof(mute_params) -
+						sizeof(mute_params.command);
+	mute_params.params.module_id = AUDPROC_MODULE_ID_COMPRESSED_MUTE;
+	mute_params.params.param_id = AUDPROC_PARAM_ID_COMPRESSED_MUTE;
+	mute_params.params.param_size = mute_params.command.payload_size -
+					sizeof(mute_params.params);
+	mute_params.params.reserved = 0;
+	mute_params.mute_on = mute_on;
+
+	atomic_set(&this_adm.copp.stat[port_idx][copp_idx], -1);
+	ret = apr_send_pkt(this_adm.apr, (uint32_t *)&mute_params);
+	if (ret < 0) {
+		pr_err("%s: device mute for port %d copp %d failed, ret %d\n",
+			__func__, port_id, copp_idx, ret);
+		ret = -EINVAL;
+		goto end;
+	}
+
+	/* Wait for the callback */
+	ret = wait_event_timeout(this_adm.copp.wait[port_idx][copp_idx],
+		atomic_read(&this_adm.copp.stat[port_idx][copp_idx]) >= 0,
+		msecs_to_jiffies(TIMEOUT_MS));
+	if (!ret) {
+		pr_err("%s: send device mute for port %d copp %d failed\n",
+			__func__, port_id, copp_idx);
+		ret = -EINVAL;
+		goto end;
+	} else if (atomic_read(&this_adm.copp.stat
+				[port_idx][copp_idx]) > 0) {
+		pr_err("%s: DSP returned error[%s]\n",
+				__func__, adsp_err_get_err_str(
+				atomic_read(&this_adm.copp.stat
+				[port_idx][copp_idx])));
+		ret = adsp_err_get_lnx_err_code(
+				atomic_read(&this_adm.copp.stat
+					[port_idx][copp_idx]));
+		goto end;
+	}
+	ret = 0;
+end:
+	return ret;
+}
+
+int adm_send_compressed_device_latency(int port_id, int copp_idx, int latency)
+{
+	struct adm_set_compressed_device_latency latency_params;
+	int port_idx;
+	int ret = 0;
+
+	pr_debug("%s port_id: 0x%x, copp_idx %d latency: %d\n", __func__,
+		 port_id, copp_idx, latency);
+	port_id = afe_convert_virtual_to_portid(port_id);
+	port_idx = adm_validate_and_get_port_index(port_id);
+	if (port_idx < 0 || port_idx >= AFE_MAX_PORTS) {
+		pr_err("%s: Invalid port_id %#x copp_idx %d\n",
+			__func__, port_id, copp_idx);
+		ret = -EINVAL;
+		goto end;
+	}
+
+	latency_params.command.hdr.hdr_field =
+			APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+			APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
+	latency_params.command.hdr.pkt_size =
+			sizeof(struct adm_set_compressed_device_latency);
+	latency_params.command.hdr.src_svc = APR_SVC_ADM;
+	latency_params.command.hdr.src_domain = APR_DOMAIN_APPS;
+	latency_params.command.hdr.src_port = port_id;
+	latency_params.command.hdr.dest_svc = APR_SVC_ADM;
+	latency_params.command.hdr.dest_domain = APR_DOMAIN_ADSP;
+	latency_params.command.hdr.dest_port =
+			atomic_read(&this_adm.copp.id[port_idx][copp_idx]);
+	latency_params.command.hdr.token = port_idx << 16 | copp_idx;
+	latency_params.command.hdr.opcode = ADM_CMD_SET_PP_PARAMS_V5;
+	latency_params.command.payload_addr_lsw = 0;
+	latency_params.command.payload_addr_msw = 0;
+	latency_params.command.mem_map_handle = 0;
+	latency_params.command.payload_size = sizeof(latency_params) -
+						sizeof(latency_params.command);
+	latency_params.params.module_id = AUDPROC_MODULE_ID_COMPRESSED_LATENCY;
+	latency_params.params.param_id = AUDPROC_PARAM_ID_COMPRESSED_LATENCY;
+	latency_params.params.param_size = latency_params.command.payload_size -
+					sizeof(latency_params.params);
+	latency_params.params.reserved = 0;
+	latency_params.latency = latency;
+
+	atomic_set(&this_adm.copp.stat[port_idx][copp_idx], -1);
+	ret = apr_send_pkt(this_adm.apr, (uint32_t *)&latency_params);
+	if (ret < 0) {
+		pr_err("%s: send device latency err %d for port %d copp %d\n",
+			__func__, ret, port_id, copp_idx);
+		ret = -EINVAL;
+		goto end;
+	}
+
+	/* Wait for the callback */
+	ret = wait_event_timeout(this_adm.copp.wait[port_idx][copp_idx],
+		atomic_read(&this_adm.copp.stat[port_idx][copp_idx]) >= 0,
+		msecs_to_jiffies(TIMEOUT_MS));
+	if (!ret) {
+		pr_err("%s: send device latency for port %d failed\n", __func__,
+			port_id);
+		ret = -EINVAL;
+		goto end;
+	} else if (atomic_read(&this_adm.copp.stat
+				[port_idx][copp_idx]) > 0) {
+		pr_err("%s: DSP returned error[%s]\n",
+				__func__, adsp_err_get_err_str(
+				atomic_read(&this_adm.copp.stat
+				[port_idx][copp_idx])));
+		ret = adsp_err_get_lnx_err_code(
+				atomic_read(&this_adm.copp.stat
+					[port_idx][copp_idx]));
+		goto end;
+	}
+	ret = 0;
+end:
+	return ret;
+}
+
+/**
+ * adm_swap_speaker_channels
+ *
+ * Receives port_id, copp_idx, sample rate and spk_swap, and sends an
+ * MFC command to swap the speaker channels.
+ * Returns zero on success, nonzero on failure.
+ *
+ * port_id - Passed value, port_id for which channel swap is wanted
+ * copp_idx - Passed value, copp_idx for which channel swap is wanted
+ * sample_rate - Passed value, sample rate used by the app type config
+ * spk_swap - Passed value, flag indicating whether to swap the channels
+ */
+int adm_swap_speaker_channels(int port_id, int copp_idx,
+			int sample_rate, bool spk_swap)
+{
+	struct audproc_mfc_output_media_fmt mfc_cfg;
+	uint16_t num_channels;
+	int port_idx;
+	int ret  = 0;
+
+	pr_debug("%s: Enter, port_id %d, copp_idx %d\n",
+		  __func__, port_id, copp_idx);
+	port_id = q6audio_convert_virtual_to_portid(port_id);
+	port_idx = adm_validate_and_get_port_index(port_id);
+	if (port_idx < 0 || port_idx >= AFE_MAX_PORTS) {
+		pr_err("%s: Invalid port_id %#x\n", __func__, port_id);
+		ret = -EINVAL;
+		goto done;
+	}
+
+	if (copp_idx < 0 || copp_idx >= MAX_COPPS_PER_PORT) {
+		pr_err("%s: Invalid copp_num: %d\n", __func__, copp_idx);
+		ret = -EINVAL;
+		goto done;
+	}
+
+	num_channels = atomic_read(
+				&this_adm.copp.channels[port_idx][copp_idx]);
+	if (num_channels != 2) {
+		pr_debug("%s: Invalid number of channels: %d\n",
+			__func__, num_channels);
+		ret = -EINVAL;
+		goto done;
+	}
+
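+	/*
+	 * Reuse the MFC output media format parameter: keep the copp's
+	 * rate, bit width and channel count, and only flip the FL/FR
+	 * channel order when a swap is requested.
+	 */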
+	memset(&mfc_cfg, 0, sizeof(mfc_cfg));
+	mfc_cfg.params.hdr.hdr_field =
+				APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+				APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
+	mfc_cfg.params.hdr.pkt_size =
+				sizeof(mfc_cfg);
+	mfc_cfg.params.hdr.src_svc = APR_SVC_ADM;
+	mfc_cfg.params.hdr.src_domain = APR_DOMAIN_APPS;
+	mfc_cfg.params.hdr.src_port = port_id;
+	mfc_cfg.params.hdr.dest_svc = APR_SVC_ADM;
+	mfc_cfg.params.hdr.dest_domain = APR_DOMAIN_ADSP;
+	mfc_cfg.params.hdr.dest_port =
+			atomic_read(&this_adm.copp.id[port_idx][copp_idx]);
+	mfc_cfg.params.hdr.token = port_idx << 16 | copp_idx;
+	mfc_cfg.params.hdr.opcode = ADM_CMD_SET_PP_PARAMS_V5;
+	mfc_cfg.params.payload_addr_lsw = 0;
+	mfc_cfg.params.payload_addr_msw = 0;
+	mfc_cfg.params.mem_map_handle = 0;
+	mfc_cfg.params.payload_size = sizeof(mfc_cfg) -
+				sizeof(mfc_cfg.params);
+	mfc_cfg.data.module_id = AUDPROC_MODULE_ID_MFC;
+	mfc_cfg.data.param_id = AUDPROC_PARAM_ID_MFC_OUTPUT_MEDIA_FORMAT;
+	mfc_cfg.data.param_size = mfc_cfg.params.payload_size -
+				sizeof(mfc_cfg.data);
+	mfc_cfg.data.reserved = 0;
+	mfc_cfg.sampling_rate = sample_rate;
+	mfc_cfg.bits_per_sample =
+		atomic_read(&this_adm.copp.bit_width[port_idx][copp_idx]);
+	mfc_cfg.num_channels = num_channels;
+
+	/* Currently applying speaker swap for only 2 channel use case */
+	if (spk_swap) {
+		mfc_cfg.channel_type[0] =
+			(uint16_t) PCM_CHANNEL_FR;
+		mfc_cfg.channel_type[1] =
+			(uint16_t) PCM_CHANNEL_FL;
+	} else {
+		mfc_cfg.channel_type[0] =
+			(uint16_t) PCM_CHANNEL_FL;
+		mfc_cfg.channel_type[1] =
+			(uint16_t) PCM_CHANNEL_FR;
+	}
+
+	atomic_set(&this_adm.copp.stat[port_idx][copp_idx], -1);
+	pr_debug("%s: mfc config: port_idx %d copp_idx  %d copp SR %d copp BW %d copp chan %d\n",
+		__func__, port_idx, copp_idx, mfc_cfg.sampling_rate,
+		mfc_cfg.bits_per_sample, mfc_cfg.num_channels);
+
+	ret = apr_send_pkt(this_adm.apr, (uint32_t *)&mfc_cfg);
+	if (ret < 0) {
+		pr_err("%s: mfc_cfg send failed for port_id 0x%x, ret %d\n",
+			__func__, port_id, ret);
+		goto done;
+	}
+	/* Wait for the callback */
+	ret = wait_event_timeout(this_adm.copp.wait[port_idx][copp_idx],
+		atomic_read(&this_adm.copp.stat
+		[port_idx][copp_idx]) >= 0,
+		msecs_to_jiffies(TIMEOUT_MS));
+	if (!ret) {
+		pr_err("%s: mfc_cfg Set params timed out for port_id 0x%x\n",
+			__func__, port_id);
+		ret = -ETIMEDOUT;
+		goto done;
+	}
+
+	if (atomic_read(&this_adm.copp.stat[port_idx][copp_idx]) > 0) {
+		pr_err("%s: DSP returned error[%s]\n",
+			__func__, adsp_err_get_err_str(
+			atomic_read(&this_adm.copp.stat
+			[port_idx][copp_idx])));
+		ret = adsp_err_get_lnx_err_code(
+			atomic_read(&this_adm.copp.stat
+				[port_idx][copp_idx]));
+		goto done;
+	}
+
+	pr_debug("%s: mfc_cfg Set params returned success\n", __func__);
+	ret = 0;
+
+done:
+	return ret;
+}
+EXPORT_SYMBOL(adm_swap_speaker_channels);
+
+int adm_set_sound_focus(int port_id, int copp_idx,
+			struct sound_focus_param soundFocusData)
+{
+	struct adm_set_fluence_soundfocus_param soundfocus_params;
+	int sz = 0;
+	int ret  = 0;
+	int port_idx;
+	int i;
+
+	pr_debug("%s: Enter, port_id %d, copp_idx %d\n",
+		  __func__, port_id, copp_idx);
+
+	port_id = afe_convert_virtual_to_portid(port_id);
+	port_idx = adm_validate_and_get_port_index(port_id);
+	if (port_idx < 0) {
+		pr_err("%s: Invalid port_id %#x\n", __func__, port_id);
+
+		ret = -EINVAL;
+		goto done;
+	}
+
+	if (copp_idx < 0 || copp_idx >= MAX_COPPS_PER_PORT) {
+		pr_err("%s: Invalid copp_num: %d\n", __func__, copp_idx);
+
+		ret = -EINVAL;
+		goto done;
+	}
+
+	sz = sizeof(struct adm_set_fluence_soundfocus_param);
+	soundfocus_params.params.hdr.hdr_field =
+		APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, APR_HDR_LEN(APR_HDR_SIZE),
+			      APR_PKT_VER);
+	soundfocus_params.params.hdr.pkt_size = sz;
+	soundfocus_params.params.hdr.src_svc = APR_SVC_ADM;
+	soundfocus_params.params.hdr.src_domain = APR_DOMAIN_APPS;
+	soundfocus_params.params.hdr.src_port = port_id;
+	soundfocus_params.params.hdr.dest_svc = APR_SVC_ADM;
+	soundfocus_params.params.hdr.dest_domain = APR_DOMAIN_ADSP;
+	soundfocus_params.params.hdr.dest_port =
+			atomic_read(&this_adm.copp.id[port_idx][copp_idx]);
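+	/*
+	 * The token additionally carries ADM_CLIENT_ID_SOURCE_TRACKING so
+	 * the callback can tell this reply apart from ordinary set-params
+	 * acknowledgements.
+	 */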
+	soundfocus_params.params.hdr.token = port_idx << 16 |
+				ADM_CLIENT_ID_SOURCE_TRACKING << 8 | copp_idx;
+	soundfocus_params.params.hdr.opcode = ADM_CMD_SET_PP_PARAMS_V5;
+	soundfocus_params.params.payload_addr_lsw = 0;
+	soundfocus_params.params.payload_addr_msw = 0;
+	soundfocus_params.params.mem_map_handle = 0;
+	soundfocus_params.params.payload_size = sizeof(soundfocus_params) -
+				sizeof(soundfocus_params.params);
+	soundfocus_params.data.module_id = VOICEPROC_MODULE_ID_GENERIC_TX;
+	soundfocus_params.data.param_id = VOICEPROC_PARAM_ID_FLUENCE_SOUNDFOCUS;
+	soundfocus_params.data.param_size =
+		soundfocus_params.params.payload_size -
+		sizeof(soundfocus_params.data);
+	soundfocus_params.data.reserved = 0;
+
+	memset(&(soundfocus_params.soundfocus_data), 0xFF,
+		sizeof(struct adm_param_fluence_soundfocus_t));
+	for (i = 0; i < MAX_SECTORS; i++) {
+		soundfocus_params.soundfocus_data.start_angles[i] =
+			soundFocusData.start_angle[i];
+		soundfocus_params.soundfocus_data.enables[i] =
+			soundFocusData.enable[i];
+		pr_debug("%s: start_angle[%d] = %d\n",
+			  __func__, i, soundFocusData.start_angle[i]);
+		pr_debug("%s: enable[%d] = %d\n",
+			  __func__, i, soundFocusData.enable[i]);
+	}
+	soundfocus_params.soundfocus_data.gain_step =
+					soundFocusData.gain_step;
+	pr_debug("%s: gain_step = %d\n", __func__, soundFocusData.gain_step);
+
+	soundfocus_params.soundfocus_data.reserved = 0;
+
+	atomic_set(&this_adm.copp.stat[port_idx][copp_idx], -1);
+	ret = apr_send_pkt(this_adm.apr, (uint32_t *)&soundfocus_params);
+	if (ret < 0) {
+		pr_err("%s: Set params failed\n", __func__);
+
+		ret = -EINVAL;
+		goto done;
+	}
+	/* Wait for the callback */
+	ret = wait_event_timeout(this_adm.copp.wait[port_idx][copp_idx],
+		atomic_read(&this_adm.copp.stat[port_idx][copp_idx]) >= 0,
+		msecs_to_jiffies(TIMEOUT_MS));
+	if (!ret) {
+		pr_err("%s: Set params timed out\n", __func__);
+
+		ret = -EINVAL;
+		goto done;
+	}
+
+	if (this_adm.sourceTrackingData.apr_cmd_status != 0) {
+		pr_err("%s - set params returned error [%s]\n",
+			__func__, adsp_err_get_err_str(
+			this_adm.sourceTrackingData.apr_cmd_status));
+
+		ret = adsp_err_get_lnx_err_code(
+				this_adm.sourceTrackingData.apr_cmd_status);
+		goto done;
+	}
+
+	ret = 0;
+
+done:
+	pr_debug("%s: Exit, ret=%d\n", __func__, ret);
+
+	return ret;
+}
+
+int adm_get_sound_focus(int port_id, int copp_idx,
+			struct sound_focus_param *soundFocusData)
+{
+	int ret = 0, i;
+	char *params_value;
+	uint32_t param_payload_len = sizeof(struct adm_param_data_v5) +
+				sizeof(struct adm_param_fluence_soundfocus_t);
+	struct adm_param_fluence_soundfocus_t *soundfocus_params;
+
+	pr_debug("%s: Enter, port_id %d, copp_idx %d\n",
+		  __func__, port_id, copp_idx);
+
+	params_value = kzalloc(param_payload_len, GFP_KERNEL);
+	if (!params_value) {
+		pr_err("%s, params memory alloc failed\n", __func__);
+
+		ret = -ENOMEM;
+		goto done;
+	}
+	ret = adm_get_params_v2(port_id, copp_idx,
+				VOICEPROC_MODULE_ID_GENERIC_TX,
+				VOICEPROC_PARAM_ID_FLUENCE_SOUNDFOCUS,
+				param_payload_len,
+				params_value,
+				ADM_CLIENT_ID_SOURCE_TRACKING);
+	if (ret) {
+		pr_err("%s: get parameters failed ret:%d\n", __func__, ret);
+
+		kfree(params_value);
+		ret = -EINVAL;
+		goto done;
+	}
+
+	if (this_adm.sourceTrackingData.apr_cmd_status != 0) {
+		pr_err("%s - get params returned error [%s]\n",
+			__func__, adsp_err_get_err_str(
+			this_adm.sourceTrackingData.apr_cmd_status));
+
+		kfree(params_value);
+		ret = adsp_err_get_lnx_err_code(
+				this_adm.sourceTrackingData.apr_cmd_status);
+		goto done;
+	}
+
+	soundfocus_params = (struct adm_param_fluence_soundfocus_t *)
+								params_value;
+	for (i = 0; i < MAX_SECTORS; i++) {
+		soundFocusData->start_angle[i] =
+					soundfocus_params->start_angles[i];
+		soundFocusData->enable[i] = soundfocus_params->enables[i];
+		pr_debug("%s: start_angle[%d] = %d\n",
+			  __func__, i, soundFocusData->start_angle[i]);
+		pr_debug("%s: enable[%d] = %d\n",
+			  __func__, i, soundFocusData->enable[i]);
+	}
+	soundFocusData->gain_step = soundfocus_params->gain_step;
+	pr_debug("%s: gain_step = %d\n", __func__, soundFocusData->gain_step);
+
+	kfree(params_value);
+
+done:
+	pr_debug("%s: Exit, ret = %d\n", __func__, ret);
+
+	return ret;
+}
+
+static int adm_source_tracking_alloc_map_memory(void)
+{
+	int ret;
+
+	pr_debug("%s: Enter\n", __func__);
+
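+	/*
+	 * Source tracking results are read out of band: allocate an ION
+	 * buffer once and map it with the ADSP so ADM_CMD_GET_PP_PARAMS_V5
+	 * can write its payload into shared memory.
+	 */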
+	ret = msm_audio_ion_alloc("SOURCE_TRACKING",
+				  &this_adm.sourceTrackingData.ion_client,
+				  &this_adm.sourceTrackingData.ion_handle,
+				  AUD_PROC_BLOCK_SIZE,
+				  &this_adm.sourceTrackingData.memmap.paddr,
+				  &this_adm.sourceTrackingData.memmap.size,
+				  &this_adm.sourceTrackingData.memmap.kvaddr);
+	if (ret) {
+		pr_err("%s: failed to allocate memory\n", __func__);
+
+		ret = -EINVAL;
+		goto done;
+	}
+
+	atomic_set(&this_adm.mem_map_index, ADM_MEM_MAP_INDEX_SOURCE_TRACKING);
+	ret = adm_memory_map_regions(&this_adm.sourceTrackingData.memmap.paddr,
+			0,
+			(uint32_t *)&this_adm.sourceTrackingData.memmap.size,
+			1);
+	if (ret < 0) {
+		pr_err("%s: failed to map memory, paddr = 0x%pK, size = %d\n",
+			__func__,
+			(void *)this_adm.sourceTrackingData.memmap.paddr,
+			(uint32_t)this_adm.sourceTrackingData.memmap.size);
+
+		msm_audio_ion_free(this_adm.sourceTrackingData.ion_client,
+				   this_adm.sourceTrackingData.ion_handle);
+		this_adm.sourceTrackingData.ion_client = NULL;
+		this_adm.sourceTrackingData.ion_handle = NULL;
+		this_adm.sourceTrackingData.memmap.size = 0;
+		this_adm.sourceTrackingData.memmap.kvaddr = NULL;
+		this_adm.sourceTrackingData.memmap.paddr = 0;
+		this_adm.sourceTrackingData.apr_cmd_status = -1;
+		atomic_set(&this_adm.mem_map_handles
+				[ADM_MEM_MAP_INDEX_SOURCE_TRACKING], 0);
+
+		ret = -EINVAL;
+		goto done;
+	}
+	ret = 0;
+	pr_debug("%s: paddr = 0x%pK, size = %d, mem_map_handle = 0x%x\n",
+		  __func__, (void *)this_adm.sourceTrackingData.memmap.paddr,
+		  (uint32_t)this_adm.sourceTrackingData.memmap.size,
+		  atomic_read(&this_adm.mem_map_handles
+			      [ADM_MEM_MAP_INDEX_SOURCE_TRACKING]));
+
+done:
+	pr_debug("%s: Exit, ret = %d\n", __func__, ret);
+
+	return ret;
+}
+
+int adm_get_source_tracking(int port_id, int copp_idx,
+			    struct source_tracking_param *sourceTrackingData)
+{
+	struct adm_cmd_get_pp_params_v5 admp;
+	int p_idx, ret = 0, i;
+	struct adm_param_fluence_sourcetracking_t *source_tracking_params;
+
+	pr_debug("%s: Enter, port_id %d, copp_idx %d\n",
+		  __func__, port_id, copp_idx);
+
+	if (!this_adm.sourceTrackingData.memmap.paddr) {
+		/* Allocate and map shared memory for out of band usage */
+		ret = adm_source_tracking_alloc_map_memory();
+		if (ret != 0) {
+			ret = -EINVAL;
+			goto done;
+		}
+	}
+
+	port_id = afe_convert_virtual_to_portid(port_id);
+	p_idx = adm_validate_and_get_port_index(port_id);
+	if (p_idx < 0) {
+		pr_err("%s - invalid port index %i, port id %i, copp idx %i\n",
+			__func__, p_idx, port_id, copp_idx);
+
+		ret = -EINVAL;
+		goto done;
+	}
+
+	admp.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+				APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
+	admp.hdr.pkt_size = sizeof(admp);
+	admp.hdr.src_svc = APR_SVC_ADM;
+	admp.hdr.src_domain = APR_DOMAIN_APPS;
+	admp.hdr.src_port = port_id;
+	admp.hdr.dest_svc = APR_SVC_ADM;
+	admp.hdr.dest_domain = APR_DOMAIN_ADSP;
+	admp.hdr.dest_port = atomic_read(&this_adm.copp.id[p_idx][copp_idx]);
+	admp.hdr.token = p_idx << 16 | ADM_CLIENT_ID_SOURCE_TRACKING << 8 |
+			 copp_idx;
+	admp.hdr.opcode = ADM_CMD_GET_PP_PARAMS_V5;
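+	/*
+	 * Point the DSP at the mapped shared buffer so the source tracking
+	 * payload is returned out of band rather than in the APR packet.
+	 */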
+	admp.data_payload_addr_lsw =
+		lower_32_bits(this_adm.sourceTrackingData.memmap.paddr);
+	admp.data_payload_addr_msw =
+		msm_audio_populate_upper_32_bits(
+				this_adm.sourceTrackingData.memmap.paddr);
+	admp.mem_map_handle = atomic_read(&this_adm.mem_map_handles[
+					  ADM_MEM_MAP_INDEX_SOURCE_TRACKING]);
+	admp.module_id = VOICEPROC_MODULE_ID_GENERIC_TX;
+	admp.param_id = VOICEPROC_PARAM_ID_FLUENCE_SOURCETRACKING;
+	admp.param_max_size = sizeof(struct adm_param_fluence_sourcetracking_t)
+				+ sizeof(struct adm_param_data_v5);
+	admp.reserved = 0;
+
+	atomic_set(&this_adm.copp.stat[p_idx][copp_idx], -1);
+
+	ret = apr_send_pkt(this_adm.apr, (uint32_t *)&admp);
+	if (ret < 0) {
+		pr_err("%s - failed to get Source Tracking Params\n",
+			__func__);
+
+		ret = -EINVAL;
+		goto done;
+	}
+	ret = wait_event_timeout(this_adm.copp.wait[p_idx][copp_idx],
+			atomic_read(&this_adm.copp.stat[p_idx][copp_idx]) >= 0,
+			msecs_to_jiffies(TIMEOUT_MS));
+	if (!ret) {
+		pr_err("%s - get params timed out\n", __func__);
+
+		ret = -EINVAL;
+		goto done;
+	} else if (atomic_read(&this_adm.copp.stat
+				[p_idx][copp_idx]) > 0) {
+		pr_err("%s: DSP returned error[%s]\n",
+			__func__, adsp_err_get_err_str(
+			atomic_read(&this_adm.copp.stat
+			[p_idx][copp_idx])));
+		ret = adsp_err_get_lnx_err_code(
+				atomic_read(&this_adm.copp.stat
+					[p_idx][copp_idx]));
+		goto done;
+	}
+
+	if (this_adm.sourceTrackingData.apr_cmd_status != 0) {
+		pr_err("%s - get params returned error [%s]\n",
+			__func__, adsp_err_get_err_str(
+			this_adm.sourceTrackingData.apr_cmd_status));
+
+		ret = adsp_err_get_lnx_err_code(
+				this_adm.sourceTrackingData.apr_cmd_status);
+		goto done;
+	}
+
+	source_tracking_params = (struct adm_param_fluence_sourcetracking_t *)
+			(this_adm.sourceTrackingData.memmap.kvaddr +
+			 sizeof(struct adm_param_data_v5));
+	for (i = 0; i < MAX_SECTORS; i++) {
+		sourceTrackingData->vad[i] = source_tracking_params->vad[i];
+		pr_debug("%s: vad[%d] = %d\n",
+			  __func__, i, sourceTrackingData->vad[i]);
+	}
+	sourceTrackingData->doa_speech = source_tracking_params->doa_speech;
+	pr_debug("%s: doa_speech = %d\n",
+		  __func__, sourceTrackingData->doa_speech);
+
+	for (i = 0; i < MAX_NOISE_SOURCE_INDICATORS; i++) {
+		sourceTrackingData->doa_noise[i] =
+					source_tracking_params->doa_noise[i];
+		pr_debug("%s: doa_noise[%d] = %d\n",
+			  __func__, i, sourceTrackingData->doa_noise[i]);
+	}
+	for (i = 0; i < MAX_POLAR_ACTIVITY_INDICATORS; i++) {
+		sourceTrackingData->polar_activity[i] =
+				source_tracking_params->polar_activity[i];
+		pr_debug("%s: polar_activity[%d] = %d\n",
+			  __func__, i, sourceTrackingData->polar_activity[i]);
+	}
+
+	ret = 0;
+
+done:
+	pr_debug("%s: Exit, ret=%d\n", __func__, ret);
+
+	return ret;
+}
+
+static int __init adm_init(void)
+{
+	int i = 0, j;
+
+	this_adm.apr = NULL;
+	this_adm.ec_ref_rx = -1;
+	this_adm.num_ec_ref_rx_chans = 0;
+	this_adm.ec_ref_rx_bit_width = 0;
+	this_adm.ec_ref_rx_sampling_rate = 0;
+	atomic_set(&this_adm.matrix_map_stat, 0);
+	init_waitqueue_head(&this_adm.matrix_map_wait);
+	atomic_set(&this_adm.adm_stat, 0);
+	init_waitqueue_head(&this_adm.adm_wait);
+
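+	/* reset per-copp bookkeeping for every port/copp pair */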
+	for (i = 0; i < AFE_MAX_PORTS; i++) {
+		for (j = 0; j < MAX_COPPS_PER_PORT; j++) {
+			atomic_set(&this_adm.copp.id[i][j], RESET_COPP_ID);
+			atomic_set(&this_adm.copp.cnt[i][j], 0);
+			atomic_set(&this_adm.copp.topology[i][j], 0);
+			atomic_set(&this_adm.copp.mode[i][j], 0);
+			atomic_set(&this_adm.copp.stat[i][j], 0);
+			atomic_set(&this_adm.copp.rate[i][j], 0);
+			atomic_set(&this_adm.copp.channels[i][j], 0);
+			atomic_set(&this_adm.copp.bit_width[i][j], 0);
+			atomic_set(&this_adm.copp.app_type[i][j], 0);
+			atomic_set(&this_adm.copp.acdb_id[i][j], 0);
+			init_waitqueue_head(&this_adm.copp.wait[i][j]);
+			atomic_set(&this_adm.copp.adm_delay_stat[i][j], 0);
+			init_waitqueue_head(
+				&this_adm.copp.adm_delay_wait[i][j]);
+			this_adm.copp.adm_delay[i][j] = 0;
+			this_adm.copp.adm_status[i][j] = 0;
+		}
+	}
+
+	if (adm_init_cal_data())
+		pr_err("%s: could not init cal data!\n", __func__);
+
+	this_adm.sourceTrackingData.ion_client = NULL;
+	this_adm.sourceTrackingData.ion_handle = NULL;
+	this_adm.sourceTrackingData.memmap.size = 0;
+	this_adm.sourceTrackingData.memmap.kvaddr = NULL;
+	this_adm.sourceTrackingData.memmap.paddr = 0;
+	this_adm.sourceTrackingData.apr_cmd_status = -1;
+	atomic_set(&this_adm.mem_map_handles[ADM_MEM_MAP_INDEX_SOURCE_TRACKING],
+		   0);
+
+	return 0;
+}
+
+static void __exit adm_exit(void)
+{
+	adm_delete_cal_data();
+}
+
+device_initcall(adm_init);
+module_exit(adm_exit);
diff -Nruw linux-4.4.115-fbx/sound/soc/msm./qdsp6v2/q6afe.c linux-4.4.115-fbx/sound/soc/msm/qdsp6v2/q6afe.c
--- linux-4.4.115-fbx/sound/soc/msm./qdsp6v2/q6afe.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/sound/soc/msm/qdsp6v2/q6afe.c	2019-10-29 09:26:26.161227819 +0100
@@ -0,0 +1,7177 @@
+/* Copyright (c) 2012-2017, 2019 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#include <linux/slab.h>
+#include <linux/debugfs.h>
+#include <linux/kernel.h>
+#include <linux/kthread.h>
+#include <linux/uaccess.h>
+#include <linux/wait.h>
+#include <linux/wakelock.h>
+#include <linux/jiffies.h>
+#include <linux/sched.h>
+#include <linux/msm_audio_ion.h>
+#include <linux/delay.h>
+#include <sound/apr_audio-v2.h>
+#include <sound/q6afe-v2.h>
+#include <sound/q6audio-v2.h>
+#include "msm-pcm-routing-v2.h"
+#include <sound/audio_cal_utils.h>
+#include <sound/adsp_err.h>
+#include <linux/qdsp6v2/apr_tal.h>
+
+#define WAKELOCK_TIMEOUT	5000
+enum {
+	AFE_COMMON_RX_CAL = 0,
+	AFE_COMMON_TX_CAL,
+	AFE_LSM_TX_CAL,
+	AFE_AANC_CAL,
+	AFE_FB_SPKR_PROT_CAL,
+	AFE_HW_DELAY_CAL,
+	AFE_SIDETONE_CAL,
+	AFE_SIDETONE_IIR_CAL,
+	AFE_TOPOLOGY_CAL,
+	AFE_LSM_TOPOLOGY_CAL,
+	AFE_CUST_TOPOLOGY_CAL,
+	AFE_FB_SPKR_PROT_TH_VI_CAL,
+	AFE_FB_SPKR_PROT_EX_VI_CAL,
+	MAX_AFE_CAL_TYPES
+};
+
+enum fbsp_state {
+	FBSP_INCORRECT_OP_MODE,
+	FBSP_INACTIVE,
+	FBSP_WARMUP,
+	FBSP_IN_PROGRESS,
+	FBSP_SUCCESS,
+	FBSP_FAILED,
+	MAX_FBSP_STATE
+};
+
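+/* human-readable names for the FBSP states above */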
+static char fbsp_state[MAX_FBSP_STATE][50] = {
+	[FBSP_INCORRECT_OP_MODE] = "incorrect operation mode",
+	[FBSP_INACTIVE] = "port not started",
+	[FBSP_WARMUP] = "waiting for warmup",
+	[FBSP_IN_PROGRESS] = "in progress state",
+	[FBSP_SUCCESS] = "success",
+	[FBSP_FAILED] = "failed"
+};
+
+enum {
+	USE_CALIBRATED_R0TO,
+	USE_SAFE_R0TO
+};
+
+enum {
+	QUICK_CALIB_DISABLE,
+	QUICK_CALIB_ENABLE
+};
+
+enum {
+	Q6AFE_MSM_SPKR_PROCESSING = 0,
+	Q6AFE_MSM_SPKR_CALIBRATION,
+	Q6AFE_MSM_SPKR_FTM_MODE
+};
+
+struct wlock {
+	struct wakeup_source ws;
+};
+
+static struct wlock wl;
+
+struct afe_ctl {
+	void *apr;
+	atomic_t state;
+	atomic_t status;
+	wait_queue_head_t wait[AFE_MAX_PORTS];
+	struct task_struct *task;
+	void (*tx_cb) (uint32_t opcode,
+		uint32_t token, uint32_t *payload, void *priv);
+	void (*rx_cb) (uint32_t opcode,
+		uint32_t token, uint32_t *payload, void *priv);
+	void *tx_private_data;
+	void *rx_private_data;
+	uint32_t mmap_handle;
+
+	int	topology[AFE_MAX_PORTS];
+	struct cal_type_data *cal_data[MAX_AFE_CAL_TYPES];
+
+	atomic_t mem_map_cal_handles[MAX_AFE_CAL_TYPES];
+	atomic_t mem_map_cal_index;
+	u32 afe_cal_mode[AFE_MAX_PORTS];
+
+	u16 dtmf_gen_rx_portid;
+	struct audio_cal_info_spk_prot_cfg	prot_cfg;
+	struct afe_spkr_prot_calib_get_resp	calib_data;
+	struct audio_cal_info_sp_th_vi_ftm_cfg	th_ftm_cfg;
+	struct audio_cal_info_sp_ex_vi_ftm_cfg	ex_ftm_cfg;
+	struct afe_sp_th_vi_get_param_resp	th_vi_resp;
+	struct afe_sp_ex_vi_get_param_resp	ex_vi_resp;
+	struct afe_av_dev_drift_get_param_resp	av_dev_drift_resp;
+	int vi_tx_port;
+	int vi_rx_port;
+	uint32_t afe_sample_rates[AFE_MAX_PORTS];
+	struct aanc_data aanc_info;
+	struct mutex afe_cmd_lock;
+	int set_custom_topology;
+	int dev_acdb_id[AFE_MAX_PORTS];
+	routing_cb rt_cb;
+};
+
+static atomic_t afe_ports_mad_type[SLIMBUS_PORT_LAST - SLIMBUS_0_RX];
+static unsigned long afe_configured_cmd;
+
+static struct afe_ctl this_afe;
+
+#define TIMEOUT_MS 1000
+#define Q6AFE_MAX_VOLUME 0x3FFF
+
+static int pcm_afe_instance[2];
+static int proxy_afe_instance[2];
+bool afe_close_done[2] = {true, true};
+
+#define SIZEOF_CFG_CMD(y) \
+		(sizeof(struct apr_hdr) + sizeof(u16) + (sizeof(struct y)))
+
+static int afe_get_cal_hw_delay(int32_t path,
+				struct audio_cal_hw_delay_entry *entry);
+static int remap_cal_data(struct cal_block_data *cal_block, int cal_index);
+
+int afe_get_topology(int port_id)
+{
+	int topology;
+	int port_index = afe_get_port_index(port_id);
+
+	if ((port_index < 0) || (port_index >= AFE_MAX_PORTS)) {
+		pr_err("%s: Invalid port index %d\n", __func__, port_index);
+		topology = -EINVAL;
+		goto done;
+	}
+
+	topology = this_afe.topology[port_index];
+done:
+	return topology;
+}
+
+void afe_set_aanc_info(struct aanc_data *q6_aanc_info)
+{
+	this_afe.aanc_info.aanc_active = q6_aanc_info->aanc_active;
+	this_afe.aanc_info.aanc_rx_port = q6_aanc_info->aanc_rx_port;
+	this_afe.aanc_info.aanc_tx_port = q6_aanc_info->aanc_tx_port;
+
+	pr_debug("%s: aanc active is %d rx port is 0x%x, tx port is 0x%x\n",
+		__func__,
+		this_afe.aanc_info.aanc_active,
+		this_afe.aanc_info.aanc_rx_port,
+		this_afe.aanc_info.aanc_tx_port);
+}
+
+static void afe_callback_debug_print(struct apr_client_data *data)
+{
+	uint32_t *payload = data->payload;
+
+	if (data->payload_size >= 8)
+		pr_debug("%s: code = 0x%x PL#0[0x%x], PL#1[0x%x], size = %d\n",
+			__func__, data->opcode, payload[0], payload[1],
+			data->payload_size);
+	else if (data->payload_size >= 4)
+		pr_debug("%s: code = 0x%x PL#0[0x%x], size = %d\n",
+			__func__, data->opcode, payload[0],
+			data->payload_size);
+	else
+		pr_debug("%s: code = 0x%x, size = %d\n",
+			__func__, data->opcode, data->payload_size);
+}
+
+static void av_dev_drift_afe_cb_handler(uint32_t *payload,
+					uint32_t payload_size)
+{
+	u32 param_id;
+	struct afe_av_dev_drift_get_param_resp *resp =
+		(struct afe_av_dev_drift_get_param_resp *) payload;
+
+	if (!resp) {
+		pr_err("%s: Error: resp is NULL\n", __func__);
+		return;
+	}
+
+	param_id = resp->pdata.param_id;
+	if (param_id == AFE_PARAM_ID_DEV_TIMING_STATS) {
+		if (payload_size < sizeof(this_afe.av_dev_drift_resp)) {
+			pr_err("%s: Error: received size %d, resp size %zu\n",
+				__func__, payload_size,
+				sizeof(this_afe.av_dev_drift_resp));
+			return;
+		}
+		memcpy(&this_afe.av_dev_drift_resp, payload,
+				sizeof(this_afe.av_dev_drift_resp));
+		if (!this_afe.av_dev_drift_resp.status) {
+			atomic_set(&this_afe.state, 0);
+		} else {
+			pr_debug("%s: av_dev_drift_resp status: %d", __func__,
+				  this_afe.av_dev_drift_resp.status);
+			atomic_set(&this_afe.state, -1);
+		}
+	}
+}
+
+static int32_t sp_make_afe_callback(uint32_t *payload, uint32_t payload_size)
+{
+	u32 param_id;
+	struct afe_spkr_prot_calib_get_resp *resp =
+		(struct afe_spkr_prot_calib_get_resp *) payload;
+
+	if (!resp) {
+		pr_err("%s: Error: resp is NULL\n", __func__);
+		return -EINVAL;
+	}
+
+	param_id = resp->pdata.param_id;
+	if (param_id == AFE_PARAM_ID_CALIB_RES_CFG_V2) {
+		if (payload_size < sizeof(this_afe.calib_data)) {
+			pr_err("%s: Error: received size %d, calib_data size %zu\n",
+				__func__, payload_size,
+				sizeof(this_afe.calib_data));
+			return -EINVAL;
+		}
+		memcpy(&this_afe.calib_data, payload,
+			sizeof(this_afe.calib_data));
+		if (!this_afe.calib_data.status) {
+			atomic_set(&this_afe.state, 0);
+		} else {
+			pr_debug("%s: calib resp status: %d", __func__,
+				  this_afe.calib_data.status);
+			atomic_set(&this_afe.state, -1);
+		}
+	}
+	if (param_id == AFE_PARAM_ID_SP_V2_TH_VI_FTM_PARAMS) {
+		if (payload_size < sizeof(this_afe.th_vi_resp)) {
+			pr_err("%s: Error: received size %d, th_vi_resp size %zu\n",
+				__func__, payload_size,
+				sizeof(this_afe.th_vi_resp));
+			return -EINVAL;
+		}
+		memcpy(&this_afe.th_vi_resp, payload,
+			sizeof(this_afe.th_vi_resp));
+		if (!this_afe.th_vi_resp.status) {
+			atomic_set(&this_afe.state, 0);
+		} else {
+			pr_debug("%s: th vi resp status: %d", __func__,
+				  this_afe.th_vi_resp.status);
+			atomic_set(&this_afe.state, -1);
+		}
+	}
+	if (param_id == AFE_PARAM_ID_SP_V2_EX_VI_FTM_PARAMS) {
+		if (payload_size < sizeof(this_afe.ex_vi_resp)) {
+			pr_err("%s: Error: received size %d, ex_vi_resp size %zu\n",
+				__func__, payload_size,
+				sizeof(this_afe.ex_vi_resp));
+			return -EINVAL;
+		}
+		memcpy(&this_afe.ex_vi_resp, payload,
+			sizeof(this_afe.ex_vi_resp));
+		if (!this_afe.ex_vi_resp.status) {
+			atomic_set(&this_afe.state, 0);
+		} else {
+			pr_debug("%s: ex vi resp status: %d", __func__,
+				  this_afe.ex_vi_resp.status);
+			atomic_set(&this_afe.state, -1);
+		}
+	}
+
+	return 0;
+}
+
+static bool afe_token_is_valid(uint32_t token)
+{
+	if (token >= AFE_MAX_PORTS) {
+		pr_err("%s: token %d is invalid.\n", __func__, token);
+		return false;
+	}
+	return true;
+}
+
+static int32_t afe_callback(struct apr_client_data *data, void *priv)
+{
+	if (!data) {
+		pr_err("%s: Invalid param data\n", __func__);
+		return -EINVAL;
+	}
+	if (data->opcode == RESET_EVENTS) {
+		pr_debug("%s: reset event = %d %d apr[%pK]\n",
+			__func__,
+			data->reset_event, data->reset_proc, this_afe.apr);
+
+		cal_utils_clear_cal_block_q6maps(MAX_AFE_CAL_TYPES,
+			this_afe.cal_data);
+
+		/* Reset the custom topology mode: to resend again to AFE. */
+		mutex_lock(&this_afe.cal_data[AFE_CUST_TOPOLOGY_CAL]->lock);
+		this_afe.set_custom_topology = 1;
+		mutex_unlock(&this_afe.cal_data[AFE_CUST_TOPOLOGY_CAL]->lock);
+		rtac_clear_mapping(AFE_RTAC_CAL);
+
+		if (this_afe.apr) {
+			apr_reset(this_afe.apr);
+			atomic_set(&this_afe.state, 0);
+			this_afe.apr = NULL;
+			rtac_set_afe_handle(this_afe.apr);
+		}
+		/* send info to user */
+		if (this_afe.task == NULL)
+			this_afe.task = current;
+		pr_debug("%s: task_name = %s pid = %d\n",
+			__func__,
+			this_afe.task->comm, this_afe.task->pid);
+
+		/*
+		 * Pass reset events to proxy driver, if cb is registered
+		 */
+		if (this_afe.tx_cb) {
+			this_afe.tx_cb(data->opcode, data->token,
+					data->payload,
+					this_afe.tx_private_data);
+			this_afe.tx_cb = NULL;
+		}
+		if (this_afe.rx_cb) {
+			this_afe.rx_cb(data->opcode, data->token,
+					data->payload,
+					this_afe.rx_private_data);
+			this_afe.rx_cb = NULL;
+		}
+
+		return 0;
+	}
+	afe_callback_debug_print(data);
+	if (data->opcode == AFE_PORT_CMDRSP_GET_PARAM_V2) {
+		uint32_t *payload = data->payload;
+
+		if (!payload || (data->token >= AFE_MAX_PORTS)) {
+			pr_err("%s: Error: size %d payload %pK token %d\n",
+				__func__, data->payload_size,
+				payload, data->token);
+			return -EINVAL;
+		}
+
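+		/*
+		 * GET_PARAM_V2 responses start with status, module id and
+		 * param id, so payload[2] identifies the returned parameter.
+		 */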
+		if (payload[2] == AFE_PARAM_ID_DEV_TIMING_STATS) {
+			av_dev_drift_afe_cb_handler(data->payload,
+						    data->payload_size);
+		} else {
+			if (rtac_make_afe_callback(data->payload,
+						   data->payload_size))
+				return 0;
+
+			if (sp_make_afe_callback(data->payload,
+						 data->payload_size))
+				return -EINVAL;
+		}
+		wake_up(&this_afe.wait[data->token]);
+	} else if (data->payload_size) {
+		uint32_t *payload = data->payload;
+		uint16_t port_id = 0;
+
+		if (data->opcode == APR_BASIC_RSP_RESULT) {
+			pr_debug("%s:opcode = 0x%x cmd = 0x%x status = 0x%x token=%d\n",
+				__func__, data->opcode,
+				payload[0], payload[1], data->token);
+			/* payload[1] contains the error status for response */
+			if (payload[1] != 0) {
+				atomic_set(&this_afe.status, payload[1]);
+				pr_err("%s: cmd = 0x%x returned error = 0x%x\n",
+					__func__, payload[0], payload[1]);
+			}
+			switch (payload[0]) {
+			case AFE_PORT_CMD_SET_PARAM_V2:
+				if (rtac_make_afe_callback(payload,
+					data->payload_size))
+					return 0;
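+				/* else fall through to common handling */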
+			case AFE_PORT_CMD_DEVICE_STOP:
+			case AFE_PORT_CMD_DEVICE_START:
+			case AFE_PSEUDOPORT_CMD_START:
+			case AFE_PSEUDOPORT_CMD_STOP:
+			case AFE_SERVICE_CMD_SHARED_MEM_MAP_REGIONS:
+			case AFE_SERVICE_CMD_SHARED_MEM_UNMAP_REGIONS:
+			case AFE_SERVICE_CMD_UNREGISTER_RT_PORT_DRIVER:
+			case AFE_PORTS_CMD_DTMF_CTL:
+			case AFE_SVC_CMD_SET_PARAM:
+				atomic_set(&this_afe.state, 0);
+				if (afe_token_is_valid(data->token))
+					wake_up(&this_afe.wait[data->token]);
+				else
+					return -EINVAL;
+				break;
+			case AFE_SERVICE_CMD_REGISTER_RT_PORT_DRIVER:
+				break;
+			case AFE_PORT_DATA_CMD_RT_PROXY_PORT_WRITE_V2:
+				port_id = RT_PROXY_PORT_001_TX;
+				break;
+			case AFE_PORT_DATA_CMD_RT_PROXY_PORT_READ_V2:
+				port_id = RT_PROXY_PORT_001_RX;
+				break;
+			case AFE_CMD_ADD_TOPOLOGIES:
+				atomic_set(&this_afe.state, 0);
+				if (afe_token_is_valid(data->token))
+					wake_up(&this_afe.wait[data->token]);
+				else
+					return -EINVAL;
+				pr_debug("%s: AFE_CMD_ADD_TOPOLOGIES cmd 0x%x\n",
+						__func__, payload[1]);
+				break;
+			default:
+				pr_err("%s: Unknown cmd 0x%x\n", __func__,
+						payload[0]);
+				break;
+			}
+		} else if (data->opcode ==
+				AFE_SERVICE_CMDRSP_SHARED_MEM_MAP_REGIONS) {
+			pr_debug("%s: mmap_handle: 0x%x, cal index %d\n",
+				 __func__, payload[0],
+				 atomic_read(&this_afe.mem_map_cal_index));
+			if (atomic_read(&this_afe.mem_map_cal_index) != -1)
+				atomic_set(&this_afe.mem_map_cal_handles[
+					atomic_read(
+					&this_afe.mem_map_cal_index)],
+					(uint32_t)payload[0]);
+			else
+				this_afe.mmap_handle = payload[0];
+			atomic_set(&this_afe.state, 0);
+			if (afe_token_is_valid(data->token))
+				wake_up(&this_afe.wait[data->token]);
+			else
+				return -EINVAL;
+		} else if (data->opcode == AFE_EVENT_RT_PROXY_PORT_STATUS) {
+			port_id = (uint16_t)(0x0000FFFF & payload[0]);
+		}
+		pr_debug("%s: port_id = 0x%x\n", __func__, port_id);
+		switch (port_id) {
+		case RT_PROXY_PORT_001_TX: {
+			if (this_afe.tx_cb) {
+				this_afe.tx_cb(data->opcode, data->token,
+					data->payload,
+					this_afe.tx_private_data);
+			}
+			break;
+		}
+		case RT_PROXY_PORT_001_RX: {
+			if (this_afe.rx_cb) {
+				this_afe.rx_cb(data->opcode, data->token,
+					data->payload,
+					this_afe.rx_private_data);
+			}
+			break;
+		}
+		default:
+			pr_debug("%s: default case 0x%x\n", __func__, port_id);
+			break;
+		}
+	}
+	return 0;
+}
+
+int afe_get_port_type(u16 port_id)
+{
+	int ret;
+
+	switch (port_id) {
+	case PRIMARY_I2S_RX:
+	case SECONDARY_I2S_RX:
+	case MI2S_RX:
+	case HDMI_RX:
+	case DISPLAY_PORT_RX:
+	case AFE_PORT_ID_SPDIF_RX:
+	case SLIMBUS_0_RX:
+	case SLIMBUS_1_RX:
+	case SLIMBUS_2_RX:
+	case SLIMBUS_3_RX:
+	case SLIMBUS_4_RX:
+	case SLIMBUS_5_RX:
+	case SLIMBUS_6_RX:
+	case SLIMBUS_7_RX:
+	case SLIMBUS_8_RX:
+	case INT_BT_SCO_RX:
+	case INT_BT_A2DP_RX:
+	case INT_FM_RX:
+	case VOICE_PLAYBACK_TX:
+	case VOICE2_PLAYBACK_TX:
+	case RT_PROXY_PORT_001_RX:
+	case AUDIO_PORT_ID_I2S_RX:
+	case AFE_PORT_ID_PRIMARY_MI2S_RX:
+	case AFE_PORT_ID_SECONDARY_MI2S_RX:
+	case AFE_PORT_ID_SECONDARY_MI2S_RX_SD1:
+	case AFE_PORT_ID_TERTIARY_MI2S_RX:
+	case AFE_PORT_ID_QUATERNARY_MI2S_RX:
+	case AFE_PORT_ID_QUINARY_MI2S_RX:
+	case AFE_PORT_ID_PRIMARY_PCM_RX:
+	case AFE_PORT_ID_SECONDARY_PCM_RX:
+	case AFE_PORT_ID_TERTIARY_PCM_RX:
+	case AFE_PORT_ID_QUATERNARY_PCM_RX:
+	case AFE_PORT_ID_PRIMARY_TDM_RX:
+	case AFE_PORT_ID_PRIMARY_TDM_RX_1:
+	case AFE_PORT_ID_PRIMARY_TDM_RX_2:
+	case AFE_PORT_ID_PRIMARY_TDM_RX_3:
+	case AFE_PORT_ID_PRIMARY_TDM_RX_4:
+	case AFE_PORT_ID_PRIMARY_TDM_RX_5:
+	case AFE_PORT_ID_PRIMARY_TDM_RX_6:
+	case AFE_PORT_ID_PRIMARY_TDM_RX_7:
+	case AFE_PORT_ID_SECONDARY_TDM_RX:
+	case AFE_PORT_ID_SECONDARY_TDM_RX_1:
+	case AFE_PORT_ID_SECONDARY_TDM_RX_2:
+	case AFE_PORT_ID_SECONDARY_TDM_RX_3:
+	case AFE_PORT_ID_SECONDARY_TDM_RX_4:
+	case AFE_PORT_ID_SECONDARY_TDM_RX_5:
+	case AFE_PORT_ID_SECONDARY_TDM_RX_6:
+	case AFE_PORT_ID_SECONDARY_TDM_RX_7:
+	case AFE_PORT_ID_TERTIARY_TDM_RX:
+	case AFE_PORT_ID_TERTIARY_TDM_RX_1:
+	case AFE_PORT_ID_TERTIARY_TDM_RX_2:
+	case AFE_PORT_ID_TERTIARY_TDM_RX_3:
+	case AFE_PORT_ID_TERTIARY_TDM_RX_4:
+	case AFE_PORT_ID_TERTIARY_TDM_RX_5:
+	case AFE_PORT_ID_TERTIARY_TDM_RX_6:
+	case AFE_PORT_ID_TERTIARY_TDM_RX_7:
+	case AFE_PORT_ID_QUATERNARY_TDM_RX:
+	case AFE_PORT_ID_QUATERNARY_TDM_RX_1:
+	case AFE_PORT_ID_QUATERNARY_TDM_RX_2:
+	case AFE_PORT_ID_QUATERNARY_TDM_RX_3:
+	case AFE_PORT_ID_QUATERNARY_TDM_RX_4:
+	case AFE_PORT_ID_QUATERNARY_TDM_RX_5:
+	case AFE_PORT_ID_QUATERNARY_TDM_RX_6:
+	case AFE_PORT_ID_QUATERNARY_TDM_RX_7:
+	case AFE_PORT_ID_USB_RX:
+	case AFE_PORT_ID_INT0_MI2S_RX:
+	case AFE_PORT_ID_INT1_MI2S_RX:
+	case AFE_PORT_ID_INT2_MI2S_RX:
+	case AFE_PORT_ID_INT3_MI2S_RX:
+	case AFE_PORT_ID_INT4_MI2S_RX:
+	case AFE_PORT_ID_INT5_MI2S_RX:
+	case AFE_PORT_ID_INT6_MI2S_RX:
+		ret = MSM_AFE_PORT_TYPE_RX;
+		break;
+
+	case PRIMARY_I2S_TX:
+	case SECONDARY_I2S_TX:
+	case MI2S_TX:
+	case DIGI_MIC_TX:
+	case VOICE_RECORD_TX:
+	case SLIMBUS_0_TX:
+	case SLIMBUS_1_TX:
+	case SLIMBUS_2_TX:
+	case SLIMBUS_3_TX:
+	case SLIMBUS_4_TX:
+	case SLIMBUS_5_TX:
+	case SLIMBUS_6_TX:
+	case SLIMBUS_7_TX:
+	case SLIMBUS_8_TX:
+	case INT_FM_TX:
+	case VOICE_RECORD_RX:
+	case INT_BT_SCO_TX:
+	case RT_PROXY_PORT_001_TX:
+	case AFE_PORT_ID_PRIMARY_MI2S_TX:
+	case AFE_PORT_ID_SECONDARY_MI2S_TX:
+	case AFE_PORT_ID_TERTIARY_MI2S_TX:
+	case AFE_PORT_ID_QUATERNARY_MI2S_TX:
+	case AFE_PORT_ID_QUINARY_MI2S_TX:
+	case AFE_PORT_ID_SENARY_MI2S_TX:
+	case AFE_PORT_ID_PRIMARY_PCM_TX:
+	case AFE_PORT_ID_SECONDARY_PCM_TX:
+	case AFE_PORT_ID_TERTIARY_PCM_TX:
+	case AFE_PORT_ID_QUATERNARY_PCM_TX:
+	case AFE_PORT_ID_PRIMARY_TDM_TX:
+	case AFE_PORT_ID_PRIMARY_TDM_TX_1:
+	case AFE_PORT_ID_PRIMARY_TDM_TX_2:
+	case AFE_PORT_ID_PRIMARY_TDM_TX_3:
+	case AFE_PORT_ID_PRIMARY_TDM_TX_4:
+	case AFE_PORT_ID_PRIMARY_TDM_TX_5:
+	case AFE_PORT_ID_PRIMARY_TDM_TX_6:
+	case AFE_PORT_ID_PRIMARY_TDM_TX_7:
+	case AFE_PORT_ID_SECONDARY_TDM_TX:
+	case AFE_PORT_ID_SECONDARY_TDM_TX_1:
+	case AFE_PORT_ID_SECONDARY_TDM_TX_2:
+	case AFE_PORT_ID_SECONDARY_TDM_TX_3:
+	case AFE_PORT_ID_SECONDARY_TDM_TX_4:
+	case AFE_PORT_ID_SECONDARY_TDM_TX_5:
+	case AFE_PORT_ID_SECONDARY_TDM_TX_6:
+	case AFE_PORT_ID_SECONDARY_TDM_TX_7:
+	case AFE_PORT_ID_TERTIARY_TDM_TX:
+	case AFE_PORT_ID_TERTIARY_TDM_TX_1:
+	case AFE_PORT_ID_TERTIARY_TDM_TX_2:
+	case AFE_PORT_ID_TERTIARY_TDM_TX_3:
+	case AFE_PORT_ID_TERTIARY_TDM_TX_4:
+	case AFE_PORT_ID_TERTIARY_TDM_TX_5:
+	case AFE_PORT_ID_TERTIARY_TDM_TX_6:
+	case AFE_PORT_ID_TERTIARY_TDM_TX_7:
+	case AFE_PORT_ID_QUATERNARY_TDM_TX:
+	case AFE_PORT_ID_QUATERNARY_TDM_TX_1:
+	case AFE_PORT_ID_QUATERNARY_TDM_TX_2:
+	case AFE_PORT_ID_QUATERNARY_TDM_TX_3:
+	case AFE_PORT_ID_QUATERNARY_TDM_TX_4:
+	case AFE_PORT_ID_QUATERNARY_TDM_TX_5:
+	case AFE_PORT_ID_QUATERNARY_TDM_TX_6:
+	case AFE_PORT_ID_QUATERNARY_TDM_TX_7:
+	case AFE_PORT_ID_USB_TX:
+	case AFE_PORT_ID_INT0_MI2S_TX:
+	case AFE_PORT_ID_INT1_MI2S_TX:
+	case AFE_PORT_ID_INT2_MI2S_TX:
+	case AFE_PORT_ID_INT3_MI2S_TX:
+	case AFE_PORT_ID_INT4_MI2S_TX:
+	case AFE_PORT_ID_INT5_MI2S_TX:
+	case AFE_PORT_ID_INT6_MI2S_TX:
+		ret = MSM_AFE_PORT_TYPE_TX;
+		break;
+
+	default:
+		WARN_ON(1);
+		pr_err("%s: Invalid port id = 0x%x\n",
+			__func__, port_id);
+		ret = -EINVAL;
+	}
+
+	return ret;
+}
+
+int afe_sizeof_cfg_cmd(u16 port_id)
+{
+	int ret_size;
+
+	switch (port_id) {
+	case PRIMARY_I2S_RX:
+	case PRIMARY_I2S_TX:
+	case SECONDARY_I2S_RX:
+	case SECONDARY_I2S_TX:
+	case MI2S_RX:
+	case MI2S_TX:
+	case AFE_PORT_ID_PRIMARY_MI2S_RX:
+	case AFE_PORT_ID_PRIMARY_MI2S_TX:
+	case AFE_PORT_ID_QUATERNARY_MI2S_RX:
+	case AFE_PORT_ID_QUATERNARY_MI2S_TX:
+	case AFE_PORT_ID_QUINARY_MI2S_RX:
+	case AFE_PORT_ID_QUINARY_MI2S_TX:
+		ret_size = SIZEOF_CFG_CMD(afe_param_id_i2s_cfg);
+		break;
+	case HDMI_RX:
+	case DISPLAY_PORT_RX:
+		ret_size =
+		SIZEOF_CFG_CMD(afe_param_id_hdmi_multi_chan_audio_cfg);
+		break;
+	case SLIMBUS_0_RX:
+	case SLIMBUS_0_TX:
+	case SLIMBUS_1_RX:
+	case SLIMBUS_1_TX:
+	case SLIMBUS_2_RX:
+	case SLIMBUS_2_TX:
+	case SLIMBUS_3_RX:
+	case SLIMBUS_3_TX:
+	case SLIMBUS_4_RX:
+	case SLIMBUS_4_TX:
+	case SLIMBUS_5_RX:
+	case SLIMBUS_5_TX:
+	case SLIMBUS_6_RX:
+	case SLIMBUS_6_TX:
+	case SLIMBUS_7_RX:
+	case SLIMBUS_7_TX:
+	case SLIMBUS_8_RX:
+	case SLIMBUS_8_TX:
+		ret_size = SIZEOF_CFG_CMD(afe_param_id_slimbus_cfg);
+		break;
+	case VOICE_PLAYBACK_TX:
+	case VOICE2_PLAYBACK_TX:
+	case VOICE_RECORD_RX:
+	case VOICE_RECORD_TX:
+		ret_size = SIZEOF_CFG_CMD(afe_param_id_pseudo_port_cfg);
+		break;
+	case RT_PROXY_PORT_001_RX:
+	case RT_PROXY_PORT_001_TX:
+		ret_size = SIZEOF_CFG_CMD(afe_param_id_rt_proxy_port_cfg);
+		break;
+	case AFE_PORT_ID_USB_RX:
+	case AFE_PORT_ID_USB_TX:
+		ret_size = SIZEOF_CFG_CMD(afe_param_id_usb_audio_cfg);
+		break;
+	case AFE_PORT_ID_PRIMARY_PCM_RX:
+	case AFE_PORT_ID_PRIMARY_PCM_TX:
+	case AFE_PORT_ID_SECONDARY_PCM_RX:
+	case AFE_PORT_ID_SECONDARY_PCM_TX:
+	case AFE_PORT_ID_TERTIARY_PCM_RX:
+	case AFE_PORT_ID_TERTIARY_PCM_TX:
+	case AFE_PORT_ID_QUATERNARY_PCM_RX:
+	case AFE_PORT_ID_QUATERNARY_PCM_TX:
+	default:
+		pr_debug("%s: default case 0x%x\n", __func__, port_id);
+		ret_size = SIZEOF_CFG_CMD(afe_param_id_pcm_cfg);
+		break;
+	}
+	return ret_size;
+}
+
+int afe_q6_interface_prepare(void)
+{
+	int ret = 0;
+
+	pr_debug("%s:\n", __func__);
+
+	if (this_afe.apr == NULL) {
+		this_afe.apr = apr_register("ADSP", "AFE", afe_callback,
+			0xFFFFFFFF, &this_afe);
+		if (this_afe.apr == NULL) {
+			pr_err("%s: Unable to register AFE\n", __func__);
+			ret = -ENODEV;
+		}
+		rtac_set_afe_handle(this_afe.apr);
+	}
+	return ret;
+}
+
+/*
+ * afe_apr_send_pkt : returns 0 on success, negative otherwise.
+ */
+static int afe_apr_send_pkt(void *data, wait_queue_head_t *wait)
+{
+	int ret;
+
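+	/*
+	 * Arm the handshake: the AFE callback clears this_afe.state to 0
+	 * (and records any DSP error in this_afe.status) when the response
+	 * arrives, which is what the wait below polls for.
+	 */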
+	if (wait)
+		atomic_set(&this_afe.state, 1);
+	atomic_set(&this_afe.status, 0);
+	ret = apr_send_pkt(this_afe.apr, data);
+	if (ret > 0) {
+		if (wait) {
+			ret = wait_event_timeout(*wait,
+					(atomic_read(&this_afe.state) == 0),
+					msecs_to_jiffies(TIMEOUT_MS));
+			if (!ret) {
+				ret = -ETIMEDOUT;
+			} else if (atomic_read(&this_afe.status) > 0) {
+				pr_err("%s: DSP returned error[%s]\n", __func__,
+					adsp_err_get_err_str(atomic_read(
+					&this_afe.status)));
+				ret = adsp_err_get_lnx_err_code(
+						atomic_read(&this_afe.status));
+			} else {
+				ret = 0;
+			}
+		} else {
+			ret = 0;
+		}
+	} else if (ret == 0) {
+		pr_err("%s: packet not transmitted\n", __func__);
+		/* apr_send_pkt can return 0 when nothing is transmitted */
+		ret = -EINVAL;
+	}
+
+	pr_debug("%s: leave %d\n", __func__, ret);
+	return ret;
+}
+
+static int afe_send_cal_block(u16 port_id, struct cal_block_data *cal_block)
+{
+	int						result = 0;
+	int						index = 0;
+	struct afe_audioif_config_command_no_payload	afe_cal;
+
+	if (!cal_block) {
+		pr_debug("%s: No AFE cal to send!\n", __func__);
+		result = -EINVAL;
+		goto done;
+	}
+	if (cal_block->cal_data.size <= 0) {
+		pr_debug("%s: AFE cal has invalid size!\n", __func__);
+		result = -EINVAL;
+		goto done;
+	}
+
+	index = q6audio_get_port_index(port_id);
+	if (index < 0 || index >= AFE_MAX_PORTS) {
+		pr_err("%s: AFE port index[%d] invalid!\n",
+				__func__, index);
+		result = -EINVAL;
+		goto done;
+	}
+
+	afe_cal.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+				APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
+	afe_cal.hdr.pkt_size = sizeof(afe_cal);
+	afe_cal.hdr.src_port = 0;
+	afe_cal.hdr.dest_port = 0;
+	afe_cal.hdr.token = index;
+	afe_cal.hdr.opcode = AFE_PORT_CMD_SET_PARAM_V2;
+	afe_cal.param.port_id = port_id;
+	afe_cal.param.payload_size = cal_block->cal_data.size;
+	afe_cal.param.payload_address_lsw =
+		lower_32_bits(cal_block->cal_data.paddr);
+	afe_cal.param.payload_address_msw =
+		msm_audio_populate_upper_32_bits(cal_block->cal_data.paddr);
+	afe_cal.param.mem_map_handle = cal_block->map_data.q6map_handle;
+
+	pr_debug("%s: AFE cal sent for device port = 0x%x, cal size = %zd, cal addr = 0x%pK\n",
+		__func__, port_id,
+		cal_block->cal_data.size, &cal_block->cal_data.paddr);
+
+	result = afe_apr_send_pkt(&afe_cal, &this_afe.wait[index]);
+	if (result)
+		pr_err("%s: AFE cal for port 0x%x failed %d\n",
+		       __func__, port_id, result);
+
+done:
+	return result;
+}
+
+static int afe_send_custom_topology_block(struct cal_block_data *cal_block)
+{
+	int	result = 0;
+	int	index = 0;
+	struct cmd_set_topologies afe_cal;
+
+	if (!cal_block) {
+		pr_err("%s: No AFE SVC cal to send!\n", __func__);
+		return -EINVAL;
+	}
+	if (cal_block->cal_data.size <= 0) {
+		pr_err("%s: AFE SVC cal has invalid size: %zd!\n",
+		__func__, cal_block->cal_data.size);
+		return -EINVAL;
+	}
+
+	afe_cal.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+				APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
+	afe_cal.hdr.pkt_size = sizeof(afe_cal);
+	afe_cal.hdr.src_port = 0;
+	afe_cal.hdr.dest_port = 0;
+	afe_cal.hdr.token = index;
+	afe_cal.hdr.opcode = AFE_CMD_ADD_TOPOLOGIES;
+
+	afe_cal.payload_size = cal_block->cal_data.size;
+	afe_cal.payload_addr_lsw =
+		lower_32_bits(cal_block->cal_data.paddr);
+	afe_cal.payload_addr_msw =
+		msm_audio_populate_upper_32_bits(cal_block->cal_data.paddr);
+	afe_cal.mem_map_handle = cal_block->map_data.q6map_handle;
+
+	pr_debug("%s:cmd_id:0x%x calsize:%zd memmap_hdl:0x%x caladdr:0x%pK",
+		__func__, AFE_CMD_ADD_TOPOLOGIES, cal_block->cal_data.size,
+		afe_cal.mem_map_handle, &cal_block->cal_data.paddr);
+
+	result = afe_apr_send_pkt(&afe_cal, &this_afe.wait[index]);
+	if (result)
+		pr_err("%s: AFE send topology for command 0x%x failed %d\n",
+		       __func__, AFE_CMD_ADD_TOPOLOGIES, result);
+
+	return result;
+}
+
+static void afe_send_custom_topology(void)
+{
+	struct cal_block_data   *cal_block = NULL;
+	int cal_index = AFE_CUST_TOPOLOGY_CAL;
+	int ret;
+
+	if (this_afe.cal_data[cal_index] == NULL) {
+		pr_err("%s: cal_index %d not allocated!\n",
+			__func__, cal_index);
+		return;
+	}
+	mutex_lock(&this_afe.cal_data[cal_index]->lock);
+
+	if (!this_afe.set_custom_topology)
+		goto unlock;
+	this_afe.set_custom_topology = 0;
+	cal_block = cal_utils_get_only_cal_block(this_afe.cal_data[cal_index]);
+	if (cal_block == NULL) {
+		pr_err("%s cal_block not found!!\n", __func__);
+		goto unlock;
+	}
+
+	pr_debug("%s: Sending cal_index cal %d\n", __func__, cal_index);
+
+	ret = remap_cal_data(cal_block, cal_index);
+	if (ret) {
+		pr_err("%s: Remap_cal_data failed for cal %d!\n",
+			__func__, cal_index);
+		goto unlock;
+	}
+	ret = afe_send_custom_topology_block(cal_block);
+	if (ret < 0) {
+		pr_err("%s: No cal sent for cal_index %d! ret %d\n",
+			__func__, cal_index, ret);
+		goto unlock;
+	}
+	pr_debug("%s:sent custom topology for AFE\n", __func__);
+unlock:
+	mutex_unlock(&this_afe.cal_data[cal_index]->lock);
+}
+
+static int afe_spk_ramp_dn_cfg(int port)
+{
+	int ret = -EINVAL;
+	int index = 0;
+	struct afe_spkr_prot_config_command config;
+
+	if (afe_get_port_type(port) != MSM_AFE_PORT_TYPE_RX) {
+		pr_debug("%s: port doesn't match 0x%x\n", __func__, port);
+		return 0;
+	}
+	if (this_afe.prot_cfg.mode == MSM_SPKR_PROT_DISABLED ||
+		(this_afe.vi_rx_port != port)) {
+		pr_debug("%s: spkr protection disabled port 0x%x %d 0x%x\n",
+				__func__, port, ret, this_afe.vi_rx_port);
+		return 0;
+	}
+	memset(&config, 0, sizeof(config));
+	ret = q6audio_validate_port(port);
+	if (ret < 0) {
+		pr_err("%s: Invalid port 0x%x ret %d", __func__, port, ret);
+		ret = -EINVAL;
+		goto fail_cmd;
+	}
+	index = q6audio_get_port_index(port);
+	if (index < 0 || index >= AFE_MAX_PORTS) {
+		pr_err("%s: AFE port index[%d] invalid!\n",
+				__func__, index);
+		ret = -EINVAL;
+		goto fail_cmd;
+	}
+	config.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+			APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
+	config.hdr.pkt_size = sizeof(config);
+	config.hdr.src_port = 0;
+	config.hdr.dest_port = 0;
+	config.hdr.token = index;
+
+	config.hdr.opcode = AFE_PORT_CMD_SET_PARAM_V2;
+	config.param.port_id = q6audio_get_port_id(port);
+	config.param.payload_size =
+		sizeof(config) - sizeof(config.hdr) - sizeof(config.param)
+		- sizeof(config.prot_config);
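+	/*
+	 * The ramp-down request carries no parameter payload: param_size is
+	 * zero and the AFE_PARAM_ID_FBSP_PTONE_RAMP_CFG id alone triggers it.
+	 */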
+	config.pdata.module_id = AFE_MODULE_FB_SPKR_PROT_V2_RX;
+	config.pdata.param_id = AFE_PARAM_ID_FBSP_PTONE_RAMP_CFG;
+	config.pdata.param_size = 0;
+	atomic_set(&this_afe.state, 1);
+	atomic_set(&this_afe.status, 0);
+	ret = apr_send_pkt(this_afe.apr, (uint32_t *) &config);
+	if (ret < 0) {
+		pr_err("%s: port = 0x%x param = 0x%x failed %d\n",
+				__func__, port, config.pdata.param_id, ret);
+		goto fail_cmd;
+	}
+	ret = wait_event_timeout(this_afe.wait[index],
+			(atomic_read(&this_afe.state) == 0),
+			msecs_to_jiffies(TIMEOUT_MS));
+	if (!ret) {
+		pr_err("%s: wait_event timeout\n", __func__);
+		ret = -EINVAL;
+		goto fail_cmd;
+	}
+	if (atomic_read(&this_afe.status) > 0) {
+		pr_err("%s: config cmd failed [%s]\n",
+			__func__, adsp_err_get_err_str(
+			atomic_read(&this_afe.status)));
+		ret = adsp_err_get_lnx_err_code(
+				atomic_read(&this_afe.status));
+		goto fail_cmd;
+	}
+	/* the DSP needs at least 15 ms to ramp down the pilot tone */
+	usleep_range(15000, 15010);
+	ret = 0;
+fail_cmd:
+	pr_debug("%s: config.pdata.param_id 0x%x status %d\n",
+		__func__, config.pdata.param_id, ret);
+	return ret;
+}
+
+static int afe_spk_prot_prepare(int src_port, int dst_port, int param_id,
+		union afe_spkr_prot_config *prot_config)
+{
+	int ret = -EINVAL;
+	int index = 0;
+	struct afe_spkr_prot_config_command config;
+
+	memset(&config, 0, sizeof(config));
+	if (!prot_config) {
+		pr_err("%s: Invalid params\n", __func__);
+		goto fail_cmd;
+	}
+	ret = q6audio_validate_port(src_port);
+	if (ret < 0) {
+		pr_err("%s: Invalid src port 0x%x ret %d",
+				__func__, src_port, ret);
+		ret = -EINVAL;
+		goto fail_cmd;
+	}
+	ret = q6audio_validate_port(dst_port);
+	if (ret < 0) {
+		pr_err("%s: Invalid dst port 0x%x ret %d", __func__,
+				dst_port, ret);
+		ret = -EINVAL;
+		goto fail_cmd;
+	}
+	index = q6audio_get_port_index(src_port);
+	if (index < 0 || index >= AFE_MAX_PORTS) {
+		pr_err("%s: AFE port index[%d] invalid!\n",
+				__func__, index);
+		ret = -EINVAL;
+		goto fail_cmd;
+	}
+	switch (param_id) {
+	case AFE_PARAM_ID_FBSP_MODE_RX_CFG:
+		config.pdata.module_id = AFE_MODULE_FB_SPKR_PROT_V2_RX;
+		break;
+	case AFE_PARAM_ID_FEEDBACK_PATH_CFG:
+		this_afe.vi_tx_port = src_port;
+		this_afe.vi_rx_port = dst_port;
+		config.pdata.module_id = AFE_MODULE_FEEDBACK;
+		break;
+	/*
+	 * AFE_PARAM_ID_SPKR_CALIB_VI_PROC_CFG_V2 is the same as
+	 * AFE_PARAM_ID_SP_V2_TH_VI_MODE_CFG
+	 */
+	case AFE_PARAM_ID_SPKR_CALIB_VI_PROC_CFG_V2:
+	case AFE_PARAM_ID_SP_V2_TH_VI_FTM_CFG:
+		config.pdata.module_id = AFE_MODULE_SPEAKER_PROTECTION_V2_TH_VI;
+		break;
+	case AFE_PARAM_ID_SP_V2_EX_VI_MODE_CFG:
+	case AFE_PARAM_ID_SP_V2_EX_VI_FTM_CFG:
+		config.pdata.module_id = AFE_MODULE_SPEAKER_PROTECTION_V2_EX_VI;
+		break;
+	default:
+		pr_err("%s: default case 0x%x\n", __func__, param_id);
+		goto fail_cmd;
+	}
+
+	config.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+				APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
+	config.hdr.pkt_size = sizeof(config);
+	config.hdr.src_port = 0;
+	config.hdr.dest_port = 0;
+	config.hdr.token = index;
+
+	config.hdr.opcode = AFE_PORT_CMD_SET_PARAM_V2;
+	config.param.port_id = q6audio_get_port_id(src_port);
+	config.param.payload_size = sizeof(config) - sizeof(config.hdr)
+		- sizeof(config.param);
+	config.pdata.param_id = param_id;
+	config.pdata.param_size = sizeof(config.prot_config);
+	config.prot_config = *prot_config;
+	atomic_set(&this_afe.state, 1);
+	atomic_set(&this_afe.status, 0);
+	ret = apr_send_pkt(this_afe.apr, (uint32_t *) &config);
+	if (ret < 0) {
+		pr_err("%s: port = 0x%x param = 0x%x failed %d\n",
+		__func__, src_port, param_id, ret);
+		goto fail_cmd;
+	}
+	ret = wait_event_timeout(this_afe.wait[index],
+		(atomic_read(&this_afe.state) == 0),
+		msecs_to_jiffies(TIMEOUT_MS));
+	if (!ret) {
+		pr_err("%s: wait_event timeout\n", __func__);
+		ret = -EINVAL;
+		goto fail_cmd;
+	}
+	if (atomic_read(&this_afe.status) > 0) {
+		pr_err("%s: config cmd failed [%s]\n",
+			__func__, adsp_err_get_err_str(
+			atomic_read(&this_afe.status)));
+		ret = adsp_err_get_lnx_err_code(
+				atomic_read(&this_afe.status));
+		goto fail_cmd;
+	}
+	ret = 0;
+fail_cmd:
+	pr_debug("%s: config.pdata.param_id 0x%x status %d 0x%x\n",
+	__func__, config.pdata.param_id, ret, src_port);
+	return ret;
+}
+
+static void afe_send_cal_spkr_prot_tx(int port_id)
+{
+	union afe_spkr_prot_config afe_spk_config;
+
+	if (this_afe.cal_data[AFE_FB_SPKR_PROT_CAL] == NULL ||
+	    this_afe.cal_data[AFE_FB_SPKR_PROT_TH_VI_CAL] == NULL ||
+	    this_afe.cal_data[AFE_FB_SPKR_PROT_EX_VI_CAL] == NULL)
+		return;
+
+	mutex_lock(&this_afe.cal_data[AFE_FB_SPKR_PROT_CAL]->lock);
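+	/*
+	 * Pick the VI processing operation mode: calibration while a
+	 * calibration run is in progress, FTM when factory test mode is
+	 * requested, normal processing otherwise.
+	 */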
+	if ((this_afe.prot_cfg.mode != MSM_SPKR_PROT_DISABLED) &&
+		(this_afe.vi_tx_port == port_id)) {
+		if (this_afe.prot_cfg.mode ==
+			MSM_SPKR_PROT_CALIBRATION_IN_PROGRESS) {
+			afe_spk_config.vi_proc_cfg.operation_mode =
+					Q6AFE_MSM_SPKR_CALIBRATION;
+			afe_spk_config.vi_proc_cfg.quick_calib_flag =
+					this_afe.prot_cfg.quick_calib_flag;
+		} else {
+			afe_spk_config.vi_proc_cfg.operation_mode =
+					Q6AFE_MSM_SPKR_PROCESSING;
+		}
+
+		if (this_afe.th_ftm_cfg.mode == MSM_SPKR_PROT_IN_FTM_MODE)
+			afe_spk_config.vi_proc_cfg.operation_mode =
+					    Q6AFE_MSM_SPKR_FTM_MODE;
+		afe_spk_config.vi_proc_cfg.minor_version = 1;
+		afe_spk_config.vi_proc_cfg.r0_cali_q24[SP_V2_SPKR_1] =
+			(uint32_t) this_afe.prot_cfg.r0[SP_V2_SPKR_1];
+		afe_spk_config.vi_proc_cfg.r0_cali_q24[SP_V2_SPKR_2] =
+			(uint32_t) this_afe.prot_cfg.r0[SP_V2_SPKR_2];
+		afe_spk_config.vi_proc_cfg.t0_cali_q6[SP_V2_SPKR_1] =
+			(uint32_t) this_afe.prot_cfg.t0[SP_V2_SPKR_1];
+		afe_spk_config.vi_proc_cfg.t0_cali_q6[SP_V2_SPKR_2] =
+			(uint32_t) this_afe.prot_cfg.t0[SP_V2_SPKR_2];
+		if (this_afe.prot_cfg.mode != MSM_SPKR_PROT_NOT_CALIBRATED) {
+			struct asm_spkr_calib_vi_proc_cfg *vi_proc_cfg;
+			vi_proc_cfg = &afe_spk_config.vi_proc_cfg;
+			vi_proc_cfg->r0_t0_selection_flag[SP_V2_SPKR_1] =
+					    USE_CALIBRATED_R0TO;
+			vi_proc_cfg->r0_t0_selection_flag[SP_V2_SPKR_2] =
+					    USE_CALIBRATED_R0TO;
+		} else {
+			struct asm_spkr_calib_vi_proc_cfg *vi_proc_cfg;
+			vi_proc_cfg = &afe_spk_config.vi_proc_cfg;
+			vi_proc_cfg->r0_t0_selection_flag[SP_V2_SPKR_1] =
+							    USE_SAFE_R0TO;
+			vi_proc_cfg->r0_t0_selection_flag[SP_V2_SPKR_2] =
+							    USE_SAFE_R0TO;
+		}
+		if (afe_spk_prot_prepare(port_id, 0,
+					 AFE_PARAM_ID_SPKR_CALIB_VI_PROC_CFG_V2,
+					 &afe_spk_config))
+			pr_err("%s: SPKR_CALIB_VI_PROC_CFG failed\n",
+			       __func__);
+	}
+	mutex_unlock(&this_afe.cal_data[AFE_FB_SPKR_PROT_CAL]->lock);
+
+	mutex_lock(&this_afe.cal_data[AFE_FB_SPKR_PROT_TH_VI_CAL]->lock);
+	if ((this_afe.th_ftm_cfg.mode == MSM_SPKR_PROT_IN_FTM_MODE) &&
+	    (this_afe.vi_tx_port == port_id)) {
+		afe_spk_config.th_vi_ftm_cfg.minor_version = 1;
+		afe_spk_config.th_vi_ftm_cfg.wait_time_ms[SP_V2_SPKR_1] =
+			this_afe.th_ftm_cfg.wait_time[SP_V2_SPKR_1];
+		afe_spk_config.th_vi_ftm_cfg.wait_time_ms[SP_V2_SPKR_2] =
+			this_afe.th_ftm_cfg.wait_time[SP_V2_SPKR_2];
+		afe_spk_config.th_vi_ftm_cfg.ftm_time_ms[SP_V2_SPKR_1] =
+			this_afe.th_ftm_cfg.ftm_time[SP_V2_SPKR_1];
+		afe_spk_config.th_vi_ftm_cfg.ftm_time_ms[SP_V2_SPKR_2] =
+			this_afe.th_ftm_cfg.ftm_time[SP_V2_SPKR_2];
+
+		if (afe_spk_prot_prepare(port_id, 0,
+					 AFE_PARAM_ID_SP_V2_TH_VI_FTM_CFG,
+					 &afe_spk_config))
+			pr_err("%s: th vi ftm cfg failed\n", __func__);
+		this_afe.th_ftm_cfg.mode = MSM_SPKR_PROT_DISABLED;
+	}
+	mutex_unlock(&this_afe.cal_data[AFE_FB_SPKR_PROT_TH_VI_CAL]->lock);
+
+	mutex_lock(&this_afe.cal_data[AFE_FB_SPKR_PROT_EX_VI_CAL]->lock);
+	if ((this_afe.ex_ftm_cfg.mode == MSM_SPKR_PROT_IN_FTM_MODE) &&
+	    (this_afe.vi_tx_port == port_id)) {
+		afe_spk_config.ex_vi_mode_cfg.minor_version = 1;
+		afe_spk_config.ex_vi_mode_cfg.operation_mode =
+						Q6AFE_MSM_SPKR_FTM_MODE;
+		if (afe_spk_prot_prepare(port_id, 0,
+					 AFE_PARAM_ID_SP_V2_EX_VI_MODE_CFG,
+					 &afe_spk_config))
+			pr_err("%s: ex vi mode cfg failed\n", __func__);
+
+		afe_spk_config.ex_vi_ftm_cfg.minor_version = 1;
+		afe_spk_config.ex_vi_ftm_cfg.wait_time_ms[SP_V2_SPKR_1] =
+			this_afe.ex_ftm_cfg.wait_time[SP_V2_SPKR_1];
+		afe_spk_config.ex_vi_ftm_cfg.wait_time_ms[SP_V2_SPKR_2] =
+			this_afe.ex_ftm_cfg.wait_time[SP_V2_SPKR_2];
+		afe_spk_config.ex_vi_ftm_cfg.ftm_time_ms[SP_V2_SPKR_1] =
+			this_afe.ex_ftm_cfg.ftm_time[SP_V2_SPKR_1];
+		afe_spk_config.ex_vi_ftm_cfg.ftm_time_ms[SP_V2_SPKR_2] =
+			this_afe.ex_ftm_cfg.ftm_time[SP_V2_SPKR_2];
+
+		if (afe_spk_prot_prepare(port_id, 0,
+					 AFE_PARAM_ID_SP_V2_EX_VI_FTM_CFG,
+					 &afe_spk_config))
+			pr_err("%s: ex vi ftm cfg failed\n", __func__);
+		this_afe.ex_ftm_cfg.mode = MSM_SPKR_PROT_DISABLED;
+	}
+	mutex_unlock(&this_afe.cal_data[AFE_FB_SPKR_PROT_EX_VI_CAL]->lock);
+}
+
+static void afe_send_cal_spkr_prot_rx(int port_id)
+{
+	union afe_spkr_prot_config afe_spk_config;
+
+	if (this_afe.cal_data[AFE_FB_SPKR_PROT_CAL] == NULL)
+		goto done;
+
+	mutex_lock(&this_afe.cal_data[AFE_FB_SPKR_PROT_CAL]->lock);
+
+	if (this_afe.prot_cfg.mode != MSM_SPKR_PROT_DISABLED &&
+		(this_afe.vi_rx_port == port_id)) {
+		if (this_afe.prot_cfg.mode ==
+			MSM_SPKR_PROT_CALIBRATION_IN_PROGRESS)
+			afe_spk_config.mode_rx_cfg.mode =
+				Q6AFE_MSM_SPKR_CALIBRATION;
+		else
+			afe_spk_config.mode_rx_cfg.mode =
+				Q6AFE_MSM_SPKR_PROCESSING;
+		afe_spk_config.mode_rx_cfg.minor_version = 1;
+		if (afe_spk_prot_prepare(port_id, 0,
+			AFE_PARAM_ID_FBSP_MODE_RX_CFG,
+			&afe_spk_config))
+			pr_err("%s: RX MODE_VI_PROC_CFG failed\n",
+				   __func__);
+	}
+	mutex_unlock(&this_afe.cal_data[AFE_FB_SPKR_PROT_CAL]->lock);
+done:
+	return;
+}
+
+static int afe_send_hw_delay(u16 port_id, u32 rate)
+{
+	struct audio_cal_hw_delay_entry		delay_entry;
+	struct afe_audioif_config_command	config;
+	int index = 0;
+	int ret = -EINVAL;
+
+	pr_debug("%s:\n", __func__);
+
+	memset(&delay_entry, 0, sizeof(delay_entry));
+	delay_entry.sample_rate = rate;
+	if (afe_get_port_type(port_id) == MSM_AFE_PORT_TYPE_TX)
+		ret = afe_get_cal_hw_delay(TX_DEVICE, &delay_entry);
+	else if (afe_get_port_type(port_id) == MSM_AFE_PORT_TYPE_RX)
+		ret = afe_get_cal_hw_delay(RX_DEVICE, &delay_entry);
+
+	/*
+	 * HW delay is only used for IMS calls to sync audio with video
+	 * It is only needed for devices & sample rates used for IMS video
+	 * calls. Values are received from ACDB calbration files
+	 */
+	if (ret != 0) {
+		pr_debug("%s: debug: HW delay info not available %d\n",
+			__func__, ret);
+		goto fail_cmd;
+	}
+
+	index = q6audio_get_port_index(port_id);
+	if (index < 0 || index >= AFE_MAX_PORTS) {
+		pr_err("%s: AFE port index[%d] invalid!\n",
+				__func__, index);
+		ret = -EINVAL;
+		goto fail_cmd;
+	}
+
+	config.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+				APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
+	config.hdr.pkt_size = sizeof(config);
+	config.hdr.src_port = 0;
+	config.hdr.dest_port = 0;
+	config.hdr.token = index;
+
+	config.hdr.opcode = AFE_PORT_CMD_SET_PARAM_V2;
+	config.param.port_id = q6audio_get_port_id(port_id);
+	config.param.payload_size = sizeof(config) - sizeof(struct apr_hdr) -
+				    sizeof(config.param);
+	config.param.payload_address_lsw = 0x00;
+	config.param.payload_address_msw = 0x00;
+	config.param.mem_map_handle = 0x00;
+	config.pdata.module_id = AFE_MODULE_AUDIO_DEV_INTERFACE;
+	config.pdata.param_id = AFE_PARAM_ID_DEVICE_HW_DELAY;
+	config.pdata.param_size = sizeof(config.port);
+
+	config.port.hw_delay.delay_in_us = delay_entry.delay_usec;
+	config.port.hw_delay.device_hw_delay_minor_version =
+				AFE_API_VERSION_DEVICE_HW_DELAY;
+
+	ret = afe_apr_send_pkt(&config, &this_afe.wait[index]);
+	if (ret) {
+		pr_err("%s: AFE hw delay for port 0x%x failed %d\n",
+		       __func__, port_id, ret);
+		goto fail_cmd;
+	}
+
+fail_cmd:
+	pr_debug("%s: port_id 0x%x rate %u delay_usec %d status %d\n",
+	__func__, port_id, rate, delay_entry.delay_usec, ret);
+	return ret;
+}
+
+static struct cal_block_data *afe_find_cal_topo_id_by_port(
+			struct cal_type_data *cal_type, u16 port_id)
+{
+	struct list_head		*ptr, *next;
+	struct cal_block_data	*cal_block = NULL;
+	int32_t path;
+	struct audio_cal_info_afe_top *afe_top;
+	int afe_port_index = q6audio_get_port_index(port_id);
+
+	if (afe_port_index < 0)
+		goto err_exit;
+
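+	/*
+	 * Walk the topology cal blocks for one whose path (TX/RX) matches
+	 * this port; when a device-specific ACDB id is set for the port,
+	 * require that to match as well.
+	 */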
+	list_for_each_safe(ptr, next,
+		&cal_type->cal_blocks) {
+		cal_block = list_entry(ptr,
+			struct cal_block_data, list);
+
+		path = ((afe_get_port_type(port_id) ==
+			MSM_AFE_PORT_TYPE_TX)?(TX_DEVICE):(RX_DEVICE));
+		afe_top =
+		(struct audio_cal_info_afe_top *)cal_block->cal_info;
+		if (afe_top->path == path) {
+			if (this_afe.dev_acdb_id[afe_port_index] > 0) {
+				if (afe_top->acdb_id ==
+				    this_afe.dev_acdb_id[afe_port_index]) {
+					pr_debug("%s: top_id:%x acdb_id:%d afe_port_id:%d\n",
+						 __func__, afe_top->topology,
+						 afe_top->acdb_id,
+						 q6audio_get_port_id(port_id));
+					return cal_block;
+				}
+			} else {
+				pr_debug("%s: top_id:%x acdb_id:%d afe_port:%d\n",
+				 __func__, afe_top->topology, afe_top->acdb_id,
+				 q6audio_get_port_id(port_id));
+				return cal_block;
+			}
+		}
+	}
+
+err_exit:
+	return NULL;
+}
+
+static int afe_get_cal_topology_id(u16 port_id, u32 *topology_id,
+				   int cal_type_index)
+{
+	int ret = 0;
+
+	struct cal_block_data   *cal_block = NULL;
+	struct audio_cal_info_afe_top   *afe_top_info = NULL;
+
+	if (this_afe.cal_data[cal_type_index] == NULL) {
+		pr_debug("%s: [AFE_TOPOLOGY_CAL] not initialized\n", __func__);
+		return -EINVAL;
+	}
+	if (topology_id == NULL) {
+		pr_err("%s: topology_id is NULL\n", __func__);
+		return -EINVAL;
+	}
+	*topology_id = 0;
+
+	mutex_lock(&this_afe.cal_data[cal_type_index]->lock);
+	cal_block = afe_find_cal_topo_id_by_port(
+		this_afe.cal_data[cal_type_index], port_id);
+	if (cal_block == NULL) {
+		pr_debug("%s: [AFE_TOPOLOGY_CAL] not initialized for this port %d\n",
+				__func__, port_id);
+		ret = -EINVAL;
+		goto unlock;
+	}
+
+	afe_top_info = ((struct audio_cal_info_afe_top *)
+		cal_block->cal_info);
+	if (!afe_top_info->topology) {
+		pr_debug("%s: invalid topology id : [%d, %d]\n",
+		       __func__, afe_top_info->acdb_id, afe_top_info->topology);
+		ret = -EINVAL;
+		goto unlock;
+	}
+	*topology_id = (u32)afe_top_info->topology;
+
+	pr_debug("%s: port_id = %u acdb_id = %d topology_id = %u ret=%d\n",
+		__func__, port_id, afe_top_info->acdb_id,
+		afe_top_info->topology, ret);
+unlock:
+	mutex_unlock(&this_afe.cal_data[cal_type_index]->lock);
+	return ret;
+}
+
+static int afe_send_port_topology_id(u16 port_id)
+{
+	struct afe_audioif_config_command	config;
+	int index = 0;
+	int ret = 0;
+	u32 topology_id = 0;
+
+	index = q6audio_get_port_index(port_id);
+	if (index < 0 || index >= AFE_MAX_PORTS) {
+		pr_err("%s: AFE port index[%d] invalid!\n",
+				__func__, index);
+		return -EINVAL;
+	}
+
+	ret = afe_get_cal_topology_id(port_id, &topology_id, AFE_TOPOLOGY_CAL);
+	if (ret < 0) {
+		pr_debug("%s: Check for LSM topology\n", __func__);
+		ret = afe_get_cal_topology_id(port_id, &topology_id,
+					      AFE_LSM_TOPOLOGY_CAL);
+	}
+	if (ret || !topology_id) {
+		pr_debug("%s: AFE port[%d] get_cal_topology[%d] invalid!\n",
+				__func__, port_id, topology_id);
+		goto done;
+	}
+
+	config.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+				APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
+	config.hdr.pkt_size = sizeof(config);
+	config.hdr.src_port = 0;
+	config.hdr.dest_port = 0;
+	config.hdr.token = index;
+
+	config.hdr.opcode = AFE_PORT_CMD_SET_PARAM_V2;
+	config.param.port_id = q6audio_get_port_id(port_id);
+	config.param.payload_size = sizeof(config) - sizeof(struct apr_hdr) -
+				    sizeof(config.param);
+	config.param.payload_address_lsw = 0x00;
+	config.param.payload_address_msw = 0x00;
+	config.param.mem_map_handle = 0x00;
+	config.pdata.module_id = AFE_MODULE_AUDIO_DEV_INTERFACE;
+	config.pdata.param_id   = AFE_PARAM_ID_SET_TOPOLOGY;
+	config.pdata.param_size =  sizeof(config.port);
+	config.port.topology.minor_version = AFE_API_VERSION_TOPOLOGY_V1;
+	config.port.topology.topology_id = topology_id;
+
+	pr_debug("%s: param PL size=%d iparam_size[%d][%zd %zd %zd %zd] param_id[0x%x]\n",
+		__func__, config.param.payload_size, config.pdata.param_size,
+		sizeof(config), sizeof(config.param), sizeof(config.port),
+		sizeof(struct apr_hdr), config.pdata.param_id);
+
+	ret = afe_apr_send_pkt(&config, &this_afe.wait[index]);
+	if (ret) {
+		pr_err("%s: AFE set topology id enable for port 0x%x failed %d\n",
+			__func__, port_id, ret);
+		goto done;
+	}
+
+	this_afe.topology[index] = topology_id;
+	rtac_update_afe_topology(port_id);
+done:
+	pr_debug("%s: AFE set topology id 0x%x  enable for port 0x%x ret %d\n",
+			__func__, topology_id, port_id, ret);
+	return ret;
+}
+
+static int remap_cal_data(struct cal_block_data *cal_block, int cal_index)
+{
+	int ret = 0;
+
+	if (cal_block->map_data.ion_client == NULL) {
+		pr_err("%s: No ION allocation for cal index %d!\n",
+			__func__, cal_index);
+		ret = -EINVAL;
+		goto done;
+	}
+
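+	/*
+	 * Map the cal block with the DSP only once; the handle delivered
+	 * through the memory-map callback is cached in q6map_handle.
+	 */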
+	if ((cal_block->map_data.map_size > 0) &&
+		(cal_block->map_data.q6map_handle == 0)) {
+		atomic_set(&this_afe.mem_map_cal_index, cal_index);
+		ret = afe_cmd_memory_map(cal_block->cal_data.paddr,
+				cal_block->map_data.map_size);
+		atomic_set(&this_afe.mem_map_cal_index, -1);
+		if (ret < 0) {
+			pr_err("%s: mmap did not work! size = %zd ret %d\n",
+				__func__,
+				cal_block->map_data.map_size, ret);
+			pr_debug("%s: mmap did not work! addr = 0x%pK, size = %zd\n",
+				__func__,
+				&cal_block->cal_data.paddr,
+				cal_block->map_data.map_size);
+			goto done;
+		}
+		cal_block->map_data.q6map_handle = atomic_read(&this_afe.
+			mem_map_cal_handles[cal_index]);
+	}
+done:
+	return ret;
+}
+
+static struct cal_block_data *afe_find_cal(int cal_index, int port_id)
+{
+	struct list_head *ptr, *next;
+	struct cal_block_data *cal_block = NULL;
+	struct audio_cal_info_afe *afe_cal_info = NULL;
+	int afe_port_index = q6audio_get_port_index(port_id);
+
+	pr_debug("%s: cal_index %d port_id %d port_index %d\n", __func__,
+		  cal_index, port_id, afe_port_index);
+	if (afe_port_index < 0) {
+		pr_err("%s: Error getting AFE port index %d\n",
+			__func__, afe_port_index);
+		goto exit;
+	}
+
+	list_for_each_safe(ptr, next,
+			   &this_afe.cal_data[cal_index]->cal_blocks) {
+		cal_block = list_entry(ptr, struct cal_block_data, list);
+		afe_cal_info = cal_block->cal_info;
+		if ((afe_cal_info->acdb_id ==
+		     this_afe.dev_acdb_id[afe_port_index]) &&
+		    (afe_cal_info->sample_rate ==
+		     this_afe.afe_sample_rates[afe_port_index])) {
+			pr_debug("%s: cal block is a match, size is %zd\n",
+				 __func__, cal_block->cal_data.size);
+			goto exit;
+		}
+	}
+	pr_debug("%s: no matching cal_block found\n", __func__);
+	cal_block = NULL;
+
+exit:
+	return cal_block;
+}
+
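+/*
+ * Pick the calibration block for this port (a per-device match when a
+ * device ACDB id is registered for common RX/TX or LSM TX cal, otherwise
+ * the single registered block), map it to the DSP and send it.
+ */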
+static int send_afe_cal_type(int cal_index, int port_id)
+{
+	struct cal_block_data		*cal_block = NULL;
+	int ret;
+	int afe_port_index = q6audio_get_port_index(port_id);
+
+	pr_debug("%s:\n", __func__);
+
+	if (this_afe.cal_data[cal_index] == NULL) {
+		pr_warn("%s: cal_index %d not allocated!\n",
+			__func__, cal_index);
+		ret = -EINVAL;
+		goto done;
+	}
+
+	if (afe_port_index < 0) {
+		pr_err("%s: Error getting AFE port index %d\n",
+			__func__, afe_port_index);
+		ret = -EINVAL;
+		goto done;
+	}
+
+	mutex_lock(&this_afe.cal_data[cal_index]->lock);
+
+	if (((cal_index == AFE_COMMON_RX_CAL) ||
+	     (cal_index == AFE_COMMON_TX_CAL) ||
+	     (cal_index == AFE_LSM_TX_CAL)) &&
+	    (this_afe.dev_acdb_id[afe_port_index] > 0))
+		cal_block = afe_find_cal(cal_index, port_id);
+	else
+		cal_block = cal_utils_get_only_cal_block(
+				this_afe.cal_data[cal_index]);
+
+	if (cal_block == NULL) {
+		pr_debug("%s: cal_block not found\n", __func__);
+		ret = -EINVAL;
+		goto unlock;
+	}
+
+	pr_debug("%s: Sending cal_index cal %d\n", __func__, cal_index);
+
+	ret = remap_cal_data(cal_block, cal_index);
+	if (ret) {
+		pr_err("%s: Remap_cal_data failed for cal %d!\n",
+			__func__, cal_index);
+		ret = -EINVAL;
+		goto unlock;
+	}
+	ret = afe_send_cal_block(port_id, cal_block);
+	if (ret < 0)
+		pr_debug("%s: No cal sent for cal_index %d, port_id = 0x%x! ret %d\n",
+			__func__, cal_index, port_id, ret);
+unlock:
+	mutex_unlock(&this_afe.cal_data[cal_index]->lock);
+done:
+	return ret;
+}
+
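+/*
+ * Send all applicable calibration for a port: speaker protection cal
+ * plus common TX cal (falling back to LSM TX cal) for TX ports, speaker
+ * protection cal plus common RX cal for RX ports.
+ */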
+void afe_send_cal(u16 port_id)
+{
+	int ret;
+
+	pr_debug("%s: port_id=0x%x\n", __func__, port_id);
+
+	if (afe_get_port_type(port_id) == MSM_AFE_PORT_TYPE_TX) {
+		afe_send_cal_spkr_prot_tx(port_id);
+		ret = send_afe_cal_type(AFE_COMMON_TX_CAL, port_id);
+		if (ret < 0)
+			send_afe_cal_type(AFE_LSM_TX_CAL, port_id);
+	} else if (afe_get_port_type(port_id) == MSM_AFE_PORT_TYPE_RX) {
+		afe_send_cal_spkr_prot_rx(port_id);
+		send_afe_cal_type(AFE_COMMON_RX_CAL, port_id);
+	}
+}
+
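+/*
+ * Enable or disable the hardware MAD (mic activity detection) block via
+ * AFE_PARAM_ID_HW_MAD_CTRL; the command is always addressed to
+ * SLIMBUS_5_TX.
+ */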
+int afe_turn_onoff_hw_mad(u16 mad_type, u16 enable)
+{
+	int ret;
+	struct afe_cmd_hw_mad_ctrl config;
+
+	pr_debug("%s: enter\n", __func__);
+	memset(&config, 0, sizeof(config));
+	config.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+					     APR_HDR_LEN(APR_HDR_SIZE),
+					     APR_PKT_VER);
+	config.hdr.pkt_size = sizeof(config);
+	config.hdr.src_port = 0;
+	config.hdr.dest_port = 0;
+	config.hdr.token = IDX_GLOBAL_CFG;
+	config.hdr.opcode = AFE_PORT_CMD_SET_PARAM_V2;
+	config.param.port_id = SLIMBUS_5_TX;
+	config.param.payload_size = sizeof(config) - sizeof(struct apr_hdr) -
+				    sizeof(config.param);
+	config.param.payload_address_lsw = 0x00;
+	config.param.payload_address_msw = 0x00;
+	config.param.mem_map_handle = 0x00;
+	config.pdata.module_id = AFE_MODULE_HW_MAD;
+	config.pdata.param_id = AFE_PARAM_ID_HW_MAD_CTRL;
+	config.pdata.param_size = sizeof(config.payload);
+	config.payload.minor_version = 1;
+	config.payload.mad_type = mad_type;
+	config.payload.mad_enable = enable;
+
+	ret = afe_apr_send_pkt(&config, &this_afe.wait[IDX_GLOBAL_CFG]);
+	if (ret)
+		pr_err("%s: AFE_PARAM_ID_HW_MAD_CTRL failed %d\n", __func__,
+		       ret);
+	return ret;
+}
+
+static int afe_send_slimbus_slave_cfg(
+	struct afe_param_cdc_slimbus_slave_cfg *sb_slave_cfg)
+{
+	int ret;
+	struct afe_svc_cmd_sb_slave_cfg config;
+
+	pr_debug("%s: enter\n", __func__);
+
+	config.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+					     APR_HDR_LEN(APR_HDR_SIZE),
+					     APR_PKT_VER);
+	config.hdr.pkt_size = sizeof(config);
+	config.hdr.src_port = 0;
+	config.hdr.dest_port = 0;
+	config.hdr.token = IDX_GLOBAL_CFG;
+	config.hdr.opcode = AFE_SVC_CMD_SET_PARAM;
+	config.param.payload_size = sizeof(config) - sizeof(struct apr_hdr) -
+				    sizeof(config.param);
+	config.param.payload_address_lsw = 0x00;
+	config.param.payload_address_msw = 0x00;
+	config.param.mem_map_handle = 0x00;
+	config.pdata.module_id = AFE_MODULE_CDC_DEV_CFG;
+	config.pdata.param_id = AFE_PARAM_ID_CDC_SLIMBUS_SLAVE_CFG;
+	config.pdata.param_size =
+	    sizeof(struct afe_param_cdc_slimbus_slave_cfg);
+	config.sb_slave_cfg = *sb_slave_cfg;
+
+	ret = afe_apr_send_pkt(&config, &this_afe.wait[IDX_GLOBAL_CFG]);
+	if (ret)
+		pr_err("%s: AFE_PARAM_ID_CDC_SLIMBUS_SLAVE_CFG failed %d\n",
+		       __func__, ret);
+
+	pr_debug("%s: leave %d\n", __func__, ret);
+	return ret;
+}
+
+static int afe_send_codec_reg_page_config(
+	struct afe_param_cdc_reg_page_cfg *cdc_reg_page_cfg)
+{
+	struct afe_svc_cmd_cdc_reg_page_cfg config;
+	int ret;
+
+	config.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+					     APR_HDR_LEN(APR_HDR_SIZE),
+					     APR_PKT_VER);
+	config.hdr.pkt_size = sizeof(config);
+	config.hdr.src_port = 0;
+	config.hdr.dest_port = 0;
+	config.hdr.token = IDX_GLOBAL_CFG;
+	config.hdr.opcode = AFE_SVC_CMD_SET_PARAM;
+	config.param.payload_size = sizeof(config) - sizeof(struct apr_hdr) -
+				    sizeof(config.param);
+	config.param.payload_address_lsw = 0x00;
+	config.param.payload_address_msw = 0x00;
+	config.param.mem_map_handle = 0x00;
+	config.pdata.module_id = AFE_MODULE_CDC_DEV_CFG;
+	config.pdata.param_id = AFE_PARAM_ID_CDC_REG_PAGE_CFG;
+	config.pdata.param_size =
+	    sizeof(struct afe_param_cdc_reg_page_cfg);
+	config.cdc_reg_page_cfg = *cdc_reg_page_cfg;
+
+	ret = afe_apr_send_pkt(&config, &this_afe.wait[IDX_GLOBAL_CFG]);
+	if (ret)
+		pr_err("%s: AFE_PARAM_ID_CDC_REG_PAGE_CFG failed %d\n",
+		       __func__, ret);
+
+	return ret;
+}
+
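+/*
+ * Send the codec register configuration, splitting the register list
+ * across as many APR packets as needed: reg_per_pkt entries fit in one
+ * packet, so num_pkts = ceil(num_registers / reg_per_pkt). For example
+ * (illustrative numbers), 100 registers at 30 per packet gives 4
+ * packets, the last one carrying the remaining 10 entries.
+ */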
+static int afe_send_codec_reg_config(
+	struct afe_param_cdc_reg_cfg_data *cdc_reg_cfg)
+{
+	int i, j, ret = -EINVAL;
+	int pkt_size, payload_size, reg_per_pkt, num_pkts, num_regs;
+	struct afe_svc_cmd_cdc_reg_cfg *config;
+	struct afe_svc_cmd_set_param *param;
+
+	reg_per_pkt = (APR_MAX_BUF - sizeof(*config)) /
+			sizeof(struct afe_param_cdc_reg_cfg_payload);
+	if (reg_per_pkt > 0) {
+		num_pkts = (cdc_reg_cfg->num_registers / reg_per_pkt) +
+			(cdc_reg_cfg->num_registers % reg_per_pkt == 0 ? 0 : 1);
+	} else {
+		pr_err("%s: Failed to build codec reg config APR packet\n",
+			__func__);
+		return -EINVAL;
+	}
+
+	for (j = 0; j < num_pkts; ++j) {
+		/*
+		 * num_regs is set to reg_per_pkt on each pass through the loop
+		 * except the last, when it is set to the number of registers
+		 * remaining from the total
+		 */
+		num_regs = (j < (num_pkts - 1) ? reg_per_pkt :
+				cdc_reg_cfg->num_registers - (reg_per_pkt * j));
+		payload_size = sizeof(struct afe_param_cdc_reg_cfg_payload) *
+				num_regs;
+		pkt_size = sizeof(*config) + payload_size;
+		pr_debug("%s: pkt_size %d, payload_size %d\n", __func__,
+			 pkt_size, payload_size);
+		config = kzalloc(pkt_size, GFP_KERNEL);
+		if (!config)
+			return -ENOMEM;
+
+		config->hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+						      APR_HDR_LEN(APR_HDR_SIZE),
+						      APR_PKT_VER);
+		config->hdr.pkt_size = pkt_size;
+		config->hdr.src_port = 0;
+		config->hdr.dest_port = 0;
+		config->hdr.token = IDX_GLOBAL_CFG;
+		config->hdr.opcode = AFE_SVC_CMD_SET_PARAM;
+
+		param = &config->param;
+		param->payload_size = payload_size;
+		param->payload_address_lsw = 0x00;
+		param->payload_address_msw = 0x00;
+		param->mem_map_handle = 0x00;
+
+		for (i = 0; i < num_regs; i++) {
+			config->reg_data[i].common.module_id =
+						AFE_MODULE_CDC_DEV_CFG;
+			config->reg_data[i].common.param_id =
+						AFE_PARAM_ID_CDC_REG_CFG;
+			config->reg_data[i].common.param_size =
+			    sizeof(config->reg_data[i].reg_cfg);
+			config->reg_data[i].reg_cfg =
+				cdc_reg_cfg->reg_data[i + (j * reg_per_pkt)];
+		}
+
+		ret = afe_apr_send_pkt(config, &this_afe.wait[IDX_GLOBAL_CFG]);
+		if (ret) {
+			pr_err("%s: AFE_PARAM_ID_CDC_REG_CFG failed %d\n",
+				__func__, ret);
+			kfree(config);
+			break;
+		}
+		kfree(config);
+	}
+
+	return ret;
+}
+
+static int afe_init_cdc_reg_config(void)
+{
+	int ret;
+	struct afe_svc_cmd_init_cdc_reg_cfg config;
+
+	pr_debug("%s: enter\n", __func__);
+	config.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+					     APR_HDR_LEN(APR_HDR_SIZE),
+					     APR_PKT_VER);
+	config.hdr.pkt_size = sizeof(config);
+	config.hdr.src_port = 0;
+	config.hdr.dest_port = 0;
+	config.hdr.token = IDX_GLOBAL_CFG;
+	config.hdr.opcode = AFE_SVC_CMD_SET_PARAM;
+
+	config.param.payload_size = sizeof(struct afe_port_param_data_v2);
+	config.param.payload_address_lsw = 0x00;
+	config.param.payload_address_msw = 0x00;
+	config.param.mem_map_handle = 0x00;
+
+	config.init.module_id = AFE_MODULE_CDC_DEV_CFG;
+	config.init.param_id = AFE_PARAM_ID_CDC_REG_CFG_INIT;
+	config.init.param_size = 0;
+
+	ret = afe_apr_send_pkt(&config, &this_afe.wait[IDX_GLOBAL_CFG]);
+	if (ret) {
+		pr_err("%s: AFE_PARAM_ID_CDC_INIT_REG_CFG failed %d\n",
+		       __func__, ret);
+	}
+
+	return ret;
+}
+
+static int afe_send_slimbus_slave_port_cfg(
+	struct afe_param_slimbus_slave_port_cfg *port_config, u16 port_id)
+{
+	int ret, index;
+	struct afe_cmd_hw_mad_slimbus_slave_port_cfg config;
+
+	pr_debug("%s: enter, port_id =  0x%x\n", __func__, port_id);
+	index = q6audio_get_port_index(port_id);
+	if (index < 0 || index >= AFE_MAX_PORTS) {
+		pr_err("%s: AFE port index[%d] invalid!\n",
+				__func__, index);
+		return -EINVAL;
+	}
+	ret = q6audio_validate_port(port_id);
+	if (ret < 0) {
+		pr_err("%s: port id = 0x%x ret %d\n", __func__, port_id, ret);
+		return -EINVAL;
+	}
+
+	config.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+					     APR_HDR_LEN(APR_HDR_SIZE),
+					     APR_PKT_VER);
+	config.hdr.pkt_size = sizeof(config);
+	config.hdr.src_port = 0;
+	config.hdr.dest_port = 0;
+	config.hdr.token = index;
+	config.hdr.opcode = AFE_PORT_CMD_SET_PARAM_V2;
+	config.param.port_id = port_id;
+	config.param.payload_size = sizeof(config) - sizeof(struct apr_hdr) -
+				    sizeof(config.param);
+	config.param.payload_address_lsw = 0x00;
+	config.param.payload_address_msw = 0x00;
+	config.param.mem_map_handle = 0x00;
+	config.pdata.module_id = AFE_MODULE_HW_MAD;
+	config.pdata.param_id = AFE_PARAM_ID_SLIMBUS_SLAVE_PORT_CFG;
+	config.pdata.param_size = sizeof(*port_config);
+	config.sb_port_cfg = *port_config;
+
+	ret = afe_apr_send_pkt(&config, &this_afe.wait[index]);
+	if (ret) {
+		pr_err("%s: AFE_PARAM_ID_SLIMBUS_SLAVE_PORT_CFG failed %d\n",
+			__func__, ret);
+	}
+	pr_debug("%s: leave %d\n", __func__, ret);
+	return ret;
+}
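+
+/*
+ * Configure AANC on the TX port: voice/noise/error mics on the first
+ * three TX channels, the TX/RX sample rates recorded at port start, and
+ * rx_port as the reference path. Bails out with -EINVAL while either
+ * sample rate is still zero, since the AFE resampler rejects invalid
+ * rates.
+ */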
+static int afe_aanc_port_cfg(void *apr, uint16_t tx_port, uint16_t rx_port)
+{
+	struct afe_port_cmd_set_aanc_param cfg;
+	int ret = 0;
+	int index = 0;
+
+	pr_debug("%s: tx_port 0x%x, rx_port 0x%x\n",
+		__func__, tx_port, rx_port);
+
+	ret = afe_q6_interface_prepare();
+	if (ret != 0) {
+		pr_err("%s: Q6 interface prepare failed %d\n", __func__, ret);
+		return -EINVAL;
+	}
+
+	index = q6audio_get_port_index(tx_port);
+	if (index < 0 || index >= AFE_MAX_PORTS) {
+		pr_err("%s: AFE port index[%d] invalid!\n",
+				__func__, index);
+		return -EINVAL;
+	}
+	ret = q6audio_validate_port(tx_port);
+	if (ret < 0) {
+		pr_err("%s: port id: 0x%x ret %d\n", __func__, tx_port, ret);
+		return -EINVAL;
+	}
+	pr_debug("%s: AANC sample rate tx rate: %d rx rate %d\n",
+		__func__, this_afe.aanc_info.aanc_tx_port_sample_rate,
+	       this_afe.aanc_info.aanc_rx_port_sample_rate);
+	/*
+	 * If aanc tx sample rate or rx sample rate is zero, skip aanc
+	 * configuration as AFE resampler will fail for invalid sample
+	 * rates.
+	 */
+	if (!this_afe.aanc_info.aanc_tx_port_sample_rate ||
+	    !this_afe.aanc_info.aanc_rx_port_sample_rate) {
+		return -EINVAL;
+	}
+
+	cfg.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+			APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
+	cfg.hdr.pkt_size = sizeof(cfg);
+	cfg.hdr.src_port = 0;
+	cfg.hdr.dest_port = 0;
+	cfg.hdr.token = index;
+	cfg.hdr.opcode = AFE_PORT_CMD_SET_PARAM_V2;
+
+	cfg.param.port_id = tx_port;
+	cfg.param.payload_size        = sizeof(struct afe_port_param_data_v2) +
+					sizeof(struct afe_param_aanc_port_cfg);
+	cfg.param.payload_address_lsw     = 0;
+	cfg.param.payload_address_msw     = 0;
+	cfg.param.mem_map_handle	  = 0;
+
+	cfg.pdata.module_id = AFE_MODULE_AANC;
+	cfg.pdata.param_id    = AFE_PARAM_ID_AANC_PORT_CONFIG;
+	cfg.pdata.param_size = sizeof(struct afe_param_aanc_port_cfg);
+	cfg.pdata.reserved    = 0;
+
+	cfg.data.aanc_port_cfg.aanc_port_cfg_minor_version =
+		AFE_API_VERSION_AANC_PORT_CONFIG;
+	cfg.data.aanc_port_cfg.tx_port_sample_rate =
+		this_afe.aanc_info.aanc_tx_port_sample_rate;
+	cfg.data.aanc_port_cfg.tx_port_channel_map[0] = AANC_TX_VOICE_MIC;
+	cfg.data.aanc_port_cfg.tx_port_channel_map[1] = AANC_TX_NOISE_MIC;
+	cfg.data.aanc_port_cfg.tx_port_channel_map[2] = AANC_TX_ERROR_MIC;
+	cfg.data.aanc_port_cfg.tx_port_channel_map[3] = AANC_TX_MIC_UNUSED;
+	cfg.data.aanc_port_cfg.tx_port_channel_map[4] = AANC_TX_MIC_UNUSED;
+	cfg.data.aanc_port_cfg.tx_port_channel_map[5] = AANC_TX_MIC_UNUSED;
+	cfg.data.aanc_port_cfg.tx_port_channel_map[6] = AANC_TX_MIC_UNUSED;
+	cfg.data.aanc_port_cfg.tx_port_channel_map[7] = AANC_TX_MIC_UNUSED;
+	cfg.data.aanc_port_cfg.tx_port_num_channels = 3;
+	cfg.data.aanc_port_cfg.rx_path_ref_port_id = rx_port;
+	cfg.data.aanc_port_cfg.ref_port_sample_rate =
+		 this_afe.aanc_info.aanc_rx_port_sample_rate;
+
+	ret = afe_apr_send_pkt((uint32_t *) &cfg, &this_afe.wait[index]);
+	if (ret) {
+		pr_err("%s: AFE AANC port config failed for tx_port 0x%x, rx_port 0x%x ret %d\n",
+			__func__, tx_port, rx_port, ret);
+	}
+
+	return ret;
+}
+
+static int afe_aanc_mod_enable(void *apr, uint16_t tx_port, uint16_t enable)
+{
+	struct afe_port_cmd_set_aanc_param cfg;
+	int ret = 0;
+	int index = 0;
+
+	pr_debug("%s: tx_port 0x%x\n",
+		__func__, tx_port);
+
+	ret = afe_q6_interface_prepare();
+	if (ret != 0) {
+		pr_err("%s: Q6 interface prepare failed %d\n", __func__, ret);
+		return -EINVAL;
+	}
+
+	index = q6audio_get_port_index(tx_port);
+	if (index < 0 || index >= AFE_MAX_PORTS) {
+		pr_err("%s: AFE port index[%d] invalid!\n",
+				__func__, index);
+		return -EINVAL;
+	}
+	ret = q6audio_validate_port(tx_port);
+	if (ret < 0) {
+		pr_err("%s: port id: 0x%x ret %d\n", __func__, tx_port, ret);
+		return -EINVAL;
+	}
+
+	cfg.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+			APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
+	cfg.hdr.pkt_size = sizeof(cfg);
+	cfg.hdr.src_port = 0;
+	cfg.hdr.dest_port = 0;
+	cfg.hdr.token = index;
+	cfg.hdr.opcode = AFE_PORT_CMD_SET_PARAM_V2;
+
+	cfg.param.port_id = tx_port;
+	cfg.param.payload_size        = sizeof(struct afe_port_param_data_v2) +
+					sizeof(struct afe_mod_enable_param);
+	cfg.param.payload_address_lsw     = 0;
+	cfg.param.payload_address_lsw     = 0;
+	cfg.param.payload_address_msw     = 0;
+
+	cfg.pdata.module_id = AFE_MODULE_AANC;
+	cfg.pdata.param_id    = AFE_PARAM_ID_ENABLE;
+	cfg.pdata.param_size = sizeof(struct afe_mod_enable_param);
+	cfg.pdata.reserved    = 0;
+
+	cfg.data.mod_enable.enable = enable;
+	cfg.data.mod_enable.reserved = 0;
+
+	ret = afe_apr_send_pkt((uint32_t *) &cfg, &this_afe.wait[index]);
+	if (ret) {
+		pr_err("%s: AFE AANC enable failed for tx_port 0x%x ret %d\n",
+			__func__, tx_port, ret);
+	}
+	return ret;
+}
+
+static int afe_send_bank_selection_clip(
+		struct afe_param_id_clip_bank_sel *param)
+{
+	int ret;
+	struct afe_svc_cmd_set_clip_bank_selection config;
+
+	if (!param) {
+		pr_err("%s: Invalid params", __func__);
+		return -EINVAL;
+	}
+	config.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+			APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
+	config.hdr.pkt_size = sizeof(config);
+	config.hdr.src_port = 0;
+	config.hdr.dest_port = 0;
+	config.hdr.token = IDX_GLOBAL_CFG;
+	config.hdr.opcode = AFE_SVC_CMD_SET_PARAM;
+
+	config.param.payload_size = sizeof(struct afe_port_param_data_v2) +
+				sizeof(struct afe_param_id_clip_bank_sel);
+	config.param.payload_address_lsw = 0x00;
+	config.param.payload_address_msw = 0x00;
+	config.param.mem_map_handle = 0x00;
+
+	config.pdata.module_id = AFE_MODULE_CDC_DEV_CFG;
+	config.pdata.param_id = AFE_PARAM_ID_CLIP_BANK_SEL_CFG;
+	config.pdata.param_size =
+		sizeof(struct afe_param_id_clip_bank_sel);
+	config.bank_sel = *param;
+	ret = afe_apr_send_pkt(&config, &this_afe.wait[IDX_GLOBAL_CFG]);
+	if (ret) {
+		pr_err("%s: AFE_PARAM_ID_CLIP_BANK_SEL_CFG failed %d\n",
+		__func__, ret);
+	}
+	return ret;
+}
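+
+/*
+ * Report the codec's AANC hardware version to the AFE codec device
+ * config module (AFE_PARAM_ID_CDC_AANC_VERSION).
+ */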
+int afe_send_aanc_version(
+	struct afe_param_id_cdc_aanc_version *version_cfg)
+{
+	int ret;
+	struct afe_svc_cmd_cdc_aanc_version config;
+
+	pr_debug("%s: enter\n", __func__);
+	config.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+			APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
+	config.hdr.pkt_size = sizeof(config);
+	config.hdr.src_port = 0;
+	config.hdr.dest_port = 0;
+	config.hdr.token = IDX_GLOBAL_CFG;
+	config.hdr.opcode = AFE_SVC_CMD_SET_PARAM;
+
+	config.param.payload_size = sizeof(struct afe_port_param_data_v2) +
+				sizeof(struct afe_param_id_cdc_aanc_version);
+	config.param.payload_address_lsw = 0x00;
+	config.param.payload_address_msw = 0x00;
+	config.param.mem_map_handle = 0x00;
+
+	config.pdata.module_id = AFE_MODULE_CDC_DEV_CFG;
+	config.pdata.param_id = AFE_PARAM_ID_CDC_AANC_VERSION;
+	config.pdata.param_size =
+		sizeof(struct afe_param_id_cdc_aanc_version);
+	config.version = *version_cfg;
+	ret = afe_apr_send_pkt(&config, &this_afe.wait[IDX_GLOBAL_CFG]);
+	if (ret) {
+		pr_err("%s: AFE_PARAM_ID_CDC_AANC_VERSION failed %d\n",
+		__func__, ret);
+	}
+	return ret;
+}
+
+int afe_port_set_mad_type(u16 port_id, enum afe_mad_type mad_type)
+{
+	int i;
+
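+	/*
+	 * Tertiary and INT3 MI2S TX are fixed to MAD_SW_AUDIO (see
+	 * afe_port_get_mad_type()); nothing needs to be stored for them.
+	 */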
+	if (port_id == AFE_PORT_ID_TERTIARY_MI2S_TX ||
+		port_id == AFE_PORT_ID_INT3_MI2S_TX) {
+		mad_type = MAD_SW_AUDIO;
+		return 0;
+	}
+
+	i = port_id - SLIMBUS_0_RX;
+	if (i < 0 || i >= ARRAY_SIZE(afe_ports_mad_type)) {
+		pr_err("%s: Invalid port_id 0x%x\n", __func__, port_id);
+		return -EINVAL;
+	}
+	atomic_set(&afe_ports_mad_type[i], mad_type);
+	return 0;
+}
+
+enum afe_mad_type afe_port_get_mad_type(u16 port_id)
+{
+	int i;
+
+	if (port_id == AFE_PORT_ID_TERTIARY_MI2S_TX ||
+		port_id == AFE_PORT_ID_INT3_MI2S_TX)
+		return MAD_SW_AUDIO;
+
+	i = port_id - SLIMBUS_0_RX;
+	if (i < 0 || i >= ARRAY_SIZE(afe_ports_mad_type)) {
+		pr_debug("%s: Non Slimbus port_id 0x%x\n", __func__, port_id);
+		return MAD_HW_NONE;
+	}
+	return (enum afe_mad_type) atomic_read(&afe_ports_mad_type[i]);
+}
+
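+/*
+ * Dispatch a one-shot codec/AFE service configuration by type and, on
+ * success, mark it in afe_configured_cmd so afe_has_config() can gate
+ * dependent features such as HW MAD start.
+ */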
+int afe_set_config(enum afe_config_type config_type, void *config_data, int arg)
+{
+	int ret;
+
+	pr_debug("%s: enter config_type %d\n", __func__, config_type);
+	ret = afe_q6_interface_prepare();
+	if (ret) {
+		pr_err("%s: Q6 interface prepare failed %d\n", __func__, ret);
+		return ret;
+	}
+
+	switch (config_type) {
+	case AFE_SLIMBUS_SLAVE_CONFIG:
+		ret = afe_send_slimbus_slave_cfg(config_data);
+		if (!ret)
+			ret = afe_init_cdc_reg_config();
+		else
+			pr_err("%s: Sending slimbus slave config failed %d\n",
+			       __func__, ret);
+		break;
+	case AFE_CDC_REGISTERS_CONFIG:
+		ret = afe_send_codec_reg_config(config_data);
+		break;
+	case AFE_SLIMBUS_SLAVE_PORT_CONFIG:
+		ret = afe_send_slimbus_slave_port_cfg(config_data, arg);
+		break;
+	case AFE_AANC_VERSION:
+		ret = afe_send_aanc_version(config_data);
+		break;
+	case AFE_CLIP_BANK_SEL:
+		ret = afe_send_bank_selection_clip(config_data);
+		break;
+	case AFE_CDC_CLIP_REGISTERS_CONFIG:
+		ret = afe_send_codec_reg_config(config_data);
+		break;
+	case AFE_CDC_REGISTER_PAGE_CONFIG:
+		ret = afe_send_codec_reg_page_config(config_data);
+		break;
+	default:
+		pr_err("%s: unknown configuration type %d",
+			__func__, config_type);
+		ret = -EINVAL;
+	}
+
+	if (!ret)
+		set_bit(config_type, &afe_configured_cmd);
+
+	return ret;
+}
+
+/*
+ * afe_clear_config - On SSR the ADSP loses its AFE configuration; record
+ *		      that state in the AFE driver so client drivers can
+ *		      wait until AFE is reconfigured.
+ */
+void afe_clear_config(enum afe_config_type config)
+{
+	clear_bit(config, &afe_configured_cmd);
+}
+
+bool afe_has_config(enum afe_config_type config)
+{
+	return !!test_bit(config, &afe_configured_cmd);
+}
+
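+/*
+ * The senders below use the synchronous APR pattern directly: set
+ * state = 1 and status = 0, send the packet, wait on the per-port wait
+ * queue until state is cleared (or the wait times out), then translate
+ * a non-zero ADSP status into a Linux error code.
+ */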
+int afe_send_spdif_clk_cfg(struct afe_param_id_spdif_clk_cfg *cfg,
+		u16 port_id)
+{
+	struct afe_spdif_clk_config_command clk_cfg;
+	int ret = 0;
+	int index = 0;
+
+	if (!cfg) {
+		pr_err("%s: Error, no configuration data\n", __func__);
+		ret = -EINVAL;
+		return ret;
+	}
+	index = q6audio_get_port_index(port_id);
+	if (index < 0 || index >= AFE_MAX_PORTS) {
+		pr_err("%s: AFE port index[%d] invalid!\n",
+				__func__, index);
+		return -EINVAL;
+	}
+	ret = q6audio_validate_port(port_id);
+	if (ret < 0) {
+		pr_err("%s: port id: 0x%x ret %d\n", __func__, port_id, ret);
+		return -EINVAL;
+	}
+
+	ret = afe_q6_interface_prepare();
+	if (ret) {
+		pr_err("%s: Q6 interface prepare failed %d\n", __func__, ret);
+		return ret;
+	}
+	clk_cfg.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+			APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
+	clk_cfg.hdr.pkt_size = sizeof(clk_cfg);
+	clk_cfg.hdr.src_port = 0;
+	clk_cfg.hdr.dest_port = 0;
+	clk_cfg.hdr.token = index;
+
+	clk_cfg.hdr.opcode = AFE_PORT_CMD_SET_PARAM_V2;
+	clk_cfg.param.port_id = q6audio_get_port_id(port_id);
+	clk_cfg.param.payload_address_lsw = 0x00;
+	clk_cfg.param.payload_address_msw = 0x00;
+	clk_cfg.param.mem_map_handle = 0x00;
+	clk_cfg.pdata.module_id = AFE_MODULE_AUDIO_DEV_INTERFACE;
+	clk_cfg.pdata.param_id = AFE_PARAM_ID_SPDIF_CLK_CONFIG;
+	clk_cfg.pdata.param_size =  sizeof(clk_cfg.clk_cfg);
+	clk_cfg.param.payload_size = sizeof(clk_cfg) - sizeof(struct apr_hdr)
+		- sizeof(clk_cfg.param);
+	clk_cfg.clk_cfg = *cfg;
+
+	pr_debug("%s: Minor version = 0x%x clk val = %d clk root = 0x%x port id = 0x%x\n",
+			__func__, cfg->clk_cfg_minor_version,
+			cfg->clk_value, cfg->clk_root,
+			q6audio_get_port_id(port_id));
+
+	atomic_set(&this_afe.state, 1);
+	atomic_set(&this_afe.status, 0);
+	ret = apr_send_pkt(this_afe.apr, (uint32_t *) &clk_cfg);
+	if (ret < 0) {
+		pr_err("%s: AFE send clock config for port 0x%x failed ret = %d\n",
+				__func__, port_id, ret);
+		ret = -EINVAL;
+		goto fail_cmd;
+	}
+
+	ret = wait_event_timeout(this_afe.wait[index],
+			(atomic_read(&this_afe.state) == 0),
+			msecs_to_jiffies(TIMEOUT_MS));
+	if (!ret) {
+		pr_err("%s: wait_event timeout\n",
+				__func__);
+		ret = -EINVAL;
+		goto fail_cmd;
+	}
+	if (atomic_read(&this_afe.status) > 0) {
+		pr_err("%s: config cmd failed [%s]\n",
+			__func__, adsp_err_get_err_str(
+			atomic_read(&this_afe.status)));
+		ret = adsp_err_get_lnx_err_code(
+				atomic_read(&this_afe.status));
+		goto fail_cmd;
+	}
+
+fail_cmd:
+	return ret;
+}
+
+int afe_send_spdif_ch_status_cfg(struct afe_param_id_spdif_ch_status_cfg
+		*ch_status_cfg,	u16 port_id)
+{
+	struct afe_spdif_chstatus_config_command ch_status;
+	int ret = 0;
+	int index = 0;
+
+	if (!ch_status_cfg) {
+		pr_err("%s: Error, no configuration data\n", __func__);
+		ret = -EINVAL;
+		return ret;
+	}
+	index = q6audio_get_port_index(port_id);
+	if (index < 0 || index >= AFE_MAX_PORTS) {
+		pr_err("%s: AFE port index[%d] invalid!\n",
+				__func__, index);
+		return -EINVAL;
+	}
+	ret = q6audio_validate_port(port_id);
+	if (ret < 0) {
+		pr_err("%s: port id: 0x%x ret %d\n", __func__, port_id, ret);
+		return -EINVAL;
+	}
+
+	ret = afe_q6_interface_prepare();
+	if (ret != 0) {
+		pr_err("%s: Q6 interface prepare failed %d\n", __func__, ret);
+		return ret;
+	}
+	ch_status.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+			APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
+	ch_status.hdr.pkt_size = sizeof(ch_status);
+	ch_status.hdr.src_port = 0;
+	ch_status.hdr.dest_port = 0;
+	ch_status.hdr.token = index;
+
+	ch_status.hdr.opcode = AFE_PORT_CMD_SET_PARAM_V2;
+	ch_status.param.port_id = q6audio_get_port_id(port_id);
+	ch_status.param.payload_address_lsw = 0x00;
+	ch_status.param.payload_address_msw = 0x00;
+	ch_status.param.mem_map_handle = 0x00;
+	ch_status.pdata.module_id = AFE_MODULE_AUDIO_DEV_INTERFACE;
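+	/* FIXME: this reuses the SPDIF clock-config param ID; the channel
+	 * status block presumably wants its own param ID
+	 */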
+	ch_status.pdata.param_id = AFE_PARAM_ID_SPDIF_CLK_CONFIG;
+	ch_status.pdata.param_size =  sizeof(ch_status.ch_status);
+	ch_status.param.payload_size = sizeof(ch_status)
+		- sizeof(struct apr_hdr) - sizeof(ch_status.param);
+	ch_status.ch_status = *ch_status_cfg;
+
+	atomic_set(&this_afe.state, 1);
+	atomic_set(&this_afe.status, 0);
+	ret = apr_send_pkt(this_afe.apr, (uint32_t *) &ch_status);
+	if (ret < 0) {
+		pr_err("%s: AFE send channel status for port 0x%x failed ret = %d\n",
+				__func__, port_id, ret);
+		ret = -EINVAL;
+		goto fail_cmd;
+	}
+
+	ret = wait_event_timeout(this_afe.wait[index],
+			(atomic_read(&this_afe.state) == 0),
+			msecs_to_jiffies(TIMEOUT_MS));
+	if (!ret) {
+		pr_err("%s: wait_event timeout\n",
+				__func__);
+		ret = -EINVAL;
+		goto fail_cmd;
+	}
+	if (atomic_read(&this_afe.status) > 0) {
+		pr_err("%s: config cmd failed [%s]\n",
+			__func__, adsp_err_get_err_str(
+			atomic_read(&this_afe.status)));
+		ret = adsp_err_get_lnx_err_code(
+				atomic_read(&this_afe.status));
+		goto fail_cmd;
+	}
+
+fail_cmd:
+	return ret;
+}
+
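+/*
+ * Issue AFE_PORT_CMD_DEVICE_START for the port and remember the calling
+ * task for debug logging.
+ */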
+static int afe_send_cmd_port_start(u16 port_id)
+{
+	struct afe_port_cmd_device_start start;
+	int ret, index;
+
+	pr_debug("%s: enter\n", __func__);
+	index = q6audio_get_port_index(port_id);
+	if (index < 0 || index >= AFE_MAX_PORTS) {
+		pr_err("%s: AFE port index[%d] invalid!\n",
+				__func__, index);
+		return -EINVAL;
+	}
+	ret = q6audio_validate_port(port_id);
+	if (ret < 0) {
+		pr_err("%s: port id: 0x%x ret %d\n", __func__, port_id, ret);
+		return -EINVAL;
+	}
+
+	start.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+					    APR_HDR_LEN(APR_HDR_SIZE),
+					    APR_PKT_VER);
+	start.hdr.pkt_size = sizeof(start);
+	start.hdr.src_port = 0;
+	start.hdr.dest_port = 0;
+	start.hdr.token = index;
+	start.hdr.opcode = AFE_PORT_CMD_DEVICE_START;
+	start.port_id = q6audio_get_port_id(port_id);
+	pr_debug("%s: cmd device start opcode[0x%x] port id[0x%x]\n",
+		 __func__, start.hdr.opcode, start.port_id);
+
+	ret = afe_apr_send_pkt(&start, &this_afe.wait[index]);
+	if (ret) {
+		pr_err("%s: AFE enable for port 0x%x failed %d\n", __func__,
+		       port_id, ret);
+	} else if (this_afe.task != current) {
+		this_afe.task = current;
+		pr_debug("task_name = %s pid = %d\n",
+			 this_afe.task->comm, this_afe.task->pid);
+	}
+
+	return ret;
+}
+
+static int afe_aanc_start(uint16_t tx_port_id, uint16_t rx_port_id)
+{
+	int ret;
+
+	pr_debug("%s:  Tx port is 0x%x, Rx port is 0x%x\n",
+		 __func__, tx_port_id, rx_port_id);
+	ret = afe_aanc_port_cfg(this_afe.apr, tx_port_id, rx_port_id);
+	if (ret) {
+		pr_err("%s: Send AANC Port Config failed %d\n",
+			__func__, ret);
+		goto fail_cmd;
+	}
+	send_afe_cal_type(AFE_AANC_CAL, tx_port_id);
+
+fail_cmd:
+	return ret;
+}
+
+int afe_spdif_port_start(u16 port_id, struct afe_spdif_port_config *spdif_port,
+		u32 rate)
+{
+	struct afe_audioif_config_command config;
+	int ret = 0;
+	int index = 0;
+	uint16_t port_index;
+
+	if (!spdif_port) {
+		pr_err("%s: Error, no configuration data\n", __func__);
+		ret = -EINVAL;
+		return ret;
+	}
+
+	pr_debug("%s: port id: 0x%x\n", __func__, port_id);
+
+	index = q6audio_get_port_index(port_id);
+	if (index < 0 || index >= AFE_MAX_PORTS) {
+		pr_err("%s: AFE port index[%d] invalid!\n",
+				__func__, index);
+		return -EINVAL;
+	}
+	ret = q6audio_validate_port(port_id);
+	if (ret < 0) {
+		pr_err("%s: port id: 0x%x ret %d\n", __func__, port_id, ret);
+		return -EINVAL;
+	}
+
+	afe_send_cal(port_id);
+	afe_send_hw_delay(port_id, rate);
+
+	config.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+			APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
+	config.hdr.pkt_size = sizeof(config);
+	config.hdr.src_port = 0;
+	config.hdr.dest_port = 0;
+	config.hdr.token = index;
+	config.hdr.opcode = AFE_PORT_CMD_SET_PARAM_V2;
+	config.param.port_id = q6audio_get_port_id(port_id);
+	config.param.payload_size = sizeof(config) - sizeof(struct apr_hdr) -
+		sizeof(config.param);
+	config.param.payload_address_lsw = 0x00;
+	config.param.payload_address_msw = 0x00;
+	config.param.mem_map_handle = 0x00;
+	config.pdata.module_id = AFE_MODULE_AUDIO_DEV_INTERFACE;
+	config.pdata.param_id = AFE_PARAM_ID_SPDIF_CONFIG;
+	config.pdata.param_size = sizeof(config.port);
+	config.port.spdif = spdif_port->cfg;
+	ret = afe_apr_send_pkt(&config, &this_afe.wait[index]);
+	if (ret) {
+		pr_err("%s: AFE enable for port 0x%x failed ret = %d\n",
+				__func__, port_id, ret);
+		goto fail_cmd;
+	}
+
+	port_index = afe_get_port_index(port_id);
+	if ((port_index >= 0) && (port_index < AFE_MAX_PORTS)) {
+		this_afe.afe_sample_rates[port_index] = rate;
+	} else {
+		pr_err("%s: Invalid port index %d\n", __func__, port_index);
+		ret = -EINVAL;
+		goto fail_cmd;
+	}
+
+	ret = afe_send_spdif_ch_status_cfg(&spdif_port->ch_status, port_id);
+	if (ret < 0) {
+		pr_err("%s: afe send failed %d\n", __func__, ret);
+		goto fail_cmd;
+	}
+
+	return afe_send_cmd_port_start(port_id);
+
+fail_cmd:
+	return ret;
+}
+
+int afe_send_slot_mapping_cfg(
+	struct afe_param_id_slot_mapping_cfg *slot_mapping_cfg,
+	u16 port_id)
+{
+	struct afe_slot_mapping_config_command config;
+	int ret = 0;
+	int index = 0;
+
+	if (!slot_mapping_cfg) {
+		pr_err("%s: Error, no configuration data\n", __func__);
+		return -EINVAL;
+	}
+
+	pr_debug("%s: port id: 0x%x\n", __func__, port_id);
+
+	index = q6audio_get_port_index(port_id);
+	if (index < 0 || index >= AFE_MAX_PORTS) {
+		pr_err("%s: AFE port index[%d] invalid!\n",
+				__func__, index);
+		return -EINVAL;
+	}
+	ret = q6audio_validate_port(port_id);
+	if (ret < 0) {
+		pr_err("%s: port id: 0x%x ret %d\n", __func__, port_id, ret);
+		return -EINVAL;
+	}
+
+	memset(&config, 0, sizeof(config));
+	config.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+			APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
+	config.hdr.pkt_size = sizeof(config);
+	config.hdr.src_port = 0;
+	config.hdr.dest_port = 0;
+	config.hdr.token = index;
+
+	config.hdr.opcode = AFE_PORT_CMD_SET_PARAM_V2;
+	config.param.port_id = q6audio_get_port_id(port_id);
+	config.param.payload_size = sizeof(config)
+		- sizeof(struct apr_hdr) - sizeof(config.param);
+	config.param.payload_address_lsw = 0x00;
+	config.param.payload_address_msw = 0x00;
+	config.param.mem_map_handle = 0x00;
+	config.pdata.module_id = AFE_MODULE_TDM;
+	config.pdata.param_id = AFE_PARAM_ID_PORT_SLOT_MAPPING_CONFIG;
+	config.pdata.param_size =  sizeof(config.slot_mapping);
+	config.slot_mapping = *slot_mapping_cfg;
+
+	atomic_set(&this_afe.state, 1);
+	atomic_set(&this_afe.status, 0);
+	ret = apr_send_pkt(this_afe.apr, (uint32_t *) &config);
+	if (ret < 0) {
+		pr_err("%s: AFE send slot mapping for port 0x%x failed ret = %d\n",
+				__func__, port_id, ret);
+		ret = -EINVAL;
+		goto fail_cmd;
+	}
+
+	ret = wait_event_timeout(this_afe.wait[index],
+			(atomic_read(&this_afe.state) == 0),
+			msecs_to_jiffies(TIMEOUT_MS));
+	if (!ret) {
+		pr_err("%s: wait_event timeout\n",
+				__func__);
+		ret = -EINVAL;
+		goto fail_cmd;
+	}
+	if (atomic_read(&this_afe.status) > 0) {
+		pr_err("%s: config cmd failed [%s]\n",
+			__func__, adsp_err_get_err_str(
+			atomic_read(&this_afe.status)));
+		ret = adsp_err_get_lnx_err_code(
+				atomic_read(&this_afe.status));
+		goto fail_cmd;
+	}
+
+fail_cmd:
+	return ret;
+}
+
+int afe_send_custom_tdm_header_cfg(
+	struct afe_param_id_custom_tdm_header_cfg *custom_tdm_header_cfg,
+	u16 port_id)
+{
+	struct afe_custom_tdm_header_config_command config;
+	int ret = 0;
+	int index = 0;
+
+	if (!custom_tdm_header_cfg) {
+		pr_err("%s: Error, no configuration data\n", __func__);
+		return -EINVAL;
+	}
+
+	pr_debug("%s: port id: 0x%x\n", __func__, port_id);
+
+	index = q6audio_get_port_index(port_id);
+	if (index < 0 || index >= AFE_MAX_PORTS) {
+		pr_err("%s: AFE port index[%d] invalid!\n",
+				__func__, index);
+		return -EINVAL;
+	}
+	ret = q6audio_validate_port(port_id);
+	if (ret < 0) {
+		pr_err("%s: port id: 0x%x ret %d\n", __func__, port_id, ret);
+		return -EINVAL;
+	}
+
+	memset(&config, 0, sizeof(config));
+	config.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+			APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
+	config.hdr.pkt_size = sizeof(config);
+	config.hdr.src_port = 0;
+	config.hdr.dest_port = 0;
+	config.hdr.token = index;
+
+	config.hdr.opcode = AFE_PORT_CMD_SET_PARAM_V2;
+	config.param.port_id = q6audio_get_port_id(port_id);
+	config.param.payload_size = sizeof(config)
+		- sizeof(struct apr_hdr) - sizeof(config.param);
+	config.param.payload_address_lsw = 0x00;
+	config.param.payload_address_msw = 0x00;
+	config.param.mem_map_handle = 0x00;
+	config.pdata.module_id = AFE_MODULE_TDM;
+	config.pdata.param_id = AFE_PARAM_ID_CUSTOM_TDM_HEADER_CONFIG;
+	config.pdata.param_size =  sizeof(config.custom_tdm_header);
+	config.custom_tdm_header = *custom_tdm_header_cfg;
+
+	atomic_set(&this_afe.state, 1);
+	atomic_set(&this_afe.status, 0);
+	ret = apr_send_pkt(this_afe.apr, (uint32_t *) &config);
+	if (ret < 0) {
+		pr_err("%s: AFE send custom tdm header for port 0x%x failed ret = %d\n",
+				__func__, port_id, ret);
+		ret = -EINVAL;
+		goto fail_cmd;
+	}
+
+	ret = wait_event_timeout(this_afe.wait[index],
+			(atomic_read(&this_afe.state) == 0),
+			msecs_to_jiffies(TIMEOUT_MS));
+	if (!ret) {
+		pr_err("%s: wait_event timeout\n",
+				__func__);
+		ret = -EINVAL;
+		goto fail_cmd;
+	}
+	if (atomic_read(&this_afe.status) > 0) {
+		pr_err("%s: config cmd failed [%s]\n",
+			__func__, adsp_err_get_err_str(
+			atomic_read(&this_afe.status)));
+		ret = adsp_err_get_lnx_err_code(
+				atomic_read(&this_afe.status));
+		goto fail_cmd;
+	}
+
+fail_cmd:
+	return ret;
+}
+
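+/*
+ * Bring up a TDM port: optionally send topology/cal/hw-delay (multi-group
+ * case), start HW MAD if configured, send the TDM port config, the slot
+ * mapping (only when more than one group is in use) and any custom TDM
+ * header, then start the device.
+ */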
+int afe_tdm_port_start(u16 port_id, struct afe_tdm_port_config *tdm_port,
+		       u32 rate, u16 num_groups)
+{
+	struct afe_audioif_config_command config;
+	int ret = 0;
+	int index = 0;
+	uint16_t port_index = 0;
+	enum afe_mad_type mad_type = MAD_HW_NONE;
+
+	if (!tdm_port) {
+		pr_err("%s: Error, no configuration data\n", __func__);
+		return -EINVAL;
+	}
+
+	pr_debug("%s: port id: 0x%x\n", __func__, port_id);
+
+	index = q6audio_get_port_index(port_id);
+	if (index < 0 || index >= AFE_MAX_PORTS) {
+		pr_err("%s: AFE port index[%d] invalid!\n",
+				__func__, index);
+		return -EINVAL;
+	}
+	ret = q6audio_validate_port(port_id);
+	if (ret < 0) {
+		pr_err("%s: port id: 0x%x ret %d\n", __func__, port_id, ret);
+		return -EINVAL;
+	}
+
+	ret = afe_q6_interface_prepare();
+	if (ret != 0) {
+		pr_err("%s: Q6 interface prepare failed %d\n", __func__, ret);
+		return ret;
+	}
+
+	if ((index >= 0) && (index < AFE_MAX_PORTS)) {
+		this_afe.afe_sample_rates[index] = rate;
+
+		if (this_afe.rt_cb)
+			this_afe.dev_acdb_id[index] = this_afe.rt_cb(port_id);
+	}
+
+	/* Also send the topology id here when multiple TDM groups are in use: */
+	port_index = afe_get_port_index(port_id);
+	if (!(this_afe.afe_cal_mode[port_index] == AFE_CAL_MODE_NONE) &&
+	    num_groups > 1) {
+		/* One time call: only for first time */
+		afe_send_custom_topology();
+		afe_send_port_topology_id(port_id);
+		afe_send_cal(port_id);
+		afe_send_hw_delay(port_id, rate);
+	}
+
+	/* Start SW MAD module */
+	mad_type = afe_port_get_mad_type(port_id);
+	pr_debug("%s: port_id 0x%x, mad_type %d\n", __func__, port_id,
+		 mad_type);
+	if (mad_type != MAD_HW_NONE && mad_type != MAD_SW_AUDIO) {
+		if (!afe_has_config(AFE_CDC_REGISTERS_CONFIG) ||
+			!afe_has_config(AFE_SLIMBUS_SLAVE_CONFIG)) {
+				pr_err("%s: AFE isn't configured yet for HW MAD, try again\n",
+					   __func__);
+				ret = -EAGAIN;
+				goto fail_cmd;
+		}
+		ret = afe_turn_onoff_hw_mad(mad_type, true);
+		if (ret) {
+			pr_err("%s: afe_turn_onoff_hw_mad failed %d\n",
+			       __func__, ret);
+			goto fail_cmd;
+		}
+	}
+
+	memset(&config, 0, sizeof(config));
+	config.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+			APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
+	config.hdr.pkt_size = sizeof(config);
+	config.hdr.src_port = 0;
+	config.hdr.dest_port = 0;
+	config.hdr.token = index;
+	config.hdr.opcode = AFE_PORT_CMD_SET_PARAM_V2;
+	config.param.port_id = q6audio_get_port_id(port_id);
+	config.param.payload_size = sizeof(config) - sizeof(struct apr_hdr) -
+		sizeof(config.param);
+	config.param.payload_address_lsw = 0x00;
+	config.param.payload_address_msw = 0x00;
+	config.param.mem_map_handle = 0x00;
+	config.pdata.module_id = AFE_MODULE_AUDIO_DEV_INTERFACE;
+	config.pdata.param_id = AFE_PARAM_ID_TDM_CONFIG;
+	config.pdata.param_size = sizeof(config.port);
+	config.port.tdm = tdm_port->tdm;
+
+	ret = afe_apr_send_pkt(&config, &this_afe.wait[index]);
+	if (ret) {
+		pr_err("%s: AFE enable for port 0x%x failed ret = %d\n",
+				__func__, port_id, ret);
+		goto fail_cmd;
+	}
+
+	port_index = afe_get_port_index(port_id);
+	if ((port_index >= 0) && (port_index < AFE_MAX_PORTS)) {
+		this_afe.afe_sample_rates[port_index] = rate;
+	} else {
+		pr_err("%s: Invalid port index %d\n", __func__, port_index);
+		ret = -EINVAL;
+		goto fail_cmd;
+	}
+	/* slot mapping is not needed if there is only one group */
+	if (num_groups > 1) {
+		ret = afe_send_slot_mapping_cfg(&tdm_port->slot_mapping,
+						port_id);
+		if (ret < 0) {
+			pr_err("%s: afe send failed %d\n", __func__, ret);
+			goto fail_cmd;
+		}
+	}
+
+	if (tdm_port->custom_tdm_header.header_type) {
+		ret = afe_send_custom_tdm_header_cfg(
+			&tdm_port->custom_tdm_header, port_id);
+		if (ret < 0) {
+			pr_err("%s: afe send failed %d\n", __func__, ret);
+			goto fail_cmd;
+		}
+	}
+
+	ret = afe_send_cmd_port_start(port_id);
+
+fail_cmd:
+	return ret;
+}
+
+void afe_set_cal_mode(u16 port_id, enum afe_cal_mode afe_cal_mode)
+{
+	uint16_t port_index;
+
+	port_index = afe_get_port_index(port_id);
+	this_afe.afe_cal_mode[port_index] = afe_cal_mode;
+}
+
+void afe_set_routing_callback(routing_cb cb)
+{
+	this_afe.rt_cb = cb;
+}
+
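+/*
+ * Send the USB audio device parameters for a USB port: first the device
+ * token, then the LPCM format (endianness), both via the audio device
+ * interface module.
+ */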
+int afe_port_send_usb_dev_param(u16 port_id, union afe_port_config *afe_config)
+{
+	struct afe_usb_audio_dev_param_command config;
+	int ret = 0, index = 0;
+
+	if (!afe_config) {
+		pr_err("%s: Error, no configuration data\n", __func__);
+		ret = -EINVAL;
+		goto exit;
+	}
+	index = q6audio_get_port_index(port_id);
+	if (index < 0 || index >= AFE_MAX_PORTS) {
+		pr_err("%s: AFE port index[%d] invalid! for port ID 0x%x\n",
+				__func__, index, port_id);
+		ret = -EINVAL;
+		goto exit;
+	}
+	memset(&config, 0, sizeof(config));
+	config.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+				APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
+	config.hdr.pkt_size = sizeof(config);
+	config.hdr.src_port = 0;
+	config.hdr.dest_port = 0;
+	config.hdr.token = index;
+	config.hdr.opcode = AFE_PORT_CMD_SET_PARAM_V2;
+	config.param.port_id = q6audio_get_port_id(port_id);
+	config.param.payload_size = sizeof(config) - sizeof(struct apr_hdr) -
+				    sizeof(config.param);
+	config.param.payload_address_lsw = 0x00;
+	config.param.payload_address_msw = 0x00;
+	config.param.mem_map_handle = 0x00;
+	config.pdata.module_id = AFE_MODULE_AUDIO_DEV_INTERFACE;
+	config.pdata.param_id = AFE_PARAM_ID_USB_AUDIO_DEV_PARAMS;
+	config.pdata.param_size = sizeof(config.usb_dev);
+	config.usb_dev.cfg_minor_version =
+				AFE_API_MINIOR_VERSION_USB_AUDIO_CONFIG;
+	config.usb_dev.dev_token = afe_config->usb_audio.dev_token;
+
+	ret = afe_apr_send_pkt(&config, &this_afe.wait[index]);
+	if (ret) {
+		pr_err("%s: AFE device param cmd failed %d\n",
+			__func__, ret);
+		ret = -EINVAL;
+		goto exit;
+	}
+
+	config.pdata.param_id = AFE_PARAM_ID_USB_AUDIO_DEV_LPCM_FMT;
+	config.pdata.param_size = sizeof(config.lpcm_fmt);
+	config.lpcm_fmt.cfg_minor_version =
+		AFE_API_MINIOR_VERSION_USB_AUDIO_CONFIG;
+	config.lpcm_fmt.endian = afe_config->usb_audio.endian;
+
+	ret = afe_apr_send_pkt(&config, &this_afe.wait[index]);
+	if (ret) {
+		pr_err("%s: AFE device param cmd LPCM_FMT failed %d\n",
+			__func__, ret);
+		ret = -EINVAL;
+		goto exit;
+	}
+
+exit:
+	return ret;
+}
+
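+/*
+ * Program the AFE encoder with four SET_PARAM commands in sequence:
+ * encoder format id, encoder config block, packetizer id (COP) and the
+ * port media type (sample rate, bit width, channel count). Only SBC,
+ * AAC, aptX and aptX-HD formats are accepted.
+ */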
+static int q6afe_send_enc_config(u16 port_id,
+				 union afe_enc_config_data *cfg, u32 format,
+				 union afe_port_config afe_config,
+				 u16 afe_in_channels, u16 afe_in_bit_width)
+{
+	struct afe_audioif_config_command config;
+	int index;
+	int ret;
+	int payload_size = sizeof(config) - sizeof(struct apr_hdr) -
+				sizeof(config.param) - sizeof(config.port);
+
+	pr_debug("%s: update DSP for enc format = %d\n", __func__, format);
+	if (format != ASM_MEDIA_FMT_SBC && format != ASM_MEDIA_FMT_AAC_V2 &&
+	    format != ASM_MEDIA_FMT_APTX && format != ASM_MEDIA_FMT_APTX_HD) {
+		pr_err("%s: Unsupported format, ignoring AFE config\n", __func__);
+		return 0;
+	}
+	memset(&config, 0, sizeof(config));
+	index = q6audio_get_port_index(port_id);
+	if (index < 0) {
+		pr_err("%s: Invalid index number: %d\n", __func__, index);
+		return -EINVAL;
+	}
+
+	config.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+				APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
+	config.hdr.pkt_size = sizeof(config);
+	config.hdr.src_port = 0;
+	config.hdr.dest_port = 0;
+	config.hdr.token = index;
+
+	config.hdr.opcode = AFE_PORT_CMD_SET_PARAM_V2;
+	config.param.port_id = q6audio_get_port_id(port_id);
+	config.param.payload_size = payload_size + sizeof(config.port.enc_fmt);
+	config.param.payload_address_lsw = 0x00;
+	config.param.payload_address_msw = 0x00;
+	config.param.mem_map_handle = 0x00;
+	config.pdata.module_id = AFE_MODULE_ID_ENCODER;
+	config.pdata.param_id = AFE_ENCODER_PARAM_ID_ENC_FMT_ID;
+	config.pdata.param_size = sizeof(config.port.enc_fmt);
+	config.port.enc_fmt.fmt_id = format;
+	pr_debug("%s:sending AFE_ENCODER_PARAM_ID_ENC_FMT_ID payload: %d\n",
+			__func__, config.param.payload_size);
+	ret = afe_apr_send_pkt(&config, &this_afe.wait[index]);
+	if (ret) {
+		pr_err("%s: unable to send AFE_ENCODER_PARAM_ID_ENC_FMT_ID\n",
+				__func__);
+		goto exit;
+	}
+
+	config.param.payload_size = payload_size
+					+ sizeof(config.port.enc_blk_param);
+	pr_debug("%s:send AFE_ENCODER_PARAM_ID_ENC_CFG_BLK to DSP payload:%d\n",
+				__func__, config.param.payload_size);
+	config.pdata.param_id = AFE_ENCODER_PARAM_ID_ENC_CFG_BLK;
+	config.pdata.param_size = sizeof(config.port.enc_blk_param);
+	config.port.enc_blk_param.enc_cfg_blk_size =
+			sizeof(config.port.enc_blk_param.enc_blk_config);
+	config.port.enc_blk_param.enc_blk_config = *cfg;
+	ret = afe_apr_send_pkt(&config, &this_afe.wait[index]);
+	if (ret) {
+		pr_err("%s: AFE_ENCODER_PARAM_ID_ENC_CFG_BLK for port 0x%x failed %d\n",
+			__func__, port_id, ret);
+		goto exit;
+	}
+
+	config.param.payload_size =
+			payload_size + sizeof(config.port.enc_pkt_id_param);
+	pr_debug("%s: sending AFE_ENCODER_PARAM_ID_PACKETIZER to DSP payload = %d\n",
+					__func__, config.param.payload_size);
+	config.pdata.param_id = AFE_ENCODER_PARAM_ID_PACKETIZER_ID;
+	config.pdata.param_size = sizeof(config.port.enc_pkt_id_param);
+	config.port.enc_pkt_id_param.enc_packetizer_id =
+					AFE_MODULE_ID_PACKETIZER_COP;
+	ret = afe_apr_send_pkt(&config, &this_afe.wait[index]);
+	if (ret) {
+		pr_err("%s: AFE_ENCODER_PARAM_ID_PACKETIZER for port 0x%x failed %d\n",
+			__func__, port_id, ret);
+		goto exit;
+	}
+
+	config.param.payload_size =
+			payload_size + sizeof(config.port.media_type);
+	config.pdata.param_size = sizeof(config.port.media_type);
+
+	pr_debug("%s: sending AFE_PARAM_ID_PORT_MEDIA_TYPE to DSP\n", __func__);
+	config.pdata.module_id = AFE_MODULE_PORT;
+	config.pdata.param_id = AFE_PARAM_ID_PORT_MEDIA_TYPE;
+	config.port.media_type.minor_version = AFE_API_VERSION_PORT_MEDIA_TYPE;
+	config.port.media_type.sample_rate = afe_config.slim_sch.sample_rate;
+	if (afe_in_bit_width)
+		config.port.media_type.bit_width = afe_in_bit_width;
+	else
+		config.port.media_type.bit_width =
+					afe_config.slim_sch.bit_width;
+
+	if (afe_in_channels)
+		config.port.media_type.num_channels = afe_in_channels;
+	else
+		config.port.media_type.num_channels =
+					afe_config.slim_sch.num_channels;
+	config.port.media_type.data_format = AFE_PORT_DATA_FORMAT_PCM;
+	config.port.media_type.reserved = 0;
+
+	ret = afe_apr_send_pkt(&config, &this_afe.wait[index]);
+	if (ret) {
+		pr_err("%s: AFE_API_VERSION_PORT_MEDIA_TYPE for port 0x%x failed %d\n",
+			__func__, port_id, ret);
+		goto exit;
+	}
+
+exit:
+	return ret;
+}
+
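+/*
+ * Common port-start worker (under afe_cmd_lock): handles RT proxy
+ * reference counting, sends topology/cal/hw-delay unless the port's cal
+ * mode is NONE, starts HW MAD and AANC where applicable, picks the
+ * interface-specific config param (PCM/I2S/HDMI/pseudo/slimbus/USB/
+ * proxy/BT-FM) and finally issues the device start command.
+ */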
+static int __afe_port_start(u16 port_id, union afe_port_config *afe_config,
+			    u32 rate, u16 afe_in_channels, u16 afe_in_bit_width,
+			    union afe_enc_config_data *cfg, u32 enc_format)
+{
+	struct afe_audioif_config_command config;
+	int ret = 0;
+	int cfg_type;
+	int index = 0;
+	enum afe_mad_type mad_type;
+	uint16_t port_index;
+
+	if (!afe_config) {
+		pr_err("%s: Error, no configuration data\n", __func__);
+		ret = -EINVAL;
+		return ret;
+	}
+
+	if ((port_id == RT_PROXY_DAI_001_RX) ||
+		(port_id == RT_PROXY_DAI_002_TX)) {
+		pr_debug("%s: before incrementing pcm_afe_instance %d"\
+			" port_id 0x%x\n", __func__,
+			pcm_afe_instance[port_id & 0x1], port_id);
+		port_id = VIRTUAL_ID_TO_PORTID(port_id);
+		pcm_afe_instance[port_id & 0x1]++;
+		return 0;
+	}
+	if ((port_id == RT_PROXY_DAI_002_RX) ||
+			(port_id == RT_PROXY_DAI_001_TX)) {
+		pr_debug("%s: before incrementing proxy_afe_instance %d"\
+			" port_id 0x%x\n", __func__,
+			proxy_afe_instance[port_id & 0x1], port_id);
+
+		if (!afe_close_done[port_id & 0x1]) {
+			/*close pcm dai corresponding to the proxy dai*/
+			afe_close(port_id - 0x10);
+			pcm_afe_instance[port_id & 0x1]++;
+			pr_debug("%s: reconfigure afe port again\n", __func__);
+		}
+		proxy_afe_instance[port_id & 0x1]++;
+		afe_close_done[port_id & 0x1] = false;
+		port_id = VIRTUAL_ID_TO_PORTID(port_id);
+	}
+
+	pr_debug("%s: port id: 0x%x\n", __func__, port_id);
+
+	index = q6audio_get_port_index(port_id);
+	if (index < 0 || index >= AFE_MAX_PORTS) {
+		pr_err("%s: AFE port index[%d] invalid!\n",
+				__func__, index);
+		return -EINVAL;
+	}
+	ret = q6audio_validate_port(port_id);
+	if (ret < 0) {
+		pr_err("%s: port id: 0x%x ret %d\n", __func__, port_id, ret);
+		return -EINVAL;
+	}
+
+	ret = afe_q6_interface_prepare();
+	if (ret != 0) {
+		pr_err("%s: Q6 interface prepare failed %d\n", __func__, ret);
+		return ret;
+	}
+
+	if ((index >= 0) && (index < AFE_MAX_PORTS)) {
+		this_afe.afe_sample_rates[index] = rate;
+
+		if (this_afe.rt_cb)
+			this_afe.dev_acdb_id[index] = this_afe.rt_cb(port_id);
+	}
+
+	mutex_lock(&this_afe.afe_cmd_lock);
+	/* Also send the topology id here: */
+	port_index = afe_get_port_index(port_id);
+	if (!(this_afe.afe_cal_mode[port_index] == AFE_CAL_MODE_NONE)) {
+		/* One time call: only for first time */
+		afe_send_custom_topology();
+		afe_send_port_topology_id(port_id);
+		afe_send_cal(port_id);
+		afe_send_hw_delay(port_id, rate);
+	}
+
+	/* Start SW MAD module */
+	mad_type = afe_port_get_mad_type(port_id);
+	pr_debug("%s: port_id 0x%x, mad_type %d\n", __func__, port_id,
+		 mad_type);
+	if (mad_type != MAD_HW_NONE && mad_type != MAD_SW_AUDIO) {
+		if (!afe_has_config(AFE_CDC_REGISTERS_CONFIG) ||
+			!afe_has_config(AFE_SLIMBUS_SLAVE_CONFIG)) {
+				pr_err("%s: AFE isn't configured yet for HW MAD, try again\n",
+					   __func__);
+				ret = -EAGAIN;
+				goto fail_cmd;
+		}
+		ret = afe_turn_onoff_hw_mad(mad_type, true);
+		if (ret) {
+			pr_err("%s: afe_turn_onoff_hw_mad failed %d\n",
+			       __func__, ret);
+			goto fail_cmd;
+		}
+	}
+
+	if ((this_afe.aanc_info.aanc_active) &&
+	    (this_afe.aanc_info.aanc_tx_port == port_id)) {
+		this_afe.aanc_info.aanc_tx_port_sample_rate = rate;
+		port_index =
+			afe_get_port_index(this_afe.aanc_info.aanc_rx_port);
+		if ((port_index >= 0) && (port_index < AFE_MAX_PORTS)) {
+			this_afe.aanc_info.aanc_rx_port_sample_rate =
+				this_afe.afe_sample_rates[port_index];
+		} else {
+			pr_err("%s: Invalid port index %d\n",
+				__func__, port_index);
+			ret = -EINVAL;
+			goto fail_cmd;
+		}
+		ret = afe_aanc_start(this_afe.aanc_info.aanc_tx_port,
+				this_afe.aanc_info.aanc_rx_port);
+		pr_debug("%s: afe_aanc_start ret %d\n", __func__, ret);
+	}
+
+	if ((port_id == AFE_PORT_ID_USB_RX) ||
+	    (port_id == AFE_PORT_ID_USB_TX)) {
+		ret = afe_port_send_usb_dev_param(port_id, afe_config);
+		if (ret) {
+			pr_err("%s: AFE device param for port 0x%x failed %d\n",
+				   __func__, port_id, ret);
+			ret = -EINVAL;
+			goto fail_cmd;
+		}
+	}
+
+	config.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+				APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
+	config.hdr.pkt_size = sizeof(config);
+	config.hdr.src_port = 0;
+	config.hdr.dest_port = 0;
+	config.hdr.token = index;
+
+	switch (port_id) {
+	case AFE_PORT_ID_PRIMARY_PCM_RX:
+	case AFE_PORT_ID_PRIMARY_PCM_TX:
+	case AFE_PORT_ID_SECONDARY_PCM_RX:
+	case AFE_PORT_ID_SECONDARY_PCM_TX:
+	case AFE_PORT_ID_TERTIARY_PCM_RX:
+	case AFE_PORT_ID_TERTIARY_PCM_TX:
+	case AFE_PORT_ID_QUATERNARY_PCM_RX:
+	case AFE_PORT_ID_QUATERNARY_PCM_TX:
+		cfg_type = AFE_PARAM_ID_PCM_CONFIG;
+		break;
+	case PRIMARY_I2S_RX:
+	case PRIMARY_I2S_TX:
+	case SECONDARY_I2S_RX:
+	case SECONDARY_I2S_TX:
+	case MI2S_RX:
+	case MI2S_TX:
+	case AFE_PORT_ID_PRIMARY_MI2S_RX:
+	case AFE_PORT_ID_PRIMARY_MI2S_TX:
+	case AFE_PORT_ID_SECONDARY_MI2S_RX:
+	case AFE_PORT_ID_SECONDARY_MI2S_RX_SD1:
+	case AFE_PORT_ID_SECONDARY_MI2S_TX:
+	case AFE_PORT_ID_TERTIARY_MI2S_RX:
+	case AFE_PORT_ID_TERTIARY_MI2S_TX:
+	case AFE_PORT_ID_QUATERNARY_MI2S_RX:
+	case AFE_PORT_ID_QUATERNARY_MI2S_TX:
+	case AFE_PORT_ID_QUINARY_MI2S_RX:
+	case AFE_PORT_ID_QUINARY_MI2S_TX:
+	case AFE_PORT_ID_SENARY_MI2S_TX:
+	case AFE_PORT_ID_INT0_MI2S_RX:
+	case AFE_PORT_ID_INT0_MI2S_TX:
+	case AFE_PORT_ID_INT1_MI2S_RX:
+	case AFE_PORT_ID_INT1_MI2S_TX:
+	case AFE_PORT_ID_INT2_MI2S_RX:
+	case AFE_PORT_ID_INT2_MI2S_TX:
+	case AFE_PORT_ID_INT3_MI2S_RX:
+	case AFE_PORT_ID_INT3_MI2S_TX:
+	case AFE_PORT_ID_INT4_MI2S_RX:
+	case AFE_PORT_ID_INT4_MI2S_TX:
+	case AFE_PORT_ID_INT5_MI2S_RX:
+	case AFE_PORT_ID_INT5_MI2S_TX:
+	case AFE_PORT_ID_INT6_MI2S_RX:
+	case AFE_PORT_ID_INT6_MI2S_TX:
+		cfg_type = AFE_PARAM_ID_I2S_CONFIG;
+		break;
+	case HDMI_RX:
+	case DISPLAY_PORT_RX:
+		cfg_type = AFE_PARAM_ID_HDMI_CONFIG;
+		break;
+	case VOICE_PLAYBACK_TX:
+	case VOICE2_PLAYBACK_TX:
+	case VOICE_RECORD_RX:
+	case VOICE_RECORD_TX:
+		cfg_type = AFE_PARAM_ID_PSEUDO_PORT_CONFIG;
+		break;
+	case SLIMBUS_0_RX:
+	case SLIMBUS_0_TX:
+	case SLIMBUS_1_RX:
+	case SLIMBUS_1_TX:
+	case SLIMBUS_2_RX:
+	case SLIMBUS_2_TX:
+	case SLIMBUS_3_RX:
+	case SLIMBUS_3_TX:
+	case SLIMBUS_4_RX:
+	case SLIMBUS_4_TX:
+	case SLIMBUS_5_RX:
+	case SLIMBUS_5_TX:
+	case SLIMBUS_6_RX:
+	case SLIMBUS_6_TX:
+	case SLIMBUS_7_RX:
+	case SLIMBUS_7_TX:
+	case SLIMBUS_8_RX:
+	case SLIMBUS_8_TX:
+		cfg_type = AFE_PARAM_ID_SLIMBUS_CONFIG;
+		break;
+	case AFE_PORT_ID_USB_RX:
+	case AFE_PORT_ID_USB_TX:
+		cfg_type = AFE_PARAM_ID_USB_AUDIO_CONFIG;
+		break;
+	case RT_PROXY_PORT_001_RX:
+	case RT_PROXY_PORT_001_TX:
+		cfg_type = AFE_PARAM_ID_RT_PROXY_CONFIG;
+		break;
+	case INT_BT_SCO_RX:
+	case INT_BT_A2DP_RX:
+	case INT_BT_SCO_TX:
+	case INT_FM_RX:
+	case INT_FM_TX:
+		cfg_type = AFE_PARAM_ID_INTERNAL_BT_FM_CONFIG;
+		break;
+	default:
+		pr_err("%s: Invalid port id 0x%x\n", __func__, port_id);
+		ret = -EINVAL;
+		goto fail_cmd;
+	}
+	config.hdr.opcode = AFE_PORT_CMD_SET_PARAM_V2;
+	config.param.port_id = q6audio_get_port_id(port_id);
+	config.param.payload_size = sizeof(config) - sizeof(struct apr_hdr) -
+				    sizeof(config.param);
+	config.param.payload_address_lsw = 0x00;
+	config.param.payload_address_msw = 0x00;
+	config.param.mem_map_handle = 0x00;
+	config.pdata.module_id = AFE_MODULE_AUDIO_DEV_INTERFACE;
+	config.pdata.param_id = cfg_type;
+	config.pdata.param_size = sizeof(config.port);
+
+	config.port = *afe_config;
+	if ((enc_format != ASM_MEDIA_FMT_NONE) &&
+	    (cfg_type == AFE_PARAM_ID_SLIMBUS_CONFIG)) {
+		config.port.slim_sch.data_format =
+				AFE_SB_DATA_FORMAT_GENERIC_COMPRESSED;
+	}
+	ret = afe_apr_send_pkt(&config, &this_afe.wait[index]);
+	if (ret) {
+		pr_err("%s: AFE enable for port 0x%x failed %d\n",
+			__func__, port_id, ret);
+		goto fail_cmd;
+	}
+
+	if ((enc_format != ASM_MEDIA_FMT_NONE) &&
+	    (cfg_type == AFE_PARAM_ID_SLIMBUS_CONFIG)) {
+		pr_debug("%s: Found AFE encoder support for SLIMBUS enc_format = %d\n",
+					__func__, enc_format);
+		ret = q6afe_send_enc_config(port_id, cfg, enc_format,
+					    *afe_config, afe_in_channels,
+					    afe_in_bit_width);
+		if (ret) {
+			pr_err("%s: AFE encoder config for port 0x%x failed %d\n",
+				__func__, port_id, ret);
+			goto fail_cmd;
+		}
+	}
+
+	port_index = afe_get_port_index(port_id);
+	if ((port_index >= 0) && (port_index < AFE_MAX_PORTS)) {
+		/*
+		 * If afe_port_start() is called for the tx port before
+		 * the rx port, the AANC rx sample rate is zero and the
+		 * AANC state machine in AFE will not be triggered. So,
+		 * when starting the rx port, check whether AANC is
+		 * active and, if the rx sample rate is still zero, call
+		 * afe_aanc_start() to configure AANC with valid sample
+		 * rates.
+		 */
+		if (this_afe.aanc_info.aanc_active &&
+		    !this_afe.aanc_info.aanc_rx_port_sample_rate) {
+			this_afe.aanc_info.aanc_rx_port_sample_rate =
+				this_afe.afe_sample_rates[port_index];
+			ret = afe_aanc_start(this_afe.aanc_info.aanc_tx_port,
+					this_afe.aanc_info.aanc_rx_port);
+			pr_debug("%s: afe_aanc_start ret %d\n", __func__, ret);
+		}
+	} else {
+		pr_err("%s: Invalid port index %d\n", __func__, port_index);
+		ret = -EINVAL;
+		goto fail_cmd;
+	}
+	ret = afe_send_cmd_port_start(port_id);
+
+fail_cmd:
+	mutex_unlock(&this_afe.afe_cmd_lock);
+	return ret;
+}
+
+/**
+ * afe_port_start - configure and start an AFE session with the
+ * specified port configuration
+ *
+ * @port_id: AFE port id number
+ * @afe_config: port configuration
+ * @rate: sampling rate of port
+ *
+ * Returns 0 on success or error value on port start failure.
+ */
+int afe_port_start(u16 port_id, union afe_port_config *afe_config,
+		   u32 rate)
+{
+	return __afe_port_start(port_id, afe_config, rate,
+				0, 0, NULL, ASM_MEDIA_FMT_NONE);
+}
+EXPORT_SYMBOL(afe_port_start);
+
+/**
+ * afe_port_start_v2 - configure and start an AFE session with the
+ * specified port configuration and encoder params
+ *
+ * @port_id: AFE port id number
+ * @afe_config: port configuration
+ * @rate: sampling rate of port
+ * @afe_in_channels: AFE input channel configuration; needs updating
+ *  only if the input channel count differs from the AFE output
+ * @afe_in_bit_width: AFE input bit width; needs updating only if it
+ *  differs from the AFE output
+ * @enc_cfg: AFE encoder configuration for setting up the encoder
+ *
+ * Returns 0 on success or error value on port start failure.
+ */
+int afe_port_start_v2(u16 port_id, union afe_port_config *afe_config,
+		      u32 rate, u16 afe_in_channels, u16 afe_in_bit_width,
+		      struct afe_enc_config *enc_cfg)
+{
+	return __afe_port_start(port_id, afe_config, rate,
+				afe_in_channels, afe_in_bit_width,
+				&enc_cfg->data, enc_cfg->format);
+}
+EXPORT_SYMBOL(afe_port_start_v2);
+
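+/*
+ * Map an AFE port id to the driver-internal index used for the per-port
+ * wait queues and the sample-rate/topology bookkeeping arrays.
+ */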
+int afe_get_port_index(u16 port_id)
+{
+	switch (port_id) {
+	case PRIMARY_I2S_RX: return IDX_PRIMARY_I2S_RX;
+	case PRIMARY_I2S_TX: return IDX_PRIMARY_I2S_TX;
+	case AFE_PORT_ID_PRIMARY_PCM_RX:
+		return IDX_AFE_PORT_ID_PRIMARY_PCM_RX;
+	case AFE_PORT_ID_PRIMARY_PCM_TX:
+		return IDX_AFE_PORT_ID_PRIMARY_PCM_TX;
+	case AFE_PORT_ID_SECONDARY_PCM_RX:
+		return IDX_AFE_PORT_ID_SECONDARY_PCM_RX;
+	case AFE_PORT_ID_SECONDARY_PCM_TX:
+		return IDX_AFE_PORT_ID_SECONDARY_PCM_TX;
+	case AFE_PORT_ID_TERTIARY_PCM_RX:
+		return IDX_AFE_PORT_ID_TERTIARY_PCM_RX;
+	case AFE_PORT_ID_TERTIARY_PCM_TX:
+		return IDX_AFE_PORT_ID_TERTIARY_PCM_TX;
+	case AFE_PORT_ID_QUATERNARY_PCM_RX:
+		return IDX_AFE_PORT_ID_QUATERNARY_PCM_RX;
+	case AFE_PORT_ID_QUATERNARY_PCM_TX:
+		return IDX_AFE_PORT_ID_QUATERNARY_PCM_TX;
+	case SECONDARY_I2S_RX: return IDX_SECONDARY_I2S_RX;
+	case SECONDARY_I2S_TX: return IDX_SECONDARY_I2S_TX;
+	case MI2S_RX: return IDX_MI2S_RX;
+	case MI2S_TX: return IDX_MI2S_TX;
+	case HDMI_RX: return IDX_HDMI_RX;
+	case DISPLAY_PORT_RX: return IDX_DISPLAY_PORT_RX;
+	case AFE_PORT_ID_SPDIF_RX: return IDX_SPDIF_RX;
+	case RSVD_2: return IDX_RSVD_2;
+	case RSVD_3: return IDX_RSVD_3;
+	case DIGI_MIC_TX: return IDX_DIGI_MIC_TX;
+	case VOICE_RECORD_RX: return IDX_VOICE_RECORD_RX;
+	case VOICE_RECORD_TX: return IDX_VOICE_RECORD_TX;
+	case VOICE_PLAYBACK_TX: return IDX_VOICE_PLAYBACK_TX;
+	case VOICE2_PLAYBACK_TX: return IDX_VOICE2_PLAYBACK_TX;
+	case SLIMBUS_0_RX: return IDX_SLIMBUS_0_RX;
+	case SLIMBUS_0_TX: return IDX_SLIMBUS_0_TX;
+	case SLIMBUS_1_RX: return IDX_SLIMBUS_1_RX;
+	case SLIMBUS_1_TX: return IDX_SLIMBUS_1_TX;
+	case SLIMBUS_2_RX: return IDX_SLIMBUS_2_RX;
+	case SLIMBUS_2_TX: return IDX_SLIMBUS_2_TX;
+	case SLIMBUS_3_RX: return IDX_SLIMBUS_3_RX;
+	case SLIMBUS_3_TX: return IDX_SLIMBUS_3_TX;
+	case INT_BT_SCO_RX: return IDX_INT_BT_SCO_RX;
+	case INT_BT_SCO_TX: return IDX_INT_BT_SCO_TX;
+	case INT_BT_A2DP_RX: return IDX_INT_BT_A2DP_RX;
+	case INT_FM_RX: return IDX_INT_FM_RX;
+	case INT_FM_TX: return IDX_INT_FM_TX;
+	case RT_PROXY_PORT_001_RX: return IDX_RT_PROXY_PORT_001_RX;
+	case RT_PROXY_PORT_001_TX: return IDX_RT_PROXY_PORT_001_TX;
+	case SLIMBUS_4_RX: return IDX_SLIMBUS_4_RX;
+	case SLIMBUS_4_TX: return IDX_SLIMBUS_4_TX;
+	case SLIMBUS_5_RX: return IDX_SLIMBUS_5_RX;
+	case SLIMBUS_5_TX: return IDX_SLIMBUS_5_TX;
+	case SLIMBUS_6_RX: return IDX_SLIMBUS_6_RX;
+	case SLIMBUS_6_TX: return IDX_SLIMBUS_6_TX;
+	case SLIMBUS_7_RX: return IDX_SLIMBUS_7_RX;
+	case SLIMBUS_7_TX: return IDX_SLIMBUS_7_TX;
+	case SLIMBUS_8_RX: return IDX_SLIMBUS_8_RX;
+	case SLIMBUS_8_TX: return IDX_SLIMBUS_8_TX;
+	case AFE_PORT_ID_USB_RX: return IDX_AFE_PORT_ID_USB_RX;
+	case AFE_PORT_ID_USB_TX: return IDX_AFE_PORT_ID_USB_TX;
+	case AFE_PORT_ID_PRIMARY_MI2S_RX:
+		return IDX_AFE_PORT_ID_PRIMARY_MI2S_RX;
+	case AFE_PORT_ID_PRIMARY_MI2S_TX:
+		return IDX_AFE_PORT_ID_PRIMARY_MI2S_TX;
+	case AFE_PORT_ID_QUATERNARY_MI2S_RX:
+		return IDX_AFE_PORT_ID_QUATERNARY_MI2S_RX;
+	case AFE_PORT_ID_QUATERNARY_MI2S_TX:
+		return IDX_AFE_PORT_ID_QUATERNARY_MI2S_TX;
+	case AFE_PORT_ID_SECONDARY_MI2S_RX:
+		return IDX_AFE_PORT_ID_SECONDARY_MI2S_RX;
+	case AFE_PORT_ID_SECONDARY_MI2S_TX:
+		return IDX_AFE_PORT_ID_SECONDARY_MI2S_TX;
+	case AFE_PORT_ID_TERTIARY_MI2S_RX:
+		return IDX_AFE_PORT_ID_TERTIARY_MI2S_RX;
+	case AFE_PORT_ID_TERTIARY_MI2S_TX:
+		return IDX_AFE_PORT_ID_TERTIARY_MI2S_TX;
+	case AFE_PORT_ID_SECONDARY_MI2S_RX_SD1:
+		return IDX_AFE_PORT_ID_SECONDARY_MI2S_RX_SD1;
+	case AFE_PORT_ID_QUINARY_MI2S_RX:
+		return IDX_AFE_PORT_ID_QUINARY_MI2S_RX;
+	case AFE_PORT_ID_QUINARY_MI2S_TX:
+		return IDX_AFE_PORT_ID_QUINARY_MI2S_TX;
+	case AFE_PORT_ID_SENARY_MI2S_TX:
+		return IDX_AFE_PORT_ID_SENARY_MI2S_TX;
+	case AFE_PORT_ID_PRIMARY_TDM_RX:
+		return IDX_AFE_PORT_ID_PRIMARY_TDM_RX_0;
+	case AFE_PORT_ID_PRIMARY_TDM_TX:
+		return IDX_AFE_PORT_ID_PRIMARY_TDM_TX_0;
+	case AFE_PORT_ID_PRIMARY_TDM_RX_1:
+		return IDX_AFE_PORT_ID_PRIMARY_TDM_RX_1;
+	case AFE_PORT_ID_PRIMARY_TDM_TX_1:
+		return IDX_AFE_PORT_ID_PRIMARY_TDM_TX_1;
+	case AFE_PORT_ID_PRIMARY_TDM_RX_2:
+		return IDX_AFE_PORT_ID_PRIMARY_TDM_RX_2;
+	case AFE_PORT_ID_PRIMARY_TDM_TX_2:
+		return IDX_AFE_PORT_ID_PRIMARY_TDM_TX_2;
+	case AFE_PORT_ID_PRIMARY_TDM_RX_3:
+		return IDX_AFE_PORT_ID_PRIMARY_TDM_RX_3;
+	case AFE_PORT_ID_PRIMARY_TDM_TX_3:
+		return IDX_AFE_PORT_ID_PRIMARY_TDM_TX_3;
+	case AFE_PORT_ID_PRIMARY_TDM_RX_4:
+		return IDX_AFE_PORT_ID_PRIMARY_TDM_RX_4;
+	case AFE_PORT_ID_PRIMARY_TDM_TX_4:
+		return IDX_AFE_PORT_ID_PRIMARY_TDM_TX_4;
+	case AFE_PORT_ID_PRIMARY_TDM_RX_5:
+		return IDX_AFE_PORT_ID_PRIMARY_TDM_RX_5;
+	case AFE_PORT_ID_PRIMARY_TDM_TX_5:
+		return IDX_AFE_PORT_ID_PRIMARY_TDM_TX_5;
+	case AFE_PORT_ID_PRIMARY_TDM_RX_6:
+		return IDX_AFE_PORT_ID_PRIMARY_TDM_RX_6;
+	case AFE_PORT_ID_PRIMARY_TDM_TX_6:
+		return IDX_AFE_PORT_ID_PRIMARY_TDM_TX_6;
+	case AFE_PORT_ID_PRIMARY_TDM_RX_7:
+		return IDX_AFE_PORT_ID_PRIMARY_TDM_RX_7;
+	case AFE_PORT_ID_PRIMARY_TDM_TX_7:
+		return IDX_AFE_PORT_ID_PRIMARY_TDM_TX_7;
+	case AFE_PORT_ID_SECONDARY_TDM_RX:
+		return IDX_AFE_PORT_ID_SECONDARY_TDM_RX_0;
+	case AFE_PORT_ID_SECONDARY_TDM_TX:
+		return IDX_AFE_PORT_ID_SECONDARY_TDM_TX_0;
+	case AFE_PORT_ID_SECONDARY_TDM_RX_1:
+		return IDX_AFE_PORT_ID_SECONDARY_TDM_RX_1;
+	case AFE_PORT_ID_SECONDARY_TDM_TX_1:
+		return IDX_AFE_PORT_ID_SECONDARY_TDM_TX_1;
+	case AFE_PORT_ID_SECONDARY_TDM_RX_2:
+		return IDX_AFE_PORT_ID_SECONDARY_TDM_RX_2;
+	case AFE_PORT_ID_SECONDARY_TDM_TX_2:
+		return IDX_AFE_PORT_ID_SECONDARY_TDM_TX_2;
+	case AFE_PORT_ID_SECONDARY_TDM_RX_3:
+		return IDX_AFE_PORT_ID_SECONDARY_TDM_RX_3;
+	case AFE_PORT_ID_SECONDARY_TDM_TX_3:
+		return IDX_AFE_PORT_ID_SECONDARY_TDM_TX_3;
+	case AFE_PORT_ID_SECONDARY_TDM_RX_4:
+		return IDX_AFE_PORT_ID_SECONDARY_TDM_RX_4;
+	case AFE_PORT_ID_SECONDARY_TDM_TX_4:
+		return IDX_AFE_PORT_ID_SECONDARY_TDM_TX_4;
+	case AFE_PORT_ID_SECONDARY_TDM_RX_5:
+		return IDX_AFE_PORT_ID_SECONDARY_TDM_RX_5;
+	case AFE_PORT_ID_SECONDARY_TDM_TX_5:
+		return IDX_AFE_PORT_ID_SECONDARY_TDM_TX_5;
+	case AFE_PORT_ID_SECONDARY_TDM_RX_6:
+		return IDX_AFE_PORT_ID_SECONDARY_TDM_RX_6;
+	case AFE_PORT_ID_SECONDARY_TDM_TX_6:
+		return IDX_AFE_PORT_ID_SECONDARY_TDM_TX_6;
+	case AFE_PORT_ID_SECONDARY_TDM_RX_7:
+		return IDX_AFE_PORT_ID_SECONDARY_TDM_RX_7;
+	case AFE_PORT_ID_SECONDARY_TDM_TX_7:
+		return IDX_AFE_PORT_ID_SECONDARY_TDM_TX_7;
+	case AFE_PORT_ID_TERTIARY_TDM_RX:
+		return IDX_AFE_PORT_ID_TERTIARY_TDM_RX_0;
+	case AFE_PORT_ID_TERTIARY_TDM_TX:
+		return IDX_AFE_PORT_ID_TERTIARY_TDM_TX_0;
+	case AFE_PORT_ID_TERTIARY_TDM_RX_1:
+		return IDX_AFE_PORT_ID_TERTIARY_TDM_RX_1;
+	case AFE_PORT_ID_TERTIARY_TDM_TX_1:
+		return IDX_AFE_PORT_ID_TERTIARY_TDM_TX_1;
+	case AFE_PORT_ID_TERTIARY_TDM_RX_2:
+		return IDX_AFE_PORT_ID_TERTIARY_TDM_RX_2;
+	case AFE_PORT_ID_TERTIARY_TDM_TX_2:
+		return IDX_AFE_PORT_ID_TERTIARY_TDM_TX_2;
+	case AFE_PORT_ID_TERTIARY_TDM_RX_3:
+		return IDX_AFE_PORT_ID_TERTIARY_TDM_RX_3;
+	case AFE_PORT_ID_TERTIARY_TDM_TX_3:
+		return IDX_AFE_PORT_ID_TERTIARY_TDM_TX_3;
+	case AFE_PORT_ID_TERTIARY_TDM_RX_4:
+		return IDX_AFE_PORT_ID_TERTIARY_TDM_RX_4;
+	case AFE_PORT_ID_TERTIARY_TDM_TX_4:
+		return IDX_AFE_PORT_ID_TERTIARY_TDM_TX_4;
+	case AFE_PORT_ID_TERTIARY_TDM_RX_5:
+		return IDX_AFE_PORT_ID_TERTIARY_TDM_RX_5;
+	case AFE_PORT_ID_TERTIARY_TDM_TX_5:
+		return IDX_AFE_PORT_ID_TERTIARY_TDM_TX_5;
+	case AFE_PORT_ID_TERTIARY_TDM_RX_6:
+		return IDX_AFE_PORT_ID_TERTIARY_TDM_RX_6;
+	case AFE_PORT_ID_TERTIARY_TDM_TX_6:
+		return IDX_AFE_PORT_ID_TERTIARY_TDM_TX_6;
+	case AFE_PORT_ID_TERTIARY_TDM_RX_7:
+		return IDX_AFE_PORT_ID_TERTIARY_TDM_RX_7;
+	case AFE_PORT_ID_TERTIARY_TDM_TX_7:
+		return IDX_AFE_PORT_ID_TERTIARY_TDM_TX_7;
+	case AFE_PORT_ID_QUATERNARY_TDM_RX:
+		return IDX_AFE_PORT_ID_QUATERNARY_TDM_RX_0;
+	case AFE_PORT_ID_QUATERNARY_TDM_TX:
+		return IDX_AFE_PORT_ID_QUATERNARY_TDM_TX_0;
+	case AFE_PORT_ID_QUATERNARY_TDM_RX_1:
+		return IDX_AFE_PORT_ID_QUATERNARY_TDM_RX_1;
+	case AFE_PORT_ID_QUATERNARY_TDM_TX_1:
+		return IDX_AFE_PORT_ID_QUATERNARY_TDM_TX_1;
+	case AFE_PORT_ID_QUATERNARY_TDM_RX_2:
+		return IDX_AFE_PORT_ID_QUATERNARY_TDM_RX_2;
+	case AFE_PORT_ID_QUATERNARY_TDM_TX_2:
+		return IDX_AFE_PORT_ID_QUATERNARY_TDM_TX_2;
+	case AFE_PORT_ID_QUATERNARY_TDM_RX_3:
+		return IDX_AFE_PORT_ID_QUATERNARY_TDM_RX_3;
+	case AFE_PORT_ID_QUATERNARY_TDM_TX_3:
+		return IDX_AFE_PORT_ID_QUATERNARY_TDM_TX_3;
+	case AFE_PORT_ID_QUATERNARY_TDM_RX_4:
+		return IDX_AFE_PORT_ID_QUATERNARY_TDM_RX_4;
+	case AFE_PORT_ID_QUATERNARY_TDM_TX_4:
+		return IDX_AFE_PORT_ID_QUATERNARY_TDM_TX_4;
+	case AFE_PORT_ID_QUATERNARY_TDM_RX_5:
+		return IDX_AFE_PORT_ID_QUATERNARY_TDM_RX_5;
+	case AFE_PORT_ID_QUATERNARY_TDM_TX_5:
+		return IDX_AFE_PORT_ID_QUATERNARY_TDM_TX_5;
+	case AFE_PORT_ID_QUATERNARY_TDM_RX_6:
+		return IDX_AFE_PORT_ID_QUATERNARY_TDM_RX_6;
+	case AFE_PORT_ID_QUATERNARY_TDM_TX_6:
+		return IDX_AFE_PORT_ID_QUATERNARY_TDM_TX_6;
+	case AFE_PORT_ID_QUATERNARY_TDM_RX_7:
+		return IDX_AFE_PORT_ID_QUATERNARY_TDM_RX_7;
+	case AFE_PORT_ID_QUATERNARY_TDM_TX_7:
+		return IDX_AFE_PORT_ID_QUATERNARY_TDM_TX_7;
+	case AFE_PORT_ID_INT0_MI2S_RX:
+		return IDX_AFE_PORT_ID_INT0_MI2S_RX;
+	case AFE_PORT_ID_INT0_MI2S_TX:
+		return IDX_AFE_PORT_ID_INT0_MI2S_TX;
+	case AFE_PORT_ID_INT1_MI2S_RX:
+		return IDX_AFE_PORT_ID_INT1_MI2S_RX;
+	case AFE_PORT_ID_INT1_MI2S_TX:
+		return IDX_AFE_PORT_ID_INT1_MI2S_TX;
+	case AFE_PORT_ID_INT2_MI2S_RX:
+		return IDX_AFE_PORT_ID_INT2_MI2S_RX;
+	case AFE_PORT_ID_INT2_MI2S_TX:
+		return IDX_AFE_PORT_ID_INT2_MI2S_TX;
+	case AFE_PORT_ID_INT3_MI2S_RX:
+		return IDX_AFE_PORT_ID_INT3_MI2S_RX;
+	case AFE_PORT_ID_INT3_MI2S_TX:
+		return IDX_AFE_PORT_ID_INT3_MI2S_TX;
+	case AFE_PORT_ID_INT4_MI2S_RX:
+		return IDX_AFE_PORT_ID_INT4_MI2S_RX;
+	case AFE_PORT_ID_INT4_MI2S_TX:
+		return IDX_AFE_PORT_ID_INT4_MI2S_TX;
+	case AFE_PORT_ID_INT5_MI2S_RX:
+		return IDX_AFE_PORT_ID_INT5_MI2S_RX;
+	case AFE_PORT_ID_INT5_MI2S_TX:
+		return IDX_AFE_PORT_ID_INT5_MI2S_TX;
+	case AFE_PORT_ID_INT6_MI2S_RX:
+		return IDX_AFE_PORT_ID_INT6_MI2S_RX;
+	case AFE_PORT_ID_INT6_MI2S_TX:
+		return IDX_AFE_PORT_ID_INT6_MI2S_TX;
+	default:
+		pr_err("%s: port 0x%x\n", __func__, port_id);
+		return -EINVAL;
+	}
+}
+
+int afe_open(u16 port_id,
+		union afe_port_config *afe_config, int rate)
+{
+	struct afe_port_cmd_device_start start;
+	struct afe_audioif_config_command config;
+	int ret = 0;
+	int cfg_type;
+	int index = 0;
+
+	if (!afe_config) {
+		pr_err("%s: Error, no configuration data\n", __func__);
+		ret = -EINVAL;
+		return ret;
+	}
+
+	pr_err("%s: port_id 0x%x rate %d\n", __func__, port_id, rate);
+
+	index = q6audio_get_port_index(port_id);
+	if (index < 0 || index >= AFE_MAX_PORTS) {
+		pr_err("%s: AFE port index[%d] invalid!\n",
+				__func__, index);
+		return -EINVAL;
+	}
+	ret = q6audio_validate_port(port_id);
+	if (ret < 0) {
+		pr_err("%s: Invalid port 0x%x ret %d", __func__, port_id, ret);
+		return -EINVAL;
+	}
+
+	if ((port_id == RT_PROXY_DAI_001_RX) ||
+		(port_id == RT_PROXY_DAI_002_TX)) {
+		pr_err("%s: wrong port 0x%x\n", __func__, port_id);
+		return -EINVAL;
+	}
+	if ((port_id == RT_PROXY_DAI_002_RX) ||
+		(port_id == RT_PROXY_DAI_001_TX))
+		port_id = VIRTUAL_ID_TO_PORTID(port_id);
+
+	ret = afe_q6_interface_prepare();
+	if (ret != 0) {
+		pr_err("%s: Q6 interface prepare failed %d\n", __func__, ret);
+		return -EINVAL;
+	}
+
+	if ((index >= 0) && (index < AFE_MAX_PORTS)) {
+		this_afe.afe_sample_rates[index] = rate;
+
+		if (this_afe.rt_cb)
+			this_afe.dev_acdb_id[index] = this_afe.rt_cb(port_id);
+	}
+
+	/* Also send the topology id here */
+	afe_send_custom_topology(); /* one-time call: only for the first time */
+	afe_send_port_topology_id(port_id);
+
+	ret = q6audio_validate_port(port_id);
+	if (ret < 0) {
+		pr_err("%s: Failed : Invalid Port id = 0x%x ret %d\n",
+			__func__, port_id, ret);
+		return -EINVAL;
+	}
+	mutex_lock(&this_afe.afe_cmd_lock);
+
+	config.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+				APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
+	config.hdr.pkt_size = sizeof(config);
+	config.hdr.src_port = 0;
+	config.hdr.dest_port = 0;
+	config.hdr.token = index;
+	switch (port_id) {
+	case PRIMARY_I2S_RX:
+	case PRIMARY_I2S_TX:
+		cfg_type = AFE_PARAM_ID_I2S_CONFIG;
+		break;
+	case AFE_PORT_ID_PRIMARY_PCM_RX:
+	case AFE_PORT_ID_PRIMARY_PCM_TX:
+	case AFE_PORT_ID_SECONDARY_PCM_RX:
+	case AFE_PORT_ID_SECONDARY_PCM_TX:
+	case AFE_PORT_ID_TERTIARY_PCM_RX:
+	case AFE_PORT_ID_TERTIARY_PCM_TX:
+	case AFE_PORT_ID_QUATERNARY_PCM_RX:
+	case AFE_PORT_ID_QUATERNARY_PCM_TX:
+		cfg_type = AFE_PARAM_ID_PCM_CONFIG;
+		break;
+	case SECONDARY_I2S_RX:
+	case SECONDARY_I2S_TX:
+	case AFE_PORT_ID_PRIMARY_MI2S_RX:
+	case AFE_PORT_ID_PRIMARY_MI2S_TX:
+	case AFE_PORT_ID_QUATERNARY_MI2S_RX:
+	case AFE_PORT_ID_QUATERNARY_MI2S_TX:
+	case MI2S_RX:
+	case MI2S_TX:
+	case AFE_PORT_ID_QUINARY_MI2S_RX:
+	case AFE_PORT_ID_QUINARY_MI2S_TX:
+	case AFE_PORT_ID_SENARY_MI2S_TX:
+		cfg_type = AFE_PARAM_ID_I2S_CONFIG;
+		break;
+	case HDMI_RX:
+	case DISPLAY_PORT_RX:
+		cfg_type = AFE_PARAM_ID_HDMI_CONFIG;
+		break;
+	case SLIMBUS_0_RX:
+	case SLIMBUS_0_TX:
+	case SLIMBUS_1_RX:
+	case SLIMBUS_1_TX:
+	case SLIMBUS_2_RX:
+	case SLIMBUS_2_TX:
+	case SLIMBUS_3_RX:
+	case SLIMBUS_3_TX:
+	case SLIMBUS_4_RX:
+	case SLIMBUS_4_TX:
+	case SLIMBUS_5_RX:
+	case SLIMBUS_6_RX:
+	case SLIMBUS_6_TX:
+	case SLIMBUS_7_RX:
+	case SLIMBUS_7_TX:
+	case SLIMBUS_8_RX:
+	case SLIMBUS_8_TX:
+		cfg_type = AFE_PARAM_ID_SLIMBUS_CONFIG;
+		break;
+	case AFE_PORT_ID_USB_RX:
+	case AFE_PORT_ID_USB_TX:
+		cfg_type = AFE_PARAM_ID_USB_AUDIO_CONFIG;
+		break;
+	default:
+		pr_err("%s: Invalid port id 0x%x\n",
+			__func__, port_id);
+		ret = -EINVAL;
+		goto fail_cmd;
+	}
+	config.hdr.opcode = AFE_PORT_CMD_SET_PARAM_V2;
+	config.param.port_id = q6audio_get_port_id(port_id);
+	config.param.payload_size = sizeof(config) - sizeof(struct apr_hdr)
+				 - sizeof(config.param);
+	config.param.payload_address_lsw = 0x00;
+	config.param.payload_address_msw = 0x00;
+	config.param.mem_map_handle = 0x00;
+	config.pdata.module_id = AFE_MODULE_AUDIO_DEV_INTERFACE;
+	config.pdata.param_id = cfg_type;
+	config.pdata.param_size = sizeof(config.port);
+
+	config.port = *afe_config;
+	pr_debug("%s: param PL size=%d iparam_size[%d][%zd %zd %zd %zd] param_id[0x%x]\n",
+		__func__, config.param.payload_size, config.pdata.param_size,
+		sizeof(config), sizeof(config.param), sizeof(config.port),
+		sizeof(struct apr_hdr), config.pdata.param_id);
+
+	ret = afe_apr_send_pkt(&config, &this_afe.wait[index]);
+	if (ret) {
+		pr_err("%s: AFE enable for port 0x%x opcode[0x%x]failed %d\n",
+			__func__, port_id, cfg_type, ret);
+		goto fail_cmd;
+	}
+	start.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+				APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
+	start.hdr.pkt_size = sizeof(start);
+	start.hdr.src_port = 0;
+	start.hdr.dest_port = 0;
+	start.hdr.token = index;
+	start.hdr.opcode = AFE_PORT_CMD_DEVICE_START;
+	start.port_id = q6audio_get_port_id(port_id);
+	pr_debug("%s: cmd device start opcode[0x%x] port id[0x%x]\n",
+		__func__, start.hdr.opcode, start.port_id);
+
+	ret = afe_apr_send_pkt(&start, &this_afe.wait[index]);
+	if (ret) {
+		pr_err("%s: AFE enable for port 0x%x failed %d\n", __func__,
+				port_id, ret);
+		goto fail_cmd;
+	}
+
+fail_cmd:
+	mutex_unlock(&this_afe.afe_cmd_lock);
+	return ret;
+}
+
+int afe_loopback(u16 enable, u16 rx_port, u16 tx_port)
+{
+	struct afe_loopback_cfg_v1 lb_cmd;
+	int ret = 0;
+	int index = 0;
+
+	if (rx_port == MI2S_RX)
+		rx_port = AFE_PORT_ID_PRIMARY_MI2S_RX;
+	if (tx_port == MI2S_TX)
+		tx_port = AFE_PORT_ID_PRIMARY_MI2S_TX;
+
+	ret = afe_q6_interface_prepare();
+	if (ret != 0) {
+		pr_err("%s: Q6 interface prepare failed %d\n", __func__, ret);
+		return ret;
+	}
+
+	index = q6audio_get_port_index(rx_port);
+	if (index < 0 || index >= AFE_MAX_PORTS) {
+		pr_err("%s: AFE port index[%d] invalid!\n",
+				__func__, index);
+		return -EINVAL;
+	}
+	ret = q6audio_validate_port(rx_port);
+	if (ret < 0) {
+		pr_err("%s: Invalid port 0x%x ret %d", __func__, rx_port, ret);
+		return -EINVAL;
+	}
+
+	lb_cmd.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+						APR_HDR_LEN(20), APR_PKT_VER);
+	lb_cmd.hdr.pkt_size = sizeof(lb_cmd);
+	lb_cmd.hdr.src_port = 0;
+	lb_cmd.hdr.dest_port = 0;
+	lb_cmd.hdr.token = index;
+	lb_cmd.hdr.opcode = AFE_PORT_CMD_SET_PARAM_V2;
+	lb_cmd.param.port_id = tx_port;
+	lb_cmd.param.payload_size = (sizeof(lb_cmd) - sizeof(struct apr_hdr) -
+				     sizeof(struct afe_port_cmd_set_param_v2));
+	lb_cmd.param.payload_address_lsw = 0x00;
+	lb_cmd.param.payload_address_msw = 0x00;
+	lb_cmd.param.mem_map_handle = 0x00;
+	lb_cmd.pdata.module_id = AFE_MODULE_LOOPBACK;
+	lb_cmd.pdata.param_id = AFE_PARAM_ID_LOOPBACK_CONFIG;
+	lb_cmd.pdata.param_size = lb_cmd.param.payload_size -
+				  sizeof(struct afe_port_param_data_v2);
+
+	lb_cmd.dst_port_id = rx_port;
+	lb_cmd.routing_mode = LB_MODE_DEFAULT;
+	lb_cmd.enable = (enable ? 1 : 0);
+	lb_cmd.loopback_cfg_minor_version = AFE_API_VERSION_LOOPBACK_CONFIG;
+
+	ret = afe_apr_send_pkt(&lb_cmd, &this_afe.wait[index]);
+	if (ret)
+		pr_err("%s: AFE loopback failed %d\n", __func__, ret);
+	return ret;
+}
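+
+/*
+ * Usage sketch (illustrative): enabling, then later disabling, a
+ * default-mode loopback between two running ports (example port ids):
+ *
+ *	ret = afe_loopback(1, AFE_PORT_ID_PRIMARY_MI2S_RX,
+ *			   AFE_PORT_ID_PRIMARY_MI2S_TX);
+ *	...
+ *	ret = afe_loopback(0, AFE_PORT_ID_PRIMARY_MI2S_RX,
+ *			   AFE_PORT_ID_PRIMARY_MI2S_TX);
+ */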
+
+int afe_loopback_gain(u16 port_id, u16 volume)
+{
+	struct afe_loopback_gain_per_path_param set_param;
+	int ret = 0;
+	int index = 0;
+
+	if (this_afe.apr == NULL) {
+		this_afe.apr = apr_register("ADSP", "AFE", afe_callback,
+					0xFFFFFFFF, &this_afe);
+		pr_debug("%s: Register AFE\n", __func__);
+		if (this_afe.apr == NULL) {
+			pr_err("%s: Unable to register AFE\n", __func__);
+			ret = -ENODEV;
+			return ret;
+		}
+		rtac_set_afe_handle(this_afe.apr);
+	}
+
+	ret = q6audio_validate_port(port_id);
+	if (ret < 0) {
+		pr_err("%s: Failed : Invalid Port id = 0x%x ret %d\n",
+			__func__, port_id, ret);
+		ret = -EINVAL;
+		goto fail_cmd;
+	}
+	index = q6audio_get_port_index(port_id);
+	if (index < 0 || index >= AFE_MAX_PORTS) {
+		pr_err("%s: AFE port index[%d] invalid!\n",
+				__func__, index);
+		return -EINVAL;
+	}
+
+	/* RX port numbers are even; TX port numbers are odd. */
+	if (port_id % 2 == 0) {
+		pr_err("%s: Failed : afe loopback gain only for TX ports. port_id %d\n",
+				__func__, port_id);
+		ret = -EINVAL;
+		goto fail_cmd;
+	}
+
+	pr_debug("%s: port 0x%x volume %d\n", __func__, port_id, volume);
+
+	set_param.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+				APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
+	set_param.hdr.pkt_size = sizeof(set_param);
+	set_param.hdr.src_port = 0;
+	set_param.hdr.dest_port = 0;
+	set_param.hdr.token = index;
+	set_param.hdr.opcode = AFE_PORT_CMD_SET_PARAM_V2;
+
+	set_param.param.port_id	= port_id;
+	set_param.param.payload_size =
+	    (sizeof(struct afe_loopback_gain_per_path_param) -
+	     sizeof(struct apr_hdr) - sizeof(struct afe_port_cmd_set_param_v2));
+	set_param.param.payload_address_lsw	= 0;
+	set_param.param.payload_address_msw	= 0;
+	set_param.param.mem_map_handle	= 0;
+
+	set_param.pdata.module_id = AFE_MODULE_LOOPBACK;
+	set_param.pdata.param_id = AFE_PARAM_ID_LOOPBACK_GAIN_PER_PATH;
+	set_param.pdata.param_size =
+	    (set_param.param.payload_size -
+	     sizeof(struct afe_port_param_data_v2));
+	set_param.rx_port_id = port_id;
+	set_param.gain = volume;
+
+	ret = afe_apr_send_pkt(&set_param, &this_afe.wait[index]);
+	if (ret) {
+		pr_err("%s: AFE param set failed for port 0x%x ret %d\n",
+					__func__, port_id, ret);
+		goto fail_cmd;
+	}
+
+fail_cmd:
+	return ret;
+}
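+
+/*
+ * Usage sketch (illustrative): the gain argument is an absolute value;
+ * a caller working in percent can scale it the same way the debugfs
+ * interface below does ("percent" and "tx_port_id" are placeholders):
+ *
+ *	u16 gain = (Q6AFE_MAX_VOLUME * percent) / 100;
+ *	ret = afe_loopback_gain(tx_port_id, gain);
+ */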
+
+int afe_pseudo_port_start_nowait(u16 port_id)
+{
+	struct afe_pseudoport_start_command start;
+	int ret = 0;
+
+	pr_debug("%s: port_id=0x%x\n", __func__, port_id);
+	if (this_afe.apr == NULL) {
+		pr_err("%s: AFE APR is not registered\n", __func__);
+		return -ENODEV;
+	}
+
+	start.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+				APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
+	start.hdr.pkt_size = sizeof(start);
+	start.hdr.src_port = 0;
+	start.hdr.dest_port = 0;
+	start.hdr.token = 0;
+	start.hdr.opcode = AFE_PSEUDOPORT_CMD_START;
+	start.port_id = port_id;
+	start.timing = 1;
+
+	ret = afe_apr_send_pkt(&start, NULL);
+	if (ret) {
+		pr_err("%s: AFE enable for port 0x%x failed %d\n",
+		       __func__, port_id, ret);
+		return ret;
+	}
+	return 0;
+}
+
+int afe_start_pseudo_port(u16 port_id)
+{
+	int ret = 0;
+	struct afe_pseudoport_start_command start;
+	int index = 0;
+
+	pr_debug("%s: port_id = 0x%x\n", __func__, port_id);
+
+	ret = afe_q6_interface_prepare();
+	if (ret != 0) {
+		pr_err("%s: Q6 interface prepare failed %d\n", __func__, ret);
+		return ret;
+	}
+
+	index = q6audio_get_port_index(port_id);
+	if (index < 0 || index >= AFE_MAX_PORTS) {
+		pr_err("%s: AFE port index[%d] invalid!\n",
+				__func__, index);
+		return -EINVAL;
+	}
+	ret = q6audio_validate_port(port_id);
+	if (ret < 0) {
+		pr_err("%s: Invalid port 0x%x ret %d",
+			__func__, port_id, ret);
+		return -EINVAL;
+	}
+
+	start.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+				APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
+	start.hdr.pkt_size = sizeof(start);
+	start.hdr.src_port = 0;
+	start.hdr.dest_port = 0;
+	start.hdr.token = index;
+	start.hdr.opcode = AFE_PSEUDOPORT_CMD_START;
+	start.port_id = port_id;
+	start.timing = 1;
+
+	ret = afe_apr_send_pkt(&start, &this_afe.wait[index]);
+	if (ret)
+		pr_err("%s: AFE enable for port 0x%x failed %d\n",
+		       __func__, port_id, ret);
+	return ret;
+}
+
+int afe_pseudo_port_stop_nowait(u16 port_id)
+{
+	int ret = 0;
+	struct afe_pseudoport_stop_command stop;
+	int index = 0;
+
+	pr_debug("%s: port_id = 0x%x\n", __func__, port_id);
+
+	if (this_afe.apr == NULL) {
+		pr_err("%s: AFE is already closed\n", __func__);
+		return -EINVAL;
+	}
+	index = q6audio_get_port_index(port_id);
+	if (index < 0 || index >= AFE_MAX_PORTS) {
+		pr_err("%s: AFE port index[%d] invalid!\n",
+				__func__, index);
+		return -EINVAL;
+	}
+	ret = q6audio_validate_port(port_id);
+	if (ret < 0) {
+		pr_err("%s: Invalid port 0x%x ret %d",
+			__func__, port_id, ret);
+		return -EINVAL;
+	}
+
+	stop.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+				APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
+	stop.hdr.pkt_size = sizeof(stop);
+	stop.hdr.src_port = 0;
+	stop.hdr.dest_port = 0;
+	stop.hdr.token = index;
+	stop.hdr.opcode = AFE_PSEUDOPORT_CMD_STOP;
+	stop.port_id = port_id;
+	stop.reserved = 0;
+
+	ret = afe_apr_send_pkt(&stop, NULL);
+	if (ret)
+		pr_err("%s: AFE close failed %d\n", __func__, ret);
+
+	return ret;
+}
+
+int afe_port_group_set_param(u16 group_id,
+	union afe_port_group_config *afe_group_config)
+{
+	int ret;
+	struct afe_port_group_create config;
+	int cfg_type;
+
+	if (!afe_group_config) {
+		pr_err("%s: Error, no configuration data\n", __func__);
+		return -EINVAL;
+	}
+
+	pr_debug("%s: group id: 0x%x\n", __func__, group_id);
+
+	ret = afe_q6_interface_prepare();
+	if (ret != 0) {
+		pr_err("%s: Q6 interface prepare failed %d\n", __func__, ret);
+		return ret;
+	}
+
+	switch (group_id) {
+	case AFE_GROUP_DEVICE_ID_PRIMARY_TDM_RX:
+	case AFE_GROUP_DEVICE_ID_PRIMARY_TDM_TX:
+	case AFE_GROUP_DEVICE_ID_SECONDARY_TDM_RX:
+	case AFE_GROUP_DEVICE_ID_SECONDARY_TDM_TX:
+	case AFE_GROUP_DEVICE_ID_TERTIARY_TDM_RX:
+	case AFE_GROUP_DEVICE_ID_TERTIARY_TDM_TX:
+	case AFE_GROUP_DEVICE_ID_QUATERNARY_TDM_RX:
+	case AFE_GROUP_DEVICE_ID_QUATERNARY_TDM_TX:
+		cfg_type = AFE_PARAM_ID_GROUP_DEVICE_TDM_CONFIG;
+		break;
+	default:
+		pr_err("%s: Invalid group id 0x%x\n", __func__, group_id);
+		return -EINVAL;
+	}
+
+	memset(&config, 0, sizeof(config));
+	config.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+					     APR_HDR_LEN(APR_HDR_SIZE),
+					     APR_PKT_VER);
+	config.hdr.pkt_size = sizeof(config);
+	config.hdr.src_port = 0;
+	config.hdr.dest_port = 0;
+	config.hdr.token = IDX_GLOBAL_CFG;
+	config.hdr.opcode = AFE_SVC_CMD_SET_PARAM;
+
+	config.param.payload_size = sizeof(config) - sizeof(struct apr_hdr) -
+				    sizeof(config.param);
+	config.param.payload_address_lsw = 0x00;
+	config.param.payload_address_msw = 0x00;
+	config.param.mem_map_handle = 0x00;
+	config.pdata.module_id = AFE_MODULE_GROUP_DEVICE;
+	config.pdata.param_id = cfg_type;
+	config.pdata.param_size = sizeof(config.data);
+	config.data = *afe_group_config;
+
+	ret = afe_apr_send_pkt(&config, &this_afe.wait[IDX_GLOBAL_CFG]);
+	if (ret)
+		pr_err("%s: AFE_PARAM_ID_GROUP_DEVICE_CFG failed %d\n",
+			__func__, ret);
+
+	return ret;
+}
+
+int afe_port_group_enable(u16 group_id,
+	union afe_port_group_config *afe_group_config,
+	u16 enable)
+{
+	int ret;
+	struct afe_port_group_create config;
+
+	pr_debug("%s: group id: 0x%x enable: %d\n", __func__,
+		group_id, enable);
+
+	ret = afe_q6_interface_prepare();
+	if (ret != 0) {
+		pr_err("%s: Q6 interface prepare failed %d\n", __func__, ret);
+		return ret;
+	}
+
+	if (enable) {
+		ret = afe_port_group_set_param(group_id, afe_group_config);
+		if (ret < 0) {
+			pr_err("%s: afe send failed %d\n", __func__, ret);
+			return ret;
+		}
+	}
+
+	memset(&config, 0, sizeof(config));
+	config.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+					     APR_HDR_LEN(APR_HDR_SIZE),
+					     APR_PKT_VER);
+	config.hdr.pkt_size = sizeof(config);
+	config.hdr.src_port = 0;
+	config.hdr.dest_port = 0;
+	config.hdr.token = IDX_GLOBAL_CFG;
+	config.hdr.opcode = AFE_SVC_CMD_SET_PARAM;
+
+	config.param.payload_size = sizeof(config) - sizeof(struct apr_hdr) -
+				    sizeof(config.param);
+	config.param.payload_address_lsw = 0x00;
+	config.param.payload_address_msw = 0x00;
+	config.param.mem_map_handle = 0x00;
+	config.pdata.module_id = AFE_MODULE_GROUP_DEVICE;
+	config.pdata.param_id = AFE_PARAM_ID_GROUP_DEVICE_ENABLE;
+	config.pdata.param_size = sizeof(config.data);
+	config.data.group_enable.group_id = group_id;
+	config.data.group_enable.enable = enable;
+
+	ret = afe_apr_send_pkt(&config, &this_afe.wait[IDX_GLOBAL_CFG]);
+	if (ret)
+		pr_err("%s: AFE_PARAM_ID_GROUP_DEVICE_ENABLE failed %d\n",
+			__func__, ret);
+
+	return ret;
+}
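+
+/*
+ * Usage sketch (illustrative): a TDM machine driver would typically
+ * enable the group device before starting the member ports and disable
+ * it after stopping them; the group config is only needed on enable:
+ *
+ *	ret = afe_port_group_enable(AFE_GROUP_DEVICE_ID_PRIMARY_TDM_RX,
+ *				    &group_cfg, 1);
+ *	...
+ *	ret = afe_port_group_enable(AFE_GROUP_DEVICE_ID_PRIMARY_TDM_RX,
+ *				    NULL, 0);
+ */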
+
+int afe_stop_pseudo_port(u16 port_id)
+{
+	int ret = 0;
+	struct afe_pseudoport_stop_command stop;
+	int index = 0;
+
+	pr_debug("%s: port_id = 0x%x\n", __func__, port_id);
+
+	if (this_afe.apr == NULL) {
+		pr_err("%s: AFE is already closed\n", __func__);
+		return -EINVAL;
+	}
+
+	index = q6audio_get_port_index(port_id);
+	if (index < 0 || index >= AFE_MAX_PORTS) {
+		pr_err("%s: AFE port index[%d] invalid!\n",
+				__func__, index);
+		return -EINVAL;
+	}
+	ret = q6audio_validate_port(port_id);
+	if (ret < 0) {
+		pr_err("%s: Invalid port 0x%x ret %d\n",
+			__func__, port_id, ret);
+		return -EINVAL;
+	}
+
+	stop.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+				APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
+	stop.hdr.pkt_size = sizeof(stop);
+	stop.hdr.src_port = 0;
+	stop.hdr.dest_port = 0;
+	stop.hdr.token = index;
+	stop.hdr.opcode = AFE_PSEUDOPORT_CMD_STOP;
+	stop.port_id = port_id;
+	stop.reserved = 0;
+
+	ret = afe_apr_send_pkt(&stop, &this_afe.wait[index]);
+	if (ret)
+		pr_err("%s: AFE close failed %d\n", __func__, ret);
+
+	return ret;
+}
+
+uint32_t afe_req_mmap_handle(struct afe_audio_client *ac)
+{
+	return ac->mem_map_handle;
+}
+
+struct afe_audio_client *q6afe_audio_client_alloc(void *priv)
+{
+	struct afe_audio_client *ac;
+	int lcnt = 0;
+
+	ac = kzalloc(sizeof(struct afe_audio_client), GFP_KERNEL);
+	if (!ac) {
+		pr_err("%s: cannot allocate audio client for afe\n", __func__);
+		return NULL;
+	}
+	ac->priv = priv;
+
+	init_waitqueue_head(&ac->cmd_wait);
+	INIT_LIST_HEAD(&ac->port[0].mem_map_handle);
+	INIT_LIST_HEAD(&ac->port[1].mem_map_handle);
+	pr_debug("%s: mem_map_handle list init'ed\n", __func__);
+	mutex_init(&ac->cmd_lock);
+	for (lcnt = 0; lcnt <= OUT; lcnt++) {
+		mutex_init(&ac->port[lcnt].lock);
+		spin_lock_init(&ac->port[lcnt].dsp_lock);
+	}
+	atomic_set(&ac->cmd_state, 0);
+
+	return ac;
+}
+
+int q6afe_audio_client_buf_alloc_contiguous(unsigned int dir,
+			struct afe_audio_client *ac,
+			unsigned int bufsz,
+			unsigned int bufcnt)
+{
+	int cnt = 0;
+	int rc = 0;
+	struct afe_audio_buffer *buf;
+	size_t len;
+
+	if (!(ac) || ((dir != IN) && (dir != OUT))) {
+		pr_err("%s: ac %pK dir %d\n", __func__, ac, dir);
+		return -EINVAL;
+	}
+
+	pr_debug("%s: bufsz[%d]bufcnt[%d]\n",
+			__func__,
+			bufsz, bufcnt);
+
+	if (ac->port[dir].buf) {
+		pr_debug("%s: buffer already allocated\n", __func__);
+		return 0;
+	}
+	mutex_lock(&ac->cmd_lock);
+	buf = kzalloc(((sizeof(struct afe_audio_buffer))*bufcnt),
+			GFP_KERNEL);
+
+	if (!buf) {
+		pr_err("%s: null buf\n", __func__);
+		mutex_unlock(&ac->cmd_lock);
+		goto fail;
+	}
+
+	ac->port[dir].buf = buf;
+
+	rc = msm_audio_ion_alloc("afe_client", &buf[0].client,
+				&buf[0].handle, bufsz*bufcnt,
+				&buf[0].phys, &len,
+				&buf[0].data);
+	if (rc) {
+		pr_err("%s: audio ION alloc failed, rc = %d\n",
+			__func__, rc);
+		mutex_unlock(&ac->cmd_lock);
+		goto fail;
+	}
+
+	buf[0].used = dir ^ 1;
+	buf[0].size = bufsz;
+	buf[0].actual_size = bufsz;
+	cnt = 1;
+	while (cnt < bufcnt) {
+		if (bufsz > 0) {
+			buf[cnt].data = buf[0].data + (cnt * bufsz);
+			buf[cnt].phys = buf[0].phys + (cnt * bufsz);
+			if (!buf[cnt].data) {
+				pr_err("%s: Buf alloc failed\n",
+							__func__);
+				mutex_unlock(&ac->cmd_lock);
+				goto fail;
+			}
+			buf[cnt].used = dir ^ 1;
+			buf[cnt].size = bufsz;
+			buf[cnt].actual_size = bufsz;
+			pr_debug("%s:  data[%pK]phys[%pK][%pK]\n", __func__,
+				   buf[cnt].data,
+				   &buf[cnt].phys,
+				   &buf[cnt].phys);
+		}
+		cnt++;
+	}
+	ac->port[dir].max_buf_cnt = cnt;
+	mutex_unlock(&ac->cmd_lock);
+	return 0;
+fail:
+	pr_err("%s: jump fail\n", __func__);
+	q6afe_audio_client_buf_free_contiguous(dir, ac);
+	return -EINVAL;
+}
+
+int afe_memory_map(phys_addr_t dma_addr_p, u32 dma_buf_sz,
+			struct afe_audio_client *ac)
+{
+	int ret = 0;
+
+	mutex_lock(&this_afe.afe_cmd_lock);
+	ac->mem_map_handle = 0;
+	ret = afe_cmd_memory_map(dma_addr_p, dma_buf_sz);
+	if (ret < 0) {
+		pr_err("%s: afe_cmd_memory_map failed %d\n",
+			__func__, ret);
+
+		mutex_unlock(&this_afe.afe_cmd_lock);
+		return ret;
+	}
+	ac->mem_map_handle = this_afe.mmap_handle;
+	mutex_unlock(&this_afe.afe_cmd_lock);
+
+	return ret;
+}
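+
+/*
+ * Usage sketch (illustrative): a typical client allocates a contiguous
+ * buffer set and maps its base address with the ADSP; "priv", "bufsz"
+ * and "bufcnt" are caller-supplied:
+ *
+ *	ac = q6afe_audio_client_alloc(priv);
+ *	ret = q6afe_audio_client_buf_alloc_contiguous(IN, ac, bufsz, bufcnt);
+ *	ret = afe_memory_map(ac->port[IN].buf[0].phys, bufsz * bufcnt, ac);
+ *
+ * Teardown is afe_cmd_memory_unmap(afe_req_mmap_handle(ac)) followed by
+ * q6afe_audio_client_free(ac).
+ */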
+
+int afe_cmd_memory_map(phys_addr_t dma_addr_p, u32 dma_buf_sz)
+{
+	int ret = 0;
+	int cmd_size = 0;
+	void *payload = NULL;
+	void *mmap_region_cmd = NULL;
+	struct afe_service_cmd_shared_mem_map_regions *mregion = NULL;
+	struct afe_service_shared_map_region_payload *mregion_pl = NULL;
+	int index = 0;
+
+	pr_debug("%s:\n", __func__);
+
+	if (this_afe.apr == NULL) {
+		this_afe.apr = apr_register("ADSP", "AFE", afe_callback,
+					0xFFFFFFFF, &this_afe);
+		pr_debug("%s: Register AFE\n", __func__);
+		if (this_afe.apr == NULL) {
+			pr_err("%s: Unable to register AFE\n", __func__);
+			ret = -ENODEV;
+			return ret;
+		}
+		rtac_set_afe_handle(this_afe.apr);
+	}
+	if (dma_buf_sz % SZ_4K != 0) {
+		/*
+		 * The memory allocated by msm_audio_ion_alloc is always 4kB
+		 * aligned, and the ADSP expects the size to be 4kB aligned as
+		 * well, so round the buffer size up before passing it to the
+		 * ADSP (e.g. a 6000-byte buffer is rounded up to 8192 bytes).
+		 */
+		dma_buf_sz = PAGE_ALIGN(dma_buf_sz);
+	}
+
+	cmd_size = sizeof(struct afe_service_cmd_shared_mem_map_regions)
+		+ sizeof(struct afe_service_shared_map_region_payload);
+
+	mmap_region_cmd = kzalloc(cmd_size, GFP_KERNEL);
+	if (!mmap_region_cmd) {
+		pr_err("%s: allocate mmap_region_cmd failed\n", __func__);
+		return -ENOMEM;
+	}
+
+	mregion = (struct afe_service_cmd_shared_mem_map_regions *)
+							mmap_region_cmd;
+	mregion->hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+				APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
+	mregion->hdr.pkt_size = cmd_size;
+	mregion->hdr.src_port = 0;
+	mregion->hdr.dest_port = 0;
+	mregion->hdr.token = 0;
+	mregion->hdr.opcode = AFE_SERVICE_CMD_SHARED_MEM_MAP_REGIONS;
+	mregion->mem_pool_id = ADSP_MEMORY_MAP_SHMEM8_4K_POOL;
+	mregion->num_regions = 1;
+	mregion->property_flag = 0x00;
+	/* Todo */
+	index = mregion->hdr.token = IDX_RSVD_2;
+
+	payload = ((u8 *) mmap_region_cmd +
+		   sizeof(struct afe_service_cmd_shared_mem_map_regions));
+
+	mregion_pl = (struct afe_service_shared_map_region_payload *)payload;
+
+	mregion_pl->shm_addr_lsw = lower_32_bits(dma_addr_p);
+	mregion_pl->shm_addr_msw = msm_audio_populate_upper_32_bits(dma_addr_p);
+	mregion_pl->mem_size_bytes = dma_buf_sz;
+
+	pr_debug("%s: dma_addr_p 0x%pK , size %d\n", __func__,
+					&dma_addr_p, dma_buf_sz);
+	atomic_set(&this_afe.state, 1);
+	atomic_set(&this_afe.status, 0);
+	this_afe.mmap_handle = 0;
+	ret = apr_send_pkt(this_afe.apr, (uint32_t *) mmap_region_cmd);
+	if (ret < 0) {
+		pr_err("%s: AFE memory map cmd failed %d\n",
+		       __func__, ret);
+		ret = -EINVAL;
+		goto fail_cmd;
+	}
+
+	ret = wait_event_timeout(this_afe.wait[index],
+				 (atomic_read(&this_afe.state) == 0),
+				 msecs_to_jiffies(TIMEOUT_MS));
+	if (!ret) {
+		pr_err("%s: wait_event timeout\n", __func__);
+		ret = -EINVAL;
+		goto fail_cmd;
+	}
+	if (atomic_read(&this_afe.status) > 0) {
+		pr_err("%s: config cmd failed [%s]\n",
+			__func__, adsp_err_get_err_str(
+			atomic_read(&this_afe.status)));
+		ret = adsp_err_get_lnx_err_code(
+				atomic_read(&this_afe.status));
+		goto fail_cmd;
+	}
+
+	kfree(mmap_region_cmd);
+	return 0;
+fail_cmd:
+	kfree(mmap_region_cmd);
+	pr_err("%s: fail_cmd\n", __func__);
+	return ret;
+}
+
+int afe_cmd_memory_map_nowait(int port_id, phys_addr_t dma_addr_p,
+		u32 dma_buf_sz)
+{
+	int ret = 0;
+	int cmd_size = 0;
+	void *payload = NULL;
+	void *mmap_region_cmd = NULL;
+	struct afe_service_cmd_shared_mem_map_regions *mregion = NULL;
+	struct afe_service_shared_map_region_payload *mregion_pl = NULL;
+	int index = 0;
+
+	pr_debug("%s:\n", __func__);
+
+	if (this_afe.apr == NULL) {
+		this_afe.apr = apr_register("ADSP", "AFE", afe_callback,
+					0xFFFFFFFF, &this_afe);
+		pr_debug("%s: Register AFE\n", __func__);
+		if (this_afe.apr == NULL) {
+			pr_err("%s: Unable to register AFE\n", __func__);
+			ret = -ENODEV;
+			return ret;
+		}
+		rtac_set_afe_handle(this_afe.apr);
+	}
+	index = q6audio_get_port_index(port_id);
+	if (index < 0 || index >= AFE_MAX_PORTS) {
+		pr_err("%s: AFE port index[%d] invalid!\n",
+				__func__, index);
+		return -EINVAL;
+	}
+	ret = q6audio_validate_port(port_id);
+	if (ret < 0) {
+		pr_err("%s: Invalid port 0x%x ret %d",
+			__func__, port_id, ret);
+		return -EINVAL;
+	}
+
+	cmd_size = sizeof(struct afe_service_cmd_shared_mem_map_regions)
+		+ sizeof(struct afe_service_shared_map_region_payload);
+
+	mmap_region_cmd = kzalloc(cmd_size, GFP_KERNEL);
+	if (!mmap_region_cmd) {
+		pr_err("%s: allocate mmap_region_cmd failed\n", __func__);
+		return -ENOMEM;
+	}
+	mregion = (struct afe_service_cmd_shared_mem_map_regions *)
+						mmap_region_cmd;
+	mregion->hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+				APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
+	mregion->hdr.pkt_size = sizeof(mregion);
+	mregion->hdr.src_port = 0;
+	mregion->hdr.dest_port = 0;
+	mregion->hdr.token = 0;
+	mregion->hdr.opcode = AFE_SERVICE_CMD_SHARED_MEM_MAP_REGIONS;
+	mregion->mem_pool_id = ADSP_MEMORY_MAP_SHMEM8_4K_POOL;
+	mregion->num_regions = 1;
+	mregion->property_flag = 0x00;
+
+	payload = ((u8 *) mmap_region_cmd +
+		sizeof(struct afe_service_cmd_shared_mem_map_regions));
+	mregion_pl = (struct afe_service_shared_map_region_payload *)payload;
+
+	mregion_pl->shm_addr_lsw = lower_32_bits(dma_addr_p);
+	mregion_pl->shm_addr_msw = msm_audio_populate_upper_32_bits(dma_addr_p);
+	mregion_pl->mem_size_bytes = dma_buf_sz;
+
+	ret = afe_apr_send_pkt(mmap_region_cmd, NULL);
+	if (ret)
+		pr_err("%s: AFE memory map cmd failed %d\n",
+		       __func__, ret);
+	kfree(mmap_region_cmd);
+	return ret;
+}
+
+int q6afe_audio_client_buf_free_contiguous(unsigned int dir,
+			struct afe_audio_client *ac)
+{
+	struct afe_audio_port_data *port;
+	int cnt = 0;
+
+	mutex_lock(&ac->cmd_lock);
+	port = &ac->port[dir];
+	if (!port->buf) {
+		pr_err("%s: buf is null\n", __func__);
+		mutex_unlock(&ac->cmd_lock);
+		return 0;
+	}
+	cnt = port->max_buf_cnt - 1;
+
+	if (port->buf[0].data) {
+		pr_debug("%s: data[%pK]phys[%pK][%pK] , client[%pK] handle[%pK]\n",
+			__func__,
+			port->buf[0].data,
+			&port->buf[0].phys,
+			&port->buf[0].phys,
+			port->buf[0].client,
+			port->buf[0].handle);
+		msm_audio_ion_free(port->buf[0].client, port->buf[0].handle);
+		port->buf[0].client = NULL;
+		port->buf[0].handle = NULL;
+	}
+
+	while (cnt >= 0) {
+		port->buf[cnt].data = NULL;
+		port->buf[cnt].phys = 0;
+		cnt--;
+	}
+	port->max_buf_cnt = 0;
+	kfree(port->buf);
+	port->buf = NULL;
+	mutex_unlock(&ac->cmd_lock);
+	return 0;
+}
+
+void q6afe_audio_client_free(struct afe_audio_client *ac)
+{
+	int loopcnt;
+	struct afe_audio_port_data *port;
+
+	if (!ac) {
+		pr_err("%s: audio client is NULL\n", __func__);
+		return;
+	}
+	for (loopcnt = 0; loopcnt <= OUT; loopcnt++) {
+		port = &ac->port[loopcnt];
+		if (!port->buf)
+			continue;
+		pr_debug("%s: loopcnt = %d\n", __func__, loopcnt);
+		q6afe_audio_client_buf_free_contiguous(loopcnt, ac);
+	}
+	kfree(ac);
+}
+
+int afe_cmd_memory_unmap(u32 mem_map_handle)
+{
+	int ret = 0;
+	struct afe_service_cmd_shared_mem_unmap_regions mregion;
+	int index = 0;
+
+	pr_debug("%s: handle 0x%x\n", __func__, mem_map_handle);
+
+	if (this_afe.apr == NULL) {
+		this_afe.apr = apr_register("ADSP", "AFE", afe_callback,
+					0xFFFFFFFF, &this_afe);
+		pr_debug("%s: Register AFE\n", __func__);
+		if (this_afe.apr == NULL) {
+			pr_err("%s: Unable to register AFE\n", __func__);
+			ret = -ENODEV;
+			return ret;
+		}
+		rtac_set_afe_handle(this_afe.apr);
+	}
+
+	mregion.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+				APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
+	mregion.hdr.pkt_size = sizeof(mregion);
+	mregion.hdr.src_port = 0;
+	mregion.hdr.dest_port = 0;
+	mregion.hdr.token = 0;
+	mregion.hdr.opcode = AFE_SERVICE_CMD_SHARED_MEM_UNMAP_REGIONS;
+	mregion.mem_map_handle = mem_map_handle;
+
+	/* Todo */
+	index = mregion.hdr.token = IDX_RSVD_2;
+
+	atomic_set(&this_afe.status, 0);
+	ret = afe_apr_send_pkt(&mregion, &this_afe.wait[index]);
+	if (ret)
+		pr_err("%s: AFE memory unmap cmd failed %d\n",
+		       __func__, ret);
+
+	return ret;
+}
+
+int afe_cmd_memory_unmap_nowait(u32 mem_map_handle)
+{
+	int ret = 0;
+	struct afe_service_cmd_shared_mem_unmap_regions mregion;
+
+	pr_debug("%s: handle 0x%x\n", __func__, mem_map_handle);
+
+	if (this_afe.apr == NULL) {
+		this_afe.apr = apr_register("ADSP", "AFE", afe_callback,
+					0xFFFFFFFF, &this_afe);
+		pr_debug("%s: Register AFE\n", __func__);
+		if (this_afe.apr == NULL) {
+			pr_err("%s: Unable to register AFE\n", __func__);
+			ret = -ENODEV;
+			return ret;
+		}
+		rtac_set_afe_handle(this_afe.apr);
+	}
+
+	mregion.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+				APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
+	mregion.hdr.pkt_size = sizeof(mregion);
+	mregion.hdr.src_port = 0;
+	mregion.hdr.dest_port = 0;
+	mregion.hdr.token = 0;
+	mregion.hdr.opcode = AFE_SERVICE_CMD_SHARED_MEM_UNMAP_REGIONS;
+	mregion.mem_map_handle = mem_map_handle;
+
+	ret = afe_apr_send_pkt(&mregion, NULL);
+	if (ret)
+		pr_err("%s: AFE memory unmap cmd failed %d\n",
+			__func__, ret);
+	return ret;
+}
+
+int afe_register_get_events(u16 port_id,
+		void (*cb) (uint32_t opcode,
+		uint32_t token, uint32_t *payload, void *priv),
+		void *private_data)
+{
+	int ret = 0;
+	struct afe_service_cmd_register_rt_port_driver rtproxy;
+
+	pr_debug("%s: port_id: 0x%x\n", __func__, port_id);
+
+	if (this_afe.apr == NULL) {
+		this_afe.apr = apr_register("ADSP", "AFE", afe_callback,
+					0xFFFFFFFF, &this_afe);
+		pr_debug("%s: Register AFE\n", __func__);
+		if (this_afe.apr == NULL) {
+			pr_err("%s: Unable to register AFE\n", __func__);
+			ret = -ENODEV;
+			return ret;
+		}
+		rtac_set_afe_handle(this_afe.apr);
+	}
+	if ((port_id == RT_PROXY_DAI_002_RX) ||
+		(port_id == RT_PROXY_DAI_001_TX)) {
+		port_id = VIRTUAL_ID_TO_PORTID(port_id);
+	} else {
+		pr_err("%s: wrong port id 0x%x\n", __func__, port_id);
+		return -EINVAL;
+	}
+
+	if (port_id == RT_PROXY_PORT_001_TX) {
+		this_afe.tx_cb = cb;
+		this_afe.tx_private_data = private_data;
+	} else if (port_id == RT_PROXY_PORT_001_RX) {
+		this_afe.rx_cb = cb;
+		this_afe.rx_private_data = private_data;
+	}
+
+	rtproxy.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+				APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
+	rtproxy.hdr.pkt_size = sizeof(rtproxy);
+	rtproxy.hdr.src_port = 1;
+	rtproxy.hdr.dest_port = 1;
+	rtproxy.hdr.opcode = AFE_SERVICE_CMD_REGISTER_RT_PORT_DRIVER;
+	rtproxy.port_id = port_id;
+	rtproxy.reserved = 0;
+
+	ret = afe_apr_send_pkt(&rtproxy, NULL);
+	if (ret)
+		pr_err("%s: AFE  reg. rtproxy_event failed %d\n",
+			   __func__, ret);
+	return ret;
+}
+
+int afe_unregister_get_events(u16 port_id)
+{
+	int ret = 0;
+	struct afe_service_cmd_unregister_rt_port_driver rtproxy;
+	int index = 0;
+
+	pr_debug("%s:\n", __func__);
+
+	if (this_afe.apr == NULL) {
+		this_afe.apr = apr_register("ADSP", "AFE", afe_callback,
+					0xFFFFFFFF, &this_afe);
+		pr_debug("%s: Register AFE\n", __func__);
+		if (this_afe.apr == NULL) {
+			pr_err("%s: Unable to register AFE\n", __func__);
+			ret = -ENODEV;
+			return ret;
+		}
+		rtac_set_afe_handle(this_afe.apr);
+	}
+
+	if ((port_id == RT_PROXY_DAI_002_RX) ||
+		(port_id == RT_PROXY_DAI_001_TX)) {
+		port_id = VIRTUAL_ID_TO_PORTID(port_id);
+	} else {
+		pr_err("%s: wrong port id 0x%x\n", __func__, port_id);
+		return -EINVAL;
+	}
+
+	index = q6audio_get_port_index(port_id);
+	if (index < 0 || index >= AFE_MAX_PORTS) {
+		pr_err("%s: AFE port index[%d] invalid!\n",
+				__func__, index);
+		return -EINVAL;
+	}
+	ret = q6audio_validate_port(port_id);
+	if (ret < 0) {
+		pr_err("%s: Invalid port 0x%x ret %d", __func__, port_id, ret);
+		return -EINVAL;
+	}
+
+	rtproxy.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+				APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
+	rtproxy.hdr.pkt_size = sizeof(rtproxy);
+	rtproxy.hdr.src_port = 0;
+	rtproxy.hdr.dest_port = 0;
+	rtproxy.hdr.token = index;
+	rtproxy.hdr.opcode = AFE_SERVICE_CMD_UNREGISTER_RT_PORT_DRIVER;
+	rtproxy.port_id = port_id;
+	rtproxy.reserved = 0;
+
+	if (port_id == RT_PROXY_PORT_001_TX) {
+		this_afe.tx_cb = NULL;
+		this_afe.tx_private_data = NULL;
+	} else if (port_id == RT_PROXY_PORT_001_RX) {
+		this_afe.rx_cb = NULL;
+		this_afe.rx_private_data = NULL;
+	}
+
+	ret = afe_apr_send_pkt(&rtproxy, &this_afe.wait[index]);
+	if (ret)
+		pr_err("%s: AFE enable Unreg. rtproxy_event failed %d\n",
+			   __func__, ret);
+	return ret;
+}
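+
+/*
+ * Usage sketch (illustrative): an RT proxy client registers its data
+ * callback before starting the proxy port and unregisters it on close;
+ * "my_afe_cb" and "prv" are caller-supplied placeholders:
+ *
+ *	ret = afe_register_get_events(RT_PROXY_DAI_002_RX, my_afe_cb, prv);
+ *	...
+ *	ret = afe_unregister_get_events(RT_PROXY_DAI_002_RX);
+ */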
+
+int afe_rt_proxy_port_write(phys_addr_t buf_addr_p,
+		u32 mem_map_handle, int bytes)
+{
+	int ret = 0;
+	struct afe_port_data_cmd_rt_proxy_port_write_v2 afecmd_wr;
+
+	if (this_afe.apr == NULL) {
+		pr_err("%s: register to AFE is not done\n", __func__);
+		ret = -ENODEV;
+		return ret;
+	}
+	pr_debug("%s: buf_addr_p = 0x%pK bytes = %d\n", __func__,
+						&buf_addr_p, bytes);
+
+	afecmd_wr.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+				APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
+	afecmd_wr.hdr.pkt_size = sizeof(afecmd_wr);
+	afecmd_wr.hdr.src_port = 0;
+	afecmd_wr.hdr.dest_port = 0;
+	afecmd_wr.hdr.token = 0;
+	afecmd_wr.hdr.opcode = AFE_PORT_DATA_CMD_RT_PROXY_PORT_WRITE_V2;
+	afecmd_wr.port_id = RT_PROXY_PORT_001_TX;
+	afecmd_wr.buffer_address_lsw = lower_32_bits(buf_addr_p);
+	afecmd_wr.buffer_address_msw =
+			msm_audio_populate_upper_32_bits(buf_addr_p);
+	afecmd_wr.mem_map_handle = mem_map_handle;
+	afecmd_wr.available_bytes = bytes;
+	afecmd_wr.reserved = 0;
+
+	ret = afe_apr_send_pkt(&afecmd_wr, NULL);
+	if (ret)
+		pr_err("%s: AFE rtproxy write to port 0x%x failed %d\n",
+			   __func__, afecmd_wr.port_id, ret);
+	return ret;
+
+}
+
+int afe_rt_proxy_port_read(phys_addr_t buf_addr_p,
+		u32 mem_map_handle, int bytes)
+{
+	int ret = 0;
+	struct afe_port_data_cmd_rt_proxy_port_read_v2 afecmd_rd;
+
+	if (this_afe.apr == NULL) {
+		pr_err("%s: register to AFE is not done\n", __func__);
+		ret = -ENODEV;
+		return ret;
+	}
+	pr_debug("%s: buf_addr_p = 0x%pK bytes = %d\n", __func__,
+						&buf_addr_p, bytes);
+
+	afecmd_rd.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+				APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
+	afecmd_rd.hdr.pkt_size = sizeof(afecmd_rd);
+	afecmd_rd.hdr.src_port = 0;
+	afecmd_rd.hdr.dest_port = 0;
+	afecmd_rd.hdr.token = 0;
+	afecmd_rd.hdr.opcode = AFE_PORT_DATA_CMD_RT_PROXY_PORT_READ_V2;
+	afecmd_rd.port_id = RT_PROXY_PORT_001_RX;
+	afecmd_rd.buffer_address_lsw = lower_32_bits(buf_addr_p);
+	afecmd_rd.buffer_address_msw =
+				msm_audio_populate_upper_32_bits(buf_addr_p);
+	afecmd_rd.available_bytes = bytes;
+	afecmd_rd.mem_map_handle = mem_map_handle;
+
+	ret = afe_apr_send_pkt(&afecmd_rd, NULL);
+	if (ret)
+		pr_err("%s: AFE rtproxy read  cmd to port 0x%x failed %d\n",
+			   __func__, afecmd_rd.port_id, ret);
+	return ret;
+}
+
+#ifdef CONFIG_DEBUG_FS
+static struct dentry *debugfs_afelb;
+static struct dentry *debugfs_afelb_gain;
+
+static int afe_debug_open(struct inode *inode, struct file *file)
+{
+	file->private_data = inode->i_private;
+	pr_info("%s: debug intf %s\n", __func__, (char *) file->private_data);
+	return 0;
+}
+
+static int afe_get_parameters(char *buf, unsigned long *param1, int num_of_par)
+{
+	char *token;
+	int base, cnt;
+
+	token = strsep(&buf, " ");
+
+	for (cnt = 0; cnt < num_of_par; cnt++) {
+		if (token != NULL) {
+			if ((token[1] == 'x') || (token[1] == 'X'))
+				base = 16;
+			else
+				base = 10;
+
+			if (kstrtoul(token, base, &param1[cnt]) != 0) {
+				pr_err("%s: kstrtoul failed\n",
+					__func__);
+				return -EINVAL;
+			}
+
+			token = strsep(&buf, " ");
+		} else {
+			pr_err("%s: token NULL\n", __func__);
+			return -EINVAL;
+		}
+	}
+	return 0;
+}
+#define AFE_LOOPBACK_ON (1)
+#define AFE_LOOPBACK_OFF (0)
+static ssize_t afe_debug_write(struct file *filp,
+	const char __user *ubuf, size_t cnt, loff_t *ppos)
+{
+	char *lb_str = filp->private_data;
+	char lbuf[32];
+	int rc;
+	unsigned long param[5];
+
+	if (cnt > sizeof(lbuf) - 1) {
+		pr_err("%s: cnt %zd size %zd\n", __func__, cnt, sizeof(lbuf)-1);
+		return -EINVAL;
+	}
+
+	rc = copy_from_user(lbuf, ubuf, cnt);
+	if (rc) {
+		pr_err("%s: copy from user failed %d\n", __func__, rc);
+		return -EFAULT;
+	}
+
+	lbuf[cnt] = '\0';
+
+	if (!strcmp(lb_str, "afe_loopback")) {
+		rc = afe_get_parameters(lbuf, param, 3);
+		if (!rc) {
+			pr_info("%s: %lu %lu %lu\n", lb_str, param[0], param[1],
+				param[2]);
+
+			if ((param[0] != AFE_LOOPBACK_ON) && (param[0] !=
+				AFE_LOOPBACK_OFF)) {
+				pr_err("%s: Error, parameter 0 incorrect\n",
+					__func__);
+				rc = -EINVAL;
+				goto afe_error;
+			}
+			if ((q6audio_validate_port(param[1]) < 0) ||
+			    (q6audio_validate_port(param[2]) < 0)) {
+				pr_err("%s: Error, invalid afe port\n",
+					__func__);
+				rc = -EINVAL;
+				goto afe_error;
+			}
+			if (this_afe.apr == NULL) {
+				pr_err("%s: Error, AFE not opened\n", __func__);
+				rc = -EINVAL;
+			} else {
+				rc = afe_loopback(param[0], param[1], param[2]);
+			}
+		} else {
+			pr_err("%s: Error, invalid parameters\n", __func__);
+			rc = -EINVAL;
+		}
+
+	} else if (!strcmp(lb_str, "afe_loopback_gain")) {
+		rc = afe_get_parameters(lbuf, param, 2);
+		if (!rc) {
+			pr_info("%s: %s %lu %lu\n",
+				__func__, lb_str, param[0], param[1]);
+
+			rc = q6audio_validate_port(param[0]);
+			if (rc < 0) {
+				pr_err("%s: Error, invalid afe port %d %lu\n",
+					__func__, rc, param[0]);
+				rc = -EINVAL;
+				goto afe_error;
+			}
+
+			if (param[1] > 100) {
+				pr_err("%s: Error, volume shoud be 0 to 100 percentage param = %lu\n",
+					__func__, param[1]);
+				rc = -EINVAL;
+				goto afe_error;
+			}
+
+			param[1] = (Q6AFE_MAX_VOLUME * param[1]) / 100;
+
+			if (this_afe.apr == NULL) {
+				pr_err("%s: Error, AFE not opened\n", __func__);
+				rc = -EINVAL;
+			} else {
+				rc = afe_loopback_gain(param[0], param[1]);
+			}
+		} else {
+			pr_err("%s: Error, invalid parameters\n", __func__);
+			rc = -EINVAL;
+		}
+	}
+
+afe_error:
+	if (rc == 0)
+		rc = cnt;
+	else
+		pr_err("%s: rc = %d\n", __func__, rc);
+
+	return rc;
+}
+
+static const struct file_operations afe_debug_fops = {
+	.open = afe_debug_open,
+	.write = afe_debug_write
+};
+
+static void config_debug_fs_init(void)
+{
+	debugfs_afelb = debugfs_create_file("afe_loopback",
+					    S_IRUGO | S_IWUSR | S_IWGRP, NULL,
+					    (void *) "afe_loopback",
+					    &afe_debug_fops);
+
+	debugfs_afelb_gain = debugfs_create_file("afe_loopback_gain",
+						 S_IRUGO | S_IWUSR | S_IWGRP,
+						 NULL,
+						 (void *) "afe_loopback_gain",
+						 &afe_debug_fops);
+}
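+
+/*
+ * Usage sketch (illustrative): both files take space-separated decimal
+ * or 0x-prefixed values; the port ids below are placeholders:
+ *
+ *	echo "1 <rx_port_id> <tx_port_id>" > /sys/kernel/debug/afe_loopback
+ *	echo "<tx_port_id> 50" > /sys/kernel/debug/afe_loopback_gain
+ *
+ * The gain file takes 0-100 percent, scaled to Q6AFE_MAX_VOLUME above.
+ */
+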
+static void config_debug_fs_exit(void)
+{
+	if (debugfs_afelb)
+		debugfs_remove(debugfs_afelb);
+	if (debugfs_afelb_gain)
+		debugfs_remove(debugfs_afelb_gain);
+}
+#else
+static void config_debug_fs_init(void)
+{
+	return;
+}
+static void config_debug_fs_exit(void)
+{
+	return;
+}
+#endif
+
+void afe_set_dtmf_gen_rx_portid(u16 port_id, int set)
+{
+	if (set)
+		this_afe.dtmf_gen_rx_portid = port_id;
+	else if (this_afe.dtmf_gen_rx_portid == port_id)
+		this_afe.dtmf_gen_rx_portid = -1;
+}
+
+int afe_dtmf_generate_rx(int64_t duration_in_ms,
+			 uint16_t high_freq,
+			 uint16_t low_freq, uint16_t gain)
+{
+	int ret = 0;
+	int index = 0;
+	struct afe_dtmf_generation_command cmd_dtmf;
+
+	pr_debug("%s: DTMF AFE Gen\n", __func__);
+
+	if (afe_validate_port(this_afe.dtmf_gen_rx_portid) < 0) {
+		pr_err("%s: Failed : Invalid Port id = 0x%x\n",
+		       __func__, this_afe.dtmf_gen_rx_portid);
+		ret = -EINVAL;
+		goto fail_cmd;
+	}
+
+	if (this_afe.apr == NULL) {
+		this_afe.apr = apr_register("ADSP", "AFE", afe_callback,
+					    0xFFFFFFFF, &this_afe);
+		pr_debug("%s: Register AFE\n", __func__);
+		if (this_afe.apr == NULL) {
+			pr_err("%s: Unable to register AFE\n", __func__);
+			ret = -ENODEV;
+			return ret;
+		}
+		rtac_set_afe_handle(this_afe.apr);
+	}
+
+	pr_debug("%s: dur=%lld: hfreq=%d lfreq=%d gain=%d portid=0x%x\n",
+		__func__,
+		duration_in_ms, high_freq, low_freq, gain,
+		this_afe.dtmf_gen_rx_portid);
+
+	cmd_dtmf.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+				APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
+	cmd_dtmf.hdr.pkt_size = sizeof(cmd_dtmf);
+	cmd_dtmf.hdr.src_port = 0;
+	cmd_dtmf.hdr.dest_port = 0;
+	cmd_dtmf.hdr.token = 0;
+	cmd_dtmf.hdr.opcode = AFE_PORTS_CMD_DTMF_CTL;
+	cmd_dtmf.duration_in_ms = duration_in_ms;
+	cmd_dtmf.high_freq = high_freq;
+	cmd_dtmf.low_freq = low_freq;
+	cmd_dtmf.gain = gain;
+	cmd_dtmf.num_ports = 1;
+	cmd_dtmf.port_ids = q6audio_get_port_id(this_afe.dtmf_gen_rx_portid);
+
+	atomic_set(&this_afe.state, 1);
+	atomic_set(&this_afe.status, 0);
+	ret = apr_send_pkt(this_afe.apr, (uint32_t *) &cmd_dtmf);
+	if (ret < 0) {
+		pr_err("%s: AFE DTMF failed for num_ports:%d ids:0x%x\n",
+		       __func__, cmd_dtmf.num_ports, cmd_dtmf.port_ids);
+		ret = -EINVAL;
+		goto fail_cmd;
+	}
+	index = q6audio_get_port_index(this_afe.dtmf_gen_rx_portid);
+	if (index < 0 || index >= AFE_MAX_PORTS) {
+		pr_err("%s: AFE port index[%d] invalid!\n",
+				__func__, index);
+		ret = -EINVAL;
+		goto fail_cmd;
+	}
+	ret = wait_event_timeout(this_afe.wait[index],
+		(atomic_read(&this_afe.state) == 0),
+			msecs_to_jiffies(TIMEOUT_MS));
+	if (!ret) {
+		pr_err("%s: wait_event timeout\n", __func__);
+		ret = -EINVAL;
+		goto fail_cmd;
+	}
+	if (atomic_read(&this_afe.status) > 0) {
+		pr_err("%s: config cmd failed [%s]\n",
+			__func__, adsp_err_get_err_str(
+			atomic_read(&this_afe.status)));
+		ret = adsp_err_get_lnx_err_code(
+				atomic_read(&this_afe.status));
+		goto fail_cmd;
+	}
+	return 0;
+
+fail_cmd:
+	pr_err("%s: failed %d\n", __func__, ret);
+	return ret;
+}
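+
+/*
+ * Usage sketch (illustrative): generating a one-second DTMF '1' tone
+ * (697 Hz low, 1209 Hz high) on a previously selected rx port; the
+ * gain value is a placeholder:
+ *
+ *	afe_set_dtmf_gen_rx_portid(rx_port_id, 1);
+ *	ret = afe_dtmf_generate_rx(1000, 1209, 697, gain);
+ */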
+
+static int afe_sidetone_iir(u16 tx_port_id)
+{
+	struct afe_loopback_iir_cfg_v2 iir_sidetone;
+	int ret;
+	int index = 0;
+	uint16_t size = 0;
+	int cal_index = AFE_SIDETONE_IIR_CAL;
+	int iir_pregain = 0;
+	int iir_num_biquad_stages = 0;
+	int iir_enable;
+	struct cal_block_data *cal_block;
+	int mid;
+
+	memset(&iir_sidetone, 0, sizeof(iir_sidetone));
+	index = q6audio_get_port_index(tx_port_id);
+	iir_sidetone.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+				     APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
+	iir_sidetone.hdr.pkt_size = sizeof(iir_sidetone);
+	iir_sidetone.hdr.src_port = 0;
+	iir_sidetone.hdr.dest_port = 0;
+	iir_sidetone.hdr.token = index;
+	iir_sidetone.hdr.opcode = AFE_PORT_CMD_SET_PARAM_V2;
+	iir_sidetone.param.port_id = tx_port_id;
+	iir_sidetone.param.payload_address_lsw = 0x00;
+	iir_sidetone.param.payload_address_msw = 0x00;
+	iir_sidetone.param.mem_map_handle = 0x00;
+
+	if (this_afe.cal_data[cal_index] == NULL) {
+		pr_err("%s: cal data is NULL\n", __func__);
+		ret = -EINVAL;
+		goto done;
+	}
+	mutex_lock(&this_afe.cal_data[cal_index]->lock);
+	cal_block = cal_utils_get_only_cal_block(this_afe.cal_data[cal_index]);
+	if (cal_block == NULL) {
+		pr_err("%s: cal_block not found\n ", __func__);
+		mutex_unlock(&this_afe.cal_data[cal_index]->lock);
+		ret = -EINVAL;
+		goto done;
+	}
+
+	iir_pregain = ((struct audio_cal_info_sidetone_iir *)
+			cal_block->cal_info)->pregain;
+	iir_enable = ((struct audio_cal_info_sidetone_iir *)
+			cal_block->cal_info)->iir_enable;
+	iir_num_biquad_stages = ((struct audio_cal_info_sidetone_iir *)
+			cal_block->cal_info)->num_biquad_stages;
+	mid = ((struct audio_cal_info_sidetone_iir *)
+			cal_block->cal_info)->mid;
+
+	/*
+	 * Calculate the actual payload size based on the number of stages
+	 * enabled in the calibration data.
+	 */
+	size = (MAX_SIDETONE_IIR_DATA_SIZE / MAX_NO_IIR_FILTER_STAGE) *
+		iir_num_biquad_stages;
+	/*
+	 * For an odd number of stages, 2 bytes of padding are
+	 * required at the end of the payload.
+	 */
+	if (iir_num_biquad_stages % 2) {
+		pr_debug("%s: adding 2 to size:%d\n", __func__, size);
+		size = size + 2;
+	}
+	memcpy(&iir_sidetone.st_iir_filter_config_data.iir_config,
+		&((struct audio_cal_info_sidetone_iir *)
+		cal_block->cal_info)->iir_config,
+		sizeof(iir_sidetone.st_iir_filter_config_data.iir_config));
+	mutex_unlock(&this_afe.cal_data[cal_index]->lock);
+
+	/*
+	 * Calculate the payload size for setparams command
+	 */
+	iir_sidetone.param.payload_size = (sizeof(iir_sidetone) -
+				sizeof(struct apr_hdr) -
+				sizeof(struct afe_port_cmd_set_param_v2) -
+				(MAX_SIDETONE_IIR_DATA_SIZE - size));
+
+	pr_debug("%s: payload size :%d\n", __func__,
+		 iir_sidetone.param.payload_size);
+
+	/*
+	 * Set IIR enable params
+	 */
+	iir_sidetone.st_iir_enable_pdata.module_id = mid;
+	iir_sidetone.st_iir_enable_pdata.param_id =
+			AFE_PARAM_ID_ENABLE;
+	iir_sidetone.st_iir_enable_pdata.param_size =
+			sizeof(iir_sidetone.st_iir_mode_enable_data);
+	iir_sidetone.st_iir_mode_enable_data.enable = iir_enable;
+
+	/*
+	 * Set IIR filter config params
+	 */
+	iir_sidetone.st_iir_filter_config_pdata.module_id = mid;
+	iir_sidetone.st_iir_filter_config_pdata.param_id =
+			AFE_PARAM_ID_SIDETONE_IIR_FILTER_CONFIG;
+	iir_sidetone.st_iir_filter_config_pdata.param_size =
+		sizeof(iir_sidetone.st_iir_filter_config_data.num_biquad_stages)
+		+
+		sizeof(iir_sidetone.st_iir_filter_config_data.pregain) + size;
+	iir_sidetone.st_iir_filter_config_pdata.reserved = 0;
+	iir_sidetone.st_iir_filter_config_data.num_biquad_stages =
+			iir_num_biquad_stages;
+	iir_sidetone.st_iir_filter_config_data.pregain = iir_pregain;
+	pr_debug("%s: tx(0x%x)mid(0x%x)iir_en(%d)stg(%d)gain(0x%x)size(%d)\n",
+		  __func__, tx_port_id, mid,
+		  iir_sidetone.st_iir_mode_enable_data.enable,
+		  iir_sidetone.st_iir_filter_config_data.num_biquad_stages,
+		  iir_sidetone.st_iir_filter_config_data.pregain,
+		  iir_sidetone.st_iir_filter_config_pdata.param_size);
+	ret = afe_apr_send_pkt(&iir_sidetone, &this_afe.wait[index]);
+	if (ret)
+		pr_err("%s: AFE sidetone failed for tx_port(0x%x)\n",
+			 __func__, tx_port_id);
+
+done:
+	return ret;
+
+}
+
+static int afe_sidetone(u16 tx_port_id, u16 rx_port_id, bool enable)
+{
+	struct afe_st_loopback_cfg_v1 cmd_sidetone;
+	int ret;
+	int index;
+	int cal_index = AFE_SIDETONE_CAL;
+	int sidetone_gain;
+	int sidetone_enable;
+	struct cal_block_data *cal_block;
+	int mid = 0;
+
+	memset(&cmd_sidetone, 0, sizeof(cmd_sidetone));
+	if (this_afe.cal_data[cal_index] == NULL) {
+		pr_err("%s: cal data is NULL\n", __func__);
+		ret = -EINVAL;
+		goto done;
+	}
+	mutex_lock(&this_afe.cal_data[cal_index]->lock);
+	cal_block = cal_utils_get_only_cal_block(this_afe.cal_data[cal_index]);
+	if (cal_block == NULL) {
+		pr_err("%s: cal_block not found\n", __func__);
+		mutex_unlock(&this_afe.cal_data[cal_index]->lock);
+		ret = -EINVAL;
+		goto done;
+	}
+	sidetone_gain = ((struct audio_cal_info_sidetone *)
+			 cal_block->cal_info)->gain;
+	sidetone_enable = ((struct audio_cal_info_sidetone *)
+			 cal_block->cal_info)->enable;
+	mid = ((struct audio_cal_info_sidetone *)
+			 cal_block->cal_info)->mid;
+	mutex_unlock(&this_afe.cal_data[cal_index]->lock);
+
+	index = q6audio_get_port_index(tx_port_id);
+	cmd_sidetone.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+				APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
+	cmd_sidetone.hdr.pkt_size = sizeof(cmd_sidetone);
+	cmd_sidetone.hdr.src_port = 0;
+	cmd_sidetone.hdr.dest_port = 0;
+	cmd_sidetone.hdr.token = index;
+	cmd_sidetone.hdr.opcode = AFE_PORT_CMD_SET_PARAM_V2;
+	cmd_sidetone.param.port_id = tx_port_id;
+	cmd_sidetone.param.payload_size = (sizeof(cmd_sidetone) -
+			sizeof(struct apr_hdr) -
+			sizeof(struct afe_port_cmd_set_param_v2));
+	cmd_sidetone.param.payload_address_lsw = 0x00;
+	cmd_sidetone.param.payload_address_msw = 0x00;
+	cmd_sidetone.param.mem_map_handle = 0x00;
+	cmd_sidetone.gain_pdata.module_id = AFE_MODULE_LOOPBACK;
+	cmd_sidetone.gain_pdata.param_id = AFE_PARAM_ID_LOOPBACK_GAIN_PER_PATH;
+	/*
+	 * size of actual payload only
+	 */
+	cmd_sidetone.gain_pdata.param_size = sizeof(
+					     struct afe_loopback_sidetone_gain);
+	cmd_sidetone.gain_data.rx_port_id = rx_port_id;
+	cmd_sidetone.gain_data.gain = sidetone_gain;
+
+	cmd_sidetone.cfg_pdata.module_id = AFE_MODULE_LOOPBACK;
+	cmd_sidetone.cfg_pdata.param_id = AFE_PARAM_ID_LOOPBACK_CONFIG;
+	/*
+	 * size of actual payload only
+	 */
+	cmd_sidetone.cfg_pdata.param_size = sizeof(struct loopback_cfg_data);
+	cmd_sidetone.cfg_data.loopback_cfg_minor_version =
+					AFE_API_VERSION_LOOPBACK_CONFIG;
+	cmd_sidetone.cfg_data.dst_port_id = rx_port_id;
+	cmd_sidetone.cfg_data.routing_mode = LB_MODE_SIDETONE;
+	cmd_sidetone.cfg_data.enable = enable;
+
+	pr_debug("%s rx(0x%x) tx(0x%x) enable(%d) mid(0x%x) gain(%d) sidetone_enable(%d)\n",
+		  __func__, rx_port_id, tx_port_id,
+		  enable, mid, sidetone_gain, sidetone_enable);
+
+	ret = afe_apr_send_pkt(&cmd_sidetone, &this_afe.wait[index]);
+	if (ret)
+		pr_err("%s: AFE sidetone send failed for tx_port:%d rx_port:%d ret:%d\n",
+			__func__, tx_port_id, rx_port_id, ret);
+done:
+	return ret;
+}
+
+int afe_sidetone_enable(u16 tx_port_id, u16 rx_port_id, bool enable)
+{
+	int ret;
+	int index;
+
+	index = q6audio_get_port_index(rx_port_id);
+	if (index < 0 || index >= AFE_MAX_PORTS) {
+		pr_err("%s: AFE port index[%d] invalid!\n",
+				__func__, index);
+		ret = -EINVAL;
+		goto done;
+	}
+	if (q6audio_validate_port(rx_port_id) < 0) {
+		pr_err("%s: Invalid port 0x%x\n",
+				__func__, rx_port_id);
+		ret = -EINVAL;
+		goto done;
+	}
+	index = q6audio_get_port_index(tx_port_id);
+	if (index < 0 || index >= AFE_MAX_PORTS) {
+		pr_err("%s: AFE port index[%d] invalid!\n",
+				__func__, index);
+		ret = -EINVAL;
+		goto done;
+	}
+	if (q6audio_validate_port(tx_port_id) < 0) {
+		pr_err("%s: Invalid port 0x%x\n",
+				__func__, tx_port_id);
+		ret = -EINVAL;
+		goto done;
+	}
+	if (enable) {
+		ret = afe_sidetone_iir(tx_port_id);
+		if (ret)
+			goto done;
+	}
+
+	ret = afe_sidetone(tx_port_id, rx_port_id, enable);
+
+done:
+	return ret;
+}
+
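+/*
+ * Illustrative usage sketch, not part of the original patch: a machine
+ * driver would enable sidetone on an active TX/RX pair roughly as below.
+ * SLIMBUS_0_TX/SLIMBUS_0_RX are placeholders; any pair accepted by
+ * q6audio_validate_port() behaves the same.
+ *
+ *	ret = afe_sidetone_enable(SLIMBUS_0_TX, SLIMBUS_0_RX, true);
+ *	if (ret)
+ *		pr_err("sidetone enable failed %d\n", ret);
+ *	...
+ *	afe_sidetone_enable(SLIMBUS_0_TX, SLIMBUS_0_RX, false);
+ */
+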
+int afe_validate_port(u16 port_id)
+{
+	int ret;
+
+	switch (port_id) {
+	case PRIMARY_I2S_RX:
+	case PRIMARY_I2S_TX:
+	case AFE_PORT_ID_PRIMARY_PCM_RX:
+	case AFE_PORT_ID_PRIMARY_PCM_TX:
+	case AFE_PORT_ID_SECONDARY_PCM_RX:
+	case AFE_PORT_ID_SECONDARY_PCM_TX:
+	case AFE_PORT_ID_TERTIARY_PCM_RX:
+	case AFE_PORT_ID_TERTIARY_PCM_TX:
+	case AFE_PORT_ID_QUATERNARY_PCM_RX:
+	case AFE_PORT_ID_QUATERNARY_PCM_TX:
+	case SECONDARY_I2S_RX:
+	case SECONDARY_I2S_TX:
+	case MI2S_RX:
+	case MI2S_TX:
+	case HDMI_RX:
+	case DISPLAY_PORT_RX:
+	case AFE_PORT_ID_SPDIF_RX:
+	case RSVD_2:
+	case RSVD_3:
+	case DIGI_MIC_TX:
+	case VOICE_RECORD_RX:
+	case VOICE_RECORD_TX:
+	case VOICE_PLAYBACK_TX:
+	case VOICE2_PLAYBACK_TX:
+	case SLIMBUS_0_RX:
+	case SLIMBUS_0_TX:
+	case SLIMBUS_1_RX:
+	case SLIMBUS_1_TX:
+	case SLIMBUS_2_RX:
+	case SLIMBUS_2_TX:
+	case SLIMBUS_3_RX:
+	case INT_BT_SCO_RX:
+	case INT_BT_SCO_TX:
+	case INT_BT_A2DP_RX:
+	case INT_FM_RX:
+	case INT_FM_TX:
+	case RT_PROXY_PORT_001_RX:
+	case RT_PROXY_PORT_001_TX:
+	case SLIMBUS_4_RX:
+	case SLIMBUS_4_TX:
+	case SLIMBUS_5_RX:
+	case SLIMBUS_6_RX:
+	case SLIMBUS_6_TX:
+	case SLIMBUS_7_RX:
+	case SLIMBUS_7_TX:
+	case SLIMBUS_8_RX:
+	case SLIMBUS_8_TX:
+	case AFE_PORT_ID_USB_RX:
+	case AFE_PORT_ID_USB_TX:
+	case AFE_PORT_ID_PRIMARY_MI2S_RX:
+	case AFE_PORT_ID_PRIMARY_MI2S_TX:
+	case AFE_PORT_ID_SECONDARY_MI2S_RX:
+	case AFE_PORT_ID_SECONDARY_MI2S_TX:
+	case AFE_PORT_ID_QUATERNARY_MI2S_RX:
+	case AFE_PORT_ID_QUATERNARY_MI2S_TX:
+	case AFE_PORT_ID_TERTIARY_MI2S_RX:
+	case AFE_PORT_ID_TERTIARY_MI2S_TX:
+	case AFE_PORT_ID_QUINARY_MI2S_RX:
+	case AFE_PORT_ID_QUINARY_MI2S_TX:
+	case AFE_PORT_ID_SENARY_MI2S_TX:
+	case AFE_PORT_ID_PRIMARY_TDM_RX:
+	case AFE_PORT_ID_PRIMARY_TDM_TX:
+	case AFE_PORT_ID_PRIMARY_TDM_RX_1:
+	case AFE_PORT_ID_PRIMARY_TDM_TX_1:
+	case AFE_PORT_ID_PRIMARY_TDM_RX_2:
+	case AFE_PORT_ID_PRIMARY_TDM_TX_2:
+	case AFE_PORT_ID_PRIMARY_TDM_RX_3:
+	case AFE_PORT_ID_PRIMARY_TDM_TX_3:
+	case AFE_PORT_ID_PRIMARY_TDM_RX_4:
+	case AFE_PORT_ID_PRIMARY_TDM_TX_4:
+	case AFE_PORT_ID_PRIMARY_TDM_RX_5:
+	case AFE_PORT_ID_PRIMARY_TDM_TX_5:
+	case AFE_PORT_ID_PRIMARY_TDM_RX_6:
+	case AFE_PORT_ID_PRIMARY_TDM_TX_6:
+	case AFE_PORT_ID_PRIMARY_TDM_RX_7:
+	case AFE_PORT_ID_PRIMARY_TDM_TX_7:
+	case AFE_PORT_ID_SECONDARY_TDM_RX:
+	case AFE_PORT_ID_SECONDARY_TDM_TX:
+	case AFE_PORT_ID_SECONDARY_TDM_RX_1:
+	case AFE_PORT_ID_SECONDARY_TDM_TX_1:
+	case AFE_PORT_ID_SECONDARY_TDM_RX_2:
+	case AFE_PORT_ID_SECONDARY_TDM_TX_2:
+	case AFE_PORT_ID_SECONDARY_TDM_RX_3:
+	case AFE_PORT_ID_SECONDARY_TDM_TX_3:
+	case AFE_PORT_ID_SECONDARY_TDM_RX_4:
+	case AFE_PORT_ID_SECONDARY_TDM_TX_4:
+	case AFE_PORT_ID_SECONDARY_TDM_RX_5:
+	case AFE_PORT_ID_SECONDARY_TDM_TX_5:
+	case AFE_PORT_ID_SECONDARY_TDM_RX_6:
+	case AFE_PORT_ID_SECONDARY_TDM_TX_6:
+	case AFE_PORT_ID_SECONDARY_TDM_RX_7:
+	case AFE_PORT_ID_SECONDARY_TDM_TX_7:
+	case AFE_PORT_ID_TERTIARY_TDM_RX:
+	case AFE_PORT_ID_TERTIARY_TDM_TX:
+	case AFE_PORT_ID_TERTIARY_TDM_RX_1:
+	case AFE_PORT_ID_TERTIARY_TDM_TX_1:
+	case AFE_PORT_ID_TERTIARY_TDM_RX_2:
+	case AFE_PORT_ID_TERTIARY_TDM_TX_2:
+	case AFE_PORT_ID_TERTIARY_TDM_RX_3:
+	case AFE_PORT_ID_TERTIARY_TDM_TX_3:
+	case AFE_PORT_ID_TERTIARY_TDM_RX_4:
+	case AFE_PORT_ID_TERTIARY_TDM_TX_4:
+	case AFE_PORT_ID_TERTIARY_TDM_RX_5:
+	case AFE_PORT_ID_TERTIARY_TDM_TX_5:
+	case AFE_PORT_ID_TERTIARY_TDM_RX_6:
+	case AFE_PORT_ID_TERTIARY_TDM_TX_6:
+	case AFE_PORT_ID_TERTIARY_TDM_RX_7:
+	case AFE_PORT_ID_TERTIARY_TDM_TX_7:
+	case AFE_PORT_ID_QUATERNARY_TDM_RX:
+	case AFE_PORT_ID_QUATERNARY_TDM_TX:
+	case AFE_PORT_ID_QUATERNARY_TDM_RX_1:
+	case AFE_PORT_ID_QUATERNARY_TDM_TX_1:
+	case AFE_PORT_ID_QUATERNARY_TDM_RX_2:
+	case AFE_PORT_ID_QUATERNARY_TDM_TX_2:
+	case AFE_PORT_ID_QUATERNARY_TDM_RX_3:
+	case AFE_PORT_ID_QUATERNARY_TDM_TX_3:
+	case AFE_PORT_ID_QUATERNARY_TDM_RX_4:
+	case AFE_PORT_ID_QUATERNARY_TDM_TX_4:
+	case AFE_PORT_ID_QUATERNARY_TDM_RX_5:
+	case AFE_PORT_ID_QUATERNARY_TDM_TX_5:
+	case AFE_PORT_ID_QUATERNARY_TDM_RX_6:
+	case AFE_PORT_ID_QUATERNARY_TDM_TX_6:
+	case AFE_PORT_ID_QUATERNARY_TDM_RX_7:
+	case AFE_PORT_ID_QUATERNARY_TDM_TX_7:
+	case AFE_PORT_ID_INT0_MI2S_RX:
+	case AFE_PORT_ID_INT1_MI2S_RX:
+	case AFE_PORT_ID_INT2_MI2S_RX:
+	case AFE_PORT_ID_INT3_MI2S_RX:
+	case AFE_PORT_ID_INT4_MI2S_RX:
+	case AFE_PORT_ID_INT5_MI2S_RX:
+	case AFE_PORT_ID_INT6_MI2S_RX:
+	case AFE_PORT_ID_INT0_MI2S_TX:
+	case AFE_PORT_ID_INT1_MI2S_TX:
+	case AFE_PORT_ID_INT2_MI2S_TX:
+	case AFE_PORT_ID_INT3_MI2S_TX:
+	case AFE_PORT_ID_INT4_MI2S_TX:
+	case AFE_PORT_ID_INT5_MI2S_TX:
+	case AFE_PORT_ID_INT6_MI2S_TX:
+	{
+		ret = 0;
+		break;
+	}
+
+	default:
+		pr_err("%s: invalid port id 0x%x\n", __func__, port_id);
+		ret = -EINVAL;
+	}
+
+	return ret;
+}
+
+int afe_convert_virtual_to_portid(u16 port_id)
+{
+	int ret;
+
+	/*
+	 * If port_id is virtual, convert it to the physical id;
+	 * if it is already physical, return it unchanged.
+	 */
+	if (afe_validate_port(port_id) < 0) {
+		if (port_id == RT_PROXY_DAI_001_RX ||
+		    port_id == RT_PROXY_DAI_001_TX ||
+		    port_id == RT_PROXY_DAI_002_RX ||
+		    port_id == RT_PROXY_DAI_002_TX) {
+			ret = VIRTUAL_ID_TO_PORTID(port_id);
+		} else {
+			pr_err("%s: wrong port 0x%x\n",
+				__func__, port_id);
+			ret = -EINVAL;
+		}
+	} else
+		ret = port_id;
+
+	return ret;
+}
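+
+/*
+ * Illustrative mapping, not part of the original patch: the four RT
+ * proxy DAI ids are the only virtual ids handled here, so e.g.
+ *
+ *	afe_convert_virtual_to_portid(RT_PROXY_DAI_001_RX)
+ *
+ * returns VIRTUAL_ID_TO_PORTID(RT_PROXY_DAI_001_RX), an already
+ * physical id such as SLIMBUS_0_RX is returned unchanged, and any
+ * other id yields -EINVAL.
+ */
+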
+int afe_port_stop_nowait(int port_id)
+{
+	struct afe_port_cmd_device_stop stop;
+	int ret = 0;
+
+	if (this_afe.apr == NULL) {
+		pr_err("%s: AFE is already closed\n", __func__);
+		ret = -EINVAL;
+		goto fail_cmd;
+	}
+	pr_debug("%s: port_id = 0x%x\n", __func__, port_id);
+	port_id = q6audio_convert_virtual_to_portid(port_id);
+
+	stop.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+				APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
+	stop.hdr.pkt_size = sizeof(stop);
+	stop.hdr.src_port = 0;
+	stop.hdr.dest_port = 0;
+	stop.hdr.token = 0;
+	stop.hdr.opcode = AFE_PORT_CMD_DEVICE_STOP;
+	stop.port_id = port_id;
+	stop.reserved = 0;
+
+	ret = afe_apr_send_pkt(&stop, NULL);
+	if (ret)
+		pr_err("%s: AFE close failed %d\n", __func__, ret);
+
+fail_cmd:
+	return ret;
+}
+
+int afe_close(int port_id)
+{
+	struct afe_port_cmd_device_stop stop;
+	enum afe_mad_type mad_type;
+	int ret = 0;
+	int index = 0;
+	uint16_t port_index;
+
+	if (this_afe.apr == NULL) {
+		pr_err("%s: AFE is already closed\n", __func__);
+		if ((port_id == RT_PROXY_DAI_001_RX) ||
+		    (port_id == RT_PROXY_DAI_002_TX))
+			pcm_afe_instance[port_id & 0x1] = 0;
+		if ((port_id == RT_PROXY_DAI_002_RX) ||
+		    (port_id == RT_PROXY_DAI_001_TX))
+			proxy_afe_instance[port_id & 0x1] = 0;
+		afe_close_done[port_id & 0x1] = true;
+		ret = -EINVAL;
+		goto fail_cmd;
+	}
+	pr_debug("%s: port_id = 0x%x\n", __func__, port_id);
+	if ((port_id == RT_PROXY_DAI_001_RX) ||
+			(port_id == RT_PROXY_DAI_002_TX)) {
+		pr_debug("%s: before decrementing pcm_afe_instance %d\n",
+			__func__, pcm_afe_instance[port_id & 0x1]);
+		port_id = VIRTUAL_ID_TO_PORTID(port_id);
+		pcm_afe_instance[port_id & 0x1]--;
+		if ((!(pcm_afe_instance[port_id & 0x1] == 0 &&
+			proxy_afe_instance[port_id & 0x1] == 0)) ||
+			afe_close_done[port_id & 0x1] == true)
+			return 0;
+		else
+			afe_close_done[port_id & 0x1] = true;
+	}
+
+	if ((port_id == RT_PROXY_DAI_002_RX) ||
+		(port_id == RT_PROXY_DAI_001_TX)) {
+		pr_debug("%s: before decrementing proxy_afe_instance %d\n",
+			__func__, proxy_afe_instance[port_id & 0x1]);
+		port_id = VIRTUAL_ID_TO_PORTID(port_id);
+		proxy_afe_instance[port_id & 0x1]--;
+		if ((!(pcm_afe_instance[port_id & 0x1] == 0 &&
+			proxy_afe_instance[port_id & 0x1] == 0)) ||
+			afe_close_done[port_id & 0x1] == true)
+			return 0;
+		else
+			afe_close_done[port_id & 0x1] = true;
+	}
+
+	port_id = q6audio_convert_virtual_to_portid(port_id);
+	index = q6audio_get_port_index(port_id);
+	if (index < 0 || index >= AFE_MAX_PORTS) {
+		pr_err("%s: AFE port index[%d] invalid!\n",
+				__func__, index);
+		return -EINVAL;
+	}
+	ret = q6audio_validate_port(port_id);
+	if (ret < 0) {
+		pr_warn("%s: Not a valid port id 0x%x ret %d\n",
+			__func__, port_id, ret);
+		return -EINVAL;
+	}
+
+	mad_type = afe_port_get_mad_type(port_id);
+	pr_debug("%s: port_id 0x%x, mad_type %d\n", __func__, port_id,
+		 mad_type);
+	if (mad_type != MAD_HW_NONE && mad_type != MAD_SW_AUDIO) {
+		pr_debug("%s: Turn off MAD\n", __func__);
+		ret = afe_turn_onoff_hw_mad(mad_type, false);
+		if (ret) {
+			pr_err("%s: afe_turn_onoff_hw_mad failed %d\n",
+			       __func__, ret);
+			return ret;
+		}
+	} else {
+		pr_debug("%s: Not a MAD port\n", __func__);
+	}
+
+	port_index = afe_get_port_index(port_id);
+	/* port_index is unsigned, so only the upper bound needs checking */
+	if (port_index < AFE_MAX_PORTS) {
+		this_afe.afe_sample_rates[port_index] = 0;
+		this_afe.topology[port_index] = 0;
+		this_afe.dev_acdb_id[port_index] = 0;
+	} else {
+		pr_err("%s: invalid port index %d\n", __func__, port_index);
+		ret = -EINVAL;
+		goto fail_cmd;
+	}
+
+	if ((port_id == this_afe.aanc_info.aanc_tx_port) &&
+	    (this_afe.aanc_info.aanc_active)) {
+		memset(&this_afe.aanc_info, 0x00, sizeof(this_afe.aanc_info));
+		ret = afe_aanc_mod_enable(this_afe.apr, port_id, 0);
+		if (ret)
+			pr_err("%s: AFE mod disable failed %d\n",
+				__func__, ret);
+	}
+
+	/*
+	 * Even if the ramp down configuration fails, it is not serious
+	 * enough to warrant bailing out.
+	 */
+	if (afe_spk_ramp_dn_cfg(port_id) < 0)
+		pr_err("%s: ramp down configuration failed\n", __func__);
+
+	stop.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+				APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
+	stop.hdr.pkt_size = sizeof(stop);
+	stop.hdr.src_port = 0;
+	stop.hdr.dest_port = 0;
+	stop.hdr.token = index;
+	stop.hdr.opcode = AFE_PORT_CMD_DEVICE_STOP;
+	stop.port_id = q6audio_get_port_id(port_id);
+	stop.reserved = 0;
+
+	ret = afe_apr_send_pkt(&stop, &this_afe.wait[index]);
+	if (ret)
+		pr_err("%s: AFE close failed %d\n", __func__, ret);
+
+fail_cmd:
+	return ret;
+}
+
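+/*
+ * Illustrative note, not part of the original patch: for the RT proxy
+ * ports afe_close() is reference counted, and AFE_PORT_CMD_DEVICE_STOP
+ * is only sent once both the PCM and proxy users of the same instance
+ * slot (port_id & 0x1) have closed. Assuming RT_PROXY_DAI_001_RX and
+ * RT_PROXY_DAI_002_RX map to the same slot:
+ *
+ *	afe_close(RT_PROXY_DAI_001_RX);    pcm_afe_instance   1 -> 0
+ *	afe_close(RT_PROXY_DAI_002_RX);    proxy_afe_instance 1 -> 0,
+ *					   DEVICE_STOP actually issued
+ */
+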
+int afe_set_digital_codec_core_clock(u16 port_id,
+				struct afe_digital_clk_cfg *cfg)
+{
+	struct afe_lpass_digital_clk_config_command clk_cfg;
+	int index = 0;
+	int ret = 0;
+
+	if (!cfg) {
+		pr_err("%s: clock cfg is NULL\n", __func__);
+		ret = -EINVAL;
+		return ret;
+	}
+
+	ret = afe_q6_interface_prepare();
+	if (ret != 0) {
+		pr_err("%s: Q6 interface prepare failed %d\n", __func__, ret);
+		return ret;
+	}
+
+	clk_cfg.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+				APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
+	clk_cfg.hdr.pkt_size = sizeof(clk_cfg);
+	clk_cfg.hdr.src_port = 0;
+	clk_cfg.hdr.dest_port = 0;
+	clk_cfg.hdr.token = index;
+
+	clk_cfg.hdr.opcode = AFE_PORT_CMD_SET_PARAM_V2;
+	/* default rx port is taken to enable the codec digital clock */
+	clk_cfg.param.port_id = q6audio_get_port_id(port_id);
+	clk_cfg.param.payload_size = sizeof(clk_cfg) - sizeof(struct apr_hdr)
+						- sizeof(clk_cfg.param);
+	clk_cfg.param.payload_address_lsw = 0x00;
+	clk_cfg.param.payload_address_msw = 0x00;
+	clk_cfg.param.mem_map_handle = 0x00;
+	clk_cfg.pdata.module_id = AFE_MODULE_AUDIO_DEV_INTERFACE;
+	clk_cfg.pdata.param_id = AFE_PARAM_ID_INTERNAL_DIGIATL_CDC_CLK_CONFIG;
+	clk_cfg.pdata.param_size =  sizeof(clk_cfg.clk_cfg);
+	clk_cfg.clk_cfg = *cfg;
+
+	pr_debug("%s: Minor version =0x%x clk val = %d\n"
+		 "clk root = 0x%x resrv = 0x%x\n",
+		 __func__, cfg->i2s_cfg_minor_version,
+		 cfg->clk_val, cfg->clk_root, cfg->reserved);
+
+	atomic_set(&this_afe.state, 1);
+	atomic_set(&this_afe.status, 0);
+	ret = apr_send_pkt(this_afe.apr, (uint32_t *) &clk_cfg);
+	if (ret < 0) {
+		pr_err("%s: AFE enable for port 0x%x ret %d\n",
+		       __func__, port_id, ret);
+		ret = -EINVAL;
+		goto fail_cmd;
+	}
+
+	ret = wait_event_timeout(this_afe.wait[index],
+			(atomic_read(&this_afe.state) == 0),
+			msecs_to_jiffies(TIMEOUT_MS));
+	if (!ret) {
+		pr_err("%s: wait_event timeout\n", __func__);
+		ret = -EINVAL;
+		goto fail_cmd;
+	}
+	if (atomic_read(&this_afe.status) > 0) {
+		pr_err("%s: config cmd failed [%s]\n",
+			__func__, adsp_err_get_err_str(
+			atomic_read(&this_afe.status)));
+		ret = adsp_err_get_lnx_err_code(
+				atomic_read(&this_afe.status));
+		goto fail_cmd;
+	}
+	ret = 0;
+
+fail_cmd:
+	return ret;
+}
+
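+/*
+ * Illustrative usage sketch, not part of the original patch. The numeric
+ * values and the AFE_API_VERSION_I2S_CONFIG name are placeholders taken
+ * from this tree's conventions; only the shape of the call matters.
+ *
+ *	struct afe_digital_clk_cfg dig_cfg = {
+ *		.i2s_cfg_minor_version = AFE_API_VERSION_I2S_CONFIG,
+ *		.clk_val = 9600000,
+ *		.clk_root = 5,
+ *		.reserved = 0,
+ *	};
+ *	ret = afe_set_digital_codec_core_clock(AFE_PORT_ID_PRIMARY_MI2S_RX,
+ *					       &dig_cfg);
+ */
+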
+int afe_set_lpass_clock(u16 port_id, struct afe_clk_cfg *cfg)
+{
+	struct afe_lpass_clk_config_command clk_cfg;
+	int index = 0;
+	int ret = 0;
+
+	if (!cfg) {
+		pr_err("%s: clock cfg is NULL\n", __func__);
+		ret = -EINVAL;
+		return ret;
+	}
+	index = q6audio_get_port_index(port_id);
+	if (index < 0 || index >= AFE_MAX_PORTS) {
+		pr_err("%s: AFE port index[%d] invalid!\n",
+				__func__, index);
+		return -EINVAL;
+	}
+	ret = q6audio_is_digital_pcm_interface(port_id);
+	if (ret < 0) {
+		pr_err("%s: q6audio_is_digital_pcm_interface fail %d\n",
+			__func__, ret);
+		return -EINVAL;
+	}
+
+	ret = afe_q6_interface_prepare();
+	if (ret != 0) {
+		pr_err("%s: Q6 interface prepare failed %d\n", __func__, ret);
+		return ret;
+	}
+
+	mutex_lock(&this_afe.afe_cmd_lock);
+	clk_cfg.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+				APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
+	clk_cfg.hdr.pkt_size = sizeof(clk_cfg);
+	clk_cfg.hdr.src_port = 0;
+	clk_cfg.hdr.dest_port = 0;
+	clk_cfg.hdr.token = index;
+
+	clk_cfg.hdr.opcode = AFE_PORT_CMD_SET_PARAM_V2;
+	clk_cfg.param.port_id = q6audio_get_port_id(port_id);
+	clk_cfg.param.payload_size = sizeof(clk_cfg) - sizeof(struct apr_hdr)
+						- sizeof(clk_cfg.param);
+	clk_cfg.param.payload_address_lsw = 0x00;
+	clk_cfg.param.payload_address_msw = 0x00;
+	clk_cfg.param.mem_map_handle = 0x00;
+	clk_cfg.pdata.module_id = AFE_MODULE_AUDIO_DEV_INTERFACE;
+	clk_cfg.pdata.param_id = AFE_PARAM_ID_LPAIF_CLK_CONFIG;
+	clk_cfg.pdata.param_size =  sizeof(clk_cfg.clk_cfg);
+	clk_cfg.clk_cfg = *cfg;
+
+	pr_debug("%s: Minor version =0x%x clk val1 = %d\n"
+		 "clk val2 = %d, clk src = 0x%x\n"
+		 "clk root = 0x%x clk mode = 0x%x resrv = 0x%x\n"
+		 "port id = 0x%x\n",
+		 __func__, cfg->i2s_cfg_minor_version,
+		 cfg->clk_val1, cfg->clk_val2, cfg->clk_src,
+		 cfg->clk_root, cfg->clk_set_mode,
+		 cfg->reserved, q6audio_get_port_id(port_id));
+
+	atomic_set(&this_afe.state, 1);
+	atomic_set(&this_afe.status, 0);
+	ret = apr_send_pkt(this_afe.apr, (uint32_t *) &clk_cfg);
+	if (ret < 0) {
+		pr_err("%s: AFE enable for port 0x%x ret %d\n",
+		       __func__, port_id, ret);
+		ret = -EINVAL;
+		goto fail_cmd;
+	}
+
+	ret = wait_event_timeout(this_afe.wait[index],
+			(atomic_read(&this_afe.state) == 0),
+			msecs_to_jiffies(TIMEOUT_MS));
+	if (!ret) {
+		pr_err("%s: wait_event timeout\n", __func__);
+		ret = -EINVAL;
+		goto fail_cmd;
+	}
+	if (atomic_read(&this_afe.status) > 0) {
+		pr_err("%s: config cmd failed [%s]\n",
+			__func__, adsp_err_get_err_str(
+			atomic_read(&this_afe.status)));
+		ret = adsp_err_get_lnx_err_code(
+				atomic_read(&this_afe.status));
+		goto fail_cmd;
+	}
+	ret = 0;
+
+fail_cmd:
+	mutex_unlock(&this_afe.afe_cmd_lock);
+	return ret;
+}
+
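+/*
+ * Illustrative usage sketch, not part of the original patch: enabling an
+ * MI2S bit clock. The Q6AFE_LPASS_* constants are the names this tree's
+ * q6afe headers usually provide; treat them and the rate as placeholders.
+ *
+ *	struct afe_clk_cfg lpass_clk = {
+ *		.i2s_cfg_minor_version = AFE_API_VERSION_I2S_CONFIG,
+ *		.clk_val1 = 1536000,
+ *		.clk_val2 = 0,
+ *		.clk_src = Q6AFE_LPASS_CLK_SRC_INTERNAL,
+ *		.clk_root = Q6AFE_LPASS_CLK_ROOT_DEFAULT,
+ *		.clk_set_mode = Q6AFE_LPASS_MODE_CLK1_VALID,
+ *	};
+ *	ret = afe_set_lpass_clock(AFE_PORT_ID_PRIMARY_MI2S_RX, &lpass_clk);
+ */
+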
+int afe_set_lpass_clk_cfg(int index, struct afe_clk_set *cfg)
+{
+	struct afe_lpass_clk_config_command_v2 clk_cfg;
+	int ret = 0;
+
+	if (!cfg) {
+		pr_err("%s: clock cfg is NULL\n", __func__);
+		ret = -EINVAL;
+		return ret;
+	}
+
+	if (index < 0 || index >= AFE_MAX_PORTS) {
+		pr_err("%s: index[%d] invalid!\n", __func__, index);
+		return -EINVAL;
+	}
+
+	ret = afe_q6_interface_prepare();
+	if (ret != 0) {
+		pr_err("%s: Q6 interface prepare failed %d\n", __func__, ret);
+		return ret;
+	}
+
+	mutex_lock(&this_afe.afe_cmd_lock);
+	clk_cfg.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+				APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
+	clk_cfg.hdr.pkt_size = sizeof(clk_cfg);
+	clk_cfg.hdr.src_port = 0;
+	clk_cfg.hdr.dest_port = 0;
+	clk_cfg.hdr.token = index;
+
+	clk_cfg.hdr.opcode = AFE_SVC_CMD_SET_PARAM;
+	clk_cfg.param.payload_size = sizeof(clk_cfg) - sizeof(struct apr_hdr)
+						- sizeof(clk_cfg.param);
+	clk_cfg.param.payload_address_lsw = 0x00;
+	clk_cfg.param.payload_address_msw = 0x00;
+	clk_cfg.param.mem_map_handle = 0x00;
+	clk_cfg.pdata.module_id = AFE_MODULE_CLOCK_SET;
+	clk_cfg.pdata.param_id = AFE_PARAM_ID_CLOCK_SET;
+	clk_cfg.pdata.param_size =  sizeof(clk_cfg.clk_cfg);
+	clk_cfg.clk_cfg = *cfg;
+
+	pr_debug("%s: Minor version =0x%x clk id = %d\n"
+		 "clk freq (Hz) = %d, clk attri = 0x%x\n"
+		 "clk root = 0x%x clk enable = 0x%x\n",
+		 __func__, cfg->clk_set_minor_version,
+		 cfg->clk_id, cfg->clk_freq_in_hz, cfg->clk_attri,
+		 cfg->clk_root, cfg->enable);
+
+	atomic_set(&this_afe.state, 1);
+	atomic_set(&this_afe.status, 0);
+	ret = apr_send_pkt(this_afe.apr, (uint32_t *) &clk_cfg);
+	if (ret < 0) {
+		pr_err("%s: AFE clk cfg failed with ret %d\n",
+		       __func__, ret);
+		ret = -EINVAL;
+		goto fail_cmd;
+	}
+
+	ret = wait_event_timeout(this_afe.wait[index],
+			(atomic_read(&this_afe.state) == 0),
+			msecs_to_jiffies(TIMEOUT_MS));
+	if (!ret) {
+		pr_err("%s: wait_event timeout\n", __func__);
+		ret = -EINVAL;
+		goto fail_cmd;
+	} else {
+		/* set ret to 0 as no timeout happened */
+		ret = 0;
+	}
+	if (atomic_read(&this_afe.status) != 0) {
+		pr_err("%s: config cmd failed\n", __func__);
+		ret = -EINVAL;
+		goto fail_cmd;
+	}
+
+fail_cmd:
+	mutex_unlock(&this_afe.afe_cmd_lock);
+	return ret;
+}
+
+int afe_set_lpass_clock_v2(u16 port_id, struct afe_clk_set *cfg)
+{
+	int index = 0;
+	int ret = 0;
+
+	index = q6audio_get_port_index(port_id);
+	if (index < 0 || index >= AFE_MAX_PORTS) {
+		pr_err("%s: AFE port index[%d] invalid!\n",
+				__func__, index);
+		return -EINVAL;
+	}
+	ret = q6audio_is_digital_pcm_interface(port_id);
+	if (ret < 0) {
+		pr_err("%s: q6audio_is_digital_pcm_interface fail %d\n",
+			__func__, ret);
+		return -EINVAL;
+	}
+
+	ret = afe_set_lpass_clk_cfg(index, cfg);
+	if (ret)
+		pr_err("%s: afe_set_lpass_clk_cfg failed %d\n",
+			__func__, ret);
+
+	return ret;
+}
+
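+/*
+ * Illustrative usage sketch for the v2 clock API, not part of the
+ * original patch. The clk_id/clk_attri constants are assumed from this
+ * tree's q6afe headers; treat them as placeholders.
+ *
+ *	struct afe_clk_set mi2s_clk = {
+ *		.clk_set_minor_version = AFE_API_VERSION_CLOCK_SET,
+ *		.clk_id = Q6AFE_LPASS_CLK_ID_PRI_MI2S_IBIT,
+ *		.clk_freq_in_hz = 1536000,
+ *		.clk_attri = Q6AFE_LPASS_CLK_ATTRIBUTE_COUPLE_NO,
+ *		.clk_root = Q6AFE_LPASS_CLK_ROOT_DEFAULT,
+ *		.enable = 1,
+ *	};
+ *	ret = afe_set_lpass_clock_v2(AFE_PORT_ID_PRIMARY_MI2S_RX, &mi2s_clk);
+ */
+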
+int afe_set_lpass_internal_digital_codec_clock(u16 port_id,
+			struct afe_digital_clk_cfg *cfg)
+{
+	struct afe_lpass_digital_clk_config_command clk_cfg;
+	int index = 0;
+	int ret = 0;
+
+	if (!cfg) {
+		pr_err("%s: clock cfg is NULL\n", __func__);
+		ret = -EINVAL;
+		return ret;
+	}
+	index = q6audio_get_port_index(port_id);
+	if (index < 0 || index >= AFE_MAX_PORTS) {
+		pr_err("%s: AFE port index[%d] invalid!\n",
+				__func__, index);
+		return -EINVAL;
+	}
+	ret = q6audio_is_digital_pcm_interface(port_id);
+	if (ret < 0) {
+		pr_err("%s: q6audio_is_digital_pcm_interface fail %d\n",
+			__func__, ret);
+		return -EINVAL;
+	}
+
+	ret = afe_q6_interface_prepare();
+	if (ret != 0) {
+		pr_err("%s: Q6 interface prepare failed %d\n", __func__, ret);
+		return ret;
+	}
+
+	clk_cfg.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+				APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
+	clk_cfg.hdr.pkt_size = sizeof(clk_cfg);
+	clk_cfg.hdr.src_port = 0;
+	clk_cfg.hdr.dest_port = 0;
+	clk_cfg.hdr.token = index;
+
+	clk_cfg.hdr.opcode = AFE_PORT_CMD_SET_PARAM_V2;
+	clk_cfg.param.port_id = q6audio_get_port_id(port_id);
+	clk_cfg.param.payload_size = sizeof(clk_cfg) - sizeof(struct apr_hdr)
+						- sizeof(clk_cfg.param);
+	clk_cfg.param.payload_address_lsw = 0x00;
+	clk_cfg.param.payload_address_msw = 0x00;
+	clk_cfg.param.mem_map_handle = 0x00;
+	clk_cfg.pdata.module_id = AFE_MODULE_AUDIO_DEV_INTERFACE;
+	clk_cfg.pdata.param_id = AFE_PARAM_ID_INTERNAL_DIGIATL_CDC_CLK_CONFIG;
+	clk_cfg.pdata.param_size =  sizeof(clk_cfg.clk_cfg);
+	clk_cfg.clk_cfg = *cfg;
+
+	pr_debug("%s: Minor version =0x%x clk val = %d\n"
+		 "clk root = 0x%x resrv = 0x%x port id = 0x%x\n",
+		 __func__, cfg->i2s_cfg_minor_version,
+		 cfg->clk_val, cfg->clk_root, cfg->reserved,
+		 q6audio_get_port_id(port_id));
+
+	atomic_set(&this_afe.state, 1);
+	atomic_set(&this_afe.status, 0);
+	ret = apr_send_pkt(this_afe.apr, (uint32_t *) &clk_cfg);
+	if (ret < 0) {
+		pr_err("%s: AFE enable for port 0x%x ret %d\n",
+		       __func__, port_id, ret);
+		ret = -EINVAL;
+		goto fail_cmd;
+	}
+
+	ret = wait_event_timeout(this_afe.wait[index],
+			(atomic_read(&this_afe.state) == 0),
+			msecs_to_jiffies(TIMEOUT_MS));
+	if (!ret) {
+		pr_err("%s: wait_event timeout\n", __func__);
+		ret = -EINVAL;
+		goto fail_cmd;
+	}
+	if (atomic_read(&this_afe.status) > 0) {
+		pr_err("%s: config cmd failed [%s]\n",
+			__func__, adsp_err_get_err_str(
+			atomic_read(&this_afe.status)));
+		ret = adsp_err_get_lnx_err_code(
+				atomic_read(&this_afe.status));
+		goto fail_cmd;
+	}
+	ret = 0;
+
+fail_cmd:
+	return ret;
+}
+
+int afe_enable_lpass_core_shared_clock(u16 port_id, u32 enable)
+{
+	struct afe_lpass_core_shared_clk_config_command clk_cfg;
+	int index = 0;
+	int ret = 0;
+
+	index = q6audio_get_port_index(port_id);
+	if (index < 0 || index >= AFE_MAX_PORTS) {
+		pr_err("%s: AFE port index[%d] invalid!\n",
+				__func__, index);
+		return -EINVAL;
+	}
+	ret = q6audio_is_digital_pcm_interface(port_id);
+	if (ret < 0) {
+		pr_err("%s: q6audio_is_digital_pcm_interface fail %d\n",
+		       __func__, ret);
+		return -EINVAL;
+	}
+
+	ret = afe_q6_interface_prepare();
+	if (ret != 0) {
+		pr_err("%s: Q6 interface prepare failed %d\n", __func__, ret);
+		return ret;
+	}
+
+	mutex_lock(&this_afe.afe_cmd_lock);
+	clk_cfg.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+				APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
+	clk_cfg.hdr.pkt_size = sizeof(clk_cfg);
+	clk_cfg.hdr.src_port = 0;
+	clk_cfg.hdr.dest_port = 0;
+	clk_cfg.hdr.token = index;
+
+	clk_cfg.hdr.opcode = AFE_PORT_CMD_SET_PARAM_V2;
+	clk_cfg.param.port_id = q6audio_get_port_id(port_id);
+	clk_cfg.param.payload_size = sizeof(clk_cfg) - sizeof(struct apr_hdr)
+						- sizeof(clk_cfg.param);
+	clk_cfg.param.payload_address_lsw = 0x00;
+	clk_cfg.param.payload_address_msw = 0x00;
+	clk_cfg.param.mem_map_handle = 0x00;
+	clk_cfg.pdata.module_id = AFE_MODULE_AUDIO_DEV_INTERFACE;
+	clk_cfg.pdata.param_id = AFE_PARAM_ID_LPASS_CORE_SHARED_CLOCK_CONFIG;
+	clk_cfg.pdata.param_size =  sizeof(clk_cfg.clk_cfg);
+	clk_cfg.clk_cfg.lpass_core_shared_clk_cfg_minor_version =
+				AFE_API_VERSION_LPASS_CORE_SHARED_CLK_CONFIG;
+	clk_cfg.clk_cfg.enable = enable;
+
+	pr_debug("%s: port id = %d, enable = %d\n",
+		 __func__, q6audio_get_port_id(port_id), enable);
+
+	atomic_set(&this_afe.state, 1);
+	atomic_set(&this_afe.status, 0);
+	ret = apr_send_pkt(this_afe.apr, (uint32_t *) &clk_cfg);
+	if (ret < 0) {
+		pr_err("%s: AFE enable for port 0x%x ret %d\n",
+		       __func__, port_id, ret);
+		ret = -EINVAL;
+		goto fail_cmd;
+	}
+
+	ret = wait_event_timeout(this_afe.wait[index],
+			(atomic_read(&this_afe.state) == 0),
+			msecs_to_jiffies(TIMEOUT_MS));
+	if (!ret) {
+		pr_err("%s: wait_event timeout\n", __func__);
+		ret = -EINVAL;
+		goto fail_cmd;
+	}
+	if (atomic_read(&this_afe.status) > 0) {
+		pr_err("%s: config cmd failed [%s]\n",
+			__func__, adsp_err_get_err_str(
+			atomic_read(&this_afe.status)));
+		ret = adsp_err_get_lnx_err_code(
+				atomic_read(&this_afe.status));
+		goto fail_cmd;
+	}
+	ret = 0;
+
+fail_cmd:
+	mutex_unlock(&this_afe.afe_cmd_lock);
+	return ret;
+}
+
+int q6afe_check_osr_clk_freq(u32 freq)
+{
+	int ret = 0;
+
+	switch (freq) {
+	case Q6AFE_LPASS_OSR_CLK_12_P288_MHZ:
+	case Q6AFE_LPASS_OSR_CLK_8_P192_MHZ:
+	case Q6AFE_LPASS_OSR_CLK_6_P144_MHZ:
+	case Q6AFE_LPASS_OSR_CLK_4_P096_MHZ:
+	case Q6AFE_LPASS_OSR_CLK_3_P072_MHZ:
+	case Q6AFE_LPASS_OSR_CLK_2_P048_MHZ:
+	case Q6AFE_LPASS_OSR_CLK_1_P536_MHZ:
+	case Q6AFE_LPASS_OSR_CLK_1_P024_MHZ:
+	case Q6AFE_LPASS_OSR_CLK_768_kHZ:
+	case Q6AFE_LPASS_OSR_CLK_512_kHZ:
+		break;
+	default:
+		pr_err("%s: unsupported freq 0x%x\n",
+			__func__, freq);
+		ret = -EINVAL;
+	}
+	return ret;
+}
+
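+/*
+ * Illustrative usage, not part of the original patch: callers validate
+ * the OSR rate up front, before programming any clocks.
+ *
+ *	if (q6afe_check_osr_clk_freq(freq))
+ *		return -EINVAL;
+ *
+ * On success freq is one of the supported Q6AFE_LPASS_OSR_CLK_* rates.
+ */
+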
+int afe_get_sp_th_vi_ftm_data(struct afe_sp_th_vi_get_param *th_vi)
+{
+	int ret = -EINVAL;
+	int index = 0, port = SLIMBUS_4_TX;
+
+	if (!th_vi) {
+		pr_err("%s: Invalid params\n", __func__);
+		goto done;
+	}
+	if (this_afe.vi_tx_port != -1)
+		port = this_afe.vi_tx_port;
+
+	ret = q6audio_validate_port(port);
+	if (ret < 0) {
+		pr_err("%s: invalid port 0x%x ret %d\n", __func__, port, ret);
+		goto done;
+	}
+	index = q6audio_get_port_index(port);
+	if (index < 0) {
+		pr_err("%s: invalid port 0x%x, index %d\n",
+			__func__, port, index);
+		ret = -EINVAL;
+		goto done;
+	}
+	th_vi->hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+				APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
+	th_vi->hdr.pkt_size = sizeof(*th_vi);
+	th_vi->hdr.src_port = 0;
+	th_vi->hdr.dest_port = 0;
+	th_vi->hdr.token = index;
+	th_vi->hdr.opcode =  AFE_PORT_CMD_GET_PARAM_V2;
+	th_vi->get_param.mem_map_handle = 0;
+	th_vi->get_param.module_id = AFE_MODULE_SPEAKER_PROTECTION_V2_TH_VI;
+	th_vi->get_param.param_id = AFE_PARAM_ID_SP_V2_TH_VI_FTM_PARAMS;
+	th_vi->get_param.payload_address_lsw = 0;
+	th_vi->get_param.payload_address_msw = 0;
+	th_vi->get_param.payload_size = sizeof(*th_vi)
+				- sizeof(th_vi->get_param) - sizeof(th_vi->hdr);
+	th_vi->get_param.port_id = q6audio_get_port_id(port);
+	th_vi->pdata.module_id = AFE_MODULE_SPEAKER_PROTECTION_V2_TH_VI;
+	th_vi->pdata.param_id = AFE_PARAM_ID_SP_V2_TH_VI_FTM_PARAMS;
+	th_vi->pdata.param_size = sizeof(th_vi->param);
+	atomic_set(&this_afe.status, 0);
+	atomic_set(&this_afe.state, 1);
+	ret = apr_send_pkt(this_afe.apr, (uint32_t *)th_vi);
+	if (ret < 0) {
+		pr_err("%s: get param port 0x%x param id[0x%x]failed %d\n",
+			 __func__, port, th_vi->get_param.param_id, ret);
+		goto done;
+	}
+	ret = wait_event_timeout(this_afe.wait[index],
+				 (atomic_read(&this_afe.state) == 0),
+				 msecs_to_jiffies(TIMEOUT_MS));
+	if (!ret) {
+		pr_err("%s: wait_event timeout\n", __func__);
+		ret = -EINVAL;
+		goto done;
+	}
+	if (atomic_read(&this_afe.status) > 0) {
+		pr_err("%s: config cmd failed [%s]\n",
+			__func__, adsp_err_get_err_str(
+			atomic_read(&this_afe.status)));
+		ret = adsp_err_get_lnx_err_code(atomic_read(&this_afe.status));
+		goto done;
+	}
+	memcpy(&th_vi->param, &this_afe.th_vi_resp.param,
+		sizeof(this_afe.th_vi_resp.param));
+	pr_debug("%s: DC resistance %d %d temp %d %d status %d %d\n",
+		 __func__, th_vi->param.dc_res_q24[SP_V2_SPKR_1],
+		 th_vi->param.dc_res_q24[SP_V2_SPKR_2],
+		 th_vi->param.temp_q22[SP_V2_SPKR_1],
+		 th_vi->param.temp_q22[SP_V2_SPKR_2],
+		 th_vi->param.status[SP_V2_SPKR_1],
+		 th_vi->param.status[SP_V2_SPKR_2]);
+	ret = 0;
+done:
+	return ret;
+}
+
+int afe_get_sp_ex_vi_ftm_data(struct afe_sp_ex_vi_get_param *ex_vi)
+{
+	int ret = -EINVAL;
+	int index = 0, port = SLIMBUS_4_TX;
+
+	if (!ex_vi) {
+		pr_err("%s: Invalid params\n", __func__);
+		goto done;
+	}
+	if (this_afe.vi_tx_port != -1)
+		port = this_afe.vi_tx_port;
+
+	ret = q6audio_validate_port(port);
+	if (ret < 0) {
+		pr_err("%s: invalid port 0x%x ret %d\n", __func__, port, ret);
+		goto done;
+	}
+
+	index = q6audio_get_port_index(port);
+	if (index < 0) {
+		pr_err("%s: invalid index %d port 0x%x\n", __func__,
+			index, port);
+		ret = -EINVAL;
+		goto done;
+	}
+
+	ex_vi->hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+				APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
+	ex_vi->hdr.pkt_size = sizeof(*ex_vi);
+	ex_vi->hdr.src_port = 0;
+	ex_vi->hdr.dest_port = 0;
+	ex_vi->hdr.token = index;
+	ex_vi->hdr.opcode =  AFE_PORT_CMD_GET_PARAM_V2;
+	ex_vi->get_param.mem_map_handle = 0;
+	ex_vi->get_param.module_id = AFE_MODULE_SPEAKER_PROTECTION_V2_EX_VI;
+	ex_vi->get_param.param_id = AFE_PARAM_ID_SP_V2_EX_VI_FTM_PARAMS;
+	ex_vi->get_param.payload_address_lsw = 0;
+	ex_vi->get_param.payload_address_msw = 0;
+	ex_vi->get_param.payload_size = sizeof(*ex_vi)
+		- sizeof(ex_vi->get_param) - sizeof(ex_vi->hdr);
+	ex_vi->get_param.port_id = q6audio_get_port_id(port);
+	ex_vi->pdata.module_id = AFE_MODULE_SPEAKER_PROTECTION_V2_EX_VI;
+	ex_vi->pdata.param_id = AFE_PARAM_ID_SP_V2_EX_VI_FTM_PARAMS;
+	ex_vi->pdata.param_size = sizeof(ex_vi->param);
+	atomic_set(&this_afe.status, 0);
+	atomic_set(&this_afe.state, 1);
+	ret = apr_send_pkt(this_afe.apr, (uint32_t *)ex_vi);
+	if (ret < 0) {
+		pr_err("%s: get param port 0x%x param id[0x%x]failed %d\n",
+			__func__, port, ex_vi->get_param.param_id, ret);
+		goto done;
+	}
+	ret = wait_event_timeout(this_afe.wait[index],
+				 (atomic_read(&this_afe.state) == 0),
+				 msecs_to_jiffies(TIMEOUT_MS));
+	if (!ret) {
+		pr_err("%s: wait_event timeout\n", __func__);
+		ret = -EINVAL;
+		goto done;
+	}
+	if (atomic_read(&this_afe.status) > 0) {
+		pr_err("%s: config cmd failed [%s]\n",
+			__func__, adsp_err_get_err_str(
+			atomic_read(&this_afe.status)));
+		ret = adsp_err_get_lnx_err_code(atomic_read(&this_afe.status));
+		goto done;
+	}
+	memcpy(&ex_vi->param, &this_afe.ex_vi_resp.param,
+		sizeof(this_afe.ex_vi_resp.param));
+	pr_debug("%s: freq %d %d resistance %d %d qfactor %d %d state %d %d\n",
+		 __func__, ex_vi->param.freq_q20[SP_V2_SPKR_1],
+		 ex_vi->param.freq_q20[SP_V2_SPKR_2],
+		 ex_vi->param.resis_q24[SP_V2_SPKR_1],
+		 ex_vi->param.resis_q24[SP_V2_SPKR_2],
+		 ex_vi->param.qmct_q24[SP_V2_SPKR_1],
+		 ex_vi->param.qmct_q24[SP_V2_SPKR_2],
+		 ex_vi->param.status[SP_V2_SPKR_1],
+		 ex_vi->param.status[SP_V2_SPKR_2]);
+	ret = 0;
+done:
+	return ret;
+}
+
+int afe_get_av_dev_drift(struct afe_param_id_dev_timing_stats *timing_stats,
+			 u16 port)
+{
+	int ret = -EINVAL;
+	int index = 0;
+	struct afe_av_dev_drift_get_param av_dev_drift;
+
+	if (!timing_stats) {
+		pr_err("%s: Invalid params\n", __func__);
+		goto exit;
+	}
+
+	ret = q6audio_validate_port(port);
+	if (ret < 0) {
+		pr_err("%s: invalid port 0x%x ret %d\n", __func__, port, ret);
+		ret = -EINVAL;
+		goto exit;
+	}
+
+	index = q6audio_get_port_index(port);
+	if (index < 0 || index >= AFE_MAX_PORTS) {
+		pr_err("%s: Invalid AFE port index[%d]\n",
+				__func__, index);
+		ret = -EINVAL;
+		goto exit;
+	}
+
+	memset(&av_dev_drift, 0, sizeof(struct afe_av_dev_drift_get_param));
+
+	av_dev_drift.hdr.hdr_field =
+		APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+				APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
+	av_dev_drift.hdr.pkt_size = sizeof(av_dev_drift);
+	av_dev_drift.hdr.src_port = 0;
+	av_dev_drift.hdr.dest_port = 0;
+	av_dev_drift.hdr.token = index;
+	av_dev_drift.hdr.opcode =  AFE_PORT_CMD_GET_PARAM_V2;
+	av_dev_drift.get_param.mem_map_handle = 0;
+	av_dev_drift.get_param.module_id = AFE_MODULE_AUDIO_DEV_INTERFACE;
+	av_dev_drift.get_param.param_id = AFE_PARAM_ID_DEV_TIMING_STATS;
+	av_dev_drift.get_param.payload_address_lsw = 0;
+	av_dev_drift.get_param.payload_address_msw = 0;
+	av_dev_drift.get_param.payload_size = sizeof(av_dev_drift)
+		- sizeof(av_dev_drift.get_param) - sizeof(av_dev_drift.hdr);
+	av_dev_drift.get_param.port_id = q6audio_get_port_id(port);
+	av_dev_drift.pdata.module_id = AFE_MODULE_AUDIO_DEV_INTERFACE;
+	av_dev_drift.pdata.param_id = AFE_PARAM_ID_DEV_TIMING_STATS;
+	av_dev_drift.pdata.param_size = sizeof(av_dev_drift.timing_stats);
+	atomic_set(&this_afe.status, 0);
+	atomic_set(&this_afe.state, 1);
+	ret = apr_send_pkt(this_afe.apr, (uint32_t *)&av_dev_drift);
+	if (ret < 0) {
+		pr_err("%s: get param port 0x%x param id[0x%x] failed %d\n",
+			__func__, port, av_dev_drift.get_param.param_id, ret);
+		goto exit;
+	}
+
+	ret = wait_event_timeout(this_afe.wait[index],
+			(atomic_read(&this_afe.state) == 0),
+			msecs_to_jiffies(TIMEOUT_MS));
+	if (!ret) {
+		pr_err("%s: wait_event timeout\n", __func__);
+		ret = -EINVAL;
+		goto exit;
+	}
+
+	if (atomic_read(&this_afe.status) > 0) {
+		pr_err("%s: config cmd failed [%s]\n",
+				__func__, adsp_err_get_err_str(
+					atomic_read(&this_afe.status)));
+		ret = adsp_err_get_lnx_err_code(
+				atomic_read(&this_afe.status));
+		goto exit;
+	}
+
+	memcpy(timing_stats, &this_afe.av_dev_drift_resp.timing_stats,
+	       sizeof(this_afe.av_dev_drift_resp.timing_stats));
+	ret = 0;
+exit:
+	return ret;
+}
+
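+/*
+ * Illustrative usage sketch, not part of the original patch: querying
+ * AV sync drift statistics for a running port (SLIMBUS_0_RX is a
+ * placeholder).
+ *
+ *	struct afe_param_id_dev_timing_stats stats;
+ *
+ *	if (!afe_get_av_dev_drift(&stats, SLIMBUS_0_RX))
+ *		stats now holds the device timing/drift counters
+ */
+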
+int afe_spk_prot_get_calib_data(struct afe_spkr_prot_get_vi_calib *calib_resp)
+{
+	int ret = -EINVAL;
+	int index = 0, port = SLIMBUS_4_TX;
+
+	if (!calib_resp) {
+		pr_err("%s: Invalid params\n", __func__);
+		goto fail_cmd;
+	}
+	if (this_afe.vi_tx_port != -1)
+		port = this_afe.vi_tx_port;
+
+	ret = q6audio_validate_port(port);
+	if (ret < 0) {
+		pr_err("%s: invalid port 0x%x ret %d\n", __func__, port, ret);
+		ret = -EINVAL;
+		goto fail_cmd;
+	}
+	index = q6audio_get_port_index(port);
+	if (index < 0 || index >= AFE_MAX_PORTS) {
+		pr_err("%s: AFE port index[%d] invalid!\n",
+				__func__, index);
+		ret = -EINVAL;
+		goto fail_cmd;
+	}
+	calib_resp->hdr.hdr_field =
+		APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+			      APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
+	calib_resp->hdr.pkt_size = sizeof(*calib_resp);
+	calib_resp->hdr.src_port = 0;
+	calib_resp->hdr.dest_port = 0;
+	calib_resp->hdr.token = index;
+	calib_resp->hdr.opcode =  AFE_PORT_CMD_GET_PARAM_V2;
+	calib_resp->get_param.mem_map_handle = 0;
+	calib_resp->get_param.module_id = AFE_MODULE_FB_SPKR_PROT_VI_PROC_V2;
+	calib_resp->get_param.param_id = AFE_PARAM_ID_CALIB_RES_CFG_V2;
+	calib_resp->get_param.payload_address_lsw = 0;
+	calib_resp->get_param.payload_address_msw = 0;
+	calib_resp->get_param.payload_size = sizeof(*calib_resp)
+		- sizeof(calib_resp->get_param) - sizeof(calib_resp->hdr);
+	calib_resp->get_param.port_id = q6audio_get_port_id(port);
+	calib_resp->pdata.module_id = AFE_MODULE_FB_SPKR_PROT_VI_PROC_V2;
+	calib_resp->pdata.param_id = AFE_PARAM_ID_CALIB_RES_CFG_V2;
+	calib_resp->pdata.param_size = sizeof(calib_resp->res_cfg);
+	atomic_set(&this_afe.status, 0);
+	atomic_set(&this_afe.state, 1);
+	ret = apr_send_pkt(this_afe.apr, (uint32_t *)calib_resp);
+	if (ret < 0) {
+		pr_err("%s: get param port 0x%x param id[0x%x]failed %d\n",
+			   __func__, port, calib_resp->get_param.param_id, ret);
+		goto fail_cmd;
+	}
+	ret = wait_event_timeout(this_afe.wait[index],
+		(atomic_read(&this_afe.state) == 0),
+		msecs_to_jiffies(TIMEOUT_MS));
+	if (!ret) {
+		pr_err("%s: wait_event timeout\n", __func__);
+		ret = -EINVAL;
+		goto fail_cmd;
+	}
+	if (atomic_read(&this_afe.status) > 0) {
+		pr_err("%s: config cmd failed [%s]\n",
+			__func__, adsp_err_get_err_str(
+			atomic_read(&this_afe.status)));
+		ret = adsp_err_get_lnx_err_code(
+				atomic_read(&this_afe.status));
+		goto fail_cmd;
+	}
+	memcpy(&calib_resp->res_cfg, &this_afe.calib_data.res_cfg,
+		sizeof(this_afe.calib_data.res_cfg));
+	pr_info("%s: state %s resistance %d %d\n", __func__,
+			 fbsp_state[calib_resp->res_cfg.th_vi_ca_state],
+			 calib_resp->res_cfg.r0_cali_q24[SP_V2_SPKR_1],
+			 calib_resp->res_cfg.r0_cali_q24[SP_V2_SPKR_2]);
+	ret = 0;
+fail_cmd:
+	return ret;
+}
+
+int afe_spk_prot_feed_back_cfg(int src_port, int dst_port,
+	int l_ch, int r_ch, u32 enable)
+{
+	int ret = -EINVAL;
+	union afe_spkr_prot_config prot_config;
+	int index = 0;
+
+	if (!enable) {
+		pr_debug("%s: Disable Feedback tx path\n", __func__);
+		this_afe.vi_tx_port = -1;
+		this_afe.vi_rx_port = -1;
+		return 0;
+	}
+
+	if ((q6audio_validate_port(src_port) < 0) ||
+		(q6audio_validate_port(dst_port) < 0)) {
+		pr_err("%s: invalid ports src 0x%x dst 0x%x\n",
+			__func__, src_port, dst_port);
+		goto fail_cmd;
+	}
+	if (!l_ch && !r_ch) {
+		pr_err("%s: error ch values zero\n", __func__);
+		goto fail_cmd;
+	}
+	pr_debug("%s: src_port 0x%x  dst_port 0x%x l_ch %d r_ch %d\n",
+		 __func__, src_port, dst_port, l_ch, r_ch);
+	memset(&prot_config, 0, sizeof(prot_config));
+	prot_config.feedback_path_cfg.dst_portid =
+		q6audio_get_port_id(dst_port);
+	if (l_ch) {
+		prot_config.feedback_path_cfg.chan_info[index++] = 1;
+		prot_config.feedback_path_cfg.chan_info[index++] = 2;
+	}
+	if (r_ch) {
+		prot_config.feedback_path_cfg.chan_info[index++] = 3;
+		prot_config.feedback_path_cfg.chan_info[index++] = 4;
+	}
+	prot_config.feedback_path_cfg.num_channels = index;
+	pr_debug("%s: no of channels: %d\n", __func__, index);
+	prot_config.feedback_path_cfg.minor_version = 1;
+	ret = afe_spk_prot_prepare(src_port, dst_port,
+			AFE_PARAM_ID_FEEDBACK_PATH_CFG, &prot_config);
+fail_cmd:
+	return ret;
+}
+
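+/*
+ * Illustrative note, not part of the original patch: with both l_ch and
+ * r_ch set the feedback path carries four channels, packed as
+ * chan_info[] = {1, 2, 3, 4} (typically the V/I pair per speaker):
+ *
+ *	ret = afe_spk_prot_feed_back_cfg(SLIMBUS_4_TX, SLIMBUS_0_RX,
+ *					 1, 1, 1);
+ */
+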
+static int get_cal_type_index(int32_t cal_type)
+{
+	int ret = -EINVAL;
+
+	switch (cal_type) {
+	case AFE_COMMON_RX_CAL_TYPE:
+		ret = AFE_COMMON_RX_CAL;
+		break;
+	case AFE_COMMON_TX_CAL_TYPE:
+		ret = AFE_COMMON_TX_CAL;
+		break;
+	case AFE_LSM_TX_CAL_TYPE:
+		ret = AFE_LSM_TX_CAL;
+		break;
+	case AFE_AANC_CAL_TYPE:
+		ret = AFE_AANC_CAL;
+		break;
+	case AFE_HW_DELAY_CAL_TYPE:
+		ret = AFE_HW_DELAY_CAL;
+		break;
+	case AFE_FB_SPKR_PROT_CAL_TYPE:
+		ret = AFE_FB_SPKR_PROT_CAL;
+		break;
+	case AFE_SIDETONE_CAL_TYPE:
+		ret = AFE_SIDETONE_CAL;
+		break;
+	case AFE_SIDETONE_IIR_CAL_TYPE:
+		ret = AFE_SIDETONE_IIR_CAL;
+		break;
+	case AFE_TOPOLOGY_CAL_TYPE:
+		ret = AFE_TOPOLOGY_CAL;
+		break;
+	case AFE_LSM_TOPOLOGY_CAL_TYPE:
+		ret = AFE_LSM_TOPOLOGY_CAL;
+		break;
+	case AFE_CUST_TOPOLOGY_CAL_TYPE:
+		ret = AFE_CUST_TOPOLOGY_CAL;
+		break;
+	default:
+		pr_err("%s: invalid cal type %d!\n", __func__, cal_type);
+	}
+	return ret;
+}
+
+int afe_alloc_cal(int32_t cal_type, size_t data_size,
+						void *data)
+{
+	int				ret = 0;
+	int				cal_index;
+
+	cal_index = get_cal_type_index(cal_type);
+	pr_debug("%s: cal_type = %d cal_index = %d\n",
+		  __func__, cal_type, cal_index);
+
+	if (cal_index < 0) {
+		pr_err("%s: could not get cal index %d!\n",
+			__func__, cal_index);
+		ret = -EINVAL;
+		goto done;
+	}
+
+	ret = cal_utils_alloc_cal(data_size, data,
+		this_afe.cal_data[cal_index], 0, NULL);
+	if (ret < 0) {
+		pr_err("%s: cal_utils_alloc_block failed, ret = %d, cal type = %d!\n",
+			__func__, ret, cal_type);
+		ret = -EINVAL;
+		goto done;
+	}
+done:
+	return ret;
+}
+
+static int afe_dealloc_cal(int32_t cal_type, size_t data_size,
+							void *data)
+{
+	int				ret = 0;
+	int				cal_index;
+
+	pr_debug("%s:\n", __func__);
+
+	cal_index = get_cal_type_index(cal_type);
+	if (cal_index < 0) {
+		pr_err("%s: could not get cal index %d!\n",
+			__func__, cal_index);
+		ret = -EINVAL;
+		goto done;
+	}
+
+	ret = cal_utils_dealloc_cal(data_size, data,
+		this_afe.cal_data[cal_index]);
+	if (ret < 0) {
+		pr_err("%s: cal_utils_dealloc_block failed, ret = %d, cal type = %d!\n",
+			__func__, ret, cal_type);
+		ret = -EINVAL;
+		goto done;
+	}
+done:
+	return ret;
+}
+
+static int afe_set_cal(int32_t cal_type, size_t data_size,
+						void *data)
+{
+	int				ret = 0;
+	int				cal_index;
+
+	pr_debug("%s:\n", __func__);
+
+	cal_index = get_cal_type_index(cal_type);
+	if (cal_index < 0) {
+		pr_err("%s: could not get cal index %d!\n",
+			__func__, cal_index);
+		ret = -EINVAL;
+		goto done;
+	}
+
+	ret = cal_utils_set_cal(data_size, data,
+		this_afe.cal_data[cal_index], 0, NULL);
+	if (ret < 0) {
+		pr_err("%s: cal_utils_set_cal failed, ret = %d, cal type = %d!\n",
+			__func__, ret, cal_type);
+		ret = -EINVAL;
+		goto done;
+	}
+
+	if (cal_index == AFE_CUST_TOPOLOGY_CAL) {
+		mutex_lock(&this_afe.cal_data[AFE_CUST_TOPOLOGY_CAL]->lock);
+		this_afe.set_custom_topology = 1;
+		pr_debug("%s:[AFE_CUSTOM_TOPOLOGY] ret = %d, cal type = %d!\n",
+			__func__, ret, cal_type);
+		mutex_unlock(&this_afe.cal_data[AFE_CUST_TOPOLOGY_CAL]->lock);
+	}
+
+done:
+	return ret;
+}
+
+static struct cal_block_data *afe_find_hw_delay_by_path(
+			struct cal_type_data *cal_type, int path)
+{
+	struct list_head		*ptr, *next;
+	struct cal_block_data		*cal_block = NULL;
+
+	pr_debug("%s:\n", __func__);
+
+	list_for_each_safe(ptr, next,
+		&cal_type->cal_blocks) {
+
+		cal_block = list_entry(ptr,
+			struct cal_block_data, list);
+
+		if (((struct audio_cal_info_hw_delay *)cal_block->cal_info)
+			->path == path) {
+			return cal_block;
+		}
+	}
+	return NULL;
+}
+
+static int afe_get_cal_hw_delay(int32_t path,
+				struct audio_cal_hw_delay_entry *entry)
+{
+	int ret = 0;
+	int i;
+	struct cal_block_data		*cal_block = NULL;
+	struct audio_cal_hw_delay_data	*hw_delay_info = NULL;
+
+	pr_debug("%s:\n", __func__);
+
+	if (this_afe.cal_data[AFE_HW_DELAY_CAL] == NULL) {
+		pr_err("%s: AFE_HW_DELAY_CAL not initialized\n", __func__);
+		ret = -EINVAL;
+		goto done;
+	}
+	if (entry == NULL) {
+		pr_err("%s: entry is NULL\n", __func__);
+		ret = -EINVAL;
+		goto done;
+	}
+	if ((path >= MAX_PATH_TYPE) || (path < 0)) {
+		pr_err("%s: bad path: %d\n",
+		       __func__, path);
+		ret = -EINVAL;
+		goto done;
+	}
+
+	mutex_lock(&this_afe.cal_data[AFE_HW_DELAY_CAL]->lock);
+	cal_block = afe_find_hw_delay_by_path(
+		this_afe.cal_data[AFE_HW_DELAY_CAL], path);
+	if (cal_block == NULL)
+		goto unlock;
+
+	hw_delay_info = &((struct audio_cal_info_hw_delay *)
+		cal_block->cal_info)->data;
+	if (hw_delay_info->num_entries > MAX_HW_DELAY_ENTRIES) {
+		pr_err("%s: invalid num entries: %d\n",
+		       __func__, hw_delay_info->num_entries);
+		ret = -EINVAL;
+		goto unlock;
+	}
+
+	for (i = 0; i < hw_delay_info->num_entries; i++) {
+		if (hw_delay_info->entry[i].sample_rate ==
+			entry->sample_rate) {
+			entry->delay_usec = hw_delay_info->entry[i].delay_usec;
+			break;
+		}
+	}
+	if (i == hw_delay_info->num_entries) {
+		pr_err("%s: Unable to find delay for sample rate %d\n",
+		       __func__, entry->sample_rate);
+		ret = -EFAULT;
+		goto unlock;
+	}
+	pr_debug("%s: Path = %d samplerate = %u usec = %u status %d\n",
+		 __func__, path, entry->sample_rate, entry->delay_usec, ret);
+unlock:
+	mutex_unlock(&this_afe.cal_data[AFE_HW_DELAY_CAL]->lock);
+done:
+	return ret;
+}
+
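+/*
+ * Illustrative usage sketch, not part of the original patch: the caller
+ * fills in the sample rate and reads back the matching delay. "path" is
+ * a placeholder for whichever value below MAX_PATH_TYPE this tree uses
+ * for the device path.
+ *
+ *	struct audio_cal_hw_delay_entry entry = { .sample_rate = 48000 };
+ *
+ *	if (!afe_get_cal_hw_delay(path, &entry))
+ *		delay_usec = entry.delay_usec;
+ */
+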
+static int afe_set_cal_sp_th_vi_ftm_cfg(int32_t cal_type, size_t data_size,
+					void *data)
+{
+	int ret = 0;
+	struct audio_cal_type_sp_th_vi_ftm_cfg *cal_data = data;
+
+	if (this_afe.cal_data[AFE_FB_SPKR_PROT_TH_VI_CAL] == NULL ||
+	    cal_data == NULL ||
+	    data_size != sizeof(*cal_data))
+		goto done;
+
+	pr_debug("%s: cal_type = %d\n", __func__, cal_type);
+	mutex_lock(&this_afe.cal_data[AFE_FB_SPKR_PROT_TH_VI_CAL]->lock);
+	memcpy(&this_afe.th_ftm_cfg, &cal_data->cal_info,
+		sizeof(this_afe.th_ftm_cfg));
+	mutex_unlock(&this_afe.cal_data[AFE_FB_SPKR_PROT_TH_VI_CAL]->lock);
+done:
+	return ret;
+}
+
+static int afe_set_cal_sp_ex_vi_ftm_cfg(int32_t cal_type, size_t data_size,
+					void *data)
+{
+	int ret = 0;
+	struct audio_cal_type_sp_ex_vi_ftm_cfg *cal_data = data;
+
+	if (this_afe.cal_data[AFE_FB_SPKR_PROT_EX_VI_CAL] == NULL ||
+	    cal_data == NULL ||
+	    data_size != sizeof(*cal_data))
+		goto done;
+
+	pr_debug("%s: cal_type = %d\n", __func__, cal_type);
+	mutex_lock(&this_afe.cal_data[AFE_FB_SPKR_PROT_EX_VI_CAL]->lock);
+	memcpy(&this_afe.ex_ftm_cfg, &cal_data->cal_info,
+		sizeof(this_afe.ex_ftm_cfg));
+	mutex_unlock(&this_afe.cal_data[AFE_FB_SPKR_PROT_EX_VI_CAL]->lock);
+done:
+	return ret;
+}
+
+static int afe_set_cal_fb_spkr_prot(int32_t cal_type, size_t data_size,
+								void *data)
+{
+	int ret = 0;
+	struct audio_cal_type_fb_spk_prot_cfg	*cal_data = data;
+
+	pr_debug("%s:\n", __func__);
+
+	if (this_afe.cal_data[AFE_FB_SPKR_PROT_CAL] == NULL)
+		goto done;
+	if (cal_data == NULL)
+		goto done;
+	if (data_size != sizeof(*cal_data))
+		goto done;
+
+	if (cal_data->cal_info.mode == MSM_SPKR_PROT_CALIBRATION_IN_PROGRESS)
+		__pm_wakeup_event(&wl.ws, jiffies_to_msecs(WAKELOCK_TIMEOUT));
+	mutex_lock(&this_afe.cal_data[AFE_FB_SPKR_PROT_CAL]->lock);
+	memcpy(&this_afe.prot_cfg, &cal_data->cal_info,
+		sizeof(this_afe.prot_cfg));
+	mutex_unlock(&this_afe.cal_data[AFE_FB_SPKR_PROT_CAL]->lock);
+done:
+	return ret;
+}
+
+static int afe_get_cal_sp_th_vi_ftm_param(int32_t cal_type, size_t data_size,
+					  void *data)
+{
+	int i, ret = 0;
+	struct audio_cal_type_sp_th_vi_param *cal_data = data;
+	struct afe_sp_th_vi_get_param th_vi;
+
+	pr_debug("%s: cal_type = %d\n", __func__, cal_type);
+	if (this_afe.cal_data[AFE_FB_SPKR_PROT_TH_VI_CAL] == NULL ||
+	    cal_data == NULL ||
+	    data_size != sizeof(*cal_data))
+		goto done;
+
+	mutex_lock(&this_afe.cal_data[AFE_FB_SPKR_PROT_TH_VI_CAL]->lock);
+	for (i = 0; i < SP_V2_NUM_MAX_SPKRS; i++) {
+		cal_data->cal_info.status[i] = -EINVAL;
+		cal_data->cal_info.r_dc_q24[i] = -1;
+		cal_data->cal_info.temp_q22[i] = -1;
+	}
+	if (!afe_get_sp_th_vi_ftm_data(&th_vi)) {
+		for (i = 0; i < SP_V2_NUM_MAX_SPKRS; i++) {
+			pr_debug("%s: ftm param status = %d\n",
+				  __func__, th_vi.param.status[i]);
+			if (th_vi.param.status[i] == FBSP_IN_PROGRESS) {
+				cal_data->cal_info.status[i] = -EAGAIN;
+			} else if (th_vi.param.status[i] == FBSP_SUCCESS) {
+				cal_data->cal_info.status[i] = 0;
+				cal_data->cal_info.r_dc_q24[i] =
+					th_vi.param.dc_res_q24[i];
+				cal_data->cal_info.temp_q22[i] =
+					th_vi.param.temp_q22[i];
+			}
+		}
+	}
+	mutex_unlock(&this_afe.cal_data[AFE_FB_SPKR_PROT_TH_VI_CAL]->lock);
+done:
+	return ret;
+}
+
+static int afe_get_cal_sp_ex_vi_ftm_param(int32_t cal_type, size_t data_size,
+					  void *data)
+{
+	int i, ret = 0;
+	struct audio_cal_type_sp_ex_vi_param *cal_data = data;
+	struct afe_sp_ex_vi_get_param ex_vi;
+
+	pr_debug("%s: cal_type = %d\n", __func__, cal_type);
+	if (this_afe.cal_data[AFE_FB_SPKR_PROT_EX_VI_CAL] == NULL ||
+	    cal_data == NULL ||
+	    data_size != sizeof(*cal_data))
+		goto done;
+
+	mutex_lock(&this_afe.cal_data[AFE_FB_SPKR_PROT_EX_VI_CAL]->lock);
+	for (i = 0; i < SP_V2_NUM_MAX_SPKRS; i++) {
+		cal_data->cal_info.status[i] = -EINVAL;
+		cal_data->cal_info.freq_q20[i] = -1;
+		cal_data->cal_info.resis_q24[i] = -1;
+		cal_data->cal_info.qmct_q24[i] = -1;
+	}
+	if (!afe_get_sp_ex_vi_ftm_data(&ex_vi)) {
+		for (i = 0; i < SP_V2_NUM_MAX_SPKRS; i++) {
+			pr_debug("%s: ftm param status = %d\n",
+				  __func__, ex_vi.param.status[i]);
+			if (ex_vi.param.status[i] == FBSP_IN_PROGRESS) {
+				cal_data->cal_info.status[i] = -EAGAIN;
+			} else if (ex_vi.param.status[i] == FBSP_SUCCESS) {
+				cal_data->cal_info.status[i] = 0;
+				cal_data->cal_info.freq_q20[i] =
+					ex_vi.param.freq_q20[i];
+				cal_data->cal_info.resis_q24[i] =
+					ex_vi.param.resis_q24[i];
+				cal_data->cal_info.qmct_q24[i] =
+					ex_vi.param.qmct_q24[i];
+			}
+		}
+	}
+	mutex_unlock(&this_afe.cal_data[AFE_FB_SPKR_PROT_EX_VI_CAL]->lock);
+done:
+	return ret;
+}
+
+static int afe_get_cal_fb_spkr_prot(int32_t cal_type, size_t data_size,
+								void *data)
+{
+	int ret = 0;
+	struct audio_cal_type_fb_spk_prot_status	*cal_data = data;
+	struct afe_spkr_prot_get_vi_calib		calib_resp;
+
+	pr_debug("%s:\n", __func__);
+
+	if (this_afe.cal_data[AFE_FB_SPKR_PROT_CAL] == NULL)
+		goto done;
+	if (cal_data == NULL)
+		goto done;
+	if (data_size != sizeof(*cal_data))
+		goto done;
+
+	mutex_lock(&this_afe.cal_data[AFE_FB_SPKR_PROT_CAL]->lock);
+	if (this_afe.prot_cfg.mode == MSM_SPKR_PROT_CALIBRATED) {
+		cal_data->cal_info.r0[SP_V2_SPKR_1] =
+			this_afe.prot_cfg.r0[SP_V2_SPKR_1];
+		cal_data->cal_info.r0[SP_V2_SPKR_2] =
+			this_afe.prot_cfg.r0[SP_V2_SPKR_2];
+		cal_data->cal_info.status = 0;
+	} else if (this_afe.prot_cfg.mode ==
+				MSM_SPKR_PROT_CALIBRATION_IN_PROGRESS) {
+		/* Call AFE to query the status */
+		cal_data->cal_info.status = -EINVAL;
+		cal_data->cal_info.r0[SP_V2_SPKR_1] = -1;
+		cal_data->cal_info.r0[SP_V2_SPKR_2] = -1;
+		if (!afe_spk_prot_get_calib_data(&calib_resp)) {
+			if (calib_resp.res_cfg.th_vi_ca_state ==
+							FBSP_IN_PROGRESS)
+				cal_data->cal_info.status = -EAGAIN;
+			else if (calib_resp.res_cfg.th_vi_ca_state ==
+							FBSP_SUCCESS) {
+				cal_data->cal_info.status = 0;
+				cal_data->cal_info.r0[SP_V2_SPKR_1] =
+				calib_resp.res_cfg.r0_cali_q24[SP_V2_SPKR_1];
+				cal_data->cal_info.r0[SP_V2_SPKR_2] =
+				calib_resp.res_cfg.r0_cali_q24[SP_V2_SPKR_2];
+			}
+		}
+		if (!cal_data->cal_info.status) {
+			this_afe.prot_cfg.mode =
+				MSM_SPKR_PROT_CALIBRATED;
+			this_afe.prot_cfg.r0[SP_V2_SPKR_1] =
+				cal_data->cal_info.r0[SP_V2_SPKR_1];
+			this_afe.prot_cfg.r0[SP_V2_SPKR_2] =
+				cal_data->cal_info.r0[SP_V2_SPKR_2];
+		}
+	} else {
+		/* Indicates calibration data is invalid */
+		cal_data->cal_info.status = -EINVAL;
+		cal_data->cal_info.r0[SP_V2_SPKR_1] = -1;
+		cal_data->cal_info.r0[SP_V2_SPKR_2] = -1;
+	}
+	mutex_unlock(&this_afe.cal_data[AFE_FB_SPKR_PROT_CAL]->lock);
+	__pm_relax(&wl.ws);
+done:
+	return ret;
+}
+
+static int afe_map_cal_data(int32_t cal_type,
+				struct cal_block_data *cal_block)
+{
+	int ret = 0;
+	int cal_index;
+
+	pr_debug("%s:\n", __func__);
+
+	cal_index = get_cal_type_index(cal_type);
+	if (cal_index < 0) {
+		pr_err("%s: could not get cal index %d!\n",
+			__func__, cal_index);
+		ret = -EINVAL;
+		goto done;
+	}
+
+	mutex_lock(&this_afe.afe_cmd_lock);
+	atomic_set(&this_afe.mem_map_cal_index, cal_index);
+	ret = afe_cmd_memory_map(cal_block->cal_data.paddr,
+			cal_block->map_data.map_size);
+	atomic_set(&this_afe.mem_map_cal_index, -1);
+	if (ret < 0) {
+		pr_err("%s: mmap did not work! size = %zd ret %d\n",
+			__func__,
+			cal_block->map_data.map_size, ret);
+		pr_debug("%s: mmap did not work! addr = 0x%pK, size = %zd\n",
+			__func__,
+			&cal_block->cal_data.paddr,
+			cal_block->map_data.map_size);
+		mutex_unlock(&this_afe.afe_cmd_lock);
+		goto done;
+	}
+	cal_block->map_data.q6map_handle =
+		atomic_read(&this_afe.mem_map_cal_handles[cal_index]);
+	mutex_unlock(&this_afe.afe_cmd_lock);
+done:
+	return ret;
+}
+
+static int afe_unmap_cal_data(int32_t cal_type,
+				struct cal_block_data *cal_block)
+{
+	int ret = 0;
+	int cal_index;
+
+	pr_debug("%s:\n", __func__);
+
+	cal_index = get_cal_type_index(cal_type);
+	if (cal_index < 0) {
+		pr_err("%s: could not get cal index %d!\n",
+			__func__, cal_index);
+		ret = -EINVAL;
+		goto done;
+	}
+
+	if (cal_block == NULL) {
+		pr_err("%s: Cal block is NULL!\n",
+						__func__);
+		goto done;
+	}
+
+	if (cal_block->map_data.q6map_handle == 0) {
+		pr_err("%s: Map handle is 0, nothing to unmap\n",
+				__func__);
+		goto done;
+	}
+
+	atomic_set(&this_afe.mem_map_cal_handles[cal_index],
+		cal_block->map_data.q6map_handle);
+	atomic_set(&this_afe.mem_map_cal_index, cal_index);
+	ret = afe_cmd_memory_unmap_nowait(
+		cal_block->map_data.q6map_handle);
+	atomic_set(&this_afe.mem_map_cal_index, -1);
+	if (ret < 0) {
+		pr_err("%s: unmap did not work! cal_index %d ret %d\n",
+			__func__, cal_index, ret);
+	}
+	cal_block->map_data.q6map_handle = 0;
+done:
+	return ret;
+}
+
+static void afe_delete_cal_data(void)
+{
+	pr_debug("%s:\n", __func__);
+
+	cal_utils_destroy_cal_types(MAX_AFE_CAL_TYPES, this_afe.cal_data);
+}
+
+static int afe_init_cal_data(void)
+{
+	int ret = 0;
+	struct cal_type_info	cal_type_info[] = {
+		{{AFE_COMMON_RX_CAL_TYPE,
+		{afe_alloc_cal, afe_dealloc_cal, NULL,
+		afe_set_cal, NULL, NULL} },
+		{afe_map_cal_data, afe_unmap_cal_data,
+		cal_utils_match_buf_num} },
+
+		{{AFE_COMMON_TX_CAL_TYPE,
+		{afe_alloc_cal, afe_dealloc_cal, NULL,
+		afe_set_cal, NULL, NULL} },
+		{afe_map_cal_data, afe_unmap_cal_data,
+		cal_utils_match_buf_num} },
+
+		{{AFE_LSM_TX_CAL_TYPE,
+		{afe_alloc_cal, afe_dealloc_cal, NULL,
+		afe_set_cal, NULL, NULL} },
+		{afe_map_cal_data, afe_unmap_cal_data,
+		cal_utils_match_buf_num} },
+
+		{{AFE_AANC_CAL_TYPE,
+		{afe_alloc_cal, afe_dealloc_cal, NULL,
+		afe_set_cal, NULL, NULL} },
+		{afe_map_cal_data, afe_unmap_cal_data,
+		cal_utils_match_buf_num} },
+
+		{{AFE_FB_SPKR_PROT_CAL_TYPE,
+		{NULL, NULL, NULL, afe_set_cal_fb_spkr_prot,
+		afe_get_cal_fb_spkr_prot, NULL} },
+		{NULL, NULL, cal_utils_match_buf_num} },
+
+		{{AFE_HW_DELAY_CAL_TYPE,
+		{NULL, NULL, NULL,
+		afe_set_cal, NULL, NULL} },
+		{NULL, NULL, cal_utils_match_buf_num} },
+
+		{{AFE_SIDETONE_CAL_TYPE,
+		{NULL, NULL, NULL,
+		afe_set_cal, NULL, NULL} },
+		{NULL, NULL, cal_utils_match_buf_num} },
+
+		{{AFE_SIDETONE_IIR_CAL_TYPE,
+		{NULL, NULL, NULL,
+		afe_set_cal, NULL, NULL} },
+		{NULL, NULL, cal_utils_match_buf_num} },
+
+		{{AFE_TOPOLOGY_CAL_TYPE,
+		{NULL, NULL, NULL,
+		afe_set_cal, NULL, NULL} },
+		{NULL, NULL,
+		cal_utils_match_buf_num} },
+
+		{{AFE_LSM_TOPOLOGY_CAL_TYPE,
+		{NULL, NULL, NULL,
+		afe_set_cal, NULL, NULL} },
+		{NULL, NULL,
+		cal_utils_match_buf_num} },
+
+		{{AFE_CUST_TOPOLOGY_CAL_TYPE,
+		{afe_alloc_cal, afe_dealloc_cal, NULL,
+		afe_set_cal, NULL, NULL} },
+		{afe_map_cal_data, afe_unmap_cal_data,
+		cal_utils_match_buf_num} },
+
+		{{AFE_FB_SPKR_PROT_TH_VI_CAL_TYPE,
+		{NULL, NULL, NULL, afe_set_cal_sp_th_vi_ftm_cfg,
+		afe_get_cal_sp_th_vi_ftm_param, NULL} },
+		{NULL, NULL, cal_utils_match_buf_num} },
+
+		{{AFE_FB_SPKR_PROT_EX_VI_CAL_TYPE,
+		{NULL, NULL, NULL, afe_set_cal_sp_ex_vi_ftm_cfg,
+		afe_get_cal_sp_ex_vi_ftm_param, NULL} },
+		{NULL, NULL, cal_utils_match_buf_num} },
+	};
+
+	pr_debug("%s:\n", __func__);
+
+	ret = cal_utils_create_cal_types(MAX_AFE_CAL_TYPES, this_afe.cal_data,
+		cal_type_info);
+	if (ret < 0) {
+		pr_err("%s: could not create cal type! %d\n",
+			__func__, ret);
+		ret = -EINVAL;
+		goto err;
+	}
+
+	return ret;
+err:
+	afe_delete_cal_data();
+	return ret;
+}
+
+int afe_map_rtac_block(struct rtac_cal_block_data *cal_block)
+{
+	int	result = 0;
+
+	pr_debug("%s:\n", __func__);
+
+	if (cal_block == NULL) {
+		pr_err("%s: cal_block is NULL!\n",
+			__func__);
+		result = -EINVAL;
+		goto done;
+	}
+
+	if (cal_block->cal_data.paddr == 0) {
+		pr_debug("%s: No address to map!\n",
+			__func__);
+		result = -EINVAL;
+		goto done;
+	}
+
+	if (cal_block->map_data.map_size == 0) {
+		pr_debug("%s: map size is 0!\n",
+			__func__);
+		result = -EINVAL;
+		goto done;
+	}
+
+	result = afe_cmd_memory_map(cal_block->cal_data.paddr,
+		cal_block->map_data.map_size);
+	if (result < 0) {
+		pr_err("%s: afe_cmd_memory_map failed for addr = 0x%pK, size = %d, err %d\n",
+			__func__, &cal_block->cal_data.paddr,
+			cal_block->map_data.map_size, result);
+		return result;
+	}
+	cal_block->map_data.map_handle = this_afe.mmap_handle;
+
+done:
+	return result;
+}
+
+int afe_unmap_rtac_block(uint32_t *mem_map_handle)
+{
+	int	result = 0;
+
+	pr_debug("%s:\n", __func__);
+
+	if (mem_map_handle == NULL) {
+		pr_err("%s: Map handle is NULL, nothing to unmap\n",
+			__func__);
+		goto done;
+	}
+
+	if (*mem_map_handle == 0) {
+		pr_debug("%s: Map handle is 0, nothing to unmap\n",
+			__func__);
+		goto done;
+	}
+
+	result = afe_cmd_memory_unmap(*mem_map_handle);
+	if (result) {
+		pr_err("%s: AFE memory unmap failed %d, handle 0x%x\n",
+		     __func__, result, *mem_map_handle);
+		goto done;
+	} else {
+		*mem_map_handle = 0;
+	}
+
+done:
+	return result;
+}
+
+static int __init afe_init(void)
+{
+	int i = 0, ret;
+
+	atomic_set(&this_afe.state, 0);
+	atomic_set(&this_afe.status, 0);
+	atomic_set(&this_afe.mem_map_cal_index, -1);
+	this_afe.apr = NULL;
+	this_afe.dtmf_gen_rx_portid = -1;
+	this_afe.mmap_handle = 0;
+	this_afe.vi_tx_port = -1;
+	this_afe.vi_rx_port = -1;
+	this_afe.prot_cfg.mode = MSM_SPKR_PROT_DISABLED;
+	this_afe.th_ftm_cfg.mode = MSM_SPKR_PROT_DISABLED;
+	this_afe.ex_ftm_cfg.mode = MSM_SPKR_PROT_DISABLED;
+	mutex_init(&this_afe.afe_cmd_lock);
+	for (i = 0; i < AFE_MAX_PORTS; i++) {
+		this_afe.afe_cal_mode[i] = AFE_CAL_MODE_DEFAULT;
+		this_afe.afe_sample_rates[i] = 0;
+		this_afe.dev_acdb_id[i] = 0;
+		init_waitqueue_head(&this_afe.wait[i]);
+	}
+	wakeup_source_init(&wl.ws, "spkr-prot");
+	ret = afe_init_cal_data();
+	if (ret)
+		pr_err("%s: could not init cal data! %d\n", __func__, ret);
+
+	config_debug_fs_init();
+	return 0;
+}
+
+static void __exit afe_exit(void)
+{
+	afe_delete_cal_data();
+
+	config_debug_fs_exit();
+	mutex_destroy(&this_afe.afe_cmd_lock);
+	wakeup_source_trash(&wl.ws);
+}
+
+device_initcall(afe_init);
+__exitcall(afe_exit);
diff -Nruw linux-4.4.115-fbx/sound/soc/msm./qdsp6v2/q6asm.c linux-4.4.115-fbx/sound/soc/msm/qdsp6v2/q6asm.c
--- linux-4.4.115-fbx/sound/soc/msm./qdsp6v2/q6asm.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/sound/soc/msm/qdsp6v2/q6asm.c	2019-10-29 09:26:26.161227819 +0100
@@ -0,0 +1,9927 @@
+/*
+ * Copyright (c) 2012-2019, The Linux Foundation. All rights reserved.
+ * Author: Brian Swetland <swetland@google.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/fs.h>
+#include <linux/mutex.h>
+#include <linux/wait.h>
+#include <linux/miscdevice.h>
+#include <linux/uaccess.h>
+#include <linux/sched.h>
+#include <linux/dma-mapping.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/msm_audio.h>
+
+#include <linux/debugfs.h>
+#include <linux/time.h>
+#include <linux/atomic.h>
+#include <linux/msm_audio_ion.h>
+#include <linux/mm.h>
+
+#include <asm/ioctls.h>
+
+#include <linux/memory.h>
+
+#include <sound/apr_audio-v2.h>
+#include <sound/q6asm-v2.h>
+#include <sound/q6core.h>
+#include <sound/q6audio-v2.h>
+#include <sound/audio_cal_utils.h>
+#include <sound/adsp_err.h>
+#include <sound/compress_params.h>
+
+#define TRUE        0x01
+#define FALSE       0x00
+#define SESSION_MAX 9
+#define ASM_MAX_CHANNELS 8
+
+enum {
+	ASM_TOPOLOGY_CAL = 0,
+	ASM_CUSTOM_TOP_CAL,
+	ASM_AUDSTRM_CAL,
+	ASM_RTAC_APR_CAL,
+	ASM_MAX_CAL_TYPES
+};
+
+union asm_token_struct {
+	struct {
+		u8 stream_id;
+		u8 session_id;
+		u8 buf_index;
+		u8 flags;
+	} _token;
+	u32 token;
+} __packed;
+
+
+enum {
+	ASM_DIRECTION_OFFSET,
+	ASM_CMD_NO_WAIT_OFFSET,
+	/*
+	 * Offsets are limited to 7 because flags is stored in the u8
+	 * field of union asm_token_struct defined above. Offsets
+	 * start from 0.
+	 */
+	ASM_MAX_OFFSET = 7,
+};
+
+enum {
+	WAIT_CMD,
+	NO_WAIT_CMD
+};
+
+#define ASM_SET_BIT(n, x)	((n) |= 1 << (x))
+#define ASM_TEST_BIT(n, x)	(((n) >> (x)) & 1)
+
+/* TODO, combine them together */
+static DEFINE_MUTEX(session_lock);
+struct asm_mmap {
+	atomic_t ref_cnt;
+	void *apr;
+};
+
+static struct asm_mmap this_mmap;
+
+struct audio_session {
+	struct audio_client *ac;
+	spinlock_t session_lock;
+};
+/* session id: 0 reserved */
+static struct audio_session session[ASM_ACTIVE_STREAMS_ALLOWED + 1];
+
+struct asm_buffer_node {
+	struct list_head list;
+	phys_addr_t buf_phys_addr;
+	uint32_t  mmap_hdl;
+};
+static int32_t q6asm_srvc_callback(struct apr_client_data *data, void *priv);
+static int32_t q6asm_callback(struct apr_client_data *data, void *priv);
+static void q6asm_add_hdr(struct audio_client *ac, struct apr_hdr *hdr,
+			uint32_t pkt_size, uint32_t cmd_flg);
+static void q6asm_add_hdr_custom_topology(struct audio_client *ac,
+					  struct apr_hdr *hdr,
+					  uint32_t pkt_size);
+static void q6asm_add_hdr_async(struct audio_client *ac, struct apr_hdr *hdr,
+			uint32_t pkt_size, uint32_t cmd_flg);
+static int q6asm_memory_map_regions(struct audio_client *ac, int dir,
+				uint32_t bufsz, uint32_t bufcnt,
+				bool is_contiguous);
+static int q6asm_memory_unmap_regions(struct audio_client *ac, int dir);
+static void q6asm_reset_buf_state(struct audio_client *ac);
+
+static int q6asm_map_channels(u8 *channel_mapping, uint32_t channels,
+				bool use_back_flavor);
+void *q6asm_mmap_apr_reg(void);
+
+static int q6asm_is_valid_session(struct apr_client_data *data, void *priv);
+static int q6asm_get_asm_topology_cal(void);
+static int q6asm_get_asm_app_type_cal(void);
+
+/* for ASM custom topology */
+static struct cal_type_data *cal_data[ASM_MAX_CAL_TYPES];
+static struct audio_buffer common_buf[2];
+static struct audio_client common_client;
+static int set_custom_topology;
+static int topology_map_handle;
+
+struct generic_get_data_ {
+	int valid;
+	int is_inband;
+	int size_in_ints;
+	int ints[];
+};
+static struct generic_get_data_ *generic_get_data;
+
+#ifdef CONFIG_DEBUG_FS
+#define OUT_BUFFER_SIZE 56
+#define IN_BUFFER_SIZE 24
+
+static struct timeval out_cold_tv;
+static struct timeval out_warm_tv;
+static struct timeval out_cont_tv;
+static struct timeval in_cont_tv;
+static long out_enable_flag;
+static long in_enable_flag;
+static struct dentry *out_dentry;
+static struct dentry *in_dentry;
+static int in_cont_index;
+/* Tracks whether the first write done has been logged, for cold output latency */
+static int out_cold_index;
+static char *out_buffer;
+static char *in_buffer;
+
+static uint32_t adsp_reg_event_opcode[] = {
+	ASM_STREAM_CMD_REGISTER_PP_EVENTS,
+	ASM_STREAM_CMD_REGISTER_ENCDEC_EVENTS,
+	ASM_STREAM_CMD_REGISTER_IEC_61937_FMT_UPDATE };
+
+static uint32_t adsp_raise_event_opcode[] = {
+	ASM_STREAM_PP_EVENT,
+	ASM_STREAM_CMD_ENCDEC_EVENTS,
+	ASM_IEC_61937_MEDIA_FMT_EVENT };
+
+static int is_adsp_reg_event(uint32_t cmd)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(adsp_reg_event_opcode); i++) {
+		if (cmd == adsp_reg_event_opcode[i])
+			return i;
+	}
+	return -EINVAL;
+}
+
+static int is_adsp_raise_event(uint32_t cmd)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(adsp_raise_event_opcode); i++) {
+		if (cmd == adsp_raise_event_opcode[i])
+			return i;
+	}
+	return -EINVAL;
+}
+
+static inline void q6asm_set_flag_in_token(union asm_token_struct *asm_token,
+					   int flag, int flag_offset)
+{
+	if (flag)
+		ASM_SET_BIT(asm_token->_token.flags, flag_offset);
+}
+
+static inline int q6asm_get_flag_from_token(union asm_token_struct *asm_token,
+					    int flag_offset)
+{
+	return ASM_TEST_BIT(asm_token->_token.flags, flag_offset);
+}
+
+static inline void q6asm_update_token(u32 *token, u8 session_id, u8 stream_id,
+				      u8 buf_index, u8 dir, u8 nowait_flag)
+{
+	union asm_token_struct asm_token;
+
+	asm_token.token = 0;
+	asm_token._token.session_id = session_id;
+	asm_token._token.stream_id = stream_id;
+	asm_token._token.buf_index = buf_index;
+	q6asm_set_flag_in_token(&asm_token, dir, ASM_DIRECTION_OFFSET);
+	q6asm_set_flag_in_token(&asm_token, nowait_flag,
+				  ASM_CMD_NO_WAIT_OFFSET);
+	*token = asm_token.token;
+}
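+
+/*
+ * Illustrative only (assumes a little-endian CPU, where the byte
+ * fields of union asm_token_struct overlay the u32 as shown):
+ *
+ *	u32 tok;
+ *
+ *	q6asm_update_token(&tok, 3, 1, 2, 0, 0);
+ *		tok == 0x00020301: stream 1, session 3, buf index 2,
+ *		flags 0 (direction bit and no-wait bit clear)
+ *	q6asm_get_buf_index_from_token(tok) returns 2
+ *	q6asm_get_stream_id_from_token(tok) returns 1
+ */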
+
+static inline uint32_t q6asm_get_pcm_format_id(uint32_t media_format_block_ver)
+{
+	uint32_t pcm_format_id;
+
+	switch (media_format_block_ver) {
+	case PCM_MEDIA_FORMAT_V4:
+		pcm_format_id = ASM_MEDIA_FMT_MULTI_CHANNEL_PCM_V4;
+		break;
+	case PCM_MEDIA_FORMAT_V3:
+		pcm_format_id = ASM_MEDIA_FMT_MULTI_CHANNEL_PCM_V3;
+		break;
+	case PCM_MEDIA_FORMAT_V2:
+	default:
+		pcm_format_id = ASM_MEDIA_FMT_MULTI_CHANNEL_PCM_V2;
+		break;
+	}
+	return pcm_format_id;
+}
+
+/*
+ * q6asm_get_buf_index_from_token:
+ *       Retrieve buffer index from token.
+ *
+ * @token: token value sent to ASM service on q6.
+ * Returns buffer index in the read/write commands.
+ */
+uint8_t q6asm_get_buf_index_from_token(uint32_t token)
+{
+	union asm_token_struct asm_token;
+
+	asm_token.token = token;
+	return asm_token._token.buf_index;
+}
+EXPORT_SYMBOL(q6asm_get_buf_index_from_token);
+
+/*
+ * q6asm_get_stream_id_from_token:
+ *       Retrieve stream id from token.
+ *
+ * @token: token value sent to ASM service on q6.
+ * Returns stream id.
+ */
+uint8_t q6asm_get_stream_id_from_token(uint32_t token)
+{
+	union asm_token_struct asm_token;
+
+	asm_token.token = token;
+	return asm_token._token.stream_id;
+}
+EXPORT_SYMBOL(q6asm_get_stream_id_from_token);
+
+static int audio_output_latency_dbgfs_open(struct inode *inode,
+							struct file *file)
+{
+	file->private_data = inode->i_private;
+	return 0;
+}
+static ssize_t audio_output_latency_dbgfs_read(struct file *file,
+				char __user *buf, size_t count, loff_t *ppos)
+{
+	if (out_buffer == NULL) {
+		pr_err("%s: out_buffer is null\n", __func__);
+		return 0;
+	}
+	if (count < OUT_BUFFER_SIZE) {
+		pr_err("%s: read size %zd is smaller than required %d\n",
+						__func__, count, OUT_BUFFER_SIZE);
+		return 0;
+	}
+	snprintf(out_buffer, OUT_BUFFER_SIZE, "%ld,%ld,%ld,%ld,%ld,%ld,",
+		out_cold_tv.tv_sec, out_cold_tv.tv_usec, out_warm_tv.tv_sec,
+		out_warm_tv.tv_usec, out_cont_tv.tv_sec, out_cont_tv.tv_usec);
+	return  simple_read_from_buffer(buf, OUT_BUFFER_SIZE, ppos,
+						out_buffer, OUT_BUFFER_SIZE);
+}
+static ssize_t audio_output_latency_dbgfs_write(struct file *file,
+			const char __user *buf, size_t count, loff_t *ppos)
+{
+	char *temp;
+
+	if (count > 2*sizeof(char)) {
+		pr_err("%s: count %zd is larger than expected\n",
+			__func__, count);
+		return -EINVAL;
+	}
+	temp = kmalloc(2*sizeof(char), GFP_KERNEL);
+
+	out_cold_index = 0;
+
+	if (temp) {
+		if (copy_from_user(temp, buf, 2*sizeof(char))) {
+			pr_err("%s: copy from user failed for size %zd\n",
+				__func__, 2*sizeof(char));
+			kfree(temp);
+			return -EFAULT;
+		}
+		if (!kstrtol(temp, 10, &out_enable_flag)) {
+			kfree(temp);
+			return count;
+		}
+		kfree(temp);
+	}
+	return -EINVAL;
+}
+static const struct file_operations audio_output_latency_debug_fops = {
+	.open = audio_output_latency_dbgfs_open,
+	.read = audio_output_latency_dbgfs_read,
+	.write = audio_output_latency_dbgfs_write
+};
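+
+/*
+ * Sketch of driving the output latency node from user space (the node
+ * is created at the debugfs root by config_debug_fs_init() below; the
+ * path assumes debugfs is mounted at /sys/kernel/debug, and the
+ * timestamp values here are made up):
+ *
+ *	echo 1 > /sys/kernel/debug/audio_out_latency_measurement_node
+ *	cat /sys/kernel/debug/audio_out_latency_measurement_node
+ *	12,345678,12,350000,12,360000,
+ *
+ * i.e. the cold, warm and continuous timestamps as "sec,usec," pairs,
+ * in the format produced by audio_output_latency_dbgfs_read() above.
+ */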
+static int audio_input_latency_dbgfs_open(struct inode *inode,
+							struct file *file)
+{
+	file->private_data = inode->i_private;
+	return 0;
+}
+static ssize_t audio_input_latency_dbgfs_read(struct file *file,
+				char __user *buf, size_t count, loff_t *ppos)
+{
+	if (in_buffer == NULL) {
+		pr_err("%s: in_buffer is null\n", __func__);
+		return 0;
+	}
+	if (count < IN_BUFFER_SIZE) {
+		pr_err("%s: read size %zd is smaller than required %d\n",
+						__func__, count, IN_BUFFER_SIZE);
+		return 0;
+	}
+	snprintf(in_buffer, IN_BUFFER_SIZE, "%ld,%ld,",
+				in_cont_tv.tv_sec, in_cont_tv.tv_usec);
+	return  simple_read_from_buffer(buf, IN_BUFFER_SIZE, ppos,
+						in_buffer, IN_BUFFER_SIZE);
+}
+static ssize_t audio_input_latency_dbgfs_write(struct file *file,
+			const char __user *buf, size_t count, loff_t *ppos)
+{
+	char *temp;
+
+	if (count > 2*sizeof(char)) {
+		pr_err("%s: count %zd is larger than expected\n",
+			__func__, count);
+		return -EINVAL;
+	}
+	temp = kmalloc(2*sizeof(char), GFP_KERNEL);
+	if (temp) {
+		if (copy_from_user(temp, buf, 2*sizeof(char))) {
+			pr_err("%s: copy from user failed for size %zd\n",
+				__func__, 2*sizeof(char));
+			kfree(temp);
+			return -EFAULT;
+		}
+		if (!kstrtol(temp, 10, &in_enable_flag)) {
+			kfree(temp);
+			return count;
+		}
+		kfree(temp);
+	}
+	return -EINVAL;
+}
+static const struct file_operations audio_input_latency_debug_fops = {
+	.open = audio_input_latency_dbgfs_open,
+	.read = audio_input_latency_dbgfs_read,
+	.write = audio_input_latency_dbgfs_write
+};
+
+static void config_debug_fs_write_cb(void)
+{
+	if (out_enable_flag) {
+		/*
+		 * For the first write done, log the time and set
+		 * out_cold_index.
+		 */
+		if (out_cold_index != 1) {
+			do_gettimeofday(&out_cold_tv);
+			pr_debug("COLD: apr_send_pkt at %ld sec %ld microsec\n",
+				out_cold_tv.tv_sec,
+				out_cold_tv.tv_usec);
+			out_cold_index = 1;
+		}
+		pr_debug("%s: out_enable_flag %ld\n",
+			__func__, out_enable_flag);
+	}
+}
+static void config_debug_fs_read_cb(void)
+{
+	if (in_enable_flag) {
+		/*
+		 * When in_cont_index == 7, the DSP is writing into the
+		 * 8th 512-byte buffer and the timestamp is tapped here.
+		 * Once done, it writes into the 9th 512-byte buffer.
+		 * These two buffers (8th and 9th) reach the test
+		 * application in the 5th iteration, where a second
+		 * timestamp is tapped at user level. The difference
+		 * between the two timestamps is the time between the
+		 * DSP starting to fill the required sample and it
+		 * reaching the test application, i.e. the continuous
+		 * input latency.
+		 */
+		if (in_cont_index == 7) {
+			do_gettimeofday(&in_cont_tv);
+			pr_info("%s: read buffer at %ld sec %ld microsec\n",
+				__func__,
+				in_cont_tv.tv_sec, in_cont_tv.tv_usec);
+		}
+		in_cont_index++;
+	}
+}
+
+static void config_debug_fs_reset_index(void)
+{
+	in_cont_index = 0;
+}
+
+static void config_debug_fs_run(void)
+{
+	if (out_enable_flag) {
+		do_gettimeofday(&out_cold_tv);
+		pr_debug("%s: COLD apr_send_pkt at %ld sec %ld microsec\n",
+			__func__, out_cold_tv.tv_sec, out_cold_tv.tv_usec);
+	}
+}
+
+static void config_debug_fs_write(struct audio_buffer *ab, int offset)
+{
+	if (out_enable_flag) {
+		char zero_pattern[2] = {0x00, 0x00};
+		char *data;
+
+		if ((offset < 0) || (offset > ab->size)) {
+			pr_err("%s: Invalid offset %d\n", __func__, offset);
+			return;
+		}
+
+		data = (char *)ab->data + offset;
+		/*
+		 * If the first two bytes are non-zero and the last two
+		 * bytes are zero, it is the warm output pattern.
+		 */
+		if ((strncmp(data, zero_pattern, 2)) &&
+		    (!strncmp((data + 2), zero_pattern, 2))) {
+			do_gettimeofday(&out_warm_tv);
+			pr_debug("%s: WARM:apr_send_pkt at %ld sec %ld microsec\n",
+				__func__,
+				out_warm_tv.tv_sec,
+				out_warm_tv.tv_usec);
+			pr_debug("%s: Warm Pattern Matched\n", __func__);
+		}
+		/*
+		 * If the first two bytes are zero and the last two
+		 * bytes are non-zero, it is the continuous output
+		 * pattern.
+		 */
+		else if ((!strncmp(data, zero_pattern, 2))
+		&& (strncmp((data + 2), zero_pattern, 2))) {
+			do_gettimeofday(&out_cont_tv);
+			pr_debug("%s: CONT:apr_send_pkt at %ld sec %ld microsec\n",
+			__func__,
+			out_cont_tv.tv_sec,
+			out_cont_tv.tv_usec);
+			pr_debug("%s: Cont Pattern Matched\n", __func__);
+		}
+	}
+}
+static void config_debug_fs_init(void)
+{
+	out_buffer = kzalloc(OUT_BUFFER_SIZE, GFP_KERNEL);
+	if (out_buffer == NULL) {
+		pr_err("%s: kzalloc() for out_buffer failed\n", __func__);
+		goto outbuf_fail;
+	}
+	in_buffer = kzalloc(IN_BUFFER_SIZE, GFP_KERNEL);
+	if (in_buffer == NULL) {
+		pr_err("%s: kzalloc() for in_buffer failed\n", __func__);
+		goto inbuf_fail;
+	}
+	out_dentry = debugfs_create_file("audio_out_latency_measurement_node",
+				S_IRUGO | S_IWUSR | S_IWGRP,
+				NULL, NULL, &audio_output_latency_debug_fops);
+	if (IS_ERR(out_dentry)) {
+		pr_err("%s: debugfs_create_file failed\n", __func__);
+		goto file_fail;
+	}
+	in_dentry = debugfs_create_file("audio_in_latency_measurement_node",
+				S_IRUGO | S_IWUSR | S_IWGRP,
+				NULL, NULL, &audio_input_latency_debug_fops);
+	if (IS_ERR(in_dentry)) {
+		pr_err("%s: debugfs_create_file failed\n", __func__);
+		goto file_fail;
+	}
+	return;
+file_fail:
+	kfree(in_buffer);
+inbuf_fail:
+	kfree(out_buffer);
+outbuf_fail:
+	in_buffer = NULL;
+	out_buffer = NULL;
+	return;
+}
+#else
+static void config_debug_fs_write(struct audio_buffer *ab, int offset)
+{
+	return;
+}
+static void config_debug_fs_run(void)
+{
+	return;
+}
+static void config_debug_fs_reset_index(void)
+{
+	return;
+}
+static void config_debug_fs_read_cb(void)
+{
+	return;
+}
+static void config_debug_fs_write_cb(void)
+{
+	return;
+}
+static void config_debug_fs_init(void)
+{
+	return;
+}
+#endif
+
+int q6asm_mmap_apr_dereg(void)
+{
+	int c;
+
+	c = atomic_sub_return(1, &this_mmap.ref_cnt);
+	if (c == 0) {
+		apr_deregister(this_mmap.apr);
+		common_client.mmap_apr = NULL;
+		pr_debug("%s: APR De-Register common port\n", __func__);
+	} else if (c < 0) {
+		pr_err("%s: APR Common Port Already Closed %d\n",
+			__func__, c);
+		atomic_set(&this_mmap.ref_cnt, 0);
+	}
+
+	return 0;
+}
+
+static int q6asm_session_alloc(struct audio_client *ac)
+{
+	int n;
+	for (n = 1; n <= ASM_ACTIVE_STREAMS_ALLOWED; n++) {
+		if (!(session[n].ac)) {
+			session[n].ac = ac;
+			return n;
+		}
+	}
+	pr_err("%s: session not available\n", __func__);
+	return -ENOMEM;
+}
+
+static int q6asm_get_session_id_from_audio_client(struct audio_client *ac)
+{
+	int n;
+	for (n = 1; n <= ASM_ACTIVE_STREAMS_ALLOWED; n++) {
+		if (session[n].ac == ac)
+			return n;
+	}
+	return 0;
+}
+
+static bool q6asm_is_valid_audio_client(struct audio_client *ac)
+{
+	return q6asm_get_session_id_from_audio_client(ac) ? 1 : 0;
+}
+
+static void q6asm_session_free(struct audio_client *ac)
+{
+	int session_id;
+	unsigned long flags;
+
+	pr_debug("%s: sessionid[%d]\n", __func__, ac->session);
+	session_id = ac->session;
+	rtac_remove_popp_from_adm_devices(ac->session);
+	spin_lock_irqsave(&(session[session_id].session_lock), flags);
+	session[ac->session].ac = NULL;
+	ac->session = 0;
+	ac->perf_mode = LEGACY_PCM_MODE;
+	ac->fptr_cache_ops = NULL;
+	ac->cb = NULL;
+	ac->priv = NULL;
+	kfree(ac);
+	ac = NULL;
+	spin_unlock_irqrestore(&(session[session_id].session_lock), flags);
+
+	return;
+}
+
+static uint32_t q6asm_get_next_buf(struct audio_client *ac,
+		uint32_t curr_buf, uint32_t max_buf_cnt)
+{
+	dev_vdbg(ac->dev, "%s: curr_buf = %d, max_buf_cnt = %d\n",
+		 __func__, curr_buf, max_buf_cnt);
+	curr_buf += 1;
+	return (curr_buf >= max_buf_cnt) ? 0 : curr_buf;
+}
+
+static int q6asm_map_cal_memory(int32_t cal_type,
+	struct cal_block_data *cal_block)
+{
+	int			result = 0;
+	struct asm_buffer_node	*buf_node = NULL;
+	struct list_head	*ptr, *next;
+
+	if (cal_block == NULL) {
+		pr_err("%s: cal_block is NULL!\n",
+			__func__);
+		goto done;
+	}
+
+	if (cal_block->cal_data.paddr == 0) {
+		pr_debug("%s: No address to map!\n",
+			__func__);
+		goto done;
+	}
+
+	common_client.mmap_apr = q6asm_mmap_apr_reg();
+	if (common_client.mmap_apr == NULL) {
+		pr_err("%s: q6asm_mmap_apr_reg failed\n",
+			__func__);
+		result = -EPERM;
+		goto done;
+	}
+	common_client.apr = common_client.mmap_apr;
+	if (cal_block->map_data.map_size == 0) {
+		pr_debug("%s: map size is 0!\n",
+			__func__);
+		goto done;
+	}
+
+	/* Use second asm buf to map memory */
+	if (common_client.port[IN].buf == NULL) {
+		pr_err("%s: common buf is NULL\n",
+			__func__);
+		result = -EINVAL;
+		goto done;
+	}
+
+	common_client.port[IN].buf->phys = cal_block->cal_data.paddr;
+
+	result = q6asm_memory_map_regions(&common_client,
+			IN, cal_block->map_data.map_size, 1, 1);
+	if (result < 0) {
+		pr_err("%s: mmap did not work! size = %zd result %d\n",
+			__func__,
+			cal_block->map_data.map_size, result);
+		pr_debug("%s: mmap did not work! addr = 0x%pK, size = %zd\n",
+			__func__,
+			&cal_block->cal_data.paddr,
+			cal_block->map_data.map_size);
+		goto done;
+	}
+
+	list_for_each_safe(ptr, next,
+		&common_client.port[IN].mem_map_handle) {
+		buf_node = list_entry(ptr, struct asm_buffer_node,
+					list);
+		if (buf_node->buf_phys_addr == cal_block->cal_data.paddr) {
+			cal_block->map_data.q6map_handle =  buf_node->mmap_hdl;
+			break;
+		}
+	}
+done:
+	return result;
+}
+
+static int remap_cal_data(int32_t cal_type, struct cal_block_data *cal_block)
+{
+	int ret = 0;
+
+	if (cal_block->map_data.ion_client == NULL) {
+		pr_err("%s: No ION allocation for cal type %d!\n",
+			__func__, cal_type);
+		ret = -EINVAL;
+		goto done;
+	}
+
+	if ((cal_block->map_data.map_size > 0) &&
+		(cal_block->map_data.q6map_handle == 0)) {
+
+		ret = q6asm_map_cal_memory(cal_type, cal_block);
+		if (ret < 0) {
+			pr_err("%s: mmap did not work! size = %zd ret %d\n",
+				__func__, cal_block->map_data.map_size, ret);
+			goto done;
+		}
+	}
+done:
+	return ret;
+}
+
+static int q6asm_unmap_cal_memory(int32_t cal_type,
+	struct cal_block_data *cal_block)
+{
+	int			result = 0;
+	int			result2 = 0;
+
+	if (cal_block == NULL) {
+		pr_err("%s: cal_block is NULL!\n",
+			__func__);
+		result = -EINVAL;
+		goto done;
+	}
+
+	if (cal_block->map_data.q6map_handle == 0) {
+		pr_debug("%s: No address to unmap!\n",
+			__func__);
+		result = -EINVAL;
+		goto done;
+	}
+
+	if (common_client.mmap_apr == NULL) {
+		common_client.mmap_apr = q6asm_mmap_apr_reg();
+		if (common_client.mmap_apr == NULL) {
+			pr_err("%s: q6asm_mmap_apr_reg failed\n",
+				__func__);
+			result = -EPERM;
+			goto done;
+		}
+	}
+
+	result2 = q6asm_memory_unmap_regions(&common_client, IN);
+	if (result2 < 0) {
+		pr_err("%s: unmap failed, err %d\n",
+			__func__, result2);
+		result = result2;
+	}
+
+	cal_block->map_data.q6map_handle = 0;
+done:
+	return result;
+}
+
+int q6asm_unmap_cal_data(int cal_type, struct cal_block_data *cal_block)
+{
+	int ret = 0;
+
+	if ((cal_block->map_data.map_size > 0) &&
+		(cal_block->map_data.q6map_handle != 0)) {
+
+		ret = q6asm_unmap_cal_memory(cal_type, cal_block);
+		if (ret < 0) {
+			pr_err("%s: unmap did not work! size = %zd ret %d\n",
+				__func__, cal_block->map_data.map_size, ret);
+			goto done;
+		}
+	}
+done:
+	return ret;
+}
+
+int send_asm_custom_topology(struct audio_client *ac)
+{
+	struct cal_block_data		*cal_block = NULL;
+	struct cmd_set_topologies	asm_top;
+	int result = 0;
+	int result1 = 0;
+
+	if (cal_data[ASM_CUSTOM_TOP_CAL] == NULL)
+		goto done;
+
+	mutex_lock(&cal_data[ASM_CUSTOM_TOP_CAL]->lock);
+	if (!set_custom_topology)
+		goto unlock;
+	set_custom_topology = 0;
+
+	cal_block = cal_utils_get_only_cal_block(cal_data[ASM_CUSTOM_TOP_CAL]);
+	if (cal_block == NULL)
+		goto unlock;
+
+	if (cal_block->cal_data.size == 0) {
+		pr_debug("%s: No cal to send!\n", __func__);
+		goto unlock;
+	}
+
+	pr_debug("%s: Sending cal_index %d\n", __func__, ASM_CUSTOM_TOP_CAL);
+
+	result = remap_cal_data(ASM_CUST_TOPOLOGY_CAL_TYPE, cal_block);
+	if (result) {
+		pr_err("%s: Remap_cal_data failed for cal %d!\n",
+			__func__, ASM_CUSTOM_TOP_CAL);
+		goto unlock;
+	}
+
+	q6asm_add_hdr_custom_topology(ac, &asm_top.hdr, sizeof(asm_top));
+	atomic_set(&ac->mem_state, -1);
+	asm_top.hdr.opcode = ASM_CMD_ADD_TOPOLOGIES;
+	asm_top.payload_addr_lsw = lower_32_bits(cal_block->cal_data.paddr);
+	asm_top.payload_addr_msw = msm_audio_populate_upper_32_bits(
+						cal_block->cal_data.paddr);
+	asm_top.mem_map_handle = cal_block->map_data.q6map_handle;
+	asm_top.payload_size = cal_block->cal_data.size;
+
+	pr_debug("%s: Sending ASM_CMD_ADD_TOPOLOGIES payload = %pK, size = %d, map handle = 0x%x\n",
+		__func__, &cal_block->cal_data.paddr,
+		asm_top.payload_size, asm_top.mem_map_handle);
+
+	result = apr_send_pkt(ac->apr, (uint32_t *) &asm_top);
+	if (result < 0) {
+		pr_err("%s: Set topologies failed result %d\n",
+			__func__, result);
+		pr_debug("%s: Set topologies failed payload = 0x%pK\n",
+			__func__, &cal_block->cal_data.paddr);
+		goto unmap;
+	}
+
+	result = wait_event_timeout(ac->mem_wait,
+			(atomic_read(&ac->mem_state) >= 0), 5*HZ);
+	if (!result) {
+		pr_err("%s: Set topologies failed timeout\n", __func__);
+		pr_debug("%s: Set topologies failed after timedout payload = 0x%pK\n",
+			__func__, &cal_block->cal_data.paddr);
+		result = -ETIMEDOUT;
+		goto unmap;
+	}
+	if (atomic_read(&ac->mem_state) > 0) {
+		pr_err("%s: DSP returned error[%s]\n",
+			__func__, adsp_err_get_err_str(
+			atomic_read(&ac->mem_state)));
+		result = adsp_err_get_lnx_err_code(
+			atomic_read(&ac->mem_state));
+		goto unmap;
+	}
+
+unmap:
+	result1 = q6asm_unmap_cal_memory(ASM_CUST_TOPOLOGY_CAL_TYPE,
+		cal_block);
+	if (result1 < 0) {
+		result = result1;
+		pr_debug("%s: unmap cal failed! %d\n", __func__, result);
+	}
+unlock:
+	mutex_unlock(&cal_data[ASM_CUSTOM_TOP_CAL]->lock);
+done:
+	return result;
+}
+
+int q6asm_map_rtac_block(struct rtac_cal_block_data *cal_block)
+{
+	int			result = 0;
+	struct asm_buffer_node	*buf_node = NULL;
+	struct list_head	*ptr, *next;
+	pr_debug("%s:\n", __func__);
+
+	if (cal_block == NULL) {
+		pr_err("%s: cal_block is NULL!\n",
+			__func__);
+		result = -EINVAL;
+		goto done;
+	}
+
+	if (cal_block->cal_data.paddr == 0) {
+		pr_debug("%s: No address to map!\n",
+			__func__);
+		result = -EINVAL;
+		goto done;
+	}
+
+	if (common_client.mmap_apr == NULL) {
+		common_client.mmap_apr = q6asm_mmap_apr_reg();
+		if (common_client.mmap_apr == NULL) {
+			pr_err("%s: q6asm_mmap_apr_reg failed\n",
+				__func__);
+			result = -EPERM;
+			goto done;
+		}
+	}
+
+	if (cal_block->map_data.map_size == 0) {
+		pr_debug("%s: map size is 0!\n",
+			__func__);
+		result = -EINVAL;
+		goto done;
+	}
+
+	/* Use second asm buf to map memory */
+	if (common_client.port[OUT].buf == NULL) {
+		pr_err("%s: common buf is NULL\n",
+			__func__);
+		result = -EINVAL;
+		goto done;
+	}
+
+	common_client.port[OUT].buf->phys = cal_block->cal_data.paddr;
+
+	result = q6asm_memory_map_regions(&common_client,
+			OUT, cal_block->map_data.map_size, 1, 1);
+	if (result < 0) {
+		pr_err("%s: mmap did not work! size = %d result %d\n",
+			__func__,
+			cal_block->map_data.map_size, result);
+		pr_debug("%s: mmap did not work! addr = 0x%pK, size = %d\n",
+			__func__,
+			&cal_block->cal_data.paddr,
+			cal_block->map_data.map_size);
+		goto done;
+	}
+
+	list_for_each_safe(ptr, next,
+		&common_client.port[OUT].mem_map_handle) {
+		buf_node = list_entry(ptr, struct asm_buffer_node,
+					list);
+		if (buf_node->buf_phys_addr == cal_block->cal_data.paddr) {
+			cal_block->map_data.map_handle =  buf_node->mmap_hdl;
+			break;
+		}
+	}
+done:
+	return result;
+}
+
+int q6asm_unmap_rtac_block(uint32_t *mem_map_handle)
+{
+	int	result = 0;
+	int	result2 = 0;
+	pr_debug("%s:\n", __func__);
+
+	if (mem_map_handle == NULL) {
+		pr_debug("%s: Map handle is NULL, nothing to unmap\n",
+			__func__);
+		goto done;
+	}
+
+	if (*mem_map_handle == 0) {
+		pr_debug("%s: Map handle is 0, nothing to unmap\n",
+			__func__);
+		goto done;
+	}
+
+	if (common_client.mmap_apr == NULL) {
+		common_client.mmap_apr = q6asm_mmap_apr_reg();
+		if (common_client.mmap_apr == NULL) {
+			pr_err("%s: q6asm_mmap_apr_reg failed\n",
+				__func__);
+			result = -EPERM;
+			goto done;
+		}
+	}
+
+	result2 = q6asm_memory_unmap_regions(&common_client, OUT);
+	if (result2 < 0) {
+		pr_err("%s: unmap failed, err %d\n",
+			__func__, result2);
+		result = result2;
+	} else {
+		*mem_map_handle = 0;
+	}
+
+	result2 = q6asm_mmap_apr_dereg();
+	if (result2 < 0) {
+		pr_err("%s: q6asm_mmap_apr_dereg failed, err %d\n",
+			__func__, result2);
+		result = result2;
+	}
+done:
+	return result;
+}
+
+int q6asm_audio_client_buf_free(unsigned int dir,
+			struct audio_client *ac)
+{
+	struct audio_port_data *port;
+	int cnt = 0;
+	int rc = 0;
+	pr_debug("%s: Session id %d\n", __func__, ac->session);
+	mutex_lock(&ac->cmd_lock);
+	if (ac->io_mode & SYNC_IO_MODE) {
+		port = &ac->port[dir];
+		if (!port->buf) {
+			pr_err("%s: buf NULL\n", __func__);
+			mutex_unlock(&ac->cmd_lock);
+			return 0;
+		}
+		cnt = port->max_buf_cnt - 1;
+
+		if (cnt >= 0) {
+			rc = q6asm_memory_unmap_regions(ac, dir);
+			if (rc < 0)
+				pr_err("%s: Memory_unmap_regions failed %d\n",
+								__func__, rc);
+		}
+
+		while (cnt >= 0) {
+			if (port->buf[cnt].data) {
+				if (!rc || atomic_read(&ac->reset))
+					msm_audio_ion_free(
+						port->buf[cnt].client,
+						port->buf[cnt].handle);
+
+				port->buf[cnt].client = NULL;
+				port->buf[cnt].handle = NULL;
+				port->buf[cnt].data = NULL;
+				port->buf[cnt].phys = 0;
+				--(port->max_buf_cnt);
+			}
+			--cnt;
+		}
+		kfree(port->buf);
+		port->buf = NULL;
+	}
+	mutex_unlock(&ac->cmd_lock);
+	return 0;
+}
+
+int q6asm_audio_client_buf_free_contiguous(unsigned int dir,
+			struct audio_client *ac)
+{
+	struct audio_port_data *port;
+	int cnt = 0;
+	int rc = 0;
+	pr_debug("%s: Session id %d\n", __func__, ac->session);
+	mutex_lock(&ac->cmd_lock);
+	port = &ac->port[dir];
+	if (!port->buf) {
+		mutex_unlock(&ac->cmd_lock);
+		return 0;
+	}
+	cnt = port->max_buf_cnt - 1;
+
+	if (cnt >= 0) {
+		rc = q6asm_memory_unmap(ac, port->buf[0].phys, dir);
+		if (rc < 0)
+			pr_err("%s: Memory_unmap failed %d\n",
+							__func__, rc);
+	}
+
+	if (port->buf[0].data) {
+		pr_debug("%s: data[%pK]phys[%pK][%pK] , client[%pK] handle[%pK]\n",
+			__func__,
+			port->buf[0].data,
+			&port->buf[0].phys,
+			&port->buf[0].phys,
+			port->buf[0].client,
+			port->buf[0].handle);
+		if (!rc || atomic_read(&ac->reset))
+			msm_audio_ion_free(port->buf[0].client,
+					   port->buf[0].handle);
+		port->buf[0].client = NULL;
+		port->buf[0].handle = NULL;
+	}
+
+	while (cnt >= 0) {
+		port->buf[cnt].data = NULL;
+		port->buf[cnt].phys = 0;
+		cnt--;
+	}
+	port->max_buf_cnt = 0;
+	kfree(port->buf);
+	port->buf = NULL;
+	mutex_unlock(&ac->cmd_lock);
+	return 0;
+}
+
+void q6asm_audio_client_free(struct audio_client *ac)
+{
+	int loopcnt;
+	struct audio_port_data *port;
+
+	if (!ac) {
+		pr_err("%s: ac %pK\n", __func__, ac);
+		return;
+	}
+	if (!ac->session) {
+		pr_err("%s: ac session invalid\n", __func__);
+		return;
+	}
+
+	mutex_lock(&session_lock);
+
+	pr_debug("%s: Session id %d\n", __func__, ac->session);
+	if (ac->io_mode & SYNC_IO_MODE) {
+		for (loopcnt = 0; loopcnt <= OUT; loopcnt++) {
+			port = &ac->port[loopcnt];
+			if (!port->buf)
+				continue;
+			pr_debug("%s: loopcnt = %d\n",
+				__func__, loopcnt);
+			q6asm_audio_client_buf_free(loopcnt, ac);
+		}
+	}
+
+	rtac_set_asm_handle(ac->session, NULL);
+	apr_deregister(ac->apr2);
+	apr_deregister(ac->apr);
+	q6asm_mmap_apr_dereg();
+	ac->apr2 = NULL;
+	ac->apr = NULL;
+	ac->mmap_apr = NULL;
+	q6asm_session_free(ac);
+
+	pr_debug("%s: APR De-Register\n", __func__);
+
+	mutex_unlock(&session_lock);
+
+	return;
+}
+
+int q6asm_set_io_mode(struct audio_client *ac, uint32_t mode1)
+{
+	uint32_t mode;
+
+	if (ac == NULL) {
+		pr_err("%s: APR handle NULL\n", __func__);
+		return -EINVAL;
+	}
+
+	ac->io_mode &= 0xFF00;
+	mode = (mode1 & 0xF);
+
+	pr_debug("%s: ac->io_mode after ANDing with 0xFF00: 0x%x\n",
+		__func__, ac->io_mode);
+
+	if ((mode == ASYNC_IO_MODE) || (mode == SYNC_IO_MODE)) {
+		ac->io_mode |= mode1;
+		pr_debug("%s: Set Mode to 0x%x\n", __func__, ac->io_mode);
+		return 0;
+	} else {
+		pr_err("%s: Not a valid IO Mode: %d\n", __func__, ac->io_mode);
+		return -EINVAL;
+	}
+}
+
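+/*
+ * Register the shared ASM mmap port with APR and take a reference on
+ * it. Every successful call must be balanced by a call to
+ * q6asm_mmap_apr_dereg(), which deregisters the port once ref_cnt
+ * drops back to zero.
+ */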
+void *q6asm_mmap_apr_reg(void)
+{
+	if ((atomic_read(&this_mmap.ref_cnt) == 0) ||
+	    (this_mmap.apr == NULL)) {
+		this_mmap.apr = apr_register("ADSP", "ASM",
+					(apr_fn)q6asm_srvc_callback,
+					0x0FFFFFFFF, &this_mmap);
+		if (this_mmap.apr == NULL) {
+			pr_debug("%s: Unable to register APR ASM common port\n",
+			 __func__);
+			goto fail;
+		}
+	}
+	atomic_inc(&this_mmap.ref_cnt);
+
+	return this_mmap.apr;
+fail:
+	return NULL;
+}
+
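+/*
+ * Send a register-events command to the stream: the packet is an
+ * apr_hdr immediately followed by data->payload_len bytes of raw
+ * event payload, so its total size is
+ * sizeof(struct apr_hdr) + payload_len (range-checked against U32_MAX
+ * before the allocation below).
+ */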
+int q6asm_send_stream_cmd(struct audio_client *ac,
+			  struct msm_adsp_event_data *data)
+{
+	char *asm_params = NULL;
+	struct apr_hdr hdr;
+	int rc;
+	uint32_t sz = 0;
+	uint64_t actual_sz = 0;
+
+	if (!data || !ac) {
+		pr_err("%s: %s is NULL\n", __func__,
+			(!data) ? "data" : "ac");
+		rc = -EINVAL;
+		goto done;
+	}
+
+	if (data->event_type >= ARRAY_SIZE(adsp_reg_event_opcode)) {
+		pr_err("%s: event %u is out of bounds for array of size %lu\n",
+		       __func__, data->event_type,
+		       (unsigned long)ARRAY_SIZE(adsp_reg_event_opcode));
+		rc = -EINVAL;
+		goto done;
+	}
+
+	actual_sz = sizeof(struct apr_hdr) + data->payload_len;
+	if (actual_sz > U32_MAX) {
+		pr_err("%s: payload size 0x%X exceeds limit\n",
+				__func__, data->payload_len);
+		rc = -EINVAL;
+		goto done;
+	}
+
+	sz = (uint32_t)actual_sz;
+	asm_params = kzalloc(sz, GFP_KERNEL);
+	if (!asm_params) {
+		rc = -ENOMEM;
+		goto done;
+	}
+
+	q6asm_add_hdr_async(ac, &hdr, sz, TRUE);
+	atomic_set(&ac->cmd_state_pp, -1);
+	hdr.opcode = adsp_reg_event_opcode[data->event_type];
+	memcpy(asm_params, &hdr, sizeof(struct apr_hdr));
+	memcpy(asm_params + sizeof(struct apr_hdr),
+		data->payload, data->payload_len);
+	rc = apr_send_pkt(ac->apr, (uint32_t *) asm_params);
+	if (rc < 0) {
+		pr_err("%s: stream event cmd apr pkt failed\n", __func__);
+		rc = -EINVAL;
+		goto fail_send_param;
+	}
+
+	rc = wait_event_timeout(ac->cmd_wait,
+				(atomic_read(&ac->cmd_state_pp) >= 0), 1 * HZ);
+	if (!rc) {
+		pr_err("%s: timeout for stream event cmd resp\n", __func__);
+		rc = -ETIMEDOUT;
+		goto fail_send_param;
+	}
+
+	if (atomic_read(&ac->cmd_state_pp) > 0) {
+		pr_err("%s: DSP returned error[%s] for stream event cmd\n",
+				__func__, adsp_err_get_err_str(
+				atomic_read(&ac->cmd_state_pp)));
+		rc = adsp_err_get_lnx_err_code(
+				atomic_read(&ac->cmd_state_pp));
+		goto fail_send_param;
+	}
+
+	rc = 0;
+fail_send_param:
+	kfree(asm_params);
+done:
+	return rc;
+}
+
+struct audio_client *q6asm_audio_client_alloc(app_cb cb, void *priv)
+{
+	struct audio_client *ac;
+	int n;
+	int lcnt = 0;
+	int rc = 0;
+
+	ac = kzalloc(sizeof(struct audio_client), GFP_KERNEL);
+	if (!ac) {
+		pr_err("%s: audio client allocation failed\n", __func__);
+		return NULL;
+	}
+
+	mutex_lock(&session_lock);
+	n = q6asm_session_alloc(ac);
+	if (n <= 0) {
+		pr_err("%s: ASM Session alloc fail n=%d\n", __func__, n);
+		mutex_unlock(&session_lock);
+		kfree(ac);
+		goto fail_session;
+	}
+	ac->session = n;
+	ac->cb = cb;
+	ac->path_delay = UINT_MAX;
+	ac->priv = priv;
+	ac->io_mode = SYNC_IO_MODE;
+	ac->perf_mode = LEGACY_PCM_MODE;
+	ac->fptr_cache_ops = NULL;
+	/* DSP expects stream id from 1 */
+	ac->stream_id = 1;
+	ac->apr = apr_register("ADSP", "ASM",
+			(apr_fn)q6asm_callback,
+			((ac->session) << 8 | 0x0001),
+			ac);
+
+	if (ac->apr == NULL) {
+		pr_err("%s: Registration with APR failed\n", __func__);
+		mutex_unlock(&session_lock);
+		goto fail_apr1;
+	}
+	ac->apr2 = apr_register("ADSP", "ASM",
+			(apr_fn)q6asm_callback,
+			((ac->session) << 8 | 0x0002),
+			ac);
+
+	if (ac->apr2 == NULL) {
+		pr_err("%s: Registration with APR-2 failed\n", __func__);
+		mutex_unlock(&session_lock);
+		goto fail_apr2;
+	}
+
+	rtac_set_asm_handle(n, ac->apr);
+
+	pr_debug("%s: Registering the common port with APR\n", __func__);
+	ac->mmap_apr = q6asm_mmap_apr_reg();
+	if (ac->mmap_apr == NULL) {
+		mutex_unlock(&session_lock);
+		goto fail_mmap;
+	}
+
+	init_waitqueue_head(&ac->cmd_wait);
+	init_waitqueue_head(&ac->time_wait);
+	init_waitqueue_head(&ac->mem_wait);
+	atomic_set(&ac->time_flag, 1);
+	atomic_set(&ac->reset, 0);
+	INIT_LIST_HEAD(&ac->port[0].mem_map_handle);
+	INIT_LIST_HEAD(&ac->port[1].mem_map_handle);
+	pr_debug("%s: mem_map_handle list init'ed\n", __func__);
+	mutex_init(&ac->cmd_lock);
+	for (lcnt = 0; lcnt <= OUT; lcnt++) {
+		mutex_init(&ac->port[lcnt].lock);
+		spin_lock_init(&ac->port[lcnt].dsp_lock);
+	}
+	atomic_set(&ac->cmd_state, 0);
+	atomic_set(&ac->cmd_state_pp, 0);
+	atomic_set(&ac->mem_state, 0);
+
+	rc = send_asm_custom_topology(ac);
+	if (rc < 0) {
+		mutex_unlock(&session_lock);
+		goto fail_mmap;
+	}
+
+	pr_debug("%s: session[%d]\n", __func__, ac->session);
+
+	mutex_unlock(&session_lock);
+
+	return ac;
+fail_mmap:
+	apr_deregister(ac->apr2);
+fail_apr2:
+	apr_deregister(ac->apr);
+fail_apr1:
+	q6asm_session_free(ac);
+fail_session:
+	return NULL;
+}
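+
+/*
+ * Illustrative caller-side sketch (not part of this driver; my_cb,
+ * my_priv, bufsz and bufcnt are hypothetical placeholders):
+ *
+ *	struct audio_client *ac;
+ *
+ *	ac = q6asm_audio_client_alloc(my_cb, my_priv);
+ *	if (!ac)
+ *		return -ENOMEM;
+ *	q6asm_set_io_mode(ac, SYNC_IO_MODE);
+ *	q6asm_audio_client_buf_alloc_contiguous(IN, ac, bufsz, bufcnt);
+ *	...
+ *	q6asm_audio_client_free(ac);
+ */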
+
+struct audio_client *q6asm_get_audio_client(int session_id)
+{
+	if (session_id == ASM_CONTROL_SESSION) {
+		return &common_client;
+	}
+
+	if ((session_id <= 0) || (session_id > ASM_ACTIVE_STREAMS_ALLOWED)) {
+		pr_err("%s: invalid session: %d\n", __func__, session_id);
+		goto err;
+	}
+
+	if (!(session[session_id].ac)) {
+		pr_err("%s: session not active: %d\n", __func__, session_id);
+		goto err;
+	}
+	return session[session_id].ac;
+err:
+	return NULL;
+}
+
+int q6asm_audio_client_buf_alloc(unsigned int dir,
+			struct audio_client *ac,
+			unsigned int bufsz,
+			uint32_t bufcnt)
+{
+	int cnt = 0;
+	int rc = 0;
+	struct audio_buffer *buf;
+	size_t len;
+
+	if (!(ac) || !(bufsz) || ((dir != IN) && (dir != OUT))) {
+		pr_err("%s: ac %pK bufsz %d dir %d\n", __func__, ac, bufsz,
+			dir);
+		return -EINVAL;
+	}
+
+	pr_debug("%s: session[%d]bufsz[%d]bufcnt[%d]\n", __func__, ac->session,
+		bufsz, bufcnt);
+
+	if (ac->session <= 0 || ac->session > ASM_ACTIVE_STREAMS_ALLOWED) {
+		pr_err("%s: Session ID is invalid, session = %d\n", __func__,
+			ac->session);
+		goto fail;
+	}
+
+	if (ac->io_mode & SYNC_IO_MODE) {
+		if (ac->port[dir].buf) {
+			pr_debug("%s: buffer already allocated\n", __func__);
+			return 0;
+		}
+		mutex_lock(&ac->cmd_lock);
+		if (bufcnt > (U32_MAX/sizeof(struct audio_buffer))) {
+			pr_err("%s: Buffer size overflows\n", __func__);
+			mutex_unlock(&ac->cmd_lock);
+			goto fail;
+		}
+		buf = kzalloc(((sizeof(struct audio_buffer))*bufcnt),
+				GFP_KERNEL);
+
+		if (!buf) {
+			mutex_unlock(&ac->cmd_lock);
+			goto fail;
+		}
+
+		ac->port[dir].buf = buf;
+
+		while (cnt < bufcnt) {
+			if (bufsz > 0) {
+				if (!buf[cnt].data) {
+					rc = msm_audio_ion_alloc("asm_client",
+					&buf[cnt].client, &buf[cnt].handle,
+					      bufsz,
+					      (ion_phys_addr_t *)&buf[cnt].phys,
+					      &len,
+					      &buf[cnt].data);
+					if (rc) {
+						pr_err("%s: ION Get Physical for AUDIO failed, rc = %d\n",
+							__func__, rc);
+						mutex_unlock(&ac->cmd_lock);
+						goto fail;
+					}
+
+					buf[cnt].used = 1;
+					buf[cnt].size = bufsz;
+					buf[cnt].actual_size = bufsz;
+					pr_debug("%s: data[%pK]phys[%pK][%pK]\n",
+						__func__,
+					   buf[cnt].data,
+					   &buf[cnt].phys,
+					   &buf[cnt].phys);
+					cnt++;
+				}
+			}
+		}
+		ac->port[dir].max_buf_cnt = cnt;
+
+		mutex_unlock(&ac->cmd_lock);
+		rc = q6asm_memory_map_regions(ac, dir, bufsz, cnt, 0);
+		if (rc < 0) {
+			pr_err("%s: CMD Memory_map_regions failed %d for size %d\n",
+				__func__, rc, bufsz);
+			goto fail;
+		}
+	}
+	return 0;
+fail:
+	q6asm_audio_client_buf_free(dir, ac);
+	return -EINVAL;
+}
+
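+/*
+ * Allocate bufcnt buffers of bufsz bytes from one page-aligned ION
+ * region and map it as a single region: buf[n] is carved out at
+ * offset n * bufsz from buf[0], so e.g. with bufsz = 4096 and
+ * bufcnt = 2, buf[1].phys == buf[0].phys + 4096.
+ */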
+int q6asm_audio_client_buf_alloc_contiguous(unsigned int dir,
+			struct audio_client *ac,
+			unsigned int bufsz,
+			unsigned int bufcnt)
+{
+	int cnt = 0;
+	int rc = 0;
+	struct audio_buffer *buf;
+	size_t len;
+	int bytes_to_alloc;
+
+	if (!(ac) || ((dir != IN) && (dir != OUT))) {
+		pr_err("%s: ac %pK dir %d\n", __func__, ac, dir);
+		return -EINVAL;
+	}
+
+	pr_debug("%s: session[%d]bufsz[%d]bufcnt[%d]\n",
+			__func__, ac->session,
+			bufsz, bufcnt);
+
+	if (ac->session <= 0 || ac->session > ASM_ACTIVE_STREAMS_ALLOWED) {
+		pr_err("%s: Session ID is invalid, session = %d\n", __func__,
+			ac->session);
+		goto fail;
+	}
+
+	if (ac->port[dir].buf) {
+		pr_err("%s: buffer already allocated\n", __func__);
+		return 0;
+	}
+	mutex_lock(&ac->cmd_lock);
+	buf = kzalloc(((sizeof(struct audio_buffer))*bufcnt),
+			GFP_KERNEL);
+
+	if (!buf) {
+		pr_err("%s: buffer allocation failed\n", __func__);
+		mutex_unlock(&ac->cmd_lock);
+		goto fail;
+	}
+
+	ac->port[dir].buf = buf;
+
+	/* check for integer overflow */
+	if ((bufcnt > 0) && ((INT_MAX / bufcnt) < bufsz)) {
+		pr_err("%s: integer overflow\n", __func__);
+		mutex_unlock(&ac->cmd_lock);
+		goto fail;
+	}
+	bytes_to_alloc = bufsz * bufcnt;
+
+	/* The size to allocate should be multiple of 4K bytes */
+	bytes_to_alloc = PAGE_ALIGN(bytes_to_alloc);
+
+	rc = msm_audio_ion_alloc("asm_client", &buf[0].client, &buf[0].handle,
+		bytes_to_alloc,
+		(ion_phys_addr_t *)&buf[0].phys, &len,
+		&buf[0].data);
+	if (rc) {
+		pr_err("%s: Audio ION alloc is failed, rc = %d\n",
+			__func__, rc);
+		mutex_unlock(&ac->cmd_lock);
+		goto fail;
+	}
+
+	buf[0].used = dir ^ 1;
+	buf[0].size = bufsz;
+	buf[0].actual_size = bufsz;
+	cnt = 1;
+	while (cnt < bufcnt) {
+		if (bufsz > 0) {
+			buf[cnt].data =  buf[0].data + (cnt * bufsz);
+			buf[cnt].phys =  buf[0].phys + (cnt * bufsz);
+			if (!buf[cnt].data) {
+				pr_err("%s: Buf alloc failed\n",
+							__func__);
+				mutex_unlock(&ac->cmd_lock);
+				goto fail;
+			}
+			buf[cnt].used = dir ^ 1;
+			buf[cnt].size = bufsz;
+			buf[cnt].actual_size = bufsz;
+			pr_debug("%s: data[%pK]phys[%pK][%pK]\n",
+				__func__,
+				buf[cnt].data,
+				&buf[cnt].phys,
+				&buf[cnt].phys);
+		}
+		cnt++;
+	}
+	ac->port[dir].max_buf_cnt = cnt;
+	mutex_unlock(&ac->cmd_lock);
+	rc = q6asm_memory_map_regions(ac, dir, bufsz, cnt, 1);
+	if (rc < 0) {
+		pr_err("%s: CMD Memory_map_regions failed %d for size %d\n",
+			__func__, rc, bufsz);
+		goto fail;
+	}
+	return 0;
+fail:
+	q6asm_audio_client_buf_free_contiguous(dir, ac);
+	return -EINVAL;
+}
+
+static int32_t q6asm_srvc_callback(struct apr_client_data *data, void *priv)
+{
+	uint32_t dir = 0;
+	uint32_t i = IN;
+	uint32_t *payload;
+	unsigned long dsp_flags;
+	unsigned long flags;
+	struct asm_buffer_node *buf_node = NULL;
+	struct list_head *ptr, *next;
+	union asm_token_struct asm_token;
+
+	struct audio_client *ac = NULL;
+	struct audio_port_data *port;
+
+	int session_id;
+
+	if (!data) {
+		pr_err("%s: Invalid CB\n", __func__);
+		return 0;
+	}
+
+	payload = data->payload;
+
+	if (data->opcode == RESET_EVENTS) {
+		pr_debug("%s: Reset event is received: %d %d apr[%pK]\n",
+				__func__,
+				data->reset_event,
+				data->reset_proc,
+				this_mmap.apr);
+		atomic_set(&this_mmap.ref_cnt, 0);
+		apr_reset(this_mmap.apr);
+		this_mmap.apr = NULL;
+		for (; i <= OUT; i++) {
+			list_for_each_safe(ptr, next,
+				&common_client.port[i].mem_map_handle) {
+				buf_node = list_entry(ptr,
+						struct asm_buffer_node,
+						list);
+				if (buf_node->buf_phys_addr ==
+				common_client.port[i].buf->phys) {
+					list_del(&buf_node->list);
+					kfree(buf_node);
+				}
+			}
+			pr_debug("%s: Clearing custom topology\n", __func__);
+		}
+
+		cal_utils_clear_cal_block_q6maps(ASM_MAX_CAL_TYPES, cal_data);
+		common_client.mmap_apr = NULL;
+		mutex_lock(&cal_data[ASM_CUSTOM_TOP_CAL]->lock);
+		set_custom_topology = 1;
+		mutex_unlock(&cal_data[ASM_CUSTOM_TOP_CAL]->lock);
+		topology_map_handle = 0;
+		rtac_clear_mapping(ASM_RTAC_CAL);
+		return 0;
+	}
+
+	asm_token.token = data->token;
+	session_id = asm_token._token.session_id;
+
+	if ((session_id > 0 && session_id <= ASM_ACTIVE_STREAMS_ALLOWED))
+		spin_lock_irqsave(&(session[session_id].session_lock), flags);
+
+	ac = q6asm_get_audio_client(session_id);
+	dir = q6asm_get_flag_from_token(&asm_token, ASM_DIRECTION_OFFSET);
+
+	if (!ac) {
+		pr_debug("%s: session[%d] already freed\n",
+			 __func__, session_id);
+		if ((session_id > 0 &&
+			session_id <= ASM_ACTIVE_STREAMS_ALLOWED))
+			spin_unlock_irqrestore(
+				&(session[session_id].session_lock), flags);
+		return 0;
+	}
+
+	if (data->payload_size >= 2 * sizeof(uint32_t)) {
+		pr_debug("%s:ptr0[0x%x]ptr1[0x%x]opcode[0x%x] token[0x%x]payload_s[%d] src[%d] dest[%d]sid[%d]dir[%d]\n",
+			__func__, payload[0], payload[1], data->opcode,
+			data->token, data->payload_size, data->src_port,
+			data->dest_port, asm_token._token.session_id, dir);
+		pr_debug("%s:Payload = [0x%x] status[0x%x]\n",
+			__func__, payload[0], payload[1]);
+	} else if (data->payload_size == sizeof(uint32_t)) {
+		pr_debug("%s:ptr0[0x%x]opcode[0x%x] token[0x%x]payload_s[%d] src[%d] dest[%d]sid[%d]dir[%d]\n",
+			__func__, payload[0], data->opcode,
+			data->token, data->payload_size, data->src_port,
+			data->dest_port, asm_token._token.session_id, dir);
+		pr_debug("%s:Payload = [0x%x]\n",
+			__func__, payload[0]);
+	}
+
+	if (data->opcode == APR_BASIC_RSP_RESULT) {
+		switch (payload[0]) {
+		case ASM_CMD_SHARED_MEM_MAP_REGIONS:
+		case ASM_CMD_SHARED_MEM_UNMAP_REGIONS:
+		case ASM_CMD_ADD_TOPOLOGIES:
+			if (data->payload_size >= 2 * sizeof(uint32_t)
+				&& payload[1] != 0) {
+				pr_err("%s: cmd = 0x%x returned error = 0x%x sid:%d\n",
+				       __func__, payload[0], payload[1],
+				       asm_token._token.session_id);
+				if (payload[0] ==
+				    ASM_CMD_SHARED_MEM_UNMAP_REGIONS)
+					atomic_set(&ac->unmap_cb_success, 0);
+
+				atomic_set(&ac->mem_state, payload[1]);
+				wake_up(&ac->mem_wait);
+			} else {
+				if (payload[0] ==
+				    ASM_CMD_SHARED_MEM_UNMAP_REGIONS)
+					atomic_set(&ac->unmap_cb_success, 1);
+			}
+
+			if (atomic_cmpxchg(&ac->mem_state, -1, 0) == -1)
+				wake_up(&ac->mem_wait);
+			if (data->payload_size >= 2 * sizeof(uint32_t))
+				dev_vdbg(ac->dev, "%s: Payload = [0x%x] status[0x%x]\n",
+					__func__, payload[0], payload[1]);
+			break;
+		default:
+			pr_debug("%s: command[0x%x] not expecting rsp\n",
+						__func__, payload[0]);
+			break;
+		}
+		if ((session_id > 0 &&
+			session_id <= ASM_ACTIVE_STREAMS_ALLOWED))
+			spin_unlock_irqrestore(
+				&(session[session_id].session_lock), flags);
+		return 0;
+	}
+
+	port = &ac->port[dir];
+
+	switch (data->opcode) {
+	case ASM_CMDRSP_SHARED_MEM_MAP_REGIONS:{
+		pr_debug("%s:PL#0[0x%x] dir=0x%x s_id=0x%x\n",
+		       __func__, payload[0], dir, asm_token._token.session_id);
+		spin_lock_irqsave(&port->dsp_lock, dsp_flags);
+		if (atomic_cmpxchg(&ac->mem_state, -1, 0) == -1) {
+			ac->port[dir].tmp_hdl = payload[0];
+			wake_up(&ac->mem_wait);
+		}
+		spin_unlock_irqrestore(&port->dsp_lock, dsp_flags);
+		break;
+	}
+	case ASM_CMD_SHARED_MEM_UNMAP_REGIONS:{
+		if (data->payload_size >= 2 * sizeof(uint32_t))
+			pr_debug("%s: PL#0[0x%x]PL#1 [0x%x]\n",
+				__func__, payload[0], payload[1]);
+
+		spin_lock_irqsave(&port->dsp_lock, dsp_flags);
+		if (atomic_cmpxchg(&ac->mem_state, -1, 0) == -1)
+			wake_up(&ac->mem_wait);
+		spin_unlock_irqrestore(&port->dsp_lock, dsp_flags);
+
+		break;
+	}
+	default:
+		if (data->payload_size >= 2 * sizeof(uint32_t))
+			pr_debug("%s: command[0x%x]success [0x%x]\n",
+				__func__, payload[0], payload[1]);
+	}
+	if (ac->cb)
+		ac->cb(data->opcode, data->token,
+			data->payload, ac->priv);
+	if ((session_id > 0 && session_id <= ASM_ACTIVE_STREAMS_ALLOWED))
+		spin_unlock_irqrestore(
+			&(session[session_id].session_lock), flags);
+
+	return 0;
+}
+
+static void q6asm_process_mtmx_get_param_rsp(struct audio_client *ac,
+				struct asm_mtmx_strtr_get_params_cmdrsp *cmdrsp)
+{
+	struct asm_session_mtmx_strtr_param_session_time_v3_t *time;
+
+	if (cmdrsp->err_code) {
+		dev_err_ratelimited(ac->dev,
+				    "%s: err=%x, mod_id=%x, param_id=%x\n",
+				    __func__, cmdrsp->err_code,
+				    cmdrsp->param_info.module_id,
+				    cmdrsp->param_info.param_id);
+		return;
+	}
+
+	switch (cmdrsp->param_info.module_id) {
+	case ASM_SESSION_MTMX_STRTR_MODULE_ID_AVSYNC:
+		switch (cmdrsp->param_info.param_id) {
+		case ASM_SESSION_MTMX_STRTR_PARAM_SESSION_TIME_V3:
+			time = &cmdrsp->param_data.session_time;
+			dev_vdbg(ac->dev, "%s: GET_TIME_V3, time_lsw=%x, time_msw=%x\n",
+				 __func__, time->session_time_lsw,
+				 time->session_time_msw);
+			ac->time_stamp = (uint64_t)(((uint64_t)
+					 time->session_time_msw << 32) |
+					 time->session_time_lsw);
+			if (time->flags &
+			    ASM_SESSION_MTMX_STRTR_PARAM_STIME_TSTMP_FLG_BMASK)
+				dev_warn_ratelimited(ac->dev,
+						     "%s: recv inval tstmp\n",
+						     __func__);
+			if (atomic_cmpxchg(&ac->time_flag, 1, 0))
+				wake_up(&ac->time_wait);
+
+			break;
+		default:
+			dev_err(ac->dev, "%s: unexpected param_id %x\n",
+				__func__, cmdrsp->param_info.param_id);
+			break;
+		}
+		break;
+	default:
+		dev_err(ac->dev, "%s: unexpected mod_id %x\n",  __func__,
+			cmdrsp->param_info.module_id);
+		break;
+	}
+}
+
+static int32_t q6asm_callback(struct apr_client_data *data, void *priv)
+{
+	int i = 0;
+	struct audio_client *ac = (struct audio_client *)priv;
+	unsigned long dsp_flags;
+	uint32_t *payload;
+	uint32_t wakeup_flag = 1;
+	int32_t  ret = 0;
+	union asm_token_struct asm_token;
+	uint8_t buf_index;
+	struct msm_adsp_event_data *pp_event_package = NULL;
+	uint32_t payload_size = 0;
+	unsigned long flags;
+	int session_id;
+
+	if (ac == NULL) {
+		pr_err("%s: ac NULL\n", __func__);
+		return -EINVAL;
+	}
+	if (data == NULL) {
+		pr_err("%s: data NULL\n", __func__);
+		return -EINVAL;
+	}
+
+	session_id = q6asm_get_session_id_from_audio_client(ac);
+	if (session_id <= 0 || session_id > ASM_ACTIVE_STREAMS_ALLOWED) {
+		pr_err("%s: Session ID is invalid, session = %d\n", __func__,
+			session_id);
+		return -EINVAL;
+	}
+
+	spin_lock_irqsave(&(session[session_id].session_lock), flags);
+
+	if (!q6asm_is_valid_audio_client(ac)) {
+		pr_err("%s: audio client pointer is invalid, ac = %pK\n",
+				__func__, ac);
+		spin_unlock_irqrestore(
+			&(session[session_id].session_lock), flags);
+		return -EINVAL;
+	}
+
+	payload = data->payload;
+	asm_token.token = data->token;
+	if (q6asm_get_flag_from_token(&asm_token, ASM_CMD_NO_WAIT_OFFSET)) {
+		pr_debug("%s: No wait command opcode[0x%x] cmd_opcode:%x\n",
+			 __func__, data->opcode, payload ? payload[0] : 0);
+		wakeup_flag = 0;
+	}
+
+	if (data->opcode == RESET_EVENTS) {
+		atomic_set(&ac->reset, 1);
+		if (ac->apr == NULL) {
+			ac->apr = ac->apr2;
+			ac->apr2 = NULL;
+		}
+		pr_debug("%s: Reset event is received: %d %d apr[%pK]\n",
+			__func__,
+			data->reset_event, data->reset_proc, ac->apr);
+		if (ac->cb)
+			ac->cb(data->opcode, data->token,
+				(uint32_t *)data->payload, ac->priv);
+		apr_reset(ac->apr);
+		ac->apr = NULL;
+		atomic_set(&ac->time_flag, 0);
+		atomic_set(&ac->cmd_state, 0);
+		atomic_set(&ac->mem_state, 0);
+		atomic_set(&ac->cmd_state_pp, 0);
+		wake_up(&ac->time_wait);
+		wake_up(&ac->cmd_wait);
+		wake_up(&ac->mem_wait);
+		spin_unlock_irqrestore(
+			&(session[session_id].session_lock), flags);
+		return 0;
+	}
+
+	dev_vdbg(ac->dev, "%s: session[%d]opcode[0x%x] token[0x%x]payload_size[%d] src[%d] dest[%d]\n",
+		 __func__,
+		ac->session, data->opcode,
+		data->token, data->payload_size, data->src_port,
+		data->dest_port);
+	if ((data->opcode != ASM_DATA_EVENT_RENDERED_EOS) &&
+	    (data->opcode != ASM_DATA_EVENT_EOS) &&
+	    (data->opcode != ASM_SESSION_EVENTX_OVERFLOW) &&
+	    (data->opcode != ASM_SESSION_EVENT_RX_UNDERFLOW)) {
+		if (payload == NULL ||
+				(data->payload_size < (2 * sizeof(uint32_t)))) {
+			pr_err("%s: payload is null or invalid size[%d]\n",
+				__func__, data->payload_size);
+			spin_unlock_irqrestore(
+				&(session[session_id].session_lock), flags);
+			return -EINVAL;
+		}
+		if (data->payload_size >= 2 * sizeof(uint32_t))
+			dev_vdbg(ac->dev, "%s: Payload = [0x%x] status[0x%x] opcode 0x%x\n",
+				__func__, payload[0], payload[1], data->opcode);
+	}
+	if (data->opcode == APR_BASIC_RSP_RESULT) {
+		switch (payload[0]) {
+		case ASM_STREAM_CMD_SET_PP_PARAMS_V2:
+			if (rtac_make_asm_callback(ac->session, payload,
+					data->payload_size))
+				break;
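+			/* fall through */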
+		case ASM_SESSION_CMD_PAUSE:
+		case ASM_SESSION_CMD_SUSPEND:
+		case ASM_DATA_CMD_EOS:
+		case ASM_STREAM_CMD_CLOSE:
+		case ASM_STREAM_CMD_FLUSH:
+		case ASM_SESSION_CMD_RUN_V2:
+		case ASM_SESSION_CMD_REGISTER_FORX_OVERFLOW_EVENTS:
+		case ASM_STREAM_CMD_FLUSH_READBUFS:
+		pr_debug("%s: session %d opcode 0x%x token 0x%x Payload = [0x%x] src %d dest %d\n",
+			__func__, ac->session, data->opcode, data->token,
+			payload[0], data->src_port, data->dest_port);
+		ret = q6asm_is_valid_session(data, priv);
+		if (ret != 0) {
+			pr_err("%s: session invalid %d\n", __func__, ret);
+			spin_unlock_irqrestore(
+				&(session[session_id].session_lock), flags);
+			return ret;
+		}
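+		/* fall through */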
+		case ASM_SESSION_CMD_SET_MTMX_STRTR_PARAMS_V2:
+		case ASM_STREAM_CMD_OPEN_READ_V3:
+		case ASM_STREAM_CMD_OPEN_WRITE_V3:
+		case ASM_STREAM_CMD_OPEN_PULL_MODE_WRITE:
+		case ASM_STREAM_CMD_OPEN_PUSH_MODE_READ:
+		case ASM_STREAM_CMD_OPEN_READWRITE_V2:
+		case ASM_STREAM_CMD_OPEN_LOOPBACK_V2:
+		case ASM_STREAM_CMD_OPEN_TRANSCODE_LOOPBACK:
+		case ASM_DATA_CMD_MEDIA_FMT_UPDATE_V2:
+		case ASM_DATA_CMD_IEC_60958_MEDIA_FMT:
+		case ASM_STREAM_CMD_SET_ENCDEC_PARAM:
+		case ASM_STREAM_CMD_SET_ENCDEC_PARAM_V2:
+		case ASM_STREAM_CMD_REGISTER_ENCDEC_EVENTS:
+		case ASM_STREAM_CMD_REGISTER_IEC_61937_FMT_UPDATE:
+		case ASM_DATA_CMD_REMOVE_INITIAL_SILENCE:
+		case ASM_DATA_CMD_REMOVE_TRAILING_SILENCE:
+		case ASM_SESSION_CMD_REGISTER_FOR_RX_UNDERFLOW_EVENTS:
+		case ASM_STREAM_CMD_OPEN_WRITE_COMPRESSED:
+			if (data->payload_size >=
+				2 * sizeof(uint32_t) &&
+				payload[1] != 0) {
+				pr_debug("%s: session %d opcode 0x%x token 0x%x Payload = [0x%x] stat 0x%x src %d dest %d\n",
+					__func__, ac->session,
+					data->opcode, data->token,
+					payload[0], payload[1],
+					data->src_port, data->dest_port);
+				pr_err("%s: cmd = 0x%x returned error = 0x%x\n",
+					__func__,
+					payload[0],
+					payload[1]);
+				if (wakeup_flag) {
+					if ((is_adsp_reg_event(payload[0]) >= 0)
+					      || (payload[0] ==
+					      ASM_STREAM_CMD_SET_PP_PARAMS_V2))
+						atomic_set(&ac->cmd_state_pp,
+								payload[1]);
+					else
+						atomic_set(
+							&ac->cmd_state,
+							payload[1]);
+					wake_up(&ac->cmd_wait);
+				}
+				spin_unlock_irqrestore(
+					&(session[session_id].session_lock),
+					flags);
+				return 0;
+			}
+			if ((is_adsp_reg_event(payload[0]) >= 0) ||
+			    (payload[0] == ASM_STREAM_CMD_SET_PP_PARAMS_V2)) {
+				if (atomic_read(&ac->cmd_state_pp) &&
+					wakeup_flag) {
+					atomic_set(&ac->cmd_state_pp, 0);
+					wake_up(&ac->cmd_wait);
+				}
+			} else {
+				if (atomic_read(&ac->cmd_state) &&
+					wakeup_flag) {
+					atomic_set(&ac->cmd_state, 0);
+					wake_up(&ac->cmd_wait);
+				}
+			}
+			if (ac->cb)
+				ac->cb(data->opcode, data->token,
+					(uint32_t *)data->payload, ac->priv);
+			break;
+		case ASM_CMD_ADD_TOPOLOGIES:
+			if (data->payload_size >=
+				2 * sizeof(uint32_t) &&
+				payload[1] != 0) {
+				pr_debug("%s:Payload = [0x%x]stat[0x%x]\n",
+					__func__, payload[0], payload[1]);
+				pr_err("%s: cmd = 0x%x returned error = 0x%x\n",
+					__func__, payload[0], payload[1]);
+				if (wakeup_flag) {
+					atomic_set(&ac->mem_state, payload[1]);
+					wake_up(&ac->mem_wait);
+				}
+				spin_unlock_irqrestore(
+					&(session[session_id].session_lock),
+					flags);
+				return 0;
+			}
+			if (atomic_read(&ac->mem_state) && wakeup_flag) {
+				atomic_set(&ac->mem_state, 0);
+				wake_up(&ac->mem_wait);
+			}
+			if (ac->cb)
+				ac->cb(data->opcode, data->token,
+					(uint32_t *)data->payload, ac->priv);
+			break;
+		case ASM_DATA_EVENT_WATERMARK: {
+			if (data->payload_size >= 2 * sizeof(uint32_t))
+				pr_debug("%s: Watermark opcode[0x%x] status[0x%x]",
+					__func__, payload[0], payload[1]);
+			break;
+		}
+		case ASM_STREAM_CMD_GET_PP_PARAMS_V2:
+			pr_debug("%s: ASM_STREAM_CMD_GET_PP_PARAMS_V2 session %d opcode 0x%x token 0x%x src %d dest %d\n",
+				__func__, ac->session,
+				data->opcode, data->token,
+				data->src_port, data->dest_port);
+			/*
+			 * Should only come here if there is an APR
+			 * error or malformed APR packet. Otherwise
+			 * the response will be returned as
+			 * ASM_STREAM_CMDRSP_GET_PP_PARAMS_V2.
+			 */
+			if (data->payload_size >= 2 * sizeof(uint32_t)) {
+				if (payload[1] != 0) {
+					pr_err("%s: ASM get param error = %d, resuming\n",
+						__func__, payload[1]);
+					rtac_make_asm_callback(ac->session,
+							payload,
+							data->payload_size);
+				}
+			}
+			break;
+		case ASM_STREAM_CMD_REGISTER_PP_EVENTS:
+			pr_debug("%s: ASM_STREAM_CMD_REGISTER_PP_EVENTS session %d opcode 0x%x token 0x%x src %d dest %d\n",
+				__func__, ac->session,
+				data->opcode, data->token,
+				data->src_port, data->dest_port);
+			if (data->payload_size >= 2 * sizeof(uint32_t)) {
+				if (payload[1] != 0)
+					pr_err("%s: ASM get param error = %d, resuming\n",
+						__func__, payload[1]);
+				atomic_set(&ac->cmd_state_pp, payload[1]);
+				wake_up(&ac->cmd_wait);
+			}
+			break;
+		default:
+			pr_debug("%s: command[0x%x] not expecting rsp\n",
+							__func__, payload[0]);
+			break;
+		}
+
+		spin_unlock_irqrestore(
+			&(session[session_id].session_lock), flags);
+		return 0;
+	}
+
+	switch (data->opcode) {
+	case ASM_DATA_EVENT_WRITE_DONE_V2:{
+		struct audio_port_data *port = &ac->port[IN];
+		if (data->payload_size >= 2 * sizeof(uint32_t))
+			dev_vdbg(ac->dev, "%s: Rxed opcode[0x%x] status[0x%x] token[%d]",
+					__func__, payload[0], payload[1],
+					data->token);
+		if (ac->io_mode & SYNC_IO_MODE) {
+			if (port->buf == NULL) {
+				pr_err("%s: Unexpected Write Done\n",
+								__func__);
+				spin_unlock_irqrestore(
+					&(session[session_id].session_lock),
+					flags);
+				return -EINVAL;
+			}
+			spin_lock_irqsave(&port->dsp_lock, dsp_flags);
+			buf_index = asm_token._token.buf_index;
+			if (buf_index >= port->max_buf_cnt) {
+				pr_debug("%s: Invalid buffer index %u\n",
+					__func__, buf_index);
+				spin_unlock_irqrestore(&port->dsp_lock,
+								dsp_flags);
+				spin_unlock_irqrestore(
+					&(session[session_id].session_lock),
+					flags);
+				return -EINVAL;
+			}
+			if (data->payload_size >= 2 * sizeof(uint32_t) &&
+				(lower_32_bits(port->buf[buf_index].phys) !=
+				payload[0] ||
+				msm_audio_populate_upper_32_bits(
+					port->buf[buf_index].phys) !=
+				payload[1])) {
+				pr_debug("%s: Expected addr %pK\n",
+				__func__, &port->buf[buf_index].phys);
+				pr_err("%s: rxedl[0x%x] rxedu [0x%x]\n",
+					__func__, payload[0], payload[1]);
+				spin_unlock_irqrestore(&port->dsp_lock,
+								dsp_flags);
+				spin_unlock_irqrestore(
+					&(session[session_id].session_lock),
+					flags);
+				return -EINVAL;
+			}
+			port->buf[buf_index].used = 1;
+			spin_unlock_irqrestore(&port->dsp_lock, dsp_flags);
+
+
+			for (i = 0; i < port->max_buf_cnt; i++)
+				dev_vdbg(ac->dev, "%s %d\n",
+					__func__, port->buf[i].used);
+
+		}
+		config_debug_fs_write_cb();
+		break;
+	}
+	case ASM_STREAM_CMDRSP_GET_PP_PARAMS_V2:
+		pr_debug("%s: ASM_STREAM_CMDRSP_GET_PP_PARAMS_V2 session %d opcode 0x%x token 0x%x src %d dest %d\n",
+				__func__, ac->session, data->opcode,
+				data->token,
+				data->src_port, data->dest_port);
+		if (payload[0] != 0) {
+			pr_err("%s: ASM_STREAM_CMDRSP_GET_PP_PARAMS_V2 returned error = 0x%x\n",
+				__func__, payload[0]);
+		} else if (generic_get_data) {
+			generic_get_data->valid = 1;
+			if (generic_get_data->is_inband) {
+				if (data->payload_size >= 4 * sizeof(uint32_t))
+					pr_debug("%s: payload[1] = 0x%x, payload[2]=0x%x, payload[3]=0x%x\n",
+							__func__,
+							payload[1],
+							payload[2],
+							payload[3]);
+
+				if (data->payload_size >=
+					(4 + (payload[3]>>2))
+					* sizeof(uint32_t)) {
+					generic_get_data->size_in_ints =
+						payload[3]>>2;
+					for (i = 0; i < payload[3]>>2; i++) {
+						generic_get_data->ints[i] =
+							payload[4+i];
+						pr_debug("%s: ASM callback val %i = %i\n",
+							 __func__, i,
+							 payload[4+i]);
+					}
+				}
+				pr_debug("%s: callback size in ints = %i\n",
+					 __func__,
+					generic_get_data->size_in_ints);
+			}
+			if (atomic_read(&ac->cmd_state) && wakeup_flag) {
+				atomic_set(&ac->cmd_state, 0);
+				wake_up(&ac->cmd_wait);
+			}
+			break;
+		}
+		rtac_make_asm_callback(ac->session, payload,
+			data->payload_size);
+		break;
+	case ASM_DATA_EVENT_READ_DONE_V2:{
+		struct audio_port_data *port = &ac->port[OUT];
+
+		config_debug_fs_read_cb();
+
+		dev_vdbg(ac->dev, "%s: ReadDone: status=%d buff_add=0x%x act_size=%d offset=%d\n",
+				__func__, payload[READDONE_IDX_STATUS],
+				payload[READDONE_IDX_BUFADD_LSW],
+				payload[READDONE_IDX_SIZE],
+				payload[READDONE_IDX_OFFSET]);
+
+		dev_vdbg(ac->dev, "%s: ReadDone:msw_ts=%d lsw_ts=%d memmap_hdl=0x%x flags=%d id=%d num=%d\n",
+				__func__, payload[READDONE_IDX_MSW_TS],
+				payload[READDONE_IDX_LSW_TS],
+				payload[READDONE_IDX_MEMMAP_HDL],
+				payload[READDONE_IDX_FLAGS],
+				payload[READDONE_IDX_SEQ_ID],
+				payload[READDONE_IDX_NUMFRAMES]);
+
+		if (ac->io_mode & SYNC_IO_MODE) {
+			if (port->buf == NULL) {
+				pr_err("%s: Unexpected Read Done\n", __func__);
+				spin_unlock_irqrestore(
+					&(session[session_id].session_lock),
+					flags);
+				return -EINVAL;
+			}
+			spin_lock_irqsave(&port->dsp_lock, dsp_flags);
+			buf_index = asm_token._token.buf_index;
+			if (buf_index < 0 || buf_index >= port->max_buf_cnt) {
+				pr_debug("%s: Invalid buffer index %u\n",
+					__func__, buf_index);
+				spin_unlock_irqrestore(&port->dsp_lock,
+								dsp_flags);
+				spin_unlock_irqrestore(
+					&(session[session_id].session_lock),
+					flags);
+				return -EINVAL;
+			}
+			port->buf[buf_index].used = 0;
+			if (lower_32_bits(port->buf[buf_index].phys) !=
+			payload[READDONE_IDX_BUFADD_LSW] ||
+			msm_audio_populate_upper_32_bits(
+				port->buf[buf_index].phys) !=
+					payload[READDONE_IDX_BUFADD_MSW]) {
+				dev_vdbg(ac->dev, "%s: Expected addr %pK\n",
+					__func__, &port->buf[buf_index].phys);
+				pr_err("%s: rxedl[0x%x] rxedu[0x%x]\n",
+					__func__,
+				payload[READDONE_IDX_BUFADD_LSW],
+				payload[READDONE_IDX_BUFADD_MSW]);
+				spin_unlock_irqrestore(&port->dsp_lock,
+							dsp_flags);
+				break;
+			}
+			port->buf[buf_index].actual_size =
+				payload[READDONE_IDX_SIZE];
+			spin_unlock_irqrestore(&port->dsp_lock, dsp_flags);
+		}
+		break;
+	}
+	case ASM_DATA_EVENT_EOS:
+	case ASM_DATA_EVENT_RENDERED_EOS:
+		pr_debug("%s: EOS ACK received: rxed session %d opcode 0x%x token 0x%x src %d dest %d\n",
+				__func__, ac->session,
+				data->opcode, data->token,
+				data->src_port, data->dest_port);
+		break;
+	case ASM_SESSION_EVENTX_OVERFLOW:
+		pr_debug("%s: ASM_SESSION_EVENTX_OVERFLOW session %d opcode 0x%x token 0x%x src %d dest %d\n",
+				__func__, ac->session,
+				data->opcode, data->token,
+				data->src_port, data->dest_port);
+		break;
+	case ASM_SESSION_EVENT_RX_UNDERFLOW:
+		pr_debug("%s: ASM_SESSION_EVENT_RX_UNDERFLOW session %d opcode 0x%x token 0x%x src %d dest %d\n",
+				__func__, ac->session,
+				data->opcode, data->token,
+				data->src_port, data->dest_port);
+		break;
+	case ASM_SESSION_CMDRSP_GET_SESSIONTIME_V3:
+		if (data->payload_size >= 3 * sizeof(uint32_t)) {
+			dev_vdbg(ac->dev, "%s: ASM_SESSION_CMDRSP_GET_SESSIONTIME_V3, payload[0] = %d, payload[1] = %d, payload[2] = %d\n",
+					 __func__,
+					 payload[0], payload[1], payload[2]);
+			ac->time_stamp =
+				(uint64_t)(((uint64_t)payload[2] << 32) |
+					payload[1]);
+		}
+		if (atomic_cmpxchg(&ac->time_flag, 1, 0))
+			wake_up(&ac->time_wait);
+		break;
+	case ASM_DATA_EVENT_SR_CM_CHANGE_NOTIFY:
+	case ASM_DATA_EVENT_ENC_SR_CM_CHANGE_NOTIFY:
+		pr_debug("%s: ASM_DATA_EVENT_SR_CM_CHANGE_NOTIFY session %d opcode 0x%x token 0x%x src %d dest %d\n",
+				__func__, ac->session,
+				data->opcode, data->token,
+				data->src_port, data->dest_port);
+		if (data->payload_size >= 4 * sizeof(uint32_t))
+			pr_debug("%s: ASM_DATA_EVENT_SR_CM_CHANGE_NOTIFY, payload[0] = %d, payload[1] = %d, payload[2] = %d, payload[3] = %d\n",
+					 __func__,
+					payload[0], payload[1], payload[2],
+					payload[3]);
+		break;
+	case ASM_SESSION_CMDRSP_GET_MTMX_STRTR_PARAMS_V2:
+		q6asm_process_mtmx_get_param_rsp(ac, (void *) payload);
+		break;
+	case ASM_STREAM_PP_EVENT:
+	case ASM_STREAM_CMD_ENCDEC_EVENTS:
+	case ASM_STREAM_CMD_REGISTER_IEC_61937_FMT_UPDATE:
+		if (data->payload_size >= 2 * sizeof(uint32_t))
+			pr_debug("%s: ASM_STREAM_PP_EVENT payload[0][0x%x] payload[1][0x%x]\n",
+				__func__, payload[0], payload[1]);
+		i = is_adsp_raise_event(data->opcode);
+		if (i < 0) {
+			spin_unlock_irqrestore(
+				&(session[session_id].session_lock), flags);
+			return 0;
+		}
+
+		/*
+		 * Repack the payload for the asm_stream_pp_event callback:
+		 * the package is composed of event type + size + the actual
+		 * payload.
+		 */
+		payload_size = data->payload_size;
+		if (payload_size > UINT_MAX - sizeof(payload_size)) {
+			pr_err("%s: payload size = %u exceeds limit.\n",
+				__func__, payload_size);
+			spin_unlock_irqrestore(
+				&(session[session_id].session_lock), flags);
+			return -EINVAL;
+		}
+
+		pp_event_package = kzalloc(payload_size + sizeof(payload_size),
+				GFP_ATOMIC);
+		if (!pp_event_package) {
+			spin_unlock_irqrestore(
+				&(session[session_id].session_lock), flags);
+			return -ENOMEM;
+		}
+
+		pp_event_package->event_type = i;
+		pp_event_package->payload_len = payload_size;
+		memcpy((void *)pp_event_package->payload,
+			data->payload, payload_size);
+		ac->cb(data->opcode, data->token,
+			(void *)pp_event_package, ac->priv);
+		kfree(pp_event_package);
+		spin_unlock_irqrestore(
+			&(session[session_id].session_lock), flags);
+		return 0;
+	case ASM_SESSION_CMDRSP_ADJUST_SESSION_CLOCK_V2:
+		pr_debug("%s: ASM_SESSION_CMDRSP_ADJUST_SESSION_CLOCK_V2 session %d status 0x%x msw %u lsw %u\n",
+			 __func__, ac->session, payload[0], payload[2],
+			 payload[1]);
+		wake_up(&ac->cmd_wait);
+		break;
+	case ASM_SESSION_CMDRSP_GET_PATH_DELAY_V2:
+		if (data->payload_size >= 3 * sizeof(uint32_t))
+			pr_debug("%s: ASM_SESSION_CMDRSP_GET_PATH_DELAY_V2 session %d status 0x%x msw %u lsw %u\n",
+				__func__, ac->session,
+				payload[0], payload[2],
+				payload[1]);
+		if (payload[0] == 0 &&
+			data->payload_size >=
+				2 * sizeof(uint32_t)) {
+			atomic_set(&ac->cmd_state, 0);
+			/* ignore msw, as a delay that large shouldn't happen */
+			ac->path_delay = payload[1];
+		} else {
+			atomic_set(&ac->cmd_state, payload[0]);
+			ac->path_delay = UINT_MAX;
+		}
+		wake_up(&ac->cmd_wait);
+		break;
+	}
+	if (ac->cb)
+		ac->cb(data->opcode, data->token,
+			data->payload, ac->priv);
+	spin_unlock_irqrestore(
+		&(session[session_id].session_lock), flags);
+	return 0;
+}
+
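+/*
+ * q6asm_is_cpu_buf_avail - return the next CPU-owned buffer for a session.
+ *
+ * In SYNC_IO_MODE, checks whether the buffer at port->cpu_buf for the given
+ * direction is free, marks it in use and advances the cpu_buf index.
+ * Returns the buffer's kernel virtual address and fills *size and *index,
+ * or NULL if no buffer is available.
+ */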
+void *q6asm_is_cpu_buf_avail(int dir, struct audio_client *ac, uint32_t *size,
+				uint32_t *index)
+{
+	void *data;
+	unsigned char idx;
+	struct audio_port_data *port;
+
+	if (!ac || ((dir != IN) && (dir != OUT))) {
+		pr_err("%s: ac %pK dir %d\n", __func__, ac, dir);
+		return NULL;
+	}
+
+	if (ac->io_mode & SYNC_IO_MODE) {
+		port = &ac->port[dir];
+
+		mutex_lock(&port->lock);
+		idx = port->cpu_buf;
+		if (port->buf == NULL) {
+			pr_err("%s: Buffer pointer null\n", __func__);
+			mutex_unlock(&port->lock);
+			return NULL;
+		}
+		/*
+		 * dir 0: used = 0 means buf in use
+		 * dir 1: used = 1 means buf in use
+		 */
+		if (port->buf[idx].used == dir) {
+			/*
+			 * To make it more robust, we could loop and get the
+			 * next avail buf; it's risky though.
+			 */
+			pr_err("%s: Next buf idx[0x%x] not available, dir[%d]\n",
+			 __func__, idx, dir);
+			mutex_unlock(&port->lock);
+			return NULL;
+		}
+		*size = port->buf[idx].actual_size;
+		*index = port->cpu_buf;
+		data = port->buf[idx].data;
+		dev_vdbg(ac->dev, "%s: session[%d]index[%d] data[%pK]size[%d]\n",
+						__func__,
+						ac->session,
+						port->cpu_buf,
+						data, *size);
+		/*
+		 * By default advance cpu_buf whenever the user calls this
+		 * function, to avoid needing another API.
+		 */
+		port->buf[idx].used = dir;
+		port->cpu_buf = q6asm_get_next_buf(ac, port->cpu_buf,
+						   port->max_buf_cnt);
+		mutex_unlock(&port->lock);
+		return data;
+	}
+	return NULL;
+}
+
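+/*
+ * q6asm_cpu_buf_release - undo the most recent q6asm_is_cpu_buf_avail()
+ * claim by stepping port->cpu_buf back one slot (wrapping at zero) and
+ * marking that buffer free again.
+ */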
+int q6asm_cpu_buf_release(int dir, struct audio_client *ac)
+{
+	struct audio_port_data *port;
+	int ret = 0;
+	int idx;
+
+	if (!ac || ((dir != IN) && (dir != OUT))) {
+		pr_err("%s: ac %pK dir %d\n", __func__, ac, dir);
+		ret = -EINVAL;
+		goto exit;
+	}
+
+	if (ac->io_mode & SYNC_IO_MODE) {
+		port = &ac->port[dir];
+		mutex_lock(&port->lock);
+		idx = port->cpu_buf;
+		if (port->cpu_buf == 0) {
+			port->cpu_buf = port->max_buf_cnt - 1;
+		} else if (port->cpu_buf < port->max_buf_cnt) {
+			port->cpu_buf = port->cpu_buf - 1;
+		} else {
+			pr_err("%s: buffer index(%d) out of range\n",
+			       __func__, port->cpu_buf);
+			ret = -EINVAL;
+			mutex_unlock(&port->lock);
+			goto exit;
+		}
+		port->buf[port->cpu_buf].used = dir ^ 1;
+		mutex_unlock(&port->lock);
+	}
+exit:
+	return ret;
+}
+
+void *q6asm_is_cpu_buf_avail_nolock(int dir, struct audio_client *ac,
+					uint32_t *size, uint32_t *index)
+{
+	void *data;
+	unsigned char idx;
+	struct audio_port_data *port;
+
+	if (!ac || ((dir != IN) && (dir != OUT))) {
+		pr_err("%s: ac %pK dir %d\n", __func__, ac, dir);
+		return NULL;
+	}
+
+	port = &ac->port[dir];
+
+	idx = port->cpu_buf;
+	if (port->buf == NULL) {
+		pr_err("%s: Buffer pointer null\n", __func__);
+		return NULL;
+	}
+	/*
+	 * dir 0: used = 0 means buf in use
+	 * dir 1: used = 1 means buf in use
+	 */
+	if (port->buf[idx].used == dir) {
+		/*
+		 * To make it more robust, we could loop and get the
+		 * next avail buf; it's risky though.
+		 */
+		pr_err("%s: Next buf idx[0x%x] not available, dir[%d]\n",
+		 __func__, idx, dir);
+		return NULL;
+	}
+	*size = port->buf[idx].actual_size;
+	*index = port->cpu_buf;
+	data = port->buf[idx].data;
+	dev_vdbg(ac->dev, "%s: session[%d]index[%d] data[%pK]size[%d]\n",
+		__func__, ac->session, port->cpu_buf,
+		data, *size);
+	/*
+	 * By default advance cpu_buf whenever the user calls this
+	 * function, to avoid needing another API.
+	 */
+	port->buf[idx].used = dir;
+	port->cpu_buf = q6asm_get_next_buf(ac, port->cpu_buf,
+					   port->max_buf_cnt);
+	return data;
+}
+
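+/*
+ * q6asm_is_dsp_buf_avail - check whether the DSP has a capture buffer
+ * ready.  Returns 0 when the DSP buffer index differs from the CPU index
+ * (a filled buffer is available), -1 otherwise.  Only meaningful for the
+ * OUT direction in SYNC_IO_MODE.
+ */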
+int q6asm_is_dsp_buf_avail(int dir, struct audio_client *ac)
+{
+	int ret = -1;
+	struct audio_port_data *port;
+	uint32_t idx;
+
+	if (!ac || (dir != OUT)) {
+		pr_err("%s: ac %pK dir %d\n", __func__, ac, dir);
+		return ret;
+	}
+
+	if (ac->io_mode & SYNC_IO_MODE) {
+		port = &ac->port[dir];
+
+		mutex_lock(&port->lock);
+		idx = port->dsp_buf;
+
+		if (port->buf[idx].used == (dir ^ 1)) {
+			/*
+			 * To make it more robust, we could loop and get the
+			 * next avail buf; it's risky though.
+			 */
+			pr_err("%s: Next buf idx[0x%x] not available, dir[%d]\n",
+				__func__, idx, dir);
+			mutex_unlock(&port->lock);
+			return ret;
+		}
+		dev_vdbg(ac->dev, "%s: session[%d]dsp_buf=%d cpu_buf=%d\n",
+			__func__,
+			ac->session, port->dsp_buf, port->cpu_buf);
+		ret = ((port->dsp_buf != port->cpu_buf) ? 0 : -1);
+		mutex_unlock(&port->lock);
+	}
+	return ret;
+}
+
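+/*
+ * __q6asm_add_hdr - fill an APR header for an ASM command.
+ *
+ * Routes the packet from the APPS domain to the ASM service on the ADSP
+ * and encodes the session in the upper byte and the stream id in the lower
+ * byte of the source/destination ports.  When cmd_flg is set, the token is
+ * updated so the response can be matched back to this session.  Takes the
+ * client cmd_lock and the session spinlock while filling the header.
+ */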
+static void __q6asm_add_hdr(struct audio_client *ac, struct apr_hdr *hdr,
+			uint32_t pkt_size, uint32_t cmd_flg, uint32_t stream_id)
+{
+	unsigned long flags;
+
+	dev_vdbg(ac->dev, "%s: pkt_size=%d cmd_flg=%d session=%d stream_id=%d\n",
+			__func__, pkt_size, cmd_flg, ac->session, stream_id);
+	mutex_lock(&ac->cmd_lock);
+	spin_lock_irqsave(&(session[ac->session].session_lock), flags);
+	if (ac->apr == NULL) {
+		pr_err("%s: AC APR handle NULL", __func__);
+		spin_unlock_irqrestore(
+			&(session[ac->session].session_lock), flags);
+		mutex_unlock(&ac->cmd_lock);
+		return;
+	}
+
+	hdr->hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, \
+			APR_HDR_LEN(sizeof(struct apr_hdr)),\
+			APR_PKT_VER);
+	hdr->src_svc = ((struct apr_svc *)ac->apr)->id;
+	hdr->src_domain = APR_DOMAIN_APPS;
+	hdr->dest_svc = APR_SVC_ASM;
+	hdr->dest_domain = APR_DOMAIN_ADSP;
+	hdr->src_port = ((ac->session << 8) & 0xFF00) | (stream_id);
+	hdr->dest_port = ((ac->session << 8) & 0xFF00) | (stream_id);
+	if (cmd_flg)
+		q6asm_update_token(&hdr->token,
+				   ac->session,
+				   0, /* Stream ID is NA */
+				   0, /* Buffer index is NA */
+				   0, /* Direction flag is NA */
+				   WAIT_CMD);
+
+	hdr->pkt_size  = pkt_size;
+	spin_unlock_irqrestore(
+		&(session[ac->session].session_lock), flags);
+	mutex_unlock(&ac->cmd_lock);
+	return;
+}
+
+static void q6asm_add_hdr(struct audio_client *ac, struct apr_hdr *hdr,
+			uint32_t pkt_size, uint32_t cmd_flg)
+{
+	__q6asm_add_hdr(ac, hdr, pkt_size, cmd_flg, ac->stream_id);
+	return;
+}
+
+static void q6asm_stream_add_hdr(struct audio_client *ac, struct apr_hdr *hdr,
+			uint32_t pkt_size, uint32_t cmd_flg, int32_t stream_id)
+{
+	__q6asm_add_hdr(ac, hdr, pkt_size, cmd_flg, stream_id);
+	return;
+}
+
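+/*
+ * __q6asm_add_hdr_async - fill an APR header like __q6asm_add_hdr(), but
+ * without taking the client cmd_lock or the session spinlock, for use on
+ * asynchronous paths.  The wait/no-wait flag is encoded in the token.
+ */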
+static void __q6asm_add_hdr_async(struct audio_client *ac, struct apr_hdr *hdr,
+				  uint32_t pkt_size, uint32_t cmd_flg,
+				  uint32_t stream_id, u8 no_wait_flag)
+{
+	dev_vdbg(ac->dev, "%s: pkt_size = %d, cmd_flg = %d, session = %d stream_id=%d\n",
+			__func__, pkt_size, cmd_flg, ac->session, stream_id);
+	hdr->hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, \
+			APR_HDR_LEN(sizeof(struct apr_hdr)),\
+			APR_PKT_VER);
+	if (ac->apr == NULL) {
+		pr_err("%s: AC APR is NULL", __func__);
+		return;
+	}
+	hdr->src_svc = ((struct apr_svc *)ac->apr)->id;
+	hdr->src_domain = APR_DOMAIN_APPS;
+	hdr->dest_svc = APR_SVC_ASM;
+	hdr->dest_domain = APR_DOMAIN_ADSP;
+	hdr->src_port = ((ac->session << 8) & 0xFF00) | (stream_id);
+	hdr->dest_port = ((ac->session << 8) & 0xFF00) | (stream_id);
+	if (cmd_flg) {
+		q6asm_update_token(&hdr->token,
+				   ac->session,
+				   0, /* Stream ID is NA */
+				   0, /* Buffer index is NA */
+				   0, /* Direction flag is NA */
+				   no_wait_flag);
+
+	}
+	hdr->pkt_size  = pkt_size;
+	return;
+}
+
+static void q6asm_add_hdr_async(struct audio_client *ac, struct apr_hdr *hdr,
+				uint32_t pkt_size, uint32_t cmd_flg)
+{
+	__q6asm_add_hdr_async(ac, hdr, pkt_size, cmd_flg,
+			      ac->stream_id, WAIT_CMD);
+	return;
+}
+
+static void q6asm_stream_add_hdr_async(struct audio_client *ac,
+					struct apr_hdr *hdr, uint32_t pkt_size,
+					uint32_t cmd_flg, int32_t stream_id)
+{
+	__q6asm_add_hdr_async(ac, hdr, pkt_size, cmd_flg,
+			      stream_id, NO_WAIT_CMD);
+	return;
+}
+
+static void q6asm_add_hdr_custom_topology(struct audio_client *ac,
+					  struct apr_hdr *hdr,
+					  uint32_t pkt_size)
+{
+	pr_debug("%s: pkt_size=%d session=%d\n",
+			__func__, pkt_size, ac->session);
+	if (ac->apr == NULL) {
+		pr_err("%s: AC APR handle NULL\n", __func__);
+		return;
+	}
+
+	mutex_lock(&ac->cmd_lock);
+	hdr->hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+			APR_HDR_LEN(sizeof(struct apr_hdr)),
+			APR_PKT_VER);
+	hdr->src_svc = ((struct apr_svc *)ac->apr)->id;
+	hdr->src_domain = APR_DOMAIN_APPS;
+	hdr->dest_svc = APR_SVC_ASM;
+	hdr->dest_domain = APR_DOMAIN_ADSP;
+	hdr->src_port = ((ac->session << 8) & 0xFF00) | 0x01;
+	hdr->dest_port = 0;
+	q6asm_update_token(&hdr->token,
+			   ac->session,
+			   0, /* Stream ID is NA */
+			   0, /* Buffer index is NA */
+			   0, /* Direction flag is NA */
+			   WAIT_CMD);
+	hdr->pkt_size  = pkt_size;
+	mutex_unlock(&ac->cmd_lock);
+	return;
+}
+
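+/*
+ * q6asm_add_mmaphdr - fill an APR header for a memory map/unmap command.
+ *
+ * Source and destination ports are left at zero; the token encodes the
+ * session and buffer direction so the response can be matched.
+ */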
+static void q6asm_add_mmaphdr(struct audio_client *ac, struct apr_hdr *hdr,
+			u32 pkt_size, int dir)
+{
+	pr_debug("%s: pkt size=%d\n",
+		__func__, pkt_size);
+	hdr->hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, \
+				APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
+	hdr->src_port = 0;
+	hdr->dest_port = 0;
+	q6asm_update_token(&hdr->token,
+			   ac->session,
+			   0, /* Stream ID is NA */
+			   0, /* Buffer index is NA */
+			   dir,
+			   WAIT_CMD);
+	hdr->pkt_size  = pkt_size;
+	return;
+}
+
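+/*
+ * __q6asm_open_read - common helper behind the q6asm_open_read*() variants.
+ *
+ * Sends ASM_STREAM_CMD_OPEN_READ_V3 with the encoder config derived from
+ * the requested format and waits up to 5 s for the DSP ack, converting any
+ * ADSP error into a Linux error code.
+ */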
+static int __q6asm_open_read(struct audio_client *ac,
+			     uint32_t format, uint16_t bits_per_sample,
+			     uint32_t pcm_format_block_ver,
+			     bool ts_mode)
+{
+	int rc = 0x00;
+	struct asm_stream_cmd_open_read_v3 open;
+
+	config_debug_fs_reset_index();
+
+	if (ac == NULL) {
+		pr_err("%s: APR handle NULL\n", __func__);
+		return -EINVAL;
+	}
+	if (ac->apr == NULL) {
+		pr_err("%s: AC APR handle NULL\n", __func__);
+		return -EINVAL;
+	}
+	pr_debug("%s: session[%d]\n", __func__, ac->session);
+
+	q6asm_add_hdr(ac, &open.hdr, sizeof(open), TRUE);
+	atomic_set(&ac->cmd_state, -1);
+	open.hdr.opcode = ASM_STREAM_CMD_OPEN_READ_V3;
+	/* Stream prio : High, provide meta info with encoded frames */
+	open.src_endpointype = ASM_END_POINT_DEVICE_MATRIX;
+
+	open.preprocopo_id = q6asm_get_asm_topology_cal();
+	open.bits_per_sample = bits_per_sample;
+	open.mode_flags = 0x0;
+
+	ac->topology = open.preprocopo_id;
+	ac->app_type = q6asm_get_asm_app_type_cal();
+	if (ac->perf_mode == LOW_LATENCY_PCM_MODE) {
+		open.mode_flags |= ASM_LOW_LATENCY_TX_STREAM_SESSION <<
+			ASM_SHIFT_STREAM_PERF_MODE_FLAG_IN_OPEN_READ;
+	} else {
+		open.mode_flags |= ASM_LEGACY_STREAM_SESSION <<
+			ASM_SHIFT_STREAM_PERF_MODE_FLAG_IN_OPEN_READ;
+	}
+
+	switch (format) {
+	case FORMAT_LINEAR_PCM:
+		open.mode_flags |= 0x00;
+		open.enc_cfg_id = q6asm_get_pcm_format_id(pcm_format_block_ver);
+		if (ts_mode)
+			open.mode_flags |= ABSOLUTE_TIMESTAMP_ENABLE;
+		break;
+	case FORMAT_MPEG4_AAC:
+		open.mode_flags |= BUFFER_META_ENABLE;
+		open.enc_cfg_id = ASM_MEDIA_FMT_AAC_V2;
+		break;
+	case FORMAT_G711_ALAW_FS:
+		open.mode_flags |= BUFFER_META_ENABLE;
+		open.enc_cfg_id = ASM_MEDIA_FMT_G711_ALAW_FS;
+		break;
+	case FORMAT_G711_MLAW_FS:
+		open.mode_flags |= BUFFER_META_ENABLE;
+		open.enc_cfg_id = ASM_MEDIA_FMT_G711_MLAW_FS;
+		break;
+	case FORMAT_V13K:
+		open.mode_flags |= BUFFER_META_ENABLE;
+		open.enc_cfg_id = ASM_MEDIA_FMT_V13K_FS;
+		break;
+	case FORMAT_EVRC:
+		open.mode_flags |= BUFFER_META_ENABLE;
+		open.enc_cfg_id = ASM_MEDIA_FMT_EVRC_FS;
+		break;
+	case FORMAT_AMRNB:
+		open.mode_flags |= BUFFER_META_ENABLE;
+		open.enc_cfg_id = ASM_MEDIA_FMT_AMRNB_FS;
+		break;
+	case FORMAT_AMRWB:
+		open.mode_flags |= BUFFER_META_ENABLE;
+		open.enc_cfg_id = ASM_MEDIA_FMT_AMRWB_FS;
+		break;
+	default:
+		pr_err("%s: Invalid format 0x%x\n",
+			__func__, format);
+		rc = -EINVAL;
+		goto fail_cmd;
+	}
+	rc = apr_send_pkt(ac->apr, (uint32_t *) &open);
+	if (rc < 0) {
+		pr_err("%s: open failed op[0x%x]rc[%d]\n",
+				__func__, open.hdr.opcode, rc);
+		rc = -EINVAL;
+		goto fail_cmd;
+	}
+	rc = wait_event_timeout(ac->cmd_wait,
+			(atomic_read(&ac->cmd_state) >= 0), 5*HZ);
+	if (!rc) {
+		pr_err("%s: timeout. waited for open read\n",
+				__func__);
+		rc = -ETIMEDOUT;
+		goto fail_cmd;
+	}
+	if (atomic_read(&ac->cmd_state) > 0) {
+		pr_err("%s: DSP returned error[%s]\n",
+				__func__, adsp_err_get_err_str(
+				atomic_read(&ac->cmd_state)));
+		rc = adsp_err_get_lnx_err_code(
+				atomic_read(&ac->cmd_state));
+		goto fail_cmd;
+	}
+
+	ac->io_mode |= TUN_READ_IO_MODE;
+
+	return 0;
+fail_cmd:
+	return rc;
+}
+
+int q6asm_open_read(struct audio_client *ac,
+		uint32_t format)
+{
+	return __q6asm_open_read(ac, format, 16,
+				PCM_MEDIA_FORMAT_V2 /*media fmt block ver*/,
+				false/*ts_mode*/);
+}
+
+int q6asm_open_read_v2(struct audio_client *ac, uint32_t format,
+			uint16_t bits_per_sample)
+{
+	return __q6asm_open_read(ac, format, bits_per_sample,
+				 PCM_MEDIA_FORMAT_V2 /*media fmt block ver*/,
+				 false/*ts_mode*/);
+}
+
+/*
+ * q6asm_open_read_v3 - Opens audio capture session
+ *
+ * @ac: Client session handle
+ * @format: encoder format
+ * @bits_per_sample: bit width of capture session
+ */
+int q6asm_open_read_v3(struct audio_client *ac, uint32_t format,
+			uint16_t bits_per_sample)
+{
+	return __q6asm_open_read(ac, format, bits_per_sample,
+				 PCM_MEDIA_FORMAT_V3/*media fmt block ver*/,
+				 false/*ts_mode*/);
+}
+EXPORT_SYMBOL(q6asm_open_read_v3);
+
+/*
+ * q6asm_open_read_v4 - Opens audio capture session
+ *
+ * @ac: Client session handle
+ * @format: encoder format
+ * @bits_per_sample: bit width of capture session
+ * @ts_mode: timestamp mode
+ */
+int q6asm_open_read_v4(struct audio_client *ac, uint32_t format,
+			uint16_t bits_per_sample, bool ts_mode)
+{
+	return __q6asm_open_read(ac, format, bits_per_sample,
+				 PCM_MEDIA_FORMAT_V4 /*media fmt block ver*/,
+				 ts_mode);
+}
+EXPORT_SYMBOL(q6asm_open_read_v4);
+
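+/*
+ * q6asm_open_write_compressed - open a compressed playback stream.
+ *
+ * open.flags selects the passthrough behaviour: 0x0 for raw compressed
+ * passthrough, 0x1 for IEC 61937 packetized input and 0x8 for passthrough
+ * with conversion.  Waits up to 1 s for the DSP ack.
+ */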
+int q6asm_open_write_compressed(struct audio_client *ac, uint32_t format,
+				uint32_t passthrough_flag)
+{
+	int rc = 0;
+	struct asm_stream_cmd_open_write_compressed open;
+
+	if (ac == NULL) {
+		pr_err("%s: ac[%pK] NULL\n",  __func__, ac);
+		rc = -EINVAL;
+		goto fail_cmd;
+	}
+
+	if (ac->apr == NULL) {
+		pr_err("%s: APR handle[%pK] NULL\n", __func__,  ac->apr);
+		rc = -EINVAL;
+		goto fail_cmd;
+	}
+	pr_debug("%s: session[%d] wr_format[0x%x]\n", __func__, ac->session,
+		format);
+
+	q6asm_add_hdr(ac, &open.hdr, sizeof(open), TRUE);
+	open.hdr.opcode = ASM_STREAM_CMD_OPEN_WRITE_COMPRESSED;
+	atomic_set(&ac->cmd_state, -1);
+
+	switch (format) {
+	case FORMAT_AC3:
+		open.fmt_id = ASM_MEDIA_FMT_AC3;
+		break;
+	case FORMAT_EAC3:
+		open.fmt_id = ASM_MEDIA_FMT_EAC3;
+		break;
+	case FORMAT_DTS:
+		open.fmt_id = ASM_MEDIA_FMT_DTS;
+		break;
+	case FORMAT_DSD:
+		open.fmt_id = ASM_MEDIA_FMT_DSD;
+		break;
+	case FORMAT_GEN_COMPR:
+		open.fmt_id = ASM_MEDIA_FMT_GENERIC_COMPRESSED;
+		break;
+	case FORMAT_TRUEHD:
+		open.fmt_id = ASM_MEDIA_FMT_TRUEHD;
+		break;
+	case FORMAT_IEC61937:
+		open.fmt_id = ASM_MEDIA_FMT_IEC;
+		break;
+	default:
+		pr_err("%s: Invalid format[%d]\n", __func__, format);
+		rc = -EINVAL;
+		goto fail_cmd;
+	}
+	/*
+	 * The flag below indicates to the DSP that the compressed audio input
+	 * stream is not IEC 61937 or IEC 60958 packetized.
+	 */
+	if (passthrough_flag == COMPRESSED_PASSTHROUGH ||
+		passthrough_flag == COMPRESSED_PASSTHROUGH_DSD ||
+		passthrough_flag == COMPRESSED_PASSTHROUGH_GEN) {
+		open.flags = 0x0;
+		pr_debug("%s: Flag 0 COMPRESSED_PASSTHROUGH\n", __func__);
+	} else if (passthrough_flag == COMPRESSED_PASSTHROUGH_CONVERT) {
+		open.flags = 0x8;
+		pr_debug("%s: Flag 8 - COMPRESSED_PASSTHROUGH_CONVERT\n",
+			 __func__);
+	} else if (passthrough_flag == COMPRESSED_PASSTHROUGH_IEC61937) {
+		open.flags = 0x1;
+		pr_debug("%s: Flag 1 - COMPRESSED_PASSTHROUGH_IEC61937\n",
+			 __func__);
+	} else {
+		pr_err("%s: Invalid passthrough type[%d]\n",
+			__func__, passthrough_flag);
+		rc = -EINVAL;
+		goto fail_cmd;
+	}
+	rc = apr_send_pkt(ac->apr, (uint32_t *) &open);
+	if (rc < 0) {
+		pr_err("%s: open failed op[0x%x]rc[%d]\n",
+			__func__, open.hdr.opcode, rc);
+		rc = -EINVAL;
+		goto fail_cmd;
+	}
+	rc = wait_event_timeout(ac->cmd_wait,
+		(atomic_read(&ac->cmd_state) >= 0), 1*HZ);
+	if (!rc) {
+		pr_err("%s: timeout. waited for OPEN_WRITE_COMPR rc[%d]\n",
+			__func__, rc);
+		rc = -ETIMEDOUT;
+		goto fail_cmd;
+	}
+
+	if (atomic_read(&ac->cmd_state) > 0) {
+		pr_err("%s: DSP returned error[%s]\n",
+				__func__, adsp_err_get_err_str(
+				atomic_read(&ac->cmd_state)));
+		rc = adsp_err_get_lnx_err_code(
+				atomic_read(&ac->cmd_state));
+		goto fail_cmd;
+	}
+
+	return 0;
+
+fail_cmd:
+	return rc;
+}
+
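+/*
+ * __q6asm_open_write - common helper behind the q6asm_open_write*() and
+ * q6asm_stream_open_write_*() variants.
+ *
+ * Sends ASM_STREAM_CMD_OPEN_WRITE_V3 with the decoder format id, perf-mode
+ * flags and, for legacy mode, an optional gapless flag, then waits up to
+ * 5 s for the DSP ack.
+ */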
+static int __q6asm_open_write(struct audio_client *ac, uint32_t format,
+			      uint16_t bits_per_sample, uint32_t stream_id,
+			      bool is_gapless_mode,
+			      uint32_t pcm_format_block_ver)
+{
+	int rc = 0x00;
+	struct asm_stream_cmd_open_write_v3 open;
+
+	if (ac == NULL) {
+		pr_err("%s: APR handle NULL\n", __func__);
+		return -EINVAL;
+	}
+	if (ac->apr == NULL) {
+		pr_err("%s: AC APR handle NULL\n", __func__);
+		return -EINVAL;
+	}
+
+	dev_vdbg(ac->dev, "%s: session[%d] wr_format[0x%x]\n",
+		__func__, ac->session, format);
+
+	q6asm_stream_add_hdr(ac, &open.hdr, sizeof(open), TRUE, stream_id);
+	atomic_set(&ac->cmd_state, -1);
+	/*
+	 * Update the token field with stream/session for compressed playback;
+	 * the platform driver must know the stream with which the command is
+	 * associated.
+	 */
+	if (ac->io_mode & COMPRESSED_STREAM_IO)
+		q6asm_update_token(&open.hdr.token,
+				   ac->session,
+				   stream_id,
+				   0, /* Buffer index is NA */
+				   0, /* Direction flag is NA */
+				   WAIT_CMD);
+
+	dev_vdbg(ac->dev, "%s: token = 0x%x, stream_id  %d, session 0x%x\n",
+			__func__, open.hdr.token, stream_id, ac->session);
+	open.hdr.opcode = ASM_STREAM_CMD_OPEN_WRITE_V3;
+	open.mode_flags = 0x00;
+	if (ac->perf_mode == ULL_POST_PROCESSING_PCM_MODE)
+		open.mode_flags |= ASM_ULL_POST_PROCESSING_STREAM_SESSION;
+	else if (ac->perf_mode == ULTRA_LOW_LATENCY_PCM_MODE)
+		open.mode_flags |= ASM_ULTRA_LOW_LATENCY_STREAM_SESSION;
+	else if (ac->perf_mode == LOW_LATENCY_PCM_MODE)
+		open.mode_flags |= ASM_LOW_LATENCY_STREAM_SESSION;
+	else {
+		open.mode_flags |= ASM_LEGACY_STREAM_SESSION;
+		if (is_gapless_mode)
+			open.mode_flags |= 1 << ASM_SHIFT_GAPLESS_MODE_FLAG;
+	}
+
+	/* source endpoint : matrix */
+	open.sink_endpointype = ASM_END_POINT_DEVICE_MATRIX;
+	open.bits_per_sample = bits_per_sample;
+
+	open.postprocopo_id = q6asm_get_asm_topology_cal();
+	if (ac->perf_mode != LEGACY_PCM_MODE)
+		open.postprocopo_id = ASM_STREAM_POSTPROCOPO_ID_NONE;
+
+	pr_debug("%s: perf_mode %d asm_topology 0x%x bps %d\n", __func__,
+		 ac->perf_mode, open.postprocopo_id, open.bits_per_sample);
+
+	/*
+	 * For Gapless playback it will use the same session for next stream,
+	 * So use the same topology
+	 */
+	if (!ac->topology) {
+		ac->topology = open.postprocopo_id;
+		ac->app_type = q6asm_get_asm_app_type_cal();
+	}
+	switch (format) {
+	case FORMAT_LINEAR_PCM:
+		open.dec_fmt_id = q6asm_get_pcm_format_id(pcm_format_block_ver);
+		break;
+	case FORMAT_MPEG4_AAC:
+		open.dec_fmt_id = ASM_MEDIA_FMT_AAC_V2;
+		break;
+	case FORMAT_MPEG4_MULTI_AAC:
+		open.dec_fmt_id = ASM_MEDIA_FMT_AAC_V2;
+		break;
+	case FORMAT_WMA_V9:
+		open.dec_fmt_id = ASM_MEDIA_FMT_WMA_V9_V2;
+		break;
+	case FORMAT_WMA_V10PRO:
+		open.dec_fmt_id = ASM_MEDIA_FMT_WMA_V10PRO_V2;
+		break;
+	case FORMAT_MP3:
+		open.dec_fmt_id = ASM_MEDIA_FMT_MP3;
+		break;
+	case FORMAT_AC3:
+		open.dec_fmt_id = ASM_MEDIA_FMT_AC3;
+		break;
+	case FORMAT_EAC3:
+		open.dec_fmt_id = ASM_MEDIA_FMT_EAC3;
+		break;
+	case FORMAT_MP2:
+		open.dec_fmt_id = ASM_MEDIA_FMT_MP2;
+		break;
+	case FORMAT_FLAC:
+		open.dec_fmt_id = ASM_MEDIA_FMT_FLAC;
+		break;
+	case FORMAT_ALAC:
+		open.dec_fmt_id = ASM_MEDIA_FMT_ALAC;
+		break;
+	case FORMAT_VORBIS:
+		open.dec_fmt_id = ASM_MEDIA_FMT_VORBIS;
+		break;
+	case FORMAT_APE:
+		open.dec_fmt_id = ASM_MEDIA_FMT_APE;
+		break;
+	case FORMAT_DSD:
+		open.dec_fmt_id = ASM_MEDIA_FMT_DSD;
+		break;
+	case FORMAT_APTX:
+		open.dec_fmt_id = ASM_MEDIA_FMT_APTX;
+		break;
+	case FORMAT_GEN_COMPR:
+		open.dec_fmt_id = ASM_MEDIA_FMT_GENERIC_COMPRESSED;
+		break;
+	default:
+		pr_err("%s: Invalid format 0x%x\n", __func__, format);
+		rc = -EINVAL;
+		goto fail_cmd;
+	}
+	rc = apr_send_pkt(ac->apr, (uint32_t *) &open);
+	if (rc < 0) {
+		pr_err("%s: open failed op[0x%x]rc[%d]\n", \
+				__func__, open.hdr.opcode, rc);
+		rc = -EINVAL;
+		goto fail_cmd;
+	}
+	rc = wait_event_timeout(ac->cmd_wait,
+			(atomic_read(&ac->cmd_state) >= 0), 5*HZ);
+	if (!rc) {
+		pr_err("%s: timeout. waited for open write\n", __func__);
+		rc = -ETIMEDOUT;
+		goto fail_cmd;
+	}
+	if (atomic_read(&ac->cmd_state) > 0) {
+		pr_err("%s: DSP returned error[%s]\n",
+				__func__, adsp_err_get_err_str(
+				atomic_read(&ac->cmd_state)));
+		rc = adsp_err_get_lnx_err_code(
+				atomic_read(&ac->cmd_state));
+		goto fail_cmd;
+	}
+	ac->io_mode |= TUN_WRITE_IO_MODE;
+
+	return 0;
+fail_cmd:
+	return rc;
+}
+
+int q6asm_open_write(struct audio_client *ac, uint32_t format)
+{
+	return __q6asm_open_write(ac, format, 16, ac->stream_id,
+				  false /*gapless*/,
+				  PCM_MEDIA_FORMAT_V2 /*pcm_format_block_ver*/);
+}
+
+int q6asm_open_write_v2(struct audio_client *ac, uint32_t format,
+			uint16_t bits_per_sample)
+{
+	return __q6asm_open_write(ac, format, bits_per_sample,
+				  ac->stream_id, false /*gapless*/,
+				  PCM_MEDIA_FORMAT_V2 /*pcm_format_block_ver*/);
+}
+
+/*
+ * q6asm_open_write_v3 - Opens audio playback session
+ *
+ * @ac: Client session handle
+ * @format: decoder format
+ * @bits_per_sample: bit width of playback session
+ */
+int q6asm_open_write_v3(struct audio_client *ac, uint32_t format,
+			uint16_t bits_per_sample)
+{
+	return __q6asm_open_write(ac, format, bits_per_sample,
+				  ac->stream_id, false /*gapless*/,
+				  PCM_MEDIA_FORMAT_V3 /*pcm_format_block_ver*/);
+}
+EXPORT_SYMBOL(q6asm_open_write_v3);
+
+/*
+ * q6asm_open_write_v4 - Opens audio playback session
+ *
+ * @ac: Client session handle
+ * @format: decoder format
+ * @bits_per_sample: bit width of playback session
+ */
+int q6asm_open_write_v4(struct audio_client *ac, uint32_t format,
+			uint16_t bits_per_sample)
+{
+	return __q6asm_open_write(ac, format, bits_per_sample,
+				  ac->stream_id, false /*gapless*/,
+				  PCM_MEDIA_FORMAT_V4 /*pcm_format_block_ver*/);
+}
+EXPORT_SYMBOL(q6asm_open_write_v4);
+
+int q6asm_stream_open_write_v2(struct audio_client *ac, uint32_t format,
+			       uint16_t bits_per_sample, int32_t stream_id,
+			       bool is_gapless_mode)
+{
+	return __q6asm_open_write(ac, format, bits_per_sample,
+				  stream_id, is_gapless_mode,
+				  PCM_MEDIA_FORMAT_V2 /*pcm_format_block_ver*/);
+}
+
+/*
+ * q6asm_stream_open_write_v3 - Creates audio stream for playback
+ *
+ * @ac: Client session handle
+ * @format: asm playback format
+ * @bits_per_sample: bit width of requested stream
+ * @stream_id: stream id of stream to be associated with this session
+ * @is_gapless_mode: true if gapless mode needs to be enabled
+ */
+int q6asm_stream_open_write_v3(struct audio_client *ac, uint32_t format,
+			       uint16_t bits_per_sample, int32_t stream_id,
+			       bool is_gapless_mode)
+{
+	return __q6asm_open_write(ac, format, bits_per_sample,
+				  stream_id, is_gapless_mode,
+				  PCM_MEDIA_FORMAT_V3 /*pcm_format_block_ver*/);
+}
+EXPORT_SYMBOL(q6asm_stream_open_write_v3);
+
+/*
+ * q6asm_stream_open_write_v4 - Creates audio stream for playback
+ *
+ * @ac: Client session handle
+ * @format: asm playback format
+ * @bits_per_sample: bit width of requested stream
+ * @stream_id: stream id of stream to be associated with this session
+ * @is_gapless_mode: true if gapless mode needs to be enabled
+ */
+int q6asm_stream_open_write_v4(struct audio_client *ac, uint32_t format,
+			       uint16_t bits_per_sample, int32_t stream_id,
+			       bool is_gapless_mode)
+{
+	return __q6asm_open_write(ac, format, bits_per_sample,
+				  stream_id, is_gapless_mode,
+				  PCM_MEDIA_FORMAT_V4 /*pcm_format_block_ver*/);
+}
+EXPORT_SYMBOL(q6asm_stream_open_write_v4);
+
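+/*
+ * __q6asm_open_read_write - open a non-tunnel (NT mode) session that
+ * decodes the client-written wr_format and encodes rd_format for the
+ * client to read back on the same stream.
+ */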
+static int __q6asm_open_read_write(struct audio_client *ac, uint32_t rd_format,
+				   uint32_t wr_format, bool is_meta_data_mode,
+				   uint32_t bits_per_sample,
+				   bool overwrite_topology, int topology)
+{
+	int rc = 0x00;
+	struct asm_stream_cmd_open_readwrite_v2 open;
+
+	if (ac == NULL) {
+		pr_err("%s: APR handle NULL\n", __func__);
+		return -EINVAL;
+	}
+	if (ac->apr == NULL) {
+		pr_err("%s: AC APR handle NULL\n", __func__);
+		return -EINVAL;
+	}
+	pr_debug("%s: session[%d]\n", __func__, ac->session);
+	pr_debug("%s: wr_format[0x%x]rd_format[0x%x]\n",
+			__func__, wr_format, rd_format);
+
+	ac->io_mode |= NT_MODE;
+	q6asm_add_hdr(ac, &open.hdr, sizeof(open), TRUE);
+	atomic_set(&ac->cmd_state, -1);
+	open.hdr.opcode = ASM_STREAM_CMD_OPEN_READWRITE_V2;
+
+	open.mode_flags = is_meta_data_mode ? BUFFER_META_ENABLE : 0;
+	open.bits_per_sample = bits_per_sample;
+	/* source endpoint : matrix */
+	open.postprocopo_id = q6asm_get_asm_topology_cal();
+
+	open.postprocopo_id = overwrite_topology ?
+			      topology : open.postprocopo_id;
+	ac->topology = open.postprocopo_id;
+	ac->app_type = q6asm_get_asm_app_type_cal();
+
+	switch (wr_format) {
+	case FORMAT_LINEAR_PCM:
+	case FORMAT_MULTI_CHANNEL_LINEAR_PCM:
+		open.dec_fmt_id = ASM_MEDIA_FMT_MULTI_CHANNEL_PCM_V2;
+		break;
+	case FORMAT_MPEG4_AAC:
+		open.dec_fmt_id = ASM_MEDIA_FMT_AAC_V2;
+		break;
+	case FORMAT_MPEG4_MULTI_AAC:
+		open.dec_fmt_id = ASM_MEDIA_FMT_AAC_V2;
+		break;
+	case FORMAT_WMA_V9:
+		open.dec_fmt_id = ASM_MEDIA_FMT_WMA_V9_V2;
+		break;
+	case FORMAT_WMA_V10PRO:
+		open.dec_fmt_id = ASM_MEDIA_FMT_WMA_V10PRO_V2;
+		break;
+	case FORMAT_AMRNB:
+		open.dec_fmt_id = ASM_MEDIA_FMT_AMRNB_FS;
+		break;
+	case FORMAT_AMRWB:
+		open.dec_fmt_id = ASM_MEDIA_FMT_AMRWB_FS;
+		break;
+	case FORMAT_AMR_WB_PLUS:
+		open.dec_fmt_id = ASM_MEDIA_FMT_AMR_WB_PLUS_V2;
+		break;
+	case FORMAT_V13K:
+		open.dec_fmt_id = ASM_MEDIA_FMT_V13K_FS;
+		break;
+	case FORMAT_EVRC:
+		open.dec_fmt_id = ASM_MEDIA_FMT_EVRC_FS;
+		break;
+	case FORMAT_EVRCB:
+		open.dec_fmt_id = ASM_MEDIA_FMT_EVRCB_FS;
+		break;
+	case FORMAT_EVRCWB:
+		open.dec_fmt_id = ASM_MEDIA_FMT_EVRCWB_FS;
+		break;
+	case FORMAT_MP3:
+		open.dec_fmt_id = ASM_MEDIA_FMT_MP3;
+		break;
+	case FORMAT_ALAC:
+		open.dec_fmt_id = ASM_MEDIA_FMT_ALAC;
+		break;
+	case FORMAT_APE:
+		open.dec_fmt_id = ASM_MEDIA_FMT_APE;
+		break;
+	case FORMAT_DSD:
+		open.dec_fmt_id = ASM_MEDIA_FMT_DSD;
+		break;
+	case FORMAT_G711_ALAW_FS:
+		open.dec_fmt_id = ASM_MEDIA_FMT_G711_ALAW_FS;
+		break;
+	case FORMAT_G711_MLAW_FS:
+		open.dec_fmt_id = ASM_MEDIA_FMT_G711_MLAW_FS;
+		break;
+	default:
+		pr_err("%s: Invalid format 0x%x\n",
+				__func__, wr_format);
+		rc = -EINVAL;
+		goto fail_cmd;
+	}
+
+	switch (rd_format) {
+	case FORMAT_LINEAR_PCM:
+	case FORMAT_MULTI_CHANNEL_LINEAR_PCM:
+		open.enc_cfg_id = ASM_MEDIA_FMT_MULTI_CHANNEL_PCM_V2;
+		break;
+	case FORMAT_MPEG4_AAC:
+		open.enc_cfg_id = ASM_MEDIA_FMT_AAC_V2;
+		break;
+	case FORMAT_G711_ALAW_FS:
+		open.enc_cfg_id = ASM_MEDIA_FMT_G711_ALAW_FS;
+		break;
+	case FORMAT_G711_MLAW_FS:
+		open.enc_cfg_id = ASM_MEDIA_FMT_G711_MLAW_FS;
+		break;
+	case FORMAT_V13K:
+		open.enc_cfg_id = ASM_MEDIA_FMT_V13K_FS;
+		break;
+	case FORMAT_EVRC:
+		open.enc_cfg_id = ASM_MEDIA_FMT_EVRC_FS;
+		break;
+	case FORMAT_AMRNB:
+		open.enc_cfg_id = ASM_MEDIA_FMT_AMRNB_FS;
+		break;
+	case FORMAT_AMRWB:
+		open.enc_cfg_id = ASM_MEDIA_FMT_AMRWB_FS;
+		break;
+	case FORMAT_ALAC:
+		open.enc_cfg_id = ASM_MEDIA_FMT_ALAC;
+		break;
+	case FORMAT_APE:
+		open.enc_cfg_id = ASM_MEDIA_FMT_APE;
+		break;
+	default:
+		pr_err("%s: Invalid format 0x%x\n",
+				__func__, rd_format);
+		rc = -EINVAL;
+		goto fail_cmd;
+	}
+	dev_vdbg(ac->dev, "%s: rdformat[0x%x]wrformat[0x%x]\n", __func__,
+			open.enc_cfg_id, open.dec_fmt_id);
+
+	rc = apr_send_pkt(ac->apr, (uint32_t *) &open);
+	if (rc < 0) {
+		pr_err("%s: open failed op[0x%x]rc[%d]\n",
+				__func__, open.hdr.opcode, rc);
+		rc = -EINVAL;
+		goto fail_cmd;
+	}
+	rc = wait_event_timeout(ac->cmd_wait,
+			(atomic_read(&ac->cmd_state) >= 0), 5*HZ);
+	if (!rc) {
+		pr_err("%s: timeout. waited for open read-write\n",
+				__func__);
+		rc = -ETIMEDOUT;
+		goto fail_cmd;
+	}
+	if (atomic_read(&ac->cmd_state) > 0) {
+		pr_err("%s: DSP returned error[%s]\n",
+				__func__, adsp_err_get_err_str(
+				atomic_read(&ac->cmd_state)));
+		rc = adsp_err_get_lnx_err_code(
+				atomic_read(&ac->cmd_state));
+		goto fail_cmd;
+	}
+
+	return 0;
+fail_cmd:
+	return rc;
+}
+
+int q6asm_open_read_write(struct audio_client *ac, uint32_t rd_format,
+			  uint32_t wr_format)
+{
+	return __q6asm_open_read_write(ac, rd_format, wr_format,
+				       true/*meta data mode*/,
+				       16 /*bits_per_sample*/,
+				       false /*overwrite_topology*/, 0);
+}
+
+int q6asm_open_read_write_v2(struct audio_client *ac, uint32_t rd_format,
+			     uint32_t wr_format, bool is_meta_data_mode,
+			     uint32_t bits_per_sample, bool overwrite_topology,
+			     int topology)
+{
+	return __q6asm_open_read_write(ac, rd_format, wr_format,
+				       is_meta_data_mode, bits_per_sample,
+				       overwrite_topology, topology);
+}
+
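+/*
+ * q6asm_open_loopback_v2 - open a loopback session.
+ *
+ * Uses ASM_STREAM_CMD_OPEN_TRANSCODE_LOOPBACK in low-latency PCM mode and
+ * ASM_STREAM_CMD_OPEN_LOOPBACK_V2 otherwise; both paths wait up to 5 s for
+ * the DSP ack.
+ */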
+int q6asm_open_loopback_v2(struct audio_client *ac, uint16_t bits_per_sample)
+{
+	int rc = 0x00;
+
+	if (ac == NULL) {
+		pr_err("%s: APR handle NULL\n", __func__);
+		return -EINVAL;
+	}
+	if (ac->apr == NULL) {
+		pr_err("%s: AC APR handle NULL\n", __func__);
+		return -EINVAL;
+	}
+	pr_debug("%s: session[%d]\n", __func__, ac->session);
+
+	if (ac->perf_mode == LOW_LATENCY_PCM_MODE) {
+		struct asm_stream_cmd_open_transcode_loopback_t open;
+
+		q6asm_add_hdr(ac, &open.hdr, sizeof(open), TRUE);
+		atomic_set(&ac->cmd_state, -1);
+		open.hdr.opcode = ASM_STREAM_CMD_OPEN_TRANSCODE_LOOPBACK;
+
+		open.mode_flags = 0;
+		open.src_endpoint_type = 0;
+		open.sink_endpoint_type = 0;
+		open.src_format_id = ASM_MEDIA_FMT_MULTI_CHANNEL_PCM_V2;
+		open.sink_format_id = ASM_MEDIA_FMT_MULTI_CHANNEL_PCM_V2;
+		/* source endpoint : matrix */
+		open.audproc_topo_id = q6asm_get_asm_topology_cal();
+
+		ac->app_type = q6asm_get_asm_app_type_cal();
+		if (ac->perf_mode == LOW_LATENCY_PCM_MODE)
+			open.mode_flags |= ASM_LOW_LATENCY_STREAM_SESSION;
+		else
+			open.mode_flags |= ASM_LEGACY_STREAM_SESSION;
+		ac->topology = open.audproc_topo_id;
+		open.bits_per_sample = bits_per_sample;
+		open.reserved = 0;
+		pr_debug("%s: opening a transcode_loopback with mode_flags =[%d] session[%d]\n",
+				__func__, open.mode_flags, ac->session);
+
+		rc = apr_send_pkt(ac->apr, (uint32_t *) &open);
+		if (rc < 0) {
+			pr_err("%s: open failed op[0x%x]rc[%d]\n",
+					__func__, open.hdr.opcode, rc);
+			rc = -EINVAL;
+			goto fail_cmd;
+		}
+	} else { /* ac->perf_mode == LEGACY_PCM_MODE */
+		struct asm_stream_cmd_open_loopback_v2 open;
+
+		q6asm_add_hdr(ac, &open.hdr, sizeof(open), TRUE);
+		atomic_set(&ac->cmd_state, -1);
+		open.hdr.opcode = ASM_STREAM_CMD_OPEN_LOOPBACK_V2;
+
+		open.mode_flags = 0;
+		open.src_endpointype = 0;
+		open.sink_endpointype = 0;
+		/* source endpoint : matrix */
+		open.postprocopo_id = q6asm_get_asm_topology_cal();
+
+		ac->app_type = q6asm_get_asm_app_type_cal();
+		ac->topology = open.postprocopo_id;
+		open.bits_per_sample = bits_per_sample;
+		open.reserved = 0;
+		pr_debug("%s: opening a loopback_v2 with mode_flags =[%d] session[%d]\n",
+				__func__, open.mode_flags, ac->session);
+
+		rc = apr_send_pkt(ac->apr, (uint32_t *) &open);
+		if (rc < 0) {
+			pr_err("%s: open failed op[0x%x]rc[%d]\n",
+					__func__, open.hdr.opcode, rc);
+			rc = -EINVAL;
+			goto fail_cmd;
+		}
+	}
+	rc = wait_event_timeout(ac->cmd_wait,
+			(atomic_read(&ac->cmd_state) >= 0), 5*HZ);
+	if (!rc) {
+		pr_err("%s: timeout. waited for open_loopback\n",
+				__func__);
+		rc = -ETIMEDOUT;
+		goto fail_cmd;
+	}
+	if (atomic_read(&ac->cmd_state) > 0) {
+		pr_err("%s: DSP returned error[%s]\n",
+				__func__, adsp_err_get_err_str(
+				atomic_read(&ac->cmd_state)));
+		rc = adsp_err_get_lnx_err_code(
+				atomic_read(&ac->cmd_state));
+		goto fail_cmd;
+	}
+
+	return 0;
+fail_cmd:
+	return rc;
+}
+
+
+int q6asm_open_transcode_loopback(struct audio_client *ac,
+			uint16_t bits_per_sample,
+			uint32_t source_format, uint32_t sink_format)
+{
+	int rc = 0x00;
+	struct asm_stream_cmd_open_transcode_loopback_t open;
+
+	if (ac == NULL) {
+		pr_err("%s: APR handle NULL\n", __func__);
+		return -EINVAL;
+	}
+	if (ac->apr == NULL) {
+		pr_err("%s: AC APR handle NULL\n", __func__);
+		return -EINVAL;
+	}
+
+	pr_debug("%s: session[%d]\n", __func__, ac->session);
+
+	q6asm_add_hdr(ac, &open.hdr, sizeof(open), TRUE);
+	atomic_set(&ac->cmd_state, -1);
+	open.hdr.opcode = ASM_STREAM_CMD_OPEN_TRANSCODE_LOOPBACK;
+
+	open.mode_flags = 0;
+	open.src_endpoint_type = 0;
+	open.sink_endpoint_type = 0;
+	switch (source_format) {
+	case FORMAT_LINEAR_PCM:
+	case FORMAT_MULTI_CHANNEL_LINEAR_PCM:
+		open.src_format_id = ASM_MEDIA_FMT_MULTI_CHANNEL_PCM_V3;
+		break;
+	case FORMAT_AC3:
+		open.src_format_id = ASM_MEDIA_FMT_AC3;
+		break;
+	case FORMAT_EAC3:
+		open.src_format_id = ASM_MEDIA_FMT_EAC3;
+		break;
+	default:
+		pr_err("%s: Unsupported src fmt [%d]\n",
+		       __func__, source_format);
+		return -EINVAL;
+	}
+	switch (sink_format) {
+	case FORMAT_LINEAR_PCM:
+	case FORMAT_MULTI_CHANNEL_LINEAR_PCM:
+		open.sink_format_id = ASM_MEDIA_FMT_MULTI_CHANNEL_PCM_V3;
+		break;
+	default:
+		pr_err("%s: Unsupported sink fmt [%d]\n",
+		       __func__, sink_format);
+		return -EINVAL;
+	}
+
+	/* source endpoint : matrix */
+	open.audproc_topo_id = q6asm_get_asm_topology_cal();
+
+	ac->app_type = q6asm_get_asm_app_type_cal();
+	if (ac->perf_mode == LOW_LATENCY_PCM_MODE)
+		open.mode_flags |= ASM_LOW_LATENCY_STREAM_SESSION;
+	else
+		open.mode_flags |= ASM_LEGACY_STREAM_SESSION;
+	ac->topology = open.audproc_topo_id;
+	open.bits_per_sample = bits_per_sample;
+	open.reserved = 0;
+	pr_debug("%s: opening a transcode_loopback with mode_flags =[%d] session[%d]\n",
+		__func__, open.mode_flags, ac->session);
+
+	rc = apr_send_pkt(ac->apr, (uint32_t *) &open);
+	if (rc < 0) {
+		pr_err("%s: open failed op[0x%x]rc[%d]\n",
+				__func__, open.hdr.opcode, rc);
+		rc = -EINVAL;
+		goto fail_cmd;
+	}
+	rc = wait_event_timeout(ac->cmd_wait,
+			(atomic_read(&ac->cmd_state) >= 0), 5*HZ);
+	if (!rc) {
+		pr_err("%s: timeout. waited for open_transcode_loopback\n",
+			__func__);
+		rc = -ETIMEDOUT;
+		goto fail_cmd;
+	}
+	if (atomic_read(&ac->cmd_state) > 0) {
+		pr_err("%s: DSP returned error[%s]\n",
+				__func__, adsp_err_get_err_str(
+					atomic_read(&ac->cmd_state)));
+		rc = adsp_err_get_lnx_err_code(
+				atomic_read(&ac->cmd_state));
+		goto fail_cmd;
+	}
+
+	return 0;
+fail_cmd:
+	return rc;
+}
+
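+/*
+ * q6asm_set_shared_circ_buff - allocate the shared circular buffer for a
+ * pull/push mode session.
+ *
+ * Allocates a single page-aligned ION buffer of bufsz * bufcnt bytes,
+ * attaches it to ac->port[dir] and fills in the circular-buffer and map
+ * region fields of the open command.
+ */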
+static
+int q6asm_set_shared_circ_buff(struct audio_client *ac,
+			       struct asm_stream_cmd_open_shared_io *open,
+			       int bufsz, int bufcnt,
+			       int dir)
+{
+	struct audio_buffer *buf_circ;
+	int bytes_to_alloc, rc;
+	size_t len;
+
+	mutex_lock(&ac->cmd_lock);
+
+	if (ac->port[dir].buf) {
+		pr_err("%s: Buffer already allocated\n", __func__);
+		rc = -EINVAL;
+		mutex_unlock(&ac->cmd_lock);
+		goto done;
+	}
+
+	buf_circ = kzalloc(sizeof(struct audio_buffer), GFP_KERNEL);
+
+	if (!buf_circ) {
+		rc = -ENOMEM;
+		mutex_unlock(&ac->cmd_lock);
+		goto done;
+	}
+
+	bytes_to_alloc = bufsz * bufcnt;
+	bytes_to_alloc = PAGE_ALIGN(bytes_to_alloc);
+
+	rc = msm_audio_ion_alloc("audio_client", &buf_circ->client,
+			&buf_circ->handle, bytes_to_alloc,
+			(ion_phys_addr_t *)&buf_circ->phys,
+			&len, &buf_circ->data);
+
+	if (rc) {
+		pr_err("%s: Audio ION alloc failed, rc = %d\n", __func__,
+				rc);
+		kfree(buf_circ);
+		mutex_unlock(&ac->cmd_lock);
+		goto done;
+	}
+
+	ac->port[dir].buf = buf_circ;
+	buf_circ->used = dir ^ 1;
+	buf_circ->size = bytes_to_alloc;
+	buf_circ->actual_size = bytes_to_alloc;
+	memset(buf_circ->data, 0, buf_circ->actual_size);
+
+	ac->port[dir].max_buf_cnt = 1;
+
+	open->shared_circ_buf_mem_pool_id = ADSP_MEMORY_MAP_SHMEM8_4K_POOL;
+	open->shared_circ_buf_num_regions = 1;
+	open->shared_circ_buf_property_flag = 0x00;
+	open->shared_circ_buf_start_phy_addr_lsw =
+			lower_32_bits(buf_circ->phys);
+	open->shared_circ_buf_start_phy_addr_msw =
+			msm_audio_populate_upper_32_bits(buf_circ->phys);
+	open->shared_circ_buf_size = bufsz * bufcnt;
+
+	open->map_region_circ_buf.shm_addr_lsw = lower_32_bits(buf_circ->phys);
+	open->map_region_circ_buf.shm_addr_msw =
+			msm_audio_populate_upper_32_bits(buf_circ->phys);
+	open->map_region_circ_buf.mem_size_bytes = bytes_to_alloc;
+
+	mutex_unlock(&ac->cmd_lock);
+done:
+	return rc;
+}
+
+
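+/*
+ * q6asm_set_shared_pos_buff - allocate the shared position buffer through
+ * which the DSP publishes its read/write index, and fill in the
+ * corresponding fields of the open command.
+ */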
+static
+int q6asm_set_shared_pos_buff(struct audio_client *ac,
+			       struct asm_stream_cmd_open_shared_io *open,
+			       int dir)
+{
+	struct audio_buffer *buf_pos = &ac->shared_pos_buf;
+	int rc;
+	size_t len;
+	int bytes_to_alloc = sizeof(struct asm_shared_position_buffer);
+
+	mutex_lock(&ac->cmd_lock);
+
+	bytes_to_alloc = PAGE_ALIGN(bytes_to_alloc);
+
+	rc = msm_audio_ion_alloc("audio_client", &buf_pos->client,
+			&buf_pos->handle, bytes_to_alloc,
+			(ion_phys_addr_t *)&buf_pos->phys, &len,
+			&buf_pos->data);
+
+	if (rc) {
+		pr_err("%s: Audio pos buf ION alloc failed, rc = %d\n",
+				__func__, rc);
+		goto done;
+	}
+
+	buf_pos->used = dir ^ 1;
+	buf_pos->size = bytes_to_alloc;
+	buf_pos->actual_size = bytes_to_alloc;
+
+	open->shared_pos_buf_mem_pool_id = ADSP_MEMORY_MAP_SHMEM8_4K_POOL;
+	open->shared_pos_buf_num_regions = 1;
+	open->shared_pos_buf_property_flag = 0x00;
+	open->shared_pos_buf_phy_addr_lsw = lower_32_bits(buf_pos->phys);
+	open->shared_pos_buf_phy_addr_msw =
+			msm_audio_populate_upper_32_bits(buf_pos->phys);
+
+	open->map_region_pos_buf.shm_addr_lsw = lower_32_bits(buf_pos->phys);
+	open->map_region_pos_buf.shm_addr_msw =
+			msm_audio_populate_upper_32_bits(buf_pos->phys);
+	open->map_region_pos_buf.mem_size_bytes = bytes_to_alloc;
+
+done:
+	mutex_unlock(&ac->cmd_lock);
+	return rc;
+}
+
+/*
+ * q6asm_open_shared_io: Open an ASM session for pull mode (playback)
+ * or push mode (capture).
+ * parameters
+ *   ac - audio client handle
+ *   config - session parameters (channels, bits_per_sample, sr)
+ *   dir - stream direction (IN for playback, OUT for capture)
+ * returns 0 if successful, error code otherwise
+ */
+int q6asm_open_shared_io(struct audio_client *ac,
+			 struct shared_io_config *config,
+			 int dir)
+{
+	struct asm_stream_cmd_open_shared_io *open;
+	u8 *channel_mapping;
+	int i, size_of_open, num_watermarks, bufsz, bufcnt, rc, flags = 0;
+
+	if (!ac || !config)
+		return -EINVAL;
+
+	if (config->channels > PCM_FORMAT_MAX_NUM_CHANNEL) {
+		pr_err("%s: Invalid channel count %d\n", __func__,
+			config->channels);
+		return -EINVAL;
+	}
+
+	bufsz = config->bufsz;
+	bufcnt = config->bufcnt;
+	num_watermarks = 0;
+
+	ac->config = *config;
+
+	if (ac->session <= 0 || ac->session > SESSION_MAX) {
+		pr_err("%s: Session %d is out of bounds\n",
+			__func__, ac->session);
+		return -EINVAL;
+	}
+
+	size_of_open = sizeof(struct asm_stream_cmd_open_shared_io) +
+		(sizeof(struct asm_shared_watermark_level) * num_watermarks);
+
+	open = kzalloc(PAGE_ALIGN(size_of_open), GFP_KERNEL);
+	if (!open)
+		return -ENOMEM;
+
+	q6asm_stream_add_hdr(ac, &open->hdr, size_of_open, TRUE,
+				ac->stream_id);
+
+	atomic_set(&ac->cmd_state, 1);
+
+	pr_debug("%s: token = 0x%x, stream_id %d, session 0x%x, perf %d\n",
+		 __func__, open->hdr.token, ac->stream_id, ac->session,
+		 ac->perf_mode);
+
+	open->hdr.opcode =
+		dir == IN ? ASM_STREAM_CMD_OPEN_PULL_MODE_WRITE :
+		ASM_STREAM_CMD_OPEN_PUSH_MODE_READ;
+
+	pr_debug("%s perf_mode %d\n", __func__, ac->perf_mode);
+	if (dir == IN) {
+		if (ac->perf_mode == ULL_POST_PROCESSING_PCM_MODE)
+			flags = 4 << ASM_SHIFT_STREAM_PERF_FLAG_PULL_MODE_WRITE;
+		else if (ac->perf_mode == ULTRA_LOW_LATENCY_PCM_MODE)
+			flags = 2 << ASM_SHIFT_STREAM_PERF_FLAG_PULL_MODE_WRITE;
+		else if (ac->perf_mode == LOW_LATENCY_PCM_MODE)
+			flags = 1 << ASM_SHIFT_STREAM_PERF_FLAG_PULL_MODE_WRITE;
+		else
+			pr_err("Invalid perf mode for pull write\n");
+	} else {
+		if (ac->perf_mode == LOW_LATENCY_PCM_MODE)
+			flags = ASM_LOW_LATENCY_TX_STREAM_SESSION <<
+				ASM_SHIFT_STREAM_PERF_FLAG_PUSH_MODE_READ;
+		else
+			pr_err("Invalid perf mode for push read\n");
+	}
+
+	if (flags == 0) {
+		pr_err("%s: Invalid mode[%d]\n", __func__,
+		       ac->perf_mode);
+		kfree(open);
+		return -EINVAL;
+	}
+
+	pr_debug("open.mode_flags = 0x%x\n", flags);
+	open->mode_flags = flags;
+	open->endpoint_type = ASM_END_POINT_DEVICE_MATRIX;
+	open->topo_bits_per_sample = config->bits_per_sample;
+
+	open->topo_id = q6asm_get_asm_topology_cal();
+
+	if (config->format == FORMAT_LINEAR_PCM)
+		open->fmt_id = ASM_MEDIA_FMT_MULTI_CHANNEL_PCM_V3;
+	else {
+		pr_err("%s: Invalid format[%d]\n", __func__, config->format);
+		rc = -EINVAL;
+		goto done;
+	}
+
+	rc = q6asm_set_shared_circ_buff(ac, open, bufsz, bufcnt, dir);
+
+	if (rc)
+		goto done;
+
+	ac->port[dir].tmp_hdl = 0;
+
+	rc = q6asm_set_shared_pos_buff(ac, open, dir);
+
+	if (rc)
+		goto done;
+
+	/* asm_multi_channel_pcm_fmt_blk_v3 */
+	open->fmt.num_channels = config->channels;
+	open->fmt.bits_per_sample = config->bits_per_sample;
+	open->fmt.sample_rate = config->rate;
+	open->fmt.is_signed = 1;
+	open->fmt.sample_word_size = config->sample_word_size;
+
+	channel_mapping = open->fmt.channel_mapping;
+
+	memset(channel_mapping, 0, PCM_FORMAT_MAX_NUM_CHANNEL);
+
+	rc = q6asm_map_channels(channel_mapping, config->channels, false);
+	if (rc) {
+		pr_err("%s: Map channels failed, ret: %d\n", __func__, rc);
+		goto done;
+	}
+
+	open->num_watermark_levels = num_watermarks;
+	for (i = 0; i < num_watermarks; i++) {
+		open->watermark[i].watermark_level_bytes = i *
+				((bufsz * bufcnt) / num_watermarks);
+		pr_debug("%s: Watermark level set for %i\n",
+				__func__,
+				open->watermark[i].watermark_level_bytes);
+	}
+
+	rc = apr_send_pkt(ac->apr, (uint32_t *) open);
+	if (rc < 0) {
+		pr_err("%s: Open failed op[0x%x]rc[%d]\n",
+		       __func__, open->hdr.opcode, rc);
+		goto done;
+	}
+
+	pr_debug("%s: sent open apr pkt\n", __func__);
+	rc = wait_event_timeout(ac->cmd_wait,
+			(atomic_read(&ac->cmd_state) <= 0), 5*HZ);
+	if (!rc) {
+		pr_err("%s: Timeout. Waited for open write apr pkt rc[%d]\n",
+		       __func__, rc);
+		rc = -ETIMEDOUT;
+		goto done;
+	}
+
+	if (atomic_read(&ac->cmd_state) < 0) {
+		pr_err("%s: DSP returned error [%d]\n", __func__,
+				atomic_read(&ac->cmd_state));
+		rc = -EINVAL;
+		goto done;
+	}
+
+	ac->io_mode |= TUN_WRITE_IO_MODE;
+	rc = 0;
+done:
+	kfree(open);
+	return rc;
+}
+EXPORT_SYMBOL(q6asm_open_shared_io);
+
+/*
+ * q6asm_shared_io_buf: Returns handle to the shared circular buffer being
+ * used for pull/push mode.
+ * parameters
+ *   dir - used to identify input/output port
+ * returns buffer handle
+ */
+struct audio_buffer *q6asm_shared_io_buf(struct audio_client *ac,
+					 int dir)
+{
+	struct audio_port_data *port;
+
+	if (!ac) {
+		pr_err("%s: ac is null\n", __func__);
+		return NULL;
+	}
+	port = &ac->port[dir];
+	return port->buf;
+}
+EXPORT_SYMBOL(q6asm_shared_io_buf);
+
+/*
+ * q6asm_shared_io_free: Frees memory allocated for a pull/push session
+ * parameters
+ *  dir - port direction
+ * returns 0 if successful, error otherwise
+ */
+int q6asm_shared_io_free(struct audio_client *ac, int dir)
+{
+	struct audio_port_data *port;
+
+	if (!ac) {
+		pr_err("%s: audio client is null\n", __func__);
+		return -EINVAL;
+	}
+	port = &ac->port[dir];
+	mutex_lock(&ac->cmd_lock);
+	if (port->buf && port->buf->data) {
+		msm_audio_ion_free(port->buf->client, port->buf->handle);
+		port->buf->client = NULL;
+		port->buf->handle = NULL;
+		port->max_buf_cnt = 0;
+		kfree(port->buf);
+		port->buf = NULL;
+	}
+	if (ac->shared_pos_buf.data) {
+		msm_audio_ion_free(ac->shared_pos_buf.client,
+				ac->shared_pos_buf.handle);
+		ac->shared_pos_buf.client = NULL;
+		ac->shared_pos_buf.handle = NULL;
+	}
+	mutex_unlock(&ac->cmd_lock);
+	return 0;
+}
+EXPORT_SYMBOL(q6asm_shared_io_free);
+
+/*
+ * q6asm_get_shared_pos: Returns the current read index/write index as
+ * observed by the DSP. Note that this is an offset in the range
+ * [0, BUF_SIZE - 1].
+ * parameters - (all output)
+ *   read_index - offset
+ *   wall_clk_msw1 - ADSP wallclock msw
+ *   wall_clk_lsw1 - ADSP wallclock lsw
+ * returns 0 if successful, -EAGAIN if DSP failed to update after some
+ * retries
+ */
+int q6asm_get_shared_pos(struct audio_client *ac, uint32_t *read_index,
+			 uint32_t *wall_clk_msw1, uint32_t *wall_clk_lsw1)
+{
+	struct asm_shared_position_buffer *pos_buf;
+	uint32_t frame_cnt1, frame_cnt2;
+	int i, j;
+
+	if (!ac) {
+		pr_err("%s: audio client is null\n", __func__);
+		return -EINVAL;
+	}
+
+	pos_buf = ac->shared_pos_buf.data;
+
+	/* always try to get the latest update in the shared pos buffer */
+	for (i = 0; i < 2; i++) {
+		/* retry until there is an update from DSP */
+		for (j = 0; j < 5; j++) {
+			frame_cnt1 = pos_buf->frame_counter;
+			if (frame_cnt1 != 0)
+				break;
+		}
+
+		*wall_clk_msw1 = pos_buf->wall_clock_us_msw;
+		*wall_clk_lsw1 = pos_buf->wall_clock_us_lsw;
+		*read_index = pos_buf->index;
+		frame_cnt2 = pos_buf->frame_counter;
+
+		if (frame_cnt1 != frame_cnt2)
+			continue;
+		return 0;
+	}
+	pr_err("%s: out of retries trying to get a consistent read, try again\n",
+	       __func__);
+	return -EAGAIN;
+}
+
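+/*
+ * q6asm_run - start the session.
+ *
+ * Sends ASM_SESSION_CMD_RUN_V2 with the given flags and the 64-bit start
+ * timestamp split into msw_ts/lsw_ts, then waits up to 5 s for the DSP ack.
+ */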
+int q6asm_run(struct audio_client *ac, uint32_t flags,
+		uint32_t msw_ts, uint32_t lsw_ts)
+{
+	struct asm_session_cmd_run_v2 run;
+	int rc;
+
+	if (ac == NULL) {
+		pr_err("%s: APR handle NULL\n", __func__);
+		return -EINVAL;
+	}
+	if (ac->apr == NULL) {
+		pr_err("%s: AC APR handle NULL\n", __func__);
+		return -EINVAL;
+	}
+	pr_debug("%s: session[%d]\n", __func__, ac->session);
+
+	q6asm_add_hdr(ac, &run.hdr, sizeof(run), TRUE);
+	atomic_set(&ac->cmd_state, -1);
+
+	run.hdr.opcode = ASM_SESSION_CMD_RUN_V2;
+	run.flags    = flags;
+	run.time_lsw = lsw_ts;
+	run.time_msw = msw_ts;
+
+	config_debug_fs_run();
+
+	rc = apr_send_pkt(ac->apr, (uint32_t *) &run);
+	if (rc < 0) {
+		pr_err("%s: Command run failed[%d]\n",
+				__func__, rc);
+		rc = -EINVAL;
+		goto fail_cmd;
+	}
+
+	rc = wait_event_timeout(ac->cmd_wait,
+			(atomic_read(&ac->cmd_state) >= 0), 5*HZ);
+	if (!rc) {
+		pr_err("%s: timeout. waited for run success",
+				__func__);
+		rc = -ETIMEDOUT;
+		goto fail_cmd;
+	}
+	if (atomic_read(&ac->cmd_state) > 0) {
+		pr_err("%s: DSP returned error[%s]\n",
+				__func__, adsp_err_get_err_str(
+				atomic_read(&ac->cmd_state)));
+		rc = adsp_err_get_lnx_err_code(
+				atomic_read(&ac->cmd_state));
+		goto fail_cmd;
+	}
+
+	return 0;
+fail_cmd:
+	return rc;
+}
+
+static int __q6asm_run_nowait(struct audio_client *ac, uint32_t flags,
+		uint32_t msw_ts, uint32_t lsw_ts, uint32_t stream_id)
+{
+	struct asm_session_cmd_run_v2 run;
+	int rc;
+
+	if (ac == NULL) {
+		pr_err("%s: APR handle NULL\n", __func__);
+		return -EINVAL;
+	}
+	if (ac->apr == NULL) {
+		pr_err("%s: AC APR handle NULL\n", __func__);
+		return -EINVAL;
+	}
+	pr_debug("%s: session[%d]\n", __func__, ac->session);
+
+	q6asm_stream_add_hdr_async(ac, &run.hdr, sizeof(run), TRUE, stream_id);
+	atomic_set(&ac->cmd_state, 1);
+	run.hdr.opcode = ASM_SESSION_CMD_RUN_V2;
+	run.flags    = flags;
+	run.time_lsw = lsw_ts;
+	run.time_msw = msw_ts;
+
+	rc = apr_send_pkt(ac->apr, (uint32_t *) &run);
+	if (rc < 0) {
+		pr_err("%s: Command run failed[%d]\n", __func__, rc);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+int q6asm_run_nowait(struct audio_client *ac, uint32_t flags,
+			uint32_t msw_ts, uint32_t lsw_ts)
+{
+	return __q6asm_run_nowait(ac, flags, msw_ts, lsw_ts, ac->stream_id);
+}
+
+int q6asm_stream_run_nowait(struct audio_client *ac, uint32_t flags,
+			uint32_t msw_ts, uint32_t lsw_ts, uint32_t stream_id)
+{
+	return __q6asm_run_nowait(ac, flags, msw_ts, lsw_ts, stream_id);
+}
+
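+/*
+ * q6asm_enc_cfg_blk_aac - configure the AAC encoder block (sample rate,
+ * channels, bit rate, encoder mode and AAC format flag) through
+ * ASM_STREAM_CMD_SET_ENCDEC_PARAM.
+ */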
+int q6asm_enc_cfg_blk_aac(struct audio_client *ac,
+			 uint32_t frames_per_buf,
+			uint32_t sample_rate, uint32_t channels,
+			uint32_t bit_rate, uint32_t mode, uint32_t format)
+{
+	struct asm_aac_enc_cfg_v2 enc_cfg;
+	int rc = 0;
+
+	pr_debug("%s: session[%d]frames[%d]SR[%d]ch[%d]bitrate[%d]mode[%d] format[%d]\n",
+		 __func__, ac->session, frames_per_buf,
+		sample_rate, channels, bit_rate, mode, format);
+
+	q6asm_add_hdr(ac, &enc_cfg.hdr, sizeof(enc_cfg), TRUE);
+	atomic_set(&ac->cmd_state, -1);
+
+	enc_cfg.hdr.opcode = ASM_STREAM_CMD_SET_ENCDEC_PARAM;
+	enc_cfg.encdec.param_id = ASM_PARAM_ID_ENCDEC_ENC_CFG_BLK_V2;
+	enc_cfg.encdec.param_size = sizeof(struct asm_aac_enc_cfg_v2) -
+				sizeof(struct asm_stream_cmd_set_encdec_param);
+	enc_cfg.encblk.frames_per_buf = frames_per_buf;
+	enc_cfg.encblk.enc_cfg_blk_size  = enc_cfg.encdec.param_size -
+				sizeof(struct asm_enc_cfg_blk_param_v2);
+	enc_cfg.bit_rate = bit_rate;
+	enc_cfg.enc_mode = mode;
+	enc_cfg.aac_fmt_flag = format;
+	enc_cfg.channel_cfg = channels;
+	enc_cfg.sample_rate = sample_rate;
+
+	rc = apr_send_pkt(ac->apr, (uint32_t *) &enc_cfg);
+	if (rc < 0) {
+		pr_err("%s: Command %d failed %d\n",
+			__func__, ASM_STREAM_CMD_SET_ENCDEC_PARAM, rc);
+		rc = -EINVAL;
+		goto fail_cmd;
+	}
+	rc = wait_event_timeout(ac->cmd_wait,
+			(atomic_read(&ac->cmd_state) >= 0), 5*HZ);
+	if (!rc) {
+		pr_err("%s: timeout. waited for FORMAT_UPDATE\n",
+			__func__);
+		rc = -ETIMEDOUT;
+		goto fail_cmd;
+	}
+	if (atomic_read(&ac->cmd_state) > 0) {
+		pr_err("%s: DSP returned error[%s]\n",
+				__func__, adsp_err_get_err_str(
+				atomic_read(&ac->cmd_state)));
+		rc = adsp_err_get_lnx_err_code(
+				atomic_read(&ac->cmd_state));
+		goto fail_cmd;
+	}
+	return 0;
+fail_cmd:
+	return rc;
+}
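+
+/*
+ * Usage sketch (hypothetical caller, not part of this patch): configure an
+ * AAC encoder for 48 kHz stereo at 128 kbps, one frame per buffer. aac_mode
+ * and aac_fmt stand in for the ASM AAC mode/format enum values the caller
+ * would normally pick; they are placeholders, not constants defined here.
+ *
+ *	uint32_t aac_mode = 0, aac_fmt = 0;	(placeholders)
+ *	rc = q6asm_enc_cfg_blk_aac(ac, 1, 48000, 2, 128000,
+ *				   aac_mode, aac_fmt);
+ */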
+
+int q6asm_enc_cfg_blk_g711(struct audio_client *ac,
+			uint32_t frames_per_buf,
+			uint32_t sample_rate)
+{
+	struct asm_g711_enc_cfg_v2 enc_cfg;
+	int rc = 0;
+
+	pr_debug("%s: session[%d]frames[%d]SR[%d]\n",
+		 __func__, ac->session, frames_per_buf,
+		sample_rate);
+
+	q6asm_add_hdr(ac, &enc_cfg.hdr, sizeof(enc_cfg), TRUE);
+	atomic_set(&ac->cmd_state, -1);
+
+	enc_cfg.hdr.opcode = ASM_STREAM_CMD_SET_ENCDEC_PARAM;
+	enc_cfg.encdec.param_id = ASM_PARAM_ID_ENCDEC_ENC_CFG_BLK_V2;
+	enc_cfg.encdec.param_size = sizeof(struct asm_g711_enc_cfg_v2) -
+				sizeof(struct asm_stream_cmd_set_encdec_param);
+	enc_cfg.encblk.frames_per_buf = frames_per_buf;
+	enc_cfg.encblk.enc_cfg_blk_size  = enc_cfg.encdec.param_size -
+				sizeof(struct asm_enc_cfg_blk_param_v2);
+	enc_cfg.sample_rate = sample_rate;
+
+	rc = apr_send_pkt(ac->apr, (uint32_t *) &enc_cfg);
+	if (rc < 0) {
+		pr_err("%s: Comamnd %d failed %d\n",
+			__func__, ASM_STREAM_CMD_SET_ENCDEC_PARAM, rc);
+		rc = -EINVAL;
+		goto fail_cmd;
+	}
+	rc = wait_event_timeout(ac->cmd_wait,
+			(atomic_read(&ac->cmd_state) >= 0), 5*HZ);
+	if (!rc) {
+		pr_err("%s: timeout. waited for FORMAT_UPDATE\n",
+			__func__);
+		rc = -ETIMEDOUT;
+		goto fail_cmd;
+	}
+	if (atomic_read(&ac->cmd_state) > 0) {
+		pr_err("%s: DSP returned error[%s]\n",
+				__func__, adsp_err_get_err_str(
+				atomic_read(&ac->cmd_state)));
+		rc = adsp_err_get_lnx_err_code(
+				atomic_read(&ac->cmd_state));
+		goto fail_cmd;
+	}
+	return 0;
+fail_cmd:
+	return rc;
+}
+
+int q6asm_set_encdec_chan_map(struct audio_client *ac,
+			uint32_t num_channels)
+{
+	struct asm_dec_out_chan_map_param chan_map;
+	u8 *channel_mapping;
+	int rc = 0;
+
+	pr_debug("%s: Session %d, num_channels = %d\n",
+			 __func__, ac->session, num_channels);
+
+	if (num_channels > MAX_CHAN_MAP_CHANNELS) {
+		pr_err("%s: Invalid channel count %d\n", __func__,
+				num_channels);
+		return -EINVAL;
+	}
+
+	q6asm_add_hdr(ac, &chan_map.hdr, sizeof(chan_map), TRUE);
+	atomic_set(&ac->cmd_state, -1);
+	chan_map.hdr.opcode = ASM_STREAM_CMD_SET_ENCDEC_PARAM;
+	chan_map.encdec.param_id = ASM_PARAM_ID_DEC_OUTPUT_CHAN_MAP;
+	chan_map.encdec.param_size = sizeof(struct asm_dec_out_chan_map_param) -
+			 (sizeof(struct apr_hdr) +
+			 sizeof(struct asm_stream_cmd_set_encdec_param));
+	chan_map.num_channels = num_channels;
+	channel_mapping = chan_map.channel_mapping;
+	memset(channel_mapping, PCM_CHANNEL_NULL, MAX_CHAN_MAP_CHANNELS);
+
+	if (q6asm_map_channels(channel_mapping, num_channels, false)) {
+		pr_err("%s: map channels failed %d\n", __func__, num_channels);
+		return -EINVAL;
+	}
+
+	rc = apr_send_pkt(ac->apr, (uint32_t *) &chan_map);
+	if (rc < 0) {
+		pr_err("%s: Command opcode[0x%x]paramid[0x%x] failed %d\n",
+			   __func__, ASM_STREAM_CMD_SET_ENCDEC_PARAM,
+			   ASM_PARAM_ID_DEC_OUTPUT_CHAN_MAP, rc);
+		goto fail_cmd;
+	}
+	rc = wait_event_timeout(ac->cmd_wait,
+				 (atomic_read(&ac->cmd_state) >= 0), 5*HZ);
+	if (!rc) {
+		pr_err("%s: timeout opcode[0x%x]\n", __func__,
+			   chan_map.hdr.opcode);
+		rc = -ETIMEDOUT;
+		goto fail_cmd;
+	}
+	if (atomic_read(&ac->cmd_state) > 0) {
+		pr_err("%s: DSP returned error[%s]\n",
+				__func__, adsp_err_get_err_str(
+				atomic_read(&ac->cmd_state)));
+		rc = adsp_err_get_lnx_err_code(
+				atomic_read(&ac->cmd_state));
+		goto fail_cmd;
+	}
+	return 0;
+fail_cmd:
+	return rc;
+}
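+
+/*
+ * Usage sketch (hypothetical caller, not part of this patch): ask the
+ * decoder to lay out its output as stereo using the default channel map.
+ *
+ *	rc = q6asm_set_encdec_chan_map(ac, 2);
+ */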
+
+/*
+ * q6asm_enc_cfg_blk_pcm_v4 - sends encoder configuration parameters
+ *
+ * @ac: Client session handle
+ * @rate: sample rate
+ * @channels: number of channels
+ * @bits_per_sample: bit width of encoder session
+ * @use_default_chmap: true if the default channel map is to be used
+ * @use_back_flavor: true to use the back (LB/RB) channels instead of the
+ *		     surround (LS/RS) pair for the rear positions
+ * @channel_map: input channel map
+ * @sample_word_size: Size in bits of the word that holds a sample of a channel
+ * @endianness: endianness of the pcm data
+ * @mode: Mode to provide additional info about the pcm input data
+ */
+int q6asm_enc_cfg_blk_pcm_v4(struct audio_client *ac,
+			     uint32_t rate, uint32_t channels,
+			     uint16_t bits_per_sample, bool use_default_chmap,
+			     bool use_back_flavor, u8 *channel_map,
+			     uint16_t sample_word_size, uint16_t endianness,
+			     uint16_t mode)
+{
+	struct asm_multi_channel_pcm_enc_cfg_v4 enc_cfg;
+	struct asm_enc_cfg_blk_param_v2 enc_fg_blk;
+	u8 *channel_mapping;
+	u32 frames_per_buf = 0;
+	int rc;
+
+	if (!use_default_chmap && (channel_map == NULL)) {
+		pr_err("%s: No valid chan map and can't use default\n",
+				__func__);
+		rc = -EINVAL;
+		goto fail_cmd;
+	}
+
+	if (channels > PCM_FORMAT_MAX_NUM_CHANNEL) {
+		pr_err("%s: Invalid channel count %d\n", __func__, channels);
+		rc = -EINVAL;
+		goto fail_cmd;
+	}
+
+	pr_debug("%s: session[%d]rate[%d]ch[%d]bps[%d]wordsize[%d]\n", __func__,
+		 ac->session, rate, channels,
+		 bits_per_sample, sample_word_size);
+
+	memset(&enc_cfg, 0, sizeof(enc_cfg));
+	q6asm_add_hdr(ac, &enc_cfg.hdr, sizeof(enc_cfg), TRUE);
+	atomic_set(&ac->cmd_state, -1);
+	enc_cfg.hdr.opcode = ASM_STREAM_CMD_SET_ENCDEC_PARAM;
+	enc_cfg.encdec.param_id = ASM_PARAM_ID_ENCDEC_ENC_CFG_BLK_V2;
+	enc_cfg.encdec.param_size = sizeof(enc_cfg) - sizeof(enc_cfg.hdr) -
+				    sizeof(enc_cfg.encdec);
+	enc_cfg.encblk.frames_per_buf = frames_per_buf;
+	enc_cfg.encblk.enc_cfg_blk_size = enc_cfg.encdec.param_size -
+					  sizeof(enc_fg_blk);
+	enc_cfg.num_channels = channels;
+	enc_cfg.bits_per_sample = bits_per_sample;
+	enc_cfg.sample_rate = rate;
+	enc_cfg.is_signed = 1;
+	enc_cfg.sample_word_size = sample_word_size;
+	enc_cfg.endianness = endianness;
+	enc_cfg.mode = mode;
+	channel_mapping = enc_cfg.channel_mapping;
+
+	memset(channel_mapping, 0, PCM_FORMAT_MAX_NUM_CHANNEL);
+
+	if (use_default_chmap) {
+		pr_debug("%s: setting default channel map for %d channels",
+			 __func__, channels);
+		if (q6asm_map_channels(channel_mapping, channels,
+					use_back_flavor)) {
+			pr_err("%s: map channels failed %d\n",
+			       __func__, channels);
+			rc = -EINVAL;
+			goto fail_cmd;
+		}
+	} else {
+		pr_debug("%s: Using pre-defined channel map", __func__);
+		memcpy(channel_mapping, channel_map,
+			PCM_FORMAT_MAX_NUM_CHANNEL);
+	}
+
+	rc = apr_send_pkt(ac->apr, (uint32_t *) &enc_cfg);
+	if (rc < 0) {
+		pr_err("%s: Command open failed %d\n", __func__, rc);
+		goto fail_cmd;
+	}
+	rc = wait_event_timeout(ac->cmd_wait,
+			(atomic_read(&ac->cmd_state) >= 0), 5*HZ);
+	if (!rc) {
+		pr_err("%s: timeout opcode[0x%x]\n",
+		       __func__, enc_cfg.hdr.opcode);
+		rc = -ETIMEDOUT;
+		goto fail_cmd;
+	}
+	if (atomic_read(&ac->cmd_state) > 0) {
+		pr_err("%s: DSP returned error[%s]\n",
+		       __func__, adsp_err_get_err_str(
+		       atomic_read(&ac->cmd_state)));
+		rc = adsp_err_get_lnx_err_code(
+				atomic_read(&ac->cmd_state));
+		goto fail_cmd;
+	}
+	return 0;
+fail_cmd:
+	return rc;
+}
+EXPORT_SYMBOL(q6asm_enc_cfg_blk_pcm_v4);
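+
+/*
+ * Usage sketch (hypothetical caller, not part of this patch): 48 kHz stereo,
+ * 16 valid bits carried in 16-bit words, default channel map. The endianness
+ * and mode values of 0 are assumed defaults (little endian / default mode).
+ *
+ *	rc = q6asm_enc_cfg_blk_pcm_v4(ac, 48000, 2, 16, true, false, NULL,
+ *				      16, 0, 0);
+ */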
+
+/*
+ * q6asm_enc_cfg_blk_pcm_v3 - sends encoder configuration parameters
+ *
+ * @ac: Client session handle
+ * @rate: sample rate
+ * @channels: number of channels
+ * @bits_per_sample: bit width of encoder session
+ * @use_default_chmap: true if the default channel map is to be used
+ * @use_back_flavor: true to use the back (LB/RB) channels instead of the
+ *		     surround (LS/RS) pair for the rear positions
+ * @channel_map: input channel map
+ * @sample_word_size: Size in bits of the word that holds a sample of a channel
+ */
+int q6asm_enc_cfg_blk_pcm_v3(struct audio_client *ac,
+			     uint32_t rate, uint32_t channels,
+			     uint16_t bits_per_sample, bool use_default_chmap,
+			     bool use_back_flavor, u8 *channel_map,
+			     uint16_t sample_word_size)
+{
+	struct asm_multi_channel_pcm_enc_cfg_v3 enc_cfg;
+	struct asm_enc_cfg_blk_param_v2 enc_fg_blk;
+	u8 *channel_mapping;
+	u32 frames_per_buf = 0;
+	int rc;
+
+	if (!use_default_chmap && (channel_map == NULL)) {
+		pr_err("%s: No valid chan map and can't use default\n",
+				__func__);
+		rc = -EINVAL;
+		goto fail_cmd;
+	}
+
+	if (channels > PCM_FORMAT_MAX_NUM_CHANNEL) {
+		pr_err("%s: Invalid channel count %d\n", __func__, channels);
+		rc = -EINVAL;
+		goto fail_cmd;
+	}
+
+	pr_debug("%s: session[%d]rate[%d]ch[%d]bps[%d]wordsize[%d]\n", __func__,
+		 ac->session, rate, channels,
+		 bits_per_sample, sample_word_size);
+
+	memset(&enc_cfg, 0, sizeof(enc_cfg));
+	q6asm_add_hdr(ac, &enc_cfg.hdr, sizeof(enc_cfg), TRUE);
+	atomic_set(&ac->cmd_state, -1);
+	enc_cfg.hdr.opcode = ASM_STREAM_CMD_SET_ENCDEC_PARAM;
+	enc_cfg.encdec.param_id = ASM_PARAM_ID_ENCDEC_ENC_CFG_BLK_V2;
+	enc_cfg.encdec.param_size = sizeof(enc_cfg) - sizeof(enc_cfg.hdr) -
+				    sizeof(enc_cfg.encdec);
+	enc_cfg.encblk.frames_per_buf = frames_per_buf;
+	enc_cfg.encblk.enc_cfg_blk_size = enc_cfg.encdec.param_size -
+					  sizeof(enc_fg_blk);
+	enc_cfg.num_channels = channels;
+	enc_cfg.bits_per_sample = bits_per_sample;
+	enc_cfg.sample_rate = rate;
+	enc_cfg.is_signed = 1;
+	enc_cfg.sample_word_size = sample_word_size;
+	channel_mapping = enc_cfg.channel_mapping;
+
+	memset(channel_mapping, 0, PCM_FORMAT_MAX_NUM_CHANNEL);
+
+	if (use_default_chmap) {
+		pr_debug("%s: setting default channel map for %d channels",
+			 __func__, channels);
+		if (q6asm_map_channels(channel_mapping, channels,
+					use_back_flavor)) {
+			pr_err("%s: map channels failed %d\n",
+			       __func__, channels);
+			rc = -EINVAL;
+			goto fail_cmd;
+		}
+	} else {
+		pr_debug("%s: Using pre-defined channel map", __func__);
+		memcpy(channel_mapping, channel_map,
+			PCM_FORMAT_MAX_NUM_CHANNEL);
+	}
+
+	rc = apr_send_pkt(ac->apr, (uint32_t *) &enc_cfg);
+	if (rc < 0) {
+		pr_err("%s: Comamnd open failed %d\n", __func__, rc);
+		goto fail_cmd;
+	}
+	rc = wait_event_timeout(ac->cmd_wait,
+			(atomic_read(&ac->cmd_state) >= 0), 5*HZ);
+	if (!rc) {
+		pr_err("%s: timeout opcode[0x%x]\n",
+		       __func__, enc_cfg.hdr.opcode);
+		rc = -ETIMEDOUT;
+		goto fail_cmd;
+	}
+	if (atomic_read(&ac->cmd_state) > 0) {
+		pr_err("%s: DSP returned error[%s]\n",
+		       __func__, adsp_err_get_err_str(
+		       atomic_read(&ac->cmd_state)));
+		rc = adsp_err_get_lnx_err_code(
+				atomic_read(&ac->cmd_state));
+		goto fail_cmd;
+	}
+	return 0;
+fail_cmd:
+	return rc;
+}
+EXPORT_SYMBOL(q6asm_enc_cfg_blk_pcm_v3);
+
+int q6asm_enc_cfg_blk_pcm_v2(struct audio_client *ac,
+		uint32_t rate, uint32_t channels, uint16_t bits_per_sample,
+		bool use_default_chmap, bool use_back_flavor, u8 *channel_map)
+{
+	struct asm_multi_channel_pcm_enc_cfg_v2 enc_cfg;
+	u8 *channel_mapping;
+	u32 frames_per_buf = 0;
+	int rc = 0;
+
+	if (!use_default_chmap && (channel_map == NULL)) {
+		pr_err("%s: No valid chan map and can't use default\n",
+				__func__);
+		return -EINVAL;
+	}
+
+	if (channels > PCM_FORMAT_MAX_NUM_CHANNEL) {
+		pr_err("%s: Invalid channel count %d\n", __func__, channels);
+		return -EINVAL;
+	}
+
+	pr_debug("%s: Session %d, rate = %d, channels = %d\n", __func__,
+			 ac->session, rate, channels);
+
+	q6asm_add_hdr(ac, &enc_cfg.hdr, sizeof(enc_cfg), TRUE);
+	atomic_set(&ac->cmd_state, -1);
+	enc_cfg.hdr.opcode = ASM_STREAM_CMD_SET_ENCDEC_PARAM;
+	enc_cfg.encdec.param_id = ASM_PARAM_ID_ENCDEC_ENC_CFG_BLK_V2;
+	enc_cfg.encdec.param_size = sizeof(enc_cfg) - sizeof(enc_cfg.hdr) -
+				sizeof(enc_cfg.encdec);
+	enc_cfg.encblk.frames_per_buf = frames_per_buf;
+	enc_cfg.encblk.enc_cfg_blk_size  = enc_cfg.encdec.param_size -
+					sizeof(struct asm_enc_cfg_blk_param_v2);
+
+	enc_cfg.num_channels = channels;
+	enc_cfg.bits_per_sample = bits_per_sample;
+	enc_cfg.sample_rate = rate;
+	enc_cfg.is_signed = 1;
+	channel_mapping = enc_cfg.channel_mapping;
+
+	memset(channel_mapping, 0, PCM_FORMAT_MAX_NUM_CHANNEL);
+
+	if (use_default_chmap) {
+		pr_debug("%s: setting default channel map for %d channels",
+		__func__, channels);
+		if (q6asm_map_channels(channel_mapping, channels,
+					use_back_flavor)) {
+			pr_err("%s: map channels failed %d\n",
+			 __func__, channels);
+			return -EINVAL;
+		}
+	} else {
+		pr_debug("%s: Using pre-defined channel map", __func__);
+		memcpy(channel_mapping, channel_map,
+			PCM_FORMAT_MAX_NUM_CHANNEL);
+	}
+
+	rc = apr_send_pkt(ac->apr, (uint32_t *) &enc_cfg);
+	if (rc < 0) {
+		pr_err("%s: Comamnd open failed %d\n", __func__, rc);
+		rc = -EINVAL;
+		goto fail_cmd;
+	}
+	rc = wait_event_timeout(ac->cmd_wait,
+			(atomic_read(&ac->cmd_state) >= 0), 5*HZ);
+	if (!rc) {
+		pr_err("%s: timeout opcode[0x%x]\n",
+			__func__, enc_cfg.hdr.opcode);
+		rc = -ETIMEDOUT;
+		goto fail_cmd;
+	}
+	if (atomic_read(&ac->cmd_state) > 0) {
+		pr_err("%s: DSP returned error[%s]\n",
+				__func__, adsp_err_get_err_str(
+				atomic_read(&ac->cmd_state)));
+		rc = adsp_err_get_lnx_err_code(
+				atomic_read(&ac->cmd_state));
+		goto fail_cmd;
+	}
+	return 0;
+fail_cmd:
+	return rc;
+}
+
+static int __q6asm_enc_cfg_blk_pcm_v4(struct audio_client *ac,
+				      uint32_t rate, uint32_t channels,
+				      uint16_t bits_per_sample,
+				      uint16_t sample_word_size,
+				      uint16_t endianness,
+				      uint16_t mode)
+{
+	return q6asm_enc_cfg_blk_pcm_v4(ac, rate, channels,
+					bits_per_sample, true, false, NULL,
+					sample_word_size, endianness, mode);
+}
+
+static int __q6asm_enc_cfg_blk_pcm_v3(struct audio_client *ac,
+				      uint32_t rate, uint32_t channels,
+				      uint16_t bits_per_sample,
+				      uint16_t sample_word_size)
+{
+	return q6asm_enc_cfg_blk_pcm_v3(ac, rate, channels,
+					bits_per_sample, true, false, NULL,
+					sample_word_size);
+}
+
+static int __q6asm_enc_cfg_blk_pcm(struct audio_client *ac,
+		uint32_t rate, uint32_t channels, uint16_t bits_per_sample)
+{
+	return q6asm_enc_cfg_blk_pcm_v2(ac, rate, channels,
+					bits_per_sample, true, false, NULL);
+}
+
+int q6asm_enc_cfg_blk_pcm(struct audio_client *ac,
+			uint32_t rate, uint32_t channels)
+{
+	return __q6asm_enc_cfg_blk_pcm(ac, rate, channels, 16);
+}
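+
+/*
+ * Usage sketch (hypothetical caller, not part of this patch): the common
+ * capture case, 16-bit 48 kHz stereo with the default channel map.
+ *
+ *	rc = q6asm_enc_cfg_blk_pcm(ac, 48000, 2);
+ */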
+
+int q6asm_enc_cfg_blk_pcm_format_support(struct audio_client *ac,
+		uint32_t rate, uint32_t channels, uint16_t bits_per_sample)
+{
+	return __q6asm_enc_cfg_blk_pcm(ac, rate, channels, bits_per_sample);
+}
+
+/*
+ * q6asm_enc_cfg_blk_pcm_format_support_v3 - sends encoder configuration
+ *                                           parameters
+ *
+ * @ac: Client session handle
+ * @rate: sample rate
+ * @channels: number of channels
+ * @bits_per_sample: bit width of encoder session
+ * @sample_word_size: Size in bits of the word that holds a sample of a channel
+ */
+int q6asm_enc_cfg_blk_pcm_format_support_v3(struct audio_client *ac,
+					    uint32_t rate, uint32_t channels,
+					    uint16_t bits_per_sample,
+					    uint16_t sample_word_size)
+{
+	return __q6asm_enc_cfg_blk_pcm_v3(ac, rate, channels,
+					  bits_per_sample, sample_word_size);
+}
+EXPORT_SYMBOL(q6asm_enc_cfg_blk_pcm_format_support_v3);
+
+/*
+ * q6asm_enc_cfg_blk_pcm_format_support_v4 - sends encoder configuration
+ *                                           parameters
+ *
+ * @ac: Client session handle
+ * @rate: sample rate
+ * @channels: number of channels
+ * @bits_per_sample: bit width of encoder session
+ * @sample_word_size: Size in bits of the word that holds a sample of a channel
+ * @endianness: endianness of the pcm data
+ * @mode: Mode to provide additional info about the pcm input data
+ */
+int q6asm_enc_cfg_blk_pcm_format_support_v4(struct audio_client *ac,
+					    uint32_t rate, uint32_t channels,
+					    uint16_t bits_per_sample,
+					    uint16_t sample_word_size,
+					    uint16_t endianness,
+					    uint16_t mode)
+{
+	return __q6asm_enc_cfg_blk_pcm_v4(ac, rate, channels,
+					  bits_per_sample, sample_word_size,
+					  endianness, mode);
+}
+EXPORT_SYMBOL(q6asm_enc_cfg_blk_pcm_format_support_v4);
+
+int q6asm_enc_cfg_blk_pcm_native(struct audio_client *ac,
+			uint32_t rate, uint32_t channels)
+{
+	struct asm_multi_channel_pcm_enc_cfg_v2 enc_cfg;
+	u8 *channel_mapping;
+	u32 frames_per_buf = 0;
+	int rc = 0;
+
+	if (channels > PCM_FORMAT_MAX_NUM_CHANNEL) {
+		pr_err("%s: Invalid channel count %d\n", __func__, channels);
+		return -EINVAL;
+	}
+
+	pr_debug("%s: Session %d, rate = %d, channels = %d\n", __func__,
+			 ac->session, rate, channels);
+
+	q6asm_add_hdr(ac, &enc_cfg.hdr, sizeof(enc_cfg), TRUE);
+	atomic_set(&ac->cmd_state, -1);
+	enc_cfg.hdr.opcode = ASM_STREAM_CMD_SET_ENCDEC_PARAM;
+	enc_cfg.encdec.param_id = ASM_PARAM_ID_ENCDEC_ENC_CFG_BLK_V2;
+	enc_cfg.encdec.param_size = sizeof(enc_cfg) - sizeof(enc_cfg.hdr) -
+				 sizeof(enc_cfg.encdec);
+	enc_cfg.encblk.frames_per_buf = frames_per_buf;
+	enc_cfg.encblk.enc_cfg_blk_size  = enc_cfg.encdec.param_size -
+				sizeof(struct asm_enc_cfg_blk_param_v2);
+
+	enc_cfg.num_channels = 0;	/* 0 = native, follow the input stream */
+	enc_cfg.bits_per_sample = 16;
+	enc_cfg.sample_rate = 0;	/* 0 = native, follow the input stream */
+	enc_cfg.is_signed = 1;
+	channel_mapping = enc_cfg.channel_mapping;
+
+	memset(channel_mapping, 0, PCM_FORMAT_MAX_NUM_CHANNEL);
+
+	if (q6asm_map_channels(channel_mapping, channels, false)) {
+		pr_err("%s: map channels failed %d\n", __func__, channels);
+		return -EINVAL;
+	}
+
+	rc = apr_send_pkt(ac->apr, (uint32_t *) &enc_cfg);
+	if (rc < 0) {
+		pr_err("%s: Comamnd open failed %d\n", __func__, rc);
+		rc = -EINVAL;
+		goto fail_cmd;
+	}
+	rc = wait_event_timeout(ac->cmd_wait,
+			(atomic_read(&ac->cmd_state) >= 0), 5*HZ);
+	if (!rc) {
+		pr_err("%s: timeout opcode[0x%x]\n",
+			__func__, enc_cfg.hdr.opcode);
+		rc = -ETIMEDOUT;
+		goto fail_cmd;
+	}
+	if (atomic_read(&ac->cmd_state) > 0) {
+		pr_err("%s: DSP returned error[%s]\n",
+				__func__, adsp_err_get_err_str(
+				atomic_read(&ac->cmd_state)));
+		rc = adsp_err_get_lnx_err_code(
+				atomic_read(&ac->cmd_state));
+		goto fail_cmd;
+	}
+	return 0;
+fail_cmd:
+	return rc;
+}
+
+static int q6asm_map_channels(u8 *channel_mapping, uint32_t channels,
+		bool use_back_flavor)
+{
+	u8 *lchannel_mapping = channel_mapping;
+
+	pr_debug("%s: channels passed: %d\n", __func__, channels);
+	if (channels == 1)  {
+		lchannel_mapping[0] = PCM_CHANNEL_FC;
+	} else if (channels == 2) {
+		lchannel_mapping[0] = PCM_CHANNEL_FL;
+		lchannel_mapping[1] = PCM_CHANNEL_FR;
+	} else if (channels == 3) {
+		lchannel_mapping[0] = PCM_CHANNEL_FL;
+		lchannel_mapping[1] = PCM_CHANNEL_FR;
+		lchannel_mapping[2] = PCM_CHANNEL_FC;
+	} else if (channels == 4) {
+		lchannel_mapping[0] = PCM_CHANNEL_FL;
+		lchannel_mapping[1] = PCM_CHANNEL_FR;
+		lchannel_mapping[2] = use_back_flavor ?
+			PCM_CHANNEL_LB : PCM_CHANNEL_LS;
+		lchannel_mapping[3] = use_back_flavor ?
+			PCM_CHANNEL_RB : PCM_CHANNEL_RS;
+	} else if (channels == 5) {
+		lchannel_mapping[0] = PCM_CHANNEL_FL;
+		lchannel_mapping[1] = PCM_CHANNEL_FR;
+		lchannel_mapping[2] = PCM_CHANNEL_FC;
+		lchannel_mapping[3] = use_back_flavor ?
+			PCM_CHANNEL_LB : PCM_CHANNEL_LS;
+		lchannel_mapping[4] = use_back_flavor ?
+			PCM_CHANNEL_RB : PCM_CHANNEL_RS;
+	} else if (channels == 6) {
+		lchannel_mapping[0] = PCM_CHANNEL_FL;
+		lchannel_mapping[1] = PCM_CHANNEL_FR;
+		lchannel_mapping[2] = PCM_CHANNEL_FC;
+		lchannel_mapping[3] = PCM_CHANNEL_LFE;
+		lchannel_mapping[4] = use_back_flavor ?
+			PCM_CHANNEL_LB : PCM_CHANNEL_LS;
+		lchannel_mapping[5] = use_back_flavor ?
+			PCM_CHANNEL_RB : PCM_CHANNEL_RS;
+	} else if (channels == 7) {
+		/*
+		 * Configured for 5.1 channel mapping + 1 channel for debug
+		 * Can be customized based on DSP.
+		 */
+		lchannel_mapping[0] = PCM_CHANNEL_FL;
+		lchannel_mapping[1] = PCM_CHANNEL_FR;
+		lchannel_mapping[2] = PCM_CHANNEL_FC;
+		lchannel_mapping[3] = PCM_CHANNEL_LFE;
+		lchannel_mapping[4] = use_back_flavor ?
+			PCM_CHANNEL_LB : PCM_CHANNEL_LS;
+		lchannel_mapping[5] = use_back_flavor ?
+			PCM_CHANNEL_RB : PCM_CHANNEL_RS;
+		lchannel_mapping[6] = PCM_CHANNEL_CS;
+	} else if (channels == 8) {
+		lchannel_mapping[0] = PCM_CHANNEL_FL;
+		lchannel_mapping[1] = PCM_CHANNEL_FR;
+		lchannel_mapping[2] = PCM_CHANNEL_FC;
+		lchannel_mapping[3] = PCM_CHANNEL_LFE;
+		lchannel_mapping[4] = PCM_CHANNEL_LB;
+		lchannel_mapping[5] = PCM_CHANNEL_RB;
+		lchannel_mapping[6] = PCM_CHANNEL_LS;
+		lchannel_mapping[7] = PCM_CHANNEL_RS;
+	} else {
+		pr_err("%s: ERROR.unsupported num_ch = %u\n",
+		 __func__, channels);
+		return -EINVAL;
+	}
+	return 0;
+}
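+
+/*
+ * For reference (not part of this patch): with channels = 6 and
+ * use_back_flavor = false the map above resolves to FL, FR, FC, LFE,
+ * LS, RS; with use_back_flavor = true the rear pair becomes LB, RB.
+ */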
+
+int q6asm_enable_sbrps(struct audio_client *ac,
+			uint32_t sbr_ps_enable)
+{
+	struct asm_aac_sbr_ps_flag_param sbrps;
+	u32 frames_per_buf = 0;
+	int rc = 0;
+
+	pr_debug("%s: Session %d\n", __func__, ac->session);
+
+	q6asm_add_hdr(ac, &sbrps.hdr, sizeof(sbrps), TRUE);
+	atomic_set(&ac->cmd_state, -1);
+
+	sbrps.hdr.opcode = ASM_STREAM_CMD_SET_ENCDEC_PARAM;
+	sbrps.encdec.param_id = ASM_PARAM_ID_AAC_SBR_PS_FLAG;
+	sbrps.encdec.param_size = sizeof(struct asm_aac_sbr_ps_flag_param) -
+				sizeof(struct asm_stream_cmd_set_encdec_param);
+	sbrps.encblk.frames_per_buf = frames_per_buf;
+	sbrps.encblk.enc_cfg_blk_size  = sbrps.encdec.param_size -
+				sizeof(struct asm_enc_cfg_blk_param_v2);
+
+	sbrps.sbr_ps_flag = sbr_ps_enable;
+
+	rc = apr_send_pkt(ac->apr, (uint32_t *) &sbrps);
+	if (rc < 0) {
+		pr_err("%s: Command opcode[0x%x]paramid[0x%x] failed %d\n",
+				__func__,
+				ASM_STREAM_CMD_SET_ENCDEC_PARAM,
+				ASM_PARAM_ID_AAC_SBR_PS_FLAG, rc);
+		rc = -EINVAL;
+		goto fail_cmd;
+	}
+	rc = wait_event_timeout(ac->cmd_wait,
+			(atomic_read(&ac->cmd_state) >= 0), 5*HZ);
+	if (!rc) {
+		pr_err("%s: timeout opcode[0x%x] ", __func__, sbrps.hdr.opcode);
+		rc = -ETIMEDOUT;
+		goto fail_cmd;
+	}
+	if (atomic_read(&ac->cmd_state) > 0) {
+		pr_err("%s: DSP returned error[%s]\n",
+				__func__, adsp_err_get_err_str(
+				atomic_read(&ac->cmd_state)));
+		rc = adsp_err_get_lnx_err_code(
+				atomic_read(&ac->cmd_state));
+		goto fail_cmd;
+	}
+	return 0;
+fail_cmd:
+	return rc;
+}
+
+int q6asm_cfg_dual_mono_aac(struct audio_client *ac,
+			uint16_t sce_left, uint16_t sce_right)
+{
+	struct asm_aac_dual_mono_mapping_param dual_mono;
+	int rc = 0;
+
+	pr_debug("%s: Session %d, sce_left = %d, sce_right = %d\n",
+			 __func__, ac->session, sce_left, sce_right);
+
+	q6asm_add_hdr(ac, &dual_mono.hdr, sizeof(dual_mono), TRUE);
+	atomic_set(&ac->cmd_state, -1);
+
+	dual_mono.hdr.opcode = ASM_STREAM_CMD_SET_ENCDEC_PARAM;
+	dual_mono.encdec.param_id = ASM_PARAM_ID_AAC_DUAL_MONO_MAPPING;
+	dual_mono.encdec.param_size = sizeof(dual_mono.left_channel_sce) +
+				      sizeof(dual_mono.right_channel_sce);
+	dual_mono.left_channel_sce = sce_left;
+	dual_mono.right_channel_sce = sce_right;
+
+	rc = apr_send_pkt(ac->apr, (uint32_t *) &dual_mono);
+	if (rc < 0) {
+		pr_err("%s: Command opcode[0x%x]paramid[0x%x] failed %d\n",
+				__func__, ASM_STREAM_CMD_SET_ENCDEC_PARAM,
+				ASM_PARAM_ID_AAC_DUAL_MONO_MAPPING, rc);
+		rc = -EINVAL;
+		goto fail_cmd;
+	}
+	rc = wait_event_timeout(ac->cmd_wait,
+			(atomic_read(&ac->cmd_state) >= 0), 5*HZ);
+	if (!rc) {
+		pr_err("%s: timeout opcode[0x%x]\n", __func__,
+						dual_mono.hdr.opcode);
+		rc = -ETIMEDOUT;
+		goto fail_cmd;
+	}
+	if (atomic_read(&ac->cmd_state) > 0) {
+		pr_err("%s: DSP returned error[%s]\n",
+				__func__, adsp_err_get_err_str(
+				atomic_read(&ac->cmd_state)));
+		rc = adsp_err_get_lnx_err_code(
+				atomic_read(&ac->cmd_state));
+		goto fail_cmd;
+	}
+	return 0;
+fail_cmd:
+	return rc;
+}
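+
+/*
+ * Usage sketch (hypothetical caller, not part of this patch): route the
+ * first single channel element of a dual-mono AAC stream to both outputs,
+ * i.e. play only the first language on left and right.
+ *
+ *	rc = q6asm_cfg_dual_mono_aac(ac, 0, 0);
+ */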
+
+/* Selecting stereo mixing coefficients for the B family is not yet supported */
+int q6asm_cfg_aac_sel_mix_coef(struct audio_client *ac, uint32_t mix_coeff)
+{
+	struct asm_aac_stereo_mix_coeff_selection_param_v2 aac_mix_coeff;
+	int rc = 0;
+
+	q6asm_add_hdr(ac, &aac_mix_coeff.hdr, sizeof(aac_mix_coeff), TRUE);
+	atomic_set(&ac->cmd_state, -1);
+	aac_mix_coeff.hdr.opcode = ASM_STREAM_CMD_SET_ENCDEC_PARAM;
+	aac_mix_coeff.param_id =
+		ASM_PARAM_ID_AAC_STEREO_MIX_COEFF_SELECTION_FLAG_V2;
+	aac_mix_coeff.param_size =
+		sizeof(struct asm_aac_stereo_mix_coeff_selection_param_v2);
+	aac_mix_coeff.aac_stereo_mix_coeff_flag = mix_coeff;
+	pr_debug("%s: mix_coeff = %u\n", __func__, mix_coeff);
+	rc = apr_send_pkt(ac->apr, (uint32_t *) &aac_mix_coeff);
+	if (rc < 0) {
+		pr_err("%s: Command opcode[0x%x]paramid[0x%x] failed %d\n",
+			__func__, ASM_STREAM_CMD_SET_ENCDEC_PARAM,
+			ASM_PARAM_ID_AAC_STEREO_MIX_COEFF_SELECTION_FLAG_V2,
+			rc);
+		rc = -EINVAL;
+		goto fail_cmd;
+	}
+	rc = wait_event_timeout(ac->cmd_wait,
+		(atomic_read(&ac->cmd_state) >= 0), 5*HZ);
+	if (!rc) {
+		pr_err("%s: timeout opcode[0x%x]\n",
+			__func__, aac_mix_coeff.hdr.opcode);
+		rc = -ETIMEDOUT;
+		goto fail_cmd;
+	}
+	if (atomic_read(&ac->cmd_state) > 0) {
+		pr_err("%s: DSP returned error[%s]\n",
+				__func__, adsp_err_get_err_str(
+				atomic_read(&ac->cmd_state)));
+		rc = adsp_err_get_lnx_err_code(
+				atomic_read(&ac->cmd_state));
+		goto fail_cmd;
+	}
+	return 0;
+fail_cmd:
+	return rc;
+}
+
+int q6asm_enc_cfg_blk_qcelp(struct audio_client *ac, uint32_t frames_per_buf,
+		uint16_t min_rate, uint16_t max_rate,
+		uint16_t reduced_rate_level, uint16_t rate_modulation_cmd)
+{
+	struct asm_v13k_enc_cfg enc_cfg;
+	int rc = 0;
+
+	pr_debug("%s: session[%d]frames[%d]min_rate[0x%4x]max_rate[0x%4x] reduced_rate_level[0x%4x]rate_modulation_cmd[0x%4x]\n",
+		 __func__,
+		ac->session, frames_per_buf, min_rate, max_rate,
+		reduced_rate_level, rate_modulation_cmd);
+
+	q6asm_add_hdr(ac, &enc_cfg.hdr, sizeof(enc_cfg), TRUE);
+	atomic_set(&ac->cmd_state, -1);
+	enc_cfg.hdr.opcode = ASM_STREAM_CMD_SET_ENCDEC_PARAM;
+	enc_cfg.encdec.param_id = ASM_PARAM_ID_ENCDEC_ENC_CFG_BLK_V2;
+	enc_cfg.encdec.param_size = sizeof(struct asm_v13k_enc_cfg) -
+				sizeof(struct asm_stream_cmd_set_encdec_param);
+	enc_cfg.encblk.frames_per_buf = frames_per_buf;
+	enc_cfg.encblk.enc_cfg_blk_size  = enc_cfg.encdec.param_size -
+				sizeof(struct asm_enc_cfg_blk_param_v2);
+
+	enc_cfg.min_rate = min_rate;
+	enc_cfg.max_rate = max_rate;
+	enc_cfg.reduced_rate_cmd = reduced_rate_level;
+	enc_cfg.rate_mod_cmd = rate_modulation_cmd;
+
+	rc = apr_send_pkt(ac->apr, (uint32_t *) &enc_cfg);
+	if (rc < 0) {
+		pr_err("%s: Comamnd %d failed %d\n",
+			__func__, ASM_STREAM_CMD_SET_ENCDEC_PARAM, rc);
+		rc = -EINVAL;
+		goto fail_cmd;
+	}
+	rc = wait_event_timeout(ac->cmd_wait,
+			(atomic_read(&ac->cmd_state) >= 0), 5*HZ);
+	if (!rc) {
+		pr_err("%s: timeout. waited for setencdec v13k resp\n",
+			__func__);
+		rc = -ETIMEDOUT;
+		goto fail_cmd;
+	}
+	if (atomic_read(&ac->cmd_state) > 0) {
+		pr_err("%s: DSP returned error[%s]\n",
+				__func__, adsp_err_get_err_str(
+				atomic_read(&ac->cmd_state)));
+		rc = adsp_err_get_lnx_err_code(
+				atomic_read(&ac->cmd_state));
+		goto fail_cmd;
+	}
+	return 0;
+fail_cmd:
+	return rc;
+}
+
+int q6asm_enc_cfg_blk_evrc(struct audio_client *ac, uint32_t frames_per_buf,
+		uint16_t min_rate, uint16_t max_rate,
+		uint16_t rate_modulation_cmd)
+{
+	struct asm_evrc_enc_cfg enc_cfg;
+	int rc = 0;
+
+	pr_debug("%s: session[%d]frames[%d]min_rate[0x%4x]max_rate[0x%4x] rate_modulation_cmd[0x%4x]\n",
+		 __func__, ac->session,
+		frames_per_buf,	min_rate, max_rate, rate_modulation_cmd);
+
+	q6asm_add_hdr(ac, &enc_cfg.hdr, sizeof(enc_cfg), TRUE);
+	atomic_set(&ac->cmd_state, -1);
+	enc_cfg.hdr.opcode = ASM_STREAM_CMD_SET_ENCDEC_PARAM;
+	enc_cfg.encdec.param_id = ASM_PARAM_ID_ENCDEC_ENC_CFG_BLK_V2;
+	enc_cfg.encdec.param_size = sizeof(struct asm_evrc_enc_cfg) -
+				sizeof(struct asm_stream_cmd_set_encdec_param);
+	enc_cfg.encblk.frames_per_buf = frames_per_buf;
+	enc_cfg.encblk.enc_cfg_blk_size  = enc_cfg.encdec.param_size -
+				sizeof(struct asm_enc_cfg_blk_param_v2);
+
+	enc_cfg.min_rate = min_rate;
+	enc_cfg.max_rate = max_rate;
+	enc_cfg.rate_mod_cmd = rate_modulation_cmd;
+	enc_cfg.reserved = 0;
+
+	rc = apr_send_pkt(ac->apr, (uint32_t *) &enc_cfg);
+	if (rc < 0) {
+		pr_err("%s: Comamnd %d failed %d\n",
+			__func__, ASM_STREAM_CMD_SET_ENCDEC_PARAM, rc);
+		rc = -EINVAL;
+		goto fail_cmd;
+	}
+	rc = wait_event_timeout(ac->cmd_wait,
+			(atomic_read(&ac->cmd_state) >= 0), 5*HZ);
+	if (!rc) {
+		pr_err("%s: timeout. waited for encdec evrc\n", __func__);
+		rc = -ETIMEDOUT;
+		goto fail_cmd;
+	}
+	if (atomic_read(&ac->cmd_state) > 0) {
+		pr_err("%s: DSP returned error[%s]\n",
+				__func__, adsp_err_get_err_str(
+				atomic_read(&ac->cmd_state)));
+		rc = adsp_err_get_lnx_err_code(
+				atomic_read(&ac->cmd_state));
+		goto fail_cmd;
+	}
+	return 0;
+fail_cmd:
+	return rc;
+}
+
+int q6asm_enc_cfg_blk_amrnb(struct audio_client *ac, uint32_t frames_per_buf,
+			uint16_t band_mode, uint16_t dtx_enable)
+{
+	struct asm_amrnb_enc_cfg enc_cfg;
+	int rc = 0;
+
+	pr_debug("%s: session[%d]frames[%d]band_mode[0x%4x]dtx_enable[0x%4x]\n",
+		__func__, ac->session, frames_per_buf, band_mode, dtx_enable);
+
+	q6asm_add_hdr(ac, &enc_cfg.hdr, sizeof(enc_cfg), TRUE);
+	atomic_set(&ac->cmd_state, -1);
+	enc_cfg.hdr.opcode = ASM_STREAM_CMD_SET_ENCDEC_PARAM;
+	enc_cfg.encdec.param_id = ASM_PARAM_ID_ENCDEC_ENC_CFG_BLK_V2;
+	enc_cfg.encdec.param_size = sizeof(struct asm_amrnb_enc_cfg) -
+				sizeof(struct asm_stream_cmd_set_encdec_param);
+	enc_cfg.encblk.frames_per_buf = frames_per_buf;
+	enc_cfg.encblk.enc_cfg_blk_size  = enc_cfg.encdec.param_size -
+				sizeof(struct asm_enc_cfg_blk_param_v2);
+
+	enc_cfg.enc_mode = band_mode;
+	enc_cfg.dtx_mode = dtx_enable;
+
+	rc = apr_send_pkt(ac->apr, (uint32_t *) &enc_cfg);
+	if (rc < 0) {
+		pr_err("%s: Comamnd %d failed %d\n",
+			__func__, ASM_STREAM_CMD_SET_ENCDEC_PARAM, rc);
+		rc = -EINVAL;
+		goto fail_cmd;
+	}
+	rc = wait_event_timeout(ac->cmd_wait,
+			(atomic_read(&ac->cmd_state) >= 0), 5*HZ);
+	if (!rc) {
+		pr_err("%s: timeout. waited for set encdec amrnb\n", __func__);
+		rc = -ETIMEDOUT;
+		goto fail_cmd;
+	}
+	if (atomic_read(&ac->cmd_state) > 0) {
+		pr_err("%s: DSP returned error[%s]\n",
+				__func__, adsp_err_get_err_str(
+				atomic_read(&ac->cmd_state)));
+		rc = adsp_err_get_lnx_err_code(
+				atomic_read(&ac->cmd_state));
+		goto fail_cmd;
+	}
+	return 0;
+fail_cmd:
+	return rc;
+}
+
+int q6asm_enc_cfg_blk_amrwb(struct audio_client *ac, uint32_t frames_per_buf,
+			uint16_t band_mode, uint16_t dtx_enable)
+{
+	struct asm_amrwb_enc_cfg enc_cfg;
+	int rc = 0;
+
+	pr_debug("%s: session[%d]frames[%d]band_mode[0x%4x]dtx_enable[0x%4x]\n",
+		__func__, ac->session, frames_per_buf, band_mode, dtx_enable);
+
+	q6asm_add_hdr(ac, &enc_cfg.hdr, sizeof(enc_cfg), TRUE);
+	atomic_set(&ac->cmd_state, -1);
+	enc_cfg.hdr.opcode = ASM_STREAM_CMD_SET_ENCDEC_PARAM;
+	enc_cfg.encdec.param_id = ASM_PARAM_ID_ENCDEC_ENC_CFG_BLK_V2;
+	enc_cfg.encdec.param_size = sizeof(struct asm_amrwb_enc_cfg) -
+				sizeof(struct asm_stream_cmd_set_encdec_param);
+	enc_cfg.encblk.frames_per_buf = frames_per_buf;
+	enc_cfg.encblk.enc_cfg_blk_size  = enc_cfg.encdec.param_size -
+				sizeof(struct asm_enc_cfg_blk_param_v2);
+
+	enc_cfg.enc_mode = band_mode;
+	enc_cfg.dtx_mode = dtx_enable;
+
+	rc = apr_send_pkt(ac->apr, (uint32_t *) &enc_cfg);
+	if (rc < 0) {
+		pr_err("%s: Comamnd %d failed %d\n",
+			__func__, ASM_STREAM_CMD_SET_ENCDEC_PARAM, rc);
+		rc = -EINVAL;
+		goto fail_cmd;
+	}
+	rc = wait_event_timeout(ac->cmd_wait,
+			(atomic_read(&ac->cmd_state) >= 0), 5*HZ);
+	if (!rc) {
+		pr_err("%s: timeout. waited for FORMAT_UPDATE\n", __func__);
+		rc = -ETIMEDOUT;
+		goto fail_cmd;
+	}
+	if (atomic_read(&ac->cmd_state) > 0) {
+		pr_err("%s: DSP returned error[%s]\n",
+				__func__, adsp_err_get_err_str(
+				atomic_read(&ac->cmd_state)));
+		rc = adsp_err_get_lnx_err_code(
+				atomic_read(&ac->cmd_state));
+		goto fail_cmd;
+	}
+	return 0;
+fail_cmd:
+	return rc;
+}
+
+static int __q6asm_media_format_block_pcm(struct audio_client *ac,
+				uint32_t rate, uint32_t channels,
+				uint16_t bits_per_sample, int stream_id,
+				bool use_default_chmap, char *channel_map)
+{
+	struct asm_multi_channel_pcm_fmt_blk_v2 fmt;
+	u8 *channel_mapping;
+	int rc = 0;
+
+	if (channels > PCM_FORMAT_MAX_NUM_CHANNEL) {
+		pr_err("%s: Invalid channel count %d\n", __func__, channels);
+		return -EINVAL;
+	}
+
+	pr_debug("%s: session[%d]rate[%d]ch[%d]\n", __func__, ac->session, rate,
+		channels);
+
+	q6asm_stream_add_hdr(ac, &fmt.hdr, sizeof(fmt), TRUE, stream_id);
+	atomic_set(&ac->cmd_state, -1);
+	/*
+	 * Update the token field with the stream/session id for compressed
+	 * playback; the platform driver must know the stream with which the
+	 * command is associated.
+	 */
+	if (ac->io_mode & COMPRESSED_STREAM_IO)
+		q6asm_update_token(&fmt.hdr.token,
+				   ac->session,
+				   stream_id,
+				   0, /* Buffer index is NA */
+				   0, /* Direction flag is NA */
+				   WAIT_CMD);
+
+	pr_debug("%s: token = 0x%x, stream_id  %d, session 0x%x\n",
+		  __func__, fmt.hdr.token, stream_id, ac->session);
+
+	fmt.hdr.opcode = ASM_DATA_CMD_MEDIA_FMT_UPDATE_V2;
+	fmt.fmt_blk.fmt_blk_size = sizeof(fmt) - sizeof(fmt.hdr) -
+					sizeof(fmt.fmt_blk);
+	fmt.num_channels = channels;
+	fmt.bits_per_sample = bits_per_sample;
+	fmt.sample_rate = rate;
+	fmt.is_signed = 1;
+
+	channel_mapping = fmt.channel_mapping;
+
+	memset(channel_mapping, 0, PCM_FORMAT_MAX_NUM_CHANNEL);
+
+	if (use_default_chmap) {
+		if (q6asm_map_channels(channel_mapping, channels, false)) {
+			pr_err("%s: map channels failed %d\n",
+				__func__, channels);
+			return -EINVAL;
+		}
+	} else {
+		memcpy(channel_mapping, channel_map,
+			 PCM_FORMAT_MAX_NUM_CHANNEL);
+	}
+
+	rc = apr_send_pkt(ac->apr, (uint32_t *) &fmt);
+	if (rc < 0) {
+		pr_err("%s: Comamnd open failed %d\n", __func__, rc);
+		rc = -EINVAL;
+		goto fail_cmd;
+	}
+	rc = wait_event_timeout(ac->cmd_wait,
+			(atomic_read(&ac->cmd_state) >= 0), 5*HZ);
+	if (!rc) {
+		pr_err("%s: timeout. waited for format update\n", __func__);
+		rc = -ETIMEDOUT;
+		goto fail_cmd;
+	}
+	if (atomic_read(&ac->cmd_state) > 0) {
+		pr_err("%s: DSP returned error[%s]\n",
+				__func__, adsp_err_get_err_str(
+				atomic_read(&ac->cmd_state)));
+		rc = adsp_err_get_lnx_err_code(
+				atomic_read(&ac->cmd_state));
+		goto fail_cmd;
+	}
+	return 0;
+fail_cmd:
+	return rc;
+}
+
+static int __q6asm_media_format_block_pcm_v3(struct audio_client *ac,
+					     uint32_t rate, uint32_t channels,
+					     uint16_t bits_per_sample,
+					     int stream_id,
+					     bool use_default_chmap,
+					     char *channel_map,
+					     uint16_t sample_word_size)
+{
+	struct asm_multi_channel_pcm_fmt_blk_param_v3 fmt;
+	u8 *channel_mapping;
+	int rc;
+
+	if (channels > PCM_FORMAT_MAX_NUM_CHANNEL) {
+		pr_err("%s: Invalid channel count %d\n", __func__, channels);
+		return -EINVAL;
+	}
+
+	pr_debug("%s: session[%d]rate[%d]ch[%d]bps[%d]wordsize[%d]\n", __func__,
+		 ac->session, rate, channels,
+		 bits_per_sample, sample_word_size);
+
+	memset(&fmt, 0, sizeof(fmt));
+	q6asm_stream_add_hdr(ac, &fmt.hdr, sizeof(fmt), TRUE, stream_id);
+	atomic_set(&ac->cmd_state, -1);
+	/*
+	 * Update the token field with the stream/session id for compressed
+	 * playback; the platform driver must know the stream with which the
+	 * command is associated.
+	 */
+	if (ac->io_mode & COMPRESSED_STREAM_IO)
+		fmt.hdr.token = ((ac->session << 8) & 0xFFFF00) |
+				(stream_id & 0xFF);
+
+	pr_debug("%s: token = 0x%x, stream_id  %d, session 0x%x\n",
+		 __func__, fmt.hdr.token, stream_id, ac->session);
+
+	fmt.hdr.opcode = ASM_DATA_CMD_MEDIA_FMT_UPDATE_V2;
+	fmt.fmt_blk.fmt_blk_size = sizeof(fmt) - sizeof(fmt.hdr) -
+					sizeof(fmt.fmt_blk);
+	fmt.param.num_channels = channels;
+	fmt.param.bits_per_sample = bits_per_sample;
+	fmt.param.sample_rate = rate;
+	fmt.param.is_signed = 1;
+	fmt.param.sample_word_size = sample_word_size;
+	channel_mapping = fmt.param.channel_mapping;
+
+	memset(channel_mapping, 0, PCM_FORMAT_MAX_NUM_CHANNEL);
+
+	if (use_default_chmap) {
+		if (q6asm_map_channels(channel_mapping, channels, false)) {
+			pr_err("%s: map channels failed %d\n",
+			       __func__, channels);
+			rc = -EINVAL;
+			goto fail_cmd;
+		}
+	} else {
+		memcpy(channel_mapping, channel_map,
+			 PCM_FORMAT_MAX_NUM_CHANNEL);
+	}
+
+	rc = apr_send_pkt(ac->apr, (uint32_t *) &fmt);
+	if (rc < 0) {
+		pr_err("%s: Comamnd open failed %d\n", __func__, rc);
+		rc = -EINVAL;
+		goto fail_cmd;
+	}
+	rc = wait_event_timeout(ac->cmd_wait,
+			(atomic_read(&ac->cmd_state) >= 0), 5*HZ);
+	if (!rc) {
+		pr_err("%s: timeout. waited for format update\n", __func__);
+		rc = -ETIMEDOUT;
+		goto fail_cmd;
+	}
+	if (atomic_read(&ac->cmd_state) > 0) {
+		pr_err("%s: DSP returned error[%s]\n",
+			__func__, adsp_err_get_err_str(
+			atomic_read(&ac->cmd_state)));
+		rc = adsp_err_get_lnx_err_code(
+				atomic_read(&ac->cmd_state));
+		goto fail_cmd;
+	}
+	return 0;
+fail_cmd:
+	return rc;
+}
+
+static int __q6asm_media_format_block_pcm_v4(struct audio_client *ac,
+					     uint32_t rate, uint32_t channels,
+					     uint16_t bits_per_sample,
+					     int stream_id,
+					     bool use_default_chmap,
+					     char *channel_map,
+					     uint16_t sample_word_size,
+					     uint16_t endianness,
+					     uint16_t mode)
+{
+	struct asm_multi_channel_pcm_fmt_blk_param_v4 fmt;
+	u8 *channel_mapping;
+	int rc;
+
+	if (channels > PCM_FORMAT_MAX_NUM_CHANNEL) {
+		pr_err("%s: Invalid channel count %d\n", __func__, channels);
+		return -EINVAL;
+	}
+
+	pr_debug("%s: session[%d]rate[%d]ch[%d]bps[%d]wordsize[%d]\n", __func__,
+		 ac->session, rate, channels,
+		 bits_per_sample, sample_word_size);
+
+	memset(&fmt, 0, sizeof(fmt));
+	q6asm_stream_add_hdr(ac, &fmt.hdr, sizeof(fmt), TRUE, stream_id);
+	atomic_set(&ac->cmd_state, -1);
+	/*
+	 * Update the token field with the stream/session id for compressed
+	 * playback; the platform driver must know the stream with which the
+	 * command is associated.
+	 */
+	if (ac->io_mode & COMPRESSED_STREAM_IO)
+		fmt.hdr.token = ((ac->session << 8) & 0xFFFF00) |
+				(stream_id & 0xFF);
+
+	pr_debug("%s: token = 0x%x, stream_id  %d, session 0x%x\n",
+		 __func__, fmt.hdr.token, stream_id, ac->session);
+
+	fmt.hdr.opcode = ASM_DATA_CMD_MEDIA_FMT_UPDATE_V2;
+	fmt.fmt_blk.fmt_blk_size = sizeof(fmt) - sizeof(fmt.hdr) -
+					sizeof(fmt.fmt_blk);
+	fmt.param.num_channels = channels;
+	fmt.param.bits_per_sample = bits_per_sample;
+	fmt.param.sample_rate = rate;
+	fmt.param.is_signed = 1;
+	fmt.param.sample_word_size = sample_word_size;
+	fmt.param.endianness = endianness;
+	fmt.param.mode = mode;
+	channel_mapping = fmt.param.channel_mapping;
+
+	memset(channel_mapping, 0, PCM_FORMAT_MAX_NUM_CHANNEL);
+
+	if (use_default_chmap) {
+		if (q6asm_map_channels(channel_mapping, channels, false)) {
+			pr_err("%s: map channels failed %d\n",
+			       __func__, channels);
+			rc = -EINVAL;
+			goto fail_cmd;
+		}
+	} else {
+		memcpy(channel_mapping, channel_map,
+			 PCM_FORMAT_MAX_NUM_CHANNEL);
+	}
+
+	rc = apr_send_pkt(ac->apr, (uint32_t *) &fmt);
+	if (rc < 0) {
+		pr_err("%s: Comamnd open failed %d\n", __func__, rc);
+		rc = -EINVAL;
+		goto fail_cmd;
+	}
+	rc = wait_event_timeout(ac->cmd_wait,
+			(atomic_read(&ac->cmd_state) >= 0), 5*HZ);
+	if (!rc) {
+		pr_err("%s: timeout. waited for format update\n", __func__);
+		rc = -ETIMEDOUT;
+		goto fail_cmd;
+	}
+	if (atomic_read(&ac->cmd_state) > 0) {
+		pr_err("%s: DSP returned error[%s]\n",
+			__func__, adsp_err_get_err_str(
+			atomic_read(&ac->cmd_state)));
+		rc = adsp_err_get_lnx_err_code(
+				atomic_read(&ac->cmd_state));
+		goto fail_cmd;
+	}
+	return 0;
+fail_cmd:
+	return rc;
+}
+
+int q6asm_media_format_block_pcm(struct audio_client *ac,
+				uint32_t rate, uint32_t channels)
+{
+	return __q6asm_media_format_block_pcm(ac, rate,
+				channels, 16, ac->stream_id,
+				true, NULL);
+}
+
+int q6asm_media_format_block_pcm_format_support(struct audio_client *ac,
+				uint32_t rate, uint32_t channels,
+				uint16_t bits_per_sample)
+{
+	return __q6asm_media_format_block_pcm(ac, rate,
+				channels, bits_per_sample, ac->stream_id,
+				true, NULL);
+}
+
+int q6asm_media_format_block_pcm_format_support_v2(struct audio_client *ac,
+				uint32_t rate, uint32_t channels,
+				uint16_t bits_per_sample, int stream_id,
+				bool use_default_chmap, char *channel_map)
+{
+	if (!use_default_chmap && (channel_map == NULL)) {
+		pr_err("%s: No valid chan map and can't use default\n",
+			__func__);
+		return -EINVAL;
+	}
+	return __q6asm_media_format_block_pcm(ac, rate,
+				channels, bits_per_sample, stream_id,
+				use_default_chmap, channel_map);
+}
+
+/*
+ * q6asm_media_format_block_pcm_format_support_v3 - sends pcm decoder
+ *						    configuration parameters
+ *
+ * @ac: Client session handle
+ * @rate: sample rate
+ * @channels: number of channels
+ * @bits_per_sample: bit width of the decoder session
+ * @stream_id: stream id of stream to be associated with this session
+ * @use_default_chmap: true if the default channel map is to be used
+ * @channel_map: input channel map
+ * @sample_word_size: Size in bits of the word that holds a sample of a channel
+ */
+int q6asm_media_format_block_pcm_format_support_v3(struct audio_client *ac,
+						   uint32_t rate,
+						   uint32_t channels,
+						   uint16_t bits_per_sample,
+						   int stream_id,
+						   bool use_default_chmap,
+						   char *channel_map,
+						   uint16_t sample_word_size)
+{
+	if (!use_default_chmap && (channel_map == NULL)) {
+		pr_err("%s: No valid chan map and can't use default\n",
+			__func__);
+		return -EINVAL;
+	}
+	return __q6asm_media_format_block_pcm_v3(ac, rate,
+				channels, bits_per_sample, stream_id,
+				use_default_chmap, channel_map,
+				sample_word_size);
+}
+EXPORT_SYMBOL(q6asm_media_format_block_pcm_format_support_v3);
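+
+/*
+ * Usage sketch (hypothetical caller, not part of this patch): 24 valid bits
+ * carried in 32-bit words, 48 kHz stereo on the client's current stream,
+ * default channel map.
+ *
+ *	rc = q6asm_media_format_block_pcm_format_support_v3(ac, 48000, 2, 24,
+ *							    ac->stream_id,
+ *							    true, NULL, 32);
+ */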
+
+/*
+ * q6asm_media_format_block_pcm_format_support_v4 - sends pcm decoder
+ *						    configuration parameters
+ *
+ * @ac: Client session handle
+ * @rate: sample rate
+ * @channels: number of channels
+ * @bits_per_sample: bit width of the decoder session
+ * @stream_id: stream id of stream to be associated with this session
+ * @use_default_chmap: true if the default channel map is to be used
+ * @channel_map: input channel map
+ * @sample_word_size: Size in bits of the word that holds a sample of a channel
+ * @endianness: endianness of the pcm data
+ * @mode: Mode to provide additional info about the pcm input data
+ */
+int q6asm_media_format_block_pcm_format_support_v4(struct audio_client *ac,
+						   uint32_t rate,
+						   uint32_t channels,
+						   uint16_t bits_per_sample,
+						   int stream_id,
+						   bool use_default_chmap,
+						   char *channel_map,
+						   uint16_t sample_word_size,
+						   uint16_t endianness,
+						   uint16_t mode)
+{
+	if (!use_default_chmap && (channel_map == NULL)) {
+		pr_err("%s: No valid chan map and can't use default\n",
+			__func__);
+		return -EINVAL;
+	}
+	return __q6asm_media_format_block_pcm_v4(ac, rate,
+				channels, bits_per_sample, stream_id,
+				use_default_chmap, channel_map,
+				sample_word_size, endianness,
+				mode);
+}
+EXPORT_SYMBOL(q6asm_media_format_block_pcm_format_support_v4);
+
+static int __q6asm_media_format_block_multi_ch_pcm(struct audio_client *ac,
+				uint32_t rate, uint32_t channels,
+				bool use_default_chmap, char *channel_map,
+				uint16_t bits_per_sample)
+{
+	struct asm_multi_channel_pcm_fmt_blk_v2 fmt;
+	u8 *channel_mapping;
+	int rc = 0;
+
+	if (channels > PCM_FORMAT_MAX_NUM_CHANNEL) {
+		pr_err("%s: Invalid channel count %d\n", __func__, channels);
+		return -EINVAL;
+	}
+
+	pr_debug("%s: session[%d]rate[%d]ch[%d]\n", __func__, ac->session, rate,
+		channels);
+
+	q6asm_add_hdr(ac, &fmt.hdr, sizeof(fmt), TRUE);
+	atomic_set(&ac->cmd_state, -1);
+
+	fmt.hdr.opcode = ASM_DATA_CMD_MEDIA_FMT_UPDATE_V2;
+	fmt.fmt_blk.fmt_blk_size = sizeof(fmt) - sizeof(fmt.hdr) -
+					sizeof(fmt.fmt_blk);
+	fmt.num_channels = channels;
+	fmt.bits_per_sample = bits_per_sample;
+	fmt.sample_rate = rate;
+	fmt.is_signed = 1;
+
+	channel_mapping = fmt.channel_mapping;
+
+	memset(channel_mapping, 0, PCM_FORMAT_MAX_NUM_CHANNEL);
+
+	if (use_default_chmap) {
+		if (q6asm_map_channels(channel_mapping, channels, false)) {
+			pr_err("%s: map channels failed %d\n",
+				__func__, channels);
+			return -EINVAL;
+		}
+	} else {
+		memcpy(channel_mapping, channel_map,
+			 PCM_FORMAT_MAX_NUM_CHANNEL);
+	}
+
+	rc = apr_send_pkt(ac->apr, (uint32_t *) &fmt);
+	if (rc < 0) {
+		pr_err("%s: Comamnd open failed %d\n", __func__, rc);
+		rc = -EINVAL;
+		goto fail_cmd;
+	}
+	rc = wait_event_timeout(ac->cmd_wait,
+			(atomic_read(&ac->cmd_state) >= 0), 5*HZ);
+	if (!rc) {
+		pr_err("%s: timeout. waited for format update\n", __func__);
+		rc = -ETIMEDOUT;
+		goto fail_cmd;
+	}
+	if (atomic_read(&ac->cmd_state) > 0) {
+		pr_err("%s: DSP returned error[%s]\n",
+				__func__, adsp_err_get_err_str(
+				atomic_read(&ac->cmd_state)));
+		rc = adsp_err_get_lnx_err_code(
+				atomic_read(&ac->cmd_state));
+		goto fail_cmd;
+	}
+	return 0;
+fail_cmd:
+	return rc;
+}
+
+static int __q6asm_media_format_block_multi_ch_pcm_v3(struct audio_client *ac,
+						      uint32_t rate,
+						      uint32_t channels,
+						      bool use_default_chmap,
+						      char *channel_map,
+						      uint16_t bits_per_sample,
+						      uint16_t sample_word_size)
+{
+	struct asm_multi_channel_pcm_fmt_blk_param_v3 fmt;
+	u8 *channel_mapping;
+	int rc;
+
+	if (channels > PCM_FORMAT_MAX_NUM_CHANNEL) {
+		pr_err("%s: Invalid channel count %d\n", __func__, channels);
+		return -EINVAL;
+	}
+
+	pr_debug("%s: session[%d]rate[%d]ch[%d]bps[%d]wordsize[%d]\n", __func__,
+		 ac->session, rate, channels,
+		 bits_per_sample, sample_word_size);
+
+	memset(&fmt, 0, sizeof(fmt));
+	q6asm_add_hdr(ac, &fmt.hdr, sizeof(fmt), TRUE);
+	atomic_set(&ac->cmd_state, -1);
+
+	fmt.hdr.opcode = ASM_DATA_CMD_MEDIA_FMT_UPDATE_V2;
+	fmt.fmt_blk.fmt_blk_size = sizeof(fmt) - sizeof(fmt.hdr) -
+					sizeof(fmt.fmt_blk);
+	fmt.param.num_channels = channels;
+	fmt.param.bits_per_sample = bits_per_sample;
+	fmt.param.sample_rate = rate;
+	fmt.param.is_signed = 1;
+	fmt.param.sample_word_size = sample_word_size;
+	channel_mapping = fmt.param.channel_mapping;
+
+	memset(channel_mapping, 0, PCM_FORMAT_MAX_NUM_CHANNEL);
+
+	if (use_default_chmap) {
+		if (q6asm_map_channels(channel_mapping, channels, false)) {
+			pr_err("%s: map channels failed %d\n",
+			       __func__, channels);
+			rc = -EINVAL;
+			goto fail_cmd;
+		}
+	} else {
+		memcpy(channel_mapping, channel_map,
+			 PCM_FORMAT_MAX_NUM_CHANNEL);
+	}
+
+	rc = apr_send_pkt(ac->apr, (uint32_t *) &fmt);
+	if (rc < 0) {
+		pr_err("%s: Comamnd open failed %d\n", __func__, rc);
+		goto fail_cmd;
+	}
+	rc = wait_event_timeout(ac->cmd_wait,
+			(atomic_read(&ac->cmd_state) >= 0), 5*HZ);
+	if (!rc) {
+		pr_err("%s: timeout. waited for format update\n", __func__);
+		rc = -ETIMEDOUT;
+		goto fail_cmd;
+	}
+	if (atomic_read(&ac->cmd_state) > 0) {
+		pr_err("%s: DSP returned error[%s]\n",
+		       __func__, adsp_err_get_err_str(
+		       atomic_read(&ac->cmd_state)));
+		rc = adsp_err_get_lnx_err_code(
+				atomic_read(&ac->cmd_state));
+		goto fail_cmd;
+	}
+	return 0;
+fail_cmd:
+	return rc;
+}
+
+static int __q6asm_media_format_block_multi_ch_pcm_v4(struct audio_client *ac,
+						      uint32_t rate,
+						      uint32_t channels,
+						      bool use_default_chmap,
+						      char *channel_map,
+						      uint16_t bits_per_sample,
+						      uint16_t sample_word_size,
+						      uint16_t endianness,
+						      uint16_t mode)
+{
+	struct asm_multi_channel_pcm_fmt_blk_param_v4 fmt;
+	u8 *channel_mapping;
+	int rc;
+
+	if (channels > PCM_FORMAT_MAX_NUM_CHANNEL) {
+		pr_err("%s: Invalid channel count %d\n", __func__, channels);
+		return -EINVAL;
+	}
+
+	pr_debug("%s: session[%d]rate[%d]ch[%d]bps[%d]wordsize[%d]\n", __func__,
+		 ac->session, rate, channels,
+		 bits_per_sample, sample_word_size);
+
+	memset(&fmt, 0, sizeof(fmt));
+	q6asm_add_hdr(ac, &fmt.hdr, sizeof(fmt), TRUE);
+	atomic_set(&ac->cmd_state, -1);
+
+	fmt.hdr.opcode = ASM_DATA_CMD_MEDIA_FMT_UPDATE_V2;
+	fmt.fmt_blk.fmt_blk_size = sizeof(fmt) - sizeof(fmt.hdr) -
+					sizeof(fmt.fmt_blk);
+	fmt.param.num_channels = channels;
+	fmt.param.bits_per_sample = bits_per_sample;
+	fmt.param.sample_rate = rate;
+	fmt.param.is_signed = 1;
+	fmt.param.sample_word_size = sample_word_size;
+	fmt.param.endianness = endianness;
+	fmt.param.mode = mode;
+	channel_mapping = fmt.param.channel_mapping;
+
+	memset(channel_mapping, 0, PCM_FORMAT_MAX_NUM_CHANNEL);
+
+	if (use_default_chmap) {
+		if (q6asm_map_channels(channel_mapping, channels, false)) {
+			pr_err("%s: map channels failed %d\n",
+			       __func__, channels);
+			rc = -EINVAL;
+			goto fail_cmd;
+		}
+	} else {
+		memcpy(channel_mapping, channel_map,
+			 PCM_FORMAT_MAX_NUM_CHANNEL);
+	}
+
+	rc = apr_send_pkt(ac->apr, (uint32_t *) &fmt);
+	if (rc < 0) {
+		pr_err("%s: Comamnd open failed %d\n", __func__, rc);
+		goto fail_cmd;
+	}
+	rc = wait_event_timeout(ac->cmd_wait,
+			(atomic_read(&ac->cmd_state) >= 0), 5*HZ);
+	if (!rc) {
+		pr_err("%s: timeout. waited for format update\n", __func__);
+		rc = -ETIMEDOUT;
+		goto fail_cmd;
+	}
+	if (atomic_read(&ac->cmd_state) > 0) {
+		pr_err("%s: DSP returned error[%s]\n",
+		       __func__, adsp_err_get_err_str(
+		       atomic_read(&ac->cmd_state)));
+		rc = adsp_err_get_lnx_err_code(
+				atomic_read(&ac->cmd_state));
+		goto fail_cmd;
+	}
+	return 0;
+fail_cmd:
+	return rc;
+}
+
+int q6asm_media_format_block_multi_ch_pcm(struct audio_client *ac,
+		uint32_t rate, uint32_t channels,
+		bool use_default_chmap, char *channel_map)
+{
+	return __q6asm_media_format_block_multi_ch_pcm(ac, rate,
+			channels, use_default_chmap, channel_map, 16);
+}
+
+int q6asm_media_format_block_multi_ch_pcm_v2(
+		struct audio_client *ac,
+		uint32_t rate, uint32_t channels,
+		bool use_default_chmap, char *channel_map,
+		uint16_t bits_per_sample)
+{
+	return __q6asm_media_format_block_multi_ch_pcm(ac, rate,
+			channels, use_default_chmap, channel_map,
+			bits_per_sample);
+}
+
+/*
+ * q6asm_media_format_block_multi_ch_pcm_v3 - sends pcm decoder configuration
+ *                                            parameters
+ *
+ * @ac: Client session handle
+ * @rate: sample rate
+ * @channels: number of channels
+ * @bits_per_sample: bit width of the decoder session
+ * @use_default_chmap: true if the default channel map is to be used
+ * @channel_map: input channel map
+ * @sample_word_size: Size in bits of the word that holds a sample of a channel
+ */
+int q6asm_media_format_block_multi_ch_pcm_v3(struct audio_client *ac,
+					     uint32_t rate, uint32_t channels,
+					     bool use_default_chmap,
+					     char *channel_map,
+					     uint16_t bits_per_sample,
+					     uint16_t sample_word_size)
+{
+	return __q6asm_media_format_block_multi_ch_pcm_v3(ac, rate, channels,
+							  use_default_chmap,
+							  channel_map,
+							  bits_per_sample,
+							  sample_word_size);
+}
+EXPORT_SYMBOL(q6asm_media_format_block_multi_ch_pcm_v3);
+
+/*
+ * q6asm_media_format_block_multi_ch_pcm_v4 - sends pcm decoder configuration
+ *                                            parameters
+ *
+ * @ac: Client session handle
+ * @rate: sample rate
+ * @channels: number of channels
+ * @bits_per_sample: bit width of the decoder session
+ * @use_default_chmap: true if the default channel map is to be used
+ * @channel_map: input channel map
+ * @sample_word_size: Size in bits of the word that holds a sample of a channel
+ * @endianness: endianness of the pcm data
+ * @mode: Mode to provide additional info about the pcm input data
+ */
+int q6asm_media_format_block_multi_ch_pcm_v4(struct audio_client *ac,
+					     uint32_t rate, uint32_t channels,
+					     bool use_default_chmap,
+					     char *channel_map,
+					     uint16_t bits_per_sample,
+					     uint16_t sample_word_size,
+					     uint16_t endianness,
+					     uint16_t mode)
+{
+	return __q6asm_media_format_block_multi_ch_pcm_v4(ac, rate, channels,
+							  use_default_chmap,
+							  channel_map,
+							  bits_per_sample,
+							  sample_word_size,
+							  endianness,
+							  mode);
+}
+EXPORT_SYMBOL(q6asm_media_format_block_multi_ch_pcm_v4);
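+
+/*
+ * Usage sketch (hypothetical caller, not part of this patch): 5.1 playback,
+ * 24 valid bits in 32-bit words, default channel map; the endianness and
+ * mode values of 0 are assumed defaults.
+ *
+ *	rc = q6asm_media_format_block_multi_ch_pcm_v4(ac, 48000, 6, true,
+ *						      NULL, 24, 32, 0, 0);
+ */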
+
+/*
+ * q6asm_media_format_block_gen_compr - set up generic compress format params
+ *
+ * @ac: Client session handle
+ * @rate: sample rate
+ * @channels: number of channels
+ * @use_default_chmap: true if the default channel map is to be used
+ * @channel_map: input channel map
+ * @bits_per_sample: bit width of gen compress stream
+ */
+int q6asm_media_format_block_gen_compr(struct audio_client *ac,
+				uint32_t rate, uint32_t channels,
+				bool use_default_chmap, char *channel_map,
+				uint16_t bits_per_sample)
+{
+	struct asm_generic_compressed_fmt_blk_t fmt;
+	u8 *channel_mapping;
+	int rc = 0;
+
+	if (channels > PCM_FORMAT_MAX_NUM_CHANNEL) {
+		pr_err("%s: Invalid channel count %d\n", __func__, channels);
+		return -EINVAL;
+	}
+
+	pr_debug("%s: session[%d]rate[%d]ch[%d]bps[%d]\n",
+		 __func__, ac->session, rate,
+		 channels, bits_per_sample);
+
+	memset(&fmt, 0, sizeof(fmt));
+	q6asm_add_hdr(ac, &fmt.hdr, sizeof(fmt), TRUE);
+
+	fmt.hdr.opcode = ASM_DATA_CMD_MEDIA_FMT_UPDATE_V2;
+	fmt.fmt_blk.fmt_blk_size = sizeof(fmt) - sizeof(fmt.hdr) -
+					sizeof(fmt.fmt_blk);
+	fmt.num_channels = channels;
+	fmt.bits_per_sample = bits_per_sample;
+	fmt.sampling_rate = rate;
+
+	channel_mapping = fmt.channel_mapping;
+
+	memset(channel_mapping, 0, PCM_FORMAT_MAX_NUM_CHANNEL);
+
+	if (use_default_chmap) {
+		if (q6asm_map_channels(channel_mapping, channels, false)) {
+			pr_err("%s: map channels failed %d\n",
+				__func__, channels);
+			return -EINVAL;
+		}
+	} else {
+		memcpy(channel_mapping, channel_map,
+		       PCM_FORMAT_MAX_NUM_CHANNEL);
+	}
+
+	atomic_set(&ac->cmd_state, -1);
+	rc = apr_send_pkt(ac->apr, (uint32_t *) &fmt);
+	if (rc < 0) {
+		pr_err("%s: Comamnd open failed %d\n", __func__, rc);
+		rc = -EINVAL;
+		goto fail_cmd;
+	}
+	rc = wait_event_timeout(ac->cmd_wait,
+			(atomic_read(&ac->cmd_state) >= 0), 5*HZ);
+	if (!rc) {
+		pr_err("%s: timeout. waited for format update\n", __func__);
+		rc = -ETIMEDOUT;
+		goto fail_cmd;
+	}
+
+	if (atomic_read(&ac->cmd_state) > 0) {
+		pr_err("%s: DSP returned error[%s]\n",
+			__func__, adsp_err_get_err_str(
+			atomic_read(&ac->cmd_state)));
+		rc = adsp_err_get_lnx_err_code(
+				atomic_read(&ac->cmd_state));
+	}
+	return 0;
+fail_cmd:
+	return rc;
+}
+EXPORT_SYMBOL(q6asm_media_format_block_gen_compr);
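+
+/*
+ * Usage sketch (hypothetical caller, not part of this patch): a 2-channel
+ * generic compressed stream carried at 48 kHz in 16-bit words, default
+ * channel map.
+ *
+ *	rc = q6asm_media_format_block_gen_compr(ac, 48000, 2, true, NULL, 16);
+ */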
+
+/*
+ * q6asm_media_format_block_iec - set up IEC 61937 (compressed) or IEC 60958
+ *                                (pcm) format params. Both standards share
+ *                                the same format block and are used over
+ *                                HDMI or S/PDIF.
+ *
+ * @ac: Client session handle
+ * @rate: sample rate
+ * @channels: number of channels
+ */
+int q6asm_media_format_block_iec(struct audio_client *ac,
+				uint32_t rate, uint32_t channels)
+{
+	struct asm_iec_compressed_fmt_blk_t fmt;
+	int rc = 0;
+
+	pr_debug("%s: session[%d]rate[%d]ch[%d]\n",
+		 __func__, ac->session, rate,
+		 channels);
+
+	memset(&fmt, 0, sizeof(fmt));
+	q6asm_add_hdr(ac, &fmt.hdr, sizeof(fmt), TRUE);
+
+	fmt.hdr.opcode = ASM_DATA_CMD_IEC_60958_MEDIA_FMT;
+	fmt.num_channels = channels;
+	fmt.sampling_rate = rate;
+
+	atomic_set(&ac->cmd_state, -1);
+	rc = apr_send_pkt(ac->apr, (uint32_t *) &fmt);
+	if (rc < 0) {
+		pr_err("%s: Comamnd open failed %d\n", __func__, rc);
+		rc = -EINVAL;
+		goto fail_cmd;
+	}
+	rc = wait_event_timeout(ac->cmd_wait,
+			(atomic_read(&ac->cmd_state) >= 0), 5*HZ);
+	if (!rc) {
+		pr_err("%s: timeout. waited for format update\n", __func__);
+		rc = -ETIMEDOUT;
+		goto fail_cmd;
+	}
+
+	if (atomic_read(&ac->cmd_state) > 0) {
+		pr_err("%s: DSP returned error[%s]\n",
+			__func__, adsp_err_get_err_str(
+			atomic_read(&ac->cmd_state)));
+		rc = adsp_err_get_lnx_err_code(
+				atomic_read(&ac->cmd_state));
+	}
+	return 0;
+fail_cmd:
+	return rc;
+}
+EXPORT_SYMBOL(q6asm_media_format_block_iec);
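+
+/*
+ * Usage sketch (hypothetical caller, not part of this patch): IEC 61937
+ * pass-through of a 2-channel stream at a 48 kHz transmission rate.
+ *
+ *	rc = q6asm_media_format_block_iec(ac, 48000, 2);
+ */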
+
+static int __q6asm_media_format_block_multi_aac(struct audio_client *ac,
+				struct asm_aac_cfg *cfg, int stream_id)
+{
+	struct asm_aac_fmt_blk_v2 fmt;
+	int rc = 0;
+
+	pr_debug("%s: session[%d]rate[%d]ch[%d]\n", __func__, ac->session,
+		cfg->sample_rate, cfg->ch_cfg);
+
+	q6asm_stream_add_hdr(ac, &fmt.hdr, sizeof(fmt), TRUE, stream_id);
+	atomic_set(&ac->cmd_state, -1);
+	/*
+	 * Update the token field with the stream/session id for compressed
+	 * playback; the platform driver must know the stream with which the
+	 * command is associated.
+	 */
+	if (ac->io_mode & COMPRESSED_STREAM_IO)
+		q6asm_update_token(&fmt.hdr.token,
+				   ac->session,
+				   stream_id,
+				   0, /* Buffer index is NA */
+				   0, /* Direction flag is NA */
+				   WAIT_CMD);
+
+	pr_debug("%s: token = 0x%x, stream_id  %d, session 0x%x\n",
+		  __func__, fmt.hdr.token, stream_id, ac->session);
+	fmt.hdr.opcode = ASM_DATA_CMD_MEDIA_FMT_UPDATE_V2;
+	fmt.fmt_blk.fmt_blk_size = sizeof(fmt) - sizeof(fmt.hdr) -
+					sizeof(fmt.fmt_blk);
+	fmt.aac_fmt_flag = cfg->format;
+	fmt.audio_objype = cfg->aot;
+	/* If zero, the PCE is assumed to be available in the bitstream */
+	fmt.total_size_of_PCE_bits = 0;
+	fmt.channel_config = cfg->ch_cfg;
+	fmt.sample_rate = cfg->sample_rate;
+
+	pr_debug("%s: format=0x%x cfg_size=%d aac-cfg=0x%x aot=%d ch=%d sr=%d\n",
+			__func__, fmt.aac_fmt_flag, fmt.fmt_blk.fmt_blk_size,
+			fmt.aac_fmt_flag,
+			fmt.audio_objype,
+			fmt.channel_config,
+			fmt.sample_rate);
+	rc = apr_send_pkt(ac->apr, (uint32_t *) &fmt);
+	if (rc < 0) {
+		pr_err("%s: Comamnd open failed %d\n", __func__, rc);
+		rc = -EINVAL;
+		goto fail_cmd;
+	}
+	rc = wait_event_timeout(ac->cmd_wait,
+			(atomic_read(&ac->cmd_state) >= 0), 5*HZ);
+	if (!rc) {
+		pr_err("%s: timeout. waited for FORMAT_UPDATE\n", __func__);
+		rc = -ETIMEDOUT;
+		goto fail_cmd;
+	}
+	if (atomic_read(&ac->cmd_state) > 0) {
+		pr_err("%s: DSP returned error[%s]\n",
+				__func__, adsp_err_get_err_str(
+				atomic_read(&ac->cmd_state)));
+		rc = adsp_err_get_lnx_err_code(
+				atomic_read(&ac->cmd_state));
+		goto fail_cmd;
+	}
+	return 0;
+fail_cmd:
+	return rc;
+}
+
+int q6asm_media_format_block_multi_aac(struct audio_client *ac,
+				struct asm_aac_cfg *cfg)
+{
+	return __q6asm_media_format_block_multi_aac(ac, cfg, ac->stream_id);
+}
+
+int q6asm_media_format_block_aac(struct audio_client *ac,
+			struct asm_aac_cfg *cfg)
+{
+	return __q6asm_media_format_block_multi_aac(ac, cfg, ac->stream_id);
+}
+
+int q6asm_stream_media_format_block_aac(struct audio_client *ac,
+			struct asm_aac_cfg *cfg, int stream_id)
+{
+	return __q6asm_media_format_block_multi_aac(ac, cfg, stream_id);
+}
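+
+/*
+ * Example (illustrative sketch only): all three AAC entry points above
+ * funnel into __q6asm_media_format_block_multi_aac(); only the stream id
+ * differs. The field values below are assumptions for a 44.1 kHz stereo
+ * AAC-LC stream (audio object type 2 per MPEG-4):
+ *
+ *	struct asm_aac_cfg aac_cfg;
+ *
+ *	memset(&aac_cfg, 0, sizeof(aac_cfg));
+ *	aac_cfg.sample_rate = 44100;
+ *	aac_cfg.ch_cfg = 2;
+ *	aac_cfg.aot = 2;
+ *	aac_cfg.format = 0;	(bitstream format id, codec specific)
+ *	rc = q6asm_media_format_block_multi_aac(ac, &aac_cfg);
+ */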
+
+int q6asm_media_format_block_wma(struct audio_client *ac,
+				void *cfg, int stream_id)
+{
+	struct asm_wmastdv9_fmt_blk_v2 fmt;
+	struct asm_wma_cfg *wma_cfg = (struct asm_wma_cfg *)cfg;
+	int rc = 0;
+
+	pr_debug("session[%d]format_tag[0x%4x] rate[%d] ch[0x%4x] bps[%d], balign[0x%4x], bit_sample[0x%4x], ch_msk[%d], enc_opt[0x%4x]\n",
+		ac->session, wma_cfg->format_tag, wma_cfg->sample_rate,
+		wma_cfg->ch_cfg, wma_cfg->avg_bytes_per_sec,
+		wma_cfg->block_align, wma_cfg->valid_bits_per_sample,
+		wma_cfg->ch_mask, wma_cfg->encode_opt);
+
+	q6asm_stream_add_hdr(ac, &fmt.hdr, sizeof(fmt), TRUE, stream_id);
+	atomic_set(&ac->cmd_state, -1);
+
+	fmt.hdr.opcode = ASM_DATA_CMD_MEDIA_FMT_UPDATE_V2;
+	fmt.fmtblk.fmt_blk_size = sizeof(fmt) - sizeof(fmt.hdr) -
+					sizeof(fmt.fmtblk);
+	fmt.fmtag = wma_cfg->format_tag;
+	fmt.num_channels = wma_cfg->ch_cfg;
+	fmt.sample_rate = wma_cfg->sample_rate;
+	fmt.avg_bytes_per_sec = wma_cfg->avg_bytes_per_sec;
+	fmt.blk_align = wma_cfg->block_align;
+	fmt.bits_per_sample =
+			wma_cfg->valid_bits_per_sample;
+	fmt.channel_mask = wma_cfg->ch_mask;
+	fmt.enc_options = wma_cfg->encode_opt;
+
+	rc = apr_send_pkt(ac->apr, (uint32_t *) &fmt);
+	if (rc < 0) {
+		pr_err("%s: Comamnd open failed %d\n", __func__, rc);
+		rc = -EINVAL;
+		goto fail_cmd;
+	}
+	rc = wait_event_timeout(ac->cmd_wait,
+			(atomic_read(&ac->cmd_state) >= 0), 5*HZ);
+	if (!rc) {
+		pr_err("%s: timeout. waited for FORMAT_UPDATE\n", __func__);
+		rc = -ETIMEDOUT;
+		goto fail_cmd;
+	}
+	if (atomic_read(&ac->cmd_state) > 0) {
+		pr_err("%s: DSP returned error[%s]\n",
+				__func__, adsp_err_get_err_str(
+				atomic_read(&ac->cmd_state)));
+		rc = adsp_err_get_lnx_err_code(
+				atomic_read(&ac->cmd_state));
+		goto fail_cmd;
+	}
+	return 0;
+fail_cmd:
+	return rc;
+}
+
+int q6asm_media_format_block_wmapro(struct audio_client *ac,
+				void *cfg, int stream_id)
+{
+	struct asm_wmaprov10_fmt_blk_v2 fmt;
+	struct asm_wmapro_cfg *wmapro_cfg = (struct asm_wmapro_cfg *)cfg;
+	int rc = 0;
+
+	pr_debug("%s: session[%d]format_tag[0x%4x] rate[%d] ch[0x%4x] bps[%d], balign[0x%4x], bit_sample[0x%4x], ch_msk[%d], enc_opt[0x%4x], adv_enc_opt[0x%4x], adv_enc_opt2[0x%8x]\n",
+		__func__,
+		ac->session, wmapro_cfg->format_tag, wmapro_cfg->sample_rate,
+		wmapro_cfg->ch_cfg,  wmapro_cfg->avg_bytes_per_sec,
+		wmapro_cfg->block_align, wmapro_cfg->valid_bits_per_sample,
+		wmapro_cfg->ch_mask, wmapro_cfg->encode_opt,
+		wmapro_cfg->adv_encode_opt, wmapro_cfg->adv_encode_opt2);
+
+	q6asm_stream_add_hdr(ac, &fmt.hdr, sizeof(fmt), TRUE, stream_id);
+	atomic_set(&ac->cmd_state, -1);
+
+	fmt.hdr.opcode = ASM_DATA_CMD_MEDIA_FMT_UPDATE_V2;
+	fmt.fmtblk.fmt_blk_size = sizeof(fmt) - sizeof(fmt.hdr) -
+						sizeof(fmt.fmtblk);
+
+	fmt.fmtag = wmapro_cfg->format_tag;
+	fmt.num_channels = wmapro_cfg->ch_cfg;
+	fmt.sample_rate = wmapro_cfg->sample_rate;
+	fmt.avg_bytes_per_sec =
+				wmapro_cfg->avg_bytes_per_sec;
+	fmt.blk_align = wmapro_cfg->block_align;
+	fmt.bits_per_sample = wmapro_cfg->valid_bits_per_sample;
+	fmt.channel_mask = wmapro_cfg->ch_mask;
+	fmt.enc_options = wmapro_cfg->encode_opt;
+	fmt.usAdvancedEncodeOpt = wmapro_cfg->adv_encode_opt;
+	fmt.advanced_enc_options2 = wmapro_cfg->adv_encode_opt2;
+
+	rc = apr_send_pkt(ac->apr, (uint32_t *) &fmt);
+	if (rc < 0) {
+		pr_err("%s: Comamnd open failed %d\n", __func__, rc);
+		rc = -EINVAL;
+		goto fail_cmd;
+	}
+	rc = wait_event_timeout(ac->cmd_wait,
+			(atomic_read(&ac->cmd_state) >= 0), 5*HZ);
+	if (!rc) {
+		pr_err("%s: timeout. waited for FORMAT_UPDATE\n", __func__);
+		rc = -ETIMEDOUT;
+		goto fail_cmd;
+	}
+	if (atomic_read(&ac->cmd_state) > 0) {
+		pr_err("%s: DSP returned error[%s]\n",
+				__func__, adsp_err_get_err_str(
+				atomic_read(&ac->cmd_state)));
+		rc = adsp_err_get_lnx_err_code(
+				atomic_read(&ac->cmd_state));
+		goto fail_cmd;
+	}
+	return 0;
+fail_cmd:
+	return rc;
+}
+
+int q6asm_media_format_block_amrwbplus(struct audio_client *ac,
+				struct asm_amrwbplus_cfg *cfg)
+{
+	struct asm_amrwbplus_fmt_blk_v2 fmt;
+	int rc = 0;
+
+	pr_debug("%s: session[%d]band-mode[%d]frame-fmt[%d]ch[%d]\n",
+		__func__,
+		ac->session,
+		cfg->amr_band_mode,
+		cfg->amr_frame_fmt,
+		cfg->num_channels);
+
+	q6asm_add_hdr(ac, &fmt.hdr, sizeof(fmt), TRUE);
+	atomic_set(&ac->cmd_state, -1);
+
+	fmt.hdr.opcode = ASM_DATA_CMD_MEDIA_FMT_UPDATE_V2;
+	fmt.fmtblk.fmt_blk_size = sizeof(fmt) - sizeof(fmt.hdr) -
+					sizeof(fmt.fmtblk);
+	fmt.amr_frame_fmt = cfg->amr_frame_fmt;
+
+	rc = apr_send_pkt(ac->apr, (uint32_t *) &fmt);
+	if (rc < 0) {
+		pr_err("%s: Comamnd media format update failed.. %d\n",
+			__func__, rc);
+		rc = -EINVAL;
+		goto fail_cmd;
+	}
+	rc = wait_event_timeout(ac->cmd_wait,
+				(atomic_read(&ac->cmd_state) >= 0), 5*HZ);
+	if (!rc) {
+		pr_err("%s: timeout. waited for FORMAT_UPDATE\n", __func__);
+		rc = -ETIMEDOUT;
+		goto fail_cmd;
+	}
+	if (atomic_read(&ac->cmd_state) > 0) {
+		pr_err("%s: DSP returned error[%s]\n",
+				__func__, adsp_err_get_err_str(
+				atomic_read(&ac->cmd_state)));
+		rc = adsp_err_get_lnx_err_code(
+				atomic_read(&ac->cmd_state));
+		goto fail_cmd;
+	}
+	return 0;
+fail_cmd:
+	return rc;
+}
+
+int q6asm_stream_media_format_block_flac(struct audio_client *ac,
+				struct asm_flac_cfg *cfg, int stream_id)
+{
+	struct asm_flac_fmt_blk_v2 fmt;
+	int rc = 0;
+
+	pr_debug("%s :session[%d] rate[%d] ch[%d] size[%d] stream_id[%d]\n",
+		__func__, ac->session, cfg->sample_rate, cfg->ch_cfg,
+		cfg->sample_size, stream_id);
+
+	q6asm_stream_add_hdr(ac, &fmt.hdr, sizeof(fmt), TRUE, stream_id);
+	atomic_set(&ac->cmd_state, -1);
+
+	fmt.hdr.opcode = ASM_DATA_CMD_MEDIA_FMT_UPDATE_V2;
+	fmt.fmtblk.fmt_blk_size = sizeof(fmt) - sizeof(fmt.hdr) -
+						sizeof(fmt.fmtblk);
+
+	fmt.is_stream_info_present = cfg->stream_info_present;
+	fmt.num_channels = cfg->ch_cfg;
+	fmt.min_blk_size = cfg->min_blk_size;
+	fmt.max_blk_size = cfg->max_blk_size;
+	fmt.sample_rate = cfg->sample_rate;
+	fmt.min_frame_size = cfg->min_frame_size;
+	fmt.max_frame_size = cfg->max_frame_size;
+	fmt.sample_size = cfg->sample_size;
+
+	rc = apr_send_pkt(ac->apr, (uint32_t *) &fmt);
+	if (rc < 0) {
+		pr_err("%s :Comamnd media format update failed %d\n",
+				__func__, rc);
+		goto fail_cmd;
+	}
+	rc = wait_event_timeout(ac->cmd_wait,
+				(atomic_read(&ac->cmd_state) >= 0), 5*HZ);
+	if (!rc) {
+		pr_err("%s :timeout. waited for FORMAT_UPDATE\n", __func__);
+		rc = -ETIMEDOUT;
+		goto fail_cmd;
+	}
+
+	if (atomic_read(&ac->cmd_state) > 0) {
+		pr_err("%s: DSP returned error[%s]\n",
+				__func__, adsp_err_get_err_str(
+				atomic_read(&ac->cmd_state)));
+		rc = adsp_err_get_lnx_err_code(
+				atomic_read(&ac->cmd_state));
+		goto fail_cmd;
+	}
+	return 0;
+fail_cmd:
+	return rc;
+}
+
+int q6asm_media_format_block_alac(struct audio_client *ac,
+				struct asm_alac_cfg *cfg, int stream_id)
+{
+	struct asm_alac_fmt_blk_v2 fmt;
+	int rc = 0;
+
+	pr_debug("%s :session[%d]rate[%d]ch[%d]\n", __func__,
+		ac->session, cfg->sample_rate, cfg->num_channels);
+
+	q6asm_stream_add_hdr(ac, &fmt.hdr, sizeof(fmt), TRUE, stream_id);
+	atomic_set(&ac->cmd_state, -1);
+
+	fmt.hdr.opcode = ASM_DATA_CMD_MEDIA_FMT_UPDATE_V2;
+	fmt.fmtblk.fmt_blk_size = sizeof(fmt) - sizeof(fmt.hdr) -
+						sizeof(fmt.fmtblk);
+
+	fmt.frame_length = cfg->frame_length;
+	fmt.compatible_version = cfg->compatible_version;
+	fmt.bit_depth = cfg->bit_depth;
+	fmt.pb = cfg->pb;
+	fmt.mb = cfg->mb;
+	fmt.kb = cfg->kb;
+	fmt.num_channels = cfg->num_channels;
+	fmt.max_run = cfg->max_run;
+	fmt.max_frame_bytes = cfg->max_frame_bytes;
+	fmt.avg_bit_rate = cfg->avg_bit_rate;
+	fmt.sample_rate = cfg->sample_rate;
+	fmt.channel_layout_tag = cfg->channel_layout_tag;
+
+	rc = apr_send_pkt(ac->apr, (uint32_t *) &fmt);
+	if (rc < 0) {
+		pr_err("%s :Comamnd media format update failed %d\n",
+				__func__, rc);
+		goto fail_cmd;
+	}
+	rc = wait_event_timeout(ac->cmd_wait,
+				(atomic_read(&ac->cmd_state) >= 0), 5*HZ);
+	if (!rc) {
+		pr_err("%s :timeout. waited for FORMAT_UPDATE\n", __func__);
+		rc = -ETIMEDOUT;
+		goto fail_cmd;
+	}
+
+	if (atomic_read(&ac->cmd_state) > 0) {
+		pr_err("%s: DSP returned error[%s]\n",
+				__func__, adsp_err_get_err_str(
+				atomic_read(&ac->cmd_state)));
+		rc = adsp_err_get_lnx_err_code(
+				atomic_read(&ac->cmd_state));
+		goto fail_cmd;
+	}
+	return 0;
+fail_cmd:
+	return rc;
+}
+
+/*
+ * q6asm_media_format_block_g711 - sends G.711 decoder configuration
+ *                                 parameters
+ * @ac: Client session handle
+ * @cfg: Audio stream manager configuration parameters
+ * @stream_id: Stream id
+ */
+int q6asm_media_format_block_g711(struct audio_client *ac,
+				struct asm_g711_dec_cfg *cfg, int stream_id)
+{
+	struct asm_g711_dec_fmt_blk_v2 fmt;
+	int rc = 0;
+
+	if (!ac) {
+		pr_err("%s: audio client is null\n", __func__);
+		return -EINVAL;
+	}
+	if (!cfg) {
+		pr_err("%s: Invalid ASM config\n", __func__);
+		return -EINVAL;
+	}
+
+	if (stream_id <= 0) {
+		pr_err("%s: Invalid stream id\n", __func__);
+		return -EINVAL;
+	}
+
+	pr_debug("%s :session[%d]rate[%d]\n", __func__,
+		ac->session, cfg->sample_rate);
+
+	memset(&fmt, 0, sizeof(struct asm_g711_dec_fmt_blk_v2));
+
+	q6asm_stream_add_hdr(ac, &fmt.hdr, sizeof(fmt), TRUE, stream_id);
+	atomic_set(&ac->cmd_state, -1);
+
+	fmt.hdr.opcode = ASM_DATA_CMD_MEDIA_FMT_UPDATE_V2;
+	fmt.fmtblk.fmt_blk_size = sizeof(fmt) - sizeof(fmt.hdr) -
+						sizeof(fmt.fmtblk);
+
+	fmt.sample_rate = cfg->sample_rate;
+
+	rc = apr_send_pkt(ac->apr, (uint32_t *) &fmt);
+	if (rc < 0) {
+		pr_err("%s :Command media format update failed %d\n",
+				__func__, rc);
+		goto fail_cmd;
+	}
+	rc = wait_event_timeout(ac->cmd_wait,
+				(atomic_read(&ac->cmd_state) >= 0), 5*HZ);
+	if (!rc) {
+		pr_err("%s :timeout. waited for FORMAT_UPDATE\n", __func__);
+		rc = -ETIMEDOUT;
+		goto fail_cmd;
+	}
+
+	if (atomic_read(&ac->cmd_state) > 0) {
+		pr_err("%s: DSP returned error[%s]\n",
+				__func__, adsp_err_get_err_str(
+				atomic_read(&ac->cmd_state)));
+		rc = adsp_err_get_lnx_err_code(
+				atomic_read(&ac->cmd_state));
+		goto fail_cmd;
+	}
+	return 0;
+fail_cmd:
+	return rc;
+}
+EXPORT_SYMBOL(q6asm_media_format_block_g711);
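+
+/*
+ * Example (illustrative sketch only): G.711 decode at 8 kHz on an assumed
+ * already-opened session "ac" and stream id "stream_id":
+ *
+ *	struct asm_g711_dec_cfg g711_cfg;
+ *
+ *	memset(&g711_cfg, 0, sizeof(g711_cfg));
+ *	g711_cfg.sample_rate = 8000;
+ *	rc = q6asm_media_format_block_g711(ac, &g711_cfg, stream_id);
+ */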
+
+int q6asm_stream_media_format_block_vorbis(struct audio_client *ac,
+				struct asm_vorbis_cfg *cfg, int stream_id)
+{
+	struct asm_vorbis_fmt_blk_v2 fmt;
+	int rc = 0;
+
+	pr_debug("%s :session[%d] bit_stream_fmt[%d] stream_id[%d]\n",
+		__func__, ac->session, cfg->bit_stream_fmt, stream_id);
+
+	q6asm_stream_add_hdr(ac, &fmt.hdr, sizeof(fmt), TRUE, stream_id);
+	atomic_set(&ac->cmd_state, -1);
+
+	fmt.hdr.opcode = ASM_DATA_CMD_MEDIA_FMT_UPDATE_V2;
+	fmt.fmtblk.fmt_blk_size = sizeof(fmt) - sizeof(fmt.hdr) -
+						sizeof(fmt.fmtblk);
+
+	fmt.bit_stream_fmt = cfg->bit_stream_fmt;
+
+	rc = apr_send_pkt(ac->apr, (uint32_t *) &fmt);
+	if (rc < 0) {
+		pr_err("%s :Comamnd media format update failed %d\n",
+				__func__, rc);
+		goto fail_cmd;
+	}
+	rc = wait_event_timeout(ac->cmd_wait,
+				(atomic_read(&ac->cmd_state) >= 0), 5*HZ);
+	if (!rc) {
+		pr_err("%s :timeout. waited for FORMAT_UPDATE\n", __func__);
+		rc = -ETIMEDOUT;
+		goto fail_cmd;
+	}
+
+	if (atomic_read(&ac->cmd_state) > 0) {
+		pr_err("%s: DSP returned error[%s]\n",
+				__func__, adsp_err_get_err_str(
+				atomic_read(&ac->cmd_state)));
+		rc = adsp_err_get_lnx_err_code(
+				atomic_read(&ac->cmd_state));
+		goto fail_cmd;
+	}
+	return 0;
+fail_cmd:
+	return rc;
+}
+
+int q6asm_media_format_block_ape(struct audio_client *ac,
+				struct asm_ape_cfg *cfg, int stream_id)
+{
+	struct asm_ape_fmt_blk_v2 fmt;
+	int rc = 0;
+
+	pr_debug("%s :session[%d]rate[%d]ch[%d]\n", __func__,
+			ac->session, cfg->sample_rate, cfg->num_channels);
+
+	q6asm_stream_add_hdr(ac, &fmt.hdr, sizeof(fmt), TRUE, stream_id);
+	atomic_set(&ac->cmd_state, -1);
+
+	fmt.hdr.opcode = ASM_DATA_CMD_MEDIA_FMT_UPDATE_V2;
+	fmt.fmtblk.fmt_blk_size = sizeof(fmt) - sizeof(fmt.hdr) -
+		sizeof(fmt.fmtblk);
+
+	fmt.compatible_version = cfg->compatible_version;
+	fmt.compression_level = cfg->compression_level;
+	fmt.format_flags = cfg->format_flags;
+	fmt.blocks_per_frame = cfg->blocks_per_frame;
+	fmt.final_frame_blocks = cfg->final_frame_blocks;
+	fmt.total_frames = cfg->total_frames;
+	fmt.bits_per_sample = cfg->bits_per_sample;
+	fmt.num_channels = cfg->num_channels;
+	fmt.sample_rate = cfg->sample_rate;
+	fmt.seek_table_present = cfg->seek_table_present;
+
+	rc = apr_send_pkt(ac->apr, (uint32_t *) &fmt);
+	if (rc < 0) {
+		pr_err("%s :Comamnd media format update failed %d\n",
+				__func__, rc);
+		goto fail_cmd;
+	}
+	rc = wait_event_timeout(ac->cmd_wait,
+			(atomic_read(&ac->cmd_state) >= 0), 5*HZ);
+	if (!rc) {
+		pr_err("%s :timeout. waited for FORMAT_UPDATE\n", __func__);
+		rc = -ETIMEDOUT;
+		goto fail_cmd;
+	}
+
+	if (atomic_read(&ac->cmd_state) > 0) {
+		pr_err("%s: DSP returned error[%s]\n",
+				__func__, adsp_err_get_err_str(
+				atomic_read(&ac->cmd_state)));
+		rc = adsp_err_get_lnx_err_code(
+				atomic_read(&ac->cmd_state));
+		goto fail_cmd;
+	}
+	return 0;
+fail_cmd:
+	return rc;
+}
+
+/*
+ * q6asm_media_format_block_dsd - sends DSD decoder configuration
+ *                                parameters
+ *
+ * @ac: Client session handle
+ * @cfg: DSD media format configuration
+ * @stream_id: stream id of stream to be associated with this session
+ *
+ * Returns 0 on success or a negative error code on failure.
+ */
+int q6asm_media_format_block_dsd(struct audio_client *ac,
+				struct asm_dsd_cfg *cfg, int stream_id)
+{
+	struct asm_dsd_fmt_blk_v2 fmt;
+	int rc;
+
+	pr_debug("%s: session[%d] data_rate[%d] ch[%d]\n", __func__,
+		 ac->session, cfg->dsd_data_rate, cfg->num_channels);
+
+	memset(&fmt, 0, sizeof(fmt));
+	q6asm_stream_add_hdr(ac, &fmt.hdr, sizeof(fmt), TRUE, stream_id);
+
+	fmt.hdr.opcode = ASM_DATA_CMD_MEDIA_FMT_UPDATE_V2;
+	fmt.fmtblk.fmt_blk_size = sizeof(fmt) - sizeof(fmt.hdr) -
+					sizeof(fmt.fmtblk);
+
+	fmt.num_version = cfg->num_version;
+	fmt.is_bitwise_big_endian = cfg->is_bitwise_big_endian;
+	fmt.dsd_channel_block_size = cfg->dsd_channel_block_size;
+	fmt.num_channels = cfg->num_channels;
+	fmt.dsd_data_rate = cfg->dsd_data_rate;
+	atomic_set(&ac->cmd_state, -1);
+	rc = apr_send_pkt(ac->apr, (uint32_t *) &fmt);
+	if (rc < 0) {
+		pr_err("%s: Command DSD media format update failed, err: %d\n",
+			__func__, rc);
+		goto done;
+	}
+	rc = wait_event_timeout(ac->cmd_wait,
+				(atomic_read(&ac->cmd_state) >= 0), 5*HZ);
+	if (!rc) {
+		pr_err("%s: timeout. waited for DSD FORMAT_UPDATE\n", __func__);
+		rc = -ETIMEDOUT;
+		goto done;
+	}
+
+	if (atomic_read(&ac->cmd_state) > 0) {
+		pr_err("%s: DSP returned error[%s]\n",
+				__func__, adsp_err_get_err_str(
+				atomic_read(&ac->cmd_state)));
+		rc = adsp_err_get_lnx_err_code(
+				atomic_read(&ac->cmd_state));
+		goto done;
+	}
+	return 0;
+done:
+	return rc;
+}
+EXPORT_SYMBOL(q6asm_media_format_block_dsd);
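+
+/*
+ * Example (illustrative sketch only): stereo DSD64. Real callers take the
+ * field values from the container metadata; 2822400 (assuming the rate is
+ * carried in Hz) is used here purely for illustration:
+ *
+ *	struct asm_dsd_cfg dsd_cfg;
+ *
+ *	memset(&dsd_cfg, 0, sizeof(dsd_cfg));
+ *	dsd_cfg.num_channels = 2;
+ *	dsd_cfg.dsd_data_rate = 2822400;
+ *	rc = q6asm_media_format_block_dsd(ac, &dsd_cfg, stream_id);
+ */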
+
+int q6asm_stream_media_format_block_aptx_dec(struct audio_client *ac,
+						uint32_t srate, int stream_id)
+{
+	struct asm_aptx_dec_fmt_blk_v2 aptx_fmt;
+	int rc = 0;
+
+	if (!ac->session) {
+		pr_err("%s: ac session invalid\n", __func__);
+		rc = -EINVAL;
+		goto fail_cmd;
+	}
+	pr_debug("%s :session[%d] rate[%d] stream_id[%d]\n",
+		__func__, ac->session, srate, stream_id);
+
+	q6asm_stream_add_hdr(ac, &aptx_fmt.hdr, sizeof(aptx_fmt), TRUE,
+				stream_id);
+	atomic_set(&ac->cmd_state, -1);
+
+	aptx_fmt.hdr.opcode = ASM_DATA_CMD_MEDIA_FMT_UPDATE_V2;
+	aptx_fmt.fmtblk.fmt_blk_size = sizeof(aptx_fmt) - sizeof(aptx_fmt.hdr) -
+						sizeof(aptx_fmt.fmtblk);
+
+	aptx_fmt.sample_rate = srate;
+
+	rc = apr_send_pkt(ac->apr, (uint32_t *) &aptx_fmt);
+	if (rc < 0) {
+		pr_err("%s :Comamnd media format update failed %d\n",
+				__func__, rc);
+		goto fail_cmd;
+	}
+	rc = wait_event_timeout(ac->cmd_wait,
+				(atomic_read(&ac->cmd_state) >= 0), 5*HZ);
+	if (!rc) {
+		pr_err("%s :timeout. waited for FORMAT_UPDATE\n", __func__);
+		rc = -ETIMEDOUT;
+		goto fail_cmd;
+	}
+
+	if (atomic_read(&ac->cmd_state) > 0) {
+		pr_err("%s: DSP returned error[%s]\n",
+				__func__, adsp_err_get_err_str(
+				atomic_read(&ac->cmd_state)));
+		rc = adsp_err_get_lnx_err_code(
+				atomic_read(&ac->cmd_state));
+		goto fail_cmd;
+	}
+	rc = 0;
+fail_cmd:
+	return rc;
+}
+
+static int __q6asm_ds1_set_endp_params(struct audio_client *ac, int param_id,
+				int param_value, int stream_id)
+{
+	struct asm_dec_ddp_endp_param_v2 ddp_cfg;
+	int rc = 0;
+
+	pr_debug("%s: session[%d] stream[%d],param_id[%d]param_value[%d]",
+		 __func__, ac->session, stream_id, param_id, param_value);
+
+	q6asm_stream_add_hdr(ac, &ddp_cfg.hdr, sizeof(ddp_cfg), TRUE,
+			     stream_id);
+	atomic_set(&ac->cmd_state, -1);
+	/*
+	 * Update the token field with stream/session info for compressed
+	 * playback. The platform driver must know the stream with which
+	 * the command is associated.
+	 */
+	if (ac->io_mode & COMPRESSED_STREAM_IO)
+		q6asm_update_token(&ddp_cfg.hdr.token,
+				   ac->session,
+				   stream_id,
+				   0, /* Buffer index is NA */
+				   0, /* Direction flag is NA */
+				   WAIT_CMD);
+	ddp_cfg.hdr.opcode = ASM_STREAM_CMD_SET_ENCDEC_PARAM;
+	ddp_cfg.encdec.param_id = param_id;
+	ddp_cfg.encdec.param_size = sizeof(struct asm_dec_ddp_endp_param_v2) -
+				(sizeof(struct apr_hdr) +
+				sizeof(struct asm_stream_cmd_set_encdec_param));
+	ddp_cfg.endp_param_value = param_value;
+	rc = apr_send_pkt(ac->apr, (uint32_t *) &ddp_cfg);
+	if (rc < 0) {
+		pr_err("%s: Command opcode[0x%x] failed %d\n",
+			__func__, ASM_STREAM_CMD_SET_ENCDEC_PARAM, rc);
+		goto fail_cmd;
+	}
+	rc = wait_event_timeout(ac->cmd_wait,
+		(atomic_read(&ac->cmd_state) >= 0), 5*HZ);
+	if (!rc) {
+		pr_err("%s: timeout opcode[0x%x]\n", __func__,
+			ddp_cfg.hdr.opcode);
+		rc = -ETIMEDOUT;
+		goto fail_cmd;
+	}
+	if (atomic_read(&ac->cmd_state) > 0) {
+		pr_err("%s: DSP returned error[%s]\n",
+				__func__, adsp_err_get_err_str(
+				atomic_read(&ac->cmd_state)));
+		rc = adsp_err_get_lnx_err_code(
+				atomic_read(&ac->cmd_state));
+		goto fail_cmd;
+	}
+	return 0;
+fail_cmd:
+	return rc;
+}
+
+int q6asm_ds1_set_endp_params(struct audio_client *ac,
+			      int param_id, int param_value)
+{
+	return __q6asm_ds1_set_endp_params(ac, param_id, param_value,
+					   ac->stream_id);
+}
+
+int q6asm_ds1_set_stream_endp_params(struct audio_client *ac,
+				     int param_id, int param_value,
+				     int stream_id)
+{
+	return __q6asm_ds1_set_endp_params(ac, param_id, param_value,
+					   stream_id);
+}
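+
+/*
+ * Example (illustrative sketch only): both wrappers above resolve to
+ * __q6asm_ds1_set_endp_params(); the stream variant simply targets an
+ * explicit stream instead of ac->stream_id:
+ *
+ *	rc = q6asm_ds1_set_stream_endp_params(ac, param_id, param_value,
+ *					      stream_id);
+ */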
+
+int q6asm_memory_map(struct audio_client *ac, phys_addr_t buf_add, int dir,
+				uint32_t bufsz, uint32_t bufcnt)
+{
+	struct avs_cmd_shared_mem_map_regions *mmap_regions = NULL;
+	struct avs_shared_map_region_payload  *mregions = NULL;
+	struct audio_port_data *port = NULL;
+	void	*mmap_region_cmd = NULL;
+	void	*payload = NULL;
+	struct asm_buffer_node *buffer_node = NULL;
+	int	rc = 0;
+	int	cmd_size = 0;
+
+	if (!ac) {
+		pr_err("%s: APR handle NULL\n", __func__);
+		return -EINVAL;
+	}
+	if (ac->mmap_apr == NULL) {
+		pr_err("%s: mmap APR handle NULL\n", __func__);
+		return -EINVAL;
+	}
+	pr_debug("%s: Session[%d]\n", __func__, ac->session);
+
+	buffer_node = kmalloc(sizeof(struct asm_buffer_node), GFP_KERNEL);
+	if (!buffer_node) {
+		pr_err("%s: no memory\n", __func__);
+		return -ENOMEM;
+	}
+	cmd_size = sizeof(struct avs_cmd_shared_mem_map_regions)
+			+ sizeof(struct avs_shared_map_region_payload) * bufcnt;
+
+	mmap_region_cmd = kzalloc(cmd_size, GFP_KERNEL);
+	if (mmap_region_cmd == NULL) {
+		pr_err("%s: Mem alloc failed\n", __func__);
+		rc = -ENOMEM;
+		kfree(buffer_node);
+		return rc;
+	}
+	mmap_regions = (struct avs_cmd_shared_mem_map_regions *)
+							mmap_region_cmd;
+	q6asm_add_mmaphdr(ac, &mmap_regions->hdr, cmd_size, dir);
+	atomic_set(&ac->mem_state, -1);
+	mmap_regions->hdr.opcode = ASM_CMD_SHARED_MEM_MAP_REGIONS;
+	mmap_regions->mem_pool_id = ADSP_MEMORY_MAP_SHMEM8_4K_POOL;
+	mmap_regions->num_regions = bufcnt & 0x00ff;
+	mmap_regions->property_flag = 0x00;
+	payload = ((u8 *) mmap_region_cmd +
+		sizeof(struct avs_cmd_shared_mem_map_regions));
+	mregions = (struct avs_shared_map_region_payload *)payload;
+
+	ac->port[dir].tmp_hdl = 0;
+	port = &ac->port[dir];
+	pr_debug("%s: buf_add 0x%pK, bufsz: %d\n", __func__,
+		&buf_add, bufsz);
+	mregions->shm_addr_lsw = lower_32_bits(buf_add);
+	mregions->shm_addr_msw = msm_audio_populate_upper_32_bits(buf_add);
+	mregions->mem_size_bytes = bufsz;
+	++mregions;
+
+	rc = apr_send_pkt(ac->mmap_apr, (uint32_t *) mmap_region_cmd);
+	if (rc < 0) {
+		pr_err("%s: mmap op[0x%x]rc[%d]\n", __func__,
+					mmap_regions->hdr.opcode, rc);
+		rc = -EINVAL;
+		kfree(buffer_node);
+		goto fail_cmd;
+	}
+
+	rc = wait_event_timeout(ac->mem_wait,
+			(atomic_read(&ac->mem_state) >= 0 &&
+			 ac->port[dir].tmp_hdl), 5*HZ);
+	if (!rc) {
+		pr_err("%s: timeout. waited for memory_map\n", __func__);
+		rc = -ETIMEDOUT;
+		kfree(buffer_node);
+		goto fail_cmd;
+	}
+	if (atomic_read(&ac->mem_state) > 0) {
+		pr_err("%s: DSP returned error[%s] for memory_map\n",
+			__func__, adsp_err_get_err_str(
+			atomic_read(&ac->mem_state)));
+		rc = adsp_err_get_lnx_err_code(
+			atomic_read(&ac->mem_state));
+		kfree(buffer_node);
+		goto fail_cmd;
+	}
+	buffer_node->buf_phys_addr = buf_add;
+	buffer_node->mmap_hdl = ac->port[dir].tmp_hdl;
+	list_add_tail(&buffer_node->list, &ac->port[dir].mem_map_handle);
+	ac->port[dir].tmp_hdl = 0;
+	rc = 0;
+
+fail_cmd:
+	kfree(mmap_region_cmd);
+	return rc;
+}
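+
+/*
+ * Example (illustrative sketch only): mapping a single buffer for one
+ * direction and unmapping it again. "phys" and "size" are assumed to
+ * come from a prior audio ION allocation:
+ *
+ *	rc = q6asm_memory_map(ac, phys, IN, size, 1);
+ *	if (!rc)
+ *		rc = q6asm_memory_unmap(ac, phys, IN);
+ */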
+
+int q6asm_memory_unmap(struct audio_client *ac, phys_addr_t buf_add, int dir)
+{
+	struct avs_cmd_shared_mem_unmap_regions mem_unmap;
+	struct asm_buffer_node *buf_node = NULL;
+	struct list_head *ptr, *next;
+
+	int rc = 0;
+
+	if (!ac) {
+		pr_err("%s: APR handle NULL\n", __func__);
+		return -EINVAL;
+	}
+	if (this_mmap.apr == NULL) {
+		pr_err("%s: APR handle NULL\n", __func__);
+		return -EINVAL;
+	}
+	pr_debug("%s: Session[%d]\n", __func__, ac->session);
+
+	q6asm_add_mmaphdr(ac, &mem_unmap.hdr,
+			sizeof(struct avs_cmd_shared_mem_unmap_regions),
+			dir);
+	atomic_set(&ac->mem_state, -1);
+	mem_unmap.hdr.opcode = ASM_CMD_SHARED_MEM_UNMAP_REGIONS;
+	mem_unmap.mem_map_handle = 0;
+	list_for_each_safe(ptr, next, &ac->port[dir].mem_map_handle) {
+		buf_node = list_entry(ptr, struct asm_buffer_node,
+						list);
+		if (buf_node->buf_phys_addr == buf_add) {
+			pr_debug("%s: Found the element\n", __func__);
+			mem_unmap.mem_map_handle = buf_node->mmap_hdl;
+			break;
+		}
+	}
+	pr_debug("%s: mem_unmap-mem_map_handle: 0x%x\n",
+		__func__, mem_unmap.mem_map_handle);
+
+	if (mem_unmap.mem_map_handle == 0) {
+		pr_err("%s: Do not send null mem handle to DSP\n", __func__);
+		rc = 0;
+		goto fail_cmd;
+	}
+	rc = apr_send_pkt(ac->mmap_apr, (uint32_t *) &mem_unmap);
+	if (rc < 0) {
+		pr_err("%s: mem_unmap op[0x%x]rc[%d]\n", __func__,
+					mem_unmap.hdr.opcode, rc);
+		rc = -EINVAL;
+		goto fail_cmd;
+	}
+
+	rc = wait_event_timeout(ac->mem_wait,
+			(atomic_read(&ac->mem_state) >= 0), 5 * HZ);
+	if (!rc) {
+		pr_err("%s: timeout. waited for memory_unmap of handle 0x%x\n",
+			__func__, mem_unmap.mem_map_handle);
+		rc = -ETIMEDOUT;
+		goto fail_cmd;
+	} else if (atomic_read(&ac->mem_state) > 0) {
+		pr_err("%s DSP returned error [%s] map handle 0x%x\n",
+			__func__, adsp_err_get_err_str(
+			atomic_read(&ac->mem_state)),
+			mem_unmap.mem_map_handle);
+		rc = adsp_err_get_lnx_err_code(
+			atomic_read(&ac->mem_state));
+		goto fail_cmd;
+	} else if (atomic_read(&ac->unmap_cb_success) == 0) {
+		pr_err("%s: Error in mem unmap callback of handle 0x%x\n",
+			__func__, mem_unmap.mem_map_handle);
+		rc = -EINVAL;
+		goto fail_cmd;
+	}
+
+	rc = 0;
+fail_cmd:
+	list_for_each_safe(ptr, next, &ac->port[dir].mem_map_handle) {
+		buf_node = list_entry(ptr, struct asm_buffer_node,
+						list);
+		if (buf_node->buf_phys_addr == buf_add) {
+			list_del(&buf_node->list);
+			kfree(buf_node);
+			break;
+		}
+	}
+	return rc;
+}
+
+
+static int q6asm_memory_map_regions(struct audio_client *ac, int dir,
+				uint32_t bufsz, uint32_t bufcnt,
+				bool is_contiguous)
+{
+	struct avs_cmd_shared_mem_map_regions *mmap_regions = NULL;
+	struct avs_shared_map_region_payload  *mregions = NULL;
+	struct audio_port_data *port = NULL;
+	struct audio_buffer *ab = NULL;
+	void	*mmap_region_cmd = NULL;
+	void	*payload = NULL;
+	struct asm_buffer_node *buffer_node = NULL;
+	int	rc = 0;
+	int    i = 0;
+	uint32_t cmd_size = 0;
+	uint32_t bufcnt_t;
+	uint32_t bufsz_t;
+
+	if (!ac) {
+		pr_err("%s: APR handle NULL\n", __func__);
+		return -EINVAL;
+	}
+	if (ac->mmap_apr == NULL) {
+		pr_err("%s: mmap APR handle NULL\n", __func__);
+		return -EINVAL;
+	}
+	pr_debug("%s: Session[%d]\n", __func__, ac->session);
+
+	bufcnt_t = (is_contiguous) ? 1 : bufcnt;
+	bufsz_t = (is_contiguous) ? (bufsz * bufcnt) : bufsz;
+
+	if (is_contiguous) {
+		/* The size to memory map should be a multiple of 4K bytes */
+		bufsz_t = PAGE_ALIGN(bufsz_t);
+	}
+
+	if (bufcnt_t > (UINT_MAX
+			- sizeof(struct avs_cmd_shared_mem_map_regions))
+			/ sizeof(struct avs_shared_map_region_payload)) {
+		pr_err("%s: Unsigned Integer Overflow. bufcnt_t = %u\n",
+				__func__, bufcnt_t);
+		return -EINVAL;
+	}
+
+	cmd_size = sizeof(struct avs_cmd_shared_mem_map_regions)
+			+ (sizeof(struct avs_shared_map_region_payload)
+							* bufcnt_t);
+
+
+	if (bufcnt > (UINT_MAX / sizeof(struct asm_buffer_node))) {
+		pr_err("%s: Unsigned Integer Overflow. bufcnt = %u\n",
+				__func__, bufcnt);
+		return -EINVAL;
+	}
+
+	buffer_node = kzalloc(sizeof(struct asm_buffer_node) * bufcnt,
+				GFP_KERNEL);
+	if (!buffer_node) {
+		pr_err("%s: Mem alloc failed for asm_buffer_node\n",
+				__func__);
+		return -ENOMEM;
+	}
+
+	mmap_region_cmd = kzalloc(cmd_size, GFP_KERNEL);
+	if (mmap_region_cmd == NULL) {
+		pr_err("%s: Mem alloc failed\n", __func__);
+		rc = -ENOMEM;
+		kfree(buffer_node);
+		return rc;
+	}
+	mmap_regions = (struct avs_cmd_shared_mem_map_regions *)
+							mmap_region_cmd;
+	q6asm_add_mmaphdr(ac, &mmap_regions->hdr, cmd_size, dir);
+	atomic_set(&ac->mem_state, -1);
+	pr_debug("%s: mmap_region=0x%pK token=0x%x\n", __func__,
+		mmap_regions, ((ac->session << 8) | dir));
+
+	mmap_regions->hdr.opcode = ASM_CMD_SHARED_MEM_MAP_REGIONS;
+	mmap_regions->mem_pool_id = ADSP_MEMORY_MAP_SHMEM8_4K_POOL;
+	mmap_regions->num_regions = bufcnt_t;
+	mmap_regions->property_flag = 0x00;
+	pr_debug("%s: map_regions->nregions = %d\n", __func__,
+		mmap_regions->num_regions);
+	payload = ((u8 *) mmap_region_cmd +
+		sizeof(struct avs_cmd_shared_mem_map_regions));
+	mregions = (struct avs_shared_map_region_payload *)payload;
+
+	ac->port[dir].tmp_hdl = 0;
+	port = &ac->port[dir];
+	for (i = 0; i < bufcnt_t; i++) {
+		ab = &port->buf[i];
+		mregions->shm_addr_lsw = lower_32_bits(ab->phys);
+		mregions->shm_addr_msw =
+				msm_audio_populate_upper_32_bits(ab->phys);
+		mregions->mem_size_bytes = bufsz_t;
+		++mregions;
+	}
+
+	rc = apr_send_pkt(ac->mmap_apr, (uint32_t *) mmap_region_cmd);
+	if (rc < 0) {
+		pr_err("%s: mmap_regions op[0x%x]rc[%d]\n", __func__,
+					mmap_regions->hdr.opcode, rc);
+		rc = -EINVAL;
+		kfree(buffer_node);
+		goto fail_cmd;
+	}
+
+	rc = wait_event_timeout(ac->mem_wait,
+			(atomic_read(&ac->mem_state) >= 0), 5*HZ);
+	if (!rc) {
+		pr_err("%s: timeout. waited for memory_map\n", __func__);
+		rc = -ETIMEDOUT;
+		kfree(buffer_node);
+		goto fail_cmd;
+	}
+	if (atomic_read(&ac->mem_state) > 0) {
+		pr_err("%s DSP returned error for memory_map [%s]\n",
+			__func__, adsp_err_get_err_str(
+			atomic_read(&ac->mem_state)));
+		rc = adsp_err_get_lnx_err_code(
+			atomic_read(&ac->mem_state));
+		kfree(buffer_node);
+		goto fail_cmd;
+	}
+	mutex_lock(&ac->cmd_lock);
+
+	for (i = 0; i < bufcnt; i++) {
+		ab = &port->buf[i];
+		buffer_node[i].buf_phys_addr = ab->phys;
+		buffer_node[i].mmap_hdl = ac->port[dir].tmp_hdl;
+		list_add_tail(&buffer_node[i].list,
+			&ac->port[dir].mem_map_handle);
+		pr_debug("%s: i=%d, bufadd[i] = 0x%pK, maphdl[i] = 0x%x\n",
+			__func__, i, &buffer_node[i].buf_phys_addr,
+			buffer_node[i].mmap_hdl);
+	}
+	ac->port[dir].tmp_hdl = 0;
+	mutex_unlock(&ac->cmd_lock);
+	rc = 0;
+fail_cmd:
+	kfree(mmap_region_cmd);
+	return rc;
+}
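+
+/*
+ * Note on the contiguous path above: when is_contiguous is true, the
+ * bufcnt buffers are advertised to the DSP as one region of
+ * PAGE_ALIGN(bufsz * bufcnt) bytes, so a single
+ * avs_shared_map_region_payload entry is sent; otherwise one entry per
+ * buffer is built. For example, four 4096-byte buffers map as one
+ * 16384-byte region in the contiguous case.
+ */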
+
+static int q6asm_memory_unmap_regions(struct audio_client *ac, int dir)
+{
+	struct avs_cmd_shared_mem_unmap_regions mem_unmap;
+	struct audio_port_data *port = NULL;
+	struct asm_buffer_node *buf_node = NULL;
+	struct list_head *ptr, *next;
+	phys_addr_t buf_add;
+	int	rc = 0;
+	int	cmd_size = 0;
+
+	if (!ac) {
+		pr_err("%s: APR handle NULL\n", __func__);
+		return -EINVAL;
+	}
+	if (ac->mmap_apr == NULL) {
+		pr_err("%s: mmap APR handle NULL\n", __func__);
+		return -EINVAL;
+	}
+	pr_debug("%s: Session[%d]\n", __func__, ac->session);
+
+	cmd_size = sizeof(struct avs_cmd_shared_mem_unmap_regions);
+	q6asm_add_mmaphdr(ac, &mem_unmap.hdr, cmd_size, dir);
+	atomic_set(&ac->mem_state, -1);
+	port = &ac->port[dir];
+	buf_add = port->buf->phys;
+	mem_unmap.hdr.opcode = ASM_CMD_SHARED_MEM_UNMAP_REGIONS;
+	mem_unmap.mem_map_handle = 0;
+	list_for_each_safe(ptr, next, &ac->port[dir].mem_map_handle) {
+		buf_node = list_entry(ptr, struct asm_buffer_node,
+						list);
+		if (buf_node->buf_phys_addr == buf_add) {
+			pr_debug("%s: Found the element\n", __func__);
+			mem_unmap.mem_map_handle = buf_node->mmap_hdl;
+			break;
+		}
+	}
+
+	pr_debug("%s: mem_unmap-mem_map_handle: 0x%x\n",
+			__func__, mem_unmap.mem_map_handle);
+
+	if (mem_unmap.mem_map_handle == 0) {
+		pr_err("%s: Do not send null mem handle to DSP\n", __func__);
+		rc = 0;
+		goto fail_cmd;
+	}
+	rc = apr_send_pkt(ac->mmap_apr, (uint32_t *) &mem_unmap);
+	if (rc < 0) {
+		pr_err("mmap_regions op[0x%x]rc[%d]\n",
+				mem_unmap.hdr.opcode, rc);
+		goto fail_cmd;
+	}
+
+	rc = wait_event_timeout(ac->mem_wait,
+			(atomic_read(&ac->mem_state) >= 0), 5*HZ);
+	if (!rc) {
+		pr_err("%s: timeout. waited for memory_unmap of handle 0x%x\n",
+			__func__, mem_unmap.mem_map_handle);
+		rc = -ETIMEDOUT;
+		goto fail_cmd;
+	} else if (atomic_read(&ac->mem_state) > 0) {
+		pr_err("%s: DSP returned error[%s]\n",
+				__func__, adsp_err_get_err_str(
+				atomic_read(&ac->mem_state)));
+		rc = adsp_err_get_lnx_err_code(
+				atomic_read(&ac->mem_state));
+		goto fail_cmd;
+	} else if (atomic_read(&ac->unmap_cb_success) == 0) {
+		pr_err("%s: Error in mem unmap callback of handle 0x%x\n",
+			__func__, mem_unmap.mem_map_handle);
+		rc = -EINVAL;
+		goto fail_cmd;
+	}
+	rc = 0;
+
+fail_cmd:
+	list_for_each_safe(ptr, next, &ac->port[dir].mem_map_handle) {
+		buf_node = list_entry(ptr, struct asm_buffer_node,
+						list);
+		if (buf_node->buf_phys_addr == buf_add) {
+			list_del(&buf_node->list);
+			kfree(buf_node);
+			break;
+		}
+	}
+	return rc;
+}
+
+int q6asm_set_lrgain(struct audio_client *ac, int left_gain, int right_gain)
+{
+	struct asm_volume_ctrl_multichannel_gain multi_ch_gain;
+	int sz = 0;
+	int rc  = 0;
+
+	if (ac == NULL) {
+		pr_err("%s: APR handle NULL\n", __func__);
+		rc = -EINVAL;
+		goto fail_cmd;
+	}
+	if (ac->apr == NULL) {
+		pr_err("%s: AC APR handle NULL\n", __func__);
+		rc = -EINVAL;
+		goto fail_cmd;
+	}
+
+	memset(&multi_ch_gain, 0, sizeof(multi_ch_gain));
+	sz = sizeof(struct asm_volume_ctrl_multichannel_gain);
+	q6asm_add_hdr_async(ac, &multi_ch_gain.hdr, sz, TRUE);
+	atomic_set(&ac->cmd_state_pp, -1);
+	multi_ch_gain.hdr.opcode = ASM_STREAM_CMD_SET_PP_PARAMS_V2;
+	multi_ch_gain.param.data_payload_addr_lsw = 0;
+	multi_ch_gain.param.data_payload_addr_msw = 0;
+	multi_ch_gain.param.mem_map_handle = 0;
+	multi_ch_gain.param.data_payload_size = sizeof(multi_ch_gain) -
+		sizeof(multi_ch_gain.hdr) - sizeof(multi_ch_gain.param);
+	multi_ch_gain.data.module_id = ASM_MODULE_ID_VOL_CTRL;
+	multi_ch_gain.data.param_id = ASM_PARAM_ID_MULTICHANNEL_GAIN;
+	multi_ch_gain.data.param_size = multi_ch_gain.param.data_payload_size -
+		sizeof(multi_ch_gain.data);
+	multi_ch_gain.data.reserved = 0;
+	multi_ch_gain.gain_data[0].channeltype = PCM_CHANNEL_FL;
+	multi_ch_gain.gain_data[0].gain = left_gain << 15;
+	multi_ch_gain.gain_data[1].channeltype = PCM_CHANNEL_FR;
+	multi_ch_gain.gain_data[1].gain = right_gain << 15;
+	multi_ch_gain.num_channels = 2;
+	rc = apr_send_pkt(ac->apr, (uint32_t *) &multi_ch_gain);
+	if (rc < 0) {
+		pr_err("%s: set-params send failed paramid[0x%x] rc %d\n",
+				__func__, multi_ch_gain.data.param_id, rc);
+		rc = -EINVAL;
+		goto fail_cmd;
+	}
+
+	rc = wait_event_timeout(ac->cmd_wait,
+			(atomic_read(&ac->cmd_state_pp) >= 0), 5*HZ);
+	if (!rc) {
+		pr_err("%s: timeout, set-params paramid[0x%x]\n", __func__,
+				multi_ch_gain.data.param_id);
+		rc = -ETIMEDOUT;
+		goto fail_cmd;
+	}
+	if (atomic_read(&ac->cmd_state_pp) > 0) {
+		pr_err("%s: DSP returned error[%s] , set-params paramid[0x%x]\n",
+					__func__, adsp_err_get_err_str(
+					atomic_read(&ac->cmd_state_pp)),
+					multi_ch_gain.data.param_id);
+		rc = adsp_err_get_lnx_err_code(
+				atomic_read(&ac->cmd_state_pp));
+		goto fail_cmd;
+	}
+	rc = 0;
+fail_cmd:
+	return rc;
+}
+
+/*
+ * q6asm_set_multich_gain: set multiple channel gains on an ASM session
+ * @ac: audio client handle
+ * @channels: number of channels for which gains are being set
+ * @gains: list of gains of audio channels
+ * @ch_map: list of channel mapping. Only valid if use_default is false
+ * @use_default: flag to indicate whether to use default mapping
+ */
+int q6asm_set_multich_gain(struct audio_client *ac, uint32_t channels,
+			   uint32_t *gains, uint8_t *ch_map, bool use_default)
+{
+	struct asm_volume_ctrl_multichannel_gain multich_gain;
+	int sz = 0;
+	int rc  = 0;
+	int i;
+	u8 default_chmap[VOLUME_CONTROL_MAX_CHANNELS];
+
+	if (ac == NULL) {
+		pr_err("%s: ac is NULL\n", __func__);
+		rc = -EINVAL;
+		goto done;
+	}
+	if (ac->apr == NULL) {
+		dev_err(ac->dev, "%s: AC APR handle NULL\n", __func__);
+		rc = -EINVAL;
+		goto done;
+	}
+	if (gains == NULL) {
+		dev_err(ac->dev, "%s: gain_list is NULL\n", __func__);
+		rc = -EINVAL;
+		goto done;
+	}
+	if (channels > VOLUME_CONTROL_MAX_CHANNELS) {
+		dev_err(ac->dev, "%s: Invalid channel count %d\n",
+			__func__, channels);
+		rc = -EINVAL;
+		goto done;
+	}
+	if (!use_default && ch_map == NULL) {
+		dev_err(ac->dev, "%s: NULL channel map\n", __func__);
+		rc = -EINVAL;
+		goto done;
+	}
+
+	memset(&multich_gain, 0, sizeof(multich_gain));
+	sz = sizeof(struct asm_volume_ctrl_multichannel_gain);
+	q6asm_add_hdr_async(ac, &multich_gain.hdr, sz, TRUE);
+	atomic_set(&ac->cmd_state_pp, -1);
+	multich_gain.hdr.opcode = ASM_STREAM_CMD_SET_PP_PARAMS_V2;
+	multich_gain.param.data_payload_addr_lsw = 0;
+	multich_gain.param.data_payload_addr_msw = 0;
+	multich_gain.param.mem_map_handle = 0;
+	multich_gain.param.data_payload_size = sizeof(multich_gain) -
+		sizeof(multich_gain.hdr) - sizeof(multich_gain.param);
+	multich_gain.data.module_id = ASM_MODULE_ID_VOL_CTRL;
+	multich_gain.data.param_id = ASM_PARAM_ID_MULTICHANNEL_GAIN;
+	multich_gain.data.param_size = multich_gain.param.data_payload_size -
+		sizeof(multich_gain.data);
+	multich_gain.data.reserved = 0;
+
+	if (use_default) {
+		rc = q6asm_map_channels(default_chmap, channels, false);
+		if (rc < 0)
+			goto done;
+		for (i = 0; i < channels; i++) {
+			multich_gain.gain_data[i].channeltype =
+				default_chmap[i];
+			multich_gain.gain_data[i].gain = gains[i] << 15;
+		}
+	} else {
+		for (i = 0; i < channels; i++) {
+			multich_gain.gain_data[i].channeltype = ch_map[i];
+			multich_gain.gain_data[i].gain = gains[i] << 15;
+		}
+	}
+	multich_gain.num_channels = channels;
+
+	rc = apr_send_pkt(ac->apr, (uint32_t *) &multich_gain);
+	if (rc < 0) {
+		pr_err("%s: set-params send failed paramid[0x%x] rc %d\n",
+				__func__, multich_gain.data.param_id, rc);
+		goto done;
+	}
+
+	rc = wait_event_timeout(ac->cmd_wait,
+			(atomic_read(&ac->cmd_state_pp) >= 0), 5*HZ);
+	if (!rc) {
+		pr_err("%s: timeout, set-params paramid[0x%x]\n", __func__,
+				multich_gain.data.param_id);
+		rc = -ETIMEDOUT;
+		goto done;
+	}
+	if (atomic_read(&ac->cmd_state_pp) > 0) {
+		pr_err("%s: DSP returned error[%d] , set-params paramid[0x%x]\n",
+		       __func__, atomic_read(&ac->cmd_state_pp),
+		       multich_gain.data.param_id);
+		rc = -EINVAL;
+		goto done;
+	}
+	rc = 0;
+done:
+	return rc;
+}
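+
+/*
+ * Example (illustrative sketch only): setting per-channel gains on a
+ * stereo stream with the default channel map. The helper left-shifts
+ * each entry by 15 bits before sending, so the raw values here are
+ * assumptions about the caller's gain convention:
+ *
+ *	uint32_t gains[2] = {0x2000, 0x2000};
+ *
+ *	rc = q6asm_set_multich_gain(ac, 2, gains, NULL, true);
+ */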
+
+int q6asm_set_mute(struct audio_client *ac, int muteflag)
+{
+	struct asm_volume_ctrl_mute_config mute;
+	int sz = 0;
+	int rc  = 0;
+
+	if (ac == NULL) {
+		pr_err("%s: APR handle NULL\n", __func__);
+		rc = -EINVAL;
+		goto fail_cmd;
+	}
+	if (ac->apr == NULL) {
+		pr_err("%s: AC APR handle NULL\n", __func__);
+		rc = -EINVAL;
+		goto fail_cmd;
+	}
+
+	sz = sizeof(struct asm_volume_ctrl_mute_config);
+	q6asm_add_hdr_async(ac, &mute.hdr, sz, TRUE);
+	atomic_set(&ac->cmd_state_pp, -1);
+	mute.hdr.opcode = ASM_STREAM_CMD_SET_PP_PARAMS_V2;
+	mute.param.data_payload_addr_lsw = 0;
+	mute.param.data_payload_addr_msw = 0;
+	mute.param.mem_map_handle = 0;
+	mute.param.data_payload_size = sizeof(mute) -
+		sizeof(mute.hdr) - sizeof(mute.param);
+	mute.data.module_id = ASM_MODULE_ID_VOL_CTRL;
+	mute.data.param_id = ASM_PARAM_ID_VOL_CTRL_MUTE_CONFIG;
+	mute.data.param_size = mute.param.data_payload_size - sizeof(mute.data);
+	mute.data.reserved = 0;
+	mute.mute_flag = muteflag;
+
+	rc = apr_send_pkt(ac->apr, (uint32_t *) &mute);
+	if (rc < 0) {
+		pr_err("%s: set-params send failed paramid[0x%x] rc %d\n",
+				__func__, mute.data.param_id, rc);
+		rc = -EINVAL;
+		goto fail_cmd;
+	}
+
+	rc = wait_event_timeout(ac->cmd_wait,
+			(atomic_read(&ac->cmd_state_pp) >= 0), 5*HZ);
+	if (!rc) {
+		pr_err("%s: timeout, set-params paramid[0x%x]\n", __func__,
+				mute.data.param_id);
+		rc = -ETIMEDOUT;
+		goto fail_cmd;
+	}
+	if (atomic_read(&ac->cmd_state_pp) > 0) {
+		pr_err("%s: DSP returned error[%s] set-params paramid[0x%x]\n",
+				__func__, adsp_err_get_err_str(
+				atomic_read(&ac->cmd_state_pp)),
+				mute.data.param_id);
+		rc = adsp_err_get_lnx_err_code(
+				atomic_read(&ac->cmd_state_pp));
+		goto fail_cmd;
+	}
+	rc = 0;
+fail_cmd:
+	return rc;
+}
+
+static int __q6asm_set_volume(struct audio_client *ac, int volume, int instance)
+{
+	struct asm_volume_ctrl_master_gain vol;
+	int sz = 0;
+	int rc  = 0;
+	int module_id;
+
+	if (ac == NULL) {
+		pr_err("%s: APR handle NULL\n", __func__);
+		rc = -EINVAL;
+		goto fail_cmd;
+	}
+	if (ac->apr == NULL) {
+		pr_err("%s: AC APR handle NULL\n", __func__);
+		rc = -EINVAL;
+		goto fail_cmd;
+	}
+
+	switch (instance) {
+	case SOFT_VOLUME_INSTANCE_2:
+		module_id = ASM_MODULE_ID_VOL_CTRL2;
+		break;
+	case SOFT_VOLUME_INSTANCE_1:
+	default:
+		module_id = ASM_MODULE_ID_VOL_CTRL;
+		break;
+	}
+
+	sz = sizeof(struct asm_volume_ctrl_master_gain);
+	q6asm_add_hdr_async(ac, &vol.hdr, sz, TRUE);
+	atomic_set(&ac->cmd_state_pp, -1);
+	vol.hdr.opcode = ASM_STREAM_CMD_SET_PP_PARAMS_V2;
+	vol.param.data_payload_addr_lsw = 0;
+	vol.param.data_payload_addr_msw = 0;
+	vol.param.mem_map_handle = 0;
+	vol.param.data_payload_size = sizeof(vol) -
+		sizeof(vol.hdr) - sizeof(vol.param);
+	vol.data.module_id = module_id;
+	vol.data.param_id = ASM_PARAM_ID_VOL_CTRL_MASTER_GAIN;
+	vol.data.param_size = vol.param.data_payload_size - sizeof(vol.data);
+	vol.data.reserved = 0;
+	vol.master_gain = volume;
+
+	rc = apr_send_pkt(ac->apr, (uint32_t *) &vol);
+	if (rc < 0) {
+		pr_err("%s: set-params send failed paramid[0x%x] rc %d\n",
+				__func__, vol.data.param_id, rc);
+		rc = -EINVAL;
+		goto fail_cmd;
+	}
+
+	rc = wait_event_timeout(ac->cmd_wait,
+			(atomic_read(&ac->cmd_state_pp) >= 0), 5*HZ);
+	if (!rc) {
+		pr_err("%s: timeout, set-params paramid[0x%x]\n", __func__,
+				vol.data.param_id);
+		rc = -ETIMEDOUT;
+		goto fail_cmd;
+	}
+	if (atomic_read(&ac->cmd_state_pp) > 0) {
+		pr_err("%s: DSP returned error[%s] set-params paramid[0x%x]\n",
+				__func__, adsp_err_get_err_str(
+				atomic_read(&ac->cmd_state_pp)),
+				vol.data.param_id);
+		rc = adsp_err_get_lnx_err_code(
+				atomic_read(&ac->cmd_state_pp));
+		goto fail_cmd;
+	}
+
+	rc = 0;
+fail_cmd:
+	return rc;
+}
+
+int q6asm_set_volume(struct audio_client *ac, int volume)
+{
+	return __q6asm_set_volume(ac, volume, SOFT_VOLUME_INSTANCE_1);
+}
+
+int q6asm_set_volume_v2(struct audio_client *ac, int volume, int instance)
+{
+	return __q6asm_set_volume(ac, volume, instance);
+}
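+
+/*
+ * Example (illustrative sketch only): both volume wrappers resolve to
+ * __q6asm_set_volume(); the v2 variant only selects which soft-volume
+ * module instance receives the master gain:
+ *
+ *	rc = q6asm_set_volume_v2(ac, volume, SOFT_VOLUME_INSTANCE_2);
+ */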
+
+int q6asm_set_aptx_dec_bt_addr(struct audio_client *ac,
+				struct aptx_dec_bt_addr_cfg *cfg)
+{
+	struct aptx_dec_bt_dev_addr payload;
+	int sz = 0;
+	int rc = 0;
+
+	pr_debug("%s: BT addr nap %d, uap %d, lap %d\n", __func__, cfg->nap,
+			cfg->uap, cfg->lap);
+
+	if (ac == NULL) {
+		pr_err("%s: AC handle NULL\n", __func__);
+		rc = -EINVAL;
+		goto fail_cmd;
+	}
+	if (ac->apr == NULL) {
+		pr_err("%s: AC APR handle NULL\n", __func__);
+		rc = -EINVAL;
+		goto fail_cmd;
+	}
+
+	sz = sizeof(struct aptx_dec_bt_dev_addr);
+	q6asm_add_hdr_async(ac, &paylod.hdr, sz, TRUE);
+	atomic_set(&ac->cmd_state, -1);
+	paylod.hdr.opcode = ASM_STREAM_CMD_SET_ENCDEC_PARAM;
+	paylod.encdec.param_id = APTX_DECODER_BT_ADDRESS;
+	paylod.encdec.param_size = sz - sizeof(paylod.hdr)
+					- sizeof(paylod.encdec);
+	paylod.bt_addr_cfg.lap = cfg->lap;
+	paylod.bt_addr_cfg.uap = cfg->uap;
+	paylod.bt_addr_cfg.nap = cfg->nap;
+
+	rc = apr_send_pkt(ac->apr, (uint32_t *) &paylod);
+	if (rc < 0) {
+		pr_err("%s: set-params send failed paramid[0x%x] rc %d\n",
+				__func__, payload.encdec.param_id, rc);
+		rc = -EINVAL;
+		goto fail_cmd;
+	}
+
+	rc = wait_event_timeout(ac->cmd_wait,
+			(atomic_read(&ac->cmd_state) >= 0), 5*HZ);
+	if (!rc) {
+		pr_err("%s: timeout, set-params paramid[0x%x]\n", __func__,
+			payload.encdec.param_id);
+		rc = -ETIMEDOUT;
+		goto fail_cmd;
+	}
+	if (atomic_read(&ac->cmd_state) > 0) {
+		pr_err("%s: DSP returned error[%s] set-params paramid[0x%x]\n",
+				__func__, adsp_err_get_err_str(
+				atomic_read(&ac->cmd_state)),
+				payload.encdec.param_id);
+		rc = adsp_err_get_lnx_err_code(
+			atomic_read(&ac->cmd_state));
+		goto fail_cmd;
+	}
+	pr_debug("%s: set BT addr is success\n", __func__);
+	rc = 0;
+fail_cmd:
+	return rc;
+}
+
+int q6asm_audio_map_shm_fd(struct audio_client *ac, struct ion_client **client,
+			struct ion_handle **handle, int fd)
+{
+	ion_phys_addr_t paddr;
+	size_t pa_len = 0;
+	int ret;
+	int sz = 0;
+	struct avs_rtic_shared_mem_addr shm;
+
+	if (ac == NULL) {
+		pr_err("%s: APR handle NULL\n", __func__);
+		ret = -EINVAL;
+		goto fail_cmd;
+	}
+	if (ac->apr == NULL) {
+		pr_err("%s: AC APR handle NULL\n", __func__);
+		ret = -EINVAL;
+		goto fail_cmd;
+	}
+
+	ret = msm_audio_ion_phys_assign("audio_shm_mem_client", client,
+					handle, fd,
+					&paddr, &pa_len, HLOS_TO_ADSP);
+	if (ret) {
+		pr_err("%s: shm ION phys failed, rc = %d\n", __func__, ret);
+		goto fail_cmd;
+	}
+	/* get payload length */
+	sz = sizeof(struct avs_rtic_shared_mem_addr);
+	q6asm_add_hdr_async(ac, &shm.hdr, sz, TRUE);
+	atomic_set(&ac->cmd_state, -1);
+	shm.shm_buf_addr_lsw = lower_32_bits(paddr);
+	shm.shm_buf_addr_msw = msm_audio_populate_upper_32_bits(paddr);
+	shm.buf_size = pa_len;
+	shm.shm_buf_num_regions = 1;
+	shm.shm_buf_mem_pool_id = ADSP_MEMORY_MAP_SHMEM8_4K_POOL;
+	shm.shm_buf_flag = 0x00;
+	shm.encdec.param_id = AVS_PARAM_ID_RTIC_SHARED_MEMORY_ADDR;
+	shm.encdec.param_size = sizeof(struct avs_rtic_shared_mem_addr) -
+						sizeof(struct apr_hdr) -
+			sizeof(struct asm_stream_cmd_set_encdec_param_v2);
+	shm.encdec.service_id = OUT;
+	shm.encdec.reserved = 0;
+	shm.map_region.shm_addr_lsw = shm.shm_buf_addr_lsw;
+	shm.map_region.shm_addr_msw = shm.shm_buf_addr_msw;
+	shm.map_region.mem_size_bytes = pa_len;
+	shm.hdr.opcode = ASM_STREAM_CMD_SET_ENCDEC_PARAM_V2;
+	ret = apr_send_pkt(ac->apr, (uint32_t *) &shm);
+	if (ret < 0) {
+		pr_err("%s: set-params send failed paramid[0x%x] rc %d\n",
+		       __func__, shm.encdec.param_id, ret);
+		ret = -EINVAL;
+		goto fail_cmd;
+	}
+
+	ret = wait_event_timeout(ac->cmd_wait,
+				(atomic_read(&ac->cmd_state) >= 0), 1*HZ);
+	if (!ret) {
+		pr_err("%s: timeout, shm.encdec paramid[0x%x]\n", __func__,
+		       shm.encdec.param_id);
+		ret = -ETIMEDOUT;
+		goto fail_cmd;
+	}
+	if (atomic_read(&ac->cmd_state) > 0) {
+		pr_err("%s: DSP returned error[%s] shm.encdec paramid[0x%x]\n",
+		       __func__,
+		       adsp_err_get_err_str(atomic_read(&ac->cmd_state)),
+		       shm.encdec.param_id);
+		ret = adsp_err_get_lnx_err_code(atomic_read(&ac->cmd_state));
+		goto fail_cmd;
+	}
+	ret = 0;
+fail_cmd:
+	return ret;
+}
+
+int q6asm_send_rtic_event_ack(struct audio_client *ac,
+			      void *param, uint32_t params_length)
+{
+	char *asm_params = NULL;
+	int sz, rc;
+	struct avs_param_rtic_event_ack ack;
+
+	if (!param || !ac) {
+		pr_err("%s: %s is NULL\n", __func__,
+			(!param) ? "param" : "ac");
+		rc = -EINVAL;
+		goto done;
+	}
+
+	sz = sizeof(struct avs_param_rtic_event_ack) + params_length;
+	asm_params = kzalloc(sz, GFP_KERNEL);
+	if (!asm_params) {
+		rc = -ENOMEM;
+		goto done;
+	}
+
+	q6asm_stream_add_hdr_async(ac, &ack.hdr,
+			    sizeof(struct avs_param_rtic_event_ack) +
+			    params_length, TRUE, ac->stream_id);
+	ack.hdr.opcode = ASM_STREAM_CMD_SET_ENCDEC_PARAM_V2;
+	ack.encdec.param_id = AVS_PARAM_ID_RTIC_EVENT_ACK;
+	ack.encdec.param_size = params_length;
+	ack.encdec.reserved = 0;
+	ack.encdec.service_id = OUT;
+	memcpy(asm_params, &ack, sizeof(struct avs_param_rtic_event_ack));
+	memcpy(asm_params + sizeof(struct avs_param_rtic_event_ack),
+		param, params_length);
+	rc = apr_send_pkt(ac->apr, (uint32_t *) asm_params);
+	if (rc < 0)
+		pr_err("%s: apr pkt failed for rtic event ack\n", __func__);
+	else
+		rc = 0;
+
+	kfree(asm_params);
+done:
+	return rc;
+}
+
+int q6asm_set_softpause(struct audio_client *ac,
+			struct asm_softpause_params *pause_param)
+{
+	struct asm_soft_pause_params softpause;
+	int sz = 0;
+	int rc  = 0;
+
+	if (ac == NULL) {
+		pr_err("%s: APR handle NULL\n", __func__);
+		rc = -EINVAL;
+		goto fail_cmd;
+	}
+	if (ac->apr == NULL) {
+		pr_err("%s: AC APR handle NULL\n", __func__);
+		rc = -EINVAL;
+		goto fail_cmd;
+	}
+
+	sz = sizeof(struct asm_soft_pause_params);
+	q6asm_add_hdr_async(ac, &softpause.hdr, sz, TRUE);
+	atomic_set(&ac->cmd_state_pp, -1);
+	softpause.hdr.opcode = ASM_STREAM_CMD_SET_PP_PARAMS_V2;
+
+	softpause.param.data_payload_addr_lsw = 0;
+	softpause.param.data_payload_addr_msw = 0;
+	softpause.param.mem_map_handle = 0;
+	softpause.param.data_payload_size = sizeof(softpause) -
+		sizeof(softpause.hdr) - sizeof(softpause.param);
+	softpause.data.module_id = ASM_MODULE_ID_VOL_CTRL;
+	softpause.data.param_id = ASM_PARAM_ID_SOFT_PAUSE_PARAMETERS;
+	softpause.data.param_size = softpause.param.data_payload_size -
+		sizeof(softpause.data);
+	softpause.data.reserved = 0;
+	softpause.enable_flag = pause_param->enable;
+	softpause.period = pause_param->period;
+	softpause.step = pause_param->step;
+	softpause.ramping_curve = pause_param->rampingcurve;
+
+	rc = apr_send_pkt(ac->apr, (uint32_t *) &softpause);
+	if (rc < 0) {
+		pr_err("%s: set-params send failed paramid[0x%x] rc %d\n",
+				__func__, softpause.data.param_id, rc);
+		rc = -EINVAL;
+		goto fail_cmd;
+	}
+
+	rc = wait_event_timeout(ac->cmd_wait,
+			(atomic_read(&ac->cmd_state_pp) >= 0), 5*HZ);
+	if (!rc) {
+		pr_err("%s: timeout, set-params paramid[0x%x]\n", __func__,
+						softpause.data.param_id);
+		rc = -ETIMEDOUT;
+		goto fail_cmd;
+	}
+	if (atomic_read(&ac->cmd_state_pp) > 0) {
+		pr_err("%s: DSP returned error[%s] set-params paramid[0x%x]\n",
+				__func__, adsp_err_get_err_str(
+				atomic_read(&ac->cmd_state_pp)),
+				softpause.data.param_id);
+		rc = adsp_err_get_lnx_err_code(
+				atomic_read(&ac->cmd_state_pp));
+		goto fail_cmd;
+	}
+	rc = 0;
+fail_cmd:
+	return rc;
+}
+
+static int __q6asm_set_softvolume(struct audio_client *ac,
+				  struct asm_softvolume_params *softvol_param,
+				  int instance)
+{
+	struct asm_soft_step_volume_params softvol;
+	int sz = 0;
+	int rc  = 0;
+	int module_id;
+
+	if (ac == NULL) {
+		pr_err("%s: APR handle NULL\n", __func__);
+		rc = -EINVAL;
+		goto fail_cmd;
+	}
+	if (ac->apr == NULL) {
+		pr_err("%s: AC APR handle NULL\n", __func__);
+		rc = -EINVAL;
+		goto fail_cmd;
+	}
+
+	switch (instance) {
+	case SOFT_VOLUME_INSTANCE_2:
+		module_id = ASM_MODULE_ID_VOL_CTRL2;
+		break;
+	case SOFT_VOLUME_INSTANCE_1:
+	default:
+		module_id = ASM_MODULE_ID_VOL_CTRL;
+		break;
+	}
+
+	sz = sizeof(struct asm_soft_step_volume_params);
+	q6asm_add_hdr_async(ac, &softvol.hdr, sz, TRUE);
+	atomic_set(&ac->cmd_state_pp, -1);
+	softvol.hdr.opcode = ASM_STREAM_CMD_SET_PP_PARAMS_V2;
+	softvol.param.data_payload_addr_lsw = 0;
+	softvol.param.data_payload_addr_msw = 0;
+	softvol.param.mem_map_handle = 0;
+	softvol.param.data_payload_size = sizeof(softvol) -
+		sizeof(softvol.hdr) - sizeof(softvol.param);
+	softvol.data.module_id = module_id;
+	softvol.data.param_id = ASM_PARAM_ID_SOFT_VOL_STEPPING_PARAMETERS;
+	softvol.data.param_size = softvol.param.data_payload_size -
+		sizeof(softvol.data);
+	softvol.data.reserved = 0;
+	softvol.period = softvol_param->period;
+	softvol.step = softvol_param->step;
+	softvol.ramping_curve = softvol_param->rampingcurve;
+
+	rc = apr_send_pkt(ac->apr, (uint32_t *) &softvol);
+	if (rc < 0) {
+		pr_err("%s: set-params send failed paramid[0x%x] rc %d\n",
+				__func__, softvol.data.param_id, rc);
+		rc = -EINVAL;
+		goto fail_cmd;
+	}
+
+	rc = wait_event_timeout(ac->cmd_wait,
+			(atomic_read(&ac->cmd_state_pp) >= 0), 5*HZ);
+	if (!rc) {
+		pr_err("%s: timeout, set-params paramid[0x%x]\n", __func__,
+						softvol.data.param_id);
+		rc = -ETIMEDOUT;
+		goto fail_cmd;
+	}
+	if (atomic_read(&ac->cmd_state_pp) > 0) {
+		pr_err("%s: DSP returned error[%s] set-params paramid[0x%x]\n",
+				__func__, adsp_err_get_err_str(
+				atomic_read(&ac->cmd_state_pp)),
+				softvol.data.param_id);
+		rc = adsp_err_get_lnx_err_code(
+				atomic_read(&ac->cmd_state_pp));
+		goto fail_cmd;
+	}
+	rc = 0;
+fail_cmd:
+	return rc;
+}
+
+int q6asm_set_softvolume(struct audio_client *ac,
+			 struct asm_softvolume_params *softvol_param)
+{
+	return __q6asm_set_softvolume(ac, softvol_param,
+				      SOFT_VOLUME_INSTANCE_1);
+}
+
+int q6asm_set_softvolume_v2(struct audio_client *ac,
+			    struct asm_softvolume_params *softvol_param,
+			    int instance)
+{
+	return __q6asm_set_softvolume(ac, softvol_param, instance);
+}
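+
+/*
+ * Example (illustrative sketch only): a 30 ms soft-volume ramp on the
+ * default instance. The rampingcurve constant is an assumption (0 is
+ * commonly the linear curve in these configs):
+ *
+ *	struct asm_softvolume_params sv;
+ *
+ *	sv.period = 30;
+ *	sv.step = 0;
+ *	sv.rampingcurve = 0;
+ *	rc = q6asm_set_softvolume(ac, &sv);
+ */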
+
+int q6asm_set_vol_ctrl_gain_pair(struct audio_client *ac,
+				 struct asm_stream_pan_ctrl_params *pan_param)
+{
+	int sz = 0;
+	int rc = 0;
+	int i = 0;
+	int32_t ch = 0;
+	struct apr_hdr hdr;
+	struct audproc_volume_ctrl_channel_type_gain_pair
+		gain_data[ASM_MAX_CHANNELS];
+	struct asm_stream_cmd_set_pp_params_v2 payload_params;
+	struct asm_stream_param_data_v2 data;
+	uint16_t *asm_params = NULL;
+
+	if (ac == NULL) {
+		pr_err("%s: ac is NULL\n", __func__);
+		rc = -EINVAL;
+		goto fail;
+	}
+	if (ac->apr == NULL) {
+		dev_err(ac->dev, "%s: ac apr handle NULL\n", __func__);
+		rc = -EINVAL;
+		goto fail;
+	}
+
+	sz = sizeof(struct apr_hdr) +
+	     sizeof(struct asm_stream_cmd_set_pp_params_v2) +
+	     sizeof(struct asm_stream_param_data_v2) +
+	     sizeof(uint32_t) +
+	     (sizeof(struct audproc_volume_ctrl_channel_type_gain_pair) *
+		     ASM_MAX_CHANNELS);
+	asm_params = kzalloc(sz, GFP_KERNEL);
+	if (!asm_params) {
+		rc = -ENOMEM;
+		goto fail;
+	}
+
+	q6asm_add_hdr_async(ac, &hdr, sz, TRUE);
+	atomic_set(&ac->cmd_state_pp, -1);
+
+	hdr.opcode = ASM_STREAM_CMD_SET_PP_PARAMS_V2;
+	memcpy(((u8 *)asm_params), &hdr, sizeof(struct apr_hdr));
+
+	payload_params.data_payload_addr_lsw = 0;
+	payload_params.data_payload_addr_msw = 0;
+	payload_params.mem_map_handle = 0;
+	payload_params.data_payload_size =
+		sizeof(struct asm_stream_param_data_v2) +
+		sizeof(uint32_t) +
+		(sizeof(struct audproc_volume_ctrl_channel_type_gain_pair) *
+		ASM_MAX_CHANNELS);
+	memcpy(((u8 *)asm_params + sizeof(struct apr_hdr)),
+		&payload_params,
+		sizeof(struct asm_stream_cmd_set_pp_params_v2));
+
+	data.module_id = AUDPROC_MODULE_ID_VOL_CTRL;
+	data.param_id = AUDPROC_PARAM_ID_MULTICHANNEL_GAIN;
+	data.param_size = sizeof(uint32_t) +
+		(sizeof(struct audproc_volume_ctrl_channel_type_gain_pair) *
+		ASM_MAX_CHANNELS);
+	data.reserved = 0;
+	memcpy(((u8 *)asm_params + sizeof(struct apr_hdr) +
+		sizeof(struct asm_stream_cmd_set_pp_params_v2)),
+		&data, sizeof(struct asm_stream_param_data_v2));
+
+	ch = pan_param->num_output_channels;
+	memcpy(((u8 *)asm_params + sizeof(struct apr_hdr) +
+		sizeof(struct asm_stream_cmd_set_pp_params_v2) +
+		sizeof(struct asm_stream_param_data_v2)),
+		&ch,
+		sizeof(uint32_t));
+
+	memset(gain_data, 0,
+		ASM_MAX_CHANNELS *
+		sizeof(struct audproc_volume_ctrl_channel_type_gain_pair));
+	for (i = 0; i < pan_param->num_output_channels; i++) {
+		gain_data[i].channel_type =
+		   pan_param->output_channel_map[i];
+		gain_data[i].gain = pan_param->gain[i];
+	}
+	memcpy(((u8 *)asm_params + sizeof(struct apr_hdr) +
+		sizeof(struct asm_stream_cmd_set_pp_params_v2) +
+		sizeof(struct asm_stream_param_data_v2) +
+		sizeof(uint32_t)),
+		gain_data,
+		ASM_MAX_CHANNELS *
+		sizeof(struct audproc_volume_ctrl_channel_type_gain_pair));
+
+	rc = apr_send_pkt(ac->apr, (uint32_t *) asm_params);
+	if (rc < 0) {
+		pr_err("%s: set-params send failed paramid[0x%x] rc %d\n",
+				__func__, data.param_id, rc);
+		goto done;
+	}
+
+	rc = wait_event_timeout(ac->cmd_wait,
+			(atomic_read(&ac->cmd_state_pp) >= 0), 5 * HZ);
+	if (!rc) {
+		pr_err("%s: timeout, set-params paramid[0x%x]\n", __func__,
+				data.param_id);
+		rc = -ETIMEDOUT;
+		goto done;
+	}
+	if (atomic_read(&ac->cmd_state_pp) > 0) {
+		pr_err("%s: DSP returned error[%d], set-params paramid[0x%x]\n",
+		       __func__, atomic_read(&ac->cmd_state_pp),
+		       data.param_id);
+		rc = -EINVAL;
+		goto done;
+	}
+	rc = 0;
+done:
+	kfree(asm_params);
+fail:
+	return rc;
+}
+
+int q6asm_set_mfc_panning_params(struct audio_client *ac,
+				 struct asm_stream_pan_ctrl_params *pan_param)
+{
+	int sz, rc, i;
+	struct audproc_mfc_output_media_fmt mfc_cfg;
+	struct apr_hdr hdr;
+	struct asm_stream_cmd_set_pp_params_v2 payload_params;
+	struct asm_stream_param_data_v2 data;
+	struct audproc_chmixer_param_coeff pan_cfg;
+	uint16_t variable_payload = 0;
+	char *asm_params = NULL;
+	uint16_t ii;
+	uint16_t *dst_gain_ptr = NULL;
+
+	sz = rc = i = 0;
+	if (ac == NULL) {
+		pr_err("%s: ac handle NULL\n", __func__);
+		rc = -EINVAL;
+		goto fail_cmd1;
+	}
+	if (ac->apr == NULL) {
+		pr_err("%s: ac apr handle NULL\n", __func__);
+		rc = -EINVAL;
+		goto fail_cmd1;
+	}
+
+	sz = sizeof(struct audproc_mfc_output_media_fmt);
+	q6asm_add_hdr_async(ac, &mfc_cfg.params.hdr, sz, TRUE);
+	atomic_set(&ac->cmd_state_pp, -1);
+	mfc_cfg.params.hdr.opcode = ASM_STREAM_CMD_SET_PP_PARAMS_V2;
+	mfc_cfg.params.payload_addr_lsw = 0;
+	mfc_cfg.params.payload_addr_msw = 0;
+	mfc_cfg.params.mem_map_handle = 0;
+	mfc_cfg.params.payload_size = sizeof(mfc_cfg) - sizeof(mfc_cfg.params);
+	mfc_cfg.data.module_id = AUDPROC_MODULE_ID_MFC;
+	mfc_cfg.data.param_id = AUDPROC_PARAM_ID_MFC_OUTPUT_MEDIA_FORMAT;
+	mfc_cfg.data.param_size = mfc_cfg.params.payload_size -
+		sizeof(mfc_cfg.data);
+	mfc_cfg.data.reserved = 0;
+	mfc_cfg.sampling_rate = 0;
+	mfc_cfg.bits_per_sample = 0;
+	mfc_cfg.num_channels = pan_param->num_output_channels;
+	for (i = 0; i < mfc_cfg.num_channels; i++)
+		mfc_cfg.channel_type[i] = pan_param->output_channel_map[i];
+
+	rc = apr_send_pkt(ac->apr, (uint32_t *) &mfc_cfg);
+	if (rc < 0) {
+		pr_err("%s: set-params send failed paramid[0x%x] rc %d\n",
+				__func__, mfc_cfg.data.param_id, rc);
+		rc = -EINVAL;
+		goto fail_cmd1;
+	}
+
+	rc = wait_event_timeout(ac->cmd_wait,
+			(atomic_read(&ac->cmd_state_pp) >= 0), 5*HZ);
+	if (!rc) {
+		pr_err("%s: timeout, set-params paramid[0x%x]\n", __func__,
+						mfc_cfg.data.param_id);
+		rc = -ETIMEDOUT;
+		goto fail_cmd1;
+	}
+	if (atomic_read(&ac->cmd_state_pp) > 0) {
+		pr_err("%s: DSP returned error[%s] set-params paramid[0x%x]\n",
+				__func__, adsp_err_get_err_str(
+				atomic_read(&ac->cmd_state_pp)),
+				mfc_cfg.data.param_id);
+		rc = adsp_err_get_lnx_err_code(
+				atomic_read(&ac->cmd_state_pp));
+		goto fail_cmd1;
+	}
+
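+	/*
+	 * The coefficient payload carries the output channel map, the
+	 * input channel map and an output x input gain matrix, all as
+	 * uint16_t values; pad it up to a uint32_t boundary so the APR
+	 * payload stays word-aligned.
+	 */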
+	variable_payload = pan_param->num_output_channels * sizeof(uint16_t) +
+			pan_param->num_input_channels * sizeof(uint16_t) +
+			pan_param->num_output_channels *
+			pan_param->num_input_channels * sizeof(uint16_t);
+	i = (variable_payload % sizeof(uint32_t));
+	variable_payload += (i == 0) ? 0 : sizeof(uint32_t) - i;
+	sz = sizeof(struct apr_hdr) +
+		sizeof(struct asm_stream_cmd_set_pp_params_v2) +
+		sizeof(struct asm_stream_param_data_v2) +
+		sizeof(struct audproc_chmixer_param_coeff) +
+		variable_payload;
+
+	asm_params = kzalloc(sz, GFP_KERNEL);
+	if (!asm_params) {
+		rc = -ENOMEM;
+		goto fail_cmd1;
+	}
+
+	q6asm_add_hdr_async(ac, &hdr, sz, TRUE);
+	atomic_set(&ac->cmd_state_pp, -1);
+	hdr.opcode = ASM_STREAM_CMD_SET_PP_PARAMS_V2;
+	memcpy(((u8 *)asm_params), &hdr, sizeof(struct apr_hdr));
+
+	payload_params.data_payload_addr_lsw = 0;
+	payload_params.data_payload_addr_msw = 0;
+	payload_params.mem_map_handle = 0;
+	payload_params.data_payload_size =
+		sizeof(struct audproc_chmixer_param_coeff) +
+		variable_payload + sizeof(struct asm_stream_param_data_v2);
+	memcpy(((u8 *)asm_params + sizeof(struct apr_hdr)),
+		&payload_params,
+		sizeof(struct asm_stream_cmd_set_pp_params_v2));
+
+	data.module_id = AUDPROC_MODULE_ID_MFC;
+	data.param_id = AUDPROC_CHMIXER_PARAM_ID_COEFF;
+	data.param_size = sizeof(struct audproc_chmixer_param_coeff) +
+				 variable_payload;
+	data.reserved = 0;
+	memcpy(((u8 *)asm_params + sizeof(struct apr_hdr) +
+		sizeof(struct asm_stream_cmd_set_pp_params_v2)),
+		&data, sizeof(struct asm_stream_param_data_v2));
+
+	pan_cfg.index = 0;
+	pan_cfg.num_output_channels = pan_param->num_output_channels;
+	pan_cfg.num_input_channels = pan_param->num_input_channels;
+	memcpy(((u8 *)asm_params + sizeof(struct apr_hdr) +
+		sizeof(struct asm_stream_cmd_set_pp_params_v2) +
+		sizeof(struct asm_stream_param_data_v2)),
+		&pan_cfg, sizeof(struct audproc_chmixer_param_coeff));
+	memcpy(((u8 *)asm_params + sizeof(struct apr_hdr) +
+		sizeof(struct asm_stream_cmd_set_pp_params_v2) +
+		sizeof(struct asm_stream_param_data_v2) +
+		sizeof(struct audproc_chmixer_param_coeff)),
+		pan_param->output_channel_map,
+		pan_param->num_output_channels * sizeof(uint16_t));
+	memcpy(((u8 *)asm_params + sizeof(struct apr_hdr) +
+		sizeof(struct asm_stream_cmd_set_pp_params_v2) +
+		sizeof(struct asm_stream_param_data_v2) +
+		sizeof(struct audproc_chmixer_param_coeff) +
+		pan_param->num_output_channels * sizeof(uint16_t)),
+		pan_param->input_channel_map,
+		pan_param->num_input_channels * sizeof(uint16_t));
+
+	dst_gain_ptr = (uint16_t *) ((u8 *)asm_params + sizeof(struct apr_hdr) +
+		sizeof(struct asm_stream_cmd_set_pp_params_v2) +
+		sizeof(struct asm_stream_param_data_v2) +
+		sizeof(struct audproc_chmixer_param_coeff) +
+		(pan_param->num_output_channels * sizeof(uint16_t)) +
+		(pan_param->num_input_channels * sizeof(uint16_t)));
+	for (ii = 0; ii < pan_param->num_output_channels *
+			pan_param->num_input_channels; ii++)
+		dst_gain_ptr[ii] = (uint16_t) pan_param->gain[ii];
+
+	rc = apr_send_pkt(ac->apr, (uint32_t *) asm_params);
+	if (rc < 0) {
+		pr_err("%s: set-params send failed paramid[0x%x] rc %d\n",
+				__func__, data.param_id, rc);
+		rc = -EINVAL;
+		goto fail_cmd2;
+	}
+
+	rc = wait_event_timeout(ac->cmd_wait,
+			(atomic_read(&ac->cmd_state_pp) >= 0), 5*HZ);
+	if (!rc) {
+		pr_err("%s: timeout, set-params paramid[0x%x]\n", __func__,
+						data.param_id);
+		rc = -ETIMEDOUT;
+		goto fail_cmd2;
+	}
+	if (atomic_read(&ac->cmd_state_pp) > 0) {
+		pr_err("%s: DSP returned error[%s] set-params paramid[0x%x]\n",
+				__func__, adsp_err_get_err_str(
+				atomic_read(&ac->cmd_state_pp)),
+				data.param_id);
+		rc = adsp_err_get_lnx_err_code(
+				atomic_read(&ac->cmd_state_pp));
+		goto fail_cmd2;
+	}
+	rc = 0;
+fail_cmd2:
+	kfree(asm_params);
+fail_cmd1:
+	return rc;
+}
+
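+/*
+ * Push equalizer settings (enable flag plus per-band index, filter
+ * type, center frequency, gain and Q factor) to the stream's
+ * ASM_MODULE_ID_EQUALIZER module as an in-band set-pp-params command.
+ */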
+int q6asm_equalizer(struct audio_client *ac, void *eq_p)
+{
+	struct asm_eq_params eq;
+	struct msm_audio_eq_stream_config *eq_params = NULL;
+	int i  = 0;
+	int sz = 0;
+	int rc  = 0;
+
+	if (ac == NULL) {
+		pr_err("%s: APR handle NULL\n", __func__);
+		rc = -EINVAL;
+		goto fail_cmd;
+	}
+	if (ac->apr == NULL) {
+		pr_err("%s: AC APR handle NULL\n", __func__);
+		rc = -EINVAL;
+		goto fail_cmd;
+	}
+
+	if (eq_p == NULL) {
+		pr_err("%s: [%d]: Invalid Eq param\n", __func__, ac->session);
+		rc = -EINVAL;
+		goto fail_cmd;
+	}
+	sz = sizeof(struct asm_eq_params);
+	eq_params = (struct msm_audio_eq_stream_config *) eq_p;
+	q6asm_add_hdr(ac, &eq.hdr, sz, TRUE);
+	atomic_set(&ac->cmd_state_pp, -1);
+
+	eq.hdr.opcode = ASM_STREAM_CMD_SET_PP_PARAMS_V2;
+	eq.param.data_payload_addr_lsw = 0;
+	eq.param.data_payload_addr_msw = 0;
+	eq.param.mem_map_handle = 0;
+	eq.param.data_payload_size = sizeof(eq) -
+		sizeof(eq.hdr) - sizeof(eq.param);
+	eq.data.module_id = ASM_MODULE_ID_EQUALIZER;
+	eq.data.param_id = ASM_PARAM_ID_EQUALIZER_PARAMETERS;
+	eq.data.param_size = eq.param.data_payload_size - sizeof(eq.data);
+	eq.enable_flag = eq_params->enable;
+	eq.num_bands = eq_params->num_bands;
+
+	pr_debug("%s: enable:%d numbands:%d\n", __func__, eq_params->enable,
+			eq_params->num_bands);
+	for (i = 0; i < eq_params->num_bands; i++) {
+		eq.eq_bands[i].band_idx =
+			eq_params->eq_bands[i].band_idx;
+		eq.eq_bands[i].filterype =
+			eq_params->eq_bands[i].filter_type;
+		eq.eq_bands[i].center_freq_hz =
+			eq_params->eq_bands[i].center_freq_hz;
+		eq.eq_bands[i].filter_gain =
+			eq_params->eq_bands[i].filter_gain;
+		eq.eq_bands[i].q_factor =
+			eq_params->eq_bands[i].q_factor;
+		pr_debug("%s: filter_type:%u bandnum:%d\n", __func__,
+				eq_params->eq_bands[i].filter_type, i);
+		pr_debug("%s: center_freq_hz:%u bandnum:%d\n", __func__,
+				eq_params->eq_bands[i].center_freq_hz, i);
+		pr_debug("%s: filter_gain:%d bandnum:%d\n", __func__,
+				eq_params->eq_bands[i].filter_gain, i);
+		pr_debug("%s: q_factor:%d bandnum:%d\n", __func__,
+				eq_params->eq_bands[i].q_factor, i);
+	}
+	rc = apr_send_pkt(ac->apr, (uint32_t *)&eq);
+	if (rc < 0) {
+		pr_err("%s: set-params send failed paramid[0x%x] rc %d\n",
+				__func__, eq.data.param_id, rc);
+		rc = -EINVAL;
+		goto fail_cmd;
+	}
+
+	rc = wait_event_timeout(ac->cmd_wait,
+			(atomic_read(&ac->cmd_state_pp) >= 0), 5*HZ);
+	if (!rc) {
+		pr_err("%s: timeout, set-params paramid[0x%x]\n", __func__,
+						eq.data.param_id);
+		rc = -ETIMEDOUT;
+		goto fail_cmd;
+	}
+	if (atomic_read(&ac->cmd_state_pp) > 0) {
+		pr_err("%s: DSP returned error[%s] set-params paramid[0x%x]\n",
+				__func__, adsp_err_get_err_str(
+				atomic_read(&ac->cmd_state_pp)),
+				eq.data.param_id);
+		rc = adsp_err_get_lnx_err_code(
+				atomic_read(&ac->cmd_state_pp));
+		goto fail_cmd;
+	}
+	rc = 0;
+fail_cmd:
+	return rc;
+}
+
+static int __q6asm_read(struct audio_client *ac, bool is_custom_len_reqd,
+			int len)
+{
+	struct asm_data_cmd_read_v2 read;
+	struct asm_buffer_node *buf_node = NULL;
+	struct list_head *ptr, *next;
+	struct audio_buffer        *ab;
+	int dsp_buf;
+	struct audio_port_data     *port;
+	int rc;
+
+	if (ac == NULL) {
+		pr_err("%s: APR handle NULL\n", __func__);
+		return -EINVAL;
+	}
+	if (ac->apr == NULL) {
+		pr_err("%s: AC APR handle NULL\n", __func__);
+		return -EINVAL;
+	}
+
+	if (ac->io_mode & SYNC_IO_MODE) {
+		port = &ac->port[OUT];
+
+		q6asm_add_hdr(ac, &read.hdr, sizeof(read), FALSE);
+
+		mutex_lock(&port->lock);
+
+		dsp_buf = port->dsp_buf;
+		if (port->buf == NULL) {
+			pr_err("%s: buf is NULL\n", __func__);
+			mutex_unlock(&port->lock);
+			return -EINVAL;
+		}
+		ab = &port->buf[dsp_buf];
+
+		dev_vdbg(ac->dev, "%s: session[%d]dsp-buf[%d][%pK]cpu_buf[%d][%pK]\n",
+				__func__,
+				ac->session,
+				dsp_buf,
+				port->buf[dsp_buf].data,
+				port->cpu_buf,
+				&port->buf[port->cpu_buf].phys);
+
+		read.hdr.opcode = ASM_DATA_CMD_READ_V2;
+		read.buf_addr_lsw = lower_32_bits(ab->phys);
+		read.buf_addr_msw = msm_audio_populate_upper_32_bits(ab->phys);
+
+		list_for_each_safe(ptr, next, &ac->port[OUT].mem_map_handle) {
+			buf_node = list_entry(ptr, struct asm_buffer_node,
+					list);
+			if (buf_node->buf_phys_addr == ab->phys)
+				read.mem_map_handle = buf_node->mmap_hdl;
+		}
+		dev_vdbg(ac->dev, "memory_map handle in q6asm_read: [%0x]:",
+				read.mem_map_handle);
+		read.buf_size = is_custom_len_reqd ? len : ab->size;
+		read.seq_id = port->dsp_buf;
+		q6asm_update_token(&read.hdr.token,
+				   0, /* Session ID is NA */
+				   0, /* Stream ID is NA */
+				   port->dsp_buf,
+				   0, /* Direction flag is NA */
+				   WAIT_CMD);
+		port->dsp_buf = q6asm_get_next_buf(ac, port->dsp_buf,
+						   port->max_buf_cnt);
+		mutex_unlock(&port->lock);
+		dev_vdbg(ac->dev, "%s: buf add[%pK] token[0x%x] uid[%d]\n",
+				__func__, &ab->phys, read.hdr.token,
+				read.seq_id);
+		rc = apr_send_pkt(ac->apr, (uint32_t *) &read);
+		if (rc < 0) {
+			pr_err("%s: read op[0x%x]rc[%d]\n",
+					__func__, read.hdr.opcode, rc);
+			goto fail_cmd;
+		}
+		return 0;
+	}
+fail_cmd:
+	return -EINVAL;
+}
+
+int q6asm_read(struct audio_client *ac)
+{
+	return __q6asm_read(ac, false/*is_custom_len_reqd*/, 0);
+}
+int q6asm_read_v2(struct audio_client *ac, uint32_t len)
+{
+	return __q6asm_read(ac, true /*is_custom_len_reqd*/, len);
+}
+
+int q6asm_read_nolock(struct audio_client *ac)
+{
+	struct asm_data_cmd_read_v2 read;
+	struct asm_buffer_node *buf_node = NULL;
+	struct list_head *ptr, *next;
+	struct audio_buffer        *ab;
+	int dsp_buf;
+	struct audio_port_data     *port;
+	int rc;
+
+	if (ac == NULL) {
+		pr_err("%s: APR handle NULL\n", __func__);
+		return -EINVAL;
+	}
+	if (ac->apr == NULL) {
+		pr_err("%s: AC APR handle NULL\n", __func__);
+		return -EINVAL;
+	}
+
+	if (ac->io_mode & SYNC_IO_MODE) {
+		port = &ac->port[OUT];
+
+		q6asm_add_hdr_async(ac, &read.hdr, sizeof(read), FALSE);
+
+		dsp_buf = port->dsp_buf;
+		ab = &port->buf[dsp_buf];
+
+		dev_vdbg(ac->dev, "%s: session[%d]dsp-buf[%d][%pK]cpu_buf[%d][%pK]\n",
+				__func__,
+				ac->session,
+				dsp_buf,
+				port->buf[dsp_buf].data,
+				port->cpu_buf,
+				&port->buf[port->cpu_buf].phys);
+
+		read.hdr.opcode = ASM_DATA_CMD_READ_V2;
+		read.buf_addr_lsw = lower_32_bits(ab->phys);
+		read.buf_addr_msw = msm_audio_populate_upper_32_bits(ab->phys);
+		read.buf_size = ab->size;
+		read.seq_id = port->dsp_buf;
+		q6asm_update_token(&read.hdr.token,
+				   0, /* Session ID is NA */
+				   0, /* Stream ID is NA */
+				   port->dsp_buf,
+				   0, /* Direction flag is NA */
+				   WAIT_CMD);
+
+		list_for_each_safe(ptr, next, &ac->port[OUT].mem_map_handle) {
+			buf_node = list_entry(ptr, struct asm_buffer_node,
+					list);
+			if (buf_node->buf_phys_addr == ab->phys) {
+				read.mem_map_handle = buf_node->mmap_hdl;
+				break;
+			}
+		}
+
+		port->dsp_buf = q6asm_get_next_buf(ac, port->dsp_buf,
+						   port->max_buf_cnt);
+		dev_vdbg(ac->dev, "%s: buf add[%pK] token[0x%x] uid[%d]\n",
+				__func__, &ab->phys, read.hdr.token,
+				read.seq_id);
+		rc = apr_send_pkt(ac->apr, (uint32_t *) &read);
+		if (rc < 0) {
+			pr_err("%s: read op[0x%x]rc[%d]\n",
+					__func__, read.hdr.opcode, rc);
+			goto fail_cmd;
+		}
+		return 0;
+	}
+fail_cmd:
+	return -EINVAL;
+}
+
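+/*
+ * Asynchronous write for the AIO path: the buffer comes from the
+ * caller, not the port ring. The mem-map handle lookup below must use
+ * the start of the mapped region, so the metadata that precedes the
+ * audio payload (32 bytes in legacy NT mode, metadata_len in the
+ * compressed modes, or a struct snd_codec_metadata when timestamps
+ * are attached) is first subtracted from the payload address.
+ */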
+int q6asm_async_write(struct audio_client *ac,
+					  struct audio_aio_write_param *param)
+{
+	int rc = 0;
+	struct asm_data_cmd_write_v2 write;
+	struct asm_buffer_node *buf_node = NULL;
+	struct list_head *ptr, *next;
+	struct audio_buffer        *ab;
+	struct audio_port_data     *port;
+	phys_addr_t lbuf_phys_addr;
+	u32 liomode;
+	u32 io_compressed;
+	u32 io_compressed_stream;
+	int offset = 0;
+
+	if (ac == NULL) {
+		pr_err("%s: APR handle NULL\n", __func__);
+		return -EINVAL;
+	}
+	if (ac->apr == NULL) {
+		pr_err("%s: AC APR handle NULL\n", __func__);
+		return -EINVAL;
+	}
+
+	q6asm_stream_add_hdr_async(
+			ac, &write.hdr, sizeof(write), TRUE, ac->stream_id);
+	port = &ac->port[IN];
+	ab = &port->buf[port->dsp_buf];
+
+	/* Pass session id as token for AIO scheme */
+	write.hdr.token = param->uid;
+	write.hdr.opcode = ASM_DATA_CMD_WRITE_V2;
+	write.buf_addr_lsw = lower_32_bits(param->paddr);
+	write.buf_addr_msw = msm_audio_populate_upper_32_bits(param->paddr);
+	write.buf_size = param->len;
+	write.timestamp_msw = param->msw_ts;
+	write.timestamp_lsw = param->lsw_ts;
+	liomode = (ASYNC_IO_MODE | NT_MODE);
+	io_compressed = (ASYNC_IO_MODE | COMPRESSED_IO);
+	io_compressed_stream = (ASYNC_IO_MODE | COMPRESSED_STREAM_IO);
+
+	if (ac->io_mode == liomode)
+		lbuf_phys_addr = (param->paddr - 32);
+	else if (ac->io_mode == io_compressed ||
+			ac->io_mode == io_compressed_stream)
+		lbuf_phys_addr = (param->paddr - param->metadata_len);
+	else {
+		if (param->flags & SET_TIMESTAMP)
+			lbuf_phys_addr = param->paddr -
+				sizeof(struct snd_codec_metadata);
+		else
+			lbuf_phys_addr = param->paddr;
+	}
+	dev_vdbg(ac->dev, "%s: token[0x%x], buf_addr[%pK], buf_size[0x%x], ts_msw[0x%x], ts_lsw[0x%x], lbuf_phys_addr: 0x[%pK]\n",
+			__func__,
+			write.hdr.token, &param->paddr,
+			write.buf_size, write.timestamp_msw,
+			write.timestamp_lsw, &lbuf_phys_addr);
+
+	/* Use 0xFF00 for disabling timestamps */
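+	/* (bit 31, set in the else branch, flags the timestamps as valid) */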
+	if (param->flags == 0xFF00)
+		write.flags = (0x00000000 | (param->flags & 0x800000FF));
+	else
+		write.flags = (0x80000000 | param->flags);
+	write.flags |= param->last_buffer << ASM_SHIFT_LAST_BUFFER_FLAG;
+	write.seq_id = param->uid;
+	list_for_each_safe(ptr, next, &ac->port[IN].mem_map_handle) {
+		buf_node = list_entry(ptr, struct asm_buffer_node,
+				list);
+		if (buf_node->buf_phys_addr == lbuf_phys_addr) {
+			write.mem_map_handle = buf_node->mmap_hdl;
+			break;
+		}
+	}
+
+	if (ab != NULL) {
+		offset = lbuf_phys_addr - ab->phys;
+		config_debug_fs_write(ab, offset);
+	}
+
+	rc = apr_send_pkt(ac->apr, (uint32_t *) &write);
+	if (rc < 0) {
+		pr_err("%s: write op[0x%x]rc[%d]\n", __func__,
+				write.hdr.opcode, rc);
+		goto fail_cmd;
+	}
+	return 0;
+fail_cmd:
+	return -EINVAL;
+}
+
+int q6asm_async_read(struct audio_client *ac,
+					  struct audio_aio_read_param *param)
+{
+	int rc = 0;
+	struct asm_data_cmd_read_v2 read;
+	struct asm_buffer_node *buf_node = NULL;
+	struct list_head *ptr, *next;
+	phys_addr_t lbuf_phys_addr;
+	u32 liomode;
+	u32 io_compressed;
+	int dir = 0;
+
+	if (ac == NULL) {
+		pr_err("%s: APR handle NULL\n", __func__);
+		return -EINVAL;
+	}
+	if (ac->apr == NULL) {
+		pr_err("%s: AC APR handle NULL\n", __func__);
+		return -EINVAL;
+	}
+
+	q6asm_add_hdr_async(ac, &read.hdr, sizeof(read), FALSE);
+
+	/* Pass session id as token for AIO scheme */
+	read.hdr.token = param->uid;
+	read.hdr.opcode = ASM_DATA_CMD_READ_V2;
+	read.buf_addr_lsw = lower_32_bits(param->paddr);
+	read.buf_addr_msw = msm_audio_populate_upper_32_bits(param->paddr);
+	read.buf_size = param->len;
+	read.seq_id = param->uid;
+	liomode = (NT_MODE | ASYNC_IO_MODE);
+	io_compressed = (ASYNC_IO_MODE | COMPRESSED_IO);
+	if (ac->io_mode == liomode) {
+		lbuf_phys_addr = (param->paddr - 32);
+		/*legacy wma driver case*/
+		dir = IN;
+	} else if (ac->io_mode == io_compressed) {
+		lbuf_phys_addr = (param->paddr - 64);
+		dir = OUT;
+	} else {
+		if (param->flags & COMPRESSED_TIMESTAMP_FLAG)
+			lbuf_phys_addr = param->paddr -
+				 sizeof(struct snd_codec_metadata);
+		else
+			lbuf_phys_addr = param->paddr;
+		dir = OUT;
+	}
+
+	list_for_each_safe(ptr, next, &ac->port[dir].mem_map_handle) {
+		buf_node = list_entry(ptr, struct asm_buffer_node,
+				list);
+		if (buf_node->buf_phys_addr == lbuf_phys_addr) {
+			read.mem_map_handle = buf_node->mmap_hdl;
+			break;
+		}
+	}
+
+	rc = apr_send_pkt(ac->apr, (uint32_t *) &read);
+	if (rc < 0) {
+		pr_err("%s: read op[0x%x]rc[%d]\n", __func__,
+				read.hdr.opcode, rc);
+		goto fail_cmd;
+	}
+	return 0;
+fail_cmd:
+	return -EINVAL;
+}
+
+int q6asm_write(struct audio_client *ac, uint32_t len, uint32_t msw_ts,
+		uint32_t lsw_ts, uint32_t flags)
+{
+	int rc = 0;
+	struct asm_data_cmd_write_v2 write;
+	struct asm_buffer_node *buf_node = NULL;
+	struct audio_port_data *port;
+	struct audio_buffer    *ab;
+	int dsp_buf = 0;
+
+	if (ac == NULL) {
+		pr_err("%s: APR handle NULL\n", __func__);
+		return -EINVAL;
+	}
+	if (ac->apr == NULL) {
+		pr_err("%s: AC APR handle NULL\n", __func__);
+		return -EINVAL;
+	}
+
+	dev_vdbg(ac->dev, "%s: session[%d] len=%d\n",
+			__func__, ac->session, len);
+	if (ac->io_mode & SYNC_IO_MODE) {
+		port = &ac->port[IN];
+
+		q6asm_add_hdr(ac, &write.hdr, sizeof(write),
+				FALSE);
+		mutex_lock(&port->lock);
+
+		dsp_buf = port->dsp_buf;
+		ab = &port->buf[dsp_buf];
+
+		q6asm_update_token(&write.hdr.token,
+				   0, /* Session ID is NA */
+				   0, /* Stream ID is NA */
+				   port->dsp_buf,
+				   0, /* Direction flag is NA */
+				   NO_WAIT_CMD);
+		write.hdr.opcode = ASM_DATA_CMD_WRITE_V2;
+		write.buf_addr_lsw = lower_32_bits(ab->phys);
+		write.buf_addr_msw = msm_audio_populate_upper_32_bits(ab->phys);
+		write.buf_size = len;
+		write.seq_id = port->dsp_buf;
+		write.timestamp_lsw = lsw_ts;
+		write.timestamp_msw = msw_ts;
+		/* Use 0xFF00 for disabling timestamps */
+		if (flags == 0xFF00)
+			write.flags = (0x00000000 | (flags & 0x800000FF));
+		else
+			write.flags = (0x80000000 | flags);
+		port->dsp_buf = q6asm_get_next_buf(ac, port->dsp_buf,
+						   port->max_buf_cnt);
+		buf_node = list_first_entry(&ac->port[IN].mem_map_handle,
+				struct asm_buffer_node,
+				list);
+		write.mem_map_handle = buf_node->mmap_hdl;
+
+		dev_vdbg(ac->dev, "%s: ab->phys[%pK]bufadd[0x%x] token[0x%x]buf_id[0x%x]buf_size[0x%x]mmaphdl[0x%x]"
+				, __func__,
+				&ab->phys,
+				write.buf_addr_lsw,
+				write.hdr.token,
+				write.seq_id,
+				write.buf_size,
+				write.mem_map_handle);
+		mutex_unlock(&port->lock);
+
+		config_debug_fs_write(ab, 0);
+
+		rc = apr_send_pkt(ac->apr, (uint32_t *) &write);
+		if (rc < 0) {
+			pr_err("%s: write op[0x%x]rc[%d]\n",
+					__func__, write.hdr.opcode, rc);
+			goto fail_cmd;
+		}
+		return 0;
+	}
+fail_cmd:
+	return -EINVAL;
+}
+
+int q6asm_write_nolock(struct audio_client *ac, uint32_t len, uint32_t msw_ts,
+			uint32_t lsw_ts, uint32_t flags)
+{
+	int rc = 0;
+	struct asm_data_cmd_write_v2 write;
+	struct asm_buffer_node *buf_node = NULL;
+	struct audio_port_data *port;
+	struct audio_buffer    *ab;
+	int dsp_buf = 0;
+
+	if (ac == NULL) {
+		pr_err("%s: APR handle NULL\n", __func__);
+		return -EINVAL;
+	}
+	if (ac->apr == NULL) {
+		pr_err("%s: AC APR handle NULL\n", __func__);
+		return -EINVAL;
+	}
+
+	dev_vdbg(ac->dev, "%s: session[%d] len=%d\n",
+			__func__, ac->session, len);
+	if (ac->io_mode & SYNC_IO_MODE) {
+		port = &ac->port[IN];
+
+		q6asm_add_hdr_async(ac, &write.hdr, sizeof(write),
+				FALSE);
+
+		dsp_buf = port->dsp_buf;
+		ab = &port->buf[dsp_buf];
+
+		q6asm_update_token(&write.hdr.token,
+				   0, /* Session ID is NA */
+				   0, /* Stream ID is NA */
+				   port->dsp_buf,
+				   0, /* Direction flag is NA */
+				   NO_WAIT_CMD);
+
+		write.hdr.opcode = ASM_DATA_CMD_WRITE_V2;
+		write.buf_addr_lsw = lower_32_bits(ab->phys);
+		write.buf_addr_msw = msm_audio_populate_upper_32_bits(ab->phys);
+		write.buf_size = len;
+		write.seq_id = port->dsp_buf;
+		write.timestamp_lsw = lsw_ts;
+		write.timestamp_msw = msw_ts;
+		buf_node = list_first_entry(&ac->port[IN].mem_map_handle,
+				struct asm_buffer_node,
+				list);
+		write.mem_map_handle = buf_node->mmap_hdl;
+		/* Use 0xFF00 for disabling timestamps */
+		if (flags == 0xFF00)
+			write.flags = (0x00000000 | (flags & 0x800000FF));
+		else
+			write.flags = (0x80000000 | flags);
+		port->dsp_buf = q6asm_get_next_buf(ac, port->dsp_buf,
+						   port->max_buf_cnt);
+
+		dev_vdbg(ac->dev, "%s: ab->phys[%pK]bufadd[0x%x]token[0x%x] buf_id[0x%x]buf_size[0x%x]mmaphdl[0x%x]"
+				, __func__,
+				&ab->phys,
+				write.buf_addr_lsw,
+				write.hdr.token,
+				write.seq_id,
+				write.buf_size,
+				write.mem_map_handle);
+
+		rc = apr_send_pkt(ac->apr, (uint32_t *) &write);
+		if (rc < 0) {
+			pr_err("%s: write op[0x%x]rc[%d]\n",
+					__func__, write.hdr.opcode, rc);
+			goto fail_cmd;
+		}
+		return 0;
+	}
+fail_cmd:
+	return -EINVAL;
+}
+
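+/*
+ * Read the session time kept by the matrix/stream router's AV-sync
+ * module (SESSION_TIME_V3). The reply arrives through the APR
+ * callback, which stores it in ac->time_stamp and clears
+ * ac->time_flag before waking time_wait.
+ */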
+int q6asm_get_session_time(struct audio_client *ac, uint64_t *tstamp)
+{
+	struct asm_mtmx_strtr_get_params mtmx_params;
+	int rc;
+
+	if (ac == NULL) {
+		pr_err("%s: APR handle NULL\n", __func__);
+		return -EINVAL;
+	}
+	if (ac->apr == NULL) {
+		pr_err("%s: AC APR handle NULL\n", __func__);
+		return -EINVAL;
+	}
+	if (tstamp == NULL) {
+		pr_err("%s: tstamp NULL\n", __func__);
+		return -EINVAL;
+	}
+
+	q6asm_add_hdr(ac, &mtmx_params.hdr, sizeof(mtmx_params), TRUE);
+	mtmx_params.hdr.opcode = ASM_SESSION_CMD_GET_MTMX_STRTR_PARAMS_V2;
+	mtmx_params.param_info.data_payload_addr_lsw = 0;
+	mtmx_params.param_info.data_payload_addr_msw = 0;
+	mtmx_params.param_info.mem_map_handle = 0;
+	mtmx_params.param_info.direction = (ac->io_mode & TUN_READ_IO_MODE
+					    ? 1 : 0);
+	mtmx_params.param_info.module_id =
+		ASM_SESSION_MTMX_STRTR_MODULE_ID_AVSYNC;
+	mtmx_params.param_info.param_id =
+		ASM_SESSION_MTMX_STRTR_PARAM_SESSION_TIME_V3;
+	mtmx_params.param_info.param_max_size =
+		sizeof(struct asm_stream_param_data_v2) +
+		sizeof(struct asm_session_mtmx_strtr_param_session_time_v3_t);
+	atomic_set(&ac->time_flag, 1);
+
+	dev_vdbg(ac->dev, "%s: session[%d]opcode[0x%x]\n", __func__,
+		 ac->session, mtmx_params.hdr.opcode);
+	rc = apr_send_pkt(ac->apr, (uint32_t *) &mtmx_params);
+	if (rc < 0) {
+		pr_err("%s: Command 0x%x failed %d\n", __func__,
+		       mtmx_params.hdr.opcode, rc);
+		goto fail_cmd;
+	}
+	rc = wait_event_timeout(ac->time_wait,
+			(atomic_read(&ac->time_flag) == 0), 5*HZ);
+	if (!rc) {
+		pr_err("%s: timeout in getting session time from DSP\n",
+				__func__);
+		goto fail_cmd;
+	}
+
+	*tstamp = ac->time_stamp;
+	return 0;
+
+fail_cmd:
+	return -EINVAL;
+}
+
+int q6asm_get_session_time_legacy(struct audio_client *ac, uint64_t *tstamp)
+{
+	struct apr_hdr hdr;
+	int rc;
+
+	if (ac == NULL) {
+		pr_err("%s: APR handle NULL\n", __func__);
+		return -EINVAL;
+	}
+	if (ac->apr == NULL) {
+		pr_err("%s: AC APR handle NULL\n", __func__);
+		return -EINVAL;
+	}
+	if (tstamp == NULL) {
+		pr_err("%s: tstamp NULL\n", __func__);
+		return -EINVAL;
+	}
+
+	q6asm_add_hdr(ac, &hdr, sizeof(hdr), TRUE);
+	hdr.opcode = ASM_SESSION_CMD_GET_SESSIONTIME_V3;
+	atomic_set(&ac->time_flag, 1);
+
+	dev_vdbg(ac->dev, "%s: session[%d]opcode[0x%x]\n", __func__,
+			ac->session,
+			hdr.opcode);
+	rc = apr_send_pkt(ac->apr, (uint32_t *) &hdr);
+	if (rc < 0) {
+		pr_err("%s: Command 0x%x failed %d\n",
+				__func__, hdr.opcode, rc);
+		goto fail_cmd;
+	}
+	rc = wait_event_timeout(ac->time_wait,
+			(atomic_read(&ac->time_flag) == 0), 5*HZ);
+	if (!rc) {
+		pr_err("%s: timeout in getting session time from DSP\n",
+				__func__);
+		goto fail_cmd;
+	}
+
+	*tstamp = ac->time_stamp;
+	return 0;
+
+fail_cmd:
+	return -EINVAL;
+}
+
+int q6asm_send_audio_effects_params(struct audio_client *ac, char *params,
+				    uint32_t params_length)
+{
+	char *asm_params = NULL;
+	struct apr_hdr hdr;
+	struct asm_stream_cmd_set_pp_params_v2 payload_params;
+	int sz, rc;
+
+	pr_debug("%s:\n", __func__);
+	if (!ac) {
+		pr_err("%s: APR handle NULL\n", __func__);
+		return -EINVAL;
+	}
+	if (ac->apr == NULL) {
+		pr_err("%s: AC APR handle NULL\n", __func__);
+		return -EINVAL;
+	}
+	if (params == NULL) {
+		pr_err("%s: params NULL\n", __func__);
+		return -EINVAL;
+	}
+	sz = sizeof(struct apr_hdr) +
+		sizeof(struct asm_stream_cmd_set_pp_params_v2) +
+		params_length;
+	asm_params = kzalloc(sz, GFP_KERNEL);
+	if (!asm_params) {
+		pr_err("%s: asm params memory alloc failed\n", __func__);
+		return -ENOMEM;
+	}
+	q6asm_add_hdr_async(ac, &hdr, (sizeof(struct apr_hdr) +
+				sizeof(struct asm_stream_cmd_set_pp_params_v2) +
+				params_length), TRUE);
+	atomic_set(&ac->cmd_state_pp, -1);
+	hdr.opcode = ASM_STREAM_CMD_SET_PP_PARAMS_V2;
+	payload_params.data_payload_addr_lsw = 0;
+	payload_params.data_payload_addr_msw = 0;
+	payload_params.mem_map_handle = 0;
+	payload_params.data_payload_size = params_length;
+	memcpy(((u8 *)asm_params), &hdr, sizeof(struct apr_hdr));
+	memcpy(((u8 *)asm_params + sizeof(struct apr_hdr)), &payload_params,
+			sizeof(struct asm_stream_cmd_set_pp_params_v2));
+	memcpy(((u8 *)asm_params + sizeof(struct apr_hdr) +
+				sizeof(struct asm_stream_cmd_set_pp_params_v2)),
+			params, params_length);
+	rc = apr_send_pkt(ac->apr, (uint32_t *) asm_params);
+	if (rc < 0) {
+		pr_err("%s: audio effects set-params send failed\n", __func__);
+		rc = -EINVAL;
+		goto fail_send_param;
+	}
+	rc = wait_event_timeout(ac->cmd_wait,
+				(atomic_read(&ac->cmd_state_pp) >= 0), 1*HZ);
+	if (!rc) {
+		pr_err("%s: timeout, audio effects set-params\n", __func__);
+		rc = -ETIMEDOUT;
+		goto fail_send_param;
+	}
+	if (atomic_read(&ac->cmd_state_pp) > 0) {
+		pr_err("%s: DSP returned error[%s] set-params\n",
+				__func__, adsp_err_get_err_str(
+				atomic_read(&ac->cmd_state_pp)));
+		rc = adsp_err_get_lnx_err_code(
+				atomic_read(&ac->cmd_state_pp));
+		goto fail_send_param;
+	}
+
+	rc = 0;
+fail_send_param:
+	kfree(asm_params);
+	return rc;
+}
+
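+/*
+ * Program a render window (start or end, selected by param_id) on the
+ * matrix/stream router's AV-sync module; the 64-bit window value is
+ * passed as an lsw/msw pair.
+ */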
+int q6asm_send_mtmx_strtr_window(struct audio_client *ac,
+		struct asm_session_mtmx_strtr_param_window_v2_t *window_param,
+		uint32_t param_id)
+{
+	struct asm_mtmx_strtr_params matrix;
+	int sz = 0;
+	int rc  = 0;
+
+	pr_debug("%s: Window lsw is %d, window msw is %d\n", __func__,
+		  window_param->window_lsw, window_param->window_msw);
+
+	if (!ac) {
+		pr_err("%s: audio client handle is NULL\n", __func__);
+		rc = -EINVAL;
+		goto fail_cmd;
+	}
+
+	if (ac->apr == NULL) {
+		pr_err("%s: ac->apr is NULL", __func__);
+		rc = -EINVAL;
+		goto fail_cmd;
+	}
+
+	sz = sizeof(struct asm_mtmx_strtr_params);
+	q6asm_add_hdr(ac, &matrix.hdr, sz, TRUE);
+	atomic_set(&ac->cmd_state, -1);
+	matrix.hdr.opcode = ASM_SESSION_CMD_SET_MTMX_STRTR_PARAMS_V2;
+
+	matrix.param.data_payload_addr_lsw = 0;
+	matrix.param.data_payload_addr_msw = 0;
+	matrix.param.mem_map_handle = 0;
+	matrix.param.data_payload_size =
+		sizeof(struct asm_stream_param_data_v2) +
+		sizeof(struct asm_session_mtmx_strtr_param_window_v2_t);
+	matrix.param.direction = 0; /* RX */
+	matrix.data.module_id = ASM_SESSION_MTMX_STRTR_MODULE_ID_AVSYNC;
+	matrix.data.param_id = param_id;
+	matrix.data.param_size =
+		sizeof(struct asm_session_mtmx_strtr_param_window_v2_t);
+	matrix.data.reserved = 0;
+	memcpy(&(matrix.config.window_param),
+	       window_param,
+	       sizeof(struct asm_session_mtmx_strtr_param_window_v2_t));
+
+	rc = apr_send_pkt(ac->apr, (uint32_t *) &matrix);
+	if (rc < 0) {
+		pr_err("%s: Render window start send failed paramid [0x%x]\n",
+			__func__, matrix.data.param_id);
+		rc = -EINVAL;
+		goto fail_cmd;
+	}
+
+	rc = wait_event_timeout(ac->cmd_wait,
+			(atomic_read(&ac->cmd_state) >= 0), 5*HZ);
+	if (!rc) {
+		pr_err("%s: timeout, Render window start paramid[0x%x]\n",
+			__func__, matrix.data.param_id);
+		rc = -ETIMEDOUT;
+		goto fail_cmd;
+	}
+
+	if (atomic_read(&ac->cmd_state) > 0) {
+		pr_err("%s: DSP returned error[%s]\n",
+				__func__, adsp_err_get_err_str(
+				atomic_read(&ac->cmd_state)));
+		rc = adsp_err_get_lnx_err_code(
+				atomic_read(&ac->cmd_state));
+		goto fail_cmd;
+	}
+	rc = 0;
+fail_cmd:
+	return rc;
+}
+
+int q6asm_send_mtmx_strtr_render_mode(struct audio_client *ac,
+		uint32_t render_mode)
+{
+	struct asm_mtmx_strtr_params matrix;
+	struct asm_session_mtmx_strtr_param_render_mode_t render_param;
+	int sz = 0;
+	int rc  = 0;
+
+	pr_debug("%s: render mode is %d\n", __func__, render_mode);
+
+	if (!ac) {
+		pr_err("%s: audio client handle is NULL\n", __func__);
+		rc = -EINVAL;
+		goto exit;
+	}
+
+	if (ac->apr == NULL) {
+		pr_err("%s: ac->apr is NULL\n", __func__);
+		rc = -EINVAL;
+		goto exit;
+	}
+
+	if ((render_mode != ASM_SESSION_MTMX_STRTR_PARAM_RENDER_DEFAULT) &&
+	    (render_mode != ASM_SESSION_MTMX_STRTR_PARAM_RENDER_LOCAL_STC)) {
+		pr_err("%s: Invalid render mode %d\n", __func__, render_mode);
+		rc = -EINVAL;
+		goto exit;
+	}
+
+	memset(&render_param, 0,
+	       sizeof(struct asm_session_mtmx_strtr_param_render_mode_t));
+	render_param.flags = render_mode;
+
+	memset(&matrix, 0, sizeof(struct asm_mtmx_strtr_params));
+	sz = sizeof(struct asm_mtmx_strtr_params);
+	q6asm_add_hdr(ac, &matrix.hdr, sz, TRUE);
+	atomic_set(&ac->cmd_state, -1);
+	matrix.hdr.opcode = ASM_SESSION_CMD_SET_MTMX_STRTR_PARAMS_V2;
+
+	matrix.param.data_payload_addr_lsw = 0;
+	matrix.param.data_payload_addr_msw = 0;
+	matrix.param.mem_map_handle = 0;
+	matrix.param.data_payload_size =
+		sizeof(struct asm_stream_param_data_v2) +
+		sizeof(struct asm_session_mtmx_strtr_param_render_mode_t);
+	matrix.param.direction = 0; /* RX */
+	matrix.data.module_id = ASM_SESSION_MTMX_STRTR_MODULE_ID_AVSYNC;
+	matrix.data.param_id = ASM_SESSION_MTMX_STRTR_PARAM_RENDER_MODE_CMD;
+	matrix.data.param_size =
+		sizeof(struct asm_session_mtmx_strtr_param_render_mode_t);
+	matrix.data.reserved = 0;
+	memcpy(&(matrix.config.render_param),
+	       &render_param,
+	       sizeof(struct asm_session_mtmx_strtr_param_render_mode_t));
+
+	rc = apr_send_pkt(ac->apr, (uint32_t *) &matrix);
+	if (rc < 0) {
+		pr_err("%s: Render mode send failed paramid [0x%x]\n",
+			__func__, matrix.data.param_id);
+		rc = -EINVAL;
+		goto exit;
+	}
+
+	rc = wait_event_timeout(ac->cmd_wait,
+			(atomic_read(&ac->cmd_state) >= 0), 5*HZ);
+	if (!rc) {
+		pr_err("%s: timeout, Render mode send paramid [0x%x]\n",
+			__func__, matrix.data.param_id);
+		rc = -ETIMEDOUT;
+		goto exit;
+	}
+
+	if (atomic_read(&ac->cmd_state) > 0) {
+		pr_err("%s: DSP returned error[%s]\n",
+				__func__, adsp_err_get_err_str(
+				atomic_read(&ac->cmd_state)));
+		rc = adsp_err_get_lnx_err_code(
+				atomic_read(&ac->cmd_state));
+		goto exit;
+	}
+	rc = 0;
+exit:
+	return rc;
+}
+
+int q6asm_send_mtmx_strtr_clk_rec_mode(struct audio_client *ac,
+		uint32_t clk_rec_mode)
+{
+	struct asm_mtmx_strtr_params matrix;
+	struct asm_session_mtmx_strtr_param_clk_rec_t clk_rec_param;
+	int sz = 0;
+	int rc  = 0;
+
+	pr_debug("%s: clk rec mode is %d\n", __func__, clk_rec_mode);
+
+	if (!ac) {
+		pr_err("%s: audio client handle is NULL\n", __func__);
+		rc = -EINVAL;
+		goto exit;
+	}
+
+	if (ac->apr == NULL) {
+		pr_err("%s: ac->apr is NULL\n", __func__);
+		rc = -EINVAL;
+		goto exit;
+	}
+
+	if ((clk_rec_mode != ASM_SESSION_MTMX_STRTR_PARAM_CLK_REC_NONE) &&
+	    (clk_rec_mode != ASM_SESSION_MTMX_STRTR_PARAM_CLK_REC_AUTO)) {
+		pr_err("%s: Invalid clk rec mode %d\n", __func__, clk_rec_mode);
+		rc = -EINVAL;
+		goto exit;
+	}
+
+	memset(&clk_rec_param, 0,
+	       sizeof(struct asm_session_mtmx_strtr_param_clk_rec_t));
+	clk_rec_param.flags = clk_rec_mode;
+
+	memset(&matrix, 0, sizeof(struct asm_mtmx_strtr_params));
+	sz = sizeof(struct asm_mtmx_strtr_params);
+	q6asm_add_hdr(ac, &matrix.hdr, sz, TRUE);
+	atomic_set(&ac->cmd_state, -1);
+	matrix.hdr.opcode = ASM_SESSION_CMD_SET_MTMX_STRTR_PARAMS_V2;
+
+	matrix.param.data_payload_addr_lsw = 0;
+	matrix.param.data_payload_addr_msw = 0;
+	matrix.param.mem_map_handle = 0;
+	matrix.param.data_payload_size =
+		sizeof(struct asm_stream_param_data_v2) +
+		sizeof(struct asm_session_mtmx_strtr_param_clk_rec_t);
+	matrix.param.direction = 0; /* RX */
+	matrix.data.module_id = ASM_SESSION_MTMX_STRTR_MODULE_ID_AVSYNC;
+	matrix.data.param_id = ASM_SESSION_MTMX_STRTR_PARAM_CLK_REC_CMD;
+	matrix.data.param_size =
+		sizeof(struct asm_session_mtmx_strtr_param_clk_rec_t);
+	matrix.data.reserved = 0;
+	memcpy(&(matrix.config.clk_rec_param),
+	       &clk_rec_param,
+	       sizeof(struct asm_session_mtmx_strtr_param_clk_rec_t));
+
+	rc = apr_send_pkt(ac->apr, (uint32_t *) &matrix);
+	if (rc < 0) {
+		pr_err("%s: clk rec mode send failed paramid [0x%x]\n",
+			__func__, matrix.data.param_id);
+		rc = -EINVAL;
+		goto exit;
+	}
+
+	rc = wait_event_timeout(ac->cmd_wait,
+			(atomic_read(&ac->cmd_state) >= 0), 5*HZ);
+	if (!rc) {
+		pr_err("%s: timeout, clk rec mode send paramid [0x%x]\n",
+			__func__, matrix.data.param_id);
+		rc = -ETIMEDOUT;
+		goto exit;
+	}
+
+	if (atomic_read(&ac->cmd_state) > 0) {
+		pr_err("%s: DSP returned error[%s]\n",
+				__func__, adsp_err_get_err_str(
+				atomic_read(&ac->cmd_state)));
+		rc = adsp_err_get_lnx_err_code(
+				atomic_read(&ac->cmd_state));
+		goto exit;
+	}
+	rc = 0;
+exit:
+	return rc;
+}
+
+int q6asm_send_mtmx_strtr_enable_adjust_session_clock(struct audio_client *ac,
+		bool enable)
+{
+	struct asm_mtmx_strtr_params matrix;
+	struct asm_session_mtmx_param_adjust_session_time_ctl_t adjust_time;
+	int sz = 0;
+	int rc  = 0;
+
+	pr_debug("%s: adjust session enable %d\n", __func__, enable);
+
+	if (!ac) {
+		pr_err("%s: audio client handle is NULL\n", __func__);
+		rc = -EINVAL;
+		goto exit;
+	}
+
+	if (ac->apr == NULL) {
+		pr_err("%s: ac->apr is NULL\n", __func__);
+		rc = -EINVAL;
+		goto exit;
+	}
+
+	adjust_time.enable = enable;
+	memset(&matrix, 0, sizeof(struct asm_mtmx_strtr_params));
+	sz = sizeof(struct asm_mtmx_strtr_params);
+	q6asm_add_hdr(ac, &matrix.hdr, sz, TRUE);
+	atomic_set(&ac->cmd_state, -1);
+	matrix.hdr.opcode = ASM_SESSION_CMD_SET_MTMX_STRTR_PARAMS_V2;
+
+	matrix.param.data_payload_addr_lsw = 0;
+	matrix.param.data_payload_addr_msw = 0;
+	matrix.param.mem_map_handle = 0;
+	matrix.param.data_payload_size =
+		sizeof(struct asm_stream_param_data_v2) +
+		sizeof(struct asm_session_mtmx_param_adjust_session_time_ctl_t);
+	matrix.param.direction = 0; /* RX */
+	matrix.data.module_id = ASM_SESSION_MTMX_STRTR_MODULE_ID_AVSYNC;
+	matrix.data.param_id = ASM_SESSION_MTMX_PARAM_ADJUST_SESSION_TIME_CTL;
+	matrix.data.param_size =
+		sizeof(struct asm_session_mtmx_param_adjust_session_time_ctl_t);
+	matrix.data.reserved = 0;
+	matrix.config.adj_time_param.enable = adjust_time.enable;
+
+	rc = apr_send_pkt(ac->apr, (uint32_t *) &matrix);
+	if (rc < 0) {
+		pr_err("%s: enable adjust session send failed paramid [0x%x]\n",
+			__func__, matrix.data.param_id);
+		rc = -EINVAL;
+		goto exit;
+	}
+
+	rc = wait_event_timeout(ac->cmd_wait,
+			(atomic_read(&ac->cmd_state) >= 0), 5*HZ);
+	if (!rc) {
+		pr_err("%s: timeout, enable adjust session paramid [0x%x]\n",
+			__func__, matrix.data.param_id);
+		rc = -ETIMEDOUT;
+		goto exit;
+	}
+
+	if (atomic_read(&ac->cmd_state) > 0) {
+		pr_err("%s: DSP returned error[%s]\n",
+				__func__, adsp_err_get_err_str(
+				atomic_read(&ac->cmd_state)));
+		rc = adsp_err_get_lnx_err_code(
+				atomic_read(&ac->cmd_state));
+		goto exit;
+	}
+	rc = 0;
+exit:
+	return rc;
+}
+
+static int __q6asm_cmd(struct audio_client *ac, int cmd, uint32_t stream_id)
+{
+	struct apr_hdr hdr;
+	int rc;
+	atomic_t *state;
+	int cnt = 0;
+
+	if (!ac) {
+		pr_err("%s: APR handle NULL\n", __func__);
+		return -EINVAL;
+	}
+	if (ac->apr == NULL) {
+		pr_err("%s: AC APR handle NULL\n", __func__);
+		return -EINVAL;
+	}
+	q6asm_stream_add_hdr(ac, &hdr, sizeof(hdr), TRUE, stream_id);
+	atomic_set(&ac->cmd_state, -1);
+	/*
+	 * Update the token field with stream/session id for compressed
+	 * playback; the platform driver must know the stream with which
+	 * the command is associated.
+	 */
+	if (ac->io_mode & COMPRESSED_STREAM_IO)
+		q6asm_update_token(&hdr.token,
+				   ac->session,
+				   stream_id,
+				   0, /* Buffer index is NA */
+				   0, /* Direction flag is NA */
+				   WAIT_CMD);
+	pr_debug("%s: token = 0x%x, stream_id  %d, session 0x%x\n",
+			__func__, hdr.token, stream_id, ac->session);
+	switch (cmd) {
+	case CMD_PAUSE:
+		pr_debug("%s: CMD_PAUSE\n", __func__);
+		hdr.opcode = ASM_SESSION_CMD_PAUSE;
+		state = &ac->cmd_state;
+		break;
+	case CMD_SUSPEND:
+		pr_debug("%s: CMD_SUSPEND\n", __func__);
+		hdr.opcode = ASM_SESSION_CMD_SUSPEND;
+		state = &ac->cmd_state;
+		break;
+	case CMD_FLUSH:
+		pr_debug("%s: CMD_FLUSH\n", __func__);
+		hdr.opcode = ASM_STREAM_CMD_FLUSH;
+		state = &ac->cmd_state;
+		break;
+	case CMD_OUT_FLUSH:
+		pr_debug("%s: CMD_OUT_FLUSH\n", __func__);
+		hdr.opcode = ASM_STREAM_CMD_FLUSH_READBUFS;
+		state = &ac->cmd_state;
+		break;
+	case CMD_EOS:
+		pr_debug("%s: CMD_EOS\n", __func__);
+		hdr.opcode = ASM_DATA_CMD_EOS;
+		atomic_set(&ac->cmd_state, 0);
+		state = &ac->cmd_state;
+		break;
+	case CMD_CLOSE:
+		pr_debug("%s: CMD_CLOSE\n", __func__);
+		hdr.opcode = ASM_STREAM_CMD_CLOSE;
+		state = &ac->cmd_state;
+		break;
+	default:
+		pr_err("%s: Invalid format[%d]\n", __func__, cmd);
+		rc = -EINVAL;
+		goto fail_cmd;
+	}
+	pr_debug("%s: session[%d]opcode[0x%x]\n", __func__,
+			ac->session,
+			hdr.opcode);
+	rc = apr_send_pkt(ac->apr, (uint32_t *) &hdr);
+	if (rc < 0) {
+		pr_err("%s: Command 0x%x failed %d\n",
+				__func__, hdr.opcode, rc);
+		rc = -EINVAL;
+		goto fail_cmd;
+	}
+	rc = wait_event_timeout(ac->cmd_wait, (atomic_read(state) >= 0), 5*HZ);
+	if (!rc) {
+		pr_err("%s: timeout. waited for response opcode[0x%x]\n",
+				__func__, hdr.opcode);
+		rc = -ETIMEDOUT;
+		goto fail_cmd;
+	}
+	if (atomic_read(state) > 0) {
+		pr_err("%s: DSP returned error[%s] opcode %d\n",
+					__func__, adsp_err_get_err_str(
+					atomic_read(state)),
+					hdr.opcode);
+		rc = adsp_err_get_lnx_err_code(atomic_read(state));
+		goto fail_cmd;
+	}
+
+	if (cmd == CMD_FLUSH)
+		q6asm_reset_buf_state(ac);
+	if (cmd == CMD_CLOSE) {
+		/* check if DSP return all buffers */
+		if (ac->port[IN].buf) {
+			for (cnt = 0; cnt < ac->port[IN].max_buf_cnt;
+					cnt++) {
+				if (ac->port[IN].buf[cnt].used == IN) {
+					dev_vdbg(ac->dev, "Write Buf[%d] not returned\n",
+							cnt);
+				}
+			}
+		}
+		if (ac->port[OUT].buf) {
+			for (cnt = 0; cnt < ac->port[OUT].max_buf_cnt; cnt++) {
+				if (ac->port[OUT].buf[cnt].used == OUT) {
+					dev_vdbg(ac->dev, "Read Buf[%d] not returned\n",
+							cnt);
+				}
+			}
+		}
+	}
+	return 0;
+fail_cmd:
+	return rc;
+}
+
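+/*
+ * Blocking variants: send the command on the chosen stream and wait
+ * up to 5 s for the DSP acknowledgement. A minimal usage sketch,
+ * assuming an opened and running session:
+ *
+ *	rc = q6asm_cmd(ac, CMD_PAUSE);
+ *	if (rc < 0)
+ *		pr_err("pause failed %d\n", rc);
+ */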
+int q6asm_cmd(struct audio_client *ac, int cmd)
+{
+	return __q6asm_cmd(ac, cmd, ac->stream_id);
+}
+
+int q6asm_stream_cmd(struct audio_client *ac, int cmd, uint32_t stream_id)
+{
+	return __q6asm_cmd(ac, cmd, stream_id);
+}
+
+static int __q6asm_cmd_nowait(struct audio_client *ac, int cmd,
+			      uint32_t stream_id)
+{
+	struct apr_hdr hdr;
+	int rc;
+
+	if (!ac) {
+		pr_err("%s: APR handle NULL\n", __func__);
+		return -EINVAL;
+	}
+	if (ac->apr == NULL) {
+		pr_err("%s: AC APR handle NULL\n", __func__);
+		return -EINVAL;
+	}
+	q6asm_stream_add_hdr_async(ac, &hdr, sizeof(hdr), TRUE, stream_id);
+	atomic_set(&ac->cmd_state, 1);
+	/*
+	 * Update the token field with stream/session id for compressed
+	 * playback; the platform driver must know the stream with which
+	 * the command is associated.
+	 */
+	if (ac->io_mode & COMPRESSED_STREAM_IO)
+		q6asm_update_token(&hdr.token,
+				   ac->session,
+				   stream_id,
+				   0, /* Buffer index is NA */
+				   0, /* Direction flag is NA */
+				   NO_WAIT_CMD);
+
+	pr_debug("%s: token = 0x%x, stream_id  %d, session 0x%x\n",
+			__func__, hdr.token, stream_id, ac->session);
+	switch (cmd) {
+	case CMD_PAUSE:
+		pr_debug("%s: CMD_PAUSE\n", __func__);
+		hdr.opcode = ASM_SESSION_CMD_PAUSE;
+		break;
+	case CMD_EOS:
+		pr_debug("%s: CMD_EOS\n", __func__);
+		hdr.opcode = ASM_DATA_CMD_EOS;
+		break;
+	case CMD_CLOSE:
+		pr_debug("%s: CMD_CLOSE\n", __func__);
+		hdr.opcode = ASM_STREAM_CMD_CLOSE;
+		break;
+	default:
+		pr_err("%s: Invalid format[%d]\n", __func__, cmd);
+		goto fail_cmd;
+	}
+	pr_debug("%s: session[%d]opcode[0x%x]\n", __func__,
+			ac->session,
+			hdr.opcode);
+
+	rc = apr_send_pkt(ac->apr, (uint32_t *) &hdr);
+	if (rc < 0) {
+		pr_err("%s: Command 0x%x failed %d\n",
+				__func__, hdr.opcode, rc);
+		goto fail_cmd;
+	}
+	return 0;
+fail_cmd:
+	return -EINVAL;
+}
+
+int q6asm_cmd_nowait(struct audio_client *ac, int cmd)
+{
+	pr_debug("%s: stream_id: %d\n", __func__, ac->stream_id);
+	return __q6asm_cmd_nowait(ac, cmd, ac->stream_id);
+}
+
+int q6asm_stream_cmd_nowait(struct audio_client *ac, int cmd,
+			    uint32_t stream_id)
+{
+	pr_debug("%s: stream_id: %d\n", __func__, stream_id);
+	return __q6asm_cmd_nowait(ac, cmd, stream_id);
+}
+
+int __q6asm_send_meta_data(struct audio_client *ac, uint32_t stream_id,
+			  uint32_t initial_samples, uint32_t trailing_samples)
+{
+	struct asm_data_cmd_remove_silence silence;
+	int rc = 0;
+
+	if (!ac) {
+		pr_err("%s: APR handle NULL\n", __func__);
+		return -EINVAL;
+	}
+	if (ac->apr == NULL) {
+		pr_err("%s: AC APR handle NULL\n", __func__);
+		return -EINVAL;
+	}
+	pr_debug("%s: session[%d]\n", __func__, ac->session);
+	q6asm_stream_add_hdr_async(ac, &silence.hdr, sizeof(silence), TRUE,
+			stream_id);
+
+	/*
+	 * Update the token field with stream/session id for compressed
+	 * playback; the platform driver must know the stream with which
+	 * the command is associated.
+	 */
+	if (ac->io_mode & COMPRESSED_STREAM_IO)
+		q6asm_update_token(&silence.hdr.token,
+				   ac->session,
+				   stream_id,
+				   0, /* Buffer index is NA */
+				   0, /* Direction flag is NA */
+				   NO_WAIT_CMD);
+	pr_debug("%s: token = 0x%x, stream_id  %d, session 0x%x\n",
+			__func__, silence.hdr.token, stream_id, ac->session);
+
+	silence.hdr.opcode = ASM_DATA_CMD_REMOVE_INITIAL_SILENCE;
+	silence.num_samples_to_remove    = initial_samples;
+
+	rc = apr_send_pkt(ac->apr, (uint32_t *) &silence);
+	if (rc < 0) {
+		pr_err("%s: Command silence failed[%d]\n", __func__, rc);
+		goto fail_cmd;
+	}
+
+	silence.hdr.opcode = ASM_DATA_CMD_REMOVE_TRAILING_SILENCE;
+	silence.num_samples_to_remove    = trailing_samples;
+
+	rc = apr_send_pkt(ac->apr, (uint32_t *) &silence);
+	if (rc < 0) {
+		pr_err("%s: Command silence failed[%d]\n", __func__, rc);
+		goto fail_cmd;
+	}
+
+	return 0;
+fail_cmd:
+	return -EINVAL;
+}
+
+int q6asm_stream_send_meta_data(struct audio_client *ac, uint32_t stream_id,
+		uint32_t initial_samples, uint32_t trailing_samples)
+{
+	return __q6asm_send_meta_data(ac, stream_id, initial_samples,
+				     trailing_samples);
+}
+
+int q6asm_send_meta_data(struct audio_client *ac, uint32_t initial_samples,
+		uint32_t trailing_samples)
+{
+	return __q6asm_send_meta_data(ac, ac->stream_id, initial_samples,
+				     trailing_samples);
+}
+
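+/*
+ * After a flush, point both ports back at buffer 0 and restore each
+ * buffer's 'used' ownership flag (1 for tunnel-write sessions, 0
+ * otherwise) so the read/write paths restart from a clean state.
+ */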
+static void q6asm_reset_buf_state(struct audio_client *ac)
+{
+	int cnt = 0;
+	int loopcnt = 0;
+	int used;
+	struct audio_port_data *port = NULL;
+
+	if (ac->io_mode & SYNC_IO_MODE) {
+		used = (ac->io_mode & TUN_WRITE_IO_MODE ? 1 : 0);
+		mutex_lock(&ac->cmd_lock);
+		for (loopcnt = 0; loopcnt <= OUT; loopcnt++) {
+			port = &ac->port[loopcnt];
+			cnt = port->max_buf_cnt - 1;
+			port->dsp_buf = 0;
+			port->cpu_buf = 0;
+			while (cnt >= 0) {
+				if (!port->buf)
+					break; /* no buffers allocated */
+				port->buf[cnt].used = used;
+				cnt--;
+			}
+		}
+		mutex_unlock(&ac->cmd_lock);
+	}
+}
+
+int q6asm_reg_tx_overflow(struct audio_client *ac, uint16_t enable)
+{
+	struct asm_session_cmd_regx_overflow tx_overflow;
+	int rc;
+
+	if (!ac) {
+		pr_err("%s: APR handle NULL\n", __func__);
+		return -EINVAL;
+	}
+	if (ac->apr == NULL) {
+		pr_err("%s: AC APR handle NULL\n", __func__);
+		return -EINVAL;
+	}
+	pr_debug("%s: session[%d]enable[%d]\n", __func__,
+			ac->session, enable);
+	q6asm_add_hdr(ac, &tx_overflow.hdr, sizeof(tx_overflow), TRUE);
+	atomic_set(&ac->cmd_state, -1);
+
+	tx_overflow.hdr.opcode =
+		ASM_SESSION_CMD_REGISTER_FORX_OVERFLOW_EVENTS;
+	/* tx overflow event: enable */
+	tx_overflow.enable_flag = enable;
+
+	rc = apr_send_pkt(ac->apr, (uint32_t *) &tx_overflow);
+	if (rc < 0) {
+		pr_err("%s: tx overflow op[0x%x]rc[%d]\n",
+				__func__, tx_overflow.hdr.opcode, rc);
+		rc = -EINVAL;
+		goto fail_cmd;
+	}
+	rc = wait_event_timeout(ac->cmd_wait,
+				(atomic_read(&ac->cmd_state) >= 0), 5*HZ);
+	if (!rc) {
+		pr_err("%s: timeout. waited for tx overflow\n", __func__);
+		rc = -ETIMEDOUT;
+		goto fail_cmd;
+	}
+	if (atomic_read(&ac->cmd_state) > 0) {
+		pr_err("%s: DSP returned error[%s]\n",
+				__func__, adsp_err_get_err_str(
+				atomic_read(&ac->cmd_state)));
+		rc = adsp_err_get_lnx_err_code(
+				atomic_read(&ac->cmd_state));
+		goto fail_cmd;
+	}
+
+	return 0;
+fail_cmd:
+	return rc;
+}
+
+int q6asm_reg_rx_underflow(struct audio_client *ac, uint16_t enable)
+{
+	struct asm_session_cmd_rgstr_rx_underflow rx_underflow;
+	int rc;
+
+	if (!ac) {
+		pr_err("%s: APR handle NULL\n", __func__);
+		return -EINVAL;
+	}
+	if (ac->apr == NULL) {
+		pr_err("%s: AC APR handle NULL\n", __func__);
+		return -EINVAL;
+	}
+	pr_debug("%s: session[%d]enable[%d]\n", __func__,
+			ac->session, enable);
+	q6asm_add_hdr_async(ac, &rx_underflow.hdr, sizeof(rx_underflow), FALSE);
+
+	rx_underflow.hdr.opcode =
+		ASM_SESSION_CMD_REGISTER_FOR_RX_UNDERFLOW_EVENTS;
+	/* rx underflow event: enable */
+	rx_underflow.enable_flag = enable;
+
+	rc = apr_send_pkt(ac->apr, (uint32_t *) &rx_underflow);
+	if (rc < 0) {
+		pr_err("%s: rx underflow op[0x%x]rc[%d]\n",
+				__func__, rx_underflow.hdr.opcode, rc);
+		goto fail_cmd;
+	}
+	return 0;
+fail_cmd:
+	return -EINVAL;
+}
+
+int q6asm_adjust_session_clock(struct audio_client *ac,
+		uint32_t adjust_time_lsw,
+		uint32_t adjust_time_msw)
+{
+	int rc = 0;
+	int sz = 0;
+	struct asm_session_cmd_adjust_session_clock_v2 adjust_clock;
+
+	pr_debug("%s: adjust_time_lsw is %x, adjust_time_msw is %x\n", __func__,
+		  adjust_time_lsw, adjust_time_msw);
+
+	if (!ac) {
+		pr_err("%s: audio client handle is NULL\n", __func__);
+		rc = -EINVAL;
+		goto fail_cmd;
+	}
+
+	if (ac->apr == NULL) {
+		pr_err("%s: ac->apr is NULL", __func__);
+		rc = -EINVAL;
+		goto fail_cmd;
+	}
+
+	sz = sizeof(struct asm_session_cmd_adjust_session_clock_v2);
+	q6asm_add_hdr(ac, &adjust_clock.hdr, sz, TRUE);
+	atomic_set(&ac->cmd_state, -1);
+	adjust_clock.hdr.opcode = ASM_SESSION_CMD_ADJUST_SESSION_CLOCK_V2;
+
+	adjust_clock.adjustime_lsw = adjust_time_lsw;
+	adjust_clock.adjustime_msw = adjust_time_msw;
+
+	rc = apr_send_pkt(ac->apr, (uint32_t *) &adjust_clock);
+	if (rc < 0) {
+		pr_err("%s: adjust_clock send failed paramid [0x%x]\n",
+			__func__, adjust_clock.hdr.opcode);
+		rc = -EINVAL;
+		goto fail_cmd;
+	}
+
+	rc = wait_event_timeout(ac->cmd_wait,
+			(atomic_read(&ac->cmd_state) >= 0), 5*HZ);
+	if (!rc) {
+		pr_err("%s: timeout, adjust_clock paramid[0x%x]\n",
+			__func__, adjust_clock.hdr.opcode);
+		rc = -ETIMEDOUT;
+		goto fail_cmd;
+	}
+
+	if (atomic_read(&ac->cmd_state) > 0) {
+		pr_err("%s: DSP returned error[%s]\n",
+				__func__, adsp_err_get_err_str(
+				atomic_read(&ac->cmd_state)));
+		rc = adsp_err_get_lnx_err_code(
+				atomic_read(&ac->cmd_state));
+		goto fail_cmd;
+	}
+	rc = 0;
+fail_cmd:
+	return rc;
+}
+
+/**
+ * q6asm_get_path_delay() - get the path delay for an audio session
+ * @ac: audio client handle
+ *
+ * Retrieves the current audio DSP path delay for the given audio session.
+ *
+ * Return: 0 on success, error code otherwise
+ */
+int q6asm_get_path_delay(struct audio_client *ac)
+{
+	int rc = 0;
+	struct apr_hdr hdr;
+
+	if (!ac || ac->apr == NULL) {
+		pr_err("%s: invalid audio client\n", __func__);
+		return -EINVAL;
+	}
+
+	hdr.opcode = ASM_SESSION_CMD_GET_PATH_DELAY_V2;
+	q6asm_add_hdr(ac, &hdr, sizeof(hdr), TRUE);
+	atomic_set(&ac->cmd_state, -1);
+
+	rc = apr_send_pkt(ac->apr, (uint32_t *) &hdr);
+	if (rc < 0) {
+		pr_err("%s: Command 0x%x failed %d\n", __func__,
+				hdr.opcode, rc);
+		return rc;
+	}
+
+	rc = wait_event_timeout(ac->cmd_wait,
+			(atomic_read(&ac->cmd_state) >= 0), 5 * HZ);
+	if (!rc) {
+		pr_err("%s: timeout. waited for response opcode[0x%x]\n",
+				__func__, hdr.opcode);
+		return -ETIMEDOUT;
+	}
+
+	if (atomic_read(&ac->cmd_state) > 0) {
+		pr_err("%s: DSP returned error[%s]\n",
+				__func__, adsp_err_get_err_str(
+				atomic_read(&ac->cmd_state)));
+		rc = adsp_err_get_lnx_err_code(
+				atomic_read(&ac->cmd_state));
+		return rc;
+	}
+
+	return 0;
+}
+
+int q6asm_get_apr_service_id(int session_id)
+{
+	pr_debug("%s:\n", __func__);
+
+	if (session_id <= 0 || session_id > ASM_ACTIVE_STREAMS_ALLOWED) {
+		pr_err("%s: invalid session_id = %d\n", __func__, session_id);
+		return -EINVAL;
+	}
+
+	if (session[session_id].ac == NULL ||
+	    (session[session_id].ac)->apr == NULL) {
+		pr_err("%s: session not created for session id = %d\n",
+		       __func__, session_id);
+		return -EINVAL;
+	}
+
+	return ((struct apr_svc *)(session[session_id].ac)->apr)->id;
+}
+
+int q6asm_get_asm_topology(int session_id)
+{
+	int topology = -EINVAL;
+
+	if (session_id <= 0 || session_id > ASM_ACTIVE_STREAMS_ALLOWED) {
+		pr_err("%s: invalid session_id = %d\n", __func__, session_id);
+		goto done;
+	}
+	if (session[session_id].ac == NULL) {
+		pr_err("%s: session not created for session id = %d\n",
+		       __func__, session_id);
+		goto done;
+	}
+	topology = (session[session_id].ac)->topology;
+done:
+	return topology;
+}
+
+int q6asm_get_asm_app_type(int session_id)
+{
+	int app_type = -EINVAL;
+
+	if (session_id <= 0 || session_id > ASM_ACTIVE_STREAMS_ALLOWED) {
+		pr_err("%s: invalid session_id = %d\n", __func__, session_id);
+		goto done;
+	}
+	if (session[session_id].ac == NULL) {
+		pr_err("%s: session not created for session id = %d\n",
+		       __func__, session_id);
+		goto done;
+	}
+	app_type = (session[session_id].ac)->app_type;
+done:
+	return app_type;
+}
+
+static int q6asm_get_asm_topology_cal(void)
+{
+	int topology = DEFAULT_POPP_TOPOLOGY;
+	struct cal_block_data *cal_block = NULL;
+
+	if (cal_data[ASM_TOPOLOGY_CAL] == NULL)
+		goto done;
+
+	mutex_lock(&cal_data[ASM_TOPOLOGY_CAL]->lock);
+	cal_block = cal_utils_get_only_cal_block(cal_data[ASM_TOPOLOGY_CAL]);
+	if (cal_block == NULL)
+		goto unlock;
+
+	topology = ((struct audio_cal_info_asm_top *)
+		cal_block->cal_info)->topology;
+unlock:
+	mutex_unlock(&cal_data[ASM_TOPOLOGY_CAL]->lock);
+done:
+	pr_debug("%s: Using topology %d\n", __func__, topology);
+	return topology;
+}
+
+static int q6asm_get_asm_app_type_cal(void)
+{
+	int app_type = DEFAULT_APP_TYPE;
+	struct cal_block_data *cal_block = NULL;
+
+	if (cal_data[ASM_TOPOLOGY_CAL] == NULL)
+		goto done;
+
+	mutex_lock(&cal_data[ASM_TOPOLOGY_CAL]->lock);
+	cal_block = cal_utils_get_only_cal_block(cal_data[ASM_TOPOLOGY_CAL]);
+	if (cal_block == NULL)
+		goto unlock;
+
+	app_type = ((struct audio_cal_info_asm_top *)
+		cal_block->cal_info)->app_type;
+
+	if (app_type == 0)
+		app_type = DEFAULT_APP_TYPE;
+unlock:
+	mutex_unlock(&cal_data[ASM_TOPOLOGY_CAL]->lock);
+done:
+	pr_debug("%s: Using app_type %d\n", __func__, app_type);
+	return app_type;
+}
+
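+/*
+ * Send the ASM audio-stream calibration to the DSP. Unlike the other
+ * set-pp-params commands in this file, the calibration payload is
+ * passed out-of-band: the packet carries only the physical address,
+ * size and mmap handle of the already-mapped cal block.
+ */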
+int q6asm_send_cal(struct audio_client *ac)
+{
+	struct cal_block_data *cal_block = NULL;
+	struct apr_hdr	hdr;
+	char *asm_params = NULL;
+	struct asm_stream_cmd_set_pp_params_v2 payload_params;
+	int sz, rc = -EINVAL;
+	pr_debug("%s:\n", __func__);
+
+	if (!ac) {
+		pr_err("%s: APR handle NULL\n", __func__);
+		goto done;
+	}
+	if (ac->apr == NULL) {
+		pr_err("%s: AC APR handle NULL\n", __func__);
+		goto done;
+	}
+	if (ac->io_mode & NT_MODE) {
+		pr_debug("%s: called for NT MODE, exiting\n", __func__);
+		goto done;
+	}
+
+	if (cal_data[ASM_AUDSTRM_CAL] == NULL)
+		goto done;
+
+	if (ac->perf_mode == ULTRA_LOW_LATENCY_PCM_MODE) {
+		rc = 0; /* no cal is required, not error case */
+		goto done;
+	}
+
+	mutex_lock(&cal_data[ASM_AUDSTRM_CAL]->lock);
+	cal_block = cal_utils_get_only_cal_block(cal_data[ASM_AUDSTRM_CAL]);
+	if (cal_block == NULL) {
+		pr_err("%s: cal_block is NULL\n",
+			__func__);
+		goto unlock;
+	}
+
+	if (cal_block->cal_data.size == 0) {
+		rc = 0; /* not error case */
+		pr_debug("%s: cal_data.size is 0, don't send cal data\n",
+			__func__);
+		goto unlock;
+	}
+
+	rc = remap_cal_data(ASM_AUDSTRM_CAL_TYPE, cal_block);
+	if (rc) {
+		pr_err("%s: Remap_cal_data failed for cal %d!\n",
+			__func__, ASM_AUDSTRM_CAL);
+		goto unlock;
+	}
+
+	sz = sizeof(struct apr_hdr) +
+		sizeof(struct asm_stream_cmd_set_pp_params_v2);
+	asm_params = kzalloc(sz, GFP_KERNEL);
+	if (!asm_params) {
+		pr_err("%s: asm params memory alloc failed\n", __func__);
+		rc = -ENOMEM;
+		goto unlock;
+	}
+
+	/* asm_stream_cmd_set_pp_params_v2 has no APR header in it */
+	q6asm_add_hdr_async(ac, &hdr, (sizeof(struct apr_hdr) +
+		sizeof(struct asm_stream_cmd_set_pp_params_v2)), TRUE);
+
+	atomic_set(&ac->cmd_state_pp, -1);
+	hdr.opcode = ASM_STREAM_CMD_SET_PP_PARAMS_V2;
+	payload_params.data_payload_addr_lsw =
+			lower_32_bits(cal_block->cal_data.paddr);
+	payload_params.data_payload_addr_msw =
+			msm_audio_populate_upper_32_bits(
+						cal_block->cal_data.paddr);
+	payload_params.mem_map_handle = cal_block->map_data.q6map_handle;
+	payload_params.data_payload_size = cal_block->cal_data.size;
+	memcpy(((u8 *)asm_params), &hdr, sizeof(struct apr_hdr));
+	memcpy(((u8 *)asm_params + sizeof(struct apr_hdr)), &payload_params,
+			sizeof(struct asm_stream_cmd_set_pp_params_v2));
+
+	pr_debug("%s: phyaddr lsw = %x msw = %x, maphdl = %x calsize = %d\n",
+		__func__, payload_params.data_payload_addr_lsw,
+		payload_params.data_payload_addr_msw,
+		payload_params.mem_map_handle,
+		payload_params.data_payload_size);
+
+	rc = apr_send_pkt(ac->apr, (uint32_t *) asm_params);
+	if (rc < 0) {
+		pr_err("%s: audio audstrm cal send failed\n", __func__);
+		rc = -EINVAL;
+		goto free;
+	}
+	rc = wait_event_timeout(ac->cmd_wait,
+				(atomic_read(&ac->cmd_state_pp) >= 0), 5 * HZ);
+	if (!rc) {
+		pr_err("%s: timeout, audio audstrm cal send\n", __func__);
+		rc = -ETIMEDOUT;
+		goto free;
+	}
+	if (atomic_read(&ac->cmd_state_pp) > 0) {
+		pr_err("%s: DSP returned error[%d] audio audstrm cal send\n",
+				__func__, atomic_read(&ac->cmd_state_pp));
+		rc = -EINVAL;
+		goto free;
+	}
+
+	rc = 0;
+
+free:
+	kfree(asm_params);
+unlock:
+	mutex_unlock(&cal_data[ASM_AUDSTRM_CAL]->lock);
+done:
+	return rc;
+}
+
+static int get_cal_type_index(int32_t cal_type)
+{
+	int ret = -EINVAL;
+
+	switch (cal_type) {
+	case ASM_TOPOLOGY_CAL_TYPE:
+		ret = ASM_TOPOLOGY_CAL;
+		break;
+	case ASM_CUST_TOPOLOGY_CAL_TYPE:
+		ret = ASM_CUSTOM_TOP_CAL;
+		break;
+	case ASM_AUDSTRM_CAL_TYPE:
+		ret = ASM_AUDSTRM_CAL;
+		break;
+	case ASM_RTAC_APR_CAL_TYPE:
+		ret = ASM_RTAC_APR_CAL;
+		break;
+	default:
+		pr_err("%s: invalid cal type %d!\n", __func__, cal_type);
+	}
+	return ret;
+}
+
+static int q6asm_alloc_cal(int32_t cal_type,
+				size_t data_size, void *data)
+{
+	int				ret = 0;
+	int				cal_index;
+	pr_debug("%s:\n", __func__);
+
+	cal_index = get_cal_type_index(cal_type);
+	if (cal_index < 0) {
+		pr_err("%s: could not get cal index %d!\n",
+			__func__, cal_index);
+		ret = -EINVAL;
+		goto done;
+	}
+
+	ret = cal_utils_alloc_cal(data_size, data,
+		cal_data[cal_index], 0, NULL);
+	if (ret < 0) {
+		pr_err("%s: cal_utils_alloc_block failed, ret = %d, cal type = %d!\n",
+			__func__, ret, cal_type);
+		ret = -EINVAL;
+		goto done;
+	}
+done:
+	return ret;
+}
+
+static int q6asm_dealloc_cal(int32_t cal_type,
+				size_t data_size, void *data)
+{
+	int				ret = 0;
+	int				cal_index;
+	pr_debug("%s:\n", __func__);
+
+	cal_index = get_cal_type_index(cal_type);
+	if (cal_index < 0) {
+		pr_err("%s: could not get cal index %d!\n",
+			__func__, cal_index);
+		ret = -EINVAL;
+		goto done;
+	}
+
+	ret = cal_utils_dealloc_cal(data_size, data,
+		cal_data[cal_index]);
+	if (ret < 0) {
+		pr_err("%s: cal_utils_dealloc_block failed, ret = %d, cal type = %d!\n",
+			__func__, ret, cal_type);
+		ret = -EINVAL;
+		goto done;
+	}
+done:
+	return ret;
+}
+
+static int q6asm_set_cal(int32_t cal_type,
+			size_t data_size, void *data)
+{
+	int ret = 0;
+	int cal_index;
+	pr_debug("%s:\n", __func__);
+
+	cal_index = get_cal_type_index(cal_type);
+	if (cal_index < 0) {
+		pr_err("%s: could not get cal index %d!\n",
+			__func__, cal_index);
+		ret = -EINVAL;
+		goto done;
+	}
+
+	ret = cal_utils_set_cal(data_size, data,
+		cal_data[cal_index], 0, NULL);
+	if (ret < 0) {
+		pr_err("%s: cal_utils_set_cal failed, ret = %d, cal type = %d!\n",
+			__func__, ret, cal_type);
+		ret = -EINVAL;
+		goto done;
+	}
+
+	if (cal_index == ASM_CUSTOM_TOP_CAL) {
+		mutex_lock(&cal_data[ASM_CUSTOM_TOP_CAL]->lock);
+		set_custom_topology = 1;
+		mutex_unlock(&cal_data[ASM_CUSTOM_TOP_CAL]->lock);
+	}
+done:
+	return ret;
+}
+
+static void q6asm_delete_cal_data(void)
+{
+	pr_debug("%s:\n", __func__);
+	cal_utils_destroy_cal_types(ASM_MAX_CAL_TYPES, cal_data);
+}
+
+static int q6asm_init_cal_data(void)
+{
+	int ret = 0;
+	struct cal_type_info	cal_type_info[] = {
+		{{ASM_TOPOLOGY_CAL_TYPE,
+		{NULL, NULL, NULL,
+		q6asm_set_cal, NULL, NULL} },
+		{NULL, NULL, cal_utils_match_buf_num} },
+
+		{{ASM_CUST_TOPOLOGY_CAL_TYPE,
+		{q6asm_alloc_cal, q6asm_dealloc_cal, NULL,
+		q6asm_set_cal, NULL, NULL} },
+		{NULL, q6asm_unmap_cal_memory, cal_utils_match_buf_num} },
+
+		{{ASM_AUDSTRM_CAL_TYPE,
+		{q6asm_alloc_cal, q6asm_dealloc_cal, NULL,
+		q6asm_set_cal, NULL, NULL} },
+		{NULL, q6asm_unmap_cal_memory, cal_utils_match_buf_num} },
+
+		{{ASM_RTAC_APR_CAL_TYPE,
+		{NULL, NULL, NULL, NULL, NULL, NULL} },
+		{NULL, NULL, cal_utils_match_buf_num} }
+	};
+	pr_debug("%s\n", __func__);
+
+	ret = cal_utils_create_cal_types(ASM_MAX_CAL_TYPES, cal_data,
+		cal_type_info);
+	if (ret < 0) {
+		pr_err("%s: could not create cal type! %d\n",
+			__func__, ret);
+		ret = -EINVAL;
+		goto err;
+	}
+
+	return ret;
+err:
+	q6asm_delete_cal_data();
+	return ret;
+}
+
+static int q6asm_is_valid_session(struct apr_client_data *data, void *priv)
+{
+	struct audio_client *ac = (struct audio_client *)priv;
+	union asm_token_struct asm_token;
+
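+	/* The APR token encodes the originating session id; reject
+	 * events that do not belong to this client's session.
+	 */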
+	asm_token.token = data->token;
+	if (asm_token._token.session_id != ac->session) {
+		pr_err("%s: Invalid session[%d] rxed expected[%d]\n",
+			__func__, asm_token._token.session_id, ac->session);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static int __init q6asm_init(void)
+{
+	int lcnt, ret;
+	pr_debug("%s:\n", __func__);
+
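+	/* session ids are 1-based, hence the "+ 1" sizing of session[] */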
+	memset(session, 0, sizeof(struct audio_session) *
+		(ASM_ACTIVE_STREAMS_ALLOWED + 1));
+	for (lcnt = 0; lcnt <= ASM_ACTIVE_STREAMS_ALLOWED; lcnt++)
+		spin_lock_init(&(session[lcnt].session_lock));
+	set_custom_topology = 1;
+
+	/* setup common client used for cal mem map */
+	common_client.session = ASM_CONTROL_SESSION;
+	common_client.port[0].buf = &common_buf[0];
+	common_client.port[1].buf = &common_buf[1];
+	init_waitqueue_head(&common_client.cmd_wait);
+	init_waitqueue_head(&common_client.time_wait);
+	init_waitqueue_head(&common_client.mem_wait);
+	atomic_set(&common_client.time_flag, 1);
+	INIT_LIST_HEAD(&common_client.port[0].mem_map_handle);
+	INIT_LIST_HEAD(&common_client.port[1].mem_map_handle);
+	mutex_init(&common_client.cmd_lock);
+	for (lcnt = 0; lcnt <= OUT; lcnt++) {
+		mutex_init(&common_client.port[lcnt].lock);
+		spin_lock_init(&common_client.port[lcnt].dsp_lock);
+	}
+	atomic_set(&common_client.cmd_state, 0);
+	atomic_set(&common_client.mem_state, 0);
+
+	ret = q6asm_init_cal_data();
+	if (ret)
+		pr_err("%s: could not init cal data! ret %d\n",
+			__func__, ret);
+
+	config_debug_fs_init();
+
+	return 0;
+}
+
+static void __exit q6asm_exit(void)
+{
+	q6asm_delete_cal_data();
+}
+
+device_initcall(q6asm_init);
+__exitcall(q6asm_exit);
diff -Nruw linux-4.4.115-fbx/sound/soc/msm./qdsp6v2/q6audio-v2.c linux-4.4.115-fbx/sound/soc/msm/qdsp6v2/q6audio-v2.c
--- linux-4.4.115-fbx/sound/soc/msm./qdsp6v2/q6audio-v2.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/sound/soc/msm/qdsp6v2/q6audio-v2.c	2019-10-29 09:26:26.165227859 +0100
@@ -0,0 +1,807 @@
+/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/slab.h>
+#include <linux/wait.h>
+#include <linux/sched.h>
+#include <linux/jiffies.h>
+#include <linux/uaccess.h>
+#include <linux/atomic.h>
+#include <sound/q6afe-v2.h>
+#include <sound/q6audio-v2.h>
+
+int q6audio_get_port_index(u16 port_id)
+{
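+	/* Map an AFE port id onto the dense IDX_* range used to
+	 * index per-port state tables.
+	 */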
+	switch (port_id) {
+	case PRIMARY_I2S_RX: return IDX_PRIMARY_I2S_RX;
+	case PRIMARY_I2S_TX: return IDX_PRIMARY_I2S_TX;
+	case AFE_PORT_ID_PRIMARY_PCM_RX:
+		return IDX_AFE_PORT_ID_PRIMARY_PCM_RX;
+	case AFE_PORT_ID_PRIMARY_PCM_TX:
+		return IDX_AFE_PORT_ID_PRIMARY_PCM_TX;
+	case AFE_PORT_ID_SECONDARY_PCM_RX:
+		return IDX_AFE_PORT_ID_SECONDARY_PCM_RX;
+	case AFE_PORT_ID_SECONDARY_PCM_TX:
+		return IDX_AFE_PORT_ID_SECONDARY_PCM_TX;
+	case AFE_PORT_ID_TERTIARY_PCM_RX:
+		return IDX_AFE_PORT_ID_TERTIARY_PCM_RX;
+	case AFE_PORT_ID_TERTIARY_PCM_TX:
+		return IDX_AFE_PORT_ID_TERTIARY_PCM_TX;
+	case AFE_PORT_ID_QUATERNARY_PCM_RX:
+		return IDX_AFE_PORT_ID_QUATERNARY_PCM_RX;
+	case AFE_PORT_ID_QUATERNARY_PCM_TX:
+		return IDX_AFE_PORT_ID_QUATERNARY_PCM_TX;
+	case SECONDARY_I2S_RX: return IDX_SECONDARY_I2S_RX;
+	case SECONDARY_I2S_TX: return IDX_SECONDARY_I2S_TX;
+	case MI2S_RX: return IDX_MI2S_RX;
+	case MI2S_TX: return IDX_MI2S_TX;
+	case HDMI_RX: return IDX_HDMI_RX;
+	case DISPLAY_PORT_RX: return IDX_DISPLAY_PORT_RX;
+	case AFE_PORT_ID_SPDIF_RX: return IDX_SPDIF_RX;
+	case RSVD_2: return IDX_RSVD_2;
+	case RSVD_3: return IDX_RSVD_3;
+	case DIGI_MIC_TX: return IDX_DIGI_MIC_TX;
+	case VOICE_RECORD_RX: return IDX_VOICE_RECORD_RX;
+	case VOICE_RECORD_TX: return IDX_VOICE_RECORD_TX;
+	case VOICE_PLAYBACK_TX: return IDX_VOICE_PLAYBACK_TX;
+	case VOICE2_PLAYBACK_TX: return IDX_VOICE2_PLAYBACK_TX;
+	case SLIMBUS_0_RX: return IDX_SLIMBUS_0_RX;
+	case SLIMBUS_0_TX: return IDX_SLIMBUS_0_TX;
+	case SLIMBUS_1_RX: return IDX_SLIMBUS_1_RX;
+	case SLIMBUS_1_TX: return IDX_SLIMBUS_1_TX;
+	case SLIMBUS_2_RX: return IDX_SLIMBUS_2_RX;
+	case SLIMBUS_2_TX: return IDX_SLIMBUS_2_TX;
+	case SLIMBUS_3_RX: return IDX_SLIMBUS_3_RX;
+	case SLIMBUS_3_TX: return IDX_SLIMBUS_3_TX;
+	case SLIMBUS_4_RX: return IDX_SLIMBUS_4_RX;
+	case SLIMBUS_4_TX: return IDX_SLIMBUS_4_TX;
+	case SLIMBUS_5_RX: return IDX_SLIMBUS_5_RX;
+	case SLIMBUS_5_TX: return IDX_SLIMBUS_5_TX;
+	case SLIMBUS_6_RX: return IDX_SLIMBUS_6_RX;
+	case SLIMBUS_6_TX: return IDX_SLIMBUS_6_TX;
+	case SLIMBUS_7_RX: return IDX_SLIMBUS_7_RX;
+	case SLIMBUS_7_TX: return IDX_SLIMBUS_7_TX;
+	case SLIMBUS_8_RX: return IDX_SLIMBUS_8_RX;
+	case SLIMBUS_8_TX: return IDX_SLIMBUS_8_TX;
+	case INT_BT_SCO_RX: return IDX_INT_BT_SCO_RX;
+	case INT_BT_SCO_TX: return IDX_INT_BT_SCO_TX;
+	case INT_BT_A2DP_RX: return IDX_INT_BT_A2DP_RX;
+	case INT_FM_RX: return IDX_INT_FM_RX;
+	case INT_FM_TX: return IDX_INT_FM_TX;
+	case RT_PROXY_PORT_001_RX: return IDX_RT_PROXY_PORT_001_RX;
+	case RT_PROXY_PORT_001_TX: return IDX_RT_PROXY_PORT_001_TX;
+	case AFE_PORT_ID_PRIMARY_MI2S_RX:
+		return IDX_AFE_PORT_ID_PRIMARY_MI2S_RX;
+	case AFE_PORT_ID_PRIMARY_MI2S_TX:
+		return IDX_AFE_PORT_ID_PRIMARY_MI2S_TX;
+	case AFE_PORT_ID_QUATERNARY_MI2S_RX:
+		return IDX_AFE_PORT_ID_QUATERNARY_MI2S_RX;
+	case AFE_PORT_ID_QUATERNARY_MI2S_TX:
+		return IDX_AFE_PORT_ID_QUATERNARY_MI2S_TX;
+	case AFE_PORT_ID_SECONDARY_MI2S_RX:
+		return IDX_AFE_PORT_ID_SECONDARY_MI2S_RX;
+	case AFE_PORT_ID_SECONDARY_MI2S_TX:
+		return IDX_AFE_PORT_ID_SECONDARY_MI2S_TX;
+	case AFE_PORT_ID_TERTIARY_MI2S_RX:
+		return IDX_AFE_PORT_ID_TERTIARY_MI2S_RX;
+	case AFE_PORT_ID_TERTIARY_MI2S_TX:
+		return IDX_AFE_PORT_ID_TERTIARY_MI2S_TX;
+	case AUDIO_PORT_ID_I2S_RX:
+		return IDX_AUDIO_PORT_ID_I2S_RX;
+	case AFE_PORT_ID_SECONDARY_MI2S_RX_SD1:
+		return IDX_AFE_PORT_ID_SECONDARY_MI2S_RX_SD1;
+	case AFE_PORT_ID_PRIMARY_TDM_RX:
+		return IDX_AFE_PORT_ID_PRIMARY_TDM_RX_0;
+	case AFE_PORT_ID_PRIMARY_TDM_TX:
+		return IDX_AFE_PORT_ID_PRIMARY_TDM_TX_0;
+	case AFE_PORT_ID_PRIMARY_TDM_RX_1:
+		return IDX_AFE_PORT_ID_PRIMARY_TDM_RX_1;
+	case AFE_PORT_ID_PRIMARY_TDM_TX_1:
+		return IDX_AFE_PORT_ID_PRIMARY_TDM_TX_1;
+	case AFE_PORT_ID_PRIMARY_TDM_RX_2:
+		return IDX_AFE_PORT_ID_PRIMARY_TDM_RX_2;
+	case AFE_PORT_ID_PRIMARY_TDM_TX_2:
+		return IDX_AFE_PORT_ID_PRIMARY_TDM_TX_2;
+	case AFE_PORT_ID_PRIMARY_TDM_RX_3:
+		return IDX_AFE_PORT_ID_PRIMARY_TDM_RX_3;
+	case AFE_PORT_ID_PRIMARY_TDM_TX_3:
+		return IDX_AFE_PORT_ID_PRIMARY_TDM_TX_3;
+	case AFE_PORT_ID_PRIMARY_TDM_RX_4:
+		return IDX_AFE_PORT_ID_PRIMARY_TDM_RX_4;
+	case AFE_PORT_ID_PRIMARY_TDM_TX_4:
+		return IDX_AFE_PORT_ID_PRIMARY_TDM_TX_4;
+	case AFE_PORT_ID_PRIMARY_TDM_RX_5:
+		return IDX_AFE_PORT_ID_PRIMARY_TDM_RX_5;
+	case AFE_PORT_ID_PRIMARY_TDM_TX_5:
+		return IDX_AFE_PORT_ID_PRIMARY_TDM_TX_5;
+	case AFE_PORT_ID_PRIMARY_TDM_RX_6:
+		return IDX_AFE_PORT_ID_PRIMARY_TDM_RX_6;
+	case AFE_PORT_ID_PRIMARY_TDM_TX_6:
+		return IDX_AFE_PORT_ID_PRIMARY_TDM_TX_6;
+	case AFE_PORT_ID_PRIMARY_TDM_RX_7:
+		return IDX_AFE_PORT_ID_PRIMARY_TDM_RX_7;
+	case AFE_PORT_ID_PRIMARY_TDM_TX_7:
+		return IDX_AFE_PORT_ID_PRIMARY_TDM_TX_7;
+	case AFE_PORT_ID_SECONDARY_TDM_RX:
+		return IDX_AFE_PORT_ID_SECONDARY_TDM_RX_0;
+	case AFE_PORT_ID_SECONDARY_TDM_TX:
+		return IDX_AFE_PORT_ID_SECONDARY_TDM_TX_0;
+	case AFE_PORT_ID_SECONDARY_TDM_RX_1:
+		return IDX_AFE_PORT_ID_SECONDARY_TDM_RX_1;
+	case AFE_PORT_ID_SECONDARY_TDM_TX_1:
+		return IDX_AFE_PORT_ID_SECONDARY_TDM_TX_1;
+	case AFE_PORT_ID_SECONDARY_TDM_RX_2:
+		return IDX_AFE_PORT_ID_SECONDARY_TDM_RX_2;
+	case AFE_PORT_ID_SECONDARY_TDM_TX_2:
+		return IDX_AFE_PORT_ID_SECONDARY_TDM_TX_2;
+	case AFE_PORT_ID_SECONDARY_TDM_RX_3:
+		return IDX_AFE_PORT_ID_SECONDARY_TDM_RX_3;
+	case AFE_PORT_ID_SECONDARY_TDM_TX_3:
+		return IDX_AFE_PORT_ID_SECONDARY_TDM_TX_3;
+	case AFE_PORT_ID_SECONDARY_TDM_RX_4:
+		return IDX_AFE_PORT_ID_SECONDARY_TDM_RX_4;
+	case AFE_PORT_ID_SECONDARY_TDM_TX_4:
+		return IDX_AFE_PORT_ID_SECONDARY_TDM_TX_4;
+	case AFE_PORT_ID_SECONDARY_TDM_RX_5:
+		return IDX_AFE_PORT_ID_SECONDARY_TDM_RX_5;
+	case AFE_PORT_ID_SECONDARY_TDM_TX_5:
+		return IDX_AFE_PORT_ID_SECONDARY_TDM_TX_5;
+	case AFE_PORT_ID_SECONDARY_TDM_RX_6:
+		return IDX_AFE_PORT_ID_SECONDARY_TDM_RX_6;
+	case AFE_PORT_ID_SECONDARY_TDM_TX_6:
+		return IDX_AFE_PORT_ID_SECONDARY_TDM_TX_6;
+	case AFE_PORT_ID_SECONDARY_TDM_RX_7:
+		return IDX_AFE_PORT_ID_SECONDARY_TDM_RX_7;
+	case AFE_PORT_ID_SECONDARY_TDM_TX_7:
+		return IDX_AFE_PORT_ID_SECONDARY_TDM_TX_7;
+	case AFE_PORT_ID_TERTIARY_TDM_RX:
+		return IDX_AFE_PORT_ID_TERTIARY_TDM_RX_0;
+	case AFE_PORT_ID_TERTIARY_TDM_TX:
+		return IDX_AFE_PORT_ID_TERTIARY_TDM_TX_0;
+	case AFE_PORT_ID_TERTIARY_TDM_RX_1:
+		return IDX_AFE_PORT_ID_TERTIARY_TDM_RX_1;
+	case AFE_PORT_ID_TERTIARY_TDM_TX_1:
+		return IDX_AFE_PORT_ID_TERTIARY_TDM_TX_1;
+	case AFE_PORT_ID_TERTIARY_TDM_RX_2:
+		return IDX_AFE_PORT_ID_TERTIARY_TDM_RX_2;
+	case AFE_PORT_ID_TERTIARY_TDM_TX_2:
+		return IDX_AFE_PORT_ID_TERTIARY_TDM_TX_2;
+	case AFE_PORT_ID_TERTIARY_TDM_RX_3:
+		return IDX_AFE_PORT_ID_TERTIARY_TDM_RX_3;
+	case AFE_PORT_ID_TERTIARY_TDM_TX_3:
+		return IDX_AFE_PORT_ID_TERTIARY_TDM_TX_3;
+	case AFE_PORT_ID_TERTIARY_TDM_RX_4:
+		return IDX_AFE_PORT_ID_TERTIARY_TDM_RX_4;
+	case AFE_PORT_ID_TERTIARY_TDM_TX_4:
+		return IDX_AFE_PORT_ID_TERTIARY_TDM_TX_4;
+	case AFE_PORT_ID_TERTIARY_TDM_RX_5:
+		return IDX_AFE_PORT_ID_TERTIARY_TDM_RX_5;
+	case AFE_PORT_ID_TERTIARY_TDM_TX_5:
+		return IDX_AFE_PORT_ID_TERTIARY_TDM_TX_5;
+	case AFE_PORT_ID_TERTIARY_TDM_RX_6:
+		return IDX_AFE_PORT_ID_TERTIARY_TDM_RX_6;
+	case AFE_PORT_ID_TERTIARY_TDM_TX_6:
+		return IDX_AFE_PORT_ID_TERTIARY_TDM_TX_6;
+	case AFE_PORT_ID_TERTIARY_TDM_RX_7:
+		return IDX_AFE_PORT_ID_TERTIARY_TDM_RX_7;
+	case AFE_PORT_ID_TERTIARY_TDM_TX_7:
+		return IDX_AFE_PORT_ID_TERTIARY_TDM_TX_7;
+	case AFE_PORT_ID_QUATERNARY_TDM_RX:
+		return IDX_AFE_PORT_ID_QUATERNARY_TDM_RX_0;
+	case AFE_PORT_ID_QUATERNARY_TDM_TX:
+		return IDX_AFE_PORT_ID_QUATERNARY_TDM_TX_0;
+	case AFE_PORT_ID_QUATERNARY_TDM_RX_1:
+		return IDX_AFE_PORT_ID_QUATERNARY_TDM_RX_1;
+	case AFE_PORT_ID_QUATERNARY_TDM_TX_1:
+		return IDX_AFE_PORT_ID_QUATERNARY_TDM_TX_1;
+	case AFE_PORT_ID_QUATERNARY_TDM_RX_2:
+		return IDX_AFE_PORT_ID_QUATERNARY_TDM_RX_2;
+	case AFE_PORT_ID_QUATERNARY_TDM_TX_2:
+		return IDX_AFE_PORT_ID_QUATERNARY_TDM_TX_2;
+	case AFE_PORT_ID_QUATERNARY_TDM_RX_3:
+		return IDX_AFE_PORT_ID_QUATERNARY_TDM_RX_3;
+	case AFE_PORT_ID_QUATERNARY_TDM_TX_3:
+		return IDX_AFE_PORT_ID_QUATERNARY_TDM_TX_3;
+	case AFE_PORT_ID_QUATERNARY_TDM_RX_4:
+		return IDX_AFE_PORT_ID_QUATERNARY_TDM_RX_4;
+	case AFE_PORT_ID_QUATERNARY_TDM_TX_4:
+		return IDX_AFE_PORT_ID_QUATERNARY_TDM_TX_4;
+	case AFE_PORT_ID_QUATERNARY_TDM_RX_5:
+		return IDX_AFE_PORT_ID_QUATERNARY_TDM_RX_5;
+	case AFE_PORT_ID_QUATERNARY_TDM_TX_5:
+		return IDX_AFE_PORT_ID_QUATERNARY_TDM_TX_5;
+	case AFE_PORT_ID_QUATERNARY_TDM_RX_6:
+		return IDX_AFE_PORT_ID_QUATERNARY_TDM_RX_6;
+	case AFE_PORT_ID_QUATERNARY_TDM_TX_6:
+		return IDX_AFE_PORT_ID_QUATERNARY_TDM_TX_6;
+	case AFE_PORT_ID_QUATERNARY_TDM_RX_7:
+		return IDX_AFE_PORT_ID_QUATERNARY_TDM_RX_7;
+	case AFE_PORT_ID_QUATERNARY_TDM_TX_7:
+		return IDX_AFE_PORT_ID_QUATERNARY_TDM_TX_7;
+	case AFE_PORT_ID_SENARY_MI2S_TX:
+		return IDX_AFE_PORT_ID_SENARY_MI2S_TX;
+	case AFE_PORT_ID_USB_RX:
+		return IDX_AFE_PORT_ID_USB_RX;
+	case AFE_PORT_ID_USB_TX:
+		return IDX_AFE_PORT_ID_USB_TX;
+	case AFE_PORT_ID_INT0_MI2S_RX:
+		return IDX_AFE_PORT_ID_INT0_MI2S_RX;
+	case AFE_PORT_ID_INT0_MI2S_TX:
+		return IDX_AFE_PORT_ID_INT0_MI2S_TX;
+	case AFE_PORT_ID_INT1_MI2S_RX:
+		return IDX_AFE_PORT_ID_INT1_MI2S_RX;
+	case AFE_PORT_ID_INT1_MI2S_TX:
+		return IDX_AFE_PORT_ID_INT1_MI2S_TX;
+	case AFE_PORT_ID_INT2_MI2S_RX:
+		return IDX_AFE_PORT_ID_INT2_MI2S_RX;
+	case AFE_PORT_ID_INT2_MI2S_TX:
+		return IDX_AFE_PORT_ID_INT2_MI2S_TX;
+	case AFE_PORT_ID_INT3_MI2S_RX:
+		return IDX_AFE_PORT_ID_INT3_MI2S_RX;
+	case AFE_PORT_ID_INT3_MI2S_TX:
+		return IDX_AFE_PORT_ID_INT3_MI2S_TX;
+	case AFE_PORT_ID_INT4_MI2S_RX:
+		return IDX_AFE_PORT_ID_INT4_MI2S_RX;
+	case AFE_PORT_ID_INT4_MI2S_TX:
+		return IDX_AFE_PORT_ID_INT4_MI2S_TX;
+	case AFE_PORT_ID_INT5_MI2S_RX:
+		return IDX_AFE_PORT_ID_INT5_MI2S_RX;
+	case AFE_PORT_ID_INT5_MI2S_TX:
+		return IDX_AFE_PORT_ID_INT5_MI2S_TX;
+	case AFE_PORT_ID_INT6_MI2S_RX:
+		return IDX_AFE_PORT_ID_INT6_MI2S_RX;
+	case AFE_PORT_ID_INT6_MI2S_TX:
+		return IDX_AFE_PORT_ID_INT6_MI2S_TX;
+	default: return -EINVAL;
+	}
+}
+
+int q6audio_get_port_id(u16 port_id)
+{
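+	/* Resolve legacy/virtual port ids to real AFE port ids;
+	 * ids that are already AFE port ids map to themselves.
+	 */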
+	switch (port_id) {
+	case PRIMARY_I2S_RX: return PRIMARY_I2S_RX;
+	case PRIMARY_I2S_TX: return PRIMARY_I2S_TX;
+	case AFE_PORT_ID_PRIMARY_PCM_RX:
+			return AFE_PORT_ID_PRIMARY_PCM_RX;
+	case AFE_PORT_ID_PRIMARY_PCM_TX:
+			return AFE_PORT_ID_PRIMARY_PCM_TX;
+	case AFE_PORT_ID_SECONDARY_PCM_RX:
+			return AFE_PORT_ID_SECONDARY_PCM_RX;
+	case AFE_PORT_ID_SECONDARY_PCM_TX:
+			return AFE_PORT_ID_SECONDARY_PCM_TX;
+	case AFE_PORT_ID_TERTIARY_PCM_RX:
+			return AFE_PORT_ID_TERTIARY_PCM_RX;
+	case AFE_PORT_ID_TERTIARY_PCM_TX:
+			return AFE_PORT_ID_TERTIARY_PCM_TX;
+	case AFE_PORT_ID_QUATERNARY_PCM_RX:
+			return AFE_PORT_ID_QUATERNARY_PCM_RX;
+	case AFE_PORT_ID_QUATERNARY_PCM_TX:
+			return AFE_PORT_ID_QUATERNARY_PCM_TX;
+	case SECONDARY_I2S_RX: return AFE_PORT_ID_SECONDARY_MI2S_RX;
+	case SECONDARY_I2S_TX: return AFE_PORT_ID_SECONDARY_MI2S_TX;
+	case MI2S_RX: return AFE_PORT_ID_PRIMARY_MI2S_RX;
+	case MI2S_TX: return AFE_PORT_ID_PRIMARY_MI2S_TX;
+	case HDMI_RX: return AFE_PORT_ID_MULTICHAN_HDMI_RX;
+	case DISPLAY_PORT_RX:
+			     return AFE_PORT_ID_HDMI_OVER_DP_RX;
+	case AFE_PORT_ID_SPDIF_RX: return AFE_PORT_ID_SPDIF_RX;
+	case RSVD_2: return RSVD_2;
+	case RSVD_3: return RSVD_3;
+	case DIGI_MIC_TX: return AFE_PORT_ID_DIGITAL_MIC_TX;
+	case VOICE_RECORD_RX: return AFE_PORT_ID_VOICE_RECORD_RX;
+	case VOICE_RECORD_TX: return AFE_PORT_ID_VOICE_RECORD_TX;
+	case VOICE_PLAYBACK_TX: return AFE_PORT_ID_VOICE_PLAYBACK_TX;
+	case VOICE2_PLAYBACK_TX: return AFE_PORT_ID_VOICE2_PLAYBACK_TX;
+	case SLIMBUS_0_RX: return AFE_PORT_ID_SLIMBUS_MULTI_CHAN_0_RX;
+	case SLIMBUS_0_TX: return AFE_PORT_ID_SLIMBUS_MULTI_CHAN_0_TX;
+	case SLIMBUS_1_RX: return AFE_PORT_ID_SLIMBUS_MULTI_CHAN_1_RX;
+	case SLIMBUS_1_TX: return AFE_PORT_ID_SLIMBUS_MULTI_CHAN_1_TX;
+	case SLIMBUS_2_RX: return AFE_PORT_ID_SLIMBUS_MULTI_CHAN_2_RX;
+	case SLIMBUS_2_TX: return AFE_PORT_ID_SLIMBUS_MULTI_CHAN_2_TX;
+	case SLIMBUS_3_RX: return AFE_PORT_ID_SLIMBUS_MULTI_CHAN_3_RX;
+	case SLIMBUS_3_TX: return AFE_PORT_ID_SLIMBUS_MULTI_CHAN_3_TX;
+	case SLIMBUS_4_RX: return AFE_PORT_ID_SLIMBUS_MULTI_CHAN_4_RX;
+	case SLIMBUS_4_TX: return AFE_PORT_ID_SLIMBUS_MULTI_CHAN_4_TX;
+	case SLIMBUS_5_RX: return AFE_PORT_ID_SLIMBUS_MULTI_CHAN_5_RX;
+	case SLIMBUS_5_TX: return AFE_PORT_ID_SLIMBUS_MULTI_CHAN_5_TX;
+	case SLIMBUS_6_RX: return AFE_PORT_ID_SLIMBUS_MULTI_CHAN_6_RX;
+	case SLIMBUS_6_TX: return AFE_PORT_ID_SLIMBUS_MULTI_CHAN_6_TX;
+	case SLIMBUS_7_RX: return AFE_PORT_ID_SLIMBUS_MULTI_CHAN_7_RX;
+	case SLIMBUS_7_TX: return AFE_PORT_ID_SLIMBUS_MULTI_CHAN_7_TX;
+	case SLIMBUS_8_RX: return AFE_PORT_ID_SLIMBUS_MULTI_CHAN_8_RX;
+	case SLIMBUS_8_TX: return AFE_PORT_ID_SLIMBUS_MULTI_CHAN_8_TX;
+	case INT_BT_SCO_RX: return AFE_PORT_ID_INTERNAL_BT_SCO_RX;
+	case INT_BT_SCO_TX: return AFE_PORT_ID_INTERNAL_BT_SCO_TX;
+	case INT_BT_A2DP_RX: return AFE_PORT_ID_INTERNAL_BT_A2DP_RX;
+	case INT_FM_RX: return AFE_PORT_ID_INTERNAL_FM_RX;
+	case INT_FM_TX: return AFE_PORT_ID_INTERNAL_FM_TX;
+	case RT_PROXY_PORT_001_RX: return AFE_PORT_ID_RT_PROXY_PORT_001_RX;
+	case RT_PROXY_PORT_001_TX: return AFE_PORT_ID_RT_PROXY_PORT_001_TX;
+	case AFE_PORT_ID_PRIMARY_MI2S_RX:
+			     return AFE_PORT_ID_PRIMARY_MI2S_RX;
+	case AFE_PORT_ID_PRIMARY_MI2S_TX:
+			     return AFE_PORT_ID_PRIMARY_MI2S_TX;
+	case AFE_PORT_ID_QUATERNARY_MI2S_RX:
+			     return AFE_PORT_ID_QUATERNARY_MI2S_RX;
+	case AFE_PORT_ID_QUATERNARY_MI2S_TX:
+			     return AFE_PORT_ID_QUATERNARY_MI2S_TX;
+	case AFE_PORT_ID_SECONDARY_MI2S_RX:
+			     return AFE_PORT_ID_SECONDARY_MI2S_RX;
+	case AFE_PORT_ID_SECONDARY_MI2S_TX:
+			     return AFE_PORT_ID_SECONDARY_MI2S_TX;
+	case AFE_PORT_ID_TERTIARY_MI2S_RX:
+			     return AFE_PORT_ID_TERTIARY_MI2S_RX;
+	case AFE_PORT_ID_TERTIARY_MI2S_TX:
+			     return AFE_PORT_ID_TERTIARY_MI2S_TX;
+	case AUDIO_PORT_ID_I2S_RX:
+			return AUDIO_PORT_ID_I2S_RX;
+	case AFE_PORT_ID_SECONDARY_MI2S_RX_SD1:
+			     return AFE_PORT_ID_SECONDARY_MI2S_RX_SD1;
+	case AFE_PORT_ID_PRIMARY_TDM_RX:
+		return AFE_PORT_ID_PRIMARY_TDM_RX;
+	case AFE_PORT_ID_PRIMARY_TDM_TX:
+		return AFE_PORT_ID_PRIMARY_TDM_TX;
+	case AFE_PORT_ID_PRIMARY_TDM_RX_1:
+		return AFE_PORT_ID_PRIMARY_TDM_RX_1;
+	case AFE_PORT_ID_PRIMARY_TDM_TX_1:
+		return AFE_PORT_ID_PRIMARY_TDM_TX_1;
+	case AFE_PORT_ID_PRIMARY_TDM_RX_2:
+		return AFE_PORT_ID_PRIMARY_TDM_RX_2;
+	case AFE_PORT_ID_PRIMARY_TDM_TX_2:
+		return AFE_PORT_ID_PRIMARY_TDM_TX_2;
+	case AFE_PORT_ID_PRIMARY_TDM_RX_3:
+		return AFE_PORT_ID_PRIMARY_TDM_RX_3;
+	case AFE_PORT_ID_PRIMARY_TDM_TX_3:
+		return AFE_PORT_ID_PRIMARY_TDM_TX_3;
+	case AFE_PORT_ID_PRIMARY_TDM_RX_4:
+		return AFE_PORT_ID_PRIMARY_TDM_RX_4;
+	case AFE_PORT_ID_PRIMARY_TDM_TX_4:
+		return AFE_PORT_ID_PRIMARY_TDM_TX_4;
+	case AFE_PORT_ID_PRIMARY_TDM_RX_5:
+		return AFE_PORT_ID_PRIMARY_TDM_RX_5;
+	case AFE_PORT_ID_PRIMARY_TDM_TX_5:
+		return AFE_PORT_ID_PRIMARY_TDM_TX_5;
+	case AFE_PORT_ID_PRIMARY_TDM_RX_6:
+		return AFE_PORT_ID_PRIMARY_TDM_RX_6;
+	case AFE_PORT_ID_PRIMARY_TDM_TX_6:
+		return AFE_PORT_ID_PRIMARY_TDM_TX_6;
+	case AFE_PORT_ID_PRIMARY_TDM_RX_7:
+		return AFE_PORT_ID_PRIMARY_TDM_RX_7;
+	case AFE_PORT_ID_PRIMARY_TDM_TX_7:
+		return AFE_PORT_ID_PRIMARY_TDM_TX_7;
+	case AFE_PORT_ID_SECONDARY_TDM_RX:
+		return AFE_PORT_ID_SECONDARY_TDM_RX;
+	case AFE_PORT_ID_SECONDARY_TDM_TX:
+		return AFE_PORT_ID_SECONDARY_TDM_TX;
+	case AFE_PORT_ID_SECONDARY_TDM_RX_1:
+		return AFE_PORT_ID_SECONDARY_TDM_RX_1;
+	case AFE_PORT_ID_SECONDARY_TDM_TX_1:
+		return AFE_PORT_ID_SECONDARY_TDM_TX_1;
+	case AFE_PORT_ID_SECONDARY_TDM_RX_2:
+		return AFE_PORT_ID_SECONDARY_TDM_RX_2;
+	case AFE_PORT_ID_SECONDARY_TDM_TX_2:
+		return AFE_PORT_ID_SECONDARY_TDM_TX_2;
+	case AFE_PORT_ID_SECONDARY_TDM_RX_3:
+		return AFE_PORT_ID_SECONDARY_TDM_RX_3;
+	case AFE_PORT_ID_SECONDARY_TDM_TX_3:
+		return AFE_PORT_ID_SECONDARY_TDM_TX_3;
+	case AFE_PORT_ID_SECONDARY_TDM_RX_4:
+		return AFE_PORT_ID_SECONDARY_TDM_RX_4;
+	case AFE_PORT_ID_SECONDARY_TDM_TX_4:
+		return AFE_PORT_ID_SECONDARY_TDM_TX_4;
+	case AFE_PORT_ID_SECONDARY_TDM_RX_5:
+		return AFE_PORT_ID_SECONDARY_TDM_RX_5;
+	case AFE_PORT_ID_SECONDARY_TDM_TX_5:
+		return AFE_PORT_ID_SECONDARY_TDM_TX_5;
+	case AFE_PORT_ID_SECONDARY_TDM_RX_6:
+		return AFE_PORT_ID_SECONDARY_TDM_RX_6;
+	case AFE_PORT_ID_SECONDARY_TDM_TX_6:
+		return AFE_PORT_ID_SECONDARY_TDM_TX_6;
+	case AFE_PORT_ID_SECONDARY_TDM_RX_7:
+		return AFE_PORT_ID_SECONDARY_TDM_RX_7;
+	case AFE_PORT_ID_SECONDARY_TDM_TX_7:
+		return AFE_PORT_ID_SECONDARY_TDM_TX_7;
+	case AFE_PORT_ID_TERTIARY_TDM_RX:
+		return AFE_PORT_ID_TERTIARY_TDM_RX;
+	case AFE_PORT_ID_TERTIARY_TDM_TX:
+		return AFE_PORT_ID_TERTIARY_TDM_TX;
+	case AFE_PORT_ID_TERTIARY_TDM_RX_1:
+		return AFE_PORT_ID_TERTIARY_TDM_RX_1;
+	case AFE_PORT_ID_TERTIARY_TDM_TX_1:
+		return AFE_PORT_ID_TERTIARY_TDM_TX_1;
+	case AFE_PORT_ID_TERTIARY_TDM_RX_2:
+		return AFE_PORT_ID_TERTIARY_TDM_RX_2;
+	case AFE_PORT_ID_TERTIARY_TDM_TX_2:
+		return AFE_PORT_ID_TERTIARY_TDM_TX_2;
+	case AFE_PORT_ID_TERTIARY_TDM_RX_3:
+		return AFE_PORT_ID_TERTIARY_TDM_RX_3;
+	case AFE_PORT_ID_TERTIARY_TDM_TX_3:
+		return AFE_PORT_ID_TERTIARY_TDM_TX_3;
+	case AFE_PORT_ID_TERTIARY_TDM_RX_4:
+		return AFE_PORT_ID_TERTIARY_TDM_RX_4;
+	case AFE_PORT_ID_TERTIARY_TDM_TX_4:
+		return AFE_PORT_ID_TERTIARY_TDM_TX_4;
+	case AFE_PORT_ID_TERTIARY_TDM_RX_5:
+		return AFE_PORT_ID_TERTIARY_TDM_RX_5;
+	case AFE_PORT_ID_TERTIARY_TDM_TX_5:
+		return AFE_PORT_ID_TERTIARY_TDM_TX_5;
+	case AFE_PORT_ID_TERTIARY_TDM_RX_6:
+		return AFE_PORT_ID_TERTIARY_TDM_RX_6;
+	case AFE_PORT_ID_TERTIARY_TDM_TX_6:
+		return AFE_PORT_ID_TERTIARY_TDM_TX_6;
+	case AFE_PORT_ID_TERTIARY_TDM_RX_7:
+		return AFE_PORT_ID_TERTIARY_TDM_RX_7;
+	case AFE_PORT_ID_TERTIARY_TDM_TX_7:
+		return AFE_PORT_ID_TERTIARY_TDM_TX_7;
+	case AFE_PORT_ID_QUATERNARY_TDM_RX:
+		return AFE_PORT_ID_QUATERNARY_TDM_RX;
+	case AFE_PORT_ID_QUATERNARY_TDM_TX:
+		return AFE_PORT_ID_QUATERNARY_TDM_TX;
+	case AFE_PORT_ID_QUATERNARY_TDM_RX_1:
+		return AFE_PORT_ID_QUATERNARY_TDM_RX_1;
+	case AFE_PORT_ID_QUATERNARY_TDM_TX_1:
+		return AFE_PORT_ID_QUATERNARY_TDM_TX_1;
+	case AFE_PORT_ID_QUATERNARY_TDM_RX_2:
+		return AFE_PORT_ID_QUATERNARY_TDM_RX_2;
+	case AFE_PORT_ID_QUATERNARY_TDM_TX_2:
+		return AFE_PORT_ID_QUATERNARY_TDM_TX_2;
+	case AFE_PORT_ID_QUATERNARY_TDM_RX_3:
+		return AFE_PORT_ID_QUATERNARY_TDM_RX_3;
+	case AFE_PORT_ID_QUATERNARY_TDM_TX_3:
+		return AFE_PORT_ID_QUATERNARY_TDM_TX_3;
+	case AFE_PORT_ID_QUATERNARY_TDM_RX_4:
+		return AFE_PORT_ID_QUATERNARY_TDM_RX_4;
+	case AFE_PORT_ID_QUATERNARY_TDM_TX_4:
+		return AFE_PORT_ID_QUATERNARY_TDM_TX_4;
+	case AFE_PORT_ID_QUATERNARY_TDM_RX_5:
+		return AFE_PORT_ID_QUATERNARY_TDM_RX_5;
+	case AFE_PORT_ID_QUATERNARY_TDM_TX_5:
+		return AFE_PORT_ID_QUATERNARY_TDM_TX_5;
+	case AFE_PORT_ID_QUATERNARY_TDM_RX_6:
+		return AFE_PORT_ID_QUATERNARY_TDM_RX_6;
+	case AFE_PORT_ID_QUATERNARY_TDM_TX_6:
+		return AFE_PORT_ID_QUATERNARY_TDM_TX_6;
+	case AFE_PORT_ID_QUATERNARY_TDM_RX_7:
+		return AFE_PORT_ID_QUATERNARY_TDM_RX_7;
+	case AFE_PORT_ID_QUATERNARY_TDM_TX_7:
+		return AFE_PORT_ID_QUATERNARY_TDM_TX_7;
+	case AFE_PORT_ID_SENARY_MI2S_TX:
+		return AFE_PORT_ID_SENARY_MI2S_TX;
+	case AFE_PORT_ID_USB_RX:
+		 return AFE_PORT_ID_USB_RX;
+	case AFE_PORT_ID_USB_TX:
+		 return AFE_PORT_ID_USB_TX;
+	case AFE_PORT_ID_INT0_MI2S_RX:
+		return AFE_PORT_ID_INT0_MI2S_RX;
+	case AFE_PORT_ID_INT0_MI2S_TX:
+		return AFE_PORT_ID_INT0_MI2S_TX;
+	case AFE_PORT_ID_INT1_MI2S_RX:
+		return AFE_PORT_ID_INT1_MI2S_RX;
+	case AFE_PORT_ID_INT1_MI2S_TX:
+		return AFE_PORT_ID_INT1_MI2S_TX;
+	case AFE_PORT_ID_INT2_MI2S_RX:
+		return AFE_PORT_ID_INT2_MI2S_RX;
+	case AFE_PORT_ID_INT2_MI2S_TX:
+		return AFE_PORT_ID_INT2_MI2S_TX;
+	case AFE_PORT_ID_INT3_MI2S_RX:
+		return AFE_PORT_ID_INT3_MI2S_RX;
+	case AFE_PORT_ID_INT3_MI2S_TX:
+		return AFE_PORT_ID_INT3_MI2S_TX;
+	case AFE_PORT_ID_INT4_MI2S_RX:
+		return AFE_PORT_ID_INT4_MI2S_RX;
+	case AFE_PORT_ID_INT4_MI2S_TX:
+		return AFE_PORT_ID_INT4_MI2S_TX;
+	case AFE_PORT_ID_INT5_MI2S_RX:
+		return AFE_PORT_ID_INT5_MI2S_RX;
+	case AFE_PORT_ID_INT5_MI2S_TX:
+		return AFE_PORT_ID_INT5_MI2S_TX;
+	case AFE_PORT_ID_INT6_MI2S_RX:
+		return AFE_PORT_ID_INT6_MI2S_RX;
+	case AFE_PORT_ID_INT6_MI2S_TX:
+		return AFE_PORT_ID_INT6_MI2S_TX;
+	default:
+		pr_warn("%s: Invalid port_id %d\n", __func__, port_id);
+		return -EINVAL;
+	}
+}
+int q6audio_convert_virtual_to_portid(u16 port_id)
+{
+	int ret;
+
+	/* If port_id is virtual, convert it to the physical port id;
+	 * if port_id is already physical, return it unchanged.
+	 */
+	if (q6audio_validate_port(port_id) < 0) {
+		if (port_id == RT_PROXY_DAI_001_RX ||
+			port_id == RT_PROXY_DAI_001_TX ||
+			port_id == RT_PROXY_DAI_002_RX ||
+			port_id == RT_PROXY_DAI_002_TX)
+			ret = VIRTUAL_ID_TO_PORTID(port_id);
+		else
+			ret = -EINVAL;
+	} else
+		ret = port_id;
+
+	return ret;
+}
+
+int q6audio_is_digital_pcm_interface(u16 port_id)
+{
+	int ret = 0;
+
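+	/* Only the PCM/I2S/MI2S/TDM interfaces listed below qualify;
+	 * any other port id is rejected with -EINVAL.
+	 */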
+	switch (port_id) {
+	case PRIMARY_I2S_RX:
+	case PRIMARY_I2S_TX:
+	case AFE_PORT_ID_PRIMARY_PCM_RX:
+	case AFE_PORT_ID_PRIMARY_PCM_TX:
+	case AFE_PORT_ID_SECONDARY_PCM_RX:
+	case AFE_PORT_ID_SECONDARY_PCM_TX:
+	case AFE_PORT_ID_TERTIARY_PCM_RX:
+	case AFE_PORT_ID_TERTIARY_PCM_TX:
+	case AFE_PORT_ID_QUATERNARY_PCM_RX:
+	case AFE_PORT_ID_QUATERNARY_PCM_TX:
+	case SECONDARY_I2S_RX:
+	case SECONDARY_I2S_TX:
+	case MI2S_RX:
+	case MI2S_TX:
+	case AFE_PORT_ID_TERTIARY_MI2S_TX:
+	case AFE_PORT_ID_TERTIARY_MI2S_RX:
+	case AFE_PORT_ID_QUATERNARY_MI2S_RX:
+	case AFE_PORT_ID_QUATERNARY_MI2S_TX:
+	case AFE_PORT_ID_PRIMARY_MI2S_RX:
+	case AFE_PORT_ID_PRIMARY_MI2S_TX:
+	case AFE_PORT_ID_SECONDARY_MI2S_RX:
+	case AFE_PORT_ID_SECONDARY_MI2S_TX:
+	case AUDIO_PORT_ID_I2S_RX:
+	case AFE_PORT_ID_SECONDARY_MI2S_RX_SD1:
+	case AFE_PORT_ID_PRIMARY_TDM_RX:
+	case AFE_PORT_ID_PRIMARY_TDM_TX:
+	case AFE_PORT_ID_PRIMARY_TDM_RX_1:
+	case AFE_PORT_ID_PRIMARY_TDM_TX_1:
+	case AFE_PORT_ID_PRIMARY_TDM_RX_2:
+	case AFE_PORT_ID_PRIMARY_TDM_TX_2:
+	case AFE_PORT_ID_PRIMARY_TDM_RX_3:
+	case AFE_PORT_ID_PRIMARY_TDM_TX_3:
+	case AFE_PORT_ID_PRIMARY_TDM_RX_4:
+	case AFE_PORT_ID_PRIMARY_TDM_TX_4:
+	case AFE_PORT_ID_PRIMARY_TDM_RX_5:
+	case AFE_PORT_ID_PRIMARY_TDM_TX_5:
+	case AFE_PORT_ID_PRIMARY_TDM_RX_6:
+	case AFE_PORT_ID_PRIMARY_TDM_TX_6:
+	case AFE_PORT_ID_PRIMARY_TDM_RX_7:
+	case AFE_PORT_ID_PRIMARY_TDM_TX_7:
+	case AFE_PORT_ID_SECONDARY_TDM_RX:
+	case AFE_PORT_ID_SECONDARY_TDM_TX:
+	case AFE_PORT_ID_SECONDARY_TDM_RX_1:
+	case AFE_PORT_ID_SECONDARY_TDM_TX_1:
+	case AFE_PORT_ID_SECONDARY_TDM_RX_2:
+	case AFE_PORT_ID_SECONDARY_TDM_TX_2:
+	case AFE_PORT_ID_SECONDARY_TDM_RX_3:
+	case AFE_PORT_ID_SECONDARY_TDM_TX_3:
+	case AFE_PORT_ID_SECONDARY_TDM_RX_4:
+	case AFE_PORT_ID_SECONDARY_TDM_TX_4:
+	case AFE_PORT_ID_SECONDARY_TDM_RX_5:
+	case AFE_PORT_ID_SECONDARY_TDM_TX_5:
+	case AFE_PORT_ID_SECONDARY_TDM_RX_6:
+	case AFE_PORT_ID_SECONDARY_TDM_TX_6:
+	case AFE_PORT_ID_SECONDARY_TDM_RX_7:
+	case AFE_PORT_ID_SECONDARY_TDM_TX_7:
+	case AFE_PORT_ID_TERTIARY_TDM_RX:
+	case AFE_PORT_ID_TERTIARY_TDM_TX:
+	case AFE_PORT_ID_TERTIARY_TDM_RX_1:
+	case AFE_PORT_ID_TERTIARY_TDM_TX_1:
+	case AFE_PORT_ID_TERTIARY_TDM_RX_2:
+	case AFE_PORT_ID_TERTIARY_TDM_TX_2:
+	case AFE_PORT_ID_TERTIARY_TDM_RX_3:
+	case AFE_PORT_ID_TERTIARY_TDM_TX_3:
+	case AFE_PORT_ID_TERTIARY_TDM_RX_4:
+	case AFE_PORT_ID_TERTIARY_TDM_TX_4:
+	case AFE_PORT_ID_TERTIARY_TDM_RX_5:
+	case AFE_PORT_ID_TERTIARY_TDM_TX_5:
+	case AFE_PORT_ID_TERTIARY_TDM_RX_6:
+	case AFE_PORT_ID_TERTIARY_TDM_TX_6:
+	case AFE_PORT_ID_TERTIARY_TDM_RX_7:
+	case AFE_PORT_ID_TERTIARY_TDM_TX_7:
+	case AFE_PORT_ID_QUATERNARY_TDM_RX:
+	case AFE_PORT_ID_QUATERNARY_TDM_TX:
+	case AFE_PORT_ID_QUATERNARY_TDM_RX_1:
+	case AFE_PORT_ID_QUATERNARY_TDM_TX_1:
+	case AFE_PORT_ID_QUATERNARY_TDM_RX_2:
+	case AFE_PORT_ID_QUATERNARY_TDM_TX_2:
+	case AFE_PORT_ID_QUATERNARY_TDM_RX_3:
+	case AFE_PORT_ID_QUATERNARY_TDM_TX_3:
+	case AFE_PORT_ID_QUATERNARY_TDM_RX_4:
+	case AFE_PORT_ID_QUATERNARY_TDM_TX_4:
+	case AFE_PORT_ID_QUATERNARY_TDM_RX_5:
+	case AFE_PORT_ID_QUATERNARY_TDM_TX_5:
+	case AFE_PORT_ID_QUATERNARY_TDM_RX_6:
+	case AFE_PORT_ID_QUATERNARY_TDM_TX_6:
+	case AFE_PORT_ID_QUATERNARY_TDM_RX_7:
+	case AFE_PORT_ID_QUATERNARY_TDM_TX_7:
+	case AFE_PORT_ID_SENARY_MI2S_TX:
+	case AFE_PORT_ID_INT0_MI2S_RX:
+	case AFE_PORT_ID_INT0_MI2S_TX:
+	case AFE_PORT_ID_INT1_MI2S_RX:
+	case AFE_PORT_ID_INT1_MI2S_TX:
+	case AFE_PORT_ID_INT2_MI2S_RX:
+	case AFE_PORT_ID_INT2_MI2S_TX:
+	case AFE_PORT_ID_INT3_MI2S_RX:
+	case AFE_PORT_ID_INT3_MI2S_TX:
+	case AFE_PORT_ID_INT4_MI2S_RX:
+	case AFE_PORT_ID_INT4_MI2S_TX:
+	case AFE_PORT_ID_INT5_MI2S_RX:
+	case AFE_PORT_ID_INT5_MI2S_TX:
+	case AFE_PORT_ID_INT6_MI2S_RX:
+	case AFE_PORT_ID_INT6_MI2S_TX:
+		break;
+	default:
+		ret = -EINVAL;
+	}
+
+	return ret;
+}
+
+int q6audio_validate_port(u16 port_id)
+{
+	int ret;
+
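+	/* Whitelist of all AFE port ids known to this driver */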
+	switch (port_id) {
+	case PRIMARY_I2S_RX:
+	case PRIMARY_I2S_TX:
+	case AFE_PORT_ID_PRIMARY_PCM_RX:
+	case AFE_PORT_ID_PRIMARY_PCM_TX:
+	case AFE_PORT_ID_SECONDARY_PCM_RX:
+	case AFE_PORT_ID_SECONDARY_PCM_TX:
+	case AFE_PORT_ID_TERTIARY_PCM_RX:
+	case AFE_PORT_ID_TERTIARY_PCM_TX:
+	case AFE_PORT_ID_QUATERNARY_PCM_RX:
+	case AFE_PORT_ID_QUATERNARY_PCM_TX:
+	case SECONDARY_I2S_RX:
+	case SECONDARY_I2S_TX:
+	case MI2S_RX:
+	case MI2S_TX:
+	case HDMI_RX:
+	case DISPLAY_PORT_RX:
+	case RSVD_2:
+	case RSVD_3:
+	case DIGI_MIC_TX:
+	case VOICE_RECORD_RX:
+	case VOICE_RECORD_TX:
+	case VOICE_PLAYBACK_TX:
+	case VOICE2_PLAYBACK_TX:
+	case SLIMBUS_0_RX:
+	case SLIMBUS_0_TX:
+	case SLIMBUS_1_RX:
+	case SLIMBUS_1_TX:
+	case SLIMBUS_2_RX:
+	case SLIMBUS_2_TX:
+	case SLIMBUS_3_RX:
+	case SLIMBUS_3_TX:
+	case SLIMBUS_4_RX:
+	case SLIMBUS_4_TX:
+	case SLIMBUS_5_RX:
+	case SLIMBUS_5_TX:
+	case SLIMBUS_6_RX:
+	case SLIMBUS_6_TX:
+	case SLIMBUS_7_RX:
+	case SLIMBUS_7_TX:
+	case SLIMBUS_8_RX:
+	case SLIMBUS_8_TX:
+	case INT_BT_SCO_RX:
+	case INT_BT_SCO_TX:
+	case INT_BT_A2DP_RX:
+	case INT_FM_RX:
+	case INT_FM_TX:
+	case RT_PROXY_PORT_001_RX:
+	case RT_PROXY_PORT_001_TX:
+	case AFE_PORT_ID_PRIMARY_MI2S_RX:
+	case AFE_PORT_ID_PRIMARY_MI2S_TX:
+	case AFE_PORT_ID_QUATERNARY_MI2S_RX:
+	case AFE_PORT_ID_QUATERNARY_MI2S_TX:
+	case AFE_PORT_ID_SECONDARY_MI2S_RX:
+	case AFE_PORT_ID_SECONDARY_MI2S_TX:
+	case AFE_PORT_ID_SPDIF_RX:
+	case AFE_PORT_ID_TERTIARY_MI2S_RX:
+	case AFE_PORT_ID_TERTIARY_MI2S_TX:
+	case AFE_PORT_ID_SECONDARY_MI2S_RX_SD1:
+	case AFE_PORT_ID_PRIMARY_TDM_RX:
+	case AFE_PORT_ID_PRIMARY_TDM_TX:
+	case AFE_PORT_ID_PRIMARY_TDM_RX_1:
+	case AFE_PORT_ID_PRIMARY_TDM_TX_1:
+	case AFE_PORT_ID_PRIMARY_TDM_RX_2:
+	case AFE_PORT_ID_PRIMARY_TDM_TX_2:
+	case AFE_PORT_ID_PRIMARY_TDM_RX_3:
+	case AFE_PORT_ID_PRIMARY_TDM_TX_3:
+	case AFE_PORT_ID_PRIMARY_TDM_RX_4:
+	case AFE_PORT_ID_PRIMARY_TDM_TX_4:
+	case AFE_PORT_ID_PRIMARY_TDM_RX_5:
+	case AFE_PORT_ID_PRIMARY_TDM_TX_5:
+	case AFE_PORT_ID_PRIMARY_TDM_RX_6:
+	case AFE_PORT_ID_PRIMARY_TDM_TX_6:
+	case AFE_PORT_ID_PRIMARY_TDM_RX_7:
+	case AFE_PORT_ID_PRIMARY_TDM_TX_7:
+	case AFE_PORT_ID_SECONDARY_TDM_RX:
+	case AFE_PORT_ID_SECONDARY_TDM_TX:
+	case AFE_PORT_ID_SECONDARY_TDM_RX_1:
+	case AFE_PORT_ID_SECONDARY_TDM_TX_1:
+	case AFE_PORT_ID_SECONDARY_TDM_RX_2:
+	case AFE_PORT_ID_SECONDARY_TDM_TX_2:
+	case AFE_PORT_ID_SECONDARY_TDM_RX_3:
+	case AFE_PORT_ID_SECONDARY_TDM_TX_3:
+	case AFE_PORT_ID_SECONDARY_TDM_RX_4:
+	case AFE_PORT_ID_SECONDARY_TDM_TX_4:
+	case AFE_PORT_ID_SECONDARY_TDM_RX_5:
+	case AFE_PORT_ID_SECONDARY_TDM_TX_5:
+	case AFE_PORT_ID_SECONDARY_TDM_RX_6:
+	case AFE_PORT_ID_SECONDARY_TDM_TX_6:
+	case AFE_PORT_ID_SECONDARY_TDM_RX_7:
+	case AFE_PORT_ID_SECONDARY_TDM_TX_7:
+	case AFE_PORT_ID_TERTIARY_TDM_RX:
+	case AFE_PORT_ID_TERTIARY_TDM_TX:
+	case AFE_PORT_ID_TERTIARY_TDM_RX_1:
+	case AFE_PORT_ID_TERTIARY_TDM_TX_1:
+	case AFE_PORT_ID_TERTIARY_TDM_RX_2:
+	case AFE_PORT_ID_TERTIARY_TDM_TX_2:
+	case AFE_PORT_ID_TERTIARY_TDM_RX_3:
+	case AFE_PORT_ID_TERTIARY_TDM_TX_3:
+	case AFE_PORT_ID_TERTIARY_TDM_RX_4:
+	case AFE_PORT_ID_TERTIARY_TDM_TX_4:
+	case AFE_PORT_ID_TERTIARY_TDM_RX_5:
+	case AFE_PORT_ID_TERTIARY_TDM_TX_5:
+	case AFE_PORT_ID_TERTIARY_TDM_RX_6:
+	case AFE_PORT_ID_TERTIARY_TDM_TX_6:
+	case AFE_PORT_ID_TERTIARY_TDM_RX_7:
+	case AFE_PORT_ID_TERTIARY_TDM_TX_7:
+	case AFE_PORT_ID_QUATERNARY_TDM_RX:
+	case AFE_PORT_ID_QUATERNARY_TDM_TX:
+	case AFE_PORT_ID_QUATERNARY_TDM_RX_1:
+	case AFE_PORT_ID_QUATERNARY_TDM_TX_1:
+	case AFE_PORT_ID_QUATERNARY_TDM_RX_2:
+	case AFE_PORT_ID_QUATERNARY_TDM_TX_2:
+	case AFE_PORT_ID_QUATERNARY_TDM_RX_3:
+	case AFE_PORT_ID_QUATERNARY_TDM_TX_3:
+	case AFE_PORT_ID_QUATERNARY_TDM_RX_4:
+	case AFE_PORT_ID_QUATERNARY_TDM_TX_4:
+	case AFE_PORT_ID_QUATERNARY_TDM_RX_5:
+	case AFE_PORT_ID_QUATERNARY_TDM_TX_5:
+	case AFE_PORT_ID_QUATERNARY_TDM_RX_6:
+	case AFE_PORT_ID_QUATERNARY_TDM_TX_6:
+	case AFE_PORT_ID_QUATERNARY_TDM_RX_7:
+	case AFE_PORT_ID_QUATERNARY_TDM_TX_7:
+	case AFE_PORT_ID_SENARY_MI2S_TX:
+	case AFE_PORT_ID_USB_RX:
+	case AFE_PORT_ID_USB_TX:
+	case AFE_PORT_ID_INT0_MI2S_RX:
+	case AFE_PORT_ID_INT0_MI2S_TX:
+	case AFE_PORT_ID_INT1_MI2S_RX:
+	case AFE_PORT_ID_INT1_MI2S_TX:
+	case AFE_PORT_ID_INT2_MI2S_RX:
+	case AFE_PORT_ID_INT2_MI2S_TX:
+	case AFE_PORT_ID_INT3_MI2S_RX:
+	case AFE_PORT_ID_INT3_MI2S_TX:
+	case AFE_PORT_ID_INT4_MI2S_RX:
+	case AFE_PORT_ID_INT4_MI2S_TX:
+	case AFE_PORT_ID_INT5_MI2S_RX:
+	case AFE_PORT_ID_INT5_MI2S_TX:
+	case AFE_PORT_ID_INT6_MI2S_RX:
+	case AFE_PORT_ID_INT6_MI2S_TX:
+	{
+		ret = 0;
+		break;
+	}
+
+	default:
+		ret = -EINVAL;
+	}
+
+	return ret;
+}
diff -Nruw linux-4.4.115-fbx/sound/soc/msm./qdsp6v2/q6core.c linux-4.4.115-fbx/sound/soc/msm/qdsp6v2/q6core.c
--- linux-4.4.115-fbx/sound/soc/msm./qdsp6v2/q6core.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/sound/soc/msm/qdsp6v2/q6core.c	2019-10-29 09:26:26.165227859 +0100
@@ -0,0 +1,914 @@
+/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/spinlock.h>
+#include <linux/mutex.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/qdsp6v2/apr.h>
+#include <soc/qcom/smd.h>
+#include <sound/q6core.h>
+#include <sound/audio_cal_utils.h>
+
+#define TIMEOUT_MS 1000
+/*
+ * AVS bring up in the modem is optimized for the new
+ * Sub System Restart design, and a 100 millisecond timeout
+ * is sufficient to make sure the Q6 will be ready.
+ */
+#define Q6_READY_TIMEOUT_MS 100
+
+enum {
+	META_CAL,
+	CUST_TOP_CAL,
+	CORE_MAX_CAL
+};
+
+struct q6core_str {
+	struct apr_svc *core_handle_q;
+	wait_queue_head_t bus_bw_req_wait;
+	wait_queue_head_t cmd_req_wait;
+	u32 bus_bw_resp_received;
+	enum cmd_flags {
+		FLAG_NONE,
+		FLAG_CMDRSP_LICENSE_RESULT
+	} cmd_resp_received_flag;
+	struct mutex cmd_lock;
+	union {
+		struct avcs_cmdrsp_get_license_validation_result
+						cmdrsp_license_result;
+	} cmd_resp_payload;
+	u32 param;
+	struct cal_type_data *cal_data[CORE_MAX_CAL];
+	uint32_t mem_map_cal_handle;
+	int32_t adsp_status;
+};
+
+static struct q6core_str q6core_lcl;
+
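+/*
+ * Scratch area for AVCS responses that have no dedicated handler:
+ * the APR callback copies the raw payload here (flexible array
+ * sized by size_in_ints) and marks it valid for the waiting caller.
+ */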
+struct generic_get_data_ {
+	int valid;
+	int size_in_ints;
+	int ints[];
+};
+static struct generic_get_data_ *generic_get_data;
+
+static int32_t aprv2_core_fn_q(struct apr_client_data *data, void *priv)
+{
+	uint32_t *payload1;
+
+	if (data == NULL) {
+		pr_err("%s: data argument is null\n", __func__);
+		return -EINVAL;
+	}
+
+	pr_debug("%s: core msg: payload len = %u, apr resp opcode = 0x%x\n",
+		__func__,
+		data->payload_size, data->opcode);
+
+	switch (data->opcode) {
+
+	case APR_BASIC_RSP_RESULT:{
+
+		if (data->payload_size == 0) {
+			pr_err("%s: APR_BASIC_RSP_RESULT No Payload\n",
+					__func__);
+			return 0;
+		}
+
+		payload1 = data->payload;
+
+		switch (payload1[0]) {
+
+		case AVCS_CMD_SHARED_MEM_UNMAP_REGIONS:
+			pr_debug("%s: Cmd = AVCS_CMD_SHARED_MEM_UNMAP_REGIONS status[0x%x]\n",
+				__func__, payload1[1]);
+			q6core_lcl.bus_bw_resp_received = 1;
+			wake_up(&q6core_lcl.bus_bw_req_wait);
+			break;
+		case AVCS_CMD_SHARED_MEM_MAP_REGIONS:
+			pr_debug("%s: Cmd = AVCS_CMD_SHARED_MEM_MAP_REGIONS status[0x%x]\n",
+				__func__, payload1[1]);
+			q6core_lcl.bus_bw_resp_received = 1;
+			wake_up(&q6core_lcl.bus_bw_req_wait);
+			break;
+		case AVCS_CMD_REGISTER_TOPOLOGIES:
+			pr_debug("%s: Cmd = AVCS_CMD_REGISTER_TOPOLOGIES status[0x%x]\n",
+				__func__, payload1[1]);
+			/* -ADSP status to match Linux error standard */
+			q6core_lcl.adsp_status = -payload1[1];
+			q6core_lcl.bus_bw_resp_received = 1;
+			wake_up(&q6core_lcl.bus_bw_req_wait);
+			break;
+		case AVCS_CMD_DEREGISTER_TOPOLOGIES:
+			pr_debug("%s: Cmd = AVCS_CMD_DEREGISTER_TOPOLOGIES status[0x%x]\n",
+				__func__, payload1[1]);
+			q6core_lcl.bus_bw_resp_received = 1;
+			wake_up(&q6core_lcl.bus_bw_req_wait);
+			break;
+		case AVCS_CMD_ADD_POOL_PAGES:
+			pr_debug("%s: Cmd = AVCS_CMD_ADD_POOL_PAGES status[0x%x]\n",
+				__func__, payload1[1]);
+			q6core_lcl.bus_bw_resp_received = 1;
+			wake_up(&q6core_lcl.bus_bw_req_wait);
+			break;
+		case AVCS_CMD_REMOVE_POOL_PAGES:
+			pr_debug("%s: Cmd = AVCS_CMD_REMOVE_POOL_PAGES status[0x%x]\n",
+				__func__, payload1[1]);
+			q6core_lcl.bus_bw_resp_received = 1;
+			wake_up(&q6core_lcl.bus_bw_req_wait);
+			break;
+		default:
+			pr_err("%s: Invalid cmd rsp[0x%x][0x%x] opcode %d\n",
+					__func__,
+					payload1[0], payload1[1], data->opcode);
+			break;
+		}
+		break;
+	}
+
+	case RESET_EVENTS:{
+		pr_debug("%s: Reset event received in Core service\n",
+			__func__);
+		apr_reset(q6core_lcl.core_handle_q);
+		q6core_lcl.core_handle_q = NULL;
+		break;
+	}
+	case AVCS_CMDRSP_SHARED_MEM_MAP_REGIONS:
+		payload1 = data->payload;
+		pr_debug("%s: AVCS_CMDRSP_SHARED_MEM_MAP_REGIONS handle %d\n",
+			__func__, payload1[0]);
+		q6core_lcl.mem_map_cal_handle = payload1[0];
+		q6core_lcl.bus_bw_resp_received = 1;
+		wake_up(&q6core_lcl.bus_bw_req_wait);
+		break;
+	case AVCS_CMDRSP_ADSP_EVENT_GET_STATE:
+		payload1 = data->payload;
+		q6core_lcl.param = payload1[0];
+		pr_debug("%s: Received ADSP get state response 0x%x\n",
+			 __func__, q6core_lcl.param);
+		/* ensure .param is updated prior to .bus_bw_resp_received */
+		wmb();
+		q6core_lcl.bus_bw_resp_received = 1;
+		wake_up(&q6core_lcl.bus_bw_req_wait);
+		break;
+	case AVCS_CMDRSP_GET_LICENSE_VALIDATION_RESULT:
+		payload1 = data->payload;
+		pr_debug("%s: cmd = LICENSE_VALIDATION_RESULT, result = 0x%x\n",
+				__func__, payload1[0]);
+		q6core_lcl.cmd_resp_payload.cmdrsp_license_result.result
+								= payload1[0];
+		q6core_lcl.cmd_resp_received_flag = FLAG_CMDRSP_LICENSE_RESULT;
+		wake_up(&q6core_lcl.cmd_req_wait);
+		break;
+	default:
+		pr_err("%s: Message id from adsp core svc: 0x%x\n",
+			__func__, data->opcode);
+		if (generic_get_data) {
+			generic_get_data->valid = 1;
+			generic_get_data->size_in_ints =
+				data->payload_size/sizeof(int);
+			pr_debug("callback size = %i\n",
+				 data->payload_size);
+			memcpy(generic_get_data->ints, data->payload,
+				data->payload_size);
+			q6core_lcl.bus_bw_resp_received = 1;
+			wake_up(&q6core_lcl.bus_bw_req_wait);
+			break;
+		}
+		break;
+	}
+
+	return 0;
+}
+
+void ocm_core_open(void)
+{
+	if (q6core_lcl.core_handle_q == NULL)
+		q6core_lcl.core_handle_q = apr_register("ADSP", "CORE",
+					aprv2_core_fn_q, 0xFFFFFFFF, NULL);
+	pr_debug("%s: Open_q %pK\n", __func__, q6core_lcl.core_handle_q);
+	if (q6core_lcl.core_handle_q == NULL)
+		pr_err("%s: Unable to register CORE\n", __func__);
+}
+
+struct cal_block_data *cal_utils_get_cal_block_by_key(
+		struct cal_type_data *cal_type, uint32_t key)
+{
+	struct list_head                *ptr, *next;
+	struct cal_block_data           *cal_block = NULL;
+	struct audio_cal_info_metainfo  *metainfo;
+
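+	/* Walk the cal block list and return the first block whose
+	 * metainfo key matches.
+	 */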
+	list_for_each_safe(ptr, next,
+		&cal_type->cal_blocks) {
+
+		cal_block = list_entry(ptr,
+			struct cal_block_data, list);
+		metainfo = (struct audio_cal_info_metainfo *)
+			cal_block->cal_info;
+		if (metainfo->nKey != key) {
+			pr_debug("%s: metainfo key mismatch!!! found:%x, needed:%x\n",
+				__func__, metainfo->nKey, key);
+		} else {
+			pr_debug("%s: metainfo key match found\n", __func__);
+			return cal_block;
+		}
+	}
+	return NULL;
+}
+
+int32_t core_set_license(uint32_t key, uint32_t module_id)
+{
+	struct avcs_cmd_set_license *cmd_setl = NULL;
+	struct cal_block_data *cal_block = NULL;
+	int rc = 0, packet_size = 0;
+
+	pr_debug("%s: key:0x%x, id:0x%x\n", __func__, key, module_id);
+
+	mutex_lock(&(q6core_lcl.cmd_lock));
+	if (q6core_lcl.cal_data[META_CAL] == NULL) {
+		pr_err("%s: cal_data not initialized yet!!\n", __func__);
+		rc = -EINVAL;
+		goto cmd_unlock;
+	}
+
+	mutex_lock(&((q6core_lcl.cal_data[META_CAL])->lock));
+	cal_block = cal_utils_get_cal_block_by_key(
+				q6core_lcl.cal_data[META_CAL], key);
+	if (cal_block == NULL ||
+		cal_block->cal_data.kvaddr == NULL ||
+		cal_block->cal_data.size <= 0) {
+		pr_err("%s: Invalid cal block to send", __func__);
+		rc = -EINVAL;
+		goto cal_data_unlock;
+	}
+
+	packet_size = sizeof(struct avcs_cmd_set_license) +
+					cal_block->cal_data.size;
+	/* round up total packet_size to the next 4 byte boundary */
+	packet_size = ((packet_size + 0x3) >> 2) << 2;
+
+	cmd_setl = kzalloc(packet_size, GFP_KERNEL);
+	if (cmd_setl == NULL) {
+		pr_err("%s: kzalloc for cmd_set_license failed for size %d\n",
+							__func__, packet_size);
+		rc  = -ENOMEM;
+		goto cal_data_unlock;
+	}
+
+	ocm_core_open();
+	if (q6core_lcl.core_handle_q == NULL) {
+		pr_err("%s: apr registration for CORE failed\n", __func__);
+		rc  = -ENODEV;
+		goto fail_cmd;
+	}
+
+	cmd_setl->hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_EVENT,
+				APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
+	cmd_setl->hdr.pkt_size = packet_size;
+	cmd_setl->hdr.src_port = 0;
+	cmd_setl->hdr.dest_port = 0;
+	cmd_setl->hdr.token = 0;
+	cmd_setl->hdr.opcode = AVCS_CMD_SET_LICENSE;
+	cmd_setl->id = module_id;
+	cmd_setl->overwrite = 1;
+	cmd_setl->size = cal_block->cal_data.size;
+	memcpy((uint8_t *)cmd_setl + sizeof(struct avcs_cmd_set_license),
+		cal_block->cal_data.kvaddr,
+		cal_block->cal_data.size);
+	pr_info("%s: Set license opcode=0x%x, id =0x%x, size = %d\n",
+			__func__, cmd_setl->hdr.opcode,
+			cmd_setl->id, cmd_setl->size);
+	rc = apr_send_pkt(q6core_lcl.core_handle_q, (uint32_t *)cmd_setl);
+	if (rc < 0)
+		pr_err("%s: SET_LICENSE failed op[0x%x]rc[%d]\n",
+					__func__, cmd_setl->hdr.opcode, rc);
+
+fail_cmd:
+	kfree(cmd_setl);
+cal_data_unlock:
+	mutex_unlock(&((q6core_lcl.cal_data[META_CAL])->lock));
+cmd_unlock:
+	mutex_unlock(&(q6core_lcl.cmd_lock));
+
+	return rc;
+}
+
+int32_t core_get_license_status(uint32_t module_id)
+{
+	struct avcs_cmd_get_license_validation_result get_lvr_cmd;
+	int ret = 0;
+
+	pr_debug("%s: module_id 0x%x\n", __func__, module_id);
+
+	mutex_lock(&(q6core_lcl.cmd_lock));
+	ocm_core_open();
+	if (q6core_lcl.core_handle_q == NULL) {
+		pr_err("%s: apr registration for CORE failed\n", __func__);
+		ret  = -ENODEV;
+		goto fail_cmd;
+	}
+
+	get_lvr_cmd.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+				APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
+	get_lvr_cmd.hdr.pkt_size =
+		sizeof(struct avcs_cmd_get_license_validation_result);
+
+	get_lvr_cmd.hdr.src_port = 0;
+	get_lvr_cmd.hdr.dest_port = 0;
+	get_lvr_cmd.hdr.token = 0;
+	get_lvr_cmd.hdr.opcode = AVCS_CMD_GET_LICENSE_VALIDATION_RESULT;
+	get_lvr_cmd.id = module_id;
+
+	ret = apr_send_pkt(q6core_lcl.core_handle_q, (uint32_t *) &get_lvr_cmd);
+	if (ret < 0) {
+		pr_err("%s: license_validation request failed, err %d\n",
+							__func__, ret);
+		ret = -EREMOTE;
+		goto fail_cmd;
+	}
+
+	q6core_lcl.cmd_resp_received_flag &= ~(FLAG_CMDRSP_LICENSE_RESULT);
+	mutex_unlock(&(q6core_lcl.cmd_lock));
+	ret = wait_event_timeout(q6core_lcl.cmd_req_wait,
+			(q6core_lcl.cmd_resp_received_flag ==
+				FLAG_CMDRSP_LICENSE_RESULT),
+				msecs_to_jiffies(TIMEOUT_MS));
+	mutex_lock(&(q6core_lcl.cmd_lock));
+	if (!ret) {
+		pr_err("%s: wait_event timeout for CMDRSP_LICENSE_RESULT\n",
+				__func__);
+		ret = -ETIME;
+		goto fail_cmd;
+	}
+	q6core_lcl.cmd_resp_received_flag &= ~(FLAG_CMDRSP_LICENSE_RESULT);
+	ret = q6core_lcl.cmd_resp_payload.cmdrsp_license_result.result;
+
+fail_cmd:
+	mutex_unlock(&(q6core_lcl.cmd_lock));
+	pr_debug("%s: cmdrsp_license_result.result = 0x%x for module 0x%x\n",
+				__func__, ret, module_id);
+	return ret;
+}
+
+uint32_t core_set_dolby_manufacturer_id(int manufacturer_id)
+{
+	struct adsp_dolby_manufacturer_id payload;
+	int rc = 0;
+
+	pr_debug("%s: manufacturer_id: %d\n", __func__, manufacturer_id);
+	mutex_lock(&(q6core_lcl.cmd_lock));
+	ocm_core_open();
+	if (q6core_lcl.core_handle_q) {
+		payload.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_EVENT,
+			APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
+		payload.hdr.pkt_size =
+			sizeof(struct adsp_dolby_manufacturer_id);
+		payload.hdr.src_port = 0;
+		payload.hdr.dest_port = 0;
+		payload.hdr.token = 0;
+		payload.hdr.opcode = ADSP_CMD_SET_DOLBY_MANUFACTURER_ID;
+		payload.manufacturer_id = manufacturer_id;
+		pr_debug("%s: Send Dolby security opcode=0x%x manufacturer ID = %d\n",
+			__func__,
+			payload.hdr.opcode, payload.manufacturer_id);
+		rc = apr_send_pkt(q6core_lcl.core_handle_q,
+						(uint32_t *)&payload);
+		if (rc < 0)
+			pr_err("%s: SET_DOLBY_MANUFACTURER_ID failed op[0x%x]rc[%d]\n",
+				__func__, payload.hdr.opcode, rc);
+	}
+	mutex_unlock(&(q6core_lcl.cmd_lock));
+	return rc;
+}
+
+bool q6core_is_adsp_ready(void)
+{
+	int rc = 0;
+	bool ret = false;
+	struct apr_hdr hdr;
+
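+	/*
+	 * Send AVCS_CMD_ADSP_EVENT_GET_STATE and wait up to
+	 * Q6_READY_TIMEOUT_MS for the state value the APR callback
+	 * caches in q6core_lcl.param.
+	 */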
+	pr_debug("%s: enter\n", __func__);
+	memset(&hdr, 0, sizeof(hdr));
+	hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+				      APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
+	hdr.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE, 0);
+	hdr.opcode = AVCS_CMD_ADSP_EVENT_GET_STATE;
+
+	mutex_lock(&(q6core_lcl.cmd_lock));
+	ocm_core_open();
+	if (q6core_lcl.core_handle_q) {
+		q6core_lcl.bus_bw_resp_received = 0;
+		rc = apr_send_pkt(q6core_lcl.core_handle_q, (uint32_t *)&hdr);
+		if (rc < 0) {
+			pr_err("%s: Get ADSP state APR packet send failed %d\n",
+				__func__, rc);
+			goto bail;
+		}
+
+		rc = wait_event_timeout(q6core_lcl.bus_bw_req_wait,
+					(q6core_lcl.bus_bw_resp_received == 1),
+					msecs_to_jiffies(Q6_READY_TIMEOUT_MS));
+		if (rc > 0 && q6core_lcl.bus_bw_resp_received) {
+			/* ensure to read updated param by callback thread */
+			rmb();
+			ret = !!q6core_lcl.param;
+		}
+	}
+bail:
+	pr_debug("%s: leave, rc %d, adsp ready %d\n", __func__, rc, ret);
+	mutex_unlock(&(q6core_lcl.cmd_lock));
+	return ret;
+}
+
+static int q6core_map_memory_regions(phys_addr_t *buf_add, uint32_t mempool_id,
+			uint32_t *bufsz, uint32_t bufcnt, uint32_t *map_handle)
+{
+	struct avs_cmd_shared_mem_map_regions *mmap_regions = NULL;
+	struct avs_shared_map_region_payload *mregions = NULL;
+	void *mmap_region_cmd = NULL;
+	void *payload = NULL;
+	int ret = 0;
+	int i = 0;
+	int cmd_size = 0;
+
+	cmd_size = sizeof(struct avs_cmd_shared_mem_map_regions)
+			+ sizeof(struct avs_shared_map_region_payload)
+			* bufcnt;
+
+	mmap_region_cmd = kzalloc(cmd_size, GFP_KERNEL);
+	if (mmap_region_cmd == NULL)
+		return -ENOMEM;
+
+	mmap_regions = (struct avs_cmd_shared_mem_map_regions *)mmap_region_cmd;
+	mmap_regions->hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+						APR_HDR_LEN(APR_HDR_SIZE),
+								APR_PKT_VER);
+	mmap_regions->hdr.pkt_size = cmd_size;
+	mmap_regions->hdr.src_port = 0;
+	mmap_regions->hdr.dest_port = 0;
+	mmap_regions->hdr.token = 0;
+	mmap_regions->hdr.opcode = AVCS_CMD_SHARED_MEM_MAP_REGIONS;
+	mmap_regions->mem_pool_id = ADSP_MEMORY_MAP_SHMEM8_4K_POOL & 0x00ff;
+	mmap_regions->num_regions = bufcnt & 0x00ff;
+	mmap_regions->property_flag = 0x00;
+
+	payload = ((u8 *) mmap_region_cmd +
+				sizeof(struct avs_cmd_shared_mem_map_regions));
+	mregions = (struct avs_shared_map_region_payload *)payload;
+
+	for (i = 0; i < bufcnt; i++) {
+		mregions->shm_addr_lsw = lower_32_bits(buf_add[i]);
+		mregions->shm_addr_msw =
+				msm_audio_populate_upper_32_bits(buf_add[i]);
+		mregions->mem_size_bytes = bufsz[i];
+		++mregions;
+	}
+
+	pr_debug("%s: sending memory map, addr %pK, size %d, bufcnt = %d\n",
+		__func__, buf_add, bufsz[0], mmap_regions->num_regions);
+
+	*map_handle = 0;
+	q6core_lcl.bus_bw_resp_received = 0;
+	ret = apr_send_pkt(q6core_lcl.core_handle_q, (uint32_t *)
+		mmap_regions);
+	if (ret < 0) {
+		pr_err("%s: mmap regions failed %d\n",
+			__func__, ret);
+		ret = -EINVAL;
+		goto done;
+	}
+
+	ret = wait_event_timeout(q6core_lcl.bus_bw_req_wait,
+				(q6core_lcl.bus_bw_resp_received == 1),
+				msecs_to_jiffies(TIMEOUT_MS));
+	if (!ret) {
+		pr_err("%s: timeout. waited for memory map\n", __func__);
+		ret = -ETIME;
+		goto done;
+	}
+
+	*map_handle = q6core_lcl.mem_map_cal_handle;
+done:
+	kfree(mmap_region_cmd);
+	return ret;
+}
+
+static int q6core_memory_unmap_regions(uint32_t mem_map_handle)
+{
+	struct avs_cmd_shared_mem_unmap_regions unmap_regions;
+	int ret = 0;
+
+	memset(&unmap_regions, 0, sizeof(unmap_regions));
+	unmap_regions.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+		APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
+	unmap_regions.hdr.pkt_size = sizeof(unmap_regions);
+	unmap_regions.hdr.src_svc = APR_SVC_ADSP_CORE;
+	unmap_regions.hdr.src_domain = APR_DOMAIN_APPS;
+	unmap_regions.hdr.src_port = 0;
+	unmap_regions.hdr.dest_svc = APR_SVC_ADSP_CORE;
+	unmap_regions.hdr.dest_domain = APR_DOMAIN_ADSP;
+	unmap_regions.hdr.dest_port = 0;
+	unmap_regions.hdr.token = 0;
+	unmap_regions.hdr.opcode = AVCS_CMD_SHARED_MEM_UNMAP_REGIONS;
+	unmap_regions.mem_map_handle = mem_map_handle;
+
+	q6core_lcl.bus_bw_resp_received = 0;
+
+	pr_debug("%s: unmap regions map handle %d\n",
+		__func__, mem_map_handle);
+
+	ret = apr_send_pkt(q6core_lcl.core_handle_q, (uint32_t *)
+		&unmap_regions);
+	if (ret < 0) {
+		pr_err("%s: unmap regions failed %d\n",
+			__func__, ret);
+		ret = -EINVAL;
+		goto done;
+	}
+
+	ret = wait_event_timeout(q6core_lcl.bus_bw_req_wait,
+				(q6core_lcl.bus_bw_resp_received == 1),
+				msecs_to_jiffies(TIMEOUT_MS));
+	if (!ret) {
+		pr_err("%s: timeout. waited for memory_unmap\n",
+		       __func__);
+		ret = -ETIME;
+		goto done;
+	}
+done:
+	return ret;
+}
+
+int q6core_add_remove_pool_pages(ion_phys_addr_t buf_add, uint32_t bufsz,
+			uint32_t mempool_id, bool add_pages)
+{
+	struct avs_mem_assign_region mem_pool;
+	int ret = 0, sz;
+
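+	/* One command either adds pages to or removes pages from the
+	 * ADSP memory pool selected by mempool_id.
+	 */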
+	if (add_pages)
+		mem_pool.hdr.opcode = AVCS_CMD_ADD_POOL_PAGES;
+	else
+		mem_pool.hdr.opcode = AVCS_CMD_REMOVE_POOL_PAGES;
+
+	/* get payload length */
+	sz = sizeof(struct avs_mem_assign_region);
+	mem_pool.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+					APR_HDR_LEN(sizeof(struct apr_hdr)),
+					APR_PKT_VER);
+	mem_pool.hdr.src_port = 0;
+	mem_pool.hdr.dest_port = 0;
+	mem_pool.hdr.token = 0;
+	mem_pool.hdr.pkt_size = sz;
+	mem_pool.pool_id = mempool_id;
+	mem_pool.size = bufsz;
+	mem_pool.addr_lsw = lower_32_bits(buf_add);
+	mem_pool.addr_msw = msm_audio_populate_upper_32_bits(buf_add);
+	pr_debug("%s: sending memory map, size %d\n",
+		  __func__, bufsz);
+
+	q6core_lcl.bus_bw_resp_received = 0;
+	ret = apr_send_pkt(q6core_lcl.core_handle_q, (uint32_t *)&mem_pool);
+	if (ret < 0) {
+		pr_err("%s: library map region failed %d\n",
+			__func__, ret);
+		ret = -EINVAL;
+		goto done;
+	}
+
+	ret = wait_event_timeout(q6core_lcl.bus_bw_req_wait,
+				(q6core_lcl.bus_bw_resp_received == 1),
+				msecs_to_jiffies(TIMEOUT_MS));
+	if (!ret) {
+		pr_err("%s: timeout. waited for library memory map\n",
+			__func__);
+		ret = -ETIME;
+		goto done;
+	}
+	ret = 0;
+done:
+	return ret;
+}
+
+static int q6core_dereg_all_custom_topologies(void)
+{
+	int ret = 0;
+	struct avcs_cmd_deregister_topologies dereg_top;
+
+	memset(&dereg_top, 0, sizeof(dereg_top));
+	dereg_top.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+		APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
+	dereg_top.hdr.pkt_size = sizeof(dereg_top);
+	dereg_top.hdr.src_svc = APR_SVC_ADSP_CORE;
+	dereg_top.hdr.src_domain = APR_DOMAIN_APPS;
+	dereg_top.hdr.src_port = 0;
+	dereg_top.hdr.dest_svc = APR_SVC_ADSP_CORE;
+	dereg_top.hdr.dest_domain = APR_DOMAIN_ADSP;
+	dereg_top.hdr.dest_port = 0;
+	dereg_top.hdr.token = 0;
+	dereg_top.hdr.opcode = AVCS_CMD_DEREGISTER_TOPOLOGIES;
+	dereg_top.payload_addr_lsw = 0;
+	dereg_top.payload_addr_msw = 0;
+	dereg_top.mem_map_handle = 0;
+	dereg_top.payload_size = 0;
+	dereg_top.mode = AVCS_MODE_DEREGISTER_ALL_CUSTOM_TOPOLOGIES;
+
+	q6core_lcl.bus_bw_resp_received = 0;
+
+	pr_debug("%s: Deregister topologies mode %d\n",
+		__func__, dereg_top.mode);
+
+	ret = apr_send_pkt(q6core_lcl.core_handle_q, (uint32_t *) &dereg_top);
+	if (ret < 0) {
+		pr_err("%s: Deregister topologies failed %d\n",
+			__func__, ret);
+		goto done;
+	}
+
+	ret = wait_event_timeout(q6core_lcl.bus_bw_req_wait,
+				(q6core_lcl.bus_bw_resp_received == 1),
+				msecs_to_jiffies(TIMEOUT_MS));
+	if (!ret) {
+		pr_err("%s: wait_event timeout for Deregister topologies\n",
+			__func__);
+		ret = -ETIME;
+		goto done;
+	}
+	ret = 0;
+done:
+	return ret;
+}
+
+static int q6core_send_custom_topologies(void)
+{
+	int ret = 0;
+	int ret2 = 0;
+	struct cal_block_data *cal_block = NULL;
+	struct avcs_cmd_register_topologies reg_top;
+
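+	/*
+	 * Sequence: deregister any stale custom topologies, map the
+	 * cal block into ADSP shared memory, register the topologies,
+	 * then unmap the region again.
+	 */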
+	if (!q6core_is_adsp_ready()) {
+		pr_err("%s: ADSP is not ready!\n", __func__);
+		return -ENODEV;
+	}
+
+	memset(&reg_top, 0, sizeof(reg_top));
+	mutex_lock(&q6core_lcl.cal_data[CUST_TOP_CAL]->lock);
+	mutex_lock(&q6core_lcl.cmd_lock);
+
+	cal_block = cal_utils_get_only_cal_block(
+		q6core_lcl.cal_data[CUST_TOP_CAL]);
+	if (cal_block == NULL) {
+		pr_debug("%s: cal block is NULL!\n", __func__);
+		goto unlock;
+	}
+	if (cal_block->cal_data.size <= 0) {
+		pr_debug("%s: cal size is %zd not sending\n",
+			__func__, cal_block->cal_data.size);
+		goto unlock;
+	}
+
+	q6core_dereg_all_custom_topologies();
+
+	ret = q6core_map_memory_regions(&cal_block->cal_data.paddr, 0,
+		(uint32_t *)&cal_block->map_data.map_size, 1,
+		&cal_block->map_data.q6map_handle);
+	if (ret < 0) {
+		pr_err("%s: q6core_map_memory_regions failed\n", __func__);
+		goto unlock;
+	}
+
+	reg_top.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+		APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
+	reg_top.hdr.pkt_size = sizeof(reg_top);
+	reg_top.hdr.src_svc = APR_SVC_ADSP_CORE;
+	reg_top.hdr.src_domain = APR_DOMAIN_APPS;
+	reg_top.hdr.src_port = 0;
+	reg_top.hdr.dest_svc = APR_SVC_ADSP_CORE;
+	reg_top.hdr.dest_domain = APR_DOMAIN_ADSP;
+	reg_top.hdr.dest_port = 0;
+	reg_top.hdr.token = 0;
+	reg_top.hdr.opcode = AVCS_CMD_REGISTER_TOPOLOGIES;
+	reg_top.payload_addr_lsw =
+		lower_32_bits(cal_block->cal_data.paddr);
+	reg_top.payload_addr_msw =
+		msm_audio_populate_upper_32_bits(cal_block->cal_data.paddr);
+	reg_top.mem_map_handle = cal_block->map_data.q6map_handle;
+	reg_top.payload_size = cal_block->cal_data.size;
+
+	q6core_lcl.adsp_status = 0;
+	q6core_lcl.bus_bw_resp_received = 0;
+
+	pr_debug("%s: Register topologies addr %pK, size %zd, map handle %d\n",
+		__func__, &cal_block->cal_data.paddr, cal_block->cal_data.size,
+		cal_block->map_data.q6map_handle);
+
+	ret = apr_send_pkt(q6core_lcl.core_handle_q, (uint32_t *) &reg_top);
+	if (ret < 0) {
+		pr_err("%s: Register topologies failed %d\n",
+			__func__, ret);
+		goto unmap;
+	}
+
+	ret = wait_event_timeout(q6core_lcl.bus_bw_req_wait,
+				(q6core_lcl.bus_bw_resp_received == 1),
+				msecs_to_jiffies(TIMEOUT_MS));
+	if (!ret) {
+		pr_err("%s: wait_event timeout for Register topologies\n",
+			__func__);
+		ret = -ETIME;
+		goto unmap;
+	}
+
+	ret = 0;
+	if (q6core_lcl.adsp_status < 0)
+		ret = q6core_lcl.adsp_status;
+unmap:
+	ret2 = q6core_memory_unmap_regions(cal_block->map_data.q6map_handle);
+	if (ret2 < 0) {
+		pr_err("%s: q6core_memory_unmap_regions failed for map handle %d\n",
+			__func__, cal_block->map_data.q6map_handle);
+		ret = ret2;
+		goto unlock;
+	}
+
+unlock:
+	mutex_unlock(&q6core_lcl.cmd_lock);
+	mutex_unlock(&q6core_lcl.cal_data[CUST_TOP_CAL]->lock);
+
+	return ret;
+}
+
+static int get_cal_type_index(int32_t cal_type)
+{
+	int ret = -EINVAL;
+
+	switch (cal_type) {
+	case AUDIO_CORE_METAINFO_CAL_TYPE:
+		ret = META_CAL;
+		break;
+	case CORE_CUSTOM_TOPOLOGIES_CAL_TYPE:
+		ret = CUST_TOP_CAL;
+		break;
+	default:
+		pr_err("%s: invalid cal type %d!\n", __func__, cal_type);
+	}
+	return ret;
+}
+
+static int q6core_alloc_cal(int32_t cal_type,
+			    size_t data_size, void *data)
+{
+	int ret = 0;
+	int cal_index;
+
+	cal_index = get_cal_type_index(cal_type);
+	if (cal_index < 0) {
+		pr_err("%s: could not get cal index %d!\n",
+			__func__, cal_index);
+		ret = -EINVAL;
+		goto done;
+	}
+
+	ret = cal_utils_alloc_cal(data_size, data,
+		q6core_lcl.cal_data[cal_index], 0, NULL);
+	if (ret < 0) {
+		pr_err("%s: cal_utils_alloc_block failed, ret = %d, cal type = %d!\n",
+			__func__, ret, cal_type);
+		goto done;
+	}
+done:
+	return ret;
+}
+
+static int q6core_dealloc_cal(int32_t cal_type,
+			      size_t data_size, void *data)
+{
+	int ret = 0;
+	int cal_index;
+
+	cal_index = get_cal_type_index(cal_type);
+	if (cal_index < 0) {
+		pr_err("%s: could not get cal index %d!\n",
+			__func__, cal_index);
+		ret = -EINVAL;
+		goto done;
+	}
+
+	ret = cal_utils_dealloc_cal(data_size, data,
+					q6core_lcl.cal_data[cal_index]);
+	if (ret < 0) {
+		pr_err("%s: cal_utils_dealloc_block failed, ret = %d, cal type = %d!\n",
+			__func__, ret, cal_type);
+		goto done;
+	}
+done:
+	return ret;
+}
+
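+/*
+ * Cache calibration received from userspace; custom topology data is
+ * pushed to the ADSP as soon as it has been stored.
+ */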
+static int q6core_set_cal(int32_t cal_type,
+	size_t data_size, void *data)
+{
+	int ret = 0;
+	int cal_index;
+
+	cal_index = get_cal_type_index(cal_type);
+	if (cal_index < 0) {
+		pr_err("%s: could not get cal index %d!\n",
+			__func__, cal_index);
+		ret = -EINVAL;
+		goto done;
+	}
+
+
+	ret = cal_utils_set_cal(data_size, data,
+				    q6core_lcl.cal_data[cal_index], 0, NULL);
+	if (ret < 0) {
+		pr_err("%s: cal_utils_set_cal failed, ret = %d, cal type = %d!\n",
+		__func__, ret, cal_type);
+		goto done;
+	}
+
+	if (cal_index == CUST_TOP_CAL)
+		ret = q6core_send_custom_topologies();
+done:
+	return ret;
+}
+
+static void q6core_delete_cal_data(void)
+{
+	pr_debug("%s:\n", __func__);
+
+	cal_utils_destroy_cal_types(CORE_MAX_CAL, q6core_lcl.cal_data);
+}
+
+
+static int q6core_init_cal_data(void)
+{
+	int ret = 0;
+	struct cal_type_info    cal_type_info[] = {
+		{{AUDIO_CORE_METAINFO_CAL_TYPE,
+		{q6core_alloc_cal, q6core_dealloc_cal, NULL,
+		q6core_set_cal, NULL, NULL} },
+		{NULL, NULL, cal_utils_match_buf_num} },
+
+		{{CORE_CUSTOM_TOPOLOGIES_CAL_TYPE,
+		{q6core_alloc_cal, q6core_dealloc_cal, NULL,
+		q6core_set_cal, NULL, NULL} },
+		{NULL, NULL, cal_utils_match_buf_num} }
+	};
+	pr_debug("%s:\n", __func__);
+
+	ret = cal_utils_create_cal_types(CORE_MAX_CAL,
+		q6core_lcl.cal_data, cal_type_info);
+	if (ret < 0) {
+		pr_err("%s: could not create cal type!\n",
+			__func__);
+		goto err;
+	}
+
+	return ret;
+err:
+	q6core_delete_cal_data();
+	return ret;
+}
+
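+/*
+ * Module init: set up the wait queues and locks used for APR command
+ * completion, then register the q6core calibration types.
+ */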
+static int __init core_init(void)
+{
+	init_waitqueue_head(&q6core_lcl.bus_bw_req_wait);
+	q6core_lcl.bus_bw_resp_received = 0;
+
+	q6core_lcl.core_handle_q = NULL;
+
+	init_waitqueue_head(&q6core_lcl.cmd_req_wait);
+	q6core_lcl.cmd_resp_received_flag = FLAG_NONE;
+	mutex_init(&q6core_lcl.cmd_lock);
+	q6core_lcl.mem_map_cal_handle = 0;
+	q6core_lcl.adsp_status = 0;
+
+	q6core_init_cal_data();
+	return 0;
+}
+module_init(core_init);
+
+static void __exit core_exit(void)
+{
+	mutex_destroy(&q6core_lcl.cmd_lock);
+	q6core_delete_cal_data();
+}
+module_exit(core_exit);
+MODULE_DESCRIPTION("ADSP core driver");
+MODULE_LICENSE("GPL v2");
+
diff -Nruw linux-4.4.115/sound/soc/msm/qdsp6v2/q6lsm.c linux-4.4.115-fbx/sound/soc/msm/qdsp6v2/q6lsm.c
--- linux-4.4.115/sound/soc/msm/qdsp6v2/q6lsm.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/sound/soc/msm/qdsp6v2/q6lsm.c	2019-10-29 09:26:26.165227859 +0100
@@ -0,0 +1,2168 @@
+/*
+ * Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#include <linux/fs.h>
+#include <linux/mutex.h>
+#include <linux/wait.h>
+#include <linux/miscdevice.h>
+#include <linux/uaccess.h>
+#include <linux/sched.h>
+#include <linux/delay.h>
+#include <linux/spinlock.h>
+#include <linux/slab.h>
+#include <linux/debugfs.h>
+#include <linux/time.h>
+#include <linux/atomic.h>
+#include <sound/apr_audio-v2.h>
+#include <sound/lsm_params.h>
+#include <sound/q6core.h>
+#include <sound/q6lsm.h>
+#include <asm/ioctls.h>
+#include <linux/memory.h>
+#include <linux/msm_audio_ion.h>
+#include <sound/q6afe-v2.h>
+#include <sound/audio_cal_utils.h>
+#include <sound/adsp_err.h>
+
+#define APR_TIMEOUT	(5 * HZ)
+#define LSM_ALIGN_BOUNDARY 512
+#define LSM_SAMPLE_RATE 16000
+#define QLSM_PARAM_ID_MINOR_VERSION 1
+#define QLSM_PARAM_ID_MINOR_VERSION_2 2
+
+static int lsm_afe_port;
+
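+/* Calibration data slots maintained by the LSM driver */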
+enum {
+	LSM_CUSTOM_TOP_IDX,
+	LSM_TOP_IDX,
+	LSM_CAL_IDX,
+	LSM_MAX_CAL_IDX
+};
+
+enum {
+	CMD_STATE_CLEARED = 0,
+	CMD_STATE_WAIT_RESP = 1,
+};
+
+enum {
+	LSM_INVALID_SESSION_ID = 0,
+	LSM_MIN_SESSION_ID = 1,
+	LSM_MAX_SESSION_ID = 8,
+	LSM_CONTROL_SESSION = 0x0F,
+};
+
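+/* Evaluates to true when the session ID lies outside the valid range */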
+#define CHECK_SESSION(x) ((x) < LSM_MIN_SESSION_ID || (x) > LSM_MAX_SESSION_ID)
+
+struct lsm_common {
+	void *apr;
+	atomic_t apr_users;
+	struct lsm_client	common_client[LSM_MAX_SESSION_ID + 1];
+
+	int set_custom_topology;
+	struct cal_type_data	*cal_data[LSM_MAX_CAL_IDX];
+
+	struct mutex apr_lock;
+};
+
+struct lsm_module_param_ids {
+	uint32_t module_id;
+	uint32_t param_id;
+};
+
+static struct lsm_common lsm_common;
+/*
+ * mmap_handle_p can point either to client->sound_model.mem_map_handle or
+ * to lsm_common.mmap_handle_for_cal.
+ * mmap_lock must be held while accessing this.
+ */
+static spinlock_t mmap_lock;
+static uint32_t *mmap_handle_p;
+
+static spinlock_t lsm_session_lock;
+static struct lsm_client *lsm_session[LSM_MAX_SESSION_ID + 1];
+
+static int q6lsm_mmapcallback(struct apr_client_data *data, void *priv);
+static int q6lsm_send_cal(struct lsm_client *client, u32 set_params_opcode);
+static int q6lsm_memory_map_regions(struct lsm_client *client,
+				    dma_addr_t dma_addr_p, uint32_t dma_buf_sz,
+				    uint32_t *mmap_p);
+static int q6lsm_memory_unmap_regions(struct lsm_client *client,
+				      uint32_t handle);
+
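+/*
+ * Fill the common SET_PARAMS header; a zero address/handle indicates
+ * that the parameter data is carried in-band in the packet.
+ */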
+static void q6lsm_set_param_hdr_info(
+		struct lsm_set_params_hdr *param_hdr,
+		u32 payload_size, u32 addr_lsw, u32 addr_msw,
+		u32 mmap_handle)
+{
+	param_hdr->data_payload_size = payload_size;
+	param_hdr->data_payload_addr_lsw = addr_lsw;
+	param_hdr->data_payload_addr_msw = addr_msw;
+	param_hdr->mem_map_handle = mmap_handle;
+}
+
+static void q6lsm_set_param_common(
+		struct lsm_param_payload_common *common,
+		struct lsm_module_param_ids *ids,
+		u32 param_size, u32 set_param_version)
+{
+	common->module_id = ids->module_id;
+	common->param_id = ids->param_id;
+
+	switch (set_param_version) {
+	case LSM_SESSION_CMD_SET_PARAMS_V2:
+		common->p_size.param_size = param_size;
+		break;
+	case LSM_SESSION_CMD_SET_PARAMS:
+	default:
+		common->p_size.sr.param_size =
+			(u16) param_size;
+		common->p_size.sr.reserved = 0;
+		break;
+	}
+}
+
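+/*
+ * Per-session APR callback: handles SSR resets, READ_DONE data events
+ * and basic command responses, waking any thread waiting on cmd_wait.
+ */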
+static int q6lsm_callback(struct apr_client_data *data, void *priv)
+{
+	struct lsm_client *client = (struct lsm_client *)priv;
+	uint32_t token;
+	uint32_t *payload;
+
+	if (!client || !data) {
+		pr_err("%s: client %pK data %pK\n",
+			__func__, client, data);
+		WARN_ON(1);
+		return -EINVAL;
+	}
+
+	if (data->opcode == RESET_EVENTS) {
+		pr_debug("%s: SSR event received 0x%x, event 0x%x, proc 0x%x\n",
+			 __func__, data->opcode, data->reset_event,
+			 data->reset_proc);
+
+		cal_utils_clear_cal_block_q6maps(LSM_MAX_CAL_IDX,
+			lsm_common.cal_data);
+		mutex_lock(&lsm_common.cal_data[LSM_CUSTOM_TOP_IDX]->lock);
+		lsm_common.set_custom_topology = 1;
+		mutex_unlock(&lsm_common.cal_data[LSM_CUSTOM_TOP_IDX]->lock);
+		return 0;
+	}
+
+	payload = data->payload;
+	pr_debug("%s: Session %d opcode 0x%x token 0x%x payload size %d\n"
+			 "payload [0] = 0x%x\n", __func__, client->session,
+		data->opcode, data->token, data->payload_size, payload[0]);
+	if (data->opcode == LSM_DATA_EVENT_READ_DONE) {
+		struct lsm_cmd_read_done read_done;
+		token = data->token;
+		if (data->payload_size > sizeof(read_done)) {
+			pr_err("%s: read done error payload size %d expected size %zd\n",
+				__func__, data->payload_size,
+				sizeof(read_done));
+			return -EINVAL;
+		}
+		pr_debug("%s: opcode %x status %x lsw %x msw %x mem_map handle %x\n",
+			__func__, data->opcode, payload[0], payload[1],
+			payload[2], payload[3]);
+		read_done.status = payload[0];
+		read_done.buf_addr_lsw = payload[1];
+		read_done.buf_addr_msw = payload[2];
+		read_done.mem_map_handle = payload[3];
+		read_done.total_size = payload[4];
+		read_done.offset = payload[5];
+		if (client->cb)
+			client->cb(data->opcode, data->token,
+					(void *)&read_done,
+					client->priv);
+		return 0;
+	} else if (data->opcode == APR_BASIC_RSP_RESULT) {
+		token = data->token;
+		switch (payload[0]) {
+		case LSM_SESSION_CMD_START:
+		case LSM_SESSION_CMD_STOP:
+		case LSM_SESSION_CMD_SET_PARAMS:
+		case LSM_SESSION_CMD_OPEN_TX:
+		case LSM_SESSION_CMD_CLOSE_TX:
+		case LSM_SESSION_CMD_REGISTER_SOUND_MODEL:
+		case LSM_SESSION_CMD_DEREGISTER_SOUND_MODEL:
+		case LSM_SESSION_CMD_SHARED_MEM_UNMAP_REGIONS:
+		case LSM_SESSION_CMD_EOB:
+		case LSM_SESSION_CMD_READ:
+		case LSM_SESSION_CMD_OPEN_TX_V2:
+		case LSM_CMD_ADD_TOPOLOGIES:
+		case LSM_SESSION_CMD_SET_PARAMS_V2:
+			if (token != client->session &&
+			    payload[0] !=
+				LSM_SESSION_CMD_DEREGISTER_SOUND_MODEL) {
+				pr_err("%s: Invalid session %d receivced expected %d\n",
+					__func__, token, client->session);
+				return -EINVAL;
+			}
+			client->cmd_err_code = payload[1];
+			if (client->cmd_err_code)
+				pr_err("%s: cmd 0x%x failed status %d\n",
+				__func__, payload[0], client->cmd_err_code);
+			if (atomic_cmpxchg(&client->cmd_state,
+					   CMD_STATE_WAIT_RESP,
+					   CMD_STATE_CLEARED) ==
+					       CMD_STATE_WAIT_RESP)
+				wake_up(&client->cmd_wait);
+			break;
+		default:
+			pr_debug("%s: Unknown command 0x%x\n",
+				__func__, payload[0]);
+			break;
+		}
+		return 0;
+	}
+
+	if (client->cb)
+		client->cb(data->opcode, data->token, data->payload,
+			   client->priv);
+
+	return 0;
+}
+
+static int q6lsm_session_alloc(struct lsm_client *client)
+{
+	unsigned long flags;
+	int n, ret = -ENOMEM;
+
+	spin_lock_irqsave(&lsm_session_lock, flags);
+	for (n = LSM_MIN_SESSION_ID; n <= LSM_MAX_SESSION_ID; n++) {
+		if (!lsm_session[n]) {
+			lsm_session[n] = client;
+			ret = n;
+			break;
+		}
+	}
+	spin_unlock_irqrestore(&lsm_session_lock, flags);
+	pr_debug("%s: Alloc Session %d", __func__, n);
+	return ret;
+}
+
+static void q6lsm_session_free(struct lsm_client *client)
+{
+	unsigned long flags;
+	pr_debug("%s: Freeing session ID %d\n", __func__, client->session);
+	spin_lock_irqsave(&lsm_session_lock, flags);
+	lsm_session[client->session] = NULL;
+	spin_unlock_irqrestore(&lsm_session_lock, flags);
+	client->session = LSM_INVALID_SESSION_ID;
+}
+
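+/*
+ * Reference-counted registration of the shared memory-map APR port:
+ * the port is registered on first use and deregistered when the last
+ * user drops its reference.
+ */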
+static void *q6lsm_mmap_apr_reg(void)
+{
+	if (atomic_inc_return(&lsm_common.apr_users) == 1) {
+		lsm_common.apr =
+		    apr_register("ADSP", "LSM", q6lsm_mmapcallback,
+				 0x0FFFFFFFF, &lsm_common);
+		if (!lsm_common.apr) {
+			pr_debug("%s: Unable to register APR LSM common port\n",
+				 __func__);
+			atomic_dec(&lsm_common.apr_users);
+		}
+	}
+	return lsm_common.apr;
+}
+
+static int q6lsm_mmap_apr_dereg(void)
+{
+	if (atomic_read(&lsm_common.apr_users) <= 0) {
+		WARN("%s: APR common port already closed\n", __func__);
+	} else {
+		if (atomic_dec_return(&lsm_common.apr_users) == 0) {
+			apr_deregister(lsm_common.apr);
+			pr_debug("%s: APR De-Register common port\n", __func__);
+		}
+	}
+	return 0;
+}
+
+struct lsm_client *q6lsm_client_alloc(lsm_app_cb cb, void *priv)
+{
+	struct lsm_client *client;
+	int n;
+
+	client = kzalloc(sizeof(struct lsm_client), GFP_KERNEL);
+	if (!client)
+		return NULL;
+	n = q6lsm_session_alloc(client);
+	if (n <= 0) {
+		kfree(client);
+		return NULL;
+	}
+	client->session = n;
+	client->cb = cb;
+	client->priv = priv;
+	if (CHECK_SESSION(client->session)) {
+		pr_err("%s: Client session %d\n",
+			__func__, client->session);
+		kfree(client);
+		return NULL;
+	}
+	pr_debug("%s: Client Session %d\n", __func__, client->session);
+	client->apr = apr_register("ADSP", "LSM", q6lsm_callback,
+				   ((client->session) << 8 | client->session),
+				   client);
+
+	if (client->apr == NULL) {
+		pr_err("%s: Registration with APR failed\n", __func__);
+		goto fail;
+	}
+
+	pr_debug("%s: Registering the common port with APR\n", __func__);
+	client->mmap_apr = q6lsm_mmap_apr_reg();
+	if (!client->mmap_apr) {
+		pr_err("%s: APR registration failed\n", __func__);
+		goto fail;
+	}
+
+	init_waitqueue_head(&client->cmd_wait);
+	mutex_init(&client->cmd_lock);
+	atomic_set(&client->cmd_state, CMD_STATE_CLEARED);
+	pr_debug("%s: New client allocated\n", __func__);
+	return client;
+fail:
+	q6lsm_client_free(client);
+	return NULL;
+}
+
+void q6lsm_client_free(struct lsm_client *client)
+{
+	if (!client)
+		return;
+	if (CHECK_SESSION(client->session)) {
+		pr_err("%s: Invalid Session %d\n", __func__, client->session);
+		return;
+	}
+	apr_deregister(client->apr);
+	client->mmap_apr = NULL;
+	q6lsm_session_free(client);
+	q6lsm_mmap_apr_dereg();
+	mutex_destroy(&client->cmd_lock);
+	kfree(client);
+}
+
+/*
+ * q6lsm_apr_send_pkt : If wait == true, hold the mutex so we do not
+ *			preempt another thread's wait.
+ *			If mmap_p != NULL, take the spinlock with irqs
+ *			disabled to protect mmap_handle_p.
+ */
+static int q6lsm_apr_send_pkt(struct lsm_client *client, void *handle,
+			      void *data, bool wait, uint32_t *mmap_p)
+{
+	int ret;
+	unsigned long flags = 0;
+	struct apr_hdr *msg_hdr = (struct apr_hdr *) data;
+
+	pr_debug("%s: enter wait %d\n", __func__, wait);
+	if (wait)
+		mutex_lock(&lsm_common.apr_lock);
+	if (mmap_p) {
+		WARN_ON(!wait);
+		spin_lock_irqsave(&mmap_lock, flags);
+		mmap_handle_p = mmap_p;
+	}
+	atomic_set(&client->cmd_state, CMD_STATE_WAIT_RESP);
+	client->cmd_err_code = 0;
+	ret = apr_send_pkt(handle, data);
+	if (mmap_p)
+		spin_unlock_irqrestore(&mmap_lock, flags);
+
+	if (ret < 0) {
+		pr_err("%s: apr_send_pkt failed %d\n", __func__, ret);
+	} else if (wait) {
+		ret = wait_event_timeout(client->cmd_wait,
+					 (atomic_read(&client->cmd_state) ==
+					      CMD_STATE_CLEARED),
+					 APR_TIMEOUT);
+		if (likely(ret)) {
+			/* q6 returned error */
+			if (client->cmd_err_code) {
+				pr_err("%s: DSP returned error[%s]\n",
+					__func__, adsp_err_get_err_str(
+					client->cmd_err_code));
+				ret = adsp_err_get_lnx_err_code(
+						client->cmd_err_code);
+			} else {
+				ret = 0;
+			}
+		} else {
+			pr_err("%s: wait timedout, apr_opcode = 0x%x, size = %d\n",
+				__func__, msg_hdr->opcode, msg_hdr->pkt_size);
+			/* ret = 0 means wait timed out */
+			ret = -ETIMEDOUT;
+		}
+	} else {
+		ret = 0;
+	}
+	if (wait)
+		mutex_unlock(&lsm_common.apr_lock);
+
+	pr_debug("%s: leave ret %d\n", __func__, ret);
+	return ret;
+}
+
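+/*
+ * Build a session-directed APR header; the session ID is encoded in
+ * both bytes of src_port/dest_port and echoed back in the token.
+ */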
+static void q6lsm_add_hdr(struct lsm_client *client, struct apr_hdr *hdr,
+			uint32_t pkt_size, bool cmd_flg)
+{
+	pr_debug("%s: pkt_size %d cmd_flg %d session %d\n", __func__,
+		pkt_size, cmd_flg, client->session);
+	hdr->hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+				       APR_HDR_LEN(sizeof(struct apr_hdr)),
+				       APR_PKT_VER);
+	hdr->src_svc = APR_SVC_LSM;
+	hdr->src_domain = APR_DOMAIN_APPS;
+	hdr->dest_svc = APR_SVC_LSM;
+	hdr->dest_domain = APR_DOMAIN_ADSP;
+	hdr->src_port = ((client->session << 8) & 0xFF00) | client->session;
+	hdr->dest_port = ((client->session << 8) & 0xFF00) | client->session;
+	hdr->pkt_size = pkt_size;
+	if (cmd_flg)
+		hdr->token = client->session;
+}
+
+
+static int q6lsm_send_custom_topologies(struct lsm_client *client)
+{
+	int rc;
+	struct cal_block_data *cal_block = NULL;
+	struct lsm_custom_topologies cstm_top;
+
+	if (lsm_common.cal_data[LSM_CUSTOM_TOP_IDX] == NULL) {
+		pr_err("%s: LSM_CUSTOM_TOP_IDX invalid\n", __func__);
+		rc = -EINVAL;
+		goto done;
+	}
+
+	lsm_common.set_custom_topology = 0;
+
+	mutex_lock(&lsm_common.cal_data[LSM_CUSTOM_TOP_IDX]->lock);
+	cal_block = cal_utils_get_only_cal_block(
+			lsm_common.cal_data[LSM_CUSTOM_TOP_IDX]);
+	if (!cal_block) {
+		pr_err("%s: Cal block for LSM_CUSTOM_TOP_IDX not found\n",
+			__func__);
+		rc = -EINVAL;
+		goto unlock;
+	}
+
+	if (cal_block->cal_data.size <= 0) {
+		pr_err("%s: Invalid size for LSM_CUSTOM_TOP %zd\n",
+			__func__, cal_block->cal_data.size);
+		rc = -EINVAL;
+		goto unlock;
+	}
+
+	memset(&cstm_top, 0, sizeof(cstm_top));
+	/* Map the memory for out-of-band data */
+	rc = q6lsm_memory_map_regions(client, cal_block->cal_data.paddr,
+				      cal_block->map_data.map_size,
+				      &cal_block->map_data.q6map_handle);
+	if (rc < 0) {
+		pr_err("%s: Failed to map custom topologied, err = %d\n",
+			__func__, rc);
+		goto unlock;
+	}
+
+	q6lsm_add_hdr(client, &cstm_top.hdr,
+		      sizeof(cstm_top), true);
+	cstm_top.hdr.opcode = LSM_CMD_ADD_TOPOLOGIES;
+
+	/*
+	 * For ADD_TOPOLOGIES, the dest_port should be 0
+	 * Note that source port cannot be zero as it is used
+	 * to route the response to a specific client registered
+	 * on APR
+	 */
+	cstm_top.hdr.dest_port = 0;
+
+	cstm_top.data_payload_addr_lsw =
+			lower_32_bits(cal_block->cal_data.paddr);
+	cstm_top.data_payload_addr_msw =
+			msm_audio_populate_upper_32_bits(
+					cal_block->cal_data.paddr);
+	cstm_top.mem_map_handle = cal_block->map_data.q6map_handle;
+	cstm_top.buffer_size = cal_block->cal_data.size;
+
+	rc = q6lsm_apr_send_pkt(client, client->apr,
+				&cstm_top, true, NULL);
+	if (rc)
+		pr_err("%s: Failed to add custom top, err = %d\n",
+			__func__, rc);
+	/* go ahead and unmap even if custom top failed */
+	rc = q6lsm_memory_unmap_regions(client,
+					cal_block->map_data.q6map_handle);
+	if (rc) {
+		pr_err("%s: Failed to unmap, err = %d\n",
+			__func__, rc);
+		/* Even if mem unmap failed, treat the cmd as success */
+		rc = 0;
+	}
+
+unlock:
+	mutex_unlock(&lsm_common.cal_data[LSM_CUSTOM_TOP_IDX]->lock);
+done:
+	return rc;
+}
+
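+/*
+ * Open the LSM session with an explicit topology taken from the
+ * LSM_TOP_IDX calibration block (OPEN_TX_V2).
+ */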
+static int q6lsm_do_open_v2(struct lsm_client *client,
+		uint16_t app_id)
+{
+	int rc;
+	struct cal_block_data *cal_block = NULL;
+	struct audio_cal_info_lsm_top *lsm_top;
+	struct lsm_stream_cmd_open_tx_v2 open_v2;
+
+	if (lsm_common.cal_data[LSM_TOP_IDX] == NULL) {
+		pr_err("%s: LSM_TOP_IDX invalid\n", __func__);
+		rc = -EINVAL;
+		goto done;
+	}
+
+	mutex_lock(&lsm_common.cal_data[LSM_TOP_IDX]->lock);
+	cal_block = cal_utils_get_only_cal_block(
+			lsm_common.cal_data[LSM_TOP_IDX]);
+	if (!cal_block) {
+		pr_err("%s: Cal block for LSM_TOP_IDX not found\n",
+			__func__);
+		rc = -EINVAL;
+		goto unlock;
+	}
+
+	lsm_top = (struct audio_cal_info_lsm_top *)
+			cal_block->cal_info;
+	if (!lsm_top) {
+		pr_err("%s: cal_info for LSM_TOP_IDX not found\n",
+			__func__);
+		rc = -EINVAL;
+		goto unlock;
+	}
+
+	pr_debug("%s: topology_id = 0x%x, acdb_id = 0x%x, app_type = 0x%x\n",
+		 __func__, lsm_top->topology, lsm_top->acdb_id,
+		 lsm_top->app_type);
+
+	if (lsm_top->topology == 0) {
+		pr_err("%s: toplogy id not sent for app_type 0x%x\n",
+			__func__, lsm_top->app_type);
+		rc = -EINVAL;
+		goto unlock;
+	}
+
+	client->app_id = lsm_top->app_type;
+	memset(&open_v2, 0, sizeof(open_v2));
+	q6lsm_add_hdr(client, &open_v2.hdr,
+		      sizeof(open_v2), true);
+	open_v2.topology_id = lsm_top->topology;
+	open_v2.hdr.opcode = LSM_SESSION_CMD_OPEN_TX_V2;
+
+	rc = q6lsm_apr_send_pkt(client, client->apr,
+				&open_v2, true, NULL);
+	if (rc)
+		pr_err("%s: open_v2 failed, err = %d\n",
+			__func__, rc);
+	else
+		client->use_topology = true;
+unlock:
+	mutex_unlock(&lsm_common.cal_data[LSM_TOP_IDX]->lock);
+done:
+	return rc;
+}
+
+void q6lsm_sm_set_param_data(struct lsm_client *client,
+		struct lsm_params_info *p_info,
+		size_t *offset)
+{
+	struct lsm_param_payload_common *param;
+
+	param = (struct lsm_param_payload_common *)
+			client->sound_model.data;
+	param->module_id = p_info->module_id;
+	param->param_id = p_info->param_id;
+	param->p_size.param_size = client->sound_model.size;
+	*offset = sizeof(*param);
+}
+
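+/*
+ * Session open entry point: push custom topologies once (after boot or
+ * SSR), try the topology-based OPEN_TX_V2 first and fall back to the
+ * legacy OPEN_TX command if that fails.
+ */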
+int q6lsm_open(struct lsm_client *client, uint16_t app_id)
+{
+	int rc = 0;
+	struct lsm_stream_cmd_open_tx open;
+
+	/* Add Custom topologies if needed */
+	if (lsm_common.set_custom_topology) {
+		rc = q6lsm_send_custom_topologies(client);
+		if (rc)
+			pr_err("%s: Failed to send cust_top, err = %d\n",
+				__func__, rc);
+	}
+
+	/* Try to open with topology first */
+	rc = q6lsm_do_open_v2(client, app_id);
+	if (!rc)
+		/* open_v2 was successful */
+		goto done;
+
+	pr_debug("%s: try without topology\n",
+		 __func__);
+
+	memset(&open, 0, sizeof(open));
+	q6lsm_add_hdr(client, &open.hdr, sizeof(open), true);
+	switch (client->app_id) {
+	case LSM_VOICE_WAKEUP_APP_ID_V2:
+		open.app_id = client->app_id;
+		break;
+	default:
+		pr_err("%s:  default err 0x%x\n", __func__, client->app_id);
+		rc = -EINVAL;
+		break;
+	}
+
+	open.sampling_rate = LSM_SAMPLE_RATE;
+	open.hdr.opcode = LSM_SESSION_CMD_OPEN_TX;
+	rc = q6lsm_apr_send_pkt(client, client->apr,
+				&open, true, NULL);
+	if (rc)
+		pr_err("%s: Open failed opcode 0x%x, rc %d\n",
+		       __func__, open.hdr.opcode, rc);
+	else
+		client->use_topology = false;
+done:
+	pr_debug("%s: leave %d\n", __func__, rc);
+	return rc;
+}
+
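+/*
+ * Send the per-keyword minimum confidence levels. The payload is a
+ * packed array of u8 values, padded so the parameter size stays
+ * 4-byte aligned.
+ */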
+static int q6lsm_send_confidence_levels(
+		struct lsm_client *client,
+		struct lsm_module_param_ids *ids,
+		u32 set_param_opcode)
+{
+	u8 *packet;
+	size_t pkt_size;
+	struct lsm_cmd_set_params_conf *conf_params;
+	struct apr_hdr *msg_hdr;
+	struct lsm_param_min_confidence_levels *cfl;
+	uint8_t i = 0;
+	uint8_t padd_size = 0;
+	u8 *conf_levels;
+	int rc;
+	u32 payload_size, param_size;
+
+	padd_size = (4 - (client->num_confidence_levels % 4)) - 1;
+	pkt_size = sizeof(*conf_params) + padd_size +
+		   client->num_confidence_levels;
+
+	packet = kzalloc(pkt_size, GFP_KERNEL);
+	if (!packet) {
+		pr_err("%s: no memory for confidence level, size = %zd\n",
+			__func__, pkt_size);
+		return -ENOMEM;
+	}
+
+	conf_params = (struct lsm_cmd_set_params_conf *) packet;
+	conf_levels = (u8 *) (packet + sizeof(*conf_params));
+	msg_hdr = &conf_params->msg_hdr;
+	q6lsm_add_hdr(client, msg_hdr,
+		      pkt_size, true);
+	msg_hdr->opcode = set_param_opcode;
+	payload_size = pkt_size - sizeof(*msg_hdr) -
+		       sizeof(conf_params->params_hdr);
+	q6lsm_set_param_hdr_info(&conf_params->params_hdr,
+				 payload_size, 0, 0, 0);
+	cfl = &conf_params->conf_payload;
+	param_size = ((sizeof(uint8_t) + padd_size +
+		       client->num_confidence_levels)) *
+		      sizeof(uint8_t);
+	q6lsm_set_param_common(&cfl->common, ids,
+			       param_size, set_param_opcode);
+	cfl->num_confidence_levels = client->num_confidence_levels;
+
+	pr_debug("%s: CMD PARAM SIZE = %d\n",
+		 __func__, param_size);
+	pr_debug("%s: Num conf_level = %d\n",
+		 __func__, client->num_confidence_levels);
+
+	memcpy(conf_levels, client->confidence_levels,
+	       client->num_confidence_levels);
+	for (i = 0; i < client->num_confidence_levels; i++)
+		pr_debug("%s: Confidence_level[%d] = %d\n",
+			 __func__, i, conf_levels[i]);
+
+	rc = q6lsm_apr_send_pkt(client, client->apr,
+				packet, true, NULL);
+	if (rc)
+		pr_err("%s: confidence_levels cmd failed, err = %d\n",
+			__func__, rc);
+	kfree(packet);
+	return rc;
+}
+
+static int q6lsm_send_param_opmode(struct lsm_client *client,
+		struct lsm_module_param_ids *opmode_ids,
+		u32 set_param_opcode)
+{
+	int rc;
+	struct lsm_cmd_set_params_opmode opmode_params;
+	struct apr_hdr  *msg_hdr;
+
+	struct lsm_param_op_mode *op_mode;
+	u32 data_payload_size, param_size;
+
+	msg_hdr = &opmode_params.msg_hdr;
+	q6lsm_add_hdr(client, msg_hdr,
+		      sizeof(opmode_params), true);
+	msg_hdr->opcode = set_param_opcode;
+	data_payload_size = sizeof(opmode_params) -
+			    sizeof(*msg_hdr) -
+			    sizeof(opmode_params.params_hdr);
+	q6lsm_set_param_hdr_info(&opmode_params.params_hdr,
+				 data_payload_size, 0, 0, 0);
+	op_mode = &opmode_params.op_mode;
+
+
+	param_size = sizeof(struct lsm_param_op_mode) -
+		     sizeof(op_mode->common);
+	q6lsm_set_param_common(&op_mode->common,
+			       opmode_ids, param_size,
+			       set_param_opcode);
+	op_mode->minor_version = QLSM_PARAM_ID_MINOR_VERSION;
+	op_mode->mode = client->mode;
+	op_mode->reserved = 0;
+	pr_debug("%s: mode = 0x%x", __func__, op_mode->mode);
+
+	rc = q6lsm_apr_send_pkt(client, client->apr,
+				&opmode_params, true, NULL);
+	if (rc)
+		pr_err("%s: Failed set_params opcode 0x%x, rc %d\n",
+		       __func__, msg_hdr->opcode, rc);
+
+	pr_debug("%s: leave %d\n", __func__, rc);
+	return rc;
+}
+
+void set_lsm_port(int lsm_port)
+{
+	lsm_afe_port = lsm_port;
+}
+
+int get_lsm_port(void)
+{
+	return lsm_afe_port;
+}
+
+int q6lsm_set_port_connected(struct lsm_client *client)
+{
+	int rc;
+	struct lsm_cmd_set_connectport connectport;
+	struct lsm_module_param_ids connectport_ids;
+	struct apr_hdr *msg_hdr;
+	struct lsm_param_connect_to_port *connect_to_port;
+	u32 data_payload_size, param_size, set_param_opcode;
+
+	if (client->use_topology) {
+		set_param_opcode = LSM_SESSION_CMD_SET_PARAMS_V2;
+		connectport_ids.module_id = LSM_MODULE_ID_FRAMEWORK;
+		connectport_ids.param_id = LSM_PARAM_ID_CONNECT_TO_PORT;
+	} else {
+		set_param_opcode = LSM_SESSION_CMD_SET_PARAMS;
+		connectport_ids.module_id = LSM_MODULE_ID_VOICE_WAKEUP;
+		connectport_ids.param_id = LSM_PARAM_ID_CONNECT_TO_PORT;
+	}
+	client->connect_to_port = get_lsm_port();
+
+	msg_hdr = &connectport.msg_hdr;
+	q6lsm_add_hdr(client, msg_hdr,
+		      sizeof(connectport), true);
+	msg_hdr->opcode = set_param_opcode;
+	data_payload_size = sizeof(connectport) -
+			    sizeof(*msg_hdr) -
+			    sizeof(connectport.params_hdr);
+	q6lsm_set_param_hdr_info(&connectport.params_hdr,
+				 data_payload_size, 0, 0, 0);
+	connect_to_port = &connectport.connect_to_port;
+
+	param_size = (sizeof(struct lsm_param_connect_to_port) -
+		      sizeof(connect_to_port->common));
+	q6lsm_set_param_common(&connect_to_port->common,
+			       &connectport_ids, param_size,
+			       set_param_opcode);
+	connect_to_port->minor_version = QLSM_PARAM_ID_MINOR_VERSION;
+	connect_to_port->port_id = client->connect_to_port;
+	connect_to_port->reserved = 0;
+	pr_debug("%s: port= %d", __func__, connect_to_port->port_id);
+
+	rc = q6lsm_apr_send_pkt(client, client->apr,
+				&connectport, true, NULL);
+	if (rc)
+		pr_err("%s: Failed set_params opcode 0x%x, rc %d\n",
+			__func__, msg_hdr->opcode, rc);
+
+	return rc;
+}
+
+static int q6lsm_send_param_polling_enable(struct lsm_client *client,
+		bool poll_en,
+		struct lsm_module_param_ids *poll_enable_ids,
+		u32 set_param_opcode)
+{
+	int rc = 0;
+	struct lsm_cmd_poll_enable cmd;
+	struct apr_hdr  *msg_hdr;
+	struct lsm_param_poll_enable *poll_enable;
+	u32 data_payload_size, param_size;
+
+	msg_hdr = &cmd.msg_hdr;
+	q6lsm_add_hdr(client, msg_hdr,
+		      sizeof(struct lsm_cmd_poll_enable), true);
+	msg_hdr->opcode = set_param_opcode;
+	data_payload_size = sizeof(struct lsm_cmd_poll_enable) -
+			    sizeof(struct apr_hdr) -
+			    sizeof(struct lsm_set_params_hdr);
+	q6lsm_set_param_hdr_info(&cmd.params_hdr,
+				 data_payload_size, 0, 0, 0);
+	poll_enable = &cmd.poll_enable;
+
+	param_size = (sizeof(struct lsm_param_poll_enable) -
+		      sizeof(poll_enable->common));
+	q6lsm_set_param_common(&poll_enable->common,
+			       poll_enable_ids, param_size,
+			       set_param_opcode);
+	poll_enable->minor_version = QLSM_PARAM_ID_MINOR_VERSION;
+	poll_enable->polling_enable = (poll_en) ? 1 : 0;
+	pr_debug("%s: poll enable= %d", __func__, poll_enable->polling_enable);
+
+	rc = q6lsm_apr_send_pkt(client, client->apr,
+				&cmd, true, NULL);
+	if (rc)
+		pr_err("%s: Failed set_params opcode 0x%x, rc %d\n",
+		       __func__, msg_hdr->opcode, rc);
+
+	return rc;
+}
+
+int q6lsm_set_fwk_mode_cfg(struct lsm_client *client,
+			   uint32_t event_mode)
+{
+	int rc = 0;
+	struct lsm_cmd_set_fwk_mode_cfg cmd;
+	struct lsm_module_param_ids fwk_mode_cfg_ids;
+	struct apr_hdr  *msg_hdr;
+	struct lsm_param_fwk_mode_cfg *fwk_mode_cfg;
+	u32 data_payload_size, param_size, set_param_opcode;
+
+	if (client->use_topology) {
+		set_param_opcode = LSM_SESSION_CMD_SET_PARAMS_V2;
+		fwk_mode_cfg_ids.module_id = LSM_MODULE_ID_FRAMEWORK;
+		fwk_mode_cfg_ids.param_id = LSM_PARAM_ID_FWK_MODE_CONFIG;
+	} else {
+		pr_debug("%s: Ignore sending event mode\n", __func__);
+		return rc;
+	}
+
+	msg_hdr = &cmd.msg_hdr;
+	q6lsm_add_hdr(client, msg_hdr,
+		      sizeof(struct lsm_cmd_set_fwk_mode_cfg), true);
+	msg_hdr->opcode = set_param_opcode;
+	data_payload_size = sizeof(struct lsm_cmd_set_fwk_mode_cfg) -
+			    sizeof(struct apr_hdr) -
+			    sizeof(struct lsm_set_params_hdr);
+	q6lsm_set_param_hdr_info(&cmd.params_hdr,
+				 data_payload_size, 0, 0, 0);
+	fwk_mode_cfg = &cmd.fwk_mode_cfg;
+
+	param_size = (sizeof(struct lsm_param_fwk_mode_cfg) -
+		      sizeof(fwk_mode_cfg->common));
+	q6lsm_set_param_common(&fwk_mode_cfg->common,
+			       &fwk_mode_cfg_ids, param_size,
+			       set_param_opcode);
+
+	fwk_mode_cfg->minor_version = QLSM_PARAM_ID_MINOR_VERSION;
+	fwk_mode_cfg->mode = event_mode;
+	pr_debug("%s: mode = %d\n", __func__, fwk_mode_cfg->mode);
+
+	rc = q6lsm_apr_send_pkt(client, client->apr,
+				&cmd, true, NULL);
+	if (rc)
+		pr_err("%s: Failed set_params opcode 0x%x, rc %d\n",
+		       __func__, msg_hdr->opcode, rc);
+	return rc;
+}
+
+static int q6lsm_arrange_mch_map(struct lsm_param_media_fmt *media_fmt,
+			 int channel_count)
+{
+	int rc = 0;
+
+	memset(media_fmt->channel_mapping, 0, LSM_MAX_NUM_CHANNELS);
+
+	switch (channel_count) {
+	case 1:
+		media_fmt->channel_mapping[0] = PCM_CHANNEL_FC;
+		break;
+	case 2:
+		media_fmt->channel_mapping[0] = PCM_CHANNEL_FL;
+		media_fmt->channel_mapping[1] = PCM_CHANNEL_FR;
+		break;
+	case 3:
+		media_fmt->channel_mapping[0] = PCM_CHANNEL_FL;
+		media_fmt->channel_mapping[1] = PCM_CHANNEL_FR;
+		media_fmt->channel_mapping[2] = PCM_CHANNEL_FC;
+		break;
+	case 4:
+		media_fmt->channel_mapping[0] = PCM_CHANNEL_FL;
+		media_fmt->channel_mapping[1] = PCM_CHANNEL_FR;
+		media_fmt->channel_mapping[2] = PCM_CHANNEL_LS;
+		media_fmt->channel_mapping[3] = PCM_CHANNEL_RS;
+		break;
+	default:
+		pr_err("%s: invalid num_chan %d\n", __func__, channel_count);
+		rc = -EINVAL;
+		break;
+	}
+	return rc;
+}
+
+int q6lsm_set_media_fmt_params(struct lsm_client *client)
+{
+	int rc = 0;
+	struct lsm_cmd_set_media_fmt cmd;
+	struct lsm_module_param_ids media_fmt_ids;
+	struct apr_hdr  *msg_hdr;
+	struct lsm_param_media_fmt *media_fmt;
+	u32 data_payload_size, param_size, set_param_opcode;
+	struct lsm_hw_params param = client->hw_params;
+
+	if (client->use_topology) {
+		set_param_opcode = LSM_SESSION_CMD_SET_PARAMS_V2;
+		media_fmt_ids.module_id = LSM_MODULE_ID_FRAMEWORK;
+		media_fmt_ids.param_id = LSM_PARAM_ID_MEDIA_FMT;
+	} else {
+		pr_debug("%s: Ignore sending media format\n", __func__);
+		goto err_ret;
+	}
+
+	msg_hdr = &cmd.msg_hdr;
+	q6lsm_add_hdr(client, msg_hdr,
+		      sizeof(struct lsm_cmd_set_media_fmt), true);
+	msg_hdr->opcode = set_param_opcode;
+	data_payload_size = sizeof(struct lsm_cmd_set_media_fmt) -
+			    sizeof(struct apr_hdr) -
+			    sizeof(struct lsm_set_params_hdr);
+	q6lsm_set_param_hdr_info(&cmd.params_hdr,
+				 data_payload_size, 0, 0, 0);
+	media_fmt = &cmd.media_fmt;
+
+	param_size = (sizeof(struct lsm_param_media_fmt) -
+		      sizeof(media_fmt->common));
+	q6lsm_set_param_common(&media_fmt->common,
+			       &media_fmt_ids, param_size,
+			       set_param_opcode);
+
+	media_fmt->minor_version = QLSM_PARAM_ID_MINOR_VERSION_2;
+	media_fmt->sample_rate = param.sample_rate;
+	media_fmt->num_channels = param.num_chs;
+	media_fmt->bit_width = param.sample_size;
+
+	rc = q6lsm_arrange_mch_map(media_fmt, media_fmt->num_channels);
+	if (rc)
+		goto err_ret;
+
+	pr_debug("%s: sample rate= %d, channels %d bit width %d\n",
+		 __func__, media_fmt->sample_rate, media_fmt->num_channels,
+		 media_fmt->bit_width);
+
+	rc = q6lsm_apr_send_pkt(client, client->apr,
+				&cmd, true, NULL);
+	if (rc)
+		pr_err("%s: Failed set_params opcode 0x%x, rc %d\n",
+		       __func__, msg_hdr->opcode, rc);
+err_ret:
+	return rc;
+}
+
+int q6lsm_set_data(struct lsm_client *client,
+			   enum lsm_detection_mode mode,
+			   bool detectfailure)
+{
+	int rc = 0;
+	struct lsm_module_param_ids opmode_ids;
+	struct lsm_module_param_ids conf_levels_ids;
+
+	if (!client->confidence_levels) {
+		/*
+		 * It is possible that confidence levels are
+		 * not provided. This is not an error condition;
+		 * return gracefully without reporting one.
+		 */
+		pr_debug("%s: no conf levels to set\n",
+			__func__);
+		return rc;
+	}
+
+	if (mode == LSM_MODE_KEYWORD_ONLY_DETECTION) {
+		client->mode = 0x01;
+	} else if (mode == LSM_MODE_USER_KEYWORD_DETECTION) {
+		client->mode = 0x03;
+	} else {
+		pr_err("%s: Incorrect detection mode %d\n", __func__, mode);
+		rc = -EINVAL;
+		goto err_ret;
+	}
+	client->mode |= detectfailure << 2;
+
+	opmode_ids.module_id = LSM_MODULE_ID_VOICE_WAKEUP;
+	opmode_ids.param_id = LSM_PARAM_ID_OPERATION_MODE;
+
+	rc = q6lsm_send_param_opmode(client, &opmode_ids,
+					LSM_SESSION_CMD_SET_PARAMS);
+	if (rc) {
+		pr_err("%s: Failed to set lsm config params %d\n",
+			__func__, rc);
+		goto err_ret;
+	}
+
+	conf_levels_ids.module_id = LSM_MODULE_ID_VOICE_WAKEUP;
+	conf_levels_ids.param_id = LSM_PARAM_ID_MIN_CONFIDENCE_LEVELS;
+
+	rc = q6lsm_send_confidence_levels(client, &conf_levels_ids,
+					 LSM_SESSION_CMD_SET_PARAMS);
+	if (rc) {
+		pr_err("%s: Failed to send conf_levels, err = %d\n",
+			__func__, rc);
+		goto err_ret;
+	}
+
+	rc = q6lsm_send_cal(client, LSM_SESSION_CMD_SET_PARAMS);
+	if (rc) {
+		pr_err("%s: Failed to send calibration data %d\n",
+			__func__, rc);
+		goto err_ret;
+	}
+
+err_ret:
+	return rc;
+}
+
+int q6lsm_register_sound_model(struct lsm_client *client,
+			       enum lsm_detection_mode mode,
+			       bool detectfailure)
+{
+	int rc;
+	struct lsm_cmd_reg_snd_model cmd;
+
+	memset(&cmd, 0, sizeof(cmd));
+	rc = q6lsm_set_data(client, mode, detectfailure);
+	if (rc) {
+		pr_err("%s: Failed to set lsm data, err = %d\n",
+			__func__, rc);
+		return rc;
+	}
+
+	q6lsm_add_hdr(client, &cmd.hdr, sizeof(cmd), true);
+	cmd.hdr.opcode = LSM_SESSION_CMD_REGISTER_SOUND_MODEL;
+	cmd.model_addr_lsw = lower_32_bits(client->sound_model.phys);
+	cmd.model_addr_msw = msm_audio_populate_upper_32_bits(
+						client->sound_model.phys);
+	cmd.model_size = client->sound_model.size;
+	/* read updated mem_map_handle by q6lsm_mmapcallback */
+	rmb();
+	cmd.mem_map_handle = client->sound_model.mem_map_handle;
+
+	pr_debug("%s: addr %pK, size %d, handle 0x%x\n", __func__,
+		&client->sound_model.phys, cmd.model_size, cmd.mem_map_handle);
+	rc = q6lsm_apr_send_pkt(client, client->apr, &cmd, true, NULL);
+	if (rc)
+		pr_err("%s: Failed cmd op[0x%x]rc[%d]\n", __func__,
+		       cmd.hdr.opcode, rc);
+	else
+		pr_debug("%s: Register sound model succeeded\n", __func__);
+
+	return rc;
+}
+
+int q6lsm_deregister_sound_model(struct lsm_client *client)
+{
+	int rc;
+	struct lsm_cmd_reg_snd_model cmd;
+
+	if (!client) {
+		pr_err("APR handle NULL\n");
+		return -EINVAL;
+	}
+	if (!client->apr) {
+		pr_err("APR client handle NULL\n");
+		return -EINVAL;
+	}
+
+	if (CHECK_SESSION(client->session)) {
+		pr_err("%s: session[%d]", __func__, client->session);
+		return -EINVAL;
+	}
+
+	memset(&cmd, 0, sizeof(cmd));
+	q6lsm_add_hdr(client, &cmd.hdr, sizeof(cmd.hdr), false);
+	cmd.hdr.opcode = LSM_SESSION_CMD_DEREGISTER_SOUND_MODEL;
+
+	rc = q6lsm_apr_send_pkt(client, client->apr, &cmd.hdr, true, NULL);
+	if (rc) {
+		pr_err("%s: Failed cmd opcode 0x%x, rc %d\n", __func__,
+		       cmd.hdr.opcode, rc);
+	} else {
+		pr_debug("%s: Deregister sound model succeeded\n", __func__);
+	}
+
+	q6lsm_snd_model_buf_free(client);
+
+	return rc;
+}
+
+static void q6lsm_add_mmaphdr(struct lsm_client *client, struct apr_hdr *hdr,
+			      u32 pkt_size, u32 cmd_flg, u32 token)
+{
+	pr_debug("%s: pkt size=%d cmd_flg=%d session=%d\n", __func__, pkt_size,
+		 cmd_flg, client->session);
+	hdr->hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+				       APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
+	hdr->src_port = 0x00;
+	hdr->dest_port = client->session;
+	if (cmd_flg)
+		hdr->token = token;
+	hdr->pkt_size = pkt_size;
+}
+
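+/*
+ * Map a single shared memory region with the ADSP; the memory-map
+ * handle is written back through mmap_p by q6lsm_mmapcallback when
+ * the map response arrives.
+ */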
+static int q6lsm_memory_map_regions(struct lsm_client *client,
+				    dma_addr_t dma_addr_p, uint32_t dma_buf_sz,
+				    uint32_t *mmap_p)
+{
+	struct avs_cmd_shared_mem_map_regions *mmap_regions = NULL;
+	struct avs_shared_map_region_payload *mregions = NULL;
+	void *mmap_region_cmd = NULL;
+	void *payload = NULL;
+	int rc;
+	int cmd_size = 0;
+
+	pr_debug("%s: dma_addr_p 0x%pK, dma_buf_sz %d, mmap_p 0x%pK, session %d\n",
+		__func__, &dma_addr_p, dma_buf_sz, mmap_p,
+		client->session);
+	if (CHECK_SESSION(client->session)) {
+		pr_err("%s: session[%d]", __func__, client->session);
+		return -EINVAL;
+	}
+	cmd_size = sizeof(struct avs_cmd_shared_mem_map_regions) +
+		   sizeof(struct avs_shared_map_region_payload);
+
+	mmap_region_cmd = kzalloc(cmd_size, GFP_KERNEL);
+	if (!mmap_region_cmd) {
+		pr_err("%s: memory allocation failed\n", __func__);
+		return -ENOMEM;
+	}
+
+	mmap_regions = (struct avs_cmd_shared_mem_map_regions *)mmap_region_cmd;
+	q6lsm_add_mmaphdr(client, &mmap_regions->hdr, cmd_size, true,
+			  (client->session << 8));
+
+	mmap_regions->hdr.opcode = LSM_SESSION_CMD_SHARED_MEM_MAP_REGIONS;
+	mmap_regions->mem_pool_id = ADSP_MEMORY_MAP_SHMEM8_4K_POOL;
+	mmap_regions->num_regions = 1;
+	mmap_regions->property_flag = 0x00;
+	payload = ((u8 *)mmap_region_cmd +
+		   sizeof(struct avs_cmd_shared_mem_map_regions));
+	mregions = (struct avs_shared_map_region_payload *)payload;
+
+	mregions->shm_addr_lsw = lower_32_bits(dma_addr_p);
+	mregions->shm_addr_msw = msm_audio_populate_upper_32_bits(dma_addr_p);
+	mregions->mem_size_bytes = dma_buf_sz;
+
+	rc = q6lsm_apr_send_pkt(client, client->mmap_apr, mmap_region_cmd,
+				true, mmap_p);
+	if (rc)
+		pr_err("%s: Failed mmap_regions opcode 0x%x, rc %d\n",
+			__func__, mmap_regions->hdr.opcode, rc);
+
+	pr_debug("%s: leave %d\n", __func__, rc);
+	kfree(mmap_region_cmd);
+	return rc;
+}
+
+static int q6lsm_memory_unmap_regions(struct lsm_client *client,
+				      uint32_t handle)
+{
+	struct avs_cmd_shared_mem_unmap_regions unmap;
+	int rc = 0;
+	int cmd_size = 0;
+	if (CHECK_SESSION(client->session)) {
+		pr_err("%s: session[%d]", __func__, client->session);
+		return -EINVAL;
+	}
+	cmd_size = sizeof(struct avs_cmd_shared_mem_unmap_regions);
+	q6lsm_add_mmaphdr(client, &unmap.hdr, cmd_size,
+			  true, (client->session << 8));
+	unmap.hdr.opcode = LSM_SESSION_CMD_SHARED_MEM_UNMAP_REGIONS;
+	unmap.mem_map_handle = handle;
+
+	pr_debug("%s: unmap handle 0x%x\n", __func__, unmap.mem_map_handle);
+	rc = q6lsm_apr_send_pkt(client, client->mmap_apr, &unmap, true,
+				NULL);
+	if (rc)
+		pr_err("%s: Failed mmap_regions opcode 0x%x rc %d\n",
+		       __func__, unmap.hdr.opcode, rc);
+
+	return rc;
+}
+
+static int q6lsm_send_cal(struct lsm_client *client,
+			  u32 set_params_opcode)
+{
+	int rc = 0;
+	struct lsm_cmd_set_params params;
+	struct lsm_set_params_hdr *params_hdr = &params.param_hdr;
+	struct apr_hdr *msg_hdr = &params.msg_hdr;
+	struct cal_block_data *cal_block = NULL;
+
+	pr_debug("%s: Session id %d\n", __func__, client->session);
+	if (CHECK_SESSION(client->session)) {
+		pr_err("%s: session[%d]", __func__, client->session);
+		return -EINVAL;
+	}
+
+	if (lsm_common.cal_data[LSM_CAL_IDX] == NULL)
+		goto done;
+
+	mutex_lock(&lsm_common.cal_data[LSM_CAL_IDX]->lock);
+	cal_block = cal_utils_get_only_cal_block(
+		lsm_common.cal_data[LSM_CAL_IDX]);
+
+	if (!cal_block || cal_block->cal_data.size <= 0) {
+		pr_debug("%s: No cal to send!\n", __func__);
+		goto unlock;
+	}
+
+	if (cal_block->cal_data.size != client->lsm_cal_size) {
+		pr_err("%s: Cal size %zd doesn't match lsm cal size %d\n",
+			__func__, cal_block->cal_data.size,
+			client->lsm_cal_size);
+		rc = -EINVAL;
+		goto unlock;
+	}
+	/* Cache mmap address, only map once or if new addr */
+	lsm_common.common_client[client->session].session = client->session;
+	q6lsm_add_hdr(client, msg_hdr, sizeof(params), true);
+	msg_hdr->opcode = set_params_opcode;
+	q6lsm_set_param_hdr_info(params_hdr,
+			cal_block->cal_data.size,
+			lower_32_bits(client->lsm_cal_phy_addr),
+			msm_audio_populate_upper_32_bits(
+				client->lsm_cal_phy_addr),
+			client->sound_model.mem_map_handle);
+
+	pr_debug("%s: Cal Size = %zd", __func__,
+		cal_block->cal_data.size);
+	rc = q6lsm_apr_send_pkt(client, client->apr, &params, true, NULL);
+	if (rc)
+		pr_err("%s: Failed set_params opcode 0x%x, rc %d\n",
+		       __func__, msg_hdr->opcode, rc);
+unlock:
+	mutex_unlock(&lsm_common.cal_data[LSM_CAL_IDX]->lock);
+done:
+	return rc;
+}
+
+
+int q6lsm_snd_model_buf_free(struct lsm_client *client)
+{
+	int rc;
+
+	pr_debug("%s: Session id %d\n", __func__, client->session);
+	if (CHECK_SESSION(client->session)) {
+		pr_err("%s: session[%d]", __func__, client->session);
+		return -EINVAL;
+	}
+
+	mutex_lock(&client->cmd_lock);
+	rc = q6lsm_memory_unmap_regions(client,
+					client->sound_model.mem_map_handle);
+	if (rc)
+		pr_err("%s: CMD Memory_unmap_regions failed %d\n",
+			__func__, rc);
+
+	if (client->sound_model.data) {
+		msm_audio_ion_free(client->sound_model.client,
+				 client->sound_model.handle);
+		client->sound_model.client = NULL;
+		client->sound_model.handle = NULL;
+		client->sound_model.data = NULL;
+		client->sound_model.phys = 0;
+		client->lsm_cal_phy_addr = 0;
+		client->lsm_cal_size = 0;
+	}
+	mutex_unlock(&client->cmd_lock);
+	return rc;
+}
+
+static struct lsm_client *q6lsm_get_lsm_client(int session_id)
+{
+	unsigned long flags;
+	struct lsm_client *client = NULL;
+
+	spin_lock_irqsave(&lsm_session_lock, flags);
+	if (session_id < LSM_MIN_SESSION_ID || session_id > LSM_MAX_SESSION_ID)
+		pr_err("%s: Invalid session %d\n", __func__, session_id);
+	else if (!lsm_session[session_id])
+		pr_err("%s: Not an active session %d\n", __func__, session_id);
+	else
+		client = lsm_session[session_id];
+	spin_unlock_irqrestore(&lsm_session_lock, flags);
+	return client;
+}
+
+/*
+ * q6lsm_mmapcallback : atomic context
+ */
+static int q6lsm_mmapcallback(struct apr_client_data *data, void *priv)
+{
+	unsigned long flags;
+	uint32_t command;
+	uint32_t retcode;
+	uint32_t sid;
+	const uint32_t *payload = data->payload;
+	struct lsm_client *client = NULL;
+
+	if (data->opcode == RESET_EVENTS) {
+		sid = (data->token >> 8) & 0x0F;
+		pr_debug("%s: SSR event received 0x%x, event 0x%x,\n"
+			 "proc 0x%x SID 0x%x\n", __func__, data->opcode,
+			 data->reset_event, data->reset_proc, sid);
+		lsm_common.common_client[sid].lsm_cal_phy_addr = 0;
+		cal_utils_clear_cal_block_q6maps(LSM_MAX_CAL_IDX,
+			lsm_common.cal_data);
+		lsm_common.set_custom_topology = 1;
+		return 0;
+	}
+
+	command = payload[0];
+	retcode = payload[1];
+	sid = (data->token >> 8) & 0x0F;
+	pr_debug("%s: opcode 0x%x command 0x%x return code 0x%x SID 0x%x\n",
+		 __func__, data->opcode, command, retcode, sid);
+	client = q6lsm_get_lsm_client(sid);
+	if (!client) {
+		pr_debug("%s: Session %d already freed\n", __func__, sid);
+		return 0;
+	}
+
+	switch (data->opcode) {
+	case LSM_SESSION_CMDRSP_SHARED_MEM_MAP_REGIONS:
+		if (atomic_read(&client->cmd_state) == CMD_STATE_WAIT_RESP) {
+			spin_lock_irqsave(&mmap_lock, flags);
+			*mmap_handle_p = command;
+			/* spin_unlock_irqrestore implies barrier */
+			spin_unlock_irqrestore(&mmap_lock, flags);
+			atomic_set(&client->cmd_state, CMD_STATE_CLEARED);
+			wake_up(&client->cmd_wait);
+		}
+		break;
+	case APR_BASIC_RSP_RESULT:
+		switch (command) {
+		case LSM_SESSION_CMD_SHARED_MEM_UNMAP_REGIONS:
+			atomic_set(&client->cmd_state, CMD_STATE_CLEARED);
+			wake_up(&client->cmd_wait);
+			break;
+		case LSM_SESSION_CMD_SHARED_MEM_MAP_REGIONS:
+			if (retcode != 0) {
+				/* error state, signal to stop waiting */
+				if (atomic_read(&client->cmd_state) ==
+					CMD_STATE_WAIT_RESP) {
+					spin_lock_irqsave(&mmap_lock, flags);
+					/* implies barrier */
+					spin_unlock_irqrestore(&mmap_lock,
+						flags);
+					atomic_set(&client->cmd_state,
+						CMD_STATE_CLEARED);
+					wake_up(&client->cmd_wait);
+				}
+			}
+			break;
+		default:
+			pr_warn("%s: Unexpected command 0x%x\n", __func__,
+				command);
+		}
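+		/* fallthrough to log the response in the default case */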
+	default:
+		pr_debug("%s: command 0x%x return code 0x%x opcode 0x%x\n",
+			 __func__, command, retcode, data->opcode);
+		break;
+	}
+	if (client->cb)
+		client->cb(data->opcode, data->token,
+			   data->payload, client->priv);
+	return 0;
+}
+
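+/*
+ * Allocate one ION buffer laid out as:
+ *   [ sound model | zero pad up to LSM_ALIGN_BOUNDARY | calibration data ]
+ * and map it with the ADSP. The calibration copy starts at
+ * lsm_cal_phy_addr.
+ */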
+int q6lsm_snd_model_buf_alloc(struct lsm_client *client, size_t len,
+			      bool allocate_module_data)
+{
+	int rc = -EINVAL;
+	struct cal_block_data *cal_block = NULL;
+	size_t pad_zero = 0, total_mem = 0;
+
+	if (!client || len <= LSM_ALIGN_BOUNDARY)
+		return rc;
+
+	mutex_lock(&client->cmd_lock);
+
+	mutex_lock(&lsm_common.cal_data[LSM_CAL_IDX]->lock);
+	cal_block = cal_utils_get_only_cal_block(
+		lsm_common.cal_data[LSM_CAL_IDX]);
+	if (cal_block == NULL)
+		goto fail;
+
+	pr_debug("%s:Snd Model len = %zd cal size %zd phys addr %pK", __func__,
+		len, cal_block->cal_data.size,
+		&cal_block->cal_data.paddr);
+	if (!cal_block->cal_data.paddr) {
+		pr_err("%s: No LSM calibration set for session", __func__);
+		rc = -EINVAL;
+		goto fail;
+	}
+	if (!client->sound_model.data) {
+		/*
+		 * If the sound model is sent via set_param,
+		 * memory needs to be allocated for the
+		 * set_param payload as well.
+		 */
+		if (allocate_module_data)
+			len += sizeof(struct lsm_param_payload_common);
+
+		client->sound_model.size = len;
+		pad_zero = (LSM_ALIGN_BOUNDARY -
+			    (len % LSM_ALIGN_BOUNDARY));
+		if ((len > SIZE_MAX - pad_zero) ||
+		    (len + pad_zero >
+		     SIZE_MAX - cal_block->cal_data.size)) {
+			pr_err("%s: invalid allocation size, len = %zd, pad_zero =%zd, cal_size = %zd\n",
+				__func__, len, pad_zero,
+				cal_block->cal_data.size);
+			rc = -EINVAL;
+			goto fail;
+		}
+
+		total_mem = PAGE_ALIGN(pad_zero + len +
+			cal_block->cal_data.size);
+		pr_debug("%s: Pad zeros sound model %zd Total mem %zd\n",
+				 __func__, pad_zero, total_mem);
+		rc = msm_audio_ion_alloc("lsm_client",
+				&client->sound_model.client,
+				&client->sound_model.handle,
+				total_mem,
+				&client->sound_model.phys,
+				&len,
+				&client->sound_model.data);
+		if (rc) {
+			pr_err("%s: Audio ION alloc is failed, rc = %d\n",
+				__func__, rc);
+			goto fail;
+		}
+	pr_debug("%s: Length = %zd\n", __func__, len);
+	client->lsm_cal_phy_addr = (pad_zero +
+				    client->sound_model.phys +
+				    client->sound_model.size);
+	client->lsm_cal_size = cal_block->cal_data.size;
+	memcpy((client->sound_model.data + pad_zero +
+		client->sound_model.size),
+	       (uint32_t *)cal_block->cal_data.kvaddr, client->lsm_cal_size);
+	pr_debug("%s: Copy cal start virt_addr %pK phy_addr %pK\n"
+			 "Offset cal virtual Addr %pK\n", __func__,
+			 client->sound_model.data, &client->sound_model.phys,
+			 (pad_zero + client->sound_model.data +
+			 client->sound_model.size));
+	} else {
+		pr_err("%s: sound model busy\n", __func__);
+		rc = -EBUSY;
+		goto fail;
+	}
+	mutex_unlock(&lsm_common.cal_data[LSM_CAL_IDX]->lock);
+	mutex_unlock(&client->cmd_lock);
+
+	rc = q6lsm_memory_map_regions(client, client->sound_model.phys,
+				      len,
+				      &client->sound_model.mem_map_handle);
+	if (rc) {
+		pr_err("%s: CMD Memory_map_regions failed %d\n", __func__, rc);
+		goto exit;
+	}
+
+	return 0;
+fail:
+	mutex_unlock(&lsm_common.cal_data[LSM_CAL_IDX]->lock);
+	mutex_unlock(&client->cmd_lock);
+exit:
+	q6lsm_snd_model_buf_free(client);
+	return rc;
+}
+
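+/* Issue a simple (payload-less) session command such as START or STOP */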
+static int q6lsm_cmd(struct lsm_client *client, int opcode, bool wait)
+{
+	struct apr_hdr hdr;
+	int rc;
+
+	pr_debug("%s: enter opcode %x wait %d\n", __func__, opcode, wait);
+	q6lsm_add_hdr(client, &hdr, sizeof(hdr), true);
+	switch (opcode) {
+	case LSM_SESSION_CMD_START:
+	case LSM_SESSION_CMD_STOP:
+	case LSM_SESSION_CMD_CLOSE_TX:
+	case LSM_SESSION_CMD_EOB:
+		hdr.opcode = opcode;
+		break;
+	default:
+		pr_err("%s: Invalid opcode 0x%x\n", __func__, opcode);
+		return -EINVAL;
+	}
+	rc = q6lsm_apr_send_pkt(client, client->apr, &hdr, wait, NULL);
+	if (rc)
+		pr_err("%s: Failed commmand 0x%x\n", __func__, hdr.opcode);
+
+	pr_debug("%s: leave %d\n", __func__, rc);
+	return rc;
+}
+
+static int q6lsm_send_param_epd_thres(
+		struct lsm_client *client,
+		void *data, struct lsm_module_param_ids *ids)
+{
+	struct snd_lsm_ep_det_thres *ep_det_data;
+	struct lsm_cmd_set_epd_threshold epd_cmd;
+	struct apr_hdr *msg_hdr = &epd_cmd.msg_hdr;
+	struct lsm_set_params_hdr *param_hdr =
+			&epd_cmd.param_hdr;
+	struct lsm_param_epd_thres *epd_thres =
+			&epd_cmd.epd_thres;
+	int rc;
+
+	ep_det_data = (struct snd_lsm_ep_det_thres *) data;
+	q6lsm_add_hdr(client, msg_hdr,
+		      sizeof(epd_cmd), true);
+	msg_hdr->opcode = LSM_SESSION_CMD_SET_PARAMS_V2;
+	q6lsm_set_param_hdr_info(param_hdr,
+		sizeof(*epd_thres), 0, 0, 0);
+	q6lsm_set_param_common(&epd_thres->common, ids,
+		sizeof(*epd_thres) - sizeof(epd_thres->common),
+		LSM_SESSION_CMD_SET_PARAMS_V2);
+	epd_thres->minor_version = QLSM_PARAM_ID_MINOR_VERSION;
+	epd_thres->epd_begin = ep_det_data->epd_begin;
+	epd_thres->epd_end = ep_det_data->epd_end;
+
+	rc = q6lsm_apr_send_pkt(client, client->apr,
+				&epd_cmd, true, NULL);
+	if (unlikely(rc))
+		pr_err("%s: EPD_THRESHOLD failed, rc %d\n",
+			__func__, rc);
+	return rc;
+}
+
+static int q6lsm_send_param_gain(
+		struct lsm_client *client,
+		u16 gain, struct lsm_module_param_ids *ids)
+{
+	struct lsm_cmd_set_gain lsm_cmd_gain;
+	struct apr_hdr *msg_hdr = &lsm_cmd_gain.msg_hdr;
+	struct lsm_param_gain *lsm_gain = &lsm_cmd_gain.lsm_gain;
+	int rc;
+
+	q6lsm_add_hdr(client, msg_hdr,
+		      sizeof(lsm_cmd_gain), true);
+	msg_hdr->opcode = LSM_SESSION_CMD_SET_PARAMS_V2;
+	q6lsm_set_param_hdr_info(&lsm_cmd_gain.param_hdr,
+			sizeof(*lsm_gain), 0, 0, 0);
+	q6lsm_set_param_common(&lsm_gain->common, ids,
+		sizeof(*lsm_gain) - sizeof(lsm_gain->common),
+		LSM_SESSION_CMD_SET_PARAMS_V2);
+	lsm_gain->minor_version = QLSM_PARAM_ID_MINOR_VERSION;
+	lsm_gain->gain = gain;
+	lsm_gain->reserved = 0;
+
+	rc = q6lsm_apr_send_pkt(client, client->apr,
+				&lsm_cmd_gain, true, NULL);
+	if (unlikely(rc))
+		pr_err("%s: LSM_GAIN CMD send failed, rc %d\n",
+			 __func__, rc);
+	return rc;
+}
+
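+/*
+ * Dispatch a single LSM parameter by type; each case builds the
+ * appropriate SET_PARAMS(_V2) payload for the module/param IDs
+ * supplied in p_info.
+ */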
+int q6lsm_set_one_param(struct lsm_client *client,
+	struct lsm_params_info *p_info, void *data,
+	uint32_t param_type)
+{
+	int rc = 0, pkt_sz;
+	struct lsm_module_param_ids ids;
+	u8 *packet;
+
+	memset(&ids, 0, sizeof(ids));
+	switch (param_type) {
+	case LSM_ENDPOINT_DETECT_THRESHOLD: {
+		ids.module_id = p_info->module_id;
+		ids.param_id = p_info->param_id;
+		rc = q6lsm_send_param_epd_thres(client, data,
+						&ids);
+		break;
+	}
+
+	case LSM_OPERATION_MODE: {
+		struct snd_lsm_detect_mode *det_mode = data;
+		struct lsm_module_param_ids opmode_ids;
+
+		if (det_mode->mode == LSM_MODE_KEYWORD_ONLY_DETECTION) {
+			client->mode = 0x01;
+		} else if (det_mode->mode == LSM_MODE_USER_KEYWORD_DETECTION) {
+			client->mode = 0x03;
+		} else {
+			pr_err("%s: Incorrect detection mode %d\n",
+				__func__, det_mode->mode);
+			return -EINVAL;
+		}
+
+		client->mode |= det_mode->detect_failure << 2;
+
+		opmode_ids.module_id = p_info->module_id;
+		opmode_ids.param_id = p_info->param_id;
+
+		rc = q6lsm_send_param_opmode(client, &opmode_ids,
+					LSM_SESSION_CMD_SET_PARAMS_V2);
+		if (rc)
+			pr_err("%s: OPERATION_MODE failed, rc %d\n",
+				__func__, rc);
+		break;
+	}
+
+	case LSM_GAIN: {
+		struct snd_lsm_gain *lsm_gain = (struct snd_lsm_gain *) data;
+		ids.module_id = p_info->module_id;
+		ids.param_id = p_info->param_id;
+		rc = q6lsm_send_param_gain(client, lsm_gain->gain, &ids);
+		if (rc)
+			pr_err("%s: LSM_GAIN command failed, rc %d\n",
+				__func__, rc);
+		break;
+	}
+
+	case LSM_MIN_CONFIDENCE_LEVELS:
+		ids.module_id = p_info->module_id;
+		ids.param_id = p_info->param_id;
+		rc = q6lsm_send_confidence_levels(client, &ids,
+				LSM_SESSION_CMD_SET_PARAMS_V2);
+		if (rc)
+			pr_err("%s: CONFIDENCE_LEVELS cmd failed, rc %d\n",
+				 __func__, rc);
+		break;
+	case LSM_POLLING_ENABLE: {
+		struct snd_lsm_poll_enable *lsm_poll_enable =
+				(struct snd_lsm_poll_enable *) data;
+		ids.module_id = p_info->module_id;
+		ids.param_id = p_info->param_id;
+		rc = q6lsm_send_param_polling_enable(client,
+				lsm_poll_enable->poll_en, &ids,
+				LSM_SESSION_CMD_SET_PARAMS_V2);
+		if (rc)
+			pr_err("%s: POLLING ENABLE cmd failed, rc %d\n",
+				 __func__, rc);
+		break;
+	}
+
+	case LSM_REG_SND_MODEL: {
+		struct lsm_cmd_set_params model_param;
+		u32 payload_size;
+
+		memset(&model_param, 0, sizeof(model_param));
+		q6lsm_add_hdr(client, &model_param.msg_hdr,
+			      sizeof(model_param), true);
+		model_param.msg_hdr.opcode = LSM_SESSION_CMD_SET_PARAMS_V2;
+		payload_size = p_info->param_size +
+			       sizeof(struct lsm_param_payload_common);
+		q6lsm_set_param_hdr_info(&model_param.param_hdr,
+				payload_size,
+				lower_32_bits(client->sound_model.phys),
+				msm_audio_populate_upper_32_bits(
+					client->sound_model.phys),
+				client->sound_model.mem_map_handle);
+
+		rc = q6lsm_apr_send_pkt(client, client->apr,
+					&model_param, true, NULL);
+		if (rc) {
+			pr_err("%s: REG_SND_MODEL failed, rc %d\n",
+				__func__, rc);
+			return rc;
+		}
+
+		rc = q6lsm_send_cal(client, LSM_SESSION_CMD_SET_PARAMS);
+		if (rc)
+			pr_err("%s: Failed to send lsm cal, err = %d\n",
+				__func__, rc);
+		break;
+	}
+
+	case LSM_DEREG_SND_MODEL: {
+		struct lsm_param_payload_common *common;
+		struct lsm_cmd_set_params *param;
+
+		pkt_sz = sizeof(*param) + sizeof(*common);
+		packet = kzalloc(pkt_sz, GFP_KERNEL);
+		if (!packet) {
+			pr_err("%s: No memory for DEREG_SND_MODEL pkt, size = %d\n",
+				__func__, pkt_sz);
+			return -ENOMEM;
+		}
+
+		param = (struct lsm_cmd_set_params *) packet;
+		common = (struct lsm_param_payload_common *)
+				(packet + sizeof(*param));
+		q6lsm_add_hdr(client, &param->msg_hdr, pkt_sz, true);
+		param->msg_hdr.opcode = LSM_SESSION_CMD_SET_PARAMS_V2;
+		q6lsm_set_param_hdr_info(&param->param_hdr,
+					 sizeof(*common),
+					 0, 0, 0);
+		ids.module_id = p_info->module_id;
+		ids.param_id = p_info->param_id;
+		q6lsm_set_param_common(common, &ids, 0,
+				       LSM_SESSION_CMD_SET_PARAMS_V2);
+		rc = q6lsm_apr_send_pkt(client, client->apr,
+					packet, true, NULL);
+		if (rc)
+			pr_err("%s: DEREG_SND_MODEL failed, rc %d\n",
+				__func__, rc);
+		kfree(packet);
+		break;
+	}
+
+	case LSM_CUSTOM_PARAMS: {
+		struct apr_hdr *hdr;
+		u8 *custom_data;
+
+		if (p_info->param_size <
+		    sizeof(struct lsm_param_payload_common)) {
+			pr_err("%s: Invalid param_size %d\n",
+				__func__, p_info->param_size);
+			return -EINVAL;
+		}
+
+		pkt_sz = p_info->param_size + sizeof(*hdr);
+		packet = kzalloc(pkt_sz, GFP_KERNEL);
+		if (!packet) {
+			pr_err("%s: no memory for CUSTOM_PARAMS, size = %d\n",
+				__func__, pkt_sz);
+			return -ENOMEM;
+		}
+
+		hdr = (struct apr_hdr *) packet;
+		custom_data = (u8 *) (packet + sizeof(*hdr));
+		q6lsm_add_hdr(client, hdr, pkt_sz, true);
+		hdr->opcode = LSM_SESSION_CMD_SET_PARAMS_V2;
+		memcpy(custom_data, data, p_info->param_size);
+
+		rc = q6lsm_apr_send_pkt(client, client->apr,
+					packet, true, NULL);
+		if (rc)
+			pr_err("%s: CUSTOM_PARAMS failed, rc %d\n",
+				__func__, rc);
+		kfree(packet);
+		break;
+	}
+	default:
+		pr_err("%s: wrong param_type 0x%x\n",
+			__func__, p_info->param_type);
+	}
+
+	return rc;
+}
+
+
+int q6lsm_start(struct lsm_client *client, bool wait)
+{
+	return q6lsm_cmd(client, LSM_SESSION_CMD_START, wait);
+}
+
+int q6lsm_stop(struct lsm_client *client, bool wait)
+{
+	return q6lsm_cmd(client, LSM_SESSION_CMD_STOP, wait);
+}
+
+int q6lsm_close(struct lsm_client *client)
+{
+	return q6lsm_cmd(client, LSM_SESSION_CMD_CLOSE_TX, true);
+}
+
+int q6lsm_lab_control(struct lsm_client *client, u32 enable)
+{
+	int rc = 0;
+	struct lsm_params_lab_enable lab_enable;
+	struct lsm_params_lab_config lab_config;
+	struct lsm_module_param_ids lab_ids;
+	u32 param_size;
+
+	if (!client) {
+		pr_err("%s: invalid param client %pK\n", __func__, client);
+		return -EINVAL;
+	}
+	/* enable/disable lab on dsp */
+	q6lsm_add_hdr(client, &lab_enable.msg_hdr, sizeof(lab_enable), true);
+	lab_enable.msg_hdr.opcode = LSM_SESSION_CMD_SET_PARAMS;
+	q6lsm_set_param_hdr_info(&lab_enable.params_hdr,
+				 sizeof(struct lsm_lab_enable),
+				 0, 0, 0);
+	param_size = (sizeof(struct lsm_lab_enable) -
+		      sizeof(struct lsm_param_payload_common));
+	lab_ids.module_id = LSM_MODULE_ID_LAB;
+	lab_ids.param_id = LSM_PARAM_ID_LAB_ENABLE;
+	q6lsm_set_param_common(&lab_enable.lab_enable.common,
+				&lab_ids, param_size,
+				LSM_SESSION_CMD_SET_PARAMS);
+	lab_enable.lab_enable.enable = (enable) ? 1 : 0;
+	rc = q6lsm_apr_send_pkt(client, client->apr, &lab_enable, true, NULL);
+	if (rc) {
+		pr_err("%s: Lab enable failed rc %d\n", __func__, rc);
+		return rc;
+	}
+	if (!enable)
+		goto exit;
+	/* lab session is being enabled set the config values */
+	q6lsm_add_hdr(client, &lab_config.msg_hdr, sizeof(lab_config), true);
+	lab_config.msg_hdr.opcode = LSM_SESSION_CMD_SET_PARAMS;
+	q6lsm_set_param_hdr_info(&lab_config.params_hdr,
+				 sizeof(struct lsm_lab_config),
+				 0, 0, 0);
+	lab_ids.module_id = LSM_MODULE_ID_LAB;
+	lab_ids.param_id = LSM_PARAM_ID_LAB_CONFIG;
+	param_size = (sizeof(struct lsm_lab_config) -
+		      sizeof(struct lsm_param_payload_common));
+	q6lsm_set_param_common(&lab_config.lab_config.common,
+			       &lab_ids, param_size,
+			       LSM_SESSION_CMD_SET_PARAMS);
+	lab_config.lab_config.minor_version = 1;
+	lab_config.lab_config.wake_up_latency_ms = 250;
+	rc = q6lsm_apr_send_pkt(client, client->apr, &lab_config, true, NULL);
+	if (rc) {
+		pr_err("%s: Lab config failed rc %d, disabling lab\n",
+			__func__, rc);
+		/* Lab config failed; roll back by disabling lab */
+		lab_enable.lab_enable.enable = 0;
+		if (q6lsm_apr_send_pkt(client, client->apr,
+			&lab_enable, true, NULL))
+			pr_err("%s: Lab disable failed\n", __func__);
+	}
+exit:
+	return rc;
+}
+
+int q6lsm_stop_lab(struct lsm_client *client)
+{
+	int rc = 0;
+	if (!client) {
+		pr_err("%s: invalid param client %pK\n", __func__, client);
+		return -EINVAL;
+	}
+	rc = q6lsm_cmd(client, LSM_SESSION_CMD_EOB, true);
+	if (rc)
+		pr_err("%s: Lab stop failed %d\n", __func__, rc);
+	return rc;
+}
+
+int q6lsm_read(struct lsm_client *client, struct lsm_cmd_read *read)
+{
+	int rc = 0;
+	if (!client || !read) {
+		pr_err("%s: Invalid params client %pK read %pK\n", __func__,
+			client, read);
+		return -EINVAL;
+	}
+	pr_debug("%s: read call memmap handle %x address %x%x size %d\n",
+		 __func__, read->mem_map_handle, read->buf_addr_msw,
+		read->buf_addr_lsw, read->buf_size);
+	q6lsm_add_hdr(client, &read->hdr, sizeof(struct lsm_cmd_read), true);
+	read->hdr.opcode = LSM_SESSION_CMD_READ;
+	rc = q6lsm_apr_send_pkt(client, client->apr, read, false, NULL);
+	if (rc)
+		pr_err("%s: read buffer call failed rc %d\n", __func__, rc);
+	return rc;
+}
+
+int q6lsm_lab_buffer_alloc(struct lsm_client *client, bool alloc)
+{
+	int ret = 0, i = 0;
+	size_t allocate_size = 0, len = 0;
+	if (!client) {
+		pr_err("%s: invalid client\n", __func__);
+		return -EINVAL;
+	}
+	if (alloc) {
+		if (client->lab_buffer) {
+			pr_err("%s: buffers already allocated, period count %d period size %d\n",
+				__func__,
+				client->hw_params.period_count,
+				client->hw_params.buf_sz);
+			return -EINVAL;
+		}
+		allocate_size = client->hw_params.period_count *
+				client->hw_params.buf_sz;
+		allocate_size = PAGE_ALIGN(allocate_size);
+		client->lab_buffer =
+			kcalloc(client->hw_params.period_count,
+				sizeof(struct lsm_lab_buffer), GFP_KERNEL);
+		if (!client->lab_buffer) {
+			pr_err("%s: memory allocation for lab buffer failed count %d\n",
+				__func__,
+				client->hw_params.period_count);
+			return -ENOMEM;
+		}
+		ret = msm_audio_ion_alloc("lsm_lab",
+			&client->lab_buffer[0].client,
+			&client->lab_buffer[0].handle,
+			allocate_size, &client->lab_buffer[0].phys,
+			&len,
+			&client->lab_buffer[0].data);
+		if (ret)
+			pr_err("%s: ion alloc failed ret %d size %zd\n",
+				__func__, ret, allocate_size);
+		else {
+			ret = q6lsm_memory_map_regions(client,
+				client->lab_buffer[0].phys, len,
+				&client->lab_buffer[0].mem_map_handle);
+			if (ret) {
+				pr_err("%s: memory map failed ret %d size %zd\n",
+					__func__, ret, len);
+				msm_audio_ion_free(
+				client->lab_buffer[0].client,
+				client->lab_buffer[0].handle);
+			}
+		}
+		if (ret) {
+			pr_err("%s: alloc lab buffer failed ret %d\n",
+				__func__, ret);
+			kfree(client->lab_buffer);
+			client->lab_buffer = NULL;
+		} else {
+			pr_debug("%s: Memory map handle %x phys %pK size %d\n",
+				__func__,
+				client->lab_buffer[0].mem_map_handle,
+				&client->lab_buffer[0].phys,
+				client->hw_params.buf_sz);
+			for (i = 0; i < client->hw_params.period_count; i++) {
+				client->lab_buffer[i].phys =
+				client->lab_buffer[0].phys +
+				(i * client->hw_params.buf_sz);
+				client->lab_buffer[i].size =
+				client->hw_params.buf_sz;
+				client->lab_buffer[i].data =
+				(u8 *)(client->lab_buffer[0].data) +
+				(i * client->hw_params.buf_sz);
+				client->lab_buffer[i].mem_map_handle =
+				client->lab_buffer[0].mem_map_handle;
+			}
+		}
+	} else {
+		ret = q6lsm_memory_unmap_regions(client,
+			client->lab_buffer[0].mem_map_handle);
+		if (!ret)
+			msm_audio_ion_free(
+			client->lab_buffer[0].client,
+			client->lab_buffer[0].handle);
+		else
+			pr_err("%s: unmap failed, not freeing memory\n",
+			__func__);
+		kfree(client->lab_buffer);
+		client->lab_buffer = NULL;
+	}
+	return ret;
+}
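+
+/*
+ * Worked example of the carve-up done in q6lsm_lab_buffer_alloc() above:
+ * one ION region of PAGE_ALIGN(period_count * buf_sz) bytes is mapped
+ * once, then sliced into period_count equal buffers. With, say,
+ * period_count = 4 and buf_sz = 4096 (values here are illustrative):
+ *
+ *	lab_buffer[0]: phys = base,          data = vbase
+ *	lab_buffer[1]: phys = base + 4096,   data = vbase + 4096
+ *	lab_buffer[2]: phys = base + 8192,   data = vbase + 8192
+ *	lab_buffer[3]: phys = base + 12288,  data = vbase + 12288
+ *
+ * All slices share the single mem_map_handle from entry 0, so unmapping
+ * that handle (the !alloc path) releases the whole region.
+ */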
+
+static int get_cal_type_index(int32_t cal_type)
+{
+	int ret = -EINVAL;
+
+	switch (cal_type) {
+	case LSM_CUST_TOPOLOGY_CAL_TYPE:
+		ret = LSM_CUSTOM_TOP_IDX;
+		break;
+	case LSM_TOPOLOGY_CAL_TYPE:
+		ret = LSM_TOP_IDX;
+		break;
+	case LSM_CAL_TYPE:
+		ret = LSM_CAL_IDX;
+		break;
+	default:
+		pr_err("%s: invalid cal type %d!\n", __func__, cal_type);
+	}
+	return ret;
+}
+
+static int q6lsm_alloc_cal(int32_t cal_type,
+				size_t data_size, void *data)
+{
+	int				ret = 0;
+	int				cal_index;
+	pr_debug("%s:\n", __func__);
+
+	cal_index = get_cal_type_index(cal_type);
+	if (cal_index < 0) {
+		pr_err("%s: could not get cal index %d!\n",
+			__func__, cal_index);
+		ret = -EINVAL;
+		goto done;
+	}
+
+	ret = cal_utils_alloc_cal(data_size, data,
+		lsm_common.cal_data[cal_index], 0, NULL);
+	if (ret < 0) {
+		pr_err("%s: cal_utils_alloc_cal failed, ret = %d, cal type = %d!\n",
+			__func__, ret, cal_type);
+		ret = -EINVAL;
+		goto done;
+	}
+done:
+	return ret;
+}
+
+static int q6lsm_dealloc_cal(int32_t cal_type,
+				size_t data_size, void *data)
+{
+	int				ret = 0;
+	int				cal_index;
+	pr_debug("%s:\n", __func__);
+
+	cal_index = get_cal_type_index(cal_type);
+	if (cal_index < 0) {
+		pr_err("%s: could not get cal index %d!\n",
+			__func__, cal_index);
+		ret = -EINVAL;
+		goto done;
+	}
+
+	ret = cal_utils_dealloc_cal(data_size, data,
+		lsm_common.cal_data[cal_index]);
+	if (ret < 0) {
+		pr_err("%s: cal_utils_dealloc_cal failed, ret = %d, cal type = %d!\n",
+			__func__, ret, cal_type);
+		ret = -EINVAL;
+		goto done;
+	}
+done:
+	return ret;
+}
+
+static int q6lsm_set_cal(int32_t cal_type,
+			size_t data_size, void *data)
+{
+	int				ret = 0;
+	int				cal_index;
+	pr_debug("%s:\n", __func__);
+
+	cal_index = get_cal_type_index(cal_type);
+	if (cal_index < 0) {
+		pr_err("%s: could not get cal index %d!\n",
+			__func__, cal_index);
+		ret = -EINVAL;
+		goto done;
+	}
+
+	ret = cal_utils_set_cal(data_size, data,
+		lsm_common.cal_data[cal_index], 0, NULL);
+	if (ret < 0) {
+		pr_err("%s: cal_utils_set_cal failed, ret = %d, cal type = %d!\n",
+			__func__, ret, cal_type);
+		ret = -EINVAL;
+		goto done;
+	}
+
+	if (cal_index == LSM_CUSTOM_TOP_IDX) {
+		mutex_lock(&lsm_common.cal_data[LSM_CUSTOM_TOP_IDX]->lock);
+		lsm_common.set_custom_topology = 1;
+		mutex_unlock(&lsm_common.cal_data[LSM_CUSTOM_TOP_IDX]->lock);
+	}
+
+done:
+	return ret;
+}
+
+static void lsm_delete_cal_data(void)
+{
+	pr_debug("%s:\n", __func__);
+
+	cal_utils_destroy_cal_types(LSM_MAX_CAL_IDX, lsm_common.cal_data);
+}
+
+static int q6lsm_init_cal_data(void)
+{
+	int ret = 0;
+	struct cal_type_info	cal_type_info[] = {
+		{{LSM_CUST_TOPOLOGY_CAL_TYPE,
+		{q6lsm_alloc_cal, q6lsm_dealloc_cal, NULL,
+		q6lsm_set_cal, NULL, NULL} },
+		{NULL, NULL, cal_utils_match_buf_num} },
+
+		{{LSM_TOPOLOGY_CAL_TYPE,
+		{NULL, NULL, NULL,
+		q6lsm_set_cal, NULL, NULL} },
+		{NULL, NULL, cal_utils_match_buf_num} },
+
+		{{LSM_CAL_TYPE,
+		{q6lsm_alloc_cal, q6lsm_dealloc_cal, NULL,
+		q6lsm_set_cal, NULL, NULL} },
+		{NULL, NULL, cal_utils_match_buf_num} }
+	};
+	pr_debug("%s:\n", __func__);
+
+	ret = cal_utils_create_cal_types(LSM_MAX_CAL_IDX,
+		lsm_common.cal_data, cal_type_info);
+	if (ret < 0) {
+		pr_err("%s: could not create cal type!\n",
+			__func__);
+		ret = -EINVAL;
+		goto err;
+	}
+
+	return ret;
+err:
+	lsm_delete_cal_data();
+	return ret;
+}
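+
+/*
+ * The table above wires each cal type to its alloc/dealloc/set callbacks;
+ * the cal_utils layer invokes them when userspace pushes calibration data
+ * down through the audio calibration interface. A set on
+ * LSM_CUST_TOPOLOGY_CAL_TYPE, for instance, lands in q6lsm_set_cal(),
+ * which additionally raises set_custom_topology so the custom topologies
+ * can be (re)sent to the DSP later (outside this excerpt).
+ */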
+
+static int __init q6lsm_init(void)
+{
+	int i;
+	pr_debug("%s:\n", __func__);
+	spin_lock_init(&lsm_session_lock);
+	spin_lock_init(&mmap_lock);
+	mutex_init(&lsm_common.apr_lock);
+	for (i = 0; i <= LSM_MAX_SESSION_ID; i++) {
+		lsm_common.common_client[i].session = LSM_CONTROL_SESSION;
+		init_waitqueue_head(&lsm_common.common_client[i].cmd_wait);
+		mutex_init(&lsm_common.common_client[i].cmd_lock);
+		atomic_set(&lsm_common.common_client[i].cmd_state,
+			   CMD_STATE_CLEARED);
+	}
+
+	if (q6lsm_init_cal_data())
+		pr_err("%s: could not init cal data!\n", __func__);
+
+	return 0;
+}
+
+static void __exit q6lsm_exit(void)
+{
+	lsm_delete_cal_data();
+}
+
+device_initcall(q6lsm_init);
+__exitcall(q6lsm_exit);
diff -Nruw linux-4.4.115/sound/soc/msm/qdsp6v2/q6voice.c linux-4.4.115-fbx/sound/soc/msm/qdsp6v2/q6voice.c
--- linux-4.4.115/sound/soc/msm/qdsp6v2/q6voice.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/sound/soc/msm/qdsp6v2/q6voice.c	2019-10-29 09:26:26.165227859 +0100
@@ -0,0 +1,8690 @@
+/*  Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#include <linux/slab.h>
+#include <linux/kthread.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/uaccess.h>
+#include <linux/wait.h>
+#include <linux/mutex.h>
+#include <linux/msm_audio_ion.h>
+
+#include <soc/qcom/socinfo.h>
+#include <linux/qdsp6v2/apr_tal.h>
+
+#include "sound/q6audio-v2.h"
+#include "sound/apr_audio-v2.h"
+#include "sound/q6afe-v2.h"
+#include <sound/audio_cal_utils.h>
+#include "q6voice.h"
+#include <sound/adsp_err.h>
+
+#define TIMEOUT_MS 300
+
+
+#define CMD_STATUS_SUCCESS 0
+#define CMD_STATUS_FAIL 1
+
+enum {
+	VOC_TOKEN_NONE,
+	VOIP_MEM_MAP_TOKEN,
+	VOC_CAL_MEM_MAP_TOKEN,
+	VOC_VOICE_HOST_PCM_MAP_TOKEN,
+	VOC_RTAC_MEM_MAP_TOKEN,
+	VOC_SOURCE_TRACKING_MEM_MAP_TOKEN
+};
+
+struct cvd_version_table cvd_version_table_mapping[CVD_INT_VERSION_MAX] = {
+		{CVD_VERSION_DEFAULT, CVD_INT_VERSION_DEFAULT},
+		{CVD_VERSION_0_0, CVD_INT_VERSION_0_0},
+		{CVD_VERSION_2_1, CVD_INT_VERSION_2_1},
+		{CVD_VERSION_2_2, CVD_INT_VERSION_2_2},
+		{CVD_VERSION_2_3, CVD_INT_VERSION_2_3},
+};
+
+static struct common_data common;
+static bool module_initialized;
+
+static int voice_send_enable_vocproc_cmd(struct voice_data *v);
+static int voice_send_netid_timing_cmd(struct voice_data *v);
+static int voice_send_attach_vocproc_cmd(struct voice_data *v);
+static int voice_send_set_device_cmd(struct voice_data *v);
+static int voice_send_vol_step_cmd(struct voice_data *v);
+static int voice_send_mvm_unmap_memory_physical_cmd(struct voice_data *v,
+						    uint32_t mem_handle);
+static int voice_send_mvm_cal_network_cmd(struct voice_data *v);
+static int voice_send_mvm_media_type_cmd(struct voice_data *v);
+static int voice_send_mvm_cvd_version_cmd(struct voice_data *v);
+static int voice_send_cvs_data_exchange_mode_cmd(struct voice_data *v);
+static int voice_send_cvs_packet_exchange_config_cmd(struct voice_data *v);
+static int voice_set_packet_exchange_mode_and_config(uint32_t session_id,
+						     uint32_t mode);
+
+static int voice_send_cvs_register_cal_cmd(struct voice_data *v);
+static int voice_send_cvs_deregister_cal_cmd(struct voice_data *v);
+static int voice_send_cvp_create_cmd(struct voice_data *v);
+static int voice_send_cvp_register_dev_cfg_cmd(struct voice_data *v);
+static int voice_send_cvp_deregister_dev_cfg_cmd(struct voice_data *v);
+static int voice_send_cvp_register_cal_cmd(struct voice_data *v);
+static int voice_send_cvp_deregister_cal_cmd(struct voice_data *v);
+static int voice_send_cvp_register_vol_cal_cmd(struct voice_data *v);
+static int voice_send_cvp_deregister_vol_cal_cmd(struct voice_data *v);
+static int voice_send_cvp_media_fmt_info_cmd(struct voice_data *v);
+static int voice_send_cvp_device_channels_cmd(struct voice_data *v);
+static int voice_send_cvp_media_format_cmd(struct voice_data *v,
+					   uint32_t param_type);
+static int voice_send_cvp_topology_commit_cmd(struct voice_data *v);
+
+static int voice_cvs_stop_playback(struct voice_data *v);
+static int voice_cvs_start_playback(struct voice_data *v);
+static int voice_cvs_start_record(struct voice_data *v, uint32_t rec_mode);
+static int voice_cvs_stop_record(struct voice_data *v);
+
+static int32_t qdsp_mvm_callback(struct apr_client_data *data, void *priv);
+static int32_t qdsp_cvs_callback(struct apr_client_data *data, void *priv);
+static int32_t qdsp_cvp_callback(struct apr_client_data *data, void *priv);
+
+static int voice_send_set_pp_enable_cmd(struct voice_data *v,
+					uint32_t module_id, int enable);
+static int is_cal_memory_allocated(void);
+static bool is_cvd_version_queried(void);
+static int is_voip_memory_allocated(void);
+static int voice_get_cvd_int_version(char *cvd_ver_string);
+static int voice_alloc_cal_mem_map_table(void);
+static int voice_alloc_rtac_mem_map_table(void);
+static int voice_alloc_oob_shared_mem(void);
+static int voice_free_oob_shared_mem(void);
+static int voice_alloc_oob_mem_table(void);
+static int voice_alloc_and_map_oob_mem(struct voice_data *v);
+static void voice_vote_powerstate_to_bms(struct voice_data *v, bool state);
+
+static struct voice_data *voice_get_session_by_idx(int idx);
+
+static int remap_cal_data(struct cal_block_data *cal_block,
+			  uint32_t session_id);
+static int voice_unmap_cal_memory(int32_t cal_type,
+				  struct cal_block_data *cal_block);
+
+static int is_source_tracking_shared_memomry_allocated(void);
+static int voice_alloc_source_tracking_shared_memory(void);
+static int voice_alloc_and_map_source_tracking_shared_memory(
+						struct voice_data *v);
+static int voice_unmap_and_free_source_tracking_shared_memory(
+						struct voice_data *v);
+static int voice_send_set_sound_focus_cmd(struct voice_data *v,
+				struct sound_focus_param soundFocusData);
+static int voice_send_get_sound_focus_cmd(struct voice_data *v,
+				struct sound_focus_param *soundFocusData);
+static int voice_send_get_source_tracking_cmd(struct voice_data *v,
+			struct source_tracking_param *sourceTrackingData);
+
+static void voice_itr_init(struct voice_session_itr *itr,
+			   u32 session_id)
+{
+	if (itr == NULL)
+		return;
+	itr->session_idx = voice_get_idx_for_session(session_id);
+	if (session_id == ALL_SESSION_VSID)
+		itr->cur_idx = 0;
+	else
+		itr->cur_idx = itr->session_idx;
+}
+
+static bool voice_itr_get_next_session(struct voice_session_itr *itr,
+					struct voice_data **voice)
+{
+	bool ret = false;
+
+	if (itr == NULL)
+		return false;
+	pr_debug("%s : cur idx = %d session idx = %d\n",
+			 __func__, itr->cur_idx, itr->session_idx);
+
+	if (itr->cur_idx <= itr->session_idx) {
+		ret = true;
+		*voice = voice_get_session_by_idx(itr->cur_idx);
+		itr->cur_idx++;
+	} else {
+		*voice = NULL;
+	}
+
+	return ret;
+}
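+
+/*
+ * Typical iteration pattern (hypothetical caller, for illustration):
+ *
+ *	struct voice_session_itr itr;
+ *	struct voice_data *v = NULL;
+ *
+ *	voice_itr_init(&itr, session_id);
+ *	while (voice_itr_get_next_session(&itr, &v)) {
+ *		if (v != NULL)
+ *			... apply the command to this session ...
+ *	}
+ *
+ * For ALL_SESSION_VSID the iterator starts at index 0 and walks every
+ * session (voice_get_idx_for_session() maps it to the last index); for
+ * a single VSID, cur_idx == session_idx and the loop body runs once.
+ */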
+
+static bool voice_is_valid_session_id(uint32_t session_id)
+{
+	bool ret = false;
+
+	switch (session_id) {
+	case VOICE_SESSION_VSID:
+	case VOICE2_SESSION_VSID:
+	case VOLTE_SESSION_VSID:
+	case VOIP_SESSION_VSID:
+	case QCHAT_SESSION_VSID:
+	case VOWLAN_SESSION_VSID:
+	case VOICEMMODE1_VSID:
+	case VOICEMMODE2_VSID:
+	case ALL_SESSION_VSID:
+		ret = true;
+		break;
+	default:
+		pr_err("%s: Invalid session_id : %x\n", __func__, session_id);
+
+		break;
+	}
+
+	return ret;
+}
+
+static u16 voice_get_mvm_handle(struct voice_data *v)
+{
+	if (v == NULL) {
+		pr_err("%s: v is NULL\n", __func__);
+		return 0;
+	}
+
+	pr_debug("%s: mvm_handle %d\n", __func__, v->mvm_handle);
+
+	return v->mvm_handle;
+}
+
+static void voice_set_mvm_handle(struct voice_data *v, u16 mvm_handle)
+{
+	pr_debug("%s: mvm_handle %d\n", __func__, mvm_handle);
+	if (v == NULL) {
+		pr_err("%s: v is NULL\n", __func__);
+		return;
+	}
+
+	v->mvm_handle = mvm_handle;
+}
+
+static u16 voice_get_cvs_handle(struct voice_data *v)
+{
+	if (v == NULL) {
+		pr_err("%s: v is NULL\n", __func__);
+		return 0;
+	}
+
+	pr_debug("%s: cvs_handle %d\n", __func__, v->cvs_handle);
+
+	return v->cvs_handle;
+}
+
+static void voice_set_cvs_handle(struct voice_data *v, u16 cvs_handle)
+{
+	pr_debug("%s: cvs_handle %d\n", __func__, cvs_handle);
+	if (v == NULL) {
+		pr_err("%s: v is NULL\n", __func__);
+		return;
+	}
+
+	v->cvs_handle = cvs_handle;
+}
+
+static u16 voice_get_cvp_handle(struct voice_data *v)
+{
+	if (v == NULL) {
+		pr_err("%s: v is NULL\n", __func__);
+		return 0;
+	}
+
+	pr_debug("%s: cvp_handle %d\n", __func__, v->cvp_handle);
+
+	return v->cvp_handle;
+}
+
+static void voice_set_cvp_handle(struct voice_data *v, u16 cvp_handle)
+{
+	pr_debug("%s: cvp_handle %d\n", __func__, cvp_handle);
+	if (v == NULL) {
+		pr_err("%s: v is NULL\n", __func__);
+		return;
+	}
+
+	v->cvp_handle = cvp_handle;
+}
+
+char *voc_get_session_name(u32 session_id)
+{
+	char *session_name = NULL;
+
+	if (session_id == common.voice[VOC_PATH_PASSIVE].session_id) {
+		session_name = VOICE_SESSION_NAME;
+	} else if (session_id ==
+			common.voice[VOC_PATH_VOLTE_PASSIVE].session_id) {
+		session_name = VOLTE_SESSION_NAME;
+	} else if (session_id ==
+			common.voice[VOC_PATH_QCHAT_PASSIVE].session_id) {
+		session_name = QCHAT_SESSION_NAME;
+	} else if (session_id ==
+			common.voice[VOC_PATH_VOWLAN_PASSIVE].session_id) {
+		session_name = VOWLAN_SESSION_NAME;
+	} else if (session_id ==
+		common.voice[VOC_PATH_VOICEMMODE1_PASSIVE].session_id) {
+		session_name = VOICEMMODE1_NAME;
+	} else if (session_id ==
+		common.voice[VOC_PATH_VOICEMMODE2_PASSIVE].session_id) {
+		session_name = VOICEMMODE2_NAME;
+	} else if (session_id == common.voice[VOC_PATH_FULL].session_id) {
+		session_name = VOIP_SESSION_NAME;
+	}
+	return session_name;
+}
+
+uint32_t voc_get_session_id(char *name)
+{
+	u32 session_id = 0;
+
+	if (name != NULL) {
+		if (!strncmp(name, "Voice session", 13))
+			session_id = common.voice[VOC_PATH_PASSIVE].session_id;
+		else if (!strncmp(name, "Voice2 session", 14))
+			session_id =
+			common.voice[VOC_PATH_VOICE2_PASSIVE].session_id;
+		else if (!strncmp(name, "VoLTE session", 13))
+			session_id =
+			common.voice[VOC_PATH_VOLTE_PASSIVE].session_id;
+		else if (!strncmp(name, "QCHAT session", 13))
+			session_id =
+			common.voice[VOC_PATH_QCHAT_PASSIVE].session_id;
+		else if (!strncmp(name, "VoWLAN session", 14))
+			session_id =
+			common.voice[VOC_PATH_VOWLAN_PASSIVE].session_id;
+		else if (!strcmp(name, "VoiceMMode1"))
+			session_id =
+			common.voice[VOC_PATH_VOICEMMODE1_PASSIVE].session_id;
+		else if (!strcmp(name, "VoiceMMode2"))
+			session_id =
+			common.voice[VOC_PATH_VOICEMMODE2_PASSIVE].session_id;
+		else
+			session_id = common.voice[VOC_PATH_FULL].session_id;
+
+		pr_debug("%s: %s has session id 0x%x\n", __func__, name,
+				session_id);
+	}
+
+	return session_id;
+}
+
+static struct voice_data *voice_get_session(u32 session_id)
+{
+	struct voice_data *v = NULL;
+
+	switch (session_id) {
+	case VOICE_SESSION_VSID:
+		v = &common.voice[VOC_PATH_PASSIVE];
+		break;
+
+	case VOICE2_SESSION_VSID:
+		v = &common.voice[VOC_PATH_VOICE2_PASSIVE];
+		break;
+
+	case VOLTE_SESSION_VSID:
+		v = &common.voice[VOC_PATH_VOLTE_PASSIVE];
+		break;
+
+	case VOIP_SESSION_VSID:
+		v = &common.voice[VOC_PATH_FULL];
+		break;
+
+	case QCHAT_SESSION_VSID:
+		v = &common.voice[VOC_PATH_QCHAT_PASSIVE];
+		break;
+
+	case VOWLAN_SESSION_VSID:
+		v = &common.voice[VOC_PATH_VOWLAN_PASSIVE];
+		break;
+
+	case VOICEMMODE1_VSID:
+		v = &common.voice[VOC_PATH_VOICEMMODE1_PASSIVE];
+		break;
+
+	case VOICEMMODE2_VSID:
+		v = &common.voice[VOC_PATH_VOICEMMODE2_PASSIVE];
+		break;
+
+	case ALL_SESSION_VSID:
+		break;
+
+	default:
+		pr_err("%s: Invalid session_id : %x\n", __func__, session_id);
+
+		break;
+	}
+
+	pr_debug("%s:session_id 0x%x session handle %pK\n",
+		__func__, session_id, v);
+
+	return v;
+}
+
+int voice_get_idx_for_session(u32 session_id)
+{
+	int idx = 0;
+
+	switch (session_id) {
+	case VOICE_SESSION_VSID:
+		idx = VOC_PATH_PASSIVE;
+		break;
+
+	case VOICE2_SESSION_VSID:
+		idx = VOC_PATH_VOICE2_PASSIVE;
+		break;
+
+	case VOLTE_SESSION_VSID:
+		idx = VOC_PATH_VOLTE_PASSIVE;
+		break;
+
+	case VOIP_SESSION_VSID:
+		idx = VOC_PATH_FULL;
+		break;
+
+	case QCHAT_SESSION_VSID:
+		idx = VOC_PATH_QCHAT_PASSIVE;
+		break;
+
+	case VOWLAN_SESSION_VSID:
+		idx = VOC_PATH_VOWLAN_PASSIVE;
+		break;
+
+	case VOICEMMODE1_VSID:
+		idx = VOC_PATH_VOICEMMODE1_PASSIVE;
+		break;
+
+	case VOICEMMODE2_VSID:
+		idx = VOC_PATH_VOICEMMODE2_PASSIVE;
+		break;
+
+	case ALL_SESSION_VSID:
+		idx = MAX_VOC_SESSIONS - 1;
+		break;
+
+	default:
+		pr_err("%s: Invalid session_id : %x\n", __func__, session_id);
+
+		break;
+	}
+
+	return idx;
+}
+
+static struct voice_data *voice_get_session_by_idx(int idx)
+{
+	return ((idx < 0 || idx >= MAX_VOC_SESSIONS) ?
+				NULL : &common.voice[idx]);
+}
+
+static bool is_voip_session(u32 session_id)
+{
+	return (session_id == common.voice[VOC_PATH_FULL].session_id);
+}
+
+static bool is_volte_session(u32 session_id)
+{
+	return (session_id == common.voice[VOC_PATH_VOLTE_PASSIVE].session_id);
+}
+
+static bool is_voice2_session(u32 session_id)
+{
+	return (session_id == common.voice[VOC_PATH_VOICE2_PASSIVE].session_id);
+}
+
+static bool is_qchat_session(u32 session_id)
+{
+	return (session_id == common.voice[VOC_PATH_QCHAT_PASSIVE].session_id);
+}
+
+static bool is_vowlan_session(u32 session_id)
+{
+	return (session_id == common.voice[VOC_PATH_VOWLAN_PASSIVE].session_id);
+}
+
+static bool is_voicemmode1(u32 session_id)
+{
+	return session_id ==
+			common.voice[VOC_PATH_VOICEMMODE1_PASSIVE].session_id;
+}
+
+static bool is_voicemmode2(u32 session_id)
+{
+	return session_id ==
+			common.voice[VOC_PATH_VOICEMMODE2_PASSIVE].session_id;
+}
+
+static bool is_voc_state_active(int voc_state)
+{
+	if ((voc_state == VOC_RUN) ||
+		(voc_state == VOC_CHANGE) ||
+		(voc_state == VOC_STANDBY))
+		return true;
+
+	return false;
+}
+
+static void voc_set_error_state(uint16_t reset_proc)
+{
+	struct voice_data *v = NULL;
+	int i;
+
+	for (i = 0; i < MAX_VOC_SESSIONS; i++) {
+		v = &common.voice[i];
+		if (v != NULL) {
+			v->voc_state = VOC_ERROR;
+			v->rec_info.recording = 0;
+		}
+	}
+}
+
+static bool is_other_session_active(u32 session_id)
+{
+	int i;
+	bool ret = false;
+
+	/* Check whether any session other than the given one is active */
+	for (i = 0; i < MAX_VOC_SESSIONS; i++) {
+		if (common.voice[i].session_id == session_id)
+			continue;
+
+		if (is_voc_state_active(common.voice[i].voc_state)) {
+			ret = true;
+			break;
+		}
+	}
+	pr_debug("%s: ret %d\n", __func__, ret);
+
+	return ret;
+}
+
+static bool is_sub1_vsid(u32 session_id)
+{
+	bool ret;
+
+	switch (session_id) {
+	case VOICE_SESSION_VSID:
+	case VOLTE_SESSION_VSID:
+	case VOWLAN_SESSION_VSID:
+	case VOICEMMODE1_VSID:
+		ret = true;
+		break;
+	default:
+		ret = false;
+	}
+
+	return ret;
+}
+
+static bool is_sub2_vsid(u32 session_id)
+{
+	bool ret;
+
+	switch (session_id) {
+	case VOICE2_SESSION_VSID:
+	case VOICEMMODE2_VSID:
+		ret = true;
+		break;
+	default:
+		ret = false;
+	}
+
+	return ret;
+}
+
+static bool is_voice_app_id(u32 session_id)
+{
+	return is_sub1_vsid(session_id) || is_sub2_vsid(session_id);
+}
+
+static void init_session_id(void)
+{
+	common.voice[VOC_PATH_PASSIVE].session_id = VOICE_SESSION_VSID;
+	common.voice[VOC_PATH_VOLTE_PASSIVE].session_id = VOLTE_SESSION_VSID;
+	common.voice[VOC_PATH_VOICE2_PASSIVE].session_id = VOICE2_SESSION_VSID;
+	common.voice[VOC_PATH_FULL].session_id = VOIP_SESSION_VSID;
+	common.voice[VOC_PATH_QCHAT_PASSIVE].session_id = QCHAT_SESSION_VSID;
+	common.voice[VOC_PATH_VOWLAN_PASSIVE].session_id = VOWLAN_SESSION_VSID;
+	common.voice[VOC_PATH_VOICEMMODE1_PASSIVE].session_id =
+							VOICEMMODE1_VSID;
+	common.voice[VOC_PATH_VOICEMMODE2_PASSIVE].session_id =
+							VOICEMMODE2_VSID;
+}
+
+static bool is_cvd_version_queried(void)
+{
+	bool ret = false;
+
+	if (!strcmp(common.cvd_version, CVD_VERSION_DEFAULT))
+		ret = false;
+	else
+		ret = true;
+
+	return ret;
+}
+
+static int voice_get_cvd_int_version(char *cvd_ver_string)
+{
+	unsigned int idx;
+	int cvd_int_ver = CVD_INT_VERSION_DEFAULT;
+
+	for (idx = 0; idx < CVD_INT_VERSION_MAX; idx++) {
+		if (strcmp((char *)cvd_ver_string,
+			  cvd_version_table_mapping[idx].cvd_ver) == 0) {
+			cvd_int_ver =
+			cvd_version_table_mapping[idx].cvd_ver_int;
+			break;
+		}
+	}
+	return cvd_int_ver;
+}
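+
+/*
+ * Example: a CVD reporting the CVD_VERSION_2_2 string resolves to
+ * CVD_INT_VERSION_2_2 via the mapping table above, while any string not
+ * present in the table falls back to CVD_INT_VERSION_DEFAULT. Callers
+ * can then use ordered integer comparisons instead of repeated strcmp()s.
+ */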
+
+static int voice_apr_register(uint32_t session_id)
+{
+	pr_debug("%s\n", __func__);
+
+	mutex_lock(&common.common_lock);
+
+	/* register callback to APR */
+	if (common.apr_q6_mvm == NULL) {
+		pr_debug("%s: Start to register MVM callback\n", __func__);
+
+		common.apr_q6_mvm = apr_register("ADSP", "MVM",
+						 qdsp_mvm_callback,
+						 0xFFFFFFFF, &common);
+
+		if (common.apr_q6_mvm == NULL) {
+			pr_err("%s: Unable to register MVM\n", __func__);
+			goto err;
+		}
+	}
+
+	if (common.apr_q6_cvs == NULL) {
+		pr_debug("%s: Start to register CVS callback\n", __func__);
+
+		common.apr_q6_cvs = apr_register("ADSP", "CVS",
+						 qdsp_cvs_callback,
+						 0xFFFFFFFF, &common);
+
+		if (common.apr_q6_cvs == NULL) {
+			pr_err("%s: Unable to register CVS\n", __func__);
+			goto err;
+		}
+		rtac_set_voice_handle(RTAC_CVS, common.apr_q6_cvs);
+	}
+
+	if (common.apr_q6_cvp == NULL) {
+		pr_debug("%s: Start to register CVP callback\n", __func__);
+
+		common.apr_q6_cvp = apr_register("ADSP", "CVP",
+						 qdsp_cvp_callback,
+						 0xFFFFFFFF, &common);
+
+		if (common.apr_q6_cvp == NULL) {
+			pr_err("%s: Unable to register CVP\n", __func__);
+			goto err;
+		}
+		rtac_set_voice_handle(RTAC_CVP, common.apr_q6_cvp);
+	}
+
+	mutex_unlock(&common.common_lock);
+
+	return 0;
+
+err:
+	if (common.apr_q6_cvs != NULL) {
+		apr_deregister(common.apr_q6_cvs);
+		common.apr_q6_cvs = NULL;
+		rtac_set_voice_handle(RTAC_CVS, NULL);
+	}
+	if (common.apr_q6_mvm != NULL) {
+		apr_deregister(common.apr_q6_mvm);
+		common.apr_q6_mvm = NULL;
+	}
+
+	mutex_unlock(&common.common_lock);
+
+	return -ENODEV;
+}
+
+static int voice_send_mvm_cvd_version_cmd(struct voice_data *v)
+{
+	int ret;
+	struct apr_hdr cvd_version_get_cmd;
+	void *apr_mvm;
+
+	if (v == NULL) {
+		pr_err("%s: v is NULL\n", __func__);
+
+		ret = -EINVAL;
+		goto done;
+	}
+
+	apr_mvm = common.apr_q6_mvm;
+	if (!apr_mvm) {
+		pr_err("%s: apr_mvm is NULL.\n", __func__);
+
+		ret = -EINVAL;
+		goto done;
+	}
+
+	/* Send command to CVD to retrieve Version */
+	cvd_version_get_cmd.hdr_field = APR_HDR_FIELD(
+				APR_MSG_TYPE_SEQ_CMD,
+				APR_HDR_LEN(APR_HDR_SIZE),
+				APR_PKT_VER);
+	cvd_version_get_cmd.pkt_size = APR_PKT_SIZE(
+				APR_HDR_SIZE,
+				sizeof(cvd_version_get_cmd) -
+				APR_HDR_SIZE);
+	cvd_version_get_cmd.src_port =
+		voice_get_idx_for_session(v->session_id);
+	cvd_version_get_cmd.dest_port = 0;
+	cvd_version_get_cmd.token = 0;
+	cvd_version_get_cmd.opcode = VSS_IVERSION_CMD_GET;
+
+	pr_debug("%s: send CVD version get cmd, pkt size = %d\n",
+		 __func__, cvd_version_get_cmd.pkt_size);
+
+	v->mvm_state = CMD_STATUS_FAIL;
+	v->async_err = 0;
+	ret = apr_send_pkt(apr_mvm,
+			   (uint32_t *) &cvd_version_get_cmd);
+	if (ret < 0) {
+		pr_err("%s: Error sending command\n", __func__);
+
+		ret = -EINVAL;
+		goto done;
+	}
+
+	ret = wait_event_timeout(v->mvm_wait,
+			(v->mvm_state == CMD_STATUS_SUCCESS),
+			msecs_to_jiffies(TIMEOUT_MS));
+	if (!ret) {
+		pr_err("%s: wait_event timeout, fall back to default\n",
+		       __func__);
+
+		ret = -EINVAL;
+		goto done;
+	}
+	if (v->async_err > 0) {
+		pr_err("%s: DSP returned error[%s]\n",
+			__func__, adsp_err_get_err_str(
+			v->async_err));
+		ret = adsp_err_get_lnx_err_code(
+			v->async_err);
+		goto done;
+	}
+	ret = 0;
+
+done:
+	if (ret) {
+		strlcpy(common.cvd_version, CVD_VERSION_0_0,
+				sizeof(common.cvd_version));
+	}
+	pr_debug("%s: CVD Version retrieved=%s\n",
+		 __func__, common.cvd_version);
+
+	return ret;
+}
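+
+/*
+ * voice_send_mvm_cvd_version_cmd() above is the first instance of the
+ * blocking APR command pattern that nearly every voice_send_* helper in
+ * this file repeats:
+ *
+ *	v->mvm_state = CMD_STATUS_FAIL;    (or cvs_state/cvp_state)
+ *	v->async_err = 0;
+ *	apr_send_pkt(apr_handle, (uint32_t *)&cmd);
+ *	wait_event_timeout(v->mvm_wait, state == CMD_STATUS_SUCCESS,
+ *			   msecs_to_jiffies(TIMEOUT_MS));
+ *	check v->async_err, map it via adsp_err_get_lnx_err_code()
+ *
+ * The state flag and async_err are updated by the qdsp_*_callback()
+ * handlers (registered in voice_apr_register()) when the DSP acks.
+ */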
+
+static int voice_send_dual_control_cmd(struct voice_data *v)
+{
+	int ret = 0;
+	struct mvm_modem_dual_control_session_cmd mvm_voice_ctl_cmd;
+	void *apr_mvm;
+	u16 mvm_handle;
+
+	if (v == NULL) {
+		pr_err("%s: v is NULL\n", __func__);
+		return -EINVAL;
+	}
+	apr_mvm = common.apr_q6_mvm;
+	if (!apr_mvm) {
+		pr_err("%s: apr_mvm is NULL.\n", __func__);
+		return -EINVAL;
+	}
+	pr_debug("%s: Send Dual Control command to MVM\n", __func__);
+	if (!is_voip_session(v->session_id)) {
+		mvm_handle = voice_get_mvm_handle(v);
+		mvm_voice_ctl_cmd.hdr.hdr_field = APR_HDR_FIELD(
+						APR_MSG_TYPE_SEQ_CMD,
+						APR_HDR_LEN(APR_HDR_SIZE),
+						APR_PKT_VER);
+		mvm_voice_ctl_cmd.hdr.pkt_size = APR_PKT_SIZE(
+						APR_HDR_SIZE,
+						sizeof(mvm_voice_ctl_cmd) -
+						APR_HDR_SIZE);
+		pr_debug("%s: send mvm Voice Ctl pkt size = %d\n",
+			__func__, mvm_voice_ctl_cmd.hdr.pkt_size);
+		mvm_voice_ctl_cmd.hdr.src_port =
+				voice_get_idx_for_session(v->session_id);
+		mvm_voice_ctl_cmd.hdr.dest_port = mvm_handle;
+		mvm_voice_ctl_cmd.hdr.token = 0;
+		mvm_voice_ctl_cmd.hdr.opcode =
+					VSS_IMVM_CMD_SET_POLICY_DUAL_CONTROL;
+		mvm_voice_ctl_cmd.voice_ctl.enable_flag = true;
+		v->mvm_state = CMD_STATUS_FAIL;
+		v->async_err = 0;
+
+		ret = apr_send_pkt(apr_mvm, (uint32_t *) &mvm_voice_ctl_cmd);
+		if (ret < 0) {
+			pr_err("%s: Error sending MVM Voice CTL CMD\n",
+							__func__);
+			ret = -EINVAL;
+			goto fail;
+		}
+		ret = wait_event_timeout(v->mvm_wait,
+				(v->mvm_state == CMD_STATUS_SUCCESS),
+				msecs_to_jiffies(TIMEOUT_MS));
+		if (!ret) {
+			pr_err("%s: wait_event timeout\n", __func__);
+			ret = -EINVAL;
+			goto fail;
+		}
+		if (v->async_err > 0) {
+			pr_err("%s: DSP returned error[%s]\n",
+				__func__, adsp_err_get_err_str(
+				v->async_err));
+			ret = adsp_err_get_lnx_err_code(
+					v->async_err);
+			goto fail;
+		}
+	}
+	ret = 0;
+fail:
+	return ret;
+}
+
+
+static int voice_create_mvm_cvs_session(struct voice_data *v)
+{
+	int ret = 0;
+	struct mvm_create_ctl_session_cmd mvm_session_cmd;
+	struct cvs_create_passive_ctl_session_cmd cvs_session_cmd;
+	struct cvs_create_full_ctl_session_cmd cvs_full_ctl_cmd;
+	struct mvm_attach_stream_cmd attach_stream_cmd;
+	void *apr_mvm, *apr_cvs, *apr_cvp;
+	u16 mvm_handle, cvs_handle, cvp_handle;
+
+	if (v == NULL) {
+		pr_err("%s: v is NULL\n", __func__);
+		return -EINVAL;
+	}
+	apr_mvm = common.apr_q6_mvm;
+	apr_cvs = common.apr_q6_cvs;
+	apr_cvp = common.apr_q6_cvp;
+
+	if (!apr_mvm || !apr_cvs || !apr_cvp) {
+		pr_err("%s: apr_mvm or apr_cvs or apr_cvp is NULL\n", __func__);
+		return -EINVAL;
+	}
+	mvm_handle = voice_get_mvm_handle(v);
+	cvs_handle = voice_get_cvs_handle(v);
+	cvp_handle = voice_get_cvp_handle(v);
+
+	pr_debug("%s: mvm_hdl=%d, cvs_hdl=%d\n", __func__,
+		mvm_handle, cvs_handle);
+	/* send cmd to create mvm session and wait for response */
+
+	if (!mvm_handle) {
+		memset(mvm_session_cmd.mvm_session.name, 0,
+			sizeof(mvm_session_cmd.mvm_session.name));
+		if (!is_voip_session(v->session_id)) {
+			mvm_session_cmd.hdr.hdr_field = APR_HDR_FIELD(
+						APR_MSG_TYPE_SEQ_CMD,
+						APR_HDR_LEN(APR_HDR_SIZE),
+						APR_PKT_VER);
+			mvm_session_cmd.hdr.pkt_size = APR_PKT_SIZE(
+						APR_HDR_SIZE,
+						sizeof(mvm_session_cmd) -
+						APR_HDR_SIZE);
+			pr_debug("%s: send mvm create session pkt size = %d\n",
+				 __func__, mvm_session_cmd.hdr.pkt_size);
+			mvm_session_cmd.hdr.src_port =
+				voice_get_idx_for_session(v->session_id);
+			mvm_session_cmd.hdr.dest_port = 0;
+			mvm_session_cmd.hdr.token = 0;
+			mvm_session_cmd.hdr.opcode =
+				VSS_IMVM_CMD_CREATE_PASSIVE_CONTROL_SESSION;
+			if (is_volte_session(v->session_id)) {
+				strlcpy(mvm_session_cmd.mvm_session.name,
+				"default volte voice",
+				strlen("default volte voice")+1);
+			} else if (is_voice2_session(v->session_id)) {
+				strlcpy(mvm_session_cmd.mvm_session.name,
+				VOICE2_SESSION_VSID_STR,
+				strlen(VOICE2_SESSION_VSID_STR)+1);
+			} else if (is_qchat_session(v->session_id)) {
+				strlcpy(mvm_session_cmd.mvm_session.name,
+				QCHAT_SESSION_VSID_STR,
+				strlen(QCHAT_SESSION_VSID_STR)+1);
+			} else if (is_vowlan_session(v->session_id)) {
+				strlcpy(mvm_session_cmd.mvm_session.name,
+				VOWLAN_SESSION_VSID_STR,
+				strlen(VOWLAN_SESSION_VSID_STR)+1);
+			} else if (is_voicemmode1(v->session_id)) {
+				strlcpy(mvm_session_cmd.mvm_session.name,
+				VOICEMMODE1_VSID_STR,
+				strlen(VOICEMMODE1_VSID_STR) + 1);
+			} else if (is_voicemmode2(v->session_id)) {
+				strlcpy(mvm_session_cmd.mvm_session.name,
+				VOICEMMODE2_VSID_STR,
+				strlen(VOICEMMODE2_VSID_STR) + 1);
+			} else {
+				strlcpy(mvm_session_cmd.mvm_session.name,
+				"default modem voice",
+				strlen("default modem voice")+1);
+			}
+
+			v->mvm_state = CMD_STATUS_FAIL;
+			v->async_err = 0;
+
+			ret = apr_send_pkt(apr_mvm,
+					(uint32_t *) &mvm_session_cmd);
+			if (ret < 0) {
+				pr_err("%s: Error sending MVM_CONTROL_SESSION\n",
+				       __func__);
+				goto fail;
+			}
+			ret = wait_event_timeout(v->mvm_wait,
+					(v->mvm_state == CMD_STATUS_SUCCESS),
+					msecs_to_jiffies(TIMEOUT_MS));
+			if (!ret) {
+				pr_err("%s: wait_event timeout\n", __func__);
+				goto fail;
+			}
+			if (v->async_err > 0) {
+				pr_err("%s: DSP returned error[%s]\n",
+					__func__, adsp_err_get_err_str(
+					v->async_err));
+				ret = adsp_err_get_lnx_err_code(
+						v->async_err);
+				goto fail;
+			}
+		} else {
+			pr_debug("%s: creating MVM full ctrl\n", __func__);
+			mvm_session_cmd.hdr.hdr_field =
+					APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+					APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
+			mvm_session_cmd.hdr.pkt_size =
+					APR_PKT_SIZE(APR_HDR_SIZE,
+					sizeof(mvm_session_cmd) -
+					APR_HDR_SIZE);
+			mvm_session_cmd.hdr.src_port =
+				voice_get_idx_for_session(v->session_id);
+			mvm_session_cmd.hdr.dest_port = 0;
+			mvm_session_cmd.hdr.token = 0;
+			mvm_session_cmd.hdr.opcode =
+				VSS_IMVM_CMD_CREATE_FULL_CONTROL_SESSION;
+			strlcpy(mvm_session_cmd.mvm_session.name,
+				"default voip",
+				strlen("default voip")+1);
+
+			v->mvm_state = CMD_STATUS_FAIL;
+			v->async_err = 0;
+
+			ret = apr_send_pkt(apr_mvm,
+					(uint32_t *) &mvm_session_cmd);
+			if (ret < 0) {
+				pr_err("Failed to send MVM_CONTROL_SESSION\n");
+				goto fail;
+			}
+			ret = wait_event_timeout(v->mvm_wait,
+					 (v->mvm_state == CMD_STATUS_SUCCESS),
+					 msecs_to_jiffies(TIMEOUT_MS));
+			if (!ret) {
+				pr_err("%s: wait_event timeout\n", __func__);
+				goto fail;
+			}
+			if (v->async_err > 0) {
+				pr_err("%s: DSP returned error[%s]\n",
+					__func__, adsp_err_get_err_str(
+					v->async_err));
+				ret = adsp_err_get_lnx_err_code(
+						v->async_err);
+				goto fail;
+			}
+		}
+		/* Get the created MVM handle. */
+		mvm_handle = voice_get_mvm_handle(v);
+	}
+	/* send cmd to create cvs session */
+	if (!cvs_handle) {
+		memset(cvs_session_cmd.cvs_session.name, 0,
+			sizeof(cvs_session_cmd.cvs_session.name));
+		if (!is_voip_session(v->session_id)) {
+			pr_debug("%s: creating CVS passive session\n",
+				 __func__);
+
+			cvs_session_cmd.hdr.hdr_field = APR_HDR_FIELD(
+						APR_MSG_TYPE_SEQ_CMD,
+						APR_HDR_LEN(APR_HDR_SIZE),
+						APR_PKT_VER);
+			cvs_session_cmd.hdr.pkt_size =
+						APR_PKT_SIZE(APR_HDR_SIZE,
+						sizeof(cvs_session_cmd) -
+						APR_HDR_SIZE);
+			cvs_session_cmd.hdr.src_port =
+				voice_get_idx_for_session(v->session_id);
+			cvs_session_cmd.hdr.dest_port = 0;
+			cvs_session_cmd.hdr.token = 0;
+			cvs_session_cmd.hdr.opcode =
+				VSS_ISTREAM_CMD_CREATE_PASSIVE_CONTROL_SESSION;
+			if (is_volte_session(v->session_id)) {
+				strlcpy(cvs_session_cmd.cvs_session.name,
+				"default volte voice",
+				strlen("default volte voice")+1);
+			} else if (is_voice2_session(v->session_id)) {
+				strlcpy(cvs_session_cmd.cvs_session.name,
+				VOICE2_SESSION_VSID_STR,
+				strlen(VOICE2_SESSION_VSID_STR)+1);
+			} else if (is_qchat_session(v->session_id)) {
+				strlcpy(cvs_session_cmd.cvs_session.name,
+				QCHAT_SESSION_VSID_STR,
+				strlen(QCHAT_SESSION_VSID_STR)+1);
+			} else if (is_vowlan_session(v->session_id)) {
+				strlcpy(cvs_session_cmd.cvs_session.name,
+				VOWLAN_SESSION_VSID_STR,
+				strlen(VOWLAN_SESSION_VSID_STR)+1);
+			} else if (is_voicemmode1(v->session_id)) {
+				strlcpy(cvs_session_cmd.cvs_session.name,
+				VOICEMMODE1_VSID_STR,
+				strlen(VOICEMMODE1_VSID_STR) + 1);
+			} else if (is_voicemmode2(v->session_id)) {
+				strlcpy(cvs_session_cmd.cvs_session.name,
+				VOICEMMODE2_VSID_STR,
+				strlen(VOICEMMODE2_VSID_STR) + 1);
+			} else {
+				strlcpy(cvs_session_cmd.cvs_session.name,
+					"default modem voice",
+					strlen("default modem voice")+1);
+			}
+			v->cvs_state = CMD_STATUS_FAIL;
+			v->async_err = 0;
+
+			ret = apr_send_pkt(apr_cvs,
+					(uint32_t *) &cvs_session_cmd);
+			if (ret < 0) {
+				pr_err("Failed to send STREAM_CONTROL_SESSION\n");
+				goto fail;
+			}
+			ret = wait_event_timeout(v->cvs_wait,
+					 (v->cvs_state == CMD_STATUS_SUCCESS),
+					 msecs_to_jiffies(TIMEOUT_MS));
+			if (!ret) {
+				pr_err("%s: wait_event timeout\n", __func__);
+				goto fail;
+			}
+			if (v->async_err > 0) {
+				pr_err("%s: DSP returned error[%s]\n",
+					__func__, adsp_err_get_err_str(
+					v->async_err));
+				ret = adsp_err_get_lnx_err_code(
+						v->async_err);
+				goto fail;
+			}
+			/* Get the created CVS handle. */
+			cvs_handle = voice_get_cvs_handle(v);
+
+		} else {
+			pr_debug("%s: creating CVS full session\n", __func__);
+
+			cvs_full_ctl_cmd.hdr.hdr_field =
+					APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+					APR_HDR_LEN(APR_HDR_SIZE),
+					APR_PKT_VER);
+
+			cvs_full_ctl_cmd.hdr.pkt_size =
+					APR_PKT_SIZE(APR_HDR_SIZE,
+					sizeof(cvs_full_ctl_cmd) -
+					APR_HDR_SIZE);
+
+			cvs_full_ctl_cmd.hdr.src_port =
+				voice_get_idx_for_session(v->session_id);
+			cvs_full_ctl_cmd.hdr.dest_port = 0;
+			cvs_full_ctl_cmd.hdr.token = 0;
+			cvs_full_ctl_cmd.hdr.opcode =
+				VSS_ISTREAM_CMD_CREATE_FULL_CONTROL_SESSION;
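+			/*
+			 * direction 2 selects both Tx and Rx paths
+			 * (full-duplex VoIP stream), per the CVD stream
+			 * direction convention.
+			 */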
+			cvs_full_ctl_cmd.cvs_session.direction = 2;
+			cvs_full_ctl_cmd.cvs_session.enc_media_type =
+						common.mvs_info.media_type;
+			cvs_full_ctl_cmd.cvs_session.dec_media_type =
+						common.mvs_info.media_type;
+			cvs_full_ctl_cmd.cvs_session.network_id =
+					       common.mvs_info.network_type;
+			strlcpy(cvs_full_ctl_cmd.cvs_session.name,
+				"default q6 voice",
+				strlen("default q6 voice")+1);
+
+			v->cvs_state = CMD_STATUS_FAIL;
+			v->async_err = 0;
+
+			ret = apr_send_pkt(apr_cvs,
+					   (uint32_t *) &cvs_full_ctl_cmd);
+
+			if (ret < 0) {
+				pr_err("%s: Err %d sending CREATE_FULL_CTRL\n",
+					__func__, ret);
+				goto fail;
+			}
+			ret = wait_event_timeout(v->cvs_wait,
+					(v->cvs_state == CMD_STATUS_SUCCESS),
+					msecs_to_jiffies(TIMEOUT_MS));
+			if (!ret) {
+				pr_err("%s: wait_event timeout\n", __func__);
+				goto fail;
+			}
+			if (v->async_err > 0) {
+				pr_err("%s: DSP returned error[%s]\n",
+					__func__, adsp_err_get_err_str(
+					v->async_err));
+				ret = adsp_err_get_lnx_err_code(
+						v->async_err);
+				goto fail;
+			}
+			/* Get the created CVS handle. */
+			cvs_handle = voice_get_cvs_handle(v);
+
+			/* Attach MVM to CVS. */
+			pr_debug("%s: Attach MVM to stream\n", __func__);
+
+			attach_stream_cmd.hdr.hdr_field =
+					APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+					APR_HDR_LEN(APR_HDR_SIZE),
+					APR_PKT_VER);
+			attach_stream_cmd.hdr.pkt_size =
+					APR_PKT_SIZE(APR_HDR_SIZE,
+					sizeof(attach_stream_cmd) -
+					APR_HDR_SIZE);
+			attach_stream_cmd.hdr.src_port =
+				voice_get_idx_for_session(v->session_id);
+			attach_stream_cmd.hdr.dest_port = mvm_handle;
+			attach_stream_cmd.hdr.token = 0;
+			attach_stream_cmd.hdr.opcode =
+						VSS_IMVM_CMD_ATTACH_STREAM;
+			attach_stream_cmd.attach_stream.handle = cvs_handle;
+
+			v->mvm_state = CMD_STATUS_FAIL;
+			v->async_err = 0;
+			ret = apr_send_pkt(apr_mvm,
+					   (uint32_t *) &attach_stream_cmd);
+			if (ret < 0) {
+				pr_err("%s: Error %d sending ATTACH_STREAM\n",
+				       __func__, ret);
+				goto fail;
+			}
+			ret = wait_event_timeout(v->mvm_wait,
+					 (v->mvm_state == CMD_STATUS_SUCCESS),
+					 msecs_to_jiffies(TIMEOUT_MS));
+			if (!ret) {
+				pr_err("%s: wait_event timeout\n", __func__);
+				goto fail;
+			}
+			if (v->async_err > 0) {
+				pr_err("%s: DSP returned error[%s]\n",
+					__func__, adsp_err_get_err_str(
+					v->async_err));
+				ret = adsp_err_get_lnx_err_code(
+						v->async_err);
+				goto fail;
+			}
+		}
+	}
+	return 0;
+
+fail:
+	return ret;
+}
+
+static int voice_unmap_cal_block(struct voice_data *v, int cal_index)
+{
+	int result = 0;
+	struct cal_block_data *cal_block;
+
+	if (common.cal_data[cal_index] == NULL) {
+		pr_err("%s: Cal type is NULL, index %d!\n",
+			__func__, cal_index);
+
+		goto done;
+	}
+
+	mutex_lock(&common.cal_data[cal_index]->lock);
+	cal_block = cal_utils_get_only_cal_block(
+		common.cal_data[cal_index]);
+	if (cal_block == NULL) {
+		pr_err("%s: Cal block is NULL, index %d!\n",
+			__func__, cal_index);
+
+		result = -EINVAL;
+		goto unlock;
+	}
+
+	if (cal_block->map_data.q6map_handle == 0) {
+		pr_debug("%s: Q6 handle is not set!\n", __func__);
+
+		result = -EINVAL;
+		goto unlock;
+	}
+
+	mutex_lock(&common.common_lock);
+	result = voice_send_mvm_unmap_memory_physical_cmd(
+		v, cal_block->map_data.q6map_handle);
+	if (result)
+		pr_err("%s: voice_send_mvm_unmap_memory_physical_cmd failed for session 0x%x, err %d!\n",
+			__func__, v->session_id, result);
+
+	cal_block->map_data.q6map_handle = 0;
+	mutex_unlock(&common.common_lock);
+unlock:
+	mutex_unlock(&common.cal_data[cal_index]->lock);
+done:
+	return result;
+}
+
+static int voice_destroy_mvm_cvs_session(struct voice_data *v)
+{
+	int ret = 0;
+	struct mvm_detach_stream_cmd detach_stream;
+	struct apr_hdr mvm_destroy;
+	struct apr_hdr cvs_destroy;
+	void *apr_mvm, *apr_cvs;
+	u16 mvm_handle, cvs_handle;
+
+	if (v == NULL) {
+		pr_err("%s: v is NULL\n", __func__);
+		return -EINVAL;
+	}
+	apr_mvm = common.apr_q6_mvm;
+	apr_cvs = common.apr_q6_cvs;
+
+	if (!apr_mvm || !apr_cvs) {
+		pr_err("%s: apr_mvm or apr_cvs is NULL\n", __func__);
+		return -EINVAL;
+	}
+	mvm_handle = voice_get_mvm_handle(v);
+	cvs_handle = voice_get_cvs_handle(v);
+
+	/* The stream detach below applies only to full control (VoIP) sessions. */
+	if (is_voip_session(v->session_id)) {
+		pr_debug("%s: MVM detach stream, VOC_STATE: %d\n", __func__,
+				v->voc_state);
+
+		/* Detach voice stream. */
+		detach_stream.hdr.hdr_field =
+					APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+					APR_HDR_LEN(APR_HDR_SIZE),
+					APR_PKT_VER);
+		detach_stream.hdr.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE,
+					sizeof(detach_stream) - APR_HDR_SIZE);
+		detach_stream.hdr.src_port =
+				voice_get_idx_for_session(v->session_id);
+		detach_stream.hdr.dest_port = mvm_handle;
+		detach_stream.hdr.token = 0;
+		detach_stream.hdr.opcode = VSS_IMVM_CMD_DETACH_STREAM;
+		detach_stream.detach_stream.handle = cvs_handle;
+
+		v->mvm_state = CMD_STATUS_FAIL;
+		v->async_err = 0;
+		ret = apr_send_pkt(apr_mvm, (uint32_t *) &detach_stream);
+		if (ret < 0) {
+			pr_err("%s: Error %d sending DETACH_STREAM\n",
+			       __func__, ret);
+
+			goto fail;
+		}
+		ret = wait_event_timeout(v->mvm_wait,
+					 (v->mvm_state == CMD_STATUS_SUCCESS),
+					 msecs_to_jiffies(TIMEOUT_MS));
+		if (!ret) {
+			pr_err("%s: wait event timeout\n", __func__);
+
+			goto fail;
+		}
+		if (v->async_err > 0) {
+			pr_err("%s: DSP returned error[%s]\n",
+				__func__, adsp_err_get_err_str(
+				v->async_err));
+			ret = adsp_err_get_lnx_err_code(
+					v->async_err);
+			goto fail;
+		}
+
+		/* Unmap memory */
+		if (v->shmem_info.mem_handle != 0) {
+			ret = voice_send_mvm_unmap_memory_physical_cmd(v,
+						v->shmem_info.mem_handle);
+			if (ret < 0) {
+				pr_err("%s: Memory unmap for voip failed %d\n",
+				       __func__, ret);
+
+				goto fail;
+			}
+			v->shmem_info.mem_handle = 0;
+		}
+	}
+
+	/* Unmap Source Tracking shared memory if mapped earlier */
+	voice_unmap_and_free_source_tracking_shared_memory(v);
+
+	if (is_voip_session(v->session_id) ||
+	    is_qchat_session(v->session_id) ||
+	    is_volte_session(v->session_id) ||
+	    is_vowlan_session(v->session_id) ||
+	    v->voc_state == VOC_ERROR || common.is_destroy_cvd) {
+		/* Destroy CVS. */
+		pr_debug("%s: CVS destroy session\n", __func__);
+
+		cvs_destroy.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+						      APR_HDR_LEN(APR_HDR_SIZE),
+						      APR_PKT_VER);
+		cvs_destroy.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE,
+					sizeof(cvs_destroy) - APR_HDR_SIZE);
+		cvs_destroy.src_port =
+				voice_get_idx_for_session(v->session_id);
+		cvs_destroy.dest_port = cvs_handle;
+		cvs_destroy.token = 0;
+		cvs_destroy.opcode = APRV2_IBASIC_CMD_DESTROY_SESSION;
+
+		v->cvs_state = CMD_STATUS_FAIL;
+		v->async_err = 0;
+		ret = apr_send_pkt(apr_cvs, (uint32_t *) &cvs_destroy);
+		if (ret < 0) {
+			pr_err("%s: Error %d sending CVS DESTROY\n",
+			       __func__, ret);
+
+			goto fail;
+		}
+		ret = wait_event_timeout(v->cvs_wait,
+					 (v->cvs_state == CMD_STATUS_SUCCESS),
+					 msecs_to_jiffies(TIMEOUT_MS));
+		if (!ret) {
+			pr_err("%s: wait event timeout\n", __func__);
+
+			goto fail;
+		}
+		if (v->async_err > 0) {
+			pr_err("%s: DSP returned error[%s]\n",
+				__func__, adsp_err_get_err_str(
+				v->async_err));
+			ret = adsp_err_get_lnx_err_code(
+					v->async_err);
+			goto fail;
+		}
+		cvs_handle = 0;
+		voice_set_cvs_handle(v, cvs_handle);
+
+		/* Unmap physical memory for all calibration buffers */
+		if (!is_other_session_active(v->session_id)) {
+			if (voice_unmap_cal_block(v, CVP_VOCPROC_CAL))
+				pr_err("%s: Unmap VOCPROC cal failed\n",
+					__func__);
+			if (voice_unmap_cal_block(v, CVP_VOCVOL_CAL))
+				pr_err("%s: Unmap VOCVOL cal failed\n",
+					__func__);
+			if (voice_unmap_cal_block(v, CVP_VOCDEV_CFG_CAL))
+				pr_err("%s: Unmap VOCDEV_CFG cal failed\n",
+					__func__);
+			if (voice_unmap_cal_block(v, CVS_VOCSTRM_CAL))
+				pr_err("%s: Unmap VOCSTRM cal failed\n",
+					__func__);
+		}
+
+		/* Destroy MVM. */
+		pr_debug("%s: MVM destroy session\n", __func__);
+
+		mvm_destroy.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+						      APR_HDR_LEN(APR_HDR_SIZE),
+						      APR_PKT_VER);
+		mvm_destroy.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE,
+					    sizeof(mvm_destroy) - APR_HDR_SIZE);
+		mvm_destroy.src_port =
+				voice_get_idx_for_session(v->session_id);
+		mvm_destroy.dest_port = mvm_handle;
+		mvm_destroy.token = 0;
+		mvm_destroy.opcode = APRV2_IBASIC_CMD_DESTROY_SESSION;
+
+		v->mvm_state = CMD_STATUS_FAIL;
+		v->async_err = 0;
+
+		ret = apr_send_pkt(apr_mvm, (uint32_t *) &mvm_destroy);
+		if (ret < 0) {
+			pr_err("%s: Error %d sending MVM DESTROY\n",
+			       __func__, ret);
+
+			goto fail;
+		}
+		ret = wait_event_timeout(v->mvm_wait,
+					 (v->mvm_state == CMD_STATUS_SUCCESS),
+					 msecs_to_jiffies(TIMEOUT_MS));
+		if (!ret) {
+			pr_err("%s: wait event timeout\n", __func__);
+			goto fail;
+		}
+		if (v->async_err > 0) {
+			pr_err("%s: DSP returned error[%s]\n",
+				__func__, adsp_err_get_err_str(
+				v->async_err));
+			ret = adsp_err_get_lnx_err_code(
+					v->async_err);
+			goto fail;
+		}
+		mvm_handle = 0;
+		voice_set_mvm_handle(v, mvm_handle);
+	}
+	return 0;
+fail:
+	return ret;
+}
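+
+/*
+ * Teardown order used above: detach the CVS stream from MVM, unmap the
+ * VoIP shared memory, destroy the CVS session, unmap the calibration
+ * blocks (only when no other session still needs them), then destroy
+ * the MVM session itself.
+ */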
+
+static int voice_send_tty_mode_cmd(struct voice_data *v)
+{
+	int ret = 0;
+	struct mvm_set_tty_mode_cmd mvm_tty_mode_cmd;
+	void *apr_mvm;
+	u16 mvm_handle;
+
+	if (v == NULL) {
+		pr_err("%s: v is NULL\n", __func__);
+		return -EINVAL;
+	}
+	apr_mvm = common.apr_q6_mvm;
+
+	if (!apr_mvm) {
+		pr_err("%s: apr_mvm is NULL.\n", __func__);
+		return -EINVAL;
+	}
+	mvm_handle = voice_get_mvm_handle(v);
+
+	/* send tty mode cmd to mvm */
+	mvm_tty_mode_cmd.hdr.hdr_field = APR_HDR_FIELD(
+					APR_MSG_TYPE_SEQ_CMD,
+					APR_HDR_LEN(APR_HDR_SIZE),
+					APR_PKT_VER);
+	mvm_tty_mode_cmd.hdr.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE,
+					sizeof(mvm_tty_mode_cmd) -
+					APR_HDR_SIZE);
+	pr_debug("%s: pkt size = %d\n",
+		 __func__, mvm_tty_mode_cmd.hdr.pkt_size);
+	mvm_tty_mode_cmd.hdr.src_port =
+			voice_get_idx_for_session(v->session_id);
+	mvm_tty_mode_cmd.hdr.dest_port = mvm_handle;
+	mvm_tty_mode_cmd.hdr.token = 0;
+	mvm_tty_mode_cmd.hdr.opcode = VSS_ISTREAM_CMD_SET_TTY_MODE;
+	mvm_tty_mode_cmd.tty_mode.mode = v->tty_mode;
+	pr_debug("tty mode = %d\n", mvm_tty_mode_cmd.tty_mode.mode);
+
+	v->mvm_state = CMD_STATUS_FAIL;
+	v->async_err = 0;
+	ret = apr_send_pkt(apr_mvm, (uint32_t *) &mvm_tty_mode_cmd);
+	if (ret < 0) {
+		pr_err("%s: Error %d sending SET_TTY_MODE\n",
+		       __func__, ret);
+		goto fail;
+	}
+	ret = wait_event_timeout(v->mvm_wait,
+				 (v->mvm_state == CMD_STATUS_SUCCESS),
+				 msecs_to_jiffies(TIMEOUT_MS));
+	if (!ret) {
+		pr_err("%s: wait_event timeout\n", __func__);
+		goto fail;
+	}
+	if (v->async_err > 0) {
+		pr_err("%s: DSP returned error[%s]\n",
+				__func__, adsp_err_get_err_str(
+				v->async_err));
+		ret = adsp_err_get_lnx_err_code(
+				v->async_err);
+		goto fail;
+	}
+	return 0;
+fail:
+	return ret;
+}
+
+static int voice_send_set_pp_enable_cmd(struct voice_data *v,
+					uint32_t module_id, int enable)
+{
+	struct cvs_set_pp_enable_cmd cvs_set_pp_cmd;
+	int ret = 0;
+	void *apr_cvs;
+	u16 cvs_handle;
+
+	if (v == NULL) {
+		pr_err("%s: v is NULL\n", __func__);
+		return -EINVAL;
+	}
+	apr_cvs = common.apr_q6_cvs;
+
+	if (!apr_cvs) {
+		pr_err("%s: apr_cvs is NULL.\n", __func__);
+		return -EINVAL;
+	}
+	cvs_handle = voice_get_cvs_handle(v);
+
+	cvs_set_pp_cmd.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+						     APR_HDR_LEN(APR_HDR_SIZE),
+						     APR_PKT_VER);
+	cvs_set_pp_cmd.hdr.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE,
+						   sizeof(cvs_set_pp_cmd) -
+						   APR_HDR_SIZE);
+	cvs_set_pp_cmd.hdr.src_port = voice_get_idx_for_session(v->session_id);
+	cvs_set_pp_cmd.hdr.dest_port = cvs_handle;
+	cvs_set_pp_cmd.hdr.token = 0;
+	cvs_set_pp_cmd.hdr.opcode = VSS_ICOMMON_CMD_SET_UI_PROPERTY;
+
+	cvs_set_pp_cmd.vss_set_pp.module_id = module_id;
+	cvs_set_pp_cmd.vss_set_pp.param_id = VOICE_PARAM_MOD_ENABLE;
+	cvs_set_pp_cmd.vss_set_pp.param_size = MOD_ENABLE_PARAM_LEN;
+	cvs_set_pp_cmd.vss_set_pp.reserved = 0;
+	cvs_set_pp_cmd.vss_set_pp.enable = enable;
+	cvs_set_pp_cmd.vss_set_pp.reserved_field = 0;
+	pr_debug("%s: module_id=%d, enable=%d\n", __func__,
+		module_id, enable);
+
+	v->cvs_state = CMD_STATUS_FAIL;
+	v->async_err = 0;
+	ret = apr_send_pkt(apr_cvs, (uint32_t *) &cvs_set_pp_cmd);
+	if (ret < 0) {
+		pr_err("%s: Failed to send cvs set pp enable\n", __func__);
+		goto fail;
+	}
+	ret = wait_event_timeout(v->cvs_wait,
+				 (v->cvs_state == CMD_STATUS_SUCCESS),
+				 msecs_to_jiffies(TIMEOUT_MS));
+	if (!ret) {
+		pr_err("%s: wait_event timeout\n", __func__);
+		goto fail;
+	}
+	if (v->async_err > 0) {
+		pr_err("%s: DSP returned error[%s]\n",
+				__func__, adsp_err_get_err_str(
+				v->async_err));
+		ret = adsp_err_get_lnx_err_code(
+				v->async_err);
+		goto fail;
+	}
+	return 0;
+fail:
+	return ret;
+}
+
+static int voice_send_hd_cmd(struct voice_data *v, int enable)
+{
+	struct mvm_set_hd_enable_cmd mvm_set_hd_cmd;
+	int ret = 0;
+	void *apr_mvm;
+	u16 mvm_handle;
+
+	if (v == NULL) {
+		pr_err("%s: v is NULL\n", __func__);
+
+		ret = -EINVAL;
+		goto done;
+	}
+
+	apr_mvm = common.apr_q6_mvm;
+	if (!apr_mvm) {
+		pr_err("%s: apr_mvm is NULL.\n", __func__);
+
+		ret = -EINVAL;
+		goto done;
+	}
+
+	mvm_handle = voice_get_mvm_handle(v);
+	if (!mvm_handle) {
+		pr_err("%s: mvm_handle is NULL\n", __func__);
+
+		ret = -EINVAL;
+		goto done;
+	}
+
+	mvm_set_hd_cmd.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+						     APR_HDR_LEN(APR_HDR_SIZE),
+						     APR_PKT_VER);
+	mvm_set_hd_cmd.hdr.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE,
+						   sizeof(mvm_set_hd_cmd) -
+						   APR_HDR_SIZE);
+	mvm_set_hd_cmd.hdr.src_port = voice_get_idx_for_session(v->session_id);
+	mvm_set_hd_cmd.hdr.dest_port = mvm_handle;
+	mvm_set_hd_cmd.hdr.token = 0;
+
+	if (enable)
+		mvm_set_hd_cmd.hdr.opcode = VSS_IHDVOICE_CMD_ENABLE;
+	else
+		mvm_set_hd_cmd.hdr.opcode = VSS_IHDVOICE_CMD_DISABLE;
+
+	pr_debug("%s: enable=%d\n", __func__, enable);
+
+	v->mvm_state = CMD_STATUS_FAIL;
+	v->async_err = 0;
+	ret = apr_send_pkt(apr_mvm, (uint32_t *) &mvm_set_hd_cmd);
+	if (ret < 0) {
+		pr_err("%s: Failed to send mvm set HD Voice enable %d\n",
+		       __func__, ret);
+
+		ret = -EINVAL;
+		goto done;
+	}
+
+	ret = wait_event_timeout(v->mvm_wait,
+				 (v->mvm_state == CMD_STATUS_SUCCESS),
+				 msecs_to_jiffies(TIMEOUT_MS));
+	if (!ret) {
+		pr_err("%s: wait_event timeout\n", __func__);
+
+		ret = -EINVAL;
+		goto done;
+	}
+	if (v->async_err > 0) {
+		pr_err("%s: DSP returned error[%s]\n",
+				__func__, adsp_err_get_err_str(
+				v->async_err));
+		ret = adsp_err_get_lnx_err_code(
+				v->async_err);
+		goto done;
+	}
+
+done:
+	return ret;
+}
+
+static int voice_set_dtx(struct voice_data *v)
+{
+	int ret = 0;
+	void *apr_cvs;
+	u16 cvs_handle;
+	struct cvs_set_enc_dtx_mode_cmd cvs_set_dtx;
+
+	if (v == NULL) {
+		pr_err("%s: v is NULL\n", __func__);
+		return -EINVAL;
+	}
+	apr_cvs = common.apr_q6_cvs;
+
+	if (!apr_cvs) {
+		pr_err("%s: apr_cvs is NULL.\n", __func__);
+		return -EINVAL;
+	}
+
+	cvs_handle = voice_get_cvs_handle(v);
+
+	/* Set DTX */
+	cvs_set_dtx.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+					      APR_HDR_LEN(APR_HDR_SIZE),
+					      APR_PKT_VER);
+	cvs_set_dtx.hdr.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE,
+					sizeof(cvs_set_dtx) - APR_HDR_SIZE);
+	cvs_set_dtx.hdr.src_port =
+				voice_get_idx_for_session(v->session_id);
+	cvs_set_dtx.hdr.dest_port = cvs_handle;
+	cvs_set_dtx.hdr.token = 0;
+	cvs_set_dtx.hdr.opcode = VSS_ISTREAM_CMD_SET_ENC_DTX_MODE;
+	cvs_set_dtx.dtx_mode.enable = common.mvs_info.dtx_mode;
+
+	pr_debug("%s: Setting DTX %d\n", __func__, common.mvs_info.dtx_mode);
+
+	v->cvs_state = CMD_STATUS_FAIL;
+	v->async_err = 0;
+
+	ret = apr_send_pkt(apr_cvs, (uint32_t *) &cvs_set_dtx);
+	if (ret < 0) {
+		pr_err("%s: Error %d sending SET_DTX\n", __func__, ret);
+		return -EINVAL;
+	}
+
+	ret = wait_event_timeout(v->cvs_wait,
+				 (v->cvs_state == CMD_STATUS_SUCCESS),
+				 msecs_to_jiffies(TIMEOUT_MS));
+	if (!ret) {
+		pr_err("%s: wait_event timeout\n", __func__);
+		return -EINVAL;
+	}
+	if (v->async_err > 0) {
+		pr_err("%s: DSP returned error[%s]\n",
+				__func__, adsp_err_get_err_str(
+				v->async_err));
+		ret = adsp_err_get_lnx_err_code(
+				v->async_err);
+		return ret;
+	}
+
+	return 0;
+}
+
+static int voice_send_mvm_media_type_cmd(struct voice_data *v)
+{
+	struct vss_imvm_cmd_set_cal_media_type_t mvm_set_cal_media_type;
+	int ret = 0;
+	void *apr_mvm;
+	u16 mvm_handle;
+
+	if (v == NULL) {
+		pr_err("%s: v is NULL\n", __func__);
+		return -EINVAL;
+	}
+	apr_mvm = common.apr_q6_mvm;
+
+	if (!apr_mvm) {
+		pr_err("%s: apr_mvm is NULL.\n", __func__);
+		return -EINVAL;
+	}
+	mvm_handle = voice_get_mvm_handle(v);
+
+	mvm_set_cal_media_type.hdr.hdr_field =
+					APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+					APR_HDR_LEN(APR_HDR_SIZE),
+					APR_PKT_VER);
+	mvm_set_cal_media_type.hdr.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE,
+					sizeof(mvm_set_cal_media_type) -
+					APR_HDR_SIZE);
+	mvm_set_cal_media_type.hdr.src_port =
+				voice_get_idx_for_session(v->session_id);
+	mvm_set_cal_media_type.hdr.dest_port = mvm_handle;
+	mvm_set_cal_media_type.hdr.token = 0;
+	mvm_set_cal_media_type.hdr.opcode = VSS_IMVM_CMD_SET_CAL_MEDIA_TYPE;
+	mvm_set_cal_media_type.media_id = common.mvs_info.media_type;
+	pr_debug("%s: setting media_id as %x\n",
+		 __func__ , mvm_set_cal_media_type.media_id);
+
+	v->mvm_state = CMD_STATUS_FAIL;
+	v->async_err = 0;
+	ret = apr_send_pkt(apr_mvm, (uint32_t *) &mvm_set_cal_media_type);
+	if (ret < 0) {
+		pr_err("%s: Error %d sending media type\n", __func__, ret);
+		goto fail;
+	}
+
+	ret = wait_event_timeout(v->mvm_wait,
+				(v->mvm_state == CMD_STATUS_SUCCESS),
+				 msecs_to_jiffies(TIMEOUT_MS));
+	if (!ret) {
+		pr_err("%s: wait_event timeout %d\n", __func__, ret);
+		goto fail;
+	}
+	if (v->async_err > 0) {
+		pr_err("%s: DSP returned error[%s]\n",
+				__func__, adsp_err_get_err_str(
+				v->async_err));
+		ret = adsp_err_get_lnx_err_code(
+				v->async_err);
+		goto fail;
+	}
+	return 0;
+fail:
+	return ret;
+}
+
+static int voice_send_dtmf_rx_detection_cmd(struct voice_data *v,
+					    uint32_t enable)
+{
+	int ret = 0;
+	void *apr_cvs;
+	u16 cvs_handle;
+	struct cvs_set_rx_dtmf_detection_cmd cvs_dtmf_rx_detection;
+
+	if (v == NULL) {
+		pr_err("%s: v is NULL\n", __func__);
+		return -EINVAL;
+	}
+	apr_cvs = common.apr_q6_cvs;
+
+	if (!apr_cvs) {
+		pr_err("%s: apr_cvs is NULL.\n", __func__);
+		return -EINVAL;
+	}
+
+	cvs_handle = voice_get_cvs_handle(v);
+
+	/* Set SET_DTMF_RX_DETECTION */
+	cvs_dtmf_rx_detection.hdr.hdr_field =
+				APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+					      APR_HDR_LEN(APR_HDR_SIZE),
+					      APR_PKT_VER);
+	cvs_dtmf_rx_detection.hdr.pkt_size =
+				APR_PKT_SIZE(APR_HDR_SIZE,
+				sizeof(cvs_dtmf_rx_detection) - APR_HDR_SIZE);
+	cvs_dtmf_rx_detection.hdr.src_port =
+				voice_get_idx_for_session(v->session_id);
+	cvs_dtmf_rx_detection.hdr.dest_port = cvs_handle;
+	cvs_dtmf_rx_detection.hdr.token = 0;
+	cvs_dtmf_rx_detection.hdr.opcode =
+					VSS_ISTREAM_CMD_SET_RX_DTMF_DETECTION;
+	cvs_dtmf_rx_detection.cvs_dtmf_det.enable = enable;
+
+	v->cvs_state = CMD_STATUS_FAIL;
+	v->async_err = 0;
+
+	ret = apr_send_pkt(apr_cvs, (uint32_t *) &cvs_dtmf_rx_detection);
+	if (ret < 0) {
+		pr_err("%s: Error %d sending SET_DTMF_RX_DETECTION\n",
+		       __func__,
+		       ret);
+		return -EINVAL;
+	}
+
+	ret = wait_event_timeout(v->cvs_wait,
+				 (v->cvs_state == CMD_STATUS_SUCCESS),
+				 msecs_to_jiffies(TIMEOUT_MS));
+
+	if (!ret) {
+		pr_err("%s: wait_event timeout\n", __func__);
+		return -EINVAL;
+	}
+	if (v->async_err > 0) {
+		pr_err("%s: DSP returned error[%s]\n",
+				__func__, adsp_err_get_err_str(
+				v->async_err));
+		ret = adsp_err_get_lnx_err_code(
+				v->async_err);
+		return ret;
+	}
+
+	return 0;
+}
+
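+/*
+ * Vote the VMBMS_VOICE_CALL_BIT high- or low-power property on the
+ * "bms" power supply for circuit-switched calls (VoIP and VoWLAN
+ * sessions are skipped), presumably so the battery fuel gauge can
+ * adjust its behaviour for the duration of the call.
+ */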
+static void voice_vote_powerstate_to_bms(struct voice_data *v, bool state)
+{
+	union power_supply_propval pval = {0, };
+
+	if (!v->psy)
+		v->psy = power_supply_get_by_name("bms");
+	if (v->psy && !(is_voip_session(v->session_id) ||
+			is_vowlan_session(v->session_id))) {
+		pval.intval = VMBMS_VOICE_CALL_BIT;
+		if (state) {
+			power_supply_set_property(v->psy,
+				POWER_SUPPLY_PROP_HI_POWER,
+				&pval);
+			pr_debug("%s : Vote High power to BMS\n",
+				__func__);
+		} else {
+			power_supply_set_property(v->psy,
+				POWER_SUPPLY_PROP_LOW_POWER,
+				&pval);
+			pr_debug("%s: Vote low power to BMS\n",
+				__func__);
+		}
+	} else {
+		pr_debug("%s: No OP\n", __func__);
+	}
+}
+
+void voc_disable_dtmf_det_on_active_sessions(void)
+{
+	struct voice_data *v = NULL;
+	int i;
+
+	for (i = 0; i < MAX_VOC_SESSIONS; i++) {
+		v = &common.voice[i];
+		if ((v->dtmf_rx_detect_en) &&
+			is_voc_state_active(v->voc_state)) {
+
+			pr_debug("disable dtmf det on ses_id=%d\n",
+				 v->session_id);
+			voice_send_dtmf_rx_detection_cmd(v, 0);
+		}
+	}
+}
+
+int voc_enable_dtmf_rx_detection(uint32_t session_id, uint32_t enable)
+{
+	struct voice_data *v = voice_get_session(session_id);
+	int ret = 0;
+
+	if (v == NULL) {
+		pr_err("%s: invalid session_id 0x%x\n", __func__, session_id);
+		return -EINVAL;
+	}
+
+	mutex_lock(&v->lock);
+	v->dtmf_rx_detect_en = enable;
+
+	if (is_voc_state_active(v->voc_state))
+		ret = voice_send_dtmf_rx_detection_cmd(v,
+						       v->dtmf_rx_detect_en);
+
+	mutex_unlock(&v->lock);
+
+	return ret;
+}
+
+void voc_set_destroy_cvd_flag(bool is_destroy_cvd)
+{
+	pr_debug("%s: %d\n", __func__, is_destroy_cvd);
+	common.is_destroy_cvd = is_destroy_cvd;
+}
+
+void voc_set_vote_bms_flag(bool is_vote_bms)
+{
+	pr_debug("%s: flag value: %d\n", __func__, is_vote_bms);
+	common.is_vote_bms = is_vote_bms;
+}
+
+int voc_alloc_cal_shared_memory(void)
+{
+	int rc = 0;
+
+	mutex_lock(&common.common_lock);
+	if (is_cal_memory_allocated()) {
+		pr_debug("%s: Calibration shared buffer already allocated\n",
+			 __func__);
+	} else {
+		/* Allocate memory for calibration memory map table. */
+		rc = voice_alloc_cal_mem_map_table();
+		if ((rc < 0) && (rc != -EPROBE_DEFER)) {
+			pr_err("%s: Failed to allocate cal memory, err=%d\n",
+			       __func__, rc);
+		}
+	}
+	mutex_unlock(&common.common_lock);
+
+	return rc;
+}
+
+int voc_alloc_voip_shared_memory(void)
+{
+	int rc = 0;
+
+	/* Allocate shared memory for OOB Voip */
+	rc = voice_alloc_oob_shared_mem();
+	if (rc < 0) {
+		pr_err("%s: Failed to alloc shared memory for OOB rc:%d\n",
+			   __func__, rc);
+	} else {
+		/* Allocate mem map table for OOB */
+		rc = voice_alloc_oob_mem_table();
+		if (rc < 0) {
+			pr_err("%s: Failed to alloc mem map table rc:%d\n",
+			       __func__, rc);
+
+			voice_free_oob_shared_mem();
+		}
+	}
+
+	return rc;
+}
+
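+/*
+ * A cal/RTAC mapping is considered allocated only when both the ION
+ * client and the ION handle of its mem map table are set.
+ */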
+static int is_cal_memory_allocated(void)
+{
+	return (common.cal_mem_map_table.client != NULL &&
+		common.cal_mem_map_table.handle != NULL);
+}
+
+static int free_cal_map_table(void)
+{
+	int ret = 0;
+
+	if ((common.cal_mem_map_table.client == NULL) ||
+		(common.cal_mem_map_table.handle == NULL))
+		goto done;
+
+	ret = msm_audio_ion_free(common.cal_mem_map_table.client,
+		common.cal_mem_map_table.handle);
+	if (ret < 0)
+		pr_err("%s: msm_audio_ion_free failed\n", __func__);
+
+done:
+	common.cal_mem_map_table.client = NULL;
+	common.cal_mem_map_table.handle = NULL;
+	return ret;
+}
+
+static int is_rtac_memory_allocated(void)
+{
+	return (common.rtac_mem_map_table.client != NULL &&
+		common.rtac_mem_map_table.handle != NULL);
+}
+
+static int free_rtac_map_table(void)
+{
+	int ret = 0;
+
+	if ((common.rtac_mem_map_table.client == NULL) ||
+		(common.rtac_mem_map_table.handle == NULL))
+		goto done;
+
+	ret = msm_audio_ion_free(common.rtac_mem_map_table.client,
+		common.rtac_mem_map_table.handle);
+	if (ret < 0)
+		pr_err("%s: msm_audio_ion_free failed\n", __func__);
+
+done:
+	common.rtac_mem_map_table.client = NULL;
+	common.rtac_mem_map_table.handle = NULL;
+	return ret;
+}
+
+static int is_voip_memory_allocated(void)
+{
+	bool ret;
+	struct voice_data *v = voice_get_session(
+				common.voice[VOC_PATH_FULL].session_id);
+
+	if (v == NULL) {
+		pr_err("%s: v is NULL, session_id:%d\n", __func__,
+		common.voice[VOC_PATH_FULL].session_id);
+
+		ret = false;
+		goto done;
+	}
+
+	mutex_lock(&common.common_lock);
+	ret = (v->shmem_info.sh_buf.client != NULL &&
+	       v->shmem_info.sh_buf.handle != NULL);
+	mutex_unlock(&common.common_lock);
+
+done:
+	return ret;
+}
+
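+/*
+ * Program the AMR encoder rate: the opcode is chosen from the current
+ * media type (narrowband vs. wideband AMR) and the rate itself comes
+ * from common.mvs_info.rate.
+ */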
+static int voice_config_cvs_vocoder_amr_rate(struct voice_data *v)
+{
+	int ret = 0;
+	void *apr_cvs;
+	u16 cvs_handle;
+	struct cvs_set_amr_enc_rate_cmd cvs_set_amr_rate;
+
+	if (v == NULL) {
+		pr_err("%s: v is NULL\n", __func__);
+
+		ret = -EINVAL;
+		goto done;
+	}
+	apr_cvs = common.apr_q6_cvs;
+
+	if (!apr_cvs) {
+		pr_err("%s: apr_cvs is NULL.\n", __func__);
+
+		ret = -EINVAL;
+		goto done;
+	}
+
+	cvs_handle = voice_get_cvs_handle(v);
+
+	pr_debug("%s: Setting AMR rate. Media Type: %d\n", __func__,
+		 common.mvs_info.media_type);
+
+	cvs_set_amr_rate.hdr.hdr_field =
+			APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+			APR_HDR_LEN(APR_HDR_SIZE),
+			APR_PKT_VER);
+	cvs_set_amr_rate.hdr.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE,
+			       sizeof(cvs_set_amr_rate) - APR_HDR_SIZE);
+	cvs_set_amr_rate.hdr.src_port =
+			voice_get_idx_for_session(v->session_id);
+	cvs_set_amr_rate.hdr.dest_port = cvs_handle;
+	cvs_set_amr_rate.hdr.token = 0;
+
+	if (common.mvs_info.media_type == VSS_MEDIA_ID_AMR_NB_MODEM)
+		cvs_set_amr_rate.hdr.opcode =
+				VSS_ISTREAM_CMD_VOC_AMR_SET_ENC_RATE;
+	else if (common.mvs_info.media_type == VSS_MEDIA_ID_AMR_WB_MODEM)
+		cvs_set_amr_rate.hdr.opcode =
+				VSS_ISTREAM_CMD_VOC_AMRWB_SET_ENC_RATE;
+
+	cvs_set_amr_rate.amr_rate.mode = common.mvs_info.rate;
+
+	v->cvs_state = CMD_STATUS_FAIL;
+	v->async_err = 0;
+
+	ret = apr_send_pkt(apr_cvs, (uint32_t *) &cvs_set_amr_rate);
+	if (ret < 0) {
+		pr_err("%s: Error %d sending SET_AMR_RATE\n",
+		       __func__, ret);
+
+		goto done;
+	}
+	ret = wait_event_timeout(v->cvs_wait,
+				 (v->cvs_state == CMD_STATUS_SUCCESS),
+				 msecs_to_jiffies(TIMEOUT_MS));
+	if (!ret) {
+		pr_err("%s: wait_event timeout\n", __func__);
+
+		ret = -EINVAL;
+		goto done;
+	}
+	if (v->async_err > 0) {
+		pr_err("%s: DSP returned error[%s]\n",
+				__func__, adsp_err_get_err_str(
+				v->async_err));
+		ret = adsp_err_get_lnx_err_code(
+				v->async_err);
+		goto done;
+	}
+
+	return 0;
+done:
+	return ret;
+}
+
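+/*
+ * Configure the CVS vocoder: the Tx/Rx media type is set first,
+ * followed by the per-codec encoder properties: EVRC/4GV min/max rate
+ * (with DTX for the 4GV variants), AMR rate plus DTX, or DTX alone for
+ * G.729/G.711.
+ */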
+static int voice_config_cvs_vocoder(struct voice_data *v)
+{
+	int ret = 0;
+	void *apr_cvs;
+	u16 cvs_handle;
+	/* Set media type. */
+	struct cvs_set_media_type_cmd cvs_set_media_cmd;
+
+	if (v == NULL) {
+		pr_err("%s: v is NULL\n", __func__);
+		return -EINVAL;
+	}
+	apr_cvs = common.apr_q6_cvs;
+
+	if (!apr_cvs) {
+		pr_err("%s: apr_cvs is NULL.\n", __func__);
+		return -EINVAL;
+	}
+
+	cvs_handle = voice_get_cvs_handle(v);
+
+	cvs_set_media_cmd.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+						APR_HDR_LEN(APR_HDR_SIZE),
+						APR_PKT_VER);
+	cvs_set_media_cmd.hdr.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE,
+						sizeof(cvs_set_media_cmd) -
+						APR_HDR_SIZE);
+	cvs_set_media_cmd.hdr.src_port =
+				voice_get_idx_for_session(v->session_id);
+	cvs_set_media_cmd.hdr.dest_port = cvs_handle;
+	cvs_set_media_cmd.hdr.token = 0;
+	cvs_set_media_cmd.hdr.opcode = VSS_ISTREAM_CMD_SET_MEDIA_TYPE;
+	cvs_set_media_cmd.media_type.tx_media_id = common.mvs_info.media_type;
+	cvs_set_media_cmd.media_type.rx_media_id = common.mvs_info.media_type;
+
+	v->cvs_state = CMD_STATUS_FAIL;
+	v->async_err = 0;
+
+	ret = apr_send_pkt(apr_cvs, (uint32_t *) &cvs_set_media_cmd);
+	if (ret < 0) {
+		pr_err("%s: Error %d sending SET_MEDIA_TYPE\n",
+			__func__, ret);
+
+		goto fail;
+	}
+	ret = wait_event_timeout(v->cvs_wait,
+				 (v->cvs_state == CMD_STATUS_SUCCESS),
+				 msecs_to_jiffies(TIMEOUT_MS));
+	if (!ret) {
+		pr_err("%s: wait_event timeout\n", __func__);
+
+		ret = -EINVAL;
+		goto fail;
+	}
+	if (v->async_err > 0) {
+		pr_err("%s: DSP returned error[%s]\n",
+				__func__, adsp_err_get_err_str(
+				v->async_err));
+		ret = adsp_err_get_lnx_err_code(
+				v->async_err);
+		goto fail;
+	}
+	/* Set encoder properties. */
+	switch (common.mvs_info.media_type) {
+	case VSS_MEDIA_ID_EVRC_MODEM:
+	case VSS_MEDIA_ID_4GV_NB_MODEM:
+	case VSS_MEDIA_ID_4GV_WB_MODEM:
+	case VSS_MEDIA_ID_4GV_NW_MODEM: {
+		struct cvs_set_cdma_enc_minmax_rate_cmd cvs_set_cdma_rate;
+
+		pr_debug("Setting EVRC min-max rate\n");
+
+		cvs_set_cdma_rate.hdr.hdr_field =
+					APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+					APR_HDR_LEN(APR_HDR_SIZE),
+					APR_PKT_VER);
+		cvs_set_cdma_rate.hdr.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE,
+				      sizeof(cvs_set_cdma_rate) - APR_HDR_SIZE);
+		cvs_set_cdma_rate.hdr.src_port =
+				voice_get_idx_for_session(v->session_id);
+		cvs_set_cdma_rate.hdr.dest_port = cvs_handle;
+		cvs_set_cdma_rate.hdr.token = 0;
+		cvs_set_cdma_rate.hdr.opcode =
+				VSS_ISTREAM_CMD_CDMA_SET_ENC_MINMAX_RATE;
+		cvs_set_cdma_rate.cdma_rate.min_rate =
+				common.mvs_info.evrc_min_rate;
+		cvs_set_cdma_rate.cdma_rate.max_rate =
+				common.mvs_info.evrc_max_rate;
+
+		v->cvs_state = CMD_STATUS_FAIL;
+		v->async_err = 0;
+
+		ret = apr_send_pkt(apr_cvs, (uint32_t *) &cvs_set_cdma_rate);
+		if (ret < 0) {
+			pr_err("%s: Error %d sending SET_EVRC_MINMAX_RATE\n",
+			       __func__, ret);
+			goto fail;
+		}
+		ret = wait_event_timeout(v->cvs_wait,
+					 (v->cvs_state == CMD_STATUS_SUCCESS),
+					 msecs_to_jiffies(TIMEOUT_MS));
+		if (!ret) {
+			pr_err("%s: wait_event timeout\n", __func__);
+
+			ret = -EINVAL;
+			goto fail;
+		}
+		if (v->async_err > 0) {
+			pr_err("%s: DSP returned error[%s]\n",
+				__func__, adsp_err_get_err_str(
+				v->async_err));
+			ret = adsp_err_get_lnx_err_code(
+					v->async_err);
+			goto fail;
+		}
+
+		if (common.mvs_info.media_type != VSS_MEDIA_ID_EVRC_MODEM) {
+			ret = voice_set_dtx(v);
+			if (ret < 0)
+				goto fail;
+		}
+
+		break;
+	}
+	case VSS_MEDIA_ID_AMR_NB_MODEM:
+	case VSS_MEDIA_ID_AMR_WB_MODEM: {
+		ret = voice_config_cvs_vocoder_amr_rate(v);
+		if (ret) {
+			pr_err("%s: Failed to update vocoder rate. %d\n",
+			       __func__, ret);
+
+			goto fail;
+		}
+
+		ret = voice_set_dtx(v);
+		if (ret < 0)
+			goto fail;
+
+		break;
+	}
+	case VSS_MEDIA_ID_G729:
+	case VSS_MEDIA_ID_G711_ALAW:
+	case VSS_MEDIA_ID_G711_MULAW: {
+		ret = voice_set_dtx(v);
+		if (ret < 0)
+			goto fail;
+
+		break;
+	}
+	default:
+		/* Do nothing. */
+		break;
+	}
+	return 0;
+
+fail:
+	return ret;
+}
+
+int voc_update_amr_vocoder_rate(uint32_t session_id)
+{
+	int ret = 0;
+	struct voice_data *v;
+
+	pr_debug("%s: session_id:%d\n", __func__, session_id);
+
+	v = voice_get_session(session_id);
+
+	if (v == NULL) {
+		pr_err("%s: v is NULL, session_id:%d\n", __func__,
+		       session_id);
+
+		ret = -EINVAL;
+		goto done;
+	}
+
+	mutex_lock(&v->lock);
+	ret = voice_config_cvs_vocoder_amr_rate(v);
+	mutex_unlock(&v->lock);
+
+done:
+	return ret;
+}
+
+static int voice_send_start_voice_cmd(struct voice_data *v)
+{
+	struct apr_hdr mvm_start_voice_cmd;
+	int ret = 0;
+	void *apr_mvm;
+	u16 mvm_handle;
+
+	if (v == NULL) {
+		pr_err("%s: v is NULL\n", __func__);
+		return -EINVAL;
+	}
+	apr_mvm = common.apr_q6_mvm;
+
+	if (!apr_mvm) {
+		pr_err("%s: apr_mvm is NULL.\n", __func__);
+		return -EINVAL;
+	}
+	mvm_handle = voice_get_mvm_handle(v);
+
+	mvm_start_voice_cmd.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+						APR_HDR_LEN(APR_HDR_SIZE),
+						APR_PKT_VER);
+	mvm_start_voice_cmd.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE,
+				sizeof(mvm_start_voice_cmd) - APR_HDR_SIZE);
+	pr_debug("send mvm_start_voice_cmd pkt size = %d\n",
+				mvm_start_voice_cmd.pkt_size);
+	mvm_start_voice_cmd.src_port =
+				voice_get_idx_for_session(v->session_id);
+	mvm_start_voice_cmd.dest_port = mvm_handle;
+	mvm_start_voice_cmd.token = 0;
+	mvm_start_voice_cmd.opcode = VSS_IMVM_CMD_START_VOICE;
+
+	v->mvm_state = CMD_STATUS_FAIL;
+	v->async_err = 0;
+	ret = apr_send_pkt(apr_mvm, (uint32_t *) &mvm_start_voice_cmd);
+	if (ret < 0) {
+		pr_err("Fail in sending VSS_IMVM_CMD_START_VOICE\n");
+		goto fail;
+	}
+	ret = wait_event_timeout(v->mvm_wait,
+				 (v->mvm_state == CMD_STATUS_SUCCESS),
+				 msecs_to_jiffies(TIMEOUT_MS));
+	if (!ret) {
+		pr_err("%s: wait_event timeout\n", __func__);
+
+		ret = -EINVAL;
+		goto fail;
+	}
+	if (common.is_vote_bms) {
+		/* vote high power to BMS during call start */
+		voice_vote_powerstate_to_bms(v, true);
+	}
+	if (v->async_err > 0) {
+		pr_err("%s: DSP returned error[%s]\n",
+				__func__, adsp_err_get_err_str(
+				v->async_err));
+		ret = adsp_err_get_lnx_err_code(
+				v->async_err);
+		goto fail;
+	}
+	return 0;
+fail:
+	return ret;
+}
+
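+/*
+ * Pick the Tx/Rx vocproc topologies: forced to NONE while the session
+ * is in local call hold (LCH) or when topologies are disabled,
+ * otherwise read from the stored topology calibration data.
+ */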
+static void voc_get_tx_rx_topology(struct voice_data *v,
+				   uint32_t *tx_topology_id,
+				   uint32_t *rx_topology_id)
+{
+	uint32_t tx_id = 0;
+	uint32_t rx_id = 0;
+
+	if (v->lch_mode == VOICE_LCH_START || v->disable_topology) {
+		pr_debug("%s: Setting TX and RX topology to NONE for LCH\n",
+			 __func__);
+
+		tx_id = VSS_IVOCPROC_TOPOLOGY_ID_NONE;
+		rx_id = VSS_IVOCPROC_TOPOLOGY_ID_NONE;
+	} else {
+		tx_id = voice_get_topology(CVP_VOC_TX_TOPOLOGY_CAL);
+		rx_id = voice_get_topology(CVP_VOC_RX_TOPOLOGY_CAL);
+	}
+
+	*tx_topology_id = tx_id;
+	*rx_topology_id = rx_id;
+}
+
+static int voice_send_set_device_cmd(struct voice_data *v)
+{
+	struct cvp_set_device_cmd  cvp_setdev_cmd;
+	int ret = 0;
+	void *apr_cvp;
+	u16 cvp_handle;
+
+	if (v == NULL) {
+		pr_err("%s: v is NULL\n", __func__);
+		return -EINVAL;
+	}
+	apr_cvp = common.apr_q6_cvp;
+
+	if (!apr_cvp) {
+		pr_err("%s: apr_cvp is NULL.\n", __func__);
+		return -EINVAL;
+	}
+	cvp_handle = voice_get_cvp_handle(v);
+
+	/* set device and wait for response */
+	cvp_setdev_cmd.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+						APR_HDR_LEN(APR_HDR_SIZE),
+						APR_PKT_VER);
+	cvp_setdev_cmd.hdr.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE,
+				sizeof(cvp_setdev_cmd) - APR_HDR_SIZE);
+	pr_debug("send create cvp setdev, pkt size = %d\n",
+			cvp_setdev_cmd.hdr.pkt_size);
+	cvp_setdev_cmd.hdr.src_port =
+				voice_get_idx_for_session(v->session_id);
+	cvp_setdev_cmd.hdr.dest_port = cvp_handle;
+	cvp_setdev_cmd.hdr.token = 0;
+
+	if (voice_get_cvd_int_version(common.cvd_version) >=
+	    CVD_INT_VERSION_2_2)
+		cvp_setdev_cmd.hdr.opcode =
+				VSS_IVOCPROC_CMD_SET_DEVICE_V3;
+	else
+		cvp_setdev_cmd.hdr.opcode =
+				VSS_IVOCPROC_CMD_SET_DEVICE_V2;
+
+	voc_get_tx_rx_topology(v,
+			&cvp_setdev_cmd.cvp_set_device_v2.tx_topology_id,
+			&cvp_setdev_cmd.cvp_set_device_v2.rx_topology_id);
+
+	cvp_setdev_cmd.cvp_set_device_v2.tx_port_id = v->dev_tx.port_id;
+	cvp_setdev_cmd.cvp_set_device_v2.rx_port_id = v->dev_rx.port_id;
+
+	if (common.ec_ref_ext) {
+		cvp_setdev_cmd.cvp_set_device_v2.vocproc_mode =
+				VSS_IVOCPROC_VOCPROC_MODE_EC_EXT_MIXING;
+		cvp_setdev_cmd.cvp_set_device_v2.ec_ref_port_id =
+				common.ec_media_fmt_info.port_id;
+	} else {
+		cvp_setdev_cmd.cvp_set_device_v2.vocproc_mode =
+				    VSS_IVOCPROC_VOCPROC_MODE_EC_INT_MIXING;
+		cvp_setdev_cmd.cvp_set_device_v2.ec_ref_port_id =
+				    VSS_IVOCPROC_PORT_ID_NONE;
+	}
+	pr_debug("topology=%d, tx_port_id=%d, rx_port_id=%d\n",
+		cvp_setdev_cmd.cvp_set_device_v2.tx_topology_id,
+		cvp_setdev_cmd.cvp_set_device_v2.tx_port_id,
+		cvp_setdev_cmd.cvp_set_device_v2.rx_port_id);
+
+	v->cvp_state = CMD_STATUS_FAIL;
+	v->async_err = 0;
+	ret = apr_send_pkt(apr_cvp, (uint32_t *) &cvp_setdev_cmd);
+	if (ret < 0) {
+		pr_err("Fail in sending VSS_IVOCPROC_CMD_SET_DEVICE\n");
+		goto fail;
+	}
+
+	ret = wait_event_timeout(v->cvp_wait,
+			(v->cvp_state == CMD_STATUS_SUCCESS),
+			msecs_to_jiffies(TIMEOUT_MS));
+	if (!ret) {
+		pr_err("%s: wait_event timeout\n", __func__);
+
+		ret = -EINVAL;
+		goto fail;
+	}
+	if (v->async_err > 0) {
+		pr_err("%s: DSP returned error[%s]\n",
+				__func__, adsp_err_get_err_str(
+				v->async_err));
+		ret = adsp_err_get_lnx_err_code(
+				v->async_err);
+		goto fail;
+	}
+
+	return 0;
+fail:
+	return ret;
+}
+
+static int voice_send_stop_voice_cmd(struct voice_data *v)
+{
+	struct apr_hdr mvm_stop_voice_cmd;
+	int ret = 0;
+	void *apr_mvm;
+	u16 mvm_handle;
+
+	if (v == NULL) {
+		pr_err("%s: v is NULL\n", __func__);
+		return -EINVAL;
+	}
+	apr_mvm = common.apr_q6_mvm;
+
+	if (!apr_mvm) {
+		pr_err("%s: apr_mvm is NULL.\n", __func__);
+		return -EINVAL;
+	}
+	mvm_handle = voice_get_mvm_handle(v);
+
+	mvm_stop_voice_cmd.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+						APR_HDR_LEN(APR_HDR_SIZE),
+						APR_PKT_VER);
+	mvm_stop_voice_cmd.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE,
+				sizeof(mvm_stop_voice_cmd) - APR_HDR_SIZE);
+	pr_debug("send mvm_stop_voice_cmd pkt size = %d\n",
+				mvm_stop_voice_cmd.pkt_size);
+	mvm_stop_voice_cmd.src_port =
+				voice_get_idx_for_session(v->session_id);
+	mvm_stop_voice_cmd.dest_port = mvm_handle;
+	mvm_stop_voice_cmd.token = 0;
+	mvm_stop_voice_cmd.opcode = VSS_IMVM_CMD_STOP_VOICE;
+
+	v->mvm_state = CMD_STATUS_FAIL;
+	v->async_err = 0;
+	ret = apr_send_pkt(apr_mvm, (uint32_t *) &mvm_stop_voice_cmd);
+	if (ret < 0) {
+		pr_err("Fail in sending VSS_IMVM_CMD_STOP_VOICE\n");
+		goto fail;
+	}
+	ret = wait_event_timeout(v->mvm_wait,
+				 (v->mvm_state == CMD_STATUS_SUCCESS),
+				 msecs_to_jiffies(TIMEOUT_MS));
+	if (!ret) {
+		pr_err("%s: wait_event timeout\n", __func__);
+
+		ret = -EINVAL;
+		goto fail;
+	}
+	if (v->async_err > 0) {
+		pr_err("%s: DSP returned error[%s]\n",
+				__func__, adsp_err_get_err_str(
+				v->async_err));
+		ret = adsp_err_get_lnx_err_code(
+				v->async_err);
+		goto fail;
+	}
+
+	return 0;
+fail:
+	return ret;
+}
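+
+/*
+ * Look up the only cal block registered for the given cal index, map it
+ * to the DSP if needed (remap_cal_data), and optionally fetch the
+ * matching column-info block as well.
+ */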
+static int voice_get_cal(struct cal_block_data **cal_block,
+			 int cal_block_idx,
+			 struct cal_block_data **col_data,
+			 int col_data_idx, int session_id)
+{
+	int ret = 0;
+
+	*cal_block = cal_utils_get_only_cal_block(
+		common.cal_data[cal_block_idx]);
+	if (*cal_block == NULL) {
+		pr_err("%s: No cal data for cal %d!\n",
+			__func__, cal_block_idx);
+
+		ret = -ENODEV;
+		goto done;
+	}
+	ret = remap_cal_data(*cal_block, session_id);
+	if (ret < 0) {
+		pr_err("%s: Remap_cal_data failed for cal %d!\n",
+			__func__, cal_block_idx);
+
+		ret = -ENODEV;
+		goto done;
+	}
+
+	if (col_data == NULL)
+		goto done;
+
+	*col_data = cal_utils_get_only_cal_block(
+		common.cal_data[col_data_idx]);
+	if (*col_data == NULL) {
+		pr_err("%s: No cal data for cal %d!\n",
+			__func__, col_data_idx);
+
+		ret = -ENODEV;
+		goto done;
+	}
+done:
+	return ret;
+}
+
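+/*
+ * The register/deregister helpers below choose the STATIC/DYNAMIC
+ * calibration opcodes when per-vocoder calibration is enabled and fall
+ * back to the legacy V2 opcodes otherwise; column info is copied
+ * in-band while the cal payload itself is referenced through the shared
+ * memory handle and physical address set up at map time.
+ */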
+static int voice_send_cvs_register_cal_cmd(struct voice_data *v)
+{
+	struct cvs_register_cal_data_cmd cvs_reg_cal_cmd;
+	struct cal_block_data *cal_block = NULL;
+	struct cal_block_data *col_data = NULL;
+	int ret = 0;
+
+	memset(&cvs_reg_cal_cmd, 0, sizeof(cvs_reg_cal_cmd));
+
+	if (v == NULL) {
+		pr_err("%s: v is NULL\n", __func__);
+
+		ret = -EINVAL;
+		goto done;
+	}
+
+	if (!common.apr_q6_cvs) {
+		pr_err("%s: apr_cvs is NULL\n", __func__);
+
+		ret = -EINVAL;
+		goto done;
+	}
+
+	mutex_lock(&common.cal_data[CVS_VOCSTRM_CAL]->lock);
+	mutex_lock(&common.cal_data[CVS_VOCSTRM_COL_CAL]->lock);
+
+	ret = voice_get_cal(&cal_block, CVS_VOCSTRM_CAL, &col_data,
+		CVS_VOCSTRM_COL_CAL, v->session_id);
+	if (ret < 0) {
+		pr_err("%s: Voice_get_cal failed for cal %d!\n",
+			__func__, CVS_VOCSTRM_CAL);
+
+		goto unlock;
+	}
+
+	memcpy(&cvs_reg_cal_cmd.cvs_cal_data.column_info[0],
+	       (void *) &((struct audio_cal_info_voc_col *)
+	       col_data->cal_info)->data,
+	       col_data->cal_data.size);
+
+	cvs_reg_cal_cmd.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+				APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
+	cvs_reg_cal_cmd.hdr.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE,
+				sizeof(cvs_reg_cal_cmd) - APR_HDR_SIZE);
+	cvs_reg_cal_cmd.hdr.src_port =
+				voice_get_idx_for_session(v->session_id);
+	cvs_reg_cal_cmd.hdr.dest_port = voice_get_cvs_handle(v);
+	cvs_reg_cal_cmd.hdr.token = 0;
+	if (common.is_per_vocoder_cal_enabled)
+		cvs_reg_cal_cmd.hdr.opcode =
+			VSS_ISTREAM_CMD_REGISTER_STATIC_CALIBRATION_DATA;
+	else
+		cvs_reg_cal_cmd.hdr.opcode =
+			VSS_ISTREAM_CMD_REGISTER_CALIBRATION_DATA_V2;
+
+	cvs_reg_cal_cmd.cvs_cal_data.cal_mem_handle =
+		cal_block->map_data.q6map_handle;
+	cvs_reg_cal_cmd.cvs_cal_data.cal_mem_address_lsw =
+		lower_32_bits(cal_block->cal_data.paddr);
+	cvs_reg_cal_cmd.cvs_cal_data.cal_mem_address_msw =
+		msm_audio_populate_upper_32_bits(cal_block->cal_data.paddr);
+	cvs_reg_cal_cmd.cvs_cal_data.cal_mem_size =
+		cal_block->cal_data.size;
+
+	v->cvs_state = CMD_STATUS_FAIL;
+	v->async_err = 0;
+	ret = apr_send_pkt(common.apr_q6_cvs, (uint32_t *) &cvs_reg_cal_cmd);
+	if (ret < 0) {
+		pr_err("%s: Error %d registering CVS cal\n", __func__, ret);
+
+		ret = -EINVAL;
+		goto unlock;
+	}
+	ret = wait_event_timeout(v->cvs_wait,
+				 (v->cvs_state == CMD_STATUS_SUCCESS),
+				 msecs_to_jiffies(TIMEOUT_MS));
+	if (!ret) {
+		pr_err("%s: Command timeout\n", __func__);
+
+		ret = -EINVAL;
+		goto unlock;
+	}
+	if (v->async_err > 0) {
+		pr_err("%s: DSP returned error[%s]\n",
+				__func__, adsp_err_get_err_str(
+				v->async_err));
+		ret = adsp_err_get_lnx_err_code(
+				v->async_err);
+		goto unlock;
+	}
+
+	ret = 0;
+unlock:
+	mutex_unlock(&common.cal_data[CVS_VOCSTRM_COL_CAL]->lock);
+	mutex_unlock(&common.cal_data[CVS_VOCSTRM_CAL]->lock);
+done:
+	return ret;
+}
+
+static int voice_send_cvs_deregister_cal_cmd(struct voice_data *v)
+{
+	struct cvs_deregister_cal_data_cmd cvs_dereg_cal_cmd;
+	int ret = 0;
+
+	memset(&cvs_dereg_cal_cmd, 0, sizeof(cvs_dereg_cal_cmd));
+
+	if (v == NULL) {
+		pr_err("%s: v is NULL\n", __func__);
+
+		ret = -EINVAL;
+		goto done;
+	}
+
+	if (!common.apr_q6_cvs) {
+		pr_err("%s: apr_cvs is NULL\n", __func__);
+
+		ret = -EPERM;
+		goto done;
+	}
+
+	cvs_dereg_cal_cmd.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+				APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
+	cvs_dereg_cal_cmd.hdr.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE,
+				sizeof(cvs_dereg_cal_cmd) - APR_HDR_SIZE);
+	cvs_dereg_cal_cmd.hdr.src_port =
+				voice_get_idx_for_session(v->session_id);
+	cvs_dereg_cal_cmd.hdr.dest_port = voice_get_cvs_handle(v);
+	cvs_dereg_cal_cmd.hdr.token = 0;
+	if (common.is_per_vocoder_cal_enabled)
+		cvs_dereg_cal_cmd.hdr.opcode =
+			VSS_ISTREAM_CMD_DEREGISTER_STATIC_CALIBRATION_DATA;
+	else
+		cvs_dereg_cal_cmd.hdr.opcode =
+			VSS_ISTREAM_CMD_DEREGISTER_CALIBRATION_DATA;
+
+	v->cvs_state = CMD_STATUS_FAIL;
+	v->async_err = 0;
+	ret = apr_send_pkt(common.apr_q6_cvs, (uint32_t *) &cvs_dereg_cal_cmd);
+	if (ret < 0) {
+		pr_err("%s: Error %d de-registering CVS cal\n", __func__, ret);
+		goto done;
+	}
+	ret = wait_event_timeout(v->cvs_wait,
+				 (v->cvs_state == CMD_STATUS_SUCCESS),
+				 msecs_to_jiffies(TIMEOUT_MS));
+	if (!ret) {
+		pr_err("%s: Command timeout\n", __func__);
+
+		ret = -EINVAL;
+		goto done;
+	}
+	if (v->async_err > 0) {
+		pr_err("%s: DSP returned error[%s]\n",
+				__func__, adsp_err_get_err_str(
+				v->async_err));
+		ret = adsp_err_get_lnx_err_code(
+				v->async_err);
+		goto done;
+	}
+
+	ret = 0;
+done:
+	return ret;
+}
+
+static int voice_send_cvp_create_cmd(struct voice_data *v)
+{
+	struct cvp_create_full_ctl_session_cmd cvp_session_cmd;
+	void *apr_cvp;
+	int ret = 0;
+
+	if (v == NULL) {
+		pr_err("%s: v is NULL\n", __func__);
+		ret = -EINVAL;
+		goto done;
+	}
+
+	apr_cvp = common.apr_q6_cvp;
+	if (!apr_cvp) {
+		pr_err("%s: apr_cvp is NULL.\n", __func__);
+
+		ret = -EINVAL;
+		goto done;
+	}
+
+	/* create cvp session and wait for response */
+	cvp_session_cmd.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+						APR_HDR_LEN(APR_HDR_SIZE),
+						APR_PKT_VER);
+	cvp_session_cmd.hdr.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE,
+				sizeof(cvp_session_cmd) - APR_HDR_SIZE);
+	pr_debug("%s: send create cvp session, pkt size = %d\n",
+		 __func__, cvp_session_cmd.hdr.pkt_size);
+	cvp_session_cmd.hdr.src_port =
+				voice_get_idx_for_session(v->session_id);
+	cvp_session_cmd.hdr.dest_port = 0;
+	cvp_session_cmd.hdr.token = 0;
+
+	if (voice_get_cvd_int_version(common.cvd_version) >=
+	    CVD_INT_VERSION_2_2)
+		cvp_session_cmd.hdr.opcode =
+				VSS_IVOCPROC_CMD_CREATE_FULL_CONTROL_SESSION_V3;
+	else
+		cvp_session_cmd.hdr.opcode =
+				VSS_IVOCPROC_CMD_CREATE_FULL_CONTROL_SESSION_V2;
+
+	voc_get_tx_rx_topology(v,
+			&cvp_session_cmd.cvp_session.tx_topology_id,
+			&cvp_session_cmd.cvp_session.rx_topology_id);
+
+	cvp_session_cmd.cvp_session.direction = 2; /*tx and rx*/
+	cvp_session_cmd.cvp_session.tx_port_id = v->dev_tx.port_id;
+	cvp_session_cmd.cvp_session.rx_port_id = v->dev_rx.port_id;
+	cvp_session_cmd.cvp_session.profile_id =
+					 VSS_ICOMMON_CAL_NETWORK_ID_NONE;
+	if (common.ec_ref_ext) {
+		cvp_session_cmd.cvp_session.vocproc_mode =
+				VSS_IVOCPROC_VOCPROC_MODE_EC_EXT_MIXING;
+		cvp_session_cmd.cvp_session.ec_ref_port_id =
+				common.ec_media_fmt_info.port_id;
+	} else {
+		cvp_session_cmd.cvp_session.vocproc_mode =
+				 VSS_IVOCPROC_VOCPROC_MODE_EC_INT_MIXING;
+		cvp_session_cmd.cvp_session.ec_ref_port_id =
+						 VSS_IVOCPROC_PORT_ID_NONE;
+	}
+
+	pr_debug("tx_topology: %d tx_port_id=%d, rx_port_id=%d, mode: 0x%x\n",
+		cvp_session_cmd.cvp_session.tx_topology_id,
+		cvp_session_cmd.cvp_session.tx_port_id,
+		cvp_session_cmd.cvp_session.rx_port_id,
+		cvp_session_cmd.cvp_session.vocproc_mode);
+	pr_debug("rx_topology: %d, profile_id: 0x%x, pkt_size: %d\n",
+		cvp_session_cmd.cvp_session.rx_topology_id,
+		cvp_session_cmd.cvp_session.profile_id,
+		cvp_session_cmd.hdr.pkt_size);
+
+	v->cvp_state = CMD_STATUS_FAIL;
+	v->async_err = 0;
+	ret = apr_send_pkt(apr_cvp, (uint32_t *) &cvp_session_cmd);
+	if (ret < 0) {
+		pr_err("Fail in sending VOCPROC_FULL_CONTROL_SESSION\n");
+
+		ret = -EINVAL;
+		goto done;
+	}
+
+	ret = wait_event_timeout(v->cvp_wait,
+				 (v->cvp_state == CMD_STATUS_SUCCESS),
+				 msecs_to_jiffies(TIMEOUT_MS));
+	if (!ret) {
+		pr_err("%s: wait_event timeout\n", __func__);
+
+		ret = -EINVAL;
+		goto done;
+	}
+	if (v->async_err > 0) {
+		pr_err("%s: DSP returned error[%s]\n",
+			__func__, adsp_err_get_err_str(
+			v->async_err));
+		ret = adsp_err_get_lnx_err_code(
+				v->async_err);
+		goto done;
+	}
+
+	ret = 0;
+done:
+	return ret;
+}
+
+static int voice_send_cvp_register_dev_cfg_cmd(struct voice_data *v)
+{
+	struct cvp_register_dev_cfg_cmd cvp_reg_dev_cfg_cmd;
+	struct cal_block_data *cal_block = NULL;
+	int ret = 0;
+
+	memset(&cvp_reg_dev_cfg_cmd, 0, sizeof(cvp_reg_dev_cfg_cmd));
+
+	if (v == NULL) {
+		pr_err("%s: v is NULL\n", __func__);
+
+		ret = -EINVAL;
+		goto done;
+	}
+
+	if (!common.apr_q6_cvp) {
+		pr_err("%s: apr_cvp is NULL\n", __func__);
+
+		ret = -EPERM;
+		goto done;
+	}
+
+	mutex_lock(&common.cal_data[CVP_VOCDEV_CFG_CAL]->lock);
+
+	ret = voice_get_cal(&cal_block, CVP_VOCDEV_CFG_CAL, NULL,
+		0, v->session_id);
+	if (ret < 0) {
+		pr_err("%s: Voice_get_cal failed for cal %d!\n",
+			__func__, CVP_VOCDEV_CFG_CAL);
+
+		goto unlock;
+	}
+
+	cvp_reg_dev_cfg_cmd.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+				APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
+	cvp_reg_dev_cfg_cmd.hdr.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE,
+				sizeof(cvp_reg_dev_cfg_cmd) - APR_HDR_SIZE);
+	cvp_reg_dev_cfg_cmd.hdr.src_port =
+				voice_get_idx_for_session(v->session_id);
+	cvp_reg_dev_cfg_cmd.hdr.dest_port = voice_get_cvp_handle(v);
+	cvp_reg_dev_cfg_cmd.hdr.token = 0;
+	cvp_reg_dev_cfg_cmd.hdr.opcode =
+					VSS_IVOCPROC_CMD_REGISTER_DEVICE_CONFIG;
+
+	cvp_reg_dev_cfg_cmd.cvp_dev_cfg_data.mem_handle =
+		cal_block->map_data.q6map_handle;
+	cvp_reg_dev_cfg_cmd.cvp_dev_cfg_data.mem_address_lsw =
+		lower_32_bits(cal_block->cal_data.paddr);
+	cvp_reg_dev_cfg_cmd.cvp_dev_cfg_data.mem_address_msw =
+		msm_audio_populate_upper_32_bits(cal_block->cal_data.paddr);
+	cvp_reg_dev_cfg_cmd.cvp_dev_cfg_data.mem_size =
+		cal_block->cal_data.size;
+
+	v->cvp_state = CMD_STATUS_FAIL;
+	v->async_err = 0;
+	ret = apr_send_pkt(common.apr_q6_cvp,
+			   (uint32_t *) &cvp_reg_dev_cfg_cmd);
+	if (ret < 0) {
+		pr_err("%s: Error %d registering CVP dev cfg cal\n",
+		       __func__, ret);
+
+		ret = -EINVAL;
+		goto unlock;
+	}
+	ret = wait_event_timeout(v->cvp_wait,
+				 (v->cvp_state == CMD_STATUS_SUCCESS),
+				 msecs_to_jiffies(TIMEOUT_MS));
+	if (!ret) {
+		pr_err("%s: Command timeout\n", __func__);
+
+		ret = -EINVAL;
+		goto unlock;
+	}
+	if (v->async_err > 0) {
+		pr_err("%s: DSP returned error[%s]\n",
+				__func__, adsp_err_get_err_str(
+				v->async_err));
+		ret = adsp_err_get_lnx_err_code(
+				v->async_err);
+		goto unlock;
+	}
+
+	ret = 0;
+unlock:
+	mutex_unlock(&common.cal_data[CVP_VOCDEV_CFG_CAL]->lock);
+done:
+	return ret;
+}
+
+static int voice_send_cvp_deregister_dev_cfg_cmd(struct voice_data *v)
+{
+	struct cvp_deregister_dev_cfg_cmd cvp_dereg_dev_cfg_cmd;
+	int ret = 0;
+
+	memset(&cvp_dereg_dev_cfg_cmd, 0, sizeof(cvp_dereg_dev_cfg_cmd));
+
+	if (v == NULL) {
+		pr_err("%s: v is NULL\n", __func__);
+
+		ret = -EINVAL;
+		goto done;
+	}
+
+	if (!common.apr_q6_cvp) {
+		pr_err("%s: apr_cvp is NULL\n", __func__);
+
+		ret = -EPERM;
+		goto done;
+	}
+
+	cvp_dereg_dev_cfg_cmd.hdr.hdr_field =
+				APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+					APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
+	cvp_dereg_dev_cfg_cmd.hdr.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE,
+				sizeof(cvp_dereg_dev_cfg_cmd) - APR_HDR_SIZE);
+	cvp_dereg_dev_cfg_cmd.hdr.src_port =
+				voice_get_idx_for_session(v->session_id);
+	cvp_dereg_dev_cfg_cmd.hdr.dest_port = voice_get_cvp_handle(v);
+	cvp_dereg_dev_cfg_cmd.hdr.token = 0;
+	cvp_dereg_dev_cfg_cmd.hdr.opcode =
+				VSS_IVOCPROC_CMD_DEREGISTER_DEVICE_CONFIG;
+
+	v->cvp_state = CMD_STATUS_FAIL;
+	v->async_err = 0;
+	ret = apr_send_pkt(common.apr_q6_cvp,
+			   (uint32_t *) &cvp_dereg_dev_cfg_cmd);
+	if (ret < 0) {
+		pr_err("%s: Error %d de-registering CVP dev cfg cal\n",
+		       __func__, ret);
+		goto done;
+	}
+	ret = wait_event_timeout(v->cvp_wait,
+				 (v->cvp_state == CMD_STATUS_SUCCESS),
+				 msecs_to_jiffies(TIMEOUT_MS));
+	if (!ret) {
+		pr_err("%s: Command timeout\n", __func__);
+
+		ret = -EINVAL;
+		goto done;
+	}
+	if (v->async_err > 0) {
+		pr_err("%s: DSP returned error[%s]\n",
+				__func__, adsp_err_get_err_str(
+				v->async_err));
+		ret = adsp_err_get_lnx_err_code(
+				v->async_err);
+		goto done;
+	}
+
+	ret = 0;
+done:
+	return ret;
+}
+
+static int voice_send_cvp_register_cal_cmd(struct voice_data *v)
+{
+	struct cvp_register_cal_data_cmd cvp_reg_cal_cmd;
+	struct cal_block_data *cal_block = NULL;
+	struct cal_block_data *col_data = NULL;
+	int ret = 0;
+
+	memset(&cvp_reg_cal_cmd, 0, sizeof(cvp_reg_cal_cmd));
+
+	if (v == NULL) {
+		pr_err("%s: v is NULL\n", __func__);
+
+		ret = -EINVAL;
+		goto done;
+	}
+
+	if (!common.apr_q6_cvp) {
+		pr_err("%s: apr_cvp is NULL\n", __func__);
+
+		ret = -EINVAL;
+		goto done;
+	}
+
+	mutex_lock(&common.cal_data[CVP_VOCPROC_CAL]->lock);
+	mutex_lock(&common.cal_data[CVP_VOCPROC_COL_CAL]->lock);
+
+	ret = voice_get_cal(&cal_block, CVP_VOCPROC_CAL, &col_data,
+		CVP_VOCPROC_COL_CAL, v->session_id);
+	if (ret < 0) {
+		pr_err("%s: Voice_get_cal failed for cal %d!\n",
+			__func__, CVP_VOCPROC_CAL);
+
+		goto unlock;
+	}
+
+	v->dev_tx.dev_id = ((struct audio_cal_info_vocproc *)
+				cal_block->cal_info)->tx_acdb_id;
+	v->dev_rx.dev_id = ((struct audio_cal_info_vocproc *)
+				cal_block->cal_info)->rx_acdb_id;
+	pr_debug("%s: %s: Tx acdb id = %d and Rx acdb id = %d\n", __func__,
+		 voc_get_session_name(v->session_id), v->dev_tx.dev_id,
+		 v->dev_rx.dev_id);
+
+	memcpy(&cvp_reg_cal_cmd.cvp_cal_data.column_info[0],
+	       (void *) &((struct audio_cal_info_voc_col *)
+	       col_data->cal_info)->data,
+	       col_data->cal_data.size);
+
+	cvp_reg_cal_cmd.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+				APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
+	cvp_reg_cal_cmd.hdr.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE,
+				sizeof(cvp_reg_cal_cmd) - APR_HDR_SIZE);
+	cvp_reg_cal_cmd.hdr.src_port =
+				voice_get_idx_for_session(v->session_id);
+	cvp_reg_cal_cmd.hdr.dest_port = voice_get_cvp_handle(v);
+	cvp_reg_cal_cmd.hdr.token = 0;
+	if (common.is_per_vocoder_cal_enabled)
+		cvp_reg_cal_cmd.hdr.opcode =
+			VSS_IVOCPROC_CMD_REGISTER_STATIC_CALIBRATION_DATA;
+	else
+		cvp_reg_cal_cmd.hdr.opcode =
+			VSS_IVOCPROC_CMD_REGISTER_CALIBRATION_DATA_V2;
+
+	cvp_reg_cal_cmd.cvp_cal_data.cal_mem_handle =
+		cal_block->map_data.q6map_handle;
+	cvp_reg_cal_cmd.cvp_cal_data.cal_mem_address_lsw =
+		lower_32_bits(cal_block->cal_data.paddr);
+	cvp_reg_cal_cmd.cvp_cal_data.cal_mem_address_msw =
+		msm_audio_populate_upper_32_bits(cal_block->cal_data.paddr);
+	cvp_reg_cal_cmd.cvp_cal_data.cal_mem_size =
+		cal_block->cal_data.size;
+
+	v->cvp_state = CMD_STATUS_FAIL;
+	v->async_err = 0;
+	ret = apr_send_pkt(common.apr_q6_cvp, (uint32_t *) &cvp_reg_cal_cmd);
+	if (ret < 0) {
+		pr_err("%s: Error %d registering CVP cal\n", __func__, ret);
+
+		ret = -EINVAL;
+		goto unlock;
+	}
+	ret = wait_event_timeout(v->cvp_wait,
+				 (v->cvp_state == CMD_STATUS_SUCCESS),
+				 msecs_to_jiffies(TIMEOUT_MS));
+	if (!ret) {
+		pr_err("%s: Command timeout\n", __func__);
+
+		ret = -EINVAL;
+		goto unlock;
+	}
+	if (v->async_err > 0) {
+		pr_err("%s: DSP returned error[%s]\n",
+				__func__, adsp_err_get_err_str(
+				v->async_err));
+		ret = adsp_err_get_lnx_err_code(
+				v->async_err);
+		goto unlock;
+	}
+
+	ret = 0;
+unlock:
+	mutex_unlock(&common.cal_data[CVP_VOCPROC_COL_CAL]->lock);
+	mutex_unlock(&common.cal_data[CVP_VOCPROC_CAL]->lock);
+done:
+	return ret;
+}
+
+static int voice_send_cvp_deregister_cal_cmd(struct voice_data *v)
+{
+	struct cvp_deregister_cal_data_cmd cvp_dereg_cal_cmd;
+	int ret = 0;
+
+	memset(&cvp_dereg_cal_cmd, 0, sizeof(cvp_dereg_cal_cmd));
+
+	if (v == NULL) {
+		pr_err("%s: v is NULL\n", __func__);
+
+		ret = -EINVAL;
+		goto done;
+	}
+
+	if (!common.apr_q6_cvp) {
+		pr_err("%s: apr_cvp is NULL.\n", __func__);
+
+		ret = -EPERM;
+		goto done;
+	}
+
+	cvp_dereg_cal_cmd.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+				APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
+	cvp_dereg_cal_cmd.hdr.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE,
+				sizeof(cvp_dereg_cal_cmd) - APR_HDR_SIZE);
+	cvp_dereg_cal_cmd.hdr.src_port =
+				voice_get_idx_for_session(v->session_id);
+	cvp_dereg_cal_cmd.hdr.dest_port = voice_get_cvp_handle(v);
+	cvp_dereg_cal_cmd.hdr.token = 0;
+	if (common.is_per_vocoder_cal_enabled)
+		cvp_dereg_cal_cmd.hdr.opcode =
+			VSS_IVOCPROC_CMD_DEREGISTER_STATIC_CALIBRATION_DATA;
+	else
+		cvp_dereg_cal_cmd.hdr.opcode =
+			VSS_IVOCPROC_CMD_DEREGISTER_CALIBRATION_DATA;
+
+	v->cvp_state = CMD_STATUS_FAIL;
+	v->async_err = 0;
+	ret = apr_send_pkt(common.apr_q6_cvp, (uint32_t *) &cvp_dereg_cal_cmd);
+	if (ret < 0) {
+		pr_err("%s: Error %d de-registering CVP cal\n", __func__, ret);
+		goto done;
+	}
+	ret = wait_event_timeout(v->cvp_wait,
+				 (v->cvp_state == CMD_STATUS_SUCCESS),
+				 msecs_to_jiffies(TIMEOUT_MS));
+	if (!ret) {
+		pr_err("%s: Command timeout\n", __func__);
+
+		ret = -EINVAL;
+		goto done;
+	}
+	if (v->async_err > 0) {
+		pr_err("%s: DSP returned error[%s]\n",
+				__func__, adsp_err_get_err_str(
+				v->async_err));
+		ret = adsp_err_get_lnx_err_code(
+				v->async_err);
+		goto done;
+	}
+
+	ret = 0;
+done:
+	return ret;
+}
+
+static int voice_send_cvp_register_vol_cal_cmd(struct voice_data *v)
+{
+	struct cvp_register_vol_cal_data_cmd cvp_reg_vol_cal_cmd;
+	struct cal_block_data *cal_block = NULL;
+	struct cal_block_data *col_data = NULL;
+	int ret = 0;
+
+	memset(&cvp_reg_vol_cal_cmd, 0, sizeof(cvp_reg_vol_cal_cmd));
+
+	if (v == NULL) {
+		pr_err("%s: v is NULL\n", __func__);
+
+		ret = -EINVAL;
+		goto done;
+	}
+
+	if (!common.apr_q6_cvp) {
+		pr_err("%s: apr_cvp is NULL\n", __func__);
+
+		ret = -EINVAL;
+		goto done;
+	}
+
+	mutex_lock(&common.cal_data[CVP_VOCVOL_CAL]->lock);
+	mutex_lock(&common.cal_data[CVP_VOCVOL_COL_CAL]->lock);
+
+	ret = voice_get_cal(&cal_block, CVP_VOCVOL_CAL, &col_data,
+		CVP_VOCVOL_COL_CAL, v->session_id);
+	if (ret < 0) {
+		pr_err("%s: Voice_get_cal failed for cal %d!\n",
+			__func__, CVP_VOCVOL_CAL);
+
+		goto unlock;
+	}
+
+	memcpy(&cvp_reg_vol_cal_cmd.cvp_vol_cal_data.column_info[0],
+	       (void *) &((struct audio_cal_info_voc_col *)
+	       col_data->cal_info)->data,
+	       col_data->cal_data.size);
+
+	cvp_reg_vol_cal_cmd.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+				APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
+	cvp_reg_vol_cal_cmd.hdr.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE,
+				sizeof(cvp_reg_vol_cal_cmd) - APR_HDR_SIZE);
+	cvp_reg_vol_cal_cmd.hdr.src_port =
+				voice_get_idx_for_session(v->session_id);
+	cvp_reg_vol_cal_cmd.hdr.dest_port = voice_get_cvp_handle(v);
+	cvp_reg_vol_cal_cmd.hdr.token = 0;
+	if (common.is_per_vocoder_cal_enabled)
+		cvp_reg_vol_cal_cmd.hdr.opcode =
+			VSS_IVOCPROC_CMD_REGISTER_DYNAMIC_CALIBRATION_DATA;
+	else
+		cvp_reg_vol_cal_cmd.hdr.opcode =
+			VSS_IVOCPROC_CMD_REGISTER_VOL_CALIBRATION_DATA;
+
+	cvp_reg_vol_cal_cmd.cvp_vol_cal_data.cal_mem_handle =
+		cal_block->map_data.q6map_handle;
+	cvp_reg_vol_cal_cmd.cvp_vol_cal_data.cal_mem_address_lsw =
+		lower_32_bits(cal_block->cal_data.paddr);
+	cvp_reg_vol_cal_cmd.cvp_vol_cal_data.cal_mem_address_msw =
+		msm_audio_populate_upper_32_bits(cal_block->cal_data.paddr);
+	cvp_reg_vol_cal_cmd.cvp_vol_cal_data.cal_mem_size =
+		cal_block->cal_data.size;
+
+	v->cvp_state = CMD_STATUS_FAIL;
+	v->async_err = 0;
+	ret = apr_send_pkt(common.apr_q6_cvp,
+			   (uint32_t *) &cvp_reg_vol_cal_cmd);
+	if (ret < 0) {
+		pr_err("%s: Error %d registering CVP vol cal\n", __func__, ret);
+
+		ret = -EINVAL;
+		goto unlock;
+	}
+	ret = wait_event_timeout(v->cvp_wait,
+				 (v->cvp_state == CMD_STATUS_SUCCESS),
+				 msecs_to_jiffies(TIMEOUT_MS));
+	if (!ret) {
+		pr_err("%s: Command timeout\n", __func__);
+
+		ret = -EINVAL;
+		goto unlock;
+	}
+	if (v->async_err > 0) {
+		pr_err("%s: DSP returned error[%s]\n",
+				__func__, adsp_err_get_err_str(
+				v->async_err));
+		ret = adsp_err_get_lnx_err_code(
+				v->async_err);
+		goto unlock;
+	}
+
+	ret = 0;
+unlock:
+	mutex_unlock(&common.cal_data[CVP_VOCVOL_COL_CAL]->lock);
+	mutex_unlock(&common.cal_data[CVP_VOCVOL_CAL]->lock);
+done:
+	return ret;
+}
+
+static int voice_send_cvp_deregister_vol_cal_cmd(struct voice_data *v)
+{
+	struct cvp_deregister_vol_cal_data_cmd cvp_dereg_vol_cal_cmd;
+	int ret = 0;
+
+	memset(&cvp_dereg_vol_cal_cmd, 0, sizeof(cvp_dereg_vol_cal_cmd));
+
+	if (v == NULL) {
+		pr_err("%s: v is NULL\n", __func__);
+
+		ret = -EINVAL;
+		goto done;
+	}
+
+	if (!common.apr_q6_cvp) {
+		pr_err("%s: apr_cvp is NULL\n", __func__);
+
+		ret = -EPERM;
+		goto done;
+	}
+
+	cvp_dereg_vol_cal_cmd.hdr.hdr_field =
+			APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+				APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
+	cvp_dereg_vol_cal_cmd.hdr.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE,
+				sizeof(cvp_dereg_vol_cal_cmd) - APR_HDR_SIZE);
+	cvp_dereg_vol_cal_cmd.hdr.src_port =
+				voice_get_idx_for_session(v->session_id);
+	cvp_dereg_vol_cal_cmd.hdr.dest_port = voice_get_cvp_handle(v);
+	cvp_dereg_vol_cal_cmd.hdr.token = 0;
+	if (common.is_per_vocoder_cal_enabled)
+		cvp_dereg_vol_cal_cmd.hdr.opcode =
+			VSS_IVOCPROC_CMD_DEREGISTER_DYNAMIC_CALIBRATION_DATA;
+	else
+		cvp_dereg_vol_cal_cmd.hdr.opcode =
+			VSS_IVOCPROC_CMD_DEREGISTER_VOL_CALIBRATION_DATA;
+
+	v->cvp_state = CMD_STATUS_FAIL;
+	v->async_err = 0;
+	ret = apr_send_pkt(common.apr_q6_cvp,
+			   (uint32_t *) &cvp_dereg_vol_cal_cmd);
+	if (ret < 0) {
+		pr_err("%s: Error %d de-registering CVP vol cal\n",
+		       __func__, ret);
+		goto done;
+	}
+	ret = wait_event_timeout(v->cvp_wait,
+				 (v->cvp_state == CMD_STATUS_SUCCESS),
+				 msecs_to_jiffies(TIMEOUT_MS));
+	if (!ret) {
+		pr_err("%s: Command timeout\n", __func__);
+
+		ret = -EINVAL;
+		goto done;
+	}
+	if (v->async_err > 0) {
+		pr_err("%s: DSP returned error[%s]\n",
+				__func__, adsp_err_get_err_str(
+				v->async_err));
+		ret = adsp_err_get_lnx_err_code(
+				v->async_err);
+		goto done;
+	}
+
+	ret = 0;
+done:
+	return ret;
+}
+
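+/*
+ * Map a physically contiguous region into the DSP: the single-entry
+ * memory table gets a NULL next-descriptor pointer and size, followed
+ * by the 64-bit payload address and its size, and is then handed to the
+ * MVM with VSS_IMEMORY_CMD_MAP_PHYSICAL. The caller-supplied token
+ * presumably lets the response handler tell the cal and RTAC mappings
+ * apart.
+ */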
+static int voice_map_memory_physical_cmd(struct voice_data *v,
+					 struct mem_map_table *table_info,
+					 dma_addr_t phys,
+					 uint32_t size,
+					 uint32_t token)
+{
+	struct vss_imemory_cmd_map_physical_t mvm_map_phys_cmd;
+	uint32_t *memtable;
+	int ret = 0;
+
+	if (v == NULL) {
+		pr_err("%s: v is NULL\n", __func__);
+		ret = -EINVAL;
+		goto fail;
+	}
+
+	if (!common.apr_q6_mvm) {
+		pr_err("%s: apr_mvm is NULL.\n", __func__);
+		ret = -EINVAL;
+		goto fail;
+	}
+
+	if (!table_info->data) {
+		pr_err("%s: memory table is NULL.\n", __func__);
+		ret = -EINVAL;
+		goto fail;
+	}
+
+	memtable = (uint32_t *) table_info->data;
+
+	/*
+	 * Store next table descriptor's address (64 bit) as NULL as there
+	 * is only one memory block
+	 */
+	memtable[0] = 0;
+	memtable[1] = 0;
+
+	/* Store next table descriptor's size */
+	memtable[2] = 0;
+
+	/* Store shared mem address (64 bit) */
+	memtable[3] = lower_32_bits(phys);
+	memtable[4] = msm_audio_populate_upper_32_bits(phys);
+
+	/* Store shared memory size */
+	memtable[5] = size;
+
+	mvm_map_phys_cmd.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+				APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
+	mvm_map_phys_cmd.hdr.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE,
+				sizeof(mvm_map_phys_cmd) - APR_HDR_SIZE);
+	mvm_map_phys_cmd.hdr.src_port =
+				voice_get_idx_for_session(v->session_id);
+	mvm_map_phys_cmd.hdr.dest_port = voice_get_mvm_handle(v);
+	mvm_map_phys_cmd.hdr.token = token;
+	mvm_map_phys_cmd.hdr.opcode = VSS_IMEMORY_CMD_MAP_PHYSICAL;
+
+	mvm_map_phys_cmd.table_descriptor.mem_address_lsw =
+			lower_32_bits(table_info->phys);
+	mvm_map_phys_cmd.table_descriptor.mem_address_msw =
+			msm_audio_populate_upper_32_bits(table_info->phys);
+	mvm_map_phys_cmd.table_descriptor.mem_size =
+			sizeof(struct vss_imemory_block_t) +
+			sizeof(struct vss_imemory_table_descriptor_t);
+	mvm_map_phys_cmd.is_cached = true;
+	mvm_map_phys_cmd.cache_line_size = 128;
+	mvm_map_phys_cmd.access_mask = 3;
+	mvm_map_phys_cmd.page_align = 4096;
+	mvm_map_phys_cmd.min_data_width = 8;
+	mvm_map_phys_cmd.max_data_width = 64;
+
+	pr_debug("%s: next table desc: addr: %lld, size: %d\n",
+		 __func__, *((uint64_t *) memtable),
+		 *(((uint32_t *) memtable) + 2));
+	pr_debug("%s: phys addr of mem being mapped LSW:0x%x, MSW:0x%x size: %d\n",
+		 __func__, *(((uint32_t *) memtable) + 3),
+		*(((uint32_t *) memtable) + 4), *(((uint32_t *) memtable) + 5));
+
+	v->mvm_state = CMD_STATUS_FAIL;
+	v->async_err = 0;
+	ret = apr_send_pkt(common.apr_q6_mvm, (uint32_t *) &mvm_map_phys_cmd);
+	if (ret < 0) {
+		pr_err("%s: Error %d sending mvm map phy cmd\n", __func__, ret);
+
+		goto fail;
+	}
+
+	ret = wait_event_timeout(v->mvm_wait,
+				 (v->mvm_state == CMD_STATUS_SUCCESS),
+				 msecs_to_jiffies(TIMEOUT_MS));
+	if (!ret) {
+		pr_err("%s: Command timeout\n", __func__);
+
+		ret = -EINVAL;
+		goto fail;
+	}
+	if (v->async_err > 0) {
+		pr_err("%s: DSP returned error[%s]\n",
+				__func__, adsp_err_get_err_str(
+				v->async_err));
+		ret = adsp_err_get_lnx_err_code(
+				v->async_err);
+		goto fail;
+	}
+
+	return 0;
+
+fail:
+	return ret;
+}
+
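+/*
+ * VSS_IMVM_CMD_PAUSE_VOICE is used below to quiesce an active stream
+ * around calibration deregistration before the call is restarted.
+ */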
+static int voice_pause_voice_call(struct voice_data *v)
+{
+	struct apr_hdr	mvm_pause_voice_cmd;
+	void		*apr_mvm;
+	int		ret = 0;
+
+	pr_debug("%s\n", __func__);
+
+	if (v == NULL) {
+		pr_err("%s: Voice data is NULL\n", __func__);
+
+		ret = -EINVAL;
+		goto done;
+	}
+
+	apr_mvm = common.apr_q6_mvm;
+	if (!apr_mvm) {
+		pr_err("%s: apr_mvm is NULL.\n", __func__);
+
+		ret = -EINVAL;
+		goto done;
+	}
+
+	mvm_pause_voice_cmd.hdr_field =
+		APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+		APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
+	mvm_pause_voice_cmd.pkt_size =
+		APR_PKT_SIZE(APR_HDR_SIZE,
+		sizeof(mvm_pause_voice_cmd) - APR_HDR_SIZE);
+	mvm_pause_voice_cmd.src_port =
+			voice_get_idx_for_session(v->session_id);
+	mvm_pause_voice_cmd.dest_port = voice_get_mvm_handle(v);
+	mvm_pause_voice_cmd.token = 0;
+	mvm_pause_voice_cmd.opcode = VSS_IMVM_CMD_PAUSE_VOICE;
+	v->mvm_state = CMD_STATUS_FAIL;
+	v->async_err = 0;
+
+	pr_debug("%s: send mvm_pause_voice_cmd pkt size = %d\n",
+		__func__, mvm_pause_voice_cmd.pkt_size);
+
+	ret = apr_send_pkt(apr_mvm,
+		(uint32_t *)&mvm_pause_voice_cmd);
+	if (ret < 0) {
+		pr_err("Fail in sending VSS_IMVM_CMD_PAUSE_VOICE\n");
+
+		ret = -EINVAL;
+		goto done;
+	}
+
+	ret = wait_event_timeout(v->mvm_wait,
+		(v->mvm_state == CMD_STATUS_SUCCESS),
+		msecs_to_jiffies(TIMEOUT_MS));
+	if (!ret) {
+		pr_err("%s: Command timeout\n", __func__);
+
+		ret = -EINVAL;
+		goto done;
+	}
+	if (v->async_err > 0) {
+		pr_err("%s: DSP returned error[%s]\n",
+				__func__, adsp_err_get_err_str(
+				v->async_err));
+		ret = adsp_err_get_lnx_err_code(
+				v->async_err);
+		goto done;
+	}
+
+	ret = 0;
+done:
+	return ret;
+}
+
+static int voice_map_cal_memory(struct cal_block_data *cal_block,
+				uint32_t session_id)
+{
+	int result = 0;
+	int voc_index;
+	struct voice_data *v = NULL;
+
+	pr_debug("%s\n", __func__);
+
+	if (cal_block == NULL) {
+		pr_err("%s: Cal block is NULL!\n", __func__);
+
+		result = -EINVAL;
+		goto done;
+	}
+
+	if (cal_block->cal_data.paddr == 0) {
+		pr_debug("%s: No address to map!\n", __func__);
+
+		result = -EINVAL;
+		goto done;
+	}
+
+	if (cal_block->map_data.map_size == 0) {
+		pr_debug("%s: Map size is 0!\n", __func__);
+
+		result = -EINVAL;
+		goto done;
+	}
+
+	voc_index = voice_get_idx_for_session(session_id);
+	if (voc_index < 0) {
+		pr_err("%s: Invalid session ID %d\n", __func__, session_id);
+
+		result = -EINVAL;
+		goto done;
+	}
+
+	mutex_lock(&common.common_lock);
+	v = &common.voice[voc_index];
+
+	result = voice_map_memory_physical_cmd(v,
+		&common.cal_mem_map_table,
+		(dma_addr_t)cal_block->cal_data.paddr,
+		cal_block->map_data.map_size,
+		VOC_CAL_MEM_MAP_TOKEN);
+	if (result < 0) {
+		pr_err("%s: Mmap did not work! addr = 0x%pK, size = %zd\n",
+			__func__,
+			&cal_block->cal_data.paddr,
+			cal_block->map_data.map_size);
+
+		goto done_unlock;
+	}
+
+	cal_block->map_data.q6map_handle = common.cal_mem_handle;
+done_unlock:
+	mutex_unlock(&common.common_lock);
+done:
+	return result;
+}
+
+static int remap_cal_data(struct cal_block_data *cal_block,
+			   uint32_t session_id)
+{
+	int ret = 0;
+
+	pr_debug("%s\n", __func__);
+
+	if (cal_block->map_data.ion_client == NULL) {
+		pr_err("%s: No ION allocation for session_id %d!\n",
+			__func__, session_id);
+		ret = -EINVAL;
+		goto done;
+	}
+
+	if ((cal_block->map_data.map_size > 0) &&
+		(cal_block->map_data.q6map_handle == 0)) {
+
+		/* This cal type is not mapped to the DSP yet */
+		ret = voice_map_cal_memory(cal_block, session_id);
+		if (ret < 0) {
+			pr_err("%s: Mmap did not work! size = %zd\n",
+				__func__, cal_block->map_data.map_size);
+
+			goto done;
+		}
+	} else {
+		pr_debug("%s: Cal block 0x%pK, size %zd already mapped. Q6 map handle = %d\n",
+			__func__, &cal_block->cal_data.paddr,
+			cal_block->map_data.map_size,
+			cal_block->map_data.q6map_handle);
+	}
+done:
+	return ret;
+}
+
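+/*
+ * Unmapping calibration memory while calls are running: every active
+ * session is paused, the affected cal type is deregistered and the call
+ * restarted; the memory handle itself is only unmapped once no other
+ * session is still active.
+ */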
+static int voice_unmap_cal_memory(int32_t cal_type,
+				  struct cal_block_data *cal_block)
+{
+	int result = 0;
+	int result2 = 0;
+	int i;
+	struct voice_data *v = NULL;
+
+	pr_debug("%s\n", __func__);
+
+	if (cal_block == NULL) {
+		pr_err("%s: Cal block is NULL!\n", __func__);
+
+		result = -EINVAL;
+		goto done;
+	}
+
+	if (cal_block->map_data.q6map_handle == 0) {
+		pr_debug("%s: Q6 handle is not set!\n", __func__);
+
+		result = -EINVAL;
+		goto done;
+	}
+
+	mutex_lock(&common.common_lock);
+
+	for (i = 0; i < MAX_VOC_SESSIONS; i++) {
+		v = &common.voice[i];
+
+		mutex_lock(&v->lock);
+		if (is_voc_state_active(v->voc_state)) {
+			result2 = voice_pause_voice_call(v);
+			if (result2 < 0) {
+				pr_err("%s: Voice_pause_voice_call failed for session 0x%x, err %d!\n",
+					__func__, v->session_id, result2);
+
+				result = result2;
+			}
+
+			if (cal_type == CVP_VOCPROC_DYNAMIC_CAL_TYPE)
+				voice_send_cvp_deregister_vol_cal_cmd(v);
+			else if (cal_type == CVP_VOCPROC_STATIC_CAL_TYPE)
+				voice_send_cvp_deregister_cal_cmd(v);
+			else if (cal_type == CVP_VOCDEV_CFG_CAL_TYPE)
+				voice_send_cvp_deregister_dev_cfg_cmd(v);
+			else if (cal_type == CVS_VOCSTRM_STATIC_CAL_TYPE)
+				voice_send_cvs_deregister_cal_cmd(v);
+			else
+				pr_err("%s: Invalid cal type %d!\n",
+					__func__, cal_type);
+
+			result2 = voice_send_start_voice_cmd(v);
+			if (result2) {
+				pr_err("%s: Voice_send_start_voice_cmd failed for session 0x%x, err %d!\n",
+					__func__, v->session_id, result2);
+
+				result = result2;
+			}
+		}
+
+		if ((cal_block->map_data.q6map_handle != 0) &&
+			(!is_other_session_active(v->session_id))) {
+
+			result2 = voice_send_mvm_unmap_memory_physical_cmd(
+				v, cal_block->map_data.q6map_handle);
+			if (result2) {
+				pr_err("%s: Voice_send_mvm_unmap_memory_physical_cmd failed for session 0x%x, err %d!\n",
+					__func__, v->session_id, result2);
+
+				result = result2;
+			}
+			cal_block->map_data.q6map_handle = 0;
+		}
+		mutex_unlock(&v->lock);
+	}
+	mutex_unlock(&common.common_lock);
+done:
+	return result;
+}
+
+int voc_register_vocproc_vol_table(void)
+{
+	int			result = 0;
+	int			result2 = 0;
+	int			i;
+	struct voice_data	*v = NULL;
+
+	pr_debug("%s\n", __func__);
+
+	mutex_lock(&common.common_lock);
+	for (i = 0; i < MAX_VOC_SESSIONS; i++) {
+		v = &common.voice[i];
+
+		mutex_lock(&v->lock);
+		if (is_voc_state_active(v->voc_state)) {
+			result2 = voice_send_cvp_register_vol_cal_cmd(v);
+			if (result2 < 0) {
+				pr_err("%s: Failed to register vocvol table for session 0x%x!\n",
+					__func__, v->session_id);
+
+				result = result2;
+				/* Still try to register other sessions */
+			}
+		}
+		mutex_unlock(&v->lock);
+	}
+
+	mutex_unlock(&common.common_lock);
+	return result;
+}
+
+int voc_deregister_vocproc_vol_table(void)
+{
+	int			result = 0;
+	int			success = 0;
+	int			i;
+	struct voice_data	*v = NULL;
+
+	pr_debug("%s\n", __func__);
+
+	mutex_lock(&common.common_lock);
+	for (i = 0; i < MAX_VOC_SESSIONS; i++) {
+		v = &common.voice[i];
+
+		mutex_lock(&v->lock);
+		if (is_voc_state_active(v->voc_state)) {
+			result = voice_send_cvp_deregister_vol_cal_cmd(v);
+			if (result < 0) {
+				pr_err("%s: Failed to deregister vocvol table for session 0x%x!\n",
+					__func__, v->session_id);
+
+				mutex_unlock(&v->lock);
+				mutex_unlock(&common.common_lock);
+				if (success) {
+					pr_err("%s: Try to re-register all deregistered sessions!\n",
+						__func__);
+
+					voc_register_vocproc_vol_table();
+				}
+				goto done;
+			} else {
+				success = 1;
+			}
+		}
+		mutex_unlock(&v->lock);
+	}
+	mutex_unlock(&common.common_lock);
+done:
+	return result;
+}
+
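+/*
+ * RTAC (real-time audio calibration) blocks are mapped through the
+ * first voice session, allocating the RTAC mem map table on first use
+ * and freeing it again if the mapping fails or is torn down.
+ */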
+int voc_map_rtac_block(struct rtac_cal_block_data *cal_block)
+{
+	int			result = 0;
+	struct voice_data	*v = NULL;
+
+	pr_debug("%s\n", __func__);
+
+	if (cal_block == NULL) {
+		pr_err("%s: cal_block is NULL!\n",
+			__func__);
+
+		result = -EINVAL;
+		goto done;
+	}
+
+	if (cal_block->cal_data.paddr == 0) {
+		pr_debug("%s: No address to map!\n",
+			__func__);
+
+		result = -EINVAL;
+		goto done;
+	}
+
+	if (cal_block->map_data.map_size == 0) {
+		pr_debug("%s: map size is 0!\n",
+			__func__);
+
+		result = -EINVAL;
+		goto done;
+	}
+
+	mutex_lock(&common.common_lock);
+	/* use first session */
+	v = &common.voice[0];
+	mutex_lock(&v->lock);
+
+	if (!is_rtac_memory_allocated()) {
+		result = voice_alloc_rtac_mem_map_table();
+		if (result < 0) {
+			pr_err("%s: RTAC alloc mem map table did not work! addr = 0x%pK, size = %d\n",
+				__func__,
+				&cal_block->cal_data.paddr,
+				cal_block->map_data.map_size);
+
+			goto done_unlock;
+		}
+	}
+
+	result = voice_map_memory_physical_cmd(v,
+		&common.rtac_mem_map_table,
+		(dma_addr_t)cal_block->cal_data.paddr,
+		cal_block->map_data.map_size,
+		VOC_RTAC_MEM_MAP_TOKEN);
+	if (result < 0) {
+		pr_err("%s: RTAC mmap did not work! addr = 0x%pK, size = %d\n",
+			__func__,
+			&cal_block->cal_data.paddr,
+			cal_block->map_data.map_size);
+
+		free_rtac_map_table();
+		goto done_unlock;
+	}
+
+	cal_block->map_data.map_handle = common.rtac_mem_handle;
+done_unlock:
+	mutex_unlock(&v->lock);
+	mutex_unlock(&common.common_lock);
+done:
+	return result;
+}
+
+int voc_unmap_rtac_block(uint32_t *mem_map_handle)
+{
+	int			result = 0;
+	struct voice_data	*v = NULL;
+
+	pr_debug("%s\n", __func__);
+
+	if (mem_map_handle == NULL) {
+		pr_debug("%s: Map handle is NULL, nothing to unmap\n",
+			__func__);
+
+		goto done;
+	}
+
+	if (*mem_map_handle == 0) {
+		pr_debug("%s: Map handle is 0, nothing to unmap\n",
+			__func__);
+
+		goto done;
+	}
+
+	mutex_lock(&common.common_lock);
+	/* Use first session */
+	/* Only used for apr wait lock */
+	v = &common.voice[0];
+	mutex_lock(&v->lock);
+
+	result = voice_send_mvm_unmap_memory_physical_cmd(
+			v, *mem_map_handle);
+	if (result) {
+		pr_err("%s: voice_send_mvm_unmap_memory_physical_cmd Failed for session 0x%x!\n",
+			__func__, v->session_id);
+	} else {
+		*mem_map_handle = 0;
+		common.rtac_mem_handle = 0;
+		free_rtac_map_table();
+	}
+	mutex_unlock(&v->lock);
+	mutex_unlock(&common.common_lock);
+done:
+	return result;
+}
+
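+/*
+ * Full vocproc bring-up: create the CVP session, push media format
+ * info, commit the topology, register stream/device/volume calibration
+ * (best effort), enable and attach the vocproc, then restore the
+ * per-session features (TTY, the ST voice module, in-call music and
+ * recording, DTMF detection, HD voice) and register the handles with
+ * RTAC.
+ */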
+static int voice_setup_vocproc(struct voice_data *v)
+{
+	int ret = 0;
+
+	ret = voice_send_cvp_create_cmd(v);
+	if (ret < 0) {
+		pr_err("%s: CVP create failed err:%d\n", __func__, ret);
+		goto fail;
+	}
+
+	ret = voice_send_cvp_media_fmt_info_cmd(v);
+	if (ret < 0) {
+		pr_err("%s: Set media format info failed err:%d\n", __func__,
+		       ret);
+		goto fail;
+	}
+
+	ret = voice_send_cvp_topology_commit_cmd(v);
+	if (ret < 0) {
+		pr_err("%s: Set topology commit failed err:%d\n",
+		       __func__, ret);
+		goto fail;
+	}
+
+	voice_send_cvs_register_cal_cmd(v);
+	voice_send_cvp_register_dev_cfg_cmd(v);
+	voice_send_cvp_register_cal_cmd(v);
+	voice_send_cvp_register_vol_cal_cmd(v);
+
+	/* enable vocproc */
+	ret = voice_send_enable_vocproc_cmd(v);
+	if (ret < 0)
+		goto fail;
+
+	/* attach vocproc */
+	ret = voice_send_attach_vocproc_cmd(v);
+	if (ret < 0)
+		goto fail;
+
+	/* send tty mode if tty device is used */
+	voice_send_tty_mode_cmd(v);
+
+	if (is_voip_session(v->session_id)) {
+		ret = voice_send_mvm_cal_network_cmd(v);
+		if (ret < 0)
+			pr_err("%s: voice_send_mvm_cal_network_cmd: %d\n",
+				__func__, ret);
+
+		ret = voice_send_mvm_media_type_cmd(v);
+		if (ret < 0)
+			pr_err("%s: voice_send_mvm_media_type_cmd: %d\n",
+				__func__, ret);
+
+		voice_send_netid_timing_cmd(v);
+	}
+
+	if (v->st_enable && !v->tty_mode)
+		voice_send_set_pp_enable_cmd(v,
+					     MODULE_ID_VOICE_MODULE_ST,
+					     v->st_enable);
+	/* Start in-call music delivery if this feature is enabled */
+	if (v->music_info.play_enable)
+		voice_cvs_start_playback(v);
+
+	/* Start in-call recording if this feature is enabled */
+	if (v->rec_info.rec_enable)
+		voice_cvs_start_record(v, v->rec_info.rec_mode);
+
+	if (v->dtmf_rx_detect_en)
+		voice_send_dtmf_rx_detection_cmd(v, v->dtmf_rx_detect_en);
+
+	if (v->hd_enable)
+		voice_send_hd_cmd(v, v->hd_enable);
+
+	rtac_add_voice(voice_get_cvs_handle(v),
+		voice_get_cvp_handle(v),
+		v->dev_rx.port_id, v->dev_tx.port_id,
+		v->dev_rx.dev_id, v->dev_tx.dev_id,
+		v->session_id);
+
+	return 0;
+
+fail:
+	return ret;
+}
+
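+/*
+ * Supported only on CVD 2.2 and newer: advertise the device channel
+ * count to the topology, with Rx fixed to one channel and Tx taken from
+ * the Tx device configuration.
+ */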
+static int voice_send_cvp_device_channels_cmd(struct voice_data *v)
+{
+	int ret = 0;
+	struct  cvp_set_dev_channels_cmd cvp_set_dev_channels_cmd;
+	void *apr_cvp;
+	u16 cvp_handle;
+
+	if (!(voice_get_cvd_int_version(common.cvd_version) >=
+	      CVD_INT_VERSION_2_2)) {
+		pr_debug("%s: CVD ver %s doesn't support send_device_channels cmd\n",
+			 __func__, common.cvd_version);
+
+		goto done;
+	}
+
+	if (v == NULL) {
+		pr_err("%s: v is NULL\n", __func__);
+
+		ret = -EINVAL;
+		goto done;
+	}
+
+	apr_cvp = common.apr_q6_cvp;
+	if (!apr_cvp) {
+		pr_err("%s: apr_cvp is NULL.\n", __func__);
+
+		ret = -EINVAL;
+		goto done;
+	}
+
+	cvp_handle = voice_get_cvp_handle(v);
+	cvp_set_dev_channels_cmd.hdr.hdr_field =
+			APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+			APR_HDR_LEN(APR_HDR_SIZE),
+			APR_PKT_VER);
+	cvp_set_dev_channels_cmd.hdr.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE,
+			sizeof(cvp_set_dev_channels_cmd) - APR_HDR_SIZE);
+	cvp_set_dev_channels_cmd.hdr.src_port =
+				voice_get_idx_for_session(v->session_id);
+	cvp_set_dev_channels_cmd.hdr.dest_port = cvp_handle;
+	cvp_set_dev_channels_cmd.hdr.token = 0;
+	cvp_set_dev_channels_cmd.hdr.opcode =
+				VSS_IVOCPROC_CMD_TOPOLOGY_SET_DEV_CHANNELS;
+	cvp_set_dev_channels_cmd.cvp_set_channels.rx_num_channels =
+				VSS_NUM_DEV_CHANNELS_1;
+	cvp_set_dev_channels_cmd.cvp_set_channels.tx_num_channels =
+				v->dev_tx.no_of_channels;
+
+	v->cvp_state = CMD_STATUS_FAIL;
+	v->async_err = 0;
+	ret = apr_send_pkt(apr_cvp, (uint32_t *) &cvp_set_dev_channels_cmd);
+	if (ret < 0) {
+		pr_err("%s: Fail in sending VSS_IVOCPROC_CMD_TOPOLOGY_SET_DEV_CHANNELS\n",
+		       __func__);
+
+		ret = -EINVAL;
+		goto done;
+	}
+
+	ret = wait_event_timeout(v->cvp_wait,
+				(v->cvp_state == CMD_STATUS_SUCCESS),
+				msecs_to_jiffies(TIMEOUT_MS));
+	if (!ret) {
+		pr_err("%s: wait_event timeout\n", __func__);
+
+		ret = -EINVAL;
+		goto done;
+	}
+	if (v->async_err > 0) {
+		pr_err("%s: DSP returned error[%s]\n",
+			__func__, adsp_err_get_err_str(
+			v->async_err));
+		ret = adsp_err_get_lnx_err_code(
+				v->async_err);
+		goto done;
+	}
+
+done:
+	return ret;
+}
+
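+/*
+ * Media format setup: the device channel count is requested first
+ * (itself a no-op below CVD 2.2), the per-path endpoint media format
+ * commands require CVD 2.3 or later, and the EC reference format is
+ * only sent when an external EC reference is in use.
+ */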
+static int voice_send_cvp_media_fmt_info_cmd(struct voice_data *v)
+{
+	int ret;
+
+	ret = voice_send_cvp_device_channels_cmd(v);
+	if (ret < 0)
+		goto done;
+
+	if (voice_get_cvd_int_version(common.cvd_version) >=
+	    CVD_INT_VERSION_2_3) {
+		ret = voice_send_cvp_media_format_cmd(v, RX_PATH);
+		if (ret < 0)
+			goto done;
+
+		ret = voice_send_cvp_media_format_cmd(v, TX_PATH);
+		if (ret < 0)
+			goto done;
+
+		if (common.ec_ref_ext)
+			ret = voice_send_cvp_media_format_cmd(v, EC_REF_PATH);
+	}
+
+done:
+	return ret;
+}
+
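+/*
+ * Sends a single VSS_ICOMMON_CMD_SET_PARAM_V2 packet carrying an in-band
+ * vss_param_endpoint_media_format_info_t payload. param_type selects
+ * which endpoint (RX, TX or external EC reference) supplies the port id,
+ * channel count, bit width, sample rate and channel map.
+ */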
+static int voice_send_cvp_media_format_cmd(struct voice_data *v,
+					   uint32_t param_type)
+{
+	int ret = 0;
+	struct cvp_set_media_format_cmd cvp_set_media_format_cmd;
+	void *apr_cvp;
+	u16 cvp_handle;
+	struct vss_icommon_param_data_t *media_fmt_param_data =
+		&cvp_set_media_format_cmd.cvp_set_param_v2.param_data;
+	struct vss_param_endpoint_media_format_info_t *media_fmt_info =
+		&media_fmt_param_data->media_format_info;
+
+	if (v == NULL) {
+		pr_err("%s: v is NULL\n", __func__);
+		ret = -EINVAL;
+		goto done;
+	}
+
+	apr_cvp = common.apr_q6_cvp;
+	if (!apr_cvp) {
+		pr_err("%s: apr_cvp is NULL.\n", __func__);
+		ret = -EINVAL;
+		goto done;
+	}
+
+	cvp_handle = voice_get_cvp_handle(v);
+	memset(&cvp_set_media_format_cmd, 0, sizeof(cvp_set_media_format_cmd));
+
+	/* Fill header data */
+	cvp_set_media_format_cmd.hdr.hdr_field =
+		APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, APR_HDR_LEN(APR_HDR_SIZE),
+			      APR_PKT_VER);
+	cvp_set_media_format_cmd.hdr.pkt_size =
+		APR_PKT_SIZE(APR_HDR_SIZE,
+			     sizeof(cvp_set_media_format_cmd) - APR_HDR_SIZE);
+	cvp_set_media_format_cmd.hdr.src_svc = 0;
+	cvp_set_media_format_cmd.hdr.src_domain = APR_DOMAIN_APPS;
+	cvp_set_media_format_cmd.hdr.src_port =
+		voice_get_idx_for_session(v->session_id);
+	cvp_set_media_format_cmd.hdr.dest_svc = 0;
+	cvp_set_media_format_cmd.hdr.dest_domain = APR_DOMAIN_ADSP;
+	cvp_set_media_format_cmd.hdr.dest_port = cvp_handle;
+	cvp_set_media_format_cmd.hdr.token = VOC_SET_MEDIA_FORMAT_PARAM_TOKEN;
+	cvp_set_media_format_cmd.hdr.opcode = VSS_ICOMMON_CMD_SET_PARAM_V2;
+
+	/* Fill param data */
+	cvp_set_media_format_cmd.cvp_set_param_v2.mem_size =
+		sizeof(struct vss_icommon_param_data_t);
+	media_fmt_param_data->module_id = VSS_MODULE_CVD_GENERIC;
+	media_fmt_param_data->param_size =
+		sizeof(struct vss_param_endpoint_media_format_info_t);
+
+	/* Fill device specific data */
+	switch (param_type) {
+	case RX_PATH:
+		media_fmt_param_data->param_id =
+			VSS_PARAM_RX_PORT_ENDPOINT_MEDIA_INFO;
+		media_fmt_info->port_id = v->dev_rx.port_id;
+		media_fmt_info->num_channels = v->dev_rx.no_of_channels;
+		media_fmt_info->bits_per_sample = v->dev_rx.bits_per_sample;
+		media_fmt_info->sample_rate = v->dev_rx.sample_rate;
+		memcpy(&media_fmt_info->channel_mapping,
+		       &v->dev_rx.channel_mapping, VSS_CHANNEL_MAPPING_SIZE);
+		break;
+
+	case TX_PATH:
+		media_fmt_param_data->param_id =
+			VSS_PARAM_TX_PORT_ENDPOINT_MEDIA_INFO;
+		media_fmt_info->port_id = v->dev_tx.port_id;
+		media_fmt_info->num_channels = v->dev_tx.no_of_channels;
+		media_fmt_info->bits_per_sample = v->dev_tx.bits_per_sample;
+		media_fmt_info->sample_rate = v->dev_tx.sample_rate;
+		memcpy(&media_fmt_info->channel_mapping,
+		       &v->dev_tx.channel_mapping, VSS_CHANNEL_MAPPING_SIZE);
+		break;
+
+	case EC_REF_PATH:
+		media_fmt_param_data->param_id =
+			VSS_PARAM_EC_REF_PORT_ENDPOINT_MEDIA_INFO;
+		media_fmt_info->port_id = common.ec_media_fmt_info.port_id;
+		media_fmt_info->num_channels =
+			common.ec_media_fmt_info.num_channels;
+		media_fmt_info->bits_per_sample =
+			common.ec_media_fmt_info.bits_per_sample;
+		media_fmt_info->sample_rate =
+			common.ec_media_fmt_info.sample_rate;
+		memcpy(&media_fmt_info->channel_mapping,
+		       &common.ec_media_fmt_info.channel_mapping,
+		       VSS_CHANNEL_MAPPING_SIZE);
+		break;
+
+	default:
+		pr_err("%s: Invalid param type %d\n", __func__, param_type);
+		ret = -EINVAL;
+		goto done;
+	}
+
+	/* Send command */
+	v->cvp_state = CMD_STATUS_FAIL;
+	v->async_err = 0;
+	ret = apr_send_pkt(apr_cvp, (uint32_t *) &cvp_set_media_format_cmd);
+	if (ret < 0) {
+		pr_err("%s: Fail in sending VSS_ICOMMON_CMD_SET_PARAM_V2\n",
+		       __func__);
+		ret = -EINVAL;
+		goto done;
+	}
+
+	ret = wait_event_timeout(v->cvp_wait,
+				 (v->cvp_state == CMD_STATUS_SUCCESS),
+				 msecs_to_jiffies(TIMEOUT_MS));
+	if (!ret) {
+		pr_err("%s: wait_event timeout\n", __func__);
+		ret = -EINVAL;
+		goto done;
+	}
+
+	if (v->async_err > 0) {
+		pr_err("%s: DSP returned error[%s] handle = %d\n", __func__,
+		       adsp_err_get_err_str(v->async_err), cvp_handle);
+		ret = adsp_err_get_lnx_err_code(v->async_err);
+		goto done;
+	}
+
+done:
+	return ret;
+}
+
+static int voice_send_cvp_topology_commit_cmd(struct voice_data *v)
+{
+	int ret = 0;
+	struct apr_hdr cvp_topology_commit_cmd;
+	void *apr_cvp;
+	u16 cvp_handle;
+
+	if (!(voice_get_cvd_int_version(common.cvd_version) >=
+	      CVD_INT_VERSION_2_2)) {
+		pr_debug("%s CVD version string %s doesnt support this command\n",
+			 __func__, common.cvd_version);
+
+		goto done;
+	}
+
+	if (v == NULL) {
+		pr_err("%s: v is NULL\n", __func__);
+
+		ret = -EINVAL;
+		goto done;
+	}
+
+	apr_cvp = common.apr_q6_cvp;
+	if (!apr_cvp) {
+		pr_err("%s: apr_cvp is NULL.\n", __func__);
+
+		ret = -EINVAL;
+		goto done;
+	}
+
+	cvp_handle = voice_get_cvp_handle(v);
+	cvp_topology_commit_cmd.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+						APR_HDR_LEN(APR_HDR_SIZE),
+						APR_PKT_VER);
+	cvp_topology_commit_cmd.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE,
+				sizeof(cvp_topology_commit_cmd) - APR_HDR_SIZE);
+	cvp_topology_commit_cmd.src_port =
+				voice_get_idx_for_session(v->session_id);
+	cvp_topology_commit_cmd.dest_port = cvp_handle;
+	cvp_topology_commit_cmd.token = 0;
+	cvp_topology_commit_cmd.opcode = VSS_IVOCPROC_CMD_TOPOLOGY_COMMIT;
+
+	v->cvp_state = CMD_STATUS_FAIL;
+	v->async_err = 0;
+	ret = apr_send_pkt(apr_cvp, (uint32_t *) &cvp_topology_commit_cmd);
+	if (ret < 0) {
+		pr_err("%s: Fail in sending VSS_IVOCPROC_CMD_TOPOLOGY_COMMIT\n",
+		       __func__);
+
+		ret = -EINVAL;
+		goto done;
+	}
+
+	ret = wait_event_timeout(v->cvp_wait,
+				(v->cvp_state == CMD_STATUS_SUCCESS),
+				msecs_to_jiffies(TIMEOUT_MS));
+	if (!ret) {
+		pr_err("%s: wait_event timeout\n", __func__);
+		ret = -EINVAL;
+		goto done;
+	}
+	if (v->async_err > 0) {
+		pr_err("%s: DSP returned error[%s]\n",
+			__func__, adsp_err_get_err_str(
+			v->async_err));
+		ret = adsp_err_get_lnx_err_code(
+				v->async_err);
+		goto done;
+	}
+
+done:
+	return ret;
+}
+
+static int voice_send_enable_vocproc_cmd(struct voice_data *v)
+{
+	int ret = 0;
+	struct apr_hdr cvp_enable_cmd;
+	void *apr_cvp;
+	u16 cvp_handle;
+
+	if (v == NULL) {
+		pr_err("%s: v is NULL\n", __func__);
+		return -EINVAL;
+	}
+	apr_cvp = common.apr_q6_cvp;
+
+	if (!apr_cvp) {
+		pr_err("%s: apr_cvp is NULL.\n", __func__);
+		return -EINVAL;
+	}
+	cvp_handle = voice_get_cvp_handle(v);
+
+	/* enable vocproc and wait for response */
+	cvp_enable_cmd.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+						APR_HDR_LEN(APR_HDR_SIZE),
+						APR_PKT_VER);
+	cvp_enable_cmd.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE,
+				sizeof(cvp_enable_cmd) - APR_HDR_SIZE);
+	pr_debug("cvp_enable_cmd pkt size = %d, cvp_handle=%d\n",
+		cvp_enable_cmd.pkt_size, cvp_handle);
+	cvp_enable_cmd.src_port =
+				voice_get_idx_for_session(v->session_id);
+	cvp_enable_cmd.dest_port = cvp_handle;
+	cvp_enable_cmd.token = 0;
+	cvp_enable_cmd.opcode = VSS_IVOCPROC_CMD_ENABLE;
+
+	v->cvp_state = CMD_STATUS_FAIL;
+	v->async_err = 0;
+	ret = apr_send_pkt(apr_cvp, (uint32_t *) &cvp_enable_cmd);
+	if (ret < 0) {
+		pr_err("Fail in sending VSS_IVOCPROC_CMD_ENABLE\n");
+		goto fail;
+	}
+	ret = wait_event_timeout(v->cvp_wait,
+				(v->cvp_state == CMD_STATUS_SUCCESS),
+				msecs_to_jiffies(TIMEOUT_MS));
+	if (!ret) {
+		pr_err("%s: wait_event timeout\n", __func__);
+		ret = -EINVAL;
+		goto fail;
+	}
+	if (v->async_err > 0) {
+		pr_err("%s: DSP returned error[%s]\n",
+				__func__, adsp_err_get_err_str(
+				v->async_err));
+		ret = adsp_err_get_lnx_err_code(
+				v->async_err);
+		goto fail;
+	}
+
+	return 0;
+fail:
+	return ret;
+}
+
+static int voice_send_mvm_cal_network_cmd(struct voice_data *v)
+{
+	struct vss_imvm_cmd_set_cal_network_t mvm_set_cal_network;
+	int ret = 0;
+	void *apr_mvm;
+	u16 mvm_handle;
+
+	if (v == NULL) {
+		pr_err("%s: v is NULL\n", __func__);
+		return -EINVAL;
+	}
+	apr_mvm = common.apr_q6_mvm;
+
+	if (!apr_mvm) {
+		pr_err("%s: apr_mvm is NULL.\n", __func__);
+		return -EINVAL;
+	}
+	mvm_handle = voice_get_mvm_handle(v);
+
+	mvm_set_cal_network.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+						APR_HDR_LEN(APR_HDR_SIZE),
+						APR_PKT_VER);
+	mvm_set_cal_network.hdr.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE,
+				sizeof(mvm_set_cal_network) - APR_HDR_SIZE);
+	mvm_set_cal_network.hdr.src_port =
+				voice_get_idx_for_session(v->session_id);
+	mvm_set_cal_network.hdr.dest_port = mvm_handle;
+	mvm_set_cal_network.hdr.token = 0;
+	mvm_set_cal_network.hdr.opcode = VSS_IMVM_CMD_SET_CAL_NETWORK;
+	mvm_set_cal_network.network_id = VSS_ICOMMON_CAL_NETWORK_ID_NONE;
+
+	v->mvm_state = CMD_STATUS_FAIL;
+	v->async_err = 0;
+	ret = apr_send_pkt(apr_mvm, (uint32_t *) &mvm_set_cal_network);
+	if (ret < 0) {
+		pr_err("%s: Error %d sending SET_NETWORK\n", __func__, ret);
+		goto fail;
+	}
+
+	ret = wait_event_timeout(v->mvm_wait,
+				(v->mvm_state == CMD_STATUS_SUCCESS),
+				 msecs_to_jiffies(TIMEOUT_MS));
+	if (!ret) {
+		pr_err("%s: wait_event timeout\n", __func__);
+		ret = -EINVAL;
+		goto fail;
+	}
+	if (v->async_err > 0) {
+		pr_err("%s: DSP returned error[%s]\n",
+				__func__, adsp_err_get_err_str(
+				v->async_err));
+		ret = adsp_err_get_lnx_err_code(
+				v->async_err);
+		goto fail;
+	}
+	return 0;
+fail:
+	return ret;
+}
+
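+/*
+ * VoIP-only setup: configure the CVS vocoder, program the network id
+ * from mvs_info, then send fixed encoder/decoder timing offsets via
+ * VSS_ICOMMON_CMD_SET_VOICE_TIMING (the offsets are presumably in
+ * microseconds relative to the vocoder frame reference, per the VSS
+ * timing API).
+ */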
+static int voice_send_netid_timing_cmd(struct voice_data *v)
+{
+	int ret = 0;
+	void *apr_mvm;
+	u16 mvm_handle;
+	struct mvm_set_network_cmd mvm_set_network;
+	struct mvm_set_voice_timing_cmd mvm_set_voice_timing;
+
+	if (v == NULL) {
+		pr_err("%s: v is NULL\n", __func__);
+		return -EINVAL;
+	}
+	apr_mvm = common.apr_q6_mvm;
+
+	if (!apr_mvm) {
+		pr_err("%s: apr_mvm is NULL.\n", __func__);
+		return -EINVAL;
+	}
+	mvm_handle = voice_get_mvm_handle(v);
+
+	ret = voice_config_cvs_vocoder(v);
+	if (ret < 0) {
+		pr_err("%s: Error %d configuring CVS voc",
+					__func__, ret);
+		goto fail;
+	}
+	/* Set network ID. */
+	pr_debug("Setting network ID %x\n", common.mvs_info.network_type);
+
+	mvm_set_network.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+						APR_HDR_LEN(APR_HDR_SIZE),
+						APR_PKT_VER);
+	mvm_set_network.hdr.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE,
+					sizeof(mvm_set_network) - APR_HDR_SIZE);
+	mvm_set_network.hdr.src_port =
+				voice_get_idx_for_session(v->session_id);
+	mvm_set_network.hdr.dest_port = mvm_handle;
+	mvm_set_network.hdr.token = 0;
+	mvm_set_network.hdr.opcode = VSS_IMVM_CMD_SET_CAL_NETWORK;
+	mvm_set_network.network.network_id = common.mvs_info.network_type;
+
+	v->mvm_state = CMD_STATUS_FAIL;
+	v->async_err = 0;
+	ret = apr_send_pkt(apr_mvm, (uint32_t *) &mvm_set_network);
+	if (ret < 0) {
+		pr_err("%s: Error %d sending SET_NETWORK\n", __func__, ret);
+		goto fail;
+	}
+
+	ret = wait_event_timeout(v->mvm_wait,
+				(v->mvm_state == CMD_STATUS_SUCCESS),
+				 msecs_to_jiffies(TIMEOUT_MS));
+	if (!ret) {
+		pr_err("%s: wait_event timeout\n", __func__);
+		ret = -EINVAL;
+		goto fail;
+	}
+	if (v->async_err > 0) {
+		pr_err("%s: DSP returned error[%s]\n",
+				__func__, adsp_err_get_err_str(
+				v->async_err));
+		ret = adsp_err_get_lnx_err_code(
+				v->async_err);
+		goto fail;
+	}
+
+	/* Set voice timing. */
+	 pr_debug("Setting voice timing\n");
+
+	mvm_set_voice_timing.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+						APR_HDR_LEN(APR_HDR_SIZE),
+						APR_PKT_VER);
+	mvm_set_voice_timing.hdr.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE,
+						sizeof(mvm_set_voice_timing) -
+						APR_HDR_SIZE);
+	mvm_set_voice_timing.hdr.src_port =
+				voice_get_idx_for_session(v->session_id);
+	mvm_set_voice_timing.hdr.dest_port = mvm_handle;
+	mvm_set_voice_timing.hdr.token = 0;
+	mvm_set_voice_timing.hdr.opcode = VSS_ICOMMON_CMD_SET_VOICE_TIMING;
+	mvm_set_voice_timing.timing.mode = 0;
+	mvm_set_voice_timing.timing.enc_offset = 8000;
+	mvm_set_voice_timing.timing.dec_req_offset = 3300;
+	mvm_set_voice_timing.timing.dec_offset = 8300;
+
+	v->mvm_state = CMD_STATUS_FAIL;
+	v->async_err = 0;
+
+	ret = apr_send_pkt(apr_mvm, (uint32_t *) &mvm_set_voice_timing);
+	if (ret < 0) {
+		pr_err("%s: Error %d sending SET_TIMING\n", __func__, ret);
+		goto fail;
+	}
+
+	ret = wait_event_timeout(v->mvm_wait,
+				(v->mvm_state == CMD_STATUS_SUCCESS),
+				msecs_to_jiffies(TIMEOUT_MS));
+	if (!ret) {
+		pr_err("%s: wait_event timeout\n", __func__);
+		ret = -EINVAL;
+		goto fail;
+	}
+	if (v->async_err > 0) {
+		pr_err("%s: DSP returned error[%s]\n",
+				__func__, adsp_err_get_err_str(
+				v->async_err));
+		ret = adsp_err_get_lnx_err_code(
+				v->async_err);
+		goto fail;
+	}
+
+	return 0;
+fail:
+	return ret;
+}
+
+static int voice_send_attach_vocproc_cmd(struct voice_data *v)
+{
+	int ret = 0;
+	struct mvm_attach_vocproc_cmd mvm_a_vocproc_cmd;
+	void *apr_mvm;
+	u16 mvm_handle, cvp_handle;
+
+	if (v == NULL) {
+		pr_err("%s: v is NULL\n", __func__);
+		return -EINVAL;
+	}
+	apr_mvm = common.apr_q6_mvm;
+
+	if (!apr_mvm) {
+		pr_err("%s: apr_mvm is NULL.\n", __func__);
+		return -EINVAL;
+	}
+	mvm_handle = voice_get_mvm_handle(v);
+	cvp_handle = voice_get_cvp_handle(v);
+
+	/* attach vocproc and wait for response */
+	mvm_a_vocproc_cmd.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+						APR_HDR_LEN(APR_HDR_SIZE),
+						APR_PKT_VER);
+	mvm_a_vocproc_cmd.hdr.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE,
+				sizeof(mvm_a_vocproc_cmd) - APR_HDR_SIZE);
+	pr_debug("send mvm_a_vocproc_cmd pkt size = %d\n",
+		mvm_a_vocproc_cmd.hdr.pkt_size);
+	mvm_a_vocproc_cmd.hdr.src_port =
+				voice_get_idx_for_session(v->session_id);
+	mvm_a_vocproc_cmd.hdr.dest_port = mvm_handle;
+	mvm_a_vocproc_cmd.hdr.token = 0;
+	mvm_a_vocproc_cmd.hdr.opcode = VSS_IMVM_CMD_ATTACH_VOCPROC;
+	mvm_a_vocproc_cmd.mvm_attach_cvp_handle.handle = cvp_handle;
+
+	v->mvm_state = CMD_STATUS_FAIL;
+	v->async_err = 0;
+	ret = apr_send_pkt(apr_mvm, (uint32_t *) &mvm_a_vocproc_cmd);
+	if (ret < 0) {
+		pr_err("Fail in sending VSS_IMVM_CMD_ATTACH_VOCPROC\n");
+		goto fail;
+	}
+	ret = wait_event_timeout(v->mvm_wait,
+				 (v->mvm_state == CMD_STATUS_SUCCESS),
+				 msecs_to_jiffies(TIMEOUT_MS));
+	if (!ret) {
+		pr_err("%s: wait_event timeout\n", __func__);
+		ret = -EINVAL;
+		goto fail;
+	}
+	if (v->async_err > 0) {
+		pr_err("%s: DSP returned error[%s]\n",
+				__func__, adsp_err_get_err_str(
+				v->async_err));
+		ret = adsp_err_get_lnx_err_code(
+				v->async_err);
+		goto fail;
+	}
+
+	return 0;
+fail:
+	return ret;
+}
+
+static void voc_update_session_params(struct voice_data *v)
+{
+	/* reset LCH mode */
+	v->lch_mode = 0;
+
+	/* clear disable topology setting */
+	v->disable_topology = false;
+
+	/* clear mute setting */
+	v->dev_rx.dev_mute = common.default_mute_val;
+	v->dev_tx.dev_mute = common.default_mute_val;
+	v->stream_rx.stream_mute = common.default_mute_val;
+	v->stream_tx.stream_mute = common.default_mute_val;
+}
+
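+/*
+ * Teardown mirrors voice_setup_vocproc() in reverse: stop the optional
+ * features (slowtalk, HD voice, playback/record, DTMF detection), stop
+ * the voice flow, detach the vocproc from MVM, deregister calibration
+ * and finally destroy the CVP session. An SRVCC hand-over additionally
+ * moves an active in-call recording onto the passive voice session.
+ */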
+static int voice_destroy_vocproc(struct voice_data *v)
+{
+	struct mvm_detach_vocproc_cmd mvm_d_vocproc_cmd;
+	struct apr_hdr cvp_destroy_session_cmd;
+	int ret = 0;
+	void *apr_mvm, *apr_cvp;
+	u16 mvm_handle, cvp_handle;
+
+	if (v == NULL) {
+		pr_err("%s: v is NULL\n", __func__);
+		return -EINVAL;
+	}
+	apr_mvm = common.apr_q6_mvm;
+	apr_cvp = common.apr_q6_cvp;
+
+	if (!apr_mvm || !apr_cvp) {
+		pr_err("%s: apr_mvm or apr_cvp is NULL.\n", __func__);
+		return -EINVAL;
+	}
+	mvm_handle = voice_get_mvm_handle(v);
+	cvp_handle = voice_get_cvp_handle(v);
+
+	/* disable slowtalk if st_enable is set */
+	if (v->st_enable)
+		voice_send_set_pp_enable_cmd(v, MODULE_ID_VOICE_MODULE_ST, 0);
+
+	/* Disable HD Voice if hd_enable is set */
+	if (v->hd_enable)
+		voice_send_hd_cmd(v, 0);
+
+	/* stop playback or recording */
+	v->music_info.force = 1;
+	voice_cvs_stop_playback(v);
+	voice_cvs_stop_record(v);
+	/*
+	 * If a VoLTE call hands over to a voice call (SRVCC) while in-call
+	 * recording is active, restart the recording on the voice session.
+	 */
+	if (is_volte_session(v->session_id) &&
+	    ((common.voice[VOC_PATH_PASSIVE].voc_state == VOC_RUN) ||
+	     (common.voice[VOC_PATH_PASSIVE].voc_state == VOC_CHANGE))) {
+		if (v->rec_info.rec_enable) {
+			voice_cvs_start_record(
+				&common.voice[VOC_PATH_PASSIVE],
+				v->rec_info.rec_mode);
+			common.srvcc_rec_flag = true;
+
+			pr_debug("%s: switch recording, srvcc_rec_flag %d\n",
+				 __func__, common.srvcc_rec_flag);
+		}
+	}
+
+	/* send stop voice cmd */
+	voice_send_stop_voice_cmd(v);
+
+	/* send stop dtmf detection cmd */
+	if (v->dtmf_rx_detect_en)
+		voice_send_dtmf_rx_detection_cmd(v, 0);
+
+	/* detach VOCPROC and wait for response from mvm */
+	mvm_d_vocproc_cmd.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+						APR_HDR_LEN(APR_HDR_SIZE),
+						APR_PKT_VER);
+	mvm_d_vocproc_cmd.hdr.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE,
+				sizeof(mvm_d_vocproc_cmd) - APR_HDR_SIZE);
+	pr_debug("mvm_d_vocproc_cmd  pkt size = %d\n",
+		mvm_d_vocproc_cmd.hdr.pkt_size);
+	mvm_d_vocproc_cmd.hdr.src_port =
+				voice_get_idx_for_session(v->session_id);
+	mvm_d_vocproc_cmd.hdr.dest_port = mvm_handle;
+	mvm_d_vocproc_cmd.hdr.token = 0;
+	mvm_d_vocproc_cmd.hdr.opcode = VSS_IMVM_CMD_DETACH_VOCPROC;
+	mvm_d_vocproc_cmd.mvm_detach_cvp_handle.handle = cvp_handle;
+
+	v->mvm_state = CMD_STATUS_FAIL;
+	v->async_err = 0;
+	ret = apr_send_pkt(apr_mvm, (uint32_t *) &mvm_d_vocproc_cmd);
+	if (ret < 0) {
+		pr_err("Fail in sending VSS_IMVM_CMD_DETACH_VOCPROC\n");
+		goto fail;
+	}
+	ret = wait_event_timeout(v->mvm_wait,
+				 (v->mvm_state == CMD_STATUS_SUCCESS),
+				 msecs_to_jiffies(TIMEOUT_MS));
+	if (!ret) {
+		pr_err("%s: wait_event timeout\n", __func__);
+		ret = -EINVAL;
+		goto fail;
+	}
+	if (v->async_err > 0) {
+		pr_err("%s: DSP returned error[%s]\n",
+				__func__, adsp_err_get_err_str(
+				v->async_err));
+		ret = adsp_err_get_lnx_err_code(
+				v->async_err);
+		goto fail;
+	}
+
+	voice_send_cvp_deregister_vol_cal_cmd(v);
+	voice_send_cvp_deregister_cal_cmd(v);
+	voice_send_cvp_deregister_dev_cfg_cmd(v);
+	voice_send_cvs_deregister_cal_cmd(v);
+
+	/* destroy cvp session */
+	cvp_destroy_session_cmd.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+						APR_HDR_LEN(APR_HDR_SIZE),
+						APR_PKT_VER);
+	cvp_destroy_session_cmd.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE,
+				sizeof(cvp_destroy_session_cmd) - APR_HDR_SIZE);
+	pr_debug("cvp_destroy_session_cmd pkt size = %d\n",
+		cvp_destroy_session_cmd.pkt_size);
+	cvp_destroy_session_cmd.src_port =
+				voice_get_idx_for_session(v->session_id);
+	cvp_destroy_session_cmd.dest_port = cvp_handle;
+	cvp_destroy_session_cmd.token = 0;
+	cvp_destroy_session_cmd.opcode = APRV2_IBASIC_CMD_DESTROY_SESSION;
+
+	v->cvp_state = CMD_STATUS_FAIL;
+	v->async_err = 0;
+	ret = apr_send_pkt(apr_cvp, (uint32_t *) &cvp_destroy_session_cmd);
+	if (ret < 0) {
+		pr_err("Fail in sending APRV2_IBASIC_CMD_DESTROY_SESSION\n");
+		goto fail;
+	}
+	ret = wait_event_timeout(v->cvp_wait,
+				 (v->cvp_state == CMD_STATUS_SUCCESS),
+				 msecs_to_jiffies(TIMEOUT_MS));
+	if (!ret) {
+		pr_err("%s: wait_event timeout\n", __func__);
+		ret = -EINVAL;
+		goto fail;
+	}
+	if (v->async_err > 0) {
+		pr_err("%s: DSP returned error[%s]\n",
+				__func__, adsp_err_get_err_str(
+				v->async_err));
+		ret = adsp_err_get_lnx_err_code(
+				v->async_err);
+		goto fail;
+	}
+
+	rtac_remove_voice(voice_get_cvs_handle(v));
+	cvp_handle = 0;
+	voice_set_cvp_handle(v, cvp_handle);
+	return 0;
+fail:
+	return ret;
+}
+
+static int voice_send_mvm_unmap_memory_physical_cmd(struct voice_data *v,
+						    uint32_t mem_handle)
+{
+	struct vss_imemory_cmd_unmap_t mem_unmap;
+	int ret = 0;
+	void *apr_mvm;
+	u16 mvm_handle;
+
+	if (v == NULL) {
+		pr_err("%s: v is NULL\n", __func__);
+		return -EINVAL;
+	}
+	apr_mvm = common.apr_q6_mvm;
+
+	if (!apr_mvm) {
+		pr_err("%s: apr_mvm is NULL.\n", __func__);
+		return -EINVAL;
+	}
+	mvm_handle = voice_get_mvm_handle(v);
+
+	mem_unmap.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+						APR_HDR_LEN(APR_HDR_SIZE),
+						APR_PKT_VER);
+	mem_unmap.hdr.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE,
+				sizeof(mem_unmap) - APR_HDR_SIZE);
+	mem_unmap.hdr.src_port =
+				voice_get_idx_for_session(v->session_id);
+	mem_unmap.hdr.dest_port = mvm_handle;
+	mem_unmap.hdr.token = 0;
+	mem_unmap.hdr.opcode = VSS_IMEMORY_CMD_UNMAP;
+	mem_unmap.mem_handle = mem_handle;
+
+	pr_debug("%s: mem_handle: 0x%x\n", __func__, mem_unmap.mem_handle);
+
+	v->mvm_state = CMD_STATUS_FAIL;
+	v->async_err = 0;
+	ret = apr_send_pkt(apr_mvm, (uint32_t *) &mem_unmap);
+	if (ret < 0) {
+		pr_err("mem_unmap op[0x%x]ret[%d]\n",
+			mem_unmap.hdr.opcode, ret);
+		goto fail;
+	}
+
+	ret = wait_event_timeout(v->mvm_wait,
+				 (v->mvm_state == CMD_STATUS_SUCCESS),
+				 msecs_to_jiffies(TIMEOUT_MS));
+	if (!ret) {
+		pr_err("%s: wait_event timeout\n", __func__);
+		ret = -EINVAL;
+		goto fail;
+	}
+	if (v->async_err > 0) {
+		pr_err("%s: DSP returned error[%s]\n",
+				__func__, adsp_err_get_err_str(
+				v->async_err));
+		ret = adsp_err_get_lnx_err_code(
+				v->async_err);
+		goto fail;
+	}
+	return 0;
+
+fail:
+	return ret;
+}
+
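+/*
+ * Out-of-band packet exchange: shared-memory buffer 0 is handed to the
+ * DSP as the decoder (downlink) buffer and buffer 1 as the encoder
+ * (uplink) buffer, 4 KiB each, referenced through the memory handle
+ * mapped earlier.
+ */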
+static int voice_send_cvs_packet_exchange_config_cmd(struct voice_data *v)
+{
+	struct vss_istream_cmd_set_oob_packet_exchange_config_t
+						 packet_exchange_config_pkt;
+	int ret = 0;
+	void *apr_cvs;
+	u16 cvs_handle;
+
+	if (v == NULL) {
+		pr_err("%s: v is NULL\n", __func__);
+		return -EINVAL;
+	}
+
+	apr_cvs = common.apr_q6_cvs;
+
+	if (!apr_cvs) {
+		pr_err("%s: apr_cvs is NULL.\n", __func__);
+		return -EINVAL;
+	}
+	cvs_handle = voice_get_cvs_handle(v);
+
+	packet_exchange_config_pkt.hdr.hdr_field = APR_HDR_FIELD(
+						APR_MSG_TYPE_SEQ_CMD,
+						APR_HDR_LEN(APR_HDR_SIZE),
+						APR_PKT_VER);
+	packet_exchange_config_pkt.hdr.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE,
+					sizeof(packet_exchange_config_pkt) -
+					 APR_HDR_SIZE);
+	packet_exchange_config_pkt.hdr.src_port =
+				voice_get_idx_for_session(v->session_id);
+	packet_exchange_config_pkt.hdr.dest_port = cvs_handle;
+	packet_exchange_config_pkt.hdr.token = 0;
+	packet_exchange_config_pkt.hdr.opcode =
+			 VSS_ISTREAM_CMD_SET_OOB_PACKET_EXCHANGE_CONFIG;
+	packet_exchange_config_pkt.mem_handle = v->shmem_info.mem_handle;
+	/* dec buffer address */
+	packet_exchange_config_pkt.dec_buf_addr_lsw =
+		lower_32_bits(v->shmem_info.sh_buf.buf[0].phys);
+	packet_exchange_config_pkt.dec_buf_addr_msw =
+		msm_audio_populate_upper_32_bits(
+					v->shmem_info.sh_buf.buf[0].phys);
+	packet_exchange_config_pkt.dec_buf_size = 4096;
+	/* enc buffer address */
+	packet_exchange_config_pkt.enc_buf_addr_lsw =
+		lower_32_bits(v->shmem_info.sh_buf.buf[1].phys);
+	packet_exchange_config_pkt.enc_buf_addr_msw =
+		msm_audio_populate_upper_32_bits(
+					v->shmem_info.sh_buf.buf[1].phys);
+	packet_exchange_config_pkt.enc_buf_size = 4096;
+
+	pr_debug("%s: dec buf add: lsw %0x msw %0x, size %d, enc buf add: lsw %0x msw %0x, size %d\n",
+		__func__,
+		packet_exchange_config_pkt.dec_buf_addr_lsw,
+		packet_exchange_config_pkt.dec_buf_addr_msw,
+		packet_exchange_config_pkt.dec_buf_size,
+		packet_exchange_config_pkt.enc_buf_addr_lsw,
+		packet_exchange_config_pkt.enc_buf_addr_msw,
+		packet_exchange_config_pkt.enc_buf_size);
+
+	v->cvs_state = CMD_STATUS_FAIL;
+	v->async_err = 0;
+	ret = apr_send_pkt(apr_cvs, (uint32_t *) &packet_exchange_config_pkt);
+	if (ret < 0) {
+		pr_err("Failed to send packet exchange config cmd %d\n", ret);
+		goto fail;
+	}
+
+	ret = wait_event_timeout(v->cvs_wait,
+				 (v->cvs_state == CMD_STATUS_SUCCESS),
+				 msecs_to_jiffies(TIMEOUT_MS));
+	if (!ret) {
+		pr_err("%s: wait_event timeout\n", __func__);
+		ret = -EINVAL;
+		goto fail;
+	}
+
+	if (v->async_err > 0) {
+		pr_err("%s: DSP returned error[%s]\n",
+				__func__, adsp_err_get_err_str(
+				v->async_err));
+		ret = adsp_err_get_lnx_err_code(
+				v->async_err);
+		goto fail;
+	}
+
+	return 0;
+fail:
+	return ret;
+}
+
+static int voice_send_cvs_data_exchange_mode_cmd(struct voice_data *v)
+{
+	struct vss_istream_cmd_set_packet_exchange_mode_t data_exchange_pkt;
+	int ret = 0;
+	void *apr_cvs;
+	u16 cvs_handle;
+
+	if (v == NULL) {
+		pr_err("%s: v is NULL\n", __func__);
+		return -EINVAL;
+	}
+	apr_cvs = common.apr_q6_cvs;
+
+	if (!apr_cvs) {
+		pr_err("%s: apr_cvs is NULL.\n", __func__);
+		return -EINVAL;
+	}
+	cvs_handle = voice_get_cvs_handle(v);
+
+	data_exchange_pkt.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+						APR_HDR_LEN(APR_HDR_SIZE),
+						APR_PKT_VER);
+	data_exchange_pkt.hdr.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE,
+				sizeof(data_exchange_pkt) - APR_HDR_SIZE);
+	data_exchange_pkt.hdr.src_port =
+				voice_get_idx_for_session(v->session_id);
+	data_exchange_pkt.hdr.dest_port = cvs_handle;
+	data_exchange_pkt.hdr.token = 0;
+	data_exchange_pkt.hdr.opcode = VSS_ISTREAM_CMD_SET_PACKET_EXCHANGE_MODE;
+	data_exchange_pkt.mode = VSS_ISTREAM_PACKET_EXCHANGE_MODE_OUT_OF_BAND;
+
+	v->cvs_state = CMD_STATUS_FAIL;
+	v->async_err = 0;
+	ret = apr_send_pkt(apr_cvs, (uint32_t *) &data_exchange_pkt);
+	if (ret < 0) {
+		pr_err("Failed to send data exchange mode %d\n", ret);
+		goto fail;
+	}
+
+	ret = wait_event_timeout(v->cvs_wait,
+				 (v->cvs_state == CMD_STATUS_SUCCESS),
+				 msecs_to_jiffies(TIMEOUT_MS));
+	if (!ret) {
+		pr_err("%s: wait_event timeout\n", __func__);
+		ret = -EINVAL;
+		goto fail;
+	}
+
+	if (v->async_err > 0) {
+		pr_err("%s: DSP returned error[%s]\n",
+				__func__, adsp_err_get_err_str(
+				v->async_err));
+		ret = adsp_err_get_lnx_err_code(
+				v->async_err);
+		goto fail;
+	}
+	return 0;
+fail:
+	return ret;
+}
+
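+/*
+ * Stream mute (CVS) and device mute (CVP) both use
+ * VSS_IVOLUME_CMD_MUTE_V2, which takes a direction, a mute flag and a
+ * ramp duration in milliseconds; the two commands differ only in which
+ * service session they target.
+ */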
+static int voice_send_stream_mute_cmd(struct voice_data *v, uint16_t direction,
+				     uint16_t mute_flag, uint32_t ramp_duration)
+{
+	struct cvs_set_mute_cmd cvs_mute_cmd;
+	int ret = 0;
+
+	if (v == NULL) {
+		pr_err("%s: v is NULL\n", __func__);
+		ret = -EINVAL;
+		goto fail;
+	}
+
+	if (!common.apr_q6_cvs) {
+		pr_err("%s: apr_cvs is NULL.\n", __func__);
+		ret = -EINVAL;
+		goto fail;
+	}
+
+	/* send mute/unmute to cvs */
+	cvs_mute_cmd.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+						APR_HDR_LEN(APR_HDR_SIZE),
+						APR_PKT_VER);
+	cvs_mute_cmd.hdr.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE,
+					sizeof(cvs_mute_cmd) - APR_HDR_SIZE);
+	cvs_mute_cmd.hdr.src_port =
+				voice_get_idx_for_session(v->session_id);
+	cvs_mute_cmd.hdr.dest_port = voice_get_cvs_handle(v);
+	cvs_mute_cmd.hdr.token = 0;
+	cvs_mute_cmd.hdr.opcode = VSS_IVOLUME_CMD_MUTE_V2;
+	cvs_mute_cmd.cvs_set_mute.direction = direction;
+	cvs_mute_cmd.cvs_set_mute.mute_flag = mute_flag;
+	cvs_mute_cmd.cvs_set_mute.ramp_duration_ms = ramp_duration;
+
+	v->cvs_state = CMD_STATUS_FAIL;
+	v->async_err = 0;
+	ret = apr_send_pkt(common.apr_q6_cvs, (uint32_t *) &cvs_mute_cmd);
+	if (ret < 0) {
+		pr_err("%s: Error %d sending stream mute\n", __func__, ret);
+
+		goto fail;
+	}
+	ret = wait_event_timeout(v->cvs_wait,
+				 (v->cvs_state == CMD_STATUS_SUCCESS),
+				 msecs_to_jiffies(TIMEOUT_MS));
+	if (!ret) {
+		pr_err("%s: Command timeout\n", __func__);
+		ret = -EINVAL;
+		goto fail;
+	}
+	if (v->async_err > 0) {
+		pr_err("%s: DSP returned error[%s]\n",
+				__func__, adsp_err_get_err_str(
+				v->async_err));
+		ret = adsp_err_get_lnx_err_code(
+				v->async_err);
+		goto fail;
+	}
+
+	return 0;
+
+fail:
+	return ret;
+}
+
+static int voice_send_device_mute_cmd(struct voice_data *v, uint16_t direction,
+				     uint16_t mute_flag, uint32_t ramp_duration)
+{
+	struct cvp_set_mute_cmd cvp_mute_cmd;
+	int ret = 0;
+
+	if (v == NULL) {
+		pr_err("%s: v is NULL\n", __func__);
+		ret = -EINVAL;
+		goto fail;
+	}
+
+	if (!common.apr_q6_cvp) {
+		pr_err("%s: apr_cvp is NULL.\n", __func__);
+		ret = -EINVAL;
+		goto fail;
+	}
+
+	cvp_mute_cmd.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+						APR_HDR_LEN(APR_HDR_SIZE),
+						APR_PKT_VER);
+	cvp_mute_cmd.hdr.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE,
+					sizeof(cvp_mute_cmd) - APR_HDR_SIZE);
+	cvp_mute_cmd.hdr.src_port =
+				voice_get_idx_for_session(v->session_id);
+	cvp_mute_cmd.hdr.dest_port = voice_get_cvp_handle(v);
+	cvp_mute_cmd.hdr.token = 0;
+	cvp_mute_cmd.hdr.opcode = VSS_IVOLUME_CMD_MUTE_V2;
+	cvp_mute_cmd.cvp_set_mute.direction = direction;
+	cvp_mute_cmd.cvp_set_mute.mute_flag = mute_flag;
+	cvp_mute_cmd.cvp_set_mute.ramp_duration_ms = ramp_duration;
+
+	v->cvp_state = CMD_STATUS_FAIL;
+	v->async_err = 0;
+	ret = apr_send_pkt(common.apr_q6_cvp, (uint32_t *) &cvp_mute_cmd);
+	if (ret < 0) {
+		pr_err("%s: Error %d sending rx device cmd\n", __func__, ret);
+
+		goto fail;
+	}
+	ret = wait_event_timeout(v->cvp_wait,
+				 (v->cvp_state == CMD_STATUS_SUCCESS),
+				 msecs_to_jiffies(TIMEOUT_MS));
+	if (!ret) {
+		pr_err("%s: Command timeout\n", __func__);
+		ret = -EINVAL;
+		goto fail;
+	}
+	if (v->async_err > 0) {
+		pr_err("%s: DSP returned error[%s]\n",
+				__func__, adsp_err_get_err_str(
+				v->async_err));
+		ret = adsp_err_get_lnx_err_code(
+				v->async_err);
+		goto fail;
+	}
+
+	return 0;
+
+fail:
+	return ret;
+}
+
+static int voice_send_vol_step_cmd(struct voice_data *v)
+{
+	struct cvp_set_rx_volume_step_cmd cvp_vol_step_cmd;
+	int ret = 0;
+	void *apr_cvp;
+	u16 cvp_handle;
+
+	if (v == NULL) {
+		pr_err("%s: v is NULL\n", __func__);
+		return -EINVAL;
+	}
+	apr_cvp = common.apr_q6_cvp;
+
+	if (!apr_cvp) {
+		pr_err("%s: apr_cvp is NULL.\n", __func__);
+		return -EINVAL;
+	}
+	cvp_handle = voice_get_cvp_handle(v);
+
+	/* send volume index to cvp */
+	cvp_vol_step_cmd.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+						APR_HDR_LEN(APR_HDR_SIZE),
+						APR_PKT_VER);
+	cvp_vol_step_cmd.hdr.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE,
+				sizeof(cvp_vol_step_cmd) - APR_HDR_SIZE);
+	cvp_vol_step_cmd.hdr.src_port =
+				voice_get_idx_for_session(v->session_id);
+	cvp_vol_step_cmd.hdr.dest_port = cvp_handle;
+	cvp_vol_step_cmd.hdr.token = 0;
+	cvp_vol_step_cmd.hdr.opcode = VSS_IVOLUME_CMD_SET_STEP;
+	cvp_vol_step_cmd.cvp_set_vol_step.direction = VSS_IVOLUME_DIRECTION_RX;
+	cvp_vol_step_cmd.cvp_set_vol_step.value = v->dev_rx.volume_step_value;
+	cvp_vol_step_cmd.cvp_set_vol_step.ramp_duration_ms =
+					v->dev_rx.volume_ramp_duration_ms;
+	 pr_debug("%s step_value:%d, ramp_duration_ms:%d",
+			__func__,
+			cvp_vol_step_cmd.cvp_set_vol_step.value,
+			cvp_vol_step_cmd.cvp_set_vol_step.ramp_duration_ms);
+
+	v->cvp_state = CMD_STATUS_FAIL;
+	v->async_err = 0;
+	ret = apr_send_pkt(apr_cvp, (uint32_t *) &cvp_vol_step_cmd);
+	if (ret < 0) {
+		pr_err("Fail in sending RX VOL step\n");
+		return -EINVAL;
+	}
+	ret = wait_event_timeout(v->cvp_wait,
+				 (v->cvp_state == CMD_STATUS_SUCCESS),
+				 msecs_to_jiffies(TIMEOUT_MS));
+	if (!ret) {
+		pr_err("%s: wait_event timeout\n", __func__);
+		return -EINVAL;
+	}
+	if (v->async_err > 0) {
+		pr_err("%s: DSP returned error[%s]\n",
+				__func__, adsp_err_get_err_str(
+				v->async_err));
+		ret = adsp_err_get_lnx_err_code(
+				v->async_err);
+		return ret;
+	}
+	return 0;
+}
+
+static int voice_cvs_start_record(struct voice_data *v, uint32_t rec_mode)
+{
+	int ret = 0;
+	void *apr_cvs;
+	u16 cvs_handle;
+
+	struct cvs_start_record_cmd cvs_start_record;
+
+	if (v == NULL) {
+		pr_err("%s: v is NULL\n", __func__);
+		return -EINVAL;
+	}
+	apr_cvs = common.apr_q6_cvs;
+
+	if (!apr_cvs) {
+		pr_err("%s: apr_cvs is NULL.\n", __func__);
+		return -EINVAL;
+	}
+
+	cvs_handle = voice_get_cvs_handle(v);
+
+	if (!v->rec_info.recording) {
+		cvs_start_record.hdr.hdr_field = APR_HDR_FIELD(
+					APR_MSG_TYPE_SEQ_CMD,
+					APR_HDR_LEN(APR_HDR_SIZE),
+					APR_PKT_VER);
+		cvs_start_record.hdr.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE,
+				  sizeof(cvs_start_record) - APR_HDR_SIZE);
+		cvs_start_record.hdr.src_port =
+				voice_get_idx_for_session(v->session_id);
+		cvs_start_record.hdr.dest_port = cvs_handle;
+		cvs_start_record.hdr.token = 0;
+		cvs_start_record.hdr.opcode = VSS_IRECORD_CMD_START;
+
+		cvs_start_record.rec_mode.port_id =
+					VSS_IRECORD_PORT_ID_DEFAULT;
+		if (rec_mode == VOC_REC_UPLINK) {
+			cvs_start_record.rec_mode.rx_tap_point =
+					VSS_IRECORD_TAP_POINT_NONE;
+			cvs_start_record.rec_mode.tx_tap_point =
+					VSS_IRECORD_TAP_POINT_STREAM_END;
+		} else if (rec_mode == VOC_REC_DOWNLINK) {
+			cvs_start_record.rec_mode.rx_tap_point =
+					VSS_IRECORD_TAP_POINT_STREAM_END;
+			cvs_start_record.rec_mode.tx_tap_point =
+					VSS_IRECORD_TAP_POINT_NONE;
+		} else if (rec_mode == VOC_REC_BOTH) {
+			cvs_start_record.rec_mode.rx_tap_point =
+					VSS_IRECORD_TAP_POINT_STREAM_END;
+			cvs_start_record.rec_mode.tx_tap_point =
+					VSS_IRECORD_TAP_POINT_STREAM_END;
+		} else {
+			pr_err("%s: Invalid in-call rec_mode %d\n", __func__,
+				rec_mode);
+
+			ret = -EINVAL;
+			goto fail;
+		}
+
+		v->cvs_state = CMD_STATUS_FAIL;
+		v->async_err = 0;
+
+		ret = apr_send_pkt(apr_cvs, (uint32_t *) &cvs_start_record);
+		if (ret < 0) {
+			pr_err("%s: Error %d sending START_RECORD\n", __func__,
+				ret);
+
+			goto fail;
+		}
+
+		ret = wait_event_timeout(v->cvs_wait,
+				 (v->cvs_state == CMD_STATUS_SUCCESS),
+				 msecs_to_jiffies(TIMEOUT_MS));
+
+		if (!ret) {
+			pr_err("%s: wait_event timeout\n", __func__);
+
+			ret = -EINVAL;
+			goto fail;
+		}
+		if (v->async_err > 0) {
+			pr_err("%s: DSP returned error[%s]\n",
+				__func__, adsp_err_get_err_str(
+				v->async_err));
+			ret = adsp_err_get_lnx_err_code(
+					v->async_err);
+			goto fail;
+		}
+		v->rec_info.recording = 1;
+	} else {
+		pr_debug("%s: Start record already sent\n", __func__);
+	}
+
+	return 0;
+
+fail:
+	return ret;
+}
+
+static int voice_cvs_stop_record(struct voice_data *v)
+{
+	int ret = 0;
+	void *apr_cvs;
+	u16 cvs_handle;
+	struct apr_hdr cvs_stop_record;
+
+	if (v == NULL) {
+		pr_err("%s: v is NULL\n", __func__);
+		return -EINVAL;
+	}
+	apr_cvs = common.apr_q6_cvs;
+
+	if (!apr_cvs) {
+		pr_err("%s: apr_cvs is NULL.\n", __func__);
+		return -EINVAL;
+	}
+
+	cvs_handle = voice_get_cvs_handle(v);
+
+	if (v->rec_info.recording) {
+		cvs_stop_record.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+				  APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
+		cvs_stop_record.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE,
+				  sizeof(cvs_stop_record) - APR_HDR_SIZE);
+		cvs_stop_record.src_port =
+				voice_get_idx_for_session(v->session_id);
+		cvs_stop_record.dest_port = cvs_handle;
+		cvs_stop_record.token = 0;
+		cvs_stop_record.opcode = VSS_IRECORD_CMD_STOP;
+
+		v->cvs_state = CMD_STATUS_FAIL;
+		v->async_err = 0;
+
+		ret = apr_send_pkt(apr_cvs, (uint32_t *) &cvs_stop_record);
+		if (ret < 0) {
+			pr_err("%s: Error %d sending STOP_RECORD\n",
+				__func__, ret);
+
+			goto fail;
+		}
+
+		ret = wait_event_timeout(v->cvs_wait,
+				 (v->cvs_state == CMD_STATUS_SUCCESS),
+				 msecs_to_jiffies(TIMEOUT_MS));
+		if (!ret) {
+			pr_err("%s: wait_event timeout\n", __func__);
+
+			ret = -EINVAL;
+			goto fail;
+		}
+		if (v->async_err > 0) {
+			pr_err("%s: DSP returned error[%s]\n",
+				__func__, adsp_err_get_err_str(
+				v->async_err));
+			ret = adsp_err_get_lnx_err_code(
+					v->async_err);
+			goto fail;
+		}
+		v->rec_info.recording = 0;
+	} else {
+		pr_debug("%s: Stop record already sent\n", __func__);
+	}
+
+	return 0;
+
+fail:
+	return ret;
+}
+
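+/*
+ * The ul_flag/dl_flag pair tracks which record ports (VOICE_RECORD_TX
+ * for uplink, VOICE_RECORD_RX for downlink) are currently routed. Since
+ * CVS takes a single rec_mode, enabling or disabling one port while the
+ * other is active requires stopping the current record and restarting
+ * it with the combined mode (VOC_REC_UPLINK, VOC_REC_DOWNLINK or
+ * VOC_REC_BOTH).
+ */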
+int voc_start_record(uint32_t port_id, uint32_t set, uint32_t session_id)
+{
+	int ret = 0;
+	int rec_mode = 0;
+	u16 cvs_handle;
+	int rec_set = 0;
+	struct voice_session_itr itr;
+	struct voice_data *v = NULL;
+
+	/* check if session_id is valid */
+	if (!voice_is_valid_session_id(session_id)) {
+		pr_err("%s: Invalid session id:%u\n", __func__,
+		       session_id);
+
+		return -EINVAL;
+	}
+
+	voice_itr_init(&itr, session_id);
+	pr_debug("%s: session_id:%u\n", __func__, session_id);
+
+	while (voice_itr_get_next_session(&itr, &v)) {
+		if (v == NULL) {
+			pr_err("%s: v is NULL, sessionid:%u\n", __func__,
+				session_id);
+
+			break;
+		}
+		pr_debug("%s: port_id: %d, set: %d, v: %pK\n",
+			 __func__, port_id, set, v);
+
+		mutex_lock(&v->lock);
+		rec_mode = v->rec_info.rec_mode;
+		rec_set = set;
+		if (set) {
+			if ((v->rec_route_state.ul_flag != 0) &&
+				(v->rec_route_state.dl_flag != 0)) {
+				pr_debug("%s: rec mode already set.\n",
+					__func__);
+
+				mutex_unlock(&v->lock);
+				continue;
+			}
+
+			if (port_id == VOICE_RECORD_TX) {
+				if ((v->rec_route_state.ul_flag == 0)
+				&& (v->rec_route_state.dl_flag == 0)) {
+					rec_mode = VOC_REC_UPLINK;
+					v->rec_route_state.ul_flag = 1;
+				} else if ((v->rec_route_state.ul_flag == 0)
+					&& (v->rec_route_state.dl_flag != 0)) {
+					voice_cvs_stop_record(v);
+					rec_mode = VOC_REC_BOTH;
+					v->rec_route_state.ul_flag = 1;
+				}
+			} else if (port_id == VOICE_RECORD_RX) {
+				if ((v->rec_route_state.ul_flag == 0)
+					&& (v->rec_route_state.dl_flag == 0)) {
+					rec_mode = VOC_REC_DOWNLINK;
+					v->rec_route_state.dl_flag = 1;
+				} else if ((v->rec_route_state.ul_flag != 0)
+					&& (v->rec_route_state.dl_flag == 0)) {
+					voice_cvs_stop_record(v);
+					rec_mode = VOC_REC_BOTH;
+					v->rec_route_state.dl_flag = 1;
+				}
+			}
+			rec_set = 1;
+		} else {
+			if ((v->rec_route_state.ul_flag == 0) &&
+				(v->rec_route_state.dl_flag == 0)) {
+				pr_debug("%s: rec already stops.\n",
+					__func__);
+				mutex_unlock(&v->lock);
+				continue;
+			}
+
+			if (port_id == VOICE_RECORD_TX) {
+				if ((v->rec_route_state.ul_flag != 0)
+					&& (v->rec_route_state.dl_flag == 0)) {
+					v->rec_route_state.ul_flag = 0;
+					rec_set = 0;
+				} else if ((v->rec_route_state.ul_flag != 0)
+					&& (v->rec_route_state.dl_flag != 0)) {
+					voice_cvs_stop_record(v);
+					v->rec_route_state.ul_flag = 0;
+					rec_mode = VOC_REC_DOWNLINK;
+					rec_set = 1;
+				}
+			} else if (port_id == VOICE_RECORD_RX) {
+				if ((v->rec_route_state.ul_flag == 0)
+					&& (v->rec_route_state.dl_flag != 0)) {
+					v->rec_route_state.dl_flag = 0;
+					rec_set = 0;
+				} else if ((v->rec_route_state.ul_flag != 0)
+					&& (v->rec_route_state.dl_flag != 0)) {
+					voice_cvs_stop_record(v);
+					v->rec_route_state.dl_flag = 0;
+					rec_mode = VOC_REC_UPLINK;
+					rec_set = 1;
+				}
+			}
+		}
+		pr_debug("%s: mode =%d, set =%d\n", __func__,
+			 rec_mode, rec_set);
+		cvs_handle = voice_get_cvs_handle(v);
+
+		if (cvs_handle != 0) {
+			if (rec_set)
+				ret = voice_cvs_start_record(v, rec_mode);
+			else
+				ret = voice_cvs_stop_record(v);
+		}
+
+		/*
+		 * During SRVCC, recording switches from the VoLTE session to
+		 * the voice session, so a stop request must also stop the
+		 * recording on the voice session.
+		 */
+		if ((!rec_set) && common.srvcc_rec_flag) {
+			pr_debug("%s, srvcc_rec_flag:%d\n",  __func__,
+				 common.srvcc_rec_flag);
+
+			voice_cvs_stop_record(&common.voice[VOC_PATH_PASSIVE]);
+			common.srvcc_rec_flag = false;
+		}
+
+		/* Cache the value */
+		v->rec_info.rec_enable = rec_set;
+		v->rec_info.rec_mode = rec_mode;
+
+		mutex_unlock(&v->lock);
+	}
+
+	return ret;
+}
+
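+/*
+ * In-call music delivery is reference counted: voc_start_playback()
+ * bumps music_info.count per enable, playback actually starts only
+ * while a count is pending, and it stops once the count drops to zero
+ * (or unconditionally when music_info.force is set during teardown).
+ */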
+static int voice_cvs_start_playback(struct voice_data *v)
+{
+	int ret = 0;
+	struct cvs_start_playback_cmd cvs_start_playback;
+	void *apr_cvs;
+	u16 cvs_handle;
+
+	if (v == NULL) {
+		pr_err("%s: v is NULL\n", __func__);
+		return -EINVAL;
+	}
+	apr_cvs = common.apr_q6_cvs;
+
+	if (!apr_cvs) {
+		pr_err("%s: apr_cvs is NULL.\n", __func__);
+		return -EINVAL;
+	}
+
+	cvs_handle = voice_get_cvs_handle(v);
+
+	if (!v->music_info.playing && v->music_info.count) {
+		cvs_start_playback.hdr.hdr_field = APR_HDR_FIELD(
+					APR_MSG_TYPE_SEQ_CMD,
+					APR_HDR_LEN(APR_HDR_SIZE),
+					APR_PKT_VER);
+		cvs_start_playback.hdr.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE,
+				sizeof(cvs_start_playback) - APR_HDR_SIZE);
+		cvs_start_playback.hdr.src_port =
+				voice_get_idx_for_session(v->session_id);
+		cvs_start_playback.hdr.dest_port = cvs_handle;
+		cvs_start_playback.hdr.token = 0;
+		cvs_start_playback.hdr.opcode = VSS_IPLAYBACK_CMD_START;
+		cvs_start_playback.playback_mode.port_id =
+						v->music_info.port_id;
+
+		v->cvs_state = CMD_STATUS_FAIL;
+		v->async_err = 0;
+
+		ret = apr_send_pkt(apr_cvs, (uint32_t *) &cvs_start_playback);
+
+		if (ret < 0) {
+			pr_err("%s: Error %d sending START_PLAYBACK\n",
+				__func__, ret);
+
+			goto fail;
+		}
+
+		ret = wait_event_timeout(v->cvs_wait,
+				 (v->cvs_state == CMD_STATUS_SUCCESS),
+				 msecs_to_jiffies(TIMEOUT_MS));
+		if (!ret) {
+			pr_err("%s: wait_event timeout\n", __func__);
+
+			ret = -EINVAL;
+			goto fail;
+		}
+		if (v->async_err > 0) {
+			pr_err("%s: DSP returned error[%s]\n",
+				__func__, adsp_err_get_err_str(
+				v->async_err));
+			ret = adsp_err_get_lnx_err_code(
+					v->async_err);
+			goto fail;
+		}
+
+		v->music_info.playing = 1;
+	} else {
+		pr_debug("%s: Start playback already sent\n", __func__);
+	}
+
+	return 0;
+
+fail:
+	return ret;
+}
+
+static int voice_cvs_stop_playback(struct voice_data *v)
+{
+	int ret = 0;
+	struct apr_hdr cvs_stop_playback;
+	void *apr_cvs;
+	u16 cvs_handle;
+
+	if (v == NULL) {
+		pr_err("%s: v is NULL\n", __func__);
+		return -EINVAL;
+	}
+	apr_cvs = common.apr_q6_cvs;
+
+	if (!apr_cvs) {
+		pr_err("%s: apr_cvs is NULL.\n", __func__);
+		return -EINVAL;
+	}
+
+	cvs_handle = voice_get_cvs_handle(v);
+
+	if (v->music_info.playing && ((!v->music_info.count) ||
+						(v->music_info.force))) {
+		cvs_stop_playback.hdr_field =
+				APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+				APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
+		cvs_stop_playback.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE,
+				sizeof(cvs_stop_playback) - APR_HDR_SIZE);
+		cvs_stop_playback.src_port =
+				voice_get_idx_for_session(v->session_id);
+		cvs_stop_playback.dest_port = cvs_handle;
+		cvs_stop_playback.token = 0;
+
+		cvs_stop_playback.opcode = VSS_IPLAYBACK_CMD_STOP;
+
+		v->cvs_state = CMD_STATUS_FAIL;
+		v->async_err = 0;
+
+		ret = apr_send_pkt(apr_cvs, (uint32_t *) &cvs_stop_playback);
+		if (ret < 0) {
+			pr_err("%s: Error %d sending STOP_PLAYBACK\n",
+			       __func__, ret);
+
+			goto fail;
+		}
+
+		ret = wait_event_timeout(v->cvs_wait,
+					 (v->cvs_state == CMD_STATUS_SUCCESS),
+					 msecs_to_jiffies(TIMEOUT_MS));
+		if (!ret) {
+			pr_err("%s: wait_event timeout\n", __func__);
+
+			ret = -EINVAL;
+			goto fail;
+		}
+		if (v->async_err > 0) {
+			pr_err("%s: DSP returned error[%s]\n",
+				__func__, adsp_err_get_err_str(
+				v->async_err));
+			ret = adsp_err_get_lnx_err_code(
+					v->async_err);
+			goto fail;
+		}
+
+		v->music_info.playing = 0;
+		v->music_info.force = 0;
+	} else {
+		pr_debug("%s: Stop playback already sent\n", __func__);
+	}
+
+	return 0;
+
+fail:
+	return ret;
+}
+
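+/*
+ * Local call hold is implemented by tearing the call down entirely
+ * (VOICE_LCH_START ends the voice call) and rebuilding it on resume
+ * (VOICE_LCH_STOP starts it again), rather than by muting the paths.
+ */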
+static int voc_lch_ops(struct voice_data *v, enum voice_lch_mode lch_mode)
+{
+	int ret = 0;
+
+	if (v == NULL) {
+		pr_err("%s: v is NULL\n", __func__);
+
+		ret = -EINVAL;
+		goto done;
+	}
+
+	switch (lch_mode) {
+	case VOICE_LCH_START:
+
+		ret = voc_end_voice_call(v->session_id);
+		if (ret < 0)
+			pr_err("%s: voice call end failed %d\n",
+				__func__, ret);
+		break;
+	case VOICE_LCH_STOP:
+
+		ret = voc_start_voice_call(v->session_id);
+		if (ret < 0) {
+			pr_err("%s: voice call start failed %d\n",
+				__func__, ret);
+			goto done;
+		}
+		break;
+	default:
+		pr_err("%s: Invalid LCH mode: %d\n",
+			__func__, v->lch_mode);
+		break;
+	}
+done:
+	return ret;
+}
+
+int voc_start_playback(uint32_t set, uint16_t port_id)
+{
+	struct voice_data *v = NULL;
+	int ret = 0;
+	struct voice_session_itr itr;
+	u16 cvs_handle;
+
+	pr_debug("%s port_id = %#x set = %d", __func__, port_id, set);
+
+	voice_itr_init(&itr, ALL_SESSION_VSID);
+	while (voice_itr_get_next_session(&itr, &v)) {
+		if ((v != NULL) &&
+		    (((port_id == VOICE_PLAYBACK_TX) &&
+		       is_sub1_vsid(v->session_id)) ||
+		     ((port_id == VOICE2_PLAYBACK_TX) &&
+		       is_sub2_vsid(v->session_id)))) {
+
+			mutex_lock(&v->lock);
+			v->music_info.port_id = port_id;
+			v->music_info.play_enable = set;
+			if (set)
+				v->music_info.count++;
+			else
+				v->music_info.count--;
+			pr_debug("%s: music_info count=%d\n", __func__,
+				 v->music_info.count);
+
+			cvs_handle = voice_get_cvs_handle(v);
+			if (cvs_handle != 0) {
+				if (set)
+					ret = voice_cvs_start_playback(v);
+				else
+					ret = voice_cvs_stop_playback(v);
+			}
+			mutex_unlock(&v->lock);
+		} else {
+			pr_err("%s: Invalid session\n", __func__);
+		}
+	}
+
+	return ret;
+}
+
+int voc_disable_topology(uint32_t session_id, uint32_t disable)
+{
+	struct voice_data *v = voice_get_session(session_id);
+	int ret = 0;
+
+	if (v == NULL) {
+		pr_err("%s: invalid session_id 0x%x\n", __func__, session_id);
+
+		return -EINVAL;
+	}
+
+	mutex_lock(&v->lock);
+
+	v->disable_topology = disable;
+
+	mutex_unlock(&v->lock);
+
+	return ret;
+}
+
+static int voice_set_packet_exchange_mode_and_config(uint32_t session_id,
+						 uint32_t mode)
+{
+	struct voice_data *v = voice_get_session(session_id);
+	int ret = 0;
+
+	if (v == NULL) {
+		pr_err("%s: invalid session_id 0x%x\n", __func__, session_id);
+		return -EINVAL;
+	}
+
+	if (v->voc_state != VOC_RUN)
+		ret = voice_send_cvs_data_exchange_mode_cmd(v);
+
+	if (ret) {
+		pr_err("%s: Error voice_send_data_exchange_mode_cmd %d\n",
+			__func__, ret);
+		goto fail;
+	}
+
+	ret = voice_send_cvs_packet_exchange_config_cmd(v);
+	if (ret) {
+		pr_err("%s: Error: voice_send_packet_exchange_config_cmd %d\n",
+			__func__, ret);
+		goto fail;
+	}
+
+	return ret;
+fail:
+	return -EINVAL;
+}
+
+int voc_set_tx_mute(uint32_t session_id, uint32_t dir, uint32_t mute,
+		    uint32_t ramp_duration)
+{
+	struct voice_data *v = NULL;
+	int ret = 0;
+	struct voice_session_itr itr;
+
+	voice_itr_init(&itr, session_id);
+	while (voice_itr_get_next_session(&itr, &v)) {
+		if (v != NULL) {
+			mutex_lock(&v->lock);
+			v->stream_tx.stream_mute = mute;
+			v->stream_tx.stream_mute_ramp_duration_ms =
+								ramp_duration;
+			if (is_voc_state_active(v->voc_state) &&
+				(v->lch_mode == 0))
+				ret = voice_send_stream_mute_cmd(v,
+				VSS_IVOLUME_DIRECTION_TX,
+				v->stream_tx.stream_mute,
+				v->stream_tx.stream_mute_ramp_duration_ms);
+			mutex_unlock(&v->lock);
+		} else {
+			pr_err("%s: invalid session_id 0x%x\n", __func__,
+				session_id);
+
+			ret = -EINVAL;
+			break;
+		}
+	}
+
+	return ret;
+}
+
+int voc_set_device_mute(uint32_t session_id, uint32_t dir, uint32_t mute,
+			uint32_t ramp_duration)
+{
+	struct voice_data *v = NULL;
+	int ret = 0;
+	struct voice_session_itr itr;
+
+	voice_itr_init(&itr, session_id);
+	while (voice_itr_get_next_session(&itr, &v)) {
+		if (v != NULL) {
+			mutex_lock(&v->lock);
+			if (dir == VSS_IVOLUME_DIRECTION_TX) {
+				v->dev_tx.dev_mute = mute;
+				v->dev_tx.dev_mute_ramp_duration_ms =
+							ramp_duration;
+			} else {
+				v->dev_rx.dev_mute = mute;
+				v->dev_rx.dev_mute_ramp_duration_ms =
+							ramp_duration;
+			}
+
+			if (((v->voc_state == VOC_RUN) ||
+				(v->voc_state == VOC_STANDBY)) &&
+				(v->lch_mode == 0))
+				ret = voice_send_device_mute_cmd(v,
+							dir,
+							mute,
+							ramp_duration);
+			mutex_unlock(&v->lock);
+		} else {
+			pr_err("%s: invalid session_id 0x%x\n", __func__,
+				session_id);
+
+			ret = -EINVAL;
+			break;
+		}
+	}
+
+	return ret;
+}
+
+int voc_get_rx_device_mute(uint32_t session_id)
+{
+	struct voice_data *v = voice_get_session(session_id);
+	int ret = 0;
+
+	if (v == NULL) {
+		pr_err("%s: invalid session_id 0x%x\n", __func__, session_id);
+
+		return -EINVAL;
+	}
+
+	mutex_lock(&v->lock);
+
+	ret = v->dev_rx.dev_mute;
+
+	mutex_unlock(&v->lock);
+
+	return ret;
+}
+
+int voc_set_tty_mode(uint32_t session_id, uint8_t tty_mode)
+{
+	struct voice_data *v = voice_get_session(session_id);
+	int ret = 0;
+
+	if (v == NULL) {
+		pr_err("%s: invalid session_id 0x%x\n", __func__, session_id);
+
+		return -EINVAL;
+	}
+
+	mutex_lock(&v->lock);
+
+	v->tty_mode = tty_mode;
+
+	mutex_unlock(&v->lock);
+
+	return ret;
+}
+
+uint8_t voc_get_tty_mode(uint32_t session_id)
+{
+	struct voice_data *v = voice_get_session(session_id);
+	int ret = 0;
+
+	if (v == NULL) {
+		pr_err("%s: invalid session_id 0x%x\n", __func__, session_id);
+
+		return -EINVAL;
+	}
+
+	mutex_lock(&v->lock);
+
+	ret = v->tty_mode;
+
+	mutex_unlock(&v->lock);
+
+	return ret;
+}
+
+int voc_set_pp_enable(uint32_t session_id, uint32_t module_id, uint32_t enable)
+{
+	struct voice_data *v = NULL;
+	int ret = 0;
+	struct voice_session_itr itr;
+
+	voice_itr_init(&itr, session_id);
+	while (voice_itr_get_next_session(&itr, &v)) {
+		if (v != NULL) {
+			if (!(is_voice_app_id(v->session_id)))
+				continue;
+
+			mutex_lock(&v->lock);
+			if (module_id == MODULE_ID_VOICE_MODULE_ST)
+				v->st_enable = enable;
+
+			if (v->voc_state == VOC_RUN) {
+				if ((module_id == MODULE_ID_VOICE_MODULE_ST) &&
+				    (!v->tty_mode))
+					ret = voice_send_set_pp_enable_cmd(v,
+						MODULE_ID_VOICE_MODULE_ST,
+						enable);
+			}
+			mutex_unlock(&v->lock);
+		} else {
+			pr_err("%s: invalid session_id 0x%x\n", __func__,
+								session_id);
+			ret = -EINVAL;
+			break;
+		}
+	}
+
+	return ret;
+}
+
+int voc_set_hd_enable(uint32_t session_id, uint32_t enable)
+{
+	struct voice_data *v = NULL;
+	int ret = 0;
+	struct voice_session_itr itr;
+
+	voice_itr_init(&itr, session_id);
+	while (voice_itr_get_next_session(&itr, &v)) {
+		if (v != NULL) {
+			mutex_lock(&v->lock);
+			v->hd_enable = enable;
+
+			if (v->voc_state == VOC_RUN)
+				ret = voice_send_hd_cmd(v, enable);
+
+			mutex_unlock(&v->lock);
+		} else {
+			pr_err("%s: invalid session_id 0x%x\n", __func__,
+			       session_id);
+			ret = -EINVAL;
+			break;
+		}
+	}
+
+	return ret;
+}
+
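+/*
+ * Sidetone is applied in the AFE rather than in CVD: the request is
+ * cached in common.sidetone_enable and afe_sidetone_enable() is called
+ * with the TX/RX ports of each session in VOC_RUN, stopping at the
+ * first session that accepts the setting.
+ */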
+int voc_set_afe_sidetone(uint32_t session_id, bool sidetone_enable)
+{
+	struct voice_data *v = NULL;
+	int ret = -EINVAL;
+	struct voice_session_itr itr;
+	u16 rx_port, tx_port;
+
+	common.sidetone_enable = sidetone_enable;
+	voice_itr_init(&itr, session_id);
+	while (voice_itr_get_next_session(&itr, &v)) {
+		if (v == NULL) {
+			pr_err("%s: invalid session_id 0x%x\n", __func__,
+				  session_id);
+			ret = -EINVAL;
+			break;
+		}
+		mutex_lock(&v->lock);
+		if (v->voc_state != VOC_RUN) {
+			mutex_unlock(&v->lock);
+			continue;
+		}
+		rx_port = v->dev_rx.port_id;
+		tx_port = v->dev_tx.port_id;
+		ret = afe_sidetone_enable(tx_port, rx_port,
+					  sidetone_enable);
+		if (!ret) {
+			mutex_unlock(&v->lock);
+			break;
+		}
+		mutex_unlock(&v->lock);
+	}
+	return ret;
+}
+
+bool voc_get_afe_sidetone(void)
+{
+	return common.sidetone_enable;
+}
+
+int voc_get_pp_enable(uint32_t session_id, uint32_t module_id)
+{
+	struct voice_data *v = voice_get_session(session_id);
+	int ret = 0;
+
+	if (v == NULL) {
+		pr_err("%s: invalid session_id 0x%x\n", __func__, session_id);
+
+		return -EINVAL;
+	}
+
+	mutex_lock(&v->lock);
+	if (module_id == MODULE_ID_VOICE_MODULE_ST)
+		ret = v->st_enable;
+	mutex_unlock(&v->lock);
+
+	return ret;
+}
+
+int voc_set_rx_vol_step(uint32_t session_id, uint32_t dir, uint32_t vol_step,
+			uint32_t ramp_duration)
+{
+	struct voice_data *v = NULL;
+	int ret = 0;
+	struct voice_session_itr itr;
+
+	pr_debug("%s session id = %#x vol = %u", __func__, session_id,
+		vol_step);
+
+	voice_itr_init(&itr, session_id);
+	while (voice_itr_get_next_session(&itr, &v)) {
+		if (v != NULL) {
+			mutex_lock(&v->lock);
+			v->dev_rx.volume_step_value = vol_step;
+			v->dev_rx.volume_ramp_duration_ms = ramp_duration;
+			if (is_voc_state_active(v->voc_state))
+				ret = voice_send_vol_step_cmd(v);
+			mutex_unlock(&v->lock);
+		} else {
+			pr_err("%s: invalid session_id 0x%x\n", __func__,
+				session_id);
+
+			ret = -EINVAL;
+			break;
+		}
+	}
+
+	return ret;
+}
+
+int voc_set_device_config(uint32_t session_id, uint8_t path_dir,
+			  struct media_format_info *finfo)
+{
+	struct voice_data *v = voice_get_session(session_id);
+
+	if (v == NULL) {
+		pr_err("%s: Invalid session_id 0x%x\n", __func__, session_id);
+
+		return -EINVAL;
+	}
+
+	pr_debug("%s: path_dir=%d port_id=%x, channels=%d, sample_rate=%d, bits_per_sample=%d\n",
+		__func__, path_dir, finfo->port_id, finfo->num_channels,
+		finfo->sample_rate, finfo->bits_per_sample);
+
+	mutex_lock(&v->lock);
+	switch (path_dir) {
+	case RX_PATH:
+		v->dev_rx.port_id = q6audio_get_port_id(finfo->port_id);
+		v->dev_rx.no_of_channels = finfo->num_channels;
+		v->dev_rx.sample_rate = finfo->sample_rate;
+		v->dev_rx.bits_per_sample = finfo->bits_per_sample;
+		memcpy(&v->dev_rx.channel_mapping, &finfo->channel_mapping,
+		       VSS_CHANNEL_MAPPING_SIZE);
+		break;
+	case TX_PATH:
+		v->dev_tx.port_id = q6audio_get_port_id(finfo->port_id);
+		v->dev_tx.no_of_channels = finfo->num_channels;
+		v->dev_tx.sample_rate = finfo->sample_rate;
+		v->dev_tx.bits_per_sample = finfo->bits_per_sample;
+		memcpy(&v->dev_tx.channel_mapping, &finfo->channel_mapping,
+		       VSS_CHANNEL_MAPPING_SIZE);
+		break;
+	default:
+		pr_err("%s: Invalid path_dir %d\n", __func__, path_dir);
+		mutex_unlock(&v->lock);
+		return -EINVAL;
+	}
+
+	mutex_unlock(&v->lock);
+
+	return 0;
+}
+
+int voc_set_ext_ec_ref_media_fmt_info(struct media_format_info *finfo)
+{
+	mutex_lock(&common.common_lock);
+	if (common.ec_ref_ext) {
+		common.ec_media_fmt_info.num_channels = finfo->num_channels;
+		common.ec_media_fmt_info.bits_per_sample =
+			finfo->bits_per_sample;
+		common.ec_media_fmt_info.sample_rate = finfo->sample_rate;
+		memcpy(&common.ec_media_fmt_info.channel_mapping,
+		       &finfo->channel_mapping, VSS_CHANNEL_MAPPING_SIZE);
+	} else {
+		pr_debug("%s: Ext Ec Ref not active, returning", __func__);
+	}
+	mutex_unlock(&common.common_lock);
+	return 0;
+}
+
+int voc_set_route_flag(uint32_t session_id, uint8_t path_dir, uint8_t set)
+{
+	struct voice_data *v = voice_get_session(session_id);
+
+	if (v == NULL) {
+		pr_err("%s: invalid session_id 0x%x\n", __func__, session_id);
+
+		return -EINVAL;
+	}
+
+	pr_debug("%s: path_dir=%d, set=%d\n", __func__, path_dir, set);
+
+	mutex_lock(&v->lock);
+
+	if (path_dir == RX_PATH)
+		v->voc_route_state.rx_route_flag = set;
+	else
+		v->voc_route_state.tx_route_flag = set;
+
+	mutex_unlock(&v->lock);
+
+	return 0;
+}
+
+uint8_t voc_get_route_flag(uint32_t session_id, uint8_t path_dir)
+{
+	struct voice_data *v = voice_get_session(session_id);
+	int ret = 0;
+
+	if (v == NULL) {
+		pr_err("%s: invalid session_id 0x%x\n", __func__, session_id);
+
+		return 0;
+	}
+
+	mutex_lock(&v->lock);
+
+	if (path_dir == RX_PATH)
+		ret = v->voc_route_state.rx_route_flag;
+	else
+		ret = v->voc_route_state.tx_route_flag;
+
+	mutex_unlock(&v->lock);
+
+	return ret;
+}
+
+int voc_end_voice_call(uint32_t session_id)
+{
+	struct voice_data *v = voice_get_session(session_id);
+	int ret = 0;
+
+	if (v == NULL) {
+		pr_err("%s: invalid session_id 0x%x\n", __func__, session_id);
+
+		return -EINVAL;
+	}
+
+	mutex_lock(&v->lock);
+
+	if (v->voc_state == VOC_RUN || v->voc_state == VOC_ERROR ||
+	    v->voc_state == VOC_CHANGE || v->voc_state == VOC_STANDBY) {
+
+		pr_debug("%s: VOC_STATE: %d\n", __func__, v->voc_state);
+
+		ret = voice_destroy_vocproc(v);
+		if (ret < 0)
+			pr_err("%s:  destroy voice failed\n", __func__);
+
+		voc_update_session_params(v);
+
+		voice_destroy_mvm_cvs_session(v);
+		v->voc_state = VOC_RELEASE;
+		if (common.is_vote_bms) {
+			/* vote low power to BMS during call stop */
+			voice_vote_powerstate_to_bms(v, false);
+		}
+	} else {
+		pr_err("%s: Error: End voice called in state %d\n",
+			__func__, v->voc_state);
+
+		ret = -EINVAL;
+	}
+
+	mutex_unlock(&v->lock);
+	return ret;
+}
+
+int voc_standby_voice_call(uint32_t session_id)
+{
+	struct voice_data *v = voice_get_session(session_id);
+	struct apr_hdr mvm_standby_voice_cmd;
+	void *apr_mvm;
+	u16 mvm_handle;
+	int ret = 0;
+
+	if (v == NULL) {
+		pr_err("%s: v is NULL\n", __func__);
+		return -EINVAL;
+	}
+	pr_debug("%s: voc state=%d", __func__, v->voc_state);
+
+	if (v->voc_state == VOC_RUN) {
+		apr_mvm = common.apr_q6_mvm;
+		if (!apr_mvm) {
+			pr_err("%s: apr_mvm is NULL.\n", __func__);
+			ret = -EINVAL;
+			goto fail;
+		}
+		mvm_handle = voice_get_mvm_handle(v);
+		mvm_standby_voice_cmd.hdr_field =
+			APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+			APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
+		mvm_standby_voice_cmd.pkt_size =
+			APR_PKT_SIZE(APR_HDR_SIZE,
+			sizeof(mvm_standby_voice_cmd) - APR_HDR_SIZE);
+		pr_debug("send mvm_standby_voice_cmd pkt size = %d\n",
+			 mvm_standby_voice_cmd.pkt_size);
+		mvm_standby_voice_cmd.src_port =
+				voice_get_idx_for_session(v->session_id);
+		mvm_standby_voice_cmd.dest_port = mvm_handle;
+		mvm_standby_voice_cmd.token = 0;
+		mvm_standby_voice_cmd.opcode = VSS_IMVM_CMD_STANDBY_VOICE;
+		v->mvm_state = CMD_STATUS_FAIL;
+		ret = apr_send_pkt(apr_mvm,
+				(uint32_t *)&mvm_standby_voice_cmd);
+		if (ret < 0) {
+			pr_err("Fail in sending VSS_IMVM_CMD_STANDBY_VOICE\n");
+			ret = -EINVAL;
+			goto fail;
+		}
+		v->voc_state = VOC_STANDBY;
+	}
+fail:
+	return ret;
+}
+
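+/*
+ * voc_disable_device()/voc_enable_device() together implement a device
+ * switch: disable pauses the call and de-registers the per-device
+ * calibration (VOC_RUN -> VOC_CHANGE); enable then re-sends TTY mode,
+ * device and media format info, re-registers calibration, commits the
+ * topology and restarts voice (VOC_CHANGE -> VOC_RUN). Any other voc
+ * state is a no-op for both.
+ */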
+int voc_disable_device(uint32_t session_id)
+{
+	struct voice_data *v = voice_get_session(session_id);
+	int ret = 0;
+
+	if (v == NULL) {
+		pr_err("%s: v is NULL\n", __func__);
+		return -EINVAL;
+	}
+
+	pr_debug("%s: voc state=%d\n", __func__, v->voc_state);
+
+	mutex_lock(&v->lock);
+	if (v->voc_state == VOC_RUN) {
+		ret = voice_pause_voice_call(v);
+		if (ret < 0) {
+			pr_err("%s: Pause Voice Call failed for session 0x%x, err %d!\n",
+			       __func__, v->session_id, ret);
+			goto done;
+		}
+		rtac_remove_voice(voice_get_cvs_handle(v));
+		voice_send_cvp_deregister_vol_cal_cmd(v);
+		voice_send_cvp_deregister_cal_cmd(v);
+		voice_send_cvp_deregister_dev_cfg_cmd(v);
+
+		v->voc_state = VOC_CHANGE;
+	} else {
+		pr_debug("%s: called in voc state=%d, No_OP\n",
+			 __func__, v->voc_state);
+	}
+
+done:
+	mutex_unlock(&v->lock);
+
+	return ret;
+}
+
+int voc_enable_device(uint32_t session_id)
+{
+	struct voice_data *v = voice_get_session(session_id);
+	int ret = 0;
+
+	if (v == NULL) {
+		pr_err("%s: v is NULL\n", __func__);
+		return -EINVAL;
+	}
+
+	pr_debug("%s: voc state=%d\n", __func__, v->voc_state);
+	mutex_lock(&v->lock);
+	if (v->voc_state == VOC_CHANGE) {
+		ret = voice_send_tty_mode_cmd(v);
+		if (ret < 0) {
+			pr_err("%s: Sending TTY mode failed, ret=%d\n",
+			       __func__, ret);
+			/* Not a critical error, allow voice call to continue */
+		}
+
+		if (v->tty_mode) {
+			/* disable slowtalk */
+			voice_send_set_pp_enable_cmd(v,
+						     MODULE_ID_VOICE_MODULE_ST,
+						     0);
+		} else {
+			/* restore slowtalk */
+			voice_send_set_pp_enable_cmd(v,
+						     MODULE_ID_VOICE_MODULE_ST,
+						     v->st_enable);
+		}
+
+		ret = voice_send_set_device_cmd(v);
+		if (ret < 0) {
+			pr_err("%s: Set device failed, ret=%d\n",
+			       __func__, ret);
+			goto done;
+		}
+
+		ret = voice_send_cvp_media_fmt_info_cmd(v);
+		if (ret < 0) {
+			pr_err("%s: Set format failed err:%d\n", __func__, ret);
+			goto done;
+		}
+
+		ret = voice_send_cvp_topology_commit_cmd(v);
+		if (ret < 0) {
+			pr_err("%s:  Set topology commit failed\n", __func__);
+			goto done;
+		}
+
+		voice_send_cvp_register_dev_cfg_cmd(v);
+		voice_send_cvp_register_cal_cmd(v);
+		voice_send_cvp_register_vol_cal_cmd(v);
+
+		rtac_add_voice(voice_get_cvs_handle(v),
+			       voice_get_cvp_handle(v),
+			       v->dev_rx.port_id, v->dev_tx.port_id,
+			       v->dev_rx.dev_id, v->dev_tx.dev_id,
+			       v->session_id);
+
+		ret = voice_send_start_voice_cmd(v);
+		if (ret < 0) {
+			pr_err("%s: Fail in sending START_VOICE, ret=%d\n",
+			       __func__, ret);
+			goto done;
+		}
+		v->voc_state = VOC_RUN;
+	} else {
+		pr_debug("%s: called in voc state=%d, No_OP\n",
+			 __func__, v->voc_state);
+	}
+
+done:
+	mutex_unlock(&v->lock);
+
+	return ret;
+}
+
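+/*
+ * voc_set_lch() - change the Local Call Hold (LCH) mode of a session.
+ * The new mode is cached under v->lock but applied via voc_lch_ops()
+ * outside the lock, since that path may itself send blocking APR
+ * commands. Requesting the already-active mode is a no-op.
+ */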
+int voc_set_lch(uint32_t session_id, enum voice_lch_mode lch_mode)
+{
+	struct voice_data *v = voice_get_session(session_id);
+	int ret = 0;
+
+	if (v == NULL) {
+		pr_err("%s: Invalid session_id 0x%x\n", __func__, session_id);
+
+		ret = -EINVAL;
+		goto done;
+	}
+
+	mutex_lock(&v->lock);
+	if (v->lch_mode == lch_mode) {
+		pr_debug("%s: Session %d already in LCH mode %d\n",
+				 __func__, session_id, lch_mode);
+
+		mutex_unlock(&v->lock);
+		goto done;
+	}
+
+	v->lch_mode = lch_mode;
+	mutex_unlock(&v->lock);
+
+	ret = voc_lch_ops(v, v->lch_mode);
+	if (ret < 0) {
+		pr_err("%s: lch ops failed %d\n", __func__, ret);
+		goto done;
+	}
+
+done:
+	return ret;
+}
+
+int voc_resume_voice_call(uint32_t session_id)
+{
+	struct voice_data *v = voice_get_session(session_id);
+	int ret = 0;
+
+	if (v == NULL) {
+		pr_err("%s: invalid session_id 0x%x\n", __func__, session_id);
+
+		return -EINVAL;
+	}
+
+	ret = voice_send_start_voice_cmd(v);
+	if (ret < 0) {
+		pr_err("Fail in sending START_VOICE\n");
+		goto fail;
+	}
+	v->voc_state = VOC_RUN;
+	return 0;
+fail:
+	return -EINVAL;
+}
+
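+/*
+ * voc_start_voice_call() - bring a session from INIT/RELEASE to RUN.
+ * Rough bring-up order, as implemented below:
+ *  1. register the APR callbacks for the voice services;
+ *  2. query the CVD version once and cache it;
+ *  3. create the MVM and CVS sessions;
+ *  4. VoIP only: allocate and map the OOB buffers, then select
+ *     out-of-band packet exchange;
+ *  5. send the dual-control policy, set up the vocproc, apply volume
+ *     and TX stream mute, and finally start voice.
+ */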
+int voc_start_voice_call(uint32_t session_id)
+{
+	struct voice_data *v = voice_get_session(session_id);
+	int ret = 0;
+
+	if (v == NULL) {
+		pr_err("%s: invalid session_id 0x%x\n", __func__, session_id);
+
+		return -EINVAL;
+	}
+
+	mutex_lock(&v->lock);
+
+	if (v->voc_state == VOC_ERROR) {
+		pr_debug("%s: VOC in ERR state\n", __func__);
+
+		voice_destroy_mvm_cvs_session(v);
+		v->voc_state = VOC_INIT;
+	}
+
+	if ((v->voc_state == VOC_INIT) ||
+		(v->voc_state == VOC_RELEASE)) {
+		ret = voice_apr_register(session_id);
+		if (ret < 0) {
+			pr_err("%s:  apr register failed\n", __func__);
+			goto fail;
+		}
+
+		if (is_cvd_version_queried()) {
+			pr_debug("%s: Returning the cached value %s\n",
+				 __func__, common.cvd_version);
+		} else {
+			ret = voice_send_mvm_cvd_version_cmd(v);
+			if (ret < 0)
+				pr_debug("%s: Error retrieving CVD version %d\n",
+					 __func__, ret);
+		}
+
+		ret = voice_create_mvm_cvs_session(v);
+		if (ret < 0) {
+			pr_err("create mvm and cvs failed\n");
+			goto fail;
+		}
+
+		if (is_voip_session(session_id)) {
+			/* Allocate oob mem if not already allocated and
+			 * memory map the oob memory block.
+			 */
+			ret = voice_alloc_and_map_oob_mem(v);
+			if (ret < 0) {
+				pr_err("%s: voice_alloc_and_map_oob_mem() failed, ret:%d\n",
+				       __func__, ret);
+
+				goto fail;
+			}
+
+			ret = voice_set_packet_exchange_mode_and_config(
+				session_id,
+				VSS_ISTREAM_PACKET_EXCHANGE_MODE_OUT_OF_BAND);
+			if (ret) {
+				pr_err("%s: Err: exchange_mode_and_config  %d\n",
+					__func__, ret);
+
+				goto fail;
+			}
+		}
+		ret = voice_send_dual_control_cmd(v);
+		if (ret < 0) {
+			pr_err("Err Dual command failed\n");
+			goto fail;
+		}
+		ret = voice_setup_vocproc(v);
+		if (ret < 0) {
+			pr_err("setup voice failed\n");
+			goto fail;
+		}
+
+		ret = voice_send_vol_step_cmd(v);
+		if (ret < 0)
+			pr_err("voice volume failed\n");
+
+		ret = voice_send_stream_mute_cmd(v,
+				VSS_IVOLUME_DIRECTION_TX,
+				v->stream_tx.stream_mute,
+				v->stream_tx.stream_mute_ramp_duration_ms);
+		if (ret < 0)
+			pr_err("voice mute failed\n");
+
+		ret = voice_send_start_voice_cmd(v);
+		if (ret < 0) {
+			pr_err("start voice failed\n");
+			goto fail;
+		}
+
+		v->voc_state = VOC_RUN;
+	} else {
+		pr_err("%s: Error: Start voice called in state %d\n",
+			__func__, v->voc_state);
+
+		ret = -EINVAL;
+		goto fail;
+	}
+fail:
+	mutex_unlock(&v->lock);
+	return ret;
+}
+
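+/*
+ * voc_set_ext_ec_ref_port_id() - enable or disable an external echo
+ * cancellation reference. Enabling requires a valid AFE port; the port
+ * id is cached in common.ec_media_fmt_info, whose remaining fields are
+ * filled in by voc_set_ext_ec_ref_media_fmt_info() above.
+ */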
+int voc_set_ext_ec_ref_port_id(uint16_t port_id, bool state)
+{
+	int ret = 0;
+
+	mutex_lock(&common.common_lock);
+	if (state) {
+		if (port_id == AFE_PORT_INVALID) {
+			pr_err("%s: Invalid port id", __func__);
+			ret = -EINVAL;
+			goto exit;
+		}
+		common.ec_ref_ext = true;
+	} else {
+		common.ec_ref_ext = false;
+	}
+	/* Cache EC Format Info in common */
+	common.ec_media_fmt_info.port_id = port_id;
+exit:
+	mutex_unlock(&common.common_lock);
+	return ret;
+}
+
+int voc_get_ext_ec_ref_port_id(void)
+{
+	if (common.ec_ref_ext)
+		return common.ec_media_fmt_info.port_id;
+	else
+		return AFE_PORT_INVALID;
+}
+
+void voc_register_mvs_cb(ul_cb_fn ul_cb,
+			   dl_cb_fn dl_cb,
+			   voip_ssr_cb ssr_cb,
+			   void *private_data)
+{
+	common.mvs_info.ul_cb = ul_cb;
+	common.mvs_info.dl_cb = dl_cb;
+	common.mvs_info.ssr_cb = ssr_cb;
+	common.mvs_info.private_data = private_data;
+}
+
+void voc_register_dtmf_rx_detection_cb(dtmf_rx_det_cb_fn dtmf_rx_ul_cb,
+				       void *private_data)
+{
+	common.dtmf_info.dtmf_rx_ul_cb = dtmf_rx_ul_cb;
+	common.dtmf_info.private_data = private_data;
+}
+
+void voc_config_vocoder(uint32_t media_type,
+			uint32_t rate,
+			uint32_t network_type,
+			uint32_t dtx_mode,
+			uint32_t evrc_min_rate,
+			uint32_t evrc_max_rate)
+{
+	common.mvs_info.media_type = media_type;
+	common.mvs_info.rate = rate;
+	common.mvs_info.network_type = network_type;
+	common.mvs_info.dtx_mode = dtx_mode;
+	common.mvs_info.evrc_min_rate = evrc_min_rate;
+	common.mvs_info.evrc_max_rate = evrc_max_rate;
+}
+
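+/*
+ * APR callback for the MVM (multimode voice manager) service. Command
+ * responses only record the status in v->mvm_state / v->async_err and
+ * wake the waiter that issued the command; no blocking work is done
+ * here. RESET_EVENTS (ADSP SSR) tears down every session's handles and
+ * mapped memory before flagging the error state.
+ */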
+static int32_t qdsp_mvm_callback(struct apr_client_data *data, void *priv)
+{
+	uint32_t *ptr = NULL;
+	struct common_data *c = NULL;
+	struct voice_data *v = NULL;
+	int i = 0;
+	struct vss_iversion_rsp_get_t *version_rsp = NULL;
+
+	if ((data == NULL) || (priv == NULL)) {
+		pr_err("%s: data or priv is NULL\n", __func__);
+		return -EINVAL;
+	}
+
+	c = priv;
+
+	pr_debug("%s: Payload Length = %d, opcode=%x\n", __func__,
+		data->payload_size, data->opcode);
+
+	if (data->opcode == RESET_EVENTS) {
+		pr_debug("%s: Reset event received in Voice service\n",
+				__func__);
+
+		if (common.mvs_info.ssr_cb) {
+			pr_debug("%s: Informing reset event to VoIP\n",
+					__func__);
+			common.mvs_info.ssr_cb(data->opcode,
+					common.mvs_info.private_data);
+		}
+
+		apr_reset(c->apr_q6_mvm);
+		c->apr_q6_mvm = NULL;
+
+		/* clean up memory handle */
+		c->cal_mem_handle = 0;
+		c->rtac_mem_handle = 0;
+		cal_utils_clear_cal_block_q6maps(MAX_VOICE_CAL_TYPES,
+				common.cal_data);
+		rtac_clear_mapping(VOICE_RTAC_CAL);
+
+		/* Sub-system restart is applicable to all sessions. */
+		for (i = 0; i < MAX_VOC_SESSIONS; i++) {
+			c->voice[i].mvm_handle = 0;
+			c->voice[i].shmem_info.mem_handle = 0;
+		}
+
+		/* Free the ION memory and clear handles for Source Tracking */
+		if (is_source_tracking_shared_memomry_allocated()) {
+			msm_audio_ion_free(
+			common.source_tracking_sh_mem.sh_mem_block.client,
+			common.source_tracking_sh_mem.sh_mem_block.handle);
+			common.source_tracking_sh_mem.mem_handle = 0;
+			common.source_tracking_sh_mem.sh_mem_block.client =
+									NULL;
+			common.source_tracking_sh_mem.sh_mem_block.handle =
+									NULL;
+		}
+		/* clean up srvcc rec flag */
+		c->srvcc_rec_flag = false;
+		voc_set_error_state(data->reset_proc);
+		return 0;
+	}
+
+	pr_debug("%s: session_idx 0x%x\n", __func__, data->dest_port);
+
+	v = voice_get_session_by_idx(data->dest_port);
+	if (v == NULL) {
+		pr_err("%s: v is NULL\n", __func__);
+
+		return -EINVAL;
+	}
+
+	if (data->opcode == APR_BASIC_RSP_RESULT) {
+		if (data->payload_size) {
+			ptr = data->payload;
+
+			pr_debug("%x %x\n", ptr[0], ptr[1]);
+			/* ping mvm service ACK */
+			switch (ptr[0]) {
+			case VSS_IMVM_CMD_CREATE_PASSIVE_CONTROL_SESSION:
+			case VSS_IMVM_CMD_CREATE_FULL_CONTROL_SESSION:
+				/* Passive session is used for CS call
+				 * Full session is used for VoIP call. */
+				pr_debug("%s: cmd = 0x%x\n", __func__, ptr[0]);
+				if (!ptr[1]) {
+					pr_debug("%s: MVM handle is %d\n",
+						 __func__, data->src_port);
+					voice_set_mvm_handle(v, data->src_port);
+				} else
+					pr_err("got NACK for sending MVM create session\n");
+				v->mvm_state = CMD_STATUS_SUCCESS;
+				v->async_err = ptr[1];
+				wake_up(&v->mvm_wait);
+				break;
+			case VSS_IMVM_CMD_START_VOICE:
+			case VSS_IMVM_CMD_ATTACH_VOCPROC:
+			case VSS_IMVM_CMD_STOP_VOICE:
+			case VSS_IMVM_CMD_DETACH_VOCPROC:
+			case VSS_ISTREAM_CMD_SET_TTY_MODE:
+			case APRV2_IBASIC_CMD_DESTROY_SESSION:
+			case VSS_IMVM_CMD_ATTACH_STREAM:
+			case VSS_IMVM_CMD_DETACH_STREAM:
+			case VSS_ICOMMON_CMD_SET_NETWORK:
+			case VSS_ICOMMON_CMD_SET_VOICE_TIMING:
+			case VSS_IMVM_CMD_SET_POLICY_DUAL_CONTROL:
+			case VSS_IMVM_CMD_SET_CAL_NETWORK:
+			case VSS_IMVM_CMD_SET_CAL_MEDIA_TYPE:
+			case VSS_IMEMORY_CMD_MAP_PHYSICAL:
+			case VSS_IMEMORY_CMD_UNMAP:
+			case VSS_IMVM_CMD_PAUSE_VOICE:
+			case VSS_IMVM_CMD_STANDBY_VOICE:
+			case VSS_IHDVOICE_CMD_ENABLE:
+			case VSS_IHDVOICE_CMD_DISABLE:
+				pr_debug("%s: cmd = 0x%x\n", __func__, ptr[0]);
+				v->mvm_state = CMD_STATUS_SUCCESS;
+				v->async_err = ptr[1];
+				wake_up(&v->mvm_wait);
+				break;
+			case VSS_IVERSION_CMD_GET:
+				pr_debug("%s: Error retrieving CVD Version, error:%d\n",
+					 __func__, ptr[1]);
+
+				strlcpy(common.cvd_version, CVD_VERSION_0_0,
+					sizeof(common.cvd_version));
+				pr_debug("%s: Fall back to default value, CVD Version = %s\n",
+					 __func__, common.cvd_version);
+
+				v->mvm_state = CMD_STATUS_SUCCESS;
+				v->async_err = ptr[1];
+				wake_up(&v->mvm_wait);
+				break;
+			default:
+				pr_debug("%s: not match cmd = 0x%x\n",
+					__func__, ptr[0]);
+				break;
+			}
+		}
+	} else if (data->opcode == VSS_IMEMORY_RSP_MAP) {
+		pr_debug("%s, Revd VSS_IMEMORY_RSP_MAP response\n", __func__);
+
+		if (data->payload_size && data->token == VOIP_MEM_MAP_TOKEN) {
+			ptr = data->payload;
+			if (ptr[0]) {
+				v->shmem_info.mem_handle = ptr[0];
+				pr_debug("%s: shared mem_handle: 0x[%x]\n",
+					 __func__, v->shmem_info.mem_handle);
+				v->mvm_state = CMD_STATUS_SUCCESS;
+				wake_up(&v->mvm_wait);
+			}
+		} else if (data->payload_size &&
+			   data->token == VOC_CAL_MEM_MAP_TOKEN) {
+			ptr = data->payload;
+			if (ptr[0]) {
+				c->cal_mem_handle = ptr[0];
+
+				pr_debug("%s: cal mem handle 0x%x\n",
+					 __func__, c->cal_mem_handle);
+
+				v->mvm_state = CMD_STATUS_SUCCESS;
+				wake_up(&v->mvm_wait);
+			}
+		} else if (data->payload_size &&
+			   data->token == VOC_VOICE_HOST_PCM_MAP_TOKEN) {
+			ptr = data->payload;
+			if (ptr[0]) {
+				common.voice_host_pcm_mem_handle = ptr[0];
+
+				pr_debug("%s: vhpcm mem handle 0x%x\n",
+					 __func__,
+					 common.voice_host_pcm_mem_handle);
+				v->mvm_state = CMD_STATUS_SUCCESS;
+				wake_up(&v->mvm_wait);
+			}
+		} else if (data->payload_size &&
+				data->token == VOC_RTAC_MEM_MAP_TOKEN) {
+			ptr = data->payload;
+			if (ptr[0]) {
+				c->rtac_mem_handle = ptr[0];
+
+				pr_debug("%s: cal mem handle 0x%x\n",
+					 __func__, c->rtac_mem_handle);
+
+				v->mvm_state = CMD_STATUS_SUCCESS;
+				wake_up(&v->mvm_wait);
+			}
+		} else if (data->payload_size &&
+			   data->token == VOC_SOURCE_TRACKING_MEM_MAP_TOKEN) {
+			ptr = data->payload;
+			if (ptr[0]) {
+				common.source_tracking_sh_mem.mem_handle =
+									ptr[0];
+
+				pr_debug("%s: Source Tracking shared mem handle 0x%x\n",
+					 __func__,
+				   common.source_tracking_sh_mem.mem_handle);
+
+				v->mvm_state = CMD_STATUS_SUCCESS;
+				wake_up(&v->mvm_wait);
+			}
+		} else {
+			pr_err("%s: Unknown mem map token %d\n",
+			       __func__, data->token);
+		}
+	} else if (data->opcode == VSS_IVERSION_RSP_GET) {
+		pr_debug("%s: Received VSS_IVERSION_RSP_GET\n", __func__);
+
+		if (data->payload_size) {
+			version_rsp =
+				(struct vss_iversion_rsp_get_t *)data->payload;
+			memcpy(common.cvd_version, version_rsp->version,
+			       CVD_VERSION_STRING_MAX_SIZE);
+			pr_debug("%s: CVD Version = %s\n",
+				 __func__, common.cvd_version);
+
+			v->mvm_state = CMD_STATUS_SUCCESS;
+			wake_up(&v->mvm_wait);
+		}
+	}
+	return 0;
+}
+
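+/*
+ * APR callback for the CVS (core voice stream) service. Besides command
+ * completions, this drives the out-of-band VoIP packet exchange:
+ * ENC_BUFFER_READY hands an uplink packet from shared memory to the MVS
+ * client, and DEC_BUFFER_REQUEST pulls a downlink packet from the
+ * client into shared memory before notifying the DSP it is ready.
+ */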
+static int32_t qdsp_cvs_callback(struct apr_client_data *data, void *priv)
+{
+	uint32_t *ptr = NULL;
+	struct common_data *c = NULL;
+	struct voice_data *v = NULL;
+	int i = 0;
+
+	if ((data == NULL) || (priv == NULL)) {
+		pr_err("%s: data or priv is NULL\n", __func__);
+		return -EINVAL;
+	}
+
+	c = priv;
+
+	pr_debug("%s: session_id 0x%x\n", __func__, data->dest_port);
+	pr_debug("%s: Payload Length = %d, opcode=%x\n", __func__,
+		data->payload_size, data->opcode);
+
+	if (data->opcode == RESET_EVENTS) {
+		pr_debug("%s: Reset event received in Voice service\n",
+				__func__);
+
+		apr_reset(c->apr_q6_cvs);
+		c->apr_q6_cvs = NULL;
+
+		/* Sub-system restart is applicable to all sessions. */
+		for (i = 0; i < MAX_VOC_SESSIONS; i++)
+			c->voice[i].cvs_handle = 0;
+
+		cal_utils_clear_cal_block_q6maps(MAX_VOICE_CAL_TYPES,
+				common.cal_data);
+
+		/* Free the ION memory and clear handles for Source Tracking */
+		if (is_source_tracking_shared_memomry_allocated()) {
+			msm_audio_ion_free(
+			common.source_tracking_sh_mem.sh_mem_block.client,
+			common.source_tracking_sh_mem.sh_mem_block.handle);
+			common.source_tracking_sh_mem.mem_handle = 0;
+			common.source_tracking_sh_mem.sh_mem_block.client =
+									NULL;
+			common.source_tracking_sh_mem.sh_mem_block.handle =
+									NULL;
+		}
+		voc_set_error_state(data->reset_proc);
+		return 0;
+	}
+
+	v = voice_get_session_by_idx(data->dest_port);
+	if (v == NULL) {
+		pr_err("%s: v is NULL\n", __func__);
+
+		return -EINVAL;
+	}
+
+	if (data->opcode == APR_BASIC_RSP_RESULT) {
+		if (data->payload_size) {
+			ptr = data->payload;
+
+			pr_debug("%x %x\n", ptr[0], ptr[1]);
+			if (ptr[1] != 0) {
+				pr_err("%s: cmd = 0x%x returned error = 0x%x\n",
+					__func__, ptr[0], ptr[1]);
+			}
+			/*response from  CVS */
+			switch (ptr[0]) {
+			case VSS_ISTREAM_CMD_CREATE_PASSIVE_CONTROL_SESSION:
+			case VSS_ISTREAM_CMD_CREATE_FULL_CONTROL_SESSION:
+				if (!ptr[1]) {
+					pr_debug("%s: CVS handle is %d\n",
+						 __func__, data->src_port);
+					voice_set_cvs_handle(v, data->src_port);
+				} else
+					pr_err("got NACK for sending CVS create session\n");
+				v->cvs_state = CMD_STATUS_SUCCESS;
+				v->async_err = ptr[1];
+				wake_up(&v->cvs_wait);
+				break;
+			case VSS_IVOLUME_CMD_MUTE_V2:
+			case VSS_ISTREAM_CMD_SET_MEDIA_TYPE:
+			case VSS_ISTREAM_CMD_VOC_AMR_SET_ENC_RATE:
+			case VSS_ISTREAM_CMD_VOC_AMRWB_SET_ENC_RATE:
+			case VSS_ISTREAM_CMD_SET_ENC_DTX_MODE:
+			case VSS_ISTREAM_CMD_CDMA_SET_ENC_MINMAX_RATE:
+			case APRV2_IBASIC_CMD_DESTROY_SESSION:
+			case VSS_ISTREAM_CMD_REGISTER_CALIBRATION_DATA_V2:
+			case VSS_ISTREAM_CMD_DEREGISTER_CALIBRATION_DATA:
+			case VSS_ISTREAM_CMD_REGISTER_STATIC_CALIBRATION_DATA:
+			case VSS_ISTREAM_CMD_DEREGISTER_STATIC_CALIBRATION_DATA:
+			case VSS_ICOMMON_CMD_MAP_MEMORY:
+			case VSS_ICOMMON_CMD_UNMAP_MEMORY:
+			case VSS_ICOMMON_CMD_SET_UI_PROPERTY:
+			case VSS_IPLAYBACK_CMD_START:
+			case VSS_IPLAYBACK_CMD_STOP:
+			case VSS_IRECORD_CMD_START:
+			case VSS_IRECORD_CMD_STOP:
+			case VSS_ISTREAM_CMD_SET_PACKET_EXCHANGE_MODE:
+			case VSS_ISTREAM_CMD_SET_OOB_PACKET_EXCHANGE_CONFIG:
+			case VSS_ISTREAM_CMD_SET_RX_DTMF_DETECTION:
+				pr_debug("%s: cmd = 0x%x\n", __func__, ptr[0]);
+				v->cvs_state = CMD_STATUS_SUCCESS;
+				v->async_err = ptr[1];
+				wake_up(&v->cvs_wait);
+				break;
+			case VSS_ICOMMON_CMD_SET_PARAM_V2:
+				pr_debug("%s: VSS_ICOMMON_CMD_SET_PARAM_V2\n",
+					 __func__);
+				rtac_make_voice_callback(RTAC_CVS, ptr,
+							data->payload_size);
+				break;
+			case VSS_ICOMMON_CMD_GET_PARAM_V2:
+				pr_debug("%s: VSS_ICOMMON_CMD_GET_PARAM_V2\n",
+					 __func__);
+				/*
+				 * Should only come here if there is an APR
+				 * error or a malformed APR packet. Otherwise
+				 * the response is returned as
+				 * VSS_ICOMMON_RSP_GET_PARAM.
+				 */
+				if (ptr[1] != 0) {
+					pr_err("%s: CVP get param error = %d, resuming\n",
+						__func__, ptr[1]);
+					rtac_make_voice_callback(RTAC_CVP,
+						data->payload,
+						data->payload_size);
+				}
+				break;
+			default:
+				pr_debug("%s: cmd = 0x%x\n", __func__, ptr[0]);
+				break;
+			}
+		}
+	} else if (data->opcode ==
+			 VSS_ISTREAM_EVT_OOB_NOTIFY_ENC_BUFFER_READY) {
+		int ret = 0;
+		u16 cvs_handle;
+		uint32_t *cvs_voc_pkt;
+		struct cvs_enc_buffer_consumed_cmd send_enc_buf_consumed_cmd;
+		void *apr_cvs;
+
+		pr_debug("Encoder buffer is ready\n");
+
+		apr_cvs = common.apr_q6_cvs;
+		if (!apr_cvs) {
+			pr_err("%s: apr_cvs is NULL\n", __func__);
+			return -EINVAL;
+		}
+		cvs_handle = voice_get_cvs_handle(v);
+
+		send_enc_buf_consumed_cmd.hdr.hdr_field =
+				APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+				APR_HDR_LEN(APR_HDR_SIZE),
+				APR_PKT_VER);
+		send_enc_buf_consumed_cmd.hdr.pkt_size =
+			APR_PKT_SIZE(APR_HDR_SIZE,
+			sizeof(send_enc_buf_consumed_cmd) - APR_HDR_SIZE);
+
+		send_enc_buf_consumed_cmd.hdr.src_port =
+				voice_get_idx_for_session(v->session_id);
+		send_enc_buf_consumed_cmd.hdr.dest_port = cvs_handle;
+		send_enc_buf_consumed_cmd.hdr.token = 0;
+		send_enc_buf_consumed_cmd.hdr.opcode =
+			VSS_ISTREAM_EVT_OOB_NOTIFY_ENC_BUFFER_CONSUMED;
+
+		cvs_voc_pkt = v->shmem_info.sh_buf.buf[1].data;
+		if (cvs_voc_pkt != NULL &&  common.mvs_info.ul_cb != NULL) {
+			/* cvs_voc_pkt[0] contains tx timestamp */
+			common.mvs_info.ul_cb((uint8_t *)&cvs_voc_pkt[3],
+					      cvs_voc_pkt[2],
+					      cvs_voc_pkt[0],
+					      common.mvs_info.private_data);
+		} else
+			pr_err("%s: cvs_voc_pkt or ul_cb is NULL\n", __func__);
+
+		ret = apr_send_pkt(apr_cvs,
+			(uint32_t *) &send_enc_buf_consumed_cmd);
+		if (ret < 0) {
+			pr_err("%s: Err send ENC_BUF_CONSUMED_NOTIFY %d\n",
+				__func__, ret);
+			goto fail;
+		}
+	} else if (data->opcode == VSS_ISTREAM_EVT_SEND_ENC_BUFFER) {
+		pr_debug("Recd VSS_ISTREAM_EVT_SEND_ENC_BUFFER\n");
+	} else if (data->opcode ==
+			 VSS_ISTREAM_EVT_OOB_NOTIFY_DEC_BUFFER_REQUEST) {
+		int ret = 0;
+		u16 cvs_handle;
+		uint32_t *cvs_voc_pkt;
+		struct cvs_dec_buffer_ready_cmd send_dec_buf;
+		void *apr_cvs;
+
+		apr_cvs = common.apr_q6_cvs;
+		if (!apr_cvs) {
+			pr_err("%s: apr_cvs is NULL\n", __func__);
+			return -EINVAL;
+		}
+		cvs_handle = voice_get_cvs_handle(v);
+
+		send_dec_buf.hdr.hdr_field =
+				APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+				APR_HDR_LEN(APR_HDR_SIZE),
+				APR_PKT_VER);
+
+		send_dec_buf.hdr.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE,
+					sizeof(send_dec_buf) - APR_HDR_SIZE);
+
+		send_dec_buf.hdr.src_port =
+				voice_get_idx_for_session(v->session_id);
+		send_dec_buf.hdr.dest_port = cvs_handle;
+		send_dec_buf.hdr.token = 0;
+		send_dec_buf.hdr.opcode =
+				 VSS_ISTREAM_EVT_OOB_NOTIFY_DEC_BUFFER_READY;
+
+		cvs_voc_pkt = (uint32_t *)(v->shmem_info.sh_buf.buf[0].data);
+		if (cvs_voc_pkt != NULL && common.mvs_info.dl_cb != NULL) {
+			/* word [0] carries the timestamp; zero it */
+			cvs_voc_pkt[0] = 0;
+			/* word [1] carries the media type */
+			cvs_voc_pkt[1] = common.mvs_info.media_type;
+			common.mvs_info.dl_cb(
+					      (uint8_t *)&cvs_voc_pkt[2],
+					      common.mvs_info.private_data);
+			ret = apr_send_pkt(apr_cvs, (uint32_t *) &send_dec_buf);
+			if (ret < 0) {
+				pr_err("%s: Err send DEC_BUF_READY_NOTIFI %d\n",
+					__func__, ret);
+				goto fail;
+			}
+		} else {
+			pr_debug("%s: voc_pkt or dl_cb is NULL\n", __func__);
+			goto fail;
+		}
+	} else if (data->opcode == VSS_ISTREAM_EVT_REQUEST_DEC_BUFFER) {
+		pr_debug("Recd VSS_ISTREAM_EVT_REQUEST_DEC_BUFFER\n");
+	} else if (data->opcode == VSS_ISTREAM_EVT_SEND_DEC_BUFFER) {
+		pr_debug("Send dec buf resp\n");
+	} else if (data->opcode == APR_RSP_ACCEPTED) {
+		ptr = data->payload;
+		if (ptr[0])
+			pr_debug("%s: APR_RSP_ACCEPTED for 0x%x:\n",
+				 __func__, ptr[0]);
+	} else if (data->opcode == VSS_ISTREAM_EVT_NOT_READY) {
+		pr_debug("Recd VSS_ISTREAM_EVT_NOT_READY\n");
+	} else if (data->opcode == VSS_ISTREAM_EVT_READY) {
+		pr_debug("Recd VSS_ISTREAM_EVT_READY\n");
+	} else if (data->opcode == VSS_ICOMMON_RSP_GET_PARAM) {
+		pr_debug("%s: VSS_ICOMMON_RSP_GET_PARAM\n", __func__);
+		ptr = data->payload;
+		if (ptr[0] != 0) {
+			pr_err("%s: VSS_ICOMMON_RSP_GET_PARAM returned error = 0x%x\n",
+			       __func__, ptr[0]);
+		}
+		rtac_make_voice_callback(RTAC_CVS, data->payload,
+					data->payload_size);
+	}  else if (data->opcode == VSS_ISTREAM_EVT_RX_DTMF_DETECTED) {
+		struct vss_istream_evt_rx_dtmf_detected *dtmf_rx_detected;
+		uint32_t *voc_pkt = data->payload;
+		uint32_t pkt_len = data->payload_size;
+
+		if ((voc_pkt != NULL) &&
+		    (pkt_len ==
+			sizeof(struct vss_istream_evt_rx_dtmf_detected))) {
+
+			dtmf_rx_detected =
+			(struct vss_istream_evt_rx_dtmf_detected *) voc_pkt;
+			pr_debug("RX_DTMF_DETECTED low_freq=%d high_freq=%d\n",
+				 dtmf_rx_detected->low_freq,
+				 dtmf_rx_detected->high_freq);
+			if (c->dtmf_info.dtmf_rx_ul_cb)
+				c->dtmf_info.dtmf_rx_ul_cb((uint8_t *)voc_pkt,
+					voc_get_session_name(v->session_id),
+					c->dtmf_info.private_data);
+		} else {
+			pr_err("Invalid packet\n");
+		}
+	}  else
+		pr_debug("Unknown opcode 0x%x\n", data->opcode);
+
+fail:
+	return 0;
+}
+
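+/*
+ * APR callback for the CVP (core voice processor) service. Follows the
+ * same pattern as the MVM/CVS callbacks above, and additionally caches
+ * sound focus and source tracking results, the latter read back from
+ * the source tracking shared memory block.
+ */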
+static int32_t qdsp_cvp_callback(struct apr_client_data *data, void *priv)
+{
+	uint32_t *ptr = NULL;
+	struct common_data *c = NULL;
+	struct voice_data *v = NULL;
+	int i = 0;
+
+	if ((data == NULL) || (priv == NULL)) {
+		pr_err("%s: data or priv is NULL\n", __func__);
+		return -EINVAL;
+	}
+
+	c = priv;
+
+	if (data->opcode == RESET_EVENTS) {
+		pr_debug("%s: Reset event received in Voice service\n",
+				__func__);
+
+		apr_reset(c->apr_q6_cvp);
+		c->apr_q6_cvp = NULL;
+		cal_utils_clear_cal_block_q6maps(MAX_VOICE_CAL_TYPES,
+				common.cal_data);
+
+		/* Sub-system restart is applicable to all sessions. */
+		for (i = 0; i < MAX_VOC_SESSIONS; i++)
+			c->voice[i].cvp_handle = 0;
+
+		/*
+		 * Free the ION memory and clear handles for
+		 * Source Tracking
+		 */
+		if (is_source_tracking_shared_memomry_allocated()) {
+			msm_audio_ion_free(
+			common.source_tracking_sh_mem.sh_mem_block.client,
+			common.source_tracking_sh_mem.sh_mem_block.handle);
+			common.source_tracking_sh_mem.mem_handle = 0;
+			common.source_tracking_sh_mem.sh_mem_block.client =
+									NULL;
+			common.source_tracking_sh_mem.sh_mem_block.handle =
+									NULL;
+		}
+		voc_set_error_state(data->reset_proc);
+		return 0;
+	}
+
+	v = voice_get_session_by_idx(data->dest_port);
+	if (v == NULL) {
+		pr_err("%s: v is NULL\n", __func__);
+
+		return -EINVAL;
+	}
+
+	if (data->opcode == APR_BASIC_RSP_RESULT) {
+		if (data->payload_size) {
+			ptr = data->payload;
+
+			pr_debug("%x %x\n", ptr[0], ptr[1]);
+			if (ptr[1] != 0) {
+				pr_err("%s: cmd = 0x%x returned error = 0x%x\n",
+					__func__, ptr[0], ptr[1]);
+			}
+			switch (ptr[0]) {
+			case VSS_IVOCPROC_CMD_CREATE_FULL_CONTROL_SESSION_V2:
+			case VSS_IVOCPROC_CMD_CREATE_FULL_CONTROL_SESSION_V3:
+			/*response from  CVP */
+				pr_debug("%s: cmd = 0x%x\n", __func__, ptr[0]);
+				if (!ptr[1]) {
+					voice_set_cvp_handle(v, data->src_port);
+					pr_debug("status: %d, cvphdl=%d\n",
+						 ptr[1], data->src_port);
+				} else
+					pr_err("got NACK from CVP create session response\n");
+				v->cvp_state = CMD_STATUS_SUCCESS;
+				v->async_err = ptr[1];
+				wake_up(&v->cvp_wait);
+				break;
+			case VSS_IVOCPROC_CMD_SET_DEVICE_V2:
+			case VSS_IVOCPROC_CMD_SET_DEVICE_V3:
+			case VSS_IVOLUME_CMD_SET_STEP:
+			case VSS_IVOCPROC_CMD_ENABLE:
+			case VSS_IVOCPROC_CMD_DISABLE:
+			case APRV2_IBASIC_CMD_DESTROY_SESSION:
+			case VSS_IVOCPROC_CMD_REGISTER_VOL_CALIBRATION_DATA:
+			case VSS_IVOCPROC_CMD_DEREGISTER_VOL_CALIBRATION_DATA:
+			case VSS_IVOCPROC_CMD_REGISTER_CALIBRATION_DATA_V2:
+			case VSS_IVOCPROC_CMD_DEREGISTER_CALIBRATION_DATA:
+			case VSS_IVOCPROC_CMD_REGISTER_DYNAMIC_CALIBRATION_DATA:
+			case VSS_IVOCPROC_CMD_DEREGISTER_DYNAMIC_CALIBRATION_DATA:
+			case VSS_IVOCPROC_CMD_REGISTER_STATIC_CALIBRATION_DATA:
+			case VSS_IVOCPROC_CMD_DEREGISTER_STATIC_CALIBRATION_DATA:
+			case VSS_IVOCPROC_CMD_REGISTER_DEVICE_CONFIG:
+			case VSS_IVOCPROC_CMD_DEREGISTER_DEVICE_CONFIG:
+			case VSS_ICOMMON_CMD_MAP_MEMORY:
+			case VSS_ICOMMON_CMD_UNMAP_MEMORY:
+			case VSS_IVOLUME_CMD_MUTE_V2:
+			case VSS_IVPCM_CMD_START_V2:
+			case VSS_IVPCM_CMD_STOP:
+			case VSS_IVOCPROC_CMD_TOPOLOGY_SET_DEV_CHANNELS:
+			case VSS_IVOCPROC_CMD_TOPOLOGY_COMMIT:
+				v->cvp_state = CMD_STATUS_SUCCESS;
+				v->async_err = ptr[1];
+				wake_up(&v->cvp_wait);
+				break;
+			case VSS_IVPCM_EVT_PUSH_BUFFER_V2:
+				break;
+			case VSS_ICOMMON_CMD_SET_PARAM_V2:
+				switch (data->token) {
+				case VOC_SET_MEDIA_FORMAT_PARAM_TOKEN:
+					pr_debug("%s: VSS_ICOMMON_CMD_SET_PARAM_V2 called by voice_send_cvp_media_format_cmd\n",
+						__func__);
+					v->cvp_state = CMD_STATUS_SUCCESS;
+					v->async_err = ptr[1];
+					wake_up(&v->cvp_wait);
+					break;
+				case VOC_RTAC_SET_PARAM_TOKEN:
+					pr_debug("%s: VSS_ICOMMON_CMD_SET_PARAM_V2 called by rtac\n",
+						__func__);
+					rtac_make_voice_callback(
+						RTAC_CVP, ptr,
+						data->payload_size);
+					break;
+				default:
+					pr_debug("%s: invalid token for command VSS_ICOMMON_CMD_SET_PARAM_V2: %d\n",
+						__func__, data->token);
+					break;
+				}
+				break;
+			case VSS_ICOMMON_CMD_GET_PARAM_V2:
+				pr_debug("%s: VSS_ICOMMON_CMD_GET_PARAM_V2\n",
+					 __func__);
+				/*
+				 * Should only come here if there is an APR
+				 * error or a malformed APR packet. Otherwise
+				 * the response is returned as
+				 * VSS_ICOMMON_RSP_GET_PARAM.
+				 */
+				if (ptr[1] != 0) {
+					pr_err("%s: CVP get param error = %d, resuming\n",
+						__func__, ptr[1]);
+					rtac_make_voice_callback(RTAC_CVP,
+						data->payload,
+						data->payload_size);
+				}
+				break;
+			case VSS_ISOUNDFOCUS_CMD_SET_SECTORS:
+				if (!ptr[1])
+					common.is_sound_focus_resp_success =
+									true;
+				else
+					common.is_sound_focus_resp_success =
+									false;
+				v->cvp_state = CMD_STATUS_SUCCESS;
+				v->async_err = ptr[1];
+				wake_up(&v->cvp_wait);
+				break;
+			case VSS_ISOUNDFOCUS_CMD_GET_SECTORS:
+				/*
+				 * Should only come here if there is an error
+				 * response received from ADSP. Otherwise
+				 * response will be returned as
+				 * VSS_ISOUNDFOCUS_RSP_GET_SECTORS
+				 */
+				pr_err("%s: VSS_ISOUNDFOCUS_CMD_GET_SECTORS failed\n",
+					__func__);
+
+				common.is_sound_focus_resp_success = false;
+				v->cvp_state = CMD_STATUS_SUCCESS;
+				v->async_err = ptr[1];
+				wake_up(&v->cvp_wait);
+				break;
+			case VSS_ISOURCETRACK_CMD_GET_ACTIVITY:
+				if (!ptr[1]) {
+					/* Read data from shared memory */
+					memcpy(&common.sourceTrackingResponse,
+					       common.source_tracking_sh_mem.
+							sh_mem_block.data,
+					       sizeof(struct
+					 vss_isourcetrack_activity_data_t));
+					common.is_source_tracking_resp_success =
+									true;
+				} else {
+					common.is_source_tracking_resp_success =
+									false;
+					pr_err("%s: Error received for source tracking params\n",
+						__func__);
+				}
+				v->cvp_state = CMD_STATUS_SUCCESS;
+				v->async_err = ptr[1];
+				wake_up(&v->cvp_wait);
+				break;
+			default:
+				pr_debug("%s: not match cmd = 0x%x\n",
+					  __func__, ptr[0]);
+				break;
+			}
+		}
+	} else if (data->opcode == VSS_ICOMMON_RSP_GET_PARAM) {
+		pr_debug("%s: VSS_ICOMMON_RSP_GET_PARAM\n", __func__);
+		ptr = data->payload;
+		if (ptr[0] != 0) {
+			pr_err("%s: VSS_ICOMMON_RSP_GET_PARAM returned error = 0x%x\n",
+			       __func__, ptr[0]);
+		}
+		rtac_make_voice_callback(RTAC_CVP, data->payload,
+			data->payload_size);
+	} else if (data->opcode == VSS_IVPCM_EVT_NOTIFY_V2) {
+		if ((data->payload != NULL) && data->payload_size ==
+		    sizeof(struct vss_ivpcm_evt_notify_v2_t) &&
+		    common.hostpcm_info.hostpcm_evt_cb != NULL) {
+			common.hostpcm_info.hostpcm_evt_cb(data->payload,
+					voc_get_session_name(v->session_id),
+					common.hostpcm_info.private_data);
+		}
+	} else if (data->opcode == VSS_ISOUNDFOCUS_RSP_GET_SECTORS) {
+		if (data->payload && (data->payload_size ==
+			sizeof(struct vss_isoundfocus_rsp_get_sectors_t))) {
+			common.is_sound_focus_resp_success = true;
+			memcpy(&common.soundFocusResponse,
+			       (struct vss_isoundfocus_rsp_get_sectors_t *)
+			       data->payload,
+			       sizeof(struct
+					 vss_isoundfocus_rsp_get_sectors_t));
+		} else {
+			common.is_sound_focus_resp_success = false;
+			pr_debug("%s: Invalid payload received from CVD\n",
+				 __func__);
+		}
+		v->cvp_state = CMD_STATUS_SUCCESS;
+		wake_up(&v->cvp_wait);
+	}
+	return 0;
+}
+
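+/*
+ * The out-of-band (OOB) VoIP packet exchange uses a single ION
+ * allocation split into NUM_OF_BUFFERS blocks of BUFFER_BLOCK_SIZE
+ * bytes: buf[0] carries downlink (decoder) packets and buf[1] uplink
+ * (encoder) packets, as consumed by qdsp_cvs_callback() above.
+ */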
+static int voice_free_oob_shared_mem(void)
+{
+	int rc = 0;
+	int cnt = 0;
+	int bufcnt = NUM_OF_BUFFERS;
+	struct voice_data *v = voice_get_session(
+				common.voice[VOC_PATH_FULL].session_id);
+
+	mutex_lock(&common.common_lock);
+	if (v == NULL) {
+		pr_err("%s: v is NULL\n", __func__);
+
+		rc = -EINVAL;
+		goto done;
+	}
+
+	rc = msm_audio_ion_free(v->shmem_info.sh_buf.client,
+				v->shmem_info.sh_buf.handle);
+	v->shmem_info.sh_buf.client = NULL;
+	v->shmem_info.sh_buf.handle = NULL;
+	if (rc < 0) {
+		pr_err("%s: Error:%d freeing memory\n", __func__, rc);
+
+		goto done;
+	}
+
+
+	while (cnt < bufcnt) {
+		v->shmem_info.sh_buf.buf[cnt].data =  NULL;
+		v->shmem_info.sh_buf.buf[cnt].phys =  0;
+		cnt++;
+	}
+
+	v->shmem_info.sh_buf.client = NULL;
+	v->shmem_info.sh_buf.handle = NULL;
+
+done:
+	mutex_unlock(&common.common_lock);
+	return rc;
+}
+
+static int voice_alloc_oob_shared_mem(void)
+{
+	int cnt = 0;
+	int rc = 0;
+	size_t len;
+	void *mem_addr;
+	dma_addr_t phys;
+	int bufsz = BUFFER_BLOCK_SIZE;
+	int bufcnt = NUM_OF_BUFFERS;
+	struct voice_data *v = voice_get_session(
+				common.voice[VOC_PATH_FULL].session_id);
+
+	mutex_lock(&common.common_lock);
+	if (v == NULL) {
+		pr_err("%s: v is NULL\n", __func__);
+
+		rc = -EINVAL;
+		goto done;
+	}
+
+	rc = msm_audio_ion_alloc("voip_client", &(v->shmem_info.sh_buf.client),
+			&(v->shmem_info.sh_buf.handle),
+			bufsz*bufcnt,
+			&phys, &len,
+			&mem_addr);
+	if (rc < 0) {
+		pr_err("%s: audio ION alloc failed, rc = %d\n",
+			__func__, rc);
+
+		goto done;
+	}
+
+	while (cnt < bufcnt) {
+		v->shmem_info.sh_buf.buf[cnt].data =  mem_addr  + (cnt * bufsz);
+		v->shmem_info.sh_buf.buf[cnt].phys =  phys + (cnt * bufsz);
+		v->shmem_info.sh_buf.buf[cnt].size = bufsz;
+		cnt++;
+	}
+
+	pr_debug("%s buf[0].data:[%pK], buf[0].phys:[%pK], &buf[0].phys:[%pK],\n",
+		 __func__,
+		(void *)v->shmem_info.sh_buf.buf[0].data,
+		&v->shmem_info.sh_buf.buf[0].phys,
+		(void *)&v->shmem_info.sh_buf.buf[0].phys);
+	pr_debug("%s: buf[1].data:[%pK], buf[1].phys[%pK], &buf[1].phys[%pK]\n",
+		__func__,
+		(void *)v->shmem_info.sh_buf.buf[1].data,
+		&v->shmem_info.sh_buf.buf[1].phys,
+		(void *)&v->shmem_info.sh_buf.buf[1].phys);
+
+	memset((void *)v->shmem_info.sh_buf.buf[0].data, 0, (bufsz * bufcnt));
+
+done:
+	mutex_unlock(&common.common_lock);
+	return rc;
+}
+
+static int voice_alloc_oob_mem_table(void)
+{
+	int rc = 0;
+	size_t len;
+	struct voice_data *v = voice_get_session(
+				common.voice[VOC_PATH_FULL].session_id);
+
+	mutex_lock(&common.common_lock);
+	if (v == NULL) {
+		pr_err("%s: v is NULL\n", __func__);
+
+		rc = -EINVAL;
+		goto done;
+	}
+
+	rc = msm_audio_ion_alloc("voip_client", &(v->shmem_info.memtbl.client),
+				&(v->shmem_info.memtbl.handle),
+				sizeof(struct vss_imemory_table_t),
+				&v->shmem_info.memtbl.phys,
+				&len,
+				&(v->shmem_info.memtbl.data));
+	if (rc < 0) {
+		pr_err("%s: audio ION alloc failed, rc = %d\n",
+			__func__, rc);
+
+		goto done;
+	}
+
+	v->shmem_info.memtbl.size = sizeof(struct vss_imemory_table_t);
+	pr_debug("%s data[%pK]phys[%pK][%pK]\n", __func__,
+		 (void *)v->shmem_info.memtbl.data,
+		 &v->shmem_info.memtbl.phys,
+		 (void *)&v->shmem_info.memtbl.phys);
+
+done:
+	mutex_unlock(&common.common_lock);
+	return rc;
+}
+
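+/*
+ * voc_send_cvp_start_vocpcm() - start host PCM (VPCM) tap points on the
+ * vocproc. Each tap point names a point in the voice path plus a
+ * direction and sampling rate; audio is then exchanged through
+ * VSS_IVPCM_EVT_PUSH_BUFFER_V2 / VSS_IVPCM_EVT_NOTIFY_V2 using memory
+ * previously mapped with voc_send_cvp_map_vocpcm_memory().
+ */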
+int voc_send_cvp_start_vocpcm(uint32_t session_id,
+			      struct vss_ivpcm_tap_point *vpcm_tp,
+			      uint32_t no_of_tp)
+{
+	struct cvp_start_cmd cvp_start_cmd;
+	int ret = 0;
+	void *apr_cvp;
+	u16 cvp_handle;
+	struct voice_data *v = voice_get_session(session_id);
+	int i = 0;
+
+	if (v == NULL) {
+		pr_err("%s: v is NULL\n", __func__);
+		ret = -EINVAL;
+		goto done;
+	}
+	apr_cvp = common.apr_q6_cvp;
+
+	if (!apr_cvp) {
+		pr_err("%s: apr_cvp is NULL.\n", __func__);
+		ret = -EINVAL;
+		goto done;
+	}
+
+	cvp_handle = voice_get_cvp_handle(v);
+
+	/* Fill the header */
+	cvp_start_cmd.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+		APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
+	cvp_start_cmd.hdr.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE,
+		sizeof(struct vss_ivpcm_tap_point) * no_of_tp) +
+		sizeof(cvp_start_cmd.vpcm_start_cmd.num_tap_points) +
+		sizeof(cvp_start_cmd.vpcm_start_cmd.mem_handle);
+	cvp_start_cmd.hdr.src_port = voice_get_idx_for_session(v->session_id);
+	cvp_start_cmd.hdr.dest_port = cvp_handle;
+	cvp_start_cmd.hdr.token = 0;
+	cvp_start_cmd.hdr.opcode = VSS_IVPCM_CMD_START_V2;
+
+	for (i = 0; i < no_of_tp; i++) {
+		cvp_start_cmd.vpcm_start_cmd.tap_points[i].tap_point =
+							vpcm_tp[i].tap_point;
+		cvp_start_cmd.vpcm_start_cmd.tap_points[i].direction =
+							vpcm_tp[i].direction;
+		cvp_start_cmd.vpcm_start_cmd.tap_points[i].sampling_rate =
+						    vpcm_tp[i].sampling_rate;
+		cvp_start_cmd.vpcm_start_cmd.tap_points[i].duration = 0;
+	}
+
+	cvp_start_cmd.vpcm_start_cmd.mem_handle =
+				common.voice_host_pcm_mem_handle;
+	cvp_start_cmd.vpcm_start_cmd.num_tap_points = no_of_tp;
+
+	v->cvp_state = CMD_STATUS_FAIL;
+	v->async_err = 0;
+	ret = apr_send_pkt(apr_cvp, (uint32_t *) &cvp_start_cmd);
+	if (ret < 0) {
+		pr_err("%s: Fail: sending vocpcm map memory,\n", __func__);
+		goto done;
+	}
+	ret = wait_event_timeout(v->cvp_wait,
+			(v->cvp_state == CMD_STATUS_SUCCESS),
+			msecs_to_jiffies(TIMEOUT_MS));
+	if (!ret) {
+		pr_err("%s: wait_event timeout\n", __func__);
+		goto done;
+	}
+	if (v->async_err > 0) {
+		pr_err("%s: DSP returned error[%s]\n",
+				__func__, adsp_err_get_err_str(
+				v->async_err));
+		ret = adsp_err_get_lnx_err_code(
+				v->async_err);
+		goto done;
+	}
+
+done:
+	return ret;
+}
+
+int voc_send_cvp_stop_vocpcm(uint32_t session_id)
+{
+	struct cvp_command vpcm_stop_cmd;
+	int ret = 0;
+	void *apr_cvp;
+	u16 cvp_handle;
+	struct voice_data *v = voice_get_session(session_id);
+
+	if (v == NULL) {
+		pr_err("%s: v is NULL\n", __func__);
+		ret = -EINVAL;
+		goto done;
+	}
+	apr_cvp = common.apr_q6_cvp;
+
+	if (!apr_cvp) {
+		pr_err("%s: apr_cvp is NULL.\n", __func__);
+		ret = -EINVAL;
+		goto done;
+	}
+
+	cvp_handle = voice_get_cvp_handle(v);
+
+	/* fill in the header */
+	vpcm_stop_cmd.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+				 APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
+	vpcm_stop_cmd.hdr.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE,
+			 sizeof(vpcm_stop_cmd) - APR_HDR_SIZE);
+	vpcm_stop_cmd.hdr.src_port = voice_get_idx_for_session(v->session_id);
+	vpcm_stop_cmd.hdr.dest_port = cvp_handle;
+	vpcm_stop_cmd.hdr.token = 0;
+	vpcm_stop_cmd.hdr.opcode = VSS_IVPCM_CMD_STOP;
+
+	v->cvp_state = CMD_STATUS_FAIL;
+	v->async_err = 0;
+	ret = apr_send_pkt(apr_cvp, (uint32_t *) &vpcm_stop_cmd);
+	if (ret < 0) {
+		pr_err("Fail: sending vocpcm stop,\n");
+		goto done;
+	}
+	ret = wait_event_timeout(v->cvp_wait,
+			(v->cvp_state == CMD_STATUS_SUCCESS),
+			msecs_to_jiffies(TIMEOUT_MS));
+	if (!ret) {
+		pr_err("%s: wait_event timeout\n", __func__);
+		goto done;
+	}
+
+	if (v->async_err > 0) {
+		pr_err("%s: DSP returned error[%s]\n",
+				__func__, adsp_err_get_err_str(
+				v->async_err));
+		ret = adsp_err_get_lnx_err_code(
+				v->async_err);
+		goto done;
+	}
+
+done:
+	return ret;
+}
+
+int voc_send_cvp_map_vocpcm_memory(uint32_t session_id,
+				   struct mem_map_table *tp_mem_table,
+				   phys_addr_t paddr, uint32_t bufsize)
+{
+	return  voice_map_memory_physical_cmd(voice_get_session(session_id),
+					      tp_mem_table,
+					      (dma_addr_t) paddr, bufsize,
+					      VOC_VOICE_HOST_PCM_MAP_TOKEN);
+}
+
+int voc_send_cvp_unmap_vocpcm_memory(uint32_t session_id)
+{
+	int ret = 0;
+
+	ret =  voice_send_mvm_unmap_memory_physical_cmd(
+				voice_get_session(session_id),
+				common.voice_host_pcm_mem_handle);
+
+	if (ret == 0)
+		common.voice_host_pcm_mem_handle = 0;
+
+	return ret;
+}
+
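+/*
+ * Unlike the start/stop commands above, the push-buffer event is
+ * fire-and-forget: no completion is awaited. The DSP answers with
+ * VSS_IVPCM_EVT_NOTIFY_V2, which qdsp_cvp_callback() forwards to the
+ * callback registered via voc_register_hpcm_evt_cb() below.
+ */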
+int voc_send_cvp_vocpcm_push_buf_evt(uint32_t session_id,
+			struct vss_ivpcm_evt_push_buffer_v2_t *push_buff_evt)
+{
+	struct cvp_push_buf_cmd vpcm_push_buf_cmd;
+	int ret = 0;
+	void *apr_cvp;
+	u16 cvp_handle;
+	struct voice_data *v = voice_get_session(session_id);
+
+	if (v == NULL) {
+		pr_err("%s: v is NULL\n", __func__);
+		ret = -EINVAL;
+		goto done;
+	}
+	apr_cvp = common.apr_q6_cvp;
+
+	if (!apr_cvp) {
+		pr_err("%s: apr_cvp is NULL.\n", __func__);
+		ret = -EINVAL;
+		goto done;
+	}
+
+	memset(&vpcm_push_buf_cmd, 0, sizeof(vpcm_push_buf_cmd));
+	cvp_handle = voice_get_cvp_handle(v);
+
+	/* fill in the header */
+	vpcm_push_buf_cmd.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+				APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
+	vpcm_push_buf_cmd.hdr.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE,
+				sizeof(vpcm_push_buf_cmd) - APR_HDR_SIZE);
+	vpcm_push_buf_cmd.hdr.src_port =
+				voice_get_idx_for_session(v->session_id);
+	vpcm_push_buf_cmd.hdr.dest_port = cvp_handle;
+	vpcm_push_buf_cmd.hdr.token = 0;
+	vpcm_push_buf_cmd.hdr.opcode = VSS_IVPCM_EVT_PUSH_BUFFER_V2;
+
+	vpcm_push_buf_cmd.vpcm_evt_push_buffer.tap_point =
+					push_buff_evt->tap_point;
+	vpcm_push_buf_cmd.vpcm_evt_push_buffer.push_buf_mask =
+					push_buff_evt->push_buf_mask;
+	vpcm_push_buf_cmd.vpcm_evt_push_buffer.out_buf_mem_address =
+					push_buff_evt->out_buf_mem_address;
+	vpcm_push_buf_cmd.vpcm_evt_push_buffer.in_buf_mem_address =
+					push_buff_evt->in_buf_mem_address;
+	vpcm_push_buf_cmd.vpcm_evt_push_buffer.out_buf_mem_size =
+					push_buff_evt->out_buf_mem_size;
+	vpcm_push_buf_cmd.vpcm_evt_push_buffer.in_buf_mem_size =
+					push_buff_evt->in_buf_mem_size;
+	vpcm_push_buf_cmd.vpcm_evt_push_buffer.sampling_rate =
+					push_buff_evt->sampling_rate;
+	vpcm_push_buf_cmd.vpcm_evt_push_buffer.num_in_channels =
+					push_buff_evt->num_in_channels;
+
+	ret = apr_send_pkt(apr_cvp, (uint32_t *) &vpcm_push_buf_cmd);
+	if (ret < 0) {
+		pr_err("Fail: sending vocpcm map memory,\n");
+		goto done;
+	}
+
+done:
+	return ret;
+}
+
+void voc_register_hpcm_evt_cb(hostpcm_cb_fn hostpcm_cb,
+			      void *private_data)
+{
+	common.hostpcm_info.hostpcm_evt_cb = hostpcm_cb;
+	common.hostpcm_info.private_data = private_data;
+}
+
+void voc_deregister_hpcm_evt_cb(void)
+{
+	common.hostpcm_info.hostpcm_evt_cb = NULL;
+	common.hostpcm_info.private_data = NULL;
+}
+
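+/*
+ * voc_get_cvd_version() - return the CVD version string, querying MVM
+ * once and serving the cached value afterwards. Note that the cached or
+ * default string (CVD_VERSION_0_0 after a NACK) is copied out even when
+ * the query itself fails.
+ */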
+int voc_get_cvd_version(char *cvd_version)
+{
+	int ret = 0;
+	struct voice_data *v = voice_get_session(VOICE_SESSION_VSID);
+
+
+	if (v == NULL) {
+		pr_err("%s: invalid session_id 0x%x\n",
+		       __func__, VOICE_SESSION_VSID);
+
+		ret = -EINVAL;
+		goto done;
+	}
+
+	if (is_cvd_version_queried()) {
+		pr_debug("%s: Returning the cached value %s\n",
+			 __func__, common.cvd_version);
+
+		goto done;
+	}
+
+	/* Register callback to APR */
+	ret = voice_apr_register(VOICE_SESSION_VSID);
+	if (ret < 0) {
+		pr_err("%s: apr register failed\n", __func__);
+		goto done;
+	}
+
+	mutex_lock(&common.common_lock);
+	mutex_lock(&v->lock);
+	ret = voice_send_mvm_cvd_version_cmd(v);
+	if (ret < 0) {
+		pr_err("%s: voice_send_mvm_cvd_version_cmd failed\n", __func__);
+		goto unlock;
+	}
+	ret = 0;
+
+unlock:
+	mutex_unlock(&v->lock);
+	mutex_unlock(&common.common_lock);
+
+done:
+	if (cvd_version)
+		memcpy(cvd_version, common.cvd_version,
+		       CVD_VERSION_STRING_MAX_SIZE);
+
+	return ret;
+}
+
+static int voice_alloc_cal_mem_map_table(void)
+{
+	int ret = 0;
+	size_t len;
+
+	ret = msm_audio_ion_alloc("voc_cal",
+				&(common.cal_mem_map_table.client),
+				&(common.cal_mem_map_table.handle),
+				sizeof(struct vss_imemory_table_t),
+				&common.cal_mem_map_table.phys,
+				&len,
+				&(common.cal_mem_map_table.data));
+	if ((ret < 0) && (ret != -EPROBE_DEFER)) {
+		pr_err("%s: audio ION alloc failed, rc = %d\n",
+			__func__, ret);
+		goto done;
+	}
+
+	common.cal_mem_map_table.size = sizeof(struct vss_imemory_table_t);
+	pr_debug("%s: data %pK phys %pK\n", __func__,
+		 common.cal_mem_map_table.data,
+		 &common.cal_mem_map_table.phys);
+
+done:
+	return ret;
+}
+
+static int voice_alloc_rtac_mem_map_table(void)
+{
+	int ret = 0;
+	size_t len;
+
+	ret = msm_audio_ion_alloc("voc_rtac_cal",
+			&(common.rtac_mem_map_table.client),
+			&(common.rtac_mem_map_table.handle),
+			sizeof(struct vss_imemory_table_t),
+			&common.rtac_mem_map_table.phys,
+			&len,
+			&(common.rtac_mem_map_table.data));
+	if (ret < 0) {
+		pr_err("%s: audio ION alloc failed, rc = %d\n",
+			__func__, ret);
+		goto done;
+	}
+
+	common.rtac_mem_map_table.size = sizeof(struct vss_imemory_table_t);
+	pr_debug("%s: data %pK phys %pK\n", __func__,
+		 common.rtac_mem_map_table.data,
+		 &common.rtac_mem_map_table.phys);
+
+done:
+	return ret;
+}
+
+static int voice_alloc_and_map_oob_mem(struct voice_data *v)
+{
+	int ret = 0;
+
+	if (v == NULL) {
+		pr_err("%s: v is NULL\n", __func__);
+
+		return -EINVAL;
+	}
+
+	if (!is_voip_memory_allocated()) {
+		ret = voc_alloc_voip_shared_memory();
+		if (ret < 0) {
+			pr_err("%s: Failed to create voip oob memory %d\n",
+				   __func__, ret);
+
+			goto done;
+		}
+	}
+
+	ret = voice_map_memory_physical_cmd(v,
+			&v->shmem_info.memtbl,
+			v->shmem_info.sh_buf.buf[0].phys,
+			v->shmem_info.sh_buf.buf[0].size * NUM_OF_BUFFERS,
+			VOIP_MEM_MAP_TOKEN);
+	if (ret) {
+		pr_err("%s: mvm_map_memory_phy failed %d\n",
+			   __func__, ret);
+
+		goto done;
+	}
+
+done:
+	return ret;
+}
+
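+/*
+ * voice_get_topology() - look up the CVP RX/TX topology from the cal
+ * data, falling back to the default RX topology or the single-mic ECNS
+ * TX topology when no cal block has been pushed from userspace.
+ */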
+uint32_t voice_get_topology(uint32_t topology_idx)
+{
+	uint32_t topology = VSS_IVOCPROC_TOPOLOGY_ID_RX_DEFAULT;
+	struct cal_block_data *cal_block = NULL;
+
+	/* initialize to the default topology */
+	if (topology_idx == CVP_VOC_RX_TOPOLOGY_CAL) {
+		topology = VSS_IVOCPROC_TOPOLOGY_ID_RX_DEFAULT;
+	} else if (topology_idx == CVP_VOC_TX_TOPOLOGY_CAL) {
+		topology = VSS_IVOCPROC_TOPOLOGY_ID_TX_SM_ECNS;
+	} else {
+		pr_err("%s: cal index %x is invalid!\n",
+			__func__, topology_idx);
+
+		goto done;
+	}
+
+	if (common.cal_data[topology_idx] == NULL) {
+		pr_err("%s: cal type is NULL for cal index %x\n",
+			__func__, topology_idx);
+
+		goto done;
+	}
+
+	mutex_lock(&common.cal_data[topology_idx]->lock);
+	cal_block = cal_utils_get_only_cal_block(
+		common.cal_data[topology_idx]);
+	if (cal_block == NULL) {
+		pr_debug("%s: cal_block not found for cal index %x\n",
+			__func__, topology_idx);
+
+		goto unlock;
+	}
+
+	topology = ((struct audio_cal_info_voc_top *)
+		cal_block->cal_info)->topology;
+unlock:
+	mutex_unlock(&common.cal_data[topology_idx]->lock);
+done:
+	pr_debug("%s: Using topology %d\n", __func__, topology);
+
+	return topology;
+}
+
+static int get_cal_type_index(int32_t cal_type)
+{
+	int ret = -EINVAL;
+
+	switch (cal_type) {
+	case CVP_VOC_RX_TOPOLOGY_CAL_TYPE:
+		ret = CVP_VOC_RX_TOPOLOGY_CAL;
+		break;
+	case CVP_VOC_TX_TOPOLOGY_CAL_TYPE:
+		ret = CVP_VOC_TX_TOPOLOGY_CAL;
+		break;
+	case CVP_VOCPROC_STATIC_CAL_TYPE:
+		ret = CVP_VOCPROC_CAL;
+		break;
+	case CVP_VOCPROC_DYNAMIC_CAL_TYPE:
+		ret = CVP_VOCVOL_CAL;
+		break;
+	case CVS_VOCSTRM_STATIC_CAL_TYPE:
+		ret = CVS_VOCSTRM_CAL;
+		break;
+	case CVP_VOCDEV_CFG_CAL_TYPE:
+		ret = CVP_VOCDEV_CFG_CAL;
+		break;
+	case CVP_VOCPROC_STATIC_COL_CAL_TYPE:
+		ret = CVP_VOCPROC_COL_CAL;
+		break;
+	case CVP_VOCPROC_DYNAMIC_COL_CAL_TYPE:
+		ret = CVP_VOCVOL_COL_CAL;
+		break;
+	case CVS_VOCSTRM_STATIC_COL_CAL_TYPE:
+		ret = CVS_VOCSTRM_COL_CAL;
+		break;
+	case VOICE_RTAC_INFO_CAL_TYPE:
+		ret = VOICE_RTAC_INFO_CAL;
+		break;
+	case VOICE_RTAC_APR_CAL_TYPE:
+		ret = VOICE_RTAC_APR_CAL;
+		break;
+	default:
+		pr_err("%s: Invalid cal type %d!\n", __func__, cal_type);
+	}
+	return ret;
+}
+
+static int voice_prepare_volume_boost(int32_t cal_type,
+					size_t data_size, void *data)
+{
+	return voc_deregister_vocproc_vol_table();
+}
+
+static int voice_enable_volume_boost(int32_t cal_type,
+				size_t data_size, void *data)
+{
+	return voc_register_vocproc_vol_table();
+}
+
+static int voice_alloc_cal(int32_t cal_type,
+			   size_t data_size, void *data)
+{
+	int ret = 0;
+	int cal_index;
+	int cal_version;
+
+	pr_debug("%s\n", __func__);
+
+	cal_version = cal_utils_get_cal_type_version(data);
+	common.is_per_vocoder_cal_enabled =
+			!!(cal_version & PER_VOCODER_CAL_BIT_MASK);
+
+	cal_index = get_cal_type_index(cal_type);
+	if (cal_index < 0) {
+		pr_err("%s: Could not get cal index %d!\n",
+			__func__, cal_index);
+		ret = -EINVAL;
+		goto done;
+	}
+
+	ret = cal_utils_alloc_cal(data_size, data,
+		common.cal_data[cal_index], 0, NULL);
+	if (ret < 0) {
+		pr_err("%s: Cal_utils_alloc_block failed, ret = %d, cal type = %d!\n",
+			__func__, ret, cal_type);
+		ret = -EINVAL;
+		goto done;
+	}
+done:
+	return ret;
+}
+
+static int voice_dealloc_cal(int32_t cal_type,
+			     size_t data_size, void *data)
+{
+	int ret = 0;
+	int cal_index;
+
+	pr_debug("%s\n", __func__);
+
+	cal_index = get_cal_type_index(cal_type);
+	if (cal_index < 0) {
+		pr_err("%s: Could not get cal index %d!\n",
+			__func__, cal_index);
+
+		ret = -EINVAL;
+		goto done;
+	}
+
+	ret = cal_utils_dealloc_cal(data_size, data,
+		common.cal_data[cal_index]);
+	if (ret < 0) {
+		pr_err("%s: Cal_utils_dealloc_block failed, ret = %d, cal type = %d!\n",
+			__func__, ret, cal_type);
+
+		ret = -EINVAL;
+		goto done;
+	}
+done:
+	return ret;
+}
+
+static int voice_set_cal(int32_t cal_type,
+			 size_t data_size, void *data)
+{
+	int ret = 0;
+	int cal_index;
+
+	pr_debug("%s\n", __func__);
+
+	cal_index = get_cal_type_index(cal_type);
+	if (cal_index < 0) {
+		pr_err("%s: Could not get cal index %d!\n",
+			__func__, cal_index);
+
+		ret = -EINVAL;
+		goto done;
+	}
+
+	ret = cal_utils_set_cal(data_size, data,
+		common.cal_data[cal_index], 0, NULL);
+	if (ret < 0) {
+		pr_err("%s: Cal_utils_set_cal failed, ret = %d, cal type = %d!\n",
+			__func__, ret, cal_type);
+
+		ret = -EINVAL;
+		goto done;
+	}
+done:
+	return ret;
+}
+
+static void voice_delete_cal_data(void)
+{
+	pr_debug("%s\n", __func__);
+
+	cal_utils_destroy_cal_types(MAX_VOICE_CAL_TYPES, common.cal_data);
+
+	return;
+}
+
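+/*
+ * Table handed to cal_utils_create_cal_types() below. Each entry reads
+ * as (slot names assumed from the cal_utils callback structs):
+ *
+ *	{{CAL_TYPE_ID,
+ *	{alloc, dealloc, pre_cal, set_cal, get_cal, post_cal} },
+ *	{map, unmap, match} }
+ *
+ * e.g. the vocproc dynamic (volume) cal uses the pre/post hooks to
+ * de-register and re-register the volume table around an update.
+ */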
+static int voice_init_cal_data(void)
+{
+	int ret = 0;
+	struct cal_type_info cal_type_info[] = {
+		{{CVP_VOC_RX_TOPOLOGY_CAL_TYPE,
+		{NULL, NULL, NULL, voice_set_cal, NULL, NULL} },
+		{NULL, NULL, cal_utils_match_buf_num} },
+
+		{{CVP_VOC_TX_TOPOLOGY_CAL_TYPE,
+		{NULL, NULL, NULL, voice_set_cal, NULL, NULL} },
+		{NULL, NULL, cal_utils_match_buf_num} },
+
+		{{CVP_VOCPROC_STATIC_CAL_TYPE,
+		{voice_alloc_cal, voice_dealloc_cal, NULL,
+		voice_set_cal, NULL, NULL} },
+		{NULL, voice_unmap_cal_memory,
+		cal_utils_match_buf_num} },
+
+		{{CVP_VOCPROC_DYNAMIC_CAL_TYPE,
+		{voice_alloc_cal, voice_dealloc_cal,
+		voice_prepare_volume_boost,
+		voice_set_cal, NULL,
+		voice_enable_volume_boost} },
+		{NULL, voice_unmap_cal_memory,
+		cal_utils_match_buf_num} },
+
+		{{CVP_VOCDEV_CFG_CAL_TYPE,
+		{voice_alloc_cal, voice_dealloc_cal, NULL,
+		voice_set_cal, NULL, NULL} },
+		{NULL, voice_unmap_cal_memory,
+		cal_utils_match_buf_num} },
+
+		{{CVP_VOCPROC_STATIC_COL_CAL_TYPE,
+		{NULL, NULL, NULL, voice_set_cal, NULL, NULL} },
+		{NULL, NULL, cal_utils_match_buf_num} },
+
+		{{CVP_VOCPROC_DYNAMIC_COL_CAL_TYPE,
+		{NULL, NULL, NULL, voice_set_cal, NULL, NULL} },
+		{NULL, NULL, cal_utils_match_buf_num} },
+
+		{{CVS_VOCSTRM_STATIC_CAL_TYPE,
+		{voice_alloc_cal, voice_dealloc_cal, NULL,
+		voice_set_cal, NULL, NULL} },
+		{NULL, voice_unmap_cal_memory,
+		cal_utils_match_buf_num} },
+
+		{{CVS_VOCSTRM_STATIC_COL_CAL_TYPE,
+		{NULL, NULL, NULL, voice_set_cal, NULL, NULL} },
+		{NULL, NULL, cal_utils_match_buf_num} },
+
+		{{VOICE_RTAC_INFO_CAL_TYPE,
+		{NULL, NULL, NULL, NULL, NULL, NULL} },
+		{NULL, NULL, cal_utils_match_buf_num} },
+
+		{{VOICE_RTAC_APR_CAL_TYPE,
+		{NULL, NULL, NULL, NULL, NULL, NULL} },
+		{NULL, NULL, cal_utils_match_buf_num} },
+	};
+
+	ret = cal_utils_create_cal_types(MAX_VOICE_CAL_TYPES, common.cal_data,
+		cal_type_info);
+	if (ret < 0) {
+		pr_err("%s: Could not create cal type!\n",
+			__func__);
+
+		ret = -EINVAL;
+		goto err;
+	}
+
+	return ret;
+err:
+	voice_delete_cal_data();
+	memset(&common, 0, sizeof(struct common_data));
+	return ret;
+}
+
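+/*
+ * Sound focus splits the capture field into MAX_SECTORS angular
+ * sectors, each with a start angle and an enable flag, plus one global
+ * gain step. The set command writes these to the vocproc; the DSP's
+ * verdict comes back through common.is_sound_focus_resp_success, set in
+ * qdsp_cvp_callback().
+ */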
+static int voice_send_set_sound_focus_cmd(struct voice_data *v,
+				 struct sound_focus_param soundFocusData)
+{
+	struct cvp_set_sound_focus_param_cmd_t cvp_set_sound_focus_param_cmd;
+	int ret = 0;
+	void *apr_cvp;
+	u16 cvp_handle;
+	int i;
+
+	pr_debug("%s: Enter\n", __func__);
+
+	if (v == NULL) {
+		pr_err("%s: v is NULL\n", __func__);
+
+		ret = -EINVAL;
+		goto done;
+	}
+	apr_cvp = common.apr_q6_cvp;
+
+	if (!apr_cvp) {
+		pr_err("%s: apr_cvp is NULL.\n", __func__);
+
+		ret = -EINVAL;
+		goto done;
+	}
+	cvp_handle = voice_get_cvp_handle(v);
+
+	/* send Sound Focus Params to cvp */
+	cvp_set_sound_focus_param_cmd.hdr.hdr_field =
+				APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+					      APR_HDR_LEN(APR_HDR_SIZE),
+					      APR_PKT_VER);
+	cvp_set_sound_focus_param_cmd.hdr.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE,
+			sizeof(cvp_set_sound_focus_param_cmd) - APR_HDR_SIZE);
+	cvp_set_sound_focus_param_cmd.hdr.src_port =
+				voice_get_idx_for_session(v->session_id);
+	cvp_set_sound_focus_param_cmd.hdr.dest_port = cvp_handle;
+	cvp_set_sound_focus_param_cmd.hdr.token = 0;
+	cvp_set_sound_focus_param_cmd.hdr.opcode =
+					 VSS_ISOUNDFOCUS_CMD_SET_SECTORS;
+
+	memset(&(cvp_set_sound_focus_param_cmd.cvp_set_sound_focus_param), 0xFF,
+		sizeof(struct vss_isoundfocus_cmd_set_sectors_t));
+	for (i = 0; i < MAX_SECTORS; i++) {
+		cvp_set_sound_focus_param_cmd.cvp_set_sound_focus_param.
+			start_angles[i] = soundFocusData.start_angle[i];
+		cvp_set_sound_focus_param_cmd.cvp_set_sound_focus_param.
+			enables[i] = soundFocusData.enable[i];
+		pr_debug("%s: start_angle[%d] = %d\n",
+			  __func__, i, soundFocusData.start_angle[i]);
+		pr_debug("%s: enable[%d] = %d\n",
+			  __func__, i, soundFocusData.enable[i]);
+	}
+	cvp_set_sound_focus_param_cmd.cvp_set_sound_focus_param.gain_step =
+					soundFocusData.gain_step;
+	pr_debug("%s: gain_step = %d\n", __func__, soundFocusData.gain_step);
+
+	v->cvp_state = CMD_STATUS_FAIL;
+	v->async_err = 0;
+
+	ret = apr_send_pkt(apr_cvp, (uint32_t *)&cvp_set_sound_focus_param_cmd);
+	if (ret < 0) {
+		pr_err("%s: Error in sending APR command\n", __func__);
+
+		ret = -EINVAL;
+		goto done;
+	}
+	ret = wait_event_timeout(v->cvp_wait,
+				 (v->cvp_state == CMD_STATUS_SUCCESS),
+				 msecs_to_jiffies(TIMEOUT_MS));
+	if (!ret) {
+		pr_err("%s: wait_event timeout\n", __func__);
+
+		ret = -EINVAL;
+		goto done;
+	}
+
+	if (v->async_err > 0) {
+		pr_err("%s: DSP returned error[%s]\n",
+				__func__, adsp_err_get_err_str(
+				v->async_err));
+		ret = adsp_err_get_lnx_err_code(
+				v->async_err);
+		goto done;
+	}
+
+	if (common.is_sound_focus_resp_success) {
+		ret = 0;
+	} else {
+		pr_err("%s: Error in setting sound focus params\n", __func__);
+
+		ret = -EINVAL;
+	}
+
+done:
+	pr_debug("%s: Exit, ret=%d\n", __func__, ret);
+
+	return ret;
+}
+
+int voc_set_sound_focus(struct sound_focus_param soundFocusData)
+{
+	struct voice_data *v = NULL;
+	int ret = -EINVAL;
+	struct voice_session_itr itr;
+
+	pr_debug("%s: Enter\n", __func__);
+
+	mutex_lock(&common.common_lock);
+	voice_itr_init(&itr, ALL_SESSION_VSID);
+	while (voice_itr_get_next_session(&itr, &v)) {
+		if (v != NULL) {
+			mutex_lock(&v->lock);
+			if (is_voc_state_active(v->voc_state) &&
+				(v->lch_mode != VOICE_LCH_START) &&
+				!v->disable_topology)
+				ret = voice_send_set_sound_focus_cmd(v,
+							soundFocusData);
+			mutex_unlock(&v->lock);
+		} else {
+			pr_err("%s: invalid session\n", __func__);
+
+			ret = -EINVAL;
+			break;
+		}
+	}
+	mutex_unlock(&common.common_lock);
+	pr_debug("%s: Exit, ret=%d\n", __func__, ret);
+
+	return ret;
+}
+
+static int voice_send_get_sound_focus_cmd(struct voice_data *v,
+				struct sound_focus_param *soundFocusData)
+{
+	struct apr_hdr cvp_get_sound_focus_param_cmd;
+	int ret = 0;
+	void *apr_cvp;
+	u16 cvp_handle;
+	int i;
+
+	pr_debug("%s: Enter\n", __func__);
+
+	if (!v) {
+		pr_err("%s: v is NULL\n", __func__);
+
+		ret = -EINVAL;
+		goto done;
+	}
+	apr_cvp = common.apr_q6_cvp;
+
+	if (!apr_cvp) {
+		pr_err("%s: apr_cvp is NULL\n", __func__);
+
+		ret = -EINVAL;
+		goto done;
+	}
+
+	cvp_handle = voice_get_cvp_handle(v);
+
+	/* send APR command to retrieve Sound Focus Params */
+	cvp_get_sound_focus_param_cmd.hdr_field =
+				APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+					      APR_HDR_LEN(APR_HDR_SIZE),
+					      APR_PKT_VER);
+	cvp_get_sound_focus_param_cmd.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE,
+			sizeof(cvp_get_sound_focus_param_cmd) - APR_HDR_SIZE);
+	cvp_get_sound_focus_param_cmd.src_port =
+				voice_get_idx_for_session(v->session_id);
+	cvp_get_sound_focus_param_cmd.dest_port = cvp_handle;
+	cvp_get_sound_focus_param_cmd.token = 0;
+	cvp_get_sound_focus_param_cmd.opcode = VSS_ISOUNDFOCUS_CMD_GET_SECTORS;
+
+	v->cvp_state = CMD_STATUS_FAIL;
+	v->async_err = 0;
+	ret = apr_send_pkt(apr_cvp, (uint32_t *)&cvp_get_sound_focus_param_cmd);
+	if (ret < 0) {
+		pr_err("%s: Error in sending APR command\n", __func__);
+
+		ret = -EINVAL;
+		goto done;
+	}
+	ret = wait_event_timeout(v->cvp_wait,
+				 (v->cvp_state == CMD_STATUS_SUCCESS),
+				 msecs_to_jiffies(TIMEOUT_MS));
+	if (!ret) {
+		pr_err("%s: wait_event timeout\n", __func__);
+
+		ret = -EINVAL;
+		goto done;
+	}
+
+	if (v->async_err > 0) {
+		pr_err("%s: DSP returned error[%s]\n",
+				__func__, adsp_err_get_err_str(
+				v->async_err));
+		ret = adsp_err_get_lnx_err_code(
+				v->async_err);
+		goto done;
+	}
+
+	if (common.is_sound_focus_resp_success) {
+		for (i = 0; i < MAX_SECTORS; i++) {
+			soundFocusData->start_angle[i] =
+				common.soundFocusResponse.start_angles[i];
+			soundFocusData->enable[i] =
+				common.soundFocusResponse.enables[i];
+			pr_debug("%s: start_angle[%d] = %d\n",
+				  __func__, i, soundFocusData->start_angle[i]);
+			pr_debug("%s: enable[%d] = %d\n",
+				  __func__, i, soundFocusData->enable[i]);
+		}
+		soundFocusData->gain_step = common.soundFocusResponse.gain_step;
+		pr_debug("%s: gain_step = %d\n", __func__,
+			  soundFocusData->gain_step);
+
+		common.is_sound_focus_resp_success = false;
+		ret = 0;
+	} else {
+		pr_err("%s: Invalid payload received from CVD\n", __func__);
+
+		ret = -EINVAL;
+	}
+done:
+	pr_debug("%s: Exit, ret=%d\n", __func__, ret);
+
+	return ret;
+}
+
+int voc_get_sound_focus(struct sound_focus_param *soundFocusData)
+{
+	struct voice_data *v = NULL;
+	int ret = -EINVAL;
+	struct voice_session_itr itr;
+
+	pr_debug("%s: Enter\n", __func__);
+
+	mutex_lock(&common.common_lock);
+	voice_itr_init(&itr, ALL_SESSION_VSID);
+	while (voice_itr_get_next_session(&itr, &v)) {
+		if (v) {
+			mutex_lock(&v->lock);
+			if (is_voc_state_active(v->voc_state) &&
+				(v->lch_mode != VOICE_LCH_START) &&
+				!v->disable_topology)
+				ret = voice_send_get_sound_focus_cmd(v,
+							soundFocusData);
+			mutex_unlock(&v->lock);
+		} else {
+			pr_err("%s: invalid session\n", __func__);
+
+			ret = -EINVAL;
+			break;
+		}
+	}
+	mutex_unlock(&common.common_lock);
+	pr_debug("%s: Exit, ret=%d\n", __func__, ret);
+
+	return ret;
+}
+
+static bool is_source_tracking_shared_memory_allocated(void)
+{
+	bool ret;
+
+	pr_debug("%s: Enter\n", __func__);
+
+	if (common.source_tracking_sh_mem.sh_mem_block.client != NULL &&
+	    common.source_tracking_sh_mem.sh_mem_block.handle != NULL)
+		ret = true;
+	else
+		ret = false;
+
+	pr_debug("%s: Exit\n", __func__);
+
+	return ret;
+}
+
+static int voice_alloc_source_tracking_shared_memory(void)
+{
+	int ret = 0;
+
+	pr_debug("%s: Enter\n", __func__);
+
+	ret = msm_audio_ion_alloc("source_tracking_sh_mem_block",
+		&(common.source_tracking_sh_mem.sh_mem_block.client),
+		&(common.source_tracking_sh_mem.sh_mem_block.handle),
+		BUFFER_BLOCK_SIZE,
+		&(common.source_tracking_sh_mem.sh_mem_block.phys),
+		(size_t *)&(common.source_tracking_sh_mem.sh_mem_block.size),
+		&(common.source_tracking_sh_mem.sh_mem_block.data));
+	if (ret < 0) {
+		pr_err("%s: audio ION alloc failed for sh_mem block, ret = %d\n",
+			__func__, ret);
+
+		ret = -EINVAL;
+		goto done;
+	}
+	memset((void *)(common.source_tracking_sh_mem.sh_mem_block.data), 0,
+		   common.source_tracking_sh_mem.sh_mem_block.size);
+
+	pr_debug("%s: sh_mem_block: phys:[%pK], data:[0x%pK], size:[%zd]\n",
+		 __func__,
+		&(common.source_tracking_sh_mem.sh_mem_block.phys),
+		(void *)(common.source_tracking_sh_mem.sh_mem_block.data),
+		(size_t)(common.source_tracking_sh_mem.sh_mem_block.size));
+
+	ret = msm_audio_ion_alloc("source_tracking_sh_mem_table",
+		&(common.source_tracking_sh_mem.sh_mem_table.client),
+		&(common.source_tracking_sh_mem.sh_mem_table.handle),
+		sizeof(struct vss_imemory_table_t),
+		&(common.source_tracking_sh_mem.sh_mem_table.phys),
+		(size_t *)&(common.source_tracking_sh_mem.sh_mem_table.size),
+		&(common.source_tracking_sh_mem.sh_mem_table.data));
+	if (ret < 0) {
+		pr_err("%s: audio ION alloc failed for sh_mem table, ret = %d\n",
+			__func__, ret);
+
+		ret = msm_audio_ion_free(
+			common.source_tracking_sh_mem.sh_mem_block.client,
+			common.source_tracking_sh_mem.sh_mem_block.handle);
+		common.source_tracking_sh_mem.sh_mem_block.client = NULL;
+		common.source_tracking_sh_mem.sh_mem_block.handle = NULL;
+		if (ret < 0)
+			pr_err("%s: Error:%d freeing memory\n", __func__, ret);
+
+		ret = -EINVAL;
+		goto done;
+	}
+	memset((void *)(common.source_tracking_sh_mem.sh_mem_table.data), 0,
+		common.source_tracking_sh_mem.sh_mem_table.size);
+
+	pr_debug("%s sh_mem_table: phys:[%pK], data:[0x%pK], size:[%zd],\n",
+		 __func__,
+		&(common.source_tracking_sh_mem.sh_mem_table.phys),
+		(void *)(common.source_tracking_sh_mem.sh_mem_table.data),
+		(size_t)(common.source_tracking_sh_mem.sh_mem_table.size));
+
+done:
+	pr_debug("%s: Exit, ret=%d\n", __func__, ret);
+
+	return ret;
+}
+
+static int voice_alloc_and_map_source_tracking_shared_memory(
+						struct voice_data *v)
+{
+	int ret = 0;
+
+	pr_debug("%s: Enter\n", __func__);
+
+	ret = voice_alloc_source_tracking_shared_memory();
+	if (ret) {
+		pr_err("%s: Failed to allocate shared memory %d\n",
+			__func__, ret);
+
+		ret = -EINVAL;
+		goto done;
+	}
+
+	ret = voice_map_memory_physical_cmd(v,
+			&(common.source_tracking_sh_mem.sh_mem_table),
+			common.source_tracking_sh_mem.sh_mem_block.phys,
+			common.source_tracking_sh_mem.sh_mem_block.size,
+			VOC_SOURCE_TRACKING_MEM_MAP_TOKEN);
+	if (ret) {
+		pr_err("%s: memory mapping failed %d\n",
+			__func__, ret);
+
+		ret = -EINVAL;
+		goto done;
+	}
+
+done:
+	pr_debug("%s: Exit, ret=%d\n", __func__, ret);
+
+	return ret;
+}
+
+static int voice_unmap_and_free_source_tracking_shared_memory(
+							struct voice_data *v)
+{
+	int ret = 0;
+
+	pr_debug("%s: Enter\n", __func__);
+
+	if (common.source_tracking_sh_mem.mem_handle != 0) {
+		ret = voice_send_mvm_unmap_memory_physical_cmd(v,
+				common.source_tracking_sh_mem.mem_handle);
+		if (ret < 0) {
+			pr_err("%s: Memory_unmap failed err %d\n",
+				 __func__, ret);
+
+			ret = -EINVAL;
+			goto done;
+		}
+	}
+
+	if ((common.source_tracking_sh_mem.sh_mem_block.client == NULL) ||
+	    (common.source_tracking_sh_mem.sh_mem_block.handle == NULL))
+		goto done;
+
+	ret = msm_audio_ion_free(
+			common.source_tracking_sh_mem.sh_mem_block.client,
+			common.source_tracking_sh_mem.sh_mem_block.handle);
+	if (ret < 0) {
+		pr_err("%s: Error:%d freeing memory\n", __func__, ret);
+
+		ret = -EINVAL;
+		goto done;
+	}
+
+done:
+	common.source_tracking_sh_mem.mem_handle = 0;
+	common.source_tracking_sh_mem.sh_mem_block.client = NULL;
+	common.source_tracking_sh_mem.sh_mem_block.handle = NULL;
+	pr_debug("%s: Exit, ret=%d\n", __func__, ret);
+
+	return ret;
+}
+
+static int voice_send_get_source_tracking_cmd(struct voice_data *v,
+			struct source_tracking_param *sourceTrackingData)
+{
+	struct cvp_get_source_tracking_param_cmd_t st_cmd;
+	int ret = 0;
+	void *apr_cvp;
+	u16 cvp_handle;
+	int i;
+
+	pr_debug("%s: Enter\n", __func__);
+
+	if (!v) {
+		pr_err("%s: v is NULL\n", __func__);
+		return -EINVAL;
+	}
+	apr_cvp = common.apr_q6_cvp;
+
+	if (!apr_cvp) {
+		pr_err("%s: apr_cvp is NULL.\n", __func__);
+		return -EINVAL;
+	}
+
+	cvp_handle = voice_get_cvp_handle(v);
+
+	if (!is_source_tracking_shared_memory_allocated()) {
+		ret = voice_alloc_and_map_source_tracking_shared_memory(v);
+		if (ret) {
+			pr_err("%s: Fail in allocating/mapping shared memory\n",
+				__func__);
+
+			ret = -EINVAL;
+			goto done;
+		}
+	}
+	st_cmd.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+					     APR_HDR_LEN(APR_HDR_SIZE),
+					     APR_PKT_VER);
+	st_cmd.hdr.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE,
+					   sizeof(st_cmd) - APR_HDR_SIZE);
+	st_cmd.hdr.src_port = voice_get_idx_for_session(v->session_id);
+	st_cmd.hdr.dest_port = cvp_handle;
+	st_cmd.hdr.token = 0;
+	st_cmd.hdr.opcode = VSS_ISOURCETRACK_CMD_GET_ACTIVITY;
+
+	st_cmd.cvp_get_source_tracking_param.mem_handle =
+				 common.source_tracking_sh_mem.mem_handle;
+	st_cmd.cvp_get_source_tracking_param.mem_address_lsw =
+		lower_32_bits(common.source_tracking_sh_mem.sh_mem_block.phys);
+	st_cmd.cvp_get_source_tracking_param.mem_address_msw =
+		msm_audio_populate_upper_32_bits(common.source_tracking_sh_mem.
+					sh_mem_block.phys);
+	st_cmd.cvp_get_source_tracking_param.mem_size =
+		(uint32_t)common.source_tracking_sh_mem.sh_mem_block.size;
+	pr_debug("%s: mem_handle=0x%x, mem_address_lsw=0x%x, msw=0x%x, mem_size=%d\n",
+		 __func__,
+		 st_cmd.cvp_get_source_tracking_param.mem_handle,
+		 st_cmd.cvp_get_source_tracking_param.mem_address_lsw,
+		 st_cmd.cvp_get_source_tracking_param.mem_address_msw,
+		 (uint32_t)st_cmd.cvp_get_source_tracking_param.mem_size);
+
+	v->cvp_state = CMD_STATUS_FAIL;
+	v->async_err = 0;
+	ret = apr_send_pkt(apr_cvp,
+			   (uint32_t *) &st_cmd);
+	if (ret < 0) {
+		pr_err("%s: Error in sending APR command\n", __func__);
+
+		ret = -EINVAL;
+		goto done;
+	}
+	ret = wait_event_timeout(v->cvp_wait,
+				 (v->cvp_state == CMD_STATUS_SUCCESS),
+				 msecs_to_jiffies(TIMEOUT_MS));
+	if (!ret) {
+		pr_err("%s: wait_event timeout\n", __func__);
+
+		ret = -EINVAL;
+		goto done;
+	}
+
+	if (v->async_err > 0) {
+		pr_err("%s: DSP returned error[%s]\n",
+				__func__, adsp_err_get_err_str(
+				v->async_err));
+		ret = adsp_err_get_lnx_err_code(
+				v->async_err);
+		goto done;
+	}
+
+	if (common.is_source_tracking_resp_success) {
+		for (i = 0; i < MAX_SECTORS; i++) {
+			sourceTrackingData->vad[i] =
+				common.sourceTrackingResponse.voice_active[i];
+			pr_debug("%s: vad[%d] = %d\n",
+				  __func__, i, sourceTrackingData->vad[i]);
+		}
+		sourceTrackingData->doa_speech =
+				common.sourceTrackingResponse.talker_doa;
+		pr_debug("%s: doa_speech = %d\n",
+			  __func__, sourceTrackingData->doa_speech);
+
+		for (i = 0; i < MAX_NOISE_SOURCE_INDICATORS; i++) {
+			sourceTrackingData->doa_noise[i] =
+			 common.sourceTrackingResponse.interferer_doa[i];
+			pr_debug("%s: doa_noise[%d] = %d\n",
+			 __func__, i, sourceTrackingData->doa_noise[i]);
+		}
+		for (i = 0; i < MAX_POLAR_ACTIVITY_INDICATORS; i++) {
+			sourceTrackingData->polar_activity[i] =
+			 common.sourceTrackingResponse.sound_strength[i];
+			pr_debug("%s: polar_activity[%d] = %d\n",
+			 __func__, i, sourceTrackingData->polar_activity[i]);
+		}
+		common.is_source_tracking_resp_success = false;
+		ret = 0;
+	} else {
+		pr_err("%s: Error response received from CVD\n", __func__);
+
+		ret = -EINVAL;
+	}
+done:
+	pr_debug("%s: Exit, ret=%d\n", __func__, ret);
+
+	return ret;
+}
+
+int voc_get_source_tracking(struct source_tracking_param *sourceTrackingData)
+{
+	struct voice_data *v = NULL;
+	int ret = -EINVAL;
+	struct voice_session_itr itr;
+
+	pr_debug("%s: Enter\n", __func__);
+
+	mutex_lock(&common.common_lock);
+
+	voice_itr_init(&itr, ALL_SESSION_VSID);
+	while (voice_itr_get_next_session(&itr, &v)) {
+		if (v != NULL) {
+			mutex_lock(&v->lock);
+			if (is_voc_state_active(v->voc_state) &&
+				(v->lch_mode != VOICE_LCH_START) &&
+				!v->disable_topology)
+				ret = voice_send_get_source_tracking_cmd(v,
+							sourceTrackingData);
+			mutex_unlock(&v->lock);
+		} else {
+			pr_err("%s: invalid session\n", __func__);
+
+			break;
+		}
+	}
+
+	mutex_unlock(&common.common_lock);
+	pr_debug("%s: Exit, ret=%d\n", __func__, ret);
+
+	return ret;
+}
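+
+/*
+ * Illustrative usage sketch, not part of the original patch: fetch the
+ * source tracking data and log the talker direction-of-arrival. Field
+ * names follow the accesses above; the unit of doa_speech is defined
+ * by the CVD interface.
+ */
+static inline void example_log_talker_doa(void)
+{
+	struct source_tracking_param st;
+
+	memset(&st, 0, sizeof(st));
+	if (voc_get_source_tracking(&st) == 0)
+		pr_info("talker DoA = %d\n", st.doa_speech);
+}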
+
+int is_voc_initialized(void)
+{
+	return module_initialized;
+}
+
+static int __init voice_init(void)
+{
+	int rc = 0, i = 0;
+
+	memset(&common, 0, sizeof(struct common_data));
+
+	/* set default value */
+	common.default_mute_val = 0;  /* default is un-mute */
+	common.default_sample_val = 8000;
+	common.default_vol_step_val = 0;
+	common.default_vol_ramp_duration_ms = DEFAULT_VOLUME_RAMP_DURATION;
+	common.default_mute_ramp_duration_ms = DEFAULT_MUTE_RAMP_DURATION;
+
+	/* Initialize EC Ref media format info */
+	common.ec_ref_ext = false;
+	common.ec_media_fmt_info.port_id = AFE_PORT_INVALID;
+	common.ec_media_fmt_info.num_channels = 0;
+	common.ec_media_fmt_info.bits_per_sample = 16;
+	common.ec_media_fmt_info.sample_rate = 8000;
+	memset(&common.ec_media_fmt_info.channel_mapping, 0,
+	       VSS_CHANNEL_MAPPING_SIZE);
+
+	/* Initialize AFE Sidetone Enable */
+	common.sidetone_enable = false;
+
+	/* Initialize MVS info. */
+	common.mvs_info.network_type = VSS_NETWORK_ID_DEFAULT;
+
+	/* Initialize is low memory flag */
+	common.is_destroy_cvd = false;
+
+	/* Initialize CVD version */
+	strlcpy(common.cvd_version, CVD_VERSION_DEFAULT,
+		sizeof(common.cvd_version));
+	/* Initialize Per-Vocoder Calibration flag */
+	common.is_per_vocoder_cal_enabled = false;
+
+	mutex_init(&common.common_lock);
+
+	/* Initialize session id with vsid */
+	init_session_id();
+
+	for (i = 0; i < MAX_VOC_SESSIONS; i++) {
+
+		/* initialize dev_rx and dev_tx */
+		common.voice[i].dev_rx.dev_mute = common.default_mute_val;
+		common.voice[i].dev_tx.dev_mute = common.default_mute_val;
+		common.voice[i].dev_rx.volume_step_value =
+					common.default_vol_step_val;
+		common.voice[i].dev_rx.volume_ramp_duration_ms =
+					common.default_vol_ramp_duration_ms;
+		common.voice[i].dev_rx.dev_mute_ramp_duration_ms =
+					common.default_mute_ramp_duration_ms;
+		common.voice[i].dev_tx.dev_mute_ramp_duration_ms =
+					common.default_mute_ramp_duration_ms;
+		common.voice[i].stream_rx.stream_mute = common.default_mute_val;
+		common.voice[i].stream_tx.stream_mute = common.default_mute_val;
+
+		common.voice[i].dev_tx.port_id = 0x100B;
+		common.voice[i].dev_rx.port_id = 0x100A;
+		common.voice[i].dev_tx.dev_id = 0;
+		common.voice[i].dev_rx.dev_id = 0;
+		common.voice[i].dev_tx.no_of_channels = 0;
+		common.voice[i].dev_rx.no_of_channels = 0;
+		common.voice[i].dev_tx.sample_rate = 8000;
+		common.voice[i].dev_rx.sample_rate = 8000;
+		common.voice[i].dev_tx.bits_per_sample = 16;
+		common.voice[i].dev_rx.bits_per_sample = 16;
+		memset(&common.voice[i].dev_tx.channel_mapping, 0,
+		       VSS_CHANNEL_MAPPING_SIZE);
+		memset(&common.voice[i].dev_rx.channel_mapping, 0,
+		       VSS_CHANNEL_MAPPING_SIZE);
+		common.voice[i].sidetone_gain = 0x512;
+		common.voice[i].dtmf_rx_detect_en = 0;
+		common.voice[i].lch_mode = 0;
+		common.voice[i].disable_topology = false;
+
+		common.voice[i].voc_state = VOC_INIT;
+
+		init_waitqueue_head(&common.voice[i].mvm_wait);
+		init_waitqueue_head(&common.voice[i].cvs_wait);
+		init_waitqueue_head(&common.voice[i].cvp_wait);
+
+		mutex_init(&common.voice[i].lock);
+	}
+
+	if (voice_init_cal_data())
+		pr_err("%s: Could not init cal data!\n", __func__);
+
+	if (rc == 0)
+		module_initialized = true;
+
+	pr_debug("%s: rc=%d\n", __func__, rc);
+	return rc;
+}
+
+device_initcall(voice_init);
+
+static void __exit voice_exit(void)
+{
+	voice_delete_cal_data();
+	free_cal_map_table();
+}
+
+__exitcall(voice_exit);
diff -Nruw linux-4.4.115-fbx/sound/soc/msm./qdsp6v2/q6voice.h linux-4.4.115-fbx/sound/soc/msm/qdsp6v2/q6voice.h
--- linux-4.4.115-fbx/sound/soc/msm./qdsp6v2/q6voice.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/sound/soc/msm/qdsp6v2/q6voice.h	2019-10-29 09:26:26.169227898 +0100
@@ -0,0 +1,1905 @@
+/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#ifndef __QDSP6VOICE_H__
+#define __QDSP6VOICE_H__
+
+#include <linux/qdsp6v2/apr.h>
+#include <linux/qdsp6v2/rtac.h>
+#include <linux/msm_ion.h>
+#include <sound/voice_params.h>
+#include <linux/power_supply.h>
+
+#define MAX_VOC_PKT_SIZE 642
+#define SESSION_NAME_LEN 20
+#define NUM_OF_MEMORY_BLOCKS 1
+#define NUM_OF_BUFFERS 2
+#define VSS_NUM_CHANNELS_MAX 8
+#define VSS_CHANNEL_MAPPING_SIZE (sizeof(uint8_t) * VSS_NUM_CHANNELS_MAX)
+/*
+ * Buffer block size, based on the supported page size.
+ */
+#define BUFFER_BLOCK_SIZE       4096
+
+#define MAX_COL_INFO_SIZE	324
+
+#define VOC_REC_UPLINK		0x00
+#define VOC_REC_DOWNLINK	0x01
+#define VOC_REC_BOTH		0x02
+
+#define VSS_IVERSION_CMD_GET                 0x00011378
+#define VSS_IVERSION_RSP_GET                 0x00011379
+#define CVD_VERSION_STRING_MAX_SIZE          31
+#define CVD_VERSION_DEFAULT                  ""
+#define CVD_VERSION_0_0                      "0.0"
+#define CVD_VERSION_2_1                      "2.1"
+#define CVD_VERSION_2_2                      "2.2"
+#define CVD_VERSION_2_3                      "2.3"
+
+#define CVD_INT_VERSION_DEFAULT              0
+#define CVD_INT_VERSION_0_0                  1
+#define CVD_INT_VERSION_2_1                  2
+#define CVD_INT_VERSION_2_2                  3
+#define CVD_INT_VERSION_2_3                  4
+#define CVD_INT_VERSION_LAST                 CVD_INT_VERSION_2_3
+#define CVD_INT_VERSION_MAX                  (CVD_INT_VERSION_LAST + 1)
+
+struct cvd_version_table {
+	char cvd_ver[CVD_VERSION_STRING_MAX_SIZE];
+	int cvd_ver_int;
+};
+
+int voc_get_cvd_version(char *);
+
+/* Payload structure for the VSS_IVERSION_RSP_GET command response */
+struct vss_iversion_rsp_get_t {
+	char version[CVD_VERSION_STRING_MAX_SIZE];
+	/* NULL-terminated version string */
+};
+
+enum {
+	CVP_VOC_RX_TOPOLOGY_CAL = 0,
+	CVP_VOC_TX_TOPOLOGY_CAL,
+	CVP_VOCPROC_CAL,
+	CVP_VOCVOL_CAL,
+	CVP_VOCDEV_CFG_CAL,
+	CVP_VOCPROC_COL_CAL,
+	CVP_VOCVOL_COL_CAL,
+	CVS_VOCSTRM_CAL,
+	CVS_VOCSTRM_COL_CAL,
+	VOICE_RTAC_INFO_CAL,
+	VOICE_RTAC_APR_CAL,
+	MAX_VOICE_CAL_TYPES
+};
+
+struct voice_header {
+	uint32_t id;
+	uint32_t data_len;
+};
+
+struct voice_init {
+	struct voice_header hdr;
+	void *cb_handle;
+};
+
+/* Stream information payload structure */
+struct stream_data {
+	uint32_t stream_mute;
+	uint32_t stream_mute_ramp_duration_ms;
+};
+
+/* Device information payload structure */
+struct device_data {
+	uint32_t dev_mute;
+	uint32_t sample_rate;
+	uint16_t bits_per_sample;
+	uint8_t  channel_mapping[VSS_NUM_CHANNELS_MAX];
+	uint32_t enabled;
+	uint32_t dev_id;
+	uint32_t port_id;
+	uint32_t volume_step_value;
+	uint32_t volume_ramp_duration_ms;
+	uint32_t dev_mute_ramp_duration_ms;
+	uint32_t no_of_channels;
+};
+
+/*
+ * Format information structure to match
+ * vss_param_endpoint_media_format_info_t
+ */
+struct media_format_info {
+	uint32_t port_id;
+	uint16_t num_channels;
+	uint16_t bits_per_sample;
+	uint32_t sample_rate;
+	uint8_t  channel_mapping[VSS_NUM_CHANNELS_MAX];
+};
+
+enum {
+	VOC_NO_SET_PARAM_TOKEN = 0,
+	VOC_RTAC_SET_PARAM_TOKEN,
+	VOC_SET_MEDIA_FORMAT_PARAM_TOKEN,
+	VOC_SET_PARAM_TOKEN_MAX
+};
+
+struct voice_dev_route_state {
+	u16 rx_route_flag;
+	u16 tx_route_flag;
+};
+
+struct voice_rec_route_state {
+	u16 ul_flag;
+	u16 dl_flag;
+};
+
+enum {
+	VOC_INIT = 0,
+	VOC_RUN,
+	VOC_CHANGE,
+	VOC_RELEASE,
+	VOC_ERROR,
+	VOC_STANDBY,
+};
+
+struct mem_buffer {
+	dma_addr_t		phys;
+	void			*data;
+	uint32_t		size; /* size of buffer */
+};
+
+struct share_mem_buf {
+	struct ion_handle	*handle;
+	struct ion_client	*client;
+	struct mem_buffer	buf[NUM_OF_BUFFERS];
+};
+
+struct mem_map_table {
+	dma_addr_t		phys;
+	void			*data;
+	size_t			size; /* size of buffer */
+	struct ion_handle	*handle;
+	struct ion_client	*client;
+};
+
+/* Common */
+#define VSS_ICOMMON_CMD_SET_UI_PROPERTY 0x00011103
+/* Set a UI property */
+#define VSS_ICOMMON_CMD_MAP_MEMORY   0x00011025
+#define VSS_ICOMMON_CMD_UNMAP_MEMORY 0x00011026
+/* General shared memory; byte-accessible, 4 kB-aligned. */
+#define VSS_ICOMMON_MAP_MEMORY_SHMEM8_4K_POOL  3
+
+struct vss_icommon_cmd_map_memory_t {
+	uint32_t phys_addr;
+	/* Physical address of a memory region; must be at least
+	 *  4 kB aligned.
+	 */
+
+	uint32_t mem_size;
+	/* Number of bytes in the region; should be a multiple of 32. */
+
+	uint16_t mem_pool_id;
+	/* Type of memory being provided. The memory ID implicitly defines
+	 *  the characteristics of the memory. The characteristics might include
+	 *  alignment type, permissions, etc.
+	 * Memory pool ID. Possible values:
+	 * 3 -- VSS_ICOMMON_MEM_TYPE_SHMEM8_4K_POOL.
+	 */
+} __packed;
+
+struct vss_icommon_cmd_unmap_memory_t {
+	uint32_t phys_addr;
+	/* Physical address of a memory region; must be at least
+	 *  4 kB aligned.
+	 */
+} __packed;
+
+struct vss_map_memory_cmd {
+	struct apr_hdr hdr;
+	struct vss_icommon_cmd_map_memory_t vss_map_mem;
+} __packed;
+
+struct vss_unmap_memory_cmd {
+	struct apr_hdr hdr;
+	struct vss_icommon_cmd_unmap_memory_t vss_unmap_mem;
+} __packed;
+
+struct vss_param_endpoint_media_format_info_t {
+	/* AFE port ID to which this media format corresponds to. */
+	uint32_t port_id;
+	/*
+	 * Number of channels of data.
+	 * Supported values: 1 to 8
+	 */
+	uint16_t num_channels;
+	/*
+	 * Bits per sample of data.
+	 * Supported values: 16 and 24
+	 */
+	uint16_t bits_per_sample;
+	/*
+	 * Sampling rate in Hz.
+	 * Supported values: 8000, 11025, 16000, 22050, 24000, 32000,
+	 * 44100, 48000, 88200, 96000, 176400, and 192000
+	 */
+	uint32_t sample_rate;
+	/*
+	 * The channel[i] mapping describes channel i. Each element i
+	 * of the array describes channel i inside the data buffer. An
+	 * unused or unknown channel is set to 0.
+	 */
+	uint8_t channel_mapping[VSS_NUM_CHANNELS_MAX];
+} __packed;
+
+struct vss_icommon_param_data_t {
+	/* Valid ID of the module. */
+	uint32_t module_id;
+	/* Valid ID of the parameter. */
+	uint32_t param_id;
+	/*
+	 * Data size of the structure relating to the param_id/module_id
+	 * combination in uint8_t bytes.
+	 */
+	uint16_t param_size;
+	/* This field must be set to zero. */
+	uint16_t reserved;
+	/*
+	 * Parameter data payload when inband. Should have size param_size.
+	 * Bit size of payload must be a multiple of 4.
+	 */
+	union {
+		struct vss_param_endpoint_media_format_info_t media_format_info;
+	};
+} __packed;
+
+/* Payload structure for the VSS_ICOMMON_CMD_SET_PARAM_V2 command. */
+struct vss_icommon_cmd_set_param_v2_t {
+	/*
+	 * Pointer to the unique identifier for an address (physical/virtual).
+	 *
+	 * If the parameter data payload is within the message payload
+	 * (in-band), set this field to 0. The parameter data begins at the
+	 * specified data payload address.
+	 *
+	 * If the parameter data is out-of-band, this field is the handle to
+	 * the physical address in the shared memory that holds the parameter
+	 * data.
+	 */
+	uint32_t mem_handle;
+	/*
+	 * Location of the parameter data payload.
+	 *
+	 * The payload is an array of vss_icommon_param_data_t. If the
+	 * mem_handle is 0, this field is ignored.
+	 */
+	uint64_t mem_address;
+	/* Size of the parameter data payload in bytes. */
+	uint32_t mem_size;
+	/* Parameter data payload when the data is inband. */
+	struct vss_icommon_param_data_t param_data;
+} __packed;
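+
+/*
+ * Illustrative sketch, not part of the original patch: populating an
+ * in-band VSS_ICOMMON_CMD_SET_PARAM_V2 payload. Per the comments above,
+ * mem_handle is 0 for in-band data, in which case the out-of-band
+ * address field is ignored; the mem_size value here is an assumption.
+ */
+static inline void example_fill_inband_set_param(
+	struct vss_icommon_cmd_set_param_v2_t *cmd,
+	uint32_t module_id, uint32_t param_id, uint16_t param_size)
+{
+	cmd->mem_handle = 0;		/* 0 selects in-band parameter data */
+	cmd->mem_address = 0;		/* ignored when mem_handle is 0 */
+	cmd->mem_size = sizeof(cmd->param_data); /* assumed in-band size */
+	cmd->param_data.module_id = module_id;
+	cmd->param_data.param_id = param_id;
+	cmd->param_data.param_size = param_size;
+	cmd->param_data.reserved = 0;	/* must be set to zero */
+}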
+
+/* TO MVM commands */
+#define VSS_IMVM_CMD_CREATE_PASSIVE_CONTROL_SESSION	0x000110FF
+/**< No payload. Wait for APRV2_IBASIC_RSP_RESULT response. */
+
+#define VSS_IMVM_CMD_SET_POLICY_DUAL_CONTROL	0x00011327
+/*
+ * VSS_IMVM_CMD_SET_POLICY_DUAL_CONTROL
+ * Description: This command is required to let MVM know
+ * who is in control of session.
+ * Payload: Defined by vss_imvm_cmd_set_policy_dual_control_t.
+ * Result: Wait for APRV2_IBASIC_RSP_RESULT response.
+ */
+
+#define VSS_IMVM_CMD_CREATE_FULL_CONTROL_SESSION	0x000110FE
+/* Create a new full control MVM session. */
+
+#define APRV2_IBASIC_CMD_DESTROY_SESSION		0x0001003C
+/**< No payload. Wait for APRV2_IBASIC_RSP_RESULT response. */
+
+#define VSS_IMVM_CMD_ATTACH_STREAM			0x0001123C
+/* Attach a stream to the MVM. */
+
+#define VSS_IMVM_CMD_DETACH_STREAM			0x0001123D
+/* Detach a stream from the MVM. */
+
+#define VSS_IMVM_CMD_ATTACH_VOCPROC		       0x0001123E
+/* Attach a vocproc to the MVM. The MVM will symmetrically connect this vocproc
+ * to all the streams currently attached to it.
+ */
+
+#define VSS_IMVM_CMD_DETACH_VOCPROC			0x0001123F
+/* Detach a vocproc from the MVM. The MVM will symmetrically disconnect this
+ * vocproc from all the streams to which it is currently attached.
+ */
+
+#define VSS_IMVM_CMD_START_VOICE			0x00011190
+/**< No payload. Wait for APRV2_IBASIC_RSP_RESULT response. */
+
+#define VSS_IMVM_CMD_STANDBY_VOICE                       0x00011191
+/**< No payload. Wait for APRV2_IBASIC_RSP_RESULT response. */
+
+#define VSS_IMVM_CMD_STOP_VOICE				0x00011192
+/**< No payload. Wait for APRV2_IBASIC_RSP_RESULT response. */
+
+#define VSS_IMVM_CMD_PAUSE_VOICE			0x0001137D
+/* No payload. Wait for APRV2_IBASIC_RSP_RESULT response. */
+
+#define VSS_ISTREAM_CMD_ATTACH_VOCPROC			0x000110F8
+/**< Wait for APRV2_IBASIC_RSP_RESULT response. */
+
+#define VSS_ISTREAM_CMD_DETACH_VOCPROC			0x000110F9
+/**< Wait for APRV2_IBASIC_RSP_RESULT response. */
+
+
+#define VSS_ISTREAM_CMD_SET_TTY_MODE			0x00011196
+/**< Wait for APRV2_IBASIC_RSP_RESULT response. */
+
+#define VSS_ICOMMON_CMD_SET_NETWORK			0x0001119C
+/* Set the network type. */
+
+#define VSS_ICOMMON_CMD_SET_VOICE_TIMING		0x000111E0
+/* Set the voice timing parameters. */
+
+#define VSS_IMEMORY_CMD_MAP_PHYSICAL			0x00011334
+#define VSS_IMEMORY_RSP_MAP				0x00011336
+#define VSS_IMEMORY_CMD_UNMAP				0x00011337
+#define VSS_IMVM_CMD_SET_CAL_NETWORK			0x0001137A
+#define VSS_IMVM_CMD_SET_CAL_MEDIA_TYPE		0x0001137B
+#define VSS_IHDVOICE_CMD_ENABLE				0x000130A2
+#define VSS_IHDVOICE_CMD_DISABLE			0x000130A3
+
+enum msm_audio_voc_rate {
+		VOC_0_RATE, /* Blank frame */
+		VOC_8_RATE, /* 1/8 rate    */
+		VOC_4_RATE, /* 1/4 rate    */
+		VOC_2_RATE, /* 1/2 rate    */
+		VOC_1_RATE,  /* Full rate   */
+		VOC_8_RATE_NC  /* Noncritical 1/8 rate   */
+};
+
+struct vss_istream_cmd_set_tty_mode_t {
+	uint32_t mode;
+	/**<
+	* TTY mode.
+	*
+	* 0 : TTY disabled
+	* 1 : HCO
+	* 2 : VCO
+	* 3 : FULL
+	*/
+} __packed;
+
+struct vss_istream_cmd_attach_vocproc_t {
+	uint16_t handle;
+	/**< Handle of vocproc being attached. */
+} __packed;
+
+struct vss_istream_cmd_detach_vocproc_t {
+	uint16_t handle;
+	/**< Handle of vocproc being detached. */
+} __packed;
+
+struct vss_imvm_cmd_attach_stream_t {
+	uint16_t handle;
+	/* The stream handle to attach. */
+} __packed;
+
+struct vss_imvm_cmd_detach_stream_t {
+	uint16_t handle;
+	/* The stream handle to detach. */
+} __packed;
+
+struct vss_icommon_cmd_set_network_t {
+	uint32_t network_id;
+	/* Network ID. (Refer to VSS_NETWORK_ID_XXX). */
+} __packed;
+
+struct vss_icommon_cmd_set_voice_timing_t {
+	uint16_t mode;
+	/*
+	 * The vocoder frame synchronization mode.
+	 *
+	 * 0 : No frame sync.
+	 * 1 : Hard VFR (20ms Vocoder Frame Reference interrupt).
+	 */
+	uint16_t enc_offset;
+	/*
+	 * The offset in microseconds from the VFR to deliver a Tx vocoder
+	 * packet. The offset should be less than 20000us.
+	 */
+	uint16_t dec_req_offset;
+	/*
+	 * The offset in microseconds from the VFR to request for an Rx vocoder
+	 * packet. The offset should be less than 20000us.
+	 */
+	uint16_t dec_offset;
+	/*
+	 * The offset in microseconds from the VFR to indicate the deadline to
+	 * receive an Rx vocoder packet. The offset should be less than 20000us.
+	 * Rx vocoder packets received after this deadline are not guaranteed to
+	 * be processed.
+	 */
+} __packed;
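+
+/*
+ * Illustrative sketch, not part of the original patch: hard-VFR timing
+ * with placeholder offsets. Per the comments above, mode 1 selects the
+ * 20 ms Vocoder Frame Reference and every offset must stay below
+ * 20000 us; the specific offsets here are arbitrary examples.
+ */
+static inline void example_fill_voice_timing(
+	struct vss_icommon_cmd_set_voice_timing_t *timing)
+{
+	timing->mode = 1;		/* hard VFR (20 ms interrupt) */
+	timing->enc_offset = 8000;	/* deliver Tx packet 8 ms after VFR */
+	timing->dec_req_offset = 3300;	/* request Rx packet 3.3 ms after VFR */
+	timing->dec_offset = 8300;	/* Rx packet deadline 8.3 ms after VFR */
+}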
+
+struct vss_imvm_cmd_create_control_session_t {
+	char name[SESSION_NAME_LEN];
+	/*
+	* A variable-sized stream name.
+	*
+	* The stream name size is the payload size minus the size of the other
+	* fields.
+	*/
+} __packed;
+
+
+struct vss_imvm_cmd_set_policy_dual_control_t {
+	bool enable_flag;
+	/* Set to TRUE to enable modem state machine control */
+} __packed;
+
+struct mvm_attach_vocproc_cmd {
+	struct apr_hdr hdr;
+	struct vss_istream_cmd_attach_vocproc_t mvm_attach_cvp_handle;
+} __packed;
+
+struct mvm_detach_vocproc_cmd {
+	struct apr_hdr hdr;
+	struct vss_istream_cmd_detach_vocproc_t mvm_detach_cvp_handle;
+} __packed;
+
+struct mvm_create_ctl_session_cmd {
+	struct apr_hdr hdr;
+	struct vss_imvm_cmd_create_control_session_t mvm_session;
+} __packed;
+
+struct mvm_modem_dual_control_session_cmd {
+	struct apr_hdr hdr;
+	struct vss_imvm_cmd_set_policy_dual_control_t voice_ctl;
+} __packed;
+
+struct mvm_set_tty_mode_cmd {
+	struct apr_hdr hdr;
+	struct vss_istream_cmd_set_tty_mode_t tty_mode;
+} __packed;
+
+struct mvm_attach_stream_cmd {
+	struct apr_hdr hdr;
+	struct vss_imvm_cmd_attach_stream_t attach_stream;
+} __packed;
+
+struct mvm_detach_stream_cmd {
+	struct apr_hdr hdr;
+	struct vss_imvm_cmd_detach_stream_t detach_stream;
+} __packed;
+
+struct mvm_set_network_cmd {
+	struct apr_hdr hdr;
+	struct vss_icommon_cmd_set_network_t network;
+} __packed;
+
+struct mvm_set_voice_timing_cmd {
+	struct apr_hdr hdr;
+	struct vss_icommon_cmd_set_voice_timing_t timing;
+} __packed;
+
+struct mvm_set_hd_enable_cmd {
+	struct apr_hdr hdr;
+} __packed;
+
+struct vss_imemory_table_descriptor_t {
+	uint32_t mem_address_lsw;
+	uint32_t mem_address_msw;
+	/*
+	 * Base physical address of the table. The address must be aligned
+	 * to LCM( cache_line_size, page_align, max_data_width ), where the
+	 * attributes are specified in #VSS_IMEMORY_CMD_MAP_PHYSICAL, and
+	 * LCM = Least Common Multiple. The table at the address must have
+	 * the format specified by #vss_imemory_table_t.
+	 */
+	uint32_t mem_size;
+	/* Size in bytes of the table. */
+} __packed;
+
+struct vss_imemory_block_t {
+	uint64_t mem_address;
+	/*
+	 * Base address of the memory block. The address is virtual for virtual
+	 * memory and physical for physical memory. The address must be aligned
+	 * to LCM( cache_line_size, page_align, max_data_width ), where the
+	 * attributes are specified in VSS_IMEMORY_CMD_MAP_VIRTUAL or
+	 * VSS_IMEMORY_CMD_MAP_PHYSICAL, and LCM = Least Common Multiple.
+	 */
+	uint32_t mem_size;
+	/*
+	 * Size in bytes of the memory block. The size must be multiple of
+	 * page_align, where page_align is specified in
+	 * VSS_IMEMORY_CMD_MAP_VIRTUAL or #VSS_IMEMORY_CMD_MAP_PHYSICAL.
+	 */
+} __packed;
+
+struct vss_imemory_table_t {
+	struct vss_imemory_table_descriptor_t next_table_descriptor;
+	/*
+	 * Specifies the next table. If there is no next table,
+	 * set the size of the table to 0 and the table address is ignored.
+	 */
+	struct vss_imemory_block_t blocks[NUM_OF_MEMORY_BLOCKS];
+	/* Specifies one or more memory blocks. */
+} __packed;
+
+struct vss_imemory_cmd_map_physical_t {
+	struct apr_hdr hdr;
+	struct vss_imemory_table_descriptor_t table_descriptor;
+	bool is_cached;
+	/*
+	 * Indicates cached or uncached memory. Supported values:
+	 * TRUE - Cached.
+	 */
+	uint16_t cache_line_size;
+	/* Cache line size in bytes. Supported values: 128 */
+	uint32_t access_mask;
+	/*
+	 * CVD's access permission to the memory while it is mapped.
+	 * Supported values:
+	 * bit 0 - If set, the memory is readable.
+	 * bit 1 - If set, the memory is writable.
+	 */
+	uint32_t page_align;
+	/* Page frame alignment in bytes. Supported values: 4096 */
+	uint8_t min_data_width;
+	/*
+	 * Minimum native data type width in bits that can be accessed.
+	 * Supported values: 8
+	 */
+	uint8_t max_data_width;
+	/*
+	 * Maximum native data type width in bits that can be accessed.
+	 * Supported values: 64
+	 */
+} __packed;
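+
+/*
+ * Illustrative sketch, not part of the original patch: filling the
+ * fixed attributes of a VSS_IMEMORY_CMD_MAP_PHYSICAL payload with the
+ * only values the comments above list as supported. The APR header and
+ * the table descriptor are assumed to be filled in by the caller.
+ */
+static inline void example_fill_map_physical(
+	struct vss_imemory_cmd_map_physical_t *cmd)
+{
+	cmd->is_cached = true;		/* only cached memory is supported */
+	cmd->cache_line_size = 128;	/* supported value: 128 */
+	cmd->access_mask = 0x3;		/* bit 0: readable, bit 1: writable */
+	cmd->page_align = 4096;		/* supported value: 4096 */
+	cmd->min_data_width = 8;	/* supported value: 8 */
+	cmd->max_data_width = 64;	/* supported value: 64 */
+}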
+
+struct vss_imvm_cmd_set_cal_network_t {
+	struct apr_hdr hdr;
+	uint32_t network_id;
+} __packed;
+
+struct vss_imvm_cmd_set_cal_media_type_t {
+	struct apr_hdr hdr;
+	uint32_t media_id;
+} __packed;
+
+struct vss_imemory_cmd_unmap_t {
+	struct apr_hdr hdr;
+	uint32_t mem_handle;
+} __packed;
+
+/* TO CVS commands */
+#define VSS_ISTREAM_CMD_CREATE_PASSIVE_CONTROL_SESSION	0x00011140
+/**< Wait for APRV2_IBASIC_RSP_RESULT response. */
+
+#define VSS_ISTREAM_CMD_CREATE_FULL_CONTROL_SESSION	0x000110F7
+/* Create a new full control stream session. */
+
+#define APRV2_IBASIC_CMD_DESTROY_SESSION		0x0001003C
+
+/*
+ * This command changes the mute setting. The new mute setting will
+ * be applied over the specified ramp duration.
+ */
+#define VSS_IVOLUME_CMD_MUTE_V2				0x0001138B
+
+#define VSS_ISTREAM_CMD_REGISTER_CALIBRATION_DATA_V2    0x00011369
+
+#define VSS_ISTREAM_CMD_DEREGISTER_CALIBRATION_DATA     0x0001127A
+
+#define VSS_ISTREAM_CMD_REGISTER_STATIC_CALIBRATION_DATA        0x0001307D
+#define VSS_ISTREAM_CMD_DEREGISTER_STATIC_CALIBRATION_DATA      0x0001307E
+
+#define VSS_ISTREAM_CMD_SET_MEDIA_TYPE			0x00011186
+/* Set media type on the stream. */
+
+#define VSS_ISTREAM_EVT_SEND_ENC_BUFFER			0x00011015
+/* Event sent by the stream to its client to provide an encoded packet. */
+
+#define VSS_ISTREAM_EVT_REQUEST_DEC_BUFFER		0x00011017
+/* Event sent by the stream to its client requesting for a decoder packet.
+ * The client should respond with a VSS_ISTREAM_EVT_SEND_DEC_BUFFER event.
+ */
+
+#define VSS_ISTREAM_EVT_OOB_NOTIFY_DEC_BUFFER_REQUEST	0x0001136E
+
+#define VSS_ISTREAM_EVT_SEND_DEC_BUFFER			0x00011016
+/* Event sent by the client to the stream in response to a
+ * VSS_ISTREAM_EVT_REQUEST_DEC_BUFFER event, providing a decoder packet.
+ */
+
+#define VSS_ISTREAM_CMD_VOC_AMR_SET_ENC_RATE		0x0001113E
+/* Set AMR encoder rate. */
+
+#define VSS_ISTREAM_CMD_VOC_AMRWB_SET_ENC_RATE		0x0001113F
+/* Set AMR-WB encoder rate. */
+
+#define VSS_ISTREAM_CMD_CDMA_SET_ENC_MINMAX_RATE	0x00011019
+/* Set encoder minimum and maximum rate. */
+
+#define VSS_ISTREAM_CMD_SET_ENC_DTX_MODE		0x0001101D
+/* Set encoder DTX mode. */
+
+#define MODULE_ID_VOICE_MODULE_ST			0x00010EE3
+#define VOICE_PARAM_MOD_ENABLE				0x00010E00
+#define MOD_ENABLE_PARAM_LEN				4
+
+#define VSS_IPLAYBACK_CMD_START				0x000112BD
+/* Start in-call music delivery on the Tx voice path. */
+
+#define VSS_IPLAYBACK_CMD_STOP				0x00011239
+/* Stop the in-call music delivery on the Tx voice path. */
+
+#define VSS_IPLAYBACK_PORT_ID_DEFAULT			0x0000FFFF
+/* Default AFE port ID. */
+
+#define VSS_IPLAYBACK_PORT_ID_VOICE			0x00008005
+/* AFE port ID for VOICE 1. */
+
+#define VSS_IPLAYBACK_PORT_ID_VOICE2			0x00008002
+/* AFE port ID for VOICE 2. */
+
+#define VSS_IRECORD_CMD_START				0x000112BE
+/* Start in-call conversation recording. */
+#define VSS_IRECORD_CMD_STOP				0x00011237
+/* Stop in-call conversation recording. */
+
+#define VSS_IRECORD_PORT_ID_DEFAULT			0x0000FFFF
+/* Default AFE port ID. */
+
+#define VSS_IRECORD_TAP_POINT_NONE			0x00010F78
+/* Indicates no tapping for specified path. */
+
+#define VSS_IRECORD_TAP_POINT_STREAM_END		0x00010F79
+/* Indicates that specified path should be tapped at the end of the stream. */
+
+#define VSS_IRECORD_MODE_TX_RX_STEREO			0x00010F7A
+/* Select Tx on left channel and Rx on right channel. */
+
+#define VSS_IRECORD_MODE_TX_RX_MIXING			0x00010F7B
+/* Select mixed Tx and Rx paths. */
+
+#define VSS_PARAM_TX_PORT_ENDPOINT_MEDIA_INFO		0x00013253
+
+#define VSS_PARAM_RX_PORT_ENDPOINT_MEDIA_INFO		0x00013254
+
+#define VSS_PARAM_EC_REF_PORT_ENDPOINT_MEDIA_INFO	0x00013255
+
+#define VSS_MODULE_CVD_GENERIC				0x0001316E
+
+#define VSS_ISTREAM_EVT_NOT_READY			0x000110FD
+
+#define VSS_ISTREAM_EVT_READY				0x000110FC
+
+#define VSS_ISTREAM_EVT_OOB_NOTIFY_DEC_BUFFER_READY	0x0001136F
+/* Notify the DSP that the decoder buffer is ready. */
+
+#define VSS_ISTREAM_EVT_OOB_NOTIFY_ENC_BUFFER_READY	0x0001136C
+/* DSP notifies the client that the encoder buffer is ready. */
+
+#define VSS_ISTREAM_EVT_OOB_NOTIFY_ENC_BUFFER_CONSUMED	0x0001136D
+/* Notify the DSP that the encoder buffer has been consumed. */
+
+#define VSS_ISTREAM_CMD_SET_OOB_PACKET_EXCHANGE_CONFIG	0x0001136B
+
+#define VSS_ISTREAM_PACKET_EXCHANGE_MODE_INBAND	0
+/* In-band packet exchange mode. */
+
+#define VSS_ISTREAM_PACKET_EXCHANGE_MODE_OUT_OF_BAND	1
+/* Out-of-band packet exchange mode. */
+
+#define VSS_ISTREAM_CMD_SET_PACKET_EXCHANGE_MODE	0x0001136A
+
+struct vss_iplayback_cmd_start_t {
+	uint16_t port_id;
+	/*
+	 * AFE Port ID from which the audio samples are available.
+	 * To use the default AFE pseudo port (0x8005), set this value to
+	 * #VSS_IPLAYBACK_PORT_ID_DEFAULT.
+	 */
+}  __packed;
+
+struct vss_irecord_cmd_start_t {
+	uint32_t rx_tap_point;
+	/* Tap point to use on the Rx path. Supported values are:
+	 * VSS_IRECORD_TAP_POINT_NONE : Do not record Rx path.
+	 * VSS_IRECORD_TAP_POINT_STREAM_END : Rx tap point is at the end of
+	 * the stream.
+	 */
+	uint32_t tx_tap_point;
+	/* Tap point to use on the Tx path. Supported values are:
+	 * VSS_IRECORD_TAP_POINT_NONE : Do not record tx path.
+	 * VSS_IRECORD_TAP_POINT_STREAM_END : Tx tap point is at the end of
+	 * the stream.
+	 */
+	uint16_t port_id;
+	/* AFE Port ID to which the conversation recording stream needs to be
+	 * sent. Set this to #VSS_IRECORD_PORT_ID_DEFAULT to use default AFE
+	 * pseudo ports (0x8003 for Rx and 0x8004 for Tx).
+	 */
+	uint32_t mode;
+	/* Recording Mode. The mode parameter value is ignored if the port_id
+	 * is set to #VSS_IRECORD_PORT_ID_DEFAULT.
+	 * The supported values:
+	 * #VSS_IRECORD_MODE_TX_RX_STEREO
+	 * #VSS_IRECORD_MODE_TX_RX_MIXING
+	 */
+} __packed;
+
+struct vss_istream_cmd_create_passive_control_session_t {
+	char name[SESSION_NAME_LEN];
+	/**<
+	* A variable-sized stream name.
+	*
+	* The stream name size is the payload size minus the size of the other
+	* fields.
+	*/
+} __packed;
+
+#define VSS_IVOLUME_DIRECTION_TX	0
+#define VSS_IVOLUME_DIRECTION_RX	1
+
+#define VSS_IVOLUME_MUTE_OFF		0
+#define VSS_IVOLUME_MUTE_ON		1
+
+#define DEFAULT_MUTE_RAMP_DURATION	500
+#define DEFAULT_VOLUME_RAMP_DURATION	20
+#define MAX_RAMP_DURATION		5000
+
+struct vss_ivolume_cmd_mute_v2_t {
+	uint16_t direction;
+	/*
+	 * The direction field sets the direction to apply the mute command.
+	 * The Supported values:
+	 * VSS_IVOLUME_DIRECTION_TX
+	 * VSS_IVOLUME_DIRECTION_RX
+	 */
+	uint16_t mute_flag;
+	/*
+	 * Turn mute on or off. The Supported values:
+	 * VSS_IVOLUME_MUTE_OFF
+	 * VSS_IVOLUME_MUTE_ON
+	 */
+	uint16_t ramp_duration_ms;
+	/*
+	 * Mute change ramp duration in milliseconds.
+	 * The Supported values: 0 to 5000.
+	 */
+} __packed;
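+
+/*
+ * Illustrative sketch, not part of the original patch: muting the Rx
+ * direction with the default ramp, using only values defined above.
+ */
+static inline void example_fill_rx_mute(struct vss_ivolume_cmd_mute_v2_t *m)
+{
+	m->direction = VSS_IVOLUME_DIRECTION_RX;
+	m->mute_flag = VSS_IVOLUME_MUTE_ON;
+	m->ramp_duration_ms = DEFAULT_MUTE_RAMP_DURATION;
+}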
+
+struct vss_istream_cmd_create_full_control_session_t {
+	uint16_t direction;
+	/*
+	 * Stream direction.
+	 *
+	 * 0 : TX only
+	 * 1 : RX only
+	 * 2 : TX and RX
+	 * 3 : TX and RX loopback
+	 */
+	uint32_t enc_media_type;
+	/* Tx vocoder type. (Refer to VSS_MEDIA_ID_XXX). */
+	uint32_t dec_media_type;
+	/* Rx vocoder type. (Refer to VSS_MEDIA_ID_XXX). */
+	uint32_t network_id;
+	/* Network ID. (Refer to VSS_NETWORK_ID_XXX). */
+	char name[SESSION_NAME_LEN];
+	/*
+	 * A variable-sized stream name.
+	 *
+	 * The stream name size is the payload size minus the size of the other
+	 * fields.
+	 */
+} __packed;
+
+struct vss_istream_cmd_set_media_type_t {
+	uint32_t rx_media_id;
+	/* Set the Rx vocoder type. (Refer to VSS_MEDIA_ID_XXX). */
+	uint32_t tx_media_id;
+	/* Set the Tx vocoder type. (Refer to VSS_MEDIA_ID_XXX). */
+} __packed;
+
+struct vss_istream_evt_send_enc_buffer_t {
+	uint32_t media_id;
+	/* Media ID of the packet. */
+	uint8_t packet_data[MAX_VOC_PKT_SIZE];
+	/* Packet data buffer. */
+} __packed;
+
+struct vss_istream_evt_send_dec_buffer_t {
+	uint32_t media_id;
+	/* Media ID of the packet. */
+	uint8_t packet_data[MAX_VOC_PKT_SIZE];
+	/* Packet data. */
+} __packed;
+
+struct vss_istream_cmd_voc_amr_set_enc_rate_t {
+	uint32_t mode;
+	/* Set the AMR encoder rate.
+	 *
+	 * 0x00000000 : 4.75 kbps
+	 * 0x00000001 : 5.15 kbps
+	 * 0x00000002 : 5.90 kbps
+	 * 0x00000003 : 6.70 kbps
+	 * 0x00000004 : 7.40 kbps
+	 * 0x00000005 : 7.95 kbps
+	 * 0x00000006 : 10.2 kbps
+	 * 0x00000007 : 12.2 kbps
+	 */
+} __packed;
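+
+/*
+ * Illustrative helper, not part of the original patch: maps the AMR
+ * encoder mode values documented above to their bit rates in bit/s.
+ */
+static inline uint32_t example_amr_mode_to_bps(uint32_t mode)
+{
+	static const uint32_t bps[] = {
+		4750, 5150, 5900, 6700, 7400, 7950, 10200, 12200,
+	};
+
+	return (mode < 8) ? bps[mode] : 0;
+}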
+
+struct vss_istream_cmd_voc_amrwb_set_enc_rate_t {
+	uint32_t mode;
+	/* Set the AMR-WB encoder rate.
+	 *
+	 * 0x00000000 :  6.60 kbps
+	 * 0x00000001 :  8.85 kbps
+	 * 0x00000002 : 12.65 kbps
+	 * 0x00000003 : 14.25 kbps
+	 * 0x00000004 : 15.85 kbps
+	 * 0x00000005 : 18.25 kbps
+	 * 0x00000006 : 19.85 kbps
+	 * 0x00000007 : 23.05 kbps
+	 * 0x00000008 : 23.85 kbps
+	 */
+} __packed;
+
+struct vss_istream_cmd_cdma_set_enc_minmax_rate_t {
+	uint16_t min_rate;
+	/* Set the lower bound encoder rate.
+	 *
+	 * 0x0000 : Blank frame
+	 * 0x0001 : Eighth rate
+	 * 0x0002 : Quarter rate
+	 * 0x0003 : Half rate
+	 * 0x0004 : Full rate
+	 */
+	uint16_t max_rate;
+	/* Set the upper bound encoder rate.
+	 *
+	 * 0x0000 : Blank frame
+	 * 0x0001 : Eighth rate
+	 * 0x0002 : Quarter rate
+	 * 0x0003 : Half rate
+	 * 0x0004 : Full rate
+	 */
+} __packed;
+
+struct vss_istream_cmd_set_enc_dtx_mode_t {
+	uint32_t enable;
+	/* Toggle DTX on or off.
+	 *
+	 * 0 : Disables DTX
+	 * 1 : Enables DTX
+	 */
+} __packed;
+
+struct vss_istream_cmd_register_calibration_data_v2_t {
+	uint32_t cal_mem_handle;
+	/* Handle to the shared memory that holds the calibration data. */
+	uint32_t cal_mem_address_lsw;
+	uint32_t cal_mem_address_msw;
+	/* Location of calibration data. */
+	uint32_t cal_mem_size;
+	/* Size of the calibration data in bytes. */
+	uint8_t column_info[MAX_COL_INFO_SIZE];
+	/*
+	 * Column info contains the number of columns and the array of columns
+	 * in the calibration table. The order in which the columns are provided
+	 * here must match the order in which they exist in the calibration
+	 * table provided.
+	 */
+} __packed;
+
+struct vss_icommon_cmd_set_ui_property_enable_t {
+	uint32_t module_id;
+	/* Unique ID of the module. */
+	uint32_t param_id;
+	/* Unique ID of the parameter. */
+	uint16_t param_size;
+	/* Size of the parameter in bytes: MOD_ENABLE_PARAM_LEN */
+	uint16_t reserved;
+	/* Reserved; set to 0. */
+	uint16_t enable;
+	uint16_t reserved_field;
+	/* Reserved, set to 0. */
+};
+
+/*
+ * Event sent by the stream to the client that enables Rx DTMF
+ * detection whenever DTMF is detected in the Rx path.
+ *
+ * The DTMF detection feature can only be used to detect DTMF
+ * frequencies as listed in the vss_istream_evt_rx_dtmf_detected_t
+ * structure.
+ */
+
+#define VSS_ISTREAM_EVT_RX_DTMF_DETECTED 0x0001101A
+
+struct vss_istream_cmd_set_rx_dtmf_detection {
+	/*
+	 * Enables/disables Rx DTMF detection
+	 *
+	 * Possible values are
+	 * 0 - disable
+	 * 1 - enable
+	 *
+	 */
+	uint32_t enable;
+};
+
+#define VSS_ISTREAM_CMD_SET_RX_DTMF_DETECTION 0x00011027
+
+struct vss_istream_evt_rx_dtmf_detected {
+	uint16_t low_freq;
+	/*
+	 * Detected low frequency. Possible values:
+	 * 697 Hz
+	 * 770 Hz
+	 * 852 Hz
+	 * 941 Hz
+	 */
+	uint16_t high_freq;
+	/*
+	 * Detected high frequency. Possible values:
+	 * 1209 Hz
+	 * 1336 Hz
+	 * 1477 Hz
+	 * 1633 Hz
+	 */
+};
+
+struct cvs_set_rx_dtmf_detection_cmd {
+	struct apr_hdr hdr;
+	struct vss_istream_cmd_set_rx_dtmf_detection cvs_dtmf_det;
+} __packed;
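+
+/*
+ * Illustrative helper, not part of the original patch: maps a detected
+ * low/high frequency pair from vss_istream_evt_rx_dtmf_detected to the
+ * standard DTMF keypad digit, or '\0' if the pair is not recognized.
+ */
+static inline char example_dtmf_digit(uint16_t low_freq, uint16_t high_freq)
+{
+	static const uint16_t rows[4] = { 697, 770, 852, 941 };
+	static const uint16_t cols[4] = { 1209, 1336, 1477, 1633 };
+	static const char keys[4][4] = {
+		{ '1', '2', '3', 'A' },
+		{ '4', '5', '6', 'B' },
+		{ '7', '8', '9', 'C' },
+		{ '*', '0', '#', 'D' },
+	};
+	int r, c;
+
+	for (r = 0; r < 4; r++)
+		for (c = 0; c < 4; c++)
+			if (rows[r] == low_freq && cols[c] == high_freq)
+				return keys[r][c];
+	return '\0';
+}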
+
+
+struct cvs_create_passive_ctl_session_cmd {
+	struct apr_hdr hdr;
+	struct vss_istream_cmd_create_passive_control_session_t cvs_session;
+} __packed;
+
+struct cvs_create_full_ctl_session_cmd {
+	struct apr_hdr hdr;
+	struct vss_istream_cmd_create_full_control_session_t cvs_session;
+} __packed;
+
+struct cvs_destroy_session_cmd {
+	struct apr_hdr hdr;
+} __packed;
+
+struct cvs_set_mute_cmd {
+	struct apr_hdr hdr;
+	struct vss_ivolume_cmd_mute_v2_t cvs_set_mute;
+} __packed;
+
+struct cvs_set_media_type_cmd {
+	struct apr_hdr hdr;
+	struct vss_istream_cmd_set_media_type_t media_type;
+} __packed;
+
+struct cvs_send_dec_buf_cmd {
+	struct apr_hdr hdr;
+	struct vss_istream_evt_send_dec_buffer_t dec_buf;
+} __packed;
+
+struct cvs_set_amr_enc_rate_cmd {
+	struct apr_hdr hdr;
+	struct vss_istream_cmd_voc_amr_set_enc_rate_t amr_rate;
+} __packed;
+
+struct cvs_set_amrwb_enc_rate_cmd {
+	struct apr_hdr hdr;
+	struct vss_istream_cmd_voc_amrwb_set_enc_rate_t amrwb_rate;
+} __packed;
+
+struct cvs_set_cdma_enc_minmax_rate_cmd {
+	struct apr_hdr hdr;
+	struct vss_istream_cmd_cdma_set_enc_minmax_rate_t cdma_rate;
+} __packed;
+
+struct cvs_set_enc_dtx_mode_cmd {
+	struct apr_hdr hdr;
+	struct vss_istream_cmd_set_enc_dtx_mode_t dtx_mode;
+} __packed;
+
+struct cvs_register_cal_data_cmd {
+	struct apr_hdr hdr;
+	struct vss_istream_cmd_register_calibration_data_v2_t cvs_cal_data;
+} __packed;
+
+struct cvs_deregister_cal_data_cmd {
+	struct apr_hdr hdr;
+} __packed;
+
+struct cvs_set_pp_enable_cmd {
+	struct apr_hdr hdr;
+	struct vss_icommon_cmd_set_ui_property_enable_t vss_set_pp;
+} __packed;
+struct cvs_start_record_cmd {
+	struct apr_hdr hdr;
+	struct vss_irecord_cmd_start_t rec_mode;
+} __packed;
+
+struct cvs_start_playback_cmd {
+	struct apr_hdr hdr;
+	struct vss_iplayback_cmd_start_t playback_mode;
+} __packed;
+
+struct cvs_dec_buffer_ready_cmd {
+	struct apr_hdr hdr;
+} __packed;
+
+struct cvs_enc_buffer_consumed_cmd {
+	struct apr_hdr hdr;
+} __packed;
+
+struct vss_istream_cmd_set_oob_packet_exchange_config_t {
+	struct apr_hdr hdr;
+	uint32_t mem_handle;
+	uint32_t enc_buf_addr_lsw;
+	uint32_t enc_buf_addr_msw;
+	uint32_t enc_buf_size;
+	uint32_t dec_buf_addr_lsw;
+	uint32_t dec_buf_addr_msw;
+	uint32_t dec_buf_size;
+} __packed;
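+
+/*
+ * Illustrative sketch, not part of the original patch: splitting 64-bit
+ * encoder/decoder buffer addresses into the LSW/MSW fields with the
+ * kernel's lower_32_bits()/upper_32_bits() helpers; the buffer sizes
+ * here reuse BUFFER_BLOCK_SIZE as an assumed example.
+ */
+static inline void example_fill_oob_config(
+	struct vss_istream_cmd_set_oob_packet_exchange_config_t *cfg,
+	uint32_t mem_handle, uint64_t enc_buf, uint64_t dec_buf)
+{
+	cfg->mem_handle = mem_handle;
+	cfg->enc_buf_addr_lsw = lower_32_bits(enc_buf);
+	cfg->enc_buf_addr_msw = upper_32_bits(enc_buf);
+	cfg->enc_buf_size = BUFFER_BLOCK_SIZE;	/* assumed example size */
+	cfg->dec_buf_addr_lsw = lower_32_bits(dec_buf);
+	cfg->dec_buf_addr_msw = upper_32_bits(dec_buf);
+	cfg->dec_buf_size = BUFFER_BLOCK_SIZE;	/* assumed example size */
+}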
+
+struct vss_istream_cmd_set_packet_exchange_mode_t {
+	struct apr_hdr hdr;
+	uint32_t mode;
+} __packed;
+
+/* TO CVP commands */
+
+#define VSS_IVOCPROC_CMD_CREATE_FULL_CONTROL_SESSION	0x000100C3
+/**< Wait for APRV2_IBASIC_RSP_RESULT response. */
+
+#define APRV2_IBASIC_CMD_DESTROY_SESSION		0x0001003C
+
+#define VSS_IVOCPROC_CMD_SET_DEVICE_V2			0x000112C6
+
+#define VSS_IVOCPROC_CMD_SET_DEVICE_V3			0x0001316A
+
+#define VSS_IVOCPROC_CMD_TOPOLOGY_SET_DEV_CHANNELS	0x00013199
+
+#define VSS_IVOCPROC_CMD_TOPOLOGY_COMMIT		0x00013198
+
+#define VSS_IVOCPROC_CMD_SET_VP3_DATA			0x000110EB
+
+#define VSS_IVOLUME_CMD_SET_STEP			0x000112C2
+
+#define VSS_IVOCPROC_CMD_ENABLE				0x000100C6
+/**< No payload. Wait for APRV2_IBASIC_RSP_RESULT response. */
+
+#define VSS_IVOCPROC_CMD_DISABLE			0x000110E1
+/**< No payload. Wait for APRV2_IBASIC_RSP_RESULT response. */
+
+/*
+ * Registers the memory that contains device specific configuration data with
+ * the vocproc. The client must register device configuration data with the
+ * vocproc that corresponds with the device being set on the vocproc.
+ */
+#define VSS_IVOCPROC_CMD_REGISTER_DEVICE_CONFIG		0x00011371
+
+/*
+ * Deregisters the memory that holds device configuration data from the
+  vocproc.
+*/
+#define VSS_IVOCPROC_CMD_DEREGISTER_DEVICE_CONFIG	0x00011372
+
+#define VSS_IVOCPROC_CMD_REGISTER_CALIBRATION_DATA_V2	0x00011373
+#define VSS_IVOCPROC_CMD_DEREGISTER_CALIBRATION_DATA	0x00011276
+
+#define VSS_IVOCPROC_CMD_REGISTER_VOL_CALIBRATION_DATA	0x00011374
+#define VSS_IVOCPROC_CMD_DEREGISTER_VOL_CALIBRATION_DATA	0x00011375
+
+#define VSS_IVOCPROC_CMD_REGISTER_STATIC_CALIBRATION_DATA       0x00013079
+#define VSS_IVOCPROC_CMD_DEREGISTER_STATIC_CALIBRATION_DATA     0x0001307A
+
+#define VSS_IVOCPROC_CMD_REGISTER_DYNAMIC_CALIBRATION_DATA      0x0001307B
+#define VSS_IVOCPROC_CMD_DEREGISTER_DYNAMIC_CALIBRATION_DATA    0x0001307C
+
+#define VSS_IVOCPROC_TOPOLOGY_ID_NONE			0x00010F70
+#define VSS_IVOCPROC_TOPOLOGY_ID_TX_SM_ECNS		0x00010F71
+#define VSS_IVOCPROC_TOPOLOGY_ID_TX_DM_FLUENCE		0x00010F72
+
+#define VSS_IVOCPROC_TOPOLOGY_ID_RX_DEFAULT		0x00010F77
+
+/* Network IDs */
+#define VSS_ICOMMON_CAL_NETWORK_ID_NONE		0x0001135E
+
+/* Select internal mixing mode. */
+#define VSS_IVOCPROC_VOCPROC_MODE_EC_INT_MIXING	0x00010F7C
+
+/* Select external mixing mode. */
+#define VSS_IVOCPROC_VOCPROC_MODE_EC_EXT_MIXING	0x00010F7D
+
+/* Default AFE port ID. Applicable to Tx and Rx. */
+#define VSS_IVOCPROC_PORT_ID_NONE		0xFFFF
+
+#define VSS_NETWORK_ID_DEFAULT		0x00010037
+
+/* Voice over Internet Protocol (VoIP) network ID. Common for all bands.*/
+#define VSS_NETWORK_ID_VOIP		0x00011362
+
+/* Media types */
+#define VSS_MEDIA_ID_EVRC_MODEM		0x00010FC2
+/* 80-VF690-47 CDMA enhanced variable rate vocoder modem format. */
+#define VSS_MEDIA_ID_AMR_NB_MODEM	0x00010FC6
+/* 80-VF690-47 UMTS AMR-NB vocoder modem format. */
+#define VSS_MEDIA_ID_AMR_WB_MODEM	0x00010FC7
+/* 80-VF690-47 UMTS AMR-WB vocoder modem format. */
+
+#define VSS_MEDIA_ID_PCM_8_KHZ		0x00010FCB
+#define VSS_MEDIA_ID_PCM_16_KHZ		0x00010FCC
+#define VSS_MEDIA_ID_PCM_32_KHZ		0x00010FD9
+#define VSS_MEDIA_ID_PCM_48_KHZ		0x00010FD6
+
+/* Linear PCM (16-bit, little-endian). */
+#define VSS_MEDIA_ID_G711_ALAW		0x00010FCD
+/* G.711 a-law (contains two 10ms vocoder frames). */
+#define VSS_MEDIA_ID_G711_MULAW		0x00010FCE
+/* G.711 mu-law (contains two 10ms vocoder frames). */
+#define VSS_MEDIA_ID_G729		0x00010FD0
+/* G.729AB (contains two 10ms vocoder frames). */
+#define VSS_MEDIA_ID_4GV_NB_MODEM	0x00010FC3
+/* CDMA EVRC-B vocoder modem format. */
+#define VSS_MEDIA_ID_4GV_WB_MODEM	0x00010FC4
+/* CDMA EVRC-WB vocoder modem format. */
+#define VSS_MEDIA_ID_4GV_NW_MODEM	0x00010FC5
+/* CDMA EVRC-NW vocoder modem format. */
+
+#define VSS_IVOCPROC_CMD_CREATE_FULL_CONTROL_SESSION_V2	0x000112BF
+#define VSS_IVOCPROC_CMD_CREATE_FULL_CONTROL_SESSION_V3	0x00013169
+
+#define VSS_NUM_DEV_CHANNELS_1 1
+#define VSS_NUM_DEV_CHANNELS_2 2
+#define VSS_NUM_DEV_CHANNELS_3 3
+#define VSS_NUM_DEV_CHANNELS_4 4
+
+struct vss_ivocproc_cmd_create_full_control_session_v2_t {
+	uint16_t direction;
+	/*
+	 * Vocproc direction. The supported values:
+	 * VSS_IVOCPROC_DIRECTION_RX
+	 * VSS_IVOCPROC_DIRECTION_TX
+	 * VSS_IVOCPROC_DIRECTION_RX_TX
+	 */
+	uint16_t tx_port_id;
+	/*
+	 * Tx device port ID to which the vocproc connects. If a port ID is
+	 * not being supplied, set this to #VSS_IVOCPROC_PORT_ID_NONE.
+	 */
+	uint32_t tx_topology_id;
+	/*
+	 * Tx path topology ID. If a topology ID is not being supplied, set
+	 * this to #VSS_IVOCPROC_TOPOLOGY_ID_NONE.
+	 */
+	uint16_t rx_port_id;
+	/*
+	 * Rx device port ID to which the vocproc connects. If a port ID is
+	 * not being supplied, set this to #VSS_IVOCPROC_PORT_ID_NONE.
+	 */
+	uint32_t rx_topology_id;
+	/*
+	 * Rx path topology ID. If a topology ID is not being supplied, set
+	 * this to #VSS_IVOCPROC_TOPOLOGY_ID_NONE.
+	 */
+	uint32_t profile_id;
+	/* Voice calibration profile ID. */
+	uint32_t vocproc_mode;
+	/*
+	 * Vocproc mode. The supported values:
+	 * VSS_IVOCPROC_VOCPROC_MODE_EC_INT_MIXING
+	 * VSS_IVOCPROC_VOCPROC_MODE_EC_EXT_MIXING
+	 */
+	uint16_t ec_ref_port_id;
+	/*
+	 * Port ID to which the vocproc connects for receiving echo
+	 * cancellation reference signal. If a port ID is not being supplied,
+	 * set this to #VSS_IVOCPROC_PORT_ID_NONE. This parameter value is
+	 * ignored when the vocproc_mode parameter is set to
+	 * VSS_IVOCPROC_VOCPROC_MODE_EC_INT_MIXING.
+	 */
+	char name[SESSION_NAME_LEN];
+	/*
+	 * Session name string used to identify a session that can be shared
+	 * with passive controllers (optional). The string size, including the
+	 * NULL termination character, is limited to 31 characters.
+	 */
+} __packed;
+
+struct vss_ivocproc_cmd_set_volume_index_t {
+	uint16_t vol_index;
+	/*
+	 * Volume index utilized by the vocproc to index into the volume table
+	 * provided in VSS_IVOCPROC_CMD_CACHE_VOLUME_CALIBRATION_TABLE and set
+	 * volume on the VDSP.
+	 */
+} __packed;
+
+struct vss_ivolume_cmd_set_step_t {
+	uint16_t direction;
+	/*
+	* The direction field sets the direction to apply the volume command.
+	* The supported values:
+	* #VSS_IVOLUME_DIRECTION_RX
+	*/
+	uint32_t value;
+	/*
+	* Volume step used to find the corresponding linear volume and
+	* the best match index in the registered volume calibration table.
+	*/
+	uint16_t ramp_duration_ms;
+	/*
+	* Volume change ramp duration in milliseconds.
+	* The supported values: 0 to 5000.
+	*/
+} __packed;
+
+struct vss_ivocproc_cmd_set_device_v2_t {
+	uint16_t tx_port_id;
+	/*
+	 * TX device port ID which vocproc will connect to.
+	 * VSS_IVOCPROC_PORT_ID_NONE means vocproc will not connect to any port.
+	 */
+	uint32_t tx_topology_id;
+	/*
+	 * TX leg topology ID.
+	 * VSS_IVOCPROC_TOPOLOGY_ID_NONE means vocproc does not contain any
+	 * pre/post-processing blocks and is pass-through.
+	 */
+	uint16_t rx_port_id;
+	/*
+	 * RX device port ID which vocproc will connect to.
+	 * VSS_IVOCPROC_PORT_ID_NONE means vocproc will not connect to any port.
+	 */
+	uint32_t rx_topology_id;
+	/*
+	 * RX leg topology ID.
+	 * VSS_IVOCPROC_TOPOLOGY_ID_NONE means vocproc does not contain any
+	 * pre/post-processing blocks and is pass-through.
+	 */
+	uint32_t vocproc_mode;
+	/* Vocproc mode. The supported values:
+	 * VSS_IVOCPROC_VOCPROC_MODE_EC_INT_MIXING - 0x00010F7C
+	 * VSS_IVOCPROC_VOCPROC_MODE_EC_EXT_MIXING - 0x00010F7D
+	 */
+	uint16_t ec_ref_port_id;
+	/* Port ID to which the vocproc connects for receiving
+	 * echo
+	 */
+} __packed;
+
+struct vss_ivocproc_cmd_register_device_config_t {
+	uint32_t mem_handle;
+	/*
+	 * Handle to the shared memory that holds the per-network calibration
+	 * data.
+	 */
+	uint32_t mem_address_lsw;
+	uint32_t mem_address_msw;
+	/* Location of calibration data. */
+	uint32_t mem_size;
+	/* Size of the calibration data in bytes. */
+} __packed;
+
+struct vss_ivocproc_cmd_register_calibration_data_v2_t {
+	uint32_t cal_mem_handle;
+	/*
+	 * Handle to the shared memory that holds the per-network calibration
+	 * data.
+	 */
+	uint32_t cal_mem_address_lsw;
+	uint32_t cal_mem_address_msw;
+	/* Location of calibration data. */
+	uint32_t cal_mem_size;
+	/* Size of the calibration data in bytes. */
+	uint8_t column_info[MAX_COL_INFO_SIZE];
+	/*
+	 * Column info contains the number of columns and the array of columns
+	 * in the calibration table. The order in which the columns are provided
+	 * here must match the order in which they exist in the calibration
+	 * table provided.
+	 */
+} __packed;
+
+struct vss_ivocproc_cmd_register_volume_cal_data_t {
+	uint32_t cal_mem_handle;
+	/*
+	 * Handle to the shared memory that holds the volume calibration
+	 * data.
+	 */
+	uint32_t cal_mem_address_lsw;
+	uint32_t cal_mem_address_msw;
+	/* Location of volume calibration data. */
+	uint32_t cal_mem_size;
+	/* Size of the volume calibration data in bytes. */
+	uint8_t column_info[MAX_COL_INFO_SIZE];
+	/*
+	 * Column info contains the number of columns and the array of columns
+	 * in the calibration table. The order in which the columns are provided
+	 * here must match the order in which they exist in the calibration
+	 * table provided.
+	 */
+} __packed;
+
+struct vss_ivocproc_cmd_topology_set_dev_channels_t {
+	uint16_t tx_num_channels;
+	/*
+	 * Number of Mics.
+	 * Supported values
+	 * 1  VSS_NUM_DEV_CHANNELS_1
+	 * 2  VSS_NUM_DEV_CHANNELS_2
+	 * 3  VSS_NUM_DEV_CHANNELS_3
+	 * 4  VSS_NUM_DEV_CHANNELS_4
+	 */
+	uint16_t rx_num_channels;
+	/*
+	 * Number of speaker channels.
+	 * Supported values
+	 * 1 VSS_NUM_DEV_CHANNELS_1
+	 */
+} __packed;
+
+/* Starts a vocoder PCM session */
+#define VSS_IVPCM_CMD_START_V2	0x00011339
+
+/* Default tap point location on the TX path. */
+#define VSS_IVPCM_TAP_POINT_TX_DEFAULT	0x00011289
+
+/* Default tap point location on the RX path. */
+#define VSS_IVPCM_TAP_POINT_RX_DEFAULT	0x0001128A
+
+/* Indicates tap point direction is output. */
+#define VSS_IVPCM_TAP_POINT_DIR_OUT	0
+
+/* Indicates tap point direction is input. */
+#define VSS_IVPCM_TAP_POINT_DIR_IN	1
+
+/* Indicates tap point direction is output and input. */
+#define VSS_IVPCM_TAP_POINT_DIR_OUT_IN	2
+
+
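+/* Indicates that the vocoder PCM sampling rate is selected automatically. */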
+#define VSS_IVPCM_SAMPLING_RATE_AUTO	0
+
+/* Indicates 8 kHz vocoder PCM sampling rate. */
+#define VSS_IVPCM_SAMPLING_RATE_8K	8000
+
+/* Indicates 16 kHz vocoder PCM sampling rate. */
+#define VSS_IVPCM_SAMPLING_RATE_16K	16000
+
+/* RX and TX */
+#define MAX_TAP_POINTS_SUPPORTED	2
+
+struct vss_ivpcm_tap_point {
+	uint32_t tap_point;
+	uint16_t direction;
+	uint16_t sampling_rate;
+	uint16_t duration;
+} __packed;
+
+
+struct vss_ivpcm_cmd_start_v2_t {
+	uint32_t mem_handle;
+	uint32_t num_tap_points;
+	struct vss_ivpcm_tap_point tap_points[MAX_TAP_POINTS_SUPPORTED];
+} __packed;
+
+#define VSS_IVPCM_EVT_PUSH_BUFFER_V2	0x0001133A
+
+/* Push buffer event mask indicating output buffer is filled. */
+#define VSS_IVPCM_PUSH_BUFFER_MASK_OUTPUT_BUFFER 1
+
+/* Push buffer event mask indicating input buffer is consumed. */
+#define VSS_IVPCM_PUSH_BUFFER_MASK_INPUT_BUFFER 2
+
+
+struct vss_ivpcm_evt_push_buffer_v2_t {
+	uint32_t tap_point;
+	uint32_t push_buf_mask;
+	uint64_t out_buf_mem_address;
+	uint16_t out_buf_mem_size;
+	uint64_t in_buf_mem_address;
+	uint16_t in_buf_mem_size;
+	uint16_t sampling_rate;
+	uint16_t num_in_channels;
+} __packed;
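+
+/*
+ * Illustrative sketch, not part of the original patch: pushing both an
+ * output and an input buffer in a single event by OR-ing the mask bits
+ * defined above into push_buf_mask.
+ */
+static inline void example_set_push_buf_mask(
+	struct vss_ivpcm_evt_push_buffer_v2_t *evt)
+{
+	evt->push_buf_mask = VSS_IVPCM_PUSH_BUFFER_MASK_OUTPUT_BUFFER |
+			     VSS_IVPCM_PUSH_BUFFER_MASK_INPUT_BUFFER;
+}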
+
+#define VSS_IVPCM_EVT_NOTIFY_V2 0x0001133B
+
+/* Notify event mask indicates output buffer is filled. */
+#define VSS_IVPCM_NOTIFY_MASK_OUTPUT_BUFFER 1
+
+/* Notify event mask indicates input buffer is consumed. */
+#define VSS_IVPCM_NOTIFY_MASK_INPUT_BUFFER 2
+
+/* Notify event mask indicates a timetick */
+#define VSS_IVPCM_NOTIFY_MASK_TIMETICK 4
+
+/* Notify event mask indicates an error occurred in output buffer operation */
+#define VSS_IVPCM_NOTIFY_MASK_OUTPUT_ERROR 8
+
+/* Notify event mask indicates an error occurred in input buffer operation */
+#define VSS_IVPCM_NOTIFY_MASK_INPUT_ERROR 16
+
+struct vss_ivpcm_evt_notify_v2_t {
+	uint32_t tap_point;
+	uint32_t notify_mask;
+	uint64_t out_buff_addr;
+	uint64_t in_buff_addr;
+	uint16_t filled_out_size;
+	uint16_t request_buf_size;
+	uint16_t sampling_rate;
+	uint16_t num_out_channels;
+} __packed;
+
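+/*
+ * A notify handler would typically test each mask bit independently,
+ * e.g. (consume(), refill() and handle_error() are placeholders for
+ * client-specific handling):
+ *
+ *	if (evt->notify_mask & VSS_IVPCM_NOTIFY_MASK_OUTPUT_BUFFER)
+ *		consume(evt->out_buff_addr, evt->filled_out_size);
+ *	if (evt->notify_mask & VSS_IVPCM_NOTIFY_MASK_INPUT_BUFFER)
+ *		refill(evt->in_buff_addr, evt->request_buf_size);
+ *	if (evt->notify_mask & (VSS_IVPCM_NOTIFY_MASK_OUTPUT_ERROR |
+ *				VSS_IVPCM_NOTIFY_MASK_INPUT_ERROR))
+ *		handle_error(evt->tap_point);
+ */
+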
+struct cvp_start_cmd {
+	struct apr_hdr hdr;
+	struct vss_ivpcm_cmd_start_v2_t vpcm_start_cmd;
+} __packed;
+
+struct cvp_push_buf_cmd {
+	struct apr_hdr hdr;
+	struct vss_ivpcm_evt_push_buffer_v2_t vpcm_evt_push_buffer;
+} __packed;
+
+#define VSS_IVPCM_CMD_STOP 0x0001100B
+
+struct cvp_create_full_ctl_session_cmd {
+	struct apr_hdr hdr;
+	struct vss_ivocproc_cmd_create_full_control_session_v2_t cvp_session;
+} __packed;
+
+struct cvp_command {
+	struct apr_hdr hdr;
+} __packed;
+
+struct cvp_set_device_cmd {
+	struct apr_hdr hdr;
+	struct vss_ivocproc_cmd_set_device_v2_t cvp_set_device_v2;
+} __packed;
+
+struct cvp_set_dev_channels_cmd {
+	struct apr_hdr hdr;
+	struct vss_ivocproc_cmd_topology_set_dev_channels_t cvp_set_channels;
+} __packed;
+
+struct cvp_set_media_format_cmd {
+	struct apr_hdr hdr;
+	struct vss_icommon_cmd_set_param_v2_t cvp_set_param_v2;
+} __packed;
+
+struct cvp_set_vp3_data_cmd {
+	struct apr_hdr hdr;
+} __packed;
+
+struct cvp_set_rx_volume_index_cmd {
+	struct apr_hdr hdr;
+	struct vss_ivocproc_cmd_set_volume_index_t cvp_set_vol_idx;
+} __packed;
+
+struct cvp_set_rx_volume_step_cmd {
+	struct apr_hdr hdr;
+	struct vss_ivolume_cmd_set_step_t cvp_set_vol_step;
+} __packed;
+
+struct cvp_register_dev_cfg_cmd {
+	struct apr_hdr hdr;
+	struct vss_ivocproc_cmd_register_device_config_t cvp_dev_cfg_data;
+} __packed;
+
+struct cvp_deregister_dev_cfg_cmd {
+	struct apr_hdr hdr;
+} __packed;
+
+struct cvp_register_cal_data_cmd {
+	struct apr_hdr hdr;
+	struct vss_ivocproc_cmd_register_calibration_data_v2_t cvp_cal_data;
+} __packed;
+
+struct cvp_deregister_cal_data_cmd {
+	struct apr_hdr hdr;
+} __packed;
+
+struct cvp_register_vol_cal_data_cmd {
+	struct apr_hdr hdr;
+	struct vss_ivocproc_cmd_register_volume_cal_data_t cvp_vol_cal_data;
+} __packed;
+
+struct cvp_deregister_vol_cal_data_cmd {
+	struct apr_hdr hdr;
+} __packed;
+
+struct cvp_set_mute_cmd {
+	struct apr_hdr hdr;
+	struct vss_ivolume_cmd_mute_v2_t cvp_set_mute;
+} __packed;
+
+/* CB for up-link packets. */
+typedef void (*ul_cb_fn)(uint8_t *voc_pkt,
+			 uint32_t pkt_len,
+			 uint32_t timestamp,
+			 void *private_data);
+
+/* CB for down-link packets. */
+typedef void (*dl_cb_fn)(uint8_t *voc_pkt,
+			 void *private_data);
+
+/* CB for DTMF RX Detection */
+typedef void (*dtmf_rx_det_cb_fn)(uint8_t *pkt,
+				  char *session,
+				  void *private_data);
+
+typedef void (*voip_ssr_cb) (uint32_t opcode,
+				void *private_data);
+
+typedef void (*hostpcm_cb_fn)(uint8_t *data,
+			   char *session,
+			   void *private_data);
+
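+/*
+ * Clients hand these callbacks to the voice driver through the
+ * voc_register_*_cb() helpers declared below, e.g. (my_ul_cb, my_dl_cb,
+ * my_ssr_cb, my_hpcm_cb and my_priv are hypothetical client symbols):
+ *
+ *	voc_register_mvs_cb(my_ul_cb, my_dl_cb, my_ssr_cb, my_priv);
+ *	voc_register_hpcm_evt_cb(my_hpcm_cb, my_priv);
+ */
+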
+struct mvs_driver_info {
+	uint32_t media_type;
+	uint32_t rate;
+	uint32_t network_type;
+	uint32_t dtx_mode;
+	ul_cb_fn ul_cb;
+	dl_cb_fn dl_cb;
+	voip_ssr_cb ssr_cb;
+	void *private_data;
+	uint32_t evrc_min_rate;
+	uint32_t evrc_max_rate;
+};
+
+struct dtmf_driver_info {
+	dtmf_rx_det_cb_fn dtmf_rx_ul_cb;
+	void *private_data;
+};
+
+struct hostpcm_driver_info {
+	hostpcm_cb_fn hostpcm_evt_cb;
+	void *private_data;
+};
+
+struct incall_rec_info {
+	uint32_t rec_enable;
+	uint32_t rec_mode;
+	uint32_t recording;
+};
+
+struct incall_music_info {
+	uint32_t play_enable;
+	uint32_t playing;
+	int count;
+	int force;
+	uint16_t port_id;
+};
+
+struct share_memory_info {
+	u32			mem_handle;
+	struct share_mem_buf	sh_buf;
+	struct mem_map_table	memtbl;
+};
+
+#define VSS_ISOUNDFOCUS_CMD_SET_SECTORS     0x00013133
+#define VSS_ISOUNDFOCUS_CMD_GET_SECTORS     0x00013134
+#define VSS_ISOUNDFOCUS_RSP_GET_SECTORS     0x00013135
+#define VSS_ISOURCETRACK_CMD_GET_ACTIVITY   0x00013136
+
+struct vss_isoundfocus_cmd_set_sectors_t {
+	uint16_t start_angles[8];
+	uint8_t enables[8];
+	uint16_t gain_step;
+} __packed;
+
+/* Payload of the VSS_ISOUNDFOCUS_RSP_GET_SECTORS response */
+struct vss_isoundfocus_rsp_get_sectors_t {
+	uint16_t start_angles[8];
+	uint8_t enables[8];
+	uint16_t gain_step;
+} __packed;
+
+struct cvp_set_sound_focus_param_cmd_t {
+	struct apr_hdr hdr;
+	struct vss_isoundfocus_cmd_set_sectors_t cvp_set_sound_focus_param;
+} __packed;
+
+/* Payload structure for the VSS_ISOURCETRACK_CMD_GET_ACTIVITY command */
+struct vss_isourcetrack_cmd_get_activity_t {
+	uint32_t mem_handle;
+	uint32_t mem_address_lsw;
+	uint32_t mem_address_msw;
+	uint32_t mem_size;
+} __packed;
+
+struct cvp_get_source_tracking_param_cmd_t {
+	struct apr_hdr hdr;
+	struct vss_isourcetrack_cmd_get_activity_t
+			cvp_get_source_tracking_param;
+} __packed;
+
+/* Structure for the sound activity data */
+struct vss_isourcetrack_activity_data_t {
+	uint8_t voice_active[8];
+	uint16_t talker_doa;
+	uint16_t interferer_doa[3];
+	uint8_t sound_strength[360];
+} __packed;
+
+struct shared_mem_info {
+	uint32_t mem_handle;
+	struct mem_map_table sh_mem_block;
+	struct mem_map_table sh_mem_table;
+};
+
+struct voice_data {
+	int voc_state; /* INIT, CHANGE, RELEASE, RUN */
+
+	/* Shared mem to store decoder and encoder packets */
+	struct share_memory_info	shmem_info;
+
+	wait_queue_head_t mvm_wait;
+	wait_queue_head_t cvs_wait;
+	wait_queue_head_t cvp_wait;
+
+	/* Cache the values related to Rx and Tx devices */
+	struct device_data dev_rx;
+	struct device_data dev_tx;
+
+	/* Cache the values related to Rx and Tx streams */
+	struct stream_data stream_rx;
+	struct stream_data stream_tx;
+
+	u32 mvm_state;
+	u32 cvs_state;
+	u32 cvp_state;
+
+	u32 async_err;
+
+	/* Handle to MVM in the Q6 */
+	u16 mvm_handle;
+	/* Handle to CVS in the Q6 */
+	u16 cvs_handle;
+	/* Handle to CVP in the Q6 */
+	u16 cvp_handle;
+
+	struct mutex lock;
+
+	bool disable_topology;
+
+	uint16_t sidetone_gain;
+	uint8_t tty_mode;
+	/* slowtalk enable value */
+	uint32_t st_enable;
+	uint32_t hd_enable;
+	uint32_t dtmf_rx_detect_en;
+	/* Local Call Hold mode */
+	uint8_t lch_mode;
+
+	struct voice_dev_route_state voc_route_state;
+
+	u32 session_id;
+
+	struct incall_rec_info rec_info;
+
+	struct incall_music_info music_info;
+
+	struct voice_rec_route_state rec_route_state;
+
+	struct power_supply *psy;
+};
+
+struct cal_mem {
+	struct ion_handle *handle;
+	uint32_t phy;
+	void *buf;
+};
+
+#define MAX_VOC_SESSIONS 8
+
+struct common_data {
+	/* these default values are for all devices */
+	uint32_t default_mute_val;
+	uint32_t default_sample_val;
+	uint32_t default_vol_step_val;
+	uint32_t default_vol_ramp_duration_ms;
+	uint32_t default_mute_ramp_duration_ms;
+	bool ec_ref_ext;
+	struct media_format_info ec_media_fmt_info;
+
+	/* APR to MVM in the Q6 */
+	void *apr_q6_mvm;
+	/* APR to CVS in the Q6 */
+	void *apr_q6_cvs;
+	/* APR to CVP in the Q6 */
+	void *apr_q6_cvp;
+
+	struct cal_type_data *cal_data[MAX_VOICE_CAL_TYPES];
+
+	struct mem_map_table cal_mem_map_table;
+	uint32_t cal_mem_handle;
+
+	struct mem_map_table rtac_mem_map_table;
+	uint32_t rtac_mem_handle;
+
+	uint32_t voice_host_pcm_mem_handle;
+
+	struct cal_mem cvp_cal;
+	struct cal_mem cvs_cal;
+
+	struct mutex common_lock;
+
+	struct mvs_driver_info mvs_info;
+
+	struct dtmf_driver_info dtmf_info;
+
+	struct hostpcm_driver_info hostpcm_info;
+
+	struct voice_data voice[MAX_VOC_SESSIONS];
+
+	bool srvcc_rec_flag;
+	bool is_destroy_cvd;
+	bool is_vote_bms;
+	char cvd_version[CVD_VERSION_STRING_MAX_SIZE];
+	bool is_per_vocoder_cal_enabled;
+	bool is_sound_focus_resp_success;
+	bool is_source_tracking_resp_success;
+	struct vss_isoundfocus_rsp_get_sectors_t soundFocusResponse;
+	struct shared_mem_info source_tracking_sh_mem;
+	struct vss_isourcetrack_activity_data_t sourceTrackingResponse;
+	bool sidetone_enable;
+};
+
+struct voice_session_itr {
+	int cur_idx;
+	int session_idx;
+};
+
+void voc_register_mvs_cb(ul_cb_fn ul_cb,
+			dl_cb_fn dl_cb,
+			voip_ssr_cb ssr_cb,
+			void *private_data);
+
+void voc_register_dtmf_rx_detection_cb(dtmf_rx_det_cb_fn dtmf_rx_ul_cb,
+				       void *private_data);
+
+void voc_config_vocoder(uint32_t media_type,
+			uint32_t rate,
+			uint32_t network_type,
+			uint32_t dtx_mode,
+			uint32_t evrc_min_rate,
+			uint32_t evrc_max_rate);
+
+enum {
+	DEV_RX = 0,
+	DEV_TX,
+};
+
+enum {
+	RX_PATH = 0,
+	TX_PATH,
+	EC_REF_PATH,
+};
+
+#define VOC_PATH_PASSIVE 0
+#define VOC_PATH_FULL 1
+#define VOC_PATH_VOLTE_PASSIVE 2
+#define VOC_PATH_VOICE2_PASSIVE 3
+#define VOC_PATH_QCHAT_PASSIVE 4
+#define VOC_PATH_VOWLAN_PASSIVE 5
+#define VOC_PATH_VOICEMMODE1_PASSIVE 6
+#define VOC_PATH_VOICEMMODE2_PASSIVE 7
+
+#define MAX_SESSION_NAME_LEN 32
+#define VOICE_SESSION_NAME   "Voice session"
+#define VOIP_SESSION_NAME    "VoIP session"
+#define VOLTE_SESSION_NAME   "VoLTE session"
+#define VOICE2_SESSION_NAME  "Voice2 session"
+#define QCHAT_SESSION_NAME   "QCHAT session"
+#define VOWLAN_SESSION_NAME  "VoWLAN session"
+#define VOICEMMODE1_NAME     "VoiceMMode1"
+#define VOICEMMODE2_NAME     "VoiceMMode2"
+
+#define VOICE2_SESSION_VSID_STR      "10DC1000"
+#define QCHAT_SESSION_VSID_STR       "10803000"
+#define VOWLAN_SESSION_VSID_STR      "10002000"
+#define VOICEMMODE1_VSID_STR         "11C05000"
+#define VOICEMMODE2_VSID_STR         "11DC5000"
+#define VOICE_SESSION_VSID           0x10C01000
+#define VOICE2_SESSION_VSID          0x10DC1000
+#define VOLTE_SESSION_VSID           0x10C02000
+#define VOIP_SESSION_VSID            0x10004000
+#define QCHAT_SESSION_VSID           0x10803000
+#define VOWLAN_SESSION_VSID          0x10002000
+#define VOICEMMODE1_VSID             0x11C05000
+#define VOICEMMODE2_VSID             0x11DC5000
+#define ALL_SESSION_VSID             0xFFFFFFFF
+#define VSID_MAX                     ALL_SESSION_VSID
+
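+/*
+ * A session's VSID is looked up by name and converted into an index
+ * into the session array, e.g. (illustrative):
+ *
+ *	u32 vsid = voc_get_session_id(VOICE_SESSION_NAME);
+ *	int idx  = voice_get_idx_for_session(vsid);
+ */
+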
+/* Called by the ALSA driver. */
+int voc_set_pp_enable(uint32_t session_id, uint32_t module_id,
+		      uint32_t enable);
+int voc_get_pp_enable(uint32_t session_id, uint32_t module_id);
+int voc_set_hd_enable(uint32_t session_id, uint32_t enable);
+uint8_t voc_get_tty_mode(uint32_t session_id);
+int voc_set_tty_mode(uint32_t session_id, uint8_t tty_mode);
+int voc_start_voice_call(uint32_t session_id);
+int voc_end_voice_call(uint32_t session_id);
+int voc_standby_voice_call(uint32_t session_id);
+int voc_resume_voice_call(uint32_t session_id);
+int voc_set_lch(uint32_t session_id, enum voice_lch_mode lch_mode);
+int voc_set_rx_vol_step(uint32_t session_id, uint32_t dir, uint32_t vol_step,
+			uint32_t ramp_duration);
+int voc_set_tx_mute(uint32_t session_id, uint32_t dir, uint32_t mute,
+		    uint32_t ramp_duration);
+int voc_set_device_mute(uint32_t session_id, uint32_t dir, uint32_t mute,
+			uint32_t ramp_duration);
+int voc_get_rx_device_mute(uint32_t session_id);
+int voc_set_route_flag(uint32_t session_id, uint8_t path_dir, uint8_t set);
+uint8_t voc_get_route_flag(uint32_t session_id, uint8_t path_dir);
+int voc_enable_dtmf_rx_detection(uint32_t session_id, uint32_t enable);
+void voc_disable_dtmf_det_on_active_sessions(void);
+int voc_alloc_cal_shared_memory(void);
+int voc_alloc_voip_shared_memory(void);
+int is_voc_initialized(void);
+int voc_register_vocproc_vol_table(void);
+int voc_deregister_vocproc_vol_table(void);
+int voc_send_cvp_map_vocpcm_memory(uint32_t session_id,
+				   struct mem_map_table *tp_mem_table,
+				   phys_addr_t paddr, uint32_t bufsize);
+int voc_send_cvp_unmap_vocpcm_memory(uint32_t session_id);
+int voc_send_cvp_start_vocpcm(uint32_t session_id,
+			      struct vss_ivpcm_tap_point *vpcm_tp,
+			      uint32_t no_of_tp);
+int voc_send_cvp_vocpcm_push_buf_evt(uint32_t session_id,
+			struct vss_ivpcm_evt_push_buffer_v2_t *push_buff_evt);
+int voc_send_cvp_stop_vocpcm(uint32_t session_id);
+void voc_register_hpcm_evt_cb(hostpcm_cb_fn hostpcm_cb,
+			      void *private_data);
+void voc_deregister_hpcm_evt_cb(void);
+
+int voc_map_rtac_block(struct rtac_cal_block_data *cal_block);
+int voc_unmap_rtac_block(uint32_t *mem_map_handle);
+
+uint32_t voc_get_session_id(char *name);
+
+int voc_start_playback(uint32_t set, uint16_t port_id);
+int voc_start_record(uint32_t port_id, uint32_t set, uint32_t session_id);
+int voice_get_idx_for_session(u32 session_id);
+int voc_set_ext_ec_ref_port_id(uint16_t port_id, bool state);
+int voc_get_ext_ec_ref_port_id(void);
+int voc_set_ext_ec_ref_media_fmt_info(struct media_format_info *finfo);
+int voc_update_amr_vocoder_rate(uint32_t session_id);
+int voc_disable_device(uint32_t session_id);
+int voc_enable_device(uint32_t session_id);
+void voc_set_destroy_cvd_flag(bool is_destroy_cvd);
+void voc_set_vote_bms_flag(bool is_vote_bms);
+int voc_disable_topology(uint32_t session_id, uint32_t disable);
+int voc_set_device_config(uint32_t session_id, uint8_t path_dir,
+			  struct media_format_info *finfo);
+uint32_t voice_get_topology(uint32_t topology_idx);
+int voc_set_sound_focus(struct sound_focus_param sound_focus_param);
+int voc_get_sound_focus(struct sound_focus_param *soundFocusData);
+int voc_get_source_tracking(struct source_tracking_param *sourceTrackingData);
+int voc_set_afe_sidetone(uint32_t session_id, bool sidetone_enable);
+bool voc_get_afe_sidetone(void);
+#endif
diff -Nruw linux-4.4.115/sound/soc/msm/qdsp6v2/rtac.c linux-4.4.115-fbx/sound/soc/msm/qdsp6v2/rtac.c
--- linux-4.4.115/sound/soc/msm/qdsp6v2/rtac.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.4.115-fbx/sound/soc/msm/qdsp6v2/rtac.c	2019-10-29 09:26:26.169227898 +0100
@@ -0,0 +1,1966 @@
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/fs.h>
+#include <linux/module.h>
+#include <linux/miscdevice.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/mutex.h>
+#include <linux/sched.h>
+#include <linux/msm_audio_calibration.h>
+#include <linux/atomic.h>
+#include <linux/msm_audio_ion.h>
+#include <linux/qdsp6v2/rtac.h>
+#include <linux/compat.h>
+#include <sound/q6asm-v2.h>
+#include <sound/q6afe-v2.h>
+#include <sound/q6adm-v2.h>
+#include <sound/apr_audio-v2.h>
+#include "q6voice.h"
+#include "msm-pcm-routing-v2.h"
+#include <sound/adsp_err.h>
+
+
+/* Max size of payload (buf size - apr header) */
+#define MAX_PAYLOAD_SIZE		4076
+#define RTAC_MAX_ACTIVE_VOICE_COMBOS	2
+#define RTAC_MAX_ACTIVE_POPP		8
+#define RTAC_BUF_SIZE			163840
+
+#define TIMEOUT_MS	1000
+
+struct rtac_cal_block_data	rtac_cal[MAX_RTAC_BLOCKS] = {
+/* ADM_RTAC_CAL */
+	{{RTAC_BUF_SIZE, 0, 0, 0}, {0, 0, 0} },
+/* ASM_RTAC_CAL */
+	{{RTAC_BUF_SIZE, 0, 0, 0}, {0, 0, 0} },
+/* VOICE_RTAC_CAL */
+	{{RTAC_BUF_SIZE, 0, 0, 0}, {0, 0, 0} },
+/* AFE_RTAC_CAL */
+	{{RTAC_BUF_SIZE, 0, 0, 0}, {0, 0, 0} }
+};
+
+struct rtac_common_data {
+	atomic_t			usage_count;
+	atomic_t			apr_err_code;
+	struct mutex			rtac_fops_mutex;
+};
+
+static struct rtac_common_data		rtac_common;
+
+/* APR data */
+struct rtac_apr_data {
+	void			*apr_handle;
+	atomic_t		cmd_state;
+	wait_queue_head_t	cmd_wait;
+};
+
+static struct rtac_apr_data rtac_adm_apr_data;
+static struct rtac_apr_data rtac_asm_apr_data[ASM_ACTIVE_STREAMS_ALLOWED + 1];
+static struct rtac_apr_data	rtac_afe_apr_data;
+static struct rtac_apr_data	rtac_voice_apr_data[RTAC_VOICE_MODES];
+
+/* ADM info & APR */
+static struct rtac_adm		rtac_adm_data;
+static u32			*rtac_adm_buffer;
+
+/* ASM APR */
+static u32			*rtac_asm_buffer;
+
+static u32			*rtac_afe_buffer;
+
+/* Voice info & APR */
+struct rtac_voice_data_t {
+	uint32_t	tx_topology_id;
+	uint32_t	rx_topology_id;
+	uint32_t	tx_afe_topology;
+	uint32_t	rx_afe_topology;
+	uint32_t	tx_afe_port;
+	uint32_t	rx_afe_port;
+	uint16_t	cvs_handle;
+	uint16_t	cvp_handle;
+	uint32_t	tx_acdb_id;
+	uint32_t	rx_acdb_id;
+};
+
+struct rtac_voice {
+	uint32_t			num_of_voice_combos;
+	struct rtac_voice_data_t	voice[RTAC_MAX_ACTIVE_VOICE_COMBOS];
+};
+
+struct rtac_afe_user_data {
+	uint32_t	buf_size;
+	uint32_t	cmd_size;
+	uint32_t	port_id;
+	union {
+		struct rtac_afe_set {
+			struct afe_port_cmd_set_param_v2 cmd;
+			struct afe_port_param_data_v2    data;
+		} rtac_afe_set;
+		struct rtac_afe_get {
+			struct afe_port_cmd_get_param_v2 cmd;
+			struct afe_port_param_data_v2    data;
+		} rtac_afe_get;
+	};
+}  __packed;
+
+static struct rtac_voice	rtac_voice_data;
+static u32			*rtac_voice_buffer;
+static u32			voice_session_id[RTAC_MAX_ACTIVE_VOICE_COMBOS];
+
+struct mutex			rtac_adm_mutex;
+struct mutex			rtac_adm_apr_mutex;
+struct mutex			rtac_asm_apr_mutex;
+struct mutex			rtac_voice_mutex;
+struct mutex			rtac_voice_apr_mutex;
+struct mutex			rtac_afe_apr_mutex;
+
+int rtac_clear_mapping(uint32_t cal_type)
+{
+	int result = 0;
+	pr_debug("%s\n", __func__);
+
+	if (cal_type >= MAX_RTAC_BLOCKS) {
+		pr_debug("%s: invalid cal type %d\n", __func__, cal_type);
+		result = -EINVAL;
+		goto done;
+	}
+
+	rtac_cal[cal_type].map_data.map_handle = 0;
+done:
+	return result;
+}
+
+int rtac_allocate_cal_buffer(uint32_t cal_type)
+{
+	int result = 0;
+	size_t len;
+	pr_debug("%s\n", __func__);
+
+	if (cal_type >= MAX_RTAC_BLOCKS) {
+		pr_err("%s: cal_type %d is invalid!\n",
+		       __func__, cal_type);
+		result =  -EINVAL;
+		goto done;
+	}
+
+	if (rtac_cal[cal_type].cal_data.paddr != 0) {
+		pr_err("%s: memory already allocated! cal_type %d, paddr 0x%pK\n",
+		       __func__, cal_type, &rtac_cal[cal_type].cal_data.paddr);
+		result = -EPERM;
+		goto done;
+	}
+
+	result = msm_audio_ion_alloc("rtac_client",
+		&rtac_cal[cal_type].map_data.ion_client,
+		&rtac_cal[cal_type].map_data.ion_handle,
+		rtac_cal[cal_type].map_data.map_size,
+		&rtac_cal[cal_type].cal_data.paddr,
+		&len,
+		&rtac_cal[cal_type].cal_data.kvaddr);
+	if (result < 0) {
+		pr_err("%s: ION create client for RTAC failed\n",
+		       __func__);
+		goto done;
+	}
+
+	pr_debug("%s: cal_type %d, paddr 0x%pK, kvaddr 0x%pK, map_size 0x%x\n",
+		__func__, cal_type,
+		&rtac_cal[cal_type].cal_data.paddr,
+		rtac_cal[cal_type].cal_data.kvaddr,
+		rtac_cal[cal_type].map_data.map_size);
+done:
+	return result;
+}
+
+int rtac_free_cal_buffer(uint32_t cal_type)
+{
+	int result = 0;
+	pr_debug("%s\n", __func__);
+
+	if (cal_type >= MAX_RTAC_BLOCKS) {
+		pr_err("%s: cal_type %d is invalid!\n",
+		       __func__, cal_type);
+		result =  -EINVAL;
+		goto done;
+	}
+
+	if (rtac_cal[cal_type].map_data.ion_client == NULL) {
+		pr_debug("%s: cal_type %d not allocated!\n",
+		       __func__, cal_type);
+		goto done;
+	}
+
+	result = msm_audio_ion_free(rtac_cal[cal_type].map_data.ion_client,
+				rtac_cal[cal_type].map_data.ion_handle);
+	if (result < 0) {
+		pr_err("%s: ION free for RTAC failed! cal_type %d, paddr 0x%pK\n",
+		       __func__, cal_type, &rtac_cal[cal_type].cal_data.paddr);
+		goto done;
+	}
+
+	rtac_cal[cal_type].map_data.map_handle = 0;
+	rtac_cal[cal_type].map_data.ion_client = NULL;
+	rtac_cal[cal_type].map_data.ion_handle = NULL;
+	rtac_cal[cal_type].cal_data.size = 0;
+	rtac_cal[cal_type].cal_data.kvaddr = 0;
+	rtac_cal[cal_type].cal_data.paddr = 0;
+done:
+	return result;
+}
+
+int rtac_map_cal_buffer(uint32_t cal_type)
+{
+	int result = 0;
+	pr_debug("%s\n", __func__);
+
+	if (cal_type >= MAX_RTAC_BLOCKS) {
+		pr_err("%s: cal_type %d is invalid!\n",
+		       __func__, cal_type);
+		result =  -EINVAL;
+		goto done;
+	}
+
+	if (rtac_cal[cal_type].map_data.map_handle != 0) {
+		pr_err("%s: already mapped cal_type %d\n",
+			__func__, cal_type);
+		result =  -EPERM;
+		goto done;
+	}
+
+	if (rtac_cal[cal_type].cal_data.paddr == 0) {
+		pr_err("%s: physical address is NULL cal_type %d\n",
+			__func__, cal_type);
+		result =  -EPERM;
+		goto done;
+	}
+
+	switch (cal_type) {
+	case ADM_RTAC_CAL:
+		result = adm_map_rtac_block(&rtac_cal[cal_type]);
+		break;
+	case ASM_RTAC_CAL:
+		result = q6asm_map_rtac_block(&rtac_cal[cal_type]);
+		break;
+	case VOICE_RTAC_CAL:
+		result = voc_map_rtac_block(&rtac_cal[cal_type]);
+		break;
+	case AFE_RTAC_CAL:
+		result = afe_map_rtac_block(&rtac_cal[cal_type]);
+		break;
+	}
+	if (result < 0) {
+		pr_err("%s: map RTAC failed! cal_type %d\n",
+		       __func__, cal_type);
+		goto done;
+	}
+done:
+	return result;
+}
+
+int rtac_unmap_cal_buffer(uint32_t cal_type)
+{
+	int result = 0;
+	pr_debug("%s\n", __func__);
+
+	if (cal_type >= MAX_RTAC_BLOCKS) {
+		pr_err("%s: cal_type %d is invalid!\n",
+		       __func__, cal_type);
+		result =  -EINVAL;
+		goto done;
+	}
+
+	if (rtac_cal[cal_type].map_data.map_handle == 0) {
+		pr_debug("%s: nothing to unmap cal_type %d\n",
+			__func__, cal_type);
+		goto done;
+	}
+
+	switch (cal_type) {
+	case ADM_RTAC_CAL:
+		result = adm_unmap_rtac_block(
+			&rtac_cal[cal_type].map_data.map_handle);
+		break;
+	case ASM_RTAC_CAL:
+		result = q6asm_unmap_rtac_block(
+			&rtac_cal[cal_type].map_data.map_handle);
+		break;
+	case VOICE_RTAC_CAL:
+		result = voc_unmap_rtac_block(
+			&rtac_cal[cal_type].map_data.map_handle);
+		break;
+	case AFE_RTAC_CAL:
+		result = afe_unmap_rtac_block(
+			&rtac_cal[cal_type].map_data.map_handle);
+		break;
+	}
+	if (result < 0) {
+		pr_err("%s: unmap RTAC failed! cal_type %d\n",
+		       __func__, cal_type);
+		goto done;
+	}
+done:
+	return result;
+}
+
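+/*
+ * The four helpers above form the calibration-buffer life cycle used by
+ * the send_*_apr() paths below (ADM shown, the other blocks are
+ * analogous):
+ *
+ *	rtac_allocate_cal_buffer(ADM_RTAC_CAL);   ION allocation
+ *	rtac_map_cal_buffer(ADM_RTAC_CAL);        map to the DSP service
+ *	... exchange payloads via send_adm_apr() ...
+ *	rtac_unmap_cal_buffer(ADM_RTAC_CAL);      on last fd release
+ *	rtac_free_cal_buffer(ADM_RTAC_CAL);
+ */
+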
+static int rtac_open(struct inode *inode, struct file *f)
+{
+	int	result = 0;
+	pr_debug("%s\n", __func__);
+
+	mutex_lock(&rtac_common.rtac_fops_mutex);
+	atomic_inc(&rtac_common.usage_count);
+	mutex_unlock(&rtac_common.rtac_fops_mutex);
+	return result;
+}
+
+static int rtac_release(struct inode *inode, struct file *f)
+{
+	int	result = 0;
+	int	result2 = 0;
+	int	i;
+	pr_debug("%s\n", __func__);
+
+	mutex_lock(&rtac_common.rtac_fops_mutex);
+	atomic_dec(&rtac_common.usage_count);
+	pr_debug("%s: ref count %d!\n", __func__,
+		atomic_read(&rtac_common.usage_count));
+
+	if (atomic_read(&rtac_common.usage_count) > 0) {
+		mutex_unlock(&rtac_common.rtac_fops_mutex);
+		goto done;
+	}
+
+	for (i = 0; i < MAX_RTAC_BLOCKS; i++) {
+		result2 = rtac_unmap_cal_buffer(i);
+		if (result2 < 0) {
+			pr_err("%s: unmap buffer failed! error %d!\n",
+				__func__, result2);
+			result = result2;
+		}
+
+		result2 = rtac_free_cal_buffer(i);
+		if (result2 < 0) {
+			pr_err("%s: free buffer failed! error %d!\n",
+				__func__, result2);
+			result = result2;
+		}
+	}
+	mutex_unlock(&rtac_common.rtac_fops_mutex);
+done:
+	return result;
+}
+
+/* ADM Info */
+void add_popp(u32 dev_idx, u32 port_id, u32 popp_id)
+{
+	u32 i = 0;
+
+	for (; i < rtac_adm_data.device[dev_idx].num_of_popp; i++)
+		if (rtac_adm_data.device[dev_idx].popp[i].popp == popp_id)
+			goto done;
+
+	if (rtac_adm_data.device[dev_idx].num_of_popp ==
+			RTAC_MAX_ACTIVE_POPP) {
+		pr_err("%s, Max POPP!\n", __func__);
+		goto done;
+	}
+	rtac_adm_data.device[dev_idx].popp[
+		rtac_adm_data.device[dev_idx].num_of_popp].popp = popp_id;
+	rtac_adm_data.device[dev_idx].popp[
+		rtac_adm_data.device[dev_idx].num_of_popp].popp_topology =
+		q6asm_get_asm_topology(popp_id);
+	rtac_adm_data.device[dev_idx].popp[
+		rtac_adm_data.device[dev_idx].num_of_popp++].app_type =
+		q6asm_get_asm_app_type(popp_id);
+
+	pr_debug("%s: popp_id = %d, popp topology = 0x%x, popp app type = 0x%x\n",
+		__func__,
+		rtac_adm_data.device[dev_idx].popp[
+			rtac_adm_data.device[dev_idx].num_of_popp - 1].popp,
+		rtac_adm_data.device[dev_idx].popp[
+		rtac_adm_data.device[dev_idx].num_of_popp - 1].popp_topology,
+		rtac_adm_data.device[dev_idx].popp[
+		rtac_adm_data.device[dev_idx].num_of_popp - 1].app_type);
+done:
+	return;
+}
+
+void rtac_update_afe_topology(u32 port_id)
+{
+	u32 i = 0;
+
+	mutex_lock(&rtac_adm_mutex);
+	for (i = 0; i < rtac_adm_data.num_of_dev; i++) {
+		if (rtac_adm_data.device[i].afe_port == port_id) {
+			rtac_adm_data.device[i].afe_topology =
+						afe_get_topology(port_id);
+			pr_debug("%s: port_id = 0x%x topology_id = 0x%x copp_id = %d\n",
+				 __func__, port_id,
+				 rtac_adm_data.device[i].afe_topology,
+				 rtac_adm_data.device[i].copp);
+		}
+	}
+	mutex_unlock(&rtac_adm_mutex);
+}
+
+void rtac_add_adm_device(u32 port_id, u32 copp_id, u32 path_id, u32 popp_id,
+			 u32 app_type, u32 acdb_id)
+{
+	u32 i = 0;
+	pr_debug("%s: num rtac devices %d port_id = %d, copp_id = %d\n",
+		__func__, rtac_adm_data.num_of_dev, port_id, copp_id);
+
+	mutex_lock(&rtac_adm_mutex);
+	if (rtac_adm_data.num_of_dev == RTAC_MAX_ACTIVE_DEVICES) {
+		pr_err("%s, Can't add anymore RTAC devices!\n", __func__);
+		goto done;
+	}
+
+	/* Check if device already added */
+	if (rtac_adm_data.num_of_dev != 0) {
+		for (; i < rtac_adm_data.num_of_dev; i++) {
+			if (rtac_adm_data.device[i].afe_port == port_id &&
+			    rtac_adm_data.device[i].copp == copp_id) {
+				add_popp(i, port_id, popp_id);
+				goto done;
+			}
+			if (rtac_adm_data.device[i].num_of_popp ==
+						RTAC_MAX_ACTIVE_POPP) {
+				pr_err("%s, Max POPP!\n", __func__);
+				goto done;
+			}
+		}
+	}
+
+	/* Add device */
+	rtac_adm_data.num_of_dev++;
+
+	rtac_adm_data.device[i].topology_id =
+		adm_get_topology_for_port_from_copp_id(port_id, copp_id);
+	rtac_adm_data.device[i].afe_topology =
+		afe_get_topology(port_id);
+	rtac_adm_data.device[i].afe_port = port_id;
+	rtac_adm_data.device[i].copp = copp_id;
+	rtac_adm_data.device[i].app_type = app_type;
+	rtac_adm_data.device[i].acdb_dev_id = acdb_id;
+	rtac_adm_data.device[i].popp[
+		rtac_adm_data.device[i].num_of_popp].popp = popp_id;
+	rtac_adm_data.device[i].popp[
+		rtac_adm_data.device[i].num_of_popp].popp_topology =
+		q6asm_get_asm_topology(popp_id);
+	rtac_adm_data.device[i].popp[
+		rtac_adm_data.device[i].num_of_popp++].app_type =
+		q6asm_get_asm_app_type(popp_id);
+
+	pr_debug("%s: topology = 0x%x, afe_topology = 0x%x, port_id = %d, copp_id = %d, app id = 0x%x, acdb id = %d, popp_id = %d, popp topology = 0x%x, popp app type = 0x%x\n",
+		__func__,
+		rtac_adm_data.device[i].topology_id,
+		rtac_adm_data.device[i].afe_topology,
+		rtac_adm_data.device[i].afe_port,
+		rtac_adm_data.device[i].copp,
+		rtac_adm_data.device[i].app_type,
+		rtac_adm_data.device[i].acdb_dev_id,
+		rtac_adm_data.device[i].popp[
+			rtac_adm_data.device[i].num_of_popp - 1].popp,
+		rtac_adm_data.device[i].popp[
+		rtac_adm_data.device[i].num_of_popp - 1].popp_topology,
+		rtac_adm_data.device[i].popp[
+		rtac_adm_data.device[i].num_of_popp - 1].app_type);
+done:
+	mutex_unlock(&rtac_adm_mutex);
+	return;
+}
+
+static void shift_adm_devices(u32 dev_idx)
+{
+	for (; dev_idx < rtac_adm_data.num_of_dev; dev_idx++) {
+		memcpy(&rtac_adm_data.device[dev_idx],
+			&rtac_adm_data.device[dev_idx + 1],
+			sizeof(rtac_adm_data.device[dev_idx]));
+		memset(&rtac_adm_data.device[dev_idx + 1], 0,
+			   sizeof(rtac_adm_data.device[dev_idx]));
+	}
+}
+
+static void shift_popp(u32 copp_idx, u32 popp_idx)
+{
+	for (; popp_idx < rtac_adm_data.device[copp_idx].num_of_popp;
+							popp_idx++) {
+		memcpy(&rtac_adm_data.device[copp_idx].popp[popp_idx].popp,
+			&rtac_adm_data.device[copp_idx].popp[popp_idx + 1].
+			popp, sizeof(uint32_t));
+		memcpy(&rtac_adm_data.device[copp_idx].popp[popp_idx].
+			popp_topology,
+			&rtac_adm_data.device[copp_idx].popp[popp_idx + 1].
+			popp_topology,
+			sizeof(uint32_t));
+		memset(&rtac_adm_data.device[copp_idx].popp[popp_idx + 1].
+			popp, 0, sizeof(uint32_t));
+		memset(&rtac_adm_data.device[copp_idx].popp[popp_idx + 1].
+			popp_topology, 0, sizeof(uint32_t));
+	}
+}
+
+void rtac_remove_adm_device(u32 port_id, u32 copp_id)
+{
+	s32 i;
+	pr_debug("%s: num rtac devices %d port_id = %d, copp_id = %d\n",
+		__func__, rtac_adm_data.num_of_dev, port_id, copp_id);
+
+	mutex_lock(&rtac_adm_mutex);
+	/* look for device */
+	for (i = 0; i < rtac_adm_data.num_of_dev; i++) {
+		if (rtac_adm_data.device[i].afe_port == port_id &&
+		    rtac_adm_data.device[i].copp == copp_id) {
+			memset(&rtac_adm_data.device[i], 0,
+				   sizeof(rtac_adm_data.device[i]));
+			rtac_adm_data.num_of_dev--;
+
+			if (rtac_adm_data.num_of_dev >= 1) {
+				shift_adm_devices(i);
+				break;
+			}
+		}
+	}
+
+	mutex_unlock(&rtac_adm_mutex);
+	return;
+}
+
+void rtac_remove_popp_from_adm_devices(u32 popp_id)
+{
+	s32 i, j;
+	pr_debug("%s: popp_id = %d\n", __func__, popp_id);
+
+	mutex_lock(&rtac_adm_mutex);
+	for (i = 0; i < rtac_adm_data.num_of_dev; i++) {
+		for (j = 0; j < rtac_adm_data.device[i].num_of_popp; j++) {
+			if (rtac_adm_data.device[i].popp[j].popp ==
+								popp_id) {
+				rtac_adm_data.device[i].popp[j].popp = 0;
+				rtac_adm_data.device[i].popp[j].
+					popp_topology = 0;
+				rtac_adm_data.device[i].num_of_popp--;
+				shift_popp(i, j);
+			}
+		}
+	}
+	mutex_unlock(&rtac_adm_mutex);
+}
+
+
+/* Voice Info */
+static void set_rtac_voice_data(int idx, u32 cvs_handle, u32 cvp_handle,
+					u32 rx_afe_port, u32 tx_afe_port,
+					u32 rx_acdb_id, u32 tx_acdb_id,
+					u32 session_id)
+{
+	rtac_voice_data.voice[idx].tx_topology_id =
+		voice_get_topology(CVP_VOC_TX_TOPOLOGY_CAL);
+	rtac_voice_data.voice[idx].rx_topology_id =
+		voice_get_topology(CVP_VOC_RX_TOPOLOGY_CAL);
+	rtac_voice_data.voice[idx].tx_afe_topology =
+		afe_get_topology(tx_afe_port);
+	rtac_voice_data.voice[idx].rx_afe_topology =
+		afe_get_topology(rx_afe_port);
+	rtac_voice_data.voice[idx].tx_afe_port = tx_afe_port;
+	rtac_voice_data.voice[idx].rx_afe_port = rx_afe_port;
+	rtac_voice_data.voice[idx].tx_acdb_id = tx_acdb_id;
+	rtac_voice_data.voice[idx].rx_acdb_id = rx_acdb_id;
+	rtac_voice_data.voice[idx].cvs_handle = cvs_handle;
+	rtac_voice_data.voice[idx].cvp_handle = cvp_handle;
+	pr_debug("%s\n%s: %x\n%s: %d %s: %d\n%s: %d %s: %d\n %s: %d\n %s: %d\n%s: %d %s: %d\n%s",
+		 "<---- Voice Data Info ---->", "Session id", session_id,
+		 "cvs_handle", cvs_handle, "cvp_handle", cvp_handle,
+		 "rx_afe_topology", rtac_voice_data.voice[idx].rx_afe_topology,
+		 "tx_afe_topology", rtac_voice_data.voice[idx].tx_afe_topology,
+		 "rx_afe_port", rx_afe_port, "tx_afe_port", tx_afe_port,
+		 "rx_acdb_id", rx_acdb_id, "tx_acdb_id", tx_acdb_id,
+		 "<-----------End----------->");
+
+	/* Store session ID for voice RTAC */
+	voice_session_id[idx] = session_id;
+}
+
+void rtac_add_voice(u32 cvs_handle, u32 cvp_handle, u32 rx_afe_port,
+			u32 tx_afe_port, u32 rx_acdb_id, u32 tx_acdb_id,
+			u32 session_id)
+{
+	u32 i = 0;
+	pr_debug("%s\n", __func__);
+	mutex_lock(&rtac_voice_mutex);
+
+	if (rtac_voice_data.num_of_voice_combos ==
+			RTAC_MAX_ACTIVE_VOICE_COMBOS) {
+		pr_err("%s, Can't add anymore RTAC devices!\n", __func__);
+		goto done;
+	}
+
+	/* Check if device already added */
+	if (rtac_voice_data.num_of_voice_combos != 0) {
+		for (; i < rtac_voice_data.num_of_voice_combos; i++) {
+			if (rtac_voice_data.voice[i].cvs_handle ==
+							cvs_handle) {
+				set_rtac_voice_data(i, cvs_handle, cvp_handle,
+					rx_afe_port, tx_afe_port, rx_acdb_id,
+					tx_acdb_id, session_id);
+				goto done;
+			}
+		}
+	}
+
+	/* Add device */
+	rtac_voice_data.num_of_voice_combos++;
+	set_rtac_voice_data(i, cvs_handle, cvp_handle,
+				rx_afe_port, tx_afe_port,
+				rx_acdb_id, tx_acdb_id,
+				session_id);
+done:
+	mutex_unlock(&rtac_voice_mutex);
+	return;
+}
+
+static void shift_voice_devices(u32 idx)
+{
+	for (; idx < rtac_voice_data.num_of_voice_combos - 1; idx++) {
+		memcpy(&rtac_voice_data.voice[idx],
+			&rtac_voice_data.voice[idx + 1],
+			sizeof(rtac_voice_data.voice[idx]));
+		voice_session_id[idx] = voice_session_id[idx + 1];
+	}
+}
+
+void rtac_remove_voice(u32 cvs_handle)
+{
+	u32 i = 0;
+	pr_debug("%s\n", __func__);
+
+	mutex_lock(&rtac_voice_mutex);
+	/* look for device */
+	for (i = 0; i < rtac_voice_data.num_of_voice_combos; i++) {
+		if (rtac_voice_data.voice[i].cvs_handle == cvs_handle) {
+			shift_voice_devices(i);
+			rtac_voice_data.num_of_voice_combos--;
+			memset(&rtac_voice_data.voice[
+				rtac_voice_data.num_of_voice_combos], 0,
+				sizeof(rtac_voice_data.voice
+				[rtac_voice_data.num_of_voice_combos]));
+			voice_session_id[rtac_voice_data.num_of_voice_combos]
+				= 0;
+			break;
+		}
+	}
+	mutex_unlock(&rtac_voice_mutex);
+	return;
+}
+
+static u32 get_voice_session_id_cvs(u32 cvs_handle)
+{
+	u32 i;
+
+	for (i = 0; i < rtac_voice_data.num_of_voice_combos; i++) {
+		if (rtac_voice_data.voice[i].cvs_handle == cvs_handle)
+			return voice_session_id[i];
+	}
+
+	pr_err("%s: No voice index for CVS handle %d found returning 0\n",
+	       __func__, cvs_handle);
+	return 0;
+}
+
+static u32 get_voice_session_id_cvp(u32 cvp_handle)
+{
+	u32 i;
+
+	for (i = 0; i < rtac_voice_data.num_of_voice_combos; i++) {
+		if (rtac_voice_data.voice[i].cvp_handle == cvp_handle)
+			return voice_session_id[i];
+	}
+
+	pr_err("%s: No voice index for CVP handle %d found returning 0\n",
+	       __func__, cvp_handle);
+	return 0;
+}
+
+static int get_voice_index(u32 mode, u32 handle)
+{
+	if (mode == RTAC_CVP)
+		return voice_get_idx_for_session(
+			get_voice_session_id_cvp(handle));
+	if (mode == RTAC_CVS)
+		return voice_get_idx_for_session(
+			get_voice_session_id_cvs(handle));
+
+	pr_err("%s: Invalid mode %d, returning 0\n",
+	       __func__, mode);
+	return 0;
+}
+
+/* ADM APR */
+void rtac_set_adm_handle(void *handle)
+{
+	pr_debug("%s: handle = %pK\n", __func__, handle);
+
+	mutex_lock(&rtac_adm_apr_mutex);
+	rtac_adm_apr_data.apr_handle = handle;
+	mutex_unlock(&rtac_adm_apr_mutex);
+}
+
+bool rtac_make_adm_callback(uint32_t *payload, u32 payload_size)
+{
+	pr_debug("%s:cmd_state = %d\n", __func__,
+			atomic_read(&rtac_adm_apr_data.cmd_state));
+	if (atomic_read(&rtac_adm_apr_data.cmd_state) != 1)
+		return false;
+
+	pr_debug("%s\n", __func__);
+	if (payload_size == sizeof(uint32_t))
+		atomic_set(&rtac_common.apr_err_code, payload[0]);
+	else if (payload_size == (2*sizeof(uint32_t)))
+		atomic_set(&rtac_common.apr_err_code, payload[1]);
+
+	atomic_set(&rtac_adm_apr_data.cmd_state, 0);
+	wake_up(&rtac_adm_apr_data.cmd_wait);
+	return true;
+}
+
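+/*
+ * The owning service's APR response handler is expected to offer each
+ * payload to RTAC first and stop processing once it was consumed, along
+ * the lines of (sketch):
+ *
+ *	if (rtac_make_adm_callback(payload, data->payload_size))
+ *		return 0;
+ */
+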
+int send_adm_apr(void *buf, u32 opcode)
+{
+	s32	result;
+	u32	user_buf_size = 0;
+	u32	bytes_returned = 0;
+	u32	copp_id;
+	u32	payload_size;
+	u32	data_size = 0;
+	int	copp_idx;
+	int	port_idx;
+	struct apr_hdr	adm_params;
+	pr_debug("%s\n", __func__);
+
+	if (rtac_cal[ADM_RTAC_CAL].map_data.ion_handle == NULL) {
+		result = rtac_allocate_cal_buffer(ADM_RTAC_CAL);
+		if (result < 0) {
+			pr_err("%s: allocate buffer failed!",
+				__func__);
+			goto done;
+		}
+	}
+
+	if (rtac_cal[ADM_RTAC_CAL].map_data.map_handle == 0) {
+		result = rtac_map_cal_buffer(ADM_RTAC_CAL);
+		if (result < 0) {
+			pr_err("%s: map buffer failed!",
+				__func__);
+			goto done;
+		}
+	}
+
+	if (copy_from_user(&user_buf_size, (void *)buf,
+						sizeof(user_buf_size))) {
+		pr_err("%s: Copy from user failed! buf = 0x%pK\n",
+		       __func__, buf);
+		goto done;
+	}
+	if (user_buf_size <= 0) {
+		pr_err("%s: Invalid buffer size = %d\n",
+			__func__, user_buf_size);
+		goto done;
+	}
+
+	if (copy_from_user(&payload_size, buf + sizeof(u32), sizeof(u32))) {
+		pr_err("%s: Could not copy payload size from user buffer\n",
+			__func__);
+		goto done;
+	}
+
+	if (copy_from_user(&copp_id, buf + 2 * sizeof(u32), sizeof(u32))) {
+		pr_err("%s: Could not copy port id from user buffer\n",
+			__func__);
+		goto done;
+	}
+
+	if (adm_get_indexes_from_copp_id(copp_id, &copp_idx, &port_idx) != 0) {
+		pr_err("%s: Copp Id-%d is not active\n", __func__, copp_id);
+		goto done;
+	}
+
+	mutex_lock(&rtac_adm_apr_mutex);
+	if (rtac_adm_apr_data.apr_handle == NULL) {
+		pr_err("%s: APR not initialized\n", __func__);
+		result = -EINVAL;
+		goto err;
+	}
+
+	if (opcode == ADM_CMD_SET_PP_PARAMS_V5) {
+		/* set payload size to in-band payload */
+		/* set data size to actual out of band payload size */
+		data_size = payload_size - 4 * sizeof(u32);
+		if (data_size > rtac_cal[ADM_RTAC_CAL].map_data.map_size) {
+			pr_err("%s: Invalid data size = %d\n",
+				__func__, data_size);
+			result = -EINVAL;
+			goto err;
+		}
+		payload_size = 4 * sizeof(u32);
+
+		/* Copy buffer to out-of-band payload */
+		if (copy_from_user((void *)
+				rtac_cal[ADM_RTAC_CAL].cal_data.kvaddr,
+				buf + 7 * sizeof(u32), data_size)) {
+			pr_err("%s: Could not copy payload from user buffer\n",
+				__func__);
+			result = -EINVAL;
+			goto err;
+		}
+		/* set payload size in packet */
+		rtac_adm_buffer[8] = data_size;
+	} else {
+		if (payload_size > MAX_PAYLOAD_SIZE) {
+			pr_err("%s: Invalid payload size = %d\n",
+				__func__, payload_size);
+			result = -EINVAL;
+			goto err;
+		}
+
+		/* Copy buffer to in-band payload */
+		if (copy_from_user(rtac_adm_buffer +
+				sizeof(adm_params)/sizeof(u32),
+				buf + 3 * sizeof(u32), payload_size)) {
+			pr_err("%s: Could not copy payload from user buffer\n",
+				__func__);
+			result = -EINVAL;
+			goto err;
+		}
+	}
+
+	/* Pack header */
+	adm_params.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+		APR_HDR_LEN(20), APR_PKT_VER);
+	adm_params.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE,
+		payload_size);
+	adm_params.src_svc = APR_SVC_ADM;
+	adm_params.src_domain = APR_DOMAIN_APPS;
+	adm_params.src_port = copp_id;
+	adm_params.dest_svc = APR_SVC_ADM;
+	adm_params.dest_domain = APR_DOMAIN_ADSP;
+	adm_params.dest_port = copp_id;
+	adm_params.token = port_idx << 16 | copp_idx;
+	adm_params.opcode = opcode;
+
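+	/*
+	 * Out-of-band packet layout in 32-bit words: [0..4] APR header
+	 * (20 bytes), [5] payload address LSW, [6] payload address MSW,
+	 * [7] mem map handle, [8] payload size (set in the set-param
+	 * path above).
+	 */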
+	/* fill for out-of-band */
+	rtac_adm_buffer[5] =
+		lower_32_bits(rtac_cal[ADM_RTAC_CAL].cal_data.paddr);
+	rtac_adm_buffer[6] =
+		msm_audio_populate_upper_32_bits(
+				rtac_cal[ADM_RTAC_CAL].cal_data.paddr);
+	rtac_adm_buffer[7] = rtac_cal[ADM_RTAC_CAL].map_data.map_handle;
+
+	memcpy(rtac_adm_buffer, &adm_params, sizeof(adm_params));
+	atomic_set(&rtac_adm_apr_data.cmd_state, 1);
+
+	pr_debug("%s: Sending RTAC command ioctl 0x%x, paddr 0x%pK\n",
+		__func__, opcode,
+		&rtac_cal[ADM_RTAC_CAL].cal_data.paddr);
+
+	result = apr_send_pkt(rtac_adm_apr_data.apr_handle,
+					(uint32_t *)rtac_adm_buffer);
+	if (result < 0) {
+		pr_err("%s: Set params failed copp = %d\n", __func__, copp_id);
+		goto err;
+	}
+	/* Wait for the callback */
+	result = wait_event_timeout(rtac_adm_apr_data.cmd_wait,
+		(atomic_read(&rtac_adm_apr_data.cmd_state) == 0),
+		msecs_to_jiffies(TIMEOUT_MS));
+	if (!result) {
+		pr_err("%s: Set params timed out copp = %d\n", __func__,
+			copp_id);
+		goto err;
+	}
+	if (atomic_read(&rtac_common.apr_err_code)) {
+		pr_err("%s: DSP returned error code = [%s], opcode = 0x%x\n",
+			__func__, adsp_err_get_err_str(atomic_read(
+			&rtac_common.apr_err_code)),
+			opcode);
+		result = adsp_err_get_lnx_err_code(
+					atomic_read(
+					&rtac_common.apr_err_code));
+		goto err;
+	}
+
+	if (opcode == ADM_CMD_GET_PP_PARAMS_V5) {
+		bytes_returned = ((u32 *)rtac_cal[ADM_RTAC_CAL].cal_data.
+			kvaddr)[2] + 3 * sizeof(u32);
+
+		if (bytes_returned > rtac_cal[ADM_RTAC_CAL].
+			map_data.map_size) {
+			pr_err("%s: Invalid data size = %d\n",
+				__func__, bytes_returned);
+			result = -EINVAL;
+			goto err;
+		}
+
+		if (bytes_returned > user_buf_size) {
+			pr_err("%s: User buf not big enough, size = 0x%x, returned size = 0x%x\n",
+				__func__, user_buf_size, bytes_returned);
+			result = -EINVAL;
+			goto err;
+		}
+
+		if (copy_to_user(buf, (void *)
+				rtac_cal[ADM_RTAC_CAL].cal_data.kvaddr,
+				bytes_returned)) {
+			pr_err("%s: Could not copy buffer to user,size = %d\n",
+				__func__, bytes_returned);
+			result = -EINVAL;
+			goto err;
+		}
+	} else {
+		bytes_returned = data_size;
+	}
+	mutex_unlock(&rtac_adm_apr_mutex);
+done:
+	return bytes_returned;
+err:
+	mutex_unlock(&rtac_adm_apr_mutex);
+	return result;
+}
+
+/* ASM APR */
+void rtac_set_asm_handle(u32 session_id, void *handle)
+{
+	pr_debug("%s\n", __func__);
+
+	mutex_lock(&rtac_asm_apr_mutex);
+	rtac_asm_apr_data[session_id].apr_handle = handle;
+	mutex_unlock(&rtac_asm_apr_mutex);
+}
+
+bool rtac_make_asm_callback(u32 session_id, uint32_t *payload,
+	u32 payload_size)
+{
+	if (atomic_read(&rtac_asm_apr_data[session_id].cmd_state) != 1)
+		return false;
+
+	pr_debug("%s\n", __func__);
+	if (payload_size == sizeof(uint32_t))
+		atomic_set(&rtac_common.apr_err_code, payload[0]);
+	else if (payload_size == (2*sizeof(uint32_t)))
+		atomic_set(&rtac_common.apr_err_code, payload[1]);
+
+	atomic_set(&rtac_asm_apr_data[session_id].cmd_state, 0);
+	wake_up(&rtac_asm_apr_data[session_id].cmd_wait);
+	return true;
+}
+
+int send_rtac_asm_apr(void *buf, u32 opcode)
+{
+	s32	result;
+	u32	user_buf_size = 0;
+	u32	bytes_returned = 0;
+	u32	session_id = 0;
+	u32	payload_size;
+	u32	data_size = 0;
+	struct apr_hdr		asm_params;
+	pr_debug("%s\n", __func__);
+
+	if (rtac_cal[ASM_RTAC_CAL].map_data.ion_handle == NULL) {
+		result = rtac_allocate_cal_buffer(ASM_RTAC_CAL);
+		if (result < 0) {
+			pr_err("%s: allocate buffer failed!",
+				__func__);
+			goto done;
+		}
+	}
+
+	if (rtac_cal[ASM_RTAC_CAL].map_data.map_handle == 0) {
+		result = rtac_map_cal_buffer(ASM_RTAC_CAL);
+		if (result < 0) {
+			pr_err("%s: map buffer failed!",
+				__func__);
+			goto done;
+		}
+	}
+
+	if (copy_from_user(&user_buf_size, (void *)buf,
+						sizeof(user_buf_size))) {
+		pr_err("%s: Copy from user failed! buf = 0x%pK\n",
+		       __func__, buf);
+		goto done;
+	}
+	if (user_buf_size <= 0) {
+		pr_err("%s: Invalid buffer size = %d\n",
+			__func__, user_buf_size);
+		goto done;
+	}
+
+	if (copy_from_user(&payload_size, buf + sizeof(u32), sizeof(u32))) {
+		pr_err("%s: Could not copy payload size from user buffer\n",
+			__func__);
+		goto done;
+	}
+
+	if (copy_from_user(&session_id, buf + 2 * sizeof(u32), sizeof(u32))) {
+		pr_err("%s: Could not copy session id from user buffer\n",
+			__func__);
+		goto done;
+	}
+	if (session_id >= (ASM_ACTIVE_STREAMS_ALLOWED + 1)) {
+		pr_err("%s: Invalid Session = %d\n", __func__, session_id);
+		goto done;
+	}
+
+	mutex_lock(&rtac_asm_apr_mutex);
+	if (rtac_asm_apr_data[session_id].apr_handle == NULL) {
+		pr_err("%s: APR not initialized\n", __func__);
+		result = -EINVAL;
+		goto err;
+	}
+
+	if (opcode == ASM_STREAM_CMD_SET_PP_PARAMS_V2) {
+		/* set payload size to in-band payload */
+		/* set data size to actual out of band payload size */
+		data_size = payload_size - 4 * sizeof(u32);
+		if (data_size > rtac_cal[ASM_RTAC_CAL].map_data.map_size) {
+			pr_err("%s: Invalid data size = %d\n",
+				__func__, data_size);
+			result = -EINVAL;
+			goto err;
+		}
+		payload_size = 4 * sizeof(u32);
+
+		/* Copy buffer to out-of-band payload */
+		if (copy_from_user((void *)
+				rtac_cal[ASM_RTAC_CAL].cal_data.kvaddr,
+				buf + 7 * sizeof(u32), data_size)) {
+			pr_err("%s: Could not copy payload from user buffer\n",
+				__func__);
+			result = -EINVAL;
+			goto err;
+		}
+		/* set payload size in packet */
+		rtac_asm_buffer[8] = data_size;
+
+	} else {
+		if (payload_size > MAX_PAYLOAD_SIZE) {
+			pr_err("%s: Invalid payload size = %d\n",
+				__func__, payload_size);
+			result = -EINVAL;
+			goto err;
+		}
+
+		/* Copy buffer to in-band payload */
+		if (copy_from_user(rtac_asm_buffer +
+				sizeof(asm_params)/sizeof(u32),
+				buf + 3 * sizeof(u32), payload_size)) {
+			pr_err("%s: Could not copy payload from user buffer\n",
+				__func__);
+			result = -EINVAL;
+			goto err;
+		}
+	}
+
+	/* Pack header */
+	asm_params.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+		APR_HDR_LEN(20), APR_PKT_VER);
+	asm_params.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE,
+		payload_size);
+	asm_params.src_svc = q6asm_get_apr_service_id(session_id);
+	asm_params.src_domain = APR_DOMAIN_APPS;
+	asm_params.src_port = (session_id << 8) | 0x0001;
+	asm_params.dest_svc = APR_SVC_ASM;
+	asm_params.dest_domain = APR_DOMAIN_ADSP;
+	asm_params.dest_port = (session_id << 8) | 0x0001;
+	asm_params.token = session_id;
+	asm_params.opcode = opcode;
+
+	/* fill for out-of-band */
+	rtac_asm_buffer[5] =
+		lower_32_bits(rtac_cal[ASM_RTAC_CAL].cal_data.paddr);
+	rtac_asm_buffer[6] =
+		msm_audio_populate_upper_32_bits(
+				rtac_cal[ASM_RTAC_CAL].cal_data.paddr);
+	rtac_asm_buffer[7] = rtac_cal[ASM_RTAC_CAL].map_data.map_handle;
+
+	memcpy(rtac_asm_buffer, &asm_params, sizeof(asm_params));
+	atomic_set(&rtac_asm_apr_data[session_id].cmd_state, 1);
+
+	pr_debug("%s: Sending RTAC command ioctl 0x%x, paddr 0x%pK\n",
+		__func__, opcode,
+		&rtac_cal[ASM_RTAC_CAL].cal_data.paddr);
+
+	result = apr_send_pkt(rtac_asm_apr_data[session_id].apr_handle,
+				(uint32_t *)rtac_asm_buffer);
+	if (result < 0) {
+		pr_err("%s: Set params failed session = %d\n",
+			__func__, session_id);
+		goto err;
+	}
+
+	/* Wait for the callback */
+	result = wait_event_timeout(rtac_asm_apr_data[session_id].cmd_wait,
+		(atomic_read(&rtac_asm_apr_data[session_id].cmd_state) == 0),
+		5 * HZ);
+	if (!result) {
+		pr_err("%s: Set params timed out session = %d\n",
+			__func__, session_id);
+		goto err;
+	}
+	if (atomic_read(&rtac_common.apr_err_code)) {
+		pr_err("%s: DSP returned error code = [%s], opcode = 0x%x\n",
+			__func__, adsp_err_get_err_str(atomic_read(
+			&rtac_common.apr_err_code)),
+			opcode);
+		result = adsp_err_get_lnx_err_code(
+					atomic_read(
+					&rtac_common.apr_err_code));
+		goto err;
+	}
+
+	if (opcode == ASM_STREAM_CMD_GET_PP_PARAMS_V2) {
+		bytes_returned = ((u32 *)rtac_cal[ASM_RTAC_CAL].cal_data.
+			kvaddr)[2] + 3 * sizeof(u32);
+
+		if (bytes_returned > rtac_cal[ASM_RTAC_CAL].
+			map_data.map_size) {
+			pr_err("%s: Invalid data size = %d\n",
+				__func__, bytes_returned);
+			result = -EINVAL;
+			goto err;
+		}
+
+		if (bytes_returned > user_buf_size) {
+			pr_err("%s: User buf not big enough, size = 0x%x, returned size = 0x%x\n",
+				__func__, user_buf_size, bytes_returned);
+			result = -EINVAL;
+			goto err;
+		}
+
+		if (copy_to_user(buf, (void *)
+				rtac_cal[ASM_RTAC_CAL].cal_data.kvaddr,
+				bytes_returned)) {
+			pr_err("%s: Could not copy buffer to user,size = %d\n",
+				 __func__, bytes_returned);
+			result = -EINVAL;
+			goto err;
+		}
+	} else {
+		bytes_returned = data_size;
+	}
+	mutex_unlock(&rtac_asm_apr_mutex);
+done:
+	return bytes_returned;
+err:
+	mutex_unlock(&rtac_asm_apr_mutex);
+	return result;
+}
+
+/* AFE APR */
+void rtac_set_afe_handle(void *handle)
+{
+	mutex_lock(&rtac_afe_apr_mutex);
+	rtac_afe_apr_data.apr_handle = handle;
+	mutex_unlock(&rtac_afe_apr_mutex);
+}
+
+bool rtac_make_afe_callback(uint32_t *payload, uint32_t payload_size)
+{
+	pr_debug("%s:cmd_state = %d\n", __func__,
+			atomic_read(&rtac_afe_apr_data.cmd_state));
+	if (atomic_read(&rtac_afe_apr_data.cmd_state) != 1)
+		return false;
+
+	if (payload_size == sizeof(uint32_t))
+		atomic_set(&rtac_common.apr_err_code, payload[0]);
+	else if (payload_size == (2*sizeof(uint32_t)))
+		atomic_set(&rtac_common.apr_err_code, payload[1]);
+
+	atomic_set(&rtac_afe_apr_data.cmd_state, 0);
+	wake_up(&rtac_afe_apr_data.cmd_wait);
+	return true;
+}
+
+static int fill_afe_apr_hdr(struct apr_hdr *apr_hdr, uint32_t port,
+			 uint32_t opcode, uint32_t apr_msg_size)
+{
+	if (apr_hdr == NULL) {
+		pr_err("%s: invalid APR pointer", __func__);
+		return -EINVAL;
+	}
+
+	apr_hdr->hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+		APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
+	apr_hdr->pkt_size = apr_msg_size;
+	apr_hdr->src_svc = APR_SVC_AFE;
+	apr_hdr->src_domain = APR_DOMAIN_APPS;
+	apr_hdr->src_port = 0;
+	apr_hdr->dest_svc = APR_SVC_AFE;
+	apr_hdr->dest_domain = APR_DOMAIN_ADSP;
+	apr_hdr->dest_port = 0;
+	apr_hdr->token = port;
+	apr_hdr->opcode = opcode;
+
+	return 0;
+}
+
+static int send_rtac_afe_apr(void *buf, uint32_t opcode)
+{
+	int32_t result;
+	uint32_t bytes_returned = 0;
+	uint32_t port_index = 0;
+	uint32_t apr_msg_size = 0;
+	struct rtac_afe_user_data user_afe_buf;
+
+	pr_debug("%s\n", __func__);
+
+	if (rtac_cal[AFE_RTAC_CAL].map_data.ion_handle == NULL) {
+		result = rtac_allocate_cal_buffer(AFE_RTAC_CAL);
+		if (result < 0) {
+			pr_err("%s: allocate buffer failed! ret = %d\n",
+				__func__, result);
+			goto done;
+		}
+	}
+
+	if (rtac_cal[AFE_RTAC_CAL].map_data.map_handle == 0) {
+		result = rtac_map_cal_buffer(AFE_RTAC_CAL);
+		if (result < 0) {
+			pr_err("%s: map buffer failed! ret = %d\n",
+				__func__, result);
+			goto done;
+		}
+	}
+
+	if (copy_from_user(&user_afe_buf, (void *)buf,
+		sizeof(struct rtac_afe_user_data))) {
+		pr_err("%s: Copy from user failed! buf = 0x%pK\n",
+		       __func__, buf);
+		goto done;
+	}
+
+	if (user_afe_buf.buf_size <= 0) {
+		pr_err("%s: Invalid buffer size = %d\n",
+			__func__, user_afe_buf.buf_size);
+		goto done;
+	}
+
+	port_index = q6audio_get_port_index(user_afe_buf.port_id);
+	if (port_index >= AFE_MAX_PORTS) {
+		pr_err("%s: Invalid AFE port = 0x%x\n",
+		       __func__, user_afe_buf.port_id);
+		goto done;
+	}
+
+	mutex_lock(&rtac_afe_apr_mutex);
+	if (rtac_afe_apr_data.apr_handle == NULL) {
+		pr_err("%s: APR not initialized\n", __func__);
+		result = -EINVAL;
+		goto err;
+	}
+	if (opcode == AFE_PORT_CMD_SET_PARAM_V2) {
+		struct afe_port_cmd_set_param_v2 *afe_set_apr_msg;
+
+		/* set data size to actual out of band payload size */
+		if (user_afe_buf.rtac_afe_set.cmd.payload_size >
+			rtac_cal[AFE_RTAC_CAL].map_data.map_size) {
+			pr_err("%s: Invalid data size = %d\n",
+				   __func__,
+				   user_afe_buf.rtac_afe_set.cmd.payload_size);
+			result = -EINVAL;
+			goto err;
+		}
+
+		/* Copy buffer to out-of-band payload */
+		if (copy_from_user((void *)
+				rtac_cal[AFE_RTAC_CAL].cal_data.kvaddr,
+				buf+offsetof(struct rtac_afe_user_data,
+				rtac_afe_set.data),
+				user_afe_buf.rtac_afe_set.cmd.payload_size)) {
+			pr_err("%s: Could not copy payload from user buffer\n",
+				__func__);
+			result = -EINVAL;
+			goto err;
+		}
+
+		/* Copy AFE APR Message */
+		afe_set_apr_msg = (struct afe_port_cmd_set_param_v2 *)
+				((u8 *)rtac_afe_buffer +
+				sizeof(struct apr_hdr));
+		if (copy_from_user((void *)
+				afe_set_apr_msg,
+				buf + offsetof(struct rtac_afe_user_data,
+				rtac_afe_set.cmd) ,
+				sizeof(struct afe_port_cmd_set_param_v2))) {
+			pr_err("%s: Could not copy payload from user buffer\n",
+				__func__);
+			result = -EINVAL;
+			goto err;
+		}
+
+		afe_set_apr_msg->payload_address_lsw =
+			lower_32_bits(rtac_cal[AFE_RTAC_CAL].cal_data.paddr);
+		afe_set_apr_msg->payload_address_msw =
+				msm_audio_populate_upper_32_bits(
+					rtac_cal[AFE_RTAC_CAL].cal_data.paddr);
+		afe_set_apr_msg->mem_map_handle =
+				rtac_cal[AFE_RTAC_CAL].map_data.map_handle;
+
+		apr_msg_size = sizeof(struct apr_hdr) +
+				sizeof(struct afe_port_cmd_set_param_v2);
+
+	} else {
+		struct afe_port_cmd_get_param_v2 *afe_get_apr_msg;
+
+		if (user_afe_buf.cmd_size > MAX_PAYLOAD_SIZE) {
+			pr_err("%s: Invalid payload size = %d\n",
+				__func__, user_afe_buf.cmd_size);
+			result = -EINVAL;
+			goto err;
+		}
+
+		/* Copy buffer to in-band payload */
+		afe_get_apr_msg = (struct afe_port_cmd_get_param_v2 *)
+					((u8 *) rtac_afe_buffer +
+					sizeof(struct apr_hdr));
+		if (copy_from_user((void *)afe_get_apr_msg,
+				buf+offsetof(struct rtac_afe_user_data,
+				rtac_afe_get.cmd),
+			sizeof(struct afe_port_cmd_get_param_v2))) {
+			pr_err("%s: Could not copy payload from user buffer\n",
+				__func__);
+			result = -EINVAL;
+			goto err;
+		}
+
+		afe_get_apr_msg->payload_address_lsw =
+			lower_32_bits(rtac_cal[AFE_RTAC_CAL].cal_data.paddr);
+		afe_get_apr_msg->payload_address_msw =
+				msm_audio_populate_upper_32_bits(
+					rtac_cal[AFE_RTAC_CAL].cal_data.paddr);
+		afe_get_apr_msg->mem_map_handle =
+				rtac_cal[AFE_RTAC_CAL].map_data.map_handle;
+		afe_get_apr_msg->payload_size -= sizeof(struct apr_hdr);
+		apr_msg_size = sizeof(struct apr_hdr) +
+				sizeof(struct afe_port_cmd_get_param_v2);
+	}
+
+	fill_afe_apr_hdr((struct apr_hdr *) rtac_afe_buffer,
+			port_index, opcode, apr_msg_size);
+
+	atomic_set(&rtac_afe_apr_data.cmd_state, 1);
+
+	pr_debug("%s: Sending RTAC command ioctl 0x%x, paddr 0x%pK\n",
+		__func__, opcode,
+		&rtac_cal[AFE_RTAC_CAL].cal_data.paddr);
+
+	result = apr_send_pkt(rtac_afe_apr_data.apr_handle,
+					(uint32_t *)rtac_afe_buffer);
+	if (result < 0) {
+		pr_err("%s: Set params failed port = 0x%x, ret = %d\n",
+			__func__, user_afe_buf.port_id, result);
+		goto err;
+	}
+	/* Wait for the callback */
+	result = wait_event_timeout(rtac_afe_apr_data.cmd_wait,
+		(atomic_read(&rtac_afe_apr_data.cmd_state) == 0),
+		msecs_to_jiffies(TIMEOUT_MS));
+	if (!result) {
+		pr_err("%s: Set params timed out port = 0x%x, ret = %d\n",
+			__func__, user_afe_buf.port_id, result);
+		goto err;
+	}
+	if (atomic_read(&rtac_common.apr_err_code)) {
+		pr_err("%s: DSP returned error code = [%s], opcode = 0x%x\n",
+			__func__, adsp_err_get_err_str(atomic_read(
+			&rtac_common.apr_err_code)),
+			opcode);
+		result = adsp_err_get_lnx_err_code(
+					atomic_read(
+					&rtac_common.apr_err_code));
+		goto err;
+	}
+
+	if (opcode == AFE_PORT_CMD_GET_PARAM_V2) {
+		struct afe_port_param_data_v2 *get_resp;
+		get_resp = (struct afe_port_param_data_v2 *)
+				rtac_cal[AFE_RTAC_CAL].cal_data.kvaddr;
+
+		bytes_returned = get_resp->param_size +
+				sizeof(struct afe_port_param_data_v2);
+
+		if (bytes_returned > rtac_cal[AFE_RTAC_CAL].
+			map_data.map_size) {
+			pr_err("%s: Invalid data size = %d\n",
+				__func__, bytes_returned);
+			result = -EINVAL;
+			goto err;
+		}
+
+		if (bytes_returned > user_afe_buf.buf_size) {
+			pr_err("%s: user size = 0x%x, returned size = 0x%x\n",
+				__func__, user_afe_buf.buf_size,
+				bytes_returned);
+			result = -EINVAL;
+			goto err;
+		}
+
+		if (copy_to_user(buf, (void *)
+				rtac_cal[AFE_RTAC_CAL].cal_data.kvaddr,
+				bytes_returned)) {
+			pr_err("%s: Could not copy buffer to user,size = %d\n",
+				__func__, bytes_returned);
+			result = -EINVAL;
+			goto err;
+		}
+	} else {
+		bytes_returned = user_afe_buf.rtac_afe_set.cmd.payload_size;
+	}
+	mutex_unlock(&rtac_afe_apr_mutex);
+done:
+	return bytes_returned;
+err:
+	mutex_unlock(&rtac_afe_apr_mutex);
+	return result;
+}
+
+/* Voice APR */
+void rtac_set_voice_handle(u32 mode, void *handle)
+{
+	pr_debug("%s\n", __func__);
+
+	mutex_lock(&rtac_voice_apr_mutex);
+	rtac_voice_apr_data[mode].apr_handle = handle;
+	mutex_unlock(&rtac_voice_apr_mutex);
+}
+
+bool rtac_make_voice_callback(u32 mode, uint32_t *payload, u32 payload_size)
+{
+	if ((atomic_read(&rtac_voice_apr_data[mode].cmd_state) != 1) ||
+		(mode >= RTAC_VOICE_MODES))
+		return false;
+
+	pr_debug("%s\n", __func__);
+	if (payload_size == sizeof(uint32_t))
+		atomic_set(&rtac_common.apr_err_code, payload[0]);
+	else if (payload_size == (2*sizeof(uint32_t)))
+		atomic_set(&rtac_common.apr_err_code, payload[1]);
+
+	atomic_set(&rtac_voice_apr_data[mode].cmd_state, 0);
+	wake_up(&rtac_voice_apr_data[mode].cmd_wait);
+	return true;
+}
+
+int send_voice_apr(u32 mode, void *buf, u32 opcode)
+{
+	s32	result;
+	u32	user_buf_size = 0;
+	u32	bytes_returned = 0;
+	u32	payload_size;
+	u32	dest_port;
+	u32	data_size = 0;
+	struct apr_hdr		voice_params;
+	pr_debug("%s\n", __func__);
+
+	if (rtac_cal[VOICE_RTAC_CAL].map_data.ion_handle == NULL) {
+		result = rtac_allocate_cal_buffer(VOICE_RTAC_CAL);
+		if (result < 0) {
+			pr_err("%s: allocate buffer failed!",
+				__func__);
+			goto done;
+		}
+	}
+
+	if (rtac_cal[VOICE_RTAC_CAL].map_data.map_handle == 0) {
+		result = rtac_map_cal_buffer(VOICE_RTAC_CAL);
+		if (result < 0) {
+			pr_err("%s: map buffer failed!",
+				__func__);
+			goto done;
+		}
+	}
+
+	if (copy_from_user(&user_buf_size, (void *)buf,
+						sizeof(user_buf_size))) {
+		pr_err("%s: Copy from user failed! buf = 0x%pK\n",
+		       __func__, buf);
+		goto done;
+	}
+	if (user_buf_size <= 0) {
+		pr_err("%s: Invalid buffer size = %d\n",
+			__func__, user_buf_size);
+		goto done;
+	}
+
+	if (copy_from_user(&payload_size, buf + sizeof(u32), sizeof(u32))) {
+		pr_err("%s: Could not copy payload size from user buffer\n",
+			__func__);
+		goto done;
+	}
+
+	if (copy_from_user(&dest_port, buf + 2 * sizeof(u32), sizeof(u32))) {
+		pr_err("%s: Could not copy port id from user buffer\n",
+			__func__);
+		goto done;
+	}
+
+	if ((mode != RTAC_CVP) && (mode != RTAC_CVS)) {
+		pr_err("%s: Invalid Mode for APR, mode = %d\n",
+			__func__, mode);
+		goto done;
+	}
+
+	mutex_lock(&rtac_voice_apr_mutex);
+	if (rtac_voice_apr_data[mode].apr_handle == NULL) {
+		pr_err("%s: APR not initialized\n", __func__);
+		result = -EINVAL;
+		goto err;
+	}
+
+	if (opcode == VSS_ICOMMON_CMD_SET_PARAM_V2) {
+		/* set payload size to in-band payload */
+		/* set data size to actual out of band payload size */
+		data_size = payload_size - 4 * sizeof(u32);
+		if (data_size > rtac_cal[VOICE_RTAC_CAL].map_data.map_size) {
+			pr_err("%s: Invalid data size = %d\n",
+				__func__, data_size);
+			result = -EINVAL;
+			goto err;
+		}
+		payload_size = 4 * sizeof(u32);
+
+		/* Copy buffer to out-of-band payload */
+		if (copy_from_user((void *)
+				rtac_cal[VOICE_RTAC_CAL].cal_data.kvaddr,
+				buf + 7 * sizeof(u32), data_size)) {
+			pr_err("%s: Could not copy payload from user buffer\n",
+				__func__);
+			result = -EINVAL;
+			goto err;
+		}
+		/* set payload size in packet */
+		rtac_voice_buffer[8] = data_size;
+	} else {
+		if (payload_size > MAX_PAYLOAD_SIZE) {
+			pr_err("%s: Invalid payload size = %d\n",
+					__func__, payload_size);
+			result = -EINVAL;
+			goto err;
+		}
+
+		/* Copy buffer to in-band payload */
+		if (copy_from_user(rtac_voice_buffer +
+				sizeof(voice_params)/sizeof(u32),
+				buf + 3 * sizeof(u32), payload_size)) {
+			pr_err("%s: Could not copy payload from user buffer\n",
+				__func__);
+			result = -EINVAL;
+			goto err;
+		}
+	}
+
+	/* Pack header */
+	voice_params.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+		APR_HDR_LEN(20), APR_PKT_VER);
+	voice_params.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE,
+		payload_size);
+	voice_params.src_svc = 0;
+	voice_params.src_domain = APR_DOMAIN_APPS;
+	voice_params.src_port = get_voice_index(mode, dest_port);
+	voice_params.dest_svc = 0;
+	voice_params.dest_domain = APR_DOMAIN_MODEM;
+	voice_params.dest_port = (u16)dest_port;
+	voice_params.token = (opcode == VSS_ICOMMON_CMD_SET_PARAM_V2) ?
+				     VOC_RTAC_SET_PARAM_TOKEN :
+				     0;
+	voice_params.opcode = opcode;
+
+	/* fill for out-of-band */
+	rtac_voice_buffer[5] = rtac_cal[VOICE_RTAC_CAL].map_data.map_handle;
+	rtac_voice_buffer[6] =
+		lower_32_bits(rtac_cal[VOICE_RTAC_CAL].cal_data.paddr);
+	rtac_voice_buffer[7] =
+		msm_audio_populate_upper_32_bits(
+				rtac_cal[VOICE_RTAC_CAL].cal_data.paddr);
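+
+	/*
+	 * Resulting packet layout in 32-bit words (the 20-byte APR header,
+	 * APR_HDR_LEN(20), occupies words 0..4 and is written by the memcpy
+	 * below):
+	 *   [0..4] APR header (voice_params)
+	 *   [5]    shared memory map handle
+	 *   [6]    payload physical address, lower 32 bits
+	 *   [7]    payload physical address, upper 32 bits
+	 *   [8]    out-of-band payload size (set earlier, SET_PARAM path only)
+	 */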
+
+	memcpy(rtac_voice_buffer, &voice_params, sizeof(voice_params));
+	atomic_set(&rtac_voice_apr_data[mode].cmd_state, 1);
+
+	pr_debug("%s: Sending RTAC command ioctl 0x%x, paddr 0x%pK\n",
+		__func__, opcode,
+		&rtac_cal[VOICE_RTAC_CAL].cal_data.paddr);
+
+	result = apr_send_pkt(rtac_voice_apr_data[mode].apr_handle,
+					(uint32_t *)rtac_voice_buffer);
+	if (result < 0) {
+		pr_err("%s: apr_send_pkt failed opcode = %x\n",
+			__func__, opcode);
+		goto err;
+	}
+	/* Wait for the callback */
+	result = wait_event_timeout(rtac_voice_apr_data[mode].cmd_wait,
+		(atomic_read(&rtac_voice_apr_data[mode].cmd_state) == 0),
+		msecs_to_jiffies(TIMEOUT_MS));
+	if (!result) {
+		pr_err("%s: apr_send_pkt timed out opcode = %x\n",
+			__func__, opcode);
+		goto err;
+	}
+	if (atomic_read(&rtac_common.apr_err_code)) {
+		pr_err("%s: DSP returned error code = [%s], opcode = 0x%x\n",
+			__func__, adsp_err_get_err_str(atomic_read(
+			&rtac_common.apr_err_code)),
+			opcode);
+		result = adsp_err_get_lnx_err_code(
+					atomic_read(
+					&rtac_common.apr_err_code));
+		goto err;
+	}
+
+	if (opcode == VSS_ICOMMON_CMD_GET_PARAM_V2) {
+		bytes_returned =
+			((u32 *)rtac_cal[VOICE_RTAC_CAL].cal_data.kvaddr)[2] +
+			3 * sizeof(u32);
+
+		if (bytes_returned >
+				rtac_cal[VOICE_RTAC_CAL].map_data.map_size) {
+			pr_err("%s: Invalid data size = %d\n",
+				__func__, bytes_returned);
+			result = -EINVAL;
+			goto err;
+		}
+
+		if (bytes_returned > user_buf_size) {
+			pr_err("%s: User buf not big enough, size = 0x%x, returned size = 0x%x\n",
+				__func__, user_buf_size, bytes_returned);
+			result = -EINVAL;
+			goto err;
+		}
+
+		if (copy_to_user(buf, (void *)
+				rtac_cal[VOICE_RTAC_CAL].cal_data.kvaddr,
+				bytes_returned)) {
+			pr_err("%s: Could not copy buffer to user, size = %d\n",
+				 __func__, bytes_returned);
+			result = -EINVAL;
+			goto err;
+		}
+	} else {
+		bytes_returned = data_size;
+	}
+	mutex_unlock(&rtac_voice_apr_mutex);
+done:
+	return bytes_returned;
+err:
+	mutex_unlock(&rtac_voice_apr_mutex);
+	return result;
+}
+
+void get_rtac_adm_data(struct rtac_adm *adm_data)
+{
+	mutex_lock(&rtac_adm_mutex);
+	memcpy(adm_data, &rtac_adm_data, sizeof(struct rtac_adm));
+	mutex_unlock(&rtac_adm_mutex);
+}
+
+
+static long rtac_ioctl_shared(struct file *f,
+		unsigned int cmd, void *arg)
+{
+	int result = 0;
+	if (!arg) {
+		pr_err("%s: No data sent to driver!\n", __func__);
+		result = -EFAULT;
+		goto done;
+	}
+
+	switch (cmd) {
+	case AUDIO_GET_RTAC_ADM_INFO: {
+		mutex_lock(&rtac_adm_mutex);
+		if (copy_to_user((void *)arg, &rtac_adm_data,
+						sizeof(rtac_adm_data))) {
+			pr_err("%s: copy_to_user failed for AUDIO_GET_RTAC_ADM_INFO\n",
+					__func__);
+			mutex_unlock(&rtac_adm_mutex);
+			return -EFAULT;
+		} else {
+			result = sizeof(rtac_adm_data);
+		}
+		mutex_unlock(&rtac_adm_mutex);
+		break;
+	}
+	case AUDIO_GET_RTAC_VOICE_INFO: {
+		mutex_lock(&rtac_voice_mutex);
+		if (copy_to_user((void *)arg, &rtac_voice_data,
+						sizeof(rtac_voice_data))) {
+			pr_err("%s: copy_to_user failed for AUDIO_GET_RTAC_VOICE_INFO\n",
+					__func__);
+			mutex_unlock(&rtac_voice_mutex);
+			return -EFAULT;
+		} else {
+			result = sizeof(rtac_voice_data);
+		}
+		mutex_unlock(&rtac_voice_mutex);
+		break;
+	}
+
+	case AUDIO_GET_RTAC_ADM_CAL:
+		result = send_adm_apr((void *)arg, ADM_CMD_GET_PP_PARAMS_V5);
+		break;
+	case AUDIO_SET_RTAC_ADM_CAL:
+		result = send_adm_apr((void *)arg, ADM_CMD_SET_PP_PARAMS_V5);
+		break;
+	case AUDIO_GET_RTAC_ASM_CAL:
+		result = send_rtac_asm_apr((void *)arg,
+			ASM_STREAM_CMD_GET_PP_PARAMS_V2);
+		break;
+	case AUDIO_SET_RTAC_ASM_CAL:
+		result = send_rtac_asm_apr((void *)arg,
+			ASM_STREAM_CMD_SET_PP_PARAMS_V2);
+		break;
+	case AUDIO_GET_RTAC_CVS_CAL:
+		result = send_voice_apr(RTAC_CVS, (void *) arg,
+					VSS_ICOMMON_CMD_GET_PARAM_V2);
+		break;
+	case AUDIO_SET_RTAC_CVS_CAL:
+		result = send_voice_apr(RTAC_CVS, (void *) arg,
+					VSS_ICOMMON_CMD_SET_PARAM_V2);
+		break;
+	case AUDIO_GET_RTAC_CVP_CAL:
+		result = send_voice_apr(RTAC_CVP, (void *) arg,
+					VSS_ICOMMON_CMD_GET_PARAM_V2);
+		break;
+	case AUDIO_SET_RTAC_CVP_CAL:
+		result = send_voice_apr(RTAC_CVP, (void *) arg,
+					VSS_ICOMMON_CMD_SET_PARAM_V2);
+		break;
+	case AUDIO_GET_RTAC_AFE_CAL:
+		result = send_rtac_afe_apr((void *)arg,
+			AFE_PORT_CMD_GET_PARAM_V2);
+		break;
+	case AUDIO_SET_RTAC_AFE_CAL:
+		result = send_rtac_afe_apr((void *)arg,
+			AFE_PORT_CMD_SET_PARAM_V2);
+		break;
+	default:
+		pr_err("%s: Invalid IOCTL, command = %d!\n",
+		       __func__, cmd);
+		result = -EINVAL;
+	}
+done:
+	return result;
+}
+
+static long rtac_ioctl(struct file *f,
+		unsigned int cmd, unsigned long arg)
+{
+	int result = 0;
+
+	mutex_lock(&rtac_common.rtac_fops_mutex);
+	if (!arg) {
+		pr_err("%s: No data sent to driver!\n", __func__);
+		result = -EFAULT;
+	} else {
+		result = rtac_ioctl_shared(f, cmd, (void __user *)arg);
+	}
+
+	mutex_unlock(&rtac_common.rtac_fops_mutex);
+	return result;
+}
+
+#ifdef CONFIG_COMPAT
+#define AUDIO_GET_RTAC_ADM_INFO_32   _IOR(CAL_IOCTL_MAGIC, 207, compat_uptr_t)
+#define AUDIO_GET_RTAC_VOICE_INFO_32 _IOR(CAL_IOCTL_MAGIC, 208, compat_uptr_t)
+#define AUDIO_GET_RTAC_ADM_CAL_32 _IOWR(CAL_IOCTL_MAGIC, 209, compat_uptr_t)
+#define AUDIO_SET_RTAC_ADM_CAL_32 _IOWR(CAL_IOCTL_MAGIC, 210, compat_uptr_t)
+#define AUDIO_GET_RTAC_ASM_CAL_32 _IOWR(CAL_IOCTL_MAGIC, 211, compat_uptr_t)
+#define AUDIO_SET_RTAC_ASM_CAL_32 _IOWR(CAL_IOCTL_MAGIC, 212, compat_uptr_t)
+#define AUDIO_GET_RTAC_CVS_CAL_32 _IOWR(CAL_IOCTL_MAGIC, 213, compat_uptr_t)
+#define AUDIO_SET_RTAC_CVS_CAL_32 _IOWR(CAL_IOCTL_MAGIC, 214, compat_uptr_t)
+#define AUDIO_GET_RTAC_CVP_CAL_32 _IOWR(CAL_IOCTL_MAGIC, 215, compat_uptr_t)
+#define AUDIO_SET_RTAC_CVP_CAL_32 _IOWR(CAL_IOCTL_MAGIC, 216, compat_uptr_t)
+#define AUDIO_GET_RTAC_AFE_CAL_32 _IOWR(CAL_IOCTL_MAGIC, 217, compat_uptr_t)
+#define AUDIO_SET_RTAC_AFE_CAL_32 _IOWR(CAL_IOCTL_MAGIC, 218, compat_uptr_t)
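+
+/*
+ * Each _32 command packs direction (read and/or write), type
+ * (CAL_IOCTL_MAGIC), a command number and the argument size into one
+ * 32-bit word via _IOR()/_IOWR(); e.g. AUDIO_GET_RTAC_ADM_CAL_32 encodes
+ * nr 209 with size = sizeof(compat_uptr_t) (4 bytes).  Presumably only
+ * that size field differs from the native commands, which is why the
+ * switch below simply translates each _32 value before calling
+ * rtac_ioctl_shared().
+ */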
+
+static long rtac_compat_ioctl(struct file *f,
+		unsigned int cmd, unsigned long arg)
+{
+	int result = 0;
+
+	mutex_lock(&rtac_common.rtac_fops_mutex);
+	if (!arg) {
+		pr_err("%s: No data sent to driver!\n", __func__);
+		result = -EINVAL;
+		goto done;
+	}
+
+	switch (cmd) {
+	case AUDIO_GET_RTAC_ADM_INFO_32:
+		cmd = AUDIO_GET_RTAC_ADM_INFO;
+		goto process;
+	case AUDIO_GET_RTAC_VOICE_INFO_32:
+		cmd = AUDIO_GET_RTAC_VOICE_INFO;
+		goto process;
+	case AUDIO_GET_RTAC_AFE_CAL_32:
+		cmd = AUDIO_GET_RTAC_AFE_CAL;
+		goto process;
+	case AUDIO_SET_RTAC_AFE_CAL_32:
+		cmd = AUDIO_SET_RTAC_AFE_CAL;
+		goto process;
+	case AUDIO_GET_RTAC_ADM_CAL_32:
+		cmd = AUDIO_GET_RTAC_ADM_CAL;
+		goto process;
+	case AUDIO_SET_RTAC_ADM_CAL_32:
+		cmd = AUDIO_SET_RTAC_ADM_CAL;
+		goto process;
+	case AUDIO_GET_RTAC_ASM_CAL_32:
+		cmd = AUDIO_GET_RTAC_ASM_CAL;
+		goto process;
+	case AUDIO_SET_RTAC_ASM_CAL_32:
+		cmd = AUDIO_SET_RTAC_ASM_CAL;
+		goto process;
+	case AUDIO_GET_RTAC_CVS_CAL_32:
+		cmd = AUDIO_GET_RTAC_CVS_CAL;
+		goto process;
+	case AUDIO_SET_RTAC_CVS_CAL_32:
+		cmd = AUDIO_SET_RTAC_CVS_CAL;
+		goto process;
+	case AUDIO_GET_RTAC_CVP_CAL_32:
+		cmd = AUDIO_GET_RTAC_CVP_CAL;
+		goto process;
+	case AUDIO_SET_RTAC_CVP_CAL_32:
+		cmd = AUDIO_SET_RTAC_CVP_CAL;
+process:
+		result = rtac_ioctl_shared(f, cmd, compat_ptr(arg));
+		break;
+	default:
+		result = -EINVAL;
+		pr_err("%s: Invalid IOCTL, command = %d!\n",
+		       __func__, cmd);
+		break;
+	}
+done:
+	mutex_unlock(&rtac_common.rtac_fops_mutex);
+	return result;
+}
+#else
+#define rtac_compat_ioctl NULL
+#endif
+
+static const struct file_operations rtac_fops = {
+	.owner = THIS_MODULE,
+	.open = rtac_open,
+	.release = rtac_release,
+	.unlocked_ioctl = rtac_ioctl,
+	.compat_ioctl = rtac_compat_ioctl,
+};
+
+struct miscdevice rtac_misc = {
+	.minor	= MISC_DYNAMIC_MINOR,
+	.name	= "msm_rtac",
+	.fops	= &rtac_fops,
+};
+
+static int __init rtac_init(void)
+{
+	int i = 0;
+
+	/* Driver */
+	atomic_set(&rtac_common.usage_count, 0);
+	atomic_set(&rtac_common.apr_err_code, 0);
+	mutex_init(&rtac_common.rtac_fops_mutex);
+
+	/* ADM */
+	memset(&rtac_adm_data, 0, sizeof(rtac_adm_data));
+	rtac_adm_apr_data.apr_handle = NULL;
+	atomic_set(&rtac_adm_apr_data.cmd_state, 0);
+	init_waitqueue_head(&rtac_adm_apr_data.cmd_wait);
+	mutex_init(&rtac_adm_mutex);
+	mutex_init(&rtac_adm_apr_mutex);
+
+	rtac_adm_buffer = kzalloc(
+		rtac_cal[ADM_RTAC_CAL].map_data.map_size, GFP_KERNEL);
+	if (rtac_adm_buffer == NULL) {
+		pr_err("%s: Could not allocate payload of size = %d\n",
+			__func__, rtac_cal[ADM_RTAC_CAL].map_data.map_size);
+		goto nomem;
+	}
+
+	/* ASM */
+	for (i = 0; i < ASM_ACTIVE_STREAMS_ALLOWED+1; i++) {
+		rtac_asm_apr_data[i].apr_handle = NULL;
+		atomic_set(&rtac_asm_apr_data[i].cmd_state, 0);
+		init_waitqueue_head(&rtac_asm_apr_data[i].cmd_wait);
+	}
+	mutex_init(&rtac_asm_apr_mutex);
+
+	rtac_asm_buffer = kzalloc(
+		rtac_cal[ASM_RTAC_CAL].map_data.map_size, GFP_KERNEL);
+	if (rtac_asm_buffer == NULL) {
+		pr_err("%s: Could not allocate payload of size = %d\n",
+			__func__, rtac_cal[ASM_RTAC_CAL].map_data.map_size);
+		kzfree(rtac_adm_buffer);
+		goto nomem;
+	}
+
+	/* AFE */
+	rtac_afe_apr_data.apr_handle = NULL;
+	atomic_set(&rtac_afe_apr_data.cmd_state, 0);
+	init_waitqueue_head(&rtac_afe_apr_data.cmd_wait);
+	mutex_init(&rtac_afe_apr_mutex);
+
+	rtac_afe_buffer = kzalloc(
+		rtac_cal[AFE_RTAC_CAL].map_data.map_size, GFP_KERNEL);
+	if (rtac_afe_buffer == NULL) {
+		pr_err("%s: Could not allocate payload of size = %d\n",
+			__func__, rtac_cal[AFE_RTAC_CAL].map_data.map_size);
+		kzfree(rtac_adm_buffer);
+		kzfree(rtac_asm_buffer);
+		goto nomem;
+	}
+
+	/* Voice */
+	memset(&rtac_voice_data, 0, sizeof(rtac_voice_data));
+	for (i = 0; i < RTAC_VOICE_MODES; i++) {
+		rtac_voice_apr_data[i].apr_handle = NULL;
+		atomic_set(&rtac_voice_apr_data[i].cmd_state, 0);
+		init_waitqueue_head(&rtac_voice_apr_data[i].cmd_wait);
+	}
+	mutex_init(&rtac_voice_mutex);
+	mutex_init(&rtac_voice_apr_mutex);
+
+	rtac_voice_buffer = kzalloc(
+		rtac_cal[VOICE_RTAC_CAL].map_data.map_size, GFP_KERNEL);
+	if (rtac_voice_buffer == NULL) {
+		pr_err("%s: Could not allocate payload of size = %d\n",
+			__func__, rtac_cal[VOICE_RTAC_CAL].map_data.map_size);
+		kzfree(rtac_adm_buffer);
+		kzfree(rtac_asm_buffer);
+		kzfree(rtac_afe_buffer);
+		goto nomem;
+	}
+
+	return misc_register(&rtac_misc);
+nomem:
+	return -ENOMEM;
+}
+
+module_init(rtac_init);
+
+MODULE_DESCRIPTION("SoC QDSP6v2 Real-Time Audio Calibration driver");
+MODULE_LICENSE("GPL v2");
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/sound/usb/badd.c	2019-01-22 16:16:29.691302442 +0100
@@ -0,0 +1,137 @@
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/usb.h>
+#include <linux/usb/audio.h>
+#include <linux/usb/audio-v2.h>
+#include <linux/usb/audio-v3.h>
+
+struct uac3_input_terminal_descriptor badd_baif_in_term_desc = {
+	.bLength = UAC3_DT_INPUT_TERMINAL_SIZE,
+	.bDescriptorType = USB_DT_CS_INTERFACE,
+	.bDescriptorSubtype = UAC_INPUT_TERMINAL,
+	.bTerminalID = BADD_IN_TERM_ID_BAIF,
+	.bCSourceID = BADD_CLOCK_SOURCE,
+	.wExTerminalDescrID = 0x0000,
+	.wTerminalDescrStr = 0x0000
+};
+
+struct uac3_input_terminal_descriptor badd_baof_in_term_desc = {
+	.bLength = UAC3_DT_INPUT_TERMINAL_SIZE,
+	.bDescriptorType = USB_DT_CS_INTERFACE,
+	.bDescriptorSubtype = UAC_INPUT_TERMINAL,
+	.bTerminalID = BADD_IN_TERM_ID_BAOF,
+	.wTerminalType = UAC_TERMINAL_STREAMING,
+	.bAssocTerminal = 0x00,
+	.bCSourceID = BADD_CLOCK_SOURCE,
+	.bmControls = 0x00000000,
+	.wExTerminalDescrID = 0x0000,
+	.wConnectorsDescrID = 0x0000,
+	.wTerminalDescrStr = 0x0000
+};
+
+struct uac3_output_terminal_descriptor badd_baif_out_term_desc = {
+	.bLength = UAC3_DT_OUTPUT_TERMINAL_SIZE,
+	.bDescriptorType = USB_DT_CS_INTERFACE,
+	.bDescriptorSubtype = UAC_OUTPUT_TERMINAL,
+	.bTerminalID = BADD_OUT_TERM_ID_BAIF,
+	.wTerminalType = UAC_TERMINAL_STREAMING,
+	.bAssocTerminal = 0x00,		/* No associated terminal */
+	.bSourceID = BADD_FU_ID_BAIF,
+	.bCSourceID = BADD_CLOCK_SOURCE,
+	.bmControls = 0x00000000,	/* No controls */
+	.wExTerminalDescrID = 0x0000,
+	.wConnectorsDescrID = 0x0000,
+	.wTerminalDescrStr = 0x0000
+};
+
+struct uac3_output_terminal_descriptor badd_baof_out_term_desc = {
+	.bLength = UAC3_DT_OUTPUT_TERMINAL_SIZE,
+	.bDescriptorType = USB_DT_CS_INTERFACE,
+	.bDescriptorSubtype = UAC_OUTPUT_TERMINAL,
+	.bTerminalID = BADD_OUT_TERM_ID_BAOF,
+	.bSourceID = BADD_FU_ID_BAOF,
+	.bCSourceID = BADD_CLOCK_SOURCE,
+	.wExTerminalDescrID = 0x0000,
+	.wTerminalDescrStr = 0x0000
+};
+
+__u8 monoControls[] = {
+	0x03, 0x00, 0x00, 0x00,
+	0x0c, 0x00, 0x00, 0x00};
+
+__u8 stereoControls[] = {
+	0x03, 0x00, 0x00, 0x00,
+	0x0c, 0x00, 0x00, 0x00,
+	0x0c, 0x00, 0x00, 0x00
+};
+
+__u8 badd_mu_src_ids[] = {BADD_IN_TERM_ID_BAOF, BADD_FU_ID_BAIOF};
+
+struct uac3_mixer_unit_descriptor badd_baiof_mu_desc = {
+	.bLength = UAC3_DT_MIXER_UNIT_SIZE,
+	.bDescriptorType = USB_DT_CS_INTERFACE,
+	.bDescriptorSubtype = UAC3_MIXER_UNIT_V3,
+	.bUnitID = BADD_MU_ID_BAIOF,
+	.bNrInPins = 0x02,
+	.baSourceID = badd_mu_src_ids,
+	.bmMixerControls = 0x00,
+	.bmControls = 0x00000000,
+	.wMixerDescrStr = 0x0000
+};
+
+struct uac3_feature_unit_descriptor badd_baif_fu_desc = {
+	.bDescriptorType = USB_DT_CS_INTERFACE,
+	.bDescriptorSubtype = UAC3_FEATURE_UNIT_V3,
+	.bUnitID = BADD_FU_ID_BAIF,
+	.bSourceID = BADD_IN_TERM_ID_BAIF,
+	.wFeatureDescrStr = 0x0000
+};
+
+struct uac3_feature_unit_descriptor badd_baof_fu_desc = {
+	.bDescriptorType = USB_DT_CS_INTERFACE,
+	.bDescriptorSubtype = UAC3_FEATURE_UNIT_V3,
+	.bUnitID = BADD_FU_ID_BAOF,
+	.wFeatureDescrStr = 0x0000
+};
+
+struct uac3_feature_unit_descriptor badd_baiof_fu_desc = {
+	.bLength = 0x0f,
+	.bDescriptorType = USB_DT_CS_INTERFACE,
+	.bDescriptorSubtype = UAC3_FEATURE_UNIT_V3,
+	.bUnitID = BADD_FU_ID_BAIOF,
+	.bSourceID = BADD_IN_TERM_ID_BAIF,
+	.bmaControls = monoControls,
+	.wFeatureDescrStr = 0x0000
+};
+
+struct uac3_clock_source_descriptor badd_clock_desc = {
+	.bLength = UAC3_DT_CLOCK_SRC_SIZE,
+	.bDescriptorType = USB_DT_CS_INTERFACE,
+	.bDescriptorSubtype = UAC3_CLOCK_SOURCE,
+	.bClockID = BADD_CLOCK_SOURCE,
+	.bmControls = 0x00000001,
+	.bReferenceTerminal = 0x00,
+	.wClockSourceStr = 0x0000
+};
+
+void *badd_desc_list[] = {
+	&badd_baif_in_term_desc,
+	&badd_baof_in_term_desc,
+	&badd_baiof_mu_desc,
+	&badd_baif_fu_desc,
+	&badd_baof_fu_desc,
+	&badd_baiof_fu_desc,
+	&badd_clock_desc
+};
+
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/sound/usb/usb_audio_qmi_svc.c	2019-10-29 09:26:26.197228172 +0100
@@ -0,0 +1,1346 @@
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/delay.h>
+#include <linux/debugfs.h>
+#include <linux/usb/audio.h>
+#include <linux/usb/audio-v2.h>
+#include <linux/uaccess.h>
+#include <sound/pcm.h>
+#include <sound/core.h>
+#include <sound/asound.h>
+#include <linux/usb.h>
+#include <linux/qmi_encdec.h>
+#include <soc/qcom/msm_qmi_interface.h>
+#include <linux/iommu.h>
+#include <linux/qcom_iommu.h>
+#include <linux/platform_device.h>
+#include <linux/usb/audio-v3.h>
+
+#include "usbaudio.h"
+#include "card.h"
+#include "helper.h"
+#include "pcm.h"
+#include "usb_audio_qmi_v01.h"
+
+#define SND_PCM_CARD_NUM_MASK 0xffff0000
+#define SND_PCM_DEV_NUM_MASK 0xff00
+#define SND_PCM_STREAM_DIRECTION 0xff
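+
+/*
+ * Worked example: a usb_token of 0x00020101 decodes as pcm card 2
+ * ((token & SND_PCM_CARD_NUM_MASK) >> 16), pcm device 1
+ * ((token & SND_PCM_DEV_NUM_MASK) >> 8) and direction 1
+ * (token & SND_PCM_STREAM_DIRECTION; 1 is capture in ALSA numbering).
+ */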
+
+#define PREPEND_SID_TO_IOVA(iova, sid) (u64)(((u64)(iova)) | \
+					(((u64)sid) << 32))
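+
+/*
+ * Example: with sid 0x1 and iova 0x1000, PREPEND_SID_TO_IOVA() yields
+ * 0x0000000100001000 -- the stream ID rides in bits 63:32 above the
+ * 32-bit iova handed to the remote processor.
+ */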
+
+/*  event ring iova base address */
+#define IOVA_BASE 0x1000
+
+#define IOVA_XFER_RING_BASE (IOVA_BASE + PAGE_SIZE * (SNDRV_CARDS + 1))
+#define IOVA_XFER_BUF_BASE (IOVA_XFER_RING_BASE + PAGE_SIZE * SNDRV_CARDS * 32)
+#define IOVA_XFER_RING_MAX (IOVA_XFER_BUF_BASE - PAGE_SIZE)
+#define IOVA_XFER_BUF_MAX (0xfffff000 - PAGE_SIZE)
+
+#define MAX_XFER_BUFF_LEN (24 * PAGE_SIZE)
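+
+/*
+ * Worked layout, assuming 4 KiB pages and SNDRV_CARDS == 8 (both are
+ * config-dependent): the event ring sits at IOVA_BASE 0x1000, transfer
+ * rings start at 0x1000 + 4096 * 9 = 0xa000, and transfer buffers start
+ * at 0xa000 + 4096 * 8 * 32 = 0x10a000, growing up to just below
+ * 0xfffff000.  MAX_XFER_BUFF_LEN caps a single buffer at 96 KiB.
+ */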
+
+struct iova_info {
+	struct list_head list;
+	unsigned long start_iova;
+	size_t size;
+	bool in_use;
+};
+
+struct intf_info {
+	unsigned long data_xfer_ring_va;
+	size_t data_xfer_ring_size;
+	unsigned long sync_xfer_ring_va;
+	size_t sync_xfer_ring_size;
+	unsigned long xfer_buf_va;
+	size_t xfer_buf_size;
+	phys_addr_t xfer_buf_pa;
+	u8 *xfer_buf;
+	u8 intf_num;
+	u8 pcm_card_num;
+	u8 pcm_dev_num;
+	u8 direction;
+	bool in_use;
+};
+
+struct uaudio_dev {
+	struct usb_device *udev;
+	/* audio control interface */
+	struct usb_host_interface *ctrl_intf;
+	unsigned int card_num;
+	unsigned int usb_core_id;
+	atomic_t in_use;
+	struct kref kref;
+	wait_queue_head_t disconnect_wq;
+
+	/* interface specific */
+	int num_intf;
+	struct intf_info *info;
+};
+
+static struct uaudio_dev uadev[SNDRV_CARDS];
+
+struct uaudio_qmi_dev {
+	struct device *dev;
+	u32 sid;
+	u32 intr_num;
+	struct iommu_domain *domain;
+
+	/* list to keep track of available iova */
+	struct list_head xfer_ring_list;
+	size_t xfer_ring_iova_size;
+	unsigned long curr_xfer_ring_iova;
+	struct list_head xfer_buf_list;
+	size_t xfer_buf_iova_size;
+	unsigned long curr_xfer_buf_iova;
+	/* bit fields representing pcm card enabled */
+	unsigned long card_slot;
+	/* cache event ring phys addr */
+	u64 er_phys_addr;
+};
+
+static struct uaudio_qmi_dev *uaudio_qdev;
+
+struct uaudio_qmi_svc {
+	struct qmi_handle *uaudio_svc_hdl;
+	void *curr_conn;
+	struct work_struct recv_msg_work;
+	struct work_struct qmi_disconnect_work;
+	struct workqueue_struct *uaudio_wq;
+	ktime_t t_request_recvd;
+	ktime_t t_resp_sent;
+};
+
+static struct uaudio_qmi_svc *uaudio_svc;
+
+static struct msg_desc uaudio_stream_req_desc = {
+	.max_msg_len = QMI_UAUDIO_STREAM_REQ_MSG_V01_MAX_MSG_LEN,
+	.msg_id = QMI_UAUDIO_STREAM_REQ_V01,
+	.ei_array = qmi_uaudio_stream_req_msg_v01_ei,
+};
+
+static struct msg_desc uaudio_stream_resp_desc = {
+	.max_msg_len = QMI_UAUDIO_STREAM_RESP_MSG_V01_MAX_MSG_LEN,
+	.msg_id = QMI_UAUDIO_STREAM_RESP_V01,
+	.ei_array = qmi_uaudio_stream_resp_msg_v01_ei,
+};
+
+static struct msg_desc uaudio_stream_ind_desc = {
+	.max_msg_len = QMI_UAUDIO_STREAM_IND_MSG_V01_MAX_MSG_LEN,
+	.msg_id = QMI_UADUIO_STREAM_IND_V01,
+	.ei_array = qmi_uaudio_stream_ind_msg_v01_ei,
+};
+
+enum mem_type {
+	MEM_EVENT_RING,
+	MEM_XFER_RING,
+	MEM_XFER_BUF,
+};
+
+enum usb_qmi_audio_format {
+	USB_QMI_PCM_FORMAT_S8 = 0,
+	USB_QMI_PCM_FORMAT_U8,
+	USB_QMI_PCM_FORMAT_S16_LE,
+	USB_QMI_PCM_FORMAT_S16_BE,
+	USB_QMI_PCM_FORMAT_U16_LE,
+	USB_QMI_PCM_FORMAT_U16_BE,
+	USB_QMI_PCM_FORMAT_S24_LE,
+	USB_QMI_PCM_FORMAT_S24_BE,
+	USB_QMI_PCM_FORMAT_U24_LE,
+	USB_QMI_PCM_FORMAT_U24_BE,
+	USB_QMI_PCM_FORMAT_S24_3LE,
+	USB_QMI_PCM_FORMAT_S24_3BE,
+	USB_QMI_PCM_FORMAT_U24_3LE,
+	USB_QMI_PCM_FORMAT_U24_3BE,
+	USB_QMI_PCM_FORMAT_S32_LE,
+	USB_QMI_PCM_FORMAT_S32_BE,
+	USB_QMI_PCM_FORMAT_U32_LE,
+	USB_QMI_PCM_FORMAT_U32_BE,
+};
+
+static enum usb_audio_device_speed_enum_v01
+get_speed_info(enum usb_device_speed udev_speed)
+{
+	switch (udev_speed) {
+	case USB_SPEED_LOW:
+		return USB_AUDIO_DEVICE_SPEED_LOW_V01;
+	case USB_SPEED_FULL:
+		return USB_AUDIO_DEVICE_SPEED_FULL_V01;
+	case USB_SPEED_HIGH:
+		return USB_AUDIO_DEVICE_SPEED_HIGH_V01;
+	case USB_SPEED_SUPER:
+		return USB_AUDIO_DEVICE_SPEED_SUPER_V01;
+	case USB_SPEED_SUPER_PLUS:
+		return USB_AUDIO_DEVICE_SPEED_SUPER_PLUS_V01;
+	default:
+		pr_err("%s: udev speed %d\n", __func__, udev_speed);
+		return USB_AUDIO_DEVICE_SPEED_INVALID_V01;
+	}
+}
+
+static unsigned long uaudio_get_iova(unsigned long *curr_iova,
+	size_t *curr_iova_size, struct list_head *head, size_t size)
+{
+	struct iova_info *info, *new_info = NULL;
+	struct list_head *curr_head;
+	unsigned long va = 0;
+	size_t tmp_size = size;
+	bool found = false;
+
+	if (size % PAGE_SIZE) {
+		pr_err("%s: size %zu is not page size multiple\n", __func__,
+			size);
+		goto done;
+	}
+
+	if (size > *curr_iova_size) {
+		pr_err("%s: size %zu > curr size %zu\n", __func__, size,
+			*curr_iova_size);
+		goto done;
+	}
+	if (*curr_iova_size == 0) {
+		pr_err("%s: iova mapping is full\n", __func__);
+		goto done;
+	}
+
+	list_for_each_entry(info, head, list) {
+		/* exact size iova_info */
+		if (!info->in_use && info->size == size) {
+			info->in_use = true;
+			va = info->start_iova;
+			*curr_iova_size -= size;
+			found = true;
+			pr_debug("%s: exact size :%zu found\n", __func__, size);
+			goto done;
+		} else if (!info->in_use && tmp_size >= info->size) {
+			if (!new_info)
+				new_info = info;
+			pr_debug("%s: partial size: %zu found\n", __func__,
+				info->size);
+			tmp_size -= info->size;
+			if (tmp_size)
+				continue;
+
+			va = new_info->start_iova;
+			for (curr_head = &new_info->list; curr_head !=
+			&info->list; curr_head = curr_head->next) {
+				new_info = list_entry(curr_head, struct
+						iova_info, list);
+				new_info->in_use = true;
+			}
+			info->in_use = true;
+			*curr_iova_size -= size;
+			found = true;
+			goto done;
+		} else {
+			/* iova region in use */
+			new_info = NULL;
+			tmp_size = size;
+		}
+	}
+
+	info = kzalloc(sizeof(struct iova_info), GFP_KERNEL);
+	if (!info) {
+		va = 0;
+		goto done;
+	}
+
+	va = info->start_iova = *curr_iova;
+	info->size = size;
+	info->in_use = true;
+	*curr_iova += size;
+	*curr_iova_size -= size;
+	found = true;
+	list_add_tail(&info->list, head);
+
+done:
+	if (!found)
+		pr_err("%s: unable to find %zu size iova\n", __func__, size);
+	else
+		pr_debug("%s: va:%lu curr_iova:%lu curr_iova_size:%zu\n",
+		__func__, va, *curr_iova, *curr_iova_size);
+
+	return va;
+}
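+
+/*
+ * Minimal usage sketch (hypothetical call, mirroring the MEM_XFER_RING
+ * case below):
+ *
+ *	va = uaudio_get_iova(&uaudio_qdev->curr_xfer_ring_iova,
+ *			&uaudio_qdev->xfer_ring_iova_size,
+ *			&uaudio_qdev->xfer_ring_list, PAGE_SIZE);
+ *
+ * The allocator prefers an exact-size free entry, then a run of adjacent
+ * free entries large enough in total, and only then carves a fresh region
+ * off the pool cursor; a return of 0 means no iova was available.
+ */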
+
+static unsigned long uaudio_iommu_map(enum mem_type mtype, phys_addr_t pa,
+		size_t size)
+{
+	unsigned long va = 0;
+	bool map = true;
+	int ret;
+
+	switch (mtype) {
+	case MEM_EVENT_RING:
+		va = IOVA_BASE;
+		/* er already mapped */
+		if (uaudio_qdev->er_phys_addr == pa)
+			map = false;
+		break;
+	case MEM_XFER_RING:
+		va = uaudio_get_iova(&uaudio_qdev->curr_xfer_ring_iova,
+		&uaudio_qdev->xfer_ring_iova_size, &uaudio_qdev->xfer_ring_list,
+		size);
+		break;
+	case MEM_XFER_BUF:
+		va = uaudio_get_iova(&uaudio_qdev->curr_xfer_buf_iova,
+		&uaudio_qdev->xfer_buf_iova_size, &uaudio_qdev->xfer_buf_list,
+		size);
+		break;
+	default:
+		pr_err("%s: unknown mem type %d\n", __func__, mtype);
+	}
+
+	if (!va)
+		map = false;
+
+	if (!map)
+		goto done;
+
+	pr_debug("%s: map pa %pa to iova %lu for memtype %d\n", __func__, &pa,
+		va, mtype);
+	ret = iommu_map(uaudio_qdev->domain, va, pa, size,
+		IOMMU_READ | IOMMU_WRITE | IOMMU_DEVICE);
+	if (ret)
+		pr_err("%s:failed to map pa:%pa iova:%lu memtype:%d ret:%d\n",
+			__func__, &pa, va, mtype, ret);
+done:
+	return va;
+}
+
+static void uaudio_put_iova(unsigned long va, size_t size, struct list_head
+	*head, size_t *curr_iova_size)
+{
+	struct iova_info *info;
+	size_t tmp_size = size;
+	bool found = false;
+
+	list_for_each_entry(info, head, list) {
+		if (info->start_iova == va) {
+			if (!info->in_use) {
+				pr_err("%s: va %lu is not in use\n", __func__,
+					va);
+				return;
+			}
+			found = true;
+			info->in_use = false;
+			if (info->size == size)
+				goto done;
+		}
+
+		if (found && tmp_size >= info->size) {
+			info->in_use = false;
+			tmp_size -= info->size;
+			if (!tmp_size)
+				goto done;
+		}
+	}
+
+	if (!found) {
+		pr_err("%s: unable to find the va %lu\n", __func__, va);
+		return;
+	}
+done:
+	*curr_iova_size += size;
+	pr_debug("%s: curr_iova_size %zu\n", __func__, *curr_iova_size);
+}
+
+static void uaudio_iommu_unmap(enum mem_type mtype, unsigned long va,
+	size_t size)
+{
+	size_t umap_size;
+	bool unmap = true;
+
+	if (!va || !size)
+		return;
+
+	switch (mtype) {
+	case MEM_EVENT_RING:
+		if (uaudio_qdev->er_phys_addr)
+			uaudio_qdev->er_phys_addr = 0;
+		else
+			unmap = false;
+		break;
+
+	case MEM_XFER_RING:
+		uaudio_put_iova(va, size, &uaudio_qdev->xfer_ring_list,
+		&uaudio_qdev->xfer_ring_iova_size);
+		break;
+	case MEM_XFER_BUF:
+		uaudio_put_iova(va, size, &uaudio_qdev->xfer_buf_list,
+		&uaudio_qdev->xfer_buf_iova_size);
+		break;
+	default:
+		pr_err("%s: unknown mem type %d\n", __func__, mtype);
+		unmap = false;
+	}
+
+	if (!unmap)
+		return;
+
+	pr_debug("%s: unmap iova %lu for memtype %d\n", __func__, va, mtype);
+
+	umap_size = iommu_unmap(uaudio_qdev->domain, va, size);
+	if (umap_size != size)
+		pr_err("%s: unmapped size %zu for iova %lu\n", __func__,
+		umap_size, va);
+}
+
+static int prepare_qmi_response(struct snd_usb_substream *subs,
+		struct qmi_uaudio_stream_req_msg_v01 *req_msg,
+		struct qmi_uaudio_stream_resp_msg_v01 *resp, int info_idx)
+{
+	struct usb_interface *iface;
+	struct usb_host_interface *alts;
+	struct usb_interface_descriptor *altsd;
+	struct usb_host_endpoint *ep;
+	struct uac_format_type_i_continuous_descriptor *fmt;
+	struct uac_format_type_i_discrete_descriptor *fmt_v1;
+	struct uac_format_type_i_ext_descriptor *fmt_v2;
+	struct uac1_as_header_descriptor *as;
+	int ret = -ENODEV;
+	int protocol, card_num, pcm_dev_num;
+	void *hdr_ptr;
+	u8 *xfer_buf;
+	u32 len, mult, remainder, xfer_buf_len;
+	unsigned long va, tr_data_va = 0, tr_sync_va = 0, xfer_buf_va = 0;
+	phys_addr_t xhci_pa, xfer_buf_pa;
+
+	iface = usb_ifnum_to_if(subs->dev, subs->interface);
+	if (!iface) {
+		pr_err("%s: interface # %d does not exist\n", __func__,
+			subs->interface);
+		goto err;
+	}
+
+	pcm_dev_num = (req_msg->usb_token & SND_PCM_DEV_NUM_MASK) >> 8;
+	card_num = (req_msg->usb_token & SND_PCM_CARD_NUM_MASK) >> 16;
+	xfer_buf_len = req_msg->xfer_buff_size;
+
+	alts = &iface->altsetting[subs->altset_idx];
+	altsd = get_iface_desc(alts);
+	protocol = altsd->bInterfaceProtocol;
+
+	/* get format type */
+	if (protocol != UAC_VERSION_3) {
+		fmt = snd_usb_find_csint_desc(alts->extra, alts->extralen, NULL,
+				UAC_FORMAT_TYPE);
+		if (!fmt) {
+			pr_err("%s: %u:%d : no UAC_FORMAT_TYPE desc\n",
+				__func__, subs->interface, subs->altset_idx);
+			goto err;
+		}
+	}
+
+	if (!uadev[card_num].ctrl_intf) {
+		pr_err("%s: audio ctrl intf info not cached\n", __func__);
+		goto err;
+	}
+
+	if (protocol != UAC_VERSION_3) {
+		hdr_ptr = snd_usb_find_csint_desc(
+				uadev[card_num].ctrl_intf->extra,
+				uadev[card_num].ctrl_intf->extralen,
+				NULL, UAC_HEADER);
+		if (!hdr_ptr) {
+			pr_err("%s: no UAC_HEADER desc\n", __func__);
+			goto err;
+		}
+	}
+
+	if (protocol == UAC_VERSION_1) {
+		as = snd_usb_find_csint_desc(alts->extra, alts->extralen, NULL,
+			UAC_AS_GENERAL);
+		if (!as) {
+			pr_err("%s: %u:%d : no UAC_AS_GENERAL desc\n", __func__,
+				subs->interface, subs->altset_idx);
+			goto err;
+		}
+		resp->data_path_delay = as->bDelay;
+		resp->data_path_delay_valid = 1;
+		fmt_v1 = (struct uac_format_type_i_discrete_descriptor *)fmt;
+		resp->usb_audio_subslot_size = fmt_v1->bSubframeSize;
+		resp->usb_audio_subslot_size_valid = 1;
+
+		resp->usb_audio_spec_revision =
+			((struct uac1_ac_header_descriptor *)hdr_ptr)->bcdADC;
+		resp->usb_audio_spec_revision_valid = 1;
+	} else if (protocol == UAC_VERSION_2) {
+		fmt_v2 = (struct uac_format_type_i_ext_descriptor *)fmt;
+		resp->usb_audio_subslot_size = fmt_v2->bSubslotSize;
+		resp->usb_audio_subslot_size_valid = 1;
+
+		resp->usb_audio_spec_revision =
+			((struct uac2_ac_header_descriptor *)hdr_ptr)->bcdADC;
+		resp->usb_audio_spec_revision_valid = 1;
+	} else if (protocol == UAC_VERSION_3) {
+		switch (le16_to_cpu(get_endpoint(alts, 0)->wMaxPacketSize)) {
+		case BADD_MAXPSIZE_SYNC_MONO_16:
+		case BADD_MAXPSIZE_SYNC_STEREO_16:
+		case BADD_MAXPSIZE_ASYNC_MONO_16:
+		case BADD_MAXPSIZE_ASYNC_STEREO_16: {
+			resp->usb_audio_subslot_size = SUBSLOTSIZE_16_BIT;
+			break;
+		}
+
+		case BADD_MAXPSIZE_SYNC_MONO_24:
+		case BADD_MAXPSIZE_SYNC_STEREO_24:
+		case BADD_MAXPSIZE_ASYNC_MONO_24:
+		case BADD_MAXPSIZE_ASYNC_STEREO_24: {
+			resp->usb_audio_subslot_size = SUBSLOTSIZE_24_BIT;
+			break;
+		}
+
+		default:
+			pr_err("%d: %u: Invalid wMaxPacketSize\n",
+				subs->interface, subs->altset_idx);
+			ret = -EINVAL;
+			goto err;
+		}
+		resp->usb_audio_subslot_size_valid = 1;
+	} else {
+		pr_err("%s: unknown protocol version %x\n", __func__, protocol);
+		goto err;
+	}
+
+	resp->slot_id = subs->dev->slot_id;
+	resp->slot_id_valid = 1;
+
+	memcpy(&resp->std_as_opr_intf_desc, &alts->desc, sizeof(alts->desc));
+	resp->std_as_opr_intf_desc_valid = 1;
+
+	ep = usb_pipe_endpoint(subs->dev, subs->data_endpoint->pipe);
+	if (!ep) {
+		pr_err("%s: data ep # %d context is null\n", __func__,
+			subs->data_endpoint->ep_num);
+		goto err;
+	}
+	memcpy(&resp->std_as_data_ep_desc, &ep->desc, sizeof(ep->desc));
+	resp->std_as_data_ep_desc_valid = 1;
+
+	xhci_pa = usb_get_xfer_ring_dma_addr(subs->dev, ep);
+	if (!xhci_pa) {
+		pr_err("%s:failed to get data ep ring dma address\n", __func__);
+		goto err;
+	}
+
+	resp->xhci_mem_info.tr_data.pa = xhci_pa;
+
+	if (subs->sync_endpoint) {
+		ep = usb_pipe_endpoint(subs->dev, subs->sync_endpoint->pipe);
+		if (!ep) {
+			pr_debug("%s: implicit fb on data ep\n", __func__);
+			goto skip_sync_ep;
+		}
+		memcpy(&resp->std_as_sync_ep_desc, &ep->desc, sizeof(ep->desc));
+		resp->std_as_sync_ep_desc_valid = 1;
+
+		xhci_pa = usb_get_xfer_ring_dma_addr(subs->dev, ep);
+		if (!xhci_pa) {
+			pr_err("%s:failed to get sync ep ring dma address\n",
+				__func__);
+			goto err;
+		}
+		resp->xhci_mem_info.tr_sync.pa = xhci_pa;
+	}
+
+skip_sync_ep:
+	resp->interrupter_num = uaudio_qdev->intr_num;
+	resp->interrupter_num_valid = 1;
+
+	ret = usb_get_controller_id(subs->dev);
+	if (ret < 0)
+		goto err;
+
+	resp->controller_num = ret;
+	resp->controller_num_valid = 1;
+
+	/*  map xhci data structures PA memory to iova */
+
+	/* event ring */
+	ret = usb_sec_event_ring_setup(subs->dev, resp->interrupter_num);
+	if (ret) {
+		pr_err("%s: failed to setup sec event ring ret %d\n", __func__,
+			ret);
+		goto err;
+	}
+	xhci_pa = usb_get_sec_event_ring_dma_addr(subs->dev,
+			resp->interrupter_num);
+	if (!xhci_pa) {
+		pr_err("%s: failed to get sec event ring dma address\n",
+		__func__);
+		goto err;
+	}
+
+	va = uaudio_iommu_map(MEM_EVENT_RING, xhci_pa, PAGE_SIZE);
+	if (!va)
+		goto err;
+
+	resp->xhci_mem_info.evt_ring.va = PREPEND_SID_TO_IOVA(va,
+						uaudio_qdev->sid);
+	resp->xhci_mem_info.evt_ring.pa = xhci_pa;
+	resp->xhci_mem_info.evt_ring.size = PAGE_SIZE;
+	uaudio_qdev->er_phys_addr = xhci_pa;
+
+	resp->speed_info = get_speed_info(subs->dev->speed);
+	if (resp->speed_info == USB_AUDIO_DEVICE_SPEED_INVALID_V01)
+		goto unmap_er;
+
+	resp->speed_info_valid = 1;
+
+	/* data transfer ring */
+	xhci_pa = resp->xhci_mem_info.tr_data.pa;
+	va = uaudio_iommu_map(MEM_XFER_RING, xhci_pa, PAGE_SIZE);
+	if (!va)
+		goto unmap_er;
+
+	tr_data_va = va;
+	resp->xhci_mem_info.tr_data.va = PREPEND_SID_TO_IOVA(va,
+						uaudio_qdev->sid);
+	resp->xhci_mem_info.tr_data.size = PAGE_SIZE;
+
+	/* sync transfer ring */
+	if (!resp->xhci_mem_info.tr_sync.pa)
+		goto skip_sync;
+
+	xhci_pa = resp->xhci_mem_info.tr_sync.pa;
+	va = uaudio_iommu_map(MEM_XFER_RING, xhci_pa, PAGE_SIZE);
+	if (!va)
+		goto unmap_data;
+
+	tr_sync_va = va;
+	resp->xhci_mem_info.tr_sync.va = PREPEND_SID_TO_IOVA(va,
+						uaudio_qdev->sid);
+	resp->xhci_mem_info.tr_sync.size = PAGE_SIZE;
+
+skip_sync:
+	/* xfer buffer, multiple of 4K only */
+	if (!xfer_buf_len)
+		xfer_buf_len = PAGE_SIZE;
+
+	mult = xfer_buf_len / PAGE_SIZE;
+	remainder = xfer_buf_len % PAGE_SIZE;
+	len = mult * PAGE_SIZE;
+	len += remainder ? PAGE_SIZE : 0;
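+	/*
+	 * e.g. with 4 KiB pages a request of 10000 bytes gives mult = 2,
+	 * remainder = 1808, so len is rounded up to 3 pages (12288 bytes).
+	 */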
+
+	if (len > MAX_XFER_BUFF_LEN) {
+		pr_err("%s: req buf len %d > max buf len %lu, setting %lu\n",
+		__func__, len, MAX_XFER_BUFF_LEN, MAX_XFER_BUFF_LEN);
+		len = MAX_XFER_BUFF_LEN;
+	}
+
+	xfer_buf = usb_alloc_coherent(subs->dev, len, GFP_KERNEL, &xfer_buf_pa);
+	if (!xfer_buf)
+		goto unmap_sync;
+
+	resp->xhci_mem_info.xfer_buff.pa = xfer_buf_pa;
+	resp->xhci_mem_info.xfer_buff.size = len;
+
+	va = uaudio_iommu_map(MEM_XFER_BUF, xfer_buf_pa, len);
+	if (!va)
+		goto unmap_sync;
+
+	xfer_buf_va = va;
+	resp->xhci_mem_info.xfer_buff.va = PREPEND_SID_TO_IOVA(va,
+						uaudio_qdev->sid);
+
+	resp->xhci_mem_info_valid = 1;
+
+	if (!atomic_read(&uadev[card_num].in_use)) {
+		kref_init(&uadev[card_num].kref);
+		init_waitqueue_head(&uadev[card_num].disconnect_wq);
+		uadev[card_num].num_intf =
+			subs->dev->config->desc.bNumInterfaces;
+		uadev[card_num].info =
+			kzalloc(sizeof(struct intf_info) *
+			uadev[card_num].num_intf, GFP_KERNEL);
+		if (!uadev[card_num].info) {
+			ret = -ENOMEM;
+			goto unmap_xfer_buf;
+		}
+		uadev[card_num].udev = subs->dev;
+		atomic_set(&uadev[card_num].in_use, 1);
+	} else {
+		kref_get(&uadev[card_num].kref);
+	}
+
+	uadev[card_num].card_num = card_num;
+	uadev[card_num].usb_core_id = resp->controller_num;
+
+	/* cache intf specific info to use it for unmap and free xfer buf */
+	uadev[card_num].info[info_idx].data_xfer_ring_va = tr_data_va;
+	uadev[card_num].info[info_idx].data_xfer_ring_size = PAGE_SIZE;
+	uadev[card_num].info[info_idx].sync_xfer_ring_va = tr_sync_va;
+	uadev[card_num].info[info_idx].sync_xfer_ring_size = PAGE_SIZE;
+	uadev[card_num].info[info_idx].xfer_buf_va = xfer_buf_va;
+	uadev[card_num].info[info_idx].xfer_buf_pa = xfer_buf_pa;
+	uadev[card_num].info[info_idx].xfer_buf_size = len;
+	uadev[card_num].info[info_idx].xfer_buf = xfer_buf;
+	uadev[card_num].info[info_idx].pcm_card_num = card_num;
+	uadev[card_num].info[info_idx].pcm_dev_num = pcm_dev_num;
+	uadev[card_num].info[info_idx].direction = subs->direction;
+	uadev[card_num].info[info_idx].intf_num = subs->interface;
+	uadev[card_num].info[info_idx].in_use = true;
+
+	set_bit(card_num, &uaudio_qdev->card_slot);
+
+	return 0;
+
+unmap_xfer_buf:
+	uaudio_iommu_unmap(MEM_XFER_BUF, xfer_buf_va, len);
+unmap_sync:
+	usb_free_coherent(subs->dev, len, xfer_buf, xfer_buf_pa);
+	uaudio_iommu_unmap(MEM_XFER_RING, tr_sync_va, PAGE_SIZE);
+unmap_data:
+	uaudio_iommu_unmap(MEM_XFER_RING, tr_data_va, PAGE_SIZE);
+unmap_er:
+	uaudio_iommu_unmap(MEM_EVENT_RING, IOVA_BASE, PAGE_SIZE);
+err:
+	return ret;
+}
+
+static void uaudio_dev_intf_cleanup(struct usb_device *udev,
+	struct intf_info *info)
+{
+	uaudio_iommu_unmap(MEM_XFER_RING, info->data_xfer_ring_va,
+		info->data_xfer_ring_size);
+	info->data_xfer_ring_va = 0;
+	info->data_xfer_ring_size = 0;
+
+	uaudio_iommu_unmap(MEM_XFER_RING, info->sync_xfer_ring_va,
+		info->sync_xfer_ring_size);
+	info->sync_xfer_ring_va = 0;
+	info->sync_xfer_ring_size = 0;
+
+	uaudio_iommu_unmap(MEM_XFER_BUF, info->xfer_buf_va,
+		info->xfer_buf_size);
+	info->xfer_buf_va = 0;
+
+	usb_free_coherent(udev, info->xfer_buf_size,
+		info->xfer_buf, info->xfer_buf_pa);
+	info->xfer_buf_size = 0;
+	info->xfer_buf = NULL;
+	info->xfer_buf_pa = 0;
+
+	info->in_use = false;
+}
+
+static void uaudio_dev_cleanup(struct uaudio_dev *dev)
+{
+	int if_idx;
+
+	/* free xfer buffer and unmap xfer ring and buf per interface */
+	for (if_idx = 0; if_idx < dev->num_intf; if_idx++) {
+		if (!dev->info[if_idx].in_use)
+			continue;
+		uaudio_dev_intf_cleanup(dev->udev, &dev->info[if_idx]);
+		pr_debug("%s: release resources: intf# %d card# %d\n", __func__,
+			dev->info[if_idx].intf_num, dev->card_num);
+	}
+
+	dev->num_intf = 0;
+
+	/* free interface info */
+	kfree(dev->info);
+	dev->info = NULL;
+
+	clear_bit(dev->card_num, &uaudio_qdev->card_slot);
+
+	/* all audio devices are disconnected */
+	if (!uaudio_qdev->card_slot) {
+		uaudio_iommu_unmap(MEM_EVENT_RING, IOVA_BASE, PAGE_SIZE);
+		usb_sec_event_ring_cleanup(dev->udev, uaudio_qdev->intr_num);
+		pr_debug("%s: all audio devices disconnected\n", __func__);
+	}
+
+	dev->udev = NULL;
+}
+
+static void uaudio_disconnect_cb(struct snd_usb_audio *chip)
+{
+	int ret;
+	struct uaudio_dev *dev;
+	int card_num = chip->card_num;
+	struct uaudio_qmi_svc *svc = uaudio_svc;
+	struct qmi_uaudio_stream_ind_msg_v01 disconnect_ind = {0};
+
+	pr_debug("%s: for card# %d\n", __func__, card_num);
+
+	if (card_num >= SNDRV_CARDS) {
+		pr_err("%s: invalid card number\n", __func__);
+		return;
+	}
+
+	mutex_lock(&chip->dev_lock);
+	dev = &uadev[card_num];
+
+	/* clean up */
+	if (!dev->udev) {
+		pr_debug("%s: no clean up required\n", __func__);
+		goto done;
+	}
+
+	if (atomic_read(&dev->in_use)) {
+		mutex_unlock(&chip->dev_lock);
+
+		pr_debug("%s: sending qmi indication disconnect\n", __func__);
+		disconnect_ind.dev_event = USB_AUDIO_DEV_DISCONNECT_V01;
+		disconnect_ind.slot_id = dev->udev->slot_id;
+		disconnect_ind.controller_num = dev->usb_core_id;
+		disconnect_ind.controller_num_valid = 1;
+		ret = qmi_send_ind(svc->uaudio_svc_hdl, svc->curr_conn,
+				&uaudio_stream_ind_desc, &disconnect_ind,
+				sizeof(disconnect_ind));
+		if (ret < 0) {
+			pr_err("%s: qmi send failed wiht err: %d\n",
+					__func__, ret);
+			return;
+		}
+
+		ret = wait_event_interruptible(dev->disconnect_wq,
+				!atomic_read(&dev->in_use));
+		if (ret < 0) {
+			pr_debug("%s: failed with ret %d\n", __func__, ret);
+			return;
+		}
+		mutex_lock(&chip->dev_lock);
+	}
+
+	uaudio_dev_cleanup(dev);
+done:
+	mutex_unlock(&chip->dev_lock);
+}
+
+static void uaudio_dev_release(struct kref *kref)
+{
+	struct uaudio_dev *dev = container_of(kref, struct uaudio_dev, kref);
+
+	pr_debug("%s for dev %pK\n", __func__, dev);
+
+	atomic_set(&dev->in_use, 0);
+
+	clear_bit(dev->card_num, &uaudio_qdev->card_slot);
+
+	/* all audio devices are disconnected */
+	if (!uaudio_qdev->card_slot) {
+		usb_sec_event_ring_cleanup(dev->udev, uaudio_qdev->intr_num);
+		uaudio_iommu_unmap(MEM_EVENT_RING, IOVA_BASE, PAGE_SIZE);
+		pr_debug("%s: all audio devices disconnected\n", __func__);
+	}
+
+	wake_up(&dev->disconnect_wq);
+}
+
+/* maps audio format received over QMI to asound.h based pcm format */
+static int map_pcm_format(unsigned int fmt_received)
+{
+	switch (fmt_received) {
+	case USB_QMI_PCM_FORMAT_S8:
+		return SNDRV_PCM_FORMAT_S8;
+	case USB_QMI_PCM_FORMAT_U8:
+		return SNDRV_PCM_FORMAT_U8;
+	case USB_QMI_PCM_FORMAT_S16_LE:
+		return SNDRV_PCM_FORMAT_S16_LE;
+	case USB_QMI_PCM_FORMAT_S16_BE:
+		return SNDRV_PCM_FORMAT_S16_BE;
+	case USB_QMI_PCM_FORMAT_U16_LE:
+		return SNDRV_PCM_FORMAT_U16_LE;
+	case USB_QMI_PCM_FORMAT_U16_BE:
+		return SNDRV_PCM_FORMAT_U16_BE;
+	case USB_QMI_PCM_FORMAT_S24_LE:
+		return SNDRV_PCM_FORMAT_S24_LE;
+	case USB_QMI_PCM_FORMAT_S24_BE:
+		return SNDRV_PCM_FORMAT_S24_BE;
+	case USB_QMI_PCM_FORMAT_U24_LE:
+		return SNDRV_PCM_FORMAT_U24_LE;
+	case USB_QMI_PCM_FORMAT_U24_BE:
+		return SNDRV_PCM_FORMAT_U24_BE;
+	case USB_QMI_PCM_FORMAT_S24_3LE:
+		return SNDRV_PCM_FORMAT_S24_3LE;
+	case USB_QMI_PCM_FORMAT_S24_3BE:
+		return SNDRV_PCM_FORMAT_S24_3BE;
+	case USB_QMI_PCM_FORMAT_U24_3LE:
+		return SNDRV_PCM_FORMAT_U24_3LE;
+	case USB_QMI_PCM_FORMAT_U24_3BE:
+		return SNDRV_PCM_FORMAT_U24_3BE;
+	case USB_QMI_PCM_FORMAT_S32_LE:
+		return SNDRV_PCM_FORMAT_S32_LE;
+	case USB_QMI_PCM_FORMAT_S32_BE:
+		return SNDRV_PCM_FORMAT_S32_BE;
+	case USB_QMI_PCM_FORMAT_U32_LE:
+		return SNDRV_PCM_FORMAT_U32_LE;
+	case USB_QMI_PCM_FORMAT_U32_BE:
+		return SNDRV_PCM_FORMAT_U32_BE;
+	default:
+		return -EINVAL;
+	}
+}
+
+static int info_idx_from_ifnum(int card_num, int intf_num, bool enable)
+{
+	int i;
+
+	/*
+	 * default index 0 is used when info is allocated upon
+	 * first enable audio stream req for a pcm device
+	 */
+	if (enable && !uadev[card_num].info)
+		return 0;
+
+	for (i = 0; i < uadev[card_num].num_intf; i++) {
+		if (enable && !uadev[card_num].info[i].in_use)
+			return i;
+		else if (!enable &&
+				uadev[card_num].info[i].intf_num == intf_num)
+			return i;
+	}
+
+	return -EINVAL;
+}
+
+static int handle_uaudio_stream_req(void *req_h, void *req)
+{
+	struct qmi_uaudio_stream_req_msg_v01 *req_msg;
+	struct qmi_uaudio_stream_resp_msg_v01 resp = {{0}, 0};
+	struct snd_usb_substream *subs;
+	struct snd_usb_audio *chip = NULL;
+	struct uaudio_qmi_svc *svc = uaudio_svc;
+	struct intf_info *info;
+	int pcm_format;
+	u8 pcm_card_num, pcm_dev_num, direction;
+	int info_idx = -EINVAL, ret = 0;
+
+	req_msg = (struct qmi_uaudio_stream_req_msg_v01 *)req;
+
+	if (!req_msg->audio_format_valid || !req_msg->bit_rate_valid ||
+	!req_msg->number_of_ch_valid || !req_msg->xfer_buff_size_valid) {
+		pr_err("%s: invalid request msg\n", __func__);
+		ret = -EINVAL;
+		goto response;
+	}
+
+	direction = req_msg->usb_token & SND_PCM_STREAM_DIRECTION;
+	pcm_dev_num = (req_msg->usb_token & SND_PCM_DEV_NUM_MASK) >> 8;
+	pcm_card_num = (req_msg->usb_token & SND_PCM_CARD_NUM_MASK) >> 16;
+
+	pr_debug("%s:card#:%d dev#:%d dir:%d en:%d fmt:%d rate:%d #ch:%d\n",
+		__func__, pcm_card_num, pcm_dev_num, direction, req_msg->enable,
+		req_msg->audio_format, req_msg->bit_rate,
+		req_msg->number_of_ch);
+
+	if (pcm_card_num >= SNDRV_CARDS) {
+		pr_err("%s: invalid card # %u", __func__, pcm_card_num);
+		ret = -EINVAL;
+		goto response;
+	}
+
+	pcm_format = map_pcm_format(req_msg->audio_format);
+	if (pcm_format == -EINVAL) {
+		pr_err("%s: unsupported pcm format received %d\n",
+		__func__, req_msg->audio_format);
+		ret = -EINVAL;
+		goto response;
+	}
+
+	subs = find_snd_usb_substream(pcm_card_num, pcm_dev_num, direction,
+					&chip, uaudio_disconnect_cb);
+	if (!subs || !chip || atomic_read(&chip->shutdown)) {
+		pr_err("%s: can't find substream for card# %u, dev# %u dir%u\n",
+			__func__, pcm_card_num, pcm_dev_num, direction);
+		ret = -ENODEV;
+		goto response;
+	}
+
+	mutex_lock(&chip->dev_lock);
+	info_idx = info_idx_from_ifnum(pcm_card_num, subs->interface,
+		req_msg->enable);
+	if (atomic_read(&chip->shutdown) || !subs->stream || !subs->stream->pcm
+			|| !subs->stream->chip) {
+		ret = -ENODEV;
+		mutex_unlock(&chip->dev_lock);
+		goto response;
+	}
+
+	if (req_msg->enable) {
+		if (info_idx < 0) {
+			pr_err("%s interface# %d already in use card# %d\n",
+				__func__, subs->interface, pcm_card_num);
+			ret = -EBUSY;
+			mutex_unlock(&chip->dev_lock);
+			goto response;
+		}
+	}
+
+	subs->pcm_format = pcm_format;
+	subs->channels = req_msg->number_of_ch;
+	subs->cur_rate = req_msg->bit_rate;
+	uadev[pcm_card_num].ctrl_intf = chip->ctrl_intf;
+
+	ret = snd_usb_enable_audio_stream(subs, req_msg->enable);
+
+	if (!ret && req_msg->enable)
+		ret = prepare_qmi_response(subs, req_msg, &resp, info_idx);
+
+	mutex_unlock(&chip->dev_lock);
+
+response:
+	if (!req_msg->enable && ret != -EINVAL) {
+		if (info_idx >= 0) {
+			mutex_lock(&chip->dev_lock);
+			info = &uadev[pcm_card_num].info[info_idx];
+			uaudio_dev_intf_cleanup(uadev[pcm_card_num].udev, info);
+			pr_debug("%s:release resources: intf# %d card# %d\n",
+				__func__, subs->interface, pcm_card_num);
+			mutex_unlock(&chip->dev_lock);
+		}
+		if (atomic_read(&uadev[pcm_card_num].in_use))
+			kref_put(&uadev[pcm_card_num].kref,
+					uaudio_dev_release);
+	}
+
+	resp.usb_token = req_msg->usb_token;
+	resp.usb_token_valid = 1;
+	resp.internal_status = ret;
+	resp.internal_status_valid = 1;
+	resp.status = ret ? USB_AUDIO_STREAM_REQ_FAILURE_V01 : ret;
+	resp.status_valid = 1;
+	ret = qmi_send_resp_from_cb(svc->uaudio_svc_hdl, svc->curr_conn, req_h,
+			&uaudio_stream_resp_desc, &resp, sizeof(resp));
+
+	svc->t_resp_sent = ktime_get();
+
+	pr_debug("%s: t_resp sent - t_req recvd (in ms) %lld\n", __func__,
+		ktime_to_ms(ktime_sub(svc->t_resp_sent, svc->t_request_recvd)));
+
+	return ret;
+}
+
+static int uaudio_qmi_svc_connect_cb(struct qmi_handle *handle,
+			       void *conn_h)
+{
+	struct uaudio_qmi_svc *svc = uaudio_svc;
+
+	if (svc->uaudio_svc_hdl != handle || !conn_h) {
+		pr_err("%s: handle mismatch\n", __func__);
+		return -EINVAL;
+	}
+	if (svc->curr_conn) {
+		pr_err("%s: Service is busy\n", __func__);
+		return -ECONNREFUSED;
+	}
+	svc->curr_conn = conn_h;
+	return 0;
+}
+
+static void uaudio_qmi_disconnect_work(struct work_struct *w)
+{
+	struct intf_info *info;
+	int idx, if_idx;
+	struct snd_usb_substream *subs;
+	struct snd_usb_audio *chip = NULL;
+
+	/* find all active intf for set alt 0 and cleanup usb audio dev */
+	for (idx = 0; idx < SNDRV_CARDS; idx++) {
+		if (!atomic_read(&uadev[idx].in_use))
+			continue;
+
+		for (if_idx = 0; if_idx < uadev[idx].num_intf; if_idx++) {
+			if (!uadev[idx].info || !uadev[idx].info[if_idx].in_use)
+				continue;
+			info = &uadev[idx].info[if_idx];
+			subs = find_snd_usb_substream(info->pcm_card_num,
+							info->pcm_dev_num,
+							info->direction,
+							&chip,
+							uaudio_disconnect_cb);
+			if (!subs || !chip || atomic_read(&chip->shutdown)) {
+				pr_debug("%s:no subs for c#%u, dev#%u dir%u\n",
+					__func__, info->pcm_card_num,
+					info->pcm_dev_num,
+					info->direction);
+				continue;
+			}
+			snd_usb_enable_audio_stream(subs, 0);
+		}
+		atomic_set(&uadev[idx].in_use, 0);
+		mutex_lock(&chip->dev_lock);
+		uaudio_dev_cleanup(&uadev[idx]);
+		mutex_unlock(&chip->dev_lock);
+	}
+}
+
+static int uaudio_qmi_svc_disconnect_cb(struct qmi_handle *handle,
+				  void *conn_h)
+{
+	struct uaudio_qmi_svc *svc = uaudio_svc;
+
+	if (svc->uaudio_svc_hdl != handle || svc->curr_conn != conn_h) {
+		pr_err("%s: handle mismatch\n", __func__);
+		return -EINVAL;
+	}
+
+	svc->curr_conn = NULL;
+	queue_work(svc->uaudio_wq, &svc->qmi_disconnect_work);
+
+	return 0;
+}
+
+static int uaudio_qmi_svc_req_cb(struct qmi_handle *handle, void *conn_h,
+			void *req_h, unsigned int msg_id, void *req)
+{
+	int ret;
+	struct uaudio_qmi_svc *svc = uaudio_svc;
+
+	if (svc->uaudio_svc_hdl != handle || svc->curr_conn != conn_h) {
+		pr_err("%s: handle mismatch\n", __func__);
+		return -EINVAL;
+	}
+
+	switch (msg_id) {
+	case QMI_UAUDIO_STREAM_REQ_V01:
+		ret = handle_uaudio_stream_req(req_h, req);
+		break;
+
+	default:
+		ret = -ENOTSUPP;
+		break;
+	}
+	return ret;
+}
+
+static int uaudio_qmi_svc_req_desc_cb(unsigned int msg_id,
+	struct msg_desc **req_desc)
+{
+	int ret;
+
+	pr_debug("%s: msg_id %d\n", __func__, msg_id);
+
+	switch (msg_id) {
+	case QMI_UAUDIO_STREAM_REQ_V01:
+		*req_desc = &uaudio_stream_req_desc;
+		ret = sizeof(struct qmi_uaudio_stream_req_msg_v01);
+		break;
+
+	default:
+		ret = -ENOTSUPP;
+		break;
+	}
+	return ret;
+}
+
+static void uaudio_qmi_svc_recv_msg(struct work_struct *w)
+{
+	int ret;
+	struct uaudio_qmi_svc *svc = container_of(w, struct uaudio_qmi_svc,
+		recv_msg_work);
+
+	do {
+		pr_debug("%s: Notified about a Receive Event", __func__);
+	} while ((ret = qmi_recv_msg(svc->uaudio_svc_hdl)) == 0);
+
+	if (ret != -ENOMSG)
+		pr_err("%s: Error receiving message\n", __func__);
+}
+
+static void uaudio_qmi_svc_ntfy(struct qmi_handle *handle,
+		enum qmi_event_type event, void *priv)
+{
+	struct uaudio_qmi_svc *svc = uaudio_svc;
+
+	pr_debug("%s: event %d", __func__, event);
+
+	svc->t_request_recvd = ktime_get();
+
+	switch (event) {
+	case QMI_RECV_MSG:
+		queue_work(svc->uaudio_wq, &svc->recv_msg_work);
+		break;
+	default:
+		break;
+	}
+}
+
+static struct qmi_svc_ops_options uaudio_svc_ops_options = {
+	.version = 1,
+	.service_id = UAUDIO_STREAM_SERVICE_ID_V01,
+	.service_vers = UAUDIO_STREAM_SERVICE_VERS_V01,
+	.connect_cb = uaudio_qmi_svc_connect_cb,
+	.disconnect_cb = uaudio_qmi_svc_disconnect_cb,
+	.req_desc_cb = uaudio_qmi_svc_req_desc_cb,
+	.req_cb = uaudio_qmi_svc_req_cb,
+};
+
+static int uaudio_qmi_plat_probe(struct platform_device *pdev)
+{
+	int ret;
+	struct device_node *node = pdev->dev.of_node;
+
+	uaudio_qdev = devm_kzalloc(&pdev->dev, sizeof(struct uaudio_qmi_dev),
+		GFP_KERNEL);
+	if (!uaudio_qdev)
+		return -ENOMEM;
+
+	uaudio_qdev->dev = &pdev->dev;
+
+	ret = of_property_read_u32(node, "qcom,usb-audio-stream-id",
+				&uaudio_qdev->sid);
+	if (ret) {
+		dev_err(&pdev->dev, "failed to read sid.\n");
+		return -ENODEV;
+	}
+
+	ret = of_property_read_u32(node, "qcom,usb-audio-intr-num",
+				&uaudio_qdev->intr_num);
+	if (ret) {
+		dev_err(&pdev->dev, "failed to read intr num.\n");
+		return -ENODEV;
+	}
+
+	uaudio_qdev->domain = iommu_domain_alloc(msm_iommu_get_bus(&pdev->dev));
+	if (!uaudio_qdev->domain) {
+		dev_err(&pdev->dev, "failed to callocate iommu domain\n");
+		return -ENODEV;
+	}
+
+	/* attach to external processor iommu */
+	ret = iommu_attach_device(uaudio_qdev->domain, &pdev->dev);
+	if (ret) {
+		dev_err(&pdev->dev, "failed to attach device ret = %d\n", ret);
+		goto free_domain;
+	}
+
+	/* initialize xfer ring and xfer buf iova list */
+	INIT_LIST_HEAD(&uaudio_qdev->xfer_ring_list);
+	uaudio_qdev->curr_xfer_ring_iova = IOVA_XFER_RING_BASE;
+	uaudio_qdev->xfer_ring_iova_size =
+			IOVA_XFER_RING_MAX - IOVA_XFER_RING_BASE;
+
+	INIT_LIST_HEAD(&uaudio_qdev->xfer_buf_list);
+	uaudio_qdev->curr_xfer_buf_iova = IOVA_XFER_BUF_BASE;
+	uaudio_qdev->xfer_buf_iova_size =
+		IOVA_XFER_BUF_MAX - IOVA_XFER_BUF_BASE;
+
+	return 0;
+
+free_domain:
+	iommu_domain_free(uaudio_qdev->domain);
+	return ret;
+}
+
+static int uaudio_qmi_plat_remove(struct platform_device *pdev)
+{
+	iommu_detach_device(uaudio_qdev->domain, &pdev->dev);
+	iommu_domain_free(uaudio_qdev->domain);
+	uaudio_qdev->domain = NULL;
+
+	return 0;
+}
+
+static const struct of_device_id of_uaudio_match[] = {
+	{
+		.compatible = "qcom,usb-audio-qmi-dev",
+	},
+	{ },
+};
+MODULE_DEVICE_TABLE(of, of_uaudio_match);
+
+static struct platform_driver uaudio_qmi_driver = {
+	.probe		= uaudio_qmi_plat_probe,
+	.remove		= uaudio_qmi_plat_remove,
+	.driver		= {
+		.name	= "uaudio-qmi",
+		.of_match_table	= of_uaudio_match,
+	},
+};
+
+static int uaudio_qmi_svc_init(void)
+{
+	int ret;
+	struct uaudio_qmi_svc *svc;
+
+	svc = kzalloc(sizeof(struct uaudio_qmi_svc), GFP_KERNEL);
+	if (!svc)
+		return -ENOMEM;
+
+	svc->uaudio_wq = create_singlethread_workqueue("uaudio_svc");
+	if (!svc->uaudio_wq) {
+		ret = -ENOMEM;
+		goto free_svc;
+	}
+
+	svc->uaudio_svc_hdl = qmi_handle_create(uaudio_qmi_svc_ntfy, NULL);
+	if (!svc->uaudio_svc_hdl) {
+		pr_err("%s: Error creating svc_hdl\n", __func__);
+		ret = -EFAULT;
+		goto destroy_uaudio_wq;
+	}
+
+	ret = qmi_svc_register(svc->uaudio_svc_hdl, &uaudio_svc_ops_options);
+	if (ret < 0) {
+		pr_err("%s:Error registering uaudio svc %d\n", __func__, ret);
+		goto destroy_svc_handle;
+	}
+
+	INIT_WORK(&svc->recv_msg_work, uaudio_qmi_svc_recv_msg);
+	INIT_WORK(&svc->qmi_disconnect_work, uaudio_qmi_disconnect_work);
+
+	uaudio_svc = svc;
+
+	return 0;
+
+destroy_svc_handle:
+	qmi_handle_destroy(svc->uaudio_svc_hdl);
+destroy_uaudio_wq:
+	destroy_workqueue(svc->uaudio_wq);
+free_svc:
+	kfree(svc);
+	return ret;
+}
+
+static void uaudio_qmi_svc_exit(void)
+{
+	struct uaudio_qmi_svc *svc = uaudio_svc;
+
+	qmi_svc_unregister(svc->uaudio_svc_hdl);
+	flush_workqueue(svc->uaudio_wq);
+	qmi_handle_destroy(svc->uaudio_svc_hdl);
+	destroy_workqueue(svc->uaudio_wq);
+	kfree(svc);
+	uaudio_svc = NULL;
+}
+
+static int __init uaudio_qmi_plat_init(void)
+{
+	int ret;
+
+	ret = platform_driver_register(&uaudio_qmi_driver);
+	if (ret)
+		return ret;
+
+	ret = uaudio_qmi_svc_init();
+	if (ret)
+		platform_driver_unregister(&uaudio_qmi_driver);
+
+	return ret;
+}
+
+static void __exit uaudio_qmi_plat_exit(void)
+{
+	uaudio_qmi_svc_exit();
+	platform_driver_unregister(&uaudio_qmi_driver);
+}
+
+module_init(uaudio_qmi_plat_init);
+module_exit(uaudio_qmi_plat_exit);
+
+MODULE_DESCRIPTION("USB AUDIO QMI Service Driver");
+MODULE_LICENSE("GPL v2");
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/sound/usb/usb_audio_qmi_v01.c	2019-10-29 09:26:26.197228172 +0100
@@ -0,0 +1,891 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/qmi_encdec.h>
+
+#include <soc/qcom/msm_qmi_interface.h>
+
+#include "usb_audio_qmi_v01.h"
+
+static struct elem_info mem_info_v01_ei[] = {
+	{
+		.data_type      = QMI_UNSIGNED_8_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint64_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct mem_info_v01,
+					   va),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_8_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint64_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct mem_info_v01,
+					   pa),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint32_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct mem_info_v01,
+					   size),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
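+
+/*
+ * Note on these tables: each elem_info entry tells the generic QMI
+ * encoder/decoder how to (de)serialize one element -- its wire type,
+ * length, size and the offset of the matching field in the C struct.
+ * As in the table above, members of an embedded struct carry tlv_type 0,
+ * and a QMI_EOTI entry terminates every array.
+ */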
+
+static struct elem_info apps_mem_info_v01_ei[] = {
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = 1,
+		.elem_size      = sizeof(struct mem_info_v01),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct apps_mem_info_v01,
+					   evt_ring),
+		.ei_array      = mem_info_v01_ei,
+	},
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = 1,
+		.elem_size      = sizeof(struct mem_info_v01),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct apps_mem_info_v01,
+					   tr_data),
+		.ei_array      = mem_info_v01_ei,
+	},
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = 1,
+		.elem_size      = sizeof(struct mem_info_v01),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct apps_mem_info_v01,
+					   tr_sync),
+		.ei_array      = mem_info_v01_ei,
+	},
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = 1,
+		.elem_size      = sizeof(struct mem_info_v01),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct apps_mem_info_v01,
+					   xfer_buff),
+		.ei_array      = mem_info_v01_ei,
+	},
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = 1,
+		.elem_size      = sizeof(struct mem_info_v01),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct apps_mem_info_v01,
+					   dcba),
+		.ei_array      = mem_info_v01_ei,
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
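+/*
+ * TLV mirrors of the standard USB descriptors declared in
+ * usb_audio_qmi_v01.h; every descriptor field crosses the link as its own
+ * fixed-size element (including the audio-class bRefresh/bSynchAddress
+ * endpoint fields).
+ */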
+static struct elem_info usb_endpoint_descriptor_v01_ei[] = {
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct usb_endpoint_descriptor_v01,
+					   bLength),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct usb_endpoint_descriptor_v01,
+					   bDescriptorType),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct usb_endpoint_descriptor_v01,
+					   bEndpointAddress),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct usb_endpoint_descriptor_v01,
+					   bmAttributes),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_2_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint16_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct usb_endpoint_descriptor_v01,
+					   wMaxPacketSize),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct usb_endpoint_descriptor_v01,
+					   bInterval),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct usb_endpoint_descriptor_v01,
+					   bRefresh),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct usb_endpoint_descriptor_v01,
+					   bSynchAddress),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+static struct elem_info usb_interface_descriptor_v01_ei[] = {
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct usb_interface_descriptor_v01,
+					   bLength),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct usb_interface_descriptor_v01,
+					   bDescriptorType),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct usb_interface_descriptor_v01,
+					   bInterfaceNumber),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct usb_interface_descriptor_v01,
+					   bAlternateSetting),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct usb_interface_descriptor_v01,
+					   bNumEndpoints),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct usb_interface_descriptor_v01,
+					   bInterfaceClass),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct usb_interface_descriptor_v01,
+					   bInterfaceSubClass),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct usb_interface_descriptor_v01,
+					   bInterfaceProtocol),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct usb_interface_descriptor_v01,
+					   iInterface),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
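+/*
+ * QMI_UAUDIO_STREAM_REQ payload: tlv_type 0x01/0x02 carry the mandatory
+ * members; types 0x10 and up are optional, each encoded as a QMI_OPT_FLAG
+ * presence marker followed by the value under the same tlv_type.
+ */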
+struct elem_info qmi_uaudio_stream_req_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x01,
+		.offset         = offsetof(struct qmi_uaudio_stream_req_msg_v01,
+					   enable),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint32_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x02,
+		.offset         = offsetof(struct qmi_uaudio_stream_req_msg_v01,
+					   usb_token),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(struct qmi_uaudio_stream_req_msg_v01,
+					   audio_format_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint32_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(struct qmi_uaudio_stream_req_msg_v01,
+					   audio_format),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x11,
+		.offset         = offsetof(struct qmi_uaudio_stream_req_msg_v01,
+					   number_of_ch_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint32_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x11,
+		.offset         = offsetof(struct qmi_uaudio_stream_req_msg_v01,
+					   number_of_ch),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x12,
+		.offset         = offsetof(struct qmi_uaudio_stream_req_msg_v01,
+					   bit_rate_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint32_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x12,
+		.offset         = offsetof(struct qmi_uaudio_stream_req_msg_v01,
+					   bit_rate),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x13,
+		.offset         = offsetof(struct qmi_uaudio_stream_req_msg_v01,
+					   xfer_buff_size_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint32_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x13,
+		.offset         = offsetof(struct qmi_uaudio_stream_req_msg_v01,
+					   xfer_buff_size),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
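+/*
+ * QMI_UAUDIO_STREAM_RESP payload: the standard QMI result TLV (0x02)
+ * followed by optional stream-setup results (descriptors, xHCI memory map,
+ * interrupter and controller numbers) mirroring
+ * struct qmi_uaudio_stream_resp_msg_v01.
+ */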
+struct elem_info qmi_uaudio_stream_resp_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = 1,
+		.elem_size      = sizeof(struct qmi_response_type_v01),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x02,
+		.offset         = offsetof(
+					struct qmi_uaudio_stream_resp_msg_v01,
+					resp),
+		.ei_array      = get_qmi_response_type_v01_ei(),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(
+					struct qmi_uaudio_stream_resp_msg_v01,
+					status_valid),
+	},
+	{
+		.data_type      = QMI_SIGNED_4_BYTE_ENUM,
+		.elem_len       = 1,
+		.elem_size      = sizeof(enum usb_audio_stream_status_enum_v01),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(
+					struct qmi_uaudio_stream_resp_msg_v01,
+					status),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x11,
+		.offset         = offsetof(
+					struct qmi_uaudio_stream_resp_msg_v01,
+					internal_status_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint32_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x11,
+		.offset         = offsetof(
+					struct qmi_uaudio_stream_resp_msg_v01,
+					internal_status),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x12,
+		.offset         = offsetof(
+					struct qmi_uaudio_stream_resp_msg_v01,
+					slot_id_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint32_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x12,
+		.offset         = offsetof(
+					struct qmi_uaudio_stream_resp_msg_v01,
+					slot_id),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x13,
+		.offset         = offsetof(
+					struct qmi_uaudio_stream_resp_msg_v01,
+					usb_token_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint32_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x13,
+		.offset         = offsetof(
+					struct qmi_uaudio_stream_resp_msg_v01,
+					usb_token),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x14,
+		.offset         = offsetof(
+					struct qmi_uaudio_stream_resp_msg_v01,
+					std_as_opr_intf_desc_valid),
+	},
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = 1,
+		.elem_size      = sizeof(struct usb_interface_descriptor_v01),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x14,
+		.offset         = offsetof(
+					struct qmi_uaudio_stream_resp_msg_v01,
+					std_as_opr_intf_desc),
+		.ei_array      = usb_interface_descriptor_v01_ei,
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x15,
+		.offset         = offsetof(
+					struct qmi_uaudio_stream_resp_msg_v01,
+					std_as_data_ep_desc_valid),
+	},
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = 1,
+		.elem_size      = sizeof(struct usb_endpoint_descriptor_v01),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x15,
+		.offset         = offsetof(
+					struct qmi_uaudio_stream_resp_msg_v01,
+					std_as_data_ep_desc),
+		.ei_array      = usb_endpoint_descriptor_v01_ei,
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x16,
+		.offset         = offsetof(
+					struct qmi_uaudio_stream_resp_msg_v01,
+					std_as_sync_ep_desc_valid),
+	},
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = 1,
+		.elem_size      = sizeof(struct usb_endpoint_descriptor_v01),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x16,
+		.offset         = offsetof(
+					struct qmi_uaudio_stream_resp_msg_v01,
+					std_as_sync_ep_desc),
+		.ei_array      = usb_endpoint_descriptor_v01_ei,
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x17,
+		.offset         = offsetof(
+					struct qmi_uaudio_stream_resp_msg_v01,
+					usb_audio_spec_revision_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_2_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint16_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x17,
+		.offset         = offsetof(
+					struct qmi_uaudio_stream_resp_msg_v01,
+					usb_audio_spec_revision),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x18,
+		.offset         = offsetof(
+					struct qmi_uaudio_stream_resp_msg_v01,
+					data_path_delay_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x18,
+		.offset         = offsetof(
+					struct qmi_uaudio_stream_resp_msg_v01,
+					data_path_delay),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x19,
+		.offset         = offsetof(
+					struct qmi_uaudio_stream_resp_msg_v01,
+					usb_audio_subslot_size_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x19,
+		.offset         = offsetof(
+					struct qmi_uaudio_stream_resp_msg_v01,
+					usb_audio_subslot_size),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x1A,
+		.offset         = offsetof(
+					struct qmi_uaudio_stream_resp_msg_v01,
+					xhci_mem_info_valid),
+	},
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = 1,
+		.elem_size      = sizeof(struct apps_mem_info_v01),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x1A,
+		.offset         = offsetof(
+					struct qmi_uaudio_stream_resp_msg_v01,
+					xhci_mem_info),
+		.ei_array      = apps_mem_info_v01_ei,
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x1B,
+		.offset         = offsetof(
+					struct qmi_uaudio_stream_resp_msg_v01,
+					interrupter_num_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x1B,
+		.offset         = offsetof(
+					struct qmi_uaudio_stream_resp_msg_v01,
+					interrupter_num),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x1C,
+		.offset         = offsetof(
+					struct qmi_uaudio_stream_resp_msg_v01,
+					speed_info_valid),
+	},
+	{
+		.data_type      = QMI_SIGNED_4_BYTE_ENUM,
+		.elem_len       = 1,
+		.elem_size      = sizeof(enum usb_audio_device_speed_enum_v01),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x1C,
+		.offset         = offsetof(
+					struct qmi_uaudio_stream_resp_msg_v01,
+					speed_info),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x1D,
+		.offset         = offsetof(
+					struct qmi_uaudio_stream_resp_msg_v01,
+					controller_num_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x1D,
+		.offset         = offsetof(
+					struct qmi_uaudio_stream_resp_msg_v01,
+					controller_num),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
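+/*
+ * QMI_UAUDIO_STREAM_IND payload: unsolicited device events (connect,
+ * disconnect, suspend, resume) carrying the same optional stream
+ * description TLVs as the response message.
+ */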
+struct elem_info qmi_uaudio_stream_ind_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_SIGNED_4_BYTE_ENUM,
+		.elem_len       = 1,
+		.elem_size      = sizeof(
+				enum usb_audio_device_indication_enum_v01),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x01,
+		.offset         = offsetof(struct qmi_uaudio_stream_ind_msg_v01,
+					   dev_event),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint32_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x02,
+		.offset         = offsetof(struct qmi_uaudio_stream_ind_msg_v01,
+					   slot_id),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(struct qmi_uaudio_stream_ind_msg_v01,
+					   usb_token_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint32_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(struct qmi_uaudio_stream_ind_msg_v01,
+					   usb_token),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x11,
+		.offset         = offsetof(struct qmi_uaudio_stream_ind_msg_v01,
+					   std_as_opr_intf_desc_valid),
+	},
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = 1,
+		.elem_size      = sizeof(struct usb_interface_descriptor_v01),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x11,
+		.offset         = offsetof(struct qmi_uaudio_stream_ind_msg_v01,
+					   std_as_opr_intf_desc),
+		.ei_array      = usb_interface_descriptor_v01_ei,
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x12,
+		.offset         = offsetof(struct qmi_uaudio_stream_ind_msg_v01,
+					   std_as_data_ep_desc_valid),
+	},
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = 1,
+		.elem_size      = sizeof(struct usb_endpoint_descriptor_v01),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x12,
+		.offset         = offsetof(struct qmi_uaudio_stream_ind_msg_v01,
+					   std_as_data_ep_desc),
+		.ei_array      = usb_endpoint_descriptor_v01_ei,
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x13,
+		.offset         = offsetof(struct qmi_uaudio_stream_ind_msg_v01,
+					   std_as_sync_ep_desc_valid),
+	},
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = 1,
+		.elem_size      = sizeof(struct usb_endpoint_descriptor_v01),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x13,
+		.offset         = offsetof(struct qmi_uaudio_stream_ind_msg_v01,
+					   std_as_sync_ep_desc),
+		.ei_array      = usb_endpoint_descriptor_v01_ei,
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x14,
+		.offset         = offsetof(struct qmi_uaudio_stream_ind_msg_v01,
+					   usb_audio_spec_revision_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_2_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint16_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x14,
+		.offset         = offsetof(struct qmi_uaudio_stream_ind_msg_v01,
+					   usb_audio_spec_revision),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x15,
+		.offset         = offsetof(struct qmi_uaudio_stream_ind_msg_v01,
+					   data_path_delay_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x15,
+		.offset         = offsetof(struct qmi_uaudio_stream_ind_msg_v01,
+					   data_path_delay),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x16,
+		.offset         = offsetof(struct qmi_uaudio_stream_ind_msg_v01,
+					   usb_audio_subslot_size_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x16,
+		.offset         = offsetof(struct qmi_uaudio_stream_ind_msg_v01,
+					   usb_audio_subslot_size),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x17,
+		.offset         = offsetof(struct qmi_uaudio_stream_ind_msg_v01,
+					   xhci_mem_info_valid),
+	},
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = 1,
+		.elem_size      = sizeof(struct apps_mem_info_v01),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x17,
+		.offset         = offsetof(struct qmi_uaudio_stream_ind_msg_v01,
+					   xhci_mem_info),
+		.ei_array      = apps_mem_info_v01_ei,
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x18,
+		.offset         = offsetof(struct qmi_uaudio_stream_ind_msg_v01,
+					   interrupter_num_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x18,
+		.offset         = offsetof(struct qmi_uaudio_stream_ind_msg_v01,
+					   interrupter_num),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x19,
+		.offset         = offsetof(struct qmi_uaudio_stream_ind_msg_v01,
+					   controller_num_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x19,
+		.offset         = offsetof(struct qmi_uaudio_stream_ind_msg_v01,
+					   controller_num),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
--- /dev/null	2019-10-24 16:47:25.704025805 +0200
+++ linux-4.4.115-fbx/sound/usb/usb_audio_qmi_v01.h	2019-10-29 09:26:26.197228172 +0100
@@ -0,0 +1,167 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef USB_QMI_V01_H
+#define USB_QMI_V01_H
+
+#include <linux/kernel.h>	/* INT_MIN / INT_MAX */
+#include <linux/types.h>	/* uintNN_t */
+#include <linux/qmi_encdec.h>	/* struct elem_info */
+#include <soc/qcom/msm_qmi_interface.h>	/* struct qmi_response_type_v01 */
+
+#define UAUDIO_STREAM_SERVICE_ID_V01 0x41D
+#define UAUDIO_STREAM_SERVICE_VERS_V01 0x01
+
+#define QMI_UAUDIO_STREAM_RESP_V01 0x0001
+#define QMI_UAUDIO_STREAM_REQ_V01 0x0001
+#define QMI_UADUIO_STREAM_IND_V01 0x0001	/* sic: spelling kept, matching existing users */
+
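+/*
+ * One VA/PA/size triple per memory region shared with the peer.  Judging
+ * by the field names, apps_mem_info groups the xHCI structures handed to
+ * the remote audio processor: event ring, data/sync transfer rings, the
+ * transfer buffer and the device context base address array (dcba).
+ */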
+struct mem_info_v01 {
+	uint64_t va;
+	uint64_t pa;
+	uint32_t size;
+};
+
+struct apps_mem_info_v01 {
+	struct mem_info_v01 evt_ring;
+	struct mem_info_v01 tr_data;
+	struct mem_info_v01 tr_sync;
+	struct mem_info_v01 xfer_buff;
+	struct mem_info_v01 dcba;
+};
+
+struct usb_endpoint_descriptor_v01 {
+	uint8_t bLength;
+	uint8_t bDescriptorType;
+	uint8_t bEndpointAddress;
+	uint8_t bmAttributes;
+	uint16_t wMaxPacketSize;
+	uint8_t bInterval;
+	uint8_t bRefresh;
+	uint8_t bSynchAddress;
+};
+
+struct usb_interface_descriptor_v01 {
+	uint8_t bLength;
+	uint8_t bDescriptorType;
+	uint8_t bInterfaceNumber;
+	uint8_t bAlternateSetting;
+	uint8_t bNumEndpoints;
+	uint8_t bInterfaceClass;
+	uint8_t bInterfaceSubClass;
+	uint8_t bInterfaceProtocol;
+	uint8_t iInterface;
+};
+
+enum usb_audio_stream_status_enum_v01 {
+	USB_AUDIO_STREAM_STATUS_ENUM_MIN_VAL_V01 = INT_MIN,
+	USB_AUDIO_STREAM_REQ_SUCCESS_V01 = 0,
+	USB_AUDIO_STREAM_REQ_FAILURE_V01 = 1,
+	USB_AUDIO_STREAM_REQ_FAILURE_NOT_FOUND_V01 = 2,
+	USB_AUDIO_STREAM_REQ_FAILURE_INVALID_PARAM_V01 = 3,
+	USB_AUDIO_STREAM_REQ_FAILURE_MEMALLOC_V01 = 4,
+	USB_AUDIO_STREAM_STATUS_ENUM_MAX_VAL_V01 = INT_MAX,
+};
+
+enum usb_audio_device_indication_enum_v01 {
+	USB_AUDIO_DEVICE_INDICATION_ENUM_MIN_VAL_V01 = INT_MIN,
+	USB_AUDIO_DEV_CONNECT_V01 = 0,
+	USB_AUDIO_DEV_DISCONNECT_V01 = 1,
+	USB_AUDIO_DEV_SUSPEND_V01 = 2,
+	USB_AUDIO_DEV_RESUME_V01 = 3,
+	USB_AUDIO_DEVICE_INDICATION_ENUM_MAX_VAL_V01 = INT_MAX,
+};
+
+enum usb_audio_device_speed_enum_v01 {
+	USB_AUDIO_DEVICE_SPEED_ENUM_MIN_VAL_V01 = INT_MIN,
+	USB_AUDIO_DEVICE_SPEED_INVALID_V01 = 0,
+	USB_AUDIO_DEVICE_SPEED_LOW_V01 = 1,
+	USB_AUDIO_DEVICE_SPEED_FULL_V01 = 2,
+	USB_AUDIO_DEVICE_SPEED_HIGH_V01 = 3,
+	USB_AUDIO_DEVICE_SPEED_SUPER_V01 = 4,
+	USB_AUDIO_DEVICE_SPEED_SUPER_PLUS_V01 = 5,
+	USB_AUDIO_DEVICE_SPEED_ENUM_MAX_VAL_V01 = INT_MAX,
+};
+
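+/*
+ * C-side message images.  Every optional TLV is represented by a value
+ * field paired with a _valid flag: senders must set the flag for the
+ * encoder to emit the TLV, and receivers must check it before trusting
+ * the value.
+ */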
+struct qmi_uaudio_stream_req_msg_v01 {
+	uint8_t enable;
+	uint32_t usb_token;
+	uint8_t audio_format_valid;
+	uint32_t audio_format;
+	uint8_t number_of_ch_valid;
+	uint32_t number_of_ch;
+	uint8_t bit_rate_valid;
+	uint32_t bit_rate;
+	uint8_t xfer_buff_size_valid;
+	uint32_t xfer_buff_size;
+};
+#define QMI_UAUDIO_STREAM_REQ_MSG_V01_MAX_MSG_LEN 39
+extern struct elem_info qmi_uaudio_stream_req_msg_v01_ei[];
+
+struct qmi_uaudio_stream_resp_msg_v01 {
+	struct qmi_response_type_v01 resp;
+	uint8_t status_valid;
+	enum usb_audio_stream_status_enum_v01 status;
+	uint8_t internal_status_valid;
+	uint32_t internal_status;
+	uint8_t slot_id_valid;
+	uint32_t slot_id;
+	uint8_t usb_token_valid;
+	uint32_t usb_token;
+	uint8_t std_as_opr_intf_desc_valid;
+	struct usb_interface_descriptor_v01 std_as_opr_intf_desc;
+	uint8_t std_as_data_ep_desc_valid;
+	struct usb_endpoint_descriptor_v01 std_as_data_ep_desc;
+	uint8_t std_as_sync_ep_desc_valid;
+	struct usb_endpoint_descriptor_v01 std_as_sync_ep_desc;
+	uint8_t usb_audio_spec_revision_valid;
+	uint16_t usb_audio_spec_revision;
+	uint8_t data_path_delay_valid;
+	uint8_t data_path_delay;
+	uint8_t usb_audio_subslot_size_valid;
+	uint8_t usb_audio_subslot_size;
+	uint8_t xhci_mem_info_valid;
+	struct apps_mem_info_v01 xhci_mem_info;
+	uint8_t interrupter_num_valid;
+	uint8_t interrupter_num;
+	uint8_t speed_info_valid;
+	enum usb_audio_device_speed_enum_v01 speed_info;
+	uint8_t controller_num_valid;
+	uint8_t controller_num;
+};
+#define QMI_UAUDIO_STREAM_RESP_MSG_V01_MAX_MSG_LEN 202
+extern struct elem_info qmi_uaudio_stream_resp_msg_v01_ei[];
+
+struct qmi_uaudio_stream_ind_msg_v01 {
+	enum usb_audio_device_indication_enum_v01 dev_event;
+	uint32_t slot_id;
+	uint8_t usb_token_valid;
+	uint32_t usb_token;
+	uint8_t std_as_opr_intf_desc_valid;
+	struct usb_interface_descriptor_v01 std_as_opr_intf_desc;
+	uint8_t std_as_data_ep_desc_valid;
+	struct usb_endpoint_descriptor_v01 std_as_data_ep_desc;
+	uint8_t std_as_sync_ep_desc_valid;
+	struct usb_endpoint_descriptor_v01 std_as_sync_ep_desc;
+	uint8_t usb_audio_spec_revision_valid;
+	uint16_t usb_audio_spec_revision;
+	uint8_t data_path_delay_valid;
+	uint8_t data_path_delay;
+	uint8_t usb_audio_subslot_size_valid;
+	uint8_t usb_audio_subslot_size;
+	uint8_t xhci_mem_info_valid;
+	struct apps_mem_info_v01 xhci_mem_info;
+	uint8_t interrupter_num_valid;
+	uint8_t interrupter_num;
+	uint8_t controller_num_valid;
+	uint8_t controller_num;
+};
+#define QMI_UAUDIO_STREAM_IND_MSG_V01_MAX_MSG_LEN 181
+extern struct elem_info qmi_uaudio_stream_ind_msg_v01_ei[];
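+/*
+ * Usage sketch (illustrative only, not generated IDL output): assuming the
+ * msm_qmi_interface service API, the apps-side service would announce a
+ * device event roughly like this:
+ *
+ *	struct qmi_uaudio_stream_ind_msg_v01 ind = {
+ *		.dev_event = USB_AUDIO_DEV_CONNECT_V01,
+ *		.slot_id   = slot_id,
+ *	};
+ *	struct msg_desc ind_desc = {
+ *		.msg_id      = QMI_UADUIO_STREAM_IND_V01,
+ *		.max_msg_len = QMI_UAUDIO_STREAM_IND_MSG_V01_MAX_MSG_LEN,
+ *		.ei_array    = qmi_uaudio_stream_ind_msg_v01_ei,
+ *	};
+ *	qmi_send_ind(svc_hdl, conn_h, &ind_desc, &ind, sizeof(ind));
+ *
+ * svc_hdl and conn_h stand in for the service handle and the client
+ * connection owned by the service code.
+ */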
+
+#endif /* USB_QMI_V01_H */
